From 95b4fc8ba644fcb7d321c84218de1fd957d85ab8 Mon Sep 17 00:00:00 2001 From: Beau Sievers Date: Sun, 10 Apr 2016 11:30:20 -0400 Subject: [PATCH 0001/1665] fsl.MotionOutliers output spec modified so out_file does not need to exist --- nipype/interfaces/fsl/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/fsl/utils.py b/nipype/interfaces/fsl/utils.py index 3edc3a73e6..545be0e2d5 100644 --- a/nipype/interfaces/fsl/utils.py +++ b/nipype/interfaces/fsl/utils.py @@ -2045,7 +2045,7 @@ class MotionOutliersInputSpec(FSLCommandInputSpec): class MotionOutliersOutputSpec(TraitedSpec): - out_file = File(exists=True) + out_file = File() out_metric_values = File(exists=True) out_metric_plot = File(exists=True) From 1108430872eee2cad77416c489e4ec3fc063870a Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 25 Jul 2018 14:03:56 -0400 Subject: [PATCH 0002/1665] CI: Drop Python 2 tests --- .circleci/{test_py3_docs.sh => build_docs.sh} | 0 .circleci/config.yml | 81 +++++++------------ ...l1.sh => test_fmri_fsl_feeds_linear_l1.sh} | 0 ...l1.sh => test_fmri_fsl_reuse_linear_l1.sh} | 0 ...h => test_fmri_spm_dartel_multiproc_l1.sh} | 0 ...h => test_fmri_spm_dartel_multiproc_l2.sh} | 0 ...inear_3d.sh => test_fmri_spm_linear_3d.sh} | 0 ...inear_4d.sh => test_fmri_spm_linear_4d.sh} | 0 ...h => test_fmri_spm_nested_multiproc_l1.sh} | 0 ...h => test_fmri_spm_nested_multiproc_l2.sh} | 2 +- .circleci/test_py2_pytest.sh | 3 - .../{test_py3_pytest.sh => test_pytest.sh} | 0 12 files changed, 30 insertions(+), 56 deletions(-) rename .circleci/{test_py3_docs.sh => build_docs.sh} (100%) rename .circleci/{test_py3_fmri_fsl_feeds_linear_l1.sh => test_fmri_fsl_feeds_linear_l1.sh} (100%) rename .circleci/{test_py3_fmri_fsl_reuse_linear_l1.sh => test_fmri_fsl_reuse_linear_l1.sh} (100%) rename .circleci/{test_py3_fmri_spm_dartel_multiproc_l1.sh => test_fmri_spm_dartel_multiproc_l1.sh} (100%) rename .circleci/{test_py3_fmri_spm_dartel_multiproc_l2.sh => test_fmri_spm_dartel_multiproc_l2.sh} (100%) rename .circleci/{test_py3_fmri_spm_linear_3d.sh => test_fmri_spm_linear_3d.sh} (100%) rename .circleci/{test_py3_fmri_spm_linear_4d.sh => test_fmri_spm_linear_4d.sh} (100%) rename .circleci/{test_py3_fmri_spm_nested_multiproc_l1.sh => test_fmri_spm_nested_multiproc_l1.sh} (100%) rename .circleci/{test_py2_fmri_spm_nested_multiproc_l2.sh => test_fmri_spm_nested_multiproc_l2.sh} (75%) delete mode 100644 .circleci/test_py2_pytest.sh rename .circleci/{test_py3_pytest.sh => test_pytest.sh} (100%) diff --git a/.circleci/test_py3_docs.sh b/.circleci/build_docs.sh similarity index 100% rename from .circleci/test_py3_docs.sh rename to .circleci/build_docs.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index ef55a28734..54b7180002 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -56,20 +56,6 @@ _build_main_image_py36: &build_main_image_py36 --build-arg VCS_REF="$(git rev-parse --short HEAD)" \ --build-arg VERSION="${CIRCLE_TAG}" /home/circleci/nipype -_build_main_image_py27: &build_main_image_py27 - name: Build main image (py27) - no_output_timeout: 60m - command: | - tools/retry_cmd.sh -n 5 -s 15 \ - docker build \ - --rm=false \ - --tag nipype/nipype:py27 \ - --build-arg PYTHON_VERSION_MAJOR=2 \ - --build-arg PYTHON_VERSION_MINOR=7 \ - --build-arg BUILD_DATE="$(date -u +"%Y-%m-%dT%H:%M:%SZ")" \ - --build-arg VCS_REF="$(git rev-parse --short HEAD)" \ - --build-arg VERSION="${CIRCLE_TAG}-py27" /home/circleci/nipype - _download_test_data: &_download_test_data 
name: Download test data no_output_timeout: 20m @@ -161,28 +147,22 @@ jobs: - run: *modify_nipype_version - run: *get_base_image - run: *build_main_image_py36 - - run: *build_main_image_py27 - run: *_get_codecov - run: *_download_test_data - run: *prepare_working_directory - run: - name: Run pytests (py36) - no_output_timeout: 30m - environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_py3_pytest.sh - - run: - name: Run pytests (py27) + name: Run pytests no_output_timeout: 30m environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_py2_pytest.sh + command: bash -ux /home/circleci/nipype/.circleci/test_pytest.sh - run: *_run_codecov_coverage - store_artifacts: *store_artifacts_kwds - store_test_results: *store_artifacts_kwds - run: - name: Build docs (py36) + name: Build docs no_output_timeout: 30m environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_py3_docs.sh + command: bash -ux /home/circleci/nipype/.circleci/build_docs.sh - store_artifacts: path: /home/circleci/work/docs - run: @@ -192,16 +172,15 @@ jobs: if [ "$CIRCLE_BRANCH" = "master" -a -z "$CIRCLE_PULL_REQUEST" ]; then docker save nipype/nipype:base \ nipype/nipype:latest \ - nipype/nipype:py27 \ - nipype/nipype:py36 | gzip -1 > /tmp/docker/nipype-base-latest-py36-py27.tar.gz \ - && du -h /tmp/docker/nipype-base-latest-py36-py27.tar.gz + nipype/nipype:py36 | gzip -1 > /tmp/docker/nipype-base-latest-py36.tar.gz \ + && du -h /tmp/docker/nipype-base-latest-py36.tar.gz fi - persist_to_workspace: root: /tmp paths: - docker - test_py3_fmri_fsl_spm: + test_fmri_fsl_spm: machine: *machine_kwds working_directory: /home/circleci/nipype steps: @@ -218,24 +197,24 @@ jobs: - run: *_download_test_data - run: *prepare_working_directory - run: - name: Run FSL reuse pipeline (py36) + name: Run FSL reuse pipeline no_output_timeout: 40m environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_py3_fmri_fsl_reuse_linear_l1.sh + command: bash -ux /home/circleci/nipype/.circleci/test_fmri_fsl_reuse_linear_l1.sh - run: - name: Run SPM test workflow - 3D inputs (py36) + name: Run SPM test workflow - 3D inputs no_output_timeout: 40m environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_py3_fmri_spm_linear_3d.sh + command: bash -ux /home/circleci/nipype/.circleci/test_fmri_spm_linear_3d.sh - run: - name: Run SPM test workflow - 4D inputs (py36) + name: Run SPM test workflow - 4D inputs no_output_timeout: 40m environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_py3_fmri_spm_linear_4d.sh + command: bash -ux /home/circleci/nipype/.circleci/test_fmri_spm_linear_4d.sh - run: *_run_codecov_smoke - store_artifacts: *store_artifacts_kwds - test_py3_fmri_spm_dartel_multiproc: + test_fmri_spm_dartel_multiproc: machine: *machine_kwds working_directory: /home/circleci/nipype steps: @@ -252,15 +231,15 @@ jobs: - run: *_download_test_data - run: *prepare_working_directory - run: - name: Run SPM DARTEL Level 1 pipeline (py36) + name: Run SPM DARTEL Level 1 pipeline no_output_timeout: 1h environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_py3_fmri_spm_dartel_multiproc_l1.sh + command: bash -ux /home/circleci/nipype/.circleci/test_fmri_spm_dartel_multiproc_l1.sh - run: - name: Run SPM DARTEL Level 2 pipeline (py36) + name: Run SPM DARTEL Level 2 pipeline no_output_timeout: 30m environment: *test_environment - command: bash -ux 
/home/circleci/nipype/.circleci/test_py3_fmri_spm_dartel_multiproc_l2.sh + command: bash -ux /home/circleci/nipype/.circleci/test_fmri_spm_dartel_multiproc_l2.sh - run: *_run_codecov_smoke - store_artifacts: *store_artifacts_kwds @@ -277,25 +256,24 @@ jobs: - run: *modify_nipype_version - run: *get_base_image - run: *build_main_image_py36 - - run: *build_main_image_py27 - run: *_get_codecov - run: *_download_test_data - run: *prepare_working_directory - run: - name: Run SPM Nested Level 1 pipeline (py36) + name: Run SPM Nested Level 1 pipeline no_output_timeout: 1h environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_py3_fmri_spm_nested_multiproc_l1.sh + command: bash -ux /home/circleci/nipype/.circleci/test_fmri_spm_nested_multiproc_l1.sh - run: - name: Run SPM Nested Level 2 pipeline (py27) + name: Run SPM Nested Level 2 pipeline no_output_timeout: 30m environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_py2_fmri_spm_nested_multiproc_l2.sh + command: bash -ux /home/circleci/nipype/.circleci/test_fmri_spm_nested_multiproc_l2.sh - run: - name: Run FSL FEEDS pipeline (py36) + name: Run FSL FEEDS pipeline no_output_timeout: 40m environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_py3_fmri_fsl_feeds_linear_l1.sh + command: bash -ux /home/circleci/nipype/.circleci/test_fmri_fsl_feeds_linear_l1.sh - run: *_run_codecov_smoke - store_artifacts: *store_artifacts_kwds @@ -310,7 +288,7 @@ jobs: name: Load saved Docker images. no_output_timeout: 60m command: | - docker load < /tmp/docker/nipype-base-latest-py36-py27.tar.gz + docker load < /tmp/docker/nipype-base-latest-py36.tar.gz - run: name: Push to DockerHub no_output_timeout: 120m @@ -319,7 +297,6 @@ jobs: docker push nipype/nipype:base docker push nipype/nipype:latest docker push nipype/nipype:py36 - docker push nipype/nipype:py27 - run: name: Move pruned Dockerfile to /tmp/docker/cache directory command: | @@ -403,10 +380,10 @@ workflows: only: /.*/ requires: - compare_base_dockerfiles - - test_py3_fmri_fsl_spm: + - test_fmri_fsl_spm: requires: - compare_base_dockerfiles - - test_py3_fmri_spm_dartel_multiproc: + - test_fmri_spm_dartel_multiproc: requires: - compare_base_dockerfiles - test_fmri_spm_nested_fsl_feeds: @@ -419,8 +396,8 @@ workflows: requires: - test_pytest - test_fmri_spm_nested_fsl_feeds - - test_py3_fmri_fsl_spm - - test_py3_fmri_spm_dartel_multiproc + - test_fmri_fsl_spm + - test_fmri_spm_dartel_multiproc - deploy_pypi: filters: branches: diff --git a/.circleci/test_py3_fmri_fsl_feeds_linear_l1.sh b/.circleci/test_fmri_fsl_feeds_linear_l1.sh similarity index 100% rename from .circleci/test_py3_fmri_fsl_feeds_linear_l1.sh rename to .circleci/test_fmri_fsl_feeds_linear_l1.sh diff --git a/.circleci/test_py3_fmri_fsl_reuse_linear_l1.sh b/.circleci/test_fmri_fsl_reuse_linear_l1.sh similarity index 100% rename from .circleci/test_py3_fmri_fsl_reuse_linear_l1.sh rename to .circleci/test_fmri_fsl_reuse_linear_l1.sh diff --git a/.circleci/test_py3_fmri_spm_dartel_multiproc_l1.sh b/.circleci/test_fmri_spm_dartel_multiproc_l1.sh similarity index 100% rename from .circleci/test_py3_fmri_spm_dartel_multiproc_l1.sh rename to .circleci/test_fmri_spm_dartel_multiproc_l1.sh diff --git a/.circleci/test_py3_fmri_spm_dartel_multiproc_l2.sh b/.circleci/test_fmri_spm_dartel_multiproc_l2.sh similarity index 100% rename from .circleci/test_py3_fmri_spm_dartel_multiproc_l2.sh rename to .circleci/test_fmri_spm_dartel_multiproc_l2.sh diff --git 
a/.circleci/test_py3_fmri_spm_linear_3d.sh b/.circleci/test_fmri_spm_linear_3d.sh similarity index 100% rename from .circleci/test_py3_fmri_spm_linear_3d.sh rename to .circleci/test_fmri_spm_linear_3d.sh diff --git a/.circleci/test_py3_fmri_spm_linear_4d.sh b/.circleci/test_fmri_spm_linear_4d.sh similarity index 100% rename from .circleci/test_py3_fmri_spm_linear_4d.sh rename to .circleci/test_fmri_spm_linear_4d.sh diff --git a/.circleci/test_py3_fmri_spm_nested_multiproc_l1.sh b/.circleci/test_fmri_spm_nested_multiproc_l1.sh similarity index 100% rename from .circleci/test_py3_fmri_spm_nested_multiproc_l1.sh rename to .circleci/test_fmri_spm_nested_multiproc_l1.sh diff --git a/.circleci/test_py2_fmri_spm_nested_multiproc_l2.sh b/.circleci/test_fmri_spm_nested_multiproc_l2.sh similarity index 75% rename from .circleci/test_py2_fmri_spm_nested_multiproc_l2.sh rename to .circleci/test_fmri_spm_nested_multiproc_l2.sh index 1aa2a4f8c5..c0926be148 100644 --- a/.circleci/test_py2_fmri_spm_nested_multiproc_l2.sh +++ b/.circleci/test_fmri_spm_nested_multiproc_l2.sh @@ -1,3 +1,3 @@ #!/bin/bash -docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work -e NIPYPE_NUMBER_OF_CPUS=4 -e NIPYPE_RESOURCE_MONITOR=1 "${DOCKER_IMAGE}:py27" /usr/bin/run_examples.sh fmri_spm_nested MultiProc /data/examples/ l2pipeline +docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work -e NIPYPE_NUMBER_OF_CPUS=4 -e NIPYPE_RESOURCE_MONITOR=1 "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh fmri_spm_nested MultiProc /data/examples/ l2pipeline diff --git a/.circleci/test_py2_pytest.sh b/.circleci/test_py2_pytest.sh deleted file mode 100644 index df1489d240..0000000000 --- a/.circleci/test_py2_pytest.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work -e CI_SKIP_TEST=1 -e NIPYPE_RESOURCE_MONITOR=1 "${DOCKER_IMAGE}:py27" /usr/bin/run_pytests.sh diff --git a/.circleci/test_py3_pytest.sh b/.circleci/test_pytest.sh similarity index 100% rename from .circleci/test_py3_pytest.sh rename to .circleci/test_pytest.sh From 4a84a0275eaf207b951ee309b84d1a8fdba002c9 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Wed, 25 Jul 2018 15:16:02 -0400 Subject: [PATCH 0003/1665] RF: Remove builtins --- nipype/__init__.py | 3 - nipype/algorithms/confounds.py | 4 - nipype/algorithms/icc.py | 3 - nipype/algorithms/mesh.py | 4 - nipype/algorithms/metrics.py | 4 - nipype/algorithms/misc.py | 10 +- nipype/algorithms/modelgen.py | 4 - nipype/algorithms/rapidart.py | 4 - nipype/algorithms/stats.py | 2 - nipype/algorithms/tests/test_icc_anova.py | 1 - .../algorithms/tests/test_normalize_tpms.py | 1 - nipype/algorithms/tests/test_rapidart.py | 2 - nipype/caching/memory.py | 4 - nipype/info.py | 3 - nipype/interfaces/__init__.py | 2 - nipype/interfaces/afni/base.py | 7 +- nipype/interfaces/afni/preprocess.py | 4 - nipype/interfaces/afni/utils.py | 4 - nipype/interfaces/ants/base.py | 4 - nipype/interfaces/ants/legacy.py | 325 ------------------ nipype/interfaces/ants/registration.py | 3 - nipype/interfaces/ants/resampling.py | 3 - nipype/interfaces/ants/segmentation.py | 4 - .../ants/tests/test_spec_JointFusion.py | 2 - nipype/interfaces/base/core.py | 8 - nipype/interfaces/base/specs.py | 4 - nipype/interfaces/base/support.py | 4 - nipype/interfaces/base/tests/test_core.py | 5 - nipype/interfaces/base/tests/test_specs.py | 4 - nipype/interfaces/base/tests/test_support.py | 6 +- nipype/interfaces/base/traits_extension.py | 7 - nipype/interfaces/cmtk/cmtk.py | 4 - nipype/interfaces/cmtk/nx.py | 4 - nipype/interfaces/cmtk/parcellation.py | 4 - nipype/interfaces/dcm2nii.py | 3 - nipype/interfaces/diffusion_toolkit/base.py | 3 - nipype/interfaces/diffusion_toolkit/dti.py | 4 - nipype/interfaces/diffusion_toolkit/odf.py | 4 - nipype/interfaces/dipy/reconstruction.py | 6 - nipype/interfaces/dipy/simulate.py | 3 - nipype/interfaces/elastix/registration.py | 4 - nipype/interfaces/elastix/utils.py | 4 - nipype/interfaces/freesurfer/base.py | 4 - nipype/interfaces/freesurfer/preprocess.py | 4 - .../freesurfer/tests/test_preprocess.py | 1 - .../interfaces/freesurfer/tests/test_utils.py | 3 - nipype/interfaces/freesurfer/utils.py | 4 - nipype/interfaces/fsl/dti.py | 4 - nipype/interfaces/fsl/epi.py | 4 - nipype/interfaces/fsl/model.py | 4 - nipype/interfaces/fsl/preprocess.py | 4 - nipype/interfaces/fsl/tests/test_dti.py | 2 - nipype/interfaces/fsl/tests/test_maths.py | 3 - nipype/interfaces/fsl/tests/test_model.py | 3 - .../interfaces/fsl/tests/test_preprocess.py | 4 - nipype/interfaces/fsl/utils.py | 4 - nipype/interfaces/io.py | 4 - nipype/interfaces/matlab.py | 3 - nipype/interfaces/minc/base.py | 3 - nipype/interfaces/minc/minc.py | 4 - nipype/interfaces/mne/base.py | 4 - nipype/interfaces/niftyreg/base.py | 4 - nipype/interfaces/niftyreg/reg.py | 4 - nipype/interfaces/niftyreg/regutils.py | 4 - nipype/interfaces/niftyseg/label_fusion.py | 2 - nipype/interfaces/nipy/model.py | 4 - nipype/interfaces/nipy/preprocess.py | 4 - nipype/interfaces/nitime/analysis.py | 4 - nipype/interfaces/slicer/generate_classes.py | 3 - nipype/interfaces/spm/base.py | 4 - nipype/interfaces/spm/model.py | 4 - nipype/interfaces/spm/preprocess.py | 4 - nipype/interfaces/spm/tests/test_base.py | 3 - nipype/interfaces/tests/test_io.py | 4 - nipype/interfaces/utility/base.py | 7 - nipype/interfaces/utility/csv.py | 7 - nipype/interfaces/utility/wrappers.py | 9 - nipype/pipeline/engine/base.py | 4 - nipype/pipeline/engine/nodes.py | 7 - nipype/pipeline/engine/tests/test_engine.py | 5 - nipype/pipeline/engine/tests/test_join.py | 4 - nipype/pipeline/engine/tests/test_nodes.py | 2 - nipype/pipeline/engine/tests/test_utils.py | 4 - 
nipype/pipeline/engine/utils.py | 9 +- nipype/pipeline/engine/workflows.py | 8 - nipype/pipeline/plugins/base.py | 4 - nipype/pipeline/plugins/dagman.py | 4 - nipype/pipeline/plugins/ipython.py | 21 +- nipype/pipeline/plugins/oar.py | 4 - nipype/pipeline/plugins/pbs.py | 4 - nipype/pipeline/plugins/pbsgraph.py | 4 - nipype/pipeline/plugins/sge.py | 5 - nipype/pipeline/plugins/sgegraph.py | 4 - nipype/pipeline/plugins/slurm.py | 4 - nipype/pipeline/plugins/slurmgraph.py | 4 - .../pipeline/plugins/tests/test_callback.py | 3 - .../tests/test_legacymultiproc_nondaemon.py | 4 - nipype/pipeline/plugins/tools.py | 4 - nipype/pkg_info.py | 6 - nipype/scripts/utils.py | 2 +- nipype/sphinxext/plot_workflow.py | 4 +- nipype/testing/fixtures.py | 6 - nipype/testing/utils.py | 9 +- nipype/utils/config.py | 6 - nipype/utils/docparse.py | 4 - nipype/utils/draw_gantt_chart.py | 8 +- nipype/utils/filemanip.py | 17 +- nipype/utils/functions.py | 4 +- nipype/utils/logger.py | 4 - nipype/utils/matlabtools.py | 4 - nipype/utils/misc.py | 14 +- nipype/utils/nipype2boutiques.py | 3 - nipype/utils/nipype_cmd.py | 3 - nipype/utils/onetime.py | 4 - nipype/utils/profiler.py | 5 +- nipype/utils/provenance.py | 7 - nipype/utils/spm_docs.py | 6 +- nipype/utils/tests/test_cmd.py | 6 - nipype/utils/tests/test_config.py | 3 - nipype/utils/tests/test_filemanip.py | 3 - nipype/utils/tests/test_misc.py | 4 - nipype/utils/tests/test_nipype2boutiques.py | 3 - nipype/utils/tests/test_provenance.py | 5 - nipype/utils/tmpdirs.py | 2 +- .../dmri/connectivity/group_connectivity.py | 13 +- nipype/workflows/dmri/dipy/denoise.py | 1 - nipype/workflows/dmri/fsl/epi.py | 3 - nipype/workflows/dmri/fsl/utils.py | 4 - nipype/workflows/fmri/fsl/estimate.py | 1 - nipype/workflows/misc/utils.py | 4 - nipype/workflows/rsfmri/fsl/resting.py | 4 - .../workflows/smri/ants/ANTSBuildTemplate.py | 5 - .../ants/antsRegistrationBuildTemplate.py | 3 - nipype/workflows/smri/niftyreg/groupwise.py | 3 - 134 files changed, 26 insertions(+), 894 deletions(-) delete mode 100644 nipype/interfaces/ants/legacy.py diff --git a/nipype/__init__.py b/nipype/__init__.py index ad961e7df4..cf23c06656 100644 --- a/nipype/__init__.py +++ b/nipype/__init__.py @@ -1,9 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) - import os from distutils.version import LooseVersion diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index d0f9a5733a..9aa80913c1 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -4,10 +4,6 @@ ''' Algorithms to compute confounds in :abbr:`fMRI (functional MRI)` ''' -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import range - import os import os.path as op diff --git a/nipype/algorithms/icc.py b/nipype/algorithms/icc.py index 5d5ec1c39f..ba7eb3bedc 100644 --- a/nipype/algorithms/icc.py +++ b/nipype/algorithms/icc.py @@ -1,7 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import range import os import numpy as np from numpy import ones, kron, mean, eye, hstack, dot, tile diff --git a/nipype/algorithms/mesh.py b/nipype/algorithms/mesh.py index eeb2ddbb80..6d3c675c41 100644 --- a/nipype/algorithms/mesh.py +++ b/nipype/algorithms/mesh.py @@ -4,10 +4,6 @@ """ Miscellaneous 
algorithms for 2D contours and 3D triangularized meshes handling """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import zip, str, bytes - import os.path as op import numpy as np from numpy import linalg as nla diff --git a/nipype/algorithms/metrics.py b/nipype/algorithms/metrics.py index d9074c48d3..9e925c8d54 100644 --- a/nipype/algorithms/metrics.py +++ b/nipype/algorithms/metrics.py @@ -5,10 +5,6 @@ Image assessment algorithms. Typical overlap and error computation measures to evaluate results from other processing units. ''' -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import zip, range - import os import os.path as op diff --git a/nipype/algorithms/misc.py b/nipype/algorithms/misc.py index 97906c1d69..67bd677c6f 100644 --- a/nipype/algorithms/misc.py +++ b/nipype/algorithms/misc.py @@ -4,11 +4,6 @@ ''' Miscellaneous algorithms ''' -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import str, zip, range, open -from future.utils import raise_from - import os import os.path as op @@ -794,9 +789,8 @@ def _run_interface(self, runtime): try: import pandas as pd except ImportError as e: - raise_from( - ImportError('This interface requires pandas ' - '(http://pandas.pydata.org/) to run.'), e) + raise ImportError('This interface requires pandas ' + '(http://pandas.pydata.org/) to run.') from e try: import lockfile as pl diff --git a/nipype/algorithms/modelgen.py b/nipype/algorithms/modelgen.py index 7741139201..7a6cf438c7 100644 --- a/nipype/algorithms/modelgen.py +++ b/nipype/algorithms/modelgen.py @@ -11,10 +11,6 @@ * SpecifyModel: allows specification of sparse and non-sparse models """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import range, str, bytes, int - from copy import deepcopy import os diff --git a/nipype/algorithms/rapidart.py b/nipype/algorithms/rapidart.py index a4d5b592c9..af93c3d8a9 100644 --- a/nipype/algorithms/rapidart.py +++ b/nipype/algorithms/rapidart.py @@ -12,10 +12,6 @@ * StimulusCorrelation: determines correlation between stimuli schedule and movement/intensity parameters """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import open, range, str, bytes - import os from copy import deepcopy diff --git a/nipype/algorithms/stats.py b/nipype/algorithms/stats.py index 51a3bc9088..29a45f5844 100644 --- a/nipype/algorithms/stats.py +++ b/nipype/algorithms/stats.py @@ -4,8 +4,6 @@ """ Managing statistical maps """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os import nibabel as nb import numpy as np diff --git a/nipype/algorithms/tests/test_icc_anova.py b/nipype/algorithms/tests/test_icc_anova.py index ef19b7f410..79ed312f40 100644 --- a/nipype/algorithms/tests/test_icc_anova.py +++ b/nipype/algorithms/tests/test_icc_anova.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import division import numpy as np from nipype.algorithms.icc import ICC_rep_anova diff --git a/nipype/algorithms/tests/test_normalize_tpms.py b/nipype/algorithms/tests/test_normalize_tpms.py index a65cc66770..31eb2b96dd 100644 --- a/nipype/algorithms/tests/test_normalize_tpms.py +++ b/nipype/algorithms/tests/test_normalize_tpms.py @@ -3,7 +3,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from 
builtins import range import os import pytest diff --git a/nipype/algorithms/tests/test_rapidart.py b/nipype/algorithms/tests/test_rapidart.py index 9c29648626..fbdce11819 100644 --- a/nipype/algorithms/tests/test_rapidart.py +++ b/nipype/algorithms/tests/test_rapidart.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import division - import numpy as np import numpy.testing as npt diff --git a/nipype/caching/memory.py b/nipype/caching/memory.py index 9fcf694d4b..53bf92cce7 100644 --- a/nipype/caching/memory.py +++ b/nipype/caching/memory.py @@ -3,10 +3,6 @@ Using nipype with persistence and lazy recomputation but without explicit name-steps pipeline: getting back scope in command-line based programming. """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import object, open - import os import hashlib import pickle diff --git a/nipype/info.py b/nipype/info.py index 6b60da6603..f1d72f18e9 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -2,9 +2,6 @@ settings in setup.py, the nipy top-level docstring, and for building the docs. In setup.py in particular, we exec this file, so it cannot import nipy """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) - import sys # nipype version information. An empty version_extra corresponds to a diff --git a/nipype/interfaces/__init__.py b/nipype/interfaces/__init__.py index a19efa64e5..7ad7166476 100644 --- a/nipype/interfaces/__init__.py +++ b/nipype/interfaces/__init__.py @@ -6,8 +6,6 @@ Requires Packages to be installed """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) __docformat__ = 'restructuredtext' from .io import DataGrabber, DataSink, SelectFiles, BIDSDataGrabber diff --git a/nipype/interfaces/afni/base.py b/nipype/interfaces/afni/base.py index d4b8e474ff..d7b56d095a 100644 --- a/nipype/interfaces/afni/base.py +++ b/nipype/interfaces/afni/base.py @@ -2,11 +2,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Provide interface to AFNI commands.""" -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import object, str -from future.utils import raise_from - import os from sys import platform from distutils import spawn @@ -63,7 +58,7 @@ def output_type_to_ext(cls, outputtype): return cls.ftypes[outputtype] except KeyError as e: msg = 'Invalid AFNIOUTPUTTYPE: ', outputtype - raise_from(KeyError(msg), e) + raise KeyError(msg) from e @classmethod def outputtype(cls): diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index 4e56f7578b..c3b81187d6 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -3,10 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """AFNI preprocessing interfaces """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import open - import os import os.path as op diff --git a/nipype/interfaces/afni/utils.py b/nipype/interfaces/afni/utils.py index ce32d183ef..a799935bff 100644 --- a/nipype/interfaces/afni/utils.py +++ b/nipype/interfaces/afni/utils.py @@ -7,10 +7,6 @@ -------- See the docstrings of the individual classes for examples. 
""" -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import str, bytes - import os import os.path as op import re diff --git a/nipype/interfaces/ants/base.py b/nipype/interfaces/ants/base.py index 34b64a0ec1..e27cfc0920 100644 --- a/nipype/interfaces/ants/base.py +++ b/nipype/interfaces/ants/base.py @@ -2,10 +2,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """The ants module provides basic functions for interfacing with ANTS tools.""" -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import str - import os # Local imports diff --git a/nipype/interfaces/ants/legacy.py b/nipype/interfaces/ants/legacy.py deleted file mode 100644 index 40f2def728..0000000000 --- a/nipype/interfaces/ants/legacy.py +++ /dev/null @@ -1,325 +0,0 @@ -# -*- coding: utf-8 -*- -# NOTE: This implementation has been superceeded buy the antsApplyTransform -# implmeentation that more closely follows the strucutre and capabilities -# of the antsApplyTransform program. This implementation is here -# for backwards compatibility. -"""ANTS Apply Transforms interface -""" - -from builtins import range - -import os -from glob import glob - -from .base import ANTSCommand, ANTSCommandInputSpec -from ..base import TraitedSpec, File, traits, isdefined, OutputMultiPath -from ...utils.filemanip import split_filename - - -class antsIntroductionInputSpec(ANTSCommandInputSpec): - dimension = traits.Enum( - 3, - 2, - argstr='-d %d', - usedefault=True, - desc='image dimension (2 or 3)', - position=1) - reference_image = File( - exists=True, - argstr='-r %s', - desc='template file to warp to', - mandatory=True, - copyfile=True) - input_image = File( - exists=True, - argstr='-i %s', - desc='input image to warp to template', - mandatory=True, - copyfile=False) - force_proceed = traits.Bool( - argstr='-f 1', - desc=('force script to proceed even if headers ' - 'may be incompatible')) - inverse_warp_template_labels = traits.Bool( - argstr='-l', - desc=('Applies inverse warp to the template labels ' - 'to estimate label positions in target space (use ' - 'for template-based segmentation)')) - max_iterations = traits.List( - traits.Int, - argstr='-m %s', - sep='x', - desc=('maximum number of iterations (must be ' - 'list of integers in the form [J,K,L...]: ' - 'J = coarsest resolution iterations, K = ' - 'middle resolution interations, L = fine ' - 'resolution iterations')) - bias_field_correction = traits.Bool( - argstr='-n 1', - desc=('Applies bias field correction to moving ' - 'image')) - similarity_metric = traits.Enum( - 'PR', - 'CC', - 'MI', - 'MSQ', - argstr='-s %s', - desc=('Type of similartiy metric used for registration ' - '(CC = cross correlation, MI = mutual information, ' - 'PR = probability mapping, MSQ = mean square difference)')) - transformation_model = traits.Enum( - 'GR', - 'EL', - 'SY', - 'S2', - 'EX', - 'DD', - 'RI', - 'RA', - argstr='-t %s', - usedefault=True, - desc=('Type of transofmration model used for registration ' - '(EL = elastic transformation model, SY = SyN with time, ' - 'arbitrary number of time points, S2 = SyN with time ' - 'optimized for 2 time points, GR = greedy SyN, EX = ' - 'exponential, DD = diffeomorphic demons style exponential ' - 'mapping, RI = purely rigid, RA = affine rigid')) - out_prefix = traits.Str( - 'ants_', - argstr='-o %s', - usedefault=True, - desc=('Prefix that is prepended to all output ' - 'files (default = 
ants_)')) - quality_check = traits.Bool( - argstr='-q 1', desc='Perform a quality check of the result') - - -class antsIntroductionOutputSpec(TraitedSpec): - affine_transformation = File( - exists=True, desc='affine (prefix_Affine.txt)') - warp_field = File(exists=True, desc='warp field (prefix_Warp.nii)') - inverse_warp_field = File( - exists=True, desc='inverse warp field (prefix_InverseWarp.nii)') - input_file = File(exists=True, desc='input image (prefix_repaired.nii)') - output_file = File(exists=True, desc='output image (prefix_deformed.nii)') - - -class antsIntroduction(ANTSCommand): - """Uses ANTS to generate matrices to warp data from one space to another. - - Examples - -------- - - >>> from nipype.interfaces.ants.legacy import antsIntroduction - >>> warp = antsIntroduction() - >>> warp.inputs.reference_image = 'Template_6.nii' - >>> warp.inputs.input_image = 'structural.nii' - >>> warp.inputs.max_iterations = [30,90,20] - >>> warp.cmdline - 'antsIntroduction.sh -d 3 -i structural.nii -m 30x90x20 -o ants_ -r Template_6.nii -t GR' - - """ - - _cmd = 'antsIntroduction.sh' - input_spec = antsIntroductionInputSpec - output_spec = antsIntroductionOutputSpec - - def _list_outputs(self): - outputs = self._outputs().get() - transmodel = self.inputs.transformation_model - - # When transform is set as 'RI'/'RA', wrap fields should not be expected - # The default transformation is GR, which outputs the wrap fields - if not isdefined(transmodel) or (isdefined(transmodel) - and transmodel not in ['RI', 'RA']): - outputs['warp_field'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + 'Warp.nii.gz') - outputs['inverse_warp_field'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + 'InverseWarp.nii.gz') - - outputs['affine_transformation'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + 'Affine.txt') - outputs['input_file'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + 'repaired.nii.gz') - outputs['output_file'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + 'deformed.nii.gz') - - return outputs - - -# How do we make a pass through so that GenWarpFields is just an alias for antsIntroduction ? - - -class GenWarpFields(antsIntroduction): - pass - - -class buildtemplateparallelInputSpec(ANTSCommandInputSpec): - dimension = traits.Enum( - 3, - 2, - 4, - argstr='-d %d', - usedefault=True, - desc='image dimension (2, 3 or 4)', - position=1) - out_prefix = traits.Str( - 'antsTMPL_', - argstr='-o %s', - usedefault=True, - desc=('Prefix that is prepended to all output ' - 'files (default = antsTMPL_)')) - in_files = traits.List( - File(exists=True), - mandatory=True, - desc='list of images to generate template from', - argstr='%s', - position=-1) - parallelization = traits.Enum( - 0, - 1, - 2, - argstr='-c %d', - usedefault=True, - desc=('control for parallel processing (0 = ' - 'serial, 1 = use PBS, 2 = use PEXEC, 3 = ' - 'use Apple XGrid')) - gradient_step_size = traits.Float( - argstr='-g %f', - desc=('smaller magnitude results in ' - 'more cautious steps (default = ' - '.25)')) - iteration_limit = traits.Int( - 4, - argstr='-i %d', - usedefault=True, - desc='iterations of template construction') - num_cores = traits.Int( - argstr='-j %d', - requires=['parallelization'], - desc=('Requires parallelization = 2 (PEXEC). 
' - 'Sets number of cpu cores to use')) - max_iterations = traits.List( - traits.Int, - argstr='-m %s', - sep='x', - desc=('maximum number of iterations (must be ' - 'list of integers in the form [J,K,L...]: ' - 'J = coarsest resolution iterations, K = ' - 'middle resolution interations, L = fine ' - 'resolution iterations')) - bias_field_correction = traits.Bool( - argstr='-n 1', - desc=('Applies bias field correction to moving ' - 'image')) - rigid_body_registration = traits.Bool( - argstr='-r 1', - desc=('registers inputs before creating template ' - '(useful if no initial template available)')) - similarity_metric = traits.Enum( - 'PR', - 'CC', - 'MI', - 'MSQ', - argstr='-s %s', - desc=('Type of similartiy metric used for registration ' - '(CC = cross correlation, MI = mutual information, ' - 'PR = probability mapping, MSQ = mean square difference)')) - transformation_model = traits.Enum( - 'GR', - 'EL', - 'SY', - 'S2', - 'EX', - 'DD', - argstr='-t %s', - usedefault=True, - desc=('Type of transofmration model used for registration ' - '(EL = elastic transformation model, SY = SyN with time, ' - 'arbitrary number of time points, S2 = SyN with time ' - 'optimized for 2 time points, GR = greedy SyN, EX = ' - 'exponential, DD = diffeomorphic demons style exponential ' - 'mapping')) - use_first_as_target = traits.Bool( - desc=('uses first volume as target of ' - 'all inputs. When not used, an ' - 'unbiased average image is used ' - 'to start.')) - - -class buildtemplateparallelOutputSpec(TraitedSpec): - final_template_file = File(exists=True, desc='final ANTS template') - template_files = OutputMultiPath( - File(exists=True), desc='Templates from different stages of iteration') - subject_outfiles = OutputMultiPath( - File(exists=True), - desc=('Outputs for each input image. Includes warp ' - 'field, inverse warp, Affine, original image ' - '(repaired) and warped image (deformed)')) - - -class buildtemplateparallel(ANTSCommand): - """Generate a optimal average template - - .. 
warning:: - - This can take a VERY long time to complete - - Examples - -------- - - >>> from nipype.interfaces.ants.legacy import buildtemplateparallel - >>> tmpl = buildtemplateparallel() - >>> tmpl.inputs.in_files = ['T1.nii', 'structural.nii'] - >>> tmpl.inputs.max_iterations = [30, 90, 20] - >>> tmpl.cmdline - 'buildtemplateparallel.sh -d 3 -i 4 -m 30x90x20 -o antsTMPL_ -c 0 -t GR T1.nii structural.nii' - - """ - - _cmd = 'buildtemplateparallel.sh' - input_spec = buildtemplateparallelInputSpec - output_spec = buildtemplateparallelOutputSpec - - def _format_arg(self, opt, spec, val): - if opt == 'num_cores': - if self.inputs.parallelization == 2: - return '-j ' + str(val) - else: - return '' - if opt == 'in_files': - if self.inputs.use_first_as_target: - start = '-z ' - else: - start = '' - return start + ' '.join(name for name in val) - return super(buildtemplateparallel, self)._format_arg(opt, spec, val) - - def _list_outputs(self): - outputs = self._outputs().get() - outputs['template_files'] = [] - for i in range(len(glob(os.path.realpath('*iteration*')))): - temp = os.path.realpath('%s_iteration_%d/%stemplate.nii.gz' % - (self.inputs.transformation_model, i, - self.inputs.out_prefix)) - os.rename( - temp, - os.path.realpath('%s_iteration_%d/%stemplate_i%d.nii.gz' % - (self.inputs.transformation_model, i, - self.inputs.out_prefix, i))) - file_ = ('%s_iteration_%d/%stemplate_i%d.nii.gz' % - (self.inputs.transformation_model, i, - self.inputs.out_prefix, i)) - - outputs['template_files'].append(os.path.realpath(file_)) - outputs['final_template_file'] = \ - os.path.realpath('%stemplate.nii.gz' % - self.inputs.out_prefix) - outputs['subject_outfiles'] = [] - for filename in self.inputs.in_files: - _, base, _ = split_filename(filename) - temp = glob( - os.path.realpath('%s%s*' % (self.inputs.out_prefix, base))) - for file_ in temp: - outputs['subject_outfiles'].append(file_) - return outputs diff --git a/nipype/interfaces/ants/registration.py b/nipype/interfaces/ants/registration.py index 7190816cf3..6421e271b9 100644 --- a/nipype/interfaces/ants/registration.py +++ b/nipype/interfaces/ants/registration.py @@ -2,9 +2,6 @@ """The ants module provides basic functions for interfacing with ants functions. """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import range, str import os from ...utils.filemanip import ensure_list diff --git a/nipype/interfaces/ants/resampling.py b/nipype/interfaces/ants/resampling.py index e26a48ed6a..94bccbdf7d 100644 --- a/nipype/interfaces/ants/resampling.py +++ b/nipype/interfaces/ants/resampling.py @@ -1,9 +1,6 @@ # -*- coding: utf-8 -*- """ANTS Apply Transforms interface """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import range, str import os from .base import ANTSCommand, ANTSCommandInputSpec diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py index 6ccfb28ba2..cb679faabd 100644 --- a/nipype/interfaces/ants/segmentation.py +++ b/nipype/interfaces/ants/segmentation.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- """The ants module provides basic functions for interfacing with ants functions. 
""" -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import range, str - import os from ...external.due import BibTeX from ...utils.filemanip import split_filename, copyfile, which diff --git a/nipype/interfaces/ants/tests/test_spec_JointFusion.py b/nipype/interfaces/ants/tests/test_spec_JointFusion.py index b2ca69926a..a1dc03cc40 100644 --- a/nipype/interfaces/ants/tests/test_spec_JointFusion.py +++ b/nipype/interfaces/ants/tests/test_spec_JointFusion.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import division -from builtins import range from nipype.testing import example_data from nipype.interfaces.base import InputMultiPath from traits.trait_errors import TraitError diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index 6069c12041..c4f4840dd5 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -12,11 +12,6 @@ interfaces are found in the ``specs`` module. """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) - -from builtins import object, open, str, bytes - import gc from copy import deepcopy from datetime import datetime as dt @@ -46,9 +41,6 @@ StdOutCommandLineInputSpec, MpiCommandLineInputSpec) from .support import (Bunch, Stream, InterfaceResult, NipypeInterfaceError) -from future import standard_library -standard_library.install_aliases() - iflogger = logging.getLogger('nipype.interface') PY35 = sys.version_info >= (3, 5) diff --git a/nipype/interfaces/base/specs.py b/nipype/interfaces/base/specs.py index c07bdc4a71..90ed1715a5 100644 --- a/nipype/interfaces/base/specs.py +++ b/nipype/interfaces/base/specs.py @@ -9,13 +9,9 @@ Define the API for the I/O of interfaces """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) - import os from copy import deepcopy from warnings import warn -from builtins import str, bytes from packaging.version import Version from ...utils.filemanip import md5, hash_infile, hash_timestamp, to_str diff --git a/nipype/interfaces/base/support.py b/nipype/interfaces/base/support.py index 87252fd6d3..b827488381 100644 --- a/nipype/interfaces/base/support.py +++ b/nipype/interfaces/base/support.py @@ -7,10 +7,6 @@ ...................................................... """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import range, object, str - import os from copy import deepcopy diff --git a/nipype/interfaces/base/tests/test_core.py b/nipype/interfaces/base/tests/test_core.py index fe1b3a227e..f4dd0ca3d3 100644 --- a/nipype/interfaces/base/tests/test_core.py +++ b/nipype/interfaces/base/tests/test_core.py @@ -1,9 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import print_function, unicode_literals -from future import standard_library -from builtins import open import os import simplejson as json @@ -13,8 +10,6 @@ from ....testing import example_data from ... 
import base as nib -standard_library.install_aliases() - def check_dict(ref_dict, tst_dict): """Compare dictionaries of inputs and and those loaded from json files""" diff --git a/nipype/interfaces/base/tests/test_specs.py b/nipype/interfaces/base/tests/test_specs.py index 2586fc7b6a..b8a346169c 100644 --- a/nipype/interfaces/base/tests/test_specs.py +++ b/nipype/interfaces/base/tests/test_specs.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import print_function, unicode_literals -from future import standard_library import os import warnings @@ -12,8 +10,6 @@ from ... import base as nib from ...base import traits, Undefined -standard_library.install_aliases() - @pytest.fixture(scope="module") def setup_file(request, tmpdir_factory): diff --git a/nipype/interfaces/base/tests/test_support.py b/nipype/interfaces/base/tests/test_support.py index e6db69a458..fbd6dcc209 100644 --- a/nipype/interfaces/base/tests/test_support.py +++ b/nipype/interfaces/base/tests/test_support.py @@ -1,18 +1,14 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import print_function, unicode_literals import os import pytest -from builtins import open -from future import standard_library + from pkg_resources import resource_filename as pkgrf from ....utils.filemanip import md5 from ... import base as nib -standard_library.install_aliases() - @pytest.mark.parametrize("args", [{}, {'a': 1, 'b': [2, 3]}]) def test_bunch(args): diff --git a/nipype/interfaces/base/traits_extension.py b/nipype/interfaces/base/traits_extension.py index a98ec020c8..e6db35cca0 100644 --- a/nipype/interfaces/base/traits_extension.py +++ b/nipype/interfaces/base/traits_extension.py @@ -20,10 +20,6 @@ (usually by Robert Kern). 
""" -from __future__ import (print_function, division, unicode_literals, - absolute_import) - -from builtins import str, bytes import os import collections @@ -36,13 +32,10 @@ from traits.api import BaseUnicode from traits.api import Unicode -from future import standard_library if traits_version < '3.7.0': raise ImportError('Traits version 3.7.0 or higher must be installed') -standard_library.install_aliases() - class Str(Unicode): """Replacement for the default traits.Str based in bytes""" diff --git a/nipype/interfaces/cmtk/cmtk.py b/nipype/interfaces/cmtk/cmtk.py index c26220c6b0..296da6db65 100644 --- a/nipype/interfaces/cmtk/cmtk.py +++ b/nipype/interfaces/cmtk/cmtk.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import range, open - import pickle import os.path as op diff --git a/nipype/interfaces/cmtk/nx.py b/nipype/interfaces/cmtk/nx.py index 1b58494f2c..7fc36a9e4d 100644 --- a/nipype/interfaces/cmtk/nx.py +++ b/nipype/interfaces/cmtk/nx.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import str, open, range - import os.path as op import pickle diff --git a/nipype/interfaces/cmtk/parcellation.py b/nipype/interfaces/cmtk/parcellation.py index 0e25e8eb10..a80c4e895f 100644 --- a/nipype/interfaces/cmtk/parcellation.py +++ b/nipype/interfaces/cmtk/parcellation.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import range - import os import os.path as op import shutil diff --git a/nipype/interfaces/dcm2nii.py b/nipype/interfaces/dcm2nii.py index db0c0ec4cf..d65dba811f 100644 --- a/nipype/interfaces/dcm2nii.py +++ b/nipype/interfaces/dcm2nii.py @@ -1,9 +1,6 @@ # -*- coding: utf-8 -*- """The dcm2nii module provides basic functions for dicom conversion """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import str, open import os import re from copy import deepcopy diff --git a/nipype/interfaces/diffusion_toolkit/base.py b/nipype/interfaces/diffusion_toolkit/base.py index c8e3a17c61..bac8e781d1 100644 --- a/nipype/interfaces/diffusion_toolkit/base.py +++ b/nipype/interfaces/diffusion_toolkit/base.py @@ -13,9 +13,6 @@ See the docstrings for the individual classes for 'working' examples. 
""" -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import object import re from ..base import CommandLine diff --git a/nipype/interfaces/diffusion_toolkit/dti.py b/nipype/interfaces/diffusion_toolkit/dti.py index 570ae55df5..1b90b55e91 100644 --- a/nipype/interfaces/diffusion_toolkit/dti.py +++ b/nipype/interfaces/diffusion_toolkit/dti.py @@ -3,10 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Provides interfaces to various commands provided by diffusion toolkit """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import range, open - import os import re diff --git a/nipype/interfaces/diffusion_toolkit/odf.py b/nipype/interfaces/diffusion_toolkit/odf.py index cf4eb683a2..9c2d6d2505 100644 --- a/nipype/interfaces/diffusion_toolkit/odf.py +++ b/nipype/interfaces/diffusion_toolkit/odf.py @@ -3,10 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Provides interfaces to various commands provided by diffusion toolkit """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import range - import os import re diff --git a/nipype/interfaces/dipy/reconstruction.py b/nipype/interfaces/dipy/reconstruction.py index 26a7014f79..3921a28088 100644 --- a/nipype/interfaces/dipy/reconstruction.py +++ b/nipype/interfaces/dipy/reconstruction.py @@ -3,12 +3,6 @@ Interfaces to the reconstruction algorithms in dipy """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from future import standard_library -standard_library.install_aliases() -from builtins import str, open - import os.path as op import numpy as np diff --git a/nipype/interfaces/dipy/simulate.py b/nipype/interfaces/dipy/simulate.py index 4a995c8fa9..9b3c12536f 100644 --- a/nipype/interfaces/dipy/simulate.py +++ b/nipype/interfaces/dipy/simulate.py @@ -1,9 +1,6 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) from multiprocessing import (Pool, cpu_count) import os.path as op -from builtins import range import numpy as np import nibabel as nb diff --git a/nipype/interfaces/elastix/registration.py b/nipype/interfaces/elastix/registration.py index 7f2565d58c..47b6d98233 100644 --- a/nipype/interfaces/elastix/registration.py +++ b/nipype/interfaces/elastix/registration.py @@ -7,10 +7,6 @@ displacement maps to images and points. """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import open - import os.path as op import re diff --git a/nipype/interfaces/elastix/utils.py b/nipype/interfaces/elastix/utils.py index 505115b05a..52fd7023a2 100644 --- a/nipype/interfaces/elastix/utils.py +++ b/nipype/interfaces/elastix/utils.py @@ -7,10 +7,6 @@ transform files (to configure warpings) """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import open - import os.path as op from ... import logging diff --git a/nipype/interfaces/freesurfer/base.py b/nipype/interfaces/freesurfer/base.py index cda527a5ea..53d3b512c5 100644 --- a/nipype/interfaces/freesurfer/base.py +++ b/nipype/interfaces/freesurfer/base.py @@ -14,10 +14,6 @@ See the docstrings for the individual classes for 'working' examples. """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import open, object, str - import os from ... 
import LooseVersion diff --git a/nipype/interfaces/freesurfer/preprocess.py b/nipype/interfaces/freesurfer/preprocess.py index f99696e02a..7b2610d595 100644 --- a/nipype/interfaces/freesurfer/preprocess.py +++ b/nipype/interfaces/freesurfer/preprocess.py @@ -3,10 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Provides interfaces to various commands provided by FreeSurfer """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import range - import os import os.path as op from glob import glob diff --git a/nipype/interfaces/freesurfer/tests/test_preprocess.py b/nipype/interfaces/freesurfer/tests/test_preprocess.py index f9fc09515a..b0a6db293a 100644 --- a/nipype/interfaces/freesurfer/tests/test_preprocess.py +++ b/nipype/interfaces/freesurfer/tests/test_preprocess.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from builtins import str import os import pytest diff --git a/nipype/interfaces/freesurfer/tests/test_utils.py b/nipype/interfaces/freesurfer/tests/test_utils.py index f3ff8fd5ee..203b2483f8 100644 --- a/nipype/interfaces/freesurfer/tests/test_utils.py +++ b/nipype/interfaces/freesurfer/tests/test_utils.py @@ -1,9 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import open import os import os.path as op import pytest diff --git a/nipype/interfaces/freesurfer/utils.py b/nipype/interfaces/freesurfer/utils.py index b258d41720..a1dcec8ae1 100644 --- a/nipype/interfaces/freesurfer/utils.py +++ b/nipype/interfaces/freesurfer/utils.py @@ -3,10 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Interfaces to assorted Freesurfer utility programs. """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import str, open - import os import re import shutil diff --git a/nipype/interfaces/fsl/dti.py b/nipype/interfaces/fsl/dti.py index d8812cec6c..ba4f3f9887 100644 --- a/nipype/interfaces/fsl/dti.py +++ b/nipype/interfaces/fsl/dti.py @@ -5,10 +5,6 @@ `_ command line tools. This was written to work with FSL version 4.1.4. """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import range, open - import os import warnings diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index e45a94ed2c..826bbd6756 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -5,10 +5,6 @@ `_ command line tools. This was written to work with FSL version 5.0.4. """ -from __future__ import print_function, division, unicode_literals, \ - absolute_import -from builtins import str - import os import numpy as np import nibabel as nb diff --git a/nipype/interfaces/fsl/model.py b/nipype/interfaces/fsl/model.py index 113f785120..a52a631ebe 100644 --- a/nipype/interfaces/fsl/model.py +++ b/nipype/interfaces/fsl/model.py @@ -5,10 +5,6 @@ `_ command line tools. This was written to work with FSL version 4.1.4. 
""" -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import range, open - import os from glob import glob from shutil import rmtree diff --git a/nipype/interfaces/fsl/preprocess.py b/nipype/interfaces/fsl/preprocess.py index dd2c969945..fc1e1a005c 100644 --- a/nipype/interfaces/fsl/preprocess.py +++ b/nipype/interfaces/fsl/preprocess.py @@ -5,10 +5,6 @@ `_ command line tools. This was written to work with FSL version 4.1.4. """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import range, open - import os import os.path as op from warnings import warn diff --git a/nipype/interfaces/fsl/tests/test_dti.py b/nipype/interfaces/fsl/tests/test_dti.py index cd76766dd8..19d807d217 100644 --- a/nipype/interfaces/fsl/tests/test_dti.py +++ b/nipype/interfaces/fsl/tests/test_dti.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from builtins import open, range import os import nipype.interfaces.fsl.dti as fsl diff --git a/nipype/interfaces/fsl/tests/test_maths.py b/nipype/interfaces/fsl/tests/test_maths.py index b8aa41dc6b..7760c6dbe4 100644 --- a/nipype/interfaces/fsl/tests/test_maths.py +++ b/nipype/interfaces/fsl/tests/test_maths.py @@ -1,9 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import division -from __future__ import unicode_literals -from builtins import open import os import numpy as np diff --git a/nipype/interfaces/fsl/tests/test_model.py b/nipype/interfaces/fsl/tests/test_model.py index 8b8d0b7b40..c5c2f5bf35 100644 --- a/nipype/interfaces/fsl/tests/test_model.py +++ b/nipype/interfaces/fsl/tests/test_model.py @@ -1,9 +1,6 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from builtins import open - import os import pytest diff --git a/nipype/interfaces/fsl/tests/test_preprocess.py b/nipype/interfaces/fsl/tests/test_preprocess.py index 4b387201cf..5caffae1b1 100644 --- a/nipype/interfaces/fsl/tests/test_preprocess.py +++ b/nipype/interfaces/fsl/tests/test_preprocess.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals -from builtins import str # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from builtins import open - import os from copy import deepcopy diff --git a/nipype/interfaces/fsl/utils.py b/nipype/interfaces/fsl/utils.py index e237124bb3..6d49047f44 100644 --- a/nipype/interfaces/fsl/utils.py +++ b/nipype/interfaces/fsl/utils.py @@ -9,10 +9,6 @@ -------- See the docstrings of the individual classes for examples. 
""" -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import map, range - import os import os.path as op import re diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index 7a89675e8d..044b809aec 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -11,10 +11,6 @@ To come : XNATSink """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import object, zip, filter, range, open, str - import glob import fnmatch import string diff --git a/nipype/interfaces/matlab.py b/nipype/interfaces/matlab.py index fed7bfeb57..71fc7ab0e5 100644 --- a/nipype/interfaces/matlab.py +++ b/nipype/interfaces/matlab.py @@ -2,9 +2,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ General matlab interface code """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import open import os from .. import config diff --git a/nipype/interfaces/minc/base.py b/nipype/interfaces/minc/base.py index 67b7938176..9436c2c3d1 100644 --- a/nipype/interfaces/minc/base.py +++ b/nipype/interfaces/minc/base.py @@ -8,9 +8,6 @@ Author: Carlo Hamalainen http://carlo-hamalainen.net """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import object import os import os.path import warnings diff --git a/nipype/interfaces/minc/minc.py b/nipype/interfaces/minc/minc.py index 8ac8babe52..5b3dc144dd 100644 --- a/nipype/interfaces/minc/minc.py +++ b/nipype/interfaces/minc/minc.py @@ -8,10 +8,6 @@ Author: Carlo Hamalainen http://carlo-hamalainen.net """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import open - import glob import os import os.path diff --git a/nipype/interfaces/mne/base.py b/nipype/interfaces/mne/base.py index 7f53071372..7ffc95b08a 100644 --- a/nipype/interfaces/mne/base.py +++ b/nipype/interfaces/mne/base.py @@ -1,8 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import str, bytes - import os.path as op import glob diff --git a/nipype/interfaces/niftyreg/base.py b/nipype/interfaces/niftyreg/base.py index bd8a280aa5..0b1e0c514a 100644 --- a/nipype/interfaces/niftyreg/base.py +++ b/nipype/interfaces/niftyreg/base.py @@ -15,10 +15,6 @@ See the docstrings of the individual classes for examples. 
""" - -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import property, super from distutils.version import StrictVersion import os diff --git a/nipype/interfaces/niftyreg/reg.py b/nipype/interfaces/niftyreg/reg.py index f36752b872..26985a3e58 100644 --- a/nipype/interfaces/niftyreg/reg.py +++ b/nipype/interfaces/niftyreg/reg.py @@ -7,10 +7,6 @@ The interfaces were written to work with niftyreg version 1.5.10 """ - -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import staticmethod import os from ..base import TraitedSpec, File, traits, isdefined diff --git a/nipype/interfaces/niftyreg/regutils.py b/nipype/interfaces/niftyreg/regutils.py index c90aa53bed..2d08a7119d 100644 --- a/nipype/interfaces/niftyreg/regutils.py +++ b/nipype/interfaces/niftyreg/regutils.py @@ -6,10 +6,6 @@ The interfaces were written to work with niftyreg version 1.5.10 """ - -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import len, open, property, super import os from ..base import TraitedSpec, File, traits, isdefined diff --git a/nipype/interfaces/niftyseg/label_fusion.py b/nipype/interfaces/niftyseg/label_fusion.py index 1b0237d37c..5185b64f68 100644 --- a/nipype/interfaces/niftyseg/label_fusion.py +++ b/nipype/interfaces/niftyseg/label_fusion.py @@ -4,8 +4,6 @@ The fusion module provides higher-level interfaces to some of the operations that can be performed with the seg_LabFusion command-line program. """ - -from builtins import str import os import warnings diff --git a/nipype/interfaces/nipy/model.py b/nipype/interfaces/nipy/model.py index 8746728990..3d0f9ae19c 100644 --- a/nipype/interfaces/nipy/model.py +++ b/nipype/interfaces/nipy/model.py @@ -1,8 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import range, str, bytes - import os import nibabel as nb diff --git a/nipype/interfaces/nipy/preprocess.py b/nipype/interfaces/nipy/preprocess.py index 252463e3f7..0db8eb522a 100644 --- a/nipype/interfaces/nipy/preprocess.py +++ b/nipype/interfaces/nipy/preprocess.py @@ -1,8 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import open - import os import nibabel as nb diff --git a/nipype/interfaces/nitime/analysis.py b/nipype/interfaces/nitime/analysis.py index 6e8ff17d0f..7f393efadf 100644 --- a/nipype/interfaces/nitime/analysis.py +++ b/nipype/interfaces/nitime/analysis.py @@ -10,10 +10,6 @@ - nitime.viz.drawmatrix_channels """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import zip, object, open - import numpy as np import tempfile diff --git a/nipype/interfaces/slicer/generate_classes.py b/nipype/interfaces/slicer/generate_classes.py index 6fe3ae927f..850b561c47 100644 --- a/nipype/interfaces/slicer/generate_classes.py +++ b/nipype/interfaces/slicer/generate_classes.py @@ -3,9 +3,6 @@ modules are selected from the hardcoded list below and generated code is placed in the cli_modules.py file (and imported in __init__.py). 
For this to work correctly you must have your CLI executabes in $PATH""" -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import str, bytes, open import xml.dom.minidom import subprocess import os diff --git a/nipype/interfaces/spm/base.py b/nipype/interfaces/spm/base.py index 214a6e7a2f..064d5f8cad 100644 --- a/nipype/interfaces/spm/base.py +++ b/nipype/interfaces/spm/base.py @@ -14,10 +14,6 @@ spm.SPMCommand().version """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import range, object, str, bytes - # Standard library imports import os from copy import deepcopy diff --git a/nipype/interfaces/spm/model.py b/nipype/interfaces/spm/model.py index 3e26ab6e2a..78179f3c6b 100644 --- a/nipype/interfaces/spm/model.py +++ b/nipype/interfaces/spm/model.py @@ -4,10 +4,6 @@ """The spm module provides basic functions for interfacing with matlab and spm to access spm tools. """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import str, bytes - # Standard library imports import os from glob import glob diff --git a/nipype/interfaces/spm/preprocess.py b/nipype/interfaces/spm/preprocess.py index cdf7a6e0e7..9aba34c760 100644 --- a/nipype/interfaces/spm/preprocess.py +++ b/nipype/interfaces/spm/preprocess.py @@ -3,10 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """SPM wrappers for preprocessing data """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import range - import os from copy import deepcopy diff --git a/nipype/interfaces/spm/tests/test_base.py b/nipype/interfaces/spm/tests/test_base.py index a8a23e8def..c59cd6b234 100644 --- a/nipype/interfaces/spm/tests/test_base.py +++ b/nipype/interfaces/spm/tests/test_base.py @@ -1,9 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import unicode_literals -from builtins import str, bytes - import os import numpy as np diff --git a/nipype/interfaces/tests/test_io.py b/nipype/interfaces/tests/test_io.py index abff491f36..e89aac43d9 100644 --- a/nipype/interfaces/tests/test_io.py +++ b/nipype/interfaces/tests/test_io.py @@ -1,9 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import print_function, unicode_literals -from builtins import str, zip, range, open -from future import standard_library import os import copy import simplejson @@ -65,7 +62,6 @@ no_local_ssh = True # Check for fakes3 -standard_library.install_aliases() from subprocess import check_call, CalledProcessError try: ret_code = check_call(['which', 'fakes3'], stdout=open(os.devnull, 'wb')) diff --git a/nipype/interfaces/utility/base.py b/nipype/interfaces/utility/base.py index f2da6cf2a6..4ccd1f2f0e 100644 --- a/nipype/interfaces/utility/base.py +++ b/nipype/interfaces/utility/base.py @@ -6,13 +6,6 @@ >>> tmp = getfixture('tmpdir') >>> old = tmp.chdir() """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import range - -from future import standard_library -standard_library.install_aliases() - import os import re import numpy as np diff --git a/nipype/interfaces/utility/csv.py b/nipype/interfaces/utility/csv.py index 16c377e3b5..d22b146d74 100644 --- a/nipype/interfaces/utility/csv.py +++ 
b/nipype/interfaces/utility/csv.py @@ -3,13 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """CSV Handling utilities """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import zip, range, str, open - -from future import standard_library -standard_library.install_aliases() - from ..base import (traits, TraitedSpec, DynamicTraitedSpec, File, BaseInterface) from ..io import add_traits diff --git a/nipype/interfaces/utility/wrappers.py b/nipype/interfaces/utility/wrappers.py index 80a6f89738..e775c9a540 100644 --- a/nipype/interfaces/utility/wrappers.py +++ b/nipype/interfaces/utility/wrappers.py @@ -6,15 +6,6 @@ >>> tmp = getfixture('tmpdir') >>> old = tmp.chdir() """ - -from __future__ import (print_function, division, unicode_literals, - absolute_import) - -from future import standard_library -standard_library.install_aliases() - -from builtins import str, bytes - from ... import logging from ..base import (traits, DynamicTraitedSpec, Undefined, isdefined, BaseInterfaceInputSpec) diff --git a/nipype/pipeline/engine/base.py b/nipype/pipeline/engine/base.py index 9d0bc3c699..3912e10326 100644 --- a/nipype/pipeline/engine/base.py +++ b/nipype/pipeline/engine/base.py @@ -6,10 +6,6 @@ The `EngineBase` class implements the more general view of a task. """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import object - from copy import deepcopy import re import numpy as np diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index 5ac9e72fae..e24270528b 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -5,10 +5,6 @@ The `Node` class provides core functionality for batch processing. """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import range, str, bytes, open - from collections import OrderedDict import os @@ -20,7 +16,6 @@ from logging import INFO from tempfile import mkdtemp -from future import standard_library from ... import config, logging from ...utils.misc import flatten, unflatten, str2bool, dict_diff @@ -39,8 +34,6 @@ clean_working_directory, merge_dict, evaluate_connect_function) from .base import EngineBase -standard_library.install_aliases() - logger = logging.getLogger('nipype.workflow') diff --git a/nipype/pipeline/engine/tests/test_engine.py b/nipype/pipeline/engine/tests/test_engine.py index 151849241c..5f41d70d34 100644 --- a/nipype/pipeline/engine/tests/test_engine.py +++ b/nipype/pipeline/engine/tests/test_engine.py @@ -3,15 +3,10 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Tests for the engine module """ - -from __future__ import print_function -from __future__ import unicode_literals -from builtins import open from copy import deepcopy from glob import glob import os - import pytest from ... import engine as pe from .test_base import EngineTestInterface diff --git a/nipype/pipeline/engine/tests/test_join.py b/nipype/pipeline/engine/tests/test_join.py index 77fc0f2fdf..d0e71eaa63 100644 --- a/nipype/pipeline/engine/tests/test_join.py +++ b/nipype/pipeline/engine/tests/test_join.py @@ -3,10 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Tests for join expansion """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import open - from ... 
import engine as pe from ....interfaces import base as nib from ....interfaces.utility import IdentityInterface, Function, Merge diff --git a/nipype/pipeline/engine/tests/test_nodes.py b/nipype/pipeline/engine/tests/test_nodes.py index 4a04b94766..b724aaec59 100644 --- a/nipype/pipeline/engine/tests/test_nodes.py +++ b/nipype/pipeline/engine/tests/test_nodes.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import print_function, unicode_literals -from builtins import str import os from copy import deepcopy import pytest diff --git a/nipype/pipeline/engine/tests/test_utils.py b/nipype/pipeline/engine/tests/test_utils.py index 42f8b2434e..e867e4d0a1 100644 --- a/nipype/pipeline/engine/tests/test_utils.py +++ b/nipype/pipeline/engine/tests/test_utils.py @@ -3,10 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Tests for the engine utils module """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import range, open - import os import sys from copy import deepcopy diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index 4ec36afe68..17554e3615 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -2,10 +2,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Utility routines for workflow graphs""" -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import str, open, next, zip, range - import os import sys import pickle @@ -22,7 +18,6 @@ import numpy as np import networkx as nx -from future import standard_library from ... import logging, config, LooseVersion from ...utils.filemanip import ( @@ -51,7 +46,6 @@ except ImportError: from funcsigs import signature -standard_library.install_aliases() logger = logging.getLogger('nipype.workflow') PY3 = sys.version_info[0] > 2 @@ -383,8 +377,7 @@ def format_node(node, format='python', include_config=False): if include_config: lines = [ - importline, "from future import standard_library", - "standard_library.install_aliases()", + importline, "from collections import OrderedDict", comment, nodedef ] lines.append('%s.config = %s' % (name, node.config)) diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py index d2f040786e..da6e3d6fbf 100644 --- a/nipype/pipeline/engine/workflows.py +++ b/nipype/pipeline/engine/workflows.py @@ -6,10 +6,6 @@ The `Workflow` class provides core functionality for batch processing. 
""" -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import str, bytes, open - import os import os.path as op import sys @@ -35,10 +31,6 @@ from .base import EngineBase from .nodes import MapNode -# Py2 compat: http://python-future.org/compatible_idioms.html#collections-counter-and-ordereddict -from future import standard_library -standard_library.install_aliases() - logger = logging.getLogger('nipype.workflow') diff --git a/nipype/pipeline/plugins/base.py b/nipype/pipeline/plugins/base.py index 122d7e57cd..83fe1d9db8 100644 --- a/nipype/pipeline/plugins/base.py +++ b/nipype/pipeline/plugins/base.py @@ -3,10 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Common graph operations for execution """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import range, object, open - import sys from copy import deepcopy from glob import glob diff --git a/nipype/pipeline/plugins/dagman.py b/nipype/pipeline/plugins/dagman.py index 28b766f2ea..9db1f70fe5 100644 --- a/nipype/pipeline/plugins/dagman.py +++ b/nipype/pipeline/plugins/dagman.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- """Parallel workflow execution via Condor DAGMan """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import open - import os import sys import uuid diff --git a/nipype/pipeline/plugins/ipython.py b/nipype/pipeline/plugins/ipython.py index aa20f935c1..eafeb904e3 100644 --- a/nipype/pipeline/plugins/ipython.py +++ b/nipype/pipeline/plugins/ipython.py @@ -3,13 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Parallel workflow execution via IPython controller """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) - -from future import standard_library -standard_library.install_aliases() -from future.utils import raise_from - from pickle import dumps import sys @@ -76,20 +69,18 @@ def run(self, graph, config, updatehash=False): __import__(name) self.iparallel = sys.modules[name] except ImportError as e: - raise_from( - ImportError("ipyparallel not found. Parallel execution " - "will be unavailable"), e) + raise ImportError("ipyparallel not found. 
Parallel execution " + "will be unavailable") from e try: self.taskclient = self.iparallel.Client(**self.client_args) except Exception as e: if isinstance(e, TimeoutError): - raise_from(Exception("No IPython clients found."), e) + raise Exception("No IPython clients found.") from e if isinstance(e, IOError): - raise_from( - Exception("ipcluster/ipcontroller has not been started"), - e) + raise Exception("ipcluster/ipcontroller has not been started") \ + from e if isinstance(e, ValueError): - raise_from(Exception("Ipython kernel not installed"), e) + raise Exception("Ipython kernel not installed") from e else: raise e return super(IPythonPlugin, self).run( diff --git a/nipype/pipeline/plugins/oar.py b/nipype/pipeline/plugins/oar.py index c68b42379f..5ce718c250 100644 --- a/nipype/pipeline/plugins/oar.py +++ b/nipype/pipeline/plugins/oar.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- """Parallel workflow execution via OAR http://oar.imag.fr """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) - -from builtins import str, open import os import stat from time import sleep diff --git a/nipype/pipeline/plugins/pbs.py b/nipype/pipeline/plugins/pbs.py index 0738638765..cc5bbed93b 100644 --- a/nipype/pipeline/plugins/pbs.py +++ b/nipype/pipeline/plugins/pbs.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- """Parallel workflow execution via PBS/Torque """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import str, open - import os from time import sleep diff --git a/nipype/pipeline/plugins/pbsgraph.py b/nipype/pipeline/plugins/pbsgraph.py index 68fc651f5f..9b6e9da755 100644 --- a/nipype/pipeline/plugins/pbsgraph.py +++ b/nipype/pipeline/plugins/pbsgraph.py @@ -1,9 +1,5 @@ """Parallel workflow execution via PBS/Torque """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import open - import os import sys diff --git a/nipype/pipeline/plugins/sge.py b/nipype/pipeline/plugins/sge.py index a4ce28297c..61b127e188 100644 --- a/nipype/pipeline/plugins/sge.py +++ b/nipype/pipeline/plugins/sge.py @@ -1,11 +1,6 @@ # -*- coding: utf-8 -*- """Parallel workflow execution via SGE """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) - -from builtins import object - import os import pwd import re diff --git a/nipype/pipeline/plugins/sgegraph.py b/nipype/pipeline/plugins/sgegraph.py index fa07d6a436..06e5719654 100644 --- a/nipype/pipeline/plugins/sgegraph.py +++ b/nipype/pipeline/plugins/sgegraph.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- """Parallel workflow execution via SGE """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import open - import os import sys diff --git a/nipype/pipeline/plugins/slurm.py b/nipype/pipeline/plugins/slurm.py index 285d2d6584..d63e15dd6f 100644 --- a/nipype/pipeline/plugins/slurm.py +++ b/nipype/pipeline/plugins/slurm.py @@ -5,10 +5,6 @@ Parallel workflow execution with SLURM ''' -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import open - import os import re from time import sleep diff --git a/nipype/pipeline/plugins/slurmgraph.py b/nipype/pipeline/plugins/slurmgraph.py index b4013163cb..1b62177457 100644 --- a/nipype/pipeline/plugins/slurmgraph.py +++ b/nipype/pipeline/plugins/slurmgraph.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- """Parallel workflow execution via SLURM """ -from __future__ 
import (print_function, division, unicode_literals, - absolute_import) -from builtins import open - import os import sys diff --git a/nipype/pipeline/plugins/tests/test_callback.py b/nipype/pipeline/plugins/tests/test_callback.py index 29c5cbd404..73f0cb8322 100644 --- a/nipype/pipeline/plugins/tests/test_callback.py +++ b/nipype/pipeline/plugins/tests/test_callback.py @@ -3,9 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Tests for workflow callbacks """ - -from builtins import object - import pytest import sys import nipype.interfaces.utility as niu diff --git a/nipype/pipeline/plugins/tests/test_legacymultiproc_nondaemon.py b/nipype/pipeline/plugins/tests/test_legacymultiproc_nondaemon.py index a83d426ada..3c4e673f55 100644 --- a/nipype/pipeline/plugins/tests/test_legacymultiproc_nondaemon.py +++ b/nipype/pipeline/plugins/tests/test_legacymultiproc_nondaemon.py @@ -3,10 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Testing module for functions and classes from multiproc.py """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import range, open - # Import packages import os import sys diff --git a/nipype/pipeline/plugins/tools.py b/nipype/pipeline/plugins/tools.py index 54fffd2398..4eef64994d 100644 --- a/nipype/pipeline/plugins/tools.py +++ b/nipype/pipeline/plugins/tools.py @@ -3,10 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Common graph operations for execution """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import open - import os import getpass from socket import gethostname diff --git a/nipype/pkg_info.py b/nipype/pkg_info.py index f2aa07030d..1d1150e318 100644 --- a/nipype/pkg_info.py +++ b/nipype/pkg_info.py @@ -1,10 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) - -from future import standard_library -standard_library.install_aliases() -from builtins import open import configparser import os diff --git a/nipype/scripts/utils.py b/nipype/scripts/utils.py index f4b8a86fb1..432b6308ce 100644 --- a/nipype/scripts/utils.py +++ b/nipype/scripts/utils.py @@ -5,7 +5,7 @@ from __future__ import (print_function, division, unicode_literals, absolute_import) -from builtins import bytes, str + import re import click diff --git a/nipype/sphinxext/plot_workflow.py b/nipype/sphinxext/plot_workflow.py index 7fa0769401..5caa4e1645 100644 --- a/nipype/sphinxext/plot_workflow.py +++ b/nipype/sphinxext/plot_workflow.py @@ -106,8 +106,6 @@ Provide a customized template for preparing restructured text. """ -from __future__ import print_function, division, absolute_import, unicode_literals - import sys import os import shutil @@ -144,7 +142,7 @@ def format_template(template, **kw): except ImportError as e: missing_imports.append(str(e)) -from builtins import str, bytes + PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 diff --git a/nipype/testing/fixtures.py b/nipype/testing/fixtures.py index 6d8b3b0874..dabf3b3307 100644 --- a/nipype/testing/fixtures.py +++ b/nipype/testing/fixtures.py @@ -4,17 +4,11 @@ """ Pytest fixtures used in tests. 
""" -from __future__ import (print_function, division, unicode_literals, - absolute_import) - import os import pytest import numpy as np import nibabel as nb -from io import open -from builtins import str - from nipype.utils.filemanip import ensure_list from nipype.interfaces.fsl import Info from nipype.interfaces.fsl.base import FSLCommand diff --git a/nipype/testing/utils.py b/nipype/testing/utils.py index 716b16da78..6c36ce514e 100644 --- a/nipype/testing/utils.py +++ b/nipype/testing/utils.py @@ -3,10 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Additional handy utilities for testing """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import range, object, open - import os import time import shutil @@ -14,7 +10,6 @@ import subprocess from subprocess import CalledProcessError from tempfile import mkdtemp -from future.utils import raise_from from ..utils.misc import package_check __docformat__ = 'restructuredtext' @@ -59,13 +54,13 @@ def __init__(self, size_in_mbytes=8, delay=0.5): subprocess.check_call( args=mkfs_args, stdout=self.dev_null, stderr=self.dev_null) except CalledProcessError as e: - raise_from(IOError("mkfs.vfat failed"), e) + raise IOError("mkfs.vfat failed") from e try: self.fusefat = subprocess.Popen( args=mount_args, stdout=self.dev_null, stderr=self.dev_null) except OSError as e: - raise_from(IOError("fusefat is not installed"), e) + raise IOError("fusefat is not installed") from e time.sleep(self.delay) diff --git a/nipype/utils/config.py b/nipype/utils/config.py index e4e518960c..10dce49f79 100644 --- a/nipype/utils/config.py +++ b/nipype/utils/config.py @@ -9,8 +9,6 @@ @author: Chris Filo Gorgolewski ''' -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os import sys import errno @@ -20,15 +18,11 @@ import configparser import numpy as np -from builtins import bytes, str, object, open from simplejson import load, dump -from future import standard_library from .misc import str2bool from ..external import portalocker -standard_library.install_aliases() - CONFIG_DEPRECATIONS = { 'profile_runtime': ('monitoring.enabled', '1.0'), 'filemanip_level': ('logging.utils_level', '1.0'), diff --git a/nipype/utils/docparse.py b/nipype/utils/docparse.py index 1df779f2ce..041e3ad5cf 100644 --- a/nipype/utils/docparse.py +++ b/nipype/utils/docparse.py @@ -13,10 +13,6 @@ docstring = docparse.get_doc(better.cmd, better.opt_map) """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import str, open, bytes - import subprocess from ..interfaces.base import CommandLine from .misc import is_container diff --git a/nipype/utils/draw_gantt_chart.py b/nipype/utils/draw_gantt_chart.py index 7a52205090..7e6dbf94ed 100644 --- a/nipype/utils/draw_gantt_chart.py +++ b/nipype/utils/draw_gantt_chart.py @@ -5,18 +5,12 @@ Module to draw an html gantt chart from logfile produced by ``nipype.utils.profiler.log_nodes_cb()`` """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) - # Import packages import sys import random import datetime import simplejson as json -from builtins import str, range, open -# Py2 compat: http://python-future.org/compatible_idioms.html#collections-counter-and-ordereddict -from future import standard_library -standard_library.install_aliases() + from collections import OrderedDict # Pandas diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 80fc262f03..43f2d8d0ec 
100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -3,9 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Miscellaneous file manipulation functions """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) - import sys import pickle import errno @@ -21,14 +18,9 @@ import contextlib import posixpath import simplejson as json -import numpy as np - -from builtins import str, bytes, open from .. import logging, config from .misc import is_container -from future import standard_library -standard_library.install_aliases() fmlogger = logging.getLogger('nipype.utils') @@ -38,7 +30,6 @@ ('.BRIK', '.HEAD'), ] -PY3 = sys.version_info[0] >= 3 class FileNotFoundError(Exception): pass @@ -662,7 +653,7 @@ def loadpkl(infile, versioning=False): return unpkl - # Unpickling problems + # Unpickling problems except Exception as e: if not versioning: raise e @@ -919,18 +910,12 @@ def canonicalize_env(env): if os.name != 'nt': return env - # convert unicode to string for python 2 - if not PY3: - from future.utils import bytes_to_native_str out_env = {} for key, val in env.items(): if not isinstance(key, bytes): key = key.encode('utf-8') if not isinstance(val, bytes): val = val.encode('utf-8') - if not PY3: - key = bytes_to_native_str(key) - val = bytes_to_native_str(val) out_env[key] = val return out_env diff --git a/nipype/utils/functions.py b/nipype/utils/functions.py index 00b9412d5d..a36a7edfeb 100644 --- a/nipype/utils/functions.py +++ b/nipype/utils/functions.py @@ -3,8 +3,6 @@ Handles custom functions used in Function interface. Future imports are avoided to keep namespace as clear as possible. """ -from builtins import next, str -from future.utils import raise_from import inspect from textwrap import dedent @@ -41,7 +39,7 @@ def create_function_from_source(function_source, imports=None): "They cannot be declared either interactively or inside " "another function or inline in the connect string. 
Any " "imports should be done inside the function.") - raise_from(RuntimeError(msg), e) + raise RuntimeError(msg) from e ns_funcs = list(set(ns) - set(import_keys + ['__builtins__'])) assert len(ns_funcs) == 1, "Function or inputs are ill-defined" func = ns[ns_funcs[0]] diff --git a/nipype/utils/logger.py b/nipype/utils/logger.py index 2c8fe4607a..6acd76665c 100644 --- a/nipype/utils/logger.py +++ b/nipype/utils/logger.py @@ -1,8 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) - -from builtins import object # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: diff --git a/nipype/utils/matlabtools.py b/nipype/utils/matlabtools.py index bff9567ed5..c62627c2a8 100644 --- a/nipype/utils/matlabtools.py +++ b/nipype/utils/matlabtools.py @@ -2,10 +2,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Useful Functions for working with matlab""" -from __future__ import (print_function, division, unicode_literals, - absolute_import) - -from builtins import range # Stdlib imports import os diff --git a/nipype/utils/misc.py b/nipype/utils/misc.py index 6b7629e32a..9b16d53cb2 100644 --- a/nipype/utils/misc.py +++ b/nipype/utils/misc.py @@ -3,10 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Miscellaneous utility functions """ -from __future__ import (print_function, unicode_literals, division, - absolute_import) -from builtins import next, str - import os import sys import re @@ -16,8 +12,6 @@ from distutils.version import LooseVersion import numpy as np -from future.utils import raise_from -from future import standard_library try: from textwrap import indent as textwrap_indent except ImportError: @@ -30,9 +24,6 @@ def textwrap_indent(text, prefix): return prefix + prefix.join(splittext) -standard_library.install_aliases() - - def human_order_sorted(l): """Sorts string in human order (i.e. 'stat10' will go after 'stat2')""" @@ -179,14 +170,13 @@ def package_check(pkg_name, try: mod = __import__(pkg_name) except ImportError as e: - raise_from(exc_failed_import(msg), e) + raise exc_failed_import(msg) from e if not version: return try: have_version = mod.__version__ except AttributeError as e: - raise_from( - exc_failed_check('Cannot find version for %s' % pkg_name), e) + raise exc_failed_check('Cannot find version for %s' % pkg_name) from e if checker(have_version) < checker(version): raise exc_failed_check(msg) diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py index 21ecbc0eee..a160365f09 100644 --- a/nipype/utils/nipype2boutiques.py +++ b/nipype/utils/nipype2boutiques.py @@ -1,8 +1,5 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import str, open, bytes # This tool exports a Nipype interface in the Boutiques (https://github.com/boutiques) JSON format. # Boutiques tools can be imported in CBRAIN (https://github.com/aces/cbrain) among other platforms. 
# diff --git a/nipype/utils/nipype_cmd.py b/nipype/utils/nipype_cmd.py index b31795aa92..7d72d1b1bb 100644 --- a/nipype/utils/nipype_cmd.py +++ b/nipype/utils/nipype_cmd.py @@ -1,7 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import str import os import argparse import inspect diff --git a/nipype/utils/onetime.py b/nipype/utils/onetime.py index 6983bc5c0f..e9a905254b 100644 --- a/nipype/utils/onetime.py +++ b/nipype/utils/onetime.py @@ -17,10 +17,6 @@ [2] Python data model, http://docs.python.org/reference/datamodel.html """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) - -from builtins import object class OneTimeProperty(object): diff --git a/nipype/utils/profiler.py b/nipype/utils/profiler.py index d8ec32ffe3..40abb2b267 100644 --- a/nipype/utils/profiler.py +++ b/nipype/utils/profiler.py @@ -4,9 +4,6 @@ """ Utilities to keep track of performance """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) - import threading from time import time try: @@ -14,7 +11,7 @@ except ImportError as exc: psutil = None -from builtins import open, range + from .. import config, logging proflogger = logging.getLogger('nipype.utils') diff --git a/nipype/utils/provenance.py b/nipype/utils/provenance.py index fb2306ee4b..cb11ff9326 100644 --- a/nipype/utils/provenance.py +++ b/nipype/utils/provenance.py @@ -1,11 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import open, object, str, bytes - -# Py2 compat: http://python-future.org/compatible_idioms.html#collections-counter-and-ordereddict -from future import standard_library -standard_library.install_aliases() from collections import OrderedDict from copy import deepcopy diff --git a/nipype/utils/spm_docs.py b/nipype/utils/spm_docs.py index 4c71c05523..641ebd858e 100644 --- a/nipype/utils/spm_docs.py +++ b/nipype/utils/spm_docs.py @@ -2,10 +2,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Grab documentation from spm.""" -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from future.utils import raise_from - import os from ..interfaces import matlab @@ -60,4 +56,4 @@ def _strip_header(doc): index = len(doc) return doc[:index] except KeyError as e: - raise_from(IOError('This docstring was not generated by Nipype!\n'), e) + raise IOError('This docstring was not generated by Nipype!\n') from e diff --git a/nipype/utils/tests/test_cmd.py b/nipype/utils/tests/test_cmd.py index 0e16e0aad8..87a6005c2e 100644 --- a/nipype/utils/tests/test_cmd.py +++ b/nipype/utils/tests/test_cmd.py @@ -1,10 +1,4 @@ #!/usr/bin/env python -from __future__ import (print_function, division, unicode_literals, - absolute_import) - -from future import standard_library -standard_library.install_aliases() - import pytest import sys from contextlib import contextmanager diff --git a/nipype/utils/tests/test_config.py b/nipype/utils/tests/test_config.py index a3c0480b29..b52238da44 100644 --- a/nipype/utils/tests/test_config.py +++ b/nipype/utils/tests/test_config.py @@ -1,14 +1,11 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os import sys 
import pytest from nipype import config from mock import MagicMock -from builtins import object try: import xvfbwrapper diff --git a/nipype/utils/tests/test_filemanip.py b/nipype/utils/tests/test_filemanip.py index ae5316c7d7..591df0d336 100644 --- a/nipype/utils/tests/test_filemanip.py +++ b/nipype/utils/tests/test_filemanip.py @@ -1,9 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import unicode_literals -from builtins import open - import os import time import warnings diff --git a/nipype/utils/tests/test_misc.py b/nipype/utils/tests/test_misc.py index 8896039763..7c04a2a361 100644 --- a/nipype/utils/tests/test_misc.py +++ b/nipype/utils/tests/test_misc.py @@ -1,12 +1,8 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from future import standard_library -standard_library.install_aliases() - import os from shutil import rmtree -from builtins import next import pytest diff --git a/nipype/utils/tests/test_nipype2boutiques.py b/nipype/utils/tests/test_nipype2boutiques.py index f1d0c46eed..a376e02435 100644 --- a/nipype/utils/tests/test_nipype2boutiques.py +++ b/nipype/utils/tests/test_nipype2boutiques.py @@ -1,9 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from future import standard_library -standard_library.install_aliases() - from ..nipype2boutiques import generate_boutiques_descriptor diff --git a/nipype/utils/tests/test_provenance.py b/nipype/utils/tests/test_provenance.py index 1d7233907a..cc48e899c5 100644 --- a/nipype/utils/tests/test_provenance.py +++ b/nipype/utils/tests/test_provenance.py @@ -1,11 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import unicode_literals -from builtins import str, bytes -from future import standard_library -standard_library.install_aliases() - import os from nipype.utils.provenance import ProvStore, safe_encode diff --git a/nipype/utils/tmpdirs.py b/nipype/utils/tmpdirs.py index 73f0c4ecc4..a1295c0a50 100644 --- a/nipype/utils/tmpdirs.py +++ b/nipype/utils/tmpdirs.py @@ -2,7 +2,7 @@ from __future__ import (print_function, division, unicode_literals, absolute_import) -from builtins import object + # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os diff --git a/nipype/workflows/dmri/connectivity/group_connectivity.py b/nipype/workflows/dmri/connectivity/group_connectivity.py index a918104bd1..98d75e7b38 100644 --- a/nipype/workflows/dmri/connectivity/group_connectivity.py +++ b/nipype/workflows/dmri/connectivity/group_connectivity.py @@ -1,10 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import open - -from future.utils import raise_from - import os.path as op from ....interfaces import io as nio # Data i/o @@ -535,10 +529,9 @@ def create_average_networks_by_group_workflow(group_list, l4infosource.inputs.group_id1 = list(group_list.keys())[0] l4infosource.inputs.group_id2 = list(group_list.keys())[1] except IndexError as e: - raise_from( - Exception( - 'The create_average_networks_by_group_workflow requires 2 groups' - ), e) + raise Exception( + 'The 
create_average_networks_by_group_workflow requires 2 groups' + ) from e l4info = dict( networks=[['group_id', '']], diff --git a/nipype/workflows/dmri/dipy/denoise.py b/nipype/workflows/dmri/dipy/denoise.py index a45f507b3c..07645dcfa1 100644 --- a/nipype/workflows/dmri/dipy/denoise.py +++ b/nipype/workflows/dmri/dipy/denoise.py @@ -3,7 +3,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from builtins import range from ....pipeline import engine as pe from ....interfaces import utility as niu from ....interfaces import dipy diff --git a/nipype/workflows/dmri/fsl/epi.py b/nipype/workflows/dmri/fsl/epi.py index 3bd88a99b7..334cd34305 100644 --- a/nipype/workflows/dmri/fsl/epi.py +++ b/nipype/workflows/dmri/fsl/epi.py @@ -1,8 +1,5 @@ # -*- coding: utf-8 -*- # coding: utf-8 -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import open, str import warnings diff --git a/nipype/workflows/dmri/fsl/utils.py b/nipype/workflows/dmri/fsl/utils.py index bd53f5cb55..1779c42b6a 100644 --- a/nipype/workflows/dmri/fsl/utils.py +++ b/nipype/workflows/dmri/fsl/utils.py @@ -3,10 +3,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import zip, next, range, str - from ....pipeline import engine as pe from ....interfaces import utility as niu from ....interfaces import fsl diff --git a/nipype/workflows/fmri/fsl/estimate.py b/nipype/workflows/fmri/fsl/estimate.py index 638e422bfc..9f0480badb 100644 --- a/nipype/workflows/fmri/fsl/estimate.py +++ b/nipype/workflows/fmri/fsl/estimate.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from builtins import range from ....interfaces import fsl as fsl # fsl from ....interfaces import utility as util # utility diff --git a/nipype/workflows/misc/utils.py b/nipype/workflows/misc/utils.py index b581ec8c54..b81b932c54 100644 --- a/nipype/workflows/misc/utils.py +++ b/nipype/workflows/misc/utils.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) - -from builtins import map, range def get_vox_dims(volume): diff --git a/nipype/workflows/rsfmri/fsl/resting.py b/nipype/workflows/rsfmri/fsl/resting.py index 12d44a83cf..a731322446 100644 --- a/nipype/workflows/rsfmri/fsl/resting.py +++ b/nipype/workflows/rsfmri/fsl/resting.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import str - from ....interfaces import fsl as fsl # fsl from ....interfaces import utility as util # utility from ....pipeline import engine as pe # pypeline engine diff --git a/nipype/workflows/smri/ants/ANTSBuildTemplate.py b/nipype/workflows/smri/ants/ANTSBuildTemplate.py index 5a43d47bac..f5226d7dfd 100644 --- a/nipype/workflows/smri/ants/ANTSBuildTemplate.py +++ b/nipype/workflows/smri/ants/ANTSBuildTemplate.py @@ -10,11 +10,6 @@ # PURPOSE. 
 ##
 #################################################################################
-from __future__ import print_function
-from builtins import map
-from builtins import zip
-from builtins import range
-
 from ....pipeline import engine as pe
 from ....interfaces import utility as util
 from ....interfaces.utility import Function
diff --git a/nipype/workflows/smri/ants/antsRegistrationBuildTemplate.py b/nipype/workflows/smri/ants/antsRegistrationBuildTemplate.py
index 3574935fc1..7c7478d3d4 100644
--- a/nipype/workflows/smri/ants/antsRegistrationBuildTemplate.py
+++ b/nipype/workflows/smri/ants/antsRegistrationBuildTemplate.py
@@ -10,9 +10,6 @@
 # PURPOSE.
 ##
 #################################################################################
-from __future__ import print_function
-from builtins import range
-
 from ....pipeline import engine as pe
 from ....interfaces import utility as util
 from ....interfaces.utility import Function
diff --git a/nipype/workflows/smri/niftyreg/groupwise.py b/nipype/workflows/smri/niftyreg/groupwise.py
index fd8d25541b..c17452edf6 100644
--- a/nipype/workflows/smri/niftyreg/groupwise.py
+++ b/nipype/workflows/smri/niftyreg/groupwise.py
@@ -6,9 +6,6 @@
 pipelines. Including linear and non-linear image co-registration
 """
-from __future__ import (print_function, division, unicode_literals,
-                        absolute_import)
-from builtins import str, range
 from ....interfaces import utility as niu
 from ....interfaces import niftyreg as niftyreg
 from ....pipeline import engine as pe

From 9e47ec05b79f619902a46eefe986c98586d26e53 Mon Sep 17 00:00:00 2001
From: "Christopher J. Markiewicz"
Date: Wed, 25 Jul 2018 15:26:57 -0400
Subject: [PATCH 0004/1665] RF: Drop one-line __future__ imports

---
 nipype/algorithms/tests/test_modelgen.py | 1 -
 nipype/external/fsl_imglob.py | 1 -
 nipype/interfaces/ants/tests/test_extra_Registration.py | 1 -
 nipype/interfaces/cmtk/tests/test_nbs.py | 1 -
 nipype/interfaces/dtitk/base.py | 3 ---
 nipype/interfaces/elastix/__init__.py | 1 -
 nipype/interfaces/freesurfer/tests/test_BBRegister.py | 1 -
 nipype/interfaces/freesurfer/tests/test_FSSurfaceCommand.py | 1 -
 nipype/interfaces/mrtrix3/__init__.py | 1 -
 nipype/interfaces/niftyseg/stats.py | 1 -
 nipype/interfaces/quickshear.py | 1 -
 nipype/interfaces/semtools/__init__.py | 1 -
 nipype/interfaces/semtools/brains/__init__.py | 1 -
 nipype/interfaces/semtools/diffusion/__init__.py | 1 -
 nipype/interfaces/semtools/diffusion/tractography/__init__.py | 1 -
 nipype/interfaces/semtools/filtering/__init__.py | 1 -
 nipype/interfaces/semtools/legacy/__init__.py | 1 -
 nipype/interfaces/semtools/registration/__init__.py | 1 -
 nipype/interfaces/semtools/segmentation/__init__.py | 1 -
 nipype/interfaces/semtools/testing/__init__.py | 1 -
 nipype/interfaces/semtools/utilities/__init__.py | 1 -
 nipype/interfaces/slicer/__init__.py | 1 -
 nipype/interfaces/slicer/diffusion/__init__.py | 1 -
 nipype/interfaces/slicer/filtering/__init__.py | 1 -
 nipype/interfaces/slicer/legacy/__init__.py | 1 -
 nipype/interfaces/slicer/legacy/diffusion/__init__.py | 1 -
 nipype/interfaces/slicer/quantification/__init__.py | 1 -
 nipype/interfaces/slicer/registration/__init__.py | 1 -
 nipype/interfaces/slicer/segmentation/__init__.py | 1 -
 nipype/interfaces/utility/tests/test_base.py | 1 -
 nipype/interfaces/utility/tests/test_csv.py | 1 -
 nipype/interfaces/utility/tests/test_wrappers.py | 1 -
 nipype/pipeline/engine/__init__.py | 1 -
 nipype/pipeline/engine/tests/test_base.py | 1 -
 nipype/utils/__init__.py | 1 -
 nipype/workflows/dmri/camino/__init__.py | 1 -
nipype/workflows/dmri/connectivity/__init__.py | 1 - nipype/workflows/dmri/dipy/__init__.py | 1 - nipype/workflows/dmri/dtitk/__init__.py | 1 - nipype/workflows/dmri/fsl/__init__.py | 1 - nipype/workflows/dmri/fsl/dti.py | 1 - nipype/workflows/dmri/fsl/tests/test_dti.py | 1 - nipype/workflows/dmri/mrtrix/__init__.py | 1 - nipype/workflows/fmri/fsl/preprocess.py | 1 - 44 files changed, 46 deletions(-) diff --git a/nipype/algorithms/tests/test_modelgen.py b/nipype/algorithms/tests/test_modelgen.py index 824a634354..2bcdf08a6e 100644 --- a/nipype/algorithms/tests/test_modelgen.py +++ b/nipype/algorithms/tests/test_modelgen.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import division from copy import deepcopy import os diff --git a/nipype/external/fsl_imglob.py b/nipype/external/fsl_imglob.py index 3b9a4eea4d..ea4cfe245a 100755 --- a/nipype/external/fsl_imglob.py +++ b/nipype/external/fsl_imglob.py @@ -63,7 +63,6 @@ # Innovation Limited ("Isis"), the technology transfer company of the # University, to negotiate a licence. Contact details are: # innovation@isis.ox.ac.uk quoting reference DE/9564. -from __future__ import print_function import sys import glob from builtins import range diff --git a/nipype/interfaces/ants/tests/test_extra_Registration.py b/nipype/interfaces/ants/tests/test_extra_Registration.py index 745b825c65..1f38f3c61f 100644 --- a/nipype/interfaces/ants/tests/test_extra_Registration.py +++ b/nipype/interfaces/ants/tests/test_extra_Registration.py @@ -1,6 +1,5 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import unicode_literals from nipype.interfaces.ants import registration import os import pytest diff --git a/nipype/interfaces/cmtk/tests/test_nbs.py b/nipype/interfaces/cmtk/tests/test_nbs.py index be3008fb09..a03b00de0f 100644 --- a/nipype/interfaces/cmtk/tests/test_nbs.py +++ b/nipype/interfaces/cmtk/tests/test_nbs.py @@ -1,4 +1,3 @@ -from __future__ import unicode_literals from ..nbs import NetworkBasedStatistic from ....utils.misc import package_check import numpy as np diff --git a/nipype/interfaces/dtitk/base.py b/nipype/interfaces/dtitk/base.py index 5cfb81d9dd..32289e5217 100644 --- a/nipype/interfaces/dtitk/base.py +++ b/nipype/interfaces/dtitk/base.py @@ -25,9 +25,6 @@ See the docstrings of the individual classes for examples. """ -from __future__ import print_function, division, unicode_literals, \ - absolute_import - import os from ... 
import logging diff --git a/nipype/interfaces/elastix/__init__.py b/nipype/interfaces/elastix/__init__.py index 9dcdb88c18..e7ddc2a9f7 100644 --- a/nipype/interfaces/elastix/__init__.py +++ b/nipype/interfaces/elastix/__init__.py @@ -4,7 +4,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Top-level namespace for elastix.""" -from __future__ import absolute_import from .registration import Registration, ApplyWarp, AnalyzeWarp, PointsWarp from .utils import EditTransform diff --git a/nipype/interfaces/freesurfer/tests/test_BBRegister.py b/nipype/interfaces/freesurfer/tests/test_BBRegister.py index 3e80ad4e9c..81a67742e2 100644 --- a/nipype/interfaces/freesurfer/tests/test_BBRegister.py +++ b/nipype/interfaces/freesurfer/tests/test_BBRegister.py @@ -1,4 +1,3 @@ -from __future__ import unicode_literals from ..preprocess import BBRegister, BBRegisterInputSpec6 diff --git a/nipype/interfaces/freesurfer/tests/test_FSSurfaceCommand.py b/nipype/interfaces/freesurfer/tests/test_FSSurfaceCommand.py index 704dfeaaf3..bc34a8d7aa 100644 --- a/nipype/interfaces/freesurfer/tests/test_FSSurfaceCommand.py +++ b/nipype/interfaces/freesurfer/tests/test_FSSurfaceCommand.py @@ -1,5 +1,4 @@ # Modified 2017.04.21 by Chris Markiewicz -from __future__ import unicode_literals import pytest from ..base import FSSurfaceCommand diff --git a/nipype/interfaces/mrtrix3/__init__.py b/nipype/interfaces/mrtrix3/__init__.py index 507380c30e..3ed323539a 100644 --- a/nipype/interfaces/mrtrix3/__init__.py +++ b/nipype/interfaces/mrtrix3/__init__.py @@ -1,4 +1,3 @@ -from __future__ import absolute_import # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: # -*- coding: utf-8 -*- diff --git a/nipype/interfaces/niftyseg/stats.py b/nipype/interfaces/niftyseg/stats.py index 796e07410c..94c7abd49e 100644 --- a/nipype/interfaces/niftyseg/stats.py +++ b/nipype/interfaces/niftyseg/stats.py @@ -4,7 +4,6 @@ The stats module provides higher-level interfaces to some of the operations that can be performed with the niftyseg stats (seg_stats) command-line program. 
""" -from __future__ import print_function import numpy as np from ..base import TraitedSpec, File, traits, CommandLineInputSpec diff --git a/nipype/interfaces/quickshear.py b/nipype/interfaces/quickshear.py index b1317c3599..7508c0a356 100644 --- a/nipype/interfaces/quickshear.py +++ b/nipype/interfaces/quickshear.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- """ Quickshear is a simple geometric defacing algorithm """ -from __future__ import unicode_literals from .base import CommandLineInputSpec, CommandLine, traits, TraitedSpec, File from ..external.due import BibTeX diff --git a/nipype/interfaces/semtools/__init__.py b/nipype/interfaces/semtools/__init__.py index 14473b8381..243e3a43a0 100644 --- a/nipype/interfaces/semtools/__init__.py +++ b/nipype/interfaces/semtools/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import from .diffusion import * from .featurecreator import GenerateCsfClippedFromClassifiedImage from .segmentation import * diff --git a/nipype/interfaces/semtools/brains/__init__.py b/nipype/interfaces/semtools/brains/__init__.py index ebfab84bf3..a8bd05a1be 100644 --- a/nipype/interfaces/semtools/brains/__init__.py +++ b/nipype/interfaces/semtools/brains/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import from .segmentation import SimilarityIndex, BRAINSTalairach, BRAINSTalairachMask from .utilities import (HistogramMatchingFilter, GenerateEdgeMapImage, GeneratePurePlugMask) diff --git a/nipype/interfaces/semtools/diffusion/__init__.py b/nipype/interfaces/semtools/diffusion/__init__.py index 215cfa41d7..f9f414d087 100644 --- a/nipype/interfaces/semtools/diffusion/__init__.py +++ b/nipype/interfaces/semtools/diffusion/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import from .diffusion import dtiaverage, dtiestim, dtiprocess, DWIConvert from .tractography import * from .gtract import ( diff --git a/nipype/interfaces/semtools/diffusion/tractography/__init__.py b/nipype/interfaces/semtools/diffusion/tractography/__init__.py index f846b7fde5..ac45b2050f 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/__init__.py +++ b/nipype/interfaces/semtools/diffusion/tractography/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import from .commandlineonly import fiberstats from .fiberprocess import fiberprocess from .fibertrack import fibertrack diff --git a/nipype/interfaces/semtools/filtering/__init__.py b/nipype/interfaces/semtools/filtering/__init__.py index 1e69233303..82ad67a9c1 100644 --- a/nipype/interfaces/semtools/filtering/__init__.py +++ b/nipype/interfaces/semtools/filtering/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import from .denoising import UnbiasedNonLocalMeans from .featuredetection import ( GenerateSummedGradientImage, CannySegmentationLevelSetImageFilter, diff --git a/nipype/interfaces/semtools/legacy/__init__.py b/nipype/interfaces/semtools/legacy/__init__.py index 3309d49d62..343704adb7 100644 --- a/nipype/interfaces/semtools/legacy/__init__.py +++ b/nipype/interfaces/semtools/legacy/__init__.py @@ -1,3 +1,2 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import from .registration import scalartransform diff --git a/nipype/interfaces/semtools/registration/__init__.py b/nipype/interfaces/semtools/registration/__init__.py index 33bd60ad59..3487d13134 100644 --- a/nipype/interfaces/semtools/registration/__init__.py +++ 
b/nipype/interfaces/semtools/registration/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import from .specialized import (VBRAINSDemonWarp, BRAINSDemonWarp, BRAINSTransformFromFiducials) from .brainsresample import BRAINSResample diff --git a/nipype/interfaces/semtools/segmentation/__init__.py b/nipype/interfaces/semtools/segmentation/__init__.py index d0cd69e0c9..e5ea4e2776 100644 --- a/nipype/interfaces/semtools/segmentation/__init__.py +++ b/nipype/interfaces/semtools/segmentation/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import from .specialized import ( BRAINSCut, BRAINSROIAuto, BRAINSConstellationDetector, BRAINSCreateLabelMapFromProbabilityMaps, BinaryMaskEditorBasedOnLandmarks, diff --git a/nipype/interfaces/semtools/testing/__init__.py b/nipype/interfaces/semtools/testing/__init__.py index 66a4a2262e..d06a7ea2df 100644 --- a/nipype/interfaces/semtools/testing/__init__.py +++ b/nipype/interfaces/semtools/testing/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import from .featuredetection import SphericalCoordinateGeneration from .landmarkscompare import LandmarksCompare from .generateaveragelmkfile import GenerateAverageLmkFile diff --git a/nipype/interfaces/semtools/utilities/__init__.py b/nipype/interfaces/semtools/utilities/__init__.py index 698b76a534..b59f373cf9 100644 --- a/nipype/interfaces/semtools/utilities/__init__.py +++ b/nipype/interfaces/semtools/utilities/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import from .brains import ( BRAINSConstellationModeler, landmarksConstellationWeights, BRAINSTrimForegroundInDirection, BRAINSLmkTransform, BRAINSMush, diff --git a/nipype/interfaces/slicer/__init__.py b/nipype/interfaces/slicer/__init__.py index c7ffc42259..5198d81be3 100644 --- a/nipype/interfaces/slicer/__init__.py +++ b/nipype/interfaces/slicer/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import from .diffusion import * from .segmentation import * from .filtering import * diff --git a/nipype/interfaces/slicer/diffusion/__init__.py b/nipype/interfaces/slicer/diffusion/__init__.py index fd7231cefe..f6081f6c0c 100644 --- a/nipype/interfaces/slicer/diffusion/__init__.py +++ b/nipype/interfaces/slicer/diffusion/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import from .diffusion import ( ResampleDTIVolume, DWIRicianLMMSEFilter, TractographyLabelMapSeeding, DWIJointRicianLMMSEFilter, DiffusionWeightedVolumeMasking, DTIimport, diff --git a/nipype/interfaces/slicer/filtering/__init__.py b/nipype/interfaces/slicer/filtering/__init__.py index 30ace1d2f2..1270e20d26 100644 --- a/nipype/interfaces/slicer/filtering/__init__.py +++ b/nipype/interfaces/slicer/filtering/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import from .morphology import (GrayscaleGrindPeakImageFilter, GrayscaleFillHoleImageFilter) from .denoising import (GradientAnisotropicDiffusion, diff --git a/nipype/interfaces/slicer/legacy/__init__.py b/nipype/interfaces/slicer/legacy/__init__.py index 75c6b9d327..92cbc1ff73 100644 --- a/nipype/interfaces/slicer/legacy/__init__.py +++ b/nipype/interfaces/slicer/legacy/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import from .diffusion import * from .segmentation import OtsuThresholdSegmentation from .filtering import OtsuThresholdImageFilter, 
ResampleScalarVolume diff --git a/nipype/interfaces/slicer/legacy/diffusion/__init__.py b/nipype/interfaces/slicer/legacy/diffusion/__init__.py index f66daabb5b..9a9143d214 100644 --- a/nipype/interfaces/slicer/legacy/diffusion/__init__.py +++ b/nipype/interfaces/slicer/legacy/diffusion/__init__.py @@ -1,3 +1,2 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import from .denoising import DWIUnbiasedNonLocalMeansFilter diff --git a/nipype/interfaces/slicer/quantification/__init__.py b/nipype/interfaces/slicer/quantification/__init__.py index 6054dddd59..c0e1e870e7 100644 --- a/nipype/interfaces/slicer/quantification/__init__.py +++ b/nipype/interfaces/slicer/quantification/__init__.py @@ -1,4 +1,3 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import from .changequantification import IntensityDifferenceMetric from .petstandarduptakevaluecomputation import PETStandardUptakeValueComputation diff --git a/nipype/interfaces/slicer/registration/__init__.py b/nipype/interfaces/slicer/registration/__init__.py index 375b9b5416..2d03aabe03 100644 --- a/nipype/interfaces/slicer/registration/__init__.py +++ b/nipype/interfaces/slicer/registration/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import from .specialized import (ACPCTransform, FiducialRegistration, VBRAINSDemonWarp, BRAINSDemonWarp) from .brainsresample import BRAINSResample diff --git a/nipype/interfaces/slicer/segmentation/__init__.py b/nipype/interfaces/slicer/segmentation/__init__.py index d966f07e27..5b3cf6d468 100644 --- a/nipype/interfaces/slicer/segmentation/__init__.py +++ b/nipype/interfaces/slicer/segmentation/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import from .specialized import (RobustStatisticsSegmenter, EMSegmentCommandLine, BRAINSROIAuto) from .simpleregiongrowingsegmentation import SimpleRegionGrowingSegmentation diff --git a/nipype/interfaces/utility/tests/test_base.py b/nipype/interfaces/utility/tests/test_base.py index 159454a7fc..0356452638 100644 --- a/nipype/interfaces/utility/tests/test_base.py +++ b/nipype/interfaces/utility/tests/test_base.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import print_function, unicode_literals import os import pytest diff --git a/nipype/interfaces/utility/tests/test_csv.py b/nipype/interfaces/utility/tests/test_csv.py index a5c678153e..3c15c81239 100644 --- a/nipype/interfaces/utility/tests/test_csv.py +++ b/nipype/interfaces/utility/tests/test_csv.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import print_function, unicode_literals from nipype.interfaces import utility diff --git a/nipype/interfaces/utility/tests/test_wrappers.py b/nipype/interfaces/utility/tests/test_wrappers.py index 392ae094b0..79b2682af9 100644 --- a/nipype/interfaces/utility/tests/test_wrappers.py +++ b/nipype/interfaces/utility/tests/test_wrappers.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import print_function, unicode_literals import os import pytest diff --git a/nipype/pipeline/engine/__init__.py b/nipype/pipeline/engine/__init__.py index e950086307..4dc6784276 100644 --- a/nipype/pipeline/engine/__init__.py +++ 
b/nipype/pipeline/engine/__init__.py @@ -7,7 +7,6 @@ """ -from __future__ import absolute_import __docformat__ = 'restructuredtext' from .workflows import Workflow from .nodes import Node, MapNode, JoinNode diff --git a/nipype/pipeline/engine/tests/test_base.py b/nipype/pipeline/engine/tests/test_base.py index fd87aa6878..5933996d59 100644 --- a/nipype/pipeline/engine/tests/test_base.py +++ b/nipype/pipeline/engine/tests/test_base.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import print_function, unicode_literals import pytest from ..base import EngineBase diff --git a/nipype/utils/__init__.py b/nipype/utils/__init__.py index 4a0741e48e..6508602eb8 100644 --- a/nipype/utils/__init__.py +++ b/nipype/utils/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import from .config import NUMPY_MMAP from .onetime import OneTimeProperty, setattr_on_read diff --git a/nipype/workflows/dmri/camino/__init__.py b/nipype/workflows/dmri/camino/__init__.py index 07ba37fc52..c012d1c726 100644 --- a/nipype/workflows/dmri/camino/__init__.py +++ b/nipype/workflows/dmri/camino/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import from .diffusion import create_camino_dti_pipeline from .connectivity_mapping import create_connectivity_pipeline from .group_connectivity import create_group_connectivity_pipeline diff --git a/nipype/workflows/dmri/connectivity/__init__.py b/nipype/workflows/dmri/connectivity/__init__.py index b34ca0dacb..39b3060eb0 100644 --- a/nipype/workflows/dmri/connectivity/__init__.py +++ b/nipype/workflows/dmri/connectivity/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import from .nx import (create_networkx_pipeline, create_cmats_to_csv_pipeline) from .group_connectivity import ( create_merge_networks_by_group_workflow, diff --git a/nipype/workflows/dmri/dipy/__init__.py b/nipype/workflows/dmri/dipy/__init__.py index 354ba7a7e6..55b3c1502d 100644 --- a/nipype/workflows/dmri/dipy/__init__.py +++ b/nipype/workflows/dmri/dipy/__init__.py @@ -3,5 +3,4 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import absolute_import from .denoise import nlmeans_pipeline diff --git a/nipype/workflows/dmri/dtitk/__init__.py b/nipype/workflows/dmri/dtitk/__init__.py index 02dbf25549..454ca31373 100644 --- a/nipype/workflows/dmri/dtitk/__init__.py +++ b/nipype/workflows/dmri/dtitk/__init__.py @@ -3,6 +3,5 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import absolute_import from .tensor_registration import (affine_tensor_pipeline, diffeomorphic_tensor_pipeline) diff --git a/nipype/workflows/dmri/fsl/__init__.py b/nipype/workflows/dmri/fsl/__init__.py index 66be352b84..efb43f1edb 100644 --- a/nipype/workflows/dmri/fsl/__init__.py +++ b/nipype/workflows/dmri/fsl/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import from .dti import create_bedpostx_pipeline, bedpostx_parallel from .artifacts import (all_fmb_pipeline, all_peb_pipeline, all_fsl_pipeline, diff --git a/nipype/workflows/dmri/fsl/dti.py b/nipype/workflows/dmri/fsl/dti.py index ee7e48dd5a..ef4bf6ae6e 100644 --- a/nipype/workflows/dmri/fsl/dti.py +++ b/nipype/workflows/dmri/fsl/dti.py @@ -1,7 +1,6 @@ # -*- coding: 
utf-8 -*- # coding: utf-8 -from __future__ import absolute_import from ....pipeline import engine as pe from ....interfaces import utility as niu diff --git a/nipype/workflows/dmri/fsl/tests/test_dti.py b/nipype/workflows/dmri/fsl/tests/test_dti.py index 23cd8f37d8..2fcd59d0de 100644 --- a/nipype/workflows/dmri/fsl/tests/test_dti.py +++ b/nipype/workflows/dmri/fsl/tests/test_dti.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals, print_function, absolute_import import os import pytest diff --git a/nipype/workflows/dmri/mrtrix/__init__.py b/nipype/workflows/dmri/mrtrix/__init__.py index 6851021111..c8b70ac0bc 100644 --- a/nipype/workflows/dmri/mrtrix/__init__.py +++ b/nipype/workflows/dmri/mrtrix/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import from .diffusion import create_mrtrix_dti_pipeline from .connectivity_mapping import create_connectivity_pipeline from .group_connectivity import (create_group_connectivity_pipeline) diff --git a/nipype/workflows/fmri/fsl/preprocess.py b/nipype/workflows/fmri/fsl/preprocess.py index ac235bdba1..dbda71e861 100644 --- a/nipype/workflows/fmri/fsl/preprocess.py +++ b/nipype/workflows/fmri/fsl/preprocess.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import division import os from ....interfaces import fsl as fsl # fsl From 06ecbe442accc4f51e355233d9a23d38996ef508 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 25 Jul 2018 15:28:29 -0400 Subject: [PATCH 0005/1665] RF: Remove two-line __future__ imports --- nipype/external/portalocker.py | 2 -- nipype/interfaces/afni/model.py | 2 -- nipype/interfaces/afni/svm.py | 2 -- nipype/interfaces/ants/utils.py | 2 -- nipype/interfaces/ants/visualization.py | 2 -- nipype/interfaces/base/tests/test_resource_monitor.py | 2 -- nipype/interfaces/brainsuite/brainsuite.py | 2 -- nipype/interfaces/bru2nii.py | 2 -- nipype/interfaces/c3.py | 2 -- nipype/interfaces/camino/calib.py | 2 -- nipype/interfaces/camino/connectivity.py | 2 -- nipype/interfaces/camino/convert.py | 2 -- nipype/interfaces/camino/dti.py | 2 -- nipype/interfaces/camino/odf.py | 2 -- nipype/interfaces/camino/utils.py | 2 -- nipype/interfaces/camino2trackvis/convert.py | 2 -- nipype/interfaces/cmtk/convert.py | 2 -- nipype/interfaces/cmtk/nbs.py | 2 -- nipype/interfaces/dcmstack.py | 2 -- nipype/interfaces/diffusion_toolkit/postproc.py | 2 -- nipype/interfaces/dipy/anisotropic_power.py | 2 -- nipype/interfaces/dipy/base.py | 2 -- nipype/interfaces/dipy/preprocess.py | 2 -- nipype/interfaces/dipy/setup.py | 2 -- nipype/interfaces/dipy/tensors.py | 2 -- nipype/interfaces/dipy/tracks.py | 2 -- nipype/interfaces/dynamic_slicer.py | 2 -- nipype/interfaces/elastix/base.py | 2 -- nipype/interfaces/freesurfer/longitudinal.py | 2 -- nipype/interfaces/freesurfer/model.py | 2 -- nipype/interfaces/freesurfer/registration.py | 2 -- nipype/interfaces/fsl/aroma.py | 2 -- nipype/interfaces/fsl/base.py | 2 -- nipype/interfaces/fsl/fix.py | 2 -- nipype/interfaces/fsl/maths.py | 2 -- nipype/interfaces/meshfix.py | 2 -- nipype/interfaces/minc/testdata.py | 2 -- nipype/interfaces/mipav/__init__.py | 2 -- nipype/interfaces/mixins/reporting.py | 2 -- nipype/interfaces/mrtrix/convert.py | 2 -- nipype/interfaces/mrtrix/preprocess.py | 2 -- nipype/interfaces/mrtrix/tensors.py | 2 -- nipype/interfaces/mrtrix/tracking.py | 2 -- nipype/interfaces/mrtrix3/base.py | 2 
-- nipype/interfaces/mrtrix3/connectivity.py | 2 -- nipype/interfaces/mrtrix3/preprocess.py | 2 -- nipype/interfaces/mrtrix3/reconst.py | 2 -- nipype/interfaces/mrtrix3/tracking.py | 2 -- nipype/interfaces/mrtrix3/utils.py | 2 -- nipype/interfaces/niftyseg/base.py | 2 -- nipype/interfaces/nilearn.py | 2 -- nipype/interfaces/nipy/utils.py | 2 -- nipype/interfaces/petpvc.py | 2 -- nipype/interfaces/slicer/generate_classes.py | 1 - nipype/interfaces/spm/utils.py | 2 -- nipype/interfaces/vista/vista.py | 2 -- nipype/interfaces/vtkbase.py | 2 -- nipype/interfaces/workbench/base.py | 2 -- nipype/interfaces/workbench/metric.py | 2 -- nipype/pipeline/__init__.py | 2 -- nipype/pipeline/plugins/__init__.py | 2 -- nipype/pipeline/plugins/condor.py | 2 -- nipype/pipeline/plugins/debug.py | 2 -- nipype/pipeline/plugins/legacymultiproc.py | 2 -- nipype/pipeline/plugins/linear.py | 2 -- nipype/pipeline/plugins/lsf.py | 2 -- nipype/pipeline/plugins/multiproc.py | 2 -- nipype/pipeline/plugins/semaphore_singleton.py | 2 -- nipype/pipeline/plugins/somaflow.py | 2 -- nipype/scripts/utils.py | 2 -- nipype/sphinxext/__init__.py | 1 - nipype/utils/tmpdirs.py | 2 -- nipype/workflows/data/__init__.py | 2 -- nipype/workflows/dmri/__init__.py | 2 -- nipype/workflows/dmri/fsl/artifacts.py | 2 -- nipype/workflows/fmri/__init__.py | 2 -- nipype/workflows/graph/__init__.py | 2 -- nipype/workflows/rsfmri/__init__.py | 2 -- nipype/workflows/smri/__init__.py | 2 -- nipype/workflows/smri/freesurfer/autorecon1.py | 2 -- nipype/workflows/smri/freesurfer/autorecon2.py | 2 -- nipype/workflows/smri/freesurfer/autorecon3.py | 2 -- nipype/workflows/smri/freesurfer/ba_maps.py | 2 -- nipype/workflows/smri/freesurfer/recon.py | 2 -- 84 files changed, 166 deletions(-) diff --git a/nipype/external/portalocker.py b/nipype/external/portalocker.py index 1da24d894c..0b171f9825 100644 --- a/nipype/external/portalocker.py +++ b/nipype/external/portalocker.py @@ -49,8 +49,6 @@ Version: $Id: portalocker.py 5474 2008-05-16 20:53:50Z lowell $ ''' -from __future__ import (print_function, division, unicode_literals, - absolute_import) from builtins import open __all__ = [ diff --git a/nipype/interfaces/afni/model.py b/nipype/interfaces/afni/model.py index 2cccdfe869..615db086f7 100644 --- a/nipype/interfaces/afni/model.py +++ b/nipype/interfaces/afni/model.py @@ -7,8 +7,6 @@ -------- See the docstrings of the individual classes for examples. 
""" -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os diff --git a/nipype/interfaces/afni/svm.py b/nipype/interfaces/afni/svm.py index d465c1caaa..d7cef574a7 100644 --- a/nipype/interfaces/afni/svm.py +++ b/nipype/interfaces/afni/svm.py @@ -3,8 +3,6 @@ # vi: set ft = python sts = 4 ts = 4 sw = 4 et: """Afni svm interfaces """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) from ..base import TraitedSpec, traits, File from .base import AFNICommand, AFNICommandInputSpec, AFNICommandOutputSpec diff --git a/nipype/interfaces/ants/utils.py b/nipype/interfaces/ants/utils.py index 5d284b89c0..0725f45edc 100644 --- a/nipype/interfaces/ants/utils.py +++ b/nipype/interfaces/ants/utils.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- """ANTS Apply Transforms interface """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os diff --git a/nipype/interfaces/ants/visualization.py b/nipype/interfaces/ants/visualization.py index 21186931ce..b5ab661889 100644 --- a/nipype/interfaces/ants/visualization.py +++ b/nipype/interfaces/ants/visualization.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- """The ants visualisation module provides basic functions based on ITK. """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os diff --git a/nipype/interfaces/base/tests/test_resource_monitor.py b/nipype/interfaces/base/tests/test_resource_monitor.py index f82a82661d..555e163c5c 100644 --- a/nipype/interfaces/base/tests/test_resource_monitor.py +++ b/nipype/interfaces/base/tests/test_resource_monitor.py @@ -6,8 +6,6 @@ Module to unit test the resource_monitor in nipype """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os import pytest diff --git a/nipype/interfaces/brainsuite/brainsuite.py b/nipype/interfaces/brainsuite/brainsuite.py index aa75d2bf81..919e9aba1a 100644 --- a/nipype/interfaces/brainsuite/brainsuite.py +++ b/nipype/interfaces/brainsuite/brainsuite.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os import re as regex diff --git a/nipype/interfaces/bru2nii.py b/nipype/interfaces/bru2nii.py index c1c1484d38..eada2350a0 100644 --- a/nipype/interfaces/bru2nii.py +++ b/nipype/interfaces/bru2nii.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- """The bru2nii module provides basic functions for dicom conversion """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os from .base import (CommandLine, CommandLineInputSpec, traits, TraitedSpec, diff --git a/nipype/interfaces/c3.py b/nipype/interfaces/c3.py index 115804cc3f..db81fce55f 100644 --- a/nipype/interfaces/c3.py +++ b/nipype/interfaces/c3.py @@ -2,8 +2,6 @@ """The ants module provides basic functions for interfacing with ants functions. 
""" -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os from glob import glob diff --git a/nipype/interfaces/camino/calib.py b/nipype/interfaces/camino/calib.py index 1921f62651..24eb993bf6 100644 --- a/nipype/interfaces/camino/calib.py +++ b/nipype/interfaces/camino/calib.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os diff --git a/nipype/interfaces/camino/connectivity.py b/nipype/interfaces/camino/connectivity.py index 97e400e0f5..78172db9cc 100644 --- a/nipype/interfaces/camino/connectivity.py +++ b/nipype/interfaces/camino/connectivity.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os from ...utils.filemanip import split_filename diff --git a/nipype/interfaces/camino/convert.py b/nipype/interfaces/camino/convert.py index ee2ae2eb82..b187e89b0c 100644 --- a/nipype/interfaces/camino/convert.py +++ b/nipype/interfaces/camino/convert.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os import glob diff --git a/nipype/interfaces/camino/dti.py b/nipype/interfaces/camino/dti.py index b32b9dc528..13a243434d 100644 --- a/nipype/interfaces/camino/dti.py +++ b/nipype/interfaces/camino/dti.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os diff --git a/nipype/interfaces/camino/odf.py b/nipype/interfaces/camino/odf.py index 6fea6fdcfd..163c41fd87 100644 --- a/nipype/interfaces/camino/odf.py +++ b/nipype/interfaces/camino/odf.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os diff --git a/nipype/interfaces/camino/utils.py b/nipype/interfaces/camino/utils.py index 57fcd58d9a..6cfba21653 100644 --- a/nipype/interfaces/camino/utils.py +++ b/nipype/interfaces/camino/utils.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os from ..base import (traits, TraitedSpec, File, CommandLine, diff --git a/nipype/interfaces/camino2trackvis/convert.py b/nipype/interfaces/camino2trackvis/convert.py index 573ddffe2d..3f5664b975 100644 --- a/nipype/interfaces/camino2trackvis/convert.py +++ b/nipype/interfaces/camino2trackvis/convert.py @@ -2,8 +2,6 @@ """ Provides interfaces to various commands provided by Camino-Trackvis """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os diff --git a/nipype/interfaces/cmtk/convert.py b/nipype/interfaces/cmtk/convert.py index 93802d5eb8..170ca44366 100644 --- a/nipype/interfaces/cmtk/convert.py +++ b/nipype/interfaces/cmtk/convert.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os import os.path as op diff --git a/nipype/interfaces/cmtk/nbs.py b/nipype/interfaces/cmtk/nbs.py index f21f2b33ea..19425d2977 100644 --- a/nipype/interfaces/cmtk/nbs.py +++ b/nipype/interfaces/cmtk/nbs.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os.path as op diff --git 
a/nipype/interfaces/dcmstack.py b/nipype/interfaces/dcmstack.py index 626cabe6cf..711d84920f 100644 --- a/nipype/interfaces/dcmstack.py +++ b/nipype/interfaces/dcmstack.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- """Provides interfaces to various commands provided by dcmstack """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os from os import path as op diff --git a/nipype/interfaces/diffusion_toolkit/postproc.py b/nipype/interfaces/diffusion_toolkit/postproc.py index 20aaeea927..19cafe8d7d 100644 --- a/nipype/interfaces/diffusion_toolkit/postproc.py +++ b/nipype/interfaces/diffusion_toolkit/postproc.py @@ -3,8 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Provides interfaces to various commands provided by diffusion toolkit """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os from ..base import (TraitedSpec, File, traits, CommandLine, InputMultiPath, diff --git a/nipype/interfaces/dipy/anisotropic_power.py b/nipype/interfaces/dipy/anisotropic_power.py index e28ae2bd19..21208326f4 100644 --- a/nipype/interfaces/dipy/anisotropic_power.py +++ b/nipype/interfaces/dipy/anisotropic_power.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) import nibabel as nb diff --git a/nipype/interfaces/dipy/base.py b/nipype/interfaces/dipy/base.py index 7a9221e3d1..30d10bd3a3 100644 --- a/nipype/interfaces/dipy/base.py +++ b/nipype/interfaces/dipy/base.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- """ Base interfaces for dipy """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os.path as op import numpy as np diff --git a/nipype/interfaces/dipy/preprocess.py b/nipype/interfaces/dipy/preprocess.py index cc589f6579..1245ece316 100644 --- a/nipype/interfaces/dipy/preprocess.py +++ b/nipype/interfaces/dipy/preprocess.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os.path as op import nibabel as nb diff --git a/nipype/interfaces/dipy/setup.py b/nipype/interfaces/dipy/setup.py index e6c8f000b2..408d7af731 100644 --- a/nipype/interfaces/dipy/setup.py +++ b/nipype/interfaces/dipy/setup.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) def configuration(parent_package='', top_path=None): diff --git a/nipype/interfaces/dipy/tensors.py b/nipype/interfaces/dipy/tensors.py index c79e063683..49bab47bfd 100644 --- a/nipype/interfaces/dipy/tensors.py +++ b/nipype/interfaces/dipy/tensors.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) import nibabel as nb diff --git a/nipype/interfaces/dipy/tracks.py b/nipype/interfaces/dipy/tracks.py index b7d965ece1..7600ee3dba 100644 --- a/nipype/interfaces/dipy/tracks.py +++ b/nipype/interfaces/dipy/tracks.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os.path as op import numpy as np diff --git a/nipype/interfaces/dynamic_slicer.py b/nipype/interfaces/dynamic_slicer.py index 5d3a3c1899..db238127c9 100644 --- a/nipype/interfaces/dynamic_slicer.py +++ b/nipype/interfaces/dynamic_slicer.py @@ -1,8 +1,6 @@ # -*- 
coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os import warnings diff --git a/nipype/interfaces/elastix/base.py b/nipype/interfaces/elastix/base.py index b47e1fec17..748f69f44d 100644 --- a/nipype/interfaces/elastix/base.py +++ b/nipype/interfaces/elastix/base.py @@ -9,8 +9,6 @@ """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) from ... import logging from ..base import CommandLineInputSpec, Directory, traits diff --git a/nipype/interfaces/freesurfer/longitudinal.py b/nipype/interfaces/freesurfer/longitudinal.py index 1d982a7a44..eed32173b4 100644 --- a/nipype/interfaces/freesurfer/longitudinal.py +++ b/nipype/interfaces/freesurfer/longitudinal.py @@ -3,8 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Provides interfaces to various longitudinal commands provided by freesurfer """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os diff --git a/nipype/interfaces/freesurfer/model.py b/nipype/interfaces/freesurfer/model.py index 58d168e2d7..f8ce0291e9 100644 --- a/nipype/interfaces/freesurfer/model.py +++ b/nipype/interfaces/freesurfer/model.py @@ -4,8 +4,6 @@ """The freesurfer module provides basic functions for interfacing with freesurfer tools. """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os diff --git a/nipype/interfaces/freesurfer/registration.py b/nipype/interfaces/freesurfer/registration.py index 99ee7d0179..79d12bbab9 100644 --- a/nipype/interfaces/freesurfer/registration.py +++ b/nipype/interfaces/freesurfer/registration.py @@ -3,8 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Provides interfaces to various longitudinal commands provided by freesurfer """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os import os.path diff --git a/nipype/interfaces/fsl/aroma.py b/nipype/interfaces/fsl/aroma.py index a86763e3e5..ed0b85df90 100644 --- a/nipype/interfaces/fsl/aroma.py +++ b/nipype/interfaces/fsl/aroma.py @@ -5,8 +5,6 @@ `ICA-AROMA.py`_ command line tool. """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) from ..base import (TraitedSpec, CommandLineInputSpec, CommandLine, File, Directory, traits, isdefined) import os diff --git a/nipype/interfaces/fsl/base.py b/nipype/interfaces/fsl/base.py index db74e900b8..bd62751c1a 100644 --- a/nipype/interfaces/fsl/base.py +++ b/nipype/interfaces/fsl/base.py @@ -25,8 +25,6 @@ See the docstrings of the individual classes for examples. 
""" -from __future__ import (print_function, division, unicode_literals, - absolute_import) from glob import glob import os diff --git a/nipype/interfaces/fsl/fix.py b/nipype/interfaces/fsl/fix.py index ebe986eb79..b681e62b2f 100644 --- a/nipype/interfaces/fsl/fix.py +++ b/nipype/interfaces/fsl/fix.py @@ -54,8 +54,6 @@ outgraph = fix_pipeline.run() """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) from ..base import (TraitedSpec, CommandLineInputSpec, CommandLine, InputMultiPath, OutputMultiPath, BaseInterface, diff --git a/nipype/interfaces/fsl/maths.py b/nipype/interfaces/fsl/maths.py index 3862cea8c7..afa9328205 100644 --- a/nipype/interfaces/fsl/maths.py +++ b/nipype/interfaces/fsl/maths.py @@ -5,8 +5,6 @@ The maths module provides higher-level interfaces to some of the operations that can be performed with the fslmaths command-line program. """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os import numpy as np diff --git a/nipype/interfaces/meshfix.py b/nipype/interfaces/meshfix.py index 4b9db519a9..880ae2da06 100644 --- a/nipype/interfaces/meshfix.py +++ b/nipype/interfaces/meshfix.py @@ -3,8 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """ Fixes meshes: """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os.path as op from ..utils.filemanip import split_filename diff --git a/nipype/interfaces/minc/testdata.py b/nipype/interfaces/minc/testdata.py index 0a8d6bfd88..f027efa35e 100644 --- a/nipype/interfaces/minc/testdata.py +++ b/nipype/interfaces/minc/testdata.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os from ...testing import example_data diff --git a/nipype/interfaces/mipav/__init__.py b/nipype/interfaces/mipav/__init__.py index 8a9e08fd9b..2d5fce8652 100644 --- a/nipype/interfaces/mipav/__init__.py +++ b/nipype/interfaces/mipav/__init__.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) from .developer import ( JistLaminarVolumetricLayering, JistBrainMgdmSegmentation, JistLaminarProfileGeometry, JistLaminarProfileCalculator, MedicAlgorithmN3, diff --git a/nipype/interfaces/mixins/reporting.py b/nipype/interfaces/mixins/reporting.py index 3f4d1b1317..fecdb337c5 100644 --- a/nipype/interfaces/mixins/reporting.py +++ b/nipype/interfaces/mixins/reporting.py @@ -2,8 +2,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ class mixin and utilities for enabling reports for nipype interfaces """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os from abc import abstractmethod diff --git a/nipype/interfaces/mrtrix/convert.py b/nipype/interfaces/mrtrix/convert.py index a3a280c895..62b53fc19b 100644 --- a/nipype/interfaces/mrtrix/convert.py +++ b/nipype/interfaces/mrtrix/convert.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) from io import open import os.path as op diff --git a/nipype/interfaces/mrtrix/preprocess.py b/nipype/interfaces/mrtrix/preprocess.py index 5fc67177a1..adc2ef27d7 100644 --- a/nipype/interfaces/mrtrix/preprocess.py +++ 
b/nipype/interfaces/mrtrix/preprocess.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os.path as op diff --git a/nipype/interfaces/mrtrix/tensors.py b/nipype/interfaces/mrtrix/tensors.py index 04c901f1f9..6751b6196d 100644 --- a/nipype/interfaces/mrtrix/tensors.py +++ b/nipype/interfaces/mrtrix/tensors.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os.path as op import numpy as np diff --git a/nipype/interfaces/mrtrix/tracking.py b/nipype/interfaces/mrtrix/tracking.py index 7a7ed995f0..f115841482 100644 --- a/nipype/interfaces/mrtrix/tracking.py +++ b/nipype/interfaces/mrtrix/tracking.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os import os.path as op diff --git a/nipype/interfaces/mrtrix3/base.py b/nipype/interfaces/mrtrix3/base.py index 55ca385153..a4ad818681 100644 --- a/nipype/interfaces/mrtrix3/base.py +++ b/nipype/interfaces/mrtrix3/base.py @@ -1,8 +1,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) from ... import logging from ..base import (CommandLineInputSpec, CommandLine, traits, File, isdefined) diff --git a/nipype/interfaces/mrtrix3/connectivity.py b/nipype/interfaces/mrtrix3/connectivity.py index 0e246c796b..63277d7a38 100644 --- a/nipype/interfaces/mrtrix3/connectivity.py +++ b/nipype/interfaces/mrtrix3/connectivity.py @@ -1,8 +1,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os import os.path as op diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py index fc3559c918..5fd28a7494 100644 --- a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ -1,8 +1,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os.path as op diff --git a/nipype/interfaces/mrtrix3/reconst.py b/nipype/interfaces/mrtrix3/reconst.py index 6583c58996..f7d09d740c 100644 --- a/nipype/interfaces/mrtrix3/reconst.py +++ b/nipype/interfaces/mrtrix3/reconst.py @@ -1,8 +1,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os.path as op diff --git a/nipype/interfaces/mrtrix3/tracking.py b/nipype/interfaces/mrtrix3/tracking.py index e8bf15a33f..b5cd33612f 100644 --- a/nipype/interfaces/mrtrix3/tracking.py +++ b/nipype/interfaces/mrtrix3/tracking.py @@ -1,8 +1,6 @@ # emacs: -*- mode: python; 
py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os.path as op diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index a667c716f4..e82733c674 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -1,8 +1,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os.path as op diff --git a/nipype/interfaces/niftyseg/base.py b/nipype/interfaces/niftyseg/base.py index d68bbcc73b..cc831aa9b5 100644 --- a/nipype/interfaces/niftyseg/base.py +++ b/nipype/interfaces/niftyseg/base.py @@ -15,8 +15,6 @@ -------- See the docstrings of the individual classes for examples. """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) from ..niftyfit.base import NiftyFitCommand diff --git a/nipype/interfaces/nilearn.py b/nipype/interfaces/nilearn.py index 699b7dc8d3..06bfdf899f 100644 --- a/nipype/interfaces/nilearn.py +++ b/nipype/interfaces/nilearn.py @@ -4,8 +4,6 @@ ''' Algorithms to compute statistics on :abbr:`fMRI (functional MRI)` ''' -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os import numpy as np diff --git a/nipype/interfaces/nipy/utils.py b/nipype/interfaces/nipy/utils.py index 406960fc6d..22856c01d6 100644 --- a/nipype/interfaces/nipy/utils.py +++ b/nipype/interfaces/nipy/utils.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) import warnings import nibabel as nb diff --git a/nipype/interfaces/petpvc.py b/nipype/interfaces/petpvc.py index 598dd898c5..1cf4f3a42e 100644 --- a/nipype/interfaces/petpvc.py +++ b/nipype/interfaces/petpvc.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os diff --git a/nipype/interfaces/slicer/generate_classes.py b/nipype/interfaces/slicer/generate_classes.py index 850b561c47..08d0030317 100644 --- a/nipype/interfaces/slicer/generate_classes.py +++ b/nipype/interfaces/slicer/generate_classes.py @@ -37,7 +37,6 @@ def add_class_to_package(class_codes, class_names, module_name, package_dir): If you spot a bug, please report it on the mailing list and/or change the generator.\"\"\"\n\n""" ) imports = """from __future__ import (print_function, division, unicode_literals, - absolute_import) from ..base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath) import os\n\n\n""" diff --git a/nipype/interfaces/spm/utils.py b/nipype/interfaces/spm/utils.py index 275f0781a9..919b5853ab 100644 --- a/nipype/interfaces/spm/utils.py +++ b/nipype/interfaces/spm/utils.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os import numpy as np diff --git a/nipype/interfaces/vista/vista.py b/nipype/interfaces/vista/vista.py index 
5000036d02..ada6f430f1 100644 --- a/nipype/interfaces/vista/vista.py +++ b/nipype/interfaces/vista/vista.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) from ..base import CommandLineInputSpec, CommandLine, TraitedSpec, File diff --git a/nipype/interfaces/vtkbase.py b/nipype/interfaces/vtkbase.py index 1ec66ea614..67edef41c3 100644 --- a/nipype/interfaces/vtkbase.py +++ b/nipype/interfaces/vtkbase.py @@ -6,8 +6,6 @@ Code using tvtk should import it through this module """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os from .. import logging diff --git a/nipype/interfaces/workbench/base.py b/nipype/interfaces/workbench/base.py index 4adc9dc69b..2c2620dbb6 100644 --- a/nipype/interfaces/workbench/base.py +++ b/nipype/interfaces/workbench/base.py @@ -10,8 +10,6 @@ Human Connectome Project. """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os import re diff --git a/nipype/interfaces/workbench/metric.py b/nipype/interfaces/workbench/metric.py index e5bbb60739..b3653576a4 100644 --- a/nipype/interfaces/workbench/metric.py +++ b/nipype/interfaces/workbench/metric.py @@ -2,8 +2,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """This module provides interfaces for workbench surface commands""" -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os from ..base import (TraitedSpec, File, traits, CommandLineInputSpec) diff --git a/nipype/pipeline/__init__.py b/nipype/pipeline/__init__.py index badfda5ba0..b410fc8ea0 100644 --- a/nipype/pipeline/__init__.py +++ b/nipype/pipeline/__init__.py @@ -5,7 +5,5 @@ Package contains modules for generating pipelines using interfaces """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) __docformat__ = 'restructuredtext' from .engine import Node, MapNode, JoinNode, Workflow diff --git a/nipype/pipeline/plugins/__init__.py b/nipype/pipeline/plugins/__init__.py index e3c797a10a..83f4869a41 100644 --- a/nipype/pipeline/plugins/__init__.py +++ b/nipype/pipeline/plugins/__init__.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) from .debug import DebugPlugin from .linear import LinearPlugin diff --git a/nipype/pipeline/plugins/condor.py b/nipype/pipeline/plugins/condor.py index 9f5ca632e5..bdf598c5f6 100644 --- a/nipype/pipeline/plugins/condor.py +++ b/nipype/pipeline/plugins/condor.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- """Parallel workflow execution via Condor """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os from time import sleep diff --git a/nipype/pipeline/plugins/debug.py b/nipype/pipeline/plugins/debug.py index 9921bb9cf4..0b9b009c2f 100644 --- a/nipype/pipeline/plugins/debug.py +++ b/nipype/pipeline/plugins/debug.py @@ -3,8 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Debug plugin """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) import networkx as nx from .base import PluginBase, logger diff --git 
a/nipype/pipeline/plugins/legacymultiproc.py b/nipype/pipeline/plugins/legacymultiproc.py index d93e6e77d1..c10f9dd45b 100644 --- a/nipype/pipeline/plugins/legacymultiproc.py +++ b/nipype/pipeline/plugins/legacymultiproc.py @@ -6,8 +6,6 @@ Support for child processes running as non-daemons based on http://stackoverflow.com/a/8963618/1183453 """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) # Import packages import os diff --git a/nipype/pipeline/plugins/linear.py b/nipype/pipeline/plugins/linear.py index 2180d614ad..a3cf7556be 100644 --- a/nipype/pipeline/plugins/linear.py +++ b/nipype/pipeline/plugins/linear.py @@ -3,8 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Local serial workflow execution """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os diff --git a/nipype/pipeline/plugins/lsf.py b/nipype/pipeline/plugins/lsf.py index bdaabc31e6..6e996c5aeb 100644 --- a/nipype/pipeline/plugins/lsf.py +++ b/nipype/pipeline/plugins/lsf.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- """Parallel workflow execution via LSF """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os import re diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index c89a6af8e8..7f57b6625e 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -6,8 +6,6 @@ Support for child processes running as non-daemons based on http://stackoverflow.com/a/8963618/1183453 """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) # Import packages import os diff --git a/nipype/pipeline/plugins/semaphore_singleton.py b/nipype/pipeline/plugins/semaphore_singleton.py index 96dfe657bd..fe25cf2c61 100644 --- a/nipype/pipeline/plugins/semaphore_singleton.py +++ b/nipype/pipeline/plugins/semaphore_singleton.py @@ -1,5 +1,3 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) import threading semaphore = threading.Semaphore(0) diff --git a/nipype/pipeline/plugins/somaflow.py b/nipype/pipeline/plugins/somaflow.py index 174b277c6f..e31a901169 100644 --- a/nipype/pipeline/plugins/somaflow.py +++ b/nipype/pipeline/plugins/somaflow.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- """Parallel workflow execution via PBS/Torque """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os import sys diff --git a/nipype/scripts/utils.py b/nipype/scripts/utils.py index 432b6308ce..bcdf195c2f 100644 --- a/nipype/scripts/utils.py +++ b/nipype/scripts/utils.py @@ -2,8 +2,6 @@ """ Utilities for the CLI functions. 
""" -from __future__ import (print_function, division, unicode_literals, - absolute_import) diff --git a/nipype/sphinxext/__init__.py b/nipype/sphinxext/__init__.py index 5f8ba4da2b..5ad1fa8c52 100644 --- a/nipype/sphinxext/__init__.py +++ b/nipype/sphinxext/__init__.py @@ -2,5 +2,4 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, absolute_import, unicode_literals) diff --git a/nipype/utils/tmpdirs.py b/nipype/utils/tmpdirs.py index a1295c0a50..4752514e8f 100644 --- a/nipype/utils/tmpdirs.py +++ b/nipype/utils/tmpdirs.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- diff --git a/nipype/workflows/data/__init__.py b/nipype/workflows/data/__init__.py index 85fcd2dee0..bc6b1d6e9d 100644 --- a/nipype/workflows/data/__init__.py +++ b/nipype/workflows/data/__init__.py @@ -2,8 +2,6 @@ # coding: utf-8 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os.path as op diff --git a/nipype/workflows/dmri/__init__.py b/nipype/workflows/dmri/__init__.py index 628b6c2bc1..ab59dad57e 100644 --- a/nipype/workflows/dmri/__init__.py +++ b/nipype/workflows/dmri/__init__.py @@ -1,4 +1,2 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) from . import camino, mrtrix, fsl, dipy diff --git a/nipype/workflows/dmri/fsl/artifacts.py b/nipype/workflows/dmri/fsl/artifacts.py index 3b29c5a07c..d1faebafdf 100644 --- a/nipype/workflows/dmri/fsl/artifacts.py +++ b/nipype/workflows/dmri/fsl/artifacts.py @@ -2,8 +2,6 @@ # coding: utf-8 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) from ....interfaces.io import JSONFileGrabber from ....interfaces import utility as niu diff --git a/nipype/workflows/fmri/__init__.py b/nipype/workflows/fmri/__init__.py index 5523a0c412..a71a96ad06 100644 --- a/nipype/workflows/fmri/__init__.py +++ b/nipype/workflows/fmri/__init__.py @@ -1,4 +1,2 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) from . import fsl, spm diff --git a/nipype/workflows/graph/__init__.py b/nipype/workflows/graph/__init__.py index ead6180dc8..40a96afc6f 100644 --- a/nipype/workflows/graph/__init__.py +++ b/nipype/workflows/graph/__init__.py @@ -1,3 +1 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) diff --git a/nipype/workflows/rsfmri/__init__.py b/nipype/workflows/rsfmri/__init__.py index bd58039343..6346a6afa2 100644 --- a/nipype/workflows/rsfmri/__init__.py +++ b/nipype/workflows/rsfmri/__init__.py @@ -1,5 +1,3 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) from . 
import fsl diff --git a/nipype/workflows/smri/__init__.py b/nipype/workflows/smri/__init__.py index b6d7bf5731..2e8b34eab2 100644 --- a/nipype/workflows/smri/__init__.py +++ b/nipype/workflows/smri/__init__.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) from . import ants from . import freesurfer diff --git a/nipype/workflows/smri/freesurfer/autorecon1.py b/nipype/workflows/smri/freesurfer/autorecon1.py index 0973e210a7..31783eec5a 100644 --- a/nipype/workflows/smri/freesurfer/autorecon1.py +++ b/nipype/workflows/smri/freesurfer/autorecon1.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) from ....utils import NUMPY_MMAP from ....pipeline import engine as pe from ....interfaces.utility import Function, IdentityInterface diff --git a/nipype/workflows/smri/freesurfer/autorecon2.py b/nipype/workflows/smri/freesurfer/autorecon2.py index a11587412d..7c2de2b40d 100644 --- a/nipype/workflows/smri/freesurfer/autorecon2.py +++ b/nipype/workflows/smri/freesurfer/autorecon2.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) from ....interfaces.utility import Function, IdentityInterface, Merge from ....pipeline import engine as pe from ....interfaces.freesurfer import * diff --git a/nipype/workflows/smri/freesurfer/autorecon3.py b/nipype/workflows/smri/freesurfer/autorecon3.py index 477198d2da..47359afa20 100644 --- a/nipype/workflows/smri/freesurfer/autorecon3.py +++ b/nipype/workflows/smri/freesurfer/autorecon3.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) from ....interfaces.utility import IdentityInterface, Merge, Function from ....pipeline import engine as pe from ....interfaces.freesurfer import * diff --git a/nipype/workflows/smri/freesurfer/ba_maps.py b/nipype/workflows/smri/freesurfer/ba_maps.py index 8a4ae6caf1..480d6a9ece 100644 --- a/nipype/workflows/smri/freesurfer/ba_maps.py +++ b/nipype/workflows/smri/freesurfer/ba_maps.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) import os from ....interfaces.utility import Function, IdentityInterface from ....pipeline import engine as pe # pypeline engine diff --git a/nipype/workflows/smri/freesurfer/recon.py b/nipype/workflows/smri/freesurfer/recon.py index f0ad4ad6fd..5c6c2579fc 100644 --- a/nipype/workflows/smri/freesurfer/recon.py +++ b/nipype/workflows/smri/freesurfer/recon.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) from ....pipeline import engine as pe from ....interfaces import freesurfer as fs from ....interfaces import utility as niu From b90f7d586c162060b915566a380dcd75e4d1534b Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 25 Jul 2018 15:29:47 -0400 Subject: [PATCH 0006/1665] MAINT: Update version, supported Python versions --- nipype/info.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nipype/info.py b/nipype/info.py index f1d72f18e9..ad17ffe8d4 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -8,7 +8,7 @@ # full release. 
'.dev' as a version_extra string means this is a development # version # Remove -dev for release -__version__ = '1.1.1-dev' +__version__ = '2.0.0-dev' def get_nipype_gitversion(): @@ -53,7 +53,6 @@ def get_nipype_gitversion(): 'License :: OSI Approved :: Apache Software License', 'Operating System :: MacOS :: MacOS X', 'Operating System :: POSIX :: Linux', - 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Topic :: Scientific/Engineering' From b5ef915ff5fd8191df12529c27aae7dc8e0b5e2d Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 25 Jul 2018 15:31:20 -0400 Subject: [PATCH 0007/1665] FIX: Dangling import --- nipype/sphinxext/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nipype/sphinxext/__init__.py b/nipype/sphinxext/__init__.py index 5ad1fa8c52..7f877fb023 100644 --- a/nipype/sphinxext/__init__.py +++ b/nipype/sphinxext/__init__.py @@ -2,4 +2,3 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: - unicode_literals) From e968b8de650b0cd724863d22b7536ab607d8dc0b Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 25 Jul 2018 15:36:51 -0400 Subject: [PATCH 0008/1665] RF: Drop imports in checkspecs --- tools/checkspecs.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tools/checkspecs.py b/tools/checkspecs.py index e282728c8e..6b7e4e2813 100644 --- a/tools/checkspecs.py +++ b/tools/checkspecs.py @@ -2,9 +2,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Attempt to check each interface in nipype """ -from __future__ import print_function, unicode_literals -from builtins import object, str, bytes, open - # Stdlib imports import os import re @@ -223,7 +220,6 @@ def test_specs(self, uri): if not os.path.exists(nonautotest): cmd = [ '# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT', - 'from __future__ import unicode_literals', 'from ..%s import %s' % (uri.split('.')[-1], c), '' ] cmd.append('\ndef test_%s_inputs():' % c) From 7a0e638a852dd57a48e9fb259124ca4221557c87 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Wed, 25 Jul 2018 15:37:05 -0400 Subject: [PATCH 0009/1665] MAINT: make specs --- nipype/algorithms/tests/test_auto_ACompCor.py | 1 - nipype/algorithms/tests/test_auto_ActivationCount.py | 1 - nipype/algorithms/tests/test_auto_AddCSVColumn.py | 1 - nipype/algorithms/tests/test_auto_AddCSVRow.py | 1 - nipype/algorithms/tests/test_auto_AddNoise.py | 1 - nipype/algorithms/tests/test_auto_ArtifactDetect.py | 1 - nipype/algorithms/tests/test_auto_CalculateMedian.py | 1 - nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py | 1 - nipype/algorithms/tests/test_auto_ComputeDVARS.py | 1 - nipype/algorithms/tests/test_auto_ComputeMeshWarp.py | 1 - nipype/algorithms/tests/test_auto_CreateNifti.py | 1 - nipype/algorithms/tests/test_auto_Distance.py | 1 - nipype/algorithms/tests/test_auto_FramewiseDisplacement.py | 1 - nipype/algorithms/tests/test_auto_FuzzyOverlap.py | 1 - nipype/algorithms/tests/test_auto_Gunzip.py | 1 - nipype/algorithms/tests/test_auto_ICC.py | 1 - nipype/algorithms/tests/test_auto_Matlab2CSV.py | 1 - nipype/algorithms/tests/test_auto_MergeCSVFiles.py | 1 - nipype/algorithms/tests/test_auto_MergeROIs.py | 1 - nipype/algorithms/tests/test_auto_MeshWarpMaths.py | 1 - nipype/algorithms/tests/test_auto_ModifyAffine.py | 1 - nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py | 1 - nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py | 1 - nipype/algorithms/tests/test_auto_P2PDistance.py | 1 - nipype/algorithms/tests/test_auto_PickAtlas.py | 1 - nipype/algorithms/tests/test_auto_Similarity.py | 1 - nipype/algorithms/tests/test_auto_SimpleThreshold.py | 1 - nipype/algorithms/tests/test_auto_SpecifyModel.py | 1 - nipype/algorithms/tests/test_auto_SpecifySPMModel.py | 1 - nipype/algorithms/tests/test_auto_SpecifySparseModel.py | 1 - nipype/algorithms/tests/test_auto_SplitROIs.py | 1 - nipype/algorithms/tests/test_auto_StimulusCorrelation.py | 1 - nipype/algorithms/tests/test_auto_TCompCor.py | 1 - nipype/algorithms/tests/test_auto_TVTKBaseInterface.py | 1 - nipype/algorithms/tests/test_auto_WarpPoints.py | 1 - nipype/interfaces/afni/tests/test_auto_ABoverlap.py | 1 - nipype/interfaces/afni/tests/test_auto_AFNICommand.py | 1 - nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py | 1 - nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py | 1 - nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py | 1 - nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py | 1 - nipype/interfaces/afni/tests/test_auto_Allineate.py | 1 - nipype/interfaces/afni/tests/test_auto_AutoTLRC.py | 1 - nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py | 1 - nipype/interfaces/afni/tests/test_auto_Autobox.py | 1 - nipype/interfaces/afni/tests/test_auto_Automask.py | 1 - nipype/interfaces/afni/tests/test_auto_Axialize.py | 1 - nipype/interfaces/afni/tests/test_auto_Bandpass.py | 1 - nipype/interfaces/afni/tests/test_auto_BlurInMask.py | 1 - nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py | 1 - nipype/interfaces/afni/tests/test_auto_BrickStat.py | 1 - nipype/interfaces/afni/tests/test_auto_Bucket.py | 1 - nipype/interfaces/afni/tests/test_auto_Calc.py | 1 - nipype/interfaces/afni/tests/test_auto_Cat.py | 1 - nipype/interfaces/afni/tests/test_auto_CatMatvec.py | 1 - nipype/interfaces/afni/tests/test_auto_CenterMass.py | 1 - nipype/interfaces/afni/tests/test_auto_ClipLevel.py | 1 - nipype/interfaces/afni/tests/test_auto_ConvertDset.py | 1 - nipype/interfaces/afni/tests/test_auto_Copy.py | 1 - nipype/interfaces/afni/tests/test_auto_Deconvolve.py | 1 - 
nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py | 1 - nipype/interfaces/afni/tests/test_auto_Despike.py | 1 - nipype/interfaces/afni/tests/test_auto_Detrend.py | 1 - nipype/interfaces/afni/tests/test_auto_Dot.py | 1 - nipype/interfaces/afni/tests/test_auto_ECM.py | 1 - nipype/interfaces/afni/tests/test_auto_Edge3.py | 1 - nipype/interfaces/afni/tests/test_auto_Eval.py | 1 - nipype/interfaces/afni/tests/test_auto_FWHMx.py | 1 - nipype/interfaces/afni/tests/test_auto_Fim.py | 1 - nipype/interfaces/afni/tests/test_auto_Fourier.py | 1 - nipype/interfaces/afni/tests/test_auto_GCOR.py | 1 - nipype/interfaces/afni/tests/test_auto_Hist.py | 1 - nipype/interfaces/afni/tests/test_auto_LFCD.py | 1 - nipype/interfaces/afni/tests/test_auto_LocalBistat.py | 1 - nipype/interfaces/afni/tests/test_auto_MaskTool.py | 1 - nipype/interfaces/afni/tests/test_auto_Maskave.py | 1 - nipype/interfaces/afni/tests/test_auto_Means.py | 1 - nipype/interfaces/afni/tests/test_auto_Merge.py | 1 - nipype/interfaces/afni/tests/test_auto_Notes.py | 1 - nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py | 1 - nipype/interfaces/afni/tests/test_auto_NwarpApply.py | 1 - nipype/interfaces/afni/tests/test_auto_NwarpCat.py | 1 - nipype/interfaces/afni/tests/test_auto_OneDToolPy.py | 1 - nipype/interfaces/afni/tests/test_auto_OutlierCount.py | 1 - nipype/interfaces/afni/tests/test_auto_QualityIndex.py | 1 - nipype/interfaces/afni/tests/test_auto_Qwarp.py | 1 - nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py | 1 - nipype/interfaces/afni/tests/test_auto_ROIStats.py | 1 - nipype/interfaces/afni/tests/test_auto_Refit.py | 1 - nipype/interfaces/afni/tests/test_auto_Remlfit.py | 1 - nipype/interfaces/afni/tests/test_auto_Resample.py | 1 - nipype/interfaces/afni/tests/test_auto_Retroicor.py | 1 - nipype/interfaces/afni/tests/test_auto_SVMTest.py | 1 - nipype/interfaces/afni/tests/test_auto_SVMTrain.py | 1 - nipype/interfaces/afni/tests/test_auto_Seg.py | 1 - nipype/interfaces/afni/tests/test_auto_SkullStrip.py | 1 - nipype/interfaces/afni/tests/test_auto_Synthesize.py | 1 - nipype/interfaces/afni/tests/test_auto_TCat.py | 1 - nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py | 1 - nipype/interfaces/afni/tests/test_auto_TCorr1D.py | 1 - nipype/interfaces/afni/tests/test_auto_TCorrMap.py | 1 - nipype/interfaces/afni/tests/test_auto_TCorrelate.py | 1 - nipype/interfaces/afni/tests/test_auto_TNorm.py | 1 - nipype/interfaces/afni/tests/test_auto_TProject.py | 1 - nipype/interfaces/afni/tests/test_auto_TShift.py | 1 - nipype/interfaces/afni/tests/test_auto_TStat.py | 1 - nipype/interfaces/afni/tests/test_auto_To3D.py | 1 - nipype/interfaces/afni/tests/test_auto_Undump.py | 1 - nipype/interfaces/afni/tests/test_auto_Unifize.py | 1 - nipype/interfaces/afni/tests/test_auto_Volreg.py | 1 - nipype/interfaces/afni/tests/test_auto_Warp.py | 1 - nipype/interfaces/afni/tests/test_auto_ZCutUp.py | 1 - nipype/interfaces/afni/tests/test_auto_Zcat.py | 1 - nipype/interfaces/afni/tests/test_auto_Zeropad.py | 1 - nipype/interfaces/ants/tests/test_auto_ANTS.py | 1 - nipype/interfaces/ants/tests/test_auto_ANTSCommand.py | 1 - nipype/interfaces/ants/tests/test_auto_AffineInitializer.py | 1 - nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py | 1 - nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py | 1 - .../interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py | 1 - nipype/interfaces/ants/tests/test_auto_Atropos.py | 1 - nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py | 1 - 
nipype/interfaces/ants/tests/test_auto_AverageImages.py | 1 - nipype/interfaces/ants/tests/test_auto_BrainExtraction.py | 1 - nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py | 1 - .../interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py | 1 - nipype/interfaces/ants/tests/test_auto_CorticalThickness.py | 1 - .../ants/tests/test_auto_CreateJacobianDeterminantImage.py | 1 - nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py | 1 - nipype/interfaces/ants/tests/test_auto_DenoiseImage.py | 1 - nipype/interfaces/ants/tests/test_auto_JointFusion.py | 1 - nipype/interfaces/ants/tests/test_auto_KellyKapowski.py | 1 - nipype/interfaces/ants/tests/test_auto_LabelGeometry.py | 1 - nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py | 1 - nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py | 1 - nipype/interfaces/ants/tests/test_auto_MultiplyImages.py | 1 - nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py | 1 - nipype/interfaces/ants/tests/test_auto_Registration.py | 1 - nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py | 1 - .../interfaces/ants/tests/test_auto_WarpImageMultiTransform.py | 1 - .../ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py | 1 - nipype/interfaces/base/tests/test_auto_BaseInterface.py | 1 - nipype/interfaces/base/tests/test_auto_CommandLine.py | 1 - nipype/interfaces/base/tests/test_auto_LibraryBaseInterface.py | 1 - nipype/interfaces/base/tests/test_auto_MpiCommandLine.py | 1 - nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py | 1 - nipype/interfaces/base/tests/test_auto_SimpleInterface.py | 1 - nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py | 1 - nipype/interfaces/brainsuite/tests/test_auto_BDP.py | 1 - nipype/interfaces/brainsuite/tests/test_auto_Bfc.py | 1 - nipype/interfaces/brainsuite/tests/test_auto_Bse.py | 1 - nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py | 1 - nipype/interfaces/brainsuite/tests/test_auto_Cortex.py | 1 - nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py | 1 - nipype/interfaces/brainsuite/tests/test_auto_Dfs.py | 1 - nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py | 1 - nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py | 1 - nipype/interfaces/brainsuite/tests/test_auto_Pvc.py | 1 - nipype/interfaces/brainsuite/tests/test_auto_SVReg.py | 1 - nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py | 1 - nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py | 1 - nipype/interfaces/brainsuite/tests/test_auto_Tca.py | 1 - nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py | 1 - nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py | 1 - nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py | 1 - .../camino/tests/test_auto_ComputeFractionalAnisotropy.py | 1 - .../interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py | 1 - nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py | 1 - nipype/interfaces/camino/tests/test_auto_Conmat.py | 1 - nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py | 1 - nipype/interfaces/camino/tests/test_auto_DTIFit.py | 1 - nipype/interfaces/camino/tests/test_auto_DTLUTGen.py | 1 - nipype/interfaces/camino/tests/test_auto_DTMetric.py | 1 - nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py | 1 - nipype/interfaces/camino/tests/test_auto_Image2Voxel.py | 1 - nipype/interfaces/camino/tests/test_auto_ImageStats.py | 1 - nipype/interfaces/camino/tests/test_auto_LinRecon.py | 1 - nipype/interfaces/camino/tests/test_auto_MESD.py | 1 - 
nipype/interfaces/camino/tests/test_auto_ModelFit.py | 1 - nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py | 1 - nipype/interfaces/camino/tests/test_auto_PicoPDFs.py | 1 - nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py | 1 - nipype/interfaces/camino/tests/test_auto_QBallMX.py | 1 - nipype/interfaces/camino/tests/test_auto_SFLUTGen.py | 1 - nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py | 1 - nipype/interfaces/camino/tests/test_auto_SFPeaks.py | 1 - nipype/interfaces/camino/tests/test_auto_Shredder.py | 1 - nipype/interfaces/camino/tests/test_auto_Track.py | 1 - nipype/interfaces/camino/tests/test_auto_TrackBallStick.py | 1 - nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py | 1 - nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py | 1 - nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py | 1 - nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py | 1 - nipype/interfaces/camino/tests/test_auto_TrackDT.py | 1 - nipype/interfaces/camino/tests/test_auto_TrackPICo.py | 1 - nipype/interfaces/camino/tests/test_auto_TractShredder.py | 1 - nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py | 1 - .../camino2trackvis/tests/test_auto_Camino2Trackvis.py | 1 - .../camino2trackvis/tests/test_auto_Trackvis2Camino.py | 1 - nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py | 1 - nipype/interfaces/cmtk/tests/test_auto_CFFBaseInterface.py | 1 - nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py | 1 - nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py | 1 - nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py | 1 - nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py | 1 - nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py | 1 - nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py | 1 - nipype/interfaces/cmtk/tests/test_auto_Parcellate.py | 1 - nipype/interfaces/cmtk/tests/test_auto_ROIGen.py | 1 - nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py | 1 - .../interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py | 1 - nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py | 1 - nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py | 1 - .../interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py | 1 - .../interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py | 1 - .../interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py | 1 - nipype/interfaces/dipy/tests/test_auto_APMQball.py | 1 - nipype/interfaces/dipy/tests/test_auto_CSD.py | 1 - nipype/interfaces/dipy/tests/test_auto_DTI.py | 1 - nipype/interfaces/dipy/tests/test_auto_Denoise.py | 1 - nipype/interfaces/dipy/tests/test_auto_DipyBaseInterface.py | 1 - nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py | 1 - nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py | 1 - nipype/interfaces/dipy/tests/test_auto_RESTORE.py | 1 - nipype/interfaces/dipy/tests/test_auto_Resample.py | 1 - nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py | 1 - nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py | 1 - nipype/interfaces/dipy/tests/test_auto_TensorMode.py | 1 - nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py | 1 - nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py | 1 - nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py | 1 - nipype/interfaces/dtitk/tests/test_auto_Affine.py | 1 - nipype/interfaces/dtitk/tests/test_auto_AffineTask.py | 1 - nipype/interfaces/dtitk/tests/test_auto_BinThresh.py | 1 - 
nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py | 1 - nipype/interfaces/dtitk/tests/test_auto_CommandLineDtitk.py | 1 - nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py | 1 - nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py | 1 - nipype/interfaces/dtitk/tests/test_auto_Diffeo.py | 1 - nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py | 1 - nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py | 1 - nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py | 1 - nipype/interfaces/dtitk/tests/test_auto_Rigid.py | 1 - nipype/interfaces/dtitk/tests/test_auto_RigidTask.py | 1 - nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py | 1 - nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py | 1 - nipype/interfaces/dtitk/tests/test_auto_SVResample.py | 1 - nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py | 1 - nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py | 1 - nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py | 1 - nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py | 1 - nipype/interfaces/dtitk/tests/test_auto_TVResample.py | 1 - nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py | 1 - nipype/interfaces/dtitk/tests/test_auto_TVtool.py | 1 - nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py | 1 - nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py | 1 - nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py | 1 - nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py | 1 - .../interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py | 1 - nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py | 1 - nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py | 1 - nipype/interfaces/elastix/tests/test_auto_EditTransform.py | 1 - nipype/interfaces/elastix/tests/test_auto_PointsWarp.py | 1 - nipype/interfaces/elastix/tests/test_auto_Registration.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py | 1 - .../interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_Binarize.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_CALabel.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_CARegister.py | 1 - .../freesurfer/tests/test_auto_CheckTalairachAlignment.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_Contrast.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_Curvature.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py | 1 - .../freesurfer/tests/test_auto_ExtractMainComponent.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py | 1 - 
.../interfaces/freesurfer/tests/test_auto_FuseSegmentations.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py | 1 - .../interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py | 1 - .../interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py | 1 - .../interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_Normalize.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_Paint.py | 1 - .../interfaces/freesurfer/tests/test_auto_ParcellationStats.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_Register.py | 1 - .../freesurfer/tests/test_auto_RegisterAVItoTalairach.py | 1 - .../freesurfer/tests/test_auto_RelabelHypointensities.py | 1 - .../interfaces/freesurfer/tests/test_auto_RemoveIntersection.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_Resample.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_SegStats.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_Smooth.py | 1 - .../interfaces/freesurfer/tests/test_auto_SmoothTessellation.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_Sphere.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py | 1 - .../freesurfer/tests/test_auto_Surface2VolTransform.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py | 
1 - nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py | 1 - nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py | 1 - .../interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py | 1 - nipype/interfaces/fsl/tests/test_auto_AR1Image.py | 1 - nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py | 1 - nipype/interfaces/fsl/tests/test_auto_ApplyMask.py | 1 - nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py | 1 - nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py | 1 - nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py | 1 - nipype/interfaces/fsl/tests/test_auto_AvScale.py | 1 - nipype/interfaces/fsl/tests/test_auto_B0Calc.py | 1 - nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py | 1 - nipype/interfaces/fsl/tests/test_auto_BET.py | 1 - nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py | 1 - nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py | 1 - nipype/interfaces/fsl/tests/test_auto_Classifier.py | 1 - nipype/interfaces/fsl/tests/test_auto_Cleaner.py | 1 - nipype/interfaces/fsl/tests/test_auto_Cluster.py | 1 - nipype/interfaces/fsl/tests/test_auto_Complex.py | 1 - nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py | 1 - nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py | 1 - nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py | 1 - nipype/interfaces/fsl/tests/test_auto_CopyGeom.py | 1 - nipype/interfaces/fsl/tests/test_auto_DTIFit.py | 1 - nipype/interfaces/fsl/tests/test_auto_DilateImage.py | 1 - nipype/interfaces/fsl/tests/test_auto_DistanceMap.py | 1 - nipype/interfaces/fsl/tests/test_auto_DualRegression.py | 1 - nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py | 1 - nipype/interfaces/fsl/tests/test_auto_Eddy.py | 1 - nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py | 1 - nipype/interfaces/fsl/tests/test_auto_EpiReg.py | 1 - nipype/interfaces/fsl/tests/test_auto_ErodeImage.py | 1 - nipype/interfaces/fsl/tests/test_auto_ExtractROI.py | 1 - nipype/interfaces/fsl/tests/test_auto_FAST.py | 1 - nipype/interfaces/fsl/tests/test_auto_FEAT.py | 1 - nipype/interfaces/fsl/tests/test_auto_FEATModel.py | 1 - nipype/interfaces/fsl/tests/test_auto_FEATRegister.py | 1 - nipype/interfaces/fsl/tests/test_auto_FIRST.py | 1 - nipype/interfaces/fsl/tests/test_auto_FLAMEO.py | 1 - nipype/interfaces/fsl/tests/test_auto_FLIRT.py | 1 - nipype/interfaces/fsl/tests/test_auto_FNIRT.py | 1 - nipype/interfaces/fsl/tests/test_auto_FSLCommand.py | 1 - nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py | 1 - nipype/interfaces/fsl/tests/test_auto_FUGUE.py | 1 - nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py | 1 - nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py | 1 - nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py | 1 - nipype/interfaces/fsl/tests/test_auto_GLM.py | 1 - nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py | 1 - nipype/interfaces/fsl/tests/test_auto_ImageMaths.py | 1 - nipype/interfaces/fsl/tests/test_auto_ImageMeants.py | 1 - nipype/interfaces/fsl/tests/test_auto_ImageStats.py | 1 - nipype/interfaces/fsl/tests/test_auto_InvWarp.py | 1 - nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py | 1 - nipype/interfaces/fsl/tests/test_auto_L2Model.py | 1 - nipype/interfaces/fsl/tests/test_auto_Level1Design.py | 1 - nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py | 1 - 
nipype/interfaces/fsl/tests/test_auto_MELODIC.py | 1 - nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py | 1 - nipype/interfaces/fsl/tests/test_auto_MathsCommand.py | 1 - nipype/interfaces/fsl/tests/test_auto_MaxImage.py | 1 - nipype/interfaces/fsl/tests/test_auto_MaxnImage.py | 1 - nipype/interfaces/fsl/tests/test_auto_MeanImage.py | 1 - nipype/interfaces/fsl/tests/test_auto_MedianImage.py | 1 - nipype/interfaces/fsl/tests/test_auto_Merge.py | 1 - nipype/interfaces/fsl/tests/test_auto_MinImage.py | 1 - nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py | 1 - nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py | 1 - nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py | 1 - nipype/interfaces/fsl/tests/test_auto_Overlay.py | 1 - nipype/interfaces/fsl/tests/test_auto_PRELUDE.py | 1 - nipype/interfaces/fsl/tests/test_auto_PercentileImage.py | 1 - nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py | 1 - nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py | 1 - nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py | 1 - nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py | 1 - nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py | 1 - nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py | 1 - nipype/interfaces/fsl/tests/test_auto_ProjThresh.py | 1 - nipype/interfaces/fsl/tests/test_auto_Randomise.py | 1 - nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py | 1 - nipype/interfaces/fsl/tests/test_auto_RobustFOV.py | 1 - nipype/interfaces/fsl/tests/test_auto_SMM.py | 1 - nipype/interfaces/fsl/tests/test_auto_SUSAN.py | 1 - nipype/interfaces/fsl/tests/test_auto_SigLoss.py | 1 - nipype/interfaces/fsl/tests/test_auto_Slice.py | 1 - nipype/interfaces/fsl/tests/test_auto_SliceTimer.py | 1 - nipype/interfaces/fsl/tests/test_auto_Slicer.py | 1 - nipype/interfaces/fsl/tests/test_auto_Smooth.py | 1 - nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py | 1 - nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py | 1 - nipype/interfaces/fsl/tests/test_auto_Split.py | 1 - nipype/interfaces/fsl/tests/test_auto_StdImage.py | 1 - nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py | 1 - nipype/interfaces/fsl/tests/test_auto_TOPUP.py | 1 - nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py | 1 - nipype/interfaces/fsl/tests/test_auto_Threshold.py | 1 - nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py | 1 - nipype/interfaces/fsl/tests/test_auto_Training.py | 1 - nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py | 1 - nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py | 1 - nipype/interfaces/fsl/tests/test_auto_VecReg.py | 1 - nipype/interfaces/fsl/tests/test_auto_WarpPoints.py | 1 - nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py | 1 - nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py | 1 - nipype/interfaces/fsl/tests/test_auto_WarpUtils.py | 1 - nipype/interfaces/fsl/tests/test_auto_XFibres5.py | 1 - nipype/interfaces/minc/tests/test_auto_Average.py | 1 - nipype/interfaces/minc/tests/test_auto_BBox.py | 1 - nipype/interfaces/minc/tests/test_auto_Beast.py | 1 - nipype/interfaces/minc/tests/test_auto_BestLinReg.py | 1 - nipype/interfaces/minc/tests/test_auto_BigAverage.py | 1 - nipype/interfaces/minc/tests/test_auto_Blob.py | 1 - nipype/interfaces/minc/tests/test_auto_Blur.py | 1 - nipype/interfaces/minc/tests/test_auto_Calc.py | 1 - nipype/interfaces/minc/tests/test_auto_Convert.py | 1 - nipype/interfaces/minc/tests/test_auto_Copy.py | 1 - nipype/interfaces/minc/tests/test_auto_Dump.py | 1 - 
nipype/interfaces/minc/tests/test_auto_Extract.py | 1 - nipype/interfaces/minc/tests/test_auto_Gennlxfm.py | 1 - nipype/interfaces/minc/tests/test_auto_Math.py | 1 - nipype/interfaces/minc/tests/test_auto_NlpFit.py | 1 - nipype/interfaces/minc/tests/test_auto_Norm.py | 1 - nipype/interfaces/minc/tests/test_auto_Pik.py | 1 - nipype/interfaces/minc/tests/test_auto_Resample.py | 1 - nipype/interfaces/minc/tests/test_auto_Reshape.py | 1 - nipype/interfaces/minc/tests/test_auto_ToEcat.py | 1 - nipype/interfaces/minc/tests/test_auto_ToRaw.py | 1 - nipype/interfaces/minc/tests/test_auto_VolSymm.py | 1 - nipype/interfaces/minc/tests/test_auto_Volcentre.py | 1 - nipype/interfaces/minc/tests/test_auto_Voliso.py | 1 - nipype/interfaces/minc/tests/test_auto_Volpad.py | 1 - nipype/interfaces/minc/tests/test_auto_XfmAvg.py | 1 - nipype/interfaces/minc/tests/test_auto_XfmConcat.py | 1 - nipype/interfaces/minc/tests/test_auto_XfmInvert.py | 1 - .../mipav/tests/test_auto_JistBrainMgdmSegmentation.py | 1 - .../mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py | 1 - .../mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py | 1 - .../mipav/tests/test_auto_JistBrainPartialVolumeFilter.py | 1 - .../mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py | 1 - .../mipav/tests/test_auto_JistIntensityMp2rageMasking.py | 1 - .../mipav/tests/test_auto_JistLaminarProfileCalculator.py | 1 - .../mipav/tests/test_auto_JistLaminarProfileGeometry.py | 1 - .../mipav/tests/test_auto_JistLaminarProfileSampling.py | 1 - .../interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py | 1 - .../mipav/tests/test_auto_JistLaminarVolumetricLayering.py | 1 - .../mipav/tests/test_auto_MedicAlgorithmImageCalculator.py | 1 - .../mipav/tests/test_auto_MedicAlgorithmLesionToads.py | 1 - .../mipav/tests/test_auto_MedicAlgorithmMipavReorient.py | 1 - nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py | 1 - .../mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py | 1 - .../mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py | 1 - nipype/interfaces/mipav/tests/test_auto_RandomVol.py | 1 - .../interfaces/mixins/tests/test_auto_ReportCapableInterface.py | 1 - nipype/interfaces/mne/tests/test_auto_WatershedBEM.py | 1 - .../mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py | 1 - .../mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py | 1 - nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py | 1 - .../mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py | 1 - nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py | 1 - nipype/interfaces/mrtrix/tests/test_auto_Erode.py | 1 - .../interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py | 1 - nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py | 1 - nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py | 1 - nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py | 1 - nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py | 1 - .../interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py | 1 - nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py | 1 - nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py | 1 - nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py | 1 - nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py | 1 - nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py | 1 - nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py | 1 - nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py | 1 - ...t_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py | 1 - 
.../tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py | 1 - nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py | 1 - .../mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py | 1 - .../mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py | 1 - nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py | 1 - nipype/interfaces/mrtrix/tests/test_auto_Threshold.py | 1 - nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py | 1 - nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py | 1 - nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py | 1 - nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py | 1 - nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py | 1 - nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py | 1 - nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py | 1 - nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py | 1 - nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py | 1 - nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py | 1 - nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py | 1 - nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py | 1 - nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py | 1 - nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py | 1 - nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py | 1 - nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py | 1 - nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py | 1 - nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py | 1 - nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py | 1 - nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py | 1 - nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py | 1 - nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py | 1 - nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py | 1 - nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py | 1 - nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py | 1 - nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py | 1 - nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py | 1 - nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py | 1 - nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py | 1 - nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py | 1 - nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py | 1 - nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py | 1 - nipype/interfaces/niftyreg/tests/test_auto_RegResample.py | 1 - nipype/interfaces/niftyreg/tests/test_auto_RegTools.py | 1 - nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py | 1 - nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py | 1 - nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py | 1 - nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py | 1 - nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py | 1 - nipype/interfaces/niftyseg/tests/test_auto_EM.py | 1 - nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py | 1 - nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py | 1 - nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py | 1 - nipype/interfaces/niftyseg/tests/test_auto_Merge.py | 1 - nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py | 1 - nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py | 1 - nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py | 1 - nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py | 1 - nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py | 1 - nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py | 1 - 
nipype/interfaces/nipy/tests/test_auto_ComputeMask.py | 1 - nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py | 1 - nipype/interfaces/nipy/tests/test_auto_FitGLM.py | 1 - nipype/interfaces/nipy/tests/test_auto_NipyBaseInterface.py | 1 - nipype/interfaces/nipy/tests/test_auto_Similarity.py | 1 - nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py | 1 - nipype/interfaces/nipy/tests/test_auto_Trim.py | 1 - nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py | 1 - nipype/interfaces/nitime/tests/test_auto_NitimeBaseInterface.py | 1 - .../brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py | 1 - .../semtools/brains/tests/test_auto_BRAINSTalairach.py | 1 - .../semtools/brains/tests/test_auto_BRAINSTalairachMask.py | 1 - .../semtools/brains/tests/test_auto_GenerateEdgeMapImage.py | 1 - .../semtools/brains/tests/test_auto_GeneratePurePlugMask.py | 1 - .../semtools/brains/tests/test_auto_HistogramMatchingFilter.py | 1 - .../semtools/brains/tests/test_auto_SimilarityIndex.py | 1 - .../interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py | 1 - .../semtools/diffusion/tests/test_auto_compareTractInclusion.py | 1 - .../interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py | 1 - nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py | 1 - .../interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py | 1 - .../semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py | 1 - .../semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py | 1 - .../semtools/diffusion/tests/test_auto_gtractAverageBvalues.py | 1 - .../semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py | 1 - .../semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py | 1 - .../semtools/diffusion/tests/test_auto_gtractConcatDwi.py | 1 - .../diffusion/tests/test_auto_gtractCopyImageOrientation.py | 1 - .../semtools/diffusion/tests/test_auto_gtractCoregBvalues.py | 1 - .../semtools/diffusion/tests/test_auto_gtractCostFastMarching.py | 1 - .../semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py | 1 - .../diffusion/tests/test_auto_gtractFastMarchingTracking.py | 1 - .../semtools/diffusion/tests/test_auto_gtractFiberTracking.py | 1 - .../semtools/diffusion/tests/test_auto_gtractImageConformity.py | 1 - .../diffusion/tests/test_auto_gtractInvertBSplineTransform.py | 1 - .../diffusion/tests/test_auto_gtractInvertDisplacementField.py | 1 - .../diffusion/tests/test_auto_gtractInvertRigidTransform.py | 1 - .../diffusion/tests/test_auto_gtractResampleAnisotropy.py | 1 - .../semtools/diffusion/tests/test_auto_gtractResampleB0.py | 1 - .../diffusion/tests/test_auto_gtractResampleCodeImage.py | 1 - .../diffusion/tests/test_auto_gtractResampleDWIInPlace.py | 1 - .../semtools/diffusion/tests/test_auto_gtractResampleFibers.py | 1 - .../semtools/diffusion/tests/test_auto_gtractTensor.py | 1 - .../tests/test_auto_gtractTransformToDisplacementField.py | 1 - .../semtools/diffusion/tests/test_auto_maxcurvature.py | 1 - .../diffusion/tractography/tests/test_auto_UKFTractography.py | 1 - .../diffusion/tractography/tests/test_auto_fiberprocess.py | 1 - .../diffusion/tractography/tests/test_auto_fiberstats.py | 1 - .../diffusion/tractography/tests/test_auto_fibertrack.py | 1 - .../interfaces/semtools/filtering/tests/test_auto_CannyEdge.py | 1 - .../tests/test_auto_CannySegmentationLevelSetImageFilter.py | 1 - .../interfaces/semtools/filtering/tests/test_auto_DilateImage.py | 1 - .../interfaces/semtools/filtering/tests/test_auto_DilateMask.py | 1 - 
.../semtools/filtering/tests/test_auto_DistanceMaps.py | 1 - .../filtering/tests/test_auto_DumpBinaryTrainingVectors.py | 1 - .../interfaces/semtools/filtering/tests/test_auto_ErodeImage.py | 1 - .../semtools/filtering/tests/test_auto_FlippedDifference.py | 1 - .../filtering/tests/test_auto_GenerateBrainClippedImage.py | 1 - .../filtering/tests/test_auto_GenerateSummedGradientImage.py | 1 - .../semtools/filtering/tests/test_auto_GenerateTestImage.py | 1 - .../tests/test_auto_GradientAnisotropicDiffusionImageFilter.py | 1 - .../semtools/filtering/tests/test_auto_HammerAttributeCreator.py | 1 - .../semtools/filtering/tests/test_auto_NeighborhoodMean.py | 1 - .../semtools/filtering/tests/test_auto_NeighborhoodMedian.py | 1 - .../semtools/filtering/tests/test_auto_STAPLEAnalysis.py | 1 - .../filtering/tests/test_auto_TextureFromNoiseImageFilter.py | 1 - .../semtools/filtering/tests/test_auto_TextureMeasureFilter.py | 1 - .../semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py | 1 - .../semtools/legacy/tests/test_auto_scalartransform.py | 1 - .../semtools/registration/tests/test_auto_BRAINSDemonWarp.py | 1 - .../semtools/registration/tests/test_auto_BRAINSFit.py | 1 - .../semtools/registration/tests/test_auto_BRAINSResample.py | 1 - .../semtools/registration/tests/test_auto_BRAINSResize.py | 1 - .../registration/tests/test_auto_BRAINSTransformFromFiducials.py | 1 - .../semtools/registration/tests/test_auto_VBRAINSDemonWarp.py | 1 - .../semtools/segmentation/tests/test_auto_BRAINSABC.py | 1 - .../segmentation/tests/test_auto_BRAINSConstellationDetector.py | 1 - .../tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py | 1 - .../semtools/segmentation/tests/test_auto_BRAINSCut.py | 1 - .../semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py | 1 - .../semtools/segmentation/tests/test_auto_BRAINSROIAuto.py | 1 - .../tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py | 1 - nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py | 1 - nipype/interfaces/semtools/tests/test_auto_DWICompare.py | 1 - nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py | 1 - .../tests/test_auto_GenerateCsfClippedFromClassifiedImage.py | 1 - .../semtools/utilities/tests/test_auto_BRAINSAlignMSP.py | 1 - .../semtools/utilities/tests/test_auto_BRAINSClipInferior.py | 1 - .../utilities/tests/test_auto_BRAINSConstellationModeler.py | 1 - .../semtools/utilities/tests/test_auto_BRAINSEyeDetector.py | 1 - .../utilities/tests/test_auto_BRAINSInitializedControlPoints.py | 1 - .../utilities/tests/test_auto_BRAINSLandmarkInitializer.py | 1 - .../utilities/tests/test_auto_BRAINSLinearModelerEPCA.py | 1 - .../semtools/utilities/tests/test_auto_BRAINSLmkTransform.py | 1 - .../interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py | 1 - .../semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py | 1 - .../semtools/utilities/tests/test_auto_BRAINSTransformConvert.py | 1 - .../utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py | 1 - .../semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py | 1 - .../semtools/utilities/tests/test_auto_FindCenterOfBrain.py | 1 - .../tests/test_auto_GenerateLabelMapFromProbabilityMap.py | 1 - .../semtools/utilities/tests/test_auto_ImageRegionPlotter.py | 1 - .../semtools/utilities/tests/test_auto_JointHistogram.py | 1 - .../semtools/utilities/tests/test_auto_ShuffleVectorsModule.py | 1 - .../semtools/utilities/tests/test_auto_fcsv_to_hdf5.py | 1 - .../semtools/utilities/tests/test_auto_insertMidACPCpoint.py | 1 - 
.../utilities/tests/test_auto_landmarksConstellationAligner.py | 1 - .../utilities/tests/test_auto_landmarksConstellationWeights.py | 1 - nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py | 1 - nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py | 1 - .../diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py | 1 - .../slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py | 1 - .../slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py | 1 - .../tests/test_auto_DiffusionTensorScalarMeasurements.py | 1 - .../diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py | 1 - .../slicer/diffusion/tests/test_auto_ResampleDTIVolume.py | 1 - .../diffusion/tests/test_auto_TractographyLabelMapSeeding.py | 1 - .../slicer/filtering/tests/test_auto_AddScalarVolumes.py | 1 - .../slicer/filtering/tests/test_auto_CastScalarVolume.py | 1 - .../slicer/filtering/tests/test_auto_CheckerBoardFilter.py | 1 - .../filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py | 1 - .../slicer/filtering/tests/test_auto_ExtractSkeleton.py | 1 - .../slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py | 1 - .../filtering/tests/test_auto_GradientAnisotropicDiffusion.py | 1 - .../filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py | 1 - .../filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py | 1 - .../slicer/filtering/tests/test_auto_HistogramMatching.py | 1 - .../slicer/filtering/tests/test_auto_ImageLabelCombine.py | 1 - .../slicer/filtering/tests/test_auto_MaskScalarVolume.py | 1 - .../slicer/filtering/tests/test_auto_MedianImageFilter.py | 1 - .../slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py | 1 - .../slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py | 1 - .../filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py | 1 - .../slicer/filtering/tests/test_auto_SubtractScalarVolumes.py | 1 - .../slicer/filtering/tests/test_auto_ThresholdScalarVolume.py | 1 - .../tests/test_auto_VotingBinaryHoleFillingImageFilter.py | 1 - .../diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py | 1 - .../slicer/legacy/tests/test_auto_AffineRegistration.py | 1 - .../legacy/tests/test_auto_BSplineDeformableRegistration.py | 1 - .../slicer/legacy/tests/test_auto_BSplineToDeformationField.py | 1 - .../slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py | 1 - .../slicer/legacy/tests/test_auto_LinearRegistration.py | 1 - .../legacy/tests/test_auto_MultiResolutionAffineRegistration.py | 1 - .../slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py | 1 - .../slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py | 1 - .../slicer/legacy/tests/test_auto_ResampleScalarVolume.py | 1 - .../slicer/legacy/tests/test_auto_RigidRegistration.py | 1 - .../quantification/tests/test_auto_IntensityDifferenceMetric.py | 1 - .../tests/test_auto_PETStandardUptakeValueComputation.py | 1 - .../slicer/registration/tests/test_auto_ACPCTransform.py | 1 - .../slicer/registration/tests/test_auto_BRAINSDemonWarp.py | 1 - .../interfaces/slicer/registration/tests/test_auto_BRAINSFit.py | 1 - .../slicer/registration/tests/test_auto_BRAINSResample.py | 1 - .../slicer/registration/tests/test_auto_FiducialRegistration.py | 1 - .../slicer/registration/tests/test_auto_VBRAINSDemonWarp.py | 1 - .../slicer/segmentation/tests/test_auto_BRAINSROIAuto.py | 1 - .../slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py | 1 - .../segmentation/tests/test_auto_RobustStatisticsSegmenter.py | 1 - .../tests/test_auto_SimpleRegionGrowingSegmentation.py | 1 - 
nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py | 1 - .../slicer/tests/test_auto_EMSegmentTransformToNewFormat.py | 1 - nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py | 1 - nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py | 1 - nipype/interfaces/slicer/tests/test_auto_MergeModels.py | 1 - nipype/interfaces/slicer/tests/test_auto_ModelMaker.py | 1 - nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py | 1 - nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py | 1 - nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py | 1 - nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py | 1 - nipype/interfaces/spm/tests/test_auto_Analyze2nii.py | 1 - nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py | 1 - nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py | 1 - nipype/interfaces/spm/tests/test_auto_ApplyTransform.py | 1 - nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py | 1 - nipype/interfaces/spm/tests/test_auto_Coregister.py | 1 - nipype/interfaces/spm/tests/test_auto_CreateWarped.py | 1 - nipype/interfaces/spm/tests/test_auto_DARTEL.py | 1 - nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py | 1 - nipype/interfaces/spm/tests/test_auto_DicomImport.py | 1 - nipype/interfaces/spm/tests/test_auto_EstimateContrast.py | 1 - nipype/interfaces/spm/tests/test_auto_EstimateModel.py | 1 - nipype/interfaces/spm/tests/test_auto_FactorialDesign.py | 1 - nipype/interfaces/spm/tests/test_auto_FieldMap.py | 1 - nipype/interfaces/spm/tests/test_auto_Level1Design.py | 1 - .../interfaces/spm/tests/test_auto_MultipleRegressionDesign.py | 1 - nipype/interfaces/spm/tests/test_auto_NewSegment.py | 1 - nipype/interfaces/spm/tests/test_auto_Normalize.py | 1 - nipype/interfaces/spm/tests/test_auto_Normalize12.py | 1 - nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py | 1 - nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py | 1 - nipype/interfaces/spm/tests/test_auto_Realign.py | 1 - nipype/interfaces/spm/tests/test_auto_Reslice.py | 1 - nipype/interfaces/spm/tests/test_auto_ResliceToReference.py | 1 - nipype/interfaces/spm/tests/test_auto_SPMCommand.py | 1 - nipype/interfaces/spm/tests/test_auto_Segment.py | 1 - nipype/interfaces/spm/tests/test_auto_SliceTiming.py | 1 - nipype/interfaces/spm/tests/test_auto_Smooth.py | 1 - nipype/interfaces/spm/tests/test_auto_Threshold.py | 1 - nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py | 1 - nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py | 1 - nipype/interfaces/spm/tests/test_auto_VBMSegment.py | 1 - nipype/interfaces/tests/test_auto_BIDSDataGrabber.py | 1 - nipype/interfaces/tests/test_auto_Bru2.py | 1 - nipype/interfaces/tests/test_auto_C3d.py | 1 - nipype/interfaces/tests/test_auto_C3dAffineTool.py | 1 - nipype/interfaces/tests/test_auto_CopyMeta.py | 1 - nipype/interfaces/tests/test_auto_DataFinder.py | 1 - nipype/interfaces/tests/test_auto_DataGrabber.py | 1 - nipype/interfaces/tests/test_auto_DataSink.py | 1 - nipype/interfaces/tests/test_auto_Dcm2nii.py | 1 - nipype/interfaces/tests/test_auto_Dcm2niix.py | 1 - nipype/interfaces/tests/test_auto_DcmStack.py | 1 - nipype/interfaces/tests/test_auto_FreeSurferSource.py | 1 - nipype/interfaces/tests/test_auto_GroupAndStack.py | 1 - nipype/interfaces/tests/test_auto_IOBase.py | 1 - nipype/interfaces/tests/test_auto_JSONFileGrabber.py | 1 - nipype/interfaces/tests/test_auto_JSONFileSink.py | 1 - nipype/interfaces/tests/test_auto_LookupMeta.py | 1 - 
nipype/interfaces/tests/test_auto_MatlabCommand.py | 1 - nipype/interfaces/tests/test_auto_MergeNifti.py | 1 - nipype/interfaces/tests/test_auto_MeshFix.py | 1 - nipype/interfaces/tests/test_auto_MySQLSink.py | 1 - nipype/interfaces/tests/test_auto_NiftiGeneratorBase.py | 1 - nipype/interfaces/tests/test_auto_NilearnBaseInterface.py | 1 - nipype/interfaces/tests/test_auto_PETPVC.py | 1 - nipype/interfaces/tests/test_auto_Quickshear.py | 1 - nipype/interfaces/tests/test_auto_Reorient.py | 1 - nipype/interfaces/tests/test_auto_Rescale.py | 1 - nipype/interfaces/tests/test_auto_S3DataGrabber.py | 1 - nipype/interfaces/tests/test_auto_SQLiteSink.py | 1 - nipype/interfaces/tests/test_auto_SSHDataGrabber.py | 1 - nipype/interfaces/tests/test_auto_SelectFiles.py | 1 - nipype/interfaces/tests/test_auto_SignalExtraction.py | 1 - nipype/interfaces/tests/test_auto_SlicerCommandLine.py | 1 - nipype/interfaces/tests/test_auto_SplitNifti.py | 1 - nipype/interfaces/tests/test_auto_XNATSink.py | 1 - nipype/interfaces/tests/test_auto_XNATSource.py | 1 - nipype/interfaces/utility/tests/test_auto_AssertEqual.py | 1 - nipype/interfaces/utility/tests/test_auto_CSVReader.py | 1 - nipype/interfaces/utility/tests/test_auto_Function.py | 1 - nipype/interfaces/utility/tests/test_auto_IdentityInterface.py | 1 - nipype/interfaces/utility/tests/test_auto_Merge.py | 1 - nipype/interfaces/utility/tests/test_auto_Rename.py | 1 - nipype/interfaces/utility/tests/test_auto_Select.py | 1 - nipype/interfaces/utility/tests/test_auto_Split.py | 1 - nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py | 1 - nipype/interfaces/vista/tests/test_auto_VtoMat.py | 1 - nipype/interfaces/workbench/tests/test_auto_MetricResample.py | 1 - nipype/interfaces/workbench/tests/test_auto_WBCommand.py | 1 - 827 files changed, 827 deletions(-) diff --git a/nipype/algorithms/tests/test_auto_ACompCor.py b/nipype/algorithms/tests/test_auto_ACompCor.py index eadbf3e126..168dd27249 100644 --- a/nipype/algorithms/tests/test_auto_ACompCor.py +++ b/nipype/algorithms/tests/test_auto_ACompCor.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..confounds import ACompCor diff --git a/nipype/algorithms/tests/test_auto_ActivationCount.py b/nipype/algorithms/tests/test_auto_ActivationCount.py index d6e3ff7165..d88bf9b025 100644 --- a/nipype/algorithms/tests/test_auto_ActivationCount.py +++ b/nipype/algorithms/tests/test_auto_ActivationCount.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..stats import ActivationCount diff --git a/nipype/algorithms/tests/test_auto_AddCSVColumn.py b/nipype/algorithms/tests/test_auto_AddCSVColumn.py index feedcf46e8..acab44d3c7 100644 --- a/nipype/algorithms/tests/test_auto_AddCSVColumn.py +++ b/nipype/algorithms/tests/test_auto_AddCSVColumn.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..misc import AddCSVColumn diff --git a/nipype/algorithms/tests/test_auto_AddCSVRow.py b/nipype/algorithms/tests/test_auto_AddCSVRow.py index 4666a147d2..17a56d43e8 100644 --- a/nipype/algorithms/tests/test_auto_AddCSVRow.py +++ b/nipype/algorithms/tests/test_auto_AddCSVRow.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..misc import AddCSVRow diff --git a/nipype/algorithms/tests/test_auto_AddNoise.py b/nipype/algorithms/tests/test_auto_AddNoise.py index 
6e0655a93e..6cd2b78e69 100644 --- a/nipype/algorithms/tests/test_auto_AddNoise.py +++ b/nipype/algorithms/tests/test_auto_AddNoise.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..misc import AddNoise diff --git a/nipype/algorithms/tests/test_auto_ArtifactDetect.py b/nipype/algorithms/tests/test_auto_ArtifactDetect.py index 85c57b8823..0811629c6a 100644 --- a/nipype/algorithms/tests/test_auto_ArtifactDetect.py +++ b/nipype/algorithms/tests/test_auto_ArtifactDetect.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..rapidart import ArtifactDetect diff --git a/nipype/algorithms/tests/test_auto_CalculateMedian.py b/nipype/algorithms/tests/test_auto_CalculateMedian.py index 1f9aa6cd4d..1da36b096f 100644 --- a/nipype/algorithms/tests/test_auto_CalculateMedian.py +++ b/nipype/algorithms/tests/test_auto_CalculateMedian.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..misc import CalculateMedian diff --git a/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py b/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py index 3dbbd772c8..59e18068fd 100644 --- a/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py +++ b/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..misc import CalculateNormalizedMoments diff --git a/nipype/algorithms/tests/test_auto_ComputeDVARS.py b/nipype/algorithms/tests/test_auto_ComputeDVARS.py index ca263c77ac..c3bbcafe73 100644 --- a/nipype/algorithms/tests/test_auto_ComputeDVARS.py +++ b/nipype/algorithms/tests/test_auto_ComputeDVARS.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..confounds import ComputeDVARS diff --git a/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py b/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py index 0308653786..835fde1bfa 100644 --- a/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py +++ b/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..mesh import ComputeMeshWarp diff --git a/nipype/algorithms/tests/test_auto_CreateNifti.py b/nipype/algorithms/tests/test_auto_CreateNifti.py index f8bf8a405f..99b1360500 100644 --- a/nipype/algorithms/tests/test_auto_CreateNifti.py +++ b/nipype/algorithms/tests/test_auto_CreateNifti.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..misc import CreateNifti diff --git a/nipype/algorithms/tests/test_auto_Distance.py b/nipype/algorithms/tests/test_auto_Distance.py index 2c5d098d73..d768235e2e 100644 --- a/nipype/algorithms/tests/test_auto_Distance.py +++ b/nipype/algorithms/tests/test_auto_Distance.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..misc import Distance diff --git a/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py b/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py index 685dec61e8..65c030df73 100644 --- a/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py +++ b/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by 
tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..confounds import FramewiseDisplacement diff --git a/nipype/algorithms/tests/test_auto_FuzzyOverlap.py b/nipype/algorithms/tests/test_auto_FuzzyOverlap.py index e9e28aaa44..bf2bf4fcac 100644 --- a/nipype/algorithms/tests/test_auto_FuzzyOverlap.py +++ b/nipype/algorithms/tests/test_auto_FuzzyOverlap.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..misc import FuzzyOverlap diff --git a/nipype/algorithms/tests/test_auto_Gunzip.py b/nipype/algorithms/tests/test_auto_Gunzip.py index f12e1f9b45..8ee8ad9dbb 100644 --- a/nipype/algorithms/tests/test_auto_Gunzip.py +++ b/nipype/algorithms/tests/test_auto_Gunzip.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..misc import Gunzip diff --git a/nipype/algorithms/tests/test_auto_ICC.py b/nipype/algorithms/tests/test_auto_ICC.py index 1a4a2b1517..4b1fb99b32 100644 --- a/nipype/algorithms/tests/test_auto_ICC.py +++ b/nipype/algorithms/tests/test_auto_ICC.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..icc import ICC diff --git a/nipype/algorithms/tests/test_auto_Matlab2CSV.py b/nipype/algorithms/tests/test_auto_Matlab2CSV.py index fcc1648bf9..53f12ceeac 100644 --- a/nipype/algorithms/tests/test_auto_Matlab2CSV.py +++ b/nipype/algorithms/tests/test_auto_Matlab2CSV.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..misc import Matlab2CSV diff --git a/nipype/algorithms/tests/test_auto_MergeCSVFiles.py b/nipype/algorithms/tests/test_auto_MergeCSVFiles.py index fd882e850a..353bee7380 100644 --- a/nipype/algorithms/tests/test_auto_MergeCSVFiles.py +++ b/nipype/algorithms/tests/test_auto_MergeCSVFiles.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..misc import MergeCSVFiles diff --git a/nipype/algorithms/tests/test_auto_MergeROIs.py b/nipype/algorithms/tests/test_auto_MergeROIs.py index 01b2b097a8..bda2f01f7d 100644 --- a/nipype/algorithms/tests/test_auto_MergeROIs.py +++ b/nipype/algorithms/tests/test_auto_MergeROIs.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..misc import MergeROIs diff --git a/nipype/algorithms/tests/test_auto_MeshWarpMaths.py b/nipype/algorithms/tests/test_auto_MeshWarpMaths.py index f89b16017b..603ac40bbc 100644 --- a/nipype/algorithms/tests/test_auto_MeshWarpMaths.py +++ b/nipype/algorithms/tests/test_auto_MeshWarpMaths.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..mesh import MeshWarpMaths diff --git a/nipype/algorithms/tests/test_auto_ModifyAffine.py b/nipype/algorithms/tests/test_auto_ModifyAffine.py index a0c4150a98..d524a3cf58 100644 --- a/nipype/algorithms/tests/test_auto_ModifyAffine.py +++ b/nipype/algorithms/tests/test_auto_ModifyAffine.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..misc import ModifyAffine diff --git a/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py b/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py index b86fe3df03..9c0ab829e8 100644 --- a/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py +++ 
b/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..confounds import NonSteadyStateDetector diff --git a/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py b/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py index 4c9a5584d0..27e16e8ce0 100644 --- a/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py +++ b/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..misc import NormalizeProbabilityMapSet diff --git a/nipype/algorithms/tests/test_auto_P2PDistance.py b/nipype/algorithms/tests/test_auto_P2PDistance.py index 9948e3675e..71788dbce2 100644 --- a/nipype/algorithms/tests/test_auto_P2PDistance.py +++ b/nipype/algorithms/tests/test_auto_P2PDistance.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..mesh import P2PDistance diff --git a/nipype/algorithms/tests/test_auto_PickAtlas.py b/nipype/algorithms/tests/test_auto_PickAtlas.py index 3b15c302e5..c379d86ee8 100644 --- a/nipype/algorithms/tests/test_auto_PickAtlas.py +++ b/nipype/algorithms/tests/test_auto_PickAtlas.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..misc import PickAtlas diff --git a/nipype/algorithms/tests/test_auto_Similarity.py b/nipype/algorithms/tests/test_auto_Similarity.py index b2ad79b5eb..8e2427c140 100644 --- a/nipype/algorithms/tests/test_auto_Similarity.py +++ b/nipype/algorithms/tests/test_auto_Similarity.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..metrics import Similarity diff --git a/nipype/algorithms/tests/test_auto_SimpleThreshold.py b/nipype/algorithms/tests/test_auto_SimpleThreshold.py index 4e34d86799..2c66dc9f76 100644 --- a/nipype/algorithms/tests/test_auto_SimpleThreshold.py +++ b/nipype/algorithms/tests/test_auto_SimpleThreshold.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..misc import SimpleThreshold diff --git a/nipype/algorithms/tests/test_auto_SpecifyModel.py b/nipype/algorithms/tests/test_auto_SpecifyModel.py index 452a048764..17650c68f8 100644 --- a/nipype/algorithms/tests/test_auto_SpecifyModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifyModel.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..modelgen import SpecifyModel diff --git a/nipype/algorithms/tests/test_auto_SpecifySPMModel.py b/nipype/algorithms/tests/test_auto_SpecifySPMModel.py index 1f3ec7058d..29cef2cf46 100644 --- a/nipype/algorithms/tests/test_auto_SpecifySPMModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifySPMModel.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..modelgen import SpecifySPMModel diff --git a/nipype/algorithms/tests/test_auto_SpecifySparseModel.py b/nipype/algorithms/tests/test_auto_SpecifySparseModel.py index 93fc035fc2..02893582b9 100644 --- a/nipype/algorithms/tests/test_auto_SpecifySparseModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifySparseModel.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from 
..modelgen import SpecifySparseModel diff --git a/nipype/algorithms/tests/test_auto_SplitROIs.py b/nipype/algorithms/tests/test_auto_SplitROIs.py index 963926666d..53f12d2b29 100644 --- a/nipype/algorithms/tests/test_auto_SplitROIs.py +++ b/nipype/algorithms/tests/test_auto_SplitROIs.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..misc import SplitROIs diff --git a/nipype/algorithms/tests/test_auto_StimulusCorrelation.py b/nipype/algorithms/tests/test_auto_StimulusCorrelation.py index 8c7ef276d9..6af303270e 100644 --- a/nipype/algorithms/tests/test_auto_StimulusCorrelation.py +++ b/nipype/algorithms/tests/test_auto_StimulusCorrelation.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..rapidart import StimulusCorrelation diff --git a/nipype/algorithms/tests/test_auto_TCompCor.py b/nipype/algorithms/tests/test_auto_TCompCor.py index 44b01b2972..b531ef8f94 100644 --- a/nipype/algorithms/tests/test_auto_TCompCor.py +++ b/nipype/algorithms/tests/test_auto_TCompCor.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..confounds import TCompCor diff --git a/nipype/algorithms/tests/test_auto_TVTKBaseInterface.py b/nipype/algorithms/tests/test_auto_TVTKBaseInterface.py index 9fc2d17aba..d9b7309778 100644 --- a/nipype/algorithms/tests/test_auto_TVTKBaseInterface.py +++ b/nipype/algorithms/tests/test_auto_TVTKBaseInterface.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..mesh import TVTKBaseInterface diff --git a/nipype/algorithms/tests/test_auto_WarpPoints.py b/nipype/algorithms/tests/test_auto_WarpPoints.py index b6965065a2..21dc4fae69 100644 --- a/nipype/algorithms/tests/test_auto_WarpPoints.py +++ b/nipype/algorithms/tests/test_auto_WarpPoints.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..mesh import WarpPoints diff --git a/nipype/interfaces/afni/tests/test_auto_ABoverlap.py b/nipype/interfaces/afni/tests/test_auto_ABoverlap.py index 55cb12c27f..df36cee5fa 100644 --- a/nipype/interfaces/afni/tests/test_auto_ABoverlap.py +++ b/nipype/interfaces/afni/tests/test_auto_ABoverlap.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import ABoverlap diff --git a/nipype/interfaces/afni/tests/test_auto_AFNICommand.py b/nipype/interfaces/afni/tests/test_auto_AFNICommand.py index 724c98dcb2..b178be3f70 100644 --- a/nipype/interfaces/afni/tests/test_auto_AFNICommand.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNICommand.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import AFNICommand diff --git a/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py b/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py index 431baec30b..ce0a85708c 100644 --- a/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import AFNICommandBase diff --git a/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py b/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py index ba2411edfb..27be6a2006 100644 --- 
a/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import AFNIPythonCommand diff --git a/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py b/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py index d89519d571..b97334c9e2 100644 --- a/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import AFNItoNIFTI diff --git a/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py b/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py index dfbfc648f7..00643dd8cf 100644 --- a/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py +++ b/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import AlignEpiAnatPy diff --git a/nipype/interfaces/afni/tests/test_auto_Allineate.py b/nipype/interfaces/afni/tests/test_auto_Allineate.py index be02af14da..1e0caddc49 100644 --- a/nipype/interfaces/afni/tests/test_auto_Allineate.py +++ b/nipype/interfaces/afni/tests/test_auto_Allineate.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Allineate diff --git a/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py b/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py index bb00b3b585..5d0920eaf7 100644 --- a/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py +++ b/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import AutoTLRC diff --git a/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py b/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py index 424b7d25b1..62ce618e16 100644 --- a/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py +++ b/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import AutoTcorrelate diff --git a/nipype/interfaces/afni/tests/test_auto_Autobox.py b/nipype/interfaces/afni/tests/test_auto_Autobox.py index f158263c54..593461d2d0 100644 --- a/nipype/interfaces/afni/tests/test_auto_Autobox.py +++ b/nipype/interfaces/afni/tests/test_auto_Autobox.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Autobox diff --git a/nipype/interfaces/afni/tests/test_auto_Automask.py b/nipype/interfaces/afni/tests/test_auto_Automask.py index efffd19cba..c0145dba15 100644 --- a/nipype/interfaces/afni/tests/test_auto_Automask.py +++ b/nipype/interfaces/afni/tests/test_auto_Automask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Automask diff --git a/nipype/interfaces/afni/tests/test_auto_Axialize.py b/nipype/interfaces/afni/tests/test_auto_Axialize.py index 62b425c932..a998ec3e93 100644 --- a/nipype/interfaces/afni/tests/test_auto_Axialize.py +++ b/nipype/interfaces/afni/tests/test_auto_Axialize.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO 
NOT EDIT -from __future__ import unicode_literals from ..utils import Axialize diff --git a/nipype/interfaces/afni/tests/test_auto_Bandpass.py b/nipype/interfaces/afni/tests/test_auto_Bandpass.py index fb0861a747..41bdbd8ca1 100644 --- a/nipype/interfaces/afni/tests/test_auto_Bandpass.py +++ b/nipype/interfaces/afni/tests/test_auto_Bandpass.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Bandpass diff --git a/nipype/interfaces/afni/tests/test_auto_BlurInMask.py b/nipype/interfaces/afni/tests/test_auto_BlurInMask.py index 334116d945..dce4ae290e 100644 --- a/nipype/interfaces/afni/tests/test_auto_BlurInMask.py +++ b/nipype/interfaces/afni/tests/test_auto_BlurInMask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import BlurInMask diff --git a/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py b/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py index 2f88a1edcb..4b219d38a5 100644 --- a/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py +++ b/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import BlurToFWHM diff --git a/nipype/interfaces/afni/tests/test_auto_BrickStat.py b/nipype/interfaces/afni/tests/test_auto_BrickStat.py index 0056df5980..c584719309 100644 --- a/nipype/interfaces/afni/tests/test_auto_BrickStat.py +++ b/nipype/interfaces/afni/tests/test_auto_BrickStat.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import BrickStat diff --git a/nipype/interfaces/afni/tests/test_auto_Bucket.py b/nipype/interfaces/afni/tests/test_auto_Bucket.py index c3faf87a09..ae6fd3317c 100644 --- a/nipype/interfaces/afni/tests/test_auto_Bucket.py +++ b/nipype/interfaces/afni/tests/test_auto_Bucket.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Bucket diff --git a/nipype/interfaces/afni/tests/test_auto_Calc.py b/nipype/interfaces/afni/tests/test_auto_Calc.py index 28863d5a4b..d719c4d315 100644 --- a/nipype/interfaces/afni/tests/test_auto_Calc.py +++ b/nipype/interfaces/afni/tests/test_auto_Calc.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Calc diff --git a/nipype/interfaces/afni/tests/test_auto_Cat.py b/nipype/interfaces/afni/tests/test_auto_Cat.py index 3da86c66d6..80cc6cf86f 100644 --- a/nipype/interfaces/afni/tests/test_auto_Cat.py +++ b/nipype/interfaces/afni/tests/test_auto_Cat.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Cat diff --git a/nipype/interfaces/afni/tests/test_auto_CatMatvec.py b/nipype/interfaces/afni/tests/test_auto_CatMatvec.py index b67ab485d4..0b3f294bec 100644 --- a/nipype/interfaces/afni/tests/test_auto_CatMatvec.py +++ b/nipype/interfaces/afni/tests/test_auto_CatMatvec.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import CatMatvec diff --git a/nipype/interfaces/afni/tests/test_auto_CenterMass.py b/nipype/interfaces/afni/tests/test_auto_CenterMass.py index 322218a50d..f7dec27c41 100644 --- a/nipype/interfaces/afni/tests/test_auto_CenterMass.py +++ 
b/nipype/interfaces/afni/tests/test_auto_CenterMass.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import CenterMass diff --git a/nipype/interfaces/afni/tests/test_auto_ClipLevel.py b/nipype/interfaces/afni/tests/test_auto_ClipLevel.py index 8b8c61208d..cd1d963248 100644 --- a/nipype/interfaces/afni/tests/test_auto_ClipLevel.py +++ b/nipype/interfaces/afni/tests/test_auto_ClipLevel.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import ClipLevel diff --git a/nipype/interfaces/afni/tests/test_auto_ConvertDset.py b/nipype/interfaces/afni/tests/test_auto_ConvertDset.py index 06ba3a54f2..5b52606b51 100644 --- a/nipype/interfaces/afni/tests/test_auto_ConvertDset.py +++ b/nipype/interfaces/afni/tests/test_auto_ConvertDset.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import ConvertDset diff --git a/nipype/interfaces/afni/tests/test_auto_Copy.py b/nipype/interfaces/afni/tests/test_auto_Copy.py index a8e67d5607..c347e111c6 100644 --- a/nipype/interfaces/afni/tests/test_auto_Copy.py +++ b/nipype/interfaces/afni/tests/test_auto_Copy.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Copy diff --git a/nipype/interfaces/afni/tests/test_auto_Deconvolve.py b/nipype/interfaces/afni/tests/test_auto_Deconvolve.py index ae42a77019..256f68fc8f 100644 --- a/nipype/interfaces/afni/tests/test_auto_Deconvolve.py +++ b/nipype/interfaces/afni/tests/test_auto_Deconvolve.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import Deconvolve diff --git a/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py b/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py index 664cca5985..f939dcdadf 100644 --- a/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py +++ b/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import DegreeCentrality diff --git a/nipype/interfaces/afni/tests/test_auto_Despike.py b/nipype/interfaces/afni/tests/test_auto_Despike.py index ff0b8b532a..5cb7f9d17a 100644 --- a/nipype/interfaces/afni/tests/test_auto_Despike.py +++ b/nipype/interfaces/afni/tests/test_auto_Despike.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Despike diff --git a/nipype/interfaces/afni/tests/test_auto_Detrend.py b/nipype/interfaces/afni/tests/test_auto_Detrend.py index 1938529cf7..1b70ad27c6 100644 --- a/nipype/interfaces/afni/tests/test_auto_Detrend.py +++ b/nipype/interfaces/afni/tests/test_auto_Detrend.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Detrend diff --git a/nipype/interfaces/afni/tests/test_auto_Dot.py b/nipype/interfaces/afni/tests/test_auto_Dot.py index 7623e90ca7..1a1054aaa0 100644 --- a/nipype/interfaces/afni/tests/test_auto_Dot.py +++ b/nipype/interfaces/afni/tests/test_auto_Dot.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Dot diff --git a/nipype/interfaces/afni/tests/test_auto_ECM.py 
b/nipype/interfaces/afni/tests/test_auto_ECM.py index 8a4793fb7f..9a7e4a0e84 100644 --- a/nipype/interfaces/afni/tests/test_auto_ECM.py +++ b/nipype/interfaces/afni/tests/test_auto_ECM.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import ECM diff --git a/nipype/interfaces/afni/tests/test_auto_Edge3.py b/nipype/interfaces/afni/tests/test_auto_Edge3.py index 8fc6953c28..6d0820f856 100644 --- a/nipype/interfaces/afni/tests/test_auto_Edge3.py +++ b/nipype/interfaces/afni/tests/test_auto_Edge3.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Edge3 diff --git a/nipype/interfaces/afni/tests/test_auto_Eval.py b/nipype/interfaces/afni/tests/test_auto_Eval.py index 35c1360a6e..751f446bd8 100644 --- a/nipype/interfaces/afni/tests/test_auto_Eval.py +++ b/nipype/interfaces/afni/tests/test_auto_Eval.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Eval diff --git a/nipype/interfaces/afni/tests/test_auto_FWHMx.py b/nipype/interfaces/afni/tests/test_auto_FWHMx.py index 14ab26fa4f..8e8049ea5d 100644 --- a/nipype/interfaces/afni/tests/test_auto_FWHMx.py +++ b/nipype/interfaces/afni/tests/test_auto_FWHMx.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import FWHMx diff --git a/nipype/interfaces/afni/tests/test_auto_Fim.py b/nipype/interfaces/afni/tests/test_auto_Fim.py index 931e5ff244..b1207e841b 100644 --- a/nipype/interfaces/afni/tests/test_auto_Fim.py +++ b/nipype/interfaces/afni/tests/test_auto_Fim.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Fim diff --git a/nipype/interfaces/afni/tests/test_auto_Fourier.py b/nipype/interfaces/afni/tests/test_auto_Fourier.py index 69cd955cbc..bed8de92dd 100644 --- a/nipype/interfaces/afni/tests/test_auto_Fourier.py +++ b/nipype/interfaces/afni/tests/test_auto_Fourier.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Fourier diff --git a/nipype/interfaces/afni/tests/test_auto_GCOR.py b/nipype/interfaces/afni/tests/test_auto_GCOR.py index 9d603dda36..4d0c67132f 100644 --- a/nipype/interfaces/afni/tests/test_auto_GCOR.py +++ b/nipype/interfaces/afni/tests/test_auto_GCOR.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import GCOR diff --git a/nipype/interfaces/afni/tests/test_auto_Hist.py b/nipype/interfaces/afni/tests/test_auto_Hist.py index 48499a9605..d86b999174 100644 --- a/nipype/interfaces/afni/tests/test_auto_Hist.py +++ b/nipype/interfaces/afni/tests/test_auto_Hist.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Hist diff --git a/nipype/interfaces/afni/tests/test_auto_LFCD.py b/nipype/interfaces/afni/tests/test_auto_LFCD.py index 9cbde10b56..c7606278db 100644 --- a/nipype/interfaces/afni/tests/test_auto_LFCD.py +++ b/nipype/interfaces/afni/tests/test_auto_LFCD.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import LFCD diff --git a/nipype/interfaces/afni/tests/test_auto_LocalBistat.py 
b/nipype/interfaces/afni/tests/test_auto_LocalBistat.py index 22f0b717d8..40982c80c8 100644 --- a/nipype/interfaces/afni/tests/test_auto_LocalBistat.py +++ b/nipype/interfaces/afni/tests/test_auto_LocalBistat.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import LocalBistat diff --git a/nipype/interfaces/afni/tests/test_auto_MaskTool.py b/nipype/interfaces/afni/tests/test_auto_MaskTool.py index 1644bee878..f53f733ec9 100644 --- a/nipype/interfaces/afni/tests/test_auto_MaskTool.py +++ b/nipype/interfaces/afni/tests/test_auto_MaskTool.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import MaskTool diff --git a/nipype/interfaces/afni/tests/test_auto_Maskave.py b/nipype/interfaces/afni/tests/test_auto_Maskave.py index a318e685a9..7833854376 100644 --- a/nipype/interfaces/afni/tests/test_auto_Maskave.py +++ b/nipype/interfaces/afni/tests/test_auto_Maskave.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Maskave diff --git a/nipype/interfaces/afni/tests/test_auto_Means.py b/nipype/interfaces/afni/tests/test_auto_Means.py index 2e422e68db..db21d18737 100644 --- a/nipype/interfaces/afni/tests/test_auto_Means.py +++ b/nipype/interfaces/afni/tests/test_auto_Means.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Means diff --git a/nipype/interfaces/afni/tests/test_auto_Merge.py b/nipype/interfaces/afni/tests/test_auto_Merge.py index c36ee6f7a1..1305dcb952 100644 --- a/nipype/interfaces/afni/tests/test_auto_Merge.py +++ b/nipype/interfaces/afni/tests/test_auto_Merge.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Merge diff --git a/nipype/interfaces/afni/tests/test_auto_Notes.py b/nipype/interfaces/afni/tests/test_auto_Notes.py index 965bacb000..2d489e02b5 100644 --- a/nipype/interfaces/afni/tests/test_auto_Notes.py +++ b/nipype/interfaces/afni/tests/test_auto_Notes.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Notes diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py b/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py index 48b59a2968..28d900bfe4 100644 --- a/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py +++ b/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import NwarpAdjust diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpApply.py b/nipype/interfaces/afni/tests/test_auto_NwarpApply.py index 63f6baa044..dcf43fe06e 100644 --- a/nipype/interfaces/afni/tests/test_auto_NwarpApply.py +++ b/nipype/interfaces/afni/tests/test_auto_NwarpApply.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import NwarpApply diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpCat.py b/nipype/interfaces/afni/tests/test_auto_NwarpCat.py index c6b3689c9e..aa6ff98c3c 100644 --- a/nipype/interfaces/afni/tests/test_auto_NwarpCat.py +++ b/nipype/interfaces/afni/tests/test_auto_NwarpCat.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import 
unicode_literals from ..utils import NwarpCat diff --git a/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py b/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py index fe037851af..444cf9c496 100644 --- a/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py +++ b/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import OneDToolPy diff --git a/nipype/interfaces/afni/tests/test_auto_OutlierCount.py b/nipype/interfaces/afni/tests/test_auto_OutlierCount.py index a63fbb8bef..0a2b9871e2 100644 --- a/nipype/interfaces/afni/tests/test_auto_OutlierCount.py +++ b/nipype/interfaces/afni/tests/test_auto_OutlierCount.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import OutlierCount diff --git a/nipype/interfaces/afni/tests/test_auto_QualityIndex.py b/nipype/interfaces/afni/tests/test_auto_QualityIndex.py index 679a3e0393..60aad8f23f 100644 --- a/nipype/interfaces/afni/tests/test_auto_QualityIndex.py +++ b/nipype/interfaces/afni/tests/test_auto_QualityIndex.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import QualityIndex diff --git a/nipype/interfaces/afni/tests/test_auto_Qwarp.py b/nipype/interfaces/afni/tests/test_auto_Qwarp.py index f6df3d0ab5..be6dcb3255 100644 --- a/nipype/interfaces/afni/tests/test_auto_Qwarp.py +++ b/nipype/interfaces/afni/tests/test_auto_Qwarp.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Qwarp diff --git a/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py b/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py index 2b823cf8af..57527b5e55 100644 --- a/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py +++ b/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import QwarpPlusMinus diff --git a/nipype/interfaces/afni/tests/test_auto_ROIStats.py b/nipype/interfaces/afni/tests/test_auto_ROIStats.py index 57eb59d284..f52c37f06f 100644 --- a/nipype/interfaces/afni/tests/test_auto_ROIStats.py +++ b/nipype/interfaces/afni/tests/test_auto_ROIStats.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import ROIStats diff --git a/nipype/interfaces/afni/tests/test_auto_Refit.py b/nipype/interfaces/afni/tests/test_auto_Refit.py index 4983eafc81..0134abfc6d 100644 --- a/nipype/interfaces/afni/tests/test_auto_Refit.py +++ b/nipype/interfaces/afni/tests/test_auto_Refit.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Refit diff --git a/nipype/interfaces/afni/tests/test_auto_Remlfit.py b/nipype/interfaces/afni/tests/test_auto_Remlfit.py index 4cdc8b2ff2..54de8603a2 100644 --- a/nipype/interfaces/afni/tests/test_auto_Remlfit.py +++ b/nipype/interfaces/afni/tests/test_auto_Remlfit.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import Remlfit diff --git a/nipype/interfaces/afni/tests/test_auto_Resample.py b/nipype/interfaces/afni/tests/test_auto_Resample.py index 9ab2309307..575219ac69 100644 --- 
a/nipype/interfaces/afni/tests/test_auto_Resample.py +++ b/nipype/interfaces/afni/tests/test_auto_Resample.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Resample diff --git a/nipype/interfaces/afni/tests/test_auto_Retroicor.py b/nipype/interfaces/afni/tests/test_auto_Retroicor.py index 4c2a1c2393..5f9b3c64e3 100644 --- a/nipype/interfaces/afni/tests/test_auto_Retroicor.py +++ b/nipype/interfaces/afni/tests/test_auto_Retroicor.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Retroicor diff --git a/nipype/interfaces/afni/tests/test_auto_SVMTest.py b/nipype/interfaces/afni/tests/test_auto_SVMTest.py index 4b31d3a11c..1c7a63df11 100644 --- a/nipype/interfaces/afni/tests/test_auto_SVMTest.py +++ b/nipype/interfaces/afni/tests/test_auto_SVMTest.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..svm import SVMTest diff --git a/nipype/interfaces/afni/tests/test_auto_SVMTrain.py b/nipype/interfaces/afni/tests/test_auto_SVMTrain.py index ddb3f8c05f..52f49d4234 100644 --- a/nipype/interfaces/afni/tests/test_auto_SVMTrain.py +++ b/nipype/interfaces/afni/tests/test_auto_SVMTrain.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..svm import SVMTrain diff --git a/nipype/interfaces/afni/tests/test_auto_Seg.py b/nipype/interfaces/afni/tests/test_auto_Seg.py index e93d81a7aa..2759706abd 100644 --- a/nipype/interfaces/afni/tests/test_auto_Seg.py +++ b/nipype/interfaces/afni/tests/test_auto_Seg.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Seg diff --git a/nipype/interfaces/afni/tests/test_auto_SkullStrip.py b/nipype/interfaces/afni/tests/test_auto_SkullStrip.py index e960369787..6d60c87473 100644 --- a/nipype/interfaces/afni/tests/test_auto_SkullStrip.py +++ b/nipype/interfaces/afni/tests/test_auto_SkullStrip.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import SkullStrip diff --git a/nipype/interfaces/afni/tests/test_auto_Synthesize.py b/nipype/interfaces/afni/tests/test_auto_Synthesize.py index 87278098fb..da0461c5de 100644 --- a/nipype/interfaces/afni/tests/test_auto_Synthesize.py +++ b/nipype/interfaces/afni/tests/test_auto_Synthesize.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import Synthesize diff --git a/nipype/interfaces/afni/tests/test_auto_TCat.py b/nipype/interfaces/afni/tests/test_auto_TCat.py index 5c51b02637..55fd3bdb42 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCat.py +++ b/nipype/interfaces/afni/tests/test_auto_TCat.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import TCat diff --git a/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py b/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py index 09b21ea408..9ea0b12f1b 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py +++ b/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import TCatSubBrick diff --git 
a/nipype/interfaces/afni/tests/test_auto_TCorr1D.py b/nipype/interfaces/afni/tests/test_auto_TCorr1D.py index e1b23a3387..261fa14eda 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCorr1D.py +++ b/nipype/interfaces/afni/tests/test_auto_TCorr1D.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import TCorr1D diff --git a/nipype/interfaces/afni/tests/test_auto_TCorrMap.py b/nipype/interfaces/afni/tests/test_auto_TCorrMap.py index 32778fcf11..9780950bd8 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCorrMap.py +++ b/nipype/interfaces/afni/tests/test_auto_TCorrMap.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import TCorrMap diff --git a/nipype/interfaces/afni/tests/test_auto_TCorrelate.py b/nipype/interfaces/afni/tests/test_auto_TCorrelate.py index 8d773f429e..99c381ea55 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCorrelate.py +++ b/nipype/interfaces/afni/tests/test_auto_TCorrelate.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import TCorrelate diff --git a/nipype/interfaces/afni/tests/test_auto_TNorm.py b/nipype/interfaces/afni/tests/test_auto_TNorm.py index 3fb246684b..482dba8f43 100644 --- a/nipype/interfaces/afni/tests/test_auto_TNorm.py +++ b/nipype/interfaces/afni/tests/test_auto_TNorm.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import TNorm diff --git a/nipype/interfaces/afni/tests/test_auto_TProject.py b/nipype/interfaces/afni/tests/test_auto_TProject.py index ba4efd6189..00fdef29e9 100644 --- a/nipype/interfaces/afni/tests/test_auto_TProject.py +++ b/nipype/interfaces/afni/tests/test_auto_TProject.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import TProject diff --git a/nipype/interfaces/afni/tests/test_auto_TShift.py b/nipype/interfaces/afni/tests/test_auto_TShift.py index a1208753d9..627352ed5b 100644 --- a/nipype/interfaces/afni/tests/test_auto_TShift.py +++ b/nipype/interfaces/afni/tests/test_auto_TShift.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import TShift diff --git a/nipype/interfaces/afni/tests/test_auto_TStat.py b/nipype/interfaces/afni/tests/test_auto_TStat.py index 5a3ebd8a21..1f3a7358ac 100644 --- a/nipype/interfaces/afni/tests/test_auto_TStat.py +++ b/nipype/interfaces/afni/tests/test_auto_TStat.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import TStat diff --git a/nipype/interfaces/afni/tests/test_auto_To3D.py b/nipype/interfaces/afni/tests/test_auto_To3D.py index 66ccabefcb..a6ced4b418 100644 --- a/nipype/interfaces/afni/tests/test_auto_To3D.py +++ b/nipype/interfaces/afni/tests/test_auto_To3D.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import To3D diff --git a/nipype/interfaces/afni/tests/test_auto_Undump.py b/nipype/interfaces/afni/tests/test_auto_Undump.py index 07d0ff8e81..86597b2d48 100644 --- a/nipype/interfaces/afni/tests/test_auto_Undump.py +++ b/nipype/interfaces/afni/tests/test_auto_Undump.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from 
__future__ import unicode_literals from ..utils import Undump diff --git a/nipype/interfaces/afni/tests/test_auto_Unifize.py b/nipype/interfaces/afni/tests/test_auto_Unifize.py index 3215d51e64..72031e7244 100644 --- a/nipype/interfaces/afni/tests/test_auto_Unifize.py +++ b/nipype/interfaces/afni/tests/test_auto_Unifize.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Unifize diff --git a/nipype/interfaces/afni/tests/test_auto_Volreg.py b/nipype/interfaces/afni/tests/test_auto_Volreg.py index 293b7613e8..d3ac19f997 100644 --- a/nipype/interfaces/afni/tests/test_auto_Volreg.py +++ b/nipype/interfaces/afni/tests/test_auto_Volreg.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Volreg diff --git a/nipype/interfaces/afni/tests/test_auto_Warp.py b/nipype/interfaces/afni/tests/test_auto_Warp.py index b85692310a..541f5f2c46 100644 --- a/nipype/interfaces/afni/tests/test_auto_Warp.py +++ b/nipype/interfaces/afni/tests/test_auto_Warp.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Warp diff --git a/nipype/interfaces/afni/tests/test_auto_ZCutUp.py b/nipype/interfaces/afni/tests/test_auto_ZCutUp.py index cacb6b1534..9c88d931c8 100644 --- a/nipype/interfaces/afni/tests/test_auto_ZCutUp.py +++ b/nipype/interfaces/afni/tests/test_auto_ZCutUp.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import ZCutUp diff --git a/nipype/interfaces/afni/tests/test_auto_Zcat.py b/nipype/interfaces/afni/tests/test_auto_Zcat.py index 3a2b7ff853..c45919fbea 100644 --- a/nipype/interfaces/afni/tests/test_auto_Zcat.py +++ b/nipype/interfaces/afni/tests/test_auto_Zcat.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Zcat diff --git a/nipype/interfaces/afni/tests/test_auto_Zeropad.py b/nipype/interfaces/afni/tests/test_auto_Zeropad.py index cfba8e2f3c..eda00b114a 100644 --- a/nipype/interfaces/afni/tests/test_auto_Zeropad.py +++ b/nipype/interfaces/afni/tests/test_auto_Zeropad.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Zeropad diff --git a/nipype/interfaces/ants/tests/test_auto_ANTS.py b/nipype/interfaces/ants/tests/test_auto_ANTS.py index 7c7ef6682c..1d8e84f756 100644 --- a/nipype/interfaces/ants/tests/test_auto_ANTS.py +++ b/nipype/interfaces/ants/tests/test_auto_ANTS.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import ANTS diff --git a/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py b/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py index 7423579ef7..54d1effe3a 100644 --- a/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py +++ b/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import ANTSCommand diff --git a/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py b/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py index fed21cdbef..4d42172975 100644 --- a/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py +++ 
b/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import AffineInitializer diff --git a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py index 292e6b398b..d5cd221c4a 100644 --- a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py +++ b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..segmentation import AntsJointFusion diff --git a/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py b/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py index c2bbffa14f..dbf548cb03 100644 --- a/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py +++ b/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..resampling import ApplyTransforms diff --git a/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py b/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py index 92c6c21ec1..086cdbaf60 100644 --- a/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py +++ b/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..resampling import ApplyTransformsToPoints diff --git a/nipype/interfaces/ants/tests/test_auto_Atropos.py b/nipype/interfaces/ants/tests/test_auto_Atropos.py index 90bc13778a..ad71b01d33 100644 --- a/nipype/interfaces/ants/tests/test_auto_Atropos.py +++ b/nipype/interfaces/ants/tests/test_auto_Atropos.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..segmentation import Atropos diff --git a/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py b/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py index 3fe3abe5cf..adaad2d179 100644 --- a/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import AverageAffineTransform diff --git a/nipype/interfaces/ants/tests/test_auto_AverageImages.py b/nipype/interfaces/ants/tests/test_auto_AverageImages.py index 41e0c99007..93bad09b5c 100644 --- a/nipype/interfaces/ants/tests/test_auto_AverageImages.py +++ b/nipype/interfaces/ants/tests/test_auto_AverageImages.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import AverageImages diff --git a/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py b/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py index 2d48192199..be8735414f 100644 --- a/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py +++ b/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..segmentation import BrainExtraction diff --git a/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py b/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py index 83fb2ed2aa..45b0a0d35e 100644 --- 
a/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import ComposeMultiTransform diff --git a/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py b/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py index ea9a16cbe2..a9b47e6b58 100644 --- a/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py +++ b/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..visualization import ConvertScalarImageToRGB diff --git a/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py b/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py index dd39568bc0..76be0e9f9e 100644 --- a/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py +++ b/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..segmentation import CorticalThickness diff --git a/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py b/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py index b32e7b98e0..2833faf328 100644 --- a/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py +++ b/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import CreateJacobianDeterminantImage diff --git a/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py b/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py index 74c2b0a7c2..ad3fe2aff2 100644 --- a/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py +++ b/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..visualization import CreateTiledMosaic diff --git a/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py b/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py index 882cb21854..bfca341cd5 100644 --- a/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py +++ b/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..segmentation import DenoiseImage diff --git a/nipype/interfaces/ants/tests/test_auto_JointFusion.py b/nipype/interfaces/ants/tests/test_auto_JointFusion.py index 9e82584729..bfc5907034 100644 --- a/nipype/interfaces/ants/tests/test_auto_JointFusion.py +++ b/nipype/interfaces/ants/tests/test_auto_JointFusion.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..segmentation import JointFusion diff --git a/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py b/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py index 6129670d39..813673d363 100644 --- a/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py +++ b/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..segmentation import KellyKapowski diff --git 
a/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py b/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py index a5de244711..035a8a796e 100644 --- a/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py +++ b/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import LabelGeometry diff --git a/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py b/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py index 8f9a13d832..cdf0ceac88 100644 --- a/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py +++ b/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..segmentation import LaplacianThickness diff --git a/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py b/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py index 1a5041ae74..f6fb6bd389 100644 --- a/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py +++ b/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import MeasureImageSimilarity diff --git a/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py b/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py index 1bf787018d..d6d39ca81d 100644 --- a/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py +++ b/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import MultiplyImages diff --git a/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py b/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py index 66edf0c3d6..24cd8774c0 100644 --- a/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py +++ b/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..segmentation import N4BiasFieldCorrection diff --git a/nipype/interfaces/ants/tests/test_auto_Registration.py b/nipype/interfaces/ants/tests/test_auto_Registration.py index 4bd253c3d0..ed379dcbb8 100644 --- a/nipype/interfaces/ants/tests/test_auto_Registration.py +++ b/nipype/interfaces/ants/tests/test_auto_Registration.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import Registration diff --git a/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py b/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py index 8bc79392e1..ca5ab4cbe4 100644 --- a/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py +++ b/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import RegistrationSynQuick diff --git a/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py b/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py index 42020f6db9..8d7ecdd3f8 100644 --- a/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py @@ -1,5 +1,4 @@ # AUTO-GENERATED 
by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..resampling import WarpImageMultiTransform diff --git a/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py b/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py index de3131f056..da719861d8 100644 --- a/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..resampling import WarpTimeSeriesImageMultiTransform diff --git a/nipype/interfaces/base/tests/test_auto_BaseInterface.py b/nipype/interfaces/base/tests/test_auto_BaseInterface.py index eb0272d495..4ee8ea9359 100644 --- a/nipype/interfaces/base/tests/test_auto_BaseInterface.py +++ b/nipype/interfaces/base/tests/test_auto_BaseInterface.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..core import BaseInterface diff --git a/nipype/interfaces/base/tests/test_auto_CommandLine.py b/nipype/interfaces/base/tests/test_auto_CommandLine.py index fb16422864..22d7406921 100644 --- a/nipype/interfaces/base/tests/test_auto_CommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_CommandLine.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..core import CommandLine diff --git a/nipype/interfaces/base/tests/test_auto_LibraryBaseInterface.py b/nipype/interfaces/base/tests/test_auto_LibraryBaseInterface.py index 19beef071f..9893a781a9 100644 --- a/nipype/interfaces/base/tests/test_auto_LibraryBaseInterface.py +++ b/nipype/interfaces/base/tests/test_auto_LibraryBaseInterface.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..core import LibraryBaseInterface diff --git a/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py b/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py index 2d66f2f623..4084a19945 100644 --- a/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..core import MpiCommandLine diff --git a/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py b/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py index aceabfe2a2..18e788f3a4 100644 --- a/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..core import SEMLikeCommandLine diff --git a/nipype/interfaces/base/tests/test_auto_SimpleInterface.py b/nipype/interfaces/base/tests/test_auto_SimpleInterface.py index 2a4454dd9f..ac62059628 100644 --- a/nipype/interfaces/base/tests/test_auto_SimpleInterface.py +++ b/nipype/interfaces/base/tests/test_auto_SimpleInterface.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..core import SimpleInterface diff --git a/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py b/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py index e39dc3acaa..5e4367addb 100644 --- a/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py 
+++ b/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..core import StdOutCommandLine diff --git a/nipype/interfaces/brainsuite/tests/test_auto_BDP.py b/nipype/interfaces/brainsuite/tests/test_auto_BDP.py index ff705edfeb..e68d97201d 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_BDP.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_BDP.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brainsuite import BDP diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py b/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py index 01200e50fc..c039b45b70 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brainsuite import Bfc diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Bse.py b/nipype/interfaces/brainsuite/tests/test_auto_Bse.py index bbd154bc24..cfda027046 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Bse.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Bse.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brainsuite import Bse diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py b/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py index 808e4347c3..0413d3beb2 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brainsuite import Cerebro diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py b/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py index 536cb158f2..375d178685 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brainsuite import Cortex diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py b/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py index ba430fb1fd..b55a5792f7 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brainsuite import Dewisp diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py b/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py index c69232fd01..95eb385ca2 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brainsuite import Dfs diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py b/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py index 9e3db80dd9..6c1c768739 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from 
..brainsuite import Hemisplit diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py b/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py index afc621a56e..fae6dde517 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brainsuite import Pialmesh diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py b/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py index d425d4ddac..62a319fc51 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brainsuite import Pvc diff --git a/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py b/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py index 7449e1488a..3d861a0677 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brainsuite import SVReg diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py b/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py index eb672d12b7..8aaba3d7d4 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brainsuite import Scrubmask diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py b/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py index 2191f7b133..81d1860f99 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brainsuite import Skullfinder diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Tca.py b/nipype/interfaces/brainsuite/tests/test_auto_Tca.py index ec2886b42f..b4ac0c4982 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Tca.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Tca.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brainsuite import Tca diff --git a/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py b/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py index d614ce335a..cf2174690e 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brainsuite import ThicknessPVC diff --git a/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py b/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py index 5d796126cc..6ccda11987 100644 --- a/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py +++ b/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..convert import AnalyzeHeader diff --git 
a/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py b/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py index caee9e68cf..df6e55f614 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import ComputeEigensystem diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py b/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py index 49cfdfd96a..494eca16fb 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import ComputeFractionalAnisotropy diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py b/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py index 212477d149..58da9d9212 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import ComputeMeanDiffusivity diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py b/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py index 1112c7c743..9402b6f047 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import ComputeTensorTrace diff --git a/nipype/interfaces/camino/tests/test_auto_Conmat.py b/nipype/interfaces/camino/tests/test_auto_Conmat.py index 06a76fdf2b..3cad4700de 100644 --- a/nipype/interfaces/camino/tests/test_auto_Conmat.py +++ b/nipype/interfaces/camino/tests/test_auto_Conmat.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..connectivity import Conmat diff --git a/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py b/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py index b20dcbf3bf..1e19db47c1 100644 --- a/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py +++ b/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..convert import DT2NIfTI diff --git a/nipype/interfaces/camino/tests/test_auto_DTIFit.py b/nipype/interfaces/camino/tests/test_auto_DTIFit.py index a23da89552..addceabfcf 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTIFit.py +++ b/nipype/interfaces/camino/tests/test_auto_DTIFit.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import DTIFit diff --git a/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py b/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py index 95e8e0bea5..4fa20efa22 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py +++ b/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import DTLUTGen 
diff --git a/nipype/interfaces/camino/tests/test_auto_DTMetric.py b/nipype/interfaces/camino/tests/test_auto_DTMetric.py index 12256a50d8..22fd8dab6a 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTMetric.py +++ b/nipype/interfaces/camino/tests/test_auto_DTMetric.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import DTMetric diff --git a/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py b/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py index a2ebfbf0a6..0af0ffd55e 100644 --- a/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py +++ b/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..convert import FSL2Scheme diff --git a/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py b/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py index adae10f0bb..7f69837444 100644 --- a/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py +++ b/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..convert import Image2Voxel diff --git a/nipype/interfaces/camino/tests/test_auto_ImageStats.py b/nipype/interfaces/camino/tests/test_auto_ImageStats.py index 3813051025..8608bcf1fd 100644 --- a/nipype/interfaces/camino/tests/test_auto_ImageStats.py +++ b/nipype/interfaces/camino/tests/test_auto_ImageStats.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import ImageStats diff --git a/nipype/interfaces/camino/tests/test_auto_LinRecon.py b/nipype/interfaces/camino/tests/test_auto_LinRecon.py index 8998f42602..960e50b57a 100644 --- a/nipype/interfaces/camino/tests/test_auto_LinRecon.py +++ b/nipype/interfaces/camino/tests/test_auto_LinRecon.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..odf import LinRecon diff --git a/nipype/interfaces/camino/tests/test_auto_MESD.py b/nipype/interfaces/camino/tests/test_auto_MESD.py index 73089a3395..abdffa0819 100644 --- a/nipype/interfaces/camino/tests/test_auto_MESD.py +++ b/nipype/interfaces/camino/tests/test_auto_MESD.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..odf import MESD diff --git a/nipype/interfaces/camino/tests/test_auto_ModelFit.py b/nipype/interfaces/camino/tests/test_auto_ModelFit.py index 04d22bbe59..e9fdf86251 100644 --- a/nipype/interfaces/camino/tests/test_auto_ModelFit.py +++ b/nipype/interfaces/camino/tests/test_auto_ModelFit.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import ModelFit diff --git a/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py b/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py index 9a4867a73c..2864e3a374 100644 --- a/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py +++ b/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..convert import NIfTIDT2Camino diff --git a/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py b/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py index f2b59666a2..17d8e10ebc 100644 --- 
a/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py +++ b/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import PicoPDFs diff --git a/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py b/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py index 8838bb72cf..48a8c7c8ea 100644 --- a/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py +++ b/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..convert import ProcStreamlines diff --git a/nipype/interfaces/camino/tests/test_auto_QBallMX.py b/nipype/interfaces/camino/tests/test_auto_QBallMX.py index e965cb606a..c3049ddf10 100644 --- a/nipype/interfaces/camino/tests/test_auto_QBallMX.py +++ b/nipype/interfaces/camino/tests/test_auto_QBallMX.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..odf import QBallMX diff --git a/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py b/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py index c52ca04929..2fc3036dc6 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py +++ b/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..calib import SFLUTGen diff --git a/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py b/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py index e83a01e2c3..cb7ed0fe25 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py +++ b/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..calib import SFPICOCalibData diff --git a/nipype/interfaces/camino/tests/test_auto_SFPeaks.py b/nipype/interfaces/camino/tests/test_auto_SFPeaks.py index 959545d042..bbfd8ea602 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFPeaks.py +++ b/nipype/interfaces/camino/tests/test_auto_SFPeaks.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..odf import SFPeaks diff --git a/nipype/interfaces/camino/tests/test_auto_Shredder.py b/nipype/interfaces/camino/tests/test_auto_Shredder.py index 1099693874..29b60e886a 100644 --- a/nipype/interfaces/camino/tests/test_auto_Shredder.py +++ b/nipype/interfaces/camino/tests/test_auto_Shredder.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..convert import Shredder diff --git a/nipype/interfaces/camino/tests/test_auto_Track.py b/nipype/interfaces/camino/tests/test_auto_Track.py index ad8d3ff2bb..b0ec0f157e 100644 --- a/nipype/interfaces/camino/tests/test_auto_Track.py +++ b/nipype/interfaces/camino/tests/test_auto_Track.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import Track diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py b/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py index 89515cef55..b055e4e4eb 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py 
- DO NOT EDIT -from __future__ import unicode_literals from ..dti import TrackBallStick diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py b/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py index 2815ef18ee..9f0e8443d1 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import TrackBayesDirac diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py index 827dbb2f27..e0f1feb8ca 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import TrackBedpostxDeter diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py index e250ec9c40..ce93d7e275 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import TrackBedpostxProba diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py b/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py index 90ffd05a2d..037506a7e9 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import TrackBootstrap diff --git a/nipype/interfaces/camino/tests/test_auto_TrackDT.py b/nipype/interfaces/camino/tests/test_auto_TrackDT.py index 7ab3772468..d9f61370bd 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackDT.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackDT.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import TrackDT diff --git a/nipype/interfaces/camino/tests/test_auto_TrackPICo.py b/nipype/interfaces/camino/tests/test_auto_TrackPICo.py index 30ff12db9f..9732b8f5f8 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackPICo.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackPICo.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import TrackPICo diff --git a/nipype/interfaces/camino/tests/test_auto_TractShredder.py b/nipype/interfaces/camino/tests/test_auto_TractShredder.py index c9cf40e1c3..80a5e5a7a6 100644 --- a/nipype/interfaces/camino/tests/test_auto_TractShredder.py +++ b/nipype/interfaces/camino/tests/test_auto_TractShredder.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..convert import TractShredder diff --git a/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py b/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py index 8dc8bd03e9..0d1470b998 100644 --- a/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py +++ b/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from 
__future__ import unicode_literals from ..convert import VtkStreamlines diff --git a/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py b/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py index b4d5092dab..4d7b622945 100644 --- a/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py +++ b/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..convert import Camino2Trackvis diff --git a/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py b/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py index 978b2439a2..fc8c619e30 100644 --- a/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py +++ b/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..convert import Trackvis2Camino diff --git a/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py b/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py index a80bbe757a..c9b6588eba 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py +++ b/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..nx import AverageNetworks diff --git a/nipype/interfaces/cmtk/tests/test_auto_CFFBaseInterface.py b/nipype/interfaces/cmtk/tests/test_auto_CFFBaseInterface.py index 942f477518..449ffe2c8c 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_CFFBaseInterface.py +++ b/nipype/interfaces/cmtk/tests/test_auto_CFFBaseInterface.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import CFFBaseInterface diff --git a/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py b/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py index 00a1acea98..97805f169d 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py +++ b/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..convert import CFFConverter diff --git a/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py b/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py index 60e8596f5e..342b7486bf 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py +++ b/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..cmtk import CreateMatrix diff --git a/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py b/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py index 3635f21e59..02e7eef490 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py +++ b/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..cmtk import CreateNodes diff --git a/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py b/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py index ceaa6d8dea..53e2d7aa8e 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py +++ b/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by 
tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..convert import MergeCNetworks diff --git a/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py b/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py index e3220e4790..e703e840f0 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py +++ b/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..nbs import NetworkBasedStatistic diff --git a/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py b/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py index d9a3f0c740..764c073850 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py +++ b/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..nx import NetworkXMetrics diff --git a/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py b/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py index edcdf2e7a1..8e398ac4db 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py +++ b/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..parcellation import Parcellate diff --git a/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py b/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py index dd2ce50aec..f6f13d69d4 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py +++ b/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..cmtk import ROIGen diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py index a39dbf6c3b..f437bcdf8c 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import DTIRecon diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py index cf483d00d0..4cea0a510e 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import DTITracker diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py index 59bc8c25a5..2b8185c8d4 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..odf import HARDIMat diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py index 1e66b93bec..4fd8b31c05 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py @@ -1,5 +1,4 
@@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..odf import ODFRecon diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py index 41b2d530f6..929b0ca9e5 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..odf import ODFTracker diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py index 8648a1f1f9..8eb5ecab83 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..postproc import SplineFilter diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py index b004678175..a95feee63a 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..postproc import TrackMerge diff --git a/nipype/interfaces/dipy/tests/test_auto_APMQball.py b/nipype/interfaces/dipy/tests/test_auto_APMQball.py index f6f3f2e4c6..0ff3ce8549 100644 --- a/nipype/interfaces/dipy/tests/test_auto_APMQball.py +++ b/nipype/interfaces/dipy/tests/test_auto_APMQball.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..anisotropic_power import APMQball diff --git a/nipype/interfaces/dipy/tests/test_auto_CSD.py b/nipype/interfaces/dipy/tests/test_auto_CSD.py index a30efaa3cc..1440c01077 100644 --- a/nipype/interfaces/dipy/tests/test_auto_CSD.py +++ b/nipype/interfaces/dipy/tests/test_auto_CSD.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..reconstruction import CSD diff --git a/nipype/interfaces/dipy/tests/test_auto_DTI.py b/nipype/interfaces/dipy/tests/test_auto_DTI.py index 68c9ae1437..38361c1752 100644 --- a/nipype/interfaces/dipy/tests/test_auto_DTI.py +++ b/nipype/interfaces/dipy/tests/test_auto_DTI.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..tensors import DTI diff --git a/nipype/interfaces/dipy/tests/test_auto_Denoise.py b/nipype/interfaces/dipy/tests/test_auto_Denoise.py index 88a1bc5314..6be88bb0ca 100644 --- a/nipype/interfaces/dipy/tests/test_auto_Denoise.py +++ b/nipype/interfaces/dipy/tests/test_auto_Denoise.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Denoise diff --git a/nipype/interfaces/dipy/tests/test_auto_DipyBaseInterface.py b/nipype/interfaces/dipy/tests/test_auto_DipyBaseInterface.py index e133e266c4..d422dc5290 100644 --- a/nipype/interfaces/dipy/tests/test_auto_DipyBaseInterface.py +++ b/nipype/interfaces/dipy/tests/test_auto_DipyBaseInterface.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import 
unicode_literals from ..base import DipyBaseInterface diff --git a/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py b/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py index ee9022ca58..eb85610eef 100644 --- a/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py +++ b/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import DipyDiffusionInterface diff --git a/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py b/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py index 1270d94b13..d5f4e28fe6 100644 --- a/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py +++ b/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..reconstruction import EstimateResponseSH diff --git a/nipype/interfaces/dipy/tests/test_auto_RESTORE.py b/nipype/interfaces/dipy/tests/test_auto_RESTORE.py index 9f62d05e0c..54b93f4c53 100644 --- a/nipype/interfaces/dipy/tests/test_auto_RESTORE.py +++ b/nipype/interfaces/dipy/tests/test_auto_RESTORE.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..reconstruction import RESTORE diff --git a/nipype/interfaces/dipy/tests/test_auto_Resample.py b/nipype/interfaces/dipy/tests/test_auto_Resample.py index ff51e0efe4..429af5a15f 100644 --- a/nipype/interfaces/dipy/tests/test_auto_Resample.py +++ b/nipype/interfaces/dipy/tests/test_auto_Resample.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Resample diff --git a/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py b/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py index db46a3b982..56f800d98c 100644 --- a/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py +++ b/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..simulate import SimulateMultiTensor diff --git a/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py b/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py index 1bc1a2ea97..64bbf19c77 100644 --- a/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py +++ b/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..tracks import StreamlineTractography diff --git a/nipype/interfaces/dipy/tests/test_auto_TensorMode.py b/nipype/interfaces/dipy/tests/test_auto_TensorMode.py index d072af78fc..f27432871b 100644 --- a/nipype/interfaces/dipy/tests/test_auto_TensorMode.py +++ b/nipype/interfaces/dipy/tests/test_auto_TensorMode.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..tensors import TensorMode diff --git a/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py b/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py index 79af3b5940..9f43210cc5 100644 --- a/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py +++ b/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from 
__future__ import unicode_literals from ..tracks import TrackDensityMap diff --git a/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py b/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py index 5f9262d788..58309b45f4 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py +++ b/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import AffScalarVol diff --git a/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py b/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py index 6d7abc852a..8b235731c5 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py +++ b/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import AffSymTensor3DVol diff --git a/nipype/interfaces/dtitk/tests/test_auto_Affine.py b/nipype/interfaces/dtitk/tests/test_auto_Affine.py index 78d2e6f011..a475b182a9 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_Affine.py +++ b/nipype/interfaces/dtitk/tests/test_auto_Affine.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import Affine diff --git a/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py b/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py index 5f3b43153a..1810d238d5 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import AffineTask diff --git a/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py b/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py index cbd4efccb2..b56f405968 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py +++ b/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import BinThresh diff --git a/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py b/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py index dcd396abe2..60cf100437 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import BinThreshTask diff --git a/nipype/interfaces/dtitk/tests/test_auto_CommandLineDtitk.py b/nipype/interfaces/dtitk/tests/test_auto_CommandLineDtitk.py index b43fcc5e97..2e064a3150 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_CommandLineDtitk.py +++ b/nipype/interfaces/dtitk/tests/test_auto_CommandLineDtitk.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import CommandLineDtitk diff --git a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py index 09fcdc186e..0e1256ae3f 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py +++ b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import ComposeXfm diff --git 
a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py index 99c6f6d340..ddeae4622a 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import ComposeXfmTask diff --git a/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py b/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py index ad532bd631..fddda3f954 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py +++ b/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import Diffeo diff --git a/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py b/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py index 7d1305d384..cd818a52d6 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py +++ b/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import DiffeoScalarVol diff --git a/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py b/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py index b934c56d2b..1b3be6d62b 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py +++ b/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import DiffeoSymTensor3DVol diff --git a/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py b/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py index 5aea665d4c..cada2a9f9b 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import DiffeoTask diff --git a/nipype/interfaces/dtitk/tests/test_auto_Rigid.py b/nipype/interfaces/dtitk/tests/test_auto_Rigid.py index ecb7c2d33b..612a6d006c 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_Rigid.py +++ b/nipype/interfaces/dtitk/tests/test_auto_Rigid.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import Rigid diff --git a/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py b/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py index c627fda741..4c1e46d06f 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import RigidTask diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py index 3574906455..86f91e664f 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py +++ b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import SVAdjustVoxSp diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py 
b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py index 40a7592c19..2c371386be 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import SVAdjustVoxSpTask diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVResample.py b/nipype/interfaces/dtitk/tests/test_auto_SVResample.py index 91ca638f22..95eb4e0bf7 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_SVResample.py +++ b/nipype/interfaces/dtitk/tests/test_auto_SVResample.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import SVResample diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py b/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py index 8a7574bfd8..ead9ff81ed 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import SVResampleTask diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py index bda9128369..4c6d2392e1 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import TVAdjustOriginTask diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py index b8ce9039d5..bcadb6362f 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import TVAdjustVoxSp diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py index e9ef8137dc..6da06f1582 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import TVAdjustVoxSpTask diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVResample.py b/nipype/interfaces/dtitk/tests/test_auto_TVResample.py index aefafc6430..e12b094107 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVResample.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVResample.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import TVResample diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py index b3c70bb729..604f61a71b 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import TVResampleTask diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVtool.py 
b/nipype/interfaces/dtitk/tests/test_auto_TVtool.py index 2267228631..59ed29ad4b 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVtool.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVtool.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import TVtool diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py index 252d7c9d0a..db0e944d0e 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import TVtoolTask diff --git a/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py index 72330737b6..00f520ca15 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import affScalarVolTask diff --git a/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py index da8fae25cc..d86c71e121 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import affSymTensor3DVolTask diff --git a/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py index 10965b7077..350f5ded11 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import diffeoScalarVolTask diff --git a/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py index 52112735b1..40ccf89eec 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import diffeoSymTensor3DVolTask diff --git a/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py b/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py index cb486b8487..8e3318d296 100644 --- a/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py +++ b/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import AnalyzeWarp diff --git a/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py b/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py index 16f2b54079..dfd97421d7 100644 --- a/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py +++ b/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import ApplyWarp diff --git 
a/nipype/interfaces/elastix/tests/test_auto_EditTransform.py b/nipype/interfaces/elastix/tests/test_auto_EditTransform.py index ef227da7f8..730b14b6bd 100644 --- a/nipype/interfaces/elastix/tests/test_auto_EditTransform.py +++ b/nipype/interfaces/elastix/tests/test_auto_EditTransform.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import EditTransform diff --git a/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py b/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py index c93a0526c1..fd4d04f21e 100644 --- a/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py +++ b/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import PointsWarp diff --git a/nipype/interfaces/elastix/tests/test_auto_Registration.py b/nipype/interfaces/elastix/tests/test_auto_Registration.py index 4e774ab036..15f1929ba0 100644 --- a/nipype/interfaces/elastix/tests/test_auto_Registration.py +++ b/nipype/interfaces/elastix/tests/test_auto_Registration.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import Registration diff --git a/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py b/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py index ed32693df5..6a37ea98d7 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import AddXFormToHeader diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py b/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py index fc5bc39b6f..c230c7510d 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Aparc2Aseg diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py b/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py index 7044fdde2f..a3eaed3ce4 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Apas2Aseg diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py b/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py index 69f56d7dde..96a411642d 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import ApplyMask diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py b/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py index c9e8f85904..3fd68f696e 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess 
import ApplyVolTransform diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py b/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py index 93db55cd30..1913aeac4d 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import Binarize diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py b/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py index 1cf35fcedb..0b8611da10 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import CALabel diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py b/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py index 0e8ec025ec..8fc53bfab1 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import CANormalize diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py b/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py index e3cbf52f10..d2a3435aec 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import CARegister diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py b/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py index a7d3ad3bb0..69d5c88858 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import CheckTalairachAlignment diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py b/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py index e4e8efb718..a68bb2af3f 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import Concatenate diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py b/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py index 40a465b249..ace0296c0f 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import ConcatenateLTA diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py b/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py index 7999001813..df405f189e 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO 
NOT EDIT -from __future__ import unicode_literals from ..utils import Contrast diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py b/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py index f29b76df29..12290d76c5 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Curvature diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py b/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py index 19d6a4772e..2146e30134 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import CurvatureStats diff --git a/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py index efd53fb773..ca7f820925 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import DICOMConvert diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py b/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py index e681a03871..af1fd66181 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import EMRegister diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py b/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py index f971f4fab9..1d26f90b72 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import EditWMwithAseg diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py b/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py index 996d079b48..4defac4c67 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import EulerNumber diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py b/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py index f3f4896a75..c7de90d578 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import ExtractMainComponent diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py b/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py index 0546d99006..dc7678e7e6 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py +++ 
b/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import FSCommand diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py b/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py index 5b8bca1484..b4c56e8aeb 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import FSCommandOpenMP diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py b/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py index e791bb5976..39b16b845e 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import FSScriptCommand diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py b/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py index 9e6b1cbc8b..aa54604d61 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import FitMSParams diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py b/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py index 8b61823b42..d609b515bd 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import FixTopology diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py b/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py index 8608444fc8..672f11caa0 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..longitudinal import FuseSegmentations diff --git a/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py b/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py index 828bbfab03..f1554c2a08 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import GLMFit diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py b/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py index 2fa225e87e..46b79ffffd 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import ImageInfo diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py b/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py index 49f5e6b48f..d6a7623ddb 100644 --- 
a/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Jacobian diff --git a/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py index bcbf971d01..c6cfc4e2d9 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import LTAConvert diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py b/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py index 2d7761006b..fe56719f86 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import Label2Annot diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py b/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py index 7511cd0dbb..fdc952bcc9 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import Label2Label diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py b/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py index fb2726635f..d11a4ddc25 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import Label2Vol diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py b/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py index 7b6ae4a945..78be667586 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import MNIBiasCorrection diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py b/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py index b4b7436120..9f98ba4931 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import MPRtoMNI305 diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py index b44cf7a308..5e826bbbbf 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import MRIConvert diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py b/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py index 
6c12cabdc2..a808a15235 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import MRICoreg diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py index 719986961d..2e9cf785d9 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import MRIFill diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py index f43d217256..8e42484200 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import MRIMarchingCubes diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py index 2e7c7d2ea5..61ed187ea9 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import MRIPretess diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py index c92b4fea15..a8fb58aff9 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import MRISPreproc diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py index e72d8adb9e..d36dcb9b05 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import MRISPreprocReconAll diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py b/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py index a5899cfe70..f71b331e66 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import MRITessellate diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py index 441be3e0a4..f76b3ab0ab 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import MRIsCALabel diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py 
b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py index cbd68c8457..80b395654c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import MRIsCalc diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py index 657292c5c7..07ff63969c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import MRIsCombine diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py index 21622b2449..ddf1cfb00f 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import MRIsConvert diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py index 8639c1e7b6..7cf9654b5c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import MRIsExpand diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py index 73e48a2521..06f14228e8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import MRIsInflate diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py b/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py index f3453818c5..11e07e9e5c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import MS_LDA diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py b/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py index 5dfb555346..63e75d33a8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import MakeAverageSubject diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py b/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py index 7ff18eeba6..23dac2ef86 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import MakeSurfaces diff --git 
a/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py b/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py index 1923b5ceed..ec926d60b0 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Normalize diff --git a/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py b/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py index 1091bdbdc3..15fba362ae 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import OneSampleTTest diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Paint.py b/nipype/interfaces/freesurfer/tests/test_auto_Paint.py index c452594e55..643edc75f9 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Paint.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Paint.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import Paint diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py b/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py index 81ac276b2f..6e3d8a904a 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import ParcellationStats diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py b/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py index 93254ddcb3..6031d6c072 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import ParseDICOMDir diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py index 39f6a8c942..a167ba20a3 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import ReconAll diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Register.py b/nipype/interfaces/freesurfer/tests/test_auto_Register.py index b4eff5133c..5dd3be2424 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Register.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Register.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import Register diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py b/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py index 0bc88bf935..0662c8d428 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from 
__future__ import unicode_literals from ..registration import RegisterAVItoTalairach diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py b/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py index 859aff0820..d7db4be8ab 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import RelabelHypointensities diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py b/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py index 69e1d453a4..7f25dbcee6 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import RemoveIntersection diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py b/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py index 9e095ddba0..0444cdd549 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import RemoveNeck diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Resample.py b/nipype/interfaces/freesurfer/tests/test_auto_Resample.py index ecb939d89a..3cc1770bf7 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Resample.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Resample.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Resample diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py b/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py index af9020c278..4a7917842b 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import RobustRegister diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py b/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py index e845a764e4..6ed1bd47ef 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..longitudinal import RobustTemplate diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py b/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py index 0926eebba2..d162ee3061 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import SampleToSurface diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py b/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py index 8feb61d9d8..9ba9cfa161 100644 --- 
a/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import SegStats diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py index e65dc82e3b..b7681a46a5 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import SegStatsReconAll diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py b/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py index e6b5fb2679..76bcf6e076 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import SegmentCC diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py b/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py index aa742e8fea..3e2b8987aa 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import SegmentWM diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py b/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py index fe4581dee0..3a427e21c1 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Smooth diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py b/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py index 8ce4dce075..742db823e4 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import SmoothTessellation diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py b/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py index 461398e6a8..c871c1c1f1 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Sphere diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py b/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py index efdc032787..14ba83b9a2 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import SphericalAverage diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py 
b/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py index ca3f96c42b..cd4f8ac7e2 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Surface2VolTransform diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py index 84bef6ed7a..3edb4f52ec 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import SurfaceSmooth diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py index a413d410b1..8e22824a00 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import SurfaceSnapshots diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py index 0546a275dc..1fdf458752 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import SurfaceTransform diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py b/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py index ea121d877e..f2cb589ba9 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import SynthesizeFLASH diff --git a/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py b/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py index 8ce925fcc7..b0e040dc6c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import TalairachAVI diff --git a/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py b/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py index d07e572365..6acb4f4890 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import TalairachQC diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py b/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py index 17f8e53a1f..5c74cbf5f4 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from 
__future__ import unicode_literals from ..utils import Tkregister2 diff --git a/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py b/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py index 9427e60940..8a67ccfed6 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import UnpackSDICOMDir diff --git a/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py b/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py index 3e898a81f7..bde3c12faa 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import VolumeMask diff --git a/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py b/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py index 3586e7d234..0442527088 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import WatershedSkullStrip diff --git a/nipype/interfaces/fsl/tests/test_auto_AR1Image.py b/nipype/interfaces/fsl/tests/test_auto_AR1Image.py index 32efb1826e..5dbfaddcf8 100644 --- a/nipype/interfaces/fsl/tests/test_auto_AR1Image.py +++ b/nipype/interfaces/fsl/tests/test_auto_AR1Image.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import AR1Image diff --git a/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py b/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py index 062d6367f7..f6f7522e96 100644 --- a/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py +++ b/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..fix import AccuracyTester diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py b/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py index 8a39956c18..35478d618e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import ApplyMask diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py b/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py index 7a803f9695..6853c02725 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..epi import ApplyTOPUP diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py b/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py index 71977cb873..fd404effcf 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import ApplyWarp diff --git 
a/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py b/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py index f515be1f04..4258934546 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import ApplyXFM diff --git a/nipype/interfaces/fsl/tests/test_auto_AvScale.py b/nipype/interfaces/fsl/tests/test_auto_AvScale.py index 762ada916d..6a3ba79710 100644 --- a/nipype/interfaces/fsl/tests/test_auto_AvScale.py +++ b/nipype/interfaces/fsl/tests/test_auto_AvScale.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import AvScale diff --git a/nipype/interfaces/fsl/tests/test_auto_B0Calc.py b/nipype/interfaces/fsl/tests/test_auto_B0Calc.py index 96ec6b949c..da6edd7ddc 100644 --- a/nipype/interfaces/fsl/tests/test_auto_B0Calc.py +++ b/nipype/interfaces/fsl/tests/test_auto_B0Calc.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..possum import B0Calc diff --git a/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py b/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py index 1ac5db111b..3577859a3b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py +++ b/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import BEDPOSTX5 diff --git a/nipype/interfaces/fsl/tests/test_auto_BET.py b/nipype/interfaces/fsl/tests/test_auto_BET.py index eccf348b3e..db5426a78f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_BET.py +++ b/nipype/interfaces/fsl/tests/test_auto_BET.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import BET diff --git a/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py b/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py index 2548ae68e5..194abaeda2 100644 --- a/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import BinaryMaths diff --git a/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py b/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py index aab508f2bf..8b5e004329 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py +++ b/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import ChangeDataType diff --git a/nipype/interfaces/fsl/tests/test_auto_Classifier.py b/nipype/interfaces/fsl/tests/test_auto_Classifier.py index a8db888acb..b1b914519f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Classifier.py +++ b/nipype/interfaces/fsl/tests/test_auto_Classifier.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..fix import Classifier diff --git a/nipype/interfaces/fsl/tests/test_auto_Cleaner.py b/nipype/interfaces/fsl/tests/test_auto_Cleaner.py index 5fc505a174..26f8516a3b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Cleaner.py +++ b/nipype/interfaces/fsl/tests/test_auto_Cleaner.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py 
- DO NOT EDIT -from __future__ import unicode_literals from ..fix import Cleaner diff --git a/nipype/interfaces/fsl/tests/test_auto_Cluster.py b/nipype/interfaces/fsl/tests/test_auto_Cluster.py index f876f85e64..bfcf6755ab 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Cluster.py +++ b/nipype/interfaces/fsl/tests/test_auto_Cluster.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import Cluster diff --git a/nipype/interfaces/fsl/tests/test_auto_Complex.py b/nipype/interfaces/fsl/tests/test_auto_Complex.py index 7e49bcdfa8..f4265d5030 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Complex.py +++ b/nipype/interfaces/fsl/tests/test_auto_Complex.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Complex diff --git a/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py b/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py index a82ea8fe43..9e90f36871 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py +++ b/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import ContrastMgr diff --git a/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py b/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py index 2ed14aaad2..df1daf2e84 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import ConvertWarp diff --git a/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py b/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py index c017a39a52..d97527368c 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py +++ b/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import ConvertXFM diff --git a/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py b/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py index c66feff211..8dceb7dbd5 100644 --- a/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py +++ b/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import CopyGeom diff --git a/nipype/interfaces/fsl/tests/test_auto_DTIFit.py b/nipype/interfaces/fsl/tests/test_auto_DTIFit.py index 8bfdf5f36e..bf4d71eeac 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DTIFit.py +++ b/nipype/interfaces/fsl/tests/test_auto_DTIFit.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import DTIFit diff --git a/nipype/interfaces/fsl/tests/test_auto_DilateImage.py b/nipype/interfaces/fsl/tests/test_auto_DilateImage.py index 3ed43fbd22..7b84d284ca 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DilateImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_DilateImage.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import DilateImage diff --git a/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py b/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py index 5f3321de78..25696e1562 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py 
+++ b/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import DistanceMap diff --git a/nipype/interfaces/fsl/tests/test_auto_DualRegression.py b/nipype/interfaces/fsl/tests/test_auto_DualRegression.py index 4c2fdd3ceb..b7cf9becce 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DualRegression.py +++ b/nipype/interfaces/fsl/tests/test_auto_DualRegression.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import DualRegression diff --git a/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py b/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py index 2a5f0c86b6..450c1d5dcf 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..epi import EPIDeWarp diff --git a/nipype/interfaces/fsl/tests/test_auto_Eddy.py b/nipype/interfaces/fsl/tests/test_auto_Eddy.py index 1e3cdf644d..11f0a80ced 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Eddy.py +++ b/nipype/interfaces/fsl/tests/test_auto_Eddy.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..epi import Eddy diff --git a/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py b/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py index e88219aa04..9915ba0217 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py +++ b/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..epi import EddyCorrect diff --git a/nipype/interfaces/fsl/tests/test_auto_EpiReg.py b/nipype/interfaces/fsl/tests/test_auto_EpiReg.py index 29a935333e..143176f130 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EpiReg.py +++ b/nipype/interfaces/fsl/tests/test_auto_EpiReg.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..epi import EpiReg diff --git a/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py b/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py index 797a403d45..99d7c23f36 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import ErodeImage diff --git a/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py b/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py index df8de64144..6917397821 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py +++ b/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import ExtractROI diff --git a/nipype/interfaces/fsl/tests/test_auto_FAST.py b/nipype/interfaces/fsl/tests/test_auto_FAST.py index 0b983181af..b89147c333 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FAST.py +++ b/nipype/interfaces/fsl/tests/test_auto_FAST.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import FAST diff --git a/nipype/interfaces/fsl/tests/test_auto_FEAT.py b/nipype/interfaces/fsl/tests/test_auto_FEAT.py index 
01fc72506f..43e7ce8e96 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FEAT.py +++ b/nipype/interfaces/fsl/tests/test_auto_FEAT.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import FEAT diff --git a/nipype/interfaces/fsl/tests/test_auto_FEATModel.py b/nipype/interfaces/fsl/tests/test_auto_FEATModel.py index 34ea37d47f..9c3516da39 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FEATModel.py +++ b/nipype/interfaces/fsl/tests/test_auto_FEATModel.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import FEATModel diff --git a/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py b/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py index a8f59a0ec3..45494a73d6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py +++ b/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import FEATRegister diff --git a/nipype/interfaces/fsl/tests/test_auto_FIRST.py b/nipype/interfaces/fsl/tests/test_auto_FIRST.py index 964ee4922c..0dbba25d87 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FIRST.py +++ b/nipype/interfaces/fsl/tests/test_auto_FIRST.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import FIRST diff --git a/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py b/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py index bd335282e3..be971096c1 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py +++ b/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import FLAMEO diff --git a/nipype/interfaces/fsl/tests/test_auto_FLIRT.py b/nipype/interfaces/fsl/tests/test_auto_FLIRT.py index 0b59550e5a..2c3c970279 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FLIRT.py +++ b/nipype/interfaces/fsl/tests/test_auto_FLIRT.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import FLIRT diff --git a/nipype/interfaces/fsl/tests/test_auto_FNIRT.py b/nipype/interfaces/fsl/tests/test_auto_FNIRT.py index 2b7d0b1b00..30516dfbe8 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FNIRT.py +++ b/nipype/interfaces/fsl/tests/test_auto_FNIRT.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import FNIRT diff --git a/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py b/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py index 1d95b77df5..5398e4532d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py +++ b/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import FSLCommand diff --git a/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py b/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py index fd85eee3bf..a6e234a104 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py +++ b/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import FSLXCommand diff --git 
a/nipype/interfaces/fsl/tests/test_auto_FUGUE.py b/nipype/interfaces/fsl/tests/test_auto_FUGUE.py index 0e96f1e867..f7d74947a0 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FUGUE.py +++ b/nipype/interfaces/fsl/tests/test_auto_FUGUE.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import FUGUE diff --git a/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py b/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py index 3945c40a87..f62ffc451f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py +++ b/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..fix import FeatureExtractor diff --git a/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py b/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py index 36cbb979d7..a11b8bdde5 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py +++ b/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import FilterRegressor diff --git a/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py b/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py index 3731c842e7..774d7389bf 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py +++ b/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import FindTheBiggest diff --git a/nipype/interfaces/fsl/tests/test_auto_GLM.py b/nipype/interfaces/fsl/tests/test_auto_GLM.py index 1dbf7eba19..b5826c6a52 100644 --- a/nipype/interfaces/fsl/tests/test_auto_GLM.py +++ b/nipype/interfaces/fsl/tests/test_auto_GLM.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import GLM diff --git a/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py b/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py index 05bbb5b106..2d532ac064 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py +++ b/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..aroma import ICA_AROMA diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py b/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py index 6d4e06827e..1f76edd76a 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import ImageMaths diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py b/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py index 042f93112b..d9b8d4571f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py +++ b/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import ImageMeants diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageStats.py b/nipype/interfaces/fsl/tests/test_auto_ImageStats.py index 91a6059798..59cf5495d7 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ImageStats.py +++ 
b/nipype/interfaces/fsl/tests/test_auto_ImageStats.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import ImageStats diff --git a/nipype/interfaces/fsl/tests/test_auto_InvWarp.py b/nipype/interfaces/fsl/tests/test_auto_InvWarp.py index 973f42ea34..a0d18a6183 100644 --- a/nipype/interfaces/fsl/tests/test_auto_InvWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_InvWarp.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import InvWarp diff --git a/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py b/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py index 4b6192c98d..6614e04abb 100644 --- a/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py +++ b/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import IsotropicSmooth diff --git a/nipype/interfaces/fsl/tests/test_auto_L2Model.py b/nipype/interfaces/fsl/tests/test_auto_L2Model.py index 9d3588666f..b159565c18 100644 --- a/nipype/interfaces/fsl/tests/test_auto_L2Model.py +++ b/nipype/interfaces/fsl/tests/test_auto_L2Model.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import L2Model diff --git a/nipype/interfaces/fsl/tests/test_auto_Level1Design.py b/nipype/interfaces/fsl/tests/test_auto_Level1Design.py index 45451f8eff..8b1c076ac6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Level1Design.py +++ b/nipype/interfaces/fsl/tests/test_auto_Level1Design.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import Level1Design diff --git a/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py b/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py index daeca07b10..90057d88cb 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py +++ b/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import MCFLIRT diff --git a/nipype/interfaces/fsl/tests/test_auto_MELODIC.py b/nipype/interfaces/fsl/tests/test_auto_MELODIC.py index b22078c450..831acc1618 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MELODIC.py +++ b/nipype/interfaces/fsl/tests/test_auto_MELODIC.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import MELODIC diff --git a/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py b/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py index c7a5a3e465..893795447c 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py +++ b/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import MakeDyadicVectors diff --git a/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py b/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py index 64b0f8b089..749c87cfe2 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py +++ b/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import MathsCommand diff --git 
a/nipype/interfaces/fsl/tests/test_auto_MaxImage.py b/nipype/interfaces/fsl/tests/test_auto_MaxImage.py index 9910f9c4e8..1f7cd7e970 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MaxImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MaxImage.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import MaxImage diff --git a/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py b/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py index f49c5f462c..bff13d7544 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import MaxnImage diff --git a/nipype/interfaces/fsl/tests/test_auto_MeanImage.py b/nipype/interfaces/fsl/tests/test_auto_MeanImage.py index 2172dcfa9e..2e599c5a15 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MeanImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MeanImage.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import MeanImage diff --git a/nipype/interfaces/fsl/tests/test_auto_MedianImage.py b/nipype/interfaces/fsl/tests/test_auto_MedianImage.py index c14bf8d839..5fec30259f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MedianImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MedianImage.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import MedianImage diff --git a/nipype/interfaces/fsl/tests/test_auto_Merge.py b/nipype/interfaces/fsl/tests/test_auto_Merge.py index 826270239e..6af08ba6e8 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Merge.py +++ b/nipype/interfaces/fsl/tests/test_auto_Merge.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Merge diff --git a/nipype/interfaces/fsl/tests/test_auto_MinImage.py b/nipype/interfaces/fsl/tests/test_auto_MinImage.py index 4e9002c259..3359a05d53 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MinImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MinImage.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import MinImage diff --git a/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py b/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py index 97d2426b53..0c0bff09ba 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py +++ b/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import MotionOutliers diff --git a/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py b/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py index 50a2977a4c..201eb78165 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import MultiImageMaths diff --git a/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py b/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py index 78bd97ff85..6202cbecbc 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py +++ 
b/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import MultipleRegressDesign diff --git a/nipype/interfaces/fsl/tests/test_auto_Overlay.py b/nipype/interfaces/fsl/tests/test_auto_Overlay.py index 9818ae1eda..f124e41897 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Overlay.py +++ b/nipype/interfaces/fsl/tests/test_auto_Overlay.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Overlay diff --git a/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py b/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py index 328a8e3272..e6169c59d4 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py +++ b/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import PRELUDE diff --git a/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py b/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py index 4e08c18db0..1b93e4f422 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import PercentileImage diff --git a/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py b/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py index 45a5b43945..a7203e2f57 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py +++ b/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import PlotMotionParams diff --git a/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py b/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py index 62ced498e1..75da74289b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py +++ b/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import PlotTimeSeries diff --git a/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py b/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py index 18a812c00b..5a041a87ab 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py +++ b/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import PowerSpectrum diff --git a/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py b/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py index 66bfd51fab..ffc3dfa055 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py +++ b/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..epi import PrepareFieldmap diff --git a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py index 5368d0dd37..25d8926359 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py +++ b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ 
import unicode_literals from ..dti import ProbTrackX diff --git a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py index 8592b5ae1a..8b2095eb7f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py +++ b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import ProbTrackX2 diff --git a/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py b/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py index dc3878e2c0..304beace0d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py +++ b/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import ProjThresh diff --git a/nipype/interfaces/fsl/tests/test_auto_Randomise.py b/nipype/interfaces/fsl/tests/test_auto_Randomise.py index 3d52347265..48e7c0cfad 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Randomise.py +++ b/nipype/interfaces/fsl/tests/test_auto_Randomise.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import Randomise diff --git a/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py b/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py index 76647d82d8..a6a028f3f8 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py +++ b/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Reorient2Std diff --git a/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py b/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py index e1cbac6fc9..b23b6a30cb 100644 --- a/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py +++ b/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import RobustFOV diff --git a/nipype/interfaces/fsl/tests/test_auto_SMM.py b/nipype/interfaces/fsl/tests/test_auto_SMM.py index 1e1cc308cc..28ac3b4d64 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SMM.py +++ b/nipype/interfaces/fsl/tests/test_auto_SMM.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import SMM diff --git a/nipype/interfaces/fsl/tests/test_auto_SUSAN.py b/nipype/interfaces/fsl/tests/test_auto_SUSAN.py index ebbef0d427..256596c806 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SUSAN.py +++ b/nipype/interfaces/fsl/tests/test_auto_SUSAN.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import SUSAN diff --git a/nipype/interfaces/fsl/tests/test_auto_SigLoss.py b/nipype/interfaces/fsl/tests/test_auto_SigLoss.py index f3be97b350..d59dc2f6ac 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SigLoss.py +++ b/nipype/interfaces/fsl/tests/test_auto_SigLoss.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import SigLoss diff --git a/nipype/interfaces/fsl/tests/test_auto_Slice.py b/nipype/interfaces/fsl/tests/test_auto_Slice.py index d1e9093e37..11d4b8ad01 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Slice.py +++ b/nipype/interfaces/fsl/tests/test_auto_Slice.py @@ -1,5 +1,4 @@ 
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Slice diff --git a/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py b/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py index 75b9918ad9..e57ab30d3b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py +++ b/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import SliceTimer diff --git a/nipype/interfaces/fsl/tests/test_auto_Slicer.py b/nipype/interfaces/fsl/tests/test_auto_Slicer.py index d313cda474..8220b0090c 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Slicer.py +++ b/nipype/interfaces/fsl/tests/test_auto_Slicer.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Slicer diff --git a/nipype/interfaces/fsl/tests/test_auto_Smooth.py b/nipype/interfaces/fsl/tests/test_auto_Smooth.py index e8d8e06117..529e972370 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Smooth.py +++ b/nipype/interfaces/fsl/tests/test_auto_Smooth.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Smooth diff --git a/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py b/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py index df7544f931..b09a40ffc5 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py +++ b/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import SmoothEstimate diff --git a/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py b/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py index d62f904655..8e2101a897 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py +++ b/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import SpatialFilter diff --git a/nipype/interfaces/fsl/tests/test_auto_Split.py b/nipype/interfaces/fsl/tests/test_auto_Split.py index cec2ab462d..234c556ff2 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Split.py +++ b/nipype/interfaces/fsl/tests/test_auto_Split.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Split diff --git a/nipype/interfaces/fsl/tests/test_auto_StdImage.py b/nipype/interfaces/fsl/tests/test_auto_StdImage.py index 302ab9c4c2..0837f10367 100644 --- a/nipype/interfaces/fsl/tests/test_auto_StdImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_StdImage.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import StdImage diff --git a/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py b/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py index 7cbb57491c..ade3c61f5b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py +++ b/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import SwapDimensions diff --git a/nipype/interfaces/fsl/tests/test_auto_TOPUP.py b/nipype/interfaces/fsl/tests/test_auto_TOPUP.py index 0e7e89e4e3..1efe1d5103 100644 
--- a/nipype/interfaces/fsl/tests/test_auto_TOPUP.py +++ b/nipype/interfaces/fsl/tests/test_auto_TOPUP.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..epi import TOPUP diff --git a/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py b/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py index c762b99d31..90c4c22ca1 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py +++ b/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import TemporalFilter diff --git a/nipype/interfaces/fsl/tests/test_auto_Threshold.py b/nipype/interfaces/fsl/tests/test_auto_Threshold.py index 5c2bb46cc8..a4f7a0a711 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Threshold.py +++ b/nipype/interfaces/fsl/tests/test_auto_Threshold.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import Threshold diff --git a/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py b/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py index 4308ee8153..7944dfb867 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py +++ b/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import TractSkeleton diff --git a/nipype/interfaces/fsl/tests/test_auto_Training.py b/nipype/interfaces/fsl/tests/test_auto_Training.py index 91d48e4c01..f289ba0ad8 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Training.py +++ b/nipype/interfaces/fsl/tests/test_auto_Training.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..fix import Training diff --git a/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py b/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py index 18ef078a79..04fc159c77 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py +++ b/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..fix import TrainingSetCreator diff --git a/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py b/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py index a0dcf2610d..4b20b45ef9 100644 --- a/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import UnaryMaths diff --git a/nipype/interfaces/fsl/tests/test_auto_VecReg.py b/nipype/interfaces/fsl/tests/test_auto_VecReg.py index 61a28f7369..cf307fedb7 100644 --- a/nipype/interfaces/fsl/tests/test_auto_VecReg.py +++ b/nipype/interfaces/fsl/tests/test_auto_VecReg.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import VecReg diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py b/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py index 30924223cf..3d7df8f05e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from 
..utils import WarpPoints diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py b/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py index 627ef60ad6..52bfbeaf6d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import WarpPointsFromStd diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py b/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py index 3d37ad1486..a1498acbc6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import WarpPointsToStd diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py b/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py index 04d3a05f14..8e73a68622 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import WarpUtils diff --git a/nipype/interfaces/fsl/tests/test_auto_XFibres5.py b/nipype/interfaces/fsl/tests/test_auto_XFibres5.py index d72bb3bb4b..9278dc95ba 100644 --- a/nipype/interfaces/fsl/tests/test_auto_XFibres5.py +++ b/nipype/interfaces/fsl/tests/test_auto_XFibres5.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dti import XFibres5 diff --git a/nipype/interfaces/minc/tests/test_auto_Average.py b/nipype/interfaces/minc/tests/test_auto_Average.py index 57bdc5ccd2..cac8c62a83 100644 --- a/nipype/interfaces/minc/tests/test_auto_Average.py +++ b/nipype/interfaces/minc/tests/test_auto_Average.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import Average diff --git a/nipype/interfaces/minc/tests/test_auto_BBox.py b/nipype/interfaces/minc/tests/test_auto_BBox.py index a34aeab9b5..c721b1ba8f 100644 --- a/nipype/interfaces/minc/tests/test_auto_BBox.py +++ b/nipype/interfaces/minc/tests/test_auto_BBox.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import BBox diff --git a/nipype/interfaces/minc/tests/test_auto_Beast.py b/nipype/interfaces/minc/tests/test_auto_Beast.py index 4834cf3c4a..39359069ff 100644 --- a/nipype/interfaces/minc/tests/test_auto_Beast.py +++ b/nipype/interfaces/minc/tests/test_auto_Beast.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import Beast diff --git a/nipype/interfaces/minc/tests/test_auto_BestLinReg.py b/nipype/interfaces/minc/tests/test_auto_BestLinReg.py index fb9061040a..e13ab5fd89 100644 --- a/nipype/interfaces/minc/tests/test_auto_BestLinReg.py +++ b/nipype/interfaces/minc/tests/test_auto_BestLinReg.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import BestLinReg diff --git a/nipype/interfaces/minc/tests/test_auto_BigAverage.py b/nipype/interfaces/minc/tests/test_auto_BigAverage.py index ce1fb2b91e..fa098d09ba 100644 --- a/nipype/interfaces/minc/tests/test_auto_BigAverage.py +++ 
b/nipype/interfaces/minc/tests/test_auto_BigAverage.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import BigAverage diff --git a/nipype/interfaces/minc/tests/test_auto_Blob.py b/nipype/interfaces/minc/tests/test_auto_Blob.py index b489ac944a..64de50fd2d 100644 --- a/nipype/interfaces/minc/tests/test_auto_Blob.py +++ b/nipype/interfaces/minc/tests/test_auto_Blob.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import Blob diff --git a/nipype/interfaces/minc/tests/test_auto_Blur.py b/nipype/interfaces/minc/tests/test_auto_Blur.py index fb6e405012..387dcd05bc 100644 --- a/nipype/interfaces/minc/tests/test_auto_Blur.py +++ b/nipype/interfaces/minc/tests/test_auto_Blur.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import Blur diff --git a/nipype/interfaces/minc/tests/test_auto_Calc.py b/nipype/interfaces/minc/tests/test_auto_Calc.py index 7bec782c1c..a6a8728d77 100644 --- a/nipype/interfaces/minc/tests/test_auto_Calc.py +++ b/nipype/interfaces/minc/tests/test_auto_Calc.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import Calc diff --git a/nipype/interfaces/minc/tests/test_auto_Convert.py b/nipype/interfaces/minc/tests/test_auto_Convert.py index 6df596c682..b7b74c4620 100644 --- a/nipype/interfaces/minc/tests/test_auto_Convert.py +++ b/nipype/interfaces/minc/tests/test_auto_Convert.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import Convert diff --git a/nipype/interfaces/minc/tests/test_auto_Copy.py b/nipype/interfaces/minc/tests/test_auto_Copy.py index e91470ba6d..e9a2b8d2dc 100644 --- a/nipype/interfaces/minc/tests/test_auto_Copy.py +++ b/nipype/interfaces/minc/tests/test_auto_Copy.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import Copy diff --git a/nipype/interfaces/minc/tests/test_auto_Dump.py b/nipype/interfaces/minc/tests/test_auto_Dump.py index bcca2a4801..3b708d2405 100644 --- a/nipype/interfaces/minc/tests/test_auto_Dump.py +++ b/nipype/interfaces/minc/tests/test_auto_Dump.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import Dump diff --git a/nipype/interfaces/minc/tests/test_auto_Extract.py b/nipype/interfaces/minc/tests/test_auto_Extract.py index 77126eac18..2f6e687345 100644 --- a/nipype/interfaces/minc/tests/test_auto_Extract.py +++ b/nipype/interfaces/minc/tests/test_auto_Extract.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import Extract diff --git a/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py b/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py index c89dc65de6..963f35b74e 100644 --- a/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py +++ b/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import Gennlxfm diff --git a/nipype/interfaces/minc/tests/test_auto_Math.py b/nipype/interfaces/minc/tests/test_auto_Math.py index 1d011034d2..e5f33d7c5b 100644 --- a/nipype/interfaces/minc/tests/test_auto_Math.py +++ 
b/nipype/interfaces/minc/tests/test_auto_Math.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import Math diff --git a/nipype/interfaces/minc/tests/test_auto_NlpFit.py b/nipype/interfaces/minc/tests/test_auto_NlpFit.py index 1a728a90dd..738fe6e81a 100644 --- a/nipype/interfaces/minc/tests/test_auto_NlpFit.py +++ b/nipype/interfaces/minc/tests/test_auto_NlpFit.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import NlpFit diff --git a/nipype/interfaces/minc/tests/test_auto_Norm.py b/nipype/interfaces/minc/tests/test_auto_Norm.py index 3a0d28f06d..1d9064a68f 100644 --- a/nipype/interfaces/minc/tests/test_auto_Norm.py +++ b/nipype/interfaces/minc/tests/test_auto_Norm.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import Norm diff --git a/nipype/interfaces/minc/tests/test_auto_Pik.py b/nipype/interfaces/minc/tests/test_auto_Pik.py index d74d9a86ad..5e1e18ac50 100644 --- a/nipype/interfaces/minc/tests/test_auto_Pik.py +++ b/nipype/interfaces/minc/tests/test_auto_Pik.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import Pik diff --git a/nipype/interfaces/minc/tests/test_auto_Resample.py b/nipype/interfaces/minc/tests/test_auto_Resample.py index bd00bd224d..ba713331b2 100644 --- a/nipype/interfaces/minc/tests/test_auto_Resample.py +++ b/nipype/interfaces/minc/tests/test_auto_Resample.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import Resample diff --git a/nipype/interfaces/minc/tests/test_auto_Reshape.py b/nipype/interfaces/minc/tests/test_auto_Reshape.py index 45e6ddeb4a..595f359570 100644 --- a/nipype/interfaces/minc/tests/test_auto_Reshape.py +++ b/nipype/interfaces/minc/tests/test_auto_Reshape.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import Reshape diff --git a/nipype/interfaces/minc/tests/test_auto_ToEcat.py b/nipype/interfaces/minc/tests/test_auto_ToEcat.py index 26a3ac0436..d40da0386a 100644 --- a/nipype/interfaces/minc/tests/test_auto_ToEcat.py +++ b/nipype/interfaces/minc/tests/test_auto_ToEcat.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import ToEcat diff --git a/nipype/interfaces/minc/tests/test_auto_ToRaw.py b/nipype/interfaces/minc/tests/test_auto_ToRaw.py index e010da322d..e0eec67253 100644 --- a/nipype/interfaces/minc/tests/test_auto_ToRaw.py +++ b/nipype/interfaces/minc/tests/test_auto_ToRaw.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import ToRaw diff --git a/nipype/interfaces/minc/tests/test_auto_VolSymm.py b/nipype/interfaces/minc/tests/test_auto_VolSymm.py index 048ffcde9b..403a321287 100644 --- a/nipype/interfaces/minc/tests/test_auto_VolSymm.py +++ b/nipype/interfaces/minc/tests/test_auto_VolSymm.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import VolSymm diff --git a/nipype/interfaces/minc/tests/test_auto_Volcentre.py b/nipype/interfaces/minc/tests/test_auto_Volcentre.py index b095751a4d..3d543b8c89 100644 --- a/nipype/interfaces/minc/tests/test_auto_Volcentre.py 
+++ b/nipype/interfaces/minc/tests/test_auto_Volcentre.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import Volcentre diff --git a/nipype/interfaces/minc/tests/test_auto_Voliso.py b/nipype/interfaces/minc/tests/test_auto_Voliso.py index 967642a328..791b87063c 100644 --- a/nipype/interfaces/minc/tests/test_auto_Voliso.py +++ b/nipype/interfaces/minc/tests/test_auto_Voliso.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import Voliso diff --git a/nipype/interfaces/minc/tests/test_auto_Volpad.py b/nipype/interfaces/minc/tests/test_auto_Volpad.py index 865bc79e69..6d329cc06d 100644 --- a/nipype/interfaces/minc/tests/test_auto_Volpad.py +++ b/nipype/interfaces/minc/tests/test_auto_Volpad.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import Volpad diff --git a/nipype/interfaces/minc/tests/test_auto_XfmAvg.py b/nipype/interfaces/minc/tests/test_auto_XfmAvg.py index 6d036a0c0e..0fff5457d9 100644 --- a/nipype/interfaces/minc/tests/test_auto_XfmAvg.py +++ b/nipype/interfaces/minc/tests/test_auto_XfmAvg.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import XfmAvg diff --git a/nipype/interfaces/minc/tests/test_auto_XfmConcat.py b/nipype/interfaces/minc/tests/test_auto_XfmConcat.py index eb748953ef..9bb744eeaa 100644 --- a/nipype/interfaces/minc/tests/test_auto_XfmConcat.py +++ b/nipype/interfaces/minc/tests/test_auto_XfmConcat.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import XfmConcat diff --git a/nipype/interfaces/minc/tests/test_auto_XfmInvert.py b/nipype/interfaces/minc/tests/test_auto_XfmInvert.py index d729e90639..17d46f5bac 100644 --- a/nipype/interfaces/minc/tests/test_auto_XfmInvert.py +++ b/nipype/interfaces/minc/tests/test_auto_XfmInvert.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..minc import XfmInvert diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py index 16605acbb7..2509fd3ba7 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..developer import JistBrainMgdmSegmentation diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py index ebb65bb789..f84dad9746 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..developer import JistBrainMp2rageDuraEstimation diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py index abf794b662..69ae55f9f8 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py +++ 
b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..developer import JistBrainMp2rageSkullStripping diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py index bb86144c20..28ba9e10c4 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..developer import JistBrainPartialVolumeFilter diff --git a/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py b/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py index 407b9755ca..bac8f5795b 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..developer import JistCortexSurfaceMeshInflation diff --git a/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py b/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py index bfdace4944..14b19475fe 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..developer import JistIntensityMp2rageMasking diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py index 12203a1aa6..c8714765e3 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..developer import JistLaminarProfileCalculator diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py index ddc4d5d922..a1c10a2b49 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..developer import JistLaminarProfileGeometry diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py index 58de472b85..401fd56ddf 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..developer import JistLaminarProfileSampling diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py index bb9577ccee..0c313ce894 100644 --- 
a/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..developer import JistLaminarROIAveraging diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py index 4aa9f9d77b..deabdeb78f 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..developer import JistLaminarVolumetricLayering diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py index 016beee263..8e598046de 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..developer import MedicAlgorithmImageCalculator diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py index 87f9ab6a72..234cbfded2 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..developer import MedicAlgorithmLesionToads diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py index 7c8c7248ad..03c6a65230 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..developer import MedicAlgorithmMipavReorient diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py index 9d5a148a24..59fe4a64fa 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..developer import MedicAlgorithmN3 diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py index b2d247e9dc..a2cb0c4c95 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..developer import MedicAlgorithmSPECTRE2010 diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py index b498025401..e4bfae7e0f 100644 --- 
a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..developer import MedicAlgorithmThresholdToBinaryMask diff --git a/nipype/interfaces/mipav/tests/test_auto_RandomVol.py b/nipype/interfaces/mipav/tests/test_auto_RandomVol.py index d3d92142c4..0508164c2b 100644 --- a/nipype/interfaces/mipav/tests/test_auto_RandomVol.py +++ b/nipype/interfaces/mipav/tests/test_auto_RandomVol.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..developer import RandomVol diff --git a/nipype/interfaces/mixins/tests/test_auto_ReportCapableInterface.py b/nipype/interfaces/mixins/tests/test_auto_ReportCapableInterface.py index 8985d7069d..0cf527114b 100644 --- a/nipype/interfaces/mixins/tests/test_auto_ReportCapableInterface.py +++ b/nipype/interfaces/mixins/tests/test_auto_ReportCapableInterface.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..reporting import ReportCapableInterface diff --git a/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py b/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py index 4ced8de75a..608f47ee8e 100644 --- a/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py +++ b/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import WatershedBEM diff --git a/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py b/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py index 78a275f6a7..5d5174f851 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..tensors import ConstrainedSphericalDeconvolution diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py b/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py index dc95cff525..b49c0ca795 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..tensors import DWI2SphericalHarmonicsImage diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py b/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py index 86975950bf..df7415c752 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import DWI2Tensor diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py index 2c6417fe3a..23b3919efc 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by 
tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..tracking import DiffusionTensorStreamlineTrack diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py b/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py index b5474bd18c..27727079bd 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..tensors import Directions2Amplitude diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Erode.py b/nipype/interfaces/mrtrix/tests/test_auto_Erode.py index 6215593ab0..bdfe3da696 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Erode.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Erode.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Erode diff --git a/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py b/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py index 983433104e..8bff6e7ec9 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..tensors import EstimateResponseForSH diff --git a/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py b/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py index 6261b6b0a5..a378d9b894 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..tensors import FSL2MRTrix diff --git a/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py b/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py index 6e0e670d06..1cf7d566df 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..tracking import FilterTracks diff --git a/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py b/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py index 5d9f51739c..19ad75e155 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..tensors import FindShPeaks diff --git a/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py b/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py index 2cfd89bb6f..0f10bb12ef 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..tensors import GenerateDirections diff --git a/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py b/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py index 6a0305f9e2..e48de0b988 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py +++ 
b/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import GenerateWhiteMatterMask diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py b/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py index 5aef0bfd13..0571338f35 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import MRConvert diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py b/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py index a5a864289a..47d8bbe3cd 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import MRMultiply diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py index d6a3db0a1d..36fd18a64a 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import MRTransform diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py index b6fefac9a0..bbcddcb9ff 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..convert import MRTrix2TrackVis diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py index 0022de4d7a..c4997d1c56 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import MRTrixInfo diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py index 117258ea17..dd8b6066c3 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import MRTrixViewer diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py b/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py index 0e7daabcaa..61e970205f 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import MedianFilter3D diff --git a/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py index 6d75b1b9a2..f1e8b4cfe9 100644 --- 
a/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..tracking import ProbabilisticSphericallyDeconvolutedStreamlineTrack diff --git a/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py index 9bed95276d..4b43c39f81 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..tracking import SphericallyDeconvolutedStreamlineTrack diff --git a/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py index c898a60eb3..90c75c1898 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..tracking import StreamlineTrack diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py index e9546d7e90..88a10dea3a 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Tensor2ApparentDiffusion diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py index d16a907f62..46743e3baa 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Tensor2FractionalAnisotropy diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py index c07d0a8db4..58fcde0c67 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Tensor2Vector diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py b/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py index 39fbf14d2c..0386944098 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Threshold diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py b/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py index f984203ab1..740c2d630e 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py +++ 
b/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..tracking import Tracks2Prob diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py b/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py index abb3ba6831..ac3a785653 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import ACTPrepareFSL diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py b/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py index c318be0bfd..219ccd1c03 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import BrainMask diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py b/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py index 95e5797d06..4141963083 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..connectivity import BuildConnectome diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py b/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py index 38369e8fe5..3a798ab548 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import ComputeTDI diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py index 769ccb34a9..bdeced9977 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import DWIDenoise diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py index 0114c5efe0..a17705c4c2 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import DWIExtract diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py index a5c152e928..3fd0563014 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..reconst import EstimateFOD diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py b/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py index 40b7baa931..4e2b661c87 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py @@ 
-1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..reconst import FitTensor diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py b/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py index dcfbe1fc6f..fa264daba6 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Generate5tt diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py index 2de0e6c115..7ba6316b1b 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..connectivity import LabelConfig diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py index 1e93ae82d8..ea927fa6f3 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..connectivity import LabelConvert diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py index ece24e1d47..267fa4cfb5 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import MRConvert diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py index eec06b6276..ecb3a92136 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import MRMath diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py index 453baa053a..ed91143ab8 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import MRTrix3Base diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py b/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py index 9aa29acbe8..dfe1a3dc45 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Mesh2PVE diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py b/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py index 502a0f7fb1..2a14e85298 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ 
import unicode_literals from ..preprocess import ReplaceFSwithFIRST diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py b/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py index 027527ec85..34dbea9f3b 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import ResponseSD diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py b/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py index 93a39484f8..a77a2a27be 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import TCK2VTK diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py b/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py index ae532c9910..8b3e93a6c3 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import TensorMetrics diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py index dff5c783ee..876b186e50 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..tracking import Tractography diff --git a/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py b/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py index 20995e806e..05e25c730a 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dwi import DwiTool diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py b/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py index b2e1bef961..b2bd425dd1 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..asl import FitAsl diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py b/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py index 700d9a31c4..5818011320 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dwi import FitDwi diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py b/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py index 0505ee514c..0ff9e86002 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..qt1 import FitQt1 diff --git a/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py 
b/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py index 6bfeb3c9d3..83d600d26e 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import NiftyFitCommand diff --git a/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py b/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py index 89615b50d7..016ca5654b 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import NiftyRegCommand diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py b/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py index 9507f53fa9..8e738c1367 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..reg import RegAladin diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py b/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py index 2510a46eb8..2f27151bae 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..regutils import RegAverage diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py b/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py index e9bdab82c0..461f34396d 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..reg import RegF3D diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py b/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py index 63f917c683..32bdfc32fe 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..regutils import RegJacobian diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py b/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py index 3321d87afc..90120a754e 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..regutils import RegMeasure diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py b/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py index 06b2b48401..e461075031 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..regutils import RegResample diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py b/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py 
index 5deb4206e6..78a08213ec 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..regutils import RegTools diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py b/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py index 57c1b0ad86..9b0befc6ec 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..regutils import RegTransform diff --git a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py index cc7bc896c6..54cb50ea76 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import BinaryMaths diff --git a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py index 464ed6f6c4..e7f7eaa625 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import BinaryMathsInteger diff --git a/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py b/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py index 0771d72f5c..c6f159bcd4 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..stats import BinaryStats diff --git a/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py b/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py index dceea837c0..11a503c98b 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..label_fusion import CalcTopNCC diff --git a/nipype/interfaces/niftyseg/tests/test_auto_EM.py b/nipype/interfaces/niftyseg/tests/test_auto_EM.py index fc270ed22f..54b50ff7a1 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_EM.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_EM.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..em import EM diff --git a/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py b/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py index 7cbfe58c6c..cc20deefc7 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..lesions import FillLesions diff --git a/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py b/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py index eea345e1df..d31f7819a0 100644 --- 
a/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..label_fusion import LabelFusion diff --git a/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py b/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py index 247dc9773e..f0ad3cf9ae 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import MathsCommand diff --git a/nipype/interfaces/niftyseg/tests/test_auto_Merge.py b/nipype/interfaces/niftyseg/tests/test_auto_Merge.py index c53e4edf40..80726439eb 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_Merge.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_Merge.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import Merge diff --git a/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py b/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py index d840a40b2a..c0d36cdfe6 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import NiftySegCommand diff --git a/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py b/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py index bfeded6eef..755e674226 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..patchmatch import PatchMatch diff --git a/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py b/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py index e845e18952..205b78f01e 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..stats import StatsCommand diff --git a/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py b/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py index 7241264eee..baf7c8b7d8 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import TupleMaths diff --git a/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py b/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py index 9b1cd7d194..95d8ac7109 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maths import UnaryMaths diff --git a/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py b/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py index 34edf1a190..b13f94a864 100644 --- 
a/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..stats import UnaryStats diff --git a/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py b/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py index f0f2453274..c68597b5bb 100644 --- a/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py +++ b/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import ComputeMask diff --git a/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py b/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py index 383d1cbea3..880d01c70d 100644 --- a/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py +++ b/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import EstimateContrast diff --git a/nipype/interfaces/nipy/tests/test_auto_FitGLM.py b/nipype/interfaces/nipy/tests/test_auto_FitGLM.py index 4ac3aa720e..64fd0a863d 100644 --- a/nipype/interfaces/nipy/tests/test_auto_FitGLM.py +++ b/nipype/interfaces/nipy/tests/test_auto_FitGLM.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import FitGLM diff --git a/nipype/interfaces/nipy/tests/test_auto_NipyBaseInterface.py b/nipype/interfaces/nipy/tests/test_auto_NipyBaseInterface.py index ca91500995..64df6d9636 100644 --- a/nipype/interfaces/nipy/tests/test_auto_NipyBaseInterface.py +++ b/nipype/interfaces/nipy/tests/test_auto_NipyBaseInterface.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import NipyBaseInterface diff --git a/nipype/interfaces/nipy/tests/test_auto_Similarity.py b/nipype/interfaces/nipy/tests/test_auto_Similarity.py index fbdb6fffa6..484f5ded3d 100644 --- a/nipype/interfaces/nipy/tests/test_auto_Similarity.py +++ b/nipype/interfaces/nipy/tests/test_auto_Similarity.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Similarity diff --git a/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py b/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py index fca6664a49..c511737c70 100644 --- a/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py +++ b/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import SpaceTimeRealigner diff --git a/nipype/interfaces/nipy/tests/test_auto_Trim.py b/nipype/interfaces/nipy/tests/test_auto_Trim.py index 6073c9082b..1d8ba8aefb 100644 --- a/nipype/interfaces/nipy/tests/test_auto_Trim.py +++ b/nipype/interfaces/nipy/tests/test_auto_Trim.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Trim diff --git a/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py b/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py index 4a641ebb50..01ee867470 100644 --- a/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py +++ 
b/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..analysis import CoherenceAnalyzer diff --git a/nipype/interfaces/nitime/tests/test_auto_NitimeBaseInterface.py b/nipype/interfaces/nitime/tests/test_auto_NitimeBaseInterface.py index bf827b81b2..56b6b5d923 100644 --- a/nipype/interfaces/nitime/tests/test_auto_NitimeBaseInterface.py +++ b/nipype/interfaces/nitime/tests/test_auto_NitimeBaseInterface.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import NitimeBaseInterface diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py index 81a22bfe38..43ef4e54b5 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..classify import BRAINSPosteriorToContinuousClass diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py index ac589ad6dc..684160366e 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..segmentation import BRAINSTalairach diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py index 5ada1576e7..d52f546f93 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..segmentation import BRAINSTalairachMask diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py b/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py index eb4bdcffae..4cabcfc57a 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utilities import GenerateEdgeMapImage diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py b/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py index ae16bc8fab..3de0bba0ee 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utilities import GeneratePurePlugMask diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py b/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py index 1377072149..0a1e0d4e35 100644 --- 
a/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utilities import HistogramMatchingFilter diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py b/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py index 534488ad10..8b678e9a6d 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..segmentation import SimilarityIndex diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py index 05593527fb..658fb56f72 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..diffusion import DWIConvert diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py index eb9d8c6184..f9f8ef6c22 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..gtract import compareTractInclusion diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py index 92c00853f4..443e89e335 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..diffusion import dtiaverage diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py index 440fd4df07..d389f2b033 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..diffusion import dtiestim diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py index 3148d1edb5..69c7d4dcd9 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..diffusion import dtiprocess diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py index ac5784f1c7..45e1506cd5 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py +++ 
b/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..gtract import extractNrrdVectorIndex diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py index 195d7dc3e1..b673134a1e 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..gtract import gtractAnisotropyMap diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py index fbb5bd6a55..f1f8ba2a76 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..gtract import gtractAverageBvalues diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py index fed980c463..691c4725bc 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..gtract import gtractClipAnisotropy diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py index 456e1e79fa..04d21192e6 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..gtract import gtractCoRegAnatomy diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py index 7b75858eff..9643cbd290 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..gtract import gtractConcatDwi diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py index d9e0b725c8..a534091999 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..gtract import gtractCopyImageOrientation diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py index 
3143b16dfb..1e6e67b77b 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..gtract import gtractCoregBvalues diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py index 32d1e68898..d7e3a10cd6 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..gtract import gtractCostFastMarching diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py index bbe375bbb9..212ed1e367 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..gtract import gtractCreateGuideFiber diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py index ec3a99b91e..c4d93a1dca 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..gtract import gtractFastMarchingTracking diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py index 96c93b8b64..025063edba 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..gtract import gtractFiberTracking diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py index 9ddde832b2..2bbb3ee639 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..gtract import gtractImageConformity diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py index ca642ceb66..0201d2c581 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..gtract import 
gtractInvertBSplineTransform diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py index e4fd213d39..8d58b4a0bb 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..gtract import gtractInvertDisplacementField diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py index c035862663..197c2f9f92 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..gtract import gtractInvertRigidTransform diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py index 43f428b233..f1e5c7e4f3 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..gtract import gtractResampleAnisotropy diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py index 812afca5c0..5d52a0b578 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..gtract import gtractResampleB0 diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py index decc017c60..487c8eb2a3 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..gtract import gtractResampleCodeImage diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py index b815d3d3d6..e0f31893fe 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..gtract import gtractResampleDWIInPlace diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py index d0d2cd5664..13d9d1ed01 100644 --- 
a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..gtract import gtractResampleFibers diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py index ead96d1c71..61de0e24ad 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..gtract import gtractTensor diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py index 2dfde189e2..91a9a802bc 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..gtract import gtractTransformToDisplacementField diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py index cc06c5ede5..b5bddf6825 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..maxcurvature import maxcurvature diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py index 7c61974ef3..db15f7cd4f 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..ukftractography import UKFTractography diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py index b4756a9406..ee8f39260e 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..fiberprocess import fiberprocess diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py index d6f3a5cd50..aa10c9a472 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..commandlineonly import fiberstats diff --git 
a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py index 33e57c0ca3..4620040d0f 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..fibertrack import fibertrack diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py b/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py index cfbee7449c..e39be3a413 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..featuredetection import CannyEdge diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py index b4196db823..a77554cbfe 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..featuredetection import CannySegmentationLevelSetImageFilter diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py index 7945ec8ade..3376c9c9f7 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..featuredetection import DilateImage diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py index 6bd6235e80..5f6bb2fbf0 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..featuredetection import DilateMask diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py index f7aaf722c4..be17908b30 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..featuredetection import DistanceMaps diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py index 320bb76b77..6b400b8d9c 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from 
__future__ import unicode_literals from ..featuredetection import DumpBinaryTrainingVectors diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py index 9e096b0062..6fe6645250 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..featuredetection import ErodeImage diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py b/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py index 539660f73b..0030b159cf 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..featuredetection import FlippedDifference diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py index 0a211f23d2..3ff2bedffb 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..featuredetection import GenerateBrainClippedImage diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py index 58d3f35c5a..57a90bf24e 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..featuredetection import GenerateSummedGradientImage diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py index 1348e61d4b..afe0a9ce6a 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..featuredetection import GenerateTestImage diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py index 27ebb18d29..ee4032fbaf 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..featuredetection import GradientAnisotropicDiffusionImageFilter diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py b/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py index 
9ad82598bb..454ed43986 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..featuredetection import HammerAttributeCreator diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py index c6b1ef20a9..0998051c75 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..featuredetection import NeighborhoodMean diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py index 2c2b2d585f..5962549938 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..featuredetection import NeighborhoodMedian diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py b/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py index 75bb83315f..6dcb4242c7 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..featuredetection import STAPLEAnalysis diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py index cd4b4a1af2..62d4aab5d6 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..featuredetection import TextureFromNoiseImageFilter diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py index 0ad523c7d0..6976309713 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..featuredetection import TextureMeasureFilter diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py b/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py index b84cda1fc3..c81908284e 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..denoising import UnbiasedNonLocalMeans diff --git 
a/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py b/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py index f9ff60cfd9..55bbf35bc8 100644 --- a/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py +++ b/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import scalartransform diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py index c631f9b96e..2fdd5ed445 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..specialized import BRAINSDemonWarp diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py index 18059c4f57..2ea79a7ad9 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brainsfit import BRAINSFit diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py index 98ec5f4ff3..8e582519fb 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brainsresample import BRAINSResample diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py index 4ce0d7159c..ec2858ab43 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brainsresize import BRAINSResize diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py index 2a700eaac7..0432be0e4c 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..specialized import BRAINSTransformFromFiducials diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py b/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py index b3255da1d3..40f7fbdb5b 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import 
unicode_literals from ..specialized import VBRAINSDemonWarp diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py index 2d66884522..a1929ef390 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..specialized import BRAINSABC diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py index 9b2d10061c..88bf30e2dc 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..specialized import BRAINSConstellationDetector diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py index 9f2b00c311..c423893a83 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..specialized import BRAINSCreateLabelMapFromProbabilityMaps diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py index 9d66d4a463..14778d7b75 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..specialized import BRAINSCut diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py index 826ec19f0e..62b4778081 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..specialized import BRAINSMultiSTAPLE diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py index 3e70b75883..c52bd3d3aa 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..specialized import BRAINSROIAuto diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py index d71e7003ff..87e5acffb5 100644 --- 
a/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..specialized import BinaryMaskEditorBasedOnLandmarks diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py index 2e099b4f56..d6b8378e09 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..specialized import ESLR diff --git a/nipype/interfaces/semtools/tests/test_auto_DWICompare.py b/nipype/interfaces/semtools/tests/test_auto_DWICompare.py index c00b0cc36f..03238109d3 100644 --- a/nipype/interfaces/semtools/tests/test_auto_DWICompare.py +++ b/nipype/interfaces/semtools/tests/test_auto_DWICompare.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..converters import DWICompare diff --git a/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py b/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py index e515bc613c..a0fab812a5 100644 --- a/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py +++ b/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..converters import DWISimpleCompare diff --git a/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py b/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py index 63f45831af..85ea94070f 100644 --- a/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py +++ b/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..featurecreator import GenerateCsfClippedFromClassifiedImage diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py index 27b65a4eba..a3dabe9db4 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brains import BRAINSAlignMSP diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py index 7a0528f201..4a3586a933 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brains import BRAINSClipInferior diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py index a124ad60d9..8c2b24f654 100644 --- 
a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brains import BRAINSConstellationModeler diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py index 9df3c8b8d6..e840daf05c 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brains import BRAINSEyeDetector diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py index bc3807f9f8..64720e5588 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brains import BRAINSInitializedControlPoints diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py index 2456b8fb4d..ac6df55fc6 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brains import BRAINSLandmarkInitializer diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py index b6b83baa12..52bd2ecb2e 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brains import BRAINSLinearModelerEPCA diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py index 262d948029..ed5bb265d4 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brains import BRAINSLmkTransform diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py index 9627258bcf..12aaa1583a 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brains import BRAINSMush diff --git 
a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py index 1b803956e7..69855fa3a7 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brains import BRAINSSnapShotWriter diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py index c6d10736d7..d303f26905 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brains import BRAINSTransformConvert diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py index d2f3b74140..65e991e9e3 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brains import BRAINSTrimForegroundInDirection diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py b/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py index 936437886a..fe56cb0e38 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brains import CleanUpOverlapLabels diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py b/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py index 46924bf180..f987349d7a 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brains import FindCenterOfBrain diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py b/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py index 9087d6667b..c75c9b588a 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brains import GenerateLabelMapFromProbabilityMap diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py b/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py index f5372a240e..9a5536dadc 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py +++ 
b/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brains import ImageRegionPlotter diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py b/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py index daf6d5634f..20d677b24e 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brains import JointHistogram diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py b/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py index 7bea38e2f1..768c3f8376 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brains import ShuffleVectorsModule diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py b/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py index 32e6ce65cd..b2492324c8 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brains import fcsv_to_hdf5 diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py b/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py index 196b09b304..305d04e0dc 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brains import insertMidACPCpoint diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py index 151a2c7b3a..e4df73fcba 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brains import landmarksConstellationAligner diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py index 0bcd747d36..0c0bf738a5 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brains import landmarksConstellationWeights diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py index c8d5df2ba2..2af326b6f6 
100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..diffusion import DTIexport diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py index ec1d66bc0b..816d94b1fa 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..diffusion import DTIimport diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py index a2caa2f633..e3304d8039 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..diffusion import DWIJointRicianLMMSEFilter diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py index 7eff851562..75f3e1a396 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..diffusion import DWIRicianLMMSEFilter diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py index 217f91edf0..4ad80df888 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..diffusion import DWIToDTIEstimation diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py index b75989c349..0a1897b007 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..diffusion import DiffusionTensorScalarMeasurements diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py index 5e8c39a4ac..75a41d81ab 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..diffusion import DiffusionWeightedVolumeMasking diff --git 
a/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py index 30860d9da0..80a0fae1cc 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..diffusion import ResampleDTIVolume diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py index 31fb7d5c0d..3b3b06b0f8 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..diffusion import TractographyLabelMapSeeding diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py index 9f2209c1eb..4661bc763c 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..arithmetic import AddScalarVolumes diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py index a659aa47f7..964918150d 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..arithmetic import CastScalarVolume diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py index ae662cf2ba..d76e42980f 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..checkerboardfilter import CheckerBoardFilter diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py index 5613eb0c4c..2777e004ca 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..denoising import CurvatureAnisotropicDiffusion diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py index 5db6c65c7e..c365c0f15c 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from 
__future__ import unicode_literals from ..extractskeleton import ExtractSkeleton diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py index ff46d6d308..6765c7add9 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..denoising import GaussianBlurImageFilter diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py index 8ec7dbb156..5d2af57254 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..denoising import GradientAnisotropicDiffusion diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py index 062a23c293..f614a3092e 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..morphology import GrayscaleFillHoleImageFilter diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py index edb6081ed3..583b083a0c 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..morphology import GrayscaleGrindPeakImageFilter diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py b/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py index f73690586d..d396da88ec 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..histogrammatching import HistogramMatching diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py index 0bae22c342..f8996a2ec8 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..imagelabelcombine import ImageLabelCombine diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py index cd04072890..772f365aa7 100644 --- 
a/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..arithmetic import MaskScalarVolume diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py index 35bb9496c9..78dc5eae1a 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..denoising import MedianImageFilter diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py index 6590c4b133..e447278ffe 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..arithmetic import MultiplyScalarVolumes diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py b/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py index c6ad0bf24d..5b226f2fd9 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..n4itkbiasfieldcorrection import N4ITKBiasFieldCorrection diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py index ebe34fd9b3..042122a046 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..resamplescalarvectordwivolume import ResampleScalarVectorDWIVolume diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py index d62589881f..218afc16c6 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..arithmetic import SubtractScalarVolumes diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py index 34de317104..ce6f2a449b 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..thresholdscalarvolume import ThresholdScalarVolume diff --git 
a/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py index 8e7890de85..592172b8f3 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..votingbinaryholefillingimagefilter import VotingBinaryHoleFillingImageFilter diff --git a/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py b/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py index 0e25ffad9a..70bc5400b7 100644 --- a/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py +++ b/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..denoising import DWIUnbiasedNonLocalMeansFilter diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py index 949cdc7f61..a7d0239227 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import AffineRegistration diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py index 17339f8859..0d56a05d1d 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import BSplineDeformableRegistration diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py index 8335515c13..6e11383220 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..converters import BSplineToDeformationField diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py index 7fa8b77d63..c721bd8168 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import ExpertAutomatedRegistration diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py index a55432f9b0..2c6e79f57a 100644 --- 
a/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import LinearRegistration diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py index 7903fd1d5c..e7e2447a8b 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import MultiResolutionAffineRegistration diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py index a1af6c71b3..c83e402168 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..filtering import OtsuThresholdImageFilter diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py index 02beeee464..0533a4fc8c 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..segmentation import OtsuThresholdSegmentation diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py b/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py index baa698246c..65b3e63396 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..filtering import ResampleScalarVolume diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py index 6c2a0eb072..8017f73be9 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..registration import RigidRegistration diff --git a/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py b/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py index e46f046add..720f666dd3 100644 --- a/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py +++ b/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..changequantification import IntensityDifferenceMetric diff --git 
a/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py b/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py index 61141f65db..874a19b6e4 100644 --- a/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py +++ b/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..petstandarduptakevaluecomputation import PETStandardUptakeValueComputation diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py b/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py index 454e290102..1914033d8d 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..specialized import ACPCTransform diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py index c631f9b96e..2fdd5ed445 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..specialized import BRAINSDemonWarp diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py index bb62633d94..e0dffb2e10 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brainsfit import BRAINSFit diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py index 98ec5f4ff3..8e582519fb 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..brainsresample import BRAINSResample diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py b/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py index 36d42fe8df..faff151975 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..specialized import FiducialRegistration diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py b/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py index b3255da1d3..40f7fbdb5b 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - 
DO NOT EDIT -from __future__ import unicode_literals from ..specialized import VBRAINSDemonWarp diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py index 89863fb730..61f28399c1 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..specialized import BRAINSROIAuto diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py index 09b0b1300f..41b24b123b 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..specialized import EMSegmentCommandLine diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py index ed46177df0..b676880b37 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..specialized import RobustStatisticsSegmenter diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py index 3c5e2124d0..7b5077d733 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..simpleregiongrowingsegmentation import SimpleRegionGrowingSegmentation diff --git a/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py b/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py index 2997e805f9..3e6b0d875d 100644 --- a/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py +++ b/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..converters import DicomToNrrdConverter diff --git a/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py b/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py index 279d68e0ab..70fbde6153 100644 --- a/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py +++ b/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utilities import EMSegmentTransformToNewFormat diff --git a/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py b/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py index 7ad8cac8e9..cdd9682786 100644 --- a/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py +++ 
b/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..surface import GrayscaleModelMaker diff --git a/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py b/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py index bb3780495b..8249617521 100644 --- a/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py +++ b/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..surface import LabelMapSmoothing diff --git a/nipype/interfaces/slicer/tests/test_auto_MergeModels.py b/nipype/interfaces/slicer/tests/test_auto_MergeModels.py index 6453957a79..32f340aa05 100644 --- a/nipype/interfaces/slicer/tests/test_auto_MergeModels.py +++ b/nipype/interfaces/slicer/tests/test_auto_MergeModels.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..surface import MergeModels diff --git a/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py b/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py index ed182137cf..54bcddff38 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py +++ b/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..surface import ModelMaker diff --git a/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py b/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py index efd11f1040..322aace661 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py +++ b/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..surface import ModelToLabelMap diff --git a/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py b/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py index f3d1908cd0..9fddc5b083 100644 --- a/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py +++ b/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..converters import OrientScalarVolume diff --git a/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py b/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py index 32a2fc2139..2256b61df2 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py +++ b/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..surface import ProbeVolumeWithModel diff --git a/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py b/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py index b0e1e2c3b0..b8eeb5bd56 100644 --- a/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py +++ b/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import SlicerCommandLine diff --git a/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py b/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py index 1b1aa6edcd..57a595cd66 100644 
--- a/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py +++ b/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Analyze2nii diff --git a/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py b/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py index bbc925ec9e..259db6e1f0 100644 --- a/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py +++ b/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import ApplyDeformations diff --git a/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py b/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py index d1c35dbcd2..0b17f56630 100644 --- a/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py +++ b/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import ApplyInverseDeformation diff --git a/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py b/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py index a686e95485..11818cf70a 100644 --- a/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py +++ b/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import ApplyTransform diff --git a/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py b/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py index 69695ca7b5..c5a562f41b 100644 --- a/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py +++ b/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import CalcCoregAffine diff --git a/nipype/interfaces/spm/tests/test_auto_Coregister.py b/nipype/interfaces/spm/tests/test_auto_Coregister.py index 957a2c84a9..05d8cf06ab 100644 --- a/nipype/interfaces/spm/tests/test_auto_Coregister.py +++ b/nipype/interfaces/spm/tests/test_auto_Coregister.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Coregister diff --git a/nipype/interfaces/spm/tests/test_auto_CreateWarped.py b/nipype/interfaces/spm/tests/test_auto_CreateWarped.py index dd98d30e37..b25e377ad3 100644 --- a/nipype/interfaces/spm/tests/test_auto_CreateWarped.py +++ b/nipype/interfaces/spm/tests/test_auto_CreateWarped.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import CreateWarped diff --git a/nipype/interfaces/spm/tests/test_auto_DARTEL.py b/nipype/interfaces/spm/tests/test_auto_DARTEL.py index cc1b9eee1b..2ee9ea5521 100644 --- a/nipype/interfaces/spm/tests/test_auto_DARTEL.py +++ b/nipype/interfaces/spm/tests/test_auto_DARTEL.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import DARTEL diff --git a/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py b/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py index d25bde2e5d..92daefe664 100644 --- a/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py +++ 
b/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import DARTELNorm2MNI diff --git a/nipype/interfaces/spm/tests/test_auto_DicomImport.py b/nipype/interfaces/spm/tests/test_auto_DicomImport.py index fec62ddeeb..833794b628 100644 --- a/nipype/interfaces/spm/tests/test_auto_DicomImport.py +++ b/nipype/interfaces/spm/tests/test_auto_DicomImport.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import DicomImport diff --git a/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py b/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py index bc9bb9006e..e374a5acf0 100644 --- a/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py +++ b/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import EstimateContrast diff --git a/nipype/interfaces/spm/tests/test_auto_EstimateModel.py b/nipype/interfaces/spm/tests/test_auto_EstimateModel.py index 05f511a0bd..61a438848c 100644 --- a/nipype/interfaces/spm/tests/test_auto_EstimateModel.py +++ b/nipype/interfaces/spm/tests/test_auto_EstimateModel.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import EstimateModel diff --git a/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py b/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py index 38d2b1c6fb..d5d571117c 100644 --- a/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import FactorialDesign diff --git a/nipype/interfaces/spm/tests/test_auto_FieldMap.py b/nipype/interfaces/spm/tests/test_auto_FieldMap.py index 43fbbcb8f4..c36d6e78b6 100644 --- a/nipype/interfaces/spm/tests/test_auto_FieldMap.py +++ b/nipype/interfaces/spm/tests/test_auto_FieldMap.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import FieldMap diff --git a/nipype/interfaces/spm/tests/test_auto_Level1Design.py b/nipype/interfaces/spm/tests/test_auto_Level1Design.py index 7ad8ab8195..6fe77da96d 100644 --- a/nipype/interfaces/spm/tests/test_auto_Level1Design.py +++ b/nipype/interfaces/spm/tests/test_auto_Level1Design.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import Level1Design diff --git a/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py b/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py index 26957f2fbb..7afac238a3 100644 --- a/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import MultipleRegressionDesign diff --git a/nipype/interfaces/spm/tests/test_auto_NewSegment.py b/nipype/interfaces/spm/tests/test_auto_NewSegment.py index 3f03685e11..505296bbf4 100644 --- a/nipype/interfaces/spm/tests/test_auto_NewSegment.py +++ b/nipype/interfaces/spm/tests/test_auto_NewSegment.py @@ -1,5 +1,4 @@ # AUTO-GENERATED 
by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import NewSegment diff --git a/nipype/interfaces/spm/tests/test_auto_Normalize.py b/nipype/interfaces/spm/tests/test_auto_Normalize.py index fde0bf7fff..0a8f6afbc6 100644 --- a/nipype/interfaces/spm/tests/test_auto_Normalize.py +++ b/nipype/interfaces/spm/tests/test_auto_Normalize.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Normalize diff --git a/nipype/interfaces/spm/tests/test_auto_Normalize12.py b/nipype/interfaces/spm/tests/test_auto_Normalize12.py index bf8da2dba1..532917ab3e 100644 --- a/nipype/interfaces/spm/tests/test_auto_Normalize12.py +++ b/nipype/interfaces/spm/tests/test_auto_Normalize12.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Normalize12 diff --git a/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py b/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py index dbb02a6275..5be8b04455 100644 --- a/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import OneSampleTTestDesign diff --git a/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py b/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py index 3e1662268a..64e0d9cd61 100644 --- a/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import PairedTTestDesign diff --git a/nipype/interfaces/spm/tests/test_auto_Realign.py b/nipype/interfaces/spm/tests/test_auto_Realign.py index ac2f5bbd92..3532979617 100644 --- a/nipype/interfaces/spm/tests/test_auto_Realign.py +++ b/nipype/interfaces/spm/tests/test_auto_Realign.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Realign diff --git a/nipype/interfaces/spm/tests/test_auto_Reslice.py b/nipype/interfaces/spm/tests/test_auto_Reslice.py index 81299fc748..ff4d31da56 100644 --- a/nipype/interfaces/spm/tests/test_auto_Reslice.py +++ b/nipype/interfaces/spm/tests/test_auto_Reslice.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import Reslice diff --git a/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py b/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py index 3e39ade181..6acc320089 100644 --- a/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py +++ b/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..utils import ResliceToReference diff --git a/nipype/interfaces/spm/tests/test_auto_SPMCommand.py b/nipype/interfaces/spm/tests/test_auto_SPMCommand.py index 7c1ba5cbc2..32a6b28b5c 100644 --- a/nipype/interfaces/spm/tests/test_auto_SPMCommand.py +++ b/nipype/interfaces/spm/tests/test_auto_SPMCommand.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import SPMCommand diff --git 
a/nipype/interfaces/spm/tests/test_auto_Segment.py b/nipype/interfaces/spm/tests/test_auto_Segment.py index f6df46e1de..d6b685fbcf 100644 --- a/nipype/interfaces/spm/tests/test_auto_Segment.py +++ b/nipype/interfaces/spm/tests/test_auto_Segment.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Segment diff --git a/nipype/interfaces/spm/tests/test_auto_SliceTiming.py b/nipype/interfaces/spm/tests/test_auto_SliceTiming.py index 85fb1bc4e4..369392b5e9 100644 --- a/nipype/interfaces/spm/tests/test_auto_SliceTiming.py +++ b/nipype/interfaces/spm/tests/test_auto_SliceTiming.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import SliceTiming diff --git a/nipype/interfaces/spm/tests/test_auto_Smooth.py b/nipype/interfaces/spm/tests/test_auto_Smooth.py index eea60ed619..b4e2d42b0f 100644 --- a/nipype/interfaces/spm/tests/test_auto_Smooth.py +++ b/nipype/interfaces/spm/tests/test_auto_Smooth.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import Smooth diff --git a/nipype/interfaces/spm/tests/test_auto_Threshold.py b/nipype/interfaces/spm/tests/test_auto_Threshold.py index 078adb3a1b..c822ec4dbf 100644 --- a/nipype/interfaces/spm/tests/test_auto_Threshold.py +++ b/nipype/interfaces/spm/tests/test_auto_Threshold.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import Threshold diff --git a/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py b/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py index ccdc441e04..a074b47893 100644 --- a/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py +++ b/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import ThresholdStatistics diff --git a/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py b/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py index 4dfbc12570..94418c7233 100644 --- a/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..model import TwoSampleTTestDesign diff --git a/nipype/interfaces/spm/tests/test_auto_VBMSegment.py b/nipype/interfaces/spm/tests/test_auto_VBMSegment.py index 6aaac2b489..9ef45e4903 100644 --- a/nipype/interfaces/spm/tests/test_auto_VBMSegment.py +++ b/nipype/interfaces/spm/tests/test_auto_VBMSegment.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..preprocess import VBMSegment diff --git a/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py b/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py index d77e9d7509..62a92e270a 100644 --- a/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py +++ b/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..io import BIDSDataGrabber diff --git a/nipype/interfaces/tests/test_auto_Bru2.py b/nipype/interfaces/tests/test_auto_Bru2.py index d386b3f095..4cfb412d10 100644 --- 
a/nipype/interfaces/tests/test_auto_Bru2.py +++ b/nipype/interfaces/tests/test_auto_Bru2.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..bru2nii import Bru2 diff --git a/nipype/interfaces/tests/test_auto_C3d.py b/nipype/interfaces/tests/test_auto_C3d.py index 9e74e82e2a..04f6b38778 100644 --- a/nipype/interfaces/tests/test_auto_C3d.py +++ b/nipype/interfaces/tests/test_auto_C3d.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..c3 import C3d diff --git a/nipype/interfaces/tests/test_auto_C3dAffineTool.py b/nipype/interfaces/tests/test_auto_C3dAffineTool.py index 510ea2f02a..5578e3d860 100644 --- a/nipype/interfaces/tests/test_auto_C3dAffineTool.py +++ b/nipype/interfaces/tests/test_auto_C3dAffineTool.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..c3 import C3dAffineTool diff --git a/nipype/interfaces/tests/test_auto_CopyMeta.py b/nipype/interfaces/tests/test_auto_CopyMeta.py index 012edfa886..bb56d75f50 100644 --- a/nipype/interfaces/tests/test_auto_CopyMeta.py +++ b/nipype/interfaces/tests/test_auto_CopyMeta.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dcmstack import CopyMeta diff --git a/nipype/interfaces/tests/test_auto_DataFinder.py b/nipype/interfaces/tests/test_auto_DataFinder.py index 9a1b7418df..6ee0258218 100644 --- a/nipype/interfaces/tests/test_auto_DataFinder.py +++ b/nipype/interfaces/tests/test_auto_DataFinder.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..io import DataFinder diff --git a/nipype/interfaces/tests/test_auto_DataGrabber.py b/nipype/interfaces/tests/test_auto_DataGrabber.py index d39db0b527..c395eadd1a 100644 --- a/nipype/interfaces/tests/test_auto_DataGrabber.py +++ b/nipype/interfaces/tests/test_auto_DataGrabber.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..io import DataGrabber diff --git a/nipype/interfaces/tests/test_auto_DataSink.py b/nipype/interfaces/tests/test_auto_DataSink.py index da26854451..15af11755c 100644 --- a/nipype/interfaces/tests/test_auto_DataSink.py +++ b/nipype/interfaces/tests/test_auto_DataSink.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..io import DataSink diff --git a/nipype/interfaces/tests/test_auto_Dcm2nii.py b/nipype/interfaces/tests/test_auto_Dcm2nii.py index 0caa45a1f1..7515809925 100644 --- a/nipype/interfaces/tests/test_auto_Dcm2nii.py +++ b/nipype/interfaces/tests/test_auto_Dcm2nii.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dcm2nii import Dcm2nii diff --git a/nipype/interfaces/tests/test_auto_Dcm2niix.py b/nipype/interfaces/tests/test_auto_Dcm2niix.py index 5917f48583..d33cf3ecd2 100644 --- a/nipype/interfaces/tests/test_auto_Dcm2niix.py +++ b/nipype/interfaces/tests/test_auto_Dcm2niix.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dcm2nii import Dcm2niix diff --git a/nipype/interfaces/tests/test_auto_DcmStack.py b/nipype/interfaces/tests/test_auto_DcmStack.py index 53a5259af5..53bb069426 100644 --- a/nipype/interfaces/tests/test_auto_DcmStack.py +++ 
b/nipype/interfaces/tests/test_auto_DcmStack.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dcmstack import DcmStack diff --git a/nipype/interfaces/tests/test_auto_FreeSurferSource.py b/nipype/interfaces/tests/test_auto_FreeSurferSource.py index 15ea9c66cd..2ce1df80ec 100644 --- a/nipype/interfaces/tests/test_auto_FreeSurferSource.py +++ b/nipype/interfaces/tests/test_auto_FreeSurferSource.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..io import FreeSurferSource diff --git a/nipype/interfaces/tests/test_auto_GroupAndStack.py b/nipype/interfaces/tests/test_auto_GroupAndStack.py index a8f30e32f9..a566467007 100644 --- a/nipype/interfaces/tests/test_auto_GroupAndStack.py +++ b/nipype/interfaces/tests/test_auto_GroupAndStack.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dcmstack import GroupAndStack diff --git a/nipype/interfaces/tests/test_auto_IOBase.py b/nipype/interfaces/tests/test_auto_IOBase.py index c2c2f96431..ddac7b4449 100644 --- a/nipype/interfaces/tests/test_auto_IOBase.py +++ b/nipype/interfaces/tests/test_auto_IOBase.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..io import IOBase diff --git a/nipype/interfaces/tests/test_auto_JSONFileGrabber.py b/nipype/interfaces/tests/test_auto_JSONFileGrabber.py index 03a65cf6c2..e5ffe9ca44 100644 --- a/nipype/interfaces/tests/test_auto_JSONFileGrabber.py +++ b/nipype/interfaces/tests/test_auto_JSONFileGrabber.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..io import JSONFileGrabber diff --git a/nipype/interfaces/tests/test_auto_JSONFileSink.py b/nipype/interfaces/tests/test_auto_JSONFileSink.py index 002997912b..878ef899c4 100644 --- a/nipype/interfaces/tests/test_auto_JSONFileSink.py +++ b/nipype/interfaces/tests/test_auto_JSONFileSink.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..io import JSONFileSink diff --git a/nipype/interfaces/tests/test_auto_LookupMeta.py b/nipype/interfaces/tests/test_auto_LookupMeta.py index 29100aaef7..c0ee8d9b59 100644 --- a/nipype/interfaces/tests/test_auto_LookupMeta.py +++ b/nipype/interfaces/tests/test_auto_LookupMeta.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dcmstack import LookupMeta diff --git a/nipype/interfaces/tests/test_auto_MatlabCommand.py b/nipype/interfaces/tests/test_auto_MatlabCommand.py index c1b971d25d..b661dcb157 100644 --- a/nipype/interfaces/tests/test_auto_MatlabCommand.py +++ b/nipype/interfaces/tests/test_auto_MatlabCommand.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..matlab import MatlabCommand diff --git a/nipype/interfaces/tests/test_auto_MergeNifti.py b/nipype/interfaces/tests/test_auto_MergeNifti.py index 9e0a017c60..a2e4e54913 100644 --- a/nipype/interfaces/tests/test_auto_MergeNifti.py +++ b/nipype/interfaces/tests/test_auto_MergeNifti.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dcmstack import MergeNifti diff --git a/nipype/interfaces/tests/test_auto_MeshFix.py b/nipype/interfaces/tests/test_auto_MeshFix.py index 
3cc1541d6d..7b3461e6ee 100644 --- a/nipype/interfaces/tests/test_auto_MeshFix.py +++ b/nipype/interfaces/tests/test_auto_MeshFix.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..meshfix import MeshFix diff --git a/nipype/interfaces/tests/test_auto_MySQLSink.py b/nipype/interfaces/tests/test_auto_MySQLSink.py index 048699659a..c67c938e7a 100644 --- a/nipype/interfaces/tests/test_auto_MySQLSink.py +++ b/nipype/interfaces/tests/test_auto_MySQLSink.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..io import MySQLSink diff --git a/nipype/interfaces/tests/test_auto_NiftiGeneratorBase.py b/nipype/interfaces/tests/test_auto_NiftiGeneratorBase.py index 88bc12dfa2..9bdd24ac4a 100644 --- a/nipype/interfaces/tests/test_auto_NiftiGeneratorBase.py +++ b/nipype/interfaces/tests/test_auto_NiftiGeneratorBase.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dcmstack import NiftiGeneratorBase diff --git a/nipype/interfaces/tests/test_auto_NilearnBaseInterface.py b/nipype/interfaces/tests/test_auto_NilearnBaseInterface.py index 38e4cfd698..0c9f8e2fc9 100644 --- a/nipype/interfaces/tests/test_auto_NilearnBaseInterface.py +++ b/nipype/interfaces/tests/test_auto_NilearnBaseInterface.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..nilearn import NilearnBaseInterface diff --git a/nipype/interfaces/tests/test_auto_PETPVC.py b/nipype/interfaces/tests/test_auto_PETPVC.py index c5283435d5..6b598f8d70 100644 --- a/nipype/interfaces/tests/test_auto_PETPVC.py +++ b/nipype/interfaces/tests/test_auto_PETPVC.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..petpvc import PETPVC diff --git a/nipype/interfaces/tests/test_auto_Quickshear.py b/nipype/interfaces/tests/test_auto_Quickshear.py index 7f39a6bc96..4f5be9f0b5 100644 --- a/nipype/interfaces/tests/test_auto_Quickshear.py +++ b/nipype/interfaces/tests/test_auto_Quickshear.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..quickshear import Quickshear diff --git a/nipype/interfaces/tests/test_auto_Reorient.py b/nipype/interfaces/tests/test_auto_Reorient.py index 2e45a1ca7f..6683d6de51 100644 --- a/nipype/interfaces/tests/test_auto_Reorient.py +++ b/nipype/interfaces/tests/test_auto_Reorient.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..image import Reorient diff --git a/nipype/interfaces/tests/test_auto_Rescale.py b/nipype/interfaces/tests/test_auto_Rescale.py index e180c82988..0354dd7777 100644 --- a/nipype/interfaces/tests/test_auto_Rescale.py +++ b/nipype/interfaces/tests/test_auto_Rescale.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..image import Rescale diff --git a/nipype/interfaces/tests/test_auto_S3DataGrabber.py b/nipype/interfaces/tests/test_auto_S3DataGrabber.py index 7c69413eb0..ff13619248 100644 --- a/nipype/interfaces/tests/test_auto_S3DataGrabber.py +++ b/nipype/interfaces/tests/test_auto_S3DataGrabber.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..io import S3DataGrabber diff --git 
a/nipype/interfaces/tests/test_auto_SQLiteSink.py b/nipype/interfaces/tests/test_auto_SQLiteSink.py index ea03663c4c..e826eb3b8a 100644 --- a/nipype/interfaces/tests/test_auto_SQLiteSink.py +++ b/nipype/interfaces/tests/test_auto_SQLiteSink.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..io import SQLiteSink diff --git a/nipype/interfaces/tests/test_auto_SSHDataGrabber.py b/nipype/interfaces/tests/test_auto_SSHDataGrabber.py index cc7aa22e38..e3755c8a75 100644 --- a/nipype/interfaces/tests/test_auto_SSHDataGrabber.py +++ b/nipype/interfaces/tests/test_auto_SSHDataGrabber.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..io import SSHDataGrabber diff --git a/nipype/interfaces/tests/test_auto_SelectFiles.py b/nipype/interfaces/tests/test_auto_SelectFiles.py index bf438fb826..ed6c276ddf 100644 --- a/nipype/interfaces/tests/test_auto_SelectFiles.py +++ b/nipype/interfaces/tests/test_auto_SelectFiles.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..io import SelectFiles diff --git a/nipype/interfaces/tests/test_auto_SignalExtraction.py b/nipype/interfaces/tests/test_auto_SignalExtraction.py index bc76f5261a..3ddd5ea222 100644 --- a/nipype/interfaces/tests/test_auto_SignalExtraction.py +++ b/nipype/interfaces/tests/test_auto_SignalExtraction.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..nilearn import SignalExtraction diff --git a/nipype/interfaces/tests/test_auto_SlicerCommandLine.py b/nipype/interfaces/tests/test_auto_SlicerCommandLine.py index 057628e879..43ae697eec 100644 --- a/nipype/interfaces/tests/test_auto_SlicerCommandLine.py +++ b/nipype/interfaces/tests/test_auto_SlicerCommandLine.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dynamic_slicer import SlicerCommandLine diff --git a/nipype/interfaces/tests/test_auto_SplitNifti.py b/nipype/interfaces/tests/test_auto_SplitNifti.py index e1f6539fab..b0859f3d4a 100644 --- a/nipype/interfaces/tests/test_auto_SplitNifti.py +++ b/nipype/interfaces/tests/test_auto_SplitNifti.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..dcmstack import SplitNifti diff --git a/nipype/interfaces/tests/test_auto_XNATSink.py b/nipype/interfaces/tests/test_auto_XNATSink.py index b4db5ec8d3..57aa329ce4 100644 --- a/nipype/interfaces/tests/test_auto_XNATSink.py +++ b/nipype/interfaces/tests/test_auto_XNATSink.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..io import XNATSink diff --git a/nipype/interfaces/tests/test_auto_XNATSource.py b/nipype/interfaces/tests/test_auto_XNATSource.py index 8faa79af81..7b6acd6a6f 100644 --- a/nipype/interfaces/tests/test_auto_XNATSource.py +++ b/nipype/interfaces/tests/test_auto_XNATSource.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..io import XNATSource diff --git a/nipype/interfaces/utility/tests/test_auto_AssertEqual.py b/nipype/interfaces/utility/tests/test_auto_AssertEqual.py index 284e0f4d62..df61f8c2c0 100644 --- a/nipype/interfaces/utility/tests/test_auto_AssertEqual.py +++ b/nipype/interfaces/utility/tests/test_auto_AssertEqual.py @@ -1,5 +1,4 @@ # 
AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import AssertEqual diff --git a/nipype/interfaces/utility/tests/test_auto_CSVReader.py b/nipype/interfaces/utility/tests/test_auto_CSVReader.py index 8a51ca4170..6026fa8377 100644 --- a/nipype/interfaces/utility/tests/test_auto_CSVReader.py +++ b/nipype/interfaces/utility/tests/test_auto_CSVReader.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..csv import CSVReader diff --git a/nipype/interfaces/utility/tests/test_auto_Function.py b/nipype/interfaces/utility/tests/test_auto_Function.py index 5c2505fe16..f4e353bb27 100644 --- a/nipype/interfaces/utility/tests/test_auto_Function.py +++ b/nipype/interfaces/utility/tests/test_auto_Function.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..wrappers import Function diff --git a/nipype/interfaces/utility/tests/test_auto_IdentityInterface.py b/nipype/interfaces/utility/tests/test_auto_IdentityInterface.py index 97523d0b86..be12e0bad7 100644 --- a/nipype/interfaces/utility/tests/test_auto_IdentityInterface.py +++ b/nipype/interfaces/utility/tests/test_auto_IdentityInterface.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import IdentityInterface diff --git a/nipype/interfaces/utility/tests/test_auto_Merge.py b/nipype/interfaces/utility/tests/test_auto_Merge.py index 71e7d2db0a..a7ed5c3a31 100644 --- a/nipype/interfaces/utility/tests/test_auto_Merge.py +++ b/nipype/interfaces/utility/tests/test_auto_Merge.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import Merge diff --git a/nipype/interfaces/utility/tests/test_auto_Rename.py b/nipype/interfaces/utility/tests/test_auto_Rename.py index 1e6e1cab34..c98d50414e 100644 --- a/nipype/interfaces/utility/tests/test_auto_Rename.py +++ b/nipype/interfaces/utility/tests/test_auto_Rename.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import Rename diff --git a/nipype/interfaces/utility/tests/test_auto_Select.py b/nipype/interfaces/utility/tests/test_auto_Select.py index e241f7ed76..abc81b45da 100644 --- a/nipype/interfaces/utility/tests/test_auto_Select.py +++ b/nipype/interfaces/utility/tests/test_auto_Select.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import Select diff --git a/nipype/interfaces/utility/tests/test_auto_Split.py b/nipype/interfaces/utility/tests/test_auto_Split.py index 8acbceef99..20dbd948dc 100644 --- a/nipype/interfaces/utility/tests/test_auto_Split.py +++ b/nipype/interfaces/utility/tests/test_auto_Split.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import Split diff --git a/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py b/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py index 785e87e8b1..1f07ed7d70 100644 --- a/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py +++ b/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..vista import Vnifti2Image diff --git a/nipype/interfaces/vista/tests/test_auto_VtoMat.py 
b/nipype/interfaces/vista/tests/test_auto_VtoMat.py index ee16266402..114e63fce8 100644 --- a/nipype/interfaces/vista/tests/test_auto_VtoMat.py +++ b/nipype/interfaces/vista/tests/test_auto_VtoMat.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..vista import VtoMat diff --git a/nipype/interfaces/workbench/tests/test_auto_MetricResample.py b/nipype/interfaces/workbench/tests/test_auto_MetricResample.py index 46a66aa728..e08c87658e 100644 --- a/nipype/interfaces/workbench/tests/test_auto_MetricResample.py +++ b/nipype/interfaces/workbench/tests/test_auto_MetricResample.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..metric import MetricResample diff --git a/nipype/interfaces/workbench/tests/test_auto_WBCommand.py b/nipype/interfaces/workbench/tests/test_auto_WBCommand.py index b496a270dd..cccba55c95 100644 --- a/nipype/interfaces/workbench/tests/test_auto_WBCommand.py +++ b/nipype/interfaces/workbench/tests/test_auto_WBCommand.py @@ -1,5 +1,4 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals from ..base import WBCommand From fa347ae8fec92547b55c6e9e564fb5a6034dbe00 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 25 Jul 2018 15:37:57 -0400 Subject: [PATCH 0010/1665] CI: Drop 2.7, add 3.7-dev on Travis --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index bc60cab99c..c57910ed80 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,10 +4,10 @@ sudo: required language: python # our build matrix python: -- 2.7 - 3.4 - 3.5 - 3.6 +- 3.7-dev env: global: From f25cfc0e75ad302bcf7ce0137bffda224362f734 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Wed, 25 Jul 2018 15:44:35 -0400 Subject: [PATCH 0011/1665] RF: Purge PY2/PY3 indicators --- nipype/interfaces/base/core.py | 4 +--- nipype/pipeline/engine/utils.py | 5 ++--- nipype/pkg_info.py | 7 +------ nipype/sphinxext/plot_workflow.py | 13 ++----------- nipype/utils/draw_gantt_chart.py | 9 ++------- nipype/utils/tests/test_cmd.py | 6 ------ setup.py | 13 ++----------- 7 files changed, 10 insertions(+), 47 deletions(-) diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index c4f4840dd5..ee58d0e018 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -43,8 +43,6 @@ iflogger = logging.getLogger('nipype.interface') -PY35 = sys.version_info >= (3, 5) -PY3 = sys.version_info[0] > 2 VALID_TERMINAL_OUTPUT = [ 'stream', 'allatonce', 'file', 'file_split', 'file_stdout', 'file_stderr', 'none' @@ -644,7 +642,7 @@ def save_inputs_to_json(self, json_file): """ inputs = self.inputs.get_traitsfree() iflogger.debug('saving inputs {}', inputs) - with open(json_file, 'w' if PY3 else 'wb') as fhandle: + with open(json_file, 'w') as fhandle: json.dump(inputs, fhandle, indent=4, ensure_ascii=False) def _pre_run_hook(self, runtime): diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index 17554e3615..cfca520d00 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -47,7 +47,6 @@ from funcsigs import signature logger = logging.getLogger('nipype.workflow') -PY3 = sys.version_info[0] > 2 try: dfs_preorder = nx.dfs_preorder @@ -1595,7 +1594,7 @@ def write_workflow_resources(graph, filename=None, append=None): # If we append different runs, then we will see different # "bursts" of timestamps corresponding to those executions. if append and os.path.isfile(filename): - with open(filename, 'r' if PY3 else 'rb') as rsf: + with open(filename, 'r') as rsf: big_dict = json.load(rsf) for _, node in enumerate(graph.nodes()): @@ -1633,7 +1632,7 @@ def write_workflow_resources(graph, filename=None, append=None): big_dict['mapnode'] += [subidx] * nsamples big_dict['params'] += [params] * nsamples - with open(filename, 'w' if PY3 else 'wb') as rsf: + with open(filename, 'w') as rsf: json.dump(big_dict, rsf, ensure_ascii=False) return filename diff --git a/nipype/pkg_info.py b/nipype/pkg_info.py index 1d1150e318..ab664f743c 100644 --- a/nipype/pkg_info.py +++ b/nipype/pkg_info.py @@ -6,7 +6,6 @@ import subprocess COMMIT_INFO_FNAME = 'COMMIT_INFO.txt' -PY3 = sys.version_info[0] >= 3 def pkg_commit_hash(pkg_path): @@ -61,9 +60,7 @@ def pkg_commit_hash(pkg_path): shell=True) repo_commit, _ = proc.communicate() if repo_commit: - if PY3: - repo_commit = repo_commit.decode() - return 'repository', repo_commit.strip() + return 'repository', repo_commit.decode().strip() return '(none found)', '' @@ -82,8 +79,6 @@ def get_pkg_info(pkg_path): ''' src, hsh = pkg_commit_hash(pkg_path) from .info import VERSION - if not PY3: - src, hsh, VERSION = src.encode(), hsh.encode(), VERSION.encode() import networkx import nibabel import numpy diff --git a/nipype/sphinxext/plot_workflow.py b/nipype/sphinxext/plot_workflow.py index 5caa4e1645..1425b19450 100644 --- a/nipype/sphinxext/plot_workflow.py +++ b/nipype/sphinxext/plot_workflow.py @@ -144,10 +144,6 @@ def format_template(template, **kw): -PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 - - def _mkdirp(folder): """ Equivalent to bash's mkdir -p @@ -274,8 +270,7 @@ def setup(app): app.add_config_value('wf_working_directory', None, True) 
app.add_config_value('wf_template', None, True) - app.connect('doctree-read'.encode() - if PY2 else 'doctree-read', mark_wf_labels) + app.connect('doctree-read', mark_wf_labels) metadata = {'parallel_read_safe': True, 'parallel_write_safe': True} return metadata @@ -456,11 +451,7 @@ def run_code(code, code_path, ns=None, function_name=None): # Redirect stdout stdout = sys.stdout - if PY3: - sys.stdout = io.StringIO() - else: - from cStringIO import StringIO - sys.stdout = StringIO() + sys.stdout = io.StringIO() # Assign a do-nothing print function to the namespace. There # doesn't seem to be any other way to provide a way to (not) print diff --git a/nipype/utils/draw_gantt_chart.py b/nipype/utils/draw_gantt_chart.py index 7e6dbf94ed..6a037d34e0 100644 --- a/nipype/utils/draw_gantt_chart.py +++ b/nipype/utils/draw_gantt_chart.py @@ -21,8 +21,6 @@ 'install the pandas package') pass -PY3 = sys.version_info[0] > 2 - def create_event_dict(start_time, nodes_list): ''' @@ -316,10 +314,7 @@ def draw_resource_bar(start_time, finish_time, time_series, space_between_minutes = space_between_minutes / scale # Iterate through time series - if PY3: - ts_items = time_series.items() - else: - ts_items = time_series.iteritems() + ts_items = time_series.items() ts_len = len(time_series) for idx, (ts_start, amount) in enumerate(ts_items): @@ -555,5 +550,5 @@ def generate_gantt_chart(logfile, ''' # save file - with open(logfile + '.html', 'w' if PY3 else 'wb') as html_file: + with open(logfile + '.html', 'w') as html_file: html_file.write(html_string) diff --git a/nipype/utils/tests/test_cmd.py b/nipype/utils/tests/test_cmd.py index 87a6005c2e..52861e9fb7 100644 --- a/nipype/utils/tests/test_cmd.py +++ b/nipype/utils/tests/test_cmd.py @@ -6,8 +6,6 @@ from io import StringIO from ...utils import nipype_cmd -PY2 = sys.version_info[0] < 3 - @contextmanager def capture_sys_output(): @@ -35,10 +33,6 @@ def test_main_returns_2_on_empty(self): nipype_cmd: error: the following arguments are required: module, interface """ - if PY2: - msg = """usage: nipype_cmd [-h] module interface -nipype_cmd: error: too few arguments -""" assert stderr.getvalue() == msg assert stdout.getvalue() == '' diff --git a/setup.py b/setup.py index 37677afd35..b48b5693df 100755 --- a/setup.py +++ b/setup.py @@ -22,9 +22,6 @@ from setuptools.command.build_py import build_py -PY3 = sys.version_info[0] >= 3 - - class BuildWithCommitInfoCommand(build_py): """ Return extended build command class for recording commit @@ -70,20 +67,14 @@ def run(self): stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) - repo_commit, _ = proc.communicate() - # Fix for python 3 - if PY3: - repo_commit = repo_commit.decode() + repo_commit = proc.communicate()[0].decode() # We write the installation commit even if it's empty cfg_parser = configparser.RawConfigParser() cfg_parser.read(pjoin('nipype', 'COMMIT_INFO.txt')) cfg_parser.set('commit hash', 'install_hash', repo_commit.strip()) out_pth = pjoin(self.build_lib, 'nipype', 'COMMIT_INFO.txt') - if PY3: - cfg_parser.write(open(out_pth, 'wt')) - else: - cfg_parser.write(open(out_pth, 'wb')) + cfg_parser.write(open(out_pth, 'wt')) def main(): From 323663f2856090fd34ee5a7822c15c9bfc3cbe62 Mon Sep 17 00:00:00 2001 From: "Christopher J.
Markiewicz" Date: Wed, 25 Jul 2018 15:47:10 -0400 Subject: [PATCH 0012/1665] RF: Purge "from io import open" --- nipype/algorithms/tests/test_confounds.py | 2 -- nipype/interfaces/mrtrix/convert.py | 2 -- nipype/scripts/cli.py | 2 -- 3 files changed, 6 deletions(-) diff --git a/nipype/algorithms/tests/test_confounds.py b/nipype/algorithms/tests/test_confounds.py index 2c601374ab..c9ef93f49f 100644 --- a/nipype/algorithms/tests/test_confounds.py +++ b/nipype/algorithms/tests/test_confounds.py @@ -2,8 +2,6 @@ # -*- coding: utf-8 -*- import os -from io import open - import pytest from nipype.testing import example_data from nipype.algorithms.confounds import FramewiseDisplacement, ComputeDVARS, \ diff --git a/nipype/interfaces/mrtrix/convert.py b/nipype/interfaces/mrtrix/convert.py index 62b53fc19b..860ae3cfef 100644 --- a/nipype/interfaces/mrtrix/convert.py +++ b/nipype/interfaces/mrtrix/convert.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from io import open - import os.path as op import nibabel as nb import nibabel.trackvis as trk diff --git a/nipype/scripts/cli.py b/nipype/scripts/cli.py index 59d8672cfb..b3bb89bb7b 100644 --- a/nipype/scripts/cli.py +++ b/nipype/scripts/cli.py @@ -1,8 +1,6 @@ #!python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from io import open - import click from .instance import list_interfaces From e2dc9f84fab79cc9453cfba705713f2309f92b71 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 25 Jul 2018 15:49:01 -0400 Subject: [PATCH 0013/1665] DOC: Suggest raise from syntax --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f55c09a41c..cee622ed7b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -116,7 +116,7 @@ One your PR is ready a member of the development team will review your changes t In general, do not catch exceptions without good reason. For non-fatal exceptions, log the exception as a warning and add more information about what may have caused the error. -If you do need to catch an exception, raise a new exception using ``raise_from(NewException("message"), oldException)`` from ``future``. +If you do need to catch an exception, raise a new exception using ``raise NewException("message") from oldException)``. Do not log this, as it creates redundant/confusing logs. #### Testing From 9517c5ab1a3f87e018bea605d0f5554736e96d09 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Thu, 26 Jul 2018 08:32:04 -0400 Subject: [PATCH 0014/1665] TEST: Drop legacy auto tests --- .../ants/tests/test_auto_GenWarpFields.py | 67 ----------------- .../ants/tests/test_auto_antsIntroduction.py | 67 ----------------- .../tests/test_auto_buildtemplateparallel.py | 72 ------------------- 3 files changed, 206 deletions(-) delete mode 100644 nipype/interfaces/ants/tests/test_auto_GenWarpFields.py delete mode 100644 nipype/interfaces/ants/tests/test_auto_antsIntroduction.py delete mode 100644 nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py diff --git a/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py b/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py deleted file mode 100644 index af91f9a8af..0000000000 --- a/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py +++ /dev/null @@ -1,67 +0,0 @@ -# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals -from ..legacy import GenWarpFields - - -def test_GenWarpFields_inputs(): - input_map = dict( - args=dict(argstr='%s', ), - bias_field_correction=dict(argstr='-n 1', ), - dimension=dict( - argstr='-d %d', - position=1, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - force_proceed=dict(argstr='-f 1', ), - input_image=dict( - argstr='-i %s', - copyfile=False, - mandatory=True, - ), - inverse_warp_template_labels=dict(argstr='-l', ), - max_iterations=dict( - argstr='-m %s', - sep='x', - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_prefix=dict( - argstr='-o %s', - usedefault=True, - ), - quality_check=dict(argstr='-q 1', ), - reference_image=dict( - argstr='-r %s', - copyfile=True, - mandatory=True, - ), - similarity_metric=dict(argstr='-s %s', ), - transformation_model=dict( - argstr='-t %s', - usedefault=True, - ), - ) - inputs = GenWarpFields.input_spec() - - for key, metadata in list(input_map.items()): - for metakey, value in list(metadata.items()): - assert getattr(inputs.traits()[key], metakey) == value -def test_GenWarpFields_outputs(): - output_map = dict( - affine_transformation=dict(), - input_file=dict(), - inverse_warp_field=dict(), - output_file=dict(), - warp_field=dict(), - ) - outputs = GenWarpFields.output_spec() - - for key, metadata in list(output_map.items()): - for metakey, value in list(metadata.items()): - assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py b/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py deleted file mode 100644 index fe21858500..0000000000 --- a/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py +++ /dev/null @@ -1,67 +0,0 @@ -# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals -from ..legacy import antsIntroduction - - -def test_antsIntroduction_inputs(): - input_map = dict( - args=dict(argstr='%s', ), - bias_field_correction=dict(argstr='-n 1', ), - dimension=dict( - argstr='-d %d', - position=1, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - force_proceed=dict(argstr='-f 1', ), - input_image=dict( - argstr='-i %s', - copyfile=False, - mandatory=True, - ), - inverse_warp_template_labels=dict(argstr='-l', ), - max_iterations=dict( - argstr='-m %s', - sep='x', - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_prefix=dict( - argstr='-o %s', - usedefault=True, - ), - quality_check=dict(argstr='-q 1', ), - reference_image=dict( - argstr='-r %s', - copyfile=True, - 
mandatory=True, - ), - similarity_metric=dict(argstr='-s %s', ), - transformation_model=dict( - argstr='-t %s', - usedefault=True, - ), - ) - inputs = antsIntroduction.input_spec() - - for key, metadata in list(input_map.items()): - for metakey, value in list(metadata.items()): - assert getattr(inputs.traits()[key], metakey) == value -def test_antsIntroduction_outputs(): - output_map = dict( - affine_transformation=dict(), - input_file=dict(), - inverse_warp_field=dict(), - output_file=dict(), - warp_field=dict(), - ) - outputs = antsIntroduction.output_spec() - - for key, metadata in list(output_map.items()): - for metakey, value in list(metadata.items()): - assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py b/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py deleted file mode 100644 index 8513003c29..0000000000 --- a/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py +++ /dev/null @@ -1,72 +0,0 @@ -# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals -from ..legacy import buildtemplateparallel - - -def test_buildtemplateparallel_inputs(): - input_map = dict( - args=dict(argstr='%s', ), - bias_field_correction=dict(argstr='-n 1', ), - dimension=dict( - argstr='-d %d', - position=1, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gradient_step_size=dict(argstr='-g %f', ), - in_files=dict( - argstr='%s', - mandatory=True, - position=-1, - ), - iteration_limit=dict( - argstr='-i %d', - usedefault=True, - ), - max_iterations=dict( - argstr='-m %s', - sep='x', - ), - num_cores=dict( - argstr='-j %d', - requires=['parallelization'], - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_prefix=dict( - argstr='-o %s', - usedefault=True, - ), - parallelization=dict( - argstr='-c %d', - usedefault=True, - ), - rigid_body_registration=dict(argstr='-r 1', ), - similarity_metric=dict(argstr='-s %s', ), - transformation_model=dict( - argstr='-t %s', - usedefault=True, - ), - use_first_as_target=dict(), - ) - inputs = buildtemplateparallel.input_spec() - - for key, metadata in list(input_map.items()): - for metakey, value in list(metadata.items()): - assert getattr(inputs.traits()[key], metakey) == value -def test_buildtemplateparallel_outputs(): - output_map = dict( - final_template_file=dict(), - subject_outfiles=dict(), - template_files=dict(), - ) - outputs = buildtemplateparallel.output_spec() - - for key, metadata in list(output_map.items()): - for metakey, value in list(metadata.items()): - assert getattr(outputs.traits()[key], metakey) == value From 1e5344ef29211d2fcc63a968d69ae48fb8f2ae93 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Mon, 13 Aug 2018 09:31:33 -0400 Subject: [PATCH 0015/1665] CI: Build Python 3 wheels only --- .circleci/config.yml | 6 ++++-- setup.cfg | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 884b395809..860070acf3 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -316,6 +316,7 @@ jobs: - run: name: Check pypi preconditions command: | + pyenv local 3.5.2 pip install --upgrade twine future wheel readme_renderer setuptools python setup.py check -r -s python setup.py sdist bdist_wheel @@ -323,7 +324,7 @@ jobs: name: Validate Python 3 installation command: | pyenv local 3.5.2 - pip install dist/nipype-*-py2.py3-none-any.whl + pip install dist/nipype-*-py3-none-any.whl # Futures should not install in Python 3 test $(pip show futures 2>/dev/null | wc -l) = "0" - store_artifacts: @@ -338,6 +339,7 @@ jobs: - run: name: Deploy to PyPI command: | + pyenv local 3.5.2 pip install --upgrade twine future wheel readme_renderer setuptools python setup.py check -r -s python setup.py sdist bdist_wheel @@ -382,7 +384,7 @@ workflows: - pypi_precheck: filters: branches: - only: /rel\/.*/ + only: /(rel|dev)\/.*/ tags: only: /.*/ - compare_base_dockerfiles: diff --git a/setup.cfg b/setup.cfg index 3c6e79cf31..bf1d5a3c1b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,2 +1,2 @@ [bdist_wheel] -universal=1 +python-tag=py3 From 055a59f38c683795af21be061732641b5be96c71 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 13 Aug 2018 09:51:52 -0400 Subject: [PATCH 0016/1665] FIX: Run decode on bytes, not tuple --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index b48b5693df..637432ff8f 100755 --- a/setup.py +++ b/setup.py @@ -67,7 +67,7 @@ def run(self): stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) - repo_commit, _ = proc.communicate().decode() + repo_commit = proc.communicate()[0].decode() # We write the installation commit even if it's empty cfg_parser = configparser.RawConfigParser() From 938cdf4322111020cd192052911ffc5830c8a6eb Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Mon, 13 Aug 2018 11:43:35 -0400 Subject: [PATCH 0017/1665] CI: Do not install future, remove futures check --- .circleci/config.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 860070acf3..932152cae7 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -317,7 +317,7 @@ jobs: name: Check pypi preconditions command: | pyenv local 3.5.2 - pip install --upgrade twine future wheel readme_renderer setuptools + pip install --upgrade twine wheel readme_renderer setuptools python setup.py check -r -s python setup.py sdist bdist_wheel - run: @@ -325,8 +325,6 @@ jobs: command: | pyenv local 3.5.2 pip install dist/nipype-*-py3-none-any.whl - # Futures should not install in Python 3 - test $(pip show futures 2>/dev/null | wc -l) = "0" - store_artifacts: path: /home/circleci/nipype/dist @@ -340,7 +338,7 @@ jobs: name: Deploy to PyPI command: | pyenv local 3.5.2 - pip install --upgrade twine future wheel readme_renderer setuptools + pip install --upgrade twine wheel readme_renderer setuptools python setup.py check -r -s python setup.py sdist bdist_wheel twine upload dist/* From ffd0d8eb213d7da40857e8d484114b130f1bab9a Mon Sep 17 00:00:00 2001 From: adelavega Date: Tue, 9 Oct 2018 09:47:47 -0400 Subject: [PATCH 0018/1665] Lock travis Python 0.6.5 --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 6aaec256d4..3e9a9aaf40 100644 --- a/.travis.yml +++ b/.travis.yml @@ -87,7 +87,7 @@ before_install: - travis_retry pip install -r requirements.txt - travis_retry pip install grabbit==0.1.2 -- travis_retry git clone https://github.com/INCF/pybids.git ${HOME}/pybids && +- travis_retry git clone https://github.com/INCF/pybids.git ${HOME}/pybids && git checkout 0.6.5 pip install -e ${HOME}/pybids install: From 801b295b1e22b87cb273d7003cd31b6bfb112074 Mon Sep 17 00:00:00 2001 From: adelavega Date: Mon, 15 Oct 2018 12:49:07 -0500 Subject: [PATCH 0019/1665] Update BIDSDataGrabber for pybids 0.7, including tests, and derivatives handling --- .travis.yml | 2 +- nipype/interfaces/io.py | 23 ++++++++++++++--------- nipype/interfaces/tests/test_io.py | 4 ++-- 3 files changed, 17 insertions(+), 12 deletions(-) diff --git a/.travis.yml b/.travis.yml index 37316aea61..235bf9892c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -87,7 +87,7 @@ before_install: - travis_retry pip install -r requirements.txt - travis_retry pip install grabbit==0.1.2 -- travis_retry git clone -b 0.6.5 https://github.com/INCF/pybids.git ${HOME}/pybids && pip install -e ${HOME}/pybids +- travis_retry git clone https://github.com/INCF/pybids.git ${HOME}/pybids && pip install -e ${HOME}/pybids install: - travis_retry pip install $EXTRA_PIP_FLAGS -e .[$NIPYPE_EXTRAS] diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index 627d9ca7ac..3aaab428bf 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -2761,8 +2761,10 @@ class BIDSDataGrabberInputSpec(DynamicTraitedSpec): desc='Generate exception if list is empty ' 'for a given field') return_type = traits.Enum('file', 'namedtuple', usedefault=True) - strict = traits.Bool(desc='Return only BIDS "proper" files (e.g., ' - 'ignore derivatives/, sourcedata/, etc.)') + index_derivatives = traits.Bool(False, usedefault=True, + desc='Index derivatives/ sub-directory') + extra_derivatives = traits.List(Directory(exists=True), + desc='Additional derivative directories to index') class BIDSDataGrabber(IOBase): @@ -2788,7 
+2790,7 @@ class BIDSDataGrabber(IOBase): are filtered on common entities, which can be explicitly defined as infields. - >>> bg = BIDSDataGrabber(infields = ['subject'], outfields = ['dwi']) + >>> bg = BIDSDataGrabber(infields = ['subject']) >>> bg.inputs.base_dir = 'ds005/' >>> bg.inputs.subject = '01' >>> bg.inputs.output_query['dwi'] = dict(modality='dwi') @@ -2810,8 +2812,10 @@ def __init__(self, infields=None, **kwargs): if not isdefined(self.inputs.output_query): self.inputs.output_query = { - "func": {"modality": "func", 'extensions': ['nii', '.nii.gz']}, - "anat": {"modality": "anat", 'extensions': ['nii', '.nii.gz']}, + "bold": {"datatype": "func", "suffix": "bold", + "extensions": ["nii", ".nii.gz"]}, + "T1w": {"datatype": "anat", "suffix": "T1w", + "extensions": ["nii", ".nii.gz"]}, } # If infields is empty, use all BIDS entities @@ -2838,10 +2842,11 @@ def _run_interface(self, runtime): return runtime def _list_outputs(self): - exclude = None - if self.inputs.strict: - exclude = ['derivatives/', 'code/', 'sourcedata/'] - layout = bidslayout.BIDSLayout(self.inputs.base_dir, exclude=exclude) + layout = bidslayout.BIDSLayout(self.inputs.base_dir, + derivatives=self.inputs.derivatives) + + if isdefined(self.inputs.extra_derivatives): + layout.add_derivatives(self.inputs.extra_derivatives) # If infield is not given nm input value, silently ignore filters = {} diff --git a/nipype/interfaces/tests/test_io.py b/nipype/interfaces/tests/test_io.py index c81e6f8e06..a2762284b7 100644 --- a/nipype/interfaces/tests/test_io.py +++ b/nipype/interfaces/tests/test_io.py @@ -591,9 +591,9 @@ def test_bids_grabber(tmpdir): bg.inputs.base_dir = os.path.join(datadir, 'ds005') bg.inputs.subject = '01' results = bg.run() - assert 'sub-01_T1w.nii.gz' in map(os.path.basename, results.outputs.anat) + assert 'sub-01_T1w.nii.gz' in map(os.path.basename, results.outputs.T1w) assert 'sub-01_task-mixedgamblestask_run-01_bold.nii.gz' in \ - map(os.path.basename, results.outputs.func) + map(os.path.basename, results.outputs.bold) @pytest.mark.skipif(not have_pybids, From 1d267c89068301a629ab3906d1c353ac947d8f3e Mon Sep 17 00:00:00 2001 From: adelavega Date: Mon, 15 Oct 2018 13:18:38 -0500 Subject: [PATCH 0020/1665] Fix test, outfields --- nipype/interfaces/tests/test_io.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/tests/test_io.py b/nipype/interfaces/tests/test_io.py index a2762284b7..f00555e521 100644 --- a/nipype/interfaces/tests/test_io.py +++ b/nipype/interfaces/tests/test_io.py @@ -633,9 +633,9 @@ def test_bids_infields_outfields(tmpdir): for outfield in outfields: assert(outfield in bg._outputs().traits()) - # now try without defining outfields, we should get anat and func for free + # now try without defining outfields bg = nio.BIDSDataGrabber() - for outfield in ['anat', 'func']: + for outfield in ['T1w', 'bold']: assert outfield in bg._outputs().traits() From b0121c03f06c30fabaaf4d7012e4f8424bbb7392 Mon Sep 17 00:00:00 2001 From: adelavega Date: Tue, 16 Oct 2018 17:07:52 -0500 Subject: [PATCH 0021/1665] Refer to proper input, index_derivatives --- nipype/interfaces/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index 3aaab428bf..303c808263 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -2843,7 +2843,7 @@ def _run_interface(self, runtime): def _list_outputs(self): layout = bidslayout.BIDSLayout(self.inputs.base_dir, - derivatives=self.inputs.derivatives) + 
derivatives=self.inputs.index_derivatives) if isdefined(self.inputs.extra_derivatives): layout.add_derivatives(self.inputs.extra_derivatives) From cc89c582cda5eeaeb6b958e7525f3678a92ced28 Mon Sep 17 00:00:00 2001 From: adelavega Date: Fri, 19 Oct 2018 11:05:10 -0700 Subject: [PATCH 0022/1665] Datatype not modality, in pybids tests --- nipype/interfaces/io.py | 2 +- nipype/interfaces/tests/test_io.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index 303c808263..c64d01e795 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -2793,7 +2793,7 @@ class BIDSDataGrabber(IOBase): >>> bg = BIDSDataGrabber(infields = ['subject']) >>> bg.inputs.base_dir = 'ds005/' >>> bg.inputs.subject = '01' - >>> bg.inputs.output_query['dwi'] = dict(modality='dwi') + >>> bg.inputs.output_query['dwi'] = dict(datatype='dwi') >>> results = bg.run() # doctest: +SKIP """ diff --git a/nipype/interfaces/tests/test_io.py b/nipype/interfaces/tests/test_io.py index f00555e521..b4ac80366b 100644 --- a/nipype/interfaces/tests/test_io.py +++ b/nipype/interfaces/tests/test_io.py @@ -607,7 +607,7 @@ def test_bids_fields(tmpdir): bg = nio.BIDSDataGrabber(infields = ['subject'], outfields = ['dwi']) bg.inputs.base_dir = os.path.join(datadir, 'ds005') bg.inputs.subject = '01' - bg.inputs.output_query['dwi'] = dict(modality='dwi') + bg.inputs.output_query['dwi'] = dict(datatype='dwi') results = bg.run() assert 'sub-01_dwi.nii.gz' in map(os.path.basename, results.outputs.dwi) From dcec1ba83384b36664b6e0a34ebce0b2769b042e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Je=CC=81re=CC=81my=20Guillon?= Date: Thu, 13 Dec 2018 12:12:49 +0100 Subject: [PATCH 0023/1665] Quick fix of MRtrix tckgen option --- nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py | 2 +- nipype/interfaces/mrtrix3/tracking.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py index dff5c783ee..461915ff19 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py @@ -40,7 +40,7 @@ def test_Tractography_inputs(): argstr='-samples %d', usedefault=True, ), - n_tracks=dict(argstr='-number %d', ), + n_tracks=dict(argstr='-select %d', ), n_trials=dict(argstr='-trials %d', ), noprecompt=dict(argstr='-noprecomputed', ), nthreads=dict( diff --git a/nipype/interfaces/mrtrix3/tracking.py b/nipype/interfaces/mrtrix3/tracking.py index e8bf15a33f..cb879f5c79 100644 --- a/nipype/interfaces/mrtrix3/tracking.py +++ b/nipype/interfaces/mrtrix3/tracking.py @@ -75,7 +75,7 @@ class TractographyInputSpec(MRTrix3BaseInputSpec): desc=('set the maximum angle between successive steps (default ' 'is 90deg x stepsize / voxelsize)')) n_tracks = traits.Int( - argstr='-number %d', + argstr='-select %d', desc=('set the desired number of tracks. 
The program will continue' ' to generate tracks until this number of tracks have been ' 'selected and written to the output file')) From aada7305145c4a926b3e640a1fe657fe6d667463 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Je=CC=81re=CC=81my=20Guillon?= Date: Thu, 13 Dec 2018 12:13:39 +0100 Subject: [PATCH 0024/1665] Some auto pep8 fixes --- .mailmap | 2 +- README.rst | 2 +- doc/devel/interface_specs.rst | 4 +- .../ants/tests/test_auto_AntsJointFusion.py | 4 +- nipype/interfaces/dcm2nii.py | 2 +- nipype/interfaces/fsl/tests/test_auto_Eddy.py | 14 +++- nipype/interfaces/spm/__init__.py | 4 +- nipype/interfaces/spm/preprocess.py | 74 +++++++++---------- nipype/pipeline/engine/tests/test_engine.py | 2 +- nipype/pipeline/plugins/legacymultiproc.py | 2 +- 10 files changed, 58 insertions(+), 52 deletions(-) diff --git a/.mailmap b/.mailmap index 6a13f46ee1..141a6455af 100644 --- a/.mailmap +++ b/.mailmap @@ -93,7 +93,7 @@ Josh Warner JDWarner Josh Warner (Mac) Kai Schlamp medihack Jessica Forbes jessicaforbes Katie Bottenhorn 62442katieb -Kesshi Jordan Kesshi Jordan +Kesshi Jordan Kesshi Jordan Kesshi Jordan Kesshi Jordan Kesshi Jordan Kesshi Jordan Kesshi Jordan Kesshi Jordan diff --git a/README.rst b/README.rst index b0d0e7a0be..24ec6eee26 100644 --- a/README.rst +++ b/README.rst @@ -91,7 +91,7 @@ To participate in the Nipype development related discussions please use the foll Please add *[nipype]* to the subject line when posting on the mailing list. -You can even hangout with the Nipype developers in their +You can even hangout with the Nipype developers in their `Gitter `_ channel or in the BrainHack `Slack `_ channel. (Click `here `_ to join the Slack workspace.) diff --git a/doc/devel/interface_specs.rst b/doc/devel/interface_specs.rst index 56e1be9ed4..26623266c6 100644 --- a/doc/devel/interface_specs.rst +++ b/doc/devel/interface_specs.rst @@ -12,8 +12,8 @@ In case of trouble, we encourage you to post on `NeuroStars `_ channel or in the +Alternatively, you're welcome to chat with us in the Nipype +`Gitter `_ channel or in the BrainHack `Slack `_ channel. (Click `here `_ to join the Slack workspace.) diff --git a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py index 341f250a2c..292e6b398b 100644 --- a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py +++ b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py @@ -52,8 +52,8 @@ def test_AntsJointFusion_inputs(): hash_files=False, ), out_label_post_prob_name_format=dict( - requires=['out_label_fusion', - 'out_intensity_fusion_name_format'], ), + requires=['out_label_fusion', 'out_intensity_fusion_name_format'], + ), patch_metric=dict(argstr='-m %s', ), patch_radius=dict( argstr='-p %s', diff --git a/nipype/interfaces/dcm2nii.py b/nipype/interfaces/dcm2nii.py index 65771873a5..31885adaba 100644 --- a/nipype/interfaces/dcm2nii.py +++ b/nipype/interfaces/dcm2nii.py @@ -374,7 +374,7 @@ class Dcm2niix(CommandLine): converts any files in the directory containing the files in the list. We also do not support nested filenames with this option. 
**Thus all files must have a common root directory.** - + >>> converter = Dcm2niix() >>> converter.inputs.source_names = ['functional_1.dcm', 'functional_2.dcm'] >>> converter.inputs.compression = 5 diff --git a/nipype/interfaces/fsl/tests/test_auto_Eddy.py b/nipype/interfaces/fsl/tests/test_auto_Eddy.py index c3cebf6937..55d9409e17 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Eddy.py +++ b/nipype/interfaces/fsl/tests/test_auto_Eddy.py @@ -6,6 +6,10 @@ def test_Eddy_inputs(): input_map = dict( args=dict(argstr='%s', ), + cnr_maps=dict( + argstr='--cnr_maps', + min_ver='5.0.10', + ), dont_peas=dict(argstr='--dont_peas', ), dont_sep_offs_move=dict(argstr='--dont_sep_offs_move', ), environ=dict( @@ -71,11 +75,13 @@ def test_Eddy_inputs(): ), output_type=dict(), repol=dict(argstr='--repol', ), + residuals=dict( + argstr='--residuals', + min_ver='5.0.10', + ), session=dict(argstr='--session=%s', ), slm=dict(argstr='--slm=%s', ), use_cuda=dict(), - cnr_maps=dict(argstr='--cnr_maps', min_ver='5.0.10', ), - residuals=dict(argstr='--residuals', min_ver='5.0.10', ), ) inputs = Eddy.input_spec() @@ -84,15 +90,15 @@ def test_Eddy_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Eddy_outputs(): output_map = dict( + out_cnr_maps=dict(), out_corrected=dict(), out_movement_rms=dict(), out_outlier_report=dict(), out_parameter=dict(), + out_residuals=dict(), out_restricted_movement_rms=dict(), out_rotated_bvecs=dict(), out_shell_alignment_parameters=dict(), - out_cnr_maps=dict(), - out_residuals=dict(), ) outputs = Eddy.output_spec() diff --git a/nipype/interfaces/spm/__init__.py b/nipype/interfaces/spm/__init__.py index 09a680f692..b97c828450 100644 --- a/nipype/interfaces/spm/__init__.py +++ b/nipype/interfaces/spm/__init__.py @@ -5,9 +5,9 @@ from .base import (Info, SPMCommand, logger, no_spm, scans_for_fname, scans_for_fnames) -from .preprocess import (FieldMap, SliceTiming, Realign, RealignUnwarp, +from .preprocess import (FieldMap, SliceTiming, Realign, RealignUnwarp, Coregister, Normalize, Normalize12, Segment, - Smooth, NewSegment, DARTEL, DARTELNorm2MNI, + Smooth, NewSegment, DARTEL, DARTELNorm2MNI, CreateWarped, VBMSegment) from .model import (Level1Design, EstimateModel, EstimateContrast, Threshold, OneSampleTTestDesign, TwoSampleTTestDesign, diff --git a/nipype/interfaces/spm/preprocess.py b/nipype/interfaces/spm/preprocess.py index d23887c890..3548f8c597 100644 --- a/nipype/interfaces/spm/preprocess.py +++ b/nipype/interfaces/spm/preprocess.py @@ -443,7 +443,7 @@ class RealignUnwarpInputSpec(SPMCommandInputSpec): traits.Either(ImageFileSPM(exists=True), traits.List(ImageFileSPM(exists=True))), field='data.scans', - mandatory=True, + mandatory=True, copyfile=True, desc='list of filenames to realign and unwarp') phase_map = File( @@ -452,52 +452,52 @@ class RealignUnwarpInputSpec(SPMCommandInputSpec): 'behaviour, the same map will be used for all sessions', copyfile=False) quality = traits.Range( - low=0.0, - high=1.0, + low=0.0, + high=1.0, field='eoptions.quality', desc='0.1 = fast, 1.0 = precise') fwhm = traits.Range( - low=0.0, + low=0.0, field='eoptions.fwhm', desc='gaussian smoothing kernel width') separation = traits.Range( - low=0.0, + low=0.0, field='eoptions.sep', desc='sampling separation in mm') register_to_mean = traits.Bool( field='eoptions.rtm', desc='Indicate whether realignment is done to the mean image') weight_img = File( - exists=True, + exists=True, field='eoptions.weight', desc='filename of weighting image') interp = traits.Range( - low=0, - 
high=7, + low=0, + high=7, field='eoptions.einterp', desc='degree of b-spline used for interpolation') wrap = traits.List( - traits.Int(), - minlen=3, + traits.Int(), + minlen=3, maxlen=3, field='eoptions.ewrap', desc='Check if interpolation should wrap in [x,y,z]') est_basis_func = traits.List( - traits.Int(), - minlen=2, + traits.Int(), + minlen=2, maxlen=2, field='uweoptions.basfcn', desc='Number of basis functions to use for each dimension') est_reg_order = traits.Range( - low=0, - high=3, + low=0, + high=3, field='uweoptions.regorder', desc=('This parameter determines how to balance the compromise between likelihood ' 'maximization and smoothness maximization of the estimated field.')) est_reg_factor = traits.ListInt( - [100000], + [100000], field='uweoptions.lambda', - minlen=1, + minlen=1, maxlen=1, usedefault=True, desc='Regularisation factor. Default: 100000 (medium).') @@ -506,51 +506,51 @@ class RealignUnwarpInputSpec(SPMCommandInputSpec): desc=('Jacobian deformations. In theory a good idea to include them, ' ' in practice a bad idea. Default: No.')) est_first_order_effects = traits.List( - traits.Int(), - minlen=1, + traits.Int(), + minlen=1, maxlen=6, field='uweoptions.fot', desc='First order effects should only depend on pitch and roll, i.e. [4 5]') est_second_order_effects = traits.List( - traits.Int(), - minlen=1, + traits.Int(), + minlen=1, maxlen=6, field='uweoptions.sot', desc='List of second order terms to model second derivatives of.') est_unwarp_fwhm = traits.Range( - low=0.0, + low=0.0, field='uweoptions.uwfwhm', desc='gaussian smoothing kernel width for unwarp') est_re_est_mov_par = traits.Bool( field='uweoptions.rem', desc='Re-estimate movement parameters at each unwarping iteration.') est_num_of_iterations = traits.ListInt( - [5], + [5], field='uweoptions.noi', - minlen=1, - maxlen=1, + minlen=1, + maxlen=1, usedefault=True, desc='Number of iterations.') est_taylor_expansion_point = traits.String( - 'Average', + 'Average', field='uweoptions.expround', usedefault=True, desc='Point in position space to perform Taylor-expansion around.') reslice_which = traits.ListInt( - [2, 1], + [2, 1], field='uwroptions.uwwhich', - minlen=2, - maxlen=2, + minlen=2, + maxlen=2, usedefault=True, desc='determines which images to reslice') reslice_interp = traits.Range( - low=0, - high=7, + low=0, + high=7, field='uwroptions.rinterp', desc='degree of b-spline used for interpolation') reslice_wrap = traits.List( - traits.Int(), - minlen=3, + traits.Int(), + minlen=3, maxlen=3, field='uwroptions.wrap', desc='Check if interpolation should wrap in [x,y,z]') @@ -558,8 +558,8 @@ class RealignUnwarpInputSpec(SPMCommandInputSpec): field='uwroptions.mask', desc='True/False mask output image') out_prefix = traits.String( - 'u', - field='uwroptions.prefix', + 'u', + field='uwroptions.prefix', usedefault=True, desc='realigned and unwarped output prefix') @@ -585,7 +585,7 @@ class RealignUnwarpOutputSpec(TraitedSpec): class RealignUnwarp(SPMCommand): """Use spm_uw_estimate for estimating within subject registration and unwarping - of time series. Function accepts only one single field map. If in_files is a + of time series. Function accepts only one single field map. If in_files is a list of files they will be treated as separate sessions but associated to the same fieldmap. 
@@ -617,7 +617,7 @@ def _format_arg(self, opt, spec, val): keep4d=False, separate_sessions=True) return super(RealignUnwarp, self)._format_arg(opt, spec, val) - + def _parse_inputs(self, skip=()): @@ -630,7 +630,7 @@ def _parse_inputs(self, skip=()): if isdefined(self.inputs.in_files): if isinstance(self.inputs.in_files, list): - data = [dict(scans = sess, pmscan = pmscan) + data = [dict(scans = sess, pmscan = pmscan) for sess in spmdict['data']['scans']] else: data = [dict(scans = spmdict['data']['scans'], pmscan = pmscan)] diff --git a/nipype/pipeline/engine/tests/test_engine.py b/nipype/pipeline/engine/tests/test_engine.py index 56d05a1d56..189a918833 100644 --- a/nipype/pipeline/engine/tests/test_engine.py +++ b/nipype/pipeline/engine/tests/test_engine.py @@ -441,7 +441,7 @@ def test_write_graph_runs(tmpdir): assert os.path.exists('graph.dot') or os.path.exists( 'graph_detailed.dot') - + try: os.remove('graph.dot') except OSError: diff --git a/nipype/pipeline/plugins/legacymultiproc.py b/nipype/pipeline/plugins/legacymultiproc.py index bfc1773a92..9e96144a4a 100644 --- a/nipype/pipeline/plugins/legacymultiproc.py +++ b/nipype/pipeline/plugins/legacymultiproc.py @@ -81,7 +81,7 @@ class NonDaemonMixin(object): @property def daemon(self): return False - + @daemon.setter def daemon(self, val): pass From 617a2a9b48ae3e65162cecaac4d7d97ea1537904 Mon Sep 17 00:00:00 2001 From: skoudoro Date: Thu, 13 Dec 2018 14:41:43 -0500 Subject: [PATCH 0025/1665] add DIPY to index --- doc/index.rst | 2 +- doc/links_names.txt | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/index.rst b/doc/index.rst index 62502bd37d..05a72d3495 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -16,7 +16,7 @@ these packages within a single workflow. Nipype provides an environment that encourages interactive exploration of algorithms from different packages (e.g., ANTS_, SPM_, FSL_, FreeSurfer_, Camino_, MRtrix_, MNE_, AFNI_, - Slicer_), eases the design of workflows within and between packages, and + Slicer_, DIPY_), eases the design of workflows within and between packages, and reduces the learning curve necessary to use different packages. Nipype is creating a collaborative platform for neuroimaging software development in a high-level language and addressing limitations of existing pipeline diff --git a/doc/links_names.txt b/doc/links_names.txt index 3c8ee54f32..4cf07795f7 100644 --- a/doc/links_names.txt +++ b/doc/links_names.txt @@ -97,6 +97,7 @@ .. _MRtrix3: http://www.mrtrix.org/ .. _MNE: https://martinos.org/mne/index.html .. _ANTS: http://stnava.github.io/ANTs/ +.. _DIPY: http://dipy.org .. General software .. _gcc: http://gcc.gnu.org From 6e4af1fdb4626d2fb789d3dc489a24e94bb29e2f Mon Sep 17 00:00:00 2001 From: skoudoro Date: Fri, 14 Dec 2018 15:25:48 -0500 Subject: [PATCH 0026/1665] dipy workflow to nipype --- nipype/interfaces/dipy/base.py | 123 ++++++++++++++++++++++- nipype/interfaces/dipy/reconstruction.py | 19 +++- nipype/interfaces/dipy/registration.py | 18 ++++ nipype/interfaces/dipy/tracks.py | 21 +++- 4 files changed, 178 insertions(+), 3 deletions(-) create mode 100644 nipype/interfaces/dipy/registration.py diff --git a/nipype/interfaces/dipy/base.py b/nipype/interfaces/dipy/base.py index 7a9221e3d1..a1c6b8899f 100644 --- a/nipype/interfaces/dipy/base.py +++ b/nipype/interfaces/dipy/base.py @@ -7,11 +7,12 @@ import numpy as np from ... 
import logging from ..base import (traits, File, isdefined, LibraryBaseInterface, - BaseInterfaceInputSpec) + BaseInterfaceInputSpec, TraitedSpec) HAVE_DIPY = True try: import dipy + from dipy.workflows.base import IntrospectiveArgumentParser except ImportError: HAVE_DIPY = False @@ -75,3 +76,123 @@ def _gen_filename(self, name, ext=None): ext = fext return out_prefix + '_' + name + ext + + +def convert_to_traits_type(dipy_type, is_file=False): + """Convert DIPY type to Traits type.""" + dipy_type = dipy_type.lower() + is_mandatory = bool("optional" not in dipy_type) + if "variable" in dipy_type and "string" in dipy_type: + return traits.ListStr, is_mandatory + elif "variable" in dipy_type and "int" in dipy_type: + return traits.ListInt, is_mandatory + elif "variable" in dipy_type and "float" in dipy_type: + return traits.ListFloat, is_mandatory + elif "variable" in dipy_type and "bool" in dipy_type: + return traits.ListBool, is_mandatory + elif "variable" in dipy_type and "complex" in dipy_type: + return traits.ListComplex, is_mandatory + elif "string" in dipy_type and not is_file: + return traits.Str, is_mandatory + elif "string" in dipy_type and is_file: + return traits.File, is_mandatory + elif "int" in dipy_type: + return traits.Int, is_mandatory + elif "float" in dipy_type: + return traits.Float, is_mandatory + elif "bool" in dipy_type: + return traits.Bool, is_mandatory + elif "complex" in dipy_type: + return traits.Complex, is_mandatory + else: + msg = "Error during convert_to_traits_type({0}).".format(dipy_type) + \ + "Unknown DIPY type." + raise IOError(msg) + + +def create_interface_specs(class_name, params=None, BaseClass=TraitedSpec): + """Create IN/Out interface specifications dynamically. + + Parameters + ---------- + class_name: str + The future class name(e.g, (MyClassInSpec)) + params: list of tuple + dipy argument list + BaseClass: TraitedSpec object + parent class + + Returns + ------- + newclass: object + new nipype interface specification class + + """ + attr = {} + if params is not None: + for name, dipy_type, desc in params: + is_file = bool("files" in name or "out_" in name) + traits_type, is_mandatory = convert_to_traits_type(dipy_type, + is_file) + # print(name, dipy_type, desc, is_file, traits_type, is_mandatory) + if isinstance(BaseClass, BaseInterfaceInputSpec): + attr[name] = traits_type(desc=desc[-1], mandatory=is_mandatory) + else: + attr[name] = traits_type(desc=desc[-1], exists=True) + + newclass = type(class_name, (BaseClass, ), attr) + return newclass + + +def dipy_to_nipype_interface(cls_name, dipy_flow, BaseClass=DipyBaseInterface): + """Construct a class in order to respect nipype interface specifications. + + This convenient class factory convert a DIPY Workflow to a nipype + interface. + + Parameters + ---------- + cls_name: string + new class name + dipy_flow: Workflow class type. 
+ It should be any children class of `dipy.workflows.workflow.Worflow` + BaseClass: object + nipype instance object + + Returns + ------- + newclass: object + new nipype interface specification class + + """ + parser = IntrospectiveArgumentParser() + parser.add_workflow(dipy_flow()) + input_parameters = parser.positional_parameters + parser.optional_parameters + + input_spec = create_interface_specs("{}InputSpec".format(cls_name), + input_parameters, + BaseClass=BaseInterfaceInputSpec) + + output_spec = create_interface_specs("{}OutputSpec".format(cls_name), + parser.output_parameters, + BaseClass=TraitedSpec) + + def _run_interface(self, runtime): + flow = dipy_flow() + args = self.inputs.get() + flow.run(**args) + + def _list_outputs(self): + outputs = self._outputs().get() + out_dir = outputs.get("out_dir", ".") + for key, values in outputs.items(): + outputs[key] = op.join(out_dir, values) + + return outputs + + newclass = type(cls_name, (BaseClass, ), + {"input_spec": input_spec, + "output_spec": output_spec, + "_run_interface": _run_interface, + "_list_outputs:": _list_outputs}) + return newclass diff --git a/nipype/interfaces/dipy/reconstruction.py b/nipype/interfaces/dipy/reconstruction.py index 7879553609..00b5a91557 100644 --- a/nipype/interfaces/dipy/reconstruction.py +++ b/nipype/interfaces/dipy/reconstruction.py @@ -13,13 +13,30 @@ import numpy as np import nibabel as nb +from distutils.version import LooseVersion from ... import logging from ..base import TraitedSpec, File, traits, isdefined -from .base import DipyDiffusionInterface, DipyBaseInterfaceInputSpec +from .base import (DipyDiffusionInterface, DipyBaseInterfaceInputSpec, + HAVE_DIPY, dipy_version, dipy_to_nipype_interface) + IFLOGGER = logging.getLogger('nipype.interface') +if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('0.15'): + from dipy.workflows.reconst import (ReconstDkiFlow, ReconstCSAFlow, + ReconstCSDFlow, ReconstMAPMRIFlow, + ReconstDtiFlow) + import ipdb; ipdb.set_trace() + DKIModel = dipy_to_nipype_interface("DKIModel", ReconstDkiFlow) + MapmriModel = dipy_to_nipype_interface("MapmriModel", ReconstMAPMRIFlow) + DTIModel = dipy_to_nipype_interface("DTIModel", ReconstDtiFlow) + CSAModel = dipy_to_nipype_interface("CSAModel", ReconstCSAFlow) + CSDModel = dipy_to_nipype_interface("CSDModel", ReconstCSDFlow) +else: + IFLOGGER.info("We advise you to upgrade DIPY version. This upgrade will" + " activate DKIModel, MapmriModel, DTIModel, CSAModel.") + class RESTOREInputSpec(DipyBaseInterfaceInputSpec): in_mask = File(exists=True, desc=('input mask in which compute tensors')) diff --git a/nipype/interfaces/dipy/registration.py b/nipype/interfaces/dipy/registration.py new file mode 100644 index 0000000000..b1c42c7e39 --- /dev/null +++ b/nipype/interfaces/dipy/registration.py @@ -0,0 +1,18 @@ + +from distutils.version import LooseVersion +from ... import logging +from .base import HAVE_DIPY, dipy_version, dipy_to_nipype_interface + +IFLOGGER = logging.getLogger('nipype.interface') + +if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('0.15'): + + from dipy.workflows.align import ResliceFlow, SlrWithQbxFlow + + Reslice = dipy_to_nipype_interface("Reslice", ResliceFlow) + StreamlineRegistration = dipy_to_nipype_interface("StreamlineRegistration", + SlrWithQbxFlow) + +else: + IFLOGGER.info("We advise you to upgrade DIPY version. 
This upgrade will" + " activate Reslice, StreamlineRegistration.") diff --git a/nipype/interfaces/dipy/tracks.py b/nipype/interfaces/dipy/tracks.py index cd47590f94..1a9a8a1582 100644 --- a/nipype/interfaces/dipy/tracks.py +++ b/nipype/interfaces/dipy/tracks.py @@ -6,14 +6,33 @@ import numpy as np import nibabel as nb import nibabel.trackvis as nbt +from distutils.version import LooseVersion from ... import logging from ..base import (TraitedSpec, BaseInterfaceInputSpec, File, isdefined, traits) -from .base import DipyBaseInterface +from .base import (DipyBaseInterface, HAVE_DIPY, dipy_version, + dipy_to_nipype_interface) + IFLOGGER = logging.getLogger('nipype.interface') +if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('0.15'): + + from dipy.workflows.segment import RecoBundlesFlow, LabelsBundlesFlow + from dipy.workflows.tracking import DetTrackPAMFlow + + RecoBundles = dipy_to_nipype_interface("RecoBundles", RecoBundlesFlow) + LabelsBundles = dipy_to_nipype_interface("LabelsBundles", + LabelsBundlesFlow) + DeterministicTracking = dipy_to_nipype_interface("DeterministicTracking", + DetTrackPAMFlow) + +else: + IFLOGGER.info("We advise you to upgrade DIPY version. This upgrade will" + " activate RecoBundles, LabelsBundles, DetTrackPAMFlow.") + + class TrackDensityMapInputSpec(BaseInterfaceInputSpec): in_file = File( exists=True, mandatory=True, desc='The input TrackVis track file') From cc45dd3819537c0edc9c6093095c58a0f7014afc Mon Sep 17 00:00:00 2001 From: skoudoro Date: Fri, 14 Dec 2018 15:46:03 -0500 Subject: [PATCH 0027/1665] remove pdb --- nipype/interfaces/dipy/reconstruction.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/dipy/reconstruction.py b/nipype/interfaces/dipy/reconstruction.py index 00b5a91557..48ec5d29f9 100644 --- a/nipype/interfaces/dipy/reconstruction.py +++ b/nipype/interfaces/dipy/reconstruction.py @@ -27,7 +27,7 @@ from dipy.workflows.reconst import (ReconstDkiFlow, ReconstCSAFlow, ReconstCSDFlow, ReconstMAPMRIFlow, ReconstDtiFlow) - import ipdb; ipdb.set_trace() + DKIModel = dipy_to_nipype_interface("DKIModel", ReconstDkiFlow) MapmriModel = dipy_to_nipype_interface("MapmriModel", ReconstMAPMRIFlow) DTIModel = dipy_to_nipype_interface("DTIModel", ReconstDtiFlow) From de91a1b3dce89f085545db6633f0323585961b75 Mon Sep 17 00:00:00 2001 From: skoudoro Date: Fri, 14 Dec 2018 17:44:33 -0500 Subject: [PATCH 0028/1665] setup default values --- nipype/interfaces/dipy/base.py | 33 +++++++++++++++++------- nipype/interfaces/dipy/reconstruction.py | 2 +- 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/nipype/interfaces/dipy/base.py b/nipype/interfaces/dipy/base.py index a1c6b8899f..f30af98415 100644 --- a/nipype/interfaces/dipy/base.py +++ b/nipype/interfaces/dipy/base.py @@ -4,6 +4,7 @@ absolute_import) import os.path as op +import inspect import numpy as np from ... 
import logging from ..base import (traits, File, isdefined, LibraryBaseInterface, @@ -130,17 +131,25 @@ def create_interface_specs(class_name, params=None, BaseClass=TraitedSpec): """ attr = {} if params is not None: - for name, dipy_type, desc in params: + for p in params: + name, dipy_type, desc = p[0], p[1], p[2] is_file = bool("files" in name or "out_" in name) traits_type, is_mandatory = convert_to_traits_type(dipy_type, is_file) # print(name, dipy_type, desc, is_file, traits_type, is_mandatory) - if isinstance(BaseClass, BaseInterfaceInputSpec): - attr[name] = traits_type(desc=desc[-1], mandatory=is_mandatory) + if BaseClass.__name__ == BaseInterfaceInputSpec.__name__: + if len(p) > 3: + attr[name] = traits_type(p[3], desc=desc[-1], + usedefault=True, + mandatory=is_mandatory) + else: + attr[name] = traits_type(desc=desc[-1], + mandatory=is_mandatory) else: - attr[name] = traits_type(desc=desc[-1], exists=True) + attr[name] = traits_type(p[3], desc=desc[-1], exists=True, + usedefault=True,) - newclass = type(class_name, (BaseClass, ), attr) + newclass = type(str(class_name), (BaseClass, ), attr) return newclass @@ -166,15 +175,21 @@ def dipy_to_nipype_interface(cls_name, dipy_flow, BaseClass=DipyBaseInterface): """ parser = IntrospectiveArgumentParser() - parser.add_workflow(dipy_flow()) - input_parameters = parser.positional_parameters + parser.optional_parameters + flow = dipy_flow() + parser.add_workflow(flow) + default_values = inspect.getargspec(flow.run).defaults + optional_params = [args + (val,) for args, val in zip(parser.optional_parameters, default_values)] + start = len(parser.optional_parameters) - len(parser.output_parameters) + + output_parameters = [args + (val,) for args, val in zip(parser.output_parameters, default_values[start:])] + input_parameters = parser.positional_parameters + optional_params input_spec = create_interface_specs("{}InputSpec".format(cls_name), input_parameters, BaseClass=BaseInterfaceInputSpec) output_spec = create_interface_specs("{}OutputSpec".format(cls_name), - parser.output_parameters, + output_parameters, BaseClass=TraitedSpec) def _run_interface(self, runtime): @@ -190,7 +205,7 @@ def _list_outputs(self): return outputs - newclass = type(cls_name, (BaseClass, ), + newclass = type(str(cls_name), (BaseClass, ), {"input_spec": input_spec, "output_spec": output_spec, "_run_interface": _run_interface, diff --git a/nipype/interfaces/dipy/reconstruction.py b/nipype/interfaces/dipy/reconstruction.py index 48ec5d29f9..a254a5da62 100644 --- a/nipype/interfaces/dipy/reconstruction.py +++ b/nipype/interfaces/dipy/reconstruction.py @@ -27,7 +27,7 @@ from dipy.workflows.reconst import (ReconstDkiFlow, ReconstCSAFlow, ReconstCSDFlow, ReconstMAPMRIFlow, ReconstDtiFlow) - + DKIModel = dipy_to_nipype_interface("DKIModel", ReconstDkiFlow) MapmriModel = dipy_to_nipype_interface("MapmriModel", ReconstMAPMRIFlow) DTIModel = dipy_to_nipype_interface("DTIModel", ReconstDtiFlow) From 83e4dc95f08458ec47231a61cbd2ebd494f9a7d2 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Mon, 17 Dec 2018 10:15:25 -0500 Subject: [PATCH 0029/1665] PY3: Drop future/builtins imports --- nipype/utils/subprocess.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/nipype/utils/subprocess.py b/nipype/utils/subprocess.py index 5516482936..b29d575a37 100644 --- a/nipype/utils/subprocess.py +++ b/nipype/utils/subprocess.py @@ -3,8 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Miscellaneous utility functions """ -from __future__ import (print_function, unicode_literals, division, - absolute_import) import os import sys import gc @@ -17,8 +15,6 @@ from .. import logging -from builtins import range, object - iflogger = logging.getLogger('nipype.interface') From e0579713c1b2ebae58f2862c7284507625d56661 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 17 Dec 2018 10:16:31 -0500 Subject: [PATCH 0030/1665] MAINT: Minimum python version > 3.2 --- nipype/pkg_info.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/nipype/pkg_info.py b/nipype/pkg_info.py index a6323c7a34..7367e4d8e5 100644 --- a/nipype/pkg_info.py +++ b/nipype/pkg_info.py @@ -44,10 +44,7 @@ def pkg_commit_hash(pkg_path): raise IOError('Missing commit info file %s' % pth) cfg_parser = configparser.RawConfigParser() with open(pth, encoding='utf-8') as fp: - if sys.version_info >= (3, 2): - cfg_parser.read_file(fp) - else: - cfg_parser.readfp(fp) + cfg_parser.read_file(fp) archive_subst = cfg_parser.get('commit hash', 'archive_subst_hash') if not archive_subst.startswith('$Format'): # it has been substituted return 'archive substitution', archive_subst From b0d464c7c7ba7bd557fb8f7daa059f462eccab63 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 17 Dec 2018 10:33:08 -0500 Subject: [PATCH 0031/1665] RF: Drop various remaining compatibilities for Python < 3.5 --- nipype/info.py | 4 -- nipype/interfaces/tests/test_io.py | 8 --- nipype/pipeline/engine/tests/test_utils.py | 7 -- nipype/pipeline/plugins/tools.py | 3 +- nipype/sphinxext/plot_workflow.py | 19 +----- nipype/utils/filemanip.py | 76 ++-------------------- nipype/utils/tests/test_functions.py | 6 +- 7 files changed, 8 insertions(+), 115 deletions(-) diff --git a/nipype/info.py b/nipype/info.py index cb2ea6ebde..354623c8d6 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -2,7 +2,6 @@ settings in setup.py, the nipy top-level docstring, and for building the docs. In setup.py in particular, we exec this file, so it cannot import nipy """ -import sys # nipype version information. An empty version_extra corresponds to a # full release. 
'.dev' as a version_extra string means this is a development @@ -152,9 +151,6 @@ def get_nipype_gitversion(): 'futures; python_version == "2.7"', ] -if sys.version_info <= (3, 4): - REQUIRES.append('configparser') - TESTS_REQUIRES = ['pytest-cov', 'codecov', 'pytest-env', 'coverage<5'] EXTRA_REQUIRES = { diff --git a/nipype/interfaces/tests/test_io.py b/nipype/interfaces/tests/test_io.py index 298cf3bfa4..e8b28924ca 100644 --- a/nipype/interfaces/tests/test_io.py +++ b/nipype/interfaces/tests/test_io.py @@ -5,9 +5,7 @@ import copy import simplejson import glob -import shutil import os.path as op -import sys from subprocess import Popen import hashlib from collections import namedtuple @@ -577,8 +575,6 @@ def test_jsonsink(tmpdir, inputs_attributes): # There are three reasons these tests will be skipped: @pytest.mark.skipif(not have_pybids, reason="Pybids is not installed") -@pytest.mark.skipif(sys.version_info < (3, 0), - reason="Pybids no longer supports Python 2") @pytest.mark.skipif(not dist_is_editable('pybids'), reason="Pybids is not installed in editable mode") def test_bids_grabber(tmpdir): @@ -594,8 +590,6 @@ def test_bids_grabber(tmpdir): @pytest.mark.skipif(not have_pybids, reason="Pybids is not installed") -@pytest.mark.skipif(sys.version_info < (3, 0), - reason="Pybids no longer supports Python 2") @pytest.mark.skipif(not dist_is_editable('pybids'), reason="Pybids is not installed in editable mode") def test_bids_fields(tmpdir): @@ -610,8 +604,6 @@ def test_bids_fields(tmpdir): @pytest.mark.skipif(not have_pybids, reason="Pybids is not installed") -@pytest.mark.skipif(sys.version_info < (3, 0), - reason="Pybids no longer supports Python 2") @pytest.mark.skipif(not dist_is_editable('pybids'), reason="Pybids is not installed in editable mode") def test_bids_infields_outfields(tmpdir): diff --git a/nipype/pipeline/engine/tests/test_utils.py b/nipype/pipeline/engine/tests/test_utils.py index e867e4d0a1..a46860d58e 100644 --- a/nipype/pipeline/engine/tests/test_utils.py +++ b/nipype/pipeline/engine/tests/test_utils.py @@ -4,7 +4,6 @@ """Tests for the engine utils module """ import os -import sys from copy import deepcopy import pytest @@ -159,8 +158,6 @@ def dummy_func(value): return value + 1 -@pytest.mark.skipif( - sys.version_info < (3, 0), reason="the famous segfault #1788") def test_mapnode_crash(tmpdir): """Test mapnode crash when stop_on_first_crash is True""" cwd = os.getcwd() @@ -180,8 +177,6 @@ def test_mapnode_crash(tmpdir): os.chdir(cwd) -@pytest.mark.skipif( - sys.version_info < (3, 0), reason="the famous segfault #1788") def test_mapnode_crash2(tmpdir): """Test mapnode crash when stop_on_first_crash is False""" cwd = os.getcwd() @@ -200,8 +195,6 @@ def test_mapnode_crash2(tmpdir): os.chdir(cwd) -@pytest.mark.skipif( - sys.version_info < (3, 0), reason="the famous segfault #1788") def test_mapnode_crash3(tmpdir): """Test mapnode crash when mapnode is embedded in a workflow""" tmpdir.chdir() diff --git a/nipype/pipeline/plugins/tools.py b/nipype/pipeline/plugins/tools.py index 4eef64994d..91d038950e 100644 --- a/nipype/pipeline/plugins/tools.py +++ b/nipype/pipeline/plugins/tools.py @@ -115,8 +115,7 @@ def create_pyscript(node, updatehash=False, store_exception=True): batchdir = '%s' from nipype.utils.filemanip import loadpkl, savepkl try: - if not sys.version_info < (2, 7): - from collections import OrderedDict + from collections import OrderedDict config_dict=%s config.update_config(config_dict) ## Only configure matplotlib if it was successfully imported, diff --git 
a/nipype/sphinxext/plot_workflow.py b/nipype/sphinxext/plot_workflow.py index 1425b19450..740c121926 100644 --- a/nipype/sphinxext/plot_workflow.py +++ b/nipype/sphinxext/plot_workflow.py @@ -144,23 +144,6 @@ def format_template(template, **kw): -def _mkdirp(folder): - """ - Equivalent to bash's mkdir -p - """ - if sys.version_info > (3, 4, 1): - os.makedirs(folder, exist_ok=True) - return folder - - try: - os.makedirs(folder) - except OSError as exc: - if exc.errno != EEXIST or not os.path.isdir(folder): - raise - - return folder - - def wf_directive(name, arguments, options, content, lineno, content_offset, block_text, state, state_machine): if len(missing_imports) == 0: @@ -737,7 +720,7 @@ def run(arguments, content, options, state_machine, state, lineno): state_machine.insert_input(total_lines, source=source_file_name) # copy image files to builder's output directory, if necessary - _mkdirp(dest_dir) + os.makedirs(dest_dir, exist_ok=True) for code_piece, images in results: for img in images: for fn in img.filenames(): diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 5a0b937fa7..fa7e46dbfc 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -91,56 +91,7 @@ def to_str(value): Manipulates ordered dicts before they are hashed (Py2/3 compat.) """ - if sys.version_info[0] > 2: - retval = str(value) - else: - retval = to_str_py27(value) - return retval - - -def to_str_py27(value): - """ - Encode dictionary for python 2 - """ - - if isinstance(value, dict): - entry = '{}: {}'.format - retval = '{' - for key, val in list(value.items()): - if len(retval) > 1: - retval += ', ' - kenc = repr(key) - if kenc.startswith(("u'", 'u"')): - kenc = kenc[1:] - venc = to_str_py27(val) - if venc.startswith(("u'", 'u"')): - venc = venc[1:] - retval += entry(kenc, venc) - retval += '}' - return retval - - istuple = isinstance(value, tuple) - if isinstance(value, (tuple, list)): - retval = '(' if istuple else '[' - nels = len(value) - for i, v in enumerate(value): - venc = to_str_py27(v) - if venc.startswith(("u'", 'u"')): - venc = venc[1:] - retval += venc - - if i < nels - 1: - retval += ', ' - - if istuple and nels == 1: - retval += ',' - retval += ')' if istuple else ']' - return retval - - retval = repr(value).decode() - if retval.startswith(("u'", 'u"')): - retval = retval[1:] - return retval + return str(value) def fname_presuffix(fname, prefix='', suffix='', newpath=None, use_ext=True): @@ -593,8 +544,6 @@ def save_json(filename, data): """ mode = 'w' - if sys.version_info[0] < 3: - mode = 'wb' with open(filename, mode) as fp: json.dump(data, fp, sort_keys=True, indent=4) @@ -841,27 +790,10 @@ def which(cmd, env=None, pathext=None): if env and 'PATH' in env: path = env.get("PATH") - if sys.version_info >= (3, 3): - for ext in pathext: - filename = shutil.which(cmd + ext, path=path) - if filename: - return filename - return None - - def isexec(path): - return os.path.isfile(path) and os.access(path, os.X_OK) - for ext in pathext: - extcmd = cmd + ext - fpath, fname = os.path.split(extcmd) - if fpath: - if isexec(extcmd): - return extcmd - else: - for directory in path.split(os.pathsep): - filename = op.join(directory, extcmd) - if isexec(filename): - return filename + filename = shutil.which(cmd + ext, path=path) + if filename: + return filename return None diff --git a/nipype/utils/tests/test_functions.py b/nipype/utils/tests/test_functions.py index 377bfe338f..a933507c96 100644 --- a/nipype/utils/tests/test_functions.py +++ 
b/nipype/utils/tests/test_functions.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -import sys import pytest from nipype.utils.functions import (getsource, create_function_from_source) @@ -27,7 +26,7 @@ def test_func_to_str_err(): def _print_statement(): try: - exec('print ""') + exec('print("")') return True except SyntaxError: return False @@ -41,7 +40,6 @@ def is_string(): assert is_string() == wrapped_func() -@pytest.mark.skipif(sys.version_info[0] > 2, reason="breaks python 3") -def test_func_print_py2(): +def test_func_print(): wrapped_func = create_function_from_source(getsource(_print_statement)) assert wrapped_func() From be2d8c53424aef3821f46687e1012eec45b07b62 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 17 Dec 2018 11:57:11 -0500 Subject: [PATCH 0032/1665] RF: Drop to_str, makedirs compatibility functions --- nipype/interfaces/base/specs.py | 4 ++-- nipype/interfaces/base/support.py | 4 ++-- nipype/pipeline/engine/nodes.py | 18 +++++++-------- nipype/pipeline/engine/utils.py | 8 +++---- nipype/pipeline/engine/workflows.py | 30 ++++++++++++------------- nipype/pipeline/plugins/tools.py | 4 ++-- nipype/utils/filemanip.py | 35 +---------------------------- 7 files changed, 34 insertions(+), 69 deletions(-) diff --git a/nipype/interfaces/base/specs.py b/nipype/interfaces/base/specs.py index a6f74f9a66..8d2c94f761 100644 --- a/nipype/interfaces/base/specs.py +++ b/nipype/interfaces/base/specs.py @@ -15,7 +15,7 @@ from warnings import warn from packaging.version import Version -from ...utils.filemanip import md5, hash_infile, hash_timestamp, to_str +from ...utils.filemanip import md5, hash_infile, hash_timestamp from .traits_extension import ( traits, Undefined, @@ -251,7 +251,7 @@ def get_hashval(self, hash_method=None): True, hash_method=hash_method, hash_files=hash_files))) - return list_withhash, md5(to_str(list_nofilename).encode()).hexdigest() + return list_withhash, md5(str(list_nofilename).encode()).hexdigest() def _get_sorteddict(self, objekt, diff --git a/nipype/interfaces/base/support.py b/nipype/interfaces/base/support.py index d23d31b545..69e504c6c3 100644 --- a/nipype/interfaces/base/support.py +++ b/nipype/interfaces/base/support.py @@ -14,7 +14,7 @@ from ... import logging from ...utils.misc import is_container -from ...utils.filemanip import md5, to_str, hash_infile +from ...utils.filemanip import md5, hash_infile iflogger = logging.getLogger('nipype.interface') HELP_LINEWIDTH = 70 @@ -161,7 +161,7 @@ def _get_bunch_hash(self): # Sort the items of the dictionary, before hashing the string # representation so we get a predictable order of the # dictionary. 
- sorted_dict = to_str(sorted(dict_nofilename.items())) + sorted_dict = str(sorted(dict_nofilename.items())) return dict_withhash, md5(sorted_dict.encode()).hexdigest() def _repr_pretty_(self, p, cycle): diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index e0c8cce7b1..734cdf5b60 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -21,8 +21,8 @@ from ...utils.misc import flatten, unflatten, str2bool, dict_diff from ...utils.filemanip import (md5, FileNotFoundError, ensure_list, simplify_list, copyfiles, fnames_presuffix, - loadpkl, split_filename, load_json, makedirs, - emptydirs, savepkl, to_str, indirectory) + loadpkl, split_filename, load_json, + emptydirs, savepkl, indirectory) from ...interfaces.base import (traits, InputMultiPath, CommandLine, Undefined, DynamicTraitedSpec, Bunch, InterfaceResult, @@ -267,7 +267,7 @@ def output_dir(self): def set_input(self, parameter, val): """Set interface input value""" logger.debug('[Node] %s - setting input %s = %s', self.name, parameter, - to_str(val)) + str(val)) setattr(self.inputs, parameter, deepcopy(val)) def get_output(self, parameter): @@ -453,7 +453,7 @@ def run(self, updatehash=False): os.remove(filename) # Make sure outdir is created - makedirs(outdir, exist_ok=True) + os.makedirs(outdir, exist_ok=True) # Store runtime-hashfile, pre-execution report, the node and the inputs set. _save_hashfile(hashfile_unfinished, self._hashed_inputs) @@ -663,7 +663,7 @@ def _copyfiles_to_wd(self, execute=True, linksonly=False): if execute and linksonly: olddir = outdir outdir = op.join(outdir, '_tempinput') - makedirs(outdir, exist_ok=True) + os.makedirs(outdir, exist_ok=True) for info in filecopy_info: files = self.inputs.trait_get().get(info['key']) @@ -1019,13 +1019,13 @@ def set_input(self, parameter, val): Set interface input value or nodewrapper attribute Priority goes to interface. """ - logger.debug('setting nodelevel(%s) input %s = %s', to_str(self), - parameter, to_str(val)) + logger.debug('setting nodelevel(%s) input %s = %s', str(self), + parameter, str(val)) self._set_mapnode_input(parameter, deepcopy(val)) def _set_mapnode_input(self, name, newvalue): - logger.debug('setting mapnode(%s) input: %s -> %s', to_str(self), name, - to_str(newvalue)) + logger.debug('setting mapnode(%s) input: %s -> %s', str(self), name, + str(newvalue)) if name in self.iterfield: setattr(self._inputs, name, newvalue) else: diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index ec063d9e54..d40e788083 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -21,9 +21,7 @@ from ... import logging, config, LooseVersion from ...utils.filemanip import ( relpath, - makedirs, fname_presuffix, - to_str, ensure_list, get_related_files, FileNotFoundError, @@ -117,7 +115,7 @@ def write_report(node, report_type=None, is_mapnode=False): cwd = node.output_dir() report_dir = os.path.join(cwd, '_report') report_file = os.path.join(report_dir, 'report.rst') - makedirs(report_dir, exist_ok=True) + os.makedirs(report_dir, exist_ok=True) logger.debug('[Node] Writing %s-exec report to "%s"', report_type[:-4], report_file) @@ -627,7 +625,7 @@ def _get_valid_pathstr(pathstr): Replaces: ',' -> '.' 
""" if not isinstance(pathstr, (str, bytes)): - pathstr = to_str(pathstr) + pathstr = str(pathstr) pathstr = pathstr.replace(os.sep, '..') pathstr = re.sub(r'''[][ (){}?:<>#!|"';]''', '', pathstr) pathstr = pathstr.replace(',', '.') @@ -1355,7 +1353,7 @@ def export_graph(graph_in, if base_dir is None: base_dir = os.getcwd() - makedirs(base_dir, exist_ok=True) + os.makedirs(base_dir, exist_ok=True) out_dot = fname_presuffix( dotfilename, suffix='_detailed.dot', use_ext=False, newpath=base_dir) _write_detailed_dot(graph, out_dot) diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py index 35d30d22e8..a56c05e6be 100644 --- a/nipype/pipeline/engine/workflows.py +++ b/nipype/pipeline/engine/workflows.py @@ -22,7 +22,7 @@ from ...interfaces.base import (traits, TraitedSpec, TraitDictObject, TraitListObject) -from ...utils.filemanip import save_json, makedirs, to_str +from ...utils.filemanip import save_json from .utils import (generate_expanded_graph, export_graph, write_workflow_prov, write_workflow_resources, format_dot, topological_sort, get_print_name, merge_dict, format_node) @@ -218,12 +218,12 @@ def connect(self, *args, **kwargs): edge_data = self._graph.get_edge_data(srcnode, destnode, None) if edge_data: logger.debug('(%s, %s): Edge data exists: %s', srcnode, - destnode, to_str(edge_data)) + destnode, str(edge_data)) for data in connects: if data not in edge_data['connect']: edge_data['connect'].append(data) if disconnect: - logger.debug('Removing connection: %s', to_str(data)) + logger.debug('Removing connection: %s', str(data)) edge_data['connect'].remove(data) if edge_data['connect']: self._graph.add_edges_from([(srcnode, destnode, @@ -240,7 +240,7 @@ def connect(self, *args, **kwargs): })]) edge_data = self._graph.get_edge_data(srcnode, destnode) logger.debug('(%s, %s): new edge data: %s', srcnode, destnode, - to_str(edge_data)) + str(edge_data)) def disconnect(self, *args): """Disconnect nodes @@ -256,7 +256,7 @@ def disconnect(self, *args): for srcnode, dstnode, conn in connection_list: logger.debug('disconnect(): %s->%s %s', srcnode, dstnode, - to_str(conn)) + str(conn)) if self in [srcnode, dstnode]: raise IOError( 'Workflow connect cannot contain itself as node: src[%s] ' @@ -277,10 +277,10 @@ def disconnect(self, *args): # idx = ed_conns.index(edge) remove.append((edge[0], edge[1])) - logger.debug('disconnect(): remove list %s', to_str(remove)) + logger.debug('disconnect(): remove list %s', str(remove)) for el in remove: edge_data['connect'].remove(el) - logger.debug('disconnect(): removed connection %s', to_str(el)) + logger.debug('disconnect(): removed connection %s', str(el)) if not edge_data['connect']: self._graph.remove_edge(srcnode, dstnode) @@ -410,7 +410,7 @@ def write_graph(self, base_dir = op.join(base_dir, self.name) else: base_dir = os.getcwd() - base_dir = makedirs(base_dir, exist_ok=True) + base_dir = os.makedirs(base_dir, exist_ok=True) if graph2use in ['hierarchical', 'colored']: if self.name[:1].isdigit(): # these graphs break if int raise ValueError('{} graph failed, workflow name cannot begin ' @@ -576,7 +576,7 @@ def run(self, plugin=None, plugin_args=None, updatehash=False): flatgraph = self._create_flat_graph() self.config = merge_dict(deepcopy(config._sections), self.config) logger.info('Workflow %s settings: %s', self.name, - to_str(sorted(self.config))) + str(sorted(self.config))) self._set_needed_outputs(flatgraph) execgraph = generate_expanded_graph(deepcopy(flatgraph)) for index, node in 
enumerate(execgraph.nodes()): @@ -609,7 +609,7 @@ def _write_report_info(self, workingdir, name, graph): if workingdir is None: workingdir = os.getcwd() report_dir = op.join(workingdir, name) - makedirs(report_dir, exist_ok=True) + os.makedirs(report_dir, exist_ok=True) shutil.copyfile( op.join(op.dirname(__file__), 'report_template.html'), op.join(report_dir, 'index.html')) @@ -821,7 +821,7 @@ def _set_node_input(self, node, param, source, sourceinfo): newval = dict(val) if isinstance(val, TraitListObject): newval = val[:] - logger.debug('setting node input: %s->%s', param, to_str(newval)) + logger.debug('setting node input: %s->%s', param, str(newval)) node.set_input(param, deepcopy(newval)) def _get_all_nodes(self): @@ -881,9 +881,9 @@ def _generate_flatgraph(self): # dj: added list() for networkx ver.2 for u, _, d in list( self._graph.in_edges(nbunch=node, data=True)): - logger.debug('in: connections-> %s', to_str(d['connect'])) + logger.debug('in: connections-> %s', str(d['connect'])) for cd in deepcopy(d['connect']): - logger.debug("in: %s", to_str(cd)) + logger.debug("in: %s", str(cd)) dstnode = node._get_parameter_node(cd[1], subtype='in') srcnode = u srcout = cd[0] @@ -896,9 +896,9 @@ def _generate_flatgraph(self): # dj: for ver 2 use list(out_edges) for _, v, d in list( self._graph.out_edges(nbunch=node, data=True)): - logger.debug('out: connections-> %s', to_str(d['connect'])) + logger.debug('out: connections-> %s', str(d['connect'])) for cd in deepcopy(d['connect']): - logger.debug("out: %s", to_str(cd)) + logger.debug("out: %s", str(cd)) dstnode = v if isinstance(cd[0], tuple): parameter = cd[0][0] diff --git a/nipype/pipeline/plugins/tools.py b/nipype/pipeline/plugins/tools.py index 91d038950e..1b6725107c 100644 --- a/nipype/pipeline/plugins/tools.py +++ b/nipype/pipeline/plugins/tools.py @@ -12,7 +12,7 @@ from traceback import format_exception from ... import logging -from ...utils.filemanip import savepkl, crash2txt, makedirs +from ...utils.filemanip import savepkl, crash2txt logger = logging.getLogger('nipype.workflow') @@ -42,7 +42,7 @@ def report_crash(node, traceback=None, hostname=None): str(uuid.uuid4())) crashdir = node.config['execution'].get('crashdump_dir', os.getcwd()) - makedirs(crashdir, exist_ok=True) + os.makedirs(crashdir, exist_ok=True) crashfile = os.path.join(crashdir, crashfile) if node.config['execution']['crashfile_format'].lower() in ['text', 'txt']: diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index fa7e46dbfc..d2fba5cb2a 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -86,14 +86,6 @@ def split_filename(fname): return pth, fname, ext -def to_str(value): - """ - Manipulates ordered dicts before they are hashed (Py2/3 compat.) - - """ - return str(value) - - def fname_presuffix(fname, prefix='', suffix='', newpath=None, use_ext=True): """Manipulates path and name of input filename @@ -711,31 +703,6 @@ def dist_is_editable(dist): return False -def makedirs(path, exist_ok=False): - """ - Create path, if it doesn't exist. 
- - Parameters - ---------- - path : output directory to create - - """ - if not exist_ok: # The old makedirs - os.makedirs(path) - return path - - # this odd approach deals with concurrent directory cureation - if not op.exists(op.abspath(path)): - fmlogger.debug("Creating directory %s", path) - try: - os.makedirs(path) - except OSError: - fmlogger.debug("Problem creating directory %s", path) - if not op.exists(path): - raise OSError('Could not create directory %s' % path) - return path - - def emptydirs(path, noexist_ok=False): """ Empty an existing directory, without deleting it. Do not @@ -769,7 +736,7 @@ def emptydirs(path, noexist_ok=False): else: raise ex - makedirs(path) + os.makedirs(path) def which(cmd, env=None, pathext=None): From 5ab2fa0b61453598bbe566c118c0775e3cddcd8d Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 17 Dec 2018 11:59:06 -0500 Subject: [PATCH 0033/1665] FIX: import os - mistakenly dropped --- nipype/utils/profiler.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nipype/utils/profiler.py b/nipype/utils/profiler.py index 428696e773..a9b8c926cb 100644 --- a/nipype/utils/profiler.py +++ b/nipype/utils/profiler.py @@ -4,6 +4,7 @@ """ Utilities to keep track of performance """ +import os import threading from time import time try: From 757a6f473ceefae2d28408762ac9e51ebf724948 Mon Sep 17 00:00:00 2001 From: skoudoro Date: Mon, 17 Dec 2018 14:58:35 -0500 Subject: [PATCH 0034/1665] add some tests --- nipype/interfaces/dipy/tests/test_base.py | 139 ++++++++++++++++++++++ 1 file changed, 139 insertions(+) create mode 100644 nipype/interfaces/dipy/tests/test_base.py diff --git a/nipype/interfaces/dipy/tests/test_base.py b/nipype/interfaces/dipy/tests/test_base.py new file mode 100644 index 0000000000..c0f5174cb3 --- /dev/null +++ b/nipype/interfaces/dipy/tests/test_base.py @@ -0,0 +1,139 @@ +import pytest +from collections import namedtuple +from ...base import traits, TraitedSpec, BaseInterfaceInputSpec +from ..base import (convert_to_traits_type, create_interface_specs, + dipy_to_nipype_interface, DipyBaseInterface) +from dipy.workflows.workflow import Workflow + + +def test_convert_to_traits_type(): + Params = namedtuple("Params", "traits_type is_file") + Res = namedtuple("Res", "traits_type is_mandatory") + l_entries = [Params('variable string', False), + Params('variable int', False), + Params('variable float', False), + Params('variable bool', False), + Params('variable complex', False), + Params('variable int, optional', False), + Params('variable string, optional', False), + Params('variable float, optional', False), + Params('variable bool, optional', False), + Params('variable complex, optional', False), + Params('string', False), Params('int', False), + Params('string', True), Params('float', False), + Params('bool', False), Params('complex', False), + Params('string, optional', False), + Params('int, optional', False), + Params('string, optional', True), + Params('float, optional', False), + Params('bool, optional', False), + Params('complex, optional', False), + ] + l_expected = [Res(traits.ListStr, True), Res(traits.ListInt, True), + Res(traits.ListFloat, True), Res(traits.ListBool, True), + Res(traits.ListComplex, True), Res(traits.ListInt, False), + Res(traits.ListStr, False), Res(traits.ListFloat, False), + Res(traits.ListBool, False), Res(traits.ListComplex, False), + Res(traits.Str, True), Res(traits.Int, True), + Res(traits.File, True), Res(traits.Float, True), + Res(traits.Bool, True), Res(traits.Complex, True), + 
Res(traits.Str, False), Res(traits.Int, False), + Res(traits.File, False), Res(traits.Float, False), + Res(traits.Bool, False), Res(traits.Complex, False), + ] + + for entry, res in zip(l_entries, l_expected): + traits_type, is_mandatory = convert_to_traits_type(entry.traits_type, + entry.is_file) + assert traits_type == res.traits_type + assert is_mandatory == res.is_mandatory + + with pytest.raises(IOError): + convert_to_traits_type("file, optional") + + +def test_create_interface_specs(): + new_interface = create_interface_specs("MyInterface") + + assert new_interface.__base__ == TraitedSpec + assert isinstance(new_interface(), TraitedSpec) + assert new_interface.__name__ == "MyInterface" + assert not new_interface().get() + + new_interface = create_interface_specs("MyInterface", + BaseClass=BaseInterfaceInputSpec) + assert new_interface.__base__ == BaseInterfaceInputSpec + assert isinstance(new_interface(), BaseInterfaceInputSpec) + assert new_interface.__name__ == "MyInterface" + assert not new_interface().get() + + params = [("params1", "string", ["my description"]), ("params2_files", "string", ["my description @"]), + ("params3", "int, optional", ["useful option"]), ("out_params", "string", ["my out description"])] + + new_interface = create_interface_specs("MyInterface", params=params, + BaseClass=BaseInterfaceInputSpec) + + assert new_interface.__base__ == BaseInterfaceInputSpec + assert isinstance(new_interface(), BaseInterfaceInputSpec) + assert new_interface.__name__ == "MyInterface" + current_params = new_interface().get() + assert len(current_params) == 4 + for key, expected in zip(current_params.keys(), params): + assert key == expected[0] + + +class DummyWorkflow(Workflow): + + @classmethod + def get_short_name(cls): + return 'dwf1' + + def run(self, in_files, param1=1, out_dir='', out_ref='out1.txt'): + """Workflow used to test basic workflows. 
+ + Parameters + ---------- + in_files : string + fake input string param + param1 : int, optional + fake positional param (default 1) + out_dir : string, optional + fake output directory (default '') + out_ref : string, optional + fake out file (default out1.txt) + + References + ----------- + dummy references + + """ + return param1 + + +def test_dipy_to_nipype_interface(): + + new_specs = dipy_to_nipype_interface("MyModelSpec", DummyWorkflow) + assert new_specs.__base__ == DipyBaseInterface + assert isinstance(new_specs(), DipyBaseInterface) + assert new_specs.__name__ == "MyModelSpec" + assert hasattr(new_specs, 'input_spec') + assert new_specs().input_spec.__base__ == BaseInterfaceInputSpec + assert hasattr(new_specs, 'output_spec') + assert new_specs().output_spec.__base__ == TraitedSpec + assert hasattr(new_specs, '_run_interface') + assert hasattr(new_specs, '_list_outputs') + params_in = new_specs().inputs.get() + params_out = new_specs()._outputs().get() + assert len(params_in) == 4 + assert 'in_files' in params_in.keys() + assert 'param1' in params_in.keys() + assert 'out_dir' in params_out.keys() + assert 'out_ref' in params_out.keys() + + with pytest.raises(ValueError): + new_specs().run() + + +# test_convert_to_traits_type() +# test_create_interface_specs() +# test_dipy_to_nipype_interface() From dc896811b18d86450f90dcc0ff3913553aa02d8e Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 17 Dec 2018 16:24:22 -0500 Subject: [PATCH 0035/1665] FIX: os.makedirs does not return directory --- nipype/pipeline/engine/workflows.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py index a56c05e6be..195ebc6f69 100644 --- a/nipype/pipeline/engine/workflows.py +++ b/nipype/pipeline/engine/workflows.py @@ -410,7 +410,7 @@ def write_graph(self, base_dir = op.join(base_dir, self.name) else: base_dir = os.getcwd() - base_dir = os.makedirs(base_dir, exist_ok=True) + os.makedirs(base_dir, exist_ok=True) if graph2use in ['hierarchical', 'colored']: if self.name[:1].isdigit(): # these graphs break if int raise ValueError('{} graph failed, workflow name cannot begin ' From 9b1e9282a1fa8414fae74e929d5ceec5580ccac2 Mon Sep 17 00:00:00 2001 From: skoudoro Date: Tue, 18 Dec 2018 11:43:53 -0500 Subject: [PATCH 0036/1665] from random dict to static test --- nipype/interfaces/dipy/tests/test_base.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/nipype/interfaces/dipy/tests/test_base.py b/nipype/interfaces/dipy/tests/test_base.py index c0f5174cb3..4935e14a59 100644 --- a/nipype/interfaces/dipy/tests/test_base.py +++ b/nipype/interfaces/dipy/tests/test_base.py @@ -78,8 +78,10 @@ def test_create_interface_specs(): assert new_interface.__name__ == "MyInterface" current_params = new_interface().get() assert len(current_params) == 4 - for key, expected in zip(current_params.keys(), params): - assert key == expected[0] + assert 'params1' in current_params.keys() + assert 'params2_files' in current_params.keys() + assert 'params3' in current_params.keys() + assert 'out_params' in current_params.keys() class DummyWorkflow(Workflow): @@ -134,6 +136,7 @@ def test_dipy_to_nipype_interface(): new_specs().run() -# test_convert_to_traits_type() -# test_create_interface_specs() -# test_dipy_to_nipype_interface() +if __name__ == "__main__": + test_convert_to_traits_type() + test_create_interface_specs() + test_dipy_to_nipype_interface() From 63c1d7ca7f4d2e867378fa09d40825d7cc67e6c6 Mon Sep 17 
00:00:00 2001 From: skoudoro Date: Tue, 18 Dec 2018 12:42:40 -0500 Subject: [PATCH 0037/1665] add skipif --- nipype/interfaces/dipy/tests/test_base.py | 60 +++++++++++------------ 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/nipype/interfaces/dipy/tests/test_base.py b/nipype/interfaces/dipy/tests/test_base.py index 4935e14a59..5f769480d2 100644 --- a/nipype/interfaces/dipy/tests/test_base.py +++ b/nipype/interfaces/dipy/tests/test_base.py @@ -2,8 +2,7 @@ from collections import namedtuple from ...base import traits, TraitedSpec, BaseInterfaceInputSpec from ..base import (convert_to_traits_type, create_interface_specs, - dipy_to_nipype_interface, DipyBaseInterface) -from dipy.workflows.workflow import Workflow + dipy_to_nipype_interface, DipyBaseInterface, no_dipy) def test_convert_to_traits_type(): @@ -84,35 +83,36 @@ def test_create_interface_specs(): assert 'out_params' in current_params.keys() -class DummyWorkflow(Workflow): - - @classmethod - def get_short_name(cls): - return 'dwf1' - - def run(self, in_files, param1=1, out_dir='', out_ref='out1.txt'): - """Workflow used to test basic workflows. - - Parameters - ---------- - in_files : string - fake input string param - param1 : int, optional - fake positional param (default 1) - out_dir : string, optional - fake output directory (default '') - out_ref : string, optional - fake out file (default out1.txt) - - References - ----------- - dummy references - - """ - return param1 - - +@pytest.mark.skipif(no_dipy(), reason="DIPY is not installed") def test_dipy_to_nipype_interface(): + from dipy.workflows.workflow import Workflow + + class DummyWorkflow(Workflow): + + @classmethod + def get_short_name(cls): + return 'dwf1' + + def run(self, in_files, param1=1, out_dir='', out_ref='out1.txt'): + """Workflow used to test basic workflows. 
+ + Parameters + ---------- + in_files : string + fake input string param + param1 : int, optional + fake positional param (default 1) + out_dir : string, optional + fake output directory (default '') + out_ref : string, optional + fake out file (default out1.txt) + + References + ----------- + dummy references + + """ + return param1 new_specs = dipy_to_nipype_interface("MyModelSpec", DummyWorkflow) assert new_specs.__base__ == DipyBaseInterface From c5d444ebeb557a842cf7b8a4001bb1a8fb878ea3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Je=CC=81re=CC=81my=20Guillon?= Date: Wed, 19 Dec 2018 16:37:10 +0100 Subject: [PATCH 0038/1665] Add max and min ver --- .../interfaces/ants/tests/test_auto_AntsJointFusion.py | 4 ++-- .../interfaces/mrtrix3/tests/test_auto_Tractography.py | 9 ++++++++- nipype/interfaces/mrtrix3/tracking.py | 7 +++++++ 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py index 292e6b398b..341f250a2c 100644 --- a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py +++ b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py @@ -52,8 +52,8 @@ def test_AntsJointFusion_inputs(): hash_files=False, ), out_label_post_prob_name_format=dict( - requires=['out_label_fusion', 'out_intensity_fusion_name_format'], - ), + requires=['out_label_fusion', + 'out_intensity_fusion_name_format'], ), patch_metric=dict(argstr='-m %s', ), patch_radius=dict( argstr='-p %s', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py index 461915ff19..affad3a2a0 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py @@ -40,7 +40,10 @@ def test_Tractography_inputs(): argstr='-samples %d', usedefault=True, ), - n_tracks=dict(argstr='-select %d', ), + n_tracks=dict( + argstr='-number %d', + max_ver=0.4, + ), n_trials=dict(argstr='-trials %d', ), noprecompt=dict(argstr='-noprecomputed', ), nthreads=dict( @@ -77,6 +80,10 @@ def test_Tractography_inputs(): xor=['seed_image', 'seed_grid_voxel'], ), seed_sphere=dict(argstr='-seed_sphere %f,%f,%f,%f', ), + select=dict( + argstr='-select %d', + min_ver=3, + ), sph_trait=dict(argstr='%f,%f,%f,%f', ), step_size=dict(argstr='-step %f', ), stop=dict(argstr='-stop', ), diff --git a/nipype/interfaces/mrtrix3/tracking.py b/nipype/interfaces/mrtrix3/tracking.py index cb879f5c79..e8dbb9423f 100644 --- a/nipype/interfaces/mrtrix3/tracking.py +++ b/nipype/interfaces/mrtrix3/tracking.py @@ -75,7 +75,14 @@ class TractographyInputSpec(MRTrix3BaseInputSpec): desc=('set the maximum angle between successive steps (default ' 'is 90deg x stepsize / voxelsize)')) n_tracks = traits.Int( + argstr='-number %d', + max_ver=0.4, + desc=('set the desired number of tracks. The program will continue' + ' to generate tracks until this number of tracks have been ' + 'selected and written to the output file')) + select = traits.Int( argstr='-select %d', + min_ver=3, desc=('set the desired number of tracks. 
The program will continue' ' to generate tracks until this number of tracks have been ' 'selected and written to the output file')) From a0e92f17b234b16898d813771be6ce7512e5b119 Mon Sep 17 00:00:00 2001 From: delavega4 Date: Fri, 11 Jan 2019 13:44:17 -0500 Subject: [PATCH 0039/1665] Lint BIDSGrabbder --- nipype/interfaces/io.py | 40 +++++++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index 84978316a0..f90afdac05 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -2721,20 +2721,25 @@ def _list_outputs(self): class BIDSDataGrabberInputSpec(DynamicTraitedSpec): - base_dir = Directory(exists=True, - desc='Path to BIDS Directory.', - mandatory=True) - output_query = traits.Dict(key_trait=Str, - value_trait=traits.Dict, - desc='Queries for outfield outputs') - raise_on_empty = traits.Bool(True, usedefault=True, - desc='Generate exception if list is empty ' - 'for a given field') - return_type = traits.Enum('file', 'namedtuple', usedefault=True) - index_derivatives = traits.Bool(False, usedefault=True, - desc='Index derivatives/ sub-directory') - extra_derivatives = traits.List(Directory(exists=True), - desc='Additional derivative directories to index') + base_dir = Directory( + exists=True, + desc='Path to BIDS Directory.', + mandatory=True) + output_query = traits.Dict( + key_trait=Str, + value_trait=traits.Dict, + desc='Queries for outfield outputs') + raise_on_empty = traits.Bool( + True, usedefault=True, + desc='Generate exception if list is empty for a given field') + return_type = traits.Enum( + 'file', 'namedtuple', usedefault=True) + index_derivatives = traits.Bool( + False, usedefault=True, + desc='Index derivatives/ sub-directory') + extra_derivatives = traits.List( + Directory(exists=True), + desc='Additional derivative directories to index') class BIDSDataGrabber(LibraryBaseInterface, IOBase): @@ -2786,13 +2791,14 @@ def __init__(self, infields=None, **kwargs): "bold": {"datatype": "func", "suffix": "bold", "extensions": ["nii", ".nii.gz"]}, "T1w": {"datatype": "anat", "suffix": "T1w", - "extensions": ["nii", ".nii.gz"]}, + "extensions": ["nii", ".nii.gz"]}, } # If infields is empty, use all BIDS entities if infields is None: from bids import layout as bidslayout - bids_config = join(dirname(bidslayout.__file__), 'config', 'bids.json') + bids_config = join( + dirname(bidslayout.__file__), 'config', 'bids.json') bids_config = json.load(open(bids_config, 'r')) infields = [i['name'] for i in bids_config['entities']] @@ -2809,7 +2815,7 @@ def __init__(self, infields=None, **kwargs): def _list_outputs(self): from bids import BIDSLayout layout = BIDSLayout(self.inputs.base_dir, - derivatives=self.inputs.index_derivatives) + derivatives=self.inputs.index_derivatives) if isdefined(self.inputs.extra_derivatives): layout.add_derivatives(self.inputs.extra_derivatives) From 0f94d9da4d7b8e3b21b4e8859a20ab0986117653 Mon Sep 17 00:00:00 2001 From: delavega4 Date: Fri, 11 Jan 2019 13:51:29 -0500 Subject: [PATCH 0040/1665] Bump pybids version to 0.7.0 --- .travis.yml | 4 ++-- docker/generate_dockerfiles.sh | 2 +- nipype/info.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.travis.yml b/.travis.yml index 235bf9892c..56fc6ea7f7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -86,8 +86,8 @@ before_install: fi; - travis_retry pip install -r requirements.txt -- travis_retry pip install grabbit==0.1.2 -- travis_retry git clone https://github.com/INCF/pybids.git 
${HOME}/pybids && pip install -e ${HOME}/pybids +- travis_retry pip install grabbit==0.2.6 +- travis_retry pip install git+git@github.com:bids-standard/pybids.git@0.7.0#egg=pybids install: - travis_retry pip install $EXTRA_PIP_FLAGS -e .[$NIPYPE_EXTRAS] diff --git a/docker/generate_dockerfiles.sh b/docker/generate_dockerfiles.sh index 9c281f8bbc..31b43d62b6 100755 --- a/docker/generate_dockerfiles.sh +++ b/docker/generate_dockerfiles.sh @@ -92,7 +92,7 @@ function generate_main_dockerfile() { conda_install='python=${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR} icu=58.1 libxml2 libxslt matplotlib mkl numpy paramiko pandas psutil scikit-learn scipy traits=4.6.0' \ - pip_install="grabbit==0.1.2 https://github.com/INCF/pybids/tarball/0.6.5" \ + pip_install="grabbit==0.2.6 https://github.com/bids-standard/pybids/tarball/0.7.0" \ activate=true \ --copy docker/files/run_builddocs.sh docker/files/run_examples.sh \ docker/files/run_pytests.sh nipype/external/fsl_imglob.py /usr/bin/ \ diff --git a/nipype/info.py b/nipype/info.py index c6503ac753..bdf020d76d 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -169,7 +169,7 @@ def get_nipype_gitversion(): 'profiler': ['psutil>=5.0'], 'duecredit': ['duecredit'], 'xvfbwrapper': ['xvfbwrapper'], - 'pybids': ['pybids==0.6.5'], + 'pybids': ['pybids==0.7.0'], 'ssh': ['paramiko'], # 'mesh': ['mayavi'] # Enable when it works } From 77a051013856fbd12bb0be3a499c0f918aeaa75b Mon Sep 17 00:00:00 2001 From: delavega4 Date: Fri, 11 Jan 2019 14:39:32 -0500 Subject: [PATCH 0041/1665] use https for travis pybids requirement --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 56fc6ea7f7..a762a02d00 100644 --- a/.travis.yml +++ b/.travis.yml @@ -87,7 +87,7 @@ before_install: - travis_retry pip install -r requirements.txt - travis_retry pip install grabbit==0.2.6 -- travis_retry pip install git+git@github.com:bids-standard/pybids.git@0.7.0#egg=pybids +- travis_retry pip install git+https://github.com/bids-standard/pybids.git@0.7.0#egg=pybids install: - travis_retry pip install $EXTRA_PIP_FLAGS -e .[$NIPYPE_EXTRAS] From 0430c8565a041465c9efa74c5d089ef1ee0b4009 Mon Sep 17 00:00:00 2001 From: Yaroslav Halchenko Date: Fri, 11 Jan 2019 15:51:04 -0500 Subject: [PATCH 0042/1665] RF+ENH(TST): use %s instead of %f when rendering cmdline float point args This way makes it easier to test cmdline formation so we have 4.9 and not 4.90000 --- nipype/interfaces/ants/segmentation.py | 10 ++--- .../ants/tests/test_segmentation.py | 40 +++++++++++++++++++ 2 files changed, 45 insertions(+), 5 deletions(-) create mode 100644 nipype/interfaces/ants/tests/test_segmentation.py diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py index 11bc0c48c7..94e8c59c79 100644 --- a/nipype/interfaces/ants/segmentation.py +++ b/nipype/interfaces/ants/segmentation.py @@ -208,24 +208,24 @@ class LaplacianThicknessInputSpec(ANTSCommandInputSpec): keep_extension=True, hash_files=False) smooth_param = traits.Float( - argstr='%f', + argstr='%s', desc='Sigma of the Laplacian Recursive Image Filter (defaults to 1)', position=4) prior_thickness = traits.Float( - argstr='%f', + argstr='%s', desc='Prior thickness (defaults to 500)', position=5) dT = traits.Float( - argstr='%f', + argstr='%s', desc='Time delta used during integration (defaults to 0.01)', position=6) sulcus_prior = traits.Float( - argstr='%f', + argstr='%s', desc='Positive floating point number for sulcus prior. 
' 'Authors said that 0.15 might be a reasonable value', position=7) tolerance = traits.Float( - argstr='%f', + argstr='%s', desc='Tolerance to reach during optimization (defaults to 0.001)', position=8) diff --git a/nipype/interfaces/ants/tests/test_segmentation.py b/nipype/interfaces/ants/tests/test_segmentation.py new file mode 100644 index 0000000000..dca2ee68d4 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_segmentation.py @@ -0,0 +1,40 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +from ..segmentation import LaplacianThickness +from .test_resampling import change_dir + +import os +import pytest + + +@pytest.fixture() +def change_dir(request): + orig_dir = os.getcwd() + filepath = os.path.dirname(os.path.realpath(__file__)) + datadir = os.path.realpath(os.path.join(filepath, '../../../testing/data')) + os.chdir(datadir) + + def move2orig(): + os.chdir(orig_dir) + + request.addfinalizer(move2orig) + + +@pytest.fixture() +def create_lt(): + lt = LaplacianThickness() + # we do not run, so I stick some not really proper files as input + lt.inputs.input_gm = 'diffusion_weighted.nii' + lt.inputs.input_wm = 'functional.nii' + return lt + + +def test_LaplacianThickness_defaults(change_dir, create_lt): + lt = create_lt + base_cmd = 'LaplacianThickness functional.nii diffusion_weighted.nii functional_thickness.nii' + assert lt.cmdline == base_cmd + lt.inputs.smooth_param = 4.5 + assert lt.cmdline == base_cmd + " 4.5" + lt.inputs.prior_thickness = 5.9 + assert lt.cmdline == base_cmd + " 4.5 5.9" From fc18c784266c53e831c0c4247a978613fd9f2985 Mon Sep 17 00:00:00 2001 From: Yaroslav Halchenko Date: Fri, 11 Jan 2019 15:53:27 -0500 Subject: [PATCH 0043/1665] TST(BK): test_LaplacianThickness_wrongargs to demonstrate #2847 --- nipype/interfaces/ants/tests/test_segmentation.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/nipype/interfaces/ants/tests/test_segmentation.py b/nipype/interfaces/ants/tests/test_segmentation.py index dca2ee68d4..d595317713 100644 --- a/nipype/interfaces/ants/tests/test_segmentation.py +++ b/nipype/interfaces/ants/tests/test_segmentation.py @@ -38,3 +38,12 @@ def test_LaplacianThickness_defaults(change_dir, create_lt): assert lt.cmdline == base_cmd + " 4.5" lt.inputs.prior_thickness = 5.9 assert lt.cmdline == base_cmd + " 4.5 5.9" + + +def test_LaplacianThickness_wrongargs(change_dir, create_lt): + lt = create_lt + lt.inputs.prior_thickness = 5.9 + # 500 must not be placed as smooth_param + assert lt.cmdline != 'LaplacianThickness functional.nii diffusion_weighted.nii functional_thickness.nii 5.9' + # probably should have just raised an exception that "smooth_param" + # should also be defined \ No newline at end of file From a47c1ef82094ddd26a9b85e42e1a14539bba91ab Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Sat, 12 Jan 2019 14:03:18 -0500 Subject: [PATCH 0044/1665] enh: add s3 datagrabber example several examples here: https://github.com/kellyhennigan/cueexp_scripts/blob/25372d4f221762af6d79d1af67a8570f2c546b20/svmrfe/venv/lib64/python2.7/site-packages/nipype/interfaces/tests/test_io.py --- nipype/interfaces/io.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index e11ba47479..d1f2ecdfb3 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -829,7 +829,22 @@ class S3DataGrabber(LibraryBaseInterface, IOBase): "template" uses regex style formatting, rather than the glob-style found in the 
original DataGrabber. + Examples + -------- + + >>> s3grab = S3DataGrabber(infields=['subj_id'], outfields=["func", "anat"])) + >>> s3grab.inputs.bucket = 'openneuro' + >>> s3grab.inputs.sort_filelist = True + >>> s3grab.inputs.template = '*' + >>> s3grab.inputs.anon = True + >>> s3grab.inputs.bucket_path = 'ds000101/ds000101_R2.0.0/uncompressed/' + >>> s3grab.inputs.local_directory = '/tmp' + >>> s3grab.inputs.field_template = {'anat': '%s/anat/%s_T1w.nii.gz', 'func': '%s/func/%s_task-simon_run-1_bold.nii.gz'} + >>> s3grab.inputs.template_args = {'anat': [['subj_id', 'subj_id']], 'func': [['subj_id', 'subj_id']]} + >>> s3grab.inputs.subj_id = 'sub-01' + >>> s3grab.run() # doctest: +SKIP """ + input_spec = S3DataGrabberInputSpec output_spec = DynamicTraitedSpec _always_run = True From ea4fb4b3d954847f42041f2a67b0aaaefdaf5213 Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Sat, 12 Jan 2019 14:10:36 -0500 Subject: [PATCH 0045/1665] fix: typo --- nipype/interfaces/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index d1f2ecdfb3..923cebdc5a 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -832,7 +832,7 @@ class S3DataGrabber(LibraryBaseInterface, IOBase): Examples -------- - >>> s3grab = S3DataGrabber(infields=['subj_id'], outfields=["func", "anat"])) + >>> s3grab = S3DataGrabber(infields=['subj_id'], outfields=["func", "anat"]) >>> s3grab.inputs.bucket = 'openneuro' >>> s3grab.inputs.sort_filelist = True >>> s3grab.inputs.template = '*' From 3720651085e3b3876b1d1828c3739821a02e7e5a Mon Sep 17 00:00:00 2001 From: Yaroslav Halchenko Date: Sat, 12 Jan 2019 14:50:45 -0500 Subject: [PATCH 0046/1665] Remove pytest from requirements.txt which duplicates requirements --- requirements.txt | 3 --- 1 file changed, 3 deletions(-) diff --git a/requirements.txt b/requirements.txt index 1d1a4d1f9c..560a72ca38 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,9 +11,6 @@ neurdflib click>=6.6.0 funcsigs configparser -pytest>=3.0 -pytest-xdist -pytest-env mock pydotplus pydot>=1.2.3 From 5590d4dc43d838301c4931289894f8e4bab24420 Mon Sep 17 00:00:00 2001 From: delavega4 Date: Mon, 14 Jan 2019 11:48:10 -0500 Subject: [PATCH 0047/1665] Re-enable editable mode, and run tests in Python 2 --- .travis.yml | 2 +- nipype/interfaces/tests/test_io.py | 6 ------ 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/.travis.yml b/.travis.yml index a762a02d00..b1bc985cd4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -87,7 +87,7 @@ before_install: - travis_retry pip install -r requirements.txt - travis_retry pip install grabbit==0.2.6 -- travis_retry pip install git+https://github.com/bids-standard/pybids.git@0.7.0#egg=pybids +- travis_retry pip install -e git+https://github.com/bids-standard/pybids.git@0.7.0#egg=pybids install: - travis_retry pip install $EXTRA_PIP_FLAGS -e .[$NIPYPE_EXTRAS] diff --git a/nipype/interfaces/tests/test_io.py b/nipype/interfaces/tests/test_io.py index b4ac80366b..4420e4a49c 100644 --- a/nipype/interfaces/tests/test_io.py +++ b/nipype/interfaces/tests/test_io.py @@ -581,8 +581,6 @@ def test_jsonsink(tmpdir, inputs_attributes): # There are three reasons these tests will be skipped: @pytest.mark.skipif(not have_pybids, reason="Pybids is not installed") -@pytest.mark.skipif(sys.version_info < (3, 0), - reason="Pybids no longer supports Python 2") @pytest.mark.skipif(not dist_is_editable('pybids'), reason="Pybids is not installed in editable mode") def test_bids_grabber(tmpdir): @@ 
-598,8 +596,6 @@ def test_bids_grabber(tmpdir): @pytest.mark.skipif(not have_pybids, reason="Pybids is not installed") -@pytest.mark.skipif(sys.version_info < (3, 0), - reason="Pybids no longer supports Python 2") @pytest.mark.skipif(not dist_is_editable('pybids'), reason="Pybids is not installed in editable mode") def test_bids_fields(tmpdir): @@ -614,8 +610,6 @@ def test_bids_fields(tmpdir): @pytest.mark.skipif(not have_pybids, reason="Pybids is not installed") -@pytest.mark.skipif(sys.version_info < (3, 0), - reason="Pybids no longer supports Python 2") @pytest.mark.skipif(not dist_is_editable('pybids'), reason="Pybids is not installed in editable mode") def test_bids_infields_outfields(tmpdir): From 8a168d96c7fc8c57b81521e0ae8b2f0329d4d0db Mon Sep 17 00:00:00 2001 From: Yaroslav Halchenko Date: Mon, 14 Jan 2019 12:18:52 -0500 Subject: [PATCH 0048/1665] RF: move mock into tests_requires --- nipype/info.py | 2 +- requirements.txt | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/nipype/info.py b/nipype/info.py index 5780b71e1d..db3d1d73a4 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -147,7 +147,6 @@ def get_nipype_gitversion(): 'neurdflib', 'click>=%s' % CLICK_MIN_VERSION, 'funcsigs', - 'mock', 'pydotplus', 'pydot>=%s' % PYDOT_MIN_VERSION, 'packaging', @@ -158,6 +157,7 @@ def get_nipype_gitversion(): REQUIRES.append('configparser') TESTS_REQUIRES = [ + 'mock', 'pytest>=%s' % PYTEST_MIN_VERSION, 'pytest-xdist', 'pytest-cov', diff --git a/requirements.txt b/requirements.txt index 560a72ca38..0d951f49c0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,7 +11,6 @@ neurdflib click>=6.6.0 funcsigs configparser -mock pydotplus pydot>=1.2.3 packaging From 0be4459d671fd3349a63fe466431b9561c8bf3fe Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 15 Jan 2019 15:31:49 +0100 Subject: [PATCH 0049/1665] Update nipype/interfaces/mrtrix3/tracking.py Co-Authored-By: jguillon --- nipype/interfaces/mrtrix3/tracking.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/tracking.py b/nipype/interfaces/mrtrix3/tracking.py index e8dbb9423f..367b3dc2d4 100644 --- a/nipype/interfaces/mrtrix3/tracking.py +++ b/nipype/interfaces/mrtrix3/tracking.py @@ -82,7 +82,7 @@ class TractographyInputSpec(MRTrix3BaseInputSpec): 'selected and written to the output file')) select = traits.Int( argstr='-select %d', - min_ver=3, + min_ver='3', desc=('set the desired number of tracks. The program will continue' ' to generate tracks until this number of tracks have been ' 'selected and written to the output file')) From ba5363488fd0898c2b18d79ea2ccd3662316e61b Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 15 Jan 2019 15:32:03 +0100 Subject: [PATCH 0050/1665] Update nipype/interfaces/mrtrix3/tracking.py Co-Authored-By: jguillon --- nipype/interfaces/mrtrix3/tracking.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/tracking.py b/nipype/interfaces/mrtrix3/tracking.py index 367b3dc2d4..ea71752763 100644 --- a/nipype/interfaces/mrtrix3/tracking.py +++ b/nipype/interfaces/mrtrix3/tracking.py @@ -76,7 +76,7 @@ class TractographyInputSpec(MRTrix3BaseInputSpec): 'is 90deg x stepsize / voxelsize)')) n_tracks = traits.Int( argstr='-number %d', - max_ver=0.4, + max_ver='0.4', desc=('set the desired number of tracks. 
The program will continue' ' to generate tracks until this number of tracks have been ' 'selected and written to the output file')) From 8e2fad255f208fdffa57cc9d1d267d927f78bba9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Je=CC=81re=CC=81my=20Guillon?= Date: Tue, 15 Jan 2019 17:13:26 +0100 Subject: [PATCH 0051/1665] Update autotest Tracto --- nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py index affad3a2a0..c79fce8867 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py @@ -42,7 +42,7 @@ def test_Tractography_inputs(): ), n_tracks=dict( argstr='-number %d', - max_ver=0.4, + max_ver='0.4', ), n_trials=dict(argstr='-trials %d', ), noprecompt=dict(argstr='-noprecomputed', ), @@ -82,7 +82,7 @@ def test_Tractography_inputs(): seed_sphere=dict(argstr='-seed_sphere %f,%f,%f,%f', ), select=dict( argstr='-select %d', - min_ver=3, + min_ver='3', ), sph_trait=dict(argstr='%f,%f,%f,%f', ), step_size=dict(argstr='-step %f', ), From 0161e814a6488b0a210b6e598227321032d9e728 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 16 Jan 2019 16:25:25 -0500 Subject: [PATCH 0052/1665] RF: Drop pytest-xdist requirement, minimum pytest version --- nipype/__init__.py | 11 ++++++++--- nipype/info.py | 4 +--- nipype/pytest.ini | 2 +- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/nipype/__init__.py b/nipype/__init__.py index ad961e7df4..9e0cb5b1e0 100644 --- a/nipype/__init__.py +++ b/nipype/__init__.py @@ -25,16 +25,21 @@ class NipypeTester(object): - def __call__(self, doctests=True, parallel=True): + def __call__(self, doctests=True, parallel=False): try: import pytest - except: + except ImportError: raise RuntimeError( 'py.test not installed, run: pip install pytest') args = [] if not doctests: args.extend(['-p', 'no:doctest']) - if not parallel: + if parallel: + try: + import xdist + except ImportError: + raise RuntimeError( + "pytest-xdist required for parallel run") args.append('-n0') args.append(os.path.dirname(__file__)) pytest.main(args=args) diff --git a/nipype/info.py b/nipype/info.py index f87883380f..91ac0b0b76 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -109,7 +109,6 @@ def get_nipype_gitversion(): SCIPY_MIN_VERSION = '0.14' TRAITS_MIN_VERSION = '4.6' DATEUTIL_MIN_VERSION = '2.2' -PYTEST_MIN_VERSION = '3.6' FUTURE_MIN_VERSION = '0.16.0' SIMPLEJSON_MIN_VERSION = '3.8.0' PROV_VERSION = '1.5.2' @@ -160,8 +159,7 @@ def get_nipype_gitversion(): TESTS_REQUIRES = [ 'mock', - 'pytest>=%s' % PYTEST_MIN_VERSION, - 'pytest-xdist', + 'pytest', 'pytest-cov', 'codecov', 'pytest-env', diff --git a/nipype/pytest.ini b/nipype/pytest.ini index 70f12b64aa..c5706c3f75 100644 --- a/nipype/pytest.ini +++ b/nipype/pytest.ini @@ -1,6 +1,6 @@ [pytest] norecursedirs = .git build dist doc nipype/external tools examples src -addopts = --doctest-modules -n auto +addopts = --doctest-modules doctest_optionflags = ALLOW_UNICODE NORMALIZE_WHITESPACE env = PYTHONHASHSEED=0 From fb0801a42a7b19ece2084e981914883260ab0ce1 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Wed, 16 Jan 2019 16:31:11 -0500 Subject: [PATCH 0053/1665] CI: Install pytest-xdist in CI for speed --- .travis.yml | 5 ++++- docker/files/run_pytests.sh | 1 + docker/generate_dockerfiles.sh | 2 ++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 871e176ff0..5b3f937e44 100644 --- a/.travis.yml +++ b/.travis.yml @@ -95,8 +95,11 @@ before_install: install: - travis_retry pip install $EXTRA_PIP_FLAGS -e .[$NIPYPE_EXTRAS] +after_install: +- travis_retry pip install pytest-xdist + script: -- py.test -v --cov nipype --cov-config .coveragerc --cov-report xml:cov.xml -c nipype/pytest.ini --doctest-modules nipype +- py.test -v --cov nipype --cov-config .coveragerc --cov-report xml:cov.xml -c nipype/pytest.ini --doctest-modules nipype -n auto after_script: - codecov --file cov.xml --flags unittests -e TRAVIS_JOB_NUMBER diff --git a/docker/files/run_pytests.sh b/docker/files/run_pytests.sh index 24d9466f80..c3d33f2f53 100644 --- a/docker/files/run_pytests.sh +++ b/docker/files/run_pytests.sh @@ -29,6 +29,7 @@ export COVERAGE_FILE=${WORKDIR}/tests/.coverage.py${PYTHON_VERSION} py.test -v --junitxml=${WORKDIR}/tests/pytests_py${PYTHON_VERSION}.xml \ --cov nipype --cov-config /src/nipype/.coveragerc \ --cov-report xml:${WORKDIR}/tests/coverage_py${PYTHON_VERSION}.xml \ + -n auto \ -c ${TESTPATH}/pytest.ini ${TESTPATH} exit_code=$? diff --git a/docker/generate_dockerfiles.sh b/docker/generate_dockerfiles.sh index 9c281f8bbc..8b35cea2f8 100755 --- a/docker/generate_dockerfiles.sh +++ b/docker/generate_dockerfiles.sh @@ -108,6 +108,8 @@ function generate_main_dockerfile() { --miniconda use_env=neuro \ pip_opts="-e" \ pip_install="/src/nipype[all]" \ + --miniconda use_env=neuro \ + pip_install="pytest-xdist" \ --workdir /work \ --label org.label-schema.build-date='$BUILD_DATE' \ org.label-schema.name="NIPYPE" \ From b7a12488b4f5b513b23f8562ed8724f349f077f4 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 16 Jan 2019 16:38:39 -0500 Subject: [PATCH 0054/1665] MAINT: Move configparser to PEP508 style --- nipype/info.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/nipype/info.py b/nipype/info.py index 91ac0b0b76..2dd5489e0a 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -152,11 +152,9 @@ def get_nipype_gitversion(): 'pydot>=%s' % PYDOT_MIN_VERSION, 'packaging', 'futures; python_version == "2.7"', + 'configparser; python_version <= "3.4"', ] -if sys.version_info <= (3, 4): - REQUIRES.append('configparser') - TESTS_REQUIRES = [ 'mock', 'pytest', From c3fc35ab6eca14876b5f041b2d6cede7f8f53b97 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 17 Jan 2019 10:18:51 -0500 Subject: [PATCH 0055/1665] CI: after_install isn't a thing Stop trying to make it a thing. 
--- .travis.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 5b3f937e44..6ebe0821fc 100644 --- a/.travis.yml +++ b/.travis.yml @@ -94,8 +94,6 @@ before_install: install: - travis_retry pip install $EXTRA_PIP_FLAGS -e .[$NIPYPE_EXTRAS] - -after_install: - travis_retry pip install pytest-xdist script: From be9bc9c6d426c6b166b4eac2dc69df0e21de0ff2 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 18 Jan 2019 09:26:25 -0500 Subject: [PATCH 0056/1665] Update nipype/__init__.py --- nipype/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/__init__.py b/nipype/__init__.py index 9e0cb5b1e0..eca783c7d0 100644 --- a/nipype/__init__.py +++ b/nipype/__init__.py @@ -40,7 +40,7 @@ def __call__(self, doctests=True, parallel=False): except ImportError: raise RuntimeError( "pytest-xdist required for parallel run") - args.append('-n0') + args.append('-n auto') args.append(os.path.dirname(__file__)) pytest.main(args=args) From 12fe2b9bdc30be58f6e005200c8f822016cff554 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 18 Jan 2019 09:26:48 -0500 Subject: [PATCH 0057/1665] Update nipype/__init__.py --- nipype/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/__init__.py b/nipype/__init__.py index eca783c7d0..172617e887 100644 --- a/nipype/__init__.py +++ b/nipype/__init__.py @@ -40,7 +40,7 @@ def __call__(self, doctests=True, parallel=False): except ImportError: raise RuntimeError( "pytest-xdist required for parallel run") - args.append('-n auto') + args.append('-n auto') args.append(os.path.dirname(__file__)) pytest.main(args=args) From 329c74d63a1e1fb963d8e01035e36617acfaaec9 Mon Sep 17 00:00:00 2001 From: rciric Date: Sat, 19 Jan 2019 11:18:52 -0800 Subject: [PATCH 0058/1665] add variance-driven component selection, return component metadata --- nipype/algorithms/confounds.py | 81 ++++++++++++++++++++++++++-------- 1 file changed, 63 insertions(+), 18 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index 79c0b96f4e..96ee500fda 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -1139,15 +1139,29 @@ def combine_mask_files(mask_files, mask_method=None, mask_index=None): return [img] -def compute_noise_components(imgseries, mask_images, num_components, - filter_type, degree, period_cut, repetition_time): +def compute_noise_components(imgseries, mask_images, num_components=0.5, + filter_type=False, degree=0, period_cut=128, + repetition_time=None, failure_mode='error'): """Compute the noise components from the imgseries for each mask - imgseries: a nibabel img - mask_images: a list of nibabel images - num_components: number of noise components to return - filter_type: type off filter to apply to time series before computing - noise components. + Parameters + ---------- + imgseries: nibabel NIfTI object + Time series data to be decomposed. + mask_images: list + List of nibabel images. Time series data from `img_series` is subset + according to the spatial extent of each mask, and the subset data is + then decomposed using principal component analysis. Masks should be + coextensive with either anatomical or spatial noise ROIs. + num_components: float + Number of noise components to return. If this is a decimal value + between 0 and 1, then `create_noise_components` will instead return + the smallest number of components necessary to explain the indicated + fraction of variance. 
If `num_components` is -1, then all + components will be returned. + filter_type: str + Type of filter to apply to time series before computing + noise components. 'polynomial' - Legendre polynomial basis 'cosine' - Discrete cosine (DCT) basis False - None (mean-removal only) @@ -1158,16 +1172,20 @@ def compute_noise_components(imgseries, mask_images, num_components, period_cut: minimum period (in sec) for DCT high-pass filter repetition_time: time (in sec) between volume acquisitions - returns: - - components: a numpy array - basis: a numpy array containing the (non-constant) filter regressors - + Outputs + ------- + components: numpy array + Numpy array containing the requested set of noise components + basis: numpy array + Numpy array containing the (non-constant) filter regressors + metadata: dict(numpy array) + Dictionary of eigenvalues, fractional explained variances, and + cumulative explained variances. """ components = None basis = np.array([]) - for img in mask_images: - mask = img.get_data().astype(np.bool) + for i, img in enumerate(mask_images): + mask = img.get_data().astype(np.bool).squeeze() if imgseries.shape[:3] != mask.shape: raise ValueError( 'Inputs for CompCor, timeseries and mask, do not have ' @@ -1201,20 +1219,47 @@ def compute_noise_components(imgseries, mask_images, num_components, # "The covariance matrix C = MMT was constructed and decomposed into its # principal components using a singular value decomposition." try: - u, _, _ = fallback_svd(M, full_matrices=False) + u, s, _ = fallback_svd(M, full_matrices=False) except np.linalg.LinAlgError: if self.inputs.failure_mode == 'error': raise - u = np.ones((M.shape[0], num_components), dtype=np.float32) * np.nan + if num_components >= 1: + u = np.empty((M.shape[0], num_components), + dtype=np.float32) * np.nan + else: + continue + + variance_explained = np.array([value**2/np.sum(s**2) for value in s]) + cumulative_variance_explained = np.cumsum(variance_explained) + if 0 < num_components < 1: + num_components = np.searchsorted(cumulative_variance_explained, + num_components) + 1 + elif num_components == -1: + num_components = len(s) if components is None: components = u[:, :num_components] + metadata = { + 'mask': np.array([i] * len(s)), + 'singular_values': s, + 'variance_explained': variance_explained, + 'cumulative_variance_explained': cumulative_variance_explained + } else: components = np.hstack((components, u[:, :num_components])) - if components is None and num_components > 0: + metadata['mask'] = np.hstack((metadata['mask'], [i] * len(s))) + metadata['singular_values'] = ( + np.hstack((metadata['singular_values'], s))) + metadata['variance_explained'] = ( + np.hstack((metadata['variance_explained'], + variance_explained))) + metadata['cumulative_variance_explained'] = ( + np.hstack((metadata['cumulative_variance_explained'], + cumulative_variance_explained))) + if components is None and num_components != 0: if self.inputs.failure_mode == 'error': raise ValueError('No components found') components = np.ones((M.shape[0], num_components), dtype=np.float32) * np.nan - return components, basis + return components, basis, metadata def _compute_tSTD(M, x, axis=0): From 17f3e120b5cb48e14000a43203828c78c23b76d0 Mon Sep 17 00:00:00 2001 From: rciric Date: Sat, 19 Jan 2019 12:32:20 -0800 Subject: [PATCH 0059/1665] expose metadata to interface, fix component selection for multiple masks --- nipype/algorithms/confounds.py | 67 +++++++++++++++++++++++++--------- 1 file changed, 49 insertions(+), 18 deletions(-) diff --git 
a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index 96ee500fda..c8c8c1ac1d 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -390,7 +390,16 @@ class CompCorInputSpec(BaseInterfaceInputSpec): 'components_file.txt', usedefault=True, desc='Filename to store physiological components') - num_components = traits.Int(6, usedefault=True) # 6 for BOLD, 4 for ASL + num_components = traits.Float(6, usedefault=True, + desc='Number of components to return from the decomposition.' + 'If `num_components` is a positive integer, then ' + '`num_components` components will be retained. If ' + '`num_components` is a fractional value between 0 and 1, then ' + 'the number of components retained will be equal to the minimum ' + 'number of components necessary to explain the provided ' + 'fraction of variance in the masked time series. If ' + '`num_components` is -1, then all components will be retained.') + # 6 for BOLD, 4 for ASL pre_filter = traits.Enum( 'polynomial', 'cosine', @@ -418,6 +427,8 @@ class CompCorInputSpec(BaseInterfaceInputSpec): 'unspecified') save_pre_filter = traits.Either( traits.Bool, File, desc='Save pre-filter basis as text file') + save_metadata = traits.Either( + traits.Bool, File, desc='Save component metadata as text file') ignore_initial_volumes = traits.Range( low=0, usedefault=True, @@ -433,6 +444,7 @@ class CompCorOutputSpec(TraitedSpec): components_file = File( exists=True, desc='text file containing the noise components') pre_filter_file = File(desc='text file containing high-pass filter basis') + metadata_file = File(desc='text file containing component metadata') class CompCor(BaseInterface): @@ -548,7 +560,7 @@ def _run_interface(self, runtime): '{} cannot detect repetition time from image - ' 'Set the repetition_time input'.format(self._header)) - components, filter_basis = compute_noise_components( + components, filter_basis, metadata = compute_noise_components( imgseries.get_data(), mask_images, self.inputs.num_components, self.inputs.pre_filter, degree, self.inputs.high_pass_cutoff, TR) @@ -597,6 +609,16 @@ def _run_interface(self, runtime): header='\t'.join(header), comments='') + if self.inputs.save_metadata: + metadata_file = self._list_outputs()['metadata_file'] + np.savetxt( + metadata_file, + np.vstack(metadata.values()).T, + fmt=['%s', b'%.10f', b'%.10f', b'%.10f'], + delimiter='\t', + header='\t'.join(list(metadata.keys())), + comments='') + return runtime def _process_masks(self, mask_images, timeseries=None): @@ -613,6 +635,12 @@ def _list_outputs(self): save_pre_filter = os.path.abspath('pre_filter.tsv') outputs['pre_filter_file'] = save_pre_filter + save_metadata = self.inputs.save_metadata + if save_metadata: + if isinstance(save_metadata, bool): + save_metadata = os.path.abspath('component_metadata.tsv') + outputs['metadata_file'] = save_metadata + return outputs def _make_headers(self, num_col): @@ -1139,7 +1167,7 @@ def combine_mask_files(mask_files, mask_method=None, mask_index=None): return [img] -def compute_noise_components(imgseries, mask_images, num_components=0.5, +def compute_noise_components(imgseries, mask_images, components_criterion=0.5, filter_type=False, degree=0, period_cut=128, repetition_time=None, failure_mode='error'): """Compute the noise components from the imgseries for each mask @@ -1153,11 +1181,11 @@ def compute_noise_components(imgseries, mask_images, num_components=0.5, according to the spatial extent of each mask, and the subset data is then decomposed using principal 
component analysis. Masks should be coextensive with either anatomical or spatial noise ROIs. - num_components: float + components_criterion: float Number of noise components to return. If this is a decimal value between 0 and 1, then `create_noise_components` will instead return the smallest number of components necessary to explain the indicated - fraction of variance. If `num_components` is -1, then all + fraction of variance. If `components_criterion` is -1, then all components will be returned. filter_type: str Type of filter to apply to time series before computing @@ -1204,38 +1232,40 @@ def compute_noise_components(imgseries, mask_images, num_components=0.5, voxel_timecourses, repetition_time, period_cut) elif filter_type in ('polynomial', False): # from paper: - # "The constant and linear trends of the columns in the matrix M were - # removed [prior to ...]" + # "The constant and linear trends of the columns in the matrix M + # were removed [prior to ...]" voxel_timecourses, basis = regress_poly(degree, voxel_timecourses) - # "Voxel time series from the noise ROI (either anatomical or tSTD) were - # placed in a matrix M of size Nxm, with time along the row dimension - # and voxels along the column dimension." + # "Voxel time series from the noise ROI (either anatomical or tSTD) + # were placed in a matrix M of size Nxm, with time along the row + # dimension and voxels along the column dimension." M = voxel_timecourses.T # "[... were removed] prior to column-wise variance normalization." M = M / _compute_tSTD(M, 1.) - # "The covariance matrix C = MMT was constructed and decomposed into its - # principal components using a singular value decomposition." + # "The covariance matrix C = MMT was constructed and decomposed into + # its principal components using a singular value decomposition." 
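# --- Illustrative sketch, not part of the committed patch: how the singular
# --- values `s` computed just below turn into explained-variance fractions and
# --- into a component count when a fractional criterion is requested. Only
# --- numpy is assumed; the values of `s` are made up for demonstration.
import numpy as np
s = np.array([4.0, 2.0, 1.0, 0.5])               # singular values of M
variance_explained = s**2 / np.sum(s**2)         # ~[0.75, 0.19, 0.05, 0.01]
cumulative = np.cumsum(variance_explained)       # ~[0.75, 0.94, 0.99, 1.00]
# With a criterion of 0.9, searchsorted finds the first position where the
# cumulative variance reaches 0.9; adding 1 converts that index to a count.
num_components = np.searchsorted(cumulative, 0.9) + 1   # -> 2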
try: u, s, _ = fallback_svd(M, full_matrices=False) except np.linalg.LinAlgError: if self.inputs.failure_mode == 'error': raise - if num_components >= 1: - u = np.empty((M.shape[0], num_components), + if components_criterion >= 1: + u = np.empty((M.shape[0], components_criterion), dtype=np.float32) * np.nan else: continue variance_explained = np.array([value**2/np.sum(s**2) for value in s]) cumulative_variance_explained = np.cumsum(variance_explained) - if 0 < num_components < 1: + if 0 < components_criterion < 1: num_components = np.searchsorted(cumulative_variance_explained, - num_components) + 1 - elif num_components == -1: + components_criterion) + 1 + elif components_criterion == -1: num_components = len(s) + else: + num_components = components_criterion if components is None: components = u[:, :num_components] metadata = { @@ -1258,7 +1288,8 @@ def compute_noise_components(imgseries, mask_images, num_components=0.5, if components is None and num_components != 0: if self.inputs.failure_mode == 'error': raise ValueError('No components found') - components = np.ones((M.shape[0], num_components), dtype=np.float32) * np.nan + components = np.ones((M.shape[0], num_components), + dtype=np.float32) * np.nan return components, basis, metadata From 114e6d43f244881136a5d53aff768525d1d987f1 Mon Sep 17 00:00:00 2001 From: rciric Date: Sat, 19 Jan 2019 12:35:42 -0800 Subject: [PATCH 0060/1665] propagate failure mode if provided --- nipype/algorithms/confounds.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index c8c8c1ac1d..3d06fdd6ee 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -562,7 +562,8 @@ def _run_interface(self, runtime): components, filter_basis, metadata = compute_noise_components( imgseries.get_data(), mask_images, self.inputs.num_components, - self.inputs.pre_filter, degree, self.inputs.high_pass_cutoff, TR) + self.inputs.pre_filter, degree, self.inputs.high_pass_cutoff, TR, + self.inputs.failure_mode) if skip_vols: old_comp = components From 6f4fc19749d8a11864bb6ff20f728424cf83e716 Mon Sep 17 00:00:00 2001 From: rciric Date: Sat, 19 Jan 2019 13:34:56 -0800 Subject: [PATCH 0061/1665] allow mask naming in metadata --- nipype/algorithms/confounds.py | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index 3d06fdd6ee..7c2ada89fb 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -386,6 +386,10 @@ class CompCorInputSpec(BaseInterfaceInputSpec): requires=['mask_files'], desc=('Position of mask in `mask_files` to use - ' 'first is the default.')) + mask_names = traits.List(traits.Str, + desc='Names for provided masks (for printing into metadata). 
' + 'If provided, it must be as long as the final mask list ' + '(after any merge and indexing operations).') components_file = traits.Str( 'components_file.txt', usedefault=True, @@ -563,7 +567,7 @@ def _run_interface(self, runtime): components, filter_basis, metadata = compute_noise_components( imgseries.get_data(), mask_images, self.inputs.num_components, self.inputs.pre_filter, degree, self.inputs.high_pass_cutoff, TR, - self.inputs.failure_mode) + self.inputs.failure_mode, self.inputs.mask_names) if skip_vols: old_comp = components @@ -612,13 +616,11 @@ def _run_interface(self, runtime): if self.inputs.save_metadata: metadata_file = self._list_outputs()['metadata_file'] - np.savetxt( - metadata_file, - np.vstack(metadata.values()).T, - fmt=['%s', b'%.10f', b'%.10f', b'%.10f'], - delimiter='\t', - header='\t'.join(list(metadata.keys())), - comments='') + with open(metadata_file, 'w') as f: + f.write('{}\t{}\t{}\t{}\n'.format(*list(metadata.keys()))) + for i in zip(*metadata.values()): + f.write('{0[0]}\t{0[1]:.10f}\t{0[2]:.10f}\t' + '{0[3]:.10f}\n'.format(i)) return runtime @@ -650,6 +652,9 @@ def _make_headers(self, num_col): headers = ['{}{:02d}'.format(header, i) for i in range(num_col)] return '\t'.join(headers) + def _print_metadata(self, x, f): + f.write('{0[0]}\t{0[1]:.10f}\t{0[2]:.10f}\t{0[3]:.10f}\n'.format(x)) + class ACompCor(CompCor): """ @@ -1170,7 +1175,8 @@ def combine_mask_files(mask_files, mask_method=None, mask_index=None): def compute_noise_components(imgseries, mask_images, components_criterion=0.5, filter_type=False, degree=0, period_cut=128, - repetition_time=None, failure_mode='error'): + repetition_time=None, failure_mode='error', + mask_names=''): """Compute the noise components from the imgseries for each mask Parameters @@ -1213,7 +1219,9 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, """ components = None basis = np.array([]) - for i, img in enumerate(mask_images): + if not mask_names: + mask_names = range(len(mask_images)) + for i, img in zip(mask_names, mask_images): mask = img.get_data().astype(np.bool).squeeze() if imgseries.shape[:3] != mask.shape: raise ValueError( From 4d2208e874591a83867dfd2db528b4add7617451 Mon Sep 17 00:00:00 2001 From: rciric Date: Sat, 19 Jan 2019 14:48:56 -0800 Subject: [PATCH 0062/1665] add contributor --- .zenodo.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.zenodo.json b/.zenodo.json index 9fccdcc316..f773f8854f 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -301,6 +301,11 @@ "name": "Liem, Franz", "orcid": "0000-0003-0646-4810" }, + { + "affiliation": "Stanford University", + "name": "Ciric, Rastko", + "orcid": "0000-0001-6347-7939" + }, { "affiliation": "UniversityHospital Heidelberg, Germany", "name": "Kleesiek, Jens" From bfbde82ee16c0803b16bf3152035d951a33e0305 Mon Sep 17 00:00:00 2001 From: rciric Date: Sat, 19 Jan 2019 15:10:40 -0800 Subject: [PATCH 0063/1665] include component index in metadata --- nipype/algorithms/confounds.py | 37 +++++++++++++++++----------------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index 7c2ada89fb..0b85e4b3c1 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -578,12 +578,13 @@ def _run_interface(self, runtime): components_file = os.path.join(os.getcwd(), self.inputs.components_file) + components_header = self._make_headers(components.shape[1]) np.savetxt( components_file, components, fmt=b"%.10f", delimiter='\t', - 
header=self._make_headers(components.shape[1]), + header='\t'.join(components_header), comments='') if self.inputs.pre_filter and self.inputs.save_pre_filter: @@ -617,10 +618,11 @@ def _run_interface(self, runtime): if self.inputs.save_metadata: metadata_file = self._list_outputs()['metadata_file'] with open(metadata_file, 'w') as f: - f.write('{}\t{}\t{}\t{}\n'.format(*list(metadata.keys()))) - for i in zip(*metadata.values()): - f.write('{0[0]}\t{0[1]:.10f}\t{0[2]:.10f}\t' - '{0[3]:.10f}\n'.format(i)) + f.write('{}\t{}\t{}\t{}\t{}\n'.format('component', + *list(metadata.keys()))) + for i in zip(components_header, *metadata.values()): + f.write('{0[0]}\t{0[1]}\t{0[2]:.10f}\t' + '{0[3]:.10f}\t{0[4]:.10f}\n'.format(i)) return runtime @@ -650,10 +652,7 @@ def _make_headers(self, num_col): header = self.inputs.header_prefix if \ isdefined(self.inputs.header_prefix) else self._header headers = ['{}{:02d}'.format(header, i) for i in range(num_col)] - return '\t'.join(headers) - - def _print_metadata(self, x, f): - f.write('{0[0]}\t{0[1]:.10f}\t{0[2]:.10f}\t{0[3]:.10f}\n'.format(x)) + return headers class ACompCor(CompCor): @@ -1274,26 +1273,28 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, elif components_criterion == -1: num_components = len(s) else: - num_components = components_criterion + num_components = int(components_criterion) if components is None: components = u[:, :num_components] metadata = { - 'mask': np.array([i] * len(s)), - 'singular_values': s, - 'variance_explained': variance_explained, - 'cumulative_variance_explained': cumulative_variance_explained + 'mask': np.array([i] * num_components), + 'singular_values': s[:num_components], + 'variance_explained': variance_explained[:num_components], + 'cumulative_variance_explained': + cumulative_variance_explained[:num_components] } else: components = np.hstack((components, u[:, :num_components])) - metadata['mask'] = np.hstack((metadata['mask'], [i] * len(s))) + metadata['mask'] = np.hstack((metadata['mask'], + [i] * num_components)) metadata['singular_values'] = ( - np.hstack((metadata['singular_values'], s))) + np.hstack((metadata['singular_values'], s[:num_components]))) metadata['variance_explained'] = ( np.hstack((metadata['variance_explained'], - variance_explained))) + variance_explained[:num_components]))) metadata['cumulative_variance_explained'] = ( np.hstack((metadata['cumulative_variance_explained'], - cumulative_variance_explained))) + cumulative_variance_explained[:num_components]))) if components is None and num_components != 0: if self.inputs.failure_mode == 'error': raise ValueError('No components found') From 037387901e825bd410acce5cdfa3baa47e637f23 Mon Sep 17 00:00:00 2001 From: rciric Date: Sat, 19 Jan 2019 15:26:48 -0800 Subject: [PATCH 0064/1665] update autotests and make naming consistent --- nipype/algorithms/confounds.py | 6 +++--- nipype/algorithms/tests/test_auto_ACompCor.py | 3 +++ nipype/algorithms/tests/test_auto_TCompCor.py | 3 +++ 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index 0b85e4b3c1..d946c9df5a 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -1278,7 +1278,7 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, components = u[:, :num_components] metadata = { 'mask': np.array([i] * num_components), - 'singular_values': s[:num_components], + 'singular_value': s[:num_components], 'variance_explained': 
variance_explained[:num_components], 'cumulative_variance_explained': cumulative_variance_explained[:num_components] @@ -1287,8 +1287,8 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, components = np.hstack((components, u[:, :num_components])) metadata['mask'] = np.hstack((metadata['mask'], [i] * num_components)) - metadata['singular_values'] = ( - np.hstack((metadata['singular_values'], s[:num_components]))) + metadata['singular_value'] = ( + np.hstack((metadata['singular_value'], s[:num_components]))) metadata['variance_explained'] = ( np.hstack((metadata['variance_explained'], variance_explained[:num_components]))) diff --git a/nipype/algorithms/tests/test_auto_ACompCor.py b/nipype/algorithms/tests/test_auto_ACompCor.py index 235d15da9e..ae69dac4e5 100644 --- a/nipype/algorithms/tests/test_auto_ACompCor.py +++ b/nipype/algorithms/tests/test_auto_ACompCor.py @@ -15,6 +15,7 @@ def test_ACompCor_inputs(): requires=['mask_files'], xor=['merge_method'], ), + mask_names=dict(), merge_method=dict( requires=['mask_files'], xor=['mask_index'], @@ -24,6 +25,7 @@ def test_ACompCor_inputs(): realigned_file=dict(mandatory=True, ), regress_poly_degree=dict(usedefault=True, ), repetition_time=dict(), + save_metadata=dict(), save_pre_filter=dict(), use_regress_poly=dict( deprecated='0.15.0', @@ -38,6 +40,7 @@ def test_ACompCor_inputs(): def test_ACompCor_outputs(): output_map = dict( components_file=dict(), + metadata_file=dict(), pre_filter_file=dict(), ) outputs = ACompCor.output_spec() diff --git a/nipype/algorithms/tests/test_auto_TCompCor.py b/nipype/algorithms/tests/test_auto_TCompCor.py index 59a5b84f76..484f7ca5f4 100644 --- a/nipype/algorithms/tests/test_auto_TCompCor.py +++ b/nipype/algorithms/tests/test_auto_TCompCor.py @@ -15,6 +15,7 @@ def test_TCompCor_inputs(): requires=['mask_files'], xor=['merge_method'], ), + mask_names=dict(), merge_method=dict( requires=['mask_files'], xor=['mask_index'], @@ -25,6 +26,7 @@ def test_TCompCor_inputs(): realigned_file=dict(mandatory=True, ), regress_poly_degree=dict(usedefault=True, ), repetition_time=dict(), + save_metadata=dict(), save_pre_filter=dict(), use_regress_poly=dict( deprecated='0.15.0', @@ -40,6 +42,7 @@ def test_TCompCor_outputs(): output_map = dict( components_file=dict(), high_variance_masks=dict(), + metadata_file=dict(), pre_filter_file=dict(), ) outputs = TCompCor.output_spec() From 50e2f47d5c2e9fb6ddacd113d4c4d187b604ed46 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sun, 20 Jan 2019 10:20:28 -0500 Subject: [PATCH 0065/1665] CI: Move pytest-xdist installation --- docker/generate_dockerfiles.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/docker/generate_dockerfiles.sh b/docker/generate_dockerfiles.sh index 8b35cea2f8..cbe26d05a8 100755 --- a/docker/generate_dockerfiles.sh +++ b/docker/generate_dockerfiles.sh @@ -92,7 +92,7 @@ function generate_main_dockerfile() { conda_install='python=${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR} icu=58.1 libxml2 libxslt matplotlib mkl numpy paramiko pandas psutil scikit-learn scipy traits=4.6.0' \ - pip_install="grabbit==0.1.2 https://github.com/INCF/pybids/tarball/0.6.5" \ + pip_install="pytest-xdist" \ activate=true \ --copy docker/files/run_builddocs.sh docker/files/run_examples.sh \ docker/files/run_pytests.sh nipype/external/fsl_imglob.py /usr/bin/ \ @@ -108,8 +108,6 @@ function generate_main_dockerfile() { --miniconda use_env=neuro \ pip_opts="-e" \ pip_install="/src/nipype[all]" \ - --miniconda use_env=neuro \ - pip_install="pytest-xdist" \ --workdir /work \ --label org.label-schema.build-date='$BUILD_DATE' \ org.label-schema.name="NIPYPE" \ From e88a825858090883856a91cb1e71655c04a8c2a5 Mon Sep 17 00:00:00 2001 From: Horea Christian Date: Thu, 10 Jan 2019 23:43:43 +0100 Subject: [PATCH 0066/1665] Support for BIDS event files --- nipype/algorithms/modelgen.py | 81 ++++++++++++++++++- .../tests/test_auto_SpecifyModel.py | 13 +++ .../tests/test_auto_SpecifySPMModel.py | 13 +++ .../tests/test_auto_SpecifySparseModel.py | 13 +++ 4 files changed, 119 insertions(+), 1 deletion(-) diff --git a/nipype/algorithms/modelgen.py b/nipype/algorithms/modelgen.py index 75abd8410b..ddb8539c49 100644 --- a/nipype/algorithms/modelgen.py +++ b/nipype/algorithms/modelgen.py @@ -144,6 +144,61 @@ def scale_timings(timelist, input_units, output_units, time_repetition): timelist = [np.max([0., _scalefactor * t]) for t in timelist] return timelist +def bids_gen_info(bids_event_files, + condition_column='trial_type', + amplitude_column=None, + time_repetition=False, + ): + """Generate subject_info structure from a list of BIDS .tsv event files. + + Parameters + ---------- + + bids_event_files : list of str + Filenames of BIDS .tsv event files containing columns including: + 'onset', 'duration', and 'trial_type' or the `condition_column` value. + condition_column : str + Column of files in `bids_event_files` based on the values of which + events will be sorted into different regressors + amplitude_column : str + Column of files in `bids_event_files` based on the values of which + to apply amplitudes to events. If unspecified, all events will be + represented with an amplitude of 1. 
+ + Returns + ------- + + list of Bunch + """ + info = [] + for bids_event_file in bids_event_files: + with open(bids_event_file) as f: + f_events = csv.DictReader(f, skipinitialspace=True, delimiter='\t') + events = [{k: v for k, v in row.items()} for row in f_events] + conditions = list(set([i[condition_column] for i in events])) + runinfo = Bunch(conditions=[], onsets=[], durations=[], amplitudes=[]) + for condition in conditions: + selected_events = [i for i in events if i[condition_column]==condition] + onsets = [float(i['onset']) for i in selected_events] + durations = [float(i['duration']) for i in selected_events] + if time_repetition: + decimals = math.ceil(-math.log10(time_repetition)) + onsets = [np.round(i, decimals) for i in onsets] + durations = [np.round(i ,decimals) for i in durations] + if condition: + runinfo.conditions.append(condition) + else: + runinfo.conditions.append('e0') + runinfo.onsets.append(onsets) + runinfo.durations.append(durations) + try: + amplitudes = [float(i[amplitude_column]) for i in selected_events] + runinfo.amplitudes.append(amplitudes) + except KeyError: + runinfo.amplitudes.append([1] * len(onsets)) + info.append(runinfo) + return info + def gen_info(run_event_files): """Generate subject_info structure from a list of event files @@ -190,6 +245,23 @@ class SpecifyModelInputSpec(BaseInterfaceInputSpec): desc='List of event description files 1, 2 or 3 ' 'column format corresponding to onsets, ' 'durations and amplitudes') + bids_event_file = InputMultiPath( + File(exists=True), + mandatory=True, + xor=['subject_info', 'event_files', 'bids_event_file'], + desc='TSV event file containing common BIDS fields: `onset`,' + '`duration`, and categorization and amplitude columns') + bids_condition_column = traits.Str(exists=True, + mandatory=False, + default_value='trial_type', + usedefault=True, + desc='Column of the file passed to `bids_event_file` to the ' + 'unique values of which events will be assigned' + 'to regressors') + bids_amplitude_column = traits.Str(exists=True, + mandatory=False, + desc='Column of the file passed to `bids_event_file` ' + 'according to which to assign amplitudes to events') realignment_parameters = InputMultiPath( File(exists=True), desc='Realignment parameters returned ' @@ -432,8 +504,15 @@ def _generate_design(self, infolist=None): if infolist is None: if isdefined(self.inputs.subject_info): infolist = self.inputs.subject_info - else: + elif isdefined(self.inputs.event_files): infolist = gen_info(self.inputs.event_files) + elif isdefined(self.inputs.bids_event_file): + infolist = bids_gen_info( + self.inputs.bids_event_file, + self.inputs.bids_condition_column, + self.inputs.bids_amplitude_column, + self.inputs.time_repetition, + ) self._sessinfo = self._generate_standard_design( infolist, functional_runs=self.inputs.functional_runs, diff --git a/nipype/algorithms/tests/test_auto_SpecifyModel.py b/nipype/algorithms/tests/test_auto_SpecifyModel.py index 452a048764..3dfc22a82f 100644 --- a/nipype/algorithms/tests/test_auto_SpecifyModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifyModel.py @@ -5,6 +5,19 @@ def test_SpecifyModel_inputs(): input_map = dict( + bids_amplitude_column=dict( + exists=True, + mandatory=False, + ), + bids_condition_column=dict( + exists=True, + mandatory=False, + usedefault=True, + ), + bids_event_file=dict( + mandatory=True, + xor=['subject_info', 'event_files', 'bids_event_file'], + ), event_files=dict( mandatory=True, xor=['subject_info', 'event_files'], diff --git 
a/nipype/algorithms/tests/test_auto_SpecifySPMModel.py b/nipype/algorithms/tests/test_auto_SpecifySPMModel.py index 1f3ec7058d..34c1c9548f 100644 --- a/nipype/algorithms/tests/test_auto_SpecifySPMModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifySPMModel.py @@ -5,6 +5,19 @@ def test_SpecifySPMModel_inputs(): input_map = dict( + bids_amplitude_column=dict( + exists=True, + mandatory=False, + ), + bids_condition_column=dict( + exists=True, + mandatory=False, + usedefault=True, + ), + bids_event_file=dict( + mandatory=True, + xor=['subject_info', 'event_files', 'bids_event_file'], + ), concatenate_runs=dict(usedefault=True, ), event_files=dict( mandatory=True, diff --git a/nipype/algorithms/tests/test_auto_SpecifySparseModel.py b/nipype/algorithms/tests/test_auto_SpecifySparseModel.py index 93fc035fc2..71964dad54 100644 --- a/nipype/algorithms/tests/test_auto_SpecifySparseModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifySparseModel.py @@ -5,6 +5,19 @@ def test_SpecifySparseModel_inputs(): input_map = dict( + bids_amplitude_column=dict( + exists=True, + mandatory=False, + ), + bids_condition_column=dict( + exists=True, + mandatory=False, + usedefault=True, + ), + bids_event_file=dict( + mandatory=True, + xor=['subject_info', 'event_files', 'bids_event_file'], + ), event_files=dict( mandatory=True, xor=['subject_info', 'event_files'], From 2c551d0534f49fabbc03a97bb8ea7d9ac1b61465 Mon Sep 17 00:00:00 2001 From: rciric Date: Sun, 20 Jan 2019 15:06:47 -0800 Subject: [PATCH 0067/1665] (CompCor) more intuitive interface following review from @effigies --- nipype/algorithms/confounds.py | 40 ++++++++++++++----- nipype/algorithms/tests/test_auto_ACompCor.py | 3 +- nipype/algorithms/tests/test_auto_TCompCor.py | 3 +- 3 files changed, 34 insertions(+), 12 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index d946c9df5a..baf96e9fd8 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -394,16 +394,22 @@ class CompCorInputSpec(BaseInterfaceInputSpec): 'components_file.txt', usedefault=True, desc='Filename to store physiological components') - num_components = traits.Float(6, usedefault=True, - desc='Number of components to return from the decomposition.' - 'If `num_components` is a positive integer, then ' - '`num_components` components will be retained. If ' - '`num_components` is a fractional value between 0 and 1, then ' + num_components = traits.Either('all', traits.Int, + xor=['variance_threshold'], + desc='Number of components to return from the decomposition. If ' + '`num_components` is `all`, then all components will be ' + 'retained.') + # 6 for BOLD, 4 for ASL + # automatically instantiated to 6 in CompCor below if neither + # `num_components` nor `variance_threshold` is defined (for + # backward compatibility) + variance_threshold = traits.Float(xor=['num_components'], + desc='Select the number of components to be returned automatically ' + 'based on their ability to explain variance in the dataset. ' + '`variance_threshold` is a fractional value between 0 and 1; ' 'the number of components retained will be equal to the minimum ' 'number of components necessary to explain the provided ' - 'fraction of variance in the masked time series. 
If ' - '`num_components` is -1, then all components will be retained.') - # 6 for BOLD, 4 for ASL + 'fraction of variance in the masked time series.') pre_filter = traits.Enum( 'polynomial', 'cosine', @@ -564,8 +570,20 @@ def _run_interface(self, runtime): '{} cannot detect repetition time from image - ' 'Set the repetition_time input'.format(self._header)) + if isdefined(self.inputs.variance_threshold): + components_criterion = self.inputs.variance_threshold + elif isdefined(self.inputs.num_components): + components_criterion = self.inputs.num_components + else: + components_criterion = 6 + IFLOGGER.warning('`num_components` and `variance_threshold` are ' + 'not defined. Setting number of components to 6 ' + 'for backward compatibility. Please set either ' + '`num_components` or `variance_threshold`, as ' + 'this feature may be deprecated in the future.') + components, filter_basis, metadata = compute_noise_components( - imgseries.get_data(), mask_images, self.inputs.num_components, + imgseries.get_data(), mask_images, components_criterion, self.inputs.pre_filter, degree, self.inputs.high_pass_cutoff, TR, self.inputs.failure_mode, self.inputs.mask_names) @@ -1191,7 +1209,7 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, Number of noise components to return. If this is a decimal value between 0 and 1, then `create_noise_components` will instead return the smallest number of components necessary to explain the indicated - fraction of variance. If `components_criterion` is -1, then all + fraction of variance. If `components_criterion` is `all`, then all components will be returned. filter_type: str Type of filter to apply to time series before computing @@ -1218,6 +1236,8 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, """ components = None basis = np.array([]) + if components_criterion == 'all': + components_criterion = -1 if not mask_names: mask_names = range(len(mask_images)) for i, img in zip(mask_names, mask_images): diff --git a/nipype/algorithms/tests/test_auto_ACompCor.py b/nipype/algorithms/tests/test_auto_ACompCor.py index ae69dac4e5..95a9f51a88 100644 --- a/nipype/algorithms/tests/test_auto_ACompCor.py +++ b/nipype/algorithms/tests/test_auto_ACompCor.py @@ -20,7 +20,7 @@ def test_ACompCor_inputs(): requires=['mask_files'], xor=['mask_index'], ), - num_components=dict(usedefault=True, ), + num_components=dict(xor=['variance_threshold'], ), pre_filter=dict(usedefault=True, ), realigned_file=dict(mandatory=True, ), regress_poly_degree=dict(usedefault=True, ), @@ -31,6 +31,7 @@ def test_ACompCor_inputs(): deprecated='0.15.0', new_name='pre_filter', ), + variance_threshold=dict(xor=['num_components'], ), ) inputs = ACompCor.input_spec() diff --git a/nipype/algorithms/tests/test_auto_TCompCor.py b/nipype/algorithms/tests/test_auto_TCompCor.py index 484f7ca5f4..1e94ef4241 100644 --- a/nipype/algorithms/tests/test_auto_TCompCor.py +++ b/nipype/algorithms/tests/test_auto_TCompCor.py @@ -20,7 +20,7 @@ def test_TCompCor_inputs(): requires=['mask_files'], xor=['mask_index'], ), - num_components=dict(usedefault=True, ), + num_components=dict(xor=['variance_threshold'], ), percentile_threshold=dict(usedefault=True, ), pre_filter=dict(usedefault=True, ), realigned_file=dict(mandatory=True, ), @@ -32,6 +32,7 @@ def test_TCompCor_inputs(): deprecated='0.15.0', new_name='pre_filter', ), + variance_threshold=dict(xor=['num_components'], ), ) inputs = TCompCor.input_spec() From a53cd46c4c004f358115e3b04dcb59b7a498d5e0 Mon Sep 17 
00:00:00 2001 From: rciric Date: Sun, 20 Jan 2019 15:41:45 -0800 Subject: [PATCH 0068/1665] manually set `num_components` in test --- nipype/algorithms/tests/test_CompCor.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nipype/algorithms/tests/test_CompCor.py b/nipype/algorithms/tests/test_CompCor.py index 488ad3c960..b4c4739cfb 100644 --- a/nipype/algorithms/tests/test_CompCor.py +++ b/nipype/algorithms/tests/test_CompCor.py @@ -61,7 +61,7 @@ def test_compcor(self): 'aCompCor') def test_tcompcor(self): - ccinterface = TCompCor( + ccinterface = TCompCor(num_components=6, realigned_file=self.realigned_file, percentile_threshold=0.75) self.run_cc(ccinterface, [['-0.1114536190', '-0.4632908609'], [ '0.4566907310', '0.6983205193' @@ -70,7 +70,8 @@ def test_tcompcor(self): ], ['-0.1342351356', '0.1407855119']], 'tCompCor') def test_tcompcor_no_percentile(self): - ccinterface = TCompCor(realigned_file=self.realigned_file) + ccinterface = TCompCor(num_components=6, + realigned_file=self.realigned_file) ccinterface.run() mask = nb.load('mask_000.nii.gz').get_data() @@ -160,7 +161,6 @@ def run_cc(self, assert ccresult.outputs.components_file == expected_file assert os.path.exists(expected_file) assert os.path.getsize(expected_file) > 0 - assert ccinterface.inputs.num_components == 6 with open(ccresult.outputs.components_file, 'r') as components_file: expected_n_components = min(ccinterface.inputs.num_components, From b811d47a07eb729cc83b983cab6e9981278ae3c4 Mon Sep 17 00:00:00 2001 From: rciric Date: Sun, 20 Jan 2019 15:41:45 -0800 Subject: [PATCH 0069/1665] manually set `num_components` in test --- nipype/algorithms/tests/test_CompCor.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/nipype/algorithms/tests/test_CompCor.py b/nipype/algorithms/tests/test_CompCor.py index 488ad3c960..b20bc3d2c4 100644 --- a/nipype/algorithms/tests/test_CompCor.py +++ b/nipype/algorithms/tests/test_CompCor.py @@ -48,12 +48,14 @@ def test_compcor(self): self.run_cc( CompCor( + num_components=6, realigned_file=self.realigned_file, mask_files=self.mask_files, mask_index=0), expected_components) self.run_cc( ACompCor( + num_components=6, realigned_file=self.realigned_file, mask_files=self.mask_files, mask_index=0, @@ -61,7 +63,7 @@ def test_compcor(self): 'aCompCor') def test_tcompcor(self): - ccinterface = TCompCor( + ccinterface = TCompCor(num_components=6, realigned_file=self.realigned_file, percentile_threshold=0.75) self.run_cc(ccinterface, [['-0.1114536190', '-0.4632908609'], [ '0.4566907310', '0.6983205193' @@ -70,7 +72,8 @@ def test_tcompcor(self): ], ['-0.1342351356', '0.1407855119']], 'tCompCor') def test_tcompcor_no_percentile(self): - ccinterface = TCompCor(realigned_file=self.realigned_file) + ccinterface = TCompCor(num_components=6, + realigned_file=self.realigned_file) ccinterface.run() mask = nb.load('mask_000.nii.gz').get_data() @@ -80,6 +83,7 @@ def test_tcompcor_no_percentile(self): def test_compcor_no_regress_poly(self): self.run_cc( CompCor( + num_components=6, realigned_file=self.realigned_file, mask_files=self.mask_files, mask_index=0, @@ -160,7 +164,6 @@ def run_cc(self, assert ccresult.outputs.components_file == expected_file assert os.path.exists(expected_file) assert os.path.getsize(expected_file) > 0 - assert ccinterface.inputs.num_components == 6 with open(ccresult.outputs.components_file, 'r') as components_file: expected_n_components = min(ccinterface.inputs.num_components, From 577e3957ee11039664b2f3ccdf68fedcd564c928 Mon Sep 17 
00:00:00 2001 From: rciric Date: Sun, 20 Jan 2019 18:48:25 -0800 Subject: [PATCH 0070/1665] add unit test for variance_threshold condition --- nipype/algorithms/tests/test_CompCor.py | 56 ++++++++++++++++++++++--- 1 file changed, 50 insertions(+), 6 deletions(-) diff --git a/nipype/algorithms/tests/test_CompCor.py b/nipype/algorithms/tests/test_CompCor.py index b20bc3d2c4..5fbe641a0b 100644 --- a/nipype/algorithms/tests/test_CompCor.py +++ b/nipype/algorithms/tests/test_CompCor.py @@ -1,6 +1,7 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os +import re import nibabel as nb import numpy as np @@ -62,6 +63,30 @@ def test_compcor(self): components_file='acc_components_file'), expected_components, 'aCompCor') + def test_compcor_variance_threshold_and_metadata(self): + expected_components = [['-0.2027150345', '-0.4954813834'], + ['0.2565929051', '0.7866217875'], + ['-0.3550986008', '-0.0089784905'], + ['0.7512786244', '-0.3599828482'], + ['-0.4500578942', '0.0778209345']] + expected_metadata = { + 'component': 'CompCor00', + 'mask': '0', + 'singular_value': '4.0720553036', + 'variance_explained': '0.5527211465', + 'cumulative_variance_explained': '0.5527211465' + } + ccinterface = CompCor( + variance_threshold=0.7, + realigned_file=self.realigned_file, + mask_files=self.mask_files, + mask_index=1, + save_metadata=True) + self.run_cc(ccinterface=ccinterface, + expected_components=expected_components, + expected_n_components=2, + expected_metadata=expected_metadata) + def test_tcompcor(self): ccinterface = TCompCor(num_components=6, realigned_file=self.realigned_file, percentile_threshold=0.75) @@ -155,7 +180,9 @@ def test_tcompcor_multi_mask_no_index(self): def run_cc(self, ccinterface, expected_components, - expected_header='CompCor'): + expected_header='CompCor', + expected_n_components=None, + expected_metadata=None): # run ccresult = ccinterface.run() @@ -166,10 +193,12 @@ def run_cc(self, assert os.path.getsize(expected_file) > 0 with open(ccresult.outputs.components_file, 'r') as components_file: - expected_n_components = min(ccinterface.inputs.num_components, - self.fake_data.shape[3]) + if expected_n_components is None: + expected_n_components = min(ccinterface.inputs.num_components, + self.fake_data.shape[3]) - components_data = [line.split('\t') for line in components_file] + components_data = [re.sub('\n', '', line).split('\t') + for line in components_file] # the first item will be '#', we can throw it out header = components_data.pop(0) @@ -183,9 +212,24 @@ def run_cc(self, num_got_timepoints = len(components_data) assert num_got_timepoints == self.fake_data.shape[3] for index, timepoint in enumerate(components_data): - assert (len(timepoint) == ccinterface.inputs.num_components - or len(timepoint) == self.fake_data.shape[3]) + assert (len(timepoint) == expected_n_components) assert timepoint[:2] == expected_components[index] + + if ccinterface.inputs.save_metadata: + expected_metadata_file = ( + ccinterface._list_outputs()['metadata_file']) + assert ccresult.outputs.metadata_file == expected_metadata_file + assert os.path.exists(expected_metadata_file) + assert os.path.getsize(expected_metadata_file) > 0 + + with open(ccresult.outputs.metadata_file, 'r') as metadata_file: + components_metadata = [re.sub('\n', '', line).split('\t') + for line in metadata_file] + components_metadata = {i: j for i, j in + zip(components_metadata[0], + components_metadata[1])} + assert components_metadata == expected_metadata 
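# --- Illustrative usage sketch, not part of the committed patch: how the
# --- variance_threshold / save_metadata / mask_names inputs exercised by this
# --- test might be combined in user code. The file names are placeholders and
# --- the snippet assumes a nipype build with these patches applied.
from nipype.algorithms.confounds import ACompCor
cc = ACompCor(realigned_file='func.nii.gz',        # hypothetical input files
              mask_files=['wm_csf_mask.nii.gz'],
              mask_names=['WM+CSF'],
              variance_threshold=0.5,              # keep components up to 50% variance
              save_metadata=True)                  # also write component_metadata.tsv
# res = cc.run()                                   # not run here: files are placeholders
# res.outputs.components_file -> tab-separated noise regressors
# res.outputs.metadata_file   -> per-component singular values / variance explained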
+ return ccresult @staticmethod From 66c754079b11e7b5d54d1b2c331f0f282971ba79 Mon Sep 17 00:00:00 2001 From: rciric Date: Sun, 20 Jan 2019 21:24:43 -0800 Subject: [PATCH 0071/1665] provide mask name to circumvent test failure --- nipype/algorithms/tests/test_CompCor.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nipype/algorithms/tests/test_CompCor.py b/nipype/algorithms/tests/test_CompCor.py index 5fbe641a0b..53c0959664 100644 --- a/nipype/algorithms/tests/test_CompCor.py +++ b/nipype/algorithms/tests/test_CompCor.py @@ -71,7 +71,7 @@ def test_compcor_variance_threshold_and_metadata(self): ['-0.4500578942', '0.0778209345']] expected_metadata = { 'component': 'CompCor00', - 'mask': '0', + 'mask': 'mask', 'singular_value': '4.0720553036', 'variance_explained': '0.5527211465', 'cumulative_variance_explained': '0.5527211465' @@ -80,6 +80,7 @@ def test_compcor_variance_threshold_and_metadata(self): variance_threshold=0.7, realigned_file=self.realigned_file, mask_files=self.mask_files, + mask_names=['mask'], mask_index=1, save_metadata=True) self.run_cc(ccinterface=ccinterface, From 0bb009616be85caa154301ae170806542cf388d1 Mon Sep 17 00:00:00 2001 From: rciric Date: Mon, 21 Jan 2019 00:29:56 -0800 Subject: [PATCH 0072/1665] (CompCor) try using an OrderedDict for metadata --- nipype/algorithms/confounds.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index baf96e9fd8..657a201de5 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -8,6 +8,11 @@ absolute_import) from builtins import range +# Py2 compat: http://python-future.org/compatible_idioms.html#collections-counter-and-ordereddict +from future import standard_library +standard_library.install_aliases() +from collections import OrderedDict + import os import os.path as op @@ -1230,7 +1235,7 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, Numpy array containing the requested set of noise components basis: numpy array Numpy array containing the (non-constant) filter regressors - metadata: dict(numpy array) + metadata: OrderedDict{str: numpy array} Dictionary of eigenvalues, fractional explained variances, and cumulative explained variances. 
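# --- Illustrative sketch, not part of the committed patch: the shape of the
# --- metadata dictionary described above, for two components retained from a
# --- single mask named 'CSF'. The numeric values are made up.
from collections import OrderedDict
import numpy as np
metadata = OrderedDict([
    ('mask', np.array(['CSF', 'CSF'])),                      # mask label per component
    ('singular_value', np.array([4.07, 2.58])),
    ('variance_explained', np.array([0.55, 0.22])),
    ('cumulative_variance_explained', np.array([0.55, 0.77])),
])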
""" @@ -1296,13 +1301,12 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, num_components = int(components_criterion) if components is None: components = u[:, :num_components] - metadata = { - 'mask': np.array([i] * num_components), - 'singular_value': s[:num_components], - 'variance_explained': variance_explained[:num_components], - 'cumulative_variance_explained': - cumulative_variance_explained[:num_components] - } + metadata = OrderedDict() + metadata['mask'] = np.array([i] * num_components) + metadata['singular_value'] = s[:num_components] + metadata['variance_explained'] = variance_explained[:num_components] + metadata['cumulative_variance_explained'] = ( + cumulative_variance_explained[:num_components]) else: components = np.hstack((components, u[:, :num_components])) metadata['mask'] = np.hstack((metadata['mask'], From e35cbf81de41881637df5bd421cd98b64057f76f Mon Sep 17 00:00:00 2001 From: Adam Richie-Halford Date: Wed, 12 Dec 2018 09:33:31 -0800 Subject: [PATCH 0073/1665] Add EddyQuad interface --- nipype/interfaces/fsl/__init__.py | 2 +- nipype/interfaces/fsl/epi.py | 214 ++++++++++++++++++++++++++++++ 2 files changed, 215 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/fsl/__init__.py b/nipype/interfaces/fsl/__init__.py index e8f192f4f2..c6de303307 100644 --- a/nipype/interfaces/fsl/__init__.py +++ b/nipype/interfaces/fsl/__init__.py @@ -21,7 +21,7 @@ WarpPointsFromStd, RobustFOV, CopyGeom, MotionOutliers) from .epi import (PrepareFieldmap, TOPUP, ApplyTOPUP, Eddy, EPIDeWarp, SigLoss, - EddyCorrect, EpiReg) + EddyCorrect, EpiReg, EddyQuad) from .dti import (BEDPOSTX, XFibres, DTIFit, ProbTrackX, ProbTrackX2, VecReg, ProjThresh, FindTheBiggest, DistanceMap, TractSkeleton, MakeDyadicVectors, BEDPOSTX5, XFibres5) diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index a13da0e0dc..cbd2772dd5 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -1232,3 +1232,217 @@ def _run_interface(self, runtime): if runtime.stderr: self.raise_exception(runtime) return runtime + + +class EddyQuadInputSpec(FSLCommandInputSpec): + base_name = traits.Str( + 'eddy_corrected', + argstr='%s', + desc="Basename (including path) specified when running EDDY", + position=0, + ) + idx_file = File( + exists=True, + mandatory=True, + argstr="--eddyIdx=%s", + desc=("File containing indices for all volumes into acquisition " + "parameters") + ) + param_file = File( + exists=True, + mandatory=True, + argstr="--eddyParams=%s", + desc="File containing acquisition parameters" + ) + mask_file = File( + exists=True, + mandatory=True, + argstr="--mask=%s", + desc="Binary mask file" + ) + bval_file = File( + exists=True, + mandatory=True, + argstr="--bvals=%s", + desc="b-values file" + ) + bvec_file = File( + exists=True, + mandatory=False, + argstr="--bvecs=%s", + desc=("b-vectors file - only used when .eddy_residuals " + "file is present") + ) + output_dir = traits.Str( + 'eddy_corrected.qc', + mandatory=False, + argstr='--output-dir=%s', + desc="Output directory - default = '.qc'", + ) + field = File( + mandatory=False, + argstr='--field=%s', + desc="TOPUP estimated field (in Hz)", + ) + slspec = File( + mandatory=False, + argstr='--slspec=%s', + desc="Text file specifying slice/group acquisition", + ) + verbose = traits.Bool( + False, + mandatory=False, + argstr='--verbose', + desc="Display debug messages", + ) + + +class EddyQuadOutputSpec(TraitedSpec): + out_qc_json = File( + exists=True, + mandatory=True, + desc=("Single 
subject database containing quality metrics and data " + "info.") + ) + out_qc_pdf = File( + exists=True, + mandatory=True, + desc="Single subject QC report." + ) + out_avg_b_png = traits.List( + File( + exists=True, + mandatory=True, + desc=("Image showing mid-sagittal, -coronal and -axial slices of " + "each averaged b-shell volume.") + ) + ) + out_avg_b0_png = traits.List( + File( + exists=True, + mandatory=False, + desc=("Image showing mid-sagittal, -coronal and -axial slices of " + "each averaged pe-direction b0 volume. Generated when using " + "the -f option.") + ) + ) + out_cnr_png = traits.List( + File( + exists=True, + mandatory=False, + desc=("Image showing mid-sagittal, -coronal and -axial slices of " + "each b-shell CNR volume. Generated when CNR maps are " + "available.") + ) + ) + out_vdm_png = File( + exists=True, + mandatory=False, + desc=("Image showing mid-sagittal, -coronal and -axial slices of " + "the voxel displacement map. Generated when using the -f " + "option.") + ) + out_residuals = File( + exists=True, + mandatory=False, + desc=("Text file containing the volume-wise mask-averaged squared " + "residuals. Generated when residual maps are available.") + ) + out_clean_volumes = File( + exists=True, + mandatory=False, + desc=("Text file containing a list of clean volumes, based on " + "the eddy squared residuals. To generate a version of the " + "pre-processed dataset without outlier volumes, use: " + "`fslselectvols -i -o " + "eddy_corrected_data_clean --vols=vols_no_outliers.txt`") + ) + + +class EddyQuad(FSLCommand): + """ + Interface for FSL eddy_quad, a tool for generating single subject reports + and storing the quality assessment indices for each subject. + `User guide `_ + + Examples + -------- + + >>> from nipype.interfaces.fsl import EddyQuad + >>> quad = EddyQuad() + >>> quad.inputs.in_file = 'epi.nii' + >>> quad.inputs.in_mask = 'epi_mask.nii' + >>> quad.inputs.in_index = 'epi_index.txt' + >>> quad.inputs.in_acqp = 'epi_acqp.txt' + >>> quad.inputs.in_bvec = 'bvecs.scheme' + >>> quad.inputs.in_bval = 'bvals.scheme' + >>> quad.inputs.use_cuda = True + >>> quad.cmdline # doctest: +ELLIPSIS + 'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \ +--bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \ +--mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected' + >>> quad.cmdline # doctest: +ELLIPSIS + 'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \ +--bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \ +--mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected' + >>> res = quad.run() # doctest: +SKIP + + """ + _cmd = 'eddy_quad' + input_spec = EddyQuadInputSpec + output_spec = EddyQuadOutputSpec + + def __init__(self, **inputs): + super(EddyQuad, self).__init__(**inputs) + + def _list_outputs(self): + outputs = self.output_spec().get() + out_dir = self.inputs.output_dir + outputs['out_qc_json'] = os.path.abspath( + os.path.join(self.inputs.output_dir, 'out.json') + ) + outputs['out_qc_json'] = os.path.abspath( + os.path.join(self.inputs.output_dir, 'out.json') + ) + outputs['out_parameter'] = os.path.abspath( + '%s.eddy_parameters' % self.inputs.out_base) + + # File generation might depend on the version of EDDY + out_rotated_bvecs = os.path.abspath( + '%s.eddy_rotated_bvecs' % self.inputs.out_base) + out_movement_rms = os.path.abspath( + '%s.eddy_movement_rms' % self.inputs.out_base) + out_restricted_movement_rms = os.path.abspath( + '%s.eddy_restricted_movement_rms' % self.inputs.out_base) + 
out_shell_alignment_parameters = os.path.abspath( + '%s.eddy_post_eddy_shell_alignment_parameters' % + self.inputs.out_base) + out_outlier_report = os.path.abspath( + '%s.eddy_outlier_report' % self.inputs.out_base) + if isdefined(self.inputs.cnr_maps) and self.inputs.cnr_maps: + out_cnr_maps = os.path.abspath( + '%s.eddy_cnr_maps.nii.gz' % self.inputs.out_base) + if os.path.exists(out_cnr_maps): + outputs['out_cnr_maps'] = out_cnr_maps + if isdefined(self.inputs.residuals) and self.inputs.residuals: + out_residuals = os.path.abspath( + '%s.eddy_residuals.nii.gz' % self.inputs.out_base) + if os.path.exists(out_residuals): + outputs['out_residuals'] = out_residuals + + if os.path.exists(out_rotated_bvecs): + outputs['out_rotated_bvecs'] = out_rotated_bvecs + if os.path.exists(out_movement_rms): + outputs['out_movement_rms'] = out_movement_rms + if os.path.exists(out_restricted_movement_rms): + outputs['out_restricted_movement_rms'] = \ + out_restricted_movement_rms + if os.path.exists(out_shell_alignment_parameters): + outputs['out_shell_alignment_parameters'] = \ + out_shell_alignment_parameters + if os.path.exists(out_outlier_report): + outputs['out_outlier_report'] = out_outlier_report + + return outputs + + From 9e4d5e897874675823066bd317b20c3988482ff1 Mon Sep 17 00:00:00 2001 From: akeshavan Date: Wed, 12 Dec 2018 10:13:10 -0800 Subject: [PATCH 0074/1665] fix: completed outputspec for EddyQuad --- nipype/interfaces/fsl/epi.py | 81 +++++++++++++++++++----------------- 1 file changed, 42 insertions(+), 39 deletions(-) diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index cbd2772dd5..427553ab82 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -1304,11 +1304,13 @@ class EddyQuadOutputSpec(TraitedSpec): desc=("Single subject database containing quality metrics and data " "info.") ) + out_qc_pdf = File( exists=True, mandatory=True, desc="Single subject QC report." ) + out_avg_b_png = traits.List( File( exists=True, @@ -1317,6 +1319,7 @@ class EddyQuadOutputSpec(TraitedSpec): "each averaged b-shell volume.") ) ) + out_avg_b0_png = traits.List( File( exists=True, @@ -1326,6 +1329,7 @@ class EddyQuadOutputSpec(TraitedSpec): "the -f option.") ) ) + out_cnr_png = traits.List( File( exists=True, @@ -1335,6 +1339,7 @@ class EddyQuadOutputSpec(TraitedSpec): "available.") ) ) + out_vdm_png = File( exists=True, mandatory=False, @@ -1342,12 +1347,14 @@ class EddyQuadOutputSpec(TraitedSpec): "the voxel displacement map. Generated when using the -f " "option.") ) + out_residuals = File( exists=True, mandatory=False, desc=("Text file containing the volume-wise mask-averaged squared " "residuals. 
Generated when residual maps are available.") ) + out_clean_volumes = File( exists=True, mandatory=False, @@ -1396,52 +1403,48 @@ def __init__(self, **inputs): super(EddyQuad, self).__init__(**inputs) def _list_outputs(self): + from glob import glob outputs = self.output_spec().get() out_dir = self.inputs.output_dir outputs['out_qc_json'] = os.path.abspath( - os.path.join(self.inputs.output_dir, 'out.json') + os.path.join(out_dir, 'qc.json') ) - outputs['out_qc_json'] = os.path.abspath( - os.path.join(self.inputs.output_dir, 'out.json') + outputs['out_qc_pdf'] = os.path.abspath( + os.path.join(out_dir, 'qc.pdf') ) - outputs['out_parameter'] = os.path.abspath( - '%s.eddy_parameters' % self.inputs.out_base) - # File generation might depend on the version of EDDY - out_rotated_bvecs = os.path.abspath( - '%s.eddy_rotated_bvecs' % self.inputs.out_base) - out_movement_rms = os.path.abspath( - '%s.eddy_movement_rms' % self.inputs.out_base) - out_restricted_movement_rms = os.path.abspath( - '%s.eddy_restricted_movement_rms' % self.inputs.out_base) - out_shell_alignment_parameters = os.path.abspath( - '%s.eddy_post_eddy_shell_alignment_parameters' % - self.inputs.out_base) - out_outlier_report = os.path.abspath( - '%s.eddy_outlier_report' % self.inputs.out_base) - if isdefined(self.inputs.cnr_maps) and self.inputs.cnr_maps: - out_cnr_maps = os.path.abspath( - '%s.eddy_cnr_maps.nii.gz' % self.inputs.out_base) - if os.path.exists(out_cnr_maps): - outputs['out_cnr_maps'] = out_cnr_maps - if isdefined(self.inputs.residuals) and self.inputs.residuals: - out_residuals = os.path.abspath( - '%s.eddy_residuals.nii.gz' % self.inputs.out_base) - if os.path.exists(out_residuals): - outputs['out_residuals'] = out_residuals + outputs['out_avg_b0_png'] = glob(os.path.abspath( + os.path.join(out_dir, 'avg_b0_pe*.png') + )) - if os.path.exists(out_rotated_bvecs): - outputs['out_rotated_bvecs'] = out_rotated_bvecs - if os.path.exists(out_movement_rms): - outputs['out_movement_rms'] = out_movement_rms - if os.path.exists(out_restricted_movement_rms): - outputs['out_restricted_movement_rms'] = \ - out_restricted_movement_rms - if os.path.exists(out_shell_alignment_parameters): - outputs['out_shell_alignment_parameters'] = \ - out_shell_alignment_parameters - if os.path.exists(out_outlier_report): - outputs['out_outlier_report'] = out_outlier_report + outputs['out_avg_b_png'] = [b for b in glob(os.path.abspath( + os.path.join(out_dir, 'avg_b*.png') + )) if b not in outputs['out_avg_b0_png']] + + outputs['out_cnr_png'] = glob(os.path.abspath( + os.path.join(out_dir, 'cnr*.png') + )) + + vdm = os.path.abspath( + os.path.join(out_dir, 'vdm.png') + ) + + if os.path.exists(vdm): + outputs['out_vdm_png'] = vdm + + residuals = os.path.abspath( + os.path.join(out_dir, 'eddy_msr.txt') + ) + + if os.path.exists(residuals): + outputs['out_residuals'] = residuals + + outlier_vols = os.path.abspath( + os.path.join(out_dir, 'vols_no_outliers.txt') + ) + + if os.path.exists(outlier_vols): + outputs['out_clean_volumes'] = outlier_vols return outputs From 292d5fbd4d38e4b63e93506745af6c037666a74f Mon Sep 17 00:00:00 2001 From: akeshavan Date: Wed, 12 Dec 2018 10:53:14 -0800 Subject: [PATCH 0075/1665] fix: made the outputdir be mandatory and use the default val --- nipype/interfaces/fsl/epi.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index 427553ab82..1aea937a51 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -1275,7 
+1275,8 @@ class EddyQuadInputSpec(FSLCommandInputSpec): ) output_dir = traits.Str( 'eddy_corrected.qc', - mandatory=False, + mandatory=True, + usedefault=True, argstr='--output-dir=%s', desc="Output directory - default = '.qc'", ) From 8bfc9fabd2135f344179ec558dbe4f26b852748f Mon Sep 17 00:00:00 2001 From: Adam Richie-Halford Date: Wed, 12 Dec 2018 14:06:16 -0800 Subject: [PATCH 0076/1665] Add doctest for EddyQuad --- nipype/interfaces/fsl/epi.py | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index 1aea937a51..b9e191744f 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -1378,21 +1378,19 @@ class EddyQuad(FSLCommand): >>> from nipype.interfaces.fsl import EddyQuad >>> quad = EddyQuad() - >>> quad.inputs.in_file = 'epi.nii' - >>> quad.inputs.in_mask = 'epi_mask.nii' - >>> quad.inputs.in_index = 'epi_index.txt' - >>> quad.inputs.in_acqp = 'epi_acqp.txt' - >>> quad.inputs.in_bvec = 'bvecs.scheme' - >>> quad.inputs.in_bval = 'bvals.scheme' - >>> quad.inputs.use_cuda = True + >>> quad.inputs.base_name = 'eddy_corrected' + >>> quad.inputs.idx_file = 'index.txt' + >>> quad.inputs.param_file = 'encfile.txt' + >>> quad.inputs.mask_file = 'mask.nii.gz' + >>> quad.inputs.bval_file = 'dwi.bval' + >>> quad.inputs.bvec_file = 'dwi.bvec' + >>> quad.inputs.output_dir = 'eddy_corrected.qc' + >>> quad.inputs.field = 'field.nii.gz' + >>> quad.verbose = True >>> quad.cmdline # doctest: +ELLIPSIS - 'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \ ---bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \ ---mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected' - >>> quad.cmdline # doctest: +ELLIPSIS - 'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \ ---bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \ ---mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected' + 'eddy_quad eddy_corrected --eddyIdx=index.txt --eddyParams=encfile.txt \ +--mask=mask.nii.gz --bvals=dwi.bval --bvecs=dwi.bvec \ +--output-dir=eddy_corrected.qc --field=field.nii.gz --verbose' >>> res = quad.run() # doctest: +SKIP """ From 48482fea7c04f39b72be546bf25f469c33b46a6c Mon Sep 17 00:00:00 2001 From: Adam Richie-Halford Date: Wed, 12 Dec 2018 20:49:37 -0800 Subject: [PATCH 0077/1665] Add name to .zenodo.json --- .zenodo.json | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.zenodo.json b/.zenodo.json index 9fccdcc316..01ea25ecd5 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -589,7 +589,12 @@ "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", "orcid": "0000-0002-5312-6729" - } + }, + { + "affiliation": "University of Washington", + "name": "Richie-Halford, Adam", + "orcid": "0000-0001-9276-9084" + }, ], "keywords": [ "neuroimaging", From 4c197753d988b0a73c267470f6a5c299afe30a61 Mon Sep 17 00:00:00 2001 From: Adam Richie-Halford Date: Thu, 13 Dec 2018 10:44:26 -0800 Subject: [PATCH 0078/1665] Fix tests for EddyQuad in interfaces/fsl/epi.py --- nipype/interfaces/fsl/epi.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index b9e191744f..c4eeef6708 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -1379,18 +1379,19 @@ class EddyQuad(FSLCommand): >>> from nipype.interfaces.fsl import EddyQuad >>> quad = EddyQuad() >>> quad.inputs.base_name = 'eddy_corrected' - >>> quad.inputs.idx_file 
= 'index.txt' - >>> quad.inputs.param_file = 'encfile.txt' - >>> quad.inputs.mask_file = 'mask.nii.gz' - >>> quad.inputs.bval_file = 'dwi.bval' - >>> quad.inputs.bvec_file = 'dwi.bvec' + >>> quad.inputs.idx_file = 'epi_index.txt' + >>> quad.inputs.param_file = 'epi_acqp.txt' + >>> quad.inputs.mask_file = 'epi_mask.nii' + >>> quad.inputs.bval_file = 'bvals.scheme' + >>> quad.inputs.bvec_file = 'bvecs.scheme' >>> quad.inputs.output_dir = 'eddy_corrected.qc' - >>> quad.inputs.field = 'field.nii.gz' - >>> quad.verbose = True + >>> quad.inputs.field = 'fieldmap_phase_fslprepared.nii' + >>> quad.inputs.verbose = True >>> quad.cmdline # doctest: +ELLIPSIS - 'eddy_quad eddy_corrected --eddyIdx=index.txt --eddyParams=encfile.txt \ ---mask=mask.nii.gz --bvals=dwi.bval --bvecs=dwi.bvec \ ---output-dir=eddy_corrected.qc --field=field.nii.gz --verbose' + 'eddy_quad eddy_corrected --bvals=bvals.scheme --bvecs=bvecs.scheme \ +--field=fieldmap_phase_fslprepared.nii --eddyIdx=epi_index.txt \ +--mask=epi_mask.nii --output-dir=eddy_corrected.qc --eddyParams=epi_acqp.txt \ +--verbose' >>> res = quad.run() # doctest: +SKIP """ From 5675276d15db26d5aa1ec76e5ba9fb94640e725c Mon Sep 17 00:00:00 2001 From: Adam Richie-Halford Date: Fri, 14 Dec 2018 14:22:04 -0800 Subject: [PATCH 0079/1665] Edit in response to @effigies comments on PR #2825 - Remove redundant `__init__` method in `EddyQuad` class. - Use `os.path.abspath()` earlier in order to remove from later statements. - Improve `EddyQuad`'s `base_name` input description. - Remove unnecessary `mandatory=False` params. - Rename `slspec` to `slice_spec`. - Use default for `EddyQuad`'s `base_name` input. - Use a name template for `EddyQuad`'s `output_dir` input. --- nipype/interfaces/fsl/epi.py | 80 +++++++++++++++--------------------- 1 file changed, 33 insertions(+), 47 deletions(-) diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index c4eeef6708..1211fbc759 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -1237,8 +1237,10 @@ def _run_interface(self, runtime): class EddyQuadInputSpec(FSLCommandInputSpec): base_name = traits.Str( 'eddy_corrected', + usedefault=True, argstr='%s', - desc="Basename (including path) specified when running EDDY", + desc=("Basename (including path) for EDDY output files, i.e., " + "corrected images and QC files"), position=0, ) idx_file = File( @@ -1268,31 +1270,26 @@ class EddyQuadInputSpec(FSLCommandInputSpec): ) bvec_file = File( exists=True, - mandatory=False, argstr="--bvecs=%s", desc=("b-vectors file - only used when .eddy_residuals " "file is present") ) output_dir = traits.Str( - 'eddy_corrected.qc', - mandatory=True, - usedefault=True, + name_template='%s.qc', + name_source=['base_name'], argstr='--output-dir=%s', desc="Output directory - default = '.qc'", ) field = File( - mandatory=False, argstr='--field=%s', desc="TOPUP estimated field (in Hz)", ) - slspec = File( - mandatory=False, + slice_spec = File( argstr='--slspec=%s', desc="Text file specifying slice/group acquisition", ) verbose = traits.Bool( False, - mandatory=False, argstr='--verbose', desc="Display debug messages", ) @@ -1321,7 +1318,7 @@ class EddyQuadOutputSpec(TraitedSpec): ) ) - out_avg_b0_png = traits.List( + out_avg_b0_pe_png = traits.List( File( exists=True, mandatory=False, @@ -1399,52 +1396,41 @@ class EddyQuad(FSLCommand): input_spec = EddyQuadInputSpec output_spec = EddyQuadOutputSpec - def __init__(self, **inputs): - super(EddyQuad, self).__init__(**inputs) - def _list_outputs(self): - from glob 
import glob + import json outputs = self.output_spec().get() - out_dir = self.inputs.output_dir - outputs['out_qc_json'] = os.path.abspath( - os.path.join(out_dir, 'qc.json') - ) - outputs['out_qc_pdf'] = os.path.abspath( - os.path.join(out_dir, 'qc.pdf') - ) - - outputs['out_avg_b0_png'] = glob(os.path.abspath( - os.path.join(out_dir, 'avg_b0_pe*.png') - )) - - outputs['out_avg_b_png'] = [b for b in glob(os.path.abspath( - os.path.join(out_dir, 'avg_b*.png') - )) if b not in outputs['out_avg_b0_png']] + out_dir = os.path.abspath(self.inputs.output_dir) + outputs['out_qc_json'] = os.path.join(out_dir, 'qc.json') + outputs['out_qc_pdf'] = os.path.join(out_dir, 'qc.pdf') - outputs['out_cnr_png'] = glob(os.path.abspath( - os.path.join(out_dir, 'cnr*.png') - )) + with open(outputs['out_qc_json']) as fp: + qc = json.load(fp) - vdm = os.path.abspath( - os.path.join(out_dir, 'vdm.png') - ) + outputs['out_avg_b_png'] = [ + os.path.join(out_dir, 'avg_b{bval:d}.png'.format(bval=bval)) + for bval in list(set([0] + qc.get('data_unique_bvals'))) + ] - if os.path.exists(vdm): - outputs['out_vdm_png'] = vdm + if qc.get('qc_field_flag'): + outputs['out_avg_b0_pe_png'] = [ + os.path.join(out_dir, 'avg_b0_pe{i:d}'.format(i=i)) + for i in range(qc.get('data_no_PE_dirs')) + ] - residuals = os.path.abspath( - os.path.join(out_dir, 'eddy_msr.txt') - ) + outputs['out_vdm_png'] = os.path.join(out_dir, 'vdm.png') - if os.path.exists(residuals): - outputs['out_residuals'] = residuals + if qc.get('qc_cnr_flag'): + outputs['out_cnr_png'] = [ + os.path.join(out_dir, 'cnr{i:04d}.nii.gz.png') + for i, _ in enumerate(qc.get('qc_cnr_avg')) + ] - outlier_vols = os.path.abspath( - os.path.join(out_dir, 'vols_no_outliers.txt') - ) + if qc.get('qc_rss_flag'): + outputs['out_residuals'] = os.path.join(out_dir, 'eddy_msr.txt') - if os.path.exists(outlier_vols): - outputs['out_clean_volumes'] = outlier_vols + if qc.get('qc_ol_flag'): + outputs['out_clean_volumes'] = os.path.join(out_dir, + 'vols_no_outliers.txt') return outputs From 4a41f5ecedd4ad33722f4eaf84a2d4529efda877 Mon Sep 17 00:00:00 2001 From: Adam Richie-Halford Date: Fri, 18 Jan 2019 13:56:58 -0800 Subject: [PATCH 0080/1665] Add glob stuff back in --- .zenodo.json | 2 +- nipype/interfaces/fsl/epi.py | 81 ++++++++++++++++-------------------- 2 files changed, 37 insertions(+), 46 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index 01ea25ecd5..c70775cf39 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -594,7 +594,7 @@ "affiliation": "University of Washington", "name": "Richie-Halford, Adam", "orcid": "0000-0001-9276-9084" - }, + } ], "keywords": [ "neuroimaging", diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index 1211fbc759..c69cbc87ce 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -1298,49 +1298,37 @@ class EddyQuadInputSpec(FSLCommandInputSpec): class EddyQuadOutputSpec(TraitedSpec): out_qc_json = File( exists=True, - mandatory=True, desc=("Single subject database containing quality metrics and data " "info.") ) out_qc_pdf = File( exists=True, - mandatory=True, desc="Single subject QC report." 
) out_avg_b_png = traits.List( - File( - exists=True, - mandatory=True, - desc=("Image showing mid-sagittal, -coronal and -axial slices of " - "each averaged b-shell volume.") - ) + File(exists=True), + desc=("Image showing mid-sagittal, -coronal and -axial slices of " + "each averaged b-shell volume.") ) out_avg_b0_pe_png = traits.List( - File( - exists=True, - mandatory=False, - desc=("Image showing mid-sagittal, -coronal and -axial slices of " - "each averaged pe-direction b0 volume. Generated when using " - "the -f option.") - ) + File(exists=True), + desc=("Image showing mid-sagittal, -coronal and -axial slices of " + "each averaged pe-direction b0 volume. Generated when using " + "the -f option.") ) out_cnr_png = traits.List( - File( - exists=True, - mandatory=False, - desc=("Image showing mid-sagittal, -coronal and -axial slices of " - "each b-shell CNR volume. Generated when CNR maps are " - "available.") - ) + File(exists=True), + desc=("Image showing mid-sagittal, -coronal and -axial slices of " + "each b-shell CNR volume. Generated when CNR maps are " + "available.") ) out_vdm_png = File( exists=True, - mandatory=False, desc=("Image showing mid-sagittal, -coronal and -axial slices of " "the voxel displacement map. Generated when using the -f " "option.") @@ -1348,14 +1336,12 @@ class EddyQuadOutputSpec(TraitedSpec): out_residuals = File( exists=True, - mandatory=False, desc=("Text file containing the volume-wise mask-averaged squared " "residuals. Generated when residual maps are available.") ) out_clean_volumes = File( exists=True, - mandatory=False, desc=("Text file containing a list of clean volumes, based on " "the eddy squared residuals. To generate a version of the " "pre-processed dataset without outlier volumes, use: " @@ -1397,41 +1383,46 @@ class EddyQuad(FSLCommand): output_spec = EddyQuadOutputSpec def _list_outputs(self): - import json + from glob import glob outputs = self.output_spec().get() out_dir = os.path.abspath(self.inputs.output_dir) outputs['out_qc_json'] = os.path.join(out_dir, 'qc.json') outputs['out_qc_pdf'] = os.path.join(out_dir, 'qc.pdf') - with open(outputs['out_qc_json']) as fp: - qc = json.load(fp) - outputs['out_avg_b_png'] = [ os.path.join(out_dir, 'avg_b{bval:d}.png'.format(bval=bval)) for bval in list(set([0] + qc.get('data_unique_bvals'))) ] - if qc.get('qc_field_flag'): - outputs['out_avg_b0_pe_png'] = [ - os.path.join(out_dir, 'avg_b0_pe{i:d}'.format(i=i)) - for i in range(qc.get('data_no_PE_dirs')) - ] + # Grab all b* files here. This will also grab the b0_pe* files + # as well, but only if the field input was provided. So we'll remove + # them later in the next conditional. + outputs['out_avg_b_png'] = sorted(glob( + os.path.join(out_dir, 'avg_b*.png') + )) + + if isdefined(self.inputs.field): + outputs['out_avg_b0_pe_png'] = sorted(glob( + os.path.join(out_dir, 'avg_b0_pe*.png') + )) + + # The previous glob for `out_avg_b_png` also grabbed the + # `out_avg_b0_pe_png` files so we have to remove them + # from `out_avg_b_png`. 
+ for fname in outputs['out_avg_b0_pe_png']: + outputs['out_avg_b_png'].remove(fname) outputs['out_vdm_png'] = os.path.join(out_dir, 'vdm.png') - if qc.get('qc_cnr_flag'): - outputs['out_cnr_png'] = [ - os.path.join(out_dir, 'cnr{i:04d}.nii.gz.png') - for i, _ in enumerate(qc.get('qc_cnr_avg')) - ] + outputs['out_cnr_png'] = sorted(glob(os.path.join(out_dir, 'cnr*.png'))) - if qc.get('qc_rss_flag'): - outputs['out_residuals'] = os.path.join(out_dir, 'eddy_msr.txt') + residuals = os.path.join(out_dir, 'eddy_msr.txt') + if os.path.isfile(residuals): + outputs['out_residuals'] = residuals - if qc.get('qc_ol_flag'): - outputs['out_clean_volumes'] = os.path.join(out_dir, - 'vols_no_outliers.txt') + clean_volumes = os.path.join(out_dir, 'vols_no_outliers.txt') + if os.path.isfile(clean_volumes): + outputs['out_clean_volumes'] = clean_volumes return outputs - From d01bf40d53ef158e040984c259c75464175af703 Mon Sep 17 00:00:00 2001 From: Adam Richie-Halford Date: Fri, 18 Jan 2019 14:03:48 -0800 Subject: [PATCH 0081/1665] Remove redundant out_avg_b_png lines --- nipype/interfaces/fsl/epi.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index c69cbc87ce..ff3bda2479 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -1389,11 +1389,6 @@ def _list_outputs(self): outputs['out_qc_json'] = os.path.join(out_dir, 'qc.json') outputs['out_qc_pdf'] = os.path.join(out_dir, 'qc.pdf') - outputs['out_avg_b_png'] = [ - os.path.join(out_dir, 'avg_b{bval:d}.png'.format(bval=bval)) - for bval in list(set([0] + qc.get('data_unique_bvals'))) - ] - # Grab all b* files here. This will also grab the b0_pe* files # as well, but only if the field input was provided. So we'll remove # them later in the next conditional. 
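An illustrative, standalone sketch of the output-collection pattern the preceding EddyQuad commits settle on: glob every avg_b*.png in the QC directory and, when a fieldmap was supplied, split the avg_b0_pe*.png matches into their own list so the two outputs do not overlap. The helper name, the have_field flag, and the directory layout below are assumptions made for the example; this is not code taken from the patches themselves.

    import os
    from glob import glob

    def collect_avg_images(out_dir, have_field):
        # Everything matching avg_b*.png; this also catches avg_b0_pe*.png
        avg_b = sorted(glob(os.path.join(out_dir, 'avg_b*.png')))
        avg_b0_pe = []
        if have_field:
            # Per-PE-direction b0 averages are only written when --field is used
            avg_b0_pe = sorted(glob(os.path.join(out_dir, 'avg_b0_pe*.png')))
            # Prune them from the generic b-shell list so the outputs stay disjoint
            avg_b = [f for f in avg_b if f not in avg_b0_pe]
        return avg_b, avg_b0_pe

    # e.g. collect_avg_images('eddy_corrected.qc', have_field=True)

Relying on glob keeps the output listing independent of qc.json, which is why the follow-up commit can drop the json-driven list comprehension entirely.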
From f6b49f5898b91068c46527887d20c0c9494b7261 Mon Sep 17 00:00:00 2001 From: Adam Richie-Halford Date: Fri, 18 Jan 2019 14:49:56 -0800 Subject: [PATCH 0082/1665] Add output_dir check to EddyQuad._list_outputs() --- nipype/interfaces/fsl/epi.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index ff3bda2479..651af15cf6 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -1385,7 +1385,14 @@ class EddyQuad(FSLCommand): def _list_outputs(self): from glob import glob outputs = self.output_spec().get() - out_dir = os.path.abspath(self.inputs.output_dir) + + # If the output directory isn't defined, the interface seems to use + # the default but not set its value in `self.inputs.output_dir` + if not isdefined(self.inputs.output_dir): + out_dir = os.path.abspath(self.inputs.base_name + '.qc.nii.gz') + else: + out_dir = os.path.abspath(self.inputs.output_dir) + outputs['out_qc_json'] = os.path.join(out_dir, 'qc.json') outputs['out_qc_pdf'] = os.path.join(out_dir, 'qc.pdf') From 14027c59cb5f4e21e17bacafa1459271a16c1cbe Mon Sep 17 00:00:00 2001 From: Adam Richie-Halford Date: Fri, 18 Jan 2019 14:57:06 -0800 Subject: [PATCH 0083/1665] Use os.path.basename for the fallback output_dir in EddyQuad._list_outputs() --- nipype/interfaces/fsl/epi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index 651af15cf6..67a5ca86a2 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -1389,7 +1389,7 @@ def _list_outputs(self): # If the output directory isn't defined, the interface seems to use # the default but not set its value in `self.inputs.output_dir` if not isdefined(self.inputs.output_dir): - out_dir = os.path.abspath(self.inputs.base_name + '.qc.nii.gz') + out_dir = os.path.abspath(os.path.basename(self.inputs.base_name) + '.qc.nii.gz') else: out_dir = os.path.abspath(self.inputs.output_dir) From 51b7f45533fdf890ab6efeb6b1e05e6963a92ea1 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 20 Jan 2019 21:50:47 -0800 Subject: [PATCH 0084/1665] Apply minor edits from code review Co-Authored-By: richford --- nipype/interfaces/fsl/epi.py | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index 67a5ca86a2..127a5519f9 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -1281,15 +1281,16 @@ class EddyQuadInputSpec(FSLCommandInputSpec): desc="Output directory - default = '.qc'", ) field = File( + exists=True, argstr='--field=%s', desc="TOPUP estimated field (in Hz)", ) slice_spec = File( + exists=True, argstr='--slspec=%s', desc="Text file specifying slice/group acquisition", ) verbose = traits.Bool( - False, argstr='--verbose', desc="Display debug messages", ) @@ -1301,45 +1302,38 @@ class EddyQuadOutputSpec(TraitedSpec): desc=("Single subject database containing quality metrics and data " "info.") ) - out_qc_pdf = File( exists=True, desc="Single subject QC report." ) - out_avg_b_png = traits.List( File(exists=True), desc=("Image showing mid-sagittal, -coronal and -axial slices of " "each averaged b-shell volume.") ) - out_avg_b0_pe_png = traits.List( File(exists=True), desc=("Image showing mid-sagittal, -coronal and -axial slices of " "each averaged pe-direction b0 volume. 
Generated when using " "the -f option.") ) - out_cnr_png = traits.List( File(exists=True), desc=("Image showing mid-sagittal, -coronal and -axial slices of " "each b-shell CNR volume. Generated when CNR maps are " "available.") ) - out_vdm_png = File( exists=True, desc=("Image showing mid-sagittal, -coronal and -axial slices of " "the voxel displacement map. Generated when using the -f " "option.") ) - out_residuals = File( exists=True, desc=("Text file containing the volume-wise mask-averaged squared " "residuals. Generated when residual maps are available.") ) - out_clean_volumes = File( exists=True, desc=("Text file containing a list of clean volumes, based on " @@ -1370,7 +1364,7 @@ class EddyQuad(FSLCommand): >>> quad.inputs.output_dir = 'eddy_corrected.qc' >>> quad.inputs.field = 'fieldmap_phase_fslprepared.nii' >>> quad.inputs.verbose = True - >>> quad.cmdline # doctest: +ELLIPSIS + >>> quad.cmdline 'eddy_quad eddy_corrected --bvals=bvals.scheme --bvecs=bvecs.scheme \ --field=fieldmap_phase_fslprepared.nii --eddyIdx=epi_index.txt \ --mask=epi_mask.nii --output-dir=eddy_corrected.qc --eddyParams=epi_acqp.txt \ From 41d82ad49065ec1059fbd32533762e665c741940 Mon Sep 17 00:00:00 2001 From: Adam Richie-Halford Date: Mon, 21 Jan 2019 06:58:38 -0800 Subject: [PATCH 0085/1665] Remove out_ prefix from EddyQuad outputs --- nipype/interfaces/fsl/epi.py | 42 ++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index 127a5519f9..3e47576ec7 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -1297,44 +1297,44 @@ class EddyQuadInputSpec(FSLCommandInputSpec): class EddyQuadOutputSpec(TraitedSpec): - out_qc_json = File( + qc_json = File( exists=True, desc=("Single subject database containing quality metrics and data " "info.") ) - out_qc_pdf = File( + qc_pdf = File( exists=True, desc="Single subject QC report." ) - out_avg_b_png = traits.List( + avg_b_png = traits.List( File(exists=True), desc=("Image showing mid-sagittal, -coronal and -axial slices of " "each averaged b-shell volume.") ) - out_avg_b0_pe_png = traits.List( + avg_b0_pe_png = traits.List( File(exists=True), desc=("Image showing mid-sagittal, -coronal and -axial slices of " "each averaged pe-direction b0 volume. Generated when using " "the -f option.") ) - out_cnr_png = traits.List( + cnr_png = traits.List( File(exists=True), desc=("Image showing mid-sagittal, -coronal and -axial slices of " "each b-shell CNR volume. Generated when CNR maps are " "available.") ) - out_vdm_png = File( + vdm_png = File( exists=True, desc=("Image showing mid-sagittal, -coronal and -axial slices of " "the voxel displacement map. Generated when using the -f " "option.") ) - out_residuals = File( + residuals = File( exists=True, desc=("Text file containing the volume-wise mask-averaged squared " "residuals. Generated when residual maps are available.") ) - out_clean_volumes = File( + clean_volumes = File( exists=True, desc=("Text file containing a list of clean volumes, based on " "the eddy squared residuals. To generate a version of the " @@ -1387,38 +1387,38 @@ def _list_outputs(self): else: out_dir = os.path.abspath(self.inputs.output_dir) - outputs['out_qc_json'] = os.path.join(out_dir, 'qc.json') - outputs['out_qc_pdf'] = os.path.join(out_dir, 'qc.pdf') + outputs['qc_json'] = os.path.join(out_dir, 'qc.json') + outputs['qc_pdf'] = os.path.join(out_dir, 'qc.pdf') # Grab all b* files here. 
This will also grab the b0_pe* files # as well, but only if the field input was provided. So we'll remove # them later in the next conditional. - outputs['out_avg_b_png'] = sorted(glob( + outputs['avg_b_png'] = sorted(glob( os.path.join(out_dir, 'avg_b*.png') )) if isdefined(self.inputs.field): - outputs['out_avg_b0_pe_png'] = sorted(glob( + outputs['avg_b0_pe_png'] = sorted(glob( os.path.join(out_dir, 'avg_b0_pe*.png') )) - # The previous glob for `out_avg_b_png` also grabbed the - # `out_avg_b0_pe_png` files so we have to remove them - # from `out_avg_b_png`. - for fname in outputs['out_avg_b0_pe_png']: - outputs['out_avg_b_png'].remove(fname) + # The previous glob for `avg_b_png` also grabbed the + # `avg_b0_pe_png` files so we have to remove them + # from `avg_b_png`. + for fname in outputs['avg_b0_pe_png']: + outputs['avg_b_png'].remove(fname) - outputs['out_vdm_png'] = os.path.join(out_dir, 'vdm.png') + outputs['vdm_png'] = os.path.join(out_dir, 'vdm.png') - outputs['out_cnr_png'] = sorted(glob(os.path.join(out_dir, 'cnr*.png'))) + outputs['cnr_png'] = sorted(glob(os.path.join(out_dir, 'cnr*.png'))) residuals = os.path.join(out_dir, 'eddy_msr.txt') if os.path.isfile(residuals): - outputs['out_residuals'] = residuals + outputs['residuals'] = residuals clean_volumes = os.path.join(out_dir, 'vols_no_outliers.txt') if os.path.isfile(clean_volumes): - outputs['out_clean_volumes'] = clean_volumes + outputs['clean_volumes'] = clean_volumes return outputs From 562dab0f90a110c5a17a72a40660f4c505c0669a Mon Sep 17 00:00:00 2001 From: hstojic Date: Mon, 21 Jan 2019 17:47:45 +0000 Subject: [PATCH 0086/1665] added flags option to the Level1Design function in SPM interface --- nipype/interfaces/spm/model.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/spm/model.py b/nipype/interfaces/spm/model.py index 5293346dbb..81cdeb8116 100644 --- a/nipype/interfaces/spm/model.py +++ b/nipype/interfaces/spm/model.py @@ -106,6 +106,7 @@ class Level1DesignInputSpec(SPMCommandInputSpec): desc=('Model serial correlations ' 'AR(1), FAST or none. FAST ' 'is available in SPM12')) + flags = traits.Dict(desc='Additional arguments to the job, e.g. a common SPm operation is to modify the default masking threshold (mthresh)') class Level1DesignOutputSpec(TraitedSpec): @@ -125,6 +126,7 @@ class Level1Design(SPMCommand): >>> level1design.inputs.interscan_interval = 2.5 >>> level1design.inputs.bases = {'hrf':{'derivs': [0,0]}} >>> level1design.inputs.session_info = 'session_info.npz' + >>> level1design.inputs.flags = {'mthresh': 0.4} >>> level1design.run() # doctest: +SKIP """ @@ -151,7 +153,11 @@ def _parse_inputs(self): """validate spm realign options if set to None ignore """ einputs = super(Level1Design, - self)._parse_inputs(skip=('mask_threshold')) + self)._parse_inputs(skip=('mask_threshold', 'flags')) + if isdefined(self.inputs.flags): + einputs[0].update( + {flag: val + for (flag, val) in self.inputs.flags.items()}) for sessinfo in einputs[0]['sess']: sessinfo['scans'] = scans_for_fnames( ensure_list(sessinfo['scans']), keep4d=False) From bd1ab8a106bd0dd0905c941ac369409ac5ce717e Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Tue, 22 Jan 2019 09:44:41 -0500 Subject: [PATCH 0087/1665] STY: Whitespace, line length --- nipype/interfaces/io.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index 923cebdc5a..6083dfb25e 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -835,16 +835,18 @@ class S3DataGrabber(LibraryBaseInterface, IOBase): >>> s3grab = S3DataGrabber(infields=['subj_id'], outfields=["func", "anat"]) >>> s3grab.inputs.bucket = 'openneuro' >>> s3grab.inputs.sort_filelist = True - >>> s3grab.inputs.template = '*' + >>> s3grab.inputs.template = '*' >>> s3grab.inputs.anon = True - >>> s3grab.inputs.bucket_path = 'ds000101/ds000101_R2.0.0/uncompressed/' - >>> s3grab.inputs.local_directory = '/tmp' - >>> s3grab.inputs.field_template = {'anat': '%s/anat/%s_T1w.nii.gz', 'func': '%s/func/%s_task-simon_run-1_bold.nii.gz'} - >>> s3grab.inputs.template_args = {'anat': [['subj_id', 'subj_id']], 'func': [['subj_id', 'subj_id']]} + >>> s3grab.inputs.bucket_path = 'ds000101/ds000101_R2.0.0/uncompressed/' + >>> s3grab.inputs.local_directory = '/tmp' + >>> s3grab.inputs.field_template = {'anat': '%s/anat/%s_T1w.nii.gz', + ... 'func': '%s/func/%s_task-simon_run-1_bold.nii.gz'} + >>> s3grab.inputs.template_args = {'anat': [['subj_id', 'subj_id']], + ... 'func': [['subj_id', 'subj_id']]} >>> s3grab.inputs.subj_id = 'sub-01' >>> s3grab.run() # doctest: +SKIP """ - + input_spec = S3DataGrabberInputSpec output_spec = DynamicTraitedSpec _always_run = True From b75bf7cc13301b7732565cc3a430e28098b69170 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 22 Jan 2019 10:44:53 -0600 Subject: [PATCH 0088/1665] Update nipype/info.py Co-Authored-By: adelavega --- nipype/info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/info.py b/nipype/info.py index 9d4b04efd3..3350e93ef6 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -176,7 +176,7 @@ def get_nipype_gitversion(): 'profiler': ['psutil>=5.0'], 'duecredit': ['duecredit'], 'xvfbwrapper': ['xvfbwrapper'], - 'pybids': ['pybids==0.7.0'], + 'pybids': ['pybids>=0.7.0'], 'ssh': ['paramiko'], # 'mesh': ['mayavi'] # Enable when it works } From 7105cb541d4491cd79137a6ab947035bf78d312d Mon Sep 17 00:00:00 2001 From: delavega4 Date: Tue, 22 Jan 2019 10:47:02 -0600 Subject: [PATCH 0089/1665] Remove return type named tuple --- nipype/interfaces/io.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index f90afdac05..e085dad2cd 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -2732,8 +2732,6 @@ class BIDSDataGrabberInputSpec(DynamicTraitedSpec): raise_on_empty = traits.Bool( True, usedefault=True, desc='Generate exception if list is empty for a given field') - return_type = traits.Enum( - 'file', 'namedtuple', usedefault=True) index_derivatives = traits.Bool( False, usedefault=True, desc='Index derivatives/ sub-directory') @@ -2831,7 +2829,7 @@ def _list_outputs(self): for key, query in self.inputs.output_query.items(): args = query.copy() args.update(filters) - filelist = layout.get(return_type=self.inputs.return_type, **args) + filelist = layout.get(return_type='file', **args) if len(filelist) == 0: msg = 'Output key: %s returned no files' % key if self.inputs.raise_on_empty: From 75a5a2be76a229614f7e2de6d87c2c141b8fdfb4 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 22 Jan 2019 10:47:52 -0600 Subject: [PATCH 0090/1665] Update 
nipype/interfaces/io.py Mandatory index_derivatives Co-Authored-By: adelavega --- nipype/interfaces/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index f90afdac05..85d3c9e35c 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -2735,7 +2735,7 @@ class BIDSDataGrabberInputSpec(DynamicTraitedSpec): return_type = traits.Enum( 'file', 'namedtuple', usedefault=True) index_derivatives = traits.Bool( - False, usedefault=True, + False, mandatory=True, usedefault=True, desc='Index derivatives/ sub-directory') extra_derivatives = traits.List( Directory(exists=True), From b623cddcb2c0cfe6cf18f49bade97def88432070 Mon Sep 17 00:00:00 2001 From: delavega4 Date: Tue, 22 Jan 2019 12:47:37 -0600 Subject: [PATCH 0091/1665] remake specs --- nipype/interfaces/tests/test_auto_BIDSDataGrabber.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py b/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py index 8acdcc9787..24cbda657b 100644 --- a/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py +++ b/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py @@ -6,10 +6,13 @@ def test_BIDSDataGrabber_inputs(): input_map = dict( base_dir=dict(mandatory=True, ), + extra_derivatives=dict(), + index_derivatives=dict( + mandatory=True, + usedefault=True, + ), output_query=dict(), raise_on_empty=dict(usedefault=True, ), - return_type=dict(usedefault=True, ), - strict=dict(), ) inputs = BIDSDataGrabber.input_spec() From aeda06a840994bd39e6fdf614a73fdb4bec5cb09 Mon Sep 17 00:00:00 2001 From: delavega4 Date: Tue, 22 Jan 2019 15:58:06 -0600 Subject: [PATCH 0092/1665] Add FSL auto test --- .../fsl/tests/test_auto_EddyQuad.py | 65 +++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 nipype/interfaces/fsl/tests/test_auto_EddyQuad.py diff --git a/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py b/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py new file mode 100644 index 0000000000..4302cdc312 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py @@ -0,0 +1,65 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..epi import EddyQuad + + +def test_EddyQuad_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + base_name=dict( + argstr='%s', + position=0, + usedefault=True, + ), + bval_file=dict( + argstr='--bvals=%s', + mandatory=True, + ), + bvec_file=dict(argstr='--bvecs=%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + field=dict(argstr='--field=%s', ), + idx_file=dict( + argstr='--eddyIdx=%s', + mandatory=True, + ), + mask_file=dict( + argstr='--mask=%s', + mandatory=True, + ), + output_dir=dict( + argstr='--output-dir=%s', + name_source=['base_name'], + name_template='%s.qc', + ), + output_type=dict(), + param_file=dict( + argstr='--eddyParams=%s', + mandatory=True, + ), + slice_spec=dict(argstr='--slspec=%s', ), + verbose=dict(argstr='--verbose', ), + ) + inputs = EddyQuad.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_EddyQuad_outputs(): + output_map = dict( + avg_b0_pe_png=dict(), + avg_b_png=dict(), + clean_volumes=dict(), + cnr_png=dict(), + qc_json=dict(), + qc_pdf=dict(), + residuals=dict(), + vdm_png=dict(), + ) + outputs = EddyQuad.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value 
in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value From 79cc144f971a4a984f34f17a80015e408b0a350c Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 22 Jan 2019 17:09:50 -0500 Subject: [PATCH 0093/1665] MNT: Install numpy!=1.16.0 from conda in Docker --- docker/generate_dockerfiles.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/generate_dockerfiles.sh b/docker/generate_dockerfiles.sh index cbe26d05a8..0fb3c08b87 100755 --- a/docker/generate_dockerfiles.sh +++ b/docker/generate_dockerfiles.sh @@ -90,7 +90,7 @@ function generate_main_dockerfile() { --user neuro \ --miniconda create_env=neuro \ conda_install='python=${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR} - icu=58.1 libxml2 libxslt matplotlib mkl numpy paramiko + icu=58.1 libxml2 libxslt matplotlib mkl "numpy!=1.16.0" paramiko pandas psutil scikit-learn scipy traits=4.6.0' \ pip_install="pytest-xdist" \ activate=true \ From 1b4625f47a216f5d01bad3d222980860f5591c71 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 23 Jan 2019 17:32:17 +0100 Subject: [PATCH 0094/1665] Update nipype/interfaces/dipy/reconstruction.py Co-Authored-By: skoudoro --- nipype/interfaces/dipy/reconstruction.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/dipy/reconstruction.py b/nipype/interfaces/dipy/reconstruction.py index a254a5da62..388071cdc3 100644 --- a/nipype/interfaces/dipy/reconstruction.py +++ b/nipype/interfaces/dipy/reconstruction.py @@ -35,7 +35,7 @@ CSDModel = dipy_to_nipype_interface("CSDModel", ReconstCSDFlow) else: IFLOGGER.info("We advise you to upgrade DIPY version. This upgrade will" - " activate DKIModel, MapmriModel, DTIModel, CSAModel.") + " activate DKIModel, MapmriModel, DTIModel, CSAModel, CSDModel.") class RESTOREInputSpec(DipyBaseInterfaceInputSpec): From ebe49d038d2b5c52fce449cc2cb3181bf600a9ea Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 23 Jan 2019 17:32:33 +0100 Subject: [PATCH 0095/1665] Update nipype/interfaces/dipy/tracks.py Co-Authored-By: skoudoro --- nipype/interfaces/dipy/tracks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/dipy/tracks.py b/nipype/interfaces/dipy/tracks.py index 1a9a8a1582..3643654001 100644 --- a/nipype/interfaces/dipy/tracks.py +++ b/nipype/interfaces/dipy/tracks.py @@ -30,7 +30,7 @@ else: IFLOGGER.info("We advise you to upgrade DIPY version. This upgrade will" - " activate RecoBundles, LabelsBundles, DetTrackPAMFlow.") + " activate RecoBundles, LabelsBundles, DeterministicTracking.") class TrackDensityMapInputSpec(BaseInterfaceInputSpec): From 6e18a80ce51186ef73f0c4441d75241b99948ef6 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Thu, 24 Jan 2019 21:59:30 -0500 Subject: [PATCH 0096/1665] DOC: 1.1.8 changelog --- doc/changelog/1.X.X-changelog | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/doc/changelog/1.X.X-changelog b/doc/changelog/1.X.X-changelog index 5dfb3f3cb7..bfa8c51298 100644 --- a/doc/changelog/1.X.X-changelog +++ b/doc/changelog/1.X.X-changelog @@ -1,6 +1,30 @@ +1.1.8 (January 28, 2019) +======================== + +##### [Full changelog](https://github.com/nipy/nipype/milestone/29?closed=1) + + * FIX: ANTS LaplacianThickness cmdline opts fixed up (https://github.com/nipy/nipype/pull/2846) + * FIX: Resolve LinAlgError during SVD (https://github.com/nipy/nipype/pull/2838) + * ENH: Add interfaces wrapping DIPY worflows (https://github.com/nipy/nipype/pull/2830) + * ENH: Update BIDSDataGrabber for pybids 0.7 (https://github.com/nipy/nipype/pull/2737) + * ENH: Add FSL `eddy_quad` interface (https://github.com/nipy/nipype/pull/2825) + * ENH: Support tckgen -select in MRtrix3 v3+ (https://github.com/nipy/nipype/pull/2823) + * ENH: Support for BIDS event files (https://github.com/nipy/nipype/pull/2845) + * ENH: CompositeTransformUtil, new ANTs interface (https://github.com/nipy/nipype/pull/2785) + * RF: Move pytest and pytest-xdist from general requirement into tests_required (https://github.com/nipy/nipype/pull/2850) + * DOC: Add S3DataGrabber example (https://github.com/nipy/nipype/pull/2849) + * DOC: Skip conftest module in API generation (https://github.com/nipy/nipype/pull/2852) + * DOC: Hyperlink DOIs to preferred resolver (https://github.com/nipy/nipype/pull/2833) + * MAINT: Install numpy!=1.16.0 from conda in Docker (https://github.com/nipy/nipype/pull/2862) + * MAINT: Drop pytest-xdist requirement, minimum pytest version (https://github.com/nipy/nipype/pull/2856) + * MAINT: Disable numpy 1.16.0 for Py2.7 (https://github.com/nipy/nipype/pull/2855) + + 1.1.7 (December 17, 2018) ========================= +##### [Full changelog](https://github.com/nipy/nipype/milestone/28?closed=1) + * FIX: Copy node list before generating a flat graph (https://github.com/nipy/nipype/pull/2828) * FIX: Update pytest req'd version to 3.6 (https://github.com/nipy/nipype/pull/2827) * FIX: Set ResourceMonitor.fname to an absolute path (https://github.com/nipy/nipype/pull/2824) From ef077f98554ed9ad6b01f26ff3836a16f43039c3 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Thu, 24 Jan 2019 22:02:17 -0500 Subject: [PATCH 0097/1665] MNT: Version 1.1.8 --- doc/conf.py | 4 ++-- nipype/info.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index b8c8e5ac3e..47b229ac0c 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -73,7 +73,7 @@ # General information about the project. project = u'nipype' -copyright = u'2009-18, Neuroimaging in Python team' +copyright = u'2009-19, Neuroimaging in Python team' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -82,7 +82,7 @@ # The short X.Y version. version = nipype.__version__ # The full version, including alpha/beta/rc tags. -release = "1.1.7" +release = "1.1.8" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/nipype/info.py b/nipype/info.py index 9251de88a7..708b3a009d 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -11,7 +11,7 @@ # full release. 
'.dev' as a version_extra string means this is a development # version # Remove -dev for release -__version__ = '1.1.8-dev' +__version__ = '1.1.8' def get_nipype_gitversion(): From 4aed1e24c564a80f0d8fb0956c682785b4dd1344 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Thu, 24 Jan 2019 22:09:35 -0500 Subject: [PATCH 0098/1665] MNT: Update .zenodo ordering --- .zenodo.json | 46 +++++++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index c70775cf39..a73a844ad1 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -110,16 +110,16 @@ "affiliation": "National Institutes of Health", "name": "Clark, Michael G. " }, - { - "affiliation": "Dartmouth College", - "name": "Visconti di Oleggio Castello, Matteo", - "orcid": "0000-0001-7931-5272" - }, { "affiliation": "Mayo Clinic, Neurology, Rochester, MN, USA", "name": "Dayan, Michael", "orcid": "0000-0002-2666-0969" }, + { + "affiliation": "Dartmouth College", + "name": "Visconti di Oleggio Castello, Matteo", + "orcid": "0000-0001-7931-5272" + }, { "affiliation": "UC Berkeley", "name": "Clark, Dav", @@ -142,15 +142,20 @@ "name": "Berleant, Shoshana" }, { - "affiliation": "UC San Diego", - "name": "Cipollini, Ben", - "orcid": "0000-0002-7782-0790" + "affiliation": "Institute for Biomedical Engineering, ETH and University of Zurich", + "name": "Horea, Christian", + "orcid": "0000-0001-7037-2449" }, { "affiliation": "Dartmouth College: Hanover, NH, United States", "name": "Halchenko, Yaroslav O.", "orcid": "0000-0003-3456-2493" }, + { + "affiliation": "UC San Diego", + "name": "Cipollini, Ben", + "orcid": "0000-0002-7782-0790" + }, { "affiliation": "Montreal Neurological Institute and Hospital", "name": "Markello, Ross", @@ -179,11 +184,6 @@ "name": "Varoquaux, Gael", "orcid": "0000-0003-1076-5122" }, - { - "affiliation": "Institute for Biomedical Engineering, ETH and University of Zurich", - "name": "Horea, Christian", - "orcid": "0000-0001-7037-2449" - }, { "affiliation": "Athena EPI, Inria Sophia-Antipolis", "name": "Wassermann , Demian", @@ -235,6 +235,11 @@ "name": "Iqbal, Shariq", "orcid": "0000-0003-2766-8425" }, + { + "affiliation": "University of Washington", + "name": "Richie-Halford, Adam", + "orcid": "0000-0001-9276-9084" + }, { "name": "Schwartz, Yannick" }, @@ -380,6 +385,11 @@ { "name": "Hallquist, Michael" }, + { + "affiliation": "University of Texas at Austin", + "name": "De La Vega, Alejandro", + "orcid": "0000-0001-9062-3778" + }, { "affiliation": "Donders Institute for Brain, Cognition and Behavior, Center for Cognitive Neuroimaging", "name": "Chetverikov, Andrey", @@ -461,11 +471,6 @@ "name": "Geisler, Daniel", "orcid": "0000-0003-2076-5329" }, - { - "affiliation": "University of Texas at Austin", - "name": "De La Vega, Alejandro", - "orcid": "0000-0001-9062-3778" - }, { "affiliation": "University of illinois urbana champaign", "name": "Sharp, Paul" @@ -589,11 +594,6 @@ "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", "orcid": "0000-0002-5312-6729" - }, - { - "affiliation": "University of Washington", - "name": "Richie-Halford, Adam", - "orcid": "0000-0001-9276-9084" } ], "keywords": [ From d633a57ce001452e420341616a608563911de7bd Mon Sep 17 00:00:00 2001 From: Yaroslav Halchenko Date: Thu, 24 Jan 2019 22:44:43 -0500 Subject: [PATCH 0099/1665] BF: provide chain of requires for LaplacianThickness Thanks @effigies. 
This should prevent incorrect specification of parameters in the command line since for ANTs they are just positional ones, so all previous ones should be specified. --- nipype/interfaces/ants/segmentation.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py index 94e8c59c79..adaf765527 100644 --- a/nipype/interfaces/ants/segmentation.py +++ b/nipype/interfaces/ants/segmentation.py @@ -214,19 +214,23 @@ class LaplacianThicknessInputSpec(ANTSCommandInputSpec): prior_thickness = traits.Float( argstr='%s', desc='Prior thickness (defaults to 500)', + requires=['smooth_param'], position=5) dT = traits.Float( argstr='%s', desc='Time delta used during integration (defaults to 0.01)', + requires=['prior_thickness'], position=6) sulcus_prior = traits.Float( argstr='%s', desc='Positive floating point number for sulcus prior. ' 'Authors said that 0.15 might be a reasonable value', + requires=['dT'], position=7) tolerance = traits.Float( argstr='%s', desc='Tolerance to reach during optimization (defaults to 0.001)', + requires=['sulcus_prior'], position=8) From 81670e4f540c29965e901856116fffcf9504c25e Mon Sep 17 00:00:00 2001 From: Horea Christian Date: Sat, 26 Jan 2019 06:57:38 +0100 Subject: [PATCH 0100/1665] BF: allowing bids_event_file as alternate input --- nipype/algorithms/modelgen.py | 4 ++-- nipype/algorithms/tests/test_auto_SpecifyModel.py | 4 ++-- nipype/algorithms/tests/test_auto_SpecifySPMModel.py | 4 ++-- nipype/algorithms/tests/test_auto_SpecifySparseModel.py | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/nipype/algorithms/modelgen.py b/nipype/algorithms/modelgen.py index ddb8539c49..9a0984c885 100644 --- a/nipype/algorithms/modelgen.py +++ b/nipype/algorithms/modelgen.py @@ -233,7 +233,7 @@ class SpecifyModelInputSpec(BaseInterfaceInputSpec): subject_info = InputMultiPath( Bunch, mandatory=True, - xor=['subject_info', 'event_files'], + xor=['subject_info', 'event_files', 'bids_event_file'], desc='Bunch or List(Bunch) subject-specific ' 'condition information. 
see ' ':ref:`SpecifyModel` or ' @@ -241,7 +241,7 @@ class SpecifyModelInputSpec(BaseInterfaceInputSpec): event_files = InputMultiPath( traits.List(File(exists=True)), mandatory=True, - xor=['subject_info', 'event_files'], + xor=['subject_info', 'event_files', 'bids_event_file'], desc='List of event description files 1, 2 or 3 ' 'column format corresponding to onsets, ' 'durations and amplitudes') diff --git a/nipype/algorithms/tests/test_auto_SpecifyModel.py b/nipype/algorithms/tests/test_auto_SpecifyModel.py index 3dfc22a82f..5acfb84e37 100644 --- a/nipype/algorithms/tests/test_auto_SpecifyModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifyModel.py @@ -20,7 +20,7 @@ def test_SpecifyModel_inputs(): ), event_files=dict( mandatory=True, - xor=['subject_info', 'event_files'], + xor=['subject_info', 'event_files', 'bids_event_file'], ), functional_runs=dict( copyfile=False, @@ -33,7 +33,7 @@ def test_SpecifyModel_inputs(): realignment_parameters=dict(copyfile=False, ), subject_info=dict( mandatory=True, - xor=['subject_info', 'event_files'], + xor=['subject_info', 'event_files', 'bids_event_file'], ), time_repetition=dict(mandatory=True, ), ) diff --git a/nipype/algorithms/tests/test_auto_SpecifySPMModel.py b/nipype/algorithms/tests/test_auto_SpecifySPMModel.py index 34c1c9548f..6145791254 100644 --- a/nipype/algorithms/tests/test_auto_SpecifySPMModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifySPMModel.py @@ -21,7 +21,7 @@ def test_SpecifySPMModel_inputs(): concatenate_runs=dict(usedefault=True, ), event_files=dict( mandatory=True, - xor=['subject_info', 'event_files'], + xor=['subject_info', 'event_files', 'bids_event_file'], ), functional_runs=dict( copyfile=False, @@ -35,7 +35,7 @@ def test_SpecifySPMModel_inputs(): realignment_parameters=dict(copyfile=False, ), subject_info=dict( mandatory=True, - xor=['subject_info', 'event_files'], + xor=['subject_info', 'event_files', 'bids_event_file'], ), time_repetition=dict(mandatory=True, ), ) diff --git a/nipype/algorithms/tests/test_auto_SpecifySparseModel.py b/nipype/algorithms/tests/test_auto_SpecifySparseModel.py index 71964dad54..dc12dd3a89 100644 --- a/nipype/algorithms/tests/test_auto_SpecifySparseModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifySparseModel.py @@ -20,7 +20,7 @@ def test_SpecifySparseModel_inputs(): ), event_files=dict( mandatory=True, - xor=['subject_info', 'event_files'], + xor=['subject_info', 'event_files', 'bids_event_file'], ), functional_runs=dict( copyfile=False, @@ -38,7 +38,7 @@ def test_SpecifySparseModel_inputs(): stimuli_as_impulses=dict(usedefault=True, ), subject_info=dict( mandatory=True, - xor=['subject_info', 'event_files'], + xor=['subject_info', 'event_files', 'bids_event_file'], ), time_acquisition=dict(mandatory=True, ), time_repetition=dict(mandatory=True, ), From 59a7c3df0e1bf35665e806844a3ed04afc7cc088 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sat, 26 Jan 2019 23:08:03 -0500 Subject: [PATCH 0101/1665] MAINT: Update .mailmap --- .mailmap | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.mailmap b/.mailmap index 141a6455af..859f55e147 100644 --- a/.mailmap +++ b/.mailmap @@ -3,6 +3,7 @@ Aimi Watanabe stymy Alejandro Tabas qTabs Alejandro Tabas qtabs Alejandro de la Vega adelavega +Alejandro de la Vega delavega4 Alexander Schaefer Alexander Schaefer Alexander Schaefer alexschaefer83 Alexander Schaefer aschaefer @@ -62,6 +63,7 @@ Erik Ziegler Erik Ziegler Erik Ziegler erik Erik Ziegler erikz Erik Ziegler swederik +Feilong Ma Ma Feilong Fernando Pérez-García Fernando Franz Liem fliem Franz Liem fliem From 8d292cb526de82742e2aab22723134adde9c770d Mon Sep 17 00:00:00 2001 From: Katrin Leinweber <9948149+katrinleinweber@users.noreply.github.com> Date: Sun, 27 Jan 2019 11:15:43 +0100 Subject: [PATCH 0102/1665] Accept invitation as Zenodo release co-author (see #2864) --- .zenodo.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.zenodo.json b/.zenodo.json index a73a844ad1..c7fd5115b0 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -594,6 +594,11 @@ "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", "orcid": "0000-0002-5312-6729" + }, + { + "affiliation": "TIB – Leibniz Information Centre for Science and Technology and University Library, Hannover, Germany", + "name": "Leinweber, Katrin", + "orcid": "0000-0001-5135-5758" } ], "keywords": [ From 2ef8ab154569b840a14b79ab01f8d2bccf41982d Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 28 Jan 2019 12:01:51 -0500 Subject: [PATCH 0103/1665] MNT: Update .zenodo ordering --- .zenodo.json | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index c7fd5115b0..b8e77af57f 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -262,6 +262,11 @@ { "name": "Kent, James" }, + { + "affiliation": "University of Texas at Austin", + "name": "De La Vega, Alejandro", + "orcid": "0000-0001-9062-3778" + }, { "name": "Watanabe, Aimi" }, @@ -372,6 +377,11 @@ "affiliation": "MPI CBS Leipzig, Germany", "name": "Lampe, Leonie" }, + { + "affiliation": "TIB \u2013 Leibniz Information Centre for Science and Technology and University Library, Hannover, Germany", + "name": "Leinweber, Katrin", + "orcid": "0000-0001-5135-5758" + }, { "affiliation": "Leibniz Institute for Neurobiology", "name": "Stadler, J\u00f6rg", @@ -385,11 +395,6 @@ { "name": "Hallquist, Michael" }, - { - "affiliation": "University of Texas at Austin", - "name": "De La Vega, Alejandro", - "orcid": "0000-0001-9062-3778" - }, { "affiliation": "Donders Institute for Brain, Cognition and Behavior, Center for Cognitive Neuroimaging", "name": "Chetverikov, Andrey", @@ -594,11 +599,6 @@ "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", "orcid": "0000-0002-5312-6729" - }, - { - "affiliation": "TIB – Leibniz Information Centre for Science and Technology and University Library, Hannover, Germany", - "name": "Leinweber, Katrin", - "orcid": "0000-0001-5135-5758" } ], "keywords": [ From d63f778816e680b9e8ba5ee218130a2dc01b9629 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 28 Jan 2019 12:10:42 -0500 Subject: [PATCH 0104/1665] MNT: Update .mailmap --- .mailmap | 1 + 1 file changed, 1 insertion(+) diff --git a/.mailmap b/.mailmap index 859f55e147..59c81dc119 100644 --- a/.mailmap +++ b/.mailmap @@ -41,6 +41,7 @@ Christopher J. Markiewicz Chris Markiewicz Christopher J. Johnson Christopher J. Markiewicz Christopher J. 
Markiewicz Christopher J. Markiewicz Christopher J. Markiewicz +Christopher J. Markiewicz Chris Markiewicz Cindee Madison cindeem Cindee Madison cindeem <> Colin Buchanan Colin Buchanan From dabb674111bdb453ab0d5fbd01beb3a8146b2611 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 28 Jan 2019 16:01:17 -0500 Subject: [PATCH 0105/1665] MNT: Add @feilong to .zenodo, update ordering --- .zenodo.json | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index b8e77af57f..4216748a10 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -377,11 +377,6 @@ "affiliation": "MPI CBS Leipzig, Germany", "name": "Lampe, Leonie" }, - { - "affiliation": "TIB \u2013 Leibniz Information Centre for Science and Technology and University Library, Hannover, Germany", - "name": "Leinweber, Katrin", - "orcid": "0000-0001-5135-5758" - }, { "affiliation": "Leibniz Institute for Neurobiology", "name": "Stadler, J\u00f6rg", @@ -395,6 +390,16 @@ { "name": "Hallquist, Michael" }, + { + "affiliation": "Dartmouth College", + "name": "Ma, Feilong", + "orcid": "0000-0002-6838-3971" + }, + { + "affiliation": "TIB \u2013 Leibniz Information Centre for Science and Technology and University Library, Hannover, Germany", + "name": "Leinweber, Katrin", + "orcid": "0000-0001-5135-5758" + }, { "affiliation": "Donders Institute for Brain, Cognition and Behavior, Center for Cognitive Neuroimaging", "name": "Chetverikov, Andrey", From 0772cd3b935566ff87d2cfe967000651c083d9e1 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 28 Jan 2019 16:39:59 -0500 Subject: [PATCH 0106/1665] MAINT: Bump dev version --- doc/documentation.rst | 2 +- nipype/info.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/documentation.rst b/doc/documentation.rst index 48c372bde6..57bb932ac4 100644 --- a/doc/documentation.rst +++ b/doc/documentation.rst @@ -9,7 +9,7 @@ Documentation :Release: |version| :Date: |today| -Previous versions: `1.1.7 `_ `1.1.6 `_ +Previous versions: `1.1.8 `_ `1.1.7 `_ .. container:: doc2 diff --git a/nipype/info.py b/nipype/info.py index 708b3a009d..fcfe81d433 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -11,7 +11,7 @@ # full release. '.dev' as a version_extra string means this is a development # version # Remove -dev for release -__version__ = '1.1.8' +__version__ = '1.1.9-dev' def get_nipype_gitversion(): From ae10cd457741ea7fcf0e02da23e3fbd0f5039a5c Mon Sep 17 00:00:00 2001 From: Horea Christian Date: Tue, 29 Jan 2019 16:40:04 +0100 Subject: [PATCH 0107/1665] BF: unambiguous conditions order If this is not done, condition order for orthogonalizarion is unpredictable. --- nipype/algorithms/modelgen.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nipype/algorithms/modelgen.py b/nipype/algorithms/modelgen.py index 9a0984c885..301f0e85fe 100644 --- a/nipype/algorithms/modelgen.py +++ b/nipype/algorithms/modelgen.py @@ -176,6 +176,7 @@ def bids_gen_info(bids_event_files, f_events = csv.DictReader(f, skipinitialspace=True, delimiter='\t') events = [{k: v for k, v in row.items()} for row in f_events] conditions = list(set([i[condition_column] for i in events])) + conditions = sorted(conditions) runinfo = Bunch(conditions=[], onsets=[], durations=[], amplitudes=[]) for condition in conditions: selected_events = [i for i in events if i[condition_column]==condition] From ce65afba686dcea29194e03a4b57f5d05dee39dd Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Tue, 29 Jan 2019 11:43:54 -0500 Subject: [PATCH 0108/1665] Revert "BF: unambiguous conditions order" This reverts commit ae10cd457741ea7fcf0e02da23e3fbd0f5039a5c. --- nipype/algorithms/modelgen.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nipype/algorithms/modelgen.py b/nipype/algorithms/modelgen.py index 301f0e85fe..9a0984c885 100644 --- a/nipype/algorithms/modelgen.py +++ b/nipype/algorithms/modelgen.py @@ -176,7 +176,6 @@ def bids_gen_info(bids_event_files, f_events = csv.DictReader(f, skipinitialspace=True, delimiter='\t') events = [{k: v for k, v in row.items()} for row in f_events] conditions = list(set([i[condition_column] for i in events])) - conditions = sorted(conditions) runinfo = Bunch(conditions=[], onsets=[], durations=[], amplitudes=[]) for condition in conditions: selected_events = [i for i in events if i[condition_column]==condition] From ca3cf74d21a17f3c67dce38783fd05a742116aa3 Mon Sep 17 00:00:00 2001 From: Joerg Stadler Date: Wed, 30 Jan 2019 13:05:06 +0100 Subject: [PATCH 0109/1665] camino: fix procstreamline raw streamline output, without outputroot as suggested by @effigies https://github.com/nipy/nipype/pull/2739 --- nipype/interfaces/camino/convert.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nipype/interfaces/camino/convert.py b/nipype/interfaces/camino/convert.py index ee2ae2eb82..25f618610b 100644 --- a/nipype/interfaces/camino/convert.py +++ b/nipype/interfaces/camino/convert.py @@ -441,6 +441,10 @@ def _format_arg(self, name, spec, value): return spec.argstr % self._get_actual_outputroot(value) return super(ProcStreamlines, self)._format_arg(name, spec, value) + def __init__(self, *args, **kwargs): + super(ProcStreamlines, self).__init__(*args, **kwargs) + self.outputroot_files = [] + def _run_interface(self, runtime): outputroot = self.inputs.outputroot if isdefined(outputroot): From 28a943a7a1e7d6b3a1b3844e72aac6175d4e5572 Mon Sep 17 00:00:00 2001 From: Horea Christian Date: Wed, 30 Jan 2019 18:34:11 +0100 Subject: [PATCH 0110/1665] DOC: typos and spacing --- nipype/interfaces/ants/registration.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nipype/interfaces/ants/registration.py b/nipype/interfaces/ants/registration.py index 7c4a696c5a..c08c7646a9 100644 --- a/nipype/interfaces/ants/registration.py +++ b/nipype/interfaces/ants/registration.py @@ -305,8 +305,8 @@ class RegistrationInputSpec(ANTSCommandInputSpec): initial_moving_transform = InputMultiPath( File(exists=True), argstr='%s', - desc='A transform or a list of transforms that should be applied' - 'before the registration begins. Note that, when a list is given,' + desc='A transform or a list of transforms that should be applied ' + 'before the registration begins. 
Note that, when a list is given, ' 'the transformations are applied in reverse order.', xor=['initial_moving_transform_com']) invert_initial_moving_transform = InputMultiPath( @@ -323,9 +323,9 @@ class RegistrationInputSpec(ANTSCommandInputSpec): 2, argstr='%s', xor=['initial_moving_transform'], - desc="Align the moving_image nad fixed_image befor registration using" - "the geometric center of the images (=0), the image intensities (=1)," - "or the origin of the images (=2)") + desc="Align the moving_image and fixed_image before registration using " + "the geometric center of the images (=0), the image intensities (=1), " + "or the origin of the images (=2).") metric_item_trait = traits.Enum("CC", "MeanSquares", "Demons", "GC", "MI", "Mattes") metric_stage_trait = traits.Either(metric_item_trait, From 67054f44a934d59d9f8c7fede01735cb5ade29a6 Mon Sep 17 00:00:00 2001 From: mathiasg Date: Thu, 31 Jan 2019 16:34:36 -0500 Subject: [PATCH 0111/1665] enh: wb cifti-smoothing --- nipype/interfaces/workbench/__init__.py | 1 + nipype/interfaces/workbench/cifti.py | 144 ++++++++++++++++++ .../workbench/tests/test_auto_CiftiSmooth.py | 95 ++++++++++++ .../sub-01.L.midthickness.32k_fs_LR.surf.gii | 0 .../sub-01.R.midthickness.32k_fs_LR.surf.gii | 0 .../data/sub-01_task-rest.dtseries.nii | 0 6 files changed, 240 insertions(+) create mode 100644 nipype/interfaces/workbench/cifti.py create mode 100644 nipype/interfaces/workbench/tests/test_auto_CiftiSmooth.py create mode 100644 nipype/testing/data/sub-01.L.midthickness.32k_fs_LR.surf.gii create mode 100644 nipype/testing/data/sub-01.R.midthickness.32k_fs_LR.surf.gii create mode 100644 nipype/testing/data/sub-01_task-rest.dtseries.nii diff --git a/nipype/interfaces/workbench/__init__.py b/nipype/interfaces/workbench/__init__.py index 5ced0d2fb3..1de46f8953 100644 --- a/nipype/interfaces/workbench/__init__.py +++ b/nipype/interfaces/workbench/__init__.py @@ -3,3 +3,4 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: from .metric import MetricResample +from .cifti import CiftiSmooth diff --git a/nipype/interfaces/workbench/cifti.py b/nipype/interfaces/workbench/cifti.py new file mode 100644 index 0000000000..c7244c02c7 --- /dev/null +++ b/nipype/interfaces/workbench/cifti.py @@ -0,0 +1,144 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""This module provides interfaces for workbench CIFTI commands""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +import os + +from ..base import (TraitedSpec, File, traits, CommandLineInputSpec) +from .base import WBCommand +from ... 
import logging + +iflogger = logging.getLogger('nipype.interface') + + +class CiftiSmoothInputSpec(CommandLineInputSpec): + in_file = File( + exists=True, + mandatory=True, + argstr="%s", + position=0, + desc="The input CIFTI file") + sigma_surf = traits.Float( + mandatory=True, + argstr="%s", + position=1, + desc="the sigma for the gaussian surface smoothing kernel, in mm") + sigma_vol = traits.Float( + mandatory=True, + argstr="%s", + position=2, + desc="the sigma for the gaussian volume smoothing kernel, in mm") + direction = traits.Enum( + "ROW", + "COLUMN", + mandatory=True, + argstr="%s", + position=3, + desc="which dimension to smooth along, ROW or COLUMN") + out_file = File( + name_source=["in_file"], + name_template="smoothed_%s.nii", + keep_extension=True, + argstr="%s", + position=4, + desc="The output CIFTI") + left_surf = File( + exists=True, + mandatory=True, + position=5, + argstr="-left-surface %s", + desc="Specify the left surface to use") + left_corrected_areas = File( + exists=True, + position=6, + argstr="-left-corrected-areas %s", + desc="vertex areas (as a metric) to use instead of computing them from " + "the left surface.") + right_surf = File( + exists=True, + mandatory=True, + position=7, + argstr="-right-surface %s", + desc="Specify the right surface to use") + right_corrected_areas = File( + exists=True, + position=8, + argstr="-right-corrected-areas %s", + desc="vertex areas (as a metric) to use instead of computing them from " + "the right surface") + cerebellum_surf = File( + exists=True, + position=9, + argstr="-cerebellum-surface %s", + desc="specify the cerebellum surface to use") + cerebellum_corrected_areas = File( + exists=True, + position=10, + requires=["cerebellum_surf"], + argstr="cerebellum-corrected-areas %s", + desc="vertex areas (as a metric) to use instead of computing them from " + "the cerebellum surface") + cifti_roi = File( + exists=True, + position=11, + argstr="-cifti-roi %s", + desc="CIFTI file for ROI smoothing") + fix_zeros_vol = traits.Bool( + position=12, + argstr="-fix-zeros-volume", + desc="treat values of zero in the volume as missing data") + fix_zeros_surf = traits.Bool( + position=13, + argstr="-fix-zeros-surface", + desc="treat values of zero on the surface as missing data") + merged_volume = traits.Bool( + position=14, + argstr="-merged-volume", + desc="smooth across subcortical structure boundaries") + + +class CiftiSmoothOutputSpec(TraitedSpec): + out_file = File(exists=True, desc="output CIFTI file") + + +class CiftiSmooth(WBCommand): + """ + Smooth a CIFTI file + + The input cifti file must have a brain models mapping on the chosen + dimension, columns for .dtseries, and either for .dconn. By default, + data in different structures is smoothed independently (i.e., "parcel + constrained" smoothing), so volume structures that touch do not smooth + across this boundary. Specify ``merged_volume`` to ignore these + boundaries. Surface smoothing uses the ``GEO_GAUSS_AREA`` smoothing method. + + The ``*_corrected_areas`` options are intended for when it is unavoidable + to smooth on group average surfaces, it is only an approximate correction + for the reduction of structure in a group average surface. It is better + to smooth the data on individuals before averaging, when feasible. + + The ``fix_zeros_*`` options will treat values of zero as lack of data, and + not use that value when generating the smoothed values, but will fill + zeros with extrapolated values. 
The ROI should have a brain models + mapping along columns, exactly matching the mapping of the chosen + direction in the input file. Data outside the ROI is ignored. + + >>> from nipype.interfaces.workbench import CiftiSmooth + >>> smooth = CiftiSmooth() + >>> smooth.inputs.in_file = 'sub-01_task-rest.dtseries.nii' + >>> smooth.inputs.sigma_surf = 4 + >>> smooth.inputs.sigma_vol = 4 + >>> smooth.inputs.direction = 'COLUMN' + >>> smooth.inputs.right_surf = 'sub-01.R.midthickness.32k_fs_LR.surf.gii' + >>> smooth.inputs.left_surf = 'sub-01.L.midthickness.32k_fs_LR.surf.gii' + >>> smooth.cmdline + 'wb_command -cifti-smoothing sub-01_task-rest.dtseries.nii 4.0 4.0 COLUMN \ + smoothed_sub-01_task-rest.dtseries.nii \ + -left-surface sub-01.L.midthickness.32k_fs_LR.surf.gii \ + -right-surface sub-01.R.midthickness.32k_fs_LR.surf.gii' + """ + input_spec = CiftiSmoothInputSpec + output_spec = CiftiSmoothOutputSpec + _cmd = 'wb_command -cifti-smoothing' diff --git a/nipype/interfaces/workbench/tests/test_auto_CiftiSmooth.py b/nipype/interfaces/workbench/tests/test_auto_CiftiSmooth.py new file mode 100644 index 0000000000..b510a3b34e --- /dev/null +++ b/nipype/interfaces/workbench/tests/test_auto_CiftiSmooth.py @@ -0,0 +1,95 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..cifti import CiftiSmooth + + +def test_CiftiSmooth_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + cerebellum_corrected_areas=dict( + argstr='cerebellum-corrected-areas %s', + position=10, + requires=['cerebellum_surf'], + ), + cerebellum_surf=dict( + argstr='-cerebellum-surface %s', + position=9, + ), + cifti_roi=dict( + argstr='-cifti-roi %s', + position=11, + ), + direction=dict( + argstr='%s', + mandatory=True, + position=3, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fix_zeros_surf=dict( + argstr='-fix-zeros-surface', + position=13, + ), + fix_zeros_vol=dict( + argstr='-fix-zeros-volume', + position=12, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=0, + ), + left_corrected_areas=dict( + argstr='-left-corrected-areas %s', + position=6, + ), + left_surf=dict( + argstr='-left-surface %s', + mandatory=True, + position=5, + ), + merged_volume=dict( + argstr='-merged-volume', + position=14, + ), + out_file=dict( + argstr='%s', + keep_extension=True, + name_source=['in_file'], + name_template='smoothed_%s.nii', + position=4, + ), + right_corrected_areas=dict( + argstr='-right-corrected-areas %s', + position=8, + ), + right_surf=dict( + argstr='-right-surface %s', + mandatory=True, + position=7, + ), + sigma_surf=dict( + argstr='%s', + mandatory=True, + position=1, + ), + sigma_vol=dict( + argstr='%s', + mandatory=True, + position=2, + ), + ) + inputs = CiftiSmooth.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CiftiSmooth_outputs(): + output_map = dict(out_file=dict(), ) + outputs = CiftiSmooth.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/testing/data/sub-01.L.midthickness.32k_fs_LR.surf.gii b/nipype/testing/data/sub-01.L.midthickness.32k_fs_LR.surf.gii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/sub-01.R.midthickness.32k_fs_LR.surf.gii b/nipype/testing/data/sub-01.R.midthickness.32k_fs_LR.surf.gii new file mode 100644 
index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/sub-01_task-rest.dtseries.nii b/nipype/testing/data/sub-01_task-rest.dtseries.nii new file mode 100644 index 0000000000..e69de29bb2 From efdbc711c9cbe22b5f03a01e9109512ed4d3f5fa Mon Sep 17 00:00:00 2001 From: mathiasg Date: Thu, 31 Jan 2019 16:52:56 -0500 Subject: [PATCH 0112/1665] sty: remove unused import --- nipype/interfaces/workbench/cifti.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nipype/interfaces/workbench/cifti.py b/nipype/interfaces/workbench/cifti.py index c7244c02c7..be4051e5b4 100644 --- a/nipype/interfaces/workbench/cifti.py +++ b/nipype/interfaces/workbench/cifti.py @@ -4,7 +4,6 @@ """This module provides interfaces for workbench CIFTI commands""" from __future__ import (print_function, division, unicode_literals, absolute_import) -import os from ..base import (TraitedSpec, File, traits, CommandLineInputSpec) from .base import WBCommand From 94bea4a14284801270c2e778e2950270a93b9d98 Mon Sep 17 00:00:00 2001 From: rciric Date: Sun, 3 Feb 2019 23:49:13 -0800 Subject: [PATCH 0113/1665] first-pass refactor CompCor to SimpleInterface --- nipype/algorithms/confounds.py | 46 +++++++++++++++------------------- 1 file changed, 20 insertions(+), 26 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index 657a201de5..712996f0f0 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -24,7 +24,8 @@ from ..external.due import BibTeX from ..interfaces.base import (traits, TraitedSpec, BaseInterface, BaseInterfaceInputSpec, File, isdefined, - InputMultiPath, OutputMultiPath) + InputMultiPath, OutputMultiPath, + SimpleInterface) from ..utils import NUMPY_MMAP from ..utils.misc import normalize_mc_params @@ -462,7 +463,7 @@ class CompCorOutputSpec(TraitedSpec): metadata_file = File(desc='text file containing component metadata') -class CompCor(BaseInterface): +class CompCor(SimpleInterface): """ Interface with core CompCor computation, used in aCompCor and tCompCor @@ -609,9 +610,16 @@ def _run_interface(self, runtime): delimiter='\t', header='\t'.join(components_header), comments='') + self._results['components_file'] = components_file - if self.inputs.pre_filter and self.inputs.save_pre_filter: - pre_filter_file = self._list_outputs()['pre_filter_file'] + save_pre_filter = self.inputs.save_pre_filter + if save_pre_filter: + if isinstance(save_pre_filter, bool): + pre_filter_file = os.path.abspath('pre_filter.tsv') + else: + pre_filter_file = save_pre_filter + self._results['pre_filter_file'] = pre_filter_file + if self.inputs.pre_filter and save_pre_filter: ftype = { 'polynomial': 'Legendre', 'cosine': 'Cosine' @@ -638,8 +646,13 @@ def _run_interface(self, runtime): header='\t'.join(header), comments='') - if self.inputs.save_metadata: - metadata_file = self._list_outputs()['metadata_file'] + save_metadata = self.inputs.save_metadata + if save_metadata: + if isinstance(save_metadata, bool): + metadata_file = os.path.abspath('component_metadata.tsv') + else: + metadata_file = save_metadata + self._results['metadata_file'] = metadata_file with open(metadata_file, 'w') as f: f.write('{}\t{}\t{}\t{}\t{}\n'.format('component', *list(metadata.keys()))) @@ -652,25 +665,6 @@ def _run_interface(self, runtime): def _process_masks(self, mask_images, timeseries=None): return mask_images - def _list_outputs(self): - outputs = self._outputs().get() - outputs['components_file'] = os.path.abspath( - self.inputs.components_file) - - save_pre_filter = 
self.inputs.save_pre_filter - if save_pre_filter: - if isinstance(save_pre_filter, bool): - save_pre_filter = os.path.abspath('pre_filter.tsv') - outputs['pre_filter_file'] = save_pre_filter - - save_metadata = self.inputs.save_metadata - if save_metadata: - if isinstance(save_metadata, bool): - save_metadata = os.path.abspath('component_metadata.tsv') - outputs['metadata_file'] = save_metadata - - return outputs - def _make_headers(self, num_col): header = self.inputs.header_prefix if \ isdefined(self.inputs.header_prefix) else self._header @@ -1229,7 +1223,7 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, period_cut: minimum period (in sec) for DCT high-pass filter repetition_time: time (in sec) between volume acquisitions - Outputs + Returns ------- components: numpy array Numpy array containing the requested set of noise components From 5a689fc4d03727d186d6dd0b196a5acc1c07858a Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 5 Feb 2019 09:23:11 -0500 Subject: [PATCH 0114/1665] CI: Restore Py2.7 --pre test --- .travis.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 645d6c623b..29f43a75d9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -60,9 +60,6 @@ matrix: NIPYPE_EXTRAS="doc,tests,nipy,profiler" EXTRA_PIP_FLAGS="--pre $EXTRA_PIP_FLAGS --find-links $PRE_WHEELS" CI_SKIP_TEST=1 - allow_failures: - - python: 2.7 - env: INSTALL_DEB_DEPENDECIES=true NIPYPE_EXTRAS="doc,tests,nipy,profiler" EXTRA_PIP_FLAGS="--pre $EXTRA_PIP_FLAGS --find-links $PRE_WHEELS" CI_SKIP_TEST=1 addons: apt: From addb0e9ece8d0563e2e6c3af2e389bf011dbf839 Mon Sep 17 00:00:00 2001 From: rciric Date: Tue, 5 Feb 2019 23:06:42 -0800 Subject: [PATCH 0115/1665] return metadata for all components regardless of retention criterion --- nipype/algorithms/confounds.py | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index 712996f0f0..0039b3a6e3 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -652,11 +652,15 @@ def _run_interface(self, runtime): metadata_file = os.path.abspath('component_metadata.tsv') else: metadata_file = save_metadata + components_names = np.array(dtype='object_', + object=['dropped' for i in range(len(metadata['mask']))]) + components_names[np.where(metadata['retained'])] = ( + components_header) self._results['metadata_file'] = metadata_file with open(metadata_file, 'w') as f: f.write('{}\t{}\t{}\t{}\t{}\n'.format('component', *list(metadata.keys()))) - for i in zip(components_header, *metadata.values()): + for i in zip(components_names, *metadata.values()): f.write('{0[0]}\t{0[1]}\t{0[2]:.10f}\t' '{0[3]:.10f}\t{0[4]:.10f}\n'.format(i)) @@ -1296,23 +1300,30 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, if components is None: components = u[:, :num_components] metadata = OrderedDict() - metadata['mask'] = np.array([i] * num_components) - metadata['singular_value'] = s[:num_components] - metadata['variance_explained'] = variance_explained[:num_components] + metadata['mask'] = np.array([i] * len(s)) + metadata['singular_value'] = s + metadata['variance_explained'] = variance_explained metadata['cumulative_variance_explained'] = ( - cumulative_variance_explained[:num_components]) + cumulative_variance_explained) + metadata['retained'] = np.array( + [True if i < num_components + else False for i in range(len(s))], dtype='bool') else: components 
= np.hstack((components, u[:, :num_components])) metadata['mask'] = np.hstack((metadata['mask'], - [i] * num_components)) + [i] * len(s))) metadata['singular_value'] = ( - np.hstack((metadata['singular_value'], s[:num_components]))) + np.hstack((metadata['singular_value'], s))) metadata['variance_explained'] = ( np.hstack((metadata['variance_explained'], - variance_explained[:num_components]))) + variance_explained))) metadata['cumulative_variance_explained'] = ( np.hstack((metadata['cumulative_variance_explained'], - cumulative_variance_explained[:num_components]))) + cumulative_variance_explained))) + metadata['retained'] = np.hstack((metadata['retained'], + [True if i < num_components + else False + for i in range(len(s))])) if components is None and num_components != 0: if self.inputs.failure_mode == 'error': raise ValueError('No components found') From 7c7d4627b3144ac78ed8e5ca9d23cee445442dd0 Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Wed, 6 Feb 2019 14:31:04 -0500 Subject: [PATCH 0116/1665] updated nipype2boutiques to conform to current schema, added default value for tool-version when interface version is null --- nipype/utils/nipype2boutiques.py | 36 ++++++++++++--------- nipype/utils/tests/test_nipype2boutiques.py | 5 +-- 2 files changed, 23 insertions(+), 18 deletions(-) diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py index 21ecbc0eee..617a5d28d2 100644 --- a/nipype/utils/nipype2boutiques.py +++ b/nipype/utils/nipype2boutiques.py @@ -25,15 +25,18 @@ def generate_boutiques_descriptor( - module, interface_name, ignored_template_inputs, docker_image, - docker_index, verbose, ignore_template_numbers): + module, interface_name, ignored_template_inputs, container_image, + container_index, container_type, verbose, ignore_template_numbers): ''' Returns a JSON string containing a JSON Boutiques description of a Nipype interface. Arguments: * module: module where the Nipype interface is declared. - * interface: Nipype interface. + * interface_name: name of Nipype interface. * ignored_template_inputs: a list of input names that should be ignored in the generation of output path templates. * ignore_template_numbers: True if numbers must be ignored in output path creations. + * container_image: name of the container image where the tool is installed + * container_index: optional index where the image is available + * container_type: type of container image (Docker or Singularity) ''' if not module: @@ -59,13 +62,15 @@ def generate_boutiques_descriptor( tool_desc[ 'description'] = interface_name + ", as implemented in Nipype (module: " + module_name + ", interface: " + interface_name + ")." 
tool_desc['inputs'] = [] - tool_desc['outputs'] = [] - tool_desc['tool-version'] = interface.version - tool_desc['schema-version'] = '0.2-snapshot' - if docker_image: - tool_desc['docker-image'] = docker_image - if docker_index: - tool_desc['docker-index'] = docker_index + tool_desc['output-files'] = [] + tool_desc['tool-version'] = interface.version if interface.version is not None else 'undefined' + tool_desc['schema-version'] = '0.5' + if container_image: + tool_desc['container-image'] = {} + tool_desc['container-image']['image'] = container_image + tool_desc['container-image']['type'] = container_type + if container_index: + tool_desc['container-image']['index'] = container_index # Generates tool inputs for name, spec in sorted(interface.inputs.traits(transient=None).items()): @@ -73,7 +78,7 @@ def generate_boutiques_descriptor( ignored_template_inputs, verbose, ignore_template_numbers) tool_desc['inputs'].append(input) - tool_desc['command-line'] += input['command-line-key'] + " " + tool_desc['command-line'] += input['value-key'] + " " if verbose: print("-> Adding input " + input['name']) @@ -82,13 +87,13 @@ def generate_boutiques_descriptor( output = get_boutiques_output(name, interface, tool_desc['inputs'], verbose) if output['path-template'] != "": - tool_desc['outputs'].append(output) + tool_desc['output-files'].append(output) if verbose: print("-> Adding output " + output['name']) elif verbose: print("xx Skipping output " + output['name'] + " with no path template.") - if tool_desc['outputs'] == []: + if tool_desc['output-files'] == []: raise Exception("Tool has no output.") # Removes all temporary values from inputs (otherwise they will @@ -125,7 +130,7 @@ def get_boutiques_input(inputs, interface, input_name, spec, input['name'] = input_name.replace('_', ' ').capitalize() input['type'] = get_type_from_spec_info(spec_info) input['list'] = is_list(spec_info) - input['command-line-key'] = "[" + input_name.upper( + input['value-key'] = "[" + input_name.upper( ) + "]" # assumes that input names are unique input['command-line-flag'] = ("--%s" % input_name + " ").strip() input['tempvalue'] = None @@ -178,7 +183,6 @@ def get_boutiques_output(name, interface, tool_inputs, verbose=False): output = {} output['name'] = name.replace('_', ' ').capitalize() output['id'] = name - output['type'] = "File" output['path-template'] = "" output[ 'optional'] = True # no real way to determine if an output is always produced, regardless of the input values. 
@@ -201,7 +205,7 @@ def get_boutiques_output(name, interface, tool_inputs, verbose=False): if str(input_value) in output_value: output_value = os.path.basename( output_value.replace(input_value, - input['command-line-key']) + input['value-key']) ) # FIXME: this only works if output is written in the current directory output['path-template'] = os.path.basename(output_value) return output diff --git a/nipype/utils/tests/test_nipype2boutiques.py b/nipype/utils/tests/test_nipype2boutiques.py index f1d0c46eed..3aecefda63 100644 --- a/nipype/utils/tests/test_nipype2boutiques.py +++ b/nipype/utils/tests/test_nipype2boutiques.py @@ -11,7 +11,8 @@ def test_generate(): generate_boutiques_descriptor(module='nipype.interfaces.ants.registration', interface_name='ANTS', ignored_template_inputs=(), - docker_image=None, - docker_index=None, + container_image=None, + container_index=None, + container_type=None, verbose=False, ignore_template_numbers=False) From fc27d01227127f1c61033e72c4922afdd1b03368 Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Wed, 6 Feb 2019 15:56:10 -0500 Subject: [PATCH 0117/1665] added a step to make sure output IDs are unique in case output name is the same as an input name --- nipype/utils/nipype2boutiques.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py index 617a5d28d2..626cf13067 100644 --- a/nipype/utils/nipype2boutiques.py +++ b/nipype/utils/nipype2boutiques.py @@ -182,7 +182,16 @@ def get_boutiques_output(name, interface, tool_inputs, verbose=False): """ output = {} output['name'] = name.replace('_', ' ').capitalize() - output['id'] = name + + # Check if the output name was already used as an input name + # If so, append '_outfile' to the end of the ID + unique_id = True + for inp in tool_inputs: + if inp['id'] == name: + unique_id = False + break + output['id'] = name if unique_id else name + '_outfile' + output['path-template'] = "" output[ 'optional'] = True # no real way to determine if an output is always produced, regardless of the input values. From efe838ee08a1c145732428abc8ff0198690c458d Mon Sep 17 00:00:00 2001 From: "Michael R. 
Crusoe" <1330696+mr-c@users.noreply.github.com> Date: Thu, 7 Feb 2019 10:17:31 +0200 Subject: [PATCH 0118/1665] replace typo with words --- nipype/pipeline/engine/tests/test_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/pipeline/engine/tests/test_utils.py b/nipype/pipeline/engine/tests/test_utils.py index 42f8b2434e..4f4383f169 100644 --- a/nipype/pipeline/engine/tests/test_utils.py +++ b/nipype/pipeline/engine/tests/test_utils.py @@ -220,7 +220,7 @@ def test_mapnode_crash3(tmpdir): wf = pe.Workflow('testmapnodecrash') wf.add_nodes([node]) wf.base_dir = tmpdir.strpath - # changing crashdump dir to cwl (to avoid problems with read-only systems) + # changing crashdump dir to current working directory (to avoid problems with read-only systems) wf.config["execution"]["crashdump_dir"] = os.getcwd() with pytest.raises(RuntimeError): wf.run(plugin='Linear') From fbf2c35f533b7805ca93c742006472e0809d8d03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Je=CC=81re=CC=81my=20Guillon?= Date: Thu, 7 Feb 2019 12:53:28 +0100 Subject: [PATCH 0119/1665] Accept invitation as Zenodo release co-author (see #2864) --- .zenodo.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.zenodo.json b/.zenodo.json index 4216748a10..4d25cefa52 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -604,6 +604,11 @@ "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", "orcid": "0000-0002-5312-6729" + }, + { + "affiliation": "ARAMIS LAB, Brain and Spine Institute (ICM), Paris, France.", + "name": "Guillon, Jeremy", + "orcid": "0000-0002-2672-7510" } ], "keywords": [ From e7739e74c1fc78efa6603fa09b510069b33be544 Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Thu, 7 Feb 2019 16:55:36 -0500 Subject: [PATCH 0120/1665] descriptor gets saved to a file for easier exporting, added value-choices extraction, added method to get input type from handler type --- nipype/utils/nipype2boutiques.py | 39 +++++++++++++++++++++++++++++--- 1 file changed, 36 insertions(+), 3 deletions(-) diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py index 626cf13067..33eaeb56c1 100644 --- a/nipype/utils/nipype2boutiques.py +++ b/nipype/utils/nipype2boutiques.py @@ -101,6 +101,10 @@ def generate_boutiques_descriptor( for input in tool_desc['inputs']: del input['tempvalue'] + # Save descriptor to a file + with open(interface_name + '.json', 'w') as outfile: + json.dump(tool_desc, outfile) + return json.dumps(tool_desc, indent=4, separators=(',', ': ')) @@ -128,7 +132,10 @@ def get_boutiques_input(inputs, interface, input_name, spec, input = {} input['id'] = input_name input['name'] = input_name.replace('_', ' ').capitalize() - input['type'] = get_type_from_spec_info(spec_info) + + # Figure out the input type from its handler type + input['type'] = get_type_from_handler_type(spec.handler) + input['list'] = is_list(spec_info) input['value-key'] = "[" + input_name.upper( ) + "]" # assumes that input names are unique @@ -145,6 +152,14 @@ def get_boutiques_input(inputs, interface, input_name, spec, if spec.usedefault: input['default-value'] = spec.default_value()[1] + try: + value_choices = spec.handler.values + except AttributeError: + pass + else: + if value_choices is not None: + input['value-choices'] = value_choices + # Create unique, temporary value. 
temp_value = must_generate_value(input_name, input['type'], ignored_template_inputs, spec_info, spec, @@ -158,9 +173,9 @@ def get_boutiques_input(inputs, interface, input_name, spec, str(tempvalue)) # Now that temp values have been generated, set Boolean types to - # Number (there is no Boolean type in Boutiques) + # Flag (there is no Boolean type in Boutiques) if input['type'] == "Boolean": - input['type'] = "Number" + input['type'] = "Flag" return input @@ -217,9 +232,16 @@ def get_boutiques_output(name, interface, tool_inputs, verbose=False): input['value-key']) ) # FIXME: this only works if output is written in the current directory output['path-template'] = os.path.basename(output_value) + + if not output_value: + # Look for an input with the same name and use this as the path template + for input in tool_inputs: + if input['id'] == name: + output['path-template'] = input['value-key'] return output +# TODO remove this once we know get_type_from_handler_type works well def get_type_from_spec_info(spec_info): ''' Returns an input type from the spec info. There must be a better @@ -235,6 +257,17 @@ def get_type_from_spec_info(spec_info): return "String" +def get_type_from_handler_type(handler): + handler_type = type(handler).__name__ + if handler_type == "File" or handler_type == "Directory": + return "File" + elif handler_type == "Int" or handler_type == "Float": + return "Number" + elif handler_type == "Bool": + return "Flag" + else: + return "String" + def is_list(spec_info): ''' Returns True if the spec info looks like it describes a list From 181c481e003410e7e3f95156eaf04a1a0668c084 Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Thu, 7 Feb 2019 18:16:59 -0500 Subject: [PATCH 0121/1665] added warning message when cannot determine path template for output, added check for integer types, added method to get description from spec --- nipype/utils/nipype2boutiques.py | 66 +++++++++++++++++++++++++------- 1 file changed, 52 insertions(+), 14 deletions(-) diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py index 33eaeb56c1..83148fc10f 100644 --- a/nipype/utils/nipype2boutiques.py +++ b/nipype/utils/nipype2boutiques.py @@ -63,7 +63,7 @@ def generate_boutiques_descriptor( 'description'] = interface_name + ", as implemented in Nipype (module: " + module_name + ", interface: " + interface_name + ")." tool_desc['inputs'] = [] tool_desc['output-files'] = [] - tool_desc['tool-version'] = interface.version if interface.version is not None else 'undefined' + tool_desc['tool-version'] = interface.version if interface.version is not None else "No version provided." tool_desc['schema-version'] = '0.5' if container_image: tool_desc['container-image'] = {} @@ -84,7 +84,7 @@ def generate_boutiques_descriptor( # Generates tool outputs for name, spec in sorted(outputs.traits(transient=None).items()): - output = get_boutiques_output(name, interface, tool_desc['inputs'], + output = get_boutiques_output(outputs, name, spec, interface, tool_desc['inputs'], verbose) if output['path-template'] != "": tool_desc['output-files'].append(output) @@ -105,6 +105,8 @@ def generate_boutiques_descriptor( with open(interface_name + '.json', 'w') as outfile: json.dump(tool_desc, outfile) + print("NOTE: Descriptors produced by this script may not entirely conform to the Nipype interface " + "specs. 
Please check that the descriptor is correct before using it.") return json.dumps(tool_desc, indent=4, separators=(',', ': ')) @@ -125,8 +127,6 @@ def get_boutiques_input(inputs, interface, input_name, spec, Assumes that: * Input names are unique. """ - if not spec.desc: - spec.desc = "No description provided." spec_info = spec.full_info(inputs, input_name, None) input = {} @@ -134,17 +134,17 @@ def get_boutiques_input(inputs, interface, input_name, spec, input['name'] = input_name.replace('_', ' ').capitalize() # Figure out the input type from its handler type - input['type'] = get_type_from_handler_type(spec.handler) + input_type = get_type_from_handler_type(spec.handler) + input['type'] = input_type[0] + if input_type[1]: + input['integer'] = True input['list'] = is_list(spec_info) input['value-key'] = "[" + input_name.upper( ) + "]" # assumes that input names are unique input['command-line-flag'] = ("--%s" % input_name + " ").strip() input['tempvalue'] = None - input['description'] = spec_info.capitalize( - ) + ". " + spec.desc.capitalize() - if not input['description'].endswith('.'): - input['description'] += '.' + input['description'] = get_description_from_spec(inputs, input_name, spec) if not (hasattr(spec, "mandatory") and spec.mandatory): input['optional'] = True else: @@ -180,12 +180,14 @@ def get_boutiques_input(inputs, interface, input_name, spec, return input -def get_boutiques_output(name, interface, tool_inputs, verbose=False): +def get_boutiques_output(outputs, name, spec, interface, tool_inputs, verbose=False): """ Returns a dictionary containing the Boutiques output corresponding to a Nipype output. Args: + * outputs: outputs of the Nipype interface. * name: name of the Nipype output. + * spec: Nipype output spec. * interface: Nipype interface. * tool_inputs: list of tool inputs (as produced by method get_boutiques_input). @@ -211,9 +213,13 @@ def get_boutiques_output(name, interface, tool_inputs, verbose=False): output[ 'optional'] = True # no real way to determine if an output is always produced, regardless of the input values. + output['description'] = get_description_from_spec(outputs, name, spec) + # Path template creation. output_value = interface._list_outputs()[name] + + # If output value is defined, use its basename if output_value != "" and isinstance( output_value, str): # FIXME: this crashes when there are multiple output values. @@ -233,11 +239,20 @@ def get_boutiques_output(name, interface, tool_inputs, verbose=False): ) # FIXME: this only works if output is written in the current directory output['path-template'] = os.path.basename(output_value) + # If output value is undefined, create a placeholder for the path template if not output_value: # Look for an input with the same name and use this as the path template + found = False for input in tool_inputs: if input['id'] == name: output['path-template'] = input['value-key'] + found = True + break + # If no input with the same name was found, warn the user they should provide it manually + if not found: + print("WARNING: Could not determine path template for output %s. Please provide one for the " + "descriptor manually." % name) + output['path-template'] = "WARNING: No path template provided." return output @@ -258,15 +273,21 @@ def get_type_from_spec_info(spec_info): def get_type_from_handler_type(handler): + ''' + Gets the input type from the spec handler type. + Returns a tuple containing the type and a boolean to specify + if the type is an integer. 
+ ''' handler_type = type(handler).__name__ + print("TYPE", handler_type) if handler_type == "File" or handler_type == "Directory": - return "File" + return "File", False elif handler_type == "Int" or handler_type == "Float": - return "Number" + return "Number", handler_type == "Int" elif handler_type == "Bool": - return "Flag" + return "Flag", False else: - return "String" + return "String", False def is_list(spec_info): ''' @@ -336,3 +357,20 @@ def must_generate_value(name, type, ignored_template_inputs, spec_info, spec, if not ignored_template_inputs: return True return not (name in ignored_template_inputs) + + +def get_description_from_spec(object, name, spec): + ''' + Generates a description based on the input or output spec. + ''' + if not spec.desc: + spec.desc = "No description provided." + spec_info = spec.full_info(object, name, None) + + boutiques_description = (spec_info.capitalize( + ) + ". " + spec.desc.capitalize()).replace("\n", '') + + if not boutiques_description.endswith('.'): + boutiques_description += '.' + + return boutiques_description From ce068151b7dcc0538f302557ac1b3af3cc86c03d Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Thu, 7 Feb 2019 19:50:29 -0500 Subject: [PATCH 0122/1665] fixed json file formatting, added save parameter, fixed command-line-flag to take argstr from input spec, added requires-inputs and disables-inputs --- nipype/utils/nipype2boutiques.py | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py index 83148fc10f..251269167b 100644 --- a/nipype/utils/nipype2boutiques.py +++ b/nipype/utils/nipype2boutiques.py @@ -26,7 +26,7 @@ def generate_boutiques_descriptor( module, interface_name, ignored_template_inputs, container_image, - container_index, container_type, verbose, ignore_template_numbers): + container_index, container_type, verbose, ignore_template_numbers, save): ''' Returns a JSON string containing a JSON Boutiques description of a Nipype interface. Arguments: @@ -37,6 +37,7 @@ def generate_boutiques_descriptor( * container_image: name of the container image where the tool is installed * container_index: optional index where the image is available * container_type: type of container image (Docker or Singularity) + * save: True if you want to save descriptor to a file ''' if not module: @@ -102,8 +103,11 @@ def generate_boutiques_descriptor( del input['tempvalue'] # Save descriptor to a file - with open(interface_name + '.json', 'w') as outfile: - json.dump(tool_desc, outfile) + if save: + with open(interface_name + '.json', 'w') as outfile: + json.dump(tool_desc, outfile, indent=4, separators=(',', ': ')) + if verbose: + print("-> Descriptor saved to file " + outfile.name) print("NOTE: Descriptors produced by this script may not entirely conform to the Nipype interface " "specs. 
Please check that the descriptor is correct before using it.") @@ -142,7 +146,14 @@ def get_boutiques_input(inputs, interface, input_name, spec, input['list'] = is_list(spec_info) input['value-key'] = "[" + input_name.upper( ) + "]" # assumes that input names are unique - input['command-line-flag'] = ("--%s" % input_name + " ").strip() + + # Add the command line flag specified by argstr + # If no argstr is provided and input type is Flag, create a flag from the name + if spec.argstr and spec.argstr.split("%")[0]: + input['command-line-flag'] = spec.argstr.split("%")[0].strip() + elif input['type'] == "Flag": + input['command-line-flag'] = ("--%s" % input_name + " ").strip() + input['tempvalue'] = None input['description'] = get_description_from_spec(inputs, input_name, spec) if not (hasattr(spec, "mandatory") and spec.mandatory): @@ -160,6 +171,12 @@ def get_boutiques_input(inputs, interface, input_name, spec, if value_choices is not None: input['value-choices'] = value_choices + if spec.requires is not None: + input['requires-inputs'] = spec.requires + + if spec.xor is not None: + input['disables-inputs'] = spec.xor + # Create unique, temporary value. temp_value = must_generate_value(input_name, input['type'], ignored_template_inputs, spec_info, spec, @@ -279,7 +296,6 @@ def get_type_from_handler_type(handler): if the type is an integer. ''' handler_type = type(handler).__name__ - print("TYPE", handler_type) if handler_type == "File" or handler_type == "Directory": return "File", False elif handler_type == "Int" or handler_type == "Float": From c1cfc80da1b85beafadb825251f840e700b7dbaa Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Thu, 7 Feb 2019 21:52:33 -0500 Subject: [PATCH 0123/1665] added logic to deal with range and list inputs, added try except block to deal with undefined output values --- nipype/utils/nipype2boutiques.py | 68 +++++++++++++++++++++----------- 1 file changed, 46 insertions(+), 22 deletions(-) diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py index 251269167b..f1a4f46017 100644 --- a/nipype/utils/nipype2boutiques.py +++ b/nipype/utils/nipype2boutiques.py @@ -138,12 +138,48 @@ def get_boutiques_input(inputs, interface, input_name, spec, input['name'] = input_name.replace('_', ' ').capitalize() # Figure out the input type from its handler type - input_type = get_type_from_handler_type(spec.handler) - input['type'] = input_type[0] - if input_type[1]: + handler_type = type(spec.handler).__name__ + + if handler_type == "File" or handler_type == "Directory": + input['type'] = "File" + elif handler_type == "Int": + input['type'] = "Number" input['integer'] = True + elif handler_type == "Float": + input['type'] = "Number" + elif handler_type == "Bool": + input['type'] = "Flag" + else: + input['type'] = "String" + + # Deal with range inputs + if handler_type == "Range": + input['type'] = "Number" + if spec.handler.low is not None: + input['minimum'] = spec.handler.low + if spec.handler.high is not None: + input['maximum'] = spec.handler.high + if spec.handler.exclude_low is not None: + input['exclusive-minimum'] = spec.handler.exclude_low + if spec.handler.exclude_high is not None: + input['exclusive-maximum'] = spec.handler.exclude_high + + # Deal with list inputs + if handler_type == "List": + input['list'] = True + trait_type = type(spec.handler.item_trait.trait_type).__name__ + if trait_type == "Int": + input['integer'] = True + input['type'] = "Number" + elif trait_type == "Float": + input['type'] = "Number" + else: + input['type'] = 
"String" + if spec.handler.minlen is not None: + input['min-list-entries'] = spec.handler.minlen + if spec.handler.maxlen is not None: + input['max-list-entries'] = spec.handler.maxlen - input['list'] = is_list(spec_info) input['value-key'] = "[" + input_name.upper( ) + "]" # assumes that input names are unique @@ -234,7 +270,10 @@ def get_boutiques_output(outputs, name, spec, interface, tool_inputs, verbose=Fa # Path template creation. - output_value = interface._list_outputs()[name] + try: + output_value = interface._list_outputs()[name] + except TypeError: + output_value = None # If output value is defined, use its basename if output_value != "" and isinstance( @@ -273,7 +312,7 @@ def get_boutiques_output(outputs, name, spec, interface, tool_inputs, verbose=Fa return output -# TODO remove this once we know get_type_from_handler_type works well +# TODO remove this def get_type_from_spec_info(spec_info): ''' Returns an input type from the spec info. There must be a better @@ -289,22 +328,7 @@ def get_type_from_spec_info(spec_info): return "String" -def get_type_from_handler_type(handler): - ''' - Gets the input type from the spec handler type. - Returns a tuple containing the type and a boolean to specify - if the type is an integer. - ''' - handler_type = type(handler).__name__ - if handler_type == "File" or handler_type == "Directory": - return "File", False - elif handler_type == "Int" or handler_type == "Float": - return "Number", handler_type == "Int" - elif handler_type == "Bool": - return "Flag", False - else: - return "String", False - +# TODO remove this def is_list(spec_info): ''' Returns True if the spec info looks like it describes a list From b04c9ca59ebd3a69476268c797d7d79087cc8cb6 Mon Sep 17 00:00:00 2001 From: rciric Date: Fri, 8 Feb 2019 11:26:38 -0800 Subject: [PATCH 0124/1665] @oesteban: limit np array use, clean up conditionals, remove invalid obj --- nipype/algorithms/confounds.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index 0039b3a6e3..d72d25c41d 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -400,7 +400,7 @@ class CompCorInputSpec(BaseInterfaceInputSpec): 'components_file.txt', usedefault=True, desc='Filename to store physiological components') - num_components = traits.Either('all', traits.Int, + num_components = traits.Either('all', traits.Range(low=1), xor=['variance_threshold'], desc='Number of components to return from the decomposition. 
If ' '`num_components` is `all`, then all components will be ' @@ -1280,7 +1280,7 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, try: u, s, _ = fallback_svd(M, full_matrices=False) except np.linalg.LinAlgError: - if self.inputs.failure_mode == 'error': + if failure_mode == 'error': raise if components_criterion >= 1: u = np.empty((M.shape[0], components_criterion), @@ -1288,26 +1288,28 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, else: continue - variance_explained = np.array([value**2/np.sum(s**2) for value in s]) + variance_explained = [value ** 2 / np.sum(s ** 2) for value in s] cumulative_variance_explained = np.cumsum(variance_explained) + + num_components = int(components_criterion) if 0 < components_criterion < 1: num_components = np.searchsorted(cumulative_variance_explained, components_criterion) + 1 elif components_criterion == -1: num_components = len(s) - else: - num_components = int(components_criterion) + + num_components = int(num_components) + # check whether num_components == 0, break if so if components is None: components = u[:, :num_components] metadata = OrderedDict() - metadata['mask'] = np.array([i] * len(s)) + metadata['mask'] = [i] * len(s) metadata['singular_value'] = s metadata['variance_explained'] = variance_explained metadata['cumulative_variance_explained'] = ( cumulative_variance_explained) - metadata['retained'] = np.array( - [True if i < num_components - else False for i in range(len(s))], dtype='bool') + metadata['retained'] = [ + i < num_components for i in range(len(s))] else: components = np.hstack((components, u[:, :num_components])) metadata['mask'] = np.hstack((metadata['mask'], @@ -1324,8 +1326,8 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, [True if i < num_components else False for i in range(len(s))])) - if components is None and num_components != 0: - if self.inputs.failure_mode == 'error': + if components is None: + if failure_mode == 'error': raise ValueError('No components found') components = np.ones((M.shape[0], num_components), dtype=np.float32) * np.nan From e957e87ddc7b741b46991e761f2f132ac674f86b Mon Sep 17 00:00:00 2001 From: rciric Date: Sat, 9 Feb 2019 02:29:28 -0800 Subject: [PATCH 0125/1665] less np array use; unique names for dropped components --- nipype/algorithms/confounds.py | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index d72d25c41d..6c6b7cd513 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -652,10 +652,13 @@ def _run_interface(self, runtime): metadata_file = os.path.abspath('component_metadata.tsv') else: metadata_file = save_metadata - components_names = np.array(dtype='object_', - object=['dropped' for i in range(len(metadata['mask']))]) - components_names[np.where(metadata['retained'])] = ( - components_header) + components_names = np.empty(len(metadata['mask']), + dtype='object_') + retained = np.where(metadata['retained']) + not_retained = np.where(np.logical_not(metadata['retained'])) + components_names[retained] = components_header + components_names[not_retained] = ([ + 'dropped{}'.format(i) for i in range(len(not_retained[0]))]) self._results['metadata_file'] = metadata_file with open(metadata_file, 'w') as f: f.write('{}\t{}\t{}\t{}\t{}\n'.format('component', @@ -1288,7 +1291,7 @@ def compute_noise_components(imgseries, mask_images, 
components_criterion=0.5, else: continue - variance_explained = [value ** 2 / np.sum(s ** 2) for value in s] + variance_explained = (s ** 2) / np.sum(s ** 2) cumulative_variance_explained = np.cumsum(variance_explained) num_components = int(components_criterion) @@ -1299,7 +1302,8 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, num_components = len(s) num_components = int(num_components) - # check whether num_components == 0, break if so + if num_components == 0: + break if components is None: components = u[:, :num_components] metadata = OrderedDict() @@ -1308,12 +1312,10 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, metadata['variance_explained'] = variance_explained metadata['cumulative_variance_explained'] = ( cumulative_variance_explained) - metadata['retained'] = [ - i < num_components for i in range(len(s))] + metadata['retained'] = [i < num_components for i in range(len(s))] else: components = np.hstack((components, u[:, :num_components])) - metadata['mask'] = np.hstack((metadata['mask'], - [i] * len(s))) + metadata['mask'] = metadata['mask'] + [i] * len(s) metadata['singular_value'] = ( np.hstack((metadata['singular_value'], s))) metadata['variance_explained'] = ( @@ -1322,15 +1324,13 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, metadata['cumulative_variance_explained'] = ( np.hstack((metadata['cumulative_variance_explained'], cumulative_variance_explained))) - metadata['retained'] = np.hstack((metadata['retained'], - [True if i < num_components - else False - for i in range(len(s))])) + metadata['retained'] = (metadata['retained'] + + [i < num_components + for i in range(len(s))]) if components is None: if failure_mode == 'error': raise ValueError('No components found') - components = np.ones((M.shape[0], num_components), - dtype=np.float32) * np.nan + components = np.full((M.shape[0], num_components), np.NaN) return components, basis, metadata From 797801e92731b5564b9d673ec2de3585587b6898 Mon Sep 17 00:00:00 2001 From: rciric Date: Sat, 9 Feb 2019 04:07:58 -0800 Subject: [PATCH 0126/1665] ensure absolute path to components file --- nipype/algorithms/confounds.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index 6c6b7cd513..6a03d5830c 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -610,7 +610,8 @@ def _run_interface(self, runtime): delimiter='\t', header='\t'.join(components_header), comments='') - self._results['components_file'] = components_file + self._results['components_file'] = os.path.abspath( + self.inputs.components_file) save_pre_filter = self.inputs.save_pre_filter if save_pre_filter: From 67a3276aaaf0ca385c50f4ab23b26f7479deb120 Mon Sep 17 00:00:00 2001 From: rciric Date: Sat, 9 Feb 2019 04:31:28 -0800 Subject: [PATCH 0127/1665] (CompCor) try BaseInterface --- nipype/algorithms/confounds.py | 46 +++++++++++++++++++--------------- 1 file changed, 26 insertions(+), 20 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index 6a03d5830c..db5e3cb00a 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -24,8 +24,7 @@ from ..external.due import BibTeX from ..interfaces.base import (traits, TraitedSpec, BaseInterface, BaseInterfaceInputSpec, File, isdefined, - InputMultiPath, OutputMultiPath, - SimpleInterface) + InputMultiPath, OutputMultiPath) from ..utils import NUMPY_MMAP 
from ..utils.misc import normalize_mc_params @@ -463,7 +462,7 @@ class CompCorOutputSpec(TraitedSpec): metadata_file = File(desc='text file containing component metadata') -class CompCor(SimpleInterface): +class CompCor(BaseInterface): """ Interface with core CompCor computation, used in aCompCor and tCompCor @@ -610,17 +609,9 @@ def _run_interface(self, runtime): delimiter='\t', header='\t'.join(components_header), comments='') - self._results['components_file'] = os.path.abspath( - self.inputs.components_file) - save_pre_filter = self.inputs.save_pre_filter - if save_pre_filter: - if isinstance(save_pre_filter, bool): - pre_filter_file = os.path.abspath('pre_filter.tsv') - else: - pre_filter_file = save_pre_filter - self._results['pre_filter_file'] = pre_filter_file - if self.inputs.pre_filter and save_pre_filter: + if self.inputs.pre_filter and self.inputs.save_pre_filter: + pre_filter_file = self._list_outputs()['pre_filter_file'] ftype = { 'polynomial': 'Legendre', 'cosine': 'Cosine' @@ -647,12 +638,8 @@ def _run_interface(self, runtime): header='\t'.join(header), comments='') - save_metadata = self.inputs.save_metadata - if save_metadata: - if isinstance(save_metadata, bool): - metadata_file = os.path.abspath('component_metadata.tsv') - else: - metadata_file = save_metadata + if self.inputs.save_metadata: + metadata_file = self._list_outputs()['metadata_file'] components_names = np.empty(len(metadata['mask']), dtype='object_') retained = np.where(metadata['retained']) @@ -660,7 +647,6 @@ def _run_interface(self, runtime): components_names[retained] = components_header components_names[not_retained] = ([ 'dropped{}'.format(i) for i in range(len(not_retained[0]))]) - self._results['metadata_file'] = metadata_file with open(metadata_file, 'w') as f: f.write('{}\t{}\t{}\t{}\t{}\n'.format('component', *list(metadata.keys()))) @@ -673,6 +659,26 @@ def _run_interface(self, runtime): def _process_masks(self, mask_images, timeseries=None): return mask_images + def _list_outputs(self): + + outputs = self._outputs().get() + outputs['components_file'] = os.path.abspath( + self.inputs.components_file) + + save_pre_filter = self.inputs.save_pre_filter + if save_pre_filter: + if isinstance(save_pre_filter, bool): + save_pre_filter = os.path.abspath('pre_filter.tsv') + outputs['pre_filter_file'] = save_pre_filter + + save_metadata = self.inputs.save_metadata + if save_metadata: + if isinstance(save_metadata, bool): + save_metadata = os.path.abspath('component_metadata.tsv') + outputs['metadata_file'] = save_metadata + + return outputs + def _make_headers(self, num_col): header = self.inputs.header_prefix if \ isdefined(self.inputs.header_prefix) else self._header From fe430f59330ff7b238cc3d5a85bcec412f627b1d Mon Sep 17 00:00:00 2001 From: rciric Date: Sat, 9 Feb 2019 04:45:52 -0800 Subject: [PATCH 0128/1665] ensure absolute path to components file --- nipype/algorithms/confounds.py | 46 +++++++++++++++------------------- 1 file changed, 20 insertions(+), 26 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index db5e3cb00a..6a03d5830c 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -24,7 +24,8 @@ from ..external.due import BibTeX from ..interfaces.base import (traits, TraitedSpec, BaseInterface, BaseInterfaceInputSpec, File, isdefined, - InputMultiPath, OutputMultiPath) + InputMultiPath, OutputMultiPath, + SimpleInterface) from ..utils import NUMPY_MMAP from ..utils.misc import normalize_mc_params @@ -462,7 +463,7 @@ class 
CompCorOutputSpec(TraitedSpec): metadata_file = File(desc='text file containing component metadata') -class CompCor(BaseInterface): +class CompCor(SimpleInterface): """ Interface with core CompCor computation, used in aCompCor and tCompCor @@ -609,9 +610,17 @@ def _run_interface(self, runtime): delimiter='\t', header='\t'.join(components_header), comments='') + self._results['components_file'] = os.path.abspath( + self.inputs.components_file) - if self.inputs.pre_filter and self.inputs.save_pre_filter: - pre_filter_file = self._list_outputs()['pre_filter_file'] + save_pre_filter = self.inputs.save_pre_filter + if save_pre_filter: + if isinstance(save_pre_filter, bool): + pre_filter_file = os.path.abspath('pre_filter.tsv') + else: + pre_filter_file = save_pre_filter + self._results['pre_filter_file'] = pre_filter_file + if self.inputs.pre_filter and save_pre_filter: ftype = { 'polynomial': 'Legendre', 'cosine': 'Cosine' @@ -638,8 +647,12 @@ def _run_interface(self, runtime): header='\t'.join(header), comments='') - if self.inputs.save_metadata: - metadata_file = self._list_outputs()['metadata_file'] + save_metadata = self.inputs.save_metadata + if save_metadata: + if isinstance(save_metadata, bool): + metadata_file = os.path.abspath('component_metadata.tsv') + else: + metadata_file = save_metadata components_names = np.empty(len(metadata['mask']), dtype='object_') retained = np.where(metadata['retained']) @@ -647,6 +660,7 @@ def _run_interface(self, runtime): components_names[retained] = components_header components_names[not_retained] = ([ 'dropped{}'.format(i) for i in range(len(not_retained[0]))]) + self._results['metadata_file'] = metadata_file with open(metadata_file, 'w') as f: f.write('{}\t{}\t{}\t{}\t{}\n'.format('component', *list(metadata.keys()))) @@ -659,26 +673,6 @@ def _run_interface(self, runtime): def _process_masks(self, mask_images, timeseries=None): return mask_images - def _list_outputs(self): - - outputs = self._outputs().get() - outputs['components_file'] = os.path.abspath( - self.inputs.components_file) - - save_pre_filter = self.inputs.save_pre_filter - if save_pre_filter: - if isinstance(save_pre_filter, bool): - save_pre_filter = os.path.abspath('pre_filter.tsv') - outputs['pre_filter_file'] = save_pre_filter - - save_metadata = self.inputs.save_metadata - if save_metadata: - if isinstance(save_metadata, bool): - save_metadata = os.path.abspath('component_metadata.tsv') - outputs['metadata_file'] = save_metadata - - return outputs - def _make_headers(self, num_col): header = self.inputs.header_prefix if \ isdefined(self.inputs.header_prefix) else self._header From ee4b935b3b823df1b61254393632d9c3f7ce70c9 Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Mon, 11 Feb 2019 17:25:54 -0500 Subject: [PATCH 0129/1665] added logic to deal with compound traits, added check to make sure inputs don't disable themselves, made default output path template the output id --- nipype/utils/nipype2boutiques.py | 81 ++++++++++++++------- nipype/utils/tests/test_nipype2boutiques.py | 3 +- 2 files changed, 57 insertions(+), 27 deletions(-) diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py index f1a4f46017..34b79437fa 100644 --- a/nipype/utils/nipype2boutiques.py +++ b/nipype/utils/nipype2boutiques.py @@ -78,10 +78,17 @@ def generate_boutiques_descriptor( input = get_boutiques_input(inputs, interface, name, spec, ignored_template_inputs, verbose, ignore_template_numbers) - tool_desc['inputs'].append(input) - tool_desc['command-line'] += 
input['value-key'] + " " - if verbose: - print("-> Adding input " + input['name']) + if isinstance(input, list): + for i in input: + tool_desc['inputs'].append(i) + tool_desc['command-line'] += i['value-key'] + " " + if verbose: + print("-> Adding input " + i['name']) + else: + tool_desc['inputs'].append(input) + tool_desc['command-line'] += input['value-key'] + " " + if verbose: + print("-> Adding input " + input['name']) # Generates tool outputs for name, spec in sorted(outputs.traits(transient=None).items()): @@ -116,7 +123,7 @@ def generate_boutiques_descriptor( def get_boutiques_input(inputs, interface, input_name, spec, ignored_template_inputs, verbose, - ignore_template_numbers): + ignore_template_numbers, handler=None, input_number=None): """ Returns a dictionary containing the Boutiques input corresponding to a Nipype intput. @@ -134,11 +141,32 @@ def get_boutiques_input(inputs, interface, input_name, spec, spec_info = spec.full_info(inputs, input_name, None) input = {} - input['id'] = input_name + + if input_number is not None: + input['id'] = input_name + "_" + str(input_number + 1) + else: + input['id'] = input_name + input['name'] = input_name.replace('_', ' ').capitalize() + if handler is None: + trait_handler = spec.handler + else: + trait_handler = handler + # Figure out the input type from its handler type - handler_type = type(spec.handler).__name__ + handler_type = type(trait_handler).__name__ + + # Deal with compound traits + # TODO create a mutually exclusive group for members of compound traits + if handler_type == "TraitCompound": + input_list = [] + # Recursively create an input for each trait + for i in range(0, len(trait_handler.handlers)): + input_list.append(get_boutiques_input(inputs, interface, input_name, spec, + ignored_template_inputs, verbose, + ignore_template_numbers, trait_handler.handlers[i], i)) + return input_list if handler_type == "File" or handler_type == "Directory": input['type'] = "File" @@ -155,19 +183,19 @@ def get_boutiques_input(inputs, interface, input_name, spec, # Deal with range inputs if handler_type == "Range": input['type'] = "Number" - if spec.handler.low is not None: - input['minimum'] = spec.handler.low - if spec.handler.high is not None: - input['maximum'] = spec.handler.high - if spec.handler.exclude_low is not None: - input['exclusive-minimum'] = spec.handler.exclude_low - if spec.handler.exclude_high is not None: - input['exclusive-maximum'] = spec.handler.exclude_high + if trait_handler.low is not None: + input['minimum'] = trait_handler.low + if trait_handler.high is not None: + input['maximum'] = trait_handler.high + if trait_handler.exclude_low is not None: + input['exclusive-minimum'] = trait_handler.exclude_low + if trait_handler.exclude_high is not None: + input['exclusive-maximum'] = trait_handler.exclude_high # Deal with list inputs if handler_type == "List": input['list'] = True - trait_type = type(spec.handler.item_trait.trait_type).__name__ + trait_type = type(trait_handler.item_trait.trait_type).__name__ if trait_type == "Int": input['integer'] = True input['type'] = "Number" @@ -175,10 +203,10 @@ def get_boutiques_input(inputs, interface, input_name, spec, input['type'] = "Number" else: input['type'] = "String" - if spec.handler.minlen is not None: - input['min-list-entries'] = spec.handler.minlen - if spec.handler.maxlen is not None: - input['max-list-entries'] = spec.handler.maxlen + if trait_handler.minlen is not None: + input['min-list-entries'] = trait_handler.minlen + if trait_handler.maxlen is not None: + 
input['max-list-entries'] = trait_handler.maxlen input['value-key'] = "[" + input_name.upper( ) + "]" # assumes that input names are unique @@ -200,7 +228,7 @@ def get_boutiques_input(inputs, interface, input_name, spec, input['default-value'] = spec.default_value()[1] try: - value_choices = spec.handler.values + value_choices = trait_handler.values except AttributeError: pass else: @@ -211,7 +239,10 @@ def get_boutiques_input(inputs, interface, input_name, spec, input['requires-inputs'] = spec.requires if spec.xor is not None: - input['disables-inputs'] = spec.xor + input['disables-inputs'] = list(spec.xor) + # Make sure input does not disable itself + if input['id'] in input['disables-inputs']: + input['disables-inputs'].remove(input['id']) # Create unique, temporary value. temp_value = must_generate_value(input_name, input['type'], @@ -304,11 +335,9 @@ def get_boutiques_output(outputs, name, spec, interface, tool_inputs, verbose=Fa output['path-template'] = input['value-key'] found = True break - # If no input with the same name was found, warn the user they should provide it manually + # If no input with the same name was found, use the output ID if not found: - print("WARNING: Could not determine path template for output %s. Please provide one for the " - "descriptor manually." % name) - output['path-template'] = "WARNING: No path template provided." + output['path-template'] = output['id'] return output diff --git a/nipype/utils/tests/test_nipype2boutiques.py b/nipype/utils/tests/test_nipype2boutiques.py index 3aecefda63..ac12c527f7 100644 --- a/nipype/utils/tests/test_nipype2boutiques.py +++ b/nipype/utils/tests/test_nipype2boutiques.py @@ -15,4 +15,5 @@ def test_generate(): container_index=None, container_type=None, verbose=False, - ignore_template_numbers=False) + ignore_template_numbers=False, + save=False) From ad36016e581703ea23d79122a5bfb5a7acb6c54c Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Mon, 11 Feb 2019 22:49:52 -0500 Subject: [PATCH 0130/1665] put compound inputs into mutex group --- nipype/utils/nipype2boutiques.py | 44 ++++++++++---------------------- 1 file changed, 13 insertions(+), 31 deletions(-) diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py index 34b79437fa..2e29be8aba 100644 --- a/nipype/utils/nipype2boutiques.py +++ b/nipype/utils/nipype2boutiques.py @@ -78,12 +78,20 @@ def generate_boutiques_descriptor( input = get_boutiques_input(inputs, interface, name, spec, ignored_template_inputs, verbose, ignore_template_numbers) + # Handle compound inputs (inputs that can be of multiple types and are mutually exclusive) if isinstance(input, list): + mutex_group_members = [] for i in input: tool_desc['inputs'].append(i) tool_desc['command-line'] += i['value-key'] + " " + mutex_group_members.append(i['id']) if verbose: print("-> Adding input " + i['name']) + # Put inputs into a mutually exclusive group + tool_desc['groups'] = [{'id': input[0]['id'] + "_group", + 'name': input[0]['name'], + 'members': mutex_group_members, + 'mutually-exclusive': True}] else: tool_desc['inputs'].append(input) tool_desc['command-line'] += input['value-key'] + " " @@ -123,7 +131,8 @@ def generate_boutiques_descriptor( def get_boutiques_input(inputs, interface, input_name, spec, ignored_template_inputs, verbose, - ignore_template_numbers, handler=None, input_number=None): + ignore_template_numbers, handler=None, + input_number=None): """ Returns a dictionary containing the Boutiques input corresponding to a Nipype intput. 
@@ -134,6 +143,8 @@ def get_boutiques_input(inputs, interface, input_name, spec, * spec: Nipype input spec. * ignored_template_inputs: input names for which no temporary value must be generated. * ignore_template_numbers: True if numbers must be ignored in output path creations. + * handler: used when handling compound inputs, which don't have their own input spec + * input_number: used when handling compound inputs to assign each a unique ID Assumes that: * Input names are unique. @@ -142,7 +153,7 @@ def get_boutiques_input(inputs, interface, input_name, spec, input = {} - if input_number is not None: + if input_number is not None and input_number != 0: # No need to append a number to the first of a list of compound inputs input['id'] = input_name + "_" + str(input_number + 1) else: input['id'] = input_name @@ -158,7 +169,6 @@ def get_boutiques_input(inputs, interface, input_name, spec, handler_type = type(trait_handler).__name__ # Deal with compound traits - # TODO create a mutually exclusive group for members of compound traits if handler_type == "TraitCompound": input_list = [] # Recursively create an input for each trait @@ -341,34 +351,6 @@ def get_boutiques_output(outputs, name, spec, interface, tool_inputs, verbose=Fa return output -# TODO remove this -def get_type_from_spec_info(spec_info): - ''' - Returns an input type from the spec info. There must be a better - way to get an input type in Nipype than to parse the spec info. - ''' - if ("an existing file name" in spec_info) or ( - "input volumes" in spec_info): - return "File" - elif ("an integer" in spec_info or "a float" in spec_info): - return "Number" - elif "a boolean" in spec_info: - return "Boolean" - return "String" - - -# TODO remove this -def is_list(spec_info): - ''' - Returns True if the spec info looks like it describes a list - parameter. There must be a better way in Nipype to check if an input - is a list. - ''' - if "a list" in spec_info: - return True - return False - - def get_unique_value(type, id): ''' Returns a unique value of type 'type', for input with id 'id', From bb260f8459a3e8cb016c635f8cea9b7c3e20c4af Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Tue, 12 Feb 2019 15:04:58 -0500 Subject: [PATCH 0131/1665] added method to get all the mutex and all-or-none groups from the input specs --- nipype/utils/nipype2boutiques.py | 54 ++++++++++++++++++++++++-------- 1 file changed, 41 insertions(+), 13 deletions(-) diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py index 2e29be8aba..1f46b24885 100644 --- a/nipype/utils/nipype2boutiques.py +++ b/nipype/utils/nipype2boutiques.py @@ -64,6 +64,7 @@ def generate_boutiques_descriptor( 'description'] = interface_name + ", as implemented in Nipype (module: " + module_name + ", interface: " + interface_name + ")." tool_desc['inputs'] = [] tool_desc['output-files'] = [] + tool_desc['groups'] = [] tool_desc['tool-version'] = interface.version if interface.version is not None else "No version provided." 
tool_desc['schema-version'] = '0.5' if container_image: @@ -88,16 +89,21 @@ def generate_boutiques_descriptor( if verbose: print("-> Adding input " + i['name']) # Put inputs into a mutually exclusive group - tool_desc['groups'] = [{'id': input[0]['id'] + "_group", - 'name': input[0]['name'], - 'members': mutex_group_members, - 'mutually-exclusive': True}] + tool_desc['groups'].append({'id': input[0]['id'] + "_group", + 'name': input[0]['name'] + " group", + 'members': mutex_group_members, + 'mutually-exclusive': True}) else: tool_desc['inputs'].append(input) tool_desc['command-line'] += input['value-key'] + " " if verbose: print("-> Adding input " + input['name']) + # Generates input groups + tool_desc['groups'] += get_boutiques_groups(interface.inputs.traits(transient=None).items()) + if len(tool_desc['groups']) == 0: + del tool_desc['groups'] + # Generates tool outputs for name, spec in sorted(outputs.traits(transient=None).items()): output = get_boutiques_output(outputs, name, spec, interface, tool_desc['inputs'], @@ -245,15 +251,6 @@ def get_boutiques_input(inputs, interface, input_name, spec, if value_choices is not None: input['value-choices'] = value_choices - if spec.requires is not None: - input['requires-inputs'] = spec.requires - - if spec.xor is not None: - input['disables-inputs'] = list(spec.xor) - # Make sure input does not disable itself - if input['id'] in input['disables-inputs']: - input['disables-inputs'].remove(input['id']) - # Create unique, temporary value. temp_value = must_generate_value(input_name, input['type'], ignored_template_inputs, spec_info, spec, @@ -351,6 +348,37 @@ def get_boutiques_output(outputs, name, spec, interface, tool_inputs, verbose=Fa return output +def get_boutiques_groups(input_traits): + desc_groups = [] + all_or_none_input_sets = [] + mutex_input_sets = [] + + # Get all the groups + for name, spec in input_traits: + if spec.requires is not None: + group_members = set([name] + list(spec.requires)) + if group_members not in all_or_none_input_sets: + all_or_none_input_sets.append(group_members) + if spec.xor is not None: + group_members = set([name] + list(spec.xor)) + if group_members not in mutex_input_sets: + mutex_input_sets.append(group_members) + + # Create a dictionary for each one + for i in range(0, len(all_or_none_input_sets)): + desc_groups.append({'id': "all_or_none_group" + ("_" + str(i + 1) if i != 0 else ""), + 'name': "All or none group" + (" " + str(i + 1) if i != 0 else ""), + 'members': list(all_or_none_input_sets[i]), + 'all-or-none': True}) + + for i in range(0, len(mutex_input_sets)): + desc_groups.append({'id': "mutex_group" + ("_" + str(i + 1) if i != 0 else ""), + 'name': "Mutex group" + (" " + str(i + 1) if i != 0 else ""), + 'members': list(mutex_input_sets[i]), + 'mutually-exclusive': True}) + + return desc_groups + def get_unique_value(type, id): ''' Returns a unique value of type 'type', for input with id 'id', From 078d722f96b5b09b62cefa4c32673d7485991bdf Mon Sep 17 00:00:00 2001 From: Gio at UMCU Date: Thu, 14 Feb 2019 14:46:02 +0100 Subject: [PATCH 0132/1665] import math and csv modules for bids_gen_info --- nipype/algorithms/modelgen.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/algorithms/modelgen.py b/nipype/algorithms/modelgen.py index 9a0984c885..b57e6bfcd5 100644 --- a/nipype/algorithms/modelgen.py +++ b/nipype/algorithms/modelgen.py @@ -16,7 +16,7 @@ from builtins import range, str, bytes, int from copy import deepcopy -import os +import os, math, csv from nibabel import 
load import numpy as np From 56b222751df496fca1f7dbdec20501f480a250d7 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Thu, 14 Feb 2019 10:31:06 -0500 Subject: [PATCH 0133/1665] FIX: Requires error text was backwards --- nipype/interfaces/base/core.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index ae002cf17f..77ab6cf398 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -202,10 +202,16 @@ def _check_requires(self, spec, name, value): for field in spec.requires ] if any(values) and isdefined(value): - msg = ("%s requires a value for input '%s' because one of %s " - "is set. For a list of required inputs, see %s.help()" % - (self.__class__.__name__, name, - ', '.join(spec.requires), self.__class__.__name__)) + if len(values) > 1: + fmt = ("%s requires values for inputs %s because '%s' is set. " + "For a list of required inputs, see %s.help()") + else: + fmt = ("%s requires a value for input %s because '%s' is set. " + "For a list of required inputs, see %s.help()") + msg = fmt % (self.__class__.__name__, + ', '.join("'%s'" % req for req in spec.requires), + name, + self.__class__.__name__) raise ValueError(msg) def _check_xor(self, spec, name, value): From b706af826015d3a6039dcb1007c660300dcbcc8e Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Thu, 14 Feb 2019 10:31:35 -0500 Subject: [PATCH 0134/1665] TEST: Thorough test of LaplacianThickness requirement cascade --- .../interfaces/ants/tests/test_segmentation.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/nipype/interfaces/ants/tests/test_segmentation.py b/nipype/interfaces/ants/tests/test_segmentation.py index d595317713..e9a1443934 100644 --- a/nipype/interfaces/ants/tests/test_segmentation.py +++ b/nipype/interfaces/ants/tests/test_segmentation.py @@ -42,8 +42,18 @@ def test_LaplacianThickness_defaults(change_dir, create_lt): def test_LaplacianThickness_wrongargs(change_dir, create_lt): lt = create_lt + lt.inputs.tolerance = 0.001 + with pytest.raises(ValueError, match=r".* requires a value for input 'sulcus_prior' .*"): + lt.cmdline + lt.inputs.sulcus_prior = 0.15 + with pytest.raises(ValueError, match=r".* requires a value for input 'dT' .*"): + lt.cmdline + lt.inputs.dT = 0.01 + with pytest.raises(ValueError, match=r".* requires a value for input 'prior_thickness' .*"): + lt.cmdline lt.inputs.prior_thickness = 5.9 - # 500 must not be placed as smooth_param - assert lt.cmdline != 'LaplacianThickness functional.nii diffusion_weighted.nii functional_thickness.nii 5.9' - # probably should have just raised an exception that "smooth_param" - # should also be defined \ No newline at end of file + with pytest.raises(ValueError, match=r".* requires a value for input 'smooth_param' .*"): + lt.cmdline + lt.inputs.smooth_param = 4.5 + assert lt.cmdline == 'LaplacianThickness functional.nii diffusion_weighted.nii ' \ + 'functional_thickness.nii 4.5 5.9 0.01 0.15 0.001' From 1625bdbf66b1ae20565e6261334c6b57630c7785 Mon Sep 17 00:00:00 2001 From: rciric Date: Fri, 15 Feb 2019 10:58:46 -0800 Subject: [PATCH 0135/1665] update per @oesteban 's review --- nipype/algorithms/confounds.py | 101 +++++++++++++++------------------ 1 file changed, 46 insertions(+), 55 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index 6a03d5830c..b8d344ab03 100644 --- a/nipype/algorithms/confounds.py +++ 
b/nipype/algorithms/confounds.py @@ -8,13 +8,9 @@ absolute_import) from builtins import range -# Py2 compat: http://python-future.org/compatible_idioms.html#collections-counter-and-ordereddict -from future import standard_library -standard_library.install_aliases() -from collections import OrderedDict - import os import os.path as op +from collections import OrderedDict import nibabel as nb import numpy as np @@ -615,44 +611,41 @@ def _run_interface(self, runtime): save_pre_filter = self.inputs.save_pre_filter if save_pre_filter: - if isinstance(save_pre_filter, bool): + self._results['pre_filter_file'] = save_pre_filter + if save_pre_filter is True: pre_filter_file = os.path.abspath('pre_filter.tsv') - else: - pre_filter_file = save_pre_filter - self._results['pre_filter_file'] = pre_filter_file - if self.inputs.pre_filter and save_pre_filter: - ftype = { - 'polynomial': 'Legendre', - 'cosine': 'Cosine' - }[self.inputs.pre_filter] - ncols = filter_basis.shape[1] if filter_basis.size > 0 else 0 - header = ['{}{:02d}'.format(ftype, i) for i in range(ncols)] - if skip_vols: - old_basis = filter_basis - # nrows defined above - filter_basis = np.zeros( - (nrows, ncols + skip_vols), dtype=filter_basis.dtype) - if old_basis.size > 0: - filter_basis[skip_vols:, :ncols] = old_basis - filter_basis[:skip_vols, -skip_vols:] = np.eye(skip_vols) - header.extend([ - 'NonSteadyStateOutlier{:02d}'.format(i) - for i in range(skip_vols) - ]) - np.savetxt( - pre_filter_file, - filter_basis, - fmt=b'%.10f', - delimiter='\t', - header='\t'.join(header), - comments='') - - save_metadata = self.inputs.save_metadata - if save_metadata: - if isinstance(save_metadata, bool): + if self.inputs.pre_filter: + ftype = { + 'polynomial': 'Legendre', + 'cosine': 'Cosine' + }[self.inputs.pre_filter] + ncols = filter_basis.shape[1] if filter_basis.size > 0 else 0 + header = ['{}{:02d}'.format(ftype, i) for i in range(ncols)] + if skip_vols: + old_basis = filter_basis + # nrows defined above + filter_basis = np.zeros( + (nrows, ncols + skip_vols), dtype=filter_basis.dtype) + if old_basis.size > 0: + filter_basis[skip_vols:, :ncols] = old_basis + filter_basis[:skip_vols, -skip_vols:] = np.eye(skip_vols) + header.extend([ + 'NonSteadyStateOutlier{:02d}'.format(i) + for i in range(skip_vols) + ]) + np.savetxt( + self._results['pre_filter_file'], + filter_basis, + fmt=b'%.10f', + delimiter='\t', + header='\t'.join(header), + comments='') + + metadata_file = self.inputs.save_metadata + if metadata_file: + self._results['metadata_file'] = metadata_file + if metadata_file is True: metadata_file = os.path.abspath('component_metadata.tsv') - else: - metadata_file = save_metadata components_names = np.empty(len(metadata['mask']), dtype='object_') retained = np.where(metadata['retained']) @@ -660,7 +653,6 @@ def _run_interface(self, runtime): components_names[retained] = components_header components_names[not_retained] = ([ 'dropped{}'.format(i) for i in range(len(not_retained[0]))]) - self._results['metadata_file'] = metadata_file with open(metadata_file, 'w') as f: f.write('{}\t{}\t{}\t{}\t{}\n'.format('component', *list(metadata.keys()))) @@ -1200,7 +1192,7 @@ def combine_mask_files(mask_files, mask_method=None, mask_index=None): def compute_noise_components(imgseries, mask_images, components_criterion=0.5, filter_type=False, degree=0, period_cut=128, repetition_time=None, failure_mode='error', - mask_names=''): + mask_names=None): """Compute the noise components from the imgseries for each mask Parameters @@ -1245,9 +1237,8 @@ def 
compute_noise_components(imgseries, mask_images, components_criterion=0.5, basis = np.array([]) if components_criterion == 'all': components_criterion = -1 - if not mask_names: - mask_names = range(len(mask_images)) - for i, img in zip(mask_names, mask_images): + mask_names = mask_names or range(len(mask_images)) + for name, img in zip(mask_names, mask_images): mask = img.get_data().astype(np.bool).squeeze() if imgseries.shape[:3] != mask.shape: raise ValueError( @@ -1267,20 +1258,20 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, voxel_timecourses, repetition_time, period_cut) elif filter_type in ('polynomial', False): # from paper: - # "The constant and linear trends of the columns in the matrix M - # were removed [prior to ...]" + # "The constant and linear trends of the columns in the matrix M were + # removed [prior to ...]" voxel_timecourses, basis = regress_poly(degree, voxel_timecourses) - # "Voxel time series from the noise ROI (either anatomical or tSTD) - # were placed in a matrix M of size Nxm, with time along the row - # dimension and voxels along the column dimension." + # "Voxel time series from the noise ROI (either anatomical or tSTD) were + # placed in a matrix M of size Nxm, with time along the row dimension + # and voxels along the column dimension." M = voxel_timecourses.T # "[... were removed] prior to column-wise variance normalization." M = M / _compute_tSTD(M, 1.) - # "The covariance matrix C = MMT was constructed and decomposed into - # its principal components using a singular value decomposition." + # "The covariance matrix C = MMT was constructed and decomposed into its + # principal components using a singular value decomposition." try: u, s, _ = fallback_svd(M, full_matrices=False) except np.linalg.LinAlgError: @@ -1308,7 +1299,7 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, if components is None: components = u[:, :num_components] metadata = OrderedDict() - metadata['mask'] = [i] * len(s) + metadata['mask'] = [name] * len(s) metadata['singular_value'] = s metadata['variance_explained'] = variance_explained metadata['cumulative_variance_explained'] = ( @@ -1316,7 +1307,7 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, metadata['retained'] = [i < num_components for i in range(len(s))] else: components = np.hstack((components, u[:, :num_components])) - metadata['mask'] = metadata['mask'] + [i] * len(s) + metadata['mask'] = metadata['mask'] + [name] * len(s) metadata['singular_value'] = ( np.hstack((metadata['singular_value'], s))) metadata['variance_explained'] = ( From 2ff466e68d1ae01a5f88dc8c9b75e8ca8ed95399 Mon Sep 17 00:00:00 2001 From: Yaroslav Halchenko Date: Fri, 15 Feb 2019 14:25:28 -0500 Subject: [PATCH 0136/1665] BF: regenerated test_auto_LaplacianThickness using wonderfully long running tools/checkspecs.py --- .../ants/tests/test_auto_LaplacianThickness.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py b/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py index 1bb82f0e33..608ba10889 100644 --- a/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py +++ b/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py @@ -7,8 +7,9 @@ def test_LaplacianThickness_inputs(): input_map = dict( args=dict(argstr='%s', ), dT=dict( - argstr='%f', + argstr='%s', position=6, + requires=['prior_thickness'], ), environ=dict( nohash=True, @@ -39,20 +40,23 
@@ def test_LaplacianThickness_inputs(): position=3, ), prior_thickness=dict( - argstr='%f', + argstr='%s', position=5, + requires=['smooth_param'], ), smooth_param=dict( - argstr='%f', + argstr='%s', position=4, ), sulcus_prior=dict( - argstr='%f', + argstr='%s', position=7, + requires=['dT'], ), tolerance=dict( - argstr='%f', + argstr='%s', position=8, + requires=['sulcus_prior'], ), ) inputs = LaplacianThickness.input_spec() From 9afb3f5d92afe26676c70c28c049fda701ec1397 Mon Sep 17 00:00:00 2001 From: rciric Date: Fri, 15 Feb 2019 11:35:01 -0800 Subject: [PATCH 0137/1665] assign output to _results --- nipype/algorithms/confounds.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index b8d344ab03..b02a7b1201 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -645,7 +645,8 @@ def _run_interface(self, runtime): if metadata_file: self._results['metadata_file'] = metadata_file if metadata_file is True: - metadata_file = os.path.abspath('component_metadata.tsv') + self._results['metadata_file'] = ( + os.path.abspath('component_metadata.tsv')) components_names = np.empty(len(metadata['mask']), dtype='object_') retained = np.where(metadata['retained']) From 689d064bf42b3660e49175ab1b79adc028edbdf8 Mon Sep 17 00:00:00 2001 From: rciric Date: Fri, 15 Feb 2019 11:43:44 -0800 Subject: [PATCH 0138/1665] assign output to _results --- nipype/algorithms/confounds.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index b02a7b1201..d7f38176cb 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -613,7 +613,7 @@ def _run_interface(self, runtime): if save_pre_filter: self._results['pre_filter_file'] = save_pre_filter if save_pre_filter is True: - pre_filter_file = os.path.abspath('pre_filter.tsv') + self._results['pre_filter_file'] = os.path.abspath('pre_filter.tsv') if self.inputs.pre_filter: ftype = { 'polynomial': 'Legendre', @@ -654,7 +654,7 @@ def _run_interface(self, runtime): components_names[retained] = components_header components_names[not_retained] = ([ 'dropped{}'.format(i) for i in range(len(not_retained[0]))]) - with open(metadata_file, 'w') as f: + with open(self._results['metadata_file'], 'w') as f: f.write('{}\t{}\t{}\t{}\t{}\n'.format('component', *list(metadata.keys()))) for i in zip(components_names, *metadata.values()): From f390bc64ca0db0a2a320f1f97a4e9a62b2202f3e Mon Sep 17 00:00:00 2001 From: oesteban Date: Fri, 15 Feb 2019 17:51:22 -0800 Subject: [PATCH 0139/1665] some fixes --- nipype/algorithms/confounds.py | 126 +++++++++++++++++---------------- 1 file changed, 66 insertions(+), 60 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index d7f38176cb..871a512b34 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -388,7 +388,8 @@ class CompCorInputSpec(BaseInterfaceInputSpec): requires=['mask_files'], desc=('Position of mask in `mask_files` to use - ' 'first is the default.')) - mask_names = traits.List(traits.Str, + mask_names = traits.List( + traits.Str, desc='Names for provided masks (for printing into metadata). 
' 'If provided, it must be as long as the final mask list ' '(after any merge and indexing operations).') @@ -396,16 +397,17 @@ class CompCorInputSpec(BaseInterfaceInputSpec): 'components_file.txt', usedefault=True, desc='Filename to store physiological components') - num_components = traits.Either('all', traits.Range(low=1), - xor=['variance_threshold'], + num_components = traits.Either( + 'all', traits.Range(low=1), xor=['variance_threshold'], desc='Number of components to return from the decomposition. If ' '`num_components` is `all`, then all components will be ' 'retained.') - # 6 for BOLD, 4 for ASL - # automatically instantiated to 6 in CompCor below if neither - # `num_components` nor `variance_threshold` is defined (for - # backward compatibility) - variance_threshold = traits.Float(xor=['num_components'], + # 6 for BOLD, 4 for ASL + # automatically instantiated to 6 in CompCor below if neither + # `num_components` nor `variance_threshold` is defined (for + # backward compatibility) + variance_threshold = traits.Range( + low=0.0, high=1.0, exclude_low=True, exclude_high=True, xor=['num_components'], desc='Select the number of components to be returned automatically ' 'based on their ability to explain variance in the dataset. ' '`variance_threshold` is a fractional value between 0 and 1; ' @@ -438,9 +440,11 @@ class CompCorInputSpec(BaseInterfaceInputSpec): desc='Repetition time (TR) of series - derived from image header if ' 'unspecified') save_pre_filter = traits.Either( - traits.Bool, File, desc='Save pre-filter basis as text file') + traits.Bool, File, default=False, usedefault=True, + desc='Save pre-filter basis as text file') save_metadata = traits.Either( - traits.Bool, File, desc='Save component metadata as text file') + traits.Bool, File, default=False, usedefault=True, + desc='Save component metadata as text file') ignore_initial_volumes = traits.Range( low=0, usedefault=True, @@ -497,20 +501,20 @@ class CompCor(SimpleInterface): input_spec = CompCorInputSpec output_spec = CompCorOutputSpec references_ = [{ + 'tags': ['method', 'implementation'], 'entry': - BibTeX( - "@article{compcor_2007," - "title = {A component based noise correction method (CompCor) for BOLD and perfusion based}," - "volume = {37}," - "number = {1}," - "doi = {10.1016/j.neuroimage.2007.04.042}," - "urldate = {2016-08-13}," - "journal = {NeuroImage}," - "author = {Behzadi, Yashar and Restom, Khaled and Liau, Joy and Liu, Thomas T.}," - "year = {2007}," - "pages = {90-101},}"), - 'tags': ['method', 'implementation'] - }] + BibTeX("""\ +@article{compcor_2007, + title = {A component based noise correction method (CompCor) for BOLD and perfusion based}, + volume = {37}, + number = {1}, + doi = {10.1016/j.neuroimage.2007.04.042}, + urldate = {2016-08-13}, + journal = {NeuroImage}, + author = {Behzadi, Yashar and Restom, Khaled and Liau, Joy and Liu, Thomas T.}, + year = {2007}, + pages = {90-101} +}""")}] def __init__(self, *args, **kwargs): ''' exactly the same as compcor except the header ''' @@ -606,57 +610,60 @@ def _run_interface(self, runtime): delimiter='\t', header='\t'.join(components_header), comments='') - self._results['components_file'] = os.path.abspath( - self.inputs.components_file) + self._results['components_file'] = os.path.join( + runtime.cwd, self.inputs.components_file) + + save_pre_filter = False + if self.inputs.pre_filter in ['polynomial', 'cosine']: + save_pre_filter = self.inputs.save_pre_filter - save_pre_filter = self.inputs.save_pre_filter if save_pre_filter: 
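# [Hedged usage sketch, not part of this patch] The inputs introduced above let a
# user pick components by explained variance and label each mask in the metadata
# file; file names here are hypothetical.
from nipype.algorithms.confounds import ACompCor

acc = ACompCor()
acc.inputs.realigned_file = 'func_preproc.nii.gz'
acc.inputs.mask_files = ['wm_mask.nii.gz', 'csf_mask.nii.gz']
acc.inputs.merge_method = 'none'            # keep the masks separate
acc.inputs.mask_names = ['WM', 'CSF']       # recorded in the metadata file
acc.inputs.variance_threshold = 0.5         # mutually exclusive with num_components
acc.inputs.save_metadata = True             # writes component_metadata.tsv
res = acc.run()                             # see res.outputs.components_file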
self._results['pre_filter_file'] = save_pre_filter if save_pre_filter is True: - self._results['pre_filter_file'] = os.path.abspath('pre_filter.tsv') - if self.inputs.pre_filter: - ftype = { - 'polynomial': 'Legendre', - 'cosine': 'Cosine' - }[self.inputs.pre_filter] - ncols = filter_basis.shape[1] if filter_basis.size > 0 else 0 - header = ['{}{:02d}'.format(ftype, i) for i in range(ncols)] - if skip_vols: - old_basis = filter_basis - # nrows defined above - filter_basis = np.zeros( - (nrows, ncols + skip_vols), dtype=filter_basis.dtype) - if old_basis.size > 0: - filter_basis[skip_vols:, :ncols] = old_basis - filter_basis[:skip_vols, -skip_vols:] = np.eye(skip_vols) - header.extend([ - 'NonSteadyStateOutlier{:02d}'.format(i) - for i in range(skip_vols) - ]) - np.savetxt( - self._results['pre_filter_file'], - filter_basis, - fmt=b'%.10f', - delimiter='\t', - header='\t'.join(header), - comments='') + self._results['pre_filter_file'] = os.path.join( + runtime.cwd, 'pre_filter.tsv') + + ftype = { + 'polynomial': 'Legendre', + 'cosine': 'Cosine' + }[self.inputs.pre_filter] + ncols = filter_basis.shape[1] if filter_basis.size > 0 else 0 + header = ['{}{:02d}'.format(ftype, i) for i in range(ncols)] + if skip_vols: + old_basis = filter_basis + # nrows defined above + filter_basis = np.zeros( + (nrows, ncols + skip_vols), dtype=filter_basis.dtype) + if old_basis.size > 0: + filter_basis[skip_vols:, :ncols] = old_basis + filter_basis[:skip_vols, -skip_vols:] = np.eye(skip_vols) + header.extend([ + 'NonSteadyStateOutlier{:02d}'.format(i) + for i in range(skip_vols) + ]) + np.savetxt( + self._results['pre_filter_file'], + filter_basis, + fmt=b'%.10f', + delimiter='\t', + header='\t'.join(header), + comments='') metadata_file = self.inputs.save_metadata if metadata_file: self._results['metadata_file'] = metadata_file if metadata_file is True: self._results['metadata_file'] = ( - os.path.abspath('component_metadata.tsv')) + os.path.join(runtime.cwd, 'component_metadata.tsv')) components_names = np.empty(len(metadata['mask']), - dtype='object_') + dtype='object_') retained = np.where(metadata['retained']) not_retained = np.where(np.logical_not(metadata['retained'])) components_names[retained] = components_header components_names[not_retained] = ([ 'dropped{}'.format(i) for i in range(len(not_retained[0]))]) with open(self._results['metadata_file'], 'w') as f: - f.write('{}\t{}\t{}\t{}\t{}\n'.format('component', - *list(metadata.keys()))) + f.write('\t'.join(['component'] + list(metadata.keys())) + '\n') for i in zip(components_names, *metadata.values()): f.write('{0[0]}\t{0[1]}\t{0[2]:.10f}\t' '{0[3]:.10f}\t{0[4]:.10f}\n'.format(i)) @@ -1317,9 +1324,8 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, metadata['cumulative_variance_explained'] = ( np.hstack((metadata['cumulative_variance_explained'], cumulative_variance_explained))) - metadata['retained'] = (metadata['retained'] - + [i < num_components - for i in range(len(s))]) + metadata['retained'] = ( + metadata['retained'] + [i < num_components for i in range(len(s))]) if components is None: if failure_mode == 'error': raise ValueError('No components found') From ad3d4401b35086549c954d341c4050c630c5e79d Mon Sep 17 00:00:00 2001 From: oesteban Date: Fri, 15 Feb 2019 18:19:19 -0800 Subject: [PATCH 0140/1665] testing pickling of variance_threshold --- nipype/workflows/rsfmri/fsl/resting.py | 1 - .../rsfmri/fsl/tests/test_resting.py | 24 ++++++++++--------- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git 
a/nipype/workflows/rsfmri/fsl/resting.py b/nipype/workflows/rsfmri/fsl/resting.py index 12d44a83cf..176a0ed6f7 100644 --- a/nipype/workflows/rsfmri/fsl/resting.py +++ b/nipype/workflows/rsfmri/fsl/resting.py @@ -3,7 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import (print_function, division, unicode_literals, absolute_import) -from builtins import str from ....interfaces import fsl as fsl # fsl from ....interfaces import utility as util # utility diff --git a/nipype/workflows/rsfmri/fsl/tests/test_resting.py b/nipype/workflows/rsfmri/fsl/tests/test_resting.py index 799041df37..2179a8b93e 100644 --- a/nipype/workflows/rsfmri/fsl/tests/test_resting.py +++ b/nipype/workflows/rsfmri/fsl/tests/test_resting.py @@ -78,7 +78,8 @@ def setup_class(self, tmpdir): def test_create_resting_preproc(self, mock_node, mock_realign_wf): wflow = create_resting_preproc(base_dir=os.getcwd()) - wflow.inputs.inputspec.num_noise_components = self.num_noise_components + # wflow.inputs.inputspec.num_noise_components = self.num_noise_components + wflow.inputs.compcor.variance_threshold = 0.15 mask_in = wflow.get_node('threshold').inputs mask_in.out_file = self.in_filenames['mask_file'] func_in = wflow.get_node('slicetimer').inputs @@ -89,16 +90,17 @@ def test_create_resting_preproc(self, mock_node, mock_realign_wf): # assert expected_file = os.path.abspath(self.out_filenames['components_file']) with open(expected_file, 'r') as components_file: - components_data = [line.split() for line in components_file] - num_got_components = len(components_data) - assert (num_got_components == self.num_noise_components - or num_got_components == self.fake_data.shape[3]) - first_two = [row[:2] for row in components_data[1:]] - assert first_two == [['-0.5172356654', '-0.6973053243'], [ - '0.2574722644', '0.1645270737' - ], ['-0.0806469590', - '0.5156853779'], ['0.7187176051', '-0.3235820287'], - ['-0.3783072450', '0.3406749013']] + components_data = [line.split() + for line in components_file.read().splitlines()] + num_got_components = len(components_data) + assert (num_got_components == self.num_noise_components or + num_got_components == self.fake_data.shape[3]) + first_two = [row[:2] for row in components_data[1:]] + assert first_two == [['-0.5172356654', '-0.6973053243'], + ['0.2574722644', '0.1645270737'], + ['-0.0806469590', '0.5156853779'], + ['0.7187176051', '-0.3235820287'], + ['-0.3783072450', '0.3406749013']] fake_data = np.array([[[[2, 4, 3, 9, 1], [3, 6, 4, 7, 4]], [[8, 3, 4, 6, 2], [4, 0, 4, 4, 2]]], From fd41b74a5432c241c47914ce4bdf22fb20feceaf Mon Sep 17 00:00:00 2001 From: oesteban Date: Fri, 15 Feb 2019 18:35:33 -0800 Subject: [PATCH 0141/1665] ``traits.Range`` cannot be pickled with traits>=5 and python 2.7 --- nipype/info.py | 3 ++- nipype/workflows/rsfmri/fsl/tests/test_resting.py | 3 +-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/nipype/info.py b/nipype/info.py index 7b1a757789..c6ead490b5 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -142,7 +142,8 @@ def get_nipype_gitversion(): 'numpy>=%s ; python_version >= "3.7"' % NUMPY_MIN_VERSION_37, 'python-dateutil>=%s' % DATEUTIL_MIN_VERSION, 'scipy>=%s' % SCIPY_MIN_VERSION, - 'traits>=%s' % TRAITS_MIN_VERSION, + 'traits>=%s,<%s ; python_version == "2.7"' % (TRAITS_MIN_VERSION, '5.0.0'), + 'traits>=%s ; python_version >= "3.0"' % TRAITS_MIN_VERSION, 'future>=%s' % FUTURE_MIN_VERSION, 'simplejson>=%s' % SIMPLEJSON_MIN_VERSION, 'prov>=%s' % PROV_VERSION, diff --git a/nipype/workflows/rsfmri/fsl/tests/test_resting.py 
b/nipype/workflows/rsfmri/fsl/tests/test_resting.py index 2179a8b93e..a0ba2439a7 100644 --- a/nipype/workflows/rsfmri/fsl/tests/test_resting.py +++ b/nipype/workflows/rsfmri/fsl/tests/test_resting.py @@ -78,8 +78,7 @@ def setup_class(self, tmpdir): def test_create_resting_preproc(self, mock_node, mock_realign_wf): wflow = create_resting_preproc(base_dir=os.getcwd()) - # wflow.inputs.inputspec.num_noise_components = self.num_noise_components - wflow.inputs.compcor.variance_threshold = 0.15 + wflow.inputs.inputspec.num_noise_components = self.num_noise_components mask_in = wflow.get_node('threshold').inputs mask_in.out_file = self.in_filenames['mask_file'] func_in = wflow.get_node('slicetimer').inputs From a742c9ce7c57ce5547910e0aceacde9b185c48df Mon Sep 17 00:00:00 2001 From: rciric Date: Sat, 16 Feb 2019 00:02:17 -0800 Subject: [PATCH 0142/1665] pacify codacy --- .../workflows/rsfmri/fsl/tests/test_resting.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/nipype/workflows/rsfmri/fsl/tests/test_resting.py b/nipype/workflows/rsfmri/fsl/tests/test_resting.py index a0ba2439a7..a6383c6f79 100644 --- a/nipype/workflows/rsfmri/fsl/tests/test_resting.py +++ b/nipype/workflows/rsfmri/fsl/tests/test_resting.py @@ -92,14 +92,16 @@ def test_create_resting_preproc(self, mock_node, mock_realign_wf): components_data = [line.split() for line in components_file.read().splitlines()] num_got_components = len(components_data) - assert (num_got_components == self.num_noise_components or - num_got_components == self.fake_data.shape[3]) + if not (num_got_components == self.num_noise_components or + num_got_components == self.fake_data.shape[3]): + raise AssertionError() first_two = [row[:2] for row in components_data[1:]] - assert first_two == [['-0.5172356654', '-0.6973053243'], - ['0.2574722644', '0.1645270737'], - ['-0.0806469590', '0.5156853779'], - ['0.7187176051', '-0.3235820287'], - ['-0.3783072450', '0.3406749013']] + if first_two != [['-0.5172356654', '-0.6973053243'], + ['0.2574722644', '0.1645270737'], + ['-0.0806469590', '0.5156853779'], + ['0.7187176051', '-0.3235820287'], + ['-0.3783072450', '0.3406749013']]: + raise AssertionError() fake_data = np.array([[[[2, 4, 3, 9, 1], [3, 6, 4, 7, 4]], [[8, 3, 4, 6, 2], [4, 0, 4, 4, 2]]], From c08285c4d40af1e3ca13fb09a4faec8ec5d76fdf Mon Sep 17 00:00:00 2001 From: Yaroslav Halchenko Date: Tue, 19 Feb 2019 09:10:53 -0500 Subject: [PATCH 0143/1665] ENH: minor - compute non degenerate stddev map once --- nipype/algorithms/confounds.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index 79c0b96f4e..4274e2f9fc 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -799,8 +799,9 @@ def _run_interface(self, runtime): meanimg = np.mean(data, axis=3) stddevimg = np.std(data, axis=3) tsnr = np.zeros_like(meanimg) - tsnr[stddevimg > 1.e-3] = meanimg[stddevimg > 1.e-3] / stddevimg[ - stddevimg > 1.e-3] + stddevimg_nonzero = stddevimg > 1.e-3 + tsnr[stddevimg_nonzero] = meanimg[stddevimg_nonzero] / stddevimg[ + stddevimg_nonzero] img = nb.Nifti1Image(tsnr, img.affine, header) nb.save(img, op.abspath(self.inputs.tsnr_file)) img = nb.Nifti1Image(meanimg, img.affine, header) From 518a48930aa732513ebb3b93bc3b47031329984c Mon Sep 17 00:00:00 2001 From: rciric Date: Thu, 21 Feb 2019 10:35:09 -0800 Subject: [PATCH 0144/1665] revert unnecessary squeeze, correct docs --- nipype/algorithms/confounds.py | 4 ++-- 1 file changed, 2 
insertions(+), 2 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index 871a512b34..a85aa786e8 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -1205,7 +1205,7 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, Parameters ---------- - imgseries: nibabel NIfTI object + imgseries: nibabel image Time series data to be decomposed. mask_images: list List of nibabel images. Time series data from `img_series` is subset @@ -1247,7 +1247,7 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, components_criterion = -1 mask_names = mask_names or range(len(mask_images)) for name, img in zip(mask_names, mask_images): - mask = img.get_data().astype(np.bool).squeeze() + mask = img.get_data().astype(np.bool) if imgseries.shape[:3] != mask.shape: raise ValueError( 'Inputs for CompCor, timeseries and mask, do not have ' From deceb95b089ca99c7fca9f7fbf9112b245872093 Mon Sep 17 00:00:00 2001 From: rciric Date: Thu, 21 Feb 2019 18:40:56 -0800 Subject: [PATCH 0145/1665] revise in accordance with @effigies review --- nipype/algorithms/confounds.py | 29 +++++++++++++++---- .../rsfmri/fsl/tests/test_resting.py | 16 +++++----- 2 files changed, 30 insertions(+), 15 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index a85aa786e8..017a9d1c36 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -1224,12 +1224,26 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, 'polynomial' - Legendre polynomial basis 'cosine' - Discrete cosine (DCT) basis False - None (mean-removal only) + failure_mode: str + Action to be taken in the event that any decomposition fails to + identify any components. `error` indicates that the routine should + raise an exception and exit, while any other value indicates that the + routine should return a matrix of NaN values equal in size to the + requested decomposition matrix. + mask_names: list or None + List of names for each image in `mask_images`. This should be equal in + length to `mask_images`, with the ith element of `mask_names` naming + the ith element of `mask_images`. Filter options: - degree: order of polynomial used to remove trends from the timeseries - period_cut: minimum period (in sec) for DCT high-pass filter - repetition_time: time (in sec) between volume acquisitions + degree: int + Order of polynomial used to remove trends from the timeseries + period_cut: float + Minimum period (in sec) for DCT high-pass filter + repetition_time: float + Time (in sec) between volume acquisitions. This must be defined if + the `filter_type` is `cosine`. 
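As a hedged numeric illustration of selecting components by a fractional `components_criterion` (the singular values below are made up; the first two lines mirror the variance bookkeeping in this function):

    import numpy as np

    s = np.array([4.0, 2.0, 1.0, 0.5])               # hypothetical singular values
    variance_explained = (s ** 2) / np.sum(s ** 2)   # [0.753, 0.188, 0.047, 0.012]
    cumulative = np.cumsum(variance_explained)       # [0.753, 0.941, 0.988, 1.000]
    # With a criterion of 0.9, two components suffice to explain 90% of variance;
    # searchsorted is just one simple way to count them for this illustration.
    num_components = int(np.searchsorted(cumulative, 0.9) + 1)   # -> 2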
Returns ------- @@ -1262,6 +1276,9 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, # Currently support Legendre-polynomial or cosine or detrending # With no filter, the mean is nonetheless removed (poly w/ degree 0) if filter_type == 'cosine': + if repetition_time is None: + raise ValueError( + 'Repetition time must be provided for cosine filter') voxel_timecourses, basis = cosine_filter( voxel_timecourses, repetition_time, period_cut) elif filter_type in ('polynomial', False): @@ -1286,8 +1303,8 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, if failure_mode == 'error': raise if components_criterion >= 1: - u = np.empty((M.shape[0], components_criterion), - dtype=np.float32) * np.nan + u = np.full((M.shape[0], components_criterion), + np.nan, dtype=np.float32) else: continue @@ -1329,7 +1346,7 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, if components is None: if failure_mode == 'error': raise ValueError('No components found') - components = np.full((M.shape[0], num_components), np.NaN) + components = np.full((M.shape[0], num_components), np.nan) return components, basis, metadata diff --git a/nipype/workflows/rsfmri/fsl/tests/test_resting.py b/nipype/workflows/rsfmri/fsl/tests/test_resting.py index a6383c6f79..a0ba2439a7 100644 --- a/nipype/workflows/rsfmri/fsl/tests/test_resting.py +++ b/nipype/workflows/rsfmri/fsl/tests/test_resting.py @@ -92,16 +92,14 @@ def test_create_resting_preproc(self, mock_node, mock_realign_wf): components_data = [line.split() for line in components_file.read().splitlines()] num_got_components = len(components_data) - if not (num_got_components == self.num_noise_components or - num_got_components == self.fake_data.shape[3]): - raise AssertionError() + assert (num_got_components == self.num_noise_components or + num_got_components == self.fake_data.shape[3]) first_two = [row[:2] for row in components_data[1:]] - if first_two != [['-0.5172356654', '-0.6973053243'], - ['0.2574722644', '0.1645270737'], - ['-0.0806469590', '0.5156853779'], - ['0.7187176051', '-0.3235820287'], - ['-0.3783072450', '0.3406749013']]: - raise AssertionError() + assert first_two == [['-0.5172356654', '-0.6973053243'], + ['0.2574722644', '0.1645270737'], + ['-0.0806469590', '0.5156853779'], + ['0.7187176051', '-0.3235820287'], + ['-0.3783072450', '0.3406749013']] fake_data = np.array([[[[2, 4, 3, 9, 1], [3, 6, 4, 7, 4]], [[8, 3, 4, 6, 2], [4, 0, 4, 4, 2]]], From bba3eeb99cb45956e1044bb9516146ecf89fe1e4 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Fri, 22 Feb 2019 11:50:38 -0500 Subject: [PATCH 0146/1665] DOC: 1.1.9 changelog --- doc/changelog/1.X.X-changelog | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/doc/changelog/1.X.X-changelog b/doc/changelog/1.X.X-changelog index bfa8c51298..f72659162d 100644 --- a/doc/changelog/1.X.X-changelog +++ b/doc/changelog/1.X.X-changelog @@ -1,3 +1,17 @@ +1.1.9 (February 25, 2019) +========================= + +##### [Full changelog](https://github.com/nipy/nipype/milestone/30?closed=1) + + * FIX: Make positional arguments to LaplacianThickness require previous argument (https://github.com/nipy/nipype/pull/2848) + * FIX: Import math and csv modules for bids_gen_info (https://github.com/nipy/nipype/pull/2881) + * FIX: Ensure outputs can be listed in camino.ProcStreamlines by defining instance variable (https://github.com/nipy/nipype/pull/2739) + * ENH: Threshold stddev once only in TSNR (https://github.com/nipy/nipype/pull/2883) + * ENH: Add workbench.CiftiSmooth interface (https://github.com/nipy/nipype/pull/2871) + * DOC: Replace initialism typo in comment with intended phrase (https://github.com/nipy/nipype/pull/2875) + * DOC: Fix typos in ANTs Registration input documentation (https://github.com/nipy/nipype/pull/2869) + + 1.1.8 (January 28, 2019) ======================== From fba0cd7f311c0c039a89e60039af04d7753fc1a6 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Fri, 22 Feb 2019 11:51:35 -0500 Subject: [PATCH 0147/1665] MNT: Version 1.1.9 --- doc/conf.py | 2 +- nipype/info.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index 47b229ac0c..3a45d0338a 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -82,7 +82,7 @@ # The short X.Y version. version = nipype.__version__ # The full version, including alpha/beta/rc tags. -release = "1.1.8" +release = "1.1.9" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/nipype/info.py b/nipype/info.py index fcfe81d433..88e2dcd8d8 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -11,7 +11,7 @@ # full release. '.dev' as a version_extra string means this is a development # version # Remove -dev for release -__version__ = '1.1.9-dev' +__version__ = '1.1.9' def get_nipype_gitversion(): From 8cde64a11ff555ef536764224172b57b06b08b8e Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Fri, 22 Feb 2019 12:06:08 -0500 Subject: [PATCH 0148/1665] MNT: Update .mailmap, .zenodo.json --- .mailmap | 5 +++++ .zenodo.json | 34 +++++++++++++++++----------------- 2 files changed, 22 insertions(+), 17 deletions(-) diff --git a/.mailmap b/.mailmap index 59c81dc119..d58a4ea5ac 100644 --- a/.mailmap +++ b/.mailmap @@ -16,6 +16,7 @@ Anisha Keshavan Anisha Keshavan Anisha Keshavan akeshavan Ariel Rokem arokem Ariel Rokem arokem +Ariel Rokem Ariel Rokem Arman Eshaghi armaneshaghi Ashely Gillman Ashley Gillman Ashely Gillman ashgillman @@ -73,6 +74,7 @@ Gael Varoquaux GaelVaroquaux Gael Varoquaux GaelVaroquaux Gavin Cooper gjcooper Gilles de Hollander Gilles86 +Gio Piantoni Gio at UMCU Hans Johnson Hans Johnson Hans Johnson hjmjohnson Horea Christian Horea Christian @@ -91,6 +93,9 @@ Jessica Forbes jessicaforbes GUILLON Jeremy Joerg Stadler Joerg Stadler Joerg Stadler Jörg Stadler +Joerg Stadler Joerg Stadler +John Lee leej3 +John Lee john anthony lee Joke Durnez jokedurnez Josh Warner JDWarner Josh Warner Josh Warner (Mac) diff --git a/.zenodo.json b/.zenodo.json index 4d25cefa52..3b89fbdc54 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -141,21 +141,26 @@ { "name": "Berleant, Shoshana" }, - { - "affiliation": "Institute for Biomedical Engineering, ETH and University of Zurich", - "name": "Horea, Christian", - "orcid": "0000-0001-7037-2449" - }, { "affiliation": "Dartmouth College: Hanover, NH, United States", "name": "Halchenko, Yaroslav O.", "orcid": "0000-0003-3456-2493" }, + { + "affiliation": "Institute for Biomedical Engineering, ETH and University of Zurich", + "name": "Christian, Horea", + "orcid": "0000-0001-7037-2449" + }, { "affiliation": "UC San Diego", "name": "Cipollini, Ben", "orcid": "0000-0002-7782-0790" }, + { + "affiliation": "ARAMIS LAB, Brain and Spine Institute (ICM), Paris, France.", + "name": "Guillon, Je\u0301re\u0301my", + "orcid": "0000-0002-2672-7510" + }, { "affiliation": "Montreal Neurological Institute and Hospital", "name": "Markello, Ross", @@ -186,7 +191,7 @@ }, { "affiliation": "Athena EPI, Inria Sophia-Antipolis", - "name": "Wassermann , Demian", + "name": "Wassermann, Demian", "orcid": "0000-0001-5194-6056" }, { @@ -196,7 +201,7 @@ }, { "affiliation": "Vrije Universiteit, Amsterdam", - "name": "Gilles de Hollander", + "name": "de Hollander, Gilles", "orcid": "0000-0003-1988-5091" }, { @@ -358,6 +363,11 @@ "affiliation": "The University of Sydney", "name": "Liu, Siqi" }, + { + "affiliation": "Leibniz Institute for Neurobiology", + "name": "Stadler, J\u00f6rg", + "orcid": "0000-0003-4313-129X" + }, { "affiliation": "University of Pennsylvania", "name": "Kahn, Ari E.", @@ -377,11 +387,6 @@ "affiliation": "MPI CBS Leipzig, Germany", "name": "Lampe, Leonie" }, - { - "affiliation": "Leibniz Institute for Neurobiology", - "name": "Stadler, J\u00f6rg", - "orcid": "0000-0003-4313-129X" - }, { "affiliation": "State Key Laboratory of Cognitive Neuroscience and Learning & IDG/McGovern Institute for Brain Research, Beijing Normal University, Beijing, China; Max Planck Institute for Psycholinguistics, Nijmegen, the Netherlands", "name": "Kong, Xiang-Zhen", @@ -604,11 +609,6 @@ "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", "orcid": "0000-0002-5312-6729" - }, - { - "affiliation": "ARAMIS LAB, Brain and Spine Institute (ICM), Paris, France.", - "name": "Guillon, Jeremy", - "orcid": "0000-0002-2672-7510" } ], "keywords": [ From a6d1d6e6236122424ebb187a17bf8b6578da1abd Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Fri, 22 Feb 2019 14:42:22 -0500 Subject: [PATCH 0149/1665] MNT: Update mailmap to avoid renames in script --- .mailmap | 51 +++++++++++++++++++++++++++++++++------------------ 1 file changed, 33 insertions(+), 18 deletions(-) diff --git a/.mailmap b/.mailmap index d58a4ea5ac..4f59dcb7b1 100644 --- a/.mailmap +++ b/.mailmap @@ -10,19 +10,21 @@ Alexander Schaefer aschaefer Alexandre M. S Alexandre M. Savio Alexandre M. S Alexandre M. Savio Alexandre Manhaes Savio +Andrew Floren afloren Andrey Chetverikov Andrey Chetverikov Andrey Chetverikov achetverikov Anisha Keshavan Anisha Keshavan Anisha Keshavan akeshavan +Ariel Rokem Ariel Rokem Ariel Rokem arokem Ariel Rokem arokem -Ariel Rokem Ariel Rokem Arman Eshaghi armaneshaghi Ashely Gillman Ashley Gillman Ashely Gillman ashgillman Basille Pinsard bpinsard Basille Pinsard bpinsard Ben Cipollini Ben Cipollini +Benjamin Acland Ben Acland Benjamin Yvernault Benjamin Yvernault Benjamin Yvernault byvernault Blake Dewey Blake Dewey @@ -30,19 +32,11 @@ Blake Dewey blakedewey Blake Dewey blakedewey Brendan Moloney moloney Caroline Froehlich carolFrohlich -Chris Filo Gorgolewski Chris Filo Gorgolewski -Chris Filo Gorgolewski Chris Filo Gorgolewski -Chris Filo Gorgolewski Chris Filo Gorgolewski -Chris Filo Gorgolewski Chris Gorgolewski -Chris Filo Gorgolewski Krzysztof Gorgolewski -Chris Filo Gorgolewski filo -Chris Filo Gorgolewski filo -Chris Filo Gorgolewski filo Christopher J. Markiewicz Chris Markiewicz +Christopher J. Markiewicz Chris Markiewicz Christopher J. Markiewicz Christopher J. Johnson Christopher J. Markiewicz Christopher J. Markiewicz Christopher J. Markiewicz Christopher J. Markiewicz -Christopher J. Markiewicz Chris Markiewicz Cindee Madison cindeem Cindee Madison cindeem <> Colin Buchanan Colin Buchanan @@ -55,10 +49,13 @@ Daniel Ginsburg danginsburg danmc David Ellis David Ellis David Ellis David Ellis +David Mordom dmordom David Welch David Welch Dimitri Papadopoulos Orfanos Dimitri Papadopoulos Dmytro belevtsoff -Dylan Nielson Dylan +Dylan M. Nielson Dylan +Dylan M. Nielson Dylan Nielson +Eduard Ort eort Elizabeth DuPre emdupre Erik Ziegler Erik Erik Ziegler Erik Ziegler @@ -75,6 +72,7 @@ Gael Varoquaux GaelVaroquaux gjcooper Gilles de Hollander Gilles86 Gio Piantoni Gio at UMCU +Guillaume Flandin Guillaume Hans Johnson Hans Johnson Hans Johnson hjmjohnson Horea Christian Horea Christian @@ -92,14 +90,14 @@ Jens Kleesiek JensNRAD Jessica Forbes jessicaforbes Jérémy Guillon GUILLON Jeremy Joerg Stadler Joerg Stadler -Joerg Stadler Jörg Stadler Joerg Stadler Joerg Stadler -John Lee leej3 +Joerg Stadler Jörg Stadler John Lee john anthony lee +John Lee leej3 Joke Durnez jokedurnez Josh Warner JDWarner Josh Warner Josh Warner (Mac) -Kai Schlamp medihack Jessica Forbes jessicaforbes +Kai Schlamp medihack Katie Bottenhorn 62442katieb Kesshi Jordan Kesshi Jordan Kesshi Jordan Kesshi Jordan @@ -110,6 +108,17 @@ Kesshi Jordan Kesshi jordan kesshijordan Kevin Sitek sitek Kevin Sitek sitek +Kornelius Podranski Kornelius +Krzysztof J. Gorgolewski Chris Filo Gorgolewski +Krzysztof J. Gorgolewski Chris Filo Gorgolewski +Krzysztof J. Gorgolewski Chris Filo Gorgolewski +Krzysztof J. Gorgolewski Chris Filo Gorgolewski +Krzysztof J. Gorgolewski Chris Gorgolewski +Krzysztof J. Gorgolewski Krzysztof Gorgolewski +Krzysztof J. Gorgolewski filo +Krzysztof J. Gorgolewski filo +Krzysztof J. Gorgolewski filo +Krzysztof J. 
Gorgolewski filo Leonie Lampe Leonie Lmape Lukas Snoek Lukas Snoek Marcel Falkiewicz Marcel Falkiewicz @@ -118,24 +127,28 @@ Mathias Goncalves mathiasg Mathieu Dubois Mathieu Dubois Mathieu Dubois duboism Matteo Mancini matteomancini -Matteo Visconti dOC Matteo Visconti dOC -Matteo Visconti dOC mvdoc +Matteo Visconti di Oleggio Castello Matteo Visconti dOC +Matteo Visconti di Oleggio Castello Matteo Visconti dOC +Matteo Visconti di Oleggio Castello mvdoc Michael Clark Clark Michael Dayan Michael Michael Dayan Michael Michael Dayan mick-d -Michael Notter miykael +Michael Philipp Notter Michael Notter +Michael Philipp Notter miykael Michael Waskom Michael Waskom Michael Waskom Michael Waskom Michael Waskom Michael Waskom Michael Waskom mwaskom Michael Waskom mwaskom Michael Waskom mwaskom +Miguel Molina-Romero Miguel Molina Oliver Contier oliver-contier Oscar Esteban Oscar Esteban Oscar Esteban oesteban Pablo Polosecki pipolose Pablo Polosecki pipolose +Paul Sharp psharp1289 Ranjit Khanuja RanjitK Ross Markello Ross Markello Russell Poldrack Russ Poldrack @@ -143,6 +156,7 @@ Russell Poldrack poldrack Salma Bougacha Salma BOUGACHA Salma Bougacha salma Salma Bougacha salma1601 +Sami Kristian Andberg Sami Andberg Satrajit Ghosh Satrajit Ghosh Sebastian Urchs sebastian Sharad Sikka ssikka @@ -150,7 +164,8 @@ Shariq Iqbal shariqiqbal2810 shariqiqbal2810 Shoshana Berleant Shoshana Berleant Shoshana Berleant Shoshana Berleant -Simon R Simon Rothmeier +Simon Rothmei Simon R +Simon Rothmei Simon Rothmeier Siqi Liu siqi liu Siqi Liu sql Steven Giavasis Steven Giavasis From f079ed286d456b531f4f325ddc689f2572654edd Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Fri, 22 Feb 2019 14:54:39 -0500 Subject: [PATCH 0150/1665] ENH: Add zenodo updating script --- .zenodo.json | 22 +++++------ tools/update_zenodo.py | 85 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 96 insertions(+), 11 deletions(-) create mode 100755 tools/update_zenodo.py diff --git a/.zenodo.json b/.zenodo.json index 3b89fbdc54..7358a2aec2 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -468,11 +468,6 @@ { "name": "Cheung, Brian" }, - { - "affiliation": "The University of Texas at Austin", - "name": "Floren, Andrew", - "orcid": "0000-0003-3618-2056" - }, { "name": "Urchs, Sebastian" }, @@ -487,14 +482,19 @@ "orcid": "0000-0003-2076-5329" }, { - "affiliation": "University of illinois urbana champaign", - "name": "Sharp, Paul" + "affiliation": "The University of Texas at Austin", + "name": "Floren, Andrew", + "orcid": "0000-0003-3618-2056" }, { "affiliation": "Institute of Neuroinformatics, ETH/University of Zurich", "name": "Gerhard, Stephan", "orcid": "0000-0003-4454-6171" }, + { + "affiliation": "University of Illinois Urbana Champaign", + "name": "Sharp, Paul" + }, { "affiliation": "Technical University Munich", "name": "Molina-Romero, Miguel", @@ -545,10 +545,6 @@ { "name": "Tarbert, Claire" }, - { - "affiliation": "Vrije Universiteit Amsterdam", - "name": "Ort, Eduard" - }, { "name": "Nickson, Thomas" }, @@ -569,6 +565,10 @@ { "name": "Flandin, Guillaume" }, + { + "affiliation": "Vrije Universiteit Amsterdam", + "name": "Ort, Eduard" + }, { "name": "Shachnev, Dmitry" }, diff --git a/tools/update_zenodo.py b/tools/update_zenodo.py new file mode 100755 index 0000000000..185a56965b --- /dev/null +++ b/tools/update_zenodo.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python3 +import json +from fuzzywuzzy import fuzz, process +import shutil +import os +import subprocess as sp + +if os.path.exists('line-contributions.txt'): 
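# [Hedged aside, not part of update_zenodo.py] The matching this script relies on
# is order-insensitive: fuzzywuzzy's token_sort_ratio sorts tokens before
# comparing, so git-author and Zenodo-style name orderings can still match.
from fuzzywuzzy import fuzz
print(fuzz.token_sort_ratio('Krzysztof J. Gorgolewski',
                            'Gorgolewski, Krzysztof J.'))   # 100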
+ with open('line-contributions.txt', 'rt') as fp: + lines = fp.readlines() +else: + if shutil.which('git-line-summary'): + print("Running git-line-summary on nipype repo") + lines = sp.check_output(['git-line-summary']).decode().split('\n') + else: + raise RuntimeError("Install Git Extras to view git contributors") + +data = [' '.join(line.strip().split()[1:-1]) for line in lines if '%' in line] + +# load zenodo from master +with open('.zenodo.json', 'rt') as fp: + zenodo = json.load(fp) +zen_names = [' '.join(val['name'].split(',')[::-1]).strip() + for val in zenodo['creators']] + +name_matches = [] + +for ele in data: + matches = process.extract(ele, zen_names, scorer=fuzz.token_sort_ratio, + limit=2) + # matches is a list [('First match', % Match), ('Second match', % Match)] + if matches[0][1] > 80: + val = zenodo['creators'][zen_names.index(matches[0][0])] + else: + # skip unmatched names + print("No entry to sort:", ele) + continue + + if val not in name_matches: + name_matches.append(val) + +# for entries not found in line-contributions +missing_entries = [ + {"name": "Varada, Jan"}, + {"name": "Schwabacher, Isaac"}, + {"affiliation": "Child Mind Institute / Nathan Kline Institute", + "name": "Pellman, John", + "orcid": "0000-0001-6810-4461"}, + {"name": "Perez-Guevara, Martin"}, + {"name": "Khanuja, Ranjeet"}, + {"affiliation": + "Medical Imaging & Biomarkers, Bioclinica, Newark, CA, USA.", + "name": "Pannetier, Nicolas", + "orcid": "0000-0002-0744-5155"}, + {"name": "McDermottroe, Conor"}, +] + +for entry in missing_entries: + name_matches.append(entry) + + +def fix_position(creators): + # position first / last authors + f_authr = None + l_authr = None + + for i, info in enumerate(creators): + if info['name'] == 'Gorgolewski, Krzysztof J.': + f_authr = i + if info['name'] == 'Ghosh, Satrajit': + l_authr = i + + if f_authr is None or l_authr is None: + raise AttributeError('Missing important people') + + creators.insert(0, creators.pop(f_authr)) + creators.insert(len(creators), creators.pop(l_authr + 1)) + return creators + + +zenodo['creators'] = fix_position(name_matches) + +with open('.zenodo.json', 'wt') as fp: + json.dump(zenodo, fp, indent=2, sort_keys=True) + fp.write('\n') From 1f9f106bc8fecc4c9ebaa538ebf3e9a1ea69595b Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 23 Feb 2019 11:51:56 +0000 Subject: [PATCH 0151/1665] Update nipype/interfaces/spm/model.py Co-Authored-By: hstojic --- nipype/interfaces/spm/model.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/spm/model.py b/nipype/interfaces/spm/model.py index 81cdeb8116..4fb1c51e08 100644 --- a/nipype/interfaces/spm/model.py +++ b/nipype/interfaces/spm/model.py @@ -106,7 +106,9 @@ class Level1DesignInputSpec(SPMCommandInputSpec): desc=('Model serial correlations ' 'AR(1), FAST or none. FAST ' 'is available in SPM12')) - flags = traits.Dict(desc='Additional arguments to the job, e.g. 
a common SPm operation is to modify the default masking threshold (mthresh)') + flags = traits.Dict( + desc='Additional arguments to the job, e.g., a common SPM operation is to ' + 'modify the default masking threshold (mthresh)') class Level1DesignOutputSpec(TraitedSpec): From fa64907561cf7707c713f251b13c1a3b1cbe7dd4 Mon Sep 17 00:00:00 2001 From: rciric Date: Sat, 23 Feb 2019 15:36:51 -0800 Subject: [PATCH 0152/1665] revise in accordance with @effigies review --- nipype/algorithms/confounds.py | 2 +- nipype/workflows/rsfmri/fsl/tests/test_resting.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index 017a9d1c36..03b74a340f 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -1306,7 +1306,7 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, u = np.full((M.shape[0], components_criterion), np.nan, dtype=np.float32) else: - continue + u = np.full((M.shape[0], 1), np.nan, dtype=np.float32) variance_explained = (s ** 2) / np.sum(s ** 2) cumulative_variance_explained = np.cumsum(variance_explained) diff --git a/nipype/workflows/rsfmri/fsl/tests/test_resting.py b/nipype/workflows/rsfmri/fsl/tests/test_resting.py index a0ba2439a7..eba73a75b1 100644 --- a/nipype/workflows/rsfmri/fsl/tests/test_resting.py +++ b/nipype/workflows/rsfmri/fsl/tests/test_resting.py @@ -89,8 +89,8 @@ def test_create_resting_preproc(self, mock_node, mock_realign_wf): # assert expected_file = os.path.abspath(self.out_filenames['components_file']) with open(expected_file, 'r') as components_file: - components_data = [line.split() - for line in components_file.read().splitlines()] + components_data = [line.rstrip().split() + for line in components_file] num_got_components = len(components_data) assert (num_got_components == self.num_noise_components or num_got_components == self.fake_data.shape[3]) From 298f819e5b6c45cd390c831fdf30fc1656b85b5e Mon Sep 17 00:00:00 2001 From: oliver-contier Date: Sat, 23 Feb 2019 15:54:28 +0100 Subject: [PATCH 0153/1665] MNT: Add Oliver Contier name to .zenodo.json --- .zenodo.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.zenodo.json b/.zenodo.json index 7358a2aec2..6ed3552b5c 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -609,6 +609,11 @@ "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", "orcid": "0000-0002-5312-6729" + }, + { + "affiliation": "Otto-von-Guericke-University Magdeburg, Germany", + "name": "Contier, Oliver", + "orcid": "0000-0002-2983-4709" } ], "keywords": [ From 073f6965ef25e0d7d6250486756714918ccfd09c Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sun, 24 Feb 2019 12:59:25 -0500 Subject: [PATCH 0154/1665] MNT: Update .zenodo ordering --- .zenodo.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index 6ed3552b5c..6fdd61404b 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -327,6 +327,11 @@ "affiliation": "Child Mind Institute", "name": "Giavasis, Steven" }, + { + "affiliation": "Otto-von-Guericke-University Magdeburg, Germany", + "name": "Contier, Oliver", + "orcid": "0000-0002-2983-4709" + }, { "name": "Correa, Carlos" }, @@ -609,11 +614,6 @@ "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", "orcid": "0000-0002-5312-6729" - }, - { - "affiliation": "Otto-von-Guericke-University Magdeburg, Germany", - "name": "Contier, Oliver", - "orcid": "0000-0002-2983-4709" } ], "keywords": [ From 7d4af09a82d6df2f375f86377ff9baec0dda0868 Mon Sep 17 00:00:00 2001 From: hstojic Date: Mon, 25 Feb 2019 07:20:44 +0000 Subject: [PATCH 0155/1665] afni utils.py - masktool - InputMultiPath for in_file argument --- nipype/interfaces/afni/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/afni/utils.py b/nipype/interfaces/afni/utils.py index 987fcec135..3cb8d5f69e 100644 --- a/nipype/interfaces/afni/utils.py +++ b/nipype/interfaces/afni/utils.py @@ -1636,12 +1636,12 @@ def _format_arg(self, name, spec, value): class MaskToolInputSpec(AFNICommandInputSpec): - in_file = File( + in_file = InputMultiPath( + File(exists=True), desc='input file or files to 3dmask_tool', argstr='-input %s', position=-1, mandatory=True, - exists=True, copyfile=False) out_file = File( name_template='%s_mask', From 2d219a1889a5262bf515290b7293eeb1d1d3bb02 Mon Sep 17 00:00:00 2001 From: qTabs Date: Mon, 25 Feb 2019 12:03:33 +0100 Subject: [PATCH 0156/1665] Update .zenodo.json --- .zenodo.json | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/.zenodo.json b/.zenodo.json index 6fdd61404b..d9c495e69e 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -614,7 +614,17 @@ "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", "orcid": "0000-0002-5312-6729" - } + }, + { + "affiliation": "Max Planck Institute for Human Cognitive and Brain Sciences, Leipzig, Germany.", + "name": "Tabas, Alejandro", + "orcid": "0000-0002-8643-1543" + }, + { + "affiliation": "Max Planck Institute for Human Cognitive and Brain Sciences, Leipzig, Germany.", + "name": "Mihai, Paul Glad", + "orcid": "0000-0001-5715-6442" + } ], "keywords": [ "neuroimaging", From e82609eb0b4ca08b1a1debc92393fcc6bcc3bc1e Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Mon, 25 Feb 2019 10:11:46 -0500 Subject: [PATCH 0157/1665] MNT: Update zenodo ordering --- .zenodo.json | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index d9c495e69e..5d90ac5111 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -212,6 +212,11 @@ "name": "Gillman, Ashley", "orcid": "0000-0001-9130-1092" }, + { + "affiliation": "Max Planck Institute for Human Cognitive and Brain Sciences, Leipzig, Germany.", + "name": "Tabas, Alejandro", + "orcid": "0000-0002-8643-1543" + }, { "name": "Buchanan, Colin" }, @@ -327,17 +332,17 @@ "affiliation": "Child Mind Institute", "name": "Giavasis, Steven" }, - { - "affiliation": "Otto-von-Guericke-University Magdeburg, Germany", - "name": "Contier, Oliver", - "orcid": "0000-0002-2983-4709" - }, { "name": "Correa, Carlos" }, { "name": "Ghayoor, Ali" }, + { + "affiliation": "Otto-von-Guericke-University Magdeburg, Germany", + "name": "Contier, Oliver", + "orcid": "0000-0002-2983-4709" + }, { "name": "K\u00fcttner, Ren\u00e9" }, @@ -614,17 +619,7 @@ "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", "orcid": "0000-0002-5312-6729" - }, - { - "affiliation": "Max Planck Institute for Human Cognitive and Brain Sciences, Leipzig, Germany.", - "name": "Tabas, Alejandro", - "orcid": "0000-0002-8643-1543" - }, - { - "affiliation": "Max Planck Institute for Human Cognitive and Brain Sciences, Leipzig, Germany.", - "name": "Mihai, Paul Glad", - "orcid": "0000-0001-5715-6442" - } + } ], "keywords": [ "neuroimaging", From 913e68b1c0fe8ab7e024f42eb8ee35d4a09744bb Mon Sep 17 00:00:00 2001 From: anibalsolon Date: Mon, 25 Feb 2019 09:57:15 -0500 Subject: [PATCH 0158/1665] add to zenodo --- .zenodo.json | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.zenodo.json b/.zenodo.json index 5d90ac5111..d9160a6201 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -615,6 +615,10 @@ { "name": "McDermottroe, Conor" }, + { + "name": "Heinsfeld, Anibal S\u00f3lon", + "orcid": "0000-0002-2050-0614" + }, { "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", From 7e86ff4623316661717bd86641dcd6cc0a98093d Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 25 Feb 2019 10:38:45 -0500 Subject: [PATCH 0159/1665] MNT: Add kchawla-pi to Zenodo, update mailmap and ordering --- .mailmap | 2 ++ .zenodo.json | 19 ++++++++++++------- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/.mailmap b/.mailmap index 4f59dcb7b1..5ed5a7d2f6 100644 --- a/.mailmap +++ b/.mailmap @@ -13,6 +13,7 @@ Alexandre M. Savio Alexandre Manhaes Savio afloren Andrey Chetverikov Andrey Chetverikov Andrey Chetverikov achetverikov +Anibal Sólon Heinsfeld anibalsolon Anisha Keshavan Anisha Keshavan Anisha Keshavan akeshavan Ariel Rokem Ariel Rokem @@ -119,6 +120,7 @@ Krzysztof J. Gorgolewski filo filo Krzysztof J. Gorgolewski filo Krzysztof J. 
Gorgolewski filo +Kshitij Chawla Kshitij Chawla (kchawla-pi) Leonie Lampe Leonie Lmape Lukas Snoek Lukas Snoek Marcel Falkiewicz Marcel Falkiewicz diff --git a/.zenodo.json b/.zenodo.json index d9160a6201..eaac96b72f 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -212,14 +212,14 @@ "name": "Gillman, Ashley", "orcid": "0000-0001-9130-1092" }, + { + "name": "Buchanan, Colin" + }, { "affiliation": "Max Planck Institute for Human Cognitive and Brain Sciences, Leipzig, Germany.", "name": "Tabas, Alejandro", "orcid": "0000-0002-8643-1543" }, - { - "name": "Buchanan, Colin" - }, { "name": "Tungaraza, Rosalia" }, @@ -261,6 +261,10 @@ { "name": "Dubois, Mathieu" }, + { + "name": "Heinsfeld, Anibal S\u00f3lon", + "orcid": "0000-0002-2050-0614" + }, { "affiliation": "Child Mind Institute", "name": "Frohlich, Caroline" @@ -425,6 +429,11 @@ "name": "Grignard, Martin", "orcid": "0000-0001-5549-1861" }, + { + "affiliation": "INRIA-Saclay, Team Parietal", + "name": "Chawla, Kshitij", + "orcid": "0000-0002-7517-6321" + }, { "name": "Salvatore, John" }, @@ -615,10 +624,6 @@ { "name": "McDermottroe, Conor" }, - { - "name": "Heinsfeld, Anibal S\u00f3lon", - "orcid": "0000-0002-2050-0614" - }, { "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", From 06ebcfbc2cb573b74aed7d6dace106a7aed47201 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 25 Feb 2019 12:54:48 -0500 Subject: [PATCH 0160/1665] MNT: Add Katherine Bottenhorn, Paul Mihai to Zenodo --- .mailmap | 3 ++- .zenodo.json | 15 +++++++++++++++ tools/update_zenodo.py | 5 +++++ 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/.mailmap b/.mailmap index 5ed5a7d2f6..91e14a8077 100644 --- a/.mailmap +++ b/.mailmap @@ -99,7 +99,8 @@ Joke Durnez jokedurnez Josh Warner JDWarner Josh Warner Josh Warner (Mac) Kai Schlamp medihack -Katie Bottenhorn 62442katieb +Katherine Bottenhorn 62442katieb +Katherine Bottenhorn Katie Bottenhorn Kesshi Jordan Kesshi Jordan Kesshi Jordan Kesshi Jordan Kesshi Jordan Kesshi Jordan diff --git a/.zenodo.json b/.zenodo.json index eaac96b72f..065e188d8b 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -273,6 +273,11 @@ "affiliation": "University of Iowa", "name": "Welch, David" }, + { + "affiliation": "Florida International University", + "name": "Bottenhorn, Katherine", + "orcid": "0000-0002-7796-8795" + }, { "name": "Kent, James" }, @@ -495,6 +500,11 @@ "name": "Durnez, Joke", "orcid": "0000-0001-9030-2202" }, + { + "affiliation": "CNRS, UMS3552 IRMaGe", + "name": "Condamine, Eric", + "orcid": "0000-0002-9533-3769" + }, { "affiliation": "Technische Universit\u00e4t Dresden, Faculty of Medicine, Department of Child and Adolescent Psychiatry", "name": "Geisler, Daniel", @@ -624,6 +634,11 @@ { "name": "McDermottroe, Conor" }, + { + "affiliation": "Max Planck Institute for Human Cognitive and Brain Sciences, Leipzig, Germany.", + "name": "Mihai, Paul Glad", + "orcid": "0000-0001-5715-6442" + }, { "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", diff --git a/tools/update_zenodo.py b/tools/update_zenodo.py index 185a56965b..637defad49 100755 --- a/tools/update_zenodo.py +++ b/tools/update_zenodo.py @@ -53,6 +53,11 @@ "name": "Pannetier, Nicolas", "orcid": "0000-0002-0744-5155"}, {"name": "McDermottroe, Conor"}, + {"affiliation": + "Max Planck Institute for Human Cognitive and Brain Sciences, " + "Leipzig, Germany.", + "name": "Mihai, Paul Glad", + "orcid": "0000-0001-5715-6442"}, ] for entry in missing_entries: From 5ced29fdf866445eb22992c043df9fde33ccb875 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Mon, 25 Feb 2019 13:54:31 -0500 Subject: [PATCH 0161/1665] MNT: Update changelog --- doc/changelog/1.X.X-changelog | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/changelog/1.X.X-changelog b/doc/changelog/1.X.X-changelog index f72659162d..c88c9a9c7b 100644 --- a/doc/changelog/1.X.X-changelog +++ b/doc/changelog/1.X.X-changelog @@ -6,6 +6,8 @@ * FIX: Make positional arguments to LaplacianThickness require previous argument (https://github.com/nipy/nipype/pull/2848) * FIX: Import math and csv modules for bids_gen_info (https://github.com/nipy/nipype/pull/2881) * FIX: Ensure outputs can be listed in camino.ProcStreamlines by defining instance variable (https://github.com/nipy/nipype/pull/2739) + * ENH: Allow afni.MaskTool to take multiple input files (https://github.com/nipy/nipype/pull/2892) + * ENH: Add flags dictionary input to spm.Level1Design (https://github.com/nipy/nipype/pull/2861) * ENH: Threshold stddev once only in TSNR (https://github.com/nipy/nipype/pull/2883) * ENH: Add workbench.CiftiSmooth interface (https://github.com/nipy/nipype/pull/2871) * DOC: Replace initialism typo in comment with intended phrase (https://github.com/nipy/nipype/pull/2875) From 5fa95f4d66f5417359b8deb162ab1afa9d513f58 Mon Sep 17 00:00:00 2001 From: Horea Christian Date: Wed, 30 Jan 2019 03:02:49 +0100 Subject: [PATCH 0162/1665] BF: tests and unambiguous conditions order If this is not done, condition order for orthogonalizarion is unpredictable. --- nipype/algorithms/modelgen.py | 19 +++++++++++-------- nipype/algorithms/tests/test_modelgen.py | 14 ++++++++++++-- nipype/testing/data/events.tsv | 9 +++++++++ 3 files changed, 32 insertions(+), 10 deletions(-) create mode 100644 nipype/testing/data/events.tsv diff --git a/nipype/algorithms/modelgen.py b/nipype/algorithms/modelgen.py index b57e6bfcd5..d23ae392ba 100644 --- a/nipype/algorithms/modelgen.py +++ b/nipype/algorithms/modelgen.py @@ -13,10 +13,12 @@ """ from __future__ import (print_function, division, unicode_literals, absolute_import) + +str_basetype = str from builtins import range, str, bytes, int from copy import deepcopy -import os, math, csv +import csv, math, os from nibabel import load import numpy as np @@ -145,7 +147,7 @@ def scale_timings(timelist, input_units, output_units, time_repetition): return timelist def bids_gen_info(bids_event_files, - condition_column='trial_type', + condition_column='', amplitude_column=None, time_repetition=False, ): @@ -173,9 +175,13 @@ def bids_gen_info(bids_event_files, info = [] for bids_event_file in bids_event_files: with open(bids_event_file) as f: - f_events = csv.DictReader(f, skipinitialspace=True, delimiter='\t') + f_events = csv.DictReader(f, skipinitialspace=True, delimiter=str_basetype('\t')) events = [{k: v for k, v in row.items()} for row in f_events] - conditions = list(set([i[condition_column] for i in events])) + if not condition_column: + condition_column = '_trial_type' + for i in events: + i.update({condition_column: 'ev0'}) + conditions = sorted(set([i[condition_column] for i in events])) runinfo = Bunch(conditions=[], onsets=[], durations=[], amplitudes=[]) for condition in conditions: selected_events = [i for i in events if i[condition_column]==condition] @@ -185,10 +191,7 @@ def bids_gen_info(bids_event_files, decimals = math.ceil(-math.log10(time_repetition)) onsets = [np.round(i, decimals) for i in onsets] durations = [np.round(i ,decimals) for i in durations] - if condition: - runinfo.conditions.append(condition) - else: - 
runinfo.conditions.append('e0') + runinfo.conditions.append(condition) runinfo.onsets.append(onsets) runinfo.durations.append(durations) try: diff --git a/nipype/algorithms/tests/test_modelgen.py b/nipype/algorithms/tests/test_modelgen.py index 824a634354..01d9b1d0c6 100644 --- a/nipype/algorithms/tests/test_modelgen.py +++ b/nipype/algorithms/tests/test_modelgen.py @@ -11,9 +11,19 @@ import pytest import numpy.testing as npt +from nipype.testing import example_data from nipype.interfaces.base import Bunch, TraitError -from nipype.algorithms.modelgen import (SpecifyModel, SpecifySparseModel, - SpecifySPMModel) +from nipype.algorithms.modelgen import (bids_gen_info, SpecifyModel, + SpecifySparseModel, SpecifySPMModel) + + +def test_bids_gen_info(): + fname = example_data('events.tsv') + res = bids_gen_info([fname]) + assert res[0].onsets == [[183.75, 313.75, 483.75, 633.75, 783.75, 933.75, 1083.75, 1233.75]] + assert res[0].durations == [[20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0]] + assert res[0].amplitudes ==[[1, 1, 1, 1, 1, 1, 1, 1]] + assert res[0].conditions == ['ev0'] def test_modelgen1(tmpdir): diff --git a/nipype/testing/data/events.tsv b/nipype/testing/data/events.tsv new file mode 100644 index 0000000000..9f9fba67ef --- /dev/null +++ b/nipype/testing/data/events.tsv @@ -0,0 +1,9 @@ +onset duration frequency pulse_width amplitude +183.75 20.0 20.0 0.005 1.0 +313.75 20.0 20.0 0.005 1.0 +483.75 20.0 20.0 0.005 1.0 +633.75 20.0 20.0 0.005 1.0 +783.75 20.0 20.0 0.005 1.0 +933.75 20.0 20.0 0.005 1.0 +1083.75 20.0 20.0 0.005 1.0 +1233.75 20.0 20.0 0.005 1.0 From ef02ff0956cad3110be815bfdd8663e62a0ab79d Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 25 Feb 2019 16:55:09 -0500 Subject: [PATCH 0163/1665] MNT: Bump dev version --- doc/documentation.rst | 2 +- nipype/info.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/documentation.rst b/doc/documentation.rst index 57bb932ac4..66044e75d8 100644 --- a/doc/documentation.rst +++ b/doc/documentation.rst @@ -9,7 +9,7 @@ Documentation :Release: |version| :Date: |today| -Previous versions: `1.1.8 `_ `1.1.7 `_ +Previous versions: `1.1.9 `_ `1.1.8 `_ .. container:: doc2 diff --git a/nipype/info.py b/nipype/info.py index 88e2dcd8d8..a3a0dfeec0 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -11,7 +11,7 @@ # full release. 
'.dev' as a version_extra string means this is a development # version # Remove -dev for release -__version__ = '1.1.9' +__version__ = '1.2.0-dev' def get_nipype_gitversion(): From 1110b4fcf8e3a242661e55478753cd3488c5fb6b Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Mon, 25 Feb 2019 21:44:15 -0500 Subject: [PATCH 0164/1665] removed input tempvalue stuff and added logic to rerun the interface with various different inputs to try and generate as many outputs as possible --- nipype/utils/nipype2boutiques.py | 178 +++++++++++++++++++------------ 1 file changed, 109 insertions(+), 69 deletions(-) diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py index 1f46b24885..54d43e94ec 100644 --- a/nipype/utils/nipype2boutiques.py +++ b/nipype/utils/nipype2boutiques.py @@ -25,18 +25,19 @@ def generate_boutiques_descriptor( - module, interface_name, ignored_template_inputs, container_image, - container_index, container_type, verbose, ignore_template_numbers, save): + module, interface_name, container_image, container_type, container_index=None, + ignored_template_inputs=(), ignore_template_numbers=False, verbose=False, save=False): ''' Returns a JSON string containing a JSON Boutiques description of a Nipype interface. Arguments: * module: module where the Nipype interface is declared. * interface_name: name of Nipype interface. - * ignored_template_inputs: a list of input names that should be ignored in the generation of output path templates. - * ignore_template_numbers: True if numbers must be ignored in output path creations. * container_image: name of the container image where the tool is installed - * container_index: optional index where the image is available * container_type: type of container image (Docker or Singularity) + * container_index: optional index where the image is available + * ignored_template_inputs: a list of input names that should be ignored in the generation of output path templates. + * ignore_template_numbers: True if numbers must be ignored in output path creations. 
+ * verbose: print information messages * save: True if you want to save descriptor to a file ''' @@ -105,23 +106,21 @@ def generate_boutiques_descriptor( del tool_desc['groups'] # Generates tool outputs - for name, spec in sorted(outputs.traits(transient=None).items()): - output = get_boutiques_output(outputs, name, spec, interface, tool_desc['inputs'], - verbose) - if output['path-template'] != "": - tool_desc['output-files'].append(output) - if verbose: - print("-> Adding output " + output['name']) - elif verbose: - print("xx Skipping output " + output['name'] + - " with no path template.") - if tool_desc['output-files'] == []: - raise Exception("Tool has no output.") + generate_tool_outputs(outputs, interface, tool_desc, verbose, True) - # Removes all temporary values from inputs (otherwise they will - # appear in the JSON output) - for input in tool_desc['inputs']: - del input['tempvalue'] + # Generate outputs with various different inputs to try to generate + # as many output values as possible + custom_inputs = generate_custom_inputs(tool_desc['inputs']) + + for input_dict in custom_inputs: + interface = getattr(module, interface_name)(**input_dict) + outputs = interface.output_spec() + generate_tool_outputs(outputs, interface, tool_desc, verbose, False) + + # Fill in all missing output paths + for output in tool_desc['output-files']: + if output['path-template'] == "": + fill_in_missing_output_path(output, output['name'], tool_desc['inputs']) # Save descriptor to a file if save: @@ -135,6 +134,26 @@ def generate_boutiques_descriptor( return json.dumps(tool_desc, indent=4, separators=(',', ': ')) +def generate_tool_outputs(outputs, interface, tool_desc, verbose, first_run): + for name, spec in sorted(outputs.traits(transient=None).items()): + output = get_boutiques_output(outputs, name, spec, interface, tool_desc['inputs'], + verbose) + # If this is the first time we are generating outputs, add the full output to the descriptor. + # Otherwise, find the existing output and update its path template if it's still undefined. + if first_run: + tool_desc['output-files'].append(output) + else: + for existing_output in tool_desc['output-files']: + if output['id'] == existing_output['id'] and existing_output['path-template'] == "": + existing_output['path-template'] = output['path-template'] + break + if verbose: + print("-> Adding output " + output['name']) + + if len(tool_desc['output-files']) == 0: + raise Exception("Tool has no output.") + + def get_boutiques_input(inputs, interface, input_name, spec, ignored_template_inputs, verbose, ignore_template_numbers, handler=None, @@ -155,7 +174,6 @@ def get_boutiques_input(inputs, interface, input_name, spec, Assumes that: * Input names are unique. """ - spec_info = spec.full_info(inputs, input_name, None) input = {} @@ -234,7 +252,6 @@ def get_boutiques_input(inputs, interface, input_name, spec, elif input['type'] == "Flag": input['command-line-flag'] = ("--%s" % input_name + " ").strip() - input['tempvalue'] = None input['description'] = get_description_from_spec(inputs, input_name, spec) if not (hasattr(spec, "mandatory") and spec.mandatory): input['optional'] = True @@ -251,20 +268,7 @@ def get_boutiques_input(inputs, interface, input_name, spec, if value_choices is not None: input['value-choices'] = value_choices - # Create unique, temporary value. 
- temp_value = must_generate_value(input_name, input['type'], - ignored_template_inputs, spec_info, spec, - ignore_template_numbers) - if temp_value: - tempvalue = get_unique_value(input['type'], input_name) - setattr(interface.inputs, input_name, tempvalue) - input['tempvalue'] = tempvalue - if verbose: - print("oo Path-template creation using " + input['id'] + "=" + - str(tempvalue)) - - # Now that temp values have been generated, set Boolean types to - # Flag (there is no Boolean type in Boutiques) + # Set Boolean types to Flag (there is no Boolean type in Boutiques) if input['type'] == "Boolean": input['type'] = "Flag" @@ -313,38 +317,13 @@ def get_boutiques_output(outputs, name, spec, interface, tool_inputs, verbose=Fa except TypeError: output_value = None - # If output value is defined, use its basename - if output_value != "" and isinstance( - output_value, - str): # FIXME: this crashes when there are multiple output values. - # Go find from which input value it was built - for input in tool_inputs: - if not input['tempvalue']: - continue - input_value = input['tempvalue'] - if input['type'] == "File": - # Take the base name - input_value = os.path.splitext( - os.path.basename(input_value))[0] - if str(input_value) in output_value: - output_value = os.path.basename( - output_value.replace(input_value, - input['value-key']) - ) # FIXME: this only works if output is written in the current directory - output['path-template'] = os.path.basename(output_value) - - # If output value is undefined, create a placeholder for the path template - if not output_value: - # Look for an input with the same name and use this as the path template - found = False - for input in tool_inputs: - if input['id'] == name: - output['path-template'] = input['value-key'] - found = True - break - # If no input with the same name was found, use the output ID - if not found: - output['path-template'] = output['id'] + # If an output value is defined, use its relative path + # Otherwise, put blank string and try to fill it on another iteration + if output_value: + output['path-template'] = os.path.relpath(output_value) + else: + output['path-template'] = "" + return output @@ -379,6 +358,7 @@ def get_boutiques_groups(input_traits): return desc_groups + def get_unique_value(type, id): ''' Returns a unique value of type 'type', for input with id 'id', @@ -453,3 +433,63 @@ def get_description_from_spec(object, name, spec): boutiques_description += '.' 
return boutiques_description + + +def fill_in_missing_output_path(output, output_name, tool_inputs): + ''' + Creates a path template for outputs that are missing one + This is needed for the descriptor to be valid (path template is required) + ''' + # Look for an input with the same name as the output and use its value key + found = False + for input in tool_inputs: + if input['name'] == output_name: + output['path-template'] = input['value-key'] + found = True + break + # If no input with the same name was found, use the output ID + if not found: + output['path-template'] = output['id'] + return output + + + +def generate_custom_inputs(desc_inputs): + ''' + Generates a bunch of custom input dictionaries in order to generate as many outputs as possible + (to get their path templates) + Limitations: + -Does not support String inputs since some interfaces require specific strings + -Does not support File inputs since the file has to actually exist or the interface will fail + -Does not support list inputs yet + ''' + custom_input_dicts = [] + for desc_input in desc_inputs: + if desc_input.get('list'): # TODO support list inputs + continue + if desc_input['type'] == 'Flag': + custom_input_dicts.append({desc_input['id']: True}) + elif desc_input['type'] == 'Number': + custom_input_dicts.append({desc_input['id']: generate_random_number_input(desc_input)}) + elif desc_input.get('value-choices'): + for value in desc_input['value-choices']: + custom_input_dicts.append({desc_input['id']: value}) + return custom_input_dicts + + +def generate_random_number_input(desc_input): + ''' + Generates a random number input based on the input spec + ''' + if not desc_input.get('minimum') and not desc_input.get('maximum'): + return 1 + + if desc_input.get('integer'): + offset = 1 + else: + offset = 0.1 + + if desc_input.get('minimum'): + return desc_input['minimum'] if desc_input.get('exclusive-minimum') else desc_input['minimum'] + offset + if desc_input.get('maximum'): + return desc_input['maximum'] if desc_input.get('exclusive-maximum') else desc_input['maximum'] - offset From 8e20d2d142452c48e0bb6c9f876e2d29b9746e5f Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Mon, 25 Feb 2019 23:42:01 -0500 Subject: [PATCH 0165/1665] added logic to handle multioutputs and inputs where value choices are numbers, some other minor fixes --- nipype/utils/nipype2boutiques.py | 53 ++++++++++++++++++++++++++------ 1 file changed, 43 insertions(+), 10 deletions(-) diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py index 54d43e94ec..852887052f 100644 --- a/nipype/utils/nipype2boutiques.py +++ b/nipype/utils/nipype2boutiques.py @@ -20,6 +20,8 @@ import sys import tempfile import simplejson as json +import copy +import six from ..scripts.instance import import_module @@ -83,9 +85,9 @@ def generate_boutiques_descriptor( # Handle compound inputs (inputs that can be of multiple types and are mutually exclusive) if isinstance(input, list): mutex_group_members = [] + tool_desc['command-line'] += input[0]['value-key'] + " " for i in input: tool_desc['inputs'].append(i) - tool_desc['command-line'] += i['value-key'] + " " mutex_group_members.append(i['id']) if verbose: print("-> Adding input " + i['name']) @@ -100,6 +102,9 @@ def generate_boutiques_descriptor( if verbose: print("-> Adding input " + input['name']) + # Remove the extra space at the end of the command line + tool_desc['command-line'] = tool_desc['command-line'].strip() + # Generates input groups tool_desc['groups'] += 
get_boutiques_groups(interface.inputs.traits(transient=None).items()) if len(tool_desc['groups']) == 0: @@ -141,14 +146,20 @@ def generate_tool_outputs(outputs, interface, tool_desc, verbose, first_run): # If this is the first time we are generating outputs, add the full output to the descriptor. # Otherwise, find the existing output and update its path template if it's still undefined. if first_run: - tool_desc['output-files'].append(output) + if isinstance(output, list): + tool_desc['output-files'].extend(output) + if verbose: + print("-> Adding output " + output[0]['name']) + else: + tool_desc['output-files'].append(output) + if verbose: + print("-> Adding output " + output['name']) else: for existing_output in tool_desc['output-files']: - if output['id'] == existing_output['id'] and existing_output['path-template'] == "": + if not isinstance(output, list) and output['id'] == existing_output['id'] \ + and existing_output['path-template'] == "": existing_output['path-template'] = output['path-template'] break - if verbose: - print("-> Adding output " + output['name']) if len(tool_desc['output-files']) == 0: raise Exception("Tool has no output.") @@ -197,9 +208,11 @@ def get_boutiques_input(inputs, interface, input_name, spec, input_list = [] # Recursively create an input for each trait for i in range(0, len(trait_handler.handlers)): - input_list.append(get_boutiques_input(inputs, interface, input_name, spec, - ignored_template_inputs, verbose, - ignore_template_numbers, trait_handler.handlers[i], i)) + inp = get_boutiques_input(inputs, interface, input_name, spec, + ignored_template_inputs, verbose, + ignore_template_numbers, trait_handler.handlers[i], i) + inp['optional'] = True + input_list.append(inp) return input_list if handler_type == "File" or handler_type == "Directory": @@ -227,6 +240,7 @@ def get_boutiques_input(inputs, interface, input_name, spec, input['exclusive-maximum'] = trait_handler.exclude_high # Deal with list inputs + # TODO handle lists of lists (e.g. 
FSL ProbTrackX seed input) if handler_type == "List": input['list'] = True trait_type = type(trait_handler.item_trait.trait_type).__name__ @@ -235,11 +249,13 @@ def get_boutiques_input(inputs, interface, input_name, spec, input['type'] = "Number" elif trait_type == "Float": input['type'] = "Number" + elif trait_type == "File": + input['type'] = "File" else: input['type'] = "String" - if trait_handler.minlen is not None: + if trait_handler.minlen != 0: input['min-list-entries'] = trait_handler.minlen - if trait_handler.maxlen is not None: + if trait_handler.maxlen != six.MAXSIZE: input['max-list-entries'] = trait_handler.maxlen input['value-key'] = "[" + input_name.upper( @@ -266,6 +282,11 @@ def get_boutiques_input(inputs, interface, input_name, spec, pass else: if value_choices is not None: + if all(isinstance(n, int) for n in value_choices): + input['type'] = "Number" + input['integer'] = True + elif all(isinstance(n, float) for n in value_choices): + input['type'] = "Number" input['value-choices'] = value_choices # Set Boolean types to Flag (there is no Boolean type in Boutiques) @@ -316,6 +337,18 @@ def get_boutiques_output(outputs, name, spec, interface, tool_inputs, verbose=Fa output_value = interface._list_outputs()[name] except TypeError: output_value = None + except AttributeError: + output_value = None + + # Handle multi-outputs + if isinstance(output_value, list): + output_list = [] + for i in range(0, len(output_value)): + output_copy = copy.deepcopy(output) + output_copy['path-template'] = os.path.relpath(output_value[i]) + output_copy['id'] += ("_" + str(i+1)) if i > 0 else "" + output_list.append(output_copy) + return output_list # If an output value is defined, use its relative path # Otherwise, put blank string and try to fill it on another iteration From 61c836fcdf168afb8556244b4c36753213967153 Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Tue, 26 Feb 2019 14:22:46 -0500 Subject: [PATCH 0166/1665] fixed multioutput, added handling of multiinput, added option to supply custom path for saved file --- nipype/utils/nipype2boutiques.py | 44 ++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py index 852887052f..41492640bb 100644 --- a/nipype/utils/nipype2boutiques.py +++ b/nipype/utils/nipype2boutiques.py @@ -28,7 +28,7 @@ def generate_boutiques_descriptor( module, interface_name, container_image, container_type, container_index=None, - ignored_template_inputs=(), ignore_template_numbers=False, verbose=False, save=False): + ignored_template_inputs=(), ignore_template_numbers=False, verbose=False, save=False, save_path=None): ''' Returns a JSON string containing a JSON Boutiques description of a Nipype interface. Arguments: @@ -41,6 +41,7 @@ def generate_boutiques_descriptor( * ignore_template_numbers: True if numbers must be ignored in output path creations. 
* verbose: print information messages * save: True if you want to save descriptor to a file + * save_path: file path for the saved descriptor (defaults to name of the interface in current directory) ''' if not module: @@ -129,7 +130,8 @@ def generate_boutiques_descriptor( # Save descriptor to a file if save: - with open(interface_name + '.json', 'w') as outfile: + path = save_path if save_path is not None else os.path.join(os.getcwd(), interface_name + '.json') + with open(path, 'w') as outfile: json.dump(tool_desc, outfile, indent=4, separators=(',', ': ')) if verbose: print("-> Descriptor saved to file " + outfile.name) @@ -146,18 +148,12 @@ def generate_tool_outputs(outputs, interface, tool_desc, verbose, first_run): # If this is the first time we are generating outputs, add the full output to the descriptor. # Otherwise, find the existing output and update its path template if it's still undefined. if first_run: - if isinstance(output, list): - tool_desc['output-files'].extend(output) - if verbose: - print("-> Adding output " + output[0]['name']) - else: - tool_desc['output-files'].append(output) - if verbose: - print("-> Adding output " + output['name']) + tool_desc['output-files'].append(output) + if verbose: + print("-> Adding output " + output['name']) else: for existing_output in tool_desc['output-files']: - if not isinstance(output, list) and output['id'] == existing_output['id'] \ - and existing_output['path-template'] == "": + if output['id'] == existing_output['id'] and existing_output['path-template'] == "": existing_output['path-template'] = output['path-template'] break @@ -258,6 +254,11 @@ def get_boutiques_input(inputs, interface, input_name, spec, if trait_handler.maxlen != six.MAXSIZE: input['max-list-entries'] = trait_handler.maxlen + # Deal with multi-input + if handler_type == "InputMultiObject": + input['type'] = "File" + input['list'] = True + input['value-key'] = "[" + input_name.upper( ) + "]" # assumes that input names are unique @@ -342,13 +343,18 @@ def get_boutiques_output(outputs, name, spec, interface, tool_inputs, verbose=Fa # Handle multi-outputs if isinstance(output_value, list): - output_list = [] - for i in range(0, len(output_value)): - output_copy = copy.deepcopy(output) - output_copy['path-template'] = os.path.relpath(output_value[i]) - output_copy['id'] += ("_" + str(i+1)) if i > 0 else "" - output_list.append(output_copy) - return output_list + output['list'] = True + # Check if all extensions are the same + extensions = [] + for val in output_value: + extensions.append(os.path.splitext(val)[1]) + # If extensions all the same, set path template as wildcard + extension + # Otherwise just use a wildcard + if len(set(extensions)) == 1: + output['path-template'] = "*" + extensions[0] + else: + output['path-template'] = "*" + return output # If an output value is defined, use its relative path # Otherwise, put blank string and try to fill it on another iteration From f4c8867f88b0895c1531e961c6b693981d6ebcf7 Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Tue, 26 Feb 2019 15:56:35 -0500 Subject: [PATCH 0167/1665] fixed number input min and max, removed numbers from custom inputs (caused too many failures) --- nipype/utils/nipype2boutiques.py | 26 +++++++++----------------- 1 file changed, 9 insertions(+), 17 deletions(-) diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py index 41492640bb..8ff2d11880 100644 --- a/nipype/utils/nipype2boutiques.py +++ b/nipype/utils/nipype2boutiques.py @@ -226,14 +226,14 @@ def 
get_boutiques_input(inputs, interface, input_name, spec, # Deal with range inputs if handler_type == "Range": input['type'] = "Number" - if trait_handler.low is not None: - input['minimum'] = trait_handler.low - if trait_handler.high is not None: - input['maximum'] = trait_handler.high - if trait_handler.exclude_low is not None: - input['exclusive-minimum'] = trait_handler.exclude_low - if trait_handler.exclude_high is not None: - input['exclusive-maximum'] = trait_handler.exclude_high + if trait_handler._low is not None: + input['minimum'] = trait_handler._low + if trait_handler._high is not None: + input['maximum'] = trait_handler._high + if trait_handler._exclude_low: + input['exclusive-minimum'] = True + if trait_handler._exclude_high: + input['exclusive-maximum'] = True # Deal with list inputs # TODO handle lists of lists (e.g. FSL ProbTrackX seed input) @@ -492,24 +492,16 @@ def fill_in_missing_output_path(output, output_name, tool_inputs): return output - def generate_custom_inputs(desc_inputs): ''' Generates a bunch of custom input dictionaries in order to generate as many outputs as possible (to get their path templates) - Limitations: - -Does not support String inputs since some interfaces require specific strings - -Does not support File inputs since the file has to actually exist or the interface will fail - -Does not support list inputs yet + Currently only works with flag inputs and inputs with defined value choices. ''' custom_input_dicts = [] for desc_input in desc_inputs: - if desc_input.get('list'): # TODO support list inputs - continue if desc_input['type'] == 'Flag': custom_input_dicts.append({desc_input['id']: True}) - elif desc_input['type'] == 'Number': - custom_input_dicts.append({desc_input['id']: generate_random_number_input(desc_input)}) elif desc_input.get('value-choices'): for value in desc_input['value-choices']: custom_input_dicts.append({desc_input['id']: value}) From 6961f02bcb1ba0e85c7de6a85ba34d74755f3dc1 Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Tue, 26 Feb 2019 16:39:03 -0500 Subject: [PATCH 0168/1665] removed unused things, updated comments --- nipype/utils/nipype2boutiques.py | 112 ++------------------ nipype/utils/tests/test_nipype2boutiques.py | 2 - 2 files changed, 11 insertions(+), 103 deletions(-) diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py index 8ff2d11880..ff389fdb52 100644 --- a/nipype/utils/nipype2boutiques.py +++ b/nipype/utils/nipype2boutiques.py @@ -7,20 +7,12 @@ # Boutiques tools can be imported in CBRAIN (https://github.com/aces/cbrain) among other platforms. # # Limitations: -# * List outputs are not supported. -# * Default values are not extracted from the documentation of the Nipype interface. -# * The following input types must be ignored for the output path template creation (see option -t): -# ** String restrictions, i.e. String inputs that accept only a restricted set of values. -# ** mutually exclusive inputs. -# * Path-templates are wrong when output files are not created in the execution directory (e.g. when a sub-directory is created). -# * Optional outputs, i.e. outputs that not always produced, may not be detected. +# * Optional outputs, i.e. outputs that not always produced, may not be detected. They will, however, still be listed +# with a placeholder for the path template (either a value key or the output ID) that should be verified and corrected. 
import os -import argparse import sys -import tempfile import simplejson as json -import copy import six from ..scripts.instance import import_module @@ -28,7 +20,7 @@ def generate_boutiques_descriptor( module, interface_name, container_image, container_type, container_index=None, - ignored_template_inputs=(), ignore_template_numbers=False, verbose=False, save=False, save_path=None): + verbose=False, save=False, save_path=None): ''' Returns a JSON string containing a JSON Boutiques description of a Nipype interface. Arguments: @@ -37,8 +29,6 @@ def generate_boutiques_descriptor( * container_image: name of the container image where the tool is installed * container_type: type of container image (Docker or Singularity) * container_index: optional index where the image is available - * ignored_template_inputs: a list of input names that should be ignored in the generation of output path templates. - * ignore_template_numbers: True if numbers must be ignored in output path creations. * verbose: print information messages * save: True if you want to save descriptor to a file * save_path: file path for the saved descriptor (defaults to name of the interface in current directory) @@ -80,9 +70,7 @@ def generate_boutiques_descriptor( # Generates tool inputs for name, spec in sorted(interface.inputs.traits(transient=None).items()): - input = get_boutiques_input(inputs, interface, name, spec, - ignored_template_inputs, verbose, - ignore_template_numbers) + input = get_boutiques_input(inputs, interface, name, spec, verbose) # Handle compound inputs (inputs that can be of multiple types and are mutually exclusive) if isinstance(input, list): mutex_group_members = [] @@ -162,9 +150,7 @@ def generate_tool_outputs(outputs, interface, tool_desc, verbose, first_run): def get_boutiques_input(inputs, interface, input_name, spec, - ignored_template_inputs, verbose, - ignore_template_numbers, handler=None, - input_number=None): + verbose, handler=None, input_number=None): """ Returns a dictionary containing the Boutiques input corresponding to a Nipype intput. @@ -173,8 +159,7 @@ def get_boutiques_input(inputs, interface, input_name, spec, * interface: Nipype interface. * input_name: name of the Nipype input. * spec: Nipype input spec. - * ignored_template_inputs: input names for which no temporary value must be generated. - * ignore_template_numbers: True if numbers must be ignored in output path creations. + * verbose: print information messages. * handler: used when handling compound inputs, which don't have their own input spec * input_number: used when handling compound inputs to assign each a unique ID @@ -205,8 +190,7 @@ def get_boutiques_input(inputs, interface, input_name, spec, # Recursively create an input for each trait for i in range(0, len(trait_handler.handlers)): inp = get_boutiques_input(inputs, interface, input_name, spec, - ignored_template_inputs, verbose, - ignore_template_numbers, trait_handler.handlers[i], i) + verbose, trait_handler.handlers[i], i) inp['optional'] = True input_list.append(inp) return input_list @@ -297,7 +281,7 @@ def get_boutiques_input(inputs, interface, input_name, spec, return input -def get_boutiques_output(outputs, name, spec, interface, tool_inputs, verbose=False): +def get_boutiques_output(outputs, name, spec, interface, tool_inputs): """ Returns a dictionary containing the Boutiques output corresponding to a Nipype output. 
@@ -367,6 +351,9 @@ def get_boutiques_output(outputs, name, spec, interface, tool_inputs, verbose=Fa def get_boutiques_groups(input_traits): + """ + Returns a list of dictionaries containing Boutiques groups for the mutually exclusive and all-or-none Nipype inputs. + """ desc_groups = [] all_or_none_input_sets = [] mutex_input_sets = [] @@ -398,65 +385,6 @@ def get_boutiques_groups(input_traits): return desc_groups -def get_unique_value(type, id): - ''' - Returns a unique value of type 'type', for input with id 'id', - assuming id is unique. - ''' - return { - "File": os.path.abspath(create_tempfile()), - "Boolean": True, - "Number": abs(hash(id)), # abs in case input param must be positive... - "String": id - }[type] - - -def create_tempfile(): - ''' - Creates a temp file and returns its name. - ''' - fileTemp = tempfile.NamedTemporaryFile(delete=False) - fileTemp.write(b"hello") - fileTemp.close() - return fileTemp.name - - -def must_generate_value(name, type, ignored_template_inputs, spec_info, spec, - ignore_template_numbers): - ''' - Return True if a temporary value must be generated for this input. - Arguments: - * name: input name. - * type: input_type. - * ignored_template_inputs: a list of inputs names for which no value must be generated. - * spec_info: spec info of the Nipype input - * ignore_template_numbers: True if numbers must be ignored. - ''' - # Return false when type is number and numbers must be ignored. - if ignore_template_numbers and type == "Number": - return False - # Only generate value for the first element of mutually exclusive inputs. - if spec.xor and spec.xor[0] != name: - return False - # Directory types are not supported - if "an existing directory name" in spec_info: - return False - # Don't know how to generate a list. - if "a list" in spec_info or "a tuple" in spec_info: - return False - # Don't know how to generate a dictionary. - if "a dictionary" in spec_info: - return False - # Best guess to detect string restrictions... - if "' or '" in spec_info: - return False - if spec.default or spec.default_value(): - return False - if not ignored_template_inputs: - return True - return not (name in ignored_template_inputs) - - def get_description_from_spec(object, name, spec): ''' Generates a description based on the input or output spec. 
@@ -506,21 +434,3 @@ def generate_custom_inputs(desc_inputs): for value in desc_input['value-choices']: custom_input_dicts.append({desc_input['id']: value}) return custom_input_dicts - - -def generate_random_number_input(desc_input): - ''' - Generates a random number input based on the input spec - ''' - if not desc_input.get('minimum') and not desc_input.get('maximum'): - return 1 - - if desc_input.get('integer'): - offset = 1 - else: - offset = 0.1 - - if desc_input.get('minimum'): - return desc_input['minimum'] if desc_input.get('exclusive-minimum') else desc_input['minimum'] + offset - if desc_input.get('maximum'): - return desc_input['maximum'] if desc_input.get('exclusive-maximum') else desc_input['maximum'] - offset diff --git a/nipype/utils/tests/test_nipype2boutiques.py b/nipype/utils/tests/test_nipype2boutiques.py index ac12c527f7..926de0f2cd 100644 --- a/nipype/utils/tests/test_nipype2boutiques.py +++ b/nipype/utils/tests/test_nipype2boutiques.py @@ -10,10 +10,8 @@ def test_generate(): generate_boutiques_descriptor(module='nipype.interfaces.ants.registration', interface_name='ANTS', - ignored_template_inputs=(), container_image=None, container_index=None, container_type=None, verbose=False, - ignore_template_numbers=False, save=False) From f67d04b4b1c45728ca1961f8c11b859c35f5a852 Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Wed, 27 Feb 2019 19:20:43 -0500 Subject: [PATCH 0169/1665] fixed small error --- nipype/utils/nipype2boutiques.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py index ff389fdb52..aba64c4414 100644 --- a/nipype/utils/nipype2boutiques.py +++ b/nipype/utils/nipype2boutiques.py @@ -131,8 +131,7 @@ def generate_boutiques_descriptor( def generate_tool_outputs(outputs, interface, tool_desc, verbose, first_run): for name, spec in sorted(outputs.traits(transient=None).items()): - output = get_boutiques_output(outputs, name, spec, interface, tool_desc['inputs'], - verbose) + output = get_boutiques_output(outputs, name, spec, interface, tool_desc['inputs']) # If this is the first time we are generating outputs, add the full output to the descriptor. # Otherwise, find the existing output and update its path template if it's still undefined. if first_run: From e6dfe7d2aa5240fb7d68c891c04d78b83cf3c69f Mon Sep 17 00:00:00 2001 From: rciric Date: Fri, 1 Mar 2019 00:48:20 -0800 Subject: [PATCH 0170/1665] ensure s is defined, support NaN failure mode with empty mask --- nipype/algorithms/confounds.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index 03b74a340f..e0d8df6f68 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -1299,9 +1299,10 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, # principal components using a singular value decomposition." 
try: u, s, _ = fallback_svd(M, full_matrices=False) - except np.linalg.LinAlgError: + except (np.linalg.LinAlgError, ValueError): if failure_mode == 'error': raise + s = np.full(M.shape[0], np.nan, dtype=np.float32) if components_criterion >= 1: u = np.full((M.shape[0], components_criterion), np.nan, dtype=np.float32) From 7219ca3adff72ba3d87e53f4253f9430dbb09c81 Mon Sep 17 00:00:00 2001 From: BenjaminMey <34044274+BenjaminMey@users.noreply.github.com> Date: Sat, 9 Mar 2019 20:59:17 -0500 Subject: [PATCH 0171/1665] Update outputs for warped_file log_file to cwd --- nipype/interfaces/fsl/preprocess.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/fsl/preprocess.py b/nipype/interfaces/fsl/preprocess.py index da06a5c574..66e3c5904d 100644 --- a/nipype/interfaces/fsl/preprocess.py +++ b/nipype/interfaces/fsl/preprocess.py @@ -1160,7 +1160,7 @@ def _list_outputs(self): if suffix.endswith('.txt'): change_ext = False if isdefined(inval): - outputs[key] = inval + outputs[key] = os.path.abspath(inval) else: outputs[key] = self._gen_fname( self.inputs.in_file, From 2bf03cf1d6287984da2606cadde30d12e7408102 Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Tue, 12 Mar 2019 16:20:32 -0400 Subject: [PATCH 0172/1665] added author and ignored inputs parameters, removed nipype from command line, added logic to deal with inputs containing name_source and name_template metadata --- nipype/utils/nipype2boutiques.py | 53 +++++++++++++++++++++++++------- 1 file changed, 42 insertions(+), 11 deletions(-) diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py index aba64c4414..0472673469 100644 --- a/nipype/utils/nipype2boutiques.py +++ b/nipype/utils/nipype2boutiques.py @@ -20,7 +20,7 @@ def generate_boutiques_descriptor( module, interface_name, container_image, container_type, container_index=None, - verbose=False, save=False, save_path=None): + verbose=False, save=False, save_path=None, author=None, ignore_inputs=None): ''' Returns a JSON string containing a JSON Boutiques description of a Nipype interface. Arguments: @@ -32,6 +32,8 @@ def generate_boutiques_descriptor( * verbose: print information messages * save: True if you want to save descriptor to a file * save_path: file path for the saved descriptor (defaults to name of the interface in current directory) + * author: author of the tool (required for publishing) + * ignore_inputs: list of interface inputs to not include in the descriptor ''' if not module: @@ -53,7 +55,10 @@ def generate_boutiques_descriptor( tool_desc = {} tool_desc['name'] = interface_name tool_desc[ - 'command-line'] = "nipype_cmd " + module_name + " " + interface_name + " " + 'command-line'] = interface_name + " " + tool_desc['author'] = "Nipype (interface)" + if author is not None: + tool_desc['author'] = tool_desc['author'] + ", " + author + " (tool)" tool_desc[ 'description'] = interface_name + ", as implemented in Nipype (module: " + module_name + ", interface: " + interface_name + ")." 
tool_desc['inputs'] = [] @@ -70,8 +75,10 @@ def generate_boutiques_descriptor( # Generates tool inputs for name, spec in sorted(interface.inputs.traits(transient=None).items()): - input = get_boutiques_input(inputs, interface, name, spec, verbose) + input = get_boutiques_input(inputs, interface, name, spec, verbose, ignore_inputs=ignore_inputs) # Handle compound inputs (inputs that can be of multiple types and are mutually exclusive) + if input is None: + continue if isinstance(input, list): mutex_group_members = [] tool_desc['command-line'] += input[0]['value-key'] + " " @@ -91,9 +98,6 @@ def generate_boutiques_descriptor( if verbose: print("-> Adding input " + input['name']) - # Remove the extra space at the end of the command line - tool_desc['command-line'] = tool_desc['command-line'].strip() - # Generates input groups tool_desc['groups'] += get_boutiques_groups(interface.inputs.traits(transient=None).items()) if len(tool_desc['groups']) == 0: @@ -116,6 +120,9 @@ def generate_boutiques_descriptor( if output['path-template'] == "": fill_in_missing_output_path(output, output['name'], tool_desc['inputs']) + # Remove the extra space at the end of the command line + tool_desc['command-line'] = tool_desc['command-line'].strip() + # Save descriptor to a file if save: path = save_path if save_path is not None else os.path.join(os.getcwd(), interface_name + '.json') @@ -136,6 +143,8 @@ def generate_tool_outputs(outputs, interface, tool_desc, verbose, first_run): # Otherwise, find the existing output and update its path template if it's still undefined. if first_run: tool_desc['output-files'].append(output) + if output.get('value-key'): + tool_desc['command-line'] += output['value-key'] + " " if verbose: print("-> Adding output " + output['name']) else: @@ -143,13 +152,15 @@ def generate_tool_outputs(outputs, interface, tool_desc, verbose, first_run): if output['id'] == existing_output['id'] and existing_output['path-template'] == "": existing_output['path-template'] = output['path-template'] break + if output.get('value-key') and output['value-key'] not in tool_desc['command-line']: + tool_desc['command-line'] += output['value-key'] + " " if len(tool_desc['output-files']) == 0: raise Exception("Tool has no output.") -def get_boutiques_input(inputs, interface, input_name, spec, - verbose, handler=None, input_number=None): +def get_boutiques_input(inputs, interface, input_name, spec, verbose, handler=None, + input_number=None, ignore_inputs=None): """ Returns a dictionary containing the Boutiques input corresponding to a Nipype intput. @@ -161,11 +172,17 @@ def get_boutiques_input(inputs, interface, input_name, spec, * verbose: print information messages. * handler: used when handling compound inputs, which don't have their own input spec * input_number: used when handling compound inputs to assign each a unique ID + * ignore_inputs: list of interface inputs to not include in the descriptor Assumes that: * Input names are unique. """ + # If spec has a name source, means it's an output, so skip it here. 
+ # Also skip any ignored inputs + if spec.name_source or ignore_inputs is not None and input_name in ignore_inputs: + return None + input = {} if input_number is not None and input_number != 0: # No need to append a number to the first of a list of compound inputs @@ -339,12 +356,26 @@ def get_boutiques_output(outputs, name, spec, interface, tool_inputs): output['path-template'] = "*" return output - # If an output value is defined, use its relative path - # Otherwise, put blank string and try to fill it on another iteration + # If an output value is defined, use its relative path, if one exists. + # If no relative path, look for an input with the same name containing a name source + # and name template. Otherwise, put blank string as placeholder and try to fill it on + # another iteration. + output['path-template'] = "" + if output_value: output['path-template'] = os.path.relpath(output_value) else: - output['path-template'] = "" + for inp_name, inp_spec in sorted(interface.inputs.traits(transient=None).items()): + if inp_name == name and inp_spec.name_source and inp_spec.name_template: + if isinstance(inp_spec.name_source, list): + source = inp_spec.name_source[0] + else: + source = inp_spec.name_source + output['path-template'] = inp_spec.name_template.replace("%s", "[" + source.upper() + "]") + output['value-key'] = "[" + name.upper() + "]" + if inp_spec.argstr and inp_spec.argstr.split("%")[0]: + output['command-line-flag'] = inp_spec.argstr.split("%")[0].strip() + break return output From d009a88ac46e8971bf727f1221f18808014f74be Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Tue, 12 Mar 2019 16:57:10 -0400 Subject: [PATCH 0173/1665] fixed code quality issues --- nipype/utils/nipype2boutiques.py | 120 +++++++++++++++---------------- 1 file changed, 60 insertions(+), 60 deletions(-) diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py index 0472673469..e5b91b0ea7 100644 --- a/nipype/utils/nipype2boutiques.py +++ b/nipype/utils/nipype2boutiques.py @@ -75,28 +75,28 @@ def generate_boutiques_descriptor( # Generates tool inputs for name, spec in sorted(interface.inputs.traits(transient=None).items()): - input = get_boutiques_input(inputs, interface, name, spec, verbose, ignore_inputs=ignore_inputs) + inp = get_boutiques_input(inputs, interface, name, spec, verbose, ignore_inputs=ignore_inputs) # Handle compound inputs (inputs that can be of multiple types and are mutually exclusive) - if input is None: + if inp is None: continue - if isinstance(input, list): + if isinstance(inp, list): mutex_group_members = [] - tool_desc['command-line'] += input[0]['value-key'] + " " - for i in input: + tool_desc['command-line'] += inp[0]['value-key'] + " " + for i in inp: tool_desc['inputs'].append(i) mutex_group_members.append(i['id']) if verbose: print("-> Adding input " + i['name']) # Put inputs into a mutually exclusive group - tool_desc['groups'].append({'id': input[0]['id'] + "_group", - 'name': input[0]['name'] + " group", + tool_desc['groups'].append({'id': inp[0]['id'] + "_group", + 'name': inp[0]['name'] + " group", 'members': mutex_group_members, 'mutually-exclusive': True}) else: - tool_desc['inputs'].append(input) - tool_desc['command-line'] += input['value-key'] + " " + tool_desc['inputs'].append(inp) + tool_desc['command-line'] += inp['value-key'] + " " if verbose: - print("-> Adding input " + input['name']) + print("-> Adding input " + inp['name']) # Generates input groups tool_desc['groups'] += 
get_boutiques_groups(interface.inputs.traits(transient=None).items()) @@ -183,14 +183,14 @@ def get_boutiques_input(inputs, interface, input_name, spec, verbose, handler=No if spec.name_source or ignore_inputs is not None and input_name in ignore_inputs: return None - input = {} + inp = {} if input_number is not None and input_number != 0: # No need to append a number to the first of a list of compound inputs - input['id'] = input_name + "_" + str(input_number + 1) + inp['id'] = input_name + "_" + str(input_number + 1) else: - input['id'] = input_name + inp['id'] = input_name - input['name'] = input_name.replace('_', ' ').capitalize() + inp['name'] = input_name.replace('_', ' ').capitalize() if handler is None: trait_handler = spec.handler @@ -212,70 +212,70 @@ def get_boutiques_input(inputs, interface, input_name, spec, verbose, handler=No return input_list if handler_type == "File" or handler_type == "Directory": - input['type'] = "File" + inp['type'] = "File" elif handler_type == "Int": - input['type'] = "Number" - input['integer'] = True + inp['type'] = "Number" + inp['integer'] = True elif handler_type == "Float": - input['type'] = "Number" + inp['type'] = "Number" elif handler_type == "Bool": - input['type'] = "Flag" + inp['type'] = "Flag" else: - input['type'] = "String" + inp['type'] = "String" # Deal with range inputs if handler_type == "Range": - input['type'] = "Number" + inp['type'] = "Number" if trait_handler._low is not None: - input['minimum'] = trait_handler._low + inp['minimum'] = trait_handler._low if trait_handler._high is not None: - input['maximum'] = trait_handler._high + inp['maximum'] = trait_handler._high if trait_handler._exclude_low: - input['exclusive-minimum'] = True + inp['exclusive-minimum'] = True if trait_handler._exclude_high: - input['exclusive-maximum'] = True + inp['exclusive-maximum'] = True # Deal with list inputs # TODO handle lists of lists (e.g. 
FSL ProbTrackX seed input) if handler_type == "List": - input['list'] = True + inp['list'] = True trait_type = type(trait_handler.item_trait.trait_type).__name__ if trait_type == "Int": - input['integer'] = True - input['type'] = "Number" + inp['integer'] = True + inp['type'] = "Number" elif trait_type == "Float": - input['type'] = "Number" + inp['type'] = "Number" elif trait_type == "File": - input['type'] = "File" + inp['type'] = "File" else: - input['type'] = "String" + inp['type'] = "String" if trait_handler.minlen != 0: - input['min-list-entries'] = trait_handler.minlen + inp['min-list-entries'] = trait_handler.minlen if trait_handler.maxlen != six.MAXSIZE: - input['max-list-entries'] = trait_handler.maxlen + inp['max-list-entries'] = trait_handler.maxlen # Deal with multi-input if handler_type == "InputMultiObject": - input['type'] = "File" - input['list'] = True + inp['type'] = "File" + inp['list'] = True - input['value-key'] = "[" + input_name.upper( + inp['value-key'] = "[" + input_name.upper( ) + "]" # assumes that input names are unique # Add the command line flag specified by argstr # If no argstr is provided and input type is Flag, create a flag from the name if spec.argstr and spec.argstr.split("%")[0]: - input['command-line-flag'] = spec.argstr.split("%")[0].strip() - elif input['type'] == "Flag": - input['command-line-flag'] = ("--%s" % input_name + " ").strip() + inp['command-line-flag'] = spec.argstr.split("%")[0].strip() + elif inp['type'] == "Flag": + inp['command-line-flag'] = ("--%s" % input_name + " ").strip() - input['description'] = get_description_from_spec(inputs, input_name, spec) + inp['description'] = get_description_from_spec(inputs, input_name, spec) if not (hasattr(spec, "mandatory") and spec.mandatory): - input['optional'] = True + inp['optional'] = True else: - input['optional'] = False + inp['optional'] = False if spec.usedefault: - input['default-value'] = spec.default_value()[1] + inp['default-value'] = spec.default_value()[1] try: value_choices = trait_handler.values @@ -284,17 +284,17 @@ def get_boutiques_input(inputs, interface, input_name, spec, verbose, handler=No else: if value_choices is not None: if all(isinstance(n, int) for n in value_choices): - input['type'] = "Number" - input['integer'] = True + inp['type'] = "Number" + inp['integer'] = True elif all(isinstance(n, float) for n in value_choices): - input['type'] = "Number" - input['value-choices'] = value_choices + inp['type'] = "Number" + inp['value-choices'] = value_choices # Set Boolean types to Flag (there is no Boolean type in Boutiques) - if input['type'] == "Boolean": - input['type'] = "Flag" + if inp['type'] == "Boolean": + inp['type'] = "Flag" - return input + return inp def get_boutiques_output(outputs, name, spec, interface, tool_inputs): @@ -400,28 +400,28 @@ def get_boutiques_groups(input_traits): mutex_input_sets.append(group_members) # Create a dictionary for each one - for i in range(0, len(all_or_none_input_sets)): - desc_groups.append({'id': "all_or_none_group" + ("_" + str(i + 1) if i != 0 else ""), - 'name': "All or none group" + (" " + str(i + 1) if i != 0 else ""), - 'members': list(all_or_none_input_sets[i]), + for i, inp_set in enumerate(all_or_none_input_sets, 1): + desc_groups.append({'id': "all_or_none_group" + ("_" + str(i) if i != 1 else ""), + 'name': "All or none group" + (" " + str(i) if i != 1 else ""), + 'members': list(inp_set), 'all-or-none': True}) - for i in range(0, len(mutex_input_sets)): - desc_groups.append({'id': "mutex_group" + ("_" + str(i + 1) 
if i != 0 else ""), - 'name': "Mutex group" + (" " + str(i + 1) if i != 0 else ""), - 'members': list(mutex_input_sets[i]), + for i, inp_set in enumerate(mutex_input_sets, 1): + desc_groups.append({'id': "mutex_group" + ("_" + str(i) if i != 1 else ""), + 'name': "Mutex group" + (" " + str(i) if i != 1 else ""), + 'members': list(inp_set), 'mutually-exclusive': True}) return desc_groups -def get_description_from_spec(object, name, spec): +def get_description_from_spec(obj, name, spec): ''' Generates a description based on the input or output spec. ''' if not spec.desc: spec.desc = "No description provided." - spec_info = spec.full_info(object, name, None) + spec_info = spec.full_info(obj, name, None) boutiques_description = (spec_info.capitalize( ) + ". " + spec.desc.capitalize()).replace("\n", '') From 6a49acd6d3285a23bb95960bfdc70a10b9431372 Mon Sep 17 00:00:00 2001 From: Michael Joseph Date: Mon, 18 Mar 2019 16:51:24 -0400 Subject: [PATCH 0174/1665] initial attempt at adding mrtrix commands --- nipype/interfaces/fsl/epi.py | 3 +- nipype/interfaces/mrtrix3/__init__.py | 2 +- nipype/interfaces/mrtrix3/preprocess.py | 145 +++++++++++++++++++++++- 3 files changed, 146 insertions(+), 4 deletions(-) diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index 3e47576ec7..bbe84149c8 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -1383,7 +1383,7 @@ def _list_outputs(self): # If the output directory isn't defined, the interface seems to use # the default but not set its value in `self.inputs.output_dir` if not isdefined(self.inputs.output_dir): - out_dir = os.path.abspath(os.path.basename(self.inputs.base_name) + '.qc.nii.gz') + out_dir = os.path.abspath(os.path.basename(self.inputs.base_name) + '.qc') else: out_dir = os.path.abspath(self.inputs.output_dir) @@ -1421,4 +1421,3 @@ def _list_outputs(self): outputs['clean_volumes'] = clean_volumes return outputs - diff --git a/nipype/interfaces/mrtrix3/__init__.py b/nipype/interfaces/mrtrix3/__init__.py index 507380c30e..c9c131dde3 100644 --- a/nipype/interfaces/mrtrix3/__init__.py +++ b/nipype/interfaces/mrtrix3/__init__.py @@ -6,7 +6,7 @@ from .utils import (Mesh2PVE, Generate5tt, BrainMask, TensorMetrics, ComputeTDI, TCK2VTK, MRMath, MRConvert, DWIExtract) from .preprocess import (ResponseSD, ACTPrepareFSL, ReplaceFSwithFIRST, - DWIDenoise) + DWIDenoise, MRDeGibbs, DWIBiasCorrect) from .tracking import Tractography from .reconst import FitTensor, EstimateFOD from .connectivity import LabelConfig, LabelConvert, BuildConnectome diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py index fc3559c918..935d978b60 100644 --- a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ -21,7 +21,7 @@ class DWIDenoiseInputSpec(MRTrix3BaseInputSpec): mask = File( exists=True, argstr='-mask %s', - position=1, + #osition=1, desc='mask image') extent = traits.Tuple((traits.Int, traits.Int, traits.Int), argstr='-extent %d,%d,%d', @@ -75,6 +75,149 @@ class DWIDenoise(MRTrix3Base): output_spec = DWIDenoiseOutputSpec +class MRDeGibbsInputSpec(MRTrix3BaseInputSpec): + in_file = File( + exists=True, + argstr='%s', + position=-2, + mandatory=True, + desc='input DWI image') + axes = InputMultiObject( + traits.Int, + value=[0], + usedefault=True, + argstr='-axes %d', + desc='select the slice axes (default = 0)') + nshifts = InputMultiObject( + traits.Int, + value=[20], + usedefault=True, + argstr='-nshifts %d', + desc='discretizaiton of subpixel 
spacing (default = 20)') + minW = InputMultiObject( + traits.Int, + value=[1], + usedefault=True, + argstr='-minW %d', + desc='left border of window used for TV computation (default = 1)') + maxW = InputMultiObject( + traits.Int, + value=[3], + usedefault=True, + argstr='-maxW %d', + desc='right border of window used for TV computation (default = 3)') + out_file = File(name_template='%s_unring', + name_source='in_file', + keep_extension=True, + argstr="%s", + position=-1, + desc="the output unringed DWI image") + +class MRDeGibbsOutputSpec(TraitedSpec): + out_file = File(desc="the output unringed DWI image", exists=True) + +class MRDeGibbs(MRTrix3Base): + """ + Remove Gibbs ringing artifacts. + + This application attempts to remove Gibbs ringing artefacts from MRI images + using the method of local subvoxel-shifts proposed by Kellner et al. + + This command is designed to run on data directly after it has been + reconstructed by the scanner, before any interpolation of any kind has + taken place. You should not run this command after any form of motion + correction (e.g. not after dwipreproc). Similarly, if you intend running + dwidenoise, you should run this command afterwards, since it has the + potential to alter the noise structure, which would impact on dwidenoise’s + performance. + + Note that this method is designed to work on images acquired with full + k-space coverage. Running this method on partial Fourier (‘half-scan’) data + may lead to suboptimal and/or biased results, as noted in the original + reference below. There is currently no means of dealing with this; users + should exercise caution when using this method on partial Fourier data, and + inspect its output for any obvious artefacts. + + For more information, see + + + Example + ------- + + >>> import nipype.interfaces.mrtrix3 as mrt + >>> unring = mrt.MRDeGibbs() + >>> unring.inputs.in_file = 'dwi.mif' + >>> unring.cmdline + 'mrdegibbs dwi.mif dwi_unring.mif' + >>> unring.run() + """ + + _cmd = 'mrdegibbs' + input_spec = MRDeGibbsInputSpec + output_spec = MRDeGibbsOutputSpec + + +class DWIBiasCorrectInputSpec(MRTrix3BaseInputSpec): + in_file = File( + exists=True, + argstr='%s', + position=-2, + mandatory=True, + desc='input DWI image') + mask = File( + argstr='-mask %s', + desc='mask image') + bias = File( + argstr='-bias %s', + desc='bias field') + ants = traits.Bool( + True, + argstr='-ants', + desc='use ANTS N4') + fsl = traits.Bool( + False, + argstr='-fsl', + desc='use FSL FAST', + min_ver='5.0.10') + grad = File( + argstr='-grad %s', + desc='diffusion gradient table in MRtrix format') + fslgrad = File( + argstr='-fslgrad %s %s', + desc='diffusion gradient table in FSL bvecs/bvals format') + out_file = File(name_template='%s_unbias', + name_source='in_file', + keep_extension=True, + argstr="%s", + position=-1, + desc="the output bias corrected DWI image") + +class DWIBiasCorrectOutputSpec(TraitedSpec): + out_file = File(desc="the output bias corrected DWI image", exists=True) + +class DWIBiasCorrect(MRTrix3Base): + """ + Perform B1 field inhomogeneity correction for a DWI volume series. 
+ + For more information, see + + + Example + ------- + + >>> import nipype.interfaces.mrtrix3 as mrt + >>> bias_correct = mrt.DWIBiasCorrect() + >>> bias_correct.inputs.in_file = 'dwi.mif' + >>> bias_correct.cmdline + 'dwibiascorrect dwi.mif dwi_unbias.mif' + >>> bias_correct.run() + """ + + _cmd = 'dwibiascorrect' + input_spec = DWIBiasCorrectInputSpec + output_spec = DWIBiasCorrectOutputSpec + + class ResponseSDInputSpec(MRTrix3BaseInputSpec): algorithm = traits.Enum( 'msmt_5tt', From 63d96be755b3f550fc3db97ff2ac4272b54825ff Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Mon, 18 Mar 2019 17:17:14 -0400 Subject: [PATCH 0175/1665] added option to supply descriptor tags, changed default tool version to 1.0.0, added logic to deal with 0/1 booleans and tuples, added list separator and flag separator checks --- nipype/utils/nipype2boutiques.py | 59 +++++++++++++++++++++++++++----- 1 file changed, 50 insertions(+), 9 deletions(-) diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py index e5b91b0ea7..6f2988c4ba 100644 --- a/nipype/utils/nipype2boutiques.py +++ b/nipype/utils/nipype2boutiques.py @@ -20,7 +20,7 @@ def generate_boutiques_descriptor( module, interface_name, container_image, container_type, container_index=None, - verbose=False, save=False, save_path=None, author=None, ignore_inputs=None): + verbose=False, save=False, save_path=None, author=None, ignore_inputs=None, tags=None): ''' Returns a JSON string containing a JSON Boutiques description of a Nipype interface. Arguments: @@ -34,6 +34,8 @@ def generate_boutiques_descriptor( * save_path: file path for the saved descriptor (defaults to name of the interface in current directory) * author: author of the tool (required for publishing) * ignore_inputs: list of interface inputs to not include in the descriptor + * tags: JSON object containing tags to include in the descriptor, e.g. "{/"key1/": /"value1/"}" + (note: the tags 'domain:neuroinformatics' and 'interface-type:nipype' are included by default) ''' if not module: @@ -64,7 +66,7 @@ def generate_boutiques_descriptor( tool_desc['inputs'] = [] tool_desc['output-files'] = [] tool_desc['groups'] = [] - tool_desc['tool-version'] = interface.version if interface.version is not None else "No version provided." 
+ tool_desc['tool-version'] = interface.version if interface.version is not None else "1.0.0" tool_desc['schema-version'] = '0.5' if container_image: tool_desc['container-image'] = {} @@ -120,6 +122,24 @@ def generate_boutiques_descriptor( if output['path-template'] == "": fill_in_missing_output_path(output, output['name'], tool_desc['inputs']) + # Add tags + desc_tags = { + 'domain': 'neuroinformatics', + 'source': 'nipype-interface' + } + + if tags is not None: + tags_dict = json.loads(tags) + for k, v in tags_dict.items(): + if k in desc_tags: + if not isinstance(desc_tags[k], list): + desc_tags[k] = [desc_tags[k]] + desc_tags[k].append(v) + else: + desc_tags[k] = v + + tool_desc['tags'] = desc_tags + # Remove the extra space at the end of the command line tool_desc['command-line'] = tool_desc['command-line'].strip() @@ -219,7 +239,13 @@ def get_boutiques_input(inputs, interface, input_name, spec, verbose, handler=No elif handler_type == "Float": inp['type'] = "Number" elif handler_type == "Bool": - inp['type'] = "Flag" + if spec.argstr and len(spec.argstr.split("=")) > 1 and (spec.argstr.split("=")[1] == '0' or spec.argstr.split("=")[1] == '1'): + inp['type'] = "Number" + inp['integer'] = True + inp['minimum'] = 0 + inp['maximum'] = 1 + else: + inp['type'] = "Flag" else: inp['type'] = "String" @@ -253,6 +279,21 @@ def get_boutiques_input(inputs, interface, input_name, spec, verbose, handler=No inp['min-list-entries'] = trait_handler.minlen if trait_handler.maxlen != six.MAXSIZE: inp['max-list-entries'] = trait_handler.maxlen + if spec.sep: + inp['list-separator'] = spec.sep + + if handler_type == "Tuple": + inp['list'] = True + inp['min-list-entries'] = len(spec.default) + inp['max-list-entries'] = len(spec.default) + input_type = type(spec.default[0]).__name__ + if input_type == 'int': + inp['type'] = "Number" + inp['integer'] = True + elif input_type == 'float': + inp['type'] = "Number" + else: + inp['type'] = "String" # Deal with multi-input if handler_type == "InputMultiObject": @@ -264,8 +305,12 @@ def get_boutiques_input(inputs, interface, input_name, spec, verbose, handler=No # Add the command line flag specified by argstr # If no argstr is provided and input type is Flag, create a flag from the name - if spec.argstr and spec.argstr.split("%")[0]: - inp['command-line-flag'] = spec.argstr.split("%")[0].strip() + if spec.argstr: + if "=" in spec.argstr: + inp['command-line-flag'] = spec.argstr.split("=")[0].strip() + inp['command-line-flag-separator'] = "=" + elif spec.argstr.split("%")[0]: + inp['command-line-flag'] = spec.argstr.split("%")[0].strip() elif inp['type'] == "Flag": inp['command-line-flag'] = ("--%s" % input_name + " ").strip() @@ -290,10 +335,6 @@ def get_boutiques_input(inputs, interface, input_name, spec, verbose, handler=No inp['type'] = "Number" inp['value-choices'] = value_choices - # Set Boolean types to Flag (there is no Boolean type in Boutiques) - if inp['type'] == "Boolean": - inp['type'] = "Flag" - return inp From c32258cded70494d1ffc3ae73dc943517839efb8 Mon Sep 17 00:00:00 2001 From: Michael Joseph Date: Tue, 19 Mar 2019 18:15:15 -0400 Subject: [PATCH 0176/1665] additional changes to new mrtrix3 interfaces --- nipype/interfaces/mrtrix3/preprocess.py | 57 +++++++++++++++---------- 1 file changed, 35 insertions(+), 22 deletions(-) diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py index 935d978b60..a2b434e43b 100644 --- a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ 
-26,17 +26,22 @@ class DWIDenoiseInputSpec(MRTrix3BaseInputSpec): extent = traits.Tuple((traits.Int, traits.Int, traits.Int), argstr='-extent %d,%d,%d', desc='set the window size of the denoising filter. (default = 5,5,5)') - noise = File( + out_noise = File(name_template='%s_noise', + name_source='in_file', + keep_extension=True, argstr='-noise %s', - desc='noise map') + desc="the output noise map", + genfile=True) out_file = File(name_template='%s_denoised', name_source='in_file', keep_extension=True, argstr="%s", position=-1, - desc="the output denoised DWI image") + desc="the output denoised DWI image", + genfile=True) class DWIDenoiseOutputSpec(TraitedSpec): + out_noise = File(desc="the output noise map", exists=True) out_file = File(desc="the output denoised DWI image", exists=True) class DWIDenoise(MRTrix3Base): @@ -84,10 +89,10 @@ class MRDeGibbsInputSpec(MRTrix3BaseInputSpec): desc='input DWI image') axes = InputMultiObject( traits.Int, - value=[0], + value=[0,1], usedefault=True, - argstr='-axes %d', - desc='select the slice axes (default = 0)') + argstr='-axes %s', # how to define list? + desc='select the slice axes (default = 0,1)') nshifts = InputMultiObject( traits.Int, value=[20], @@ -99,19 +104,22 @@ class MRDeGibbsInputSpec(MRTrix3BaseInputSpec): value=[1], usedefault=True, argstr='-minW %d', - desc='left border of window used for TV computation (default = 1)') + desc='left border of window used for total variation (TV) computation ' + '(default = 1)') maxW = InputMultiObject( traits.Int, value=[3], usedefault=True, argstr='-maxW %d', - desc='right border of window used for TV computation (default = 3)') + desc='right border of window used for total variation (TV) computation ' + '(default = 3)') out_file = File(name_template='%s_unring', name_source='in_file', keep_extension=True, argstr="%s", position=-1, - desc="the output unringed DWI image") + desc="the output unringed DWI image", + genfile=True) class MRDeGibbsOutputSpec(TraitedSpec): out_file = File(desc="the output unringed DWI image", exists=True) @@ -147,9 +155,9 @@ class MRDeGibbs(MRTrix3Base): >>> import nipype.interfaces.mrtrix3 as mrt >>> unring = mrt.MRDeGibbs() >>> unring.inputs.in_file = 'dwi.mif' - >>> unring.cmdline + >>> unring.cmdline # doctest: +ELLIPSIS 'mrdegibbs dwi.mif dwi_unring.mif' - >>> unring.run() + >>> unring.run() # doctest: +SKIP """ _cmd = 'mrdegibbs' @@ -164,33 +172,38 @@ class DWIBiasCorrectInputSpec(MRTrix3BaseInputSpec): position=-2, mandatory=True, desc='input DWI image') - mask = File( + in_mask = File( argstr='-mask %s', desc='mask image') - bias = File( - argstr='-bias %s', - desc='bias field') ants = traits.Bool( True, argstr='-ants', - desc='use ANTS N4') + desc='use ANTS N4 to estimate the inhomogeneity field') fsl = traits.Bool( False, argstr='-fsl', - desc='use FSL FAST', + desc='use FSL FAST to estimate the inhomogeneity field', min_ver='5.0.10') + # only one of either grad or fslgrad should be supplied grad = File( argstr='-grad %s', desc='diffusion gradient table in MRtrix format') fslgrad = File( argstr='-fslgrad %s %s', desc='diffusion gradient table in FSL bvecs/bvals format') - out_file = File(name_template='%s_unbias', + out_bias = File(name_template='%s_biasfield', + name_source='in_file', + keep_extension=True, + argstr='-bias %s', + desc='bias field', + genfile=True) + out_file = File(name_template='%s_biascorr', name_source='in_file', keep_extension=True, argstr="%s", position=-1, - desc="the output bias corrected DWI image") + desc="the output bias corrected DWI 
image", + genfile=True) class DWIBiasCorrectOutputSpec(TraitedSpec): out_file = File(desc="the output bias corrected DWI image", exists=True) @@ -208,9 +221,9 @@ class DWIBiasCorrect(MRTrix3Base): >>> import nipype.interfaces.mrtrix3 as mrt >>> bias_correct = mrt.DWIBiasCorrect() >>> bias_correct.inputs.in_file = 'dwi.mif' - >>> bias_correct.cmdline - 'dwibiascorrect dwi.mif dwi_unbias.mif' - >>> bias_correct.run() + >>> bias_correct.cmdline # doctest: +ELLIPSIS + 'dwibiascorrect dwi.mif dwi_biascorr.mif' + >>> bias_correct.run() # doctest: +SKIP """ _cmd = 'dwibiascorrect' From 1e15b9989f2498e698294532c1a0703596c7b3e2 Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Tue, 19 Mar 2019 22:48:42 -0400 Subject: [PATCH 0177/1665] added method to take into account the order of positional command line args --- nipype/utils/nipype2boutiques.py | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py index 6f2988c4ba..c5280d2d45 100644 --- a/nipype/utils/nipype2boutiques.py +++ b/nipype/utils/nipype2boutiques.py @@ -140,6 +140,9 @@ def generate_boutiques_descriptor( tool_desc['tags'] = desc_tags + # Check for positional arguments and reorder command line args if necessary + tool_desc['command-line'] = reorder_cmd_line_args(tool_desc['command-line'], interface, ignore_inputs) + # Remove the extra space at the end of the command line tool_desc['command-line'] = tool_desc['command-line'].strip() @@ -505,3 +508,31 @@ def generate_custom_inputs(desc_inputs): for value in desc_input['value-choices']: custom_input_dicts.append({desc_input['id']: value}) return custom_input_dicts + + +def reorder_cmd_line_args(cmd_line, interface, ignore_inputs=None): + ''' + Generates a new command line with the positional arguments in the correct order + ''' + interface_name = cmd_line.split()[0] + positional_arg_dict = {} + positional_args = [] + non_positional_args = [] + + for name, spec in sorted(interface.inputs.traits(transient=None).items()): + if ignore_inputs is not None and name in ignore_inputs: + continue + value_key = "[" + name.upper() + "]" + if spec.position is not None: + positional_arg_dict[spec.position] = value_key + else: + non_positional_args.append(value_key) + + last_arg = None + for item in sorted(positional_arg_dict.items()): + if item[0] == -1: + last_arg = item[1] + continue + positional_args.append(item[1]) + + return interface_name + " " + " ".join(positional_args) + " " + ((last_arg + " ") if last_arg else "") + " ".join(non_positional_args) From 83bc6a4ab204dd41b1316a4ca86c8e90cbaf52f1 Mon Sep 17 00:00:00 2001 From: Michael Joseph Date: Mon, 25 Mar 2019 07:14:08 -0400 Subject: [PATCH 0178/1665] add noisemap to dwidenoise and fix mrdegibbs --- nipype/interfaces/mrtrix3/preprocess.py | 43 +++++++++++++------------ 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py index a2b434e43b..273ae468e2 100644 --- a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ -26,7 +26,7 @@ class DWIDenoiseInputSpec(MRTrix3BaseInputSpec): extent = traits.Tuple((traits.Int, traits.Int, traits.Int), argstr='-extent %d,%d,%d', desc='set the window size of the denoising filter. 
(default = 5,5,5)') - out_noise = File(name_template='%s_noise', + out_noisemap = File(name_template='%s_noisemap', name_source='in_file', keep_extension=True, argstr='-noise %s', @@ -41,7 +41,7 @@ class DWIDenoiseInputSpec(MRTrix3BaseInputSpec): genfile=True) class DWIDenoiseOutputSpec(TraitedSpec): - out_noise = File(desc="the output noise map", exists=True) + out_noisemap = File(desc="the output noise map", exists=True) out_file = File(desc="the output denoised DWI image", exists=True) class DWIDenoise(MRTrix3Base): @@ -87,28 +87,27 @@ class MRDeGibbsInputSpec(MRTrix3BaseInputSpec): position=-2, mandatory=True, desc='input DWI image') - axes = InputMultiObject( - traits.Int, - value=[0,1], + axes = traits.ListInt( + default_value=[0,1], + sep=',', + minlen=1, + maxlen=4, usedefault=True, - argstr='-axes %s', # how to define list? + argstr='-axes %s', desc='select the slice axes (default = 0,1)') - nshifts = InputMultiObject( - traits.Int, - value=[20], + nshifts = traits.Int( + default_value=20, usedefault=True, argstr='-nshifts %d', desc='discretizaiton of subpixel spacing (default = 20)') - minW = InputMultiObject( - traits.Int, - value=[1], + minW = traits.Int( + default_value=1, usedefault=True, argstr='-minW %d', desc='left border of window used for total variation (TV) computation ' '(default = 1)') - maxW = InputMultiObject( - traits.Int, - value=[3], + maxW = traits.Int( + default_value=3, usedefault=True, argstr='-maxW %d', desc='right border of window used for total variation (TV) computation ' @@ -174,15 +173,16 @@ class DWIBiasCorrectInputSpec(MRTrix3BaseInputSpec): desc='input DWI image') in_mask = File( argstr='-mask %s', - desc='mask image') - ants = traits.Bool( - True, + desc='input mask image for bias field estimation') + _xor_inputs = ('use_ants', 'use_fsl') + use_ants = traits.Bool( argstr='-ants', - desc='use ANTS N4 to estimate the inhomogeneity field') - fsl = traits.Bool( - False, + desc='use ANTS N4 to estimate the inhomogeneity field', + xor=_xor_inputs) + use_fsl = traits.Bool( argstr='-fsl', desc='use FSL FAST to estimate the inhomogeneity field', + xor=_xor_inputs, min_ver='5.0.10') # only one of either grad or fslgrad should be supplied grad = File( @@ -206,6 +206,7 @@ class DWIBiasCorrectInputSpec(MRTrix3BaseInputSpec): genfile=True) class DWIBiasCorrectOutputSpec(TraitedSpec): + out_bias = File(desc="the output estimated bias field") out_file = File(desc="the output bias corrected DWI image", exists=True) class DWIBiasCorrect(MRTrix3Base): From 7269ff0fcace6409d316fd4248a1733d221490d8 Mon Sep 17 00:00:00 2001 From: mathiasg Date: Mon, 25 Mar 2019 13:12:46 -0400 Subject: [PATCH 0179/1665] maint: avoid deprecation warnings --- nipype/algorithms/tests/test_CompCor.py | 9 ++++++--- nipype/testing/__init__.py | 4 ++-- nipype/testing/decorators.py | 9 ++++----- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/nipype/algorithms/tests/test_CompCor.py b/nipype/algorithms/tests/test_CompCor.py index 488ad3c960..c31546a48f 100644 --- a/nipype/algorithms/tests/test_CompCor.py +++ b/nipype/algorithms/tests/test_CompCor.py @@ -108,15 +108,17 @@ def test_compcor_bad_input_shapes(self): data_file = utils.save_toy_nii(np.zeros(data_shape), 'temp.nii') interface = CompCor( realigned_file=data_file, mask_files=self.mask_files[0]) - with pytest.raises(ValueError, message="Dimension mismatch"): + with pytest.raises(ValueError): interface.run() + pytest.fail("Dimension mismatch") def test_tcompcor_bad_input_dim(self): bad_dims = (2, 2, 2) data_file = 
utils.save_toy_nii(np.zeros(bad_dims), 'temp.nii') interface = TCompCor(realigned_file=data_file) - with pytest.raises(ValueError, message='Not a 4D file'): + with pytest.raises(ValueError): interface.run() + pytest.fail("Not a 4D file") def test_tcompcor_merge_intersect_masks(self): for method in ['union', 'intersect']: @@ -145,8 +147,9 @@ def test_tcompcor_index_mask(self): def test_tcompcor_multi_mask_no_index(self): interface = TCompCor( realigned_file=self.realigned_file, mask_files=self.mask_files) - with pytest.raises(ValueError, message='more than one mask file'): + with pytest.raises(ValueError): interface.run() + pytest.fail("more than one mask file") def run_cc(self, ccinterface, diff --git a/nipype/testing/__init__.py b/nipype/testing/__init__.py index 9d57ba87af..2167e7e54a 100644 --- a/nipype/testing/__init__.py +++ b/nipype/testing/__init__.py @@ -16,10 +16,10 @@ template = funcfile transfm = funcfile -from . import decorators as dec +from . import decorators from .utils import package_check, TempFATFS -skipif = dec.skipif +skipif = decorators.dec.skipif def example_data(infile='functional.nii'): diff --git a/nipype/testing/decorators.py b/nipype/testing/decorators.py index f849815700..36c647634c 100644 --- a/nipype/testing/decorators.py +++ b/nipype/testing/decorators.py @@ -4,8 +4,7 @@ """ Extend numpy's decorators to use nipype's gui and data labels. """ - -from numpy.testing.decorators import knownfailureif, skipif +from numpy.testing import dec from nibabel.data import DataError @@ -81,19 +80,19 @@ def needs_review(msg): """ def skip_func(func): - return skipif(True, msg)(func) + return dec.skipif(True, msg)(func) return skip_func # Easier version of the numpy knownfailure def knownfailure(f): - return knownfailureif(True)(f) + return dec.knownfailureif(True)(f) def if_datasource(ds, msg): try: ds.get_filename() except DataError: - return skipif(True, msg) + return dec.skipif(True, msg) return lambda f: f From ef7c805b17abe6be5e4b07c318fa54d709df96e5 Mon Sep 17 00:00:00 2001 From: Michael Joseph Date: Mon, 25 Mar 2019 13:15:18 -0400 Subject: [PATCH 0180/1665] update mrdegibbs and dwibiascorrect --- nipype/interfaces/mrtrix3/preprocess.py | 61 +++++++++++++------------ 1 file changed, 33 insertions(+), 28 deletions(-) diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py index 273ae468e2..0334a7d063 100644 --- a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ -30,19 +30,19 @@ class DWIDenoiseInputSpec(MRTrix3BaseInputSpec): name_source='in_file', keep_extension=True, argstr='-noise %s', - desc="the output noise map", + desc='the output noise map', genfile=True) out_file = File(name_template='%s_denoised', name_source='in_file', keep_extension=True, - argstr="%s", + argstr='%s', position=-1, - desc="the output denoised DWI image", + desc='the output denoised DWI image', genfile=True) class DWIDenoiseOutputSpec(TraitedSpec): - out_noisemap = File(desc="the output noise map", exists=True) - out_file = File(desc="the output denoised DWI image", exists=True) + out_noisemap = File(desc='the output noise map', exists=True) + out_file = File(desc='the output denoised DWI image', exists=True) class DWIDenoise(MRTrix3Base): """ @@ -89,17 +89,18 @@ class MRDeGibbsInputSpec(MRTrix3BaseInputSpec): desc='input DWI image') axes = traits.ListInt( default_value=[0,1], - sep=',', - minlen=1, - maxlen=4, usedefault=True, + sep=',', + minlen=2, + maxlen=2, argstr='-axes %s', - desc='select the slice axes 
(default = 0,1)') + desc='indicate the plane in which the data was acquired (axial = 0,1; ' + 'coronal = 0,2; sagittal = 1,2') nshifts = traits.Int( default_value=20, usedefault=True, argstr='-nshifts %d', - desc='discretizaiton of subpixel spacing (default = 20)') + desc='discretization of subpixel spacing (default = 20)') minW = traits.Int( default_value=1, usedefault=True, @@ -112,16 +113,16 @@ class MRDeGibbsInputSpec(MRTrix3BaseInputSpec): argstr='-maxW %d', desc='right border of window used for total variation (TV) computation ' '(default = 3)') - out_file = File(name_template='%s_unring', + out_file = File(name_template='%s_unr', name_source='in_file', keep_extension=True, - argstr="%s", + argstr='%s', position=-1, - desc="the output unringed DWI image", + desc='the output unringed DWI image', genfile=True) class MRDeGibbsOutputSpec(TraitedSpec): - out_file = File(desc="the output unringed DWI image", exists=True) + out_file = File(desc='the output unringed DWI image', exists=True) class MRDeGibbs(MRTrix3Base): """ @@ -155,7 +156,7 @@ class MRDeGibbs(MRTrix3Base): >>> unring = mrt.MRDeGibbs() >>> unring.inputs.in_file = 'dwi.mif' >>> unring.cmdline # doctest: +ELLIPSIS - 'mrdegibbs dwi.mif dwi_unring.mif' + 'mrdegibbs dwi.mif dwi_unr.mif' >>> unring.run() # doctest: +SKIP """ @@ -174,23 +175,27 @@ class DWIBiasCorrectInputSpec(MRTrix3BaseInputSpec): in_mask = File( argstr='-mask %s', desc='input mask image for bias field estimation') - _xor_inputs = ('use_ants', 'use_fsl') + _xor_methods = ('use_ants', 'use_fsl') use_ants = traits.Bool( + default_value=True, + usedefault=True, argstr='-ants', desc='use ANTS N4 to estimate the inhomogeneity field', - xor=_xor_inputs) + xor=_xor_methods) use_fsl = traits.Bool( argstr='-fsl', desc='use FSL FAST to estimate the inhomogeneity field', - xor=_xor_inputs, + xor=_xor_methods, min_ver='5.0.10') - # only one of either grad or fslgrad should be supplied - grad = File( + _xor_grads = ('mrtrix_grad', 'fsl_grad') + mrtrix_grad = File( argstr='-grad %s', - desc='diffusion gradient table in MRtrix format') - fslgrad = File( + desc='diffusion gradient table in MRtrix format', + xor=_xor_grads) + fsl_grad = File( argstr='-fslgrad %s %s', - desc='diffusion gradient table in FSL bvecs/bvals format') + desc='diffusion gradient table in FSL bvecs/bvals format', + xor=_xor_grads) out_bias = File(name_template='%s_biasfield', name_source='in_file', keep_extension=True, @@ -200,14 +205,14 @@ class DWIBiasCorrectInputSpec(MRTrix3BaseInputSpec): out_file = File(name_template='%s_biascorr', name_source='in_file', keep_extension=True, - argstr="%s", + argstr='%s', position=-1, - desc="the output bias corrected DWI image", + desc='the output bias corrected DWI image', genfile=True) class DWIBiasCorrectOutputSpec(TraitedSpec): - out_bias = File(desc="the output estimated bias field") - out_file = File(desc="the output bias corrected DWI image", exists=True) + out_bias = File(desc='the output estimated bias field') + out_file = File(desc='the output bias corrected DWI image', exists=True) class DWIBiasCorrect(MRTrix3Base): """ @@ -223,7 +228,7 @@ class DWIBiasCorrect(MRTrix3Base): >>> bias_correct = mrt.DWIBiasCorrect() >>> bias_correct.inputs.in_file = 'dwi.mif' >>> bias_correct.cmdline # doctest: +ELLIPSIS - 'dwibiascorrect dwi.mif dwi_biascorr.mif' + 'dwibiascorrect -ants dwi.mif dwi_biascorr.mif' >>> bias_correct.run() # doctest: +SKIP """ From 51b6f15923eef857bf926d17ccfda2c44f62e1cf Mon Sep 17 00:00:00 2001 From: Michael Joseph Date: Mon, 25 Mar 2019 13:19:55 
-0400 Subject: [PATCH 0181/1665] add contribution --- .zenodo.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.zenodo.json b/.zenodo.json index 065e188d8b..dca3d7ef20 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -330,6 +330,11 @@ "name": "Liem, Franz", "orcid": "0000-0003-0646-4810" }, + { + "affiliation": "The Centre for Addiction and Mental Health", + "name": "Joseph, Michael", + "orcid": "0000-0002-0068-230X" + } { "affiliation": "UniversityHospital Heidelberg, Germany", "name": "Kleesiek, Jens" From 55ad341d7676c9f2bf74a03cf0ba347842710bbf Mon Sep 17 00:00:00 2001 From: mathiasg Date: Mon, 25 Mar 2019 13:32:02 -0400 Subject: [PATCH 0182/1665] maint: remove message --- nipype/algorithms/tests/test_CompCor.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/nipype/algorithms/tests/test_CompCor.py b/nipype/algorithms/tests/test_CompCor.py index c31546a48f..6fdae49f18 100644 --- a/nipype/algorithms/tests/test_CompCor.py +++ b/nipype/algorithms/tests/test_CompCor.py @@ -109,16 +109,14 @@ def test_compcor_bad_input_shapes(self): interface = CompCor( realigned_file=data_file, mask_files=self.mask_files[0]) with pytest.raises(ValueError): - interface.run() - pytest.fail("Dimension mismatch") + interface.run() # Dimension mismatch def test_tcompcor_bad_input_dim(self): bad_dims = (2, 2, 2) data_file = utils.save_toy_nii(np.zeros(bad_dims), 'temp.nii') interface = TCompCor(realigned_file=data_file) with pytest.raises(ValueError): - interface.run() - pytest.fail("Not a 4D file") + interface.run() # Not a 4D file def test_tcompcor_merge_intersect_masks(self): for method in ['union', 'intersect']: @@ -148,8 +146,7 @@ def test_tcompcor_multi_mask_no_index(self): interface = TCompCor( realigned_file=self.realigned_file, mask_files=self.mask_files) with pytest.raises(ValueError): - interface.run() - pytest.fail("more than one mask file") + interface.run() # more than one mask file def run_cc(self, ccinterface, From 6512e7add754eacd5b037355742623f84eb64597 Mon Sep 17 00:00:00 2001 From: Michael Joseph Date: Mon, 25 Mar 2019 13:32:56 -0400 Subject: [PATCH 0183/1665] fix typo and update docstrings --- nipype/interfaces/mrtrix3/preprocess.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py index 0334a7d063..e67712f1b8 100644 --- a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ -21,7 +21,7 @@ class DWIDenoiseInputSpec(MRTrix3BaseInputSpec): mask = File( exists=True, argstr='-mask %s', - #osition=1, + position=1, desc='mask image') extent = traits.Tuple((traits.Int, traits.Int, traits.Int), argstr='-extent %d,%d,%d', @@ -71,7 +71,7 @@ class DWIDenoise(MRTrix3Base): >>> denoise.inputs.in_file = 'dwi.mif' >>> denoise.inputs.mask = 'mask.mif' >>> denoise.cmdline # doctest: +ELLIPSIS - 'dwidenoise -mask mask.mif dwi.mif dwi_denoised.mif' + 'dwidenoise -mask mask.mif -noise dwi_noisemap.mif dwi.mif dwi_denoised.mif' >>> denoise.run() # doctest: +SKIP """ @@ -156,7 +156,7 @@ class MRDeGibbs(MRTrix3Base): >>> unring = mrt.MRDeGibbs() >>> unring.inputs.in_file = 'dwi.mif' >>> unring.cmdline # doctest: +ELLIPSIS - 'mrdegibbs dwi.mif dwi_unr.mif' + 'mrdegibbs -axes 0,1 -maxW 3 -minW 1 -nshifts 20 dwi.mif dwi_unr.mif' >>> unring.run() # doctest: +SKIP """ @@ -228,7 +228,7 @@ class DWIBiasCorrect(MRTrix3Base): >>> bias_correct = mrt.DWIBiasCorrect() >>> bias_correct.inputs.in_file = 'dwi.mif' >>> bias_correct.cmdline # doctest: 
+ELLIPSIS - 'dwibiascorrect -ants dwi.mif dwi_biascorr.mif' + 'dwibiascorrect -ants -bias dwi_biasfield.mif dwi.mif dwi_biascorr.mif' >>> bias_correct.run() # doctest: +SKIP """ From 059f3eedb7b38b34dc7bd5dee5b9e0f4178dc14d Mon Sep 17 00:00:00 2001 From: Michael Joseph Date: Mon, 25 Mar 2019 13:38:04 -0400 Subject: [PATCH 0184/1665] add autogenerated tests --- .../mrtrix3/tests/test_auto_DWIBiasCorrect.py | 76 +++++++++++++++++++ .../mrtrix3/tests/test_auto_DWIDenoise.py | 14 +++- .../mrtrix3/tests/test_auto_MRDeGibbs.py | 66 ++++++++++++++++ 3 files changed, 154 insertions(+), 2 deletions(-) create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py new file mode 100644 index 0000000000..07986752e8 --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py @@ -0,0 +1,76 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import DWIBiasCorrect + + +def test_DWIBiasCorrect_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bval_scale=dict(argstr='-bvalue_scaling %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fsl_grad=dict( + argstr='-fslgrad %s %s', + xor=('mrtrix_grad', 'fsl_grad'), + ), + grad_file=dict(argstr='-grad %s', ), + grad_fsl=dict(argstr='-fslgrad %s %s', ), + in_bval=dict(), + in_bvec=dict(argstr='-fslgrad %s %s', ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + in_mask=dict(argstr='-mask %s', ), + mrtrix_grad=dict( + argstr='-grad %s', + xor=('mrtrix_grad', 'fsl_grad'), + ), + nthreads=dict( + argstr='-nthreads %d', + nohash=True, + ), + out_bias=dict( + argstr='-bias %s', + genfile=True, + keep_extension=True, + name_source='in_file', + name_template='%s_biasfield', + ), + out_file=dict( + argstr='%s', + genfile=True, + keep_extension=True, + name_source='in_file', + name_template='%s_biascorr', + position=-1, + ), + use_ants=dict( + argstr='-ants', + usedefault=True, + xor=('use_ants', 'use_fsl'), + ), + use_fsl=dict( + argstr='-fsl', + min_ver='5.0.10', + xor=('use_ants', 'use_fsl'), + ), + ) + inputs = DWIBiasCorrect.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DWIBiasCorrect_outputs(): + output_map = dict( + out_bias=dict(), + out_file=dict(), + ) + outputs = DWIBiasCorrect.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py index 769ccb34a9..cf11ac3834 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py @@ -25,18 +25,25 @@ def test_DWIDenoise_inputs(): argstr='-mask %s', position=1, ), - noise=dict(argstr='-noise %s', ), nthreads=dict( argstr='-nthreads %d', nohash=True, ), out_file=dict( argstr='%s', + genfile=True, keep_extension=True, name_source='in_file', name_template='%s_denoised', position=-1, ), + out_noisemap=dict( + argstr='-noise %s', + genfile=True, + keep_extension=True, + name_source='in_file', + name_template='%s_noisemap', + ), ) inputs = 
DWIDenoise.input_spec() @@ -44,7 +51,10 @@ def test_DWIDenoise_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DWIDenoise_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict( + out_file=dict(), + out_noisemap=dict(), + ) outputs = DWIDenoise.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py new file mode 100644 index 0000000000..c9290b562b --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py @@ -0,0 +1,66 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import MRDeGibbs + + +def test_MRDeGibbs_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + axes=dict( + argstr='-axes %s', + maxlen=2, + minlen=2, + sep=',', + usedefault=True, + ), + bval_scale=dict(argstr='-bvalue_scaling %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict(argstr='-grad %s', ), + grad_fsl=dict(argstr='-fslgrad %s %s', ), + in_bval=dict(), + in_bvec=dict(argstr='-fslgrad %s %s', ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + maxW=dict( + argstr='-maxW %d', + usedefault=True, + ), + minW=dict( + argstr='-minW %d', + usedefault=True, + ), + nshifts=dict( + argstr='-nshifts %d', + usedefault=True, + ), + nthreads=dict( + argstr='-nthreads %d', + nohash=True, + ), + out_file=dict( + argstr='%s', + genfile=True, + keep_extension=True, + name_source='in_file', + name_template='%s_unr', + position=-1, + ), + ) + inputs = MRDeGibbs.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MRDeGibbs_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MRDeGibbs.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value From 572d2e60db9745dcae639e13cd5ddc0d5818088c Mon Sep 17 00:00:00 2001 From: Michael Joseph Date: Mon, 25 Mar 2019 13:46:54 -0400 Subject: [PATCH 0185/1665] missing comma --- .zenodo.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zenodo.json b/.zenodo.json index dca3d7ef20..db64d56b49 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -334,7 +334,7 @@ "affiliation": "The Centre for Addiction and Mental Health", "name": "Joseph, Michael", "orcid": "0000-0002-0068-230X" - } + }, { "affiliation": "UniversityHospital Heidelberg, Germany", "name": "Kleesiek, Jens" From 27ed03f3ed420707ea01599e42cbd42ea1e2adf3 Mon Sep 17 00:00:00 2001 From: rciric Date: Wed, 27 Mar 2019 10:03:43 -0700 Subject: [PATCH 0186/1665] filter handles empty masks, use `squeeze_image` --- nipype/algorithms/confounds.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index e0d8df6f68..07f1208540 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -1084,6 +1084,8 @@ def is_outlier(points, thresh=3.5): def cosine_filter(data, timestep, period_cut, remove_mean=True, axis=-1): datashape = data.shape timepoints = datashape[axis] + if datashape[0] == 0: + return data, np.array([]) data = data.reshape((-1, timepoints)) @@ -1115,6 +1117,8 @@ def regress_poly(degree, data, remove_mean=True, axis=-1): datashape 
= data.shape timepoints = datashape[axis] + if datashape[0] == 0: + return data, np.array([]) # Rearrange all voxel-wise time-series in rows data = data.reshape((-1, timepoints)) @@ -1261,7 +1265,7 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, components_criterion = -1 mask_names = mask_names or range(len(mask_images)) for name, img in zip(mask_names, mask_images): - mask = img.get_data().astype(np.bool) + mask = nb.squeeze_image(img).get_data().astype(np.bool) if imgseries.shape[:3] != mask.shape: raise ValueError( 'Inputs for CompCor, timeseries and mask, do not have ' From 30950acdcd73a68d46a1ce45c816f7749a03ccb7 Mon Sep 17 00:00:00 2001 From: mathiasg Date: Wed, 27 Mar 2019 13:26:08 -0400 Subject: [PATCH 0187/1665] fix: dipy import --- nipype/interfaces/dipy/tracks.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/dipy/tracks.py b/nipype/interfaces/dipy/tracks.py index 3643654001..b3d51151e2 100644 --- a/nipype/interfaces/dipy/tracks.py +++ b/nipype/interfaces/dipy/tracks.py @@ -20,13 +20,16 @@ if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('0.15'): from dipy.workflows.segment import RecoBundlesFlow, LabelsBundlesFlow - from dipy.workflows.tracking import DetTrackPAMFlow + try: + from dipy.workflows.tracking import LocalFiberTrackingPAMFlow as DetTrackFlow + except ImportError: # different name in 0.15 + from dipy.workflows.tracking import DetTrackPAMFlow as DetTrackFlow RecoBundles = dipy_to_nipype_interface("RecoBundles", RecoBundlesFlow) LabelsBundles = dipy_to_nipype_interface("LabelsBundles", LabelsBundlesFlow) DeterministicTracking = dipy_to_nipype_interface("DeterministicTracking", - DetTrackPAMFlow) + DetTrackFlow) else: IFLOGGER.info("We advise you to upgrade DIPY version. This upgrade will" From f0a11b835ede75463ee50745896d60afc0d962b2 Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Wed, 27 Mar 2019 17:02:40 -0400 Subject: [PATCH 0188/1665] added methods to generate command line flag and generate a boutiques output from a Nipype input spec, code style fixes, some refactoring and improvements --- nipype/utils/nipype2boutiques.py | 169 +++++++++++++++++++------------ 1 file changed, 103 insertions(+), 66 deletions(-) diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py index c5280d2d45..981c8d0cb3 100644 --- a/nipype/utils/nipype2boutiques.py +++ b/nipype/utils/nipype2boutiques.py @@ -9,6 +9,7 @@ # Limitations: # * Optional outputs, i.e. outputs that not always produced, may not be detected. They will, however, still be listed # with a placeholder for the path template (either a value key or the output ID) that should be verified and corrected. +# * Still need to add some fields to the descriptor manually, e.g. url, descriptor-url, path-template-stripped-extensions, etc. 
import os import sys @@ -77,28 +78,34 @@ def generate_boutiques_descriptor( # Generates tool inputs for name, spec in sorted(interface.inputs.traits(transient=None).items()): - inp = get_boutiques_input(inputs, interface, name, spec, verbose, ignore_inputs=ignore_inputs) - # Handle compound inputs (inputs that can be of multiple types and are mutually exclusive) - if inp is None: + # Skip ignored inputs + if ignore_inputs is not None and name in ignore_inputs: continue - if isinstance(inp, list): - mutex_group_members = [] - tool_desc['command-line'] += inp[0]['value-key'] + " " - for i in inp: - tool_desc['inputs'].append(i) - mutex_group_members.append(i['id']) - if verbose: - print("-> Adding input " + i['name']) - # Put inputs into a mutually exclusive group - tool_desc['groups'].append({'id': inp[0]['id'] + "_group", - 'name': inp[0]['name'] + " group", - 'members': mutex_group_members, - 'mutually-exclusive': True}) + # If spec has a name source, this means it actually represents an output, so create a + # Boutiques output from it + elif spec.name_source and spec.name_template: + tool_desc['output-files'].append(get_boutiques_output_from_inp(inputs, spec, name)) else: - tool_desc['inputs'].append(inp) - tool_desc['command-line'] += inp['value-key'] + " " - if verbose: - print("-> Adding input " + inp['name']) + inp = get_boutiques_input(inputs, interface, name, spec, verbose, ignore_inputs=ignore_inputs) + # Handle compound inputs (inputs that can be of multiple types and are mutually exclusive) + if isinstance(inp, list): + mutex_group_members = [] + tool_desc['command-line'] += inp[0]['value-key'] + " " + for i in inp: + tool_desc['inputs'].append(i) + mutex_group_members.append(i['id']) + if verbose: + print("-> Adding input " + i['name']) + # Put inputs into a mutually exclusive group + tool_desc['groups'].append({'id': inp[0]['id'] + "_group", + 'name': inp[0]['name'] + " group", + 'members': mutex_group_members, + 'mutually-exclusive': True}) + else: + tool_desc['inputs'].append(inp) + tool_desc['command-line'] += inp['value-key'] + " " + if verbose: + print("-> Adding input " + inp['name']) # Generates input groups tool_desc['groups'] += get_boutiques_groups(interface.inputs.traits(transient=None).items()) @@ -148,7 +155,7 @@ def generate_boutiques_descriptor( # Save descriptor to a file if save: - path = save_path if save_path is not None else os.path.join(os.getcwd(), interface_name + '.json') + path = save_path or os.path.join(os.getcwd(), interface_name + '.json') with open(path, 'w') as outfile: json.dump(tool_desc, outfile, indent=4, separators=(',', ': ')) if verbose: @@ -200,15 +207,9 @@ def get_boutiques_input(inputs, interface, input_name, spec, verbose, handler=No Assumes that: * Input names are unique. """ - - # If spec has a name source, means it's an output, so skip it here. 
- # Also skip any ignored inputs - if spec.name_source or ignore_inputs is not None and input_name in ignore_inputs: - return None - inp = {} - if input_number is not None and input_number != 0: # No need to append a number to the first of a list of compound inputs + if input_number: # No need to append a number to the first of a list of compound inputs inp['id'] = input_name + "_" + str(input_number + 1) else: inp['id'] = input_name @@ -302,20 +303,18 @@ def get_boutiques_input(inputs, interface, input_name, spec, verbose, handler=No if handler_type == "InputMultiObject": inp['type'] = "File" inp['list'] = True + if spec.sep: + inp['list-separator'] = spec.sep inp['value-key'] = "[" + input_name.upper( ) + "]" # assumes that input names are unique - # Add the command line flag specified by argstr - # If no argstr is provided and input type is Flag, create a flag from the name - if spec.argstr: - if "=" in spec.argstr: - inp['command-line-flag'] = spec.argstr.split("=")[0].strip() - inp['command-line-flag-separator'] = "=" - elif spec.argstr.split("%")[0]: - inp['command-line-flag'] = spec.argstr.split("%")[0].strip() - elif inp['type'] == "Flag": - inp['command-line-flag'] = ("--%s" % input_name + " ").strip() + flag, flag_sep = get_command_line_flag(spec, inp['type'] == "Flag", input_name) + + if flag is not None: + inp['command-line-flag'] = flag + if flag_sep is not None: + inp['command-line-flag-separator'] = flag_sep inp['description'] = get_description_from_spec(inputs, input_name, spec) if not (hasattr(spec, "mandatory") and spec.mandatory): @@ -384,42 +383,32 @@ def get_boutiques_output(outputs, name, spec, interface, tool_inputs): output_value = None except AttributeError: output_value = None + except KeyError: + output_value = None # Handle multi-outputs - if isinstance(output_value, list): + if isinstance(output_value, list) or type(spec.handler).__name__ == "OutputMultiObject": output['list'] = True - # Check if all extensions are the same - extensions = [] - for val in output_value: - extensions.append(os.path.splitext(val)[1]) - # If extensions all the same, set path template as wildcard + extension - # Otherwise just use a wildcard - if len(set(extensions)) == 1: - output['path-template'] = "*" + extensions[0] - else: - output['path-template'] = "*" - return output + if output_value: + # Check if all extensions are the same + extensions = [] + for val in output_value: + extensions.append(os.path.splitext(val)[1]) + # If extensions all the same, set path template as wildcard + extension + # Otherwise just use a wildcard + if len(set(extensions)) == 1: + output['path-template'] = "*" + extensions[0] + else: + output['path-template'] = "*" + return output # If an output value is defined, use its relative path, if one exists. - # If no relative path, look for an input with the same name containing a name source - # and name template. Otherwise, put blank string as placeholder and try to fill it on + # Otherwise, put blank string as placeholder and try to fill it on # another iteration. 
- output['path-template'] = "" - if output_value: output['path-template'] = os.path.relpath(output_value) else: - for inp_name, inp_spec in sorted(interface.inputs.traits(transient=None).items()): - if inp_name == name and inp_spec.name_source and inp_spec.name_template: - if isinstance(inp_spec.name_source, list): - source = inp_spec.name_source[0] - else: - source = inp_spec.name_source - output['path-template'] = inp_spec.name_template.replace("%s", "[" + source.upper() + "]") - output['value-key'] = "[" + name.upper() + "]" - if inp_spec.argstr and inp_spec.argstr.split("%")[0]: - output['command-line-flag'] = inp_spec.argstr.split("%")[0].strip() - break + output['path-template'] = "" return output @@ -535,4 +524,52 @@ def reorder_cmd_line_args(cmd_line, interface, ignore_inputs=None): continue positional_args.append(item[1]) - return interface_name + " " + " ".join(positional_args) + " " + ((last_arg + " ") if last_arg else "") + " ".join(non_positional_args) + return interface_name + " " +\ + ((" ".join(positional_args) + " ") if len(positional_args) > 0 else "") +\ + ((last_arg + " ") if last_arg else "") +\ + " ".join(non_positional_args) + + +def get_command_line_flag(input_spec, is_flag_type=False, input_name=None): + ''' + Generates the command line flag for a given input + ''' + flag, flag_sep = None, None + if input_spec.argstr: + if "=" in input_spec.argstr: + flag = input_spec.argstr.split("=")[0].strip() + flag_sep = "=" + elif input_spec.argstr.split("%")[0]: + flag = input_spec.argstr.split("%")[0].strip() + elif is_flag_type: + flag = ("--%s" % input_name + " ").strip() + return flag, flag_sep + + +def get_boutiques_output_from_inp(inputs, inp_spec, inp_name): + ''' + Takes a Nipype input representing an output file and generates a Boutiques output for it + ''' + output = {} + output['name'] = inp_name.replace('_', ' ').capitalize() + output['id'] = inp_name + output['optional'] = True + output['description'] = get_description_from_spec(inputs, inp_name, inp_spec) + if not (hasattr(inp_spec, "mandatory") and inp_spec.mandatory): + output['optional'] = True + else: + output['optional'] = False + if inp_spec.usedefault: + output['default-value'] = inp_spec.default_value()[1] + if isinstance(inp_spec.name_source, list): + source = inp_spec.name_source[0] + else: + source = inp_spec.name_source + output['path-template'] = inp_spec.name_template.replace("%s", "[" + source.upper() + "]") + output['value-key'] = "[" + inp_name.upper() + "]" + flag, flag_sep = get_command_line_flag(inp_spec) + if flag is not None: + output['command-line-flag'] = flag + if flag_sep is not None: + output['command-line-flag-separator'] = flag_sep + return output From b4b2de077fc94978c30c70355252d8ce9ea68775 Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Wed, 27 Mar 2019 21:20:26 -0400 Subject: [PATCH 0189/1665] updated nipypecli, fixed code style --- nipype/scripts/cli.py | 61 +++++++----- nipype/utils/nipype2boutiques.py | 166 +++++++++++++++++++------------ 2 files changed, 139 insertions(+), 88 deletions(-) diff --git a/nipype/scripts/cli.py b/nipype/scripts/cli.py index 59d8672cfb..482f359aae 100644 --- a/nipype/scripts/cli.py +++ b/nipype/scripts/cli.py @@ -211,48 +211,55 @@ def convert(): help="JSON file name where the Boutiques descriptor will be " "written.") @click.option( - "-t", - "--ignored-template-inputs", + "-c", + "--container-image", + required=True, type=str, - multiple=True, - help="Interface inputs ignored in path template creations.") + help="Name of the container image 
where the tool is installed.") @click.option( - "-d", - "--docker-image", + "-p", + "--container-type", + required=True, type=str, - help="Name of the Docker image where the Nipype interface is " - "available.") + help="Type of container image (Docker or Singularity).") @click.option( - "-r", - "--docker-index", + "-x", + "--container-index", type=str, - help="Docker index where the Docker image is stored (e.g. " + help="Optional index where the image is available (e.g. " "http://index.docker.io).") @click.option( - "-n", - "--ignore-template-numbers", - is_flag=True, - flag_value=True, - help="Ignore all numbers in path template creations.") + "-g", + "--ignore-inputs", + type=str, + multiple=True, + help="List of interface inputs to not include in the descriptor.") @click.option( "-v", "--verbose", is_flag=True, flag_value=True, - help="Enable verbose output.") -def boutiques(interface, module, output, ignored_template_inputs, docker_image, - docker_index, ignore_template_numbers, verbose): + help="Print information messages.") +@click.option( + "-a", + "--author", + type=str, + help="Author of the tool (required for publishing).") +@click.option( + "-t", + "--tags", + type=str, + help="JSON string containing tags to include in the descriptor," + "e.g. \"{\"key1\": \"value1\"}\"") +def boutiques(module, interface, container_image, container_type, output, + container_index, verbose, author, ignore_inputs, tags): """Nipype to Boutiques exporter. See Boutiques specification at https://github.com/boutiques/schema. """ from nipype.utils.nipype2boutiques import generate_boutiques_descriptor - # Generates JSON string - json_string = generate_boutiques_descriptor( - module, interface, ignored_template_inputs, docker_image, docker_index, - verbose, ignore_template_numbers) - - # Writes JSON string to file - with open(output, 'w') as f: - f.write(json_string) + # Generates JSON string and saves it to file + generate_boutiques_descriptor( + module, interface, container_image, container_type, container_index, + verbose, True, output, author, ignore_inputs, tags) diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py index 981c8d0cb3..a9500a71a8 100644 --- a/nipype/utils/nipype2boutiques.py +++ b/nipype/utils/nipype2boutiques.py @@ -3,13 +3,18 @@ absolute_import) from builtins import str, open, bytes -# This tool exports a Nipype interface in the Boutiques (https://github.com/boutiques) JSON format. -# Boutiques tools can be imported in CBRAIN (https://github.com/aces/cbrain) among other platforms. +# This tool exports a Nipype interface in the Boutiques +# (https://github.com/boutiques) JSON format. Boutiques tools +# can be imported in CBRAIN (https://github.com/aces/cbrain) +# among other platforms. # # Limitations: -# * Optional outputs, i.e. outputs that not always produced, may not be detected. They will, however, still be listed -# with a placeholder for the path template (either a value key or the output ID) that should be verified and corrected. -# * Still need to add some fields to the descriptor manually, e.g. url, descriptor-url, path-template-stripped-extensions, etc. +# * Optional outputs, i.e. outputs that not always produced, may not be +# detected. They will, however, still be listed with a placeholder for +# the path template (either a value key or the output ID) that should +# be verified and corrected. +# * Still need to add some fields to the descriptor manually, e.g. url, +# descriptor-url, path-template-stripped-extensions, etc. 
import os import sys @@ -20,10 +25,12 @@ def generate_boutiques_descriptor( - module, interface_name, container_image, container_type, container_index=None, - verbose=False, save=False, save_path=None, author=None, ignore_inputs=None, tags=None): + module, interface_name, container_image, container_type, + container_index=None, verbose=False, save=False, save_path=None, + author=None, ignore_inputs=None, tags=None): ''' - Returns a JSON string containing a JSON Boutiques description of a Nipype interface. + Returns a JSON string containing a JSON Boutiques description of a + Nipype interface. Arguments: * module: module where the Nipype interface is declared. * interface_name: name of Nipype interface. @@ -32,11 +39,13 @@ def generate_boutiques_descriptor( * container_index: optional index where the image is available * verbose: print information messages * save: True if you want to save descriptor to a file - * save_path: file path for the saved descriptor (defaults to name of the interface in current directory) + * save_path: file path for the saved descriptor (defaults to name of the + interface in current directory) * author: author of the tool (required for publishing) * ignore_inputs: list of interface inputs to not include in the descriptor - * tags: JSON object containing tags to include in the descriptor, e.g. "{/"key1/": /"value1/"}" - (note: the tags 'domain:neuroinformatics' and 'interface-type:nipype' are included by default) + * tags: JSON object containing tags to include in the descriptor, + e.g. "{\"key1\": \"value1\"}" (note: the tags 'domain:neuroinformatics' + and 'interface-type:nipype' are included by default) ''' if not module: @@ -62,12 +71,15 @@ def generate_boutiques_descriptor( tool_desc['author'] = "Nipype (interface)" if author is not None: tool_desc['author'] = tool_desc['author'] + ", " + author + " (tool)" - tool_desc[ - 'description'] = interface_name + ", as implemented in Nipype (module: " + module_name + ", interface: " + interface_name + ")." 
+ tool_desc['description'] = (interface_name + + ", as implemented in Nipype (module: " + + module_name + ", interface: " + + interface_name + ").") tool_desc['inputs'] = [] tool_desc['output-files'] = [] tool_desc['groups'] = [] - tool_desc['tool-version'] = interface.version if interface.version is not None else "1.0.0" + tool_desc['tool-version'] = interface.version \ + if interface.version is not None else "1.0.0" tool_desc['schema-version'] = '0.5' if container_image: tool_desc['container-image'] = {} @@ -81,13 +93,15 @@ def generate_boutiques_descriptor( # Skip ignored inputs if ignore_inputs is not None and name in ignore_inputs: continue - # If spec has a name source, this means it actually represents an output, so create a - # Boutiques output from it + # If spec has a name source, this means it actually represents an + # output, so create a Boutiques output from it elif spec.name_source and spec.name_template: - tool_desc['output-files'].append(get_boutiques_output_from_inp(inputs, spec, name)) + tool_desc['output-files']\ + .append(get_boutiques_output_from_inp(inputs, spec, name)) else: - inp = get_boutiques_input(inputs, interface, name, spec, verbose, ignore_inputs=ignore_inputs) - # Handle compound inputs (inputs that can be of multiple types and are mutually exclusive) + inp = get_boutiques_input(inputs, interface, name, spec, verbose) + # Handle compound inputs (inputs that can be of multiple types + # and are mutually exclusive) if isinstance(inp, list): mutex_group_members = [] tool_desc['command-line'] += inp[0]['value-key'] + " " @@ -108,7 +122,8 @@ def generate_boutiques_descriptor( print("-> Adding input " + inp['name']) # Generates input groups - tool_desc['groups'] += get_boutiques_groups(interface.inputs.traits(transient=None).items()) + tool_desc['groups'] +=\ + get_boutiques_groups(interface.inputs.traits(transient=None).items()) if len(tool_desc['groups']) == 0: del tool_desc['groups'] @@ -127,7 +142,8 @@ def generate_boutiques_descriptor( # Fill in all missing output paths for output in tool_desc['output-files']: if output['path-template'] == "": - fill_in_missing_output_path(output, output['name'], tool_desc['inputs']) + fill_in_missing_output_path(output, output['name'], + tool_desc['inputs']) # Add tags desc_tags = { @@ -148,7 +164,8 @@ def generate_boutiques_descriptor( tool_desc['tags'] = desc_tags # Check for positional arguments and reorder command line args if necessary - tool_desc['command-line'] = reorder_cmd_line_args(tool_desc['command-line'], interface, ignore_inputs) + tool_desc['command-line'] = reorder_cmd_line_args( + tool_desc['command-line'], interface, ignore_inputs) # Remove the extra space at the end of the command line tool_desc['command-line'] = tool_desc['command-line'].strip() @@ -161,16 +178,19 @@ def generate_boutiques_descriptor( if verbose: print("-> Descriptor saved to file " + outfile.name) - print("NOTE: Descriptors produced by this script may not entirely conform to the Nipype interface " - "specs. Please check that the descriptor is correct before using it.") + print("NOTE: Descriptors produced by this script may not entirely conform " + "to the Nipype interface specs. 
Please check that the descriptor is " + "correct before using it.") return json.dumps(tool_desc, indent=4, separators=(',', ': ')) def generate_tool_outputs(outputs, interface, tool_desc, verbose, first_run): for name, spec in sorted(outputs.traits(transient=None).items()): - output = get_boutiques_output(outputs, name, spec, interface, tool_desc['inputs']) - # If this is the first time we are generating outputs, add the full output to the descriptor. - # Otherwise, find the existing output and update its path template if it's still undefined. + output = get_boutiques_output(outputs, name, spec, interface, + tool_desc['inputs']) + # If this is the first time we are generating outputs, add the full + # output to the descriptor. Otherwise, find the existing output and + # update its path template if it's still undefined. if first_run: tool_desc['output-files'].append(output) if output.get('value-key'): @@ -179,20 +199,23 @@ def generate_tool_outputs(outputs, interface, tool_desc, verbose, first_run): print("-> Adding output " + output['name']) else: for existing_output in tool_desc['output-files']: - if output['id'] == existing_output['id'] and existing_output['path-template'] == "": + if (output['id'] == existing_output['id'] and + existing_output['path-template'] == ""): existing_output['path-template'] = output['path-template'] break - if output.get('value-key') and output['value-key'] not in tool_desc['command-line']: + if (output.get('value-key') and + output['value-key'] not in tool_desc['command-line']): tool_desc['command-line'] += output['value-key'] + " " if len(tool_desc['output-files']) == 0: raise Exception("Tool has no output.") -def get_boutiques_input(inputs, interface, input_name, spec, verbose, handler=None, - input_number=None, ignore_inputs=None): +def get_boutiques_input(inputs, interface, input_name, spec, verbose, + handler=None, input_number=None): """ - Returns a dictionary containing the Boutiques input corresponding to a Nipype intput. + Returns a dictionary containing the Boutiques input corresponding + to a Nipype input. Args: * inputs: inputs of the Nipype interface. @@ -200,16 +223,18 @@ def get_boutiques_input(inputs, interface, input_name, spec, verbose, handler=No * input_name: name of the Nipype input. * spec: Nipype input spec. * verbose: print information messages. - * handler: used when handling compound inputs, which don't have their own input spec - * input_number: used when handling compound inputs to assign each a unique ID - * ignore_inputs: list of interface inputs to not include in the descriptor + * handler: used when handling compound inputs, which don't have their + own input spec + * input_number: used when handling compound inputs to assign each a + unique ID Assumes that: * Input names are unique. 
""" inp = {} - if input_number: # No need to append a number to the first of a list of compound inputs + # No need to append a number to the first of a list of compound inputs + if input_number: inp['id'] = input_name + "_" + str(input_number + 1) else: inp['id'] = input_name @@ -243,7 +268,9 @@ def get_boutiques_input(inputs, interface, input_name, spec, verbose, handler=No elif handler_type == "Float": inp['type'] = "Number" elif handler_type == "Bool": - if spec.argstr and len(spec.argstr.split("=")) > 1 and (spec.argstr.split("=")[1] == '0' or spec.argstr.split("=")[1] == '1'): + if (spec.argstr and len(spec.argstr.split("=")) > 1 and + (spec.argstr.split("=")[1] == '0' + or spec.argstr.split("=")[1] == '1')): inp['type'] = "Number" inp['integer'] = True inp['minimum'] = 0 @@ -309,7 +336,8 @@ def get_boutiques_input(inputs, interface, input_name, spec, verbose, handler=No inp['value-key'] = "[" + input_name.upper( ) + "]" # assumes that input names are unique - flag, flag_sep = get_command_line_flag(spec, inp['type'] == "Flag", input_name) + flag, flag_sep = get_command_line_flag(spec, inp['type'] == "Flag", + input_name) if flag is not None: inp['command-line-flag'] = flag @@ -342,14 +370,16 @@ def get_boutiques_input(inputs, interface, input_name, spec, verbose, handler=No def get_boutiques_output(outputs, name, spec, interface, tool_inputs): """ - Returns a dictionary containing the Boutiques output corresponding to a Nipype output. + Returns a dictionary containing the Boutiques output corresponding + to a Nipype output. Args: * outputs: outputs of the Nipype interface. * name: name of the Nipype output. * spec: Nipype output spec. * interface: Nipype interface. - * tool_inputs: list of tool inputs (as produced by method get_boutiques_input). + * tool_inputs: list of tool inputs (as produced by method + get_boutiques_input). Assumes that: * Output names are unique. @@ -370,8 +400,10 @@ def get_boutiques_output(outputs, name, spec, interface, tool_inputs): output['id'] = name if unique_id else name + '_outfile' output['path-template'] = "" - output[ - 'optional'] = True # no real way to determine if an output is always produced, regardless of the input values. + + # No real way to determine if an output is always + # produced, regardless of the input values. + output['optional'] = True output['description'] = get_description_from_spec(outputs, name, spec) @@ -387,15 +419,16 @@ def get_boutiques_output(outputs, name, spec, interface, tool_inputs): output_value = None # Handle multi-outputs - if isinstance(output_value, list) or type(spec.handler).__name__ == "OutputMultiObject": + if (isinstance(output_value, list) or + type(spec.handler).__name__ == "OutputMultiObject"): output['list'] = True if output_value: # Check if all extensions are the same extensions = [] for val in output_value: extensions.append(os.path.splitext(val)[1]) - # If extensions all the same, set path template as wildcard + extension - # Otherwise just use a wildcard + # If extensions all the same, set path template as + # wildcard + extension. Otherwise just use a wildcard if len(set(extensions)) == 1: output['path-template'] = "*" + extensions[0] else: @@ -415,7 +448,8 @@ def get_boutiques_output(outputs, name, spec, interface, tool_inputs): def get_boutiques_groups(input_traits): """ - Returns a list of dictionaries containing Boutiques groups for the mutually exclusive and all-or-none Nipype inputs. 
+ Returns a list of dictionaries containing Boutiques groups for the mutually + exclusive and all-or-none Nipype inputs. """ desc_groups = [] all_or_none_input_sets = [] @@ -434,14 +468,18 @@ def get_boutiques_groups(input_traits): # Create a dictionary for each one for i, inp_set in enumerate(all_or_none_input_sets, 1): - desc_groups.append({'id': "all_or_none_group" + ("_" + str(i) if i != 1 else ""), - 'name': "All or none group" + (" " + str(i) if i != 1 else ""), + desc_groups.append({'id': "all_or_none_group" + + ("_" + str(i) if i != 1 else ""), + 'name': "All or none group" + + (" " + str(i) if i != 1 else ""), 'members': list(inp_set), 'all-or-none': True}) for i, inp_set in enumerate(mutex_input_sets, 1): - desc_groups.append({'id': "mutex_group" + ("_" + str(i) if i != 1 else ""), - 'name': "Mutex group" + (" " + str(i) if i != 1 else ""), + desc_groups.append({'id': "mutex_group" + + ("_" + str(i) if i != 1 else ""), + 'name': "Mutex group" + + (" " + str(i) if i != 1 else ""), 'members': list(inp_set), 'mutually-exclusive': True}) @@ -485,9 +523,10 @@ def fill_in_missing_output_path(output, output_name, tool_inputs): def generate_custom_inputs(desc_inputs): ''' - Generates a bunch of custom input dictionaries in order to generate as many outputs as possible - (to get their path templates) - Currently only works with flag inputs and inputs with defined value choices. + Generates a bunch of custom input dictionaries in order to generate + as many outputs as possible (to get their path templates). + Currently only works with flag inputs and inputs with defined value + choices. ''' custom_input_dicts = [] for desc_input in desc_inputs: @@ -501,7 +540,8 @@ def generate_custom_inputs(desc_inputs): def reorder_cmd_line_args(cmd_line, interface, ignore_inputs=None): ''' - Generates a new command line with the positional arguments in the correct order + Generates a new command line with the positional arguments in the + correct order ''' interface_name = cmd_line.split()[0] positional_arg_dict = {} @@ -524,10 +564,11 @@ def reorder_cmd_line_args(cmd_line, interface, ignore_inputs=None): continue positional_args.append(item[1]) - return interface_name + " " +\ - ((" ".join(positional_args) + " ") if len(positional_args) > 0 else "") +\ - ((last_arg + " ") if last_arg else "") +\ - " ".join(non_positional_args) + return (interface_name + " " + + ((" ".join(positional_args) + " ") + if len(positional_args) > 0 else "") + + ((last_arg + " ") if last_arg else "") + + " ".join(non_positional_args)) def get_command_line_flag(input_spec, is_flag_type=False, input_name=None): @@ -548,13 +589,15 @@ def get_command_line_flag(input_spec, is_flag_type=False, input_name=None): def get_boutiques_output_from_inp(inputs, inp_spec, inp_name): ''' - Takes a Nipype input representing an output file and generates a Boutiques output for it + Takes a Nipype input representing an output file and generates a + Boutiques output for it ''' output = {} output['name'] = inp_name.replace('_', ' ').capitalize() output['id'] = inp_name output['optional'] = True - output['description'] = get_description_from_spec(inputs, inp_name, inp_spec) + output['description'] = get_description_from_spec(inputs, inp_name, + inp_spec) if not (hasattr(inp_spec, "mandatory") and inp_spec.mandatory): output['optional'] = True else: @@ -565,7 +608,8 @@ def get_boutiques_output_from_inp(inputs, inp_spec, inp_name): source = inp_spec.name_source[0] else: source = inp_spec.name_source - output['path-template'] = 
inp_spec.name_template.replace("%s", "[" + source.upper() + "]") + output['path-template'] = inp_spec.name_template.replace( + "%s", "[" + source.upper() + "]") output['value-key'] = "[" + inp_name.upper() + "]" flag, flag_sep = get_command_line_flag(inp_spec) if flag is not None: From 82a25c2308df0fa29568d5c27b25bc3ad4d4e4f5 Mon Sep 17 00:00:00 2001 From: rciric Date: Thu, 28 Mar 2019 08:41:05 -0700 Subject: [PATCH 0190/1665] default to old behaviour for temporal filters --- nipype/algorithms/confounds.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index ad5a6ab8b5..949f510d2e 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -1082,10 +1082,11 @@ def is_outlier(points, thresh=3.5): return timepoints_to_discard -def cosine_filter(data, timestep, period_cut, remove_mean=True, axis=-1): +def cosine_filter(data, timestep, period_cut, remove_mean=True, axis=-1, + failure_mode='error'): datashape = data.shape timepoints = datashape[axis] - if datashape[0] == 0: + if datashape[0] == 0 and failure_mode != 'error': return data, np.array([]) data = data.reshape((-1, timepoints)) @@ -1105,7 +1106,8 @@ def cosine_filter(data, timestep, period_cut, remove_mean=True, axis=-1): return residuals.reshape(datashape), non_constant_regressors -def regress_poly(degree, data, remove_mean=True, axis=-1): +def regress_poly(degree, data, remove_mean=True, axis=-1, + failure_mode='error'): """ Returns data with degree polynomial regressed out. @@ -1118,7 +1120,7 @@ def regress_poly(degree, data, remove_mean=True, axis=-1): datashape = data.shape timepoints = datashape[axis] - if datashape[0] == 0: + if datashape[0] == 0 and failure_mode != 'error': return data, np.array([]) # Rearrange all voxel-wise time-series in rows @@ -1285,12 +1287,14 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, raise ValueError( 'Repetition time must be provided for cosine filter') voxel_timecourses, basis = cosine_filter( - voxel_timecourses, repetition_time, period_cut) + voxel_timecourses, repetition_time, period_cut, + failure_mode=failure_mode) elif filter_type in ('polynomial', False): # from paper: # "The constant and linear trends of the columns in the matrix M were # removed [prior to ...]" - voxel_timecourses, basis = regress_poly(degree, voxel_timecourses) + voxel_timecourses, basis = regress_poly(degree, voxel_timecourses, + failure_mode=failure_mode) # "Voxel time series from the noise ROI (either anatomical or tSTD) were # placed in a matrix M of size Nxm, with time along the row dimension From dde5c4f7dd4fcdc3d612ba7c6c9d1cb756514f15 Mon Sep 17 00:00:00 2001 From: Michael Joseph Date: Thu, 28 Mar 2019 12:24:09 -0400 Subject: [PATCH 0191/1665] revert variable name changes to dwidenoise and address code review --- nipype/interfaces/mrtrix3/preprocess.py | 24 +++++++------------ .../mrtrix3/tests/test_auto_DWIBiasCorrect.py | 13 ++-------- .../mrtrix3/tests/test_auto_DWIDenoise.py | 13 ++-------- 3 files changed, 12 insertions(+), 38 deletions(-) diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py index e67712f1b8..7e02d4dcf8 100644 --- a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ -26,12 +26,9 @@ class DWIDenoiseInputSpec(MRTrix3BaseInputSpec): extent = traits.Tuple((traits.Int, traits.Int, traits.Int), argstr='-extent %d,%d,%d', desc='set the window size of the 
denoising filter. (default = 5,5,5)') - out_noisemap = File(name_template='%s_noisemap', - name_source='in_file', - keep_extension=True, + noise = File( argstr='-noise %s', - desc='the output noise map', - genfile=True) + desc='the output noise map') out_file = File(name_template='%s_denoised', name_source='in_file', keep_extension=True, @@ -41,7 +38,6 @@ class DWIDenoiseInputSpec(MRTrix3BaseInputSpec): genfile=True) class DWIDenoiseOutputSpec(TraitedSpec): - out_noisemap = File(desc='the output noise map', exists=True) out_file = File(desc='the output denoised DWI image', exists=True) class DWIDenoise(MRTrix3Base): @@ -71,7 +67,7 @@ class DWIDenoise(MRTrix3Base): >>> denoise.inputs.in_file = 'dwi.mif' >>> denoise.inputs.mask = 'mask.mif' >>> denoise.cmdline # doctest: +ELLIPSIS - 'dwidenoise -mask mask.mif -noise dwi_noisemap.mif dwi.mif dwi_denoised.mif' + 'dwidenoise -mask mask.mif dwi.mif dwi_denoised.mif' >>> denoise.run() # doctest: +SKIP """ @@ -155,7 +151,7 @@ class MRDeGibbs(MRTrix3Base): >>> import nipype.interfaces.mrtrix3 as mrt >>> unring = mrt.MRDeGibbs() >>> unring.inputs.in_file = 'dwi.mif' - >>> unring.cmdline # doctest: +ELLIPSIS + >>> unring.cmdline 'mrdegibbs -axes 0,1 -maxW 3 -minW 1 -nshifts 20 dwi.mif dwi_unr.mif' >>> unring.run() # doctest: +SKIP """ @@ -196,12 +192,9 @@ class DWIBiasCorrectInputSpec(MRTrix3BaseInputSpec): argstr='-fslgrad %s %s', desc='diffusion gradient table in FSL bvecs/bvals format', xor=_xor_grads) - out_bias = File(name_template='%s_biasfield', - name_source='in_file', - keep_extension=True, + bias = File( argstr='-bias %s', - desc='bias field', - genfile=True) + desc='bias field') out_file = File(name_template='%s_biascorr', name_source='in_file', keep_extension=True, @@ -211,7 +204,6 @@ class DWIBiasCorrectInputSpec(MRTrix3BaseInputSpec): genfile=True) class DWIBiasCorrectOutputSpec(TraitedSpec): - out_bias = File(desc='the output estimated bias field') out_file = File(desc='the output bias corrected DWI image', exists=True) class DWIBiasCorrect(MRTrix3Base): @@ -227,8 +219,8 @@ class DWIBiasCorrect(MRTrix3Base): >>> import nipype.interfaces.mrtrix3 as mrt >>> bias_correct = mrt.DWIBiasCorrect() >>> bias_correct.inputs.in_file = 'dwi.mif' - >>> bias_correct.cmdline # doctest: +ELLIPSIS - 'dwibiascorrect -ants -bias dwi_biasfield.mif dwi.mif dwi_biascorr.mif' + >>> bias_correct.cmdline + 'dwibiascorrect -ants dwi.mif dwi_biascorr.mif' >>> bias_correct.run() # doctest: +SKIP """ diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py index 07986752e8..9ac0515314 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py @@ -6,6 +6,7 @@ def test_DWIBiasCorrect_inputs(): input_map = dict( args=dict(argstr='%s', ), + bias=dict(argstr='-bias %s', ), bval_scale=dict(argstr='-bvalue_scaling %s', ), environ=dict( nohash=True, @@ -33,13 +34,6 @@ def test_DWIBiasCorrect_inputs(): argstr='-nthreads %d', nohash=True, ), - out_bias=dict( - argstr='-bias %s', - genfile=True, - keep_extension=True, - name_source='in_file', - name_template='%s_biasfield', - ), out_file=dict( argstr='%s', genfile=True, @@ -65,10 +59,7 @@ def test_DWIBiasCorrect_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DWIBiasCorrect_outputs(): - output_map = dict( - out_bias=dict(), - out_file=dict(), - ) + output_map = dict(out_file=dict(), ) outputs = 
DWIBiasCorrect.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py index cf11ac3834..7ef30c10b8 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py @@ -25,6 +25,7 @@ def test_DWIDenoise_inputs(): argstr='-mask %s', position=1, ), + noise=dict(argstr='-noise %s', ), nthreads=dict( argstr='-nthreads %d', nohash=True, @@ -37,13 +38,6 @@ def test_DWIDenoise_inputs(): name_template='%s_denoised', position=-1, ), - out_noisemap=dict( - argstr='-noise %s', - genfile=True, - keep_extension=True, - name_source='in_file', - name_template='%s_noisemap', - ), ) inputs = DWIDenoise.input_spec() @@ -51,10 +45,7 @@ def test_DWIDenoise_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DWIDenoise_outputs(): - output_map = dict( - out_file=dict(), - out_noisemap=dict(), - ) + output_map = dict(out_file=dict(), ) outputs = DWIDenoise.output_spec() for key, metadata in list(output_map.items()): From 1334636ce774f54284c665d998effae637e41d37 Mon Sep 17 00:00:00 2001 From: Michael Joseph Date: Thu, 28 Mar 2019 17:23:44 -0400 Subject: [PATCH 0192/1665] add list outputs --- nipype/interfaces/mrtrix3/preprocess.py | 16 ++++++++++++++++ .../mrtrix3/tests/test_auto_DWIBiasCorrect.py | 5 ++++- .../mrtrix3/tests/test_auto_DWIDenoise.py | 5 ++++- 3 files changed, 24 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py index 7e02d4dcf8..ba5f734621 100644 --- a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ -38,6 +38,7 @@ class DWIDenoiseInputSpec(MRTrix3BaseInputSpec): genfile=True) class DWIDenoiseOutputSpec(TraitedSpec): + noise = File(desc='the output noise map', exists=True) out_file = File(desc='the output denoised DWI image', exists=True) class DWIDenoise(MRTrix3Base): @@ -75,6 +76,13 @@ class DWIDenoise(MRTrix3Base): input_spec = DWIDenoiseInputSpec output_spec = DWIDenoiseOutputSpec + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = op.abspath(self.inputs.out_file) + if self.inputs.noise != Undefined: + outputs['noise'] = op.abspath(self.inputs.noise) + return outputs + class MRDeGibbsInputSpec(MRTrix3BaseInputSpec): in_file = File( @@ -204,6 +212,7 @@ class DWIBiasCorrectInputSpec(MRTrix3BaseInputSpec): genfile=True) class DWIBiasCorrectOutputSpec(TraitedSpec): + bias = File(desc='the output bias field', exists=True) out_file = File(desc='the output bias corrected DWI image', exists=True) class DWIBiasCorrect(MRTrix3Base): @@ -228,6 +237,13 @@ class DWIBiasCorrect(MRTrix3Base): input_spec = DWIBiasCorrectInputSpec output_spec = DWIBiasCorrectOutputSpec + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = op.abspath(self.inputs.out_file) + if self.inputs.bias != Undefined: + outputs['bias'] = op.abspath(self.inputs.bias) + return outputs + class ResponseSDInputSpec(MRTrix3BaseInputSpec): algorithm = traits.Enum( diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py index 9ac0515314..ea4d3f05d8 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py @@ -59,7 +59,10 @@ def 
test_DWIBiasCorrect_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DWIBiasCorrect_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict( + bias=dict(), + out_file=dict(), + ) outputs = DWIBiasCorrect.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py index 7ef30c10b8..7b6930ee82 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py @@ -45,7 +45,10 @@ def test_DWIDenoise_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DWIDenoise_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict( + noise=dict(), + out_file=dict(), + ) outputs = DWIDenoise.output_spec() for key, metadata in list(output_map.items()): From a8548691ef8fa3ccf3f8acce774bf608665a3f46 Mon Sep 17 00:00:00 2001 From: Serge Koudoro Date: Thu, 28 Mar 2019 17:30:00 -0400 Subject: [PATCH 0193/1665] add get_dipy_workflows function --- nipype/interfaces/dipy/base.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/nipype/interfaces/dipy/base.py b/nipype/interfaces/dipy/base.py index f30af98415..d46e945e1c 100644 --- a/nipype/interfaces/dipy/base.py +++ b/nipype/interfaces/dipy/base.py @@ -211,3 +211,29 @@ def _list_outputs(self): "_run_interface": _run_interface, "_list_outputs:": _list_outputs}) return newclass + + +def get_dipy_workflows(module): + """Search for DIPY workflow class. + + Parameters + ---------- + module : object + module object + + Returns + ------- + l_wkflw : list of tuple + This a list of tuple containing 2 elements: + Worflow name, Workflow class obj + + Examples + -------- + >>> from dipy.workflows import align + >>> get_dipy_workflows(align) + + """ + return [(m, obj) for m, obj in inspect.getmembers(module) + if inspect.isclass(obj) and + issubclass(obj, module.Workflow) and + m not in ['Workflow', 'CombinedWorkflow']] From 61ebb2963237b87daf890d1e2c6e3f75fe82c7b8 Mon Sep 17 00:00:00 2001 From: Serge Koudoro Date: Thu, 28 Mar 2019 17:30:15 -0400 Subject: [PATCH 0194/1665] add tests --- nipype/interfaces/dipy/tests/test_base.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/dipy/tests/test_base.py b/nipype/interfaces/dipy/tests/test_base.py index 5f769480d2..045c0a4d24 100644 --- a/nipype/interfaces/dipy/tests/test_base.py +++ b/nipype/interfaces/dipy/tests/test_base.py @@ -2,7 +2,8 @@ from collections import namedtuple from ...base import traits, TraitedSpec, BaseInterfaceInputSpec from ..base import (convert_to_traits_type, create_interface_specs, - dipy_to_nipype_interface, DipyBaseInterface, no_dipy) + dipy_to_nipype_interface, DipyBaseInterface, no_dipy, + get_dipy_workflows) def test_convert_to_traits_type(): @@ -136,6 +137,16 @@ def run(self, in_files, param1=1, out_dir='', out_ref='out1.txt'): new_specs().run() +@pytest.mark.skipif(no_dipy(), reason="DIPY is not installed") +def test_get_dipy_workflows(): + from dipy.workflows import align + + l_wkflw = get_dipy_workflows(align) + for name, obj in l_wkflw: + assert name.endswith('Flow') + assert issubclass(align.Workflow) + + if __name__ == "__main__": test_convert_to_traits_type() test_create_interface_specs() From dfef1775080274a3a5574332981ccdb4ad368e6b Mon Sep 17 00:00:00 2001 From: Serge 
Koudoro
Date: Thu, 28 Mar 2019 17:31:06 -0400
Subject: [PATCH 0195/1665] add preprocessing cmd line

---
 nipype/interfaces/dipy/preprocess.py | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/nipype/interfaces/dipy/preprocess.py b/nipype/interfaces/dipy/preprocess.py
index 285464847e..117351bf22 100644
--- a/nipype/interfaces/dipy/preprocess.py
+++ b/nipype/interfaces/dipy/preprocess.py
@@ -5,15 +5,30 @@
 import os.path as op
 import nibabel as nb
 import numpy as np
+from distutils.version import LooseVersion

 from ...utils import NUMPY_MMAP
 from ... import logging
 from ..base import (traits, TraitedSpec, File, isdefined)
-from .base import DipyBaseInterface
+from .base import (HAVE_DIPY, dipy_version, dipy_to_nipype_interface,
+                   get_dipy_workflows, DipyBaseInterface)

 IFLOGGER = logging.getLogger('nipype.interface')

+if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('0.15'):
+    from dipy.workflows import denoise, mask
+
+    l_wkflw = get_dipy_workflows(denoise) + get_dipy_workflows(mask)
+    for name, obj in l_wkflw:
+        new_name = name.replace('Flow', '')
+        globals()[new_name] = dipy_to_nipype_interface(new_name, obj)
+    del l_wkflw
+
+else:
+    IFLOGGER.info("We advise you to upgrade DIPY version. This upgrade will"
+                  " open access to more function")
+

 class ResampleInputSpec(TraitedSpec):
     in_file = File(

From ab693cf6bc022378d931476101fc9d69e65ae088 Mon Sep 17 00:00:00 2001
From: Serge Koudoro
Date: Thu, 28 Mar 2019 17:31:33 -0400
Subject: [PATCH 0196/1665] update reconst cmd line

---
 nipype/interfaces/dipy/reconstruction.py | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/nipype/interfaces/dipy/reconstruction.py b/nipype/interfaces/dipy/reconstruction.py
index 388071cdc3..6deccd7f9f 100644
--- a/nipype/interfaces/dipy/reconstruction.py
+++ b/nipype/interfaces/dipy/reconstruction.py
@@ -18,12 +18,13 @@
 from ... import logging
 from ..base import TraitedSpec, File, traits, isdefined
 from .base import (DipyDiffusionInterface, DipyBaseInterfaceInputSpec,
-                   HAVE_DIPY, dipy_version, dipy_to_nipype_interface)
+                   HAVE_DIPY, dipy_version, dipy_to_nipype_interface,
+                   get_dipy_workflows)

 IFLOGGER = logging.getLogger('nipype.interface')

-if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('0.15'):
+if HAVE_DIPY and (LooseVersion('0.16') >= LooseVersion(dipy_version()) >= LooseVersion('0.15')):
     from dipy.workflows.reconst import (ReconstDkiFlow, ReconstCSAFlow,
                                         ReconstCSDFlow, ReconstMAPMRIFlow,
                                         ReconstDtiFlow)
@@ -33,9 +34,19 @@
     DTIModel = dipy_to_nipype_interface("DTIModel", ReconstDtiFlow)
     CSAModel = dipy_to_nipype_interface("CSAModel", ReconstCSAFlow)
     CSDModel = dipy_to_nipype_interface("CSDModel", ReconstCSDFlow)
+
+elif HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('1.0'):
+    from dipy.workflows import reconst
+
+    l_wkflw = get_dipy_workflows(reconst)
+    for name, obj in l_wkflw:
+        new_name = name.replace('Flow', '')
+        globals()[new_name] = dipy_to_nipype_interface(new_name, obj)
+    del l_wkflw
+
 else:
     IFLOGGER.info("We advise you to upgrade DIPY version. This upgrade will"
-                  " activate DKIModel, MapmriModel, DTIModel, CSAModel, CSDModel.")
+                  " open access to more models")


 class RESTOREInputSpec(DipyBaseInterfaceInputSpec):

From 2575f3de7408e9b8d69e73612248916f7ff059d2 Mon Sep 17 00:00:00 2001
From: Serge Koudoro
Date: Thu, 28 Mar 2019 17:31:52 -0400
Subject: [PATCH 0197/1665] update registration cmd line

---
 nipype/interfaces/dipy/registration.py | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/nipype/interfaces/dipy/registration.py b/nipype/interfaces/dipy/registration.py
index b1c42c7e39..6d6d023635 100644
--- a/nipype/interfaces/dipy/registration.py
+++ b/nipype/interfaces/dipy/registration.py
@@ -1,11 +1,12 @@
 from distutils.version import LooseVersion
 from ... import logging
-from .base import HAVE_DIPY, dipy_version, dipy_to_nipype_interface
+from .base import (HAVE_DIPY, dipy_version, dipy_to_nipype_interface,
+                   get_dipy_workflows)

 IFLOGGER = logging.getLogger('nipype.interface')

-if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('0.15'):
+if HAVE_DIPY and (LooseVersion('0.16') >= LooseVersion(dipy_version()) >= LooseVersion('0.15')):
     from dipy.workflows.align import ResliceFlow, SlrWithQbxFlow
@@ -13,6 +14,15 @@
     StreamlineRegistration = dipy_to_nipype_interface("StreamlineRegistration",
                                                       SlrWithQbxFlow)

+elif HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('1.0'):
+    from dipy.workflows import align
+
+    l_wkflw = get_dipy_workflows(align)
+    for name, obj in l_wkflw:
+        new_name = name.replace('Flow', '')
+        globals()[new_name] = dipy_to_nipype_interface(new_name, obj)
+    del l_wkflw
+
 else:
     IFLOGGER.info("We advise you to upgrade DIPY version. This upgrade will"
-                  " activate Reslice, StreamlineRegistration.")
+                  " open access to more function")

From 0a2ff69f60c3752c50e73bf8403206d73c06d528 Mon Sep 17 00:00:00 2001
From: Serge Koudoro
Date: Thu, 28 Mar 2019 17:32:09 -0400
Subject: [PATCH 0198/1665] add stats cmd line

---
 nipype/interfaces/dipy/stats.py | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)
 create mode 100644 nipype/interfaces/dipy/stats.py

diff --git a/nipype/interfaces/dipy/stats.py b/nipype/interfaces/dipy/stats.py
new file mode 100644
index 0000000000..6aba67226b
--- /dev/null
+++ b/nipype/interfaces/dipy/stats.py
@@ -0,0 +1,20 @@
+
+from distutils.version import LooseVersion
+from ... import logging
+from .base import (HAVE_DIPY, dipy_version, dipy_to_nipype_interface,
+                   get_dipy_workflows)
+
+IFLOGGER = logging.getLogger('nipype.interface')
+
+if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('0.15'):
+    from dipy.workflows import stats
+
+    l_wkflw = get_dipy_workflows(stats)
+    for name, obj in l_wkflw:
+        new_name = name.replace('Flow', '')
+        globals()[new_name] = dipy_to_nipype_interface(new_name, obj)
+    del l_wkflw
+
+else:
+    IFLOGGER.info("We advise you to upgrade DIPY version. This upgrade will"
+                  " open access to more function")

From f1fc014bddcc4153ef660dd9bd448ae0a25408fc Mon Sep 17 00:00:00 2001
From: Serge Koudoro
Date: Thu, 28 Mar 2019 17:32:43 -0400
Subject: [PATCH 0199/1665] update track cmd line

---
 nipype/interfaces/dipy/tracks.py | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/nipype/interfaces/dipy/tracks.py b/nipype/interfaces/dipy/tracks.py
index b3d51151e2..77c4b55491 100644
--- a/nipype/interfaces/dipy/tracks.py
+++ b/nipype/interfaces/dipy/tracks.py
@@ -12,12 +12,12 @@
 from ..base import (TraitedSpec, BaseInterfaceInputSpec, File, isdefined,
                     traits)
 from .base import (DipyBaseInterface, HAVE_DIPY, dipy_version,
-                   dipy_to_nipype_interface)
+                   dipy_to_nipype_interface, get_dipy_workflows)

 IFLOGGER = logging.getLogger('nipype.interface')

-if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('0.15'):
+if HAVE_DIPY and (LooseVersion('0.16') >= LooseVersion(dipy_version()) >= LooseVersion('0.15')):
     from dipy.workflows.segment import RecoBundlesFlow, LabelsBundlesFlow

     try:
@@ -31,9 +31,18 @@
     DeterministicTracking = dipy_to_nipype_interface("DeterministicTracking",
                                                      DetTrackFlow)

+elif HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('1.0'):
+    from dipy.workflows import segment, tracking
+
+    l_wkflw = get_dipy_workflows(segment) + get_dipy_workflows(tracking)
+    for name, obj in l_wkflw:
+        new_name = name.replace('Flow', '')
+        globals()[new_name] = dipy_to_nipype_interface(new_name, obj)
+    del l_wkflw
+
 else:
     IFLOGGER.info("We advise you to upgrade DIPY version. This upgrade will"
-                  " activate RecoBundles, LabelsBundles, DeterministicTracking.")
+                  " open access to more function")


 class TrackDensityMapInputSpec(BaseInterfaceInputSpec):

From 5f2445ce76f4e3bf998bdf8f063f4df34d2acda4 Mon Sep 17 00:00:00 2001
From: Serge Koudoro
Date: Thu, 28 Mar 2019 18:42:21 -0400
Subject: [PATCH 0200/1665] update zenodo

---
 .zenodo.json | 4 ++++
 nipype/interfaces/dipy/stats.py | 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/.zenodo.json b/.zenodo.json
index 065e188d8b..857cd9f8de 100644
--- a/.zenodo.json
+++ b/.zenodo.json
@@ -370,6 +370,10 @@
     {
       "name": "Haselgrove, Christian"
     },
+    {
+      "name": "Koudoro, Serge",
+      "affiliation": "Indiana University, IN, USA"
+    },
     {
       "affiliation": "1 McGill Centre for Integrative Neuroscience (MCIN), Ludmer Centre for Neuroinformatics and Mental Health, Montreal Neurological Institute (MNI), McGill University, Montr\u00e9al, 3801 University Street, WB-208, H3A 2B4, Qu\u00e9bec, Canada. 
2 University of Lyon, CNRS, INSERM, CREATIS., Villeurbanne, 7, avenue Jean Capelle, 69621, France.", "name": "Glatard, Tristan", diff --git a/nipype/interfaces/dipy/stats.py b/nipype/interfaces/dipy/stats.py index 6aba67226b..8f55b3322a 100644 --- a/nipype/interfaces/dipy/stats.py +++ b/nipype/interfaces/dipy/stats.py @@ -6,7 +6,7 @@ IFLOGGER = logging.getLogger('nipype.interface') -if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('0.15'): +if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('0.16'): from dipy.workflows import stats l_wkflw = get_dipy_workflows(stats) From 289d85669c0403218219424a8f77a36b89fd84e6 Mon Sep 17 00:00:00 2001 From: Serge Koudoro Date: Thu, 28 Mar 2019 19:09:12 -0400 Subject: [PATCH 0201/1665] fix tests --- nipype/interfaces/dipy/tests/test_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/dipy/tests/test_base.py b/nipype/interfaces/dipy/tests/test_base.py index 045c0a4d24..1d475ac0f7 100644 --- a/nipype/interfaces/dipy/tests/test_base.py +++ b/nipype/interfaces/dipy/tests/test_base.py @@ -144,7 +144,7 @@ def test_get_dipy_workflows(): l_wkflw = get_dipy_workflows(align) for name, obj in l_wkflw: assert name.endswith('Flow') - assert issubclass(align.Workflow) + assert issubclass(obj, align.Workflow) if __name__ == "__main__": From 4f09bac63a7ce8276754fb5cad0a0c3c7c11bb85 Mon Sep 17 00:00:00 2001 From: Serge Koudoro Date: Thu, 28 Mar 2019 19:10:56 -0400 Subject: [PATCH 0202/1665] doctest skip --- nipype/interfaces/dipy/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/dipy/base.py b/nipype/interfaces/dipy/base.py index d46e945e1c..4e9b2c7e01 100644 --- a/nipype/interfaces/dipy/base.py +++ b/nipype/interfaces/dipy/base.py @@ -230,7 +230,7 @@ def get_dipy_workflows(module): Examples -------- >>> from dipy.workflows import align - >>> get_dipy_workflows(align) + >>> get_dipy_workflows(align) # doctest: +SKIP """ return [(m, obj) for m, obj in inspect.getmembers(module) From ebe7da702d84a405901a2aabb11e5873207b9622 Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Thu, 28 Mar 2019 20:47:03 -0400 Subject: [PATCH 0203/1665] updated nipype2boutiques test --- nipype/utils/nipype2boutiques_example.json | 549 ++++++++++++++++++++ nipype/utils/tests/test_nipype2boutiques.py | 42 +- 2 files changed, 582 insertions(+), 9 deletions(-) create mode 100644 nipype/utils/nipype2boutiques_example.json diff --git a/nipype/utils/nipype2boutiques_example.json b/nipype/utils/nipype2boutiques_example.json new file mode 100644 index 0000000000..45359f49ed --- /dev/null +++ b/nipype/utils/nipype2boutiques_example.json @@ -0,0 +1,549 @@ +{ + "name": "FLIRT", + "command-line": "FLIRT [IN_FILE] [REFERENCE] [OUT_FILE] [OUT_MATRIX_FILE] [ANGLE_REP] [APPLY_ISOXFM] [APPLY_XFM] [BBRSLOPE] [BBRTYPE] [BGVALUE] [BINS] [COARSE_SEARCH] [COST] [COST_FUNC] [DATATYPE] [DISPLAY_INIT] [DOF] [ECHOSPACING] [FIELDMAP] [FIELDMAPMASK] [FINE_SEARCH] [FORCE_SCALING] [IN_MATRIX_FILE] [IN_WEIGHT] [INTERP] [MIN_SAMPLING] [NO_CLAMP] [NO_RESAMPLE] [NO_RESAMPLE_BLUR] [NO_SEARCH] [OUT_LOG] [PADDING_SIZE] [PEDIR] [REF_WEIGHT] [RIGID2D] [SAVE_LOG] [SCHEDULE] [SEARCHR_X] [SEARCHR_Y] [SEARCHR_Z] [SINC_WIDTH] [SINC_WINDOW] [USES_QFORM] [VERBOSE] [WM_SEG] [WMCOORDS] [WMNORMS]", + "author": "Nipype (interface), Oxford Centre for Functional MRI of the Brain (FMRIB) (tool)", + "description": "FLIRT, as implemented in Nipype (module: nipype.interfaces.fsl, interface: FLIRT).", + "inputs": [ + { + "id": "angle_rep", + 
"name": "Angle rep", + "type": "String", + "value-key": "[ANGLE_REP]", + "command-line-flag": "-anglerep", + "description": "'quaternion' or 'euler'. Representation of rotation angles.", + "optional": true, + "value-choices": [ + "quaternion", + "euler" + ] + }, + { + "id": "apply_isoxfm", + "name": "Apply isoxfm", + "type": "Number", + "value-key": "[APPLY_ISOXFM]", + "command-line-flag": "-applyisoxfm", + "description": "A float. As applyxfm but forces isotropic resampling.", + "optional": true + }, + { + "id": "apply_xfm", + "name": "Apply xfm", + "type": "Flag", + "value-key": "[APPLY_XFM]", + "command-line-flag": "-applyxfm", + "description": "A boolean. Apply transformation supplied by in_matrix_file or uses_qform to use the affine matrix stored in the reference header.", + "optional": true + }, + { + "id": "bbrslope", + "name": "Bbrslope", + "type": "Number", + "value-key": "[BBRSLOPE]", + "command-line-flag": "-bbrslope", + "description": "A float. Value of bbr slope.", + "optional": true + }, + { + "id": "bbrtype", + "name": "Bbrtype", + "type": "String", + "value-key": "[BBRTYPE]", + "command-line-flag": "-bbrtype", + "description": "'signed' or 'global_abs' or 'local_abs'. Type of bbr cost function: signed [default], global_abs, local_abs.", + "optional": true, + "value-choices": [ + "signed", + "global_abs", + "local_abs" + ] + }, + { + "id": "bgvalue", + "name": "Bgvalue", + "type": "Number", + "value-key": "[BGVALUE]", + "command-line-flag": "-setbackground", + "description": "A float. Use specified background value for points outside fov.", + "optional": true + }, + { + "id": "bins", + "name": "Bins", + "type": "Number", + "integer": true, + "value-key": "[BINS]", + "command-line-flag": "-bins", + "description": "An integer (int or long). Number of histogram bins.", + "optional": true + }, + { + "id": "coarse_search", + "name": "Coarse search", + "type": "Number", + "integer": true, + "value-key": "[COARSE_SEARCH]", + "command-line-flag": "-coarsesearch", + "description": "An integer (int or long). Coarse search delta angle.", + "optional": true + }, + { + "id": "cost", + "name": "Cost", + "type": "String", + "value-key": "[COST]", + "command-line-flag": "-cost", + "description": "'mutualinfo' or 'corratio' or 'normcorr' or 'normmi' or 'leastsq' or 'labeldiff' or 'bbr'. Cost function.", + "optional": true, + "value-choices": [ + "mutualinfo", + "corratio", + "normcorr", + "normmi", + "leastsq", + "labeldiff", + "bbr" + ] + }, + { + "id": "cost_func", + "name": "Cost func", + "type": "String", + "value-key": "[COST_FUNC]", + "command-line-flag": "-searchcost", + "description": "'mutualinfo' or 'corratio' or 'normcorr' or 'normmi' or 'leastsq' or 'labeldiff' or 'bbr'. Cost function.", + "optional": true, + "value-choices": [ + "mutualinfo", + "corratio", + "normcorr", + "normmi", + "leastsq", + "labeldiff", + "bbr" + ] + }, + { + "id": "datatype", + "name": "Datatype", + "type": "String", + "value-key": "[DATATYPE]", + "command-line-flag": "-datatype", + "description": "'char' or 'short' or 'int' or 'float' or 'double'. Force output data type.", + "optional": true, + "value-choices": [ + "char", + "short", + "int", + "float", + "double" + ] + }, + { + "id": "display_init", + "name": "Display init", + "type": "Flag", + "value-key": "[DISPLAY_INIT]", + "command-line-flag": "-displayinit", + "description": "A boolean. 
Display initial matrix.", + "optional": true + }, + { + "id": "dof", + "name": "Dof", + "type": "Number", + "integer": true, + "value-key": "[DOF]", + "command-line-flag": "-dof", + "description": "An integer (int or long). Number of transform degrees of freedom.", + "optional": true + }, + { + "id": "echospacing", + "name": "Echospacing", + "type": "Number", + "value-key": "[ECHOSPACING]", + "command-line-flag": "-echospacing", + "description": "A float. Value of epi echo spacing - units of seconds.", + "optional": true + }, + { + "id": "fieldmap", + "name": "Fieldmap", + "type": "File", + "value-key": "[FIELDMAP]", + "command-line-flag": "-fieldmap", + "description": "A file name. Fieldmap image in rads/s - must be already registered to the reference image.", + "optional": true + }, + { + "id": "fieldmapmask", + "name": "Fieldmapmask", + "type": "File", + "value-key": "[FIELDMAPMASK]", + "command-line-flag": "-fieldmapmask", + "description": "A file name. Mask for fieldmap image.", + "optional": true + }, + { + "id": "fine_search", + "name": "Fine search", + "type": "Number", + "integer": true, + "value-key": "[FINE_SEARCH]", + "command-line-flag": "-finesearch", + "description": "An integer (int or long). Fine search delta angle.", + "optional": true + }, + { + "id": "force_scaling", + "name": "Force scaling", + "type": "Flag", + "value-key": "[FORCE_SCALING]", + "command-line-flag": "-forcescaling", + "description": "A boolean. Force rescaling even for low-res images.", + "optional": true + }, + { + "id": "in_file", + "name": "In file", + "type": "File", + "value-key": "[IN_FILE]", + "command-line-flag": "-in", + "description": "An existing file name. Input file.", + "optional": false + }, + { + "id": "in_matrix_file", + "name": "In matrix file", + "type": "File", + "value-key": "[IN_MATRIX_FILE]", + "command-line-flag": "-init", + "description": "A file name. Input 4x4 affine matrix.", + "optional": true + }, + { + "id": "in_weight", + "name": "In weight", + "type": "File", + "value-key": "[IN_WEIGHT]", + "command-line-flag": "-inweight", + "description": "An existing file name. File for input weighting volume.", + "optional": true + }, + { + "id": "interp", + "name": "Interp", + "type": "String", + "value-key": "[INTERP]", + "command-line-flag": "-interp", + "description": "'trilinear' or 'nearestneighbour' or 'sinc' or 'spline'. Final interpolation method used in reslicing.", + "optional": true, + "value-choices": [ + "trilinear", + "nearestneighbour", + "sinc", + "spline" + ] + }, + { + "id": "min_sampling", + "name": "Min sampling", + "type": "Number", + "value-key": "[MIN_SAMPLING]", + "command-line-flag": "-minsampling", + "description": "A float. Set minimum voxel dimension for sampling.", + "optional": true + }, + { + "id": "no_clamp", + "name": "No clamp", + "type": "Flag", + "value-key": "[NO_CLAMP]", + "command-line-flag": "-noclamp", + "description": "A boolean. Do not use intensity clamping.", + "optional": true + }, + { + "id": "no_resample", + "name": "No resample", + "type": "Flag", + "value-key": "[NO_RESAMPLE]", + "command-line-flag": "-noresample", + "description": "A boolean. Do not change input sampling.", + "optional": true + }, + { + "id": "no_resample_blur", + "name": "No resample blur", + "type": "Flag", + "value-key": "[NO_RESAMPLE_BLUR]", + "command-line-flag": "-noresampblur", + "description": "A boolean. 
Do not use blurring on downsampling.", + "optional": true + }, + { + "id": "no_search", + "name": "No search", + "type": "Flag", + "value-key": "[NO_SEARCH]", + "command-line-flag": "-nosearch", + "description": "A boolean. Set all angular searches to ranges 0 to 0.", + "optional": true + }, + { + "id": "padding_size", + "name": "Padding size", + "type": "Number", + "integer": true, + "value-key": "[PADDING_SIZE]", + "command-line-flag": "-paddingsize", + "description": "An integer (int or long). For applyxfm: interpolates outside image by size.", + "optional": true + }, + { + "id": "pedir", + "name": "Pedir", + "type": "Number", + "integer": true, + "value-key": "[PEDIR]", + "command-line-flag": "-pedir", + "description": "An integer (int or long). Phase encode direction of epi - 1/2/3=x/y/z & -1/-2/-3=-x/-y/-z.", + "optional": true + }, + { + "id": "ref_weight", + "name": "Ref weight", + "type": "File", + "value-key": "[REF_WEIGHT]", + "command-line-flag": "-refweight", + "description": "An existing file name. File for reference weighting volume.", + "optional": true + }, + { + "id": "reference", + "name": "Reference", + "type": "File", + "value-key": "[REFERENCE]", + "command-line-flag": "-ref", + "description": "An existing file name. Reference file.", + "optional": false + }, + { + "id": "rigid2D", + "name": "Rigid2d", + "type": "Flag", + "value-key": "[RIGID2D]", + "command-line-flag": "-2D", + "description": "A boolean. Use 2d rigid body mode - ignores dof.", + "optional": true + }, + { + "id": "save_log", + "name": "Save log", + "type": "Flag", + "value-key": "[SAVE_LOG]", + "command-line-flag": "--save_log", + "description": "A boolean. Save to log file.", + "optional": true + }, + { + "id": "schedule", + "name": "Schedule", + "type": "File", + "value-key": "[SCHEDULE]", + "command-line-flag": "-schedule", + "description": "An existing file name. Replaces default schedule.", + "optional": true + }, + { + "id": "searchr_x", + "name": "Searchr x", + "type": "Number", + "list": true, + "integer": true, + "min-list-entries": 2, + "max-list-entries": 2, + "value-key": "[SEARCHR_X]", + "command-line-flag": "-searchrx", + "description": "A list of from 2 to 2 items which are an integer (int or long). Search angles along x-axis, in degrees.", + "optional": true + }, + { + "id": "searchr_y", + "name": "Searchr y", + "type": "Number", + "list": true, + "integer": true, + "min-list-entries": 2, + "max-list-entries": 2, + "value-key": "[SEARCHR_Y]", + "command-line-flag": "-searchry", + "description": "A list of from 2 to 2 items which are an integer (int or long). Search angles along y-axis, in degrees.", + "optional": true + }, + { + "id": "searchr_z", + "name": "Searchr z", + "type": "Number", + "list": true, + "integer": true, + "min-list-entries": 2, + "max-list-entries": 2, + "value-key": "[SEARCHR_Z]", + "command-line-flag": "-searchrz", + "description": "A list of from 2 to 2 items which are an integer (int or long). Search angles along z-axis, in degrees.", + "optional": true + }, + { + "id": "sinc_width", + "name": "Sinc width", + "type": "Number", + "integer": true, + "value-key": "[SINC_WIDTH]", + "command-line-flag": "-sincwidth", + "description": "An integer (int or long). Full-width in voxels.", + "optional": true + }, + { + "id": "sinc_window", + "name": "Sinc window", + "type": "String", + "value-key": "[SINC_WINDOW]", + "command-line-flag": "-sincwindow", + "description": "'rectangular' or 'hanning' or 'blackman'. 
Sinc window.", + "optional": true, + "value-choices": [ + "rectangular", + "hanning", + "blackman" + ] + }, + { + "id": "uses_qform", + "name": "Uses qform", + "type": "Flag", + "value-key": "[USES_QFORM]", + "command-line-flag": "-usesqform", + "description": "A boolean. Initialize using sform or qform.", + "optional": true + }, + { + "id": "verbose", + "name": "Verbose", + "type": "Number", + "integer": true, + "value-key": "[VERBOSE]", + "command-line-flag": "-verbose", + "description": "An integer (int or long). Verbose mode, 0 is least.", + "optional": true + }, + { + "id": "wm_seg", + "name": "Wm seg", + "type": "File", + "value-key": "[WM_SEG]", + "command-line-flag": "-wmseg", + "description": "A file name. White matter segmentation volume needed by bbr cost function.", + "optional": true + }, + { + "id": "wmcoords", + "name": "Wmcoords", + "type": "File", + "value-key": "[WMCOORDS]", + "command-line-flag": "-wmcoords", + "description": "A file name. White matter boundary coordinates for bbr cost function.", + "optional": true + }, + { + "id": "wmnorms", + "name": "Wmnorms", + "type": "File", + "value-key": "[WMNORMS]", + "command-line-flag": "-wmnorms", + "description": "A file name. White matter boundary normals for bbr cost function.", + "optional": true + } + ], + "output-files": [ + { + "name": "Out file", + "id": "out_file", + "optional": true, + "description": "A file name. Registered output file.", + "path-template": "[IN_FILE]_flirt", + "value-key": "[OUT_FILE]", + "command-line-flag": "-out" + }, + { + "name": "Out log", + "id": "out_log", + "optional": true, + "description": "A file name. Output log.", + "path-template": "[IN_FILE]_flirt.log", + "value-key": "[OUT_LOG]" + }, + { + "name": "Out matrix file", + "id": "out_matrix_file", + "optional": true, + "description": "A file name. Output affine matrix in 4x4 asciii format.", + "path-template": "[IN_FILE]_flirt.mat", + "value-key": "[OUT_MATRIX_FILE]", + "command-line-flag": "-omat" + }, + { + "name": "Out file", + "id": "out_file", + "path-template": "out_file", + "optional": true, + "description": "An existing file name. Path/name of registered file (if generated)." + }, + { + "name": "Out log", + "id": "out_log", + "path-template": "out_log", + "optional": true, + "description": "A file name. Path/name of output log (if generated)." + }, + { + "name": "Out matrix file", + "id": "out_matrix_file", + "path-template": "out_matrix_file", + "optional": true, + "description": "An existing file name. Path/name of calculated affine transform (if generated)." 
+ } + ], + "groups": [ + { + "id": "all_or_none_group", + "name": "All or none group", + "members": [ + "save_log", + "out_log" + ], + "all-or-none": true + }, + { + "id": "mutex_group", + "name": "Mutex group", + "members": [ + "apply_isoxfm", + "apply_xfm" + ], + "mutually-exclusive": true + } + ], + "tool-version": "1.0.0", + "schema-version": "0.5", + "container-image": { + "image": "mcin/docker-fsl:latest", + "type": "docker", + "index": "index.docker.io" + }, + "tags": { + "domain": "neuroinformatics", + "source": "nipype-interface" + } +} \ No newline at end of file diff --git a/nipype/utils/tests/test_nipype2boutiques.py b/nipype/utils/tests/test_nipype2boutiques.py index 926de0f2cd..f5c6c6494a 100644 --- a/nipype/utils/tests/test_nipype2boutiques.py +++ b/nipype/utils/tests/test_nipype2boutiques.py @@ -2,16 +2,40 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from future import standard_library -standard_library.install_aliases() - from ..nipype2boutiques import generate_boutiques_descriptor +import json +standard_library.install_aliases() def test_generate(): - generate_boutiques_descriptor(module='nipype.interfaces.ants.registration', - interface_name='ANTS', - container_image=None, - container_index=None, - container_type=None, - verbose=False, - save=False) + ignored_inputs = [ + "args", + "environ", + "output_type" + ] + desc = generate_boutiques_descriptor(module='nipype.interfaces.fsl', + interface_name='FLIRT', + container_image=('mcin/' + 'docker-fsl:latest'), + container_index='index.docker.io', + container_type='docker', + verbose=False, + save=False, + ignore_inputs=ignored_inputs, + author=("Oxford Centre for Functional" + " MRI of the Brain (FMRIB)")) + + with open('nipype/utils/nipype2boutiques_example.json', 'r', + encoding='utf-8') as desc_file: + assert ordered(json.loads(desc)) == ordered(json.load(desc_file)) + + +# Recursively sorts all items in a JSON object +# Used when comparing two JSON objects whose ordering may differ +def ordered(obj): + if isinstance(obj, dict): + return sorted((k, ordered(v)) for k, v in obj.items()) + if isinstance(obj, list): + return sorted(ordered(x) for x in obj) + else: + return obj From 2ff50eda8cf09914867ad72212e064735216600e Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Thu, 28 Mar 2019 21:55:58 -0400 Subject: [PATCH 0204/1665] fix test file path --- nipype/utils/tests/test_nipype2boutiques.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/utils/tests/test_nipype2boutiques.py b/nipype/utils/tests/test_nipype2boutiques.py index f5c6c6494a..520f3c7862 100644 --- a/nipype/utils/tests/test_nipype2boutiques.py +++ b/nipype/utils/tests/test_nipype2boutiques.py @@ -25,7 +25,7 @@ def test_generate(): author=("Oxford Centre for Functional" " MRI of the Brain (FMRIB)")) - with open('nipype/utils/nipype2boutiques_example.json', 'r', + with open('utils/nipype2boutiques_example.json', 'r', encoding='utf-8') as desc_file: assert ordered(json.loads(desc)) == ordered(json.load(desc_file)) From 70150049c30168878b6d9aa6e8a420ede063940c Mon Sep 17 00:00:00 2001 From: Michael Joseph Date: Fri, 29 Mar 2019 09:47:16 -0400 Subject: [PATCH 0205/1665] removed non ASCII characters --- nipype/interfaces/mrtrix3/preprocess.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py index ba5f734621..99c6c4fb03 100644 --- 
a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ -140,11 +140,11 @@ class MRDeGibbs(MRTrix3Base): taken place. You should not run this command after any form of motion correction (e.g. not after dwipreproc). Similarly, if you intend running dwidenoise, you should run this command afterwards, since it has the - potential to alter the noise structure, which would impact on dwidenoise’s + potential to alter the noise structure, which would impact on dwidenoise's performance. Note that this method is designed to work on images acquired with full - k-space coverage. Running this method on partial Fourier (‘half-scan’) data + k-space coverage. Running this method on partial Fourier ('half-scan') data may lead to suboptimal and/or biased results, as noted in the original reference below. There is currently no means of dealing with this; users should exercise caution when using this method on partial Fourier data, and From ece338be8ae976d2a0ca84037f3a5a038e54cbc4 Mon Sep 17 00:00:00 2001 From: Serge Koudoro Date: Fri, 29 Mar 2019 10:16:27 -0400 Subject: [PATCH 0206/1665] fix doctests error --- nipype/interfaces/dipy/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/dipy/base.py b/nipype/interfaces/dipy/base.py index 4e9b2c7e01..a86d5635fb 100644 --- a/nipype/interfaces/dipy/base.py +++ b/nipype/interfaces/dipy/base.py @@ -229,7 +229,7 @@ def get_dipy_workflows(module): Examples -------- - >>> from dipy.workflows import align + >>> from dipy.workflows import align # doctest: +SKIP >>> get_dipy_workflows(align) # doctest: +SKIP """ From c828feca0efe3108736e0869da33689082897b4c Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Fri, 29 Mar 2019 16:46:24 -0400 Subject: [PATCH 0207/1665] fixed test --- nipype/{utils => testing/data}/nipype2boutiques_example.json | 0 nipype/utils/tests/test_nipype2boutiques.py | 4 ++-- 2 files changed, 2 insertions(+), 2 deletions(-) rename nipype/{utils => testing/data}/nipype2boutiques_example.json (100%) diff --git a/nipype/utils/nipype2boutiques_example.json b/nipype/testing/data/nipype2boutiques_example.json similarity index 100% rename from nipype/utils/nipype2boutiques_example.json rename to nipype/testing/data/nipype2boutiques_example.json diff --git a/nipype/utils/tests/test_nipype2boutiques.py b/nipype/utils/tests/test_nipype2boutiques.py index 520f3c7862..f3ad944650 100644 --- a/nipype/utils/tests/test_nipype2boutiques.py +++ b/nipype/utils/tests/test_nipype2boutiques.py @@ -3,6 +3,7 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: from future import standard_library from ..nipype2boutiques import generate_boutiques_descriptor +from nipype.testing import example_data import json standard_library.install_aliases() @@ -25,8 +26,7 @@ def test_generate(): author=("Oxford Centre for Functional" " MRI of the Brain (FMRIB)")) - with open('utils/nipype2boutiques_example.json', 'r', - encoding='utf-8') as desc_file: + with open(example_data('nipype2boutiques_example.json'), 'r') as desc_file: assert ordered(json.loads(desc)) == ordered(json.load(desc_file)) From 7e96311e26f8589e980576a1b6f13fcd9796a51d Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Tue, 2 Apr 2019 10:33:05 -0400 Subject: [PATCH 0208/1665] changed test to only check a subset of descriptor fields --- nipype/utils/tests/test_nipype2boutiques.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/nipype/utils/tests/test_nipype2boutiques.py b/nipype/utils/tests/test_nipype2boutiques.py index 
f3ad944650..5bba16be6f 100644 --- a/nipype/utils/tests/test_nipype2boutiques.py +++ b/nipype/utils/tests/test_nipype2boutiques.py @@ -27,7 +27,21 @@ def test_generate(): " MRI of the Brain (FMRIB)")) with open(example_data('nipype2boutiques_example.json'), 'r') as desc_file: - assert ordered(json.loads(desc)) == ordered(json.load(desc_file)) + # Make sure that output descriptor matches the expected descriptor. + output_desc = json.loads(desc) + expected_desc = json.load(desc_file) + assert (output_desc.get('name') == + expected_desc.get('name')) + assert (output_desc.get('author') == + expected_desc.get('author')) + assert (output_desc.get('command-line') == + expected_desc.get('command-line')) + assert (output_desc.get('description') == + expected_desc.get('description')) + assert (ordered(output_desc.get('inputs')) == + ordered(expected_desc.get('inputs'))) + assert (ordered(output_desc.get('container-image')) == + ordered(expected_desc.get('container-image'))) # Recursively sorts all items in a JSON object From 0eae216930657d07f80ee02b0a0e1dd93e93cd00 Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Thu, 4 Apr 2019 14:53:37 -0400 Subject: [PATCH 0209/1665] changed test again --- nipype/utils/tests/test_nipype2boutiques.py | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/nipype/utils/tests/test_nipype2boutiques.py b/nipype/utils/tests/test_nipype2boutiques.py index 5bba16be6f..19735df6b5 100644 --- a/nipype/utils/tests/test_nipype2boutiques.py +++ b/nipype/utils/tests/test_nipype2boutiques.py @@ -38,18 +38,9 @@ def test_generate(): expected_desc.get('command-line')) assert (output_desc.get('description') == expected_desc.get('description')) - assert (ordered(output_desc.get('inputs')) == - ordered(expected_desc.get('inputs'))) - assert (ordered(output_desc.get('container-image')) == - ordered(expected_desc.get('container-image'))) - - -# Recursively sorts all items in a JSON object -# Used when comparing two JSON objects whose ordering may differ -def ordered(obj): - if isinstance(obj, dict): - return sorted((k, ordered(v)) for k, v in obj.items()) - if isinstance(obj, list): - return sorted(ordered(x) for x in obj) - else: - return obj + assert (len(output_desc.get('inputs')) == + len(expected_desc.get('inputs'))) + assert(len(output_desc.get('output-files')) == + len(expected_desc.get('output-files'))) + assert (output_desc.get('container-image').get('image') == + expected_desc.get('container-image').get('image')) From 4a3a42e3fbb0cc038ff3a1e4698b7210ead3a66d Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 9 Apr 2019 11:15:32 -0400 Subject: [PATCH 0210/1665] FIX: Import nibabel reorientation bug fix --- nipype/interfaces/image.py | 16 ++++++---------- nipype/interfaces/tests/test_image.py | 4 ++-- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/nipype/interfaces/image.py b/nipype/interfaces/image.py index 061bd1e2cc..50fad60ebb 100644 --- a/nipype/interfaces/image.py +++ b/nipype/interfaces/image.py @@ -5,6 +5,7 @@ from ..utils.filemanip import fname_presuffix from .base import (SimpleInterface, TraitedSpec, BaseInterfaceInputSpec, traits, File) +from .. 
import LooseVersion class RescaleInputSpec(BaseInterfaceInputSpec): @@ -186,8 +187,8 @@ def _run_interface(self, runtime): transform = ornt_transform(orig_ornt, targ_ornt) affine_xfm = inv_ornt_aff(transform, orig_img.shape) - # Check can be eliminated when minimum nibabel version >= 2.2 - if hasattr(orig_img, 'as_reoriented'): + # Check can be eliminated when minimum nibabel version >= 2.4 + if LooseVersion(nb.__version__) >= LooseVersion('2.4.0'): reoriented = orig_img.as_reoriented(transform) else: reoriented = _as_reoriented_backport(orig_img, transform) @@ -212,7 +213,7 @@ def _run_interface(self, runtime): def _as_reoriented_backport(img, ornt): - """Backport of img.as_reoriented as of nibabel 2.2.0""" + """Backport of img.as_reoriented as of nibabel 2.4.0""" import numpy as np import nibabel as nb from nibabel.orientations import inv_ornt_aff @@ -225,13 +226,8 @@ def _as_reoriented_backport(img, ornt): if isinstance(reoriented, nb.Nifti1Pair): # Also apply the transform to the dim_info fields - new_dim = list(reoriented.header.get_dim_info()) - for idx, value in enumerate(new_dim): - # For each value, leave as None if it was that way, - # otherwise check where we have mapped it to - if value is None: - continue - new_dim[idx] = np.where(ornt[:, 0] == idx)[0] + new_dim = [None if orig_dim is None else int(ornt[orig_dim, 0]) + for orig_dim in img.header.get_dim_info()] reoriented.header.set_dim_info(*new_dim) diff --git a/nipype/interfaces/tests/test_image.py b/nipype/interfaces/tests/test_image.py index bb4adf1d01..43030ff1b4 100644 --- a/nipype/interfaces/tests/test_image.py +++ b/nipype/interfaces/tests/test_image.py @@ -9,10 +9,10 @@ from ..image import _as_reoriented_backport, _orientations from ... import LooseVersion -nibabel22 = LooseVersion(nb.__version__) >= LooseVersion('2.2.0') +nibabel24 = LooseVersion(nb.__version__) >= LooseVersion('2.4.0') -@pytest.mark.skipif(not nibabel22, +@pytest.mark.skipif(not nibabel24, reason="Old nibabel - can't directly compare") def test_reorientation_backport(): pixdims = ((1, 1, 1), (2, 2, 3)) From 52d87a9cf091668f292ad57c77fcce28ce4b33af Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Tue, 9 Apr 2019 22:33:18 -0400 Subject: [PATCH 0211/1665] added handling for lists with value choices, fixed logic to handle case when command line flag includes a 0 or 1 (seen in FNIRT) --- nipype/utils/nipype2boutiques.py | 38 ++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py index a9500a71a8..5f88239da1 100644 --- a/nipype/utils/nipype2boutiques.py +++ b/nipype/utils/nipype2boutiques.py @@ -268,15 +268,7 @@ def get_boutiques_input(inputs, interface, input_name, spec, verbose, elif handler_type == "Float": inp['type'] = "Number" elif handler_type == "Bool": - if (spec.argstr and len(spec.argstr.split("=")) > 1 and - (spec.argstr.split("=")[1] == '0' - or spec.argstr.split("=")[1] == '1')): - inp['type'] = "Number" - inp['integer'] = True - inp['minimum'] = 0 - inp['maximum'] = 1 - else: - inp['type'] = "Flag" + inp['type'] = "Flag" else: inp['type'] = "String" @@ -296,14 +288,24 @@ def get_boutiques_input(inputs, interface, input_name, spec, verbose, # TODO handle lists of lists (e.g. 
FSL ProbTrackX seed input) if handler_type == "List": inp['list'] = True - trait_type = type(trait_handler.item_trait.trait_type).__name__ - if trait_type == "Int": + item_type = trait_handler.item_trait.trait_type + item_type_name = type(item_type).__name__ + if item_type_name == "Int": inp['integer'] = True inp['type'] = "Number" - elif trait_type == "Float": + elif item_type_name == "Float": inp['type'] = "Number" - elif trait_type == "File": + elif item_type_name == "File": inp['type'] = "File" + elif item_type_name == "Enum": + value_choices = item_type.values + if value_choices is not None: + if all(isinstance(n, int) for n in value_choices): + inp['type'] = "Number" + inp['integer'] = True + elif all(isinstance(n, float) for n in value_choices): + inp['type'] = "Number" + inp['value-choices'] = value_choices else: inp['type'] = "String" if trait_handler.minlen != 0: @@ -532,7 +534,7 @@ def generate_custom_inputs(desc_inputs): for desc_input in desc_inputs: if desc_input['type'] == 'Flag': custom_input_dicts.append({desc_input['id']: True}) - elif desc_input.get('value-choices'): + elif desc_input.get('value-choices') and not desc_input.get('list'): for value in desc_input['value-choices']: custom_input_dicts.append({desc_input['id']: value}) return custom_input_dicts @@ -578,8 +580,12 @@ def get_command_line_flag(input_spec, is_flag_type=False, input_name=None): flag, flag_sep = None, None if input_spec.argstr: if "=" in input_spec.argstr: - flag = input_spec.argstr.split("=")[0].strip() - flag_sep = "=" + if (input_spec.argstr.split("=")[1] == '0' + or input_spec.argstr.split("=")[1] == '1'): + flag = input_spec.argstr + else: + flag = input_spec.argstr.split("=")[0].strip() + flag_sep = "=" elif input_spec.argstr.split("%")[0]: flag = input_spec.argstr.split("%")[0].strip() elif is_flag_type: From db623d6883b87464aae13695ef072ee3f416e19f Mon Sep 17 00:00:00 2001 From: Gio at UMCU Date: Fri, 5 Apr 2019 11:07:39 +0200 Subject: [PATCH 0212/1665] BUGFIX when filename does not have + --- nipype/interfaces/afni/preprocess.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index 0ecbe4b347..91edbc1bfe 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -168,10 +168,12 @@ class AlignEpiAnatPy(AFNIPythonCommand): def _list_outputs(self): outputs = self.output_spec().get() - anat_prefix = ''.join( - self._gen_fname(self.inputs.anat).split('+')[:-1]) - epi_prefix = ''.join( - self._gen_fname(self.inputs.in_file).split('+')[:-1]) + anat_prefix = self._gen_fname(self.inputs.anat) + epi_prefix = self._gen_fname(self.inputs.in_file) + if '+' in anat_prefix: + anat_prefix = ''.join(anat_prefix.split('+')[:-1]) + if '+' in epi_prefix: + epi_prefix = ''.join(epi_prefix.split('+')[:-1]) outputtype = self.inputs.outputtype if outputtype == 'AFNI': ext = '.HEAD' From e3840805ff7d862227293e8fd7fa7e312dddeba4 Mon Sep 17 00:00:00 2001 From: Gio at UMCU Date: Fri, 5 Apr 2019 13:29:22 +0200 Subject: [PATCH 0213/1665] match _overload_extension in AFNItoNIFTI --- nipype/interfaces/afni/preprocess.py | 2 +- nipype/interfaces/afni/utils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index 91edbc1bfe..c2ccb1988a 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -178,7 +178,7 @@ def _list_outputs(self): if outputtype == 
'AFNI': ext = '.HEAD' else: - Info.output_type_to_ext(outputtype) + ext = Info.output_type_to_ext(outputtype) matext = '.1D' suffix = self.inputs.suffix if self.inputs.anat2epi: diff --git a/nipype/interfaces/afni/utils.py b/nipype/interfaces/afni/utils.py index 3cb8d5f69e..5fe3e199ec 100644 --- a/nipype/interfaces/afni/utils.py +++ b/nipype/interfaces/afni/utils.py @@ -143,7 +143,7 @@ class AFNItoNIFTI(AFNICommand): input_spec = AFNItoNIFTIInputSpec output_spec = AFNICommandOutputSpec - def _overload_extension(self, value): + def _overload_extension(self, value, name=None): path, base, ext = split_filename(value) if ext.lower() not in ['.nii', '.nii.gz', '.1d', '.1D']: ext += '.nii' From 36df0c7c327cb56ab79ea5a9dd91df7fb8a6e7a6 Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 18 Apr 2019 09:15:25 -0700 Subject: [PATCH 0214/1665] fix(node): do not override exceptions when the unfinished hashfile does not exist --- nipype/pipeline/engine/nodes.py | 6 ++++-- nipype/utils/filemanip.py | 21 +++++++++++++++++++++ 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index b338fd862d..9a617dc8cb 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -27,7 +27,7 @@ from ...utils.filemanip import (md5, FileNotFoundError, ensure_list, simplify_list, copyfiles, fnames_presuffix, loadpkl, split_filename, load_json, makedirs, - emptydirs, savepkl, to_str, indirectory) + emptydirs, savepkl, to_str, indirectory, silentrm) from ...interfaces.base import (traits, InputMultiPath, CommandLine, Undefined, DynamicTraitedSpec, Bunch, InterfaceResult, @@ -474,7 +474,9 @@ def run(self, updatehash=False): except Exception: logger.warning('[Node] Error on "%s" (%s)', self.fullname, outdir) # Tear-up after error - os.remove(hashfile_unfinished) + if not silentrm(hashfile_unfinished): + logger.debug('Unfinished hashfile %s does not exist', + hashfile_unfinished) raise # Tear-up after success diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index f229061396..d8a65a6712 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -832,6 +832,27 @@ def emptydirs(path, noexist_ok=False): makedirs(path) +def silentrm(filename): + """ + Equivalent to ``rm -f``, returns ``False`` if the file did not + exist. 
+ + Parameters + ---------- + + filename : str + file to be deleted + + """ + try: + os.remove(filename) + except OSError as e: + if e.errno != errno.ENOENT: + raise + return False + return True + + def which(cmd, env=None, pathext=None): """ Return the path to an executable which would be run if the given From bdd4d21d677ea199f345603e3d0cd25ab7d4936f Mon Sep 17 00:00:00 2001 From: Serge Koudoro Date: Thu, 18 Apr 2019 16:17:16 -0400 Subject: [PATCH 0215/1665] address ariel comment --- nipype/interfaces/dipy/base.py | 6 +++++- nipype/interfaces/dipy/reconstruction.py | 13 +------------ nipype/interfaces/dipy/registration.py | 9 +-------- nipype/interfaces/dipy/tracks.py | 7 +------ 4 files changed, 8 insertions(+), 27 deletions(-) diff --git a/nipype/interfaces/dipy/base.py b/nipype/interfaces/dipy/base.py index a86d5635fb..27f26e989a 100644 --- a/nipype/interfaces/dipy/base.py +++ b/nipype/interfaces/dipy/base.py @@ -10,7 +10,11 @@ from ..base import (traits, File, isdefined, LibraryBaseInterface, BaseInterfaceInputSpec, TraitedSpec) +# List of workflows to ignore +SKIP_WORKFLOWS_LIST = ['Workflow', 'CombinedWorkflow'] + HAVE_DIPY = True + try: import dipy from dipy.workflows.base import IntrospectiveArgumentParser @@ -236,4 +240,4 @@ def get_dipy_workflows(module): return [(m, obj) for m, obj in inspect.getmembers(module) if inspect.isclass(obj) and issubclass(obj, module.Workflow) and - m not in ['Workflow', 'CombinedWorkflow']] + m not in SKIP_WORKFLOWS_LIST] diff --git a/nipype/interfaces/dipy/reconstruction.py b/nipype/interfaces/dipy/reconstruction.py index 6deccd7f9f..6c7c3a847b 100644 --- a/nipype/interfaces/dipy/reconstruction.py +++ b/nipype/interfaces/dipy/reconstruction.py @@ -24,18 +24,7 @@ IFLOGGER = logging.getLogger('nipype.interface') -if HAVE_DIPY and (LooseVersion('0.15') >= LooseVersion(dipy_version()) >= LooseVersion('0.16')): - from dipy.workflows.reconst import (ReconstDkiFlow, ReconstCSAFlow, - ReconstCSDFlow, ReconstMAPMRIFlow, - ReconstDtiFlow) - - DKIModel = dipy_to_nipype_interface("DKIModel", ReconstDkiFlow) - MapmriModel = dipy_to_nipype_interface("MapmriModel", ReconstMAPMRIFlow) - DTIModel = dipy_to_nipype_interface("DTIModel", ReconstDtiFlow) - CSAModel = dipy_to_nipype_interface("CSAModel", ReconstCSAFlow) - CSDModel = dipy_to_nipype_interface("CSDModel", ReconstCSDFlow) - -elif HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('1.0'): +if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('0.15'): from dipy.workflows import reconst l_wkflw = get_dipy_workflows(reconst) diff --git a/nipype/interfaces/dipy/registration.py b/nipype/interfaces/dipy/registration.py index 6d6d023635..e2e5c1e7ec 100644 --- a/nipype/interfaces/dipy/registration.py +++ b/nipype/interfaces/dipy/registration.py @@ -6,15 +6,8 @@ IFLOGGER = logging.getLogger('nipype.interface') -if HAVE_DIPY and (LooseVersion('0.15') >= LooseVersion(dipy_version()) >= LooseVersion('0.16')): - from dipy.workflows.align import ResliceFlow, SlrWithQbxFlow - - Reslice = dipy_to_nipype_interface("Reslice", ResliceFlow) - StreamlineRegistration = dipy_to_nipype_interface("StreamlineRegistration", - SlrWithQbxFlow) - -elif HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('1.0'): +if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('0.15'): from dipy.workflows import align l_wkflw = get_dipy_workflows(align) diff --git a/nipype/interfaces/dipy/tracks.py b/nipype/interfaces/dipy/tracks.py index 77c4b55491..60b5127dd1 100644 --- a/nipype/interfaces/dipy/tracks.py +++ 
b/nipype/interfaces/dipy/tracks.py @@ -18,20 +18,15 @@ if HAVE_DIPY and (LooseVersion('0.15') >= LooseVersion(dipy_version()) >= LooseVersion('0.16')): - - from dipy.workflows.segment import RecoBundlesFlow, LabelsBundlesFlow try: from dipy.workflows.tracking import LocalFiberTrackingPAMFlow as DetTrackFlow except ImportError: # different name in 0.15 from dipy.workflows.tracking import DetTrackPAMFlow as DetTrackFlow - RecoBundles = dipy_to_nipype_interface("RecoBundles", RecoBundlesFlow) - LabelsBundles = dipy_to_nipype_interface("LabelsBundles", - LabelsBundlesFlow) DeterministicTracking = dipy_to_nipype_interface("DeterministicTracking", DetTrackFlow) -elif HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('1.0'): +if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('0.15'): from dipy.workflows import segment, tracking l_wkflw = get_dipy_workflows(segment) + get_dipy_workflows(tracking) From 6a0763c0419081634521d4ca43107c0b8660d40c Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 18 Apr 2019 15:29:01 -0700 Subject: [PATCH 0216/1665] fix(nodes): flag that this error may point to race conditions --- nipype/pipeline/engine/nodes.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index 9a617dc8cb..fc9e6e4b4e 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -440,7 +440,6 @@ def run(self, updatehash=False): for outdatedhash in glob(op.join(self.output_dir(), '_0x*.json')): os.remove(outdatedhash) - # Hashfile while running hashfile_unfinished = op.join( outdir, '_0x%s_unfinished.json' % self._hashvalue) @@ -475,8 +474,10 @@ def run(self, updatehash=False): logger.warning('[Node] Error on "%s" (%s)', self.fullname, outdir) # Tear-up after error if not silentrm(hashfile_unfinished): - logger.debug('Unfinished hashfile %s does not exist', - hashfile_unfinished) + logger.warning("""\ +Interface finished unexpectedly and the corresponding unfinished hashfile %s \ +does not exist. Another nipype instance may be running against the same work \ +directory. 
Please ensure no other concurrent workflows are racing""", hashfile_unfinished) raise # Tear-up after success From 4908efc0cb7562b6a391444250b037508a25a159 Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 18 Apr 2019 16:15:26 -0700 Subject: [PATCH 0217/1665] fix(nodes): better handle error condition, printing out more info --- nipype/pipeline/engine/nodes.py | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index b338fd862d..22565722d1 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -514,32 +514,35 @@ def _get_inputs(self): logger.debug('input: %s', key) results_file = info[0] logger.debug('results file: %s', results_file) - results = loadpkl(results_file) + outputs = loadpkl(results_file).outputs + if outputs is None: + raise RuntimeError("""\ +Error populating the input "%s" of node "%s": the results file of the source node \ +(%s) does not contain any outputs.""" % (key, self.name, results_file)) output_value = Undefined if isinstance(info[1], tuple): output_name = info[1][0] - value = getattr(results.outputs, output_name) + value = getattr(outputs, output_name) if isdefined(value): output_value = evaluate_connect_function( info[1][1], info[1][2], value) else: output_name = info[1] try: - output_value = results.outputs.trait_get()[output_name] + output_value = outputs.trait_get()[output_name] except AttributeError: - output_value = results.outputs.dictcopy()[output_name] + output_value = outputs.dictcopy()[output_name] logger.debug('output: %s', output_name) try: self.set_input(key, deepcopy(output_value)) except traits.TraitError as e: - msg = [ - 'Error setting node input:', - 'Node: %s' % self.name, - 'input: %s' % key, + msg = ( + e.args[0], '', 'Error setting node input:', + 'Node: %s' % self.name, 'input: %s' % key, 'results_file: %s' % results_file, - 'value: %s' % str(output_value) - ] - e.args = (e.args[0] + "\n" + '\n'.join(msg), ) + 'value: %s' % str(output_value), + ) + e.args = ('\n'.join(msg), ) raise # Successfully set inputs From 79e840d8763944f5cb1a4406971506b393fcdc06 Mon Sep 17 00:00:00 2001 From: rciric Date: Thu, 18 Apr 2019 23:28:23 -0700 Subject: [PATCH 0218/1665] integrate @effigies review comments --- nipype/algorithms/confounds.py | 56 ++++++++++++++----------- nipype/algorithms/tests/test_CompCor.py | 5 +-- nipype/info.py | 3 +- 3 files changed, 34 insertions(+), 30 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index 949f510d2e..4fb4e1c91e 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -11,6 +11,7 @@ import os import os.path as op from collections import OrderedDict +from itertools import chain import nibabel as nb import numpy as np @@ -1262,11 +1263,18 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, Dictionary of eigenvalues, fractional explained variances, and cumulative explained variances. 
""" - components = None basis = np.array([]) if components_criterion == 'all': components_criterion = -1 mask_names = mask_names or range(len(mask_images)) + + comp_list = [] + md_mask = [] + md_sv = [] + md_var = [] + md_cumvar = [] + md_retained = [] + for name, img in zip(mask_names, mask_images): mask = nb.squeeze_image(img).get_data().astype(np.bool) if imgseries.shape[:3] != mask.shape: @@ -1331,32 +1339,30 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, num_components = int(num_components) if num_components == 0: break - if components is None: - components = u[:, :num_components] - metadata = OrderedDict() - metadata['mask'] = [name] * len(s) - metadata['singular_value'] = s - metadata['variance_explained'] = variance_explained - metadata['cumulative_variance_explained'] = ( - cumulative_variance_explained) - metadata['retained'] = [i < num_components for i in range(len(s))] - else: - components = np.hstack((components, u[:, :num_components])) - metadata['mask'] = metadata['mask'] + [name] * len(s) - metadata['singular_value'] = ( - np.hstack((metadata['singular_value'], s))) - metadata['variance_explained'] = ( - np.hstack((metadata['variance_explained'], - variance_explained))) - metadata['cumulative_variance_explained'] = ( - np.hstack((metadata['cumulative_variance_explained'], - cumulative_variance_explained))) - metadata['retained'] = ( - metadata['retained'] + [i < num_components for i in range(len(s))]) - if components is None: + + components.append(u[:, :num_components]) + md_mask.append([name] * len(s)) + md_sv.append(s) + md_var.append(variance_explained) + md_cumvar.append(cumulative_variance_explained) + md_retained.append(i < num_components for i in range(len(s))) + + if len(components) > 0: + components = np.hstack(components) + else: if failure_mode == 'error': raise ValueError('No components found') - components = np.full((M.shape[0], num_components), np.nan) + components = np.full((M.shape[0], num_components), + np.nan, dtype=np.float32) + + metadata = OrderedDict( + ('mask', list(chain(*md_mask))), + ('singular_value', np.hstack(md_sv)), + ('variance_explained', np.hstack(md_var)), + ('cumulative_variance_explained', np.hstack(md_cumvar)), + ('retained', list(chain(*md_retained))) + ) + return components, basis, metadata diff --git a/nipype/algorithms/tests/test_CompCor.py b/nipype/algorithms/tests/test_CompCor.py index 4361c3155c..c7b770fb25 100644 --- a/nipype/algorithms/tests/test_CompCor.py +++ b/nipype/algorithms/tests/test_CompCor.py @@ -1,7 +1,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os -import re import nibabel as nb import numpy as np @@ -198,7 +197,7 @@ def run_cc(self, expected_n_components = min(ccinterface.inputs.num_components, self.fake_data.shape[3]) - components_data = [re.sub('\n', '', line).split('\t') + components_data = [line.rstrip().split('\t') for line in components_file] # the first item will be '#', we can throw it out @@ -224,7 +223,7 @@ def run_cc(self, assert os.path.getsize(expected_metadata_file) > 0 with open(ccresult.outputs.metadata_file, 'r') as metadata_file: - components_metadata = [re.sub('\n', '', line).split('\t') + components_metadata = [line.rstrip().split('\t') for line in metadata_file] components_metadata = {i: j for i, j in zip(components_metadata[0], diff --git a/nipype/info.py b/nipype/info.py index 2f29f6126d..f9361031bb 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -141,8 +141,7 @@ def 
get_nipype_gitversion(): 'numpy>=%s ; python_version >= "3.7"' % NUMPY_MIN_VERSION_37, 'python-dateutil>=%s' % DATEUTIL_MIN_VERSION, 'scipy>=%s' % SCIPY_MIN_VERSION, - 'traits>=%s,<%s ; python_version == "2.7"' % (TRAITS_MIN_VERSION, '5.0.0'), - 'traits>=%s ; python_version >= "3.0"' % TRAITS_MIN_VERSION, + 'traits>=%s,!=5.0' % TRAITS_MIN_VERSION, 'future>=%s' % FUTURE_MIN_VERSION, 'simplejson>=%s' % SIMPLEJSON_MIN_VERSION, 'prov>=%s' % PROV_VERSION, From 1b1b6fa2b342acd500f599fe6890b2d683f2ec57 Mon Sep 17 00:00:00 2001 From: rciric Date: Fri, 19 Apr 2019 03:02:30 -0700 Subject: [PATCH 0219/1665] propagate retention status to metadata; use list instead of generator; correct var name --- nipype/algorithms/confounds.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index 4fb4e1c91e..a987c98e55 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -667,7 +667,7 @@ def _run_interface(self, runtime): f.write('\t'.join(['component'] + list(metadata.keys())) + '\n') for i in zip(components_names, *metadata.values()): f.write('{0[0]}\t{0[1]}\t{0[2]:.10f}\t' - '{0[3]:.10f}\t{0[4]:.10f}\n'.format(i)) + '{0[3]:.10f}\t{0[4]:.10f}\t{0[5]}\n'.format(i)) return runtime @@ -1268,7 +1268,7 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, components_criterion = -1 mask_names = mask_names or range(len(mask_images)) - comp_list = [] + components = [] md_mask = [] md_sv = [] md_var = [] @@ -1345,8 +1345,8 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, md_sv.append(s) md_var.append(variance_explained) md_cumvar.append(cumulative_variance_explained) - md_retained.append(i < num_components for i in range(len(s))) - + md_retained.append([i < num_components for i in range(len(s))]) + if len(components) > 0: components = np.hstack(components) else: @@ -1355,13 +1355,13 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, components = np.full((M.shape[0], num_components), np.nan, dtype=np.float32) - metadata = OrderedDict( + metadata = OrderedDict([ ('mask', list(chain(*md_mask))), ('singular_value', np.hstack(md_sv)), ('variance_explained', np.hstack(md_var)), ('cumulative_variance_explained', np.hstack(md_cumvar)), ('retained', list(chain(*md_retained))) - ) + ]) return components, basis, metadata From b80a3d7f1cde35573a73246271bd0dcf42dc7f4b Mon Sep 17 00:00:00 2001 From: rciric Date: Fri, 19 Apr 2019 11:06:16 -0700 Subject: [PATCH 0220/1665] update unit test to include new metadata field --- nipype/algorithms/tests/test_CompCor.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nipype/algorithms/tests/test_CompCor.py b/nipype/algorithms/tests/test_CompCor.py index c7b770fb25..3aa535dc19 100644 --- a/nipype/algorithms/tests/test_CompCor.py +++ b/nipype/algorithms/tests/test_CompCor.py @@ -73,7 +73,8 @@ def test_compcor_variance_threshold_and_metadata(self): 'mask': 'mask', 'singular_value': '4.0720553036', 'variance_explained': '0.5527211465', - 'cumulative_variance_explained': '0.5527211465' + 'cumulative_variance_explained': '0.5527211465', + 'retained': 'True', } ccinterface = CompCor( variance_threshold=0.7, From 5c0684d77c712ed115376b2884cdaa956b5539d1 Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Wed, 24 Apr 2019 16:47:12 -0400 Subject: [PATCH 0221/1665] removed six, changed all-or-none groups to go under requires-inputs field instead --- 
nipype/utils/nipype2boutiques.py | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py index 5f88239da1..4f692a267b 100644 --- a/nipype/utils/nipype2boutiques.py +++ b/nipype/utils/nipype2boutiques.py @@ -19,7 +19,6 @@ import os import sys import simplejson as json -import six from ..scripts.instance import import_module @@ -310,7 +309,7 @@ def get_boutiques_input(inputs, interface, input_name, spec, verbose, inp['type'] = "String" if trait_handler.minlen != 0: inp['min-list-entries'] = trait_handler.minlen - if trait_handler.maxlen != six.MAXSIZE: + if trait_handler.maxlen != sys.maxsize: inp['max-list-entries'] = trait_handler.maxlen if spec.sep: inp['list-separator'] = spec.sep @@ -353,6 +352,8 @@ def get_boutiques_input(inputs, interface, input_name, spec, verbose, inp['optional'] = False if spec.usedefault: inp['default-value'] = spec.default_value()[1] + if spec.requires is not None: + inp['requires-inputs'] = spec.requires try: value_choices = trait_handler.values @@ -422,7 +423,8 @@ def get_boutiques_output(outputs, name, spec, interface, tool_inputs): # Handle multi-outputs if (isinstance(output_value, list) or - type(spec.handler).__name__ == "OutputMultiObject"): + type(spec.handler).__name__ == "OutputMultiObject" or + type(spec.handler).__name__ == "List"): output['list'] = True if output_value: # Check if all extensions are the same @@ -451,32 +453,19 @@ def get_boutiques_output(outputs, name, spec, interface, tool_inputs): def get_boutiques_groups(input_traits): """ Returns a list of dictionaries containing Boutiques groups for the mutually - exclusive and all-or-none Nipype inputs. + exclusive Nipype inputs. """ desc_groups = [] - all_or_none_input_sets = [] mutex_input_sets = [] # Get all the groups for name, spec in input_traits: - if spec.requires is not None: - group_members = set([name] + list(spec.requires)) - if group_members not in all_or_none_input_sets: - all_or_none_input_sets.append(group_members) if spec.xor is not None: group_members = set([name] + list(spec.xor)) if group_members not in mutex_input_sets: mutex_input_sets.append(group_members) # Create a dictionary for each one - for i, inp_set in enumerate(all_or_none_input_sets, 1): - desc_groups.append({'id': "all_or_none_group" + - ("_" + str(i) if i != 1 else ""), - 'name': "All or none group" + - (" " + str(i) if i != 1 else ""), - 'members': list(inp_set), - 'all-or-none': True}) - for i, inp_set in enumerate(mutex_input_sets, 1): desc_groups.append({'id': "mutex_group" + ("_" + str(i) if i != 1 else ""), From bf727705409dd3029a76d15f66a882d67ae333a1 Mon Sep 17 00:00:00 2001 From: mathiasg Date: Thu, 2 May 2019 13:13:23 -0400 Subject: [PATCH 0222/1665] tst: fix CI builds --- docker/generate_dockerfiles.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/generate_dockerfiles.sh b/docker/generate_dockerfiles.sh index 2c37082a8c..453e6bd232 100755 --- a/docker/generate_dockerfiles.sh +++ b/docker/generate_dockerfiles.sh @@ -90,8 +90,8 @@ function generate_main_dockerfile() { --user neuro \ --miniconda create_env=neuro \ conda_install='python=${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR} - icu=58.1 libxml2 libxslt matplotlib mkl "numpy!=1.16.0" paramiko - pandas psutil scikit-learn scipy traits=4.6.0' \ + libxml2 libxslt matplotlib mkl "numpy!=1.16.0" paramiko + pandas psutil scikit-learn scipy traits' \ pip_install="pytest-xdist" \ activate=true \ --copy 
docker/files/run_builddocs.sh docker/files/run_examples.sh \ From e498940d88aa15136d79c14844bf8b4164f2909b Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 8 May 2019 16:33:15 -0400 Subject: [PATCH 0223/1665] DOC: Update changelog --- doc/changelog/1.X.X-changelog | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/doc/changelog/1.X.X-changelog b/doc/changelog/1.X.X-changelog index c88c9a9c7b..296e7e5c0e 100644 --- a/doc/changelog/1.X.X-changelog +++ b/doc/changelog/1.X.X-changelog @@ -1,3 +1,22 @@ +1.2.0 (May 09, 2019) +==================== + +##### [Full changelog](https://github.com/nipy/nipype/milestone/31?closed=1) + + * FIX: Parsing of filename in AlignEpiAnatPy when filename does not have + (https://github.com/nipy/nipype/pull/2909) + * FIX: Import nibabel reorientation bug fix (https://github.com/nipy/nipype/pull/2912) + * FIX: Update FNIRT outputs for warped_file log_file to include cwd (https://github.com/nipy/nipype/pull/2900) + * FIX: Sort conditions in bids_gen_info to ensure consistent order (https://github.com/nipy/nipype/pull/2867) + * FIX: Some traits-5.0.0 don't work with Python 2.7 (https://github.com/nipy/nipype/pull/1) + * ENH: CompCor enhancement (https://github.com/nipy/nipype/pull/2878) + * ENH: Do not override caught exceptions with FileNotFoundError from unfinished hashfile (https://github.com/nipy/nipype/pull/2919) + * ENH: More verbose description when a faulty results file is loaded (https://github.com/nipy/nipype/pull/2920) + * ENH: Add all DIPY workflows dynamically (https://github.com/nipy/nipype/pull/2905) + * ENH: Add mrdegibbs and dwibiascorrect from mrtrix3 (https://github.com/nipy/nipype/pull/2904) + * TEST: Fix CI builds (https://github.com/nipy/nipype/pull/2927) + * MAINT: Reduce deprecation warnings (https://github.com/nipy/nipype/pull/2903) + + 1.1.9 (February 25, 2019) ========================= From a2c4b750bd722192f10276e733712bdd7705a5d3 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 8 May 2019 16:44:09 -0400 Subject: [PATCH 0224/1665] MNT: Version 1.2.0 --- doc/conf.py | 2 +- nipype/info.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index 3a45d0338a..4049fe7ee5 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -82,7 +82,7 @@ # The short X.Y version. version = nipype.__version__ # The full version, including alpha/beta/rc tags. -release = "1.1.9" +release = "1.2.0" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/nipype/info.py b/nipype/info.py index f9361031bb..380c7f5a04 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -11,7 +11,7 @@ # full release. '.dev' as a version_extra string means this is a development # version # Remove -dev for release -__version__ = '1.2.0-dev' +__version__ = '1.2.0' def get_nipype_gitversion(): From 32c4de5bca1a7241570f451da28f07bd80fbcf19 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Wed, 8 May 2019 16:45:59 -0400 Subject: [PATCH 0225/1665] MNT: Update mailmap, zenodo --- .mailmap | 4 +++ .zenodo.json | 74 ++++++++++++++++++++++++++-------------------------- 2 files changed, 41 insertions(+), 37 deletions(-) diff --git a/.mailmap b/.mailmap index 91e14a8077..243bd4f2d5 100644 --- a/.mailmap +++ b/.mailmap @@ -153,6 +153,8 @@ Pablo Polosecki pipolose pipolose Paul Sharp psharp1289 Ranjit Khanuja RanjitK +Rastko Ćirić rciric +Rastko Ćirić Rastko Ćirić Ross Markello Ross Markello Russell Poldrack Russ Poldrack Russell Poldrack poldrack @@ -162,6 +164,7 @@ Salma Bougacha salma1601 Sami Kristian Andberg Sami Andberg Satrajit Ghosh Satrajit Ghosh Sebastian Urchs sebastian +Serge Koudoro skoudoro Sharad Sikka ssikka Shariq Iqbal shariqiqbal2810 Shariq Iqbal shariqiqbal2810 @@ -176,6 +179,7 @@ Steven Giavasis sgiavasis Steven Giavasis sgiavasis Tristan Glatard Tristan Glatard Victor Saase vsaase +Weijie Huang forwho William Triplett William Triplett Wolfgang Pauli Wolfgang Pauli Xiangzhen Kong bnucon diff --git a/.zenodo.json b/.zenodo.json index f1b2d5db62..d47039364b 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -89,16 +89,16 @@ "name": "Clark, Daniel", "orcid": "0000-0002-8121-8954" }, - { - "affiliation": "National Institute of Mental Health", - "name": "Nielson, Dylan M.", - "orcid": "0000-0003-4613-6643" - }, { "affiliation": "Department of Electrical and Computer Engineering, Johns Hopkins University", "name": "Dewey, Blake E", "orcid": "0000-0003-4554-5058" }, + { + "affiliation": "National Institute of Mental Health", + "name": "Nielson, Dylan M.", + "orcid": "0000-0003-4613-6643" + }, { "name": "Madison, Cindee" }, @@ -106,6 +106,11 @@ "affiliation": "Molecular Imaging Research Center, CEA, France", "name": "Bougacha, Salma" }, + { + "affiliation": "Stanford University", + "name": "\u0106iri\u0107 , Rastko", + "orcid": "0000-0001-6347-7939" + }, { "affiliation": "National Institutes of Health", "name": "Clark, Michael G. 
" @@ -115,16 +120,16 @@ "name": "Dayan, Michael", "orcid": "0000-0002-2666-0969" }, - { - "affiliation": "Dartmouth College", - "name": "Visconti di Oleggio Castello, Matteo", - "orcid": "0000-0001-7931-5272" - }, { "affiliation": "UC Berkeley", "name": "Clark, Dav", "orcid": "0000-0002-3982-4416" }, + { + "affiliation": "Dartmouth College", + "name": "Visconti di Oleggio Castello, Matteo", + "orcid": "0000-0001-7931-5272" + }, { "affiliation": "UC Berkeley - UCSF Graduate Program in Bioengineering", "name": "Keshavan, Anisha", @@ -141,16 +146,16 @@ { "name": "Berleant, Shoshana" }, - { - "affiliation": "Dartmouth College: Hanover, NH, United States", - "name": "Halchenko, Yaroslav O.", - "orcid": "0000-0003-3456-2493" - }, { "affiliation": "Institute for Biomedical Engineering, ETH and University of Zurich", "name": "Christian, Horea", "orcid": "0000-0001-7037-2449" }, + { + "affiliation": "Dartmouth College: Hanover, NH, United States", + "name": "Halchenko, Yaroslav O.", + "orcid": "0000-0003-3456-2493" + }, { "affiliation": "UC San Diego", "name": "Cipollini, Ben", @@ -166,6 +171,10 @@ "name": "Markello, Ross", "orcid": "0000-0003-1057-1336" }, + { + "affiliation": "Indiana University, IN, USA", + "name": "Koudoro, Serge" + }, { "affiliation": "The University of Washington eScience Institute", "name": "Rokem, Ariel", @@ -189,6 +198,11 @@ "name": "Varoquaux, Gael", "orcid": "0000-0003-1076-5122" }, + { + "affiliation": "The Centre for Addiction and Mental Health", + "name": "Joseph, Michael", + "orcid": "0000-0002-0068-230X" + }, { "affiliation": "Athena EPI, Inria Sophia-Antipolis", "name": "Wassermann, Demian", @@ -245,14 +259,14 @@ "name": "Iqbal, Shariq", "orcid": "0000-0003-2766-8425" }, + { + "name": "Schwartz, Yannick" + }, { "affiliation": "University of Washington", "name": "Richie-Halford, Adam", "orcid": "0000-0001-9276-9084" }, - { - "name": "Schwartz, Yannick" - }, { "affiliation": "University College London", "name": "Malone, Ian B.", @@ -330,16 +344,6 @@ "name": "Liem, Franz", "orcid": "0000-0003-0646-4810" }, - { - "affiliation": "Stanford University", - "name": "Ciric, Rastko", - "orcid": "0000-0001-6347-7939" - }, - { - "affiliation": "The Centre for Addiction and Mental Health", - "name": "Joseph, Michael", - "orcid": "0000-0002-0068-230X" - }, { "affiliation": "UniversityHospital Heidelberg, Germany", "name": "Kleesiek, Jens" @@ -380,10 +384,6 @@ { "name": "Haselgrove, Christian" }, - { - "name": "Koudoro, Serge", - "affiliation": "Indiana University, IN, USA" - }, { "affiliation": "1 McGill Centre for Integrative Neuroscience (MCIN), Ludmer Centre for Neuroinformatics and Mental Health, Montreal Neurological Institute (MNI), McGill University, Montr\u00e9al, 3801 University Street, WB-208, H3A 2B4, Qu\u00e9bec, Canada. 
2 University of Lyon, CNRS, INSERM, CREATIS., Villeurbanne, 7, avenue Jean Capelle, 69621, France.", "name": "Glatard, Tristan", @@ -428,11 +428,6 @@ { "name": "Hallquist, Michael" }, - { - "affiliation": "Dartmouth College", - "name": "Ma, Feilong", - "orcid": "0000-0002-6838-3971" - }, { "affiliation": "TIB \u2013 Leibniz Information Centre for Science and Technology and University Library, Hannover, Germany", "name": "Leinweber, Katrin", @@ -448,6 +443,11 @@ "name": "Grignard, Martin", "orcid": "0000-0001-5549-1861" }, + { + "affiliation": "Dartmouth College", + "name": "Ma, Feilong", + "orcid": "0000-0002-6838-3971" + }, { "affiliation": "INRIA-Saclay, Team Parietal", "name": "Chawla, Kshitij", From 904455497efb7a643948b0ba7befc33c797366da Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 8 May 2019 17:05:27 -0400 Subject: [PATCH 0226/1665] MAINT: Require networkx<2.3 for Python 2 --- nipype/info.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nipype/info.py b/nipype/info.py index 380c7f5a04..06077286d6 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -101,6 +101,7 @@ def get_nipype_gitversion(): # versions NIBABEL_MIN_VERSION = '2.1.0' NETWORKX_MIN_VERSION = '1.9' +NETWORKX_MAX_VERSION_27 = '2.2' NUMPY_MIN_VERSION = '1.9.0' # Numpy bug in python 3.7: # https://www.opensourceanswers.com/blog/you-shouldnt-use-python-37-for-data-science-right-now.html @@ -135,7 +136,8 @@ def get_nipype_gitversion(): PROVIDES = ['nipype'] REQUIRES = [ 'nibabel>=%s' % NIBABEL_MIN_VERSION, - 'networkx>=%s' % NETWORKX_MIN_VERSION, + 'networkx>=%s,<=%s ; python_version < "3.0"' % (NETWORKX_MIN_VERSION, NETWORKX_MAX_VERSION_27), + 'networkx>=%s ; python_version >= "3.0"' % NETWORKX_MIN_VERSION, 'numpy>=%s,!=%s ; python_version == "2.7"' % (NUMPY_MIN_VERSION, NUMPY_BAD_VERSION_27), 'numpy>=%s ; python_version > "3.0" and python_version < "3.7"' % NUMPY_MIN_VERSION, 'numpy>=%s ; python_version >= "3.7"' % NUMPY_MIN_VERSION_37, From 8c315b9b61350da0b533fcc7476444332597977f Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Thu, 9 May 2019 09:43:33 -0400 Subject: [PATCH 0227/1665] [skip ci] MAINT: Add Michael Crusoe to Zenodo, update ordering --- .zenodo.json | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index d47039364b..4cf6b17689 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -171,15 +171,15 @@ "name": "Markello, Ross", "orcid": "0000-0003-1057-1336" }, - { - "affiliation": "Indiana University, IN, USA", - "name": "Koudoro, Serge" - }, { "affiliation": "The University of Washington eScience Institute", "name": "Rokem, Ariel", "orcid": "0000-0003-0679-1985" }, + { + "affiliation": "Indiana University, IN, USA", + "name": "Koudoro, Serge" + }, { "affiliation": "Montreal Neurological Institute and Hospital", "name": "DuPre, Elizabeth", @@ -591,6 +591,10 @@ { "name": "Nickson, Thomas" }, + { + "name": "Crusoe, Michael R.", + "orcid": "0000-0002-2961-9670" + }, { "name": "Brett, Matthew" }, From ea934d4172f46109a6867958665fac386f9bc3a5 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Thu, 9 May 2019 09:58:31 -0400 Subject: [PATCH 0228/1665] MAINT: Bump dev version --- doc/documentation.rst | 2 +- nipype/info.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/documentation.rst b/doc/documentation.rst index 66044e75d8..54e8243800 100644 --- a/doc/documentation.rst +++ b/doc/documentation.rst @@ -9,7 +9,7 @@ Documentation :Release: |version| :Date: |today| -Previous versions: `1.1.9 `_ `1.1.8 `_ +Previous versions: `1.2.0 `_ `1.1.9 `_ .. container:: doc2 diff --git a/nipype/info.py b/nipype/info.py index 06077286d6..d4a5df9cac 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -11,7 +11,7 @@ # full release. '.dev' as a version_extra string means this is a development # version # Remove -dev for release -__version__ = '1.2.0' +__version__ = '1.2.1-dev' def get_nipype_gitversion(): From 80bb6e9ff3148af51647bfd9fbdce2ad07bb736b Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Thu, 9 May 2019 10:10:36 -0400 Subject: [PATCH 0229/1665] MAINT: Add John Lee, Matt Cieslak to Zenodo, update ordering --- .mailmap | 5 +++-- .zenodo.json | 10 ++++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/.mailmap b/.mailmap index 243bd4f2d5..fed5c3caf5 100644 --- a/.mailmap +++ b/.mailmap @@ -93,8 +93,8 @@ Jérémy Guillon GUILLON Jeremy Joerg Stadler Joerg Stadler Joerg Stadler Joerg Stadler Jörg Stadler -John Lee john anthony lee -John Lee leej3 +John A. Lee john anthony lee +John A. Lee leej3 Joke Durnez jokedurnez Josh Warner JDWarner Josh Warner Josh Warner (Mac) @@ -133,6 +133,7 @@ Matteo Mancini matteomancini Matteo Visconti dOC Matteo Visconti di Oleggio Castello Matteo Visconti dOC Matteo Visconti di Oleggio Castello mvdoc +Matthew Cieslak Matt Cieslak Michael Clark Clark Michael Dayan Michael Michael Dayan Michael diff --git a/.zenodo.json b/.zenodo.json index 4cf6b17689..e2604322b3 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -355,6 +355,11 @@ "affiliation": "Child Mind Institute", "name": "Giavasis, Steven" }, + { + "affiliation": "NIMH IRP", + "name": "Lee, John A.", + "orcid": "0000-0001-5884-4247" + }, { "name": "Correa, Carlos" }, @@ -448,6 +453,11 @@ "name": "Ma, Feilong", "orcid": "0000-0002-6838-3971" }, + { + "affiliation": "Department of Neuropsychiatry, University of Pennsylvania", + "name": "Cieslak, Matthew", + "orcid": "0000-0002-1931-4734" + }, { "affiliation": "INRIA-Saclay, Team Parietal", "name": "Chawla, Kshitij", From 460c1bb11d629d1a9a4455fb950cace49d176544 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Thu, 9 May 2019 12:18:45 -0400 Subject: [PATCH 0230/1665] [skip ci] MAINT: Add Benjamin Meyer to Zenodo, update ordering --- .mailmap | 1 + .zenodo.json | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/.mailmap b/.mailmap index fed5c3caf5..02f5d57270 100644 --- a/.mailmap +++ b/.mailmap @@ -26,6 +26,7 @@ Basille Pinsard bpinsard bpinsard Ben Cipollini Ben Cipollini Benjamin Acland Ben Acland +Benjamin Meyers <34044274+BenjaminMey@users.noreply.github.com> BenjaminMey <34044274+BenjaminMey@users.noreply.github.com> Benjamin Yvernault Benjamin Yvernault Benjamin Yvernault byvernault Blake Dewey Blake Dewey diff --git a/.zenodo.json b/.zenodo.json index e2604322b3..a7333e6ec4 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -634,6 +634,11 @@ "name": "McNamee, Daniel", "orcid": "0000-0001-9928-4960" }, + { + "affiliation": "University of Pittsburgh", + "name": "Meyers, Benjamin", + "orcid": "0000-0001-9137-4363" + }, { "name": "Davison, Andrew" }, From 64e01c301ed34608479685971d781a6f402b1eda Mon Sep 17 00:00:00 2001 From: Garikoitz Lerma-Usabiaga Date: Fri, 31 May 2019 16:16:49 -0700 Subject: [PATCH 0231/1665] FIX: Allow ``max_sh``not to be set (auto mode) For further information and why it can be left unset see http://community.mrtrix.org/t/lmax-for-dwi2response-and-dwi2fod/1099/4 cc / @oesteban --- nipype/interfaces/mrtrix3/preprocess.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py index 99c6c4fb03..2be3e532b4 100644 --- a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ -276,8 +276,6 @@ class ResponseSDInputSpec(MRTrix3BaseInputSpec): exists=True, argstr='-mask %s', desc='provide initial mask image') max_sh = InputMultiObject( traits.Int, - value=[8], - usedefault=True, argstr='-lmax %s', sep=',', desc=('maximum harmonic degree of response function - single value for ' @@ -303,7 +301,7 @@ class ResponseSD(MRTrix3Base): >>> resp.inputs.algorithm = 'tournier' >>> resp.inputs.grad_fsl = ('bvecs', 'bvals') >>> resp.cmdline # doctest: +ELLIPSIS - 'dwi2response tournier -fslgrad bvecs bvals -lmax 8 dwi.mif wm.txt' + 'dwi2response tournier -fslgrad bvecs bvals dwi.mif wm.txt' >>> resp.run() # doctest: +SKIP # We can also pass in multiple harmonic degrees in the case of multi-shell From 68952d4e2472ca13b3e08955bd636fac8a78d658 Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Fri, 31 May 2019 17:06:43 -0700 Subject: [PATCH 0232/1665] update specs --- nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py b/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py index 027527ec85..f4b4660d78 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py @@ -37,7 +37,6 @@ def test_ResponseSD_inputs(): max_sh=dict( argstr='-lmax %s', sep=',', - usedefault=True, ), mtt_file=dict( argstr='%s', From b755445a5bb250a0b83007a92d343c397480737a Mon Sep 17 00:00:00 2001 From: Michael Joseph Date: Thu, 6 Jun 2019 10:25:45 -0400 Subject: [PATCH 0233/1665] eddy_quad doesn't like = signs --- nipype/interfaces/fsl/epi.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index bbe84149c8..8c35a5b447 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ 
-1246,7 +1246,7 @@ class EddyQuadInputSpec(FSLCommandInputSpec): idx_file = File( exists=True, mandatory=True, - argstr="--eddyIdx=%s", + argstr="--eddyIdx %s", desc=("File containing indices for all volumes into acquisition " "parameters") ) @@ -1259,35 +1259,35 @@ class EddyQuadInputSpec(FSLCommandInputSpec): mask_file = File( exists=True, mandatory=True, - argstr="--mask=%s", + argstr="--mask %s", desc="Binary mask file" ) bval_file = File( exists=True, mandatory=True, - argstr="--bvals=%s", + argstr="--bvals %s", desc="b-values file" ) bvec_file = File( exists=True, - argstr="--bvecs=%s", + argstr="--bvecs %s", desc=("b-vectors file - only used when .eddy_residuals " "file is present") ) output_dir = traits.Str( name_template='%s.qc', name_source=['base_name'], - argstr='--output-dir=%s', + argstr='--output-dir %s', desc="Output directory - default = '.qc'", ) field = File( exists=True, - argstr='--field=%s', + argstr='--field %s', desc="TOPUP estimated field (in Hz)", ) slice_spec = File( exists=True, - argstr='--slspec=%s', + argstr='--slspec %s', desc="Text file specifying slice/group acquisition", ) verbose = traits.Bool( @@ -1365,9 +1365,9 @@ class EddyQuad(FSLCommand): >>> quad.inputs.field = 'fieldmap_phase_fslprepared.nii' >>> quad.inputs.verbose = True >>> quad.cmdline - 'eddy_quad eddy_corrected --bvals=bvals.scheme --bvecs=bvecs.scheme \ ---field=fieldmap_phase_fslprepared.nii --eddyIdx=epi_index.txt \ ---mask=epi_mask.nii --output-dir=eddy_corrected.qc --eddyParams=epi_acqp.txt \ + 'eddy_quad eddy_corrected --bvals bvals.scheme --bvecs bvecs.scheme \ +--field fieldmap_phase_fslprepared.nii --eddyIdx epi_index.txt \ +--mask epi_mask.nii --output-dir eddy_corrected.qc --eddyParams epi_acqp.txt \ --verbose' >>> res = quad.run() # doctest: +SKIP From 1fc0507b448337e528447d59cf51ccc307bba7f6 Mon Sep 17 00:00:00 2001 From: Michael Joseph Date: Thu, 6 Jun 2019 12:12:59 -0400 Subject: [PATCH 0234/1665] ran make specs --- nipype/algorithms/tests/test_auto_ACompCor.py | 4 ++-- nipype/algorithms/tests/test_auto_TCompCor.py | 4 ++-- nipype/interfaces/fsl/tests/test_auto_EddyQuad.py | 14 +++++++------- .../interfaces/spm/tests/test_auto_Level1Design.py | 1 + 4 files changed, 12 insertions(+), 11 deletions(-) diff --git a/nipype/algorithms/tests/test_auto_ACompCor.py b/nipype/algorithms/tests/test_auto_ACompCor.py index 95a9f51a88..e28c77d467 100644 --- a/nipype/algorithms/tests/test_auto_ACompCor.py +++ b/nipype/algorithms/tests/test_auto_ACompCor.py @@ -25,8 +25,8 @@ def test_ACompCor_inputs(): realigned_file=dict(mandatory=True, ), regress_poly_degree=dict(usedefault=True, ), repetition_time=dict(), - save_metadata=dict(), - save_pre_filter=dict(), + save_metadata=dict(usedefault=True, ), + save_pre_filter=dict(usedefault=True, ), use_regress_poly=dict( deprecated='0.15.0', new_name='pre_filter', diff --git a/nipype/algorithms/tests/test_auto_TCompCor.py b/nipype/algorithms/tests/test_auto_TCompCor.py index 1e94ef4241..c3bdcac192 100644 --- a/nipype/algorithms/tests/test_auto_TCompCor.py +++ b/nipype/algorithms/tests/test_auto_TCompCor.py @@ -26,8 +26,8 @@ def test_TCompCor_inputs(): realigned_file=dict(mandatory=True, ), regress_poly_degree=dict(usedefault=True, ), repetition_time=dict(), - save_metadata=dict(), - save_pre_filter=dict(), + save_metadata=dict(usedefault=True, ), + save_pre_filter=dict(usedefault=True, ), use_regress_poly=dict( deprecated='0.15.0', new_name='pre_filter', diff --git a/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py 
b/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py
index 4302cdc312..a0d5085fbb 100644
--- a/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py
+++ b/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py
@@ -12,25 +12,25 @@ def test_EddyQuad_inputs():
             usedefault=True,
         ),
         bval_file=dict(
-            argstr='--bvals=%s',
+            argstr='--bvals %s',
             mandatory=True,
         ),
-        bvec_file=dict(argstr='--bvecs=%s', ),
+        bvec_file=dict(argstr='--bvecs %s', ),
         environ=dict(
             nohash=True,
             usedefault=True,
         ),
-        field=dict(argstr='--field=%s', ),
+        field=dict(argstr='--field %s', ),
         idx_file=dict(
-            argstr='--eddyIdx=%s',
+            argstr='--eddyIdx %s',
             mandatory=True,
         ),
         mask_file=dict(
-            argstr='--mask=%s',
+            argstr='--mask %s',
             mandatory=True,
         ),
         output_dir=dict(
-            argstr='--output-dir=%s',
+            argstr='--output-dir %s',
             name_source=['base_name'],
             name_template='%s.qc',
         ),
@@ -39,7 +39,7 @@ def test_EddyQuad_inputs():
             argstr='--eddyParams=%s',
             mandatory=True,
         ),
-        slice_spec=dict(argstr='--slspec=%s', ),
+        slice_spec=dict(argstr='--slspec %s', ),
         verbose=dict(argstr='--verbose', ),
     )
     inputs = EddyQuad.input_spec()
diff --git a/nipype/interfaces/spm/tests/test_auto_Level1Design.py b/nipype/interfaces/spm/tests/test_auto_Level1Design.py
index 7ad8ab8195..7dce3dda2a 100644
--- a/nipype/interfaces/spm/tests/test_auto_Level1Design.py
+++ b/nipype/interfaces/spm/tests/test_auto_Level1Design.py
@@ -10,6 +10,7 @@ def test_Level1Design_inputs():
             mandatory=True,
         ),
         factor_info=dict(field='fact', ),
+        flags=dict(),
         global_intensity_normalization=dict(field='global', ),
         interscan_interval=dict(
             field='timing.RT',

From bdf2d2f3858f4b8d02f66f58422fac82026513d8 Mon Sep 17 00:00:00 2001
From: Michael Joseph
Date: Thu, 6 Jun 2019 20:03:33 -0400
Subject: [PATCH 0235/1665] forgot =

---
 nipype/interfaces/fsl/epi.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py
index 8c35a5b447..f94bda1147 100644
--- a/nipype/interfaces/fsl/epi.py
+++ b/nipype/interfaces/fsl/epi.py
@@ -1253,7 +1253,7 @@ class EddyQuadInputSpec(FSLCommandInputSpec):
     param_file = File(
         exists=True,
         mandatory=True,
-        argstr="--eddyParams=%s",
+        argstr="--eddyParams %s",
         desc="File containing acquisition parameters"
     )
     mask_file = File(

From b98990035705bde9fff63dff2d26b8ac59f46f3c Mon Sep 17 00:00:00 2001
From: Michael Joseph
Date: Thu, 6 Jun 2019 20:04:21 -0400
Subject: [PATCH 0236/1665] Update test_auto_EddyQuad.py

---
 nipype/interfaces/fsl/tests/test_auto_EddyQuad.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py b/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py
index a0d5085fbb..3cc6da2027 100644
--- a/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py
+++ b/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py
@@ -36,7 +36,7 @@ def test_EddyQuad_inputs():
         ),
         output_type=dict(),
         param_file=dict(
-            argstr='--eddyParams=%s',
+            argstr='--eddyParams %s',
             mandatory=True,
         ),
         slice_spec=dict(argstr='--slspec %s', ),

From ab4e15a397e42d6b7a023c7b460ed66099534c85 Mon Sep 17 00:00:00 2001
From: Olivia Stanley
Date: Mon, 10 Jun 2019 11:20:25 -0400
Subject: [PATCH 0237/1665] modified afni's cat_matvec to accept an empty string in place of an op key

---
 nipype/interfaces/afni/utils.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/nipype/interfaces/afni/utils.py b/nipype/interfaces/afni/utils.py
index 5fe3e199ec..bbb174954f 100644
--- a/nipype/interfaces/afni/utils.py
+++ b/nipype/interfaces/afni/utils.py
@@ -617,11 +617,15 @@ class
CatMatvec(AFNICommand):

     def _format_arg(self, name, spec, value):
         if name == 'in_file':
-            return spec.argstr % (' '.join([i[0] + ' -' + i[1]
-                                            for i in value]))
+            xfm_args=''
+            for v in value:
+                if len(v[1])>0:
+                    xfm_args += ' ' + v[0] + ' -' + v[1] + ' '
+                else:
+                    xfm_args += ' ' + v[0] + ' '
+            return spec.argstr % (xfm_args)
         return super(CatMatvec, self)._format_arg(name, spec, value)
 
-
 class CenterMassInputSpec(CommandLineInputSpec):
     in_file = File(
         desc='input file to 3dCM',

From f473b39bcbf03ebc96bd0a7a17a859a44d05a086 Mon Sep 17 00:00:00 2001
From: Michael
Date: Fri, 21 Jun 2019 16:11:50 +0200
Subject: [PATCH 0238/1665] correct option for bedpostx probability tracking to work as intended

---
 nipype/interfaces/camino/dti.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nipype/interfaces/camino/dti.py b/nipype/interfaces/camino/dti.py
index b32b9dc528..0cabe5fe7e 100644
--- a/nipype/interfaces/camino/dti.py
+++ b/nipype/interfaces/camino/dti.py
@@ -995,7 +995,7 @@ class TrackBedpostxProba(Track):
     input_spec = TrackBedpostxProbaInputSpec
 
     def __init__(self, command=None, **inputs):
-        inputs["inputmodel"] = "bedpostx_dyad"
+        inputs["inputmodel"] = "bedpostx"
         return super(TrackBedpostxProba, self).__init__(command, **inputs)
 

From 020461708973b4c784abb75521e17db63b8cda72 Mon Sep 17 00:00:00 2001
From: oesteban
Date: Tue, 16 Jul 2019 14:45:07 -0700
Subject: [PATCH 0239/1665] MAINT: Sort dependencies alphabetically

---
 nipype/info.py       | 40 ++++++++++++++++++++--------------------
 requirements.txt     | 24 ++++++++++++------------
 rtd_requirements.txt | 30 +++++++++++++++---------------
 3 files changed, 47 insertions(+), 47 deletions(-)

diff --git a/nipype/info.py b/nipype/info.py
index d4a5df9cac..0ad54a6682 100644
--- a/nipype/info.py
+++ b/nipype/info.py
@@ -135,47 +135,47 @@ def get_nipype_gitversion():
 VERSION = __version__
 PROVIDES = ['nipype']
 REQUIRES = [
-    'nibabel>=%s' % NIBABEL_MIN_VERSION,
-    'networkx>=%s,<=%s ; python_version < "3.0"' % (NETWORKX_MIN_VERSION, NETWORKX_MAX_VERSION_27),
+    'click>=%s' % CLICK_MIN_VERSION,
+    'configparser; python_version <= "3.4"',
+    'funcsigs',
+    'future>=%s' % FUTURE_MIN_VERSION,
+    'futures; python_version == "2.7"',
     'networkx>=%s ; python_version >= "3.0"' % NETWORKX_MIN_VERSION,
-    'numpy>=%s,!=%s ; python_version == "2.7"' % (NUMPY_MIN_VERSION, NUMPY_BAD_VERSION_27),
+    'networkx>=%s,<=%s ; python_version < "3.0"' % (NETWORKX_MIN_VERSION, NETWORKX_MAX_VERSION_27),
+    'neurdflib',
+    'nibabel>=%s' % NIBABEL_MIN_VERSION,
     'numpy>=%s ; python_version > "3.0" and python_version < "3.7"' % NUMPY_MIN_VERSION,
     'numpy>=%s ; python_version >= "3.7"' % NUMPY_MIN_VERSION_37,
+    'numpy>=%s,!=%s ; python_version == "2.7"' % (NUMPY_MIN_VERSION, NUMPY_BAD_VERSION_27),
+    'packaging',
+    'prov>=%s' % PROV_VERSION,
+    'pydot>=%s' % PYDOT_MIN_VERSION,
+    'pydotplus',
     'python-dateutil>=%s' % DATEUTIL_MIN_VERSION,
     'scipy>=%s' % SCIPY_MIN_VERSION,
-    'traits>=%s,!=5.0' % TRAITS_MIN_VERSION,
-    'future>=%s' % FUTURE_MIN_VERSION,
     'simplejson>=%s' % SIMPLEJSON_MIN_VERSION,
-    'prov>=%s' % PROV_VERSION,
-    'neurdflib',
-    'click>=%s' % CLICK_MIN_VERSION,
-    'funcsigs',
-    'pydotplus',
-    'pydot>=%s' % PYDOT_MIN_VERSION,
-    'packaging',
-    'futures; python_version == "2.7"',
-    'configparser; python_version <= "3.4"',
+    'traits>=%s,!=5.0' % TRAITS_MIN_VERSION,
 ]
 
 TESTS_REQUIRES = [
+    'codecov',
+    'coverage<5',
     'mock',
     'pytest',
     'pytest-cov',
-    'codecov',
     'pytest-env',
-    'coverage<5'
 ]
 
 EXTRA_REQUIRES = {
     'doc': ['Sphinx>=1.4', 'numpydoc', 'matplotlib', 'pydotplus',
'pydot>=1.2.3'], - 'tests': TESTS_REQUIRES, - 'specs': ['yapf'], + 'duecredit': ['duecredit'], 'nipy': ['nitime', 'nilearn<0.5.0', 'dipy', 'nipy', 'matplotlib'], 'profiler': ['psutil>=5.0'], - 'duecredit': ['duecredit'], - 'xvfbwrapper': ['xvfbwrapper'], 'pybids': ['pybids>=0.7.0'], + 'specs': ['yapf'], 'ssh': ['paramiko'], + 'tests': TESTS_REQUIRES, + 'xvfbwrapper': ['xvfbwrapper'], # 'mesh': ['mayavi'] # Enable when it works } diff --git a/requirements.txt b/requirements.txt index 0d951f49c0..66d250209c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,16 +1,16 @@ -numpy>=1.9.0 -scipy>=0.14 +click>=6.6.0 +configparser +funcsigs +future>=0.16.0 networkx>=1.9 -traits>=4.6 -python-dateutil>=2.2 +neurdflib nibabel>=2.1.0 -future>=0.16.0 -simplejson>=3.8.0 +numpy>=1.9.0 +packaging prov>=1.5.2 -neurdflib -click>=6.6.0 -funcsigs -configparser -pydotplus pydot>=1.2.3 -packaging +pydotplus +python-dateutil>=2.2 +scipy>=0.14 +simplejson>=3.8.0 +traits>=4.6 diff --git a/rtd_requirements.txt b/rtd_requirements.txt index 68a366bbdf..8f8295c280 100644 --- a/rtd_requirements.txt +++ b/rtd_requirements.txt @@ -1,19 +1,19 @@ -numpy>=1.9.0 -scipy>=0.14 +configparser +funcsigs +future>=0.16.0 +matplotlib +mock networkx>=1.9 -traits>=4.6 -python-dateutil>=2.2 nibabel>=2.1.0 -future>=0.16.0 -simplejson>=3.8.0 +numpy>=1.9.0 +numpydoc +packaging prov==1.5.0 -funcsigs -configparser -pytest>=3.0 -mock -pydotplus -pydot>=1.2.3 psutil -matplotlib -packaging -numpydoc +pydot>=1.2.3 +pydotplus +pytest>=3.0 +python-dateutil>=2.2 +scipy>=0.14 +simplejson>=3.8.0 +traits>=4.6 From 052461d2a9ae2e04e23750fdcd38848e675e6e53 Mon Sep 17 00:00:00 2001 From: oesteban Date: Tue, 16 Jul 2019 14:55:28 -0700 Subject: [PATCH 0240/1665] ENH: Modify ``Directory`` and ``File`` traits to get along with pathlib Closes #2959 --- nipype/info.py | 1 + nipype/interfaces/base/specs.py | 72 +-- nipype/interfaces/base/support.py | 2 +- nipype/interfaces/base/tests/test_specs.py | 9 +- nipype/interfaces/base/traits_extension.py | 498 ++++++++++++--------- nipype/interfaces/io.py | 21 +- nipype/interfaces/spm/base.py | 36 +- requirements.txt | 1 + 8 files changed, 350 insertions(+), 290 deletions(-) diff --git a/nipype/info.py b/nipype/info.py index d4a5df9cac..75e9bae6d7 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -155,6 +155,7 @@ def get_nipype_gitversion(): 'packaging', 'futures; python_version == "2.7"', 'configparser; python_version <= "3.4"', + 'pathlib2; python_version <= "3.4"', ] TESTS_REQUIRES = [ diff --git a/nipype/interfaces/base/specs.py b/nipype/interfaces/base/specs.py index dbbc816dc9..47fec9ad1d 100644 --- a/nipype/interfaces/base/specs.py +++ b/nipype/interfaces/base/specs.py @@ -19,19 +19,26 @@ from builtins import str, bytes from packaging.version import Version +from traits.trait_errors import TraitError +from traits.trait_handlers import TraitDictObject, TraitListObject from ...utils.filemanip import md5, hash_infile, hash_timestamp, to_str from .traits_extension import ( traits, Undefined, isdefined, - TraitError, - TraitDictObject, - TraitListObject, has_metadata, ) from ... import config, __version__ + +USING_PATHLIB2 = False +try: + from pathlib import Path +except ImportError: + from pathlib2 import Path # noqa + USING_PATHLIB2 = True + FLOAT_FORMAT = '{:.10f}'.format nipype_version = Version(__version__) @@ -314,6 +321,39 @@ def __all__(self): return self.copyable_trait_names() +def _deepcopypatch(self, memo): + """ + Replace the ``__deepcopy__`` member with a traits-friendly implementation. 
+ + A bug in ``__deepcopy__`` for ``HasTraits`` results in weird cloning behaviors. + Occurs for all specs in Python<3 and only for DynamicTraitedSpec in Python>2. + + """ + id_self = id(self) + if id_self in memo: + return memo[id_self] + dup_dict = deepcopy(self.trait_get(), memo) + # access all keys + for key in self.copyable_trait_names(): + if key in self.__dict__.keys(): + _ = getattr(self, key) + # clone once + dup = self.clone_traits(memo=memo) + for key in self.copyable_trait_names(): + try: + _ = getattr(dup, key) + except: + pass + # clone twice + dup = self.clone_traits(memo=memo) + dup.trait_set(**dup_dict) + return dup + + +if USING_PATHLIB2: + BaseTraitedSpec.__deepcopy__ = _deepcopypatch + + class TraitedSpec(BaseTraitedSpec): """ Create a subclass with strict traits. @@ -333,29 +373,9 @@ class DynamicTraitedSpec(BaseTraitedSpec): functioning well together. """ - def __deepcopy__(self, memo): - """ bug in deepcopy for HasTraits results in weird cloning behavior for - added traits - """ - id_self = id(self) - if id_self in memo: - return memo[id_self] - dup_dict = deepcopy(self.trait_get(), memo) - # access all keys - for key in self.copyable_trait_names(): - if key in self.__dict__.keys(): - _ = getattr(self, key) - # clone once - dup = self.clone_traits(memo=memo) - for key in self.copyable_trait_names(): - try: - _ = getattr(dup, key) - except: - pass - # clone twice - dup = self.clone_traits(memo=memo) - dup.trait_set(**dup_dict) - return dup + +if not USING_PATHLIB2: + DynamicTraitedSpec.__deepcopy__ = _deepcopypatch class CommandLineInputSpec(BaseInterfaceInputSpec): diff --git a/nipype/interfaces/base/support.py b/nipype/interfaces/base/support.py index de9d46f61a..0fd1d27674 100644 --- a/nipype/interfaces/base/support.py +++ b/nipype/interfaces/base/support.py @@ -278,7 +278,7 @@ def _inputs_help(cls): >>> from nipype.interfaces.afni import GCOR >>> _inputs_help(GCOR) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE - ['Inputs::', '', '\t[Mandatory]', '\tin_file: (an existing file name)', ... + ['Inputs::', '', '\t[Mandatory]', '\tin_file: (a pathlike object or string... 
""" helpstr = ['Inputs::'] diff --git a/nipype/interfaces/base/tests/test_specs.py b/nipype/interfaces/base/tests/test_specs.py index bab112e96d..b27daea6a8 100644 --- a/nipype/interfaces/base/tests/test_specs.py +++ b/nipype/interfaces/base/tests/test_specs.py @@ -2,9 +2,9 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function, unicode_literals -from future import standard_library import os import warnings +from future import standard_library import pytest @@ -420,7 +420,8 @@ def test_ImageFile(): # setup traits x.add_trait('nifti', nib.ImageFile(types=['nifti1', 'dicom'])) x.add_trait('anytype', nib.ImageFile()) - x.add_trait('newtype', nib.ImageFile(types=['nifti10'])) + with pytest.raises(ValueError): + x.add_trait('newtype', nib.ImageFile(types=['nifti10'])) x.add_trait('nocompress', nib.ImageFile(types=['mgh'], allow_compressed=False)) @@ -428,10 +429,8 @@ def test_ImageFile(): x.nifti = 'test.mgz' x.nifti = 'test.nii' x.anytype = 'test.xml' - with pytest.raises(AttributeError): - x.newtype = 'test.nii' with pytest.raises(nib.TraitError): - x.nocompress = 'test.nii.gz' + x.nocompress = 'test.mgz' x.nocompress = 'test.mgh' diff --git a/nipype/interfaces/base/traits_extension.py b/nipype/interfaces/base/traits_extension.py index 7a464cc557..3538c4e27e 100644 --- a/nipype/interfaces/base/traits_extension.py +++ b/nipype/interfaces/base/traits_extension.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ @@ -24,28 +23,72 @@ absolute_import) from builtins import str, bytes -import os from collections import Sequence # perform all external trait imports here from traits import __version__ as traits_version import traits.api as traits -from traits.trait_handlers import TraitDictObject, TraitListObject -from traits.trait_errors import TraitError -from traits.trait_base import _Undefined, class_of +from traits.trait_handlers import TraitType, NoDefaultSpecified +from traits.trait_base import _Undefined -from traits.api import BaseUnicode from traits.api import Unicode from future import standard_library +try: + from pathlib import Path +except ImportError: + from pathlib2 import Path + + if traits_version < '3.7.0': raise ImportError('Traits version 3.7.0 or higher must be installed') standard_library.install_aliases() +IMG_FORMATS = { + 'afni': ('.HEAD', '.BRIK'), + 'cifti2': ('.nii', '.nii.gz'), + 'dicom': ('.dcm', '.IMA', '.tar', '.tar.gz'), + 'gifti': ('.gii', '.gii.gz'), + 'mgh': ('.mgh', '.mgz', '.mgh.gz'), + 'nifti1': ('.nii', '.nii.gz', '.hdr', '.img', '.img.gz'), + 'nifti2': ('.nii', '.nii.gz'), + 'nrrd': ('.nrrd', '.nhdr'), +} +IMG_ZIP_FMT = set(['.nii.gz', 'tar.gz', '.gii.gz', '.mgz', '.mgh.gz', 'img.gz']) + +""" +The functions that pop-up the Traits GUIs, edit_traits and +configure_traits, were failing because all of our inputs default to +Undefined deep and down in traits/ui/wx/list_editor.py it checks for +the len() of the elements of the list. The _Undefined class in traits +does not define the __len__ method and would error. I tried defining +our own Undefined and even sublassing Undefined, but both of those +failed with a TraitError in our initializer when we assign the +Undefined to the inputs because of an incompatible type: + +TraitError: The 'vertical_gradient' trait of a BetInputSpec instance must be \ +a float, but a value of was specified. + +So... 
in order to keep the same type but add the missing method, I +monkey patched. +""" + + +def _length(self): + return 0 + + +########################################################################## +# Apply monkeypatch here +_Undefined.__len__ = _length +########################################################################## + +Undefined = _Undefined() + class Str(Unicode): - """Replacement for the default traits.Str based in bytes""" + """Replaces the default traits.Str based in bytes.""" # Monkeypatch Str and DictStrStr for Python 2 compatibility @@ -54,244 +97,254 @@ class Str(Unicode): traits.DictStrStr = DictStrStr -class File(BaseUnicode): - """ Defines a trait whose value must be the name of a file. - """ +class BasePath(TraitType): + """Defines a trait whose value must be a valid filesystem path.""" # A description of the type of value this trait accepts: - info_text = 'a file name' - - def __init__(self, - value='', - filter=None, - auto_set=False, - entries=0, - exists=False, - **metadata): - """ Creates a File trait. - - Parameters - ---------- - value : string - The default value for the trait - filter : string - A wildcard string to filter filenames in the file dialog box used by - the attribute trait editor. - auto_set : boolean - Indicates whether the file editor updates the trait value after - every key stroke. - exists : boolean - Indicates whether the trait value must be an existing file or - not. - - Default Value - ------------- - *value* or '' - """ - self.filter = filter - self.auto_set = auto_set - self.entries = entries + info_text = 'a pathlike object or string' + exists = False + pathlike = False + resolve = False + _is_file = False + _is_dir = False + + def __init__(self, value=Undefined, + exists=False, pathlike=False, resolve=False, **metadata): + """Create a BasePath trait.""" self.exists = exists + self.resolve = resolve + self.pathlike = pathlike + if any((exists, self._is_file, self._is_dir)): + self.info_text += ' representing a' + if exists: + self.info_text += 'n existing' + if self._is_file: + self.info_text += ' file' + elif self._is_dir: + self.info_text += ' directory' + else: + self.info_text += ' file or directory' - if exists: - self.info_text = 'an existing file name' + super(BasePath, self).__init__(value, **metadata) - super(File, self).__init__(value, **metadata) + def validate(self, objekt, name, value, return_pathlike=False): + """Validate a value change.""" + try: + value = Path('%s' % value) # Use pathlib's validation + except Exception: + self.error(objekt, name, str(value)) - def validate(self, object, name, value): - """ Validates that a specified value is valid for this trait.""" - validated_value = super(File, self).validate(object, name, value) - if not self.exists: - return validated_value - elif os.path.isfile(value): - return validated_value - else: - raise TraitError( - args='The trait \'{}\' of {} instance is {}, but the path ' - ' \'{}\' does not exist.'.format(name, class_of(object), - self.info_text, value)) + if self.exists: + if self.exists and not value.exists(): + self.error(objekt, name, str(value)) + + if self._is_file and not value.is_file(): + self.error(objekt, name, str(value)) + + if self._is_dir and not value.is_dir(): + self.error(objekt, name, str(value)) + + if self.resolve: + try: + value = value.resolve(strict=self.exists) + except TypeError: + if self.exists: + value = value.resolve() + elif not value.is_absolute(): + value = Path().resolve() / value + + if not return_pathlike and not self.pathlike: + 
value = str(value) + + return value + + # def get_value(self, objekt, name, trait=None): + # value = super(BasePath, self).get_value(objekt, name) + # if value is Undefined: + # return self.default_value + + # if self.pathlike: + # return value + # return str(value) + + +class Directory(BasePath): + """ + Defines a trait whose value must be a directory path. + + Examples:: + + >>> from nipype.interfaces.base import Directory, TraitedSpec, TraitError + >>> class A(TraitedSpec): + ... foo = Directory(exists=False) + >>> a = A() + >>> a.foo + + + >>> a.foo = '/some/made/out/path' + >>> a.foo + '/some/made/out/path' + + >>> class A(TraitedSpec): + ... foo = Directory(exists=False, resolve=True) + >>> a = A(foo='relative_dir') + >>> a.foo # doctest: +ELLIPSIS + '.../relative_dir' + + >>> class A(TraitedSpec): + ... foo = Directory(exists=True, resolve=True) + >>> a = A() + >>> a.foo = 'relative_dir' # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + TraitError: + + >>> from os import mkdir + >>> mkdir('relative_dir') + >>> a.foo = 'relative_dir' + >>> a.foo # doctest: +ELLIPSIS + '.../relative_dir' + + >>> class A(TraitedSpec): + ... foo = Directory(exists=True, resolve=False) + >>> a = A(foo='relative_dir') + >>> a.foo + 'relative_dir' - self.error(object, name, value) + >>> class A(TraitedSpec): + ... foo = Directory('tmpdir') + >>> a = A() + >>> a.foo # doctest: +ELLIPSIS + -# ------------------------------------------------------------------------------- -# 'Directory' trait -# ------------------------------------------------------------------------------- + >>> class A(TraitedSpec): + ... foo = Directory('tmpdir', usedefault=True) + >>> a = A() + >>> a.foo # doctest: +ELLIPSIS + 'tmpdir' -class Directory(BaseUnicode): """ - Defines a trait whose value must be the name of a directory. + + _is_dir = True + + +class File(BasePath): """ + Defines a trait whose value must be a file path. - # A description of the type of value this trait accepts: - info_text = 'a directory name' - - def __init__(self, - value='', - auto_set=False, - entries=0, - exists=False, - **metadata): - """ Creates a Directory trait. - - Parameters - ---------- - value : string - The default value for the trait - auto_set : boolean - Indicates whether the directory editor updates the trait value - after every key stroke. - exists : boolean - Indicates whether the trait value must be an existing directory or - not. - - Default Value - ------------- - *value* or '' - """ - self.entries = entries - self.auto_set = auto_set - self.exists = exists + >>> from nipype.interfaces.base import File, TraitedSpec, TraitError + >>> class A(TraitedSpec): + ... foo = File() + >>> a = A() + >>> a.foo + - if exists: - self.info_text = 'an existing directory name' + >>> a.foo = '/some/made/out/path/to/file' + >>> a.foo + '/some/made/out/path/to/file' - super(Directory, self).__init__(value, **metadata) + >>> class A(TraitedSpec): + ... 
foo = File(exists=False, resolve=True) + >>> a = A(foo='idontexist.txt') + >>> a.foo # doctest: +ELLIPSIS + '.../idontexist.txt' - def validate(self, object, name, value): - """ Validates that a specified value is valid for this trait.""" - if isinstance(value, (str, bytes)): - if not self.exists: - return value - if os.path.isdir(value): - return value - else: - raise TraitError( - args='The trait \'{}\' of {} instance is {}, but the path ' - ' \'{}\' does not exist.'.format(name, class_of(object), - self.info_text, value)) - - self.error(object, name, value) - - -# lists of tuples -# each element consists of : -# - uncompressed (tuple[0]) extension -# - compressed (tuple[1]) extension -img_fmt_types = { - 'nifti1': [('.nii', '.nii.gz'), (('.hdr', '.img'), ('.hdr', '.img.gz'))], - 'mgh': [('.mgh', '.mgz'), ('.mgh', '.mgh.gz')], - 'nifti2': [('.nii', '.nii.gz')], - 'cifti2': [('.nii', '.nii.gz')], - 'gifti': [('.gii', '.gii.gz')], - 'dicom': [('.dcm', '.dcm'), ('.IMA', '.IMA'), ('.tar', '.tar.gz')], - 'nrrd': [('.nrrd', 'nrrd'), ('nhdr', 'nhdr')], - 'afni': [('.HEAD', '.HEAD'), ('.BRIK', '.BRIK')] -} + >>> class A(TraitedSpec): + ... foo = File(exists=True, resolve=True) + >>> a = A() + >>> a.foo = 'idontexist.txt' # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + TraitError: + >>> open('idoexist.txt', 'w').close() + >>> a.foo = 'idoexist.txt' + >>> a.foo # doctest: +ELLIPSIS + '.../idoexist.txt' -class ImageFile(File): - """ Defines a trait of specific neuroimaging files """ - - def __init__(self, - value='', - filter=None, - auto_set=False, - entries=0, - exists=False, - types=[], - allow_compressed=True, - **metadata): - """ Trait handles neuroimaging files. - - Parameters - ---------- - types : list - Strings of file format types accepted - compressed : boolean - Indicates whether the file format can compressed - """ - self.types = types - self.allow_compressed = allow_compressed - super(ImageFile, self).__init__(value, filter, auto_set, entries, - exists, **metadata) - - def info(self): - existing = 'n existing' if self.exists else '' - comma = ',' if self.exists and not self.allow_compressed else '' - uncompressed = ' uncompressed' if not self.allow_compressed else '' - with_ext = ' (valid extensions: [{}])'.format( - ', '.join(self.grab_exts())) if self.types else '' - return 'a{existing}{comma}{uncompressed} file{with_ext}'.format( - existing=existing, comma=comma, uncompressed=uncompressed, - with_ext=with_ext) - - def grab_exts(self): - # TODO: file type validation - exts = [] - for fmt in self.types: - if fmt in img_fmt_types: - exts.extend( - sum([[u for u in y[0]] - if isinstance(y[0], tuple) else [y[0]] - for y in img_fmt_types[fmt]], [])) - if self.allow_compressed: - exts.extend( - sum([[u for u in y[-1]] - if isinstance(y[-1], tuple) else [y[-1]] - for y in img_fmt_types[fmt]], [])) - else: - raise AttributeError( - 'Information has not been added for format' - ' type {} yet. Supported formats include: ' - '{}'.format(fmt, ', '.join(img_fmt_types.keys()))) - return list(set(exts)) - - def validate(self, object, name, value): - """ Validates that a specified value is valid for this trait. 
- """ - validated_value = super(ImageFile, self).validate(object, name, value) - if validated_value and self.types: - _exts = self.grab_exts() - if not any(validated_value.endswith(x) for x in _exts): - raise TraitError( - args="{} is not included in allowed types: {}".format( - validated_value, ', '.join(_exts))) - return validated_value + >>> class A(TraitedSpec): + ... foo = File('idoexist.txt') + >>> a = A() + >>> a.foo + + >>> class A(TraitedSpec): + ... foo = File('idoexist.txt', usedefault=True) + >>> a = A() + >>> a.foo + 'idoexist.txt' -""" -The functions that pop-up the Traits GUIs, edit_traits and -configure_traits, were failing because all of our inputs default to -Undefined deep and down in traits/ui/wx/list_editor.py it checks for -the len() of the elements of the list. The _Undefined class in traits -does not define the __len__ method and would error. I tried defining -our own Undefined and even sublassing Undefined, but both of those -failed with a TraitError in our initializer when we assign the -Undefined to the inputs because of an incompatible type: + >>> class A(TraitedSpec): + ... foo = File(exists=True, resolve=True, extensions=['.txt', 'txt.gz']) + >>> a = A() + >>> a.foo = 'idoexist.badtxt' # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + TraitError: -TraitError: The 'vertical_gradient' trait of a BetInputSpec instance must be a float, but a value of was specified. + >>> a.foo = 'idoexist.txt' + >>> a.foo # doctest: +ELLIPSIS + '.../idoexist.txt' -So... in order to keep the same type but add the missing method, I -monkey patched. -""" + """ + _is_file = True + _exts = None -def length(self): - return 0 + def __init__(self, value=NoDefaultSpecified, exists=False, pathlike=False, + resolve=False, allow_compressed=True, extensions=None, **metadata): + """Create a File trait.""" + if extensions is not None: + if isinstance(extensions, (bytes, str)): + extensions = [extensions] + if allow_compressed is False: + extensions = list(set(extensions) - IMG_ZIP_FMT) -########################################################################## -# Apply monkeypatch here -_Undefined.__len__ = length -########################################################################## + self._exts = sorted(set(['.%s' % ext if not ext.startswith('.') else ext + for ext in extensions])) -Undefined = _Undefined() + super(File, self).__init__(value=value, exists=exists, + pathlike=pathlike, resolve=resolve, **metadata) + + def validate(self, objekt, name, value, return_pathlike=False): + """Validate a value change.""" + value = super(File, self).validate(objekt, name, value, return_pathlike=True) + if self._exts: + ext = ''.join(value.suffixes) + if ext not in self._exts: + self.error(objekt, name, str(value)) + + if not return_pathlike and not self.pathlike: + value = str(value) + + return value -def isdefined(object): - return not isinstance(object, _Undefined) +class ImageFile(File): + """Defines a trait whose value must be a known neuroimaging file.""" + + def __init__(self, value=NoDefaultSpecified, exists=False, + pathlike=False, resolve=False, types=None, **metadata): + """Create an ImageFile trait.""" + extensions = None + if types is not None: + if isinstance(types, (bytes, str)): + types = [types] + + if set(types) - set(IMG_FORMATS.keys()): + invalid = set(types) - set(IMG_FORMATS.keys()) + raise ValueError("""\ +Unknown value(s) %s for metadata type of an ImageFile input.\ +""" % ', '.join(['"%s"' % t for t in invalid])) + extensions = [ext for t in types for 
ext in IMG_FORMATS[t]] + + super(ImageFile, self).__init__( + value=value, exists=exists, extensions=extensions, + pathlike=pathlike, resolve=resolve, **metadata) + + +def isdefined(objekt): + return not isinstance(objekt, _Undefined) def has_metadata(trait, metadata, value=None, recursive=True): @@ -319,7 +372,7 @@ class MultiObject(traits.List): """ Abstract class - shared functionality of input and output MultiObject """ - def validate(self, object, name, value): + def validate(self, objekt, name, value): # want to treat range and other sequences (except str) as list if not isinstance(value, (str, bytes)) and isinstance( @@ -338,12 +391,12 @@ def validate(self, object, name, value): not isinstance(inner_trait.trait_type, InputMultiObject) and not isinstance(value[0], list)): newvalue = [value] - value = super(MultiObject, self).validate(object, name, newvalue) + value = super(MultiObject, self).validate(objekt, name, newvalue) if value: return value - self.error(object, name, value) + self.error(objekt, name, value) class OutputMultiObject(MultiObject): @@ -379,8 +432,8 @@ class OutputMultiObject(MultiObject): """ - def get(self, object, name): - value = self.get_value(object, name) + def get(self, objekt, name): + value = self.get_value(objekt, name) if len(value) == 0: return Undefined elif len(value) == 1: @@ -388,8 +441,8 @@ def get(self, object, name): else: return value - def set(self, object, name, value): - self.set_value(object, name, value) + def set(self, objekt, name, value): + self.set_value(objekt, name, value) class InputMultiObject(MultiObject): @@ -425,5 +478,6 @@ class InputMultiObject(MultiObject): """ pass + InputMultiPath = InputMultiObject OutputMultiPath = OutputMultiObject diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index 60e8b6fafc..4b9afd1ce1 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -1024,15 +1024,18 @@ def _list_outputs(self): def s3tolocal(self, s3path, bkt): import boto # path formatting - if not os.path.split(self.inputs.local_directory)[1] == '': - self.inputs.local_directory += '/' - if not os.path.split(self.inputs.bucket_path)[1] == '': - self.inputs.bucket_path += '/' - if self.inputs.template[0] == '/': - self.inputs.template = self.inputs.template[1:] - - localpath = s3path.replace(self.inputs.bucket_path, - self.inputs.local_directory) + local_directory = str(self.inputs.local_directory) + bucket_path = str(self.inputs.bucket_path) + template = str(self.inputs.template) + if not os.path.split(local_directory)[1] == '': + local_directory += '/' + if not os.path.split(bucket_path)[1] == '': + bucket_path += '/' + if template[0] == '/': + template = template[1:] + + localpath = s3path.replace(bucket_path, + local_directory) localdir = os.path.split(localpath)[0] if not os.path.exists(localdir): os.makedirs(localdir) diff --git a/nipype/interfaces/spm/base.py b/nipype/interfaces/spm/base.py index fd93dfc522..bbabbf42ba 100644 --- a/nipype/interfaces/spm/base.py +++ b/nipype/interfaces/spm/base.py @@ -32,6 +32,7 @@ from ..base import (BaseInterface, traits, isdefined, InputMultiPath, BaseInterfaceInputSpec, Directory, Undefined, ImageFile, PackageInfo) +from ..base.traits_extension import NoDefaultSpecified from ..matlab import MatlabCommand from ...external.due import due, Doi, BibTeX @@ -597,30 +598,11 @@ def _make_matlab_command(self, contents, postscript=None): class ImageFileSPM(ImageFile): - """ - Defines an ImageFile trait specific to SPM interfaces. 
- """ - - def __init__(self, - value='', - filter=None, - auto_set=False, - entries=0, - exists=False, - types=['nifti1', 'nifti2'], - allow_compressed=False, - **metadata): - """ Trait handles neuroimaging files. - - Parameters - ---------- - types : list - Strings of file format types accepted - compressed : boolean - Indicates whether the file format can compressed - """ - self.types = types - self.allow_compressed = allow_compressed - super(ImageFileSPM, - self).__init__(value, filter, auto_set, entries, exists, types, - allow_compressed, **metadata) + """Defines a trait whose value must be a NIfTI file.""" + + def __init__(self, value=NoDefaultSpecified, exists=False, + pathlike=False, resolve=False, **metadata): + """Create an ImageFileSPM trait.""" + super(ImageFileSPM, self).__init__( + value=value, exists=exists, types=['nifti1', 'nifti2'], + pathlike=pathlike, resolve=resolve, **metadata) diff --git a/requirements.txt b/requirements.txt index 0d951f49c0..a5cd2d8cd9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,6 +11,7 @@ neurdflib click>=6.6.0 funcsigs configparser +pathlib2 pydotplus pydot>=1.2.3 packaging From b76860dd068466c4a0ede3bf75df68f37591d8e9 Mon Sep 17 00:00:00 2001 From: oesteban Date: Tue, 16 Jul 2019 15:06:16 -0700 Subject: [PATCH 0241/1665] sty: better placement of imports from enthought.traits --- nipype/interfaces/base/__init__.py | 5 ++++- nipype/interfaces/base/core.py | 3 ++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/base/__init__.py b/nipype/interfaces/base/__init__.py index 2284c1763a..30d44db56a 100644 --- a/nipype/interfaces/base/__init__.py +++ b/nipype/interfaces/base/__init__.py @@ -8,6 +8,9 @@ This module defines the API of all nipype interfaces. """ +from traits.trait_handlers import TraitDictObject, TraitListObject +from traits.trait_errors import TraitError + from .core import (Interface, BaseInterface, SimpleInterface, CommandLine, StdOutCommandLine, MpiCommandLine, SEMLikeCommandLine, LibraryBaseInterface, PackageInfo) @@ -17,7 +20,7 @@ StdOutCommandLineInputSpec) from .traits_extension import ( - traits, Undefined, TraitDictObject, TraitListObject, TraitError, isdefined, + traits, Undefined, isdefined, File, Directory, Str, DictStrStr, has_metadata, ImageFile, OutputMultiObject, InputMultiObject, OutputMultiPath, InputMultiPath) diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index 77ab6cf398..0011c925dd 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -27,6 +27,7 @@ import simplejson as json from dateutil.parser import parse as parseutc from future import standard_library +from traits.trait_errors import TraitError from ... import config, logging, LooseVersion from ...utils.provenance import write_provenance @@ -37,7 +38,7 @@ from ...external.due import due -from .traits_extension import traits, isdefined, TraitError +from .traits_extension import traits, isdefined from .specs import (BaseInterfaceInputSpec, CommandLineInputSpec, StdOutCommandLineInputSpec, MpiCommandLineInputSpec, get_filecopy_info) From e993a7f1ad4d518a4a3b1391882aaf2a5de90c52 Mon Sep 17 00:00:00 2001 From: oesteban Date: Tue, 16 Jul 2019 17:47:15 -0700 Subject: [PATCH 0242/1665] enh(filemanip): Solidify a patched version of ``pathlib.Path`` for internal consumption. 
--- nipype/interfaces/base/specs.py | 10 +---- nipype/interfaces/base/traits_extension.py | 24 +---------- nipype/utils/filemanip.py | 47 +++++++++++++++++++++- nipype/utils/tests/test_filemanip.py | 19 ++++++++- 4 files changed, 67 insertions(+), 33 deletions(-) diff --git a/nipype/interfaces/base/specs.py b/nipype/interfaces/base/specs.py index 47fec9ad1d..fb0f3295f0 100644 --- a/nipype/interfaces/base/specs.py +++ b/nipype/interfaces/base/specs.py @@ -21,7 +21,8 @@ from traits.trait_errors import TraitError from traits.trait_handlers import TraitDictObject, TraitListObject -from ...utils.filemanip import md5, hash_infile, hash_timestamp, to_str +from ...utils.filemanip import ( + md5, hash_infile, hash_timestamp, to_str, USING_PATHLIB2) from .traits_extension import ( traits, Undefined, @@ -32,13 +33,6 @@ from ... import config, __version__ -USING_PATHLIB2 = False -try: - from pathlib import Path -except ImportError: - from pathlib2 import Path # noqa - USING_PATHLIB2 = True - FLOAT_FORMAT = '{:.10f}'.format nipype_version = Version(__version__) diff --git a/nipype/interfaces/base/traits_extension.py b/nipype/interfaces/base/traits_extension.py index 3538c4e27e..ce4c43e4cc 100644 --- a/nipype/interfaces/base/traits_extension.py +++ b/nipype/interfaces/base/traits_extension.py @@ -34,10 +34,7 @@ from traits.api import Unicode from future import standard_library -try: - from pathlib import Path -except ImportError: - from pathlib2 import Path +from ...utils.filemanip import Path if traits_version < '3.7.0': @@ -145,35 +142,18 @@ def validate(self, objekt, name, value, return_pathlike=False): self.error(objekt, name, str(value)) if self.resolve: - try: - value = value.resolve(strict=self.exists) - except TypeError: - if self.exists: - value = value.resolve() - elif not value.is_absolute(): - value = Path().resolve() / value + value = value.resolve(strict=self.exists) if not return_pathlike and not self.pathlike: value = str(value) return value - # def get_value(self, objekt, name, trait=None): - # value = super(BasePath, self).get_value(objekt, name) - # if value is Undefined: - # return self.default_value - - # if self.pathlike: - # return value - # return str(value) - class Directory(BasePath): """ Defines a trait whose value must be a directory path. - Examples:: - >>> from nipype.interfaces.base import Directory, TraitedSpec, TraitError >>> class A(TraitedSpec): ... 
foo = Directory(exists=False) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index d8a65a6712..3012fd6e56 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -21,7 +21,6 @@ import contextlib import posixpath import simplejson as json -import numpy as np from builtins import str, bytes, open @@ -40,8 +39,52 @@ PY3 = sys.version_info[0] >= 3 -class FileNotFoundError(Exception): + +class FileNotFoundError(OSError): + """Defines the expception for Python 2.""" + + def __init__(self, path): + """Initialize the exception.""" + super(FileNotFoundError, self).__init__( + 2, 'No such file or directory', '%s' % path) + + +USING_PATHLIB2 = False +USING_PATHLIB_PATCHED = False +try: + from pathlib import Path +except ImportError: + from pathlib2 import Path + USING_PATHLIB2 = True + +try: + Path('/invented/file/path').resolve(strict=True) +except TypeError: + def _patch_resolve(self, strict=False): + """Add the argument strict to signature in Python>3,<3.6.""" + resolved = Path().old_resolve() / self + + if strict and not resolved.exists(): + raise FileNotFoundError(resolved) + return resolved + + Path.old_resolve = Path.resolve + Path.resolve = _patch_resolve + USING_PATHLIB_PATCHED = True +except FileNotFoundError: pass +except OSError: + def _patch_resolve(self, strict=False): + """Add the argument strict to signature for pathlib2.""" + try: + resolved = self.old_resolve(strict=strict) + except OSError: + raise FileNotFoundError(self.old_resolve()) + + return resolved + + Path.old_resolve = Path.resolve + Path.resolve = _patch_resolve def split_filename(fname): diff --git a/nipype/utils/tests/test_filemanip.py b/nipype/utils/tests/test_filemanip.py index b9a413d557..5d064a3817 100644 --- a/nipype/utils/tests/test_filemanip.py +++ b/nipype/utils/tests/test_filemanip.py @@ -16,7 +16,7 @@ check_forhash, _parse_mount_table, _cifs_table, on_cifs, copyfile, copyfiles, ensure_list, simplify_list, check_depends, split_filename, get_related_files, indirectory, - loadpkl, loadcrash, savepkl) + loadpkl, loadcrash, savepkl, FileNotFoundError, Path) def _ignore_atime(stat): @@ -570,3 +570,20 @@ def test_unversioned_pklization(tmpdir): with pytest.raises(Exception): with mock.patch('nipype.utils.tests.test_filemanip.Pickled', PickledBreaker): loadpkl('./pickled.pkz', versioning=True) + + +def test_Path_strict_resolve(tmpdir): + """Check the monkeypatch to test strict resolution of Path.""" + tmpdir.chdir() + + # Default strict=False should work out out of the box + testfile = Path('somefile.txt') + assert '%s/somefile.txt' % tmpdir == '%s' % testfile.resolve() + + # Switching to strict=True must raise FileNotFoundError (also in Python2) + with pytest.raises(FileNotFoundError): + testfile.resolve(strict=True) + + # If the file is created, it should not raise + testfile.write_text('') + assert '%s/somefile.txt' % tmpdir == '%s' % testfile.resolve(strict=True) From 06ba99f6722f5a224e8ffaa71ac8ae3b2957ef1c Mon Sep 17 00:00:00 2001 From: oesteban Date: Tue, 16 Jul 2019 18:34:52 -0700 Subject: [PATCH 0243/1665] fix(py2/34): address compatibility issues --- nipype/interfaces/base/traits_extension.py | 9 ++++++--- nipype/utils/filemanip.py | 4 ++-- nipype/utils/tests/test_filemanip.py | 2 +- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/nipype/interfaces/base/traits_extension.py b/nipype/interfaces/base/traits_extension.py index ce4c43e4cc..652ca5f772 100644 --- a/nipype/interfaces/base/traits_extension.py +++ b/nipype/interfaces/base/traits_extension.py @@ -33,9 
+33,10 @@ from traits.api import Unicode from future import standard_library +from ...utils.filemanip import Path, USING_PATHLIB2 -from ...utils.filemanip import Path - +if USING_PATHLIB2: + from future.types.newstr import newstr if traits_version < '3.7.0': raise ImportError('Traits version 3.7.0 or higher must be installed') @@ -127,7 +128,9 @@ def __init__(self, value=Undefined, def validate(self, objekt, name, value, return_pathlike=False): """Validate a value change.""" try: - value = Path('%s' % value) # Use pathlib's validation + if USING_PATHLIB2 and isinstance(value, newstr): + value = '%s' % value # pathlib2 doesn't like newstr + value = Path(value) # Use pathlib's validation except Exception: self.error(objekt, name, str(value)) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 3012fd6e56..7403671a48 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -41,7 +41,7 @@ class FileNotFoundError(OSError): - """Defines the expception for Python 2.""" + """Defines the exception for Python 2.""" def __init__(self, path): """Initialize the exception.""" @@ -75,7 +75,7 @@ def _patch_resolve(self, strict=False): pass except OSError: def _patch_resolve(self, strict=False): - """Add the argument strict to signature for pathlib2.""" + """Raise FileNotFoundError instead of OSError with pathlib2.""" try: resolved = self.old_resolve(strict=strict) except OSError: diff --git a/nipype/utils/tests/test_filemanip.py b/nipype/utils/tests/test_filemanip.py index 5d064a3817..7eaa8b9c86 100644 --- a/nipype/utils/tests/test_filemanip.py +++ b/nipype/utils/tests/test_filemanip.py @@ -585,5 +585,5 @@ def test_Path_strict_resolve(tmpdir): testfile.resolve(strict=True) # If the file is created, it should not raise - testfile.write_text('') + open('somefile.txt', 'w').close() assert '%s/somefile.txt' % tmpdir == '%s' % testfile.resolve(strict=True) From f7de5db865af64f6d5d1acf51744306b6258fafb Mon Sep 17 00:00:00 2001 From: oesteban Date: Tue, 16 Jul 2019 19:09:18 -0700 Subject: [PATCH 0244/1665] pin(neurodocker): set docker image tagged 0.5.0 with the hope of reenable image building. 
--- docker/generate_dockerfiles.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/generate_dockerfiles.sh b/docker/generate_dockerfiles.sh index 453e6bd232..c31bd1a350 100755 --- a/docker/generate_dockerfiles.sh +++ b/docker/generate_dockerfiles.sh @@ -52,8 +52,8 @@ do esac done -# neurodocker version 0.4.1-22-g7c44e01 -NEURODOCKER_IMAGE="kaczmarj/neurodocker:master@sha256:858632a7533cac100f70932749b4cfc77fc40f667f41fca208f406215cff8a27" +# neurodocker version 0.5.0 +NEURODOCKER_IMAGE="kaczmarj/neurodocker:0.5.0" # neurodebian:stretch-non-free pulled on September 19, 2018 BASE_IMAGE="neurodebian:stretch-non-free@sha256:7cd978427d7ad215834fee221d0536ed7825b3cddebc481eba2d792dfc2f7332" From 0ac149ea91fa3d205606e3a47cb4c11c6a598dcf Mon Sep 17 00:00:00 2001 From: oesteban Date: Tue, 16 Jul 2019 19:11:48 -0700 Subject: [PATCH 0245/1665] enh: make sure neurdflib comes after prov see https://github.com/nipy/nipype/pull/2961#issuecomment-512035484 --- nipype/info.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/nipype/info.py b/nipype/info.py index 0ad54a6682..268a3763e2 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -142,7 +142,6 @@ def get_nipype_gitversion(): 'futures; python_version == "2.7"', 'networkx>=%s ; python_version >= "3.0"' % NETWORKX_MIN_VERSION, 'networkx>=%s,<=%s ; python_version < "3.0"' % (NETWORKX_MIN_VERSION, NETWORKX_MAX_VERSION_27), - 'neurdflib', 'nibabel>=%s' % NIBABEL_MIN_VERSION, 'numpy>=%s ; python_version > "3.0" and python_version < "3.7"' % NUMPY_MIN_VERSION, 'numpy>=%s ; python_version >= "3.7"' % NUMPY_MIN_VERSION_37, @@ -157,6 +156,10 @@ def get_nipype_gitversion(): 'traits>=%s,!=5.0' % TRAITS_MIN_VERSION, ] +# neurdflib has to come after prov +# https://github.com/nipy/nipype/pull/2961#issuecomment-512035484 +REQUIRES += ['neurdflib'] + TESTS_REQUIRES = [ 'codecov', 'coverage<5', From 104a51f890abfe386866df24a60fa0913ad41888 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 17 Jul 2019 01:09:37 -0400 Subject: [PATCH 0246/1665] FIX: Docker build --- docker/generate_dockerfiles.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docker/generate_dockerfiles.sh b/docker/generate_dockerfiles.sh index 453e6bd232..e73f8a6e95 100755 --- a/docker/generate_dockerfiles.sh +++ b/docker/generate_dockerfiles.sh @@ -52,8 +52,8 @@ do esac done -# neurodocker version 0.4.1-22-g7c44e01 -NEURODOCKER_IMAGE="kaczmarj/neurodocker:master@sha256:858632a7533cac100f70932749b4cfc77fc40f667f41fca208f406215cff8a27" +# neurodocker version 0.5.0 +NEURODOCKER_IMAGE="kaczmarj/neurodocker:0.5.0@sha256:7b3222162262fe9004376700695e0fba85b5cb9fa7b26eb8f66662ebf183425f" # neurodebian:stretch-non-free pulled on September 19, 2018 BASE_IMAGE="neurodebian:stretch-non-free@sha256:7cd978427d7ad215834fee221d0536ed7825b3cddebc481eba2d792dfc2f7332" @@ -88,6 +88,7 @@ function generate_main_dockerfile() { OMP_NUM_THREADS=1 \ --arg PYTHON_VERSION_MAJOR=3 PYTHON_VERSION_MINOR=6 BUILD_DATE VCS_REF VERSION \ --user neuro \ + --workdir /home/neuro \ --miniconda create_env=neuro \ conda_install='python=${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR} libxml2 libxslt matplotlib mkl "numpy!=1.16.0" paramiko From c588cbc6898b95ed2431d63dddb6b8e91e402eaa Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Wed, 17 Jul 2019 01:21:25 -0400 Subject: [PATCH 0247/1665] REVERT: neurodocker version --- docker/generate_dockerfiles.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/generate_dockerfiles.sh b/docker/generate_dockerfiles.sh index e73f8a6e95..fd42dc4c8d 100755 --- a/docker/generate_dockerfiles.sh +++ b/docker/generate_dockerfiles.sh @@ -52,8 +52,8 @@ do esac done -# neurodocker version 0.5.0 -NEURODOCKER_IMAGE="kaczmarj/neurodocker:0.5.0@sha256:7b3222162262fe9004376700695e0fba85b5cb9fa7b26eb8f66662ebf183425f" +# neurodocker version 0.4.1-22-g7c44e01 +NEURODOCKER_IMAGE="kaczmarj/neurodocker:master@sha256:858632a7533cac100f70932749b4cfc77fc40f667f41fca208f406215cff8a27" # neurodebian:stretch-non-free pulled on September 19, 2018 BASE_IMAGE="neurodebian:stretch-non-free@sha256:7cd978427d7ad215834fee221d0536ed7825b3cddebc481eba2d792dfc2f7332" From 5e69e7a552c34ff5f0ba1e608c4af2cdd897baae Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Wed, 17 Jul 2019 06:50:33 -0700 Subject: [PATCH 0248/1665] Update nipype/interfaces/spm/base.py [skip ci] Co-Authored-By: Satrajit Ghosh --- nipype/interfaces/spm/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/spm/base.py b/nipype/interfaces/spm/base.py index bbabbf42ba..98d53b761b 100644 --- a/nipype/interfaces/spm/base.py +++ b/nipype/interfaces/spm/base.py @@ -604,5 +604,5 @@ def __init__(self, value=NoDefaultSpecified, exists=False, pathlike=False, resolve=False, **metadata): """Create an ImageFileSPM trait.""" super(ImageFileSPM, self).__init__( - value=value, exists=exists, types=['nifti1', 'nifti2'], + value=value, exists=exists, types=['nifti1', 'nifti2'], allow_compressed=False, pathlike=pathlike, resolve=resolve, **metadata) From 523a21e5dbfc4bcff60f6f8a2e43dc3649857ace Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Wed, 17 Jul 2019 06:54:31 -0700 Subject: [PATCH 0249/1665] Update nipype/interfaces/base/traits_extension.py [skip ci] Co-Authored-By: Chris Markiewicz --- nipype/interfaces/base/traits_extension.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/base/traits_extension.py b/nipype/interfaces/base/traits_extension.py index 652ca5f772..cf1b3353c2 100644 --- a/nipype/interfaces/base/traits_extension.py +++ b/nipype/interfaces/base/traits_extension.py @@ -135,7 +135,7 @@ def validate(self, objekt, name, value, return_pathlike=False): self.error(objekt, name, str(value)) if self.exists: - if self.exists and not value.exists(): + if not value.exists(): self.error(objekt, name, str(value)) if self._is_file and not value.is_file(): From c1d1151bd20d1ab109bda7965b35c72e20054f37 Mon Sep 17 00:00:00 2001 From: oesteban Date: Wed, 17 Jul 2019 08:02:15 -0700 Subject: [PATCH 0250/1665] fix: make info_text of ``BasePath``s a property (@effigies' suggestion) --- nipype/interfaces/base/traits_extension.py | 28 ++++++++++++---------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/nipype/interfaces/base/traits_extension.py b/nipype/interfaces/base/traits_extension.py index cf1b3353c2..4d3538fd41 100644 --- a/nipype/interfaces/base/traits_extension.py +++ b/nipype/interfaces/base/traits_extension.py @@ -99,30 +99,34 @@ class BasePath(TraitType): """Defines a trait whose value must be a valid filesystem path.""" # A description of the type of value this trait accepts: - info_text = 'a pathlike object or string' exists = False pathlike = False resolve = False _is_file = False _is_dir = False + @property + def 
info_text(self): + """Create the trait's general description.""" + info_text = 'a pathlike object or string' + if any((self.exists, self._is_file, self._is_dir)): + info_text += ' representing a' + if self.exists: + info_text += 'n existing' + if self._is_file: + info_text += ' file' + elif self._is_dir: + info_text += ' directory' + else: + info_text += ' file or directory' + return info_text + def __init__(self, value=Undefined, exists=False, pathlike=False, resolve=False, **metadata): """Create a BasePath trait.""" self.exists = exists self.resolve = resolve self.pathlike = pathlike - if any((exists, self._is_file, self._is_dir)): - self.info_text += ' representing a' - if exists: - self.info_text += 'n existing' - if self._is_file: - self.info_text += ' file' - elif self._is_dir: - self.info_text += ' directory' - else: - self.info_text += ' file or directory' - super(BasePath, self).__init__(value, **metadata) def validate(self, objekt, name, value, return_pathlike=False): From 6c3858ab81c6d375a53050cb89c896f94c86b916 Mon Sep 17 00:00:00 2001 From: oesteban Date: Wed, 17 Jul 2019 08:07:35 -0700 Subject: [PATCH 0251/1665] fix: remove pathlike metadata as it wasn't useful --- nipype/interfaces/base/traits_extension.py | 21 +++++++++------------ nipype/interfaces/spm/base.py | 7 +++---- 2 files changed, 12 insertions(+), 16 deletions(-) diff --git a/nipype/interfaces/base/traits_extension.py b/nipype/interfaces/base/traits_extension.py index 4d3538fd41..de215beb96 100644 --- a/nipype/interfaces/base/traits_extension.py +++ b/nipype/interfaces/base/traits_extension.py @@ -100,7 +100,6 @@ class BasePath(TraitType): # A description of the type of value this trait accepts: exists = False - pathlike = False resolve = False _is_file = False _is_dir = False @@ -121,12 +120,10 @@ def info_text(self): info_text += ' file or directory' return info_text - def __init__(self, value=Undefined, - exists=False, pathlike=False, resolve=False, **metadata): + def __init__(self, value=Undefined, exists=False, resolve=False, **metadata): """Create a BasePath trait.""" self.exists = exists self.resolve = resolve - self.pathlike = pathlike super(BasePath, self).__init__(value, **metadata) def validate(self, objekt, name, value, return_pathlike=False): @@ -151,7 +148,7 @@ def validate(self, objekt, name, value, return_pathlike=False): if self.resolve: value = value.resolve(strict=self.exists) - if not return_pathlike and not self.pathlike: + if not return_pathlike: value = str(value) return value @@ -277,8 +274,8 @@ class File(BasePath): _is_file = True _exts = None - def __init__(self, value=NoDefaultSpecified, exists=False, pathlike=False, - resolve=False, allow_compressed=True, extensions=None, **metadata): + def __init__(self, value=NoDefaultSpecified, exists=False, resolve=False, + allow_compressed=True, extensions=None, **metadata): """Create a File trait.""" if extensions is not None: if isinstance(extensions, (bytes, str)): @@ -290,8 +287,8 @@ def __init__(self, value=NoDefaultSpecified, exists=False, pathlike=False, self._exts = sorted(set(['.%s' % ext if not ext.startswith('.') else ext for ext in extensions])) - super(File, self).__init__(value=value, exists=exists, - pathlike=pathlike, resolve=resolve, **metadata) + super(File, self).__init__(value=value, exists=exists, resolve=resolve, + extensions=self._exts, **metadata) def validate(self, objekt, name, value, return_pathlike=False): """Validate a value change.""" @@ -301,7 +298,7 @@ def validate(self, objekt, name, value, return_pathlike=False): 
if ext not in self._exts: self.error(objekt, name, str(value)) - if not return_pathlike and not self.pathlike: + if not return_pathlike: value = str(value) return value @@ -311,7 +308,7 @@ class ImageFile(File): """Defines a trait whose value must be a known neuroimaging file.""" def __init__(self, value=NoDefaultSpecified, exists=False, - pathlike=False, resolve=False, types=None, **metadata): + resolve=False, types=None, **metadata): """Create an ImageFile trait.""" extensions = None if types is not None: @@ -327,7 +324,7 @@ def __init__(self, value=NoDefaultSpecified, exists=False, super(ImageFile, self).__init__( value=value, exists=exists, extensions=extensions, - pathlike=pathlike, resolve=resolve, **metadata) + resolve=resolve, **metadata) def isdefined(objekt): diff --git a/nipype/interfaces/spm/base.py b/nipype/interfaces/spm/base.py index 98d53b761b..fda02d40f1 100644 --- a/nipype/interfaces/spm/base.py +++ b/nipype/interfaces/spm/base.py @@ -600,9 +600,8 @@ def _make_matlab_command(self, contents, postscript=None): class ImageFileSPM(ImageFile): """Defines a trait whose value must be a NIfTI file.""" - def __init__(self, value=NoDefaultSpecified, exists=False, - pathlike=False, resolve=False, **metadata): + def __init__(self, value=NoDefaultSpecified, exists=False, resolve=False, **metadata): """Create an ImageFileSPM trait.""" super(ImageFileSPM, self).__init__( - value=value, exists=exists, types=['nifti1', 'nifti2'], allow_compressed=False, - pathlike=pathlike, resolve=resolve, **metadata) + value=value, exists=exists, types=['nifti1', 'nifti2'], + allow_compressed=False, resolve=resolve, **metadata) From 66c0f9b1ae83f0646ae71b913ef2991938ea21bb Mon Sep 17 00:00:00 2001 From: oesteban Date: Wed, 17 Jul 2019 08:09:06 -0700 Subject: [PATCH 0252/1665] fix: remove unused ``USING_PATHLIB_PATCHED`` --- nipype/utils/filemanip.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 7403671a48..edf23532f5 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -50,7 +50,6 @@ def __init__(self, path): USING_PATHLIB2 = False -USING_PATHLIB_PATCHED = False try: from pathlib import Path except ImportError: @@ -70,7 +69,6 @@ def _patch_resolve(self, strict=False): Path.old_resolve = Path.resolve Path.resolve = _patch_resolve - USING_PATHLIB_PATCHED = True except FileNotFoundError: pass except OSError: From d42db563bd11dadc2a38b8550f43677c0d22fb28 Mon Sep 17 00:00:00 2001 From: oesteban Date: Wed, 17 Jul 2019 08:10:08 -0700 Subject: [PATCH 0253/1665] sty: make codacy happy (Redefining built-in 'FileNotFoundError') --- nipype/utils/filemanip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index edf23532f5..e19563ed54 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -40,7 +40,7 @@ PY3 = sys.version_info[0] >= 3 -class FileNotFoundError(OSError): +class FileNotFoundError(OSError): # noqa """Defines the exception for Python 2.""" def __init__(self, path): From e848165b3bfe53860c6a16c07fa52e384f8f266b Mon Sep 17 00:00:00 2001 From: oesteban Date: Wed, 17 Jul 2019 10:57:00 -0700 Subject: [PATCH 0254/1665] sty: clarity suggestions from @mgxd See https://github.com/nipy/nipype/pull/2962#pullrequestreview-263125415 --- nipype/interfaces/io.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index 4b9afd1ce1..c409d4e78a 100644 --- a/nipype/interfaces/io.py 
+++ b/nipype/interfaces/io.py @@ -1027,9 +1027,9 @@ def s3tolocal(self, s3path, bkt): local_directory = str(self.inputs.local_directory) bucket_path = str(self.inputs.bucket_path) template = str(self.inputs.template) - if not os.path.split(local_directory)[1] == '': + if not os.path.basename(local_directory) == '': local_directory += '/' - if not os.path.split(bucket_path)[1] == '': + if not os.path.basename(bucket_path) == '': bucket_path += '/' if template[0] == '/': template = template[1:] From 114ddf8c2dad87e8421956a4d44dd1e0fe55498d Mon Sep 17 00:00:00 2001 From: oesteban Date: Wed, 17 Jul 2019 11:23:57 -0700 Subject: [PATCH 0255/1665] sty: pep257 and other stylistic changes to AFNI interfaces --- nipype/interfaces/afni/base.py | 108 +++++++++------------------ nipype/interfaces/afni/model.py | 54 +++++++++++++- nipype/interfaces/afni/preprocess.py | 92 ++++++++++++----------- 3 files changed, 136 insertions(+), 118 deletions(-) diff --git a/nipype/interfaces/afni/base.py b/nipype/interfaces/afni/base.py index d4b8e474ff..815f0619c8 100644 --- a/nipype/interfaces/afni/base.py +++ b/nipype/interfaces/afni/base.py @@ -4,7 +4,6 @@ """Provide interface to AFNI commands.""" from __future__ import (print_function, division, unicode_literals, absolute_import) -from builtins import object, str from future.utils import raise_from import os @@ -12,8 +11,7 @@ from distutils import spawn from ... import logging, LooseVersion -from ...utils.filemanip import split_filename, fname_presuffix - +from ...utils.filemanip import split_filename from ..base import (CommandLine, traits, CommandLineInputSpec, isdefined, File, TraitedSpec, PackageInfo) from ...external.due import BibTeX @@ -23,14 +21,15 @@ class Info(PackageInfo): - """Handle afni output type and version information. - """ + """Handle afni output type and version information.""" + __outputtype = 'AFNI' ftypes = {'NIFTI': '.nii', 'AFNI': '', 'NIFTI_GZ': '.nii.gz'} version_cmd = 'afni --version' @staticmethod def parse_version(raw_info): + """Check and parse AFNI's version.""" version_stamp = raw_info.split('\n')[0].split('Version ')[1] if version_stamp.startswith('AFNI'): version_stamp = version_stamp.split('AFNI_')[1] @@ -46,7 +45,8 @@ def parse_version(raw_info): @classmethod def output_type_to_ext(cls, outputtype): - """Get the file extension for the given output type. + """ + Get the file extension for the given output type. Parameters ---------- @@ -57,8 +57,8 @@ def output_type_to_ext(cls, outputtype): ------- extension : str The file extension for the output type. - """ + """ try: return cls.ftypes[outputtype] except KeyError as e: @@ -67,24 +67,28 @@ def output_type_to_ext(cls, outputtype): @classmethod def outputtype(cls): - """AFNI has no environment variables, - Output filetypes get set in command line calls - Nipype uses AFNI as default + """ + Set default output filetype. + + AFNI has no environment variables, Output filetypes get set in command line calls + Nipype uses ``AFNI`` as default Returns ------- None + """ - # warn(('AFNI has no environment variable that sets filetype ' - # 'Nipype uses NIFTI_GZ as default')) return 'AFNI' @staticmethod def standard_image(img_name): - '''Grab an image from the standard location. + """ + Grab an image from the standard location. 
- Could be made more fancy to allow for more relocatability''' + Could be made more fancy to allow for more relocatability + + """ clout = CommandLine( 'which afni', ignore_exception=True, @@ -101,6 +105,7 @@ def standard_image(img_name): class AFNICommandBase(CommandLine): """ A base class to fix a linking problem in OSX and afni. + See http://afni.nimh.nih.gov/afni/community/board/read.php?1,145346,145347#msg-145347 """ @@ -127,7 +132,8 @@ class AFNICommandOutputSpec(TraitedSpec): class AFNICommand(AFNICommandBase): - """Shared options for several AFNI commands """ + """Shared options for several AFNI commands.""" + input_spec = AFNICommandInputSpec _outputtype = None @@ -162,6 +168,7 @@ class AFNICommand(AFNICommandBase): @property def num_threads(self): + """Get number of threads.""" return self.inputs.num_threads @num_threads.setter @@ -170,20 +177,21 @@ def num_threads(self, value): @classmethod def set_default_output_type(cls, outputtype): - """Set the default output type for AFNI classes. + """ + Set the default output type for AFNI classes. This method is used to set the default output type for all afni subclasses. However, setting this will not update the output type for any existing instances. For these, assign the .inputs.outputtype. """ - if outputtype in Info.ftypes: cls._outputtype = outputtype else: raise AttributeError('Invalid AFNI outputtype: %s' % outputtype) def __init__(self, **inputs): + """Instantiate an AFNI command tool wrapper.""" super(AFNICommand, self).__init__(**inputs) self.inputs.on_trait_change(self._output_update, 'outputtype') @@ -199,13 +207,16 @@ def __init__(self, **inputs): self._output_update() def _nthreads_update(self): - """Update environment with new number of threads""" + """Update environment with new number of threads.""" self.inputs.environ['OMP_NUM_THREADS'] = '%d' % self.inputs.num_threads def _output_update(self): - """ i think? updates class private attribute based on instance input - in fsl also updates ENVIRON variable....not valid in afni - as it uses no environment variables + """ + Update the internal property with the provided input. + + i think? updates class private attribute based on instance input + in fsl also updates ENVIRON variable....not valid in afni + as it uses no environment variables """ self._outputtype = self.inputs.outputtype @@ -226,59 +237,9 @@ def _list_outputs(self): outputs[name] = outputs[name] + "+orig.BRIK" return outputs - def _gen_fname(self, - basename, - cwd=None, - suffix=None, - change_ext=True, - ext=None): - """Generate a filename based on the given parameters. - - The filename will take the form: cwd/basename. - If change_ext is True, it will use the extentions specified in - intputs.output_type. - - Parameters - ---------- - basename : str - Filename to base the new filename on. - cwd : str - Path to prefix to the new filename. (default is os.getcwd()) - suffix : str - Suffix to add to the `basename`. (defaults is '' ) - change_ext : bool - Flag to change the filename extension to the FSL output type. - (default True) - - Returns - ------- - fname : str - New filename based on given parameters. - - """ - - if basename == '': - msg = 'Unable to generate filename for command %s. ' % self.cmd - msg += 'basename is not set!' 
- raise ValueError(msg) - if cwd is None: - cwd = os.getcwd() - if ext is None: - ext = Info.output_type_to_ext(self.inputs.outputtype) - if change_ext: - if suffix: - suffix = ''.join((suffix, ext)) - else: - suffix = ext - if suffix is None: - suffix = '' - fname = fname_presuffix( - basename, suffix=suffix, use_ext=False, newpath=cwd) - return fname - def no_afni(): - """ Checks if AFNI is available """ + """Check whether AFNI is not available.""" if Info.version() is None: return True return False @@ -292,8 +253,11 @@ class AFNIPythonCommandInputSpec(CommandLineInputSpec): class AFNIPythonCommand(AFNICommand): + """A subtype of AFNI command line for Python scripts.""" + @property def cmd(self): + """Revise the command path.""" orig_cmd = super(AFNIPythonCommand, self).cmd found = spawn.find_executable(orig_cmd) return found if found is not None else orig_cmd diff --git a/nipype/interfaces/afni/model.py b/nipype/interfaces/afni/model.py index 2cccdfe869..2d62f7dfa4 100644 --- a/nipype/interfaces/afni/model.py +++ b/nipype/interfaces/afni/model.py @@ -17,7 +17,7 @@ from ...external.due import BibTeX from .base import (AFNICommandBase, AFNICommand, AFNICommandInputSpec, - AFNICommandOutputSpec) + AFNICommandOutputSpec, Info) class DeconvolveInputSpec(AFNICommandInputSpec): @@ -307,6 +307,58 @@ def _list_outputs(self): return outputs + def _gen_fname(self, + basename, + cwd=None, + suffix=None, + change_ext=True, + ext=None): + """Generate a filename based on the given parameters. + + The filename will take the form: cwd/basename. + If change_ext is True, it will use the extentions specified in + intputs.output_type. + + Parameters + ---------- + basename : str + Filename to base the new filename on. + cwd : str + Path to prefix to the new filename. (default is os.getcwd()) + suffix : str + Suffix to add to the `basename`. (defaults is '' ) + change_ext : bool + Flag to change the filename extension to the FSL output type. + (default True) + + Returns + ------- + fname : str + New filename based on given parameters. + + """ + from nipype.utils.filemanip import fname_presuffix + + if basename == '': + msg = 'Unable to generate filename for command %s. ' % self.cmd + msg += 'basename is not set!' + raise ValueError(msg) + if cwd is None: + cwd = os.getcwd() + if ext is None: + ext = Info.output_type_to_ext(self.inputs.outputtype) + if change_ext: + if suffix: + suffix = ''.join((suffix, ext)) + else: + suffix = ext + if suffix is None: + suffix = '' + fname = fname_presuffix( + basename, suffix=suffix, use_ext=False, newpath=cwd) + return fname + + class RemlfitInputSpec(AFNICommandInputSpec): # mandatory files in_files = InputMultiPath( diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index c2ccb1988a..9729e22fe2 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -757,8 +757,7 @@ class BandpassInputSpec(AFNICommandInputSpec): desc='output file from 3dBandpass', argstr='-prefix %s', position=1, - name_source='in_file', - genfile=True) + name_source='in_file') lowpass = traits.Float( desc='lowpass', argstr='%f', position=-2, mandatory=True) highpass = traits.Float( @@ -3087,43 +3086,42 @@ class QwarpInputSpec(AFNICommandInputSpec): copyfile=False) out_file = File( argstr='-prefix %s', - name_template='%s_QW', + name_template='ppp_%s', name_source=['in_file'], - genfile=True, - desc='out_file ppp' - 'Sets the prefix for the output datasets.' 
- '* The source dataset is warped to match the base' - 'and gets prefix \'ppp\'. (Except if \'-plusminus\' is used.)' - '* The final interpolation to this output dataset is' - 'done using the \'wsinc5\' method. See the output of' - ' 3dAllineate -HELP' - '(in the "Modifying \'-final wsinc5\'" section) for' - 'the lengthy technical details.' - '* The 3D warp used is saved in a dataset with' - 'prefix \'ppp_WARP\' -- this dataset can be used' - 'with 3dNwarpApply and 3dNwarpCat, for example.' - '* To be clear, this is the warp from source dataset' - ' coordinates to base dataset coordinates, where the' - ' values at each base grid point are the xyz displacments' - ' needed to move that grid point\'s xyz values to the' - ' corresponding xyz values in the source dataset:' - ' base( (x,y,z) + WARP(x,y,z) ) matches source(x,y,z)' - ' Another way to think of this warp is that it \'pulls\'' - ' values back from source space to base space.' - '* 3dNwarpApply would use \'ppp_WARP\' to transform datasets' - 'aligned with the source dataset to be aligned with the' - 'base dataset.' - '** If you do NOT want this warp saved, use the option \'-nowarp\'.' - '-->> (However, this warp is usually the most valuable possible output!)' - '* If you want to calculate and save the inverse 3D warp,' - 'use the option \'-iwarp\'. This inverse warp will then be' - 'saved in a dataset with prefix \'ppp_WARPINV\'.' - '* This inverse warp could be used to transform data from base' - 'space to source space, if you need to do such an operation.' - '* You can easily compute the inverse later, say by a command like' - ' 3dNwarpCat -prefix Z_WARPINV \'INV(Z_WARP+tlrc)\'' - 'or the inverse can be computed as needed in 3dNwarpApply, like' - ' 3dNwarpApply -nwarp \'INV(Z_WARP+tlrc)\' -source Dataset.nii ...') + desc="""\ +Sets the prefix/suffix for the output datasets. \ +* The source dataset is warped to match the base \ +and gets prefix \'ppp\'. (Except if \'-plusminus\' is used.) \ +* The final interpolation to this output dataset is \ +done using the \'wsinc5\' method. See the output of \ + 3dAllineate -HELP \ +(in the "Modifying \'-final wsinc5\'" section) for \ +the lengthy technical details. \ +* The 3D warp used is saved in a dataset with \ +prefix \'ppp_WARP\' -- this dataset can be used \ +with 3dNwarpApply and 3dNwarpCat, for example. \ +* To be clear, this is the warp from source dataset \ + coordinates to base dataset coordinates, where the \ + values at each base grid point are the xyz displacments \ + needed to move that grid point\'s xyz values to the \ + corresponding xyz values in the source dataset: \ + base( (x,y,z) + WARP(x,y,z) ) matches source(x,y,z) \ + Another way to think of this warp is that it \'pulls\' \ + values back from source space to base space. \ +* 3dNwarpApply would use \'ppp_WARP\' to transform datasets \ +aligned with the source dataset to be aligned with the \ +base dataset. \ +** If you do NOT want this warp saved, use the option \'-nowarp\'. \ +-->> (However, this warp is usually the most valuable possible output!) \ +* If you want to calculate and save the inverse 3D warp, \ +use the option \'-iwarp\'. This inverse warp will then be \ +saved in a dataset with prefix \'ppp_WARPINV\'. \ +* This inverse warp could be used to transform data from base \ +space to source space, if you need to do such an operation. 
\ +* You can easily compute the inverse later, say by a command like \ + 3dNwarpCat -prefix Z_WARPINV \'INV(Z_WARP+tlrc)\' \ +or the inverse can be computed as needed in 3dNwarpApply, like \ + 3dNwarpApply -nwarp \'INV(Z_WARP+tlrc)\' -source Dataset.nii ...""") resample = traits.Bool( desc='This option simply resamples the source dataset to match the' 'base dataset grid. You can use this if the two datasets' @@ -3619,7 +3617,8 @@ class Qwarp(AFNICommand): >>> qwarp.inputs.base_file = 'sub-01_dir-RL_epi.nii.gz' >>> qwarp.inputs.plusminus = True >>> qwarp.cmdline - '3dQwarp -base sub-01_dir-RL_epi.nii.gz -source sub-01_dir-LR_epi.nii.gz -nopadWARP -prefix sub-01_dir-LR_epi_QW -plusminus' + '3dQwarp -base sub-01_dir-RL_epi.nii.gz -source sub-01_dir-LR_epi.nii.gz -nopadWARP \ +-prefix ppp_sub-01_dir-LR_epi -plusminus' >>> res = qwarp.run() # doctest: +SKIP >>> from nipype.interfaces import afni @@ -3628,7 +3627,7 @@ class Qwarp(AFNICommand): >>> qwarp.inputs.base_file = 'mni.nii' >>> qwarp.inputs.resample = True >>> qwarp.cmdline - '3dQwarp -base mni.nii -source structural.nii -prefix structural_QW -resample' + '3dQwarp -base mni.nii -source structural.nii -prefix ppp_structural -resample' >>> res = qwarp.run() # doctest: +SKIP >>> from nipype.interfaces import afni @@ -3642,7 +3641,8 @@ class Qwarp(AFNICommand): >>> qwarp.inputs.iwarp = True >>> qwarp.inputs.blur = [0,3] >>> qwarp.cmdline - '3dQwarp -base epi.nii -blur 0.0 3.0 -source structural.nii -iwarp -prefix anatSSQ.nii.gz -resample -verb -lpc' + '3dQwarp -base epi.nii -blur 0.0 3.0 -source structural.nii -iwarp -prefix anatSSQ.nii.gz \ +-resample -verb -lpc' >>> res = qwarp.run() # doctest: +SKIP >>> from nipype.interfaces import afni @@ -3652,7 +3652,7 @@ class Qwarp(AFNICommand): >>> qwarp.inputs.duplo = True >>> qwarp.inputs.blur = [0,3] >>> qwarp.cmdline - '3dQwarp -base mni.nii -blur 0.0 3.0 -duplo -source structural.nii -prefix structural_QW' + '3dQwarp -base mni.nii -blur 0.0 3.0 -duplo -source structural.nii -prefix ppp_structural' >>> res = qwarp.run() # doctest: +SKIP >>> from nipype.interfaces import afni @@ -3674,7 +3674,8 @@ class Qwarp(AFNICommand): >>> qwarp2.inputs.inilev = 7 >>> qwarp2.inputs.iniwarp = ['Q25_warp+tlrc.HEAD'] >>> qwarp2.cmdline - '3dQwarp -base mni.nii -blur 0.0 2.0 -source structural.nii -inilev 7 -iniwarp Q25_warp+tlrc.HEAD -prefix Q11' + '3dQwarp -base mni.nii -blur 0.0 2.0 -source structural.nii -inilev 7 -iniwarp Q25_\ +warp+tlrc.HEAD -prefix Q11' >>> res2 = qwarp2.run() # doctest: +SKIP >>> res2 = qwarp2.run() # doctest: +SKIP >>> qwarp3 = afni.Qwarp() @@ -3683,7 +3684,8 @@ class Qwarp(AFNICommand): >>> qwarp3.inputs.allineate = True >>> qwarp3.inputs.allineate_opts = '-cose lpa -verb' >>> qwarp3.cmdline - "3dQwarp -allineate -allineate_opts '-cose lpa -verb' -base mni.nii -source structural.nii -prefix structural_QW" + "3dQwarp -allineate -allineate_opts '-cose lpa -verb' -base mni.nii -source structural.nii \ +-prefix ppp_structural" >>> res3 = qwarp3.run() # doctest: +SKIP """ _cmd = '3dQwarp' input_spec = QwarpInputSpec @@ -3759,8 +3761,8 @@ class QwarpPlusMinusInputSpec(QwarpInputSpec): new_name='in_file', copyfile=False) out_file = File( + 'Qwarp.nii.gz', argstr='-prefix %s', - value='Qwarp.nii.gz', position=0, usedefault=True, desc="Output file") From 406ab585130e672479aaf2348e345e881f1f278d Mon Sep 17 00:00:00 2001 From: oesteban Date: Wed, 17 Jul 2019 11:25:09 -0700 Subject: [PATCH 0256/1665] fix: make specs (updated extensions metadata for Files) --- 
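Note: the regenerated spec tests below all appear to follow the same mechanical pattern -- each ``File`` trait in the auto-generated input and output maps gains an ``extensions=None`` entry, and nothing else about the tests changes. A minimal sketch of the before/after shape, drawn from the hunks that follow (``SomeInterface`` is a placeholder name, not one of the interfaces listed in the diffstat):

    # before: a File trait recorded only its other metadata
    in_file=dict(mandatory=True, ),

    # after: the regenerated spec also pins the new ``extensions`` metadata
    in_file=dict(
        extensions=None,
        mandatory=True,
    ),

    # the assertion loop in each test is unchanged; it simply checks that
    # every recorded metadata key still matches the trait definition
    # (output maps are checked the same way via SomeInterface.output_spec())
    inputs = SomeInterface.input_spec()
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value
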
nipype/algorithms/tests/test_auto_ACompCor.py | 11 +- .../tests/test_auto_ActivationCount.py | 6 +- .../tests/test_auto_AddCSVColumn.py | 12 +- .../algorithms/tests/test_auto_AddCSVRow.py | 2 +- nipype/algorithms/tests/test_auto_AddNoise.py | 11 +- .../tests/test_auto_ArtifactDetect.py | 2 +- .../test_auto_CalculateNormalizedMoments.py | 5 +- .../tests/test_auto_ComputeDVARS.py | 24 ++-- .../tests/test_auto_ComputeMeshWarp.py | 24 +++- .../algorithms/tests/test_auto_CreateNifti.py | 12 +- nipype/algorithms/tests/test_auto_Distance.py | 14 ++- .../tests/test_auto_FramewiseDisplacement.py | 19 +++- .../tests/test_auto_FuzzyOverlap.py | 7 +- nipype/algorithms/tests/test_auto_Gunzip.py | 8 +- nipype/algorithms/tests/test_auto_ICC.py | 11 +- .../algorithms/tests/test_auto_Matlab2CSV.py | 5 +- .../tests/test_auto_MergeCSVFiles.py | 7 +- .../algorithms/tests/test_auto_MergeROIs.py | 4 +- .../tests/test_auto_MeshWarpMaths.py | 19 +++- .../tests/test_auto_NonSteadyStateDetector.py | 6 +- .../test_auto_NormalizeProbabilityMapSet.py | 2 +- .../algorithms/tests/test_auto_P2PDistance.py | 24 +++- .../algorithms/tests/test_auto_PickAtlas.py | 9 +- .../algorithms/tests/test_auto_Similarity.py | 14 ++- .../tests/test_auto_SpecifySparseModel.py | 4 +- .../algorithms/tests/test_auto_SplitROIs.py | 7 +- .../tests/test_auto_StimulusCorrelation.py | 5 +- nipype/algorithms/tests/test_auto_TCompCor.py | 11 +- .../algorithms/tests/test_auto_WarpPoints.py | 13 ++- .../afni/tests/test_auto_ABoverlap.py | 5 +- .../afni/tests/test_auto_AFNICommand.py | 1 + .../afni/tests/test_auto_AFNIPythonCommand.py | 1 + .../afni/tests/test_auto_AFNItoNIFTI.py | 4 +- .../afni/tests/test_auto_AlignEpiAnatPy.py | 22 ++-- .../afni/tests/test_auto_Allineate.py | 33 ++++-- .../afni/tests/test_auto_AutoTLRC.py | 3 +- .../afni/tests/test_auto_AutoTcorrelate.py | 10 +- .../afni/tests/test_auto_Autobox.py | 4 +- .../afni/tests/test_auto_Automask.py | 7 +- .../afni/tests/test_auto_Axialize.py | 4 +- .../afni/tests/test_auto_Bandpass.py | 11 +- .../afni/tests/test_auto_BlurInMask.py | 14 ++- .../afni/tests/test_auto_BlurToFWHM.py | 14 ++- .../afni/tests/test_auto_BrickStat.py | 2 + .../interfaces/afni/tests/test_auto_Bucket.py | 3 +- .../interfaces/afni/tests/test_auto_Calc.py | 11 +- nipype/interfaces/afni/tests/test_auto_Cat.py | 3 +- .../afni/tests/test_auto_CatMatvec.py | 3 +- .../afni/tests/test_auto_CenterMass.py | 11 +- .../afni/tests/test_auto_ClipLevel.py | 1 + .../afni/tests/test_auto_ConvertDset.py | 4 +- .../interfaces/afni/tests/test_auto_Copy.py | 4 +- .../afni/tests/test_auto_Deconvolve.py | 38 +++++-- .../afni/tests/test_auto_DegreeCentrality.py | 11 +- .../afni/tests/test_auto_Despike.py | 4 +- .../afni/tests/test_auto_Detrend.py | 4 +- nipype/interfaces/afni/tests/test_auto_Dot.py | 8 +- nipype/interfaces/afni/tests/test_auto_ECM.py | 9 +- .../interfaces/afni/tests/test_auto_Edge3.py | 4 +- .../interfaces/afni/tests/test_auto_Eval.py | 11 +- .../interfaces/afni/tests/test_auto_FWHMx.py | 17 ++- nipype/interfaces/afni/tests/test_auto_Fim.py | 5 +- .../afni/tests/test_auto_Fourier.py | 4 +- .../interfaces/afni/tests/test_auto_GCOR.py | 2 + .../interfaces/afni/tests/test_auto_Hist.py | 12 +- .../interfaces/afni/tests/test_auto_LFCD.py | 9 +- .../afni/tests/test_auto_LocalBistat.py | 4 +- .../afni/tests/test_auto_Localstat.py | 3 +- .../afni/tests/test_auto_MaskTool.py | 3 +- .../afni/tests/test_auto_Maskave.py | 5 +- .../interfaces/afni/tests/test_auto_Means.py | 5 +- .../interfaces/afni/tests/test_auto_Merge.py | 3 +- 
.../interfaces/afni/tests/test_auto_Notes.py | 8 +- .../afni/tests/test_auto_NwarpAdjust.py | 3 +- .../afni/tests/test_auto_NwarpApply.py | 3 +- .../afni/tests/test_auto_NwarpCat.py | 3 +- .../afni/tests/test_auto_OneDToolPy.py | 4 +- .../afni/tests/test_auto_OutlierCount.py | 8 +- .../afni/tests/test_auto_QualityIndex.py | 5 +- .../interfaces/afni/tests/test_auto_Qwarp.py | 23 ++-- .../afni/tests/test_auto_QwarpPlusMinus.py | 21 +++- .../afni/tests/test_auto_ROIStats.py | 10 +- .../interfaces/afni/tests/test_auto_ReHo.py | 10 +- .../interfaces/afni/tests/test_auto_Refit.py | 8 +- .../afni/tests/test_auto_Remlfit.py | 103 +++++++++++++----- .../afni/tests/test_auto_Resample.py | 4 +- .../afni/tests/test_auto_Retroicor.py | 8 +- .../afni/tests/test_auto_SVMTest.py | 9 +- .../afni/tests/test_auto_SVMTrain.py | 21 +++- nipype/interfaces/afni/tests/test_auto_Seg.py | 3 +- .../afni/tests/test_auto_SkullStrip.py | 4 +- .../afni/tests/test_auto_Synthesize.py | 5 +- .../interfaces/afni/tests/test_auto_TCat.py | 3 +- .../afni/tests/test_auto_TCatSubBrick.py | 3 +- .../afni/tests/test_auto_TCorr1D.py | 5 +- .../afni/tests/test_auto_TCorrMap.py | 47 +++++--- .../afni/tests/test_auto_TCorrelate.py | 5 +- .../interfaces/afni/tests/test_auto_TNorm.py | 4 +- .../afni/tests/test_auto_TProject.py | 24 +++- .../interfaces/afni/tests/test_auto_TShift.py | 6 +- .../interfaces/afni/tests/test_auto_TStat.py | 9 +- .../interfaces/afni/tests/test_auto_To3D.py | 3 +- .../interfaces/afni/tests/test_auto_Undump.py | 9 +- .../afni/tests/test_auto_Unifize.py | 11 +- .../interfaces/afni/tests/test_auto_Volreg.py | 14 ++- .../interfaces/afni/tests/test_auto_Warp.py | 21 +++- .../interfaces/afni/tests/test_auto_ZCutUp.py | 4 +- .../interfaces/afni/tests/test_auto_Zcat.py | 3 +- .../afni/tests/test_auto_Zeropad.py | 4 +- .../interfaces/ants/tests/test_auto_ANTS.py | 10 +- .../ants/tests/test_auto_AffineInitializer.py | 5 +- .../ants/tests/test_auto_AntsJointFusion.py | 8 +- .../ants/tests/test_auto_ApplyTransforms.py | 4 +- .../test_auto_ApplyTransformsToPoints.py | 3 +- .../ants/tests/test_auto_Atropos.py | 4 +- .../tests/test_auto_AverageAffineTransform.py | 3 +- .../ants/tests/test_auto_AverageImages.py | 3 +- .../ants/tests/test_auto_BrainExtraction.py | 44 ++++---- .../tests/test_auto_ComposeMultiTransform.py | 4 +- .../tests/test_auto_CompositeTransformUtil.py | 7 +- .../test_auto_ConvertScalarImageToRGB.py | 4 +- .../ants/tests/test_auto_CorticalThickness.py | 35 +++--- ...est_auto_CreateJacobianDeterminantImage.py | 4 +- .../ants/tests/test_auto_CreateTiledMosaic.py | 9 +- .../ants/tests/test_auto_DenoiseImage.py | 7 +- .../ants/tests/test_auto_GenWarpFields.py | 12 +- .../ants/tests/test_auto_JointFusion.py | 8 +- .../ants/tests/test_auto_KellyKapowski.py | 20 +++- .../ants/tests/test_auto_LabelGeometry.py | 4 +- .../tests/test_auto_LaplacianThickness.py | 5 +- .../tests/test_auto_MeasureImageSimilarity.py | 20 +++- .../ants/tests/test_auto_MultiplyImages.py | 4 +- .../tests/test_auto_N4BiasFieldCorrection.py | 20 +++- .../ants/tests/test_auto_Registration.py | 22 ++-- .../tests/test_auto_RegistrationSynQuick.py | 10 +- .../test_auto_WarpImageMultiTransform.py | 6 +- ..._auto_WarpTimeSeriesImageMultiTransform.py | 4 +- .../ants/tests/test_auto_antsIntroduction.py | 12 +- .../tests/test_auto_buildtemplateparallel.py | 2 +- .../brainsuite/tests/test_auto_BDP.py | 52 +++++++-- .../brainsuite/tests/test_auto_Bfc.py | 18 ++- .../brainsuite/tests/test_auto_Bse.py | 19 +++- .../brainsuite/tests/test_auto_Cerebro.py | 20 
+++- .../brainsuite/tests/test_auto_Cortex.py | 5 +- .../brainsuite/tests/test_auto_Dewisp.py | 4 +- .../brainsuite/tests/test_auto_Dfs.py | 9 +- .../brainsuite/tests/test_auto_Hemisplit.py | 19 +++- .../brainsuite/tests/test_auto_Pialmesh.py | 6 +- .../brainsuite/tests/test_auto_Pvc.py | 12 +- .../brainsuite/tests/test_auto_Scrubmask.py | 4 +- .../brainsuite/tests/test_auto_Skullfinder.py | 5 +- .../brainsuite/tests/test_auto_Tca.py | 4 +- .../camino/tests/test_auto_AnalyzeHeader.py | 10 +- .../tests/test_auto_ComputeEigensystem.py | 3 +- .../test_auto_ComputeFractionalAnisotropy.py | 4 +- .../tests/test_auto_ComputeMeanDiffusivity.py | 5 +- .../tests/test_auto_ComputeTensorTrace.py | 4 +- .../camino/tests/test_auto_Conmat.py | 13 ++- .../camino/tests/test_auto_DT2NIfTI.py | 9 +- .../camino/tests/test_auto_DTIFit.py | 9 +- .../camino/tests/test_auto_DTLUTGen.py | 3 +- .../camino/tests/test_auto_DTMetric.py | 9 +- .../camino/tests/test_auto_FSL2Scheme.py | 4 +- .../camino/tests/test_auto_Image2Voxel.py | 3 +- .../camino/tests/test_auto_ImageStats.py | 3 +- .../camino/tests/test_auto_LinRecon.py | 10 +- .../interfaces/camino/tests/test_auto_MESD.py | 9 +- .../camino/tests/test_auto_ModelFit.py | 29 ++++- .../camino/tests/test_auto_NIfTIDT2Camino.py | 18 ++- .../camino/tests/test_auto_PicoPDFs.py | 3 +- .../camino/tests/test_auto_ProcStreamlines.py | 33 ++++-- .../camino/tests/test_auto_QBallMX.py | 3 +- .../camino/tests/test_auto_SFLUTGen.py | 6 +- .../camino/tests/test_auto_SFPICOCalibData.py | 6 +- .../camino/tests/test_auto_SFPeaks.py | 8 +- .../camino/tests/test_auto_Shredder.py | 3 +- .../camino/tests/test_auto_Track.py | 11 +- .../camino/tests/test_auto_TrackBallStick.py | 11 +- .../camino/tests/test_auto_TrackBayesDirac.py | 17 ++- .../tests/test_auto_TrackBedpostxDeter.py | 11 +- .../tests/test_auto_TrackBedpostxProba.py | 11 +- .../camino/tests/test_auto_TrackBootstrap.py | 17 ++- .../camino/tests/test_auto_TrackDT.py | 11 +- .../camino/tests/test_auto_TrackPICo.py | 11 +- .../camino/tests/test_auto_TractShredder.py | 3 +- .../camino/tests/test_auto_VtkStreamlines.py | 6 +- .../tests/test_auto_Camino2Trackvis.py | 6 +- .../tests/test_auto_Trackvis2Camino.py | 5 +- .../cmtk/tests/test_auto_AverageNetworks.py | 10 +- .../cmtk/tests/test_auto_CFFConverter.py | 7 +- .../cmtk/tests/test_auto_CreateMatrix.py | 80 +++++++++----- .../cmtk/tests/test_auto_CreateNodes.py | 17 ++- .../cmtk/tests/test_auto_MergeCNetworks.py | 7 +- .../tests/test_auto_NetworkBasedStatistic.py | 10 +- .../cmtk/tests/test_auto_NetworkXMetrics.py | 54 ++++++--- .../cmtk/tests/test_auto_Parcellate.py | 21 ++-- .../interfaces/cmtk/tests/test_auto_ROIGen.py | 24 +++- .../tests/test_auto_DTIRecon.py | 31 +++--- .../tests/test_auto_DTITracker.py | 14 ++- .../tests/test_auto_HARDIMat.py | 24 +++- .../tests/test_auto_ODFRecon.py | 12 +- .../tests/test_auto_ODFTracker.py | 15 ++- .../tests/test_auto_SplineFilter.py | 4 +- .../tests/test_auto_TrackMerge.py | 3 +- .../dipy/tests/test_auto_APMQball.py | 19 +++- nipype/interfaces/dipy/tests/test_auto_CSD.py | 25 +++-- nipype/interfaces/dipy/tests/test_auto_DTI.py | 29 +++-- .../dipy/tests/test_auto_Denoise.py | 13 ++- .../tests/test_auto_DipyDiffusionInterface.py | 15 ++- .../tests/test_auto_EstimateResponseSH.py | 36 ++++-- .../dipy/tests/test_auto_RESTORE.py | 33 ++++-- .../dipy/tests/test_auto_Resample.py | 7 +- .../tests/test_auto_SimulateMultiTensor.py | 41 ++++--- .../tests/test_auto_StreamlineTractography.py | 23 ++-- .../dipy/tests/test_auto_TensorMode.py | 19 +++- 
.../dipy/tests/test_auto_TrackDensityMap.py | 14 ++- .../dtitk/tests/test_auto_AffScalarVol.py | 6 +- .../tests/test_auto_AffSymTensor3DVol.py | 6 +- .../dtitk/tests/test_auto_Affine.py | 7 +- .../dtitk/tests/test_auto_AffineTask.py | 7 +- .../dtitk/tests/test_auto_BinThresh.py | 4 +- .../dtitk/tests/test_auto_BinThreshTask.py | 4 +- .../dtitk/tests/test_auto_ComposeXfm.py | 5 +- .../dtitk/tests/test_auto_ComposeXfmTask.py | 5 +- .../dtitk/tests/test_auto_Diffeo.py | 7 +- .../dtitk/tests/test_auto_DiffeoScalarVol.py | 6 +- .../tests/test_auto_DiffeoSymTensor3DVol.py | 6 +- .../dtitk/tests/test_auto_DiffeoTask.py | 7 +- .../interfaces/dtitk/tests/test_auto_Rigid.py | 7 +- .../dtitk/tests/test_auto_RigidTask.py | 7 +- .../dtitk/tests/test_auto_SVAdjustVoxSp.py | 5 +- .../tests/test_auto_SVAdjustVoxSpTask.py | 5 +- .../dtitk/tests/test_auto_SVResample.py | 5 +- .../dtitk/tests/test_auto_SVResampleTask.py | 5 +- .../tests/test_auto_TVAdjustOriginTask.py | 5 +- .../dtitk/tests/test_auto_TVAdjustVoxSp.py | 5 +- .../tests/test_auto_TVAdjustVoxSpTask.py | 5 +- .../dtitk/tests/test_auto_TVResample.py | 5 +- .../dtitk/tests/test_auto_TVResampleTask.py | 5 +- .../dtitk/tests/test_auto_TVtool.py | 4 +- .../dtitk/tests/test_auto_TVtoolTask.py | 4 +- .../dtitk/tests/test_auto_affScalarVolTask.py | 6 +- .../tests/test_auto_affSymTensor3DVolTask.py | 6 +- .../tests/test_auto_diffeoScalarVolTask.py | 6 +- .../test_auto_diffeoSymTensor3DVolTask.py | 6 +- .../elastix/tests/test_auto_AnalyzeWarp.py | 12 +- .../elastix/tests/test_auto_ApplyWarp.py | 4 +- .../elastix/tests/test_auto_EditTransform.py | 11 +- .../elastix/tests/test_auto_PointsWarp.py | 4 +- .../elastix/tests/test_auto_Registration.py | 19 +++- .../tests/test_auto_AddXFormToHeader.py | 5 +- .../freesurfer/tests/test_auto_Aparc2Aseg.py | 64 ++++++++--- .../freesurfer/tests/test_auto_Apas2Aseg.py | 8 +- .../freesurfer/tests/test_auto_ApplyMask.py | 20 +++- .../tests/test_auto_ApplyVolTransform.py | 15 ++- .../freesurfer/tests/test_auto_Binarize.py | 16 ++- .../freesurfer/tests/test_auto_CALabel.py | 16 ++- .../freesurfer/tests/test_auto_CANormalize.py | 21 +++- .../freesurfer/tests/test_auto_CARegister.py | 13 ++- .../test_auto_CheckTalairachAlignment.py | 1 + .../freesurfer/tests/test_auto_Concatenate.py | 13 ++- .../tests/test_auto_ConcatenateLTA.py | 4 +- .../freesurfer/tests/test_auto_Contrast.py | 26 +++-- .../freesurfer/tests/test_auto_Curvature.py | 5 +- .../tests/test_auto_CurvatureStats.py | 10 +- .../tests/test_auto_DICOMConvert.py | 2 +- .../freesurfer/tests/test_auto_EMRegister.py | 15 ++- .../tests/test_auto_EditWMwithAseg.py | 6 +- .../freesurfer/tests/test_auto_EulerNumber.py | 3 +- .../tests/test_auto_ExtractMainComponent.py | 4 +- .../freesurfer/tests/test_auto_FitMSParams.py | 6 +- .../freesurfer/tests/test_auto_FixTopology.py | 22 +++- .../tests/test_auto_FuseSegmentations.py | 3 +- .../freesurfer/tests/test_auto_GLMFit.py | 49 ++++++--- .../freesurfer/tests/test_auto_ImageInfo.py | 3 +- .../freesurfer/tests/test_auto_Jacobian.py | 5 +- .../freesurfer/tests/test_auto_LTAConvert.py | 25 +++-- .../freesurfer/tests/test_auto_Label2Annot.py | 12 +- .../freesurfer/tests/test_auto_Label2Label.py | 24 +++- .../freesurfer/tests/test_auto_Label2Vol.py | 18 ++- .../tests/test_auto_MNIBiasCorrection.py | 14 ++- .../freesurfer/tests/test_auto_MPRtoMNI305.py | 8 +- .../freesurfer/tests/test_auto_MRIConvert.py | 42 +++++-- .../freesurfer/tests/test_auto_MRICoreg.py | 8 +- .../freesurfer/tests/test_auto_MRIFill.py | 21 +++- 
.../tests/test_auto_MRIMarchingCubes.py | 4 +- .../freesurfer/tests/test_auto_MRIPretess.py | 5 +- .../freesurfer/tests/test_auto_MRISPreproc.py | 5 +- .../tests/test_auto_MRISPreprocReconAll.py | 16 ++- .../tests/test_auto_MRITessellate.py | 4 +- .../freesurfer/tests/test_auto_MRIsCALabel.py | 20 +++- .../freesurfer/tests/test_auto_MRIsCalc.py | 5 +- .../freesurfer/tests/test_auto_MRIsCombine.py | 3 +- .../freesurfer/tests/test_auto_MRIsConvert.py | 34 ++++-- .../freesurfer/tests/test_auto_MRIsExpand.py | 3 +- .../freesurfer/tests/test_auto_MRIsInflate.py | 11 +- .../freesurfer/tests/test_auto_MS_LDA.py | 4 +- .../tests/test_auto_MakeAverageSubject.py | 1 + .../tests/test_auto_MakeSurfaces.py | 46 +++++--- .../freesurfer/tests/test_auto_Normalize.py | 14 ++- .../tests/test_auto_OneSampleTTest.py | 49 ++++++--- .../freesurfer/tests/test_auto_Paint.py | 5 +- .../tests/test_auto_ParcellationStats.py | 56 +++++++--- .../tests/test_auto_ParseDICOMDir.py | 3 +- .../freesurfer/tests/test_auto_ReconAll.py | 62 +++++++++-- .../freesurfer/tests/test_auto_Register.py | 11 +- .../tests/test_auto_RegisterAVItoTalairach.py | 9 +- .../tests/test_auto_RelabelHypointensities.py | 10 +- .../tests/test_auto_RemoveIntersection.py | 4 +- .../freesurfer/tests/test_auto_RemoveNeck.py | 6 +- .../freesurfer/tests/test_auto_Resample.py | 4 +- .../tests/test_auto_RobustRegister.py | 33 ++++-- .../tests/test_auto_RobustTemplate.py | 3 +- .../tests/test_auto_SampleToSurface.py | 15 ++- .../freesurfer/tests/test_auto_SegStats.py | 37 +++++-- .../tests/test_auto_SegStatsReconAll.py | 76 ++++++++++--- .../freesurfer/tests/test_auto_SegmentCC.py | 12 +- .../freesurfer/tests/test_auto_SegmentWM.py | 4 +- .../freesurfer/tests/test_auto_Smooth.py | 5 +- .../tests/test_auto_SmoothTessellation.py | 14 ++- .../freesurfer/tests/test_auto_Sphere.py | 9 +- .../tests/test_auto_SphericalAverage.py | 9 +- .../tests/test_auto_Surface2VolTransform.py | 13 ++- .../tests/test_auto_SurfaceSmooth.py | 4 +- .../tests/test_auto_SurfaceSnapshots.py | 14 ++- .../tests/test_auto_SurfaceTransform.py | 5 +- .../tests/test_auto_SynthesizeFLASH.py | 5 +- .../tests/test_auto_TalairachAVI.py | 2 + .../freesurfer/tests/test_auto_TalairachQC.py | 7 +- .../freesurfer/tests/test_auto_Tkregister2.py | 24 +++- .../tests/test_auto_UnpackSDICOMDir.py | 12 +- .../freesurfer/tests/test_auto_VolumeMask.py | 32 ++++-- .../tests/test_auto_WatershedSkullStrip.py | 6 +- .../fsl/tests/test_auto_AR1Image.py | 4 +- .../fsl/tests/test_auto_AccuracyTester.py | 1 + .../fsl/tests/test_auto_ApplyMask.py | 5 +- .../fsl/tests/test_auto_ApplyTOPUP.py | 6 +- .../fsl/tests/test_auto_ApplyWarp.py | 25 ++++- .../fsl/tests/test_auto_ApplyXFM.py | 36 ++++-- .../interfaces/fsl/tests/test_auto_AvScale.py | 2 + .../interfaces/fsl/tests/test_auto_B0Calc.py | 4 +- .../fsl/tests/test_auto_BEDPOSTX5.py | 26 +++-- nipype/interfaces/fsl/tests/test_auto_BET.py | 25 +++-- .../fsl/tests/test_auto_BinaryMaths.py | 5 +- .../fsl/tests/test_auto_ChangeDataType.py | 4 +- .../fsl/tests/test_auto_Classifier.py | 5 +- .../interfaces/fsl/tests/test_auto_Cleaner.py | 3 +- .../interfaces/fsl/tests/test_auto_Cluster.py | 32 ++++-- .../interfaces/fsl/tests/test_auto_Complex.py | 21 +++- .../fsl/tests/test_auto_ContrastMgr.py | 9 +- .../fsl/tests/test_auto_ConvertWarp.py | 34 ++++-- .../fsl/tests/test_auto_ConvertXFM.py | 5 +- .../fsl/tests/test_auto_CopyGeom.py | 4 +- .../interfaces/fsl/tests/test_auto_DTIFit.py | 38 ++++--- .../fsl/tests/test_auto_DilateImage.py | 5 +- 
.../fsl/tests/test_auto_DistanceMap.py | 11 +- .../fsl/tests/test_auto_DualRegression.py | 3 + .../fsl/tests/test_auto_EPIDeWarp.py | 20 +++- nipype/interfaces/fsl/tests/test_auto_Eddy.py | 40 +++++-- .../fsl/tests/test_auto_EddyCorrect.py | 4 +- .../fsl/tests/test_auto_EddyQuad.py | 29 +++-- .../interfaces/fsl/tests/test_auto_EpiReg.py | 56 ++++++---- .../fsl/tests/test_auto_ErodeImage.py | 5 +- .../fsl/tests/test_auto_ExtractROI.py | 4 +- nipype/interfaces/fsl/tests/test_auto_FAST.py | 21 +++- nipype/interfaces/fsl/tests/test_auto_FEAT.py | 1 + .../fsl/tests/test_auto_FEATModel.py | 11 +- .../fsl/tests/test_auto_FEATRegister.py | 7 +- .../interfaces/fsl/tests/test_auto_FIRST.py | 7 +- .../interfaces/fsl/tests/test_auto_FLAMEO.py | 20 +++- .../interfaces/fsl/tests/test_auto_FLIRT.py | 36 ++++-- .../interfaces/fsl/tests/test_auto_FNIRT.py | 36 ++++-- .../fsl/tests/test_auto_FSLXCommand.py | 10 +- .../interfaces/fsl/tests/test_auto_FUGUE.py | 45 ++++++-- .../fsl/tests/test_auto_FilterRegressor.py | 10 +- .../fsl/tests/test_auto_FindTheBiggest.py | 7 +- nipype/interfaces/fsl/tests/test_auto_GLM.py | 70 +++++++++--- .../fsl/tests/test_auto_ICA_AROMA.py | 9 +- .../fsl/tests/test_auto_ImageMaths.py | 10 +- .../fsl/tests/test_auto_ImageMeants.py | 9 +- .../fsl/tests/test_auto_ImageStats.py | 6 +- .../interfaces/fsl/tests/test_auto_InvWarp.py | 5 +- .../fsl/tests/test_auto_IsotropicSmooth.py | 4 +- .../interfaces/fsl/tests/test_auto_L2Model.py | 6 +- .../interfaces/fsl/tests/test_auto_MCFLIRT.py | 22 ++-- .../interfaces/fsl/tests/test_auto_MELODIC.py | 45 ++++++-- .../fsl/tests/test_auto_MakeDyadicVectors.py | 8 +- .../fsl/tests/test_auto_MathsCommand.py | 4 +- .../fsl/tests/test_auto_MaxImage.py | 4 +- .../fsl/tests/test_auto_MaxnImage.py | 4 +- .../fsl/tests/test_auto_MeanImage.py | 4 +- .../fsl/tests/test_auto_MedianImage.py | 4 +- .../interfaces/fsl/tests/test_auto_Merge.py | 3 +- .../fsl/tests/test_auto_MinImage.py | 4 +- .../fsl/tests/test_auto_MotionOutliers.py | 15 ++- .../fsl/tests/test_auto_MultiImageMaths.py | 4 +- .../tests/test_auto_MultipleRegressDesign.py | 8 +- .../interfaces/fsl/tests/test_auto_Overlay.py | 6 +- .../interfaces/fsl/tests/test_auto_PRELUDE.py | 14 ++- .../fsl/tests/test_auto_PercentileImage.py | 4 +- .../fsl/tests/test_auto_PlotMotionParams.py | 3 +- .../fsl/tests/test_auto_PlotTimeSeries.py | 8 +- .../fsl/tests/test_auto_PowerSpectrum.py | 4 +- .../fsl/tests/test_auto_PrepareFieldmap.py | 5 +- .../fsl/tests/test_auto_ProbTrackX.py | 45 ++++++-- .../fsl/tests/test_auto_ProbTrackX2.py | 75 +++++++++---- .../fsl/tests/test_auto_Randomise.py | 18 ++- .../fsl/tests/test_auto_Reorient2Std.py | 4 +- .../fsl/tests/test_auto_RobustFOV.py | 7 +- nipype/interfaces/fsl/tests/test_auto_SMM.py | 8 +- .../interfaces/fsl/tests/test_auto_SUSAN.py | 4 +- .../interfaces/fsl/tests/test_auto_SigLoss.py | 9 +- .../interfaces/fsl/tests/test_auto_Slice.py | 1 + .../fsl/tests/test_auto_SliceTimer.py | 14 ++- .../interfaces/fsl/tests/test_auto_Slicer.py | 6 +- .../interfaces/fsl/tests/test_auto_Smooth.py | 4 +- .../fsl/tests/test_auto_SmoothEstimate.py | 3 + .../fsl/tests/test_auto_SpatialFilter.py | 5 +- .../interfaces/fsl/tests/test_auto_Split.py | 1 + .../fsl/tests/test_auto_StdImage.py | 4 +- .../fsl/tests/test_auto_SwapDimensions.py | 4 +- .../interfaces/fsl/tests/test_auto_TOPUP.py | 18 ++- .../fsl/tests/test_auto_TemporalFilter.py | 4 +- .../fsl/tests/test_auto_Threshold.py | 4 +- .../fsl/tests/test_auto_TractSkeleton.py | 26 +++-- .../fsl/tests/test_auto_Training.py | 2 +- 
.../fsl/tests/test_auto_UnaryMaths.py | 4 +- .../interfaces/fsl/tests/test_auto_VecReg.py | 35 ++++-- .../fsl/tests/test_auto_WarpPoints.py | 8 +- .../fsl/tests/test_auto_WarpPointsFromStd.py | 7 +- .../fsl/tests/test_auto_WarpPointsToStd.py | 13 ++- .../fsl/tests/test_auto_WarpUtils.py | 12 +- .../fsl/tests/test_auto_XFibres5.py | 15 ++- .../minc/tests/test_auto_Average.py | 3 +- .../interfaces/minc/tests/test_auto_BBox.py | 4 +- .../interfaces/minc/tests/test_auto_Beast.py | 2 +- .../minc/tests/test_auto_BestLinReg.py | 8 +- .../minc/tests/test_auto_BigAverage.py | 6 +- .../interfaces/minc/tests/test_auto_Blob.py | 4 +- .../interfaces/minc/tests/test_auto_Blur.py | 14 ++- .../interfaces/minc/tests/test_auto_Calc.py | 3 +- .../minc/tests/test_auto_Convert.py | 4 +- .../interfaces/minc/tests/test_auto_Copy.py | 4 +- .../interfaces/minc/tests/test_auto_Dump.py | 4 +- .../minc/tests/test_auto_Extract.py | 4 +- .../minc/tests/test_auto_Gennlxfm.py | 10 +- .../interfaces/minc/tests/test_auto_Math.py | 3 +- .../interfaces/minc/tests/test_auto_NlpFit.py | 10 +- .../interfaces/minc/tests/test_auto_Norm.py | 6 +- nipype/interfaces/minc/tests/test_auto_Pik.py | 4 +- .../minc/tests/test_auto_Resample.py | 4 +- .../minc/tests/test_auto_Reshape.py | 3 +- .../interfaces/minc/tests/test_auto_ToEcat.py | 4 +- .../interfaces/minc/tests/test_auto_ToRaw.py | 4 +- .../minc/tests/test_auto_VolSymm.py | 12 +- .../minc/tests/test_auto_Volcentre.py | 4 +- .../interfaces/minc/tests/test_auto_Voliso.py | 4 +- .../interfaces/minc/tests/test_auto_Volpad.py | 4 +- .../interfaces/minc/tests/test_auto_XfmAvg.py | 5 +- .../minc/tests/test_auto_XfmConcat.py | 3 +- .../minc/tests/test_auto_XfmInvert.py | 5 +- .../test_auto_JistBrainMgdmSegmentation.py | 38 +++++-- ...est_auto_JistBrainMp2rageDuraEstimation.py | 12 +- ...est_auto_JistBrainMp2rageSkullStripping.py | 28 +++-- .../test_auto_JistBrainPartialVolumeFilter.py | 7 +- ...est_auto_JistCortexSurfaceMeshInflation.py | 9 +- .../test_auto_JistIntensityMp2rageMasking.py | 23 ++-- .../test_auto_JistLaminarProfileCalculator.py | 12 +- .../test_auto_JistLaminarProfileGeometry.py | 7 +- .../test_auto_JistLaminarProfileSampling.py | 19 +++- .../test_auto_JistLaminarROIAveraging.py | 17 ++- ...test_auto_JistLaminarVolumetricLayering.py | 16 ++- ...test_auto_MedicAlgorithmImageCalculator.py | 12 +- .../test_auto_MedicAlgorithmLesionToads.py | 48 +++++--- .../test_auto_MedicAlgorithmMipavReorient.py | 5 +- .../mipav/tests/test_auto_MedicAlgorithmN3.py | 9 +- .../test_auto_MedicAlgorithmSPECTRE2010.py | 28 +++-- .../mipav/tests/test_auto_RandomVol.py | 2 +- .../mne/tests/test_auto_WatershedBEM.py | 21 +++- ..._auto_ConstrainedSphericalDeconvolution.py | 9 +- .../test_auto_DWI2SphericalHarmonicsImage.py | 5 +- .../mrtrix/tests/test_auto_DWI2Tensor.py | 4 +- ...est_auto_DiffusionTensorStreamlineTrack.py | 9 +- .../tests/test_auto_Directions2Amplitude.py | 9 +- .../mrtrix/tests/test_auto_Erode.py | 4 +- .../tests/test_auto_EstimateResponseForSH.py | 6 +- .../mrtrix/tests/test_auto_FSL2MRTrix.py | 17 ++- .../mrtrix/tests/test_auto_FilterTracks.py | 6 +- .../mrtrix/tests/test_auto_FindShPeaks.py | 10 +- .../tests/test_auto_GenerateDirections.py | 3 +- .../test_auto_GenerateWhiteMatterMask.py | 6 +- .../mrtrix/tests/test_auto_MRConvert.py | 4 +- .../mrtrix/tests/test_auto_MRMultiply.py | 3 +- .../mrtrix/tests/test_auto_MRTransform.py | 6 +- .../mrtrix/tests/test_auto_MRTrix2TrackVis.py | 14 ++- .../mrtrix/tests/test_auto_MRTrixInfo.py | 1 + .../mrtrix/tests/test_auto_MedianFilter3D.py 
| 4 +- ...cSphericallyDeconvolutedStreamlineTrack.py | 8 +- ..._SphericallyDeconvolutedStreamlineTrack.py | 8 +- .../mrtrix/tests/test_auto_StreamlineTrack.py | 8 +- .../test_auto_Tensor2ApparentDiffusion.py | 4 +- .../test_auto_Tensor2FractionalAnisotropy.py | 4 +- .../mrtrix/tests/test_auto_Tensor2Vector.py | 4 +- .../mrtrix/tests/test_auto_Threshold.py | 4 +- .../mrtrix/tests/test_auto_Tracks2Prob.py | 5 +- .../mrtrix3/tests/test_auto_ACTPrepareFSL.py | 4 +- .../mrtrix3/tests/test_auto_BrainMask.py | 16 ++- .../tests/test_auto_BuildConnectome.py | 15 ++- .../mrtrix3/tests/test_auto_ComputeTDI.py | 24 +++- .../mrtrix3/tests/test_auto_DWIBiasCorrect.py | 30 +++-- .../mrtrix3/tests/test_auto_DWIDenoise.py | 24 +++- .../mrtrix3/tests/test_auto_DWIExtract.py | 16 ++- .../mrtrix3/tests/test_auto_EstimateFOD.py | 44 ++++++-- .../mrtrix3/tests/test_auto_FitTensor.py | 21 +++- .../mrtrix3/tests/test_auto_Generate5tt.py | 16 ++- .../mrtrix3/tests/test_auto_LabelConfig.py | 30 ++++- .../mrtrix3/tests/test_auto_LabelConvert.py | 11 +- .../mrtrix3/tests/test_auto_MRConvert.py | 16 ++- .../mrtrix3/tests/test_auto_MRDeGibbs.py | 16 ++- .../mrtrix3/tests/test_auto_MRMath.py | 16 ++- .../mrtrix3/tests/test_auto_Mesh2PVE.py | 10 +- .../tests/test_auto_ReplaceFSwithFIRST.py | 6 +- .../mrtrix3/tests/test_auto_ResponseSD.py | 37 +++++-- .../mrtrix3/tests/test_auto_TCK2VTK.py | 14 ++- .../mrtrix3/tests/test_auto_TensorMetrics.py | 34 ++++-- .../mrtrix3/tests/test_auto_Tractography.py | 40 +++++-- .../niftyfit/tests/test_auto_FitQt1.py | 24 ++-- .../niftyreg/tests/test_auto_RegAladin.py | 23 +++- .../niftyreg/tests/test_auto_RegAverage.py | 7 +- .../niftyreg/tests/test_auto_RegF3D.py | 32 ++++-- .../niftyreg/tests/test_auto_RegJacobian.py | 9 +- .../niftyreg/tests/test_auto_RegMeasure.py | 5 +- .../niftyreg/tests/test_auto_RegResample.py | 10 +- .../niftyreg/tests/test_auto_RegTools.py | 14 ++- .../niftyreg/tests/test_auto_RegTransform.py | 15 ++- .../niftyseg/tests/test_auto_BinaryMaths.py | 5 +- .../tests/test_auto_BinaryMathsInteger.py | 4 +- .../niftyseg/tests/test_auto_BinaryStats.py | 3 + .../niftyseg/tests/test_auto_CalcTopNCC.py | 6 +- .../interfaces/niftyseg/tests/test_auto_EM.py | 16 ++- .../niftyseg/tests/test_auto_FillLesions.py | 10 +- .../niftyseg/tests/test_auto_LabelFusion.py | 16 ++- .../niftyseg/tests/test_auto_MathsCommand.py | 4 +- .../niftyseg/tests/test_auto_Merge.py | 4 +- .../niftyseg/tests/test_auto_PatchMatch.py | 6 +- .../niftyseg/tests/test_auto_StatsCommand.py | 2 + .../niftyseg/tests/test_auto_TupleMaths.py | 6 +- .../niftyseg/tests/test_auto_UnaryMaths.py | 4 +- .../niftyseg/tests/test_auto_UnaryStats.py | 2 + .../nipy/tests/test_auto_ComputeMask.py | 9 +- .../nipy/tests/test_auto_EstimateContrast.py | 10 +- .../interfaces/nipy/tests/test_auto_FitGLM.py | 6 +- .../nipy/tests/test_auto_Similarity.py | 14 ++- .../interfaces/nipy/tests/test_auto_Trim.py | 9 +- .../tests/test_auto_CoherenceAnalyzer.py | 17 +-- ...t_auto_BRAINSPosteriorToContinuousClass.py | 37 +++++-- .../brains/tests/test_auto_BRAINSTalairach.py | 9 +- .../tests/test_auto_BRAINSTalairachMask.py | 17 ++- .../tests/test_auto_GenerateEdgeMapImage.py | 9 +- .../tests/test_auto_GeneratePurePlugMask.py | 2 +- .../test_auto_HistogramMatchingFilter.py | 22 +++- .../brains/tests/test_auto_SimilarityIndex.py | 15 ++- .../diffusion/tests/test_auto_DWIConvert.py | 28 +++-- .../tests/test_auto_compareTractInclusion.py | 10 +- .../diffusion/tests/test_auto_dtiaverage.py | 2 +- .../diffusion/tests/test_auto_dtiestim.py | 23 ++-- 
.../diffusion/tests/test_auto_dtiprocess.py | 60 ++++++---- .../tests/test_auto_extractNrrdVectorIndex.py | 7 +- .../tests/test_auto_gtractAnisotropyMap.py | 7 +- .../tests/test_auto_gtractAverageBvalues.py | 7 +- .../tests/test_auto_gtractClipAnisotropy.py | 7 +- .../tests/test_auto_gtractCoRegAnatomy.py | 17 ++- .../tests/test_auto_gtractConcatDwi.py | 2 +- .../test_auto_gtractCopyImageOrientation.py | 12 +- .../tests/test_auto_gtractCoregBvalues.py | 14 ++- .../tests/test_auto_gtractCostFastMarching.py | 18 ++- .../tests/test_auto_gtractCreateGuideFiber.py | 7 +- .../test_auto_gtractFastMarchingTracking.py | 21 +++- .../tests/test_auto_gtractFiberTracking.py | 25 ++++- .../tests/test_auto_gtractImageConformity.py | 12 +- .../test_auto_gtractInvertBSplineTransform.py | 12 +- ...test_auto_gtractInvertDisplacementField.py | 12 +- .../test_auto_gtractInvertRigidTransform.py | 7 +- .../test_auto_gtractResampleAnisotropy.py | 17 ++- .../tests/test_auto_gtractResampleB0.py | 17 ++- .../test_auto_gtractResampleCodeImage.py | 17 ++- .../test_auto_gtractResampleDWIInPlace.py | 24 +++- .../tests/test_auto_gtractResampleFibers.py | 15 ++- .../diffusion/tests/test_auto_gtractTensor.py | 12 +- ...auto_gtractTransformToDisplacementField.py | 12 +- .../diffusion/tests/test_auto_maxcurvature.py | 7 +- .../tests/test_auto_UKFTractography.py | 19 +++- .../tests/test_auto_fiberprocess.py | 24 +++- .../tests/test_auto_fiberstats.py | 5 +- .../tests/test_auto_fibertrack.py | 12 +- .../filtering/tests/test_auto_CannyEdge.py | 7 +- ...to_CannySegmentationLevelSetImageFilter.py | 14 ++- .../filtering/tests/test_auto_DilateImage.py | 12 +- .../filtering/tests/test_auto_DilateMask.py | 12 +- .../filtering/tests/test_auto_DistanceMaps.py | 12 +- .../test_auto_DumpBinaryTrainingVectors.py | 10 +- .../filtering/tests/test_auto_ErodeImage.py | 12 +- .../tests/test_auto_FlippedDifference.py | 12 +- .../test_auto_GenerateBrainClippedImage.py | 12 +- .../test_auto_GenerateSummedGradientImage.py | 12 +- .../tests/test_auto_GenerateTestImage.py | 7 +- ...GradientAnisotropicDiffusionImageFilter.py | 7 +- .../tests/test_auto_HammerAttributeCreator.py | 15 ++- .../tests/test_auto_NeighborhoodMean.py | 12 +- .../tests/test_auto_NeighborhoodMedian.py | 12 +- .../tests/test_auto_STAPLEAnalysis.py | 2 +- .../test_auto_TextureFromNoiseImageFilter.py | 7 +- .../tests/test_auto_TextureMeasureFilter.py | 12 +- .../tests/test_auto_UnbiasedNonLocalMeans.py | 7 +- .../legacy/tests/test_auto_scalartransform.py | 14 ++- .../tests/test_auto_BRAINSDemonWarp.py | 35 ++++-- .../registration/tests/test_auto_BRAINSFit.py | 51 ++++++--- .../tests/test_auto_BRAINSResample.py | 22 +++- .../tests/test_auto_BRAINSResize.py | 7 +- .../test_auto_BRAINSTransformFromFiducials.py | 12 +- .../tests/test_auto_VBRAINSDemonWarp.py | 25 +++-- .../segmentation/tests/test_auto_BRAINSABC.py | 20 ++-- .../test_auto_BRAINSConstellationDetector.py | 53 ++++++--- ...BRAINSCreateLabelMapFromProbabilityMaps.py | 9 +- .../segmentation/tests/test_auto_BRAINSCut.py | 9 +- .../tests/test_auto_BRAINSMultiSTAPLE.py | 9 +- .../tests/test_auto_BRAINSROIAuto.py | 9 +- ...t_auto_BinaryMaskEditorBasedOnLandmarks.py | 12 +- .../segmentation/tests/test_auto_ESLR.py | 7 +- .../semtools/tests/test_auto_DWICompare.py | 10 +- .../tests/test_auto_DWISimpleCompare.py | 10 +- ...o_GenerateCsfClippedFromClassifiedImage.py | 7 +- .../tests/test_auto_BRAINSAlignMSP.py | 7 +- .../tests/test_auto_BRAINSClipInferior.py | 7 +- .../test_auto_BRAINSConstellationModeler.py | 7 +- 
.../tests/test_auto_BRAINSEyeDetector.py | 7 +- ...est_auto_BRAINSInitializedControlPoints.py | 7 +- .../test_auto_BRAINSLandmarkInitializer.py | 15 ++- .../test_auto_BRAINSLinearModelerEPCA.py | 5 +- .../tests/test_auto_BRAINSLmkTransform.py | 24 +++- .../utilities/tests/test_auto_BRAINSMush.py | 21 +++- .../tests/test_auto_BRAINSSnapShotWriter.py | 2 +- .../tests/test_auto_BRAINSTransformConvert.py | 14 ++- ...st_auto_BRAINSTrimForegroundInDirection.py | 7 +- .../tests/test_auto_FindCenterOfBrain.py | 22 ++-- ...auto_GenerateLabelMapFromProbabilityMap.py | 2 +- .../tests/test_auto_ImageRegionPlotter.py | 20 +++- .../tests/test_auto_JointHistogram.py | 20 +++- .../tests/test_auto_ShuffleVectorsModule.py | 7 +- .../utilities/tests/test_auto_fcsv_to_hdf5.py | 9 +- .../tests/test_auto_insertMidACPCpoint.py | 7 +- ...test_auto_landmarksConstellationAligner.py | 7 +- ...test_auto_landmarksConstellationWeights.py | 17 ++- .../diffusion/tests/test_auto_DTIexport.py | 7 +- .../diffusion/tests/test_auto_DTIimport.py | 7 +- .../test_auto_DWIJointRicianLMMSEFilter.py | 7 +- .../tests/test_auto_DWIRicianLMMSEFilter.py | 7 +- .../tests/test_auto_DWIToDTIEstimation.py | 16 ++- ..._auto_DiffusionTensorScalarMeasurements.py | 7 +- ...est_auto_DiffusionWeightedVolumeMasking.py | 11 +- .../tests/test_auto_ResampleDTIVolume.py | 22 +++- .../test_auto_TractographyLabelMapSeeding.py | 11 +- .../tests/test_auto_AddScalarVolumes.py | 8 +- .../tests/test_auto_CastScalarVolume.py | 7 +- .../tests/test_auto_CheckerBoardFilter.py | 8 +- ...test_auto_CurvatureAnisotropicDiffusion.py | 7 +- .../tests/test_auto_ExtractSkeleton.py | 7 +- .../test_auto_GaussianBlurImageFilter.py | 7 +- .../test_auto_GradientAnisotropicDiffusion.py | 7 +- .../test_auto_GrayscaleFillHoleImageFilter.py | 7 +- ...test_auto_GrayscaleGrindPeakImageFilter.py | 7 +- .../tests/test_auto_HistogramMatching.py | 8 +- .../tests/test_auto_ImageLabelCombine.py | 8 +- .../tests/test_auto_MaskScalarVolume.py | 8 +- .../tests/test_auto_MedianImageFilter.py | 7 +- .../tests/test_auto_MultiplyScalarVolumes.py | 8 +- .../test_auto_N4ITKBiasFieldCorrection.py | 19 +++- ...test_auto_ResampleScalarVectorDWIVolume.py | 22 +++- .../tests/test_auto_SubtractScalarVolumes.py | 8 +- .../tests/test_auto_ThresholdScalarVolume.py | 7 +- ...auto_VotingBinaryHoleFillingImageFilter.py | 7 +- ...est_auto_DWIUnbiasedNonLocalMeansFilter.py | 7 +- .../tests/test_auto_AffineRegistration.py | 11 +- ...test_auto_BSplineDeformableRegistration.py | 13 ++- .../test_auto_BSplineToDeformationField.py | 12 +- .../test_auto_ExpertAutomatedRegistration.py | 16 ++- .../tests/test_auto_LinearRegistration.py | 11 +- ..._auto_MultiResolutionAffineRegistration.py | 11 +- .../test_auto_OtsuThresholdImageFilter.py | 7 +- .../test_auto_OtsuThresholdSegmentation.py | 7 +- .../tests/test_auto_ResampleScalarVolume.py | 7 +- .../tests/test_auto_RigidRegistration.py | 11 +- .../test_auto_IntensityDifferenceMetric.py | 10 +- ..._auto_PETStandardUptakeValueComputation.py | 17 ++- .../tests/test_auto_ACPCTransform.py | 2 +- .../tests/test_auto_BRAINSDemonWarp.py | 35 ++++-- .../registration/tests/test_auto_BRAINSFit.py | 39 +++++-- .../tests/test_auto_BRAINSResample.py | 22 +++- .../tests/test_auto_FiducialRegistration.py | 2 +- .../tests/test_auto_VBRAINSDemonWarp.py | 25 +++-- .../tests/test_auto_BRAINSROIAuto.py | 9 +- .../tests/test_auto_EMSegmentCommandLine.py | 15 ++- .../test_auto_RobustStatisticsSegmenter.py | 8 +- ...st_auto_SimpleRegionGrowingSegmentation.py | 7 +- 
...test_auto_EMSegmentTransformToNewFormat.py | 7 +- .../tests/test_auto_GrayscaleModelMaker.py | 7 +- .../tests/test_auto_LabelMapSmoothing.py | 7 +- .../slicer/tests/test_auto_MergeModels.py | 8 +- .../slicer/tests/test_auto_ModelMaker.py | 6 +- .../slicer/tests/test_auto_ModelToLabelMap.py | 8 +- .../tests/test_auto_OrientScalarVolume.py | 7 +- .../tests/test_auto_ProbeVolumeWithModel.py | 8 +- .../spm/tests/test_auto_Analyze2nii.py | 7 +- .../spm/tests/test_auto_ApplyDeformations.py | 2 + .../test_auto_ApplyInverseDeformation.py | 7 +- .../spm/tests/test_auto_ApplyTransform.py | 13 ++- .../spm/tests/test_auto_CalcCoregAffine.py | 14 ++- .../spm/tests/test_auto_Coregister.py | 1 + .../interfaces/spm/tests/test_auto_DARTEL.py | 2 +- .../spm/tests/test_auto_DARTELNorm2MNI.py | 3 +- .../spm/tests/test_auto_EstimateContrast.py | 4 +- .../spm/tests/test_auto_EstimateModel.py | 11 +- .../spm/tests/test_auto_FactorialDesign.py | 7 +- .../spm/tests/test_auto_FieldMap.py | 7 +- .../spm/tests/test_auto_Level1Design.py | 7 +- .../test_auto_MultipleRegressionDesign.py | 7 +- .../spm/tests/test_auto_Normalize.py | 4 + .../spm/tests/test_auto_Normalize12.py | 3 + .../tests/test_auto_OneSampleTTestDesign.py | 7 +- .../spm/tests/test_auto_PairedTTestDesign.py | 7 +- .../interfaces/spm/tests/test_auto_Realign.py | 7 +- .../spm/tests/test_auto_RealignUnwarp.py | 8 +- .../interfaces/spm/tests/test_auto_Reslice.py | 14 ++- .../spm/tests/test_auto_ResliceToReference.py | 5 +- .../interfaces/spm/tests/test_auto_Segment.py | 30 ++--- .../spm/tests/test_auto_Threshold.py | 6 +- .../tests/test_auto_ThresholdStatistics.py | 2 + .../tests/test_auto_TwoSampleTTestDesign.py | 7 +- .../spm/tests/test_auto_VBMSegment.py | 9 +- nipype/interfaces/tests/test_auto_Bru2.py | 2 +- nipype/interfaces/tests/test_auto_C3d.py | 1 + .../tests/test_auto_C3dAffineTool.py | 5 +- nipype/interfaces/tests/test_auto_CopyMeta.py | 12 +- nipype/interfaces/tests/test_auto_Dcm2nii.py | 1 + nipype/interfaces/tests/test_auto_DcmStack.py | 2 +- .../tests/test_auto_FreeSurferSource.py | 55 ++++++++-- .../tests/test_auto_JSONFileGrabber.py | 2 +- .../tests/test_auto_JSONFileSink.py | 4 +- .../interfaces/tests/test_auto_LookupMeta.py | 5 +- .../tests/test_auto_MatlabCommand.py | 10 +- .../interfaces/tests/test_auto_MergeNifti.py | 2 +- nipype/interfaces/tests/test_auto_MeshFix.py | 5 +- .../interfaces/tests/test_auto_MySQLSink.py | 1 + nipype/interfaces/tests/test_auto_PETPVC.py | 5 +- .../interfaces/tests/test_auto_Quickshear.py | 5 +- nipype/interfaces/tests/test_auto_Reorient.py | 9 +- nipype/interfaces/tests/test_auto_Rescale.py | 12 +- .../interfaces/tests/test_auto_SQLiteSink.py | 5 +- .../tests/test_auto_SignalExtraction.py | 12 +- .../interfaces/tests/test_auto_SplitNifti.py | 5 +- nipype/interfaces/tests/test_auto_XNATSink.py | 1 + .../interfaces/tests/test_auto_XNATSource.py | 1 + .../utility/tests/test_auto_AssertEqual.py | 10 +- .../utility/tests/test_auto_CSVReader.py | 5 +- .../utility/tests/test_auto_Rename.py | 5 +- .../vista/tests/test_auto_Vnifti2Image.py | 5 +- .../vista/tests/test_auto_VtoMat.py | 4 +- .../workbench/tests/test_auto_CiftiSmooth.py | 11 +- .../tests/test_auto_MetricResample.py | 11 +- 769 files changed, 6730 insertions(+), 2309 deletions(-) diff --git a/nipype/algorithms/tests/test_auto_ACompCor.py b/nipype/algorithms/tests/test_auto_ACompCor.py index e28c77d467..d216a26a4b 100644 --- a/nipype/algorithms/tests/test_auto_ACompCor.py +++ b/nipype/algorithms/tests/test_auto_ACompCor.py @@ -22,7 +22,10 @@ def 
test_ACompCor_inputs(): ), num_components=dict(xor=['variance_threshold'], ), pre_filter=dict(usedefault=True, ), - realigned_file=dict(mandatory=True, ), + realigned_file=dict( + extensions=None, + mandatory=True, + ), regress_poly_degree=dict(usedefault=True, ), repetition_time=dict(), save_metadata=dict(usedefault=True, ), @@ -40,9 +43,9 @@ def test_ACompCor_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_ACompCor_outputs(): output_map = dict( - components_file=dict(), - metadata_file=dict(), - pre_filter_file=dict(), + components_file=dict(extensions=None, ), + metadata_file=dict(extensions=None, ), + pre_filter_file=dict(extensions=None, ), ) outputs = ACompCor.output_spec() diff --git a/nipype/algorithms/tests/test_auto_ActivationCount.py b/nipype/algorithms/tests/test_auto_ActivationCount.py index d6e3ff7165..0925ca1a95 100644 --- a/nipype/algorithms/tests/test_auto_ActivationCount.py +++ b/nipype/algorithms/tests/test_auto_ActivationCount.py @@ -15,9 +15,9 @@ def test_ActivationCount_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_ActivationCount_outputs(): output_map = dict( - acm_neg=dict(), - acm_pos=dict(), - out_file=dict(), + acm_neg=dict(extensions=None, ), + acm_pos=dict(extensions=None, ), + out_file=dict(extensions=None, ), ) outputs = ActivationCount.output_spec() diff --git a/nipype/algorithms/tests/test_auto_AddCSVColumn.py b/nipype/algorithms/tests/test_auto_AddCSVColumn.py index feedcf46e8..4acd27380e 100644 --- a/nipype/algorithms/tests/test_auto_AddCSVColumn.py +++ b/nipype/algorithms/tests/test_auto_AddCSVColumn.py @@ -7,8 +7,14 @@ def test_AddCSVColumn_inputs(): input_map = dict( extra_column_heading=dict(), extra_field=dict(), - in_file=dict(mandatory=True, ), - out_file=dict(usedefault=True, ), + in_file=dict( + extensions=None, + mandatory=True, + ), + out_file=dict( + extensions=None, + usedefault=True, + ), ) inputs = AddCSVColumn.input_spec() @@ -16,7 +22,7 @@ def test_AddCSVColumn_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_AddCSVColumn_outputs(): - output_map = dict(csv_file=dict(), ) + output_map = dict(csv_file=dict(extensions=None, ), ) outputs = AddCSVColumn.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_AddCSVRow.py b/nipype/algorithms/tests/test_auto_AddCSVRow.py index 4666a147d2..3090c8b6a9 100644 --- a/nipype/algorithms/tests/test_auto_AddCSVRow.py +++ b/nipype/algorithms/tests/test_auto_AddCSVRow.py @@ -14,7 +14,7 @@ def test_AddCSVRow_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_AddCSVRow_outputs(): - output_map = dict(csv_file=dict(), ) + output_map = dict(csv_file=dict(extensions=None, ), ) outputs = AddCSVRow.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_AddNoise.py b/nipype/algorithms/tests/test_auto_AddNoise.py index 6e0655a93e..bc9772d501 100644 --- a/nipype/algorithms/tests/test_auto_AddNoise.py +++ b/nipype/algorithms/tests/test_auto_AddNoise.py @@ -13,9 +13,12 @@ def test_AddNoise_inputs(): mandatory=True, usedefault=True, ), - in_file=dict(mandatory=True, ), - in_mask=dict(), - out_file=dict(), + in_file=dict( + extensions=None, + mandatory=True, + ), + in_mask=dict(extensions=None, ), + out_file=dict(extensions=None, ), snr=dict(usedefault=True, ), ) inputs = AddNoise.input_spec() @@ -24,7 +27,7 @@ def 
test_AddNoise_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_AddNoise_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = AddNoise.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_ArtifactDetect.py b/nipype/algorithms/tests/test_auto_ArtifactDetect.py index 85c57b8823..cc09f4feff 100644 --- a/nipype/algorithms/tests/test_auto_ArtifactDetect.py +++ b/nipype/algorithms/tests/test_auto_ArtifactDetect.py @@ -8,7 +8,7 @@ def test_ArtifactDetect_inputs(): bound_by_brainmask=dict(usedefault=True, ), global_threshold=dict(usedefault=True, ), intersect_mask=dict(usedefault=True, ), - mask_file=dict(), + mask_file=dict(extensions=None, ), mask_threshold=dict(), mask_type=dict(mandatory=True, ), norm_threshold=dict( diff --git a/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py b/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py index 3dbbd772c8..b175f45fa6 100644 --- a/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py +++ b/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py @@ -6,7 +6,10 @@ def test_CalculateNormalizedMoments_inputs(): input_map = dict( moment=dict(mandatory=True, ), - timeseries_file=dict(mandatory=True, ), + timeseries_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = CalculateNormalizedMoments.input_spec() diff --git a/nipype/algorithms/tests/test_auto_ComputeDVARS.py b/nipype/algorithms/tests/test_auto_ComputeDVARS.py index ca263c77ac..b6a9b4dfb5 100644 --- a/nipype/algorithms/tests/test_auto_ComputeDVARS.py +++ b/nipype/algorithms/tests/test_auto_ComputeDVARS.py @@ -8,8 +8,14 @@ def test_ComputeDVARS_inputs(): figdpi=dict(usedefault=True, ), figformat=dict(usedefault=True, ), figsize=dict(usedefault=True, ), - in_file=dict(mandatory=True, ), - in_mask=dict(mandatory=True, ), + in_file=dict( + extensions=None, + mandatory=True, + ), + in_mask=dict( + extensions=None, + mandatory=True, + ), intensity_normalization=dict(usedefault=True, ), remove_zerovariance=dict(usedefault=True, ), save_all=dict(usedefault=True, ), @@ -29,13 +35,13 @@ def test_ComputeDVARS_outputs(): avg_nstd=dict(), avg_std=dict(), avg_vxstd=dict(), - fig_nstd=dict(), - fig_std=dict(), - fig_vxstd=dict(), - out_all=dict(), - out_nstd=dict(), - out_std=dict(), - out_vxstd=dict(), + fig_nstd=dict(extensions=None, ), + fig_std=dict(extensions=None, ), + fig_vxstd=dict(extensions=None, ), + out_all=dict(extensions=None, ), + out_nstd=dict(extensions=None, ), + out_std=dict(extensions=None, ), + out_vxstd=dict(extensions=None, ), ) outputs = ComputeDVARS.output_spec() diff --git a/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py b/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py index 0308653786..adaebaaa00 100644 --- a/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py +++ b/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py @@ -6,10 +6,22 @@ def test_ComputeMeshWarp_inputs(): input_map = dict( metric=dict(usedefault=True, ), - out_file=dict(usedefault=True, ), - out_warp=dict(usedefault=True, ), - surface1=dict(mandatory=True, ), - surface2=dict(mandatory=True, ), + out_file=dict( + extensions=None, + usedefault=True, + ), + out_warp=dict( + extensions=None, + usedefault=True, + ), + surface1=dict( + extensions=None, + mandatory=True, + ), + surface2=dict( + extensions=None, + mandatory=True, + ), weighting=dict(usedefault=True, ), ) inputs = 
ComputeMeshWarp.input_spec() @@ -20,8 +32,8 @@ def test_ComputeMeshWarp_inputs(): def test_ComputeMeshWarp_outputs(): output_map = dict( distance=dict(), - out_file=dict(), - out_warp=dict(), + out_file=dict(extensions=None, ), + out_warp=dict(extensions=None, ), ) outputs = ComputeMeshWarp.output_spec() diff --git a/nipype/algorithms/tests/test_auto_CreateNifti.py b/nipype/algorithms/tests/test_auto_CreateNifti.py index f8bf8a405f..a40f446d74 100644 --- a/nipype/algorithms/tests/test_auto_CreateNifti.py +++ b/nipype/algorithms/tests/test_auto_CreateNifti.py @@ -6,8 +6,14 @@ def test_CreateNifti_inputs(): input_map = dict( affine=dict(), - data_file=dict(mandatory=True, ), - header_file=dict(mandatory=True, ), + data_file=dict( + extensions=None, + mandatory=True, + ), + header_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = CreateNifti.input_spec() @@ -15,7 +21,7 @@ def test_CreateNifti_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CreateNifti_outputs(): - output_map = dict(nifti_file=dict(), ) + output_map = dict(nifti_file=dict(extensions=None, ), ) outputs = CreateNifti.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_Distance.py b/nipype/algorithms/tests/test_auto_Distance.py index 2c5d098d73..5b3a165f56 100644 --- a/nipype/algorithms/tests/test_auto_Distance.py +++ b/nipype/algorithms/tests/test_auto_Distance.py @@ -5,10 +5,16 @@ def test_Distance_inputs(): input_map = dict( - mask_volume=dict(), + mask_volume=dict(extensions=None, ), method=dict(usedefault=True, ), - volume1=dict(mandatory=True, ), - volume2=dict(mandatory=True, ), + volume1=dict( + extensions=None, + mandatory=True, + ), + volume2=dict( + extensions=None, + mandatory=True, + ), ) inputs = Distance.input_spec() @@ -18,7 +24,7 @@ def test_Distance_inputs(): def test_Distance_outputs(): output_map = dict( distance=dict(), - histogram=dict(), + histogram=dict(extensions=None, ), point1=dict(), point2=dict(), ) diff --git a/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py b/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py index 685dec61e8..47334ab77c 100644 --- a/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py +++ b/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py @@ -7,10 +7,19 @@ def test_FramewiseDisplacement_inputs(): input_map = dict( figdpi=dict(usedefault=True, ), figsize=dict(usedefault=True, ), - in_file=dict(mandatory=True, ), + in_file=dict( + extensions=None, + mandatory=True, + ), normalize=dict(usedefault=True, ), - out_figure=dict(usedefault=True, ), - out_file=dict(usedefault=True, ), + out_figure=dict( + extensions=None, + usedefault=True, + ), + out_file=dict( + extensions=None, + usedefault=True, + ), parameter_source=dict(mandatory=True, ), radius=dict(usedefault=True, ), save_plot=dict(usedefault=True, ), @@ -24,8 +33,8 @@ def test_FramewiseDisplacement_inputs(): def test_FramewiseDisplacement_outputs(): output_map = dict( fd_average=dict(), - out_figure=dict(), - out_file=dict(), + out_figure=dict(extensions=None, ), + out_file=dict(extensions=None, ), ) outputs = FramewiseDisplacement.output_spec() diff --git a/nipype/algorithms/tests/test_auto_FuzzyOverlap.py b/nipype/algorithms/tests/test_auto_FuzzyOverlap.py index e9e28aaa44..c92c033643 100644 --- a/nipype/algorithms/tests/test_auto_FuzzyOverlap.py +++ b/nipype/algorithms/tests/test_auto_FuzzyOverlap.py @@ -5,10 +5,13 @@ def test_FuzzyOverlap_inputs(): 
input_map = dict( - in_mask=dict(), + in_mask=dict(extensions=None, ), in_ref=dict(mandatory=True, ), in_tst=dict(mandatory=True, ), - out_file=dict(usedefault=True, ), + out_file=dict( + extensions=None, + usedefault=True, + ), weighting=dict(usedefault=True, ), ) inputs = FuzzyOverlap.input_spec() diff --git a/nipype/algorithms/tests/test_auto_Gunzip.py b/nipype/algorithms/tests/test_auto_Gunzip.py index f12e1f9b45..e583deaa10 100644 --- a/nipype/algorithms/tests/test_auto_Gunzip.py +++ b/nipype/algorithms/tests/test_auto_Gunzip.py @@ -4,14 +4,18 @@ def test_Gunzip_inputs(): - input_map = dict(in_file=dict(mandatory=True, ), ) + input_map = dict( + in_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = Gunzip.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Gunzip_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Gunzip.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_ICC.py b/nipype/algorithms/tests/test_auto_ICC.py index 1a4a2b1517..78e600bdd4 100644 --- a/nipype/algorithms/tests/test_auto_ICC.py +++ b/nipype/algorithms/tests/test_auto_ICC.py @@ -5,7 +5,10 @@ def test_ICC_inputs(): input_map = dict( - mask=dict(mandatory=True, ), + mask=dict( + extensions=None, + mandatory=True, + ), subjects_sessions=dict(mandatory=True, ), ) inputs = ICC.input_spec() @@ -15,9 +18,9 @@ def test_ICC_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_ICC_outputs(): output_map = dict( - icc_map=dict(), - session_var_map=dict(), - subject_var_map=dict(), + icc_map=dict(extensions=None, ), + session_var_map=dict(extensions=None, ), + subject_var_map=dict(extensions=None, ), ) outputs = ICC.output_spec() diff --git a/nipype/algorithms/tests/test_auto_Matlab2CSV.py b/nipype/algorithms/tests/test_auto_Matlab2CSV.py index fcc1648bf9..6bc2a9caba 100644 --- a/nipype/algorithms/tests/test_auto_Matlab2CSV.py +++ b/nipype/algorithms/tests/test_auto_Matlab2CSV.py @@ -5,7 +5,10 @@ def test_Matlab2CSV_inputs(): input_map = dict( - in_file=dict(mandatory=True, ), + in_file=dict( + extensions=None, + mandatory=True, + ), reshape_matrix=dict(usedefault=True, ), ) inputs = Matlab2CSV.input_spec() diff --git a/nipype/algorithms/tests/test_auto_MergeCSVFiles.py b/nipype/algorithms/tests/test_auto_MergeCSVFiles.py index fd882e850a..f109fea025 100644 --- a/nipype/algorithms/tests/test_auto_MergeCSVFiles.py +++ b/nipype/algorithms/tests/test_auto_MergeCSVFiles.py @@ -9,7 +9,10 @@ def test_MergeCSVFiles_inputs(): extra_column_heading=dict(), extra_field=dict(), in_files=dict(mandatory=True, ), - out_file=dict(usedefault=True, ), + out_file=dict( + extensions=None, + usedefault=True, + ), row_heading_title=dict(usedefault=True, ), row_headings=dict(), ) @@ -19,7 +22,7 @@ def test_MergeCSVFiles_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MergeCSVFiles_outputs(): - output_map = dict(csv_file=dict(), ) + output_map = dict(csv_file=dict(extensions=None, ), ) outputs = MergeCSVFiles.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_MergeROIs.py b/nipype/algorithms/tests/test_auto_MergeROIs.py index 01b2b097a8..528f847b22 100644 --- a/nipype/algorithms/tests/test_auto_MergeROIs.py +++ 
b/nipype/algorithms/tests/test_auto_MergeROIs.py @@ -7,7 +7,7 @@ def test_MergeROIs_inputs(): input_map = dict( in_files=dict(), in_index=dict(), - in_reference=dict(), + in_reference=dict(extensions=None, ), ) inputs = MergeROIs.input_spec() @@ -15,7 +15,7 @@ def test_MergeROIs_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MergeROIs_outputs(): - output_map = dict(merged_file=dict(), ) + output_map = dict(merged_file=dict(extensions=None, ), ) outputs = MergeROIs.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_MeshWarpMaths.py b/nipype/algorithms/tests/test_auto_MeshWarpMaths.py index f89b16017b..664e6e2bc3 100644 --- a/nipype/algorithms/tests/test_auto_MeshWarpMaths.py +++ b/nipype/algorithms/tests/test_auto_MeshWarpMaths.py @@ -6,14 +6,23 @@ def test_MeshWarpMaths_inputs(): input_map = dict( float_trait=dict(), - in_surf=dict(mandatory=True, ), + in_surf=dict( + extensions=None, + mandatory=True, + ), operation=dict(usedefault=True, ), operator=dict( mandatory=True, usedefault=True, ), - out_file=dict(usedefault=True, ), - out_warp=dict(usedefault=True, ), + out_file=dict( + extensions=None, + usedefault=True, + ), + out_warp=dict( + extensions=None, + usedefault=True, + ), ) inputs = MeshWarpMaths.input_spec() @@ -22,8 +31,8 @@ def test_MeshWarpMaths_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_MeshWarpMaths_outputs(): output_map = dict( - out_file=dict(), - out_warp=dict(), + out_file=dict(extensions=None, ), + out_warp=dict(extensions=None, ), ) outputs = MeshWarpMaths.output_spec() diff --git a/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py b/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py index b86fe3df03..4da9dad47d 100644 --- a/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py +++ b/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py @@ -4,7 +4,11 @@ def test_NonSteadyStateDetector_inputs(): - input_map = dict(in_file=dict(mandatory=True, ), ) + input_map = dict( + in_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = NonSteadyStateDetector.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py b/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py index 4c9a5584d0..59ffc9d9cb 100644 --- a/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py +++ b/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py @@ -6,7 +6,7 @@ def test_NormalizeProbabilityMapSet_inputs(): input_map = dict( in_files=dict(), - in_mask=dict(), + in_mask=dict(extensions=None, ), ) inputs = NormalizeProbabilityMapSet.input_spec() diff --git a/nipype/algorithms/tests/test_auto_P2PDistance.py b/nipype/algorithms/tests/test_auto_P2PDistance.py index 9948e3675e..65461ede3c 100644 --- a/nipype/algorithms/tests/test_auto_P2PDistance.py +++ b/nipype/algorithms/tests/test_auto_P2PDistance.py @@ -6,10 +6,22 @@ def test_P2PDistance_inputs(): input_map = dict( metric=dict(usedefault=True, ), - out_file=dict(usedefault=True, ), - out_warp=dict(usedefault=True, ), - surface1=dict(mandatory=True, ), - surface2=dict(mandatory=True, ), + out_file=dict( + extensions=None, + usedefault=True, + ), + out_warp=dict( + extensions=None, + usedefault=True, + ), + surface1=dict( + extensions=None, + mandatory=True, + ), + surface2=dict( + extensions=None, + mandatory=True, + ), weighting=dict(usedefault=True, ), 
) inputs = P2PDistance.input_spec() @@ -20,8 +32,8 @@ def test_P2PDistance_inputs(): def test_P2PDistance_outputs(): output_map = dict( distance=dict(), - out_file=dict(), - out_warp=dict(), + out_file=dict(extensions=None, ), + out_warp=dict(extensions=None, ), ) outputs = P2PDistance.output_spec() diff --git a/nipype/algorithms/tests/test_auto_PickAtlas.py b/nipype/algorithms/tests/test_auto_PickAtlas.py index 3b15c302e5..9787af79dd 100644 --- a/nipype/algorithms/tests/test_auto_PickAtlas.py +++ b/nipype/algorithms/tests/test_auto_PickAtlas.py @@ -5,11 +5,14 @@ def test_PickAtlas_inputs(): input_map = dict( - atlas=dict(mandatory=True, ), + atlas=dict( + extensions=None, + mandatory=True, + ), dilation_size=dict(usedefault=True, ), hemi=dict(usedefault=True, ), labels=dict(mandatory=True, ), - output_file=dict(), + output_file=dict(extensions=None, ), ) inputs = PickAtlas.input_spec() @@ -17,7 +20,7 @@ def test_PickAtlas_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_PickAtlas_outputs(): - output_map = dict(mask_file=dict(), ) + output_map = dict(mask_file=dict(extensions=None, ), ) outputs = PickAtlas.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_Similarity.py b/nipype/algorithms/tests/test_auto_Similarity.py index b2ad79b5eb..b74728774c 100644 --- a/nipype/algorithms/tests/test_auto_Similarity.py +++ b/nipype/algorithms/tests/test_auto_Similarity.py @@ -5,11 +5,17 @@ def test_Similarity_inputs(): input_map = dict( - mask1=dict(), - mask2=dict(), + mask1=dict(extensions=None, ), + mask2=dict(extensions=None, ), metric=dict(usedefault=True, ), - volume1=dict(mandatory=True, ), - volume2=dict(mandatory=True, ), + volume1=dict( + extensions=None, + mandatory=True, + ), + volume2=dict( + extensions=None, + mandatory=True, + ), ) inputs = Similarity.input_spec() diff --git a/nipype/algorithms/tests/test_auto_SpecifySparseModel.py b/nipype/algorithms/tests/test_auto_SpecifySparseModel.py index dc12dd3a89..933793f363 100644 --- a/nipype/algorithms/tests/test_auto_SpecifySparseModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifySparseModel.py @@ -53,8 +53,8 @@ def test_SpecifySparseModel_inputs(): def test_SpecifySparseModel_outputs(): output_map = dict( session_info=dict(), - sparse_png_file=dict(), - sparse_svg_file=dict(), + sparse_png_file=dict(extensions=None, ), + sparse_svg_file=dict(extensions=None, ), ) outputs = SpecifySparseModel.output_spec() diff --git a/nipype/algorithms/tests/test_auto_SplitROIs.py b/nipype/algorithms/tests/test_auto_SplitROIs.py index 963926666d..23633e98f4 100644 --- a/nipype/algorithms/tests/test_auto_SplitROIs.py +++ b/nipype/algorithms/tests/test_auto_SplitROIs.py @@ -5,8 +5,11 @@ def test_SplitROIs_inputs(): input_map = dict( - in_file=dict(mandatory=True, ), - in_mask=dict(), + in_file=dict( + extensions=None, + mandatory=True, + ), + in_mask=dict(extensions=None, ), roi_size=dict(), ) inputs = SplitROIs.input_spec() diff --git a/nipype/algorithms/tests/test_auto_StimulusCorrelation.py b/nipype/algorithms/tests/test_auto_StimulusCorrelation.py index 8c7ef276d9..9ed943ce2a 100644 --- a/nipype/algorithms/tests/test_auto_StimulusCorrelation.py +++ b/nipype/algorithms/tests/test_auto_StimulusCorrelation.py @@ -8,7 +8,10 @@ def test_StimulusCorrelation_inputs(): concatenated_design=dict(mandatory=True, ), intensity_values=dict(mandatory=True, ), realignment_parameters=dict(mandatory=True, ), - 
spm_mat_file=dict(mandatory=True, ), + spm_mat_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = StimulusCorrelation.input_spec() diff --git a/nipype/algorithms/tests/test_auto_TCompCor.py b/nipype/algorithms/tests/test_auto_TCompCor.py index c3bdcac192..ba448fd965 100644 --- a/nipype/algorithms/tests/test_auto_TCompCor.py +++ b/nipype/algorithms/tests/test_auto_TCompCor.py @@ -23,7 +23,10 @@ def test_TCompCor_inputs(): num_components=dict(xor=['variance_threshold'], ), percentile_threshold=dict(usedefault=True, ), pre_filter=dict(usedefault=True, ), - realigned_file=dict(mandatory=True, ), + realigned_file=dict( + extensions=None, + mandatory=True, + ), regress_poly_degree=dict(usedefault=True, ), repetition_time=dict(), save_metadata=dict(usedefault=True, ), @@ -41,10 +44,10 @@ def test_TCompCor_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_TCompCor_outputs(): output_map = dict( - components_file=dict(), + components_file=dict(extensions=None, ), high_variance_masks=dict(), - metadata_file=dict(), - pre_filter_file=dict(), + metadata_file=dict(extensions=None, ), + pre_filter_file=dict(extensions=None, ), ) outputs = TCompCor.output_spec() diff --git a/nipype/algorithms/tests/test_auto_WarpPoints.py b/nipype/algorithms/tests/test_auto_WarpPoints.py index b6965065a2..67b101724d 100644 --- a/nipype/algorithms/tests/test_auto_WarpPoints.py +++ b/nipype/algorithms/tests/test_auto_WarpPoints.py @@ -10,13 +10,20 @@ def test_WarpPoints_inputs(): usedefault=True, ), out_points=dict( + extensions=None, keep_extension=True, name_source='points', name_template='%s_warped', output_name='out_points', ), - points=dict(mandatory=True, ), - warp=dict(mandatory=True, ), + points=dict( + extensions=None, + mandatory=True, + ), + warp=dict( + extensions=None, + mandatory=True, + ), ) inputs = WarpPoints.input_spec() @@ -24,7 +31,7 @@ def test_WarpPoints_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_WarpPoints_outputs(): - output_map = dict(out_points=dict(), ) + output_map = dict(out_points=dict(extensions=None, ), ) outputs = WarpPoints.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_ABoverlap.py b/nipype/interfaces/afni/tests/test_auto_ABoverlap.py index 55cb12c27f..db17259c17 100644 --- a/nipype/interfaces/afni/tests/test_auto_ABoverlap.py +++ b/nipype/interfaces/afni/tests/test_auto_ABoverlap.py @@ -13,12 +13,14 @@ def test_ABoverlap_inputs(): in_file_a=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-3, ), in_file_b=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-2, ), @@ -29,6 +31,7 @@ def test_ABoverlap_inputs(): ), out_file=dict( argstr=' |& tee %s', + extensions=None, position=-1, ), outputtype=dict(), @@ -41,7 +44,7 @@ def test_ABoverlap_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ABoverlap_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = ABoverlap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_AFNICommand.py b/nipype/interfaces/afni/tests/test_auto_AFNICommand.py index 724c98dcb2..f5b7184391 100644 --- a/nipype/interfaces/afni/tests/test_auto_AFNICommand.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNICommand.py @@ -16,6 +16,7 @@ def 
test_AFNICommand_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source=['in_file'], name_template='%s_afni', ), diff --git a/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py b/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py index ba2411edfb..e530fc1309 100644 --- a/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py @@ -16,6 +16,7 @@ def test_AFNIPythonCommand_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source=['in_file'], name_template='%s_afni', ), diff --git a/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py b/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py index d89519d571..0c8bf19a7a 100644 --- a/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py @@ -14,6 +14,7 @@ def test_AFNItoNIFTI_inputs(): in_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), @@ -31,6 +32,7 @@ def test_AFNItoNIFTI_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, hash_files=False, name_source='in_file', name_template='%s.nii', @@ -44,7 +46,7 @@ def test_AFNItoNIFTI_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_AFNItoNIFTI_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = AFNItoNIFTI.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py b/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py index dfbfc648f7..d177480726 100644 --- a/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py +++ b/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py @@ -8,6 +8,7 @@ def test_AlignEpiAnatPy_inputs(): anat=dict( argstr='-anat %s', copyfile=False, + extensions=None, mandatory=True, ), anat2epi=dict(argstr='-anat2epi', ), @@ -25,6 +26,7 @@ def test_AlignEpiAnatPy_inputs(): in_file=dict( argstr='-epi %s', copyfile=False, + extensions=None, mandatory=True, ), outputtype=dict(), @@ -50,16 +52,16 @@ def test_AlignEpiAnatPy_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_AlignEpiAnatPy_outputs(): output_map = dict( - anat_al_mat=dict(), - anat_al_orig=dict(), - epi_al_mat=dict(), - epi_al_orig=dict(), - epi_al_tlrc_mat=dict(), - epi_reg_al_mat=dict(), - epi_tlrc_al=dict(), - epi_vr_al_mat=dict(), - epi_vr_motion=dict(), - skullstrip=dict(), + anat_al_mat=dict(extensions=None, ), + anat_al_orig=dict(extensions=None, ), + epi_al_mat=dict(extensions=None, ), + epi_al_orig=dict(extensions=None, ), + epi_al_tlrc_mat=dict(extensions=None, ), + epi_reg_al_mat=dict(extensions=None, ), + epi_tlrc_al=dict(extensions=None, ), + epi_vr_al_mat=dict(extensions=None, ), + epi_vr_motion=dict(extensions=None, ), + skullstrip=dict(extensions=None, ), ) outputs = AlignEpiAnatPy.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Allineate.py b/nipype/interfaces/afni/tests/test_auto_Allineate.py index be02af14da..0b110e669a 100644 --- a/nipype/interfaces/afni/tests/test_auto_Allineate.py +++ b/nipype/interfaces/afni/tests/test_auto_Allineate.py @@ -7,6 +7,7 @@ def test_Allineate_inputs(): input_map = dict( allcostx=dict( argstr='-allcostx |& tee %s', + extensions=None, position=-1, xor=[ 'out_file', 'out_matrix', 'out_param_file', 'out_weight_file' @@ -30,19 +31,25 @@ def test_Allineate_inputs(): 
in_file=dict( argstr='-source %s', copyfile=False, + extensions=None, mandatory=True, ), in_matrix=dict( argstr='-1Dmatrix_apply %s', + extensions=None, position=-3, xor=['out_matrix'], ), in_param_file=dict( argstr='-1Dparam_apply %s', + extensions=None, xor=['out_param_file'], ), interpolation=dict(argstr='-interp %s', ), - master=dict(argstr='-master %s', ), + master=dict( + argstr='-master %s', + extensions=None, + ), maxrot=dict(argstr='-maxrot %f', ), maxscl=dict(argstr='-maxscl %f', ), maxshf=dict(argstr='-maxshf %f', ), @@ -61,6 +68,7 @@ def test_Allineate_inputs(): one_pass=dict(argstr='-onepass', ), out_file=dict( argstr='-prefix %s', + extensions=None, hash_files=False, name_source='in_file', name_template='%s_allineate', @@ -68,10 +76,12 @@ def test_Allineate_inputs(): ), out_matrix=dict( argstr='-1Dmatrix_save %s', + extensions=None, xor=['in_matrix', 'allcostx'], ), out_param_file=dict( argstr='-1Dparam_save %s', + extensions=None, xor=['in_param_file', 'allcostx'], ), out_weight_file=dict( @@ -81,11 +91,17 @@ def test_Allineate_inputs(): outputtype=dict(), overwrite=dict(argstr='-overwrite', ), quiet=dict(argstr='-quiet', ), - reference=dict(argstr='-base %s', ), + reference=dict( + argstr='-base %s', + extensions=None, + ), replacebase=dict(argstr='-replacebase', ), replacemeth=dict(argstr='-replacemeth %s', ), source_automask=dict(argstr='-source_automask+%d', ), - source_mask=dict(argstr='-source_mask %s', ), + source_mask=dict( + argstr='-source_mask %s', + extensions=None, + ), two_best=dict(argstr='-twobest %d', ), two_blur=dict(argstr='-twoblur %f', ), two_first=dict(argstr='-twofirst', ), @@ -98,6 +114,7 @@ def test_Allineate_inputs(): weight_file=dict( argstr='-weight %s', deprecated='1.0.0', + extensions=None, new_name='weight', ), zclip=dict(argstr='-zclip', ), @@ -109,11 +126,11 @@ def test_Allineate_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Allineate_outputs(): output_map = dict( - allcostx=dict(), - out_file=dict(), - out_matrix=dict(), - out_param_file=dict(), - out_weight_file=dict(), + allcostx=dict(extensions=None, ), + out_file=dict(extensions=None, ), + out_matrix=dict(extensions=None, ), + out_param_file=dict(extensions=None, ), + out_weight_file=dict(extensions=None, ), ) outputs = Allineate.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py b/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py index bb00b3b585..8d251a9620 100644 --- a/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py +++ b/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py @@ -17,6 +17,7 @@ def test_AutoTLRC_inputs(): in_file=dict( argstr='-input %s', copyfile=False, + extensions=None, mandatory=True, ), no_ss=dict(argstr='-no_ss', ), @@ -28,7 +29,7 @@ def test_AutoTLRC_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_AutoTLRC_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = AutoTLRC.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py b/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py index 424b7d25b1..34c99aa0c0 100644 --- a/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py +++ b/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py @@ -14,16 +14,21 @@ def test_AutoTcorrelate_inputs(): in_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), - 
mask=dict(argstr='-mask %s', ), + mask=dict( + argstr='-mask %s', + extensions=None, + ), mask_only_targets=dict( argstr='-mask_only_targets', xor=['mask_source'], ), mask_source=dict( argstr='-mask_source %s', + extensions=None, xor=['mask_only_targets'], ), num_threads=dict( @@ -32,6 +37,7 @@ def test_AutoTcorrelate_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_file', name_template='%s_similarity_matrix.1D', ), @@ -44,7 +50,7 @@ def test_AutoTcorrelate_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_AutoTcorrelate_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = AutoTcorrelate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Autobox.py b/nipype/interfaces/afni/tests/test_auto_Autobox.py index f158263c54..ab7826469e 100644 --- a/nipype/interfaces/afni/tests/test_auto_Autobox.py +++ b/nipype/interfaces/afni/tests/test_auto_Autobox.py @@ -13,6 +13,7 @@ def test_Autobox_inputs(): in_file=dict( argstr='-input %s', copyfile=False, + extensions=None, mandatory=True, ), no_clustering=dict(argstr='-noclust', ), @@ -22,6 +23,7 @@ def test_Autobox_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_file', name_template='%s_autobox', ), @@ -35,7 +37,7 @@ def test_Autobox_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Autobox_outputs(): output_map = dict( - out_file=dict(), + out_file=dict(extensions=None, ), x_max=dict(), x_min=dict(), y_max=dict(), diff --git a/nipype/interfaces/afni/tests/test_auto_Automask.py b/nipype/interfaces/afni/tests/test_auto_Automask.py index efffd19cba..223419ad71 100644 --- a/nipype/interfaces/afni/tests/test_auto_Automask.py +++ b/nipype/interfaces/afni/tests/test_auto_Automask.py @@ -8,6 +8,7 @@ def test_Automask_inputs(): args=dict(argstr='%s', ), brain_file=dict( argstr='-apply_prefix %s', + extensions=None, name_source='in_file', name_template='%s_masked', ), @@ -21,6 +22,7 @@ def test_Automask_inputs(): in_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), @@ -30,6 +32,7 @@ def test_Automask_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_file', name_template='%s_mask', ), @@ -42,8 +45,8 @@ def test_Automask_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Automask_outputs(): output_map = dict( - brain_file=dict(), - out_file=dict(), + brain_file=dict(extensions=None, ), + out_file=dict(extensions=None, ), ) outputs = Automask.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Axialize.py b/nipype/interfaces/afni/tests/test_auto_Axialize.py index 62b425c932..b89957b6f9 100644 --- a/nipype/interfaces/afni/tests/test_auto_Axialize.py +++ b/nipype/interfaces/afni/tests/test_auto_Axialize.py @@ -21,6 +21,7 @@ def test_Axialize_inputs(): in_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-2, ), @@ -31,6 +32,7 @@ def test_Axialize_inputs(): orientation=dict(argstr='-orient %s', ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_file', name_template='%s_axialize', ), @@ -47,7 +49,7 @@ def test_Axialize_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Axialize_outputs(): - output_map = dict(out_file=dict(), ) + output_map = 
dict(out_file=dict(extensions=None, ), ) outputs = Axialize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Bandpass.py b/nipype/interfaces/afni/tests/test_auto_Bandpass.py index fb0861a747..c4a3be3022 100644 --- a/nipype/interfaces/afni/tests/test_auto_Bandpass.py +++ b/nipype/interfaces/afni/tests/test_auto_Bandpass.py @@ -21,6 +21,7 @@ def test_Bandpass_inputs(): in_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), @@ -32,6 +33,7 @@ def test_Bandpass_inputs(): ), mask=dict( argstr='-mask %s', + extensions=None, position=2, ), nfft=dict(argstr='-nfft %d', ), @@ -42,11 +44,14 @@ def test_Bandpass_inputs(): nohash=True, usedefault=True, ), - orthogonalize_dset=dict(argstr='-dsort %s', ), + orthogonalize_dset=dict( + argstr='-dsort %s', + extensions=None, + ), orthogonalize_file=dict(argstr='-ort %s', ), out_file=dict( argstr='-prefix %s', - genfile=True, + extensions=None, name_source='in_file', name_template='%s_bp', position=1, @@ -60,7 +65,7 @@ def test_Bandpass_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Bandpass_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Bandpass.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_BlurInMask.py b/nipype/interfaces/afni/tests/test_auto_BlurInMask.py index 334116d945..bf42a587b7 100644 --- a/nipype/interfaces/afni/tests/test_auto_BlurInMask.py +++ b/nipype/interfaces/afni/tests/test_auto_BlurInMask.py @@ -19,11 +19,18 @@ def test_BlurInMask_inputs(): in_file=dict( argstr='-input %s', copyfile=False, + extensions=None, mandatory=True, position=1, ), - mask=dict(argstr='-mask %s', ), - multimask=dict(argstr='-Mmask %s', ), + mask=dict( + argstr='-mask %s', + extensions=None, + ), + multimask=dict( + argstr='-Mmask %s', + extensions=None, + ), num_threads=dict( nohash=True, usedefault=True, @@ -34,6 +41,7 @@ def test_BlurInMask_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_file', name_template='%s_blur', position=-1, @@ -47,7 +55,7 @@ def test_BlurInMask_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_BlurInMask_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = BlurInMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py b/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py index 2f88a1edcb..38864aa09f 100644 --- a/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py +++ b/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py @@ -7,7 +7,10 @@ def test_BlurToFWHM_inputs(): input_map = dict( args=dict(argstr='%s', ), automask=dict(argstr='-automask', ), - blurmaster=dict(argstr='-blurmaster %s', ), + blurmaster=dict( + argstr='-blurmaster %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, @@ -16,15 +19,20 @@ def test_BlurToFWHM_inputs(): fwhmxy=dict(argstr='-FWHMxy %f', ), in_file=dict( argstr='-input %s', + extensions=None, mandatory=True, ), - mask=dict(argstr='-mask %s', ), + mask=dict( + argstr='-mask %s', + extensions=None, + ), num_threads=dict( nohash=True, usedefault=True, ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source=['in_file'], 
name_template='%s_afni', ), @@ -36,7 +44,7 @@ def test_BlurToFWHM_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_BlurToFWHM_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = BlurToFWHM.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_BrickStat.py b/nipype/interfaces/afni/tests/test_auto_BrickStat.py index 0056df5980..d7f36eb79b 100644 --- a/nipype/interfaces/afni/tests/test_auto_BrickStat.py +++ b/nipype/interfaces/afni/tests/test_auto_BrickStat.py @@ -12,11 +12,13 @@ def test_BrickStat_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, ), mask=dict( argstr='-mask %s', + extensions=None, position=2, ), max=dict(argstr='-max', ), diff --git a/nipype/interfaces/afni/tests/test_auto_Bucket.py b/nipype/interfaces/afni/tests/test_auto_Bucket.py index c3faf87a09..dfd28d6ccd 100644 --- a/nipype/interfaces/afni/tests/test_auto_Bucket.py +++ b/nipype/interfaces/afni/tests/test_auto_Bucket.py @@ -21,6 +21,7 @@ def test_Bucket_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_template='buck', ), outputtype=dict(), @@ -31,7 +32,7 @@ def test_Bucket_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Bucket_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Bucket.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Calc.py b/nipype/interfaces/afni/tests/test_auto_Calc.py index 28863d5a4b..677cd5dbbf 100644 --- a/nipype/interfaces/afni/tests/test_auto_Calc.py +++ b/nipype/interfaces/afni/tests/test_auto_Calc.py @@ -17,24 +17,31 @@ def test_Calc_inputs(): ), in_file_a=dict( argstr='-a %s', + extensions=None, mandatory=True, position=0, ), in_file_b=dict( argstr='-b %s', + extensions=None, position=1, ), in_file_c=dict( argstr='-c %s', + extensions=None, position=2, ), num_threads=dict( nohash=True, usedefault=True, ), - other=dict(argstr='', ), + other=dict( + argstr='', + extensions=None, + ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_file_a', name_template='%s_calc', ), @@ -50,7 +57,7 @@ def test_Calc_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Calc_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Calc.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Cat.py b/nipype/interfaces/afni/tests/test_auto_Cat.py index 3da86c66d6..345806f746 100644 --- a/nipype/interfaces/afni/tests/test_auto_Cat.py +++ b/nipype/interfaces/afni/tests/test_auto_Cat.py @@ -31,6 +31,7 @@ def test_Cat_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, mandatory=True, position=-1, usedefault=True, @@ -67,7 +68,7 @@ def test_Cat_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Cat_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Cat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_CatMatvec.py 
b/nipype/interfaces/afni/tests/test_auto_CatMatvec.py index b67ab485d4..2250878719 100644 --- a/nipype/interfaces/afni/tests/test_auto_CatMatvec.py +++ b/nipype/interfaces/afni/tests/test_auto_CatMatvec.py @@ -33,6 +33,7 @@ def test_CatMatvec_inputs(): ), out_file=dict( argstr=' > %s', + extensions=None, keep_extension=False, mandatory=True, name_source='in_file', @@ -47,7 +48,7 @@ def test_CatMatvec_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CatMatvec_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = CatMatvec.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_CenterMass.py b/nipype/interfaces/afni/tests/test_auto_CenterMass.py index 322218a50d..41eddd5ed7 100644 --- a/nipype/interfaces/afni/tests/test_auto_CenterMass.py +++ b/nipype/interfaces/afni/tests/test_auto_CenterMass.py @@ -10,6 +10,7 @@ def test_CenterMass_inputs(): automask=dict(argstr='-automask', ), cm_file=dict( argstr='> %s', + extensions=None, hash_files=False, keep_extension=False, name_source='in_file', @@ -23,11 +24,15 @@ def test_CenterMass_inputs(): in_file=dict( argstr='%s', copyfile=True, + extensions=None, mandatory=True, position=-2, ), local_ijk=dict(argstr='-local_ijk', ), - mask_file=dict(argstr='-mask %s', ), + mask_file=dict( + argstr='-mask %s', + extensions=None, + ), roi_vals=dict(argstr='-roi_vals %s', ), set_cm=dict(argstr='-set %f %f %f', ), ) @@ -39,8 +44,8 @@ def test_CenterMass_inputs(): def test_CenterMass_outputs(): output_map = dict( cm=dict(), - cm_file=dict(), - out_file=dict(), + cm_file=dict(extensions=None, ), + out_file=dict(extensions=None, ), ) outputs = CenterMass.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_ClipLevel.py b/nipype/interfaces/afni/tests/test_auto_ClipLevel.py index 8b8c61208d..f9b3dbf705 100644 --- a/nipype/interfaces/afni/tests/test_auto_ClipLevel.py +++ b/nipype/interfaces/afni/tests/test_auto_ClipLevel.py @@ -22,6 +22,7 @@ def test_ClipLevel_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, ), diff --git a/nipype/interfaces/afni/tests/test_auto_ConvertDset.py b/nipype/interfaces/afni/tests/test_auto_ConvertDset.py index 06ba3a54f2..39613d7981 100644 --- a/nipype/interfaces/afni/tests/test_auto_ConvertDset.py +++ b/nipype/interfaces/afni/tests/test_auto_ConvertDset.py @@ -12,6 +12,7 @@ def test_ConvertDset_inputs(): ), in_file=dict( argstr='-input %s', + extensions=None, mandatory=True, position=-2, ), @@ -21,6 +22,7 @@ def test_ConvertDset_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, mandatory=True, position=-1, ), @@ -37,7 +39,7 @@ def test_ConvertDset_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ConvertDset_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = ConvertDset.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Copy.py b/nipype/interfaces/afni/tests/test_auto_Copy.py index a8e67d5607..447fcaa24f 100644 --- a/nipype/interfaces/afni/tests/test_auto_Copy.py +++ b/nipype/interfaces/afni/tests/test_auto_Copy.py @@ -13,6 +13,7 @@ def test_Copy_inputs(): in_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-2, ), @@ -22,6 +23,7 @@ def test_Copy_inputs(): 
), out_file=dict( argstr='%s', + extensions=None, name_source='in_file', name_template='%s_copy', position=-1, @@ -35,7 +37,7 @@ def test_Copy_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Copy_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Copy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Deconvolve.py b/nipype/interfaces/afni/tests/test_auto_Deconvolve.py index ae42a77019..713589dbc0 100644 --- a/nipype/interfaces/afni/tests/test_auto_Deconvolve.py +++ b/nipype/interfaces/afni/tests/test_auto_Deconvolve.py @@ -5,13 +5,19 @@ def test_Deconvolve_inputs(): input_map = dict( - STATmask=dict(argstr='-STATmask %s', ), + STATmask=dict( + argstr='-STATmask %s', + extensions=None, + ), TR_1D=dict(argstr='-TR_1D %f', ), allzero_OK=dict(argstr='-allzero_OK', ), args=dict(argstr='%s', ), automask=dict(argstr='-automask', ), cbucket=dict(argstr='-cbucket %s', ), - censor=dict(argstr='-censor %s', ), + censor=dict( + argstr='-censor %s', + extensions=None, + ), dmbase=dict(argstr='-dmbase', ), dname=dict(argstr='-D%s=%s', ), environ=dict( @@ -43,13 +49,19 @@ def test_Deconvolve_inputs(): position=1, sep=' ', ), - input1D=dict(argstr='-input1D %s', ), + input1D=dict( + argstr='-input1D %s', + extensions=None, + ), legendre=dict(argstr='-legendre', ), local_times=dict( argstr='-local_times', xor=['global_times'], ), - mask=dict(argstr='-mask %s', ), + mask=dict( + argstr='-mask %s', + extensions=None, + ), noblock=dict(argstr='-noblock', ), nocond=dict(argstr='-nocond', ), nodmbase=dict(argstr='-nodmbase', ), @@ -69,7 +81,10 @@ def test_Deconvolve_inputs(): nohash=True, ), ortvec=dict(argstr='-ortvec %s %s', ), - out_file=dict(argstr='-bucket %s', ), + out_file=dict( + argstr='-bucket %s', + extensions=None, + ), outputtype=dict(), polort=dict(argstr='-polort %d', ), rmsmin=dict(argstr='-rmsmin %f', ), @@ -96,7 +111,10 @@ def test_Deconvolve_inputs(): xor=['sat'], ), vout=dict(argstr='-vout', ), - x1D=dict(argstr='-x1D %s', ), + x1D=dict( + argstr='-x1D %s', + extensions=None, + ), x1D_stop=dict(argstr='-x1D_stop', ), ) inputs = Deconvolve.input_spec() @@ -106,10 +124,10 @@ def test_Deconvolve_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Deconvolve_outputs(): output_map = dict( - cbucket=dict(), - out_file=dict(), - reml_script=dict(), - x1D=dict(), + cbucket=dict(extensions=None, ), + out_file=dict(extensions=None, ), + reml_script=dict(extensions=None, ), + x1D=dict(extensions=None, ), ) outputs = Deconvolve.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py b/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py index 664cca5985..721f161af3 100644 --- a/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py +++ b/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py @@ -15,10 +15,14 @@ def test_DegreeCentrality_inputs(): in_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), - mask=dict(argstr='-mask %s', ), + mask=dict( + argstr='-mask %s', + extensions=None, + ), num_threads=dict( nohash=True, usedefault=True, @@ -26,6 +30,7 @@ def test_DegreeCentrality_inputs(): oned_file=dict(argstr='-out1D %s', ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source=['in_file'], name_template='%s_afni', ), @@ -41,8 +46,8 @@ def test_DegreeCentrality_inputs(): assert 
getattr(inputs.traits()[key], metakey) == value def test_DegreeCentrality_outputs(): output_map = dict( - oned_file=dict(), - out_file=dict(), + oned_file=dict(extensions=None, ), + out_file=dict(extensions=None, ), ) outputs = DegreeCentrality.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Despike.py b/nipype/interfaces/afni/tests/test_auto_Despike.py index ff0b8b532a..cb495e9c5b 100644 --- a/nipype/interfaces/afni/tests/test_auto_Despike.py +++ b/nipype/interfaces/afni/tests/test_auto_Despike.py @@ -13,6 +13,7 @@ def test_Despike_inputs(): in_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), @@ -22,6 +23,7 @@ def test_Despike_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_file', name_template='%s_despike', ), @@ -33,7 +35,7 @@ def test_Despike_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Despike_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Despike.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Detrend.py b/nipype/interfaces/afni/tests/test_auto_Detrend.py index 1938529cf7..6c1bd3cbba 100644 --- a/nipype/interfaces/afni/tests/test_auto_Detrend.py +++ b/nipype/interfaces/afni/tests/test_auto_Detrend.py @@ -13,6 +13,7 @@ def test_Detrend_inputs(): in_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), @@ -22,6 +23,7 @@ def test_Detrend_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_file', name_template='%s_detrend', ), @@ -33,7 +35,7 @@ def test_Detrend_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Detrend_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Detrend.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Dot.py b/nipype/interfaces/afni/tests/test_auto_Dot.py index 7623e90ca7..66e535dc54 100644 --- a/nipype/interfaces/afni/tests/test_auto_Dot.py +++ b/nipype/interfaces/afni/tests/test_auto_Dot.py @@ -22,7 +22,10 @@ def test_Dot_inputs(): argstr='%s ...', position=-2, ), - mask=dict(argstr='-mask %s', ), + mask=dict( + argstr='-mask %s', + extensions=None, + ), mrange=dict(argstr='-mrange %s %s', ), num_threads=dict( nohash=True, @@ -30,6 +33,7 @@ def test_Dot_inputs(): ), out_file=dict( argstr=' |& tee %s', + extensions=None, position=-1, ), outputtype=dict(), @@ -42,7 +46,7 @@ def test_Dot_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Dot_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Dot.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_ECM.py b/nipype/interfaces/afni/tests/test_auto_ECM.py index 8a4793fb7f..9f69ea3c69 100644 --- a/nipype/interfaces/afni/tests/test_auto_ECM.py +++ b/nipype/interfaces/afni/tests/test_auto_ECM.py @@ -18,10 +18,14 @@ def test_ECM_inputs(): in_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), - mask=dict(argstr='-mask %s', ), + mask=dict( + argstr='-mask %s', + extensions=None, + ), max_iter=dict(argstr='-max_iter %d', ), 
memory=dict(argstr='-memory %f', ), num_threads=dict( @@ -30,6 +34,7 @@ def test_ECM_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source=['in_file'], name_template='%s_afni', ), @@ -46,7 +51,7 @@ def test_ECM_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ECM_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = ECM.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Edge3.py b/nipype/interfaces/afni/tests/test_auto_Edge3.py index 8fc6953c28..722037d2d1 100644 --- a/nipype/interfaces/afni/tests/test_auto_Edge3.py +++ b/nipype/interfaces/afni/tests/test_auto_Edge3.py @@ -22,6 +22,7 @@ def test_Edge3_inputs(): in_file=dict( argstr='-input %s', copyfile=False, + extensions=None, mandatory=True, position=0, ), @@ -35,6 +36,7 @@ def test_Edge3_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, position=-1, ), outputtype=dict(), @@ -50,7 +52,7 @@ def test_Edge3_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Edge3_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Edge3.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Eval.py b/nipype/interfaces/afni/tests/test_auto_Eval.py index 35c1360a6e..9f29aa0b91 100644 --- a/nipype/interfaces/afni/tests/test_auto_Eval.py +++ b/nipype/interfaces/afni/tests/test_auto_Eval.py @@ -17,25 +17,32 @@ def test_Eval_inputs(): ), in_file_a=dict( argstr='-a %s', + extensions=None, mandatory=True, position=0, ), in_file_b=dict( argstr='-b %s', + extensions=None, position=1, ), in_file_c=dict( argstr='-c %s', + extensions=None, position=2, ), num_threads=dict( nohash=True, usedefault=True, ), - other=dict(argstr='', ), + other=dict( + argstr='', + extensions=None, + ), out1D=dict(argstr='-1D', ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_file_a', name_template='%s_calc', ), @@ -50,7 +57,7 @@ def test_Eval_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Eval_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Eval.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_FWHMx.py b/nipype/interfaces/afni/tests/test_auto_FWHMx.py index 14ab26fa4f..ac2fdffbb0 100644 --- a/nipype/interfaces/afni/tests/test_auto_FWHMx.py +++ b/nipype/interfaces/afni/tests/test_auto_FWHMx.py @@ -39,17 +39,23 @@ def test_FWHMx_inputs(): ), in_file=dict( argstr='-input %s', + extensions=None, mandatory=True, ), - mask=dict(argstr='-mask %s', ), + mask=dict( + argstr='-mask %s', + extensions=None, + ), out_detrend=dict( argstr='-detprefix %s', + extensions=None, keep_extension=False, name_source='in_file', name_template='%s_detrend', ), out_file=dict( argstr='> %s', + extensions=None, keep_extension=False, name_source='in_file', name_template='%s_fwhmx.out', @@ -57,6 +63,7 @@ def test_FWHMx_inputs(): ), out_subbricks=dict( argstr='-out %s', + extensions=None, keep_extension=False, name_source='in_file', name_template='%s_subbricks.out', @@ -72,10 +79,10 @@ def test_FWHMx_outputs(): output_map = dict( acf_param=dict(), fwhm=dict(), - 
out_acf=dict(), - out_detrend=dict(), - out_file=dict(), - out_subbricks=dict(), + out_acf=dict(extensions=None, ), + out_detrend=dict(extensions=None, ), + out_file=dict(extensions=None, ), + out_subbricks=dict(extensions=None, ), ) outputs = FWHMx.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Fim.py b/nipype/interfaces/afni/tests/test_auto_Fim.py index 931e5ff244..8550747727 100644 --- a/nipype/interfaces/afni/tests/test_auto_Fim.py +++ b/nipype/interfaces/afni/tests/test_auto_Fim.py @@ -16,12 +16,14 @@ def test_Fim_inputs(): ), ideal_file=dict( argstr='-ideal_file %s', + extensions=None, mandatory=True, position=2, ), in_file=dict( argstr='-input %s', copyfile=False, + extensions=None, mandatory=True, position=1, ), @@ -35,6 +37,7 @@ def test_Fim_inputs(): ), out_file=dict( argstr='-bucket %s', + extensions=None, name_source='in_file', name_template='%s_fim', ), @@ -46,7 +49,7 @@ def test_Fim_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Fim_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Fim.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Fourier.py b/nipype/interfaces/afni/tests/test_auto_Fourier.py index 69cd955cbc..ee3dd6672a 100644 --- a/nipype/interfaces/afni/tests/test_auto_Fourier.py +++ b/nipype/interfaces/afni/tests/test_auto_Fourier.py @@ -17,6 +17,7 @@ def test_Fourier_inputs(): in_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), @@ -30,6 +31,7 @@ def test_Fourier_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_file', name_template='%s_fourier', ), @@ -42,7 +44,7 @@ def test_Fourier_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Fourier_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Fourier.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_GCOR.py b/nipype/interfaces/afni/tests/test_auto_GCOR.py index 9d603dda36..60dfd9f7b3 100644 --- a/nipype/interfaces/afni/tests/test_auto_GCOR.py +++ b/nipype/interfaces/afni/tests/test_auto_GCOR.py @@ -13,12 +13,14 @@ def test_GCOR_inputs(): in_file=dict( argstr='-input %s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), mask=dict( argstr='-mask %s', copyfile=False, + extensions=None, ), nfirst=dict(argstr='-nfirst %d', ), no_demean=dict(argstr='-no_demean', ), diff --git a/nipype/interfaces/afni/tests/test_auto_Hist.py b/nipype/interfaces/afni/tests/test_auto_Hist.py index 48499a9605..65f4b0566b 100644 --- a/nipype/interfaces/afni/tests/test_auto_Hist.py +++ b/nipype/interfaces/afni/tests/test_auto_Hist.py @@ -14,21 +14,27 @@ def test_Hist_inputs(): in_file=dict( argstr='-input %s', copyfile=False, + extensions=None, mandatory=True, position=1, ), - mask=dict(argstr='-mask %s', ), + mask=dict( + argstr='-mask %s', + extensions=None, + ), max_value=dict(argstr='-max %f', ), min_value=dict(argstr='-min %f', ), nbin=dict(argstr='-nbin %d', ), out_file=dict( argstr='-prefix %s', + extensions=None, keep_extension=False, name_source=['in_file'], name_template='%s_hist', ), out_show=dict( argstr='> %s', + extensions=None, keep_extension=False, name_source='in_file', name_template='%s_hist.out', @@ -46,8 +52,8 @@ def 
test_Hist_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Hist_outputs(): output_map = dict( - out_file=dict(), - out_show=dict(), + out_file=dict(extensions=None, ), + out_show=dict(extensions=None, ), ) outputs = Hist.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_LFCD.py b/nipype/interfaces/afni/tests/test_auto_LFCD.py index 9cbde10b56..51a5fffb68 100644 --- a/nipype/interfaces/afni/tests/test_auto_LFCD.py +++ b/nipype/interfaces/afni/tests/test_auto_LFCD.py @@ -15,16 +15,21 @@ def test_LFCD_inputs(): in_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), - mask=dict(argstr='-mask %s', ), + mask=dict( + argstr='-mask %s', + extensions=None, + ), num_threads=dict( nohash=True, usedefault=True, ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source=['in_file'], name_template='%s_afni', ), @@ -38,7 +43,7 @@ def test_LFCD_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_LFCD_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = LFCD.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_LocalBistat.py b/nipype/interfaces/afni/tests/test_auto_LocalBistat.py index 22f0b717d8..ed3e61d74d 100644 --- a/nipype/interfaces/afni/tests/test_auto_LocalBistat.py +++ b/nipype/interfaces/afni/tests/test_auto_LocalBistat.py @@ -16,11 +16,13 @@ def test_LocalBistat_inputs(): ), in_file1=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), in_file2=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, ), @@ -56,7 +58,7 @@ def test_LocalBistat_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_LocalBistat_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = LocalBistat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Localstat.py b/nipype/interfaces/afni/tests/test_auto_Localstat.py index c2f53c3db0..011ce44da8 100644 --- a/nipype/interfaces/afni/tests/test_auto_Localstat.py +++ b/nipype/interfaces/afni/tests/test_auto_Localstat.py @@ -17,6 +17,7 @@ def test_Localstat_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, ), @@ -63,7 +64,7 @@ def test_Localstat_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Localstat_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Localstat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_MaskTool.py b/nipype/interfaces/afni/tests/test_auto_MaskTool.py index 1644bee878..070686dd8d 100644 --- a/nipype/interfaces/afni/tests/test_auto_MaskTool.py +++ b/nipype/interfaces/afni/tests/test_auto_MaskTool.py @@ -36,6 +36,7 @@ def test_MaskTool_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_file', name_template='%s_mask', ), @@ -49,7 +50,7 @@ def test_MaskTool_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MaskTool_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = 
MaskTool.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Maskave.py b/nipype/interfaces/afni/tests/test_auto_Maskave.py index a318e685a9..9185f94266 100644 --- a/nipype/interfaces/afni/tests/test_auto_Maskave.py +++ b/nipype/interfaces/afni/tests/test_auto_Maskave.py @@ -13,11 +13,13 @@ def test_Maskave_inputs(): in_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-2, ), mask=dict( argstr='-mask %s', + extensions=None, position=1, ), num_threads=dict( @@ -26,6 +28,7 @@ def test_Maskave_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_maskave.1D', @@ -43,7 +46,7 @@ def test_Maskave_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Maskave_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Maskave.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Means.py b/nipype/interfaces/afni/tests/test_auto_Means.py index 2e422e68db..9db304bb3d 100644 --- a/nipype/interfaces/afni/tests/test_auto_Means.py +++ b/nipype/interfaces/afni/tests/test_auto_Means.py @@ -14,11 +14,13 @@ def test_Means_inputs(): ), in_file_a=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), in_file_b=dict( argstr='%s', + extensions=None, position=-1, ), mask_inter=dict(argstr='-mask_inter', ), @@ -30,6 +32,7 @@ def test_Means_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_file_a', name_template='%s_mean', ), @@ -45,7 +48,7 @@ def test_Means_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Means_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Means.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Merge.py b/nipype/interfaces/afni/tests/test_auto_Merge.py index c36ee6f7a1..934bf17b96 100644 --- a/nipype/interfaces/afni/tests/test_auto_Merge.py +++ b/nipype/interfaces/afni/tests/test_auto_Merge.py @@ -27,6 +27,7 @@ def test_Merge_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_files', name_template='%s_merge', ), @@ -38,7 +39,7 @@ def test_Merge_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Merge_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Merge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Notes.py b/nipype/interfaces/afni/tests/test_auto_Notes.py index 965bacb000..51084ba3b9 100644 --- a/nipype/interfaces/afni/tests/test_auto_Notes.py +++ b/nipype/interfaces/afni/tests/test_auto_Notes.py @@ -19,6 +19,7 @@ def test_Notes_inputs(): in_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), @@ -26,7 +27,10 @@ def test_Notes_inputs(): nohash=True, usedefault=True, ), - out_file=dict(argstr='%s', ), + out_file=dict( + argstr='%s', + extensions=None, + ), outputtype=dict(), rep_history=dict( argstr='-HH "%s"', @@ -40,7 +44,7 @@ def test_Notes_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], 
metakey) == value def test_Notes_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Notes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py b/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py index 48b59a2968..c4c8dc6804 100644 --- a/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py +++ b/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py @@ -17,6 +17,7 @@ def test_NwarpAdjust_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, keep_extension=True, name_source='in_files', name_template='%s_NwarpAdjust', @@ -34,7 +35,7 @@ def test_NwarpAdjust_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_NwarpAdjust_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = NwarpAdjust.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpApply.py b/nipype/interfaces/afni/tests/test_auto_NwarpApply.py index 63f6baa044..e00457f4f3 100644 --- a/nipype/interfaces/afni/tests/test_auto_NwarpApply.py +++ b/nipype/interfaces/afni/tests/test_auto_NwarpApply.py @@ -23,6 +23,7 @@ def test_NwarpApply_inputs(): master=dict(argstr='-master %s', ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_file', name_template='%s_Nwarp', ), @@ -46,7 +47,7 @@ def test_NwarpApply_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_NwarpApply_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = NwarpApply.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpCat.py b/nipype/interfaces/afni/tests/test_auto_NwarpCat.py index c6b3689c9e..b5724c98dd 100644 --- a/nipype/interfaces/afni/tests/test_auto_NwarpCat.py +++ b/nipype/interfaces/afni/tests/test_auto_NwarpCat.py @@ -27,6 +27,7 @@ def test_NwarpCat_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_files', name_template='%s_NwarpCat', ), @@ -40,7 +41,7 @@ def test_NwarpCat_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_NwarpCat_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = NwarpCat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py b/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py index fe037851af..f8e664a727 100644 --- a/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py +++ b/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py @@ -16,10 +16,12 @@ def test_OneDToolPy_inputs(): ), in_file=dict( argstr='-infile %s', + extensions=None, mandatory=True, ), out_file=dict( argstr='-write %s', + extensions=None, xor=['show_cormat_warnings'], ), outputtype=dict(), @@ -41,7 +43,7 @@ def test_OneDToolPy_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_OneDToolPy_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = OneDToolPy.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/afni/tests/test_auto_OutlierCount.py b/nipype/interfaces/afni/tests/test_auto_OutlierCount.py index a63fbb8bef..2a1eb61af3 100644 --- a/nipype/interfaces/afni/tests/test_auto_OutlierCount.py +++ b/nipype/interfaces/afni/tests/test_auto_OutlierCount.py @@ -26,6 +26,7 @@ def test_OutlierCount_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -39,15 +40,18 @@ def test_OutlierCount_inputs(): ), mask=dict( argstr='-mask %s', + extensions=None, xor=['autoclip', 'automask'], ), out_file=dict( + extensions=None, keep_extension=False, name_source=['in_file'], name_template='%s_outliers', ), outliers_file=dict( argstr='-save %s', + extensions=None, keep_extension=True, name_source=['in_file'], name_template='%s_outliers', @@ -67,8 +71,8 @@ def test_OutlierCount_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_OutlierCount_outputs(): output_map = dict( - out_file=dict(), - out_outliers=dict(), + out_file=dict(extensions=None, ), + out_outliers=dict(extensions=None, ), ) outputs = OutlierCount.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_QualityIndex.py b/nipype/interfaces/afni/tests/test_auto_QualityIndex.py index 679a3e0393..27fd6727c4 100644 --- a/nipype/interfaces/afni/tests/test_auto_QualityIndex.py +++ b/nipype/interfaces/afni/tests/test_auto_QualityIndex.py @@ -23,6 +23,7 @@ def test_QualityIndex_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -32,10 +33,12 @@ def test_QualityIndex_inputs(): ), mask=dict( argstr='-mask %s', + extensions=None, xor=['autoclip', 'automask'], ), out_file=dict( argstr='> %s', + extensions=None, keep_extension=False, name_source=['in_file'], name_template='%s_tqual', @@ -56,7 +59,7 @@ def test_QualityIndex_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_QualityIndex_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = QualityIndex.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Qwarp.py b/nipype/interfaces/afni/tests/test_auto_Qwarp.py index f6df3d0ab5..3ef8c2e9b2 100644 --- a/nipype/interfaces/afni/tests/test_auto_Qwarp.py +++ b/nipype/interfaces/afni/tests/test_auto_Qwarp.py @@ -24,6 +24,7 @@ def test_Qwarp_inputs(): base_file=dict( argstr='-base %s', copyfile=False, + extensions=None, mandatory=True, ), baxopt=dict( @@ -41,6 +42,7 @@ def test_Qwarp_inputs(): emask=dict( argstr='-emask %s', copyfile=False, + extensions=None, ), environ=dict( nohash=True, @@ -53,6 +55,7 @@ def test_Qwarp_inputs(): gridlist=dict( argstr='-gridlist %s', copyfile=False, + extensions=None, xor=['duplo', 'plusminus'], ), hel=dict( @@ -62,6 +65,7 @@ def test_Qwarp_inputs(): in_file=dict( argstr='-source %s', copyfile=False, + extensions=None, mandatory=True, ), inilev=dict( @@ -117,9 +121,9 @@ def test_Qwarp_inputs(): ), out_file=dict( argstr='-prefix %s', - genfile=True, + extensions=None, name_source=['in_file'], - name_template='%s_QW', + name_template='ppp_%s', ), out_weight_file=dict(argstr='-wtprefix %s', ), outputtype=dict(), @@ -141,7 +145,10 @@ def test_Qwarp_inputs(): xor=['quiet'], ), wball=dict(argstr='-wball %s', ), - weight=dict(argstr='-weight %s', ), + weight=dict( + argstr='-weight %s', + extensions=None, + ), wmask=dict(argstr='-wpass %s %f', ), workhard=dict( argstr='-workhard', @@ -155,11 +162,11 @@ def 
test_Qwarp_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Qwarp_outputs(): output_map = dict( - base_warp=dict(), - source_warp=dict(), - warped_base=dict(), - warped_source=dict(), - weights=dict(), + base_warp=dict(extensions=None, ), + source_warp=dict(extensions=None, ), + warped_base=dict(extensions=None, ), + warped_source=dict(extensions=None, ), + weights=dict(extensions=None, ), ) outputs = Qwarp.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py b/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py index 340f3a0e4a..ca27a0d682 100644 --- a/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py +++ b/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py @@ -24,6 +24,7 @@ def test_QwarpPlusMinus_inputs(): base_file=dict( argstr='-base %s', copyfile=False, + extensions=None, mandatory=True, ), baxopt=dict( @@ -41,6 +42,7 @@ def test_QwarpPlusMinus_inputs(): emask=dict( argstr='-emask %s', copyfile=False, + extensions=None, ), environ=dict( nohash=True, @@ -53,6 +55,7 @@ def test_QwarpPlusMinus_inputs(): gridlist=dict( argstr='-gridlist %s', copyfile=False, + extensions=None, xor=['duplo', 'plusminus'], ), hel=dict( @@ -62,6 +65,7 @@ def test_QwarpPlusMinus_inputs(): in_file=dict( argstr='-source %s', copyfile=False, + extensions=None, mandatory=True, ), inilev=dict( @@ -117,6 +121,7 @@ def test_QwarpPlusMinus_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, position=0, usedefault=True, ), @@ -141,6 +146,7 @@ def test_QwarpPlusMinus_inputs(): argstr='-source %s', copyfile=False, deprecated='1.1.2', + extensions=None, new_name='in_file', ), verb=dict( @@ -148,7 +154,10 @@ def test_QwarpPlusMinus_inputs(): xor=['quiet'], ), wball=dict(argstr='-wball %s', ), - weight=dict(argstr='-weight %s', ), + weight=dict( + argstr='-weight %s', + extensions=None, + ), wmask=dict(argstr='-wpass %s %f', ), workhard=dict( argstr='-workhard', @@ -162,11 +171,11 @@ def test_QwarpPlusMinus_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_QwarpPlusMinus_outputs(): output_map = dict( - base_warp=dict(), - source_warp=dict(), - warped_base=dict(), - warped_source=dict(), - weights=dict(), + base_warp=dict(extensions=None, ), + source_warp=dict(extensions=None, ), + warped_base=dict(extensions=None, ), + warped_source=dict(extensions=None, ), + weights=dict(extensions=None, ), ) outputs = QwarpPlusMinus.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_ROIStats.py b/nipype/interfaces/afni/tests/test_auto_ROIStats.py index fbda92e9a5..d3c956f7c5 100644 --- a/nipype/interfaces/afni/tests/test_auto_ROIStats.py +++ b/nipype/interfaces/afni/tests/test_auto_ROIStats.py @@ -21,22 +21,28 @@ def test_ROIStats_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), mask=dict( argstr='-mask %s', deprecated='1.1.4', + extensions=None, new_name='mask_file', position=3, ), mask_f2short=dict(argstr='-mask_f2short', ), - mask_file=dict(argstr='-mask %s', ), + mask_file=dict( + argstr='-mask %s', + extensions=None, + ), nobriklab=dict(argstr='-nobriklab', ), nomeanout=dict(argstr='-nomeanout', ), num_roi=dict(argstr='-numroi %s', ), out_file=dict( argstr='> %s', + extensions=None, keep_extension=False, name_source='in_file', name_template='%s_roistat.1D', @@ -56,7 +62,7 @@ def test_ROIStats_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ROIStats_outputs(): - output_map = 
dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = ROIStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_ReHo.py b/nipype/interfaces/afni/tests/test_auto_ReHo.py index 1809759485..0edcedcdaf 100644 --- a/nipype/interfaces/afni/tests/test_auto_ReHo.py +++ b/nipype/interfaces/afni/tests/test_auto_ReHo.py @@ -17,10 +17,14 @@ def test_ReHo_inputs(): ), in_file=dict( argstr='-inset %s', + extensions=None, mandatory=True, position=1, ), - label_set=dict(argstr='-in_rois %s', ), + label_set=dict( + argstr='-in_rois %s', + extensions=None, + ), mask_file=dict(argstr='-mask %s', ), neighborhood=dict( argstr='-nneigh %s', @@ -46,8 +50,8 @@ def test_ReHo_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_ReHo_outputs(): output_map = dict( - out_file=dict(), - out_vals=dict(), + out_file=dict(extensions=None, ), + out_vals=dict(extensions=None, ), ) outputs = ReHo.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Refit.py b/nipype/interfaces/afni/tests/test_auto_Refit.py index 4983eafc81..c289f94ce5 100644 --- a/nipype/interfaces/afni/tests/test_auto_Refit.py +++ b/nipype/interfaces/afni/tests/test_auto_Refit.py @@ -11,7 +11,10 @@ def test_Refit_inputs(): atrint=dict(argstr='-atrint %s %s', ), atrstring=dict(argstr='-atrstring %s %s', ), deoblique=dict(argstr='-deoblique', ), - duporigin_file=dict(argstr='-duporigin %s', ), + duporigin_file=dict( + argstr='-duporigin %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, @@ -19,6 +22,7 @@ def test_Refit_inputs(): in_file=dict( argstr='%s', copyfile=True, + extensions=None, mandatory=True, position=-1, ), @@ -39,7 +43,7 @@ def test_Refit_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Refit_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Refit.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Remlfit.py b/nipype/interfaces/afni/tests/test_auto_Remlfit.py index 4cdc8b2ff2..f2e6703bf6 100644 --- a/nipype/interfaces/afni/tests/test_auto_Remlfit.py +++ b/nipype/interfaces/afni/tests/test_auto_Remlfit.py @@ -5,7 +5,10 @@ def test_Remlfit_inputs(): input_map = dict( - STATmask=dict(argstr='-STATmask %s', ), + STATmask=dict( + argstr='-STATmask %s', + extensions=None, + ), addbase=dict( argstr='-addbase %s', copyfile=False, @@ -19,6 +22,7 @@ def test_Remlfit_inputs(): dsort=dict( argstr='-dsort %s', copyfile=False, + extensions=None, ), dsort_nods=dict( argstr='-dsort_nods', @@ -28,10 +32,19 @@ def test_Remlfit_inputs(): nohash=True, usedefault=True, ), - errts_file=dict(argstr='-Rerrts %s', ), - fitts_file=dict(argstr='-Rfitts %s', ), + errts_file=dict( + argstr='-Rerrts %s', + extensions=None, + ), + fitts_file=dict( + argstr='-Rfitts %s', + extensions=None, + ), fout=dict(argstr='-fout', ), - glt_file=dict(argstr='-Rglt %s', ), + glt_file=dict( + argstr='-Rglt %s', + extensions=None, + ), gltsym=dict(argstr='-gltsym "%s" %s...', ), in_files=dict( argstr='-input "%s"', @@ -39,13 +52,17 @@ def test_Remlfit_inputs(): mandatory=True, sep=' ', ), - mask=dict(argstr='-mask %s', ), + mask=dict( + argstr='-mask %s', + extensions=None, + ), matim=dict( argstr='-matim %s', xor=['matrix'], ), matrix=dict( argstr='-matrix %s', + extensions=None, mandatory=True, ), nobout=dict(argstr='-nobout', ), @@ -58,28 
+75,58 @@ def test_Remlfit_inputs(): nohash=True, usedefault=True, ), - obeta=dict(argstr='-Obeta %s', ), - obuck=dict(argstr='-Obuck %s', ), - oerrts=dict(argstr='-Oerrts %s', ), - ofitts=dict(argstr='-Ofitts %s', ), - oglt=dict(argstr='-Oglt %s', ), - out_file=dict(argstr='-Rbuck %s', ), + obeta=dict( + argstr='-Obeta %s', + extensions=None, + ), + obuck=dict( + argstr='-Obuck %s', + extensions=None, + ), + oerrts=dict( + argstr='-Oerrts %s', + extensions=None, + ), + ofitts=dict( + argstr='-Ofitts %s', + extensions=None, + ), + oglt=dict( + argstr='-Oglt %s', + extensions=None, + ), + out_file=dict( + argstr='-Rbuck %s', + extensions=None, + ), outputtype=dict(), - ovar=dict(argstr='-Ovar %s', ), + ovar=dict( + argstr='-Ovar %s', + extensions=None, + ), polort=dict( argstr='-polort %d', xor=['matrix'], ), quiet=dict(argstr='-quiet', ), - rbeta_file=dict(argstr='-Rbeta %s', ), + rbeta_file=dict( + argstr='-Rbeta %s', + extensions=None, + ), rout=dict(argstr='-rout', ), slibase=dict(argstr='-slibase %s', ), slibase_sm=dict(argstr='-slibase_sm %s', ), tout=dict(argstr='-tout', ), usetemp=dict(argstr='-usetemp', ), - var_file=dict(argstr='-Rvar %s', ), + var_file=dict( + argstr='-Rvar %s', + extensions=None, + ), verb=dict(argstr='-verb', ), - wherr_file=dict(argstr='-Rwherr %s', ), + wherr_file=dict( + argstr='-Rwherr %s', + extensions=None, + ), ) inputs = Remlfit.input_spec() @@ -88,19 +135,19 @@ def test_Remlfit_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Remlfit_outputs(): output_map = dict( - errts_file=dict(), - fitts_file=dict(), - glt_file=dict(), - obeta=dict(), - obuck=dict(), - oerrts=dict(), - ofitts=dict(), - oglt=dict(), - out_file=dict(), - ovar=dict(), - rbeta_file=dict(), - var_file=dict(), - wherr_file=dict(), + errts_file=dict(extensions=None, ), + fitts_file=dict(extensions=None, ), + glt_file=dict(extensions=None, ), + obeta=dict(extensions=None, ), + obuck=dict(extensions=None, ), + oerrts=dict(extensions=None, ), + ofitts=dict(extensions=None, ), + oglt=dict(extensions=None, ), + out_file=dict(extensions=None, ), + ovar=dict(extensions=None, ), + rbeta_file=dict(extensions=None, ), + var_file=dict(extensions=None, ), + wherr_file=dict(extensions=None, ), ) outputs = Remlfit.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Resample.py b/nipype/interfaces/afni/tests/test_auto_Resample.py index 9ab2309307..560d883d75 100644 --- a/nipype/interfaces/afni/tests/test_auto_Resample.py +++ b/nipype/interfaces/afni/tests/test_auto_Resample.py @@ -13,6 +13,7 @@ def test_Resample_inputs(): in_file=dict( argstr='-inset %s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), @@ -24,6 +25,7 @@ def test_Resample_inputs(): orientation=dict(argstr='-orient %s', ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_file', name_template='%s_resample', ), @@ -37,7 +39,7 @@ def test_Resample_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Resample_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Resample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Retroicor.py b/nipype/interfaces/afni/tests/test_auto_Retroicor.py index 4c2a1c2393..49f3c5ec98 100644 --- a/nipype/interfaces/afni/tests/test_auto_Retroicor.py +++ b/nipype/interfaces/afni/tests/test_auto_Retroicor.py @@ -8,10 +8,12 @@ def 
test_Retroicor_inputs(): args=dict(argstr='%s', ), card=dict( argstr='-card %s', + extensions=None, position=-2, ), cardphase=dict( argstr='-cardphase %s', + extensions=None, hash_files=False, position=-6, ), @@ -22,6 +24,7 @@ def test_Retroicor_inputs(): in_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), @@ -35,6 +38,7 @@ def test_Retroicor_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source=['in_file'], name_template='%s_retroicor', position=1, @@ -42,10 +46,12 @@ def test_Retroicor_inputs(): outputtype=dict(), resp=dict( argstr='-resp %s', + extensions=None, position=-3, ), respphase=dict( argstr='-respphase %s', + extensions=None, hash_files=False, position=-7, ), @@ -60,7 +66,7 @@ def test_Retroicor_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Retroicor_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Retroicor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_SVMTest.py b/nipype/interfaces/afni/tests/test_auto_SVMTest.py index 4b31d3a11c..98654e36f3 100644 --- a/nipype/interfaces/afni/tests/test_auto_SVMTest.py +++ b/nipype/interfaces/afni/tests/test_auto_SVMTest.py @@ -13,6 +13,7 @@ def test_SVMTest_inputs(): ), in_file=dict( argstr='-testvol %s', + extensions=None, mandatory=True, ), model=dict( @@ -29,10 +30,14 @@ def test_SVMTest_inputs(): options=dict(argstr='%s', ), out_file=dict( argstr='-predictions %s', + extensions=None, name_template='%s_predictions', ), outputtype=dict(), - testlabels=dict(argstr='-testlabels %s', ), + testlabels=dict( + argstr='-testlabels %s', + extensions=None, + ), ) inputs = SVMTest.input_spec() @@ -40,7 +45,7 @@ def test_SVMTest_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SVMTest_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = SVMTest.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_SVMTrain.py b/nipype/interfaces/afni/tests/test_auto_SVMTrain.py index ddb3f8c05f..03e2d95e52 100644 --- a/nipype/interfaces/afni/tests/test_auto_SVMTrain.py +++ b/nipype/interfaces/afni/tests/test_auto_SVMTrain.py @@ -7,12 +7,16 @@ def test_SVMTrain_inputs(): input_map = dict( alphas=dict( argstr='-alpha %s', + extensions=None, name_source='in_file', name_template='%s_alphas', suffix='_alphas', ), args=dict(argstr='%s', ), - censor=dict(argstr='-censor %s', ), + censor=dict( + argstr='-censor %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, @@ -20,17 +24,20 @@ def test_SVMTrain_inputs(): in_file=dict( argstr='-trainvol %s', copyfile=False, + extensions=None, mandatory=True, ), kernel=dict(argstr='-kernel %s', ), mask=dict( argstr='-mask %s', copyfile=False, + extensions=None, position=-1, ), max_iterations=dict(argstr='-max_iterations %d', ), model=dict( argstr='-model %s', + extensions=None, name_source='in_file', name_template='%s_model', suffix='_model', @@ -43,12 +50,16 @@ def test_SVMTrain_inputs(): options=dict(argstr='%s', ), out_file=dict( argstr='-bucket %s', + extensions=None, name_source='in_file', name_template='%s_vectors', suffix='_bucket', ), outputtype=dict(), - trainlabels=dict(argstr='-trainlabels %s', ), + trainlabels=dict( + argstr='-trainlabels %s', + 
extensions=None, + ), ttype=dict( argstr='-type %s', mandatory=True, @@ -62,9 +73,9 @@ def test_SVMTrain_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_SVMTrain_outputs(): output_map = dict( - alphas=dict(), - model=dict(), - out_file=dict(), + alphas=dict(extensions=None, ), + model=dict(extensions=None, ), + out_file=dict(extensions=None, ), ) outputs = SVMTrain.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Seg.py b/nipype/interfaces/afni/tests/test_auto_Seg.py index e93d81a7aa..56720a7e38 100644 --- a/nipype/interfaces/afni/tests/test_auto_Seg.py +++ b/nipype/interfaces/afni/tests/test_auto_Seg.py @@ -18,6 +18,7 @@ def test_Seg_inputs(): in_file=dict( argstr='-anat %s', copyfile=True, + extensions=None, mandatory=True, position=-1, ), @@ -37,7 +38,7 @@ def test_Seg_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Seg_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Seg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_SkullStrip.py b/nipype/interfaces/afni/tests/test_auto_SkullStrip.py index e960369787..6fc8d3ad3d 100644 --- a/nipype/interfaces/afni/tests/test_auto_SkullStrip.py +++ b/nipype/interfaces/afni/tests/test_auto_SkullStrip.py @@ -13,6 +13,7 @@ def test_SkullStrip_inputs(): in_file=dict( argstr='-input %s', copyfile=False, + extensions=None, mandatory=True, position=1, ), @@ -22,6 +23,7 @@ def test_SkullStrip_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_file', name_template='%s_skullstrip', ), @@ -33,7 +35,7 @@ def test_SkullStrip_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SkullStrip_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = SkullStrip.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Synthesize.py b/nipype/interfaces/afni/tests/test_auto_Synthesize.py index 87278098fb..8c7ee9afaf 100644 --- a/nipype/interfaces/afni/tests/test_auto_Synthesize.py +++ b/nipype/interfaces/afni/tests/test_auto_Synthesize.py @@ -10,6 +10,7 @@ def test_Synthesize_inputs(): cbucket=dict( argstr='-cbucket %s', copyfile=False, + extensions=None, mandatory=True, ), cenfill=dict(argstr='-cenfill %s', ), @@ -21,6 +22,7 @@ def test_Synthesize_inputs(): matrix=dict( argstr='-matrix %s', copyfile=False, + extensions=None, mandatory=True, ), num_threads=dict( @@ -29,6 +31,7 @@ def test_Synthesize_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_template='syn', ), outputtype=dict(), @@ -43,7 +46,7 @@ def test_Synthesize_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Synthesize_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Synthesize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TCat.py b/nipype/interfaces/afni/tests/test_auto_TCat.py index 5c51b02637..042916893e 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCat.py +++ b/nipype/interfaces/afni/tests/test_auto_TCat.py @@ -22,6 +22,7 @@ def test_TCat_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, 
name_source='in_files', name_template='%s_tcat', ), @@ -38,7 +39,7 @@ def test_TCat_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TCat_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = TCat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py b/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py index 09b21ea408..4f60118e35 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py +++ b/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py @@ -22,6 +22,7 @@ def test_TCatSubBrick_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, genfile=True, ), outputtype=dict(), @@ -36,7 +37,7 @@ def test_TCatSubBrick_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TCatSubBrick_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = TCatSubBrick.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TCorr1D.py b/nipype/interfaces/afni/tests/test_auto_TCorr1D.py index e1b23a3387..5fe7fffb81 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCorr1D.py +++ b/nipype/interfaces/afni/tests/test_auto_TCorr1D.py @@ -21,6 +21,7 @@ def test_TCorr1D_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, keep_extension=True, name_source='xset', name_template='%s_correlation.nii.gz', @@ -44,11 +45,13 @@ def test_TCorr1D_inputs(): xset=dict( argstr=' %s', copyfile=False, + extensions=None, mandatory=True, position=-2, ), y_1d=dict( argstr=' %s', + extensions=None, mandatory=True, position=-1, ), @@ -59,7 +62,7 @@ def test_TCorr1D_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TCorr1D_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = TCorr1D.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TCorrMap.py b/nipype/interfaces/afni/tests/test_auto_TCorrMap.py index 32778fcf11..1ea4c0790e 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCorrMap.py +++ b/nipype/interfaces/afni/tests/test_auto_TCorrMap.py @@ -7,6 +7,7 @@ def test_TCorrMap_inputs(): input_map = dict( absolute_threshold=dict( argstr='-Thresh %f %s', + extensions=None, name_source='in_file', suffix='_thresh', xor=('absolute_threshold', 'var_absolute_threshold', @@ -16,12 +17,14 @@ def test_TCorrMap_inputs(): automask=dict(argstr='-automask', ), average_expr=dict( argstr='-Aexpr %s %s', + extensions=None, name_source='in_file', suffix='_aexpr', xor=('average_expr', 'average_expr_nonzero', 'sum_expr'), ), average_expr_nonzero=dict( argstr='-Cexpr %s %s', + extensions=None, name_source='in_file', suffix='_cexpr', xor=('average_expr', 'average_expr_nonzero', 'sum_expr'), @@ -30,10 +33,12 @@ def test_TCorrMap_inputs(): blur_fwhm=dict(argstr='-Gblur %f', ), correlation_maps=dict( argstr='-CorrMap %s', + extensions=None, name_source='in_file', ), correlation_maps_masked=dict( argstr='-CorrMask %s', + extensions=None, name_source='in_file', ), environ=dict( @@ -43,6 +48,7 @@ def test_TCorrMap_inputs(): expr=dict(), histogram=dict( argstr='-Hist %d %s', + extensions=None, name_source='in_file', suffix='_hist', ), @@ -50,11 
+56,16 @@ def test_TCorrMap_inputs(): in_file=dict( argstr='-input %s', copyfile=False, + extensions=None, mandatory=True, ), - mask=dict(argstr='-mask %s', ), + mask=dict( + argstr='-mask %s', + extensions=None, + ), mean_file=dict( argstr='-Mean %s', + extensions=None, name_source='in_file', suffix='_mean', ), @@ -64,24 +75,28 @@ def test_TCorrMap_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source=['in_file'], name_template='%s_afni', ), outputtype=dict(), pmean=dict( argstr='-Pmean %s', + extensions=None, name_source='in_file', suffix='_pmean', ), polort=dict(argstr='-polort %d', ), qmean=dict( argstr='-Qmean %s', + extensions=None, name_source='in_file', suffix='_qmean', ), regress_out_timeseries=dict(argstr='-ort %s', ), seeds=dict( argstr='-seed %s', + extensions=None, xor='seeds_width', ), seeds_width=dict( @@ -90,6 +105,7 @@ def test_TCorrMap_inputs(): ), sum_expr=dict( argstr='-Sexpr %s %s', + extensions=None, name_source='in_file', suffix='_sexpr', xor=('average_expr', 'average_expr_nonzero', 'sum_expr'), @@ -97,6 +113,7 @@ def test_TCorrMap_inputs(): thresholds=dict(), var_absolute_threshold=dict( argstr='-VarThresh %f %f %f %s', + extensions=None, name_source='in_file', suffix='_varthresh', xor=('absolute_threshold', 'var_absolute_threshold', @@ -104,6 +121,7 @@ def test_TCorrMap_inputs(): ), var_absolute_threshold_normalize=dict( argstr='-VarThreshN %f %f %f %s', + extensions=None, name_source='in_file', suffix='_varthreshn', xor=('absolute_threshold', 'var_absolute_threshold', @@ -111,6 +129,7 @@ def test_TCorrMap_inputs(): ), zmean=dict( argstr='-Zmean %s', + extensions=None, name_source='in_file', suffix='_zmean', ), @@ -122,19 +141,19 @@ def test_TCorrMap_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_TCorrMap_outputs(): output_map = dict( - absolute_threshold=dict(), - average_expr=dict(), - average_expr_nonzero=dict(), - correlation_maps=dict(), - correlation_maps_masked=dict(), - histogram=dict(), - mean_file=dict(), - pmean=dict(), - qmean=dict(), - sum_expr=dict(), - var_absolute_threshold=dict(), - var_absolute_threshold_normalize=dict(), - zmean=dict(), + absolute_threshold=dict(extensions=None, ), + average_expr=dict(extensions=None, ), + average_expr_nonzero=dict(extensions=None, ), + correlation_maps=dict(extensions=None, ), + correlation_maps_masked=dict(extensions=None, ), + histogram=dict(extensions=None, ), + mean_file=dict(extensions=None, ), + pmean=dict(extensions=None, ), + qmean=dict(extensions=None, ), + sum_expr=dict(extensions=None, ), + var_absolute_threshold=dict(extensions=None, ), + var_absolute_threshold_normalize=dict(extensions=None, ), + zmean=dict(extensions=None, ), ) outputs = TCorrMap.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_TCorrelate.py b/nipype/interfaces/afni/tests/test_auto_TCorrelate.py index 8d773f429e..fd3569b0bf 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCorrelate.py +++ b/nipype/interfaces/afni/tests/test_auto_TCorrelate.py @@ -16,6 +16,7 @@ def test_TCorrelate_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='xset', name_template='%s_tcorr', ), @@ -25,12 +26,14 @@ def test_TCorrelate_inputs(): xset=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-2, ), yset=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), @@ -41,7 +44,7 @@ def test_TCorrelate_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) 
== value def test_TCorrelate_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = TCorrelate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TNorm.py b/nipype/interfaces/afni/tests/test_auto_TNorm.py index 3fb246684b..a8ba790aa1 100644 --- a/nipype/interfaces/afni/tests/test_auto_TNorm.py +++ b/nipype/interfaces/afni/tests/test_auto_TNorm.py @@ -14,6 +14,7 @@ def test_TNorm_inputs(): in_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), @@ -27,6 +28,7 @@ def test_TNorm_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_file', name_template='%s_tnorm', ), @@ -39,7 +41,7 @@ def test_TNorm_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TNorm_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = TNorm.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TProject.py b/nipype/interfaces/afni/tests/test_auto_TProject.py index ba4efd6189..66f7caadc4 100644 --- a/nipype/interfaces/afni/tests/test_auto_TProject.py +++ b/nipype/interfaces/afni/tests/test_auto_TProject.py @@ -14,9 +14,15 @@ def test_TProject_inputs(): bandpass=dict(argstr='-bandpass %g %g', ), blur=dict(argstr='-blur %g', ), cenmode=dict(argstr='-cenmode %s', ), - censor=dict(argstr='-censor %s', ), + censor=dict( + argstr='-censor %s', + extensions=None, + ), censortr=dict(argstr='-CENSORTR %s', ), - concat=dict(argstr='-concat %s', ), + concat=dict( + argstr='-concat %s', + extensions=None, + ), dsort=dict(argstr='-dsort %s...', ), environ=dict( nohash=True, @@ -25,19 +31,27 @@ def test_TProject_inputs(): in_file=dict( argstr='-input %s', copyfile=False, + extensions=None, mandatory=True, position=1, ), - mask=dict(argstr='-mask %s', ), + mask=dict( + argstr='-mask %s', + extensions=None, + ), noblock=dict(argstr='-noblock', ), norm=dict(argstr='-norm', ), num_threads=dict( nohash=True, usedefault=True, ), - ort=dict(argstr='-ort %s', ), + ort=dict( + argstr='-ort %s', + extensions=None, + ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_file', name_template='%s_tproject', position=-1, @@ -52,7 +66,7 @@ def test_TProject_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TProject_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = TProject.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TShift.py b/nipype/interfaces/afni/tests/test_auto_TShift.py index b267986952..14d58011a9 100644 --- a/nipype/interfaces/afni/tests/test_auto_TShift.py +++ b/nipype/interfaces/afni/tests/test_auto_TShift.py @@ -14,6 +14,7 @@ def test_TShift_inputs(): in_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), @@ -24,6 +25,7 @@ def test_TShift_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_file', name_template='%s_tshift', ), @@ -56,8 +58,8 @@ def test_TShift_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_TShift_outputs(): output_map = dict( - out_file=dict(), - timing_file=dict(), + out_file=dict(extensions=None, ), + 
timing_file=dict(extensions=None, ), ) outputs = TShift.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_TStat.py b/nipype/interfaces/afni/tests/test_auto_TStat.py index 5a3ebd8a21..8f2da6aeff 100644 --- a/nipype/interfaces/afni/tests/test_auto_TStat.py +++ b/nipype/interfaces/afni/tests/test_auto_TStat.py @@ -13,10 +13,14 @@ def test_TStat_inputs(): in_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), - mask=dict(argstr='-mask %s', ), + mask=dict( + argstr='-mask %s', + extensions=None, + ), num_threads=dict( nohash=True, usedefault=True, @@ -24,6 +28,7 @@ def test_TStat_inputs(): options=dict(argstr='%s', ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_file', name_template='%s_tstat', ), @@ -35,7 +40,7 @@ def test_TStat_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TStat_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = TStat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_To3D.py b/nipype/interfaces/afni/tests/test_auto_To3D.py index 66ccabefcb..90946f4e79 100644 --- a/nipype/interfaces/afni/tests/test_auto_To3D.py +++ b/nipype/interfaces/afni/tests/test_auto_To3D.py @@ -25,6 +25,7 @@ def test_To3D_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source=['in_folder'], name_template='%s', ), @@ -37,7 +38,7 @@ def test_To3D_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_To3D_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = To3D.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Undump.py b/nipype/interfaces/afni/tests/test_auto_Undump.py index 07d0ff8e81..fad4bfc94f 100644 --- a/nipype/interfaces/afni/tests/test_auto_Undump.py +++ b/nipype/interfaces/afni/tests/test_auto_Undump.py @@ -18,10 +18,14 @@ def test_Undump_inputs(): in_file=dict( argstr='-master %s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), - mask_file=dict(argstr='-mask %s', ), + mask_file=dict( + argstr='-mask %s', + extensions=None, + ), num_threads=dict( nohash=True, usedefault=True, @@ -29,6 +33,7 @@ def test_Undump_inputs(): orient=dict(argstr='-orient %s', ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_file', ), outputtype=dict(), @@ -40,7 +45,7 @@ def test_Undump_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Undump_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Undump.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Unifize.py b/nipype/interfaces/afni/tests/test_auto_Unifize.py index 3215d51e64..77b0bed4df 100644 --- a/nipype/interfaces/afni/tests/test_auto_Unifize.py +++ b/nipype/interfaces/afni/tests/test_auto_Unifize.py @@ -20,6 +20,7 @@ def test_Unifize_inputs(): in_file=dict( argstr='-input %s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), @@ -30,13 +31,17 @@ def test_Unifize_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_file', name_template='%s_unifized', ), outputtype=dict(), 
quiet=dict(argstr='-quiet', ), rbt=dict(argstr='-rbt %f %f %f', ), - scale_file=dict(argstr='-ssave %s', ), + scale_file=dict( + argstr='-ssave %s', + extensions=None, + ), t2=dict(argstr='-T2', ), t2_up=dict(argstr='-T2up %f', ), urad=dict(argstr='-Urad %s', ), @@ -48,8 +53,8 @@ def test_Unifize_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Unifize_outputs(): output_map = dict( - out_file=dict(), - scale_file=dict(), + out_file=dict(extensions=None, ), + scale_file=dict(extensions=None, ), ) outputs = Unifize.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Volreg.py b/nipype/interfaces/afni/tests/test_auto_Volreg.py index 293b7613e8..8aba661319 100644 --- a/nipype/interfaces/afni/tests/test_auto_Volreg.py +++ b/nipype/interfaces/afni/tests/test_auto_Volreg.py @@ -8,6 +8,7 @@ def test_Volreg_inputs(): args=dict(argstr='%s', ), basefile=dict( argstr='-base %s', + extensions=None, position=-6, ), copyorigin=dict(argstr='-twodup', ), @@ -18,6 +19,7 @@ def test_Volreg_inputs(): in_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), @@ -25,6 +27,7 @@ def test_Volreg_inputs(): interp=dict(argstr='-%s', ), md1d_file=dict( argstr='-maxdisp1D %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_md.1D', @@ -36,18 +39,21 @@ def test_Volreg_inputs(): ), oned_file=dict( argstr='-1Dfile %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s.1D', ), oned_matrix_save=dict( argstr='-1Dmatrix_save %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s.aff12.1D', ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_file', name_template='%s_volreg', ), @@ -66,10 +72,10 @@ def test_Volreg_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Volreg_outputs(): output_map = dict( - md1d_file=dict(), - oned_file=dict(), - oned_matrix_save=dict(), - out_file=dict(), + md1d_file=dict(extensions=None, ), + oned_file=dict(extensions=None, ), + oned_matrix_save=dict(extensions=None, ), + out_file=dict(extensions=None, ), ) outputs = Volreg.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Warp.py b/nipype/interfaces/afni/tests/test_auto_Warp.py index b85692310a..024d979f41 100644 --- a/nipype/interfaces/afni/tests/test_auto_Warp.py +++ b/nipype/interfaces/afni/tests/test_auto_Warp.py @@ -11,24 +11,35 @@ def test_Warp_inputs(): nohash=True, usedefault=True, ), - gridset=dict(argstr='-gridset %s', ), + gridset=dict( + argstr='-gridset %s', + extensions=None, + ), in_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), interp=dict(argstr='-%s', ), - matparent=dict(argstr='-matparent %s', ), + matparent=dict( + argstr='-matparent %s', + extensions=None, + ), mni2tta=dict(argstr='-mni2tta', ), newgrid=dict(argstr='-newgrid %f', ), num_threads=dict( nohash=True, usedefault=True, ), - oblique_parent=dict(argstr='-oblique_parent %s', ), + oblique_parent=dict( + argstr='-oblique_parent %s', + extensions=None, + ), out_file=dict( argstr='-prefix %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_warp', @@ -46,8 +57,8 @@ def test_Warp_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Warp_outputs(): output_map = dict( - out_file=dict(), - warp_file=dict(), + out_file=dict(extensions=None, ), + warp_file=dict(extensions=None, ), ) outputs = Warp.output_spec() diff --git 
a/nipype/interfaces/afni/tests/test_auto_ZCutUp.py b/nipype/interfaces/afni/tests/test_auto_ZCutUp.py index cacb6b1534..2942acad85 100644 --- a/nipype/interfaces/afni/tests/test_auto_ZCutUp.py +++ b/nipype/interfaces/afni/tests/test_auto_ZCutUp.py @@ -13,6 +13,7 @@ def test_ZCutUp_inputs(): in_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), @@ -23,6 +24,7 @@ def test_ZCutUp_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_file', name_template='%s_zcutup', ), @@ -34,7 +36,7 @@ def test_ZCutUp_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ZCutUp_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = ZCutUp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Zcat.py b/nipype/interfaces/afni/tests/test_auto_Zcat.py index 3a2b7ff853..13675f61b9 100644 --- a/nipype/interfaces/afni/tests/test_auto_Zcat.py +++ b/nipype/interfaces/afni/tests/test_auto_Zcat.py @@ -31,6 +31,7 @@ def test_Zcat_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_source='in_files', name_template='%s_zcat', ), @@ -43,7 +44,7 @@ def test_Zcat_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Zcat_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Zcat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Zeropad.py b/nipype/interfaces/afni/tests/test_auto_Zeropad.py index cfba8e2f3c..abeceda432 100644 --- a/nipype/interfaces/afni/tests/test_auto_Zeropad.py +++ b/nipype/interfaces/afni/tests/test_auto_Zeropad.py @@ -49,6 +49,7 @@ def test_Zeropad_inputs(): in_files=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), @@ -66,6 +67,7 @@ def test_Zeropad_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, name_template='zeropad', ), outputtype=dict(), @@ -80,7 +82,7 @@ def test_Zeropad_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Zeropad_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Zeropad.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_ANTS.py b/nipype/interfaces/ants/tests/test_auto_ANTS.py index 7c7ef6682c..5c6fa2c501 100644 --- a/nipype/interfaces/ants/tests/test_auto_ANTS.py +++ b/nipype/interfaces/ants/tests/test_auto_ANTS.py @@ -84,11 +84,11 @@ def test_ANTS_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_ANTS_outputs(): output_map = dict( - affine_transform=dict(), - inverse_warp_transform=dict(), - metaheader=dict(), - metaheader_raw=dict(), - warp_transform=dict(), + affine_transform=dict(extensions=None, ), + inverse_warp_transform=dict(extensions=None, ), + metaheader=dict(extensions=None, ), + metaheader_raw=dict(extensions=None, ), + warp_transform=dict(extensions=None, ), ) outputs = ANTS.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py b/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py index fed21cdbef..e2e1d3d53e 100644 --- a/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py +++ 
b/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py @@ -17,6 +17,7 @@ def test_AffineInitializer_inputs(): ), fixed_image=dict( argstr='%s', + extensions=None, mandatory=True, position=1, ), @@ -27,6 +28,7 @@ def test_AffineInitializer_inputs(): ), moving_image=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -36,6 +38,7 @@ def test_AffineInitializer_inputs(): ), out_file=dict( argstr='%s', + extensions=None, position=3, usedefault=True, ), @@ -61,7 +64,7 @@ def test_AffineInitializer_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_AffineInitializer_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = AffineInitializer.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py index 341f250a2c..02ebe6431c 100644 --- a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py +++ b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py @@ -36,7 +36,10 @@ def test_AntsJointFusion_inputs(): argstr='-e %s', requires=['exclusion_image'], ), - mask_image=dict(argstr='-x %s', ), + mask_image=dict( + argstr='-x %s', + extensions=None, + ), num_threads=dict( nohash=True, usedefault=True, @@ -49,6 +52,7 @@ def test_AntsJointFusion_inputs(): out_intensity_fusion_name_format=dict(argstr='', ), out_label_fusion=dict( argstr='%s', + extensions=None, hash_files=False, ), out_label_post_prob_name_format=dict( @@ -88,7 +92,7 @@ def test_AntsJointFusion_outputs(): output_map = dict( out_atlas_voting_weight_name_format=dict(), out_intensity_fusion_name_format=dict(), - out_label_fusion=dict(), + out_label_fusion=dict(extensions=None, ), out_label_post_prob_name_format=dict(), ) outputs = AntsJointFusion.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py b/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py index c2bbffa14f..d431323c0b 100644 --- a/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py +++ b/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py @@ -21,6 +21,7 @@ def test_ApplyTransforms_inputs(): ), input_image=dict( argstr='--input %s', + extensions=None, mandatory=True, ), input_image_type=dict(argstr='--input-image-type %d', ), @@ -43,6 +44,7 @@ def test_ApplyTransforms_inputs(): print_out_composite_warp_file=dict(requires=['output_image'], ), reference_image=dict( argstr='--reference-image %s', + extensions=None, mandatory=True, ), transforms=dict( @@ -56,7 +58,7 @@ def test_ApplyTransforms_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ApplyTransforms_outputs(): - output_map = dict(output_image=dict(), ) + output_map = dict(output_image=dict(extensions=None, ), ) outputs = ApplyTransforms.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py b/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py index 92c6c21ec1..c0b8f2d8f4 100644 --- a/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py +++ b/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py @@ -13,6 +13,7 @@ def test_ApplyTransformsToPoints_inputs(): ), input_file=dict( argstr='--input %s', + extensions=None, mandatory=True, ), invert_transform_flags=dict(), @@ -37,7 +38,7 @@ def 
test_ApplyTransformsToPoints_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ApplyTransformsToPoints_outputs(): - output_map = dict(output_file=dict(), ) + output_map = dict(output_file=dict(extensions=None, ), ) outputs = ApplyTransformsToPoints.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_Atropos.py b/nipype/interfaces/ants/tests/test_auto_Atropos.py index 90bc13778a..be1a271f9a 100644 --- a/nipype/interfaces/ants/tests/test_auto_Atropos.py +++ b/nipype/interfaces/ants/tests/test_auto_Atropos.py @@ -28,6 +28,7 @@ def test_Atropos_inputs(): likelihood_model=dict(argstr='--likelihood-model %s', ), mask_image=dict( argstr='--mask-image %s', + extensions=None, mandatory=True, ), maximum_number_of_icm_terations=dict( @@ -42,6 +43,7 @@ def test_Atropos_inputs(): number_of_tissue_classes=dict(mandatory=True, ), out_classified_image_name=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, ), @@ -65,7 +67,7 @@ def test_Atropos_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Atropos_outputs(): output_map = dict( - classified_image=dict(), + classified_image=dict(extensions=None, ), posteriors=dict(), ) outputs = Atropos.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py b/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py index 3fe3abe5cf..a8646c482c 100644 --- a/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py @@ -21,6 +21,7 @@ def test_AverageAffineTransform_inputs(): ), output_affine_transform=dict( argstr='%s', + extensions=None, mandatory=True, position=1, ), @@ -36,7 +37,7 @@ def test_AverageAffineTransform_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_AverageAffineTransform_outputs(): - output_map = dict(affine_transform=dict(), ) + output_map = dict(affine_transform=dict(extensions=None, ), ) outputs = AverageAffineTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_AverageImages.py b/nipype/interfaces/ants/tests/test_auto_AverageImages.py index 41e0c99007..fff94b8ec2 100644 --- a/nipype/interfaces/ants/tests/test_auto_AverageImages.py +++ b/nipype/interfaces/ants/tests/test_auto_AverageImages.py @@ -31,6 +31,7 @@ def test_AverageImages_inputs(): ), output_average_image=dict( argstr='%s', + extensions=None, hash_files=False, position=1, usedefault=True, @@ -42,7 +43,7 @@ def test_AverageImages_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_AverageImages_outputs(): - output_map = dict(output_average_image=dict(), ) + output_map = dict(output_average_image=dict(extensions=None, ), ) outputs = AverageImages.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py b/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py index 2d48192199..91861ac0b9 100644 --- a/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py +++ b/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py @@ -7,16 +7,19 @@ def test_BrainExtraction_inputs(): input_map = dict( anatomical_image=dict( argstr='-a %s', + extensions=None, mandatory=True, ), args=dict(argstr='%s', ), brain_probability_mask=dict( 
argstr='-m %s', copyfile=False, + extensions=None, mandatory=True, ), brain_template=dict( argstr='-e %s', + extensions=None, mandatory=True, ), debug=dict(argstr='-z 1', ), @@ -28,7 +31,10 @@ def test_BrainExtraction_inputs(): nohash=True, usedefault=True, ), - extraction_registration_mask=dict(argstr='-f %s', ), + extraction_registration_mask=dict( + argstr='-f %s', + extensions=None, + ), image_suffix=dict( argstr='-s %s', usedefault=True, @@ -52,24 +58,24 @@ def test_BrainExtraction_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_BrainExtraction_outputs(): output_map = dict( - BrainExtractionBrain=dict(), - BrainExtractionCSF=dict(), - BrainExtractionGM=dict(), - BrainExtractionInitialAffine=dict(), - BrainExtractionInitialAffineFixed=dict(), - BrainExtractionInitialAffineMoving=dict(), - BrainExtractionLaplacian=dict(), - BrainExtractionMask=dict(), - BrainExtractionPrior0GenericAffine=dict(), - BrainExtractionPrior1InverseWarp=dict(), - BrainExtractionPrior1Warp=dict(), - BrainExtractionPriorWarped=dict(), - BrainExtractionSegmentation=dict(), - BrainExtractionTemplateLaplacian=dict(), - BrainExtractionTmp=dict(), - BrainExtractionWM=dict(), - N4Corrected0=dict(), - N4Truncated0=dict(), + BrainExtractionBrain=dict(extensions=None, ), + BrainExtractionCSF=dict(extensions=None, ), + BrainExtractionGM=dict(extensions=None, ), + BrainExtractionInitialAffine=dict(extensions=None, ), + BrainExtractionInitialAffineFixed=dict(extensions=None, ), + BrainExtractionInitialAffineMoving=dict(extensions=None, ), + BrainExtractionLaplacian=dict(extensions=None, ), + BrainExtractionMask=dict(extensions=None, ), + BrainExtractionPrior0GenericAffine=dict(extensions=None, ), + BrainExtractionPrior1InverseWarp=dict(extensions=None, ), + BrainExtractionPrior1Warp=dict(extensions=None, ), + BrainExtractionPriorWarped=dict(extensions=None, ), + BrainExtractionSegmentation=dict(extensions=None, ), + BrainExtractionTemplateLaplacian=dict(extensions=None, ), + BrainExtractionTmp=dict(extensions=None, ), + BrainExtractionWM=dict(extensions=None, ), + N4Corrected0=dict(extensions=None, ), + N4Truncated0=dict(extensions=None, ), ) outputs = BrainExtraction.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py b/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py index 83fb2ed2aa..1f71fd6c27 100644 --- a/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py @@ -21,6 +21,7 @@ def test_ComposeMultiTransform_inputs(): ), output_transform=dict( argstr='%s', + extensions=None, keep_extension=True, name_source=['transforms'], name_template='%s_composed', @@ -28,6 +29,7 @@ def test_ComposeMultiTransform_inputs(): ), reference_image=dict( argstr='%s', + extensions=None, position=2, ), transforms=dict( @@ -42,7 +44,7 @@ def test_ComposeMultiTransform_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ComposeMultiTransform_outputs(): - output_map = dict(output_transform=dict(), ) + output_map = dict(output_transform=dict(extensions=None, ), ) outputs = ComposeMultiTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_CompositeTransformUtil.py b/nipype/interfaces/ants/tests/test_auto_CompositeTransformUtil.py index d2e6f943e2..214ac0d4b5 100644 --- a/nipype/interfaces/ants/tests/test_auto_CompositeTransformUtil.py +++ 
b/nipype/interfaces/ants/tests/test_auto_CompositeTransformUtil.py @@ -21,6 +21,7 @@ def test_CompositeTransformUtil_inputs(): ), out_file=dict( argstr='%s', + extensions=None, position=2, ), output_prefix=dict( @@ -41,9 +42,9 @@ def test_CompositeTransformUtil_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_CompositeTransformUtil_outputs(): output_map = dict( - affine_transform=dict(), - displacement_field=dict(), - out_file=dict(), + affine_transform=dict(extensions=None, ), + displacement_field=dict(extensions=None, ), + out_file=dict(extensions=None, ), ) outputs = CompositeTransformUtil.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py b/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py index ea9a16cbe2..5f44196a51 100644 --- a/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py +++ b/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py @@ -29,11 +29,13 @@ def test_ConvertScalarImageToRGB_inputs(): ), input_image=dict( argstr='%s', + extensions=None, mandatory=True, position=1, ), mask_image=dict( argstr='%s', + extensions=None, position=3, usedefault=True, ), @@ -73,7 +75,7 @@ def test_ConvertScalarImageToRGB_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ConvertScalarImageToRGB_outputs(): - output_map = dict(output_image=dict(), ) + output_map = dict(output_image=dict(extensions=None, ), ) outputs = ConvertScalarImageToRGB.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py b/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py index e2595a710f..005e3c949c 100644 --- a/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py +++ b/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py @@ -7,6 +7,7 @@ def test_CorticalThickness_inputs(): input_map = dict( anatomical_image=dict( argstr='-a %s', + extensions=None, mandatory=True, ), args=dict(argstr='%s', ), @@ -14,13 +15,15 @@ def test_CorticalThickness_inputs(): brain_probability_mask=dict( argstr='-m %s', copyfile=False, + extensions=None, mandatory=True, ), brain_template=dict( argstr='-e %s', + extensions=None, mandatory=True, ), - cortical_label_image=dict(), + cortical_label_image=dict(extensions=None, ), debug=dict(argstr='-z 1', ), dimension=dict( argstr='-d %d', @@ -30,7 +33,10 @@ def test_CorticalThickness_inputs(): nohash=True, usedefault=True, ), - extraction_registration_mask=dict(argstr='-f %s', ), + extraction_registration_mask=dict( + argstr='-f %s', + extensions=None, + ), image_suffix=dict( argstr='-s %s', usedefault=True, @@ -56,6 +62,7 @@ def test_CorticalThickness_inputs(): ), t1_registration_template=dict( argstr='-t %s', + extensions=None, mandatory=True, ), use_floatingpoint_precision=dict(argstr='-j %d', ), @@ -68,19 +75,19 @@ def test_CorticalThickness_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_CorticalThickness_outputs(): output_map = dict( - BrainExtractionMask=dict(), - BrainSegmentation=dict(), - BrainSegmentationN4=dict(), + BrainExtractionMask=dict(extensions=None, ), + BrainSegmentation=dict(extensions=None, ), + BrainSegmentationN4=dict(extensions=None, ), BrainSegmentationPosteriors=dict(), - BrainVolumes=dict(), - CorticalThickness=dict(), - CorticalThicknessNormedToTemplate=dict(), - ExtractedBrainN4=dict(), - SubjectToTemplate0GenericAffine=dict(), - SubjectToTemplate1Warp=dict(), - 
SubjectToTemplateLogJacobian=dict(), - TemplateToSubject0Warp=dict(), - TemplateToSubject1GenericAffine=dict(), + BrainVolumes=dict(extensions=None, ), + CorticalThickness=dict(extensions=None, ), + CorticalThicknessNormedToTemplate=dict(extensions=None, ), + ExtractedBrainN4=dict(extensions=None, ), + SubjectToTemplate0GenericAffine=dict(extensions=None, ), + SubjectToTemplate1Warp=dict(extensions=None, ), + SubjectToTemplateLogJacobian=dict(extensions=None, ), + TemplateToSubject0Warp=dict(extensions=None, ), + TemplateToSubject1GenericAffine=dict(extensions=None, ), ) outputs = CorticalThickness.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py b/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py index b32e7b98e0..0734b73c29 100644 --- a/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py +++ b/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py @@ -8,6 +8,7 @@ def test_CreateJacobianDeterminantImage_inputs(): args=dict(argstr='%s', ), deformationField=dict( argstr='%s', + extensions=None, mandatory=True, position=1, ), @@ -30,6 +31,7 @@ def test_CreateJacobianDeterminantImage_inputs(): ), outputImage=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -44,7 +46,7 @@ def test_CreateJacobianDeterminantImage_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CreateJacobianDeterminantImage_outputs(): - output_map = dict(jacobian_image=dict(), ) + output_map = dict(jacobian_image=dict(extensions=None, ), ) outputs = CreateJacobianDeterminantImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py b/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py index 74c2b0a7c2..22e160cb10 100644 --- a/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py +++ b/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py @@ -15,9 +15,13 @@ def test_CreateTiledMosaic_inputs(): flip_slice=dict(argstr='-f %s', ), input_image=dict( argstr='-i %s', + extensions=None, mandatory=True, ), - mask_image=dict(argstr='-x %s', ), + mask_image=dict( + argstr='-x %s', + extensions=None, + ), num_threads=dict( nohash=True, usedefault=True, @@ -30,6 +34,7 @@ def test_CreateTiledMosaic_inputs(): permute_axes=dict(argstr='-g', ), rgb_image=dict( argstr='-r %s', + extensions=None, mandatory=True, ), slices=dict(argstr='-s %s', ), @@ -41,7 +46,7 @@ def test_CreateTiledMosaic_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CreateTiledMosaic_outputs(): - output_map = dict(output_image=dict(), ) + output_map = dict(output_image=dict(extensions=None, ), ) outputs = CreateTiledMosaic.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py b/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py index 882cb21854..6b8bebbbe1 100644 --- a/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py +++ b/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py @@ -13,9 +13,11 @@ def test_DenoiseImage_inputs(): ), input_image=dict( argstr='-i %s', + extensions=None, mandatory=True, ), noise_image=dict( + extensions=None, hash_files=False, keep_extension=True, name_source=['input_image'], @@ -31,6 +33,7 @@ def test_DenoiseImage_inputs(): ), output_image=dict( argstr='-o %s', + extensions=None, 
hash_files=False, keep_extension=True, name_source=['input_image'], @@ -54,8 +57,8 @@ def test_DenoiseImage_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_DenoiseImage_outputs(): output_map = dict( - noise_image=dict(), - output_image=dict(), + noise_image=dict(extensions=None, ), + output_image=dict(extensions=None, ), ) outputs = DenoiseImage.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py b/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py index af91f9a8af..69ed18ff19 100644 --- a/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py +++ b/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py @@ -20,6 +20,7 @@ def test_GenWarpFields_inputs(): input_image=dict( argstr='-i %s', copyfile=False, + extensions=None, mandatory=True, ), inverse_warp_template_labels=dict(argstr='-l', ), @@ -39,6 +40,7 @@ def test_GenWarpFields_inputs(): reference_image=dict( argstr='-r %s', copyfile=True, + extensions=None, mandatory=True, ), similarity_metric=dict(argstr='-s %s', ), @@ -54,11 +56,11 @@ def test_GenWarpFields_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_GenWarpFields_outputs(): output_map = dict( - affine_transformation=dict(), - input_file=dict(), - inverse_warp_field=dict(), - output_file=dict(), - warp_field=dict(), + affine_transformation=dict(extensions=None, ), + input_file=dict(extensions=None, ), + inverse_warp_field=dict(extensions=None, ), + output_file=dict(extensions=None, ), + warp_field=dict(extensions=None, ), ) outputs = GenWarpFields.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_JointFusion.py b/nipype/interfaces/ants/tests/test_auto_JointFusion.py index 9e82584729..50bb8b1c84 100644 --- a/nipype/interfaces/ants/tests/test_auto_JointFusion.py +++ b/nipype/interfaces/ants/tests/test_auto_JointFusion.py @@ -26,7 +26,10 @@ def test_JointFusion_inputs(): nohash=True, usedefault=True, ), - exclusion_region=dict(argstr='-x %s', ), + exclusion_region=dict( + argstr='-x %s', + extensions=None, + ), method=dict( argstr='-m %s', usedefault=True, @@ -42,6 +45,7 @@ def test_JointFusion_inputs(): ), output_label_image=dict( argstr='%s', + extensions=None, mandatory=True, name_template='%s', output_name='output_label_image', @@ -76,7 +80,7 @@ def test_JointFusion_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_JointFusion_outputs(): - output_map = dict(output_label_image=dict(), ) + output_map = dict(output_label_image=dict(extensions=None, ), ) outputs = JointFusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py b/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py index 6129670d39..aecffa75b7 100644 --- a/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py +++ b/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py @@ -12,6 +12,7 @@ def test_KellyKapowski_inputs(): ), cortical_thickness=dict( argstr='--output "%s"', + extensions=None, hash_files=False, keep_extension=True, name_source=['segmentation_image'], @@ -31,7 +32,9 @@ def test_KellyKapowski_inputs(): ), gray_matter_label=dict(usedefault=True, ), gray_matter_prob_image=dict( - argstr='--gray-matter-probability-image "%s"', ), + argstr='--gray-matter-probability-image "%s"', + extensions=None, + ), max_invert_displacement_field_iters=dict( argstr= '--maximum-number-of-invert-displacement-field-iterations %d', @@ -47,6 +50,7 @@ def 
test_KellyKapowski_inputs(): ), segmentation_image=dict( argstr='--segmentation-image "%s"', + extensions=None, mandatory=True, ), smoothing_variance=dict( @@ -61,9 +65,13 @@ def test_KellyKapowski_inputs(): argstr='--thickness-prior-estimate %f', usedefault=True, ), - thickness_prior_image=dict(argstr='--thickness-prior-image "%s"', ), + thickness_prior_image=dict( + argstr='--thickness-prior-image "%s"', + extensions=None, + ), use_bspline_smoothing=dict(argstr='--use-bspline-smoothing 1', ), warped_white_matter=dict( + extensions=None, hash_files=False, keep_extension=True, name_source=['segmentation_image'], @@ -71,7 +79,9 @@ def test_KellyKapowski_inputs(): ), white_matter_label=dict(usedefault=True, ), white_matter_prob_image=dict( - argstr='--white-matter-probability-image "%s"', ), + argstr='--white-matter-probability-image "%s"', + extensions=None, + ), ) inputs = KellyKapowski.input_spec() @@ -80,8 +90,8 @@ def test_KellyKapowski_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_KellyKapowski_outputs(): output_map = dict( - cortical_thickness=dict(), - warped_white_matter=dict(), + cortical_thickness=dict(extensions=None, ), + warped_white_matter=dict(extensions=None, ), ) outputs = KellyKapowski.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py b/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py index a5de244711..cc44d0f059 100644 --- a/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py +++ b/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py @@ -17,12 +17,14 @@ def test_LabelGeometry_inputs(): ), intensity_image=dict( argstr='%s', + extensions=None, mandatory=True, position=2, usedefault=True, ), label_image=dict( argstr='%s', + extensions=None, mandatory=True, position=1, ), @@ -43,7 +45,7 @@ def test_LabelGeometry_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_LabelGeometry_outputs(): - output_map = dict(output_file=dict(), ) + output_map = dict(output_file=dict(extensions=None, ), ) outputs = LabelGeometry.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py b/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py index 608ba10889..d10e154ce6 100644 --- a/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py +++ b/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py @@ -18,12 +18,14 @@ def test_LaplacianThickness_inputs(): input_gm=dict( argstr='%s', copyfile=True, + extensions=None, mandatory=True, position=2, ), input_wm=dict( argstr='%s', copyfile=True, + extensions=None, mandatory=True, position=1, ), @@ -33,6 +35,7 @@ def test_LaplacianThickness_inputs(): ), output_image=dict( argstr='%s', + extensions=None, hash_files=False, keep_extension=True, name_source=['input_wm'], @@ -65,7 +68,7 @@ def test_LaplacianThickness_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_LaplacianThickness_outputs(): - output_map = dict(output_image=dict(), ) + output_map = dict(output_image=dict(extensions=None, ), ) outputs = LaplacianThickness.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py b/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py index 1a5041ae74..88eeaf2360 100644 --- a/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py +++ 
b/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py @@ -14,8 +14,14 @@ def test_MeasureImageSimilarity_inputs(): nohash=True, usedefault=True, ), - fixed_image=dict(mandatory=True, ), - fixed_image_mask=dict(argstr='%s', ), + fixed_image=dict( + extensions=None, + mandatory=True, + ), + fixed_image_mask=dict( + argstr='%s', + extensions=None, + ), metric=dict( argstr='%s', mandatory=True, @@ -24,8 +30,14 @@ def test_MeasureImageSimilarity_inputs(): requires=['metric'], usedefault=True, ), - moving_image=dict(mandatory=True, ), - moving_image_mask=dict(requires=['fixed_image_mask'], ), + moving_image=dict( + extensions=None, + mandatory=True, + ), + moving_image_mask=dict( + extensions=None, + requires=['fixed_image_mask'], + ), num_threads=dict( nohash=True, usedefault=True, diff --git a/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py b/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py index 1bf787018d..72166bd17d 100644 --- a/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py +++ b/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py @@ -17,6 +17,7 @@ def test_MultiplyImages_inputs(): ), first_input=dict( argstr='%s', + extensions=None, mandatory=True, position=1, ), @@ -26,6 +27,7 @@ def test_MultiplyImages_inputs(): ), output_product_image=dict( argstr='%s', + extensions=None, mandatory=True, position=3, ), @@ -41,7 +43,7 @@ def test_MultiplyImages_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MultiplyImages_outputs(): - output_map = dict(output_product_image=dict(), ) + output_map = dict(output_product_image=dict(extensions=None, ), ) outputs = MultiplyImages.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py b/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py index 66edf0c3d6..c93d847d4b 100644 --- a/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py +++ b/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py @@ -6,7 +6,10 @@ def test_N4BiasFieldCorrection_inputs(): input_map = dict( args=dict(argstr='%s', ), - bias_image=dict(hash_files=False, ), + bias_image=dict( + extensions=None, + hash_files=False, + ), bspline_fitting_distance=dict(argstr='--bspline-fitting %s', ), bspline_order=dict(requires=['bspline_fitting_distance'], ), convergence_threshold=dict(requires=['n_iterations'], ), @@ -24,9 +27,13 @@ def test_N4BiasFieldCorrection_inputs(): ), input_image=dict( argstr='--input-image %s', + extensions=None, mandatory=True, ), - mask_image=dict(argstr='--mask-image %s', ), + mask_image=dict( + argstr='--mask-image %s', + extensions=None, + ), n_iterations=dict(argstr='--convergence %s', ), num_threads=dict( nohash=True, @@ -43,7 +50,10 @@ def test_N4BiasFieldCorrection_inputs(): xor=['bias_image'], ), shrink_factor=dict(argstr='--shrink-factor %d', ), - weight_image=dict(argstr='--weight-image %s', ), + weight_image=dict( + argstr='--weight-image %s', + extensions=None, + ), ) inputs = N4BiasFieldCorrection.input_spec() @@ -52,8 +62,8 @@ def test_N4BiasFieldCorrection_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_N4BiasFieldCorrection_outputs(): output_map = dict( - bias_image=dict(), - output_image=dict(), + bias_image=dict(extensions=None, ), + output_image=dict(extensions=None, ), ) outputs = N4BiasFieldCorrection.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_Registration.py 
b/nipype/interfaces/ants/tests/test_auto_Registration.py index 4bd253c3d0..1b49f44678 100644 --- a/nipype/interfaces/ants/tests/test_auto_Registration.py +++ b/nipype/interfaces/ants/tests/test_auto_Registration.py @@ -29,6 +29,7 @@ def test_Registration_inputs(): fixed_image=dict(mandatory=True, ), fixed_image_mask=dict( argstr='%s', + extensions=None, max_ver='2.1.0', xor=['fixed_image_masks'], ), @@ -70,6 +71,7 @@ def test_Registration_inputs(): metric_weight_stage_trait=dict(), moving_image=dict(mandatory=True, ), moving_image_mask=dict( + extensions=None, max_ver='2.1.0', requires=['fixed_image_mask'], xor=['moving_image_masks'], @@ -98,7 +100,10 @@ def test_Registration_inputs(): requires=['metric_weight'], usedefault=True, ), - restore_state=dict(argstr='--restore-state %s', ), + restore_state=dict( + argstr='--restore-state %s', + extensions=None, + ), restrict_deformation=dict(), sampling_percentage=dict(requires=['sampling_strategy'], ), sampling_percentage_item_trait=dict(), @@ -106,7 +111,10 @@ def test_Registration_inputs(): sampling_strategy=dict(requires=['metric_weight'], ), sampling_strategy_item_trait=dict(), sampling_strategy_stage_trait=dict(), - save_state=dict(argstr='--save-state %s', ), + save_state=dict( + argstr='--save-state %s', + extensions=None, + ), shrink_factors=dict(mandatory=True, ), sigma_units=dict(requires=['smoothing_sigmas'], ), smoothing_sigmas=dict(mandatory=True, ), @@ -141,17 +149,17 @@ def test_Registration_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Registration_outputs(): output_map = dict( - composite_transform=dict(), + composite_transform=dict(extensions=None, ), elapsed_time=dict(), forward_invert_flags=dict(), forward_transforms=dict(), - inverse_composite_transform=dict(), - inverse_warped_image=dict(), + inverse_composite_transform=dict(extensions=None, ), + inverse_warped_image=dict(extensions=None, ), metric_value=dict(), reverse_invert_flags=dict(), reverse_transforms=dict(), - save_state=dict(), - warped_image=dict(), + save_state=dict(extensions=None, ), + warped_image=dict(extensions=None, ), ) outputs = Registration.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py b/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py index 8bc79392e1..d2b527556b 100644 --- a/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py +++ b/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py @@ -55,11 +55,11 @@ def test_RegistrationSynQuick_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_RegistrationSynQuick_outputs(): output_map = dict( - forward_warp_field=dict(), - inverse_warp_field=dict(), - inverse_warped_image=dict(), - out_matrix=dict(), - warped_image=dict(), + forward_warp_field=dict(extensions=None, ), + inverse_warp_field=dict(extensions=None, ), + inverse_warped_image=dict(extensions=None, ), + out_matrix=dict(extensions=None, ), + warped_image=dict(extensions=None, ), ) outputs = RegistrationSynQuick.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py b/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py index 42020f6db9..8a62b58b42 100644 --- a/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py @@ -17,6 +17,7 @@ def test_WarpImageMultiTransform_inputs(): ), input_image=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -26,12 +27,14 @@ def 
test_WarpImageMultiTransform_inputs(): usedefault=True, ), out_postfix=dict( + extensions=None, hash_files=False, usedefault=True, xor=['output_image'], ), output_image=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=3, @@ -39,6 +42,7 @@ def test_WarpImageMultiTransform_inputs(): ), reference_image=dict( argstr='-R %s', + extensions=None, xor=['tightest_box'], ), reslice_by_header=dict(argstr='--reslice-by-header', ), @@ -60,7 +64,7 @@ def test_WarpImageMultiTransform_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_WarpImageMultiTransform_outputs(): - output_map = dict(output_image=dict(), ) + output_map = dict(output_image=dict(extensions=None, ), ) outputs = WarpImageMultiTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py b/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py index de3131f056..b9b17585f6 100644 --- a/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py @@ -18,6 +18,7 @@ def test_WarpTimeSeriesImageMultiTransform_inputs(): input_image=dict( argstr='%s', copyfile=True, + extensions=None, mandatory=True, ), invert_affine=dict(), @@ -31,6 +32,7 @@ def test_WarpTimeSeriesImageMultiTransform_inputs(): ), reference_image=dict( argstr='-R %s', + extensions=None, xor=['tightest_box'], ), reslice_by_header=dict(argstr='--reslice-by-header', ), @@ -52,7 +54,7 @@ def test_WarpTimeSeriesImageMultiTransform_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_WarpTimeSeriesImageMultiTransform_outputs(): - output_map = dict(output_image=dict(), ) + output_map = dict(output_image=dict(extensions=None, ), ) outputs = WarpTimeSeriesImageMultiTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py b/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py index fe21858500..d2cdc074a4 100644 --- a/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py +++ b/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py @@ -20,6 +20,7 @@ def test_antsIntroduction_inputs(): input_image=dict( argstr='-i %s', copyfile=False, + extensions=None, mandatory=True, ), inverse_warp_template_labels=dict(argstr='-l', ), @@ -39,6 +40,7 @@ def test_antsIntroduction_inputs(): reference_image=dict( argstr='-r %s', copyfile=True, + extensions=None, mandatory=True, ), similarity_metric=dict(argstr='-s %s', ), @@ -54,11 +56,11 @@ def test_antsIntroduction_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_antsIntroduction_outputs(): output_map = dict( - affine_transformation=dict(), - input_file=dict(), - inverse_warp_field=dict(), - output_file=dict(), - warp_field=dict(), + affine_transformation=dict(extensions=None, ), + input_file=dict(extensions=None, ), + inverse_warp_field=dict(extensions=None, ), + output_file=dict(extensions=None, ), + warp_field=dict(extensions=None, ), ) outputs = antsIntroduction.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py b/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py index 8513003c29..a1d77b11ac 100644 --- a/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py +++ 
b/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py @@ -61,7 +61,7 @@ def test_buildtemplateparallel_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_buildtemplateparallel_outputs(): output_map = dict( - final_template_file=dict(), + final_template_file=dict(extensions=None, ), subject_outfiles=dict(), template_files=dict(), ) diff --git a/nipype/interfaces/brainsuite/tests/test_auto_BDP.py b/nipype/interfaces/brainsuite/tests/test_auto_BDP.py index ff705edfeb..a948b7c698 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_BDP.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_BDP.py @@ -14,6 +14,7 @@ def test_BDP_inputs(): args=dict(argstr='%s', ), bMatrixFile=dict( argstr='--bmat %s', + extensions=None, mandatory=True, position=-1, xor=['BVecBValPair'], @@ -21,17 +22,30 @@ def test_BDP_inputs(): bValRatioThreshold=dict(argstr='--bval-ratio-threshold %f', ), bfcFile=dict( argstr='%s', + extensions=None, mandatory=True, position=0, xor=['noStructuralRegistration'], ), - customDiffusionLabel=dict(argstr='--custom-diffusion-label %s', ), - customLabelXML=dict(argstr='--custom-label-xml %s', ), - customT1Label=dict(argstr='--custom-t1-label %s', ), + customDiffusionLabel=dict( + argstr='--custom-diffusion-label %s', + extensions=None, + ), + customLabelXML=dict( + argstr='--custom-label-xml %s', + extensions=None, + ), + customT1Label=dict( + argstr='--custom-t1-label %s', + extensions=None, + ), dataSinkDelay=dict(argstr='%s', ), dcorrRegMeasure=dict(argstr='--dcorr-reg-method %s', ), dcorrWeight=dict(argstr='--dcorr-regularization-wt %f', ), - dwiMask=dict(argstr='--dwi-mask %s', ), + dwiMask=dict( + argstr='--dwi-mask %s', + extensions=None, + ), echoSpacing=dict(argstr='--echo-spacing=%f', ), environ=dict( nohash=True, @@ -43,6 +57,7 @@ def test_BDP_inputs(): estimateTensors=dict(argstr='--tensors', ), fieldmapCorrection=dict( argstr='--fieldmap-correction %s', + extensions=None, requires=['echoSpacing'], ), fieldmapCorrectionMethod=dict( @@ -50,13 +65,17 @@ def test_BDP_inputs(): xor=['skipIntensityCorr'], ), fieldmapSmooth=dict(argstr='--fieldmap-smooth3=%f', ), - flagConfigFile=dict(argstr='--flag-conf-file %s', ), + flagConfigFile=dict( + argstr='--flag-conf-file %s', + extensions=None, + ), forcePartialROIStats=dict(argstr='--force-partial-roi-stats', ), generateStats=dict(argstr='--generate-stats', ), ignoreFieldmapFOV=dict(argstr='--ignore-fieldmap-fov', ), ignoreMemory=dict(argstr='--ignore-memory', ), inputDiffusionData=dict( argstr='--nii %s', + extensions=None, mandatory=True, position=-2, ), @@ -81,16 +100,29 @@ def test_BDP_inputs(): xor=['fieldmapCorrectionMethod'], ), skipNonuniformityCorr=dict(argstr='--no-nonuniformity-correction', ), - t1Mask=dict(argstr='--t1-mask %s', ), + t1Mask=dict( + argstr='--t1-mask %s', + extensions=None, + ), threads=dict(argstr='--threads=%d', ), transformDataOnly=dict(argstr='--transform-data-only', ), transformDiffusionSurface=dict( - argstr='--transform-diffusion-surface %s', ), + argstr='--transform-diffusion-surface %s', + extensions=None, + ), transformDiffusionVolume=dict( - argstr='--transform-diffusion-volume %s', ), + argstr='--transform-diffusion-volume %s', + extensions=None, + ), transformInterpolation=dict(argstr='--transform-interpolation %s', ), - transformT1Surface=dict(argstr='--transform-t1-surface %s', ), - transformT1Volume=dict(argstr='--transform-t1-volume %s', ), + transformT1Surface=dict( + argstr='--transform-t1-surface %s', + extensions=None, + ), + 
transformT1Volume=dict( + argstr='--transform-t1-volume %s', + extensions=None, + ), ) inputs = BDP.input_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py b/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py index 01200e50fc..a29e1b75cf 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py @@ -14,7 +14,10 @@ def test_Bfc_inputs(): convergenceThreshold=dict(argstr='--eps %f', ), correctWholeVolume=dict(argstr='--extrapolate', ), correctedImagesOutputPrefix=dict(argstr='--prefix %s', ), - correctionScheduleFile=dict(argstr='--schedule %s', ), + correctionScheduleFile=dict( + argstr='--schedule %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, @@ -23,10 +26,12 @@ def test_Bfc_inputs(): histogramType=dict(argstr='%s', ), inputMRIFile=dict( argstr='-i %s', + extensions=None, mandatory=True, ), inputMaskFile=dict( argstr='-m %s', + extensions=None, hash_files=False, ), intermediate_file_type=dict(argstr='%s', ), @@ -41,15 +46,18 @@ def test_Bfc_inputs(): ), outputBiasField=dict( argstr='--bias %s', + extensions=None, hash_files=False, ), outputMRIVolume=dict( argstr='-o %s', + extensions=None, genfile=True, hash_files=False, ), outputMaskedBiasField=dict( argstr='--maskedbias %s', + extensions=None, hash_files=False, ), splineLambda=dict(argstr='-w %f', ), @@ -63,10 +71,10 @@ def test_Bfc_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Bfc_outputs(): output_map = dict( - correctionScheduleFile=dict(), - outputBiasField=dict(), - outputMRIVolume=dict(), - outputMaskedBiasField=dict(), + correctionScheduleFile=dict(extensions=None, ), + outputBiasField=dict(extensions=None, ), + outputMRIVolume=dict(extensions=None, ), + outputMaskedBiasField=dict(extensions=None, ), ) outputs = Bfc.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Bse.py b/nipype/interfaces/brainsuite/tests/test_auto_Bse.py index bbd154bc24..46850631bd 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Bse.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Bse.py @@ -28,32 +28,39 @@ def test_Bse_inputs(): ), inputMRIFile=dict( argstr='-i %s', + extensions=None, mandatory=True, ), noRotate=dict(argstr='--norotate', ), outputCortexFile=dict( argstr='--cortex %s', + extensions=None, hash_files=False, ), outputDetailedBrainMask=dict( argstr='--hires %s', + extensions=None, hash_files=False, ), outputDiffusionFilter=dict( argstr='--adf %s', + extensions=None, hash_files=False, ), outputEdgeMap=dict( argstr='--edge %s', + extensions=None, hash_files=False, ), outputMRIVolume=dict( argstr='-o %s', + extensions=None, genfile=True, hash_files=False, ), outputMaskFile=dict( argstr='--mask %s', + extensions=None, genfile=True, hash_files=False, ), @@ -78,12 +85,12 @@ def test_Bse_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Bse_outputs(): output_map = dict( - outputCortexFile=dict(), - outputDetailedBrainMask=dict(), - outputDiffusionFilter=dict(), - outputEdgeMap=dict(), - outputMRIVolume=dict(), - outputMaskFile=dict(), + outputCortexFile=dict(extensions=None, ), + outputDetailedBrainMask=dict(extensions=None, ), + outputDiffusionFilter=dict(extensions=None, ), + outputEdgeMap=dict(extensions=None, ), + outputMRIVolume=dict(extensions=None, ), + outputMaskFile=dict(extensions=None, ), ) outputs = Bse.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py b/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py 
index 808e4347c3..ea0c58c981 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py @@ -16,33 +16,43 @@ def test_Cerebro_inputs(): ), inputAtlasLabelFile=dict( argstr='--atlaslabels %s', + extensions=None, mandatory=True, ), inputAtlasMRIFile=dict( argstr='--atlas %s', + extensions=None, mandatory=True, ), - inputBrainMaskFile=dict(argstr='-m %s', ), + inputBrainMaskFile=dict( + argstr='-m %s', + extensions=None, + ), inputMRIFile=dict( argstr='-i %s', + extensions=None, mandatory=True, ), keepTempFiles=dict(argstr='--keep', ), linearConvergence=dict(argstr='--linconv %f', ), outputAffineTransformFile=dict( argstr='--air %s', + extensions=None, genfile=True, ), outputCerebrumMaskFile=dict( argstr='-o %s', + extensions=None, genfile=True, ), outputLabelVolumeFile=dict( argstr='-l %s', + extensions=None, genfile=True, ), outputWarpTransformFile=dict( argstr='--warp %s', + extensions=None, genfile=True, ), tempDirectory=dict(argstr='--tempdir %s', ), @@ -59,10 +69,10 @@ def test_Cerebro_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Cerebro_outputs(): output_map = dict( - outputAffineTransformFile=dict(), - outputCerebrumMaskFile=dict(), - outputLabelVolumeFile=dict(), - outputWarpTransformFile=dict(), + outputAffineTransformFile=dict(extensions=None, ), + outputCerebrumMaskFile=dict(extensions=None, ), + outputLabelVolumeFile=dict(extensions=None, ), + outputWarpTransformFile=dict(extensions=None, ), ) outputs = Cerebro.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py b/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py index 536cb158f2..d8099b932c 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py @@ -21,14 +21,17 @@ def test_Cortex_inputs(): ), inputHemisphereLabelFile=dict( argstr='-h %s', + extensions=None, mandatory=True, ), inputTissueFractionFile=dict( argstr='-f %s', + extensions=None, mandatory=True, ), outputCerebrumMask=dict( argstr='-o %s', + extensions=None, genfile=True, ), timer=dict(argstr='--timer', ), @@ -44,7 +47,7 @@ def test_Cortex_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Cortex_outputs(): - output_map = dict(outputCerebrumMask=dict(), ) + output_map = dict(outputCerebrumMask=dict(extensions=None, ), ) outputs = Cortex.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py b/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py index ba430fb1fd..276185df1e 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py @@ -12,11 +12,13 @@ def test_Dewisp_inputs(): ), inputMaskFile=dict( argstr='-i %s', + extensions=None, mandatory=True, ), maximumIterations=dict(argstr='-n %d', ), outputMaskFile=dict( argstr='-o %s', + extensions=None, genfile=True, ), sizeThreshold=dict(argstr='-t %d', ), @@ -29,7 +31,7 @@ def test_Dewisp_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Dewisp_outputs(): - output_map = dict(outputMaskFile=dict(), ) + output_map = dict(outputMaskFile=dict(extensions=None, ), ) outputs = Dewisp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py 
b/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py index c69232fd01..efb216f30d 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py @@ -14,9 +14,13 @@ def test_Dfs_inputs(): nohash=True, usedefault=True, ), - inputShadingVolume=dict(argstr='-c %s', ), + inputShadingVolume=dict( + argstr='-c %s', + extensions=None, + ), inputVolumeFile=dict( argstr='-i %s', + extensions=None, mandatory=True, ), noNormalsFlag=dict(argstr='--nonormals', ), @@ -26,6 +30,7 @@ def test_Dfs_inputs(): ), outputSurfaceFile=dict( argstr='-o %s', + extensions=None, genfile=True, ), postSmoothFlag=dict(argstr='--postsmooth', ), @@ -55,7 +60,7 @@ def test_Dfs_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Dfs_outputs(): - output_map = dict(outputSurfaceFile=dict(), ) + output_map = dict(outputSurfaceFile=dict(extensions=None, ), ) outputs = Dfs.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py b/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py index 9e3db80dd9..567d3bcdc4 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py @@ -12,29 +12,38 @@ def test_Hemisplit_inputs(): ), inputHemisphereLabelFile=dict( argstr='-l %s', + extensions=None, mandatory=True, ), inputSurfaceFile=dict( argstr='-i %s', + extensions=None, mandatory=True, ), outputLeftHemisphere=dict( argstr='--left %s', + extensions=None, genfile=True, ), outputLeftPialHemisphere=dict( argstr='-pl %s', + extensions=None, genfile=True, ), outputRightHemisphere=dict( argstr='--right %s', + extensions=None, genfile=True, ), outputRightPialHemisphere=dict( argstr='-pr %s', + extensions=None, genfile=True, ), - pialSurfaceFile=dict(argstr='-p %s', ), + pialSurfaceFile=dict( + argstr='-p %s', + extensions=None, + ), timer=dict(argstr='--timer', ), verbosity=dict(argstr='-v %d', ), ) @@ -45,10 +54,10 @@ def test_Hemisplit_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Hemisplit_outputs(): output_map = dict( - outputLeftHemisphere=dict(), - outputLeftPialHemisphere=dict(), - outputRightHemisphere=dict(), - outputRightPialHemisphere=dict(), + outputLeftHemisphere=dict(extensions=None, ), + outputLeftPialHemisphere=dict(extensions=None, ), + outputRightHemisphere=dict(extensions=None, ), + outputRightPialHemisphere=dict(extensions=None, ), ) outputs = Hemisplit.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py b/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py index afc621a56e..125a0504ed 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py @@ -13,14 +13,17 @@ def test_Pialmesh_inputs(): exportPrefix=dict(argstr='--prefix %s', ), inputMaskFile=dict( argstr='-m %s', + extensions=None, mandatory=True, ), inputSurfaceFile=dict( argstr='-i %s', + extensions=None, mandatory=True, ), inputTissueFractionFile=dict( argstr='-f %s', + extensions=None, mandatory=True, ), laplacianSmoothing=dict( @@ -45,6 +48,7 @@ def test_Pialmesh_inputs(): ), outputSurfaceFile=dict( argstr='-o %s', + extensions=None, genfile=True, ), recomputeNormals=dict(argstr='--norm', ), @@ -70,7 +74,7 @@ def test_Pialmesh_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def 
test_Pialmesh_outputs(): - output_map = dict(outputSurfaceFile=dict(), ) + output_map = dict(outputSurfaceFile=dict(extensions=None, ), ) outputs = Pialmesh.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py b/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py index d425d4ddac..3f79a0cdf6 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py @@ -12,15 +12,21 @@ def test_Pvc_inputs(): ), inputMRIFile=dict( argstr='-i %s', + extensions=None, mandatory=True, ), - inputMaskFile=dict(argstr='-m %s', ), + inputMaskFile=dict( + argstr='-m %s', + extensions=None, + ), outputLabelFile=dict( argstr='-o %s', + extensions=None, genfile=True, ), outputTissueFractionFile=dict( argstr='-f %s', + extensions=None, genfile=True, ), spatialPrior=dict(argstr='-l %f', ), @@ -35,8 +41,8 @@ def test_Pvc_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Pvc_outputs(): output_map = dict( - outputLabelFile=dict(), - outputTissueFractionFile=dict(), + outputLabelFile=dict(extensions=None, ), + outputTissueFractionFile=dict(extensions=None, ), ) outputs = Pvc.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py b/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py index eb672d12b7..17ec62fe83 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py @@ -20,11 +20,13 @@ def test_Scrubmask_inputs(): ), inputMaskFile=dict( argstr='-i %s', + extensions=None, mandatory=True, ), numberIterations=dict(argstr='-n %d', ), outputMaskFile=dict( argstr='-o %s', + extensions=None, genfile=True, ), timer=dict(argstr='--timer', ), @@ -36,7 +38,7 @@ def test_Scrubmask_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Scrubmask_outputs(): - output_map = dict(outputMaskFile=dict(), ) + output_map = dict(outputMaskFile=dict(extensions=None, ), ) outputs = Scrubmask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py b/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py index 2191f7b133..ed921d09b9 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py @@ -14,15 +14,18 @@ def test_Skullfinder_inputs(): ), inputMRIFile=dict( argstr='-i %s', + extensions=None, mandatory=True, ), inputMaskFile=dict( argstr='-m %s', + extensions=None, mandatory=True, ), lowerThreshold=dict(argstr='-l %d', ), outputLabelFile=dict( argstr='-o %s', + extensions=None, genfile=True, ), performFinalOpening=dict(argstr='--finalOpening', ), @@ -39,7 +42,7 @@ def test_Skullfinder_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Skullfinder_outputs(): - output_map = dict(outputLabelFile=dict(), ) + output_map = dict(outputLabelFile=dict(extensions=None, ), ) outputs = Skullfinder.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Tca.py b/nipype/interfaces/brainsuite/tests/test_auto_Tca.py index ec2886b42f..94f7868f94 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Tca.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Tca.py @@ -16,6 +16,7 @@ def test_Tca_inputs(): ), inputMaskFile=dict( argstr='-i 
%s', + extensions=None, mandatory=True, ), maxCorrectionSize=dict(argstr='-n %d', ), @@ -25,6 +26,7 @@ def test_Tca_inputs(): ), outputMaskFile=dict( argstr='-o %s', + extensions=None, genfile=True, ), timer=dict(argstr='--timer', ), @@ -36,7 +38,7 @@ def test_Tca_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Tca_outputs(): - output_map = dict(outputMaskFile=dict(), ) + output_map = dict(outputMaskFile=dict(extensions=None, ), ) outputs = Tca.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py b/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py index 5d796126cc..28170946ac 100644 --- a/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py +++ b/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py @@ -29,11 +29,13 @@ def test_AnalyzeHeader_inputs(): ), in_file=dict( argstr='< %s', + extensions=None, mandatory=True, position=1, ), initfromheader=dict( argstr='-initfromheader %s', + extensions=None, position=3, ), intelbyteorder=dict(argstr='-intelbyteorder', ), @@ -57,22 +59,27 @@ def test_AnalyzeHeader_inputs(): ), printbigendian=dict( argstr='-printbigendian %s', + extensions=None, position=3, ), printimagedims=dict( argstr='-printimagedims %s', + extensions=None, position=3, ), printintelbyteorder=dict( argstr='-printintelbyteorder %s', + extensions=None, position=3, ), printprogargs=dict( argstr='-printprogargs %s', + extensions=None, position=3, ), readheader=dict( argstr='-readheader %s', + extensions=None, position=3, ), scaleinter=dict( @@ -85,6 +92,7 @@ def test_AnalyzeHeader_inputs(): ), scheme_file=dict( argstr='%s', + extensions=None, position=2, ), voxel_dims=dict( @@ -98,7 +106,7 @@ def test_AnalyzeHeader_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_AnalyzeHeader_outputs(): - output_map = dict(header=dict(), ) + output_map = dict(header=dict(extensions=None, ), ) outputs = AnalyzeHeader.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py b/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py index caee9e68cf..66ac282175 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py @@ -12,6 +12,7 @@ def test_ComputeEigensystem_inputs(): ), in_file=dict( argstr='< %s', + extensions=None, mandatory=True, position=1, ), @@ -37,7 +38,7 @@ def test_ComputeEigensystem_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ComputeEigensystem_outputs(): - output_map = dict(eigen=dict(), ) + output_map = dict(eigen=dict(extensions=None, ), ) outputs = ComputeEigensystem.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py b/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py index 49cfdfd96a..9cfae77b2f 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py @@ -12,6 +12,7 @@ def test_ComputeFractionalAnisotropy_inputs(): ), in_file=dict( argstr='< %s', + extensions=None, mandatory=True, position=1, ), @@ -25,6 +26,7 @@ def test_ComputeFractionalAnisotropy_inputs(): 
outputdatatype=dict(argstr='-outputdatatype %s', ), scheme_file=dict( argstr='%s', + extensions=None, position=2, ), ) @@ -34,7 +36,7 @@ def test_ComputeFractionalAnisotropy_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ComputeFractionalAnisotropy_outputs(): - output_map = dict(fa=dict(), ) + output_map = dict(fa=dict(extensions=None, ), ) outputs = ComputeFractionalAnisotropy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py b/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py index 212477d149..faff5234c3 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py @@ -12,6 +12,7 @@ def test_ComputeMeanDiffusivity_inputs(): ), in_file=dict( argstr='< %s', + extensions=None, mandatory=True, position=1, ), @@ -19,12 +20,14 @@ def test_ComputeMeanDiffusivity_inputs(): inputmodel=dict(argstr='-inputmodel %s', ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), outputdatatype=dict(argstr='-outputdatatype %s', ), scheme_file=dict( argstr='%s', + extensions=None, position=2, ), ) @@ -34,7 +37,7 @@ def test_ComputeMeanDiffusivity_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ComputeMeanDiffusivity_outputs(): - output_map = dict(md=dict(), ) + output_map = dict(md=dict(extensions=None, ), ) outputs = ComputeMeanDiffusivity.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py b/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py index 1112c7c743..1443a253bd 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py @@ -12,6 +12,7 @@ def test_ComputeTensorTrace_inputs(): ), in_file=dict( argstr='< %s', + extensions=None, mandatory=True, position=1, ), @@ -25,6 +26,7 @@ def test_ComputeTensorTrace_inputs(): outputdatatype=dict(argstr='-outputdatatype %s', ), scheme_file=dict( argstr='%s', + extensions=None, position=2, ), ) @@ -34,7 +36,7 @@ def test_ComputeTensorTrace_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ComputeTensorTrace_outputs(): - output_map = dict(trace=dict(), ) + output_map = dict(trace=dict(extensions=None, ), ) outputs = ComputeTensorTrace.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_Conmat.py b/nipype/interfaces/camino/tests/test_auto_Conmat.py index 06a76fdf2b..cbcebd61c7 100644 --- a/nipype/interfaces/camino/tests/test_auto_Conmat.py +++ b/nipype/interfaces/camino/tests/test_auto_Conmat.py @@ -12,21 +12,28 @@ def test_Conmat_inputs(): ), in_file=dict( argstr='-inputfile %s', + extensions=None, mandatory=True, ), output_root=dict( argstr='-outputroot %s', + extensions=None, genfile=True, ), scalar_file=dict( argstr='-scalarfile %s', + extensions=None, requires=['tract_stat'], ), target_file=dict( argstr='-targetfile %s', + extensions=None, mandatory=True, ), - targetname_file=dict(argstr='-targetnamefile %s', ), + targetname_file=dict( + argstr='-targetnamefile %s', + extensions=None, + ), tract_prop=dict( argstr='-tractstat %s', units='NA', @@ -46,8 +53,8 @@ def test_Conmat_inputs(): 
assert getattr(inputs.traits()[key], metakey) == value def test_Conmat_outputs(): output_map = dict( - conmat_sc=dict(), - conmat_ts=dict(), + conmat_sc=dict(extensions=None, ), + conmat_ts=dict(extensions=None, ), ) outputs = Conmat.output_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py b/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py index b20dcbf3bf..244ef010bd 100644 --- a/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py +++ b/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py @@ -12,16 +12,19 @@ def test_DT2NIfTI_inputs(): ), header_file=dict( argstr='-header %s', + extensions=None, mandatory=True, position=3, ), in_file=dict( argstr='-inputfile %s', + extensions=None, mandatory=True, position=1, ), output_root=dict( argstr='-outputroot %s', + extensions=None, genfile=True, position=2, ), @@ -33,9 +36,9 @@ def test_DT2NIfTI_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_DT2NIfTI_outputs(): output_map = dict( - dt=dict(), - exitcode=dict(), - lns0=dict(), + dt=dict(extensions=None, ), + exitcode=dict(extensions=None, ), + lns0=dict(extensions=None, ), ) outputs = DT2NIfTI.output_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_DTIFit.py b/nipype/interfaces/camino/tests/test_auto_DTIFit.py index a23da89552..26d27d57d2 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTIFit.py +++ b/nipype/interfaces/camino/tests/test_auto_DTIFit.py @@ -6,13 +6,17 @@ def test_DTIFit_inputs(): input_map = dict( args=dict(argstr='%s', ), - bgmask=dict(argstr='-bgmask %s', ), + bgmask=dict( + argstr='-bgmask %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=1, ), @@ -27,6 +31,7 @@ def test_DTIFit_inputs(): ), scheme_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -37,7 +42,7 @@ def test_DTIFit_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DTIFit_outputs(): - output_map = dict(tensor_fitted=dict(), ) + output_map = dict(tensor_fitted=dict(extensions=None, ), ) outputs = DTIFit.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py b/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py index 95e8e0bea5..f7caf77ef1 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py +++ b/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py @@ -37,6 +37,7 @@ def test_DTLUTGen_inputs(): ), scheme_file=dict( argstr='-schemefile %s', + extensions=None, mandatory=True, position=2, ), @@ -60,7 +61,7 @@ def test_DTLUTGen_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DTLUTGen_outputs(): - output_map = dict(dtLUT=dict(), ) + output_map = dict(dtLUT=dict(extensions=None, ), ) outputs = DTLUTGen.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_DTMetric.py b/nipype/interfaces/camino/tests/test_auto_DTMetric.py index 12256a50d8..fd9b6333f4 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTMetric.py +++ b/nipype/interfaces/camino/tests/test_auto_DTMetric.py @@ -6,9 +6,13 @@ def test_DTMetric_inputs(): input_map = dict( args=dict(argstr='%s', ), - data_header=dict(argstr='-header %s', ), + data_header=dict( + argstr='-header %s', + extensions=None, + ), eigen_data=dict( argstr='-inputfile %s', + extensions=None, mandatory=True, ), 
environ=dict( @@ -29,6 +33,7 @@ def test_DTMetric_inputs(): ), outputfile=dict( argstr='-outputfile %s', + extensions=None, genfile=True, ), ) @@ -38,7 +43,7 @@ def test_DTMetric_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DTMetric_outputs(): - output_map = dict(metric_stats=dict(), ) + output_map = dict(metric_stats=dict(extensions=None, ), ) outputs = DTMetric.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py b/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py index a2ebfbf0a6..350c604c99 100644 --- a/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py +++ b/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py @@ -12,11 +12,13 @@ def test_FSL2Scheme_inputs(): ), bval_file=dict( argstr='-bvalfile %s', + extensions=None, mandatory=True, position=2, ), bvec_file=dict( argstr='-bvecfile %s', + extensions=None, mandatory=True, position=1, ), @@ -49,7 +51,7 @@ def test_FSL2Scheme_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_FSL2Scheme_outputs(): - output_map = dict(scheme=dict(), ) + output_map = dict(scheme=dict(extensions=None, ), ) outputs = FSL2Scheme.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py b/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py index adae10f0bb..29864bef82 100644 --- a/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py +++ b/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py @@ -12,6 +12,7 @@ def test_Image2Voxel_inputs(): ), in_file=dict( argstr='-4dimage %s', + extensions=None, mandatory=True, position=1, ), @@ -32,7 +33,7 @@ def test_Image2Voxel_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Image2Voxel_outputs(): - output_map = dict(voxel_order=dict(), ) + output_map = dict(voxel_order=dict(extensions=None, ), ) outputs = Image2Voxel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ImageStats.py b/nipype/interfaces/camino/tests/test_auto_ImageStats.py index 3813051025..7a16ea7732 100644 --- a/nipype/interfaces/camino/tests/test_auto_ImageStats.py +++ b/nipype/interfaces/camino/tests/test_auto_ImageStats.py @@ -21,6 +21,7 @@ def test_ImageStats_inputs(): ), output_root=dict( argstr='-outputroot %s', + extensions=None, mandatory=True, ), stat=dict( @@ -35,7 +36,7 @@ def test_ImageStats_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ImageStats_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = ImageStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_LinRecon.py b/nipype/interfaces/camino/tests/test_auto_LinRecon.py index 8998f42602..996d8f4b99 100644 --- a/nipype/interfaces/camino/tests/test_auto_LinRecon.py +++ b/nipype/interfaces/camino/tests/test_auto_LinRecon.py @@ -6,13 +6,17 @@ def test_LinRecon_inputs(): input_map = dict( args=dict(argstr='%s', ), - bgmask=dict(argstr='-bgmask %s', ), + bgmask=dict( + argstr='-bgmask %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=1, ), @@ -25,11 +29,13 @@ 
def test_LinRecon_inputs(): ), qball_mat=dict( argstr='%s', + extensions=None, mandatory=True, position=3, ), scheme_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -40,7 +46,7 @@ def test_LinRecon_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_LinRecon_outputs(): - output_map = dict(recon_data=dict(), ) + output_map = dict(recon_data=dict(extensions=None, ), ) outputs = LinRecon.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_MESD.py b/nipype/interfaces/camino/tests/test_auto_MESD.py index 73089a3395..57dbbf3b28 100644 --- a/nipype/interfaces/camino/tests/test_auto_MESD.py +++ b/nipype/interfaces/camino/tests/test_auto_MESD.py @@ -6,7 +6,10 @@ def test_MESD_inputs(): input_map = dict( args=dict(argstr='%s', ), - bgmask=dict(argstr='-bgmask %s', ), + bgmask=dict( + argstr='-bgmask %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, @@ -17,6 +20,7 @@ def test_MESD_inputs(): ), in_file=dict( argstr='-inputfile %s', + extensions=None, mandatory=True, position=1, ), @@ -43,6 +47,7 @@ def test_MESD_inputs(): ), scheme_file=dict( argstr='-schemefile %s', + extensions=None, mandatory=True, ), ) @@ -52,7 +57,7 @@ def test_MESD_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MESD_outputs(): - output_map = dict(mesd_data=dict(), ) + output_map = dict(mesd_data=dict(extensions=None, ), ) outputs = MESD.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ModelFit.py b/nipype/interfaces/camino/tests/test_auto_ModelFit.py index 04d22bbe59..e5c16ec975 100644 --- a/nipype/interfaces/camino/tests/test_auto_ModelFit.py +++ b/nipype/interfaces/camino/tests/test_auto_ModelFit.py @@ -6,7 +6,10 @@ def test_ModelFit_inputs(): input_map = dict( args=dict(argstr='%s', ), - bgmask=dict(argstr='-bgmask %s', ), + bgmask=dict( + argstr='-bgmask %s', + extensions=None, + ), bgthresh=dict(argstr='-bgthresh %G', ), cfthresh=dict(argstr='-csfthresh %G', ), environ=dict( @@ -17,6 +20,7 @@ def test_ModelFit_inputs(): fixedmodq=dict(argstr='-fixedmod %s', ), in_file=dict( argstr='-inputfile %s', + extensions=None, mandatory=True, ), inputdatatype=dict(argstr='-inputdatatype %s', ), @@ -24,17 +28,30 @@ def test_ModelFit_inputs(): argstr='-model %s', mandatory=True, ), - noisemap=dict(argstr='-noisemap %s', ), + noisemap=dict( + argstr='-noisemap %s', + extensions=None, + ), out_file=dict( argstr='> %s', genfile=True, position=-1, ), - outlier=dict(argstr='-outliermap %s', ), - outputfile=dict(argstr='-outputfile %s', ), - residualmap=dict(argstr='-residualmap %s', ), + outlier=dict( + argstr='-outliermap %s', + extensions=None, + ), + outputfile=dict( + argstr='-outputfile %s', + extensions=None, + ), + residualmap=dict( + argstr='-residualmap %s', + extensions=None, + ), scheme_file=dict( argstr='-schemefile %s', + extensions=None, mandatory=True, ), sigma=dict(argstr='-sigma %G', ), @@ -46,7 +63,7 @@ def test_ModelFit_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ModelFit_outputs(): - output_map = dict(fitted_data=dict(), ) + output_map = dict(fitted_data=dict(extensions=None, ), ) outputs = ModelFit.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py 
b/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py index 9a4867a73c..0a5583b03e 100644 --- a/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py +++ b/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py @@ -6,23 +6,33 @@ def test_NIfTIDT2Camino_inputs(): input_map = dict( args=dict(argstr='%s', ), - bgmask=dict(argstr='-bgmask %s', ), + bgmask=dict( + argstr='-bgmask %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, ), in_file=dict( argstr='-inputfile %s', + extensions=None, mandatory=True, position=1, ), - lns0_file=dict(argstr='-lns0 %s', ), + lns0_file=dict( + argstr='-lns0 %s', + extensions=None, + ), out_file=dict( argstr='> %s', genfile=True, position=-1, ), - s0_file=dict(argstr='-s0 %s', ), + s0_file=dict( + argstr='-s0 %s', + extensions=None, + ), scaleinter=dict(argstr='-scaleinter %s', ), scaleslope=dict(argstr='-scaleslope %s', ), uppertriangular=dict(argstr='-uppertriangular %s', ), @@ -33,7 +43,7 @@ def test_NIfTIDT2Camino_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_NIfTIDT2Camino_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = NIfTIDT2Camino.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py b/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py index f2b59666a2..c918a372f1 100644 --- a/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py +++ b/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py @@ -13,6 +13,7 @@ def test_PicoPDFs_inputs(): ), in_file=dict( argstr='< %s', + extensions=None, mandatory=True, position=1, ), @@ -50,7 +51,7 @@ def test_PicoPDFs_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_PicoPDFs_outputs(): - output_map = dict(pdfs=dict(), ) + output_map = dict(pdfs=dict(extensions=None, ), ) outputs = PicoPDFs.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py b/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py index 8838bb72cf..c30ef08d8c 100644 --- a/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py +++ b/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py @@ -16,15 +16,22 @@ def test_ProcStreamlines_inputs(): units='NA', ), discardloops=dict(argstr='-discardloops', ), - endpointfile=dict(argstr='-endpointfile %s', ), + endpointfile=dict( + argstr='-endpointfile %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, ), - exclusionfile=dict(argstr='-exclusionfile %s', ), + exclusionfile=dict( + argstr='-exclusionfile %s', + extensions=None, + ), gzip=dict(argstr='-gzip', ), in_file=dict( argstr='-inputfile %s', + extensions=None, mandatory=True, position=1, ), @@ -70,7 +77,10 @@ def test_ProcStreamlines_inputs(): argstr='-outputcp', requires=['outputroot', 'seedfile'], ), - outputroot=dict(argstr='-outputroot %s', ), + outputroot=dict( + argstr='-outputroot %s', + extensions=None, + ), outputsc=dict( argstr='-outputsc', requires=['outputroot', 'seedfile'], @@ -84,7 +94,10 @@ def test_ProcStreamlines_inputs(): argstr='-resamplestepsize %d', units='NA', ), - seedfile=dict(argstr='-seedfile %s', ), + seedfile=dict( + argstr='-seedfile %s', + extensions=None, + ), seedpointmm=dict( argstr='-seedpointmm %s', units='mm', @@ -93,14 +106,20 @@ def test_ProcStreamlines_inputs(): argstr='-seedpointvox %s', 
units='voxels', ), - targetfile=dict(argstr='-targetfile %s', ), + targetfile=dict( + argstr='-targetfile %s', + extensions=None, + ), truncateinexclusion=dict(argstr='-truncateinexclusion', ), truncateloops=dict(argstr='-truncateloops', ), voxeldims=dict( argstr='-voxeldims %s', units='mm', ), - waypointfile=dict(argstr='-waypointfile %s', ), + waypointfile=dict( + argstr='-waypointfile %s', + extensions=None, + ), ) inputs = ProcStreamlines.input_spec() @@ -110,7 +129,7 @@ def test_ProcStreamlines_inputs(): def test_ProcStreamlines_outputs(): output_map = dict( outputroot_files=dict(), - proc=dict(), + proc=dict(extensions=None, ), ) outputs = ProcStreamlines.output_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_QBallMX.py b/nipype/interfaces/camino/tests/test_auto_QBallMX.py index e965cb606a..7f7d0bc99f 100644 --- a/nipype/interfaces/camino/tests/test_auto_QBallMX.py +++ b/nipype/interfaces/camino/tests/test_auto_QBallMX.py @@ -33,6 +33,7 @@ def test_QBallMX_inputs(): ), scheme_file=dict( argstr='-schemefile %s', + extensions=None, mandatory=True, ), smoothingsigma=dict( @@ -46,7 +47,7 @@ def test_QBallMX_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_QBallMX_outputs(): - output_map = dict(qmat=dict(), ) + output_map = dict(qmat=dict(extensions=None, ), ) outputs = QBallMX.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py b/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py index c52ca04929..795138ea89 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py +++ b/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py @@ -17,10 +17,12 @@ def test_SFLUTGen_inputs(): ), in_file=dict( argstr='-inputfile %s', + extensions=None, mandatory=True, ), info_file=dict( argstr='-infofile %s', + extensions=None, mandatory=True, ), minvectsperbin=dict( @@ -52,8 +54,8 @@ def test_SFLUTGen_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_SFLUTGen_outputs(): output_map = dict( - lut_one_fibre=dict(), - lut_two_fibres=dict(), + lut_one_fibre=dict(extensions=None, ), + lut_two_fibres=dict(extensions=None, ), ) outputs = SFLUTGen.output_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py b/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py index e83a01e2c3..80222fce4c 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py +++ b/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py @@ -12,6 +12,7 @@ def test_SFPICOCalibData_inputs(): ), info_file=dict( argstr='-infooutputfile %s', + extensions=None, genfile=True, hash_files=False, mandatory=True, @@ -31,6 +32,7 @@ def test_SFPICOCalibData_inputs(): ), scheme_file=dict( argstr='-schemefile %s', + extensions=None, mandatory=True, ), seed=dict( @@ -77,8 +79,8 @@ def test_SFPICOCalibData_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_SFPICOCalibData_outputs(): output_map = dict( - PICOCalib=dict(), - calib_info=dict(), + PICOCalib=dict(extensions=None, ), + calib_info=dict(extensions=None, ), ) outputs = SFPICOCalibData.output_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_SFPeaks.py b/nipype/interfaces/camino/tests/test_auto_SFPeaks.py index 959545d042..8db250c58c 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFPeaks.py +++ b/nipype/interfaces/camino/tests/test_auto_SFPeaks.py @@ -16,6 +16,7 @@ def test_SFPeaks_inputs(): ), in_file=dict( argstr='-inputfile 
%s', + extensions=None, mandatory=True, ), inputmodel=dict( @@ -52,7 +53,10 @@ def test_SFPeaks_inputs(): argstr='-rbfpointset %d', units='NA', ), - scheme_file=dict(argstr='%s', ), + scheme_file=dict( + argstr='%s', + extensions=None, + ), searchradius=dict( argstr='-searchradius %f', units='NA', @@ -68,7 +72,7 @@ def test_SFPeaks_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SFPeaks_outputs(): - output_map = dict(peaks=dict(), ) + output_map = dict(peaks=dict(extensions=None, ), ) outputs = SFPeaks.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_Shredder.py b/nipype/interfaces/camino/tests/test_auto_Shredder.py index 1099693874..594b35fc60 100644 --- a/nipype/interfaces/camino/tests/test_auto_Shredder.py +++ b/nipype/interfaces/camino/tests/test_auto_Shredder.py @@ -17,6 +17,7 @@ def test_Shredder_inputs(): ), in_file=dict( argstr='< %s', + extensions=None, mandatory=True, position=-2, ), @@ -42,7 +43,7 @@ def test_Shredder_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Shredder_outputs(): - output_map = dict(shredded=dict(), ) + output_map = dict(shredded=dict(extensions=None, ), ) outputs = Shredder.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_Track.py b/nipype/interfaces/camino/tests/test_auto_Track.py index ad8d3ff2bb..fa03ce29d3 100644 --- a/nipype/interfaces/camino/tests/test_auto_Track.py +++ b/nipype/interfaces/camino/tests/test_auto_Track.py @@ -5,7 +5,10 @@ def test_Track_inputs(): input_map = dict( - anisfile=dict(argstr='-anisfile %s', ), + anisfile=dict( + argstr='-anisfile %s', + extensions=None, + ), anisthresh=dict(argstr='-anisthresh %f', ), args=dict(argstr='%s', ), curveinterval=dict( @@ -24,6 +27,7 @@ def test_Track_inputs(): gzip=dict(argstr='-gzip', ), in_file=dict( argstr='-inputfile %s', + extensions=None, position=1, ), inputdatatype=dict(argstr='-inputdatatype %s', ), @@ -43,16 +47,19 @@ def test_Track_inputs(): ), out_file=dict( argstr='-outputfile %s', + extensions=None, genfile=True, position=-1, ), output_root=dict( argstr='-outputroot %s', + extensions=None, position=-1, ), outputtracts=dict(argstr='-outputtracts %s', ), seed_file=dict( argstr='-seedfile %s', + extensions=None, position=2, ), stepsize=dict( @@ -74,7 +81,7 @@ def test_Track_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Track_outputs(): - output_map = dict(tracked=dict(), ) + output_map = dict(tracked=dict(extensions=None, ), ) outputs = Track.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py b/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py index 89515cef55..d6c7d332ac 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py @@ -5,7 +5,10 @@ def test_TrackBallStick_inputs(): input_map = dict( - anisfile=dict(argstr='-anisfile %s', ), + anisfile=dict( + argstr='-anisfile %s', + extensions=None, + ), anisthresh=dict(argstr='-anisthresh %f', ), args=dict(argstr='%s', ), curveinterval=dict( @@ -24,6 +27,7 @@ def test_TrackBallStick_inputs(): gzip=dict(argstr='-gzip', ), in_file=dict( argstr='-inputfile %s', + extensions=None, position=1, ), 
inputdatatype=dict(argstr='-inputdatatype %s', ), @@ -43,16 +47,19 @@ def test_TrackBallStick_inputs(): ), out_file=dict( argstr='-outputfile %s', + extensions=None, genfile=True, position=-1, ), output_root=dict( argstr='-outputroot %s', + extensions=None, position=-1, ), outputtracts=dict(argstr='-outputtracts %s', ), seed_file=dict( argstr='-seedfile %s', + extensions=None, position=2, ), stepsize=dict( @@ -74,7 +81,7 @@ def test_TrackBallStick_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TrackBallStick_outputs(): - output_map = dict(tracked=dict(), ) + output_map = dict(tracked=dict(extensions=None, ), ) outputs = TrackBallStick.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py b/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py index 2815ef18ee..fe28f030a3 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py @@ -5,7 +5,10 @@ def test_TrackBayesDirac_inputs(): input_map = dict( - anisfile=dict(argstr='-anisfile %s', ), + anisfile=dict( + argstr='-anisfile %s', + extensions=None, + ), anisthresh=dict(argstr='-anisthresh %f', ), args=dict(argstr='%s', ), curveinterval=dict( @@ -25,10 +28,14 @@ def test_TrackBayesDirac_inputs(): usedefault=True, ), extpriordatatype=dict(argstr='-extpriordatatype %s', ), - extpriorfile=dict(argstr='-extpriorfile %s', ), + extpriorfile=dict( + argstr='-extpriorfile %s', + extensions=None, + ), gzip=dict(argstr='-gzip', ), in_file=dict( argstr='-inputfile %s', + extensions=None, position=1, ), inputdatatype=dict(argstr='-inputdatatype %s', ), @@ -52,11 +59,13 @@ def test_TrackBayesDirac_inputs(): ), out_file=dict( argstr='-outputfile %s', + extensions=None, genfile=True, position=-1, ), output_root=dict( argstr='-outputroot %s', + extensions=None, position=-1, ), outputtracts=dict(argstr='-outputtracts %s', ), @@ -64,10 +73,12 @@ def test_TrackBayesDirac_inputs(): pointset=dict(argstr='-pointset %s', ), scheme_file=dict( argstr='-schemefile %s', + extensions=None, mandatory=True, ), seed_file=dict( argstr='-seedfile %s', + extensions=None, position=2, ), stepsize=dict( @@ -89,7 +100,7 @@ def test_TrackBayesDirac_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TrackBayesDirac_outputs(): - output_map = dict(tracked=dict(), ) + output_map = dict(tracked=dict(extensions=None, ), ) outputs = TrackBayesDirac.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py index 827dbb2f27..483e752914 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py @@ -5,7 +5,10 @@ def test_TrackBedpostxDeter_inputs(): input_map = dict( - anisfile=dict(argstr='-anisfile %s', ), + anisfile=dict( + argstr='-anisfile %s', + extensions=None, + ), anisthresh=dict(argstr='-anisthresh %f', ), args=dict(argstr='%s', ), bedpostxdir=dict( @@ -28,6 +31,7 @@ def test_TrackBedpostxDeter_inputs(): gzip=dict(argstr='-gzip', ), in_file=dict( argstr='-inputfile %s', + extensions=None, position=1, ), inputdatatype=dict(argstr='-inputdatatype %s', ), @@ -51,16 +55,19 @@ def test_TrackBedpostxDeter_inputs(): ), out_file=dict( argstr='-outputfile %s', 
+ extensions=None, genfile=True, position=-1, ), output_root=dict( argstr='-outputroot %s', + extensions=None, position=-1, ), outputtracts=dict(argstr='-outputtracts %s', ), seed_file=dict( argstr='-seedfile %s', + extensions=None, position=2, ), stepsize=dict( @@ -82,7 +89,7 @@ def test_TrackBedpostxDeter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TrackBedpostxDeter_outputs(): - output_map = dict(tracked=dict(), ) + output_map = dict(tracked=dict(extensions=None, ), ) outputs = TrackBedpostxDeter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py index e250ec9c40..b37cf9e550 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py @@ -5,7 +5,10 @@ def test_TrackBedpostxProba_inputs(): input_map = dict( - anisfile=dict(argstr='-anisfile %s', ), + anisfile=dict( + argstr='-anisfile %s', + extensions=None, + ), anisthresh=dict(argstr='-anisthresh %f', ), args=dict(argstr='%s', ), bedpostxdir=dict( @@ -28,6 +31,7 @@ def test_TrackBedpostxProba_inputs(): gzip=dict(argstr='-gzip', ), in_file=dict( argstr='-inputfile %s', + extensions=None, position=1, ), inputdatatype=dict(argstr='-inputdatatype %s', ), @@ -55,16 +59,19 @@ def test_TrackBedpostxProba_inputs(): ), out_file=dict( argstr='-outputfile %s', + extensions=None, genfile=True, position=-1, ), output_root=dict( argstr='-outputroot %s', + extensions=None, position=-1, ), outputtracts=dict(argstr='-outputtracts %s', ), seed_file=dict( argstr='-seedfile %s', + extensions=None, position=2, ), stepsize=dict( @@ -86,7 +93,7 @@ def test_TrackBedpostxProba_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TrackBedpostxProba_outputs(): - output_map = dict(tracked=dict(), ) + output_map = dict(tracked=dict(extensions=None, ), ) outputs = TrackBedpostxProba.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py b/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py index 90ffd05a2d..dec2509b1f 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py @@ -5,10 +5,16 @@ def test_TrackBootstrap_inputs(): input_map = dict( - anisfile=dict(argstr='-anisfile %s', ), + anisfile=dict( + argstr='-anisfile %s', + extensions=None, + ), anisthresh=dict(argstr='-anisthresh %f', ), args=dict(argstr='%s', ), - bgmask=dict(argstr='-bgmask %s', ), + bgmask=dict( + argstr='-bgmask %s', + extensions=None, + ), bsdatafiles=dict( argstr='-bsdatafile %s', mandatory=True, @@ -29,6 +35,7 @@ def test_TrackBootstrap_inputs(): gzip=dict(argstr='-gzip', ), in_file=dict( argstr='-inputfile %s', + extensions=None, position=1, ), inputdatatype=dict(argstr='-inputdatatype %s', ), @@ -53,20 +60,24 @@ def test_TrackBootstrap_inputs(): ), out_file=dict( argstr='-outputfile %s', + extensions=None, genfile=True, position=-1, ), output_root=dict( argstr='-outputroot %s', + extensions=None, position=-1, ), outputtracts=dict(argstr='-outputtracts %s', ), scheme_file=dict( argstr='-schemefile %s', + extensions=None, mandatory=True, ), seed_file=dict( argstr='-seedfile %s', + extensions=None, position=2, ), stepsize=dict( @@ -88,7 +99,7 @@ def 
test_TrackBootstrap_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TrackBootstrap_outputs(): - output_map = dict(tracked=dict(), ) + output_map = dict(tracked=dict(extensions=None, ), ) outputs = TrackBootstrap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackDT.py b/nipype/interfaces/camino/tests/test_auto_TrackDT.py index 7ab3772468..ace97fc8f7 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackDT.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackDT.py @@ -5,7 +5,10 @@ def test_TrackDT_inputs(): input_map = dict( - anisfile=dict(argstr='-anisfile %s', ), + anisfile=dict( + argstr='-anisfile %s', + extensions=None, + ), anisthresh=dict(argstr='-anisthresh %f', ), args=dict(argstr='%s', ), curveinterval=dict( @@ -24,6 +27,7 @@ def test_TrackDT_inputs(): gzip=dict(argstr='-gzip', ), in_file=dict( argstr='-inputfile %s', + extensions=None, position=1, ), inputdatatype=dict(argstr='-inputdatatype %s', ), @@ -43,16 +47,19 @@ def test_TrackDT_inputs(): ), out_file=dict( argstr='-outputfile %s', + extensions=None, genfile=True, position=-1, ), output_root=dict( argstr='-outputroot %s', + extensions=None, position=-1, ), outputtracts=dict(argstr='-outputtracts %s', ), seed_file=dict( argstr='-seedfile %s', + extensions=None, position=2, ), stepsize=dict( @@ -74,7 +81,7 @@ def test_TrackDT_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TrackDT_outputs(): - output_map = dict(tracked=dict(), ) + output_map = dict(tracked=dict(extensions=None, ), ) outputs = TrackDT.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackPICo.py b/nipype/interfaces/camino/tests/test_auto_TrackPICo.py index 30ff12db9f..e28e87dc8f 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackPICo.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackPICo.py @@ -5,7 +5,10 @@ def test_TrackPICo_inputs(): input_map = dict( - anisfile=dict(argstr='-anisfile %s', ), + anisfile=dict( + argstr='-anisfile %s', + extensions=None, + ), anisthresh=dict(argstr='-anisthresh %f', ), args=dict(argstr='%s', ), curveinterval=dict( @@ -24,6 +27,7 @@ def test_TrackPICo_inputs(): gzip=dict(argstr='-gzip', ), in_file=dict( argstr='-inputfile %s', + extensions=None, position=1, ), inputdatatype=dict(argstr='-inputdatatype %s', ), @@ -47,17 +51,20 @@ def test_TrackPICo_inputs(): ), out_file=dict( argstr='-outputfile %s', + extensions=None, genfile=True, position=-1, ), output_root=dict( argstr='-outputroot %s', + extensions=None, position=-1, ), outputtracts=dict(argstr='-outputtracts %s', ), pdf=dict(argstr='-pdf %s', ), seed_file=dict( argstr='-seedfile %s', + extensions=None, position=2, ), stepsize=dict( @@ -79,7 +86,7 @@ def test_TrackPICo_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TrackPICo_outputs(): - output_map = dict(tracked=dict(), ) + output_map = dict(tracked=dict(extensions=None, ), ) outputs = TrackPICo.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TractShredder.py b/nipype/interfaces/camino/tests/test_auto_TractShredder.py index c9cf40e1c3..eeed244533 100644 --- a/nipype/interfaces/camino/tests/test_auto_TractShredder.py +++ b/nipype/interfaces/camino/tests/test_auto_TractShredder.py @@ -17,6 
+17,7 @@ def test_TractShredder_inputs(): ), in_file=dict( argstr='< %s', + extensions=None, mandatory=True, position=-2, ), @@ -42,7 +43,7 @@ def test_TractShredder_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TractShredder_outputs(): - output_map = dict(shredded=dict(), ) + output_map = dict(shredded=dict(extensions=None, ), ) outputs = TractShredder.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py b/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py index 8dc8bd03e9..72b6f106d8 100644 --- a/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py +++ b/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py @@ -13,6 +13,7 @@ def test_VtkStreamlines_inputs(): ), in_file=dict( argstr=' < %s', + extensions=None, mandatory=True, position=-2, ), @@ -29,14 +30,17 @@ def test_VtkStreamlines_inputs(): ), scalar_file=dict( argstr='-scalarfile %s', + extensions=None, position=3, ), seed_file=dict( argstr='-seedfile %s', + extensions=None, position=1, ), target_file=dict( argstr='-targetfile %s', + extensions=None, position=2, ), voxeldims=dict( @@ -51,7 +55,7 @@ def test_VtkStreamlines_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_VtkStreamlines_outputs(): - output_map = dict(vtk=dict(), ) + output_map = dict(vtk=dict(extensions=None, ), ) outputs = VtkStreamlines.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py b/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py index b4d5092dab..4a88a119fb 100644 --- a/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py +++ b/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py @@ -18,6 +18,7 @@ def test_Camino2Trackvis_inputs(): ), in_file=dict( argstr='-i %s', + extensions=None, mandatory=True, position=1, ), @@ -28,10 +29,12 @@ def test_Camino2Trackvis_inputs(): ), nifti_file=dict( argstr='--nifti %s', + extensions=None, position=7, ), out_file=dict( argstr='-o %s', + extensions=None, genfile=True, position=2, ), @@ -43,6 +46,7 @@ def test_Camino2Trackvis_inputs(): ), voxel_order=dict( argstr='--voxel-order %s', + extensions=None, mandatory=True, position=6, ), @@ -53,7 +57,7 @@ def test_Camino2Trackvis_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Camino2Trackvis_outputs(): - output_map = dict(trackvis=dict(), ) + output_map = dict(trackvis=dict(extensions=None, ), ) outputs = Camino2Trackvis.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py b/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py index 978b2439a2..f54284fe50 100644 --- a/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py +++ b/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py @@ -7,6 +7,7 @@ def test_Trackvis2Camino_inputs(): input_map = dict( append_file=dict( argstr='-a %s', + extensions=None, position=2, ), args=dict(argstr='%s', ), @@ -16,11 +17,13 @@ def test_Trackvis2Camino_inputs(): ), in_file=dict( argstr='-i %s', + extensions=None, mandatory=True, position=1, ), out_file=dict( argstr='-o %s', + extensions=None, genfile=True, position=2, ), @@ -31,7 +34,7 @@ def 
test_Trackvis2Camino_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Trackvis2Camino_outputs(): - output_map = dict(camino=dict(), ) + output_map = dict(camino=dict(extensions=None, ), ) outputs = Trackvis2Camino.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py b/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py index a80bbe757a..bf7b1a8553 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py +++ b/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py @@ -7,9 +7,9 @@ def test_AverageNetworks_inputs(): input_map = dict( group_id=dict(usedefault=True, ), in_files=dict(mandatory=True, ), - out_gexf_groupavg=dict(), - out_gpickled_groupavg=dict(), - resolution_network_file=dict(), + out_gexf_groupavg=dict(extensions=None, ), + out_gpickled_groupavg=dict(extensions=None, ), + resolution_network_file=dict(extensions=None, ), ) inputs = AverageNetworks.input_spec() @@ -18,8 +18,8 @@ def test_AverageNetworks_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_AverageNetworks_outputs(): output_map = dict( - gexf_groupavg=dict(), - gpickled_groupavg=dict(), + gexf_groupavg=dict(extensions=None, ), + gpickled_groupavg=dict(extensions=None, ), matlab_groupavgs=dict(), ) outputs = AverageNetworks.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py b/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py index 00a1acea98..254aa23d6f 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py +++ b/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py @@ -15,7 +15,10 @@ def test_CFFConverter_inputs(): graphml_networks=dict(), license=dict(), nifti_volumes=dict(), - out_file=dict(usedefault=True, ), + out_file=dict( + extensions=None, + usedefault=True, + ), publisher=dict(), references=dict(), relation=dict(), @@ -32,7 +35,7 @@ def test_CFFConverter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CFFConverter_outputs(): - output_map = dict(connectome_file=dict(), ) + output_map = dict(connectome_file=dict(extensions=None, ), ) outputs = CFFConverter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py b/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py index 60e8596f5e..282e343c07 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py +++ b/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py @@ -6,16 +6,46 @@ def test_CreateMatrix_inputs(): input_map = dict( count_region_intersections=dict(usedefault=True, ), - out_endpoint_array_name=dict(genfile=True, ), - out_fiber_length_std_matrix_mat_file=dict(genfile=True, ), - out_intersection_matrix_mat_file=dict(genfile=True, ), - out_matrix_file=dict(genfile=True, ), - out_matrix_mat_file=dict(usedefault=True, ), - out_mean_fiber_length_matrix_mat_file=dict(genfile=True, ), - out_median_fiber_length_matrix_mat_file=dict(genfile=True, ), - resolution_network_file=dict(mandatory=True, ), - roi_file=dict(mandatory=True, ), - tract_file=dict(mandatory=True, ), + out_endpoint_array_name=dict( + extensions=None, + genfile=True, + ), + out_fiber_length_std_matrix_mat_file=dict( + extensions=None, + genfile=True, + ), + out_intersection_matrix_mat_file=dict( + extensions=None, + genfile=True, + ), + out_matrix_file=dict( + extensions=None, + genfile=True, + ), 
+ out_matrix_mat_file=dict( + extensions=None, + usedefault=True, + ), + out_mean_fiber_length_matrix_mat_file=dict( + extensions=None, + genfile=True, + ), + out_median_fiber_length_matrix_mat_file=dict( + extensions=None, + genfile=True, + ), + resolution_network_file=dict( + extensions=None, + mandatory=True, + ), + roi_file=dict( + extensions=None, + mandatory=True, + ), + tract_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = CreateMatrix.input_spec() @@ -24,24 +54,24 @@ def test_CreateMatrix_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_CreateMatrix_outputs(): output_map = dict( - endpoint_file=dict(), - endpoint_file_mm=dict(), - fiber_label_file=dict(), - fiber_labels_noorphans=dict(), - fiber_length_file=dict(), - fiber_length_std_matrix_mat_file=dict(), + endpoint_file=dict(extensions=None, ), + endpoint_file_mm=dict(extensions=None, ), + fiber_label_file=dict(extensions=None, ), + fiber_labels_noorphans=dict(extensions=None, ), + fiber_length_file=dict(extensions=None, ), + fiber_length_std_matrix_mat_file=dict(extensions=None, ), filtered_tractographies=dict(), - filtered_tractography=dict(), - filtered_tractography_by_intersections=dict(), - intersection_matrix_file=dict(), - intersection_matrix_mat_file=dict(), + filtered_tractography=dict(extensions=None, ), + filtered_tractography_by_intersections=dict(extensions=None, ), + intersection_matrix_file=dict(extensions=None, ), + intersection_matrix_mat_file=dict(extensions=None, ), matlab_matrix_files=dict(), - matrix_file=dict(), + matrix_file=dict(extensions=None, ), matrix_files=dict(), - matrix_mat_file=dict(), - mean_fiber_length_matrix_mat_file=dict(), - median_fiber_length_matrix_mat_file=dict(), - stats_file=dict(), + matrix_mat_file=dict(extensions=None, ), + mean_fiber_length_matrix_mat_file=dict(extensions=None, ), + median_fiber_length_matrix_mat_file=dict(extensions=None, ), + stats_file=dict(extensions=None, ), ) outputs = CreateMatrix.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py b/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py index 3635f21e59..ada2fa4b6a 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py +++ b/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py @@ -5,9 +5,18 @@ def test_CreateNodes_inputs(): input_map = dict( - out_filename=dict(usedefault=True, ), - resolution_network_file=dict(mandatory=True, ), - roi_file=dict(mandatory=True, ), + out_filename=dict( + extensions=None, + usedefault=True, + ), + resolution_network_file=dict( + extensions=None, + mandatory=True, + ), + roi_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = CreateNodes.input_spec() @@ -15,7 +24,7 @@ def test_CreateNodes_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CreateNodes_outputs(): - output_map = dict(node_network=dict(), ) + output_map = dict(node_network=dict(extensions=None, ), ) outputs = CreateNodes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py b/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py index ceaa6d8dea..770b225e13 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py +++ b/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py @@ -6,7 +6,10 @@ def test_MergeCNetworks_inputs(): input_map = dict( in_files=dict(mandatory=True, ), - out_file=dict(usedefault=True, ), + out_file=dict( + extensions=None, + 
usedefault=True, + ), ) inputs = MergeCNetworks.input_spec() @@ -14,7 +17,7 @@ def test_MergeCNetworks_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MergeCNetworks_outputs(): - output_map = dict(connectome_file=dict(), ) + output_map = dict(connectome_file=dict(extensions=None, ), ) outputs = MergeCNetworks.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py b/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py index e3220e4790..ac41946548 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py +++ b/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py @@ -8,10 +8,10 @@ def test_NetworkBasedStatistic_inputs(): edge_key=dict(usedefault=True, ), in_group1=dict(mandatory=True, ), in_group2=dict(mandatory=True, ), - node_position_network=dict(), + node_position_network=dict(extensions=None, ), number_of_permutations=dict(usedefault=True, ), - out_nbs_network=dict(), - out_nbs_pval_network=dict(), + out_nbs_network=dict(extensions=None, ), + out_nbs_pval_network=dict(extensions=None, ), t_tail=dict(usedefault=True, ), threshold=dict(usedefault=True, ), ) @@ -22,8 +22,8 @@ def test_NetworkBasedStatistic_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_NetworkBasedStatistic_outputs(): output_map = dict( - nbs_network=dict(), - nbs_pval_network=dict(), + nbs_network=dict(extensions=None, ), + nbs_pval_network=dict(extensions=None, ), network_files=dict(), ) outputs = NetworkBasedStatistic.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py b/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py index d9a3f0c740..09b661a3fb 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py +++ b/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py @@ -6,14 +6,38 @@ def test_NetworkXMetrics_inputs(): input_map = dict( compute_clique_related_measures=dict(usedefault=True, ), - in_file=dict(mandatory=True, ), - out_edge_metrics_matlab=dict(genfile=True, ), - out_global_metrics_matlab=dict(genfile=True, ), - out_k_core=dict(usedefault=True, ), - out_k_crust=dict(usedefault=True, ), - out_k_shell=dict(usedefault=True, ), - out_node_metrics_matlab=dict(genfile=True, ), - out_pickled_extra_measures=dict(usedefault=True, ), + in_file=dict( + extensions=None, + mandatory=True, + ), + out_edge_metrics_matlab=dict( + extensions=None, + genfile=True, + ), + out_global_metrics_matlab=dict( + extensions=None, + genfile=True, + ), + out_k_core=dict( + extensions=None, + usedefault=True, + ), + out_k_crust=dict( + extensions=None, + usedefault=True, + ), + out_k_shell=dict( + extensions=None, + usedefault=True, + ), + out_node_metrics_matlab=dict( + extensions=None, + genfile=True, + ), + out_pickled_extra_measures=dict( + extensions=None, + usedefault=True, + ), treat_as_weighted_graph=dict(usedefault=True, ), ) inputs = NetworkXMetrics.input_spec() @@ -24,18 +48,18 @@ def test_NetworkXMetrics_inputs(): def test_NetworkXMetrics_outputs(): output_map = dict( edge_measure_networks=dict(), - edge_measures_matlab=dict(), - global_measures_matlab=dict(), + edge_measures_matlab=dict(extensions=None, ), + global_measures_matlab=dict(extensions=None, ), gpickled_network_files=dict(), - k_core=dict(), - k_crust=dict(), + k_core=dict(extensions=None, ), + k_crust=dict(extensions=None, ), k_networks=dict(), - k_shell=dict(), + 
k_shell=dict(extensions=None, ), matlab_dict_measures=dict(), matlab_matrix_files=dict(), node_measure_networks=dict(), - node_measures_matlab=dict(), - pickled_extra_measures=dict(), + node_measures_matlab=dict(extensions=None, ), + pickled_extra_measures=dict(extensions=None, ), ) outputs = NetworkXMetrics.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py b/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py index edcdf2e7a1..ca7609298a 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py +++ b/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py @@ -7,7 +7,10 @@ def test_Parcellate_inputs(): input_map = dict( dilation=dict(usedefault=True, ), freesurfer_dir=dict(), - out_roi_file=dict(genfile=True, ), + out_roi_file=dict( + extensions=None, + genfile=True, + ), parcellation_name=dict(usedefault=True, ), subject_id=dict(mandatory=True, ), subjects_dir=dict(), @@ -19,14 +22,14 @@ def test_Parcellate_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Parcellate_outputs(): output_map = dict( - aseg_file=dict(), - cc_unknown_file=dict(), - dilated_roi_file_in_structural_space=dict(), - ribbon_file=dict(), - roi_file=dict(), - roi_file_in_structural_space=dict(), - roiv_file=dict(), - white_matter_mask_file=dict(), + aseg_file=dict(extensions=None, ), + cc_unknown_file=dict(extensions=None, ), + dilated_roi_file_in_structural_space=dict(extensions=None, ), + ribbon_file=dict(extensions=None, ), + roi_file=dict(extensions=None, ), + roi_file_in_structural_space=dict(extensions=None, ), + roiv_file=dict(extensions=None, ), + white_matter_mask_file=dict(extensions=None, ), ) outputs = Parcellate.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py b/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py index dd2ce50aec..0d6383f391 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py +++ b/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py @@ -5,11 +5,23 @@ def test_ROIGen_inputs(): input_map = dict( - LUT_file=dict(xor=['use_freesurfer_LUT'], ), - aparc_aseg_file=dict(mandatory=True, ), + LUT_file=dict( + extensions=None, + xor=['use_freesurfer_LUT'], + ), + aparc_aseg_file=dict( + extensions=None, + mandatory=True, + ), freesurfer_dir=dict(requires=['use_freesurfer_LUT'], ), - out_dict_file=dict(genfile=True, ), - out_roi_file=dict(genfile=True, ), + out_dict_file=dict( + extensions=None, + genfile=True, + ), + out_roi_file=dict( + extensions=None, + genfile=True, + ), use_freesurfer_LUT=dict(xor=['LUT_file'], ), ) inputs = ROIGen.input_spec() @@ -19,8 +31,8 @@ def test_ROIGen_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_ROIGen_outputs(): output_map = dict( - dict_file=dict(), - roi_file=dict(), + dict_file=dict(extensions=None, ), + roi_file=dict(extensions=None, ), ) outputs = ROIGen.output_spec() diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py index a39dbf6c3b..c4faac0a67 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py @@ -7,14 +7,19 @@ def test_DTIRecon_inputs(): input_map = dict( DWI=dict( argstr='%s', + extensions=None, mandatory=True, position=1, ), args=dict(argstr='%s', ), b0_threshold=dict(argstr='-b0_th', ), - bvals=dict(mandatory=True, ), + bvals=dict( + extensions=None, + mandatory=True, + ), bvecs=dict( argstr='-gm %s', + extensions=None, mandatory=True, ), environ=dict( 
@@ -41,18 +46,18 @@ def test_DTIRecon_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_DTIRecon_outputs(): output_map = dict( - ADC=dict(), - B0=dict(), - FA=dict(), - FA_color=dict(), - L1=dict(), - L2=dict(), - L3=dict(), - V1=dict(), - V2=dict(), - V3=dict(), - exp=dict(), - tensor=dict(), + ADC=dict(extensions=None, ), + B0=dict(extensions=None, ), + FA=dict(extensions=None, ), + FA_color=dict(extensions=None, ), + L1=dict(extensions=None, ), + L2=dict(extensions=None, ), + L3=dict(extensions=None, ), + V1=dict(extensions=None, ), + V2=dict(extensions=None, ), + V3=dict(extensions=None, ), + exp=dict(extensions=None, ), + tensor=dict(extensions=None, ), ) outputs = DTIRecon.output_spec() diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py index cf483d00d0..3fd69ee6b8 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py @@ -23,28 +23,34 @@ def test_DTITracker_inputs(): invert_z=dict(argstr='-iz', ), mask1_file=dict( argstr='-m %s', + extensions=None, mandatory=True, position=2, ), mask1_threshold=dict(position=3, ), mask2_file=dict( argstr='-m2 %s', + extensions=None, position=4, ), mask2_threshold=dict(position=5, ), output_file=dict( argstr='%s', + extensions=None, position=1, usedefault=True, ), - output_mask=dict(argstr='-om %s', ), + output_mask=dict( + argstr='-om %s', + extensions=None, + ), primary_vector=dict(argstr='-%s', ), random_seed=dict(argstr='-rseed %d', ), step_length=dict(argstr='-l %f', ), swap_xy=dict(argstr='-sxy', ), swap_yz=dict(argstr='-syz', ), swap_zx=dict(argstr='-szx', ), - tensor_file=dict(), + tensor_file=dict(extensions=None, ), tracking_method=dict(argstr='-%s', ), ) inputs = DTITracker.input_spec() @@ -54,8 +60,8 @@ def test_DTITracker_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_DTITracker_outputs(): output_map = dict( - mask_file=dict(), - track_file=dict(), + mask_file=dict(extensions=None, ), + track_file=dict(extensions=None, ), ) outputs = DTITracker.output_spec() diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py index 59bc8c25a5..5391290021 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py @@ -6,9 +6,13 @@ def test_HARDIMat_inputs(): input_map = dict( args=dict(argstr='%s', ), - bvals=dict(mandatory=True, ), + bvals=dict( + extensions=None, + mandatory=True, + ), bvecs=dict( argstr='%s', + extensions=None, mandatory=True, position=1, ), @@ -16,17 +20,27 @@ def test_HARDIMat_inputs(): nohash=True, usedefault=True, ), - image_info=dict(argstr='-info %s', ), + image_info=dict( + argstr='-info %s', + extensions=None, + ), image_orientation_vectors=dict(argstr='-iop %f', ), oblique_correction=dict(argstr='-oc', ), - odf_file=dict(argstr='-odf %s', ), + odf_file=dict( + argstr='-odf %s', + extensions=None, + ), order=dict(argstr='-order %s', ), out_file=dict( argstr='%s', + extensions=None, position=2, usedefault=True, ), - reference_file=dict(argstr='-ref %s', ), + reference_file=dict( + argstr='-ref %s', + extensions=None, + ), ) inputs = HARDIMat.input_spec() @@ -34,7 +48,7 @@ def test_HARDIMat_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def 
test_HARDIMat_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = HARDIMat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py index 1e66b93bec..c7a2d30989 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py @@ -7,6 +7,7 @@ def test_ODFRecon_inputs(): input_map = dict( DWI=dict( argstr='%s', + extensions=None, mandatory=True, position=1, ), @@ -20,6 +21,7 @@ def test_ODFRecon_inputs(): image_orientation_vectors=dict(argstr='-iop %f', ), matrix=dict( argstr='-mat %s', + extensions=None, mandatory=True, ), n_b0=dict( @@ -57,11 +59,11 @@ def test_ODFRecon_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_ODFRecon_outputs(): output_map = dict( - B0=dict(), - DWI=dict(), - ODF=dict(), - entropy=dict(), - max=dict(), + B0=dict(extensions=None, ), + DWI=dict(extensions=None, ), + ODF=dict(extensions=None, ), + entropy=dict(extensions=None, ), + max=dict(extensions=None, ), ) outputs = ODFRecon.output_spec() diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py index 41b2d530f6..3973421df8 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py @@ -5,7 +5,10 @@ def test_ODFTracker_inputs(): input_map = dict( - ODF=dict(mandatory=True, ), + ODF=dict( + extensions=None, + mandatory=True, + ), angle_threshold=dict(argstr='-at %f', ), args=dict(argstr='%s', ), disc=dict(argstr='-disc', ), @@ -30,18 +33,24 @@ def test_ODFTracker_inputs(): limit=dict(argstr='-limit %d', ), mask1_file=dict( argstr='-m %s', + extensions=None, mandatory=True, position=2, ), mask1_threshold=dict(position=3, ), mask2_file=dict( argstr='-m2 %s', + extensions=None, position=4, ), mask2_threshold=dict(position=5, ), - max=dict(mandatory=True, ), + max=dict( + extensions=None, + mandatory=True, + ), out_file=dict( argstr='%s', + extensions=None, position=1, usedefault=True, ), @@ -60,7 +69,7 @@ def test_ODFTracker_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ODFTracker_outputs(): - output_map = dict(track_file=dict(), ) + output_map = dict(track_file=dict(extensions=None, ), ) outputs = ODFTracker.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py index 8648a1f1f9..85c0d08d13 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py @@ -12,6 +12,7 @@ def test_SplineFilter_inputs(): ), output_file=dict( argstr='%s', + extensions=None, position=2, usedefault=True, ), @@ -22,6 +23,7 @@ def test_SplineFilter_inputs(): ), track_file=dict( argstr='%s', + extensions=None, mandatory=True, position=0, ), @@ -32,7 +34,7 @@ def test_SplineFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SplineFilter_outputs(): - output_map = dict(smoothed_track_file=dict(), ) + output_map = dict(smoothed_track_file=dict(extensions=None, ), 
) outputs = SplineFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py index b004678175..cf9756887a 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py @@ -12,6 +12,7 @@ def test_TrackMerge_inputs(): ), output_file=dict( argstr='%s', + extensions=None, position=-1, usedefault=True, ), @@ -27,7 +28,7 @@ def test_TrackMerge_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TrackMerge_outputs(): - output_map = dict(track_file=dict(), ) + output_map = dict(track_file=dict(extensions=None, ), ) outputs = TrackMerge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_APMQball.py b/nipype/interfaces/dipy/tests/test_auto_APMQball.py index f6f3f2e4c6..b50bd96b58 100644 --- a/nipype/interfaces/dipy/tests/test_auto_APMQball.py +++ b/nipype/interfaces/dipy/tests/test_auto_APMQball.py @@ -6,10 +6,19 @@ def test_APMQball_inputs(): input_map = dict( b0_thres=dict(usedefault=True, ), - in_bval=dict(mandatory=True, ), - in_bvec=dict(mandatory=True, ), - in_file=dict(mandatory=True, ), - mask_file=dict(), + in_bval=dict( + extensions=None, + mandatory=True, + ), + in_bvec=dict( + extensions=None, + mandatory=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + mask_file=dict(extensions=None, ), out_prefix=dict(), ) inputs = APMQball.input_spec() @@ -18,7 +27,7 @@ def test_APMQball_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_APMQball_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = APMQball.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_CSD.py b/nipype/interfaces/dipy/tests/test_auto_CSD.py index a30efaa3cc..1343c0ddd7 100644 --- a/nipype/interfaces/dipy/tests/test_auto_CSD.py +++ b/nipype/interfaces/dipy/tests/test_auto_CSD.py @@ -6,13 +6,22 @@ def test_CSD_inputs(): input_map = dict( b0_thres=dict(usedefault=True, ), - in_bval=dict(mandatory=True, ), - in_bvec=dict(mandatory=True, ), - in_file=dict(mandatory=True, ), - in_mask=dict(), - out_fods=dict(), + in_bval=dict( + extensions=None, + mandatory=True, + ), + in_bvec=dict( + extensions=None, + mandatory=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + in_mask=dict(extensions=None, ), + out_fods=dict(extensions=None, ), out_prefix=dict(), - response=dict(), + response=dict(extensions=None, ), save_fods=dict(usedefault=True, ), sh_order=dict(usedefault=True, ), ) @@ -23,8 +32,8 @@ def test_CSD_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_CSD_outputs(): output_map = dict( - model=dict(), - out_fods=dict(), + model=dict(extensions=None, ), + out_fods=dict(extensions=None, ), ) outputs = CSD.output_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_DTI.py b/nipype/interfaces/dipy/tests/test_auto_DTI.py index f543856065..c1c22c6263 100644 --- a/nipype/interfaces/dipy/tests/test_auto_DTI.py +++ b/nipype/interfaces/dipy/tests/test_auto_DTI.py @@ -6,10 +6,19 @@ def test_DTI_inputs(): input_map = dict( b0_thres=dict(usedefault=True, ), - in_bval=dict(mandatory=True, ), - 
in_bvec=dict(mandatory=True, ), - in_file=dict(mandatory=True, ), - mask_file=dict(), + in_bval=dict( + extensions=None, + mandatory=True, + ), + in_bvec=dict( + extensions=None, + mandatory=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + mask_file=dict(extensions=None, ), out_prefix=dict(), ) inputs = DTI.input_spec() @@ -19,12 +28,12 @@ def test_DTI_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_DTI_outputs(): output_map = dict( - ad_file=dict(), - color_fa_file=dict(), - fa_file=dict(), - md_file=dict(), - out_file=dict(), - rd_file=dict(), + ad_file=dict(extensions=None, ), + color_fa_file=dict(extensions=None, ), + fa_file=dict(extensions=None, ), + md_file=dict(extensions=None, ), + out_file=dict(extensions=None, ), + rd_file=dict(extensions=None, ), ) outputs = DTI.output_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_Denoise.py b/nipype/interfaces/dipy/tests/test_auto_Denoise.py index 88a1bc5314..57beb6e06f 100644 --- a/nipype/interfaces/dipy/tests/test_auto_Denoise.py +++ b/nipype/interfaces/dipy/tests/test_auto_Denoise.py @@ -6,15 +6,18 @@ def test_Denoise_inputs(): input_map = dict( block_radius=dict(usedefault=True, ), - in_file=dict(mandatory=True, ), - in_mask=dict(), - noise_mask=dict(), + in_file=dict( + extensions=None, + mandatory=True, + ), + in_mask=dict(extensions=None, ), + noise_mask=dict(extensions=None, ), noise_model=dict( mandatory=True, usedefault=True, ), patch_radius=dict(usedefault=True, ), - signal_mask=dict(), + signal_mask=dict(extensions=None, ), snr=dict(), ) inputs = Denoise.input_spec() @@ -23,7 +26,7 @@ def test_Denoise_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Denoise_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Denoise.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py b/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py index ee9022ca58..99249e3506 100644 --- a/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py +++ b/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py @@ -6,9 +6,18 @@ def test_DipyDiffusionInterface_inputs(): input_map = dict( b0_thres=dict(usedefault=True, ), - in_bval=dict(mandatory=True, ), - in_bvec=dict(mandatory=True, ), - in_file=dict(mandatory=True, ), + in_bval=dict( + extensions=None, + mandatory=True, + ), + in_bvec=dict( + extensions=None, + mandatory=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), out_prefix=dict(), ) inputs = DipyDiffusionInterface.input_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py b/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py index 1270d94b13..bf18299fd4 100644 --- a/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py +++ b/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py @@ -8,15 +8,33 @@ def test_EstimateResponseSH_inputs(): auto=dict(xor=['recursive'], ), b0_thres=dict(usedefault=True, ), fa_thresh=dict(usedefault=True, ), - in_bval=dict(mandatory=True, ), - in_bvec=dict(mandatory=True, ), - in_evals=dict(mandatory=True, ), - in_file=dict(mandatory=True, ), - in_mask=dict(), - out_mask=dict(usedefault=True, ), + in_bval=dict( + extensions=None, + mandatory=True, + ), + in_bvec=dict( + extensions=None, + mandatory=True, + ), + in_evals=dict( + extensions=None, + 
mandatory=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + in_mask=dict(extensions=None, ), + out_mask=dict( + extensions=None, + usedefault=True, + ), out_prefix=dict(), recursive=dict(xor=['auto'], ), - response=dict(usedefault=True, ), + response=dict( + extensions=None, + usedefault=True, + ), roi_radius=dict(usedefault=True, ), ) inputs = EstimateResponseSH.input_spec() @@ -26,8 +44,8 @@ def test_EstimateResponseSH_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_EstimateResponseSH_outputs(): output_map = dict( - out_mask=dict(), - response=dict(), + out_mask=dict(extensions=None, ), + response=dict(extensions=None, ), ) outputs = EstimateResponseSH.output_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_RESTORE.py b/nipype/interfaces/dipy/tests/test_auto_RESTORE.py index 9f62d05e0c..9f4e52ee66 100644 --- a/nipype/interfaces/dipy/tests/test_auto_RESTORE.py +++ b/nipype/interfaces/dipy/tests/test_auto_RESTORE.py @@ -6,11 +6,20 @@ def test_RESTORE_inputs(): input_map = dict( b0_thres=dict(usedefault=True, ), - in_bval=dict(mandatory=True, ), - in_bvec=dict(mandatory=True, ), - in_file=dict(mandatory=True, ), - in_mask=dict(), - noise_mask=dict(), + in_bval=dict( + extensions=None, + mandatory=True, + ), + in_bvec=dict( + extensions=None, + mandatory=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + in_mask=dict(extensions=None, ), + noise_mask=dict(extensions=None, ), out_prefix=dict(), ) inputs = RESTORE.input_spec() @@ -20,13 +29,13 @@ def test_RESTORE_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_RESTORE_outputs(): output_map = dict( - evals=dict(), - evecs=dict(), - fa=dict(), - md=dict(), - mode=dict(), - rd=dict(), - trace=dict(), + evals=dict(extensions=None, ), + evecs=dict(extensions=None, ), + fa=dict(extensions=None, ), + md=dict(extensions=None, ), + mode=dict(extensions=None, ), + rd=dict(extensions=None, ), + trace=dict(extensions=None, ), ) outputs = RESTORE.output_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_Resample.py b/nipype/interfaces/dipy/tests/test_auto_Resample.py index ff51e0efe4..a3e44c942a 100644 --- a/nipype/interfaces/dipy/tests/test_auto_Resample.py +++ b/nipype/interfaces/dipy/tests/test_auto_Resample.py @@ -5,7 +5,10 @@ def test_Resample_inputs(): input_map = dict( - in_file=dict(mandatory=True, ), + in_file=dict( + extensions=None, + mandatory=True, + ), interp=dict( mandatory=True, usedefault=True, @@ -18,7 +21,7 @@ def test_Resample_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Resample_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Resample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py b/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py index db46a3b982..48a12ec076 100644 --- a/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py +++ b/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py @@ -5,23 +5,38 @@ def test_SimulateMultiTensor_inputs(): input_map = dict( - baseline=dict(mandatory=True, ), + baseline=dict( + extensions=None, + mandatory=True, + ), bvalues=dict(usedefault=True, ), diff_iso=dict(usedefault=True, ), diff_sf=dict(usedefault=True, ), - gradients=dict(), - in_bval=dict(), - in_bvec=dict(), + gradients=dict(extensions=None, ), + in_bval=dict(extensions=None, 
), + in_bvec=dict(extensions=None, ), in_dirs=dict(mandatory=True, ), in_frac=dict(mandatory=True, ), - in_mask=dict(), + in_mask=dict(extensions=None, ), in_vfms=dict(mandatory=True, ), n_proc=dict(usedefault=True, ), num_dirs=dict(usedefault=True, ), - out_bval=dict(usedefault=True, ), - out_bvec=dict(usedefault=True, ), - out_file=dict(usedefault=True, ), - out_mask=dict(usedefault=True, ), + out_bval=dict( + extensions=None, + usedefault=True, + ), + out_bvec=dict( + extensions=None, + usedefault=True, + ), + out_file=dict( + extensions=None, + usedefault=True, + ), + out_mask=dict( + extensions=None, + usedefault=True, + ), snr=dict(usedefault=True, ), ) inputs = SimulateMultiTensor.input_spec() @@ -31,10 +46,10 @@ def test_SimulateMultiTensor_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_SimulateMultiTensor_outputs(): output_map = dict( - out_bval=dict(), - out_bvec=dict(), - out_file=dict(), - out_mask=dict(), + out_bval=dict(extensions=None, ), + out_bvec=dict(extensions=None, ), + out_file=dict(extensions=None, ), + out_mask=dict(extensions=None, ), ) outputs = SimulateMultiTensor.output_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py b/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py index 1bc1a2ea97..71c20e8ec9 100644 --- a/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py +++ b/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py @@ -9,9 +9,12 @@ def test_StreamlineTractography_inputs(): mandatory=True, usedefault=True, ), - in_file=dict(mandatory=True, ), - in_model=dict(), - in_peaks=dict(), + in_file=dict( + extensions=None, + mandatory=True, + ), + in_model=dict(extensions=None, ), + in_peaks=dict(extensions=None, ), min_angle=dict( mandatory=True, usedefault=True, @@ -33,9 +36,9 @@ def test_StreamlineTractography_inputs(): mandatory=True, usedefault=True, ), - seed_coord=dict(), - seed_mask=dict(), - tracking_mask=dict(), + seed_coord=dict(extensions=None, ), + seed_mask=dict(extensions=None, ), + tracking_mask=dict(extensions=None, ), ) inputs = StreamlineTractography.input_spec() @@ -44,10 +47,10 @@ def test_StreamlineTractography_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_StreamlineTractography_outputs(): output_map = dict( - gfa=dict(), - odf_peaks=dict(), - out_seeds=dict(), - tracks=dict(), + gfa=dict(extensions=None, ), + odf_peaks=dict(extensions=None, ), + out_seeds=dict(extensions=None, ), + tracks=dict(extensions=None, ), ) outputs = StreamlineTractography.output_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_TensorMode.py b/nipype/interfaces/dipy/tests/test_auto_TensorMode.py index d072af78fc..532b930c80 100644 --- a/nipype/interfaces/dipy/tests/test_auto_TensorMode.py +++ b/nipype/interfaces/dipy/tests/test_auto_TensorMode.py @@ -6,10 +6,19 @@ def test_TensorMode_inputs(): input_map = dict( b0_thres=dict(usedefault=True, ), - in_bval=dict(mandatory=True, ), - in_bvec=dict(mandatory=True, ), - in_file=dict(mandatory=True, ), - mask_file=dict(), + in_bval=dict( + extensions=None, + mandatory=True, + ), + in_bvec=dict( + extensions=None, + mandatory=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + mask_file=dict(extensions=None, ), out_prefix=dict(), ) inputs = TensorMode.input_spec() @@ -18,7 +27,7 @@ def test_TensorMode_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TensorMode_outputs(): - output_map = 
dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = TensorMode.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py b/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py index 79af3b5940..f1112421f5 100644 --- a/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py +++ b/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py @@ -6,10 +6,16 @@ def test_TrackDensityMap_inputs(): input_map = dict( data_dims=dict(), - in_file=dict(mandatory=True, ), - out_filename=dict(usedefault=True, ), + in_file=dict( + extensions=None, + mandatory=True, + ), + out_filename=dict( + extensions=None, + usedefault=True, + ), points_space=dict(usedefault=True, ), - reference=dict(), + reference=dict(extensions=None, ), voxel_dims=dict(), ) inputs = TrackDensityMap.input_spec() @@ -18,7 +24,7 @@ def test_TrackDensityMap_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TrackDensityMap_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = TrackDensityMap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py b/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py index 5f9262d788..f0d6da8d09 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py +++ b/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py @@ -20,6 +20,7 @@ def test_AffScalarVol_inputs(): ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, ), interpolation=dict( @@ -28,16 +29,19 @@ def test_AffScalarVol_inputs(): ), out_file=dict( argstr='-out %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_affxfmd', ), target=dict( argstr='-target %s', + extensions=None, xor=['transform'], ), transform=dict( argstr='-trans %s', + extensions=None, xor=['target', 'translation', 'euler', 'deformation'], ), translation=dict( @@ -51,7 +55,7 @@ def test_AffScalarVol_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_AffScalarVol_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = AffScalarVol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py b/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py index 6d7abc852a..dd40117ace 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py +++ b/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py @@ -20,6 +20,7 @@ def test_AffSymTensor3DVol_inputs(): ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, ), interpolation=dict( @@ -28,6 +29,7 @@ def test_AffSymTensor3DVol_inputs(): ), out_file=dict( argstr='-out %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_affxfmd', @@ -38,10 +40,12 @@ def test_AffSymTensor3DVol_inputs(): ), target=dict( argstr='-target %s', + extensions=None, xor=['transform'], ), transform=dict( argstr='-trans %s', + extensions=None, xor=['target', 'translation', 'euler', 'deformation'], ), translation=dict( @@ -55,7 +59,7 @@ def test_AffSymTensor3DVol_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_AffSymTensor3DVol_outputs(): - 
output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = AffSymTensor3DVol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_Affine.py b/nipype/interfaces/dtitk/tests/test_auto_Affine.py index 78d2e6f011..89383c159d 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_Affine.py +++ b/nipype/interfaces/dtitk/tests/test_auto_Affine.py @@ -13,6 +13,7 @@ def test_Affine_inputs(): fixed_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=0, ), @@ -25,11 +26,13 @@ def test_Affine_inputs(): initialize_xfm=dict( argstr='%s', copyfile=True, + extensions=None, position=5, ), moving_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=1, ), @@ -53,8 +56,8 @@ def test_Affine_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Affine_outputs(): output_map = dict( - out_file=dict(), - out_file_xfm=dict(), + out_file=dict(extensions=None, ), + out_file_xfm=dict(extensions=None, ), ) outputs = Affine.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py b/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py index 5f3b43153a..dadba5445f 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py @@ -13,6 +13,7 @@ def test_AffineTask_inputs(): fixed_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=0, ), @@ -25,11 +26,13 @@ def test_AffineTask_inputs(): initialize_xfm=dict( argstr='%s', copyfile=True, + extensions=None, position=5, ), moving_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=1, ), @@ -53,8 +56,8 @@ def test_AffineTask_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_AffineTask_outputs(): output_map = dict( - out_file=dict(), - out_file_xfm=dict(), + out_file=dict(extensions=None, ), + out_file_xfm=dict(extensions=None, ), ) outputs = AffineTask.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py b/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py index cbd4efccb2..9872df1ee3 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py +++ b/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py @@ -12,6 +12,7 @@ def test_BinThresh_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=0, ), @@ -29,6 +30,7 @@ def test_BinThresh_inputs(): ), out_file=dict( argstr='%s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_thrbin', @@ -53,7 +55,7 @@ def test_BinThresh_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_BinThresh_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = BinThresh.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py b/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py index dcd396abe2..a3cdcb5149 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py @@ -12,6 +12,7 @@ def test_BinThreshTask_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=0, ), @@ -29,6 +30,7 @@ def test_BinThreshTask_inputs(): ), out_file=dict( argstr='%s', + extensions=None, keep_extension=True, 
name_source='in_file', name_template='%s_thrbin', @@ -53,7 +55,7 @@ def test_BinThreshTask_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_BinThreshTask_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = BinThreshTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py index 09fcdc186e..a325f68b9f 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py +++ b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py @@ -12,14 +12,17 @@ def test_ComposeXfm_inputs(): ), in_aff=dict( argstr='-aff %s', + extensions=None, mandatory=True, ), in_df=dict( argstr='-df %s', + extensions=None, mandatory=True, ), out_file=dict( argstr='-out %s', + extensions=None, genfile=True, ), ) @@ -29,7 +32,7 @@ def test_ComposeXfm_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ComposeXfm_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = ComposeXfm.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py index 99c6f6d340..52bbd5cbcb 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py @@ -12,14 +12,17 @@ def test_ComposeXfmTask_inputs(): ), in_aff=dict( argstr='-aff %s', + extensions=None, mandatory=True, ), in_df=dict( argstr='-df %s', + extensions=None, mandatory=True, ), out_file=dict( argstr='-out %s', + extensions=None, genfile=True, ), ) @@ -29,7 +32,7 @@ def test_ComposeXfmTask_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ComposeXfmTask_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = ComposeXfmTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py b/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py index ad532bd631..66d28957de 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py +++ b/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py @@ -12,6 +12,7 @@ def test_Diffeo_inputs(): ), fixed_file=dict( argstr='%s', + extensions=None, position=0, ), ftol=dict( @@ -28,11 +29,13 @@ def test_Diffeo_inputs(): ), mask_file=dict( argstr='%s', + extensions=None, position=2, ), moving_file=dict( argstr='%s', copyfile=False, + extensions=None, position=1, ), n_iters=dict( @@ -49,8 +52,8 @@ def test_Diffeo_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Diffeo_outputs(): output_map = dict( - out_file=dict(), - out_file_xfm=dict(), + out_file=dict(extensions=None, ), + out_file_xfm=dict(extensions=None, ), ) outputs = Diffeo.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py b/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py index 7d1305d384..0d459d7a0b 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py +++ b/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py @@ -13,6 +13,7 @@ def test_DiffeoScalarVol_inputs(): flip=dict(argstr='-flip %d %d %d', ), in_file=dict( argstr='-in %s', + 
extensions=None, mandatory=True, ), interpolation=dict( @@ -21,6 +22,7 @@ def test_DiffeoScalarVol_inputs(): ), out_file=dict( argstr='-out %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_diffeoxfmd', @@ -28,10 +30,12 @@ def test_DiffeoScalarVol_inputs(): resampling_type=dict(argstr='-type %s', ), target=dict( argstr='-target %s', + extensions=None, xor=['voxel_size'], ), transform=dict( argstr='-trans %s', + extensions=None, mandatory=True, ), voxel_size=dict( @@ -45,7 +49,7 @@ def test_DiffeoScalarVol_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DiffeoScalarVol_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = DiffeoScalarVol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py b/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py index b934c56d2b..a051d9f60f 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py +++ b/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py @@ -17,6 +17,7 @@ def test_DiffeoSymTensor3DVol_inputs(): flip=dict(argstr='-flip %d %d %d', ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, ), interpolation=dict( @@ -25,6 +26,7 @@ def test_DiffeoSymTensor3DVol_inputs(): ), out_file=dict( argstr='-out %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_diffeoxfmd', @@ -36,10 +38,12 @@ def test_DiffeoSymTensor3DVol_inputs(): resampling_type=dict(argstr='-type %s', ), target=dict( argstr='-target %s', + extensions=None, xor=['voxel_size'], ), transform=dict( argstr='-trans %s', + extensions=None, mandatory=True, ), voxel_size=dict( @@ -53,7 +57,7 @@ def test_DiffeoSymTensor3DVol_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DiffeoSymTensor3DVol_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = DiffeoSymTensor3DVol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py b/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py index 5aea665d4c..8d78b37a55 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py @@ -12,6 +12,7 @@ def test_DiffeoTask_inputs(): ), fixed_file=dict( argstr='%s', + extensions=None, position=0, ), ftol=dict( @@ -28,11 +29,13 @@ def test_DiffeoTask_inputs(): ), mask_file=dict( argstr='%s', + extensions=None, position=2, ), moving_file=dict( argstr='%s', copyfile=False, + extensions=None, position=1, ), n_iters=dict( @@ -49,8 +52,8 @@ def test_DiffeoTask_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_DiffeoTask_outputs(): output_map = dict( - out_file=dict(), - out_file_xfm=dict(), + out_file=dict(extensions=None, ), + out_file_xfm=dict(extensions=None, ), ) outputs = DiffeoTask.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_Rigid.py b/nipype/interfaces/dtitk/tests/test_auto_Rigid.py index ecb7c2d33b..6e9bf18764 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_Rigid.py +++ b/nipype/interfaces/dtitk/tests/test_auto_Rigid.py @@ -13,6 +13,7 @@ def test_Rigid_inputs(): fixed_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=0, ), @@ 
-25,11 +26,13 @@ def test_Rigid_inputs(): initialize_xfm=dict( argstr='%s', copyfile=True, + extensions=None, position=5, ), moving_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=1, ), @@ -53,8 +56,8 @@ def test_Rigid_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Rigid_outputs(): output_map = dict( - out_file=dict(), - out_file_xfm=dict(), + out_file=dict(extensions=None, ), + out_file_xfm=dict(extensions=None, ), ) outputs = Rigid.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py b/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py index c627fda741..987888d80f 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py @@ -13,6 +13,7 @@ def test_RigidTask_inputs(): fixed_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=0, ), @@ -25,11 +26,13 @@ def test_RigidTask_inputs(): initialize_xfm=dict( argstr='%s', copyfile=True, + extensions=None, position=5, ), moving_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=1, ), @@ -53,8 +56,8 @@ def test_RigidTask_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_RigidTask_outputs(): output_map = dict( - out_file=dict(), - out_file_xfm=dict(), + out_file=dict(extensions=None, ), + out_file_xfm=dict(extensions=None, ), ) outputs = RigidTask.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py index 3574906455..51a9bbe4b5 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py +++ b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py @@ -12,6 +12,7 @@ def test_SVAdjustVoxSp_inputs(): ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, ), origin=dict( @@ -20,12 +21,14 @@ def test_SVAdjustVoxSp_inputs(): ), out_file=dict( argstr='-out %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_avs', ), target_file=dict( argstr='-target %s', + extensions=None, xor=['voxel_size', 'origin'], ), voxel_size=dict( @@ -39,7 +42,7 @@ def test_SVAdjustVoxSp_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SVAdjustVoxSp_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = SVAdjustVoxSp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py index 40a7592c19..0c051ef5d2 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py @@ -12,6 +12,7 @@ def test_SVAdjustVoxSpTask_inputs(): ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, ), origin=dict( @@ -20,12 +21,14 @@ def test_SVAdjustVoxSpTask_inputs(): ), out_file=dict( argstr='-out %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_avs', ), target_file=dict( argstr='-target %s', + extensions=None, xor=['voxel_size', 'origin'], ), voxel_size=dict( @@ -39,7 +42,7 @@ def test_SVAdjustVoxSpTask_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SVAdjustVoxSpTask_outputs(): - output_map = dict(out_file=dict(), ) + output_map = 
dict(out_file=dict(extensions=None, ), ) outputs = SVAdjustVoxSpTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVResample.py b/nipype/interfaces/dtitk/tests/test_auto_SVResample.py index 91ca638f22..880366e412 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_SVResample.py +++ b/nipype/interfaces/dtitk/tests/test_auto_SVResample.py @@ -17,6 +17,7 @@ def test_SVResample_inputs(): ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, ), origin=dict( @@ -25,12 +26,14 @@ def test_SVResample_inputs(): ), out_file=dict( argstr='-out %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_resampled', ), target_file=dict( argstr='-target %s', + extensions=None, xor=['array_size', 'voxel_size', 'origin'], ), voxel_size=dict( @@ -44,7 +47,7 @@ def test_SVResample_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SVResample_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = SVResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py b/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py index 8a7574bfd8..1a8ea16023 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py @@ -17,6 +17,7 @@ def test_SVResampleTask_inputs(): ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, ), origin=dict( @@ -25,12 +26,14 @@ def test_SVResampleTask_inputs(): ), out_file=dict( argstr='-out %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_resampled', ), target_file=dict( argstr='-target %s', + extensions=None, xor=['array_size', 'voxel_size', 'origin'], ), voxel_size=dict( @@ -44,7 +47,7 @@ def test_SVResampleTask_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SVResampleTask_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = SVResampleTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py index bda9128369..3f8f236a6b 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py @@ -12,6 +12,7 @@ def test_TVAdjustOriginTask_inputs(): ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, ), origin=dict( @@ -20,12 +21,14 @@ def test_TVAdjustOriginTask_inputs(): ), out_file=dict( argstr='-out %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_avs', ), target_file=dict( argstr='-target %s', + extensions=None, xor=['voxel_size', 'origin'], ), voxel_size=dict( @@ -39,7 +42,7 @@ def test_TVAdjustOriginTask_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TVAdjustOriginTask_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = TVAdjustOriginTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py 
b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py index b8ce9039d5..9165dce54a 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py @@ -12,6 +12,7 @@ def test_TVAdjustVoxSp_inputs(): ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, ), origin=dict( @@ -20,12 +21,14 @@ def test_TVAdjustVoxSp_inputs(): ), out_file=dict( argstr='-out %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_avs', ), target_file=dict( argstr='-target %s', + extensions=None, xor=['voxel_size', 'origin'], ), voxel_size=dict( @@ -39,7 +42,7 @@ def test_TVAdjustVoxSp_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TVAdjustVoxSp_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = TVAdjustVoxSp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py index e9ef8137dc..a04f1078b4 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py @@ -12,6 +12,7 @@ def test_TVAdjustVoxSpTask_inputs(): ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, ), origin=dict( @@ -20,12 +21,14 @@ def test_TVAdjustVoxSpTask_inputs(): ), out_file=dict( argstr='-out %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_avs', ), target_file=dict( argstr='-target %s', + extensions=None, xor=['voxel_size', 'origin'], ), voxel_size=dict( @@ -39,7 +42,7 @@ def test_TVAdjustVoxSpTask_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TVAdjustVoxSpTask_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = TVAdjustVoxSpTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVResample.py b/nipype/interfaces/dtitk/tests/test_auto_TVResample.py index aefafc6430..fe7b7bbdbd 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVResample.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVResample.py @@ -17,6 +17,7 @@ def test_TVResample_inputs(): ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, ), interpolation=dict(argstr='-interp %s', ), @@ -26,12 +27,14 @@ def test_TVResample_inputs(): ), out_file=dict( argstr='-out %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_resampled', ), target_file=dict( argstr='-target %s', + extensions=None, xor=['array_size', 'voxel_size', 'origin'], ), voxel_size=dict( @@ -45,7 +48,7 @@ def test_TVResample_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TVResample_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = TVResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py index b3c70bb729..d2b971fe00 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py @@ -17,6 +17,7 @@ def 
test_TVResampleTask_inputs(): ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, ), interpolation=dict(argstr='-interp %s', ), @@ -26,12 +27,14 @@ def test_TVResampleTask_inputs(): ), out_file=dict( argstr='-out %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_resampled', ), target_file=dict( argstr='-target %s', + extensions=None, xor=['array_size', 'voxel_size', 'origin'], ), voxel_size=dict( @@ -45,7 +48,7 @@ def test_TVResampleTask_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TVResampleTask_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = TVResampleTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVtool.py b/nipype/interfaces/dtitk/tests/test_auto_TVtool.py index 2267228631..15f06af243 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVtool.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVtool.py @@ -12,11 +12,13 @@ def test_TVtool_inputs(): ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, ), in_flag=dict(argstr='-%s', ), out_file=dict( argstr='-out %s', + extensions=None, genfile=True, ), ) @@ -26,7 +28,7 @@ def test_TVtool_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TVtool_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = TVtool.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py index 252d7c9d0a..eb44949b5d 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py @@ -12,11 +12,13 @@ def test_TVtoolTask_inputs(): ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, ), in_flag=dict(argstr='-%s', ), out_file=dict( argstr='-out %s', + extensions=None, genfile=True, ), ) @@ -26,7 +28,7 @@ def test_TVtoolTask_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TVtoolTask_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = TVtoolTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py index 72330737b6..e9aeab1260 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py @@ -20,6 +20,7 @@ def test_affScalarVolTask_inputs(): ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, ), interpolation=dict( @@ -28,16 +29,19 @@ def test_affScalarVolTask_inputs(): ), out_file=dict( argstr='-out %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_affxfmd', ), target=dict( argstr='-target %s', + extensions=None, xor=['transform'], ), transform=dict( argstr='-trans %s', + extensions=None, xor=['target', 'translation', 'euler', 'deformation'], ), translation=dict( @@ -51,7 +55,7 @@ def test_affScalarVolTask_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_affScalarVolTask_outputs(): - output_map 
= dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = affScalarVolTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py index da8fae25cc..568a83436f 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py @@ -20,6 +20,7 @@ def test_affSymTensor3DVolTask_inputs(): ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, ), interpolation=dict( @@ -28,6 +29,7 @@ def test_affSymTensor3DVolTask_inputs(): ), out_file=dict( argstr='-out %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_affxfmd', @@ -38,10 +40,12 @@ def test_affSymTensor3DVolTask_inputs(): ), target=dict( argstr='-target %s', + extensions=None, xor=['transform'], ), transform=dict( argstr='-trans %s', + extensions=None, xor=['target', 'translation', 'euler', 'deformation'], ), translation=dict( @@ -55,7 +59,7 @@ def test_affSymTensor3DVolTask_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_affSymTensor3DVolTask_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = affSymTensor3DVolTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py index 10965b7077..b9ed3a7054 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py @@ -13,6 +13,7 @@ def test_diffeoScalarVolTask_inputs(): flip=dict(argstr='-flip %d %d %d', ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, ), interpolation=dict( @@ -21,6 +22,7 @@ def test_diffeoScalarVolTask_inputs(): ), out_file=dict( argstr='-out %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_diffeoxfmd', @@ -28,10 +30,12 @@ def test_diffeoScalarVolTask_inputs(): resampling_type=dict(argstr='-type %s', ), target=dict( argstr='-target %s', + extensions=None, xor=['voxel_size'], ), transform=dict( argstr='-trans %s', + extensions=None, mandatory=True, ), voxel_size=dict( @@ -45,7 +49,7 @@ def test_diffeoScalarVolTask_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_diffeoScalarVolTask_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = diffeoScalarVolTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py index 52112735b1..2fc1cd6f6f 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py @@ -17,6 +17,7 @@ def test_diffeoSymTensor3DVolTask_inputs(): flip=dict(argstr='-flip %d %d %d', ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, ), interpolation=dict( @@ -25,6 +26,7 @@ def test_diffeoSymTensor3DVolTask_inputs(): ), out_file=dict( argstr='-out %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_diffeoxfmd', @@ -36,10 
+38,12 @@ def test_diffeoSymTensor3DVolTask_inputs(): resampling_type=dict(argstr='-type %s', ), target=dict( argstr='-target %s', + extensions=None, xor=['voxel_size'], ), transform=dict( argstr='-trans %s', + extensions=None, mandatory=True, ), voxel_size=dict( @@ -53,7 +57,7 @@ def test_diffeoSymTensor3DVolTask_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_diffeoSymTensor3DVolTask_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = diffeoSymTensor3DVolTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py b/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py index f05821d9c1..d246429304 100644 --- a/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py +++ b/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py @@ -18,7 +18,10 @@ def test_AnalyzeWarp_inputs(): argstr='-jacmat %s', usedefault=True, ), - moving_image=dict(argstr='-in %s', ), + moving_image=dict( + argstr='-in %s', + extensions=None, + ), num_threads=dict( argstr='-threads %01d', nohash=True, @@ -36,6 +39,7 @@ def test_AnalyzeWarp_inputs(): ), transform_file=dict( argstr='-tp %s', + extensions=None, mandatory=True, ), ) @@ -46,9 +50,9 @@ def test_AnalyzeWarp_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_AnalyzeWarp_outputs(): output_map = dict( - disp_field=dict(), - jacdet_map=dict(), - jacmat_map=dict(), + disp_field=dict(extensions=None, ), + jacdet_map=dict(extensions=None, ), + jacmat_map=dict(extensions=None, ), ) outputs = AnalyzeWarp.output_spec() diff --git a/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py b/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py index 16f2b54079..f13798790d 100644 --- a/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py +++ b/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py @@ -12,6 +12,7 @@ def test_ApplyWarp_inputs(): ), moving_image=dict( argstr='-in %s', + extensions=None, mandatory=True, ), num_threads=dict( @@ -26,6 +27,7 @@ def test_ApplyWarp_inputs(): ), transform_file=dict( argstr='-tp %s', + extensions=None, mandatory=True, ), ) @@ -35,7 +37,7 @@ def test_ApplyWarp_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ApplyWarp_outputs(): - output_map = dict(warped_file=dict(), ) + output_map = dict(warped_file=dict(extensions=None, ), ) outputs = ApplyWarp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/elastix/tests/test_auto_EditTransform.py b/nipype/interfaces/elastix/tests/test_auto_EditTransform.py index ef227da7f8..7b67511f43 100644 --- a/nipype/interfaces/elastix/tests/test_auto_EditTransform.py +++ b/nipype/interfaces/elastix/tests/test_auto_EditTransform.py @@ -9,11 +9,14 @@ def test_EditTransform_inputs(): argstr='FinalBSplineInterpolationOrder', usedefault=True, ), - output_file=dict(), + output_file=dict(extensions=None, ), output_format=dict(argstr='ResultImageFormat', ), output_type=dict(argstr='ResultImagePixelType', ), - reference_image=dict(), - transform_file=dict(mandatory=True, ), + reference_image=dict(extensions=None, ), + transform_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = EditTransform.input_spec() @@ -21,7 +24,7 @@ def test_EditTransform_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value 
def test_EditTransform_outputs(): - output_map = dict(output_file=dict(), ) + output_map = dict(output_file=dict(extensions=None, ), ) outputs = EditTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py b/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py index c93a0526c1..cc6be5c6fb 100644 --- a/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py +++ b/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py @@ -22,10 +22,12 @@ def test_PointsWarp_inputs(): ), points_file=dict( argstr='-def %s', + extensions=None, mandatory=True, ), transform_file=dict( argstr='-tp %s', + extensions=None, mandatory=True, ), ) @@ -35,7 +37,7 @@ def test_PointsWarp_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_PointsWarp_outputs(): - output_map = dict(warped_file=dict(), ) + output_map = dict(warped_file=dict(extensions=None, ), ) outputs = PointsWarp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/elastix/tests/test_auto_Registration.py b/nipype/interfaces/elastix/tests/test_auto_Registration.py index 4e774ab036..0c0d45f863 100644 --- a/nipype/interfaces/elastix/tests/test_auto_Registration.py +++ b/nipype/interfaces/elastix/tests/test_auto_Registration.py @@ -12,15 +12,26 @@ def test_Registration_inputs(): ), fixed_image=dict( argstr='-f %s', + extensions=None, mandatory=True, ), - fixed_mask=dict(argstr='-fMask %s', ), - initial_transform=dict(argstr='-t0 %s', ), + fixed_mask=dict( + argstr='-fMask %s', + extensions=None, + ), + initial_transform=dict( + argstr='-t0 %s', + extensions=None, + ), moving_image=dict( argstr='-m %s', + extensions=None, mandatory=True, ), - moving_mask=dict(argstr='-mMask %s', ), + moving_mask=dict( + argstr='-mMask %s', + extensions=None, + ), num_threads=dict( argstr='-threads %01d', nohash=True, @@ -44,7 +55,7 @@ def test_Registration_inputs(): def test_Registration_outputs(): output_map = dict( transform=dict(), - warped_file=dict(), + warped_file=dict(extensions=None, ), warped_files=dict(), warped_files_flags=dict(), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py b/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py index ed32693df5..03fa095470 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py @@ -13,17 +13,20 @@ def test_AddXFormToHeader_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), out_file=dict( argstr='%s', + extensions=None, position=-1, usedefault=True, ), subjects_dir=dict(), transform=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), @@ -35,7 +38,7 @@ def test_AddXFormToHeader_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_AddXFormToHeader_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = AddXFormToHeader.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py b/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py index fc5bc39b6f..8a83130a4d 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py @@ -7,29 +7,63 @@ def test_Aparc2Aseg_inputs(): 
input_map = dict( a2009s=dict(argstr='--a2009s', ), args=dict(argstr='%s', ), - aseg=dict(argstr='--aseg %s', ), + aseg=dict( + argstr='--aseg %s', + extensions=None, + ), copy_inputs=dict(), - ctxseg=dict(argstr='--ctxseg %s', ), + ctxseg=dict( + argstr='--ctxseg %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, ), - filled=dict(), + filled=dict(extensions=None, ), hypo_wm=dict(argstr='--hypo-as-wm', ), label_wm=dict(argstr='--labelwm', ), - lh_annotation=dict(mandatory=True, ), - lh_pial=dict(mandatory=True, ), - lh_ribbon=dict(mandatory=True, ), - lh_white=dict(mandatory=True, ), + lh_annotation=dict( + extensions=None, + mandatory=True, + ), + lh_pial=dict( + extensions=None, + mandatory=True, + ), + lh_ribbon=dict( + extensions=None, + mandatory=True, + ), + lh_white=dict( + extensions=None, + mandatory=True, + ), out_file=dict( argstr='--o %s', + extensions=None, + mandatory=True, + ), + rh_annotation=dict( + extensions=None, + mandatory=True, + ), + rh_pial=dict( + extensions=None, + mandatory=True, + ), + rh_ribbon=dict( + extensions=None, + mandatory=True, + ), + rh_white=dict( + extensions=None, + mandatory=True, + ), + ribbon=dict( + extensions=None, mandatory=True, ), - rh_annotation=dict(mandatory=True, ), - rh_pial=dict(mandatory=True, ), - rh_ribbon=dict(mandatory=True, ), - rh_white=dict(mandatory=True, ), - ribbon=dict(mandatory=True, ), rip_unknown=dict(argstr='--rip-unknown', ), subject_id=dict( argstr='--s %s', @@ -45,7 +79,11 @@ def test_Aparc2Aseg_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Aparc2Aseg_outputs(): - output_map = dict(out_file=dict(argstr='%s', ), ) + output_map = dict( + out_file=dict( + argstr='%s', + extensions=None, + ), ) outputs = Aparc2Aseg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py b/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py index 7044fdde2f..fea3617596 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py @@ -12,10 +12,12 @@ def test_Apas2Aseg_inputs(): ), in_file=dict( argstr='--i %s', + extensions=None, mandatory=True, ), out_file=dict( argstr='--o %s', + extensions=None, mandatory=True, ), subjects_dir=dict(), @@ -26,7 +28,11 @@ def test_Apas2Aseg_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Apas2Aseg_outputs(): - output_map = dict(out_file=dict(argstr='%s', ), ) + output_map = dict( + out_file=dict( + argstr='%s', + extensions=None, + ), ) outputs = Apas2Aseg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py b/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py index 69f56d7dde..c4544feb88 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py @@ -12,6 +12,7 @@ def test_ApplyMask_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), @@ -19,12 +20,14 @@ def test_ApplyMask_inputs(): keep_mask_deletion_edits=dict(argstr='-keep_mask_deletion_edits', ), mask_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), mask_thresh=dict(argstr='-T %.4f', ), out_file=dict( argstr='%s', + extensions=None, hash_files=True, keep_extension=True, name_source=['in_file'], @@ -34,9 +37,18 
@@ def test_ApplyMask_inputs(): subjects_dir=dict(), transfer=dict(argstr='-transfer %d', ), use_abs=dict(argstr='-abs', ), - xfm_file=dict(argstr='-xform %s', ), - xfm_source=dict(argstr='-lta_src %s', ), - xfm_target=dict(argstr='-lta_dst %s', ), + xfm_file=dict( + argstr='-xform %s', + extensions=None, + ), + xfm_source=dict( + argstr='-lta_src %s', + extensions=None, + ), + xfm_target=dict( + argstr='-lta_dst %s', + extensions=None, + ), ) inputs = ApplyMask.input_spec() @@ -44,7 +56,7 @@ def test_ApplyMask_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ApplyMask_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = ApplyMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py b/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py index c9e8f85904..753be038df 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py @@ -18,6 +18,7 @@ def test_ApplyVolTransform_inputs(): ), fsl_reg_file=dict( argstr='--fsl %s', + extensions=None, mandatory=True, xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), @@ -30,17 +31,22 @@ def test_ApplyVolTransform_inputs(): ), lta_file=dict( argstr='--lta %s', + extensions=None, mandatory=True, xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), ), lta_inv_file=dict( argstr='--lta-inv %s', + extensions=None, mandatory=True, xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), ), - m3z_file=dict(argstr='--m3z %s', ), + m3z_file=dict( + argstr='--m3z %s', + extensions=None, + ), mni_152_reg=dict( argstr='--regheader', mandatory=True, @@ -54,6 +60,7 @@ def test_ApplyVolTransform_inputs(): no_resample=dict(argstr='--no-resample', ), reg_file=dict( argstr='--reg %s', + extensions=None, mandatory=True, xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), @@ -67,6 +74,7 @@ def test_ApplyVolTransform_inputs(): source_file=dict( argstr='--mov %s', copyfile=False, + extensions=None, mandatory=True, ), subject=dict( @@ -84,15 +92,18 @@ def test_ApplyVolTransform_inputs(): tal_resolution=dict(argstr='--talres %.10f', ), target_file=dict( argstr='--targ %s', + extensions=None, mandatory=True, xor=('target_file', 'tal', 'fs_target'), ), transformed_file=dict( argstr='--o %s', + extensions=None, genfile=True, ), xfm_reg_file=dict( argstr='--xfm %s', + extensions=None, mandatory=True, xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), @@ -104,7 +115,7 @@ def test_ApplyVolTransform_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ApplyVolTransform_outputs(): - output_map = dict(transformed_file=dict(), ) + output_map = dict(transformed_file=dict(extensions=None, ), ) outputs = ApplyVolTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py b/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py index 93db55cd30..7dfb9a1815 100644 --- 
a/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py @@ -12,6 +12,7 @@ def test_Binarize_inputs(): bin_val_not=dict(argstr='--binvalnot %d', ), binary_file=dict( argstr='--o %s', + extensions=None, genfile=True, ), count_file=dict(argstr='--count %s', ), @@ -26,17 +27,24 @@ def test_Binarize_inputs(): in_file=dict( argstr='--i %s', copyfile=False, + extensions=None, mandatory=True, ), invert=dict(argstr='--inv', ), - mask_file=dict(argstr='--mask maskvol', ), + mask_file=dict( + argstr='--mask maskvol', + extensions=None, + ), mask_thresh=dict(argstr='--mask-thresh %f', ), match=dict(argstr='--match %d...', ), max=dict( argstr='--max %f', xor=['wm_ven_csf'], ), - merge_file=dict(argstr='--merge %s', ), + merge_file=dict( + argstr='--merge %s', + extensions=None, + ), min=dict( argstr='--min %f', xor=['wm_ven_csf'], @@ -61,8 +69,8 @@ def test_Binarize_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Binarize_outputs(): output_map = dict( - binary_file=dict(), - count_file=dict(), + binary_file=dict(extensions=None, ), + count_file=dict(extensions=None, ), ) outputs = Binarize.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py b/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py index 1cf35fcedb..f3bfd5ad62 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py @@ -14,16 +14,24 @@ def test_CALabel_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-4, ), - in_vol=dict(argstr='-r %s', ), - intensities=dict(argstr='-r %s', ), + in_vol=dict( + argstr='-r %s', + extensions=None, + ), + intensities=dict( + argstr='-r %s', + extensions=None, + ), label=dict(argstr='-l %s', ), no_big_ventricles=dict(argstr='-nobigventricles', ), num_threads=dict(), out_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, ), @@ -32,11 +40,13 @@ def test_CALabel_inputs(): subjects_dir=dict(), template=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), transform=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), @@ -47,7 +57,7 @@ def test_CALabel_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CALabel_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = CALabel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py b/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py index 0e8ec025ec..1f6546ae3a 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py @@ -8,23 +8,35 @@ def test_CANormalize_inputs(): args=dict(argstr='%s', ), atlas=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), - control_points=dict(argstr='-c %s', ), + control_points=dict( + argstr='-c %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-4, ), - long_file=dict(argstr='-long %s', ), - mask=dict(argstr='-mask %s', ), + long_file=dict( + argstr='-long %s', + extensions=None, + ), + mask=dict( + argstr='-mask %s', + extensions=None, + ), out_file=dict( argstr='%s', + extensions=None, hash_files=False, keep_extension=True, name_source=['in_file'], @@ 
-34,6 +46,7 @@ def test_CANormalize_inputs(): subjects_dir=dict(), transform=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -45,7 +58,7 @@ def test_CANormalize_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_CANormalize_outputs(): output_map = dict( - control_points=dict(), + control_points=dict(extensions=None, ), out_file=dict(), ) outputs = CANormalize.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py b/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py index e3cbf52f10..83f669b218 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py @@ -14,6 +14,7 @@ def test_CARegister_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), @@ -23,20 +24,28 @@ def test_CARegister_inputs(): ), l_files=dict(argstr='-l %s', ), levels=dict(argstr='-levels %d', ), - mask=dict(argstr='-mask %s', ), + mask=dict( + argstr='-mask %s', + extensions=None, + ), no_big_ventricles=dict(argstr='-nobigventricles', ), num_threads=dict(), out_file=dict( argstr='%s', + extensions=None, genfile=True, position=-1, ), subjects_dir=dict(), template=dict( argstr='%s', + extensions=None, position=-2, ), - transform=dict(argstr='-T %s', ), + transform=dict( + argstr='-T %s', + extensions=None, + ), ) inputs = CARegister.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py b/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py index a7d3ad3bb0..68a5a98e66 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py @@ -12,6 +12,7 @@ def test_CheckTalairachAlignment_inputs(): ), in_file=dict( argstr='-xfm %s', + extensions=None, mandatory=True, position=-1, xor=['subject'], diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py b/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py index e4e8efb718..72f60e58a8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py @@ -10,6 +10,7 @@ def test_Concatenate_inputs(): combine=dict(argstr='--combine', ), concatenated_file=dict( argstr='--o %s', + extensions=None, genfile=True, ), environ=dict( @@ -22,12 +23,18 @@ def test_Concatenate_inputs(): mandatory=True, ), keep_dtype=dict(argstr='--keep-datatype', ), - mask_file=dict(argstr='--mask %s', ), + mask_file=dict( + argstr='--mask %s', + extensions=None, + ), max_bonfcor=dict(argstr='--max-bonfcor', ), max_index=dict(argstr='--max-index', ), mean_div_n=dict(argstr='--mean-div-n', ), multiply_by=dict(argstr='--mul %f', ), - multiply_matrix_file=dict(argstr='--mtx %s', ), + multiply_matrix_file=dict( + argstr='--mtx %s', + extensions=None, + ), paired_stats=dict(argstr='--paired-%s', ), sign=dict(argstr='--%s', ), sort=dict(argstr='--sort', ), @@ -41,7 +48,7 @@ def test_Concatenate_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Concatenate_outputs(): - output_map = dict(concatenated_file=dict(), ) + output_map = dict(concatenated_file=dict(extensions=None, ), ) outputs = Concatenate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py b/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py index 
40a465b249..8acab945c1 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py @@ -12,6 +12,7 @@ def test_ConcatenateLTA_inputs(): ), in_lta1=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), @@ -25,6 +26,7 @@ def test_ConcatenateLTA_inputs(): invert_out=dict(argstr='-invertout', ), out_file=dict( argstr='%s', + extensions=None, hash_files=False, keep_extension=True, name_source=['in_lta1'], @@ -51,7 +53,7 @@ def test_ConcatenateLTA_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ConcatenateLTA_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = ConcatenateLTA.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py b/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py index 7999001813..3c4e5aa484 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py @@ -17,16 +17,28 @@ def test_Contrast_inputs(): argstr='--%s-only', mandatory=True, ), - orig=dict(mandatory=True, ), - rawavg=dict(mandatory=True, ), + orig=dict( + extensions=None, + mandatory=True, + ), + rawavg=dict( + extensions=None, + mandatory=True, + ), subject_id=dict( argstr='--s %s', mandatory=True, usedefault=True, ), subjects_dir=dict(), - thickness=dict(mandatory=True, ), - white=dict(mandatory=True, ), + thickness=dict( + extensions=None, + mandatory=True, + ), + white=dict( + extensions=None, + mandatory=True, + ), ) inputs = Contrast.input_spec() @@ -35,9 +47,9 @@ def test_Contrast_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Contrast_outputs(): output_map = dict( - out_contrast=dict(), - out_log=dict(), - out_stats=dict(), + out_contrast=dict(extensions=None, ), + out_log=dict(extensions=None, ), + out_stats=dict(extensions=None, ), ) outputs = Contrast.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py b/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py index f29b76df29..05bc424830 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py @@ -16,6 +16,7 @@ def test_Curvature_inputs(): in_file=dict( argstr='%s', copyfile=True, + extensions=None, mandatory=True, position=-2, ), @@ -31,8 +32,8 @@ def test_Curvature_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Curvature_outputs(): output_map = dict( - out_gauss=dict(), - out_mean=dict(), + out_gauss=dict(extensions=None, ), + out_mean=dict(extensions=None, ), ) outputs = Curvature.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py b/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py index 19d6a4772e..f815952eaf 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py @@ -9,11 +9,13 @@ def test_CurvatureStats_inputs(): copy_inputs=dict(), curvfile1=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), curvfile2=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, ), @@ -29,6 +31,7 @@ def test_CurvatureStats_inputs(): min_max=dict(argstr='-m', ), out_file=dict( argstr='-o %s', + extensions=None, hash_files=False, name_source=['hemisphere'], 
name_template='%s.curv.stats', @@ -40,7 +43,10 @@ def test_CurvatureStats_inputs(): usedefault=True, ), subjects_dir=dict(), - surface=dict(argstr='-F %s', ), + surface=dict( + argstr='-F %s', + extensions=None, + ), values=dict(argstr='-G', ), write=dict(argstr='--writeCurvatureFiles', ), ) @@ -50,7 +56,7 @@ def test_CurvatureStats_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CurvatureStats_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = CurvatureStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py index efd53fb773..3c0a95c437 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py @@ -8,7 +8,7 @@ def test_DICOMConvert_inputs(): args=dict(argstr='%s', ), base_output_dir=dict(mandatory=True, ), dicom_dir=dict(mandatory=True, ), - dicom_info=dict(), + dicom_info=dict(extensions=None, ), environ=dict( nohash=True, usedefault=True, diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py b/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py index e681a03871..2a58c98872 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py @@ -12,14 +12,19 @@ def test_EMRegister_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), - mask=dict(argstr='-mask %s', ), + mask=dict( + argstr='-mask %s', + extensions=None, + ), nbrspacing=dict(argstr='-uns %d', ), num_threads=dict(), out_file=dict( argstr='%s', + extensions=None, hash_files=False, keep_extension=False, name_source=['in_file'], @@ -30,10 +35,14 @@ def test_EMRegister_inputs(): subjects_dir=dict(), template=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), - transform=dict(argstr='-t %s', ), + transform=dict( + argstr='-t %s', + extensions=None, + ), ) inputs = EMRegister.input_spec() @@ -41,7 +50,7 @@ def test_EMRegister_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_EMRegister_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = EMRegister.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py b/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py index f971f4fab9..2ca6d59cc2 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py @@ -8,6 +8,7 @@ def test_EditWMwithAseg_inputs(): args=dict(argstr='%s', ), brain_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), @@ -17,17 +18,20 @@ def test_EditWMwithAseg_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-4, ), keep_in=dict(argstr='-keep-in', ), out_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, ), seg_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -39,7 +43,7 @@ def test_EditWMwithAseg_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_EditWMwithAseg_outputs(): - output_map = 
dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = EditWMwithAseg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py b/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py index 996d079b48..4ab7d9f45c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py @@ -12,6 +12,7 @@ def test_EulerNumber_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, ), @@ -23,7 +24,7 @@ def test_EulerNumber_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_EulerNumber_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = EulerNumber.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py b/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py index f3f4896a75..dba95bb3ba 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py @@ -12,11 +12,13 @@ def test_ExtractMainComponent_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=1, ), out_file=dict( argstr='%s', + extensions=None, name_source='in_file', name_template='%s.maincmp', position=2, @@ -28,7 +30,7 @@ def test_ExtractMainComponent_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ExtractMainComponent_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = ExtractMainComponent.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py b/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py index 9e6b1cbc8b..35dae21d21 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py @@ -33,9 +33,9 @@ def test_FitMSParams_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_FitMSParams_outputs(): output_map = dict( - pd_image=dict(), - t1_image=dict(), - t2star_image=dict(), + pd_image=dict(extensions=None, ), + t1_image=dict(extensions=None, ), + t2star_image=dict(extensions=None, ), ) outputs = FitMSParams.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py b/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py index 8b61823b42..4ba8442b14 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py @@ -17,10 +17,22 @@ def test_FixTopology_inputs(): mandatory=True, position=-1, ), - in_brain=dict(mandatory=True, ), - in_inflated=dict(mandatory=True, ), - in_orig=dict(mandatory=True, ), - in_wm=dict(mandatory=True, ), + in_brain=dict( + extensions=None, + mandatory=True, + ), + in_inflated=dict( + extensions=None, + mandatory=True, + ), + in_orig=dict( + extensions=None, + mandatory=True, + ), + in_wm=dict( + extensions=None, + mandatory=True, + ), mgz=dict(argstr='-mgz', ), seed=dict(argstr='-seed %d', ), sphere=dict(argstr='-sphere %s', ), @@ -38,7 +50,7 @@ def test_FixTopology_inputs(): for metakey, value in list(metadata.items()): 
assert getattr(inputs.traits()[key], metakey) == value def test_FixTopology_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = FixTopology.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py b/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py index 8608444fc8..d2d7467df1 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py @@ -23,6 +23,7 @@ def test_FuseSegmentations_inputs(): mandatory=True, ), out_file=dict( + extensions=None, mandatory=True, position=-1, ), @@ -43,7 +44,7 @@ def test_FuseSegmentations_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_FuseSegmentations_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = FuseSegmentations.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py b/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py index 828bbfab03..f2f6a5899f 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py @@ -19,6 +19,7 @@ def test_GLMFit_inputs(): debug=dict(argstr='--debug', ), design=dict( argstr='--X %s', + extensions=None, xor=('fsgd', 'design', 'one_sample'), ), diag=dict(argstr='--diag %d', ), @@ -33,9 +34,13 @@ def test_GLMFit_inputs(): ), fixed_fx_dof_file=dict( argstr='--ffxdofdat %d', + extensions=None, xor=['fixed_fx_dof'], ), - fixed_fx_var=dict(argstr='--yffxvar %s', ), + fixed_fx_var=dict( + argstr='--yffxvar %s', + extensions=None, + ), force_perm=dict(argstr='--perm-force', ), fsgd=dict( argstr='--fsgd %s %s', @@ -50,14 +55,19 @@ def test_GLMFit_inputs(): in_file=dict( argstr='--y %s', copyfile=False, + extensions=None, mandatory=True, ), invert_mask=dict(argstr='--mask-inv', ), label_file=dict( argstr='--label %s', + extensions=None, xor=['cortex'], ), - mask_file=dict(argstr='--mask %s', ), + mask_file=dict( + argstr='--mask %s', + extensions=None, + ), no_contrast_ok=dict(argstr='--no-contrasts-ok', ), no_est_fwhm=dict(argstr='--no-est-fwhm', ), no_mask_smooth=dict(argstr='--no-mask-smooth', ), @@ -84,7 +94,10 @@ def test_GLMFit_inputs(): save_residual=dict(argstr='--eres-save', ), seed=dict(argstr='--seed %d', ), self_reg=dict(argstr='--selfreg %d %d %d', ), - sim_done_file=dict(argstr='--sim-done %s', ), + sim_done_file=dict( + argstr='--sim-done %s', + extensions=None, + ), sim_sign=dict(argstr='--sim-sign %s', ), simulation=dict(argstr='--sim %s %d %f %s', ), subject_id=dict(), @@ -98,7 +111,10 @@ def test_GLMFit_inputs(): uniform=dict(argstr='--uniform %f %f', ), var_fwhm=dict(argstr='--var-fwhm %f', ), vox_dump=dict(argstr='--voxdump %d %d %d', ), - weight_file=dict(xor=['weighted_ls'], ), + weight_file=dict( + extensions=None, + xor=['weighted_ls'], + ), weight_inv=dict( argstr='--w-inv', xor=['weighted_ls'], @@ -109,6 +125,7 @@ def test_GLMFit_inputs(): ), weighted_ls=dict( argstr='--wls %s', + extensions=None, xor=('weight_file', 'weight_inv', 'weight_sqrt'), ), ) @@ -119,23 +136,23 @@ def test_GLMFit_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_GLMFit_outputs(): output_map = dict( - beta_file=dict(), - dof_file=dict(), - error_file=dict(), - error_stddev_file=dict(), 
- error_var_file=dict(), - estimate_file=dict(), - frame_eigenvectors=dict(), + beta_file=dict(extensions=None, ), + dof_file=dict(extensions=None, ), + error_file=dict(extensions=None, ), + error_stddev_file=dict(extensions=None, ), + error_var_file=dict(extensions=None, ), + estimate_file=dict(extensions=None, ), + frame_eigenvectors=dict(extensions=None, ), ftest_file=dict(), - fwhm_file=dict(), + fwhm_file=dict(extensions=None, ), gamma_file=dict(), gamma_var_file=dict(), glm_dir=dict(), - mask_file=dict(), + mask_file=dict(extensions=None, ), sig_file=dict(), - singular_values=dict(), - spatial_eigenvectors=dict(), - svd_stats_file=dict(), + singular_values=dict(extensions=None, ), + spatial_eigenvectors=dict(extensions=None, ), + svd_stats_file=dict(extensions=None, ), ) outputs = GLMFit.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py b/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py index 2fa225e87e..28be31b456 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py @@ -12,6 +12,7 @@ def test_ImageInfo_inputs(): ), in_file=dict( argstr='%s', + extensions=None, position=1, ), subjects_dir=dict(), @@ -31,7 +32,7 @@ def test_ImageInfo_outputs(): file_format=dict(), info=dict(), orientation=dict(), - out_file=dict(), + out_file=dict(extensions=None, ), ph_enc_dir=dict(), vox_sizes=dict(), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py b/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py index 49f5e6b48f..d3f8f16df3 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py @@ -12,16 +12,19 @@ def test_Jacobian_inputs(): ), in_mappedsurf=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), in_origsurf=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), out_file=dict( argstr='%s', + extensions=None, hash_files=False, keep_extension=False, name_source=['in_origsurf'], @@ -36,7 +39,7 @@ def test_Jacobian_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Jacobian_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Jacobian.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py index bcbf971d01..0a2df1b8c4 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py @@ -12,12 +12,14 @@ def test_LTAConvert_inputs(): ), in_fsl=dict( argstr='--infsl %s', + extensions=None, mandatory=True, xor=('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', 'in_itk'), ), in_itk=dict( argstr='--initk %s', + extensions=None, mandatory=True, xor=('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', 'in_itk'), @@ -30,18 +32,21 @@ def test_LTAConvert_inputs(): ), in_mni=dict( argstr='--inmni %s', + extensions=None, mandatory=True, xor=('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', 'in_itk'), ), in_niftyreg=dict( argstr='--inniftyreg %s', + extensions=None, mandatory=True, xor=('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', 'in_itk'), ), in_reg=dict( argstr='--inreg %s', + extensions=None, mandatory=True, xor=('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', 'in_itk'), @@ -56,9 
+61,15 @@ def test_LTAConvert_inputs(): out_lta=dict(argstr='--outlta %s', ), out_mni=dict(argstr='--outmni %s', ), out_reg=dict(argstr='--outreg %s', ), - source_file=dict(argstr='--src %s', ), + source_file=dict( + argstr='--src %s', + extensions=None, + ), target_conform=dict(argstr='--trgconform', ), - target_file=dict(argstr='--trg %s', ), + target_file=dict( + argstr='--trg %s', + extensions=None, + ), ) inputs = LTAConvert.input_spec() @@ -67,11 +78,11 @@ def test_LTAConvert_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_LTAConvert_outputs(): output_map = dict( - out_fsl=dict(), - out_itk=dict(), - out_lta=dict(), - out_mni=dict(), - out_reg=dict(), + out_fsl=dict(extensions=None, ), + out_itk=dict(extensions=None, ), + out_lta=dict(extensions=None, ), + out_mni=dict(extensions=None, ), + out_reg=dict(extensions=None, ), ) outputs = LTAConvert.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py b/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py index 2d7761006b..b7d594dabd 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py @@ -6,7 +6,10 @@ def test_Label2Annot_inputs(): input_map = dict( args=dict(argstr='%s', ), - color_table=dict(argstr='--ctab %s', ), + color_table=dict( + argstr='--ctab %s', + extensions=None, + ), copy_inputs=dict(), environ=dict( nohash=True, @@ -21,7 +24,10 @@ def test_Label2Annot_inputs(): mandatory=True, ), keep_max=dict(argstr='--maxstatwinner', ), - orig=dict(mandatory=True, ), + orig=dict( + extensions=None, + mandatory=True, + ), out_annot=dict( argstr='--a %s', mandatory=True, @@ -40,7 +46,7 @@ def test_Label2Annot_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Label2Annot_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Label2Annot.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py b/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py index 7511cd0dbb..4cc1b62ec0 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py @@ -17,6 +17,7 @@ def test_Label2Label_inputs(): ), out_file=dict( argstr='--trglabel %s', + extensions=None, hash_files=False, keep_extension=True, name_source=['source_label'], @@ -28,22 +29,35 @@ def test_Label2Label_inputs(): ), source_label=dict( argstr='--srclabel %s', + extensions=None, + mandatory=True, + ), + source_sphere_reg=dict( + extensions=None, mandatory=True, ), - source_sphere_reg=dict(mandatory=True, ), source_subject=dict( argstr='--srcsubject %s', mandatory=True, ), - source_white=dict(mandatory=True, ), - sphere_reg=dict(mandatory=True, ), + source_white=dict( + extensions=None, + mandatory=True, + ), + sphere_reg=dict( + extensions=None, + mandatory=True, + ), subject_id=dict( argstr='--trgsubject %s', mandatory=True, usedefault=True, ), subjects_dir=dict(), - white=dict(mandatory=True, ), + white=dict( + extensions=None, + mandatory=True, + ), ) inputs = Label2Label.input_spec() @@ -51,7 +65,7 @@ def test_Label2Label_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Label2Label_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), 
) outputs = Label2Label.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py b/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py index fb2726635f..fe277288d5 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py @@ -8,6 +8,7 @@ def test_Label2Vol_inputs(): annot_file=dict( argstr='--annot %s', copyfile=False, + extensions=None, mandatory=True, requires=('subject_id', 'hemi'), xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), @@ -35,9 +36,15 @@ def test_Label2Vol_inputs(): mandatory=True, xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), ), - label_hit_file=dict(argstr='--hits %s', ), + label_hit_file=dict( + argstr='--hits %s', + extensions=None, + ), label_voxel_volume=dict(argstr='--labvoxvol %f', ), - map_label_stat=dict(argstr='--label-stat %s', ), + map_label_stat=dict( + argstr='--label-stat %s', + extensions=None, + ), native_vox2ras=dict(argstr='--native-vox2ras', ), proj=dict( argstr='--proj %s %f %f %f', @@ -45,15 +52,18 @@ def test_Label2Vol_inputs(): ), reg_file=dict( argstr='--reg %s', + extensions=None, xor=('reg_file', 'reg_header', 'identity'), ), reg_header=dict( argstr='--regheader %s', + extensions=None, xor=('reg_file', 'reg_header', 'identity'), ), seg_file=dict( argstr='--seg %s', copyfile=False, + extensions=None, mandatory=True, xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), ), @@ -62,10 +72,12 @@ def test_Label2Vol_inputs(): surface=dict(argstr='--surf %s', ), template_file=dict( argstr='--temp %s', + extensions=None, mandatory=True, ), vol_label_file=dict( argstr='--o %s', + extensions=None, genfile=True, ), ) @@ -75,7 +87,7 @@ def test_Label2Vol_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Label2Vol_outputs(): - output_map = dict(vol_label_file=dict(), ) + output_map = dict(vol_label_file=dict(extensions=None, ), ) outputs = Label2Vol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py b/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py index 7b6ae4a945..da6b2f94ec 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py @@ -13,16 +13,21 @@ def test_MNIBiasCorrection_inputs(): ), in_file=dict( argstr='--i %s', + extensions=None, mandatory=True, ), iterations=dict( argstr='--n %d', usedefault=True, ), - mask=dict(argstr='--mask %s', ), + mask=dict( + argstr='--mask %s', + extensions=None, + ), no_rescale=dict(argstr='--no-rescale', ), out_file=dict( argstr='--o %s', + extensions=None, hash_files=False, keep_extension=True, name_source=['in_file'], @@ -32,7 +37,10 @@ def test_MNIBiasCorrection_inputs(): shrink=dict(argstr='--shrink %d', ), stop=dict(argstr='--stop %f', ), subjects_dir=dict(), - transform=dict(argstr='--uchar %s', ), + transform=dict( + argstr='--uchar %s', + extensions=None, + ), ) inputs = MNIBiasCorrection.input_spec() @@ -40,7 +48,7 @@ def test_MNIBiasCorrection_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MNIBiasCorrection_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = MNIBiasCorrection.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py b/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py index b4b7436120..44d836fac4 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py @@ -12,6 +12,7 @@ def test_MPRtoMNI305_inputs(): ), in_file=dict( argstr='%s', + extensions=None, usedefault=True, ), reference_dir=dict( @@ -31,8 +32,11 @@ def test_MPRtoMNI305_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_MPRtoMNI305_outputs(): output_map = dict( - log_file=dict(usedefault=True, ), - out_file=dict(), + log_file=dict( + extensions=None, + usedefault=True, + ), + out_file=dict(extensions=None, ), ) outputs = MPRtoMNI305.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py index b44cf7a308..d66ad1a4c4 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py @@ -5,12 +5,24 @@ def test_MRIConvert_inputs(): input_map = dict( - apply_inv_transform=dict(argstr='--apply_inverse_transform %s', ), - apply_transform=dict(argstr='--apply_transform %s', ), + apply_inv_transform=dict( + argstr='--apply_inverse_transform %s', + extensions=None, + ), + apply_transform=dict( + argstr='--apply_transform %s', + extensions=None, + ), args=dict(argstr='%s', ), ascii=dict(argstr='--ascii', ), - autoalign_matrix=dict(argstr='--autoalign %s', ), - color_file=dict(argstr='--color_file %s', ), + autoalign_matrix=dict( + argstr='--autoalign %s', + extensions=None, + ), + color_file=dict( + argstr='--color_file %s', + extensions=None, + ), conform=dict(argstr='--conform', ), conform_min=dict(argstr='--conform_min', ), conform_size=dict(argstr='--conform_size %s', ), @@ -33,6 +45,7 @@ def test_MRIConvert_inputs(): in_center=dict(argstr='--in_center %s', ), in_file=dict( argstr='--input_volume %s', + extensions=None, mandatory=True, position=-2, ), @@ -43,7 +56,10 @@ def test_MRIConvert_inputs(): in_j_size=dict(argstr='--in_j_size %d', ), in_k_dir=dict(argstr='--in_k_direction %f %f %f', ), in_k_size=dict(argstr='--in_k_size %d', ), - in_like=dict(argstr='--in_like %s', ), + in_like=dict( + argstr='--in_like %s', + extensions=None, + ), in_matrix=dict(argstr='--in_matrix', ), in_orientation=dict(argstr='--in_orientation %s', ), in_scale=dict(argstr='--scale %f', ), @@ -59,6 +75,7 @@ def test_MRIConvert_inputs(): out_datatype=dict(argstr='--out_data_type %s', ), out_file=dict( argstr='--output_volume %s', + extensions=None, genfile=True, position=-1, ), @@ -81,8 +98,14 @@ def test_MRIConvert_inputs(): read_only=dict(argstr='--read_only', ), reorder=dict(argstr='--reorder %d %d %d', ), resample_type=dict(argstr='--resample_type %s', ), - reslice_like=dict(argstr='--reslice_like %s', ), - sdcm_list=dict(argstr='--sdcmlist %s', ), + reslice_like=dict( + argstr='--reslice_like %s', + extensions=None, + ), + sdcm_list=dict( + argstr='--sdcmlist %s', + extensions=None, + ), skip_n=dict(argstr='--nskip %d', ), slice_bias=dict(argstr='--slice-bias %f', ), slice_crop=dict(argstr='--slice-crop %d %d', ), @@ -90,7 +113,10 @@ def test_MRIConvert_inputs(): smooth_parcellation=dict(argstr='--smooth_parcellation', ), sphinx=dict(argstr='--sphinx', ), split=dict(argstr='--split', ), - status_file=dict(argstr='--status %s', ), + status_file=dict( + argstr='--status %s', + extensions=None, + ), 
subject_name=dict(argstr='--subject_name %s', ), subjects_dir=dict(), te=dict(argstr='-te %d', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py b/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py index 6c12cabdc2..508899d6b0 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py @@ -43,6 +43,7 @@ def test_MRICoreg_inputs(): reference_file=dict( argstr='--ref %s', copyfile=False, + extensions=None, mandatory=True, xor=['subject_id'], ), @@ -55,6 +56,7 @@ def test_MRICoreg_inputs(): source_file=dict( argstr='--mov %s', copyfile=False, + extensions=None, mandatory=True, ), source_mask=dict(argstr='--mov-mask', ), @@ -75,9 +77,9 @@ def test_MRICoreg_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_MRICoreg_outputs(): output_map = dict( - out_lta_file=dict(), - out_params_file=dict(), - out_reg_file=dict(), + out_lta_file=dict(extensions=None, ), + out_params_file=dict(extensions=None, ), + out_reg_file=dict(extensions=None, ), ) outputs = MRICoreg.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py index 719986961d..748cc6f5e6 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py @@ -12,18 +12,29 @@ def test_MRIFill_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), - log_file=dict(argstr='-a %s', ), + log_file=dict( + argstr='-a %s', + extensions=None, + ), out_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, ), - segmentation=dict(argstr='-segmentation %s', ), + segmentation=dict( + argstr='-segmentation %s', + extensions=None, + ), subjects_dir=dict(), - transform=dict(argstr='-xform %s', ), + transform=dict( + argstr='-xform %s', + extensions=None, + ), ) inputs = MRIFill.input_spec() @@ -32,8 +43,8 @@ def test_MRIFill_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_MRIFill_outputs(): output_map = dict( - log_file=dict(), - out_file=dict(), + log_file=dict(extensions=None, ), + out_file=dict(extensions=None, ), ) outputs = MRIFill.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py index f43d217256..bd22d3f486 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py @@ -17,6 +17,7 @@ def test_MRIMarchingCubes_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=1, ), @@ -27,6 +28,7 @@ def test_MRIMarchingCubes_inputs(): ), out_file=dict( argstr='./%s', + extensions=None, genfile=True, position=-2, ), @@ -38,7 +40,7 @@ def test_MRIMarchingCubes_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MRIMarchingCubes_outputs(): - output_map = dict(surface=dict(), ) + output_map = dict(surface=dict(extensions=None, ), ) outputs = MRIMarchingCubes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py index 2e7c7d2ea5..ff07c64b42 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py @@ -12,11 +12,13 @@ def 
test_MRIPretess_inputs(): ), in_filled=dict( argstr='%s', + extensions=None, mandatory=True, position=-4, ), in_norm=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -30,6 +32,7 @@ def test_MRIPretess_inputs(): nocorners=dict(argstr='-nocorners', ), out_file=dict( argstr='%s', + extensions=None, keep_extension=True, name_source=['in_filled'], name_template='%s_pretesswm', @@ -44,7 +47,7 @@ def test_MRIPretess_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MRIPretess_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = MRIPretess.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py index c92b4fea15..2208760d74 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py @@ -12,6 +12,7 @@ def test_MRISPreproc_inputs(): ), fsgd_file=dict( argstr='--fsgd %s', + extensions=None, xor=('subjects', 'fsgd_file', 'subject_file'), ), fwhm=dict( @@ -36,6 +37,7 @@ def test_MRISPreproc_inputs(): ), out_file=dict( argstr='--out %s', + extensions=None, genfile=True, ), proj_frac=dict(argstr='--projfrac %s', ), @@ -43,6 +45,7 @@ def test_MRISPreproc_inputs(): source_format=dict(argstr='--srcfmt %s', ), subject_file=dict( argstr='--f %s', + extensions=None, xor=('subjects', 'fsgd_file', 'subject_file'), ), subjects=dict( @@ -75,7 +78,7 @@ def test_MRISPreproc_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MRISPreproc_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = MRISPreproc.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py index e72d8adb9e..3c36fd6a43 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py @@ -13,6 +13,7 @@ def test_MRISPreprocReconAll_inputs(): ), fsgd_file=dict( argstr='--fsgd %s', + extensions=None, xor=('subjects', 'fsgd_file', 'subject_file'), ), fwhm=dict( @@ -27,7 +28,10 @@ def test_MRISPreprocReconAll_inputs(): argstr='--hemi %s', mandatory=True, ), - lh_surfreg_target=dict(requires=['surfreg_files'], ), + lh_surfreg_target=dict( + extensions=None, + requires=['surfreg_files'], + ), num_iters=dict( argstr='--niters %d', xor=['fwhm'], @@ -38,14 +42,19 @@ def test_MRISPreprocReconAll_inputs(): ), out_file=dict( argstr='--out %s', + extensions=None, genfile=True, ), proj_frac=dict(argstr='--projfrac %s', ), - rh_surfreg_target=dict(requires=['surfreg_files'], ), + rh_surfreg_target=dict( + extensions=None, + requires=['surfreg_files'], + ), smooth_cortex_only=dict(argstr='--smooth-cortex-only', ), source_format=dict(argstr='--srcfmt %s', ), subject_file=dict( argstr='--f %s', + extensions=None, xor=('subjects', 'fsgd_file', 'subject_file'), ), subject_id=dict( @@ -69,6 +78,7 @@ def test_MRISPreprocReconAll_inputs(): ), surf_measure_file=dict( argstr='--meas %s', + extensions=None, xor=('surf_measure', 'surf_measure_file', 'surf_area'), ), surfreg_files=dict( @@ -87,7 +97,7 @@ def test_MRISPreprocReconAll_inputs(): 
for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MRISPreprocReconAll_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = MRISPreprocReconAll.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py b/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py index a5899cfe70..315cfd7e2f 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py @@ -12,6 +12,7 @@ def test_MRITessellate_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), @@ -22,6 +23,7 @@ def test_MRITessellate_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, position=-1, ), @@ -35,7 +37,7 @@ def test_MRITessellate_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MRITessellate_outputs(): - output_map = dict(surface=dict(), ) + output_map = dict(surface=dict(extensions=None, ), ) outputs = MRITessellate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py index 441be3e0a4..10c7af6832 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py @@ -9,16 +9,21 @@ def test_MRIsCALabel_inputs(): aseg=dict(argstr='-aseg %s', ), canonsurf=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), classifier=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), copy_inputs=dict(), - curv=dict(mandatory=True, ), + curv=dict( + extensions=None, + mandatory=True, + ), environ=dict( nohash=True, usedefault=True, @@ -32,6 +37,7 @@ def test_MRIsCALabel_inputs(): num_threads=dict(), out_file=dict( argstr='%s', + extensions=None, hash_files=False, keep_extension=True, name_source=['hemisphere'], @@ -39,7 +45,10 @@ def test_MRIsCALabel_inputs(): position=-1, ), seed=dict(argstr='-seed %d', ), - smoothwm=dict(mandatory=True, ), + smoothwm=dict( + extensions=None, + mandatory=True, + ), subject_id=dict( argstr='%s', mandatory=True, @@ -47,7 +56,10 @@ def test_MRIsCALabel_inputs(): usedefault=True, ), subjects_dir=dict(), - sulc=dict(mandatory=True, ), + sulc=dict( + extensions=None, + mandatory=True, + ), ) inputs = MRIsCALabel.input_spec() @@ -55,7 +67,7 @@ def test_MRIsCALabel_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MRIsCALabel_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = MRIsCALabel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py index cbd68c8457..296b7726a7 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py @@ -17,11 +17,13 @@ def test_MRIsCalc_inputs(): ), in_file1=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), in_file2=dict( argstr='%s', + extensions=None, position=-1, xor=['in_float', 'in_int'], ), @@ -37,6 +39,7 @@ def test_MRIsCalc_inputs(): ), out_file=dict( argstr='-o %s', + 
extensions=None, mandatory=True, ), subjects_dir=dict(), @@ -47,7 +50,7 @@ def test_MRIsCalc_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MRIsCalc_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = MRIsCalc.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py index 657292c5c7..006dd272e1 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py @@ -17,6 +17,7 @@ def test_MRIsCombine_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, mandatory=True, position=-1, @@ -29,7 +30,7 @@ def test_MRIsCombine_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MRIsCombine_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = MRIsCombine.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py index 21622b2449..92c397be96 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py @@ -5,21 +5,34 @@ def test_MRIsConvert_inputs(): input_map = dict( - annot_file=dict(argstr='--annot %s', ), + annot_file=dict( + argstr='--annot %s', + extensions=None, + ), args=dict(argstr='%s', ), dataarray_num=dict(argstr='--da_num %d', ), environ=dict( nohash=True, usedefault=True, ), - functional_file=dict(argstr='-f %s', ), + functional_file=dict( + argstr='-f %s', + extensions=None, + ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), - label_file=dict(argstr='--label %s', ), - labelstats_outfile=dict(argstr='--labelstats %s', ), + label_file=dict( + argstr='--label %s', + extensions=None, + ), + labelstats_outfile=dict( + argstr='--labelstats %s', + extensions=None, + ), normal=dict(argstr='-n', ), origname=dict(argstr='-o %s', ), out_datatype=dict( @@ -28,15 +41,22 @@ def test_MRIsConvert_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, mandatory=True, position=-1, xor=['out_datatype'], ), - parcstats_file=dict(argstr='--parcstats %s', ), + parcstats_file=dict( + argstr='--parcstats %s', + extensions=None, + ), patch=dict(argstr='-p', ), rescale=dict(argstr='-r', ), - scalarcurv_file=dict(argstr='-c %s', ), + scalarcurv_file=dict( + argstr='-c %s', + extensions=None, + ), scale=dict(argstr='-s %.3f', ), subjects_dir=dict(), talairachxfm_subjid=dict(argstr='-t %s', ), @@ -51,7 +71,7 @@ def test_MRIsConvert_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MRIsConvert_outputs(): - output_map = dict(converted=dict(), ) + output_map = dict(converted=dict(extensions=None, ), ) outputs = MRIsConvert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py index 8639c1e7b6..12f2bfb294 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py @@ -19,6 +19,7 @@ def 
test_MRIsExpand_inputs(): in_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-3, ), @@ -52,7 +53,7 @@ def test_MRIsExpand_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MRIsExpand_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = MRIsExpand.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py index 73e48a2521..147a26275c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py @@ -13,6 +13,7 @@ def test_MRIsInflate_inputs(): in_file=dict( argstr='%s', copyfile=True, + extensions=None, mandatory=True, position=-2, ), @@ -22,13 +23,17 @@ def test_MRIsInflate_inputs(): ), out_file=dict( argstr='%s', + extensions=None, hash_files=False, keep_extension=True, name_source=['in_file'], name_template='%s.inflated', position=-1, ), - out_sulc=dict(xor=['no_save_sulc'], ), + out_sulc=dict( + extensions=None, + xor=['no_save_sulc'], + ), subjects_dir=dict(), ) inputs = MRIsInflate.input_spec() @@ -38,8 +43,8 @@ def test_MRIsInflate_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_MRIsInflate_outputs(): output_map = dict( - out_file=dict(), - out_sulc=dict(), + out_file=dict(extensions=None, ), + out_sulc=dict(extensions=None, ), ) outputs = MRIsInflate.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py b/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py index f3453818c5..badb3b4f0c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py @@ -43,8 +43,8 @@ def test_MS_LDA_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_MS_LDA_outputs(): output_map = dict( - vol_synth_file=dict(), - weight_file=dict(), + vol_synth_file=dict(extensions=None, ), + weight_file=dict(extensions=None, ), ) outputs = MS_LDA.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py b/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py index 5dfb555346..527bc5135b 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py @@ -12,6 +12,7 @@ def test_MakeAverageSubject_inputs(): ), out_name=dict( argstr='--out %s', + extensions=None, usedefault=True, ), subjects_dir=dict(), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py b/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py index 7ff18eeba6..3719b3a8af 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py @@ -17,16 +17,32 @@ def test_MakeSurfaces_inputs(): mandatory=True, position=-1, ), - in_T1=dict(argstr='-T1 %s', ), - in_aseg=dict(argstr='-aseg %s', ), - in_filled=dict(mandatory=True, ), - in_label=dict(xor=['noaparc'], ), + in_T1=dict( + argstr='-T1 %s', + extensions=None, + ), + in_aseg=dict( + argstr='-aseg %s', + extensions=None, + ), + in_filled=dict( + extensions=None, + mandatory=True, + ), + in_label=dict( + extensions=None, + xor=['noaparc'], + ), in_orig=dict( argstr='-orig %s', + extensions=None, + mandatory=True, + ), + in_white=dict(extensions=None, ), + 
in_wm=dict( + extensions=None, mandatory=True, ), - in_white=dict(), - in_wm=dict(mandatory=True, ), longitudinal=dict(argstr='-long', ), maximum=dict(argstr='-max %.1f', ), mgz=dict(argstr='-mgz', ), @@ -37,9 +53,13 @@ def test_MakeSurfaces_inputs(): ), orig_pial=dict( argstr='-orig_pial %s', + extensions=None, requires=['in_label'], ), - orig_white=dict(argstr='-orig_white %s', ), + orig_white=dict( + argstr='-orig_white %s', + extensions=None, + ), subject_id=dict( argstr='%s', mandatory=True, @@ -57,12 +77,12 @@ def test_MakeSurfaces_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_MakeSurfaces_outputs(): output_map = dict( - out_area=dict(), - out_cortex=dict(), - out_curv=dict(), - out_pial=dict(), - out_thickness=dict(), - out_white=dict(), + out_area=dict(extensions=None, ), + out_cortex=dict(extensions=None, ), + out_curv=dict(extensions=None, ), + out_pial=dict(extensions=None, ), + out_thickness=dict(extensions=None, ), + out_white=dict(extensions=None, ), ) outputs = MakeSurfaces.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py b/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py index 1923b5ceed..2384adbb2b 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py @@ -13,21 +13,29 @@ def test_Normalize_inputs(): gradient=dict(argstr='-g %d', ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), - mask=dict(argstr='-mask %s', ), + mask=dict( + argstr='-mask %s', + extensions=None, + ), out_file=dict( argstr='%s', + extensions=None, hash_files=False, keep_extension=True, name_source=['in_file'], name_template='%s_norm', position=-1, ), - segmentation=dict(argstr='-aseg %s', ), + segmentation=dict( + argstr='-aseg %s', + extensions=None, + ), subjects_dir=dict(), - transform=dict(), + transform=dict(extensions=None, ), ) inputs = Normalize.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py b/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py index 1091bdbdc3..f0d182cb6c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py @@ -19,6 +19,7 @@ def test_OneSampleTTest_inputs(): debug=dict(argstr='--debug', ), design=dict( argstr='--X %s', + extensions=None, xor=('fsgd', 'design', 'one_sample'), ), diag=dict(argstr='--diag %d', ), @@ -33,9 +34,13 @@ def test_OneSampleTTest_inputs(): ), fixed_fx_dof_file=dict( argstr='--ffxdofdat %d', + extensions=None, xor=['fixed_fx_dof'], ), - fixed_fx_var=dict(argstr='--yffxvar %s', ), + fixed_fx_var=dict( + argstr='--yffxvar %s', + extensions=None, + ), force_perm=dict(argstr='--perm-force', ), fsgd=dict( argstr='--fsgd %s %s', @@ -50,14 +55,19 @@ def test_OneSampleTTest_inputs(): in_file=dict( argstr='--y %s', copyfile=False, + extensions=None, mandatory=True, ), invert_mask=dict(argstr='--mask-inv', ), label_file=dict( argstr='--label %s', + extensions=None, xor=['cortex'], ), - mask_file=dict(argstr='--mask %s', ), + mask_file=dict( + argstr='--mask %s', + extensions=None, + ), no_contrast_ok=dict(argstr='--no-contrasts-ok', ), no_est_fwhm=dict(argstr='--no-est-fwhm', ), no_mask_smooth=dict(argstr='--no-mask-smooth', ), @@ -84,7 +94,10 @@ def test_OneSampleTTest_inputs(): save_residual=dict(argstr='--eres-save', ), seed=dict(argstr='--seed %d', ), self_reg=dict(argstr='--selfreg %d %d %d', ), - sim_done_file=dict(argstr='--sim-done %s', 
), + sim_done_file=dict( + argstr='--sim-done %s', + extensions=None, + ), sim_sign=dict(argstr='--sim-sign %s', ), simulation=dict(argstr='--sim %s %d %f %s', ), subject_id=dict(), @@ -98,7 +111,10 @@ def test_OneSampleTTest_inputs(): uniform=dict(argstr='--uniform %f %f', ), var_fwhm=dict(argstr='--var-fwhm %f', ), vox_dump=dict(argstr='--voxdump %d %d %d', ), - weight_file=dict(xor=['weighted_ls'], ), + weight_file=dict( + extensions=None, + xor=['weighted_ls'], + ), weight_inv=dict( argstr='--w-inv', xor=['weighted_ls'], @@ -109,6 +125,7 @@ def test_OneSampleTTest_inputs(): ), weighted_ls=dict( argstr='--wls %s', + extensions=None, xor=('weight_file', 'weight_inv', 'weight_sqrt'), ), ) @@ -119,23 +136,23 @@ def test_OneSampleTTest_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_OneSampleTTest_outputs(): output_map = dict( - beta_file=dict(), - dof_file=dict(), - error_file=dict(), - error_stddev_file=dict(), - error_var_file=dict(), - estimate_file=dict(), - frame_eigenvectors=dict(), + beta_file=dict(extensions=None, ), + dof_file=dict(extensions=None, ), + error_file=dict(extensions=None, ), + error_stddev_file=dict(extensions=None, ), + error_var_file=dict(extensions=None, ), + estimate_file=dict(extensions=None, ), + frame_eigenvectors=dict(extensions=None, ), ftest_file=dict(), - fwhm_file=dict(), + fwhm_file=dict(extensions=None, ), gamma_file=dict(), gamma_var_file=dict(), glm_dir=dict(), - mask_file=dict(), + mask_file=dict(extensions=None, ), sig_file=dict(), - singular_values=dict(), - spatial_eigenvectors=dict(), - svd_stats_file=dict(), + singular_values=dict(extensions=None, ), + spatial_eigenvectors=dict(extensions=None, ), + svd_stats_file=dict(extensions=None, ), ) outputs = OneSampleTTest.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Paint.py b/nipype/interfaces/freesurfer/tests/test_auto_Paint.py index c452594e55..311af9714b 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Paint.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Paint.py @@ -13,11 +13,13 @@ def test_Paint_inputs(): ), in_surf=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), out_file=dict( argstr='%s', + extensions=None, hash_files=False, keep_extension=False, name_source=['in_surf'], @@ -27,6 +29,7 @@ def test_Paint_inputs(): subjects_dir=dict(), template=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), @@ -38,7 +41,7 @@ def test_Paint_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Paint_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Paint.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py b/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py index 81ac276b2f..be19b2bd37 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py @@ -6,10 +6,16 @@ def test_ParcellationStats_inputs(): input_map = dict( args=dict(argstr='%s', ), - aseg=dict(mandatory=True, ), - brainmask=dict(mandatory=True, ), + aseg=dict( + extensions=None, + mandatory=True, + ), + brainmask=dict( + extensions=None, + mandatory=True, + ), copy_inputs=dict(), - cortex_label=dict(), + cortex_label=dict(extensions=None, ), environ=dict( nohash=True, usedefault=True, @@ -28,8 +34,14 @@ def 
test_ParcellationStats_inputs(): argstr='-l %s', xor=['in_annotatoin', 'out_color'], ), - lh_pial=dict(mandatory=True, ), - lh_white=dict(mandatory=True, ), + lh_pial=dict( + extensions=None, + mandatory=True, + ), + lh_white=dict( + extensions=None, + mandatory=True, + ), mgz=dict(argstr='-mgz', ), out_color=dict( argstr='-c %s', @@ -41,9 +53,18 @@ def test_ParcellationStats_inputs(): genfile=True, requires=['tabular_output'], ), - rh_pial=dict(mandatory=True, ), - rh_white=dict(mandatory=True, ), - ribbon=dict(mandatory=True, ), + rh_pial=dict( + extensions=None, + mandatory=True, + ), + rh_white=dict( + extensions=None, + mandatory=True, + ), + ribbon=dict( + extensions=None, + mandatory=True, + ), subject_id=dict( argstr='%s', mandatory=True, @@ -60,9 +81,18 @@ def test_ParcellationStats_inputs(): argstr='-th3', requires=['cortex_label'], ), - thickness=dict(mandatory=True, ), - transform=dict(mandatory=True, ), - wm=dict(mandatory=True, ), + thickness=dict( + extensions=None, + mandatory=True, + ), + transform=dict( + extensions=None, + mandatory=True, + ), + wm=dict( + extensions=None, + mandatory=True, + ), ) inputs = ParcellationStats.input_spec() @@ -71,8 +101,8 @@ def test_ParcellationStats_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_ParcellationStats_outputs(): output_map = dict( - out_color=dict(), - out_table=dict(), + out_color=dict(extensions=None, ), + out_table=dict(extensions=None, ), ) outputs = ParcellationStats.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py b/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py index 93254ddcb3..148044abd3 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py @@ -12,6 +12,7 @@ def test_ParseDICOMDir_inputs(): ), dicom_info_file=dict( argstr='--o %s', + extensions=None, usedefault=True, ), environ=dict( @@ -28,7 +29,7 @@ def test_ParseDICOMDir_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ParseDICOMDir_outputs(): - output_map = dict(dicom_info_file=dict(), ) + output_map = dict(dicom_info_file=dict(extensions=None, ), ) outputs = ParseDICOMDir.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py index 39f6a8c942..18a4dfeb43 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py @@ -7,11 +7,13 @@ def test_ReconAll_inputs(): input_map = dict( FLAIR_file=dict( argstr='-FLAIR %s', + extensions=None, min_ver='5.3.0', ), T1_files=dict(argstr='-i %s...', ), T2_file=dict( argstr='-T2 %s', + extensions=None, min_ver='5.3.0', ), args=dict(argstr='%s', ), @@ -26,7 +28,10 @@ def test_ReconAll_inputs(): nohash=True, usedefault=True, ), - expert=dict(argstr='-expert %s', ), + expert=dict( + argstr='-expert %s', + extensions=None, + ), flags=dict(argstr='%s', ), hemi=dict(argstr='-hemi %s', ), hippocampal_subfields_T1=dict( @@ -102,7 +107,10 @@ def test_ReconAll_outputs(): altkey='BA', loc='stats', ), - T1=dict(loc='mri', ), + T1=dict( + extensions=None, + loc='mri', + ), annot=dict( altkey='*annot', loc='label', @@ -123,14 +131,23 @@ def test_ReconAll_outputs(): altkey='area.pial', loc='surf', ), - aseg=dict(loc='mri', ), + aseg=dict( + extensions=None, + loc='mri', + ), aseg_stats=dict( 
altkey='aseg', loc='stats', ), avg_curv=dict(loc='surf', ), - brain=dict(loc='mri', ), - brainmask=dict(loc='mri', ), + brain=dict( + extensions=None, + loc='mri', + ), + brainmask=dict( + extensions=None, + loc='mri', + ), curv=dict(loc='surf', ), curv_pial=dict( altkey='curv.pial', @@ -144,7 +161,10 @@ def test_ReconAll_outputs(): altkey='entorhinal_exvivo', loc='stats', ), - filled=dict(loc='mri', ), + filled=dict( + extensions=None, + loc='mri', + ), graymid=dict( altkey=['graymid', 'midthickness'], loc='surf', @@ -155,11 +175,23 @@ def test_ReconAll_outputs(): altkey='*label', loc='label', ), - norm=dict(loc='mri', ), - nu=dict(loc='mri', ), - orig=dict(loc='mri', ), + norm=dict( + extensions=None, + loc='mri', + ), + nu=dict( + extensions=None, + loc='mri', + ), + orig=dict( + extensions=None, + loc='mri', + ), pial=dict(loc='surf', ), - rawavg=dict(loc='mri', ), + rawavg=dict( + extensions=None, + loc='mri', + ), ribbon=dict( altkey='*ribbon', loc='mri', @@ -176,8 +208,14 @@ def test_ReconAll_outputs(): thickness=dict(loc='surf', ), volume=dict(loc='surf', ), white=dict(loc='surf', ), - wm=dict(loc='mri', ), - wmparc=dict(loc='mri', ), + wm=dict( + extensions=None, + loc='mri', + ), + wmparc=dict( + extensions=None, + loc='mri', + ), wmparc_stats=dict( altkey='wmparc', loc='stats', diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Register.py b/nipype/interfaces/freesurfer/tests/test_auto_Register.py index b4eff5133c..3adccf2139 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Register.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Register.py @@ -14,25 +14,32 @@ def test_Register_inputs(): nohash=True, usedefault=True, ), - in_smoothwm=dict(copyfile=True, ), + in_smoothwm=dict( + copyfile=True, + extensions=None, + ), in_sulc=dict( copyfile=True, + extensions=None, mandatory=True, ), in_surf=dict( argstr='%s', copyfile=True, + extensions=None, mandatory=True, position=-3, ), out_file=dict( argstr='%s', + extensions=None, genfile=True, position=-1, ), subjects_dir=dict(), target=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -43,7 +50,7 @@ def test_Register_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Register_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Register.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py b/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py index 0bc88bf935..f3406d41fc 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py @@ -12,22 +12,26 @@ def test_RegisterAVItoTalairach_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=0, ), out_file=dict( argstr='%s', + extensions=None, position=3, usedefault=True, ), subjects_dir=dict(), target=dict( argstr='%s', + extensions=None, mandatory=True, position=1, ), vox2vox=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -39,7 +43,10 @@ def test_RegisterAVItoTalairach_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_RegisterAVItoTalairach_outputs(): output_map = dict( - log_file=dict(usedefault=True, ), + log_file=dict( + extensions=None, + usedefault=True, + ), out_file=dict(), ) outputs = RegisterAVItoTalairach.output_spec() diff 
--git a/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py b/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py index 859aff0820..28ff49d99e 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py @@ -8,6 +8,7 @@ def test_RelabelHypointensities_inputs(): args=dict(argstr='%s', ), aseg=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), @@ -17,10 +18,12 @@ def test_RelabelHypointensities_inputs(): ), lh_white=dict( copyfile=True, + extensions=None, mandatory=True, ), out_file=dict( argstr='%s', + extensions=None, hash_files=False, keep_extension=False, name_source=['aseg'], @@ -29,6 +32,7 @@ def test_RelabelHypointensities_inputs(): ), rh_white=dict( copyfile=True, + extensions=None, mandatory=True, ), subjects_dir=dict(), @@ -44,7 +48,11 @@ def test_RelabelHypointensities_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_RelabelHypointensities_outputs(): - output_map = dict(out_file=dict(argstr='%s', ), ) + output_map = dict( + out_file=dict( + argstr='%s', + extensions=None, + ), ) outputs = RelabelHypointensities.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py b/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py index 69e1d453a4..38afb704c1 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py @@ -13,11 +13,13 @@ def test_RemoveIntersection_inputs(): in_file=dict( argstr='%s', copyfile=True, + extensions=None, mandatory=True, position=-2, ), out_file=dict( argstr='%s', + extensions=None, hash_files=False, keep_extension=True, name_source=['in_file'], @@ -32,7 +34,7 @@ def test_RemoveIntersection_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_RemoveIntersection_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = RemoveIntersection.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py b/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py index 9e095ddba0..f99d1ca785 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py @@ -12,11 +12,13 @@ def test_RemoveNeck_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-4, ), out_file=dict( argstr='%s', + extensions=None, hash_files=False, keep_extension=True, name_source=['in_file'], @@ -27,11 +29,13 @@ def test_RemoveNeck_inputs(): subjects_dir=dict(), template=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), transform=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), @@ -42,7 +46,7 @@ def test_RemoveNeck_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_RemoveNeck_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = RemoveNeck.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Resample.py 
b/nipype/interfaces/freesurfer/tests/test_auto_Resample.py index ecb939d89a..1633369d13 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Resample.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Resample.py @@ -12,11 +12,13 @@ def test_Resample_inputs(): ), in_file=dict( argstr='-i %s', + extensions=None, mandatory=True, position=-2, ), resampled_file=dict( argstr='-o %s', + extensions=None, genfile=True, position=-1, ), @@ -32,7 +34,7 @@ def test_Resample_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Resample_outputs(): - output_map = dict(resampled_file=dict(), ) + output_map = dict(resampled_file=dict(extensions=None, ), ) outputs = Resample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py b/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py index af9020c278..0efbe528be 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py @@ -24,12 +24,21 @@ def test_RobustRegister_inputs(): half_targ_xfm=dict(argstr='--halfdstlta %s', ), half_weights=dict(argstr='--halfweights %s', ), high_iterations=dict(argstr='--highit %d', ), - in_xfm_file=dict(argstr='--transform', ), + in_xfm_file=dict( + argstr='--transform', + extensions=None, + ), init_orient=dict(argstr='--initorient', ), iteration_thresh=dict(argstr='--epsit %.3f', ), least_squares=dict(argstr='--leastsquares', ), - mask_source=dict(argstr='--maskmov %s', ), - mask_target=dict(argstr='--maskdst %s', ), + mask_source=dict( + argstr='--maskmov %s', + extensions=None, + ), + mask_target=dict( + argstr='--maskdst %s', + extensions=None, + ), max_iterations=dict(argstr='--maxit %d', ), no_init=dict(argstr='--noinit', ), no_multi=dict(argstr='--nomulti', ), @@ -46,12 +55,14 @@ def test_RobustRegister_inputs(): registered_file=dict(argstr='--warp %s', ), source_file=dict( argstr='--mov %s', + extensions=None, mandatory=True, ), subjects_dir=dict(), subsample_thresh=dict(argstr='--subsample %d', ), target_file=dict( argstr='--dst %s', + extensions=None, mandatory=True, ), trans_only=dict(argstr='--transonly', ), @@ -65,14 +76,14 @@ def test_RobustRegister_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_RobustRegister_outputs(): output_map = dict( - half_source=dict(), - half_source_xfm=dict(), - half_targ=dict(), - half_targ_xfm=dict(), - half_weights=dict(), - out_reg_file=dict(), - registered_file=dict(), - weights_file=dict(), + half_source=dict(extensions=None, ), + half_source_xfm=dict(extensions=None, ), + half_targ=dict(extensions=None, ), + half_targ_xfm=dict(extensions=None, ), + half_weights=dict(extensions=None, ), + out_reg_file=dict(extensions=None, ), + registered_file=dict(extensions=None, ), + weights_file=dict(extensions=None, ), ) outputs = RobustRegister.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py b/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py index e845a764e4..3b5c635a82 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py @@ -29,6 +29,7 @@ def test_RobustTemplate_inputs(): num_threads=dict(), out_file=dict( argstr='--template %s', + extensions=None, mandatory=True, usedefault=True, ), @@ -49,7 +50,7 @@ def test_RobustTemplate_inputs(): assert getattr(inputs.traits()[key], 
metakey) == value def test_RobustTemplate_outputs(): output_map = dict( - out_file=dict(), + out_file=dict(extensions=None, ), scaled_intensity_outputs=dict(), transform_outputs=dict(), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py b/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py index 0926eebba2..f36d84fbfc 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py @@ -32,6 +32,7 @@ def test_SampleToSurface_inputs(): interp_method=dict(argstr='--interp %s', ), mask_label=dict( argstr='--mask %s', + extensions=None, xor=['cortex_mask'], ), mni152reg=dict( @@ -45,6 +46,7 @@ def test_SampleToSurface_inputs(): ), out_file=dict( argstr='--o %s', + extensions=None, genfile=True, ), out_type=dict(argstr='--out_type %s', ), @@ -56,9 +58,13 @@ def test_SampleToSurface_inputs(): mandatory=True, xor=['sampling_method'], ), - reference_file=dict(argstr='--ref %s', ), + reference_file=dict( + argstr='--ref %s', + extensions=None, + ), reg_file=dict( argstr='--reg %s', + extensions=None, mandatory=True, xor=['reg_file', 'reg_header', 'mni152reg'], ), @@ -86,6 +92,7 @@ def test_SampleToSurface_inputs(): smooth_vol=dict(argstr='--fwhm %.3f', ), source_file=dict( argstr='--mov %s', + extensions=None, mandatory=True, ), subject_id=dict(), @@ -105,9 +112,9 @@ def test_SampleToSurface_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_SampleToSurface_outputs(): output_map = dict( - hits_file=dict(), - out_file=dict(), - vox_file=dict(), + hits_file=dict(extensions=None, ), + out_file=dict(extensions=None, ), + vox_file=dict(extensions=None, ), ) outputs = SampleToSurface.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py b/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py index 8feb61d9d8..e9a17b13ea 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py @@ -14,11 +14,15 @@ def test_SegStats_inputs(): avgwf_file=dict(argstr='--avgwfvol %s', ), avgwf_txt_file=dict(argstr='--avgwf %s', ), brain_vol=dict(argstr='--%s', ), - brainmask_file=dict(argstr='--brainmask %s', ), + brainmask_file=dict( + argstr='--brainmask %s', + extensions=None, + ), calc_power=dict(argstr='--%s', ), calc_snr=dict(argstr='--snr', ), color_table_file=dict( argstr='--ctab %s', + extensions=None, xor=('color_table_file', 'default_color_table', 'gca_color_table'), ), cortex_vol_from_surf=dict(argstr='--surf-ctx-vol', ), @@ -39,26 +43,40 @@ def test_SegStats_inputs(): frame=dict(argstr='--frame %d', ), gca_color_table=dict( argstr='--ctab-gca %s', + extensions=None, xor=('color_table_file', 'default_color_table', 'gca_color_table'), ), - in_file=dict(argstr='--i %s', ), - in_intensity=dict(argstr='--in %s --in-intensity-name %s', ), + in_file=dict( + argstr='--i %s', + extensions=None, + ), + in_intensity=dict( + argstr='--in %s --in-intensity-name %s', + extensions=None, + ), intensity_units=dict( argstr='--in-intensity-units %s', requires=['in_intensity'], ), mask_erode=dict(argstr='--maskerode %d', ), - mask_file=dict(argstr='--mask %s', ), + mask_file=dict( + argstr='--mask %s', + extensions=None, + ), mask_frame=dict(requires=['mask_file'], ), mask_invert=dict(argstr='--maskinvert', ), mask_sign=dict(), mask_thresh=dict(argstr='--maskthresh %f', ), multiply=dict(argstr='--mul %f', ), non_empty_only=dict(argstr='--nonempty', ), - 
partial_volume_file=dict(argstr='--pv %s', ), + partial_volume_file=dict( + argstr='--pv %s', + extensions=None, + ), segment_id=dict(argstr='--id %s...', ), segmentation_file=dict( argstr='--seg %s', + extensions=None, mandatory=True, xor=('segmentation_file', 'annot', 'surf_label'), ), @@ -67,6 +85,7 @@ def test_SegStats_inputs(): subjects_dir=dict(), summary_file=dict( argstr='--sum %s', + extensions=None, genfile=True, position=-1, ), @@ -87,10 +106,10 @@ def test_SegStats_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_SegStats_outputs(): output_map = dict( - avgwf_file=dict(), - avgwf_txt_file=dict(), - sf_avg_file=dict(), - summary_file=dict(), + avgwf_file=dict(extensions=None, ), + avgwf_txt_file=dict(extensions=None, ), + sf_avg_file=dict(extensions=None, ), + summary_file=dict(extensions=None, ), ) outputs = SegStats.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py index e65dc82e3b..3cf47c71ce 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py @@ -11,15 +11,19 @@ def test_SegStatsReconAll_inputs(): xor=('segmentation_file', 'annot', 'surf_label'), ), args=dict(argstr='%s', ), - aseg=dict(), + aseg=dict(extensions=None, ), avgwf_file=dict(argstr='--avgwfvol %s', ), avgwf_txt_file=dict(argstr='--avgwf %s', ), brain_vol=dict(argstr='--%s', ), - brainmask_file=dict(argstr='--brainmask %s', ), + brainmask_file=dict( + argstr='--brainmask %s', + extensions=None, + ), calc_power=dict(argstr='--%s', ), calc_snr=dict(argstr='--snr', ), color_table_file=dict( argstr='--ctab %s', + extensions=None, xor=('color_table_file', 'default_color_table', 'gca_color_table'), ), copy_inputs=dict(), @@ -41,34 +45,66 @@ def test_SegStatsReconAll_inputs(): frame=dict(argstr='--frame %d', ), gca_color_table=dict( argstr='--ctab-gca %s', + extensions=None, xor=('color_table_file', 'default_color_table', 'gca_color_table'), ), - in_file=dict(argstr='--i %s', ), - in_intensity=dict(argstr='--in %s --in-intensity-name %s', ), + in_file=dict( + argstr='--i %s', + extensions=None, + ), + in_intensity=dict( + argstr='--in %s --in-intensity-name %s', + extensions=None, + ), intensity_units=dict( argstr='--in-intensity-units %s', requires=['in_intensity'], ), - lh_orig_nofix=dict(mandatory=True, ), - lh_pial=dict(mandatory=True, ), - lh_white=dict(mandatory=True, ), + lh_orig_nofix=dict( + extensions=None, + mandatory=True, + ), + lh_pial=dict( + extensions=None, + mandatory=True, + ), + lh_white=dict( + extensions=None, + mandatory=True, + ), mask_erode=dict(argstr='--maskerode %d', ), - mask_file=dict(argstr='--mask %s', ), + mask_file=dict( + argstr='--mask %s', + extensions=None, + ), mask_frame=dict(requires=['mask_file'], ), mask_invert=dict(argstr='--maskinvert', ), mask_sign=dict(), mask_thresh=dict(argstr='--maskthresh %f', ), multiply=dict(argstr='--mul %f', ), non_empty_only=dict(argstr='--nonempty', ), - partial_volume_file=dict(argstr='--pv %s', ), - presurf_seg=dict(), - rh_orig_nofix=dict(mandatory=True, ), - rh_pial=dict(mandatory=True, ), - rh_white=dict(mandatory=True, ), + partial_volume_file=dict( + argstr='--pv %s', + extensions=None, + ), + presurf_seg=dict(extensions=None, ), + rh_orig_nofix=dict( + extensions=None, + mandatory=True, + ), + rh_pial=dict( + extensions=None, + mandatory=True, + ), + rh_white=dict( + extensions=None, + mandatory=True, + ), 
ribbon=dict(mandatory=True, ), segment_id=dict(argstr='--id %s...', ), segmentation_file=dict( argstr='--seg %s', + extensions=None, mandatory=True, xor=('segmentation_file', 'annot', 'surf_label'), ), @@ -82,6 +118,7 @@ def test_SegStatsReconAll_inputs(): subjects_dir=dict(), summary_file=dict( argstr='--sum %s', + extensions=None, genfile=True, position=-1, ), @@ -92,7 +129,10 @@ def test_SegStatsReconAll_inputs(): xor=('segmentation_file', 'annot', 'surf_label'), ), total_gray=dict(argstr='--totalgray', ), - transform=dict(mandatory=True, ), + transform=dict( + extensions=None, + mandatory=True, + ), vox=dict(argstr='--vox %s', ), wm_vol_from_surf=dict(argstr='--surf-wm-vol', ), ) @@ -103,10 +143,10 @@ def test_SegStatsReconAll_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_SegStatsReconAll_outputs(): output_map = dict( - avgwf_file=dict(), - avgwf_txt_file=dict(), - sf_avg_file=dict(), - summary_file=dict(), + avgwf_file=dict(extensions=None, ), + avgwf_txt_file=dict(extensions=None, ), + sf_avg_file=dict(extensions=None, ), + summary_file=dict(extensions=None, ), ) outputs = SegStatsReconAll.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py b/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py index e6b5fb2679..8cda53a762 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py @@ -13,11 +13,16 @@ def test_SegmentCC_inputs(): ), in_file=dict( argstr='-aseg %s', + extensions=None, + mandatory=True, + ), + in_norm=dict( + extensions=None, mandatory=True, ), - in_norm=dict(mandatory=True, ), out_file=dict( argstr='-o %s', + extensions=None, hash_files=False, keep_extension=False, name_source=['in_file'], @@ -25,6 +30,7 @@ def test_SegmentCC_inputs(): ), out_rotation=dict( argstr='-lta %s', + extensions=None, mandatory=True, ), subject_id=dict( @@ -42,8 +48,8 @@ def test_SegmentCC_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_SegmentCC_outputs(): output_map = dict( - out_file=dict(), - out_rotation=dict(), + out_file=dict(extensions=None, ), + out_rotation=dict(extensions=None, ), ) outputs = SegmentCC.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py b/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py index aa742e8fea..c86bc926dc 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py @@ -12,11 +12,13 @@ def test_SegmentWM_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), out_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, ), @@ -28,7 +30,7 @@ def test_SegmentWM_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SegmentWM_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = SegmentWM.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py b/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py index fe4581dee0..3c52c01430 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py @@ -12,6 +12,7 @@ def test_Smooth_inputs(): ), in_file=dict( argstr='--i %s', + extensions=None, mandatory=True, ), num_iters=dict( @@ -29,10 +30,12 @@ def test_Smooth_inputs(): ), 
reg_file=dict( argstr='--reg %s', + extensions=None, mandatory=True, ), smoothed_file=dict( argstr='--o %s', + extensions=None, genfile=True, ), subjects_dir=dict(), @@ -50,7 +53,7 @@ def test_Smooth_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Smooth_outputs(): - output_map = dict(smoothed_file=dict(), ) + output_map = dict(smoothed_file=dict(extensions=None, ), ) outputs = Smooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py b/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py index 8ce4dce075..1f855c5606 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py @@ -17,14 +17,22 @@ def test_SmoothTessellation_inputs(): in_file=dict( argstr='%s', copyfile=True, + extensions=None, mandatory=True, position=-2, ), normalize_area=dict(argstr='-area', ), - out_area_file=dict(argstr='-b %s', ), - out_curvature_file=dict(argstr='-c %s', ), + out_area_file=dict( + argstr='-b %s', + extensions=None, + ), + out_curvature_file=dict( + argstr='-c %s', + extensions=None, + ), out_file=dict( argstr='%s', + extensions=None, genfile=True, position=-1, ), @@ -41,7 +49,7 @@ def test_SmoothTessellation_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SmoothTessellation_outputs(): - output_map = dict(surface=dict(), ) + output_map = dict(surface=dict(extensions=None, ), ) outputs = SmoothTessellation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py b/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py index 461398e6a8..a5795653fc 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py @@ -13,14 +13,19 @@ def test_Sphere_inputs(): in_file=dict( argstr='%s', copyfile=True, + extensions=None, mandatory=True, position=-2, ), - in_smoothwm=dict(copyfile=True, ), + in_smoothwm=dict( + copyfile=True, + extensions=None, + ), magic=dict(argstr='-q', ), num_threads=dict(), out_file=dict( argstr='%s', + extensions=None, hash_files=False, name_source=['in_file'], name_template='%s.sphere', @@ -35,7 +40,7 @@ def test_Sphere_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Sphere_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Sphere.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py b/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py index efdc032787..3a38f8c205 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py @@ -26,14 +26,19 @@ def test_SphericalAverage_inputs(): genfile=True, position=-2, ), - in_orig=dict(argstr='-orig %s', ), + in_orig=dict( + argstr='-orig %s', + extensions=None, + ), in_surf=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), out_file=dict( argstr='%s', + extensions=None, genfile=True, position=-1, ), @@ -55,7 +60,7 @@ def test_SphericalAverage_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) 
== value def test_SphericalAverage_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = SphericalAverage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py b/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py index ca3f96c42b..b291385039 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py @@ -21,12 +21,14 @@ def test_Surface2VolTransform_inputs(): projfrac=dict(argstr='--projfrac %s', ), reg_file=dict( argstr='--volreg %s', + extensions=None, mandatory=True, xor=['subject_id'], ), source_file=dict( argstr='--surfval %s', copyfile=False, + extensions=None, mandatory=True, xor=['mkmask'], ), @@ -36,15 +38,20 @@ def test_Surface2VolTransform_inputs(): ), subjects_dir=dict(argstr='--sd %s', ), surf_name=dict(argstr='--surf %s', ), - template_file=dict(argstr='--template %s', ), + template_file=dict( + argstr='--template %s', + extensions=None, + ), transformed_file=dict( argstr='--outvol %s', + extensions=None, hash_files=False, name_source=['source_file'], name_template='%s_asVol.nii', ), vertexvol_file=dict( argstr='--vtxvol %s', + extensions=None, hash_files=False, name_source=['source_file'], name_template='%s_asVol_vertex.nii', @@ -57,8 +64,8 @@ def test_Surface2VolTransform_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Surface2VolTransform_outputs(): output_map = dict( - transformed_file=dict(), - vertexvol_file=dict(), + transformed_file=dict(extensions=None, ), + vertexvol_file=dict(extensions=None, ), ) outputs = Surface2VolTransform.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py index 84bef6ed7a..c845ae50f7 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py @@ -24,10 +24,12 @@ def test_SurfaceSmooth_inputs(): ), in_file=dict( argstr='--sval %s', + extensions=None, mandatory=True, ), out_file=dict( argstr='--tval %s', + extensions=None, genfile=True, ), reshape=dict(argstr='--reshape', ), @@ -47,7 +49,7 @@ def test_SurfaceSmooth_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SurfaceSmooth_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = SurfaceSmooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py index a413d410b1..26dcbe3458 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py @@ -7,6 +7,7 @@ def test_SurfaceSnapshots_inputs(): input_map = dict( annot_file=dict( argstr='-annotation %s', + extensions=None, xor=['annot_name'], ), annot_name=dict( @@ -14,7 +15,10 @@ def test_SurfaceSnapshots_inputs(): xor=['annot_file'], ), args=dict(argstr='%s', ), - colortable=dict(argstr='-colortable %s', ), + colortable=dict( + argstr='-colortable %s', + extensions=None, + ), demean_overlay=dict(argstr='-zm', ), environ=dict( nohash=True, @@ -32,6 +36,7 @@ def test_SurfaceSnapshots_inputs(): 
invert_overlay=dict(argstr='-invphaseflag 1', ), label_file=dict( argstr='-label %s', + extensions=None, xor=['label_name'], ), label_name=dict( @@ -47,6 +52,7 @@ def test_SurfaceSnapshots_inputs(): orig_suffix=dict(argstr='-orig %s', ), overlay=dict( argstr='-overlay %s', + extensions=None, requires=['overlay_range'], ), overlay_range=dict(argstr='%s', ), @@ -55,7 +61,10 @@ def test_SurfaceSnapshots_inputs(): argstr='-overlay-reg %s', xor=['overlay_reg', 'identity_reg', 'mni152_reg'], ), - patch_file=dict(argstr='-patch %s', ), + patch_file=dict( + argstr='-patch %s', + extensions=None, + ), reverse_overlay=dict(argstr='-revphaseflag 1', ), screenshot_stem=dict(), show_color_scale=dict(argstr='-colscalebarflag 1', ), @@ -84,6 +93,7 @@ def test_SurfaceSnapshots_inputs(): ), tcl_script=dict( argstr='%s', + extensions=None, genfile=True, ), truncate_overlay=dict(argstr='-truncphaseflag 1', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py index 0546a275dc..5d0dc75073 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py @@ -16,17 +16,20 @@ def test_SurfaceTransform_inputs(): ), out_file=dict( argstr='--tval %s', + extensions=None, genfile=True, ), reshape=dict(argstr='--reshape', ), reshape_factor=dict(argstr='--reshape-factor', ), source_annot_file=dict( argstr='--sval-annot %s', + extensions=None, mandatory=True, xor=['source_file'], ), source_file=dict( argstr='--sval %s', + extensions=None, mandatory=True, xor=['source_annot_file'], ), @@ -52,7 +55,7 @@ def test_SurfaceTransform_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SurfaceTransform_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = SurfaceTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py b/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py index ea121d877e..a216d35efa 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py @@ -21,16 +21,19 @@ def test_SynthesizeFLASH_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, ), pd_image=dict( argstr='%s', + extensions=None, mandatory=True, position=6, ), subjects_dir=dict(), t1_image=dict( argstr='%s', + extensions=None, mandatory=True, position=5, ), @@ -51,7 +54,7 @@ def test_SynthesizeFLASH_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SynthesizeFLASH_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = SynthesizeFLASH.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py b/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py index 8ce925fcc7..9d248c1a7d 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py @@ -13,10 +13,12 @@ def test_TalairachAVI_inputs(): ), in_file=dict( argstr='--i %s', + extensions=None, mandatory=True, ), out_file=dict( argstr='--xfm %s', + extensions=None, mandatory=True, ), subjects_dir=dict(), diff --git 
a/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py b/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py index d07e572365..053d429c9b 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py @@ -12,6 +12,7 @@ def test_TalairachQC_inputs(): ), log_file=dict( argstr='%s', + extensions=None, mandatory=True, position=0, ), @@ -23,7 +24,11 @@ def test_TalairachQC_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TalairachQC_outputs(): - output_map = dict(log_file=dict(usedefault=True, ), ) + output_map = dict( + log_file=dict( + extensions=None, + usedefault=True, + ), ) outputs = TalairachQC.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py b/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py index 17f8e53a1f..a1b04aab81 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py @@ -10,7 +10,10 @@ def test_Tkregister2_inputs(): nohash=True, usedefault=True, ), - fsl_in_matrix=dict(argstr='--fsl %s', ), + fsl_in_matrix=dict( + argstr='--fsl %s', + extensions=None, + ), fsl_out=dict(argstr='--fslregout %s', ), fstal=dict( argstr='--fstal', @@ -25,10 +28,14 @@ def test_Tkregister2_inputs(): argstr='--ltaout-inv', requires=['lta_in'], ), - lta_in=dict(argstr='--lta %s', ), + lta_in=dict( + argstr='--lta %s', + extensions=None, + ), lta_out=dict(argstr='--ltaout %s', ), moving_image=dict( argstr='--mov %s', + extensions=None, mandatory=True, ), movscale=dict(argstr='--movscale %f', ), @@ -38,6 +45,7 @@ def test_Tkregister2_inputs(): ), reg_file=dict( argstr='--reg %s', + extensions=None, mandatory=True, usedefault=True, ), @@ -46,9 +54,13 @@ def test_Tkregister2_inputs(): subjects_dir=dict(), target_image=dict( argstr='--targ %s', + extensions=None, xor=['fstarg'], ), - xfm=dict(argstr='--xfm %s', ), + xfm=dict( + argstr='--xfm %s', + extensions=None, + ), ) inputs = Tkregister2.input_spec() @@ -57,9 +69,9 @@ def test_Tkregister2_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Tkregister2_outputs(): output_map = dict( - fsl_file=dict(), - lta_file=dict(), - reg_file=dict(), + fsl_file=dict(extensions=None, ), + lta_file=dict(extensions=None, ), + reg_file=dict(extensions=None, ), ) outputs = Tkregister2.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py b/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py index 9427e60940..11b1d8c2ce 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py @@ -8,6 +8,7 @@ def test_UnpackSDICOMDir_inputs(): args=dict(argstr='%s', ), config=dict( argstr='-cfg %s', + extensions=None, mandatory=True, xor=('run_info', 'config', 'seq_config'), ), @@ -16,7 +17,10 @@ def test_UnpackSDICOMDir_inputs(): nohash=True, usedefault=True, ), - log_file=dict(argstr='-log %s', ), + log_file=dict( + argstr='-log %s', + extensions=None, + ), no_info_dump=dict(argstr='-noinfodump', ), no_unpack_err=dict(argstr='-no-unpackerr', ), output_dir=dict(argstr='-targ %s', ), @@ -25,9 +29,13 @@ def test_UnpackSDICOMDir_inputs(): mandatory=True, xor=('run_info', 'config', 'seq_config'), ), - scan_only=dict(argstr='-scanonly %s', ), + scan_only=dict( + argstr='-scanonly %s', + extensions=None, 
+ ), seq_config=dict( argstr='-seqcfg %s', + extensions=None, mandatory=True, xor=('run_info', 'config', 'seq_config'), ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py b/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py index 3e898a81f7..bfdf5a457e 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py @@ -6,7 +6,10 @@ def test_VolumeMask_inputs(): input_map = dict( args=dict(argstr='%s', ), - aseg=dict(xor=['in_aseg'], ), + aseg=dict( + extensions=None, + xor=['in_aseg'], + ), copy_inputs=dict(), environ=dict( nohash=True, @@ -14,6 +17,7 @@ def test_VolumeMask_inputs(): ), in_aseg=dict( argstr='--aseg_name %s', + extensions=None, xor=['aseg'], ), left_ribbonlabel=dict( @@ -24,10 +28,22 @@ def test_VolumeMask_inputs(): argstr='--label_left_white %d', mandatory=True, ), - lh_pial=dict(mandatory=True, ), - lh_white=dict(mandatory=True, ), - rh_pial=dict(mandatory=True, ), - rh_white=dict(mandatory=True, ), + lh_pial=dict( + extensions=None, + mandatory=True, + ), + lh_white=dict( + extensions=None, + mandatory=True, + ), + rh_pial=dict( + extensions=None, + mandatory=True, + ), + rh_white=dict( + extensions=None, + mandatory=True, + ), right_ribbonlabel=dict( argstr='--label_right_ribbon %d', mandatory=True, @@ -52,9 +68,9 @@ def test_VolumeMask_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_VolumeMask_outputs(): output_map = dict( - lh_ribbon=dict(), - out_ribbon=dict(), - rh_ribbon=dict(), + lh_ribbon=dict(extensions=None, ), + out_ribbon=dict(extensions=None, ), + rh_ribbon=dict(extensions=None, ), ) outputs = VolumeMask.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py b/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py index 3586e7d234..8112a8f52f 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py @@ -8,6 +8,7 @@ def test_WatershedSkullStrip_inputs(): args=dict(argstr='%s', ), brain_atlas=dict( argstr='-brain_atlas %s', + extensions=None, position=-4, ), environ=dict( @@ -16,11 +17,13 @@ def test_WatershedSkullStrip_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), out_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, usedefault=True, @@ -29,6 +32,7 @@ def test_WatershedSkullStrip_inputs(): t1=dict(argstr='-T1', ), transform=dict( argstr='%s', + extensions=None, position=-3, ), ) @@ -38,7 +42,7 @@ def test_WatershedSkullStrip_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_WatershedSkullStrip_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = WatershedSkullStrip.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_AR1Image.py b/nipype/interfaces/fsl/tests/test_auto_AR1Image.py index 32efb1826e..260f53b978 100644 --- a/nipype/interfaces/fsl/tests/test_auto_AR1Image.py +++ b/nipype/interfaces/fsl/tests/test_auto_AR1Image.py @@ -17,6 +17,7 @@ def test_AR1Image_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -30,6 +31,7 @@ def test_AR1Image_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=-2, @@ -46,7 +48,7 @@ def 
test_AR1Image_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_AR1Image_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = AR1Image.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py b/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py index 062d6367f7..de32f63f5f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py +++ b/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py @@ -23,6 +23,7 @@ def test_AccuracyTester_inputs(): ), trained_wts_file=dict( argstr='%s', + extensions=None, mandatory=True, position=1, ), diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py b/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py index 8a39956c18..ad8df118b6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py @@ -12,6 +12,7 @@ def test_ApplyMask_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -21,6 +22,7 @@ def test_ApplyMask_inputs(): ), mask_file=dict( argstr='-mas %s', + extensions=None, mandatory=True, position=4, ), @@ -30,6 +32,7 @@ def test_ApplyMask_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=-2, @@ -46,7 +49,7 @@ def test_ApplyMask_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ApplyMask_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = ApplyMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py b/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py index 7a803f9695..805884f23c 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py @@ -9,6 +9,7 @@ def test_ApplyTOPUP_inputs(): datatype=dict(argstr='-d=%s', ), encoding_file=dict( argstr='--datain=%s', + extensions=None, mandatory=True, ), environ=dict( @@ -27,16 +28,19 @@ def test_ApplyTOPUP_inputs(): in_topup_fieldcoef=dict( argstr='--topup=%s', copyfile=False, + extensions=None, requires=['in_topup_movpar'], ), in_topup_movpar=dict( copyfile=False, + extensions=None, requires=['in_topup_fieldcoef'], ), interp=dict(argstr='--interp=%s', ), method=dict(argstr='--method=%s', ), out_corrected=dict( argstr='--out=%s', + extensions=None, name_source=['in_files'], name_template='%s_corrected', ), @@ -48,7 +52,7 @@ def test_ApplyTOPUP_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ApplyTOPUP_outputs(): - output_map = dict(out_corrected=dict(), ) + output_map = dict(out_corrected=dict(extensions=None, ), ) outputs = ApplyTOPUP.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py b/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py index 71977cb873..2af381012b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py @@ -15,9 +15,13 @@ def test_ApplyWarp_inputs(): nohash=True, usedefault=True, ), - field_file=dict(argstr='--warp=%s', ), + field_file=dict( + argstr='--warp=%s', + extensions=None, + ), in_file=dict( argstr='--in=%s', + extensions=None, 
mandatory=True, position=0, ), @@ -25,18 +29,29 @@ def test_ApplyWarp_inputs(): argstr='--interp=%s', position=-2, ), - mask_file=dict(argstr='--mask=%s', ), + mask_file=dict( + argstr='--mask=%s', + extensions=None, + ), out_file=dict( argstr='--out=%s', + extensions=None, genfile=True, hash_files=False, position=2, ), output_type=dict(), - postmat=dict(argstr='--postmat=%s', ), - premat=dict(argstr='--premat=%s', ), + postmat=dict( + argstr='--postmat=%s', + extensions=None, + ), + premat=dict( + argstr='--premat=%s', + extensions=None, + ), ref_file=dict( argstr='--ref=%s', + extensions=None, mandatory=True, position=1, ), @@ -54,7 +69,7 @@ def test_ApplyWarp_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ApplyWarp_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = ApplyWarp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py b/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py index f515be1f04..38a45ce775 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py @@ -44,10 +44,12 @@ def test_ApplyXFM_inputs(): ), fieldmap=dict( argstr='-fieldmap %s', + extensions=None, min_ver='5.0.0', ), fieldmapmask=dict( argstr='-fieldmapmask %s', + extensions=None, min_ver='5.0.0', ), fine_search=dict( @@ -57,11 +59,18 @@ def test_ApplyXFM_inputs(): force_scaling=dict(argstr='-forcescaling', ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, position=0, ), - in_matrix_file=dict(argstr='-init %s', ), - in_weight=dict(argstr='-inweight %s', ), + in_matrix_file=dict( + argstr='-init %s', + extensions=None, + ), + in_weight=dict( + argstr='-inweight %s', + extensions=None, + ), interp=dict(argstr='-interp %s', ), min_sampling=dict( argstr='-minsampling %f', @@ -73,12 +82,14 @@ def test_ApplyXFM_inputs(): no_search=dict(argstr='-nosearch', ), out_file=dict( argstr='-out %s', + extensions=None, hash_files=False, name_source=['in_file'], name_template='%s_flirt', position=2, ), out_log=dict( + extensions=None, keep_extension=True, name_source=['in_file'], name_template='%s_flirt.log', @@ -86,6 +97,7 @@ def test_ApplyXFM_inputs(): ), out_matrix_file=dict( argstr='-omat %s', + extensions=None, hash_files=False, keep_extension=True, name_source=['in_file'], @@ -101,15 +113,22 @@ def test_ApplyXFM_inputs(): argstr='-pedir %d', min_ver='5.0.0', ), - ref_weight=dict(argstr='-refweight %s', ), + ref_weight=dict( + argstr='-refweight %s', + extensions=None, + ), reference=dict( argstr='-ref %s', + extensions=None, mandatory=True, position=1, ), rigid2D=dict(argstr='-2D', ), save_log=dict(), - schedule=dict(argstr='-schedule %s', ), + schedule=dict( + argstr='-schedule %s', + extensions=None, + ), searchr_x=dict( argstr='-searchrx %s', units='degrees', @@ -131,14 +150,17 @@ def test_ApplyXFM_inputs(): verbose=dict(argstr='-verbose %d', ), wm_seg=dict( argstr='-wmseg %s', + extensions=None, min_ver='5.0.0', ), wmcoords=dict( argstr='-wmcoords %s', + extensions=None, min_ver='5.0.0', ), wmnorms=dict( argstr='-wmnorms %s', + extensions=None, min_ver='5.0.0', ), ) @@ -149,9 +171,9 @@ def test_ApplyXFM_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_ApplyXFM_outputs(): output_map = dict( - out_file=dict(), - out_log=dict(), - out_matrix_file=dict(), + out_file=dict(extensions=None, ), + 
out_log=dict(extensions=None, ), + out_matrix_file=dict(extensions=None, ), ) outputs = ApplyXFM.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_AvScale.py b/nipype/interfaces/fsl/tests/test_auto_AvScale.py index 762ada916d..a6df228f71 100644 --- a/nipype/interfaces/fsl/tests/test_auto_AvScale.py +++ b/nipype/interfaces/fsl/tests/test_auto_AvScale.py @@ -13,10 +13,12 @@ def test_AvScale_inputs(): ), mat_file=dict( argstr='%s', + extensions=None, position=-2, ), ref_file=dict( argstr='%s', + extensions=None, position=-1, ), ) diff --git a/nipype/interfaces/fsl/tests/test_auto_B0Calc.py b/nipype/interfaces/fsl/tests/test_auto_B0Calc.py index 96ec6b949c..1127f900d1 100644 --- a/nipype/interfaces/fsl/tests/test_auto_B0Calc.py +++ b/nipype/interfaces/fsl/tests/test_auto_B0Calc.py @@ -32,11 +32,13 @@ def test_B0Calc_inputs(): ), in_file=dict( argstr='-i %s', + extensions=None, mandatory=True, position=0, ), out_file=dict( argstr='-o %s', + extensions=None, name_source=['in_file'], name_template='%s_b0field', output_name='out_file', @@ -81,7 +83,7 @@ def test_B0Calc_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_B0Calc_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = B0Calc.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py b/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py index 1ac5db111b..e93cf42b9d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py +++ b/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py @@ -18,13 +18,22 @@ def test_BEDPOSTX5_inputs(): argstr='--burnin_noard=%d', usedefault=True, ), - bvals=dict(mandatory=True, ), - bvecs=dict(mandatory=True, ), + bvals=dict( + extensions=None, + mandatory=True, + ), + bvecs=dict( + extensions=None, + mandatory=True, + ), cnlinear=dict( argstr='--cnonlinear', xor=('no_spat', 'non_linear', 'cnlinear'), ), - dwi=dict(mandatory=True, ), + dwi=dict( + extensions=None, + mandatory=True, + ), environ=dict( nohash=True, usedefault=True, @@ -42,10 +51,13 @@ def test_BEDPOSTX5_inputs(): usedefault=True, ), fudge=dict(argstr='-w %d', ), - grad_dev=dict(), + grad_dev=dict(extensions=None, ), gradnonlin=dict(argstr='-g', ), logdir=dict(argstr='--logdir=%s', ), - mask=dict(mandatory=True, ), + mask=dict( + extensions=None, + mandatory=True, + ), model=dict(argstr='-model %d', ), n_fibres=dict( argstr='-n %d', @@ -96,8 +108,8 @@ def test_BEDPOSTX5_outputs(): output_map = dict( dyads=dict(), dyads_dispersion=dict(), - mean_S0samples=dict(), - mean_dsamples=dict(), + mean_S0samples=dict(extensions=None, ), + mean_dsamples=dict(extensions=None, ), mean_fsamples=dict(), mean_phsamples=dict(), mean_thsamples=dict(), diff --git a/nipype/interfaces/fsl/tests/test_auto_BET.py b/nipype/interfaces/fsl/tests/test_auto_BET.py index eccf348b3e..ea155c249b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_BET.py +++ b/nipype/interfaces/fsl/tests/test_auto_BET.py @@ -22,6 +22,7 @@ def test_BET_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=0, ), @@ -30,6 +31,7 @@ def test_BET_inputs(): no_output=dict(argstr='-n', ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=1, @@ -68,6 +70,7 @@ def test_BET_inputs(): ), t2_guided=dict( argstr='-A2 %s', + extensions=None, xor=('functional', 'reduce_bias', 'robust', 'padding', 'remove_eyes', 'surfaces', 
't2_guided'), ), @@ -81,17 +84,17 @@ def test_BET_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_BET_outputs(): output_map = dict( - inskull_mask_file=dict(), - inskull_mesh_file=dict(), - mask_file=dict(), - meshfile=dict(), - out_file=dict(), - outline_file=dict(), - outskin_mask_file=dict(), - outskin_mesh_file=dict(), - outskull_mask_file=dict(), - outskull_mesh_file=dict(), - skull_mask_file=dict(), + inskull_mask_file=dict(extensions=None, ), + inskull_mesh_file=dict(extensions=None, ), + mask_file=dict(extensions=None, ), + meshfile=dict(extensions=None, ), + out_file=dict(extensions=None, ), + outline_file=dict(extensions=None, ), + outskin_mask_file=dict(extensions=None, ), + outskin_mesh_file=dict(extensions=None, ), + outskull_mask_file=dict(extensions=None, ), + outskull_mesh_file=dict(extensions=None, ), + skull_mask_file=dict(extensions=None, ), ) outputs = BET.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py b/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py index 2548ae68e5..8020c49bbc 100644 --- a/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py @@ -12,6 +12,7 @@ def test_BinaryMaths_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -25,6 +26,7 @@ def test_BinaryMaths_inputs(): ), operand_file=dict( argstr='%s', + extensions=None, mandatory=True, position=5, xor=['operand_value'], @@ -42,6 +44,7 @@ def test_BinaryMaths_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=-2, @@ -58,7 +61,7 @@ def test_BinaryMaths_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_BinaryMaths_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = BinaryMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py b/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py index aab508f2bf..665a2cb47d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py +++ b/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py @@ -12,6 +12,7 @@ def test_ChangeDataType_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -25,6 +26,7 @@ def test_ChangeDataType_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=-2, @@ -42,7 +44,7 @@ def test_ChangeDataType_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ChangeDataType_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = ChangeDataType.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Classifier.py b/nipype/interfaces/fsl/tests/test_auto_Classifier.py index a8db888acb..b477b38ea4 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Classifier.py +++ b/nipype/interfaces/fsl/tests/test_auto_Classifier.py @@ -6,7 +6,7 @@ def test_Classifier_inputs(): input_map = dict( args=dict(argstr='%s', ), - artifacts_list_file=dict(), + artifacts_list_file=dict(extensions=None, ), environ=dict( nohash=True, usedefault=True, @@ -24,6 +24,7 @@ def test_Classifier_inputs(): trained_wts_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=2, 
), @@ -34,7 +35,7 @@ def test_Classifier_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Classifier_outputs(): - output_map = dict(artifacts_list_file=dict(), ) + output_map = dict(artifacts_list_file=dict(extensions=None, ), ) outputs = Classifier.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Cleaner.py b/nipype/interfaces/fsl/tests/test_auto_Cleaner.py index 5fc505a174..a75df99db5 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Cleaner.py +++ b/nipype/interfaces/fsl/tests/test_auto_Cleaner.py @@ -12,6 +12,7 @@ def test_Cleaner_inputs(): args=dict(argstr='%s', ), artifacts_list_file=dict( argstr='%s', + extensions=None, mandatory=True, position=1, ), @@ -47,7 +48,7 @@ def test_Cleaner_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Cleaner_outputs(): - output_map = dict(cleaned_functional_file=dict(), ) + output_map = dict(cleaned_functional_file=dict(extensions=None, ), ) outputs = Cleaner.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Cluster.py b/nipype/interfaces/fsl/tests/test_auto_Cluster.py index f876f85e64..6ed34ab816 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Cluster.py +++ b/nipype/interfaces/fsl/tests/test_auto_Cluster.py @@ -23,6 +23,7 @@ def test_Cluster_inputs(): ), in_file=dict( argstr='--in=%s', + extensions=None, mandatory=True, ), minclustersize=dict( @@ -72,7 +73,10 @@ def test_Cluster_inputs(): argstr='--pthresh=%.10f', requires=['dlh', 'volume'], ), - std_space_file=dict(argstr='--stdvol=%s', ), + std_space_file=dict( + argstr='--stdvol=%s', + extensions=None, + ), threshold=dict( argstr='--thresh=%.10f', mandatory=True, @@ -82,8 +86,14 @@ def test_Cluster_inputs(): usedefault=True, ), volume=dict(argstr='--volume=%d', ), - warpfield_file=dict(argstr='--warpvol=%s', ), - xfm_file=dict(argstr='--xfm=%s', ), + warpfield_file=dict( + argstr='--warpvol=%s', + extensions=None, + ), + xfm_file=dict( + argstr='--xfm=%s', + extensions=None, + ), ) inputs = Cluster.input_spec() @@ -92,14 +102,14 @@ def test_Cluster_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Cluster_outputs(): output_map = dict( - index_file=dict(), - localmax_txt_file=dict(), - localmax_vol_file=dict(), - max_file=dict(), - mean_file=dict(), - pval_file=dict(), - size_file=dict(), - threshold_file=dict(), + index_file=dict(extensions=None, ), + localmax_txt_file=dict(extensions=None, ), + localmax_vol_file=dict(extensions=None, ), + max_file=dict(extensions=None, ), + mean_file=dict(extensions=None, ), + pval_file=dict(extensions=None, ), + size_file=dict(extensions=None, ), + threshold_file=dict(extensions=None, ), ) outputs = Cluster.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Complex.py b/nipype/interfaces/fsl/tests/test_auto_Complex.py index 7e49bcdfa8..09f2add2bc 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Complex.py +++ b/nipype/interfaces/fsl/tests/test_auto_Complex.py @@ -16,10 +16,12 @@ def test_Complex_inputs(): ), complex_in_file=dict( argstr='%s', + extensions=None, position=2, ), complex_in_file2=dict( argstr='%s', + extensions=None, position=3, ), complex_merge=dict( @@ -33,6 +35,7 @@ def test_Complex_inputs(): ), complex_out_file=dict( argstr='%s', + extensions=None, genfile=True, position=-3, xor=[ @@ -67,10 +70,12 @@ def test_Complex_inputs(): ), 
imaginary_in_file=dict( argstr='%s', + extensions=None, position=3, ), imaginary_out_file=dict( argstr='%s', + extensions=None, genfile=True, position=-3, xor=[ @@ -81,10 +86,12 @@ def test_Complex_inputs(): ), magnitude_in_file=dict( argstr='%s', + extensions=None, position=2, ), magnitude_out_file=dict( argstr='%s', + extensions=None, genfile=True, position=-4, xor=[ @@ -96,10 +103,12 @@ def test_Complex_inputs(): output_type=dict(), phase_in_file=dict( argstr='%s', + extensions=None, position=3, ), phase_out_file=dict( argstr='%s', + extensions=None, genfile=True, position=-3, xor=[ @@ -118,10 +127,12 @@ def test_Complex_inputs(): ), real_in_file=dict( argstr='%s', + extensions=None, position=2, ), real_out_file=dict( argstr='%s', + extensions=None, genfile=True, position=-4, xor=[ @@ -150,11 +161,11 @@ def test_Complex_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Complex_outputs(): output_map = dict( - complex_out_file=dict(), - imaginary_out_file=dict(), - magnitude_out_file=dict(), - phase_out_file=dict(), - real_out_file=dict(), + complex_out_file=dict(extensions=None, ), + imaginary_out_file=dict(extensions=None, ), + magnitude_out_file=dict(extensions=None, ), + phase_out_file=dict(extensions=None, ), + real_out_file=dict(extensions=None, ), ) outputs = Complex.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py b/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py index a82ea8fe43..7f4c0f5367 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py +++ b/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py @@ -9,18 +9,23 @@ def test_ContrastMgr_inputs(): contrast_num=dict(argstr='-cope', ), corrections=dict( copyfile=False, + extensions=None, mandatory=True, ), dof_file=dict( argstr='', copyfile=False, + extensions=None, mandatory=True, ), environ=dict( nohash=True, usedefault=True, ), - fcon_file=dict(argstr='-f %s', ), + fcon_file=dict( + argstr='-f %s', + extensions=None, + ), output_type=dict(), param_estimates=dict( argstr='', @@ -30,12 +35,14 @@ def test_ContrastMgr_inputs(): sigmasquareds=dict( argstr='', copyfile=False, + extensions=None, mandatory=True, position=-2, ), suffix=dict(argstr='-suffix %s', ), tcon_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, ), diff --git a/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py b/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py index 2ed14aaad2..602ca86b47 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py @@ -17,13 +17,17 @@ def test_ConvertWarp_inputs(): ), jacobian_max=dict(argstr='--jmax=%f', ), jacobian_min=dict(argstr='--jmin=%f', ), - midmat=dict(argstr='--midmat=%s', ), + midmat=dict( + argstr='--midmat=%s', + extensions=None, + ), out_abswarp=dict( argstr='--absout', xor=['out_relwarp'], ), out_file=dict( argstr='--out=%s', + extensions=None, name_source=['reference'], name_template='%s_concatwarp', output_name='out_file', @@ -34,10 +38,17 @@ def test_ConvertWarp_inputs(): xor=['out_abswarp'], ), output_type=dict(), - postmat=dict(argstr='--postmat=%s', ), - premat=dict(argstr='--premat=%s', ), + postmat=dict( + argstr='--postmat=%s', + extensions=None, + ), + premat=dict( + argstr='--premat=%s', + extensions=None, + ), reference=dict( argstr='--ref=%s', + extensions=None, mandatory=True, position=1, ), @@ -49,9 +60,18 @@ def test_ConvertWarp_inputs(): argstr='--shiftdir=%s', requires=['shift_in_file'], ), - shift_in_file=dict(argstr='--shiftmap=%s', 
), - warp1=dict(argstr='--warp1=%s', ), - warp2=dict(argstr='--warp2=%s', ), + shift_in_file=dict( + argstr='--shiftmap=%s', + extensions=None, + ), + warp1=dict( + argstr='--warp1=%s', + extensions=None, + ), + warp2=dict( + argstr='--warp2=%s', + extensions=None, + ), ) inputs = ConvertWarp.input_spec() @@ -59,7 +79,7 @@ def test_ConvertWarp_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ConvertWarp_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = ConvertWarp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py b/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py index c017a39a52..a49a846ad9 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py +++ b/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py @@ -24,11 +24,13 @@ def test_ConvertXFM_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, ), in_file2=dict( argstr='%s', + extensions=None, position=-2, ), invert_xfm=dict( @@ -38,6 +40,7 @@ def test_ConvertXFM_inputs(): ), out_file=dict( argstr='-omat %s', + extensions=None, genfile=True, hash_files=False, position=1, @@ -50,7 +53,7 @@ def test_ConvertXFM_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ConvertXFM_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = ConvertXFM.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py b/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py index c66feff211..2cf330edee 100644 --- a/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py +++ b/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py @@ -9,6 +9,7 @@ def test_CopyGeom_inputs(): dest_file=dict( argstr='%s', copyfile=True, + extensions=None, mandatory=True, name_source='dest_file', name_template='%s', @@ -25,6 +26,7 @@ def test_CopyGeom_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=0, ), @@ -36,7 +38,7 @@ def test_CopyGeom_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CopyGeom_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = CopyGeom.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_DTIFit.py b/nipype/interfaces/fsl/tests/test_auto_DTIFit.py index b1d3b4822d..b88fc68c82 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DTIFit.py +++ b/nipype/interfaces/fsl/tests/test_auto_DTIFit.py @@ -13,17 +13,23 @@ def test_DTIFit_inputs(): ), bvals=dict( argstr='-b %s', + extensions=None, mandatory=True, position=4, ), bvecs=dict( argstr='-r %s', + extensions=None, mandatory=True, position=3, ), - cni=dict(argstr='--cni=%s', ), + cni=dict( + argstr='--cni=%s', + extensions=None, + ), dwi=dict( argstr='-k %s', + extensions=None, mandatory=True, position=0, ), @@ -31,10 +37,14 @@ def test_DTIFit_inputs(): nohash=True, usedefault=True, ), - gradnonlin=dict(argstr='--gradnonlin=%s', ), + gradnonlin=dict( + argstr='--gradnonlin=%s', + extensions=None, + ), little_bit=dict(argstr='--littlebit', ), mask=dict( argstr='-m %s', + extensions=None, mandatory=True, position=2, ), @@ -55,18 +65,18 @@ def test_DTIFit_inputs(): 
assert getattr(inputs.traits()[key], metakey) == value def test_DTIFit_outputs(): output_map = dict( - FA=dict(), - L1=dict(), - L2=dict(), - L3=dict(), - MD=dict(), - MO=dict(), - S0=dict(), - V1=dict(), - V2=dict(), - V3=dict(), - sse=dict(), - tensor=dict(), + FA=dict(extensions=None, ), + L1=dict(extensions=None, ), + L2=dict(extensions=None, ), + L3=dict(extensions=None, ), + MD=dict(extensions=None, ), + MO=dict(extensions=None, ), + S0=dict(extensions=None, ), + V1=dict(extensions=None, ), + V2=dict(extensions=None, ), + V3=dict(extensions=None, ), + sse=dict(extensions=None, ), + tensor=dict(extensions=None, ), ) outputs = DTIFit.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_DilateImage.py b/nipype/interfaces/fsl/tests/test_auto_DilateImage.py index 3ed43fbd22..12df7f13af 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DilateImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_DilateImage.py @@ -12,6 +12,7 @@ def test_DilateImage_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -21,6 +22,7 @@ def test_DilateImage_inputs(): ), kernel_file=dict( argstr='%s', + extensions=None, position=5, xor=['kernel_size'], ), @@ -44,6 +46,7 @@ def test_DilateImage_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=-2, @@ -60,7 +63,7 @@ def test_DilateImage_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DilateImage_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = DilateImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py b/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py index 5f3321de78..88f53ad2eb 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py +++ b/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py @@ -8,6 +8,7 @@ def test_DistanceMap_inputs(): args=dict(argstr='%s', ), distance_map=dict( argstr='--out=%s', + extensions=None, genfile=True, hash_files=False, ), @@ -17,6 +18,7 @@ def test_DistanceMap_inputs(): ), in_file=dict( argstr='--in=%s', + extensions=None, mandatory=True, ), invert_input=dict(argstr='--invert', ), @@ -24,7 +26,10 @@ def test_DistanceMap_inputs(): argstr='--localmax=%s', hash_files=False, ), - mask_file=dict(argstr='--mask=%s', ), + mask_file=dict( + argstr='--mask=%s', + extensions=None, + ), output_type=dict(), ) inputs = DistanceMap.input_spec() @@ -34,8 +39,8 @@ def test_DistanceMap_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_DistanceMap_outputs(): output_map = dict( - distance_map=dict(), - local_max_file=dict(), + distance_map=dict(extensions=None, ), + local_max_file=dict(extensions=None, ), ) outputs = DistanceMap.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_DualRegression.py b/nipype/interfaces/fsl/tests/test_auto_DualRegression.py index 4c2fdd3ceb..99556ae8fa 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DualRegression.py +++ b/nipype/interfaces/fsl/tests/test_auto_DualRegression.py @@ -8,6 +8,7 @@ def test_DualRegression_inputs(): args=dict(argstr='%s', ), con_file=dict( argstr='%s', + extensions=None, position=4, ), des_norm=dict( @@ -17,6 +18,7 @@ def test_DualRegression_inputs(): ), design_file=dict( argstr='%s', + extensions=None, position=3, ), environ=dict( @@ -25,6 +27,7 @@ def test_DualRegression_inputs(): ), group_IC_maps_4D=dict( argstr='%s', + 
extensions=None, mandatory=True, position=1, ), diff --git a/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py b/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py index 2a5f0c86b6..fa5fbcf041 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py @@ -9,13 +9,17 @@ def test_EPIDeWarp_inputs(): cleanup=dict(argstr='--cleanup', ), dph_file=dict( argstr='--dph %s', + extensions=None, mandatory=True, ), environ=dict( nohash=True, usedefault=True, ), - epi_file=dict(argstr='--epi %s', ), + epi_file=dict( + argstr='--epi %s', + extensions=None, + ), epidw=dict( argstr='--epidw %s', genfile=False, @@ -24,13 +28,17 @@ def test_EPIDeWarp_inputs(): argstr='--esp %s', usedefault=True, ), - exf_file=dict(argstr='--exf %s', ), + exf_file=dict( + argstr='--exf %s', + extensions=None, + ), exfdw=dict( argstr='--exfdw %s', genfile=True, ), mag_file=dict( argstr='--mag %s', + extensions=None, mandatory=True, position=0, ), @@ -63,10 +71,10 @@ def test_EPIDeWarp_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_EPIDeWarp_outputs(): output_map = dict( - exf_mask=dict(), - exfdw=dict(), - unwarped_file=dict(), - vsm_file=dict(), + exf_mask=dict(extensions=None, ), + exfdw=dict(extensions=None, ), + unwarped_file=dict(extensions=None, ), + vsm_file=dict(extensions=None, ), ) outputs = EPIDeWarp.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Eddy.py b/nipype/interfaces/fsl/tests/test_auto_Eddy.py index 55d9409e17..4bd6a3e840 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Eddy.py +++ b/nipype/interfaces/fsl/tests/test_auto_Eddy.py @@ -18,7 +18,10 @@ def test_Eddy_inputs(): ), fep=dict(argstr='--fep', ), field=dict(argstr='--field=%s', ), - field_mat=dict(argstr='--field_mat=%s', ), + field_mat=dict( + argstr='--field_mat=%s', + extensions=None, + ), flm=dict(argstr='--flm=%s', ), fudge_factor=dict( argstr='--ff=%s', @@ -27,33 +30,43 @@ def test_Eddy_inputs(): fwhm=dict(argstr='--fwhm=%s', ), in_acqp=dict( argstr='--acqp=%s', + extensions=None, mandatory=True, ), in_bval=dict( argstr='--bvals=%s', + extensions=None, mandatory=True, ), in_bvec=dict( argstr='--bvecs=%s', + extensions=None, mandatory=True, ), in_file=dict( argstr='--imain=%s', + extensions=None, mandatory=True, ), in_index=dict( argstr='--index=%s', + extensions=None, mandatory=True, ), in_mask=dict( argstr='--mask=%s', + extensions=None, mandatory=True, ), in_topup_fieldcoef=dict( argstr='--topup=%s', + extensions=None, requires=['in_topup_movpar'], ), - in_topup_movpar=dict(requires=['in_topup_fieldcoef'], ), + in_topup_movpar=dict( + extensions=None, + requires=['in_topup_fieldcoef'], + ), interp=dict(argstr='--interp=%s', ), is_shelled=dict(argstr='--data_is_shelled', ), method=dict(argstr='--resamp=%s', ), @@ -79,7 +92,10 @@ def test_Eddy_inputs(): argstr='--residuals', min_ver='5.0.10', ), - session=dict(argstr='--session=%s', ), + session=dict( + argstr='--session=%s', + extensions=None, + ), slm=dict(argstr='--slm=%s', ), use_cuda=dict(), ) @@ -90,15 +106,15 @@ def test_Eddy_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Eddy_outputs(): output_map = dict( - out_cnr_maps=dict(), - out_corrected=dict(), - out_movement_rms=dict(), - out_outlier_report=dict(), - out_parameter=dict(), - out_residuals=dict(), - out_restricted_movement_rms=dict(), - out_rotated_bvecs=dict(), - out_shell_alignment_parameters=dict(), + out_cnr_maps=dict(extensions=None, ), + out_corrected=dict(extensions=None, ), + 
out_movement_rms=dict(extensions=None, ), + out_outlier_report=dict(extensions=None, ), + out_parameter=dict(extensions=None, ), + out_residuals=dict(extensions=None, ), + out_restricted_movement_rms=dict(extensions=None, ), + out_rotated_bvecs=dict(extensions=None, ), + out_shell_alignment_parameters=dict(extensions=None, ), ) outputs = Eddy.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py b/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py index e88219aa04..86134137ac 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py +++ b/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py @@ -12,11 +12,13 @@ def test_EddyCorrect_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=0, ), out_file=dict( argstr='%s', + extensions=None, name_source=['in_file'], name_template='%s_edc', output_name='eddy_corrected', @@ -36,7 +38,7 @@ def test_EddyCorrect_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_EddyCorrect_outputs(): - output_map = dict(eddy_corrected=dict(), ) + output_map = dict(eddy_corrected=dict(extensions=None, ), ) outputs = EddyCorrect.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py b/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py index 3cc6da2027..945b2e54ba 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py +++ b/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py @@ -13,20 +13,29 @@ def test_EddyQuad_inputs(): ), bval_file=dict( argstr='--bvals %s', + extensions=None, mandatory=True, ), - bvec_file=dict(argstr='--bvecs %s', ), + bvec_file=dict( + argstr='--bvecs %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, ), - field=dict(argstr='--field %s', ), + field=dict( + argstr='--field %s', + extensions=None, + ), idx_file=dict( argstr='--eddyIdx %s', + extensions=None, mandatory=True, ), mask_file=dict( argstr='--mask %s', + extensions=None, mandatory=True, ), output_dir=dict( @@ -37,9 +46,13 @@ def test_EddyQuad_inputs(): output_type=dict(), param_file=dict( argstr='--eddyParams %s', + extensions=None, mandatory=True, ), - slice_spec=dict(argstr='--slspec %s', ), + slice_spec=dict( + argstr='--slspec %s', + extensions=None, + ), verbose=dict(argstr='--verbose', ), ) inputs = EddyQuad.input_spec() @@ -51,12 +64,12 @@ def test_EddyQuad_outputs(): output_map = dict( avg_b0_pe_png=dict(), avg_b_png=dict(), - clean_volumes=dict(), + clean_volumes=dict(extensions=None, ), cnr_png=dict(), - qc_json=dict(), - qc_pdf=dict(), - residuals=dict(), - vdm_png=dict(), + qc_json=dict(extensions=None, ), + qc_pdf=dict(extensions=None, ), + residuals=dict(extensions=None, ), + vdm_png=dict(extensions=None, ), ) outputs = EddyQuad.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_EpiReg.py b/nipype/interfaces/fsl/tests/test_auto_EpiReg.py index 634a75e376..a6b06e0ca7 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EpiReg.py +++ b/nipype/interfaces/fsl/tests/test_auto_EpiReg.py @@ -13,12 +13,22 @@ def test_EpiReg_inputs(): ), epi=dict( argstr='--epi=%s', + extensions=None, mandatory=True, position=-4, ), - fmap=dict(argstr='--fmap=%s', ), - fmapmag=dict(argstr='--fmapmag=%s', ), - fmapmagbrain=dict(argstr='--fmapmagbrain=%s', ), + fmap=dict( + argstr='--fmap=%s', + extensions=None, + ), + fmapmag=dict( + argstr='--fmapmag=%s', + extensions=None, + ), + fmapmagbrain=dict( + argstr='--fmapmagbrain=%s', + extensions=None, + ), 
no_clean=dict( argstr='--noclean', usedefault=True, @@ -33,16 +43,24 @@ def test_EpiReg_inputs(): pedir=dict(argstr='--pedir=%s', ), t1_brain=dict( argstr='--t1brain=%s', + extensions=None, mandatory=True, position=-2, ), t1_head=dict( argstr='--t1=%s', + extensions=None, mandatory=True, position=-3, ), - weight_image=dict(argstr='--weight=%s', ), - wmseg=dict(argstr='--wmseg=%s', ), + weight_image=dict( + argstr='--weight=%s', + extensions=None, + ), + wmseg=dict( + argstr='--wmseg=%s', + extensions=None, + ), ) inputs = EpiReg.input_spec() @@ -51,20 +69,20 @@ def test_EpiReg_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_EpiReg_outputs(): output_map = dict( - epi2str_inv=dict(), - epi2str_mat=dict(), - fmap2epi_mat=dict(), - fmap2str_mat=dict(), - fmap_epi=dict(), - fmap_str=dict(), - fmapmag_str=dict(), - fullwarp=dict(), - out_1vol=dict(), - out_file=dict(), - seg=dict(), - shiftmap=dict(), - wmedge=dict(), - wmseg=dict(), + epi2str_inv=dict(extensions=None, ), + epi2str_mat=dict(extensions=None, ), + fmap2epi_mat=dict(extensions=None, ), + fmap2str_mat=dict(extensions=None, ), + fmap_epi=dict(extensions=None, ), + fmap_str=dict(extensions=None, ), + fmapmag_str=dict(extensions=None, ), + fullwarp=dict(extensions=None, ), + out_1vol=dict(extensions=None, ), + out_file=dict(extensions=None, ), + seg=dict(extensions=None, ), + shiftmap=dict(extensions=None, ), + wmedge=dict(extensions=None, ), + wmseg=dict(extensions=None, ), ) outputs = EpiReg.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py b/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py index 797a403d45..bd7d6f2b83 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py @@ -12,6 +12,7 @@ def test_ErodeImage_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -21,6 +22,7 @@ def test_ErodeImage_inputs(): ), kernel_file=dict( argstr='%s', + extensions=None, position=5, xor=['kernel_size'], ), @@ -44,6 +46,7 @@ def test_ErodeImage_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=-2, @@ -60,7 +63,7 @@ def test_ErodeImage_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ErodeImage_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = ErodeImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py b/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py index df8de64144..414a068177 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py +++ b/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py @@ -20,12 +20,14 @@ def test_ExtractROI_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=0, ), output_type=dict(), roi_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=1, @@ -69,7 +71,7 @@ def test_ExtractROI_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ExtractROI_outputs(): - output_map = dict(roi_file=dict(), ) + output_map = dict(roi_file=dict(extensions=None, ), ) outputs = ExtractROI.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FAST.py b/nipype/interfaces/fsl/tests/test_auto_FAST.py index 0b983181af..b36c941cff 
100644 --- a/nipype/interfaces/fsl/tests/test_auto_FAST.py +++ b/nipype/interfaces/fsl/tests/test_auto_FAST.py @@ -24,15 +24,24 @@ def test_FAST_inputs(): position=-1, ), init_seg_smooth=dict(argstr='-f %.3f', ), - init_transform=dict(argstr='-a %s', ), + init_transform=dict( + argstr='-a %s', + extensions=None, + ), iters_afterbias=dict(argstr='-O %d', ), - manual_seg=dict(argstr='-s %s', ), + manual_seg=dict( + argstr='-s %s', + extensions=None, + ), mixel_smooth=dict(argstr='-R %.2f', ), no_bias=dict(argstr='-N', ), no_pve=dict(argstr='--nopve', ), number_classes=dict(argstr='-n %d', ), other_priors=dict(argstr='-A %s', ), - out_basename=dict(argstr='-o %s', ), + out_basename=dict( + argstr='-o %s', + extensions=None, + ), output_biascorrected=dict(argstr='-B', ), output_biasfield=dict(argstr='-b', ), output_type=dict(), @@ -50,13 +59,13 @@ def test_FAST_inputs(): def test_FAST_outputs(): output_map = dict( bias_field=dict(), - mixeltype=dict(), + mixeltype=dict(extensions=None, ), partial_volume_files=dict(), - partial_volume_map=dict(), + partial_volume_map=dict(extensions=None, ), probability_maps=dict(), restored_image=dict(), tissue_class_files=dict(), - tissue_class_map=dict(), + tissue_class_map=dict(extensions=None, ), ) outputs = FAST.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FEAT.py b/nipype/interfaces/fsl/tests/test_auto_FEAT.py index 01fc72506f..e193a0ae27 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FEAT.py +++ b/nipype/interfaces/fsl/tests/test_auto_FEAT.py @@ -12,6 +12,7 @@ def test_FEAT_inputs(): ), fsf_file=dict( argstr='%s', + extensions=None, mandatory=True, position=0, ), diff --git a/nipype/interfaces/fsl/tests/test_auto_FEATModel.py b/nipype/interfaces/fsl/tests/test_auto_FEATModel.py index 34ea37d47f..b1103396d8 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FEATModel.py +++ b/nipype/interfaces/fsl/tests/test_auto_FEATModel.py @@ -19,6 +19,7 @@ def test_FEATModel_inputs(): fsf_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=0, ), @@ -31,11 +32,11 @@ def test_FEATModel_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_FEATModel_outputs(): output_map = dict( - con_file=dict(), - design_cov=dict(), - design_file=dict(), - design_image=dict(), - fcon_file=dict(), + con_file=dict(extensions=None, ), + design_cov=dict(extensions=None, ), + design_file=dict(extensions=None, ), + design_image=dict(extensions=None, ), + fcon_file=dict(extensions=None, ), ) outputs = FEATModel.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py b/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py index a8f59a0ec3..d0d008956f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py +++ b/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py @@ -7,7 +7,10 @@ def test_FEATRegister_inputs(): input_map = dict( feat_dirs=dict(mandatory=True, ), reg_dof=dict(usedefault=True, ), - reg_image=dict(mandatory=True, ), + reg_image=dict( + extensions=None, + mandatory=True, + ), ) inputs = FEATRegister.input_spec() @@ -15,7 +18,7 @@ def test_FEATRegister_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_FEATRegister_outputs(): - output_map = dict(fsf_file=dict(), ) + output_map = dict(fsf_file=dict(extensions=None, ), ) outputs = FEATRegister.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FIRST.py 
b/nipype/interfaces/fsl/tests/test_auto_FIRST.py index 964ee4922c..fc7914a828 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FIRST.py +++ b/nipype/interfaces/fsl/tests/test_auto_FIRST.py @@ -7,6 +7,7 @@ def test_FIRST_inputs(): input_map = dict( affine_file=dict( argstr='-a %s', + extensions=None, position=6, ), args=dict(argstr='%s', ), @@ -21,6 +22,7 @@ def test_FIRST_inputs(): in_file=dict( argstr='-i %s', copyfile=False, + extensions=None, mandatory=True, position=-2, ), @@ -45,6 +47,7 @@ def test_FIRST_inputs(): ), out_file=dict( argstr='-o %s', + extensions=None, hash_files=False, mandatory=True, position=-1, @@ -64,8 +67,8 @@ def test_FIRST_inputs(): def test_FIRST_outputs(): output_map = dict( bvars=dict(), - original_segmentations=dict(), - segmentation_file=dict(), + original_segmentations=dict(extensions=None, ), + segmentation_file=dict(extensions=None, ), vtk_surfaces=dict(), ) outputs = FIRST.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py b/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py index bd335282e3..5de114a4c3 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py +++ b/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py @@ -9,22 +9,31 @@ def test_FLAMEO_inputs(): burnin=dict(argstr='--burnin=%d', ), cope_file=dict( argstr='--copefile=%s', + extensions=None, mandatory=True, ), cov_split_file=dict( argstr='--covsplitfile=%s', + extensions=None, mandatory=True, ), design_file=dict( argstr='--designfile=%s', + extensions=None, mandatory=True, ), - dof_var_cope_file=dict(argstr='--dofvarcopefile=%s', ), + dof_var_cope_file=dict( + argstr='--dofvarcopefile=%s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, ), - f_con_file=dict(argstr='--fcontrastsfile=%s', ), + f_con_file=dict( + argstr='--fcontrastsfile=%s', + extensions=None, + ), fix_mean=dict(argstr='--fixmean', ), infer_outliers=dict(argstr='--inferoutliers', ), log_dir=dict( @@ -33,6 +42,7 @@ def test_FLAMEO_inputs(): ), mask_file=dict( argstr='--maskfile=%s', + extensions=None, mandatory=True, ), n_jumps=dict(argstr='--njumps=%d', ), @@ -47,9 +57,13 @@ def test_FLAMEO_inputs(): sigma_dofs=dict(argstr='--sigma_dofs=%d', ), t_con_file=dict( argstr='--tcontrastsfile=%s', + extensions=None, mandatory=True, ), - var_cope_file=dict(argstr='--varcopefile=%s', ), + var_cope_file=dict( + argstr='--varcopefile=%s', + extensions=None, + ), ) inputs = FLAMEO.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FLIRT.py b/nipype/interfaces/fsl/tests/test_auto_FLIRT.py index 0b59550e5a..08074ccf94 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FLIRT.py +++ b/nipype/interfaces/fsl/tests/test_auto_FLIRT.py @@ -41,10 +41,12 @@ def test_FLIRT_inputs(): ), fieldmap=dict( argstr='-fieldmap %s', + extensions=None, min_ver='5.0.0', ), fieldmapmask=dict( argstr='-fieldmapmask %s', + extensions=None, min_ver='5.0.0', ), fine_search=dict( @@ -54,11 +56,18 @@ def test_FLIRT_inputs(): force_scaling=dict(argstr='-forcescaling', ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, position=0, ), - in_matrix_file=dict(argstr='-init %s', ), - in_weight=dict(argstr='-inweight %s', ), + in_matrix_file=dict( + argstr='-init %s', + extensions=None, + ), + in_weight=dict( + argstr='-inweight %s', + extensions=None, + ), interp=dict(argstr='-interp %s', ), min_sampling=dict( argstr='-minsampling %f', @@ -70,12 +79,14 @@ def test_FLIRT_inputs(): no_search=dict(argstr='-nosearch', ), out_file=dict( argstr='-out %s', + extensions=None, hash_files=False, 
name_source=['in_file'], name_template='%s_flirt', position=2, ), out_log=dict( + extensions=None, keep_extension=True, name_source=['in_file'], name_template='%s_flirt.log', @@ -83,6 +94,7 @@ def test_FLIRT_inputs(): ), out_matrix_file=dict( argstr='-omat %s', + extensions=None, hash_files=False, keep_extension=True, name_source=['in_file'], @@ -98,15 +110,22 @@ def test_FLIRT_inputs(): argstr='-pedir %d', min_ver='5.0.0', ), - ref_weight=dict(argstr='-refweight %s', ), + ref_weight=dict( + argstr='-refweight %s', + extensions=None, + ), reference=dict( argstr='-ref %s', + extensions=None, mandatory=True, position=1, ), rigid2D=dict(argstr='-2D', ), save_log=dict(), - schedule=dict(argstr='-schedule %s', ), + schedule=dict( + argstr='-schedule %s', + extensions=None, + ), searchr_x=dict( argstr='-searchrx %s', units='degrees', @@ -128,14 +147,17 @@ def test_FLIRT_inputs(): verbose=dict(argstr='-verbose %d', ), wm_seg=dict( argstr='-wmseg %s', + extensions=None, min_ver='5.0.0', ), wmcoords=dict( argstr='-wmcoords %s', + extensions=None, min_ver='5.0.0', ), wmnorms=dict( argstr='-wmnorms %s', + extensions=None, min_ver='5.0.0', ), ) @@ -146,9 +168,9 @@ def test_FLIRT_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_FLIRT_outputs(): output_map = dict( - out_file=dict(), - out_log=dict(), - out_matrix_file=dict(), + out_file=dict(extensions=None, ), + out_log=dict(extensions=None, ), + out_matrix_file=dict(extensions=None, ), ) outputs = FLIRT.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FNIRT.py b/nipype/interfaces/fsl/tests/test_auto_FNIRT.py index 2b7d0b1b00..95afc3aa4d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FNIRT.py +++ b/nipype/interfaces/fsl/tests/test_auto_FNIRT.py @@ -5,7 +5,10 @@ def test_FNIRT_inputs(): input_map = dict( - affine_file=dict(argstr='--aff=%s', ), + affine_file=dict( + argstr='--aff=%s', + extensions=None, + ), apply_inmask=dict( argstr='--applyinmask=%s', sep=',', @@ -38,6 +41,7 @@ def test_FNIRT_inputs(): hessian_precision=dict(argstr='--numprec=%s', ), in_file=dict( argstr='--in=%s', + extensions=None, mandatory=True, ), in_fwhm=dict( @@ -48,11 +52,17 @@ def test_FNIRT_inputs(): argstr='--intin=%s', copyfile=False, ), - inmask_file=dict(argstr='--inmask=%s', ), + inmask_file=dict( + argstr='--inmask=%s', + extensions=None, + ), inmask_val=dict(argstr='--impinval=%f', ), intensity_mapping_model=dict(argstr='--intmod=%s', ), intensity_mapping_order=dict(argstr='--intorder=%d', ), - inwarp_file=dict(argstr='--inwarp=%s', ), + inwarp_file=dict( + argstr='--inwarp=%s', + extensions=None, + ), jacobian_file=dict( argstr='--jout=%s', hash_files=False, @@ -60,6 +70,7 @@ def test_FNIRT_inputs(): jacobian_range=dict(argstr='--jacrange=%f,%f', ), log_file=dict( argstr='--logout=%s', + extensions=None, genfile=True, hash_files=False, ), @@ -78,13 +89,17 @@ def test_FNIRT_inputs(): output_type=dict(), ref_file=dict( argstr='--ref=%s', + extensions=None, mandatory=True, ), ref_fwhm=dict( argstr='--reffwhm=%s', sep=',', ), - refmask_file=dict(argstr='--refmask=%s', ), + refmask_file=dict( + argstr='--refmask=%s', + extensions=None, + ), refmask_val=dict(argstr='--imprefval=%f', ), regularization_lambda=dict( argstr='--lambda=%s', @@ -114,6 +129,7 @@ def test_FNIRT_inputs(): warp_resolution=dict(argstr='--warpres=%d,%d,%d', ), warped_file=dict( argstr='--iout=%s', + extensions=None, genfile=True, hash_files=False, ), @@ -125,13 +141,13 @@ def test_FNIRT_inputs(): assert getattr(inputs.traits()[key], metakey) == value def 
test_FNIRT_outputs(): output_map = dict( - field_file=dict(), - fieldcoeff_file=dict(), - jacobian_file=dict(), - log_file=dict(), - modulatedref_file=dict(), + field_file=dict(extensions=None, ), + fieldcoeff_file=dict(extensions=None, ), + jacobian_file=dict(extensions=None, ), + log_file=dict(extensions=None, ), + modulatedref_file=dict(extensions=None, ), out_intensitymap_file=dict(), - warped_file=dict(), + warped_file=dict(extensions=None, ), ) outputs = FNIRT.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py b/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py index fd85eee3bf..e0ecf18807 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py +++ b/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py @@ -20,10 +20,12 @@ def test_FSLXCommand_inputs(): ), bvals=dict( argstr='--bvals=%s', + extensions=None, mandatory=True, ), bvecs=dict( argstr='--bvecs=%s', + extensions=None, mandatory=True, ), cnlinear=dict( @@ -32,6 +34,7 @@ def test_FSLXCommand_inputs(): ), dwi=dict( argstr='--data=%s', + extensions=None, mandatory=True, ), environ=dict( @@ -57,6 +60,7 @@ def test_FSLXCommand_inputs(): ), mask=dict( argstr='--mask=%s', + extensions=None, mandatory=True, ), model=dict(argstr='--model=%d', ), @@ -102,10 +106,10 @@ def test_FSLXCommand_outputs(): output_map = dict( dyads=dict(), fsamples=dict(), - mean_S0samples=dict(), - mean_dsamples=dict(), + mean_S0samples=dict(extensions=None, ), + mean_dsamples=dict(extensions=None, ), mean_fsamples=dict(), - mean_tausamples=dict(), + mean_tausamples=dict(extensions=None, ), phsamples=dict(), thsamples=dict(), ) diff --git a/nipype/interfaces/fsl/tests/test_auto_FUGUE.py b/nipype/interfaces/fsl/tests/test_auto_FUGUE.py index 0e96f1e867..5862d2e901 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FUGUE.py +++ b/nipype/interfaces/fsl/tests/test_auto_FUGUE.py @@ -15,8 +15,14 @@ def test_FUGUE_inputs(): nohash=True, usedefault=True, ), - fmap_in_file=dict(argstr='--loadfmap=%s', ), - fmap_out_file=dict(argstr='--savefmap=%s', ), + fmap_in_file=dict( + argstr='--loadfmap=%s', + extensions=None, + ), + fmap_out_file=dict( + argstr='--savefmap=%s', + extensions=None, + ), forward_warping=dict(usedefault=True, ), fourier_order=dict(argstr='--fourier=%d', ), icorr=dict( @@ -27,8 +33,14 @@ def test_FUGUE_inputs(): argstr='--icorronly', requires=['unwarped_file'], ), - in_file=dict(argstr='--in=%s', ), - mask_file=dict(argstr='--mask=%s', ), + in_file=dict( + argstr='--in=%s', + extensions=None, + ), + mask_file=dict( + argstr='--mask=%s', + extensions=None, + ), median_2dfilter=dict(argstr='--median', ), no_extend=dict(argstr='--noextend', ), no_gap_fill=dict(argstr='--nofill', ), @@ -36,7 +48,10 @@ def test_FUGUE_inputs(): output_type=dict(), pava=dict(argstr='--pava', ), phase_conjugate=dict(argstr='--phaseconj', ), - phasemap_in_file=dict(argstr='--phasemap=%s', ), + phasemap_in_file=dict( + argstr='--phasemap=%s', + extensions=None, + ), poly_order=dict(argstr='--poly=%d', ), save_fmap=dict(xor=['save_unmasked_fmap'], ), save_shift=dict(xor=['save_unmasked_shift'], ), @@ -48,18 +63,26 @@ def test_FUGUE_inputs(): argstr='--unmaskshift', xor=['save_shift'], ), - shift_in_file=dict(argstr='--loadshift=%s', ), - shift_out_file=dict(argstr='--saveshift=%s', ), + shift_in_file=dict( + argstr='--loadshift=%s', + extensions=None, + ), + shift_out_file=dict( + argstr='--saveshift=%s', + extensions=None, + ), smooth2d=dict(argstr='--smooth2=%.2f', ), smooth3d=dict(argstr='--smooth3=%.2f', ), 
unwarp_direction=dict(argstr='--unwarpdir=%s', ), unwarped_file=dict( argstr='--unwarp=%s', + extensions=None, requires=['in_file'], xor=['warped_file'], ), warped_file=dict( argstr='--warp=%s', + extensions=None, requires=['in_file'], xor=['unwarped_file'], ), @@ -71,10 +94,10 @@ def test_FUGUE_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_FUGUE_outputs(): output_map = dict( - fmap_out_file=dict(), - shift_out_file=dict(), - unwarped_file=dict(), - warped_file=dict(), + fmap_out_file=dict(extensions=None, ), + shift_out_file=dict(extensions=None, ), + unwarped_file=dict(extensions=None, ), + warped_file=dict(extensions=None, ), ) outputs = FUGUE.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py b/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py index 36cbb979d7..3e7dcc2c41 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py +++ b/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py @@ -8,6 +8,7 @@ def test_FilterRegressor_inputs(): args=dict(argstr='%s', ), design_file=dict( argstr='-d %s', + extensions=None, mandatory=True, position=3, ), @@ -29,12 +30,17 @@ def test_FilterRegressor_inputs(): ), in_file=dict( argstr='-i %s', + extensions=None, mandatory=True, position=1, ), - mask=dict(argstr='-m %s', ), + mask=dict( + argstr='-m %s', + extensions=None, + ), out_file=dict( argstr='-o %s', + extensions=None, genfile=True, hash_files=False, position=2, @@ -49,7 +55,7 @@ def test_FilterRegressor_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_FilterRegressor_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = FilterRegressor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py b/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py index 3731c842e7..5c7b717d93 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py +++ b/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py @@ -17,6 +17,7 @@ def test_FindTheBiggest_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=2, @@ -29,7 +30,11 @@ def test_FindTheBiggest_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_FindTheBiggest_outputs(): - output_map = dict(out_file=dict(argstr='%s', ), ) + output_map = dict( + out_file=dict( + argstr='%s', + extensions=None, + ), ) outputs = FindTheBiggest.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_GLM.py b/nipype/interfaces/fsl/tests/test_auto_GLM.py index 1dbf7eba19..eb6f49f554 100644 --- a/nipype/interfaces/fsl/tests/test_auto_GLM.py +++ b/nipype/interfaces/fsl/tests/test_auto_GLM.py @@ -6,12 +6,16 @@ def test_GLM_inputs(): input_map = dict( args=dict(argstr='%s', ), - contrasts=dict(argstr='-c %s', ), + contrasts=dict( + argstr='-c %s', + extensions=None, + ), dat_norm=dict(argstr='--dat_norm', ), demean=dict(argstr='--demean', ), des_norm=dict(argstr='--des_norm', ), design=dict( argstr='-d %s', + extensions=None, mandatory=True, position=2, ), @@ -22,28 +26,66 @@ def test_GLM_inputs(): ), in_file=dict( argstr='-i %s', + extensions=None, mandatory=True, position=1, ), - mask=dict(argstr='-m %s', ), - out_cope=dict(argstr='--out_cope=%s', ), - out_data_name=dict(argstr='--out_data=%s', ), - 
out_f_name=dict(argstr='--out_f=%s', ), + mask=dict( + argstr='-m %s', + extensions=None, + ), + out_cope=dict( + argstr='--out_cope=%s', + extensions=None, + ), + out_data_name=dict( + argstr='--out_data=%s', + extensions=None, + ), + out_f_name=dict( + argstr='--out_f=%s', + extensions=None, + ), out_file=dict( argstr='-o %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_glm', position=3, ), - out_p_name=dict(argstr='--out_p=%s', ), - out_pf_name=dict(argstr='--out_pf=%s', ), - out_res_name=dict(argstr='--out_res=%s', ), - out_sigsq_name=dict(argstr='--out_sigsq=%s', ), - out_t_name=dict(argstr='--out_t=%s', ), - out_varcb_name=dict(argstr='--out_varcb=%s', ), - out_vnscales_name=dict(argstr='--out_vnscales=%s', ), - out_z_name=dict(argstr='--out_z=%s', ), + out_p_name=dict( + argstr='--out_p=%s', + extensions=None, + ), + out_pf_name=dict( + argstr='--out_pf=%s', + extensions=None, + ), + out_res_name=dict( + argstr='--out_res=%s', + extensions=None, + ), + out_sigsq_name=dict( + argstr='--out_sigsq=%s', + extensions=None, + ), + out_t_name=dict( + argstr='--out_t=%s', + extensions=None, + ), + out_varcb_name=dict( + argstr='--out_varcb=%s', + extensions=None, + ), + out_vnscales_name=dict( + argstr='--out_vnscales=%s', + extensions=None, + ), + out_z_name=dict( + argstr='--out_z=%s', + extensions=None, + ), output_type=dict(), var_norm=dict(argstr='--vn', ), ) @@ -57,7 +99,7 @@ def test_GLM_outputs(): out_cope=dict(), out_data=dict(), out_f=dict(), - out_file=dict(), + out_file=dict(extensions=None, ), out_p=dict(), out_pf=dict(), out_res=dict(), diff --git a/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py b/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py index 05bbb5b106..c3f10c2664 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py +++ b/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py @@ -26,24 +26,29 @@ def test_ICA_AROMA_inputs(): ), fnirt_warp_file=dict( argstr='-warp %s', + extensions=None, xor=['feat_dir'], ), in_file=dict( argstr='-i %s', + extensions=None, mandatory=True, xor=['feat_dir'], ), mask=dict( argstr='-m %s', + extensions=None, xor=['feat_dir'], ), mat_file=dict( argstr='-affmat %s', + extensions=None, xor=['feat_dir'], ), melodic_dir=dict(argstr='-meldir %s', ), motion_parameters=dict( argstr='-mc %s', + extensions=None, mandatory=True, xor=['feat_dir'], ), @@ -60,8 +65,8 @@ def test_ICA_AROMA_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_ICA_AROMA_outputs(): output_map = dict( - aggr_denoised_file=dict(), - nonaggr_denoised_file=dict(), + aggr_denoised_file=dict(extensions=None, ), + nonaggr_denoised_file=dict(extensions=None, ), out_dir=dict(), ) outputs = ICA_AROMA.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py b/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py index 6d4e06827e..d25ed8bc77 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py @@ -12,14 +12,19 @@ def test_ImageMaths_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=1, ), in_file2=dict( argstr='%s', + extensions=None, position=3, ), - mask_file=dict(argstr='-mas %s', ), + mask_file=dict( + argstr='-mas %s', + extensions=None, + ), op_string=dict( argstr='%s', position=2, @@ -30,6 +35,7 @@ def test_ImageMaths_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=-2, @@ -43,7 +49,7 @@ def test_ImageMaths_inputs(): for metakey, value 
in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ImageMaths_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = ImageMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py b/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py index 042f93112b..5c83c40150 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py +++ b/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py @@ -13,10 +13,14 @@ def test_ImageMeants_inputs(): ), in_file=dict( argstr='-i %s', + extensions=None, mandatory=True, position=0, ), - mask=dict(argstr='-m %s', ), + mask=dict( + argstr='-m %s', + extensions=None, + ), nobin=dict(argstr='--no_bin', ), order=dict( argstr='--order=%d', @@ -24,6 +28,7 @@ def test_ImageMeants_inputs(): ), out_file=dict( argstr='-o %s', + extensions=None, genfile=True, hash_files=False, ), @@ -39,7 +44,7 @@ def test_ImageMeants_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ImageMeants_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = ImageMeants.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageStats.py b/nipype/interfaces/fsl/tests/test_auto_ImageStats.py index 91a6059798..0230245f2e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ImageStats.py +++ b/nipype/interfaces/fsl/tests/test_auto_ImageStats.py @@ -12,10 +12,14 @@ def test_ImageStats_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), - mask_file=dict(argstr='', ), + mask_file=dict( + argstr='', + extensions=None, + ), op_string=dict( argstr='%s', mandatory=True, diff --git a/nipype/interfaces/fsl/tests/test_auto_InvWarp.py b/nipype/interfaces/fsl/tests/test_auto_InvWarp.py index 973f42ea34..fa39ceac7d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_InvWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_InvWarp.py @@ -16,6 +16,7 @@ def test_InvWarp_inputs(): ), inverse_warp=dict( argstr='--out=%s', + extensions=None, hash_files=False, name_source=['warp'], name_template='%s_inverse', @@ -27,6 +28,7 @@ def test_InvWarp_inputs(): output_type=dict(), reference=dict( argstr='--ref=%s', + extensions=None, mandatory=True, ), regularise=dict(argstr='--regularise=%f', ), @@ -36,6 +38,7 @@ def test_InvWarp_inputs(): ), warp=dict( argstr='--warp=%s', + extensions=None, mandatory=True, ), ) @@ -45,7 +48,7 @@ def test_InvWarp_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_InvWarp_outputs(): - output_map = dict(inverse_warp=dict(), ) + output_map = dict(inverse_warp=dict(extensions=None, ), ) outputs = InvWarp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py b/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py index 4b6192c98d..5c224aeeb6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py +++ b/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py @@ -18,6 +18,7 @@ def test_IsotropicSmooth_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -31,6 +32,7 @@ def test_IsotropicSmooth_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=-2, @@ -53,7 
+55,7 @@ def test_IsotropicSmooth_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_IsotropicSmooth_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = IsotropicSmooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_L2Model.py b/nipype/interfaces/fsl/tests/test_auto_L2Model.py index 9d3588666f..160fba2fb5 100644 --- a/nipype/interfaces/fsl/tests/test_auto_L2Model.py +++ b/nipype/interfaces/fsl/tests/test_auto_L2Model.py @@ -12,9 +12,9 @@ def test_L2Model_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_L2Model_outputs(): output_map = dict( - design_con=dict(), - design_grp=dict(), - design_mat=dict(), + design_con=dict(extensions=None, ), + design_grp=dict(extensions=None, ), + design_mat=dict(extensions=None, ), ) outputs = L2Model.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py b/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py index daeca07b10..f9f1868613 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py +++ b/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py @@ -15,19 +15,27 @@ def test_MCFLIRT_inputs(): ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, position=0, ), - init=dict(argstr='-init %s', ), + init=dict( + argstr='-init %s', + extensions=None, + ), interpolation=dict(argstr='-%s_final', ), mean_vol=dict(argstr='-meanvol', ), out_file=dict( argstr='-out %s', + extensions=None, genfile=True, hash_files=False, ), output_type=dict(), - ref_file=dict(argstr='-reffile %s', ), + ref_file=dict( + argstr='-reffile %s', + extensions=None, + ), ref_vol=dict(argstr='-refvol %d', ), rotation=dict(argstr='-rotation %d', ), save_mats=dict(argstr='-mats', ), @@ -48,12 +56,12 @@ def test_MCFLIRT_inputs(): def test_MCFLIRT_outputs(): output_map = dict( mat_file=dict(), - mean_img=dict(), - out_file=dict(), - par_file=dict(), + mean_img=dict(extensions=None, ), + out_file=dict(extensions=None, ), + par_file=dict(extensions=None, ), rms_files=dict(), - std_img=dict(), - variance_img=dict(), + std_img=dict(extensions=None, ), + variance_img=dict(extensions=None, ), ) outputs = MCFLIRT.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_MELODIC.py b/nipype/interfaces/fsl/tests/test_auto_MELODIC.py index b22078c450..b024a9f572 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MELODIC.py +++ b/nipype/interfaces/fsl/tests/test_auto_MELODIC.py @@ -5,10 +5,16 @@ def test_MELODIC_inputs(): input_map = dict( - ICs=dict(argstr='--ICs=%s', ), + ICs=dict( + argstr='--ICs=%s', + extensions=None, + ), approach=dict(argstr='-a %s', ), args=dict(argstr='%s', ), - bg_image=dict(argstr='--bgimage=%s', ), + bg_image=dict( + argstr='--bgimage=%s', + extensions=None, + ), bg_threshold=dict(argstr='--bgthreshold=%f', ), cov_weight=dict(argstr='--covarweight=%f', ), dim=dict(argstr='-d %d', ), @@ -26,14 +32,20 @@ def test_MELODIC_inputs(): sep=',', ), log_power=dict(argstr='--logPower', ), - mask=dict(argstr='-m %s', ), + mask=dict( + argstr='-m %s', + extensions=None, + ), max_restart=dict(argstr='--maxrestart=%d', ), maxit=dict(argstr='--maxit=%d', ), migp=dict(argstr='--migp', ), migpN=dict(argstr='--migpN %d', ), migp_factor=dict(argstr='--migp_factor %d', ), migp_shuffle=dict(argstr='--migp_shuffle', ), - mix=dict(argstr='--mix=%s', ), + mix=dict( + argstr='--mix=%s', + extensions=None, + ), 
mm_thresh=dict(argstr='--mmthresh=%f', ), no_bet=dict(argstr='--nobet', ), no_mask=dict(argstr='--nomask', ), @@ -57,13 +69,28 @@ def test_MELODIC_inputs(): remove_deriv=dict(argstr='--remove_deriv', ), report=dict(argstr='--report', ), report_maps=dict(argstr='--report_maps=%s', ), - s_con=dict(argstr='--Scon=%s', ), - s_des=dict(argstr='--Sdes=%s', ), + s_con=dict( + argstr='--Scon=%s', + extensions=None, + ), + s_des=dict( + argstr='--Sdes=%s', + extensions=None, + ), sep_vn=dict(argstr='--sep_vn', ), sep_whiten=dict(argstr='--sep_whiten', ), - smode=dict(argstr='--smode=%s', ), - t_con=dict(argstr='--Tcon=%s', ), - t_des=dict(argstr='--Tdes=%s', ), + smode=dict( + argstr='--smode=%s', + extensions=None, + ), + t_con=dict( + argstr='--Tcon=%s', + extensions=None, + ), + t_des=dict( + argstr='--Tdes=%s', + extensions=None, + ), tr_sec=dict(argstr='--tr=%f', ), update_mask=dict(argstr='--update_mask', ), var_norm=dict(argstr='--vn', ), diff --git a/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py b/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py index c7a5a3e465..832946a499 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py +++ b/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py @@ -12,10 +12,12 @@ def test_MakeDyadicVectors_inputs(): ), mask=dict( argstr='%s', + extensions=None, position=2, ), output=dict( argstr='%s', + extensions=None, hash_files=False, position=3, usedefault=True, @@ -27,11 +29,13 @@ def test_MakeDyadicVectors_inputs(): ), phi_vol=dict( argstr='%s', + extensions=None, mandatory=True, position=1, ), theta_vol=dict( argstr='%s', + extensions=None, mandatory=True, position=0, ), @@ -43,8 +47,8 @@ def test_MakeDyadicVectors_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_MakeDyadicVectors_outputs(): output_map = dict( - dispersion=dict(), - dyads=dict(), + dispersion=dict(extensions=None, ), + dyads=dict(extensions=None, ), ) outputs = MakeDyadicVectors.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py b/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py index 64b0f8b089..061bb66e99 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py +++ b/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py @@ -12,6 +12,7 @@ def test_MathsCommand_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -25,6 +26,7 @@ def test_MathsCommand_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=-2, @@ -41,7 +43,7 @@ def test_MathsCommand_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MathsCommand_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = MathsCommand.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MaxImage.py b/nipype/interfaces/fsl/tests/test_auto_MaxImage.py index 9910f9c4e8..608a886199 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MaxImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MaxImage.py @@ -17,6 +17,7 @@ def test_MaxImage_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -30,6 +31,7 @@ def test_MaxImage_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=-2, @@ -46,7 +48,7 @@ def test_MaxImage_inputs(): for metakey, value in list(metadata.items()): assert 
getattr(inputs.traits()[key], metakey) == value def test_MaxImage_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = MaxImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py b/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py index f49c5f462c..fb6da29ee6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py @@ -17,6 +17,7 @@ def test_MaxnImage_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -30,6 +31,7 @@ def test_MaxnImage_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=-2, @@ -46,7 +48,7 @@ def test_MaxnImage_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MaxnImage_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = MaxnImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MeanImage.py b/nipype/interfaces/fsl/tests/test_auto_MeanImage.py index 2172dcfa9e..745be40f92 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MeanImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MeanImage.py @@ -17,6 +17,7 @@ def test_MeanImage_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -30,6 +31,7 @@ def test_MeanImage_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=-2, @@ -46,7 +48,7 @@ def test_MeanImage_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MeanImage_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = MeanImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MedianImage.py b/nipype/interfaces/fsl/tests/test_auto_MedianImage.py index c14bf8d839..e777ffbc34 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MedianImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MedianImage.py @@ -17,6 +17,7 @@ def test_MedianImage_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -30,6 +31,7 @@ def test_MedianImage_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=-2, @@ -46,7 +48,7 @@ def test_MedianImage_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MedianImage_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = MedianImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Merge.py b/nipype/interfaces/fsl/tests/test_auto_Merge.py index 826270239e..d6176eb92e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Merge.py +++ b/nipype/interfaces/fsl/tests/test_auto_Merge.py @@ -22,6 +22,7 @@ def test_Merge_inputs(): ), merged_file=dict( argstr='%s', + extensions=None, hash_files=False, name_source='in_files', name_template='%s_merged', @@ -39,7 +40,7 @@ def test_Merge_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Merge_outputs(): - output_map = 
dict(merged_file=dict(), ) + output_map = dict(merged_file=dict(extensions=None, ), ) outputs = Merge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MinImage.py b/nipype/interfaces/fsl/tests/test_auto_MinImage.py index 4e9002c259..88561bbe78 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MinImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MinImage.py @@ -17,6 +17,7 @@ def test_MinImage_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -30,6 +31,7 @@ def test_MinImage_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=-2, @@ -46,7 +48,7 @@ def test_MinImage_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MinImage_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = MinImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py b/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py index 97d2426b53..1c199ca83f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py +++ b/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py @@ -13,13 +13,18 @@ def test_MotionOutliers_inputs(): ), in_file=dict( argstr='-i %s', + extensions=None, mandatory=True, ), - mask=dict(argstr='-m %s', ), + mask=dict( + argstr='-m %s', + extensions=None, + ), metric=dict(argstr='--%s', ), no_motion_correction=dict(argstr='--nomoco', ), out_file=dict( argstr='-o %s', + extensions=None, hash_files=False, keep_extension=True, name_source='in_file', @@ -27,6 +32,7 @@ def test_MotionOutliers_inputs(): ), out_metric_plot=dict( argstr='-p %s', + extensions=None, hash_files=False, keep_extension=True, name_source='in_file', @@ -34,6 +40,7 @@ def test_MotionOutliers_inputs(): ), out_metric_values=dict( argstr='-s %s', + extensions=None, hash_files=False, keep_extension=True, name_source='in_file', @@ -49,9 +56,9 @@ def test_MotionOutliers_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_MotionOutliers_outputs(): output_map = dict( - out_file=dict(), - out_metric_plot=dict(), - out_metric_values=dict(), + out_file=dict(extensions=None, ), + out_metric_plot=dict(extensions=None, ), + out_metric_values=dict(extensions=None, ), ) outputs = MotionOutliers.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py b/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py index 50a2977a4c..edeb802c1c 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py @@ -12,6 +12,7 @@ def test_MultiImageMaths_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -31,6 +32,7 @@ def test_MultiImageMaths_inputs(): operand_files=dict(mandatory=True, ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=-2, @@ -47,7 +49,7 @@ def test_MultiImageMaths_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MultiImageMaths_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = MultiImageMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py 
b/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py index 78bd97ff85..05c67e6d94 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py +++ b/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py @@ -16,10 +16,10 @@ def test_MultipleRegressDesign_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_MultipleRegressDesign_outputs(): output_map = dict( - design_con=dict(), - design_fts=dict(), - design_grp=dict(), - design_mat=dict(), + design_con=dict(extensions=None, ), + design_fts=dict(extensions=None, ), + design_grp=dict(extensions=None, ), + design_mat=dict(extensions=None, ), ) outputs = MultipleRegressDesign.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Overlay.py b/nipype/interfaces/fsl/tests/test_auto_Overlay.py index 9818ae1eda..de4024c36d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Overlay.py +++ b/nipype/interfaces/fsl/tests/test_auto_Overlay.py @@ -14,6 +14,7 @@ def test_Overlay_inputs(): ), background_image=dict( argstr='%s', + extensions=None, mandatory=True, position=4, ), @@ -35,6 +36,7 @@ def test_Overlay_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=-1, @@ -52,11 +54,13 @@ def test_Overlay_inputs(): ), stat_image=dict( argstr='%s', + extensions=None, mandatory=True, position=6, ), stat_image2=dict( argstr='%s', + extensions=None, position=9, xor=['show_negative_stats'], ), @@ -85,7 +89,7 @@ def test_Overlay_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Overlay_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Overlay.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py b/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py index 328a8e3272..c160745dd5 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py +++ b/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py @@ -8,6 +8,7 @@ def test_PRELUDE_inputs(): args=dict(argstr='%s', ), complex_phase_file=dict( argstr='--complex=%s', + extensions=None, mandatory=True, xor=['magnitude_file', 'phase_file'], ), @@ -18,19 +19,25 @@ def test_PRELUDE_inputs(): ), label_file=dict( argstr='--labels=%s', + extensions=None, hash_files=False, ), labelprocess2d=dict(argstr='--labelslices', ), magnitude_file=dict( argstr='--abs=%s', + extensions=None, mandatory=True, xor=['complex_phase_file'], ), - mask_file=dict(argstr='--mask=%s', ), + mask_file=dict( + argstr='--mask=%s', + extensions=None, + ), num_partitions=dict(argstr='--numphasesplit=%d', ), output_type=dict(), phase_file=dict( argstr='--phase=%s', + extensions=None, mandatory=True, xor=['complex_phase_file'], ), @@ -44,17 +51,20 @@ def test_PRELUDE_inputs(): ), rawphase_file=dict( argstr='--rawphase=%s', + extensions=None, hash_files=False, ), removeramps=dict(argstr='--removeramps', ), savemask_file=dict( argstr='--savemask=%s', + extensions=None, hash_files=False, ), start=dict(argstr='--start=%d', ), threshold=dict(argstr='--thresh=%.10f', ), unwrapped_phase_file=dict( argstr='--unwrap=%s', + extensions=None, genfile=True, hash_files=False, ), @@ -65,7 +75,7 @@ def test_PRELUDE_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_PRELUDE_outputs(): - output_map = dict(unwrapped_phase_file=dict(), ) + output_map = dict(unwrapped_phase_file=dict(extensions=None, ), ) 
outputs = PRELUDE.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py b/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py index 4e08c18db0..7a27272e67 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py @@ -17,6 +17,7 @@ def test_PercentileImage_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -30,6 +31,7 @@ def test_PercentileImage_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=-2, @@ -50,7 +52,7 @@ def test_PercentileImage_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_PercentileImage_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = PercentileImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py b/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py index 45a5b43945..d88374f105 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py +++ b/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py @@ -18,6 +18,7 @@ def test_PlotMotionParams_inputs(): in_source=dict(mandatory=True, ), out_file=dict( argstr='-o %s', + extensions=None, genfile=True, hash_files=False, ), @@ -34,7 +35,7 @@ def test_PlotMotionParams_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_PlotMotionParams_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = PlotMotionParams.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py b/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py index 62ced498e1..d923f16b6b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py +++ b/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py @@ -16,9 +16,13 @@ def test_PlotTimeSeries_inputs(): position=1, ), labels=dict(argstr='%s', ), - legend_file=dict(argstr='--legend=%s', ), + legend_file=dict( + argstr='--legend=%s', + extensions=None, + ), out_file=dict( argstr='-o %s', + extensions=None, genfile=True, hash_files=False, ), @@ -62,7 +66,7 @@ def test_PlotTimeSeries_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_PlotTimeSeries_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = PlotTimeSeries.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py b/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py index 18a812c00b..15e6adc101 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py +++ b/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py @@ -12,11 +12,13 @@ def test_PowerSpectrum_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=0, ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=1, @@ -29,7 +31,7 @@ def test_PowerSpectrum_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_PowerSpectrum_outputs(): - output_map = dict(out_file=dict(), ) + 
output_map = dict(out_file=dict(extensions=None, ), ) outputs = PowerSpectrum.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py b/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py index 66bfd51fab..d3515701b1 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py +++ b/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py @@ -18,11 +18,13 @@ def test_PrepareFieldmap_inputs(): ), in_magnitude=dict( argstr='%s', + extensions=None, mandatory=True, position=3, ), in_phase=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -33,6 +35,7 @@ def test_PrepareFieldmap_inputs(): ), out_fieldmap=dict( argstr='%s', + extensions=None, position=4, ), output_type=dict(), @@ -48,7 +51,7 @@ def test_PrepareFieldmap_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_PrepareFieldmap_outputs(): - output_map = dict(out_fieldmap=dict(), ) + output_map = dict(out_fieldmap=dict(extensions=None, ), ) outputs = PrepareFieldmap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py index 5368d0dd37..689b8c755d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py +++ b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py @@ -6,7 +6,10 @@ def test_ProbTrackX_inputs(): input_map = dict( args=dict(argstr='%s', ), - avoid_mp=dict(argstr='--avoid=%s', ), + avoid_mp=dict( + argstr='--avoid=%s', + extensions=None, + ), c_thresh=dict(argstr='--cthr=%.3f', ), correct_path_distribution=dict(argstr='--pd', ), dist_thresh=dict(argstr='--distthresh=%.3f', ), @@ -20,14 +23,24 @@ def test_ProbTrackX_inputs(): usedefault=True, ), fsamples=dict(mandatory=True, ), - inv_xfm=dict(argstr='--invxfm=%s', ), + inv_xfm=dict( + argstr='--invxfm=%s', + extensions=None, + ), loop_check=dict(argstr='--loopcheck', ), mask=dict( argstr='-m %s', + extensions=None, mandatory=True, ), - mask2=dict(argstr='--mask2=%s', ), - mesh=dict(argstr='--mesh=%s', ), + mask2=dict( + argstr='--mask2=%s', + extensions=None, + ), + mesh=dict( + argstr='--mesh=%s', + extensions=None, + ), mod_euler=dict(argstr='--modeuler', ), mode=dict( argstr='--mode=%s', @@ -62,15 +75,27 @@ def test_ProbTrackX_inputs(): argstr='--seed=%s', mandatory=True, ), - seed_ref=dict(argstr='--seedref=%s', ), + seed_ref=dict( + argstr='--seedref=%s', + extensions=None, + ), step_length=dict(argstr='--steplength=%.3f', ), - stop_mask=dict(argstr='--stop=%s', ), + stop_mask=dict( + argstr='--stop=%s', + extensions=None, + ), target_masks=dict(argstr='--targetmasks=%s', ), thsamples=dict(mandatory=True, ), use_anisotropy=dict(argstr='--usef', ), verbose=dict(argstr='--verbose=%d', ), - waypoints=dict(argstr='--waypoints=%s', ), - xfm=dict(argstr='--xfm=%s', ), + waypoints=dict( + argstr='--waypoints=%s', + extensions=None, + ), + xfm=dict( + argstr='--xfm=%s', + extensions=None, + ), ) inputs = ProbTrackX.input_spec() @@ -80,10 +105,10 @@ def test_ProbTrackX_inputs(): def test_ProbTrackX_outputs(): output_map = dict( fdt_paths=dict(), - log=dict(), + log=dict(extensions=None, ), particle_files=dict(), targets=dict(), - way_total=dict(), + way_total=dict(extensions=None, ), ) outputs = ProbTrackX.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py index 8592b5ae1a..8ddf5f3a9a 100644 --- 
a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py +++ b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py @@ -6,9 +6,15 @@ def test_ProbTrackX2_inputs(): input_map = dict( args=dict(argstr='%s', ), - avoid_mp=dict(argstr='--avoid=%s', ), + avoid_mp=dict( + argstr='--avoid=%s', + extensions=None, + ), c_thresh=dict(argstr='--cthr=%.3f', ), - colmask4=dict(argstr='--colmask4=%s', ), + colmask4=dict( + argstr='--colmask4=%s', + extensions=None, + ), correct_path_distribution=dict(argstr='--pd', ), dist_thresh=dict(argstr='--distthresh=%.3f', ), distthresh1=dict(argstr='--distthresh1=%.3f', ), @@ -18,17 +24,27 @@ def test_ProbTrackX2_inputs(): usedefault=True, ), fibst=dict(argstr='--fibst=%d', ), - fopd=dict(argstr='--fopd=%s', ), + fopd=dict( + argstr='--fopd=%s', + extensions=None, + ), force_dir=dict( argstr='--forcedir', usedefault=True, ), fsamples=dict(mandatory=True, ), - inv_xfm=dict(argstr='--invxfm=%s', ), + inv_xfm=dict( + argstr='--invxfm=%s', + extensions=None, + ), loop_check=dict(argstr='--loopcheck', ), - lrtarget3=dict(argstr='--lrtarget3=%s', ), + lrtarget3=dict( + argstr='--lrtarget3=%s', + extensions=None, + ), mask=dict( argstr='-m %s', + extensions=None, mandatory=True, ), meshspace=dict(argstr='--meshspace=%s', ), @@ -73,21 +89,42 @@ def test_ProbTrackX2_inputs(): argstr='--seed=%s', mandatory=True, ), - seed_ref=dict(argstr='--seedref=%s', ), + seed_ref=dict( + argstr='--seedref=%s', + extensions=None, + ), simple=dict(argstr='--simple', ), step_length=dict(argstr='--steplength=%.3f', ), - stop_mask=dict(argstr='--stop=%s', ), - target2=dict(argstr='--target2=%s', ), - target3=dict(argstr='--target3=%s', ), - target4=dict(argstr='--target4=%s', ), + stop_mask=dict( + argstr='--stop=%s', + extensions=None, + ), + target2=dict( + argstr='--target2=%s', + extensions=None, + ), + target3=dict( + argstr='--target3=%s', + extensions=None, + ), + target4=dict( + argstr='--target4=%s', + extensions=None, + ), target_masks=dict(argstr='--targetmasks=%s', ), thsamples=dict(mandatory=True, ), use_anisotropy=dict(argstr='--usef', ), verbose=dict(argstr='--verbose=%d', ), waycond=dict(argstr='--waycond=%s', ), wayorder=dict(argstr='--wayorder', ), - waypoints=dict(argstr='--waypoints=%s', ), - xfm=dict(argstr='--xfm=%s', ), + waypoints=dict( + argstr='--waypoints=%s', + extensions=None, + ), + xfm=dict( + argstr='--xfm=%s', + extensions=None, + ), ) inputs = ProbTrackX2.input_spec() @@ -97,15 +134,15 @@ def test_ProbTrackX2_inputs(): def test_ProbTrackX2_outputs(): output_map = dict( fdt_paths=dict(), - log=dict(), - lookup_tractspace=dict(), - matrix1_dot=dict(), - matrix2_dot=dict(), - matrix3_dot=dict(), - network_matrix=dict(), + log=dict(extensions=None, ), + lookup_tractspace=dict(extensions=None, ), + matrix1_dot=dict(extensions=None, ), + matrix2_dot=dict(extensions=None, ), + matrix3_dot=dict(extensions=None, ), + network_matrix=dict(extensions=None, ), particle_files=dict(), targets=dict(), - way_total=dict(), + way_total=dict(extensions=None, ), ) outputs = ProbTrackX2.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Randomise.py b/nipype/interfaces/fsl/tests/test_auto_Randomise.py index 3d52347265..21a9142639 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Randomise.py +++ b/nipype/interfaces/fsl/tests/test_auto_Randomise.py @@ -16,6 +16,7 @@ def test_Randomise_inputs(): demean=dict(argstr='-D', ), design_mat=dict( argstr='-d %s', + extensions=None, position=2, ), environ=dict( @@ -25,13 +26,20 @@ def test_Randomise_inputs(): 
f_c_thresh=dict(argstr='-F %.2f', ), f_cm_thresh=dict(argstr='-S %.2f', ), f_only=dict(argstr='--f_only', ), - fcon=dict(argstr='-f %s', ), + fcon=dict( + argstr='-f %s', + extensions=None, + ), in_file=dict( argstr='-i %s', + extensions=None, mandatory=True, position=0, ), - mask=dict(argstr='-m %s', ), + mask=dict( + argstr='-m %s', + extensions=None, + ), num_perm=dict(argstr='-n %d', ), one_sample_group_mean=dict(argstr='-1', ), output_type=dict(), @@ -42,6 +50,7 @@ def test_Randomise_inputs(): show_total_perms=dict(argstr='-q', ), tcon=dict( argstr='-t %s', + extensions=None, position=3, ), tfce=dict(argstr='-T', ), @@ -51,7 +60,10 @@ def test_Randomise_inputs(): tfce_H=dict(argstr='--tfce_H=%.2f', ), var_smooth=dict(argstr='-v %d', ), vox_p_values=dict(argstr='-x', ), - x_block_labels=dict(argstr='-e %s', ), + x_block_labels=dict( + argstr='-e %s', + extensions=None, + ), ) inputs = Randomise.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py b/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py index 76647d82d8..aeced99424 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py +++ b/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py @@ -12,10 +12,12 @@ def test_Reorient2Std_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, ), @@ -27,7 +29,7 @@ def test_Reorient2Std_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Reorient2Std_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Reorient2Std.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py b/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py index e1cbac6fc9..d7adfa8ca2 100644 --- a/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py +++ b/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py @@ -13,17 +13,20 @@ def test_RobustFOV_inputs(): ), in_file=dict( argstr='-i %s', + extensions=None, mandatory=True, position=0, ), out_roi=dict( argstr='-r %s', + extensions=None, hash_files=False, name_source=['in_file'], name_template='%s_ROI', ), out_transform=dict( argstr='-m %s', + extensions=None, hash_files=False, name_source=['in_file'], name_template='%s_to_ROI', @@ -37,8 +40,8 @@ def test_RobustFOV_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_RobustFOV_outputs(): output_map = dict( - out_roi=dict(), - out_transform=dict(), + out_roi=dict(extensions=None, ), + out_transform=dict(extensions=None, ), ) outputs = RobustFOV.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_SMM.py b/nipype/interfaces/fsl/tests/test_auto_SMM.py index 1e1cc308cc..f42eb5189a 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SMM.py +++ b/nipype/interfaces/fsl/tests/test_auto_SMM.py @@ -13,6 +13,7 @@ def test_SMM_inputs(): mask=dict( argstr='--mask="%s"', copyfile=False, + extensions=None, mandatory=True, position=1, ), @@ -24,6 +25,7 @@ def test_SMM_inputs(): spatial_data_file=dict( argstr='--sdf="%s"', copyfile=False, + extensions=None, mandatory=True, position=0, ), @@ -35,9 +37,9 @@ def test_SMM_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_SMM_outputs(): output_map = dict( - activation_p_map=dict(), - deactivation_p_map=dict(), - null_p_map=dict(), + activation_p_map=dict(extensions=None, ), + deactivation_p_map=dict(extensions=None, 
), + null_p_map=dict(extensions=None, ), ) outputs = SMM.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_SUSAN.py b/nipype/interfaces/fsl/tests/test_auto_SUSAN.py index ebbef0d427..69616b77ac 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SUSAN.py +++ b/nipype/interfaces/fsl/tests/test_auto_SUSAN.py @@ -27,11 +27,13 @@ def test_SUSAN_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=1, ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=-1, @@ -54,7 +56,7 @@ def test_SUSAN_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SUSAN_outputs(): - output_map = dict(smoothed_file=dict(), ) + output_map = dict(smoothed_file=dict(extensions=None, ), ) outputs = SUSAN.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_SigLoss.py b/nipype/interfaces/fsl/tests/test_auto_SigLoss.py index f3be97b350..dc2b91a47f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SigLoss.py +++ b/nipype/interfaces/fsl/tests/test_auto_SigLoss.py @@ -13,11 +13,16 @@ def test_SigLoss_inputs(): ), in_file=dict( argstr='-i %s', + extensions=None, mandatory=True, ), - mask_file=dict(argstr='-m %s', ), + mask_file=dict( + argstr='-m %s', + extensions=None, + ), out_file=dict( argstr='-s %s', + extensions=None, genfile=True, ), output_type=dict(), @@ -29,7 +34,7 @@ def test_SigLoss_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SigLoss_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = SigLoss.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Slice.py b/nipype/interfaces/fsl/tests/test_auto_Slice.py index d1e9093e37..d6202b6514 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Slice.py +++ b/nipype/interfaces/fsl/tests/test_auto_Slice.py @@ -13,6 +13,7 @@ def test_Slice_inputs(): in_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=0, ), diff --git a/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py b/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py index 75b9918ad9..71915c8da9 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py +++ b/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py @@ -6,8 +6,14 @@ def test_SliceTimer_inputs(): input_map = dict( args=dict(argstr='%s', ), - custom_order=dict(argstr='--ocustom=%s', ), - custom_timings=dict(argstr='--tcustom=%s', ), + custom_order=dict( + argstr='--ocustom=%s', + extensions=None, + ), + custom_timings=dict( + argstr='--tcustom=%s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, @@ -15,6 +21,7 @@ def test_SliceTimer_inputs(): global_shift=dict(argstr='--tglobal', ), in_file=dict( argstr='--in=%s', + extensions=None, mandatory=True, position=0, ), @@ -22,6 +29,7 @@ def test_SliceTimer_inputs(): interleaved=dict(argstr='--odd', ), out_file=dict( argstr='--out=%s', + extensions=None, genfile=True, hash_files=False, ), @@ -35,7 +43,7 @@ def test_SliceTimer_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SliceTimer_outputs(): - output_map = dict(slice_time_corrected_file=dict(), ) + output_map = dict(slice_time_corrected_file=dict(extensions=None, ), ) outputs = SliceTimer.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Slicer.py b/nipype/interfaces/fsl/tests/test_auto_Slicer.py index d313cda474..14ad2d375e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Slicer.py +++ b/nipype/interfaces/fsl/tests/test_auto_Slicer.py @@ -14,6 +14,7 @@ def test_Slicer_inputs(): args=dict(argstr='%s', ), colour_map=dict( argstr='-l %s', + extensions=None, position=4, ), dither_edges=dict( @@ -26,6 +27,7 @@ def test_Slicer_inputs(): ), image_edges=dict( argstr='%s', + extensions=None, position=2, ), image_width=dict( @@ -34,6 +36,7 @@ def test_Slicer_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=1, ), @@ -57,6 +60,7 @@ def test_Slicer_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=-1, @@ -98,7 +102,7 @@ def test_Slicer_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Slicer_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Slicer.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Smooth.py b/nipype/interfaces/fsl/tests/test_auto_Smooth.py index e8d8e06117..a08bd946a0 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Smooth.py +++ b/nipype/interfaces/fsl/tests/test_auto_Smooth.py @@ -18,6 +18,7 @@ def test_Smooth_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=0, ), @@ -30,6 +31,7 @@ def test_Smooth_inputs(): ), smoothed_file=dict( argstr='%s', + extensions=None, hash_files=False, name_source=['in_file'], name_template='%s_smooth', @@ -42,7 +44,7 @@ def test_Smooth_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Smooth_outputs(): - output_map = dict(smoothed_file=dict(), ) + output_map = dict(smoothed_file=dict(extensions=None, ), ) outputs = Smooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py b/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py index df7544f931..21c1f13b62 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py +++ b/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py @@ -17,15 +17,18 @@ def test_SmoothEstimate_inputs(): ), mask_file=dict( argstr='--mask=%s', + extensions=None, mandatory=True, ), output_type=dict(), residual_fit_file=dict( argstr='--res=%s', + extensions=None, requires=['dof'], ), zstat_file=dict( argstr='--zstat=%s', + extensions=None, xor=['dof'], ), ) diff --git a/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py b/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py index d62f904655..3ef493d752 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py +++ b/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py @@ -12,6 +12,7 @@ def test_SpatialFilter_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -21,6 +22,7 @@ def test_SpatialFilter_inputs(): ), kernel_file=dict( argstr='%s', + extensions=None, position=5, xor=['kernel_size'], ), @@ -44,6 +46,7 @@ def test_SpatialFilter_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=-2, @@ -60,7 +63,7 @@ def test_SpatialFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def 
test_SpatialFilter_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = SpatialFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Split.py b/nipype/interfaces/fsl/tests/test_auto_Split.py index cec2ab462d..fed820ebbe 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Split.py +++ b/nipype/interfaces/fsl/tests/test_auto_Split.py @@ -17,6 +17,7 @@ def test_Split_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=0, ), diff --git a/nipype/interfaces/fsl/tests/test_auto_StdImage.py b/nipype/interfaces/fsl/tests/test_auto_StdImage.py index 302ab9c4c2..134d5d0d16 100644 --- a/nipype/interfaces/fsl/tests/test_auto_StdImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_StdImage.py @@ -17,6 +17,7 @@ def test_StdImage_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -30,6 +31,7 @@ def test_StdImage_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=-2, @@ -46,7 +48,7 @@ def test_StdImage_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_StdImage_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = StdImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py b/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py index 7cbb57491c..78aca77b26 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py +++ b/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py @@ -12,6 +12,7 @@ def test_SwapDimensions_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position='1', ), @@ -21,6 +22,7 @@ def test_SwapDimensions_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, ), @@ -32,7 +34,7 @@ def test_SwapDimensions_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SwapDimensions_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = SwapDimensions.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_TOPUP.py b/nipype/interfaces/fsl/tests/test_auto_TOPUP.py index 3f91b3722a..ef3fed494c 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TOPUP.py +++ b/nipype/interfaces/fsl/tests/test_auto_TOPUP.py @@ -18,6 +18,7 @@ def test_TOPUP_inputs(): ), encoding_file=dict( argstr='--datain=%s', + extensions=None, mandatory=True, xor=['encoding_direction'], ), @@ -29,6 +30,7 @@ def test_TOPUP_inputs(): fwhm=dict(argstr='--fwhm=%f', ), in_file=dict( argstr='--imain=%s', + extensions=None, mandatory=True, ), interp=dict(argstr='--interp=%s', ), @@ -37,18 +39,21 @@ def test_TOPUP_inputs(): numprec=dict(argstr='--numprec=%s', ), out_base=dict( argstr='--out=%s', + extensions=None, hash_files=False, name_source=['in_file'], name_template='%s_base', ), out_corrected=dict( argstr='--iout=%s', + extensions=None, hash_files=False, name_source=['in_file'], name_template='%s_corrected', ), out_field=dict( argstr='--fout=%s', + extensions=None, hash_files=False, name_source=['in_file'], name_template='%s_field', @@ -60,6 +65,7 @@ def test_TOPUP_inputs(): ), out_logfile=dict( argstr='--logout=%s', + extensions=None, 
hash_files=False, keep_extension=True, name_source=['in_file'], @@ -97,14 +103,14 @@ def test_TOPUP_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_TOPUP_outputs(): output_map = dict( - out_corrected=dict(), - out_enc_file=dict(), - out_field=dict(), - out_fieldcoef=dict(), + out_corrected=dict(extensions=None, ), + out_enc_file=dict(extensions=None, ), + out_field=dict(extensions=None, ), + out_fieldcoef=dict(extensions=None, ), out_jacs=dict(), - out_logfile=dict(), + out_logfile=dict(extensions=None, ), out_mats=dict(), - out_movpar=dict(), + out_movpar=dict(extensions=None, ), out_warps=dict(), ) outputs = TOPUP.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py b/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py index c762b99d31..61145ebc1a 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py +++ b/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py @@ -17,6 +17,7 @@ def test_TemporalFilter_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -35,6 +36,7 @@ def test_TemporalFilter_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=-2, @@ -51,7 +53,7 @@ def test_TemporalFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TemporalFilter_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = TemporalFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Threshold.py b/nipype/interfaces/fsl/tests/test_auto_Threshold.py index 5c2bb46cc8..e09faaa6ea 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Threshold.py +++ b/nipype/interfaces/fsl/tests/test_auto_Threshold.py @@ -13,6 +13,7 @@ def test_Threshold_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -26,6 +27,7 @@ def test_Threshold_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=-2, @@ -49,7 +51,7 @@ def test_Threshold_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Threshold_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Threshold.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py b/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py index 4308ee8153..b46c9d8839 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py +++ b/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py @@ -5,17 +5,24 @@ def test_TractSkeleton_inputs(): input_map = dict( - alt_data_file=dict(argstr='-a %s', ), - alt_skeleton=dict(argstr='-s %s', ), + alt_data_file=dict( + argstr='-a %s', + extensions=None, + ), + alt_skeleton=dict( + argstr='-s %s', + extensions=None, + ), args=dict(argstr='%s', ), - data_file=dict(), - distance_map=dict(), + data_file=dict(extensions=None, ), + distance_map=dict(extensions=None, ), environ=dict( nohash=True, usedefault=True, ), in_file=dict( argstr='-i %s', + extensions=None, mandatory=True, ), output_type=dict(), @@ -23,8 +30,11 @@ def test_TractSkeleton_inputs(): argstr='-p %.3f %s %s %s %s', requires=['threshold', 'distance_map', 'data_file'], ), - projected_data=dict(), - search_mask_file=dict(xor=['use_cingulum_mask'], ), + 
projected_data=dict(extensions=None, ), + search_mask_file=dict( + extensions=None, + xor=['use_cingulum_mask'], + ), skeleton_file=dict(argstr='-o %s', ), threshold=dict(), use_cingulum_mask=dict( @@ -39,8 +49,8 @@ def test_TractSkeleton_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_TractSkeleton_outputs(): output_map = dict( - projected_data=dict(), - skeleton_file=dict(), + projected_data=dict(extensions=None, ), + skeleton_file=dict(extensions=None, ), ) outputs = TractSkeleton.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Training.py b/nipype/interfaces/fsl/tests/test_auto_Training.py index 91d48e4c01..1917419503 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Training.py +++ b/nipype/interfaces/fsl/tests/test_auto_Training.py @@ -30,7 +30,7 @@ def test_Training_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Training_outputs(): - output_map = dict(trained_wts_file=dict(), ) + output_map = dict(trained_wts_file=dict(extensions=None, ), ) outputs = Training.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py b/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py index a0dcf2610d..c92f00eb3b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py @@ -12,6 +12,7 @@ def test_UnaryMaths_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -30,6 +31,7 @@ def test_UnaryMaths_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, position=-2, @@ -46,7 +48,7 @@ def test_UnaryMaths_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_UnaryMaths_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = UnaryMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_VecReg.py b/nipype/interfaces/fsl/tests/test_auto_VecReg.py index 61a28f7369..18e7b03dad 100644 --- a/nipype/interfaces/fsl/tests/test_auto_VecReg.py +++ b/nipype/interfaces/fsl/tests/test_auto_VecReg.py @@ -5,7 +5,10 @@ def test_VecReg_inputs(): input_map = dict( - affine_mat=dict(argstr='-t %s', ), + affine_mat=dict( + argstr='-t %s', + extensions=None, + ), args=dict(argstr='%s', ), environ=dict( nohash=True, @@ -13,24 +16,42 @@ def test_VecReg_inputs(): ), in_file=dict( argstr='-i %s', + extensions=None, mandatory=True, ), interpolation=dict(argstr='--interp=%s', ), - mask=dict(argstr='-m %s', ), + mask=dict( + argstr='-m %s', + extensions=None, + ), out_file=dict( argstr='-o %s', + extensions=None, genfile=True, hash_files=False, ), output_type=dict(), - ref_mask=dict(argstr='--refmask=%s', ), + ref_mask=dict( + argstr='--refmask=%s', + extensions=None, + ), ref_vol=dict( argstr='-r %s', + extensions=None, mandatory=True, ), - rotation_mat=dict(argstr='--rotmat=%s', ), - rotation_warp=dict(argstr='--rotwarp=%s', ), - warp_field=dict(argstr='-w %s', ), + rotation_mat=dict( + argstr='--rotmat=%s', + extensions=None, + ), + rotation_warp=dict( + argstr='--rotwarp=%s', + extensions=None, + ), + warp_field=dict( + argstr='-w %s', + extensions=None, + ), ) inputs = VecReg.input_spec() @@ -38,7 +59,7 @@ def test_VecReg_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == 
value def test_VecReg_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = VecReg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py b/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py index 30924223cf..d3473c31a5 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py @@ -16,6 +16,7 @@ def test_WarpPoints_inputs(): ), dest_file=dict( argstr='-dest %s', + extensions=None, mandatory=True, ), environ=dict( @@ -24,24 +25,29 @@ def test_WarpPoints_inputs(): ), in_coords=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, ), out_file=dict( + extensions=None, name_source='in_coords', name_template='%s_warped', output_name='out_file', ), src_file=dict( argstr='-src %s', + extensions=None, mandatory=True, ), warp_file=dict( argstr='-warp %s', + extensions=None, xor=['xfm_file'], ), xfm_file=dict( argstr='-xfm %s', + extensions=None, xor=['warp_file'], ), ) @@ -51,7 +57,7 @@ def test_WarpPoints_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_WarpPoints_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = WarpPoints.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py b/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py index 627ef60ad6..e1afc49d9d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py @@ -20,23 +20,28 @@ def test_WarpPointsFromStd_inputs(): ), img_file=dict( argstr='-img %s', + extensions=None, mandatory=True, ), in_coords=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), std_file=dict( argstr='-std %s', + extensions=None, mandatory=True, ), warp_file=dict( argstr='-warp %s', + extensions=None, xor=['xfm_file'], ), xfm_file=dict( argstr='-xfm %s', + extensions=None, xor=['warp_file'], ), ) @@ -46,7 +51,7 @@ def test_WarpPointsFromStd_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_WarpPointsFromStd_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = WarpPointsFromStd.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py b/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py index 3d37ad1486..245c587b2e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py @@ -20,29 +20,38 @@ def test_WarpPointsToStd_inputs(): ), img_file=dict( argstr='-img %s', + extensions=None, mandatory=True, ), in_coords=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, ), out_file=dict( + extensions=None, name_source='in_coords', name_template='%s_warped', output_name='out_file', ), - premat_file=dict(argstr='-premat %s', ), + premat_file=dict( + argstr='-premat %s', + extensions=None, + ), std_file=dict( argstr='-std %s', + extensions=None, mandatory=True, ), warp_file=dict( argstr='-warp %s', + extensions=None, xor=['xfm_file'], ), xfm_file=dict( argstr='-xfm %s', + extensions=None, xor=['warp_file'], ), ) @@ -52,7 +61,7 @@ def test_WarpPointsToStd_inputs(): for 
metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_WarpPointsToStd_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = WarpPointsToStd.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py b/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py index 04d3a05f14..59f762c2eb 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py @@ -12,20 +12,26 @@ def test_WarpUtils_inputs(): ), in_file=dict( argstr='--in=%s', + extensions=None, mandatory=True, ), knot_space=dict(argstr='--knotspace=%d,%d,%d', ), out_file=dict( argstr='--out=%s', + extensions=None, name_source=['in_file'], output_name='out_file', position=-1, ), out_format=dict(argstr='--outformat=%s', ), - out_jacobian=dict(argstr='--jac=%s', ), + out_jacobian=dict( + argstr='--jac=%s', + extensions=None, + ), output_type=dict(), reference=dict( argstr='--ref=%s', + extensions=None, mandatory=True, ), warp_resolution=dict(argstr='--warpres=%0.4f,%0.4f,%0.4f', ), @@ -42,8 +48,8 @@ def test_WarpUtils_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_WarpUtils_outputs(): output_map = dict( - out_file=dict(), - out_jacobian=dict(), + out_file=dict(extensions=None, ), + out_jacobian=dict(extensions=None, ), ) outputs = WarpUtils.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_XFibres5.py b/nipype/interfaces/fsl/tests/test_auto_XFibres5.py index d72bb3bb4b..80ee0fca8f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_XFibres5.py +++ b/nipype/interfaces/fsl/tests/test_auto_XFibres5.py @@ -20,10 +20,12 @@ def test_XFibres5_inputs(): ), bvals=dict( argstr='--bvals=%s', + extensions=None, mandatory=True, ), bvecs=dict( argstr='--bvecs=%s', + extensions=None, mandatory=True, ), cnlinear=dict( @@ -32,6 +34,7 @@ def test_XFibres5_inputs(): ), dwi=dict( argstr='--data=%s', + extensions=None, mandatory=True, ), environ=dict( @@ -51,13 +54,17 @@ def test_XFibres5_inputs(): usedefault=True, ), fudge=dict(argstr='--fudge=%d', ), - gradnonlin=dict(argstr='--gradnonlin=%s', ), + gradnonlin=dict( + argstr='--gradnonlin=%s', + extensions=None, + ), logdir=dict( argstr='--logdir=%s', usedefault=True, ), mask=dict( argstr='--mask=%s', + extensions=None, mandatory=True, ), model=dict(argstr='--model=%d', ), @@ -103,10 +110,10 @@ def test_XFibres5_outputs(): output_map = dict( dyads=dict(), fsamples=dict(), - mean_S0samples=dict(), - mean_dsamples=dict(), + mean_S0samples=dict(extensions=None, ), + mean_dsamples=dict(extensions=None, ), mean_fsamples=dict(), - mean_tausamples=dict(), + mean_tausamples=dict(extensions=None, ), phsamples=dict(), thsamples=dict(), ) diff --git a/nipype/interfaces/minc/tests/test_auto_Average.py b/nipype/interfaces/minc/tests/test_auto_Average.py index 57bdc5ccd2..678ab93a52 100644 --- a/nipype/interfaces/minc/tests/test_auto_Average.py +++ b/nipype/interfaces/minc/tests/test_auto_Average.py @@ -115,6 +115,7 @@ def test_Average_inputs(): ), output_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, name_source=['input_files'], @@ -147,7 +148,7 @@ def test_Average_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Average_outputs(): - output_map = dict(output_file=dict(), ) + output_map = dict(output_file=dict(extensions=None, ), ) outputs = 
Average.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_BBox.py b/nipype/interfaces/minc/tests/test_auto_BBox.py index a34aeab9b5..c1b3515cea 100644 --- a/nipype/interfaces/minc/tests/test_auto_BBox.py +++ b/nipype/interfaces/minc/tests/test_auto_BBox.py @@ -15,6 +15,7 @@ def test_BBox_inputs(): format_mincreshape=dict(argstr='-mincreshape', ), input_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -28,6 +29,7 @@ def test_BBox_inputs(): position=-1, ), output_file=dict( + extensions=None, hash_files=False, keep_extension=False, name_source=['input_file'], @@ -46,7 +48,7 @@ def test_BBox_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_BBox_outputs(): - output_map = dict(output_file=dict(), ) + output_map = dict(output_file=dict(extensions=None, ), ) outputs = BBox.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Beast.py b/nipype/interfaces/minc/tests/test_auto_Beast.py index 4834cf3c4a..bc4705db2e 100644 --- a/nipype/interfaces/minc/tests/test_auto_Beast.py +++ b/nipype/interfaces/minc/tests/test_auto_Beast.py @@ -78,7 +78,7 @@ def test_Beast_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Beast_outputs(): - output_map = dict(output_file=dict(), ) + output_map = dict(output_file=dict(extensions=None, ), ) outputs = Beast.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_BestLinReg.py b/nipype/interfaces/minc/tests/test_auto_BestLinReg.py index fb9061040a..ef6e5af537 100644 --- a/nipype/interfaces/minc/tests/test_auto_BestLinReg.py +++ b/nipype/interfaces/minc/tests/test_auto_BestLinReg.py @@ -16,6 +16,7 @@ def test_BestLinReg_inputs(): ), output_mnc=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, keep_extension=False, @@ -25,6 +26,7 @@ def test_BestLinReg_inputs(): ), output_xfm=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, keep_extension=False, @@ -34,11 +36,13 @@ def test_BestLinReg_inputs(): ), source=dict( argstr='%s', + extensions=None, mandatory=True, position=-4, ), target=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), @@ -51,8 +55,8 @@ def test_BestLinReg_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_BestLinReg_outputs(): output_map = dict( - output_mnc=dict(), - output_xfm=dict(), + output_mnc=dict(extensions=None, ), + output_xfm=dict(extensions=None, ), ) outputs = BestLinReg.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_BigAverage.py b/nipype/interfaces/minc/tests/test_auto_BigAverage.py index ce1fb2b91e..b9933c116b 100644 --- a/nipype/interfaces/minc/tests/test_auto_BigAverage.py +++ b/nipype/interfaces/minc/tests/test_auto_BigAverage.py @@ -22,6 +22,7 @@ def test_BigAverage_inputs(): ), output_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, name_source=['input_files'], @@ -32,6 +33,7 @@ def test_BigAverage_inputs(): robust=dict(argstr='-robust', ), sd_file=dict( argstr='--sdfile %s', + extensions=None, hash_files=False, name_source=['input_files'], name_template='%s_bigaverage_stdev.mnc', @@ -46,8 +48,8 @@ def test_BigAverage_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_BigAverage_outputs(): output_map = dict( - output_file=dict(), - 
sd_file=dict(), + output_file=dict(extensions=None, ), + sd_file=dict(extensions=None, ), ) outputs = BigAverage.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Blob.py b/nipype/interfaces/minc/tests/test_auto_Blob.py index b489ac944a..b83d5b90a5 100644 --- a/nipype/interfaces/minc/tests/test_auto_Blob.py +++ b/nipype/interfaces/minc/tests/test_auto_Blob.py @@ -13,12 +13,14 @@ def test_Blob_inputs(): ), input_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), magnitude=dict(argstr='-magnitude', ), output_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, name_source=['input_file'], @@ -34,7 +36,7 @@ def test_Blob_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Blob_outputs(): - output_map = dict(output_file=dict(), ) + output_map = dict(output_file=dict(extensions=None, ), ) outputs = Blob.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Blur.py b/nipype/interfaces/minc/tests/test_auto_Blur.py index fb6e405012..2342d65974 100644 --- a/nipype/interfaces/minc/tests/test_auto_Blur.py +++ b/nipype/interfaces/minc/tests/test_auto_Blur.py @@ -32,12 +32,14 @@ def test_Blur_inputs(): gradient=dict(argstr='-gradient', ), input_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), no_apodize=dict(argstr='-no_apodize', ), output_file_base=dict( argstr='%s', + extensions=None, position=-1, ), partial=dict(argstr='-partial', ), @@ -58,12 +60,12 @@ def test_Blur_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Blur_outputs(): output_map = dict( - gradient_dxyz=dict(), - output_file=dict(), - partial_dx=dict(), - partial_dxyz=dict(), - partial_dy=dict(), - partial_dz=dict(), + gradient_dxyz=dict(extensions=None, ), + output_file=dict(extensions=None, ), + partial_dx=dict(extensions=None, ), + partial_dxyz=dict(extensions=None, ), + partial_dy=dict(extensions=None, ), + partial_dz=dict(extensions=None, ), ) outputs = Blur.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Calc.py b/nipype/interfaces/minc/tests/test_auto_Calc.py index 7bec782c1c..e9df677150 100644 --- a/nipype/interfaces/minc/tests/test_auto_Calc.py +++ b/nipype/interfaces/minc/tests/test_auto_Calc.py @@ -112,6 +112,7 @@ def test_Calc_inputs(): outfiles=dict(), output_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, name_source=['input_files'], @@ -148,7 +149,7 @@ def test_Calc_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Calc_outputs(): - output_map = dict(output_file=dict(), ) + output_map = dict(output_file=dict(extensions=None, ), ) outputs = Calc.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Convert.py b/nipype/interfaces/minc/tests/test_auto_Convert.py index 6df596c682..174efd8a10 100644 --- a/nipype/interfaces/minc/tests/test_auto_Convert.py +++ b/nipype/interfaces/minc/tests/test_auto_Convert.py @@ -18,11 +18,13 @@ def test_Convert_inputs(): ), input_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), output_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, name_source=['input_file'], @@ -38,7 +40,7 @@ def test_Convert_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Convert_outputs(): - 
output_map = dict(output_file=dict(), ) + output_map = dict(output_file=dict(extensions=None, ), ) outputs = Convert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Copy.py b/nipype/interfaces/minc/tests/test_auto_Copy.py index e91470ba6d..d92a63d812 100644 --- a/nipype/interfaces/minc/tests/test_auto_Copy.py +++ b/nipype/interfaces/minc/tests/test_auto_Copy.py @@ -12,11 +12,13 @@ def test_Copy_inputs(): ), input_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), output_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, name_source=['input_file'], @@ -38,7 +40,7 @@ def test_Copy_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Copy_outputs(): - output_map = dict(output_file=dict(), ) + output_map = dict(output_file=dict(extensions=None, ), ) outputs = Copy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Dump.py b/nipype/interfaces/minc/tests/test_auto_Dump.py index bcca2a4801..19c299dac8 100644 --- a/nipype/interfaces/minc/tests/test_auto_Dump.py +++ b/nipype/interfaces/minc/tests/test_auto_Dump.py @@ -28,6 +28,7 @@ def test_Dump_inputs(): ), input_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -39,6 +40,7 @@ def test_Dump_inputs(): position=-1, ), output_file=dict( + extensions=None, hash_files=False, keep_extension=False, name_source=['input_file'], @@ -57,7 +59,7 @@ def test_Dump_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Dump_outputs(): - output_map = dict(output_file=dict(), ) + output_map = dict(output_file=dict(extensions=None, ), ) outputs = Dump.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Extract.py b/nipype/interfaces/minc/tests/test_auto_Extract.py index 77126eac18..35f6162c7f 100644 --- a/nipype/interfaces/minc/tests/test_auto_Extract.py +++ b/nipype/interfaces/minc/tests/test_auto_Extract.py @@ -70,6 +70,7 @@ def test_Extract_inputs(): image_range=dict(argstr='-image_range %s %s', ), input_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -87,6 +88,7 @@ def test_Extract_inputs(): position=-1, ), output_file=dict( + extensions=None, hash_files=False, keep_extension=False, name_source=['input_file'], @@ -155,7 +157,7 @@ def test_Extract_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Extract_outputs(): - output_map = dict(output_file=dict(), ) + output_map = dict(output_file=dict(extensions=None, ), ) outputs = Extract.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py b/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py index c89dc65de6..0249c861bc 100644 --- a/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py +++ b/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py @@ -15,9 +15,13 @@ def test_Gennlxfm_inputs(): usedefault=True, ), ident=dict(argstr='-ident', ), - like=dict(argstr='-like %s', ), + like=dict( + argstr='-like %s', + extensions=None, + ), output_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, name_source=['like'], @@ -34,8 +38,8 @@ def test_Gennlxfm_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Gennlxfm_outputs(): 
output_map = dict( - output_file=dict(), - output_grid=dict(), + output_file=dict(extensions=None, ), + output_grid=dict(extensions=None, ), ) outputs = Gennlxfm.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Math.py b/nipype/interfaces/minc/tests/test_auto_Math.py index 1d011034d2..6bc142b15d 100644 --- a/nipype/interfaces/minc/tests/test_auto_Math.py +++ b/nipype/interfaces/minc/tests/test_auto_Math.py @@ -122,6 +122,7 @@ def test_Math_inputs(): nsegment=dict(argstr='-nsegment -const2 %s %s', ), output_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, name_source=['input_files'], @@ -161,7 +162,7 @@ def test_Math_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Math_outputs(): - output_map = dict(output_file=dict(), ) + output_map = dict(output_file=dict(extensions=None, ), ) outputs = Math.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_NlpFit.py b/nipype/interfaces/minc/tests/test_auto_NlpFit.py index 1a728a90dd..b0ad4087fa 100644 --- a/nipype/interfaces/minc/tests/test_auto_NlpFit.py +++ b/nipype/interfaces/minc/tests/test_auto_NlpFit.py @@ -12,6 +12,7 @@ def test_NlpFit_inputs(): ), config_file=dict( argstr='-config_file %s', + extensions=None, mandatory=True, ), environ=dict( @@ -20,25 +21,30 @@ def test_NlpFit_inputs(): ), init_xfm=dict( argstr='-init_xfm %s', + extensions=None, mandatory=True, ), input_grid_files=dict(), output_xfm=dict( argstr='%s', + extensions=None, genfile=True, position=-1, ), source=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), source_mask=dict( argstr='-source_mask %s', + extensions=None, mandatory=True, ), target=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -51,8 +57,8 @@ def test_NlpFit_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_NlpFit_outputs(): output_map = dict( - output_grid=dict(), - output_xfm=dict(), + output_grid=dict(extensions=None, ), + output_xfm=dict(extensions=None, ), ) outputs = NlpFit.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Norm.py b/nipype/interfaces/minc/tests/test_auto_Norm.py index 3a0d28f06d..9fb0d3c5ba 100644 --- a/nipype/interfaces/minc/tests/test_auto_Norm.py +++ b/nipype/interfaces/minc/tests/test_auto_Norm.py @@ -21,6 +21,7 @@ def test_Norm_inputs(): ), input_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -30,6 +31,7 @@ def test_Norm_inputs(): out_floor=dict(argstr='-out_floor %s', ), output_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, name_source=['input_file'], @@ -55,8 +57,8 @@ def test_Norm_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Norm_outputs(): output_map = dict( - output_file=dict(), - output_threshold_mask=dict(), + output_file=dict(extensions=None, ), + output_threshold_mask=dict(extensions=None, ), ) outputs = Norm.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Pik.py b/nipype/interfaces/minc/tests/test_auto_Pik.py index d74d9a86ad..748f21a976 100644 --- a/nipype/interfaces/minc/tests/test_auto_Pik.py +++ b/nipype/interfaces/minc/tests/test_auto_Pik.py @@ -30,6 +30,7 @@ def test_Pik_inputs(): ), input_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -38,6 +39,7 @@ def test_Pik_inputs(): minc_range=dict(argstr='--range %s %s', ), output_file=dict( argstr='%s', + extensions=None, genfile=True, 
hash_files=False, keep_extension=False, @@ -84,7 +86,7 @@ def test_Pik_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Pik_outputs(): - output_map = dict(output_file=dict(), ) + output_map = dict(output_file=dict(extensions=None, ), ) outputs = Pik.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Resample.py b/nipype/interfaces/minc/tests/test_auto_Resample.py index bd00bd224d..8d4b24ff41 100644 --- a/nipype/interfaces/minc/tests/test_auto_Resample.py +++ b/nipype/interfaces/minc/tests/test_auto_Resample.py @@ -84,6 +84,7 @@ def test_Resample_inputs(): ), input_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -118,6 +119,7 @@ def test_Resample_inputs(): origin=dict(argstr='-origin %s %s %s', ), output_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, name_source=['input_file'], @@ -243,7 +245,7 @@ def test_Resample_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Resample_outputs(): - output_map = dict(output_file=dict(), ) + output_map = dict(output_file=dict(extensions=None, ), ) outputs = Resample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Reshape.py b/nipype/interfaces/minc/tests/test_auto_Reshape.py index 45e6ddeb4a..669425da95 100644 --- a/nipype/interfaces/minc/tests/test_auto_Reshape.py +++ b/nipype/interfaces/minc/tests/test_auto_Reshape.py @@ -21,6 +21,7 @@ def test_Reshape_inputs(): ), output_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, name_source=['input_file'], @@ -36,7 +37,7 @@ def test_Reshape_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Reshape_outputs(): - output_map = dict(output_file=dict(), ) + output_map = dict(output_file=dict(extensions=None, ), ) outputs = Reshape.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_ToEcat.py b/nipype/interfaces/minc/tests/test_auto_ToEcat.py index 26a3ac0436..b755263566 100644 --- a/nipype/interfaces/minc/tests/test_auto_ToEcat.py +++ b/nipype/interfaces/minc/tests/test_auto_ToEcat.py @@ -21,12 +21,14 @@ def test_ToEcat_inputs(): ignore_study_variable=dict(argstr='-ignore_study_variable', ), input_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), no_decay_corr_fctr=dict(argstr='-no_decay_corr_fctr', ), output_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, keep_extension=False, @@ -42,7 +44,7 @@ def test_ToEcat_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ToEcat_outputs(): - output_map = dict(output_file=dict(), ) + output_map = dict(output_file=dict(extensions=None, ), ) outputs = ToEcat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_ToRaw.py b/nipype/interfaces/minc/tests/test_auto_ToRaw.py index e010da322d..aeda687c97 100644 --- a/nipype/interfaces/minc/tests/test_auto_ToRaw.py +++ b/nipype/interfaces/minc/tests/test_auto_ToRaw.py @@ -12,6 +12,7 @@ def test_ToRaw_inputs(): ), input_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -29,6 +30,7 @@ def test_ToRaw_inputs(): position=-1, ), output_file=dict( + extensions=None, 
hash_files=False, keep_extension=False, name_source=['input_file'], @@ -81,7 +83,7 @@ def test_ToRaw_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ToRaw_outputs(): - output_map = dict(output_file=dict(), ) + output_map = dict(output_file=dict(extensions=None, ), ) outputs = ToRaw.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_VolSymm.py b/nipype/interfaces/minc/tests/test_auto_VolSymm.py index 048ffcde9b..b710d59543 100644 --- a/nipype/interfaces/minc/tests/test_auto_VolSymm.py +++ b/nipype/interfaces/minc/tests/test_auto_VolSymm.py @@ -10,7 +10,10 @@ def test_VolSymm_inputs(): argstr='-clobber', usedefault=True, ), - config_file=dict(argstr='-config_file %s', ), + config_file=dict( + argstr='-config_file %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, @@ -26,6 +29,7 @@ def test_VolSymm_inputs(): nofit=dict(argstr='-nofit', ), output_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, name_source=['input_file'], @@ -53,9 +57,9 @@ def test_VolSymm_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_VolSymm_outputs(): output_map = dict( - output_file=dict(), - output_grid=dict(), - trans_file=dict(), + output_file=dict(extensions=None, ), + output_grid=dict(extensions=None, ), + trans_file=dict(extensions=None, ), ) outputs = VolSymm.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Volcentre.py b/nipype/interfaces/minc/tests/test_auto_Volcentre.py index b095751a4d..0e54c67583 100644 --- a/nipype/interfaces/minc/tests/test_auto_Volcentre.py +++ b/nipype/interfaces/minc/tests/test_auto_Volcentre.py @@ -18,11 +18,13 @@ def test_Volcentre_inputs(): ), input_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), output_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, name_source=['input_file'], @@ -38,7 +40,7 @@ def test_Volcentre_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Volcentre_outputs(): - output_map = dict(output_file=dict(), ) + output_map = dict(output_file=dict(extensions=None, ), ) outputs = Volcentre.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Voliso.py b/nipype/interfaces/minc/tests/test_auto_Voliso.py index 967642a328..33923046d0 100644 --- a/nipype/interfaces/minc/tests/test_auto_Voliso.py +++ b/nipype/interfaces/minc/tests/test_auto_Voliso.py @@ -17,6 +17,7 @@ def test_Voliso_inputs(): ), input_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -24,6 +25,7 @@ def test_Voliso_inputs(): minstep=dict(argstr='--minstep %s', ), output_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, name_source=['input_file'], @@ -38,7 +40,7 @@ def test_Voliso_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Voliso_outputs(): - output_map = dict(output_file=dict(), ) + output_map = dict(output_file=dict(extensions=None, ), ) outputs = Voliso.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Volpad.py b/nipype/interfaces/minc/tests/test_auto_Volpad.py index 865bc79e69..c5fe679935 100644 --- a/nipype/interfaces/minc/tests/test_auto_Volpad.py +++ b/nipype/interfaces/minc/tests/test_auto_Volpad.py @@ -19,11 
+19,13 @@ def test_Volpad_inputs(): ), input_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), output_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, name_source=['input_file'], @@ -40,7 +42,7 @@ def test_Volpad_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Volpad_outputs(): - output_map = dict(output_file=dict(), ) + output_map = dict(output_file=dict(extensions=None, ), ) outputs = Volpad.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_XfmAvg.py b/nipype/interfaces/minc/tests/test_auto_XfmAvg.py index 6d036a0c0e..283ef63e40 100644 --- a/nipype/interfaces/minc/tests/test_auto_XfmAvg.py +++ b/nipype/interfaces/minc/tests/test_auto_XfmAvg.py @@ -27,6 +27,7 @@ def test_XfmAvg_inputs(): input_grid_files=dict(), output_file=dict( argstr='%s', + extensions=None, genfile=True, position=-1, ), @@ -39,8 +40,8 @@ def test_XfmAvg_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_XfmAvg_outputs(): output_map = dict( - output_file=dict(), - output_grid=dict(), + output_file=dict(extensions=None, ), + output_grid=dict(extensions=None, ), ) outputs = XfmAvg.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_XfmConcat.py b/nipype/interfaces/minc/tests/test_auto_XfmConcat.py index eb748953ef..4e42b4ed45 100644 --- a/nipype/interfaces/minc/tests/test_auto_XfmConcat.py +++ b/nipype/interfaces/minc/tests/test_auto_XfmConcat.py @@ -23,6 +23,7 @@ def test_XfmConcat_inputs(): input_grid_files=dict(), output_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, name_source=['input_files'], @@ -38,7 +39,7 @@ def test_XfmConcat_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_XfmConcat_outputs(): output_map = dict( - output_file=dict(), + output_file=dict(extensions=None, ), output_grids=dict(), ) outputs = XfmConcat.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_XfmInvert.py b/nipype/interfaces/minc/tests/test_auto_XfmInvert.py index d729e90639..9e242300da 100644 --- a/nipype/interfaces/minc/tests/test_auto_XfmInvert.py +++ b/nipype/interfaces/minc/tests/test_auto_XfmInvert.py @@ -21,6 +21,7 @@ def test_XfmInvert_inputs(): ), output_file=dict( argstr='%s', + extensions=None, genfile=True, position=-1, ), @@ -33,8 +34,8 @@ def test_XfmInvert_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_XfmInvert_outputs(): output_map = dict( - output_file=dict(), - output_grid=dict(), + output_file=dict(extensions=None, ), + output_grid=dict(extensions=None, ), ) outputs = XfmInvert.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py index 16605acbb7..4bc44eaeb5 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py @@ -11,18 +11,36 @@ def test_JistBrainMgdmSegmentation_inputs(): usedefault=True, ), inAdjust=dict(argstr='--inAdjust %s', ), - inAtlas=dict(argstr='--inAtlas %s', ), + inAtlas=dict( + argstr='--inAtlas %s', + extensions=None, + ), inCompute=dict(argstr='--inCompute %s', ), inCurvature=dict(argstr='--inCurvature %f', ), inData=dict(argstr='--inData %f', ), - inFLAIR=dict(argstr='--inFLAIR %s', ), - inMP2RAGE=dict(argstr='--inMP2RAGE %s', ), - inMP2RAGE2=dict(argstr='--inMP2RAGE2 %s', ), - 
inMPRAGE=dict(argstr='--inMPRAGE %s', ), + inFLAIR=dict( + argstr='--inFLAIR %s', + extensions=None, + ), + inMP2RAGE=dict( + argstr='--inMP2RAGE %s', + extensions=None, + ), + inMP2RAGE2=dict( + argstr='--inMP2RAGE2 %s', + extensions=None, + ), + inMPRAGE=dict( + argstr='--inMPRAGE %s', + extensions=None, + ), inMax=dict(argstr='--inMax %d', ), inMin=dict(argstr='--inMin %f', ), inOutput=dict(argstr='--inOutput %s', ), - inPV=dict(argstr='--inPV %s', ), + inPV=dict( + argstr='--inPV %s', + extensions=None, + ), inPosterior=dict(argstr='--inPosterior %f', ), inSteps=dict(argstr='--inSteps %d', ), inTopology=dict(argstr='--inTopology %s', ), @@ -57,10 +75,10 @@ def test_JistBrainMgdmSegmentation_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_JistBrainMgdmSegmentation_outputs(): output_map = dict( - outLevelset=dict(), - outPosterior2=dict(), - outPosterior3=dict(), - outSegmented=dict(), + outLevelset=dict(extensions=None, ), + outPosterior2=dict(extensions=None, ), + outPosterior3=dict(extensions=None, ), + outSegmented=dict(extensions=None, ), ) outputs = JistBrainMgdmSegmentation.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py index ebb65bb789..9ecec6f789 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py @@ -11,8 +11,14 @@ def test_JistBrainMp2rageDuraEstimation_inputs(): usedefault=True, ), inDistance=dict(argstr='--inDistance %f', ), - inSecond=dict(argstr='--inSecond %s', ), - inSkull=dict(argstr='--inSkull %s', ), + inSecond=dict( + argstr='--inSecond %s', + extensions=None, + ), + inSkull=dict( + argstr='--inSkull %s', + extensions=None, + ), inoutput=dict(argstr='--inoutput %s', ), null=dict(argstr='--null %s', ), outDura=dict( @@ -32,7 +38,7 @@ def test_JistBrainMp2rageDuraEstimation_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_JistBrainMp2rageDuraEstimation_outputs(): - output_map = dict(outDura=dict(), ) + output_map = dict(outDura=dict(extensions=None, ), ) outputs = JistBrainMp2rageDuraEstimation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py index abf794b662..73d02f8c97 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py @@ -10,11 +10,23 @@ def test_JistBrainMp2rageSkullStripping_inputs(): nohash=True, usedefault=True, ), - inFilter=dict(argstr='--inFilter %s', ), - inSecond=dict(argstr='--inSecond %s', ), + inFilter=dict( + argstr='--inFilter %s', + extensions=None, + ), + inSecond=dict( + argstr='--inSecond %s', + extensions=None, + ), inSkip=dict(argstr='--inSkip %s', ), - inT1=dict(argstr='--inT1 %s', ), - inT1weighted=dict(argstr='--inT1weighted %s', ), + inT1=dict( + argstr='--inT1 %s', + extensions=None, + ), + inT1weighted=dict( + argstr='--inT1weighted %s', + extensions=None, + ), null=dict(argstr='--null %s', ), outBrain=dict( argstr='--outBrain %s', @@ -46,10 +58,10 @@ def test_JistBrainMp2rageSkullStripping_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_JistBrainMp2rageSkullStripping_outputs(): output_map = 
dict( - outBrain=dict(), - outMasked=dict(), - outMasked2=dict(), - outMasked3=dict(), + outBrain=dict(extensions=None, ), + outMasked=dict(extensions=None, ), + outMasked2=dict(extensions=None, ), + outMasked3=dict(extensions=None, ), ) outputs = JistBrainMp2rageSkullStripping.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py index bb86144c20..0ab4cea836 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py @@ -10,7 +10,10 @@ def test_JistBrainPartialVolumeFilter_inputs(): nohash=True, usedefault=True, ), - inInput=dict(argstr='--inInput %s', ), + inInput=dict( + argstr='--inInput %s', + extensions=None, + ), inPV=dict(argstr='--inPV %s', ), inoutput=dict(argstr='--inoutput %s', ), null=dict(argstr='--null %s', ), @@ -31,7 +34,7 @@ def test_JistBrainPartialVolumeFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_JistBrainPartialVolumeFilter_outputs(): - output_map = dict(outPartial=dict(), ) + output_map = dict(outPartial=dict(extensions=None, ), ) outputs = JistBrainPartialVolumeFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py b/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py index 407b9755ca..5c3a8b1cba 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py @@ -10,7 +10,10 @@ def test_JistCortexSurfaceMeshInflation_inputs(): nohash=True, usedefault=True, ), - inLevelset=dict(argstr='--inLevelset %s', ), + inLevelset=dict( + argstr='--inLevelset %s', + extensions=None, + ), inLorentzian=dict(argstr='--inLorentzian %s', ), inMax=dict(argstr='--inMax %d', ), inMean=dict(argstr='--inMean %f', ), @@ -40,8 +43,8 @@ def test_JistCortexSurfaceMeshInflation_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_JistCortexSurfaceMeshInflation_outputs(): output_map = dict( - outInflated=dict(), - outOriginal=dict(), + outInflated=dict(extensions=None, ), + outOriginal=dict(extensions=None, ), ) outputs = JistCortexSurfaceMeshInflation.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py b/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py index bfdace4944..9b02ce9346 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py @@ -12,10 +12,19 @@ def test_JistIntensityMp2rageMasking_inputs(): ), inBackground=dict(argstr='--inBackground %s', ), inMasking=dict(argstr='--inMasking %s', ), - inQuantitative=dict(argstr='--inQuantitative %s', ), - inSecond=dict(argstr='--inSecond %s', ), + inQuantitative=dict( + argstr='--inQuantitative %s', + extensions=None, + ), + inSecond=dict( + argstr='--inSecond %s', + extensions=None, + ), inSkip=dict(argstr='--inSkip %s', ), - inT1weighted=dict(argstr='--inT1weighted %s', ), + inT1weighted=dict( + argstr='--inT1weighted %s', + extensions=None, + ), null=dict(argstr='--null %s', ), outMasked=dict( argstr='--outMasked_T1_Map %s', @@ -47,10 +56,10 @@ def test_JistIntensityMp2rageMasking_inputs(): assert getattr(inputs.traits()[key], metakey) 
== value def test_JistIntensityMp2rageMasking_outputs(): output_map = dict( - outMasked=dict(), - outMasked2=dict(), - outSignal=dict(), - outSignal2=dict(), + outMasked=dict(extensions=None, ), + outMasked2=dict(extensions=None, ), + outSignal=dict(extensions=None, ), + outSignal2=dict(extensions=None, ), ) outputs = JistIntensityMp2rageMasking.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py index 12203a1aa6..e8740dca7f 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py @@ -10,8 +10,14 @@ def test_JistLaminarProfileCalculator_inputs(): nohash=True, usedefault=True, ), - inIntensity=dict(argstr='--inIntensity %s', ), - inMask=dict(argstr='--inMask %s', ), + inIntensity=dict( + argstr='--inIntensity %s', + extensions=None, + ), + inMask=dict( + argstr='--inMask %s', + extensions=None, + ), incomputed=dict(argstr='--incomputed %s', ), null=dict(argstr='--null %s', ), outResult=dict( @@ -31,7 +37,7 @@ def test_JistLaminarProfileCalculator_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_JistLaminarProfileCalculator_outputs(): - output_map = dict(outResult=dict(), ) + output_map = dict(outResult=dict(extensions=None, ), ) outputs = JistLaminarProfileCalculator.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py index ddc4d5d922..fd4e496fbf 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py @@ -10,7 +10,10 @@ def test_JistLaminarProfileGeometry_inputs(): nohash=True, usedefault=True, ), - inProfile=dict(argstr='--inProfile %s', ), + inProfile=dict( + argstr='--inProfile %s', + extensions=None, + ), incomputed=dict(argstr='--incomputed %s', ), inoutside=dict(argstr='--inoutside %f', ), inregularization=dict(argstr='--inregularization %s', ), @@ -33,7 +36,7 @@ def test_JistLaminarProfileGeometry_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_JistLaminarProfileGeometry_outputs(): - output_map = dict(outResult=dict(), ) + output_map = dict(outResult=dict(extensions=None, ), ) outputs = JistLaminarProfileGeometry.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py index 58de472b85..37f4a26212 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py @@ -10,9 +10,18 @@ def test_JistLaminarProfileSampling_inputs(): nohash=True, usedefault=True, ), - inCortex=dict(argstr='--inCortex %s', ), - inIntensity=dict(argstr='--inIntensity %s', ), - inProfile=dict(argstr='--inProfile %s', ), + inCortex=dict( + argstr='--inCortex %s', + extensions=None, + ), + inIntensity=dict( + argstr='--inIntensity %s', + extensions=None, + ), + inProfile=dict( + argstr='--inProfile %s', + extensions=None, + ), null=dict(argstr='--null %s', ), outProfile2=dict( argstr='--outProfile2 %s', @@ -36,8 +45,8 @@ def 
test_JistLaminarProfileSampling_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_JistLaminarProfileSampling_outputs(): output_map = dict( - outProfile2=dict(), - outProfilemapped=dict(), + outProfile2=dict(extensions=None, ), + outProfilemapped=dict(extensions=None, ), ) outputs = JistLaminarProfileSampling.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py index bb9577ccee..9ffe1ddb52 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py @@ -10,9 +10,18 @@ def test_JistLaminarROIAveraging_inputs(): nohash=True, usedefault=True, ), - inIntensity=dict(argstr='--inIntensity %s', ), - inMask=dict(argstr='--inMask %s', ), - inROI=dict(argstr='--inROI %s', ), + inIntensity=dict( + argstr='--inIntensity %s', + extensions=None, + ), + inMask=dict( + argstr='--inMask %s', + extensions=None, + ), + inROI=dict( + argstr='--inROI %s', + extensions=None, + ), inROI2=dict(argstr='--inROI2 %s', ), null=dict(argstr='--null %s', ), outROI3=dict( @@ -32,7 +41,7 @@ def test_JistLaminarROIAveraging_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_JistLaminarROIAveraging_outputs(): - output_map = dict(outROI3=dict(), ) + output_map = dict(outROI3=dict(extensions=None, ), ) outputs = JistLaminarROIAveraging.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py index 4aa9f9d77b..1fb5c644cc 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py @@ -10,13 +10,19 @@ def test_JistLaminarVolumetricLayering_inputs(): nohash=True, usedefault=True, ), - inInner=dict(argstr='--inInner %s', ), + inInner=dict( + argstr='--inInner %s', + extensions=None, + ), inLayering=dict(argstr='--inLayering %s', ), inLayering2=dict(argstr='--inLayering2 %s', ), inMax=dict(argstr='--inMax %d', ), inMin=dict(argstr='--inMin %f', ), inNumber=dict(argstr='--inNumber %d', ), - inOuter=dict(argstr='--inOuter %s', ), + inOuter=dict( + argstr='--inOuter %s', + extensions=None, + ), inTopology=dict(argstr='--inTopology %s', ), incurvature=dict(argstr='--incurvature %d', ), inpresmooth=dict(argstr='--inpresmooth %s', ), @@ -48,9 +54,9 @@ def test_JistLaminarVolumetricLayering_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_JistLaminarVolumetricLayering_outputs(): output_map = dict( - outContinuous=dict(), - outDiscrete=dict(), - outLayer=dict(), + outContinuous=dict(extensions=None, ), + outDiscrete=dict(extensions=None, ), + outLayer=dict(extensions=None, ), ) outputs = JistLaminarVolumetricLayering.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py index 016beee263..96dcf57050 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py @@ -11,8 +11,14 @@ def test_MedicAlgorithmImageCalculator_inputs(): usedefault=True, ), inOperation=dict(argstr='--inOperation %s', ), - inVolume=dict(argstr='--inVolume %s', ), - 
inVolume2=dict(argstr='--inVolume2 %s', ), + inVolume=dict( + argstr='--inVolume %s', + extensions=None, + ), + inVolume2=dict( + argstr='--inVolume2 %s', + extensions=None, + ), null=dict(argstr='--null %s', ), outResult=dict( argstr='--outResult %s', @@ -31,7 +37,7 @@ def test_MedicAlgorithmImageCalculator_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MedicAlgorithmImageCalculator_outputs(): - output_map = dict(outResult=dict(), ) + output_map = dict(outResult=dict(extensions=None, ), ) outputs = MedicAlgorithmImageCalculator.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py index 87f9ab6a72..c9191fb0e7 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py @@ -11,14 +11,26 @@ def test_MedicAlgorithmLesionToads_inputs(): usedefault=True, ), inAtlas=dict(argstr='--inAtlas %s', ), - inAtlas2=dict(argstr='--inAtlas2 %s', ), - inAtlas3=dict(argstr='--inAtlas3 %s', ), - inAtlas4=dict(argstr='--inAtlas4 %s', ), + inAtlas2=dict( + argstr='--inAtlas2 %s', + extensions=None, + ), + inAtlas3=dict( + argstr='--inAtlas3 %s', + extensions=None, + ), + inAtlas4=dict( + argstr='--inAtlas4 %s', + extensions=None, + ), inAtlas5=dict(argstr='--inAtlas5 %f', ), inAtlas6=dict(argstr='--inAtlas6 %s', ), inConnectivity=dict(argstr='--inConnectivity %s', ), inCorrect=dict(argstr='--inCorrect %s', ), - inFLAIR=dict(argstr='--inFLAIR %s', ), + inFLAIR=dict( + argstr='--inFLAIR %s', + extensions=None, + ), inInclude=dict(argstr='--inInclude %s', ), inMaximum=dict(argstr='--inMaximum %d', ), inMaximum2=dict(argstr='--inMaximum2 %d', ), @@ -29,8 +41,14 @@ def test_MedicAlgorithmLesionToads_inputs(): inOutput2=dict(argstr='--inOutput2 %s', ), inOutput3=dict(argstr='--inOutput3 %s', ), inSmooting=dict(argstr='--inSmooting %f', ), - inT1_MPRAGE=dict(argstr='--inT1_MPRAGE %s', ), - inT1_SPGR=dict(argstr='--inT1_SPGR %s', ), + inT1_MPRAGE=dict( + argstr='--inT1_MPRAGE %s', + extensions=None, + ), + inT1_SPGR=dict( + argstr='--inT1_SPGR %s', + extensions=None, + ), null=dict(argstr='--null %s', ), outCortical=dict( argstr='--outCortical %s', @@ -82,15 +100,15 @@ def test_MedicAlgorithmLesionToads_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_MedicAlgorithmLesionToads_outputs(): output_map = dict( - outCortical=dict(), - outFilled=dict(), - outHard=dict(), - outHard2=dict(), - outInhomogeneity=dict(), - outLesion=dict(), - outMembership=dict(), - outSulcal=dict(), - outWM=dict(), + outCortical=dict(extensions=None, ), + outFilled=dict(extensions=None, ), + outHard=dict(extensions=None, ), + outHard2=dict(extensions=None, ), + outInhomogeneity=dict(extensions=None, ), + outLesion=dict(extensions=None, ), + outMembership=dict(extensions=None, ), + outSulcal=dict(extensions=None, ), + outWM=dict(extensions=None, ), ) outputs = MedicAlgorithmLesionToads.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py index 7c8c7248ad..4abe453f31 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py @@ -17,7 +17,10 @@ def 
test_MedicAlgorithmMipavReorient_inputs(): argstr='--inSource %s', sep=';', ), - inTemplate=dict(argstr='--inTemplate %s', ), + inTemplate=dict( + argstr='--inTemplate %s', + extensions=None, + ), inUser=dict(argstr='--inUser %s', ), inUser2=dict(argstr='--inUser2 %s', ), inUser3=dict(argstr='--inUser3 %s', ), diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py index 9d5a148a24..f06f6f41d8 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py @@ -13,7 +13,10 @@ def test_MedicAlgorithmN3_inputs(): inAutomatic=dict(argstr='--inAutomatic %s', ), inEnd=dict(argstr='--inEnd %f', ), inField=dict(argstr='--inField %f', ), - inInput=dict(argstr='--inInput %s', ), + inInput=dict( + argstr='--inInput %s', + extensions=None, + ), inKernel=dict(argstr='--inKernel %f', ), inMaximum=dict(argstr='--inMaximum %d', ), inSignal=dict(argstr='--inSignal %f', ), @@ -42,8 +45,8 @@ def test_MedicAlgorithmN3_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_MedicAlgorithmN3_outputs(): output_map = dict( - outInhomogeneity=dict(), - outInhomogeneity2=dict(), + outInhomogeneity=dict(extensions=None, ), + outInhomogeneity2=dict(extensions=None, ), ) outputs = MedicAlgorithmN3.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py index b2d247e9dc..a3cfba10af 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py @@ -11,7 +11,10 @@ def test_MedicAlgorithmSPECTRE2010_inputs(): usedefault=True, ), inApply=dict(argstr='--inApply %s', ), - inAtlas=dict(argstr='--inAtlas %s', ), + inAtlas=dict( + argstr='--inAtlas %s', + extensions=None, + ), inBackground=dict(argstr='--inBackground %f', ), inCoarse=dict(argstr='--inCoarse %f', ), inCost=dict(argstr='--inCost %s', ), @@ -22,7 +25,10 @@ def test_MedicAlgorithmSPECTRE2010_inputs(): inInhomogeneity=dict(argstr='--inInhomogeneity %s', ), inInitial=dict(argstr='--inInitial %d', ), inInitial2=dict(argstr='--inInitial2 %f', ), - inInput=dict(argstr='--inInput %s', ), + inInput=dict( + argstr='--inInput %s', + extensions=None, + ), inMMC=dict(argstr='--inMMC %d', ), inMMC2=dict(argstr='--inMMC2 %d', ), inMaximum=dict(argstr='--inMaximum %f', ), @@ -95,15 +101,15 @@ def test_MedicAlgorithmSPECTRE2010_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_MedicAlgorithmSPECTRE2010_outputs(): output_map = dict( - outFANTASM=dict(), - outMask=dict(), - outMidsagittal=dict(), - outOriginal=dict(), - outPrior=dict(), - outSegmentation=dict(), - outSplitHalves=dict(), - outStripped=dict(), - outd0=dict(), + outFANTASM=dict(extensions=None, ), + outMask=dict(extensions=None, ), + outMidsagittal=dict(extensions=None, ), + outOriginal=dict(extensions=None, ), + outPrior=dict(extensions=None, ), + outSegmentation=dict(extensions=None, ), + outSplitHalves=dict(extensions=None, ), + outStripped=dict(extensions=None, ), + outd0=dict(extensions=None, ), ) outputs = MedicAlgorithmSPECTRE2010.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_RandomVol.py b/nipype/interfaces/mipav/tests/test_auto_RandomVol.py index d3d92142c4..e97aa87285 100644 --- a/nipype/interfaces/mipav/tests/test_auto_RandomVol.py +++ b/nipype/interfaces/mipav/tests/test_auto_RandomVol.py @@ 
-37,7 +37,7 @@ def test_RandomVol_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_RandomVol_outputs(): - output_map = dict(outRand1=dict(), ) + output_map = dict(outRand1=dict(extensions=None, ), ) outputs = RandomVol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py b/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py index 4ced8de75a..0fe9f88276 100644 --- a/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py +++ b/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py @@ -35,19 +35,32 @@ def test_WatershedBEM_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_WatershedBEM_outputs(): output_map = dict( - brain_surface=dict(loc='bem/watershed', ), + brain_surface=dict( + extensions=None, + loc='bem/watershed', + ), cor_files=dict( altkey='COR', loc='bem/watershed/ws', ), fif_file=dict( altkey='fif', + extensions=None, loc='bem', ), - inner_skull_surface=dict(loc='bem/watershed', ), + inner_skull_surface=dict( + extensions=None, + loc='bem/watershed', + ), mesh_files=dict(), - outer_skin_surface=dict(loc='bem/watershed', ), - outer_skull_surface=dict(loc='bem/watershed', ), + outer_skin_surface=dict( + extensions=None, + loc='bem/watershed', + ), + outer_skull_surface=dict( + extensions=None, + loc='bem/watershed', + ), ) outputs = WatershedBEM.output_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py b/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py index 78a275f6a7..3b5dd2f5c6 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py @@ -9,10 +9,12 @@ def test_ConstrainedSphericalDeconvolution_inputs(): debug=dict(argstr='-debug', ), directions_file=dict( argstr='-directions %s', + extensions=None, position=-2, ), encoding_file=dict( argstr='-grad %s', + extensions=None, position=1, ), environ=dict( @@ -21,10 +23,12 @@ def test_ConstrainedSphericalDeconvolution_inputs(): ), filter_file=dict( argstr='-filter %s', + extensions=None, position=-2, ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), @@ -32,6 +36,7 @@ def test_ConstrainedSphericalDeconvolution_inputs(): lambda_value=dict(argstr='-lambda %s', ), mask_image=dict( argstr='-mask %s', + extensions=None, position=2, ), maximum_harmonic_order=dict(argstr='-lmax %s', ), @@ -41,11 +46,13 @@ def test_ConstrainedSphericalDeconvolution_inputs(): ), out_filename=dict( argstr='%s', + extensions=None, genfile=True, position=-1, ), response_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -57,7 +64,7 @@ def test_ConstrainedSphericalDeconvolution_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ConstrainedSphericalDeconvolution_outputs(): - output_map = dict(spherical_harmonics_image=dict(), ) + output_map = dict(spherical_harmonics_image=dict(extensions=None, ), ) outputs = ConstrainedSphericalDeconvolution.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py b/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py index dc95cff525..6911f153ed 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py +++ 
b/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py @@ -8,6 +8,7 @@ def test_DWI2SphericalHarmonicsImage_inputs(): args=dict(argstr='%s', ), encoding_file=dict( argstr='-grad %s', + extensions=None, mandatory=True, position=1, ), @@ -17,6 +18,7 @@ def test_DWI2SphericalHarmonicsImage_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -27,6 +29,7 @@ def test_DWI2SphericalHarmonicsImage_inputs(): ), out_filename=dict( argstr='%s', + extensions=None, genfile=True, position=-1, ), @@ -37,7 +40,7 @@ def test_DWI2SphericalHarmonicsImage_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DWI2SphericalHarmonicsImage_outputs(): - output_map = dict(spherical_harmonics_image=dict(), ) + output_map = dict(spherical_harmonics_image=dict(extensions=None, ), ) outputs = DWI2SphericalHarmonicsImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py b/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py index 86975950bf..0844fb58f2 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py @@ -12,6 +12,7 @@ def test_DWI2Tensor_inputs(): ), encoding_file=dict( argstr='-grad %s', + extensions=None, position=2, ), environ=dict( @@ -35,6 +36,7 @@ def test_DWI2Tensor_inputs(): ), out_filename=dict( argstr='%s', + extensions=None, name_source='in_file', name_template='%s_tensor.mif', output_name='tensor', @@ -51,7 +53,7 @@ def test_DWI2Tensor_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DWI2Tensor_outputs(): - output_map = dict(tensor=dict(), ) + output_map = dict(tensor=dict(extensions=None, ), ) outputs = DWI2Tensor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py index 2c6417fe3a..4a411a85b2 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py @@ -18,6 +18,7 @@ def test_DiffusionTensorStreamlineTrack_inputs(): ), exclude_file=dict( argstr='-exclude %s', + extensions=None, xor=['exclude_file', 'exclude_spec'], ), exclude_spec=dict( @@ -29,16 +30,19 @@ def test_DiffusionTensorStreamlineTrack_inputs(): ), gradient_encoding_file=dict( argstr='-grad %s', + extensions=None, mandatory=True, position=-2, ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), include_file=dict( argstr='-include %s', + extensions=None, xor=['include_file', 'include_spec'], ), include_spec=dict( @@ -63,6 +67,7 @@ def test_DiffusionTensorStreamlineTrack_inputs(): ), mask_file=dict( argstr='-mask %s', + extensions=None, xor=['mask_file', 'mask_spec'], ), mask_spec=dict( @@ -88,6 +93,7 @@ def test_DiffusionTensorStreamlineTrack_inputs(): no_mask_interpolation=dict(argstr='-nomaskinterp', ), out_file=dict( argstr='%s', + extensions=None, name_source=['in_file'], name_template='%s_tracked.tck', output_name='tracked', @@ -95,6 +101,7 @@ def test_DiffusionTensorStreamlineTrack_inputs(): ), seed_file=dict( argstr='-seed %s', + extensions=None, xor=['seed_file', 'seed_spec'], ), seed_spec=dict( @@ -117,7 +124,7 @@ def test_DiffusionTensorStreamlineTrack_inputs(): for metakey, 
value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DiffusionTensorStreamlineTrack_outputs(): - output_map = dict(tracked=dict(), ) + output_map = dict(tracked=dict(extensions=None, ), ) outputs = DiffusionTensorStreamlineTrack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py b/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py index b5474bd18c..a674b2be88 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py @@ -14,12 +14,14 @@ def test_Directions2Amplitude_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), num_peaks=dict(argstr='-num %s', ), out_file=dict( argstr='%s', + extensions=None, hash_files=False, keep_extension=False, name_source=['in_file'], @@ -30,7 +32,10 @@ def test_Directions2Amplitude_inputs(): argstr='-direction %s', sep=' ', ), - peaks_image=dict(argstr='-peaks %s', ), + peaks_image=dict( + argstr='-peaks %s', + extensions=None, + ), quiet_display=dict(argstr='-quiet', ), ) inputs = Directions2Amplitude.input_spec() @@ -39,7 +44,7 @@ def test_Directions2Amplitude_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Directions2Amplitude_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Directions2Amplitude.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Erode.py b/nipype/interfaces/mrtrix/tests/test_auto_Erode.py index 6215593ab0..f723a002c1 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Erode.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Erode.py @@ -20,12 +20,14 @@ def test_Erode_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), number_of_passes=dict(argstr='-npass %s', ), out_filename=dict( argstr='%s', + extensions=None, genfile=True, position=-1, ), @@ -40,7 +42,7 @@ def test_Erode_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Erode_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Erode.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py b/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py index 983433104e..25f9718501 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py @@ -9,6 +9,7 @@ def test_EstimateResponseForSH_inputs(): debug=dict(argstr='-debug', ), encoding_file=dict( argstr='-grad %s', + extensions=None, mandatory=True, position=1, ), @@ -18,11 +19,13 @@ def test_EstimateResponseForSH_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), mask_image=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -30,6 +33,7 @@ def test_EstimateResponseForSH_inputs(): normalise=dict(argstr='-normalise', ), out_filename=dict( argstr='%s', + extensions=None, genfile=True, position=-1, ), @@ -41,7 +45,7 @@ def test_EstimateResponseForSH_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def 
test_EstimateResponseForSH_outputs(): - output_map = dict(response=dict(), ) + output_map = dict(response=dict(extensions=None, ), ) outputs = EstimateResponseForSH.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py b/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py index 6261b6b0a5..96ce4a84f4 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py @@ -5,12 +5,21 @@ def test_FSL2MRTrix_inputs(): input_map = dict( - bval_file=dict(mandatory=True, ), - bvec_file=dict(mandatory=True, ), + bval_file=dict( + extensions=None, + mandatory=True, + ), + bvec_file=dict( + extensions=None, + mandatory=True, + ), invert_x=dict(usedefault=True, ), invert_y=dict(usedefault=True, ), invert_z=dict(usedefault=True, ), - out_encoding_file=dict(genfile=True, ), + out_encoding_file=dict( + extensions=None, + genfile=True, + ), ) inputs = FSL2MRTrix.input_spec() @@ -18,7 +27,7 @@ def test_FSL2MRTrix_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_FSL2MRTrix_outputs(): - output_map = dict(encoding_file=dict(), ) + output_map = dict(encoding_file=dict(extensions=None, ), ) outputs = FSL2MRTrix.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py b/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py index 6e0e670d06..182e8f79de 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py @@ -16,6 +16,7 @@ def test_FilterTracks_inputs(): ), exclude_file=dict( argstr='-exclude %s', + extensions=None, xor=['exclude_file', 'exclude_spec'], ), exclude_spec=dict( @@ -27,11 +28,13 @@ def test_FilterTracks_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), include_file=dict( argstr='-include %s', + extensions=None, xor=['include_file', 'include_spec'], ), include_spec=dict( @@ -49,6 +52,7 @@ def test_FilterTracks_inputs(): no_mask_interpolation=dict(argstr='-nomaskinterp', ), out_file=dict( argstr='%s', + extensions=None, hash_files=False, name_source=['in_file'], name_template='%s_filt', @@ -65,7 +69,7 @@ def test_FilterTracks_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_FilterTracks_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = FilterTracks.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py b/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py index 5d9f51739c..7bd9657d66 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py @@ -8,6 +8,7 @@ def test_FindShPeaks_inputs(): args=dict(argstr='%s', ), directions_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -19,12 +20,14 @@ def test_FindShPeaks_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), num_peaks=dict(argstr='-num %s', ), out_file=dict( argstr='%s', + extensions=None, hash_files=False, keep_extension=False, name_source=['in_file'], @@ -36,7 +39,10 @@ def test_FindShPeaks_inputs(): sep=' ', ), peak_threshold=dict(argstr='-threshold %s', ), - peaks_image=dict(argstr='-peaks %s', ), 
+ peaks_image=dict( + argstr='-peaks %s', + extensions=None, + ), quiet_display=dict(argstr='-quiet', ), ) inputs = FindShPeaks.input_spec() @@ -45,7 +51,7 @@ def test_FindShPeaks_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_FindShPeaks_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = FindShPeaks.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py b/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py index 2cfd89bb6f..7449a7fcaa 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py @@ -20,6 +20,7 @@ def test_GenerateDirections_inputs(): ), out_file=dict( argstr='%s', + extensions=None, hash_files=False, name_source=['num_dirs'], name_template='directions_%d.txt', @@ -34,7 +35,7 @@ def test_GenerateDirections_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GenerateDirections_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = GenerateDirections.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py b/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py index 6a0305f9e2..98fa1d6e54 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py @@ -8,11 +8,13 @@ def test_GenerateWhiteMatterMask_inputs(): args=dict(argstr='%s', ), binary_mask=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), encoding_file=dict( argstr='-grad %s', + extensions=None, mandatory=True, position=1, ), @@ -22,12 +24,14 @@ def test_GenerateWhiteMatterMask_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), noise_level_margin=dict(argstr='-margin %s', ), out_WMProb_filename=dict( argstr='%s', + extensions=None, genfile=True, position=-1, ), @@ -38,7 +42,7 @@ def test_GenerateWhiteMatterMask_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GenerateWhiteMatterMask_outputs(): - output_map = dict(WMprobabilitymap=dict(), ) + output_map = dict(WMprobabilitymap=dict(extensions=None, ), ) outputs = GenerateWhiteMatterMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py b/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py index 5aef0bfd13..2dec516f43 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py @@ -25,6 +25,7 @@ def test_MRConvert_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -39,6 +40,7 @@ def test_MRConvert_inputs(): ), out_filename=dict( argstr='%s', + extensions=None, genfile=True, position=-1, ), @@ -71,7 +73,7 @@ def test_MRConvert_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MRConvert_outputs(): - output_map = dict(converted=dict(), ) + output_map = dict(converted=dict(extensions=None, ), ) outputs = MRConvert.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py b/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py index a5a864289a..c8446664bf 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py @@ -21,6 +21,7 @@ def test_MRMultiply_inputs(): ), out_filename=dict( argstr='%s', + extensions=None, genfile=True, position=-1, ), @@ -35,7 +36,7 @@ def test_MRMultiply_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MRMultiply_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = MRMultiply.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py index d6a3db0a1d..31b74cd09d 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py @@ -29,6 +29,7 @@ def test_MRTransform_inputs(): ), out_filename=dict( argstr='%s', + extensions=None, genfile=True, position=-1, ), @@ -38,6 +39,7 @@ def test_MRTransform_inputs(): ), reference_image=dict( argstr='-reference %s', + extensions=None, position=1, ), replace_transform=dict( @@ -46,10 +48,12 @@ def test_MRTransform_inputs(): ), template_image=dict( argstr='-template %s', + extensions=None, position=1, ), transformation_file=dict( argstr='-transform %s', + extensions=None, position=1, ), ) @@ -59,7 +63,7 @@ def test_MRTransform_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MRTransform_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = MRTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py index b6fefac9a0..9e663ee818 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py @@ -5,14 +5,18 @@ def test_MRTrix2TrackVis_inputs(): input_map = dict( - image_file=dict(), - in_file=dict(mandatory=True, ), - matrix_file=dict(), + image_file=dict(extensions=None, ), + in_file=dict( + extensions=None, + mandatory=True, + ), + matrix_file=dict(extensions=None, ), out_filename=dict( + extensions=None, genfile=True, usedefault=True, ), - registration_image_file=dict(), + registration_image_file=dict(extensions=None, ), ) inputs = MRTrix2TrackVis.input_spec() @@ -20,7 +24,7 @@ def test_MRTrix2TrackVis_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MRTrix2TrackVis_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = MRTrix2TrackVis.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py index 0022de4d7a..05107b81bd 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py @@ -12,6 +12,7 @@ def test_MRTrixInfo_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), diff --git 
a/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py b/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py index 0e7daabcaa..b4c6c2af91 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py @@ -16,11 +16,13 @@ def test_MedianFilter3D_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), out_filename=dict( argstr='%s', + extensions=None, genfile=True, position=-1, ), @@ -35,7 +37,7 @@ def test_MedianFilter3D_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MedianFilter3D_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = MedianFilter3D.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py index 6d75b1b9a2..29f0a38e6d 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py @@ -18,6 +18,7 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): ), exclude_file=dict( argstr='-exclude %s', + extensions=None, xor=['exclude_file', 'exclude_spec'], ), exclude_spec=dict( @@ -29,11 +30,13 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), include_file=dict( argstr='-include %s', + extensions=None, xor=['include_file', 'include_spec'], ), include_spec=dict( @@ -58,6 +61,7 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): ), mask_file=dict( argstr='-mask %s', + extensions=None, xor=['mask_file', 'mask_spec'], ), mask_spec=dict( @@ -84,6 +88,7 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): no_mask_interpolation=dict(argstr='-nomaskinterp', ), out_file=dict( argstr='%s', + extensions=None, name_source=['in_file'], name_template='%s_tracked.tck', output_name='tracked', @@ -91,6 +96,7 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): ), seed_file=dict( argstr='-seed %s', + extensions=None, xor=['seed_file', 'seed_spec'], ), seed_spec=dict( @@ -113,7 +119,7 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_outputs(): - output_map = dict(tracked=dict(), ) + output_map = dict(tracked=dict(extensions=None, ), ) outputs = ProbabilisticSphericallyDeconvolutedStreamlineTrack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py index 9bed95276d..f54aec96a7 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py @@ -18,6 +18,7 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): ), exclude_file=dict( argstr='-exclude %s', + extensions=None, xor=['exclude_file', 'exclude_spec'], ), 
exclude_spec=dict( @@ -29,11 +30,13 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), include_file=dict( argstr='-include %s', + extensions=None, xor=['include_file', 'include_spec'], ), include_spec=dict( @@ -58,6 +61,7 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): ), mask_file=dict( argstr='-mask %s', + extensions=None, xor=['mask_file', 'mask_spec'], ), mask_spec=dict( @@ -83,6 +87,7 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): no_mask_interpolation=dict(argstr='-nomaskinterp', ), out_file=dict( argstr='%s', + extensions=None, name_source=['in_file'], name_template='%s_tracked.tck', output_name='tracked', @@ -90,6 +95,7 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): ), seed_file=dict( argstr='-seed %s', + extensions=None, xor=['seed_file', 'seed_spec'], ), seed_spec=dict( @@ -112,7 +118,7 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SphericallyDeconvolutedStreamlineTrack_outputs(): - output_map = dict(tracked=dict(), ) + output_map = dict(tracked=dict(extensions=None, ), ) outputs = SphericallyDeconvolutedStreamlineTrack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py index c898a60eb3..69f1ce65c0 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py @@ -18,6 +18,7 @@ def test_StreamlineTrack_inputs(): ), exclude_file=dict( argstr='-exclude %s', + extensions=None, xor=['exclude_file', 'exclude_spec'], ), exclude_spec=dict( @@ -29,11 +30,13 @@ def test_StreamlineTrack_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), include_file=dict( argstr='-include %s', + extensions=None, xor=['include_file', 'include_spec'], ), include_spec=dict( @@ -58,6 +61,7 @@ def test_StreamlineTrack_inputs(): ), mask_file=dict( argstr='-mask %s', + extensions=None, xor=['mask_file', 'mask_spec'], ), mask_spec=dict( @@ -83,6 +87,7 @@ def test_StreamlineTrack_inputs(): no_mask_interpolation=dict(argstr='-nomaskinterp', ), out_file=dict( argstr='%s', + extensions=None, name_source=['in_file'], name_template='%s_tracked.tck', output_name='tracked', @@ -90,6 +95,7 @@ def test_StreamlineTrack_inputs(): ), seed_file=dict( argstr='-seed %s', + extensions=None, xor=['seed_file', 'seed_spec'], ), seed_spec=dict( @@ -112,7 +118,7 @@ def test_StreamlineTrack_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_StreamlineTrack_outputs(): - output_map = dict(tracked=dict(), ) + output_map = dict(tracked=dict(extensions=None, ), ) outputs = StreamlineTrack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py index e9546d7e90..dfa7f862e3 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py @@ -16,11 +16,13 @@ def test_Tensor2ApparentDiffusion_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), out_filename=dict( argstr='%s', + 
extensions=None, genfile=True, position=-1, ), @@ -35,7 +37,7 @@ def test_Tensor2ApparentDiffusion_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Tensor2ApparentDiffusion_outputs(): - output_map = dict(ADC=dict(), ) + output_map = dict(ADC=dict(extensions=None, ), ) outputs = Tensor2ApparentDiffusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py index d16a907f62..eafdba5a7c 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py @@ -16,11 +16,13 @@ def test_Tensor2FractionalAnisotropy_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), out_filename=dict( argstr='%s', + extensions=None, genfile=True, position=-1, ), @@ -35,7 +37,7 @@ def test_Tensor2FractionalAnisotropy_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Tensor2FractionalAnisotropy_outputs(): - output_map = dict(FA=dict(), ) + output_map = dict(FA=dict(extensions=None, ), ) outputs = Tensor2FractionalAnisotropy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py index c07d0a8db4..b982c46bbd 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py @@ -16,11 +16,13 @@ def test_Tensor2Vector_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), out_filename=dict( argstr='%s', + extensions=None, genfile=True, position=-1, ), @@ -35,7 +37,7 @@ def test_Tensor2Vector_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Tensor2Vector_outputs(): - output_map = dict(vector=dict(), ) + output_map = dict(vector=dict(extensions=None, ), ) outputs = Tensor2Vector.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py b/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py index 39fbf14d2c..f53b8098eb 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py @@ -17,6 +17,7 @@ def test_Threshold_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -26,6 +27,7 @@ def test_Threshold_inputs(): ), out_filename=dict( argstr='%s', + extensions=None, genfile=True, position=-1, ), @@ -45,7 +47,7 @@ def test_Threshold_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Threshold_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Threshold.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py b/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py index f984203ab1..fa9eb184ef 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py @@ -20,11 +20,13 @@ def test_Tracks2Prob_inputs(): ), in_file=dict( argstr='%s', + 
extensions=None, mandatory=True, position=-2, ), out_filename=dict( argstr='%s', + extensions=None, genfile=True, position=-1, ), @@ -39,6 +41,7 @@ def test_Tracks2Prob_inputs(): ), template_file=dict( argstr='-template %s', + extensions=None, position=1, ), voxel_dims=dict( @@ -53,7 +56,7 @@ def test_Tracks2Prob_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Tracks2Prob_outputs(): - output_map = dict(tract_image=dict(), ) + output_map = dict(tract_image=dict(extensions=None, ), ) outputs = Tracks2Prob.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py b/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py index abb3ba6831..43f588e075 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py @@ -12,11 +12,13 @@ def test_ACTPrepareFSL_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), out_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, usedefault=True, @@ -28,7 +30,7 @@ def test_ACTPrepareFSL_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ACTPrepareFSL_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = ACTPrepareFSL.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py b/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py index c318be0bfd..90c5272ed0 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py @@ -11,12 +11,19 @@ def test_BrainMask_inputs(): nohash=True, usedefault=True, ), - grad_file=dict(argstr='-grad %s', ), + grad_file=dict( + argstr='-grad %s', + extensions=None, + ), grad_fsl=dict(argstr='-fslgrad %s %s', ), - in_bval=dict(), - in_bvec=dict(argstr='-fslgrad %s %s', ), + in_bval=dict(extensions=None, ), + in_bvec=dict( + argstr='-fslgrad %s %s', + extensions=None, + ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -26,6 +33,7 @@ def test_BrainMask_inputs(): ), out_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, usedefault=True, @@ -37,7 +45,7 @@ def test_BrainMask_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_BrainMask_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = BrainMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py b/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py index 95e5797d06..dba96f097e 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py @@ -12,15 +12,23 @@ def test_BuildConnectome_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), in_parc=dict( argstr='%s', + extensions=None, position=-2, ), - in_scalar=dict(argstr='-image %s', ), - in_weights=dict(argstr='-tck_weights_in %s', ), + in_scalar=dict( + argstr='-image %s', + extensions=None, + ), + in_weights=dict( + argstr='-tck_weights_in %s', + extensions=None, + ), 
keep_unassigned=dict(argstr='-keep_unassigned', ), metric=dict(argstr='-metric %s', ), nthreads=dict( @@ -29,6 +37,7 @@ def test_BuildConnectome_inputs(): ), out_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, usedefault=True, @@ -45,7 +54,7 @@ def test_BuildConnectome_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_BuildConnectome_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = BuildConnectome.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py b/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py index 38369e8fe5..b4eb5de3a0 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py @@ -8,7 +8,10 @@ def test_ComputeTDI_inputs(): args=dict(argstr='%s', ), contrast=dict(argstr='-constrast %s', ), data_type=dict(argstr='-datatype %s', ), - dixel=dict(argstr='-dixel %s', ), + dixel=dict( + argstr='-dixel %s', + extensions=None, + ), ends_only=dict(argstr='-ends_only', ), environ=dict( nohash=True, @@ -17,10 +20,14 @@ def test_ComputeTDI_inputs(): fwhm_tck=dict(argstr='-fwhm_tck %f', ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), - in_map=dict(argstr='-image %s', ), + in_map=dict( + argstr='-image %s', + extensions=None, + ), map_zero=dict(argstr='-map_zero', ), max_tod=dict(argstr='-tod %d', ), nthreads=dict( @@ -29,14 +36,21 @@ def test_ComputeTDI_inputs(): ), out_file=dict( argstr='%s', + extensions=None, position=-1, usedefault=True, ), precise=dict(argstr='-precise', ), - reference=dict(argstr='-template %s', ), + reference=dict( + argstr='-template %s', + extensions=None, + ), stat_tck=dict(argstr='-stat_tck %s', ), stat_vox=dict(argstr='-stat_vox %s', ), - tck_weights=dict(argstr='-tck_weights_in %s', ), + tck_weights=dict( + argstr='-tck_weights_in %s', + extensions=None, + ), upsample=dict(argstr='-upsample %d', ), use_dec=dict(argstr='-dec', ), vox_size=dict( @@ -50,7 +64,7 @@ def test_ComputeTDI_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ComputeTDI_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = ComputeTDI.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py index ea4d3f05d8..d70545be88 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py @@ -6,7 +6,10 @@ def test_DWIBiasCorrect_inputs(): input_map = dict( args=dict(argstr='%s', ), - bias=dict(argstr='-bias %s', ), + bias=dict( + argstr='-bias %s', + extensions=None, + ), bval_scale=dict(argstr='-bvalue_scaling %s', ), environ=dict( nohash=True, @@ -14,20 +17,32 @@ def test_DWIBiasCorrect_inputs(): ), fsl_grad=dict( argstr='-fslgrad %s %s', + extensions=None, xor=('mrtrix_grad', 'fsl_grad'), ), - grad_file=dict(argstr='-grad %s', ), + grad_file=dict( + argstr='-grad %s', + extensions=None, + ), grad_fsl=dict(argstr='-fslgrad %s %s', ), - in_bval=dict(), - in_bvec=dict(argstr='-fslgrad %s %s', ), + in_bval=dict(extensions=None, ), + in_bvec=dict( + argstr='-fslgrad %s %s', + extensions=None, + ), in_file=dict( 
argstr='%s', + extensions=None, mandatory=True, position=-2, ), - in_mask=dict(argstr='-mask %s', ), + in_mask=dict( + argstr='-mask %s', + extensions=None, + ), mrtrix_grad=dict( argstr='-grad %s', + extensions=None, xor=('mrtrix_grad', 'fsl_grad'), ), nthreads=dict( @@ -36,6 +51,7 @@ def test_DWIBiasCorrect_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, keep_extension=True, name_source='in_file', @@ -60,8 +76,8 @@ def test_DWIBiasCorrect_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_DWIBiasCorrect_outputs(): output_map = dict( - bias=dict(), - out_file=dict(), + bias=dict(extensions=None, ), + out_file=dict(extensions=None, ), ) outputs = DWIBiasCorrect.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py index 7b6930ee82..97acf7f526 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py @@ -12,26 +12,38 @@ def test_DWIDenoise_inputs(): usedefault=True, ), extent=dict(argstr='-extent %d,%d,%d', ), - grad_file=dict(argstr='-grad %s', ), + grad_file=dict( + argstr='-grad %s', + extensions=None, + ), grad_fsl=dict(argstr='-fslgrad %s %s', ), - in_bval=dict(), - in_bvec=dict(argstr='-fslgrad %s %s', ), + in_bval=dict(extensions=None, ), + in_bvec=dict( + argstr='-fslgrad %s %s', + extensions=None, + ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), mask=dict( argstr='-mask %s', + extensions=None, position=1, ), - noise=dict(argstr='-noise %s', ), + noise=dict( + argstr='-noise %s', + extensions=None, + ), nthreads=dict( argstr='-nthreads %d', nohash=True, ), out_file=dict( argstr='%s', + extensions=None, genfile=True, keep_extension=True, name_source='in_file', @@ -46,8 +58,8 @@ def test_DWIDenoise_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_DWIDenoise_outputs(): output_map = dict( - noise=dict(), - out_file=dict(), + noise=dict(extensions=None, ), + out_file=dict(extensions=None, ), ) outputs = DWIDenoise.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py index 0114c5efe0..7658201223 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py @@ -12,12 +12,19 @@ def test_DWIExtract_inputs(): nohash=True, usedefault=True, ), - grad_file=dict(argstr='-grad %s', ), + grad_file=dict( + argstr='-grad %s', + extensions=None, + ), grad_fsl=dict(argstr='-fslgrad %s %s', ), - in_bval=dict(), - in_bvec=dict(argstr='-fslgrad %s %s', ), + in_bval=dict(extensions=None, ), + in_bvec=dict( + argstr='-fslgrad %s %s', + extensions=None, + ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -28,6 +35,7 @@ def test_DWIExtract_inputs(): ), out_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, ), @@ -43,7 +51,7 @@ def test_DWIExtract_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DWIExtract_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = DWIExtract.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py index a5c152e928..c64efa2ca4 100644 --- 
a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py @@ -14,11 +14,13 @@ def test_EstimateFOD_inputs(): bval_scale=dict(argstr='-bvalue_scaling %s', ), csf_odf=dict( argstr='%s', + extensions=None, position=-1, usedefault=True, ), csf_txt=dict( argstr='%s', + extensions=None, position=-2, ), environ=dict( @@ -27,24 +29,39 @@ def test_EstimateFOD_inputs(): ), gm_odf=dict( argstr='%s', + extensions=None, position=-3, usedefault=True, ), gm_txt=dict( argstr='%s', + extensions=None, position=-4, ), - grad_file=dict(argstr='-grad %s', ), + grad_file=dict( + argstr='-grad %s', + extensions=None, + ), grad_fsl=dict(argstr='-fslgrad %s %s', ), - in_bval=dict(), - in_bvec=dict(argstr='-fslgrad %s %s', ), - in_dirs=dict(argstr='-directions %s', ), + in_bval=dict(extensions=None, ), + in_bvec=dict( + argstr='-fslgrad %s %s', + extensions=None, + ), + in_dirs=dict( + argstr='-directions %s', + extensions=None, + ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-7, ), - mask_file=dict(argstr='-mask %s', ), + mask_file=dict( + argstr='-mask %s', + extensions=None, + ), max_sh=dict( argstr='-lmax %d', usedefault=True, @@ -59,12 +76,14 @@ def test_EstimateFOD_inputs(): ), wm_odf=dict( argstr='%s', + extensions=None, mandatory=True, position=-5, usedefault=True, ), wm_txt=dict( argstr='%s', + extensions=None, mandatory=True, position=-6, ), @@ -76,9 +95,18 @@ def test_EstimateFOD_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_EstimateFOD_outputs(): output_map = dict( - csf_odf=dict(argstr='%s', ), - gm_odf=dict(argstr='%s', ), - wm_odf=dict(argstr='%s', ), + csf_odf=dict( + argstr='%s', + extensions=None, + ), + gm_odf=dict( + argstr='%s', + extensions=None, + ), + wm_odf=dict( + argstr='%s', + extensions=None, + ), ) outputs = EstimateFOD.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py b/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py index 85c103cc10..84404fdde6 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py @@ -11,16 +11,26 @@ def test_FitTensor_inputs(): nohash=True, usedefault=True, ), - grad_file=dict(argstr='-grad %s', ), + grad_file=dict( + argstr='-grad %s', + extensions=None, + ), grad_fsl=dict(argstr='-fslgrad %s %s', ), - in_bval=dict(), - in_bvec=dict(argstr='-fslgrad %s %s', ), + in_bval=dict(extensions=None, ), + in_bvec=dict( + argstr='-fslgrad %s %s', + extensions=None, + ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), - in_mask=dict(argstr='-mask %s', ), + in_mask=dict( + argstr='-mask %s', + extensions=None, + ), method=dict(argstr='-method %s', ), nthreads=dict( argstr='-nthreads %d', @@ -28,6 +38,7 @@ def test_FitTensor_inputs(): ), out_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, usedefault=True, @@ -43,7 +54,7 @@ def test_FitTensor_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_FitTensor_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = FitTensor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py b/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py index dcfbe1fc6f..92594da593 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py +++ 
b/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py @@ -16,12 +16,19 @@ def test_Generate5tt_inputs(): nohash=True, usedefault=True, ), - grad_file=dict(argstr='-grad %s', ), + grad_file=dict( + argstr='-grad %s', + extensions=None, + ), grad_fsl=dict(argstr='-fslgrad %s %s', ), - in_bval=dict(), - in_bvec=dict(argstr='-fslgrad %s %s', ), + in_bval=dict(extensions=None, ), + in_bvec=dict( + argstr='-fslgrad %s %s', + extensions=None, + ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -31,6 +38,7 @@ def test_Generate5tt_inputs(): ), out_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, ), @@ -41,7 +49,7 @@ def test_Generate5tt_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Generate5tt_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Generate5tt.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py index 2de0e6c115..1dc8d6e0b1 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py @@ -12,28 +12,46 @@ def test_LabelConfig_inputs(): ), in_config=dict( argstr='%s', + extensions=None, position=-2, ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), - lut_aal=dict(argstr='-lut_aal %s', ), - lut_basic=dict(argstr='-lut_basic %s', ), - lut_fs=dict(argstr='-lut_freesurfer %s', ), - lut_itksnap=dict(argstr='-lut_itksnap %s', ), + lut_aal=dict( + argstr='-lut_aal %s', + extensions=None, + ), + lut_basic=dict( + argstr='-lut_basic %s', + extensions=None, + ), + lut_fs=dict( + argstr='-lut_freesurfer %s', + extensions=None, + ), + lut_itksnap=dict( + argstr='-lut_itksnap %s', + extensions=None, + ), nthreads=dict( argstr='-nthreads %d', nohash=True, ), out_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, usedefault=True, ), - spine=dict(argstr='-spine %s', ), + spine=dict( + argstr='-spine %s', + extensions=None, + ), ) inputs = LabelConfig.input_spec() @@ -41,7 +59,7 @@ def test_LabelConfig_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_LabelConfig_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = LabelConfig.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py index 1e93ae82d8..82cebd8bbc 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py @@ -12,15 +12,18 @@ def test_LabelConvert_inputs(): ), in_config=dict( argstr='%s', + extensions=None, position=-2, ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-4, ), in_lut=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), @@ -30,11 +33,15 @@ def test_LabelConvert_inputs(): ), out_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, usedefault=True, ), - spine=dict(argstr='-spine %s', ), + spine=dict( + argstr='-spine %s', + extensions=None, + ), ) inputs = LabelConvert.input_spec() @@ -42,7 +49,7 @@ def test_LabelConvert_inputs(): for metakey, value in list(metadata.items()): 
assert getattr(inputs.traits()[key], metakey) == value def test_LabelConvert_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = LabelConvert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py index ece24e1d47..9271d07517 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py @@ -19,12 +19,19 @@ def test_MRConvert_inputs(): nohash=True, usedefault=True, ), - grad_file=dict(argstr='-grad %s', ), + grad_file=dict( + argstr='-grad %s', + extensions=None, + ), grad_fsl=dict(argstr='-fslgrad %s %s', ), - in_bval=dict(), - in_bvec=dict(argstr='-fslgrad %s %s', ), + in_bval=dict(extensions=None, ), + in_bvec=dict( + argstr='-fslgrad %s %s', + extensions=None, + ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -34,6 +41,7 @@ def test_MRConvert_inputs(): ), out_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, usedefault=True, @@ -53,7 +61,7 @@ def test_MRConvert_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MRConvert_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = MRConvert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py index c9290b562b..e6bdd21243 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py @@ -18,12 +18,19 @@ def test_MRDeGibbs_inputs(): nohash=True, usedefault=True, ), - grad_file=dict(argstr='-grad %s', ), + grad_file=dict( + argstr='-grad %s', + extensions=None, + ), grad_fsl=dict(argstr='-fslgrad %s %s', ), - in_bval=dict(), - in_bvec=dict(argstr='-fslgrad %s %s', ), + in_bval=dict(extensions=None, ), + in_bvec=dict( + argstr='-fslgrad %s %s', + extensions=None, + ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -45,6 +52,7 @@ def test_MRDeGibbs_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, keep_extension=True, name_source='in_file', @@ -58,7 +66,7 @@ def test_MRDeGibbs_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MRDeGibbs_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = MRDeGibbs.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py index eec06b6276..ae494bb932 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py @@ -12,12 +12,19 @@ def test_MRMath_inputs(): nohash=True, usedefault=True, ), - grad_file=dict(argstr='-grad %s', ), + grad_file=dict( + argstr='-grad %s', + extensions=None, + ), grad_fsl=dict(argstr='-fslgrad %s %s', ), - in_bval=dict(), - in_bvec=dict(argstr='-fslgrad %s %s', ), + in_bval=dict(extensions=None, ), + in_bvec=dict( + argstr='-fslgrad %s %s', + extensions=None, + ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), @@ -32,6 +39,7 @@ def test_MRMath_inputs(): ), 
out_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, ), @@ -42,7 +50,7 @@ def test_MRMath_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MRMath_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = MRMath.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py b/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py index 9aa29acbe8..f2605532e6 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py @@ -12,18 +12,24 @@ def test_Mesh2PVE_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), - in_first=dict(argstr='-first %s', ), + in_first=dict( + argstr='-first %s', + extensions=None, + ), out_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, usedefault=True, ), reference=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -34,7 +40,7 @@ def test_Mesh2PVE_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Mesh2PVE_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Mesh2PVE.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py b/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py index 502a0f7fb1..561a1a9538 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py @@ -12,20 +12,24 @@ def test_ReplaceFSwithFIRST_inputs(): ), in_config=dict( argstr='%s', + extensions=None, position=-2, ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-4, ), in_t1w=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), out_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, usedefault=True, @@ -37,7 +41,7 @@ def test_ReplaceFSwithFIRST_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ReplaceFSwithFIRST_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = ReplaceFSwithFIRST.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py b/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py index f4b4660d78..dd326bad5b 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py @@ -14,6 +14,7 @@ def test_ResponseSD_inputs(): bval_scale=dict(argstr='-bvalue_scaling %s', ), csf_file=dict( argstr='%s', + extensions=None, position=-1, ), environ=dict( @@ -22,24 +23,36 @@ def test_ResponseSD_inputs(): ), gm_file=dict( argstr='%s', + extensions=None, position=-2, ), - grad_file=dict(argstr='-grad %s', ), + grad_file=dict( + argstr='-grad %s', + extensions=None, + ), grad_fsl=dict(argstr='-fslgrad %s %s', ), - in_bval=dict(), - in_bvec=dict(argstr='-fslgrad %s %s', ), + in_bval=dict(extensions=None, ), + in_bvec=dict( + argstr='-fslgrad %s %s', + extensions=None, + ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-5, ), - in_mask=dict(argstr='-mask %s', ), + in_mask=dict( + 
argstr='-mask %s', + extensions=None, + ), max_sh=dict( argstr='-lmax %s', sep=',', ), mtt_file=dict( argstr='%s', + extensions=None, position=-4, ), nthreads=dict( @@ -48,6 +61,7 @@ def test_ResponseSD_inputs(): ), wm_file=dict( argstr='%s', + extensions=None, position=-3, usedefault=True, ), @@ -59,9 +73,18 @@ def test_ResponseSD_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_ResponseSD_outputs(): output_map = dict( - csf_file=dict(argstr='%s', ), - gm_file=dict(argstr='%s', ), - wm_file=dict(argstr='%s', ), + csf_file=dict( + argstr='%s', + extensions=None, + ), + gm_file=dict( + argstr='%s', + extensions=None, + ), + wm_file=dict( + argstr='%s', + extensions=None, + ), ) outputs = ResponseSD.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py b/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py index 93a39484f8..064e15288e 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py @@ -12,6 +12,7 @@ def test_TCK2VTK_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -21,11 +22,18 @@ def test_TCK2VTK_inputs(): ), out_file=dict( argstr='%s', + extensions=None, position=-1, usedefault=True, ), - reference=dict(argstr='-image %s', ), - voxel=dict(argstr='-image %s', ), + reference=dict( + argstr='-image %s', + extensions=None, + ), + voxel=dict( + argstr='-image %s', + extensions=None, + ), ) inputs = TCK2VTK.input_spec() @@ -33,7 +41,7 @@ def test_TCK2VTK_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TCK2VTK_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = TCK2VTK.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py b/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py index ae532c9910..b6bc738109 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py @@ -17,15 +17,31 @@ def test_TensorMetrics_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, ), - in_mask=dict(argstr='-mask %s', ), + in_mask=dict( + argstr='-mask %s', + extensions=None, + ), modulate=dict(argstr='-modulate %s', ), - out_adc=dict(argstr='-adc %s', ), - out_eval=dict(argstr='-value %s', ), - out_evec=dict(argstr='-vector %s', ), - out_fa=dict(argstr='-fa %s', ), + out_adc=dict( + argstr='-adc %s', + extensions=None, + ), + out_eval=dict( + argstr='-value %s', + extensions=None, + ), + out_evec=dict( + argstr='-vector %s', + extensions=None, + ), + out_fa=dict( + argstr='-fa %s', + extensions=None, + ), ) inputs = TensorMetrics.input_spec() @@ -34,10 +50,10 @@ def test_TensorMetrics_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_TensorMetrics_outputs(): output_map = dict( - out_adc=dict(), - out_eval=dict(), - out_evec=dict(), - out_fa=dict(), + out_adc=dict(extensions=None, ), + out_eval=dict(extensions=None, ), + out_evec=dict(extensions=None, ), + out_fa=dict(extensions=None, ), ) outputs = TensorMetrics.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py index c79fce8867..39f239b81d 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py +++ 
b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py @@ -5,7 +5,10 @@ def test_Tractography_inputs(): input_map = dict( - act_file=dict(argstr='-act %s', ), + act_file=dict( + argstr='-act %s', + extensions=None, + ), algorithm=dict( argstr='-algorithm %s', usedefault=True, @@ -22,12 +25,19 @@ def test_Tractography_inputs(): nohash=True, usedefault=True, ), - grad_file=dict(argstr='-grad %s', ), + grad_file=dict( + argstr='-grad %s', + extensions=None, + ), grad_fsl=dict(argstr='-fslgrad %s %s', ), - in_bval=dict(), - in_bvec=dict(argstr='-fslgrad %s %s', ), + in_bval=dict(extensions=None, ), + in_bvec=dict( + argstr='-fslgrad %s %s', + extensions=None, + ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -52,29 +62,41 @@ def test_Tractography_inputs(): ), out_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-1, usedefault=True, ), out_seeds=dict( argstr='-output_seeds %s', + extensions=None, usedefault=True, ), power=dict(argstr='-power %d', ), roi_excl=dict(argstr='-exclude %s', ), roi_incl=dict(argstr='-include %s', ), roi_mask=dict(argstr='-mask %s', ), - seed_dynamic=dict(argstr='-seed_dynamic %s', ), + seed_dynamic=dict( + argstr='-seed_dynamic %s', + extensions=None, + ), seed_gmwmi=dict( argstr='-seed_gmwmi %s', + extensions=None, requires=['act_file'], ), seed_grid_voxel=dict( argstr='-seed_grid_per_voxel %s %d', xor=['seed_image', 'seed_rnd_voxel'], ), - seed_image=dict(argstr='-seed_image %s', ), - seed_rejection=dict(argstr='-seed_rejection %s', ), + seed_image=dict( + argstr='-seed_image %s', + extensions=None, + ), + seed_rejection=dict( + argstr='-seed_rejection %s', + extensions=None, + ), seed_rnd_voxel=dict( argstr='-seed_random_per_voxel %s %d', xor=['seed_image', 'seed_grid_voxel'], @@ -97,8 +119,8 @@ def test_Tractography_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Tractography_outputs(): output_map = dict( - out_file=dict(), - out_seeds=dict(), + out_file=dict(extensions=None, ), + out_seeds=dict(extensions=None, ), ) outputs = Tractography.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py b/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py index 0505ee514c..392654fd5c 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py @@ -10,6 +10,7 @@ def test_FitQt1_inputs(): b1map=dict(argstr='-b1map %s', ), comp_file=dict( argstr='-comp %s', + extensions=None, name_source=['source_file'], name_template='%s_comp.nii.gz', ), @@ -19,6 +20,7 @@ def test_FitQt1_inputs(): ), error_file=dict( argstr='-error %s', + extensions=None, name_source=['source_file'], name_template='%s_error.nii.gz', ), @@ -41,11 +43,13 @@ def test_FitQt1_inputs(): ), m0map_file=dict( argstr='-m0map %s', + extensions=None, name_source=['source_file'], name_template='%s_m0map.nii.gz', ), mask=dict( argstr='-mask %s', + extensions=None, position=2, ), maxit=dict( @@ -54,6 +58,7 @@ def test_FitQt1_inputs(): ), mcmap_file=dict( argstr='-mcmap %s', + extensions=None, name_source=['source_file'], name_template='%s_mcmap.nii.gz', ), @@ -66,10 +71,12 @@ def test_FitQt1_inputs(): ), prior=dict( argstr='-prior %s', + extensions=None, position=3, ), res_file=dict( argstr='-res %s', + extensions=None, name_source=['source_file'], name_template='%s_res.nii.gz', ), @@ -79,6 +86,7 @@ def test_FitQt1_inputs(): ), source_file=dict( argstr='-source %s', + extensions=None, mandatory=True, position=1, ), @@ -89,12 +97,14 @@ def 
test_FitQt1_inputs(): ), syn_file=dict( argstr='-syn %s', + extensions=None, name_source=['source_file'], name_template='%s_syn.nii.gz', ), t1_list=dict(argstr='-T1list %s', ), t1map_file=dict( argstr='-t1map %s', + extensions=None, name_source=['source_file'], name_template='%s_t1map.nii.gz', ), @@ -126,13 +136,13 @@ def test_FitQt1_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_FitQt1_outputs(): output_map = dict( - comp_file=dict(), - error_file=dict(), - m0map_file=dict(), - mcmap_file=dict(), - res_file=dict(), - syn_file=dict(), - t1map_file=dict(), + comp_file=dict(extensions=None, ), + error_file=dict(extensions=None, ), + m0map_file=dict(extensions=None, ), + mcmap_file=dict(extensions=None, ), + res_file=dict(extensions=None, ), + syn_file=dict(extensions=None, ), + t1map_file=dict(extensions=None, ), ) outputs = FitQt1.output_spec() diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py b/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py index 9507f53fa9..436fbb9b29 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py @@ -8,6 +8,7 @@ def test_RegAladin_inputs(): aff_direct_flag=dict(argstr='-affDirect', ), aff_file=dict( argstr='-aff %s', + extensions=None, name_source=['flo_file'], name_template='%s_aff.txt', ), @@ -19,14 +20,21 @@ def test_RegAladin_inputs(): ), flo_file=dict( argstr='-flo %s', + extensions=None, mandatory=True, ), flo_low_val=dict(argstr='-floLowThr %f', ), flo_up_val=dict(argstr='-floUpThr %f', ), - fmask_file=dict(argstr='-fmask %s', ), + fmask_file=dict( + argstr='-fmask %s', + extensions=None, + ), gpuid_val=dict(argstr='-gpuid %i', ), i_val=dict(argstr='-pi %d', ), - in_aff_file=dict(argstr='-inaff %s', ), + in_aff_file=dict( + argstr='-inaff %s', + extensions=None, + ), ln_val=dict(argstr='-ln %d', ), lp_val=dict(argstr='-lp %d', ), maxit_val=dict(argstr='-maxit %d', ), @@ -39,17 +47,22 @@ def test_RegAladin_inputs(): platform_val=dict(argstr='-platf %i', ), ref_file=dict( argstr='-ref %s', + extensions=None, mandatory=True, ), ref_low_val=dict(argstr='-refLowThr %f', ), ref_up_val=dict(argstr='-refUpThr %f', ), res_file=dict( argstr='-res %s', + extensions=None, name_source=['flo_file'], name_template='%s_res.nii.gz', ), rig_only_flag=dict(argstr='-rigOnly', ), - rmask_file=dict(argstr='-rmask %s', ), + rmask_file=dict( + argstr='-rmask %s', + extensions=None, + ), smoo_f_val=dict(argstr='-smooF %f', ), smoo_r_val=dict(argstr='-smooR %f', ), v_val=dict(argstr='-pv %d', ), @@ -62,9 +75,9 @@ def test_RegAladin_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_RegAladin_outputs(): output_map = dict( - aff_file=dict(), + aff_file=dict(extensions=None, ), avg_output=dict(), - res_file=dict(), + res_file=dict(extensions=None, ), ) outputs = RegAladin.output_spec() diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py b/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py index 2510a46eb8..b0a39752cf 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py @@ -26,6 +26,7 @@ def test_RegAverage_inputs(): ), avg_ref_file=dict( argstr='-avg_tran %s', + extensions=None, position=1, requires=['warp_files'], xor=[ @@ -35,6 +36,7 @@ def test_RegAverage_inputs(): ), demean1_ref_file=dict( argstr='-demean1 %s', + extensions=None, position=1, requires=['warp_files'], xor=[ @@ -44,6 +46,7 @@ def test_RegAverage_inputs(): ), 
demean2_ref_file=dict( argstr='-demean2 %s', + extensions=None, position=1, requires=['warp_files'], xor=[ @@ -53,6 +56,7 @@ def test_RegAverage_inputs(): ), demean3_ref_file=dict( argstr='-demean3 %s', + extensions=None, position=1, requires=['warp_files'], xor=[ @@ -70,6 +74,7 @@ def test_RegAverage_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, position=0, ), @@ -86,7 +91,7 @@ def test_RegAverage_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_RegAverage_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = RegAverage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py b/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py index e9bdab82c0..7d1fb8d27c 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py @@ -5,12 +5,16 @@ def test_RegF3D_inputs(): input_map = dict( - aff_file=dict(argstr='-aff %s', ), + aff_file=dict( + argstr='-aff %s', + extensions=None, + ), amc_flag=dict(argstr='-amc', ), args=dict(argstr='%s', ), be_val=dict(argstr='-be %f', ), cpp_file=dict( argstr='-cpp %s', + extensions=None, name_source=['flo_file'], name_template='%s_cpp.nii.gz', ), @@ -22,15 +26,22 @@ def test_RegF3D_inputs(): fbn_val=dict(argstr='--fbn %d', ), flo_file=dict( argstr='-flo %s', + extensions=None, mandatory=True, ), flo_smooth_val=dict(argstr='-smooF %f', ), flwth2_thr_val=dict(argstr='-fLwTh %d %f', ), flwth_thr_val=dict(argstr='--fLwTh %f', ), - fmask_file=dict(argstr='-fmask %s', ), + fmask_file=dict( + argstr='-fmask %s', + extensions=None, + ), fupth2_thr_val=dict(argstr='-fUpTh %d %f', ), fupth_thr_val=dict(argstr='--fUpTh %f', ), - incpp_file=dict(argstr='-incpp %s', ), + incpp_file=dict( + argstr='-incpp %s', + extensions=None, + ), jl_val=dict(argstr='-jl %f', ), kld2_flag=dict(argstr='-kld %d', ), kld_flag=dict(argstr='--kld', ), @@ -57,17 +68,22 @@ def test_RegF3D_inputs(): rbn_val=dict(argstr='--rbn %d', ), ref_file=dict( argstr='-ref %s', + extensions=None, mandatory=True, ), ref_smooth_val=dict(argstr='-smooR %f', ), res_file=dict( argstr='-res %s', + extensions=None, name_source=['flo_file'], name_template='%s_res.nii.gz', ), rlwth2_thr_val=dict(argstr='-rLwTh %d %f', ), rlwth_thr_val=dict(argstr='--rLwTh %f', ), - rmask_file=dict(argstr='-rmask %s', ), + rmask_file=dict( + argstr='-rmask %s', + extensions=None, + ), rupth2_thr_val=dict(argstr='-rUpTh %d %f', ), rupth_thr_val=dict(argstr='--rUpTh %f', ), smooth_grad_val=dict(argstr='-smoothGrad %f', ), @@ -87,10 +103,10 @@ def test_RegF3D_inputs(): def test_RegF3D_outputs(): output_map = dict( avg_output=dict(), - cpp_file=dict(), - invcpp_file=dict(), - invres_file=dict(), - res_file=dict(), + cpp_file=dict(extensions=None, ), + invcpp_file=dict(extensions=None, ), + invres_file=dict(extensions=None, ), + res_file=dict(extensions=None, ), ) outputs = RegF3D.output_spec() diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py b/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py index 63f917c683..38615d8edd 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py @@ -16,13 +16,18 @@ def test_RegJacobian_inputs(): ), out_file=dict( argstr='%s', + extensions=None, name_source=['trans_file'], name_template='%s', position=-1, ), - 
ref_file=dict(argstr='-ref %s', ), + ref_file=dict( + argstr='-ref %s', + extensions=None, + ), trans_file=dict( argstr='-trans %s', + extensions=None, mandatory=True, ), type=dict( @@ -37,7 +42,7 @@ def test_RegJacobian_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_RegJacobian_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = RegJacobian.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py b/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py index 3321d87afc..4f30029e2f 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py @@ -12,6 +12,7 @@ def test_RegMeasure_inputs(): ), flo_file=dict( argstr='-flo %s', + extensions=None, mandatory=True, ), measure_type=dict( @@ -24,11 +25,13 @@ def test_RegMeasure_inputs(): ), out_file=dict( argstr='-out %s', + extensions=None, name_source=['flo_file'], name_template='%s', ), ref_file=dict( argstr='-ref %s', + extensions=None, mandatory=True, ), ) @@ -38,7 +41,7 @@ def test_RegMeasure_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_RegMeasure_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = RegMeasure.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py b/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py index 06b2b48401..80e4241ec7 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py @@ -12,6 +12,7 @@ def test_RegResample_inputs(): ), flo_file=dict( argstr='-flo %s', + extensions=None, mandatory=True, ), inter_val=dict(argstr='-inter %d', ), @@ -21,6 +22,7 @@ def test_RegResample_inputs(): ), out_file=dict( argstr='%s', + extensions=None, name_source=['flo_file'], name_template='%s', position=-1, @@ -30,10 +32,14 @@ def test_RegResample_inputs(): psf_flag=dict(argstr='-psf', ), ref_file=dict( argstr='-ref %s', + extensions=None, mandatory=True, ), tensor_flag=dict(argstr='-tensor ', ), - trans_file=dict(argstr='-trans %s', ), + trans_file=dict( + argstr='-trans %s', + extensions=None, + ), type=dict( argstr='-%s', position=-2, @@ -47,7 +53,7 @@ def test_RegResample_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_RegResample_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = RegResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py b/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py index 5deb4206e6..c1d1f65542 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py @@ -17,11 +17,15 @@ def test_RegTools_inputs(): ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, ), inter_val=dict(argstr='-interp %d', ), iso_flag=dict(argstr='-iso', ), - mask_file=dict(argstr='-nan %s', ), + mask_file=dict( + argstr='-nan %s', + extensions=None, + ), mul_val=dict(argstr='-mul %s', ), noscl_flag=dict(argstr='-noscl', ), omp_core_val=dict( @@ -30,10 +34,14 @@ 
def test_RegTools_inputs(): ), out_file=dict( argstr='-out %s', + extensions=None, name_source=['in_file'], name_template='%s_tools.nii.gz', ), - rms_val=dict(argstr='-rms %s', ), + rms_val=dict( + argstr='-rms %s', + extensions=None, + ), smo_g_val=dict(argstr='-smoG %f %f %f', ), smo_s_val=dict(argstr='-smoS %f %f %f', ), sub_val=dict(argstr='-sub %s', ), @@ -45,7 +53,7 @@ def test_RegTools_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_RegTools_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = RegTools.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py b/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py index 57c1b0ad86..50f666a1df 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py @@ -7,6 +7,7 @@ def test_RegTransform_inputs(): input_map = dict( aff_2_rig_input=dict( argstr='-aff2rig %s', + extensions=None, position=-2, xor=[ 'def_input', 'disp_input', 'flow_input', 'comp_input', @@ -17,6 +18,7 @@ def test_RegTransform_inputs(): args=dict(argstr='%s', ), comp_input=dict( argstr='-comp %s', + extensions=None, position=-3, requires=['comp_input2'], xor=[ @@ -27,10 +29,12 @@ def test_RegTransform_inputs(): ), comp_input2=dict( argstr='%s', + extensions=None, position=-2, ), def_input=dict( argstr='-def %s', + extensions=None, position=-2, xor=[ 'disp_input', 'flow_input', 'comp_input', 'upd_s_form_input', @@ -40,6 +44,7 @@ def test_RegTransform_inputs(): ), disp_input=dict( argstr='-disp %s', + extensions=None, position=-2, xor=[ 'def_input', 'flow_input', 'comp_input', 'upd_s_form_input', @@ -62,6 +67,7 @@ def test_RegTransform_inputs(): ), flow_input=dict( argstr='-flow %s', + extensions=None, position=-2, xor=[ 'def_input', 'disp_input', 'comp_input', 'upd_s_form_input', @@ -71,6 +77,7 @@ def test_RegTransform_inputs(): ), half_input=dict( argstr='-half %s', + extensions=None, position=-2, xor=[ 'def_input', 'disp_input', 'flow_input', 'comp_input', @@ -80,6 +87,7 @@ def test_RegTransform_inputs(): ), inv_aff_input=dict( argstr='-invAff %s', + extensions=None, position=-2, xor=[ 'def_input', 'disp_input', 'flow_input', 'comp_input', @@ -111,20 +119,24 @@ def test_RegTransform_inputs(): ), out_file=dict( argstr='%s', + extensions=None, genfile=True, position=-1, ), ref1_file=dict( argstr='-ref %s', + extensions=None, position=0, ), ref2_file=dict( argstr='-ref2 %s', + extensions=None, position=1, requires=['ref1_file'], ), upd_s_form_input=dict( argstr='-updSform %s', + extensions=None, position=-3, requires=['upd_s_form_input2'], xor=[ @@ -135,6 +147,7 @@ def test_RegTransform_inputs(): ), upd_s_form_input2=dict( argstr='%s', + extensions=None, position=-2, requires=['upd_s_form_input'], ), @@ -145,7 +158,7 @@ def test_RegTransform_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_RegTransform_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = RegTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py index cc7bc896c6..657ceddf4d 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py 
+++ b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py @@ -12,11 +12,13 @@ def test_BinaryMaths_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), operand_file=dict( argstr='%s', + extensions=None, mandatory=True, position=5, xor=['operand_value', 'operand_str'], @@ -40,6 +42,7 @@ def test_BinaryMaths_inputs(): ), out_file=dict( argstr='%s', + extensions=None, name_source=['in_file'], name_template='%s', position=-2, @@ -55,7 +58,7 @@ def test_BinaryMaths_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_BinaryMaths_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = BinaryMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py index 464ed6f6c4..94ad3be1a4 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py @@ -12,6 +12,7 @@ def test_BinaryMathsInteger_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -27,6 +28,7 @@ def test_BinaryMathsInteger_inputs(): ), out_file=dict( argstr='%s', + extensions=None, name_source=['in_file'], name_template='%s', position=-2, @@ -42,7 +44,7 @@ def test_BinaryMathsInteger_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_BinaryMathsInteger_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = BinaryMathsInteger.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py b/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py index 0771d72f5c..092ccce73a 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py @@ -12,6 +12,7 @@ def test_BinaryStats_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -21,10 +22,12 @@ def test_BinaryStats_inputs(): ), mask_file=dict( argstr='-m %s', + extensions=None, position=-2, ), operand_file=dict( argstr='%s', + extensions=None, mandatory=True, position=5, xor=['operand_value'], diff --git a/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py b/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py index dceea837c0..d9dd712482 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py @@ -12,6 +12,7 @@ def test_CalcTopNCC_inputs(): ), in_file=dict( argstr='-target %s', + extensions=None, mandatory=True, position=1, ), @@ -20,7 +21,10 @@ def test_CalcTopNCC_inputs(): mandatory=True, position=3, ), - mask_file=dict(argstr='-mask %s', ), + mask_file=dict( + argstr='-mask %s', + extensions=None, + ), num_templates=dict( argstr='-templates %s', mandatory=True, diff --git a/nipype/interfaces/niftyseg/tests/test_auto_EM.py b/nipype/interfaces/niftyseg/tests/test_auto_EM.py index fc270ed22f..1fc798c2ec 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_EM.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_EM.py @@ -20,10 +20,14 @@ def test_EM_inputs(): ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, position=4, ), - 
mask_file=dict(argstr='-mask %s', ), + mask_file=dict( + argstr='-mask %s', + extensions=None, + ), max_iter=dict( argstr='-max_iter %s', usedefault=True, @@ -40,22 +44,26 @@ def test_EM_inputs(): ), out_bc_file=dict( argstr='-bc_out %s', + extensions=None, name_source=['in_file'], name_template='%s_bc_em.nii.gz', ), out_file=dict( argstr='-out %s', + extensions=None, name_source=['in_file'], name_template='%s_em.nii.gz', ), out_outlier_file=dict( argstr='-out_outlier %s', + extensions=None, name_source=['in_file'], name_template='%s_outlier_em.nii.gz', ), outlier_val=dict(argstr='-outlier %s %s', ), prior_4D=dict( argstr='-prior4D %s', + extensions=None, mandatory=True, xor=['no_prior', 'priors'], ), @@ -74,9 +82,9 @@ def test_EM_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_EM_outputs(): output_map = dict( - out_bc_file=dict(), - out_file=dict(), - out_outlier_file=dict(), + out_bc_file=dict(extensions=None, ), + out_file=dict(extensions=None, ), + out_outlier_file=dict(extensions=None, ), ) outputs = EM.output_spec() diff --git a/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py b/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py index 7cbfe58c6c..d798046995 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py @@ -6,7 +6,10 @@ def test_FillLesions_inputs(): input_map = dict( args=dict(argstr='%s', ), - bin_mask=dict(argstr='-mask %s', ), + bin_mask=dict( + argstr='-mask %s', + extensions=None, + ), cwf=dict(argstr='-cwf %f', ), debug=dict(argstr='-debug', ), environ=dict( @@ -16,11 +19,13 @@ def test_FillLesions_inputs(): in_dilation=dict(argstr='-dil %d', ), in_file=dict( argstr='-i %s', + extensions=None, mandatory=True, position=1, ), lesion_mask=dict( argstr='-l %s', + extensions=None, mandatory=True, position=2, ), @@ -29,6 +34,7 @@ def test_FillLesions_inputs(): out_datatype=dict(argstr='-odt %s', ), out_file=dict( argstr='-o %s', + extensions=None, name_source=['in_file'], name_template='%s_lesions_filled.nii.gz', position=3, @@ -45,7 +51,7 @@ def test_FillLesions_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_FillLesions_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = FillLesions.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py b/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py index eea345e1df..f3613f6130 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py @@ -17,18 +17,26 @@ def test_LabelFusion_inputs(): nohash=True, usedefault=True, ), - file_to_seg=dict(mandatory=True, ), + file_to_seg=dict( + extensions=None, + mandatory=True, + ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, position=1, ), kernel_size=dict(), - mask_file=dict(argstr='-mask %s', ), + mask_file=dict( + argstr='-mask %s', + extensions=None, + ), max_iter=dict(argstr='-max_iter %d', ), mrf_value=dict(argstr='-MRF_beta %f', ), out_file=dict( argstr='-out %s', + extensions=None, name_source=['in_file'], name_template='%s', ), @@ -41,7 +49,7 @@ def test_LabelFusion_inputs(): position=3, usedefault=True, ), - template_file=dict(), + template_file=dict(extensions=None, ), template_num=dict(), unc=dict(argstr='-unc', ), unc_thresh=dict(argstr='-uncthres %f', ), 
@@ -53,7 +61,7 @@ def test_LabelFusion_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_LabelFusion_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = LabelFusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py b/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py index 247dc9773e..f93739d102 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py @@ -12,11 +12,13 @@ def test_MathsCommand_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), out_file=dict( argstr='%s', + extensions=None, name_source=['in_file'], name_template='%s', position=-2, @@ -32,7 +34,7 @@ def test_MathsCommand_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MathsCommand_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = MathsCommand.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_Merge.py b/nipype/interfaces/niftyseg/tests/test_auto_Merge.py index c53e4edf40..714a8f87b4 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_Merge.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_Merge.py @@ -13,6 +13,7 @@ def test_Merge_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -23,6 +24,7 @@ def test_Merge_inputs(): ), out_file=dict( argstr='%s', + extensions=None, name_source=['in_file'], name_template='%s', position=-2, @@ -38,7 +40,7 @@ def test_Merge_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Merge_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Merge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py b/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py index bfeded6eef..e88270d650 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py @@ -9,6 +9,7 @@ def test_PatchMatch_inputs(): cs_size=dict(argstr='-cs %i', ), database_file=dict( argstr='-db %s', + extensions=None, mandatory=True, position=3, ), @@ -18,18 +19,21 @@ def test_PatchMatch_inputs(): ), in_file=dict( argstr='-i %s', + extensions=None, mandatory=True, position=1, ), it_num=dict(argstr='-it %i', ), mask_file=dict( argstr='-m %s', + extensions=None, mandatory=True, position=2, ), match_num=dict(argstr='-match %i', ), out_file=dict( argstr='-o %s', + extensions=None, name_source=['in_file'], name_template='%s_pm.nii.gz', position=4, @@ -43,7 +47,7 @@ def test_PatchMatch_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_PatchMatch_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = PatchMatch.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py b/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py index e845e18952..9ce93ab93a 100644 --- 
a/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py @@ -12,6 +12,7 @@ def test_StatsCommand_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -21,6 +22,7 @@ def test_StatsCommand_inputs(): ), mask_file=dict( argstr='-m %s', + extensions=None, position=-2, ), ) diff --git a/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py b/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py index 7241264eee..75177ab0e0 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py @@ -12,17 +12,20 @@ def test_TupleMaths_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), operand_file1=dict( argstr='%s', + extensions=None, mandatory=True, position=5, xor=['operand_value1'], ), operand_file2=dict( argstr='%s', + extensions=None, mandatory=True, position=6, xor=['operand_value2'], @@ -46,6 +49,7 @@ def test_TupleMaths_inputs(): ), out_file=dict( argstr='%s', + extensions=None, name_source=['in_file'], name_template='%s', position=-2, @@ -61,7 +65,7 @@ def test_TupleMaths_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TupleMaths_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = TupleMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py b/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py index 9b1cd7d194..0cd68ca4ee 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py @@ -12,6 +12,7 @@ def test_UnaryMaths_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -22,6 +23,7 @@ def test_UnaryMaths_inputs(): ), out_file=dict( argstr='%s', + extensions=None, name_source=['in_file'], name_template='%s', position=-2, @@ -37,7 +39,7 @@ def test_UnaryMaths_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_UnaryMaths_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = UnaryMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py b/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py index 34edf1a190..3f158fac20 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py @@ -12,6 +12,7 @@ def test_UnaryStats_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), @@ -21,6 +22,7 @@ def test_UnaryStats_inputs(): ), mask_file=dict( argstr='-m %s', + extensions=None, position=-2, ), operation=dict( diff --git a/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py b/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py index f0f2453274..84e74e969c 100644 --- a/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py +++ b/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py @@ -8,8 +8,11 @@ def test_ComputeMask_inputs(): M=dict(), cc=dict(), m=dict(), - mean_volume=dict(mandatory=True, ), - reference_volume=dict(), + mean_volume=dict( + extensions=None, + mandatory=True, + ), + reference_volume=dict(extensions=None, ), ) inputs = 
ComputeMask.input_spec() @@ -17,7 +20,7 @@ def test_ComputeMask_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ComputeMask_outputs(): - output_map = dict(brain_mask=dict(), ) + output_map = dict(brain_mask=dict(extensions=None, ), ) outputs = ComputeMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py b/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py index 383d1cbea3..3fe17160db 100644 --- a/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py +++ b/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py @@ -6,14 +6,20 @@ def test_EstimateContrast_inputs(): input_map = dict( axis=dict(mandatory=True, ), - beta=dict(mandatory=True, ), + beta=dict( + extensions=None, + mandatory=True, + ), constants=dict(mandatory=True, ), contrasts=dict(mandatory=True, ), dof=dict(mandatory=True, ), mask=dict(), nvbeta=dict(mandatory=True, ), reg_names=dict(mandatory=True, ), - s2=dict(mandatory=True, ), + s2=dict( + extensions=None, + mandatory=True, + ), ) inputs = EstimateContrast.input_spec() diff --git a/nipype/interfaces/nipy/tests/test_auto_FitGLM.py b/nipype/interfaces/nipy/tests/test_auto_FitGLM.py index 4ac3aa720e..a700c18d43 100644 --- a/nipype/interfaces/nipy/tests/test_auto_FitGLM.py +++ b/nipype/interfaces/nipy/tests/test_auto_FitGLM.py @@ -23,15 +23,15 @@ def test_FitGLM_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_FitGLM_outputs(): output_map = dict( - a=dict(), + a=dict(extensions=None, ), axis=dict(), - beta=dict(), + beta=dict(extensions=None, ), constants=dict(), dof=dict(), nvbeta=dict(), reg_names=dict(), residuals=dict(), - s2=dict(), + s2=dict(extensions=None, ), ) outputs = FitGLM.output_spec() diff --git a/nipype/interfaces/nipy/tests/test_auto_Similarity.py b/nipype/interfaces/nipy/tests/test_auto_Similarity.py index fbdb6fffa6..09a466b8a3 100644 --- a/nipype/interfaces/nipy/tests/test_auto_Similarity.py +++ b/nipype/interfaces/nipy/tests/test_auto_Similarity.py @@ -5,11 +5,17 @@ def test_Similarity_inputs(): input_map = dict( - mask1=dict(), - mask2=dict(), + mask1=dict(extensions=None, ), + mask2=dict(extensions=None, ), metric=dict(usedefault=True, ), - volume1=dict(mandatory=True, ), - volume2=dict(mandatory=True, ), + volume1=dict( + extensions=None, + mandatory=True, + ), + volume2=dict( + extensions=None, + mandatory=True, + ), ) inputs = Similarity.input_spec() diff --git a/nipype/interfaces/nipy/tests/test_auto_Trim.py b/nipype/interfaces/nipy/tests/test_auto_Trim.py index 6073c9082b..1e013f2063 100644 --- a/nipype/interfaces/nipy/tests/test_auto_Trim.py +++ b/nipype/interfaces/nipy/tests/test_auto_Trim.py @@ -7,8 +7,11 @@ def test_Trim_inputs(): input_map = dict( begin_index=dict(usedefault=True, ), end_index=dict(usedefault=True, ), - in_file=dict(mandatory=True, ), - out_file=dict(), + in_file=dict( + extensions=None, + mandatory=True, + ), + out_file=dict(extensions=None, ), suffix=dict(usedefault=True, ), ) inputs = Trim.input_spec() @@ -17,7 +20,7 @@ def test_Trim_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Trim_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Trim.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py 
b/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py index 4a641ebb50..a9f1ca8769 100644 --- a/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py +++ b/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py @@ -10,10 +10,13 @@ def test_CoherenceAnalyzer_inputs(): figure_type=dict(usedefault=True, ), frequency_range=dict(usedefault=True, ), in_TS=dict(), - in_file=dict(requires=('TR', ), ), + in_file=dict( + extensions=None, + requires=('TR', ), + ), n_overlap=dict(usedefault=True, ), - output_csv_file=dict(), - output_figure_file=dict(), + output_csv_file=dict(extensions=None, ), + output_figure_file=dict(extensions=None, ), ) inputs = CoherenceAnalyzer.input_spec() @@ -23,11 +26,11 @@ def test_CoherenceAnalyzer_inputs(): def test_CoherenceAnalyzer_outputs(): output_map = dict( coherence_array=dict(), - coherence_csv=dict(), - coherence_fig=dict(), + coherence_csv=dict(extensions=None, ), + coherence_fig=dict(extensions=None, ), timedelay_array=dict(), - timedelay_csv=dict(), - timedelay_fig=dict(), + timedelay_csv=dict(extensions=None, ), + timedelay_fig=dict(extensions=None, ), ) outputs = CoherenceAnalyzer.output_spec() diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py index 81a22bfe38..88efbd6ed2 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py @@ -10,13 +10,34 @@ def test_BRAINSPosteriorToContinuousClass_inputs(): nohash=True, usedefault=True, ), - inputBasalGmVolume=dict(argstr='--inputBasalGmVolume %s', ), - inputCrblGmVolume=dict(argstr='--inputCrblGmVolume %s', ), - inputCrblWmVolume=dict(argstr='--inputCrblWmVolume %s', ), - inputCsfVolume=dict(argstr='--inputCsfVolume %s', ), - inputSurfaceGmVolume=dict(argstr='--inputSurfaceGmVolume %s', ), - inputVbVolume=dict(argstr='--inputVbVolume %s', ), - inputWhiteVolume=dict(argstr='--inputWhiteVolume %s', ), + inputBasalGmVolume=dict( + argstr='--inputBasalGmVolume %s', + extensions=None, + ), + inputCrblGmVolume=dict( + argstr='--inputCrblGmVolume %s', + extensions=None, + ), + inputCrblWmVolume=dict( + argstr='--inputCrblWmVolume %s', + extensions=None, + ), + inputCsfVolume=dict( + argstr='--inputCsfVolume %s', + extensions=None, + ), + inputSurfaceGmVolume=dict( + argstr='--inputSurfaceGmVolume %s', + extensions=None, + ), + inputVbVolume=dict( + argstr='--inputVbVolume %s', + extensions=None, + ), + inputWhiteVolume=dict( + argstr='--inputWhiteVolume %s', + extensions=None, + ), outputVolume=dict( argstr='--outputVolume %s', hash_files=False, @@ -28,7 +49,7 @@ def test_BRAINSPosteriorToContinuousClass_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSPosteriorToContinuousClass_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = BRAINSPosteriorToContinuousClass.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py index ac589ad6dc..8161cd4fad 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py @@ -30,7 +30,10 @@ def 
test_BRAINSTalairach_inputs(): nohash=True, usedefault=True, ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), outputBox=dict( argstr='--outputBox %s', hash_files=False, @@ -47,8 +50,8 @@ def test_BRAINSTalairach_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSTalairach_outputs(): output_map = dict( - outputBox=dict(), - outputGrid=dict(), + outputBox=dict(extensions=None, ), + outputGrid=dict(extensions=None, ), ) outputs = BRAINSTalairach.output_spec() diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py index 5ada1576e7..19fd531a7e 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py @@ -12,13 +12,22 @@ def test_BRAINSTalairachMask_inputs(): ), expand=dict(argstr='--expand ', ), hemisphereMode=dict(argstr='--hemisphereMode %s', ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), outputVolume=dict( argstr='--outputVolume %s', hash_files=False, ), - talairachBox=dict(argstr='--talairachBox %s', ), - talairachParameters=dict(argstr='--talairachParameters %s', ), + talairachBox=dict( + argstr='--talairachBox %s', + extensions=None, + ), + talairachParameters=dict( + argstr='--talairachParameters %s', + extensions=None, + ), ) inputs = BRAINSTalairachMask.input_spec() @@ -26,7 +35,7 @@ def test_BRAINSTalairachMask_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSTalairachMask_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = BRAINSTalairachMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py b/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py index eb4bdcffae..a9e12c5584 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py @@ -11,7 +11,10 @@ def test_GenerateEdgeMapImage_inputs(): usedefault=True, ), inputMRVolumes=dict(argstr='--inputMRVolumes %s...', ), - inputMask=dict(argstr='--inputMask %s', ), + inputMask=dict( + argstr='--inputMask %s', + extensions=None, + ), lowerPercentileMatching=dict(argstr='--lowerPercentileMatching %f', ), maximumOutputRange=dict(argstr='--maximumOutputRange %d', ), minimumOutputRange=dict(argstr='--minimumOutputRange %d', ), @@ -33,8 +36,8 @@ def test_GenerateEdgeMapImage_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_GenerateEdgeMapImage_outputs(): output_map = dict( - outputEdgeMap=dict(), - outputMaximumGradientImage=dict(), + outputEdgeMap=dict(extensions=None, ), + outputMaximumGradientImage=dict(extensions=None, ), ) outputs = GenerateEdgeMapImage.output_spec() diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py b/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py index ae16bc8fab..5728b575a3 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py @@ -27,7 +27,7 @@ def 
test_GeneratePurePlugMask_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GeneratePurePlugMask_outputs(): - output_map = dict(outputMaskFile=dict(), ) + output_map = dict(outputMaskFile=dict(extensions=None, ), ) outputs = GeneratePurePlugMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py b/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py index 1377072149..0f4fbd5a18 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py @@ -11,16 +11,28 @@ def test_HistogramMatchingFilter_inputs(): usedefault=True, ), histogramAlgorithm=dict(argstr='--histogramAlgorithm %s', ), - inputBinaryVolume=dict(argstr='--inputBinaryVolume %s', ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputBinaryVolume=dict( + argstr='--inputBinaryVolume %s', + extensions=None, + ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ), outputVolume=dict( argstr='--outputVolume %s', hash_files=False, ), - referenceBinaryVolume=dict(argstr='--referenceBinaryVolume %s', ), - referenceVolume=dict(argstr='--referenceVolume %s', ), + referenceBinaryVolume=dict( + argstr='--referenceBinaryVolume %s', + extensions=None, + ), + referenceVolume=dict( + argstr='--referenceVolume %s', + extensions=None, + ), verbose=dict(argstr='--verbose ', ), writeHistogram=dict(argstr='--writeHistogram %s', ), ) @@ -30,7 +42,7 @@ def test_HistogramMatchingFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_HistogramMatchingFilter_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = HistogramMatchingFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py b/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py index 534488ad10..f66caff4f6 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py @@ -5,14 +5,23 @@ def test_SimilarityIndex_inputs(): input_map = dict( - ANNContinuousVolume=dict(argstr='--ANNContinuousVolume %s', ), + ANNContinuousVolume=dict( + argstr='--ANNContinuousVolume %s', + extensions=None, + ), args=dict(argstr='%s', ), environ=dict( nohash=True, usedefault=True, ), - inputManualVolume=dict(argstr='--inputManualVolume %s', ), - outputCSVFilename=dict(argstr='--outputCSVFilename %s', ), + inputManualVolume=dict( + argstr='--inputManualVolume %s', + extensions=None, + ), + outputCSVFilename=dict( + argstr='--outputCSVFilename %s', + extensions=None, + ), thresholdInterval=dict(argstr='--thresholdInterval %f', ), ) inputs = SimilarityIndex.input_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py index 05593527fb..734363402a 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py @@ -13,15 +13,27 @@ def 
test_DWIConvert_inputs(): usedefault=True, ), fMRI=dict(argstr='--fMRI ', ), - fslNIFTIFile=dict(argstr='--fslNIFTIFile %s', ), + fslNIFTIFile=dict( + argstr='--fslNIFTIFile %s', + extensions=None, + ), gradientVectorFile=dict( argstr='--gradientVectorFile %s', hash_files=False, ), - inputBValues=dict(argstr='--inputBValues %s', ), - inputBVectors=dict(argstr='--inputBVectors %s', ), + inputBValues=dict( + argstr='--inputBValues %s', + extensions=None, + ), + inputBVectors=dict( + argstr='--inputBVectors %s', + extensions=None, + ), inputDicomDirectory=dict(argstr='--inputDicomDirectory %s', ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), outputBValues=dict( argstr='--outputBValues %s', hash_files=False, @@ -54,11 +66,11 @@ def test_DWIConvert_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_DWIConvert_outputs(): output_map = dict( - gradientVectorFile=dict(), - outputBValues=dict(), - outputBVectors=dict(), + gradientVectorFile=dict(extensions=None, ), + outputBValues=dict(extensions=None, ), + outputBVectors=dict(extensions=None, ), outputDirectory=dict(), - outputVolume=dict(), + outputVolume=dict(extensions=None, ), ) outputs = DWIConvert.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py index eb9d8c6184..3ab83055bb 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py @@ -13,8 +13,14 @@ def test_compareTractInclusion_inputs(): ), numberOfPoints=dict(argstr='--numberOfPoints %d', ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), - standardFiber=dict(argstr='--standardFiber %s', ), - testFiber=dict(argstr='--testFiber %s', ), + standardFiber=dict( + argstr='--standardFiber %s', + extensions=None, + ), + testFiber=dict( + argstr='--testFiber %s', + extensions=None, + ), testForBijection=dict(argstr='--testForBijection ', ), testForFiberCardinality=dict(argstr='--testForFiberCardinality ', ), writeXMLPolyDataFile=dict(argstr='--writeXMLPolyDataFile ', ), diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py index 92c00853f4..6b21e7a648 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py @@ -24,7 +24,7 @@ def test_dtiaverage_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_dtiaverage_outputs(): - output_map = dict(tensor_output=dict(), ) + output_map = dict(tensor_output=dict(extensions=None, ), ) outputs = dtiaverage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py index 440fd4df07..acc0fb30f9 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py @@ -15,14 +15,23 @@ def test_dtiestim_inputs(): ), DTI_double=dict(argstr='--DTI_double ', ), args=dict(argstr='%s', ), - bad_region_mask=dict(argstr='--bad_region_mask %s', ), - brain_mask=dict(argstr='--brain_mask %s', ), + bad_region_mask=dict( + argstr='--bad_region_mask 
%s', + extensions=None, + ), + brain_mask=dict( + argstr='--brain_mask %s', + extensions=None, + ), correction=dict(argstr='--correction %s', ), defaultTensor=dict( argstr='--defaultTensor %s', sep=',', ), - dwi_image=dict(argstr='--dwi_image %s', ), + dwi_image=dict( + argstr='--dwi_image %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, @@ -51,10 +60,10 @@ def test_dtiestim_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_dtiestim_outputs(): output_map = dict( - B0=dict(), - B0_mask_output=dict(), - idwi=dict(), - tensor_output=dict(), + B0=dict(extensions=None, ), + B0_mask_output=dict(extensions=None, ), + idwi=dict(extensions=None, ), + tensor_output=dict(extensions=None, ), ) outputs = dtiestim.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py index 3148d1edb5..cb06505959 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py @@ -10,7 +10,10 @@ def test_dtiprocess_inputs(): argstr='--RD_output %s', hash_files=False, ), - affineitk_file=dict(argstr='--affineitk_file %s', ), + affineitk_file=dict( + argstr='--affineitk_file %s', + extensions=None, + ), args=dict(argstr='%s', ), color_fa_output=dict( argstr='--color_fa_output %s', @@ -21,8 +24,14 @@ def test_dtiprocess_inputs(): argstr='--deformation_output %s', hash_files=False, ), - dof_file=dict(argstr='--dof_file %s', ), - dti_image=dict(argstr='--dti_image %s', ), + dof_file=dict( + argstr='--dof_file %s', + extensions=None, + ), + dti_image=dict( + argstr='--dti_image %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, @@ -39,7 +48,10 @@ def test_dtiprocess_inputs(): argstr='--fa_output %s', hash_files=False, ), - forward=dict(argstr='--forward %s', ), + forward=dict( + argstr='--forward %s', + extensions=None, + ), frobenius_norm_output=dict( argstr='--frobenius_norm_output %s', hash_files=False, @@ -58,7 +70,10 @@ def test_dtiprocess_inputs(): argstr='--lambda3_output %s', hash_files=False, ), - mask=dict(argstr='--mask %s', ), + mask=dict( + argstr='--mask %s', + extensions=None, + ), md_output=dict( argstr='--md_output %s', hash_files=False, @@ -67,7 +82,10 @@ def test_dtiprocess_inputs(): argstr='--negative_eigenvector_output %s', hash_files=False, ), - newdof_file=dict(argstr='--newdof_file %s', ), + newdof_file=dict( + argstr='--newdof_file %s', + extensions=None, + ), outmask=dict( argstr='--outmask %s', hash_files=False, @@ -92,21 +110,21 @@ def test_dtiprocess_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_dtiprocess_outputs(): output_map = dict( - RD_output=dict(), - color_fa_output=dict(), - deformation_output=dict(), - fa_gradient_output=dict(), - fa_gradmag_output=dict(), - fa_output=dict(), - frobenius_norm_output=dict(), - lambda1_output=dict(), - lambda2_output=dict(), - lambda3_output=dict(), - md_output=dict(), - negative_eigenvector_output=dict(), - outmask=dict(), - principal_eigenvector_output=dict(), - rot_output=dict(), + RD_output=dict(extensions=None, ), + color_fa_output=dict(extensions=None, ), + deformation_output=dict(extensions=None, ), + fa_gradient_output=dict(extensions=None, ), + fa_gradmag_output=dict(extensions=None, ), + fa_output=dict(extensions=None, ), + frobenius_norm_output=dict(extensions=None, ), + lambda1_output=dict(extensions=None, ), + lambda2_output=dict(extensions=None, ), + 
lambda3_output=dict(extensions=None, ), + md_output=dict(extensions=None, ), + negative_eigenvector_output=dict(extensions=None, ), + outmask=dict(extensions=None, ), + principal_eigenvector_output=dict(extensions=None, ), + rot_output=dict(extensions=None, ), ) outputs = dtiprocess.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py index ac5784f1c7..d39aaec246 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py @@ -10,7 +10,10 @@ def test_extractNrrdVectorIndex_inputs(): nohash=True, usedefault=True, ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputVolume=dict( argstr='--outputVolume %s', @@ -25,7 +28,7 @@ def test_extractNrrdVectorIndex_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_extractNrrdVectorIndex_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = extractNrrdVectorIndex.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py index 195d7dc3e1..e0d804827c 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py @@ -11,7 +11,10 @@ def test_gtractAnisotropyMap_inputs(): nohash=True, usedefault=True, ), - inputTensorVolume=dict(argstr='--inputTensorVolume %s', ), + inputTensorVolume=dict( + argstr='--inputTensorVolume %s', + extensions=None, + ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputVolume=dict( argstr='--outputVolume %s', @@ -24,7 +27,7 @@ def test_gtractAnisotropyMap_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_gtractAnisotropyMap_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = gtractAnisotropyMap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py index fbb5bd6a55..44bd558800 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py @@ -12,7 +12,10 @@ def test_gtractAverageBvalues_inputs(): nohash=True, usedefault=True, ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputVolume=dict( argstr='--outputVolume %s', @@ -25,7 +28,7 @@ def test_gtractAverageBvalues_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_gtractAverageBvalues_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = gtractAverageBvalues.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py index fed980c463..0b3a0505ef 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py @@ -12,7 +12,10 @@ def test_gtractClipAnisotropy_inputs(): nohash=True, usedefault=True, ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputVolume=dict( argstr='--outputVolume %s', @@ -25,7 +28,7 @@ def test_gtractClipAnisotropy_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_gtractClipAnisotropy_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = gtractClipAnisotropy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py index 456e1e79fa..e6c241d053 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py @@ -17,9 +17,18 @@ def test_gtractCoRegAnatomy_inputs(): argstr='--gridSize %s', sep=',', ), - inputAnatomicalVolume=dict(argstr='--inputAnatomicalVolume %s', ), - inputRigidTransform=dict(argstr='--inputRigidTransform %s', ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputAnatomicalVolume=dict( + argstr='--inputAnatomicalVolume %s', + extensions=None, + ), + inputRigidTransform=dict( + argstr='--inputRigidTransform %s', + extensions=None, + ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), maxBSplineDisplacement=dict(argstr='--maxBSplineDisplacement %f', ), maximumStepSize=dict(argstr='--maximumStepSize %f', ), minimumStepSize=dict(argstr='--minimumStepSize %f', ), @@ -47,7 +56,7 @@ def test_gtractCoRegAnatomy_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_gtractCoRegAnatomy_outputs(): - output_map = dict(outputTransformName=dict(), ) + output_map = dict(outputTransformName=dict(extensions=None, ), ) outputs = gtractCoRegAnatomy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py index 7b75858eff..5099ba1bac 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py @@ -24,7 +24,7 @@ def test_gtractConcatDwi_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_gtractConcatDwi_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = gtractConcatDwi.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py index d9e0b725c8..a464fcd7a1 100644 --- 
a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py @@ -10,8 +10,14 @@ def test_gtractCopyImageOrientation_inputs(): nohash=True, usedefault=True, ), - inputReferenceVolume=dict(argstr='--inputReferenceVolume %s', ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputReferenceVolume=dict( + argstr='--inputReferenceVolume %s', + extensions=None, + ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputVolume=dict( argstr='--outputVolume %s', @@ -24,7 +30,7 @@ def test_gtractCopyImageOrientation_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_gtractCopyImageOrientation_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = gtractCopyImageOrientation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py index 3143b16dfb..f99c27aa68 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py @@ -12,11 +12,17 @@ def test_gtractCoregBvalues_inputs(): nohash=True, usedefault=True, ), - fixedVolume=dict(argstr='--fixedVolume %s', ), + fixedVolume=dict( + argstr='--fixedVolume %s', + extensions=None, + ), fixedVolumeIndex=dict(argstr='--fixedVolumeIndex %d', ), maximumStepSize=dict(argstr='--maximumStepSize %f', ), minimumStepSize=dict(argstr='--minimumStepSize %f', ), - movingVolume=dict(argstr='--movingVolume %s', ), + movingVolume=dict( + argstr='--movingVolume %s', + extensions=None, + ), numberOfIterations=dict(argstr='--numberOfIterations %d', ), numberOfSpatialSamples=dict(argstr='--numberOfSpatialSamples %d', ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), @@ -40,8 +46,8 @@ def test_gtractCoregBvalues_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_gtractCoregBvalues_outputs(): output_map = dict( - outputTransform=dict(), - outputVolume=dict(), + outputTransform=dict(extensions=None, ), + outputVolume=dict(extensions=None, ), ) outputs = gtractCoregBvalues.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py index 32d1e68898..75d5d13e4e 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py @@ -11,10 +11,18 @@ def test_gtractCostFastMarching_inputs(): nohash=True, usedefault=True, ), - inputAnisotropyVolume=dict(argstr='--inputAnisotropyVolume %s', ), + inputAnisotropyVolume=dict( + argstr='--inputAnisotropyVolume %s', + extensions=None, + ), inputStartingSeedsLabelMapVolume=dict( - argstr='--inputStartingSeedsLabelMapVolume %s', ), - inputTensorVolume=dict(argstr='--inputTensorVolume %s', ), + argstr='--inputStartingSeedsLabelMapVolume %s', + extensions=None, + ), + inputTensorVolume=dict( + argstr='--inputTensorVolume %s', + extensions=None, + ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputCostVolume=dict( argstr='--outputCostVolume %s', @@ -35,8 +43,8 @@ def 
test_gtractCostFastMarching_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_gtractCostFastMarching_outputs(): output_map = dict( - outputCostVolume=dict(), - outputSpeedVolume=dict(), + outputCostVolume=dict(extensions=None, ), + outputSpeedVolume=dict(extensions=None, ), ) outputs = gtractCostFastMarching.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py index bbe375bbb9..e2dff9e309 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py @@ -10,7 +10,10 @@ def test_gtractCreateGuideFiber_inputs(): nohash=True, usedefault=True, ), - inputFiber=dict(argstr='--inputFiber %s', ), + inputFiber=dict( + argstr='--inputFiber %s', + extensions=None, + ), numberOfPoints=dict(argstr='--numberOfPoints %d', ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputFiber=dict( @@ -25,7 +28,7 @@ def test_gtractCreateGuideFiber_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_gtractCreateGuideFiber_outputs(): - output_map = dict(outputFiber=dict(), ) + output_map = dict(outputFiber=dict(extensions=None, ), ) outputs = gtractCreateGuideFiber.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py index ec3a99b91e..2342a73cb1 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py @@ -11,11 +11,22 @@ def test_gtractFastMarchingTracking_inputs(): nohash=True, usedefault=True, ), - inputAnisotropyVolume=dict(argstr='--inputAnisotropyVolume %s', ), - inputCostVolume=dict(argstr='--inputCostVolume %s', ), + inputAnisotropyVolume=dict( + argstr='--inputAnisotropyVolume %s', + extensions=None, + ), + inputCostVolume=dict( + argstr='--inputCostVolume %s', + extensions=None, + ), inputStartingSeedsLabelMapVolume=dict( - argstr='--inputStartingSeedsLabelMapVolume %s', ), - inputTensorVolume=dict(argstr='--inputTensorVolume %s', ), + argstr='--inputStartingSeedsLabelMapVolume %s', + extensions=None, + ), + inputTensorVolume=dict( + argstr='--inputTensorVolume %s', + extensions=None, + ), maximumStepSize=dict(argstr='--maximumStepSize %f', ), minimumStepSize=dict(argstr='--minimumStepSize %f', ), numberOfIterations=dict(argstr='--numberOfIterations %d', ), @@ -35,7 +46,7 @@ def test_gtractFastMarchingTracking_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_gtractFastMarchingTracking_outputs(): - output_map = dict(outputTract=dict(), ) + output_map = dict(outputTract=dict(extensions=None, ), ) outputs = gtractFastMarchingTracking.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py index 96c93b8b64..233f7358f5 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py @@ -16,13 +16,26 @@ def 
test_gtractFiberTracking_inputs(): ), guidedCurvatureThreshold=dict( argstr='--guidedCurvatureThreshold %f', ), - inputAnisotropyVolume=dict(argstr='--inputAnisotropyVolume %s', ), + inputAnisotropyVolume=dict( + argstr='--inputAnisotropyVolume %s', + extensions=None, + ), inputEndingSeedsLabelMapVolume=dict( - argstr='--inputEndingSeedsLabelMapVolume %s', ), + argstr='--inputEndingSeedsLabelMapVolume %s', + extensions=None, + ), inputStartingSeedsLabelMapVolume=dict( - argstr='--inputStartingSeedsLabelMapVolume %s', ), - inputTensorVolume=dict(argstr='--inputTensorVolume %s', ), - inputTract=dict(argstr='--inputTract %s', ), + argstr='--inputStartingSeedsLabelMapVolume %s', + extensions=None, + ), + inputTensorVolume=dict( + argstr='--inputTensorVolume %s', + extensions=None, + ), + inputTract=dict( + argstr='--inputTract %s', + extensions=None, + ), maximumBranchPoints=dict(argstr='--maximumBranchPoints %d', ), maximumGuideDistance=dict(argstr='--maximumGuideDistance %f', ), maximumLength=dict(argstr='--maximumLength %f', ), @@ -51,7 +64,7 @@ def test_gtractFiberTracking_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_gtractFiberTracking_outputs(): - output_map = dict(outputTract=dict(), ) + output_map = dict(outputTract=dict(extensions=None, ), ) outputs = gtractFiberTracking.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py index 9ddde832b2..e811627909 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py @@ -10,8 +10,14 @@ def test_gtractImageConformity_inputs(): nohash=True, usedefault=True, ), - inputReferenceVolume=dict(argstr='--inputReferenceVolume %s', ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputReferenceVolume=dict( + argstr='--inputReferenceVolume %s', + extensions=None, + ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputVolume=dict( argstr='--outputVolume %s', @@ -24,7 +30,7 @@ def test_gtractImageConformity_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_gtractImageConformity_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = gtractImageConformity.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py index ca642ceb66..9ffcb11e11 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py @@ -10,8 +10,14 @@ def test_gtractInvertBSplineTransform_inputs(): nohash=True, usedefault=True, ), - inputReferenceVolume=dict(argstr='--inputReferenceVolume %s', ), - inputTransform=dict(argstr='--inputTransform %s', ), + inputReferenceVolume=dict( + argstr='--inputReferenceVolume %s', + extensions=None, + ), + inputTransform=dict( + argstr='--inputTransform %s', + extensions=None, + ), landmarkDensity=dict( argstr='--landmarkDensity %s', sep=',', @@ -28,7 +34,7 
@@ def test_gtractInvertBSplineTransform_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_gtractInvertBSplineTransform_outputs(): - output_map = dict(outputTransform=dict(), ) + output_map = dict(outputTransform=dict(extensions=None, ), ) outputs = gtractInvertBSplineTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py index e4fd213d39..7d1468dba6 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py @@ -6,8 +6,14 @@ def test_gtractInvertDisplacementField_inputs(): input_map = dict( args=dict(argstr='%s', ), - baseImage=dict(argstr='--baseImage %s', ), - deformationImage=dict(argstr='--deformationImage %s', ), + baseImage=dict( + argstr='--baseImage %s', + extensions=None, + ), + deformationImage=dict( + argstr='--deformationImage %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, @@ -25,7 +31,7 @@ def test_gtractInvertDisplacementField_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_gtractInvertDisplacementField_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = gtractInvertDisplacementField.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py index c035862663..0d67bb2669 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py @@ -10,7 +10,10 @@ def test_gtractInvertRigidTransform_inputs(): nohash=True, usedefault=True, ), - inputTransform=dict(argstr='--inputTransform %s', ), + inputTransform=dict( + argstr='--inputTransform %s', + extensions=None, + ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputTransform=dict( argstr='--outputTransform %s', @@ -23,7 +26,7 @@ def test_gtractInvertRigidTransform_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_gtractInvertRigidTransform_outputs(): - output_map = dict(outputTransform=dict(), ) + output_map = dict(outputTransform=dict(extensions=None, ), ) outputs = gtractInvertRigidTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py index 43f428b233..5e95276c83 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py @@ -10,9 +10,18 @@ def test_gtractResampleAnisotropy_inputs(): nohash=True, usedefault=True, ), - inputAnatomicalVolume=dict(argstr='--inputAnatomicalVolume %s', ), - inputAnisotropyVolume=dict(argstr='--inputAnisotropyVolume %s', ), - inputTransform=dict(argstr='--inputTransform %s', ), + inputAnatomicalVolume=dict( + argstr='--inputAnatomicalVolume %s', + 
extensions=None, + ), + inputAnisotropyVolume=dict( + argstr='--inputAnisotropyVolume %s', + extensions=None, + ), + inputTransform=dict( + argstr='--inputTransform %s', + extensions=None, + ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputVolume=dict( argstr='--outputVolume %s', @@ -26,7 +35,7 @@ def test_gtractResampleAnisotropy_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_gtractResampleAnisotropy_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = gtractResampleAnisotropy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py index 812afca5c0..c9bde14e9a 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py @@ -10,9 +10,18 @@ def test_gtractResampleB0_inputs(): nohash=True, usedefault=True, ), - inputAnatomicalVolume=dict(argstr='--inputAnatomicalVolume %s', ), - inputTransform=dict(argstr='--inputTransform %s', ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputAnatomicalVolume=dict( + argstr='--inputAnatomicalVolume %s', + extensions=None, + ), + inputTransform=dict( + argstr='--inputTransform %s', + extensions=None, + ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputVolume=dict( argstr='--outputVolume %s', @@ -27,7 +36,7 @@ def test_gtractResampleB0_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_gtractResampleB0_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = gtractResampleB0.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py index decc017c60..27ad9e5e7d 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py @@ -10,9 +10,18 @@ def test_gtractResampleCodeImage_inputs(): nohash=True, usedefault=True, ), - inputCodeVolume=dict(argstr='--inputCodeVolume %s', ), - inputReferenceVolume=dict(argstr='--inputReferenceVolume %s', ), - inputTransform=dict(argstr='--inputTransform %s', ), + inputCodeVolume=dict( + argstr='--inputCodeVolume %s', + extensions=None, + ), + inputReferenceVolume=dict( + argstr='--inputReferenceVolume %s', + extensions=None, + ), + inputTransform=dict( + argstr='--inputTransform %s', + extensions=None, + ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputVolume=dict( argstr='--outputVolume %s', @@ -26,7 +35,7 @@ def test_gtractResampleCodeImage_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_gtractResampleCodeImage_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = gtractResampleCodeImage.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py index b815d3d3d6..be98d4a3e3 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py @@ -15,8 +15,14 @@ def test_gtractResampleDWIInPlace_inputs(): argstr='--imageOutputSize %s', sep=',', ), - inputTransform=dict(argstr='--inputTransform %s', ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputTransform=dict( + argstr='--inputTransform %s', + extensions=None, + ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputResampledB0=dict( argstr='--outputResampledB0 %s', @@ -26,8 +32,14 @@ def test_gtractResampleDWIInPlace_inputs(): argstr='--outputVolume %s', hash_files=False, ), - referenceVolume=dict(argstr='--referenceVolume %s', ), - warpDWITransform=dict(argstr='--warpDWITransform %s', ), + referenceVolume=dict( + argstr='--referenceVolume %s', + extensions=None, + ), + warpDWITransform=dict( + argstr='--warpDWITransform %s', + extensions=None, + ), ) inputs = gtractResampleDWIInPlace.input_spec() @@ -36,8 +48,8 @@ def test_gtractResampleDWIInPlace_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_gtractResampleDWIInPlace_outputs(): output_map = dict( - outputResampledB0=dict(), - outputVolume=dict(), + outputResampledB0=dict(extensions=None, ), + outputVolume=dict(extensions=None, ), ) outputs = gtractResampleDWIInPlace.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py index d0d2cd5664..ea9612996c 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py @@ -11,10 +11,17 @@ def test_gtractResampleFibers_inputs(): usedefault=True, ), inputForwardDeformationFieldVolume=dict( - argstr='--inputForwardDeformationFieldVolume %s', ), + argstr='--inputForwardDeformationFieldVolume %s', + extensions=None, + ), inputReverseDeformationFieldVolume=dict( - argstr='--inputReverseDeformationFieldVolume %s', ), - inputTract=dict(argstr='--inputTract %s', ), + argstr='--inputReverseDeformationFieldVolume %s', + extensions=None, + ), + inputTract=dict( + argstr='--inputTract %s', + extensions=None, + ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputTract=dict( argstr='--outputTract %s', @@ -28,7 +35,7 @@ def test_gtractResampleFibers_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_gtractResampleFibers_outputs(): - output_map = dict(outputTract=dict(), ) + output_map = dict(outputTract=dict(extensions=None, ), ) outputs = gtractResampleFibers.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py index ead96d1c71..9887f0e224 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py @@ -18,9 +18,15 @@ def test_gtractTensor_inputs(): argstr='--ignoreIndex %s', sep=',', ), - inputVolume=dict(argstr='--inputVolume %s', ), + 
inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), maskProcessingMode=dict(argstr='--maskProcessingMode %s', ), - maskVolume=dict(argstr='--maskVolume %s', ), + maskVolume=dict( + argstr='--maskVolume %s', + extensions=None, + ), medianFilterSize=dict( argstr='--medianFilterSize %s', sep=',', @@ -39,7 +45,7 @@ def test_gtractTensor_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_gtractTensor_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = gtractTensor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py index 2dfde189e2..17731d6648 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py @@ -10,8 +10,14 @@ def test_gtractTransformToDisplacementField_inputs(): nohash=True, usedefault=True, ), - inputReferenceVolume=dict(argstr='--inputReferenceVolume %s', ), - inputTransform=dict(argstr='--inputTransform %s', ), + inputReferenceVolume=dict( + argstr='--inputReferenceVolume %s', + extensions=None, + ), + inputTransform=dict( + argstr='--inputTransform %s', + extensions=None, + ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputDeformationFieldVolume=dict( argstr='--outputDeformationFieldVolume %s', @@ -24,7 +30,7 @@ def test_gtractTransformToDisplacementField_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_gtractTransformToDisplacementField_outputs(): - output_map = dict(outputDeformationFieldVolume=dict(), ) + output_map = dict(outputDeformationFieldVolume=dict(extensions=None, ), ) outputs = gtractTransformToDisplacementField.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py index cc06c5ede5..511e6c8844 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py @@ -10,7 +10,10 @@ def test_maxcurvature_inputs(): nohash=True, usedefault=True, ), - image=dict(argstr='--image %s', ), + image=dict( + argstr='--image %s', + extensions=None, + ), output=dict( argstr='--output %s', hash_files=False, @@ -24,7 +27,7 @@ def test_maxcurvature_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_maxcurvature_outputs(): - output_map = dict(output=dict(), ) + output_map = dict(output=dict(extensions=None, ), ) outputs = maxcurvature.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py index 7c61974ef3..b713d0b053 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py @@ -10,7 +10,10 @@ def test_UKFTractography_inputs(): Qw=dict(argstr='--Qw %f', ), Rs=dict(argstr='--Rs 
%f', ), args=dict(argstr='%s', ), - dwiFile=dict(argstr='--dwiFile %s', ), + dwiFile=dict( + argstr='--dwiFile %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, @@ -21,7 +24,10 @@ def test_UKFTractography_inputs(): argstr='--labels %s', sep=',', ), - maskFile=dict(argstr='--maskFile %s', ), + maskFile=dict( + argstr='--maskFile %s', + extensions=None, + ), maxBranchingAngle=dict(argstr='--maxBranchingAngle %f', ), maxHalfFiberLength=dict(argstr='--maxHalfFiberLength %f', ), minBranchingAngle=dict(argstr='--minBranchingAngle %f', ), @@ -38,7 +44,10 @@ def test_UKFTractography_inputs(): recordTensors=dict(argstr='--recordTensors ', ), recordTrace=dict(argstr='--recordTrace ', ), seedFALimit=dict(argstr='--seedFALimit %f', ), - seedsFile=dict(argstr='--seedsFile %s', ), + seedsFile=dict( + argstr='--seedsFile %s', + extensions=None, + ), seedsPerVoxel=dict(argstr='--seedsPerVoxel %d', ), stepLength=dict(argstr='--stepLength %f', ), storeGlyphs=dict(argstr='--storeGlyphs ', ), @@ -60,8 +69,8 @@ def test_UKFTractography_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_UKFTractography_outputs(): output_map = dict( - tracts=dict(), - tractsWithSecondTensor=dict(), + tracts=dict(extensions=None, ), + tractsWithSecondTensor=dict(extensions=None, ), ) outputs = UKFTractography.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py index b4756a9406..3cd4ad6135 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py @@ -6,23 +6,35 @@ def test_fiberprocess_inputs(): input_map = dict( args=dict(argstr='%s', ), - displacement_field=dict(argstr='--displacement_field %s', ), + displacement_field=dict( + argstr='--displacement_field %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, ), - fiber_file=dict(argstr='--fiber_file %s', ), + fiber_file=dict( + argstr='--fiber_file %s', + extensions=None, + ), fiber_output=dict( argstr='--fiber_output %s', hash_files=False, ), fiber_radius=dict(argstr='--fiber_radius %f', ), - h_field=dict(argstr='--h_field %s', ), + h_field=dict( + argstr='--h_field %s', + extensions=None, + ), index_space=dict(argstr='--index_space ', ), noDataChange=dict(argstr='--noDataChange ', ), no_warp=dict(argstr='--no_warp ', ), saveProperties=dict(argstr='--saveProperties ', ), - tensor_volume=dict(argstr='--tensor_volume %s', ), + tensor_volume=dict( + argstr='--tensor_volume %s', + extensions=None, + ), verbose=dict(argstr='--verbose ', ), voxel_label=dict(argstr='--voxel_label %d', ), voxelize=dict( @@ -38,8 +50,8 @@ def test_fiberprocess_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_fiberprocess_outputs(): output_map = dict( - fiber_output=dict(), - voxelize=dict(), + fiber_output=dict(extensions=None, ), + voxelize=dict(extensions=None, ), ) outputs = fiberprocess.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py index d6f3a5cd50..e02ddc54a7 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py @@ -10,7 +10,10 @@ def test_fiberstats_inputs(): nohash=True, 
usedefault=True, ), - fiber_file=dict(argstr='--fiber_file %s', ), + fiber_file=dict( + argstr='--fiber_file %s', + extensions=None, + ), verbose=dict(argstr='--verbose ', ), ) inputs = fiberstats.input_spec() diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py index 33e57c0ca3..397f7ebd2d 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py @@ -12,8 +12,14 @@ def test_fibertrack_inputs(): ), forbidden_label=dict(argstr='--forbidden_label %d', ), force=dict(argstr='--force ', ), - input_roi_file=dict(argstr='--input_roi_file %s', ), - input_tensor_file=dict(argstr='--input_tensor_file %s', ), + input_roi_file=dict( + argstr='--input_roi_file %s', + extensions=None, + ), + input_tensor_file=dict( + argstr='--input_tensor_file %s', + extensions=None, + ), max_angle=dict(argstr='--max_angle %f', ), min_fa=dict(argstr='--min_fa %f', ), output_fiber_file=dict( @@ -33,7 +39,7 @@ def test_fibertrack_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_fibertrack_outputs(): - output_map = dict(output_fiber_file=dict(), ) + output_map = dict(output_fiber_file=dict(extensions=None, ), ) outputs = fibertrack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py b/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py index cfbee7449c..fbaf4dc841 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py @@ -10,7 +10,10 @@ def test_CannyEdge_inputs(): nohash=True, usedefault=True, ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), lowerThreshold=dict(argstr='--lowerThreshold %f', ), outputVolume=dict( argstr='--outputVolume %s', @@ -25,7 +28,7 @@ def test_CannyEdge_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CannyEdge_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = CannyEdge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py index b4196db823..19bd6122ee 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py @@ -13,9 +13,15 @@ def test_CannySegmentationLevelSetImageFilter_inputs(): nohash=True, usedefault=True, ), - initialModel=dict(argstr='--initialModel %s', ), + initialModel=dict( + argstr='--initialModel %s', + extensions=None, + ), initialModelIsovalue=dict(argstr='--initialModelIsovalue %f', ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), maxIterations=dict(argstr='--maxIterations %d', ), outputSpeedVolume=dict( argstr='--outputSpeedVolume %s', @@ -33,8 +39,8 @@ def test_CannySegmentationLevelSetImageFilter_inputs(): assert 
getattr(inputs.traits()[key], metakey) == value def test_CannySegmentationLevelSetImageFilter_outputs(): output_map = dict( - outputSpeedVolume=dict(), - outputVolume=dict(), + outputSpeedVolume=dict(extensions=None, ), + outputVolume=dict(extensions=None, ), ) outputs = CannySegmentationLevelSetImageFilter.output_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py index 7945ec8ade..a054c2cf06 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py @@ -10,9 +10,15 @@ def test_DilateImage_inputs(): nohash=True, usedefault=True, ), - inputMaskVolume=dict(argstr='--inputMaskVolume %s', ), + inputMaskVolume=dict( + argstr='--inputMaskVolume %s', + extensions=None, + ), inputRadius=dict(argstr='--inputRadius %d', ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), outputVolume=dict( argstr='--outputVolume %s', hash_files=False, @@ -24,7 +30,7 @@ def test_DilateImage_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DilateImage_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = DilateImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py index 6bd6235e80..bd1ce09b18 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py @@ -10,8 +10,14 @@ def test_DilateMask_inputs(): nohash=True, usedefault=True, ), - inputBinaryVolume=dict(argstr='--inputBinaryVolume %s', ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputBinaryVolume=dict( + argstr='--inputBinaryVolume %s', + extensions=None, + ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), lowerThreshold=dict(argstr='--lowerThreshold %f', ), outputVolume=dict( argstr='--outputVolume %s', @@ -25,7 +31,7 @@ def test_DilateMask_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DilateMask_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = DilateMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py index f7aaf722c4..d956ea2ac1 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py @@ -10,8 +10,14 @@ def test_DistanceMaps_inputs(): nohash=True, usedefault=True, ), - inputLabelVolume=dict(argstr='--inputLabelVolume %s', ), - inputMaskVolume=dict(argstr='--inputMaskVolume %s', ), + inputLabelVolume=dict( + argstr='--inputLabelVolume %s', + extensions=None, + ), + inputMaskVolume=dict( + argstr='--inputMaskVolume %s', + extensions=None, + ), inputTissueLabel=dict(argstr='--inputTissueLabel %d', ), outputVolume=dict( argstr='--outputVolume %s', @@ -24,7 +30,7 @@ def test_DistanceMaps_inputs(): for metakey, value in list(metadata.items()): assert 
getattr(inputs.traits()[key], metakey) == value def test_DistanceMaps_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = DistanceMaps.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py index 320bb76b77..9e453b2f86 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py @@ -10,8 +10,14 @@ def test_DumpBinaryTrainingVectors_inputs(): nohash=True, usedefault=True, ), - inputHeaderFilename=dict(argstr='--inputHeaderFilename %s', ), - inputVectorFilename=dict(argstr='--inputVectorFilename %s', ), + inputHeaderFilename=dict( + argstr='--inputHeaderFilename %s', + extensions=None, + ), + inputVectorFilename=dict( + argstr='--inputVectorFilename %s', + extensions=None, + ), ) inputs = DumpBinaryTrainingVectors.input_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py index 9e096b0062..1180f25bd5 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py @@ -10,9 +10,15 @@ def test_ErodeImage_inputs(): nohash=True, usedefault=True, ), - inputMaskVolume=dict(argstr='--inputMaskVolume %s', ), + inputMaskVolume=dict( + argstr='--inputMaskVolume %s', + extensions=None, + ), inputRadius=dict(argstr='--inputRadius %d', ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), outputVolume=dict( argstr='--outputVolume %s', hash_files=False, @@ -24,7 +30,7 @@ def test_ErodeImage_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ErodeImage_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = ErodeImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py b/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py index 539660f73b..e0de87a033 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py @@ -10,8 +10,14 @@ def test_FlippedDifference_inputs(): nohash=True, usedefault=True, ), - inputMaskVolume=dict(argstr='--inputMaskVolume %s', ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputMaskVolume=dict( + argstr='--inputMaskVolume %s', + extensions=None, + ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), outputVolume=dict( argstr='--outputVolume %s', hash_files=False, @@ -23,7 +29,7 @@ def test_FlippedDifference_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_FlippedDifference_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = FlippedDifference.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py 
b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py index 0a211f23d2..e987e60e73 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py @@ -10,8 +10,14 @@ def test_GenerateBrainClippedImage_inputs(): nohash=True, usedefault=True, ), - inputImg=dict(argstr='--inputImg %s', ), - inputMsk=dict(argstr='--inputMsk %s', ), + inputImg=dict( + argstr='--inputImg %s', + extensions=None, + ), + inputMsk=dict( + argstr='--inputMsk %s', + extensions=None, + ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputFileName=dict( argstr='--outputFileName %s', @@ -24,7 +30,7 @@ def test_GenerateBrainClippedImage_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GenerateBrainClippedImage_outputs(): - output_map = dict(outputFileName=dict(), ) + output_map = dict(outputFileName=dict(extensions=None, ), ) outputs = GenerateBrainClippedImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py index 58d3f35c5a..a538ea0933 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py @@ -11,8 +11,14 @@ def test_GenerateSummedGradientImage_inputs(): nohash=True, usedefault=True, ), - inputVolume1=dict(argstr='--inputVolume1 %s', ), - inputVolume2=dict(argstr='--inputVolume2 %s', ), + inputVolume1=dict( + argstr='--inputVolume1 %s', + extensions=None, + ), + inputVolume2=dict( + argstr='--inputVolume2 %s', + extensions=None, + ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputFileName=dict( argstr='--outputFileName %s', @@ -25,7 +31,7 @@ def test_GenerateSummedGradientImage_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GenerateSummedGradientImage_outputs(): - output_map = dict(outputFileName=dict(), ) + output_map = dict(outputFileName=dict(extensions=None, ), ) outputs = GenerateSummedGradientImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py index 1348e61d4b..38ff43b5d1 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py @@ -10,7 +10,10 @@ def test_GenerateTestImage_inputs(): nohash=True, usedefault=True, ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), lowerBoundOfOutputVolume=dict( argstr='--lowerBoundOfOutputVolume %f', ), outputVolume=dict( @@ -27,7 +30,7 @@ def test_GenerateTestImage_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GenerateTestImage_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = GenerateTestImage.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py index 27ebb18d29..23e035478e 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py @@ -11,7 +11,10 @@ def test_GradientAnisotropicDiffusionImageFilter_inputs(): nohash=True, usedefault=True, ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), numberOfIterations=dict(argstr='--numberOfIterations %d', ), outputVolume=dict( argstr='--outputVolume %s', @@ -25,7 +28,7 @@ def test_GradientAnisotropicDiffusionImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GradientAnisotropicDiffusionImageFilter_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = GradientAnisotropicDiffusionImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py b/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py index 9ad82598bb..0ca8acdee7 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py @@ -12,9 +12,18 @@ def test_HammerAttributeCreator_inputs(): nohash=True, usedefault=True, ), - inputCSFVolume=dict(argstr='--inputCSFVolume %s', ), - inputGMVolume=dict(argstr='--inputGMVolume %s', ), - inputWMVolume=dict(argstr='--inputWMVolume %s', ), + inputCSFVolume=dict( + argstr='--inputCSFVolume %s', + extensions=None, + ), + inputGMVolume=dict( + argstr='--inputGMVolume %s', + extensions=None, + ), + inputWMVolume=dict( + argstr='--inputWMVolume %s', + extensions=None, + ), outputVolumeBase=dict(argstr='--outputVolumeBase %s', ), ) inputs = HammerAttributeCreator.input_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py index c6b1ef20a9..661b2ba0ae 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py @@ -10,9 +10,15 @@ def test_NeighborhoodMean_inputs(): nohash=True, usedefault=True, ), - inputMaskVolume=dict(argstr='--inputMaskVolume %s', ), + inputMaskVolume=dict( + argstr='--inputMaskVolume %s', + extensions=None, + ), inputRadius=dict(argstr='--inputRadius %d', ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), outputVolume=dict( argstr='--outputVolume %s', hash_files=False, @@ -24,7 +30,7 @@ def test_NeighborhoodMean_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_NeighborhoodMean_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = NeighborhoodMean.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py 
b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py index 2c2b2d585f..c3b9895edd 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py @@ -10,9 +10,15 @@ def test_NeighborhoodMedian_inputs(): nohash=True, usedefault=True, ), - inputMaskVolume=dict(argstr='--inputMaskVolume %s', ), + inputMaskVolume=dict( + argstr='--inputMaskVolume %s', + extensions=None, + ), inputRadius=dict(argstr='--inputRadius %d', ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), outputVolume=dict( argstr='--outputVolume %s', hash_files=False, @@ -24,7 +30,7 @@ def test_NeighborhoodMedian_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_NeighborhoodMedian_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = NeighborhoodMedian.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py b/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py index 75bb83315f..7b68d3d05e 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py @@ -23,7 +23,7 @@ def test_STAPLEAnalysis_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_STAPLEAnalysis_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = STAPLEAnalysis.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py index cd4b4a1af2..e76b825ca6 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py @@ -11,7 +11,10 @@ def test_TextureFromNoiseImageFilter_inputs(): usedefault=True, ), inputRadius=dict(argstr='--inputRadius %d', ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), outputVolume=dict( argstr='--outputVolume %s', hash_files=False, @@ -23,7 +26,7 @@ def test_TextureFromNoiseImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TextureFromNoiseImageFilter_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = TextureFromNoiseImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py index 0ad523c7d0..9981d7b8ac 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py @@ -11,8 +11,14 @@ def test_TextureMeasureFilter_inputs(): nohash=True, usedefault=True, ), - inputMaskVolume=dict(argstr='--inputMaskVolume %s', ), - inputVolume=dict(argstr='--inputVolume %s', ), 
+ inputMaskVolume=dict( + argstr='--inputMaskVolume %s', + extensions=None, + ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), insideROIValue=dict(argstr='--insideROIValue %f', ), outputFilename=dict( argstr='--outputFilename %s', @@ -25,7 +31,7 @@ def test_TextureMeasureFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TextureMeasureFilter_outputs(): - output_map = dict(outputFilename=dict(), ) + output_map = dict(outputFilename=dict(extensions=None, ), ) outputs = TextureMeasureFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py b/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py index b84cda1fc3..866af46740 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py @@ -13,6 +13,7 @@ def test_UnbiasedNonLocalMeans_inputs(): hp=dict(argstr='--hp %f', ), inputVolume=dict( argstr='%s', + extensions=None, position=-2, ), outputVolume=dict( @@ -37,7 +38,11 @@ def test_UnbiasedNonLocalMeans_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_UnbiasedNonLocalMeans_outputs(): - output_map = dict(outputVolume=dict(position=-1, ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = UnbiasedNonLocalMeans.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py b/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py index f9ff60cfd9..7be4644031 100644 --- a/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py +++ b/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py @@ -6,13 +6,19 @@ def test_scalartransform_inputs(): input_map = dict( args=dict(argstr='%s', ), - deformation=dict(argstr='--deformation %s', ), + deformation=dict( + argstr='--deformation %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, ), h_field=dict(argstr='--h_field ', ), - input_image=dict(argstr='--input_image %s', ), + input_image=dict( + argstr='--input_image %s', + extensions=None, + ), interpolation=dict(argstr='--interpolation %s', ), invert=dict(argstr='--invert ', ), output_image=dict( @@ -31,8 +37,8 @@ def test_scalartransform_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_scalartransform_outputs(): output_map = dict( - output_image=dict(), - transformation=dict(), + output_image=dict(extensions=None, ), + transformation=dict(extensions=None, ), ) outputs = scalartransform.output_spec() diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py index c631f9b96e..18cd4c63ff 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py @@ -19,14 +19,25 @@ def test_BRAINSDemonWarp_inputs(): nohash=True, usedefault=True, ), - fixedBinaryVolume=dict(argstr='--fixedBinaryVolume %s', ), - fixedVolume=dict(argstr='--fixedVolume %s', ), + fixedBinaryVolume=dict( + argstr='--fixedBinaryVolume %s', + extensions=None, + ), + fixedVolume=dict( + argstr='--fixedVolume %s', + 
extensions=None, + ), gradient_type=dict(argstr='--gradient_type %s', ), gui=dict(argstr='--gui ', ), histogramMatch=dict(argstr='--histogramMatch ', ), initializeWithDisplacementField=dict( - argstr='--initializeWithDisplacementField %s', ), - initializeWithTransform=dict(argstr='--initializeWithTransform %s', ), + argstr='--initializeWithDisplacementField %s', + extensions=None, + ), + initializeWithTransform=dict( + argstr='--initializeWithTransform %s', + extensions=None, + ), inputPixelType=dict(argstr='--inputPixelType %s', ), interpolationMode=dict(argstr='--interpolationMode %s', ), lowerThresholdForBOBF=dict(argstr='--lowerThresholdForBOBF %d', ), @@ -44,8 +55,14 @@ def test_BRAINSDemonWarp_inputs(): argstr='--minimumMovingPyramid %s', sep=',', ), - movingBinaryVolume=dict(argstr='--movingBinaryVolume %s', ), - movingVolume=dict(argstr='--movingVolume %s', ), + movingBinaryVolume=dict( + argstr='--movingBinaryVolume %s', + extensions=None, + ), + movingVolume=dict( + argstr='--movingVolume %s', + extensions=None, + ), neighborhoodForBOBF=dict( argstr='--neighborhoodForBOBF %s', sep=',', @@ -92,9 +109,9 @@ def test_BRAINSDemonWarp_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSDemonWarp_outputs(): output_map = dict( - outputCheckerboardVolume=dict(), - outputDisplacementFieldVolume=dict(), - outputVolume=dict(), + outputCheckerboardVolume=dict(extensions=None, ), + outputDisplacementFieldVolume=dict(extensions=None, ), + outputVolume=dict(extensions=None, ), ) outputs = BRAINSDemonWarp.output_spec() diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py index 18059c4f57..2e949b33fb 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py @@ -22,13 +22,25 @@ def test_BRAINSFit_inputs(): usedefault=True, ), failureExitCode=dict(argstr='--failureExitCode %d', ), - fixedBinaryVolume=dict(argstr='--fixedBinaryVolume %s', ), - fixedVolume=dict(argstr='--fixedVolume %s', ), - fixedVolume2=dict(argstr='--fixedVolume2 %s', ), + fixedBinaryVolume=dict( + argstr='--fixedBinaryVolume %s', + extensions=None, + ), + fixedVolume=dict( + argstr='--fixedVolume %s', + extensions=None, + ), + fixedVolume2=dict( + argstr='--fixedVolume2 %s', + extensions=None, + ), fixedVolumeTimeIndex=dict(argstr='--fixedVolumeTimeIndex %d', ), gui=dict(argstr='--gui ', ), histogramMatch=dict(argstr='--histogramMatch ', ), - initialTransform=dict(argstr='--initialTransform %s', ), + initialTransform=dict( + argstr='--initialTransform %s', + extensions=None, + ), initializeRegistrationByCurrentGenericTransform=dict( argstr='--initializeRegistrationByCurrentGenericTransform ', ), initializeTransformMode=dict(argstr='--initializeTransformMode %s', ), @@ -59,9 +71,18 @@ def test_BRAINSFit_inputs(): argstr='--minimumStepLength %s', sep=',', ), - movingBinaryVolume=dict(argstr='--movingBinaryVolume %s', ), - movingVolume=dict(argstr='--movingVolume %s', ), - movingVolume2=dict(argstr='--movingVolume2 %s', ), + movingBinaryVolume=dict( + argstr='--movingBinaryVolume %s', + extensions=None, + ), + movingVolume=dict( + argstr='--movingVolume %s', + extensions=None, + ), + movingVolume2=dict( + argstr='--movingVolume2 %s', + extensions=None, + ), movingVolumeTimeIndex=dict(argstr='--movingVolumeTimeIndex %d', ), numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), 
numberOfIterations=dict( @@ -129,14 +150,14 @@ def test_BRAINSFit_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSFit_outputs(): output_map = dict( - bsplineTransform=dict(), - linearTransform=dict(), - logFileReport=dict(), - outputFixedVolumeROI=dict(), - outputMovingVolumeROI=dict(), - outputTransform=dict(), - outputVolume=dict(), - strippedOutputTransform=dict(), + bsplineTransform=dict(extensions=None, ), + linearTransform=dict(extensions=None, ), + logFileReport=dict(extensions=None, ), + outputFixedVolumeROI=dict(extensions=None, ), + outputMovingVolumeROI=dict(extensions=None, ), + outputTransform=dict(extensions=None, ), + outputVolume=dict(extensions=None, ), + strippedOutputTransform=dict(extensions=None, ), ) outputs = BRAINSFit.output_spec() diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py index 98ec5f4ff3..e8d4c187b0 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py @@ -7,7 +7,10 @@ def test_BRAINSResample_inputs(): input_map = dict( args=dict(argstr='%s', ), defaultValue=dict(argstr='--defaultValue %f', ), - deformationVolume=dict(argstr='--deformationVolume %s', ), + deformationVolume=dict( + argstr='--deformationVolume %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, @@ -16,7 +19,10 @@ def test_BRAINSResample_inputs(): argstr='--gridSpacing %s', sep=',', ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), interpolationMode=dict(argstr='--interpolationMode %s', ), inverseTransform=dict(argstr='--inverseTransform ', ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), @@ -25,8 +31,14 @@ def test_BRAINSResample_inputs(): hash_files=False, ), pixelType=dict(argstr='--pixelType %s', ), - referenceVolume=dict(argstr='--referenceVolume %s', ), - warpTransform=dict(argstr='--warpTransform %s', ), + referenceVolume=dict( + argstr='--referenceVolume %s', + extensions=None, + ), + warpTransform=dict( + argstr='--warpTransform %s', + extensions=None, + ), ) inputs = BRAINSResample.input_spec() @@ -34,7 +46,7 @@ def test_BRAINSResample_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSResample_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = BRAINSResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py index 4ce0d7159c..188bb31d49 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py @@ -10,7 +10,10 @@ def test_BRAINSResize_inputs(): nohash=True, usedefault=True, ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), outputVolume=dict( argstr='--outputVolume %s', hash_files=False, @@ -24,7 +27,7 @@ def test_BRAINSResize_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSResize_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = 
dict(outputVolume=dict(extensions=None, ), ) outputs = BRAINSResize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py index 2a700eaac7..258a0fcc83 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py @@ -11,9 +11,15 @@ def test_BRAINSTransformFromFiducials_inputs(): usedefault=True, ), fixedLandmarks=dict(argstr='--fixedLandmarks %s...', ), - fixedLandmarksFile=dict(argstr='--fixedLandmarksFile %s', ), + fixedLandmarksFile=dict( + argstr='--fixedLandmarksFile %s', + extensions=None, + ), movingLandmarks=dict(argstr='--movingLandmarks %s...', ), - movingLandmarksFile=dict(argstr='--movingLandmarksFile %s', ), + movingLandmarksFile=dict( + argstr='--movingLandmarksFile %s', + extensions=None, + ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), saveTransform=dict( argstr='--saveTransform %s', @@ -27,7 +33,7 @@ def test_BRAINSTransformFromFiducials_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSTransformFromFiducials_outputs(): - output_map = dict(saveTransform=dict(), ) + output_map = dict(saveTransform=dict(extensions=None, ), ) outputs = BRAINSTransformFromFiducials.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py b/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py index b3255da1d3..b196d318d4 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py @@ -19,14 +19,22 @@ def test_VBRAINSDemonWarp_inputs(): nohash=True, usedefault=True, ), - fixedBinaryVolume=dict(argstr='--fixedBinaryVolume %s', ), + fixedBinaryVolume=dict( + argstr='--fixedBinaryVolume %s', + extensions=None, + ), fixedVolume=dict(argstr='--fixedVolume %s...', ), gradient_type=dict(argstr='--gradient_type %s', ), gui=dict(argstr='--gui ', ), histogramMatch=dict(argstr='--histogramMatch ', ), initializeWithDisplacementField=dict( - argstr='--initializeWithDisplacementField %s', ), - initializeWithTransform=dict(argstr='--initializeWithTransform %s', ), + argstr='--initializeWithDisplacementField %s', + extensions=None, + ), + initializeWithTransform=dict( + argstr='--initializeWithTransform %s', + extensions=None, + ), inputPixelType=dict(argstr='--inputPixelType %s', ), interpolationMode=dict(argstr='--interpolationMode %s', ), lowerThresholdForBOBF=dict(argstr='--lowerThresholdForBOBF %d', ), @@ -44,7 +52,10 @@ def test_VBRAINSDemonWarp_inputs(): argstr='--minimumMovingPyramid %s', sep=',', ), - movingBinaryVolume=dict(argstr='--movingBinaryVolume %s', ), + movingBinaryVolume=dict( + argstr='--movingBinaryVolume %s', + extensions=None, + ), movingVolume=dict(argstr='--movingVolume %s...', ), neighborhoodForBOBF=dict( argstr='--neighborhoodForBOBF %s', @@ -96,9 +107,9 @@ def test_VBRAINSDemonWarp_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_VBRAINSDemonWarp_outputs(): output_map = dict( - outputCheckerboardVolume=dict(), - outputDisplacementFieldVolume=dict(), - outputVolume=dict(), + outputCheckerboardVolume=dict(extensions=None, ), + 
outputDisplacementFieldVolume=dict(extensions=None, ), + outputVolume=dict(extensions=None, ), ) outputs = VBRAINSDemonWarp.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py index 2d66884522..6efd92d499 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py @@ -6,7 +6,10 @@ def test_BRAINSABC_inputs(): input_map = dict( args=dict(argstr='%s', ), - atlasDefinition=dict(argstr='--atlasDefinition %s', ), + atlasDefinition=dict( + argstr='--atlasDefinition %s', + extensions=None, + ), atlasToSubjectInitialTransform=dict( argstr='--atlasToSubjectInitialTransform %s', hash_files=False, @@ -71,7 +74,10 @@ def test_BRAINSABC_inputs(): ), posteriorTemplate=dict(argstr='--posteriorTemplate %s', ), purePlugsThreshold=dict(argstr='--purePlugsThreshold %f', ), - restoreState=dict(argstr='--restoreState %s', ), + restoreState=dict( + argstr='--restoreState %s', + extensions=None, + ), saveState=dict( argstr='--saveState %s', hash_files=False, @@ -88,14 +94,14 @@ def test_BRAINSABC_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSABC_outputs(): output_map = dict( - atlasToSubjectInitialTransform=dict(), - atlasToSubjectTransform=dict(), + atlasToSubjectInitialTransform=dict(extensions=None, ), + atlasToSubjectTransform=dict(extensions=None, ), implicitOutputs=dict(), outputDir=dict(), - outputDirtyLabels=dict(), - outputLabels=dict(), + outputDirtyLabels=dict(extensions=None, ), + outputLabels=dict(extensions=None, ), outputVolumes=dict(), - saveState=dict(), + saveState=dict(extensions=None, ), ) outputs = BRAINSABC.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py index 9b2d10061c..6f6ceaf53e 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py @@ -6,12 +6,24 @@ def test_BRAINSConstellationDetector_inputs(): input_map = dict( BackgroundFillValue=dict(argstr='--BackgroundFillValue %s', ), - LLSModel=dict(argstr='--LLSModel %s', ), + LLSModel=dict( + argstr='--LLSModel %s', + extensions=None, + ), acLowerBound=dict(argstr='--acLowerBound %f', ), args=dict(argstr='%s', ), - atlasLandmarkWeights=dict(argstr='--atlasLandmarkWeights %s', ), - atlasLandmarks=dict(argstr='--atlasLandmarks %s', ), - atlasVolume=dict(argstr='--atlasVolume %s', ), + atlasLandmarkWeights=dict( + argstr='--atlasLandmarkWeights %s', + extensions=None, + ), + atlasLandmarks=dict( + argstr='--atlasLandmarks %s', + extensions=None, + ), + atlasVolume=dict( + argstr='--atlasVolume %s', + extensions=None, + ), cutOutHeadInOutputVolume=dict(argstr='--cutOutHeadInOutputVolume ', ), debug=dict(argstr='--debug ', ), environ=dict( @@ -37,9 +49,18 @@ def test_BRAINSConstellationDetector_inputs(): sep=',', ), houghEyeDetectorMode=dict(argstr='--houghEyeDetectorMode %d', ), - inputLandmarksEMSP=dict(argstr='--inputLandmarksEMSP %s', ), - inputTemplateModel=dict(argstr='--inputTemplateModel %s', ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputLandmarksEMSP=dict( + argstr='--inputLandmarksEMSP %s', + extensions=None, + ), + inputTemplateModel=dict( + argstr='--inputTemplateModel %s', + extensions=None, + 
), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), interpolationMode=dict(argstr='--interpolationMode %s', ), mspQualityLevel=dict(argstr='--mspQualityLevel %d', ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), @@ -105,16 +126,16 @@ def test_BRAINSConstellationDetector_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSConstellationDetector_outputs(): output_map = dict( - outputLandmarksInACPCAlignedSpace=dict(), - outputLandmarksInInputSpace=dict(), - outputMRML=dict(), - outputResampledVolume=dict(), - outputTransform=dict(), - outputUntransformedClippedVolume=dict(), - outputVerificationScript=dict(), - outputVolume=dict(), + outputLandmarksInACPCAlignedSpace=dict(extensions=None, ), + outputLandmarksInInputSpace=dict(extensions=None, ), + outputMRML=dict(extensions=None, ), + outputResampledVolume=dict(extensions=None, ), + outputTransform=dict(extensions=None, ), + outputUntransformedClippedVolume=dict(extensions=None, ), + outputVerificationScript=dict(extensions=None, ), + outputVolume=dict(extensions=None, ), resultsDir=dict(), - writeBranded2DImage=dict(), + writeBranded2DImage=dict(extensions=None, ), ) outputs = BRAINSConstellationDetector.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py index 9f2b00c311..8280bddf37 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py @@ -24,7 +24,10 @@ def test_BRAINSCreateLabelMapFromProbabilityMaps_inputs(): ), inclusionThreshold=dict(argstr='--inclusionThreshold %f', ), inputProbabilityVolume=dict(argstr='--inputProbabilityVolume %s...', ), - nonAirRegionMask=dict(argstr='--nonAirRegionMask %s', ), + nonAirRegionMask=dict( + argstr='--nonAirRegionMask %s', + extensions=None, + ), priorLabelCodes=dict( argstr='--priorLabelCodes %s', sep=',', @@ -37,8 +40,8 @@ def test_BRAINSCreateLabelMapFromProbabilityMaps_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSCreateLabelMapFromProbabilityMaps_outputs(): output_map = dict( - cleanLabelVolume=dict(), - dirtyLabelVolume=dict(), + cleanLabelVolume=dict(extensions=None, ), + dirtyLabelVolume=dict(extensions=None, ), ) outputs = BRAINSCreateLabelMapFromProbabilityMaps.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py index 9d66d4a463..4f4c336670 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py @@ -19,10 +19,15 @@ def test_BRAINSCut_inputs(): histogramEqualization=dict(argstr='--histogramEqualization ', ), method=dict(argstr='--method %s', ), modelConfigurationFilename=dict( - argstr='--modelConfigurationFilename %s', ), + argstr='--modelConfigurationFilename %s', + extensions=None, + ), modelFilename=dict(argstr='--modelFilename %s', ), multiStructureThreshold=dict(argstr='--multiStructureThreshold ', ), - netConfiguration=dict(argstr='--netConfiguration %s', ), + netConfiguration=dict( + argstr='--netConfiguration %s', + extensions=None, + ), numberOfTrees=dict(argstr='--numberOfTrees %d', ), randomTreeDepth=dict(argstr='--randomTreeDepth %d', ), 
trainModel=dict(argstr='--trainModel ', ), diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py index 826ec19f0e..5b1ba96146 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py @@ -10,7 +10,10 @@ def test_BRAINSMultiSTAPLE_inputs(): nohash=True, usedefault=True, ), - inputCompositeT1Volume=dict(argstr='--inputCompositeT1Volume %s', ), + inputCompositeT1Volume=dict( + argstr='--inputCompositeT1Volume %s', + extensions=None, + ), inputLabelVolume=dict(argstr='--inputLabelVolume %s...', ), inputTransform=dict(argstr='--inputTransform %s...', ), labelForUndecidedPixels=dict(argstr='--labelForUndecidedPixels %d', ), @@ -32,8 +35,8 @@ def test_BRAINSMultiSTAPLE_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSMultiSTAPLE_outputs(): output_map = dict( - outputConfusionMatrix=dict(), - outputMultiSTAPLE=dict(), + outputConfusionMatrix=dict(extensions=None, ), + outputMultiSTAPLE=dict(extensions=None, ), ) outputs = BRAINSMultiSTAPLE.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py index 3e70b75883..469387ff3d 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py @@ -13,7 +13,10 @@ def test_BRAINSROIAuto_inputs(): nohash=True, usedefault=True, ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), maskOutput=dict(argstr='--maskOutput ', ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), otsuPercentileThreshold=dict(argstr='--otsuPercentileThreshold %f', ), @@ -36,8 +39,8 @@ def test_BRAINSROIAuto_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSROIAuto_outputs(): output_map = dict( - outputROIMaskVolume=dict(), - outputVolume=dict(), + outputROIMaskVolume=dict(extensions=None, ), + outputVolume=dict(extensions=None, ), ) outputs = BRAINSROIAuto.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py index d71e7003ff..126db9eddb 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py @@ -10,7 +10,10 @@ def test_BinaryMaskEditorBasedOnLandmarks_inputs(): nohash=True, usedefault=True, ), - inputBinaryVolume=dict(argstr='--inputBinaryVolume %s', ), + inputBinaryVolume=dict( + argstr='--inputBinaryVolume %s', + extensions=None, + ), inputLandmarkNames=dict( argstr='--inputLandmarkNames %s', sep=',', @@ -19,7 +22,10 @@ def test_BinaryMaskEditorBasedOnLandmarks_inputs(): argstr='--inputLandmarkNamesForObliquePlane %s', sep=',', ), - inputLandmarksFilename=dict(argstr='--inputLandmarksFilename %s', ), + inputLandmarksFilename=dict( + argstr='--inputLandmarksFilename %s', + extensions=None, + ), outputBinaryVolume=dict( argstr='--outputBinaryVolume %s', hash_files=False, @@ -39,7 +45,7 @@ def test_BinaryMaskEditorBasedOnLandmarks_inputs(): for metakey, value in list(metadata.items()): assert 
getattr(inputs.traits()[key], metakey) == value def test_BinaryMaskEditorBasedOnLandmarks_outputs(): - output_map = dict(outputBinaryVolume=dict(), ) + output_map = dict(outputBinaryVolume=dict(extensions=None, ), ) outputs = BinaryMaskEditorBasedOnLandmarks.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py index 2e099b4f56..f8848a6efb 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py @@ -12,7 +12,10 @@ def test_ESLR_inputs(): usedefault=True, ), high=dict(argstr='--high %d', ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), low=dict(argstr='--low %d', ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), openingSize=dict(argstr='--openingSize %d', ), @@ -29,7 +32,7 @@ def test_ESLR_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ESLR_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = ESLR.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/tests/test_auto_DWICompare.py b/nipype/interfaces/semtools/tests/test_auto_DWICompare.py index c00b0cc36f..1d9b9f4f71 100644 --- a/nipype/interfaces/semtools/tests/test_auto_DWICompare.py +++ b/nipype/interfaces/semtools/tests/test_auto_DWICompare.py @@ -10,8 +10,14 @@ def test_DWICompare_inputs(): nohash=True, usedefault=True, ), - inputVolume1=dict(argstr='--inputVolume1 %s', ), - inputVolume2=dict(argstr='--inputVolume2 %s', ), + inputVolume1=dict( + argstr='--inputVolume1 %s', + extensions=None, + ), + inputVolume2=dict( + argstr='--inputVolume2 %s', + extensions=None, + ), ) inputs = DWICompare.input_spec() diff --git a/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py b/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py index e515bc613c..b76979129f 100644 --- a/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py +++ b/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py @@ -11,8 +11,14 @@ def test_DWISimpleCompare_inputs(): nohash=True, usedefault=True, ), - inputVolume1=dict(argstr='--inputVolume1 %s', ), - inputVolume2=dict(argstr='--inputVolume2 %s', ), + inputVolume1=dict( + argstr='--inputVolume1 %s', + extensions=None, + ), + inputVolume2=dict( + argstr='--inputVolume2 %s', + extensions=None, + ), ) inputs = DWISimpleCompare.input_spec() diff --git a/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py b/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py index 63f45831af..ac8ae0efeb 100644 --- a/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py +++ b/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py @@ -10,7 +10,10 @@ def test_GenerateCsfClippedFromClassifiedImage_inputs(): nohash=True, usedefault=True, ), - inputCassifiedVolume=dict(argstr='--inputCassifiedVolume %s', ), + inputCassifiedVolume=dict( + argstr='--inputCassifiedVolume %s', + extensions=None, + ), outputVolume=dict( argstr='--outputVolume %s', hash_files=False, @@ -22,7 +25,7 @@ def test_GenerateCsfClippedFromClassifiedImage_inputs(): for metakey, value in list(metadata.items()): assert 
getattr(inputs.traits()[key], metakey) == value def test_GenerateCsfClippedFromClassifiedImage_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = GenerateCsfClippedFromClassifiedImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py index 27b65a4eba..5db3d12da1 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py @@ -15,7 +15,10 @@ def test_BRAINSAlignMSP_inputs(): nohash=True, usedefault=True, ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), interpolationMode=dict(argstr='--interpolationMode %s', ), mspQualityLevel=dict(argstr='--mspQualityLevel %d', ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), @@ -40,7 +43,7 @@ def test_BRAINSAlignMSP_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSAlignMSP_outputs(): output_map = dict( - OutputresampleMSP=dict(), + OutputresampleMSP=dict(extensions=None, ), resultsDir=dict(), ) outputs = BRAINSAlignMSP.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py index 7a0528f201..1585e4152d 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py @@ -12,7 +12,10 @@ def test_BRAINSClipInferior_inputs(): nohash=True, usedefault=True, ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputVolume=dict( argstr='--outputVolume %s', @@ -25,7 +28,7 @@ def test_BRAINSClipInferior_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSClipInferior_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = BRAINSClipInferior.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py index a124ad60d9..38eba27d7e 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py @@ -11,7 +11,10 @@ def test_BRAINSConstellationModeler_inputs(): nohash=True, usedefault=True, ), - inputTrainingList=dict(argstr='--inputTrainingList %s', ), + inputTrainingList=dict( + argstr='--inputTrainingList %s', + extensions=None, + ), mspQualityLevel=dict(argstr='--mspQualityLevel %d', ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), optimizedLandmarksFilenameExtender=dict( @@ -42,7 +45,7 @@ def test_BRAINSConstellationModeler_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSConstellationModeler_outputs(): output_map = dict( - outputModel=dict(), + outputModel=dict(extensions=None, ), resultsDir=dict(), ) outputs = BRAINSConstellationModeler.output_spec() diff --git 
a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py index 9df3c8b8d6..0d9179f13f 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py @@ -11,7 +11,10 @@ def test_BRAINSEyeDetector_inputs(): nohash=True, usedefault=True, ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputVolume=dict( argstr='--outputVolume %s', @@ -24,7 +27,7 @@ def test_BRAINSEyeDetector_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSEyeDetector_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = BRAINSEyeDetector.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py index bc3807f9f8..46aac4e578 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py @@ -10,7 +10,10 @@ def test_BRAINSInitializedControlPoints_inputs(): nohash=True, usedefault=True, ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputLandmarksFile=dict(argstr='--outputLandmarksFile %s', ), outputVolume=dict( @@ -32,7 +35,7 @@ def test_BRAINSInitializedControlPoints_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSInitializedControlPoints_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = BRAINSInitializedControlPoints.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py index 2456b8fb4d..172e076e6e 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py @@ -11,10 +11,17 @@ def test_BRAINSLandmarkInitializer_inputs(): usedefault=True, ), inputFixedLandmarkFilename=dict( - argstr='--inputFixedLandmarkFilename %s', ), + argstr='--inputFixedLandmarkFilename %s', + extensions=None, + ), inputMovingLandmarkFilename=dict( - argstr='--inputMovingLandmarkFilename %s', ), - inputWeightFilename=dict(argstr='--inputWeightFilename %s', ), + argstr='--inputMovingLandmarkFilename %s', + extensions=None, + ), + inputWeightFilename=dict( + argstr='--inputWeightFilename %s', + extensions=None, + ), outputTransformFilename=dict( argstr='--outputTransformFilename %s', hash_files=False, @@ -26,7 +33,7 @@ def test_BRAINSLandmarkInitializer_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSLandmarkInitializer_outputs(): - output_map = dict(outputTransformFilename=dict(), ) + output_map = 
dict(outputTransformFilename=dict(extensions=None, ), ) outputs = BRAINSLandmarkInitializer.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py index b6b83baa12..1dc026f988 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py @@ -10,7 +10,10 @@ def test_BRAINSLinearModelerEPCA_inputs(): nohash=True, usedefault=True, ), - inputTrainingList=dict(argstr='--inputTrainingList %s', ), + inputTrainingList=dict( + argstr='--inputTrainingList %s', + extensions=None, + ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), ) inputs = BRAINSLinearModelerEPCA.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py index 262d948029..876ab830c5 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py @@ -10,10 +10,22 @@ def test_BRAINSLmkTransform_inputs(): nohash=True, usedefault=True, ), - inputFixedLandmarks=dict(argstr='--inputFixedLandmarks %s', ), - inputMovingLandmarks=dict(argstr='--inputMovingLandmarks %s', ), - inputMovingVolume=dict(argstr='--inputMovingVolume %s', ), - inputReferenceVolume=dict(argstr='--inputReferenceVolume %s', ), + inputFixedLandmarks=dict( + argstr='--inputFixedLandmarks %s', + extensions=None, + ), + inputMovingLandmarks=dict( + argstr='--inputMovingLandmarks %s', + extensions=None, + ), + inputMovingVolume=dict( + argstr='--inputMovingVolume %s', + extensions=None, + ), + inputReferenceVolume=dict( + argstr='--inputReferenceVolume %s', + extensions=None, + ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputAffineTransform=dict( argstr='--outputAffineTransform %s', @@ -31,8 +43,8 @@ def test_BRAINSLmkTransform_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSLmkTransform_outputs(): output_map = dict( - outputAffineTransform=dict(), - outputResampledVolume=dict(), + outputAffineTransform=dict(extensions=None, ), + outputResampledVolume=dict(extensions=None, ), ) outputs = BRAINSLmkTransform.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py index 9627258bcf..cb612ee8f2 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py @@ -20,9 +20,18 @@ def test_BRAINSMush_inputs(): nohash=True, usedefault=True, ), - inputFirstVolume=dict(argstr='--inputFirstVolume %s', ), - inputMaskVolume=dict(argstr='--inputMaskVolume %s', ), - inputSecondVolume=dict(argstr='--inputSecondVolume %s', ), + inputFirstVolume=dict( + argstr='--inputFirstVolume %s', + extensions=None, + ), + inputMaskVolume=dict( + argstr='--inputMaskVolume %s', + extensions=None, + ), + inputSecondVolume=dict( + argstr='--inputSecondVolume %s', + extensions=None, + ), lowerThresholdFactor=dict(argstr='--lowerThresholdFactor %f', ), lowerThresholdFactorPre=dict(argstr='--lowerThresholdFactorPre %f', ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), @@ -52,9 +61,9 @@ def test_BRAINSMush_inputs(): assert 
getattr(inputs.traits()[key], metakey) == value def test_BRAINSMush_outputs(): output_map = dict( - outputMask=dict(), - outputVolume=dict(), - outputWeightsFile=dict(), + outputMask=dict(extensions=None, ), + outputVolume=dict(extensions=None, ), + outputWeightsFile=dict(extensions=None, ), ) outputs = BRAINSMush.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py index 1b803956e7..573252e1fc 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py @@ -39,7 +39,7 @@ def test_BRAINSSnapShotWriter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSSnapShotWriter_outputs(): - output_map = dict(outputFilename=dict(), ) + output_map = dict(outputFilename=dict(extensions=None, ), ) outputs = BRAINSSnapShotWriter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py index c6d10736d7..29309f53b0 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py @@ -14,14 +14,20 @@ def test_BRAINSTransformConvert_inputs(): nohash=True, usedefault=True, ), - inputTransform=dict(argstr='--inputTransform %s', ), + inputTransform=dict( + argstr='--inputTransform %s', + extensions=None, + ), outputPrecisionType=dict(argstr='--outputPrecisionType %s', ), outputTransform=dict( argstr='--outputTransform %s', hash_files=False, ), outputTransformType=dict(argstr='--outputTransformType %s', ), - referenceVolume=dict(argstr='--referenceVolume %s', ), + referenceVolume=dict( + argstr='--referenceVolume %s', + extensions=None, + ), ) inputs = BRAINSTransformConvert.input_spec() @@ -30,8 +36,8 @@ def test_BRAINSTransformConvert_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSTransformConvert_outputs(): output_map = dict( - displacementVolume=dict(), - outputTransform=dict(), + displacementVolume=dict(extensions=None, ), + outputTransform=dict(extensions=None, ), ) outputs = BRAINSTransformConvert.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py index d2f3b74140..0bb8f0a13b 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py @@ -14,7 +14,10 @@ def test_BRAINSTrimForegroundInDirection_inputs(): usedefault=True, ), headSizeLimit=dict(argstr='--headSizeLimit %f', ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), otsuPercentileThreshold=dict(argstr='--otsuPercentileThreshold %f', ), outputVolume=dict( @@ -28,7 +31,7 @@ def test_BRAINSTrimForegroundInDirection_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSTrimForegroundInDirection_outputs(): - output_map = dict(outputVolume=dict(), ) + 
output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = BRAINSTrimForegroundInDirection.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py b/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py index 46924bf180..339f9a19ba 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py @@ -40,8 +40,14 @@ def test_FindCenterOfBrain_inputs(): generateDebugImages=dict(argstr='--generateDebugImages ', ), headSizeEstimate=dict(argstr='--headSizeEstimate %f', ), headSizeLimit=dict(argstr='--headSizeLimit %f', ), - imageMask=dict(argstr='--imageMask %s', ), - inputVolume=dict(argstr='--inputVolume %s', ), + imageMask=dict( + argstr='--imageMask %s', + extensions=None, + ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), maximize=dict(argstr='--maximize ', ), otsuPercentileThreshold=dict(argstr='--otsuPercentileThreshold %f', ), ) @@ -52,12 +58,12 @@ def test_FindCenterOfBrain_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_FindCenterOfBrain_outputs(): output_map = dict( - clippedImageMask=dict(), - debugAfterGridComputationsForegroundImage=dict(), - debugClippedImageMask=dict(), - debugDistanceImage=dict(), - debugGridImage=dict(), - debugTrimmedImage=dict(), + clippedImageMask=dict(extensions=None, ), + debugAfterGridComputationsForegroundImage=dict(extensions=None, ), + debugClippedImageMask=dict(extensions=None, ), + debugDistanceImage=dict(extensions=None, ), + debugGridImage=dict(extensions=None, ), + debugTrimmedImage=dict(extensions=None, ), ) outputs = FindCenterOfBrain.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py b/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py index 9087d6667b..caf6323e68 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py @@ -23,7 +23,7 @@ def test_GenerateLabelMapFromProbabilityMap_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GenerateLabelMapFromProbabilityMap_outputs(): - output_map = dict(outputLabelVolume=dict(), ) + output_map = dict(outputLabelVolume=dict(extensions=None, ), ) outputs = GenerateLabelMapFromProbabilityMap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py b/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py index f5372a240e..3cc1740778 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py @@ -10,10 +10,22 @@ def test_ImageRegionPlotter_inputs(): nohash=True, usedefault=True, ), - inputBinaryROIVolume=dict(argstr='--inputBinaryROIVolume %s', ), - inputLabelVolume=dict(argstr='--inputLabelVolume %s', ), - inputVolume1=dict(argstr='--inputVolume1 %s', ), - inputVolume2=dict(argstr='--inputVolume2 %s', ), + inputBinaryROIVolume=dict( + argstr='--inputBinaryROIVolume %s', + extensions=None, + ), + inputLabelVolume=dict( + argstr='--inputLabelVolume %s', + extensions=None, + ), + inputVolume1=dict( + 
argstr='--inputVolume1 %s', + extensions=None, + ), + inputVolume2=dict( + argstr='--inputVolume2 %s', + extensions=None, + ), numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), outputJointHistogramData=dict( argstr='--outputJointHistogramData %s', ), diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py b/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py index daf6d5634f..c686badea2 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py @@ -10,10 +10,22 @@ def test_JointHistogram_inputs(): nohash=True, usedefault=True, ), - inputMaskVolumeInXAxis=dict(argstr='--inputMaskVolumeInXAxis %s', ), - inputMaskVolumeInYAxis=dict(argstr='--inputMaskVolumeInYAxis %s', ), - inputVolumeInXAxis=dict(argstr='--inputVolumeInXAxis %s', ), - inputVolumeInYAxis=dict(argstr='--inputVolumeInYAxis %s', ), + inputMaskVolumeInXAxis=dict( + argstr='--inputMaskVolumeInXAxis %s', + extensions=None, + ), + inputMaskVolumeInYAxis=dict( + argstr='--inputMaskVolumeInYAxis %s', + extensions=None, + ), + inputVolumeInXAxis=dict( + argstr='--inputVolumeInXAxis %s', + extensions=None, + ), + inputVolumeInYAxis=dict( + argstr='--inputVolumeInYAxis %s', + extensions=None, + ), outputJointHistogramImage=dict( argstr='--outputJointHistogramImage %s', ), verbose=dict(argstr='--verbose ', ), diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py b/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py index 7bea38e2f1..1f50a296ab 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py @@ -10,7 +10,10 @@ def test_ShuffleVectorsModule_inputs(): nohash=True, usedefault=True, ), - inputVectorFileBaseName=dict(argstr='--inputVectorFileBaseName %s', ), + inputVectorFileBaseName=dict( + argstr='--inputVectorFileBaseName %s', + extensions=None, + ), outputVectorFileBaseName=dict( argstr='--outputVectorFileBaseName %s', hash_files=False, @@ -23,7 +26,7 @@ def test_ShuffleVectorsModule_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ShuffleVectorsModule_outputs(): - output_map = dict(outputVectorFileBaseName=dict(), ) + output_map = dict(outputVectorFileBaseName=dict(extensions=None, ), ) outputs = ShuffleVectorsModule.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py b/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py index 32e6ce65cd..2134ec42f0 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py @@ -11,7 +11,10 @@ def test_fcsv_to_hdf5_inputs(): usedefault=True, ), landmarkGlobPattern=dict(argstr='--landmarkGlobPattern %s', ), - landmarkTypesList=dict(argstr='--landmarkTypesList %s', ), + landmarkTypesList=dict( + argstr='--landmarkTypesList %s', + extensions=None, + ), landmarksInformationFile=dict( argstr='--landmarksInformationFile %s', hash_files=False, @@ -30,8 +33,8 @@ def test_fcsv_to_hdf5_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_fcsv_to_hdf5_outputs(): output_map = dict( - landmarksInformationFile=dict(), - modelFile=dict(), + 
landmarksInformationFile=dict(extensions=None, ), + modelFile=dict(extensions=None, ), ) outputs = fcsv_to_hdf5.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py b/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py index 196b09b304..042194c5ea 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py @@ -10,7 +10,10 @@ def test_insertMidACPCpoint_inputs(): nohash=True, usedefault=True, ), - inputLandmarkFile=dict(argstr='--inputLandmarkFile %s', ), + inputLandmarkFile=dict( + argstr='--inputLandmarkFile %s', + extensions=None, + ), outputLandmarkFile=dict( argstr='--outputLandmarkFile %s', hash_files=False, @@ -22,7 +25,7 @@ def test_insertMidACPCpoint_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_insertMidACPCpoint_outputs(): - output_map = dict(outputLandmarkFile=dict(), ) + output_map = dict(outputLandmarkFile=dict(extensions=None, ), ) outputs = insertMidACPCpoint.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py index 151a2c7b3a..860a0a972e 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py @@ -10,7 +10,10 @@ def test_landmarksConstellationAligner_inputs(): nohash=True, usedefault=True, ), - inputLandmarksPaired=dict(argstr='--inputLandmarksPaired %s', ), + inputLandmarksPaired=dict( + argstr='--inputLandmarksPaired %s', + extensions=None, + ), outputLandmarksPaired=dict( argstr='--outputLandmarksPaired %s', hash_files=False, @@ -22,7 +25,7 @@ def test_landmarksConstellationAligner_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_landmarksConstellationAligner_outputs(): - output_map = dict(outputLandmarksPaired=dict(), ) + output_map = dict(outputLandmarksPaired=dict(extensions=None, ), ) outputs = landmarksConstellationAligner.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py index 0bcd747d36..689c78de76 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py @@ -5,14 +5,23 @@ def test_landmarksConstellationWeights_inputs(): input_map = dict( - LLSModel=dict(argstr='--LLSModel %s', ), + LLSModel=dict( + argstr='--LLSModel %s', + extensions=None, + ), args=dict(argstr='%s', ), environ=dict( nohash=True, usedefault=True, ), - inputTemplateModel=dict(argstr='--inputTemplateModel %s', ), - inputTrainingList=dict(argstr='--inputTrainingList %s', ), + inputTemplateModel=dict( + argstr='--inputTemplateModel %s', + extensions=None, + ), + inputTrainingList=dict( + argstr='--inputTrainingList %s', + extensions=None, + ), outputWeightsList=dict( argstr='--outputWeightsList %s', hash_files=False, @@ -24,7 +33,7 @@ def test_landmarksConstellationWeights_inputs(): for metakey, value in list(metadata.items()): 
assert getattr(inputs.traits()[key], metakey) == value def test_landmarksConstellationWeights_outputs(): - output_map = dict(outputWeightsList=dict(), ) + output_map = dict(outputWeightsList=dict(extensions=None, ), ) outputs = landmarksConstellationWeights.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py index c8d5df2ba2..1ca79a6e96 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py @@ -12,6 +12,7 @@ def test_DTIexport_inputs(): ), inputTensor=dict( argstr='%s', + extensions=None, position=-2, ), outputFile=dict( @@ -26,7 +27,11 @@ def test_DTIexport_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DTIexport_outputs(): - output_map = dict(outputFile=dict(position=-1, ), ) + output_map = dict( + outputFile=dict( + extensions=None, + position=-1, + ), ) outputs = DTIexport.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py index ec1d66bc0b..58ae495f3f 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py @@ -12,6 +12,7 @@ def test_DTIimport_inputs(): ), inputFile=dict( argstr='%s', + extensions=None, position=-2, ), outputTensor=dict( @@ -27,7 +28,11 @@ def test_DTIimport_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DTIimport_outputs(): - output_map = dict(outputTensor=dict(position=-1, ), ) + output_map = dict( + outputTensor=dict( + extensions=None, + position=-1, + ), ) outputs = DTIimport.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py index a2caa2f633..f8000c49b1 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py @@ -13,6 +13,7 @@ def test_DWIJointRicianLMMSEFilter_inputs(): ), inputVolume=dict( argstr='%s', + extensions=None, position=-2, ), ng=dict(argstr='--ng %d', ), @@ -36,7 +37,11 @@ def test_DWIJointRicianLMMSEFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DWIJointRicianLMMSEFilter_outputs(): - output_map = dict(outputVolume=dict(position=-1, ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = DWIJointRicianLMMSEFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py index 7eff851562..29b5e0b7e1 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py @@ -14,6 +14,7 @@ def test_DWIRicianLMMSEFilter_inputs(): hrf=dict(argstr='--hrf %f', ), inputVolume=dict( argstr='%s', + extensions=None, position=-2, ), iter=dict(argstr='--iter 
%d', ), @@ -42,7 +43,11 @@ def test_DWIRicianLMMSEFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DWIRicianLMMSEFilter_outputs(): - output_map = dict(outputVolume=dict(position=-1, ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = DWIRicianLMMSEFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py index 217f91edf0..4f9702bb32 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py @@ -13,9 +13,13 @@ def test_DWIToDTIEstimation_inputs(): ), inputVolume=dict( argstr='%s', + extensions=None, position=-3, ), - mask=dict(argstr='--mask %s', ), + mask=dict( + argstr='--mask %s', + extensions=None, + ), outputBaseline=dict( argstr='%s', hash_files=False, @@ -35,8 +39,14 @@ def test_DWIToDTIEstimation_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_DWIToDTIEstimation_outputs(): output_map = dict( - outputBaseline=dict(position=-1, ), - outputTensor=dict(position=-2, ), + outputBaseline=dict( + extensions=None, + position=-1, + ), + outputTensor=dict( + extensions=None, + position=-2, + ), ) outputs = DWIToDTIEstimation.output_spec() diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py index b75989c349..5dcdbd7e2e 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py @@ -13,6 +13,7 @@ def test_DiffusionTensorScalarMeasurements_inputs(): ), inputVolume=dict( argstr='%s', + extensions=None, position=-3, ), outputScalar=dict( @@ -27,7 +28,11 @@ def test_DiffusionTensorScalarMeasurements_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DiffusionTensorScalarMeasurements_outputs(): - output_map = dict(outputScalar=dict(position=-1, ), ) + output_map = dict( + outputScalar=dict( + extensions=None, + position=-1, + ), ) outputs = DiffusionTensorScalarMeasurements.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py index 5e8c39a4ac..e023e6293c 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py @@ -12,6 +12,7 @@ def test_DiffusionWeightedVolumeMasking_inputs(): ), inputVolume=dict( argstr='%s', + extensions=None, position=-4, ), otsuomegathreshold=dict(argstr='--otsuomegathreshold %f', ), @@ -34,8 +35,14 @@ def test_DiffusionWeightedVolumeMasking_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_DiffusionWeightedVolumeMasking_outputs(): output_map = dict( - outputBaseline=dict(position=-2, ), - thresholdMask=dict(position=-1, ), + outputBaseline=dict( + extensions=None, + position=-2, + ), + thresholdMask=dict( + extensions=None, + position=-1, + ), ) outputs = 
DiffusionWeightedVolumeMasking.output_spec() diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py index 30860d9da0..bd35268bdb 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py @@ -7,11 +7,17 @@ def test_ResampleDTIVolume_inputs(): input_map = dict( Inverse_ITK_Transformation=dict( argstr='--Inverse_ITK_Transformation ', ), - Reference=dict(argstr='--Reference %s', ), + Reference=dict( + argstr='--Reference %s', + extensions=None, + ), args=dict(argstr='%s', ), centered_transform=dict(argstr='--centered_transform ', ), correction=dict(argstr='--correction %s', ), - defField=dict(argstr='--defField %s', ), + defField=dict( + argstr='--defField %s', + extensions=None, + ), default_pixel_value=dict(argstr='--default_pixel_value %f', ), direction_matrix=dict( argstr='--direction_matrix %s', @@ -25,6 +31,7 @@ def test_ResampleDTIVolume_inputs(): image_center=dict(argstr='--image_center %s', ), inputVolume=dict( argstr='%s', + extensions=None, position=-2, ), interpolation=dict(argstr='--interpolation %s', ), @@ -54,7 +61,10 @@ def test_ResampleDTIVolume_inputs(): ), transform_order=dict(argstr='--transform_order %s', ), transform_tensor_method=dict(argstr='--transform_tensor_method %s', ), - transformationFile=dict(argstr='--transformationFile %s', ), + transformationFile=dict( + argstr='--transformationFile %s', + extensions=None, + ), window_function=dict(argstr='--window_function %s', ), ) inputs = ResampleDTIVolume.input_spec() @@ -63,7 +73,11 @@ def test_ResampleDTIVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ResampleDTIVolume_outputs(): - output_map = dict(outputVolume=dict(position=-1, ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = ResampleDTIVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py index 31fb7d5c0d..09fef7d133 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py @@ -7,6 +7,7 @@ def test_TractographyLabelMapSeeding_inputs(): input_map = dict( InputVolume=dict( argstr='%s', + extensions=None, position=-2, ), OutputFibers=dict( @@ -20,7 +21,10 @@ def test_TractographyLabelMapSeeding_inputs(): nohash=True, usedefault=True, ), - inputroi=dict(argstr='--inputroi %s', ), + inputroi=dict( + argstr='--inputroi %s', + extensions=None, + ), integrationsteplength=dict(argstr='--integrationsteplength %f', ), label=dict(argstr='--label %d', ), maximumlength=dict(argstr='--maximumlength %f', ), @@ -45,7 +49,10 @@ def test_TractographyLabelMapSeeding_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_TractographyLabelMapSeeding_outputs(): output_map = dict( - OutputFibers=dict(position=-1, ), + OutputFibers=dict( + extensions=None, + position=-1, + ), outputdirectory=dict(), ) outputs = TractographyLabelMapSeeding.output_spec() diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py index 
9f2209c1eb..d2b17c562a 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py @@ -12,10 +12,12 @@ def test_AddScalarVolumes_inputs(): ), inputVolume1=dict( argstr='%s', + extensions=None, position=-3, ), inputVolume2=dict( argstr='%s', + extensions=None, position=-2, ), order=dict(argstr='--order %s', ), @@ -31,7 +33,11 @@ def test_AddScalarVolumes_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_AddScalarVolumes_outputs(): - output_map = dict(outputVolume=dict(position=-1, ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = AddScalarVolumes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py index a659aa47f7..bbcb2f077c 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py @@ -7,6 +7,7 @@ def test_CastScalarVolume_inputs(): input_map = dict( InputVolume=dict( argstr='%s', + extensions=None, position=-2, ), OutputVolume=dict( @@ -27,7 +28,11 @@ def test_CastScalarVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CastScalarVolume_outputs(): - output_map = dict(OutputVolume=dict(position=-1, ), ) + output_map = dict( + OutputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = CastScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py index ae662cf2ba..24f5b74307 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py @@ -16,10 +16,12 @@ def test_CheckerBoardFilter_inputs(): ), inputVolume1=dict( argstr='%s', + extensions=None, position=-3, ), inputVolume2=dict( argstr='%s', + extensions=None, position=-2, ), outputVolume=dict( @@ -34,7 +36,11 @@ def test_CheckerBoardFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CheckerBoardFilter_outputs(): - output_map = dict(outputVolume=dict(position=-1, ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = CheckerBoardFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py index 5613eb0c4c..0240ad676a 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py @@ -13,6 +13,7 @@ def test_CurvatureAnisotropicDiffusion_inputs(): ), inputVolume=dict( argstr='%s', + extensions=None, position=-2, ), iterations=dict(argstr='--iterations %d', ), @@ -29,7 +30,11 @@ def test_CurvatureAnisotropicDiffusion_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CurvatureAnisotropicDiffusion_outputs(): - 
output_map = dict(outputVolume=dict(position=-1, ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = CurvatureAnisotropicDiffusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py index 5db6c65c7e..c47382aef5 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py @@ -7,6 +7,7 @@ def test_ExtractSkeleton_inputs(): input_map = dict( InputImageFileName=dict( argstr='%s', + extensions=None, position=-2, ), OutputImageFileName=dict( @@ -30,7 +31,11 @@ def test_ExtractSkeleton_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ExtractSkeleton_outputs(): - output_map = dict(OutputImageFileName=dict(position=-1, ), ) + output_map = dict( + OutputImageFileName=dict( + extensions=None, + position=-1, + ), ) outputs = ExtractSkeleton.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py index ff46d6d308..2bdb73c4d5 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py @@ -12,6 +12,7 @@ def test_GaussianBlurImageFilter_inputs(): ), inputVolume=dict( argstr='%s', + extensions=None, position=-2, ), outputVolume=dict( @@ -27,7 +28,11 @@ def test_GaussianBlurImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GaussianBlurImageFilter_outputs(): - output_map = dict(outputVolume=dict(position=-1, ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = GaussianBlurImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py index 8ec7dbb156..6d3e40c8de 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py @@ -13,6 +13,7 @@ def test_GradientAnisotropicDiffusion_inputs(): ), inputVolume=dict( argstr='%s', + extensions=None, position=-2, ), iterations=dict(argstr='--iterations %d', ), @@ -29,7 +30,11 @@ def test_GradientAnisotropicDiffusion_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GradientAnisotropicDiffusion_outputs(): - output_map = dict(outputVolume=dict(position=-1, ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = GradientAnisotropicDiffusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py index 062a23c293..f1ff2c3809 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py +++ 
b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py @@ -12,6 +12,7 @@ def test_GrayscaleFillHoleImageFilter_inputs(): ), inputVolume=dict( argstr='%s', + extensions=None, position=-2, ), outputVolume=dict( @@ -26,7 +27,11 @@ def test_GrayscaleFillHoleImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GrayscaleFillHoleImageFilter_outputs(): - output_map = dict(outputVolume=dict(position=-1, ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = GrayscaleFillHoleImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py index edb6081ed3..6aee86282a 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py @@ -12,6 +12,7 @@ def test_GrayscaleGrindPeakImageFilter_inputs(): ), inputVolume=dict( argstr='%s', + extensions=None, position=-2, ), outputVolume=dict( @@ -26,7 +27,11 @@ def test_GrayscaleGrindPeakImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GrayscaleGrindPeakImageFilter_outputs(): - output_map = dict(outputVolume=dict(position=-1, ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = GrayscaleGrindPeakImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py b/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py index f73690586d..c839c50abf 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py @@ -12,6 +12,7 @@ def test_HistogramMatching_inputs(): ), inputVolume=dict( argstr='%s', + extensions=None, position=-3, ), numberOfHistogramLevels=dict(argstr='--numberOfHistogramLevels %d', ), @@ -23,6 +24,7 @@ def test_HistogramMatching_inputs(): ), referenceVolume=dict( argstr='%s', + extensions=None, position=-2, ), threshold=dict(argstr='--threshold ', ), @@ -33,7 +35,11 @@ def test_HistogramMatching_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_HistogramMatching_outputs(): - output_map = dict(outputVolume=dict(position=-1, ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = HistogramMatching.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py index 0bae22c342..7fd2c31db3 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py @@ -7,10 +7,12 @@ def test_ImageLabelCombine_inputs(): input_map = dict( InputLabelMap_A=dict( argstr='%s', + extensions=None, position=-3, ), InputLabelMap_B=dict( argstr='%s', + extensions=None, position=-2, ), OutputLabelMap=dict( @@ -31,7 +33,11 @@ def test_ImageLabelCombine_inputs(): for metakey, value in list(metadata.items()): assert 
getattr(inputs.traits()[key], metakey) == value def test_ImageLabelCombine_outputs(): - output_map = dict(OutputLabelMap=dict(position=-1, ), ) + output_map = dict( + OutputLabelMap=dict( + extensions=None, + position=-1, + ), ) outputs = ImageLabelCombine.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py index cd04072890..56d68199af 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py @@ -7,10 +7,12 @@ def test_MaskScalarVolume_inputs(): input_map = dict( InputVolume=dict( argstr='%s', + extensions=None, position=-3, ), MaskVolume=dict( argstr='%s', + extensions=None, position=-2, ), OutputVolume=dict( @@ -32,7 +34,11 @@ def test_MaskScalarVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MaskScalarVolume_outputs(): - output_map = dict(OutputVolume=dict(position=-1, ), ) + output_map = dict( + OutputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = MaskScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py index 35bb9496c9..c0bf97e152 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py @@ -12,6 +12,7 @@ def test_MedianImageFilter_inputs(): ), inputVolume=dict( argstr='%s', + extensions=None, position=-2, ), neighborhood=dict( @@ -30,7 +31,11 @@ def test_MedianImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MedianImageFilter_outputs(): - output_map = dict(outputVolume=dict(position=-1, ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = MedianImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py index 6590c4b133..cc39d5b7b1 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py @@ -12,10 +12,12 @@ def test_MultiplyScalarVolumes_inputs(): ), inputVolume1=dict( argstr='%s', + extensions=None, position=-3, ), inputVolume2=dict( argstr='%s', + extensions=None, position=-2, ), order=dict(argstr='--order %s', ), @@ -31,7 +33,11 @@ def test_MultiplyScalarVolumes_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MultiplyScalarVolumes_outputs(): - output_map = dict(outputVolume=dict(position=-1, ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = MultiplyScalarVolumes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py b/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py index c6ad0bf24d..b2ce50a0fa 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py 
+++ b/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py @@ -16,12 +16,18 @@ def test_N4ITKBiasFieldCorrection_inputs(): argstr='--histogramsharpening %s', sep=',', ), - inputimage=dict(argstr='--inputimage %s', ), + inputimage=dict( + argstr='--inputimage %s', + extensions=None, + ), iterations=dict( argstr='--iterations %s', sep=',', ), - maskimage=dict(argstr='--maskimage %s', ), + maskimage=dict( + argstr='--maskimage %s', + extensions=None, + ), meshresolution=dict( argstr='--meshresolution %s', sep=',', @@ -36,7 +42,10 @@ def test_N4ITKBiasFieldCorrection_inputs(): ), shrinkfactor=dict(argstr='--shrinkfactor %d', ), splinedistance=dict(argstr='--splinedistance %f', ), - weightimage=dict(argstr='--weightimage %s', ), + weightimage=dict( + argstr='--weightimage %s', + extensions=None, + ), ) inputs = N4ITKBiasFieldCorrection.input_spec() @@ -45,8 +54,8 @@ def test_N4ITKBiasFieldCorrection_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_N4ITKBiasFieldCorrection_outputs(): output_map = dict( - outputbiasfield=dict(), - outputimage=dict(), + outputbiasfield=dict(extensions=None, ), + outputimage=dict(extensions=None, ), ) outputs = N4ITKBiasFieldCorrection.output_spec() diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py index ebe34fd9b3..12d7af77c4 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py @@ -7,10 +7,16 @@ def test_ResampleScalarVectorDWIVolume_inputs(): input_map = dict( Inverse_ITK_Transformation=dict( argstr='--Inverse_ITK_Transformation ', ), - Reference=dict(argstr='--Reference %s', ), + Reference=dict( + argstr='--Reference %s', + extensions=None, + ), args=dict(argstr='%s', ), centered_transform=dict(argstr='--centered_transform ', ), - defField=dict(argstr='--defField %s', ), + defField=dict( + argstr='--defField %s', + extensions=None, + ), default_pixel_value=dict(argstr='--default_pixel_value %f', ), direction_matrix=dict( argstr='--direction_matrix %s', @@ -24,6 +30,7 @@ def test_ResampleScalarVectorDWIVolume_inputs(): image_center=dict(argstr='--image_center %s', ), inputVolume=dict( argstr='%s', + extensions=None, position=-2, ), interpolation=dict(argstr='--interpolation %s', ), @@ -52,7 +59,10 @@ def test_ResampleScalarVectorDWIVolume_inputs(): sep=',', ), transform_order=dict(argstr='--transform_order %s', ), - transformationFile=dict(argstr='--transformationFile %s', ), + transformationFile=dict( + argstr='--transformationFile %s', + extensions=None, + ), window_function=dict(argstr='--window_function %s', ), ) inputs = ResampleScalarVectorDWIVolume.input_spec() @@ -61,7 +71,11 @@ def test_ResampleScalarVectorDWIVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ResampleScalarVectorDWIVolume_outputs(): - output_map = dict(outputVolume=dict(position=-1, ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = ResampleScalarVectorDWIVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py index d62589881f..106cd843a4 100644 --- 
a/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py @@ -12,10 +12,12 @@ def test_SubtractScalarVolumes_inputs(): ), inputVolume1=dict( argstr='%s', + extensions=None, position=-3, ), inputVolume2=dict( argstr='%s', + extensions=None, position=-2, ), order=dict(argstr='--order %s', ), @@ -31,7 +33,11 @@ def test_SubtractScalarVolumes_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SubtractScalarVolumes_outputs(): - output_map = dict(outputVolume=dict(position=-1, ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = SubtractScalarVolumes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py index 34de317104..e63ed5923f 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py @@ -7,6 +7,7 @@ def test_ThresholdScalarVolume_inputs(): input_map = dict( InputVolume=dict( argstr='%s', + extensions=None, position=-2, ), OutputVolume=dict( @@ -31,7 +32,11 @@ def test_ThresholdScalarVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ThresholdScalarVolume_outputs(): - output_map = dict(OutputVolume=dict(position=-1, ), ) + output_map = dict( + OutputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = ThresholdScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py index 8e7890de85..89832bf9ba 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py @@ -14,6 +14,7 @@ def test_VotingBinaryHoleFillingImageFilter_inputs(): foreground=dict(argstr='--foreground %d', ), inputVolume=dict( argstr='%s', + extensions=None, position=-2, ), majorityThreshold=dict(argstr='--majorityThreshold %d', ), @@ -33,7 +34,11 @@ def test_VotingBinaryHoleFillingImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_VotingBinaryHoleFillingImageFilter_outputs(): - output_map = dict(outputVolume=dict(position=-1, ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = VotingBinaryHoleFillingImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py b/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py index 0e25ffad9a..f3c1ec6ff0 100644 --- a/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py +++ b/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py @@ -13,6 +13,7 @@ def test_DWIUnbiasedNonLocalMeansFilter_inputs(): hp=dict(argstr='--hp %f', ), inputVolume=dict( argstr='%s', + extensions=None, position=-2, ), ng=dict(argstr='--ng %d', ), @@ 
-40,7 +41,11 @@ def test_DWIUnbiasedNonLocalMeansFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DWIUnbiasedNonLocalMeansFilter_outputs(): - output_map = dict(outputVolume=dict(position=-1, ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = DWIUnbiasedNonLocalMeansFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py index 949cdc7f61..059e7ee694 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py @@ -7,10 +7,12 @@ def test_AffineRegistration_inputs(): input_map = dict( FixedImageFileName=dict( argstr='%s', + extensions=None, position=-2, ), MovingImageFileName=dict( argstr='%s', + extensions=None, position=-1, ), args=dict(argstr='%s', ), @@ -20,7 +22,10 @@ def test_AffineRegistration_inputs(): ), fixedsmoothingfactor=dict(argstr='--fixedsmoothingfactor %d', ), histogrambins=dict(argstr='--histogrambins %d', ), - initialtransform=dict(argstr='--initialtransform %s', ), + initialtransform=dict( + argstr='--initialtransform %s', + extensions=None, + ), iterations=dict(argstr='--iterations %d', ), movingsmoothingfactor=dict(argstr='--movingsmoothingfactor %d', ), outputtransform=dict( @@ -41,8 +46,8 @@ def test_AffineRegistration_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_AffineRegistration_outputs(): output_map = dict( - outputtransform=dict(), - resampledmovingfilename=dict(), + outputtransform=dict(extensions=None, ), + resampledmovingfilename=dict(extensions=None, ), ) outputs = AffineRegistration.output_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py index 17339f8859..bee2d60af6 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py @@ -7,10 +7,12 @@ def test_BSplineDeformableRegistration_inputs(): input_map = dict( FixedImageFileName=dict( argstr='%s', + extensions=None, position=-2, ), MovingImageFileName=dict( argstr='%s', + extensions=None, position=-1, ), args=dict(argstr='%s', ), @@ -22,7 +24,10 @@ def test_BSplineDeformableRegistration_inputs(): ), gridSize=dict(argstr='--gridSize %d', ), histogrambins=dict(argstr='--histogrambins %d', ), - initialtransform=dict(argstr='--initialtransform %s', ), + initialtransform=dict( + argstr='--initialtransform %s', + extensions=None, + ), iterations=dict(argstr='--iterations %d', ), maximumDeformation=dict(argstr='--maximumDeformation %f', ), outputtransform=dict( @@ -46,9 +51,9 @@ def test_BSplineDeformableRegistration_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_BSplineDeformableRegistration_outputs(): output_map = dict( - outputtransform=dict(), - outputwarp=dict(), - resampledmovingfilename=dict(), + outputtransform=dict(extensions=None, ), + outputwarp=dict(extensions=None, ), + resampledmovingfilename=dict(extensions=None, ), ) outputs = BSplineDeformableRegistration.output_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py 
b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py index 8335515c13..cba80ed391 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py @@ -14,8 +14,14 @@ def test_BSplineToDeformationField_inputs(): nohash=True, usedefault=True, ), - refImage=dict(argstr='--refImage %s', ), - tfm=dict(argstr='--tfm %s', ), + refImage=dict( + argstr='--refImage %s', + extensions=None, + ), + tfm=dict( + argstr='--tfm %s', + extensions=None, + ), ) inputs = BSplineToDeformationField.input_spec() @@ -23,7 +29,7 @@ def test_BSplineToDeformationField_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_BSplineToDeformationField_outputs(): - output_map = dict(defImage=dict(), ) + output_map = dict(defImage=dict(extensions=None, ), ) outputs = BSplineToDeformationField.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py index 7fa8b77d63..8cf4622906 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py @@ -21,17 +21,25 @@ def test_ExpertAutomatedRegistration_inputs(): expectedSkew=dict(argstr='--expectedSkew %f', ), fixedImage=dict( argstr='%s', + extensions=None, position=-2, ), - fixedImageMask=dict(argstr='--fixedImageMask %s', ), + fixedImageMask=dict( + argstr='--fixedImageMask %s', + extensions=None, + ), fixedLandmarks=dict(argstr='--fixedLandmarks %s...', ), initialization=dict(argstr='--initialization %s', ), interpolation=dict(argstr='--interpolation %s', ), - loadTransform=dict(argstr='--loadTransform %s', ), + loadTransform=dict( + argstr='--loadTransform %s', + extensions=None, + ), metric=dict(argstr='--metric %s', ), minimizeMemory=dict(argstr='--minimizeMemory ', ), movingImage=dict( argstr='%s', + extensions=None, position=-1, ), movingLandmarks=dict(argstr='--movingLandmarks %s...', ), @@ -58,8 +66,8 @@ def test_ExpertAutomatedRegistration_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_ExpertAutomatedRegistration_outputs(): output_map = dict( - resampledImage=dict(), - saveTransform=dict(), + resampledImage=dict(extensions=None, ), + saveTransform=dict(extensions=None, ), ) outputs = ExpertAutomatedRegistration.output_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py index a55432f9b0..50a9f2b292 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py @@ -7,10 +7,12 @@ def test_LinearRegistration_inputs(): input_map = dict( FixedImageFileName=dict( argstr='%s', + extensions=None, position=-2, ), MovingImageFileName=dict( argstr='%s', + extensions=None, position=-1, ), args=dict(argstr='%s', ), @@ -20,7 +22,10 @@ def test_LinearRegistration_inputs(): ), fixedsmoothingfactor=dict(argstr='--fixedsmoothingfactor %d', ), histogrambins=dict(argstr='--histogrambins %d', ), - initialtransform=dict(argstr='--initialtransform %s', ), + initialtransform=dict( + argstr='--initialtransform %s', + extensions=None, + ), iterations=dict( argstr='--iterations %s', 
sep=',', @@ -48,8 +53,8 @@ def test_LinearRegistration_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_LinearRegistration_outputs(): output_map = dict( - outputtransform=dict(), - resampledmovingfilename=dict(), + outputtransform=dict(extensions=None, ), + resampledmovingfilename=dict(extensions=None, ), ) outputs = LinearRegistration.output_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py index 7903fd1d5c..f6cfc49710 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py @@ -12,13 +12,18 @@ def test_MultiResolutionAffineRegistration_inputs(): ), fixedImage=dict( argstr='%s', + extensions=None, position=-2, ), - fixedImageMask=dict(argstr='--fixedImageMask %s', ), + fixedImageMask=dict( + argstr='--fixedImageMask %s', + extensions=None, + ), fixedImageROI=dict(argstr='--fixedImageROI %s', ), metricTolerance=dict(argstr='--metricTolerance %f', ), movingImage=dict( argstr='%s', + extensions=None, position=-1, ), numIterations=dict(argstr='--numIterations %d', ), @@ -41,8 +46,8 @@ def test_MultiResolutionAffineRegistration_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_MultiResolutionAffineRegistration_outputs(): output_map = dict( - resampledImage=dict(), - saveTransform=dict(), + resampledImage=dict(extensions=None, ), + saveTransform=dict(extensions=None, ), ) outputs = MultiResolutionAffineRegistration.output_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py index a1af6c71b3..19b41f7127 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py @@ -12,6 +12,7 @@ def test_OtsuThresholdImageFilter_inputs(): ), inputVolume=dict( argstr='%s', + extensions=None, position=-2, ), insideValue=dict(argstr='--insideValue %d', ), @@ -29,7 +30,11 @@ def test_OtsuThresholdImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_OtsuThresholdImageFilter_outputs(): - output_map = dict(outputVolume=dict(position=-1, ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = OtsuThresholdImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py index 02beeee464..b9fa12f2aa 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py @@ -14,6 +14,7 @@ def test_OtsuThresholdSegmentation_inputs(): faceConnected=dict(argstr='--faceConnected ', ), inputVolume=dict( argstr='%s', + extensions=None, position=-2, ), minimumObjectSize=dict(argstr='--minimumObjectSize %d', ), @@ -30,7 +31,11 @@ def test_OtsuThresholdSegmentation_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_OtsuThresholdSegmentation_outputs(): - output_map = dict(outputVolume=dict(position=-1, ), ) + output_map = dict( 
+ outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = OtsuThresholdSegmentation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py b/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py index baa698246c..7f1c259cd6 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py @@ -7,6 +7,7 @@ def test_ResampleScalarVolume_inputs(): input_map = dict( InputVolume=dict( argstr='%s', + extensions=None, position=-2, ), OutputVolume=dict( @@ -31,7 +32,11 @@ def test_ResampleScalarVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ResampleScalarVolume_outputs(): - output_map = dict(OutputVolume=dict(position=-1, ), ) + output_map = dict( + OutputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = ResampleScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py index 6c2a0eb072..1c29d1295a 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py @@ -7,10 +7,12 @@ def test_RigidRegistration_inputs(): input_map = dict( FixedImageFileName=dict( argstr='%s', + extensions=None, position=-2, ), MovingImageFileName=dict( argstr='%s', + extensions=None, position=-1, ), args=dict(argstr='%s', ), @@ -20,7 +22,10 @@ def test_RigidRegistration_inputs(): ), fixedsmoothingfactor=dict(argstr='--fixedsmoothingfactor %d', ), histogrambins=dict(argstr='--histogrambins %d', ), - initialtransform=dict(argstr='--initialtransform %s', ), + initialtransform=dict( + argstr='--initialtransform %s', + extensions=None, + ), iterations=dict( argstr='--iterations %s', sep=',', @@ -49,8 +54,8 @@ def test_RigidRegistration_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_RigidRegistration_outputs(): output_map = dict( - outputtransform=dict(), - resampledmovingfilename=dict(), + outputtransform=dict(extensions=None, ), + resampledmovingfilename=dict(extensions=None, ), ) outputs = RigidRegistration.output_spec() diff --git a/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py b/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py index e46f046add..386e368de7 100644 --- a/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py +++ b/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py @@ -8,10 +8,12 @@ def test_IntensityDifferenceMetric_inputs(): args=dict(argstr='%s', ), baselineSegmentationVolume=dict( argstr='%s', + extensions=None, position=-3, ), baselineVolume=dict( argstr='%s', + extensions=None, position=-4, ), changingBandSize=dict(argstr='--changingBandSize %d', ), @@ -21,6 +23,7 @@ def test_IntensityDifferenceMetric_inputs(): ), followupVolume=dict( argstr='%s', + extensions=None, position=-2, ), outputVolume=dict( @@ -41,8 +44,11 @@ def test_IntensityDifferenceMetric_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_IntensityDifferenceMetric_outputs(): output_map = dict( - outputVolume=dict(position=-1, ), - reportFileName=dict(), + outputVolume=dict( + 
extensions=None, + position=-1, + ), + reportFileName=dict(extensions=None, ), ) outputs = IntensityDifferenceMetric.output_spec() diff --git a/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py b/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py index 61141f65db..51ae985574 100644 --- a/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py +++ b/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py @@ -11,7 +11,10 @@ def test_PETStandardUptakeValueComputation_inputs(): SUVMean=dict(argstr='--SUVMean %s', ), SUVMin=dict(argstr='--SUVMin %s', ), args=dict(argstr='%s', ), - color=dict(argstr='--color %s', ), + color=dict( + argstr='--color %s', + extensions=None, + ), csvFile=dict( argstr='--csvFile %s', hash_files=False, @@ -20,9 +23,15 @@ def test_PETStandardUptakeValueComputation_inputs(): nohash=True, usedefault=True, ), - labelMap=dict(argstr='--labelMap %s', ), + labelMap=dict( + argstr='--labelMap %s', + extensions=None, + ), petDICOMPath=dict(argstr='--petDICOMPath %s', ), - petVolume=dict(argstr='--petVolume %s', ), + petVolume=dict( + argstr='--petVolume %s', + extensions=None, + ), ) inputs = PETStandardUptakeValueComputation.input_spec() @@ -30,7 +39,7 @@ def test_PETStandardUptakeValueComputation_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_PETStandardUptakeValueComputation_outputs(): - output_map = dict(csvFile=dict(), ) + output_map = dict(csvFile=dict(extensions=None, ), ) outputs = PETStandardUptakeValueComputation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py b/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py index 454e290102..c8969b06b7 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py @@ -24,7 +24,7 @@ def test_ACPCTransform_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ACPCTransform_outputs(): - output_map = dict(outputTransform=dict(), ) + output_map = dict(outputTransform=dict(extensions=None, ), ) outputs = ACPCTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py index c631f9b96e..18cd4c63ff 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py @@ -19,14 +19,25 @@ def test_BRAINSDemonWarp_inputs(): nohash=True, usedefault=True, ), - fixedBinaryVolume=dict(argstr='--fixedBinaryVolume %s', ), - fixedVolume=dict(argstr='--fixedVolume %s', ), + fixedBinaryVolume=dict( + argstr='--fixedBinaryVolume %s', + extensions=None, + ), + fixedVolume=dict( + argstr='--fixedVolume %s', + extensions=None, + ), gradient_type=dict(argstr='--gradient_type %s', ), gui=dict(argstr='--gui ', ), histogramMatch=dict(argstr='--histogramMatch ', ), initializeWithDisplacementField=dict( - argstr='--initializeWithDisplacementField %s', ), - initializeWithTransform=dict(argstr='--initializeWithTransform %s', ), + argstr='--initializeWithDisplacementField %s', + extensions=None, + 
), + initializeWithTransform=dict( + argstr='--initializeWithTransform %s', + extensions=None, + ), inputPixelType=dict(argstr='--inputPixelType %s', ), interpolationMode=dict(argstr='--interpolationMode %s', ), lowerThresholdForBOBF=dict(argstr='--lowerThresholdForBOBF %d', ), @@ -44,8 +55,14 @@ def test_BRAINSDemonWarp_inputs(): argstr='--minimumMovingPyramid %s', sep=',', ), - movingBinaryVolume=dict(argstr='--movingBinaryVolume %s', ), - movingVolume=dict(argstr='--movingVolume %s', ), + movingBinaryVolume=dict( + argstr='--movingBinaryVolume %s', + extensions=None, + ), + movingVolume=dict( + argstr='--movingVolume %s', + extensions=None, + ), neighborhoodForBOBF=dict( argstr='--neighborhoodForBOBF %s', sep=',', @@ -92,9 +109,9 @@ def test_BRAINSDemonWarp_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSDemonWarp_outputs(): output_map = dict( - outputCheckerboardVolume=dict(), - outputDisplacementFieldVolume=dict(), - outputVolume=dict(), + outputCheckerboardVolume=dict(extensions=None, ), + outputDisplacementFieldVolume=dict(extensions=None, ), + outputVolume=dict(extensions=None, ), ) outputs = BRAINSDemonWarp.output_spec() diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py index bb62633d94..450dc2ed9b 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py @@ -28,13 +28,22 @@ def test_BRAINSFit_inputs(): usedefault=True, ), failureExitCode=dict(argstr='--failureExitCode %d', ), - fixedBinaryVolume=dict(argstr='--fixedBinaryVolume %s', ), - fixedVolume=dict(argstr='--fixedVolume %s', ), + fixedBinaryVolume=dict( + argstr='--fixedBinaryVolume %s', + extensions=None, + ), + fixedVolume=dict( + argstr='--fixedVolume %s', + extensions=None, + ), fixedVolumeTimeIndex=dict(argstr='--fixedVolumeTimeIndex %d', ), forceMINumberOfThreads=dict(argstr='--forceMINumberOfThreads %d', ), gui=dict(argstr='--gui ', ), histogramMatch=dict(argstr='--histogramMatch ', ), - initialTransform=dict(argstr='--initialTransform %s', ), + initialTransform=dict( + argstr='--initialTransform %s', + extensions=None, + ), initializeTransformMode=dict(argstr='--initializeTransformMode %s', ), interpolationMode=dict(argstr='--interpolationMode %s', ), linearTransform=dict( @@ -54,8 +63,14 @@ def test_BRAINSFit_inputs(): argstr='--minimumStepLength %s', sep=',', ), - movingBinaryVolume=dict(argstr='--movingBinaryVolume %s', ), - movingVolume=dict(argstr='--movingVolume %s', ), + movingBinaryVolume=dict( + argstr='--movingBinaryVolume %s', + extensions=None, + ), + movingVolume=dict( + argstr='--movingVolume %s', + extensions=None, + ), movingVolumeTimeIndex=dict(argstr='--movingVolumeTimeIndex %d', ), numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), numberOfIterations=dict( @@ -127,13 +142,13 @@ def test_BRAINSFit_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSFit_outputs(): output_map = dict( - bsplineTransform=dict(), - linearTransform=dict(), - outputFixedVolumeROI=dict(), - outputMovingVolumeROI=dict(), - outputTransform=dict(), - outputVolume=dict(), - strippedOutputTransform=dict(), + bsplineTransform=dict(extensions=None, ), + linearTransform=dict(extensions=None, ), + outputFixedVolumeROI=dict(extensions=None, ), + outputMovingVolumeROI=dict(extensions=None, ), + outputTransform=dict(extensions=None, ), + 
outputVolume=dict(extensions=None, ), + strippedOutputTransform=dict(extensions=None, ), ) outputs = BRAINSFit.output_spec() diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py index 98ec5f4ff3..e8d4c187b0 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py @@ -7,7 +7,10 @@ def test_BRAINSResample_inputs(): input_map = dict( args=dict(argstr='%s', ), defaultValue=dict(argstr='--defaultValue %f', ), - deformationVolume=dict(argstr='--deformationVolume %s', ), + deformationVolume=dict( + argstr='--deformationVolume %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, @@ -16,7 +19,10 @@ def test_BRAINSResample_inputs(): argstr='--gridSpacing %s', sep=',', ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), interpolationMode=dict(argstr='--interpolationMode %s', ), inverseTransform=dict(argstr='--inverseTransform ', ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), @@ -25,8 +31,14 @@ def test_BRAINSResample_inputs(): hash_files=False, ), pixelType=dict(argstr='--pixelType %s', ), - referenceVolume=dict(argstr='--referenceVolume %s', ), - warpTransform=dict(argstr='--warpTransform %s', ), + referenceVolume=dict( + argstr='--referenceVolume %s', + extensions=None, + ), + warpTransform=dict( + argstr='--warpTransform %s', + extensions=None, + ), ) inputs = BRAINSResample.input_spec() @@ -34,7 +46,7 @@ def test_BRAINSResample_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSResample_outputs(): - output_map = dict(outputVolume=dict(), ) + output_map = dict(outputVolume=dict(extensions=None, ), ) outputs = BRAINSResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py b/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py index 36d42fe8df..d190957049 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py @@ -26,7 +26,7 @@ def test_FiducialRegistration_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_FiducialRegistration_outputs(): - output_map = dict(saveTransform=dict(), ) + output_map = dict(saveTransform=dict(extensions=None, ), ) outputs = FiducialRegistration.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py b/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py index b3255da1d3..b196d318d4 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py @@ -19,14 +19,22 @@ def test_VBRAINSDemonWarp_inputs(): nohash=True, usedefault=True, ), - fixedBinaryVolume=dict(argstr='--fixedBinaryVolume %s', ), + fixedBinaryVolume=dict( + argstr='--fixedBinaryVolume %s', + extensions=None, + ), fixedVolume=dict(argstr='--fixedVolume %s...', ), gradient_type=dict(argstr='--gradient_type %s', ), gui=dict(argstr='--gui ', ), histogramMatch=dict(argstr='--histogramMatch ', ), 
initializeWithDisplacementField=dict( - argstr='--initializeWithDisplacementField %s', ), - initializeWithTransform=dict(argstr='--initializeWithTransform %s', ), + argstr='--initializeWithDisplacementField %s', + extensions=None, + ), + initializeWithTransform=dict( + argstr='--initializeWithTransform %s', + extensions=None, + ), inputPixelType=dict(argstr='--inputPixelType %s', ), interpolationMode=dict(argstr='--interpolationMode %s', ), lowerThresholdForBOBF=dict(argstr='--lowerThresholdForBOBF %d', ), @@ -44,7 +52,10 @@ def test_VBRAINSDemonWarp_inputs(): argstr='--minimumMovingPyramid %s', sep=',', ), - movingBinaryVolume=dict(argstr='--movingBinaryVolume %s', ), + movingBinaryVolume=dict( + argstr='--movingBinaryVolume %s', + extensions=None, + ), movingVolume=dict(argstr='--movingVolume %s...', ), neighborhoodForBOBF=dict( argstr='--neighborhoodForBOBF %s', @@ -96,9 +107,9 @@ def test_VBRAINSDemonWarp_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_VBRAINSDemonWarp_outputs(): output_map = dict( - outputCheckerboardVolume=dict(), - outputDisplacementFieldVolume=dict(), - outputVolume=dict(), + outputCheckerboardVolume=dict(extensions=None, ), + outputDisplacementFieldVolume=dict(extensions=None, ), + outputVolume=dict(extensions=None, ), ) outputs = VBRAINSDemonWarp.output_spec() diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py index 89863fb730..8d56bade08 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py @@ -12,7 +12,10 @@ def test_BRAINSROIAuto_inputs(): nohash=True, usedefault=True, ), - inputVolume=dict(argstr='--inputVolume %s', ), + inputVolume=dict( + argstr='--inputVolume %s', + extensions=None, + ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), otsuPercentileThreshold=dict(argstr='--otsuPercentileThreshold %f', ), outputClippedVolumeROI=dict( @@ -34,8 +37,8 @@ def test_BRAINSROIAuto_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_BRAINSROIAuto_outputs(): output_map = dict( - outputClippedVolumeROI=dict(), - outputROIMaskVolume=dict(), + outputClippedVolumeROI=dict(extensions=None, ), + outputROIMaskVolume=dict(extensions=None, ), ) outputs = BRAINSROIAuto.output_spec() diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py index 09b0b1300f..f5b7a28b99 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py @@ -25,7 +25,10 @@ def test_EMSegmentCommandLine_inputs(): keepTempFiles=dict(argstr='--keepTempFiles ', ), loadAtlasNonCentered=dict(argstr='--loadAtlasNonCentered ', ), loadTargetCentered=dict(argstr='--loadTargetCentered ', ), - mrmlSceneFileName=dict(argstr='--mrmlSceneFileName %s', ), + mrmlSceneFileName=dict( + argstr='--mrmlSceneFileName %s', + extensions=None, + ), parametersMRMLNodeName=dict(argstr='--parametersMRMLNodeName %s', ), registrationAffineType=dict(argstr='--registrationAffineType %d', ), registrationDeformableType=dict( @@ -36,7 +39,9 @@ def test_EMSegmentCommandLine_inputs(): hash_files=False, ), resultStandardVolumeFileName=dict( - argstr='--resultStandardVolumeFileName %s', ), + argstr='--resultStandardVolumeFileName %s', + extensions=None, + 
), resultVolumeFileName=dict( argstr='--resultVolumeFileName %s', hash_files=False, @@ -53,9 +58,9 @@ def test_EMSegmentCommandLine_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_EMSegmentCommandLine_outputs(): output_map = dict( - generateEmptyMRMLSceneAndQuit=dict(), - resultMRMLSceneFileName=dict(), - resultVolumeFileName=dict(), + generateEmptyMRMLSceneAndQuit=dict(extensions=None, ), + resultMRMLSceneFileName=dict(extensions=None, ), + resultVolumeFileName=dict(extensions=None, ), ) outputs = EMSegmentCommandLine.output_spec() diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py index ed46177df0..6befb36860 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py @@ -15,12 +15,14 @@ def test_RobustStatisticsSegmenter_inputs(): intensityHomogeneity=dict(argstr='--intensityHomogeneity %f', ), labelImageFileName=dict( argstr='%s', + extensions=None, position=-2, ), labelValue=dict(argstr='--labelValue %d', ), maxRunningTime=dict(argstr='--maxRunningTime %f', ), originalImageFileName=dict( argstr='%s', + extensions=None, position=-3, ), segmentedImageFileName=dict( @@ -35,7 +37,11 @@ def test_RobustStatisticsSegmenter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_RobustStatisticsSegmenter_outputs(): - output_map = dict(segmentedImageFileName=dict(position=-1, ), ) + output_map = dict( + segmentedImageFileName=dict( + extensions=None, + position=-1, + ), ) outputs = RobustStatisticsSegmenter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py index 3c5e2124d0..5f880c08cb 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py @@ -12,6 +12,7 @@ def test_SimpleRegionGrowingSegmentation_inputs(): ), inputVolume=dict( argstr='%s', + extensions=None, position=-2, ), iterations=dict(argstr='--iterations %d', ), @@ -33,7 +34,11 @@ def test_SimpleRegionGrowingSegmentation_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SimpleRegionGrowingSegmentation_outputs(): - output_map = dict(outputVolume=dict(position=-1, ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = SimpleRegionGrowingSegmentation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py b/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py index 279d68e0ab..8494a85837 100644 --- a/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py +++ b/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py @@ -10,7 +10,10 @@ def test_EMSegmentTransformToNewFormat_inputs(): nohash=True, usedefault=True, ), - inputMRMLFileName=dict(argstr='--inputMRMLFileName %s', ), + inputMRMLFileName=dict( + argstr='--inputMRMLFileName %s', + extensions=None, + ), outputMRMLFileName=dict( 
argstr='--outputMRMLFileName %s', hash_files=False, @@ -23,7 +26,7 @@ def test_EMSegmentTransformToNewFormat_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_EMSegmentTransformToNewFormat_outputs(): - output_map = dict(outputMRMLFileName=dict(), ) + output_map = dict(outputMRMLFileName=dict(extensions=None, ), ) outputs = EMSegmentTransformToNewFormat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py b/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py index 7ad8cac8e9..3669a21fc3 100644 --- a/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py +++ b/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py @@ -7,6 +7,7 @@ def test_GrayscaleModelMaker_inputs(): input_map = dict( InputVolume=dict( argstr='%s', + extensions=None, position=-2, ), OutputGeometry=dict( @@ -32,7 +33,11 @@ def test_GrayscaleModelMaker_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GrayscaleModelMaker_outputs(): - output_map = dict(OutputGeometry=dict(position=-1, ), ) + output_map = dict( + OutputGeometry=dict( + extensions=None, + position=-1, + ), ) outputs = GrayscaleModelMaker.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py b/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py index bb3780495b..faf982a342 100644 --- a/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py +++ b/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py @@ -13,6 +13,7 @@ def test_LabelMapSmoothing_inputs(): gaussianSigma=dict(argstr='--gaussianSigma %f', ), inputVolume=dict( argstr='%s', + extensions=None, position=-2, ), labelToSmooth=dict(argstr='--labelToSmooth %d', ), @@ -30,7 +31,11 @@ def test_LabelMapSmoothing_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_LabelMapSmoothing_outputs(): - output_map = dict(outputVolume=dict(position=-1, ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = LabelMapSmoothing.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_MergeModels.py b/nipype/interfaces/slicer/tests/test_auto_MergeModels.py index 6453957a79..c25d4ebcf3 100644 --- a/nipype/interfaces/slicer/tests/test_auto_MergeModels.py +++ b/nipype/interfaces/slicer/tests/test_auto_MergeModels.py @@ -7,10 +7,12 @@ def test_MergeModels_inputs(): input_map = dict( Model1=dict( argstr='%s', + extensions=None, position=-3, ), Model2=dict( argstr='%s', + extensions=None, position=-2, ), ModelOutput=dict( @@ -30,7 +32,11 @@ def test_MergeModels_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MergeModels_outputs(): - output_map = dict(ModelOutput=dict(position=-1, ), ) + output_map = dict( + ModelOutput=dict( + extensions=None, + position=-1, + ), ) outputs = MergeModels.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py b/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py index ed182137cf..164034a4e7 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py +++ b/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py @@ -7,10 +7,14 @@ 
def test_ModelMaker_inputs(): input_map = dict( InputVolume=dict( argstr='%s', + extensions=None, position=-1, ), args=dict(argstr='%s', ), - color=dict(argstr='--color %s', ), + color=dict( + argstr='--color %s', + extensions=None, + ), debug=dict(argstr='--debug ', ), decimate=dict(argstr='--decimate %f', ), end=dict(argstr='--end %d', ), diff --git a/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py b/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py index efd11f1040..d39ae392bd 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py +++ b/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py @@ -7,6 +7,7 @@ def test_ModelToLabelMap_inputs(): input_map = dict( InputVolume=dict( argstr='%s', + extensions=None, position=-3, ), OutputVolume=dict( @@ -22,6 +23,7 @@ def test_ModelToLabelMap_inputs(): ), surface=dict( argstr='%s', + extensions=None, position=-2, ), ) @@ -31,7 +33,11 @@ def test_ModelToLabelMap_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ModelToLabelMap_outputs(): - output_map = dict(OutputVolume=dict(position=-1, ), ) + output_map = dict( + OutputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = ModelToLabelMap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py b/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py index f3d1908cd0..d0d437915b 100644 --- a/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py +++ b/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py @@ -12,6 +12,7 @@ def test_OrientScalarVolume_inputs(): ), inputVolume1=dict( argstr='%s', + extensions=None, position=-2, ), orientation=dict(argstr='--orientation %s', ), @@ -27,7 +28,11 @@ def test_OrientScalarVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_OrientScalarVolume_outputs(): - output_map = dict(outputVolume=dict(position=-1, ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = OrientScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py b/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py index 32a2fc2139..528ba47e1b 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py +++ b/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py @@ -7,10 +7,12 @@ def test_ProbeVolumeWithModel_inputs(): input_map = dict( InputModel=dict( argstr='%s', + extensions=None, position=-2, ), InputVolume=dict( argstr='%s', + extensions=None, position=-3, ), OutputModel=dict( @@ -30,7 +32,11 @@ def test_ProbeVolumeWithModel_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ProbeVolumeWithModel_outputs(): - output_map = dict(OutputModel=dict(position=-1, ), ) + output_map = dict( + OutputModel=dict( + extensions=None, + position=-1, + ), ) outputs = ProbeVolumeWithModel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py b/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py index 1b1aa6edcd..25b577ff96 100644 --- a/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py +++ b/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py @@ -5,7 +5,10 @@ def test_Analyze2nii_inputs(): 
input_map = dict( - analyze_file=dict(mandatory=True, ), + analyze_file=dict( + extensions=None, + mandatory=True, + ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), paths=dict(), @@ -24,7 +27,7 @@ def test_Analyze2nii_outputs(): output_map = dict( matlab_cmd=dict(), mfile=dict(usedefault=True, ), - nifti_file=dict(), + nifti_file=dict(extensions=None, ), paths=dict(), use_mcr=dict(), use_v8struct=dict( diff --git a/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py b/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py index bbc925ec9e..a9f652ba6d 100644 --- a/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py +++ b/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py @@ -6,6 +6,7 @@ def test_ApplyDeformations_inputs(): input_map = dict( deformation_field=dict( + extensions=None, field='comp{1}.def', mandatory=True, ), @@ -18,6 +19,7 @@ def test_ApplyDeformations_inputs(): mfile=dict(usedefault=True, ), paths=dict(), reference_volume=dict( + extensions=['.hdr', '.img', '.img.gz', '.nii'], field='comp{2}.id.space', mandatory=True, ), diff --git a/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py b/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py index d1c35dbcd2..b2be189f03 100644 --- a/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py +++ b/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py @@ -7,10 +7,12 @@ def test_ApplyInverseDeformation_inputs(): input_map = dict( bounding_box=dict(field='comp{1}.inv.comp{1}.sn2def.bb', ), deformation=dict( + extensions=None, field='comp{1}.inv.comp{1}.sn2def.matname', xor=['deformation_field'], ), deformation_field=dict( + extensions=None, field='comp{1}.inv.comp{1}.def', xor=['deformation'], ), @@ -22,7 +24,10 @@ def test_ApplyInverseDeformation_inputs(): matlab_cmd=dict(), mfile=dict(usedefault=True, ), paths=dict(), - target=dict(field='comp{1}.inv.space', ), + target=dict( + extensions=None, + field='comp{1}.inv.space', + ), use_mcr=dict(), use_v8struct=dict( min_ver='8', diff --git a/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py b/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py index a686e95485..cfef023fa7 100644 --- a/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py +++ b/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py @@ -7,12 +7,19 @@ def test_ApplyTransform_inputs(): input_map = dict( in_file=dict( copyfile=True, + extensions=None, + mandatory=True, + ), + mat=dict( + extensions=None, mandatory=True, ), - mat=dict(mandatory=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), - out_file=dict(genfile=True, ), + out_file=dict( + extensions=None, + genfile=True, + ), paths=dict(), use_mcr=dict(), use_v8struct=dict( @@ -26,7 +33,7 @@ def test_ApplyTransform_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ApplyTransform_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = ApplyTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py b/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py index 69695ca7b5..2da4a64024 100644 --- a/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py +++ b/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py @@ -5,16 +5,20 @@ def test_CalcCoregAffine_inputs(): input_map = dict( - invmat=dict(), - mat=dict(), + invmat=dict(extensions=None, ), + 
mat=dict(extensions=None, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), moving=dict( copyfile=False, + extensions=None, mandatory=True, ), paths=dict(), - target=dict(mandatory=True, ), + target=dict( + extensions=None, + mandatory=True, + ), use_mcr=dict(), use_v8struct=dict( min_ver='8', @@ -28,8 +32,8 @@ def test_CalcCoregAffine_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_CalcCoregAffine_outputs(): output_map = dict( - invmat=dict(), - mat=dict(), + invmat=dict(extensions=None, ), + mat=dict(extensions=None, ), ) outputs = CalcCoregAffine.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_Coregister.py b/nipype/interfaces/spm/tests/test_auto_Coregister.py index 957a2c84a9..260c3afc03 100644 --- a/nipype/interfaces/spm/tests/test_auto_Coregister.py +++ b/nipype/interfaces/spm/tests/test_auto_Coregister.py @@ -27,6 +27,7 @@ def test_Coregister_inputs(): ), target=dict( copyfile=False, + extensions=['.hdr', '.img', '.img.gz', '.nii'], field='ref', mandatory=True, ), diff --git a/nipype/interfaces/spm/tests/test_auto_DARTEL.py b/nipype/interfaces/spm/tests/test_auto_DARTEL.py index cc1b9eee1b..2cc2485f49 100644 --- a/nipype/interfaces/spm/tests/test_auto_DARTEL.py +++ b/nipype/interfaces/spm/tests/test_auto_DARTEL.py @@ -34,7 +34,7 @@ def test_DARTEL_inputs(): def test_DARTEL_outputs(): output_map = dict( dartel_flow_fields=dict(), - final_template_file=dict(), + final_template_file=dict(extensions=None, ), template_files=dict(), ) outputs = DARTEL.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py b/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py index d25bde2e5d..e4818e8b58 100644 --- a/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py +++ b/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py @@ -22,6 +22,7 @@ def test_DARTELNorm2MNI_inputs(): paths=dict(), template_file=dict( copyfile=False, + extensions=['.hdr', '.img', '.img.gz', '.nii'], field='mni_norm.template', mandatory=True, ), @@ -39,7 +40,7 @@ def test_DARTELNorm2MNI_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_DARTELNorm2MNI_outputs(): output_map = dict( - normalization_parameter_file=dict(), + normalization_parameter_file=dict(extensions=None, ), normalized_files=dict(), ) outputs = DARTELNorm2MNI.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py b/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py index bc9bb9006e..1b2d20bd04 100644 --- a/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py +++ b/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py @@ -16,10 +16,12 @@ def test_EstimateContrast_inputs(): paths=dict(), residual_image=dict( copyfile=False, + extensions=None, mandatory=True, ), spm_mat_file=dict( copyfile=True, + extensions=None, field='spmmat', mandatory=True, ), @@ -41,7 +43,7 @@ def test_EstimateContrast_outputs(): ess_images=dict(), spmF_images=dict(), spmT_images=dict(), - spm_mat_file=dict(), + spm_mat_file=dict(extensions=None, ), ) outputs = EstimateContrast.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_EstimateModel.py b/nipype/interfaces/spm/tests/test_auto_EstimateModel.py index 05f511a0bd..a143366d8f 100644 --- a/nipype/interfaces/spm/tests/test_auto_EstimateModel.py +++ b/nipype/interfaces/spm/tests/test_auto_EstimateModel.py @@ -15,6 +15,7 @@ def test_EstimateModel_inputs(): paths=dict(), spm_mat_file=dict( copyfile=True, + extensions=None, field='spmmat', mandatory=True, ), @@ -34,15 +35,15 @@ def 
test_EstimateModel_outputs(): output_map = dict( ARcoef=dict(), Cbetas=dict(), - RPVimage=dict(), + RPVimage=dict(extensions=['.hdr', '.img', '.img.gz', '.nii'], ), SDbetas=dict(), SDerror=dict(), beta_images=dict(), - labels=dict(), - mask_image=dict(), - residual_image=dict(), + labels=dict(extensions=['.hdr', '.img', '.img.gz', '.nii'], ), + mask_image=dict(extensions=['.hdr', '.img', '.img.gz', '.nii'], ), + residual_image=dict(extensions=['.hdr', '.img', '.img.gz', '.nii'], ), residual_images=dict(), - spm_mat_file=dict(), + spm_mat_file=dict(extensions=None, ), ) outputs = EstimateModel.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py b/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py index 38d2b1c6fb..892e1d2694 100644 --- a/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py @@ -6,7 +6,10 @@ def test_FactorialDesign_inputs(): input_map = dict( covariates=dict(field='cov', ), - explicit_mask_file=dict(field='masking.em', ), + explicit_mask_file=dict( + extensions=None, + field='masking.em', + ), global_calc_mean=dict( field='globalc.g_mean', xor=['global_calc_omit', 'global_calc_values'], @@ -50,7 +53,7 @@ def test_FactorialDesign_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_FactorialDesign_outputs(): - output_map = dict(spm_mat_file=dict(), ) + output_map = dict(spm_mat_file=dict(extensions=None, ), ) outputs = FactorialDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_FieldMap.py b/nipype/interfaces/spm/tests/test_auto_FieldMap.py index 43fbbcb8f4..a680fbd54e 100644 --- a/nipype/interfaces/spm/tests/test_auto_FieldMap.py +++ b/nipype/interfaces/spm/tests/test_auto_FieldMap.py @@ -7,6 +7,7 @@ def test_FieldMap_inputs(): input_map = dict( anat_file=dict( copyfile=False, + extensions=None, field='subj.anat', ), blip_direction=dict( @@ -19,6 +20,7 @@ def test_FieldMap_inputs(): ), epi_file=dict( copyfile=False, + extensions=None, field='subj.session.epi', mandatory=True, ), @@ -33,6 +35,7 @@ def test_FieldMap_inputs(): jobtype=dict(usedefault=True, ), magnitude_file=dict( copyfile=False, + extensions=None, field='subj.data.presubphasemag.magnitude', mandatory=True, ), @@ -73,6 +76,7 @@ def test_FieldMap_inputs(): paths=dict(), phase_file=dict( copyfile=False, + extensions=None, field='subj.data.presubphasemag.phase', mandatory=True, ), @@ -86,6 +90,7 @@ def test_FieldMap_inputs(): ), template=dict( copyfile=False, + extensions=None, field='subj.defaults.defaultsval.mflags.template', ), thresh=dict( @@ -120,7 +125,7 @@ def test_FieldMap_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_FieldMap_outputs(): - output_map = dict(vdm=dict(), ) + output_map = dict(vdm=dict(extensions=None, ), ) outputs = FieldMap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_Level1Design.py b/nipype/interfaces/spm/tests/test_auto_Level1Design.py index 7dce3dda2a..10c74f72b8 100644 --- a/nipype/interfaces/spm/tests/test_auto_Level1Design.py +++ b/nipype/interfaces/spm/tests/test_auto_Level1Design.py @@ -16,7 +16,10 @@ def test_Level1Design_inputs(): field='timing.RT', mandatory=True, ), - mask_image=dict(field='mask', ), + mask_image=dict( + extensions=None, + field='mask', + ), mask_threshold=dict(usedefault=True, ), 
matlab_cmd=dict(), mfile=dict(usedefault=True, ), @@ -46,7 +49,7 @@ def test_Level1Design_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Level1Design_outputs(): - output_map = dict(spm_mat_file=dict(), ) + output_map = dict(spm_mat_file=dict(extensions=None, ), ) outputs = Level1Design.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py b/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py index 26957f2fbb..435c288811 100644 --- a/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py @@ -6,7 +6,10 @@ def test_MultipleRegressionDesign_inputs(): input_map = dict( covariates=dict(field='cov', ), - explicit_mask_file=dict(field='masking.em', ), + explicit_mask_file=dict( + extensions=None, + field='masking.em', + ), global_calc_mean=dict( field='globalc.g_mean', xor=['global_calc_omit', 'global_calc_values'], @@ -59,7 +62,7 @@ def test_MultipleRegressionDesign_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MultipleRegressionDesign_outputs(): - output_map = dict(spm_mat_file=dict(), ) + output_map = dict(spm_mat_file=dict(extensions=None, ), ) outputs = MultipleRegressionDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_Normalize.py b/nipype/interfaces/spm/tests/test_auto_Normalize.py index fde0bf7fff..fbf5e278b0 100644 --- a/nipype/interfaces/spm/tests/test_auto_Normalize.py +++ b/nipype/interfaces/spm/tests/test_auto_Normalize.py @@ -22,6 +22,7 @@ def test_Normalize_inputs(): ), parameter_file=dict( copyfile=False, + extensions=None, field='subj.matname', mandatory=True, xor=['source', 'template'], @@ -36,10 +37,12 @@ def test_Normalize_inputs(): source_image_smoothing=dict(field='eoptions.smosrc', ), source_weight=dict( copyfile=False, + extensions=None, field='subj.wtsrc', ), template=dict( copyfile=False, + extensions=None, field='eoptions.template', mandatory=True, xor=['parameter_file'], @@ -47,6 +50,7 @@ def test_Normalize_inputs(): template_image_smoothing=dict(field='eoptions.smoref', ), template_weight=dict( copyfile=False, + extensions=None, field='eoptions.weight', ), use_mcr=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_Normalize12.py b/nipype/interfaces/spm/tests/test_auto_Normalize12.py index bf8da2dba1..25f77519e0 100644 --- a/nipype/interfaces/spm/tests/test_auto_Normalize12.py +++ b/nipype/interfaces/spm/tests/test_auto_Normalize12.py @@ -14,12 +14,14 @@ def test_Normalize12_inputs(): bias_regularization=dict(field='eoptions.biasreg', ), deformation_file=dict( copyfile=False, + extensions=['.hdr', '.img', '.img.gz', '.nii'], field='subj.def', mandatory=True, xor=['image_to_align', 'tpm'], ), image_to_align=dict( copyfile=True, + extensions=['.hdr', '.img', '.img.gz', '.nii'], field='subj.vol', mandatory=True, xor=['deformation_file'], @@ -36,6 +38,7 @@ def test_Normalize12_inputs(): smoothness=dict(field='eoptions.fwhm', ), tpm=dict( copyfile=False, + extensions=None, field='eoptions.tpm', xor=['deformation_file'], ), diff --git a/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py b/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py index dbb02a6275..d45165957b 100644 --- a/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py +++ 
b/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py @@ -6,7 +6,10 @@ def test_OneSampleTTestDesign_inputs(): input_map = dict( covariates=dict(field='cov', ), - explicit_mask_file=dict(field='masking.em', ), + explicit_mask_file=dict( + extensions=None, + field='masking.em', + ), global_calc_mean=dict( field='globalc.g_mean', xor=['global_calc_omit', 'global_calc_values'], @@ -54,7 +57,7 @@ def test_OneSampleTTestDesign_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_OneSampleTTestDesign_outputs(): - output_map = dict(spm_mat_file=dict(), ) + output_map = dict(spm_mat_file=dict(extensions=None, ), ) outputs = OneSampleTTestDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py b/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py index 3e1662268a..0ad104820e 100644 --- a/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py @@ -7,7 +7,10 @@ def test_PairedTTestDesign_inputs(): input_map = dict( ancova=dict(field='des.pt.ancova', ), covariates=dict(field='cov', ), - explicit_mask_file=dict(field='masking.em', ), + explicit_mask_file=dict( + extensions=None, + field='masking.em', + ), global_calc_mean=dict( field='globalc.g_mean', xor=['global_calc_omit', 'global_calc_values'], @@ -56,7 +59,7 @@ def test_PairedTTestDesign_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_PairedTTestDesign_outputs(): - output_map = dict(spm_mat_file=dict(), ) + output_map = dict(spm_mat_file=dict(extensions=None, ), ) outputs = PairedTTestDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_Realign.py b/nipype/interfaces/spm/tests/test_auto_Realign.py index ac2f5bbd92..4812665c78 100644 --- a/nipype/interfaces/spm/tests/test_auto_Realign.py +++ b/nipype/interfaces/spm/tests/test_auto_Realign.py @@ -28,7 +28,10 @@ def test_Realign_inputs(): min_ver='8', usedefault=True, ), - weight_img=dict(field='eoptions.weight', ), + weight_img=dict( + extensions=None, + field='eoptions.weight', + ), wrap=dict(field='eoptions.wrap', ), write_interp=dict(field='roptions.interp', ), write_mask=dict(field='roptions.mask', ), @@ -47,7 +50,7 @@ def test_Realign_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Realign_outputs(): output_map = dict( - mean_image=dict(), + mean_image=dict(extensions=None, ), modified_in_files=dict(), realigned_files=dict(), realignment_parameters=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_RealignUnwarp.py b/nipype/interfaces/spm/tests/test_auto_RealignUnwarp.py index 5c7c36f80b..9517e060d5 100644 --- a/nipype/interfaces/spm/tests/test_auto_RealignUnwarp.py +++ b/nipype/interfaces/spm/tests/test_auto_RealignUnwarp.py @@ -44,6 +44,7 @@ def test_RealignUnwarp_inputs(): paths=dict(), phase_map=dict( copyfile=False, + extensions=None, field='data.pmscan', ), quality=dict(field='eoptions.quality', ), @@ -63,7 +64,10 @@ def test_RealignUnwarp_inputs(): min_ver='8', usedefault=True, ), - weight_img=dict(field='eoptions.weight', ), + weight_img=dict( + extensions=None, + field='eoptions.weight', + ), wrap=dict(field='eoptions.ewrap', ), ) inputs = RealignUnwarp.input_spec() @@ -73,7 +77,7 @@ def test_RealignUnwarp_inputs(): assert getattr(inputs.traits()[key], metakey) == value def 
test_RealignUnwarp_outputs(): output_map = dict( - mean_image=dict(), + mean_image=dict(extensions=None, ), modified_in_files=dict(), realigned_unwarped_files=dict(), realignment_parameters=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_Reslice.py b/nipype/interfaces/spm/tests/test_auto_Reslice.py index 81299fc748..08a29b0bd7 100644 --- a/nipype/interfaces/spm/tests/test_auto_Reslice.py +++ b/nipype/interfaces/spm/tests/test_auto_Reslice.py @@ -5,13 +5,19 @@ def test_Reslice_inputs(): input_map = dict( - in_file=dict(mandatory=True, ), + in_file=dict( + extensions=None, + mandatory=True, + ), interp=dict(usedefault=True, ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), - out_file=dict(), + out_file=dict(extensions=None, ), paths=dict(), - space_defining=dict(mandatory=True, ), + space_defining=dict( + extensions=None, + mandatory=True, + ), use_mcr=dict(), use_v8struct=dict( min_ver='8', @@ -24,7 +30,7 @@ def test_Reslice_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Reslice_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Reslice.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py b/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py index 3e39ade181..3d5ac6847c 100644 --- a/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py +++ b/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py @@ -14,7 +14,10 @@ def test_ResliceToReference_inputs(): matlab_cmd=dict(), mfile=dict(usedefault=True, ), paths=dict(), - target=dict(field='comp{1}.id.space', ), + target=dict( + extensions=None, + field='comp{1}.id.space', + ), use_mcr=dict(), use_v8struct=dict( min_ver='8', diff --git a/nipype/interfaces/spm/tests/test_auto_Segment.py b/nipype/interfaces/spm/tests/test_auto_Segment.py index f6df46e1de..257e7c12ff 100644 --- a/nipype/interfaces/spm/tests/test_auto_Segment.py +++ b/nipype/interfaces/spm/tests/test_auto_Segment.py @@ -17,7 +17,10 @@ def test_Segment_inputs(): ), gaussians_per_class=dict(field='opts.ngaus', ), gm_output_type=dict(field='output.GM', ), - mask_image=dict(field='opts.msk', ), + mask_image=dict( + extensions=None, + field='opts.msk', + ), matlab_cmd=dict(), mfile=dict(usedefault=True, ), paths=dict(), @@ -40,22 +43,23 @@ def test_Segment_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Segment_outputs(): output_map = dict( - bias_corrected_image=dict(), - inverse_transformation_mat=dict(), - modulated_csf_image=dict(), - modulated_gm_image=dict(), + bias_corrected_image=dict(extensions=None, ), + inverse_transformation_mat=dict(extensions=None, ), + modulated_csf_image=dict(extensions=None, ), + modulated_gm_image=dict(extensions=None, ), modulated_input_image=dict( deprecated='0.10', + extensions=None, new_name='bias_corrected_image', ), - modulated_wm_image=dict(), - native_csf_image=dict(), - native_gm_image=dict(), - native_wm_image=dict(), - normalized_csf_image=dict(), - normalized_gm_image=dict(), - normalized_wm_image=dict(), - transformation_mat=dict(), + modulated_wm_image=dict(extensions=None, ), + native_csf_image=dict(extensions=None, ), + native_gm_image=dict(extensions=None, ), + native_wm_image=dict(extensions=None, ), + normalized_csf_image=dict(extensions=None, ), + normalized_gm_image=dict(extensions=None, ), + normalized_wm_image=dict(extensions=None, ), + 
transformation_mat=dict(extensions=None, ), ) outputs = Segment.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_Threshold.py b/nipype/interfaces/spm/tests/test_auto_Threshold.py index 078adb3a1b..ab23139515 100644 --- a/nipype/interfaces/spm/tests/test_auto_Threshold.py +++ b/nipype/interfaces/spm/tests/test_auto_Threshold.py @@ -16,10 +16,12 @@ def test_Threshold_inputs(): paths=dict(), spm_mat_file=dict( copyfile=True, + extensions=None, mandatory=True, ), stat_image=dict( copyfile=False, + extensions=None, mandatory=True, ), use_fwe_correction=dict(usedefault=True, ), @@ -40,9 +42,9 @@ def test_Threshold_outputs(): activation_forced=dict(), cluster_forming_thr=dict(), n_clusters=dict(), - pre_topo_fdr_map=dict(), + pre_topo_fdr_map=dict(extensions=None, ), pre_topo_n_clusters=dict(), - thresholded_map=dict(), + thresholded_map=dict(extensions=None, ), ) outputs = Threshold.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py b/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py index ccdc441e04..c025f5f4d0 100644 --- a/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py +++ b/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py @@ -13,10 +13,12 @@ def test_ThresholdStatistics_inputs(): paths=dict(), spm_mat_file=dict( copyfile=True, + extensions=None, mandatory=True, ), stat_image=dict( copyfile=False, + extensions=None, mandatory=True, ), use_mcr=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py b/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py index 4dfbc12570..faab30cd06 100644 --- a/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py @@ -7,7 +7,10 @@ def test_TwoSampleTTestDesign_inputs(): input_map = dict( covariates=dict(field='cov', ), dependent=dict(field='des.t2.dept', ), - explicit_mask_file=dict(field='masking.em', ), + explicit_mask_file=dict( + extensions=None, + field='masking.em', + ), global_calc_mean=dict( field='globalc.g_mean', xor=['global_calc_omit', 'global_calc_values'], @@ -60,7 +63,7 @@ def test_TwoSampleTTestDesign_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TwoSampleTTestDesign_outputs(): - output_map = dict(spm_mat_file=dict(), ) + output_map = dict(spm_mat_file=dict(extensions=None, ), ) outputs = TwoSampleTTestDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_VBMSegment.py b/nipype/interfaces/spm/tests/test_auto_VBMSegment.py index 6aaac2b489..561c0e6082 100644 --- a/nipype/interfaces/spm/tests/test_auto_VBMSegment.py +++ b/nipype/interfaces/spm/tests/test_auto_VBMSegment.py @@ -46,7 +46,9 @@ def test_VBMSegment_inputs(): usedefault=True, ), dartel_template=dict( - field='estwrite.extopts.dartelwarp.normhigh.darteltpm', ), + extensions=['.hdr', '.img', '.img.gz', '.nii'], + field='estwrite.extopts.dartelwarp.normhigh.darteltpm', + ), deformation_field=dict( field='estwrite.output.warps', usedefault=True, @@ -105,7 +107,10 @@ def test_VBMSegment_inputs(): usedefault=True, ), spatial_normalization=dict(usedefault=True, ), - tissues=dict(field='estwrite.tpm', ), + tissues=dict( + extensions=['.hdr', '.img', '.img.gz', '.nii'], + field='estwrite.tpm', + ), use_mcr=dict(), use_sanlm_denoising_filter=dict( field='estwrite.extopts.sanlm', diff --git a/nipype/interfaces/tests/test_auto_Bru2.py 
b/nipype/interfaces/tests/test_auto_Bru2.py index ce436ac03e..07fda82064 100644 --- a/nipype/interfaces/tests/test_auto_Bru2.py +++ b/nipype/interfaces/tests/test_auto_Bru2.py @@ -30,7 +30,7 @@ def test_Bru2_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Bru2_outputs(): - output_map = dict(nii_file=dict(), ) + output_map = dict(nii_file=dict(extensions=None, ), ) outputs = Bru2.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_C3d.py b/nipype/interfaces/tests/test_auto_C3d.py index 9e74e82e2a..1e741f5a09 100644 --- a/nipype/interfaces/tests/test_auto_C3d.py +++ b/nipype/interfaces/tests/test_auto_C3d.py @@ -24,6 +24,7 @@ def test_C3d_inputs(): ), out_file=dict( argstr='-o %s', + extensions=None, position=-1, xor=['out_files'], ), diff --git a/nipype/interfaces/tests/test_auto_C3dAffineTool.py b/nipype/interfaces/tests/test_auto_C3dAffineTool.py index 510ea2f02a..1326213e05 100644 --- a/nipype/interfaces/tests/test_auto_C3dAffineTool.py +++ b/nipype/interfaces/tests/test_auto_C3dAffineTool.py @@ -21,14 +21,17 @@ def test_C3dAffineTool_inputs(): ), reference_file=dict( argstr='-ref %s', + extensions=None, position=1, ), source_file=dict( argstr='-src %s', + extensions=None, position=2, ), transform_file=dict( argstr='%s', + extensions=None, position=3, ), ) @@ -38,7 +41,7 @@ def test_C3dAffineTool_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_C3dAffineTool_outputs(): - output_map = dict(itk_transform=dict(), ) + output_map = dict(itk_transform=dict(extensions=None, ), ) outputs = C3dAffineTool.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_CopyMeta.py b/nipype/interfaces/tests/test_auto_CopyMeta.py index 012edfa886..afe30789d7 100644 --- a/nipype/interfaces/tests/test_auto_CopyMeta.py +++ b/nipype/interfaces/tests/test_auto_CopyMeta.py @@ -5,10 +5,16 @@ def test_CopyMeta_inputs(): input_map = dict( - dest_file=dict(mandatory=True, ), + dest_file=dict( + extensions=None, + mandatory=True, + ), exclude_classes=dict(), include_classes=dict(), - src_file=dict(mandatory=True, ), + src_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = CopyMeta.input_spec() @@ -16,7 +22,7 @@ def test_CopyMeta_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CopyMeta_outputs(): - output_map = dict(dest_file=dict(), ) + output_map = dict(dest_file=dict(extensions=None, ), ) outputs = CopyMeta.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_Dcm2nii.py b/nipype/interfaces/tests/test_auto_Dcm2nii.py index 0caa45a1f1..cff1205261 100644 --- a/nipype/interfaces/tests/test_auto_Dcm2nii.py +++ b/nipype/interfaces/tests/test_auto_Dcm2nii.py @@ -16,6 +16,7 @@ def test_Dcm2nii_inputs(): ), config_file=dict( argstr='-b %s', + extensions=None, genfile=True, ), convert_all_pars=dict( diff --git a/nipype/interfaces/tests/test_auto_DcmStack.py b/nipype/interfaces/tests/test_auto_DcmStack.py index 53a5259af5..e9a671b919 100644 --- a/nipype/interfaces/tests/test_auto_DcmStack.py +++ b/nipype/interfaces/tests/test_auto_DcmStack.py @@ -20,7 +20,7 @@ def test_DcmStack_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DcmStack_outputs(): - output_map = 
dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = DcmStack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_FreeSurferSource.py b/nipype/interfaces/tests/test_auto_FreeSurferSource.py index 15ea9c66cd..8942b8d5e1 100644 --- a/nipype/interfaces/tests/test_auto_FreeSurferSource.py +++ b/nipype/interfaces/tests/test_auto_FreeSurferSource.py @@ -20,7 +20,10 @@ def test_FreeSurferSource_outputs(): altkey='BA', loc='stats', ), - T1=dict(loc='mri', ), + T1=dict( + extensions=None, + loc='mri', + ), annot=dict( altkey='*annot', loc='label', @@ -41,14 +44,23 @@ def test_FreeSurferSource_outputs(): altkey='area.pial', loc='surf', ), - aseg=dict(loc='mri', ), + aseg=dict( + extensions=None, + loc='mri', + ), aseg_stats=dict( altkey='aseg', loc='stats', ), avg_curv=dict(loc='surf', ), - brain=dict(loc='mri', ), - brainmask=dict(loc='mri', ), + brain=dict( + extensions=None, + loc='mri', + ), + brainmask=dict( + extensions=None, + loc='mri', + ), curv=dict(loc='surf', ), curv_pial=dict( altkey='curv.pial', @@ -62,7 +74,10 @@ def test_FreeSurferSource_outputs(): altkey='entorhinal_exvivo', loc='stats', ), - filled=dict(loc='mri', ), + filled=dict( + extensions=None, + loc='mri', + ), graymid=dict( altkey=['graymid', 'midthickness'], loc='surf', @@ -73,11 +88,23 @@ def test_FreeSurferSource_outputs(): altkey='*label', loc='label', ), - norm=dict(loc='mri', ), - nu=dict(loc='mri', ), - orig=dict(loc='mri', ), + norm=dict( + extensions=None, + loc='mri', + ), + nu=dict( + extensions=None, + loc='mri', + ), + orig=dict( + extensions=None, + loc='mri', + ), pial=dict(loc='surf', ), - rawavg=dict(loc='mri', ), + rawavg=dict( + extensions=None, + loc='mri', + ), ribbon=dict( altkey='*ribbon', loc='mri', @@ -92,8 +119,14 @@ def test_FreeSurferSource_outputs(): thickness=dict(loc='surf', ), volume=dict(loc='surf', ), white=dict(loc='surf', ), - wm=dict(loc='mri', ), - wmparc=dict(loc='mri', ), + wm=dict( + extensions=None, + loc='mri', + ), + wmparc=dict( + extensions=None, + loc='mri', + ), wmparc_stats=dict( altkey='wmparc', loc='stats', diff --git a/nipype/interfaces/tests/test_auto_JSONFileGrabber.py b/nipype/interfaces/tests/test_auto_JSONFileGrabber.py index 03a65cf6c2..7666a5c53f 100644 --- a/nipype/interfaces/tests/test_auto_JSONFileGrabber.py +++ b/nipype/interfaces/tests/test_auto_JSONFileGrabber.py @@ -6,7 +6,7 @@ def test_JSONFileGrabber_inputs(): input_map = dict( defaults=dict(), - in_file=dict(), + in_file=dict(extensions=None, ), ) inputs = JSONFileGrabber.input_spec() diff --git a/nipype/interfaces/tests/test_auto_JSONFileSink.py b/nipype/interfaces/tests/test_auto_JSONFileSink.py index 002997912b..4346a67252 100644 --- a/nipype/interfaces/tests/test_auto_JSONFileSink.py +++ b/nipype/interfaces/tests/test_auto_JSONFileSink.py @@ -7,7 +7,7 @@ def test_JSONFileSink_inputs(): input_map = dict( _outputs=dict(usedefault=True, ), in_dict=dict(usedefault=True, ), - out_file=dict(), + out_file=dict(extensions=None, ), ) inputs = JSONFileSink.input_spec() @@ -15,7 +15,7 @@ def test_JSONFileSink_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_JSONFileSink_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = JSONFileSink.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_LookupMeta.py 
b/nipype/interfaces/tests/test_auto_LookupMeta.py index 29100aaef7..b63b96576a 100644 --- a/nipype/interfaces/tests/test_auto_LookupMeta.py +++ b/nipype/interfaces/tests/test_auto_LookupMeta.py @@ -5,7 +5,10 @@ def test_LookupMeta_inputs(): input_map = dict( - in_file=dict(mandatory=True, ), + in_file=dict( + extensions=None, + mandatory=True, + ), meta_keys=dict(mandatory=True, ), ) inputs = LookupMeta.input_spec() diff --git a/nipype/interfaces/tests/test_auto_MatlabCommand.py b/nipype/interfaces/tests/test_auto_MatlabCommand.py index c1b971d25d..0ab56c426e 100644 --- a/nipype/interfaces/tests/test_auto_MatlabCommand.py +++ b/nipype/interfaces/tests/test_auto_MatlabCommand.py @@ -10,7 +10,10 @@ def test_MatlabCommand_inputs(): nohash=True, usedefault=True, ), - logfile=dict(argstr='-logfile %s', ), + logfile=dict( + argstr='-logfile %s', + extensions=None, + ), mfile=dict(usedefault=True, ), nodesktop=dict( argstr='-nodesktop', @@ -30,7 +33,10 @@ def test_MatlabCommand_inputs(): mandatory=True, position=-1, ), - script_file=dict(usedefault=True, ), + script_file=dict( + extensions=None, + usedefault=True, + ), single_comp_thread=dict( argstr='-singleCompThread', nohash=True, diff --git a/nipype/interfaces/tests/test_auto_MergeNifti.py b/nipype/interfaces/tests/test_auto_MergeNifti.py index 9e0a017c60..9cfde35a9a 100644 --- a/nipype/interfaces/tests/test_auto_MergeNifti.py +++ b/nipype/interfaces/tests/test_auto_MergeNifti.py @@ -18,7 +18,7 @@ def test_MergeNifti_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MergeNifti_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = MergeNifti.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_MeshFix.py b/nipype/interfaces/tests/test_auto_MeshFix.py index f306f4abed..39a8ff5ccf 100644 --- a/nipype/interfaces/tests/test_auto_MeshFix.py +++ b/nipype/interfaces/tests/test_auto_MeshFix.py @@ -41,11 +41,13 @@ def test_MeshFix_inputs(): ), in_file1=dict( argstr='%s', + extensions=None, mandatory=True, position=1, ), in_file2=dict( argstr='%s', + extensions=None, position=2, ), join_closest_components=dict( @@ -60,6 +62,7 @@ def test_MeshFix_inputs(): number_of_biggest_shells=dict(argstr='--shells %d', ), out_filename=dict( argstr='-o %s', + extensions=None, genfile=True, ), output_type=dict(usedefault=True, ), @@ -94,7 +97,7 @@ def test_MeshFix_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MeshFix_outputs(): - output_map = dict(mesh_file=dict(), ) + output_map = dict(mesh_file=dict(extensions=None, ), ) outputs = MeshFix.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_MySQLSink.py b/nipype/interfaces/tests/test_auto_MySQLSink.py index 048699659a..d534608fac 100644 --- a/nipype/interfaces/tests/test_auto_MySQLSink.py +++ b/nipype/interfaces/tests/test_auto_MySQLSink.py @@ -6,6 +6,7 @@ def test_MySQLSink_inputs(): input_map = dict( config=dict( + extensions=None, mandatory=True, xor=['host'], ), diff --git a/nipype/interfaces/tests/test_auto_PETPVC.py b/nipype/interfaces/tests/test_auto_PETPVC.py index c5283435d5..60f5b29826 100644 --- a/nipype/interfaces/tests/test_auto_PETPVC.py +++ b/nipype/interfaces/tests/test_auto_PETPVC.py @@ -32,10 +32,12 @@ def test_PETPVC_inputs(): ), in_file=dict( argstr='-i %s', + extensions=None, 
mandatory=True, ), mask_file=dict( argstr='-m %s', + extensions=None, mandatory=True, ), n_deconv=dict( @@ -48,6 +50,7 @@ def test_PETPVC_inputs(): ), out_file=dict( argstr='-o %s', + extensions=None, genfile=True, hash_files=False, ), @@ -66,7 +69,7 @@ def test_PETPVC_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_PETPVC_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = PETPVC.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_Quickshear.py b/nipype/interfaces/tests/test_auto_Quickshear.py index 7f39a6bc96..d08985a639 100644 --- a/nipype/interfaces/tests/test_auto_Quickshear.py +++ b/nipype/interfaces/tests/test_auto_Quickshear.py @@ -16,16 +16,19 @@ def test_Quickshear_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=1, ), mask_file=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), out_file=dict( argstr='%s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_defaced', @@ -38,7 +41,7 @@ def test_Quickshear_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Quickshear_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Quickshear.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_Reorient.py b/nipype/interfaces/tests/test_auto_Reorient.py index 2e45a1ca7f..76d393e0df 100644 --- a/nipype/interfaces/tests/test_auto_Reorient.py +++ b/nipype/interfaces/tests/test_auto_Reorient.py @@ -5,7 +5,10 @@ def test_Reorient_inputs(): input_map = dict( - in_file=dict(mandatory=True, ), + in_file=dict( + extensions=None, + mandatory=True, + ), orientation=dict(usedefault=True, ), ) inputs = Reorient.input_spec() @@ -15,8 +18,8 @@ def test_Reorient_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_Reorient_outputs(): output_map = dict( - out_file=dict(), - transform=dict(), + out_file=dict(extensions=None, ), + transform=dict(extensions=None, ), ) outputs = Reorient.output_spec() diff --git a/nipype/interfaces/tests/test_auto_Rescale.py b/nipype/interfaces/tests/test_auto_Rescale.py index e180c82988..2bdc274d81 100644 --- a/nipype/interfaces/tests/test_auto_Rescale.py +++ b/nipype/interfaces/tests/test_auto_Rescale.py @@ -5,10 +5,16 @@ def test_Rescale_inputs(): input_map = dict( - in_file=dict(mandatory=True, ), + in_file=dict( + extensions=None, + mandatory=True, + ), invert=dict(), percentile=dict(usedefault=True, ), - ref_file=dict(mandatory=True, ), + ref_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = Rescale.input_spec() @@ -16,7 +22,7 @@ def test_Rescale_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Rescale_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Rescale.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_SQLiteSink.py b/nipype/interfaces/tests/test_auto_SQLiteSink.py index ea03663c4c..190e8aeee4 100644 --- a/nipype/interfaces/tests/test_auto_SQLiteSink.py +++ b/nipype/interfaces/tests/test_auto_SQLiteSink.py @@ -5,7 +5,10 @@ def test_SQLiteSink_inputs(): input_map = dict( - 
database_file=dict(mandatory=True, ), + database_file=dict( + extensions=None, + mandatory=True, + ), table_name=dict(mandatory=True, ), ) inputs = SQLiteSink.input_spec() diff --git a/nipype/interfaces/tests/test_auto_SignalExtraction.py b/nipype/interfaces/tests/test_auto_SignalExtraction.py index bc76f5261a..b102048118 100644 --- a/nipype/interfaces/tests/test_auto_SignalExtraction.py +++ b/nipype/interfaces/tests/test_auto_SignalExtraction.py @@ -7,11 +7,17 @@ def test_SignalExtraction_inputs(): input_map = dict( class_labels=dict(mandatory=True, ), detrend=dict(usedefault=True, ), - in_file=dict(mandatory=True, ), + in_file=dict( + extensions=None, + mandatory=True, + ), incl_shared_variance=dict(usedefault=True, ), include_global=dict(usedefault=True, ), label_files=dict(mandatory=True, ), - out_file=dict(usedefault=True, ), + out_file=dict( + extensions=None, + usedefault=True, + ), ) inputs = SignalExtraction.input_spec() @@ -19,7 +25,7 @@ def test_SignalExtraction_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SignalExtraction_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = SignalExtraction.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_SplitNifti.py b/nipype/interfaces/tests/test_auto_SplitNifti.py index e1f6539fab..18e8ee9798 100644 --- a/nipype/interfaces/tests/test_auto_SplitNifti.py +++ b/nipype/interfaces/tests/test_auto_SplitNifti.py @@ -5,7 +5,10 @@ def test_SplitNifti_inputs(): input_map = dict( - in_file=dict(mandatory=True, ), + in_file=dict( + extensions=None, + mandatory=True, + ), out_ext=dict(usedefault=True, ), out_format=dict(), out_path=dict(), diff --git a/nipype/interfaces/tests/test_auto_XNATSink.py b/nipype/interfaces/tests/test_auto_XNATSink.py index b4db5ec8d3..b3e800e4e4 100644 --- a/nipype/interfaces/tests/test_auto_XNATSink.py +++ b/nipype/interfaces/tests/test_auto_XNATSink.py @@ -9,6 +9,7 @@ def test_XNATSink_inputs(): assessor_id=dict(xor=['reconstruction_id'], ), cache_dir=dict(), config=dict( + extensions=None, mandatory=True, xor=['server'], ), diff --git a/nipype/interfaces/tests/test_auto_XNATSource.py b/nipype/interfaces/tests/test_auto_XNATSource.py index 8faa79af81..adb5d8657a 100644 --- a/nipype/interfaces/tests/test_auto_XNATSource.py +++ b/nipype/interfaces/tests/test_auto_XNATSource.py @@ -7,6 +7,7 @@ def test_XNATSource_inputs(): input_map = dict( cache_dir=dict(), config=dict( + extensions=None, mandatory=True, xor=['server'], ), diff --git a/nipype/interfaces/utility/tests/test_auto_AssertEqual.py b/nipype/interfaces/utility/tests/test_auto_AssertEqual.py index 284e0f4d62..d0af3a5ed9 100644 --- a/nipype/interfaces/utility/tests/test_auto_AssertEqual.py +++ b/nipype/interfaces/utility/tests/test_auto_AssertEqual.py @@ -5,8 +5,14 @@ def test_AssertEqual_inputs(): input_map = dict( - volume1=dict(mandatory=True, ), - volume2=dict(mandatory=True, ), + volume1=dict( + extensions=None, + mandatory=True, + ), + volume2=dict( + extensions=None, + mandatory=True, + ), ) inputs = AssertEqual.input_spec() diff --git a/nipype/interfaces/utility/tests/test_auto_CSVReader.py b/nipype/interfaces/utility/tests/test_auto_CSVReader.py index 8a51ca4170..b0ce577808 100644 --- a/nipype/interfaces/utility/tests/test_auto_CSVReader.py +++ b/nipype/interfaces/utility/tests/test_auto_CSVReader.py @@ -6,7 +6,10 @@ def test_CSVReader_inputs(): input_map = dict( 
header=dict(usedefault=True, ), - in_file=dict(mandatory=True, ), + in_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = CSVReader.input_spec() diff --git a/nipype/interfaces/utility/tests/test_auto_Rename.py b/nipype/interfaces/utility/tests/test_auto_Rename.py index 1e6e1cab34..6b56badd6f 100644 --- a/nipype/interfaces/utility/tests/test_auto_Rename.py +++ b/nipype/interfaces/utility/tests/test_auto_Rename.py @@ -6,7 +6,10 @@ def test_Rename_inputs(): input_map = dict( format_string=dict(mandatory=True, ), - in_file=dict(mandatory=True, ), + in_file=dict( + extensions=None, + mandatory=True, + ), keep_ext=dict(), parse_string=dict(), use_fullpath=dict(usedefault=True, ), diff --git a/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py b/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py index 785e87e8b1..d5e5958ef4 100644 --- a/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py +++ b/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py @@ -8,6 +8,7 @@ def test_Vnifti2Image_inputs(): args=dict(argstr='%s', ), attributes=dict( argstr='-attr %s', + extensions=None, position=2, ), environ=dict( @@ -16,11 +17,13 @@ def test_Vnifti2Image_inputs(): ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, position=1, ), out_file=dict( argstr='-out %s', + extensions=None, hash_files=False, keep_extension=False, name_source=['in_file'], @@ -34,7 +37,7 @@ def test_Vnifti2Image_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Vnifti2Image_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Vnifti2Image.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/vista/tests/test_auto_VtoMat.py b/nipype/interfaces/vista/tests/test_auto_VtoMat.py index ee16266402..c789175132 100644 --- a/nipype/interfaces/vista/tests/test_auto_VtoMat.py +++ b/nipype/interfaces/vista/tests/test_auto_VtoMat.py @@ -12,11 +12,13 @@ def test_VtoMat_inputs(): ), in_file=dict( argstr='-in %s', + extensions=None, mandatory=True, position=1, ), out_file=dict( argstr='-out %s', + extensions=None, hash_files=False, keep_extension=False, name_source=['in_file'], @@ -30,7 +32,7 @@ def test_VtoMat_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_VtoMat_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = VtoMat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/workbench/tests/test_auto_CiftiSmooth.py b/nipype/interfaces/workbench/tests/test_auto_CiftiSmooth.py index b510a3b34e..b77a318deb 100644 --- a/nipype/interfaces/workbench/tests/test_auto_CiftiSmooth.py +++ b/nipype/interfaces/workbench/tests/test_auto_CiftiSmooth.py @@ -8,15 +8,18 @@ def test_CiftiSmooth_inputs(): args=dict(argstr='%s', ), cerebellum_corrected_areas=dict( argstr='cerebellum-corrected-areas %s', + extensions=None, position=10, requires=['cerebellum_surf'], ), cerebellum_surf=dict( argstr='-cerebellum-surface %s', + extensions=None, position=9, ), cifti_roi=dict( argstr='-cifti-roi %s', + extensions=None, position=11, ), direction=dict( @@ -38,15 +41,18 @@ def test_CiftiSmooth_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=0, ), left_corrected_areas=dict( argstr='-left-corrected-areas %s', + extensions=None, position=6, ), left_surf=dict( 
argstr='-left-surface %s', + extensions=None, mandatory=True, position=5, ), @@ -56,6 +62,7 @@ def test_CiftiSmooth_inputs(): ), out_file=dict( argstr='%s', + extensions=None, keep_extension=True, name_source=['in_file'], name_template='smoothed_%s.nii', @@ -63,10 +70,12 @@ def test_CiftiSmooth_inputs(): ), right_corrected_areas=dict( argstr='-right-corrected-areas %s', + extensions=None, position=8, ), right_surf=dict( argstr='-right-surface %s', + extensions=None, mandatory=True, position=7, ), @@ -87,7 +96,7 @@ def test_CiftiSmooth_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CiftiSmooth_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = CiftiSmooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/workbench/tests/test_auto_MetricResample.py b/nipype/interfaces/workbench/tests/test_auto_MetricResample.py index 46a66aa728..ad6494bc09 100644 --- a/nipype/interfaces/workbench/tests/test_auto_MetricResample.py +++ b/nipype/interfaces/workbench/tests/test_auto_MetricResample.py @@ -18,10 +18,12 @@ def test_MetricResample_inputs(): args=dict(argstr='%s', ), current_area=dict( argstr='%s', + extensions=None, position=6, ), current_sphere=dict( argstr='%s', + extensions=None, mandatory=True, position=1, ), @@ -31,6 +33,7 @@ def test_MetricResample_inputs(): ), in_file=dict( argstr='%s', + extensions=None, mandatory=True, position=0, ), @@ -45,15 +48,18 @@ def test_MetricResample_inputs(): ), new_area=dict( argstr='%s', + extensions=None, position=7, ), new_sphere=dict( argstr='%s', + extensions=None, mandatory=True, position=2, ), out_file=dict( argstr='%s', + extensions=None, keep_extension=True, name_source=['new_sphere'], name_template='%s.out', @@ -61,6 +67,7 @@ def test_MetricResample_inputs(): ), roi_metric=dict( argstr='-current-roi %s', + extensions=None, position=8, ), valid_roi_out=dict( @@ -75,8 +82,8 @@ def test_MetricResample_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_MetricResample_outputs(): output_map = dict( - out_file=dict(), - roi_file=dict(), + out_file=dict(extensions=None, ), + roi_file=dict(extensions=None, ), ) outputs = MetricResample.output_spec() From d29ea7b5b754f31a1174965650ae874900a01ca7 Mon Sep 17 00:00:00 2001 From: oesteban Date: Wed, 17 Jul 2019 11:34:35 -0700 Subject: [PATCH 0257/1665] fix: error in Makefile --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 095452210a..48f2c112c2 100644 --- a/Makefile +++ b/Makefile @@ -56,7 +56,7 @@ inplace: $(PYTHON) setup.py build_ext -i test-code: in - py.test --doctest-module nipype + py.test --doctest-modules nipype test-coverage: clean-tests in py.test --doctest-modules --cov-config .coveragerc --cov=nipype nipype From 005281435b096d94c793db7b331822aac34914dc Mon Sep 17 00:00:00 2001 From: oesteban Date: Wed, 17 Jul 2019 11:34:51 -0700 Subject: [PATCH 0258/1665] sty: further pep257 changes --- nipype/interfaces/afni/model.py | 4 +++- nipype/interfaces/afni/preprocess.py | 3 +-- nipype/interfaces/afni/svm.py | 3 +-- nipype/interfaces/afni/utils.py | 4 +++- 4 files changed, 8 insertions(+), 6 deletions(-) diff --git a/nipype/interfaces/afni/model.py b/nipype/interfaces/afni/model.py index 2d62f7dfa4..e091a87c57 100644 --- a/nipype/interfaces/afni/model.py +++ b/nipype/interfaces/afni/model.py @@ -1,11 +1,13 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: 
python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft = python sts = 4 ts = 4 sw = 4 et: -"""AFNI modeling interfaces +""" +AFNI modeling interfaces. Examples -------- See the docstrings of the individual classes for examples. + """ from __future__ import (print_function, division, unicode_literals, absolute_import) diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index 9729e22fe2..c4934fa8b8 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -1,8 +1,7 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -"""AFNI preprocessing interfaces -""" +"""AFNI preprocessing interfaces.""" from __future__ import (print_function, division, unicode_literals, absolute_import) from builtins import open diff --git a/nipype/interfaces/afni/svm.py b/nipype/interfaces/afni/svm.py index d465c1caaa..4a813ddb7e 100644 --- a/nipype/interfaces/afni/svm.py +++ b/nipype/interfaces/afni/svm.py @@ -1,8 +1,7 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft = python sts = 4 ts = 4 sw = 4 et: -"""Afni svm interfaces -""" +"""AFNI's svm interfaces.""" from __future__ import (print_function, division, unicode_literals, absolute_import) diff --git a/nipype/interfaces/afni/utils.py b/nipype/interfaces/afni/utils.py index 5fe3e199ec..e16e1bbd79 100644 --- a/nipype/interfaces/afni/utils.py +++ b/nipype/interfaces/afni/utils.py @@ -1,11 +1,13 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft = python sts = 4 ts = 4 sw = 4 et: -"""AFNI utility interfaces +""" +AFNI utility interfaces. Examples -------- See the docstrings of the individual classes for examples. + """ from __future__ import (print_function, division, unicode_literals, absolute_import) From 772d0c86d1bd3a796f65313038473a733375c1bf Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Wed, 17 Jul 2019 11:50:00 -0700 Subject: [PATCH 0259/1665] Update rtd_requirements.txt Co-Authored-By: Chris Markiewicz --- rtd_requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/rtd_requirements.txt b/rtd_requirements.txt index a47818f6d7..8cb274347a 100644 --- a/rtd_requirements.txt +++ b/rtd_requirements.txt @@ -9,6 +9,7 @@ numpy>=1.9.0 numpydoc packaging prov>=1.5.2 +neurdflib psutil pydot>=1.2.3 pydotplus From 8bee7e6e41b706a8eeabeb680dcbf9936f4ab8d5 Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Wed, 17 Jul 2019 12:22:26 -0700 Subject: [PATCH 0260/1665] Update nipype/interfaces/afni/preprocess.py [skip ci] Co-Authored-By: Chris Markiewicz --- nipype/interfaces/afni/preprocess.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index c4934fa8b8..c3ce52c097 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -3090,7 +3090,7 @@ class QwarpInputSpec(AFNICommandInputSpec): desc="""\ Sets the prefix/suffix for the output datasets. \ * The source dataset is warped to match the base \ -and gets prefix \'ppp\'. (Except if \'-plusminus\' is used.) \ +and gets prefix 'ppp'. (Except if '-plusminus' is used.) * The final interpolation to this output dataset is \ done using the \'wsinc5\' method. 
See the output of \ 3dAllineate -HELP \ From 9257f756eafcc4bc19577d152c80017e7041c70d Mon Sep 17 00:00:00 2001 From: oesteban Date: Wed, 17 Jul 2019 12:24:54 -0700 Subject: [PATCH 0261/1665] sty: break lines in desc, do not escape quote marks [skip ci] --- nipype/interfaces/afni/preprocess.py | 66 ++++++++++++++-------------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index c3ce52c097..ee81ee7e2f 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -3088,39 +3088,39 @@ class QwarpInputSpec(AFNICommandInputSpec): name_template='ppp_%s', name_source=['in_file'], desc="""\ -Sets the prefix/suffix for the output datasets. \ -* The source dataset is warped to match the base \ -and gets prefix 'ppp'. (Except if '-plusminus' is used.) -* The final interpolation to this output dataset is \ -done using the \'wsinc5\' method. See the output of \ - 3dAllineate -HELP \ -(in the "Modifying \'-final wsinc5\'" section) for \ -the lengthy technical details. \ -* The 3D warp used is saved in a dataset with \ -prefix \'ppp_WARP\' -- this dataset can be used \ -with 3dNwarpApply and 3dNwarpCat, for example. \ -* To be clear, this is the warp from source dataset \ - coordinates to base dataset coordinates, where the \ - values at each base grid point are the xyz displacments \ - needed to move that grid point\'s xyz values to the \ - corresponding xyz values in the source dataset: \ - base( (x,y,z) + WARP(x,y,z) ) matches source(x,y,z) \ - Another way to think of this warp is that it \'pulls\' \ - values back from source space to base space. \ -* 3dNwarpApply would use \'ppp_WARP\' to transform datasets \ -aligned with the source dataset to be aligned with the \ -base dataset. \ -** If you do NOT want this warp saved, use the option \'-nowarp\'. \ --->> (However, this warp is usually the most valuable possible output!) \ -* If you want to calculate and save the inverse 3D warp, \ -use the option \'-iwarp\'. This inverse warp will then be \ -saved in a dataset with prefix \'ppp_WARPINV\'. \ -* This inverse warp could be used to transform data from base \ -space to source space, if you need to do such an operation. \ -* You can easily compute the inverse later, say by a command like \ - 3dNwarpCat -prefix Z_WARPINV \'INV(Z_WARP+tlrc)\' \ -or the inverse can be computed as needed in 3dNwarpApply, like \ - 3dNwarpApply -nwarp \'INV(Z_WARP+tlrc)\' -source Dataset.nii ...""") +Sets the prefix/suffix for the output datasets. +* The source dataset is warped to match the base +and gets prefix 'ppp'. (Except if '-plusminus' is used +* The final interpolation to this output dataset is +done using the 'wsinc5' method. See the output of + 3dAllineate -HELP +(in the "Modifying '-final wsinc5'" section) for +the lengthy technical details. +* The 3D warp used is saved in a dataset with +prefix 'ppp_WARP' -- this dataset can be used +with 3dNwarpApply and 3dNwarpCat, for example. +* To be clear, this is the warp from source dataset + coordinates to base dataset coordinates, where the + values at each base grid point are the xyz displacments + needed to move that grid point's xyz values to the + corresponding xyz values in the source dataset: + base( (x,y,z) + WARP(x,y,z) ) matches source(x,y,z) + Another way to think of this warp is that it 'pulls' + values back from source space to base space. 
+* 3dNwarpApply would use 'ppp_WARP' to transform datasets +aligned with the source dataset to be aligned with the +base dataset. +** If you do NOT want this warp saved, use the option '-nowarp'. +-->> (However, this warp is usually the most valuable possible output!) +* If you want to calculate and save the inverse 3D warp, +use the option '-iwarp'. This inverse warp will then be +saved in a dataset with prefix 'ppp_WARPINV'. +* This inverse warp could be used to transform data from base +space to source space, if you need to do such an operation. +* You can easily compute the inverse later, say by a command like + 3dNwarpCat -prefix Z_WARPINV 'INV(Z_WARP+tlrc)' +or the inverse can be computed as needed in 3dNwarpApply, like + 3dNwarpApply -nwarp 'INV(Z_WARP+tlrc)' -source Dataset.nii ...""") resample = traits.Bool( desc='This option simply resamples the source dataset to match the' 'base dataset grid. You can use this if the two datasets' From e4114e1b5b1ab051b5adafcb7a3d8e028b3b9c86 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 17 Jul 2019 18:48:20 -0400 Subject: [PATCH 0262/1665] MAINT: Bump neurodocker version --- docker/generate_dockerfiles.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/generate_dockerfiles.sh b/docker/generate_dockerfiles.sh index fd42dc4c8d..37fc5d338b 100755 --- a/docker/generate_dockerfiles.sh +++ b/docker/generate_dockerfiles.sh @@ -52,8 +52,8 @@ do esac done -# neurodocker version 0.4.1-22-g7c44e01 -NEURODOCKER_IMAGE="kaczmarj/neurodocker:master@sha256:858632a7533cac100f70932749b4cfc77fc40f667f41fca208f406215cff8a27" +# neurodocker version 0.5.0-3-g1788917 +NEURODOCKER_IMAGE="kaczmarj/neurodocker:master@sha256:ac2085702daac716481daae5da055e2062be52075f8f3881672e958e0cd53e6b" # neurodebian:stretch-non-free pulled on September 19, 2018 BASE_IMAGE="neurodebian:stretch-non-free@sha256:7cd978427d7ad215834fee221d0536ed7825b3cddebc481eba2d792dfc2f7332" From 251362efba81be01e9e3b82d8bf6389fedaea7c5 Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 18 Jul 2019 21:29:55 -0700 Subject: [PATCH 0263/1665] MAINT: Refactor ``aggregate_outputs`` for readability --- nipype/interfaces/base/core.py | 59 +++++++++++++++++----------------- 1 file changed, 30 insertions(+), 29 deletions(-) diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index 0011c925dd..34037d9dfd 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -204,10 +204,10 @@ def _check_requires(self, spec, name, value): ] if any(values) and isdefined(value): if len(values) > 1: - fmt = ("%s requires values for inputs %s because '%s' is set. " + fmt = ("%s requires values for inputs %s because '%s' is set. " "For a list of required inputs, see %s.help()") else: - fmt = ("%s requires a value for input %s because '%s' is set. " + fmt = ("%s requires a value for input %s because '%s' is set. 
" "For a list of required inputs, see %s.help()") msg = fmt % (self.__class__.__name__, ', '.join("'%s'" % req for req in spec.requires), @@ -450,34 +450,35 @@ def _list_outputs(self): return None def aggregate_outputs(self, runtime=None, needed_outputs=None): - """ Collate expected outputs and check for existence - """ - - predicted_outputs = self._list_outputs() - outputs = self._outputs() - if predicted_outputs: - _unavailable_outputs = [] - if outputs: - _unavailable_outputs = \ - self._check_version_requirements(self._outputs()) - for key, val in list(predicted_outputs.items()): - if needed_outputs and key not in needed_outputs: - continue - if key in _unavailable_outputs: - raise KeyError(('Output trait %s not available in version ' - '%s of interface %s. Please inform ' - 'developers.') % (key, self.version, - self.__class__.__name__)) - try: - setattr(outputs, key, val) - except TraitError as error: - if getattr(error, 'info', - 'default').startswith('an existing'): - msg = ("File/Directory '%s' not found for %s output " - "'%s'." % (val, self.__class__.__name__, key)) - raise FileNotFoundError(msg) - raise error + """Collate expected outputs and apply output traits validation.""" + outputs = self._outputs() # Generate an empty output spec object + predicted_outputs = self._list_outputs() # Predictions from _list_outputs + if not predicted_outputs: + return outputs + # Precalculate the list of output trait names that should be aggregated + aggregate_names = set(predicted_outputs.keys()) + if needed_outputs is not None: + aggregate_names = set(needed_outputs).intersection(aggregate_names) + + if aggregate_names: # Make sure outputs are compatible + _na_outputs = self._check_version_requirements(outputs) + na_names = aggregate_names.intersection(set(_na_outputs)) + if na_names: + raise TypeError("""\ +Output trait(s) %s not available in version %s of interface %s.\ +""" % (', '.join(na_names), self.version, self.__class__.__name__)) + + for key in aggregate_names: # Final aggregation + val = predicted_outputs[key] + try: + setattr(outputs, key, val) + except TraitError as error: + if 'an existing' in getattr(error, 'info', 'default'): + msg = "No such file or directory for output '%s' of a %s interface" % \ + (key, self.__class__.__name__) + raise FileNotFoundError(val, message=msg) + raise error return outputs @property From 0c6d121883a8896cc1caf148f9a99d1e5efbb083 Mon Sep 17 00:00:00 2001 From: oesteban Date: Fri, 19 Jul 2019 19:40:49 -0700 Subject: [PATCH 0264/1665] fix: failing test since raised exception has been changed --- nipype/interfaces/base/tests/test_core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/base/tests/test_core.py b/nipype/interfaces/base/tests/test_core.py index 265edc444f..3192e75f81 100644 --- a/nipype/interfaces/base/tests/test_core.py +++ b/nipype/interfaces/base/tests/test_core.py @@ -289,7 +289,7 @@ def _list_outputs(self): return {'foo': 1} obj = DerivedInterface1() - with pytest.raises(KeyError): + with pytest.raises(TypeError): obj.run() From 79228aa5ef746042f07cd2631d27936381d88ec5 Mon Sep 17 00:00:00 2001 From: mjoseph Date: Thu, 25 Jul 2019 10:55:27 -0400 Subject: [PATCH 0265/1665] update dwibiascorrect --- nipype/interfaces/mrtrix3/preprocess.py | 24 +++---------------- .../mrtrix3/tests/test_auto_DWIBiasCorrect.py | 15 ++---------- 2 files changed, 5 insertions(+), 34 deletions(-) diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py index 2be3e532b4..9e2b0404bc 
100644 --- a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ -179,27 +179,16 @@ class DWIBiasCorrectInputSpec(MRTrix3BaseInputSpec): in_mask = File( argstr='-mask %s', desc='input mask image for bias field estimation') - _xor_methods = ('use_ants', 'use_fsl') use_ants = traits.Bool( - default_value=True, - usedefault=True, argstr='-ants', desc='use ANTS N4 to estimate the inhomogeneity field', - xor=_xor_methods) + xor=['use_fsl']) use_fsl = traits.Bool( argstr='-fsl', desc='use FSL FAST to estimate the inhomogeneity field', - xor=_xor_methods, + xor=['use_ants'], min_ver='5.0.10') - _xor_grads = ('mrtrix_grad', 'fsl_grad') - mrtrix_grad = File( - argstr='-grad %s', - desc='diffusion gradient table in MRtrix format', - xor=_xor_grads) - fsl_grad = File( - argstr='-fslgrad %s %s', - desc='diffusion gradient table in FSL bvecs/bvals format', - xor=_xor_grads) + _xor_grads = ('grad_file', 'grad_fsl') bias = File( argstr='-bias %s', desc='bias field') @@ -237,13 +226,6 @@ class DWIBiasCorrect(MRTrix3Base): input_spec = DWIBiasCorrectInputSpec output_spec = DWIBiasCorrectOutputSpec - def _list_outputs(self): - outputs = self.output_spec().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) - if self.inputs.bias != Undefined: - outputs['bias'] = op.abspath(self.inputs.bias) - return outputs - class ResponseSDInputSpec(MRTrix3BaseInputSpec): algorithm = traits.Enum( diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py index d70545be88..ed6908bb98 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py @@ -15,11 +15,6 @@ def test_DWIBiasCorrect_inputs(): nohash=True, usedefault=True, ), - fsl_grad=dict( - argstr='-fslgrad %s %s', - extensions=None, - xor=('mrtrix_grad', 'fsl_grad'), - ), grad_file=dict( argstr='-grad %s', extensions=None, @@ -40,11 +35,6 @@ def test_DWIBiasCorrect_inputs(): argstr='-mask %s', extensions=None, ), - mrtrix_grad=dict( - argstr='-grad %s', - extensions=None, - xor=('mrtrix_grad', 'fsl_grad'), - ), nthreads=dict( argstr='-nthreads %d', nohash=True, @@ -60,13 +50,12 @@ def test_DWIBiasCorrect_inputs(): ), use_ants=dict( argstr='-ants', - usedefault=True, - xor=('use_ants', 'use_fsl'), + xor=['use_fsl'], ), use_fsl=dict( argstr='-fsl', min_ver='5.0.10', - xor=('use_ants', 'use_fsl'), + xor=['use_ants'], ), ) inputs = DWIBiasCorrect.input_spec() From 3d09e0a57e935f54348ab588ebcdcbba74d2221e Mon Sep 17 00:00:00 2001 From: mjoseph Date: Thu, 25 Jul 2019 15:53:47 -0400 Subject: [PATCH 0266/1665] update dwidenoise, mrdegibbs and dwibiascorrect --- nipype/interfaces/mrtrix3/base.py | 6 +- nipype/interfaces/mrtrix3/preprocess.py | 64 ++++++------------- .../mrtrix3/tests/test_auto_DWIBiasCorrect.py | 22 ++----- .../mrtrix3/tests/test_auto_DWIDenoise.py | 10 ++- .../mrtrix3/tests/test_auto_MRDeGibbs.py | 22 +++---- 5 files changed, 49 insertions(+), 75 deletions(-) diff --git a/nipype/interfaces/mrtrix3/base.py b/nipype/interfaces/mrtrix3/base.py index b4e050292e..dfcc2c3787 100644 --- a/nipype/interfaces/mrtrix3/base.py +++ b/nipype/interfaces/mrtrix3/base.py @@ -47,12 +47,14 @@ class MRTrix3BaseInputSpec(CommandLineInputSpec): grad_file = File( exists=True, argstr='-grad %s', - desc='dw gradient scheme (MRTrix format') + desc='dw gradient scheme (MRTrix format)', + xor=['grad_fsl']) grad_fsl = traits.Tuple( File(exists=True), File(exists=True), argstr='-fslgrad %s 
%s', - desc='(bvecs, bvals) dw gradient scheme (FSL format') + desc='(bvecs, bvals) dw gradient scheme (FSL format)', + xor=['grad_file']) bval_scale = traits.Enum( 'yes', 'no', diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py index 2be3e532b4..b486b4963f 100644 --- a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ -23,20 +23,27 @@ class DWIDenoiseInputSpec(MRTrix3BaseInputSpec): argstr='-mask %s', position=1, desc='mask image') - extent = traits.Tuple((traits.Int, traits.Int, traits.Int), + extent = traits.Tuple( + (traits.Int, traits.Int, traits.Int), argstr='-extent %d,%d,%d', desc='set the window size of the denoising filter. (default = 5,5,5)') noise = File( argstr='-noise %s', - desc='the output noise map') - out_file = File(name_template='%s_denoised', + name_template='%s_noise', name_source='in_file', keep_extension=True, + desc='the output noise map', + genfile=True) + out_file = File( argstr='%s', position=-1, + name_template='%s_denoised', + name_source='in_file', + keep_extension=True, desc='the output denoised DWI image', genfile=True) + class DWIDenoiseOutputSpec(TraitedSpec): noise = File(desc='the output noise map', exists=True) out_file = File(desc='the output denoised DWI image', exists=True) @@ -76,13 +83,6 @@ class DWIDenoise(MRTrix3Base): input_spec = DWIDenoiseInputSpec output_spec = DWIDenoiseOutputSpec - def _list_outputs(self): - outputs = self.output_spec().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) - if self.inputs.noise != Undefined: - outputs['noise'] = op.abspath(self.inputs.noise) - return outputs - class MRDeGibbsInputSpec(MRTrix3BaseInputSpec): in_file = File( @@ -92,8 +92,7 @@ class MRDeGibbsInputSpec(MRTrix3BaseInputSpec): mandatory=True, desc='input DWI image') axes = traits.ListInt( - default_value=[0,1], - usedefault=True, + [0,1], sep=',', minlen=2, maxlen=2, @@ -101,23 +100,21 @@ class MRDeGibbsInputSpec(MRTrix3BaseInputSpec): desc='indicate the plane in which the data was acquired (axial = 0,1; ' 'coronal = 0,2; sagittal = 1,2') nshifts = traits.Int( - default_value=20, - usedefault=True, + 20, argstr='-nshifts %d', desc='discretization of subpixel spacing (default = 20)') minW = traits.Int( - default_value=1, - usedefault=True, + 1, argstr='-minW %d', desc='left border of window used for total variation (TV) computation ' '(default = 1)') maxW = traits.Int( - default_value=3, - usedefault=True, + 3, argstr='-maxW %d', desc='right border of window used for total variation (TV) computation ' '(default = 3)') - out_file = File(name_template='%s_unr', + out_file = File( + name_template='%s_unr', name_source='in_file', keep_extension=True, argstr='%s', @@ -160,7 +157,7 @@ class MRDeGibbs(MRTrix3Base): >>> unring = mrt.MRDeGibbs() >>> unring.inputs.in_file = 'dwi.mif' >>> unring.cmdline - 'mrdegibbs -axes 0,1 -maxW 3 -minW 1 -nshifts 20 dwi.mif dwi_unr.mif' + 'mrdegibbs dwi.mif dwi_unr.mif' >>> unring.run() # doctest: +SKIP """ @@ -179,31 +176,19 @@ class DWIBiasCorrectInputSpec(MRTrix3BaseInputSpec): in_mask = File( argstr='-mask %s', desc='input mask image for bias field estimation') - _xor_methods = ('use_ants', 'use_fsl') use_ants = traits.Bool( - default_value=True, - usedefault=True, argstr='-ants', desc='use ANTS N4 to estimate the inhomogeneity field', - xor=_xor_methods) + xor=['use_fsl']) use_fsl = traits.Bool( argstr='-fsl', desc='use FSL FAST to estimate the inhomogeneity field', - xor=_xor_methods, - min_ver='5.0.10') - _xor_grads = 
('mrtrix_grad', 'fsl_grad') - mrtrix_grad = File( - argstr='-grad %s', - desc='diffusion gradient table in MRtrix format', - xor=_xor_grads) - fsl_grad = File( - argstr='-fslgrad %s %s', - desc='diffusion gradient table in FSL bvecs/bvals format', - xor=_xor_grads) + xor=['use_ants']) bias = File( argstr='-bias %s', desc='bias field') - out_file = File(name_template='%s_biascorr', + out_file = File( + name_template='%s_biascorr', name_source='in_file', keep_extension=True, argstr='%s', @@ -237,13 +222,6 @@ class DWIBiasCorrect(MRTrix3Base): input_spec = DWIBiasCorrectInputSpec output_spec = DWIBiasCorrectOutputSpec - def _list_outputs(self): - outputs = self.output_spec().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) - if self.inputs.bias != Undefined: - outputs['bias'] = op.abspath(self.inputs.bias) - return outputs - class ResponseSDInputSpec(MRTrix3BaseInputSpec): algorithm = traits.Enum( diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py index d70545be88..bd9ac531a6 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py @@ -15,16 +15,15 @@ def test_DWIBiasCorrect_inputs(): nohash=True, usedefault=True, ), - fsl_grad=dict( - argstr='-fslgrad %s %s', - extensions=None, - xor=('mrtrix_grad', 'fsl_grad'), - ), grad_file=dict( argstr='-grad %s', extensions=None, + xor=['grad_fsl'], + ), + grad_fsl=dict( + argstr='-fslgrad %s %s', + xor=['grad_file'], ), - grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', @@ -40,11 +39,6 @@ def test_DWIBiasCorrect_inputs(): argstr='-mask %s', extensions=None, ), - mrtrix_grad=dict( - argstr='-grad %s', - extensions=None, - xor=('mrtrix_grad', 'fsl_grad'), - ), nthreads=dict( argstr='-nthreads %d', nohash=True, @@ -60,13 +54,11 @@ def test_DWIBiasCorrect_inputs(): ), use_ants=dict( argstr='-ants', - usedefault=True, - xor=('use_ants', 'use_fsl'), + xor=['use_fsl'], ), use_fsl=dict( argstr='-fsl', - min_ver='5.0.10', - xor=('use_ants', 'use_fsl'), + xor=['use_ants'], ), ) inputs = DWIBiasCorrect.input_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py index 97acf7f526..cf57f839af 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py @@ -15,8 +15,12 @@ def test_DWIDenoise_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, + xor=['grad_fsl'], + ), + grad_fsl=dict( + argstr='-fslgrad %s %s', + xor=['grad_file'], ), - grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', @@ -36,6 +40,10 @@ def test_DWIDenoise_inputs(): noise=dict( argstr='-noise %s', extensions=None, + genfile=True, + keep_extension=True, + name_source='in_file', + name_template='%s_noise', ), nthreads=dict( argstr='-nthreads %d', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py index e6bdd21243..52e126cbb5 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py @@ -11,7 +11,6 @@ def test_MRDeGibbs_inputs(): maxlen=2, minlen=2, sep=',', - usedefault=True, ), bval_scale=dict(argstr='-bvalue_scaling %s', ), environ=dict( @@ -21,8 +20,12 @@ def test_MRDeGibbs_inputs(): 
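With ``use_ants`` and ``use_fsl`` reduced to two plain Booleans that ``xor`` each other, the bias-field algorithm is now chosen explicitly per run rather than defaulting to ANTs. A short sketch mirroring the doctest shipped with the interface:

    >>> import nipype.interfaces.mrtrix3 as mrt
    >>> bias_correct = mrt.DWIBiasCorrect()
    >>> bias_correct.inputs.in_file = 'dwi.mif'
    >>> bias_correct.inputs.use_ants = True  # enabling use_fsl as well is rejected by the xor check
    >>> bias_correct.run()  # doctest: +SKIP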
grad_file=dict( argstr='-grad %s', extensions=None, + xor=['grad_fsl'], + ), + grad_fsl=dict( + argstr='-fslgrad %s %s', + xor=['grad_file'], ), - grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', @@ -34,18 +37,9 @@ def test_MRDeGibbs_inputs(): mandatory=True, position=-2, ), - maxW=dict( - argstr='-maxW %d', - usedefault=True, - ), - minW=dict( - argstr='-minW %d', - usedefault=True, - ), - nshifts=dict( - argstr='-nshifts %d', - usedefault=True, - ), + maxW=dict(argstr='-maxW %d', ), + minW=dict(argstr='-minW %d', ), + nshifts=dict(argstr='-nshifts %d', ), nthreads=dict( argstr='-nthreads %d', nohash=True, From 043a871add1fb75c5e7eac7a2685d2e7ccac28dd Mon Sep 17 00:00:00 2001 From: mjoseph Date: Thu, 25 Jul 2019 16:45:21 -0400 Subject: [PATCH 0267/1665] fix failing doctests --- nipype/interfaces/mrtrix3/preprocess.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py index b486b4963f..902070bcf0 100644 --- a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ -74,8 +74,9 @@ class DWIDenoise(MRTrix3Base): >>> denoise = mrt.DWIDenoise() >>> denoise.inputs.in_file = 'dwi.mif' >>> denoise.inputs.mask = 'mask.mif' + >>> denoise.inputs.noise = 'noise.mif' >>> denoise.cmdline # doctest: +ELLIPSIS - 'dwidenoise -mask mask.mif dwi.mif dwi_denoised.mif' + 'dwidenoise -mask mask.mif -noise noise.mif dwi.mif dwi_denoised.mif' >>> denoise.run() # doctest: +SKIP """ @@ -213,6 +214,7 @@ class DWIBiasCorrect(MRTrix3Base): >>> import nipype.interfaces.mrtrix3 as mrt >>> bias_correct = mrt.DWIBiasCorrect() >>> bias_correct.inputs.in_file = 'dwi.mif' + >>> bias_correct.inputs.use_ants = True >>> bias_correct.cmdline 'dwibiascorrect -ants dwi.mif dwi_biascorr.mif' >>> bias_correct.run() # doctest: +SKIP From 91c5c84ae8db627024c2e4cdc79b71d5c52322de Mon Sep 17 00:00:00 2001 From: Michael Joseph Date: Fri, 26 Jul 2019 06:19:12 -0400 Subject: [PATCH 0268/1665] address suggested changes --- nipype/interfaces/mrtrix3/preprocess.py | 23 ++++++++++--------- .../mrtrix3/tests/test_auto_DWIDenoise.py | 2 -- .../mrtrix3/tests/test_auto_MRDeGibbs.py | 17 ++++++++++---- 3 files changed, 25 insertions(+), 17 deletions(-) diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py index 902070bcf0..07b9e83658 100644 --- a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ -32,16 +32,14 @@ class DWIDenoiseInputSpec(MRTrix3BaseInputSpec): name_template='%s_noise', name_source='in_file', keep_extension=True, - desc='the output noise map', - genfile=True) + desc='the output noise map') out_file = File( argstr='%s', position=-1, name_template='%s_denoised', name_source='in_file', keep_extension=True, - desc='the output denoised DWI image', - genfile=True) + desc='the output denoised DWI image') class DWIDenoiseOutputSpec(TraitedSpec): @@ -93,7 +91,8 @@ class MRDeGibbsInputSpec(MRTrix3BaseInputSpec): mandatory=True, desc='input DWI image') axes = traits.ListInt( - [0,1], + default_value=[0, 1], + use_default=True, sep=',', minlen=2, maxlen=2, @@ -101,16 +100,19 @@ class MRDeGibbsInputSpec(MRTrix3BaseInputSpec): desc='indicate the plane in which the data was acquired (axial = 0,1; ' 'coronal = 0,2; sagittal = 1,2') nshifts = traits.Int( - 20, + default_value=20, + use_default=True, argstr='-nshifts %d', desc='discretization of subpixel spacing (default = 
20)') minW = traits.Int( - 1, + default_value=1, + use_default=True, argstr='-minW %d', desc='left border of window used for total variation (TV) computation ' '(default = 1)') maxW = traits.Int( - 3, + default_value=3, + use_default=True, argstr='-maxW %d', desc='right border of window used for total variation (TV) computation ' '(default = 3)') @@ -120,8 +122,7 @@ class MRDeGibbsInputSpec(MRTrix3BaseInputSpec): keep_extension=True, argstr='%s', position=-1, - desc='the output unringed DWI image', - genfile=True) + desc='the output unringed DWI image') class MRDeGibbsOutputSpec(TraitedSpec): out_file = File(desc='the output unringed DWI image', exists=True) @@ -158,7 +159,7 @@ class MRDeGibbs(MRTrix3Base): >>> unring = mrt.MRDeGibbs() >>> unring.inputs.in_file = 'dwi.mif' >>> unring.cmdline - 'mrdegibbs dwi.mif dwi_unr.mif' + 'mrdegibbs -axes 0,1 -maxW 3 -minW 1 -nshifts 20 dwi.mif dwi_unr.mif' >>> unring.run() # doctest: +SKIP """ diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py index cf57f839af..64ef1284e7 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py @@ -40,7 +40,6 @@ def test_DWIDenoise_inputs(): noise=dict( argstr='-noise %s', extensions=None, - genfile=True, keep_extension=True, name_source='in_file', name_template='%s_noise', @@ -52,7 +51,6 @@ def test_DWIDenoise_inputs(): out_file=dict( argstr='%s', extensions=None, - genfile=True, keep_extension=True, name_source='in_file', name_template='%s_denoised', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py index 52e126cbb5..7982b6bf10 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py @@ -11,6 +11,7 @@ def test_MRDeGibbs_inputs(): maxlen=2, minlen=2, sep=',', + use_default=True, ), bval_scale=dict(argstr='-bvalue_scaling %s', ), environ=dict( @@ -37,9 +38,18 @@ def test_MRDeGibbs_inputs(): mandatory=True, position=-2, ), - maxW=dict(argstr='-maxW %d', ), - minW=dict(argstr='-minW %d', ), - nshifts=dict(argstr='-nshifts %d', ), + maxW=dict( + argstr='-maxW %d', + use_default=True, + ), + minW=dict( + argstr='-minW %d', + use_default=True, + ), + nshifts=dict( + argstr='-nshifts %d', + use_default=True, + ), nthreads=dict( argstr='-nthreads %d', nohash=True, @@ -47,7 +57,6 @@ def test_MRDeGibbs_inputs(): out_file=dict( argstr='%s', extensions=None, - genfile=True, keep_extension=True, name_source='in_file', name_template='%s_unr', From 535a4b06205a829593ddd1d1113b7dbf8e81a15c Mon Sep 17 00:00:00 2001 From: Michael Joseph Date: Fri, 26 Jul 2019 07:21:49 -0400 Subject: [PATCH 0269/1665] fix doctest --- nipype/interfaces/mrtrix3/preprocess.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py index 07b9e83658..e90a873988 100644 --- a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ -159,7 +159,7 @@ class MRDeGibbs(MRTrix3Base): >>> unring = mrt.MRDeGibbs() >>> unring.inputs.in_file = 'dwi.mif' >>> unring.cmdline - 'mrdegibbs -axes 0,1 -maxW 3 -minW 1 -nshifts 20 dwi.mif dwi_unr.mif' + 'mrdegibbs dwi.mif dwi_unr.mif' >>> unring.run() # doctest: +SKIP """ From aaf7d20f4cc7f7d031ad67f8775d65bb369885e3 Mon Sep 17 00:00:00 2001 From: mjoseph Date: Fri, 26 Jul 2019 10:06:06 -0400 Subject: [PATCH 
0270/1665] usedefault instead of use_default --- nipype/interfaces/mrtrix3/preprocess.py | 12 +++++++----- .../mrtrix3/tests/test_auto_DWIBiasCorrect.py | 2 ++ .../interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py | 8 ++++---- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py index e90a873988..3f8056c7dd 100644 --- a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ -92,7 +92,7 @@ class MRDeGibbsInputSpec(MRTrix3BaseInputSpec): desc='input DWI image') axes = traits.ListInt( default_value=[0, 1], - use_default=True, + usedefault=True, sep=',', minlen=2, maxlen=2, @@ -101,18 +101,18 @@ class MRDeGibbsInputSpec(MRTrix3BaseInputSpec): 'coronal = 0,2; sagittal = 1,2') nshifts = traits.Int( default_value=20, - use_default=True, + usedefault=True, argstr='-nshifts %d', desc='discretization of subpixel spacing (default = 20)') minW = traits.Int( default_value=1, - use_default=True, + usedefault=True, argstr='-minW %d', desc='left border of window used for total variation (TV) computation ' '(default = 1)') maxW = traits.Int( default_value=3, - use_default=True, + usedefault=True, argstr='-maxW %d', desc='right border of window used for total variation (TV) computation ' '(default = 3)') @@ -159,7 +159,7 @@ class MRDeGibbs(MRTrix3Base): >>> unring = mrt.MRDeGibbs() >>> unring.inputs.in_file = 'dwi.mif' >>> unring.cmdline - 'mrdegibbs dwi.mif dwi_unr.mif' + 'mrdegibbs -axes 0,1 -maxW 3 -minW 1 -nshifts 20 dwi.mif dwi_unr.mif' >>> unring.run() # doctest: +SKIP """ @@ -180,10 +180,12 @@ class DWIBiasCorrectInputSpec(MRTrix3BaseInputSpec): desc='input mask image for bias field estimation') use_ants = traits.Bool( argstr='-ants', + mandatory=True, desc='use ANTS N4 to estimate the inhomogeneity field', xor=['use_fsl']) use_fsl = traits.Bool( argstr='-fsl', + mandatory=True, desc='use FSL FAST to estimate the inhomogeneity field', xor=['use_ants']) bias = File( diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py index bd9ac531a6..f72f36daaa 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py @@ -54,10 +54,12 @@ def test_DWIBiasCorrect_inputs(): ), use_ants=dict( argstr='-ants', + mandatory=True, xor=['use_fsl'], ), use_fsl=dict( argstr='-fsl', + mandatory=True, xor=['use_ants'], ), ) diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py index 7982b6bf10..cd730465e1 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py @@ -11,7 +11,7 @@ def test_MRDeGibbs_inputs(): maxlen=2, minlen=2, sep=',', - use_default=True, + usedefault=True, ), bval_scale=dict(argstr='-bvalue_scaling %s', ), environ=dict( @@ -40,15 +40,15 @@ def test_MRDeGibbs_inputs(): ), maxW=dict( argstr='-maxW %d', - use_default=True, + usedefault=True, ), minW=dict( argstr='-minW %d', - use_default=True, + usedefault=True, ), nshifts=dict( argstr='-nshifts %d', - use_default=True, + usedefault=True, ), nthreads=dict( argstr='-nthreads %d', From 259e7bc1071a79fb41436d9a37790b6497853aa0 Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Fri, 26 Jul 2019 09:16:59 -0700 Subject: [PATCH 0271/1665] Apply suggestions from code review Co-Authored-By: Chris Markiewicz --- nipype/interfaces/base/core.py | 4 ++-- 1 
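The refactor of ``aggregate_outputs`` above first instantiates the output spec, intersects the predicted outputs with ``needed_outputs``, and only then assigns each value so the output traits can validate it. A minimal sketch of what that buys at run time (interface name and paths are hypothetical):

    >>> iface = SomeInterface(in_file='functional.nii')  # hypothetical interface
    >>> res = iface.run()                                # doctest: +SKIP
    >>> res.outputs.out_file                             # doctest: +SKIP
    '/working/dir/functional_out.nii'

If a predicted ``File(exists=True)`` output is not on disk, the aggregation raises ``FileNotFoundError``, and outputs that the installed tool version cannot produce are rejected before any assignment happens.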
file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index 34037d9dfd..279f9a81f0 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -457,13 +457,13 @@ def aggregate_outputs(self, runtime=None, needed_outputs=None): return outputs # Precalculate the list of output trait names that should be aggregated - aggregate_names = set(predicted_outputs.keys()) + aggregate_names = set(predicted_outputs) if needed_outputs is not None: aggregate_names = set(needed_outputs).intersection(aggregate_names) if aggregate_names: # Make sure outputs are compatible _na_outputs = self._check_version_requirements(outputs) - na_names = aggregate_names.intersection(set(_na_outputs)) + na_names = aggregate_names.intersection(_na_outputs) if na_names: raise TypeError("""\ Output trait(s) %s not available in version %s of interface %s.\ From bb736ae60e08f6dbf9e60e14871fdd995a744e7e Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Fri, 26 Jul 2019 10:13:37 -0700 Subject: [PATCH 0272/1665] Apply suggestions from code review Co-Authored-By: Chris Markiewicz --- nipype/interfaces/base/core.py | 3 ++- nipype/interfaces/base/tests/test_core.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index 279f9a81f0..9345ef731a 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -465,7 +465,8 @@ def aggregate_outputs(self, runtime=None, needed_outputs=None): _na_outputs = self._check_version_requirements(outputs) na_names = aggregate_names.intersection(_na_outputs) if na_names: - raise TypeError("""\ + # XXX Change to TypeError in Nipype 2.0 + raise KeyError("""\ Output trait(s) %s not available in version %s of interface %s.\ """ % (', '.join(na_names), self.version, self.__class__.__name__)) diff --git a/nipype/interfaces/base/tests/test_core.py b/nipype/interfaces/base/tests/test_core.py index 3192e75f81..265edc444f 100644 --- a/nipype/interfaces/base/tests/test_core.py +++ b/nipype/interfaces/base/tests/test_core.py @@ -289,7 +289,7 @@ def _list_outputs(self): return {'foo': 1} obj = DerivedInterface1() - with pytest.raises(TypeError): + with pytest.raises(KeyError): obj.run() From adda5f3905cec40dd6aa60f8184e6a6d49a3dcc0 Mon Sep 17 00:00:00 2001 From: oesteban Date: Wed, 31 Jul 2019 12:22:12 -0700 Subject: [PATCH 0273/1665] FIX: Python 3.4 is not compatible with the recent release of lxml-4.4.0 We are starting to see failed jobs in Travis. 
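Since the breakage is limited to Python 3.4, the pin that follows uses a PEP 508 environment marker so newer interpreters keep getting current lxml releases; the same line is mirrored in ``requirements.txt``. A minimal sketch of the dependency entry (list name follows ``nipype/info.py``):

    REQUIRES = [
        # constrain lxml only where 4.4.0 dropped compatibility
        'lxml<4.4.0; python_version == "3.4"',
    ]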
--- nipype/info.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nipype/info.py b/nipype/info.py index 448dec0c0e..d1aeb56f5a 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -140,6 +140,7 @@ def get_nipype_gitversion(): 'funcsigs', 'future>=%s' % FUTURE_MIN_VERSION, 'futures; python_version == "2.7"', + 'lxml<4.4.0; python_version == "3.4"', 'networkx>=%s ; python_version >= "3.0"' % NETWORKX_MIN_VERSION, 'networkx>=%s,<=%s ; python_version < "3.0"' % (NETWORKX_MIN_VERSION, NETWORKX_MAX_VERSION_27), 'nibabel>=%s' % NIBABEL_MIN_VERSION, From 39522fc3355ced7713234a20caf260ceedfe0229 Mon Sep 17 00:00:00 2001 From: oesteban Date: Wed, 31 Jul 2019 17:27:47 -0700 Subject: [PATCH 0274/1665] fix: synchronize ``requirements.txt``, which is what Travis is using --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 606fcf4d88..99b97a19a6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,6 +8,7 @@ numpy>=1.9.0 packaging pathlib2 prov>=1.5.2 +lxml<4.4.0 ; python_version == "3.4" neurdflib pydot>=1.2.3 pydotplus From 0da0f2b8ac4232c172e4dd9efffe552cd861015c Mon Sep 17 00:00:00 2001 From: oesteban Date: Wed, 31 Jul 2019 17:46:55 -0700 Subject: [PATCH 0275/1665] TST: Parametrize over config ``needed_outputs`` JoinNode expansion tests --- nipype/pipeline/engine/tests/test_join.py | 59 +++++++++++++---------- 1 file changed, 33 insertions(+), 26 deletions(-) diff --git a/nipype/pipeline/engine/tests/test_join.py b/nipype/pipeline/engine/tests/test_join.py index 77fc0f2fdf..9565bbd353 100644 --- a/nipype/pipeline/engine/tests/test_join.py +++ b/nipype/pipeline/engine/tests/test_join.py @@ -6,7 +6,9 @@ from __future__ import (print_function, division, unicode_literals, absolute_import) from builtins import open +import pytest +from .... import config from ... 
import engine as pe from ....interfaces import base as nib from ....interfaces.utility import IdentityInterface, Function, Merge @@ -45,19 +47,15 @@ class IncrementOutputSpec(nib.TraitedSpec): output1 = nib.traits.Int(desc='ouput') -class IncrementInterface(nib.BaseInterface): +class IncrementInterface(nib.SimpleInterface): input_spec = IncrementInputSpec output_spec = IncrementOutputSpec def _run_interface(self, runtime): runtime.returncode = 0 + self._results['output1'] = self.inputs.input1 + self.inputs.inc return runtime - def _list_outputs(self): - outputs = self._outputs().get() - outputs['output1'] = self.inputs.input1 + self.inputs.inc - return outputs - _sums = [] @@ -73,23 +71,19 @@ class SumOutputSpec(nib.TraitedSpec): operands = nib.traits.List(nib.traits.Int, desc='operands') -class SumInterface(nib.BaseInterface): +class SumInterface(nib.SimpleInterface): input_spec = SumInputSpec output_spec = SumOutputSpec def _run_interface(self, runtime): - runtime.returncode = 0 - return runtime - - def _list_outputs(self): global _sum global _sum_operands - outputs = self._outputs().get() - outputs['operands'] = self.inputs.input1 - _sum_operands.append(outputs['operands']) - outputs['output1'] = sum(self.inputs.input1) - _sums.append(outputs['output1']) - return outputs + runtime.returncode = 0 + self._results['operands'] = self.inputs.input1 + self._results['output1'] = sum(self.inputs.input1) + _sum_operands.append(self.inputs.input1) + _sums.append(sum(self.inputs.input1)) + return runtime _set_len = None @@ -148,9 +142,19 @@ def _list_outputs(self): return outputs -def test_join_expansion(tmpdir): +@pytest.mark.parametrize('needed_outputs', [True, False]) +def test_join_expansion(tmpdir, needed_outputs): + global _sums + global _sum_operands + global _products tmpdir.chdir() + # Clean up, just in case some other test modified them + _products = [] + _sum_operands = [] + _sums = [] + + config.set('execution', 'remove_unnecessary_outputs', ['false', 'true'][needed_outputs]) # Make the workflow. 
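The new parametrization works by flipping the ``remove_unnecessary_outputs`` execution option, which decides whether nodes prune outputs that no downstream node requests; a one-line sketch of setting it through the global config:

    >>> from nipype import config
    >>> config.set('execution', 'remove_unnecessary_outputs', 'true')  # prune outputs nothing downstream needs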
wf = pe.Workflow(name='test') # the iterated input node @@ -158,25 +162,27 @@ def test_join_expansion(tmpdir): inputspec.iterables = [('n', [1, 2])] # a pre-join node in the iterated path pre_join1 = pe.Node(IncrementInterface(), name='pre_join1') - wf.connect(inputspec, 'n', pre_join1, 'input1') # another pre-join node in the iterated path pre_join2 = pe.Node(IncrementInterface(), name='pre_join2') - wf.connect(pre_join1, 'output1', pre_join2, 'input1') # the join node join = pe.JoinNode( SumInterface(), joinsource='inputspec', joinfield='input1', name='join') - wf.connect(pre_join2, 'output1', join, 'input1') # an uniterated post-join node post_join1 = pe.Node(IncrementInterface(), name='post_join1') - wf.connect(join, 'output1', post_join1, 'input1') # a post-join node in the iterated path post_join2 = pe.Node(ProductInterface(), name='post_join2') - wf.connect(join, 'output1', post_join2, 'input1') - wf.connect(pre_join1, 'output1', post_join2, 'input2') + wf.connect([ + (inputspec, pre_join1, [('n', 'input1')]), + (pre_join1, pre_join2, [('output1', 'input1')]), + (pre_join1, post_join2, [('output1', 'input2')]), + (pre_join2, join, [('output1', 'input1')]), + (join, post_join1, [('output1', 'input1')]), + (join, post_join2, [('output1', 'input1')]), + ]) result = wf.run() # the two expanded pre-join predecessor nodes feed into one join node @@ -185,8 +191,8 @@ def test_join_expansion(tmpdir): # the expanded graph contains 2 * 2 = 4 iteration pre-join nodes, 1 join # node, 1 non-iterated post-join node and 2 * 1 iteration post-join nodes. # Nipype factors away the IdentityInterface. - assert len( - result.nodes()) == 8, "The number of expanded nodes is incorrect." + assert len(result.nodes()) == 8, "The number of expanded nodes is incorrect." + # the join Sum result is (1 + 1 + 1) + (2 + 1 + 1) assert len(_sums) == 1, "The number of join outputs is incorrect" assert _sums[ @@ -199,6 +205,7 @@ def test_join_expansion(tmpdir): "The number of iterated post-join outputs is incorrect" + def test_node_joinsource(tmpdir): """Test setting the joinsource to a Node.""" tmpdir.chdir() From ca5cae3c48b8ca41d41bdc0063905564fa531dfc Mon Sep 17 00:00:00 2001 From: oesteban Date: Tue, 16 Jul 2019 14:55:28 -0700 Subject: [PATCH 0276/1665] ENH: Modify ``Directory`` and ``File`` traits to get along with pathlib Closes #2959 --- nipype/interfaces/base/traits_extension.py | 9 ++++++--- nipype/interfaces/spm/base.py | 1 + 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/nipype/interfaces/base/traits_extension.py b/nipype/interfaces/base/traits_extension.py index de215beb96..fdb79f93bb 100644 --- a/nipype/interfaces/base/traits_extension.py +++ b/nipype/interfaces/base/traits_extension.py @@ -38,6 +38,12 @@ if USING_PATHLIB2: from future.types.newstr import newstr +try: + from pathlib import Path +except ImportError: + from pathlib2 import Path + + if traits_version < '3.7.0': raise ImportError('Traits version 3.7.0 or higher must be installed') @@ -153,7 +159,6 @@ def validate(self, objekt, name, value, return_pathlike=False): return value - class Directory(BasePath): """ Defines a trait whose value must be a directory path. @@ -194,14 +199,12 @@ class Directory(BasePath): >>> a.foo 'relative_dir' - >>> class A(TraitedSpec): ... foo = Directory('tmpdir') >>> a = A() >>> a.foo # doctest: +ELLIPSIS - >>> class A(TraitedSpec): ... 
foo = Directory('tmpdir', usedefault=True) >>> a = A() diff --git a/nipype/interfaces/spm/base.py b/nipype/interfaces/spm/base.py index fda02d40f1..08a31d8185 100644 --- a/nipype/interfaces/spm/base.py +++ b/nipype/interfaces/spm/base.py @@ -605,3 +605,4 @@ def __init__(self, value=NoDefaultSpecified, exists=False, resolve=False, **meta super(ImageFileSPM, self).__init__( value=value, exists=exists, types=['nifti1', 'nifti2'], allow_compressed=False, resolve=resolve, **metadata) + From 5af9dc309c1716b548ad8aee55a776fd75f1b3e0 Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 18 Jul 2019 22:40:40 -0700 Subject: [PATCH 0277/1665] ENH: Add resolve/rebase ``BasePath`` traits methods & tests Two new methods ``resolve_path_traits`` and ``rebase_path_traits`` are being included. They take trait instances from a spec (selected via ``spec.trait('traitname')``, the value and a base path. These two functions will be usefull to progress towards #2944. --- .../base/tests/test_traits_extension.py | 152 ++++++++++++++++++ nipype/interfaces/base/traits_extension.py | 123 ++++++++++++++ 2 files changed, 275 insertions(+) create mode 100644 nipype/interfaces/base/tests/test_traits_extension.py diff --git a/nipype/interfaces/base/tests/test_traits_extension.py b/nipype/interfaces/base/tests/test_traits_extension.py new file mode 100644 index 0000000000..4a2b884921 --- /dev/null +++ b/nipype/interfaces/base/tests/test_traits_extension.py @@ -0,0 +1,152 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import print_function, unicode_literals + +from ... import base as nib +from ..traits_extension import rebase_path_traits, resolve_path_traits, Path + + +class _test_spec(nib.TraitedSpec): + a = nib.traits.File() + b = nib.traits.Tuple(nib.File(), + nib.File()) + c = nib.traits.List(nib.File()) + d = nib.traits.Either(nib.File(), nib.traits.Float()) + e = nib.OutputMultiObject(nib.File()) + f = nib.traits.Dict(nib.Str, nib.File()) + g = nib.traits.Either(nib.File, nib.Str) + h = nib.Str + ee = nib.OutputMultiObject(nib.Str) + + +def test_rebase_path_traits(): + """Check rebase_path_traits.""" + spec = _test_spec() + + a = rebase_path_traits( + spec.trait('a'), '/some/path/f1.txt', '/some/path') + assert '%s' % a == 'f1.txt' + + b = rebase_path_traits( + spec.trait('b'), ('/some/path/f1.txt', '/some/path/f2.txt'), '/some/path') + assert b == (Path('f1.txt'), Path('f2.txt')) + + c = rebase_path_traits( + spec.trait('c'), ['/some/path/f1.txt', '/some/path/f2.txt', '/some/path/f3.txt'], + '/some/path') + assert c == [Path('f1.txt'), Path('f2.txt'), Path('f3.txt')] + + d = rebase_path_traits( + spec.trait('d'), 2.0, '/some/path') + assert d == 2.0 + + d = rebase_path_traits( + spec.trait('d'), '/some/path/either.txt', '/some/path') + assert '%s' % d == 'either.txt' + + e = rebase_path_traits( + spec.trait('e'), ['/some/path/f1.txt', '/some/path/f2.txt', '/some/path/f3.txt'], + '/some/path') + assert e == [Path('f1.txt'), Path('f2.txt'), Path('f3.txt')] + + e = rebase_path_traits( + spec.trait('e'), [['/some/path/f1.txt', '/some/path/f2.txt'], [['/some/path/f3.txt']]], + '/some/path') + assert e == [[Path('f1.txt'), Path('f2.txt')], [[Path('f3.txt')]]] + + f = rebase_path_traits( + spec.trait('f'), {'1': '/some/path/f1.txt'}, '/some/path') + assert f == {'1': Path('f1.txt')} + + g = rebase_path_traits( + spec.trait('g'), 'some/path/either.txt', '/some/path') + assert '%s' % g == 'some/path/either.txt' + + g 
= rebase_path_traits( + spec.trait('g'), '/some/path/either.txt', '/some') + assert '%s' % g == 'path/either.txt' + + g = rebase_path_traits(spec.trait('g'), 'string', '/some') + assert '%s' % g == 'string' + + g = rebase_path_traits(spec.trait('g'), '2', '/some/path') + assert g == '2' # You dont want this one to be a Path + + h = rebase_path_traits(spec.trait('h'), '2', '/some/path') + assert h == '2' + + ee = rebase_path_traits( + spec.trait('ee'), [['/some/path/f1.txt', '/some/path/f2.txt'], [['/some/path/f3.txt']]], + '/some/path') + assert ee == [['/some/path/f1.txt', '/some/path/f2.txt'], [['/some/path/f3.txt']]] + + +def test_resolve_path_traits(): + """Check resolve_path_traits.""" + spec = _test_spec() + + a = resolve_path_traits( + spec.trait('a'), 'f1.txt', '/some/path') + assert a == Path('/some/path/f1.txt') + + b = resolve_path_traits( + spec.trait('b'), ('f1.txt', 'f2.txt'), '/some/path') + assert b == (Path('/some/path/f1.txt'), Path('/some/path/f2.txt')) + + c = resolve_path_traits( + spec.trait('c'), ['f1.txt', 'f2.txt', 'f3.txt'], + '/some/path') + assert c == [Path('/some/path/f1.txt'), Path('/some/path/f2.txt'), Path('/some/path/f3.txt')] + + d = resolve_path_traits( + spec.trait('d'), 2.0, '/some/path') + assert d == 2.0 + + d = resolve_path_traits( + spec.trait('d'), 'either.txt', '/some/path') + assert '%s' % d == '/some/path/either.txt' + + e = resolve_path_traits( + spec.trait('e'), ['f1.txt', 'f2.txt', 'f3.txt'], + '/some/path') + assert e == [Path('/some/path/f1.txt'), Path('/some/path/f2.txt'), Path('/some/path/f3.txt')] + + e = resolve_path_traits( + spec.trait('e'), [['f1.txt', 'f2.txt'], [['f3.txt']]], + '/some/path') + assert e == [[Path('/some/path/f1.txt'), Path('/some/path/f2.txt')], + [[Path('/some/path/f3.txt')]]] + + f = resolve_path_traits( + spec.trait('f'), {'1': 'path/f1.txt'}, '/some') + assert f == {'1': Path('/some/path/f1.txt')} + + g = resolve_path_traits( + spec.trait('g'), '/either.txt', '/some/path') + assert g == Path('/either.txt') + + # This is a problematic case, it is impossible to know whether this + # was meant to be a string or a file. + # Commented out because in this implementation, strings take precedence + # g = resolve_path_traits( + # spec.trait('g'), 'path/either.txt', '/some') + # assert g == Path('/some/path/either.txt') + + # This is a problematic case, it is impossible to know whether this + # was meant to be a string or a file. + g = resolve_path_traits(spec.trait('g'), 'string', '/some') + assert g == 'string' + + # This is a problematic case, it is impossible to know whether this + # was meant to be a string or a file. 
+ g = resolve_path_traits(spec.trait('g'), '2', '/some/path') + assert g == '2' # You dont want this one to be a Path + + h = resolve_path_traits(spec.trait('h'), '2', '/some/path') + assert h == '2' + + ee = resolve_path_traits( + spec.trait('ee'), [['f1.txt', 'f2.txt'], [['f3.txt']]], + '/some/path') + assert ee == [['f1.txt', 'f2.txt'], [['f3.txt']]] diff --git a/nipype/interfaces/base/traits_extension.py b/nipype/interfaces/base/traits_extension.py index fdb79f93bb..a2586564c6 100644 --- a/nipype/interfaces/base/traits_extension.py +++ b/nipype/interfaces/base/traits_extension.py @@ -30,6 +30,7 @@ import traits.api as traits from traits.trait_handlers import TraitType, NoDefaultSpecified from traits.trait_base import _Undefined +from traits.traits import _TraitMaker, trait_from from traits.api import Unicode from future import standard_library @@ -307,6 +308,11 @@ def validate(self, objekt, name, value, return_pathlike=False): return value +# Patch in traits these two new +traits.File = File +traits.Directory = Directory + + class ImageFile(File): """Defines a trait whose value must be a known neuroimaging file.""" @@ -468,3 +474,120 @@ class InputMultiObject(MultiObject): InputMultiPath = InputMultiObject OutputMultiPath = OutputMultiObject + + +class Tuple(traits.BaseTuple): + """Defines a new type of Tuple trait that reports inner types.""" + + def init_fast_validator(self, *args): + """Set up the C-level fast validator.""" + super(Tuple, self).init_fast_validator(*args) + self.fast_validate = args + + def inner_traits(self): + """Return the *inner trait* (or traits) for this trait.""" + return self.types + + +class PatchedEither(TraitType): + """Defines a trait whose value can be any of of a specified list of traits.""" + + def __init__(self, *traits, **metadata): + """Create a trait whose value can be any of of a specified list of traits.""" + metadata['alternatives'] = tuple(trait_from(t) for t in traits) + self.trait_maker = _TraitMaker( + metadata.pop("default", None), *traits, **metadata) + + def as_ctrait(self): + """Return a CTrait corresponding to the trait defined by this class.""" + return self.trait_maker.as_ctrait() + + +traits.Tuple = Tuple +traits.Either = PatchedEither + + +def _rebase_path(value, cwd): + if isinstance(value, list): + return [_rebase_path(v, cwd) for v in value] + + try: + value = Path(value) + except TypeError: + pass + else: + try: + value = Path(value).relative_to(cwd) + except ValueError: + pass + return value + + +def rebase_path_traits(thistrait, value, cwd): + """Rebase a BasePath-derived trait given an interface spec.""" + if thistrait.is_trait_type(BasePath): + value = _rebase_path(value, cwd) + elif thistrait.is_trait_type(traits.List): + innertrait, = thistrait.inner_traits + if not isinstance(value, (list, tuple)): + value = rebase_path_traits(innertrait, value, cwd) + else: + value = [rebase_path_traits(innertrait, v, cwd) + for v in value] + elif thistrait.is_trait_type(traits.Dict): + _, innertrait = thistrait.inner_traits + value = {k: rebase_path_traits(innertrait, v, cwd) + for k, v in value.items()} + elif thistrait.is_trait_type(Tuple): + value = tuple([rebase_path_traits(subtrait, v, cwd) + for subtrait, v in zip(thistrait.inner_traits, value)]) + elif thistrait.alternatives: + is_str = [f.is_trait_type((traits.String, traits.BaseStr, traits.BaseBytes, Str)) + for f in thistrait.alternatives] + if any(is_str) and isinstance(value, (bytes, str)) and not value.startswith('/'): + return value + for subtrait in 
thistrait.alternatives: + value = rebase_path_traits(subtrait, value, cwd) + return value + + +def _resolve_path(value, cwd): + if isinstance(value, list): + return [_resolve_path(v, cwd) for v in value] + + try: + value = Path(value) + except TypeError: + pass + else: + if not value.is_absolute(): + value = Path(cwd) / value + return value + + +def resolve_path_traits(thistrait, value, cwd): + """Resolve a BasePath-derived trait given an interface spec.""" + if thistrait.is_trait_type(BasePath): + value = _resolve_path(value, cwd) + elif thistrait.is_trait_type(traits.List): + innertrait, = thistrait.inner_traits + if not isinstance(value, (list, tuple)): + value = resolve_path_traits(innertrait, value, cwd) + else: + value = [resolve_path_traits(innertrait, v, cwd) + for v in value] + elif thistrait.is_trait_type(traits.Dict): + _, innertrait = thistrait.inner_traits + value = {k: resolve_path_traits(innertrait, v, cwd) + for k, v in value.items()} + elif thistrait.is_trait_type(Tuple): + value = tuple([resolve_path_traits(subtrait, v, cwd) + for subtrait, v in zip(thistrait.inner_traits, value)]) + elif thistrait.alternatives: + is_str = [f.is_trait_type((traits.String, traits.BaseStr, traits.BaseBytes, Str)) + for f in thistrait.alternatives] + if any(is_str) and isinstance(value, (bytes, str)) and not value.startswith('/'): + return value + for subtrait in thistrait.alternatives: + value = resolve_path_traits(subtrait, value, cwd) + return value From 5f74273237f96def205164a5f9ec5aeae796c447 Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Fri, 19 Jul 2019 12:21:34 -0700 Subject: [PATCH 0278/1665] Apply suggestions from code review Co-Authored-By: Chris Markiewicz --- nipype/interfaces/base/traits_extension.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/base/traits_extension.py b/nipype/interfaces/base/traits_extension.py index a2586564c6..dbe37eefb1 100644 --- a/nipype/interfaces/base/traits_extension.py +++ b/nipype/interfaces/base/traits_extension.py @@ -517,7 +517,7 @@ def _rebase_path(value, cwd): pass else: try: - value = Path(value).relative_to(cwd) + value = value.relative_to(cwd) except ValueError: pass return value @@ -561,7 +561,7 @@ def _resolve_path(value, cwd): pass else: if not value.is_absolute(): - value = Path(cwd) / value + value = Path(cwd).absolute() / value return value From 8a2a4e35a7bc6af8e77818dbe7beda7b4399dc56 Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Fri, 19 Jul 2019 12:22:04 -0700 Subject: [PATCH 0279/1665] Update nipype/interfaces/base/tests/test_traits_extension.py Co-Authored-By: Chris Markiewicz --- nipype/interfaces/base/tests/test_traits_extension.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/base/tests/test_traits_extension.py b/nipype/interfaces/base/tests/test_traits_extension.py index 4a2b884921..9cb6c12d86 100644 --- a/nipype/interfaces/base/tests/test_traits_extension.py +++ b/nipype/interfaces/base/tests/test_traits_extension.py @@ -26,7 +26,7 @@ def test_rebase_path_traits(): a = rebase_path_traits( spec.trait('a'), '/some/path/f1.txt', '/some/path') - assert '%s' % a == 'f1.txt' + assert a == Path('f1.txt') b = rebase_path_traits( spec.trait('b'), ('/some/path/f1.txt', '/some/path/f2.txt'), '/some/path') From cb66a49d215974764f13e300b15983d64e7c29e1 Mon Sep 17 00:00:00 2001 From: oesteban Date: Fri, 19 Jul 2019 15:05:45 -0700 Subject: [PATCH 0280/1665] fix: addressed @effigies' comments: - [x] Removed traits.api patches - [x] Deduplicated code with 
a _recurse_on_path_traits proxy (https://github.com/nipy/nipype/pull/2970#discussion_r305469775) - [x] Added the two proposed test cases - [x] Optimized loop (see https://github.com/nipy/nipype/pull/2970#discussion_r305466839) --- nipype/interfaces/base/__init__.py | 5 +- .../base/tests/test_traits_extension.py | 25 ++++--- nipype/interfaces/base/traits_extension.py | 67 +++++++------------ 3 files changed, 42 insertions(+), 55 deletions(-) diff --git a/nipype/interfaces/base/__init__.py b/nipype/interfaces/base/__init__.py index 30d44db56a..672b30178b 100644 --- a/nipype/interfaces/base/__init__.py +++ b/nipype/interfaces/base/__init__.py @@ -20,8 +20,9 @@ StdOutCommandLineInputSpec) from .traits_extension import ( - traits, Undefined, isdefined, - File, Directory, Str, DictStrStr, has_metadata, ImageFile, + traits, Undefined, isdefined, has_metadata, + File, ImageFile, Directory, + Tuple, Either, Str, DictStrStr, OutputMultiObject, InputMultiObject, OutputMultiPath, InputMultiPath) diff --git a/nipype/interfaces/base/tests/test_traits_extension.py b/nipype/interfaces/base/tests/test_traits_extension.py index 9cb6c12d86..832f8f9592 100644 --- a/nipype/interfaces/base/tests/test_traits_extension.py +++ b/nipype/interfaces/base/tests/test_traits_extension.py @@ -8,14 +8,13 @@ class _test_spec(nib.TraitedSpec): - a = nib.traits.File() - b = nib.traits.Tuple(nib.File(), - nib.File()) + a = nib.File() + b = nib.Tuple(nib.File(), nib.File()) c = nib.traits.List(nib.File()) - d = nib.traits.Either(nib.File(), nib.traits.Float()) + d = nib.Either(nib.File(), nib.traits.Float()) e = nib.OutputMultiObject(nib.File()) f = nib.traits.Dict(nib.Str, nib.File()) - g = nib.traits.Either(nib.File, nib.Str) + g = nib.Either(nib.File, nib.Str) h = nib.Str ee = nib.OutputMultiObject(nib.Str) @@ -28,6 +27,10 @@ def test_rebase_path_traits(): spec.trait('a'), '/some/path/f1.txt', '/some/path') assert a == Path('f1.txt') + a = rebase_path_traits( + spec.trait('a'), '/some/path/f1.txt', '/some/other/path') + assert a == Path('/some/path/f1.txt') + b = rebase_path_traits( spec.trait('b'), ('/some/path/f1.txt', '/some/path/f2.txt'), '/some/path') assert b == (Path('f1.txt'), Path('f2.txt')) @@ -90,6 +93,10 @@ def test_resolve_path_traits(): spec.trait('a'), 'f1.txt', '/some/path') assert a == Path('/some/path/f1.txt') + a = resolve_path_traits( + spec.trait('a'), '/already/absolute/f1.txt', '/some/path') + assert a == Path('/already/absolute/f1.txt') + b = resolve_path_traits( spec.trait('b'), ('f1.txt', 'f2.txt'), '/some/path') assert b == (Path('/some/path/f1.txt'), Path('/some/path/f2.txt')) @@ -128,10 +135,10 @@ def test_resolve_path_traits(): # This is a problematic case, it is impossible to know whether this # was meant to be a string or a file. - # Commented out because in this implementation, strings take precedence - # g = resolve_path_traits( - # spec.trait('g'), 'path/either.txt', '/some') - # assert g == Path('/some/path/either.txt') + # In this implementation, strings take precedence + g = resolve_path_traits( + spec.trait('g'), 'path/either.txt', '/some') + assert g == 'path/either.txt' # This is a problematic case, it is impossible to know whether this # was meant to be a string or a file. 
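``rebase_path_traits`` and ``resolve_path_traits`` are intended as inverses for path-like traits: rebasing strips a known base directory, resolving puts it back. A small round-trip sketch, assuming the ``_test_spec`` class from the new test module is in scope:

    >>> from nipype.interfaces.base.traits_extension import rebase_path_traits, resolve_path_traits, Path
    >>> spec = _test_spec()
    >>> rel = rebase_path_traits(spec.trait('a'), '/some/path/f1.txt', '/some/path')
    >>> rel == Path('f1.txt')
    True
    >>> resolve_path_traits(spec.trait('a'), rel, '/some/path') == Path('/some/path/f1.txt')
    True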
diff --git a/nipype/interfaces/base/traits_extension.py b/nipype/interfaces/base/traits_extension.py index dbe37eefb1..64bf45755f 100644 --- a/nipype/interfaces/base/traits_extension.py +++ b/nipype/interfaces/base/traits_extension.py @@ -308,11 +308,6 @@ def validate(self, objekt, name, value, return_pathlike=False): return value -# Patch in traits these two new -traits.File = File -traits.Directory = Directory - - class ImageFile(File): """Defines a trait whose value must be a known neuroimaging file.""" @@ -489,7 +484,7 @@ def inner_traits(self): return self.types -class PatchedEither(TraitType): +class Either(TraitType): """Defines a trait whose value can be any of of a specified list of traits.""" def __init__(self, *traits, **metadata): @@ -504,7 +499,7 @@ def as_ctrait(self): traits.Tuple = Tuple -traits.Either = PatchedEither +traits.Either = Either def _rebase_path(value, cwd): @@ -523,34 +518,6 @@ def _rebase_path(value, cwd): return value -def rebase_path_traits(thistrait, value, cwd): - """Rebase a BasePath-derived trait given an interface spec.""" - if thistrait.is_trait_type(BasePath): - value = _rebase_path(value, cwd) - elif thistrait.is_trait_type(traits.List): - innertrait, = thistrait.inner_traits - if not isinstance(value, (list, tuple)): - value = rebase_path_traits(innertrait, value, cwd) - else: - value = [rebase_path_traits(innertrait, v, cwd) - for v in value] - elif thistrait.is_trait_type(traits.Dict): - _, innertrait = thistrait.inner_traits - value = {k: rebase_path_traits(innertrait, v, cwd) - for k, v in value.items()} - elif thistrait.is_trait_type(Tuple): - value = tuple([rebase_path_traits(subtrait, v, cwd) - for subtrait, v in zip(thistrait.inner_traits, value)]) - elif thistrait.alternatives: - is_str = [f.is_trait_type((traits.String, traits.BaseStr, traits.BaseBytes, Str)) - for f in thistrait.alternatives] - if any(is_str) and isinstance(value, (bytes, str)) and not value.startswith('/'): - return value - for subtrait in thistrait.alternatives: - value = rebase_path_traits(subtrait, value, cwd) - return value - - def _resolve_path(value, cwd): if isinstance(value, list): return [_resolve_path(v, cwd) for v in value] @@ -565,29 +532,41 @@ def _resolve_path(value, cwd): return value -def resolve_path_traits(thistrait, value, cwd): - """Resolve a BasePath-derived trait given an interface spec.""" +def _recurse_on_path_traits(func, thistrait, value, cwd): + """Run func recursively on BasePath-derived traits.""" if thistrait.is_trait_type(BasePath): - value = _resolve_path(value, cwd) + value = func(value, cwd) elif thistrait.is_trait_type(traits.List): innertrait, = thistrait.inner_traits if not isinstance(value, (list, tuple)): - value = resolve_path_traits(innertrait, value, cwd) + value = _recurse_on_path_traits(func, innertrait, value, cwd) else: - value = [resolve_path_traits(innertrait, v, cwd) + value = [_recurse_on_path_traits(func, innertrait, v, cwd) for v in value] elif thistrait.is_trait_type(traits.Dict): _, innertrait = thistrait.inner_traits - value = {k: resolve_path_traits(innertrait, v, cwd) + value = {k: _recurse_on_path_traits(func, innertrait, v, cwd) for k, v in value.items()} elif thistrait.is_trait_type(Tuple): - value = tuple([resolve_path_traits(subtrait, v, cwd) + value = tuple([_recurse_on_path_traits(func, subtrait, v, cwd) for subtrait, v in zip(thistrait.inner_traits, value)]) elif thistrait.alternatives: is_str = [f.is_trait_type((traits.String, traits.BaseStr, traits.BaseBytes, Str)) for f in thistrait.alternatives] if 
any(is_str) and isinstance(value, (bytes, str)) and not value.startswith('/'): return value - for subtrait in thistrait.alternatives: - value = resolve_path_traits(subtrait, value, cwd) + is_basepath = [f.is_trait_type(BasePath) for f in thistrait.alternatives] + if any(is_basepath): + subtrait = thistrait.alternatives[is_basepath.index(True)] + value = _recurse_on_path_traits(func, subtrait, value, cwd) return value + + +def rebase_path_traits(thistrait, value, cwd): + """Rebase a BasePath-derived trait given an interface spec.""" + return _recurse_on_path_traits(_rebase_path, thistrait, value, cwd) + + +def resolve_path_traits(thistrait, value, cwd): + """Resolve a BasePath-derived trait given an interface spec.""" + return _recurse_on_path_traits(_resolve_path, thistrait, value, cwd) From 2c70fa80b2a0aeb503f69c3c46867072a2fa09ed Mon Sep 17 00:00:00 2001 From: oesteban Date: Fri, 19 Jul 2019 15:16:06 -0700 Subject: [PATCH 0281/1665] fix: replace all ``traits.File`` -> ``File`` --- nipype/algorithms/misc.py | 2 +- nipype/interfaces/afni/model.py | 2 +- nipype/interfaces/afni/preprocess.py | 10 +- nipype/interfaces/afni/utils.py | 28 ++--- nipype/interfaces/base/specs.py | 6 +- nipype/interfaces/dipy/base.py | 2 +- nipype/interfaces/dipy/tests/test_base.py | 6 +- nipype/interfaces/freesurfer/model.py | 12 +- nipype/interfaces/freesurfer/preprocess.py | 18 +-- nipype/interfaces/freesurfer/registration.py | 2 +- nipype/interfaces/freesurfer/utils.py | 32 +++--- nipype/interfaces/fsl/fix.py | 6 +- nipype/interfaces/fsl/model.py | 2 +- nipype/interfaces/minc/minc.py | 58 +++++----- nipype/interfaces/niftyfit/asl.py | 30 ++--- nipype/interfaces/niftyfit/dwi.py | 106 +++++++++--------- nipype/interfaces/niftyfit/qt1.py | 10 +- nipype/interfaces/nipy/model.py | 6 +- nipype/interfaces/utility/base.py | 2 +- .../interfaces/utility/tests/test_wrappers.py | 2 +- nipype/pipeline/engine/tests/test_base.py | 2 +- 21 files changed, 173 insertions(+), 171 deletions(-) diff --git a/nipype/algorithms/misc.py b/nipype/algorithms/misc.py index a4ecd3a5e2..a2eaad3610 100644 --- a/nipype/algorithms/misc.py +++ b/nipype/algorithms/misc.py @@ -727,7 +727,7 @@ def _list_outputs(self): class AddCSVRowInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): - in_file = traits.File( + in_file = File( mandatory=True, desc='Input comma-separated value (CSV) files') _outputs = traits.Dict(traits.Any, value={}, usedefault=True) diff --git a/nipype/interfaces/afni/model.py b/nipype/interfaces/afni/model.py index e091a87c57..295a01ce0c 100644 --- a/nipype/interfaces/afni/model.py +++ b/nipype/interfaces/afni/model.py @@ -383,7 +383,7 @@ class RemlfitInputSpec(AFNICommandInputSpec): 'produces a matrix with a single column of all ones', argstr='-polort %d', xor=['matrix']) - matim = traits.File( + matim = File( desc='read a standard file as the matrix. You can use only Col as ' 'a name in GLTs with these nonstandard matrix input methods, ' 'since the other names come from the \'matrix\' file. ' diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index ee81ee7e2f..a2e9cb7f4d 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -369,7 +369,7 @@ class AllineateInputSpec(AFNICommandInputSpec): 'larger weights mean that voxel count more in the cost function. 
' 'If an image file is given, the volume must be defined on the ' 'same grid as the base dataset') - out_weight_file = traits.File( + out_weight_file = File( argstr='-wtprefix %s', desc='Write the weight volume to disk as a dataset', xor=['allcostx']) @@ -966,7 +966,7 @@ class ClipLevelInputSpec(CommandLineInputSpec): argstr='-doall', position=3, xor=('grad')) - grad = traits.File( + grad = File( desc='Also compute a \'gradual\' clip level as a function of voxel ' 'position, and output that to a dataset.', argstr='-grad %s', @@ -1831,7 +1831,7 @@ class ROIStatsInputSpec(CommandLineInputSpec): 'a \'0\' in the output file. Only active if `num_roi` is ' 'enabled.', argstr='-zerofill %s') - roisel = traits.File( + roisel = File( exists=True, desc='Only considers ROIs denoted by values found in the specified ' 'file. Note that the order of the ROIs as specified in the file ' @@ -2260,7 +2260,7 @@ class TCorrMapInputSpec(AFNICommandInputSpec): polort = traits.Int(argstr='-polort %d') bandpass = traits.Tuple( (traits.Float(), traits.Float()), argstr='-bpass %f %f') - regress_out_timeseries = traits.File(exists=True, argstr='-ort %s') + regress_out_timeseries = File(exists=True, argstr='-ort %s') blur_fwhm = traits.Float(argstr='-Gblur %f') seeds_width = traits.Float(argstr='-Mseed %f', xor=('seeds')) @@ -3249,7 +3249,7 @@ class QwarpInputSpec(AFNICommandInputSpec): '* As with \'-wball\', the factor \'f\' should be between 1 and 100.' '* You cannot use \'-wball\' and \'-wmask\' together!', argstr='-wpass %s %f') - out_weight_file = traits.File( + out_weight_file = File( argstr='-wtprefix %s', desc='Write the weight volume to disk as a dataset') blur = traits.List( diff --git a/nipype/interfaces/afni/utils.py b/nipype/interfaces/afni/utils.py index e16e1bbd79..4f08ee26a0 100644 --- a/nipype/interfaces/afni/utils.py +++ b/nipype/interfaces/afni/utils.py @@ -1397,7 +1397,7 @@ class LocalBistatInputSpec(AFNICommandInputSpec): ' * ALL = all of the above, in that order' 'More than one option can be used.', argstr='-stat %s...') - mask_file = traits.File( + mask_file = File( exists=True, desc='mask image file name. Voxels NOT in the mask will not be used ' 'in the neighborhood of any voxel. Also, a voxel NOT in the mask ' @@ -1407,13 +1407,13 @@ class LocalBistatInputSpec(AFNICommandInputSpec): desc='Compute the mask as in program 3dAutomask.', argstr='-automask', xor=['weight_file']) - weight_file = traits.File( + weight_file = File( exists=True, desc='File name of an image to use as a weight. Only applies to ' '\'pearson\' statistics.', argstr='-weight %s', xor=['automask']) - out_file = traits.File( + out_file = File( desc='Output dataset.', argstr='-prefix %s', name_source='in_file1', @@ -1527,7 +1527,7 @@ class LocalstatInputSpec(AFNICommandInputSpec): ' mean, median, MAD, P2skew\n' 'More than one option can be used.', argstr='-stat %s...') - mask_file = traits.File( + mask_file = File( exists=True, desc='Mask image file name. Voxels NOT in the mask will not be used ' 'in the neighborhood of any voxel. 
Also, a voxel NOT in the ' @@ -1589,7 +1589,7 @@ class LocalstatInputSpec(AFNICommandInputSpec): overwrite = traits.Bool( desc='overwrite output file if it already exists', argstr='-overwrite') - out_file = traits.File( + out_file = File( desc='Output dataset.', argstr='-prefix %s', name_source='in_file', @@ -1905,7 +1905,7 @@ class NwarpApplyInputSpec(CommandLineInputSpec): inv_warp = traits.Bool( desc='After the warp specified in \'-nwarp\' is computed, invert it', argstr='-iwarp') - master = traits.File( + master = File( exists=True, desc='the name of the master dataset, which defines the output grid', argstr='-master %s') @@ -1980,10 +1980,10 @@ class NwarpApply(AFNICommandBase): class NwarpCatInputSpec(AFNICommandInputSpec): in_files = traits.List( - traits.Either(traits.File(), + traits.Either(File(), traits.Tuple( traits.Enum('IDENT', 'INV', 'SQRT', 'SQRTINV'), - traits.File())), + File())), desc="list of tuples of 3D warps and associated functions", mandatory=True, argstr="%s", @@ -2137,7 +2137,7 @@ class OneDToolPyInputSpec(AFNIPythonCommandInputSpec): desc= 'display a list of TRs which were not censored in the specified style', argstr='-show_trs_uncensored %s') - show_cormat_warnings = traits.File( + show_cormat_warnings = File( desc='Write cormat warnings to a file', argstr="-show_cormat_warnings |& tee %s", position=-1, @@ -2226,7 +2226,7 @@ class RefitInputSpec(CommandLineInputSpec): desc='Associates the dataset with a specific template type, e.g. ' 'TLRC, MNI, ORIG') atrcopy = traits.Tuple( - traits.File(exists=True), + File(exists=True), traits.Str(), argstr='-atrcopy %s %s', desc='Copy AFNI header attribute from the given file into the header ' @@ -2310,7 +2310,7 @@ class ReHoInputSpec(CommandLineInputSpec): position=1, mandatory=True, exists=True) - out_file = traits.File( + out_file = File( desc='Output dataset.', argstr='-prefix %s', name_source='in_file', @@ -2323,7 +2323,7 @@ class ReHoInputSpec(CommandLineInputSpec): 'Kendall\'s W. 
This option is currently compatible only with ' 'the AFNI (BRIK/HEAD) output type; the chi-squared value will ' 'be the second sub-brick of the output dataset.') - mask_file = traits.File( + mask_file = File( desc='Mask within which ReHo should be calculated voxelwise', argstr='-mask %s') neighborhood = traits.Enum( @@ -2451,7 +2451,7 @@ class ResampleInputSpec(AFNICommandInputSpec): *[traits.Float()] * 3, argstr='-dxyz %f %f %f', desc='resample to new dx, dy and dz') - master = traits.File( + master = File( argstr='-master %s', desc='align dataset grid to a reference file') @@ -3247,7 +3247,7 @@ class ZeropadInputSpec(AFNICommandInputSpec): 'and slice thickness = 2.5 mm ==> 2 slices added', argstr='-mm', xor=['master']) - master = traits.File( + master = File( desc='match the volume described in dataset ' '\'mset\', where mset must have the same ' 'orientation and grid spacing as dataset to be ' diff --git a/nipype/interfaces/base/specs.py b/nipype/interfaces/base/specs.py index fb0f3295f0..5028d5c30a 100644 --- a/nipype/interfaces/base/specs.py +++ b/nipype/interfaces/base/specs.py @@ -25,6 +25,8 @@ md5, hash_infile, hash_timestamp, to_str, USING_PATHLIB2) from .traits_extension import ( traits, + File, + Str, Undefined, isdefined, has_metadata, @@ -373,13 +375,13 @@ class DynamicTraitedSpec(BaseTraitedSpec): class CommandLineInputSpec(BaseInterfaceInputSpec): - args = traits.Str(argstr='%s', desc='Additional parameters to the command') + args = Str(argstr='%s', desc='Additional parameters to the command') environ = traits.DictStrStr( desc='Environment variables', usedefault=True, nohash=True) class StdOutCommandLineInputSpec(CommandLineInputSpec): - out_file = traits.File(argstr="> %s", position=-1, genfile=True) + out_file = File(argstr="> %s", position=-1, genfile=True) class MpiCommandLineInputSpec(CommandLineInputSpec): diff --git a/nipype/interfaces/dipy/base.py b/nipype/interfaces/dipy/base.py index 27f26e989a..3e0e064bb4 100644 --- a/nipype/interfaces/dipy/base.py +++ b/nipype/interfaces/dipy/base.py @@ -100,7 +100,7 @@ def convert_to_traits_type(dipy_type, is_file=False): elif "string" in dipy_type and not is_file: return traits.Str, is_mandatory elif "string" in dipy_type and is_file: - return traits.File, is_mandatory + return File, is_mandatory elif "int" in dipy_type: return traits.Int, is_mandatory elif "float" in dipy_type: diff --git a/nipype/interfaces/dipy/tests/test_base.py b/nipype/interfaces/dipy/tests/test_base.py index 1d475ac0f7..740057bcd3 100644 --- a/nipype/interfaces/dipy/tests/test_base.py +++ b/nipype/interfaces/dipy/tests/test_base.py @@ -1,6 +1,6 @@ import pytest from collections import namedtuple -from ...base import traits, TraitedSpec, BaseInterfaceInputSpec +from ...base import traits, File, TraitedSpec, BaseInterfaceInputSpec from ..base import (convert_to_traits_type, create_interface_specs, dipy_to_nipype_interface, DipyBaseInterface, no_dipy, get_dipy_workflows) @@ -35,10 +35,10 @@ def test_convert_to_traits_type(): Res(traits.ListStr, False), Res(traits.ListFloat, False), Res(traits.ListBool, False), Res(traits.ListComplex, False), Res(traits.Str, True), Res(traits.Int, True), - Res(traits.File, True), Res(traits.Float, True), + Res(File, True), Res(traits.Float, True), Res(traits.Bool, True), Res(traits.Complex, True), Res(traits.Str, False), Res(traits.Int, False), - Res(traits.File, False), Res(traits.Float, False), + Res(File, False), Res(traits.Float, False), Res(traits.Bool, False), Res(traits.Complex, False), ] diff --git 
a/nipype/interfaces/freesurfer/model.py b/nipype/interfaces/freesurfer/model.py index 58d168e2d7..d1c8dcdc93 100644 --- a/nipype/interfaces/freesurfer/model.py +++ b/nipype/interfaces/freesurfer/model.py @@ -988,7 +988,7 @@ class SegStatsReconAllInputSpec(SegStatsInputSpec): mandatory=True, desc="Subject id being processed") # implicit - ribbon = traits.File( + ribbon = File( mandatory=True, exists=True, desc="Input file mri/ribbon.mgz") presurf_seg = File(exists=True, desc="Input segmentation volume") transform = File(mandatory=True, exists=True, desc="Input transform file") @@ -1216,19 +1216,19 @@ class MS_LDAInputSpec(FSTraitedSpec): maxlen=2, sep=' ', desc='pair of class labels to optimize') - weight_file = traits.File( + weight_file = File( argstr='-weight %s', mandatory=True, desc='filename for the LDA weights (input or output)') - vol_synth_file = traits.File( + vol_synth_file = File( exists=False, argstr='-synth %s', mandatory=True, desc=('filename for the synthesized output ' 'volume')) - label_file = traits.File( + label_file = File( exists=True, argstr='-label %s', desc='filename of the label volume') - mask_file = traits.File( + mask_file = File( exists=True, argstr='-mask %s', desc='filename of the brain mask volume') @@ -1532,7 +1532,7 @@ class SphericalAverageInputSpec(FSTraitedSpec): exists=False, position=-1, desc="Output filename") - in_average = traits.Directory( + in_average = Directory( argstr="%s", exists=True, genfile=True, diff --git a/nipype/interfaces/freesurfer/preprocess.py b/nipype/interfaces/freesurfer/preprocess.py index 2941968f85..682910cfb3 100644 --- a/nipype/interfaces/freesurfer/preprocess.py +++ b/nipype/interfaces/freesurfer/preprocess.py @@ -2313,7 +2313,7 @@ class NormalizeInputSpec(FSTraitedSpec): class NormalizeOutputSpec(TraitedSpec): - out_file = traits.File(exists=False, desc="The output file for Normalize") + out_file = File(exists=False, desc="The output file for Normalize") class Normalize(FSCommand): @@ -2379,7 +2379,7 @@ class CANormalizeInputSpec(FSTraitedSpec): class CANormalizeOutputSpec(TraitedSpec): - out_file = traits.File(exists=False, desc="The output file for Normalize") + out_file = File(exists=False, desc="The output file for Normalize") control_points = File( exists=False, desc="The output control points for Normalize") @@ -2460,7 +2460,7 @@ class CARegisterInputSpec(FSTraitedSpecOpenMP): class CARegisterOutputSpec(TraitedSpec): - out_file = traits.File(exists=False, desc="The output file for CARegister") + out_file = File(exists=False, desc="The output file for CARegister") class CARegister(FSCommandOpenMP): @@ -2540,13 +2540,13 @@ class CALabelInputSpec(FSTraitedSpecOpenMP): desc=("Reclassify voxels at least some std" " devs from the mean using some size" " Gaussian window")) - label = traits.File( + label = File( argstr="-l %s", exists=True, desc= "Undocumented flag. Autorecon3 uses ../label/{hemisphere}.cortex.label as input file" ) - aseg = traits.File( + aseg = File( argstr="-aseg %s", exists=True, desc= @@ -2630,13 +2630,13 @@ class MRIsCALabelInputSpec(FSTraitedSpecOpenMP): name_template="%s.aparc.annot", desc="Annotated surface output file") # optional - label = traits.File( + label = File( argstr="-l %s", exists=True, desc= "Undocumented flag. 
Autorecon3 uses ../label/{hemisphere}.cortex.label as input file" ) - aseg = traits.File( + aseg = File( argstr="-aseg %s", exists=True, desc= @@ -2985,13 +2985,13 @@ class ConcatenateLTAInputSpec(FSTraitedSpec): 'VOX2VOX', 'RAS2RAS', argstr='-out_type %d', desc='set final LTA type') # Talairach options - tal_source_file = traits.File( + tal_source_file = File( exists=True, argstr='-tal %s', position=-5, requires=['tal_template_file'], desc='if in_lta2 is talairach.xfm, specify source for talairach') - tal_template_file = traits.File( + tal_template_file = File( exists=True, argstr='%s', position=-4, diff --git a/nipype/interfaces/freesurfer/registration.py b/nipype/interfaces/freesurfer/registration.py index 99ee7d0179..9cf6fb7b44 100644 --- a/nipype/interfaces/freesurfer/registration.py +++ b/nipype/interfaces/freesurfer/registration.py @@ -134,7 +134,7 @@ class RegisterAVItoTalairachInputSpec(FSTraitedSpec): class RegisterAVItoTalairachOutputSpec(FSScriptOutputSpec): - out_file = traits.File( + out_file = File( exists=False, desc="The output file for RegisterAVItoTalairach") diff --git a/nipype/interfaces/freesurfer/utils.py b/nipype/interfaces/freesurfer/utils.py index 55e38576bb..a60c5f7a11 100644 --- a/nipype/interfaces/freesurfer/utils.py +++ b/nipype/interfaces/freesurfer/utils.py @@ -13,7 +13,7 @@ from ... import logging from ...utils.filemanip import fname_presuffix, split_filename -from ..base import (TraitedSpec, File, traits, OutputMultiPath, isdefined, +from ..base import (TraitedSpec, Directory, File, traits, OutputMultiPath, isdefined, CommandLine, CommandLineInputSpec) from .base import (FSCommand, FSTraitedSpec, FSSurfaceCommand, FSScriptCommand, FSScriptOutputSpec, FSTraitedSpecOpenMP, FSCommandOpenMP) @@ -769,7 +769,7 @@ class SurfaceSnapshotsInputSpec(FSTraitedSpec): desc="load an overlay volume/surface", requires=["overlay_range"]) reg_xors = ["overlay_reg", "identity_reg", "mni152_reg"] - overlay_reg = traits.File( + overlay_reg = File( exists=True, argstr="-overlay-reg %s", xor=reg_xors, @@ -1956,7 +1956,7 @@ class CheckTalairachAlignmentInputSpec(FSTraitedSpec): class CheckTalairachAlignmentOutputSpec(TraitedSpec): - out_file = traits.File( + out_file = File( exists=True, desc="The input file for CheckTalairachAlignment") @@ -2002,11 +2002,11 @@ class TalairachAVIInputSpec(FSTraitedSpec): class TalairachAVIOutputSpec(TraitedSpec): - out_file = traits.File( + out_file = File( exists=False, desc="The output transform for TalairachAVI") - out_log = traits.File( + out_log = File( exists=False, desc="The output log file for TalairachAVI") - out_txt = traits.File( + out_txt = File( exists=False, desc="The output text file for TaliarachAVI") @@ -2348,7 +2348,7 @@ class FixTopologyInputSpec(FSTraitedSpec): desc= "No documentation. Direct questions to analysis-bugs@nmr.mgh.harvard.edu" ) - sphere = traits.File(argstr="-sphere %s", desc="Sphere input file") + sphere = File(argstr="-sphere %s", desc="Sphere input file") class FixTopologyOutputSpec(TraitedSpec): @@ -3215,27 +3215,27 @@ class ParcellationStatsInputSpec(FSTraitedSpec): surface = traits.String( position=-1, argstr="%s", desc="Input surface (e.g. 
'white')") mgz = traits.Bool(argstr="-mgz", desc="Look for mgz files") - in_cortex = traits.File( + in_cortex = File( argstr="-cortex %s", exists=True, desc="Input cortex label") - in_annotation = traits.File( + in_annotation = File( argstr="-a %s", exists=True, xor=['in_label'], desc= "compute properties for each label in the annotation file separately") - in_label = traits.File( + in_label = File( argstr="-l %s", exists=True, xor=['in_annotatoin', 'out_color'], desc="limit calculations to specified label") tabular_output = traits.Bool(argstr="-b", desc="Tabular output") - out_table = traits.File( + out_table = File( argstr="-f %s", exists=False, genfile=True, requires=['tabular_output'], desc="Table output to tablefile") - out_color = traits.File( + out_color = File( argstr="-c %s", exists=False, genfile=True, @@ -3395,13 +3395,13 @@ class ContrastInputSpec(FSTraitedSpec): mandatory=True, exists=True, desc="Input file must be /surf/.white") - annotation = traits.File( + annotation = File( mandatory=True, exists=True, desc= "Input annotation file must be /label/.aparc.annot" ) - cortex = traits.File( + cortex = File( mandatory=True, exists=True, desc= @@ -3500,7 +3500,7 @@ class RelabelHypointensitiesInputSpec(FSTraitedSpec): mandatory=True, exists=True, desc="Input aseg file") - surf_directory = traits.Directory( + surf_directory = Directory( '.', argstr="%s", position=-2, @@ -3804,7 +3804,7 @@ class MRIsExpandInputSpec(FSTraitedSpec): # desc=('Tuple of (n_averages, min_averages) parameters ' # '(implicit: (16, 0))')) # target_intensity = traits.Tuple( - # traits.Float, traits.File(exists=True), + # traits.Float, File(exists=True), # argstr='-intensity %g %s', # desc='Tuple of intensity and brain volume to crop to target intensity') diff --git a/nipype/interfaces/fsl/fix.py b/nipype/interfaces/fsl/fix.py index ebe986eb79..5b5a044e88 100644 --- a/nipype/interfaces/fsl/fix.py +++ b/nipype/interfaces/fsl/fix.py @@ -329,13 +329,13 @@ class CleanerInputSpec(CommandLineInputSpec): 'Apply aggressive (full variance) cleanup, instead of the default less-aggressive (unique variance) cleanup.', position=3) - confound_file = traits.File( + confound_file = File( argstr='-x %s', desc='Include additional confound file.', position=4) - confound_file_1 = traits.File( + confound_file_1 = File( argstr='-x %s', desc='Include additional confound file.', position=5) - confound_file_2 = traits.File( + confound_file_2 = File( argstr='-x %s', desc='Include additional confound file.', position=6) diff --git a/nipype/interfaces/fsl/model.py b/nipype/interfaces/fsl/model.py index 113f785120..ad69968363 100644 --- a/nipype/interfaces/fsl/model.py +++ b/nipype/interfaces/fsl/model.py @@ -1891,7 +1891,7 @@ class ClusterInputSpec(FSLCommandInputSpec): peak_distance = traits.Float( argstr='--peakdist=%.10f', desc='minimum distance between local maxima/minima, in mm (default 0)') - cope_file = traits.File(argstr='--cope=%s', desc='cope volume') + cope_file = File(argstr='--cope=%s', desc='cope volume') volume = traits.Int( argstr='--volume=%d', desc='number of voxels in the mask') dlh = traits.Float( diff --git a/nipype/interfaces/minc/minc.py b/nipype/interfaces/minc/minc.py index 8ac8babe52..907d35675f 100644 --- a/nipype/interfaces/minc/minc.py +++ b/nipype/interfaces/minc/minc.py @@ -677,7 +677,7 @@ class AverageInputSpec(CommandLineInputSpec): _xor_input_files = ('input_files', 'filelist') input_files = InputMultiPath( - traits.File(exists=True), + File(exists=True), desc='input file(s)', mandatory=True, sep=' ', @@ 
-685,7 +685,7 @@ class AverageInputSpec(CommandLineInputSpec): position=-2, xor=_xor_input_files) - filelist = traits.File( + filelist = File( desc='Specify the name of a file containing input file names.', argstr='-filelist %s', exists=True, @@ -806,7 +806,7 @@ class AverageInputSpec(CommandLineInputSpec): argstr='-range %d %d', desc='Valid range for output data.') - sdfile = traits.File( + sdfile = File( desc='Specify an output sd file (default=none).', argstr='-sdfile %s') _xor_copy_header = ('copy_header', 'no_copy_header') @@ -938,7 +938,7 @@ class CalcInputSpec(CommandLineInputSpec): _xor_input_files = ('input_files', 'filelist') input_files = InputMultiPath( - traits.File(exists=True), + File(exists=True), desc='input file(s) for calculation', mandatory=True, sep=' ', @@ -979,7 +979,7 @@ class CalcInputSpec(CommandLineInputSpec): debug = traits.Bool(desc='Print out debugging messages.', argstr='-debug') - filelist = traits.File( + filelist = File( desc='Specify the name of a file containing input file names.', argstr='-filelist %s', mandatory=True, @@ -1095,7 +1095,7 @@ class CalcInputSpec(CommandLineInputSpec): argstr='-expression \'%s\'', xor=_xor_expression, mandatory=True) - expfile = traits.File( + expfile = File( desc='Name of file containing expression.', argstr='-expfile %s', xor=_xor_expression, @@ -1106,7 +1106,7 @@ class CalcInputSpec(CommandLineInputSpec): outfiles = traits.List( traits.Tuple( traits.Str, - traits.File, + File, argstr='-outfile %s %s', desc= ('List of (symbol, file) tuples indicating that output should be written' @@ -1300,7 +1300,7 @@ class BeastInputSpec(CommandLineInputSpec): usedefault=True, default_value=True) - configuration_file = traits.File( + configuration_file = File( desc='Specify configuration file.', argstr='-configuration %s') voxel_size = traits.Int( @@ -1349,11 +1349,11 @@ class BeastInputSpec(CommandLineInputSpec): desc='Output final mask with the same resolution as input file.', argstr='-same_resolution') - library_dir = traits.Directory( + library_dir = Directory( desc='library directory', position=-3, argstr='%s', mandatory=True) - input_file = traits.File( + input_file = File( desc='input file', position=-2, argstr='%s', mandatory=True) - output_file = traits.File( + output_file = File( desc='output file', position=-1, argstr='%s', @@ -1741,7 +1741,7 @@ class MathInputSpec(CommandLineInputSpec): _xor_input_files = ('input_files', 'filelist') input_files = InputMultiPath( - traits.File(exists=True), + File(exists=True), desc='input file(s) for calculation', mandatory=True, sep=' ', @@ -1758,7 +1758,7 @@ class MathInputSpec(CommandLineInputSpec): hash_files=False, name_template='%s_mincmath.mnc') - filelist = traits.File( + filelist = File( desc='Specify the name of a file containing input file names.', argstr='-filelist %s', exists=True, @@ -2193,7 +2193,7 @@ class ResampleInputSpec(CommandLineInputSpec): # This is a dummy input. input_grid_files = InputMultiPath( - traits.File, + File, desc='input grid file(s)', ) @@ -2257,7 +2257,7 @@ class ResampleInputSpec(CommandLineInputSpec): xor=_xor_sinc_window_type, requires=['sinc_interpolation']) - transformation = traits.File( + transformation = File( desc='File giving world transformation. 
(Default = identity).', exists=True, argstr='-transformation %s') @@ -2278,7 +2278,7 @@ class ResampleInputSpec(CommandLineInputSpec): argstr='-use_input_sampling', xor=_xor_input_sampling) - like = traits.File( + like = File( desc='Specifies a model file for the resampling.', argstr='-like %s', exists=True) @@ -2578,7 +2578,7 @@ class NormInputSpec(CommandLineInputSpec): hash_files=False, name_template='%s_norm.mnc') - output_threshold_mask = traits.File( + output_threshold_mask = File( desc='File in which to store the threshold mask.', argstr='-threshold_mask %s', name_source=['input_file'], @@ -2592,7 +2592,7 @@ class NormInputSpec(CommandLineInputSpec): default_value=True) # Normalisation Options - mask = traits.File( + mask = File( desc='Calculate the image normalisation within a mask.', argstr='-mask %s', exists=True) @@ -2988,7 +2988,7 @@ def _list_outputs(self): class XfmConcatInputSpec(CommandLineInputSpec): input_files = InputMultiPath( - traits.File(exists=True), + File(exists=True), desc='input file(s)', mandatory=True, sep=' ', @@ -2997,7 +2997,7 @@ class XfmConcatInputSpec(CommandLineInputSpec): # This is a dummy input. input_grid_files = InputMultiPath( - traits.File, + File, desc='input grid file(s)', ) @@ -3165,7 +3165,7 @@ class NlpFitInputSpec(CommandLineInputSpec): # This is a dummy input. input_grid_files = InputMultiPath( - traits.File, + File, desc='input grid file(s)', ) @@ -3256,7 +3256,7 @@ def _list_outputs(self): class XfmAvgInputSpec(CommandLineInputSpec): input_files = InputMultiPath( - traits.File(exists=True), + File(exists=True), desc='input file(s)', mandatory=True, sep=' ', @@ -3265,7 +3265,7 @@ class XfmAvgInputSpec(CommandLineInputSpec): # This is a dummy input. input_grid_files = InputMultiPath( - traits.File, + File, desc='input grid file(s)', ) @@ -3355,7 +3355,7 @@ def _list_outputs(self): class XfmInvertInputSpec(CommandLineInputSpec): - input_file = traits.File( + input_file = File( desc='input file', exists=True, mandatory=True, @@ -3430,7 +3430,7 @@ def _list_outputs(self): class BigAverageInputSpec(CommandLineInputSpec): input_files = InputMultiPath( - traits.File(exists=True), + File(exists=True), desc='input file(s)', mandatory=True, sep=' ', @@ -3520,7 +3520,7 @@ class BigAverage(CommandLine): class ReshapeInputSpec(CommandLineInputSpec): - input_file = traits.File( + input_file = File( desc='input file', exists=True, mandatory=True, @@ -3578,14 +3578,14 @@ class Reshape(CommandLine): class VolSymmInputSpec(CommandLineInputSpec): - input_file = traits.File( + input_file = File( desc='input file', exists=True, mandatory=True, argstr='%s', position=-3) - trans_file = traits.File( + trans_file = File( desc='output xfm trans file', genfile=True, argstr='%s', @@ -3606,7 +3606,7 @@ class VolSymmInputSpec(CommandLineInputSpec): # This is a dummy input. input_grid_files = InputMultiPath( - traits.File, + File, desc='input grid file(s)', ) diff --git a/nipype/interfaces/niftyfit/asl.py b/nipype/interfaces/niftyfit/asl.py index c4920dc195..0d13880e1e 100644 --- a/nipype/interfaces/niftyfit/asl.py +++ b/nipype/interfaces/niftyfit/asl.py @@ -4,7 +4,7 @@ The ASL module of niftyfit, which wraps the fitting methods in NiftyFit. """ -from ..base import TraitedSpec, traits, CommandLineInputSpec +from ..base import File, TraitedSpec, traits, CommandLineInputSpec from .base import NiftyFitCommand from ..niftyreg.base import get_custom_path @@ -12,7 +12,7 @@ class FitAslInputSpec(CommandLineInputSpec): """ Input Spec for FitAsl. 
""" desc = 'Filename of the 4D ASL (control/label) source image (mandatory).' - source_file = traits.File( + source_file = File( position=1, exists=True, argstr='-source %s', @@ -23,17 +23,17 @@ class FitAslInputSpec(CommandLineInputSpec): # *** Output options: desc = 'Filename of the Cerebral Blood Flow map (in ml/100g/min).' - cbf_file = traits.File( + cbf_file = File( name_source=['source_file'], name_template='%s_cbf.nii.gz', argstr='-cbf %s', desc=desc) - error_file = traits.File( + error_file = File( name_source=['source_file'], name_template='%s_error.nii.gz', argstr='-error %s', desc='Filename of the CBF error map.') - syn_file = traits.File( + syn_file = File( name_source=['source_file'], name_template='%s_syn.nii.gz', argstr='-syn %s', @@ -41,20 +41,20 @@ class FitAslInputSpec(CommandLineInputSpec): # *** Input options (see also fit_qt1 for generic T1 fitting): desc = 'Filename of the estimated input T1 map (in ms).' - t1map = traits.File(exists=True, argstr='-t1map %s', desc=desc) + t1map = File(exists=True, argstr='-t1map %s', desc=desc) desc = 'Filename of the estimated input M0 map.' - m0map = traits.File(exists=True, argstr='-m0map %s', desc=desc) + m0map = File(exists=True, argstr='-m0map %s', desc=desc) desc = 'Filename of the estimated input M0 map error.' - m0mape = traits.File(exists=True, argstr='-m0mape %s', desc=desc) + m0mape = File(exists=True, argstr='-m0mape %s', desc=desc) desc = 'Filename of a [1,2,5]s Inversion Recovery volume (T1/M0 fitting \ carried out internally).' - ir_volume = traits.File(exists=True, argstr='-IRvolume %s', desc=desc) + ir_volume = File(exists=True, argstr='-IRvolume %s', desc=desc) desc = 'Output of [1,2,5]s Inversion Recovery fitting.' - ir_output = traits.File(exists=True, argstr='-IRoutput %s', desc=desc) + ir_output = File(exists=True, argstr='-IRoutput %s', desc=desc) # *** Experimental options (Choose those suitable for the model!): - mask = traits.File( + mask = File( position=2, exists=True, desc='Filename of image mask.', @@ -104,7 +104,7 @@ class FitAslInputSpec(CommandLineInputSpec): desc = 'Filename of the 4D segmentation (in ASL space) for L/T1 \ estimation and PV correction {WM,GM,CSF}.' - seg = traits.File(exists=True, argstr='-seg %s', desc=desc) + seg = File(exists=True, argstr='-seg %s', desc=desc) desc = 'Use sigmoid to estimate L from T1: L(T1|gmL,wmL) [Off].' sig = traits.Bool(desc=desc, argstr='-sig') desc = 'Simple PV correction (CBF=vg*CBFg + vw*CBFw, with CBFw=f*CBFg) \ @@ -131,11 +131,11 @@ class FitAslInputSpec(CommandLineInputSpec): class FitAslOutputSpec(TraitedSpec): """ Output Spec for FitAsl. """ desc = 'Filename of the Cerebral Blood Flow map (in ml/100g/min).' - cbf_file = traits.File(exists=True, desc=desc) + cbf_file = File(exists=True, desc=desc) desc = 'Filename of the CBF error map.' - error_file = traits.File(exists=True, desc=desc) + error_file = File(exists=True, desc=desc) desc = 'Filename of the synthetic ASL data.' - syn_file = traits.File(exists=True, desc=desc) + syn_file = File(exists=True, desc=desc) class FitAsl(NiftyFitCommand): diff --git a/nipype/interfaces/niftyfit/dwi.py b/nipype/interfaces/niftyfit/dwi.py index 23b73aea90..900a558fa1 100644 --- a/nipype/interfaces/niftyfit/dwi.py +++ b/nipype/interfaces/niftyfit/dwi.py @@ -4,7 +4,7 @@ The dwi module of niftyfit, which wraps the fitting methods in NiftyFit. 
""" -from ..base import TraitedSpec, traits, isdefined, CommandLineInputSpec +from ..base import File, TraitedSpec, traits, isdefined, CommandLineInputSpec from .base import NiftyFitCommand from ..niftyreg.base import get_custom_path @@ -12,61 +12,61 @@ class FitDwiInputSpec(CommandLineInputSpec): """ Input Spec for FitDwi. """ # Inputs options - source_file = traits.File( + source_file = File( position=1, exists=True, argstr='-source %s', mandatory=True, desc='The source image containing the dwi data.') desc = 'The file containing the bvalues of the source DWI.' - bval_file = traits.File( + bval_file = File( position=2, exists=True, argstr='-bval %s', mandatory=True, desc=desc) desc = 'The file containing the bvectors of the source DWI.' - bvec_file = traits.File( + bvec_file = File( position=3, exists=True, argstr='-bvec %s', mandatory=True, desc=desc) - te_file = traits.File( + te_file = File( exists=True, argstr='-TE %s', desc='Filename of TEs (ms).', xor=['te_file']) - te_value = traits.File( + te_value = File( exists=True, argstr='-TE %s', desc='Value of TEs (ms).', xor=['te_file']) - mask_file = traits.File( + mask_file = File( exists=True, desc='The image mask', argstr='-mask %s') desc = 'Filename of parameter priors for -ball and -nod.' - prior_file = traits.File(exists=True, argstr='-prior %s', desc=desc) + prior_file = File(exists=True, argstr='-prior %s', desc=desc) desc = 'Rotate the output tensors according to the q/s form of the image \ (resulting tensors will be in mm coordinates, default: 0).' rot_sform_flag = traits.Int(desc=desc, argstr='-rotsform %d') # generic output options: - error_file = traits.File( + error_file = File( name_source=['source_file'], name_template='%s_error.nii.gz', desc='Filename of parameter error maps.', argstr='-error %s') - res_file = traits.File( + res_file = File( name_source=['source_file'], name_template='%s_resmap.nii.gz', desc='Filename of model residual map.', argstr='-res %s') - syn_file = traits.File( + syn_file = File( name_source=['source_file'], name_template='%s_syn.nii.gz', desc='Filename of synthetic image.', argstr='-syn %s') - nodiff_file = traits.File( + nodiff_file = File( name_source=['source_file'], name_template='%s_no_diff.nii.gz', desc='Filename of average no diffusion image.', argstr='-nodiff %s') # Output options, with templated output names based on the source image - mcmap_file = traits.File( + mcmap_file = File( name_source=['source_file'], name_template='%s_mcmap.nii.gz', desc='Filename of multi-compartment model parameter map ' @@ -75,22 +75,22 @@ class FitDwiInputSpec(CommandLineInputSpec): requires=['nodv_flag']) # Model Specific Output options: - mdmap_file = traits.File( + mdmap_file = File( name_source=['source_file'], name_template='%s_mdmap.nii.gz', desc='Filename of MD map/ADC', argstr='-mdmap %s') - famap_file = traits.File( + famap_file = File( name_source=['source_file'], name_template='%s_famap.nii.gz', desc='Filename of FA map', argstr='-famap %s') - v1map_file = traits.File( + v1map_file = File( name_source=['source_file'], name_template='%s_v1map.nii.gz', desc='Filename of PDD map [x,y,z]', argstr='-v1map %s') - rgbmap_file = traits.File( + rgbmap_file = File( name_source=['source_file'], name_template='%s_rgbmap.nii.gz', desc='Filename of colour-coded FA map', @@ -103,13 +103,13 @@ class FitDwiInputSpec(CommandLineInputSpec): ten_type = traits.Enum( 'lower-tri', 'diag-off-diag', desc=desc, usedefault=True) - tenmap_file = traits.File( + tenmap_file = File( name_source=['source_file'], 
name_template='%s_tenmap.nii.gz', desc='Filename of tensor map [diag,offdiag].', argstr='-tenmap %s', requires=['dti_flag']) - tenmap2_file = traits.File( + tenmap2_file = File( name_source=['source_file'], name_template='%s_tenmap2.nii.gz', desc='Filename of tensor map [lower tri]', @@ -195,7 +195,7 @@ class FitDwiInputSpec(CommandLineInputSpec): identity covariance...).' vb_flag = traits.Bool(desc=desc, argstr='-vb') - cov_file = traits.File( + cov_file = File( exists=True, desc='Filename of ithe nc*nc covariance matrix [I]', argstr='-cov %s') @@ -221,7 +221,7 @@ class FitDwiInputSpec(CommandLineInputSpec): perf_thr = traits.Float(desc=desc, argstr='-perfthreshold %f') # MCMC options: - mcout = traits.File( + mcout = File( name_source=['source_file'], name_template='%s_mcout.txt', desc='Filename of mc samples (ascii text file)', @@ -238,20 +238,20 @@ class FitDwiInputSpec(CommandLineInputSpec): class FitDwiOutputSpec(TraitedSpec): """ Output Spec for FitDwi. """ - error_file = traits.File(desc='Filename of parameter error maps') - res_file = traits.File(desc='Filename of model residual map') - syn_file = traits.File(desc='Filename of synthetic image') - nodiff_file = traits.File(desc='Filename of average no diffusion image.') - mdmap_file = traits.File(desc='Filename of MD map/ADC') - famap_file = traits.File(desc='Filename of FA map') - v1map_file = traits.File(desc='Filename of PDD map [x,y,z]') - rgbmap_file = traits.File(desc='Filename of colour FA map') - tenmap_file = traits.File(desc='Filename of tensor map') - tenmap2_file = traits.File(desc='Filename of tensor map [lower tri]') - - mcmap_file = traits.File(desc='Filename of multi-compartment model ' + error_file = File(desc='Filename of parameter error maps') + res_file = File(desc='Filename of model residual map') + syn_file = File(desc='Filename of synthetic image') + nodiff_file = File(desc='Filename of average no diffusion image.') + mdmap_file = File(desc='Filename of MD map/ADC') + famap_file = File(desc='Filename of FA map') + v1map_file = File(desc='Filename of PDD map [x,y,z]') + rgbmap_file = File(desc='Filename of colour FA map') + tenmap_file = File(desc='Filename of tensor map') + tenmap2_file = File(desc='Filename of tensor map [lower tri]') + + mcmap_file = File(desc='Filename of multi-compartment model ' 'parameter map (-ivim,-ball,-nod).') - mcout = traits.File(desc='Filename of mc samples (ascii text file)') + mcout = File(desc='Filename of mc samples (ascii text file)') class FitDwi(NiftyFitCommand): @@ -297,63 +297,63 @@ def _format_arg(self, name, trait_spec, value): class DwiToolInputSpec(CommandLineInputSpec): """ Input Spec for DwiTool. """ desc = 'The source image containing the fitted model.' - source_file = traits.File( + source_file = File( position=1, exists=True, desc=desc, argstr='-source %s', mandatory=True) desc = 'The file containing the bvalues of the source DWI.' - bval_file = traits.File( + bval_file = File( position=2, exists=True, desc=desc, argstr='-bval %s', mandatory=True) desc = 'The file containing the bvectors of the source DWI.' 
- bvec_file = traits.File( + bvec_file = File( position=3, exists=True, desc=desc, argstr='-bvec %s') - b0_file = traits.File( + b0_file = File( position=4, exists=True, desc='The B0 image corresponding to the source DWI', argstr='-b0 %s') - mask_file = traits.File( + mask_file = File( position=5, exists=True, desc='The image mask', argstr='-mask %s') # Output options, with templated output names based on the source image desc = 'Filename of multi-compartment model parameter map \ (-ivim,-ball,-nod)' - mcmap_file = traits.File( + mcmap_file = File( name_source=['source_file'], name_template='%s_mcmap.nii.gz', desc=desc, argstr='-mcmap %s') desc = 'Filename of synthetic image. Requires: bvec_file/b0_file.' - syn_file = traits.File( + syn_file = File( name_source=['source_file'], name_template='%s_syn.nii.gz', desc=desc, argstr='-syn %s', requires=['bvec_file', 'b0_file']) - mdmap_file = traits.File( + mdmap_file = File( name_source=['source_file'], name_template='%s_mdmap.nii.gz', desc='Filename of MD map/ADC', argstr='-mdmap %s') - famap_file = traits.File( + famap_file = File( name_source=['source_file'], name_template='%s_famap.nii.gz', desc='Filename of FA map', argstr='-famap %s') - v1map_file = traits.File( + v1map_file = File( name_source=['source_file'], name_template='%s_v1map.nii.gz', desc='Filename of PDD map [x,y,z]', argstr='-v1map %s') - rgbmap_file = traits.File( + rgbmap_file = File( name_source=['source_file'], name_template='%s_rgbmap.nii.gz', desc='Filename of colour FA map.', argstr='-rgbmap %s') - logdti_file = traits.File( + logdti_file = File( name_source=['source_file'], name_template='%s_logdti2.nii.gz', desc='Filename of output logdti map.', @@ -442,13 +442,13 @@ class DwiToolOutputSpec(TraitedSpec): desc = 'Filename of multi-compartment model parameter map \ (-ivim,-ball,-nod)' - mcmap_file = traits.File(desc=desc) - syn_file = traits.File(desc='Filename of synthetic image') - mdmap_file = traits.File(desc='Filename of MD map/ADC') - famap_file = traits.File(desc='Filename of FA map') - v1map_file = traits.File(desc='Filename of PDD map [x,y,z]') - rgbmap_file = traits.File(desc='Filename of colour FA map') - logdti_file = traits.File(desc='Filename of output logdti map') + mcmap_file = File(desc=desc) + syn_file = File(desc='Filename of synthetic image') + mdmap_file = File(desc='Filename of MD map/ADC') + famap_file = File(desc='Filename of FA map') + v1map_file = File(desc='Filename of PDD map [x,y,z]') + rgbmap_file = File(desc='Filename of colour FA map') + logdti_file = File(desc='Filename of output logdti map') class DwiTool(NiftyFitCommand): diff --git a/nipype/interfaces/niftyfit/qt1.py b/nipype/interfaces/niftyfit/qt1.py index ceefbae281..9df8034526 100644 --- a/nipype/interfaces/niftyfit/qt1.py +++ b/nipype/interfaces/niftyfit/qt1.py @@ -109,11 +109,11 @@ class FitQt1InputSpec(CommandLineInputSpec): desc='Inversion times for T1 data [1s,2s,5s].', argstr='-TIs %s', sep=' ') - tis_list = traits.File( + tis_list = File( exists=True, argstr='-TIlist %s', desc='Filename of list of pre-defined TIs.') - t1_list = traits.File( + t1_list = File( exists=True, argstr='-T1list %s', desc='Filename of list of pre-defined T1s') @@ -127,12 +127,12 @@ class FitQt1InputSpec(CommandLineInputSpec): flips = traits.List( traits.Float, desc='Flip angles', argstr='-flips %s', sep=' ') desc = 'Filename of list of pre-defined flip angles (deg).' 
- flips_list = traits.File(exists=True, argstr='-fliplist %s', desc=desc) + flips_list = File(exists=True, argstr='-fliplist %s', desc=desc) desc = 'Filename of B1 estimate for fitting (or include in prior).' - b1map = traits.File(exists=True, argstr='-b1map %s', desc=desc) + b1map = File(exists=True, argstr='-b1map %s', desc=desc) # MCMC options: - mcout = traits.File( + mcout = File( exists=True, desc='Filename of mc samples (ascii text file)', argstr='-mcout %s') diff --git a/nipype/interfaces/nipy/model.py b/nipype/interfaces/nipy/model.py index 0c2ddf4334..c26168e7b3 100644 --- a/nipype/interfaces/nipy/model.py +++ b/nipype/interfaces/nipy/model.py @@ -51,7 +51,7 @@ class FitGLMInputSpec(BaseInterfaceInputSpec): "is more time consuming but it supports " "autoregressive model"), usedefault=True) - mask = traits.File( + mask = File( exists=True, desc=("restrict the fitting only to the region defined " "by this mask")) @@ -72,7 +72,7 @@ class FitGLMOutputSpec(TraitedSpec): constants = traits.Any() axis = traits.Any() reg_names = traits.List() - residuals = traits.File() + residuals = File() a = File(exists=True) @@ -264,7 +264,7 @@ class EstimateContrastInputSpec(BaseInterfaceInputSpec): constants = traits.Any(mandatory=True) axis = traits.Any(mandatory=True) reg_names = traits.List(mandatory=True) - mask = traits.File(exists=True) + mask = File(exists=True) class EstimateContrastOutputSpec(TraitedSpec): diff --git a/nipype/interfaces/utility/base.py b/nipype/interfaces/utility/base.py index 864951f36a..d01a0a17b9 100644 --- a/nipype/interfaces/utility/base.py +++ b/nipype/interfaces/utility/base.py @@ -216,7 +216,7 @@ class RenameInputSpec(DynamicTraitedSpec): class RenameOutputSpec(TraitedSpec): - out_file = traits.File( + out_file = File( exists=True, desc="softlink to original file with new name") diff --git a/nipype/interfaces/utility/tests/test_wrappers.py b/nipype/interfaces/utility/tests/test_wrappers.py index 392ae094b0..380d840cee 100644 --- a/nipype/interfaces/utility/tests/test_wrappers.py +++ b/nipype/interfaces/utility/tests/test_wrappers.py @@ -51,7 +51,7 @@ def increment_array(in_array): def make_random_array(size): - return np.random.randn(size, size) + return np.random.randn(size, size) # noqa def should_fail(tmp): diff --git a/nipype/pipeline/engine/tests/test_base.py b/nipype/pipeline/engine/tests/test_base.py index fd87aa6878..3513152c06 100644 --- a/nipype/pipeline/engine/tests/test_base.py +++ b/nipype/pipeline/engine/tests/test_base.py @@ -13,7 +13,7 @@ class InputSpec(nib.TraitedSpec): input1 = nib.traits.Int(desc='a random int') input2 = nib.traits.Int(desc='a random int') - input_file = nib.traits.File(desc='Random File') + input_file = nib.File(desc='Random File') class OutputSpec(nib.TraitedSpec): From ad10517c362da6d8aa5b4c074a96df84a53dc722 Mon Sep 17 00:00:00 2001 From: oesteban Date: Fri, 19 Jul 2019 16:51:46 -0700 Subject: [PATCH 0282/1665] fix: potential fix for #2968 in the context of resolving/rebasing paths --- nipype/interfaces/base/traits_extension.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nipype/interfaces/base/traits_extension.py b/nipype/interfaces/base/traits_extension.py index 64bf45755f..76f62e27d1 100644 --- a/nipype/interfaces/base/traits_extension.py +++ b/nipype/interfaces/base/traits_extension.py @@ -539,10 +539,10 @@ def _recurse_on_path_traits(func, thistrait, value, cwd): elif thistrait.is_trait_type(traits.List): innertrait, = thistrait.inner_traits if not isinstance(value, (list, tuple)): - value = 
_recurse_on_path_traits(func, innertrait, value, cwd) - else: - value = [_recurse_on_path_traits(func, innertrait, v, cwd) - for v in value] + value = [value] + + value = [_recurse_on_path_traits(func, innertrait, v, cwd) + for v in value] elif thistrait.is_trait_type(traits.Dict): _, innertrait = thistrait.inner_traits value = {k: _recurse_on_path_traits(func, innertrait, v, cwd) From 83380fbc927fc317af5f700d6e95d308e770c51c Mon Sep 17 00:00:00 2001 From: oesteban Date: Tue, 30 Jul 2019 21:47:14 -0700 Subject: [PATCH 0283/1665] fix(Either): better implementation that allows ``make specs`` --- nipype/interfaces/base/traits_extension.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/nipype/interfaces/base/traits_extension.py b/nipype/interfaces/base/traits_extension.py index 76f62e27d1..69a66a3470 100644 --- a/nipype/interfaces/base/traits_extension.py +++ b/nipype/interfaces/base/traits_extension.py @@ -489,14 +489,19 @@ class Either(TraitType): def __init__(self, *traits, **metadata): """Create a trait whose value can be any of of a specified list of traits.""" - metadata['alternatives'] = tuple(trait_from(t) for t in traits) + _either_traits = tuple(trait_from(t) for t in traits) self.trait_maker = _TraitMaker( metadata.pop("default", None), *traits, **metadata) + self.either_traits = _either_traits def as_ctrait(self): """Return a CTrait corresponding to the trait defined by this class.""" return self.trait_maker.as_ctrait() + def inner_traits(self): + """Return the *inner trait* (or traits) for this trait.""" + return self.either_traits + traits.Tuple = Tuple traits.Either = Either @@ -550,14 +555,14 @@ def _recurse_on_path_traits(func, thistrait, value, cwd): elif thistrait.is_trait_type(Tuple): value = tuple([_recurse_on_path_traits(func, subtrait, v, cwd) for subtrait, v in zip(thistrait.inner_traits, value)]) - elif thistrait.alternatives: + elif thistrait.is_trait_type(Either): is_str = [f.is_trait_type((traits.String, traits.BaseStr, traits.BaseBytes, Str)) - for f in thistrait.alternatives] + for f in thistrait.inner_traits] if any(is_str) and isinstance(value, (bytes, str)) and not value.startswith('/'): return value - is_basepath = [f.is_trait_type(BasePath) for f in thistrait.alternatives] + is_basepath = [f.is_trait_type(BasePath) for f in thistrait.inner_traits] if any(is_basepath): - subtrait = thistrait.alternatives[is_basepath.index(True)] + subtrait = thistrait.inner_traits[is_basepath.index(True)] value = _recurse_on_path_traits(func, subtrait, value, cwd) return value From cf0403be7f0f60bf4119090df7d1007617b24e7e Mon Sep 17 00:00:00 2001 From: oesteban Date: Tue, 30 Jul 2019 23:37:41 -0700 Subject: [PATCH 0284/1665] fix(traits): avoid redefining ``Either`` and ``Tuple`` and new test-cases --- .../base/tests/test_traits_extension.py | 29 +++++++++-- nipype/interfaces/base/traits_extension.py | 52 +++++-------------- 2 files changed, 38 insertions(+), 43 deletions(-) diff --git a/nipype/interfaces/base/tests/test_traits_extension.py b/nipype/interfaces/base/tests/test_traits_extension.py index 832f8f9592..0a4a23905b 100644 --- a/nipype/interfaces/base/tests/test_traits_extension.py +++ b/nipype/interfaces/base/tests/test_traits_extension.py @@ -13,10 +13,13 @@ class _test_spec(nib.TraitedSpec): c = nib.traits.List(nib.File()) d = nib.Either(nib.File(), nib.traits.Float()) e = nib.OutputMultiObject(nib.File()) + ee = nib.OutputMultiObject(nib.Str) f = nib.traits.Dict(nib.Str, nib.File()) g = nib.Either(nib.File, nib.Str) h = nib.Str - 
ee = nib.OutputMultiObject(nib.Str) + i = nib.Either(nib.File, nib.Tuple(nib.File, nib.traits.Int)) + j = nib.Either(nib.File, nib.Tuple(nib.File, nib.traits.Int), + nib.traits.Dict(nib.Str, nib.File())) def test_rebase_path_traits(): @@ -58,6 +61,11 @@ def test_rebase_path_traits(): '/some/path') assert e == [[Path('f1.txt'), Path('f2.txt')], [[Path('f3.txt')]]] + ee = rebase_path_traits( + spec.trait('ee'), [['/some/path/f1.txt', '/some/path/f2.txt'], [['/some/path/f3.txt']]], + '/some/path') + assert ee == [['/some/path/f1.txt', '/some/path/f2.txt'], [['/some/path/f3.txt']]] + f = rebase_path_traits( spec.trait('f'), {'1': '/some/path/f1.txt'}, '/some/path') assert f == {'1': Path('f1.txt')} @@ -79,10 +87,21 @@ def test_rebase_path_traits(): h = rebase_path_traits(spec.trait('h'), '2', '/some/path') assert h == '2' - ee = rebase_path_traits( - spec.trait('ee'), [['/some/path/f1.txt', '/some/path/f2.txt'], [['/some/path/f3.txt']]], - '/some/path') - assert ee == [['/some/path/f1.txt', '/some/path/f2.txt'], [['/some/path/f3.txt']]] + i = rebase_path_traits(spec.trait('i'), '/some/path/either/file.txt', '/some/path') + assert '%s' % i == 'either/file.txt' + + i = rebase_path_traits(spec.trait('i'), ('/some/path/either/tuple/file.txt', 2), '/some/path') + assert ('%s' % i[0], i[1]) == ('either/tuple/file.txt', 2) + + j = rebase_path_traits(spec.trait('j'), '/some/path/either/file.txt', '/some/path') + assert '%s' % j == 'either/file.txt' + + j = rebase_path_traits(spec.trait('j'), ('/some/path/either/tuple/file.txt', 2), '/some/path') + assert ('%s' % j[0], j[1]) == ('either/tuple/file.txt', 2) + + j = rebase_path_traits(spec.trait('j'), {'a': '/some/path/either/dict/file.txt'}, + '/some/path') + assert j == {'a': Path('either/dict/file.txt')} def test_resolve_path_traits(): diff --git a/nipype/interfaces/base/traits_extension.py b/nipype/interfaces/base/traits_extension.py index 69a66a3470..f6999480f5 100644 --- a/nipype/interfaces/base/traits_extension.py +++ b/nipype/interfaces/base/traits_extension.py @@ -471,36 +471,12 @@ class InputMultiObject(MultiObject): OutputMultiPath = OutputMultiObject -class Tuple(traits.BaseTuple): - """Defines a new type of Tuple trait that reports inner types.""" - - def init_fast_validator(self, *args): - """Set up the C-level fast validator.""" - super(Tuple, self).init_fast_validator(*args) - self.fast_validate = args - - def inner_traits(self): - """Return the *inner trait* (or traits) for this trait.""" - return self.types - - -class Either(TraitType): - """Defines a trait whose value can be any of of a specified list of traits.""" - - def __init__(self, *traits, **metadata): - """Create a trait whose value can be any of of a specified list of traits.""" - _either_traits = tuple(trait_from(t) for t in traits) - self.trait_maker = _TraitMaker( - metadata.pop("default", None), *traits, **metadata) - self.either_traits = _either_traits +class Tuple(traits.Tuple): + pass - def as_ctrait(self): - """Return a CTrait corresponding to the trait defined by this class.""" - return self.trait_maker.as_ctrait() - def inner_traits(self): - """Return the *inner trait* (or traits) for this trait.""" - return self.either_traits +class Either(traits.Either): + pass traits.Tuple = Tuple @@ -548,22 +524,22 @@ def _recurse_on_path_traits(func, thistrait, value, cwd): value = [_recurse_on_path_traits(func, innertrait, v, cwd) for v in value] - elif thistrait.is_trait_type(traits.Dict): + elif isinstance(value, dict) and thistrait.is_trait_type(traits.Dict): _, innertrait = 
thistrait.inner_traits value = {k: _recurse_on_path_traits(func, innertrait, v, cwd) for k, v in value.items()} - elif thistrait.is_trait_type(Tuple): + elif isinstance(value, tuple) and thistrait.is_trait_type(Tuple): value = tuple([_recurse_on_path_traits(func, subtrait, v, cwd) - for subtrait, v in zip(thistrait.inner_traits, value)]) - elif thistrait.is_trait_type(Either): - is_str = [f.is_trait_type((traits.String, traits.BaseStr, traits.BaseBytes, Str)) - for f in thistrait.inner_traits] + for subtrait, v in zip(thistrait.handler.types, value)]) + elif thistrait.is_trait_type(traits.TraitCompound): + is_str = [isinstance(f, (traits.String, traits.BaseStr, traits.BaseBytes, Str)) + for f in thistrait.handler.handlers] if any(is_str) and isinstance(value, (bytes, str)) and not value.startswith('/'): return value - is_basepath = [f.is_trait_type(BasePath) for f in thistrait.inner_traits] - if any(is_basepath): - subtrait = thistrait.inner_traits[is_basepath.index(True)] - value = _recurse_on_path_traits(func, subtrait, value, cwd) + + for subtrait in thistrait.handler.handlers: + value = _recurse_on_path_traits(func, subtrait(), value, cwd) + return value From 676f5666aebadde8b8d307c7b4fc310a16d6bd7e Mon Sep 17 00:00:00 2001 From: oesteban Date: Wed, 31 Jul 2019 08:26:10 -0700 Subject: [PATCH 0285/1665] fix: final cleanup after rebase --- nipype/interfaces/base/__init__.py | 2 +- .../base/tests/test_traits_extension.py | 12 +++++------ nipype/interfaces/base/traits_extension.py | 20 +------------------ 3 files changed, 8 insertions(+), 26 deletions(-) diff --git a/nipype/interfaces/base/__init__.py b/nipype/interfaces/base/__init__.py index 672b30178b..a846794561 100644 --- a/nipype/interfaces/base/__init__.py +++ b/nipype/interfaces/base/__init__.py @@ -22,7 +22,7 @@ from .traits_extension import ( traits, Undefined, isdefined, has_metadata, File, ImageFile, Directory, - Tuple, Either, Str, DictStrStr, + Str, DictStrStr, OutputMultiObject, InputMultiObject, OutputMultiPath, InputMultiPath) diff --git a/nipype/interfaces/base/tests/test_traits_extension.py b/nipype/interfaces/base/tests/test_traits_extension.py index 0a4a23905b..1c60e8cc85 100644 --- a/nipype/interfaces/base/tests/test_traits_extension.py +++ b/nipype/interfaces/base/tests/test_traits_extension.py @@ -9,17 +9,17 @@ class _test_spec(nib.TraitedSpec): a = nib.File() - b = nib.Tuple(nib.File(), nib.File()) + b = nib.traits.Tuple(nib.File(), nib.File()) c = nib.traits.List(nib.File()) - d = nib.Either(nib.File(), nib.traits.Float()) + d = nib.traits.Either(nib.File(), nib.traits.Float()) e = nib.OutputMultiObject(nib.File()) ee = nib.OutputMultiObject(nib.Str) f = nib.traits.Dict(nib.Str, nib.File()) - g = nib.Either(nib.File, nib.Str) + g = nib.traits.Either(nib.File, nib.Str) h = nib.Str - i = nib.Either(nib.File, nib.Tuple(nib.File, nib.traits.Int)) - j = nib.Either(nib.File, nib.Tuple(nib.File, nib.traits.Int), - nib.traits.Dict(nib.Str, nib.File())) + i = nib.traits.Either(nib.File, nib.traits.Tuple(nib.File, nib.traits.Int)) + j = nib.traits.Either(nib.File, nib.traits.Tuple(nib.File, nib.traits.Int), + nib.traits.Dict(nib.Str, nib.File())) def test_rebase_path_traits(): diff --git a/nipype/interfaces/base/traits_extension.py b/nipype/interfaces/base/traits_extension.py index f6999480f5..2a824c1fa0 100644 --- a/nipype/interfaces/base/traits_extension.py +++ b/nipype/interfaces/base/traits_extension.py @@ -30,7 +30,6 @@ import traits.api as traits from traits.trait_handlers import TraitType, NoDefaultSpecified from 
traits.trait_base import _Undefined -from traits.traits import _TraitMaker, trait_from from traits.api import Unicode from future import standard_library @@ -39,11 +38,6 @@ if USING_PATHLIB2: from future.types.newstr import newstr -try: - from pathlib import Path -except ImportError: - from pathlib2 import Path - if traits_version < '3.7.0': raise ImportError('Traits version 3.7.0 or higher must be installed') @@ -471,18 +465,6 @@ class InputMultiObject(MultiObject): OutputMultiPath = OutputMultiObject -class Tuple(traits.Tuple): - pass - - -class Either(traits.Either): - pass - - -traits.Tuple = Tuple -traits.Either = Either - - def _rebase_path(value, cwd): if isinstance(value, list): return [_rebase_path(v, cwd) for v in value] @@ -528,7 +510,7 @@ def _recurse_on_path_traits(func, thistrait, value, cwd): _, innertrait = thistrait.inner_traits value = {k: _recurse_on_path_traits(func, innertrait, v, cwd) for k, v in value.items()} - elif isinstance(value, tuple) and thistrait.is_trait_type(Tuple): + elif isinstance(value, tuple) and thistrait.is_trait_type(traits.Tuple): value = tuple([_recurse_on_path_traits(func, subtrait, v, cwd) for subtrait, v in zip(thistrait.handler.types, value)]) elif thistrait.is_trait_type(traits.TraitCompound): From 17e48d7288676a2044802f404f11db90a795f8de Mon Sep 17 00:00:00 2001 From: oesteban Date: Wed, 31 Jul 2019 11:52:29 -0700 Subject: [PATCH 0286/1665] fix(tests): combine rebase/resolve tests to check idempotence --- .../base/tests/test_traits_extension.py | 232 +++++++++--------- 1 file changed, 118 insertions(+), 114 deletions(-) diff --git a/nipype/interfaces/base/tests/test_traits_extension.py b/nipype/interfaces/base/tests/test_traits_extension.py index 1c60e8cc85..6f6fb6edc1 100644 --- a/nipype/interfaces/base/tests/test_traits_extension.py +++ b/nipype/interfaces/base/tests/test_traits_extension.py @@ -1,6 +1,6 @@ -# -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: +"""Check the resolving/rebasing feature of ``BasePath``s.""" from __future__ import print_function, unicode_literals from ... 
import base as nib @@ -22,157 +22,161 @@ class _test_spec(nib.TraitedSpec): nib.traits.Dict(nib.Str, nib.File())) -def test_rebase_path_traits(): - """Check rebase_path_traits.""" +def test_rebase_resolve_path_traits(): + """Check rebase_path_traits and resolve_path_traits and idempotence.""" spec = _test_spec() - a = rebase_path_traits( - spec.trait('a'), '/some/path/f1.txt', '/some/path') + v = '/some/path/f1.txt' + a = rebase_path_traits(spec.trait('a'), v, '/some/path') assert a == Path('f1.txt') - a = rebase_path_traits( - spec.trait('a'), '/some/path/f1.txt', '/some/other/path') - assert a == Path('/some/path/f1.txt') + a = resolve_path_traits(spec.trait('a'), a, '/some/path') + assert a == Path(v) - b = rebase_path_traits( - spec.trait('b'), ('/some/path/f1.txt', '/some/path/f2.txt'), '/some/path') + a = rebase_path_traits(spec.trait('a'), v, '/some/other/path') + assert a == Path(v) + + a = resolve_path_traits(spec.trait('a'), a, '/some/path') + assert a == Path(v) + + v = ('/some/path/f1.txt', '/some/path/f2.txt') + b = rebase_path_traits(spec.trait('b'), v, '/some/path') assert b == (Path('f1.txt'), Path('f2.txt')) - c = rebase_path_traits( - spec.trait('c'), ['/some/path/f1.txt', '/some/path/f2.txt', '/some/path/f3.txt'], - '/some/path') + b = resolve_path_traits(spec.trait('b'), b, '/some/path') + assert b == (Path(v[0]), Path(v[1])) + + v = ['/some/path/f1.txt', '/some/path/f2.txt', '/some/path/f3.txt'] + c = rebase_path_traits(spec.trait('c'), v, '/some/path') assert c == [Path('f1.txt'), Path('f2.txt'), Path('f3.txt')] - d = rebase_path_traits( - spec.trait('d'), 2.0, '/some/path') - assert d == 2.0 + c = resolve_path_traits(spec.trait('c'), c, '/some/path') + assert c == [Path(vp) for vp in v] - d = rebase_path_traits( - spec.trait('d'), '/some/path/either.txt', '/some/path') - assert '%s' % d == 'either.txt' + v = 2.0 + d = rebase_path_traits(spec.trait('d'), v, '/some/path') + assert d == v - e = rebase_path_traits( - spec.trait('e'), ['/some/path/f1.txt', '/some/path/f2.txt', '/some/path/f3.txt'], - '/some/path') - assert e == [Path('f1.txt'), Path('f2.txt'), Path('f3.txt')] + d = resolve_path_traits(spec.trait('d'), d, '/some/path') + assert d == v - e = rebase_path_traits( - spec.trait('e'), [['/some/path/f1.txt', '/some/path/f2.txt'], [['/some/path/f3.txt']]], - '/some/path') - assert e == [[Path('f1.txt'), Path('f2.txt')], [[Path('f3.txt')]]] + v = '/some/path/either.txt' + d = rebase_path_traits(spec.trait('d'), v, '/some/path') + assert d == Path('either.txt') - ee = rebase_path_traits( - spec.trait('ee'), [['/some/path/f1.txt', '/some/path/f2.txt'], [['/some/path/f3.txt']]], - '/some/path') - assert ee == [['/some/path/f1.txt', '/some/path/f2.txt'], [['/some/path/f3.txt']]] + d = resolve_path_traits(spec.trait('d'), d, '/some/path') + assert d == Path(v) - f = rebase_path_traits( - spec.trait('f'), {'1': '/some/path/f1.txt'}, '/some/path') - assert f == {'1': Path('f1.txt')} + v = ['/some/path/f1.txt', '/some/path/f2.txt', '/some/path/f3.txt'] + e = rebase_path_traits(spec.trait('e'), v, '/some/path') + assert e == [Path('f1.txt'), Path('f2.txt'), Path('f3.txt')] - g = rebase_path_traits( - spec.trait('g'), 'some/path/either.txt', '/some/path') - assert '%s' % g == 'some/path/either.txt' + e = resolve_path_traits(spec.trait('e'), e, '/some/path') + assert e == [Path(vp) for vp in v] - g = rebase_path_traits( - spec.trait('g'), '/some/path/either.txt', '/some') - assert '%s' % g == 'path/either.txt' + v = [['/some/path/f1.txt', '/some/path/f2.txt'], 
[['/some/path/f3.txt']]] + e = rebase_path_traits(spec.trait('e'), v, '/some/path') + assert e == [[Path('f1.txt'), Path('f2.txt')], [[Path('f3.txt')]]] - g = rebase_path_traits(spec.trait('g'), 'string', '/some') - assert '%s' % g == 'string' + e = resolve_path_traits(spec.trait('e'), e, '/some/path') + assert e == [[[Path(vpp) for vpp in vp] if isinstance(vp, list) else Path(vp) for vp in inner] + for inner in v] - g = rebase_path_traits(spec.trait('g'), '2', '/some/path') - assert g == '2' # You dont want this one to be a Path + # These are Str - no rebasing/resolving should happen + v = [['/some/path/f1.txt', '/some/path/f2.txt'], [['/some/path/f3.txt']]] + ee = rebase_path_traits(spec.trait('ee'), v, '/some/path') + assert ee == v - h = rebase_path_traits(spec.trait('h'), '2', '/some/path') - assert h == '2' + ee = resolve_path_traits(spec.trait('ee'), [['f1.txt', 'f2.txt'], [['f3.txt']]], '/some/path') + assert ee == [['f1.txt', 'f2.txt'], [['f3.txt']]] - i = rebase_path_traits(spec.trait('i'), '/some/path/either/file.txt', '/some/path') - assert '%s' % i == 'either/file.txt' + v = {'1': '/some/path/f1.txt'} + f = rebase_path_traits(spec.trait('f'), v, '/some') + assert f == {'1': Path('path/f1.txt')} - i = rebase_path_traits(spec.trait('i'), ('/some/path/either/tuple/file.txt', 2), '/some/path') - assert ('%s' % i[0], i[1]) == ('either/tuple/file.txt', 2) + f = resolve_path_traits(spec.trait('f'), f, '/some') + assert f == {k: Path(val) for k, val in v.items()} - j = rebase_path_traits(spec.trait('j'), '/some/path/either/file.txt', '/some/path') - assert '%s' % j == 'either/file.txt' + # Either(Str, File): passing in path-like apply manipulation + v = '/some/path/either.txt' + g = rebase_path_traits(spec.trait('g'), v, '/some/path') + assert g == Path('either.txt') - j = rebase_path_traits(spec.trait('j'), ('/some/path/either/tuple/file.txt', 2), '/some/path') - assert ('%s' % j[0], j[1]) == ('either/tuple/file.txt', 2) + g = resolve_path_traits(spec.trait('g'), g, '/some/path') + assert g == Path(v) - j = rebase_path_traits(spec.trait('j'), {'a': '/some/path/either/dict/file.txt'}, - '/some/path') - assert j == {'a': Path('either/dict/file.txt')} + g = rebase_path_traits(spec.trait('g'), v, '/some') + assert g == Path('path/either.txt') + g = resolve_path_traits(spec.trait('g'), g, '/some') + assert g == Path(v) -def test_resolve_path_traits(): - """Check resolve_path_traits.""" - spec = _test_spec() + # Either(Str, File): passing str discards File + v = 'either.txt' + g = rebase_path_traits(spec.trait('g'), v, '/some/path') + assert g == v + + # This is a problematic case, it is impossible to know whether this + # was meant to be a string or a file. + # In this implementation, strings take precedence + g = resolve_path_traits(spec.trait('g'), g, '/some/path') + assert g == v - a = resolve_path_traits( - spec.trait('a'), 'f1.txt', '/some/path') - assert a == Path('/some/path/f1.txt') + v = 'string' + g = rebase_path_traits(spec.trait('g'), v, '/some') + assert g == v - a = resolve_path_traits( - spec.trait('a'), '/already/absolute/f1.txt', '/some/path') - assert a == Path('/already/absolute/f1.txt') + # This is a problematic case, it is impossible to know whether this + # was meant to be a string or a file. 
+ g = resolve_path_traits(spec.trait('g'), v, '/some') + assert g == v - b = resolve_path_traits( - spec.trait('b'), ('f1.txt', 'f2.txt'), '/some/path') - assert b == (Path('/some/path/f1.txt'), Path('/some/path/f2.txt')) + v = v + g = rebase_path_traits(spec.trait('g'), v, '/some/path') + assert g == v # You dont want this one to be a Path - c = resolve_path_traits( - spec.trait('c'), ['f1.txt', 'f2.txt', 'f3.txt'], - '/some/path') - assert c == [Path('/some/path/f1.txt'), Path('/some/path/f2.txt'), Path('/some/path/f3.txt')] + # This is a problematic case, it is impossible to know whether this + # was meant to be a string or a file. + g = resolve_path_traits(spec.trait('g'), g, '/some/path') + assert g == v # You dont want this one to be a Path - d = resolve_path_traits( - spec.trait('d'), 2.0, '/some/path') - assert d == 2.0 + h = rebase_path_traits(spec.trait('h'), v, '/some/path') + assert h == v - d = resolve_path_traits( - spec.trait('d'), 'either.txt', '/some/path') - assert '%s' % d == '/some/path/either.txt' + h = resolve_path_traits(spec.trait('h'), h, '/some/path') + assert h == v - e = resolve_path_traits( - spec.trait('e'), ['f1.txt', 'f2.txt', 'f3.txt'], - '/some/path') - assert e == [Path('/some/path/f1.txt'), Path('/some/path/f2.txt'), Path('/some/path/f3.txt')] + v = '/some/path/either/file.txt' + i = rebase_path_traits(spec.trait('i'), v, '/some/path') + assert i == Path('either/file.txt') - e = resolve_path_traits( - spec.trait('e'), [['f1.txt', 'f2.txt'], [['f3.txt']]], - '/some/path') - assert e == [[Path('/some/path/f1.txt'), Path('/some/path/f2.txt')], - [[Path('/some/path/f3.txt')]]] + i = resolve_path_traits(spec.trait('i'), i, '/some/path') + assert i == Path(v) - f = resolve_path_traits( - spec.trait('f'), {'1': 'path/f1.txt'}, '/some') - assert f == {'1': Path('/some/path/f1.txt')} + v = ('/some/path/either/tuple/file.txt', 2) + i = rebase_path_traits(spec.trait('i'), v, '/some/path') + assert i == (Path('either/tuple/file.txt'), 2) - g = resolve_path_traits( - spec.trait('g'), '/either.txt', '/some/path') - assert g == Path('/either.txt') + i = resolve_path_traits(spec.trait('i'), i, '/some/path') + assert i == (Path(v[0]), v[1]) - # This is a problematic case, it is impossible to know whether this - # was meant to be a string or a file. - # In this implementation, strings take precedence - g = resolve_path_traits( - spec.trait('g'), 'path/either.txt', '/some') - assert g == 'path/either.txt' + v = '/some/path/either/file.txt' + j = rebase_path_traits(spec.trait('j'), v, '/some/path') + assert j == Path('either/file.txt') - # This is a problematic case, it is impossible to know whether this - # was meant to be a string or a file. - g = resolve_path_traits(spec.trait('g'), 'string', '/some') - assert g == 'string' + j = resolve_path_traits(spec.trait('j'), j, '/some/path') + assert j == Path(v) - # This is a problematic case, it is impossible to know whether this - # was meant to be a string or a file. 
- g = resolve_path_traits(spec.trait('g'), '2', '/some/path') - assert g == '2' # You dont want this one to be a Path + v = ('/some/path/either/tuple/file.txt', 2) + j = rebase_path_traits(spec.trait('j'), ('/some/path/either/tuple/file.txt', 2), '/some/path') + assert j == (Path('either/tuple/file.txt'), 2) - h = resolve_path_traits(spec.trait('h'), '2', '/some/path') - assert h == '2' + j = resolve_path_traits(spec.trait('j'), j, '/some/path') + assert j == (Path(v[0]), v[1]) - ee = resolve_path_traits( - spec.trait('ee'), [['f1.txt', 'f2.txt'], [['f3.txt']]], - '/some/path') - assert ee == [['f1.txt', 'f2.txt'], [['f3.txt']]] + v = {'a': '/some/path/either/dict/file.txt'} + j = rebase_path_traits(spec.trait('j'), v, '/some/path') + assert j == {'a': Path('either/dict/file.txt')} + + j = resolve_path_traits(spec.trait('j'), j, '/some/path') + assert j == {k: Path(val) for k, val in v.items()} From eb24c6b3b32a8d4735ff15778f649aae166721e1 Mon Sep 17 00:00:00 2001 From: oesteban Date: Wed, 31 Jul 2019 12:43:57 -0700 Subject: [PATCH 0287/1665] tests: add real idempotence tests --- .../base/tests/test_traits_extension.py | 120 ++++++++++++++++++ 1 file changed, 120 insertions(+) diff --git a/nipype/interfaces/base/tests/test_traits_extension.py b/nipype/interfaces/base/tests/test_traits_extension.py index 6f6fb6edc1..0834bc63f7 100644 --- a/nipype/interfaces/base/tests/test_traits_extension.py +++ b/nipype/interfaces/base/tests/test_traits_extension.py @@ -30,29 +30,53 @@ def test_rebase_resolve_path_traits(): a = rebase_path_traits(spec.trait('a'), v, '/some/path') assert a == Path('f1.txt') + # Idempotence + assert rebase_path_traits(spec.trait('a'), a, '/some/path') == a + a = resolve_path_traits(spec.trait('a'), a, '/some/path') assert a == Path(v) + # Idempotence + assert resolve_path_traits(spec.trait('a'), a, '/some/path') == a + a = rebase_path_traits(spec.trait('a'), v, '/some/other/path') assert a == Path(v) + # Idempotence + assert rebase_path_traits(spec.trait('a'), a, '/some/other/path') == a + a = resolve_path_traits(spec.trait('a'), a, '/some/path') assert a == Path(v) + # Idempotence + assert resolve_path_traits(spec.trait('a'), a, '/some/path') == a + v = ('/some/path/f1.txt', '/some/path/f2.txt') b = rebase_path_traits(spec.trait('b'), v, '/some/path') assert b == (Path('f1.txt'), Path('f2.txt')) + # Idempotence + assert rebase_path_traits(spec.trait('b'), b, '/some/path') == b + b = resolve_path_traits(spec.trait('b'), b, '/some/path') assert b == (Path(v[0]), Path(v[1])) + # Idempotence + assert resolve_path_traits(spec.trait('b'), b, '/some/path') == b + v = ['/some/path/f1.txt', '/some/path/f2.txt', '/some/path/f3.txt'] c = rebase_path_traits(spec.trait('c'), v, '/some/path') assert c == [Path('f1.txt'), Path('f2.txt'), Path('f3.txt')] + # Idempotence + assert rebase_path_traits(spec.trait('c'), c, '/some/path') == c + c = resolve_path_traits(spec.trait('c'), c, '/some/path') assert c == [Path(vp) for vp in v] + # Idempotence + assert resolve_path_traits(spec.trait('c'), c, '/some/path') == c + v = 2.0 d = rebase_path_traits(spec.trait('d'), v, '/some/path') assert d == v @@ -64,119 +88,215 @@ def test_rebase_resolve_path_traits(): d = rebase_path_traits(spec.trait('d'), v, '/some/path') assert d == Path('either.txt') + # Idempotence + assert rebase_path_traits(spec.trait('d'), d, '/some/path') == d + d = resolve_path_traits(spec.trait('d'), d, '/some/path') assert d == Path(v) + # Idempotence + assert resolve_path_traits(spec.trait('d'), d, '/some/path') == d + v = 
['/some/path/f1.txt', '/some/path/f2.txt', '/some/path/f3.txt'] e = rebase_path_traits(spec.trait('e'), v, '/some/path') assert e == [Path('f1.txt'), Path('f2.txt'), Path('f3.txt')] + # Idempotence + assert rebase_path_traits(spec.trait('e'), e, '/some/path') == e + e = resolve_path_traits(spec.trait('e'), e, '/some/path') assert e == [Path(vp) for vp in v] + # Idempotence + assert resolve_path_traits(spec.trait('e'), e, '/some/path') == e + v = [['/some/path/f1.txt', '/some/path/f2.txt'], [['/some/path/f3.txt']]] e = rebase_path_traits(spec.trait('e'), v, '/some/path') assert e == [[Path('f1.txt'), Path('f2.txt')], [[Path('f3.txt')]]] + # Idempotence + assert rebase_path_traits(spec.trait('e'), e, '/some/path') == e + e = resolve_path_traits(spec.trait('e'), e, '/some/path') assert e == [[[Path(vpp) for vpp in vp] if isinstance(vp, list) else Path(vp) for vp in inner] for inner in v] + # Idempotence + assert resolve_path_traits(spec.trait('e'), e, '/some/path') == e + # These are Str - no rebasing/resolving should happen v = [['/some/path/f1.txt', '/some/path/f2.txt'], [['/some/path/f3.txt']]] ee = rebase_path_traits(spec.trait('ee'), v, '/some/path') assert ee == v + # Idempotence + assert rebase_path_traits(spec.trait('ee'), ee, '/some/path') == ee + ee = resolve_path_traits(spec.trait('ee'), [['f1.txt', 'f2.txt'], [['f3.txt']]], '/some/path') assert ee == [['f1.txt', 'f2.txt'], [['f3.txt']]] + # Idempotence + assert resolve_path_traits(spec.trait('ee'), ee, '/some/path') == ee + v = {'1': '/some/path/f1.txt'} f = rebase_path_traits(spec.trait('f'), v, '/some') assert f == {'1': Path('path/f1.txt')} + # Idempotence + assert rebase_path_traits(spec.trait('f'), f, '/some') == f + f = resolve_path_traits(spec.trait('f'), f, '/some') assert f == {k: Path(val) for k, val in v.items()} + # Idempotence + assert resolve_path_traits(spec.trait('f'), f, '/some') == f + # Either(Str, File): passing in path-like apply manipulation v = '/some/path/either.txt' g = rebase_path_traits(spec.trait('g'), v, '/some/path') assert g == Path('either.txt') + # Idempotence + assert rebase_path_traits(spec.trait('g'), g, '/some/path') == g + g = resolve_path_traits(spec.trait('g'), g, '/some/path') assert g == Path(v) + # Idempotence + assert resolve_path_traits(spec.trait('g'), g, '/some/path') == g + g = rebase_path_traits(spec.trait('g'), v, '/some') assert g == Path('path/either.txt') + # Idempotence + assert rebase_path_traits(spec.trait('g'), g, '/some/path') == g + g = resolve_path_traits(spec.trait('g'), g, '/some') assert g == Path(v) + # Idempotence + assert resolve_path_traits(spec.trait('g'), g, '/some/path') == g + # Either(Str, File): passing str discards File v = 'either.txt' g = rebase_path_traits(spec.trait('g'), v, '/some/path') assert g == v + # Idempotence + assert rebase_path_traits(spec.trait('g'), g, '/some/path') == g + # This is a problematic case, it is impossible to know whether this # was meant to be a string or a file. # In this implementation, strings take precedence g = resolve_path_traits(spec.trait('g'), g, '/some/path') assert g == v + # Idempotence + assert resolve_path_traits(spec.trait('g'), g, '/some/path') == g + v = 'string' g = rebase_path_traits(spec.trait('g'), v, '/some') assert g == v + # Idempotence + assert rebase_path_traits(spec.trait('g'), g, '/some') == g + # This is a problematic case, it is impossible to know whether this # was meant to be a string or a file. 
g = resolve_path_traits(spec.trait('g'), v, '/some') assert g == v + # Idempotence + assert resolve_path_traits(spec.trait('g'), g, '/some') == g + v = v g = rebase_path_traits(spec.trait('g'), v, '/some/path') assert g == v # You dont want this one to be a Path + # Idempotence + assert rebase_path_traits(spec.trait('g'), g, '/some/path') == g + # This is a problematic case, it is impossible to know whether this # was meant to be a string or a file. g = resolve_path_traits(spec.trait('g'), g, '/some/path') assert g == v # You dont want this one to be a Path + # Idempotence + assert resolve_path_traits(spec.trait('g'), g, '/some/path') == g + h = rebase_path_traits(spec.trait('h'), v, '/some/path') assert h == v + # Idempotence + assert rebase_path_traits(spec.trait('h'), h, '/some/path') == h + h = resolve_path_traits(spec.trait('h'), h, '/some/path') assert h == v + # Idempotence + assert resolve_path_traits(spec.trait('h'), h, '/some/path') == h + v = '/some/path/either/file.txt' i = rebase_path_traits(spec.trait('i'), v, '/some/path') assert i == Path('either/file.txt') + # Idempotence + assert rebase_path_traits(spec.trait('i'), i, '/some/path') == i + i = resolve_path_traits(spec.trait('i'), i, '/some/path') assert i == Path(v) + # Idempotence + assert resolve_path_traits(spec.trait('i'), i, '/some/path') == i + v = ('/some/path/either/tuple/file.txt', 2) i = rebase_path_traits(spec.trait('i'), v, '/some/path') assert i == (Path('either/tuple/file.txt'), 2) + # Idempotence + assert rebase_path_traits(spec.trait('i'), i, '/some/path') == i + i = resolve_path_traits(spec.trait('i'), i, '/some/path') assert i == (Path(v[0]), v[1]) + # Idempotence + assert resolve_path_traits(spec.trait('i'), i, '/some/path') == i + v = '/some/path/either/file.txt' j = rebase_path_traits(spec.trait('j'), v, '/some/path') assert j == Path('either/file.txt') + # Idempotence + assert rebase_path_traits(spec.trait('j'), j, '/some/path') == j + j = resolve_path_traits(spec.trait('j'), j, '/some/path') assert j == Path(v) + # Idempotence + assert resolve_path_traits(spec.trait('j'), j, '/some/path') == j + v = ('/some/path/either/tuple/file.txt', 2) j = rebase_path_traits(spec.trait('j'), ('/some/path/either/tuple/file.txt', 2), '/some/path') assert j == (Path('either/tuple/file.txt'), 2) + # Idempotence + assert rebase_path_traits(spec.trait('j'), j, '/some/path') == j + j = resolve_path_traits(spec.trait('j'), j, '/some/path') assert j == (Path(v[0]), v[1]) + # Idempotence + assert resolve_path_traits(spec.trait('j'), j, '/some/path') == j + v = {'a': '/some/path/either/dict/file.txt'} j = rebase_path_traits(spec.trait('j'), v, '/some/path') assert j == {'a': Path('either/dict/file.txt')} + # Idempotence + assert rebase_path_traits(spec.trait('j'), j, '/some/path') == j + j = resolve_path_traits(spec.trait('j'), j, '/some/path') assert j == {k: Path(val) for k, val in v.items()} + + # Idempotence + assert resolve_path_traits(spec.trait('j'), j, '/some/path') == j From b0c80d7bd50ad856f0c59a89c967b85fe3ecc781 Mon Sep 17 00:00:00 2001 From: oesteban Date: Wed, 31 Jul 2019 17:22:44 -0700 Subject: [PATCH 0288/1665] fix: cover one more edge case (``nipype.interfaces.base.DictStrStr``) --- .../interfaces/base/tests/test_traits_extension.py | 12 +++++++++++- nipype/interfaces/base/traits_extension.py | 7 ++++++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/base/tests/test_traits_extension.py b/nipype/interfaces/base/tests/test_traits_extension.py index 0834bc63f7..8138d691f3 
100644 --- a/nipype/interfaces/base/tests/test_traits_extension.py +++ b/nipype/interfaces/base/tests/test_traits_extension.py @@ -20,6 +20,7 @@ class _test_spec(nib.TraitedSpec): i = nib.traits.Either(nib.File, nib.traits.Tuple(nib.File, nib.traits.Int)) j = nib.traits.Either(nib.File, nib.traits.Tuple(nib.File, nib.traits.Int), nib.traits.Dict(nib.Str, nib.File())) + k = nib.DictStrStr def test_rebase_resolve_path_traits(): @@ -209,7 +210,6 @@ def test_rebase_resolve_path_traits(): # Idempotence assert resolve_path_traits(spec.trait('g'), g, '/some') == g - v = v g = rebase_path_traits(spec.trait('g'), v, '/some/path') assert g == v # You dont want this one to be a Path @@ -300,3 +300,13 @@ def test_rebase_resolve_path_traits(): # Idempotence assert resolve_path_traits(spec.trait('j'), j, '/some/path') == j + + v = {'path': '/some/path/f1.txt'} + k = rebase_path_traits(spec.trait('k'), v, '/some/path') + assert k == v + + # Idempotence + assert rebase_path_traits(spec.trait('k'), k, '/some/path') == k + + k = resolve_path_traits(spec.trait('k'), k, '/some/path') + assert k == v diff --git a/nipype/interfaces/base/traits_extension.py b/nipype/interfaces/base/traits_extension.py index 2a824c1fa0..585a385feb 100644 --- a/nipype/interfaces/base/traits_extension.py +++ b/nipype/interfaces/base/traits_extension.py @@ -520,7 +520,12 @@ def _recurse_on_path_traits(func, thistrait, value, cwd): return value for subtrait in thistrait.handler.handlers: - value = _recurse_on_path_traits(func, subtrait(), value, cwd) + try: + sb_instance = subtrait() + except TypeError: + return value + else: + value = _recurse_on_path_traits(func, sb_instance, value, cwd) return value From a6dab6161d4890c9bb7395f05d99e2bc14ceca66 Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Thu, 1 Aug 2019 09:02:44 -0700 Subject: [PATCH 0289/1665] Apply suggestions from code review [skip ci] Co-Authored-By: Chris Markiewicz --- nipype/pipeline/engine/tests/test_join.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/pipeline/engine/tests/test_join.py b/nipype/pipeline/engine/tests/test_join.py index 9565bbd353..20b836b26c 100644 --- a/nipype/pipeline/engine/tests/test_join.py +++ b/nipype/pipeline/engine/tests/test_join.py @@ -142,7 +142,7 @@ def _list_outputs(self): return outputs -@pytest.mark.parametrize('needed_outputs', [True, False]) +@pytest.mark.parametrize('needed_outputs', ['true', 'false']) def test_join_expansion(tmpdir, needed_outputs): global _sums global _sum_operands @@ -154,7 +154,7 @@ def test_join_expansion(tmpdir, needed_outputs): _sum_operands = [] _sums = [] - config.set('execution', 'remove_unnecessary_outputs', ['false', 'true'][needed_outputs]) + config.set('execution', 'remove_unnecessary_outputs', needed_outputs) # Make the workflow. 
wf = pe.Workflow(name='test') # the iterated input node From 2e9ecc14eb19095d8fab83b299bdf2458ce67f97 Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 1 Aug 2019 09:10:19 -0700 Subject: [PATCH 0290/1665] fix: reset initial state after test (addresses @effigies' review comment) --- nipype/pipeline/engine/tests/test_join.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nipype/pipeline/engine/tests/test_join.py b/nipype/pipeline/engine/tests/test_join.py index 20b836b26c..8553b79dc9 100644 --- a/nipype/pipeline/engine/tests/test_join.py +++ b/nipype/pipeline/engine/tests/test_join.py @@ -154,6 +154,7 @@ def test_join_expansion(tmpdir, needed_outputs): _sum_operands = [] _sums = [] + prev_state = config.get('execution', 'remove_unnecessary_outputs') config.set('execution', 'remove_unnecessary_outputs', needed_outputs) # Make the workflow. wf = pe.Workflow(name='test') @@ -203,7 +204,7 @@ def test_join_expansion(tmpdir, needed_outputs): # there are two iterations of the post-join node in the iterable path assert len(_products) == 2,\ "The number of iterated post-join outputs is incorrect" - + config.set('execution', 'remove_unnecessary_outputs', prev_state) def test_node_joinsource(tmpdir): From c8a37979456b75db0c6e0c1c65bba559a59131cd Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 1 Aug 2019 12:48:02 -0700 Subject: [PATCH 0291/1665] FIX: Correctly pickle ``OuputMulti{Object,Path}`` traits It seems that #2944 has uncovered a rats-nest hidden in the engine. In resolving that issue, I found out that a great deal of boilerplate was set in place when loading/saving results to deal with ``OutputMulti{Object,Path}`` traits. The reason being that these traits flatten single-element-list values. This PR fixes the pickling behavior of traited specs containing these types of traits. Additionally, this PR also avoids the ``modify_paths`` function that was causing problems originally in #2944. Therefore, this PR effectively make results files static, meaning: caching if the ``base_dir`` of the workflow is changed will not work anymore. I plan to re-insert this feature (results file mobility) with #2971. This PR is just to split that one in more digestible bits. All the boilerplate mentioned above has been cleaned up. --- nipype/interfaces/base/specs.py | 28 ++++++++++ nipype/pipeline/engine/utils.py | 91 ++------------------------------- 2 files changed, 31 insertions(+), 88 deletions(-) diff --git a/nipype/interfaces/base/specs.py b/nipype/interfaces/base/specs.py index 5028d5c30a..30d191163f 100644 --- a/nipype/interfaces/base/specs.py +++ b/nipype/interfaces/base/specs.py @@ -30,6 +30,8 @@ Undefined, isdefined, has_metadata, + OutputMultiObject, + OutputMultiPath, ) from ... import config, __version__ @@ -316,6 +318,32 @@ def _get_sorteddict(self, def __all__(self): return self.copyable_trait_names() + def __getstate__(self): + """ + Override __getstate__ so that OutputMultiObjects are correctly pickled. + + >>> class OutputSpec(TraitedSpec): + ... 
out = OutputMultiObject(traits.List(traits.Int)) + >>> spec = OutputSpec() + >>> spec.out = [[4]] + >>> spec.out + [4] + + >>> spec.__getstate__()['out'] + [[4]] + + >>> spec.__setstate__(spec.__getstate__()) + >>> spec.out + [4] + + """ + state = super(BaseTraitedSpec, self).__getstate__() + for key in self.__all__: + _trait_spec = self.trait(key) + if _trait_spec.is_trait_type((OutputMultiObject, OutputMultiPath)): + state[key] = _trait_spec.handler.get_value(self, key) + return state + def _deepcopypatch(self, memo): """ diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index 3d961126d5..b6c0afda64 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -226,87 +226,12 @@ def write_report(node, report_type=None, is_mapnode=False): return -def _identify_collapses(hastraits): - """ Identify traits that will collapse when being set to themselves. - - ``OutputMultiObject``s automatically unwrap a list of length 1 to directly - reference the element of that list. - If that element is itself a list of length 1, then the following will - result in modified values. - - hastraits.trait_set(**hastraits.trait_get()) - - Cloning performs this operation on a copy of the original traited object, - allowing us to identify traits that will be affected. - """ - raw = hastraits.trait_get() - cloned = hastraits.clone_traits().trait_get() - - collapsed = set() - for key in cloned: - orig = raw[key] - new = cloned[key] - # Allow numpy to handle the equality checks, as mixed lists and arrays - # can be problematic. - if isinstance(orig, list) and len(orig) == 1 and ( - not np.array_equal(orig, new) and np.array_equal(orig[0], new)): - collapsed.add(key) - - return collapsed - - -def _uncollapse(indexable, collapsed): - """ Wrap collapsible values in a list to prevent double-collapsing. - - Should be used with _identify_collapses to provide the following - idempotent operation: - - collapsed = _identify_collapses(hastraits) - hastraits.trait_set(**_uncollapse(hastraits.trait_get(), collapsed)) - - NOTE: Modifies object in-place, in addition to returning it. - """ - - for key in indexable: - if key in collapsed: - indexable[key] = [indexable[key]] - return indexable - - -def _protect_collapses(hastraits): - """ A collapse-protected replacement for hastraits.trait_get() - - May be used as follows to provide an idempotent trait_set: - - hastraits.trait_set(**_protect_collapses(hastraits)) - """ - collapsed = _identify_collapses(hastraits) - return _uncollapse(hastraits.trait_get(), collapsed) - - def save_resultfile(result, cwd, name): """Save a result pklz file to ``cwd``""" resultsfile = os.path.join(cwd, 'result_%s.pklz' % name) - if result.outputs: - try: - collapsed = _identify_collapses(result.outputs) - outputs = _uncollapse(result.outputs.trait_get(), collapsed) - # Double-protect tosave so that the original, uncollapsed trait - # is saved in the pickle file. Thus, when the loading process - # collapses, the original correct value is loaded. 
- tosave = _uncollapse(outputs.copy(), collapsed) - except AttributeError: - tosave = outputs = result.outputs.dictcopy() # outputs was a bunch - for k, v in list(modify_paths(tosave, relative=True, basedir=cwd).items()): - setattr(result.outputs, k, v) - savepkl(resultsfile, result) logger.debug('saved results in %s', resultsfile) - if result.outputs: - for k, v in list(outputs.items()): - setattr(result.outputs, k, v) - def load_resultfile(path, name): """ @@ -349,20 +274,10 @@ def load_resultfile(path, name): logger.debug( 'some file does not exist. hence trait cannot be set') else: - if result.outputs: - try: - outputs = _protect_collapses(result.outputs) - except AttributeError: - outputs = result.outputs.dictcopy() # outputs == Bunch - try: - for k, v in list(modify_paths(outputs, relative=False, - basedir=path).items()): - setattr(result.outputs, k, v) - except FileNotFoundError: - logger.debug('conversion to full path results in ' - 'non existent file') aggregate = False - pkl_file.close() + finally: + pkl_file.close() + logger.debug('Aggregate: %s', aggregate) return result, aggregate, attribute_error From 9c374e4bebddc142709c682f71eceaab1b4fc92a Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Thu, 1 Aug 2019 13:32:34 -0700 Subject: [PATCH 0292/1665] Update nipype/interfaces/base/specs.py [skip ci] Co-Authored-By: Chris Markiewicz --- nipype/interfaces/base/specs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/base/specs.py b/nipype/interfaces/base/specs.py index 30d191163f..bbafda607b 100644 --- a/nipype/interfaces/base/specs.py +++ b/nipype/interfaces/base/specs.py @@ -340,7 +340,7 @@ def __getstate__(self): state = super(BaseTraitedSpec, self).__getstate__() for key in self.__all__: _trait_spec = self.trait(key) - if _trait_spec.is_trait_type((OutputMultiObject, OutputMultiPath)): + if _trait_spec.is_trait_type(OutputMultiObject): state[key] = _trait_spec.handler.get_value(self, key) return state From 0cd60b6d66a572a7a3e7b065c99234e4f7750211 Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Thu, 1 Aug 2019 14:06:39 -0700 Subject: [PATCH 0293/1665] Update nipype/interfaces/base/specs.py [skip ci] Co-Authored-By: Chris Markiewicz --- nipype/interfaces/base/specs.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nipype/interfaces/base/specs.py b/nipype/interfaces/base/specs.py index bbafda607b..c99620e9d8 100644 --- a/nipype/interfaces/base/specs.py +++ b/nipype/interfaces/base/specs.py @@ -31,7 +31,6 @@ isdefined, has_metadata, OutputMultiObject, - OutputMultiPath, ) from ... import config, __version__ From 7d92126960c60c408348611359ab8c776cca7333 Mon Sep 17 00:00:00 2001 From: oesteban Date: Fri, 2 Aug 2019 09:56:08 -0700 Subject: [PATCH 0294/1665] FIX: Use ``load_resultfile`` when loading a results pickle Some sections of the code were using ``loadpkl`` which does not resolve paths. This PR also improves ``loadpkl`` for readability and reliability. 
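For illustration only (not part of this change), the difference between the two loaders roughly looks like this, using the signatures introduced here; the results-file path is hypothetical::

    from nipype.utils.filemanip import loadpkl
    from nipype.pipeline.engine.utils import load_resultfile

    results_file = '/scratch/wf/node/result_node.pklz'  # hypothetical

    # loadpkl only unpickles the file and returns the stored object;
    # it knows nothing about the node that produced it.
    raw_result = loadpkl(results_file)

    # load_resultfile wraps loadpkl and additionally reports whether the
    # node should re-aggregate its outputs or rerun after a trait/version
    # mismatch.
    result, aggregate, attribute_error = load_resultfile(results_file)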
--- nipype/pipeline/engine/nodes.py | 8 +++-- nipype/pipeline/engine/utils.py | 20 +++++------ nipype/pipeline/plugins/base.py | 7 ++-- nipype/utils/filemanip.py | 62 ++++++++++++++++----------------- 4 files changed, 46 insertions(+), 51 deletions(-) diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index 006558b296..6314411a07 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -194,7 +194,8 @@ def interface(self): @property def result(self): """Get result from result file (do not hold it in memory)""" - return _load_resultfile(self.output_dir(), self.name)[0] + return _load_resultfile( + op.join(self.output_dir(), 'result_%s.pklz' % self.name))[0] @property def inputs(self): @@ -517,7 +518,7 @@ def _get_inputs(self): logger.debug('input: %s', key) results_file = info[0] logger.debug('results file: %s', results_file) - outputs = loadpkl(results_file).outputs + outputs = _load_resultfile(results_file)[0].outputs if outputs is None: raise RuntimeError("""\ Error populating the input "%s" of node "%s": the results file of the source node \ @@ -564,7 +565,8 @@ def _run_interface(self, execute=True, updatehash=False): def _load_results(self): cwd = self.output_dir() - result, aggregate, attribute_error = _load_resultfile(cwd, self.name) + result, aggregate, attribute_error = _load_resultfile( + op.join(cwd, 'result_%s.pklz' % self.name)) # try aggregating first if aggregate: logger.debug('aggregating results') diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index b6c0afda64..9821f62ffc 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -16,7 +16,6 @@ from traceback import format_exception from hashlib import sha1 -import gzip from functools import reduce @@ -25,6 +24,7 @@ from ... import logging, config, LooseVersion from ...utils.filemanip import ( + Path, relpath, makedirs, fname_presuffix, @@ -34,6 +34,7 @@ FileNotFoundError, save_json, savepkl, + loadpkl, write_rst_header, write_rst_dict, write_rst_list, @@ -233,7 +234,7 @@ def save_resultfile(result, cwd, name): logger.debug('saved results in %s', resultsfile) -def load_resultfile(path, name): +def load_resultfile(results_file): """ Load InterfaceResult file from path @@ -253,17 +254,14 @@ def load_resultfile(path, name): rerun """ aggregate = True - resultsoutputfile = os.path.join(path, 'result_%s.pklz' % name) + results_file = Path(results_file) + path = '%s' % results_file.parents + result = None attribute_error = False - if os.path.exists(resultsoutputfile): - pkl_file = gzip.open(resultsoutputfile, 'rb') + if results_file.exists(): try: - result = pickle.load(pkl_file) - except UnicodeDecodeError: - # Was this pickle created with Python 2.x? - pickle.load(pkl_file, fix_imports=True, encoding='utf-8') - logger.warning('Successfully loaded pkl in compatibility mode') + result = loadpkl(results_file) except (traits.TraitError, AttributeError, ImportError, EOFError) as err: if isinstance(err, (AttributeError, ImportError)): @@ -275,8 +273,6 @@ def load_resultfile(path, name): 'some file does not exist. hence trait cannot be set') else: aggregate = False - finally: - pkl_file.close() logger.debug('Aggregate: %s', aggregate) return result, aggregate, attribute_error diff --git a/nipype/pipeline/plugins/base.py b/nipype/pipeline/plugins/base.py index a30838a323..eee9a36657 100644 --- a/nipype/pipeline/plugins/base.py +++ b/nipype/pipeline/plugins/base.py @@ -18,9 +18,8 @@ import numpy as np from ... 
import logging -from ...utils.filemanip import loadpkl from ...utils.misc import str2bool -from ..engine.utils import topological_sort +from ..engine.utils import topological_sort, load_resultfile from ..engine import MapNode from .tools import report_crash, report_nodes_not_run, create_pyscript @@ -504,7 +503,7 @@ def _get_result(self, taskid): result_data['traceback'] = '\n'.join(format_exception(*sys.exc_info())) else: results_file = glob(os.path.join(node_dir, 'result_*.pklz'))[0] - result_data = loadpkl(results_file) + result_data = load_resultfile(results_file) result_out = dict(result=None, traceback=None) if isinstance(result_data, dict): result_out['result'] = result_data['result'] @@ -602,7 +601,7 @@ def _get_result(self, taskid): glob(os.path.join(node_dir, 'result_*.pklz')).pop() results_file = glob(os.path.join(node_dir, 'result_*.pklz'))[0] - result_data = loadpkl(results_file) + result_data = load_resultfile(results_file) result_out = dict(result=None, traceback=None) if isinstance(result_data, dict): diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index e19563ed54..70c9b50114 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -676,52 +676,50 @@ def loadcrash(infile, *args): def loadpkl(infile, versioning=False): - """Load a zipped or plain cPickled file - """ + """Load a zipped or plain cPickled file.""" + infile = Path(infile) fmlogger.debug('Loading pkl: %s', infile) - if infile.endswith('pklz'): - pkl_file = gzip.open(infile, 'rb') - else: - pkl_file = open(infile, 'rb') + pklopen = gzip.open if infile.suffix == '.pklz' else open + pkl_metadata = None - if versioning: - pkl_metadata = {} + with indirectory(str(infile.parent)): + pkl_file = pklopen(infile.name, 'rb') - # Look if pkl file contains version file - try: + try: # Look if pkl file contains version file pkl_metadata_line = pkl_file.readline() pkl_metadata = json.loads(pkl_metadata_line) - except: + except UnicodeDecodeError: + pass + finally: # Could not get version info pkl_file.seek(0) - try: try: unpkl = pickle.load(pkl_file) except UnicodeDecodeError: + # Was this pickle created with Python 2.x? unpkl = pickle.load(pkl_file, fix_imports=True, encoding='utf-8') - - return unpkl - - # Unpickling problems - except Exception as e: - if not versioning: + fmlogger.info('Successfully loaded pkl in compatibility mode.') + # Unpickling problems + except Exception as e: + if not versioning: + raise e + + if pkl_metadata and 'version' in pkl_metadata: + from nipype import __version__ as version + if pkl_metadata['version'] != version: + fmlogger.error("""\ +Attempted to open a results file generated by Nipype version %s, \ +with an incompatible Nipype version (%s)""", pkl_metadata['version'], version) + raise e + fmlogger.error("""\ +No metadata was found in the pkl file. 
Make sure you are currently using \ +the same Nipype version from the generated pkl.""") raise e - - from nipype import __version__ as version - - if 'version' in pkl_metadata: - if pkl_metadata['version'] != version: - fmlogger.error('Your Nipype version is: %s', - version) - fmlogger.error('Nipype version of the pkl is: %s', - pkl_metadata['version']) else: - fmlogger.error('No metadata was found in the pkl file.') - fmlogger.error('Make sure that you are using the same Nipype' - 'version from the generated pkl.') - - raise e + return unpkl + finally: + pkl_file.close() def crash2txt(filename, record): From 88b6fc05f2af332fbe51ddd51a08973d2316cd4d Mon Sep 17 00:00:00 2001 From: oesteban Date: Fri, 2 Aug 2019 10:31:05 -0700 Subject: [PATCH 0295/1665] fix(py27): uncaught exception in python 2 --- nipype/pipeline/engine/utils.py | 2 -- nipype/utils/filemanip.py | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index 9821f62ffc..638c91ed97 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -255,8 +255,6 @@ def load_resultfile(results_file): """ aggregate = True results_file = Path(results_file) - path = '%s' % results_file.parents - result = None attribute_error = False if results_file.exists(): diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 70c9b50114..2bf0541b43 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -688,7 +688,7 @@ def loadpkl(infile, versioning=False): try: # Look if pkl file contains version file pkl_metadata_line = pkl_file.readline() pkl_metadata = json.loads(pkl_metadata_line) - except UnicodeDecodeError: + except (UnicodeDecodeError, json.JSONDecodeError): pass finally: # Could not get version info From be28002e24e63c63df531d22f90524c39373dd29 Mon Sep 17 00:00:00 2001 From: oesteban Date: Mon, 5 Aug 2019 10:40:49 -0700 Subject: [PATCH 0296/1665] FIX: Incorrect extension identified when checking ``File`` traits Rewritten the validation of filename extensions, as it was incorrectly interpreting the extension. Fixes #2984. --- nipype/interfaces/base/traits_extension.py | 28 ++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/base/traits_extension.py b/nipype/interfaces/base/traits_extension.py index 585a385feb..395ae0732e 100644 --- a/nipype/interfaces/base/traits_extension.py +++ b/nipype/interfaces/base/traits_extension.py @@ -267,6 +267,30 @@ class File(BasePath): >>> a.foo # doctest: +ELLIPSIS '.../idoexist.txt' + >>> class A(TraitedSpec): + ... foo = File(extensions=['.nii', '.nii.gz']) + >>> a = A() + >>> a.foo = 'badext.txt' # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + TraitError: + + >>> class A(TraitedSpec): + ... 
foo = File(extensions=['.nii', '.nii.gz']) + >>> a = A() + >>> a.foo = 'goodext.nii' + >>> a.foo + 'goodext.nii' + + >>> a = A() + >>> a.foo = 'idontexist.000.nii' + >>> a.foo # doctest: +ELLIPSIS + 'idontexist.000.nii' + + >>> a = A() + >>> a.foo = 'idontexist.000.nii.gz' + >>> a.foo # doctest: +ELLIPSIS + 'idontexist.000.nii.gz' + """ _is_file = True @@ -292,8 +316,8 @@ def validate(self, objekt, name, value, return_pathlike=False): """Validate a value change.""" value = super(File, self).validate(objekt, name, value, return_pathlike=True) if self._exts: - ext = ''.join(value.suffixes) - if ext not in self._exts: + fname = value.name + if not any((fname.endswith(e) for e in self._exts)): self.error(objekt, name, str(value)) if not return_pathlike: From 55be540a000f37cb0bd245de0acf648de5c52d94 Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Tue, 6 Aug 2019 09:31:37 -0700 Subject: [PATCH 0297/1665] ENH: Let ``indirectory`` handle ``nipype.utils.filemanip.Path`` Addresses https://github.com/nipy/nipype/pull/2985#discussion_r311126456 --- nipype/utils/filemanip.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 2bf0541b43..44654c0197 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -682,7 +682,7 @@ def loadpkl(infile, versioning=False): pklopen = gzip.open if infile.suffix == '.pklz' else open pkl_metadata = None - with indirectory(str(infile.parent)): + with indirectory(infile.parent): pkl_file = pklopen(infile.name, 'rb') try: # Look if pkl file contains version file @@ -1039,7 +1039,7 @@ def relpath(path, start=None): @contextlib.contextmanager def indirectory(path): cwd = os.getcwd() - os.chdir(path) + os.chdir(str(path)) try: yield finally: From 8292a7afcd44dd06b6da4fa80d04e8e596743644 Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 18 Jul 2019 23:27:10 -0700 Subject: [PATCH 0298/1665] enh: add resolving to the results loader and rebasing to saver Fixes #2944. 
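As a sketch of the round trip this enables (mirroring the tests above, and reusing ``_test_spec`` and ``Path`` from ``test_traits_extension.py``)::

    from nipype.interfaces.base.traits_extension import (
        rebase_path_traits, resolve_path_traits)

    spec = _test_spec()
    cwd = '/some/path'

    # On save: paths are rebased onto the node's working directory.
    rebased = rebase_path_traits(spec.trait('a'), '/some/path/f1.txt', cwd)
    assert rebased == Path('f1.txt')

    # On load: paths are resolved back against the results-file location.
    resolved = resolve_path_traits(spec.trait('a'), rebased, cwd)
    assert resolved == Path('/some/path/f1.txt')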
--- nipype/pipeline/engine/tests/test_utils.py | 59 +++++++++++++++++ nipype/pipeline/engine/utils.py | 73 +++++++++++++++------- 2 files changed, 111 insertions(+), 21 deletions(-) diff --git a/nipype/pipeline/engine/tests/test_utils.py b/nipype/pipeline/engine/tests/test_utils.py index 4f4383f169..c462ea1533 100644 --- a/nipype/pipeline/engine/tests/test_utils.py +++ b/nipype/pipeline/engine/tests/test_utils.py @@ -224,3 +224,62 @@ def test_mapnode_crash3(tmpdir): wf.config["execution"]["crashdump_dir"] = os.getcwd() with pytest.raises(RuntimeError): wf.run(plugin='Linear') + +class StrPathConfuserInputSpec(nib.TraitedSpec): + in_str = nib.traits.String() + + +class StrPathConfuserOutputSpec(nib.TraitedSpec): + out_tuple = nib.traits.Tuple(nib.File, nib.traits.String) + out_dict_path = nib.traits.Dict(nib.traits.String, nib.File(exists=True)) + out_dict_str = nib.traits.DictStrStr() + out_list = nib.traits.List(nib.traits.String) + out_str = nib.traits.String() + out_path = nib.File(exists=True) + + +class StrPathConfuser(nib.SimpleInterface): + input_spec = StrPathConfuserInputSpec + output_spec = StrPathConfuserOutputSpec + + def _run_interface(self, runtime): + out_path = os.path.abspath(os.path.basename(self.inputs.in_str) + '_path') + open(out_path, 'w').close() + self._results['out_str'] = self.inputs.in_str + self._results['out_path'] = out_path + self._results['out_tuple'] = (out_path, self.inputs.in_str) + self._results['out_dict_path'] = {self.inputs.in_str: out_path} + self._results['out_dict_str'] = {self.inputs.in_str: self.inputs.in_str} + self._results['out_list'] = [self.inputs.in_str] * 2 + return runtime + + +def test_modify_paths_bug(tmpdir): + """ + There was a bug in which, if the current working directory contained a file with the name + of an output String, the string would get transformed into a path, and generally wreak havoc. + This attempts to replicate that condition, using an object with strings and paths in various + trait configurations, to ensure that the guards added resolve the issue. + Please see https://github.com/nipy/nipype/issues/2944 for more details. + """ + tmpdir.chdir() + + spc = pe.Node(StrPathConfuser(in_str='2'), name='spc') + + open('2', 'w').close() + + outputs = spc.run().outputs + + # Basic check that string was not manipulated + out_str = outputs.out_str + assert out_str == '2' + + # Check path exists and is absolute + out_path = outputs.out_path + assert os.path.isabs(out_path) + + # Assert data structures pass through correctly + assert outputs.out_tuple == (out_path, out_str) + assert outputs.out_dict_path == {out_str: out_path} + assert outputs.out_dict_str == {out_str: out_str} + assert outputs.out_list == [out_str] * 2 diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index 638c91ed97..afd450ef68 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -25,13 +25,13 @@ from ... 
import logging, config, LooseVersion from ...utils.filemanip import ( Path, + indirectory, relpath, makedirs, fname_presuffix, to_str, ensure_list, get_related_files, - FileNotFoundError, save_json, savepkl, loadpkl, @@ -41,6 +41,7 @@ ) from ...utils.misc import str2bool from ...utils.functions import create_function_from_source +from ...interfaces.base.traits_extension import rebase_path_traits, resolve_path_traits from ...interfaces.base import (Bunch, CommandLine, isdefined, Undefined, InterfaceResult, traits) from ...interfaces.utility import IdentityInterface @@ -227,52 +228,82 @@ def write_report(node, report_type=None, is_mapnode=False): return -def save_resultfile(result, cwd, name): - """Save a result pklz file to ``cwd``""" +def save_resultfile(result, cwd, name, rebase=True): + """Save a result pklz file to ``cwd``.""" + cwd = os.path.abspath(cwd) resultsfile = os.path.join(cwd, 'result_%s.pklz' % name) - savepkl(resultsfile, result) - logger.debug('saved results in %s', resultsfile) + logger.debug("Saving results file: '%s'", resultsfile) + if result.outputs is None: + logger.warn('Storing result file without outputs') + savepkl(resultsfile, result) + return + try: + outputs = result.outputs.trait_get() + except AttributeError: + logger.debug('Storing non-traited results, skipping rebase of paths') + savepkl(resultsfile, result) + return -def load_resultfile(results_file): + try: + with indirectory(cwd): + # All the magic to fix #2944 resides here: + for key, val in list(outputs.items()): + val = rebase_path_traits(result.outputs.trait(key), val, cwd) + setattr(result.outputs, key, val) + savepkl(resultsfile, result) + finally: + # Reset resolved paths from the outputs dict no matter what + for key, val in list(outputs.items()): + setattr(result.outputs, key, val) + + +def load_resultfile(results_file, resolve=True): """ - Load InterfaceResult file from path + Load InterfaceResult file from path. Parameter --------- - path : base_dir of node name : name of node Returns ------- - result : InterfaceResult structure aggregate : boolean indicating whether node should aggregate_outputs attribute error : boolean indicating whether there was some mismatch in versions of traits used to store result and hence node needs to rerun + """ - aggregate = True results_file = Path(results_file) + aggregate = True result = None attribute_error = False - if results_file.exists(): + + if not results_file.exists(): + return result, aggregate, attribute_error + + with indirectory(str(results_file.parent)): try: result = loadpkl(results_file) - except (traits.TraitError, AttributeError, ImportError, - EOFError) as err: - if isinstance(err, (AttributeError, ImportError)): - attribute_error = True - logger.debug('attribute error: %s probably using ' - 'different trait pickled file', str(err)) - else: - logger.debug( - 'some file does not exist. hence trait cannot be set') + except (traits.TraitError, EOFError): + logger.debug( + 'some file does not exist. 
hence trait cannot be set') + except (AttributeError, ImportError) as err: + attribute_error = True + logger.debug('attribute error: %s probably using ' + 'different trait pickled file', str(err)) else: aggregate = False - logger.debug('Aggregate: %s', aggregate) + if resolve and not aggregate: + logger.debug('Resolving paths in outputs loaded from results file.') + for trait_name, old_value in list(result.outputs.get().items()): + value = resolve_path_traits(result.outputs.trait(trait_name), old_value, + results_file.parent) + setattr(result.outputs, trait_name, value) + return result, aggregate, attribute_error From c87f9525adb69f149c1cacf738f2fbb0d4e8c953 Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 18 Jul 2019 23:58:36 -0700 Subject: [PATCH 0299/1665] fix: final fixups for tests to pass Modified ``test_outputmultipath_collapse`` due to a derivation of #2968. --- nipype/pipeline/engine/nodes.py | 2 +- nipype/pipeline/engine/tests/test_nodes.py | 10 +++++----- nipype/pipeline/engine/utils.py | 9 +++++++-- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index 6314411a07..e90f1dbb51 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -1260,7 +1260,7 @@ def _run_interface(self, execute=True, updatehash=False): stop_first=str2bool( self.config['execution']['stop_on_first_crash']))) # And store results - _save_resultfile(result, cwd, self.name) + _save_resultfile(result, cwd, self.name, rebase=False) # remove any node directories no longer required dirs2remove = [] for path in glob(op.join(cwd, 'mapflow', '*')): diff --git a/nipype/pipeline/engine/tests/test_nodes.py b/nipype/pipeline/engine/tests/test_nodes.py index ea03fe69ae..53531eb5c4 100644 --- a/nipype/pipeline/engine/tests/test_nodes.py +++ b/nipype/pipeline/engine/tests/test_nodes.py @@ -295,13 +295,13 @@ def test_inputs_removal(tmpdir): def test_outputmultipath_collapse(tmpdir): """Test an OutputMultiPath whose initial value is ``[[x]]`` to ensure that it is returned as ``[x]``, regardless of how accessed.""" - select_if = niu.Select(inlist=[[1, 2, 3], [4]], index=1) - select_nd = pe.Node(niu.Select(inlist=[[1, 2, 3], [4]], index=1), + select_if = niu.Select(inlist=[[1, 2, 3], [4, 5]], index=1) + select_nd = pe.Node(niu.Select(inlist=[[1, 2, 3], [4, 5]], index=1), name='select_nd') ifres = select_if.run() ndres = select_nd.run() - assert ifres.outputs.out == [4] - assert ndres.outputs.out == [4] - assert select_nd.result.outputs.out == [4] + assert ifres.outputs.out == [4, 5] + assert ndres.outputs.out == [4, 5] + assert select_nd.result.outputs.out == [4, 5] diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index afd450ef68..0fb1d0c85d 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -297,9 +297,14 @@ def load_resultfile(results_file, resolve=True): else: aggregate = False - if resolve and not aggregate: + if resolve and result.outputs: + try: + outputs = result.outputs.get() + except TypeError: # This is a Bunch + return result, aggregate, attribute_error + logger.debug('Resolving paths in outputs loaded from results file.') - for trait_name, old_value in list(result.outputs.get().items()): + for trait_name, old_value in list(outputs.items()): value = resolve_path_traits(result.outputs.trait(trait_name), old_value, results_file.parent) setattr(result.outputs, trait_name, value) From ece8f08cf0d6e635fc2c93c7057f7092dc255deb Mon Sep 17 00:00:00 2001 
From: oesteban Date: Fri, 19 Jul 2019 09:06:38 -0700 Subject: [PATCH 0300/1665] fix: reset only changed paths (workaround to preempt #2968) --- nipype/pipeline/engine/utils.py | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index 0fb1d0c85d..7879428816 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -248,12 +248,13 @@ def save_resultfile(result, cwd, name, rebase=True): try: with indirectory(cwd): # All the magic to fix #2944 resides here: - for key, val in list(outputs.items()): - val = rebase_path_traits(result.outputs.trait(key), val, cwd) - setattr(result.outputs, key, val) + for key, old in list(outputs.items()): + val = rebase_path_traits(result.outputs.trait(key), old, cwd) + if old != val: # Workaround #2968: Reset only changed values + setattr(result.outputs, key, val) savepkl(resultsfile, result) finally: - # Reset resolved paths from the outputs dict no matter what + # Restore resolved paths from the outputs dict no matter what for key, val in list(outputs.items()): setattr(result.outputs, key, val) @@ -297,17 +298,18 @@ def load_resultfile(results_file, resolve=True): else: aggregate = False - if resolve and result.outputs: - try: - outputs = result.outputs.get() - except TypeError: # This is a Bunch - return result, aggregate, attribute_error - - logger.debug('Resolving paths in outputs loaded from results file.') - for trait_name, old_value in list(outputs.items()): - value = resolve_path_traits(result.outputs.trait(trait_name), old_value, - results_file.parent) - setattr(result.outputs, trait_name, value) + if resolve and result.outputs: + try: + outputs = result.outputs.get() + except TypeError: # This is a Bunch + return result, aggregate, attribute_error + + logger.debug('Resolving paths in outputs loaded from results file.') + for trait_name, old in list(outputs.items()): + value = resolve_path_traits(result.outputs.trait(trait_name), old, + results_file.parent) + if value != old: # Workaround #2968: Reset only changed values + setattr(result.outputs, trait_name, value) return result, aggregate, attribute_error From 4c813049e990ef7962f0bb1daf004885e9eb88d2 Mon Sep 17 00:00:00 2001 From: oesteban Date: Wed, 31 Jul 2019 18:33:00 -0700 Subject: [PATCH 0301/1665] fix: do not set ``Undefined`` values - fixes join nodes with remove_needed_outputs=true --- nipype/pipeline/engine/utils.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index 7879428816..56beb70c73 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -249,14 +249,15 @@ def save_resultfile(result, cwd, name, rebase=True): with indirectory(cwd): # All the magic to fix #2944 resides here: for key, old in list(outputs.items()): - val = rebase_path_traits(result.outputs.trait(key), old, cwd) - if old != val: # Workaround #2968: Reset only changed values + if isdefined(old): + val = rebase_path_traits(result.outputs.trait(key), old, cwd) setattr(result.outputs, key, val) savepkl(resultsfile, result) finally: # Restore resolved paths from the outputs dict no matter what for key, val in list(outputs.items()): - setattr(result.outputs, key, val) + if isdefined(val): + setattr(result.outputs, key, val) def load_resultfile(results_file, resolve=True): @@ -306,9 +307,9 @@ def load_resultfile(results_file, resolve=True): 
logger.debug('Resolving paths in outputs loaded from results file.') for trait_name, old in list(outputs.items()): - value = resolve_path_traits(result.outputs.trait(trait_name), old, - results_file.parent) - if value != old: # Workaround #2968: Reset only changed values + if isdefined(old): + value = resolve_path_traits(result.outputs.trait(trait_name), old, + results_file.parent) setattr(result.outputs, trait_name, value) return result, aggregate, attribute_error From 3ad68d7ef02e2d6b337a5e83fd5e7f401c76b3d5 Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 1 Aug 2019 09:27:04 -0700 Subject: [PATCH 0302/1665] fix: reset test_nodes so that the OutputMultiObject issue surfaces Once we figure out the problem of ``OutputMultiObject``, we could go ahead and set fix #2944, fix poldracklab/fmriprep#1674, close #2945. --- nipype/pipeline/engine/tests/test_nodes.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nipype/pipeline/engine/tests/test_nodes.py b/nipype/pipeline/engine/tests/test_nodes.py index 53531eb5c4..ea03fe69ae 100644 --- a/nipype/pipeline/engine/tests/test_nodes.py +++ b/nipype/pipeline/engine/tests/test_nodes.py @@ -295,13 +295,13 @@ def test_inputs_removal(tmpdir): def test_outputmultipath_collapse(tmpdir): """Test an OutputMultiPath whose initial value is ``[[x]]`` to ensure that it is returned as ``[x]``, regardless of how accessed.""" - select_if = niu.Select(inlist=[[1, 2, 3], [4, 5]], index=1) - select_nd = pe.Node(niu.Select(inlist=[[1, 2, 3], [4, 5]], index=1), + select_if = niu.Select(inlist=[[1, 2, 3], [4]], index=1) + select_nd = pe.Node(niu.Select(inlist=[[1, 2, 3], [4]], index=1), name='select_nd') ifres = select_if.run() ndres = select_nd.run() - assert ifres.outputs.out == [4, 5] - assert ndres.outputs.out == [4, 5] - assert select_nd.result.outputs.out == [4, 5] + assert ifres.outputs.out == [4] + assert ndres.outputs.out == [4] + assert select_nd.result.outputs.out == [4] From 829957c0874803c521c8e75ea23f4e4909ef5ce9 Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 1 Aug 2019 11:40:29 -0700 Subject: [PATCH 0303/1665] enh: read true value from traits --- nipype/pipeline/engine/utils.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index 56beb70c73..de15ee6dcb 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -245,19 +245,21 @@ def save_resultfile(result, cwd, name, rebase=True): savepkl(resultsfile, result) return + backup_traits = {} try: with indirectory(cwd): # All the magic to fix #2944 resides here: for key, old in list(outputs.items()): if isdefined(old): + old = result.outputs.trait(key).handler.get_value(result.outputs, key) + backup_traits[key] = old val = rebase_path_traits(result.outputs.trait(key), old, cwd) setattr(result.outputs, key, val) savepkl(resultsfile, result) finally: # Restore resolved paths from the outputs dict no matter what - for key, val in list(outputs.items()): - if isdefined(val): - setattr(result.outputs, key, val) + for key, val in list(backup_traits.items()): + setattr(result.outputs, key, val) def load_resultfile(results_file, resolve=True): @@ -308,6 +310,8 @@ def load_resultfile(results_file, resolve=True): logger.debug('Resolving paths in outputs loaded from results file.') for trait_name, old in list(outputs.items()): if isdefined(old): + old = result.outputs.trait(trait_name).handler.get_value( + result.outputs, trait_name) value = 
resolve_path_traits(result.outputs.trait(trait_name), old, results_file.parent) setattr(result.outputs, trait_name, value) From 5f917f22e855dcbba79883500d0fe22de2cfecf7 Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 1 Aug 2019 16:47:51 -0700 Subject: [PATCH 0304/1665] fix: final touches to the PR Close #2944. Close #2949. --- nipype/caching/tests/test_memory.py | 3 +-- nipype/pipeline/engine/tests/test_base.py | 8 ++------ nipype/pipeline/engine/utils.py | 21 +++++++++++++-------- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/nipype/caching/tests/test_memory.py b/nipype/caching/tests/test_memory.py index 3ea594f22a..642fee363d 100644 --- a/nipype/caching/tests/test_memory.py +++ b/nipype/caching/tests/test_memory.py @@ -15,8 +15,7 @@ class SideEffectInterface(EngineTestInterface): def _run_interface(self, runtime): global nb_runs nb_runs += 1 - runtime.returncode = 0 - return runtime + return super(SideEffectInterface, self)._run_interface(runtime) def test_caching(tmpdir): diff --git a/nipype/pipeline/engine/tests/test_base.py b/nipype/pipeline/engine/tests/test_base.py index 3513152c06..5b072e9ee6 100644 --- a/nipype/pipeline/engine/tests/test_base.py +++ b/nipype/pipeline/engine/tests/test_base.py @@ -20,19 +20,15 @@ class OutputSpec(nib.TraitedSpec): output1 = nib.traits.List(nib.traits.Int, desc='outputs') -class EngineTestInterface(nib.BaseInterface): +class EngineTestInterface(nib.SimpleInterface): input_spec = InputSpec output_spec = OutputSpec def _run_interface(self, runtime): runtime.returncode = 0 + self._results['output1'] = [1, self.inputs.input1] return runtime - def _list_outputs(self): - outputs = self._outputs().get() - outputs['output1'] = [1, self.inputs.input1] - return outputs - @pytest.mark.parametrize( 'name', ['valid1', 'valid_node', 'valid-node', 'ValidNode0']) diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index de15ee6dcb..f7b2772f50 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -41,9 +41,10 @@ ) from ...utils.misc import str2bool from ...utils.functions import create_function_from_source -from ...interfaces.base.traits_extension import rebase_path_traits, resolve_path_traits -from ...interfaces.base import (Bunch, CommandLine, isdefined, Undefined, - InterfaceResult, traits) +from ...interfaces.base.traits_extension import ( + rebase_path_traits, resolve_path_traits, OutputMultiPath, isdefined, Undefined, traits) +from ...interfaces.base.support import Bunch, InterfaceResult +from ...interfaces.base import CommandLine from ...interfaces.utility import IdentityInterface from ...utils.provenance import ProvStore, pm, nipype_ns, get_id @@ -239,7 +240,7 @@ def save_resultfile(result, cwd, name, rebase=True): savepkl(resultsfile, result) return try: - outputs = result.outputs.trait_get() + output_names = result.outputs.copyable_trait_names() except AttributeError: logger.debug('Storing non-traited results, skipping rebase of paths') savepkl(resultsfile, result) @@ -249,9 +250,12 @@ def save_resultfile(result, cwd, name, rebase=True): try: with indirectory(cwd): # All the magic to fix #2944 resides here: - for key, old in list(outputs.items()): + for key in output_names: + old = getattr(result.outputs, key) if isdefined(old): - old = result.outputs.trait(key).handler.get_value(result.outputs, key) + if result.outputs.trait(key).is_trait_type(OutputMultiPath): + old = result.outputs.trait(key).handler.get_value( + result.outputs, key) backup_traits[key] = old val = 
rebase_path_traits(result.outputs.trait(key), old, cwd) setattr(result.outputs, key, val) @@ -310,8 +314,9 @@ def load_resultfile(results_file, resolve=True): logger.debug('Resolving paths in outputs loaded from results file.') for trait_name, old in list(outputs.items()): if isdefined(old): - old = result.outputs.trait(trait_name).handler.get_value( - result.outputs, trait_name) + if result.outputs.trait(trait_name).is_trait_type(OutputMultiPath): + old = result.outputs.trait(trait_name).handler.get_value( + result.outputs, trait_name) value = resolve_path_traits(result.outputs.trait(trait_name), old, results_file.parent) setattr(result.outputs, trait_name, value) From a06166d9800ec7ff07492a6cf36f276d0e53d11c Mon Sep 17 00:00:00 2001 From: oesteban Date: Fri, 2 Aug 2019 15:07:44 -0700 Subject: [PATCH 0305/1665] fix: return same type of value for traits containing lists --- nipype/interfaces/base/traits_extension.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/base/traits_extension.py b/nipype/interfaces/base/traits_extension.py index 395ae0732e..27c4103a74 100644 --- a/nipype/interfaces/base/traits_extension.py +++ b/nipype/interfaces/base/traits_extension.py @@ -526,7 +526,7 @@ def _recurse_on_path_traits(func, thistrait, value, cwd): elif thistrait.is_trait_type(traits.List): innertrait, = thistrait.inner_traits if not isinstance(value, (list, tuple)): - value = [value] + return _recurse_on_path_traits(func, innertrait, value, cwd) value = [_recurse_on_path_traits(func, innertrait, v, cwd) for v in value] From 0c1a4cdcf2867523c6dac84ab2ca0745ca5f0d08 Mon Sep 17 00:00:00 2001 From: Lucinda Sisk Date: Tue, 6 Aug 2019 13:49:59 -0400 Subject: [PATCH 0306/1665] Update reconst.py Change max_sh to InputMultiObject, so that one can pass a list of lmax values when using a multishell model. 
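
For illustration, a minimal usage sketch of the list-valued ``max_sh`` (an
assumption, not part of the patch itself: it presumes the ``InputMultiObject``
spec with ``sep=','`` introduced below, and all file names are placeholders
that must exist on disk):

    from nipype.interfaces import mrtrix3 as mrt

    fod = mrt.EstimateFOD()
    fod.inputs.algorithm = 'msmt_csd'        # multi-shell, multi-tissue CSD
    fod.inputs.in_file = 'dwi.mif'           # placeholder DWI series
    fod.inputs.wm_txt = 'wm.txt'             # placeholder response files
    fod.inputs.gm_txt = 'gm.txt'
    fod.inputs.csf_txt = 'csf.txt'
    fod.inputs.grad_fsl = ('bvecs', 'bvals')
    fod.inputs.max_sh = [4, 8, 8]            # one lmax per response function
    print(fod.cmdline)                       # '-lmax 4,8,8' appears in the call

Because the trait declares ``sep=','``, the list is joined into a single
comma-separated ``-lmax`` argument, which is what ``dwi2fod`` expects for
multi-shell responses, while a single integer keeps working for plain
single-shell ``csd``.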
--- nipype/interfaces/mrtrix3/reconst.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/nipype/interfaces/mrtrix3/reconst.py b/nipype/interfaces/mrtrix3/reconst.py index f1e9c604d2..93d6b26aac 100644 --- a/nipype/interfaces/mrtrix3/reconst.py +++ b/nipype/interfaces/mrtrix3/reconst.py @@ -6,7 +6,7 @@ import os.path as op -from ..base import traits, TraitedSpec, File, Undefined +from ..base import traits, TraitedSpec, File, Undefined, InputMultiObject from .base import MRTrix3BaseInputSpec, MRTrix3Base @@ -115,10 +115,14 @@ class EstimateFODInputSpec(MRTrix3BaseInputSpec): sep=',', argstr='-shell %s', desc='specify one or more dw gradient shells') - max_sh = traits.Int( - 8, usedefault=True, - argstr='-lmax %d', - desc='maximum harmonic degree of response function') + max_sh = InputMultiObject( + traits.Int, + value=[8], + usedefault=True, + argstr='-lmax %s', + sep=',', + desc=('maximum harmonic degree of response function - single value for ' + 'single-shell response, list for multi-shell response')) in_dirs = File( exists=True, argstr='-directions %s', From 6dee6074ba49b7593a9ad9ab72c978be507dfc06 Mon Sep 17 00:00:00 2001 From: oesteban Date: Wed, 7 Aug 2019 12:27:26 -0700 Subject: [PATCH 0307/1665] fix: honor ``use_relative_paths`` option --- nipype/pipeline/engine/nodes.py | 16 ++++++++++++---- nipype/pipeline/engine/utils.py | 5 ++++- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index e90f1dbb51..2c441a5c57 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -587,7 +587,9 @@ def _load_results(self): runtime=runtime, inputs=self._interface.inputs.get_traitsfree(), outputs=aggouts) - _save_resultfile(result, cwd, self.name) + _save_resultfile( + result, cwd, self.name, + rebase=str2bool(self.config['execution']['use_relative_paths'])) else: logger.debug('aggregating mapnode results') result = self._run_interface() @@ -634,7 +636,9 @@ def _run_command(self, execute, copyfiles=True): except Exception as msg: result.runtime.stderr = '{}\n\n{}'.format( getattr(result.runtime, 'stderr', ''), msg) - _save_resultfile(result, outdir, self.name) + _save_resultfile( + result, outdir, self.name, + rebase=str2bool(self.config['execution']['use_relative_paths'])) raise cmdfile = op.join(outdir, 'command.txt') with open(cmdfile, 'wt') as fd: @@ -646,7 +650,9 @@ def _run_command(self, execute, copyfiles=True): except Exception as msg: result.runtime.stderr = '%s\n\n%s'.format( getattr(result.runtime, 'stderr', ''), msg) - _save_resultfile(result, outdir, self.name) + _save_resultfile( + result, outdir, self.name, + rebase=str2bool(self.config['execution']['use_relative_paths'])) raise dirs2keep = None @@ -660,7 +666,9 @@ def _run_command(self, execute, copyfiles=True): self.needed_outputs, self.config, dirs2keep=dirs2keep) - _save_resultfile(result, outdir, self.name) + _save_resultfile( + result, outdir, self.name, + rebase=str2bool(self.config['execution']['use_relative_paths'])) return result diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index f7b2772f50..65170f14c9 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -229,8 +229,11 @@ def write_report(node, report_type=None, is_mapnode=False): return -def save_resultfile(result, cwd, name, rebase=True): +def save_resultfile(result, cwd, name, rebase=None): """Save a result pklz file to ``cwd``.""" + if rebase is None: + rebase = 
config.getboolean('execution', 'use_relative_paths') + cwd = os.path.abspath(cwd) resultsfile = os.path.join(cwd, 'result_%s.pklz' % name) logger.debug("Saving results file: '%s'", resultsfile) From 26bf3af2086a9e98b7d087013f7d165b08da801b Mon Sep 17 00:00:00 2001 From: Lucinda Sisk Date: Tue, 13 Aug 2019 12:27:46 -0400 Subject: [PATCH 0308/1665] Revert argstr to use %d --- nipype/interfaces/mrtrix3/reconst.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nipype/interfaces/mrtrix3/reconst.py b/nipype/interfaces/mrtrix3/reconst.py index 93d6b26aac..9b42f1bbcd 100644 --- a/nipype/interfaces/mrtrix3/reconst.py +++ b/nipype/interfaces/mrtrix3/reconst.py @@ -119,10 +119,9 @@ class EstimateFODInputSpec(MRTrix3BaseInputSpec): traits.Int, value=[8], usedefault=True, - argstr='-lmax %s', + argstr='-lmax %d', sep=',', - desc=('maximum harmonic degree of response function - single value for ' - 'single-shell response, list for multi-shell response')) + desc=('maximum harmonic degree of response function - single value for single-shell response, list for multi-shell response')) in_dirs = File( exists=True, argstr='-directions %s', From 847b42326e14fffe6b708663e28b24fef1025bae Mon Sep 17 00:00:00 2001 From: Lucinda Sisk Date: Fri, 16 Aug 2019 13:01:32 -0400 Subject: [PATCH 0309/1665] TEST: Update EstimateFOD autotest --- nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py index c64efa2ca4..a935f9aae9 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py @@ -41,8 +41,12 @@ def test_EstimateFOD_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, + xor=['grad_fsl'], + ), + grad_fsl=dict( + argstr='-fslgrad %s %s', + xor=['grad_file'], ), - grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', @@ -64,6 +68,7 @@ def test_EstimateFOD_inputs(): ), max_sh=dict( argstr='-lmax %d', + sep=',', usedefault=True, ), nthreads=dict( From 4d2d260b8ad4ae6df9eed64847425f3129d75843 Mon Sep 17 00:00:00 2001 From: Lucinda Sisk Date: Fri, 16 Aug 2019 21:40:38 -0400 Subject: [PATCH 0310/1665] Update nipype/interfaces/mrtrix3/reconst.py - change back to %s Co-Authored-By: Chris Markiewicz --- nipype/interfaces/mrtrix3/reconst.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/reconst.py b/nipype/interfaces/mrtrix3/reconst.py index 9b42f1bbcd..4e2db8de3f 100644 --- a/nipype/interfaces/mrtrix3/reconst.py +++ b/nipype/interfaces/mrtrix3/reconst.py @@ -119,7 +119,7 @@ class EstimateFODInputSpec(MRTrix3BaseInputSpec): traits.Int, value=[8], usedefault=True, - argstr='-lmax %d', + argstr='-lmax %s', sep=',', desc=('maximum harmonic degree of response function - single value for single-shell response, list for multi-shell response')) in_dirs = File( From 3b1b62af7d4da3b944a34f9bbbf755a05d0ff4db Mon Sep 17 00:00:00 2001 From: Lucinda Sisk Date: Fri, 16 Aug 2019 21:41:03 -0400 Subject: [PATCH 0311/1665] Update nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py Co-Authored-By: Chris Markiewicz --- nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py 
b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py index a935f9aae9..9f991d51cc 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py @@ -67,7 +67,7 @@ def test_EstimateFOD_inputs(): extensions=None, ), max_sh=dict( - argstr='-lmax %d', + argstr='-lmax %s', sep=',', usedefault=True, ), From f3d1317f36e8d25d24f33e782e0d26ff15962f3d Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Fri, 16 Aug 2019 16:43:48 -0400 Subject: [PATCH 0312/1665] DOC: Update changelog --- doc/changelog/1.X.X-changelog | 26 ++++++++++++++++++++++++++ tools/update_changes.sh | 3 ++- 2 files changed, 28 insertions(+), 1 deletion(-) mode change 100644 => 100755 tools/update_changes.sh diff --git a/doc/changelog/1.X.X-changelog b/doc/changelog/1.X.X-changelog index 296e7e5c0e..ef38af2eba 100644 --- a/doc/changelog/1.X.X-changelog +++ b/doc/changelog/1.X.X-changelog @@ -1,3 +1,29 @@ +1.2.1 (August 19, 2019) +======================= + +##### [Full changelog](https://github.com/nipy/nipype/milestone/32?closed=1) + + * FIX: Resolve/rebase paths from/to results files (https://github.com/nipy/nipype/pull/2971) + * FIX: Use ``load_resultfile`` when loading a results pickle (https://github.com/nipy/nipype/pull/2985) + * FIX: Incorrect extension identified when checking ``File`` traits (https://github.com/nipy/nipype/pull/2987) + * FIX: Correctly pickle ``OuputMultiObject`` traits (https://github.com/nipy/nipype/pull/2983) + * FIX: Improve output handling in DWIDenoise and DWIBiasCorrect (https://github.com/nipy/nipype/pull/2978) + * FIX: Docker build (https://github.com/nipy/nipype/pull/2963) + * FIX: Remove '=' signs from EddyQuad argument specifications (https://github.com/nipy/nipype/pull/2941) + * FIX: Set input model to bedpostx for camino.TrackBedpostxProba (https://github.com/nipy/nipype/pull/2947) + * FIX: Allow ``max_sh``not to be set (auto mode) (https://github.com/nipy/nipype/pull/2940) + * ENH: Let ``indirectory`` handle ``nipype.utils.filemanip.Path`` (https://github.com/nipy/nipype/pull/2989) + * ENH: Add resolve/rebase ``BasePath`` traits methods & tests (https://github.com/nipy/nipype/pull/2970) + * ENH: Modify ``Directory`` and ``File`` traits to get along with pathlib (https://github.com/nipy/nipype/pull/2962) + * REF: Update nipype2boutiques script (https://github.com/nipy/nipype/pull/2894) + * TST: Parametrize JoinNode expansion tests over config ``needed_outputs`` (https://github.com/nipy/nipype/pull/2981) + * MAINT: Pin lxml<4.4.0 for Python 3.4 (https://github.com/nipy/nipype/pull/2980) + * MAINT: Refactor ``aggregate_outputs`` for readability (https://github.com/nipy/nipype/pull/2969) + * MAINT: Bump neurodocker version (https://github.com/nipy/nipype/pull/2965) + * MAINT: Various minor improvements to complement previous PR (https://github.com/nipy/nipype/pull/2964) + * MAINT: Sort dependencies alphabetically (https://github.com/nipy/nipype/pull/2961) + + 1.2.0 (May 09, 2019) ==================== diff --git a/tools/update_changes.sh b/tools/update_changes.sh old mode 100644 new mode 100755 index 017c0623f2..1ba3528b1f --- a/tools/update_changes.sh +++ b/tools/update_changes.sh @@ -12,7 +12,8 @@ set -u # Treat unset variables as an error when substituting. set -x # Print command traces before executing command. 
-CHANGES=../doc/changelog/1.X.X-changelog +ROOT=$( git rev-parse --show-toplevel ) +CHANGES=$ROOT/doc/changelog/1.X.X-changelog # Check whether the Upcoming release header is present head -1 $CHANGES | grep -q Upcoming From 82c4c6c41979913e0667d23c8c24a9e88a58631f Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Fri, 16 Aug 2019 16:49:59 -0400 Subject: [PATCH 0313/1665] MNT: Update mailmap, zenodo --- .mailmap | 3 +++ .zenodo.json | 75 ++++++++++++++++++++++++++++------------------------ 2 files changed, 43 insertions(+), 35 deletions(-) diff --git a/.mailmap b/.mailmap index 02f5d57270..259fbe170d 100644 --- a/.mailmap +++ b/.mailmap @@ -139,6 +139,9 @@ Michael Clark Clark Michael Dayan Michael Michael Dayan Michael Michael Dayan mick-d +Michael Dayan Michael +Michael Joseph mjoseph +Michael Joseph Michael Joseph Michael Philipp Notter Michael Notter Michael Philipp Notter miykael Michael Waskom Michael Waskom diff --git a/.zenodo.json b/.zenodo.json index a7333e6ec4..4e6443b8c7 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -30,16 +30,16 @@ "name": "Jarecka, Dorota", "orcid": "0000-0003-1857-8129" }, - { - "affiliation": "The Laboratory for Investigative Neurophysiology (The LINE), Department of Radiology and Department of Clinical Neurosciences, Lausanne, Switzerland; Center for Biomedical Imaging (CIBM), Lausanne, Switzerland", - "name": "Notter, Michael Philipp", - "orcid": "0000-0002-5866-047X" - }, { "affiliation": "University of Iowa", "name": "Johnson, Hans", "orcid": "0000-0001-9513-2660" }, + { + "affiliation": "The Laboratory for Investigative Neurophysiology (The LINE), Department of Radiology and Department of Clinical Neurosciences, Lausanne, Switzerland; Center for Biomedical Imaging (CIBM), Lausanne, Switzerland", + "name": "Notter, Michael Philipp", + "orcid": "0000-0002-5866-047X" + }, { "name": "Burns, Christopher" }, @@ -81,14 +81,14 @@ { "name": "Modat, Marc" }, - { - "name": "Loney, Fred" - }, { "affiliation": "Developer", "name": "Clark, Daniel", "orcid": "0000-0002-8121-8954" }, + { + "name": "Loney, Fred" + }, { "affiliation": "Department of Electrical and Computer Engineering, Johns Hopkins University", "name": "Dewey, Blake E", @@ -125,16 +125,16 @@ "name": "Clark, Dav", "orcid": "0000-0002-3982-4416" }, - { - "affiliation": "Dartmouth College", - "name": "Visconti di Oleggio Castello, Matteo", - "orcid": "0000-0001-7931-5272" - }, { "affiliation": "UC Berkeley - UCSF Graduate Program in Bioengineering", "name": "Keshavan, Anisha", "orcid": "0000-0003-3554-043X" }, + { + "affiliation": "Dartmouth College", + "name": "Visconti di Oleggio Castello, Matteo", + "orcid": "0000-0001-7931-5272" + }, { "name": "Pinsard, Basile" }, @@ -166,11 +166,6 @@ "name": "Guillon, Je\u0301re\u0301my", "orcid": "0000-0002-2672-7510" }, - { - "affiliation": "Montreal Neurological Institute and Hospital", - "name": "Markello, Ross", - "orcid": "0000-0003-1057-1336" - }, { "affiliation": "The University of Washington eScience Institute", "name": "Rokem, Ariel", @@ -185,6 +180,11 @@ "name": "DuPre, Elizabeth", "orcid": "0000-0003-1358-196X" }, + { + "affiliation": "Montreal Neurological Institute and Hospital", + "name": "Markello, Ross", + "orcid": "0000-0003-1057-1336" + }, { "affiliation": "MIT", "name": "Kaczmarzyk, Jakub", @@ -275,10 +275,6 @@ { "name": "Dubois, Mathieu" }, - { - "name": "Heinsfeld, Anibal S\u00f3lon", - "orcid": "0000-0002-2050-0614" - }, { "affiliation": "Child Mind Institute", "name": "Frohlich, Caroline" @@ -293,12 +289,11 @@ "orcid": 
"0000-0002-7796-8795" }, { - "name": "Kent, James" + "name": "Heinsfeld, Anibal S\u00f3lon", + "orcid": "0000-0002-2050-0614" }, { - "affiliation": "University of Texas at Austin", - "name": "De La Vega, Alejandro", - "orcid": "0000-0001-9062-3778" + "name": "Kent, James" }, { "name": "Watanabe, Aimi" @@ -316,6 +311,11 @@ "name": "Nichols, B. Nolan", "orcid": "0000-0003-1099-3328" }, + { + "affiliation": "University of Texas at Austin", + "name": "De La Vega, Alejandro", + "orcid": "0000-0001-9062-3778" + }, { "affiliation": "University College London", "name": "Eshaghi, Arman", @@ -389,11 +389,6 @@ { "name": "Haselgrove, Christian" }, - { - "affiliation": "1 McGill Centre for Integrative Neuroscience (MCIN), Ludmer Centre for Neuroinformatics and Mental Health, Montreal Neurological Institute (MNI), McGill University, Montr\u00e9al, 3801 University Street, WB-208, H3A 2B4, Qu\u00e9bec, Canada. 2 University of Lyon, CNRS, INSERM, CREATIS., Villeurbanne, 7, avenue Jean Capelle, 69621, France.", - "name": "Glatard, Tristan", - "orcid": "0000-0003-2620-5883" - }, { "name": "Renfro, Mandy" }, @@ -469,6 +464,11 @@ { "name": "Park, Anne" }, + { + "affiliation": "1 McGill Centre for Integrative Neuroscience (MCIN), Ludmer Centre for Neuroinformatics and Mental Health, Montreal Neurological Institute (MNI), McGill University, Montr\u00e9al, 3801 University Street, WB-208, H3A 2B4, Qu\u00e9bec, Canada. 2 University of Lyon, CNRS, INSERM, CREATIS., Villeurbanne, 7, avenue Jean Capelle, 69621, France.", + "name": "Glatard, Tristan", + "orcid": "0000-0003-2620-5883" + }, { "name": "Poldrack, Russell" }, @@ -485,10 +485,6 @@ "name": "Cooper, Gavin", "orcid": "0000-0002-7186-5293" }, - { - "affiliation": "Institute of Imaging & Computer Vision, RWTH Aachen University, Germany", - "name": "Weninger, Leon" - }, { "name": "Inati, Souheil" }, @@ -501,6 +497,10 @@ "name": "Lukas Snoek", "orcid": "0000-0001-8972-204X" }, + { + "affiliation": "Institute of Imaging & Computer Vision, RWTH Aachen University, Germany", + "name": "Weninger, Leon" + }, { "name": "Marina, Ana" }, @@ -639,6 +639,11 @@ "name": "Meyers, Benjamin", "orcid": "0000-0001-9137-4363" }, + { + "affiliation": "Washington University in St Louis", + "name": "Van, Andrew", + "orcid": "0000-0002-8787-0943" + }, { "name": "Davison, Andrew" }, From 79ee1b3f95987a762fa1ef2ffd2294fd4fff458d Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Fri, 16 Aug 2019 16:59:21 -0400 Subject: [PATCH 0314/1665] MNT: Version 1.2.0 --- doc/conf.py | 2 +- nipype/info.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index 4049fe7ee5..2a253eb7aa 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -82,7 +82,7 @@ # The short X.Y version. version = nipype.__version__ # The full version, including alpha/beta/rc tags. -release = "1.2.0" +release = "1.2.1" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/nipype/info.py b/nipype/info.py index d1aeb56f5a..868802d250 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -11,7 +11,7 @@ # full release. '.dev' as a version_extra string means this is a development # version # Remove -dev for release -__version__ = '1.2.1-dev' +__version__ = '1.2.1' def get_nipype_gitversion(): From 74c34e0830afbf7b7d30bbe3456868d2b8be25ee Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Fri, 16 Aug 2019 17:12:59 -0400 Subject: [PATCH 0315/1665] MNT: Constrain numpy < 1.17 if Python < 3 --- nipype/info.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/nipype/info.py b/nipype/info.py index 868802d250..63b4d3673d 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -107,6 +107,8 @@ def get_nipype_gitversion(): # https://www.opensourceanswers.com/blog/you-shouldnt-use-python-37-for-data-science-right-now.html NUMPY_MIN_VERSION_37 = '1.15.3' NUMPY_BAD_VERSION_27 = '1.16.0' +# Numpy drops 2.7 support in 1.17 +NUMPY_MAX_VERSION_27 = '1.17.0' SCIPY_MIN_VERSION = '0.14' TRAITS_MIN_VERSION = '4.6' DATEUTIL_MIN_VERSION = '2.2' @@ -146,7 +148,9 @@ def get_nipype_gitversion(): 'nibabel>=%s' % NIBABEL_MIN_VERSION, 'numpy>=%s ; python_version > "3.0" and python_version < "3.7"' % NUMPY_MIN_VERSION, 'numpy>=%s ; python_version >= "3.7"' % NUMPY_MIN_VERSION_37, - 'numpy>=%s,!=%s ; python_version == "2.7"' % (NUMPY_MIN_VERSION, NUMPY_BAD_VERSION_27), + 'numpy>=%s,!=%s,<%s ; python_version == "2.7"' % (NUMPY_MIN_VERSION, + NUMPY_BAD_VERSION_27, + NUMPY_MAX_VERSION_27), 'packaging', 'pathlib2; python_version <= "3.4"', 'prov>=%s' % PROV_VERSION, From b1b56ef4655b343cd6293626d95278bd64bc9b61 Mon Sep 17 00:00:00 2001 From: Garikoitz Lerma-Usabiaga Date: Fri, 16 Aug 2019 14:32:29 -0700 Subject: [PATCH 0316/1665] Update .zenodo.json --- .zenodo.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.zenodo.json b/.zenodo.json index 4e6443b8c7..d6ec94f140 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -501,6 +501,11 @@ "affiliation": "Institute of Imaging & Computer Vision, RWTH Aachen University, Germany", "name": "Weninger, Leon" }, + { + "affiliation": "Stanford University", + "name": "Lerma-Usabiaga, Garikoitz", + "orcid": "0000-0001-9800-4816" + }, { "name": "Marina, Ana" }, From dcf120806a025f6103ccdbba39b1f8814d94e379 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Fri, 16 Aug 2019 18:48:57 -0400 Subject: [PATCH 0317/1665] CI: Update pip to respect python_requires --- .circleci/config.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index c2356cebdf..828fcf44f6 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -339,13 +339,14 @@ jobs: - run: name: Check pypi preconditions command: | - pip install --upgrade twine future wheel readme_renderer setuptools + pip install --upgrade pip twine future wheel readme_renderer setuptools python setup.py check -r -s python setup.py sdist bdist_wheel - run: name: Validate Python 2 installation command: | pyenv local 2.7.12 + pip install --upgrade pip pip install dist/nipype-*-py2.py3-none-any.whl # Futures should install in Python 2 pip show futures 2>/dev/null | grep "Name: futures" @@ -353,6 +354,7 @@ jobs: name: Validate Python 3 installation command: | pyenv local 3.5.2 + pip install --upgrade pip pip install dist/nipype-*-py2.py3-none-any.whl # Futures should not install in Python 3 test $(pip show futures 2>/dev/null | wc -l) = "0" From 3a37ba000083c86898c8b5fca31b2786933f2ebc Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Fri, 16 Aug 2019 18:51:23 -0400 Subject: [PATCH 0318/1665] MNT: Constrain scipy < 1.3 if Python < 3.5 --- nipype/info.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/nipype/info.py b/nipype/info.py index 63b4d3673d..9aaa904580 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -110,6 +110,8 @@ def get_nipype_gitversion(): # Numpy drops 2.7 support in 1.17 NUMPY_MAX_VERSION_27 = '1.17.0' SCIPY_MIN_VERSION = '0.14' +# Scipy drops 2.7 and 3.4 support in 1.3 +SCIPY_MAX_VERSION_34 = '1.3.0' TRAITS_MIN_VERSION = '4.6' DATEUTIL_MIN_VERSION = '2.2' FUTURE_MIN_VERSION = '0.16.0' @@ -157,7 +159,8 @@ def get_nipype_gitversion(): 'pydot>=%s' % PYDOT_MIN_VERSION, 'pydotplus', 'python-dateutil>=%s' % DATEUTIL_MIN_VERSION, - 'scipy>=%s' % SCIPY_MIN_VERSION, + 'scipy>=%s ; python_version >= "3.5"' % SCIPY_MIN_VERSION, + 'scipy>=%s,<%s ; python_version <= "3.4"' % (SCIPY_MIN_VERSION, SCIPY_MAX_VERSION_34), 'simplejson>=%s' % SIMPLEJSON_MIN_VERSION, 'traits>=%s,!=5.0' % TRAITS_MIN_VERSION, ] From 9c90924651caf139919d03451428f6a5b45eb4c5 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Fri, 16 Aug 2019 21:46:46 -0400 Subject: [PATCH 0319/1665] MNT: Update Python version classifiers --- nipype/info.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nipype/info.py b/nipype/info.py index 9aaa904580..da010b51fa 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -59,7 +59,9 @@ def get_nipype_gitversion(): 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', 'Topic :: Scientific/Engineering' + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Topic :: Scientific/Engineering' ] description = 'Neuroimaging in Python: Pipelines and Interfaces' From a9b65f30aa22692e02f5d63faae62c67f2ef352e Mon Sep 17 00:00:00 2001 From: Erin Benderoff Date: Fri, 16 Aug 2019 22:57:44 -0400 Subject: [PATCH 0320/1665] add name to zenodo --- .zenodo.json | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.zenodo.json b/.zenodo.json index d6ec94f140..a49f9b002f 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -78,6 +78,10 @@ "affiliation": "Shattuck Lab, UCLA Brain Mapping Center", "name": "Wong, Jason" }, + { + "affiliation": "Concordia University", + "name": "Benderoff, Erin" + }, { "name": "Modat, Marc" }, From 735ae5920da4b2cb3ce9046589810af8ece9d6a3 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sat, 17 Aug 2019 09:15:49 -0400 Subject: [PATCH 0321/1665] DOC: Update changelog --- doc/changelog/1.X.X-changelog | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/changelog/1.X.X-changelog b/doc/changelog/1.X.X-changelog index ef38af2eba..9ef4474bf1 100644 --- a/doc/changelog/1.X.X-changelog +++ b/doc/changelog/1.X.X-changelog @@ -12,6 +12,7 @@ * FIX: Remove '=' signs from EddyQuad argument specifications (https://github.com/nipy/nipype/pull/2941) * FIX: Set input model to bedpostx for camino.TrackBedpostxProba (https://github.com/nipy/nipype/pull/2947) * FIX: Allow ``max_sh``not to be set (auto mode) (https://github.com/nipy/nipype/pull/2940) + * ENH: Update mrtrix reconst.py EstimateFOD max_sh to be able to accept list (https://github.com/nipy/nipype/pull/2990) * ENH: Let ``indirectory`` handle ``nipype.utils.filemanip.Path`` (https://github.com/nipy/nipype/pull/2989) * ENH: Add resolve/rebase ``BasePath`` traits methods & tests (https://github.com/nipy/nipype/pull/2970) * ENH: Modify ``Directory`` and ``File`` traits to get along with pathlib (https://github.com/nipy/nipype/pull/2962) From 17019b417e87bfb53b9e4cfd3e2e483bc6eaf88c Mon Sep 17 00:00:00 2001 From: Lucinda Sisk Date: Sat, 17 Aug 2019 10:54:39 -0400 Subject: [PATCH 0322/1665] Update .zenodo.json --- .zenodo.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.zenodo.json b/.zenodo.json index a49f9b002f..ee1771da97 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -432,6 +432,11 @@ { "name": "Hallquist, Michael" }, + { + "affiliation": "Yale University; New Haven, CT, United States", + "name": "Sisk, Lucinda M.", + "orcid": "0000-0003-4900-9770" + }, { "affiliation": "TIB \u2013 Leibniz Information Centre for Science and Technology and University Library, Hannover, Germany", "name": "Leinweber, Katrin", From e7ab9cb2ecc81063fd89119c1dbb9ace97ec80eb Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 19 Aug 2019 08:57:20 -0400 Subject: [PATCH 0323/1665] MNT: Version 1.2.2-dev --- doc/documentation.rst | 2 +- nipype/info.py | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/doc/documentation.rst b/doc/documentation.rst index 54e8243800..ab33e90904 100644 --- a/doc/documentation.rst +++ b/doc/documentation.rst @@ -9,7 +9,7 @@ Documentation :Release: |version| :Date: |today| -Previous versions: `1.2.0 `_ `1.1.9 `_ +Previous versions: `1.2.1 `_ `1.2.0 `_ .. container:: doc2 diff --git a/nipype/info.py b/nipype/info.py index da010b51fa..f03e625dda 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -7,11 +7,9 @@ import sys -# nipype version information. An empty version_extra corresponds to a -# full release. 
'.dev' as a version_extra string means this is a development -# version +# nipype version information # Remove -dev for release -__version__ = '1.2.1' +__version__ = '1.2.2-dev' def get_nipype_gitversion(): From f4906165bd65ddbf87cfac02fd24fced6d4509ce Mon Sep 17 00:00:00 2001 From: Matteo Mancini Date: Mon, 19 Aug 2019 15:13:50 -0400 Subject: [PATCH 0324/1665] Fixed issue with CSD estimation in MRtrix3 --- nipype/interfaces/mrtrix3/reconst.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/mrtrix3/reconst.py b/nipype/interfaces/mrtrix3/reconst.py index 4e2db8de3f..5dede1244a 100644 --- a/nipype/interfaces/mrtrix3/reconst.py +++ b/nipype/interfaces/mrtrix3/reconst.py @@ -102,10 +102,10 @@ class EstimateFODInputSpec(MRTrix3BaseInputSpec): mandatory=True, desc='output WM ODF') gm_txt = File(argstr='%s', position=-4, desc='GM response text file') - gm_odf = File('gm.mif', usedefault=True, argstr='%s', + gm_odf = File('gm.mif', usedefault=False, argstr='%s', position=-3, desc='output GM ODF') csf_txt = File(argstr='%s', position=-2, desc='CSF response text file') - csf_odf = File('csf.mif', usedefault=True, argstr='%s', + csf_odf = File('csf.mif', usedefault=False, argstr='%s', position=-1, desc='output CSF ODF') mask_file = File(exists=True, argstr='-mask %s', desc='mask image') From 4bca4a44614d11663659e6b81508932b693cb8b8 Mon Sep 17 00:00:00 2001 From: Matteo Mancini Date: Mon, 19 Aug 2019 15:22:11 -0400 Subject: [PATCH 0325/1665] Removing forced optional argument in MRtrix3 --- nipype/interfaces/mrtrix3/reconst.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/reconst.py b/nipype/interfaces/mrtrix3/reconst.py index 5dede1244a..cdb0dd0a46 100644 --- a/nipype/interfaces/mrtrix3/reconst.py +++ b/nipype/interfaces/mrtrix3/reconst.py @@ -118,7 +118,6 @@ class EstimateFODInputSpec(MRTrix3BaseInputSpec): max_sh = InputMultiObject( traits.Int, value=[8], - usedefault=True, argstr='-lmax %s', sep=',', desc=('maximum harmonic degree of response function - single value for single-shell response, list for multi-shell response')) From 9c5dc6ec2efb80a6fe834aadb8cafa557029ee68 Mon Sep 17 00:00:00 2001 From: Matteo Mancini Date: Mon, 19 Aug 2019 16:15:46 -0400 Subject: [PATCH 0326/1665] Fix to make test pass --- nipype/interfaces/mrtrix3/reconst.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/reconst.py b/nipype/interfaces/mrtrix3/reconst.py index cdb0dd0a46..6deb4c22b8 100644 --- a/nipype/interfaces/mrtrix3/reconst.py +++ b/nipype/interfaces/mrtrix3/reconst.py @@ -150,7 +150,7 @@ class EstimateFOD(MRTrix3Base): >>> fod.inputs.wm_txt = 'wm.txt' >>> fod.inputs.grad_fsl = ('bvecs', 'bvals') >>> fod.cmdline # doctest: +ELLIPSIS - 'dwi2fod -fslgrad bvecs bvals -lmax 8 csd dwi.mif wm.txt wm.mif gm.mif csf.mif' + 'dwi2fod -fslgrad bvecs bvals csd dwi.mif wm.txt wm.mif' >>> fod.run() # doctest: +SKIP """ From 862f9d6e98fd765ad27b815fa9fbafc3eb428632 Mon Sep 17 00:00:00 2001 From: Victor Ferat Date: Tue, 20 Aug 2019 11:10:33 +0200 Subject: [PATCH 0327/1665] Fix #2995 --- nipype/interfaces/mne/base.py | 110 +++++++++++++++++++--------------- 1 file changed, 62 insertions(+), 48 deletions(-) diff --git a/nipype/interfaces/mne/base.py b/nipype/interfaces/mne/base.py index 7f53071372..c0aec045f2 100644 --- a/nipype/interfaces/mne/base.py +++ b/nipype/interfaces/mne/base.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) +from 
__future__ import print_function, division, unicode_literals, absolute_import from builtins import str, bytes import os.path as op @@ -8,74 +7,86 @@ from ... import logging from ...utils.filemanip import simplify_list -from ..base import (traits, File, Directory, TraitedSpec, OutputMultiPath) +from ..base import traits, File, Directory, TraitedSpec, OutputMultiPath from ..freesurfer.base import FSCommand, FSTraitedSpec -iflogger = logging.getLogger('nipype.interface') +iflogger = logging.getLogger("nipype.interface") class WatershedBEMInputSpec(FSTraitedSpec): subject_id = traits.Str( - argstr='--subject %s', + argstr="--subject %s", mandatory=True, - desc='Subject ID (must have a complete Freesurfer directory)') + desc="Subject ID (must have a complete Freesurfer directory)", + ) subjects_dir = Directory( exists=True, mandatory=True, usedefault=True, - desc='Path to Freesurfer subjects directory') + desc="Path to Freesurfer subjects directory", + ) volume = traits.Enum( - 'T1', - 'aparc+aseg', - 'aseg', - 'brain', - 'orig', - 'brainmask', - 'ribbon', - argstr='--volume %s', + "T1", + "aparc+aseg", + "aseg", + "brain", + "orig", + "brainmask", + "ribbon", + argstr="--volume %s", usedefault=True, - desc='The volume from the "mri" directory to use (defaults to T1)') + desc='The volume from the "mri" directory to use (defaults to T1)', + ) overwrite = traits.Bool( True, usedefault=True, - argstr='--overwrite', - desc='Overwrites the existing files') + argstr="--overwrite", + desc="Overwrites the existing files", + ) atlas_mode = traits.Bool( - argstr='--atlas', - desc='Use atlas mode for registration (default: no rigid alignment)') + argstr="--atlas", + desc="Use atlas mode for registration (default: no rigid alignment)", + ) class WatershedBEMOutputSpec(TraitedSpec): mesh_files = OutputMultiPath( File(exists=True), - desc=('Paths to the output meshes (brain, inner ' - 'skull, outer skull, outer skin)')) + desc=( + "Paths to the output meshes (brain, inner " + "skull, outer skull, outer skin)" + ), + ) brain_surface = File( - exists=True, - loc='bem/watershed', - desc='Brain surface (in Freesurfer format)') + exists=True, loc="bem/watershed", desc="Brain surface (in Freesurfer format)" + ) inner_skull_surface = File( exists=True, - loc='bem/watershed', - desc='Inner skull surface (in Freesurfer format)') + loc="bem/watershed", + desc="Inner skull surface (in Freesurfer format)", + ) outer_skull_surface = File( exists=True, - loc='bem/watershed', - desc='Outer skull surface (in Freesurfer format)') + loc="bem/watershed", + desc="Outer skull surface (in Freesurfer format)", + ) outer_skin_surface = File( exists=True, - loc='bem/watershed', - desc='Outer skin surface (in Freesurfer format)') + loc="bem/watershed", + desc="Outer skin surface (in Freesurfer format)", + ) fif_file = File( exists=True, - loc='bem', - altkey='fif', - desc='"fif" format file for EEG processing in MNE') + loc="bem", + altkey="fif", + desc='"fif" format file for EEG processing in MNE', + ) cor_files = OutputMultiPath( File(exists=True), - loc='bem/watershed/ws', - altkey='COR', - desc='"COR" format files') + loc="bem/watershed/ws", + altkey="COR", + desc='"COR" format files', + ) class WatershedBEM(FSCommand): @@ -94,18 +105,18 @@ class WatershedBEM(FSCommand): """ - _cmd = 'mne_watershed_bem' + _cmd = "mne watershed_bem" input_spec = WatershedBEMInputSpec output_spec = WatershedBEMOutputSpec - _additional_metadata = ['loc', 'altkey'] + _additional_metadata = ["loc", "altkey"] def _get_files(self, path, key, dirval, 
altkey=None): - globsuffix = '*' - globprefix = '*' + globsuffix = "*" + globprefix = "*" keydir = op.join(path, dirval) if altkey: key = altkey - globpattern = op.join(keydir, ''.join((globprefix, key, globsuffix))) + globpattern = op.join(keydir, "".join((globprefix, key, globsuffix))) return glob.glob(globpattern) def _list_outputs(self): @@ -115,10 +126,13 @@ def _list_outputs(self): output_traits = self._outputs() mesh_paths = [] for k in list(outputs.keys()): - if k != 'mesh_files': - val = self._get_files(subject_path, k, - output_traits.traits()[k].loc, - output_traits.traits()[k].altkey) + if k != "mesh_files": + val = self._get_files( + subject_path, + k, + output_traits.traits()[k].loc, + output_traits.traits()[k].altkey, + ) if val: value_list = simplify_list(val) if isinstance(value_list, list): @@ -130,7 +144,7 @@ def _list_outputs(self): else: raise TypeError outputs[k] = out_files - if not k.rfind('surface') == -1: + if not k.rfind("surface") == -1: mesh_paths.append(out_files) - outputs['mesh_files'] = mesh_paths + outputs["mesh_files"] = mesh_paths return outputs From 6f716542929a77bb61865e2d4c7a0aa67f40ce2e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20F=C3=A9rat?= Date: Tue, 20 Aug 2019 11:43:10 +0200 Subject: [PATCH 0328/1665] Fix documentation --- nipype/interfaces/mne/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/mne/base.py b/nipype/interfaces/mne/base.py index c0aec045f2..7a4f186496 100644 --- a/nipype/interfaces/mne/base.py +++ b/nipype/interfaces/mne/base.py @@ -100,7 +100,7 @@ class WatershedBEM(FSCommand): >>> bem.inputs.subject_id = 'subj1' >>> bem.inputs.subjects_dir = '.' >>> bem.cmdline - 'mne_watershed_bem --overwrite --subject subj1 --volume T1' + 'mne watershed_bem --overwrite --subject subj1 --volume T1' >>> bem.run() # doctest: +SKIP """ From 01fa94e82e12b98ad2890187398832485e3c9f29 Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Tue, 20 Aug 2019 17:56:24 -0400 Subject: [PATCH 0329/1665] Add index_mask_file input to ImageStats Allows fslstats calls with the -K option --- .zenodo.json | 5 +++++ nipype/interfaces/fsl/utils.py | 6 ++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index ee1771da97..bf1cb6f692 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -695,6 +695,11 @@ "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", "orcid": "0000-0002-5312-6729" + }, + { + "affiliation": "Holland Bloorview Kids Rehabilitation Hospital", + "name": "Tilley II, Steven", + "orcid": "0000-0003-4853-5082" } ], "keywords": [ diff --git a/nipype/interfaces/fsl/utils.py b/nipype/interfaces/fsl/utils.py index f4ef73c0e9..6985c880a8 100644 --- a/nipype/interfaces/fsl/utils.py +++ b/nipype/interfaces/fsl/utils.py @@ -725,18 +725,20 @@ class ImageStatsInputSpec(FSLCommandInputSpec): exists=True, argstr="%s", mandatory=True, - position=2, + position=3, desc='input file to generate stats of') op_string = traits.Str( argstr="%s", mandatory=True, - position=3, + position=4, desc=("string defining the operation, options are " "applied in order, e.g. -M -l 10 -M will " "report the non-zero mean, apply a threshold " "and then report the new nonzero mean")) mask_file = File( exists=True, argstr="", desc='mask file used for option -k %s') + index_mask_file = File( + exists=True, argstr="-K %s", position=2) class ImageStatsOutputSpec(TraitedSpec): From 08cd3d3a7abcf87ceec572a04b378af152d09b26 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Tue, 20 Aug 2019 20:43:33 -0400 Subject: [PATCH 0330/1665] MAINT: Sort dependencies --- nipype/info.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/nipype/info.py b/nipype/info.py index e896a48a82..f678ebf8e6 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -129,22 +129,22 @@ def get_nipype_gitversion(): VERSION = __version__ PROVIDES = ['nipype'] REQUIRES = [ - 'nibabel>=%s' % NIBABEL_MIN_VERSION, + 'click>=%s' % CLICK_MIN_VERSION, + 'funcsigs', + 'future>=%s' % FUTURE_MIN_VERSION, 'networkx>=%s' % NETWORKX_MIN_VERSION, + 'neurdflib', + 'nibabel>=%s' % NIBABEL_MIN_VERSION, 'numpy>=%s ; python_version < "3.7"' % NUMPY_MIN_VERSION, 'numpy>=%s ; python_version >= "3.7"' % NUMPY_MIN_VERSION_37, + 'packaging', + 'prov>=%s' % PROV_VERSION, + 'pydot>=%s' % PYDOT_MIN_VERSION, + 'pydotplus', 'python-dateutil>=%s' % DATEUTIL_MIN_VERSION, 'scipy>=%s' % SCIPY_MIN_VERSION, - 'traits>=%s,!=5.0' % TRAITS_MIN_VERSION, - 'future>=%s' % FUTURE_MIN_VERSION, 'simplejson>=%s' % SIMPLEJSON_MIN_VERSION, - 'prov>=%s' % PROV_VERSION, - 'neurdflib', - 'click>=%s' % CLICK_MIN_VERSION, - 'funcsigs', - 'pydotplus', - 'pydot>=%s' % PYDOT_MIN_VERSION, - 'packaging', + 'traits>=%s,!=5.0' % TRAITS_MIN_VERSION, ] TESTS_REQUIRES = [ From facabc4531a4964fd124f1412967773e8c5f54c8 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 20 Aug 2019 21:23:56 -0400 Subject: [PATCH 0331/1665] MAINT: Add python_requires to package metadata --- nipype/info.py | 1 + setup.py | 1 + 2 files changed, 2 insertions(+) diff --git a/nipype/info.py b/nipype/info.py index f03e625dda..827bc93a12 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -61,6 +61,7 @@ def get_nipype_gitversion(): 'Programming Language :: Python :: 3.7', 'Topic :: Scientific/Engineering' ] +PYTHON_REQUIRES = ">= 2.7, != 3.0.*, != 3.1.*, != 3.2.*, != 3.3.*" description = 'Neuroimaging in Python: Pipelines and Interfaces' diff --git a/setup.py b/setup.py index 37677afd35..72ba59d904 100755 --- a/setup.py +++ b/setup.py @@ -139,6 +139,7 @@ def main(): author_email=ldict['AUTHOR_EMAIL'], platforms=ldict['PLATFORMS'], version=ldict['VERSION'], + python_requires=ldict['PYTHON_REQUIRES'], install_requires=ldict['REQUIRES'], setup_requires=SETUP_REQUIRES, provides=ldict['PROVIDES'], From 7baa6d3c13ac6661f4b213aa3957712dae1acfa2 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 20 Aug 2019 21:45:16 -0400 Subject: [PATCH 0332/1665] PY3: Remove PY2 str hack --- nipype/algorithms/modelgen.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/algorithms/modelgen.py b/nipype/algorithms/modelgen.py index 4e37943d2b..ba7296bb07 100644 --- a/nipype/algorithms/modelgen.py +++ b/nipype/algorithms/modelgen.py @@ -169,7 +169,7 @@ def bids_gen_info(bids_event_files, info = [] for bids_event_file in bids_event_files: with open(bids_event_file) as f: - f_events = csv.DictReader(f, skipinitialspace=True, delimiter=str_basetype('\t')) + f_events = csv.DictReader(f, skipinitialspace=True, delimiter='\t') events = [{k: v for k, v in row.items()} for row in f_events] if not condition_column: condition_column = '_trial_type' From 3261b8dba0037a898b0325a910be309d13f9e6d1 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Tue, 20 Aug 2019 21:27:55 -0400 Subject: [PATCH 0333/1665] CI: Check python_requires blocks installation --- .circleci/config.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 828fcf44f6..6f612dae6a 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -368,6 +368,14 @@ jobs: pip install dist/nipype-*-py2.py3-none-any.whl # Numpy should be upgraded to >= 1.15.3 test "$(pip show numpy | grep Version)" \> "Version: 1.15.2" + - run: + name: Check python_requires prevents installation on Python 3.3 + command: | + pyenv install 3.3.7 + pyenv local 3.3.7 + FAIL=false + pip install dist/nipype-*-py2.py3-none-any.whl || FAIL=true + $FAIL - store_artifacts: path: /home/circleci/nipype/dist From 70d6cbd6a1b4c2b91693844a299edc77d15092a4 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 20 Aug 2019 21:36:30 -0400 Subject: [PATCH 0334/1665] CI: Switch from setup.py check to twine check --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 6f612dae6a..a5d59eb75b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -340,8 +340,8 @@ jobs: name: Check pypi preconditions command: | pip install --upgrade pip twine future wheel readme_renderer setuptools - python setup.py check -r -s python setup.py sdist bdist_wheel + twine check dist/* - run: name: Validate Python 2 installation command: | From ac54739effc8fdd7d89a57b5aac91b3f7cefd760 Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Wed, 21 Aug 2019 10:51:48 -0400 Subject: [PATCH 0335/1665] add new autogenerated test file --- nipype/interfaces/fsl/tests/test_auto_ImageStats.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageStats.py b/nipype/interfaces/fsl/tests/test_auto_ImageStats.py index 0230245f2e..f2dc0af947 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ImageStats.py +++ b/nipype/interfaces/fsl/tests/test_auto_ImageStats.py @@ -14,6 +14,11 @@ def test_ImageStats_inputs(): argstr='%s', extensions=None, mandatory=True, + position=3, + ), + index_mask_file=dict( + argstr='-K %s', + extensions=None, position=2, ), mask_file=dict( @@ -23,7 +28,7 @@ def test_ImageStats_inputs(): op_string=dict( argstr='%s', mandatory=True, - position=3, + position=4, ), output_type=dict(), split_4d=dict( From 22efaf766bef74221fabb1f6f0dba4dde954c704 Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Thu, 22 Aug 2019 17:19:46 -0400 Subject: [PATCH 0336/1665] add index_mask_file desc Change by @effigies Co-Authored-By: Chris Markiewicz --- nipype/interfaces/fsl/utils.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/fsl/utils.py b/nipype/interfaces/fsl/utils.py index 6985c880a8..a572259e2e 100644 --- a/nipype/interfaces/fsl/utils.py +++ b/nipype/interfaces/fsl/utils.py @@ -738,7 +738,12 @@ class ImageStatsInputSpec(FSLCommandInputSpec): mask_file = File( exists=True, argstr="", desc='mask file used for option -k %s') index_mask_file = File( - exists=True, argstr="-K %s", position=2) + exists=True, + argstr="-K %s", + position=2, + desc="generate seperate n submasks from indexMask, " + "for indexvalues 1..n where n is the maximum index " + "value in indexMask, and generate statistics for each submask") class ImageStatsOutputSpec(TraitedSpec): From 8129ff4e004fdfcd6ce9b0730ee62d58586ff647 Mon Sep 17 00:00:00 2001 From: Victor Date: Fri, 23 Aug 2019 15:11:54 
+0200 Subject: [PATCH 0337/1665] run make check-before-commit without error --- .../algorithms/tests/test_auto_AddCSVRow.py | 5 +- nipype/algorithms/tests/test_auto_Gunzip.py | 9 ++- .../tests/test_auto_NonSteadyStateDetector.py | 9 ++- .../afni/tests/test_auto_Allineate.py | 1 + nipype/interfaces/afni/tests/test_auto_Cat.py | 7 +-- .../afni/tests/test_auto_ClipLevel.py | 1 + .../afni/tests/test_auto_LocalBistat.py | 7 ++- .../afni/tests/test_auto_Localstat.py | 6 +- .../afni/tests/test_auto_NwarpApply.py | 5 +- .../afni/tests/test_auto_OneDToolPy.py | 1 + .../interfaces/afni/tests/test_auto_Qwarp.py | 5 +- .../afni/tests/test_auto_QwarpPlusMinus.py | 5 +- .../afni/tests/test_auto_ROIStats.py | 5 +- .../interfaces/afni/tests/test_auto_ReHo.py | 6 +- .../afni/tests/test_auto_Remlfit.py | 1 + .../afni/tests/test_auto_Resample.py | 5 +- .../afni/tests/test_auto_TCorrMap.py | 5 +- .../afni/tests/test_auto_Zeropad.py | 1 + .../interfaces/ants/tests/test_auto_ANTS.py | 4 +- .../ants/tests/test_auto_AntsJointFusion.py | 9 ++- .../ants/tests/test_auto_Atropos.py | 4 +- .../base/tests/test_auto_StdOutCommandLine.py | 1 + .../camino/tests/test_auto_AnalyzeHeader.py | 1 + .../tests/test_auto_ComputeEigensystem.py | 1 + .../test_auto_ComputeFractionalAnisotropy.py | 1 + .../tests/test_auto_ComputeTensorTrace.py | 1 + .../camino/tests/test_auto_DTIFit.py | 1 + .../camino/tests/test_auto_DTLUTGen.py | 1 + .../camino/tests/test_auto_FSL2Scheme.py | 1 + .../camino/tests/test_auto_Image2Voxel.py | 1 + .../camino/tests/test_auto_LinRecon.py | 1 + .../interfaces/camino/tests/test_auto_MESD.py | 1 + .../camino/tests/test_auto_ModelFit.py | 1 + .../camino/tests/test_auto_NIfTIDT2Camino.py | 1 + .../camino/tests/test_auto_PicoPDFs.py | 1 + .../camino/tests/test_auto_ProcStreamlines.py | 1 + .../camino/tests/test_auto_QBallMX.py | 1 + .../camino/tests/test_auto_SFLUTGen.py | 1 + .../camino/tests/test_auto_SFPICOCalibData.py | 1 + .../camino/tests/test_auto_SFPeaks.py | 1 + .../camino/tests/test_auto_Shredder.py | 1 + .../camino/tests/test_auto_TractShredder.py | 1 + .../camino/tests/test_auto_VtkStreamlines.py | 1 + .../freesurfer/tests/test_auto_Aparc2Aseg.py | 9 ++- .../freesurfer/tests/test_auto_Apas2Aseg.py | 9 ++- .../freesurfer/tests/test_auto_CALabel.py | 10 +++- .../freesurfer/tests/test_auto_CANormalize.py | 2 +- .../freesurfer/tests/test_auto_CARegister.py | 2 +- .../test_auto_CheckTalairachAlignment.py | 2 +- .../tests/test_auto_ConcatenateLTA.py | 2 + .../freesurfer/tests/test_auto_Contrast.py | 10 +++- .../freesurfer/tests/test_auto_FixTopology.py | 5 +- .../freesurfer/tests/test_auto_MRIsCALabel.py | 10 +++- .../freesurfer/tests/test_auto_MS_LDA.py | 12 +++- .../freesurfer/tests/test_auto_Normalize.py | 2 +- .../tests/test_auto_ParcellationStats.py | 9 ++- .../tests/test_auto_RegisterAVItoTalairach.py | 2 +- .../tests/test_auto_RelabelHypointensities.py | 9 ++- .../tests/test_auto_SegStatsReconAll.py | 5 +- .../tests/test_auto_SurfaceSnapshots.py | 1 + .../tests/test_auto_TalairachAVI.py | 6 +- .../freesurfer/tests/test_auto_TalairachQC.py | 9 ++- .../fsl/tests/test_auto_AccuracyTester.py | 9 ++- .../interfaces/fsl/tests/test_auto_Cleaner.py | 3 + .../interfaces/fsl/tests/test_auto_Cluster.py | 5 +- .../fsl/tests/test_auto_FeatureExtractor.py | 11 ++-- .../fsl/tests/test_auto_FindTheBiggest.py | 9 ++- .../fsl/tests/test_auto_TrainingSetCreator.py | 22 ++++---- .../minc/tests/test_auto_Average.py | 6 +- .../interfaces/minc/tests/test_auto_BBox.py | 1 + .../interfaces/minc/tests/test_auto_Beast.py | 
7 ++- .../interfaces/minc/tests/test_auto_Calc.py | 2 + .../interfaces/minc/tests/test_auto_Dump.py | 1 + .../minc/tests/test_auto_Extract.py | 1 + .../interfaces/minc/tests/test_auto_Math.py | 1 + .../interfaces/minc/tests/test_auto_Norm.py | 6 +- .../minc/tests/test_auto_Resample.py | 10 +++- .../minc/tests/test_auto_Reshape.py | 1 + .../interfaces/minc/tests/test_auto_ToRaw.py | 1 + .../minc/tests/test_auto_VolSymm.py | 2 + .../minc/tests/test_auto_XfmInvert.py | 1 + .../mrtrix3/tests/test_auto_BrainMask.py | 6 +- .../mrtrix3/tests/test_auto_DWIExtract.py | 6 +- .../mrtrix3/tests/test_auto_EstimateFOD.py | 6 +- .../mrtrix3/tests/test_auto_FitTensor.py | 6 +- .../mrtrix3/tests/test_auto_Generate5tt.py | 6 +- .../mrtrix3/tests/test_auto_MRConvert.py | 6 +- .../mrtrix3/tests/test_auto_MRMath.py | 6 +- .../mrtrix3/tests/test_auto_ResponseSD.py | 6 +- .../mrtrix3/tests/test_auto_Tractography.py | 6 +- .../niftyfit/tests/test_auto_DwiTool.py | 26 ++++++--- .../niftyfit/tests/test_auto_FitAsl.py | 41 +++++++++++--- .../niftyfit/tests/test_auto_FitDwi.py | 56 ++++++++++++++----- .../niftyfit/tests/test_auto_FitQt1.py | 25 +++++++-- .../nipy/tests/test_auto_EstimateContrast.py | 2 +- .../interfaces/nipy/tests/test_auto_FitGLM.py | 4 +- .../tests/test_auto_UnbiasedNonLocalMeans.py | 9 ++- .../diffusion/tests/test_auto_DTIexport.py | 9 ++- .../diffusion/tests/test_auto_DTIimport.py | 9 ++- .../test_auto_DWIJointRicianLMMSEFilter.py | 9 ++- .../tests/test_auto_DWIRicianLMMSEFilter.py | 9 ++- ..._auto_DiffusionTensorScalarMeasurements.py | 9 ++- .../tests/test_auto_ResampleDTIVolume.py | 9 ++- .../tests/test_auto_AddScalarVolumes.py | 9 ++- .../tests/test_auto_CastScalarVolume.py | 9 ++- .../tests/test_auto_CheckerBoardFilter.py | 9 ++- ...test_auto_CurvatureAnisotropicDiffusion.py | 9 ++- .../tests/test_auto_ExtractSkeleton.py | 9 ++- .../test_auto_GaussianBlurImageFilter.py | 9 ++- .../test_auto_GradientAnisotropicDiffusion.py | 9 ++- .../test_auto_GrayscaleFillHoleImageFilter.py | 9 ++- ...test_auto_GrayscaleGrindPeakImageFilter.py | 9 ++- .../tests/test_auto_HistogramMatching.py | 9 ++- .../tests/test_auto_ImageLabelCombine.py | 9 ++- .../tests/test_auto_MaskScalarVolume.py | 9 ++- .../tests/test_auto_MedianImageFilter.py | 9 ++- .../tests/test_auto_MultiplyScalarVolumes.py | 9 ++- ...test_auto_ResampleScalarVectorDWIVolume.py | 9 ++- .../tests/test_auto_SubtractScalarVolumes.py | 9 ++- .../tests/test_auto_ThresholdScalarVolume.py | 9 ++- ...auto_VotingBinaryHoleFillingImageFilter.py | 9 ++- ...est_auto_DWIUnbiasedNonLocalMeansFilter.py | 9 ++- .../test_auto_OtsuThresholdImageFilter.py | 9 ++- .../test_auto_OtsuThresholdSegmentation.py | 9 ++- .../tests/test_auto_ResampleScalarVolume.py | 9 ++- .../test_auto_RobustStatisticsSegmenter.py | 9 ++- ...st_auto_SimpleRegionGrowingSegmentation.py | 9 ++- .../tests/test_auto_GrayscaleModelMaker.py | 9 ++- .../tests/test_auto_LabelMapSmoothing.py | 9 ++- .../slicer/tests/test_auto_MergeModels.py | 9 ++- .../slicer/tests/test_auto_ModelToLabelMap.py | 9 ++- .../tests/test_auto_OrientScalarVolume.py | 9 ++- .../tests/test_auto_ProbeVolumeWithModel.py | 9 ++- .../utility/tests/test_auto_Rename.py | 2 +- 134 files changed, 541 insertions(+), 340 deletions(-) diff --git a/nipype/algorithms/tests/test_auto_AddCSVRow.py b/nipype/algorithms/tests/test_auto_AddCSVRow.py index 3090c8b6a9..a8c4467cbf 100644 --- a/nipype/algorithms/tests/test_auto_AddCSVRow.py +++ b/nipype/algorithms/tests/test_auto_AddCSVRow.py @@ -6,7 +6,10 @@ def test_AddCSVRow_inputs(): 
input_map = dict( _outputs=dict(usedefault=True, ), - in_file=dict(mandatory=True, ), + in_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = AddCSVRow.input_spec() diff --git a/nipype/algorithms/tests/test_auto_Gunzip.py b/nipype/algorithms/tests/test_auto_Gunzip.py index e583deaa10..d84e84ad61 100644 --- a/nipype/algorithms/tests/test_auto_Gunzip.py +++ b/nipype/algorithms/tests/test_auto_Gunzip.py @@ -4,11 +4,10 @@ def test_Gunzip_inputs(): - input_map = dict( - in_file=dict( - extensions=None, - mandatory=True, - ), ) + input_map = dict(in_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = Gunzip.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py b/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py index 4da9dad47d..10eb41190b 100644 --- a/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py +++ b/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py @@ -4,11 +4,10 @@ def test_NonSteadyStateDetector_inputs(): - input_map = dict( - in_file=dict( - extensions=None, - mandatory=True, - ), ) + input_map = dict(in_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = NonSteadyStateDetector.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Allineate.py b/nipype/interfaces/afni/tests/test_auto_Allineate.py index 0b110e669a..e1565376bd 100644 --- a/nipype/interfaces/afni/tests/test_auto_Allineate.py +++ b/nipype/interfaces/afni/tests/test_auto_Allineate.py @@ -86,6 +86,7 @@ def test_Allineate_inputs(): ), out_weight_file=dict( argstr='-wtprefix %s', + extensions=None, xor=['allcostx'], ), outputtype=dict(), diff --git a/nipype/interfaces/afni/tests/test_auto_Cat.py b/nipype/interfaces/afni/tests/test_auto_Cat.py index 345806f746..50806add01 100644 --- a/nipype/interfaces/afni/tests/test_auto_Cat.py +++ b/nipype/interfaces/afni/tests/test_auto_Cat.py @@ -21,10 +21,9 @@ def test_Cat_inputs(): usedefault=True, ), omitconst=dict(argstr='-nonconst', ), - out_cint=dict( - xor=[ - 'out_format', 'out_nice', 'out_double', 'out_fint', 'out_int' - ], ), + out_cint=dict(xor=[ + 'out_format', 'out_nice', 'out_double', 'out_fint', 'out_int' + ], ), out_double=dict( argstr='-d', xor=['out_format', 'out_nice', 'out_int', 'out_fint', 'out_cint'], diff --git a/nipype/interfaces/afni/tests/test_auto_ClipLevel.py b/nipype/interfaces/afni/tests/test_auto_ClipLevel.py index f9b3dbf705..96f928a809 100644 --- a/nipype/interfaces/afni/tests/test_auto_ClipLevel.py +++ b/nipype/interfaces/afni/tests/test_auto_ClipLevel.py @@ -17,6 +17,7 @@ def test_ClipLevel_inputs(): ), grad=dict( argstr='-grad %s', + extensions=None, position=3, xor='doall', ), diff --git a/nipype/interfaces/afni/tests/test_auto_LocalBistat.py b/nipype/interfaces/afni/tests/test_auto_LocalBistat.py index ed3e61d74d..4632e4cf7d 100644 --- a/nipype/interfaces/afni/tests/test_auto_LocalBistat.py +++ b/nipype/interfaces/afni/tests/test_auto_LocalBistat.py @@ -26,7 +26,10 @@ def test_LocalBistat_inputs(): mandatory=True, position=-1, ), - mask_file=dict(argstr='-mask %s', ), + mask_file=dict( + argstr='-mask %s', + extensions=None, + ), neighborhood=dict( argstr="-nbhd '%s(%s)'", mandatory=True, @@ -37,6 +40,7 @@ def test_LocalBistat_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, keep_extension=True, name_source='in_file1', name_template='%s_bistat', @@ -49,6 +53,7 @@ def test_LocalBistat_inputs(): ), weight_file=dict( argstr='-weight %s', 
+ extensions=None, xor=['automask'], ), ) diff --git a/nipype/interfaces/afni/tests/test_auto_Localstat.py b/nipype/interfaces/afni/tests/test_auto_Localstat.py index 011ce44da8..62dc800941 100644 --- a/nipype/interfaces/afni/tests/test_auto_Localstat.py +++ b/nipype/interfaces/afni/tests/test_auto_Localstat.py @@ -21,7 +21,10 @@ def test_Localstat_inputs(): mandatory=True, position=-1, ), - mask_file=dict(argstr='-mask %s', ), + mask_file=dict( + argstr='-mask %s', + extensions=None, + ), neighborhood=dict( argstr="-nbhd '%s(%s)'", mandatory=True, @@ -33,6 +36,7 @@ def test_Localstat_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_localstat', diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpApply.py b/nipype/interfaces/afni/tests/test_auto_NwarpApply.py index e00457f4f3..87388c65ec 100644 --- a/nipype/interfaces/afni/tests/test_auto_NwarpApply.py +++ b/nipype/interfaces/afni/tests/test_auto_NwarpApply.py @@ -20,7 +20,10 @@ def test_NwarpApply_inputs(): usedefault=True, ), inv_warp=dict(argstr='-iwarp', ), - master=dict(argstr='-master %s', ), + master=dict( + argstr='-master %s', + extensions=None, + ), out_file=dict( argstr='-prefix %s', extensions=None, diff --git a/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py b/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py index f8e664a727..114fe53fba 100644 --- a/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py +++ b/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py @@ -30,6 +30,7 @@ def test_OneDToolPy_inputs(): show_censor_count=dict(argstr='-show_censor_count', ), show_cormat_warnings=dict( argstr='-show_cormat_warnings |& tee %s', + extensions=None, position=-1, xor=['out_file'], ), diff --git a/nipype/interfaces/afni/tests/test_auto_Qwarp.py b/nipype/interfaces/afni/tests/test_auto_Qwarp.py index 3ef8c2e9b2..0b8a9e38ec 100644 --- a/nipype/interfaces/afni/tests/test_auto_Qwarp.py +++ b/nipype/interfaces/afni/tests/test_auto_Qwarp.py @@ -125,7 +125,10 @@ def test_Qwarp_inputs(): name_source=['in_file'], name_template='ppp_%s', ), - out_weight_file=dict(argstr='-wtprefix %s', ), + out_weight_file=dict( + argstr='-wtprefix %s', + extensions=None, + ), outputtype=dict(), overwrite=dict(argstr='-overwrite', ), pblur=dict(argstr='-pblur %s', ), diff --git a/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py b/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py index ca27a0d682..e282d0d0a5 100644 --- a/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py +++ b/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py @@ -125,7 +125,10 @@ def test_QwarpPlusMinus_inputs(): position=0, usedefault=True, ), - out_weight_file=dict(argstr='-wtprefix %s', ), + out_weight_file=dict( + argstr='-wtprefix %s', + extensions=None, + ), outputtype=dict(), overwrite=dict(argstr='-overwrite', ), pblur=dict(argstr='-pblur %s', ), diff --git a/nipype/interfaces/afni/tests/test_auto_ROIStats.py b/nipype/interfaces/afni/tests/test_auto_ROIStats.py index d3c956f7c5..c7fc517ccc 100644 --- a/nipype/interfaces/afni/tests/test_auto_ROIStats.py +++ b/nipype/interfaces/afni/tests/test_auto_ROIStats.py @@ -49,7 +49,10 @@ def test_ROIStats_inputs(): position=-1, ), quiet=dict(argstr='-quiet', ), - roisel=dict(argstr='-roisel %s', ), + roisel=dict( + argstr='-roisel %s', + extensions=None, + ), stat=dict(argstr='%s...', ), zerofill=dict( argstr='-zerofill %s', diff --git a/nipype/interfaces/afni/tests/test_auto_ReHo.py 
b/nipype/interfaces/afni/tests/test_auto_ReHo.py index 0edcedcdaf..cf3e468159 100644 --- a/nipype/interfaces/afni/tests/test_auto_ReHo.py +++ b/nipype/interfaces/afni/tests/test_auto_ReHo.py @@ -25,13 +25,17 @@ def test_ReHo_inputs(): argstr='-in_rois %s', extensions=None, ), - mask_file=dict(argstr='-mask %s', ), + mask_file=dict( + argstr='-mask %s', + extensions=None, + ), neighborhood=dict( argstr='-nneigh %s', xor=['sphere', 'ellipsoid'], ), out_file=dict( argstr='-prefix %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_reho', diff --git a/nipype/interfaces/afni/tests/test_auto_Remlfit.py b/nipype/interfaces/afni/tests/test_auto_Remlfit.py index f2e6703bf6..05ee75210e 100644 --- a/nipype/interfaces/afni/tests/test_auto_Remlfit.py +++ b/nipype/interfaces/afni/tests/test_auto_Remlfit.py @@ -58,6 +58,7 @@ def test_Remlfit_inputs(): ), matim=dict( argstr='-matim %s', + extensions=None, xor=['matrix'], ), matrix=dict( diff --git a/nipype/interfaces/afni/tests/test_auto_Resample.py b/nipype/interfaces/afni/tests/test_auto_Resample.py index 560d883d75..f63e1347f5 100644 --- a/nipype/interfaces/afni/tests/test_auto_Resample.py +++ b/nipype/interfaces/afni/tests/test_auto_Resample.py @@ -17,7 +17,10 @@ def test_Resample_inputs(): mandatory=True, position=-1, ), - master=dict(argstr='-master %s', ), + master=dict( + argstr='-master %s', + extensions=None, + ), num_threads=dict( nohash=True, usedefault=True, diff --git a/nipype/interfaces/afni/tests/test_auto_TCorrMap.py b/nipype/interfaces/afni/tests/test_auto_TCorrMap.py index 1ea4c0790e..209473fe6f 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCorrMap.py +++ b/nipype/interfaces/afni/tests/test_auto_TCorrMap.py @@ -93,7 +93,10 @@ def test_TCorrMap_inputs(): name_source='in_file', suffix='_qmean', ), - regress_out_timeseries=dict(argstr='-ort %s', ), + regress_out_timeseries=dict( + argstr='-ort %s', + extensions=None, + ), seeds=dict( argstr='-seed %s', extensions=None, diff --git a/nipype/interfaces/afni/tests/test_auto_Zeropad.py b/nipype/interfaces/afni/tests/test_auto_Zeropad.py index abeceda432..1bd80cfad8 100644 --- a/nipype/interfaces/afni/tests/test_auto_Zeropad.py +++ b/nipype/interfaces/afni/tests/test_auto_Zeropad.py @@ -55,6 +55,7 @@ def test_Zeropad_inputs(): ), master=dict( argstr='-master %s', + extensions=None, xor=['I', 'S', 'A', 'P', 'L', 'R', 'z', 'RL', 'AP', 'IS', 'mm'], ), mm=dict( diff --git a/nipype/interfaces/ants/tests/test_auto_ANTS.py b/nipype/interfaces/ants/tests/test_auto_ANTS.py index 5c6fa2c501..1ee5cabe21 100644 --- a/nipype/interfaces/ants/tests/test_auto_ANTS.py +++ b/nipype/interfaces/ants/tests/test_auto_ANTS.py @@ -57,8 +57,8 @@ def test_ANTS_inputs(): regularization=dict(argstr='%s', ), regularization_deformation_field_sigma=dict( requires=['regularization'], ), - regularization_gradient_field_sigma=dict( - requires=['regularization'], ), + regularization_gradient_field_sigma=dict(requires=['regularization' + ], ), smoothing_sigmas=dict( argstr='--gaussian-smoothing-sigmas %s', sep='x', diff --git a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py index 02ebe6431c..a2829be3fa 100644 --- a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py +++ b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py @@ -44,11 +44,10 @@ def test_AntsJointFusion_inputs(): nohash=True, usedefault=True, ), - out_atlas_voting_weight_name_format=dict( - requires=[ - 'out_label_fusion', 
'out_intensity_fusion_name_format', - 'out_label_post_prob_name_format' - ], ), + out_atlas_voting_weight_name_format=dict(requires=[ + 'out_label_fusion', 'out_intensity_fusion_name_format', + 'out_label_post_prob_name_format' + ], ), out_intensity_fusion_name_format=dict(argstr='', ), out_label_fusion=dict( argstr='%s', diff --git a/nipype/interfaces/ants/tests/test_auto_Atropos.py b/nipype/interfaces/ants/tests/test_auto_Atropos.py index be1a271f9a..73d7f6813a 100644 --- a/nipype/interfaces/ants/tests/test_auto_Atropos.py +++ b/nipype/interfaces/ants/tests/test_auto_Atropos.py @@ -53,8 +53,8 @@ def test_Atropos_inputs(): prior_probability_threshold=dict(requires=['prior_weighting'], ), prior_weighting=dict(), save_posteriors=dict(), - use_mixture_model_proportions=dict( - requires=['posterior_formulation'], ), + use_mixture_model_proportions=dict(requires=['posterior_formulation' + ], ), use_random_seed=dict( argstr='--use-random-seed %d', usedefault=True, diff --git a/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py b/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py index e39dc3acaa..95afcd3216 100644 --- a/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py @@ -12,6 +12,7 @@ def test_StdOutCommandLine_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py b/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py index 28170946ac..c40082d836 100644 --- a/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py +++ b/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py @@ -50,6 +50,7 @@ def test_AnalyzeHeader_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py b/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py index 66ac282175..edf38864fa 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py @@ -24,6 +24,7 @@ def test_ComputeEigensystem_inputs(): maxcomponents=dict(argstr='-maxcomponents %d', ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py b/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py index 9cfae77b2f..75604df01b 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py @@ -20,6 +20,7 @@ def test_ComputeFractionalAnisotropy_inputs(): inputmodel=dict(argstr='-inputmodel %s', ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py b/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py index 1443a253bd..4d31fa884c 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py @@ -20,6 +20,7 @@ def test_ComputeTensorTrace_inputs(): inputmodel=dict(argstr='-inputmodel %s', ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_DTIFit.py b/nipype/interfaces/camino/tests/test_auto_DTIFit.py index 26d27d57d2..0870a77752 100644 --- 
a/nipype/interfaces/camino/tests/test_auto_DTIFit.py +++ b/nipype/interfaces/camino/tests/test_auto_DTIFit.py @@ -26,6 +26,7 @@ def test_DTIFit_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py b/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py index f7caf77ef1..3242163c3a 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py +++ b/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py @@ -28,6 +28,7 @@ def test_DTLUTGen_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py b/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py index 350c604c99..9b8a74be6a 100644 --- a/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py +++ b/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py @@ -40,6 +40,7 @@ def test_FSL2Scheme_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py b/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py index 29864bef82..2c013cf216 100644 --- a/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py +++ b/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py @@ -18,6 +18,7 @@ def test_Image2Voxel_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_LinRecon.py b/nipype/interfaces/camino/tests/test_auto_LinRecon.py index 996d8f4b99..147cfaab5e 100644 --- a/nipype/interfaces/camino/tests/test_auto_LinRecon.py +++ b/nipype/interfaces/camino/tests/test_auto_LinRecon.py @@ -24,6 +24,7 @@ def test_LinRecon_inputs(): normalize=dict(argstr='-normalize', ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_MESD.py b/nipype/interfaces/camino/tests/test_auto_MESD.py index 57dbbf3b28..f7dfecfdb9 100644 --- a/nipype/interfaces/camino/tests/test_auto_MESD.py +++ b/nipype/interfaces/camino/tests/test_auto_MESD.py @@ -42,6 +42,7 @@ def test_MESD_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_ModelFit.py b/nipype/interfaces/camino/tests/test_auto_ModelFit.py index e5c16ec975..6d49969eb6 100644 --- a/nipype/interfaces/camino/tests/test_auto_ModelFit.py +++ b/nipype/interfaces/camino/tests/test_auto_ModelFit.py @@ -34,6 +34,7 @@ def test_ModelFit_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py b/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py index 0a5583b03e..65e0210268 100644 --- a/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py +++ b/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py @@ -26,6 +26,7 @@ def test_NIfTIDT2Camino_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py b/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py index c918a372f1..e5cdea5cae 100644 --- a/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py +++ b/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py @@ -36,6 +36,7 @@ def test_PicoPDFs_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git 
a/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py b/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py index c30ef08d8c..0386e0d54f 100644 --- a/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py +++ b/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py @@ -62,6 +62,7 @@ def test_ProcStreamlines_inputs(): noresample=dict(argstr='-noresample', ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_QBallMX.py b/nipype/interfaces/camino/tests/test_auto_QBallMX.py index 7f7d0bc99f..f452f26350 100644 --- a/nipype/interfaces/camino/tests/test_auto_QBallMX.py +++ b/nipype/interfaces/camino/tests/test_auto_QBallMX.py @@ -20,6 +20,7 @@ def test_QBallMX_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py b/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py index 795138ea89..220e116255 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py +++ b/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py @@ -35,6 +35,7 @@ def test_SFLUTGen_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py b/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py index 80222fce4c..1b71553676 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py +++ b/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py @@ -27,6 +27,7 @@ def test_SFPICOCalibData_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_SFPeaks.py b/nipype/interfaces/camino/tests/test_auto_SFPeaks.py index 8db250c58c..49ac58aa06 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFPeaks.py +++ b/nipype/interfaces/camino/tests/test_auto_SFPeaks.py @@ -38,6 +38,7 @@ def test_SFPeaks_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_Shredder.py b/nipype/interfaces/camino/tests/test_auto_Shredder.py index 594b35fc60..2d8ec43589 100644 --- a/nipype/interfaces/camino/tests/test_auto_Shredder.py +++ b/nipype/interfaces/camino/tests/test_auto_Shredder.py @@ -28,6 +28,7 @@ def test_Shredder_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_TractShredder.py b/nipype/interfaces/camino/tests/test_auto_TractShredder.py index eeed244533..e4df010c60 100644 --- a/nipype/interfaces/camino/tests/test_auto_TractShredder.py +++ b/nipype/interfaces/camino/tests/test_auto_TractShredder.py @@ -28,6 +28,7 @@ def test_TractShredder_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py b/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py index 72b6f106d8..bd8d295572 100644 --- a/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py +++ b/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py @@ -25,6 +25,7 @@ def test_VtkStreamlines_inputs(): interpolatescalars=dict(argstr='-interpolatescalars', ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py b/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py 
index 8a83130a4d..494ce981c5 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py @@ -79,11 +79,10 @@ def test_Aparc2Aseg_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Aparc2Aseg_outputs(): - output_map = dict( - out_file=dict( - argstr='%s', - extensions=None, - ), ) + output_map = dict(out_file=dict( + argstr='%s', + extensions=None, + ), ) outputs = Aparc2Aseg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py b/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py index fea3617596..03385da2ad 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py @@ -28,11 +28,10 @@ def test_Apas2Aseg_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Apas2Aseg_outputs(): - output_map = dict( - out_file=dict( - argstr='%s', - extensions=None, - ), ) + output_map = dict(out_file=dict( + argstr='%s', + extensions=None, + ), ) outputs = Apas2Aseg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py b/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py index f3bfd5ad62..4d56d217c7 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py @@ -7,7 +7,10 @@ def test_CALabel_inputs(): input_map = dict( align=dict(argstr='-align', ), args=dict(argstr='%s', ), - aseg=dict(argstr='-aseg %s', ), + aseg=dict( + argstr='-aseg %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, @@ -26,7 +29,10 @@ def test_CALabel_inputs(): argstr='-r %s', extensions=None, ), - label=dict(argstr='-l %s', ), + label=dict( + argstr='-l %s', + extensions=None, + ), no_big_ventricles=dict(argstr='-nobigventricles', ), num_threads=dict(), out_file=dict( diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py b/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py index 1f6546ae3a..5700103e84 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py @@ -59,7 +59,7 @@ def test_CANormalize_inputs(): def test_CANormalize_outputs(): output_map = dict( control_points=dict(extensions=None, ), - out_file=dict(), + out_file=dict(extensions=None, ), ) outputs = CANormalize.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py b/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py index 83f669b218..dcf5aa84a8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py @@ -53,7 +53,7 @@ def test_CARegister_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CARegister_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = CARegister.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py b/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py index 68a5a98e66..fe9c1a3121 100644 --- 
a/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py @@ -35,7 +35,7 @@ def test_CheckTalairachAlignment_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CheckTalairachAlignment_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = CheckTalairachAlignment.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py b/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py index 8acab945c1..ee1d3ae7f3 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py @@ -38,11 +38,13 @@ def test_ConcatenateLTA_inputs(): subjects_dir=dict(), tal_source_file=dict( argstr='-tal %s', + extensions=None, position=-5, requires=['tal_template_file'], ), tal_template_file=dict( argstr='%s', + extensions=None, position=-4, requires=['tal_source_file'], ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py b/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py index 3c4e5aa484..3cd62e8ee7 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py @@ -5,10 +5,16 @@ def test_Contrast_inputs(): input_map = dict( - annotation=dict(mandatory=True, ), + annotation=dict( + extensions=None, + mandatory=True, + ), args=dict(argstr='%s', ), copy_inputs=dict(), - cortex=dict(mandatory=True, ), + cortex=dict( + extensions=None, + mandatory=True, + ), environ=dict( nohash=True, usedefault=True, diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py b/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py index 4ba8442b14..83427dcd20 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py @@ -35,7 +35,10 @@ def test_FixTopology_inputs(): ), mgz=dict(argstr='-mgz', ), seed=dict(argstr='-seed %d', ), - sphere=dict(argstr='-sphere %s', ), + sphere=dict( + argstr='-sphere %s', + extensions=None, + ), subject_id=dict( argstr='%s', mandatory=True, diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py index 10c7af6832..eaf91bc1e8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py @@ -6,7 +6,10 @@ def test_MRIsCALabel_inputs(): input_map = dict( args=dict(argstr='%s', ), - aseg=dict(argstr='-aseg %s', ), + aseg=dict( + argstr='-aseg %s', + extensions=None, + ), canonsurf=dict( argstr='%s', extensions=None, @@ -33,7 +36,10 @@ def test_MRIsCALabel_inputs(): mandatory=True, position=-4, ), - label=dict(argstr='-l %s', ), + label=dict( + argstr='-l %s', + extensions=None, + ), num_threads=dict(), out_file=dict( argstr='%s', diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py b/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py index badb3b4f0c..3d3f5cde11 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py @@ -17,22 +17,30 @@ def test_MS_LDA_inputs(): mandatory=True, position=-1, ), - label_file=dict(argstr='-label %s', ), + label_file=dict( + argstr='-label %s', + extensions=None, 
+ ), lda_labels=dict( argstr='-lda %s', mandatory=True, sep=' ', ), - mask_file=dict(argstr='-mask %s', ), + mask_file=dict( + argstr='-mask %s', + extensions=None, + ), shift=dict(argstr='-shift %d', ), subjects_dir=dict(), use_weights=dict(argstr='-W', ), vol_synth_file=dict( argstr='-synth %s', + extensions=None, mandatory=True, ), weight_file=dict( argstr='-weight %s', + extensions=None, mandatory=True, ), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py b/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py index 2384adbb2b..baf03f026c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py @@ -43,7 +43,7 @@ def test_Normalize_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Normalize_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Normalize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py b/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py index be19b2bd37..8b4ae45d67 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py @@ -27,11 +27,16 @@ def test_ParcellationStats_inputs(): ), in_annotation=dict( argstr='-a %s', + extensions=None, xor=['in_label'], ), - in_cortex=dict(argstr='-cortex %s', ), + in_cortex=dict( + argstr='-cortex %s', + extensions=None, + ), in_label=dict( argstr='-l %s', + extensions=None, xor=['in_annotatoin', 'out_color'], ), lh_pial=dict( @@ -45,11 +50,13 @@ def test_ParcellationStats_inputs(): mgz=dict(argstr='-mgz', ), out_color=dict( argstr='-c %s', + extensions=None, genfile=True, xor=['in_label'], ), out_table=dict( argstr='-f %s', + extensions=None, genfile=True, requires=['tabular_output'], ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py b/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py index f3406d41fc..5a609f586e 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py @@ -47,7 +47,7 @@ def test_RegisterAVItoTalairach_outputs(): extensions=None, usedefault=True, ), - out_file=dict(), + out_file=dict(extensions=None, ), ) outputs = RegisterAVItoTalairach.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py b/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py index 28ff49d99e..f99acec899 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py @@ -48,11 +48,10 @@ def test_RelabelHypointensities_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_RelabelHypointensities_outputs(): - output_map = dict( - out_file=dict( - argstr='%s', - extensions=None, - ), ) + output_map = dict(out_file=dict( + argstr='%s', + extensions=None, + ), ) outputs = RelabelHypointensities.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py index 3cf47c71ce..09da0d001d 100644 --- 
a/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py @@ -100,7 +100,10 @@ def test_SegStatsReconAll_inputs(): extensions=None, mandatory=True, ), - ribbon=dict(mandatory=True, ), + ribbon=dict( + extensions=None, + mandatory=True, + ), segment_id=dict(argstr='--id %s...', ), segmentation_file=dict( argstr='--seg %s', diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py index 26dcbe3458..968041ff67 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py @@ -59,6 +59,7 @@ def test_SurfaceSnapshots_inputs(): overlay_range_offset=dict(argstr='-foffset %.3f', ), overlay_reg=dict( argstr='-overlay-reg %s', + extensions=None, xor=['overlay_reg', 'identity_reg', 'mni152_reg'], ), patch_file=dict( diff --git a/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py b/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py index 9d248c1a7d..2a28765d67 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py @@ -30,9 +30,9 @@ def test_TalairachAVI_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_TalairachAVI_outputs(): output_map = dict( - out_file=dict(), - out_log=dict(), - out_txt=dict(), + out_file=dict(extensions=None, ), + out_log=dict(extensions=None, ), + out_txt=dict(extensions=None, ), ) outputs = TalairachAVI.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py b/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py index 053d429c9b..1961e3ded6 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py @@ -24,11 +24,10 @@ def test_TalairachQC_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TalairachQC_outputs(): - output_map = dict( - log_file=dict( - extensions=None, - usedefault=True, - ), ) + output_map = dict(log_file=dict( + extensions=None, + usedefault=True, + ), ) outputs = TalairachQC.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py b/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py index de32f63f5f..ffd5b42bd1 100644 --- a/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py +++ b/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py @@ -34,11 +34,10 @@ def test_AccuracyTester_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_AccuracyTester_outputs(): - output_map = dict( - output_directory=dict( - argstr='%s', - position=1, - ), ) + output_map = dict(output_directory=dict( + argstr='%s', + position=1, + ), ) outputs = AccuracyTester.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Cleaner.py b/nipype/interfaces/fsl/tests/test_auto_Cleaner.py index a75df99db5..597dc88d5e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Cleaner.py +++ b/nipype/interfaces/fsl/tests/test_auto_Cleaner.py @@ -22,14 +22,17 @@ def test_Cleaner_inputs(): ), confound_file=dict( argstr='-x %s', + extensions=None, position=4, ), confound_file_1=dict( argstr='-x %s', + extensions=None, position=5, ), 
confound_file_2=dict( argstr='-x %s', + extensions=None, position=6, ), environ=dict( diff --git a/nipype/interfaces/fsl/tests/test_auto_Cluster.py b/nipype/interfaces/fsl/tests/test_auto_Cluster.py index 6ed34ab816..19340b9383 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Cluster.py +++ b/nipype/interfaces/fsl/tests/test_auto_Cluster.py @@ -7,7 +7,10 @@ def test_Cluster_inputs(): input_map = dict( args=dict(argstr='%s', ), connectivity=dict(argstr='--connectivity=%d', ), - cope_file=dict(argstr='--cope=%s', ), + cope_file=dict( + argstr='--cope=%s', + extensions=None, + ), dlh=dict(argstr='--dlh=%.10f', ), environ=dict( nohash=True, diff --git a/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py b/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py index 3945c40a87..eaeeb34ce8 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py +++ b/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py @@ -22,12 +22,11 @@ def test_FeatureExtractor_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_FeatureExtractor_outputs(): - output_map = dict( - mel_ica=dict( - argstr='%s', - copyfile=False, - position=-1, - ), ) + output_map = dict(mel_ica=dict( + argstr='%s', + copyfile=False, + position=-1, + ), ) outputs = FeatureExtractor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py b/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py index 5c7b717d93..a8262c9be1 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py +++ b/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py @@ -30,11 +30,10 @@ def test_FindTheBiggest_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_FindTheBiggest_outputs(): - output_map = dict( - out_file=dict( - argstr='%s', - extensions=None, - ), ) + output_map = dict(out_file=dict( + argstr='%s', + extensions=None, + ), ) outputs = FindTheBiggest.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py b/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py index 18ef078a79..638bcdd156 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py +++ b/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py @@ -4,24 +4,22 @@ def test_TrainingSetCreator_inputs(): - input_map = dict( - mel_icas_in=dict( - argstr='%s', - copyfile=False, - position=-1, - ), ) + input_map = dict(mel_icas_in=dict( + argstr='%s', + copyfile=False, + position=-1, + ), ) inputs = TrainingSetCreator.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TrainingSetCreator_outputs(): - output_map = dict( - mel_icas_out=dict( - argstr='%s', - copyfile=False, - position=-1, - ), ) + output_map = dict(mel_icas_out=dict( + argstr='%s', + copyfile=False, + position=-1, + ), ) outputs = TrainingSetCreator.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Average.py b/nipype/interfaces/minc/tests/test_auto_Average.py index 678ab93a52..6f0e84d144 100644 --- a/nipype/interfaces/minc/tests/test_auto_Average.py +++ b/nipype/interfaces/minc/tests/test_auto_Average.py @@ -29,6 +29,7 @@ def test_Average_inputs(): ), filelist=dict( argstr='-filelist %s', + extensions=None, 
mandatory=True, xor=('input_files', 'filelist'), ), @@ -126,7 +127,10 @@ def test_Average_inputs(): argstr='-quiet', xor=('verbose', 'quiet'), ), - sdfile=dict(argstr='-sdfile %s', ), + sdfile=dict( + argstr='-sdfile %s', + extensions=None, + ), two=dict(argstr='-2', ), verbose=dict( argstr='-verbose', diff --git a/nipype/interfaces/minc/tests/test_auto_BBox.py b/nipype/interfaces/minc/tests/test_auto_BBox.py index c1b3515cea..7bdf35f03d 100644 --- a/nipype/interfaces/minc/tests/test_auto_BBox.py +++ b/nipype/interfaces/minc/tests/test_auto_BBox.py @@ -25,6 +25,7 @@ def test_BBox_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/minc/tests/test_auto_Beast.py b/nipype/interfaces/minc/tests/test_auto_Beast.py index bc4705db2e..3a44b436b0 100644 --- a/nipype/interfaces/minc/tests/test_auto_Beast.py +++ b/nipype/interfaces/minc/tests/test_auto_Beast.py @@ -18,7 +18,10 @@ def test_Beast_inputs(): argstr='-alpha %s', usedefault=True, ), - configuration_file=dict(argstr='-configuration %s', ), + configuration_file=dict( + argstr='-configuration %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, @@ -27,6 +30,7 @@ def test_Beast_inputs(): flip_images=dict(argstr='-flip', ), input_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -44,6 +48,7 @@ def test_Beast_inputs(): ), output_file=dict( argstr='%s', + extensions=None, hash_files=False, name_source=['input_file'], name_template='%s_beast_mask.mnc', diff --git a/nipype/interfaces/minc/tests/test_auto_Calc.py b/nipype/interfaces/minc/tests/test_auto_Calc.py index e9df677150..d5fb316e39 100644 --- a/nipype/interfaces/minc/tests/test_auto_Calc.py +++ b/nipype/interfaces/minc/tests/test_auto_Calc.py @@ -26,6 +26,7 @@ def test_Calc_inputs(): eval_width=dict(argstr='-eval_width %s', ), expfile=dict( argstr='-expfile %s', + extensions=None, mandatory=True, xor=('expression', 'expfile'), ), @@ -36,6 +37,7 @@ def test_Calc_inputs(): ), filelist=dict( argstr='-filelist %s', + extensions=None, mandatory=True, xor=('input_files', 'filelist'), ), diff --git a/nipype/interfaces/minc/tests/test_auto_Dump.py b/nipype/interfaces/minc/tests/test_auto_Dump.py index 19c299dac8..fbd33f5a46 100644 --- a/nipype/interfaces/minc/tests/test_auto_Dump.py +++ b/nipype/interfaces/minc/tests/test_auto_Dump.py @@ -36,6 +36,7 @@ def test_Dump_inputs(): netcdf_name=dict(argstr='-n %s', ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/minc/tests/test_auto_Extract.py b/nipype/interfaces/minc/tests/test_auto_Extract.py index 35f6162c7f..fbd0a84729 100644 --- a/nipype/interfaces/minc/tests/test_auto_Extract.py +++ b/nipype/interfaces/minc/tests/test_auto_Extract.py @@ -84,6 +84,7 @@ def test_Extract_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/minc/tests/test_auto_Math.py b/nipype/interfaces/minc/tests/test_auto_Math.py index 6bc142b15d..41d75379d4 100644 --- a/nipype/interfaces/minc/tests/test_auto_Math.py +++ b/nipype/interfaces/minc/tests/test_auto_Math.py @@ -36,6 +36,7 @@ def test_Math_inputs(): exp=dict(argstr='-exp -const2 %s %s', ), filelist=dict( argstr='-filelist %s', + extensions=None, mandatory=True, xor=('input_files', 'filelist'), ), diff --git a/nipype/interfaces/minc/tests/test_auto_Norm.py b/nipype/interfaces/minc/tests/test_auto_Norm.py index 9fb0d3c5ba..cdd4a3db7c 100644 --- 
a/nipype/interfaces/minc/tests/test_auto_Norm.py +++ b/nipype/interfaces/minc/tests/test_auto_Norm.py @@ -26,7 +26,10 @@ def test_Norm_inputs(): position=-2, ), lower=dict(argstr='-lower %s', ), - mask=dict(argstr='-mask %s', ), + mask=dict( + argstr='-mask %s', + extensions=None, + ), out_ceil=dict(argstr='-out_ceil %s', ), out_floor=dict(argstr='-out_floor %s', ), output_file=dict( @@ -40,6 +43,7 @@ def test_Norm_inputs(): ), output_threshold_mask=dict( argstr='-threshold_mask %s', + extensions=None, hash_files=False, name_source=['input_file'], name_template='%s_norm_threshold_mask.mnc', diff --git a/nipype/interfaces/minc/tests/test_auto_Resample.py b/nipype/interfaces/minc/tests/test_auto_Resample.py index 8d4b24ff41..da512d9f62 100644 --- a/nipype/interfaces/minc/tests/test_auto_Resample.py +++ b/nipype/interfaces/minc/tests/test_auto_Resample.py @@ -94,7 +94,10 @@ def test_Resample_inputs(): argstr='-keep_real_range', xor=('keep_real_range', 'nokeep_real_range'), ), - like=dict(argstr='-like %s', ), + like=dict( + argstr='-like %s', + extensions=None, + ), nearest_neighbour_interpolation=dict( argstr='-nearest_neighbour', xor=('trilinear_interpolation', 'tricubic_interpolation', @@ -157,7 +160,10 @@ def test_Resample_inputs(): xor=('nelements', 'nelements_x_y_or_z'), ), talairach=dict(argstr='-talairach', ), - transformation=dict(argstr='-transformation %s', ), + transformation=dict( + argstr='-transformation %s', + extensions=None, + ), transverse_slices=dict( argstr='-transverse', xor=('transverse', 'sagittal', 'coronal'), diff --git a/nipype/interfaces/minc/tests/test_auto_Reshape.py b/nipype/interfaces/minc/tests/test_auto_Reshape.py index 669425da95..d80f9a377b 100644 --- a/nipype/interfaces/minc/tests/test_auto_Reshape.py +++ b/nipype/interfaces/minc/tests/test_auto_Reshape.py @@ -16,6 +16,7 @@ def test_Reshape_inputs(): ), input_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), diff --git a/nipype/interfaces/minc/tests/test_auto_ToRaw.py b/nipype/interfaces/minc/tests/test_auto_ToRaw.py index aeda687c97..42ba72f145 100644 --- a/nipype/interfaces/minc/tests/test_auto_ToRaw.py +++ b/nipype/interfaces/minc/tests/test_auto_ToRaw.py @@ -26,6 +26,7 @@ def test_ToRaw_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/minc/tests/test_auto_VolSymm.py b/nipype/interfaces/minc/tests/test_auto_VolSymm.py index b710d59543..ae3332b7f2 100644 --- a/nipype/interfaces/minc/tests/test_auto_VolSymm.py +++ b/nipype/interfaces/minc/tests/test_auto_VolSymm.py @@ -22,6 +22,7 @@ def test_VolSymm_inputs(): fit_nonlinear=dict(argstr='-nonlinear', ), input_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), @@ -38,6 +39,7 @@ def test_VolSymm_inputs(): ), trans_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, keep_extension=False, diff --git a/nipype/interfaces/minc/tests/test_auto_XfmInvert.py b/nipype/interfaces/minc/tests/test_auto_XfmInvert.py index 9e242300da..69c455f875 100644 --- a/nipype/interfaces/minc/tests/test_auto_XfmInvert.py +++ b/nipype/interfaces/minc/tests/test_auto_XfmInvert.py @@ -16,6 +16,7 @@ def test_XfmInvert_inputs(): ), input_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py b/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py index 90c5272ed0..5ee1e8cb00 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py +++ 
b/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py @@ -14,8 +14,12 @@ def test_BrainMask_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, + xor=['grad_fsl'], + ), + grad_fsl=dict( + argstr='-fslgrad %s %s', + xor=['grad_file'], ), - grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py index 7658201223..59617b30ab 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py @@ -15,8 +15,12 @@ def test_DWIExtract_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, + xor=['grad_fsl'], + ), + grad_fsl=dict( + argstr='-fslgrad %s %s', + xor=['grad_file'], ), - grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py index c64efa2ca4..c2816f2646 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py @@ -41,8 +41,12 @@ def test_EstimateFOD_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, + xor=['grad_fsl'], + ), + grad_fsl=dict( + argstr='-fslgrad %s %s', + xor=['grad_file'], ), - grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py b/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py index 84404fdde6..8f8142746a 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py @@ -14,8 +14,12 @@ def test_FitTensor_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, + xor=['grad_fsl'], + ), + grad_fsl=dict( + argstr='-fslgrad %s %s', + xor=['grad_file'], ), - grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py b/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py index 92594da593..f7032cb624 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py @@ -19,8 +19,12 @@ def test_Generate5tt_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, + xor=['grad_fsl'], + ), + grad_fsl=dict( + argstr='-fslgrad %s %s', + xor=['grad_file'], ), - grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py index 9271d07517..f11b615cc8 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py @@ -22,8 +22,12 @@ def test_MRConvert_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, + xor=['grad_fsl'], + ), + grad_fsl=dict( + argstr='-fslgrad %s %s', + xor=['grad_file'], ), - grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py index ae494bb932..fe934c45b1 100644 --- 
a/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py @@ -15,8 +15,12 @@ def test_MRMath_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, + xor=['grad_fsl'], + ), + grad_fsl=dict( + argstr='-fslgrad %s %s', + xor=['grad_file'], ), - grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py b/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py index dd326bad5b..9415cae211 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py @@ -29,8 +29,12 @@ def test_ResponseSD_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, + xor=['grad_fsl'], + ), + grad_fsl=dict( + argstr='-fslgrad %s %s', + xor=['grad_file'], ), - grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py index 39f239b81d..cf685a50d8 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py @@ -28,8 +28,12 @@ def test_Tractography_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, + xor=['grad_fsl'], + ), + grad_fsl=dict( + argstr='-fslgrad %s %s', + xor=['grad_file'], ), - grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py b/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py index 20995e806e..2a58b39d57 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py @@ -8,6 +8,7 @@ def test_DwiTool_inputs(): args=dict(argstr='%s', ), b0_file=dict( argstr='-b0 %s', + extensions=None, position=4, ), ball_flag=dict( @@ -28,11 +29,13 @@ def test_DwiTool_inputs(): ), bval_file=dict( argstr='-bval %s', + extensions=None, mandatory=True, position=2, ), bvec_file=dict( argstr='-bvec %s', + extensions=None, position=3, ), diso_val=dict(argstr='-diso %f', ), @@ -59,6 +62,7 @@ def test_DwiTool_inputs(): ), famap_file=dict( argstr='-famap %s', + extensions=None, name_source=['source_file'], name_template='%s_famap.nii.gz', ), @@ -72,20 +76,24 @@ def test_DwiTool_inputs(): ), logdti_file=dict( argstr='-logdti2 %s', + extensions=None, name_source=['source_file'], name_template='%s_logdti2.nii.gz', ), mask_file=dict( argstr='-mask %s', + extensions=None, position=5, ), mcmap_file=dict( argstr='-mcmap %s', + extensions=None, name_source=['source_file'], name_template='%s_mcmap.nii.gz', ), mdmap_file=dict( argstr='-mdmap %s', + extensions=None, name_source=['source_file'], name_template='%s_mdmap.nii.gz', ), @@ -115,22 +123,26 @@ def test_DwiTool_inputs(): ), rgbmap_file=dict( argstr='-rgbmap %s', + extensions=None, name_source=['source_file'], name_template='%s_rgbmap.nii.gz', ), source_file=dict( argstr='-source %s', + extensions=None, mandatory=True, position=1, ), syn_file=dict( argstr='-syn %s', + extensions=None, name_source=['source_file'], name_template='%s_syn.nii.gz', requires=['bvec_file', 'b0_file'], ), v1map_file=dict( argstr='-v1map %s', + extensions=None, name_source=['source_file'], name_template='%s_v1map.nii.gz', ), @@ -142,13 +154,13 @@ def test_DwiTool_inputs(): assert 
getattr(inputs.traits()[key], metakey) == value def test_DwiTool_outputs(): output_map = dict( - famap_file=dict(), - logdti_file=dict(), - mcmap_file=dict(), - mdmap_file=dict(), - rgbmap_file=dict(), - syn_file=dict(), - v1map_file=dict(), + famap_file=dict(extensions=None, ), + logdti_file=dict(extensions=None, ), + mcmap_file=dict(extensions=None, ), + mdmap_file=dict(extensions=None, ), + rgbmap_file=dict(extensions=None, ), + syn_file=dict(extensions=None, ), + v1map_file=dict(extensions=None, ), ) outputs = DwiTool.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py b/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py index b2e1bef961..94a489a4ad 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py @@ -8,6 +8,7 @@ def test_FitAsl_inputs(): args=dict(argstr='%s', ), cbf_file=dict( argstr='-cbf %s', + extensions=None, name_source=['source_file'], name_template='%s_cbf.nii.gz', ), @@ -20,19 +21,33 @@ def test_FitAsl_inputs(): ), error_file=dict( argstr='-error %s', + extensions=None, name_source=['source_file'], name_template='%s_error.nii.gz', ), gm_plasma=dict(argstr='-gmL %f', ), gm_t1=dict(argstr='-gmT1 %f', ), gm_ttt=dict(argstr='-gmTTT %f', ), - ir_output=dict(argstr='-IRoutput %s', ), - ir_volume=dict(argstr='-IRvolume %s', ), + ir_output=dict( + argstr='-IRoutput %s', + extensions=None, + ), + ir_volume=dict( + argstr='-IRvolume %s', + extensions=None, + ), ldd=dict(argstr='-LDD %f', ), - m0map=dict(argstr='-m0map %s', ), - m0mape=dict(argstr='-m0mape %s', ), + m0map=dict( + argstr='-m0map %s', + extensions=None, + ), + m0mape=dict( + argstr='-m0mape %s', + extensions=None, + ), mask=dict( argstr='-mask %s', + extensions=None, position=2, ), mul=dict(argstr='-mul %f', ), @@ -46,21 +61,29 @@ def test_FitAsl_inputs(): pv2=dict(argstr='-pv2 %d', ), pv3=dict(argstr='-pv3 %d %d %d', ), pv_threshold=dict(argstr='-pvthreshold', ), - seg=dict(argstr='-seg %s', ), + seg=dict( + argstr='-seg %s', + extensions=None, + ), segstyle=dict(argstr='-segstyle', ), sig=dict(argstr='-sig', ), source_file=dict( argstr='-source %s', + extensions=None, mandatory=True, position=1, ), syn_file=dict( argstr='-syn %s', + extensions=None, name_source=['source_file'], name_template='%s_syn.nii.gz', ), t1_art_cmp=dict(argstr='-T1a %f', ), - t1map=dict(argstr='-t1map %s', ), + t1map=dict( + argstr='-t1map %s', + extensions=None, + ), t_inv1=dict(argstr='-Tinv1 %f', ), t_inv2=dict(argstr='-Tinv2 %f', ), wm_plasma=dict(argstr='-wmL %f', ), @@ -74,9 +97,9 @@ def test_FitAsl_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_FitAsl_outputs(): output_map = dict( - cbf_file=dict(), - error_file=dict(), - syn_file=dict(), + cbf_file=dict(extensions=None, ), + error_file=dict(extensions=None, ), + syn_file=dict(extensions=None, ), ) outputs = FitAsl.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py b/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py index 700d9a31c4..2086d12a2b 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py @@ -25,15 +25,20 @@ def test_FitDwi_inputs(): ), bval_file=dict( argstr='-bval %s', + extensions=None, mandatory=True, position=2, ), bvec_file=dict( argstr='-bvec %s', + extensions=None, mandatory=True, position=3, ), - cov_file=dict(argstr='-cov %s', ), + cov_file=dict( + argstr='-cov %s', + extensions=None, + ), csf_t2_val=dict(argstr='-csfT2 %f', ), 
diso_val=dict(argstr='-diso %f', ), dpr_val=dict(argstr='-dpr %f', ), @@ -51,11 +56,13 @@ def test_FitDwi_inputs(): ), error_file=dict( argstr='-error %s', + extensions=None, name_source=['source_file'], name_template='%s_error.nii.gz', ), famap_file=dict( argstr='-famap %s', + extensions=None, name_source=['source_file'], name_template='%s_famap.nii.gz', ), @@ -75,13 +82,17 @@ def test_FitDwi_inputs(): argstr='-lm %f %f', requires=['gn_flag'], ), - mask_file=dict(argstr='-mask %s', ), + mask_file=dict( + argstr='-mask %s', + extensions=None, + ), maxit_val=dict( argstr='-maxit %d', requires=['gn_flag'], ), mcmap_file=dict( argstr='-mcmap %s', + extensions=None, name_source=['source_file'], name_template='%s_mcmap.nii.gz', requires=['nodv_flag'], @@ -89,12 +100,14 @@ def test_FitDwi_inputs(): mcmaxit=dict(argstr='-mcmaxit %d', ), mcout=dict( argstr='-mcout %s', + extensions=None, name_source=['source_file'], name_template='%s_mcout.txt', ), mcsamples=dict(argstr='-mcsamples %d', ), mdmap_file=dict( argstr='-mdmap %s', + extensions=None, name_source=['source_file'], name_template='%s_mdmap.nii.gz', ), @@ -116,6 +129,7 @@ def test_FitDwi_inputs(): ), nodiff_file=dict( argstr='-nodiff %s', + extensions=None, name_source=['source_file'], name_template='%s_no_diff.nii.gz', ), @@ -128,14 +142,19 @@ def test_FitDwi_inputs(): ], ), perf_thr=dict(argstr='-perfthreshold %f', ), - prior_file=dict(argstr='-prior %s', ), + prior_file=dict( + argstr='-prior %s', + extensions=None, + ), res_file=dict( argstr='-res %s', + extensions=None, name_source=['source_file'], name_template='%s_resmap.nii.gz', ), rgbmap_file=dict( argstr='-rgbmap %s', + extensions=None, name_source=['source_file'], name_template='%s_rgbmap.nii.gz', requires=['dti_flag'], @@ -144,38 +163,45 @@ def test_FitDwi_inputs(): slice_no=dict(argstr='-slice %d', ), source_file=dict( argstr='-source %s', + extensions=None, mandatory=True, position=1, ), swls_val=dict(argstr='-swls %f', ), syn_file=dict( argstr='-syn %s', + extensions=None, name_source=['source_file'], name_template='%s_syn.nii.gz', ), te_file=dict( argstr='-TE %s', + extensions=None, xor=['te_file'], ), te_value=dict( argstr='-TE %s', + extensions=None, xor=['te_file'], ), ten_type=dict(usedefault=True, ), tenmap2_file=dict( argstr='-tenmap2 %s', + extensions=None, name_source=['source_file'], name_template='%s_tenmap2.nii.gz', requires=['dti_flag'], ), tenmap_file=dict( argstr='-tenmap %s', + extensions=None, name_source=['source_file'], name_template='%s_tenmap.nii.gz', requires=['dti_flag'], ), v1map_file=dict( argstr='-v1map %s', + extensions=None, name_source=['source_file'], name_template='%s_v1map.nii.gz', ), @@ -194,18 +220,18 @@ def test_FitDwi_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_FitDwi_outputs(): output_map = dict( - error_file=dict(), - famap_file=dict(), - mcmap_file=dict(), - mcout=dict(), - mdmap_file=dict(), - nodiff_file=dict(), - res_file=dict(), - rgbmap_file=dict(), - syn_file=dict(), - tenmap2_file=dict(), - tenmap_file=dict(), - v1map_file=dict(), + error_file=dict(extensions=None, ), + famap_file=dict(extensions=None, ), + mcmap_file=dict(extensions=None, ), + mcout=dict(extensions=None, ), + mdmap_file=dict(extensions=None, ), + nodiff_file=dict(extensions=None, ), + res_file=dict(extensions=None, ), + rgbmap_file=dict(extensions=None, ), + syn_file=dict(extensions=None, ), + tenmap2_file=dict(extensions=None, ), + tenmap_file=dict(extensions=None, ), + v1map_file=dict(extensions=None, ), ) outputs = 
FitDwi.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py b/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py index 392654fd5c..b184e448cd 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py @@ -7,7 +7,10 @@ def test_FitQt1_inputs(): input_map = dict( acceptance=dict(argstr='-acceptance %f', ), args=dict(argstr='%s', ), - b1map=dict(argstr='-b1map %s', ), + b1map=dict( + argstr='-b1map %s', + extensions=None, + ), comp_file=dict( argstr='-comp %s', extensions=None, @@ -28,7 +31,10 @@ def test_FitQt1_inputs(): argstr='-flips %s', sep=' ', ), - flips_list=dict(argstr='-fliplist %s', ), + flips_list=dict( + argstr='-fliplist %s', + extensions=None, + ), gn_flag=dict( argstr='-gn', position=8, @@ -63,7 +69,10 @@ def test_FitQt1_inputs(): name_template='%s_mcmap.nii.gz', ), mcmaxit=dict(argstr='-mcmaxit %d', ), - mcout=dict(argstr='-mcout %s', ), + mcout=dict( + argstr='-mcout %s', + extensions=None, + ), mcsamples=dict(argstr='-mcsamples %d', ), nb_comp=dict( argstr='-nc %d', @@ -101,7 +110,10 @@ def test_FitQt1_inputs(): name_source=['source_file'], name_template='%s_syn.nii.gz', ), - t1_list=dict(argstr='-T1list %s', ), + t1_list=dict( + argstr='-T1list %s', + extensions=None, + ), t1map_file=dict( argstr='-t1map %s', extensions=None, @@ -119,7 +131,10 @@ def test_FitQt1_inputs(): position=14, sep=' ', ), - tis_list=dict(argstr='-TIlist %s', ), + tis_list=dict( + argstr='-TIlist %s', + extensions=None, + ), tr_value=dict( argstr='-TR %f', position=5, diff --git a/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py b/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py index 3fe17160db..9a9b5c421c 100644 --- a/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py +++ b/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py @@ -13,7 +13,7 @@ def test_EstimateContrast_inputs(): constants=dict(mandatory=True, ), contrasts=dict(mandatory=True, ), dof=dict(mandatory=True, ), - mask=dict(), + mask=dict(extensions=None, ), nvbeta=dict(mandatory=True, ), reg_names=dict(mandatory=True, ), s2=dict( diff --git a/nipype/interfaces/nipy/tests/test_auto_FitGLM.py b/nipype/interfaces/nipy/tests/test_auto_FitGLM.py index a700c18d43..71ca221efc 100644 --- a/nipype/interfaces/nipy/tests/test_auto_FitGLM.py +++ b/nipype/interfaces/nipy/tests/test_auto_FitGLM.py @@ -8,7 +8,7 @@ def test_FitGLM_inputs(): TR=dict(mandatory=True, ), drift_model=dict(usedefault=True, ), hrf_model=dict(usedefault=True, ), - mask=dict(), + mask=dict(extensions=None, ), method=dict(usedefault=True, ), model=dict(usedefault=True, ), normalize_design_matrix=dict(usedefault=True, ), @@ -30,7 +30,7 @@ def test_FitGLM_outputs(): dof=dict(), nvbeta=dict(), reg_names=dict(), - residuals=dict(), + residuals=dict(extensions=None, ), s2=dict(extensions=None, ), ) outputs = FitGLM.output_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py b/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py index 866af46740..2eaeb1acef 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py @@ -38,11 +38,10 @@ def test_UnbiasedNonLocalMeans_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_UnbiasedNonLocalMeans_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, 
- position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = UnbiasedNonLocalMeans.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py index 1ca79a6e96..33e52ed707 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py @@ -27,11 +27,10 @@ def test_DTIexport_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DTIexport_outputs(): - output_map = dict( - outputFile=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputFile=dict( + extensions=None, + position=-1, + ), ) outputs = DTIexport.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py index 58ae495f3f..ddb07c9428 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py @@ -28,11 +28,10 @@ def test_DTIimport_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DTIimport_outputs(): - output_map = dict( - outputTensor=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputTensor=dict( + extensions=None, + position=-1, + ), ) outputs = DTIimport.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py index f8000c49b1..aa0b6f97b5 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py @@ -37,11 +37,10 @@ def test_DWIJointRicianLMMSEFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DWIJointRicianLMMSEFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = DWIJointRicianLMMSEFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py index 29b5e0b7e1..69509fcf28 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py @@ -43,11 +43,10 @@ def test_DWIRicianLMMSEFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DWIRicianLMMSEFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = DWIRicianLMMSEFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py index 
5dcdbd7e2e..060c47ecc2 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py @@ -28,11 +28,10 @@ def test_DiffusionTensorScalarMeasurements_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DiffusionTensorScalarMeasurements_outputs(): - output_map = dict( - outputScalar=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputScalar=dict( + extensions=None, + position=-1, + ), ) outputs = DiffusionTensorScalarMeasurements.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py index bd35268bdb..9cfcdb14fb 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py @@ -73,11 +73,10 @@ def test_ResampleDTIVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ResampleDTIVolume_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = ResampleDTIVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py index d2b17c562a..42a7b17a19 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py @@ -33,11 +33,10 @@ def test_AddScalarVolumes_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_AddScalarVolumes_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = AddScalarVolumes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py index bbcb2f077c..0c05002ff8 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py @@ -28,11 +28,10 @@ def test_CastScalarVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CastScalarVolume_outputs(): - output_map = dict( - OutputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = CastScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py index 24f5b74307..b602012d0c 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py @@ -36,11 +36,10 @@ def test_CheckerBoardFilter_inputs(): for metakey, value in 
list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CheckerBoardFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = CheckerBoardFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py index 0240ad676a..7389aa0cee 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py @@ -30,11 +30,10 @@ def test_CurvatureAnisotropicDiffusion_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CurvatureAnisotropicDiffusion_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = CurvatureAnisotropicDiffusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py index c47382aef5..d6f93cb41d 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py @@ -31,11 +31,10 @@ def test_ExtractSkeleton_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ExtractSkeleton_outputs(): - output_map = dict( - OutputImageFileName=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputImageFileName=dict( + extensions=None, + position=-1, + ), ) outputs = ExtractSkeleton.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py index 2bdb73c4d5..9511f9ce1d 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py @@ -28,11 +28,10 @@ def test_GaussianBlurImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GaussianBlurImageFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = GaussianBlurImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py index 6d3e40c8de..02c9d7fb20 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py @@ -30,11 +30,10 @@ def test_GradientAnisotropicDiffusion_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GradientAnisotropicDiffusion_outputs(): - output_map = dict( - outputVolume=dict( - 
extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = GradientAnisotropicDiffusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py index f1ff2c3809..45b2b4b8dc 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py @@ -27,11 +27,10 @@ def test_GrayscaleFillHoleImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GrayscaleFillHoleImageFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = GrayscaleFillHoleImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py index 6aee86282a..77536e147d 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py @@ -27,11 +27,10 @@ def test_GrayscaleGrindPeakImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GrayscaleGrindPeakImageFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = GrayscaleGrindPeakImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py b/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py index c839c50abf..9541702731 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py @@ -35,11 +35,10 @@ def test_HistogramMatching_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_HistogramMatching_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = HistogramMatching.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py index 7fd2c31db3..c3373f80ec 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py @@ -33,11 +33,10 @@ def test_ImageLabelCombine_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ImageLabelCombine_outputs(): - output_map = dict( - OutputLabelMap=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputLabelMap=dict( + extensions=None, + position=-1, + ), ) outputs = ImageLabelCombine.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py index 56d68199af..11999f2001 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py @@ -34,11 +34,10 @@ def test_MaskScalarVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MaskScalarVolume_outputs(): - output_map = dict( - OutputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = MaskScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py index c0bf97e152..4c890aa30f 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py @@ -31,11 +31,10 @@ def test_MedianImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MedianImageFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = MedianImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py index cc39d5b7b1..99772b41db 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py @@ -33,11 +33,10 @@ def test_MultiplyScalarVolumes_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MultiplyScalarVolumes_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = MultiplyScalarVolumes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py index 12d7af77c4..28a9876943 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py @@ -71,11 +71,10 @@ def test_ResampleScalarVectorDWIVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ResampleScalarVectorDWIVolume_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = ResampleScalarVectorDWIVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py index 106cd843a4..62bf214bf4 100644 --- 
a/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py @@ -33,11 +33,10 @@ def test_SubtractScalarVolumes_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SubtractScalarVolumes_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = SubtractScalarVolumes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py index e63ed5923f..ce7f97ec58 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py @@ -32,11 +32,10 @@ def test_ThresholdScalarVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ThresholdScalarVolume_outputs(): - output_map = dict( - OutputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = ThresholdScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py index 89832bf9ba..c8100fd74b 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py @@ -34,11 +34,10 @@ def test_VotingBinaryHoleFillingImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_VotingBinaryHoleFillingImageFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = VotingBinaryHoleFillingImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py b/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py index f3c1ec6ff0..a121f85614 100644 --- a/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py +++ b/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py @@ -41,11 +41,10 @@ def test_DWIUnbiasedNonLocalMeansFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DWIUnbiasedNonLocalMeansFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = DWIUnbiasedNonLocalMeansFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py index 19b41f7127..6af3c1d54c 100644 --- 
a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py @@ -30,11 +30,10 @@ def test_OtsuThresholdImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_OtsuThresholdImageFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = OtsuThresholdImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py index b9fa12f2aa..c73313751e 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py @@ -31,11 +31,10 @@ def test_OtsuThresholdSegmentation_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_OtsuThresholdSegmentation_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = OtsuThresholdSegmentation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py b/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py index 7f1c259cd6..66207fb604 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py @@ -32,11 +32,10 @@ def test_ResampleScalarVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ResampleScalarVolume_outputs(): - output_map = dict( - OutputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = ResampleScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py index 6befb36860..c956ae34ec 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py @@ -37,11 +37,10 @@ def test_RobustStatisticsSegmenter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_RobustStatisticsSegmenter_outputs(): - output_map = dict( - segmentedImageFileName=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(segmentedImageFileName=dict( + extensions=None, + position=-1, + ), ) outputs = RobustStatisticsSegmenter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py index 5f880c08cb..8ffdd04355 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py +++ 
b/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py @@ -34,11 +34,10 @@ def test_SimpleRegionGrowingSegmentation_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SimpleRegionGrowingSegmentation_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = SimpleRegionGrowingSegmentation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py b/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py index 3669a21fc3..e18c0f0d5d 100644 --- a/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py +++ b/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py @@ -33,11 +33,10 @@ def test_GrayscaleModelMaker_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GrayscaleModelMaker_outputs(): - output_map = dict( - OutputGeometry=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputGeometry=dict( + extensions=None, + position=-1, + ), ) outputs = GrayscaleModelMaker.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py b/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py index faf982a342..d196dbf5d8 100644 --- a/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py +++ b/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py @@ -31,11 +31,10 @@ def test_LabelMapSmoothing_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_LabelMapSmoothing_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = LabelMapSmoothing.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_MergeModels.py b/nipype/interfaces/slicer/tests/test_auto_MergeModels.py index c25d4ebcf3..2e09b69bd1 100644 --- a/nipype/interfaces/slicer/tests/test_auto_MergeModels.py +++ b/nipype/interfaces/slicer/tests/test_auto_MergeModels.py @@ -32,11 +32,10 @@ def test_MergeModels_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MergeModels_outputs(): - output_map = dict( - ModelOutput=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(ModelOutput=dict( + extensions=None, + position=-1, + ), ) outputs = MergeModels.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py b/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py index d39ae392bd..2e684e3e65 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py +++ b/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py @@ -33,11 +33,10 @@ def test_ModelToLabelMap_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ModelToLabelMap_outputs(): - output_map = dict( - OutputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = ModelToLabelMap.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py b/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py index d0d437915b..479bb842bb 100644 --- a/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py +++ b/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py @@ -28,11 +28,10 @@ def test_OrientScalarVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_OrientScalarVolume_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = OrientScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py b/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py index 528ba47e1b..b4bbdbe283 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py +++ b/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py @@ -32,11 +32,10 @@ def test_ProbeVolumeWithModel_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ProbeVolumeWithModel_outputs(): - output_map = dict( - OutputModel=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputModel=dict( + extensions=None, + position=-1, + ), ) outputs = ProbeVolumeWithModel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/utility/tests/test_auto_Rename.py b/nipype/interfaces/utility/tests/test_auto_Rename.py index 6b56badd6f..70ced0ecdd 100644 --- a/nipype/interfaces/utility/tests/test_auto_Rename.py +++ b/nipype/interfaces/utility/tests/test_auto_Rename.py @@ -20,7 +20,7 @@ def test_Rename_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Rename_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Rename.output_spec() for key, metadata in list(output_map.items()): From bdd554e99494fd1510fa986b2362ff03fd144419 Mon Sep 17 00:00:00 2001 From: Murat Bilgel Date: Thu, 29 Aug 2019 15:37:11 -0400 Subject: [PATCH 0338/1665] correct stop criterion flag --- nipype/interfaces/petpvc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/petpvc.py b/nipype/interfaces/petpvc.py index c73b408bf3..63a0a8ed83 100644 --- a/nipype/interfaces/petpvc.py +++ b/nipype/interfaces/petpvc.py @@ -56,7 +56,7 @@ class PETPVCInputSpec(CommandLineInputSpec): argstr="-a %.4f") stop_crit = traits.Float( desc="Stopping criterion", default_value=0.01, usedefault=True, - argstr="-a %.4f") + argstr="-s %.4f") class PETPVCOutputSpec(TraitedSpec): From d18b1590935ddafa7b0a670f589316b5afb89e03 Mon Sep 17 00:00:00 2001 From: Murat Bilgel Date: Thu, 29 Aug 2019 16:16:13 -0400 Subject: [PATCH 0339/1665] update PETPVC tests --- nipype/interfaces/tests/test_auto_PETPVC.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/tests/test_auto_PETPVC.py b/nipype/interfaces/tests/test_auto_PETPVC.py index 60f5b29826..de7c1fa272 100644 --- a/nipype/interfaces/tests/test_auto_PETPVC.py +++ b/nipype/interfaces/tests/test_auto_PETPVC.py @@ -59,7 +59,7 @@ def test_PETPVC_inputs(): mandatory=True, ), stop_crit=dict( - argstr='-a %.4f', + argstr='-s %.4f', usedefault=True, ), ) From 
7d8fdfe65129081d67a496d94ee5e6db2d57efab Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Thu, 29 Aug 2019 15:16:18 -0700 Subject: [PATCH 0340/1665] ENH: Add ``--rescale-intensities`` and name_source to N4BiasFieldCorrection --- nipype/interfaces/ants/segmentation.py | 80 +++++++++---------- .../tests/test_auto_N4BiasFieldCorrection.py | 8 +- 2 files changed, 47 insertions(+), 41 deletions(-) diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py index adaf765527..277a716286 100644 --- a/nipype/interfaces/ants/segmentation.py +++ b/nipype/interfaces/ants/segmentation.py @@ -7,7 +7,7 @@ import os from ...external.due import BibTeX -from ...utils.filemanip import split_filename, copyfile, which +from ...utils.filemanip import split_filename, copyfile, which, fname_presuffix from ..base import TraitedSpec, File, traits, InputMultiPath, OutputMultiPath, isdefined from .base import ANTSCommand, ANTSCommandInputSpec @@ -274,18 +274,20 @@ class N4BiasFieldCorrectionInputSpec(ANTSCommandInputSpec): argstr='--input-image %s', mandatory=True, desc=('input for bias correction. Negative values or values close to ' - 'zero should be processed prior to correction')) + 'zero should be processed prior to correction')) mask_image = File( argstr='--mask-image %s', desc=('image to specify region to perform final bias correction in')) weight_image = File( argstr='--weight-image %s', desc=('image for relative weighting (e.g. probability map of the white ' - 'matter) of voxels during the B-spline fitting. ')) + 'matter) of voxels during the B-spline fitting. ')) output_image = traits.Str( argstr='--output %s', desc='output file name', - genfile=True, + name_source=['input_image'], + name_template='%s_corrected', + keep_extension=True, hash_files=False) bspline_fitting_distance = traits.Float(argstr="--bspline-fitting %s") bspline_order = traits.Int(requires=['bspline_fitting_distance']) @@ -306,6 +308,14 @@ class N4BiasFieldCorrectionInputSpec(ANTSCommandInputSpec): usedefault=True, desc='copy headers of the original image into the ' 'output (corrected) file') + rescale_intensities = traits.Bool( + False, usedefault=True, argstr='-r', + desc="""\ +At each iteration, a new intensity mapping is calculated and applied but there +is nothing which constrains the new intensity range to be within certain values. +The result is that the range can "drift" from the original at each iteration. +This option rescales to the [min,max] range of the original image intensities +within the user-specified mask.""") class N4BiasFieldCorrectionOutputSpec(TraitedSpec): @@ -314,7 +324,10 @@ class N4BiasFieldCorrectionOutputSpec(TraitedSpec): class N4BiasFieldCorrection(ANTSCommand): - """N4 is a variant of the popular N3 (nonparameteric nonuniform normalization) + """ + Bias field correction. + + N4 is a variant of the popular N3 (nonparameteric nonuniform normalization) retrospective bias correction algorithm. 
Based on the assumption that the corruption of the low frequency bias field can be modeled as a convolution of the intensity histogram by a Gaussian, the basic algorithmic protocol is to @@ -373,28 +386,9 @@ class N4BiasFieldCorrection(ANTSCommand): input_spec = N4BiasFieldCorrectionInputSpec output_spec = N4BiasFieldCorrectionOutputSpec - def _gen_filename(self, name): - if name == 'output_image': - output = self.inputs.output_image - if not isdefined(output): - _, name, ext = split_filename(self.inputs.input_image) - output = name + '_corrected' + ext - return output - - if name == 'bias_image': - output = self.inputs.bias_image - if not isdefined(output): - _, name, ext = split_filename(self.inputs.input_image) - output = name + '_bias' + ext - return output - return None - def _format_arg(self, name, trait_spec, value): - if ((name == 'output_image') and - (self.inputs.save_bias or isdefined(self.inputs.bias_image))): - bias_image = self._gen_filename('bias_image') - output = self._gen_filename('output_image') - newval = '[ %s, %s ]' % (output, bias_image) + if name == 'output_image' and getattr(self, '_out_bias_file', None): + newval = '[ %s, %s ]' % (value, getattr(self, '_out_bias_file')) return trait_spec.argstr % newval if name == 'bspline_fitting_distance': @@ -418,19 +412,25 @@ def _format_arg(self, name, trait_spec, value): name, trait_spec, value) def _parse_inputs(self, skip=None): - if skip is None: - skip = [] - skip += ['save_bias', 'bias_image'] + skip = (skip or []) + ['save_bias', 'bias_image'] + if self.inputs.save_bias or isdefined(self.inputs.bias_image): + bias_image = self.inputs.bias_image + if not isdefined(bias_image): + bias_image = fname_presuffix(os.path.basename(self.inputs.input_image), + suffix='_bias') + setattr(self, '_out_bias_file', bias_image) + else: + try: + delattr(self, '_out_bias_file') + except AttributeError: + pass return super(N4BiasFieldCorrection, self)._parse_inputs(skip=skip) def _list_outputs(self): - outputs = self._outputs().get() - outputs['output_image'] = os.path.abspath( - self._gen_filename('output_image')) - - if self.inputs.save_bias or isdefined(self.inputs.bias_image): - outputs['bias_image'] = os.path.abspath( - self._gen_filename('bias_image')) + outputs = super(N4BiasFieldCorrection, self)._list_outputs() + bias_image = getattr(self, '_out_bias_file', None) + if bias_image: + outputs['bias_image'] = bias_image return outputs def _run_interface(self, runtime, correct_return_codes=(0, )): @@ -438,14 +438,14 @@ def _run_interface(self, runtime, correct_return_codes=(0, )): runtime, correct_return_codes) if self.inputs.copy_header and runtime.returncode in correct_return_codes: - self._copy_header(self._gen_filename('output_image')) - if self.inputs.save_bias or isdefined(self.inputs.bias_image): - self._copy_header(self._gen_filename('bias_image')) + self._copy_header(self.inputs.output_image) + if getattr(self, '_out_bias_file', None): + self._copy_header(getattr(self, '_out_bias_file')) return runtime def _copy_header(self, fname): - """Copy header from input image to an output image""" + """Copy header from input image to an output image.""" import nibabel as nb in_img = nb.load(self.inputs.input_image) out_img = nb.load(fname, mmap=False) diff --git a/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py b/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py index c93d847d4b..b1fc710ea5 100644 --- a/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py +++ 
b/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py @@ -41,8 +41,14 @@ def test_N4BiasFieldCorrection_inputs(): ), output_image=dict( argstr='--output %s', - genfile=True, hash_files=False, + keep_extension=True, + name_source=['input_image'], + name_template='%s_corrected', + ), + rescale_intensities=dict( + argstr='-r', + usedefault=True, ), save_bias=dict( mandatory=True, From 355965dea991ce1ff091ced3a2d57b5ae12f7ffa Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Thu, 29 Aug 2019 21:13:03 -0700 Subject: [PATCH 0341/1665] fix: add _out_bias_file as a member, following @effigies' suggestion --- nipype/interfaces/ants/segmentation.py | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py index 277a716286..a62fdab8d9 100644 --- a/nipype/interfaces/ants/segmentation.py +++ b/nipype/interfaces/ants/segmentation.py @@ -386,9 +386,13 @@ class N4BiasFieldCorrection(ANTSCommand): input_spec = N4BiasFieldCorrectionInputSpec output_spec = N4BiasFieldCorrectionOutputSpec + def __init__(self, *args, **kwargs): + self._out_bias_file = None + super(N4BiasFieldCorrection, self).__init__(*args, **kwargs) + def _format_arg(self, name, trait_spec, value): - if name == 'output_image' and getattr(self, '_out_bias_file', None): - newval = '[ %s, %s ]' % (value, getattr(self, '_out_bias_file')) + if name == 'output_image' and self._out_bias_file: + newval = '[ %s, %s ]' % (value, self._out_bias_file) return trait_spec.argstr % newval if name == 'bspline_fitting_distance': @@ -413,24 +417,19 @@ def _format_arg(self, name, trait_spec, value): def _parse_inputs(self, skip=None): skip = (skip or []) + ['save_bias', 'bias_image'] + self._out_bias_file = None if self.inputs.save_bias or isdefined(self.inputs.bias_image): bias_image = self.inputs.bias_image if not isdefined(bias_image): bias_image = fname_presuffix(os.path.basename(self.inputs.input_image), suffix='_bias') - setattr(self, '_out_bias_file', bias_image) - else: - try: - delattr(self, '_out_bias_file') - except AttributeError: - pass + self._out_bias_file = bias_image return super(N4BiasFieldCorrection, self)._parse_inputs(skip=skip) def _list_outputs(self): outputs = super(N4BiasFieldCorrection, self)._list_outputs() - bias_image = getattr(self, '_out_bias_file', None) - if bias_image: - outputs['bias_image'] = bias_image + if self._out_bias_file: + outputs['bias_image'] = self._out_bias_file return outputs def _run_interface(self, runtime, correct_return_codes=(0, )): @@ -439,8 +438,8 @@ def _run_interface(self, runtime, correct_return_codes=(0, )): if self.inputs.copy_header and runtime.returncode in correct_return_codes: self._copy_header(self.inputs.output_image) - if getattr(self, '_out_bias_file', None): - self._copy_header(getattr(self, '_out_bias_file')) + if self._out_bias_file: + self._copy_header(self._out_bias_file) return runtime From 834df60d1b03c82028a894d0fe27825cca04cb0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20F=C3=A9rat?= Date: Fri, 30 Aug 2019 12:07:50 +0200 Subject: [PATCH 0342/1665] Revert "Merge pull request #1 from vferat/fix/mne_watershed_bem" This reverts commit 1f92bd569e5d92ee1e3233e093a4a77242759f03, reversing changes made to 8129ff4e004fdfcd6ce9b0730ee62d58586ff647. 
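The revert below switches the WatershedBEM interface back to the legacy MNE-C binary name. As a hedged illustration (not part of the patch itself, and assuming nipype exposes WatershedBEM from the nipype.interfaces.mne package touched by this diff), the reverted interface renders its command line exactly as in the doctest this commit restores:

# Illustrative sketch, not part of the patch: after this revert the
# WatershedBEM interface again calls the legacy MNE-C executable
# rather than the "mne watershed_bem" subcommand.
from nipype.interfaces.mne import WatershedBEM

bem = WatershedBEM()
bem.inputs.subject_id = 'subj1'
bem.inputs.subjects_dir = '.'
# matches the doctest restored by this commit
assert bem.cmdline == 'mne_watershed_bem --overwrite --subject subj1 --volume T1'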
--- nipype/interfaces/mne/base.py | 112 +++++++++++++++------------------- 1 file changed, 49 insertions(+), 63 deletions(-) diff --git a/nipype/interfaces/mne/base.py b/nipype/interfaces/mne/base.py index 7a4f186496..7f53071372 100644 --- a/nipype/interfaces/mne/base.py +++ b/nipype/interfaces/mne/base.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- -from __future__ import print_function, division, unicode_literals, absolute_import +from __future__ import (print_function, division, unicode_literals, + absolute_import) from builtins import str, bytes import os.path as op @@ -7,86 +8,74 @@ from ... import logging from ...utils.filemanip import simplify_list -from ..base import traits, File, Directory, TraitedSpec, OutputMultiPath +from ..base import (traits, File, Directory, TraitedSpec, OutputMultiPath) from ..freesurfer.base import FSCommand, FSTraitedSpec -iflogger = logging.getLogger("nipype.interface") +iflogger = logging.getLogger('nipype.interface') class WatershedBEMInputSpec(FSTraitedSpec): subject_id = traits.Str( - argstr="--subject %s", + argstr='--subject %s', mandatory=True, - desc="Subject ID (must have a complete Freesurfer directory)", - ) + desc='Subject ID (must have a complete Freesurfer directory)') subjects_dir = Directory( exists=True, mandatory=True, usedefault=True, - desc="Path to Freesurfer subjects directory", - ) + desc='Path to Freesurfer subjects directory') volume = traits.Enum( - "T1", - "aparc+aseg", - "aseg", - "brain", - "orig", - "brainmask", - "ribbon", - argstr="--volume %s", + 'T1', + 'aparc+aseg', + 'aseg', + 'brain', + 'orig', + 'brainmask', + 'ribbon', + argstr='--volume %s', usedefault=True, - desc='The volume from the "mri" directory to use (defaults to T1)', - ) + desc='The volume from the "mri" directory to use (defaults to T1)') overwrite = traits.Bool( True, usedefault=True, - argstr="--overwrite", - desc="Overwrites the existing files", - ) + argstr='--overwrite', + desc='Overwrites the existing files') atlas_mode = traits.Bool( - argstr="--atlas", - desc="Use atlas mode for registration (default: no rigid alignment)", - ) + argstr='--atlas', + desc='Use atlas mode for registration (default: no rigid alignment)') class WatershedBEMOutputSpec(TraitedSpec): mesh_files = OutputMultiPath( File(exists=True), - desc=( - "Paths to the output meshes (brain, inner " - "skull, outer skull, outer skin)" - ), - ) + desc=('Paths to the output meshes (brain, inner ' + 'skull, outer skull, outer skin)')) brain_surface = File( - exists=True, loc="bem/watershed", desc="Brain surface (in Freesurfer format)" - ) + exists=True, + loc='bem/watershed', + desc='Brain surface (in Freesurfer format)') inner_skull_surface = File( exists=True, - loc="bem/watershed", - desc="Inner skull surface (in Freesurfer format)", - ) + loc='bem/watershed', + desc='Inner skull surface (in Freesurfer format)') outer_skull_surface = File( exists=True, - loc="bem/watershed", - desc="Outer skull surface (in Freesurfer format)", - ) + loc='bem/watershed', + desc='Outer skull surface (in Freesurfer format)') outer_skin_surface = File( exists=True, - loc="bem/watershed", - desc="Outer skin surface (in Freesurfer format)", - ) + loc='bem/watershed', + desc='Outer skin surface (in Freesurfer format)') fif_file = File( exists=True, - loc="bem", - altkey="fif", - desc='"fif" format file for EEG processing in MNE', - ) + loc='bem', + altkey='fif', + desc='"fif" format file for EEG processing in MNE') cor_files = OutputMultiPath( File(exists=True), - loc="bem/watershed/ws", - altkey="COR", - 
desc='"COR" format files', - ) + loc='bem/watershed/ws', + altkey='COR', + desc='"COR" format files') class WatershedBEM(FSCommand): @@ -100,23 +89,23 @@ class WatershedBEM(FSCommand): >>> bem.inputs.subject_id = 'subj1' >>> bem.inputs.subjects_dir = '.' >>> bem.cmdline - 'mne watershed_bem --overwrite --subject subj1 --volume T1' + 'mne_watershed_bem --overwrite --subject subj1 --volume T1' >>> bem.run() # doctest: +SKIP """ - _cmd = "mne watershed_bem" + _cmd = 'mne_watershed_bem' input_spec = WatershedBEMInputSpec output_spec = WatershedBEMOutputSpec - _additional_metadata = ["loc", "altkey"] + _additional_metadata = ['loc', 'altkey'] def _get_files(self, path, key, dirval, altkey=None): - globsuffix = "*" - globprefix = "*" + globsuffix = '*' + globprefix = '*' keydir = op.join(path, dirval) if altkey: key = altkey - globpattern = op.join(keydir, "".join((globprefix, key, globsuffix))) + globpattern = op.join(keydir, ''.join((globprefix, key, globsuffix))) return glob.glob(globpattern) def _list_outputs(self): @@ -126,13 +115,10 @@ def _list_outputs(self): output_traits = self._outputs() mesh_paths = [] for k in list(outputs.keys()): - if k != "mesh_files": - val = self._get_files( - subject_path, - k, - output_traits.traits()[k].loc, - output_traits.traits()[k].altkey, - ) + if k != 'mesh_files': + val = self._get_files(subject_path, k, + output_traits.traits()[k].loc, + output_traits.traits()[k].altkey) if val: value_list = simplify_list(val) if isinstance(value_list, list): @@ -144,7 +130,7 @@ def _list_outputs(self): else: raise TypeError outputs[k] = out_files - if not k.rfind("surface") == -1: + if not k.rfind('surface') == -1: mesh_paths.append(out_files) - outputs["mesh_files"] = mesh_paths + outputs['mesh_files'] = mesh_paths return outputs From 183ab8823da99abb1b908f9e2a4394484c789dd1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20F=C3=A9rat?= Date: Fri, 30 Aug 2019 12:08:03 +0200 Subject: [PATCH 0343/1665] Revert "run make check-before-commit without error" This reverts commit 8129ff4e004fdfcd6ce9b0730ee62d58586ff647. 
--- .../algorithms/tests/test_auto_AddCSVRow.py | 5 +- nipype/algorithms/tests/test_auto_Gunzip.py | 9 +-- .../tests/test_auto_NonSteadyStateDetector.py | 9 +-- .../afni/tests/test_auto_Allineate.py | 1 - nipype/interfaces/afni/tests/test_auto_Cat.py | 7 ++- .../afni/tests/test_auto_ClipLevel.py | 1 - .../afni/tests/test_auto_LocalBistat.py | 7 +-- .../afni/tests/test_auto_Localstat.py | 6 +- .../afni/tests/test_auto_NwarpApply.py | 5 +- .../afni/tests/test_auto_OneDToolPy.py | 1 - .../interfaces/afni/tests/test_auto_Qwarp.py | 5 +- .../afni/tests/test_auto_QwarpPlusMinus.py | 5 +- .../afni/tests/test_auto_ROIStats.py | 5 +- .../interfaces/afni/tests/test_auto_ReHo.py | 6 +- .../afni/tests/test_auto_Remlfit.py | 1 - .../afni/tests/test_auto_Resample.py | 5 +- .../afni/tests/test_auto_TCorrMap.py | 5 +- .../afni/tests/test_auto_Zeropad.py | 1 - .../interfaces/ants/tests/test_auto_ANTS.py | 4 +- .../ants/tests/test_auto_AntsJointFusion.py | 9 +-- .../ants/tests/test_auto_Atropos.py | 4 +- .../base/tests/test_auto_StdOutCommandLine.py | 1 - .../camino/tests/test_auto_AnalyzeHeader.py | 1 - .../tests/test_auto_ComputeEigensystem.py | 1 - .../test_auto_ComputeFractionalAnisotropy.py | 1 - .../tests/test_auto_ComputeTensorTrace.py | 1 - .../camino/tests/test_auto_DTIFit.py | 1 - .../camino/tests/test_auto_DTLUTGen.py | 1 - .../camino/tests/test_auto_FSL2Scheme.py | 1 - .../camino/tests/test_auto_Image2Voxel.py | 1 - .../camino/tests/test_auto_LinRecon.py | 1 - .../interfaces/camino/tests/test_auto_MESD.py | 1 - .../camino/tests/test_auto_ModelFit.py | 1 - .../camino/tests/test_auto_NIfTIDT2Camino.py | 1 - .../camino/tests/test_auto_PicoPDFs.py | 1 - .../camino/tests/test_auto_ProcStreamlines.py | 1 - .../camino/tests/test_auto_QBallMX.py | 1 - .../camino/tests/test_auto_SFLUTGen.py | 1 - .../camino/tests/test_auto_SFPICOCalibData.py | 1 - .../camino/tests/test_auto_SFPeaks.py | 1 - .../camino/tests/test_auto_Shredder.py | 1 - .../camino/tests/test_auto_TractShredder.py | 1 - .../camino/tests/test_auto_VtkStreamlines.py | 1 - .../freesurfer/tests/test_auto_Aparc2Aseg.py | 9 +-- .../freesurfer/tests/test_auto_Apas2Aseg.py | 9 +-- .../freesurfer/tests/test_auto_CALabel.py | 10 +--- .../freesurfer/tests/test_auto_CANormalize.py | 2 +- .../freesurfer/tests/test_auto_CARegister.py | 2 +- .../test_auto_CheckTalairachAlignment.py | 2 +- .../tests/test_auto_ConcatenateLTA.py | 2 - .../freesurfer/tests/test_auto_Contrast.py | 10 +--- .../freesurfer/tests/test_auto_FixTopology.py | 5 +- .../freesurfer/tests/test_auto_MRIsCALabel.py | 10 +--- .../freesurfer/tests/test_auto_MS_LDA.py | 12 +--- .../freesurfer/tests/test_auto_Normalize.py | 2 +- .../tests/test_auto_ParcellationStats.py | 9 +-- .../tests/test_auto_RegisterAVItoTalairach.py | 2 +- .../tests/test_auto_RelabelHypointensities.py | 9 +-- .../tests/test_auto_SegStatsReconAll.py | 5 +- .../tests/test_auto_SurfaceSnapshots.py | 1 - .../tests/test_auto_TalairachAVI.py | 6 +- .../freesurfer/tests/test_auto_TalairachQC.py | 9 +-- .../fsl/tests/test_auto_AccuracyTester.py | 9 +-- .../interfaces/fsl/tests/test_auto_Cleaner.py | 3 - .../interfaces/fsl/tests/test_auto_Cluster.py | 5 +- .../fsl/tests/test_auto_FeatureExtractor.py | 11 ++-- .../fsl/tests/test_auto_FindTheBiggest.py | 9 +-- .../fsl/tests/test_auto_TrainingSetCreator.py | 22 ++++---- .../minc/tests/test_auto_Average.py | 6 +- .../interfaces/minc/tests/test_auto_BBox.py | 1 - .../interfaces/minc/tests/test_auto_Beast.py | 7 +-- .../interfaces/minc/tests/test_auto_Calc.py | 2 - 
.../interfaces/minc/tests/test_auto_Dump.py | 1 - .../minc/tests/test_auto_Extract.py | 1 - .../interfaces/minc/tests/test_auto_Math.py | 1 - .../interfaces/minc/tests/test_auto_Norm.py | 6 +- .../minc/tests/test_auto_Resample.py | 10 +--- .../minc/tests/test_auto_Reshape.py | 1 - .../interfaces/minc/tests/test_auto_ToRaw.py | 1 - .../minc/tests/test_auto_VolSymm.py | 2 - .../minc/tests/test_auto_XfmInvert.py | 1 - .../mrtrix3/tests/test_auto_BrainMask.py | 6 +- .../mrtrix3/tests/test_auto_DWIExtract.py | 6 +- .../mrtrix3/tests/test_auto_EstimateFOD.py | 6 +- .../mrtrix3/tests/test_auto_FitTensor.py | 6 +- .../mrtrix3/tests/test_auto_Generate5tt.py | 6 +- .../mrtrix3/tests/test_auto_MRConvert.py | 6 +- .../mrtrix3/tests/test_auto_MRMath.py | 6 +- .../mrtrix3/tests/test_auto_ResponseSD.py | 6 +- .../mrtrix3/tests/test_auto_Tractography.py | 6 +- .../niftyfit/tests/test_auto_DwiTool.py | 26 +++------ .../niftyfit/tests/test_auto_FitAsl.py | 41 +++----------- .../niftyfit/tests/test_auto_FitDwi.py | 56 +++++-------------- .../niftyfit/tests/test_auto_FitQt1.py | 25 ++------- .../nipy/tests/test_auto_EstimateContrast.py | 2 +- .../interfaces/nipy/tests/test_auto_FitGLM.py | 4 +- .../tests/test_auto_UnbiasedNonLocalMeans.py | 9 +-- .../diffusion/tests/test_auto_DTIexport.py | 9 +-- .../diffusion/tests/test_auto_DTIimport.py | 9 +-- .../test_auto_DWIJointRicianLMMSEFilter.py | 9 +-- .../tests/test_auto_DWIRicianLMMSEFilter.py | 9 +-- ..._auto_DiffusionTensorScalarMeasurements.py | 9 +-- .../tests/test_auto_ResampleDTIVolume.py | 9 +-- .../tests/test_auto_AddScalarVolumes.py | 9 +-- .../tests/test_auto_CastScalarVolume.py | 9 +-- .../tests/test_auto_CheckerBoardFilter.py | 9 +-- ...test_auto_CurvatureAnisotropicDiffusion.py | 9 +-- .../tests/test_auto_ExtractSkeleton.py | 9 +-- .../test_auto_GaussianBlurImageFilter.py | 9 +-- .../test_auto_GradientAnisotropicDiffusion.py | 9 +-- .../test_auto_GrayscaleFillHoleImageFilter.py | 9 +-- ...test_auto_GrayscaleGrindPeakImageFilter.py | 9 +-- .../tests/test_auto_HistogramMatching.py | 9 +-- .../tests/test_auto_ImageLabelCombine.py | 9 +-- .../tests/test_auto_MaskScalarVolume.py | 9 +-- .../tests/test_auto_MedianImageFilter.py | 9 +-- .../tests/test_auto_MultiplyScalarVolumes.py | 9 +-- ...test_auto_ResampleScalarVectorDWIVolume.py | 9 +-- .../tests/test_auto_SubtractScalarVolumes.py | 9 +-- .../tests/test_auto_ThresholdScalarVolume.py | 9 +-- ...auto_VotingBinaryHoleFillingImageFilter.py | 9 +-- ...est_auto_DWIUnbiasedNonLocalMeansFilter.py | 9 +-- .../test_auto_OtsuThresholdImageFilter.py | 9 +-- .../test_auto_OtsuThresholdSegmentation.py | 9 +-- .../tests/test_auto_ResampleScalarVolume.py | 9 +-- .../test_auto_RobustStatisticsSegmenter.py | 9 +-- ...st_auto_SimpleRegionGrowingSegmentation.py | 9 +-- .../tests/test_auto_GrayscaleModelMaker.py | 9 +-- .../tests/test_auto_LabelMapSmoothing.py | 9 +-- .../slicer/tests/test_auto_MergeModels.py | 9 +-- .../slicer/tests/test_auto_ModelToLabelMap.py | 9 +-- .../tests/test_auto_OrientScalarVolume.py | 9 +-- .../tests/test_auto_ProbeVolumeWithModel.py | 9 +-- .../utility/tests/test_auto_Rename.py | 2 +- 134 files changed, 340 insertions(+), 541 deletions(-) diff --git a/nipype/algorithms/tests/test_auto_AddCSVRow.py b/nipype/algorithms/tests/test_auto_AddCSVRow.py index a8c4467cbf..3090c8b6a9 100644 --- a/nipype/algorithms/tests/test_auto_AddCSVRow.py +++ b/nipype/algorithms/tests/test_auto_AddCSVRow.py @@ -6,10 +6,7 @@ def test_AddCSVRow_inputs(): input_map = dict( _outputs=dict(usedefault=True, ), - 
in_file=dict( - extensions=None, - mandatory=True, - ), + in_file=dict(mandatory=True, ), ) inputs = AddCSVRow.input_spec() diff --git a/nipype/algorithms/tests/test_auto_Gunzip.py b/nipype/algorithms/tests/test_auto_Gunzip.py index d84e84ad61..e583deaa10 100644 --- a/nipype/algorithms/tests/test_auto_Gunzip.py +++ b/nipype/algorithms/tests/test_auto_Gunzip.py @@ -4,10 +4,11 @@ def test_Gunzip_inputs(): - input_map = dict(in_file=dict( - extensions=None, - mandatory=True, - ), ) + input_map = dict( + in_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = Gunzip.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py b/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py index 10eb41190b..4da9dad47d 100644 --- a/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py +++ b/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py @@ -4,10 +4,11 @@ def test_NonSteadyStateDetector_inputs(): - input_map = dict(in_file=dict( - extensions=None, - mandatory=True, - ), ) + input_map = dict( + in_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = NonSteadyStateDetector.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Allineate.py b/nipype/interfaces/afni/tests/test_auto_Allineate.py index e1565376bd..0b110e669a 100644 --- a/nipype/interfaces/afni/tests/test_auto_Allineate.py +++ b/nipype/interfaces/afni/tests/test_auto_Allineate.py @@ -86,7 +86,6 @@ def test_Allineate_inputs(): ), out_weight_file=dict( argstr='-wtprefix %s', - extensions=None, xor=['allcostx'], ), outputtype=dict(), diff --git a/nipype/interfaces/afni/tests/test_auto_Cat.py b/nipype/interfaces/afni/tests/test_auto_Cat.py index 50806add01..345806f746 100644 --- a/nipype/interfaces/afni/tests/test_auto_Cat.py +++ b/nipype/interfaces/afni/tests/test_auto_Cat.py @@ -21,9 +21,10 @@ def test_Cat_inputs(): usedefault=True, ), omitconst=dict(argstr='-nonconst', ), - out_cint=dict(xor=[ - 'out_format', 'out_nice', 'out_double', 'out_fint', 'out_int' - ], ), + out_cint=dict( + xor=[ + 'out_format', 'out_nice', 'out_double', 'out_fint', 'out_int' + ], ), out_double=dict( argstr='-d', xor=['out_format', 'out_nice', 'out_int', 'out_fint', 'out_cint'], diff --git a/nipype/interfaces/afni/tests/test_auto_ClipLevel.py b/nipype/interfaces/afni/tests/test_auto_ClipLevel.py index 96f928a809..f9b3dbf705 100644 --- a/nipype/interfaces/afni/tests/test_auto_ClipLevel.py +++ b/nipype/interfaces/afni/tests/test_auto_ClipLevel.py @@ -17,7 +17,6 @@ def test_ClipLevel_inputs(): ), grad=dict( argstr='-grad %s', - extensions=None, position=3, xor='doall', ), diff --git a/nipype/interfaces/afni/tests/test_auto_LocalBistat.py b/nipype/interfaces/afni/tests/test_auto_LocalBistat.py index 4632e4cf7d..ed3e61d74d 100644 --- a/nipype/interfaces/afni/tests/test_auto_LocalBistat.py +++ b/nipype/interfaces/afni/tests/test_auto_LocalBistat.py @@ -26,10 +26,7 @@ def test_LocalBistat_inputs(): mandatory=True, position=-1, ), - mask_file=dict( - argstr='-mask %s', - extensions=None, - ), + mask_file=dict(argstr='-mask %s', ), neighborhood=dict( argstr="-nbhd '%s(%s)'", mandatory=True, @@ -40,7 +37,6 @@ def test_LocalBistat_inputs(): ), out_file=dict( argstr='-prefix %s', - extensions=None, keep_extension=True, name_source='in_file1', name_template='%s_bistat', @@ -53,7 +49,6 @@ def test_LocalBistat_inputs(): ), weight_file=dict( argstr='-weight %s', - extensions=None, xor=['automask'], ), ) diff --git 
a/nipype/interfaces/afni/tests/test_auto_Localstat.py b/nipype/interfaces/afni/tests/test_auto_Localstat.py index 62dc800941..011ce44da8 100644 --- a/nipype/interfaces/afni/tests/test_auto_Localstat.py +++ b/nipype/interfaces/afni/tests/test_auto_Localstat.py @@ -21,10 +21,7 @@ def test_Localstat_inputs(): mandatory=True, position=-1, ), - mask_file=dict( - argstr='-mask %s', - extensions=None, - ), + mask_file=dict(argstr='-mask %s', ), neighborhood=dict( argstr="-nbhd '%s(%s)'", mandatory=True, @@ -36,7 +33,6 @@ def test_Localstat_inputs(): ), out_file=dict( argstr='-prefix %s', - extensions=None, keep_extension=True, name_source='in_file', name_template='%s_localstat', diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpApply.py b/nipype/interfaces/afni/tests/test_auto_NwarpApply.py index 87388c65ec..e00457f4f3 100644 --- a/nipype/interfaces/afni/tests/test_auto_NwarpApply.py +++ b/nipype/interfaces/afni/tests/test_auto_NwarpApply.py @@ -20,10 +20,7 @@ def test_NwarpApply_inputs(): usedefault=True, ), inv_warp=dict(argstr='-iwarp', ), - master=dict( - argstr='-master %s', - extensions=None, - ), + master=dict(argstr='-master %s', ), out_file=dict( argstr='-prefix %s', extensions=None, diff --git a/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py b/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py index 114fe53fba..f8e664a727 100644 --- a/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py +++ b/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py @@ -30,7 +30,6 @@ def test_OneDToolPy_inputs(): show_censor_count=dict(argstr='-show_censor_count', ), show_cormat_warnings=dict( argstr='-show_cormat_warnings |& tee %s', - extensions=None, position=-1, xor=['out_file'], ), diff --git a/nipype/interfaces/afni/tests/test_auto_Qwarp.py b/nipype/interfaces/afni/tests/test_auto_Qwarp.py index 0b8a9e38ec..3ef8c2e9b2 100644 --- a/nipype/interfaces/afni/tests/test_auto_Qwarp.py +++ b/nipype/interfaces/afni/tests/test_auto_Qwarp.py @@ -125,10 +125,7 @@ def test_Qwarp_inputs(): name_source=['in_file'], name_template='ppp_%s', ), - out_weight_file=dict( - argstr='-wtprefix %s', - extensions=None, - ), + out_weight_file=dict(argstr='-wtprefix %s', ), outputtype=dict(), overwrite=dict(argstr='-overwrite', ), pblur=dict(argstr='-pblur %s', ), diff --git a/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py b/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py index e282d0d0a5..ca27a0d682 100644 --- a/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py +++ b/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py @@ -125,10 +125,7 @@ def test_QwarpPlusMinus_inputs(): position=0, usedefault=True, ), - out_weight_file=dict( - argstr='-wtprefix %s', - extensions=None, - ), + out_weight_file=dict(argstr='-wtprefix %s', ), outputtype=dict(), overwrite=dict(argstr='-overwrite', ), pblur=dict(argstr='-pblur %s', ), diff --git a/nipype/interfaces/afni/tests/test_auto_ROIStats.py b/nipype/interfaces/afni/tests/test_auto_ROIStats.py index c7fc517ccc..d3c956f7c5 100644 --- a/nipype/interfaces/afni/tests/test_auto_ROIStats.py +++ b/nipype/interfaces/afni/tests/test_auto_ROIStats.py @@ -49,10 +49,7 @@ def test_ROIStats_inputs(): position=-1, ), quiet=dict(argstr='-quiet', ), - roisel=dict( - argstr='-roisel %s', - extensions=None, - ), + roisel=dict(argstr='-roisel %s', ), stat=dict(argstr='%s...', ), zerofill=dict( argstr='-zerofill %s', diff --git a/nipype/interfaces/afni/tests/test_auto_ReHo.py b/nipype/interfaces/afni/tests/test_auto_ReHo.py index cf3e468159..0edcedcdaf 100644 --- 
a/nipype/interfaces/afni/tests/test_auto_ReHo.py +++ b/nipype/interfaces/afni/tests/test_auto_ReHo.py @@ -25,17 +25,13 @@ def test_ReHo_inputs(): argstr='-in_rois %s', extensions=None, ), - mask_file=dict( - argstr='-mask %s', - extensions=None, - ), + mask_file=dict(argstr='-mask %s', ), neighborhood=dict( argstr='-nneigh %s', xor=['sphere', 'ellipsoid'], ), out_file=dict( argstr='-prefix %s', - extensions=None, keep_extension=True, name_source='in_file', name_template='%s_reho', diff --git a/nipype/interfaces/afni/tests/test_auto_Remlfit.py b/nipype/interfaces/afni/tests/test_auto_Remlfit.py index 05ee75210e..f2e6703bf6 100644 --- a/nipype/interfaces/afni/tests/test_auto_Remlfit.py +++ b/nipype/interfaces/afni/tests/test_auto_Remlfit.py @@ -58,7 +58,6 @@ def test_Remlfit_inputs(): ), matim=dict( argstr='-matim %s', - extensions=None, xor=['matrix'], ), matrix=dict( diff --git a/nipype/interfaces/afni/tests/test_auto_Resample.py b/nipype/interfaces/afni/tests/test_auto_Resample.py index f63e1347f5..560d883d75 100644 --- a/nipype/interfaces/afni/tests/test_auto_Resample.py +++ b/nipype/interfaces/afni/tests/test_auto_Resample.py @@ -17,10 +17,7 @@ def test_Resample_inputs(): mandatory=True, position=-1, ), - master=dict( - argstr='-master %s', - extensions=None, - ), + master=dict(argstr='-master %s', ), num_threads=dict( nohash=True, usedefault=True, diff --git a/nipype/interfaces/afni/tests/test_auto_TCorrMap.py b/nipype/interfaces/afni/tests/test_auto_TCorrMap.py index 209473fe6f..1ea4c0790e 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCorrMap.py +++ b/nipype/interfaces/afni/tests/test_auto_TCorrMap.py @@ -93,10 +93,7 @@ def test_TCorrMap_inputs(): name_source='in_file', suffix='_qmean', ), - regress_out_timeseries=dict( - argstr='-ort %s', - extensions=None, - ), + regress_out_timeseries=dict(argstr='-ort %s', ), seeds=dict( argstr='-seed %s', extensions=None, diff --git a/nipype/interfaces/afni/tests/test_auto_Zeropad.py b/nipype/interfaces/afni/tests/test_auto_Zeropad.py index 1bd80cfad8..abeceda432 100644 --- a/nipype/interfaces/afni/tests/test_auto_Zeropad.py +++ b/nipype/interfaces/afni/tests/test_auto_Zeropad.py @@ -55,7 +55,6 @@ def test_Zeropad_inputs(): ), master=dict( argstr='-master %s', - extensions=None, xor=['I', 'S', 'A', 'P', 'L', 'R', 'z', 'RL', 'AP', 'IS', 'mm'], ), mm=dict( diff --git a/nipype/interfaces/ants/tests/test_auto_ANTS.py b/nipype/interfaces/ants/tests/test_auto_ANTS.py index 1ee5cabe21..5c6fa2c501 100644 --- a/nipype/interfaces/ants/tests/test_auto_ANTS.py +++ b/nipype/interfaces/ants/tests/test_auto_ANTS.py @@ -57,8 +57,8 @@ def test_ANTS_inputs(): regularization=dict(argstr='%s', ), regularization_deformation_field_sigma=dict( requires=['regularization'], ), - regularization_gradient_field_sigma=dict(requires=['regularization' - ], ), + regularization_gradient_field_sigma=dict( + requires=['regularization'], ), smoothing_sigmas=dict( argstr='--gaussian-smoothing-sigmas %s', sep='x', diff --git a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py index a2829be3fa..02ebe6431c 100644 --- a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py +++ b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py @@ -44,10 +44,11 @@ def test_AntsJointFusion_inputs(): nohash=True, usedefault=True, ), - out_atlas_voting_weight_name_format=dict(requires=[ - 'out_label_fusion', 'out_intensity_fusion_name_format', - 'out_label_post_prob_name_format' - ], ), + 
out_atlas_voting_weight_name_format=dict( + requires=[ + 'out_label_fusion', 'out_intensity_fusion_name_format', + 'out_label_post_prob_name_format' + ], ), out_intensity_fusion_name_format=dict(argstr='', ), out_label_fusion=dict( argstr='%s', diff --git a/nipype/interfaces/ants/tests/test_auto_Atropos.py b/nipype/interfaces/ants/tests/test_auto_Atropos.py index 73d7f6813a..be1a271f9a 100644 --- a/nipype/interfaces/ants/tests/test_auto_Atropos.py +++ b/nipype/interfaces/ants/tests/test_auto_Atropos.py @@ -53,8 +53,8 @@ def test_Atropos_inputs(): prior_probability_threshold=dict(requires=['prior_weighting'], ), prior_weighting=dict(), save_posteriors=dict(), - use_mixture_model_proportions=dict(requires=['posterior_formulation' - ], ), + use_mixture_model_proportions=dict( + requires=['posterior_formulation'], ), use_random_seed=dict( argstr='--use-random-seed %d', usedefault=True, diff --git a/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py b/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py index 95afcd3216..e39dc3acaa 100644 --- a/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py @@ -12,7 +12,6 @@ def test_StdOutCommandLine_inputs(): ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py b/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py index c40082d836..28170946ac 100644 --- a/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py +++ b/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py @@ -50,7 +50,6 @@ def test_AnalyzeHeader_inputs(): ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py b/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py index edf38864fa..66ac282175 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py @@ -24,7 +24,6 @@ def test_ComputeEigensystem_inputs(): maxcomponents=dict(argstr='-maxcomponents %d', ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py b/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py index 75604df01b..9cfae77b2f 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py @@ -20,7 +20,6 @@ def test_ComputeFractionalAnisotropy_inputs(): inputmodel=dict(argstr='-inputmodel %s', ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py b/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py index 4d31fa884c..1443a253bd 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py @@ -20,7 +20,6 @@ def test_ComputeTensorTrace_inputs(): inputmodel=dict(argstr='-inputmodel %s', ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_DTIFit.py b/nipype/interfaces/camino/tests/test_auto_DTIFit.py index 0870a77752..26d27d57d2 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTIFit.py +++ 
b/nipype/interfaces/camino/tests/test_auto_DTIFit.py @@ -26,7 +26,6 @@ def test_DTIFit_inputs(): ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py b/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py index 3242163c3a..f7caf77ef1 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py +++ b/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py @@ -28,7 +28,6 @@ def test_DTLUTGen_inputs(): ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py b/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py index 9b8a74be6a..350c604c99 100644 --- a/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py +++ b/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py @@ -40,7 +40,6 @@ def test_FSL2Scheme_inputs(): ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py b/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py index 2c013cf216..29864bef82 100644 --- a/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py +++ b/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py @@ -18,7 +18,6 @@ def test_Image2Voxel_inputs(): ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_LinRecon.py b/nipype/interfaces/camino/tests/test_auto_LinRecon.py index 147cfaab5e..996d8f4b99 100644 --- a/nipype/interfaces/camino/tests/test_auto_LinRecon.py +++ b/nipype/interfaces/camino/tests/test_auto_LinRecon.py @@ -24,7 +24,6 @@ def test_LinRecon_inputs(): normalize=dict(argstr='-normalize', ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_MESD.py b/nipype/interfaces/camino/tests/test_auto_MESD.py index f7dfecfdb9..57dbbf3b28 100644 --- a/nipype/interfaces/camino/tests/test_auto_MESD.py +++ b/nipype/interfaces/camino/tests/test_auto_MESD.py @@ -42,7 +42,6 @@ def test_MESD_inputs(): ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_ModelFit.py b/nipype/interfaces/camino/tests/test_auto_ModelFit.py index 6d49969eb6..e5c16ec975 100644 --- a/nipype/interfaces/camino/tests/test_auto_ModelFit.py +++ b/nipype/interfaces/camino/tests/test_auto_ModelFit.py @@ -34,7 +34,6 @@ def test_ModelFit_inputs(): ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py b/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py index 65e0210268..0a5583b03e 100644 --- a/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py +++ b/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py @@ -26,7 +26,6 @@ def test_NIfTIDT2Camino_inputs(): ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py b/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py index e5cdea5cae..c918a372f1 100644 --- a/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py +++ b/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py @@ -36,7 +36,6 @@ def test_PicoPDFs_inputs(): ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py 
b/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py index 0386e0d54f..c30ef08d8c 100644 --- a/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py +++ b/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py @@ -62,7 +62,6 @@ def test_ProcStreamlines_inputs(): noresample=dict(argstr='-noresample', ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_QBallMX.py b/nipype/interfaces/camino/tests/test_auto_QBallMX.py index f452f26350..7f7d0bc99f 100644 --- a/nipype/interfaces/camino/tests/test_auto_QBallMX.py +++ b/nipype/interfaces/camino/tests/test_auto_QBallMX.py @@ -20,7 +20,6 @@ def test_QBallMX_inputs(): ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py b/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py index 220e116255..795138ea89 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py +++ b/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py @@ -35,7 +35,6 @@ def test_SFLUTGen_inputs(): ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py b/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py index 1b71553676..80222fce4c 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py +++ b/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py @@ -27,7 +27,6 @@ def test_SFPICOCalibData_inputs(): ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_SFPeaks.py b/nipype/interfaces/camino/tests/test_auto_SFPeaks.py index 49ac58aa06..8db250c58c 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFPeaks.py +++ b/nipype/interfaces/camino/tests/test_auto_SFPeaks.py @@ -38,7 +38,6 @@ def test_SFPeaks_inputs(): ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_Shredder.py b/nipype/interfaces/camino/tests/test_auto_Shredder.py index 2d8ec43589..594b35fc60 100644 --- a/nipype/interfaces/camino/tests/test_auto_Shredder.py +++ b/nipype/interfaces/camino/tests/test_auto_Shredder.py @@ -28,7 +28,6 @@ def test_Shredder_inputs(): ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_TractShredder.py b/nipype/interfaces/camino/tests/test_auto_TractShredder.py index e4df010c60..eeed244533 100644 --- a/nipype/interfaces/camino/tests/test_auto_TractShredder.py +++ b/nipype/interfaces/camino/tests/test_auto_TractShredder.py @@ -28,7 +28,6 @@ def test_TractShredder_inputs(): ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py b/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py index bd8d295572..72b6f106d8 100644 --- a/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py +++ b/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py @@ -25,7 +25,6 @@ def test_VtkStreamlines_inputs(): interpolatescalars=dict(argstr='-interpolatescalars', ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py b/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py index 494ce981c5..8a83130a4d 100644 --- 
a/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py @@ -79,10 +79,11 @@ def test_Aparc2Aseg_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Aparc2Aseg_outputs(): - output_map = dict(out_file=dict( - argstr='%s', - extensions=None, - ), ) + output_map = dict( + out_file=dict( + argstr='%s', + extensions=None, + ), ) outputs = Aparc2Aseg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py b/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py index 03385da2ad..fea3617596 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py @@ -28,10 +28,11 @@ def test_Apas2Aseg_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Apas2Aseg_outputs(): - output_map = dict(out_file=dict( - argstr='%s', - extensions=None, - ), ) + output_map = dict( + out_file=dict( + argstr='%s', + extensions=None, + ), ) outputs = Apas2Aseg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py b/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py index 4d56d217c7..f3bfd5ad62 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py @@ -7,10 +7,7 @@ def test_CALabel_inputs(): input_map = dict( align=dict(argstr='-align', ), args=dict(argstr='%s', ), - aseg=dict( - argstr='-aseg %s', - extensions=None, - ), + aseg=dict(argstr='-aseg %s', ), environ=dict( nohash=True, usedefault=True, @@ -29,10 +26,7 @@ def test_CALabel_inputs(): argstr='-r %s', extensions=None, ), - label=dict( - argstr='-l %s', - extensions=None, - ), + label=dict(argstr='-l %s', ), no_big_ventricles=dict(argstr='-nobigventricles', ), num_threads=dict(), out_file=dict( diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py b/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py index 5700103e84..1f6546ae3a 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py @@ -59,7 +59,7 @@ def test_CANormalize_inputs(): def test_CANormalize_outputs(): output_map = dict( control_points=dict(extensions=None, ), - out_file=dict(extensions=None, ), + out_file=dict(), ) outputs = CANormalize.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py b/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py index dcf5aa84a8..83f669b218 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py @@ -53,7 +53,7 @@ def test_CARegister_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CARegister_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(), ) outputs = CARegister.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py b/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py index fe9c1a3121..68a5a98e66 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py +++ 
b/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py @@ -35,7 +35,7 @@ def test_CheckTalairachAlignment_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CheckTalairachAlignment_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(), ) outputs = CheckTalairachAlignment.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py b/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py index ee1d3ae7f3..8acab945c1 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py @@ -38,13 +38,11 @@ def test_ConcatenateLTA_inputs(): subjects_dir=dict(), tal_source_file=dict( argstr='-tal %s', - extensions=None, position=-5, requires=['tal_template_file'], ), tal_template_file=dict( argstr='%s', - extensions=None, position=-4, requires=['tal_source_file'], ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py b/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py index 3cd62e8ee7..3c4e5aa484 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py @@ -5,16 +5,10 @@ def test_Contrast_inputs(): input_map = dict( - annotation=dict( - extensions=None, - mandatory=True, - ), + annotation=dict(mandatory=True, ), args=dict(argstr='%s', ), copy_inputs=dict(), - cortex=dict( - extensions=None, - mandatory=True, - ), + cortex=dict(mandatory=True, ), environ=dict( nohash=True, usedefault=True, diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py b/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py index 83427dcd20..4ba8442b14 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py @@ -35,10 +35,7 @@ def test_FixTopology_inputs(): ), mgz=dict(argstr='-mgz', ), seed=dict(argstr='-seed %d', ), - sphere=dict( - argstr='-sphere %s', - extensions=None, - ), + sphere=dict(argstr='-sphere %s', ), subject_id=dict( argstr='%s', mandatory=True, diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py index eaf91bc1e8..10c7af6832 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py @@ -6,10 +6,7 @@ def test_MRIsCALabel_inputs(): input_map = dict( args=dict(argstr='%s', ), - aseg=dict( - argstr='-aseg %s', - extensions=None, - ), + aseg=dict(argstr='-aseg %s', ), canonsurf=dict( argstr='%s', extensions=None, @@ -36,10 +33,7 @@ def test_MRIsCALabel_inputs(): mandatory=True, position=-4, ), - label=dict( - argstr='-l %s', - extensions=None, - ), + label=dict(argstr='-l %s', ), num_threads=dict(), out_file=dict( argstr='%s', diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py b/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py index 3d3f5cde11..badb3b4f0c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py @@ -17,30 +17,22 @@ def test_MS_LDA_inputs(): mandatory=True, position=-1, ), - label_file=dict( - argstr='-label %s', - extensions=None, - ), + label_file=dict(argstr='-label %s', ), lda_labels=dict( argstr='-lda %s', mandatory=True, sep=' ', ), - 
mask_file=dict( - argstr='-mask %s', - extensions=None, - ), + mask_file=dict(argstr='-mask %s', ), shift=dict(argstr='-shift %d', ), subjects_dir=dict(), use_weights=dict(argstr='-W', ), vol_synth_file=dict( argstr='-synth %s', - extensions=None, mandatory=True, ), weight_file=dict( argstr='-weight %s', - extensions=None, mandatory=True, ), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py b/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py index baf03f026c..2384adbb2b 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py @@ -43,7 +43,7 @@ def test_Normalize_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Normalize_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(), ) outputs = Normalize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py b/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py index 8b4ae45d67..be19b2bd37 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py @@ -27,16 +27,11 @@ def test_ParcellationStats_inputs(): ), in_annotation=dict( argstr='-a %s', - extensions=None, xor=['in_label'], ), - in_cortex=dict( - argstr='-cortex %s', - extensions=None, - ), + in_cortex=dict(argstr='-cortex %s', ), in_label=dict( argstr='-l %s', - extensions=None, xor=['in_annotatoin', 'out_color'], ), lh_pial=dict( @@ -50,13 +45,11 @@ def test_ParcellationStats_inputs(): mgz=dict(argstr='-mgz', ), out_color=dict( argstr='-c %s', - extensions=None, genfile=True, xor=['in_label'], ), out_table=dict( argstr='-f %s', - extensions=None, genfile=True, requires=['tabular_output'], ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py b/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py index 5a609f586e..f3406d41fc 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py @@ -47,7 +47,7 @@ def test_RegisterAVItoTalairach_outputs(): extensions=None, usedefault=True, ), - out_file=dict(extensions=None, ), + out_file=dict(), ) outputs = RegisterAVItoTalairach.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py b/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py index f99acec899..28ff49d99e 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py @@ -48,10 +48,11 @@ def test_RelabelHypointensities_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_RelabelHypointensities_outputs(): - output_map = dict(out_file=dict( - argstr='%s', - extensions=None, - ), ) + output_map = dict( + out_file=dict( + argstr='%s', + extensions=None, + ), ) outputs = RelabelHypointensities.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py index 09da0d001d..3cf47c71ce 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py +++ 
b/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py @@ -100,10 +100,7 @@ def test_SegStatsReconAll_inputs(): extensions=None, mandatory=True, ), - ribbon=dict( - extensions=None, - mandatory=True, - ), + ribbon=dict(mandatory=True, ), segment_id=dict(argstr='--id %s...', ), segmentation_file=dict( argstr='--seg %s', diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py index 968041ff67..26dcbe3458 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py @@ -59,7 +59,6 @@ def test_SurfaceSnapshots_inputs(): overlay_range_offset=dict(argstr='-foffset %.3f', ), overlay_reg=dict( argstr='-overlay-reg %s', - extensions=None, xor=['overlay_reg', 'identity_reg', 'mni152_reg'], ), patch_file=dict( diff --git a/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py b/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py index 2a28765d67..9d248c1a7d 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py @@ -30,9 +30,9 @@ def test_TalairachAVI_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_TalairachAVI_outputs(): output_map = dict( - out_file=dict(extensions=None, ), - out_log=dict(extensions=None, ), - out_txt=dict(extensions=None, ), + out_file=dict(), + out_log=dict(), + out_txt=dict(), ) outputs = TalairachAVI.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py b/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py index 1961e3ded6..053d429c9b 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py @@ -24,10 +24,11 @@ def test_TalairachQC_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TalairachQC_outputs(): - output_map = dict(log_file=dict( - extensions=None, - usedefault=True, - ), ) + output_map = dict( + log_file=dict( + extensions=None, + usedefault=True, + ), ) outputs = TalairachQC.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py b/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py index ffd5b42bd1..de32f63f5f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py +++ b/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py @@ -34,10 +34,11 @@ def test_AccuracyTester_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_AccuracyTester_outputs(): - output_map = dict(output_directory=dict( - argstr='%s', - position=1, - ), ) + output_map = dict( + output_directory=dict( + argstr='%s', + position=1, + ), ) outputs = AccuracyTester.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Cleaner.py b/nipype/interfaces/fsl/tests/test_auto_Cleaner.py index 597dc88d5e..a75df99db5 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Cleaner.py +++ b/nipype/interfaces/fsl/tests/test_auto_Cleaner.py @@ -22,17 +22,14 @@ def test_Cleaner_inputs(): ), confound_file=dict( argstr='-x %s', - extensions=None, position=4, ), confound_file_1=dict( argstr='-x %s', - extensions=None, position=5, ), confound_file_2=dict( argstr='-x %s', - extensions=None, position=6, ), environ=dict( 
diff --git a/nipype/interfaces/fsl/tests/test_auto_Cluster.py b/nipype/interfaces/fsl/tests/test_auto_Cluster.py index 19340b9383..6ed34ab816 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Cluster.py +++ b/nipype/interfaces/fsl/tests/test_auto_Cluster.py @@ -7,10 +7,7 @@ def test_Cluster_inputs(): input_map = dict( args=dict(argstr='%s', ), connectivity=dict(argstr='--connectivity=%d', ), - cope_file=dict( - argstr='--cope=%s', - extensions=None, - ), + cope_file=dict(argstr='--cope=%s', ), dlh=dict(argstr='--dlh=%.10f', ), environ=dict( nohash=True, diff --git a/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py b/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py index eaeeb34ce8..3945c40a87 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py +++ b/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py @@ -22,11 +22,12 @@ def test_FeatureExtractor_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_FeatureExtractor_outputs(): - output_map = dict(mel_ica=dict( - argstr='%s', - copyfile=False, - position=-1, - ), ) + output_map = dict( + mel_ica=dict( + argstr='%s', + copyfile=False, + position=-1, + ), ) outputs = FeatureExtractor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py b/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py index a8262c9be1..5c7b717d93 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py +++ b/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py @@ -30,10 +30,11 @@ def test_FindTheBiggest_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_FindTheBiggest_outputs(): - output_map = dict(out_file=dict( - argstr='%s', - extensions=None, - ), ) + output_map = dict( + out_file=dict( + argstr='%s', + extensions=None, + ), ) outputs = FindTheBiggest.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py b/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py index 638bcdd156..18ef078a79 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py +++ b/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py @@ -4,22 +4,24 @@ def test_TrainingSetCreator_inputs(): - input_map = dict(mel_icas_in=dict( - argstr='%s', - copyfile=False, - position=-1, - ), ) + input_map = dict( + mel_icas_in=dict( + argstr='%s', + copyfile=False, + position=-1, + ), ) inputs = TrainingSetCreator.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TrainingSetCreator_outputs(): - output_map = dict(mel_icas_out=dict( - argstr='%s', - copyfile=False, - position=-1, - ), ) + output_map = dict( + mel_icas_out=dict( + argstr='%s', + copyfile=False, + position=-1, + ), ) outputs = TrainingSetCreator.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Average.py b/nipype/interfaces/minc/tests/test_auto_Average.py index 6f0e84d144..678ab93a52 100644 --- a/nipype/interfaces/minc/tests/test_auto_Average.py +++ b/nipype/interfaces/minc/tests/test_auto_Average.py @@ -29,7 +29,6 @@ def test_Average_inputs(): ), filelist=dict( argstr='-filelist %s', - extensions=None, mandatory=True, xor=('input_files', 'filelist'), ), @@ -127,10 +126,7 @@ def 
test_Average_inputs(): argstr='-quiet', xor=('verbose', 'quiet'), ), - sdfile=dict( - argstr='-sdfile %s', - extensions=None, - ), + sdfile=dict(argstr='-sdfile %s', ), two=dict(argstr='-2', ), verbose=dict( argstr='-verbose', diff --git a/nipype/interfaces/minc/tests/test_auto_BBox.py b/nipype/interfaces/minc/tests/test_auto_BBox.py index 7bdf35f03d..c1b3515cea 100644 --- a/nipype/interfaces/minc/tests/test_auto_BBox.py +++ b/nipype/interfaces/minc/tests/test_auto_BBox.py @@ -25,7 +25,6 @@ def test_BBox_inputs(): ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/minc/tests/test_auto_Beast.py b/nipype/interfaces/minc/tests/test_auto_Beast.py index 3a44b436b0..bc4705db2e 100644 --- a/nipype/interfaces/minc/tests/test_auto_Beast.py +++ b/nipype/interfaces/minc/tests/test_auto_Beast.py @@ -18,10 +18,7 @@ def test_Beast_inputs(): argstr='-alpha %s', usedefault=True, ), - configuration_file=dict( - argstr='-configuration %s', - extensions=None, - ), + configuration_file=dict(argstr='-configuration %s', ), environ=dict( nohash=True, usedefault=True, @@ -30,7 +27,6 @@ def test_Beast_inputs(): flip_images=dict(argstr='-flip', ), input_file=dict( argstr='%s', - extensions=None, mandatory=True, position=-2, ), @@ -48,7 +44,6 @@ def test_Beast_inputs(): ), output_file=dict( argstr='%s', - extensions=None, hash_files=False, name_source=['input_file'], name_template='%s_beast_mask.mnc', diff --git a/nipype/interfaces/minc/tests/test_auto_Calc.py b/nipype/interfaces/minc/tests/test_auto_Calc.py index d5fb316e39..e9df677150 100644 --- a/nipype/interfaces/minc/tests/test_auto_Calc.py +++ b/nipype/interfaces/minc/tests/test_auto_Calc.py @@ -26,7 +26,6 @@ def test_Calc_inputs(): eval_width=dict(argstr='-eval_width %s', ), expfile=dict( argstr='-expfile %s', - extensions=None, mandatory=True, xor=('expression', 'expfile'), ), @@ -37,7 +36,6 @@ def test_Calc_inputs(): ), filelist=dict( argstr='-filelist %s', - extensions=None, mandatory=True, xor=('input_files', 'filelist'), ), diff --git a/nipype/interfaces/minc/tests/test_auto_Dump.py b/nipype/interfaces/minc/tests/test_auto_Dump.py index fbd33f5a46..19c299dac8 100644 --- a/nipype/interfaces/minc/tests/test_auto_Dump.py +++ b/nipype/interfaces/minc/tests/test_auto_Dump.py @@ -36,7 +36,6 @@ def test_Dump_inputs(): netcdf_name=dict(argstr='-n %s', ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/minc/tests/test_auto_Extract.py b/nipype/interfaces/minc/tests/test_auto_Extract.py index fbd0a84729..35f6162c7f 100644 --- a/nipype/interfaces/minc/tests/test_auto_Extract.py +++ b/nipype/interfaces/minc/tests/test_auto_Extract.py @@ -84,7 +84,6 @@ def test_Extract_inputs(): ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/minc/tests/test_auto_Math.py b/nipype/interfaces/minc/tests/test_auto_Math.py index 41d75379d4..6bc142b15d 100644 --- a/nipype/interfaces/minc/tests/test_auto_Math.py +++ b/nipype/interfaces/minc/tests/test_auto_Math.py @@ -36,7 +36,6 @@ def test_Math_inputs(): exp=dict(argstr='-exp -const2 %s %s', ), filelist=dict( argstr='-filelist %s', - extensions=None, mandatory=True, xor=('input_files', 'filelist'), ), diff --git a/nipype/interfaces/minc/tests/test_auto_Norm.py b/nipype/interfaces/minc/tests/test_auto_Norm.py index cdd4a3db7c..9fb0d3c5ba 100644 --- a/nipype/interfaces/minc/tests/test_auto_Norm.py +++ b/nipype/interfaces/minc/tests/test_auto_Norm.py @@ 
-26,10 +26,7 @@ def test_Norm_inputs(): position=-2, ), lower=dict(argstr='-lower %s', ), - mask=dict( - argstr='-mask %s', - extensions=None, - ), + mask=dict(argstr='-mask %s', ), out_ceil=dict(argstr='-out_ceil %s', ), out_floor=dict(argstr='-out_floor %s', ), output_file=dict( @@ -43,7 +40,6 @@ def test_Norm_inputs(): ), output_threshold_mask=dict( argstr='-threshold_mask %s', - extensions=None, hash_files=False, name_source=['input_file'], name_template='%s_norm_threshold_mask.mnc', diff --git a/nipype/interfaces/minc/tests/test_auto_Resample.py b/nipype/interfaces/minc/tests/test_auto_Resample.py index da512d9f62..8d4b24ff41 100644 --- a/nipype/interfaces/minc/tests/test_auto_Resample.py +++ b/nipype/interfaces/minc/tests/test_auto_Resample.py @@ -94,10 +94,7 @@ def test_Resample_inputs(): argstr='-keep_real_range', xor=('keep_real_range', 'nokeep_real_range'), ), - like=dict( - argstr='-like %s', - extensions=None, - ), + like=dict(argstr='-like %s', ), nearest_neighbour_interpolation=dict( argstr='-nearest_neighbour', xor=('trilinear_interpolation', 'tricubic_interpolation', @@ -160,10 +157,7 @@ def test_Resample_inputs(): xor=('nelements', 'nelements_x_y_or_z'), ), talairach=dict(argstr='-talairach', ), - transformation=dict( - argstr='-transformation %s', - extensions=None, - ), + transformation=dict(argstr='-transformation %s', ), transverse_slices=dict( argstr='-transverse', xor=('transverse', 'sagittal', 'coronal'), diff --git a/nipype/interfaces/minc/tests/test_auto_Reshape.py b/nipype/interfaces/minc/tests/test_auto_Reshape.py index d80f9a377b..669425da95 100644 --- a/nipype/interfaces/minc/tests/test_auto_Reshape.py +++ b/nipype/interfaces/minc/tests/test_auto_Reshape.py @@ -16,7 +16,6 @@ def test_Reshape_inputs(): ), input_file=dict( argstr='%s', - extensions=None, mandatory=True, position=-2, ), diff --git a/nipype/interfaces/minc/tests/test_auto_ToRaw.py b/nipype/interfaces/minc/tests/test_auto_ToRaw.py index 42ba72f145..aeda687c97 100644 --- a/nipype/interfaces/minc/tests/test_auto_ToRaw.py +++ b/nipype/interfaces/minc/tests/test_auto_ToRaw.py @@ -26,7 +26,6 @@ def test_ToRaw_inputs(): ), out_file=dict( argstr='> %s', - extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/minc/tests/test_auto_VolSymm.py b/nipype/interfaces/minc/tests/test_auto_VolSymm.py index ae3332b7f2..b710d59543 100644 --- a/nipype/interfaces/minc/tests/test_auto_VolSymm.py +++ b/nipype/interfaces/minc/tests/test_auto_VolSymm.py @@ -22,7 +22,6 @@ def test_VolSymm_inputs(): fit_nonlinear=dict(argstr='-nonlinear', ), input_file=dict( argstr='%s', - extensions=None, mandatory=True, position=-3, ), @@ -39,7 +38,6 @@ def test_VolSymm_inputs(): ), trans_file=dict( argstr='%s', - extensions=None, genfile=True, hash_files=False, keep_extension=False, diff --git a/nipype/interfaces/minc/tests/test_auto_XfmInvert.py b/nipype/interfaces/minc/tests/test_auto_XfmInvert.py index 69c455f875..9e242300da 100644 --- a/nipype/interfaces/minc/tests/test_auto_XfmInvert.py +++ b/nipype/interfaces/minc/tests/test_auto_XfmInvert.py @@ -16,7 +16,6 @@ def test_XfmInvert_inputs(): ), input_file=dict( argstr='%s', - extensions=None, mandatory=True, position=-2, ), diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py b/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py index 5ee1e8cb00..90c5272ed0 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py @@ -14,12 +14,8 @@ def test_BrainMask_inputs(): 
grad_file=dict( argstr='-grad %s', extensions=None, - xor=['grad_fsl'], - ), - grad_fsl=dict( - argstr='-fslgrad %s %s', - xor=['grad_file'], ), + grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py index 59617b30ab..7658201223 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py @@ -15,12 +15,8 @@ def test_DWIExtract_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, - xor=['grad_fsl'], - ), - grad_fsl=dict( - argstr='-fslgrad %s %s', - xor=['grad_file'], ), + grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py index c2816f2646..c64efa2ca4 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py @@ -41,12 +41,8 @@ def test_EstimateFOD_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, - xor=['grad_fsl'], - ), - grad_fsl=dict( - argstr='-fslgrad %s %s', - xor=['grad_file'], ), + grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py b/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py index 8f8142746a..84404fdde6 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py @@ -14,12 +14,8 @@ def test_FitTensor_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, - xor=['grad_fsl'], - ), - grad_fsl=dict( - argstr='-fslgrad %s %s', - xor=['grad_file'], ), + grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py b/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py index f7032cb624..92594da593 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py @@ -19,12 +19,8 @@ def test_Generate5tt_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, - xor=['grad_fsl'], - ), - grad_fsl=dict( - argstr='-fslgrad %s %s', - xor=['grad_file'], ), + grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py index f11b615cc8..9271d07517 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py @@ -22,12 +22,8 @@ def test_MRConvert_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, - xor=['grad_fsl'], - ), - grad_fsl=dict( - argstr='-fslgrad %s %s', - xor=['grad_file'], ), + grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py index fe934c45b1..ae494bb932 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py @@ -15,12 +15,8 @@ def 
test_MRMath_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, - xor=['grad_fsl'], - ), - grad_fsl=dict( - argstr='-fslgrad %s %s', - xor=['grad_file'], ), + grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py b/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py index 9415cae211..dd326bad5b 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py @@ -29,12 +29,8 @@ def test_ResponseSD_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, - xor=['grad_fsl'], - ), - grad_fsl=dict( - argstr='-fslgrad %s %s', - xor=['grad_file'], ), + grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py index cf685a50d8..39f239b81d 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py @@ -28,12 +28,8 @@ def test_Tractography_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, - xor=['grad_fsl'], - ), - grad_fsl=dict( - argstr='-fslgrad %s %s', - xor=['grad_file'], ), + grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py b/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py index 2a58b39d57..20995e806e 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py @@ -8,7 +8,6 @@ def test_DwiTool_inputs(): args=dict(argstr='%s', ), b0_file=dict( argstr='-b0 %s', - extensions=None, position=4, ), ball_flag=dict( @@ -29,13 +28,11 @@ def test_DwiTool_inputs(): ), bval_file=dict( argstr='-bval %s', - extensions=None, mandatory=True, position=2, ), bvec_file=dict( argstr='-bvec %s', - extensions=None, position=3, ), diso_val=dict(argstr='-diso %f', ), @@ -62,7 +59,6 @@ def test_DwiTool_inputs(): ), famap_file=dict( argstr='-famap %s', - extensions=None, name_source=['source_file'], name_template='%s_famap.nii.gz', ), @@ -76,24 +72,20 @@ def test_DwiTool_inputs(): ), logdti_file=dict( argstr='-logdti2 %s', - extensions=None, name_source=['source_file'], name_template='%s_logdti2.nii.gz', ), mask_file=dict( argstr='-mask %s', - extensions=None, position=5, ), mcmap_file=dict( argstr='-mcmap %s', - extensions=None, name_source=['source_file'], name_template='%s_mcmap.nii.gz', ), mdmap_file=dict( argstr='-mdmap %s', - extensions=None, name_source=['source_file'], name_template='%s_mdmap.nii.gz', ), @@ -123,26 +115,22 @@ def test_DwiTool_inputs(): ), rgbmap_file=dict( argstr='-rgbmap %s', - extensions=None, name_source=['source_file'], name_template='%s_rgbmap.nii.gz', ), source_file=dict( argstr='-source %s', - extensions=None, mandatory=True, position=1, ), syn_file=dict( argstr='-syn %s', - extensions=None, name_source=['source_file'], name_template='%s_syn.nii.gz', requires=['bvec_file', 'b0_file'], ), v1map_file=dict( argstr='-v1map %s', - extensions=None, name_source=['source_file'], name_template='%s_v1map.nii.gz', ), @@ -154,13 +142,13 @@ def test_DwiTool_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_DwiTool_outputs(): output_map = dict( - famap_file=dict(extensions=None, ), - 
logdti_file=dict(extensions=None, ), - mcmap_file=dict(extensions=None, ), - mdmap_file=dict(extensions=None, ), - rgbmap_file=dict(extensions=None, ), - syn_file=dict(extensions=None, ), - v1map_file=dict(extensions=None, ), + famap_file=dict(), + logdti_file=dict(), + mcmap_file=dict(), + mdmap_file=dict(), + rgbmap_file=dict(), + syn_file=dict(), + v1map_file=dict(), ) outputs = DwiTool.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py b/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py index 94a489a4ad..b2e1bef961 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py @@ -8,7 +8,6 @@ def test_FitAsl_inputs(): args=dict(argstr='%s', ), cbf_file=dict( argstr='-cbf %s', - extensions=None, name_source=['source_file'], name_template='%s_cbf.nii.gz', ), @@ -21,33 +20,19 @@ def test_FitAsl_inputs(): ), error_file=dict( argstr='-error %s', - extensions=None, name_source=['source_file'], name_template='%s_error.nii.gz', ), gm_plasma=dict(argstr='-gmL %f', ), gm_t1=dict(argstr='-gmT1 %f', ), gm_ttt=dict(argstr='-gmTTT %f', ), - ir_output=dict( - argstr='-IRoutput %s', - extensions=None, - ), - ir_volume=dict( - argstr='-IRvolume %s', - extensions=None, - ), + ir_output=dict(argstr='-IRoutput %s', ), + ir_volume=dict(argstr='-IRvolume %s', ), ldd=dict(argstr='-LDD %f', ), - m0map=dict( - argstr='-m0map %s', - extensions=None, - ), - m0mape=dict( - argstr='-m0mape %s', - extensions=None, - ), + m0map=dict(argstr='-m0map %s', ), + m0mape=dict(argstr='-m0mape %s', ), mask=dict( argstr='-mask %s', - extensions=None, position=2, ), mul=dict(argstr='-mul %f', ), @@ -61,29 +46,21 @@ def test_FitAsl_inputs(): pv2=dict(argstr='-pv2 %d', ), pv3=dict(argstr='-pv3 %d %d %d', ), pv_threshold=dict(argstr='-pvthreshold', ), - seg=dict( - argstr='-seg %s', - extensions=None, - ), + seg=dict(argstr='-seg %s', ), segstyle=dict(argstr='-segstyle', ), sig=dict(argstr='-sig', ), source_file=dict( argstr='-source %s', - extensions=None, mandatory=True, position=1, ), syn_file=dict( argstr='-syn %s', - extensions=None, name_source=['source_file'], name_template='%s_syn.nii.gz', ), t1_art_cmp=dict(argstr='-T1a %f', ), - t1map=dict( - argstr='-t1map %s', - extensions=None, - ), + t1map=dict(argstr='-t1map %s', ), t_inv1=dict(argstr='-Tinv1 %f', ), t_inv2=dict(argstr='-Tinv2 %f', ), wm_plasma=dict(argstr='-wmL %f', ), @@ -97,9 +74,9 @@ def test_FitAsl_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_FitAsl_outputs(): output_map = dict( - cbf_file=dict(extensions=None, ), - error_file=dict(extensions=None, ), - syn_file=dict(extensions=None, ), + cbf_file=dict(), + error_file=dict(), + syn_file=dict(), ) outputs = FitAsl.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py b/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py index 2086d12a2b..700d9a31c4 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py @@ -25,20 +25,15 @@ def test_FitDwi_inputs(): ), bval_file=dict( argstr='-bval %s', - extensions=None, mandatory=True, position=2, ), bvec_file=dict( argstr='-bvec %s', - extensions=None, mandatory=True, position=3, ), - cov_file=dict( - argstr='-cov %s', - extensions=None, - ), + cov_file=dict(argstr='-cov %s', ), csf_t2_val=dict(argstr='-csfT2 %f', ), diso_val=dict(argstr='-diso %f', ), dpr_val=dict(argstr='-dpr %f', ), @@ -56,13 +51,11 @@ def test_FitDwi_inputs(): ), error_file=dict( 
argstr='-error %s', - extensions=None, name_source=['source_file'], name_template='%s_error.nii.gz', ), famap_file=dict( argstr='-famap %s', - extensions=None, name_source=['source_file'], name_template='%s_famap.nii.gz', ), @@ -82,17 +75,13 @@ def test_FitDwi_inputs(): argstr='-lm %f %f', requires=['gn_flag'], ), - mask_file=dict( - argstr='-mask %s', - extensions=None, - ), + mask_file=dict(argstr='-mask %s', ), maxit_val=dict( argstr='-maxit %d', requires=['gn_flag'], ), mcmap_file=dict( argstr='-mcmap %s', - extensions=None, name_source=['source_file'], name_template='%s_mcmap.nii.gz', requires=['nodv_flag'], @@ -100,14 +89,12 @@ def test_FitDwi_inputs(): mcmaxit=dict(argstr='-mcmaxit %d', ), mcout=dict( argstr='-mcout %s', - extensions=None, name_source=['source_file'], name_template='%s_mcout.txt', ), mcsamples=dict(argstr='-mcsamples %d', ), mdmap_file=dict( argstr='-mdmap %s', - extensions=None, name_source=['source_file'], name_template='%s_mdmap.nii.gz', ), @@ -129,7 +116,6 @@ def test_FitDwi_inputs(): ), nodiff_file=dict( argstr='-nodiff %s', - extensions=None, name_source=['source_file'], name_template='%s_no_diff.nii.gz', ), @@ -142,19 +128,14 @@ def test_FitDwi_inputs(): ], ), perf_thr=dict(argstr='-perfthreshold %f', ), - prior_file=dict( - argstr='-prior %s', - extensions=None, - ), + prior_file=dict(argstr='-prior %s', ), res_file=dict( argstr='-res %s', - extensions=None, name_source=['source_file'], name_template='%s_resmap.nii.gz', ), rgbmap_file=dict( argstr='-rgbmap %s', - extensions=None, name_source=['source_file'], name_template='%s_rgbmap.nii.gz', requires=['dti_flag'], @@ -163,45 +144,38 @@ def test_FitDwi_inputs(): slice_no=dict(argstr='-slice %d', ), source_file=dict( argstr='-source %s', - extensions=None, mandatory=True, position=1, ), swls_val=dict(argstr='-swls %f', ), syn_file=dict( argstr='-syn %s', - extensions=None, name_source=['source_file'], name_template='%s_syn.nii.gz', ), te_file=dict( argstr='-TE %s', - extensions=None, xor=['te_file'], ), te_value=dict( argstr='-TE %s', - extensions=None, xor=['te_file'], ), ten_type=dict(usedefault=True, ), tenmap2_file=dict( argstr='-tenmap2 %s', - extensions=None, name_source=['source_file'], name_template='%s_tenmap2.nii.gz', requires=['dti_flag'], ), tenmap_file=dict( argstr='-tenmap %s', - extensions=None, name_source=['source_file'], name_template='%s_tenmap.nii.gz', requires=['dti_flag'], ), v1map_file=dict( argstr='-v1map %s', - extensions=None, name_source=['source_file'], name_template='%s_v1map.nii.gz', ), @@ -220,18 +194,18 @@ def test_FitDwi_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_FitDwi_outputs(): output_map = dict( - error_file=dict(extensions=None, ), - famap_file=dict(extensions=None, ), - mcmap_file=dict(extensions=None, ), - mcout=dict(extensions=None, ), - mdmap_file=dict(extensions=None, ), - nodiff_file=dict(extensions=None, ), - res_file=dict(extensions=None, ), - rgbmap_file=dict(extensions=None, ), - syn_file=dict(extensions=None, ), - tenmap2_file=dict(extensions=None, ), - tenmap_file=dict(extensions=None, ), - v1map_file=dict(extensions=None, ), + error_file=dict(), + famap_file=dict(), + mcmap_file=dict(), + mcout=dict(), + mdmap_file=dict(), + nodiff_file=dict(), + res_file=dict(), + rgbmap_file=dict(), + syn_file=dict(), + tenmap2_file=dict(), + tenmap_file=dict(), + v1map_file=dict(), ) outputs = FitDwi.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py b/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py index 
b184e448cd..392654fd5c 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py @@ -7,10 +7,7 @@ def test_FitQt1_inputs(): input_map = dict( acceptance=dict(argstr='-acceptance %f', ), args=dict(argstr='%s', ), - b1map=dict( - argstr='-b1map %s', - extensions=None, - ), + b1map=dict(argstr='-b1map %s', ), comp_file=dict( argstr='-comp %s', extensions=None, @@ -31,10 +28,7 @@ def test_FitQt1_inputs(): argstr='-flips %s', sep=' ', ), - flips_list=dict( - argstr='-fliplist %s', - extensions=None, - ), + flips_list=dict(argstr='-fliplist %s', ), gn_flag=dict( argstr='-gn', position=8, @@ -69,10 +63,7 @@ def test_FitQt1_inputs(): name_template='%s_mcmap.nii.gz', ), mcmaxit=dict(argstr='-mcmaxit %d', ), - mcout=dict( - argstr='-mcout %s', - extensions=None, - ), + mcout=dict(argstr='-mcout %s', ), mcsamples=dict(argstr='-mcsamples %d', ), nb_comp=dict( argstr='-nc %d', @@ -110,10 +101,7 @@ def test_FitQt1_inputs(): name_source=['source_file'], name_template='%s_syn.nii.gz', ), - t1_list=dict( - argstr='-T1list %s', - extensions=None, - ), + t1_list=dict(argstr='-T1list %s', ), t1map_file=dict( argstr='-t1map %s', extensions=None, @@ -131,10 +119,7 @@ def test_FitQt1_inputs(): position=14, sep=' ', ), - tis_list=dict( - argstr='-TIlist %s', - extensions=None, - ), + tis_list=dict(argstr='-TIlist %s', ), tr_value=dict( argstr='-TR %f', position=5, diff --git a/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py b/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py index 9a9b5c421c..3fe17160db 100644 --- a/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py +++ b/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py @@ -13,7 +13,7 @@ def test_EstimateContrast_inputs(): constants=dict(mandatory=True, ), contrasts=dict(mandatory=True, ), dof=dict(mandatory=True, ), - mask=dict(extensions=None, ), + mask=dict(), nvbeta=dict(mandatory=True, ), reg_names=dict(mandatory=True, ), s2=dict( diff --git a/nipype/interfaces/nipy/tests/test_auto_FitGLM.py b/nipype/interfaces/nipy/tests/test_auto_FitGLM.py index 71ca221efc..a700c18d43 100644 --- a/nipype/interfaces/nipy/tests/test_auto_FitGLM.py +++ b/nipype/interfaces/nipy/tests/test_auto_FitGLM.py @@ -8,7 +8,7 @@ def test_FitGLM_inputs(): TR=dict(mandatory=True, ), drift_model=dict(usedefault=True, ), hrf_model=dict(usedefault=True, ), - mask=dict(extensions=None, ), + mask=dict(), method=dict(usedefault=True, ), model=dict(usedefault=True, ), normalize_design_matrix=dict(usedefault=True, ), @@ -30,7 +30,7 @@ def test_FitGLM_outputs(): dof=dict(), nvbeta=dict(), reg_names=dict(), - residuals=dict(extensions=None, ), + residuals=dict(), s2=dict(extensions=None, ), ) outputs = FitGLM.output_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py b/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py index 2eaeb1acef..866af46740 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py @@ -38,10 +38,11 @@ def test_UnbiasedNonLocalMeans_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_UnbiasedNonLocalMeans_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = UnbiasedNonLocalMeans.output_spec() 
for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py index 33e52ed707..1ca79a6e96 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py @@ -27,10 +27,11 @@ def test_DTIexport_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DTIexport_outputs(): - output_map = dict(outputFile=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputFile=dict( + extensions=None, + position=-1, + ), ) outputs = DTIexport.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py index ddb07c9428..58ae495f3f 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py @@ -28,10 +28,11 @@ def test_DTIimport_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DTIimport_outputs(): - output_map = dict(outputTensor=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputTensor=dict( + extensions=None, + position=-1, + ), ) outputs = DTIimport.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py index aa0b6f97b5..f8000c49b1 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py @@ -37,10 +37,11 @@ def test_DWIJointRicianLMMSEFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DWIJointRicianLMMSEFilter_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = DWIJointRicianLMMSEFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py index 69509fcf28..29b5e0b7e1 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py @@ -43,10 +43,11 @@ def test_DWIRicianLMMSEFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DWIRicianLMMSEFilter_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = DWIRicianLMMSEFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py index 060c47ecc2..5dcdbd7e2e 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py +++ 
b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py @@ -28,10 +28,11 @@ def test_DiffusionTensorScalarMeasurements_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DiffusionTensorScalarMeasurements_outputs(): - output_map = dict(outputScalar=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputScalar=dict( + extensions=None, + position=-1, + ), ) outputs = DiffusionTensorScalarMeasurements.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py index 9cfcdb14fb..bd35268bdb 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py @@ -73,10 +73,11 @@ def test_ResampleDTIVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ResampleDTIVolume_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = ResampleDTIVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py index 42a7b17a19..d2b17c562a 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py @@ -33,10 +33,11 @@ def test_AddScalarVolumes_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_AddScalarVolumes_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = AddScalarVolumes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py index 0c05002ff8..bbcb2f077c 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py @@ -28,10 +28,11 @@ def test_CastScalarVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CastScalarVolume_outputs(): - output_map = dict(OutputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + OutputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = CastScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py index b602012d0c..24f5b74307 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py @@ -36,10 +36,11 @@ def test_CheckerBoardFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CheckerBoardFilter_outputs(): - output_map = 
dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = CheckerBoardFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py index 7389aa0cee..0240ad676a 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py @@ -30,10 +30,11 @@ def test_CurvatureAnisotropicDiffusion_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CurvatureAnisotropicDiffusion_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = CurvatureAnisotropicDiffusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py index d6f93cb41d..c47382aef5 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py @@ -31,10 +31,11 @@ def test_ExtractSkeleton_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ExtractSkeleton_outputs(): - output_map = dict(OutputImageFileName=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + OutputImageFileName=dict( + extensions=None, + position=-1, + ), ) outputs = ExtractSkeleton.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py index 9511f9ce1d..2bdb73c4d5 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py @@ -28,10 +28,11 @@ def test_GaussianBlurImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GaussianBlurImageFilter_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = GaussianBlurImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py index 02c9d7fb20..6d3e40c8de 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py @@ -30,10 +30,11 @@ def test_GradientAnisotropicDiffusion_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GradientAnisotropicDiffusion_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = 
GradientAnisotropicDiffusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py index 45b2b4b8dc..f1ff2c3809 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py @@ -27,10 +27,11 @@ def test_GrayscaleFillHoleImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GrayscaleFillHoleImageFilter_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = GrayscaleFillHoleImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py index 77536e147d..6aee86282a 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py @@ -27,10 +27,11 @@ def test_GrayscaleGrindPeakImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GrayscaleGrindPeakImageFilter_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = GrayscaleGrindPeakImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py b/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py index 9541702731..c839c50abf 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py @@ -35,10 +35,11 @@ def test_HistogramMatching_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_HistogramMatching_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = HistogramMatching.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py index c3373f80ec..7fd2c31db3 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py @@ -33,10 +33,11 @@ def test_ImageLabelCombine_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ImageLabelCombine_outputs(): - output_map = dict(OutputLabelMap=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + OutputLabelMap=dict( + extensions=None, + position=-1, + ), ) outputs = ImageLabelCombine.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py 
b/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py index 11999f2001..56d68199af 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py @@ -34,10 +34,11 @@ def test_MaskScalarVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MaskScalarVolume_outputs(): - output_map = dict(OutputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + OutputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = MaskScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py index 4c890aa30f..c0bf97e152 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py @@ -31,10 +31,11 @@ def test_MedianImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MedianImageFilter_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = MedianImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py index 99772b41db..cc39d5b7b1 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py @@ -33,10 +33,11 @@ def test_MultiplyScalarVolumes_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MultiplyScalarVolumes_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = MultiplyScalarVolumes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py index 28a9876943..12d7af77c4 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py @@ -71,10 +71,11 @@ def test_ResampleScalarVectorDWIVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ResampleScalarVectorDWIVolume_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = ResampleScalarVectorDWIVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py index 62bf214bf4..106cd843a4 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py +++ 
b/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py @@ -33,10 +33,11 @@ def test_SubtractScalarVolumes_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SubtractScalarVolumes_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = SubtractScalarVolumes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py index ce7f97ec58..e63ed5923f 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py @@ -32,10 +32,11 @@ def test_ThresholdScalarVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ThresholdScalarVolume_outputs(): - output_map = dict(OutputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + OutputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = ThresholdScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py index c8100fd74b..89832bf9ba 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py @@ -34,10 +34,11 @@ def test_VotingBinaryHoleFillingImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_VotingBinaryHoleFillingImageFilter_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = VotingBinaryHoleFillingImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py b/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py index a121f85614..f3c1ec6ff0 100644 --- a/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py +++ b/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py @@ -41,10 +41,11 @@ def test_DWIUnbiasedNonLocalMeansFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DWIUnbiasedNonLocalMeansFilter_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = DWIUnbiasedNonLocalMeansFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py index 6af3c1d54c..19b41f7127 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py +++ 
b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py @@ -30,10 +30,11 @@ def test_OtsuThresholdImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_OtsuThresholdImageFilter_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = OtsuThresholdImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py index c73313751e..b9fa12f2aa 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py @@ -31,10 +31,11 @@ def test_OtsuThresholdSegmentation_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_OtsuThresholdSegmentation_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = OtsuThresholdSegmentation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py b/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py index 66207fb604..7f1c259cd6 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py @@ -32,10 +32,11 @@ def test_ResampleScalarVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ResampleScalarVolume_outputs(): - output_map = dict(OutputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + OutputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = ResampleScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py index c956ae34ec..6befb36860 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py @@ -37,10 +37,11 @@ def test_RobustStatisticsSegmenter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_RobustStatisticsSegmenter_outputs(): - output_map = dict(segmentedImageFileName=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + segmentedImageFileName=dict( + extensions=None, + position=-1, + ), ) outputs = RobustStatisticsSegmenter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py index 8ffdd04355..5f880c08cb 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py @@ -34,10 +34,11 @@ def 
test_SimpleRegionGrowingSegmentation_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SimpleRegionGrowingSegmentation_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = SimpleRegionGrowingSegmentation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py b/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py index e18c0f0d5d..3669a21fc3 100644 --- a/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py +++ b/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py @@ -33,10 +33,11 @@ def test_GrayscaleModelMaker_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GrayscaleModelMaker_outputs(): - output_map = dict(OutputGeometry=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + OutputGeometry=dict( + extensions=None, + position=-1, + ), ) outputs = GrayscaleModelMaker.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py b/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py index d196dbf5d8..faf982a342 100644 --- a/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py +++ b/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py @@ -31,10 +31,11 @@ def test_LabelMapSmoothing_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_LabelMapSmoothing_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = LabelMapSmoothing.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_MergeModels.py b/nipype/interfaces/slicer/tests/test_auto_MergeModels.py index 2e09b69bd1..c25d4ebcf3 100644 --- a/nipype/interfaces/slicer/tests/test_auto_MergeModels.py +++ b/nipype/interfaces/slicer/tests/test_auto_MergeModels.py @@ -32,10 +32,11 @@ def test_MergeModels_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MergeModels_outputs(): - output_map = dict(ModelOutput=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + ModelOutput=dict( + extensions=None, + position=-1, + ), ) outputs = MergeModels.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py b/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py index 2e684e3e65..d39ae392bd 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py +++ b/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py @@ -33,10 +33,11 @@ def test_ModelToLabelMap_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ModelToLabelMap_outputs(): - output_map = dict(OutputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + OutputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = ModelToLabelMap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py 
b/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py index 479bb842bb..d0d437915b 100644 --- a/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py +++ b/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py @@ -28,10 +28,11 @@ def test_OrientScalarVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_OrientScalarVolume_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = OrientScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py b/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py index b4bbdbe283..528ba47e1b 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py +++ b/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py @@ -32,10 +32,11 @@ def test_ProbeVolumeWithModel_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ProbeVolumeWithModel_outputs(): - output_map = dict(OutputModel=dict( - extensions=None, - position=-1, - ), ) + output_map = dict( + OutputModel=dict( + extensions=None, + position=-1, + ), ) outputs = ProbeVolumeWithModel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/utility/tests/test_auto_Rename.py b/nipype/interfaces/utility/tests/test_auto_Rename.py index 70ced0ecdd..6b56badd6f 100644 --- a/nipype/interfaces/utility/tests/test_auto_Rename.py +++ b/nipype/interfaces/utility/tests/test_auto_Rename.py @@ -20,7 +20,7 @@ def test_Rename_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Rename_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(), ) outputs = Rename.output_spec() for key, metadata in list(output_map.items()): From fc221f8dc3275c6e1b8f3baca6bcaab9fb265859 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20F=C3=A9rat?= Date: Fri, 30 Aug 2019 12:25:50 +0200 Subject: [PATCH 0344/1665] FIX command --- nipype/interfaces/mne/base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/mne/base.py b/nipype/interfaces/mne/base.py index 7f53071372..b6c8320e7a 100644 --- a/nipype/interfaces/mne/base.py +++ b/nipype/interfaces/mne/base.py @@ -89,12 +89,12 @@ class WatershedBEM(FSCommand): >>> bem.inputs.subject_id = 'subj1' >>> bem.inputs.subjects_dir = '.' 
>>> bem.cmdline - 'mne_watershed_bem --overwrite --subject subj1 --volume T1' + 'mne watershed_bem --overwrite --subject subj1 --volume T1' >>> bem.run() # doctest: +SKIP """ - _cmd = 'mne_watershed_bem' + _cmd = 'mne watershed_bem' input_spec = WatershedBEMInputSpec output_spec = WatershedBEMOutputSpec _additional_metadata = ['loc', 'altkey'] From 0b695ae1bc0b95b60d20ac46cd3f09c2338e4f84 Mon Sep 17 00:00:00 2001 From: oesteban Date: Fri, 30 Aug 2019 09:54:34 -0700 Subject: [PATCH 0345/1665] fix: make sure returned paths are absolute --- nipype/interfaces/ants/segmentation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py index a62fdab8d9..ab4c91fc94 100644 --- a/nipype/interfaces/ants/segmentation.py +++ b/nipype/interfaces/ants/segmentation.py @@ -422,7 +422,7 @@ def _parse_inputs(self, skip=None): bias_image = self.inputs.bias_image if not isdefined(bias_image): bias_image = fname_presuffix(os.path.basename(self.inputs.input_image), - suffix='_bias') + suffix='_bias', newpath=os.getcwd()) self._out_bias_file = bias_image return super(N4BiasFieldCorrection, self)._parse_inputs(skip=skip) From 4af4c07fd0f4a9c70590b86074a53c4539dd9308 Mon Sep 17 00:00:00 2001 From: oesteban Date: Fri, 30 Aug 2019 10:04:39 -0700 Subject: [PATCH 0346/1665] enh: note that new ``-r`` option works only with ants>=2.1.0 [skip ci] --- nipype/interfaces/ants/segmentation.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py index ab4c91fc94..7e361e7404 100644 --- a/nipype/interfaces/ants/segmentation.py +++ b/nipype/interfaces/ants/segmentation.py @@ -311,6 +311,7 @@ class N4BiasFieldCorrectionInputSpec(ANTSCommandInputSpec): rescale_intensities = traits.Bool( False, usedefault=True, argstr='-r', desc="""\ +[NOTE: Only ANTs>=2.1.0] At each iteration, a new intensity mapping is calculated and applied but there is nothing which constrains the new intensity range to be within certain values. The result is that the range can "drift" from the original at each iteration. 
From 2820411a861cc7dd805b183246ac2859d443943f Mon Sep 17 00:00:00 2001 From: oesteban Date: Fri, 30 Aug 2019 13:17:50 -0700 Subject: [PATCH 0347/1665] fix: address @effigies' comments --- nipype/interfaces/ants/segmentation.py | 6 +++--- .../ants/tests/test_auto_N4BiasFieldCorrection.py | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py index 7e361e7404..fe5510af18 100644 --- a/nipype/interfaces/ants/segmentation.py +++ b/nipype/interfaces/ants/segmentation.py @@ -309,7 +309,7 @@ class N4BiasFieldCorrectionInputSpec(ANTSCommandInputSpec): desc='copy headers of the original image into the ' 'output (corrected) file') rescale_intensities = traits.Bool( - False, usedefault=True, argstr='-r', + False, usedefault=True, argstr='-r', min_ver='2.1.0', desc="""\ [NOTE: Only ANTs>=2.1.0] At each iteration, a new intensity mapping is calculated and applied but there @@ -423,8 +423,8 @@ def _parse_inputs(self, skip=None): bias_image = self.inputs.bias_image if not isdefined(bias_image): bias_image = fname_presuffix(os.path.basename(self.inputs.input_image), - suffix='_bias', newpath=os.getcwd()) - self._out_bias_file = bias_image + suffix='_bias') + self._out_bias_file = os.path.abspath(bias_image) return super(N4BiasFieldCorrection, self)._parse_inputs(skip=skip) def _list_outputs(self): diff --git a/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py b/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py index b1fc710ea5..8d39968511 100644 --- a/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py +++ b/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py @@ -48,6 +48,7 @@ def test_N4BiasFieldCorrection_inputs(): ), rescale_intensities=dict( argstr='-r', + min_ver='2.1.0', usedefault=True, ), save_bias=dict( From 6818e979af91f45b0b9f66d2ed24fcc07da785ec Mon Sep 17 00:00:00 2001 From: oesteban Date: Fri, 30 Aug 2019 14:43:27 -0700 Subject: [PATCH 0348/1665] fix: add ELLIPSIS to doctest --- nipype/interfaces/ants/segmentation.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py index fe5510af18..98c09c2125 100644 --- a/nipype/interfaces/ants/segmentation.py +++ b/nipype/interfaces/ants/segmentation.py @@ -378,9 +378,9 @@ class N4BiasFieldCorrection(ANTSCommand): >>> n4_4.inputs.input_image = 'structural.nii' >>> n4_4.inputs.save_bias = True >>> n4_4.inputs.dimension = 3 - >>> n4_4.cmdline + >>> n4_4.cmdline # doctest: +ELLIPSIS 'N4BiasFieldCorrection -d 3 --input-image structural.nii \ ---output [ structural_corrected.nii, structural_bias.nii ]' +--output [ structural_corrected.nii, ...structural_bias.nii ]' """ _cmd = 'N4BiasFieldCorrection' From f37b65eeeaaf4b7e04fb43feeff16c61cf0ec178 Mon Sep 17 00:00:00 2001 From: oesteban Date: Fri, 30 Aug 2019 14:55:44 -0700 Subject: [PATCH 0349/1665] fix: roll back ELLIPSIS, calculate abspath in _list_outputs() --- nipype/interfaces/ants/segmentation.py | 29 ++++++++++++-------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py index 98c09c2125..4118fd53bb 100644 --- a/nipype/interfaces/ants/segmentation.py +++ b/nipype/interfaces/ants/segmentation.py @@ -378,9 +378,9 @@ class N4BiasFieldCorrection(ANTSCommand): >>> n4_4.inputs.input_image = 'structural.nii' >>> n4_4.inputs.save_bias = True >>> n4_4.inputs.dimension = 3 - >>> 
n4_4.cmdline # doctest: +ELLIPSIS + >>> n4_4.cmdline 'N4BiasFieldCorrection -d 3 --input-image structural.nii \ ---output [ structural_corrected.nii, ...structural_bias.nii ]' +--output [ structural_corrected.nii, structural_bias.nii ]' """ _cmd = 'N4BiasFieldCorrection' @@ -388,6 +388,7 @@ class N4BiasFieldCorrection(ANTSCommand): output_spec = N4BiasFieldCorrectionOutputSpec def __init__(self, *args, **kwargs): + """Instantiate the N4BiasFieldCorrection interface.""" self._out_bias_file = None super(N4BiasFieldCorrection, self).__init__(*args, **kwargs) @@ -424,32 +425,28 @@ def _parse_inputs(self, skip=None): if not isdefined(bias_image): bias_image = fname_presuffix(os.path.basename(self.inputs.input_image), suffix='_bias') - self._out_bias_file = os.path.abspath(bias_image) + self._out_bias_file = bias_image return super(N4BiasFieldCorrection, self)._parse_inputs(skip=skip) def _list_outputs(self): outputs = super(N4BiasFieldCorrection, self)._list_outputs() - if self._out_bias_file: - outputs['bias_image'] = self._out_bias_file - return outputs - - def _run_interface(self, runtime, correct_return_codes=(0, )): - runtime = super(N4BiasFieldCorrection, self)._run_interface( - runtime, correct_return_codes) - if self.inputs.copy_header and runtime.returncode in correct_return_codes: - self._copy_header(self.inputs.output_image) - if self._out_bias_file: - self._copy_header(self._out_bias_file) + # Fix headers + if self.inputs.copy_header: + self._copy_header(outputs['output_image']) - return runtime + if self._out_bias_file: + outputs['bias_image'] = os.path.abspath(self._out_bias_file) + if self.inputs.copy_header: + self._copy_header(outputs['bias_image']) + return outputs def _copy_header(self, fname): """Copy header from input image to an output image.""" import nibabel as nb in_img = nb.load(self.inputs.input_image) out_img = nb.load(fname, mmap=False) - new_img = out_img.__class__(out_img.get_data(), in_img.affine, + new_img = out_img.__class__(out_img.get_fdata(), in_img.affine, in_img.header) new_img.set_data_dtype(out_img.get_data_dtype()) new_img.to_filename(fname) From cd227a2f7db46a26fa55f723ef4f2d734a1134c2 Mon Sep 17 00:00:00 2001 From: rciric Date: Sun, 11 Aug 2019 17:27:18 -0700 Subject: [PATCH 0350/1665] (enh) additional arguments to ANTs N4BiasFieldCorrection --- nipype/interfaces/ants/segmentation.py | 19 +++++++++++++++++++ .../tests/test_auto_N4BiasFieldCorrection.py | 2 ++ 2 files changed, 21 insertions(+) diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py index 4118fd53bb..eff769c628 100644 --- a/nipype/interfaces/ants/segmentation.py +++ b/nipype/interfaces/ants/segmentation.py @@ -317,6 +317,16 @@ class N4BiasFieldCorrectionInputSpec(ANTSCommandInputSpec): The result is that the range can "drift" from the original at each iteration. This option rescales to the [min,max] range of the original image intensities within the user-specified mask.""") + histogram_sharpening = traits.Tuple( + (0.15, 0.01, 200), + traits.Float, traits.Float, traits.Int, + argstr='--histogram-sharpening [%g,%g,%d]', + desc="""\ +Three-values tuple of histogram sharpening parameters \ +(FWHM, wienerNose, numberOfHistogramBins). +These options describe the histogram sharpening parameters, i.e. the \ +deconvolution step parameters described in the original N3 algorithm. 
+The default values have been shown to work fairly well.""") class N4BiasFieldCorrectionOutputSpec(TraitedSpec): @@ -381,6 +391,15 @@ class N4BiasFieldCorrection(ANTSCommand): >>> n4_4.cmdline 'N4BiasFieldCorrection -d 3 --input-image structural.nii \ --output [ structural_corrected.nii, structural_bias.nii ]' + + >>> n4_5 = N4BiasFieldCorrection() + >>> n4_5.inputs.input_image = 'structural.nii' + >>> n4_5.inputs.dimension = 3 + >>> n4_5.inputs.histogram_sharpening = (0.12, 0.02, 200) + >>> n4_5.cmdline + 'N4BiasFieldCorrection -d 3 --histogram-sharpening [0.12,0.02,200] \ +--input-image structural.nii --output structural_corrected.nii' + """ _cmd = 'N4BiasFieldCorrection' diff --git a/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py b/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py index 8d39968511..833e32315b 100644 --- a/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py +++ b/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py @@ -25,6 +25,8 @@ def test_N4BiasFieldCorrection_inputs(): nohash=True, usedefault=True, ), + histogram_sharpening=dict( + argstr='--histogram-sharpening [%g,%g,%d]', ), input_image=dict( argstr='--input-image %s', extensions=None, From ba66142b92d973458f8da21ec69b0fd3ddb26bd0 Mon Sep 17 00:00:00 2001 From: Matteo Mancini Date: Tue, 3 Sep 2019 11:36:41 -0400 Subject: [PATCH 0351/1665] Update nipype/interfaces/mrtrix3/reconst.py Co-Authored-By: Chris Markiewicz --- nipype/interfaces/mrtrix3/reconst.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/reconst.py b/nipype/interfaces/mrtrix3/reconst.py index 6deb4c22b8..55c2a82a30 100644 --- a/nipype/interfaces/mrtrix3/reconst.py +++ b/nipype/interfaces/mrtrix3/reconst.py @@ -117,7 +117,6 @@ class EstimateFODInputSpec(MRTrix3BaseInputSpec): desc='specify one or more dw gradient shells') max_sh = InputMultiObject( traits.Int, - value=[8], argstr='-lmax %s', sep=',', desc=('maximum harmonic degree of response function - single value for single-shell response, list for multi-shell response')) From ef4050476ee00cb80f9c5ed635f1d3baba0d2841 Mon Sep 17 00:00:00 2001 From: Matteo Mancini Date: Tue, 3 Sep 2019 11:36:59 -0400 Subject: [PATCH 0352/1665] Update nipype/interfaces/mrtrix3/reconst.py Co-Authored-By: Chris Markiewicz --- nipype/interfaces/mrtrix3/reconst.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/reconst.py b/nipype/interfaces/mrtrix3/reconst.py index 55c2a82a30..a87bc9aab0 100644 --- a/nipype/interfaces/mrtrix3/reconst.py +++ b/nipype/interfaces/mrtrix3/reconst.py @@ -105,7 +105,7 @@ class EstimateFODInputSpec(MRTrix3BaseInputSpec): gm_odf = File('gm.mif', usedefault=False, argstr='%s', position=-3, desc='output GM ODF') csf_txt = File(argstr='%s', position=-2, desc='CSF response text file') - csf_odf = File('csf.mif', usedefault=False, argstr='%s', + csf_odf = File(argstr='%s', position=-1, desc='output CSF ODF') mask_file = File(exists=True, argstr='-mask %s', desc='mask image') From be919a58c9e09031ba12b3a9441e4d3c2806266e Mon Sep 17 00:00:00 2001 From: Matteo Mancini Date: Tue, 3 Sep 2019 11:37:08 -0400 Subject: [PATCH 0353/1665] Update nipype/interfaces/mrtrix3/reconst.py Co-Authored-By: Chris Markiewicz --- nipype/interfaces/mrtrix3/reconst.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/reconst.py b/nipype/interfaces/mrtrix3/reconst.py index a87bc9aab0..6487e61d38 100644 --- a/nipype/interfaces/mrtrix3/reconst.py +++ 
b/nipype/interfaces/mrtrix3/reconst.py @@ -102,7 +102,7 @@ class EstimateFODInputSpec(MRTrix3BaseInputSpec): mandatory=True, desc='output WM ODF') gm_txt = File(argstr='%s', position=-4, desc='GM response text file') - gm_odf = File('gm.mif', usedefault=False, argstr='%s', + gm_odf = File(argstr='%s', position=-3, desc='output GM ODF') csf_txt = File(argstr='%s', position=-2, desc='CSF response text file') csf_odf = File(argstr='%s', From 508df5e0531fb691b8b052e81a3bc03d1300d594 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 5 Sep 2019 09:29:02 -0400 Subject: [PATCH 0354/1665] TEST: make specs --- .../algorithms/tests/test_auto_AddCSVRow.py | 5 +- nipype/algorithms/tests/test_auto_Gunzip.py | 9 ++- .../tests/test_auto_NonSteadyStateDetector.py | 9 ++- .../afni/tests/test_auto_Allineate.py | 1 + nipype/interfaces/afni/tests/test_auto_Cat.py | 7 +-- .../afni/tests/test_auto_ClipLevel.py | 1 + .../afni/tests/test_auto_LocalBistat.py | 7 ++- .../afni/tests/test_auto_Localstat.py | 6 +- .../afni/tests/test_auto_NwarpApply.py | 5 +- .../afni/tests/test_auto_OneDToolPy.py | 1 + .../interfaces/afni/tests/test_auto_Qwarp.py | 5 +- .../afni/tests/test_auto_QwarpPlusMinus.py | 5 +- .../afni/tests/test_auto_ROIStats.py | 5 +- .../interfaces/afni/tests/test_auto_ReHo.py | 6 +- .../afni/tests/test_auto_Remlfit.py | 1 + .../afni/tests/test_auto_Resample.py | 5 +- .../afni/tests/test_auto_TCorrMap.py | 5 +- .../afni/tests/test_auto_Zeropad.py | 1 + .../interfaces/ants/tests/test_auto_ANTS.py | 4 +- .../ants/tests/test_auto_AntsJointFusion.py | 9 ++- .../ants/tests/test_auto_Atropos.py | 4 +- .../base/tests/test_auto_StdOutCommandLine.py | 1 + .../camino/tests/test_auto_AnalyzeHeader.py | 1 + .../tests/test_auto_ComputeEigensystem.py | 1 + .../test_auto_ComputeFractionalAnisotropy.py | 1 + .../tests/test_auto_ComputeTensorTrace.py | 1 + .../camino/tests/test_auto_DTIFit.py | 1 + .../camino/tests/test_auto_DTLUTGen.py | 1 + .../camino/tests/test_auto_FSL2Scheme.py | 1 + .../camino/tests/test_auto_Image2Voxel.py | 1 + .../camino/tests/test_auto_LinRecon.py | 1 + .../interfaces/camino/tests/test_auto_MESD.py | 1 + .../camino/tests/test_auto_ModelFit.py | 1 + .../camino/tests/test_auto_NIfTIDT2Camino.py | 1 + .../camino/tests/test_auto_PicoPDFs.py | 1 + .../camino/tests/test_auto_ProcStreamlines.py | 1 + .../camino/tests/test_auto_QBallMX.py | 1 + .../camino/tests/test_auto_SFLUTGen.py | 1 + .../camino/tests/test_auto_SFPICOCalibData.py | 1 + .../camino/tests/test_auto_SFPeaks.py | 1 + .../camino/tests/test_auto_Shredder.py | 1 + .../camino/tests/test_auto_TractShredder.py | 1 + .../camino/tests/test_auto_VtkStreamlines.py | 1 + .../freesurfer/tests/test_auto_Aparc2Aseg.py | 9 ++- .../freesurfer/tests/test_auto_Apas2Aseg.py | 9 ++- .../freesurfer/tests/test_auto_CALabel.py | 10 +++- .../freesurfer/tests/test_auto_CANormalize.py | 2 +- .../freesurfer/tests/test_auto_CARegister.py | 2 +- .../test_auto_CheckTalairachAlignment.py | 2 +- .../tests/test_auto_ConcatenateLTA.py | 2 + .../freesurfer/tests/test_auto_Contrast.py | 10 +++- .../freesurfer/tests/test_auto_FixTopology.py | 5 +- .../freesurfer/tests/test_auto_MRIsCALabel.py | 10 +++- .../freesurfer/tests/test_auto_MS_LDA.py | 12 +++- .../freesurfer/tests/test_auto_Normalize.py | 2 +- .../tests/test_auto_ParcellationStats.py | 9 ++- .../tests/test_auto_RegisterAVItoTalairach.py | 2 +- .../tests/test_auto_RelabelHypointensities.py | 9 ++- .../tests/test_auto_SegStatsReconAll.py | 5 +- .../tests/test_auto_SurfaceSnapshots.py | 1 + 
.../tests/test_auto_TalairachAVI.py | 6 +- .../freesurfer/tests/test_auto_TalairachQC.py | 9 ++- .../fsl/tests/test_auto_AccuracyTester.py | 9 ++- .../interfaces/fsl/tests/test_auto_Cleaner.py | 3 + .../interfaces/fsl/tests/test_auto_Cluster.py | 5 +- .../fsl/tests/test_auto_FeatureExtractor.py | 11 ++-- .../fsl/tests/test_auto_FindTheBiggest.py | 9 ++- .../fsl/tests/test_auto_TrainingSetCreator.py | 22 ++++---- .../minc/tests/test_auto_Average.py | 6 +- .../interfaces/minc/tests/test_auto_BBox.py | 1 + .../interfaces/minc/tests/test_auto_Beast.py | 7 ++- .../interfaces/minc/tests/test_auto_Calc.py | 2 + .../interfaces/minc/tests/test_auto_Dump.py | 1 + .../minc/tests/test_auto_Extract.py | 1 + .../interfaces/minc/tests/test_auto_Math.py | 1 + .../interfaces/minc/tests/test_auto_Norm.py | 6 +- .../minc/tests/test_auto_Resample.py | 10 +++- .../minc/tests/test_auto_Reshape.py | 1 + .../interfaces/minc/tests/test_auto_ToRaw.py | 1 + .../minc/tests/test_auto_VolSymm.py | 2 + .../minc/tests/test_auto_XfmInvert.py | 1 + .../mrtrix3/tests/test_auto_BrainMask.py | 6 +- .../mrtrix3/tests/test_auto_DWIExtract.py | 6 +- .../mrtrix3/tests/test_auto_FitTensor.py | 6 +- .../mrtrix3/tests/test_auto_Generate5tt.py | 6 +- .../mrtrix3/tests/test_auto_MRConvert.py | 6 +- .../mrtrix3/tests/test_auto_MRMath.py | 6 +- .../mrtrix3/tests/test_auto_ResponseSD.py | 6 +- .../mrtrix3/tests/test_auto_Tractography.py | 6 +- .../niftyfit/tests/test_auto_DwiTool.py | 26 ++++++--- .../niftyfit/tests/test_auto_FitAsl.py | 41 +++++++++++--- .../niftyfit/tests/test_auto_FitDwi.py | 56 ++++++++++++++----- .../niftyfit/tests/test_auto_FitQt1.py | 25 +++++++-- .../nipy/tests/test_auto_EstimateContrast.py | 2 +- .../interfaces/nipy/tests/test_auto_FitGLM.py | 4 +- .../tests/test_auto_UnbiasedNonLocalMeans.py | 9 ++- .../diffusion/tests/test_auto_DTIexport.py | 9 ++- .../diffusion/tests/test_auto_DTIimport.py | 9 ++- .../test_auto_DWIJointRicianLMMSEFilter.py | 9 ++- .../tests/test_auto_DWIRicianLMMSEFilter.py | 9 ++- ..._auto_DiffusionTensorScalarMeasurements.py | 9 ++- .../tests/test_auto_ResampleDTIVolume.py | 9 ++- .../tests/test_auto_AddScalarVolumes.py | 9 ++- .../tests/test_auto_CastScalarVolume.py | 9 ++- .../tests/test_auto_CheckerBoardFilter.py | 9 ++- ...test_auto_CurvatureAnisotropicDiffusion.py | 9 ++- .../tests/test_auto_ExtractSkeleton.py | 9 ++- .../test_auto_GaussianBlurImageFilter.py | 9 ++- .../test_auto_GradientAnisotropicDiffusion.py | 9 ++- .../test_auto_GrayscaleFillHoleImageFilter.py | 9 ++- ...test_auto_GrayscaleGrindPeakImageFilter.py | 9 ++- .../tests/test_auto_HistogramMatching.py | 9 ++- .../tests/test_auto_ImageLabelCombine.py | 9 ++- .../tests/test_auto_MaskScalarVolume.py | 9 ++- .../tests/test_auto_MedianImageFilter.py | 9 ++- .../tests/test_auto_MultiplyScalarVolumes.py | 9 ++- ...test_auto_ResampleScalarVectorDWIVolume.py | 9 ++- .../tests/test_auto_SubtractScalarVolumes.py | 9 ++- .../tests/test_auto_ThresholdScalarVolume.py | 9 ++- ...auto_VotingBinaryHoleFillingImageFilter.py | 9 ++- ...est_auto_DWIUnbiasedNonLocalMeansFilter.py | 9 ++- .../test_auto_OtsuThresholdImageFilter.py | 9 ++- .../test_auto_OtsuThresholdSegmentation.py | 9 ++- .../tests/test_auto_ResampleScalarVolume.py | 9 ++- .../test_auto_RobustStatisticsSegmenter.py | 9 ++- ...st_auto_SimpleRegionGrowingSegmentation.py | 9 ++- .../tests/test_auto_GrayscaleModelMaker.py | 9 ++- .../tests/test_auto_LabelMapSmoothing.py | 9 ++- .../slicer/tests/test_auto_MergeModels.py | 9 ++- .../slicer/tests/test_auto_ModelToLabelMap.py | 9 ++- 
.../tests/test_auto_OrientScalarVolume.py | 9 ++- .../tests/test_auto_ProbeVolumeWithModel.py | 9 ++- .../utility/tests/test_auto_Rename.py | 2 +- 133 files changed, 536 insertions(+), 339 deletions(-) diff --git a/nipype/algorithms/tests/test_auto_AddCSVRow.py b/nipype/algorithms/tests/test_auto_AddCSVRow.py index 3090c8b6a9..a8c4467cbf 100644 --- a/nipype/algorithms/tests/test_auto_AddCSVRow.py +++ b/nipype/algorithms/tests/test_auto_AddCSVRow.py @@ -6,7 +6,10 @@ def test_AddCSVRow_inputs(): input_map = dict( _outputs=dict(usedefault=True, ), - in_file=dict(mandatory=True, ), + in_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = AddCSVRow.input_spec() diff --git a/nipype/algorithms/tests/test_auto_Gunzip.py b/nipype/algorithms/tests/test_auto_Gunzip.py index e583deaa10..d84e84ad61 100644 --- a/nipype/algorithms/tests/test_auto_Gunzip.py +++ b/nipype/algorithms/tests/test_auto_Gunzip.py @@ -4,11 +4,10 @@ def test_Gunzip_inputs(): - input_map = dict( - in_file=dict( - extensions=None, - mandatory=True, - ), ) + input_map = dict(in_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = Gunzip.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py b/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py index 4da9dad47d..10eb41190b 100644 --- a/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py +++ b/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py @@ -4,11 +4,10 @@ def test_NonSteadyStateDetector_inputs(): - input_map = dict( - in_file=dict( - extensions=None, - mandatory=True, - ), ) + input_map = dict(in_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = NonSteadyStateDetector.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Allineate.py b/nipype/interfaces/afni/tests/test_auto_Allineate.py index 0b110e669a..e1565376bd 100644 --- a/nipype/interfaces/afni/tests/test_auto_Allineate.py +++ b/nipype/interfaces/afni/tests/test_auto_Allineate.py @@ -86,6 +86,7 @@ def test_Allineate_inputs(): ), out_weight_file=dict( argstr='-wtprefix %s', + extensions=None, xor=['allcostx'], ), outputtype=dict(), diff --git a/nipype/interfaces/afni/tests/test_auto_Cat.py b/nipype/interfaces/afni/tests/test_auto_Cat.py index 345806f746..50806add01 100644 --- a/nipype/interfaces/afni/tests/test_auto_Cat.py +++ b/nipype/interfaces/afni/tests/test_auto_Cat.py @@ -21,10 +21,9 @@ def test_Cat_inputs(): usedefault=True, ), omitconst=dict(argstr='-nonconst', ), - out_cint=dict( - xor=[ - 'out_format', 'out_nice', 'out_double', 'out_fint', 'out_int' - ], ), + out_cint=dict(xor=[ + 'out_format', 'out_nice', 'out_double', 'out_fint', 'out_int' + ], ), out_double=dict( argstr='-d', xor=['out_format', 'out_nice', 'out_int', 'out_fint', 'out_cint'], diff --git a/nipype/interfaces/afni/tests/test_auto_ClipLevel.py b/nipype/interfaces/afni/tests/test_auto_ClipLevel.py index f9b3dbf705..96f928a809 100644 --- a/nipype/interfaces/afni/tests/test_auto_ClipLevel.py +++ b/nipype/interfaces/afni/tests/test_auto_ClipLevel.py @@ -17,6 +17,7 @@ def test_ClipLevel_inputs(): ), grad=dict( argstr='-grad %s', + extensions=None, position=3, xor='doall', ), diff --git a/nipype/interfaces/afni/tests/test_auto_LocalBistat.py b/nipype/interfaces/afni/tests/test_auto_LocalBistat.py index ed3e61d74d..4632e4cf7d 100644 --- a/nipype/interfaces/afni/tests/test_auto_LocalBistat.py +++ b/nipype/interfaces/afni/tests/test_auto_LocalBistat.py @@ -26,7 
+26,10 @@ def test_LocalBistat_inputs(): mandatory=True, position=-1, ), - mask_file=dict(argstr='-mask %s', ), + mask_file=dict( + argstr='-mask %s', + extensions=None, + ), neighborhood=dict( argstr="-nbhd '%s(%s)'", mandatory=True, @@ -37,6 +40,7 @@ def test_LocalBistat_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, keep_extension=True, name_source='in_file1', name_template='%s_bistat', @@ -49,6 +53,7 @@ def test_LocalBistat_inputs(): ), weight_file=dict( argstr='-weight %s', + extensions=None, xor=['automask'], ), ) diff --git a/nipype/interfaces/afni/tests/test_auto_Localstat.py b/nipype/interfaces/afni/tests/test_auto_Localstat.py index 011ce44da8..62dc800941 100644 --- a/nipype/interfaces/afni/tests/test_auto_Localstat.py +++ b/nipype/interfaces/afni/tests/test_auto_Localstat.py @@ -21,7 +21,10 @@ def test_Localstat_inputs(): mandatory=True, position=-1, ), - mask_file=dict(argstr='-mask %s', ), + mask_file=dict( + argstr='-mask %s', + extensions=None, + ), neighborhood=dict( argstr="-nbhd '%s(%s)'", mandatory=True, @@ -33,6 +36,7 @@ def test_Localstat_inputs(): ), out_file=dict( argstr='-prefix %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_localstat', diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpApply.py b/nipype/interfaces/afni/tests/test_auto_NwarpApply.py index e00457f4f3..87388c65ec 100644 --- a/nipype/interfaces/afni/tests/test_auto_NwarpApply.py +++ b/nipype/interfaces/afni/tests/test_auto_NwarpApply.py @@ -20,7 +20,10 @@ def test_NwarpApply_inputs(): usedefault=True, ), inv_warp=dict(argstr='-iwarp', ), - master=dict(argstr='-master %s', ), + master=dict( + argstr='-master %s', + extensions=None, + ), out_file=dict( argstr='-prefix %s', extensions=None, diff --git a/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py b/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py index f8e664a727..114fe53fba 100644 --- a/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py +++ b/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py @@ -30,6 +30,7 @@ def test_OneDToolPy_inputs(): show_censor_count=dict(argstr='-show_censor_count', ), show_cormat_warnings=dict( argstr='-show_cormat_warnings |& tee %s', + extensions=None, position=-1, xor=['out_file'], ), diff --git a/nipype/interfaces/afni/tests/test_auto_Qwarp.py b/nipype/interfaces/afni/tests/test_auto_Qwarp.py index 3ef8c2e9b2..0b8a9e38ec 100644 --- a/nipype/interfaces/afni/tests/test_auto_Qwarp.py +++ b/nipype/interfaces/afni/tests/test_auto_Qwarp.py @@ -125,7 +125,10 @@ def test_Qwarp_inputs(): name_source=['in_file'], name_template='ppp_%s', ), - out_weight_file=dict(argstr='-wtprefix %s', ), + out_weight_file=dict( + argstr='-wtprefix %s', + extensions=None, + ), outputtype=dict(), overwrite=dict(argstr='-overwrite', ), pblur=dict(argstr='-pblur %s', ), diff --git a/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py b/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py index ca27a0d682..e282d0d0a5 100644 --- a/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py +++ b/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py @@ -125,7 +125,10 @@ def test_QwarpPlusMinus_inputs(): position=0, usedefault=True, ), - out_weight_file=dict(argstr='-wtprefix %s', ), + out_weight_file=dict( + argstr='-wtprefix %s', + extensions=None, + ), outputtype=dict(), overwrite=dict(argstr='-overwrite', ), pblur=dict(argstr='-pblur %s', ), diff --git a/nipype/interfaces/afni/tests/test_auto_ROIStats.py b/nipype/interfaces/afni/tests/test_auto_ROIStats.py index 
d3c956f7c5..c7fc517ccc 100644 --- a/nipype/interfaces/afni/tests/test_auto_ROIStats.py +++ b/nipype/interfaces/afni/tests/test_auto_ROIStats.py @@ -49,7 +49,10 @@ def test_ROIStats_inputs(): position=-1, ), quiet=dict(argstr='-quiet', ), - roisel=dict(argstr='-roisel %s', ), + roisel=dict( + argstr='-roisel %s', + extensions=None, + ), stat=dict(argstr='%s...', ), zerofill=dict( argstr='-zerofill %s', diff --git a/nipype/interfaces/afni/tests/test_auto_ReHo.py b/nipype/interfaces/afni/tests/test_auto_ReHo.py index 0edcedcdaf..cf3e468159 100644 --- a/nipype/interfaces/afni/tests/test_auto_ReHo.py +++ b/nipype/interfaces/afni/tests/test_auto_ReHo.py @@ -25,13 +25,17 @@ def test_ReHo_inputs(): argstr='-in_rois %s', extensions=None, ), - mask_file=dict(argstr='-mask %s', ), + mask_file=dict( + argstr='-mask %s', + extensions=None, + ), neighborhood=dict( argstr='-nneigh %s', xor=['sphere', 'ellipsoid'], ), out_file=dict( argstr='-prefix %s', + extensions=None, keep_extension=True, name_source='in_file', name_template='%s_reho', diff --git a/nipype/interfaces/afni/tests/test_auto_Remlfit.py b/nipype/interfaces/afni/tests/test_auto_Remlfit.py index f2e6703bf6..05ee75210e 100644 --- a/nipype/interfaces/afni/tests/test_auto_Remlfit.py +++ b/nipype/interfaces/afni/tests/test_auto_Remlfit.py @@ -58,6 +58,7 @@ def test_Remlfit_inputs(): ), matim=dict( argstr='-matim %s', + extensions=None, xor=['matrix'], ), matrix=dict( diff --git a/nipype/interfaces/afni/tests/test_auto_Resample.py b/nipype/interfaces/afni/tests/test_auto_Resample.py index 560d883d75..f63e1347f5 100644 --- a/nipype/interfaces/afni/tests/test_auto_Resample.py +++ b/nipype/interfaces/afni/tests/test_auto_Resample.py @@ -17,7 +17,10 @@ def test_Resample_inputs(): mandatory=True, position=-1, ), - master=dict(argstr='-master %s', ), + master=dict( + argstr='-master %s', + extensions=None, + ), num_threads=dict( nohash=True, usedefault=True, diff --git a/nipype/interfaces/afni/tests/test_auto_TCorrMap.py b/nipype/interfaces/afni/tests/test_auto_TCorrMap.py index 1ea4c0790e..209473fe6f 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCorrMap.py +++ b/nipype/interfaces/afni/tests/test_auto_TCorrMap.py @@ -93,7 +93,10 @@ def test_TCorrMap_inputs(): name_source='in_file', suffix='_qmean', ), - regress_out_timeseries=dict(argstr='-ort %s', ), + regress_out_timeseries=dict( + argstr='-ort %s', + extensions=None, + ), seeds=dict( argstr='-seed %s', extensions=None, diff --git a/nipype/interfaces/afni/tests/test_auto_Zeropad.py b/nipype/interfaces/afni/tests/test_auto_Zeropad.py index abeceda432..1bd80cfad8 100644 --- a/nipype/interfaces/afni/tests/test_auto_Zeropad.py +++ b/nipype/interfaces/afni/tests/test_auto_Zeropad.py @@ -55,6 +55,7 @@ def test_Zeropad_inputs(): ), master=dict( argstr='-master %s', + extensions=None, xor=['I', 'S', 'A', 'P', 'L', 'R', 'z', 'RL', 'AP', 'IS', 'mm'], ), mm=dict( diff --git a/nipype/interfaces/ants/tests/test_auto_ANTS.py b/nipype/interfaces/ants/tests/test_auto_ANTS.py index 5c6fa2c501..1ee5cabe21 100644 --- a/nipype/interfaces/ants/tests/test_auto_ANTS.py +++ b/nipype/interfaces/ants/tests/test_auto_ANTS.py @@ -57,8 +57,8 @@ def test_ANTS_inputs(): regularization=dict(argstr='%s', ), regularization_deformation_field_sigma=dict( requires=['regularization'], ), - regularization_gradient_field_sigma=dict( - requires=['regularization'], ), + regularization_gradient_field_sigma=dict(requires=['regularization' + ], ), smoothing_sigmas=dict( argstr='--gaussian-smoothing-sigmas %s', sep='x', diff --git 
a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py index 02ebe6431c..a2829be3fa 100644 --- a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py +++ b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py @@ -44,11 +44,10 @@ def test_AntsJointFusion_inputs(): nohash=True, usedefault=True, ), - out_atlas_voting_weight_name_format=dict( - requires=[ - 'out_label_fusion', 'out_intensity_fusion_name_format', - 'out_label_post_prob_name_format' - ], ), + out_atlas_voting_weight_name_format=dict(requires=[ + 'out_label_fusion', 'out_intensity_fusion_name_format', + 'out_label_post_prob_name_format' + ], ), out_intensity_fusion_name_format=dict(argstr='', ), out_label_fusion=dict( argstr='%s', diff --git a/nipype/interfaces/ants/tests/test_auto_Atropos.py b/nipype/interfaces/ants/tests/test_auto_Atropos.py index be1a271f9a..73d7f6813a 100644 --- a/nipype/interfaces/ants/tests/test_auto_Atropos.py +++ b/nipype/interfaces/ants/tests/test_auto_Atropos.py @@ -53,8 +53,8 @@ def test_Atropos_inputs(): prior_probability_threshold=dict(requires=['prior_weighting'], ), prior_weighting=dict(), save_posteriors=dict(), - use_mixture_model_proportions=dict( - requires=['posterior_formulation'], ), + use_mixture_model_proportions=dict(requires=['posterior_formulation' + ], ), use_random_seed=dict( argstr='--use-random-seed %d', usedefault=True, diff --git a/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py b/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py index e39dc3acaa..95afcd3216 100644 --- a/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py @@ -12,6 +12,7 @@ def test_StdOutCommandLine_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py b/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py index 28170946ac..c40082d836 100644 --- a/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py +++ b/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py @@ -50,6 +50,7 @@ def test_AnalyzeHeader_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py b/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py index 66ac282175..edf38864fa 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py @@ -24,6 +24,7 @@ def test_ComputeEigensystem_inputs(): maxcomponents=dict(argstr='-maxcomponents %d', ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py b/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py index 9cfae77b2f..75604df01b 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py @@ -20,6 +20,7 @@ def test_ComputeFractionalAnisotropy_inputs(): inputmodel=dict(argstr='-inputmodel %s', ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py b/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py index 1443a253bd..4d31fa884c 100644 --- 
a/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py @@ -20,6 +20,7 @@ def test_ComputeTensorTrace_inputs(): inputmodel=dict(argstr='-inputmodel %s', ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_DTIFit.py b/nipype/interfaces/camino/tests/test_auto_DTIFit.py index 26d27d57d2..0870a77752 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTIFit.py +++ b/nipype/interfaces/camino/tests/test_auto_DTIFit.py @@ -26,6 +26,7 @@ def test_DTIFit_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py b/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py index f7caf77ef1..3242163c3a 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py +++ b/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py @@ -28,6 +28,7 @@ def test_DTLUTGen_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py b/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py index 350c604c99..9b8a74be6a 100644 --- a/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py +++ b/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py @@ -40,6 +40,7 @@ def test_FSL2Scheme_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py b/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py index 29864bef82..2c013cf216 100644 --- a/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py +++ b/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py @@ -18,6 +18,7 @@ def test_Image2Voxel_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_LinRecon.py b/nipype/interfaces/camino/tests/test_auto_LinRecon.py index 996d8f4b99..147cfaab5e 100644 --- a/nipype/interfaces/camino/tests/test_auto_LinRecon.py +++ b/nipype/interfaces/camino/tests/test_auto_LinRecon.py @@ -24,6 +24,7 @@ def test_LinRecon_inputs(): normalize=dict(argstr='-normalize', ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_MESD.py b/nipype/interfaces/camino/tests/test_auto_MESD.py index 57dbbf3b28..f7dfecfdb9 100644 --- a/nipype/interfaces/camino/tests/test_auto_MESD.py +++ b/nipype/interfaces/camino/tests/test_auto_MESD.py @@ -42,6 +42,7 @@ def test_MESD_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_ModelFit.py b/nipype/interfaces/camino/tests/test_auto_ModelFit.py index e5c16ec975..6d49969eb6 100644 --- a/nipype/interfaces/camino/tests/test_auto_ModelFit.py +++ b/nipype/interfaces/camino/tests/test_auto_ModelFit.py @@ -34,6 +34,7 @@ def test_ModelFit_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py b/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py index 0a5583b03e..65e0210268 100644 --- a/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py +++ b/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py @@ -26,6 +26,7 @@ def test_NIfTIDT2Camino_inputs(): ), out_file=dict( argstr='> %s', + 
extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py b/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py index c918a372f1..e5cdea5cae 100644 --- a/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py +++ b/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py @@ -36,6 +36,7 @@ def test_PicoPDFs_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py b/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py index c30ef08d8c..0386e0d54f 100644 --- a/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py +++ b/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py @@ -62,6 +62,7 @@ def test_ProcStreamlines_inputs(): noresample=dict(argstr='-noresample', ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_QBallMX.py b/nipype/interfaces/camino/tests/test_auto_QBallMX.py index 7f7d0bc99f..f452f26350 100644 --- a/nipype/interfaces/camino/tests/test_auto_QBallMX.py +++ b/nipype/interfaces/camino/tests/test_auto_QBallMX.py @@ -20,6 +20,7 @@ def test_QBallMX_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py b/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py index 795138ea89..220e116255 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py +++ b/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py @@ -35,6 +35,7 @@ def test_SFLUTGen_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py b/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py index 80222fce4c..1b71553676 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py +++ b/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py @@ -27,6 +27,7 @@ def test_SFPICOCalibData_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_SFPeaks.py b/nipype/interfaces/camino/tests/test_auto_SFPeaks.py index 8db250c58c..49ac58aa06 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFPeaks.py +++ b/nipype/interfaces/camino/tests/test_auto_SFPeaks.py @@ -38,6 +38,7 @@ def test_SFPeaks_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_Shredder.py b/nipype/interfaces/camino/tests/test_auto_Shredder.py index 594b35fc60..2d8ec43589 100644 --- a/nipype/interfaces/camino/tests/test_auto_Shredder.py +++ b/nipype/interfaces/camino/tests/test_auto_Shredder.py @@ -28,6 +28,7 @@ def test_Shredder_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_TractShredder.py b/nipype/interfaces/camino/tests/test_auto_TractShredder.py index eeed244533..e4df010c60 100644 --- a/nipype/interfaces/camino/tests/test_auto_TractShredder.py +++ b/nipype/interfaces/camino/tests/test_auto_TractShredder.py @@ -28,6 +28,7 @@ def test_TractShredder_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py b/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py index 
72b6f106d8..bd8d295572 100644 --- a/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py +++ b/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py @@ -25,6 +25,7 @@ def test_VtkStreamlines_inputs(): interpolatescalars=dict(argstr='-interpolatescalars', ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py b/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py index 8a83130a4d..494ce981c5 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py @@ -79,11 +79,10 @@ def test_Aparc2Aseg_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Aparc2Aseg_outputs(): - output_map = dict( - out_file=dict( - argstr='%s', - extensions=None, - ), ) + output_map = dict(out_file=dict( + argstr='%s', + extensions=None, + ), ) outputs = Aparc2Aseg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py b/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py index fea3617596..03385da2ad 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py @@ -28,11 +28,10 @@ def test_Apas2Aseg_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Apas2Aseg_outputs(): - output_map = dict( - out_file=dict( - argstr='%s', - extensions=None, - ), ) + output_map = dict(out_file=dict( + argstr='%s', + extensions=None, + ), ) outputs = Apas2Aseg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py b/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py index f3bfd5ad62..4d56d217c7 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py @@ -7,7 +7,10 @@ def test_CALabel_inputs(): input_map = dict( align=dict(argstr='-align', ), args=dict(argstr='%s', ), - aseg=dict(argstr='-aseg %s', ), + aseg=dict( + argstr='-aseg %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, @@ -26,7 +29,10 @@ def test_CALabel_inputs(): argstr='-r %s', extensions=None, ), - label=dict(argstr='-l %s', ), + label=dict( + argstr='-l %s', + extensions=None, + ), no_big_ventricles=dict(argstr='-nobigventricles', ), num_threads=dict(), out_file=dict( diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py b/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py index 1f6546ae3a..5700103e84 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py @@ -59,7 +59,7 @@ def test_CANormalize_inputs(): def test_CANormalize_outputs(): output_map = dict( control_points=dict(extensions=None, ), - out_file=dict(), + out_file=dict(extensions=None, ), ) outputs = CANormalize.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py b/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py index 83f669b218..dcf5aa84a8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py @@ -53,7 +53,7 @@ def test_CARegister_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == 
value def test_CARegister_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = CARegister.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py b/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py index 68a5a98e66..fe9c1a3121 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py @@ -35,7 +35,7 @@ def test_CheckTalairachAlignment_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CheckTalairachAlignment_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = CheckTalairachAlignment.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py b/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py index 8acab945c1..ee1d3ae7f3 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py @@ -38,11 +38,13 @@ def test_ConcatenateLTA_inputs(): subjects_dir=dict(), tal_source_file=dict( argstr='-tal %s', + extensions=None, position=-5, requires=['tal_template_file'], ), tal_template_file=dict( argstr='%s', + extensions=None, position=-4, requires=['tal_source_file'], ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py b/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py index 3c4e5aa484..3cd62e8ee7 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py @@ -5,10 +5,16 @@ def test_Contrast_inputs(): input_map = dict( - annotation=dict(mandatory=True, ), + annotation=dict( + extensions=None, + mandatory=True, + ), args=dict(argstr='%s', ), copy_inputs=dict(), - cortex=dict(mandatory=True, ), + cortex=dict( + extensions=None, + mandatory=True, + ), environ=dict( nohash=True, usedefault=True, diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py b/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py index 4ba8442b14..83427dcd20 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py @@ -35,7 +35,10 @@ def test_FixTopology_inputs(): ), mgz=dict(argstr='-mgz', ), seed=dict(argstr='-seed %d', ), - sphere=dict(argstr='-sphere %s', ), + sphere=dict( + argstr='-sphere %s', + extensions=None, + ), subject_id=dict( argstr='%s', mandatory=True, diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py index 10c7af6832..eaf91bc1e8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py @@ -6,7 +6,10 @@ def test_MRIsCALabel_inputs(): input_map = dict( args=dict(argstr='%s', ), - aseg=dict(argstr='-aseg %s', ), + aseg=dict( + argstr='-aseg %s', + extensions=None, + ), canonsurf=dict( argstr='%s', extensions=None, @@ -33,7 +36,10 @@ def test_MRIsCALabel_inputs(): mandatory=True, position=-4, ), - label=dict(argstr='-l %s', ), + label=dict( + argstr='-l %s', + extensions=None, + ), num_threads=dict(), out_file=dict( argstr='%s', diff --git 
a/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py b/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py index badb3b4f0c..3d3f5cde11 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py @@ -17,22 +17,30 @@ def test_MS_LDA_inputs(): mandatory=True, position=-1, ), - label_file=dict(argstr='-label %s', ), + label_file=dict( + argstr='-label %s', + extensions=None, + ), lda_labels=dict( argstr='-lda %s', mandatory=True, sep=' ', ), - mask_file=dict(argstr='-mask %s', ), + mask_file=dict( + argstr='-mask %s', + extensions=None, + ), shift=dict(argstr='-shift %d', ), subjects_dir=dict(), use_weights=dict(argstr='-W', ), vol_synth_file=dict( argstr='-synth %s', + extensions=None, mandatory=True, ), weight_file=dict( argstr='-weight %s', + extensions=None, mandatory=True, ), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py b/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py index 2384adbb2b..baf03f026c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py @@ -43,7 +43,7 @@ def test_Normalize_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Normalize_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Normalize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py b/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py index be19b2bd37..8b4ae45d67 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py @@ -27,11 +27,16 @@ def test_ParcellationStats_inputs(): ), in_annotation=dict( argstr='-a %s', + extensions=None, xor=['in_label'], ), - in_cortex=dict(argstr='-cortex %s', ), + in_cortex=dict( + argstr='-cortex %s', + extensions=None, + ), in_label=dict( argstr='-l %s', + extensions=None, xor=['in_annotatoin', 'out_color'], ), lh_pial=dict( @@ -45,11 +50,13 @@ def test_ParcellationStats_inputs(): mgz=dict(argstr='-mgz', ), out_color=dict( argstr='-c %s', + extensions=None, genfile=True, xor=['in_label'], ), out_table=dict( argstr='-f %s', + extensions=None, genfile=True, requires=['tabular_output'], ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py b/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py index f3406d41fc..5a609f586e 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py @@ -47,7 +47,7 @@ def test_RegisterAVItoTalairach_outputs(): extensions=None, usedefault=True, ), - out_file=dict(), + out_file=dict(extensions=None, ), ) outputs = RegisterAVItoTalairach.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py b/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py index 28ff49d99e..f99acec899 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py @@ -48,11 +48,10 @@ def test_RelabelHypointensities_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def 
test_RelabelHypointensities_outputs(): - output_map = dict( - out_file=dict( - argstr='%s', - extensions=None, - ), ) + output_map = dict(out_file=dict( + argstr='%s', + extensions=None, + ), ) outputs = RelabelHypointensities.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py index 3cf47c71ce..09da0d001d 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py @@ -100,7 +100,10 @@ def test_SegStatsReconAll_inputs(): extensions=None, mandatory=True, ), - ribbon=dict(mandatory=True, ), + ribbon=dict( + extensions=None, + mandatory=True, + ), segment_id=dict(argstr='--id %s...', ), segmentation_file=dict( argstr='--seg %s', diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py index 26dcbe3458..968041ff67 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py @@ -59,6 +59,7 @@ def test_SurfaceSnapshots_inputs(): overlay_range_offset=dict(argstr='-foffset %.3f', ), overlay_reg=dict( argstr='-overlay-reg %s', + extensions=None, xor=['overlay_reg', 'identity_reg', 'mni152_reg'], ), patch_file=dict( diff --git a/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py b/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py index 9d248c1a7d..2a28765d67 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py @@ -30,9 +30,9 @@ def test_TalairachAVI_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_TalairachAVI_outputs(): output_map = dict( - out_file=dict(), - out_log=dict(), - out_txt=dict(), + out_file=dict(extensions=None, ), + out_log=dict(extensions=None, ), + out_txt=dict(extensions=None, ), ) outputs = TalairachAVI.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py b/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py index 053d429c9b..1961e3ded6 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py @@ -24,11 +24,10 @@ def test_TalairachQC_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TalairachQC_outputs(): - output_map = dict( - log_file=dict( - extensions=None, - usedefault=True, - ), ) + output_map = dict(log_file=dict( + extensions=None, + usedefault=True, + ), ) outputs = TalairachQC.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py b/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py index de32f63f5f..ffd5b42bd1 100644 --- a/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py +++ b/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py @@ -34,11 +34,10 @@ def test_AccuracyTester_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_AccuracyTester_outputs(): - output_map = dict( - output_directory=dict( - argstr='%s', - position=1, - ), ) + output_map = dict(output_directory=dict( + argstr='%s', + position=1, + ), ) outputs = AccuracyTester.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Cleaner.py b/nipype/interfaces/fsl/tests/test_auto_Cleaner.py index a75df99db5..597dc88d5e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Cleaner.py +++ b/nipype/interfaces/fsl/tests/test_auto_Cleaner.py @@ -22,14 +22,17 @@ def test_Cleaner_inputs(): ), confound_file=dict( argstr='-x %s', + extensions=None, position=4, ), confound_file_1=dict( argstr='-x %s', + extensions=None, position=5, ), confound_file_2=dict( argstr='-x %s', + extensions=None, position=6, ), environ=dict( diff --git a/nipype/interfaces/fsl/tests/test_auto_Cluster.py b/nipype/interfaces/fsl/tests/test_auto_Cluster.py index 6ed34ab816..19340b9383 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Cluster.py +++ b/nipype/interfaces/fsl/tests/test_auto_Cluster.py @@ -7,7 +7,10 @@ def test_Cluster_inputs(): input_map = dict( args=dict(argstr='%s', ), connectivity=dict(argstr='--connectivity=%d', ), - cope_file=dict(argstr='--cope=%s', ), + cope_file=dict( + argstr='--cope=%s', + extensions=None, + ), dlh=dict(argstr='--dlh=%.10f', ), environ=dict( nohash=True, diff --git a/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py b/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py index 3945c40a87..eaeeb34ce8 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py +++ b/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py @@ -22,12 +22,11 @@ def test_FeatureExtractor_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_FeatureExtractor_outputs(): - output_map = dict( - mel_ica=dict( - argstr='%s', - copyfile=False, - position=-1, - ), ) + output_map = dict(mel_ica=dict( + argstr='%s', + copyfile=False, + position=-1, + ), ) outputs = FeatureExtractor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py b/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py index 5c7b717d93..a8262c9be1 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py +++ b/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py @@ -30,11 +30,10 @@ def test_FindTheBiggest_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_FindTheBiggest_outputs(): - output_map = dict( - out_file=dict( - argstr='%s', - extensions=None, - ), ) + output_map = dict(out_file=dict( + argstr='%s', + extensions=None, + ), ) outputs = FindTheBiggest.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py b/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py index 18ef078a79..638bcdd156 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py +++ b/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py @@ -4,24 +4,22 @@ def test_TrainingSetCreator_inputs(): - input_map = dict( - mel_icas_in=dict( - argstr='%s', - copyfile=False, - position=-1, - ), ) + input_map = dict(mel_icas_in=dict( + argstr='%s', + copyfile=False, + position=-1, + ), ) inputs = TrainingSetCreator.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_TrainingSetCreator_outputs(): - output_map = dict( - mel_icas_out=dict( - argstr='%s', - copyfile=False, - position=-1, - ), ) + output_map = dict(mel_icas_out=dict( + argstr='%s', + copyfile=False, + 
position=-1, + ), ) outputs = TrainingSetCreator.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Average.py b/nipype/interfaces/minc/tests/test_auto_Average.py index 678ab93a52..6f0e84d144 100644 --- a/nipype/interfaces/minc/tests/test_auto_Average.py +++ b/nipype/interfaces/minc/tests/test_auto_Average.py @@ -29,6 +29,7 @@ def test_Average_inputs(): ), filelist=dict( argstr='-filelist %s', + extensions=None, mandatory=True, xor=('input_files', 'filelist'), ), @@ -126,7 +127,10 @@ def test_Average_inputs(): argstr='-quiet', xor=('verbose', 'quiet'), ), - sdfile=dict(argstr='-sdfile %s', ), + sdfile=dict( + argstr='-sdfile %s', + extensions=None, + ), two=dict(argstr='-2', ), verbose=dict( argstr='-verbose', diff --git a/nipype/interfaces/minc/tests/test_auto_BBox.py b/nipype/interfaces/minc/tests/test_auto_BBox.py index c1b3515cea..7bdf35f03d 100644 --- a/nipype/interfaces/minc/tests/test_auto_BBox.py +++ b/nipype/interfaces/minc/tests/test_auto_BBox.py @@ -25,6 +25,7 @@ def test_BBox_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/minc/tests/test_auto_Beast.py b/nipype/interfaces/minc/tests/test_auto_Beast.py index bc4705db2e..3a44b436b0 100644 --- a/nipype/interfaces/minc/tests/test_auto_Beast.py +++ b/nipype/interfaces/minc/tests/test_auto_Beast.py @@ -18,7 +18,10 @@ def test_Beast_inputs(): argstr='-alpha %s', usedefault=True, ), - configuration_file=dict(argstr='-configuration %s', ), + configuration_file=dict( + argstr='-configuration %s', + extensions=None, + ), environ=dict( nohash=True, usedefault=True, @@ -27,6 +30,7 @@ def test_Beast_inputs(): flip_images=dict(argstr='-flip', ), input_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), @@ -44,6 +48,7 @@ def test_Beast_inputs(): ), output_file=dict( argstr='%s', + extensions=None, hash_files=False, name_source=['input_file'], name_template='%s_beast_mask.mnc', diff --git a/nipype/interfaces/minc/tests/test_auto_Calc.py b/nipype/interfaces/minc/tests/test_auto_Calc.py index e9df677150..d5fb316e39 100644 --- a/nipype/interfaces/minc/tests/test_auto_Calc.py +++ b/nipype/interfaces/minc/tests/test_auto_Calc.py @@ -26,6 +26,7 @@ def test_Calc_inputs(): eval_width=dict(argstr='-eval_width %s', ), expfile=dict( argstr='-expfile %s', + extensions=None, mandatory=True, xor=('expression', 'expfile'), ), @@ -36,6 +37,7 @@ def test_Calc_inputs(): ), filelist=dict( argstr='-filelist %s', + extensions=None, mandatory=True, xor=('input_files', 'filelist'), ), diff --git a/nipype/interfaces/minc/tests/test_auto_Dump.py b/nipype/interfaces/minc/tests/test_auto_Dump.py index 19c299dac8..fbd33f5a46 100644 --- a/nipype/interfaces/minc/tests/test_auto_Dump.py +++ b/nipype/interfaces/minc/tests/test_auto_Dump.py @@ -36,6 +36,7 @@ def test_Dump_inputs(): netcdf_name=dict(argstr='-n %s', ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/minc/tests/test_auto_Extract.py b/nipype/interfaces/minc/tests/test_auto_Extract.py index 35f6162c7f..fbd0a84729 100644 --- a/nipype/interfaces/minc/tests/test_auto_Extract.py +++ b/nipype/interfaces/minc/tests/test_auto_Extract.py @@ -84,6 +84,7 @@ def test_Extract_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/minc/tests/test_auto_Math.py b/nipype/interfaces/minc/tests/test_auto_Math.py index 6bc142b15d..41d75379d4 100644 
--- a/nipype/interfaces/minc/tests/test_auto_Math.py +++ b/nipype/interfaces/minc/tests/test_auto_Math.py @@ -36,6 +36,7 @@ def test_Math_inputs(): exp=dict(argstr='-exp -const2 %s %s', ), filelist=dict( argstr='-filelist %s', + extensions=None, mandatory=True, xor=('input_files', 'filelist'), ), diff --git a/nipype/interfaces/minc/tests/test_auto_Norm.py b/nipype/interfaces/minc/tests/test_auto_Norm.py index 9fb0d3c5ba..cdd4a3db7c 100644 --- a/nipype/interfaces/minc/tests/test_auto_Norm.py +++ b/nipype/interfaces/minc/tests/test_auto_Norm.py @@ -26,7 +26,10 @@ def test_Norm_inputs(): position=-2, ), lower=dict(argstr='-lower %s', ), - mask=dict(argstr='-mask %s', ), + mask=dict( + argstr='-mask %s', + extensions=None, + ), out_ceil=dict(argstr='-out_ceil %s', ), out_floor=dict(argstr='-out_floor %s', ), output_file=dict( @@ -40,6 +43,7 @@ def test_Norm_inputs(): ), output_threshold_mask=dict( argstr='-threshold_mask %s', + extensions=None, hash_files=False, name_source=['input_file'], name_template='%s_norm_threshold_mask.mnc', diff --git a/nipype/interfaces/minc/tests/test_auto_Resample.py b/nipype/interfaces/minc/tests/test_auto_Resample.py index 8d4b24ff41..da512d9f62 100644 --- a/nipype/interfaces/minc/tests/test_auto_Resample.py +++ b/nipype/interfaces/minc/tests/test_auto_Resample.py @@ -94,7 +94,10 @@ def test_Resample_inputs(): argstr='-keep_real_range', xor=('keep_real_range', 'nokeep_real_range'), ), - like=dict(argstr='-like %s', ), + like=dict( + argstr='-like %s', + extensions=None, + ), nearest_neighbour_interpolation=dict( argstr='-nearest_neighbour', xor=('trilinear_interpolation', 'tricubic_interpolation', @@ -157,7 +160,10 @@ def test_Resample_inputs(): xor=('nelements', 'nelements_x_y_or_z'), ), talairach=dict(argstr='-talairach', ), - transformation=dict(argstr='-transformation %s', ), + transformation=dict( + argstr='-transformation %s', + extensions=None, + ), transverse_slices=dict( argstr='-transverse', xor=('transverse', 'sagittal', 'coronal'), diff --git a/nipype/interfaces/minc/tests/test_auto_Reshape.py b/nipype/interfaces/minc/tests/test_auto_Reshape.py index 669425da95..d80f9a377b 100644 --- a/nipype/interfaces/minc/tests/test_auto_Reshape.py +++ b/nipype/interfaces/minc/tests/test_auto_Reshape.py @@ -16,6 +16,7 @@ def test_Reshape_inputs(): ), input_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), diff --git a/nipype/interfaces/minc/tests/test_auto_ToRaw.py b/nipype/interfaces/minc/tests/test_auto_ToRaw.py index aeda687c97..42ba72f145 100644 --- a/nipype/interfaces/minc/tests/test_auto_ToRaw.py +++ b/nipype/interfaces/minc/tests/test_auto_ToRaw.py @@ -26,6 +26,7 @@ def test_ToRaw_inputs(): ), out_file=dict( argstr='> %s', + extensions=None, genfile=True, position=-1, ), diff --git a/nipype/interfaces/minc/tests/test_auto_VolSymm.py b/nipype/interfaces/minc/tests/test_auto_VolSymm.py index b710d59543..ae3332b7f2 100644 --- a/nipype/interfaces/minc/tests/test_auto_VolSymm.py +++ b/nipype/interfaces/minc/tests/test_auto_VolSymm.py @@ -22,6 +22,7 @@ def test_VolSymm_inputs(): fit_nonlinear=dict(argstr='-nonlinear', ), input_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-3, ), @@ -38,6 +39,7 @@ def test_VolSymm_inputs(): ), trans_file=dict( argstr='%s', + extensions=None, genfile=True, hash_files=False, keep_extension=False, diff --git a/nipype/interfaces/minc/tests/test_auto_XfmInvert.py b/nipype/interfaces/minc/tests/test_auto_XfmInvert.py index 9e242300da..69c455f875 100644 --- 
a/nipype/interfaces/minc/tests/test_auto_XfmInvert.py +++ b/nipype/interfaces/minc/tests/test_auto_XfmInvert.py @@ -16,6 +16,7 @@ def test_XfmInvert_inputs(): ), input_file=dict( argstr='%s', + extensions=None, mandatory=True, position=-2, ), diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py b/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py index 90c5272ed0..5ee1e8cb00 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py @@ -14,8 +14,12 @@ def test_BrainMask_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, + xor=['grad_fsl'], + ), + grad_fsl=dict( + argstr='-fslgrad %s %s', + xor=['grad_file'], ), - grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py index 7658201223..59617b30ab 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py @@ -15,8 +15,12 @@ def test_DWIExtract_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, + xor=['grad_fsl'], + ), + grad_fsl=dict( + argstr='-fslgrad %s %s', + xor=['grad_file'], ), - grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py b/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py index 84404fdde6..8f8142746a 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py @@ -14,8 +14,12 @@ def test_FitTensor_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, + xor=['grad_fsl'], + ), + grad_fsl=dict( + argstr='-fslgrad %s %s', + xor=['grad_file'], ), - grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py b/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py index 92594da593..f7032cb624 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py @@ -19,8 +19,12 @@ def test_Generate5tt_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, + xor=['grad_fsl'], + ), + grad_fsl=dict( + argstr='-fslgrad %s %s', + xor=['grad_file'], ), - grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py index 9271d07517..f11b615cc8 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py @@ -22,8 +22,12 @@ def test_MRConvert_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, + xor=['grad_fsl'], + ), + grad_fsl=dict( + argstr='-fslgrad %s %s', + xor=['grad_file'], ), - grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py index ae494bb932..fe934c45b1 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py @@ -15,8 +15,12 @@ def test_MRMath_inputs(): grad_file=dict( 
argstr='-grad %s', extensions=None, + xor=['grad_fsl'], + ), + grad_fsl=dict( + argstr='-fslgrad %s %s', + xor=['grad_file'], ), - grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py b/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py index dd326bad5b..9415cae211 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py @@ -29,8 +29,12 @@ def test_ResponseSD_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, + xor=['grad_fsl'], + ), + grad_fsl=dict( + argstr='-fslgrad %s %s', + xor=['grad_file'], ), - grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py index 39f239b81d..cf685a50d8 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py @@ -28,8 +28,12 @@ def test_Tractography_inputs(): grad_file=dict( argstr='-grad %s', extensions=None, + xor=['grad_fsl'], + ), + grad_fsl=dict( + argstr='-fslgrad %s %s', + xor=['grad_file'], ), - grad_fsl=dict(argstr='-fslgrad %s %s', ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', diff --git a/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py b/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py index 20995e806e..2a58b39d57 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py @@ -8,6 +8,7 @@ def test_DwiTool_inputs(): args=dict(argstr='%s', ), b0_file=dict( argstr='-b0 %s', + extensions=None, position=4, ), ball_flag=dict( @@ -28,11 +29,13 @@ def test_DwiTool_inputs(): ), bval_file=dict( argstr='-bval %s', + extensions=None, mandatory=True, position=2, ), bvec_file=dict( argstr='-bvec %s', + extensions=None, position=3, ), diso_val=dict(argstr='-diso %f', ), @@ -59,6 +62,7 @@ def test_DwiTool_inputs(): ), famap_file=dict( argstr='-famap %s', + extensions=None, name_source=['source_file'], name_template='%s_famap.nii.gz', ), @@ -72,20 +76,24 @@ def test_DwiTool_inputs(): ), logdti_file=dict( argstr='-logdti2 %s', + extensions=None, name_source=['source_file'], name_template='%s_logdti2.nii.gz', ), mask_file=dict( argstr='-mask %s', + extensions=None, position=5, ), mcmap_file=dict( argstr='-mcmap %s', + extensions=None, name_source=['source_file'], name_template='%s_mcmap.nii.gz', ), mdmap_file=dict( argstr='-mdmap %s', + extensions=None, name_source=['source_file'], name_template='%s_mdmap.nii.gz', ), @@ -115,22 +123,26 @@ def test_DwiTool_inputs(): ), rgbmap_file=dict( argstr='-rgbmap %s', + extensions=None, name_source=['source_file'], name_template='%s_rgbmap.nii.gz', ), source_file=dict( argstr='-source %s', + extensions=None, mandatory=True, position=1, ), syn_file=dict( argstr='-syn %s', + extensions=None, name_source=['source_file'], name_template='%s_syn.nii.gz', requires=['bvec_file', 'b0_file'], ), v1map_file=dict( argstr='-v1map %s', + extensions=None, name_source=['source_file'], name_template='%s_v1map.nii.gz', ), @@ -142,13 +154,13 @@ def test_DwiTool_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_DwiTool_outputs(): output_map = dict( - famap_file=dict(), - logdti_file=dict(), - mcmap_file=dict(), - mdmap_file=dict(), - 
rgbmap_file=dict(), - syn_file=dict(), - v1map_file=dict(), + famap_file=dict(extensions=None, ), + logdti_file=dict(extensions=None, ), + mcmap_file=dict(extensions=None, ), + mdmap_file=dict(extensions=None, ), + rgbmap_file=dict(extensions=None, ), + syn_file=dict(extensions=None, ), + v1map_file=dict(extensions=None, ), ) outputs = DwiTool.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py b/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py index b2e1bef961..94a489a4ad 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py @@ -8,6 +8,7 @@ def test_FitAsl_inputs(): args=dict(argstr='%s', ), cbf_file=dict( argstr='-cbf %s', + extensions=None, name_source=['source_file'], name_template='%s_cbf.nii.gz', ), @@ -20,19 +21,33 @@ def test_FitAsl_inputs(): ), error_file=dict( argstr='-error %s', + extensions=None, name_source=['source_file'], name_template='%s_error.nii.gz', ), gm_plasma=dict(argstr='-gmL %f', ), gm_t1=dict(argstr='-gmT1 %f', ), gm_ttt=dict(argstr='-gmTTT %f', ), - ir_output=dict(argstr='-IRoutput %s', ), - ir_volume=dict(argstr='-IRvolume %s', ), + ir_output=dict( + argstr='-IRoutput %s', + extensions=None, + ), + ir_volume=dict( + argstr='-IRvolume %s', + extensions=None, + ), ldd=dict(argstr='-LDD %f', ), - m0map=dict(argstr='-m0map %s', ), - m0mape=dict(argstr='-m0mape %s', ), + m0map=dict( + argstr='-m0map %s', + extensions=None, + ), + m0mape=dict( + argstr='-m0mape %s', + extensions=None, + ), mask=dict( argstr='-mask %s', + extensions=None, position=2, ), mul=dict(argstr='-mul %f', ), @@ -46,21 +61,29 @@ def test_FitAsl_inputs(): pv2=dict(argstr='-pv2 %d', ), pv3=dict(argstr='-pv3 %d %d %d', ), pv_threshold=dict(argstr='-pvthreshold', ), - seg=dict(argstr='-seg %s', ), + seg=dict( + argstr='-seg %s', + extensions=None, + ), segstyle=dict(argstr='-segstyle', ), sig=dict(argstr='-sig', ), source_file=dict( argstr='-source %s', + extensions=None, mandatory=True, position=1, ), syn_file=dict( argstr='-syn %s', + extensions=None, name_source=['source_file'], name_template='%s_syn.nii.gz', ), t1_art_cmp=dict(argstr='-T1a %f', ), - t1map=dict(argstr='-t1map %s', ), + t1map=dict( + argstr='-t1map %s', + extensions=None, + ), t_inv1=dict(argstr='-Tinv1 %f', ), t_inv2=dict(argstr='-Tinv2 %f', ), wm_plasma=dict(argstr='-wmL %f', ), @@ -74,9 +97,9 @@ def test_FitAsl_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_FitAsl_outputs(): output_map = dict( - cbf_file=dict(), - error_file=dict(), - syn_file=dict(), + cbf_file=dict(extensions=None, ), + error_file=dict(extensions=None, ), + syn_file=dict(extensions=None, ), ) outputs = FitAsl.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py b/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py index 700d9a31c4..2086d12a2b 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py @@ -25,15 +25,20 @@ def test_FitDwi_inputs(): ), bval_file=dict( argstr='-bval %s', + extensions=None, mandatory=True, position=2, ), bvec_file=dict( argstr='-bvec %s', + extensions=None, mandatory=True, position=3, ), - cov_file=dict(argstr='-cov %s', ), + cov_file=dict( + argstr='-cov %s', + extensions=None, + ), csf_t2_val=dict(argstr='-csfT2 %f', ), diso_val=dict(argstr='-diso %f', ), dpr_val=dict(argstr='-dpr %f', ), @@ -51,11 +56,13 @@ def test_FitDwi_inputs(): ), error_file=dict( argstr='-error %s', + extensions=None, 
name_source=['source_file'], name_template='%s_error.nii.gz', ), famap_file=dict( argstr='-famap %s', + extensions=None, name_source=['source_file'], name_template='%s_famap.nii.gz', ), @@ -75,13 +82,17 @@ def test_FitDwi_inputs(): argstr='-lm %f %f', requires=['gn_flag'], ), - mask_file=dict(argstr='-mask %s', ), + mask_file=dict( + argstr='-mask %s', + extensions=None, + ), maxit_val=dict( argstr='-maxit %d', requires=['gn_flag'], ), mcmap_file=dict( argstr='-mcmap %s', + extensions=None, name_source=['source_file'], name_template='%s_mcmap.nii.gz', requires=['nodv_flag'], @@ -89,12 +100,14 @@ def test_FitDwi_inputs(): mcmaxit=dict(argstr='-mcmaxit %d', ), mcout=dict( argstr='-mcout %s', + extensions=None, name_source=['source_file'], name_template='%s_mcout.txt', ), mcsamples=dict(argstr='-mcsamples %d', ), mdmap_file=dict( argstr='-mdmap %s', + extensions=None, name_source=['source_file'], name_template='%s_mdmap.nii.gz', ), @@ -116,6 +129,7 @@ def test_FitDwi_inputs(): ), nodiff_file=dict( argstr='-nodiff %s', + extensions=None, name_source=['source_file'], name_template='%s_no_diff.nii.gz', ), @@ -128,14 +142,19 @@ def test_FitDwi_inputs(): ], ), perf_thr=dict(argstr='-perfthreshold %f', ), - prior_file=dict(argstr='-prior %s', ), + prior_file=dict( + argstr='-prior %s', + extensions=None, + ), res_file=dict( argstr='-res %s', + extensions=None, name_source=['source_file'], name_template='%s_resmap.nii.gz', ), rgbmap_file=dict( argstr='-rgbmap %s', + extensions=None, name_source=['source_file'], name_template='%s_rgbmap.nii.gz', requires=['dti_flag'], @@ -144,38 +163,45 @@ def test_FitDwi_inputs(): slice_no=dict(argstr='-slice %d', ), source_file=dict( argstr='-source %s', + extensions=None, mandatory=True, position=1, ), swls_val=dict(argstr='-swls %f', ), syn_file=dict( argstr='-syn %s', + extensions=None, name_source=['source_file'], name_template='%s_syn.nii.gz', ), te_file=dict( argstr='-TE %s', + extensions=None, xor=['te_file'], ), te_value=dict( argstr='-TE %s', + extensions=None, xor=['te_file'], ), ten_type=dict(usedefault=True, ), tenmap2_file=dict( argstr='-tenmap2 %s', + extensions=None, name_source=['source_file'], name_template='%s_tenmap2.nii.gz', requires=['dti_flag'], ), tenmap_file=dict( argstr='-tenmap %s', + extensions=None, name_source=['source_file'], name_template='%s_tenmap.nii.gz', requires=['dti_flag'], ), v1map_file=dict( argstr='-v1map %s', + extensions=None, name_source=['source_file'], name_template='%s_v1map.nii.gz', ), @@ -194,18 +220,18 @@ def test_FitDwi_inputs(): assert getattr(inputs.traits()[key], metakey) == value def test_FitDwi_outputs(): output_map = dict( - error_file=dict(), - famap_file=dict(), - mcmap_file=dict(), - mcout=dict(), - mdmap_file=dict(), - nodiff_file=dict(), - res_file=dict(), - rgbmap_file=dict(), - syn_file=dict(), - tenmap2_file=dict(), - tenmap_file=dict(), - v1map_file=dict(), + error_file=dict(extensions=None, ), + famap_file=dict(extensions=None, ), + mcmap_file=dict(extensions=None, ), + mcout=dict(extensions=None, ), + mdmap_file=dict(extensions=None, ), + nodiff_file=dict(extensions=None, ), + res_file=dict(extensions=None, ), + rgbmap_file=dict(extensions=None, ), + syn_file=dict(extensions=None, ), + tenmap2_file=dict(extensions=None, ), + tenmap_file=dict(extensions=None, ), + v1map_file=dict(extensions=None, ), ) outputs = FitDwi.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py b/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py index 392654fd5c..b184e448cd 100644 --- 
a/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py @@ -7,7 +7,10 @@ def test_FitQt1_inputs(): input_map = dict( acceptance=dict(argstr='-acceptance %f', ), args=dict(argstr='%s', ), - b1map=dict(argstr='-b1map %s', ), + b1map=dict( + argstr='-b1map %s', + extensions=None, + ), comp_file=dict( argstr='-comp %s', extensions=None, @@ -28,7 +31,10 @@ def test_FitQt1_inputs(): argstr='-flips %s', sep=' ', ), - flips_list=dict(argstr='-fliplist %s', ), + flips_list=dict( + argstr='-fliplist %s', + extensions=None, + ), gn_flag=dict( argstr='-gn', position=8, @@ -63,7 +69,10 @@ def test_FitQt1_inputs(): name_template='%s_mcmap.nii.gz', ), mcmaxit=dict(argstr='-mcmaxit %d', ), - mcout=dict(argstr='-mcout %s', ), + mcout=dict( + argstr='-mcout %s', + extensions=None, + ), mcsamples=dict(argstr='-mcsamples %d', ), nb_comp=dict( argstr='-nc %d', @@ -101,7 +110,10 @@ def test_FitQt1_inputs(): name_source=['source_file'], name_template='%s_syn.nii.gz', ), - t1_list=dict(argstr='-T1list %s', ), + t1_list=dict( + argstr='-T1list %s', + extensions=None, + ), t1map_file=dict( argstr='-t1map %s', extensions=None, @@ -119,7 +131,10 @@ def test_FitQt1_inputs(): position=14, sep=' ', ), - tis_list=dict(argstr='-TIlist %s', ), + tis_list=dict( + argstr='-TIlist %s', + extensions=None, + ), tr_value=dict( argstr='-TR %f', position=5, diff --git a/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py b/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py index 3fe17160db..9a9b5c421c 100644 --- a/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py +++ b/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py @@ -13,7 +13,7 @@ def test_EstimateContrast_inputs(): constants=dict(mandatory=True, ), contrasts=dict(mandatory=True, ), dof=dict(mandatory=True, ), - mask=dict(), + mask=dict(extensions=None, ), nvbeta=dict(mandatory=True, ), reg_names=dict(mandatory=True, ), s2=dict( diff --git a/nipype/interfaces/nipy/tests/test_auto_FitGLM.py b/nipype/interfaces/nipy/tests/test_auto_FitGLM.py index a700c18d43..71ca221efc 100644 --- a/nipype/interfaces/nipy/tests/test_auto_FitGLM.py +++ b/nipype/interfaces/nipy/tests/test_auto_FitGLM.py @@ -8,7 +8,7 @@ def test_FitGLM_inputs(): TR=dict(mandatory=True, ), drift_model=dict(usedefault=True, ), hrf_model=dict(usedefault=True, ), - mask=dict(), + mask=dict(extensions=None, ), method=dict(usedefault=True, ), model=dict(usedefault=True, ), normalize_design_matrix=dict(usedefault=True, ), @@ -30,7 +30,7 @@ def test_FitGLM_outputs(): dof=dict(), nvbeta=dict(), reg_names=dict(), - residuals=dict(), + residuals=dict(extensions=None, ), s2=dict(extensions=None, ), ) outputs = FitGLM.output_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py b/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py index 866af46740..2eaeb1acef 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py @@ -38,11 +38,10 @@ def test_UnbiasedNonLocalMeans_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_UnbiasedNonLocalMeans_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = UnbiasedNonLocalMeans.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py index 1ca79a6e96..33e52ed707 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py @@ -27,11 +27,10 @@ def test_DTIexport_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DTIexport_outputs(): - output_map = dict( - outputFile=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputFile=dict( + extensions=None, + position=-1, + ), ) outputs = DTIexport.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py index 58ae495f3f..ddb07c9428 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py @@ -28,11 +28,10 @@ def test_DTIimport_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DTIimport_outputs(): - output_map = dict( - outputTensor=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputTensor=dict( + extensions=None, + position=-1, + ), ) outputs = DTIimport.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py index f8000c49b1..aa0b6f97b5 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py @@ -37,11 +37,10 @@ def test_DWIJointRicianLMMSEFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DWIJointRicianLMMSEFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = DWIJointRicianLMMSEFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py index 29b5e0b7e1..69509fcf28 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py @@ -43,11 +43,10 @@ def test_DWIRicianLMMSEFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DWIRicianLMMSEFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = DWIRicianLMMSEFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py index 5dcdbd7e2e..060c47ecc2 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py +++ 
b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py @@ -28,11 +28,10 @@ def test_DiffusionTensorScalarMeasurements_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DiffusionTensorScalarMeasurements_outputs(): - output_map = dict( - outputScalar=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputScalar=dict( + extensions=None, + position=-1, + ), ) outputs = DiffusionTensorScalarMeasurements.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py index bd35268bdb..9cfcdb14fb 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py @@ -73,11 +73,10 @@ def test_ResampleDTIVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ResampleDTIVolume_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = ResampleDTIVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py index d2b17c562a..42a7b17a19 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py @@ -33,11 +33,10 @@ def test_AddScalarVolumes_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_AddScalarVolumes_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = AddScalarVolumes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py index bbcb2f077c..0c05002ff8 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py @@ -28,11 +28,10 @@ def test_CastScalarVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CastScalarVolume_outputs(): - output_map = dict( - OutputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = CastScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py index 24f5b74307..b602012d0c 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py @@ -36,11 +36,10 @@ def test_CheckerBoardFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CheckerBoardFilter_outputs(): - output_map = dict( - 
outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = CheckerBoardFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py index 0240ad676a..7389aa0cee 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py @@ -30,11 +30,10 @@ def test_CurvatureAnisotropicDiffusion_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_CurvatureAnisotropicDiffusion_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = CurvatureAnisotropicDiffusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py index c47382aef5..d6f93cb41d 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py @@ -31,11 +31,10 @@ def test_ExtractSkeleton_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ExtractSkeleton_outputs(): - output_map = dict( - OutputImageFileName=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputImageFileName=dict( + extensions=None, + position=-1, + ), ) outputs = ExtractSkeleton.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py index 2bdb73c4d5..9511f9ce1d 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py @@ -28,11 +28,10 @@ def test_GaussianBlurImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GaussianBlurImageFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = GaussianBlurImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py index 6d3e40c8de..02c9d7fb20 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py @@ -30,11 +30,10 @@ def test_GradientAnisotropicDiffusion_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GradientAnisotropicDiffusion_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = 
GradientAnisotropicDiffusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py index f1ff2c3809..45b2b4b8dc 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py @@ -27,11 +27,10 @@ def test_GrayscaleFillHoleImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GrayscaleFillHoleImageFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = GrayscaleFillHoleImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py index 6aee86282a..77536e147d 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py @@ -27,11 +27,10 @@ def test_GrayscaleGrindPeakImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GrayscaleGrindPeakImageFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = GrayscaleGrindPeakImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py b/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py index c839c50abf..9541702731 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py @@ -35,11 +35,10 @@ def test_HistogramMatching_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_HistogramMatching_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = HistogramMatching.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py index 7fd2c31db3..c3373f80ec 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py @@ -33,11 +33,10 @@ def test_ImageLabelCombine_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ImageLabelCombine_outputs(): - output_map = dict( - OutputLabelMap=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputLabelMap=dict( + extensions=None, + position=-1, + ), ) outputs = ImageLabelCombine.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py 
b/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py index 56d68199af..11999f2001 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py @@ -34,11 +34,10 @@ def test_MaskScalarVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MaskScalarVolume_outputs(): - output_map = dict( - OutputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = MaskScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py index c0bf97e152..4c890aa30f 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py @@ -31,11 +31,10 @@ def test_MedianImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MedianImageFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = MedianImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py index cc39d5b7b1..99772b41db 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py @@ -33,11 +33,10 @@ def test_MultiplyScalarVolumes_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MultiplyScalarVolumes_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = MultiplyScalarVolumes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py index 12d7af77c4..28a9876943 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py @@ -71,11 +71,10 @@ def test_ResampleScalarVectorDWIVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ResampleScalarVectorDWIVolume_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = ResampleScalarVectorDWIVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py index 106cd843a4..62bf214bf4 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py +++ 
b/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py @@ -33,11 +33,10 @@ def test_SubtractScalarVolumes_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SubtractScalarVolumes_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = SubtractScalarVolumes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py index e63ed5923f..ce7f97ec58 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py @@ -32,11 +32,10 @@ def test_ThresholdScalarVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ThresholdScalarVolume_outputs(): - output_map = dict( - OutputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = ThresholdScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py index 89832bf9ba..c8100fd74b 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py @@ -34,11 +34,10 @@ def test_VotingBinaryHoleFillingImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_VotingBinaryHoleFillingImageFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = VotingBinaryHoleFillingImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py b/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py index f3c1ec6ff0..a121f85614 100644 --- a/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py +++ b/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py @@ -41,11 +41,10 @@ def test_DWIUnbiasedNonLocalMeansFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DWIUnbiasedNonLocalMeansFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = DWIUnbiasedNonLocalMeansFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py index 19b41f7127..6af3c1d54c 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py +++ 
b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py @@ -30,11 +30,10 @@ def test_OtsuThresholdImageFilter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_OtsuThresholdImageFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = OtsuThresholdImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py index b9fa12f2aa..c73313751e 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py @@ -31,11 +31,10 @@ def test_OtsuThresholdSegmentation_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_OtsuThresholdSegmentation_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = OtsuThresholdSegmentation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py b/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py index 7f1c259cd6..66207fb604 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py @@ -32,11 +32,10 @@ def test_ResampleScalarVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ResampleScalarVolume_outputs(): - output_map = dict( - OutputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = ResampleScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py index 6befb36860..c956ae34ec 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py @@ -37,11 +37,10 @@ def test_RobustStatisticsSegmenter_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_RobustStatisticsSegmenter_outputs(): - output_map = dict( - segmentedImageFileName=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(segmentedImageFileName=dict( + extensions=None, + position=-1, + ), ) outputs = RobustStatisticsSegmenter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py index 5f880c08cb..8ffdd04355 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py @@ -34,11 +34,10 @@ def 
test_SimpleRegionGrowingSegmentation_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_SimpleRegionGrowingSegmentation_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = SimpleRegionGrowingSegmentation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py b/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py index 3669a21fc3..e18c0f0d5d 100644 --- a/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py +++ b/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py @@ -33,11 +33,10 @@ def test_GrayscaleModelMaker_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_GrayscaleModelMaker_outputs(): - output_map = dict( - OutputGeometry=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputGeometry=dict( + extensions=None, + position=-1, + ), ) outputs = GrayscaleModelMaker.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py b/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py index faf982a342..d196dbf5d8 100644 --- a/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py +++ b/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py @@ -31,11 +31,10 @@ def test_LabelMapSmoothing_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_LabelMapSmoothing_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = LabelMapSmoothing.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_MergeModels.py b/nipype/interfaces/slicer/tests/test_auto_MergeModels.py index c25d4ebcf3..2e09b69bd1 100644 --- a/nipype/interfaces/slicer/tests/test_auto_MergeModels.py +++ b/nipype/interfaces/slicer/tests/test_auto_MergeModels.py @@ -32,11 +32,10 @@ def test_MergeModels_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_MergeModels_outputs(): - output_map = dict( - ModelOutput=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(ModelOutput=dict( + extensions=None, + position=-1, + ), ) outputs = MergeModels.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py b/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py index d39ae392bd..2e684e3e65 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py +++ b/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py @@ -33,11 +33,10 @@ def test_ModelToLabelMap_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ModelToLabelMap_outputs(): - output_map = dict( - OutputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = ModelToLabelMap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py 
b/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py index d0d437915b..479bb842bb 100644 --- a/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py +++ b/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py @@ -28,11 +28,10 @@ def test_OrientScalarVolume_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_OrientScalarVolume_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict( + extensions=None, + position=-1, + ), ) outputs = OrientScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py b/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py index 528ba47e1b..b4bbdbe283 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py +++ b/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py @@ -32,11 +32,10 @@ def test_ProbeVolumeWithModel_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_ProbeVolumeWithModel_outputs(): - output_map = dict( - OutputModel=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputModel=dict( + extensions=None, + position=-1, + ), ) outputs = ProbeVolumeWithModel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/utility/tests/test_auto_Rename.py b/nipype/interfaces/utility/tests/test_auto_Rename.py index 6b56badd6f..70ced0ecdd 100644 --- a/nipype/interfaces/utility/tests/test_auto_Rename.py +++ b/nipype/interfaces/utility/tests/test_auto_Rename.py @@ -20,7 +20,7 @@ def test_Rename_inputs(): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Rename_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = Rename.output_spec() for key, metadata in list(output_map.items()): From 52818ea6665dc8c24284476cc384b088c7367a81 Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Thu, 5 Sep 2019 15:27:33 -0400 Subject: [PATCH 0355/1665] FIX: loadpkl failed when pklz file contained versioning info loadpkl called pickle.load at file position 0, which would fail if the first line of the file was json versioning info (as when using savepkl with versioning=True). This was fixed by only seeking to position 0 if no versioning information is found. 
--- nipype/tests/test_utils.py | 17 +++++++++++++++++ nipype/utils/filemanip.py | 2 -- 2 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 nipype/tests/test_utils.py diff --git a/nipype/tests/test_utils.py b/nipype/tests/test_utils.py new file mode 100644 index 0000000000..6a35f6cf85 --- /dev/null +++ b/nipype/tests/test_utils.py @@ -0,0 +1,17 @@ +from nipype import utils + + +def test_pickle(tmp_path): + testobj = 'iamateststr' + pickle_fname = str(tmp_path / 'testpickle.pklz') + utils.filemanip.savepkl(pickle_fname, testobj) + outobj = utils.filemanip.loadpkl(pickle_fname) + assert outobj == testobj + + +def test_pickle_versioning(tmp_path): + testobj = 'iamateststr' + pickle_fname = str(tmp_path / 'testpickle.pklz') + utils.filemanip.savepkl(pickle_fname, testobj, versioning=True) + outobj = utils.filemanip.loadpkl(pickle_fname) + assert outobj == testobj diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 44654c0197..26859196fc 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -689,8 +689,6 @@ def loadpkl(infile, versioning=False): pkl_metadata_line = pkl_file.readline() pkl_metadata = json.loads(pkl_metadata_line) except (UnicodeDecodeError, json.JSONDecodeError): - pass - finally: # Could not get version info pkl_file.seek(0) From c6047defb99987ad0f1fe1a32f34e7d281c4a767 Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Thu, 5 Sep 2019 16:31:58 -0400 Subject: [PATCH 0356/1665] parametrize test (suggested by @effigies) --- nipype/tests/test_utils.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/nipype/tests/test_utils.py b/nipype/tests/test_utils.py index 6a35f6cf85..7c7bc40acd 100644 --- a/nipype/tests/test_utils.py +++ b/nipype/tests/test_utils.py @@ -1,17 +1,11 @@ from nipype import utils +import pytest -def test_pickle(tmp_path): +@pytest.mark.parametrize("versioning", [True, False]) +def test_pickle(tmp_path, versioning): testobj = 'iamateststr' pickle_fname = str(tmp_path / 'testpickle.pklz') - utils.filemanip.savepkl(pickle_fname, testobj) - outobj = utils.filemanip.loadpkl(pickle_fname) - assert outobj == testobj - - -def test_pickle_versioning(tmp_path): - testobj = 'iamateststr' - pickle_fname = str(tmp_path / 'testpickle.pklz') - utils.filemanip.savepkl(pickle_fname, testobj, versioning=True) + utils.filemanip.savepkl(pickle_fname, testobj, versioning=versioning) outobj = utils.filemanip.loadpkl(pickle_fname) assert outobj == testobj From 56ab01ebf50395b4d8d4f112ea3dd1a7459d49f6 Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Thu, 5 Sep 2019 16:32:20 -0400 Subject: [PATCH 0357/1665] remove versioning option from loadpkl --- nipype/utils/filemanip.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 26859196fc..e599f38e4c 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -675,7 +675,7 @@ def loadcrash(infile, *args): raise ValueError('Only pickled crashfiles are supported') -def loadpkl(infile, versioning=False): +def loadpkl(infile): """Load a zipped or plain cPickled file.""" infile = Path(infile) fmlogger.debug('Loading pkl: %s', infile) @@ -700,9 +700,6 @@ def loadpkl(infile, versioning=False): fmlogger.info('Successfully loaded pkl in compatibility mode.') # Unpickling problems except Exception as e: - if not versioning: - raise e - if pkl_metadata and 'version' in pkl_metadata: from nipype import __version__ as version if pkl_metadata['version'] != version: From 
a86d1409514625ee6bd6ee47c715e33bcf44aaab Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Thu, 5 Sep 2019 16:54:54 -0400 Subject: [PATCH 0358/1665] Revert "remove versioning option from loadpkl" This reverts commit 56ab01ebf50395b4d8d4f112ea3dd1a7459d49f6. --- nipype/utils/filemanip.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index e599f38e4c..26859196fc 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -675,7 +675,7 @@ def loadcrash(infile, *args): raise ValueError('Only pickled crashfiles are supported') -def loadpkl(infile): +def loadpkl(infile, versioning=False): """Load a zipped or plain cPickled file.""" infile = Path(infile) fmlogger.debug('Loading pkl: %s', infile) @@ -700,6 +700,9 @@ def loadpkl(infile): fmlogger.info('Successfully loaded pkl in compatibility mode.') # Unpickling problems except Exception as e: + if not versioning: + raise e + if pkl_metadata and 'version' in pkl_metadata: from nipype import __version__ as version if pkl_metadata['version'] != version: From d3a15e94592d2e157bd9fefd209d889632203b58 Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Thu, 5 Sep 2019 17:17:01 -0400 Subject: [PATCH 0359/1665] test versioning in loadpkl --- nipype/tests/test_utils.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/nipype/tests/test_utils.py b/nipype/tests/test_utils.py index 7c7bc40acd..180ce58172 100644 --- a/nipype/tests/test_utils.py +++ b/nipype/tests/test_utils.py @@ -2,10 +2,11 @@ import pytest -@pytest.mark.parametrize("versioning", [True, False]) -def test_pickle(tmp_path, versioning): +@pytest.mark.parametrize("load_versioning", [True, False]) +@pytest.mark.parametrize("save_versioning", [True, False]) +def test_pickle(tmp_path, save_versioning, load_versioning): testobj = 'iamateststr' pickle_fname = str(tmp_path / 'testpickle.pklz') - utils.filemanip.savepkl(pickle_fname, testobj, versioning=versioning) - outobj = utils.filemanip.loadpkl(pickle_fname) + utils.filemanip.savepkl(pickle_fname, testobj, versioning=save_versioning) + outobj = utils.filemanip.loadpkl(pickle_fname, versioning=load_versioning) assert outobj == testobj From bbc696f344d497a0a41e17e29b55c7448d894bc3 Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Thu, 5 Sep 2019 17:38:59 -0400 Subject: [PATCH 0360/1665] move loadpkl tests to proper location --- nipype/tests/test_utils.py | 12 ------------ nipype/utils/tests/test_filemanip.py | 10 ++++++++++ 2 files changed, 10 insertions(+), 12 deletions(-) delete mode 100644 nipype/tests/test_utils.py diff --git a/nipype/tests/test_utils.py b/nipype/tests/test_utils.py deleted file mode 100644 index 180ce58172..0000000000 --- a/nipype/tests/test_utils.py +++ /dev/null @@ -1,12 +0,0 @@ -from nipype import utils -import pytest - - -@pytest.mark.parametrize("load_versioning", [True, False]) -@pytest.mark.parametrize("save_versioning", [True, False]) -def test_pickle(tmp_path, save_versioning, load_versioning): - testobj = 'iamateststr' - pickle_fname = str(tmp_path / 'testpickle.pklz') - utils.filemanip.savepkl(pickle_fname, testobj, versioning=save_versioning) - outobj = utils.filemanip.loadpkl(pickle_fname, versioning=load_versioning) - assert outobj == testobj diff --git a/nipype/utils/tests/test_filemanip.py b/nipype/utils/tests/test_filemanip.py index 7eaa8b9c86..ecfd477504 100644 --- a/nipype/utils/tests/test_filemanip.py +++ b/nipype/utils/tests/test_filemanip.py @@ -587,3 +587,13 @@ def 
test_Path_strict_resolve(tmpdir): # If the file is created, it should not raise open('somefile.txt', 'w').close() assert '%s/somefile.txt' % tmpdir == '%s' % testfile.resolve(strict=True) + + +@pytest.mark.parametrize("load_versioning", [True, False]) +@pytest.mark.parametrize("save_versioning", [True, False]) +def test_pickle(tmp_path, save_versioning, load_versioning): + testobj = 'iamateststr' + pickle_fname = str(tmp_path / 'testpickle.pklz') + savepkl(pickle_fname, testobj, versioning=save_versioning) + outobj = loadpkl(pickle_fname, versioning=load_versioning) + assert outobj == testobj From 11d3422717b998766be98c4fc4c20511082734ba Mon Sep 17 00:00:00 2001 From: Gio at UMCU Date: Tue, 25 Jun 2019 17:02:15 +0200 Subject: [PATCH 0361/1665] added simple version of 3dTsmooth --- nipype/interfaces/afni/__init__.py | 2 +- nipype/interfaces/afni/preprocess.py | 47 ++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/afni/__init__.py b/nipype/interfaces/afni/__init__.py index 7af80059f2..015f17df73 100644 --- a/nipype/interfaces/afni/__init__.py +++ b/nipype/interfaces/afni/__init__.py @@ -13,7 +13,7 @@ BlurInMask, BlurToFWHM, ClipLevel, DegreeCentrality, Despike, Detrend, ECM, Fim, Fourier, Hist, LFCD, Maskave, Means, OutlierCount, QualityIndex, ROIStats, Retroicor, Seg, SkullStrip, TCorr1D, TCorrMap, TCorrelate, TNorm, - TProject, TShift, Volreg, Warp, QwarpPlusMinus, Qwarp) + TProject, TShift, TSmooth, Volreg, Warp, QwarpPlusMinus, Qwarp) from .svm import (SVMTest, SVMTrain) from .utils import ( ABoverlap, AFNItoNIFTI, Autobox, Axialize, BrickStat, Bucket, Calc, Cat, diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index a2e9cb7f4d..189216112c 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -2853,6 +2853,53 @@ def _list_outputs(self): return outputs +class TSmoothInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3dTSmooth', + argstr='%s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='%s_smooth', + desc='output file from 3dTSmooth', + argstr='-prefix %s', + position=1, + name_source='in_file', + genfile=True) + adaptive = traits.Int( + desc='adaptive', + argstr='-adaptive %d', + position=-2, + mandatory=False) + + +class TSmooth(AFNICommand): + """Smooths each voxel time series in a 3D+time dataset and produces + as output a new 3D+time dataset (e.g., lowpass filter in time). + + For complete details, see the `3dBandpass Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> from nipype.testing import example_data + >>> smooth = afni.TSmooth() + >>> smooth.inputs.in_file = 'functional.nii' + >>> smooth.inputs.adaptive = 5 + >>> smooth.cmdline + '3dTsmooth -prefix functional_smooth -adaptive 5 functional.nii' + >>> res = smooth.run() # doctest: +SKIP + + """ + _cmd = '3dTsmooth' + input_spec = TSmoothInputSpec + output_spec = AFNICommandOutputSpec + + class VolregInputSpec(AFNICommandInputSpec): in_file = File( desc='input file to 3dvolreg', From f3410892fadfd4ed7d5dc82d402614d3ff12c017 Mon Sep 17 00:00:00 2001 From: Gio at UMCU Date: Wed, 26 Jun 2019 10:03:45 +0200 Subject: [PATCH 0362/1665] add all options for 3dTsmooth --- nipype/interfaces/afni/preprocess.py | 37 ++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 5 deletions(-) diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index 189216112c..12aefcc2a8 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -2865,21 +2865,48 @@ class TSmoothInputSpec(AFNICommandInputSpec): name_template='%s_smooth', desc='output file from 3dTSmooth', argstr='-prefix %s', - position=1, name_source='in_file', genfile=True) + datum = traits.Str( + desc='Sets the data type of the output dataset', + argstr='-datum %s') + lin = traits.Bool( + desc='3 point linear filter: 0.15*a + 0.70*b + 0.15*c' + '[This is the default smoother]', + argstr='-lin') + med = traits.Bool( + desc='3 point median filter: median(a,b,c)', + argstr='-med') + osf = traits.Bool( + desc='3 point order statistics filter:' + '0.15*min(a,b,c) + 0.70*median(a,b,c) + 0.15*max(a,b,c)', + argstr='-osf') + lin3 = traits.Int( + desc='3 point linear filter: 0.5*(1-m)*a + m*b + 0.5*(1-m)*c' + "Here, 'm' is a number strictly between 0 and 1.", + argstr='-3lin %d') + hamming = traits.Int( + argstr='-hamming %d', + desc='Use N point Hamming windows.' + '(N must be odd and bigger than 1.)') + blackman = traits.Int( + argstr='-blackman %d', + desc='Use N point Blackman windows.' + '(N must be odd and bigger than 1.)') + custom = File( + argstr='-custom %s', + desc='odd # of coefficients must be in a single column in ASCII file') adaptive = traits.Int( - desc='adaptive', argstr='-adaptive %d', - position=-2, - mandatory=False) + desc='use adaptive mean filtering of width N ' + '(where N must be odd and bigger than 3).') class TSmooth(AFNICommand): """Smooths each voxel time series in a 3D+time dataset and produces as output a new 3D+time dataset (e.g., lowpass filter in time). - For complete details, see the `3dBandpass Documentation. + For complete details, see the `3dTsmooth Documentation. 
`_ Examples From 0a05ce98917fec14fd74355575fdf5dcb671d3bb Mon Sep 17 00:00:00 2001 From: Gio at UMCU Date: Wed, 26 Jun 2019 10:11:14 +0200 Subject: [PATCH 0363/1665] added autotest for TSmooth --- .../afni/tests/test_auto_TSmooth.py | 51 +++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 nipype/interfaces/afni/tests/test_auto_TSmooth.py diff --git a/nipype/interfaces/afni/tests/test_auto_TSmooth.py b/nipype/interfaces/afni/tests/test_auto_TSmooth.py new file mode 100644 index 0000000000..9aa6eebe7f --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_TSmooth.py @@ -0,0 +1,51 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import TSmooth + + +def test_TSmooth_inputs(): + input_map = dict( + adaptive=dict(argstr='-adaptive %d', ), + args=dict(argstr='%s', ), + blackman=dict(argstr='-blackman %d', ), + custom=dict(argstr='-custom %s', ), + datum=dict(argstr='-datum %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + hamming=dict(argstr='-hamming %d', ), + in_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + lin=dict(argstr='-lin', ), + lin3=dict(argstr='-3lin %d', ), + med=dict(argstr='-med', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + osf=dict(argstr='-osf', ), + out_file=dict( + argstr='-prefix %s', + genfile=True, + name_source='in_file', + name_template='%s_smooth', + ), + outputtype=dict(), + ) + inputs = TSmooth.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TSmooth_outputs(): + output_map = dict(out_file=dict(), ) + outputs = TSmooth.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value From 8d76fd2f44af0c94b40a5ef7228f048ca02924d7 Mon Sep 17 00:00:00 2001 From: Gio at UMCU Date: Thu, 5 Sep 2019 10:00:17 +0200 Subject: [PATCH 0364/1665] remove genfile from out_file of TSmooth and fix order of arguments in command line of 3dTsmooth --- nipype/interfaces/afni/preprocess.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index 12aefcc2a8..7b7d252c9f 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -2865,8 +2865,7 @@ class TSmoothInputSpec(AFNICommandInputSpec): name_template='%s_smooth', desc='output file from 3dTSmooth', argstr='-prefix %s', - name_source='in_file', - genfile=True) + name_source='in_file') datum = traits.Str( desc='Sets the data type of the output dataset', argstr='-datum %s') @@ -2918,7 +2917,7 @@ class TSmooth(AFNICommand): >>> smooth.inputs.in_file = 'functional.nii' >>> smooth.inputs.adaptive = 5 >>> smooth.cmdline - '3dTsmooth -prefix functional_smooth -adaptive 5 functional.nii' + '3dTsmooth -adaptive 5 -prefix functional_smooth functional.nii' >>> res = smooth.run() # doctest: +SKIP """ From 0fc9b8863a735aa5b80794c83fa40df053158c7b Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Fri, 6 Sep 2019 09:29:57 -0400 Subject: [PATCH 0365/1665] Remove versioning from loadpkl --- nipype/utils/filemanip.py | 7 ++----- nipype/utils/tests/test_filemanip.py | 9 ++++----- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 26859196fc..4f04b73a1e 100644 --- 
a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -670,12 +670,12 @@ def load_json(filename): def loadcrash(infile, *args): if infile.endswith('pkl') or infile.endswith('pklz'): - return loadpkl(infile, versioning=True) + return loadpkl(infile) else: raise ValueError('Only pickled crashfiles are supported') -def loadpkl(infile, versioning=False): +def loadpkl(infile): """Load a zipped or plain cPickled file.""" infile = Path(infile) fmlogger.debug('Loading pkl: %s', infile) @@ -700,9 +700,6 @@ def loadpkl(infile, versioning=False): fmlogger.info('Successfully loaded pkl in compatibility mode.') # Unpickling problems except Exception as e: - if not versioning: - raise e - if pkl_metadata and 'version' in pkl_metadata: from nipype import __version__ as version if pkl_metadata['version'] != version: diff --git a/nipype/utils/tests/test_filemanip.py b/nipype/utils/tests/test_filemanip.py index ecfd477504..4ec78e1984 100644 --- a/nipype/utils/tests/test_filemanip.py +++ b/nipype/utils/tests/test_filemanip.py @@ -558,7 +558,7 @@ def test_versioned_pklization(tmpdir): with mock.patch('nipype.utils.tests.test_filemanip.Pickled', PickledBreaker), \ mock.patch('nipype.__version__', '0.0.0'): - loadpkl('./pickled.pkz', versioning=True) + loadpkl('./pickled.pkz') def test_unversioned_pklization(tmpdir): @@ -569,7 +569,7 @@ def test_unversioned_pklization(tmpdir): with pytest.raises(Exception): with mock.patch('nipype.utils.tests.test_filemanip.Pickled', PickledBreaker): - loadpkl('./pickled.pkz', versioning=True) + loadpkl('./pickled.pkz') def test_Path_strict_resolve(tmpdir): @@ -589,11 +589,10 @@ def test_Path_strict_resolve(tmpdir): assert '%s/somefile.txt' % tmpdir == '%s' % testfile.resolve(strict=True) -@pytest.mark.parametrize("load_versioning", [True, False]) @pytest.mark.parametrize("save_versioning", [True, False]) -def test_pickle(tmp_path, save_versioning, load_versioning): +def test_pickle(tmp_path, save_versioning): testobj = 'iamateststr' pickle_fname = str(tmp_path / 'testpickle.pklz') savepkl(pickle_fname, testobj, versioning=save_versioning) - outobj = loadpkl(pickle_fname, versioning=load_versioning) + outobj = loadpkl(pickle_fname) assert outobj == testobj From 56e571532dc1470e63530d8a7e760db59d130ebd Mon Sep 17 00:00:00 2001 From: Gio at UMCU Date: Fri, 6 Sep 2019 16:20:34 +0200 Subject: [PATCH 0366/1665] added auto test for 3dTsmooth (without removing other files) --- nipype/interfaces/afni/tests/test_auto_TSmooth.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/nipype/interfaces/afni/tests/test_auto_TSmooth.py b/nipype/interfaces/afni/tests/test_auto_TSmooth.py index 9aa6eebe7f..cbcc871bc7 100644 --- a/nipype/interfaces/afni/tests/test_auto_TSmooth.py +++ b/nipype/interfaces/afni/tests/test_auto_TSmooth.py @@ -8,7 +8,10 @@ def test_TSmooth_inputs(): adaptive=dict(argstr='-adaptive %d', ), args=dict(argstr='%s', ), blackman=dict(argstr='-blackman %d', ), - custom=dict(argstr='-custom %s', ), + custom=dict( + argstr='-custom %s', + extensions=None, + ), datum=dict(argstr='-datum %s', ), environ=dict( nohash=True, @@ -18,6 +21,7 @@ def test_TSmooth_inputs(): in_file=dict( argstr='%s', copyfile=False, + extensions=None, mandatory=True, position=-1, ), @@ -31,7 +35,7 @@ def test_TSmooth_inputs(): osf=dict(argstr='-osf', ), out_file=dict( argstr='-prefix %s', - genfile=True, + extensions=None, name_source='in_file', name_template='%s_smooth', ), @@ -43,7 +47,7 @@ def test_TSmooth_inputs(): for metakey, value in list(metadata.items()): assert 
getattr(inputs.traits()[key], metakey) == value def test_TSmooth_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(extensions=None, ), ) outputs = TSmooth.output_spec() for key, metadata in list(output_map.items()): From 19ce2c13c7d2244129f4e69464539270c0ea919c Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 5 Sep 2019 09:21:31 -0700 Subject: [PATCH 0367/1665] fix(loadpkl): ensure a value is returned and it is not None Ref.: https://github.com/nipy/nipype/issues/3014#issuecomment-528408995 --- nipype/utils/filemanip.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 4f04b73a1e..6e6fb83623 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -682,6 +682,7 @@ def loadpkl(infile): pklopen = gzip.open if infile.suffix == '.pklz' else open pkl_metadata = None + unpkl = None with indirectory(infile.parent): pkl_file = pklopen(infile.name, 'rb') @@ -711,11 +712,14 @@ def loadpkl(infile): No metadata was found in the pkl file. Make sure you are currently using \ the same Nipype version from the generated pkl.""") raise e - else: - return unpkl finally: pkl_file.close() + if unpkl is None: + raise ValueError('Loading %s resulted in None.' % infile) + + return unpkl + def crash2txt(filename, record): """ Write out plain text crash file """ From 6b996c0e2c06dd4e6220bf9d1591101e75552b78 Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 5 Sep 2019 10:55:09 -0700 Subject: [PATCH 0368/1665] rel(1.2.2): Update changelog --- doc/changelog/1.X.X-changelog | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/doc/changelog/1.X.X-changelog b/doc/changelog/1.X.X-changelog index 9ef4474bf1..2d749b758b 100644 --- a/doc/changelog/1.X.X-changelog +++ b/doc/changelog/1.X.X-changelog @@ -1,3 +1,13 @@ +1.2.2 (September 05, 2019) +========================== + + * FIX: Update mne.WatershedBEM command line (https://github.com/nipy/nipype/pull/3007) + * FIX: Specify correct stop criterion flag in PETPVC (https://github.com/nipy/nipype/pull/3010) + * ENH: Additional arguments to ANTs N4BiasFieldCorrection (https://github.com/nipy/nipype/pull/3012) + * ENH: Add ``--rescale-intensities`` and name_source to N4BiasFieldCorrection (https://github.com/nipy/nipype/pull/3011) + * ENH: Add index_mask_file input to ImageStats (https://github.com/nipy/nipype/pull/3005) + * MAINT: Add ``python_requires`` to package metadata (https://github.com/nipy/nipype/pull/3006) + 1.2.1 (August 19, 2019) ======================= From 5f028b3e6c87ceaba8e592718b5c54b8d2aa952d Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 5 Sep 2019 11:05:07 -0700 Subject: [PATCH 0369/1665] rel(1.2.2): Update .mailmap --- .mailmap | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.mailmap b/.mailmap index 259fbe170d..2ffec2dcef 100644 --- a/.mailmap +++ b/.mailmap @@ -151,6 +151,7 @@ Michael Waskom mwaskom Michael Waskom mwaskom Michael Waskom mwaskom Miguel Molina-Romero Miguel Molina +Murat Bilgel Murat Bilgel Oliver Contier oliver-contier Oscar Esteban Oscar Esteban Oscar Esteban oesteban @@ -159,7 +160,7 @@ Pablo Polosecki pipolose psharp1289 Ranjit Khanuja RanjitK Rastko Ćirić rciric -Rastko Ćirić Rastko Ćirić +Rastko Ćirić Rastko Ćirić rciric Ross Markello Ross Markello Russell Poldrack Russ Poldrack Russell Poldrack poldrack @@ -182,7 +183,9 @@ Siqi Liu sql Steven Giavasis Steven Giavasis Steven Giavasis sgiavasis Steven Giavasis sgiavasis +Steven Tilley Steven Tilley Tristan Glatard 
Tristan Glatard +Victor Férat Victor Victor Ferat Victor Férat Victor Saase vsaase Weijie Huang forwho William Triplett William Triplett From 3428c8a45ecf060f6105ebf32ba6e7d2219b2061 Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 5 Sep 2019 11:36:53 -0700 Subject: [PATCH 0370/1665] rel(1.2.2): Update ``.zenodo.json`` MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Output of ``python tools/update_zenodo.py``: ``` No entry to sort: Victor Férat No entry to sort: Martin No entry to sort: Isaiah Norton No entry to sort: Niklas Förster No entry to sort: daniel glen No entry to sort: rciric No entry to sort: Kirstie Whitaker No entry to sort: hstojic No entry to sort: Gio Piantoni No entry to sort: Jonathan R. Williford No entry to sort: Hao No entry to sort: Fred Mertz No entry to sort: Murat Bilgel No entry to sort: Kevin Sitek No entry to sort: Ami Tsuchida No entry to sort: Abel González No entry to sort: Michiel Cottaar No entry to sort: Daniel Brenner ``` --- .zenodo.json | 71 +++++++++++++++++++++++++--------------------------- 1 file changed, 34 insertions(+), 37 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index bf1cb6f692..c1567cf65d 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -78,10 +78,6 @@ "affiliation": "Shattuck Lab, UCLA Brain Mapping Center", "name": "Wong, Jason" }, - { - "affiliation": "Concordia University", - "name": "Benderoff, Erin" - }, { "name": "Modat, Marc" }, @@ -150,6 +146,10 @@ { "name": "Berleant, Shoshana" }, + { + "affiliation": "Concordia University", + "name": "Benderoff, Erin" + }, { "affiliation": "Institute for Biomedical Engineering, ETH and University of Zurich", "name": "Christian, Horea", @@ -355,15 +355,15 @@ { "name": "Erickson, Drew" }, - { - "affiliation": "Child Mind Institute", - "name": "Giavasis, Steven" - }, { "affiliation": "NIMH IRP", "name": "Lee, John A.", "orcid": "0000-0001-5884-4247" }, + { + "affiliation": "Child Mind Institute", + "name": "Giavasis, Steven" + }, { "name": "Correa, Carlos" }, @@ -381,9 +381,6 @@ { "name": "Millman, Jarrod" }, - { - "name": "Lai, Jeff" - }, { "name": "Zhou, Dale" }, @@ -400,16 +397,16 @@ "affiliation": "The University of Sydney", "name": "Liu, Siqi" }, - { - "affiliation": "Leibniz Institute for Neurobiology", - "name": "Stadler, J\u00f6rg", - "orcid": "0000-0003-4313-129X" - }, { "affiliation": "University of Pennsylvania", "name": "Kahn, Ari E.", "orcid": "0000-0002-2127-0507" }, + { + "affiliation": "Leibniz Institute for Neurobiology", + "name": "Stadler, J\u00f6rg", + "orcid": "0000-0003-4313-129X" + }, { "affiliation": "University College London", "name": "P\u00e9rez-Garc\u00eda, Fernando", @@ -432,6 +429,11 @@ { "name": "Hallquist, Michael" }, + { + "affiliation": "Holland Bloorview Kids Rehabilitation Hospital", + "name": "Tilley II, Steven", + "orcid": "0000-0003-4853-5082" + }, { "affiliation": "Yale University; New Haven, CT, United States", "name": "Sisk, Lucinda M.", @@ -502,31 +504,26 @@ "name": "Perkins, L. 
Nathan" }, { - "affiliation": "University of Amsterdam", - "name": "Lukas Snoek", - "orcid": "0000-0001-8972-204X" + "name": "Marina, Ana" }, { - "affiliation": "Institute of Imaging & Computer Vision, RWTH Aachen University, Germany", - "name": "Weninger, Leon" + "name": "Mattfeld, Aaron" }, { - "affiliation": "Stanford University", - "name": "Lerma-Usabiaga, Garikoitz", - "orcid": "0000-0001-9800-4816" + "name": "Noel, Maxime" }, { - "name": "Marina, Ana" + "affiliation": "University of Amsterdam", + "name": "Lukas Snoek", + "orcid": "0000-0001-8972-204X" }, { - "name": "Mattfeld, Aaron" + "affiliation": "Institute of Imaging & Computer Vision, RWTH Aachen University, Germany", + "name": "Weninger, Leon" }, { "name": "Matsubara, K" }, - { - "name": "Noel, Maxime" - }, { "name": "Cheung, Brian" }, @@ -573,9 +570,6 @@ { "name": "Weinstein, Alejandro" }, - { - "name": "Tambini, Arielle" - }, { "affiliation": "Duke University", "name": "Broderick, William", @@ -612,6 +606,9 @@ { "name": "Tarbert, Claire" }, + { + "name": "Tambini, Arielle" + }, { "name": "Nickson, Thomas" }, @@ -636,6 +633,11 @@ { "name": "Flandin, Guillaume" }, + { + "affiliation": "Stanford University", + "name": "Lerma-Usabiaga, Garikoitz", + "orcid": "0000-0001-9800-4816" + }, { "affiliation": "Vrije Universiteit Amsterdam", "name": "Ort, Eduard" @@ -695,11 +697,6 @@ "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", "orcid": "0000-0002-5312-6729" - }, - { - "affiliation": "Holland Bloorview Kids Rehabilitation Hospital", - "name": "Tilley II, Steven", - "orcid": "0000-0003-4853-5082" } ], "keywords": [ From 968a76f0887996da853542ec5baed62cce1348f0 Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 5 Sep 2019 11:39:48 -0700 Subject: [PATCH 0371/1665] rel(1.2.2): Update version in ``nipype/info.py`` and ``doc/conf.py`` --- doc/conf.py | 2 +- nipype/info.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index 2a253eb7aa..bdf938a70c 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -82,7 +82,7 @@ # The short X.Y version. version = nipype.__version__ # The full version, including alpha/beta/rc tags. -release = "1.2.1" +release = "1.2.2" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/nipype/info.py b/nipype/info.py index 827bc93a12..7c13e94418 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -9,7 +9,7 @@ # nipype version information # Remove -dev for release -__version__ = '1.2.2-dev' +__version__ = '1.2.2' def get_nipype_gitversion(): From 7df091d2689700801aeeb7ed75e9e7515c2906d9 Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 5 Sep 2019 12:15:02 -0700 Subject: [PATCH 0372/1665] rel(1.2.2): revise problems spotted by @effigies' --- .mailmap | 7 +++++-- .zenodo.json | 3 +++ doc/changelog/1.X.X-changelog | 2 ++ tools/update_zenodo.py | 1 + 4 files changed, 11 insertions(+), 2 deletions(-) diff --git a/.mailmap b/.mailmap index 2ffec2dcef..946919557c 100644 --- a/.mailmap +++ b/.mailmap @@ -160,7 +160,8 @@ Pablo Polosecki pipolose psharp1289 Ranjit Khanuja RanjitK Rastko Ćirić rciric -Rastko Ćirić Rastko Ćirić rciric +Rastko Ćirić Rastko Ćirić +Rastko Ćirić rciric Ross Markello Ross Markello Russell Poldrack Russ Poldrack Russell Poldrack poldrack @@ -185,7 +186,9 @@ Steven Giavasis sgiavasis Steven Giavasis sgiavasis Steven Tilley Steven Tilley Tristan Glatard Tristan Glatard -Victor Férat Victor Victor Ferat Victor Férat +Victor Férat Victor +Victor Férat Victor Ferat +Victor Férat Victor Férat Victor Saase vsaase Weijie Huang forwho William Triplett William Triplett diff --git a/.zenodo.json b/.zenodo.json index c1567cf65d..8d72952ade 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -693,6 +693,9 @@ "name": "Mihai, Paul Glad", "orcid": "0000-0001-5715-6442" }, + { + "name": "Lai, Jeff" + }, { "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", diff --git a/doc/changelog/1.X.X-changelog b/doc/changelog/1.X.X-changelog index 2d749b758b..843fe5a209 100644 --- a/doc/changelog/1.X.X-changelog +++ b/doc/changelog/1.X.X-changelog @@ -1,6 +1,8 @@ 1.2.2 (September 05, 2019) ========================== +##### [Full changelog](https://github.com/nipy/nipype/milestone/33?closed=1) + * FIX: Update mne.WatershedBEM command line (https://github.com/nipy/nipype/pull/3007) * FIX: Specify correct stop criterion flag in PETPVC (https://github.com/nipy/nipype/pull/3010) * ENH: Additional arguments to ANTs N4BiasFieldCorrection (https://github.com/nipy/nipype/pull/3012) diff --git a/tools/update_zenodo.py b/tools/update_zenodo.py index 637defad49..163f88084d 100755 --- a/tools/update_zenodo.py +++ b/tools/update_zenodo.py @@ -58,6 +58,7 @@ "Leipzig, Germany.", "name": "Mihai, Paul Glad", "orcid": "0000-0001-5715-6442"}, + {"name": "Lai, Jeff"} ] for entry in missing_entries: From 4ac9ecd68ed938a97de400507396e248ffcbf75c Mon Sep 17 00:00:00 2001 From: Murat Bilgel Date: Thu, 5 Sep 2019 15:17:31 -0400 Subject: [PATCH 0373/1665] rel(1.2.2): Add @bilgelm to ``.zenodo.json``, cherry-picking e520a8832 --- .zenodo.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.zenodo.json b/.zenodo.json index 8d72952ade..f0ae25659e 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -592,6 +592,11 @@ { "name": "Khanuja, Ranjeet" }, + { + "affiliation": "National Institute on Aging, Baltimore, MD, USA", + "name": "Bilgel, Murat", + "orcid": "0000-0001-5042-7422" + }, { "name": "Schlamp, Kai" }, From 875c75c907bb89ed4d908c1a5da15d257b5e5a23 Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 5 Sep 2019 23:50:58 -0700 Subject: [PATCH 0374/1665] rel(1.2.2): update .zenodo.json --- .zenodo.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index f0ae25659e..1850eec791 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -390,6 +390,11 
@@ { "name": "Haselgrove, Christian" }, + { + "affiliation": "Holland Bloorview Kids Rehabilitation Hospital", + "name": "Tilley II, Steven", + "orcid": "0000-0003-4853-5082" + }, { "name": "Renfro, Mandy" }, @@ -429,11 +434,6 @@ { "name": "Hallquist, Michael" }, - { - "affiliation": "Holland Bloorview Kids Rehabilitation Hospital", - "name": "Tilley II, Steven", - "orcid": "0000-0003-4853-5082" - }, { "affiliation": "Yale University; New Haven, CT, United States", "name": "Sisk, Lucinda M.", From b61585f733ee6cadc699648059c70a9351e1baec Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 5 Sep 2019 23:54:21 -0700 Subject: [PATCH 0375/1665] rel(1.2.2): Update changelog --- doc/changelog/1.X.X-changelog | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/changelog/1.X.X-changelog b/doc/changelog/1.X.X-changelog index 843fe5a209..eae851279a 100644 --- a/doc/changelog/1.X.X-changelog +++ b/doc/changelog/1.X.X-changelog @@ -3,6 +3,7 @@ ##### [Full changelog](https://github.com/nipy/nipype/milestone/33?closed=1) + * FIX: ``loadpkl`` failed when pklz file contained versioning info (https://github.com/nipy/nipype/pull/3017) * FIX: Update mne.WatershedBEM command line (https://github.com/nipy/nipype/pull/3007) * FIX: Specify correct stop criterion flag in PETPVC (https://github.com/nipy/nipype/pull/3010) * ENH: Additional arguments to ANTs N4BiasFieldCorrection (https://github.com/nipy/nipype/pull/3012) From 5f8be53fa72475e8371b48e548fdedf01a01e3b3 Mon Sep 17 00:00:00 2001 From: oesteban Date: Fri, 6 Sep 2019 14:41:06 -0700 Subject: [PATCH 0376/1665] rel(1.2.2): Update changelog --- doc/changelog/1.X.X-changelog | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/changelog/1.X.X-changelog b/doc/changelog/1.X.X-changelog index eae851279a..014c1485d0 100644 --- a/doc/changelog/1.X.X-changelog +++ b/doc/changelog/1.X.X-changelog @@ -3,6 +3,7 @@ ##### [Full changelog](https://github.com/nipy/nipype/milestone/33?closed=1) + * FIX: Ensure ``loadpkl`` returns a not None value (https://github.com/nipy/nipype/pull/3020) * FIX: ``loadpkl`` failed when pklz file contained versioning info (https://github.com/nipy/nipype/pull/3017) * FIX: Update mne.WatershedBEM command line (https://github.com/nipy/nipype/pull/3007) * FIX: Specify correct stop criterion flag in PETPVC (https://github.com/nipy/nipype/pull/3010) From 4b69cfcfa3d7a4b4f6399294da3fbff880016764 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sat, 7 Sep 2019 15:35:38 -0400 Subject: [PATCH 0377/1665] DOC: Update changelog --- doc/changelog/1.X.X-changelog | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/changelog/1.X.X-changelog b/doc/changelog/1.X.X-changelog index 014c1485d0..719752561e 100644 --- a/doc/changelog/1.X.X-changelog +++ b/doc/changelog/1.X.X-changelog @@ -1,4 +1,4 @@ -1.2.2 (September 05, 2019) +1.2.2 (September 07, 2019) ========================== ##### [Full changelog](https://github.com/nipy/nipype/milestone/33?closed=1) @@ -7,9 +7,11 @@ * FIX: ``loadpkl`` failed when pklz file contained versioning info (https://github.com/nipy/nipype/pull/3017) * FIX: Update mne.WatershedBEM command line (https://github.com/nipy/nipype/pull/3007) * FIX: Specify correct stop criterion flag in PETPVC (https://github.com/nipy/nipype/pull/3010) + * ENH: add interface for AFNI 3dTsmooth (https://github.com/nipy/nipype/pull/2948) * ENH: Additional arguments to ANTs N4BiasFieldCorrection (https://github.com/nipy/nipype/pull/3012) * ENH: Add ``--rescale-intensities`` and name_source to N4BiasFieldCorrection (https://github.com/nipy/nipype/pull/3011) * ENH: Add index_mask_file input to ImageStats (https://github.com/nipy/nipype/pull/3005) + * RF: Remove versioning from loadpkl (https://github.com/nipy/nipype/pull/3019) * MAINT: Add ``python_requires`` to package metadata (https://github.com/nipy/nipype/pull/3006) 1.2.1 (August 19, 2019) From 1faaa3f2d8ab0b93f50ae9d849f6ea29f69337a1 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 7 Sep 2019 15:38:55 -0400 Subject: [PATCH 0378/1665] MAINT: Bump dev version --- doc/documentation.rst | 2 +- nipype/info.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/documentation.rst b/doc/documentation.rst index ab33e90904..b285811531 100644 --- a/doc/documentation.rst +++ b/doc/documentation.rst @@ -9,7 +9,7 @@ Documentation :Release: |version| :Date: |today| -Previous versions: `1.2.1 `_ `1.2.0 `_ +Previous versions: `1.2.2 `_ `1.2.1 `_ .. 
container:: doc2 diff --git a/nipype/info.py b/nipype/info.py index 7c13e94418..951c93c528 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -9,7 +9,7 @@ # nipype version information # Remove -dev for release -__version__ = '1.2.2' +__version__ = '1.2.3-dev' def get_nipype_gitversion(): From 9cf76220b6ce776a2f40ce4a86dff9672cc43ea7 Mon Sep 17 00:00:00 2001 From: oesteban Date: Sat, 7 Sep 2019 16:17:10 -0700 Subject: [PATCH 0379/1665] rel(1.2.2): Update changelog --- doc/changelog/1.X.X-changelog | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/changelog/1.X.X-changelog b/doc/changelog/1.X.X-changelog index 719752561e..71363170df 100644 --- a/doc/changelog/1.X.X-changelog +++ b/doc/changelog/1.X.X-changelog @@ -7,11 +7,11 @@ * FIX: ``loadpkl`` failed when pklz file contained versioning info (https://github.com/nipy/nipype/pull/3017) * FIX: Update mne.WatershedBEM command line (https://github.com/nipy/nipype/pull/3007) * FIX: Specify correct stop criterion flag in PETPVC (https://github.com/nipy/nipype/pull/3010) - * ENH: add interface for AFNI 3dTsmooth (https://github.com/nipy/nipype/pull/2948) + * ENH: Add interface for AFNI ``3dTsmooth`` (https://github.com/nipy/nipype/pull/2948) * ENH: Additional arguments to ANTs N4BiasFieldCorrection (https://github.com/nipy/nipype/pull/3012) * ENH: Add ``--rescale-intensities`` and name_source to N4BiasFieldCorrection (https://github.com/nipy/nipype/pull/3011) * ENH: Add index_mask_file input to ImageStats (https://github.com/nipy/nipype/pull/3005) - * RF: Remove versioning from loadpkl (https://github.com/nipy/nipype/pull/3019) + * RF: Remove versioning from ``loadpkl`` (https://github.com/nipy/nipype/pull/3019) * MAINT: Add ``python_requires`` to package metadata (https://github.com/nipy/nipype/pull/3006) 1.2.1 (August 19, 2019) From 51fb423c11e792992cfbf9ee5a048c362add6221 Mon Sep 17 00:00:00 2001 From: oesteban Date: Sat, 7 Sep 2019 16:22:08 -0700 Subject: [PATCH 0380/1665] rel(1.2.2): Update .zenodo.json [skip ci] --- .zenodo.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index 1850eec791..06bd8a4ebc 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -293,11 +293,11 @@ "orcid": "0000-0002-7796-8795" }, { - "name": "Heinsfeld, Anibal S\u00f3lon", - "orcid": "0000-0002-2050-0614" + "name": "Kent, James" }, { - "name": "Kent, James" + "name": "Heinsfeld, Anibal S\u00f3lon", + "orcid": "0000-0002-2050-0614" }, { "name": "Watanabe, Aimi" From 651a042029ae1e67230dc8dd9c0f6d1451341f96 Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Tue, 10 Sep 2019 14:17:55 -0400 Subject: [PATCH 0381/1665] enh: replace portalocker with filelock and use locking for reading/writing pickled files --- nipype/algorithms/misc.py | 13 +-- nipype/external/cloghandler.py | 23 ++---- nipype/external/portalocker.py | 145 --------------------------------- nipype/utils/config.py | 22 ++--- nipype/utils/filemanip.py | 85 +++++++++---------- requirements.txt | 1 + 6 files changed, 64 insertions(+), 225 deletions(-) delete mode 100644 nipype/external/portalocker.py diff --git a/nipype/algorithms/misc.py b/nipype/algorithms/misc.py index a2eaad3610..7e0305aa36 100644 --- a/nipype/algorithms/misc.py +++ b/nipype/algorithms/misc.py @@ -799,11 +799,11 @@ def _run_interface(self, runtime): '(http://pandas.pydata.org/) to run.'), e) try: - import lockfile as pl + from filelock import SoftFileLock self._have_lock = True except ImportError: from warnings import warn - warn(('Python module lockfile was not 
found: AddCSVRow will not be' + warn(('Python module filelock was not found: AddCSVRow will not be' ' thread-safe in multi-processor execution')) input_dict = {} @@ -822,7 +822,7 @@ def _run_interface(self, runtime): df = pd.DataFrame([input_dict]) if self._have_lock: - self._lock = pl.FileLock(self.inputs.in_file) + self._lock = SoftFileLock(self.inputs.in_file + '.lock') # Acquire lock self._lock.acquire() @@ -837,13 +837,6 @@ def _run_interface(self, runtime): if self._have_lock: self._lock.release() - # Using nipype.external.portalocker this might be something like: - # with pl.Lock(self.inputs.in_file, timeout=1) as fh: - # if op.exists(fh): - # formerdf = pd.read_csv(fh, index_col=0) - # df = pd.concat([formerdf, df], ignore_index=True) - # df.to_csv(fh) - return runtime def _list_outputs(self): diff --git a/nipype/external/cloghandler.py b/nipype/external/cloghandler.py index 5fda934c84..d99435b94f 100644 --- a/nipype/external/cloghandler.py +++ b/nipype/external/cloghandler.py @@ -36,10 +36,6 @@ testing, performance was more than adequate, but if you need a high-volume or low-latency solution, I suggest you look elsewhere. -This module currently only support the 'nt' and 'posix' platforms due to the -usage of the portalocker module. I do not have access to any other platforms -for testing, patches are welcome. - See the README file for an example usage of this module. """ @@ -63,13 +59,7 @@ except ImportError: codecs = None -# Question/TODO: Should we have a fallback mode if we can't load portalocker / -# we should still be better off than with the standard RotattingFileHandler -# class, right? We do some rename checking... that should prevent some file -# clobbering that the builtin class allows. - -# sibling module than handles all the ugly platform-specific details of file locking -from .portalocker import lock, unlock, LOCK_EX, LOCK_NB, LockException +from filelock import SoftFileLock # A client can set this to true to automatically convert relative paths to # absolute paths (which will also hide the absolute path warnings) @@ -168,11 +158,8 @@ def __init__(self, self.maxBytes = maxBytes self.backupCount = backupCount # Prevent multiple extensions on the lock file (Only handles the normal "*.log" case.) - if filename.endswith(".log"): - lock_file = filename[:-4] - else: - lock_file = filename - self.stream_lock = open(lock_file + ".lock", "w") + self.lock_file = filename + '.lock' + self.stream_lock = SoftFileLock(self.lock_file) # For debug mode, swap out the "_degrade()" method with a more a verbose one. if debug: @@ -189,7 +176,7 @@ def acquire(self): in 'degraded' mode. """ # handle thread lock Handler.acquire(self) - lock(self.stream_lock, LOCK_EX) + self.stream_lock.acquire() if self.stream.closed: self._openFile(self.mode) @@ -206,7 +193,7 @@ def release(self): self.stream.close() finally: try: - unlock(self.stream_lock) + self.stream_lock.release() finally: # release thread lock Handler.release(self) diff --git a/nipype/external/portalocker.py b/nipype/external/portalocker.py deleted file mode 100644 index 1da24d894c..0000000000 --- a/nipype/external/portalocker.py +++ /dev/null @@ -1,145 +0,0 @@ -# -*- coding: utf-8 -*- -# portalocker.py - Cross-platform (posix/nt) API for flock-style file locking. -# Requires python 1.5.2 or better. -'''Cross-platform (posix/nt) API for flock-style file locking. 
- -Synopsis: - - import portalocker - file = open('somefile', 'r+') - portalocker.lock(file, portalocker.LOCK_EX) - file.seek(12) - file.write('foo') - file.close() - -If you know what you're doing, you may choose to - - portalocker.unlock(file) - -before closing the file, but why? - -Methods: - - lock( file, flags ) - unlock( file ) - -Constants: - - LOCK_EX - LOCK_SH - LOCK_NB - -Exceptions: - - LockException - -Notes: - -For the 'nt' platform, this module requires the Python Extensions for Windows. -Be aware that this may not work as expected on Windows 95/98/ME. - -History: - -I learned the win32 technique for locking files from sample code -provided by John Nielsen in the documentation -that accompanies the win32 modules. - -Author: Jonathan Feinberg , - Lowell Alleman -Version: $Id: portalocker.py 5474 2008-05-16 20:53:50Z lowell $ - -''' -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import open - -__all__ = [ - 'lock', - 'unlock', - 'LOCK_EX', - 'LOCK_SH', - 'LOCK_NB', - 'LockException', -] - -import os - - -class LockException(Exception): - # Error codes: - LOCK_FAILED = 1 - - -if os.name == 'nt': - import win32con - import win32file - import pywintypes - LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK - LOCK_SH = 0 # the default - LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY - # is there any reason not to reuse the following structure? - __overlapped = pywintypes.OVERLAPPED() -elif os.name == 'posix': - import fcntl - LOCK_EX = fcntl.LOCK_EX - LOCK_SH = fcntl.LOCK_SH - LOCK_NB = fcntl.LOCK_NB -else: - raise RuntimeError('PortaLocker only defined for nt and posix platforms') - -if os.name == 'nt': - - def lock(file, flags): - hfile = win32file._get_osfhandle(file.fileno()) - try: - win32file.LockFileEx(hfile, flags, 0, -0x10000, __overlapped) - except pywintypes.error as exc_value: - # error: (33, 'LockFileEx', 'The process cannot access the file - # because another process has locked a portion of the file.') - if exc_value[0] == 33: - raise LockException(LockException.LOCK_FAILED, exc_value[2]) - else: - # Q: Are there exceptions/codes we should be dealing with here? - raise - - def unlock(file): - hfile = win32file._get_osfhandle(file.fileno()) - try: - win32file.UnlockFileEx(hfile, 0, -0x10000, __overlapped) - except pywintypes.error as exc_value: - if exc_value[0] == 158: - # error: (158, 'UnlockFileEx', 'The segment is already - # unlocked.') To match the 'posix' implementation, silently - # ignore this error - pass - else: - # Q: Are there exceptions/codes we should be dealing with here? - raise - -elif os.name == 'posix': - - def lock(file, flags): - try: - fcntl.flock(file.fileno(), flags) - except IOError as exc_value: - # The exception code varies on different systems so we'll catch - # every IO error - raise LockException(*exc_value) - - def unlock(file): - fcntl.flock(file.fileno(), fcntl.LOCK_UN) - - -if __name__ == '__main__': - from time import time, strftime, localtime - import sys - from . import portalocker - - log = open('log.txt', 'a+') - portalocker.lock(log, portalocker.LOCK_EX) - timestamp = strftime('%m/%d/%Y %H:%M:%S\n', localtime(time())) - log.write(timestamp) - - print('Wrote lines. 
Hit enter to release lock.') - dummy = sys.stdin.readline() - log.close() diff --git a/nipype/utils/config.py b/nipype/utils/config.py index 79c0bf6b51..2fd56f11a7 100644 --- a/nipype/utils/config.py +++ b/nipype/utils/config.py @@ -25,7 +25,7 @@ from future import standard_library from .misc import str2bool -from ..external import portalocker +from filelock import SoftFileLock standard_library.install_aliases() @@ -209,9 +209,9 @@ def get_data(self, key): """Read options file""" if not os.path.exists(self.data_file): return None - with open(self.data_file, 'rt') as file: - portalocker.lock(file, portalocker.LOCK_EX) - datadict = load(file) + with SoftFileLock(self.data_file + '.lock'): + with open(self.data_file, 'rt') as file: + datadict = load(file) if key in datadict: return datadict[key] return None @@ -220,17 +220,17 @@ def save_data(self, key, value): """Store config flie""" datadict = {} if os.path.exists(self.data_file): - with open(self.data_file, 'rt') as file: - portalocker.lock(file, portalocker.LOCK_EX) - datadict = load(file) + with SoftFileLock(self.data_file + '.lock'): + with open(self.data_file, 'rt') as file: + datadict = load(file) else: dirname = os.path.dirname(self.data_file) if not os.path.exists(dirname): mkdir_p(dirname) - with open(self.data_file, 'wt') as file: - portalocker.lock(file, portalocker.LOCK_EX) - datadict[key] = value - dump(datadict, file) + with SoftFileLock(self.data_file + '.lock'): + with open(self.data_file, 'wt') as file: + datadict[key] = value + dump(datadict, file) def update_config(self, config_dict): """Extend internal dictionary with config_dict""" diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 6e6fb83623..51373ebc0e 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -21,6 +21,7 @@ import contextlib import posixpath import simplejson as json +from filelock import SoftFileLock from builtins import str, bytes, open @@ -684,36 +685,37 @@ def loadpkl(infile): unpkl = None with indirectory(infile.parent): - pkl_file = pklopen(infile.name, 'rb') - - try: # Look if pkl file contains version file - pkl_metadata_line = pkl_file.readline() - pkl_metadata = json.loads(pkl_metadata_line) - except (UnicodeDecodeError, json.JSONDecodeError): - # Could not get version info - pkl_file.seek(0) - - try: - unpkl = pickle.load(pkl_file) - except UnicodeDecodeError: - # Was this pickle created with Python 2.x? - unpkl = pickle.load(pkl_file, fix_imports=True, encoding='utf-8') - fmlogger.info('Successfully loaded pkl in compatibility mode.') - # Unpickling problems - except Exception as e: - if pkl_metadata and 'version' in pkl_metadata: - from nipype import __version__ as version - if pkl_metadata['version'] != version: - fmlogger.error("""\ -Attempted to open a results file generated by Nipype version %s, \ -with an incompatible Nipype version (%s)""", pkl_metadata['version'], version) - raise e - fmlogger.error("""\ -No metadata was found in the pkl file. 
Make sure you are currently using \ -the same Nipype version from the generated pkl.""") - raise e - finally: - pkl_file.close() + with SoftFileLock(infile.name + '.lock'): + pkl_file = pklopen(infile.name, 'rb') + + try: # Look if pkl file contains version file + pkl_metadata_line = pkl_file.readline() + pkl_metadata = json.loads(pkl_metadata_line) + except (UnicodeDecodeError, json.JSONDecodeError): + # Could not get version info + pkl_file.seek(0) + + try: + unpkl = pickle.load(pkl_file) + except UnicodeDecodeError: + # Was this pickle created with Python 2.x? + unpkl = pickle.load(pkl_file, fix_imports=True, encoding='utf-8') + fmlogger.info('Successfully loaded pkl in compatibility mode.') + # Unpickling problems + except Exception as e: + if pkl_metadata and 'version' in pkl_metadata: + from nipype import __version__ as version + if pkl_metadata['version'] != version: + fmlogger.error("""\ + Attempted to open a results file generated by Nipype version %s, \ + with an incompatible Nipype version (%s)""", pkl_metadata['version'], version) + raise e + fmlogger.error("""\ + No metadata was found in the pkl file. Make sure you are currently using \ + the same Nipype version from the generated pkl.""") + raise e + finally: + pkl_file.close() if unpkl is None: raise ValueError('Loading %s resulted in None.' % infile) @@ -754,20 +756,21 @@ def read_stream(stream, logger=None, encoding=None): def savepkl(filename, record, versioning=False): - if filename.endswith('pklz'): - pkl_file = gzip.open(filename, 'wb') - else: - pkl_file = open(filename, 'wb') + with SoftFileLock(filename + '.lock'): + if filename.endswith('pklz'): + pkl_file = gzip.open(filename, 'wb') + else: + pkl_file = open(filename, 'wb') - if versioning: - from nipype import __version__ as version - metadata = json.dumps({'version': version}) + if versioning: + from nipype import __version__ as version + metadata = json.dumps({'version': version}) - pkl_file.write(metadata.encode('utf-8')) - pkl_file.write('\n'.encode('utf-8')) + pkl_file.write(metadata.encode('utf-8')) + pkl_file.write('\n'.encode('utf-8')) - pickle.dump(record, pkl_file) - pkl_file.close() + pickle.dump(record, pkl_file) + pkl_file.close() rst_levels = ['=', '-', '~', '+'] diff --git a/requirements.txt b/requirements.txt index 99b97a19a6..44226da48b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,3 +16,4 @@ python-dateutil>=2.2 scipy>=0.14 simplejson>=3.8.0 traits>=4.6 +filelock>= 3.0.0 From e1c792bd214d0f1d74cec23cba9fa1bd4f3187b7 Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Tue, 10 Sep 2019 15:14:38 -0400 Subject: [PATCH 0382/1665] fix: add filelock to packages --- nipype/info.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nipype/info.py b/nipype/info.py index 951c93c528..83ce1ca43d 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -164,6 +164,7 @@ def get_nipype_gitversion(): 'scipy>=%s,<%s ; python_version <= "3.4"' % (SCIPY_MIN_VERSION, SCIPY_MAX_VERSION_34), 'simplejson>=%s' % SIMPLEJSON_MIN_VERSION, 'traits>=%s,!=5.0' % TRAITS_MIN_VERSION, + 'filelock>=3.0.0' ] # neurdflib has to come after prov From 26b0eb63cad767e7dc4d831d37e71fcc9ced7450 Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Tue, 10 Sep 2019 19:26:23 -0400 Subject: [PATCH 0383/1665] Apply suggestions from code review Co-Authored-By: Oscar Esteban --- nipype/algorithms/misc.py | 2 +- nipype/external/cloghandler.py | 2 +- nipype/utils/config.py | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/nipype/algorithms/misc.py 
b/nipype/algorithms/misc.py index 7e0305aa36..4fcd7c8d85 100644 --- a/nipype/algorithms/misc.py +++ b/nipype/algorithms/misc.py @@ -822,7 +822,7 @@ def _run_interface(self, runtime): df = pd.DataFrame([input_dict]) if self._have_lock: - self._lock = SoftFileLock(self.inputs.in_file + '.lock') + self._lock = SoftFileLock('%s.lock' % self.inputs.in_file) # Acquire lock self._lock.acquire() diff --git a/nipype/external/cloghandler.py b/nipype/external/cloghandler.py index d99435b94f..05e28968dd 100644 --- a/nipype/external/cloghandler.py +++ b/nipype/external/cloghandler.py @@ -158,7 +158,7 @@ def __init__(self, self.maxBytes = maxBytes self.backupCount = backupCount # Prevent multiple extensions on the lock file (Only handles the normal "*.log" case.) - self.lock_file = filename + '.lock' + self.lock_file = '%s.lock' % filename self.stream_lock = SoftFileLock(self.lock_file) # For debug mode, swap out the "_degrade()" method with a more a verbose one. diff --git a/nipype/utils/config.py b/nipype/utils/config.py index 2fd56f11a7..d6d6d0879d 100644 --- a/nipype/utils/config.py +++ b/nipype/utils/config.py @@ -209,7 +209,7 @@ def get_data(self, key): """Read options file""" if not os.path.exists(self.data_file): return None - with SoftFileLock(self.data_file + '.lock'): + with SoftFileLock('%s.lock' % self.data_file): with open(self.data_file, 'rt') as file: datadict = load(file) if key in datadict: @@ -220,14 +220,14 @@ def save_data(self, key, value): """Store config flie""" datadict = {} if os.path.exists(self.data_file): - with SoftFileLock(self.data_file + '.lock'): + with SoftFileLock('%s.lock' % self.data_file): with open(self.data_file, 'rt') as file: datadict = load(file) else: dirname = os.path.dirname(self.data_file) if not os.path.exists(dirname): mkdir_p(dirname) - with SoftFileLock(self.data_file + '.lock'): + with SoftFileLock('%s.lock' % self.data_file): with open(self.data_file, 'wt') as file: datadict[key] = value dump(datadict, file) From fdf12b759cebb46eccc5479fe10d648466bccd5f Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Tue, 10 Sep 2019 20:48:35 -0400 Subject: [PATCH 0384/1665] fix: address review comments --- nipype/utils/filemanip.py | 80 ++++++++++++++++++--------------------- 1 file changed, 36 insertions(+), 44 deletions(-) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 51373ebc0e..8fcfdc8beb 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -685,37 +685,33 @@ def loadpkl(infile): unpkl = None with indirectory(infile.parent): - with SoftFileLock(infile.name + '.lock'): - pkl_file = pklopen(infile.name, 'rb') - - try: # Look if pkl file contains version file - pkl_metadata_line = pkl_file.readline() - pkl_metadata = json.loads(pkl_metadata_line) - except (UnicodeDecodeError, json.JSONDecodeError): - # Could not get version info - pkl_file.seek(0) - - try: - unpkl = pickle.load(pkl_file) - except UnicodeDecodeError: - # Was this pickle created with Python 2.x? - unpkl = pickle.load(pkl_file, fix_imports=True, encoding='utf-8') - fmlogger.info('Successfully loaded pkl in compatibility mode.') - # Unpickling problems - except Exception as e: - if pkl_metadata and 'version' in pkl_metadata: - from nipype import __version__ as version - if pkl_metadata['version'] != version: - fmlogger.error("""\ - Attempted to open a results file generated by Nipype version %s, \ - with an incompatible Nipype version (%s)""", pkl_metadata['version'], version) - raise e - fmlogger.error("""\ - No metadata was found in the pkl file. 
Make sure you are currently using \ - the same Nipype version from the generated pkl.""") - raise e - finally: - pkl_file.close() + with SoftFileLock('%s.lock' % infile.name): + with pklopen(infile.name, 'rb') as pkl_file: + try: # Look if pkl file contains version file + pkl_metadata_line = pkl_file.readline() + pkl_metadata = json.loads(pkl_metadata_line) + except (UnicodeDecodeError, json.JSONDecodeError): + # Could not get version info + pkl_file.seek(0) + try: + unpkl = pickle.load(pkl_file) + except UnicodeDecodeError: + # Was this pickle created with Python 2.x? + unpkl = pickle.load(pkl_file, fix_imports=True, encoding='utf-8') + fmlogger.info('Successfully loaded pkl in compatibility mode.') + # Unpickling problems + except Exception as e: + if pkl_metadata and 'version' in pkl_metadata: + from nipype import __version__ as version + if pkl_metadata['version'] != version: + fmlogger.error("""\ +Attempted to open a results file generated by Nipype version %s, \ +with an incompatible Nipype version (%s)""", pkl_metadata['version'], version) + raise e + fmlogger.error("""\ +No metadata was found in the pkl file. Make sure you are currently using \ +the same Nipype version from the generated pkl.""") + raise e if unpkl is None: raise ValueError('Loading %s resulted in None.' % infile) @@ -756,21 +752,17 @@ def read_stream(stream, logger=None, encoding=None): def savepkl(filename, record, versioning=False): - with SoftFileLock(filename + '.lock'): - if filename.endswith('pklz'): - pkl_file = gzip.open(filename, 'wb') - else: - pkl_file = open(filename, 'wb') - - if versioning: - from nipype import __version__ as version - metadata = json.dumps({'version': version}) + pklopen = gzip.open if filename.endswith('.pklz') else open + with SoftFileLock('%s.lock' % filename): + with pklopen(filename, 'wb') as pkl_file: + if versioning: + from nipype import __version__ as version + metadata = json.dumps({'version': version}) - pkl_file.write(metadata.encode('utf-8')) - pkl_file.write('\n'.encode('utf-8')) + pkl_file.write(metadata.encode('utf-8')) + pkl_file.write('\n'.encode('utf-8')) - pickle.dump(record, pkl_file) - pkl_file.close() + pickle.dump(record, pkl_file) rst_levels = ['=', '-', '~', '+'] From c510d5e09618697301ff43d254ce2b6190a77aaf Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Mon, 9 Sep 2019 23:52:19 -0700 Subject: [PATCH 0385/1665] FIX: Disallow returning ``None`` in ``pipeline.utils.load_resultfile`` Prevents #3009 and #3014 from happening - although this might not solve those issues, this patch will help find their origin by making ``load_resultfile`` more strict (and letting it raise exceptions). The try .. except structure is moved to the only place is was being used within the Node code. 
--- nipype/pipeline/engine/nodes.py | 66 ++++++++++++++++++--------------- nipype/pipeline/engine/utils.py | 30 ++++----------- 2 files changed, 44 insertions(+), 52 deletions(-) diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index 2c441a5c57..57c566f890 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -195,7 +195,7 @@ def interface(self): def result(self): """Get result from result file (do not hold it in memory)""" return _load_resultfile( - op.join(self.output_dir(), 'result_%s.pklz' % self.name))[0] + op.join(self.output_dir(), 'result_%s.pklz' % self.name)) @property def inputs(self): @@ -518,7 +518,7 @@ def _get_inputs(self): logger.debug('input: %s', key) results_file = info[0] logger.debug('results file: %s', results_file) - outputs = _load_resultfile(results_file)[0].outputs + outputs = _load_resultfile(results_file).outputs if outputs is None: raise RuntimeError("""\ Error populating the input "%s" of node "%s": the results file of the source node \ @@ -565,34 +565,42 @@ def _run_interface(self, execute=True, updatehash=False): def _load_results(self): cwd = self.output_dir() - result, aggregate, attribute_error = _load_resultfile( - op.join(cwd, 'result_%s.pklz' % self.name)) + + try: + result = _load_resultfile( + op.join(cwd, 'result_%s.pklz' % self.name)) + except (traits.TraitError, EOFError): + logger.debug( + 'Error populating inputs/outputs, (re)aggregating results...') + except (AttributeError, ImportError) as err: + logger.debug('attribute error: %s probably using ' + 'different trait pickled file', str(err)) + old_inputs = loadpkl(op.join(cwd, '_inputs.pklz')) + self.inputs.trait_set(**old_inputs) + else: + return result + # try aggregating first - if aggregate: - logger.debug('aggregating results') - if attribute_error: - old_inputs = loadpkl(op.join(cwd, '_inputs.pklz')) - self.inputs.trait_set(**old_inputs) - if not isinstance(self, MapNode): - self._copyfiles_to_wd(linksonly=True) - aggouts = self._interface.aggregate_outputs( - needed_outputs=self.needed_outputs) - runtime = Bunch( - cwd=cwd, - returncode=0, - environ=dict(os.environ), - hostname=socket.gethostname()) - result = InterfaceResult( - interface=self._interface.__class__, - runtime=runtime, - inputs=self._interface.inputs.get_traitsfree(), - outputs=aggouts) - _save_resultfile( - result, cwd, self.name, - rebase=str2bool(self.config['execution']['use_relative_paths'])) - else: - logger.debug('aggregating mapnode results') - result = self._run_interface() + if not isinstance(self, MapNode): + self._copyfiles_to_wd(linksonly=True) + aggouts = self._interface.aggregate_outputs( + needed_outputs=self.needed_outputs) + runtime = Bunch( + cwd=cwd, + returncode=0, + environ=dict(os.environ), + hostname=socket.gethostname()) + result = InterfaceResult( + interface=self._interface.__class__, + runtime=runtime, + inputs=self._interface.inputs.get_traitsfree(), + outputs=aggouts) + _save_resultfile( + result, cwd, self.name, + rebase=str2bool(self.config['execution']['use_relative_paths'])) + else: + logger.debug('aggregating mapnode results') + result = self._run_interface() return result def _run_command(self, execute, copyfiles=True): diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index 65170f14c9..8c1595c64d 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -38,11 +38,12 @@ write_rst_header, write_rst_dict, write_rst_list, + FileNotFoundError, ) from ...utils.misc import 
str2bool from ...utils.functions import create_function_from_source from ...interfaces.base.traits_extension import ( - rebase_path_traits, resolve_path_traits, OutputMultiPath, isdefined, Undefined, traits) + rebase_path_traits, resolve_path_traits, OutputMultiPath, isdefined, Undefined) from ...interfaces.base.support import Bunch, InterfaceResult from ...interfaces.base import CommandLine from ...interfaces.utility import IdentityInterface @@ -281,38 +282,22 @@ def load_resultfile(results_file, resolve=True): Returns ------- result : InterfaceResult structure - aggregate : boolean indicating whether node should aggregate_outputs - attribute error : boolean indicating whether there was some mismatch in - versions of traits used to store result and hence node needs to - rerun """ results_file = Path(results_file) - aggregate = True - result = None - attribute_error = False if not results_file.exists(): - return result, aggregate, attribute_error + raise FileNotFoundError(results_file) with indirectory(str(results_file.parent)): - try: - result = loadpkl(results_file) - except (traits.TraitError, EOFError): - logger.debug( - 'some file does not exist. hence trait cannot be set') - except (AttributeError, ImportError) as err: - attribute_error = True - logger.debug('attribute error: %s probably using ' - 'different trait pickled file', str(err)) - else: - aggregate = False + result = loadpkl(results_file) if resolve and result.outputs: try: outputs = result.outputs.get() except TypeError: # This is a Bunch - return result, aggregate, attribute_error + logger.debug('Outputs object of loaded result %s is a Bunch.', results_file) + return result logger.debug('Resolving paths in outputs loaded from results file.') for trait_name, old in list(outputs.items()): @@ -323,8 +308,7 @@ def load_resultfile(results_file, resolve=True): value = resolve_path_traits(result.outputs.trait(trait_name), old, results_file.parent) setattr(result.outputs, trait_name, value) - - return result, aggregate, attribute_error + return result def strip_temp(files, wd): From 63d5b68b2f8bbeaaed2356394bf42ec863c89d9c Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Tue, 10 Sep 2019 00:38:01 -0700 Subject: [PATCH 0386/1665] fix: revise ``report_crash`` to get along with the changes --- nipype/pipeline/plugins/tools.py | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/nipype/pipeline/plugins/tools.py b/nipype/pipeline/plugins/tools.py index 54fffd2398..3ddc37387c 100644 --- a/nipype/pipeline/plugins/tools.py +++ b/nipype/pipeline/plugins/tools.py @@ -16,7 +16,7 @@ from traceback import format_exception from ... 
import logging -from ...utils.filemanip import savepkl, crash2txt, makedirs +from ...utils.filemanip import savepkl, crash2txt, makedirs, FileNotFoundError logger = logging.getLogger('nipype.workflow') @@ -26,17 +26,29 @@ def report_crash(node, traceback=None, hostname=None): """ name = node._id host = None - if node.result and getattr(node.result, 'runtime'): - if isinstance(node.result.runtime, list): - host = node.result.runtime[0].hostname + traceback = traceback or format_exception(*sys.exc_info()) + + try: + result = node.result + except FileNotFoundError: + traceback += """ + +When creating this crashfile, the results file corresponding +to the node could not be found.""".splitlines(keepends=True) + except Exception as exc: + traceback += """ + +During the creation of this crashfile triggered by the above exception, +another exception occurred:\n\n{}.""".format(exc).splitlines(keepends=True) + else: + if isinstance(result.runtime, list): + host = result.runtime[0].hostname else: - host = node.result.runtime.hostname + host = result.runtime.hostname # Try everything to fill in the host host = host or hostname or gethostname() logger.error('Node %s failed to run on host %s.', name, host) - if not traceback: - traceback = format_exception(*sys.exc_info()) timeofcrash = strftime('%Y%m%d-%H%M%S') try: login_name = getpass.getuser() From be6024301c85f76c9054e8dc0da9e1c5401df032 Mon Sep 17 00:00:00 2001 From: oesteban Date: Tue, 10 Sep 2019 07:43:17 -0700 Subject: [PATCH 0387/1665] tst: adding first test --- nipype/pipeline/engine/tests/test_utils.py | 16 ++++++- nipype/pipeline/engine/utils.py | 53 ++++++++++++---------- 2 files changed, 43 insertions(+), 26 deletions(-) diff --git a/nipype/pipeline/engine/tests/test_utils.py b/nipype/pipeline/engine/tests/test_utils.py index c462ea1533..eb4415f826 100644 --- a/nipype/pipeline/engine/tests/test_utils.py +++ b/nipype/pipeline/engine/tests/test_utils.py @@ -16,7 +16,7 @@ from ....interfaces import base as nib from ....interfaces import utility as niu from .... import config -from ..utils import clean_working_directory, write_workflow_prov +from ..utils import clean_working_directory, write_workflow_prov, load_resultfile class InputSpec(nib.TraitedSpec): @@ -283,3 +283,17 @@ def test_modify_paths_bug(tmpdir): assert outputs.out_dict_path == {out_str: out_path} assert outputs.out_dict_str == {out_str: out_str} assert outputs.out_list == [out_str] * 2 + + +def test_save_load_resultfile(tmpdir): + tmpdir.chdir() + + spc = pe.Node(StrPathConfuser(in_str='2'), name='spc') + spc.base_dir = tmpdir.mkdir('node').strpath + result = spc.run() + + loaded_result = load_resultfile(tmpdir.join('node').join('spc').join('result_spc.pklz')) + + assert result.runtime.dictcopy() == loaded_result.runtime.dictcopy() + assert result.inputs == loaded_result.inputs + assert result.outputs.get() == loaded_result.outputs.get() diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index 8c1595c64d..9aa53fb88d 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -274,40 +274,43 @@ def load_resultfile(results_file, resolve=True): """ Load InterfaceResult file from path. - Parameter - --------- - path : base_dir of node - name : name of node + Parameters + ---------- + results_file : pathlike + Path to an existing pickle (``result_.pklz``) created with + ``save_resultfile``. + Raises ``FileNotFoundError`` if ``results_file`` does not exist. 
+ resolve : bool + Determines whether relative paths will be resolved to absolute (default is ``True``). Returns ------- - result : InterfaceResult structure + result : InterfaceResult + A Nipype object containing the runtime, inputs, outputs and other interface information + such as a traceback in the case of errors. """ results_file = Path(results_file) - if not results_file.exists(): raise FileNotFoundError(results_file) - with indirectory(str(results_file.parent)): - result = loadpkl(results_file) - - if resolve and result.outputs: - try: - outputs = result.outputs.get() - except TypeError: # This is a Bunch - logger.debug('Outputs object of loaded result %s is a Bunch.', results_file) - return result - - logger.debug('Resolving paths in outputs loaded from results file.') - for trait_name, old in list(outputs.items()): - if isdefined(old): - if result.outputs.trait(trait_name).is_trait_type(OutputMultiPath): - old = result.outputs.trait(trait_name).handler.get_value( - result.outputs, trait_name) - value = resolve_path_traits(result.outputs.trait(trait_name), old, - results_file.parent) - setattr(result.outputs, trait_name, value) + result = loadpkl(results_file) + if resolve and result.outputs: + try: + outputs = result.outputs.get() + except TypeError: # This is a Bunch + logger.debug('Outputs object of loaded result %s is a Bunch.', results_file) + return result + + logger.debug('Resolving paths in outputs loaded from results file.') + for trait_name, old in list(outputs.items()): + if isdefined(old): + if result.outputs.trait(trait_name).is_trait_type(OutputMultiPath): + old = result.outputs.trait(trait_name).handler.get_value( + result.outputs, trait_name) + value = resolve_path_traits(result.outputs.trait(trait_name), old, + results_file.parent) + setattr(result.outputs, trait_name, value) return result From 49ada18b729690a19f852cb9d65fd7a086c1a229 Mon Sep 17 00:00:00 2001 From: oesteban Date: Tue, 10 Sep 2019 09:38:42 -0700 Subject: [PATCH 0388/1665] fix: amend tests to work on py35 and add mobility test --- nipype/pipeline/engine/tests/test_utils.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/nipype/pipeline/engine/tests/test_utils.py b/nipype/pipeline/engine/tests/test_utils.py index eb4415f826..3d89c88f17 100644 --- a/nipype/pipeline/engine/tests/test_utils.py +++ b/nipype/pipeline/engine/tests/test_utils.py @@ -16,7 +16,8 @@ from ....interfaces import base as nib from ....interfaces import utility as niu from .... import config -from ..utils import clean_working_directory, write_workflow_prov, load_resultfile +from ..utils import (clean_working_directory, write_workflow_prov, + save_resultfile, load_resultfile) class InputSpec(nib.TraitedSpec): @@ -286,14 +287,31 @@ def test_modify_paths_bug(tmpdir): def test_save_load_resultfile(tmpdir): + """Test minimally the save/load functions for result files.""" + from shutil import copytree tmpdir.chdir() spc = pe.Node(StrPathConfuser(in_str='2'), name='spc') spc.base_dir = tmpdir.mkdir('node').strpath result = spc.run() - loaded_result = load_resultfile(tmpdir.join('node').join('spc').join('result_spc.pklz')) + loaded_result = load_resultfile( + tmpdir.join('node').join('spc').join('result_spc.pklz').strpath) assert result.runtime.dictcopy() == loaded_result.runtime.dictcopy() assert result.inputs == loaded_result.inputs assert result.outputs.get() == loaded_result.outputs.get() + + # Test the mobility of the result file. 
+ copytree(tmpdir.join('node').strpath, tmpdir.join('node2').strpath) + save_resultfile(result, tmpdir.join('node2').strpath, 'spc', rebase=True) + loaded_result2 = load_resultfile( + tmpdir.join('node2').join('spc').join('result_spc.pklz').strpath) + + assert result.runtime.dictcopy() == loaded_result2.runtime.dictcopy() + assert result.inputs == loaded_result2.inputs + assert loaded_result2.outputs.get() != result.outputs.get() + newpath = result.outputs.out_path.replace('/node/', '/node2/') + assert loaded_result2.outputs.out_path == newpath + assert loaded_result2.outputs.out_tuple[0] == newpath + assert loaded_result2.outputs.out_dict_path['2'] == newpath From 0b510eded97e3837a35033cfdde2a7109bfc2e3e Mon Sep 17 00:00:00 2001 From: oesteban Date: Tue, 10 Sep 2019 17:04:28 -0700 Subject: [PATCH 0389/1665] fix: address @satra's comment --- nipype/pipeline/plugins/tools.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/nipype/pipeline/plugins/tools.py b/nipype/pipeline/plugins/tools.py index 3ddc37387c..ebf023b787 100644 --- a/nipype/pipeline/plugins/tools.py +++ b/nipype/pipeline/plugins/tools.py @@ -41,10 +41,11 @@ def report_crash(node, traceback=None, hostname=None): During the creation of this crashfile triggered by the above exception, another exception occurred:\n\n{}.""".format(exc).splitlines(keepends=True) else: - if isinstance(result.runtime, list): - host = result.runtime[0].hostname - else: - host = result.runtime.hostname + if getattr(result, 'runtime', None): + if isinstance(result.runtime, list): + host = result.runtime[0].hostname + else: + host = result.runtime.hostname # Try everything to fill in the host host = host or hostname or gethostname() @@ -61,7 +62,7 @@ def report_crash(node, traceback=None, hostname=None): makedirs(crashdir, exist_ok=True) crashfile = os.path.join(crashdir, crashfile) - if node.config['execution']['crashfile_format'].lower() in ['text', 'txt']: + if node.config['execution']['crashfile_format'].lower() in ('text', 'txt', '.txt'): crashfile += '.txt' else: crashfile += '.pklz' From a200bc5b151a1e8473d251beb5eddcb1d61d76e7 Mon Sep 17 00:00:00 2001 From: oesteban Date: Tue, 10 Sep 2019 17:04:48 -0700 Subject: [PATCH 0390/1665] fix: rebase arg not honored + improved tests (add xfail mark for python 2) --- nipype/pipeline/engine/tests/test_utils.py | 43 +++++++++++++++------- nipype/pipeline/engine/utils.py | 4 ++ nipype/utils/filemanip.py | 2 +- 3 files changed, 34 insertions(+), 15 deletions(-) diff --git a/nipype/pipeline/engine/tests/test_utils.py b/nipype/pipeline/engine/tests/test_utils.py index 3d89c88f17..dd44f430e1 100644 --- a/nipype/pipeline/engine/tests/test_utils.py +++ b/nipype/pipeline/engine/tests/test_utils.py @@ -17,7 +17,7 @@ from ....interfaces import utility as niu from .... 
import config from ..utils import (clean_working_directory, write_workflow_prov, - save_resultfile, load_resultfile) + load_resultfile) class InputSpec(nib.TraitedSpec): @@ -286,13 +286,20 @@ def test_modify_paths_bug(tmpdir): assert outputs.out_list == [out_str] * 2 -def test_save_load_resultfile(tmpdir): +@pytest.mark.xfail(sys.version_info < (3, 4), + reason="rebase does not fully work with Python 2.7") +@pytest.mark.parametrize("use_relative", [True, False]) +def test_save_load_resultfile(tmpdir, use_relative): """Test minimally the save/load functions for result files.""" - from shutil import copytree + from shutil import copytree, rmtree tmpdir.chdir() + old_use_relative = config.getboolean('execution', 'use_relative_paths') + config.set('execution', 'use_relative_paths', use_relative) + spc = pe.Node(StrPathConfuser(in_str='2'), name='spc') spc.base_dir = tmpdir.mkdir('node').strpath + result = spc.run() loaded_result = load_resultfile( @@ -304,14 +311,22 @@ def test_save_load_resultfile(tmpdir): # Test the mobility of the result file. copytree(tmpdir.join('node').strpath, tmpdir.join('node2').strpath) - save_resultfile(result, tmpdir.join('node2').strpath, 'spc', rebase=True) - loaded_result2 = load_resultfile( - tmpdir.join('node2').join('spc').join('result_spc.pklz').strpath) - - assert result.runtime.dictcopy() == loaded_result2.runtime.dictcopy() - assert result.inputs == loaded_result2.inputs - assert loaded_result2.outputs.get() != result.outputs.get() - newpath = result.outputs.out_path.replace('/node/', '/node2/') - assert loaded_result2.outputs.out_path == newpath - assert loaded_result2.outputs.out_tuple[0] == newpath - assert loaded_result2.outputs.out_dict_path['2'] == newpath + rmtree(tmpdir.join('node').strpath) + + if use_relative: + loaded_result2 = load_resultfile( + tmpdir.join('node2').join('spc').join('result_spc.pklz').strpath) + + assert result.runtime.dictcopy() == loaded_result2.runtime.dictcopy() + assert result.inputs == loaded_result2.inputs + assert loaded_result2.outputs.get() != result.outputs.get() + newpath = result.outputs.out_path.replace('/node/', '/node2/') + assert loaded_result2.outputs.out_path == newpath + assert loaded_result2.outputs.out_tuple[0] == newpath + assert loaded_result2.outputs.out_dict_path['2'] == newpath + else: + with pytest.raises(nib.TraitError): + load_resultfile( + tmpdir.join('node2').join('spc').join('result_spc.pklz').strpath) + + config.set('execution', 'use_relative_paths', old_use_relative) diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index 9aa53fb88d..d22150b7b8 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -250,6 +250,10 @@ def save_resultfile(result, cwd, name, rebase=None): savepkl(resultsfile, result) return + if not rebase: + savepkl(resultsfile, result) + return + backup_traits = {} try: with indirectory(cwd): diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 8fcfdc8beb..35b3de219b 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -708,7 +708,7 @@ def loadpkl(infile): Attempted to open a results file generated by Nipype version %s, \ with an incompatible Nipype version (%s)""", pkl_metadata['version'], version) raise e - fmlogger.error("""\ + fmlogger.warning("""\ No metadata was found in the pkl file. 
Make sure you are currently using \ the same Nipype version from the generated pkl.""") raise e From 686aef9f6beac904b260347f92bfbfb5ee3e8ea6 Mon Sep 17 00:00:00 2001 From: oesteban Date: Tue, 10 Sep 2019 22:40:46 -0700 Subject: [PATCH 0391/1665] ENH: Lightweight node cache checking Generating the hashvalue when outputs are not ready at cache check stage when the node's directory does not exist (or no results file is in there) leads to #3014. This PR preempts those problems by delaying the hashval calculation. --- nipype/pipeline/engine/nodes.py | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index 57c566f890..ce89026ad0 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -293,27 +293,29 @@ def is_cached(self, rm_outdated=False): """ outdir = self.output_dir() - # Update hash - hashed_inputs, hashvalue = self._get_hashval() - # The output folder does not exist: not cached - if not op.exists(outdir): - logger.debug('[Node] Directory not found "%s".', outdir) + if not op.exists(outdir) or \ + not op.exists(op.join(outdir, 'result_%s.pklz' % self.name)): + logger.debug('[Node] Not cached "%s".', outdir) return False, False - hashfile = op.join(outdir, '_0x%s.json' % hashvalue) - cached = op.exists(hashfile) - - # Check if updated + # Check if there are hashfiles globhashes = glob(op.join(outdir, '_0x*.json')) unfinished = [ path for path in globhashes if path.endswith('_unfinished.json') ] hashfiles = list(set(globhashes) - set(unfinished)) + + # Update hash + hashed_inputs, hashvalue = self._get_hashval() + + hashfile = op.join(outdir, '_0x%s.json' % hashvalue) logger.debug('[Node] Hashes: %s, %s, %s, %s', hashed_inputs, hashvalue, hashfile, hashfiles) + cached = op.exists(hashfile) + # No previous hashfiles found, we're all set. if cached and len(hashfiles) == 1: assert(hashfile == hashfiles[0]) @@ -441,6 +443,7 @@ def run(self, updatehash=False): for outdatedhash in glob(op.join(self.output_dir(), '_0x*.json')): os.remove(outdatedhash) + self._get_hashval() # Hashfile while running hashfile_unfinished = op.join( outdir, '_0x%s_unfinished.json' % self._hashvalue) From 57a8db2cd99f2c58138d4a29e7ed646349757677 Mon Sep 17 00:00:00 2001 From: oesteban Date: Wed, 11 Sep 2019 09:16:57 -0700 Subject: [PATCH 0392/1665] maint: add relevant code comment + minimal stylistic changes [skip ci] --- nipype/pipeline/engine/nodes.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index ce89026ad0..8fda6c775e 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -389,17 +389,17 @@ def hash_exists(self, updatehash=False): return cached, self._hashvalue, hashfile, self._hashed_inputs def run(self, updatehash=False): - """Execute the node in its directory. + """ + Execute the node in its directory. Parameters ---------- - updatehash: boolean When the hash stored in the output directory as a result of a previous run does not match that calculated for this execution, updatehash=True only updates the hash without re-running. - """ + """ if self.config is None: self.config = {} self.config = merge_dict(deepcopy(config._sections), self.config) @@ -443,6 +443,10 @@ def run(self, updatehash=False): for outdatedhash in glob(op.join(self.output_dir(), '_0x*.json')): os.remove(outdatedhash) + # _get_hashval needs to be called before running. 
When there is a valid (or seemingly + # valid cache), the is_cached() member updates the hashval via _get_hashval. + # However, if this node's folder doesn't exist or the result file is not found, then + # the hashval needs to be generated here. See #3026 for a larger context. self._get_hashval() # Hashfile while running hashfile_unfinished = op.join( From dd2903151e54fbc344326e897a02b9717c1da9f8 Mon Sep 17 00:00:00 2001 From: oesteban Date: Tue, 10 Sep 2019 10:42:00 -0700 Subject: [PATCH 0393/1665] ENH: Avoid loading result from file when writing reports Minimize the access to the ``result`` property when writing pre/post-execution reports. This modification should particularly preempt https://github.com/nipy/nipype/issues/3009#issuecomment-529799338 --- nipype/pipeline/engine/nodes.py | 10 ++-- nipype/pipeline/engine/utils.py | 90 +++++++++++++++------------------ 2 files changed, 46 insertions(+), 54 deletions(-) diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index 57c566f890..c3b132b5c6 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -37,7 +37,7 @@ from .utils import ( _parameterization_dir, save_hashfile as _save_hashfile, load_resultfile as _load_resultfile, save_resultfile as _save_resultfile, nodelist_runner as - _node_runner, strip_temp as _strip_temp, write_report, + _node_runner, strip_temp as _strip_temp, write_node_report, clean_working_directory, merge_dict, evaluate_connect_function) from .base import EngineBase @@ -464,8 +464,7 @@ def run(self, updatehash=False): # Store runtime-hashfile, pre-execution report, the node and the inputs set. _save_hashfile(hashfile_unfinished, self._hashed_inputs) - write_report( - self, report_type='preexec', is_mapnode=isinstance(self, MapNode)) + write_node_report(self, is_mapnode=isinstance(self, MapNode)) savepkl(op.join(outdir, '_node.pklz'), self) savepkl(op.join(outdir, '_inputs.pklz'), self.inputs.get_traitsfree()) @@ -484,8 +483,7 @@ def run(self, updatehash=False): # Tear-up after success shutil.move(hashfile_unfinished, hashfile_unfinished.replace('_unfinished', '')) - write_report( - self, report_type='postexec', is_mapnode=isinstance(self, MapNode)) + write_node_report(self, result=result, is_mapnode=isinstance(self, MapNode)) logger.info('[Node] Finished "%s".', self.fullname) return result @@ -1204,7 +1202,7 @@ def get_subnodes(self): """Generate subnodes of a mapnode and write pre-execution report""" self._get_inputs() self._check_iterfield() - write_report(self, report_type='preexec', is_mapnode=True) + write_node_report(self, result=None, is_mapnode=True) return [node for _, node in self._make_nodes()] def num_subnodes(self): diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index d22150b7b8..c8e17e7c65 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -116,68 +116,60 @@ def nodelist_runner(nodes, updatehash=False, stop_first=False): yield i, result, err -def write_report(node, report_type=None, is_mapnode=False): - """Write a report file for a node""" +def write_node_report(node, result=None, is_mapnode=False): + """Write a report file for a node.""" if not str2bool(node.config['execution']['create_report']): return - if report_type not in ['preexec', 'postexec']: - logger.warning('[Node] Unknown report type "%s".', report_type) - return - cwd = node.output_dir() - report_dir = os.path.join(cwd, '_report') - report_file = os.path.join(report_dir, 'report.rst') - makedirs(report_dir, 
exist_ok=True) - - logger.debug('[Node] Writing %s-exec report to "%s"', report_type[:-4], - report_file) - if report_type.startswith('pre'): - lines = [ - write_rst_header('Node: %s' % get_print_name(node), level=0), - write_rst_list( - ['Hierarchy : %s' % node.fullname, - 'Exec ID : %s' % node._id]), - write_rst_header('Original Inputs', level=1), - write_rst_dict(node.inputs.trait_get()), - ] - with open(report_file, 'wt') as fp: - fp.write('\n'.join(lines)) - return + report_file = Path(cwd) / '_report' / 'report.rst' + report_file.parent.mkdir(exist_ok=True) lines = [ + write_rst_header('Node: %s' % get_print_name(node), level=0), + write_rst_list( + ['Hierarchy : %s' % node.fullname, + 'Exec ID : %s' % node._id]), + write_rst_header('Original Inputs', level=1), + write_rst_dict(node.inputs.trait_get()), + ] + + if result is None: + logger.debug('[Node] Writing pre-exec report to "%s"', report_file) + report_file.write_text('\n'.join(lines)) + return + + logger.debug('[Node] Writing post-exec report to "%s"', report_file) + lines += [ write_rst_header('Execution Inputs', level=1), write_rst_dict(node.inputs.trait_get()), + write_rst_header('Execution Outputs', level=1) ] - result = node.result # Locally cache result outputs = result.outputs - if outputs is None: - with open(report_file, 'at') as fp: - fp.write('\n'.join(lines)) + lines += ['None'] + report_file.write_text('\n'.join(lines)) return - lines.append(write_rst_header('Execution Outputs', level=1)) - if isinstance(outputs, Bunch): lines.append(write_rst_dict(outputs.dictcopy())) elif outputs: lines.append(write_rst_dict(outputs.trait_get())) + else: + lines += ['Outputs object was empty.'] if is_mapnode: lines.append(write_rst_header('Subnode reports', level=1)) nitems = len(ensure_list(getattr(node.inputs, node.iterfield[0]))) subnode_report_files = [] for i in range(nitems): - nodecwd = os.path.join(cwd, 'mapflow', '_%s%d' % (node.name, i), - '_report', 'report.rst') - subnode_report_files.append('subnode %d : %s' % (i, nodecwd)) + subnode_file = Path(cwd) / 'mapflow' / ( + '_%s%d' % (node.name, i)) / '_report' / 'report.rst' + subnode_report_files.append('subnode %d : %s' % (i, subnode_file)) lines.append(write_rst_list(subnode_report_files)) - - with open(report_file, 'at') as fp: - fp.write('\n'.join(lines)) + report_file.write_text('\n'.join(lines)) return lines.append(write_rst_header('Runtime info', level=1)) @@ -189,15 +181,9 @@ def write_report(node, report_type=None, is_mapnode=False): 'prev_wd': getattr(result.runtime, 'prevcwd', ''), } - if hasattr(result.runtime, 'cmdline'): - rst_dict['command'] = result.runtime.cmdline - - # Try and insert memory/threads usage if available - if hasattr(result.runtime, 'mem_peak_gb'): - rst_dict['mem_peak_gb'] = result.runtime.mem_peak_gb - - if hasattr(result.runtime, 'cpu_percent'): - rst_dict['cpu_percent'] = result.runtime.cpu_percent + for prop in ('cmdline', 'mem_peak_gb', 'cpu_percent'): + if hasattr(result.runtime, prop): + rst_dict[prop] = getattr(result.runtime, prop) lines.append(write_rst_dict(rst_dict)) @@ -225,9 +211,17 @@ def write_report(node, report_type=None, is_mapnode=False): write_rst_dict(result.runtime.environ), ] - with open(report_file, 'at') as fp: - fp.write('\n'.join(lines)) - return + report_file.write_text('\n'.join(lines)) + + +def write_report(node, report_type=None, is_mapnode=False): + """Write a report file for a node - DEPRECATED""" + if report_type not in ('preexec', 'postexec'): + logger.warning('[Node] Unknown report type "%s".', 
report_type) + return + + write_node_report(node, is_mapnode=is_mapnode, + result=node.result if report_type == 'postexec' else None) def save_resultfile(result, cwd, name, rebase=None): From 8bfef03f483e9175742957ad969f7a6575a42adc Mon Sep 17 00:00:00 2001 From: oesteban Date: Tue, 10 Sep 2019 17:55:32 -0700 Subject: [PATCH 0394/1665] fix: address failing tests, add tests for patched Path --- nipype/pipeline/engine/utils.py | 2 +- nipype/utils/filemanip.py | 20 ++++++++++++++++++++ nipype/utils/tests/test_filemanip.py | 18 ++++++++++++++++++ 3 files changed, 39 insertions(+), 1 deletion(-) diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index c8e17e7c65..0df39e2a5a 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -123,7 +123,7 @@ def write_node_report(node, result=None, is_mapnode=False): cwd = node.output_dir() report_file = Path(cwd) / '_report' / 'report.rst' - report_file.parent.mkdir(exist_ok=True) + report_file.parent.mkdir(exist_ok=True, parents=True) lines = [ write_rst_header('Node: %s' % get_print_name(node), level=0), diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 35b3de219b..a2ae0b09d6 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -60,6 +60,8 @@ def __init__(self, path): try: Path('/invented/file/path').resolve(strict=True) except TypeError: + from tempfile import gettempdir + def _patch_resolve(self, strict=False): """Add the argument strict to signature in Python>3,<3.6.""" resolved = Path().old_resolve() / self @@ -70,6 +72,24 @@ def _patch_resolve(self, strict=False): Path.old_resolve = Path.resolve Path.resolve = _patch_resolve + + if not hasattr(Path, 'write_text'): + def _write_text(self, text): + with open(str(self), 'w') as f: + f.write(text) + Path.write_text = _write_text + + try: + (Path(gettempdir()) / 'exist_ok_test').mkdir(exist_ok=True) + except TypeError: + def _mkdir(self, mode=0o777, parents=False, exist_ok=False): + if not exist_ok and self.exists(): + raise FileExistsError(str(self)) + if not parents and not Path(str(self.parents)).exists(): + raise FileNotFoundError(str(self.parents)) + os.makedirs(str(self), mode=mode, exist_ok=exist_ok) + Path.mkdir = _mkdir + except FileNotFoundError: pass except OSError: diff --git a/nipype/utils/tests/test_filemanip.py b/nipype/utils/tests/test_filemanip.py index 4ec78e1984..6bae92203b 100644 --- a/nipype/utils/tests/test_filemanip.py +++ b/nipype/utils/tests/test_filemanip.py @@ -596,3 +596,21 @@ def test_pickle(tmp_path, save_versioning): savepkl(pickle_fname, testobj, versioning=save_versioning) outobj = loadpkl(pickle_fname) assert outobj == testobj + + +def test_Path(tmpdir): + tmp_path = Path(tmpdir.strpath) + + assert hasattr(tmp_path, 'write_text') + + (tmp_path / 'textfile').write_text('some text') + + with pytest.raises(OSError): + (tmp_path / 'no' / 'parents').mkdir(parents=False) + + (tmp_path / 'no' / 'parents').mkdir(parents=True) + + with pytest.raises(OSError): + (tmp_path / 'no' / 'parents').mkdir(parents=False) + + (tmp_path / 'no' / 'parents').mkdir(parents=True, exist_ok=True) From a62a369c22e83fbd37fe88a4a84503d756f4aa9d Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Wed, 11 Sep 2019 09:42:27 -0700 Subject: [PATCH 0395/1665] Update nipype/utils/filemanip.py [skip ci] Co-Authored-By: Chris Markiewicz --- nipype/utils/filemanip.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 
a2ae0b09d6..541b3f62b9 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -80,7 +80,8 @@ def _write_text(self, text): Path.write_text = _write_text try: - (Path(gettempdir()) / 'exist_ok_test').mkdir(exist_ok=True) + with tempfile.TemporaryDirectory() as tmpdir: + (Path(tmpdir) / 'exist_ok_test').mkdir(exist_ok=True) except TypeError: def _mkdir(self, mode=0o777, parents=False, exist_ok=False): if not exist_ok and self.exists(): From c120ee5e069df1becaee1d7e98af3681df883861 Mon Sep 17 00:00:00 2001 From: oesteban Date: Wed, 11 Sep 2019 11:02:24 -0700 Subject: [PATCH 0396/1665] fix: address @effigies' review comments --- nipype/utils/filemanip.py | 45 ++++++++++++++-------------- nipype/utils/tests/test_filemanip.py | 2 -- 2 files changed, 23 insertions(+), 24 deletions(-) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 541b3f62b9..c919000d34 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -57,11 +57,9 @@ def __init__(self, path): from pathlib2 import Path USING_PATHLIB2 = True -try: +try: # PY35 - strict mode was added in 3.6 Path('/invented/file/path').resolve(strict=True) except TypeError: - from tempfile import gettempdir - def _patch_resolve(self, strict=False): """Add the argument strict to signature in Python>3,<3.6.""" resolved = Path().old_resolve() / self @@ -72,28 +70,10 @@ def _patch_resolve(self, strict=False): Path.old_resolve = Path.resolve Path.resolve = _patch_resolve - - if not hasattr(Path, 'write_text'): - def _write_text(self, text): - with open(str(self), 'w') as f: - f.write(text) - Path.write_text = _write_text - - try: - with tempfile.TemporaryDirectory() as tmpdir: - (Path(tmpdir) / 'exist_ok_test').mkdir(exist_ok=True) - except TypeError: - def _mkdir(self, mode=0o777, parents=False, exist_ok=False): - if not exist_ok and self.exists(): - raise FileExistsError(str(self)) - if not parents and not Path(str(self.parents)).exists(): - raise FileNotFoundError(str(self.parents)) - os.makedirs(str(self), mode=mode, exist_ok=exist_ok) - Path.mkdir = _mkdir - except FileNotFoundError: pass except OSError: + # PY2 def _patch_resolve(self, strict=False): """Raise FileNotFoundError instead of OSError with pathlib2.""" try: @@ -106,6 +86,27 @@ def _patch_resolve(self, strict=False): Path.old_resolve = Path.resolve Path.resolve = _patch_resolve +if not hasattr(Path, 'write_text'): + # PY34 - Path does not have write_text + def _write_text(self, text): + with open(str(self), 'w') as f: + f.write(text) + Path.write_text = _write_text + +if PY3: + try: # PY34 - mkdir does not have exist_ok + from tempfile import TemporaryDirectory + with TemporaryDirectory() as tmpdir: + (Path(tmpdir) / 'exist_ok_test').mkdir(exist_ok=True) + except TypeError: + def _mkdir(self, mode=0o777, parents=False, exist_ok=False): + if parents: + os.makedirs(str(self), mode=mode, exist_ok=exist_ok) + elif not exist_ok or not self.exists(): + os.mkdir(str(self), mode=mode) + + Path.mkdir = _mkdir + def split_filename(fname): """Split a filename into parts: path, base filename and extension. 
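Taken together, the shims above give Path a uniform surface across the supported interpreters, so callers such as the node-report writer patched earlier in this series can use a single pathlib idiom everywhere. A minimal usage sketch, assuming the shims are in effect; the directory and report text below are illustrative only and are not part of the patch:

    from pathlib import Path

    cwd = '/tmp/example_node'  # illustrative node working directory
    report_file = Path(cwd) / '_report' / 'report.rst'
    # parents=True creates missing intermediate directories;
    # exist_ok=True keeps re-runs of the same node idempotent
    report_file.parent.mkdir(parents=True, exist_ok=True)
    report_file.write_text('Node: example\nExec ID : example_node')
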
diff --git a/nipype/utils/tests/test_filemanip.py b/nipype/utils/tests/test_filemanip.py index 6bae92203b..9ee2e4c0ba 100644 --- a/nipype/utils/tests/test_filemanip.py +++ b/nipype/utils/tests/test_filemanip.py @@ -601,8 +601,6 @@ def test_pickle(tmp_path, save_versioning): def test_Path(tmpdir): tmp_path = Path(tmpdir.strpath) - assert hasattr(tmp_path, 'write_text') - (tmp_path / 'textfile').write_text('some text') with pytest.raises(OSError): From e7a62006fe4f39c44eba02c73b21514bc070d151 Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Wed, 11 Sep 2019 11:12:32 -0700 Subject: [PATCH 0397/1665] Update nipype/pipeline/engine/nodes.py Co-Authored-By: Chris Markiewicz --- nipype/pipeline/engine/nodes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index 8fda6c775e..0e3b61ffd2 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -314,7 +314,7 @@ def is_cached(self, rm_outdated=False): logger.debug('[Node] Hashes: %s, %s, %s, %s', hashed_inputs, hashvalue, hashfile, hashfiles) - cached = op.exists(hashfile) + cached = hashfile in hashfiles # No previous hashfiles found, we're all set. if cached and len(hashfiles) == 1: From ea1fe963155bb1b703a237e9310ce78e0af236a6 Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Thu, 12 Sep 2019 14:19:01 -0400 Subject: [PATCH 0398/1665] enh: add telemetry to nipype --- nipype/__init__.py | 16 ++++++++++++++++ nipype/info.py | 3 ++- nipype/utils/config.py | 1 + requirements.txt | 1 + 4 files changed, 20 insertions(+), 1 deletion(-) diff --git a/nipype/__init__.py b/nipype/__init__.py index 172617e887..eb5a2f123e 100644 --- a/nipype/__init__.py +++ b/nipype/__init__.py @@ -22,7 +22,9 @@ config = NipypeConfig() logging = Logging(config) +logger = logging.getLogger('nipype.utils') +INIT_MSG = "Running {packname} version {version} latest {latest}".format class NipypeTester(object): def __call__(self, doctests=True, parallel=False): @@ -56,3 +58,17 @@ def get_info(): from .pipeline import Node, MapNode, JoinNode, Workflow from .interfaces import (DataGrabber, DataSink, SelectFiles, IdentityInterface, Rename, Function, Select, Merge) + + +if config.getboolean('execution', 'check_version'): + import etelemetry + + latest = {"version": 'Unknown'} + try: + latest = etelemetry.get_project("nipy/heudiconv") + except Exception as e: + logger.warning("Could not check for version updates: ", e) + finally: + logger.info(INIT_MSG(packname='nipype', + version=__version__, + latest=latest["version"])) diff --git a/nipype/info.py b/nipype/info.py index 83ce1ca43d..72b34a177f 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -164,7 +164,8 @@ def get_nipype_gitversion(): 'scipy>=%s,<%s ; python_version <= "3.4"' % (SCIPY_MIN_VERSION, SCIPY_MAX_VERSION_34), 'simplejson>=%s' % SIMPLEJSON_MIN_VERSION, 'traits>=%s,!=5.0' % TRAITS_MIN_VERSION, - 'filelock>=3.0.0' + 'filelock>=3.0.0', + 'etelemetry', ] # neurdflib has to come after prov diff --git a/nipype/utils/config.py b/nipype/utils/config.py index d6d6d0879d..219215e0c8 100644 --- a/nipype/utils/config.py +++ b/nipype/utils/config.py @@ -68,6 +68,7 @@ parameterize_dirs = true poll_sleep_duration = 2 xvfb_max_wait = 10 +check_version = true [monitoring] enabled = false diff --git a/requirements.txt b/requirements.txt index 44226da48b..14b7e78ecc 100644 --- a/requirements.txt +++ b/requirements.txt @@ -17,3 +17,4 @@ scipy>=0.14 simplejson>=3.8.0 traits>=4.6 filelock>= 3.0.0 +etelemetry \ No newline at end of 
file From ceedfed9e7972d5a8f15ea7e76f6d59dca46743c Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Thu, 12 Sep 2019 14:30:26 -0400 Subject: [PATCH 0399/1665] Apply suggestions from code review Co-Authored-By: Chris Markiewicz --- nipype/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/__init__.py b/nipype/__init__.py index eb5a2f123e..35641f3d70 100644 --- a/nipype/__init__.py +++ b/nipype/__init__.py @@ -24,7 +24,7 @@ logging = Logging(config) logger = logging.getLogger('nipype.utils') -INIT_MSG = "Running {packname} version {version} latest {latest}".format +INIT_MSG = "Running {packname} version {version} (latest: {latest})".format class NipypeTester(object): def __call__(self, doctests=True, parallel=False): @@ -65,7 +65,7 @@ def get_info(): latest = {"version": 'Unknown'} try: - latest = etelemetry.get_project("nipy/heudiconv") + latest = etelemetry.get_project("nipy/nipype") except Exception as e: logger.warning("Could not check for version updates: ", e) finally: From 7e7568e1e4ec84b8bab52db1118adf07520f228a Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Thu, 12 Sep 2019 16:22:38 -0400 Subject: [PATCH 0400/1665] Update nipype/__init__.py Co-Authored-By: Oscar Esteban --- nipype/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/__init__.py b/nipype/__init__.py index 35641f3d70..8d135bf061 100644 --- a/nipype/__init__.py +++ b/nipype/__init__.py @@ -67,7 +67,7 @@ def get_info(): try: latest = etelemetry.get_project("nipy/nipype") except Exception as e: - logger.warning("Could not check for version updates: ", e) + logger.warning("Could not check for version updates: \n%s", e) finally: logger.info(INIT_MSG(packname='nipype', version=__version__, From 9a23f55b3a8f2756a03489bc5e4fc9e7bb7a5a98 Mon Sep 17 00:00:00 2001 From: Gio at UMCU Date: Fri, 19 Apr 2019 15:08:03 +0200 Subject: [PATCH 0401/1665] Qwarp can have NIFTI and NIFTI_GZ as extensions --- nipype/interfaces/afni/preprocess.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index 7b7d252c9f..484c35dab9 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -3773,8 +3773,13 @@ def _list_outputs(self): if not isdefined(self.inputs.out_file): prefix = self._gen_fname(self.inputs.in_file, suffix='_QW') - ext = '.HEAD' - suffix = '+tlrc' + outputtype = self.inputs.outputtype + if outputtype == 'AFNI': + ext = '.HEAD' + suffix = '+tlrc' + else: + ext = Info.output_type_to_ext(outputtype) + suffix = '' else: prefix = self.inputs.out_file ext_ind = max([ From d229372a805641907c683596ded6c651d5bb8fde Mon Sep 17 00:00:00 2001 From: Gio at UMCU Date: Fri, 19 Apr 2019 15:08:33 +0200 Subject: [PATCH 0402/1665] fix typo in parsing extension of out_file for Qwarp --- nipype/interfaces/afni/preprocess.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index 484c35dab9..d31541412f 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -3784,7 +3784,7 @@ def _list_outputs(self): prefix = self.inputs.out_file ext_ind = max([ prefix.lower().rfind('.nii.gz'), - prefix.lower().rfind('.nii.') + prefix.lower().rfind('.nii') ]) if ext_ind == -1: ext = '.HEAD' From 8e952443d375bc1dbf0fafa579e7a6add2cc7a82 Mon Sep 17 00:00:00 2001 From: Michael Joseph Date: Sun, 15 Sep 2019 07:33:35 -0400 Subject: 
[PATCH 0403/1665] add mrresize --- nipype/interfaces/mrtrix3/__init__.py | 3 +- .../mrtrix3/tests/test_auto_MRResize.py | 74 +++++++++++++++++ nipype/interfaces/mrtrix3/utils.py | 83 +++++++++++++++++++ 3 files changed, 159 insertions(+), 1 deletion(-) create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py diff --git a/nipype/interfaces/mrtrix3/__init__.py b/nipype/interfaces/mrtrix3/__init__.py index c9c131dde3..87b12e7442 100644 --- a/nipype/interfaces/mrtrix3/__init__.py +++ b/nipype/interfaces/mrtrix3/__init__.py @@ -4,7 +4,8 @@ # -*- coding: utf-8 -*- from .utils import (Mesh2PVE, Generate5tt, BrainMask, TensorMetrics, - ComputeTDI, TCK2VTK, MRMath, MRConvert, DWIExtract) + ComputeTDI, TCK2VTK, MRMath, MRConvert, MRResize, + DWIExtract) from .preprocess import (ResponseSD, ACTPrepareFSL, ReplaceFSwithFIRST, DWIDenoise, MRDeGibbs, DWIBiasCorrect) from .tracking import Tractography diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py new file mode 100644 index 0000000000..448d7809db --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py @@ -0,0 +1,74 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import MRResize + + +def test_MRResize_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bval_scale=dict(argstr='-bvalue_scaling %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict( + argstr='-grad %s', + extensions=None, + xor=['grad_fsl'], + ), + grad_fsl=dict( + argstr='-fslgrad %s %s', + xor=['grad_file'], + ), + image_size=dict( + argstr='-size %d,%d,%d', + mandatory=True, + xor=['voxel_size', 'scale_factor'], + ), + in_bval=dict(extensions=None, ), + in_bvec=dict( + argstr='-fslgrad %s %s', + extensions=None, + ), + in_file=dict( + argstr='%s', + extensions=None, + mandatory=True, + position=-2, + ), + interp=dict(argstr='-interp %s', ), + nthreads=dict( + argstr='-nthreads %d', + nohash=True, + ), + out_file=dict( + argstr='%s', + extensions=None, + keep_extension=True, + name_source=['in_file'], + name_template='%s_resized', + position=-1, + ), + scale_factor=dict( + argstr='-scale %d,%d,%d', + mandatory=True, + xor=['image_size', 'voxel_size'], + ), + voxel_size=dict( + argstr='-voxel %d,%d,%d', + mandatory=True, + xor=['image_size', 'scale_factor'], + ), + ) + inputs = MRResize.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MRResize_outputs(): + output_map = dict(out_file=dict(extensions=None, ), ) + outputs = MRResize.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index a667c716f4..b97ca900c2 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -677,3 +677,86 @@ def _list_outputs(self): outputs = self.output_spec().get() outputs['out_file'] = op.abspath(self.inputs.out_file) return outputs + + +class MRResizeInputSpec(MRTrix3BaseInputSpec): + in_file = File( + exists=True, + argstr='%s', + position=-2, + mandatory=True, + desc='input DWI image' + ) + image_size = traits.Tuple( + (traits.Int, traits.Int, traits.Int), + argstr='-size %d,%d,%d', + mandatory=True, + desc='define the new image size for the 
output image. This should be ' + 'specified as a comma-separated list.', + xor=['voxel_size', 'scale_factor'], + ) + voxel_size = traits.Tuple( + (traits.Float, traits.Float, traits.Float), + argstr='-voxel %d,%d,%d', + mandatory=True, + desc='define the new voxel size for the output image. This can be ' + 'specified either as a single value to be used for all ' + 'dimensions, or as a comma-separated list of the size for each ' + 'voxel dimension.', + xor=['image_size', 'scale_factor'], + ) + scale_factor = traits.Tuple( + (traits.Float, traits.Float, traits.Float), + argstr='-scale %d,%d,%d', + mandatory=True, + desc='scale the image resolution by the supplied factor. This can be ' + 'specified either as a single value to be used for all ' + 'dimensions, or as a comma-separated list of scale factors for ' + 'each dimension.', + xor=['image_size', 'voxel_size'], + ) + interp = traits.Enum( + 'cubic', + 'nearest', + 'linear', + 'sinc', + argstr='-interp %s', + desc='set the interpolation method to use when resizing (choices: ' + 'nearest, linear, cubic, sinc. Default: cubic).', + ) + out_file = File( + argstr='%s', + name_template='%s_resized', + name_source=['in_file'], + keep_extension=True, + position=-1, + desc='the output resized DWI image', + ) + + +class MRResizeOutputSpec(TraitedSpec): + out_file = File(desc='the output resized DWI image', exists=True) + + +class MRResize(MRTrix3Base): + """ + Resize an image by defining the new image resolution, voxel size or a + scale factor. If the image is 4D, then only the first 3 dimensions can be + resized. Also, if the image is down-sampled, the appropriate smoothing is + automatically applied using Gaussian smoothing. + For more information, see + + Example + ------- + >>> import nipype.interfaces.mrtrix3 as mrt + >>> resize = mrt.MRResize() + >>> resize.inputs.in_file = 'dwi.mif' + >>> resize.inputs.voxel_size = (1, 1, 1) + >>> resize.cmdline # doctest: +ELLIPSIS + 'mrresize -voxel 1,1,1 dwi.mif dwi_resized.mif' + >>> resize.run() # doctest: +SKIP + """ + + _cmd = 'mrresize' + input_spec = MRResizeInputSpec + output_spec = MRResizeOutputSpec From 30df1a12b1e9ca631c859456d2b494aaef586766 Mon Sep 17 00:00:00 2001 From: AKSoo Date: Mon, 16 Sep 2019 13:42:59 +0900 Subject: [PATCH 0404/1665] FSL 6+ specific fix for MCFLIRT outputs --- nipype/interfaces/fsl/preprocess.py | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/nipype/interfaces/fsl/preprocess.py b/nipype/interfaces/fsl/preprocess.py index 66e3c5904d..ce96763d43 100644 --- a/nipype/interfaces/fsl/preprocess.py +++ b/nipype/interfaces/fsl/preprocess.py @@ -16,6 +16,7 @@ import numpy as np from nibabel import load +from ... 
import LooseVersion from ...utils.filemanip import split_filename from ..base import (TraitedSpec, File, InputMultiPath, OutputMultiPath, Undefined, traits, isdefined) @@ -826,10 +827,17 @@ def _list_outputs(self): output_dir = os.path.dirname(outputs['out_file']) if isdefined(self.inputs.stats_imgs) and self.inputs.stats_imgs: - outputs['variance_img'] = self._gen_fname( - outputs['out_file'] + '_variance.ext', cwd=output_dir) - outputs['std_img'] = self._gen_fname( - outputs['out_file'] + '_sigma.ext', cwd=output_dir) + if LooseVersion(Info.version()) < LooseVersion('6.0.0'): + # FSL <6.0 outputs have .nii.gz_variance.nii.gz as extension + outputs['variance_img'] = self._gen_fname( + outputs['out_file'] + '_variance.ext', cwd=output_dir) + outputs['std_img'] = self._gen_fname( + outputs['out_file'] + '_sigma.ext', cwd=output_dir) + else: + outputs['variance_img'] = self._gen_fname( + outputs['out_file'], suffix='_variance', cwd=output_dir) + outputs['std_img'] = self._gen_fname( + outputs['out_file'], suffix='_sigma', cwd=output_dir) # The mean image created if -stats option is specified ('meanvol') # is missing the top and bottom slices. Therefore we only expose the @@ -838,8 +846,13 @@ def _list_outputs(self): # Note that the same problem holds for the std and variance image. if isdefined(self.inputs.mean_vol) and self.inputs.mean_vol: - outputs['mean_img'] = self._gen_fname( - outputs['out_file'] + '_mean_reg.ext', cwd=output_dir) + if LooseVersion(Info.version()) < LooseVersion('6.0.0'): + # FSL <6.0 outputs have .nii.gz_mean_img.nii.gz as extension + outputs['mean_img'] = self._gen_fname( + outputs['out_file'] + '_mean_reg.ext', cwd=output_dir) + else: + outputs['mean_img'] = self._gen_fname( + outputs['out_file'], suffix='_mean_reg', cwd=output_dir) if isdefined(self.inputs.save_mats) and self.inputs.save_mats: _, filename = os.path.split(outputs['out_file']) From 2efa21a7a7024309d5ef312f3c90b9c84c56d80c Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 16 Sep 2019 09:45:05 -0400 Subject: [PATCH 0405/1665] MAINT: Require yapf >= 0.27 for check specs --- nipype/info.py | 2 +- tools/checkspecs.py | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/nipype/info.py b/nipype/info.py index 83ce1ca43d..19f85d5178 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -186,7 +186,7 @@ def get_nipype_gitversion(): 'nipy': ['nitime', 'nilearn<0.5.0', 'dipy', 'nipy', 'matplotlib'], 'profiler': ['psutil>=5.0'], 'pybids': ['pybids>=0.7.0'], - 'specs': ['yapf'], + 'specs': ['yapf>=0.27'], 'ssh': ['paramiko'], 'tests': TESTS_REQUIRES, 'xvfbwrapper': ['xvfbwrapper'], diff --git a/tools/checkspecs.py b/tools/checkspecs.py index e282728c8e..92980cc6fb 100644 --- a/tools/checkspecs.py +++ b/tools/checkspecs.py @@ -10,8 +10,14 @@ import re import sys import warnings +from distutils.version import LooseVersion from nipype.interfaces.base import BaseInterface + +import yapf +if LooseVersion(yapf.__version__) < '0.27': + raise ImportError("Please upgrade yapf to version 0.27 or newer for stable formatting") + from yapf.yapflib.yapf_api import FormatCode From 729375d7b2e695a7a9f5ce2bf472d19b53bf188f Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Mon, 16 Sep 2019 09:45:48 -0400 Subject: [PATCH 0406/1665] ENH: Expect extensions metadata; FIX: raw docstring --- tools/checkspecs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/checkspecs.py b/tools/checkspecs.py index 92980cc6fb..3eebb0569c 100644 --- a/tools/checkspecs.py +++ b/tools/checkspecs.py @@ -31,7 +31,7 @@ def __init__(self, package_skip_patterns=None, module_skip_patterns=None, class_skip_patterns=None): - ''' Initialize package for parsing + r''' Initialize package for parsing Parameters ---------- @@ -200,7 +200,7 @@ def test_specs(self, uri): 'position', 'mandatory', 'copyfile', 'usedefault', 'sep', 'hash_files', 'deprecated', 'new_name', 'min_ver', 'max_ver', 'name_source', 'name_template', 'keep_extension', 'units', - 'output_name' + 'output_name', 'extensions' ] in_built = [ 'type', 'copy', 'parent', 'instance_handler', 'comparison_mode', From 399529fd29163b47d8d7bada46b64b9990eb44f6 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 16 Sep 2019 09:49:34 -0400 Subject: [PATCH 0407/1665] FIX: Invalid metadata identified by check specs --- nipype/algorithms/modelgen.py | 6 ++---- nipype/algorithms/tests/test_auto_SpecifyModel.py | 11 ++--------- nipype/algorithms/tests/test_auto_SpecifySPMModel.py | 11 ++--------- .../algorithms/tests/test_auto_SpecifySparseModel.py | 11 ++--------- 4 files changed, 8 insertions(+), 31 deletions(-) diff --git a/nipype/algorithms/modelgen.py b/nipype/algorithms/modelgen.py index d23ae392ba..56db46b9f5 100644 --- a/nipype/algorithms/modelgen.py +++ b/nipype/algorithms/modelgen.py @@ -254,15 +254,13 @@ class SpecifyModelInputSpec(BaseInterfaceInputSpec): xor=['subject_info', 'event_files', 'bids_event_file'], desc='TSV event file containing common BIDS fields: `onset`,' '`duration`, and categorization and amplitude columns') - bids_condition_column = traits.Str(exists=True, - mandatory=False, + bids_condition_column = traits.Str( default_value='trial_type', usedefault=True, desc='Column of the file passed to `bids_event_file` to the ' 'unique values of which events will be assigned' 'to regressors') - bids_amplitude_column = traits.Str(exists=True, - mandatory=False, + bids_amplitude_column = traits.Str( desc='Column of the file passed to `bids_event_file` ' 'according to which to assign amplitudes to events') realignment_parameters = InputMultiPath( diff --git a/nipype/algorithms/tests/test_auto_SpecifyModel.py b/nipype/algorithms/tests/test_auto_SpecifyModel.py index 5acfb84e37..1f883e3677 100644 --- a/nipype/algorithms/tests/test_auto_SpecifyModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifyModel.py @@ -5,15 +5,8 @@ def test_SpecifyModel_inputs(): input_map = dict( - bids_amplitude_column=dict( - exists=True, - mandatory=False, - ), - bids_condition_column=dict( - exists=True, - mandatory=False, - usedefault=True, - ), + bids_amplitude_column=dict(), + bids_condition_column=dict(usedefault=True, ), bids_event_file=dict( mandatory=True, xor=['subject_info', 'event_files', 'bids_event_file'], diff --git a/nipype/algorithms/tests/test_auto_SpecifySPMModel.py b/nipype/algorithms/tests/test_auto_SpecifySPMModel.py index 6145791254..53fe0b3850 100644 --- a/nipype/algorithms/tests/test_auto_SpecifySPMModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifySPMModel.py @@ -5,15 +5,8 @@ def test_SpecifySPMModel_inputs(): input_map = dict( - bids_amplitude_column=dict( - exists=True, - mandatory=False, - ), - bids_condition_column=dict( - exists=True, - mandatory=False, - 
usedefault=True, - ), + bids_amplitude_column=dict(), + bids_condition_column=dict(usedefault=True, ), bids_event_file=dict( mandatory=True, xor=['subject_info', 'event_files', 'bids_event_file'], diff --git a/nipype/algorithms/tests/test_auto_SpecifySparseModel.py b/nipype/algorithms/tests/test_auto_SpecifySparseModel.py index 933793f363..3e91aa242f 100644 --- a/nipype/algorithms/tests/test_auto_SpecifySparseModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifySparseModel.py @@ -5,15 +5,8 @@ def test_SpecifySparseModel_inputs(): input_map = dict( - bids_amplitude_column=dict( - exists=True, - mandatory=False, - ), - bids_condition_column=dict( - exists=True, - mandatory=False, - usedefault=True, - ), + bids_amplitude_column=dict(), + bids_condition_column=dict(usedefault=True, ), bids_event_file=dict( mandatory=True, xor=['subject_info', 'event_files', 'bids_event_file'], From d38f71c10fd1cff6d3509509da905de2db6d382a Mon Sep 17 00:00:00 2001 From: Michael Joseph Date: Mon, 16 Sep 2019 10:35:43 -0400 Subject: [PATCH 0408/1665] Apply suggestions from code review Co-Authored-By: Chris Markiewicz --- nipype/interfaces/mrtrix3/utils.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index b97ca900c2..caacf41911 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -692,12 +692,11 @@ class MRResizeInputSpec(MRTrix3BaseInputSpec): argstr='-size %d,%d,%d', mandatory=True, desc='define the new image size for the output image. This should be ' - 'specified as a comma-separated list.', xor=['voxel_size', 'scale_factor'], ) voxel_size = traits.Tuple( (traits.Float, traits.Float, traits.Float), - argstr='-voxel %d,%d,%d', + argstr='-voxel %g,%g,%g', mandatory=True, desc='define the new voxel size for the output image. This can be ' 'specified either as a single value to be used for all ' @@ -707,7 +706,7 @@ class MRResizeInputSpec(MRTrix3BaseInputSpec): ) scale_factor = traits.Tuple( (traits.Float, traits.Float, traits.Float), - argstr='-scale %d,%d,%d', + argstr='-scale %g,%g,%g', mandatory=True, desc='scale the image resolution by the supplied factor. This can be ' 'specified either as a single value to be used for all ' @@ -715,7 +714,7 @@ class MRResizeInputSpec(MRTrix3BaseInputSpec): 'each dimension.', xor=['image_size', 'voxel_size'], ) - interp = traits.Enum( + interpolation = traits.Enum( 'cubic', 'nearest', 'linear', @@ -746,6 +745,7 @@ class MRResize(MRTrix3Base): automatically applied using Gaussian smoothing. For more information, see + Example ------- >>> import nipype.interfaces.mrtrix3 as mrt From 270de321fa5450464f717ec43aaf4a97562e8b68 Mon Sep 17 00:00:00 2001 From: Michael Joseph Date: Mon, 16 Sep 2019 10:37:30 -0400 Subject: [PATCH 0409/1665] Apply suggestions from code review Co-Authored-By: Chris Markiewicz --- nipype/interfaces/mrtrix3/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index caacf41911..e55c23475b 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -691,7 +691,7 @@ class MRResizeInputSpec(MRTrix3BaseInputSpec): (traits.Int, traits.Int, traits.Int), argstr='-size %d,%d,%d', mandatory=True, - desc='define the new image size for the output image. 
This should be ' + desc='Number of voxels in each dimension of output image' xor=['voxel_size', 'scale_factor'], ) voxel_size = traits.Tuple( From 1327e5bc1ad4d70051318de1ec9a3b32e35de9df Mon Sep 17 00:00:00 2001 From: Michael Joseph Date: Mon, 16 Sep 2019 10:39:04 -0400 Subject: [PATCH 0410/1665] Apply suggestions from code review Co-Authored-By: Chris Markiewicz --- nipype/interfaces/mrtrix3/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index e55c23475b..c4be5b6211 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -720,6 +720,7 @@ class MRResizeInputSpec(MRTrix3BaseInputSpec): 'linear', 'sinc', argstr='-interp %s', + usedefault=True, desc='set the interpolation method to use when resizing (choices: ' 'nearest, linear, cubic, sinc. Default: cubic).', ) From 0f0924a76c7086ab2054ea71d42c472e4267bd0c Mon Sep 17 00:00:00 2001 From: mjoseph Date: Mon, 16 Sep 2019 11:25:12 -0400 Subject: [PATCH 0411/1665] include multiple examples --- nipype/interfaces/mrtrix3/utils.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index c4be5b6211..81060b5669 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -698,20 +698,14 @@ class MRResizeInputSpec(MRTrix3BaseInputSpec): (traits.Float, traits.Float, traits.Float), argstr='-voxel %g,%g,%g', mandatory=True, - desc='define the new voxel size for the output image. This can be ' - 'specified either as a single value to be used for all ' - 'dimensions, or as a comma-separated list of the size for each ' - 'voxel dimension.', + desc='Desired voxel size in mm for the output image', xor=['image_size', 'scale_factor'], ) scale_factor = traits.Tuple( (traits.Float, traits.Float, traits.Float), argstr='-scale %g,%g,%g', mandatory=True, - desc='scale the image resolution by the supplied factor. 
This can be ' - 'specified either as a single value to be used for all ' - 'dimensions, or as a comma-separated list of scale factors for ' - 'each dimension.', + desc='Scale factors to rescale the image by in each dimension', xor=['image_size', 'voxel_size'], ) interpolation = traits.Enum( @@ -752,9 +746,17 @@ class MRResize(MRTrix3Base): >>> import nipype.interfaces.mrtrix3 as mrt >>> resize = mrt.MRResize() >>> resize.inputs.in_file = 'dwi.mif' + >>> resize.inputs.image_size = (256, 256, 144) + >>> resize.cmdline # doctest: +ELLIPSIS + 'mrresize -interp cubic -size 256,256,144 dwi.mif dwi_resized.mif' + >>> resize.run() # doctest: +SKIP >>> resize.inputs.voxel_size = (1, 1, 1) >>> resize.cmdline # doctest: +ELLIPSIS - 'mrresize -voxel 1,1,1 dwi.mif dwi_resized.mif' + 'mrresize -interp cubic -voxel 1,1,1 dwi.mif dwi_resized.mif' + >>> resize.run() # doctest: +SKIP + >>> resize.inputs.scale_factor = (2.0,2.0,2.0) + >>> resize.cmdline # doctest: +ELLIPSIS + 'mrresize -interp cubic -scale 2.0,2.0,2.0 dwi.mif dwi_resized.mif' >>> resize.run() # doctest: +SKIP """ From 593aea8ece337522cd0ecfb0daf77a4dbcc1ba68 Mon Sep 17 00:00:00 2001 From: mjoseph Date: Mon, 16 Sep 2019 14:30:55 -0400 Subject: [PATCH 0412/1665] update doctests --- nipype/interfaces/mrtrix3/utils.py | 38 +++++++++++++++++++----------- 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index 81060b5669..52122189bf 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -691,7 +691,7 @@ class MRResizeInputSpec(MRTrix3BaseInputSpec): (traits.Int, traits.Int, traits.Int), argstr='-size %d,%d,%d', mandatory=True, - desc='Number of voxels in each dimension of output image' + desc='Number of voxels in each dimension of output image', xor=['voxel_size', 'scale_factor'], ) voxel_size = traits.Tuple( @@ -744,20 +744,30 @@ class MRResize(MRTrix3Base): Example ------- >>> import nipype.interfaces.mrtrix3 as mrt - >>> resize = mrt.MRResize() - >>> resize.inputs.in_file = 'dwi.mif' - >>> resize.inputs.image_size = (256, 256, 144) - >>> resize.cmdline # doctest: +ELLIPSIS - 'mrresize -interp cubic -size 256,256,144 dwi.mif dwi_resized.mif' - >>> resize.run() # doctest: +SKIP - >>> resize.inputs.voxel_size = (1, 1, 1) - >>> resize.cmdline # doctest: +ELLIPSIS + + Defining the new image resolution: + >>> image_resize = mrt.MRResize() + >>> image_resize.inputs.in_file = 'dwi.mif' + >>> image_resize.inputs.image_size = (256, 256, 144) + >>> image_resize.cmdline # doctest: +ELLIPSIS + 'mrresize -size 256,256,144 -interp cubic dwi.mif dwi_resized.mif' + >>> image_resize.run() # doctest: +SKIP + + Defining the new image's voxel size: + >>> voxel_resize = mrt.MRResize() + >>> voxel_resize.inputs.in_file = 'dwi.mif' + >>> voxel_resize.inputs.voxel_size = (1, 1, 1) + >>> voxel_resize.cmdline # doctest: +ELLIPSIS 'mrresize -interp cubic -voxel 1,1,1 dwi.mif dwi_resized.mif' - >>> resize.run() # doctest: +SKIP - >>> resize.inputs.scale_factor = (2.0,2.0,2.0) - >>> resize.cmdline # doctest: +ELLIPSIS - 'mrresize -interp cubic -scale 2.0,2.0,2.0 dwi.mif dwi_resized.mif' - >>> resize.run() # doctest: +SKIP + >>> voxel_resize.run() # doctest: +SKIP + + Defining the scale factor of each image dimension: + >>> scale_resize = mrt.MRResize() + >>> scale_resize.inputs.in_file = 'dwi.mif' + >>> scale_resize.inputs.scale_factor = (0.5,0.5,0.5) + >>> scale_resize.cmdline # doctest: +ELLIPSIS + 'mrresize -interp cubic -scale 0.5,0.5,0.5 dwi.mif 
dwi_resized.mif' + >>> scale_resize.run() # doctest: +SKIP """ _cmd = 'mrresize' From 57e83fdc70d14f89356deb75fa525749aa7eba23 Mon Sep 17 00:00:00 2001 From: Michael Joseph Date: Mon, 16 Sep 2019 15:47:58 -0400 Subject: [PATCH 0413/1665] update test --- nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py index 448d7809db..a81528d44a 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py @@ -36,7 +36,10 @@ def test_MRResize_inputs(): mandatory=True, position=-2, ), - interp=dict(argstr='-interp %s', ), + interpolation=dict( + argstr='-interp %s', + usedefault=True, + ), nthreads=dict( argstr='-nthreads %d', nohash=True, @@ -50,12 +53,12 @@ def test_MRResize_inputs(): position=-1, ), scale_factor=dict( - argstr='-scale %d,%d,%d', + argstr='-scale %g,%g,%g', mandatory=True, xor=['image_size', 'voxel_size'], ), voxel_size=dict( - argstr='-voxel %d,%d,%d', + argstr='-voxel %g,%g,%g', mandatory=True, xor=['image_size', 'scale_factor'], ), From 67b10fbe0838756027ae50c1d22d55de5f6fa198 Mon Sep 17 00:00:00 2001 From: Olivia Stanley Date: Tue, 17 Sep 2019 10:35:26 -0400 Subject: [PATCH 0414/1665] made required PR changes to logic and spacing --- nipype/interfaces/afni/utils.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/nipype/interfaces/afni/utils.py b/nipype/interfaces/afni/utils.py index bbb174954f..742d157c58 100644 --- a/nipype/interfaces/afni/utils.py +++ b/nipype/interfaces/afni/utils.py @@ -617,15 +617,12 @@ class CatMatvec(AFNICommand): def _format_arg(self, name, spec, value): if name == 'in_file': - xfm_args='' - for v in value: - if len(v[1])>0: - xfm_args += ' ' + v[0] + ' -' + v[1] + ' ' - else: - xfm_args += ' ' + v[0] + ' ' - return spec.argstr % (xfm_args) + # Concatenate a series of filenames, with optional opkeys + return ' '.join('%s -%s' % (mfile, opkey) if opkey else mfile + for mfile, opkey in value) return super(CatMatvec, self)._format_arg(name, spec, value) + class CenterMassInputSpec(CommandLineInputSpec): in_file = File( desc='input file to 3dCM', From 845463d516ffaeb9a14b6a44bb2b1465ee86a044 Mon Sep 17 00:00:00 2001 From: ostanley Date: Tue, 17 Sep 2019 11:41:10 -0400 Subject: [PATCH 0415/1665] added style change Co-Authored-By: Chris Markiewicz --- nipype/interfaces/afni/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/afni/utils.py b/nipype/interfaces/afni/utils.py index 06951056ec..38a9da3efb 100644 --- a/nipype/interfaces/afni/utils.py +++ b/nipype/interfaces/afni/utils.py @@ -621,7 +621,7 @@ def _format_arg(self, name, spec, value): if name == 'in_file': # Concatenate a series of filenames, with optional opkeys return ' '.join('%s -%s' % (mfile, opkey) if opkey else mfile - for mfile, opkey in value) + for mfile, opkey in value) return super(CatMatvec, self)._format_arg(name, spec, value) From 3e8e3762bb59419046bfdc4424495fb4eb99568d Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Tue, 17 Sep 2019 16:52:46 -0400 Subject: [PATCH 0416/1665] FIX: Drop deprecated message argument to FileNotFoundError --- nipype/interfaces/base/core.py | 6 +++--- nipype/utils/filemanip.py | 18 ++++++++++-------- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index 9345ef731a..7ba8486082 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -476,9 +476,9 @@ def aggregate_outputs(self, runtime=None, needed_outputs=None): setattr(outputs, key, val) except TraitError as error: if 'an existing' in getattr(error, 'info', 'default'): - msg = "No such file or directory for output '%s' of a %s interface" % \ - (key, self.__class__.__name__) - raise FileNotFoundError(val, message=msg) + msg = "No such file or directory '%s' for output '%s' of a %s interface" % \ + (val, key, self.__class__.__name__) + raise FileNotFoundError(msg) raise error return outputs diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index c919000d34..0bfc6b3968 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -40,14 +40,16 @@ PY3 = sys.version_info[0] >= 3 - -class FileNotFoundError(OSError): # noqa - """Defines the exception for Python 2.""" - - def __init__(self, path): - """Initialize the exception.""" - super(FileNotFoundError, self).__init__( - 2, 'No such file or directory', '%s' % path) +try: + from builtins import FileNotFoundError +except ImportError: # PY27 + class FileNotFoundError(OSError): # noqa + """Defines the exception for Python 2.""" + + def __init__(self, path): + """Initialize the exception.""" + super(FileNotFoundError, self).__init__( + 2, 'No such file or directory', '%s' % path) USING_PATHLIB2 = False From 0196beac33d0c8c992bcc07b83c0b368810be02d Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 18 Sep 2019 17:03:00 -0400 Subject: [PATCH 0417/1665] MAINT: PY2 Path compatibility --- nipype/utils/filemanip.py | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 0bfc6b3968..1cdd1e9676 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -95,19 +95,18 @@ def _write_text(self, text): f.write(text) Path.write_text = _write_text -if PY3: - try: # PY34 - mkdir does not have exist_ok - from tempfile import TemporaryDirectory - with TemporaryDirectory() as tmpdir: - (Path(tmpdir) / 'exist_ok_test').mkdir(exist_ok=True) - except TypeError: - def _mkdir(self, mode=0o777, parents=False, exist_ok=False): - if parents: - os.makedirs(str(self), mode=mode, exist_ok=exist_ok) - elif not exist_ok or not self.exists(): - os.mkdir(str(self), mode=mode) +try: # PY27/PY34 - mkdir does not have exist_ok + from .tmpdirs import TemporaryDirectory + with TemporaryDirectory() as tmpdir: + (Path(tmpdir) / 'exist_ok_test').mkdir(exist_ok=True) +except TypeError: + def _mkdir(self, mode=0o777, parents=False, exist_ok=False): + if parents: + makedirs(str(self), mode=mode, exist_ok=exist_ok) + elif not exist_ok or not self.exists(): + os.mkdir(str(self), mode=mode) - Path.mkdir = _mkdir + Path.mkdir = _mkdir def split_filename(fname): @@ -828,7 +827,7 @@ def dist_is_editable(dist): return False -def makedirs(path, exist_ok=False): +def makedirs(path, mode=0o777, exist_ok=False): """ Create path, if it doesn't exist. 
@@ -838,14 +837,14 @@ def makedirs(path, exist_ok=False): """ if not exist_ok: # The old makedirs - os.makedirs(path) + os.makedirs(path, mode=mode) return path # this odd approach deals with concurrent directory cureation if not op.exists(op.abspath(path)): fmlogger.debug("Creating directory %s", path) try: - os.makedirs(path) + os.makedirs(path, mode=mode) except OSError: fmlogger.debug("Problem creating directory %s", path) if not op.exists(path): From 1f0b85cfc16d1703fda5dbdc1183d07a2e6db66f Mon Sep 17 00:00:00 2001 From: mathiasg Date: Wed, 18 Sep 2019 18:01:49 -0400 Subject: [PATCH 0418/1665] rf+enh: dcm2niix --- docker/generate_dockerfiles.sh | 3 +- nipype/info.py | 1 + nipype/interfaces/dcm2nii.py | 90 ++++++++++--------- nipype/interfaces/tests/test_auto_Dcm2niix.py | 1 + nipype/interfaces/tests/test_extra_dcm2nii.py | 8 +- 5 files changed, 57 insertions(+), 46 deletions(-) diff --git a/docker/generate_dockerfiles.sh b/docker/generate_dockerfiles.sh index 37fc5d338b..44173ee009 100755 --- a/docker/generate_dockerfiles.sh +++ b/docker/generate_dockerfiles.sh @@ -68,10 +68,11 @@ function generate_base_dockerfile() { --spm12 version=r7219 \ --env 'LD_LIBRARY_PATH=/lib/x86_64-linux-gnu:$LD_LIBRARY_PATH' \ --freesurfer version=6.0.0-min \ + --dcm2niix version=v1.0.20190902 method=source \ --run 'echo "cHJpbnRmICJrcnp5c3p0b2YuZ29yZ29sZXdza2lAZ21haWwuY29tCjUxNzIKICpDdnVtdkVWM3pUZmcKRlM1Si8yYzFhZ2c0RQoiID4gL29wdC9mcmVlc3VyZmVyLTYuMC4wLW1pbi9saWNlbnNlLnR4dA==" | base64 -d | sh' \ --install afni ants apt-utils bzip2 convert3d file fsl-core \ fsl-mni152-templates fusefat g++ git graphviz make python ruby \ - unzip xvfb \ + unzip xvfb git-annex-standalone liblzma-dev \ --add-to-entrypoint "source /etc/fsl/fsl.sh && source /etc/afni/afni.sh" \ --env ANTSPATH='/usr/lib/ants' \ PATH='/usr/lib/ants:$PATH' \ diff --git a/nipype/info.py b/nipype/info.py index deec53aee5..313278b555 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -182,6 +182,7 @@ def get_nipype_gitversion(): ] EXTRA_REQUIRES = { + 'data': ['datalad'], 'doc': ['Sphinx>=1.4', 'numpydoc', 'matplotlib', 'pydotplus', 'pydot>=1.2.3'], 'duecredit': ['duecredit'], 'nipy': ['nitime', 'nilearn<0.5.0', 'dipy', 'nipy', 'matplotlib'], diff --git a/nipype/interfaces/dcm2nii.py b/nipype/interfaces/dcm2nii.py index 31885adaba..2635d8e78b 100644 --- a/nipype/interfaces/dcm2nii.py +++ b/nipype/interfaces/dcm2nii.py @@ -7,6 +7,8 @@ import os import re from copy import deepcopy +import itertools as it +from glob import iglob from ..utils.filemanip import split_filename from .base import (CommandLine, CommandLineInputSpec, InputMultiPath, traits, @@ -328,7 +330,7 @@ class Dcm2niixInputSpec(CommandLineInputSpec): False, argstr='-t', usedefault=True, - desc="Flag if text notes include private patient details") + desc="Text notes including private patient details") compression = traits.Enum( 1, 2, 3, 4, 5, 6, 7, 8, 9, argstr='-%d', @@ -346,6 +348,9 @@ class Dcm2niixInputSpec(CommandLineInputSpec): philips_float = traits.Bool( argstr='-p', desc="Philips precise float (not display) scaling") + to_nrrd = traits.Bool( + argstr="-e", + desc="Export as NRRD instead of NIfTI") class Dcm2niixOutputSpec(TraitedSpec): @@ -393,8 +398,11 @@ def version(self): return Info.version() def _format_arg(self, opt, spec, val): - bools = ['bids_format', 'merge_imgs', 'single_file', 'verbose', 'crop', - 'has_private', 'anon_bids', 'ignore_deriv', 'philips_float'] + bools = [ + 'bids_format', 'merge_imgs', 'single_file', 'verbose', 'crop', + 'has_private', 'anon_bids', 
'ignore_deriv', 'philips_float', + 'to_nrrd', + ] if opt in bools: spec = deepcopy(spec) if val: @@ -410,52 +418,54 @@ def _run_interface(self, runtime): # may use return code 1 despite conversion runtime = super(Dcm2niix, self)._run_interface( runtime, correct_return_codes=(0, 1, )) - if self.inputs.bids_format: - (self.output_files, self.bvecs, self.bvals, - self.bids) = self._parse_stdout(runtime.stdout) - else: - (self.output_files, self.bvecs, self.bvals) = self._parse_stdout( - runtime.stdout) + self._parse_files(self._parse_stdout(runtime.stdout)) return runtime def _parse_stdout(self, stdout): - files = [] - bvecs = [] - bvals = [] - bids = [] - skip = False - find_b = False + filenames = [] for line in stdout.split("\n"): - if not skip: - out_file = None - if line.startswith("Convert "): # output - fname = str(re.search('\S+/\S+', line).group(0)) - out_file = os.path.abspath(fname) - # extract bvals - if find_b: - bvecs.append(out_file + ".bvec") - bvals.append(out_file + ".bval") - find_b = False - # next scan will have bvals/bvecs - elif 'DTI gradients' in line or 'DTI gradient directions' in line or 'DTI vectors' in line: - find_b = True - if out_file: - ext = '.nii' if self.inputs.compress == 'n' else '.nii.gz' - files.append(out_file + ext) - if self.inputs.bids_format: - bids.append(out_file + ".json") - skip = False - # just return what was done - if not bids: - return files, bvecs, bvals + if line.startswith("Convert "): # output + fname = str(re.search(r'\S+/\S+', line).group(0)) + filenames.append(os.path.abspath(fname)) + return filenames + + def _parse_files(self, filenames): + outfiles, bvals, bvecs, bids = [], [], [], [] + outtypes = [".bval", ".bvec", ".json", ".txt"] + if self.inputs.to_nrrd: + outtypes += [".nrrd", ".nhdr", ".raw.gz"] else: - return files, bvecs, bvals, bids + outtypes += [".nii", ".nii.gz"] + + for filename in filenames: + # search for relevant files, and sort accordingly + for fl in search_files(filename, outtypes): + if ( + fl.endswith(".nii") or + fl.endswith(".gz") or + fl.endswith(".nrrd") or + fl.endswith(".nhdr") + ): + outfiles.append(fl) + elif fl.endswith(".bval"): + bvals.append(fl) + elif fl.endswith(".bvec"): + bvecs.append(fl) + elif fl.endswith(".json") or fl.endswith(".txt"): + bids.append(fl) + self.output_files = outfiles + self.bvecs = bvecs + self.bvals = bvals + self.bids = bids def _list_outputs(self): outputs = self.output_spec().get() outputs['converted_files'] = self.output_files outputs['bvecs'] = self.bvecs outputs['bvals'] = self.bvals - if self.inputs.bids_format: - outputs['bids'] = self.bids + outputs['bids'] = self.bids return outputs + +# https://stackoverflow.com/a/4829130 +def search_files(prefix, outtypes): + return it.chain.from_iterable(iglob(prefix + outtype) for outtype in outtypes) diff --git a/nipype/interfaces/tests/test_auto_Dcm2niix.py b/nipype/interfaces/tests/test_auto_Dcm2niix.py index 5917f48583..729b9aa6db 100644 --- a/nipype/interfaces/tests/test_auto_Dcm2niix.py +++ b/nipype/interfaces/tests/test_auto_Dcm2niix.py @@ -61,6 +61,7 @@ def test_Dcm2niix_inputs(): position=-1, xor=['source_dir'], ), + to_nrrd=dict(argstr='-e', ), verbose=dict( argstr='-v', usedefault=True, diff --git a/nipype/interfaces/tests/test_extra_dcm2nii.py b/nipype/interfaces/tests/test_extra_dcm2nii.py index dd68454ad0..44fb7196f8 100644 --- a/nipype/interfaces/tests/test_extra_dcm2nii.py +++ b/nipype/interfaces/tests/test_extra_dcm2nii.py @@ -41,17 +41,15 @@ def assert_dwi(eg, bids): # ensure all outputs are of equal lengths 
assert len(set(map(len, outputs))) == 1 else: - assert not eg2.outputs.bids + assert not eg.outputs.bids dcm = Dcm2niix() dcm.inputs.source_dir = datadir dcm.inputs.out_filename = '%u%z' - eg1 = dcm.run() - assert_dwi(eg1, True) + assert_dwi(dcm.run(), True) # now run specifying output directory and removing BIDS option outdir = tmpdir.mkdir('conversion').strpath dcm.inputs.output_dir = outdir dcm.inputs.bids_format = False - eg2 = dcm.run() - assert_dwi(eg2, False) + assert_dwi(dcm.run(), False) From a5a6c3979a46721b9a956742d3214e334fcf0bba Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 18 Sep 2019 10:04:21 -0400 Subject: [PATCH 0419/1665] MNT: Update changelog, Zenodo ordering --- doc/changelog/1.X.X-changelog | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/doc/changelog/1.X.X-changelog b/doc/changelog/1.X.X-changelog index 71363170df..82747d9903 100644 --- a/doc/changelog/1.X.X-changelog +++ b/doc/changelog/1.X.X-changelog @@ -1,3 +1,24 @@ +1.2.3 (September 23, 2019) +========================== + +Python 1.2.3 will be the last version to support Python 3.4. + +##### [Full changelog](https://github.com/nipy/nipype/milestone/35?closed=1) + + * FIX: Patch Path.mkdir for Python 2 (https://github.com/nipy/nipype/pull/3037) + * FIX: Drop deprecated message argument to ``FileNotFoundError`` (https://github.com/nipy/nipype/pull/3035) + * FIX: Handle NIFTI extensions in ``afni.Qwarp`` (https://github.com/nipy/nipype/pull/3028) + * FIX: Disallow returning ``None`` in ``pipeline.utils.load_resultfile`` (https://github.com/nipy/nipype/pull/3023) + * ENH: Allow afni.CatMatvec to accept empty string opkeys (https://github.com/nipy/nipype/pull/2943) + * ENH: Add ``mrrtrix3.MRResize`` interface (https://github.com/nipy/nipype/pull/3031) + * ENH: Add version check / telemetry to Nipype (https://github.com/nipy/nipype/pull/3027) + * ENH: Update MCFLIRT outputs for FSL 6+ (https://github.com/nipy/nipype/pull/3029) + * ENH: Lightweight node cache checking (https://github.com/nipy/nipype/pull/3026) + * ENH: Avoid loading result from file when writing reports (https://github.com/nipy/nipype/pull/3024) + * ENH: replace portalocker with filelock (https://github.com/nipy/nipype/pull/3025) + * MAINT: Set minimum yapf for `checkspecs` to 0.27 (https://github.com/nipy/nipype/pull/3033) + + 1.2.2 (September 07, 2019) ========================== From 9d1fec77f043636ce5316bd8bd24c92ba28cc962 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Wed, 18 Sep 2019 16:25:44 -0400 Subject: [PATCH 0420/1665] DOC: Update mailmap, zenodo --- .mailmap | 2 ++ .zenodo.json | 77 ++++++++++++++++++++++-------------------- tools/update_zenodo.py | 1 - 3 files changed, 42 insertions(+), 38 deletions(-) diff --git a/.mailmap b/.mailmap index 946919557c..9f2b618d78 100644 --- a/.mailmap +++ b/.mailmap @@ -126,6 +126,7 @@ Kshitij Chawla Kshitij Chawla (kchawla-pi) Leonie Lmape Lukas Snoek Lukas Snoek Marcel Falkiewicz Marcel Falkiewicz +Martin Perez-Guevara Mathias Goncalves Mathias Goncalves Mathias Goncalves mathiasg Mathieu Dubois Mathieu Dubois @@ -153,6 +154,7 @@ Michael Waskom mwaskom Miguel Molina-Romero Miguel Molina Murat Bilgel Murat Bilgel Oliver Contier oliver-contier +Olivia Stanley Oscar Esteban Oscar Esteban Oscar Esteban oesteban Pablo Polosecki pipolose diff --git a/.zenodo.json b/.zenodo.json index 06bd8a4ebc..668974fd2d 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -78,6 +78,10 @@ "affiliation": "Shattuck Lab, UCLA Brain Mapping Center", "name": "Wong, Jason" }, + { + "affiliation": "Concordia University", + "name": "Benderoff, Erin" + }, { "name": "Modat, Marc" }, @@ -147,8 +151,9 @@ "name": "Berleant, Shoshana" }, { - "affiliation": "Concordia University", - "name": "Benderoff, Erin" + "affiliation": "Dartmouth College: Hanover, NH, United States", + "name": "Halchenko, Yaroslav O.", + "orcid": "0000-0003-3456-2493" }, { "affiliation": "Institute for Biomedical Engineering, ETH and University of Zurich", @@ -156,9 +161,9 @@ "orcid": "0000-0001-7037-2449" }, { - "affiliation": "Dartmouth College: Hanover, NH, United States", - "name": "Halchenko, Yaroslav O.", - "orcid": "0000-0003-3456-2493" + "affiliation": "The Centre for Addiction and Mental Health", + "name": "Joseph, Michael", + "orcid": "0000-0002-0068-230X" }, { "affiliation": "UC San Diego", @@ -202,11 +207,6 @@ "name": "Varoquaux, Gael", "orcid": "0000-0003-1076-5122" }, - { - "affiliation": "The Centre for Addiction and Mental Health", - "name": "Joseph, Michael", - "orcid": "0000-0002-0068-230X" - }, { "affiliation": "Athena EPI, Inria Sophia-Antipolis", "name": "Wassermann, Demian", @@ -250,14 +250,14 @@ "affiliation": "Nathan s Kline institute for psychiatric research", "name": "Sikka, Sharad" }, + { + "name": "Forbes, Jessica" + }, { "affiliation": "University College London", "name": "Mancini, Matteo", "orcid": "0000-0001-7194-4568" }, - { - "name": "Forbes, Jessica" - }, { "affiliation": "Duke University", "name": "Iqbal, Shariq", @@ -295,10 +295,6 @@ { "name": "Kent, James" }, - { - "name": "Heinsfeld, Anibal S\u00f3lon", - "orcid": "0000-0002-2050-0614" - }, { "name": "Watanabe, Aimi" }, @@ -325,6 +321,10 @@ "name": "Eshaghi, Arman", "orcid": "0000-0002-6652-3512" }, + { + "name": "Heinsfeld, Anibal S\u00f3lon", + "orcid": "0000-0002-2050-0614" + }, { "name": "Ginsburg, Daniel" }, @@ -378,9 +378,15 @@ { "name": "K\u00fcttner, Ren\u00e9" }, + { + "name": "Perez-Guevara, Martin" + }, { "name": "Millman, Jarrod" }, + { + "name": "Lai, Jeff" + }, { "name": "Zhou, Dale" }, @@ -402,16 +408,16 @@ "affiliation": "The University of Sydney", "name": "Liu, Siqi" }, - { - "affiliation": "University of Pennsylvania", - "name": "Kahn, Ari E.", - "orcid": "0000-0002-2127-0507" - }, { "affiliation": "Leibniz Institute for Neurobiology", "name": "Stadler, J\u00f6rg", "orcid": "0000-0003-4313-129X" }, + { + "affiliation": "University of Pennsylvania", + "name": "Kahn, Ari E.", + "orcid": "0000-0002-2127-0507" + }, { "affiliation": "University College 
London", "name": "P\u00e9rez-Garc\u00eda, Fernando", @@ -503,15 +509,6 @@ "affiliation": "Boston University", "name": "Perkins, L. Nathan" }, - { - "name": "Marina, Ana" - }, - { - "name": "Mattfeld, Aaron" - }, - { - "name": "Noel, Maxime" - }, { "affiliation": "University of Amsterdam", "name": "Lukas Snoek", @@ -521,9 +518,18 @@ "affiliation": "Institute of Imaging & Computer Vision, RWTH Aachen University, Germany", "name": "Weninger, Leon" }, + { + "name": "Marina, Ana" + }, + { + "name": "Mattfeld, Aaron" + }, { "name": "Matsubara, K" }, + { + "name": "Noel, Maxime" + }, { "name": "Cheung, Brian" }, @@ -570,6 +576,9 @@ { "name": "Weinstein, Alejandro" }, + { + "name": "Tambini, Arielle" + }, { "affiliation": "Duke University", "name": "Broderick, William", @@ -611,9 +620,6 @@ { "name": "Tarbert, Claire" }, - { - "name": "Tambini, Arielle" - }, { "name": "Nickson, Thomas" }, @@ -679,9 +685,6 @@ "name": "Pellman, John", "orcid": "0000-0001-6810-4461" }, - { - "name": "Perez-Guevara, Martin" - }, { "name": "Khanuja, Ranjeet" }, diff --git a/tools/update_zenodo.py b/tools/update_zenodo.py index 163f88084d..5617772d75 100755 --- a/tools/update_zenodo.py +++ b/tools/update_zenodo.py @@ -46,7 +46,6 @@ {"affiliation": "Child Mind Institute / Nathan Kline Institute", "name": "Pellman, John", "orcid": "0000-0001-6810-4461"}, - {"name": "Perez-Guevara, Martin"}, {"name": "Khanuja, Ranjeet"}, {"affiliation": "Medical Imaging & Biomarkers, Bioclinica, Newark, CA, USA.", From 64455007be98bf6b544899bf78c769b8c7889227 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 18 Sep 2019 16:26:01 -0400 Subject: [PATCH 0421/1665] MNT: Simplify mailmap --- .mailmap | 357 +++++++++++++++++++++++++------------------------------ 1 file changed, 163 insertions(+), 194 deletions(-) diff --git a/.mailmap b/.mailmap index 9f2b618d78..2a1480a874 100644 --- a/.mailmap +++ b/.mailmap @@ -1,199 +1,168 @@ +Aimi Watanabe Aimi Watanabe stymy -Aimi Watanabe stymy -Alejandro Tabas qTabs -Alejandro Tabas qtabs -Alejandro de la Vega adelavega -Alejandro de la Vega delavega4 -Alexander Schaefer Alexander Schaefer -Alexander Schaefer alexschaefer83 -Alexander Schaefer aschaefer -Alexandre M. Savio Alexandre M. S -Alexandre M. Savio Alexandre M. S -Alexandre M. Savio Alexandre Manhaes Savio -Andrew Floren afloren -Andrey Chetverikov Andrey Chetverikov -Andrey Chetverikov achetverikov -Anibal Sólon Heinsfeld anibalsolon -Anisha Keshavan Anisha Keshavan -Anisha Keshavan akeshavan -Ariel Rokem Ariel Rokem -Ariel Rokem arokem -Ariel Rokem arokem -Arman Eshaghi armaneshaghi -Ashely Gillman Ashley Gillman -Ashely Gillman ashgillman -Basille Pinsard bpinsard -Basille Pinsard bpinsard -Ben Cipollini Ben Cipollini -Benjamin Acland Ben Acland -Benjamin Meyers <34044274+BenjaminMey@users.noreply.github.com> BenjaminMey <34044274+BenjaminMey@users.noreply.github.com> -Benjamin Yvernault Benjamin Yvernault -Benjamin Yvernault byvernault -Blake Dewey Blake Dewey -Blake Dewey blakedewey -Blake Dewey blakedewey -Brendan Moloney moloney -Caroline Froehlich carolFrohlich -Christopher J. Markiewicz Chris Markiewicz -Christopher J. Markiewicz Chris Markiewicz -Christopher J. Markiewicz Christopher J. Johnson -Christopher J. Markiewicz Christopher J. Markiewicz -Christopher J. Markiewicz Christopher J. 
Markiewicz -Cindee Madison cindeem -Cindee Madison cindeem <> -Colin Buchanan Colin Buchanan -Colin Buchanan colinbuchanan -Daniel Brenner brennerd -Daniel Brenner brennerd11 -Daniel Clark dclark87 -Daniel Geisler daniel-ge -Daniel Ginsburg danginsburg -Daniel McNamee danmc -David Ellis David Ellis -David Ellis David Ellis -David Mordom dmordom -David Welch David Welch -Dimitri Papadopoulos Orfanos Dimitri Papadopoulos -Dmytro belevtsoff -Dylan M. Nielson Dylan -Dylan M. Nielson Dylan Nielson -Eduard Ort eort -Elizabeth DuPre emdupre -Erik Ziegler Erik -Erik Ziegler Erik Ziegler -Erik Ziegler erik -Erik Ziegler erikz -Erik Ziegler swederik -Feilong Ma Ma Feilong -Fernando Pérez-García Fernando -Franz Liem fliem -Franz Liem fliem -Fred Loney FredLoney -Gael Varoquaux GaelVaroquaux -Gael Varoquaux GaelVaroquaux -Gavin Cooper gjcooper -Gilles de Hollander Gilles86 -Gio Piantoni Gio at UMCU -Guillaume Flandin Guillaume -Hans Johnson Hans Johnson -Hans Johnson hjmjohnson -Horea Christian Horea Christian -Isaac Schwabacher ischwabacher -Jakub Kaczmarzyk jakubk -Jakub Kaczmarzyk kaczmarj -James Kent jdkent -Janosch Linkersdörfer Januzz -Jason Wong Jason -Jason Wong Jason W -Jason Wong Jason W -Jason Wong jason -Jason Wong jason-wg -Jens Kleesiek JensNRAD -Jessica Forbes jessicaforbes -Jérémy Guillon GUILLON Jeremy -Joerg Stadler Joerg Stadler -Joerg Stadler Joerg Stadler -Joerg Stadler Jörg Stadler -John A. Lee john anthony lee -John A. Lee leej3 -Joke Durnez jokedurnez -Josh Warner JDWarner -Josh Warner Josh Warner (Mac) -Kai Schlamp medihack -Katherine Bottenhorn 62442katieb -Katherine Bottenhorn Katie Bottenhorn -Kesshi Jordan Kesshi Jordan -Kesshi Jordan Kesshi Jordan -Kesshi Jordan Kesshi Jordan -Kesshi Jordan Kesshi Jordan -Kesshi Jordan Kesshi Jordan -Kesshi Jordan Kesshi jordan -Kesshi Jordan kesshijordan -Kevin Sitek sitek -Kevin Sitek sitek -Kornelius Podranski Kornelius -Krzysztof J. Gorgolewski Chris Filo Gorgolewski -Krzysztof J. Gorgolewski Chris Filo Gorgolewski -Krzysztof J. Gorgolewski Chris Filo Gorgolewski -Krzysztof J. Gorgolewski Chris Filo Gorgolewski -Krzysztof J. Gorgolewski Chris Gorgolewski -Krzysztof J. Gorgolewski Krzysztof Gorgolewski -Krzysztof J. Gorgolewski filo -Krzysztof J. Gorgolewski filo -Krzysztof J. Gorgolewski filo -Krzysztof J. Gorgolewski filo -Kshitij Chawla Kshitij Chawla (kchawla-pi) -Leonie Lampe Leonie Lmape -Lukas Snoek Lukas Snoek -Marcel Falkiewicz Marcel Falkiewicz +Alejandro Tabas +Alejandro Tabas +Alejandro de la Vega +Alejandro de la Vega +Alexander Schaefer +Alexander Schaefer +Alexander Schaefer +Alexandre M. Savio +Alexandre M. Savio +Andrew Floren +Andrey Chetverikov +Andrey Chetverikov +Anibal Sólon Heinsfeld +Anisha Keshavan +Anisha Keshavan +Ariel Rokem +Ariel Rokem +Arman Eshaghi +Ashely Gillman +Basille Pinsard +Basille Pinsard +Ben Cipollini +Benjamin Acland +Benjamin Meyers <34044274+BenjaminMey@users.noreply.github.com> +Benjamin Yvernault +Benjamin Yvernault +Blake Dewey +Blake Dewey +Blake Dewey +Brendan Moloney +Caroline Froehlich +Christopher J. Markiewicz +Christopher J. Markiewicz +Christopher J. Markiewicz +Cindee Madison +Colin Buchanan +Colin Buchanan +Daniel Brenner +Daniel Clark +Daniel Geisler +Daniel Ginsburg +Daniel McNamee +David Ellis +David Ellis +David Mordom +David Welch +Dimitri Papadopoulos Orfanos +Dmytro Belevtsoff +Dylan M. Nielson +Dylan M. 
Nielson +Eduard Ort +Elizabeth DuPre +Erik Ziegler +Erik Ziegler +Erik Ziegler +Feilong Ma +Fernando Pérez-García +Franz Liem +Franz Liem +Fred Loney +Gael Varoquaux +Gavin Cooper +Gilles de Hollander +Gio Piantoni +Guillaume Flandin +Hans Johnson +Horea Christian +Isaac Schwabacher +Jakub Kaczmarzyk +James Kent +Janosch Linkersdörfer +Jason Wong +Jason Wong +Jens Kleesiek +Jessica Forbes +Jérémy Guillon +Joerg Stadler +Joerg Stadler +Joerg Stadler +John A. Lee +Joke Durnez +Josh Warner +Kai Schlamp +Katherine Bottenhorn +Kesshi Jordan +Kesshi Jordan +Kesshi Jordan +Kesshi Jordan +Kesshi Jordan +Kesshi Jordan +Kevin Sitek +Kevin Sitek +Kornelius Podranski +Krzysztof J. Gorgolewski +Krzysztof J. Gorgolewski +Krzysztof J. Gorgolewski +Krzysztof J. Gorgolewski +Krzysztof J. Gorgolewski +Krzysztof J. Gorgolewski +Krzysztof J. Gorgolewski +Kshitij Chawla +Leonie Lampe +Lukas Snoek +Marcel Falkiewicz Martin Perez-Guevara -Mathias Goncalves Mathias Goncalves -Mathias Goncalves mathiasg -Mathieu Dubois Mathieu Dubois -Mathieu Dubois duboism -Matteo Mancini matteomancini -Matteo Visconti di Oleggio Castello Matteo Visconti dOC -Matteo Visconti di Oleggio Castello Matteo Visconti dOC -Matteo Visconti di Oleggio Castello mvdoc -Matthew Cieslak Matt Cieslak -Michael Clark Clark -Michael Dayan Michael -Michael Dayan Michael +Mathias Goncalves +Mathieu Dubois +Mathieu Dubois +Matteo Mancini +Matteo Visconti di Oleggio Castello +Matteo Visconti di Oleggio Castello +Matthew Cieslak +Michael Clark +Michael Dayan +Michael Dayan Michael Dayan mick-d -Michael Dayan Michael -Michael Joseph mjoseph -Michael Joseph Michael Joseph -Michael Philipp Notter Michael Notter -Michael Philipp Notter miykael -Michael Waskom Michael Waskom -Michael Waskom Michael Waskom -Michael Waskom Michael Waskom -Michael Waskom mwaskom -Michael Waskom mwaskom -Michael Waskom mwaskom -Miguel Molina-Romero Miguel Molina -Murat Bilgel Murat Bilgel -Oliver Contier oliver-contier +Michael Dayan +Michael Joseph +Michael Philipp Notter +Michael Philipp Notter +Michael Waskom +Michael Waskom +Michael Waskom +Miguel Molina-Romero +Murat Bilgel +Oliver Contier Olivia Stanley -Oscar Esteban Oscar Esteban -Oscar Esteban oesteban -Pablo Polosecki pipolose -Pablo Polosecki pipolose -Paul Sharp psharp1289 -Ranjit Khanuja RanjitK -Rastko Ćirić rciric -Rastko Ćirić Rastko Ćirić -Rastko Ćirić rciric -Ross Markello Ross Markello -Russell Poldrack Russ Poldrack -Russell Poldrack poldrack -Salma Bougacha Salma BOUGACHA -Salma Bougacha salma -Salma Bougacha salma1601 -Sami Kristian Andberg Sami Andberg -Satrajit Ghosh Satrajit Ghosh -Sebastian Urchs sebastian -Serge Koudoro skoudoro -Sharad Sikka ssikka -Shariq Iqbal shariqiqbal2810 -Shariq Iqbal shariqiqbal2810 +Oscar Esteban +Oscar Esteban +Pablo Polosecki +Pablo Polosecki +Paul Sharp +Ranjit Khanuja +Rastko Ćirić +Rastko Ćirić +Rastko Ćirić +Ross Markello +Russell Poldrack +Russell Poldrack +Salma Bougacha +Sami Kristian Andberg +Satrajit Ghosh +Sebastian Urchs +Serge Koudoro +Sharad Sikka +Shariq Iqbal +Shariq Iqbal Shoshana Berleant Shoshana Berleant -Shoshana Berleant Shoshana Berleant -Simon Rothmei Simon R -Simon Rothmei Simon Rothmeier -Siqi Liu siqi liu -Siqi Liu sql -Steven Giavasis Steven Giavasis -Steven Giavasis sgiavasis -Steven Giavasis sgiavasis -Steven Tilley Steven Tilley -Tristan Glatard Tristan Glatard -Victor Férat Victor -Victor Férat Victor Ferat -Victor Férat Victor Férat -Victor Saase vsaase -Weijie Huang forwho -William Triplett William Triplett -Wolfgang Pauli Wolfgang Pauli 
-Xiangzhen Kong bnucon -Yaroslav Halchenko Yaroslav Halchenko +Shoshana Berleant +Simon Rothmei +Simon Rothmei +Siqi Liu +Steven Giavasis +Steven Giavasis +Steven Giavasis +Steven Tilley +Tristan Glatard +Victor Férat +Victor Férat +Victor Férat +Victor Saase +Weijie Huang +William Triplett +Wolfgang Pauli +Xiangzhen Kong +Yaroslav Halchenko From dca1a0381fe67aef0433e1a75d5fdff6ffa55154 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 18 Sep 2019 21:28:03 -0400 Subject: [PATCH 0422/1665] MNT: Version 1.2.3 --- doc/conf.py | 2 +- nipype/info.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index bdf938a70c..10ac3c36db 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -82,7 +82,7 @@ # The short X.Y version. version = nipype.__version__ # The full version, including alpha/beta/rc tags. -release = "1.2.2" +release = "1.2.3" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/nipype/info.py b/nipype/info.py index deec53aee5..dcdbfa593e 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -9,7 +9,7 @@ # nipype version information # Remove -dev for release -__version__ = '1.2.3-dev' +__version__ = '1.2.3' def get_nipype_gitversion(): From bf0a965368e66fec387643a3a245c0ce3cf62e21 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 18 Sep 2019 21:34:28 -0400 Subject: [PATCH 0423/1665] MNT: Request rerender on initial feedstock update --- tools/feedstock.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/feedstock.sh b/tools/feedstock.sh index b34aae2c8b..b37dba8671 100755 --- a/tools/feedstock.sh +++ b/tools/feedstock.sh @@ -108,6 +108,10 @@ Updating feedstock to release branch * URL = https://github.com/$SRCREPO/archive/$REF.tar.gz * SHA256 = \`$SHA256\` + +--- + +@conda-forge-admin, please rerender. END fi From bdef3a33d17614c5afaec4db28c5c1e1875e1620 Mon Sep 17 00:00:00 2001 From: AKSoo Date: Thu, 19 Sep 2019 13:22:17 +0900 Subject: [PATCH 0424/1665] SelectFiles docstring corrected --- nipype/interfaces/io.py | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index c409d4e78a..f3b2f7e267 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -1296,13 +1296,13 @@ class SelectFilesInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): class SelectFiles(IOBase): """Flexibly collect data from disk to feed into workflows. - This interface uses the {}-based string formatting syntax to plug + This interface uses Python's {}-based string formatting syntax to plug values (possibly known only at workflow execution time) into string - templates and collect files from persistant storage. These templates - can also be combined with glob wildcards. The field names in the - formatting template (i.e. the terms in braces) will become inputs - fields on the interface, and the keys in the templates dictionary - will form the output fields. + templates and collect files from persistant storage. These templates can + also be combined with glob wildcards (*, ?) and character ranges ([...]). + The field names in the formatting template (i.e. the terms in braces) will + become inputs fields on the interface, and the keys in the templates + dictionary will form the output fields. 
Examples -------- @@ -1310,18 +1310,22 @@ class SelectFiles(IOBase): >>> import pprint >>> from nipype import SelectFiles, Node >>> templates={"T1": "{subject_id}/struct/T1.nii", - ... "epi": "{subject_id}/func/f[0, 1].nii"} + ... "epi": "{subject_id}/func/f[0,1].nii"} >>> dg = Node(SelectFiles(templates), "selectfiles") >>> dg.inputs.subject_id = "subj1" >>> pprint.pprint(dg.outputs.get()) # doctest: {'T1': , 'epi': } - The same thing with dynamic grabbing of specific files: + Note that SelectFiles does not support lists as inputs for the dynamic + fields. Attempts to do so may lead to unexpected results because brackets + also express glob character ranges. For example, - >>> templates["epi"] = "{subject_id}/func/f{run!s}.nii" + >>> templates["epi"] = "{subject_id}/func/f{run}.nii" >>> dg = Node(SelectFiles(templates), "selectfiles") >>> dg.inputs.subject_id = "subj1" - >>> dg.inputs.run = [2, 4] + >>> dg.inputs.run = [10, 11] + + would match f0.nii or f1.nii, not f10.nii or f11.nii. """ input_spec = SelectFilesInputSpec From c15bd757c2c37090f5f3e4343d7a0891952799e0 Mon Sep 17 00:00:00 2001 From: Sin Kim Date: Thu, 19 Sep 2019 12:25:21 +0900 Subject: [PATCH 0425/1665] MNT: Add Kim, Sin to .zenodo.json --- .mailmap | 1 + .zenodo.json | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/.mailmap b/.mailmap index 2a1480a874..380d937cb2 100644 --- a/.mailmap +++ b/.mailmap @@ -92,6 +92,7 @@ Kesshi Jordan Kevin Sitek Kevin Sitek +Kim, Sin Kornelius Podranski Krzysztof J. Gorgolewski Krzysztof J. Gorgolewski diff --git a/.zenodo.json b/.zenodo.json index 668974fd2d..259b53e926 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -408,6 +408,11 @@ "affiliation": "The University of Sydney", "name": "Liu, Siqi" }, + { + "affiliation": "Korea Advanced Institute of Science and Technology", + "name": "Kim, Sin", + "orcid": "0000-0003-4652-3758" + }, { "affiliation": "Leibniz Institute for Neurobiology", "name": "Stadler, J\u00f6rg", From f930a40762b8e907e3dd5244e254e77c1798bb86 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Thu, 19 Sep 2019 10:59:16 -0400 Subject: [PATCH 0426/1665] MNT: Add Junhao WEN to .zenodo.json --- .mailmap | 1 + .zenodo.json | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/.mailmap b/.mailmap index 380d937cb2..ced66b4099 100644 --- a/.mailmap +++ b/.mailmap @@ -82,6 +82,7 @@ Joerg Stadler John A. 
Lee Joke Durnez Josh Warner +Junhao WEN Kai Schlamp Katherine Bottenhorn Kesshi Jordan diff --git a/.zenodo.json b/.zenodo.json index 259b53e926..fba3546a77 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -535,6 +535,11 @@ { "name": "Noel, Maxime" }, + { + "affiliation": "University of Pennsylvania", + "name": "Junhao WEN", + "orcid": "0000-0003-2077-3070" + }, { "name": "Cheung, Brian" }, From d554c43eef36b8b811fa47026b0c07fc6f4f1edb Mon Sep 17 00:00:00 2001 From: leej3 Date: Tue, 17 Sep 2019 11:54:10 -0400 Subject: [PATCH 0427/1665] add goforit to Remlfit --- nipype/interfaces/afni/model.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nipype/interfaces/afni/model.py b/nipype/interfaces/afni/model.py index 295a01ce0c..063ef67daf 100644 --- a/nipype/interfaces/afni/model.py +++ b/nipype/interfaces/afni/model.py @@ -541,6 +541,10 @@ class RemlfitInputSpec(AFNICommandInputSpec): desc='turns on more progress messages, including memory usage ' 'progress reports at various stages', argstr='-verb') + goforit = traits.Bool( + desc='With potential issues flagged in the design matrix, an attempt' + 'will nevertheless be made to fit the model', + argstr='-GOFORIT') ovar = File( desc='dataset for OLSQ st.dev. parameter (kind of boring)', argstr='-Ovar %s') From fbe8b5da3ebdf8b1ccb25fb4c2949a966e5de749 Mon Sep 17 00:00:00 2001 From: john lee Date: Thu, 19 Sep 2019 13:47:43 -0400 Subject: [PATCH 0428/1665] fix formatting Co-Authored-By: Chris Markiewicz --- nipype/interfaces/afni/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/afni/model.py b/nipype/interfaces/afni/model.py index 063ef67daf..96afbe7320 100644 --- a/nipype/interfaces/afni/model.py +++ b/nipype/interfaces/afni/model.py @@ -542,7 +542,7 @@ class RemlfitInputSpec(AFNICommandInputSpec): 'progress reports at various stages', argstr='-verb') goforit = traits.Bool( - desc='With potential issues flagged in the design matrix, an attempt' + desc='With potential issues flagged in the design matrix, an attempt ' 'will nevertheless be made to fit the model', argstr='-GOFORIT') ovar = File( From eb12503fea3f38a06e14c108522c80e61109436e Mon Sep 17 00:00:00 2001 From: oesteban Date: Fri, 20 Sep 2019 12:12:47 -0700 Subject: [PATCH 0429/1665] MAINT: Revision of Zenodo update script Unlocks first position as per private conversation with @chrisgorgo --- tools/update_zenodo.py | 109 +++++++++++++++++++++-------------------- 1 file changed, 56 insertions(+), 53 deletions(-) diff --git a/tools/update_zenodo.py b/tools/update_zenodo.py index 163f88084d..7600c0b94d 100755 --- a/tools/update_zenodo.py +++ b/tools/update_zenodo.py @@ -1,46 +1,15 @@ #!/usr/bin/env python3 +"""Update and sort the creators list of the zenodo record.""" +import sys +import shutil +from pathlib import Path import json from fuzzywuzzy import fuzz, process -import shutil -import os import subprocess as sp -if os.path.exists('line-contributions.txt'): - with open('line-contributions.txt', 'rt') as fp: - lines = fp.readlines() -else: - if shutil.which('git-line-summary'): - print("Running git-line-summary on nipype repo") - lines = sp.check_output(['git-line-summary']).decode().split('\n') - else: - raise RuntimeError("Install Git Extras to view git contributors") - -data = [' '.join(line.strip().split()[1:-1]) for line in lines if '%' in line] - -# load zenodo from master -with open('.zenodo.json', 'rt') as fp: - zenodo = json.load(fp) -zen_names = [' '.join(val['name'].split(',')[::-1]).strip() - for val in zenodo['creators']] - 
-name_matches = [] - -for ele in data: - matches = process.extract(ele, zen_names, scorer=fuzz.token_sort_ratio, - limit=2) - # matches is a list [('First match', % Match), ('Second match', % Match)] - if matches[0][1] > 80: - val = zenodo['creators'][zen_names.index(matches[0][0])] - else: - # skip unmatched names - print("No entry to sort:", ele) - continue - - if val not in name_matches: - name_matches.append(val) - +CREATORS_LAST_ORCID = '0000-0002-5312-6729' # This ORCID should go last # for entries not found in line-contributions -missing_entries = [ +MISSING_ENTRIES = [ {"name": "Varada, Jan"}, {"name": "Schwabacher, Isaac"}, {"affiliation": "Child Mind Institute / Nathan Kline Institute", @@ -61,31 +30,65 @@ {"name": "Lai, Jeff"} ] -for entry in missing_entries: - name_matches.append(entry) - def fix_position(creators): + """Place Satra last.""" # position first / last authors - f_authr = None l_authr = None - for i, info in enumerate(creators): - if info['name'] == 'Gorgolewski, Krzysztof J.': - f_authr = i - if info['name'] == 'Ghosh, Satrajit': - l_authr = i + for info in creators: + if 'orcid' in info and info['orcid'] == CREATORS_LAST_ORCID: + l_authr = info - if f_authr is None or l_authr is None: + if l_authr is None: raise AttributeError('Missing important people') - creators.insert(0, creators.pop(f_authr)) - creators.insert(len(creators), creators.pop(l_authr + 1)) + creators.remove(l_authr) + creators.append(l_authr) return creators -zenodo['creators'] = fix_position(name_matches) +if __name__ == '__main__': + contrib_file = Path('line-contributors.txt') + lines = [] + if contrib_file.exists(): + print('WARNING: Reusing existing line-contributors.txt file.', file=sys.stderr) + lines = contrib_file.read_text().splitlines() -with open('.zenodo.json', 'wt') as fp: - json.dump(zenodo, fp, indent=2, sort_keys=True) - fp.write('\n') + if not lines and shutil.which('git-line-summary'): + print("Running git-line-summary on nipype repo") + lines = sp.check_output(['git-line-summary']).decode().splitlines() + contrib_file.write_text('\n'.join(lines)) + + if not lines: + raise RuntimeError('Could not find line-contributors from git repository ' + '(hint: please install git-extras).') + + data = [' '.join(line.strip().split()[1:-1]) for line in lines if '%' in line] + + # load zenodo from master + zenodo_file = Path('.zenodo.json') + zenodo = json.loads(zenodo_file.read_text()) + zen_names = [' '.join(val['name'].split(',')[::-1]).strip() + for val in zenodo['creators']] + + name_matches = [] + for ele in data: + matches = process.extract(ele, zen_names, scorer=fuzz.token_sort_ratio, + limit=2) + # matches is a list [('First match', % Match), ('Second match', % Match)] + if matches[0][1] > 80: + val = zenodo['creators'][zen_names.index(matches[0][0])] + else: + # skip unmatched names + print("No entry to sort:", ele) + continue + + if val not in name_matches: + name_matches.append(val) + + for entry in MISSING_ENTRIES: + name_matches.append(entry) + + zenodo['creators'] = fix_position(name_matches) + zenodo_file.write_text(json.dumps(zenodo, indent=2, sort_keys=True)) From eeeb6509b768bdabaafdb6b61d77b0284dc8c09a Mon Sep 17 00:00:00 2001 From: oesteban Date: Fri, 20 Sep 2019 12:55:31 -0700 Subject: [PATCH 0430/1665] fix: pin Chris G. 
to second-last author of the creators list [skip ci] --- tools/update_zenodo.py | 52 ++++++++++++++++++++---------------------- 1 file changed, 25 insertions(+), 27 deletions(-) diff --git a/tools/update_zenodo.py b/tools/update_zenodo.py index 7600c0b94d..2784044b4e 100755 --- a/tools/update_zenodo.py +++ b/tools/update_zenodo.py @@ -7,7 +7,8 @@ from fuzzywuzzy import fuzz, process import subprocess as sp -CREATORS_LAST_ORCID = '0000-0002-5312-6729' # This ORCID should go last +# These ORCIDs should go last +CREATORS_LAST = ['Gorgolewski, Krzysztof J.', 'Ghosh, Satrajit'] # for entries not found in line-contributions MISSING_ENTRIES = [ {"name": "Varada, Jan"}, @@ -30,24 +31,6 @@ {"name": "Lai, Jeff"} ] - -def fix_position(creators): - """Place Satra last.""" - # position first / last authors - l_authr = None - - for info in creators: - if 'orcid' in info and info['orcid'] == CREATORS_LAST_ORCID: - l_authr = info - - if l_authr is None: - raise AttributeError('Missing important people') - - creators.remove(l_authr) - creators.append(l_authr) - return creators - - if __name__ == '__main__': contrib_file = Path('line-contributors.txt') lines = [] @@ -55,14 +38,16 @@ def fix_position(creators): print('WARNING: Reusing existing line-contributors.txt file.', file=sys.stderr) lines = contrib_file.read_text().splitlines() - if not lines and shutil.which('git-line-summary'): + git_line_summary_path = shutil.which('git-line-summary') + if not lines and git_line_summary_path: print("Running git-line-summary on nipype repo") - lines = sp.check_output(['git-line-summary']).decode().splitlines() + lines = sp.check_output([git_line_summary_path]).decode().splitlines() contrib_file.write_text('\n'.join(lines)) if not lines: - raise RuntimeError('Could not find line-contributors from git repository ' - '(hint: please install git-extras).') + raise RuntimeError("""\ +Could not find line-contributors from git repository.%s""" % """ \ +git-line-summary not found, please install git-extras. 
""" * (git_line_summary_path is None)) data = [' '.join(line.strip().split()[1:-1]) for line in lines if '%' in line] @@ -71,8 +56,10 @@ def fix_position(creators): zenodo = json.loads(zenodo_file.read_text()) zen_names = [' '.join(val['name'].split(',')[::-1]).strip() for val in zenodo['creators']] + total_names = len(zen_names) + len(MISSING_ENTRIES) name_matches = [] + position = 1 for ele in data: matches = process.extract(ele, zen_names, scorer=fuzz.token_sort_ratio, limit=2) @@ -85,10 +72,21 @@ def fix_position(creators): continue if val not in name_matches: + if val['name'] not in CREATORS_LAST: + val['position'] = position + position += 1 + else: + val['position'] = total_names + CREATORS_LAST.index(val['name']) name_matches.append(val) - for entry in MISSING_ENTRIES: - name_matches.append(entry) + for missing in MISSING_ENTRIES: + missing['position'] = position + position += 1 + name_matches.append(missing) + + zenodo['creators'] = sorted(name_matches, key=lambda k: k['position']) + # Remove position + for creator in zenodo['creators']: + del creator['position'] - zenodo['creators'] = fix_position(name_matches) - zenodo_file.write_text(json.dumps(zenodo, indent=2, sort_keys=True)) + zenodo_file.write_text('%s\n' % json.dumps(zenodo, indent=2, sort_keys=True)) From dd38a83f2b6542161d1f250e0ced975fa382f326 Mon Sep 17 00:00:00 2001 From: oesteban Date: Fri, 20 Sep 2019 15:25:00 -0700 Subject: [PATCH 0431/1665] rel(1.2.3): update ``.zenodo.json`` testing the new update script closes #3043 --- .zenodo.json | 63 +++++++++++++++++++++++++--------------------------- 1 file changed, 30 insertions(+), 33 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index fba3546a77..ac9972984b 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -1,10 +1,5 @@ { "creators": [ - { - "affiliation": "Department of Psychology, Stanford University", - "name": "Gorgolewski, Krzysztof J.", - "orcid": "0000-0003-3321-7583" - }, { "affiliation": "Department of Psychology, Stanford University", "name": "Esteban, Oscar", @@ -78,10 +73,6 @@ "affiliation": "Shattuck Lab, UCLA Brain Mapping Center", "name": "Wong, Jason" }, - { - "affiliation": "Concordia University", - "name": "Benderoff, Erin" - }, { "name": "Modat, Marc" }, @@ -150,6 +141,10 @@ { "name": "Berleant, Shoshana" }, + { + "affiliation": "Concordia University", + "name": "Benderoff, Erin" + }, { "affiliation": "Dartmouth College: Hanover, NH, United States", "name": "Halchenko, Yaroslav O.", @@ -352,14 +347,14 @@ "affiliation": "UniversityHospital Heidelberg, Germany", "name": "Kleesiek, Jens" }, - { - "name": "Erickson, Drew" - }, { "affiliation": "NIMH IRP", "name": "Lee, John A.", "orcid": "0000-0001-5884-4247" }, + { + "name": "Erickson, Drew" + }, { "affiliation": "Child Mind Institute", "name": "Giavasis, Steven" @@ -384,9 +379,6 @@ { "name": "Millman, Jarrod" }, - { - "name": "Lai, Jeff" - }, { "name": "Zhou, Dale" }, @@ -409,9 +401,9 @@ "name": "Liu, Siqi" }, { - "affiliation": "Korea Advanced Institute of Science and Technology", - "name": "Kim, Sin", - "orcid": "0000-0003-4652-3758" + "affiliation": "University of Pennsylvania", + "name": "Kahn, Ari E.", + "orcid": "0000-0002-2127-0507" }, { "affiliation": "Leibniz Institute for Neurobiology", @@ -419,9 +411,9 @@ "orcid": "0000-0003-4313-129X" }, { - "affiliation": "University of Pennsylvania", - "name": "Kahn, Ari E.", - "orcid": "0000-0002-2127-0507" + "affiliation": "Korea Advanced Institute of Science and Technology", + "name": "Kim, Sin", + "orcid": "0000-0003-4652-3758" }, { 
"affiliation": "University College London", @@ -515,25 +507,25 @@ "name": "Perkins, L. Nathan" }, { - "affiliation": "University of Amsterdam", - "name": "Lukas Snoek", - "orcid": "0000-0001-8972-204X" + "name": "Marina, Ana" }, { - "affiliation": "Institute of Imaging & Computer Vision, RWTH Aachen University, Germany", - "name": "Weninger, Leon" + "name": "Mattfeld, Aaron" }, { - "name": "Marina, Ana" + "name": "Noel, Maxime" }, { - "name": "Mattfeld, Aaron" + "affiliation": "University of Amsterdam", + "name": "Lukas Snoek", + "orcid": "0000-0001-8972-204X" }, { - "name": "Matsubara, K" + "affiliation": "Institute of Imaging & Computer Vision, RWTH Aachen University, Germany", + "name": "Weninger, Leon" }, { - "name": "Noel, Maxime" + "name": "Matsubara, K" }, { "affiliation": "University of Pennsylvania", @@ -586,9 +578,6 @@ { "name": "Weinstein, Alejandro" }, - { - "name": "Tambini, Arielle" - }, { "affiliation": "Duke University", "name": "Broderick, William", @@ -630,6 +619,9 @@ { "name": "Tarbert, Claire" }, + { + "name": "Tambini, Arielle" + }, { "name": "Nickson, Thomas" }, @@ -714,6 +706,11 @@ { "name": "Lai, Jeff" }, + { + "affiliation": "Department of Psychology, Stanford University", + "name": "Gorgolewski, Krzysztof J.", + "orcid": "0000-0003-3321-7583" + }, { "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", From 56a033548b833dc5fbe831562f966580a3f677c5 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 23 Sep 2019 13:43:15 -0400 Subject: [PATCH 0432/1665] MAINT: Bump dev version --- doc/documentation.rst | 2 +- nipype/info.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/documentation.rst b/doc/documentation.rst index b285811531..70b55d9a78 100644 --- a/doc/documentation.rst +++ b/doc/documentation.rst @@ -9,7 +9,7 @@ Documentation :Release: |version| :Date: |today| -Previous versions: `1.2.2 `_ `1.2.1 `_ +Previous versions: `1.2.3 `_ `1.2.2 `_ .. 
container:: doc2 diff --git a/nipype/info.py b/nipype/info.py index e3bd9aa2df..257031c38f 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -9,7 +9,7 @@ # nipype version information # Remove -dev for release -__version__ = '1.2.3' +__version__ = '1.3.0-dev' def get_nipype_gitversion(): From 43183fbae90bf22fbc76083c2b4a2334ef05f113 Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Tue, 24 Sep 2019 14:15:34 -0400 Subject: [PATCH 0433/1665] enh: update to provide bad version info to users --- .et | 3 +++ nipype/__init__.py | 9 +++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) create mode 100644 .et diff --git a/.et b/.et new file mode 100644 index 0000000000..96ed9287ea --- /dev/null +++ b/.et @@ -0,0 +1,3 @@ +{ "bad_versions" : [ "1.2.1", + "1.2.3"] +} \ No newline at end of file diff --git a/nipype/__init__.py b/nipype/__init__.py index 8d135bf061..ba0d708a93 100644 --- a/nipype/__init__.py +++ b/nipype/__init__.py @@ -63,12 +63,17 @@ def get_info(): if config.getboolean('execution', 'check_version'): import etelemetry - latest = {"version": 'Unknown'} + latest = {"version": 'Unknown', "bad_versions": []} try: - latest = etelemetry.get_project("nipy/nipype") + latest.update(**etelemetry.get_project("nipy/nipype")) except Exception as e: logger.warning("Could not check for version updates: \n%s", e) finally: logger.info(INIT_MSG(packname='nipype', version=__version__, latest=latest["version"])) + if latest["bad_versions"] and \ + any([LooseVersion(__version__) == LooseVersion(ver) + for ver in latest["bad_versions"]): + logger.critical(('You are using a version of Nipype with a critical ' + 'bug. Please use a different version.')) From ae2dddf21b4713888f2687097c6373935e391060 Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Tue, 24 Sep 2019 15:04:40 -0400 Subject: [PATCH 0434/1665] use result to determine output --- nipype/__init__.py | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/nipype/__init__.py b/nipype/__init__.py index ba0d708a93..091d8ec33c 100644 --- a/nipype/__init__.py +++ b/nipype/__init__.py @@ -64,16 +64,19 @@ def get_info(): import etelemetry latest = {"version": 'Unknown', "bad_versions": []} + result = None try: - latest.update(**etelemetry.get_project("nipy/nipype")) + result = etelemetry.get_project("nipy/nipype") except Exception as e: logger.warning("Could not check for version updates: \n%s", e) finally: - logger.info(INIT_MSG(packname='nipype', - version=__version__, - latest=latest["version"])) - if latest["bad_versions"] and \ - any([LooseVersion(__version__) == LooseVersion(ver) - for ver in latest["bad_versions"]): - logger.critical(('You are using a version of Nipype with a critical ' - 'bug. Please use a different version.')) + if result: + latest.update(**result) + logger.info(INIT_MSG(packname='nipype', + version=__version__, + latest=latest["version"])) + if latest["bad_versions"] and \ + any([LooseVersion(__version__) == LooseVersion(ver) + for ver in latest["bad_versions"]): + logger.critical(('You are using a version of Nipype with a critical ' + 'bug. 
Please use a different version.')) From d8411c1b977a93f5b5488914d5ecf64fc1289123 Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Tue, 24 Sep 2019 15:19:14 -0400 Subject: [PATCH 0435/1665] enh: only display if versions are different --- nipype/__init__.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nipype/__init__.py b/nipype/__init__.py index 091d8ec33c..e0ab50477a 100644 --- a/nipype/__init__.py +++ b/nipype/__init__.py @@ -72,9 +72,10 @@ def get_info(): finally: if result: latest.update(**result) - logger.info(INIT_MSG(packname='nipype', - version=__version__, - latest=latest["version"])) + if LooseVersion(__version__) != LooseVersion(latest["version"]): + logger.info(INIT_MSG(packname='nipype', + version=__version__, + latest=latest["version"])) if latest["bad_versions"] and \ any([LooseVersion(__version__) == LooseVersion(ver) for ver in latest["bad_versions"]): From 861e52c24aa370997e9d9498dcee8dbabb60e35a Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Tue, 24 Sep 2019 15:24:45 -0400 Subject: [PATCH 0436/1665] fix: syntax error --- nipype/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/__init__.py b/nipype/__init__.py index e0ab50477a..47895518c4 100644 --- a/nipype/__init__.py +++ b/nipype/__init__.py @@ -78,6 +78,6 @@ def get_info(): latest=latest["version"])) if latest["bad_versions"] and \ any([LooseVersion(__version__) == LooseVersion(ver) - for ver in latest["bad_versions"]): + for ver in latest["bad_versions"]]): logger.critical(('You are using a version of Nipype with a critical ' 'bug. Please use a different version.')) From 72964f450a2a98fdb8cade6b9ded47a017f62495 Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Tue, 24 Sep 2019 16:14:12 -0400 Subject: [PATCH 0437/1665] ref: make version checking it's own function --- nipype/__init__.py | 30 +++++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/nipype/__init__.py b/nipype/__init__.py index 47895518c4..419f1701b8 100644 --- a/nipype/__init__.py +++ b/nipype/__init__.py @@ -22,9 +22,7 @@ config = NipypeConfig() logging = Logging(config) -logger = logging.getLogger('nipype.utils') -INIT_MSG = "Running {packname} version {version} (latest: {latest})".format class NipypeTester(object): def __call__(self, doctests=True, parallel=False): @@ -60,8 +58,18 @@ def get_info(): Rename, Function, Select, Merge) -if config.getboolean('execution', 'check_version'): +def check_version(raise_exception=False): + """Check for the latest version of the library + + parameters: + raise_exception: boolean + Raise a RuntimeError if a bad version is being used + """ + import etelemetry + logger = logging.getLogger('nipype.utils') + + INIT_MSG = "Running {packname} version {version} (latest: {latest})".format latest = {"version": 'Unknown', "bad_versions": []} result = None @@ -77,7 +85,15 @@ def get_info(): version=__version__, latest=latest["version"])) if latest["bad_versions"] and \ - any([LooseVersion(__version__) == LooseVersion(ver) - for ver in latest["bad_versions"]]): - logger.critical(('You are using a version of Nipype with a critical ' - 'bug. Please use a different version.')) + any([LooseVersion(__version__) == LooseVersion(ver) + for ver in latest["bad_versions"]]): + message = ('You are using a version of Nipype with a critical ' + 'bug. 
Please use a different version.') + if raise_exception: + raise RuntimeError(message) + else: + logger.critical(message) + + +if config.getboolean('execution', 'check_version'): + check_version() From 2328bebf17473df1078734acb14ab0663d7578c9 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 24 Sep 2019 16:25:02 -0400 Subject: [PATCH 0438/1665] RF: Provide functions to augment old Path.mkdir, Path.resolve methods --- nipype/interfaces/base/traits_extension.py | 4 +- nipype/pipeline/engine/utils.py | 3 +- nipype/utils/filemanip.py | 47 ++++++---------------- nipype/utils/tests/test_filemanip.py | 29 ++++++++----- 4 files changed, 35 insertions(+), 48 deletions(-) diff --git a/nipype/interfaces/base/traits_extension.py b/nipype/interfaces/base/traits_extension.py index 27c4103a74..17a637546d 100644 --- a/nipype/interfaces/base/traits_extension.py +++ b/nipype/interfaces/base/traits_extension.py @@ -33,7 +33,7 @@ from traits.api import Unicode from future import standard_library -from ...utils.filemanip import Path, USING_PATHLIB2 +from ...utils.filemanip import Path, USING_PATHLIB2, path_resolve if USING_PATHLIB2: from future.types.newstr import newstr @@ -147,7 +147,7 @@ def validate(self, objekt, name, value, return_pathlike=False): self.error(objekt, name, str(value)) if self.resolve: - value = value.resolve(strict=self.exists) + value = path_resolve(value, strict=self.exists) if not return_pathlike: value = str(value) diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index 0df39e2a5a..fab1fb14de 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -25,6 +25,7 @@ from ... import logging, config, LooseVersion from ...utils.filemanip import ( Path, + path_mkdir, indirectory, relpath, makedirs, @@ -123,7 +124,7 @@ def write_node_report(node, result=None, is_mapnode=False): cwd = node.output_dir() report_file = Path(cwd) / '_report' / 'report.rst' - report_file.parent.mkdir(exist_ok=True, parents=True) + path_mkdir(report_file.parent, exist_ok=True, parents=True) lines = [ write_rst_header('Node: %s' % get_print_name(node), level=0), diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 1cdd1e9676..b8534c42ab 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -59,34 +59,26 @@ def __init__(self, path): from pathlib2 import Path USING_PATHLIB2 = True -try: # PY35 - strict mode was added in 3.6 - Path('/invented/file/path').resolve(strict=True) -except TypeError: - def _patch_resolve(self, strict=False): - """Add the argument strict to signature in Python>3,<3.6.""" - resolved = Path().old_resolve() / self +def path_resolve(path, strict=False): + try: + return path.resolve(strict=strict) + except TypeError: # PY35 + resolved = path.resolve() if strict and not resolved.exists(): raise FileNotFoundError(resolved) return resolved - Path.old_resolve = Path.resolve - Path.resolve = _patch_resolve -except FileNotFoundError: - pass -except OSError: - # PY2 - def _patch_resolve(self, strict=False): - """Raise FileNotFoundError instead of OSError with pathlib2.""" - try: - resolved = self.old_resolve(strict=strict) - except OSError: - raise FileNotFoundError(self.old_resolve()) - return resolved +def path_mkdir(path, mode=0o777, parents=False, exist_ok=False): + try: + return path.mkdir(mode=mode, parents=parents, exist_ok=exist_ok) + except TypeError: # PY27/PY34 + if parents: + return makedirs(str(path), mode=mode, exist_ok=exist_ok) + elif not exist_ok or not path.exists(): + 
return os.mkdir(str(path), mode=mode) - Path.old_resolve = Path.resolve - Path.resolve = _patch_resolve if not hasattr(Path, 'write_text'): # PY34 - Path does not have write_text @@ -95,19 +87,6 @@ def _write_text(self, text): f.write(text) Path.write_text = _write_text -try: # PY27/PY34 - mkdir does not have exist_ok - from .tmpdirs import TemporaryDirectory - with TemporaryDirectory() as tmpdir: - (Path(tmpdir) / 'exist_ok_test').mkdir(exist_ok=True) -except TypeError: - def _mkdir(self, mode=0o777, parents=False, exist_ok=False): - if parents: - makedirs(str(self), mode=mode, exist_ok=exist_ok) - elif not exist_ok or not self.exists(): - os.mkdir(str(self), mode=mode) - - Path.mkdir = _mkdir - def split_filename(fname): """Split a filename into parts: path, base filename and extension. diff --git a/nipype/utils/tests/test_filemanip.py b/nipype/utils/tests/test_filemanip.py index 9ee2e4c0ba..687d7b284e 100644 --- a/nipype/utils/tests/test_filemanip.py +++ b/nipype/utils/tests/test_filemanip.py @@ -16,7 +16,8 @@ check_forhash, _parse_mount_table, _cifs_table, on_cifs, copyfile, copyfiles, ensure_list, simplify_list, check_depends, split_filename, get_related_files, indirectory, - loadpkl, loadcrash, savepkl, FileNotFoundError, Path) + loadpkl, loadcrash, savepkl, FileNotFoundError, Path, + path_mkdir, path_resolve) def _ignore_atime(stat): @@ -572,21 +573,26 @@ def test_unversioned_pklization(tmpdir): loadpkl('./pickled.pkz') -def test_Path_strict_resolve(tmpdir): +def test_path_strict_resolve(tmpdir): """Check the monkeypatch to test strict resolution of Path.""" tmpdir.chdir() # Default strict=False should work out out of the box testfile = Path('somefile.txt') - assert '%s/somefile.txt' % tmpdir == '%s' % testfile.resolve() + resolved = '%s/somefile.txt' % tmpdir + assert str(testfile.resolve()) == resolved + # path_resolve() is equivalent to Path.resolve() + assert str(path_resolve(testfile)) == resolved + # Strict keyword is always allowed + assert str(path_resolve(testfile, strict=False)) == resolved # Switching to strict=True must raise FileNotFoundError (also in Python2) with pytest.raises(FileNotFoundError): - testfile.resolve(strict=True) + path_resolve(testfile, strict=True) # If the file is created, it should not raise open('somefile.txt', 'w').close() - assert '%s/somefile.txt' % tmpdir == '%s' % testfile.resolve(strict=True) + assert str(path_resolve(testfile, strict=True)) == resolved @pytest.mark.parametrize("save_versioning", [True, False]) @@ -598,17 +604,18 @@ def test_pickle(tmp_path, save_versioning): assert outobj == testobj -def test_Path(tmpdir): +def test_path_mkdir(tmpdir): tmp_path = Path(tmpdir.strpath) - (tmp_path / 'textfile').write_text('some text') + # PY34: Leave as monkey-patch + Path.write_text(tmp_path / 'textfile', 'some text') with pytest.raises(OSError): - (tmp_path / 'no' / 'parents').mkdir(parents=False) + path_mkdir(tmp_path / 'no' / 'parents', parents=False) - (tmp_path / 'no' / 'parents').mkdir(parents=True) + path_mkdir(tmp_path / 'no' / 'parents', parents=True) with pytest.raises(OSError): - (tmp_path / 'no' / 'parents').mkdir(parents=False) + path_mkdir(tmp_path / 'no' / 'parents', parents=False) - (tmp_path / 'no' / 'parents').mkdir(parents=True, exist_ok=True) + path_mkdir(tmp_path / 'no' / 'parents', parents=True, exist_ok=True) From 9eefdcdb1d2fd5557f4107c469d983502979f92e Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Tue, 24 Sep 2019 20:35:57 -0400 Subject: [PATCH 0439/1665] FIX: Handle missing paths better pre-3.6 --- nipype/utils/filemanip.py | 26 +++++++++++++++++++++----- nipype/utils/tests/test_filemanip.py | 2 -- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index b8534c42ab..1bbf6879a8 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -60,14 +60,30 @@ def __init__(self, path): USING_PATHLIB2 = True +def _resolve_with_filenotfound(path, **kwargs): + """ Raise FileNotFoundError instead of OSError """ + try: + return path.resolve(**kwargs) + except OSError as e: + if isinstance(e, FileNotFoundError): + raise + raise FileNotFoundError(str(path)) + + def path_resolve(path, strict=False): try: - return path.resolve(strict=strict) + return _resolve_with_filenotfound(path, strict=strict) except TypeError: # PY35 - resolved = path.resolve() - if strict and not resolved.exists(): - raise FileNotFoundError(resolved) - return resolved + pass + + path = path.absolute() + if strict or path.exists(): + return _resolve_with_filenotfound(path) + + # This is a hacky shortcut, using path.absolute() unmodified + # In cases where the existing part of the path contains a + # symlink, different results will be produced + return path def path_mkdir(path, mode=0o777, parents=False, exist_ok=False): diff --git a/nipype/utils/tests/test_filemanip.py b/nipype/utils/tests/test_filemanip.py index 687d7b284e..3d9154db67 100644 --- a/nipype/utils/tests/test_filemanip.py +++ b/nipype/utils/tests/test_filemanip.py @@ -580,8 +580,6 @@ def test_path_strict_resolve(tmpdir): # Default strict=False should work out out of the box testfile = Path('somefile.txt') resolved = '%s/somefile.txt' % tmpdir - assert str(testfile.resolve()) == resolved - # path_resolve() is equivalent to Path.resolve() assert str(path_resolve(testfile)) == resolved # Strict keyword is always allowed assert str(path_resolve(testfile, strict=False)) == resolved From ee8815712bc7a0b4b06a3199c499973cbfb4d4fa Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Thu, 26 Sep 2019 10:03:30 -0400 Subject: [PATCH 0440/1665] Add "copy" trait to Rename, to allow copying instead of symlink --- nipype/interfaces/utility/base.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/utility/base.py b/nipype/interfaces/utility/base.py index d01a0a17b9..d6d652de25 100644 --- a/nipype/interfaces/utility/base.py +++ b/nipype/interfaces/utility/base.py @@ -213,6 +213,8 @@ class RenameInputSpec(DynamicTraitedSpec): "replacement inputs") use_fullpath = traits.Bool( False, usedefault=True, desc="Use full path as input to regex parser") + copy = traits.Bool(False, usedefault=True, desc="Whether to copy the file " + "(True) or try to symlink it (False)") class RenameOutputSpec(TraitedSpec): @@ -301,7 +303,7 @@ def _rename(self): def _run_interface(self, runtime): runtime.returncode = 0 out_file = os.path.join(runtime.cwd, self._rename()) - _ = copyfile(self.inputs.in_file, out_file) + _ = copyfile(self.inputs.in_file, out_file, copy=self.inputs.copy) self._results['out_file'] = out_file return runtime From b8d29a959a5ff4560174870cd28e8273280692e1 Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Thu, 26 Sep 2019 10:48:05 -0400 Subject: [PATCH 0441/1665] Revert "Add "copy" trait to Rename, to allow copying instead of symlink" This reverts commit ee8815712bc7a0b4b06a3199c499973cbfb4d4fa. 
--- nipype/interfaces/utility/base.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/nipype/interfaces/utility/base.py b/nipype/interfaces/utility/base.py index d6d652de25..d01a0a17b9 100644 --- a/nipype/interfaces/utility/base.py +++ b/nipype/interfaces/utility/base.py @@ -213,8 +213,6 @@ class RenameInputSpec(DynamicTraitedSpec): "replacement inputs") use_fullpath = traits.Bool( False, usedefault=True, desc="Use full path as input to regex parser") - copy = traits.Bool(False, usedefault=True, desc="Whether to copy the file " - "(True) or try to symlink it (False)") class RenameOutputSpec(TraitedSpec): @@ -303,7 +301,7 @@ def _rename(self): def _run_interface(self, runtime): runtime.returncode = 0 out_file = os.path.join(runtime.cwd, self._rename()) - _ = copyfile(self.inputs.in_file, out_file, copy=self.inputs.copy) + _ = copyfile(self.inputs.in_file, out_file) self._results['out_file'] = out_file return runtime From 1b083f10028456e6bbee6bf18b770e77e1a77f0c Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Thu, 26 Sep 2019 11:16:02 -0400 Subject: [PATCH 0442/1665] create ExportFile interface --- nipype/interfaces/io.py | 31 ++++++++++++++++++++++++++++++ nipype/interfaces/tests/test_io.py | 23 ++++++++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index c409d4e78a..d405cf7150 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -28,6 +28,7 @@ import tempfile from os.path import join, dirname from warnings import warn +import errno from .. import config, logging from ..utils.filemanip import ( @@ -2863,3 +2864,33 @@ def _list_outputs(self): def _add_output_traits(self, base): return add_traits(base, list(self.inputs.output_query.keys())) + + +class ExportFileInputSpec(BaseInterfaceInputSpec): + in_file = File(exists=True, desc='Input file name') + out_file = File(exists=False, desc='Output file name') + check_extension = traits.Bool(False, desc='Ensure that the input and output file extensions match') + clobber = traits.Bool(False, desc='Permit overwriting existing files') + + +class ExportFileOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='Output file name') + + +class ExportFile(BaseInterface): + input_spec = ExportFileInputSpec + output_spec = ExportFileOutputSpec + + def _run_interface(self, runtime): + if not self.inputs.clobber and op.exists(self.inputs.out_file): + raise FileExistsError(errno.EEXIST, f'File {self.inputs.out_file} exists') + if (self.inputs.check_extension and + op.splitext(self.inputs.in_file)[1] != op.splitext(self.inputs.out_file)[1]): + raise RuntimeError(f'{self.inputs.in_file} and {self.inputs.out_file} have different extensions') + shutil.copy(str(self.inputs.in_file), str(self.inputs.out_file)) + return runtime + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = self.inputs.out_file + return outputs diff --git a/nipype/interfaces/tests/test_io.py b/nipype/interfaces/tests/test_io.py index 4420e4a49c..4dd408c36d 100644 --- a/nipype/interfaces/tests/test_io.py +++ b/nipype/interfaces/tests/test_io.py @@ -680,3 +680,26 @@ def _mock_get_ssh_client(self): .check(file=True, exists=True)) # exists? 
old_cwd.chdir() + + +def test_ExportFile(tmp_path): + testin = tmp_path / 'in.txt' + testin.write_text('test string') + i = nio.ExportFile() + i.inputs.in_file = testin + i.inputs.out_file = tmp_path / 'out.tsv' + i.inputs.check_extension = True + with pytest.raises(RuntimeError): + i.run() + i.inputs.check_extension = False + i.run() + assert (tmp_path / 'out.tsv').read_text() == 'test string' + i.inputs.out_file = tmp_path / 'out.txt' + i.inputs.check_extension = True + i.run() + assert (tmp_path / 'out.txt').read_text() == 'test string' + with pytest.raises(FileExistsError): + i.run() + i.inputs.clobber = True + i.run() + assert (tmp_path / 'out.txt').read_text() == 'test string' From d86fa7816278882cd4557aea7b5665ebf0f1ec6e Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Thu, 26 Sep 2019 16:24:52 -0400 Subject: [PATCH 0443/1665] ENH: Run memoized check_version at REPL import, Node/Workflow/Interface init --- nipype/__init__.py | 7 ++++++- nipype/interfaces/base/core.py | 4 ++++ nipype/pipeline/engine/base.py | 3 +++ 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/nipype/__init__.py b/nipype/__init__.py index 419f1701b8..799e92e750 100644 --- a/nipype/__init__.py +++ b/nipype/__init__.py @@ -5,6 +5,7 @@ absolute_import) import os +import functools from distutils.version import LooseVersion from .info import (LONG_DESCRIPTION as __doc__, URL as __url__, STATUS as @@ -58,6 +59,7 @@ def get_info(): Rename, Function, Select, Merge) +@functools.lru_cache() def check_version(raise_exception=False): """Check for the latest version of the library @@ -95,5 +97,8 @@ def check_version(raise_exception=False): logger.critical(message) +# Run telemetry on import for interactive sessions, such as IPython, Jupyter notebooks, Python REPL if config.getboolean('execution', 'check_version'): - check_version() + import __main__ + if not hasattr(__main__, '__file__'): + check_version() diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index 7ba8486082..7132f7fc97 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -169,6 +169,10 @@ class BaseInterface(Interface): def __init__(self, from_file=None, resource_monitor=None, ignore_exception=False, **inputs): + if config.getboolean('execution', 'check_version'): + from ... import check_version + check_version() + if not self.input_spec: raise Exception( 'No input_spec in class: %s' % self.__class__.__name__) diff --git a/nipype/pipeline/engine/base.py b/nipype/pipeline/engine/base.py index 7f7afd3928..ffb64222d4 100644 --- a/nipype/pipeline/engine/base.py +++ b/nipype/pipeline/engine/base.py @@ -41,6 +41,9 @@ def __init__(self, name=None, base_dir=None): self.base_dir = base_dir self.config = deepcopy(config._sections) + if config.getboolean('execution', 'check_version'): + from ... 
import check_version + check_version() @property def name(self): From 43669cc5b6baf17f3d2d07edc62faefeca1899f8 Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Sun, 29 Sep 2019 22:09:30 -0400 Subject: [PATCH 0444/1665] fix: use lru_cache in py3 --- nipype/__init__.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/nipype/__init__.py b/nipype/__init__.py index 799e92e750..4da4a250b2 100644 --- a/nipype/__init__.py +++ b/nipype/__init__.py @@ -6,6 +6,7 @@ import os import functools +import sys from distutils.version import LooseVersion from .info import (LONG_DESCRIPTION as __doc__, URL as __url__, STATUS as @@ -59,7 +60,16 @@ def get_info(): Rename, Function, Select, Merge) -@functools.lru_cache() +def sys_based_cache(condition): + def decorator(func): + if not condition: + return func + else: + return functools.lru_cache(func) + return decorator + + +@sys_based_cache(sys.version >= 3) def check_version(raise_exception=False): """Check for the latest version of the library From cf72c26f8e622ffbb7a8f84b9da497860da6d27f Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Sun, 29 Sep 2019 22:33:24 -0400 Subject: [PATCH 0445/1665] fix: version checking --- nipype/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/__init__.py b/nipype/__init__.py index 4da4a250b2..6fa4579f77 100644 --- a/nipype/__init__.py +++ b/nipype/__init__.py @@ -62,14 +62,14 @@ def get_info(): def sys_based_cache(condition): def decorator(func): - if not condition: + if condition: return func else: return functools.lru_cache(func) return decorator -@sys_based_cache(sys.version >= 3) +@sys_based_cache(sys.version_info < (3,)) def check_version(raise_exception=False): """Check for the latest version of the library From f614ab90069eafaec11e2813153f1226786b13a1 Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Sun, 29 Sep 2019 23:27:36 -0400 Subject: [PATCH 0446/1665] fix: decorator and ref: check name --- nipype/__init__.py | 8 ++++---- nipype/interfaces/base/core.py | 4 ++-- nipype/pipeline/engine/base.py | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/nipype/__init__.py b/nipype/__init__.py index 6fa4579f77..ba5ac84624 100644 --- a/nipype/__init__.py +++ b/nipype/__init__.py @@ -65,12 +65,12 @@ def decorator(func): if condition: return func else: - return functools.lru_cache(func) + return functools.lru_cache()(func) return decorator @sys_based_cache(sys.version_info < (3,)) -def check_version(raise_exception=False): +def check_latest_version(raise_exception=False): """Check for the latest version of the library parameters: @@ -105,10 +105,10 @@ def check_version(raise_exception=False): raise RuntimeError(message) else: logger.critical(message) - + return latest # Run telemetry on import for interactive sessions, such as IPython, Jupyter notebooks, Python REPL if config.getboolean('execution', 'check_version'): import __main__ if not hasattr(__main__, '__file__'): - check_version() + check_latest_version() diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index 7132f7fc97..2b885d796e 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -170,8 +170,8 @@ class BaseInterface(Interface): def __init__(self, from_file=None, resource_monitor=None, ignore_exception=False, **inputs): if config.getboolean('execution', 'check_version'): - from ... import check_version - check_version() + from ... 
import check_latest_version + check_latest_version() if not self.input_spec: raise Exception( diff --git a/nipype/pipeline/engine/base.py b/nipype/pipeline/engine/base.py index ffb64222d4..c1215e4995 100644 --- a/nipype/pipeline/engine/base.py +++ b/nipype/pipeline/engine/base.py @@ -42,8 +42,8 @@ def __init__(self, name=None, base_dir=None): self.base_dir = base_dir self.config = deepcopy(config._sections) if config.getboolean('execution', 'check_version'): - from ... import check_version - check_version() + from ... import check_latest_version + check_latest_version() @property def name(self): From 98e93a4d178d4416cc567d0602d7d78362e66f7b Mon Sep 17 00:00:00 2001 From: mathiasg Date: Mon, 30 Sep 2019 13:05:25 -0400 Subject: [PATCH 0447/1665] tst: create data directory prior to datalad install --- nipype/interfaces/tests/test_extra_dcm2nii.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/nipype/interfaces/tests/test_extra_dcm2nii.py b/nipype/interfaces/tests/test_extra_dcm2nii.py index 44fb7196f8..5a82b87508 100644 --- a/nipype/interfaces/tests/test_extra_dcm2nii.py +++ b/nipype/interfaces/tests/test_extra_dcm2nii.py @@ -14,11 +14,10 @@ DICOM_DIR = 'http://datasets-tests.datalad.org/dicoms/dcm2niix-tests' -def fetch_data(tmpdir, dicoms): +def fetch_data(datadir, dicoms): """Fetches some test DICOMs using datalad""" - data = os.path.join(tmpdir, 'data') - api.install(path=data, source=DICOM_DIR) - data = os.path.join(data, dicoms) + api.install(path=datadir, source=DICOM_DIR) + data = os.path.join(datadir, dicoms) api.get(path=data) return data @@ -26,8 +25,10 @@ def fetch_data(tmpdir, dicoms): @pytest.mark.skipif(no_dcm2niix, reason="Dcm2niix required") def test_dcm2niix_dwi(tmpdir): tmpdir.chdir() + datadir = tmpdir / 'data' + datadir.mkdir() try: - datadir = fetch_data(tmpdir.strpath, 'Siemens_Sag_DTI_20160825_145811') + dicoms = fetch_data(datadir.strpath, 'Siemens_Sag_DTI_20160825_145811') except IncompleteResultsError as exc: pytest.skip("Failed to fetch test data: %s" % str(exc)) @@ -44,7 +45,7 @@ def assert_dwi(eg, bids): assert not eg.outputs.bids dcm = Dcm2niix() - dcm.inputs.source_dir = datadir + dcm.inputs.source_dir = dicoms dcm.inputs.out_filename = '%u%z' assert_dwi(dcm.run(), True) From 5b30d5b7855bdaa0c12f79e20df5afb1245c17c6 Mon Sep 17 00:00:00 2001 From: mathiasg Date: Mon, 30 Sep 2019 13:10:55 -0400 Subject: [PATCH 0448/1665] rf: clean up test --- nipype/interfaces/tests/test_extra_dcm2nii.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/nipype/interfaces/tests/test_extra_dcm2nii.py b/nipype/interfaces/tests/test_extra_dcm2nii.py index 5a82b87508..083b8fcca1 100644 --- a/nipype/interfaces/tests/test_extra_dcm2nii.py +++ b/nipype/interfaces/tests/test_extra_dcm2nii.py @@ -25,20 +25,19 @@ def fetch_data(datadir, dicoms): @pytest.mark.skipif(no_dcm2niix, reason="Dcm2niix required") def test_dcm2niix_dwi(tmpdir): tmpdir.chdir() - datadir = tmpdir / 'data' - datadir.mkdir() + datadir = tmpdir.mkdir('data').strpath try: - dicoms = fetch_data(datadir.strpath, 'Siemens_Sag_DTI_20160825_145811') + dicoms = fetch_data(datadir, 'Siemens_Sag_DTI_20160825_145811') except IncompleteResultsError as exc: pytest.skip("Failed to fetch test data: %s" % str(exc)) - def assert_dwi(eg, bids): + def assert_dwi(eg): "Some assertions we will make" assert eg.outputs.converted_files assert eg.outputs.bvals assert eg.outputs.bvecs outputs = [y for x,y in eg.outputs.get().items()] - if bids: + if eg.inputs.get('bids_format'): # 
ensure all outputs are of equal lengths assert len(set(map(len, outputs))) == 1 else: @@ -47,10 +46,10 @@ def assert_dwi(eg, bids): dcm = Dcm2niix() dcm.inputs.source_dir = dicoms dcm.inputs.out_filename = '%u%z' - assert_dwi(dcm.run(), True) + assert_dwi(dcm.run()) # now run specifying output directory and removing BIDS option outdir = tmpdir.mkdir('conversion').strpath dcm.inputs.output_dir = outdir dcm.inputs.bids_format = False - assert_dwi(dcm.run(), False) + assert_dwi(dcm.run()) From 9c5d64bfb18a7325b702aad1f40adfcd6a730e7f Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Mon, 30 Sep 2019 21:31:41 -0400 Subject: [PATCH 0449/1665] enh: memoize with global --- nipype/__init__.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/nipype/__init__.py b/nipype/__init__.py index ba5ac84624..5ea3a2a5cd 100644 --- a/nipype/__init__.py +++ b/nipype/__init__.py @@ -59,17 +59,8 @@ def get_info(): from .interfaces import (DataGrabber, DataSink, SelectFiles, IdentityInterface, Rename, Function, Select, Merge) +etelemetry_results = {} -def sys_based_cache(condition): - def decorator(func): - if condition: - return func - else: - return functools.lru_cache()(func) - return decorator - - -@sys_based_cache(sys.version_info < (3,)) def check_latest_version(raise_exception=False): """Check for the latest version of the library @@ -77,6 +68,8 @@ def check_latest_version(raise_exception=False): raise_exception: boolean Raise a RuntimeError if a bad version is being used """ + if raise_exception in etelemetry_results: + return etelemetry_results[raise_exception] import etelemetry logger = logging.getLogger('nipype.utils') @@ -105,6 +98,7 @@ def check_latest_version(raise_exception=False): raise RuntimeError(message) else: logger.critical(message) + etelemetry_results[raise_exception] = latest return latest # Run telemetry on import for interactive sessions, such as IPython, Jupyter notebooks, Python REPL From f31c2d08679153107e68b9a6768b1dcaf101e20a Mon Sep 17 00:00:00 2001 From: mathiasg Date: Tue, 1 Oct 2019 11:31:31 -0400 Subject: [PATCH 0450/1665] tst: use pytest fixture --- nipype/interfaces/tests/test_extra_dcm2nii.py | 43 ++++++++++--------- 1 file changed, 23 insertions(+), 20 deletions(-) diff --git a/nipype/interfaces/tests/test_extra_dcm2nii.py b/nipype/interfaces/tests/test_extra_dcm2nii.py index 083b8fcca1..969256ae76 100644 --- a/nipype/interfaces/tests/test_extra_dcm2nii.py +++ b/nipype/interfaces/tests/test_extra_dcm2nii.py @@ -14,42 +14,45 @@ DICOM_DIR = 'http://datasets-tests.datalad.org/dicoms/dcm2niix-tests' -def fetch_data(datadir, dicoms): - """Fetches some test DICOMs using datalad""" - api.install(path=datadir, source=DICOM_DIR) - data = os.path.join(datadir, dicoms) - api.get(path=data) - return data +@pytest.fixture +def fetch_data(): + def _fetch_data(datadir, dicoms): + try: + """Fetches some test DICOMs using datalad""" + api.install(path=datadir, source=DICOM_DIR) + data = os.path.join(datadir, dicoms) + api.get(path=data) + except IncompleteResultsError as exc: + pytest.skip("Failed to fetch test data: %s" % str(exc)) + return data + return _fetch_data @pytest.mark.skipif(no_datalad, reason="Datalad required") @pytest.mark.skipif(no_dcm2niix, reason="Dcm2niix required") -def test_dcm2niix_dwi(tmpdir): +def test_dcm2niix_dti(fetch_data, tmpdir): tmpdir.chdir() datadir = tmpdir.mkdir('data').strpath - try: - dicoms = fetch_data(datadir, 'Siemens_Sag_DTI_20160825_145811') - except IncompleteResultsError as exc: - pytest.skip("Failed to fetch 
test data: %s" % str(exc)) + dicoms = fetch_data(datadir, 'Siemens_Sag_DTI_20160825_145811') - def assert_dwi(eg): + def assert_dti(res): "Some assertions we will make" - assert eg.outputs.converted_files - assert eg.outputs.bvals - assert eg.outputs.bvecs - outputs = [y for x,y in eg.outputs.get().items()] - if eg.inputs.get('bids_format'): + assert res.outputs.converted_files + assert res.outputs.bvals + assert res.outputs.bvecs + outputs = [y for x,y in res.outputs.get().items()] + if res.inputs.get('bids_format'): # ensure all outputs are of equal lengths assert len(set(map(len, outputs))) == 1 else: - assert not eg.outputs.bids + assert not res.outputs.bids dcm = Dcm2niix() dcm.inputs.source_dir = dicoms dcm.inputs.out_filename = '%u%z' - assert_dwi(dcm.run()) + assert_dti(dcm.run()) # now run specifying output directory and removing BIDS option outdir = tmpdir.mkdir('conversion').strpath dcm.inputs.output_dir = outdir dcm.inputs.bids_format = False - assert_dwi(dcm.run()) + assert_dti(dcm.run()) From 47dc288f2f75324f4ef126bc6ac8df5e97609732 Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Tue, 1 Oct 2019 14:43:37 -0400 Subject: [PATCH 0451/1665] Remove asynchronous chdir callback --- nipype/pipeline/plugins/multiproc.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index d2ef363a34..eea567ff78 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -146,8 +146,6 @@ def __init__(self, plugin_args=None): self._stats = None def _async_callback(self, args): - # Make sure runtime is not left at a dubious working directory - os.chdir(self._cwd) result = args.result() self._taskresult[result['taskid']] = result From 9bd195ae8f558189a113931591b8972c5225498a Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Tue, 1 Oct 2019 16:21:42 -0400 Subject: [PATCH 0452/1665] Update nipype/interfaces/io.py Co-Authored-By: Chris Markiewicz --- nipype/interfaces/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index d405cf7150..b8e4540974 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -2870,7 +2870,7 @@ class ExportFileInputSpec(BaseInterfaceInputSpec): in_file = File(exists=True, desc='Input file name') out_file = File(exists=False, desc='Output file name') check_extension = traits.Bool(False, desc='Ensure that the input and output file extensions match') - clobber = traits.Bool(False, desc='Permit overwriting existing files') + clobber = traits.Bool(desc='Permit overwriting existing files') class ExportFileOutputSpec(TraitedSpec): From c870166f19b2d31d5403eb5880d0a2dbec51c63b Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Tue, 1 Oct 2019 16:21:53 -0400 Subject: [PATCH 0453/1665] Update nipype/interfaces/io.py Co-Authored-By: Chris Markiewicz --- nipype/interfaces/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index b8e4540974..51b1422736 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -2867,7 +2867,7 @@ def _add_output_traits(self, base): class ExportFileInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, desc='Input file name') + in_file = File(exists=True, mandatory=True, desc='Input file name') out_file = File(exists=False, desc='Output file name') check_extension = traits.Bool(False, desc='Ensure that the input and output file extensions match') clobber = 
traits.Bool(desc='Permit overwriting existing files') From 0d18f98a3132916e4a66f70075616c221eca0fdd Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Tue, 1 Oct 2019 16:21:59 -0400 Subject: [PATCH 0454/1665] Update nipype/interfaces/io.py Co-Authored-By: Chris Markiewicz --- nipype/interfaces/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index 51b1422736..506c6dd768 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -2868,7 +2868,7 @@ def _add_output_traits(self, base): class ExportFileInputSpec(BaseInterfaceInputSpec): in_file = File(exists=True, mandatory=True, desc='Input file name') - out_file = File(exists=False, desc='Output file name') + out_file = File(mandatory=True, desc='Output file name') check_extension = traits.Bool(False, desc='Ensure that the input and output file extensions match') clobber = traits.Bool(desc='Permit overwriting existing files') From abe83ddad6c068cad5f9952002f65abed1f12a01 Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Tue, 1 Oct 2019 16:23:31 -0400 Subject: [PATCH 0455/1665] Update nipype/interfaces/io.py Co-Authored-By: Chris Markiewicz --- nipype/interfaces/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index 506c6dd768..d3ee34c86d 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -2883,7 +2883,7 @@ class ExportFile(BaseInterface): def _run_interface(self, runtime): if not self.inputs.clobber and op.exists(self.inputs.out_file): - raise FileExistsError(errno.EEXIST, f'File {self.inputs.out_file} exists') + raise FileExistsError(errno.EEXIST, 'File %s exists' % self.inputs.out_file) if (self.inputs.check_extension and op.splitext(self.inputs.in_file)[1] != op.splitext(self.inputs.out_file)[1]): raise RuntimeError(f'{self.inputs.in_file} and {self.inputs.out_file} have different extensions') From cba09b2c0a4f758034869348aa153cfc7da077ed Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Tue, 1 Oct 2019 16:37:30 -0400 Subject: [PATCH 0456/1665] Apply @effigies suggestions --- nipype/interfaces/io.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index d3ee34c86d..f752f09252 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -33,12 +33,12 @@ from .. 
import config, logging from ..utils.filemanip import ( copyfile, simplify_list, ensure_list, - get_related_files) + get_related_files, split_filename) from ..utils.misc import human_order_sorted, str2bool from .base import ( TraitedSpec, traits, Str, File, Directory, BaseInterface, InputMultiPath, isdefined, OutputMultiPath, DynamicTraitedSpec, Undefined, BaseInterfaceInputSpec, - LibraryBaseInterface) + LibraryBaseInterface, SimpleInterface) iflogger = logging.getLogger('nipype.interface') @@ -2869,7 +2869,7 @@ def _add_output_traits(self, base): class ExportFileInputSpec(BaseInterfaceInputSpec): in_file = File(exists=True, mandatory=True, desc='Input file name') out_file = File(mandatory=True, desc='Output file name') - check_extension = traits.Bool(False, desc='Ensure that the input and output file extensions match') + check_extension = traits.Bool(True, desc='Ensure that the input and output file extensions match') clobber = traits.Bool(desc='Permit overwriting existing files') @@ -2877,20 +2877,18 @@ class ExportFileOutputSpec(TraitedSpec): out_file = File(exists=True, desc='Output file name') -class ExportFile(BaseInterface): +class ExportFile(SimpleInterface): input_spec = ExportFileInputSpec output_spec = ExportFileOutputSpec def _run_interface(self, runtime): if not self.inputs.clobber and op.exists(self.inputs.out_file): raise FileExistsError(errno.EEXIST, 'File %s exists' % self.inputs.out_file) + if not op.isabs(self.inputs.out_file): + raise ValueError('Out_file must be an absolute path.') if (self.inputs.check_extension and - op.splitext(self.inputs.in_file)[1] != op.splitext(self.inputs.out_file)[1]): - raise RuntimeError(f'{self.inputs.in_file} and {self.inputs.out_file} have different extensions') + split_filename(self.inputs.in_file)[2] != split_filename(self.inputs.out_file)[2]): + raise RuntimeError('%s and %s have different extensions' % (self.inputs.in_file, self.inputs.out_file)) shutil.copy(str(self.inputs.in_file), str(self.inputs.out_file)) + self._results['out_file'] = self.inputs.out_file return runtime - - def _list_outputs(self): - outputs = self.output_spec().get() - outputs['out_file'] = self.inputs.out_file - return outputs From 7646796c121e8a188140bbc768f4f0a2a5ad836b Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Wed, 2 Oct 2019 15:17:05 -0400 Subject: [PATCH 0457/1665] use a class field limit rerunning check --- nipype/interfaces/base/core.py | 4 +++- nipype/pipeline/engine/base.py | 3 --- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index 2b885d796e..8c5c250286 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -166,12 +166,14 @@ class BaseInterface(Interface): _redirect_x = False references_ = [] resource_monitor = True # Enabled for this interface IFF enabled in the config + check_version = None def __init__(self, from_file=None, resource_monitor=None, ignore_exception=False, **inputs): if config.getboolean('execution', 'check_version'): from ... 
import check_latest_version - check_latest_version() + if BaseInterface.check_version is None: + BaseInterface.check_version = check_latest_version() if not self.input_spec: raise Exception( diff --git a/nipype/pipeline/engine/base.py b/nipype/pipeline/engine/base.py index c1215e4995..7f7afd3928 100644 --- a/nipype/pipeline/engine/base.py +++ b/nipype/pipeline/engine/base.py @@ -41,9 +41,6 @@ def __init__(self, name=None, base_dir=None): self.base_dir = base_dir self.config = deepcopy(config._sections) - if config.getboolean('execution', 'check_version'): - from ... import check_latest_version - check_latest_version() @property def name(self): From f5bec393466bc2b45b6bc221ed5f372f72befc2c Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Wed, 2 Oct 2019 15:19:15 -0400 Subject: [PATCH 0458/1665] store checked version in baseinterface --- nipype/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nipype/__init__.py b/nipype/__init__.py index 5ea3a2a5cd..59481d9069 100644 --- a/nipype/__init__.py +++ b/nipype/__init__.py @@ -105,4 +105,5 @@ def check_latest_version(raise_exception=False): if config.getboolean('execution', 'check_version'): import __main__ if not hasattr(__main__, '__file__'): - check_latest_version() + from .interfaces.base import BaseInterface + BaseInterface.check_version = check_latest_version() From e206f3d53198c683e959fe1e73426ce5cf362b93 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 2 Oct 2019 15:20:44 -0400 Subject: [PATCH 0459/1665] FIX: Minimize scope for directory changes while loading results file --- nipype/utils/filemanip.py | 60 +++++++++++++++++++++++---------------- 1 file changed, 36 insertions(+), 24 deletions(-) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 1bbf6879a8..448a097944 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -699,37 +699,49 @@ def loadpkl(infile): infile = Path(infile) fmlogger.debug('Loading pkl: %s', infile) pklopen = gzip.open if infile.suffix == '.pklz' else open + + with SoftFileLock('%s.lock' % infile.name): + with pklopen(str(infile), 'rb') as pkl_file: + pkl_contents = pkl_file.read() + pkl_metadata = None + # Look if pkl file contains version metadata + idx = pkl_contents.find(b'\n') + if idx >= 0: + try: + pkl_metadata = json.loads(pkl_contents[:idx]) + except (UnicodeDecodeError, json.JSONDecodeError): + # Could not get version info + pass + else: + # On success, skip JSON metadata + pkl_contents = pkl_contents[idx + 1:] + + # Pickle files may contain relative paths that must be resolved relative + # to the working directory, so use indirectory while attempting to load unpkl = None - with indirectory(infile.parent): - with SoftFileLock('%s.lock' % infile.name): - with pklopen(infile.name, 'rb') as pkl_file: - try: # Look if pkl file contains version file - pkl_metadata_line = pkl_file.readline() - pkl_metadata = json.loads(pkl_metadata_line) - except (UnicodeDecodeError, json.JSONDecodeError): - # Could not get version info - pkl_file.seek(0) - try: - unpkl = pickle.load(pkl_file) - except UnicodeDecodeError: - # Was this pickle created with Python 2.x? 
- unpkl = pickle.load(pkl_file, fix_imports=True, encoding='utf-8') - fmlogger.info('Successfully loaded pkl in compatibility mode.') - # Unpickling problems - except Exception as e: - if pkl_metadata and 'version' in pkl_metadata: - from nipype import __version__ as version - if pkl_metadata['version'] != version: - fmlogger.error("""\ + try: + with indirectory(infile.parent): + unpkl = pickle.loads(pkl_contents) + except UnicodeDecodeError: + # Was this pickle created with Python 2.x? + with indirectory(infile.parent): + unpkl = pickle.loads(pkl_contents, fix_imports=True, encoding='utf-8') + fmlogger.info('Successfully loaded pkl in compatibility mode.') + # Unpickling problems + except Exception as e: + if pkl_metadata and 'version' in pkl_metadata: + from nipype import __version__ as version + if pkl_metadata['version'] != version: + fmlogger.error("""\ Attempted to open a results file generated by Nipype version %s, \ with an incompatible Nipype version (%s)""", pkl_metadata['version'], version) - raise e - fmlogger.warning("""\ + raise e + fmlogger.warning("""\ No metadata was found in the pkl file. Make sure you are currently using \ the same Nipype version from the generated pkl.""") - raise e + raise e if unpkl is None: raise ValueError('Loading %s resulted in None.' % infile) From 26b662f0ff48886524577c83da22a0bc68189a08 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 2 Oct 2019 16:06:39 -0400 Subject: [PATCH 0460/1665] FIX: Lock file based on full path --- nipype/utils/filemanip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 448a097944..a4374f7033 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -700,7 +700,7 @@ def loadpkl(infile): fmlogger.debug('Loading pkl: %s', infile) pklopen = gzip.open if infile.suffix == '.pklz' else open - with SoftFileLock('%s.lock' % infile.name): + with SoftFileLock('%s.lock' % infile): with pklopen(str(infile), 'rb') as pkl_file: pkl_contents = pkl_file.read() From e9f553ef6d87d0285fd7d72983ca5d750e6d7ebb Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Wed, 2 Oct 2019 16:09:27 -0400 Subject: [PATCH 0461/1665] address review comments --- nipype/__init__.py | 12 ++++-------- nipype/interfaces/base/core.py | 6 +++--- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/nipype/__init__.py b/nipype/__init__.py index 59481d9069..26653916f7 100644 --- a/nipype/__init__.py +++ b/nipype/__init__.py @@ -5,8 +5,6 @@ absolute_import) import os -import functools -import sys from distutils.version import LooseVersion from .info import (LONG_DESCRIPTION as __doc__, URL as __url__, STATUS as @@ -59,7 +57,6 @@ def get_info(): from .interfaces import (DataGrabber, DataSink, SelectFiles, IdentityInterface, Rename, Function, Select, Merge) -etelemetry_results = {} def check_latest_version(raise_exception=False): """Check for the latest version of the library @@ -68,9 +65,6 @@ def check_latest_version(raise_exception=False): raise_exception: boolean Raise a RuntimeError if a bad version is being used """ - if raise_exception in etelemetry_results: - return etelemetry_results[raise_exception] - import etelemetry logger = logging.getLogger('nipype.utils') @@ -98,7 +92,8 @@ def check_latest_version(raise_exception=False): raise RuntimeError(message) else: logger.critical(message) - etelemetry_results[raise_exception] = latest + else: + latest = None return latest # Run telemetry on import for interactive sessions, such as IPython, 
Jupyter notebooks, Python REPL @@ -106,4 +101,5 @@ def check_latest_version(raise_exception=False): import __main__ if not hasattr(__main__, '__file__'): from .interfaces.base import BaseInterface - BaseInterface.check_version = check_latest_version() + if BaseInterface._etelemetry_version_data is None: + BaseInterface._etelemetry_version_data = check_latest_version() diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index 8c5c250286..85cad50045 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -166,14 +166,14 @@ class BaseInterface(Interface): _redirect_x = False references_ = [] resource_monitor = True # Enabled for this interface IFF enabled in the config - check_version = None + _etelemetry_version_data = None def __init__(self, from_file=None, resource_monitor=None, ignore_exception=False, **inputs): if config.getboolean('execution', 'check_version'): from ... import check_latest_version - if BaseInterface.check_version is None: - BaseInterface.check_version = check_latest_version() + if BaseInterface._etelemetry_version_data is None: + BaseInterface._etelemetry_version_data = check_latest_version() if not self.input_spec: raise Exception( From fcbf57bed077f99e2c8b4fce8b492b0416e2bc13 Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Wed, 2 Oct 2019 16:12:59 -0400 Subject: [PATCH 0462/1665] fix: allow networks without internet to check only once --- nipype/__init__.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nipype/__init__.py b/nipype/__init__.py index 26653916f7..821a411009 100644 --- a/nipype/__init__.py +++ b/nipype/__init__.py @@ -92,8 +92,6 @@ def check_latest_version(raise_exception=False): raise RuntimeError(message) else: logger.critical(message) - else: - latest = None return latest # Run telemetry on import for interactive sessions, such as IPython, Jupyter notebooks, Python REPL From 085767cb380077925ba4195201d69f7cc8afd22a Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 2 Oct 2019 19:23:50 -0400 Subject: [PATCH 0463/1665] TEST: Mark test_dcm2niix_dti XFAIL --- nipype/interfaces/tests/test_extra_dcm2nii.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nipype/interfaces/tests/test_extra_dcm2nii.py b/nipype/interfaces/tests/test_extra_dcm2nii.py index 969256ae76..68fb626f75 100644 --- a/nipype/interfaces/tests/test_extra_dcm2nii.py +++ b/nipype/interfaces/tests/test_extra_dcm2nii.py @@ -29,6 +29,7 @@ def _fetch_data(datadir, dicoms): @pytest.mark.skipif(no_datalad, reason="Datalad required") @pytest.mark.skipif(no_dcm2niix, reason="Dcm2niix required") +@pytest.mark.xfail(reason="Intermittent failures. Let's come back to this later.") def test_dcm2niix_dti(fetch_data, tmpdir): tmpdir.chdir() datadir = tmpdir.mkdir('data').strpath From 0adb46d3eac0c18f2c9f28c2cda959a640f8a251 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 2 Oct 2019 18:07:59 -0400 Subject: [PATCH 0464/1665] CI: Drop 3.4 tests --- .travis.yml | 30 ------------------------------ 1 file changed, 30 deletions(-) diff --git a/.travis.yml b/.travis.yml index 29f43a75d9..00baeb8dc3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -31,36 +31,6 @@ env: EXTRA_PIP_FLAGS="--pre $EXTRA_PIP_FLAGS --find-links $PRE_WHEELS" CI_SKIP_TEST=1 -# Python 3.4 is only available on Trusty, so we need to duplicate the -# env matrix specifically for it. 
-matrix: - include: - - python: 3.4 - dist: trusty - env: - - INSTALL_DEB_DEPENDECIES=true - NIPYPE_EXTRAS="doc,tests,nipy,profiler" - CI_SKIP_TEST=1 - - python: 3.4 - dist: trusty - env: - - INSTALL_DEB_DEPENDECIES=false - NIPYPE_EXTRAS="doc,tests,profiler" - CI_SKIP_TEST=1 - - python: 3.4 - dist: trusty - env: - - INSTALL_DEB_DEPENDECIES=true - NIPYPE_EXTRAS="doc,tests,nipy,profiler,duecredit,ssh" - CI_SKIP_TEST=1 - - python: 3.4 - dist: trusty - env: - - INSTALL_DEB_DEPENDECIES=true - NIPYPE_EXTRAS="doc,tests,nipy,profiler" - EXTRA_PIP_FLAGS="--pre $EXTRA_PIP_FLAGS --find-links $PRE_WHEELS" - CI_SKIP_TEST=1 - addons: apt: packages: From aadc37a985a39077e9ee7b6bc8a5317dc23ee7ee Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 2 Oct 2019 18:08:26 -0400 Subject: [PATCH 0465/1665] MNT: Drop Python 3.4 in package metadata --- nipype/info.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/nipype/info.py b/nipype/info.py index 257031c38f..9ba12193ea 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -55,13 +55,12 @@ def get_nipype_gitversion(): 'Operating System :: MacOS :: MacOS X', 'Operating System :: POSIX :: Linux', 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Topic :: Scientific/Engineering' ] -PYTHON_REQUIRES = ">= 2.7, != 3.0.*, != 3.1.*, != 3.2.*, != 3.3.*" +PYTHON_REQUIRES = ">= 2.7, != 3.0.*, != 3.1.*, != 3.2.*, != 3.3.*, != 3.4.*" description = 'Neuroimaging in Python: Pipelines and Interfaces' @@ -145,7 +144,6 @@ def get_nipype_gitversion(): 'funcsigs', 'future>=%s' % FUTURE_MIN_VERSION, 'futures; python_version == "2.7"', - 'lxml<4.4.0; python_version == "3.4"', 'networkx>=%s ; python_version >= "3.0"' % NETWORKX_MIN_VERSION, 'networkx>=%s,<=%s ; python_version < "3.0"' % (NETWORKX_MIN_VERSION, NETWORKX_MAX_VERSION_27), 'nibabel>=%s' % NIBABEL_MIN_VERSION, From 9b69c137035993e365140c03d4c2f6547a2984e5 Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Thu, 3 Oct 2019 20:31:47 -0400 Subject: [PATCH 0466/1665] enh: add precommit information for contributors and pre-commit style --- .pre-commit-config.yaml | 14 ++++++++++++++ CONTRIBUTING.md | 28 +++++++++++++++++----------- 2 files changed, 31 insertions(+), 11 deletions(-) create mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..c429bfa6d9 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,14 @@ +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v2.0.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: check-added-large-files +- repo: https://github.com/psf/black + rev: 19.3b0 + hooks: + - id: black diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f55c09a41c..e47cabf282 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,6 @@ # Contributing to Nipype -Welcome to the Nipype repository! We're excited you're here and want to contribute. +Welcome to the Nipype repository! We're excited you're here and want to contribute. These guidelines are designed to make it as easy as possible to get involved. If you have any questions that aren't discussed below, please let us know by opening an [issue][link_issues]! 
@@ -72,7 +72,7 @@ One way to do this is to [configure a new remote named "upstream"](https://help. **3. Make the changes you've discussed.** -If you're adding a new tool from an existing neuroimaging toolkit (e.g., 3dDeconvolve from AFNI), +If you're adding a new tool from an existing neuroimaging toolkit (e.g., 3dDeconvolve from AFNI), check out the [guide for adding new interfaces to Nipype][link_new_interfaces]. When you are working on your changes, test frequently to ensure you are not breaking the existing code. @@ -82,7 +82,7 @@ Before pushing your changes to GitHub, run `make check-before-commit`. This will test the entire package, and build the documentation. If you get no errors, you're ready to submit your changes! -It's a good practice to create [a new branch](https://help.github.com/articles/about-branches/) +It's a good practice to create [a new branch](https://help.github.com/articles/about-branches/) of the repository for a new set of changes. @@ -90,19 +90,25 @@ of the repository for a new set of changes. A new pull request for your changes should be created from your fork of the repository. -When opening a pull request, please use one of the following prefixes: +When opening a pull request, please use one of the following prefixes: -* **[ENH]** for enhancements -* **[FIX]** for bug fixes -* **[TST]** for new or updated tests -* **[DOC]** for new or updated documentation -* **[STY]** for stylistic changes -* **[REF]** for refactoring existing code +* **[ENH]** for enhancements +* **[FIX]** for bug fixes +* **[TST]** for new or updated tests +* **[DOC]** for new or updated documentation +* **[STY]** for stylistic changes +* **[REF]** for refactoring existing code + +**5. Install pre-commit.** + +[pre-commit]() is a git hook for running operations at commit time. To use it in +your environment, do `pip install pre-commit` following by `pre-cmmit install` +inside your source directory.
Pull requests should be submitted early and often (please don't mix too many unrelated changes within one PR)! -If your pull request is not yet ready to be merged, please also include the **[WIP]** prefix (you can remove it once your PR is ready to be merged). +If your pull request is not yet ready to be merged, please also include the **[WIP]** prefix (you can remove it once your PR is ready to be merged). This tells the development team that your pull request is a "work-in-progress", and that you plan to continue working on it. Review and discussion on new code can begin well before the work is complete, and the more discussion the better! From 254b0a394a95d4366525bdcaef5088ac7540c8e4 Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Thu, 3 Oct 2019 20:34:06 -0400 Subject: [PATCH 0467/1665] doc: add pre-commit url --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e47cabf282..9f9a4dcb31 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -102,7 +102,7 @@ When opening a pull request, please use one of the following prefixes: **5. Install pre-commit.** -[pre-commit]() is a git hook for running operations at commit time. To use it in +[pre-commit](https://pre-commit.com/) is a git hook for running operations at commit time. To use it in your environment, do `pip install pre-commit` following by `pre-cmmit install` inside your source directory. From 344c6bab51c1bba09496c26c9c11019dfb2dee66 Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Mon, 7 Oct 2019 09:26:09 -0400 Subject: [PATCH 0468/1665] Update CONTRIBUTING.md Co-Authored-By: Chris Markiewicz --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9f9a4dcb31..331cdd88fe 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -103,7 +103,7 @@ When opening a pull request, please use one of the following prefixes: **5. Install pre-commit.** [pre-commit](https://pre-commit.com/) is a git hook for running operations at commit time. To use it in -your environment, do `pip install pre-commit` following by `pre-cmmit install` +your environment, do `pip install pre-commit` following by `pre-commit install` inside your source directory.
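The CONTRIBUTING.md patches above describe the pre-commit setup only in prose. A minimal sketch of the local workflow they imply, assuming the `.pre-commit-config.yaml` added earlier in this series sits at the repository root and that the standard pre-commit CLI is installed (the commands below are generic pre-commit usage, not content of these patches):

    # install the tool and register the git hook once per clone
    pip install pre-commit
    pre-commit install
    # optionally run every configured hook against the whole tree; the config
    # in this series enables trailing-whitespace, end-of-file-fixer, check-yaml,
    # check-added-large-files and black
    pre-commit run --all-files

Once `pre-commit install` has run, the configured hooks execute automatically on each `git commit` and abort the commit if a check fails or rewrites a file.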
From dda8cf77d0c4b885c6ddccf8eb7d2fb554644fc6 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 7 Oct 2019 09:37:22 -0400 Subject: [PATCH 0469/1665] CI: Run make check-before-commit to ensure the working directory unchanged --- .travis.yml | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 00baeb8dc3..5ffabcf979 100644 --- a/.travis.yml +++ b/.travis.yml @@ -16,6 +16,7 @@ env: - EXTRA_WHEELS="https://5cf40426d9f06eb7461d-6fe47d9331aba7cd62fc36c7196769e4.ssl.cf2.rackcdn.com" - PRE_WHEELS="https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com" - EXTRA_PIP_FLAGS="--find-links=$EXTRA_WHEELS" + - CHECK_TYPE=test matrix: - INSTALL_DEB_DEPENDECIES=true NIPYPE_EXTRAS="doc,tests,nipy,profiler" @@ -31,6 +32,13 @@ env: EXTRA_PIP_FLAGS="--pre $EXTRA_PIP_FLAGS --find-links $PRE_WHEELS" CI_SKIP_TEST=1 +matrix: + include: + - python: 3.7 + env: + - NIPYPE_EXTRAS=dev + CHECK_TYPE=specs + addons: apt: packages: @@ -64,7 +72,16 @@ install: - travis_retry pip install pytest-xdist script: -- py.test -v --cov nipype --cov-config .coveragerc --cov-report xml:cov.xml -c nipype/pytest.ini --doctest-modules nipype -n auto +- | + if [ "$CHECK_TYPE" = "test" ]; then + py.test -v --cov nipype --cov-config .coveragerc --cov-report xml:cov.xml -c nipype/pytest.ini --doctest-modules nipype -n auto + fi +- | + if [ "$CHECK_TYPE" = "specs" ]; then + make check-before-commit + git add nipype + test "$( git diff --cached | wc -l )" -eq 0 || ( git diff --cached && false ) + fi after_script: - codecov --file cov.xml --flags unittests -e TRAVIS_JOB_NUMBER From b48f1991119dcb911f3bed9336066959e72fdaa9 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 7 Oct 2019 09:41:16 -0400 Subject: [PATCH 0470/1665] TEST: Add goforit input to Remlfit autotest --- nipype/interfaces/afni/tests/test_auto_Remlfit.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nipype/interfaces/afni/tests/test_auto_Remlfit.py b/nipype/interfaces/afni/tests/test_auto_Remlfit.py index 05ee75210e..b1afa427b1 100644 --- a/nipype/interfaces/afni/tests/test_auto_Remlfit.py +++ b/nipype/interfaces/afni/tests/test_auto_Remlfit.py @@ -46,6 +46,7 @@ def test_Remlfit_inputs(): extensions=None, ), gltsym=dict(argstr='-gltsym "%s" %s...', ), + goforit=dict(argstr='-GOFORIT', ), in_files=dict( argstr='-input "%s"', copyfile=False, From 837b52aa52839b28628467412da21ce080a3421b Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 7 Oct 2019 10:14:36 -0400 Subject: [PATCH 0471/1665] MNT: Fix vi modeline --- nipype/interfaces/afni/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/afni/utils.py b/nipype/interfaces/afni/utils.py index 38a9da3efb..40db9938cd 100644 --- a/nipype/interfaces/afni/utils.py +++ b/nipype/interfaces/afni/utils.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft = python sts = 4 ts = 4 sw = 4 et: +# vi: set ft=python sts=4 ts=4 sw=4 et: """ AFNI utility interfaces. From 3ade586243f9b1f8e639b24ccae558c21821a6bf Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Mon, 7 Oct 2019 10:20:26 -0400 Subject: [PATCH 0472/1665] FIX: Use raw strings for regular expressions --- nipype/interfaces/afni/utils.py | 6 +++--- nipype/interfaces/diffusion_toolkit/dti.py | 2 +- nipype/interfaces/fsl/utils.py | 24 +++++++++++----------- nipype/interfaces/image.py | 4 ++-- nipype/interfaces/io.py | 2 +- nipype/interfaces/tests/test_io.py | 2 +- nipype/interfaces/utility/base.py | 2 +- 7 files changed, 21 insertions(+), 21 deletions(-) diff --git a/nipype/interfaces/afni/utils.py b/nipype/interfaces/afni/utils.py index 40db9938cd..b728d8c902 100644 --- a/nipype/interfaces/afni/utils.py +++ b/nipype/interfaces/afni/utils.py @@ -213,9 +213,9 @@ class Autobox(AFNICommand): def aggregate_outputs(self, runtime=None, needed_outputs=None): outputs = super(Autobox, self).aggregate_outputs( runtime, needed_outputs) - pattern = 'x=(?P-?\d+)\.\.(?P-?\d+) '\ - 'y=(?P-?\d+)\.\.(?P-?\d+) '\ - 'z=(?P-?\d+)\.\.(?P-?\d+)' + pattern = r'x=(?P-?\d+)\.\.(?P-?\d+) '\ + r'y=(?P-?\d+)\.\.(?P-?\d+) '\ + r'z=(?P-?\d+)\.\.(?P-?\d+)' for line in runtime.stderr.split('\n'): m = re.search(pattern, line) if m: diff --git a/nipype/interfaces/diffusion_toolkit/dti.py b/nipype/interfaces/diffusion_toolkit/dti.py index 570ae55df5..f6776c2e95 100644 --- a/nipype/interfaces/diffusion_toolkit/dti.py +++ b/nipype/interfaces/diffusion_toolkit/dti.py @@ -94,7 +94,7 @@ class DTIRecon(CommandLine): def _create_gradient_matrix(self, bvecs_file, bvals_file): _gradient_matrix_file = 'gradient_matrix.txt' with open(bvals_file) as fbvals: - bvals = [val for val in re.split('\s+', fbvals.readline().strip())] + bvals = [val for val in re.split(r'\s+', fbvals.readline().strip())] with open(bvecs_file) as fbvecs: bvecs_x = fbvecs.readline().split() bvecs_y = fbvecs.readline().split() diff --git a/nipype/interfaces/fsl/utils.py b/nipype/interfaces/fsl/utils.py index a572259e2e..e0b430a2be 100644 --- a/nipype/interfaces/fsl/utils.py +++ b/nipype/interfaces/fsl/utils.py @@ -858,18 +858,18 @@ def _run_interface(self, runtime): runtime = super(AvScale, self)._run_interface(runtime) expr = re.compile( - 'Rotation\ &\ Translation\ Matrix:\n(?P[0-9\.\ \n-]+)[\s\n]*' - '(Rotation\ Angles\ \(x,y,z\)\ \[rads\]\ =\ (?P[0-9\.\ -]+))?[\s\n]*' - '(Translations\ \(x,y,z\)\ \[mm\]\ =\ (?P[0-9\.\ -]+))?[\s\n]*' - 'Scales\ \(x,y,z\)\ =\ (?P[0-9\.\ -]+)[\s\n]*' - 'Skews\ \(xy,xz,yz\)\ =\ (?P[0-9\.\ -]+)[\s\n]*' - 'Average\ scaling\ =\ (?P[0-9\.-]+)[\s\n]*' - 'Determinant\ =\ (?P[0-9\.-]+)[\s\n]*' - 'Left-Right\ orientation:\ (?P[A-Za-z]+)[\s\n]*' - 'Forward\ half\ transform\ =[\s]*\n' - '(?P[0-9\.\ \n-]+)[\s\n]*' - 'Backward\ half\ transform\ =[\s]*\n' - '(?P[0-9\.\ \n-]+)[\s\n]*') + r'Rotation & Translation Matrix:\n(?P[0-9\. \n-]+)[\s\n]*' + r'(Rotation Angles \(x,y,z\) \[rads\] = (?P[0-9\. -]+))?[\s\n]*' + r'(Translations \(x,y,z\) \[mm\] = (?P[0-9\. -]+))?[\s\n]*' + r'Scales \(x,y,z\) = (?P[0-9\. -]+)[\s\n]*' + r'Skews \(xy,xz,yz\) = (?P[0-9\. -]+)[\s\n]*' + r'Average scaling = (?P[0-9\.-]+)[\s\n]*' + r'Determinant = (?P[0-9\.-]+)[\s\n]*' + r'Left-Right orientation: (?P[A-Za-z]+)[\s\n]*' + r'Forward half transform =[\s]*\n' + r'(?P[0-9\. \n-]+)[\s\n]*' + r'Backward half transform =[\s]*\n' + r'(?P[0-9\. 
\n-]+)[\s\n]*') out = expr.search(runtime.stdout).groupdict() outputs = {} outputs['rotation_translation_matrix'] = [[ diff --git a/nipype/interfaces/image.py b/nipype/interfaces/image.py index 50fad60ebb..d72bb47c42 100644 --- a/nipype/interfaces/image.py +++ b/nipype/interfaces/image.py @@ -43,8 +43,8 @@ class Rescale(SimpleInterface): Examples -------- - To use a high-resolution T1w image as a registration target for a T2\* - image, it may be useful to invert the T1w image and rescale to the T2\* + To use a high-resolution T1w image as a registration target for a T2\\* + image, it may be useful to invert the T1w image and rescale to the T2\\* range. Using the 1st and 99th percentiles may reduce the impact of outlier voxels. diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index c409d4e78a..b3376903bd 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -1466,7 +1466,7 @@ class DataFinder(IOBase): >>> from nipype.interfaces.io import DataFinder >>> df = DataFinder() >>> df.inputs.root_paths = '.' - >>> df.inputs.match_regex = '.+/(?P.+(qT1|ep2d_fid_T1).+)/(?P.+)\.nii.gz' + >>> df.inputs.match_regex = r'.+/(?P.+(qT1|ep2d_fid_T1).+)/(?P.+)\.nii.gz' >>> result = df.run() # doctest: +SKIP >>> result.outputs.out_paths # doctest: +SKIP ['./027-ep2d_fid_T1_Gd4/acquisition.nii.gz', diff --git a/nipype/interfaces/tests/test_io.py b/nipype/interfaces/tests/test_io.py index 4420e4a49c..19625b0f68 100644 --- a/nipype/interfaces/tests/test_io.py +++ b/nipype/interfaces/tests/test_io.py @@ -524,7 +524,7 @@ def test_datafinder_unpack(tmpdir): df = nio.DataFinder() df.inputs.root_paths = outdir - df.inputs.match_regex = '.+/(?P.+)\.txt' + df.inputs.match_regex = r'.+/(?P.+)\.txt' df.inputs.unpack_single = True result = df.run() print(result.outputs.out_paths) diff --git a/nipype/interfaces/utility/base.py b/nipype/interfaces/utility/base.py index d01a0a17b9..8ae05ad66b 100644 --- a/nipype/interfaces/utility/base.py +++ b/nipype/interfaces/utility/base.py @@ -254,7 +254,7 @@ class Rename(SimpleInterface, IOBase): >>> rename3 = Rename(format_string="%(subject_id)s_%(seq)s_run%(run)02d.nii") >>> rename3.inputs.in_file = os.path.join(datadir, "func_epi_1_1.nii") - >>> rename3.inputs.parse_string = "func_(?P\w*)_.*" + >>> rename3.inputs.parse_string = r"func_(?P\w*)_.*" >>> rename3.inputs.subject_id = "subj_201" >>> rename3.inputs.run = 2 >>> res = rename3.run() # doctest: +SKIP From cc5746da160646ae4b63fdb3d686409b100d25a0 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 7 Oct 2019 10:20:52 -0400 Subject: [PATCH 0473/1665] FIX: Import deprecated module inside interface to quiet warnings --- nipype/interfaces/dipy/tracks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/dipy/tracks.py b/nipype/interfaces/dipy/tracks.py index 60b5127dd1..81d76d1ef5 100644 --- a/nipype/interfaces/dipy/tracks.py +++ b/nipype/interfaces/dipy/tracks.py @@ -5,7 +5,6 @@ import os.path as op import numpy as np import nibabel as nb -import nibabel.trackvis as nbt from distutils.version import LooseVersion from ... import logging @@ -89,6 +88,7 @@ class TrackDensityMap(DipyBaseInterface): def _run_interface(self, runtime): from numpy import min_scalar_type from dipy.tracking.utils import density_map + import nibabel.trackvis as nbt tracks, header = nbt.read(self.inputs.in_file) streams = ((ii[0]) for ii in tracks) From 2174a062eaece87668c81216d0c361ece7015368 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Mon, 7 Oct 2019 10:21:15 -0400 Subject: [PATCH 0474/1665] MNT: Use logger.warning --- nipype/pipeline/engine/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index fab1fb14de..a5245dda48 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -235,7 +235,7 @@ def save_resultfile(result, cwd, name, rebase=None): logger.debug("Saving results file: '%s'", resultsfile) if result.outputs is None: - logger.warn('Storing result file without outputs') + logger.warning('Storing result file without outputs') savepkl(resultsfile, result) return try: From 0050663eec6260f099c25893a92dbf39db263a18 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 7 Oct 2019 11:45:40 -0400 Subject: [PATCH 0475/1665] RF: Redirect nipype.workflows to niflow.nipype1.workflows --- nipype/interfaces/fsl/dti.py | 2 +- nipype/interfaces/fsl/epi.py | 2 +- nipype/interfaces/mrtrix/convert.py | 23 +- nipype/workflows/__init__.py | 22 + nipype/workflows/data/__init__.py | 17 - nipype/workflows/data/ecc.sch | 67 - nipype/workflows/data/hmc.sch | 64 - nipype/workflows/dmri/__init__.py | 4 - nipype/workflows/dmri/camino/__init__.py | 5 - .../dmri/camino/connectivity_mapping.py | 534 ------- nipype/workflows/dmri/camino/diffusion.py | 245 ---- .../dmri/camino/group_connectivity.py | 115 -- .../workflows/dmri/connectivity/__init__.py | 9 - .../dmri/connectivity/group_connectivity.py | 631 -------- nipype/workflows/dmri/connectivity/nx.py | 178 --- nipype/workflows/dmri/dipy/__init__.py | 7 - nipype/workflows/dmri/dipy/denoise.py | 121 -- nipype/workflows/dmri/dtitk/__init__.py | 8 - .../dmri/dtitk/tensor_registration.py | 144 -- nipype/workflows/dmri/fsl/__init__.py | 15 - nipype/workflows/dmri/fsl/artifacts.py | 1061 -------------- nipype/workflows/dmri/fsl/dti.py | 276 ---- nipype/workflows/dmri/fsl/epi.py | 885 ----------- nipype/workflows/dmri/fsl/tbss.py | 590 -------- nipype/workflows/dmri/fsl/tests/__init__.py | 3 - nipype/workflows/dmri/fsl/tests/test_dti.py | 85 -- nipype/workflows/dmri/fsl/tests/test_epi.py | 46 - nipype/workflows/dmri/fsl/tests/test_tbss.py | 211 --- nipype/workflows/dmri/fsl/utils.py | 847 ----------- nipype/workflows/dmri/mrtrix/__init__.py | 5 - .../dmri/mrtrix/connectivity_mapping.py | 639 -------- nipype/workflows/dmri/mrtrix/diffusion.py | 186 --- .../dmri/mrtrix/group_connectivity.py | 139 -- nipype/workflows/fmri/__init__.py | 4 - nipype/workflows/fmri/fsl/__init__.py | 8 - nipype/workflows/fmri/fsl/estimate.py | 298 ---- nipype/workflows/fmri/fsl/preprocess.py | 1293 ----------------- nipype/workflows/fmri/fsl/tests/__init__.py | 2 - .../fmri/fsl/tests/test_preprocess.py | 25 - nipype/workflows/fmri/spm/__init__.py | 3 - nipype/workflows/fmri/spm/estimate.py | 3 - nipype/workflows/fmri/spm/preprocess.py | 332 ----- nipype/workflows/fmri/spm/tests/__init__.py | 2 - nipype/workflows/graph/__init__.py | 3 - nipype/workflows/misc/__init__.py | 1 - nipype/workflows/misc/utils.py | 91 -- nipype/workflows/rsfmri/__init__.py | 5 - nipype/workflows/rsfmri/fsl/__init__.py | 2 - nipype/workflows/rsfmri/fsl/resting.py | 162 --- nipype/workflows/rsfmri/fsl/tests/__init__.py | 0 .../rsfmri/fsl/tests/test_resting.py | 107 -- nipype/workflows/smri/__init__.py | 7 - .../workflows/smri/ants/ANTSBuildTemplate.py | 388 ----- nipype/workflows/smri/ants/__init__.py | 3 - .../ants/antsRegistrationBuildTemplate.py | 535 ------- 
nipype/workflows/smri/freesurfer/__init__.py | 5 - .../workflows/smri/freesurfer/autorecon1.py | 512 ------- .../workflows/smri/freesurfer/autorecon2.py | 720 --------- .../workflows/smri/freesurfer/autorecon3.py | 959 ------------ nipype/workflows/smri/freesurfer/ba_maps.py | 172 --- nipype/workflows/smri/freesurfer/bem.py | 81 -- nipype/workflows/smri/freesurfer/recon.py | 604 -------- nipype/workflows/smri/freesurfer/utils.py | 498 ------- nipype/workflows/smri/niftyreg/__init__.py | 5 - nipype/workflows/smri/niftyreg/groupwise.py | 384 ----- nipype/workflows/warp/__init__.py | 1 - 66 files changed, 46 insertions(+), 14355 deletions(-) delete mode 100644 nipype/workflows/data/__init__.py delete mode 100644 nipype/workflows/data/ecc.sch delete mode 100644 nipype/workflows/data/hmc.sch delete mode 100644 nipype/workflows/dmri/__init__.py delete mode 100644 nipype/workflows/dmri/camino/__init__.py delete mode 100644 nipype/workflows/dmri/camino/connectivity_mapping.py delete mode 100644 nipype/workflows/dmri/camino/diffusion.py delete mode 100644 nipype/workflows/dmri/camino/group_connectivity.py delete mode 100644 nipype/workflows/dmri/connectivity/__init__.py delete mode 100644 nipype/workflows/dmri/connectivity/group_connectivity.py delete mode 100644 nipype/workflows/dmri/connectivity/nx.py delete mode 100644 nipype/workflows/dmri/dipy/__init__.py delete mode 100644 nipype/workflows/dmri/dipy/denoise.py delete mode 100644 nipype/workflows/dmri/dtitk/__init__.py delete mode 100644 nipype/workflows/dmri/dtitk/tensor_registration.py delete mode 100644 nipype/workflows/dmri/fsl/__init__.py delete mode 100644 nipype/workflows/dmri/fsl/artifacts.py delete mode 100644 nipype/workflows/dmri/fsl/dti.py delete mode 100644 nipype/workflows/dmri/fsl/epi.py delete mode 100644 nipype/workflows/dmri/fsl/tbss.py delete mode 100644 nipype/workflows/dmri/fsl/tests/__init__.py delete mode 100644 nipype/workflows/dmri/fsl/tests/test_dti.py delete mode 100644 nipype/workflows/dmri/fsl/tests/test_epi.py delete mode 100644 nipype/workflows/dmri/fsl/tests/test_tbss.py delete mode 100644 nipype/workflows/dmri/fsl/utils.py delete mode 100644 nipype/workflows/dmri/mrtrix/__init__.py delete mode 100644 nipype/workflows/dmri/mrtrix/connectivity_mapping.py delete mode 100644 nipype/workflows/dmri/mrtrix/diffusion.py delete mode 100644 nipype/workflows/dmri/mrtrix/group_connectivity.py delete mode 100644 nipype/workflows/fmri/__init__.py delete mode 100644 nipype/workflows/fmri/fsl/__init__.py delete mode 100644 nipype/workflows/fmri/fsl/estimate.py delete mode 100644 nipype/workflows/fmri/fsl/preprocess.py delete mode 100644 nipype/workflows/fmri/fsl/tests/__init__.py delete mode 100644 nipype/workflows/fmri/fsl/tests/test_preprocess.py delete mode 100644 nipype/workflows/fmri/spm/__init__.py delete mode 100644 nipype/workflows/fmri/spm/estimate.py delete mode 100644 nipype/workflows/fmri/spm/preprocess.py delete mode 100644 nipype/workflows/fmri/spm/tests/__init__.py delete mode 100644 nipype/workflows/graph/__init__.py delete mode 100644 nipype/workflows/misc/__init__.py delete mode 100644 nipype/workflows/misc/utils.py delete mode 100644 nipype/workflows/rsfmri/__init__.py delete mode 100644 nipype/workflows/rsfmri/fsl/__init__.py delete mode 100644 nipype/workflows/rsfmri/fsl/resting.py delete mode 100644 nipype/workflows/rsfmri/fsl/tests/__init__.py delete mode 100644 nipype/workflows/rsfmri/fsl/tests/test_resting.py delete mode 100644 nipype/workflows/smri/__init__.py delete mode 100644 
nipype/workflows/smri/ants/ANTSBuildTemplate.py delete mode 100644 nipype/workflows/smri/ants/__init__.py delete mode 100644 nipype/workflows/smri/ants/antsRegistrationBuildTemplate.py delete mode 100644 nipype/workflows/smri/freesurfer/__init__.py delete mode 100644 nipype/workflows/smri/freesurfer/autorecon1.py delete mode 100644 nipype/workflows/smri/freesurfer/autorecon2.py delete mode 100644 nipype/workflows/smri/freesurfer/autorecon3.py delete mode 100644 nipype/workflows/smri/freesurfer/ba_maps.py delete mode 100644 nipype/workflows/smri/freesurfer/bem.py delete mode 100644 nipype/workflows/smri/freesurfer/recon.py delete mode 100644 nipype/workflows/smri/freesurfer/utils.py delete mode 100644 nipype/workflows/smri/niftyreg/__init__.py delete mode 100644 nipype/workflows/smri/niftyreg/groupwise.py delete mode 100644 nipype/workflows/warp/__init__.py diff --git a/nipype/interfaces/fsl/dti.py b/nipype/interfaces/fsl/dti.py index c842ff05cf..75cdfeebed 100644 --- a/nipype/interfaces/fsl/dti.py +++ b/nipype/interfaces/fsl/dti.py @@ -424,7 +424,7 @@ class BEDPOSTX5(FSLXCommand): .. note:: Consider using - :func:`nipype.workflows.fsl.dmri.create_bedpostx_pipeline` instead. + :func:`niflow.nipype1.workflows.fsl.dmri.create_bedpostx_pipeline` instead. Example diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index f94bda1147..a0fdf843da 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -1100,7 +1100,7 @@ class EPIDeWarp(FSLCommand): `_. .. warning:: deprecated in FSL, please use - :func:`nipype.workflows.dmri.preprocess.epi.sdc_fmb` instead. + :func:`niflow.nipype1.workflows.dmri.preprocess.epi.sdc_fmb` instead. Examples -------- diff --git a/nipype/interfaces/mrtrix/convert.py b/nipype/interfaces/mrtrix/convert.py index a3a280c895..4c1b6e4ef5 100644 --- a/nipype/interfaces/mrtrix/convert.py +++ b/nipype/interfaces/mrtrix/convert.py @@ -15,13 +15,34 @@ from ... 
import logging from ...utils.filemanip import split_filename -from ...workflows.misc.utils import get_data_dims, get_vox_dims from ..base import TraitedSpec, File, isdefined from ..dipy.base import DipyBaseInterface, HAVE_DIPY as have_dipy iflogger = logging.getLogger('nipype.interface') +def get_vox_dims(volume): + import nibabel as nb + from nipype.utils import NUMPY_MMAP + if isinstance(volume, list): + volume = volume[0] + nii = nb.load(volume, mmap=NUMPY_MMAP) + hdr = nii.header + voxdims = hdr.get_zooms() + return [float(voxdims[0]), float(voxdims[1]), float(voxdims[2])] + + +def get_data_dims(volume): + import nibabel as nb + from nipype.utils import NUMPY_MMAP + if isinstance(volume, list): + volume = volume[0] + nii = nb.load(volume, mmap=NUMPY_MMAP) + hdr = nii.header + datadims = hdr.get_data_shape() + return [int(datadims[0]), int(datadims[1]), int(datadims[2])] + + def transform_to_affine(streams, header, affine): from dipy.tracking.utils import move_streamlines rotation, scale = np.linalg.qr(affine) diff --git a/nipype/workflows/__init__.py b/nipype/workflows/__init__.py index 99fb243f19..85df461615 100644 --- a/nipype/workflows/__init__.py +++ b/nipype/workflows/__init__.py @@ -1,3 +1,25 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: + +_msg = ["Nipype 1 workflows have been moved to the niflow-nipype1-workflows package."] +try: + from niflow.nipype1.workflows import data, dmri, fmri, misc, rsfmri, smri +except ImportError: + _msg.append("pip install niflow-nipype1-workflows to continue using them.") +else: + import sys + # Hack to make `from nipype.workflows.X import Y` work + sys.modules['nipype.workflows.data'] = data + sys.modules['nipype.workflows.dmri'] = dmri + sys.modules['nipype.workflows.fmri'] = fmri + sys.modules['nipype.workflows.misc'] = misc + sys.modules['nipype.workflows.rsfmri'] = rsfmri + sys.modules['nipype.workflows.smri'] = smri + _msg.append("nipype.workflows.* provides a reference for backwards compatibility. 
" + "Please use niflow.nipype1.workflows.* to avoid this warning.") + del sys + +import warnings +warnings.warn(' '.join(_msg)) +del warnings, _msg diff --git a/nipype/workflows/data/__init__.py b/nipype/workflows/data/__init__.py deleted file mode 100644 index 85fcd2dee0..0000000000 --- a/nipype/workflows/data/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# -*- coding: utf-8 -*- -# coding: utf-8 -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) - -import os.path as op - - -def get_flirt_schedule(name): - if name == 'ecc': - return op.abspath(op.join(op.dirname(__file__), 'ecc.sch')) - elif name == 'hmc': - return op.abspath(op.join(op.dirname(__file__), 'hmc.sch')) - else: - raise RuntimeError('Requested file does not exist.') diff --git a/nipype/workflows/data/ecc.sch b/nipype/workflows/data/ecc.sch deleted file mode 100644 index b9e8d8c3c3..0000000000 --- a/nipype/workflows/data/ecc.sch +++ /dev/null @@ -1,67 +0,0 @@ -# 4mm scale -setscale 4 -setoption smoothing 6 -setoption paramsubset 1 0 0 0 0 0 0 1 1 1 1 1 1 -clear U -clear UA -clear UB -clear US -clear UP -# try the identity transform as a starting point at this resolution -clear UQ -setrow UQ 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 -optimise 7 UQ 0.0 0.0 0.0 0.0 0.0 0.0 0.0 rel 4 -sort U -copy U UA -# select best 4 optimised solutions and try perturbations of these -clear U -copy UA:1-4 U -optimise 7 UA:1-4 1.0 0.0 0.0 0.0 0.0 0.0 0.0 rel 4 -optimise 7 UA:1-4 -1.0 0.0 0.0 0.0 0.0 0.0 0.0 abs 4 -optimise 7 UA:1-4 0.0 1.0 0.0 0.0 0.0 0.0 0.0 abs 4 -optimise 7 UA:1-4 0.0 -1.0 0.0 0.0 0.0 0.0 0.0 abs 4 -optimise 7 UA:1-4 0.0 0.0 1.0 0.0 0.0 0.0 0.0 abs 4 -optimise 7 UA:1-4 0.0 0.0 -1.0 0.0 0.0 0.0 0.0 abs 4 -optimise 7 UA:1-4 0.0 0.0 0.0 0.0 0.0 0.0 0.1 abs 4 -optimise 7 UA:1-4 0.0 0.0 0.0 0.0 0.0 0.0 -0.1 abs 4 -optimise 7 UA:1-4 0.0 0.0 0.0 0.0 0.0 0.0 0.2 abs 4 -optimise 7 UA:1-4 0.0 0.0 0.0 0.0 0.0 0.0 -0.2 abs 4 -sort U -copy U UB -# 2mm scale -setscale 2 -setoption smoothing 4 -setoption paramsubset 1 0 0 0 0 0 0 1 1 1 1 1 1 -clear U -clear UC -clear UD -clear UE -clear UF -# remeasure costs at this scale -measurecost 7 UB 0 0 0 0 0 0 rel -sort U -copy U UC -clear U -optimise 7 UC:1-3 0.0 0.0 0.0 0.0 0.0 0.0 0.0 abs 2 -copy U UD -sort U -copy U UF -# also try the identity transform as a starting point at this resolution -sort U -clear U UG -clear U -setrow UG 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 -optimise 7 UG 0.0 0.0 0.0 0.0 0.0 0.0 0.0 abs 2 -sort U -copy U UG -# 1mm scale -setscale 1 -setoption smoothing 2 -setoption boundguess 1 -setoption paramsubset 1 0 0 0 0 0 0 1 1 1 1 1 1 -clear U -#also try the identity transform as a starting point at this resolution -setrow UK 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 -optimise 12 UK:1-2 0.0 0.0 0.0 0.0 0.0 0.0 0.0 abs 1 -sort U - diff --git a/nipype/workflows/data/hmc.sch b/nipype/workflows/data/hmc.sch deleted file mode 100644 index aeabcae29a..0000000000 --- a/nipype/workflows/data/hmc.sch +++ /dev/null @@ -1,64 +0,0 @@ -# 4mm scale -setscale 4 -setoption smoothing 6 -clear U -clear UA -clear UB -clear US -clear UP -# try the identity transform as a starting point at this resolution -clear UQ -setrow UQ 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 -optimise 7 UQ 0.0 0.0 0.0 0.0 0.0 0.0 0.0 rel 4 -sort U -copy U UA -# select best 4 optimised solutions and try perturbations of these -clear U -copy UA:1-4 U -optimise 7 UA:1-4 1.0 0.0 0.0 0.0 0.0 0.0 0.0 rel 4 -optimise 
7 UA:1-4 -1.0 0.0 0.0 0.0 0.0 0.0 0.0 abs 4 -optimise 7 UA:1-4 0.0 1.0 0.0 0.0 0.0 0.0 0.0 abs 4 -optimise 7 UA:1-4 0.0 -1.0 0.0 0.0 0.0 0.0 0.0 abs 4 -optimise 7 UA:1-4 0.0 0.0 1.0 0.0 0.0 0.0 0.0 abs 4 -optimise 7 UA:1-4 0.0 0.0 -1.0 0.0 0.0 0.0 0.0 abs 4 -optimise 7 UA:1-4 0.0 0.0 0.0 0.0 0.0 0.0 0.1 abs 4 -optimise 7 UA:1-4 0.0 0.0 0.0 0.0 0.0 0.0 -0.1 abs 4 -optimise 7 UA:1-4 0.0 0.0 0.0 0.0 0.0 0.0 0.2 abs 4 -optimise 7 UA:1-4 0.0 0.0 0.0 0.0 0.0 0.0 -0.2 abs 4 -sort U -copy U UB -# 2mm scale -setscale 2 -setoption smoothing 4 -clear U -clear UC -clear UD -clear UE -clear UF -# remeasure costs at this scale -measurecost 7 UB 0 0 0 0 0 0 rel -sort U -copy U UC -clear U -optimise 7 UC:1-3 0.0 0.0 0.0 0.0 0.0 0.0 0.0 abs 2 -copy U UD -sort U -copy U UF -# also try the identity transform as a starting point at this resolution -sort U -clear U UG -clear U -setrow UG 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 -optimise 7 UG 0.0 0.0 0.0 0.0 0.0 0.0 0.0 abs 2 -sort U -copy U UG -# 1mm scale -setscale 1 -setoption smoothing 2 -setoption boundguess 1 -clear U -#also try the identity transform as a starting point at this resolution -setrow UK 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 -optimise 12 UK:1-2 0.0 0.0 0.0 0.0 0.0 0.0 0.0 abs 1 -sort U - diff --git a/nipype/workflows/dmri/__init__.py b/nipype/workflows/dmri/__init__.py deleted file mode 100644 index 628b6c2bc1..0000000000 --- a/nipype/workflows/dmri/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from . import camino, mrtrix, fsl, dipy diff --git a/nipype/workflows/dmri/camino/__init__.py b/nipype/workflows/dmri/camino/__init__.py deleted file mode 100644 index 07ba37fc52..0000000000 --- a/nipype/workflows/dmri/camino/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import -from .diffusion import create_camino_dti_pipeline -from .connectivity_mapping import create_connectivity_pipeline -from .group_connectivity import create_group_connectivity_pipeline diff --git a/nipype/workflows/dmri/camino/connectivity_mapping.py b/nipype/workflows/dmri/camino/connectivity_mapping.py deleted file mode 100644 index 3283b5f4e1..0000000000 --- a/nipype/workflows/dmri/camino/connectivity_mapping.py +++ /dev/null @@ -1,534 +0,0 @@ -# -*- coding: utf-8 -*- -import inspect -import os.path as op - -from ....interfaces import io as nio # Data i/o -from ....interfaces import utility as util # utility -from ....pipeline import engine as pe # pypeline engine -from ....interfaces import camino as camino -from ....interfaces import fsl as fsl -from ....interfaces import camino2trackvis as cam2trk -from ....interfaces import freesurfer as fs # freesurfer -from ....interfaces import cmtk as cmtk -from ....algorithms import misc as misc -from ...misc.utils import (get_affine, get_data_dims, get_vox_dims, - select_aparc, select_aparc_annot) - - -def create_connectivity_pipeline(name="connectivity"): - """Creates a pipeline that does the same connectivity processing as in the - :ref:`example_dmri_connectivity` example script. Given a subject id (and completed Freesurfer reconstruction) - diffusion-weighted image, b-values, and b-vectors, the workflow will return the subject's connectome - as a Connectome File Format (CFF) file for use in Connectome Viewer (http://www.cmtk.org). 
- - Example - ------- - - >>> from nipype.workflows.dmri.camino.connectivity_mapping import create_connectivity_pipeline - >>> conmapper = create_connectivity_pipeline("nipype_conmap") - >>> conmapper.inputs.inputnode.subjects_dir = '.' - >>> conmapper.inputs.inputnode.subject_id = 'subj1' - >>> conmapper.inputs.inputnode.dwi = 'data.nii.gz' - >>> conmapper.inputs.inputnode.bvecs = 'bvecs' - >>> conmapper.inputs.inputnode.bvals = 'bvals' - >>> conmapper.run() # doctest: +SKIP - - Inputs:: - - inputnode.subject_id - inputnode.subjects_dir - inputnode.dwi - inputnode.bvecs - inputnode.bvals - inputnode.resolution_network_file - - Outputs:: - - outputnode.connectome - outputnode.cmatrix - outputnode.gpickled_network - outputnode.fa - outputnode.struct - outputnode.trace - outputnode.tracts - outputnode.tensors - - """ - - inputnode_within = pe.Node( - interface=util.IdentityInterface(fields=[ - "subject_id", - "dwi", - "bvecs", - "bvals", - "subjects_dir", - "resolution_network_file", - ]), - name="inputnode_within") - - FreeSurferSource = pe.Node( - interface=nio.FreeSurferSource(), name='fssource') - - FreeSurferSourceLH = pe.Node( - interface=nio.FreeSurferSource(), name='fssourceLH') - FreeSurferSourceLH.inputs.hemi = 'lh' - - FreeSurferSourceRH = pe.Node( - interface=nio.FreeSurferSource(), name='fssourceRH') - FreeSurferSourceRH.inputs.hemi = 'rh' - """ - Since the b values and b vectors come from the FSL course, we must convert it to a scheme file - for use in Camino. - """ - - fsl2scheme = pe.Node(interface=camino.FSL2Scheme(), name="fsl2scheme") - fsl2scheme.inputs.usegradmod = True - """ - FSL's Brain Extraction tool is used to create a mask from the b0 image - """ - - b0Strip = pe.Node(interface=fsl.BET(mask=True), name='bet_b0') - """ - FSL's FLIRT function is used to coregister the b0 mask and the structural image. - A convert_xfm node is then used to obtain the inverse of the transformation matrix. - FLIRT is used once again to apply the inverse transformation to the parcellated brain image. - """ - - coregister = pe.Node(interface=fsl.FLIRT(dof=6), name='coregister') - coregister.inputs.cost = ('normmi') - - convertxfm = pe.Node(interface=fsl.ConvertXFM(), name='convertxfm') - convertxfm.inputs.invert_xfm = True - - inverse = pe.Node(interface=fsl.FLIRT(), name='inverse') - inverse.inputs.interp = ('nearestneighbour') - - inverse_AparcAseg = pe.Node( - interface=fsl.FLIRT(), name='inverse_AparcAseg') - inverse_AparcAseg.inputs.interp = ('nearestneighbour') - """ - A number of conversion operations are required to obtain NIFTI files from the FreesurferSource for each subject. 
- Nodes are used to convert the following: - * Original structural image to NIFTI - * Parcellated white matter image to NIFTI - * Parcellated whole-brain image to NIFTI - * Pial, white, inflated, and spherical surfaces for both the left and right hemispheres - are converted to GIFTI for visualization in ConnectomeViewer - * Parcellated annotation files for the left and right hemispheres are also converted to GIFTI - """ - - mri_convert_Brain = pe.Node( - interface=fs.MRIConvert(), name='mri_convert_Brain') - mri_convert_Brain.inputs.out_type = 'nii' - - mri_convert_AparcAseg = mri_convert_Brain.clone('mri_convert_AparcAseg') - - mris_convertLH = pe.Node(interface=fs.MRIsConvert(), name='mris_convertLH') - mris_convertLH.inputs.out_datatype = 'gii' - mris_convertRH = mris_convertLH.clone('mris_convertRH') - mris_convertRHwhite = mris_convertLH.clone('mris_convertRHwhite') - mris_convertLHwhite = mris_convertLH.clone('mris_convertLHwhite') - mris_convertRHinflated = mris_convertLH.clone('mris_convertRHinflated') - mris_convertLHinflated = mris_convertLH.clone('mris_convertLHinflated') - mris_convertRHsphere = mris_convertLH.clone('mris_convertRHsphere') - mris_convertLHsphere = mris_convertLH.clone('mris_convertLHsphere') - mris_convertLHlabels = mris_convertLH.clone('mris_convertLHlabels') - mris_convertRHlabels = mris_convertLH.clone('mris_convertRHlabels') - """ - In this section we create the nodes necessary for diffusion analysis. - First, the diffusion image is converted to voxel order, since this is the format in which Camino does - its processing. - """ - - image2voxel = pe.Node(interface=camino.Image2Voxel(), name="image2voxel") - """ - Second, diffusion tensors are fit to the voxel-order data. - If desired, these tensors can be converted to a Nifti tensor image using the DT2NIfTI interface. - """ - - dtifit = pe.Node(interface=camino.DTIFit(), name='dtifit') - """ - Next, a lookup table is generated from the schemefile and the - signal-to-noise ratio (SNR) of the unweighted (q=0) data. - """ - - dtlutgen = pe.Node(interface=camino.DTLUTGen(), name="dtlutgen") - dtlutgen.inputs.snr = 16.0 - dtlutgen.inputs.inversion = 1 - """ - In this tutorial we implement probabilistic tractography using the PICo algorithm. - PICo tractography requires an estimate of the fibre direction and a model of its uncertainty in each voxel; - this probabilitiy distribution map is produced using the following node. - """ - - picopdfs = pe.Node(interface=camino.PicoPDFs(), name="picopdfs") - picopdfs.inputs.inputmodel = 'dt' - """ - Finally, tractography is performed. In this tutorial, we will use only one iteration for time-saving purposes. - It is important to note that we use the TrackPICo interface here. This interface now expects the files required - for PICo tracking (i.e. the output from picopdfs). Similar interfaces exist for alternative types of tracking, - such as Bayesian tracking with Dirac priors (TrackBayesDirac). - """ - - track = pe.Node(interface=camino.TrackPICo(), name="track") - track.inputs.iterations = 1 - """ - Currently, the best program for visualizing tracts is TrackVis. For this reason, a node is included to - convert the raw tract data to .trk format. Solely for testing purposes, another node is added to perform the reverse. 
- """ - - camino2trackvis = pe.Node( - interface=cam2trk.Camino2Trackvis(), name="camino2trackvis") - camino2trackvis.inputs.min_length = 30 - camino2trackvis.inputs.voxel_order = 'LAS' - trk2camino = pe.Node( - interface=cam2trk.Trackvis2Camino(), name="trk2camino") - """ - Tracts can also be converted to VTK and OOGL formats, for use in programs such as GeomView and Paraview, - using the following two nodes. - """ - - vtkstreamlines = pe.Node( - interface=camino.VtkStreamlines(), name="vtkstreamlines") - procstreamlines = pe.Node( - interface=camino.ProcStreamlines(), name="procstreamlines") - """ - We can easily produce a variety of scalar values from our fitted tensors. The following nodes generate the - fractional anisotropy and diffusivity trace maps and their associated headers, and then merge them back - into a single .nii file. - """ - - fa = pe.Node(interface=camino.ComputeFractionalAnisotropy(), name='fa') - trace = pe.Node(interface=camino.ComputeTensorTrace(), name='trace') - dteig = pe.Node(interface=camino.ComputeEigensystem(), name='dteig') - - analyzeheader_fa = pe.Node( - interface=camino.AnalyzeHeader(), name='analyzeheader_fa') - analyzeheader_fa.inputs.datatype = 'double' - analyzeheader_trace = pe.Node( - interface=camino.AnalyzeHeader(), name='analyzeheader_trace') - analyzeheader_trace.inputs.datatype = 'double' - - fa2nii = pe.Node(interface=misc.CreateNifti(), name='fa2nii') - trace2nii = fa2nii.clone("trace2nii") - """ - This section adds the Connectome Mapping Toolkit (CMTK) nodes. - These interfaces are fairly experimental and may not function properly. - In order to perform connectivity mapping using CMTK, the parcellated structural data is rewritten - using the indices and parcellation scheme from the connectome mapper (CMP). This process has been - written into the ROIGen interface, which will output a remapped aparc+aseg image as well as a - dictionary of label information (i.e. name, display colours) pertaining to the original and remapped regions. - These label values are input from a user-input lookup table, if specified, and otherwise the default - Freesurfer LUT (/freesurfer/FreeSurferColorLUT.txt). - """ - - roigen = pe.Node(interface=cmtk.ROIGen(), name="ROIGen") - roigen_structspace = roigen.clone("ROIGen_structspace") - """ - The CreateMatrix interface takes in the remapped aparc+aseg image as well as the label dictionary and fiber tracts - and outputs a number of different files. The most important of which is the connectivity network itself, which is stored - as a 'gpickle' and can be loaded using Python's NetworkX package (see CreateMatrix docstring). Also outputted are various - NumPy arrays containing detailed tract information, such as the start and endpoint regions, and statistics on the mean and - standard deviation for the fiber length of each connection. These matrices can be used in the ConnectomeViewer to plot the - specific tracts that connect between user-selected regions. - """ - - createnodes = pe.Node(interface=cmtk.CreateNodes(), name="CreateNodes") - creatematrix = pe.Node(interface=cmtk.CreateMatrix(), name="CreateMatrix") - creatematrix.inputs.count_region_intersections = True - """ - Here we define the endpoint of this tutorial, which is the CFFConverter node, as well as a few nodes which use - the Nipype Merge utility. These are useful for passing lists of the files we want packaged in our CFF file. 
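A minimal sketch of the Merge idiom described above, with illustrative two-input names that are not part of this patch (the workflow below uses util.Merge(8) for the GIFTI surfaces): Merge simply collects its numbered in1..inN inputs into a single list on its out field, which is what CFFConverter consumes.

from nipype.pipeline import engine as pe
from nipype.interfaces import utility as util

# Collect two illustrative surface files into one list; the real workflow below
# does the same with eight GIFTI surfaces and three NIfTI volumes.
merge_surfaces = pe.Node(util.Merge(2), name="merge_surfaces_example")
merge_surfaces.inputs.in1 = "lh.pial.gii"   # hypothetical file names
merge_surfaces.inputs.in2 = "rh.pial.gii"
# merge_surfaces.run().outputs.out would be ['lh.pial.gii', 'rh.pial.gii']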
- """ - - CFFConverter = pe.Node(interface=cmtk.CFFConverter(), name="CFFConverter") - - giftiSurfaces = pe.Node(interface=util.Merge(8), name="GiftiSurfaces") - giftiLabels = pe.Node(interface=util.Merge(2), name="GiftiLabels") - niftiVolumes = pe.Node(interface=util.Merge(3), name="NiftiVolumes") - fiberDataArrays = pe.Node(interface=util.Merge(4), name="FiberDataArrays") - """ - Since we have now created all our nodes, we can define our workflow and start making connections. - """ - - mapping = pe.Workflow(name='mapping') - """ - First, we connect the input node to the early conversion functions. - FreeSurfer input nodes: - """ - - mapping.connect([(inputnode_within, FreeSurferSource, [("subjects_dir", - "subjects_dir")])]) - mapping.connect([(inputnode_within, FreeSurferSource, [("subject_id", - "subject_id")])]) - - mapping.connect([(inputnode_within, FreeSurferSourceLH, - [("subjects_dir", "subjects_dir")])]) - mapping.connect([(inputnode_within, FreeSurferSourceLH, [("subject_id", - "subject_id")])]) - - mapping.connect([(inputnode_within, FreeSurferSourceRH, - [("subjects_dir", "subjects_dir")])]) - mapping.connect([(inputnode_within, FreeSurferSourceRH, [("subject_id", - "subject_id")])]) - """ - Required conversions for processing in Camino: - """ - - mapping.connect([(inputnode_within, image2voxel, - [("dwi", "in_file")]), (inputnode_within, fsl2scheme, - [("bvecs", "bvec_file"), - ("bvals", "bval_file")]), - (image2voxel, dtifit, [['voxel_order', 'in_file']]), - (fsl2scheme, dtifit, [['scheme', 'scheme_file']])]) - """ - Nifti conversions for the subject's stripped brain image from Freesurfer: - """ - - mapping.connect([(FreeSurferSource, mri_convert_Brain, [('brain', - 'in_file')])]) - """ - Surface conversions to GIFTI (pial, white, inflated, and sphere for both hemispheres) - """ - - mapping.connect([(FreeSurferSourceLH, mris_convertLH, [('pial', - 'in_file')])]) - mapping.connect([(FreeSurferSourceRH, mris_convertRH, [('pial', - 'in_file')])]) - mapping.connect([(FreeSurferSourceLH, mris_convertLHwhite, [('white', - 'in_file')])]) - mapping.connect([(FreeSurferSourceRH, mris_convertRHwhite, [('white', - 'in_file')])]) - mapping.connect([(FreeSurferSourceLH, mris_convertLHinflated, - [('inflated', 'in_file')])]) - mapping.connect([(FreeSurferSourceRH, mris_convertRHinflated, - [('inflated', 'in_file')])]) - mapping.connect([(FreeSurferSourceLH, mris_convertLHsphere, - [('sphere', 'in_file')])]) - mapping.connect([(FreeSurferSourceRH, mris_convertRHsphere, - [('sphere', 'in_file')])]) - """ - The annotation files are converted using the pial surface as a map via the MRIsConvert interface. - One of the functions defined earlier is used to select the lh.aparc.annot and rh.aparc.annot files - specifically (rather than i.e. rh.aparc.a2009s.annot) from the output list given by the FreeSurferSource. - """ - - mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels, - [('pial', 'in_file')])]) - mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels, - [('pial', 'in_file')])]) - mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels, - [(('annot', select_aparc_annot), 'annot_file')])]) - mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels, - [(('annot', select_aparc_annot), 'annot_file')])]) - """ - This section coregisters the diffusion-weighted and parcellated white-matter / whole brain images. - At present the conmap node connection is left commented, as there have been recent changes in Camino - code that have presented some users with errors. 
- """ - - mapping.connect([(inputnode_within, b0Strip, [('dwi', 'in_file')])]) - mapping.connect([(inputnode_within, b0Strip, [('dwi', 't2_guided')]) - ]) # Added to improve damaged brain extraction - mapping.connect([(b0Strip, coregister, [('out_file', 'in_file')])]) - mapping.connect([(mri_convert_Brain, coregister, [('out_file', - 'reference')])]) - mapping.connect([(coregister, convertxfm, [('out_matrix_file', - 'in_file')])]) - mapping.connect([(b0Strip, inverse, [('out_file', 'reference')])]) - mapping.connect([(convertxfm, inverse, [('out_file', 'in_matrix_file')])]) - mapping.connect([(mri_convert_Brain, inverse, [('out_file', 'in_file')])]) - """ - The tractography pipeline consists of the following nodes. Further information about the tractography - can be found in nipype/examples/dmri_camino_dti.py. - """ - - mapping.connect([(b0Strip, track, [("mask_file", "seed_file")])]) - mapping.connect([(fsl2scheme, dtlutgen, [("scheme", "scheme_file")])]) - mapping.connect([(dtlutgen, picopdfs, [("dtLUT", "luts")])]) - mapping.connect([(dtifit, picopdfs, [("tensor_fitted", "in_file")])]) - mapping.connect([(picopdfs, track, [("pdfs", "in_file")])]) - """ - Connecting the Fractional Anisotropy and Trace nodes is simple, as they obtain their input from the - tensor fitting. This is also where our voxel- and data-grabbing functions come in. We pass these functions, - along with the original DWI image from the input node, to the header-generating nodes. This ensures that the - files will be correct and readable. - """ - - mapping.connect([(dtifit, fa, [("tensor_fitted", "in_file")])]) - mapping.connect([(fa, analyzeheader_fa, [("fa", "in_file")])]) - mapping.connect([(inputnode_within, analyzeheader_fa, - [(('dwi', get_vox_dims), 'voxel_dims'), - (('dwi', get_data_dims), 'data_dims')])]) - mapping.connect([(fa, fa2nii, [('fa', 'data_file')])]) - mapping.connect([(inputnode_within, fa2nii, [(('dwi', get_affine), - 'affine')])]) - mapping.connect([(analyzeheader_fa, fa2nii, [('header', 'header_file')])]) - - mapping.connect([(dtifit, trace, [("tensor_fitted", "in_file")])]) - mapping.connect([(trace, analyzeheader_trace, [("trace", "in_file")])]) - mapping.connect([(inputnode_within, analyzeheader_trace, - [(('dwi', get_vox_dims), 'voxel_dims'), - (('dwi', get_data_dims), 'data_dims')])]) - mapping.connect([(trace, trace2nii, [('trace', 'data_file')])]) - mapping.connect([(inputnode_within, trace2nii, [(('dwi', get_affine), - 'affine')])]) - mapping.connect([(analyzeheader_trace, trace2nii, [('header', - 'header_file')])]) - - mapping.connect([(dtifit, dteig, [("tensor_fitted", "in_file")])]) - """ - The output tracts are converted to Trackvis format (and back). Here we also use the voxel- and data-grabbing - functions defined at the beginning of the pipeline. - """ - - mapping.connect([(track, camino2trackvis, [('tracked', 'in_file')]), - (track, vtkstreamlines, [['tracked', 'in_file']]), - (camino2trackvis, trk2camino, [['trackvis', 'in_file']])]) - mapping.connect([(inputnode_within, camino2trackvis, - [(('dwi', get_vox_dims), 'voxel_dims'), - (('dwi', get_data_dims), 'data_dims')])]) - """ - Here the CMTK connectivity mapping nodes are connected. - The original aparc+aseg image is converted to NIFTI, then registered to - the diffusion image and delivered to the ROIGen node. The remapped parcellation, - original tracts, and label file are then given to CreateMatrix. 
- """ - - mapping.connect(inputnode_within, 'resolution_network_file', createnodes, - 'resolution_network_file') - mapping.connect(createnodes, 'node_network', creatematrix, - 'resolution_network_file') - mapping.connect([(FreeSurferSource, mri_convert_AparcAseg, - [(('aparc_aseg', select_aparc), 'in_file')])]) - - mapping.connect([(b0Strip, inverse_AparcAseg, [('out_file', - 'reference')])]) - mapping.connect([(convertxfm, inverse_AparcAseg, [('out_file', - 'in_matrix_file')])]) - mapping.connect([(mri_convert_AparcAseg, inverse_AparcAseg, - [('out_file', 'in_file')])]) - mapping.connect([(mri_convert_AparcAseg, roigen_structspace, - [('out_file', 'aparc_aseg_file')])]) - mapping.connect([(roigen_structspace, createnodes, [("roi_file", - "roi_file")])]) - - mapping.connect([(inverse_AparcAseg, roigen, [("out_file", - "aparc_aseg_file")])]) - mapping.connect([(roigen, creatematrix, [("roi_file", "roi_file")])]) - mapping.connect([(camino2trackvis, creatematrix, [("trackvis", - "tract_file")])]) - mapping.connect([(inputnode_within, creatematrix, [("subject_id", - "out_matrix_file")])]) - mapping.connect([(inputnode_within, creatematrix, - [("subject_id", "out_matrix_mat_file")])]) - """ - The merge nodes defined earlier are used here to create lists of the files which are - destined for the CFFConverter. - """ - - mapping.connect([(mris_convertLH, giftiSurfaces, [("converted", "in1")])]) - mapping.connect([(mris_convertRH, giftiSurfaces, [("converted", "in2")])]) - mapping.connect([(mris_convertLHwhite, giftiSurfaces, [("converted", - "in3")])]) - mapping.connect([(mris_convertRHwhite, giftiSurfaces, [("converted", - "in4")])]) - mapping.connect([(mris_convertLHinflated, giftiSurfaces, [("converted", - "in5")])]) - mapping.connect([(mris_convertRHinflated, giftiSurfaces, [("converted", - "in6")])]) - mapping.connect([(mris_convertLHsphere, giftiSurfaces, [("converted", - "in7")])]) - mapping.connect([(mris_convertRHsphere, giftiSurfaces, [("converted", - "in8")])]) - - mapping.connect([(mris_convertLHlabels, giftiLabels, [("converted", - "in1")])]) - mapping.connect([(mris_convertRHlabels, giftiLabels, [("converted", - "in2")])]) - - mapping.connect([(roigen, niftiVolumes, [("roi_file", "in1")])]) - mapping.connect([(inputnode_within, niftiVolumes, [("dwi", "in2")])]) - mapping.connect([(mri_convert_Brain, niftiVolumes, [("out_file", "in3")])]) - - mapping.connect([(creatematrix, fiberDataArrays, [("endpoint_file", - "in1")])]) - mapping.connect([(creatematrix, fiberDataArrays, [("endpoint_file_mm", - "in2")])]) - mapping.connect([(creatematrix, fiberDataArrays, [("fiber_length_file", - "in3")])]) - mapping.connect([(creatematrix, fiberDataArrays, [("fiber_label_file", - "in4")])]) - """ - This block actually connects the merged lists to the CFF converter. We pass the surfaces - and volumes that are to be included, as well as the tracts and the network itself. The currently - running pipeline (dmri_connectivity.py) is also scraped and included in the CFF file. This - makes it easy for the user to examine the entire processing pathway used to generate the end - product. 
- """ - - CFFConverter.inputs.script_files = op.abspath( - inspect.getfile(inspect.currentframe())) - mapping.connect([(giftiSurfaces, CFFConverter, [("out", - "gifti_surfaces")])]) - mapping.connect([(giftiLabels, CFFConverter, [("out", "gifti_labels")])]) - mapping.connect([(creatematrix, CFFConverter, [("matrix_files", - "gpickled_networks")])]) - - mapping.connect([(niftiVolumes, CFFConverter, [("out", "nifti_volumes")])]) - mapping.connect([(fiberDataArrays, CFFConverter, [("out", "data_files")])]) - mapping.connect([(camino2trackvis, CFFConverter, [("trackvis", - "tract_files")])]) - mapping.connect([(inputnode_within, CFFConverter, [("subject_id", - "title")])]) - """ - Finally, we create another higher-level workflow to connect our mapping workflow with the info and datagrabbing nodes - declared at the beginning. Our tutorial can is now extensible to any arbitrary number of subjects by simply adding - their names to the subject list and their data to the proper folders. - """ - - inputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - "subject_id", "dwi", "bvecs", "bvals", "subjects_dir", - "resolution_network_file" - ]), - name="inputnode") - - outputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - "fa", "struct", "trace", "tracts", "connectome", "cmatrix", - "networks", "rois", "mean_fiber_length", "fiber_length_std", - "tensors" - ]), - name="outputnode") - - connectivity = pe.Workflow(name="connectivity") - connectivity.base_output_dir = name - - connectivity.connect([ - (inputnode, mapping, - [("dwi", "inputnode_within.dwi"), ("bvals", "inputnode_within.bvals"), - ("bvecs", "inputnode_within.bvecs"), ("subject_id", - "inputnode_within.subject_id"), - ("subjects_dir", "inputnode_within.subjects_dir"), - ("resolution_network_file", - "inputnode_within.resolution_network_file")]) - ]) - - connectivity.connect( - [(mapping, outputnode, - [("camino2trackvis.trackvis", - "tracts"), ("CFFConverter.connectome_file", "connectome"), - ("CreateMatrix.matrix_mat_file", - "cmatrix"), ("CreateMatrix.mean_fiber_length_matrix_mat_file", - "mean_fiber_length"), - ("CreateMatrix.fiber_length_std_matrix_mat_file", - "fiber_length_std"), ("fa2nii.nifti_file", - "fa"), ("CreateMatrix.matrix_files", - "networks"), ("ROIGen.roi_file", - "rois"), - ("mri_convert_Brain.out_file", - "struct"), ("trace2nii.nifti_file", - "trace"), ("dtifit.tensor_fitted", "tensors")])]) - - return connectivity diff --git a/nipype/workflows/dmri/camino/diffusion.py b/nipype/workflows/dmri/camino/diffusion.py deleted file mode 100644 index 708ddb8bc4..0000000000 --- a/nipype/workflows/dmri/camino/diffusion.py +++ /dev/null @@ -1,245 +0,0 @@ -# -*- coding: utf-8 -*- -from ....interfaces import utility as util # utility -from ....pipeline import engine as pe # pypeline engine -from ....interfaces import camino as camino -from ....interfaces import fsl as fsl -from ....interfaces import camino2trackvis as cam2trk -from ....algorithms import misc as misc -from ...misc.utils import get_affine, get_data_dims, get_vox_dims - - -def create_camino_dti_pipeline(name="dtiproc"): - """Creates a pipeline that does the same diffusion processing as in the - :doc:`../../users/examples/dmri_camino_dti` example script. Given a diffusion-weighted image, - b-values, and b-vectors, the workflow will return the tractography - computed from diffusion tensors and from PICo probabilistic tractography. 
- - Example - ------- - - >>> import os - >>> nipype_camino_dti = create_camino_dti_pipeline("nipype_camino_dti") - >>> nipype_camino_dti.inputs.inputnode.dwi = os.path.abspath('dwi.nii') - >>> nipype_camino_dti.inputs.inputnode.bvecs = os.path.abspath('bvecs') - >>> nipype_camino_dti.inputs.inputnode.bvals = os.path.abspath('bvals') - >>> nipype_camino_dti.run() # doctest: +SKIP - - Inputs:: - - inputnode.dwi - inputnode.bvecs - inputnode.bvals - - Outputs:: - - outputnode.fa - outputnode.trace - outputnode.tracts_pico - outputnode.tracts_dt - outputnode.tensors - - """ - - inputnode1 = pe.Node( - interface=util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]), - name="inputnode1") - """ - Setup for Diffusion Tensor Computation - -------------------------------------- - In this section we create the nodes necessary for diffusion analysis. - First, the diffusion image is converted to voxel order. - """ - - image2voxel = pe.Node(interface=camino.Image2Voxel(), name="image2voxel") - fsl2scheme = pe.Node(interface=camino.FSL2Scheme(), name="fsl2scheme") - fsl2scheme.inputs.usegradmod = True - """ - Second, diffusion tensors are fit to the voxel-order data. - """ - - dtifit = pe.Node(interface=camino.DTIFit(), name='dtifit') - """ - Next, a lookup table is generated from the schemefile and the - signal-to-noise ratio (SNR) of the unweighted (q=0) data. - """ - - dtlutgen = pe.Node(interface=camino.DTLUTGen(), name="dtlutgen") - dtlutgen.inputs.snr = 16.0 - dtlutgen.inputs.inversion = 1 - """ - In this tutorial we implement probabilistic tractography using the PICo algorithm. - PICo tractography requires an estimate of the fibre direction and a model of its - uncertainty in each voxel; this is produced using the following node. - """ - - picopdfs = pe.Node(interface=camino.PicoPDFs(), name="picopdfs") - picopdfs.inputs.inputmodel = 'dt' - """ - An FSL BET node creates a brain mask is generated from the diffusion image for seeding the PICo tractography. - """ - - bet = pe.Node(interface=fsl.BET(), name="bet") - bet.inputs.mask = True - """ - Finally, tractography is performed. - First DT streamline tractography. - """ - - trackdt = pe.Node(interface=camino.TrackDT(), name="trackdt") - """ - Now camino's Probablistic Index of connectivity algorithm. - In this tutorial, we will use only 1 iteration for time-saving purposes. - """ - - trackpico = pe.Node(interface=camino.TrackPICo(), name="trackpico") - trackpico.inputs.iterations = 1 - """ - Currently, the best program for visualizing tracts is TrackVis. For this reason, a node is included to convert the raw tract data to .trk format. Solely for testing purposes, another node is added to perform the reverse. - """ - - cam2trk_dt = pe.Node( - interface=cam2trk.Camino2Trackvis(), name="cam2trk_dt") - cam2trk_dt.inputs.min_length = 30 - cam2trk_dt.inputs.voxel_order = 'LAS' - - cam2trk_pico = pe.Node( - interface=cam2trk.Camino2Trackvis(), name="cam2trk_pico") - cam2trk_pico.inputs.min_length = 30 - cam2trk_pico.inputs.voxel_order = 'LAS' - """ - Tracts can also be converted to VTK and OOGL formats, for use in programs such as GeomView and Paraview, using the following two nodes. - """ - - # vtkstreamlines = pe.Node(interface=camino.VtkStreamlines(), name="vtkstreamlines") - # procstreamlines = pe.Node(interface=camino.ProcStreamlines(), name="procstreamlines") - # procstreamlines.inputs.outputtracts = 'oogl' - """ - We can also produce a variety of scalar values from our fitted tensors. 
The following nodes generate the fractional anisotropy and diffusivity trace maps and their associated headers. - """ - - fa = pe.Node(interface=camino.ComputeFractionalAnisotropy(), name='fa') - # md = pe.Node(interface=camino.MD(),name='md') - trace = pe.Node(interface=camino.ComputeTensorTrace(), name='trace') - dteig = pe.Node(interface=camino.ComputeEigensystem(), name='dteig') - - analyzeheader_fa = pe.Node( - interface=camino.AnalyzeHeader(), name="analyzeheader_fa") - analyzeheader_fa.inputs.datatype = "double" - analyzeheader_trace = analyzeheader_fa.clone('analyzeheader_trace') - - # analyzeheader_md = pe.Node(interface= camino.AnalyzeHeader(), name = "analyzeheader_md") - # analyzeheader_md.inputs.datatype = "double" - # analyzeheader_trace = analyzeheader_md.clone('analyzeheader_trace') - - fa2nii = pe.Node(interface=misc.CreateNifti(), name='fa2nii') - trace2nii = fa2nii.clone("trace2nii") - """ - Since we have now created all our nodes, we can now define our workflow and start making connections. - """ - - tractography = pe.Workflow(name='tractography') - - tractography.connect([(inputnode1, bet, [("dwi", "in_file")])]) - """ - File format conversion - """ - - tractography.connect([(inputnode1, image2voxel, [("dwi", "in_file")]), - (inputnode1, fsl2scheme, [("bvecs", "bvec_file"), - ("bvals", "bval_file")])]) - """ - Tensor fitting - """ - - tractography.connect([(image2voxel, dtifit, [['voxel_order', 'in_file']]), - (fsl2scheme, dtifit, [['scheme', 'scheme_file']])]) - """ - Workflow for applying DT streamline tractogpahy - """ - - tractography.connect([(bet, trackdt, [("mask_file", "seed_file")])]) - tractography.connect([(dtifit, trackdt, [("tensor_fitted", "in_file")])]) - """ - Workflow for applying PICo - """ - - tractography.connect([(bet, trackpico, [("mask_file", "seed_file")])]) - tractography.connect([(fsl2scheme, dtlutgen, [("scheme", "scheme_file")])]) - tractography.connect([(dtlutgen, picopdfs, [("dtLUT", "luts")])]) - tractography.connect([(dtifit, picopdfs, [("tensor_fitted", "in_file")])]) - tractography.connect([(picopdfs, trackpico, [("pdfs", "in_file")])]) - - # Mean diffusivity still appears broken - # tractography.connect([(dtifit, md,[("tensor_fitted","in_file")])]) - # tractography.connect([(md, analyzeheader_md,[("md","in_file")])]) - # tractography.connect([(inputnode, analyzeheader_md,[(('dwi', get_vox_dims), 'voxel_dims'), - # (('dwi', get_data_dims), 'data_dims')])]) - # This line is commented out because the ProcStreamlines node keeps throwing memory errors - # tractography.connect([(track, procstreamlines,[("tracked","in_file")])]) - """ - Connecting the Fractional Anisotropy and Trace nodes is simple, as they obtain their input from the - tensor fitting. - - This is also where our voxel- and data-grabbing functions come in. We pass these functions, along with the original DWI image from the input node, to the header-generating nodes. This ensures that the files will be correct and readable. 
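The voxel- and data-grabbing helpers are passed inside the connection tuples themselves. A self-contained sketch of that connect pattern, with illustrative names standing in for get_vox_dims and the real nodes, is:

from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu

def uppercase(value):
    # stand-in for a helper such as get_vox_dims; it runs on the upstream
    # output before the result reaches the downstream input
    return value.upper()

src = pe.Node(niu.IdentityInterface(fields=['text']), name='src')
src.inputs.text = 'dwi.nii'
dst = pe.Node(niu.IdentityInterface(fields=['text']), name='dst')

sketch = pe.Workflow(name='connect_with_function')
# the (('text', uppercase), 'text') tuple applies uppercase() to src.text on
# its way to dst.text, just as (('dwi', get_vox_dims), 'voxel_dims') does below
sketch.connect([(src, dst, [(('text', uppercase), 'text')])])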
- """ - - tractography.connect([(dtifit, fa, [("tensor_fitted", "in_file")])]) - tractography.connect([(fa, analyzeheader_fa, [("fa", "in_file")])]) - tractography.connect([(inputnode1, analyzeheader_fa, - [(('dwi', get_vox_dims), 'voxel_dims'), - (('dwi', get_data_dims), 'data_dims')])]) - tractography.connect([(fa, fa2nii, [('fa', 'data_file')])]) - tractography.connect([(inputnode1, fa2nii, [(('dwi', get_affine), - 'affine')])]) - tractography.connect([(analyzeheader_fa, fa2nii, [('header', - 'header_file')])]) - - tractography.connect([(dtifit, trace, [("tensor_fitted", "in_file")])]) - tractography.connect([(trace, analyzeheader_trace, [("trace", - "in_file")])]) - tractography.connect([(inputnode1, analyzeheader_trace, - [(('dwi', get_vox_dims), 'voxel_dims'), - (('dwi', get_data_dims), 'data_dims')])]) - tractography.connect([(trace, trace2nii, [('trace', 'data_file')])]) - tractography.connect([(inputnode1, trace2nii, [(('dwi', get_affine), - 'affine')])]) - tractography.connect([(analyzeheader_trace, trace2nii, [('header', - 'header_file')])]) - - tractography.connect([(dtifit, dteig, [("tensor_fitted", "in_file")])]) - - tractography.connect([(trackpico, cam2trk_pico, [('tracked', 'in_file')])]) - tractography.connect([(trackdt, cam2trk_dt, [('tracked', 'in_file')])]) - tractography.connect([(inputnode1, cam2trk_pico, - [(('dwi', get_vox_dims), 'voxel_dims'), - (('dwi', get_data_dims), 'data_dims')])]) - - tractography.connect([(inputnode1, cam2trk_dt, - [(('dwi', get_vox_dims), 'voxel_dims'), - (('dwi', get_data_dims), 'data_dims')])]) - - inputnode = pe.Node( - interface=util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]), - name="inputnode") - - outputnode = pe.Node( - interface=util.IdentityInterface( - fields=["fa", "trace", "tracts_pico", "tracts_dt", "tensors"]), - name="outputnode") - - workflow = pe.Workflow(name=name) - workflow.base_output_dir = name - - workflow.connect([(inputnode, tractography, - [("dwi", "inputnode1.dwi"), - ("bvals", "inputnode1.bvals"), ("bvecs", - "inputnode1.bvecs")])]) - - workflow.connect([(tractography, outputnode, - [("cam2trk_dt.trackvis", "tracts_dt"), - ("cam2trk_pico.trackvis", - "tracts_pico"), ("fa2nii.nifti_file", "fa"), - ("trace2nii.nifti_file", - "trace"), ("dtifit.tensor_fitted", "tensors")])]) - - return workflow diff --git a/nipype/workflows/dmri/camino/group_connectivity.py b/nipype/workflows/dmri/camino/group_connectivity.py deleted file mode 100644 index 1307f8c4b6..0000000000 --- a/nipype/workflows/dmri/camino/group_connectivity.py +++ /dev/null @@ -1,115 +0,0 @@ -# -*- coding: utf-8 -*- -import os.path as op # system functions - -from .connectivity_mapping import create_connectivity_pipeline -from ....interfaces import io as nio # Data i/o -from ....interfaces import utility as util # utility -from ....pipeline import engine as pe # pypeline engine - - -def create_group_connectivity_pipeline(group_list, - group_id, - data_dir, - subjects_dir, - output_dir, - template_args_dict=0): - """Creates a pipeline that performs basic Camino structural connectivity processing - on groups of subjects. Given a diffusion-weighted image, and text files containing - the associated b-values and b-vectors, the workflow will return each subjects' connectomes - in a Connectome File Format (CFF) file, for use in Connectome Viewer (http://www.cmtk.org). - - Example - ------- - - >>> import nipype.interfaces.freesurfer as fs - >>> import nipype.workflows.dmri.camino.group_connectivity as groupwork - >>> subjects_dir = '.' 
- >>> data_dir = '.' - >>> output_dir = '.' - >>> fs.FSCommand.set_default_subjects_dir(subjects_dir) - >>> group_list = {} - >>> group_list['group1'] = ['subj1', 'subj2'] - >>> group_list['group2'] = ['subj3', 'subj4'] - >>> template_args = dict(dwi=[['subject_id', 'dwi']], bvecs=[['subject_id', 'bvecs']], bvals=[['subject_id', 'bvals']]) - >>> group_id = 'group1' - >>> l1pipeline = groupwork.create_group_connectivity_pipeline(group_list, group_id, data_dir, subjects_dir, output_dir, template_args) - >>> l1pipeline.run() # doctest: +SKIP - - Inputs:: - - group_list: Dictionary of subject lists, keyed by group name - group_id: String containing the group name - data_dir: Path to the data directory - subjects_dir: Path to the Freesurfer 'subjects' directory - output_dir: Path for the output files - template_args_dict: Dictionary of template arguments for the connectivity pipeline datasource - e.g. info = dict(dwi=[['subject_id', 'dwi']], - bvecs=[['subject_id','bvecs']], - bvals=[['subject_id','bvals']]) - """ - group_infosource = pe.Node( - interface=util.IdentityInterface(fields=['group_id']), - name="group_infosource") - group_infosource.inputs.group_id = group_id - subject_list = group_list[group_id] - subj_infosource = pe.Node( - interface=util.IdentityInterface(fields=['subject_id']), - name="subj_infosource") - subj_infosource.iterables = ('subject_id', subject_list) - - if template_args_dict == 0: - info = dict( - dwi=[['subject_id', 'dwi']], - bvecs=[['subject_id', 'bvecs']], - bvals=[['subject_id', 'bvals']]) - else: - info = template_args_dict - - datasource = pe.Node( - interface=nio.DataGrabber( - infields=['subject_id'], outfields=list(info.keys())), - name='datasource') - - datasource.inputs.template = "%s/%s" - datasource.inputs.base_directory = data_dir - datasource.inputs.field_template = dict(dwi='%s/%s.nii') - datasource.inputs.template_args = info - datasource.inputs.sort_filelist = True - """ - Create a connectivity mapping workflow - """ - conmapper = create_connectivity_pipeline("nipype_conmap") - conmapper.inputs.inputnode.subjects_dir = subjects_dir - conmapper.base_dir = op.abspath('conmapper') - - datasink = pe.Node(interface=nio.DataSink(), name="datasink") - datasink.inputs.base_directory = output_dir - datasink.inputs.container = group_id - - l1pipeline = pe.Workflow(name="l1pipeline_" + group_id) - l1pipeline.base_dir = output_dir - l1pipeline.base_output_dir = group_id - l1pipeline.connect([(subj_infosource, datasource, [('subject_id', - 'subject_id')])]) - l1pipeline.connect([(subj_infosource, conmapper, - [('subject_id', 'inputnode.subject_id')])]) - l1pipeline.connect([(datasource, conmapper, [ - ("dwi", "inputnode.dwi"), - ("bvals", "inputnode.bvals"), - ("bvecs", "inputnode.bvecs"), - ])]) - l1pipeline.connect([(conmapper, datasink, [ - ("outputnode.connectome", "@l1output.cff"), - ("outputnode.fa", "@l1output.fa"), - ("outputnode.tracts", "@l1output.tracts"), - ("outputnode.trace", "@l1output.trace"), - ("outputnode.cmatrix", "@l1output.cmatrix"), - ("outputnode.rois", "@l1output.rois"), - ("outputnode.struct", "@l1output.struct"), - ("outputnode.networks", "@l1output.networks"), - ("outputnode.mean_fiber_length", "@l1output.mean_fiber_length"), - ("outputnode.fiber_length_std", "@l1output.fiber_length_std"), - ])]) - l1pipeline.connect([(group_infosource, datasink, [('group_id', - '@group_id')])]) - return l1pipeline diff --git a/nipype/workflows/dmri/connectivity/__init__.py b/nipype/workflows/dmri/connectivity/__init__.py deleted file mode 100644 
index b34ca0dacb..0000000000 --- a/nipype/workflows/dmri/connectivity/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import -from .nx import (create_networkx_pipeline, create_cmats_to_csv_pipeline) -from .group_connectivity import ( - create_merge_networks_by_group_workflow, - create_merge_network_results_by_group_workflow, - create_merge_group_networks_workflow, - create_merge_group_network_results_workflow, - create_average_networks_by_group_workflow) diff --git a/nipype/workflows/dmri/connectivity/group_connectivity.py b/nipype/workflows/dmri/connectivity/group_connectivity.py deleted file mode 100644 index a918104bd1..0000000000 --- a/nipype/workflows/dmri/connectivity/group_connectivity.py +++ /dev/null @@ -1,631 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import open - -from future.utils import raise_from - -import os.path as op - -from ....interfaces import io as nio # Data i/o -from ....interfaces import utility as util # utility -from ....interfaces import cmtk as cmtk -from ....algorithms import misc as misc -from ....pipeline import engine as pe # pypeline engine -from ....interfaces.utility import Function -from ....utils.misc import package_check - -have_cmp = True -try: - package_check('cmp') -except Exception as e: - have_cmp = False -else: - import cmp - - -def pullnodeIDs(in_network, name_key='dn_name'): - """ This function will return the values contained, for each node in - a network, given an input key. By default it will return the node names - """ - import networkx as nx - import numpy as np - from nipype.interfaces.base import isdefined - if not isdefined(in_network): - raise ValueError - return None - try: - ntwk = nx.read_graphml(in_network) - except: - ntwk = nx.read_gpickle(in_network) - nodedata = ntwk.node - ids = [] - integer_nodelist = [] - for node in list(nodedata.keys()): - integer_nodelist.append(int(node)) - for node in np.sort(integer_nodelist): - try: - nodeid = nodedata[node][name_key] - except KeyError: - nodeid = nodedata[str(node)][name_key] - ids.append(nodeid) - return ids - - -def concatcsv(in_files): - """ This function will contatenate two "comma-separated value" - text files, but remove the first row (usually column headers) from - all but the first file. - """ - import os.path as op - from nipype.utils.filemanip import split_filename - - if not isinstance(in_files, list): - return in_files - if isinstance(in_files[0], list): - in_files = in_files[0] - first = open(in_files[0], 'r') - path, name, ext = split_filename(in_files[0]) - out_name = op.abspath('concat.csv') - out_file = open(out_name, 'w') - out_file.write(first.readline()) - first.close() - for in_file in in_files: - file_to_read = open(in_file, 'r') - file_to_read.readline() # scrap first line - for line in file_to_read: - out_file.write(line) - return out_name - - -def create_merge_networks_by_group_workflow(group_list, group_id, data_dir, - subjects_dir, output_dir): - """Creates a second-level pipeline to merge the Connectome File Format (CFF) outputs from the group-level - MRtrix structural connectivity processing pipeline into a single CFF file for each group. - - Example - ------- - - >>> import nipype.workflows.dmri.connectivity.group_connectivity as groupwork - >>> from nipype.testing import example_data - >>> subjects_dir = '.' - >>> data_dir = '.' - >>> output_dir = '.' 
- >>> group_list = {} - >>> group_list['group1'] = ['subj1', 'subj2'] - >>> group_list['group2'] = ['subj3', 'subj4'] - >>> group_id = 'group1' - >>> l2pipeline = groupwork.create_merge_networks_by_group_workflow(group_list, group_id, data_dir, subjects_dir, output_dir) - >>> l2pipeline.run() # doctest: +SKIP - - Inputs:: - - group_list: Dictionary of subject lists, keyed by group name - group_id: String containing the group name - data_dir: Path to the data directory - subjects_dir: Path to the Freesurfer 'subjects' directory - output_dir: Path for the output files - """ - group_infosource = pe.Node( - interface=util.IdentityInterface(fields=['group_id']), - name="group_infosource") - group_infosource.inputs.group_id = group_id - - l2infosource = pe.Node( - interface=util.IdentityInterface(fields=['group_id']), - name='l2infosource') - - l2source = pe.Node( - nio.DataGrabber(infields=['group_id'], outfields=['CFFfiles']), - name='l2source') - l2source.inputs.template_args = dict(CFFfiles=[['group_id']]) - l2source.inputs.template = op.join(output_dir, '%s/cff/*/connectome.cff') - l2source.inputs.base_directory = data_dir - l2source.inputs.sort_filelist = True - - l2inputnode = pe.Node( - interface=util.IdentityInterface(fields=['CFFfiles']), - name='l2inputnode') - MergeCNetworks = pe.Node( - interface=cmtk.MergeCNetworks(), name="MergeCNetworks") - - l2datasink = pe.Node(interface=nio.DataSink(), name="l2datasink") - l2datasink.inputs.base_directory = output_dir - l2datasink.inputs.container = group_id - - l2pipeline = pe.Workflow(name="l2output_" + group_id) - l2pipeline.base_dir = op.join(output_dir, 'l2output') - l2pipeline.connect([(group_infosource, l2infosource, [('group_id', - 'group_id')])]) - - l2pipeline.connect([ - (l2infosource, l2source, [('group_id', 'group_id')]), - (l2source, l2inputnode, [('CFFfiles', 'CFFfiles')]), - ]) - - l2pipeline.connect([(l2inputnode, MergeCNetworks, [('CFFfiles', - 'in_files')])]) - l2pipeline.connect([(group_infosource, MergeCNetworks, [('group_id', - 'out_file')])]) - l2pipeline.connect([(MergeCNetworks, l2datasink, [('connectome_file', - '@l2output')])]) - l2pipeline.connect([(group_infosource, l2datasink, [('group_id', - '@group_id')])]) - return l2pipeline - - -def create_merge_network_results_by_group_workflow( - group_list, group_id, data_dir, subjects_dir, output_dir): - """Creates a second-level pipeline to merge the Connectome File Format (CFF) outputs from the group-level - MRtrix structural connectivity processing pipeline into a single CFF file for each group. - - Example - ------- - - >>> import nipype.workflows.dmri.connectivity.group_connectivity as groupwork - >>> from nipype.testing import example_data - >>> subjects_dir = '.' - >>> data_dir = '.' - >>> output_dir = '.' 
- >>> group_list = {} - >>> group_list['group1'] = ['subj1', 'subj2'] - >>> group_list['group2'] = ['subj3', 'subj4'] - >>> group_id = 'group1' - >>> l2pipeline = groupwork.create_merge_network_results_by_group_workflow(group_list, group_id, data_dir, subjects_dir, output_dir) - >>> l2pipeline.run() # doctest: +SKIP - - Inputs:: - - group_list: Dictionary of subject lists, keyed by group name - group_id: String containing the group name - data_dir: Path to the data directory - subjects_dir: Path to the Freesurfer 'subjects' directory - output_dir: Path for the output files - """ - group_infosource = pe.Node( - interface=util.IdentityInterface(fields=['group_id']), - name="group_infosource") - group_infosource.inputs.group_id = group_id - - l2infosource = pe.Node( - interface=util.IdentityInterface(fields=['group_id', 'merged']), - name='l2infosource') - - l2source = pe.Node( - nio.DataGrabber( - infields=['group_id'], - outfields=[ - 'CFFfiles', 'CSVmatrices', 'CSVfibers', 'CSVnodal', 'CSVglobal' - ]), - name='l2source') - - l2source.inputs.template_args = dict( - CFFfiles=[['group_id']], - CSVmatrices=[['group_id']], - CSVnodal=[['group_id']], - CSVglobal=[['group_id']], - CSVfibers=[['group_id']]) - l2source.inputs.base_directory = data_dir - l2source.inputs.template = '%s/%s' - l2source.inputs.field_template = dict( - CFFfiles=op.join(output_dir, '%s/cff/*/connectome.cff'), - CSVmatrices=op.join(output_dir, '%s/cmatrices_csv/*/*.csv'), - CSVnodal=op.join(output_dir, '%s/nxcsv/*/*nodal*.csv'), - CSVglobal=op.join(output_dir, '%s/nxcsv/*/*global*.csv'), - CSVfibers=op.join(output_dir, '%s/fiber_csv/*/*fibers*.csv')) - l2source.inputs.sort_filelist = True - - l2inputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - 'CFFfiles', 'CSVfibers', 'CSVmatrices', 'CSVnodal', 'CSVglobal', - 'network_file' - ]), - name='l2inputnode') - - MergeCNetworks = pe.Node( - interface=cmtk.MergeCNetworks(), name="MergeCNetworks") - - l2datasink = pe.Node(interface=nio.DataSink(), name="l2datasink") - l2datasink.inputs.base_directory = output_dir - l2datasink.inputs.container = group_id - - l2pipeline = pe.Workflow(name="l2output_" + group_id) - l2pipeline.base_dir = op.join(output_dir, 'l2output') - l2pipeline.connect([(group_infosource, l2infosource, [('group_id', - 'group_id')])]) - - l2pipeline.connect([ - (l2infosource, l2source, [('group_id', 'group_id')]), - (l2source, l2inputnode, [('CFFfiles', 'CFFfiles')]), - (l2source, l2inputnode, [('CSVmatrices', 'CSVmatrices')]), - (l2source, l2inputnode, [('CSVnodal', 'CSVnodal')]), - (l2source, l2inputnode, [('CSVglobal', 'CSVglobal')]), - (l2source, l2inputnode, [('CSVfibers', 'CSVfibers')]), - ]) - - l2pipeline.connect([(l2inputnode, MergeCNetworks, [('CFFfiles', - 'in_files')])]) - - l2pipeline.connect([(group_infosource, MergeCNetworks, [('group_id', - 'out_file')])]) - l2pipeline.connect([(MergeCNetworks, l2datasink, [('connectome_file', - '@l2output')])]) - - AddCSVColumn_node = pe.Node( - interface=misc.AddCSVColumn(), name="AddCSVColumn_node") - AddCSVColumn_node.inputs.extra_column_heading = 'group' - AddCSVColumn_global = AddCSVColumn_node.clone(name="AddCSVColumn_global") - AddCSVColumn_matrices = AddCSVColumn_node.clone( - name="AddCSVColumn_matrices") - AddCSVColumn_fibers = AddCSVColumn_node.clone(name="AddCSVColumn_fibers") - - concat_csv_interface = Function( - input_names=["in_files"], - output_names=["out_name"], - function=concatcsv) - - concat_node_csvs = pe.Node( - interface=concat_csv_interface, name='concat_node_csvs') - 
concat_global_csvs = pe.Node( - interface=concat_csv_interface, name='concat_global_csvs') - concat_matrix_csvs = pe.Node( - interface=concat_csv_interface, name='concat_matrix_csvs') - concat_fiber_csvs = pe.Node( - interface=concat_csv_interface, name='concat_fiber_csvs') - - l2pipeline.connect([(l2inputnode, concat_node_csvs, [('CSVnodal', - 'in_files')])]) - l2pipeline.connect([(concat_node_csvs, AddCSVColumn_node, [('out_name', - 'in_file')])]) - l2pipeline.connect([(group_infosource, AddCSVColumn_node, - [('group_id', 'extra_field')])]) - l2pipeline.connect([(AddCSVColumn_node, l2datasink, - [('csv_file', '@l2output.node_csv')])]) - l2pipeline.connect([(group_infosource, l2datasink, [('group_id', - '@group_id')])]) - - l2pipeline.connect([(l2inputnode, concat_global_csvs, [('CSVglobal', - 'in_files')])]) - l2pipeline.connect([(concat_global_csvs, AddCSVColumn_global, - [('out_name', 'in_file')])]) - l2pipeline.connect([(group_infosource, AddCSVColumn_global, - [('group_id', 'extra_field')])]) - l2pipeline.connect([(AddCSVColumn_global, l2datasink, - [('csv_file', '@l2output.global_csv')])]) - - l2pipeline.connect([(l2inputnode, concat_matrix_csvs, [('CSVmatrices', - 'in_files')])]) - l2pipeline.connect([(concat_matrix_csvs, AddCSVColumn_matrices, - [('out_name', 'in_file')])]) - l2pipeline.connect([(group_infosource, AddCSVColumn_matrices, - [('group_id', 'extra_field')])]) - l2pipeline.connect([(AddCSVColumn_matrices, l2datasink, - [('csv_file', '@l2output.cmatrices_csv')])]) - - l2pipeline.connect([(l2inputnode, concat_fiber_csvs, [('CSVmatrices', - 'in_files')])]) - l2pipeline.connect([(concat_fiber_csvs, AddCSVColumn_fibers, - [('out_name', 'in_file')])]) - l2pipeline.connect([(group_infosource, AddCSVColumn_fibers, - [('group_id', 'extra_field')])]) - l2pipeline.connect([(AddCSVColumn_fibers, l2datasink, - [('csv_file', '@l2output.fibers_csv')])]) - return l2pipeline - - -def create_merge_group_networks_workflow(group_list, - data_dir, - subjects_dir, - output_dir, - title='group'): - """Creates a third-level pipeline to merge the Connectome File Format (CFF) outputs from each group - and combines them into a single CFF file for each group. - - Example - ------- - - >>> import nipype.workflows.dmri.connectivity.group_connectivity as groupwork - >>> from nipype.testing import example_data - >>> subjects_dir = '.' - >>> data_dir = '.' - >>> output_dir = '.' 
- >>> group_list = {} - >>> group_list['group1'] = ['subj1', 'subj2'] - >>> group_list['group2'] = ['subj3', 'subj4'] - >>> l3pipeline = groupwork.create_merge_group_networks_workflow(group_list, data_dir, subjects_dir, output_dir) - >>> l3pipeline.run() # doctest: +SKIP - - Inputs:: - - group_list: Dictionary of subject lists, keyed by group name - data_dir: Path to the data directory - subjects_dir: Path to the Freesurfer 'subjects' directory - output_dir: Path for the output files - title: String to use as a title for the output merged CFF file (default 'group') - """ - l3infosource = pe.Node( - interface=util.IdentityInterface(fields=['group_id']), - name='l3infosource') - l3infosource.inputs.group_id = list(group_list.keys()) - - l3source = pe.Node( - nio.DataGrabber(infields=['group_id'], outfields=['CFFfiles']), - name='l3source') - l3source.inputs.template_args = dict(CFFfiles=[['group_id', 'group_id']]) - l3source.inputs.template = op.join(output_dir, '%s/%s.cff') - l3source.inputs.sort_filelist = True - - l3inputnode = pe.Node( - interface=util.IdentityInterface(fields=['Group_CFFs']), - name='l3inputnode') - - MergeCNetworks_grp = pe.Node( - interface=cmtk.MergeCNetworks(), name="MergeCNetworks_grp") - MergeCNetworks_grp.inputs.out_file = title - - l3datasink = pe.Node(interface=nio.DataSink(), name="l3datasink") - l3datasink.inputs.base_directory = output_dir - - l3pipeline = pe.Workflow(name="l3output") - l3pipeline.base_dir = output_dir - l3pipeline.connect([ - (l3infosource, l3source, [('group_id', 'group_id')]), - (l3source, l3inputnode, [('CFFfiles', 'Group_CFFs')]), - ]) - - l3pipeline.connect([(l3inputnode, MergeCNetworks_grp, [('Group_CFFs', - 'in_files')])]) - l3pipeline.connect([(MergeCNetworks_grp, l3datasink, [('connectome_file', - '@l3output')])]) - return l3pipeline - - -def create_merge_group_network_results_workflow(group_list, - data_dir, - subjects_dir, - output_dir, - title='group'): - """Creates a third-level pipeline to merge the Connectome File Format (CFF) outputs from each group - and combines them into a single CFF file for each group. This version of the third-level pipeline also - concatenates the comma-separated value files for the NetworkX metrics and the connectivity matrices - into single files. - - Example - ------- - - >>> import nipype.workflows.dmri.connectivity.group_connectivity as groupwork - >>> from nipype.testing import example_data - >>> subjects_dir = '.' - >>> data_dir = '.' - >>> output_dir = '.' 
- >>> group_list = {} - >>> group_list['group1'] = ['subj1', 'subj2'] - >>> group_list['group2'] = ['subj3', 'subj4'] - >>> l3pipeline = groupwork.create_merge_group_network_results_workflow(group_list, data_dir, subjects_dir, output_dir) - >>> l3pipeline.run() # doctest: +SKIP - - Inputs:: - - group_list: Dictionary of subject lists, keyed by group name - data_dir: Path to the data directory - subjects_dir: Path to the Freesurfer 'subjects' directory - output_dir: Path for the output files - title: String to use as a title for the output merged CFF file (default 'group') - """ - l3infosource = pe.Node( - interface=util.IdentityInterface(fields=['group_id']), - name='l3infosource') - l3infosource.inputs.group_id = list(group_list.keys()) - - l3source = pe.Node( - nio.DataGrabber( - infields=['group_id'], - outfields=[ - 'CFFfiles', 'CSVnodemetrics', 'CSVglobalmetrics', 'CSVmatrices' - ]), - name='l3source') - l3source.inputs.template_args = dict( - CFFfiles=[['group_id']], - CSVnodemetrics=[['group_id']], - CSVglobalmetrics=[['group_id']], - CSVmatrices=[['group_id']]) - l3source.inputs.template = op.join(output_dir, '%s/%s') - l3source.inputs.sort_filelist = True - - l3source.inputs.field_template = dict( - CFFfiles=op.join(output_dir, '%s/*.cff'), - CSVnodemetrics=op.join(output_dir, '%s/node_csv/*.csv'), - CSVglobalmetrics=op.join(output_dir, '%s/global_csv/*.csv'), - CSVmatrices=op.join(output_dir, '%s/cmatrices_csv/*/*.csv')) - - l3inputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - 'Group_CFFs', 'Group_CSVnodemetrics', 'Group_CSVglobalmetrics', - 'Group_CSVmatrices' - ]), - name='l3inputnode') - - MergeCNetworks_grp = pe.Node( - interface=cmtk.MergeCNetworks(), name="MergeCNetworks_grp") - MergeCNetworks_grp.inputs.out_file = title - - l3datasink = pe.Node(interface=nio.DataSink(), name="l3datasink") - l3datasink.inputs.base_directory = output_dir - - l3pipeline = pe.Workflow(name="l3output") - l3pipeline.base_dir = output_dir - l3pipeline.connect([ - (l3infosource, l3source, [('group_id', 'group_id')]), - (l3source, l3inputnode, [('CFFfiles', 'Group_CFFs')]), - (l3source, l3inputnode, [('CSVnodemetrics', 'Group_CSVnodemetrics')]), - (l3source, l3inputnode, [('CSVglobalmetrics', - 'Group_CSVglobalmetrics')]), - (l3source, l3inputnode, [('CSVmatrices', 'Group_CSVmatrices')]), - ]) - - l3pipeline.connect([(l3inputnode, MergeCNetworks_grp, [('Group_CFFs', - 'in_files')])]) - l3pipeline.connect([(MergeCNetworks_grp, l3datasink, [('connectome_file', - '@l3output')])]) - - concat_csv_interface = Function( - input_names=["in_files"], - output_names=["out_name"], - function=concatcsv) - - concat_node_csvs = pe.Node( - interface=concat_csv_interface, name='concat_node_csvs') - concat_global_csvs = pe.Node( - interface=concat_csv_interface, name='concat_global_csvs') - concat_matrix_csvs = pe.Node( - interface=concat_csv_interface, name='concat_matrix_csvs') - - l3pipeline.connect([(l3inputnode, concat_node_csvs, - [('Group_CSVnodemetrics', 'in_files')])]) - l3pipeline.connect([(concat_node_csvs, l3datasink, - [('out_name', '@l3output.nodal_csv')])]) - - l3pipeline.connect([(l3inputnode, concat_global_csvs, - [('Group_CSVglobalmetrics', 'in_files')])]) - l3pipeline.connect([(concat_global_csvs, l3datasink, - [('out_name', '@l3output.global_csv')])]) - - l3pipeline.connect([(l3inputnode, concat_matrix_csvs, - [('Group_CSVmatrices', 'in_files')])]) - l3pipeline.connect([(concat_matrix_csvs, l3datasink, - [('out_name', '@l3output.csvmatrices')])]) - return l3pipeline - - -def 
create_average_networks_by_group_workflow(group_list, - data_dir, - subjects_dir, - output_dir, - title='group_average'): - """Creates a fourth-level pipeline to average the networks for two groups and merge them into a single - CFF file. This pipeline will also output the average networks in .gexf format, for visualization in other - graph viewers, such as Gephi. - - Example - ------- - - >>> import nipype.workflows.dmri.connectivity.group_connectivity as groupwork - >>> from nipype.testing import example_data - >>> subjects_dir = '.' - >>> data_dir = '.' - >>> output_dir = '.' - >>> group_list = {} - >>> group_list['group1'] = ['subj1', 'subj2'] - >>> group_list['group2'] = ['subj3', 'subj4'] - >>> l4pipeline = groupwork.create_average_networks_by_group_workflow(group_list, data_dir, subjects_dir, output_dir) - >>> l4pipeline.run() # doctest: +SKIP - - Inputs:: - - group_list: Dictionary of subject lists, keyed by group name - data_dir: Path to the data directory - subjects_dir: Path to the Freesurfer 'subjects' directory - output_dir: Path for the output files - title: String to use as a title for the output merged CFF file (default 'group') - """ - l4infosource = pe.Node( - interface=util.IdentityInterface(fields=['group_id1', 'group_id2']), - name='l4infosource') - try: - l4infosource.inputs.group_id1 = list(group_list.keys())[0] - l4infosource.inputs.group_id2 = list(group_list.keys())[1] - except IndexError as e: - raise_from( - Exception( - 'The create_average_networks_by_group_workflow requires 2 groups' - ), e) - - l4info = dict( - networks=[['group_id', '']], - CMatrices=[['group_id', '']], - fibmean=[['group_id', 'mean_fiber_length']], - fibdev=[['group_id', 'fiber_length_std']]) - - l4source_grp1 = pe.Node( - nio.DataGrabber(infields=['group_id'], outfields=list(l4info.keys())), - name='l4source_grp1') - l4source_grp1.inputs.template = '%s/%s' - l4source_grp1.inputs.field_template = dict( - networks=op.join(output_dir, '%s/networks/*/*%s*intersections*.pck'), - CMatrices=op.join(output_dir, '%s/cmatrix/*/*%s*.mat'), - fibmean=op.join(output_dir, '%s/mean_fiber_length/*/*%s*.mat'), - fibdev=op.join(output_dir, '%s/fiber_length_std/*/*%s*.mat')) - l4source_grp1.inputs.base_directory = output_dir - l4source_grp1.inputs.template_args = l4info - l4source_grp1.inputs.sort_filelist = True - - l4source_grp2 = l4source_grp1.clone(name='l4source_grp2') - - l4inputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - 'networks_grp1', 'networks_grp2', 'CMatrices_grp1', - 'CMatrices_grp2', 'fibmean_grp1', 'fibmean_grp2', 'fibdev_grp1', - 'fibdev_grp2' - ]), - name='l4inputnode') - - average_networks_grp1 = pe.Node( - interface=cmtk.AverageNetworks(), name='average_networks_grp1') - average_networks_grp2 = average_networks_grp1.clone( - 'average_networks_grp2') - - averagecff = pe.Node(interface=cmtk.CFFConverter(), name="averagecff") - averagecff.inputs.out_file = title - - merge_gpickled_averages = pe.Node( - interface=util.Merge(2), name='merge_gpickled_averages') - merge_gexf_averages = merge_gpickled_averages.clone('merge_gexf_averages') - - l4datasink = pe.Node(interface=nio.DataSink(), name="l4datasink") - l4datasink.inputs.base_directory = output_dir - - l4pipeline = pe.Workflow(name="l4output") - l4pipeline.base_dir = output_dir - l4pipeline.connect([ - (l4infosource, l4source_grp1, [('group_id1', 'group_id')]), - (l4infosource, l4source_grp2, [('group_id2', 'group_id')]), - (l4source_grp1, l4inputnode, [('CMatrices', 'CMatrices_grp1')]), - (l4source_grp2, l4inputnode, 
[('CMatrices', 'CMatrices_grp2')]), - (l4source_grp1, l4inputnode, [('networks', 'networks_grp1')]), - (l4source_grp2, l4inputnode, [('networks', 'networks_grp2')]), - (l4source_grp1, l4inputnode, [('fibmean', 'fibmean_grp1')]), - (l4source_grp2, l4inputnode, [('fibmean', 'fibmean_grp2')]), - (l4source_grp1, l4inputnode, [('fibdev', 'fibdev_grp1')]), - (l4source_grp2, l4inputnode, [('fibdev', 'fibdev_grp2')]), - ]) - - l4pipeline.connect([(l4inputnode, average_networks_grp1, [('networks_grp1', - 'in_files')])]) - l4pipeline.connect([(l4infosource, average_networks_grp1, [('group_id1', - 'group_id')])]) - - l4pipeline.connect([(l4inputnode, average_networks_grp2, [('networks_grp2', - 'in_files')])]) - l4pipeline.connect([(l4infosource, average_networks_grp2, [('group_id2', - 'group_id')])]) - - l4pipeline.connect([(average_networks_grp1, merge_gpickled_averages, - [('gpickled_groupavg', 'in1')])]) - l4pipeline.connect([(average_networks_grp2, merge_gpickled_averages, - [('gpickled_groupavg', 'in2')])]) - - l4pipeline.connect([(average_networks_grp1, merge_gexf_averages, - [('gexf_groupavg', 'in1')])]) - l4pipeline.connect([(average_networks_grp2, merge_gexf_averages, - [('gexf_groupavg', 'in2')])]) - - l4pipeline.connect([(merge_gpickled_averages, l4datasink, - [('out', '@l4output.gpickled')])]) - l4pipeline.connect([(merge_gpickled_averages, averagecff, - [('out', 'gpickled_networks')])]) - l4pipeline.connect([(averagecff, l4datasink, [('connectome_file', - '@l4output.averagecff')])]) - - l4pipeline.connect([(merge_gexf_averages, l4datasink, - [('out', '@l4output.gexf')])]) - return l4pipeline diff --git a/nipype/workflows/dmri/connectivity/nx.py b/nipype/workflows/dmri/connectivity/nx.py deleted file mode 100644 index 95159dae8f..0000000000 --- a/nipype/workflows/dmri/connectivity/nx.py +++ /dev/null @@ -1,178 +0,0 @@ -# -*- coding: utf-8 -*- -from ....pipeline import engine as pe -from ....interfaces import utility as util -from ....interfaces import cmtk as cmtk -from ....algorithms import misc as misc -from ....algorithms.misc import remove_identical_paths -from .group_connectivity import pullnodeIDs - - -def add_global_to_filename(in_file): - from nipype.utils.filemanip import split_filename - path, name, ext = split_filename(in_file) - return name + '_global' + ext - - -def add_nodal_to_filename(in_file): - from nipype.utils.filemanip import split_filename - path, name, ext = split_filename(in_file) - return name + '_nodal' + ext - - -def create_networkx_pipeline(name="networkx", extra_column_heading="subject"): - """Creates a workflow to calculate various graph measures (via NetworkX) on - an input network. The output measures are then converted to comma-separated value - text files, and an extra column / field is also added. Typically, the user would - connect the subject name to this field. 
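A hypothetical wiring sketch of that last point, using assumed node and workflow names and the module path as it exists before this removal: the subject identifier reaches inputnode.extra_field from an upstream identity node instead of being set by hand as in the doctest below (inputnode.network_file would be connected from an upstream result in the same way).

from nipype.pipeline import engine as pe
from nipype.interfaces import utility as util
from nipype.workflows.dmri.connectivity.nx import create_networkx_pipeline

# iterate the graph-metric pipeline over subjects, labelling each CSV row
subj_info = pe.Node(util.IdentityInterface(fields=['subject_id']), name='subj_info')
subj_info.iterables = ('subject_id', ['subj1', 'subj2'])

nx_pipe = create_networkx_pipeline(name='networkx', extra_column_heading='subject')

meta = pe.Workflow(name='per_subject_metrics')
meta.connect([(subj_info, nx_pipe, [('subject_id', 'inputnode.extra_field')])])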
- - Example - ------- - - >>> from nipype.workflows.dmri.connectivity.nx import create_networkx_pipeline - >>> nx = create_networkx_pipeline("networkx", "subject_id") - >>> nx.inputs.inputnode.extra_field = 'subj1' - >>> nx.inputs.inputnode.network_file = 'subj1.pck' - >>> nx.run() # doctest: +SKIP - - Inputs:: - - inputnode.extra_field - inputnode.network_file - - Outputs:: - - outputnode.network_files - outputnode.csv_files - outputnode.matlab_files - - """ - inputnode = pe.Node( - interface=util.IdentityInterface( - fields=["extra_field", "network_file"]), - name="inputnode") - - pipeline = pe.Workflow(name=name) - - ntwkMetrics = pe.Node( - interface=cmtk.NetworkXMetrics(), name="NetworkXMetrics") - Matlab2CSV_node = pe.Node( - interface=misc.Matlab2CSV(), name="Matlab2CSV_node") - MergeCSVFiles_node = pe.Node( - interface=misc.MergeCSVFiles(), name="MergeCSVFiles_node") - MergeCSVFiles_node.inputs.extra_column_heading = extra_column_heading - - Matlab2CSV_global = Matlab2CSV_node.clone(name="Matlab2CSV_global") - MergeCSVFiles_global = MergeCSVFiles_node.clone( - name="MergeCSVFiles_global") - MergeCSVFiles_global.inputs.extra_column_heading = extra_column_heading - - mergeNetworks = pe.Node(interface=util.Merge(2), name="mergeNetworks") - mergeCSVs = mergeNetworks.clone("mergeCSVs") - - pipeline.connect([(inputnode, ntwkMetrics, [("network_file", "in_file")])]) - pipeline.connect([(ntwkMetrics, Matlab2CSV_node, [("node_measures_matlab", - "in_file")])]) - pipeline.connect([(ntwkMetrics, Matlab2CSV_global, - [("global_measures_matlab", "in_file")])]) - - pipeline.connect([(Matlab2CSV_node, MergeCSVFiles_node, [("csv_files", - "in_files")])]) - pipeline.connect([(inputnode, MergeCSVFiles_node, - [(("extra_field", add_nodal_to_filename), - "out_file")])]) - pipeline.connect([(inputnode, MergeCSVFiles_node, [("extra_field", - "extra_field")])]) - pipeline.connect([(inputnode, MergeCSVFiles_node, - [(("network_file", pullnodeIDs), "row_headings")])]) - - pipeline.connect([(Matlab2CSV_global, MergeCSVFiles_global, - [("csv_files", "in_files")])]) - pipeline.connect([(Matlab2CSV_global, MergeCSVFiles_global, - [(("csv_files", remove_identical_paths), - "column_headings")])]) - # MergeCSVFiles_global.inputs.row_heading_title = 'metric' - # MergeCSVFiles_global.inputs.column_headings = ['average'] - - pipeline.connect([(inputnode, MergeCSVFiles_global, - [(("extra_field", add_global_to_filename), - "out_file")])]) - pipeline.connect([(inputnode, MergeCSVFiles_global, [("extra_field", - "extra_field")])]) - - pipeline.connect([(inputnode, mergeNetworks, [("network_file", "in1")])]) - pipeline.connect([(ntwkMetrics, mergeNetworks, [("gpickled_network_files", - "in2")])]) - - outputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - "network_files", "csv_files", "matlab_files", "node_csv", - "global_csv" - ]), - name="outputnode") - - pipeline.connect([(MergeCSVFiles_node, outputnode, [("csv_file", - "node_csv")])]) - pipeline.connect([(MergeCSVFiles_global, outputnode, [("csv_file", - "global_csv")])]) - - pipeline.connect([(MergeCSVFiles_node, mergeCSVs, [("csv_file", "in1")])]) - pipeline.connect([(MergeCSVFiles_global, mergeCSVs, [("csv_file", - "in2")])]) - pipeline.connect([(mergeNetworks, outputnode, [("out", "network_files")])]) - pipeline.connect([(mergeCSVs, outputnode, [("out", "csv_files")])]) - pipeline.connect([(ntwkMetrics, outputnode, [("matlab_matrix_files", - "matlab_files")])]) - return pipeline - - -def create_cmats_to_csv_pipeline(name="cmats_to_csv", - 
extra_column_heading="subject"): - """Creates a workflow to convert the outputs from CreateMatrix into a single - comma-separated value text file. An extra column / field is also added to the - text file. Typically, the user would connect the subject name to this field. - - Example - ------- - - >>> from nipype.workflows.dmri.connectivity.nx import create_cmats_to_csv_pipeline - >>> csv = create_cmats_to_csv_pipeline("cmats_to_csv", "subject_id") - >>> csv.inputs.inputnode.extra_field = 'subj1' - >>> csv.inputs.inputnode.matlab_matrix_files = ['subj1_cmatrix.mat', 'subj1_mean_fiber_length.mat', 'subj1_median_fiber_length.mat', 'subj1_fiber_length_std.mat'] - >>> csv.run() # doctest: +SKIP - - Inputs:: - - inputnode.extra_field - inputnode.matlab_matrix_files - - Outputs:: - - outputnode.csv_file - - """ - inputnode = pe.Node( - interface=util.IdentityInterface( - fields=["extra_field", "matlab_matrix_files"]), - name="inputnode") - - pipeline = pe.Workflow(name=name) - - Matlab2CSV = pe.MapNode( - interface=misc.Matlab2CSV(), name="Matlab2CSV", iterfield=["in_file"]) - MergeCSVFiles = pe.Node( - interface=misc.MergeCSVFiles(), name="MergeCSVFiles") - MergeCSVFiles.inputs.extra_column_heading = extra_column_heading - - pipeline.connect([(inputnode, Matlab2CSV, [("matlab_matrix_files", - "in_file")])]) - pipeline.connect([(Matlab2CSV, MergeCSVFiles, [("csv_files", - "in_files")])]) - pipeline.connect([(inputnode, MergeCSVFiles, [("extra_field", - "extra_field")])]) - - outputnode = pe.Node( - interface=util.IdentityInterface(fields=["csv_file"]), - name="outputnode") - - pipeline.connect([(MergeCSVFiles, outputnode, [("csv_file", "csv_file")])]) - return pipeline diff --git a/nipype/workflows/dmri/dipy/__init__.py b/nipype/workflows/dmri/dipy/__init__.py deleted file mode 100644 index 354ba7a7e6..0000000000 --- a/nipype/workflows/dmri/dipy/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# -*- coding: utf-8 -*- -# coding: utf-8 -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: - -from __future__ import absolute_import -from .denoise import nlmeans_pipeline diff --git a/nipype/workflows/dmri/dipy/denoise.py b/nipype/workflows/dmri/dipy/denoise.py deleted file mode 100644 index a45f507b3c..0000000000 --- a/nipype/workflows/dmri/dipy/denoise.py +++ /dev/null @@ -1,121 +0,0 @@ -# -*- coding: utf-8 -*- -# coding: utf-8 -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: - -from builtins import range -from ....pipeline import engine as pe -from ....interfaces import utility as niu -from ....interfaces import dipy - - -def nlmeans_pipeline(name='Denoise', - params={ - 'patch_radius': 1, - 'block_radius': 5 - }): - """ - Workflow that performs nlmeans denoising - - Example - ------- - - >>> from nipype.workflows.dmri.dipy.denoise import nlmeans_pipeline - >>> denoise = nlmeans_pipeline() - >>> denoise.inputs.inputnode.in_file = 'diffusion.nii' - >>> denoise.inputs.inputnode.in_mask = 'mask.nii' - >>> denoise.run() # doctest: +SKIP - - - """ - inputnode = pe.Node( - niu.IdentityInterface(fields=['in_file', 'in_mask']), name='inputnode') - outputnode = pe.Node( - niu.IdentityInterface(fields=['out_file']), name='outputnode') - - nmask = pe.Node( - niu.Function( - input_names=['in_file', 'in_mask'], - output_names=['out_file'], - function=bg_mask), - name='NoiseMsk') - nlmeans = pe.Node(dipy.Denoise(**params), name='NLMeans') - - wf = pe.Workflow(name=name) - 
wf.connect([(inputnode, nmask, [ - ('in_file', 'in_file'), ('in_mask', 'in_mask') - ]), (inputnode, nlmeans, [('in_file', 'in_file'), ('in_mask', 'in_mask')]), - (nmask, nlmeans, [('out_file', 'noise_mask')]), - (nlmeans, outputnode, [('out_file', 'out_file')])]) - return wf - - -def csf_mask(in_file, in_mask, out_file=None): - """ - Artesanal mask of csf in T2w-like images - """ - import nibabel as nb - import numpy as np - from scipy.ndimage import binary_erosion, binary_opening, label - import scipy.ndimage as nd - import os.path as op - from nipype.utils import NUMPY_MMAP - - if out_file is None: - fname, ext = op.splitext(op.basename(in_file)) - if ext == ".gz": - fname, ext2 = op.splitext(fname) - ext = ext2 + ext - out_file = op.abspath("%s_csfmask%s" % (fname, ext)) - - im = nb.load(in_file, mmap=NUMPY_MMAP) - hdr = im.header.copy() - hdr.set_data_dtype(np.uint8) - hdr.set_xyzt_units('mm') - imdata = im.get_data() - msk = nb.load(in_mask, mmap=NUMPY_MMAP).get_data() - msk = binary_erosion(msk, structure=np.ones((15, 15, 10))).astype(np.uint8) - thres = np.percentile(imdata[msk > 0].reshape(-1), 90.0) - imdata[imdata < thres] = 0 - imdata = imdata * msk - imdata[imdata > 0] = 1 - imdata = binary_opening( - imdata, structure=np.ones((2, 2, 2))).astype(np.uint8) - - label_im, nb_labels = label(imdata) - sizes = nd.sum(imdata, label_im, list(range(nb_labels + 1))) - mask_size = sizes != sizes.max() - remove_pixel = mask_size[label_im] - label_im[remove_pixel] = 0 - label_im[label_im > 0] = 1 - nb.Nifti1Image(label_im.astype(np.uint8), im.affine, - hdr).to_filename(out_file) - return out_file - - -def bg_mask(in_file, in_mask, out_file=None): - """ - Rough mask of background from brain masks - """ - import nibabel as nb - import numpy as np - from scipy.ndimage import binary_dilation - import scipy.ndimage as nd - import os.path as op - from nipype.utils import NUMPY_MMAP - - if out_file is None: - fname, ext = op.splitext(op.basename(in_file)) - if ext == ".gz": - fname, ext2 = op.splitext(fname) - ext = ext2 + ext - out_file = op.abspath("%s_bgmask%s" % (fname, ext)) - - im = nb.load(in_file, mmap=NUMPY_MMAP) - hdr = im.header.copy() - hdr.set_data_dtype(np.uint8) - hdr.set_xyzt_units('mm') - msk = nb.load(in_mask, mmap=NUMPY_MMAP).get_data() - msk = 1 - binary_dilation(msk, structure=np.ones((20, 20, 20))) - nb.Nifti1Image(msk.astype(np.uint8), im.affine, hdr).to_filename(out_file) - return out_file diff --git a/nipype/workflows/dmri/dtitk/__init__.py b/nipype/workflows/dmri/dtitk/__init__.py deleted file mode 100644 index 02dbf25549..0000000000 --- a/nipype/workflows/dmri/dtitk/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# -*- coding: utf-8 -*- -# coding: utf-8 -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: - -from __future__ import absolute_import -from .tensor_registration import (affine_tensor_pipeline, - diffeomorphic_tensor_pipeline) diff --git a/nipype/workflows/dmri/dtitk/tensor_registration.py b/nipype/workflows/dmri/dtitk/tensor_registration.py deleted file mode 100644 index faae608a44..0000000000 --- a/nipype/workflows/dmri/dtitk/tensor_registration.py +++ /dev/null @@ -1,144 +0,0 @@ -# -*- coding: utf-8 -*- -# coding: utf-8 -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: - -from ....pipeline import engine as pe -from ....interfaces import utility as niu -from ....interfaces import dtitk - - -def affine_tensor_pipeline(name='AffTen'): - - 
""" - Workflow that performs a linear registration - (Rigid followed by Affine) - - Example - ------- - - >>> from nipype.workflows.dmri.dtitk.tensor_registration import affine_tensor_pipeline - >>> affine = affine_tensor_pipeline() - >>> affine.inputs.inputnode.fixed_file = 'im1.nii' - >>> affine.inputs.inputnode.moving_file = 'im2.nii' - >>> affine.run() # doctest: +SKIP - - - """ - inputnode = pe.Node(niu.IdentityInterface( - fields=['fixed_file', 'moving_file']), - name='inputnode') - outputnode = pe.Node(niu.IdentityInterface( - fields=['out_file', 'out_file_xfm']), - name='outputnode') - - rigid_node = pe.Node(dtitk.Rigid(), name='rigid_node') - affine_node = pe.Node(dtitk.Affine(), name='affine_node') - - wf = pe.Workflow(name=name) - - wf.connect(inputnode, 'fixed_file', rigid_node, 'fixed_file') - wf.connect(inputnode, 'moving_file', rigid_node, 'moving_file') - wf.connect(rigid_node, 'out_file_xfm', affine_node, 'initialize_xfm') - wf.connect(inputnode, 'fixed_file', affine_node, 'fixed_file') - wf.connect(inputnode, 'moving_file', affine_node, 'moving_file') - wf.connect(affine_node, 'out_file', outputnode, 'out_file') - wf.connect(affine_node, 'out_file_xfm', outputnode, 'out_file_xfm') - - return wf - - -def diffeomorphic_tensor_pipeline(name='DiffeoTen', - params={'array_size': (128, 128, 64)}): - """ - Workflow that performs a diffeomorphic registration - (Rigid and Affine followed by Diffeomorphic) - Note: the requirements for a diffeomorphic registration specify that - the dimension 0 is a power of 2 so images are resliced prior to - registration. Remember to move origin and reslice prior to applying xfm to - another file! - - Example - ------- - - >>> from nipype.workflows.dmri.dtitk.tensor_registration import diffeomorphic_tensor_pipeline - >>> diffeo = diffeomorphic_tensor_pipeline() - >>> diffeo.inputs.inputnode.fixed_file = 'im1.nii' - >>> diffeo.inputs.inputnode.moving_file = 'im2.nii' - >>> diffeo.run() # doctest: +SKIP - - - """ - inputnode = pe.Node(niu.IdentityInterface( - fields=['fixed_file', 'moving_file']), - name='inputnode') - outputnode = pe.Node(niu.IdentityInterface( - fields=['out_file', 'out_file_xfm', - 'fixed_resliced', 'moving_resliced']), - name='outputnode') - origin_node_fixed = pe.Node(dtitk.TVAdjustVoxSp(origin=(0, 0, 0)), - name='origin_node_fixed') - origin_node_moving = origin_node_fixed.clone(name='origin_node_moving') - reslice_node_pow2 = pe.Node(dtitk.TVResample( - origin=(0, 0, 0), - array_size=params['array_size']), - name='reslice_node_pow2') - reslice_node_moving = pe.Node(dtitk.TVResample(), - name='reslice_node_moving') - mask_node = pe.Node(dtitk.BinThresh(lower_bound=0.01, upper_bound=100, - inside_value=1, outside_value=0), - name='mask_node') - rigid_node = pe.Node(dtitk.Rigid(), name='rigid_node') - affine_node = pe.Node(dtitk.Affine(), name='affine_node') - diffeo_node = pe.Node(dtitk.Diffeo(n_iters=6, ftol=0.002), - name='diffeo_node') - compose_xfm_node = pe.Node(dtitk.ComposeXfm(), name='compose_xfm_node') - apply_xfm_node = pe.Node(dtitk.DiffeoSymTensor3DVol(), - name='apply_xfm_node') - adjust_vs_node_to_input = pe.Node(dtitk.TVAdjustVoxSp(), - name='adjust_vs_node_to_input') - reslice_node_to_input = pe.Node(dtitk.TVResample(), - name='reslice_node_to_input') - input_fa = pe.Node(dtitk.TVtool(in_flag='fa'), name='input_fa') - - wf = pe.Workflow(name=name) - - # calculate input FA image for origin reference - wf.connect(inputnode, 'fixed_file', input_fa, 'in_file') - # Reslice input images - wf.connect(inputnode, 
'fixed_file', origin_node_fixed, 'in_file') - wf.connect(origin_node_fixed, 'out_file', reslice_node_pow2, 'in_file') - wf.connect(reslice_node_pow2, 'out_file', - reslice_node_moving, 'target_file') - wf.connect(inputnode, 'moving_file', origin_node_moving, 'in_file') - wf.connect(origin_node_moving, 'out_file', reslice_node_moving, 'in_file') - # Rigid registration - wf.connect(reslice_node_pow2, 'out_file', rigid_node, 'fixed_file') - wf.connect(reslice_node_moving, 'out_file', rigid_node, 'moving_file') - # Affine registration - wf.connect(rigid_node, 'out_file_xfm', affine_node, 'initialize_xfm') - wf.connect(reslice_node_pow2, 'out_file', affine_node, 'fixed_file') - wf.connect(reslice_node_moving, 'out_file', affine_node, 'moving_file') - # Diffeo registration - wf.connect(reslice_node_pow2, 'out_file', mask_node, 'in_file') - wf.connect(reslice_node_pow2, 'out_file', diffeo_node, 'fixed_file') - wf.connect(affine_node, 'out_file', diffeo_node, 'moving_file') - wf.connect(mask_node, 'out_file', diffeo_node, 'mask_file') - # Compose transform - wf.connect(diffeo_node, 'out_file_xfm', compose_xfm_node, 'in_df') - wf.connect(affine_node, 'out_file_xfm', compose_xfm_node, 'in_aff') - # Apply transform - wf.connect(reslice_node_moving, 'out_file', apply_xfm_node, 'in_file') - wf.connect(compose_xfm_node, 'out_file', apply_xfm_node, 'transform') - # Move origin and reslice to match original fixed input image - wf.connect(apply_xfm_node, 'out_file', adjust_vs_node_to_input, 'in_file') - wf.connect(input_fa, 'out_file', adjust_vs_node_to_input, 'target_file') - wf.connect(adjust_vs_node_to_input, 'out_file', reslice_node_to_input, 'in_file') - wf.connect(input_fa, 'out_file', reslice_node_to_input, 'target_file') - # Send to output - wf.connect(reslice_node_to_input, 'out_file', outputnode, 'out_file') - wf.connect(compose_xfm_node, 'out_file', outputnode, 'out_file_xfm') - wf.connect(reslice_node_pow2, 'out_file', outputnode, 'fixed_resliced') - wf.connect(reslice_node_moving, 'out_file', outputnode, 'moving_resliced') - - return wf diff --git a/nipype/workflows/dmri/fsl/__init__.py b/nipype/workflows/dmri/fsl/__init__.py deleted file mode 100644 index 66be352b84..0000000000 --- a/nipype/workflows/dmri/fsl/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import -from .dti import create_bedpostx_pipeline, bedpostx_parallel - -from .artifacts import (all_fmb_pipeline, all_peb_pipeline, all_fsl_pipeline, - hmc_pipeline, ecc_pipeline, sdc_fmb, sdc_peb, - remove_bias) - -from .epi import (fieldmap_correction, topup_correction, - create_eddy_correct_pipeline, create_epidewarp_pipeline, - create_dmri_preprocessing) - -from .tbss import (create_tbss_1_preproc, create_tbss_2_reg, - create_tbss_3_postreg, create_tbss_4_prestats, - create_tbss_all, create_tbss_non_FA) diff --git a/nipype/workflows/dmri/fsl/artifacts.py b/nipype/workflows/dmri/fsl/artifacts.py deleted file mode 100644 index 0bdd0a5d66..0000000000 --- a/nipype/workflows/dmri/fsl/artifacts.py +++ /dev/null @@ -1,1061 +0,0 @@ -# -*- coding: utf-8 -*- -# coding: utf-8 -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) - -from ....interfaces.io import JSONFileGrabber -from ....interfaces import utility as niu -from ....interfaces import ants -from ....interfaces import fsl -from ....pipeline import engine as pe -from ...data import 
get_flirt_schedule - -from .utils import ( - b0_indices, - time_avg, - apply_all_corrections, - b0_average, - hmc_split, - dwi_flirt, - eddy_rotate_bvecs, - rotate_bvecs, - insert_mat, - extract_bval, - recompose_dwi, - recompose_xfm, - siemens2rads, - rads2radsec, - demean_image, - cleanup_edge_pipeline, - add_empty_vol, - vsm2warp, - compute_readout, -) - - -def all_fmb_pipeline(name='hmc_sdc_ecc', fugue_params=dict(smooth3d=2.0)): - """ - Builds a pipeline including three artifact corrections: head-motion - correction (HMC), susceptibility-derived distortion correction (SDC), - and Eddy currents-derived distortion correction (ECC). - - The displacement fields from each kind of distortions are combined. Thus, - only one interpolation occurs between input data and result. - - .. warning:: this workflow rotates the gradients table (*b*-vectors) - [Leemans09]_. - - - Examples - -------- - - >>> from nipype.workflows.dmri.fsl.artifacts import all_fmb_pipeline - >>> allcorr = all_fmb_pipeline() - >>> allcorr.inputs.inputnode.in_file = 'epi.nii' - >>> allcorr.inputs.inputnode.in_bval = 'diffusion.bval' - >>> allcorr.inputs.inputnode.in_bvec = 'diffusion.bvec' - >>> allcorr.inputs.inputnode.bmap_mag = 'magnitude.nii' - >>> allcorr.inputs.inputnode.bmap_pha = 'phase.nii' - >>> allcorr.inputs.inputnode.epi_param = 'epi_param.txt' - >>> allcorr.run() # doctest: +SKIP - - """ - inputnode = pe.Node( - niu.IdentityInterface(fields=[ - 'in_file', 'in_bvec', 'in_bval', 'bmap_pha', 'bmap_mag', - 'epi_param' - ]), - name='inputnode') - - outputnode = pe.Node( - niu.IdentityInterface(fields=['out_file', 'out_mask', 'out_bvec']), - name='outputnode') - - list_b0 = pe.Node( - niu.Function( - input_names=['in_bval'], - output_names=['out_idx'], - function=b0_indices), - name='B0indices') - - avg_b0_0 = pe.Node( - niu.Function( - input_names=['in_file', 'index'], - output_names=['out_file'], - function=time_avg), - name='b0_avg_pre') - avg_b0_1 = pe.Node( - niu.Function( - input_names=['in_file', 'index'], - output_names=['out_file'], - function=time_avg), - name='b0_avg_post') - - bet_dwi0 = pe.Node( - fsl.BET(frac=0.3, mask=True, robust=True), name='bet_dwi_pre') - bet_dwi1 = pe.Node( - fsl.BET(frac=0.3, mask=True, robust=True), name='bet_dwi_post') - - hmc = hmc_pipeline() - sdc = sdc_fmb(fugue_params=fugue_params) - ecc = ecc_pipeline() - unwarp = apply_all_corrections() - - wf = pe.Workflow(name=name) - wf.connect( - [(inputnode, hmc, - [('in_file', 'inputnode.in_file'), ('in_bvec', 'inputnode.in_bvec'), - ('in_bval', 'inputnode.in_bval')]), (inputnode, list_b0, - [('in_bval', 'in_bval')]), - (inputnode, avg_b0_0, [('in_file', 'in_file')]), (list_b0, avg_b0_0, - [('out_idx', - 'index')]), - (avg_b0_0, bet_dwi0, [('out_file', 'in_file')]), (bet_dwi0, hmc, [ - ('mask_file', 'inputnode.in_mask') - ]), (hmc, sdc, [('outputnode.out_file', 'inputnode.in_file')]), - (bet_dwi0, sdc, - [('mask_file', 'inputnode.in_mask')]), (inputnode, sdc, [ - ('bmap_pha', 'inputnode.bmap_pha'), - ('bmap_mag', 'inputnode.bmap_mag'), ('epi_param', - 'inputnode.settings') - ]), (list_b0, sdc, [('out_idx', 'inputnode.in_ref')]), (hmc, ecc, [ - ('outputnode.out_xfms', 'inputnode.in_xfms') - ]), (inputnode, ecc, - [('in_file', 'inputnode.in_file'), - ('in_bval', 'inputnode.in_bval')]), (bet_dwi0, ecc, [ - ('mask_file', 'inputnode.in_mask') - ]), (ecc, avg_b0_1, [('outputnode.out_file', - 'in_file')]), (list_b0, avg_b0_1, - [('out_idx', 'index')]), - (avg_b0_1, bet_dwi1, [('out_file', 'in_file')]), (inputnode, unwarp, [ - ('in_file', 
'inputnode.in_dwi') - ]), (hmc, unwarp, - [('outputnode.out_xfms', 'inputnode.in_hmc')]), (ecc, unwarp, [ - ('outputnode.out_xfms', 'inputnode.in_ecc') - ]), (sdc, unwarp, [('outputnode.out_warp', - 'inputnode.in_sdc')]), (hmc, outputnode, [ - ('outputnode.out_bvec', 'out_bvec') - ]), (unwarp, outputnode, - [('outputnode.out_file', - 'out_file')]), (bet_dwi1, outputnode, - [('mask_file', - 'out_mask')])]) - return wf - - -def all_peb_pipeline(name='hmc_sdc_ecc', - epi_params=dict( - echospacing=0.77e-3, - acc_factor=3, - enc_dir='y-', - epi_factor=1), - altepi_params=dict( - echospacing=0.77e-3, - acc_factor=3, - enc_dir='y', - epi_factor=1)): - """ - Builds a pipeline including three artifact corrections: head-motion - correction (HMC), susceptibility-derived distortion correction (SDC), - and Eddy currents-derived distortion correction (ECC). - - .. warning:: this workflow rotates the gradients table (*b*-vectors) - [Leemans09]_. - - - Examples - -------- - - >>> from nipype.workflows.dmri.fsl.artifacts import all_peb_pipeline - >>> allcorr = all_peb_pipeline() - >>> allcorr.inputs.inputnode.in_file = 'epi.nii' - >>> allcorr.inputs.inputnode.alt_file = 'epi_rev.nii' - >>> allcorr.inputs.inputnode.in_bval = 'diffusion.bval' - >>> allcorr.inputs.inputnode.in_bvec = 'diffusion.bvec' - >>> allcorr.run() # doctest: +SKIP - - """ - inputnode = pe.Node( - niu.IdentityInterface( - fields=['in_file', 'in_bvec', 'in_bval', 'alt_file']), - name='inputnode') - - outputnode = pe.Node( - niu.IdentityInterface(fields=['out_file', 'out_mask', 'out_bvec']), - name='outputnode') - - avg_b0_0 = pe.Node( - niu.Function( - input_names=['in_dwi', 'in_bval'], - output_names=['out_file'], - function=b0_average), - name='b0_avg_pre') - avg_b0_1 = pe.Node( - niu.Function( - input_names=['in_dwi', 'in_bval'], - output_names=['out_file'], - function=b0_average), - name='b0_avg_post') - bet_dwi0 = pe.Node( - fsl.BET(frac=0.3, mask=True, robust=True), name='bet_dwi_pre') - bet_dwi1 = pe.Node( - fsl.BET(frac=0.3, mask=True, robust=True), name='bet_dwi_post') - - hmc = hmc_pipeline() - sdc = sdc_peb(epi_params=epi_params, altepi_params=altepi_params) - ecc = ecc_pipeline() - - unwarp = apply_all_corrections() - - wf = pe.Workflow(name=name) - wf.connect( - [(inputnode, hmc, - [('in_file', 'inputnode.in_file'), ('in_bvec', 'inputnode.in_bvec'), - ('in_bval', 'inputnode.in_bval')]), (inputnode, avg_b0_0, - [('in_file', 'in_dwi'), - ('in_bval', 'in_bval')]), - (avg_b0_0, bet_dwi0, [('out_file', 'in_file')]), (bet_dwi0, hmc, [ - ('mask_file', 'inputnode.in_mask') - ]), (hmc, sdc, [('outputnode.out_file', 'inputnode.in_file')]), - (bet_dwi0, sdc, - [('mask_file', 'inputnode.in_mask')]), (inputnode, sdc, [ - ('in_bval', 'inputnode.in_bval'), ('alt_file', - 'inputnode.alt_file') - ]), (inputnode, ecc, [('in_file', 'inputnode.in_file'), - ('in_bval', 'inputnode.in_bval')]), - (bet_dwi0, ecc, [('mask_file', 'inputnode.in_mask')]), (hmc, ecc, [ - ('outputnode.out_xfms', 'inputnode.in_xfms') - ]), (ecc, avg_b0_1, [('outputnode.out_file', - 'in_dwi')]), (inputnode, avg_b0_1, - [('in_bval', 'in_bval')]), - (avg_b0_1, bet_dwi1, [('out_file', 'in_file')]), (inputnode, unwarp, [ - ('in_file', 'inputnode.in_dwi') - ]), (hmc, unwarp, - [('outputnode.out_xfms', 'inputnode.in_hmc')]), (ecc, unwarp, [ - ('outputnode.out_xfms', 'inputnode.in_ecc') - ]), (sdc, unwarp, [('outputnode.out_warp', - 'inputnode.in_sdc')]), (hmc, outputnode, [ - ('outputnode.out_bvec', 'out_bvec') - ]), (unwarp, outputnode, - [('outputnode.out_file', - 
'out_file')]), (bet_dwi1, outputnode, - [('mask_file', - 'out_mask')])]) - return wf - - -def all_fsl_pipeline(name='fsl_all_correct', - epi_params=dict( - echospacing=0.77e-3, acc_factor=3, enc_dir='y-'), - altepi_params=dict( - echospacing=0.77e-3, acc_factor=3, enc_dir='y')): - """ - Workflow that integrates FSL ``topup`` and ``eddy``. - - - .. warning:: this workflow rotates the gradients table (*b*-vectors) - [Leemans09]_. - - - .. warning:: this workflow does not perform jacobian modulation of each - *DWI* [Jones10]_. - - - Examples - -------- - - >>> from nipype.workflows.dmri.fsl.artifacts import all_fsl_pipeline - >>> allcorr = all_fsl_pipeline() - >>> allcorr.inputs.inputnode.in_file = 'epi.nii' - >>> allcorr.inputs.inputnode.alt_file = 'epi_rev.nii' - >>> allcorr.inputs.inputnode.in_bval = 'diffusion.bval' - >>> allcorr.inputs.inputnode.in_bvec = 'diffusion.bvec' - >>> allcorr.run() # doctest: +SKIP - - """ - - inputnode = pe.Node( - niu.IdentityInterface( - fields=['in_file', 'in_bvec', 'in_bval', 'alt_file']), - name='inputnode') - - outputnode = pe.Node( - niu.IdentityInterface(fields=['out_file', 'out_mask', 'out_bvec']), - name='outputnode') - - def gen_index(in_file): - import numpy as np - import nibabel as nb - import os - from nipype.utils import NUMPY_MMAP - out_file = os.path.abspath('index.txt') - vols = nb.load(in_file, mmap=NUMPY_MMAP).get_data().shape[-1] - np.savetxt(out_file, np.ones((vols, )).T) - return out_file - - gen_idx = pe.Node( - niu.Function( - input_names=['in_file'], - output_names=['out_file'], - function=gen_index), - name='gen_index') - avg_b0_0 = pe.Node( - niu.Function( - input_names=['in_dwi', 'in_bval'], - output_names=['out_file'], - function=b0_average), - name='b0_avg_pre') - bet_dwi0 = pe.Node( - fsl.BET(frac=0.3, mask=True, robust=True), name='bet_dwi_pre') - - sdc = sdc_peb(epi_params=epi_params, altepi_params=altepi_params) - ecc = pe.Node(fsl.Eddy(method='jac'), name='fsl_eddy') - rot_bvec = pe.Node( - niu.Function( - input_names=['in_bvec', 'eddy_params'], - output_names=['out_file'], - function=eddy_rotate_bvecs), - name='Rotate_Bvec') - avg_b0_1 = pe.Node( - niu.Function( - input_names=['in_dwi', 'in_bval'], - output_names=['out_file'], - function=b0_average), - name='b0_avg_post') - bet_dwi1 = pe.Node( - fsl.BET(frac=0.3, mask=True, robust=True), name='bet_dwi_post') - - wf = pe.Workflow(name=name) - wf.connect( - [(inputnode, avg_b0_0, [('in_file', 'in_dwi'), ('in_bval', - 'in_bval')]), - (avg_b0_0, bet_dwi0, [('out_file', 'in_file')]), (bet_dwi0, sdc, [ - ('mask_file', 'inputnode.in_mask') - ]), (inputnode, sdc, [('in_file', 'inputnode.in_file'), - ('alt_file', 'inputnode.alt_file'), - ('in_bval', 'inputnode.in_bval')]), - (sdc, ecc, [('topup.out_enc_file', 'in_acqp'), - ('topup.out_fieldcoef', 'in_topup_fieldcoef'), - ('topup.out_movpar', - 'in_topup_movpar')]), (bet_dwi0, ecc, [('mask_file', - 'in_mask')]), - (inputnode, gen_idx, [('in_file', 'in_file')]), (inputnode, ecc, [ - ('in_file', 'in_file'), ('in_bval', 'in_bval'), ('in_bvec', - 'in_bvec') - ]), (gen_idx, ecc, - [('out_file', 'in_index')]), (inputnode, rot_bvec, [ - ('in_bvec', 'in_bvec') - ]), (ecc, rot_bvec, - [('out_parameter', 'eddy_params')]), (ecc, avg_b0_1, [ - ('out_corrected', 'in_dwi') - ]), (inputnode, avg_b0_1, [('in_bval', 'in_bval')]), - (avg_b0_1, bet_dwi1, [('out_file', 'in_file')]), (ecc, outputnode, [ - ('out_corrected', 'out_file') - ]), (rot_bvec, outputnode, - [('out_file', 'out_bvec')]), (bet_dwi1, outputnode, - [('mask_file', 'out_mask')])]) - 
return wf - - -def hmc_pipeline(name='motion_correct'): - """ - HMC stands for head-motion correction. - - Creates a pipeline that corrects for head motion artifacts in dMRI - sequences. - It takes a series of diffusion weighted images and rigidly co-registers - them to one reference image. Finally, the `b`-matrix is rotated accordingly - [Leemans09]_ making use of the rotation matrix obtained by FLIRT. - - Search angles have been limited to 4 degrees, based on results in - [Yendiki13]_. - - A list of rigid transformation matrices is provided, so that transforms - can be chained. - This is useful to correct for artifacts with only one interpolation process - (as previously discussed `here - `_), - and also to compute nuisance regressors as proposed by [Yendiki13]_. - - .. warning:: This workflow rotates the `b`-vectors, so please be advised - that not all the dicom converters ensure the consistency between the - resulting nifti orientation and the gradients table (e.g. dcm2nii - checks it). - - .. admonition:: References - - .. [Leemans09] Leemans A, and Jones DK, `The B-matrix must be rotated - when correcting for subject motion in DTI data - `_, - Magn Reson Med. 61(6):1336-49. 2009. doi: 10.1002/mrm.21890. - - .. [Yendiki13] Yendiki A et al., `Spurious group differences due to head - motion in a diffusion MRI study - `_. - Neuroimage. 21(88C):79-90. 2013. doi: 10.1016/j.neuroimage.2013.11.027 - - Example - ------- - - >>> from nipype.workflows.dmri.fsl.artifacts import hmc_pipeline - >>> hmc = hmc_pipeline() - >>> hmc.inputs.inputnode.in_file = 'diffusion.nii' - >>> hmc.inputs.inputnode.in_bvec = 'diffusion.bvec' - >>> hmc.inputs.inputnode.in_bval = 'diffusion.bval' - >>> hmc.inputs.inputnode.in_mask = 'mask.nii' - >>> hmc.run() # doctest: +SKIP - - Inputs:: - - inputnode.in_file - input dwi file - inputnode.in_mask - weights mask of reference image (a file with data \ -range in [0.0, 1.0], indicating the weight of each voxel when computing the \ -metric. 
- inputnode.in_bval - b-values file - inputnode.in_bvec - gradients file (b-vectors) - inputnode.ref_num (optional, default=0) index of the b0 volume that \ -should be taken as reference - - Outputs:: - - outputnode.out_file - corrected dwi file - outputnode.out_bvec - rotated gradient vectors table - outputnode.out_xfms - list of transformation matrices - - """ - params = dict( - dof=6, - bgvalue=0, - save_log=True, - no_search=True, - # cost='mutualinfo', cost_func='mutualinfo', bins=64, - schedule=get_flirt_schedule('hmc')) - - inputnode = pe.Node( - niu.IdentityInterface( - fields=['in_file', 'ref_num', 'in_bvec', 'in_bval', 'in_mask']), - name='inputnode') - split = pe.Node( - niu.Function( - output_names=['out_ref', 'out_mov', 'out_bval', 'volid'], - input_names=['in_file', 'in_bval', 'ref_num'], - function=hmc_split), - name='SplitDWI') - flirt = dwi_flirt(flirt_param=params) - insmat = pe.Node( - niu.Function( - input_names=['inlist', 'volid'], - output_names=['out'], - function=insert_mat), - name='InsertRefmat') - rot_bvec = pe.Node( - niu.Function( - function=rotate_bvecs, - input_names=['in_bvec', 'in_matrix'], - output_names=['out_file']), - name='Rotate_Bvec') - outputnode = pe.Node( - niu.IdentityInterface(fields=['out_file', 'out_bvec', 'out_xfms']), - name='outputnode') - - wf = pe.Workflow(name=name) - wf.connect([(inputnode, split, - [('in_file', 'in_file'), ('in_bval', 'in_bval'), - ('ref_num', 'ref_num')]), (inputnode, flirt, [ - ('in_mask', 'inputnode.ref_mask') - ]), (split, flirt, [('out_ref', 'inputnode.reference'), - ('out_mov', 'inputnode.in_file'), - ('out_bval', 'inputnode.in_bval')]), - (flirt, insmat, [('outputnode.out_xfms', 'inlist')]), - (split, insmat, [('volid', 'volid')]), (inputnode, rot_bvec, [ - ('in_bvec', 'in_bvec') - ]), (insmat, rot_bvec, - [('out', 'in_matrix')]), (rot_bvec, outputnode, - [('out_file', 'out_bvec')]), - (flirt, outputnode, [('outputnode.out_file', - 'out_file')]), (insmat, outputnode, - [('out', 'out_xfms')])]) - return wf - - -def ecc_pipeline(name='eddy_correct'): - """ - ECC stands for Eddy currents correction. - - Creates a pipeline that corrects for artifacts induced by Eddy currents in - dMRI sequences. - It takes a series of diffusion weighted images and linearly co-registers - them to one reference image (the average of all b0s in the dataset). - - DWIs are also modulated by the determinant of the Jacobian as indicated by - [Jones10]_ and [Rohde04]_. - - A list of rigid transformation matrices can be provided, sourcing from a - :func:`.hmc_pipeline` workflow, to initialize registrations in a *motion - free* framework. - - A list of affine transformation matrices is available as output, so that - transforms can be chained (discussion - `here `_). - - .. admonition:: References - - .. [Jones10] Jones DK, `The signal intensity must be modulated by the - determinant of the Jacobian when correcting for eddy currents in - diffusion MRI - `_, - Proc. ISMRM 18th Annual Meeting, (2010). - - .. [Rohde04] Rohde et al., `Comprehensive Approach for Correction of - Motion and Distortion in Diffusion-Weighted MRI - `_, MRM - 51:103-114 (2004). 
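The Jacobian modulation referenced above can be pictured with a small sketch, assuming one FSL 4x4 affine text file per DWI volume; the per-volume scaling factor is the absolute determinant of that affine (compare the _xfm_jacobian helper near the end of this file, and the ModulateDWIs node below, which applies it through fsl.BinaryMaths(operation='mul')). The function and file names here are illustrative, not the workflow's own helpers.

import numpy as np
import nibabel as nb

def modulate_by_jacobian(dwi_vol, xfm_file, out_file='dwi_mod.nii.gz'):
    # per-volume scaling factor: |det| of the estimated affine
    factor = abs(np.linalg.det(np.loadtxt(xfm_file)))
    img = nb.load(dwi_vol)
    data = np.asanyarray(img.dataobj) * factor
    nb.Nifti1Image(data, img.affine, img.header).to_filename(out_file)
    return out_file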
- - Example - ------- - - >>> from nipype.workflows.dmri.fsl.artifacts import ecc_pipeline - >>> ecc = ecc_pipeline() - >>> ecc.inputs.inputnode.in_file = 'diffusion.nii' - >>> ecc.inputs.inputnode.in_bval = 'diffusion.bval' - >>> ecc.inputs.inputnode.in_mask = 'mask.nii' - >>> ecc.run() # doctest: +SKIP - - Inputs:: - - inputnode.in_file - input dwi file - inputnode.in_mask - weights mask of reference image (a file with data \ -range sin [0.0, 1.0], indicating the weight of each voxel when computing the \ -metric. - inputnode.in_bval - b-values table - inputnode.in_xfms - list of matrices to initialize registration (from \ -head-motion correction) - - Outputs:: - - outputnode.out_file - corrected dwi file - outputnode.out_xfms - list of transformation matrices - """ - - params = dict( - dof=12, - no_search=True, - interp='spline', - bgvalue=0, - schedule=get_flirt_schedule('ecc')) - # cost='normmi', cost_func='normmi', bins=64, - - inputnode = pe.Node( - niu.IdentityInterface( - fields=['in_file', 'in_bval', 'in_mask', 'in_xfms']), - name='inputnode') - avg_b0 = pe.Node( - niu.Function( - input_names=['in_dwi', 'in_bval'], - output_names=['out_file'], - function=b0_average), - name='b0_avg') - pick_dws = pe.Node( - niu.Function( - input_names=['in_dwi', 'in_bval', 'b'], - output_names=['out_file'], - function=extract_bval), - name='ExtractDWI') - pick_dws.inputs.b = 'diff' - - flirt = dwi_flirt(flirt_param=params, excl_nodiff=True) - - mult = pe.MapNode( - fsl.BinaryMaths(operation='mul'), - name='ModulateDWIs', - iterfield=['in_file', 'operand_value']) - thres = pe.MapNode( - fsl.Threshold(thresh=0.0), - iterfield=['in_file'], - name='RemoveNegative') - - split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs') - get_mat = pe.Node( - niu.Function( - input_names=['in_bval', 'in_xfms'], - output_names=['out_files'], - function=recompose_xfm), - name='GatherMatrices') - merge = pe.Node( - niu.Function( - input_names=['in_dwi', 'in_bval', 'in_corrected'], - output_names=['out_file'], - function=recompose_dwi), - name='MergeDWIs') - - outputnode = pe.Node( - niu.IdentityInterface(fields=['out_file', 'out_xfms']), - name='outputnode') - - wf = pe.Workflow(name=name) - wf.connect([ - (inputnode, avg_b0, [('in_file', 'in_dwi'), ('in_bval', 'in_bval')]), - (inputnode, pick_dws, [('in_file', 'in_dwi'), ('in_bval', 'in_bval')]), - (inputnode, merge, - [('in_file', 'in_dwi'), ('in_bval', 'in_bval')]), (inputnode, flirt, [ - ('in_mask', 'inputnode.ref_mask'), - ('in_xfms', 'inputnode.in_xfms'), ('in_bval', 'inputnode.in_bval') - ]), (inputnode, get_mat, [('in_bval', 'in_bval')]), (avg_b0, flirt, [ - ('out_file', 'inputnode.reference') - ]), (pick_dws, flirt, [('out_file', 'inputnode.in_file')]), - (flirt, get_mat, [('outputnode.out_xfms', 'in_xfms')]), (flirt, mult, [ - (('outputnode.out_xfms', _xfm_jacobian), 'operand_value') - ]), (flirt, split, - [('outputnode.out_file', 'in_file')]), (split, mult, [ - ('out_files', 'in_file') - ]), (mult, thres, [('out_file', 'in_file')]), (thres, merge, [ - ('out_file', 'in_corrected') - ]), (get_mat, outputnode, - [('out_files', 'out_xfms')]), (merge, outputnode, - [('out_file', 'out_file')]) - ]) - return wf - - -def sdc_fmb(name='fmb_correction', - interp='Linear', - fugue_params=dict(smooth3d=2.0)): - """ - SDC stands for susceptibility distortion correction. FMB stands for - fieldmap-based. - - The fieldmap based (FMB) method implements SDC by using a mapping of the - B0 field as proposed by [Jezzard95]_. 
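One step of that preparation, converting the phase-difference image to a fieldmap in rad/s before it reaches FUGUE, can be sketched as below; the rescaling to [-pi, pi) is an assumption about the scanner's integer phase units, delta_te is the TE difference in seconds (cf. the epi_defaults further down), and this is not the deleted siemens2rads/rads2radsec pair itself.

import numpy as np
import nibabel as nb

def phasediff_to_radsec(phase_file, delta_te, out_file='fmap_radsec.nii.gz'):
    img = nb.load(phase_file)
    data = np.asanyarray(img.dataobj).astype(np.float32)
    # assumption: the stored phase spans exactly one 2*pi cycle
    data = (data - data.min()) / (data.max() - data.min()) * 2 * np.pi - np.pi
    data /= delta_te  # radians -> radians per second
    nb.Nifti1Image(data, img.affine, img.header).to_filename(out_file)
    return out_file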
This workflow uses the implementation - of FSL (`FUGUE `_). Phase - unwrapping is performed using `PRELUDE - `_ - [Jenkinson03]_. Preparation of the fieldmap is performed reproducing the - script in FSL `fsl_prepare_fieldmap - `_. - - - - Example - ------- - - >>> from nipype.workflows.dmri.fsl.artifacts import sdc_fmb - >>> fmb = sdc_fmb() - >>> fmb.inputs.inputnode.in_file = 'diffusion.nii' - >>> fmb.inputs.inputnode.in_ref = list(range(0, 30, 6)) - >>> fmb.inputs.inputnode.in_mask = 'mask.nii' - >>> fmb.inputs.inputnode.bmap_mag = 'magnitude.nii' - >>> fmb.inputs.inputnode.bmap_pha = 'phase.nii' - >>> fmb.inputs.inputnode.settings = 'epi_param.txt' - >>> fmb.run() # doctest: +SKIP - - .. warning:: Only SIEMENS format fieldmaps are supported. - - .. admonition:: References - - .. [Jezzard95] Jezzard P, and Balaban RS, `Correction for geometric - distortion in echo planar images from B0 field variations - `_, - MRM 34(1):65-73. (1995). doi: 10.1002/mrm.1910340111. - - .. [Jenkinson03] Jenkinson M., `Fast, automated, N-dimensional - phase-unwrapping algorithm `_, - MRM 49(1):193-197, 2003, doi: 10.1002/mrm.10354. - - """ - - epi_defaults = { - 'delta_te': 2.46e-3, - 'echospacing': 0.77e-3, - 'acc_factor': 2, - 'enc_dir': u'AP' - } - - inputnode = pe.Node( - niu.IdentityInterface(fields=[ - 'in_file', 'in_ref', 'in_mask', 'bmap_pha', 'bmap_mag', 'settings' - ]), - name='inputnode') - - outputnode = pe.Node( - niu.IdentityInterface(fields=['out_file', 'out_vsm', 'out_warp']), - name='outputnode') - - r_params = pe.Node( - JSONFileGrabber(defaults=epi_defaults), name='SettingsGrabber') - eff_echo = pe.Node( - niu.Function( - function=_eff_t_echo, - input_names=['echospacing', 'acc_factor'], - output_names=['eff_echo']), - name='EffEcho') - - firstmag = pe.Node(fsl.ExtractROI(t_min=0, t_size=1), name='GetFirst') - n4 = pe.Node(ants.N4BiasFieldCorrection(dimension=3), name='Bias') - bet = pe.Node(fsl.BET(frac=0.4, mask=True), name='BrainExtraction') - dilate = pe.Node( - fsl.maths.MathsCommand(nan2zeros=True, args='-kernel sphere 5 -dilM'), - name='MskDilate') - pha2rads = pe.Node( - niu.Function( - input_names=['in_file'], - output_names=['out_file'], - function=siemens2rads), - name='PreparePhase') - prelude = pe.Node(fsl.PRELUDE(process3d=True), name='PhaseUnwrap') - rad2rsec = pe.Node( - niu.Function( - input_names=['in_file', 'delta_te'], - output_names=['out_file'], - function=rads2radsec), - name='ToRadSec') - - baseline = pe.Node( - niu.Function( - input_names=['in_file', 'index'], - output_names=['out_file'], - function=time_avg), - name='Baseline') - - fmm2b0 = pe.Node( - ants.Registration(output_warped_image=True), name="FMm_to_B0") - fmm2b0.inputs.transforms = ['Rigid'] * 2 - fmm2b0.inputs.transform_parameters = [(1.0, )] * 2 - fmm2b0.inputs.number_of_iterations = [[50], [20]] - fmm2b0.inputs.dimension = 3 - fmm2b0.inputs.metric = ['Mattes', 'Mattes'] - fmm2b0.inputs.metric_weight = [1.0] * 2 - fmm2b0.inputs.radius_or_number_of_bins = [64, 64] - fmm2b0.inputs.sampling_strategy = ['Regular', 'Random'] - fmm2b0.inputs.sampling_percentage = [None, 0.2] - fmm2b0.inputs.convergence_threshold = [1.e-5, 1.e-8] - fmm2b0.inputs.convergence_window_size = [20, 10] - fmm2b0.inputs.smoothing_sigmas = [[6.0], [2.0]] - fmm2b0.inputs.sigma_units = ['vox'] * 2 - fmm2b0.inputs.shrink_factors = [[6], [1]] # ,[1] ] - fmm2b0.inputs.use_estimate_learning_rate_once = [True] * 2 - fmm2b0.inputs.use_histogram_matching = [True] * 2 - fmm2b0.inputs.initial_moving_transform_com = 0 - 
fmm2b0.inputs.collapse_output_transforms = True - fmm2b0.inputs.winsorize_upper_quantile = 0.995 - - applyxfm = pe.Node( - ants.ApplyTransforms(dimension=3, interpolation=interp), - name='FMp_to_B0') - - pre_fugue = pe.Node(fsl.FUGUE(save_fmap=True), name='PreliminaryFugue') - demean = pe.Node( - niu.Function( - input_names=['in_file', 'in_mask'], - output_names=['out_file'], - function=demean_image), - name='DemeanFmap') - - cleanup = cleanup_edge_pipeline() - - addvol = pe.Node( - niu.Function( - input_names=['in_file'], - output_names=['out_file'], - function=add_empty_vol), - name='AddEmptyVol') - - vsm = pe.Node( - fsl.FUGUE(save_shift=True, **fugue_params), name="ComputeVSM") - - split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs') - merge = pe.Node(fsl.Merge(dimension='t'), name='MergeDWIs') - unwarp = pe.MapNode( - fsl.FUGUE(icorr=True, forward_warping=False), - iterfield=['in_file'], - name='UnwarpDWIs') - thres = pe.MapNode( - fsl.Threshold(thresh=0.0), - iterfield=['in_file'], - name='RemoveNegative') - vsm2dfm = vsm2warp() - vsm2dfm.inputs.inputnode.scaling = 1.0 - - wf = pe.Workflow(name=name) - wf.connect([ - (inputnode, r_params, - [('settings', 'in_file')]), (r_params, eff_echo, [ - ('echospacing', 'echospacing'), ('acc_factor', 'acc_factor') - ]), (inputnode, pha2rads, - [('bmap_pha', 'in_file')]), (inputnode, firstmag, - [('bmap_mag', 'in_file')]), - (inputnode, baseline, - [('in_file', 'in_file'), ('in_ref', 'index')]), (firstmag, n4, [ - ('roi_file', 'input_image') - ]), (n4, bet, [('output_image', 'in_file')]), (bet, dilate, [ - ('mask_file', 'in_file') - ]), (pha2rads, prelude, [('out_file', 'phase_file')]), (n4, prelude, [ - ('output_image', 'magnitude_file') - ]), (dilate, prelude, [('out_file', 'mask_file')]), - (r_params, rad2rsec, [('delta_te', 'delta_te')]), (prelude, rad2rsec, [ - ('unwrapped_phase_file', 'in_file') - ]), (baseline, fmm2b0, [('out_file', 'fixed_image')]), (n4, fmm2b0, [ - ('output_image', 'moving_image') - ]), (inputnode, fmm2b0, - [('in_mask', 'fixed_image_mask')]), (dilate, fmm2b0, [ - ('out_file', 'moving_image_mask') - ]), (baseline, applyxfm, [('out_file', 'reference_image')]), - (rad2rsec, applyxfm, - [('out_file', 'input_image')]), (fmm2b0, applyxfm, [ - ('forward_transforms', 'transforms'), ('forward_invert_flags', - 'invert_transform_flags') - ]), (applyxfm, pre_fugue, - [('output_image', 'fmap_in_file')]), (inputnode, pre_fugue, [ - ('in_mask', 'mask_file') - ]), (pre_fugue, demean, - [('fmap_out_file', 'in_file')]), (inputnode, demean, [ - ('in_mask', 'in_mask') - ]), (demean, cleanup, [('out_file', 'inputnode.in_file')]), - (inputnode, cleanup, - [('in_mask', 'inputnode.in_mask')]), (cleanup, addvol, [ - ('outputnode.out_file', 'in_file') - ]), (inputnode, vsm, [('in_mask', 'mask_file')]), (addvol, vsm, [ - ('out_file', 'fmap_in_file') - ]), (r_params, vsm, [('delta_te', 'asym_se_time')]), (eff_echo, vsm, [ - ('eff_echo', 'dwell_time') - ]), (inputnode, split, [('in_file', 'in_file')]), (split, unwarp, [ - ('out_files', 'in_file') - ]), (vsm, unwarp, - [('shift_out_file', 'shift_in_file')]), (r_params, unwarp, [ - (('enc_dir', _fix_enc_dir), 'unwarp_direction') - ]), (unwarp, thres, [('unwarped_file', 'in_file')]), - (thres, merge, [('out_file', 'in_files')]), (r_params, vsm2dfm, [ - (('enc_dir', _fix_enc_dir), 'inputnode.enc_dir') - ]), (merge, vsm2dfm, - [('merged_file', 'inputnode.in_ref')]), (vsm, vsm2dfm, [ - ('shift_out_file', 'inputnode.in_vsm') - ]), (merge, outputnode, - [('merged_file', 'out_file')]), (vsm, 
outputnode, [ - ('shift_out_file', 'out_vsm') - ]), (vsm2dfm, outputnode, [('outputnode.out_warp', - 'out_warp')]) - ]) - return wf - - -def sdc_peb(name='peb_correction', - epi_params=dict( - echospacing=0.77e-3, acc_factor=3, enc_dir='y-', epi_factor=1), - altepi_params=dict( - echospacing=0.77e-3, acc_factor=3, enc_dir='y', epi_factor=1)): - """ - SDC stands for susceptibility distortion correction. PEB stands for - phase-encoding-based. - - The phase-encoding-based (PEB) method implements SDC by acquiring - diffusion images with two different enconding directions [Andersson2003]_. - The most typical case is acquiring with opposed phase-gradient blips - (e.g. *A>>>P* and *P>>>A*, or equivalently, *-y* and *y*) - as in [Chiou2000]_, but it is also possible to use orthogonal - configurations [Cordes2000]_ (e.g. *A>>>P* and *L>>>R*, - or equivalently *-y* and *x*). - This workflow uses the implementation of FSL - (`TOPUP `_). - - Example - ------- - - >>> from nipype.workflows.dmri.fsl.artifacts import sdc_peb - >>> peb = sdc_peb() - >>> peb.inputs.inputnode.in_file = 'epi.nii' - >>> peb.inputs.inputnode.alt_file = 'epi_rev.nii' - >>> peb.inputs.inputnode.in_bval = 'diffusion.bval' - >>> peb.inputs.inputnode.in_mask = 'mask.nii' - >>> peb.run() # doctest: +SKIP - - .. admonition:: References - - .. [Andersson2003] Andersson JL et al., `How to correct susceptibility - distortions in spin-echo echo-planar images: application to diffusion - tensor imaging `_. - Neuroimage. 2003 Oct;20(2):870-88. doi: 10.1016/S1053-8119(03)00336-7 - - .. [Cordes2000] Cordes D et al., Geometric distortion correction in EPI - using two images with orthogonal phase-encoding directions, in Proc. - ISMRM (8), p.1712, Denver, US, 2000. - - .. [Chiou2000] Chiou JY, and Nalcioglu O, A simple method to correct - off-resonance related distortion in echo planar imaging, in Proc. - ISMRM (8), p.1712, Denver, US, 2000. 
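For reference, the readout_times handed to TOPUP below come from compute_readout (defined in the utils module of this package and not shown here). A hedged sketch of the conventional calculation, assuming echospacing is given in seconds, acc_factor is the parallel-imaging acceleration, and epi_factor is the number of phase-encode lines:

def compute_readout_sketch(params):
    # total readout time ~= effective echo spacing * (phase-encode lines - 1);
    # this mirrors the usual convention, not necessarily the deleted helper.
    eff_echospacing = params['echospacing'] / params['acc_factor']
    return eff_echospacing * (params['epi_factor'] - 1)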
- - """ - - inputnode = pe.Node( - niu.IdentityInterface( - fields=['in_file', 'in_bval', 'in_mask', 'alt_file', 'ref_num']), - name='inputnode') - outputnode = pe.Node( - niu.IdentityInterface(fields=['out_file', 'out_vsm', 'out_warp']), - name='outputnode') - - b0_ref = pe.Node(fsl.ExtractROI(t_size=1), name='b0_ref') - b0_alt = pe.Node(fsl.ExtractROI(t_size=1), name='b0_alt') - b0_comb = pe.Node(niu.Merge(2), name='b0_list') - b0_merge = pe.Node(fsl.Merge(dimension='t'), name='b0_merged') - - topup = pe.Node(fsl.TOPUP(), name='topup') - topup.inputs.encoding_direction = [ - epi_params['enc_dir'], altepi_params['enc_dir'] - ] - - readout = compute_readout(epi_params) - topup.inputs.readout_times = [readout, compute_readout(altepi_params)] - - unwarp = pe.Node(fsl.ApplyTOPUP(in_index=[1], method='jac'), name='unwarp') - - # scaling = pe.Node(niu.Function(input_names=['in_file', 'enc_dir'], - # output_names=['factor'], function=_get_zoom), - # name='GetZoom') - # scaling.inputs.enc_dir = epi_params['enc_dir'] - vsm2dfm = vsm2warp() - vsm2dfm.inputs.inputnode.enc_dir = epi_params['enc_dir'] - vsm2dfm.inputs.inputnode.scaling = readout - - wf = pe.Workflow(name=name) - wf.connect([ - (inputnode, b0_ref, [('in_file', 'in_file'), (('ref_num', _checkrnum), - 't_min')]), - (inputnode, b0_alt, [('alt_file', 'in_file'), (('ref_num', _checkrnum), - 't_min')]), - (b0_ref, b0_comb, [('roi_file', 'in1')]), - (b0_alt, b0_comb, [('roi_file', 'in2')]), - (b0_comb, b0_merge, [('out', 'in_files')]), - (b0_merge, topup, [('merged_file', 'in_file')]), - (topup, unwarp, [('out_fieldcoef', 'in_topup_fieldcoef'), - ('out_movpar', 'in_topup_movpar'), - ('out_enc_file', 'encoding_file')]), - (inputnode, unwarp, [('in_file', 'in_files')]), - (unwarp, outputnode, [('out_corrected', 'out_file')]), - # (b0_ref, scaling, [('roi_file', 'in_file')]), - # (scaling, vsm2dfm, [('factor', 'inputnode.scaling')]), - (b0_ref, vsm2dfm, [('roi_file', 'inputnode.in_ref')]), - (topup, vsm2dfm, [('out_field', 'inputnode.in_vsm')]), - (topup, outputnode, [('out_field', 'out_vsm')]), - (vsm2dfm, outputnode, [('outputnode.out_warp', 'out_warp')]) - ]) - return wf - - -def remove_bias(name='bias_correct'): - """ - This workflow estimates a single multiplicative bias field from the - averaged *b0* image, as suggested in [Jeurissen2014]_. - - .. admonition:: References - - .. [Jeurissen2014] Jeurissen B. et al., `Multi-tissue constrained - spherical deconvolution for improved analysis of multi-shell diffusion - MRI data `_. - NeuroImage (2014). 
doi: 10.1016/j.neuroimage.2014.07.061 - - - Example - ------- - - >>> from nipype.workflows.dmri.fsl.artifacts import remove_bias - >>> bias = remove_bias() - >>> bias.inputs.inputnode.in_file = 'epi.nii' - >>> bias.inputs.inputnode.in_bval = 'diffusion.bval' - >>> bias.inputs.inputnode.in_mask = 'mask.nii' - >>> bias.run() # doctest: +SKIP - - """ - inputnode = pe.Node( - niu.IdentityInterface(fields=['in_file', 'in_bval', 'in_mask']), - name='inputnode') - - outputnode = pe.Node( - niu.IdentityInterface(fields=['out_file']), name='outputnode') - - avg_b0 = pe.Node( - niu.Function( - input_names=['in_dwi', 'in_bval'], - output_names=['out_file'], - function=b0_average), - name='b0_avg') - n4 = pe.Node( - ants.N4BiasFieldCorrection( - dimension=3, save_bias=True, bspline_fitting_distance=600), - name='Bias_b0') - split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs') - mult = pe.MapNode( - fsl.MultiImageMaths(op_string='-div %s'), - iterfield=['in_file'], - name='RemoveBiasOfDWIs') - thres = pe.MapNode( - fsl.Threshold(thresh=0.0), - iterfield=['in_file'], - name='RemoveNegative') - merge = pe.Node(fsl.utils.Merge(dimension='t'), name='MergeDWIs') - - wf = pe.Workflow(name=name) - wf.connect([(inputnode, avg_b0, [ - ('in_file', 'in_dwi'), ('in_bval', 'in_bval') - ]), (avg_b0, n4, [('out_file', 'input_image')]), (inputnode, n4, [ - ('in_mask', 'mask_image') - ]), (inputnode, split, [('in_file', 'in_file')]), (n4, mult, [ - ('bias_image', 'operand_files') - ]), (split, mult, [('out_files', 'in_file')]), (mult, thres, - [('out_file', 'in_file')]), - (thres, merge, [('out_file', 'in_files')]), - (merge, outputnode, [('merged_file', 'out_file')])]) - return wf - - -def _eff_t_echo(echospacing, acc_factor): - eff_echo = echospacing / (1.0 * acc_factor) - return eff_echo - - -def _fix_enc_dir(enc_dir): - enc_dir = enc_dir.lower() - if enc_dir == 'lr': - return 'x-' - if enc_dir == 'rl': - return 'x' - if enc_dir == 'ap': - return 'y-' - if enc_dir == 'pa': - return 'y' - return enc_dir - - -def _checkrnum(ref_num): - from nipype.interfaces.base import isdefined - if (ref_num is None) or not isdefined(ref_num): - return 0 - return ref_num - - -def _nonb0(in_bval): - import numpy as np - bvals = np.loadtxt(in_bval) - return np.where(bvals != 0)[0].tolist() - - -def _xfm_jacobian(in_xfm): - import numpy as np - from math import fabs - return [fabs(np.linalg.det(np.loadtxt(xfm))) for xfm in in_xfm] - - -def _get_zoom(in_file, enc_dir): - import nibabel as nb - from nipype.utils import NUMPY_MMAP - - zooms = nb.load(in_file, mmap=NUMPY_MMAP).header.get_zooms() - - if 'y' in enc_dir: - return zooms[1] - elif 'x' in enc_dir: - return zooms[0] - elif 'z' in enc_dir: - return zooms[2] - else: - raise ValueError('Wrong encoding direction string') diff --git a/nipype/workflows/dmri/fsl/dti.py b/nipype/workflows/dmri/fsl/dti.py deleted file mode 100644 index ee7e48dd5a..0000000000 --- a/nipype/workflows/dmri/fsl/dti.py +++ /dev/null @@ -1,276 +0,0 @@ -# -*- coding: utf-8 -*- -# coding: utf-8 - -from __future__ import absolute_import - -from ....pipeline import engine as pe -from ....interfaces import utility as niu -from ....interfaces import fsl -from ....algorithms import misc - -# backwards compatibility -from .epi import create_eddy_correct_pipeline - - -def transpose(samples_over_fibres): - import numpy as np - a = np.array(samples_over_fibres) - return np.squeeze(a.T).tolist() - - -def create_bedpostx_pipeline( - name='bedpostx', - params={ - 'n_fibres': 2, - 'fudge': 1, - 'burn_in': 1000, - 
'n_jumps': 1250, - 'sample_every': 25, - 'model': 2, - 'cnlinear': True - }): - """ - Creates a pipeline that does the same as bedpostx script from FSL - - calculates diffusion model parameters (distributions not MLE) voxelwise for - the whole volume (by splitting it slicewise). - - Example - ------- - - >>> from nipype.workflows.dmri.fsl.dti import create_bedpostx_pipeline - >>> params = dict(n_fibres = 2, fudge = 1, burn_in = 1000, - ... n_jumps = 1250, sample_every = 25) - >>> bpwf = create_bedpostx_pipeline('nipype_bedpostx', params) - >>> bpwf.inputs.inputnode.dwi = 'diffusion.nii' - >>> bpwf.inputs.inputnode.mask = 'mask.nii' - >>> bpwf.inputs.inputnode.bvecs = 'bvecs' - >>> bpwf.inputs.inputnode.bvals = 'bvals' - >>> bpwf.run() # doctest: +SKIP - - Inputs:: - - inputnode.dwi - inputnode.mask - inputnode.bvecs - inputnode.bvals - - Outputs:: - - outputnode wraps all XFibres outputs - - """ - - inputnode = pe.Node( - niu.IdentityInterface(fields=['dwi', 'mask', 'bvecs', 'bvals']), - name='inputnode') - - slice_dwi = pe.Node(fsl.Split(dimension='z'), name='slice_dwi') - slice_msk = pe.Node(fsl.Split(dimension='z'), name='slice_msk') - mask_dwi = pe.MapNode( - fsl.ImageMaths(op_string='-mas'), - iterfield=['in_file', 'in_file2'], - name='mask_dwi') - - xfib_if = fsl.XFibres(**params) - xfibres = pe.MapNode(xfib_if, name='xfibres', iterfield=['dwi', 'mask']) - - make_dyads = pe.MapNode( - fsl.MakeDyadicVectors(), - name="make_dyads", - iterfield=['theta_vol', 'phi_vol']) - out_fields = [ - 'dyads', 'dyads_disp', 'thsamples', 'phsamples', 'fsamples', - 'mean_thsamples', 'mean_phsamples', 'mean_fsamples' - ] - - outputnode = pe.Node( - niu.IdentityInterface(fields=out_fields), name='outputnode') - - wf = pe.Workflow(name=name) - wf.connect( - [(inputnode, slice_dwi, [('dwi', 'in_file')]), (inputnode, slice_msk, - [('mask', 'in_file')]), - (slice_dwi, mask_dwi, - [('out_files', 'in_file')]), (slice_msk, mask_dwi, [('out_files', - 'in_file2')]), - (slice_dwi, xfibres, - [('out_files', 'dwi')]), (mask_dwi, xfibres, [('out_file', 'mask')]), - (inputnode, xfibres, [('bvecs', 'bvecs'), - ('bvals', 'bvals')]), (inputnode, make_dyads, - [('mask', 'mask')])]) - - mms = {} - for k in ['thsamples', 'phsamples', 'fsamples']: - mms[k] = merge_and_mean(k) - wf.connect([(xfibres, mms[k], [(k, 'inputnode.in_files')]), - (mms[k], outputnode, [('outputnode.merged', k), - ('outputnode.mean', - 'mean_%s' % k)])]) - - # m_mdsamples = pe.Node(fsl.Merge(dimension="z"), - # name="merge_mean_dsamples") - wf.connect([ - (mms['thsamples'], make_dyads, [('outputnode.merged', 'theta_vol')]), - (mms['phsamples'], make_dyads, [('outputnode.merged', 'phi_vol')]), - # (xfibres, m_mdsamples, [('mean_dsamples', 'in_files')]), - (make_dyads, outputnode, [('dyads', 'dyads'), ('dispersion', - 'dyads_disp')]) - ]) - return wf - - -def merge_and_mean(name='mm'): - inputnode = pe.Node( - niu.IdentityInterface(fields=['in_files']), name='inputnode') - outputnode = pe.Node( - niu.IdentityInterface(fields=['merged', 'mean']), name='outputnode') - merge = pe.MapNode( - fsl.Merge(dimension='z'), name='Merge', iterfield=['in_files']) - mean = pe.MapNode( - fsl.ImageMaths(op_string='-Tmean'), name='Mean', iterfield=['in_file']) - - wf = pe.Workflow(name=name) - wf.connect([(inputnode, merge, [(('in_files', transpose), 'in_files')]), - (merge, mean, [('merged_file', 'in_file')]), - (merge, outputnode, - [('merged_file', 'merged')]), (mean, outputnode, [('out_file', - 'mean')])]) - return wf - - -def bedpostx_parallel( - 
name='bedpostx_parallel', - compute_all_outputs=True, - params={ - 'n_fibres': 2, - 'fudge': 1, - 'burn_in': 1000, - 'n_jumps': 1250, - 'sample_every': 25, - 'model': 1, - 'cnlinear': True - }): - """ - Does the same as :func:`.create_bedpostx_pipeline` by splitting - the input dMRI in small ROIs that are better suited for parallel - processing). - - Example - ------- - - >>> from nipype.workflows.dmri.fsl.dti import bedpostx_parallel - >>> params = dict(n_fibres = 2, fudge = 1, burn_in = 1000, - ... n_jumps = 1250, sample_every = 25) - >>> bpwf = bedpostx_parallel('nipype_bedpostx_parallel', params=params) - >>> bpwf.inputs.inputnode.dwi = 'diffusion.nii' - >>> bpwf.inputs.inputnode.mask = 'mask.nii' - >>> bpwf.inputs.inputnode.bvecs = 'bvecs' - >>> bpwf.inputs.inputnode.bvals = 'bvals' - >>> bpwf.run(plugin='CondorDAGMan') # doctest: +SKIP - - Inputs:: - - inputnode.dwi - inputnode.mask - inputnode.bvecs - inputnode.bvals - - Outputs:: - - outputnode wraps all XFibres outputs - - """ - - inputnode = pe.Node( - niu.IdentityInterface(fields=['dwi', 'mask', 'bvecs', 'bvals']), - name='inputnode') - slice_dwi = pe.Node(misc.SplitROIs(roi_size=(5, 5, 1)), name='slice_dwi') - if params is not None: - xfib_if = fsl.XFibres5(**params) - else: - xfib_if = fsl.XFibres5() - xfibres = pe.MapNode(xfib_if, name='xfibres', iterfield=['dwi', 'mask']) - - mrg_dyads = pe.MapNode( - misc.MergeROIs(), name='Merge_dyads', iterfield=['in_files']) - mrg_fsamp = pe.MapNode( - misc.MergeROIs(), name='Merge_mean_fsamples', iterfield=['in_files']) - out_fields = ['dyads', 'fsamples'] - - if compute_all_outputs: - out_fields += [ - 'dyads_disp', 'thsamples', 'phsamples', 'mean_fsamples', - 'mean_thsamples', 'mean_phsamples', 'merged_fsamples', - 'merged_thsamples', 'merged_phsamples' - ] - - outputnode = pe.Node( - niu.IdentityInterface(fields=out_fields), name='outputnode') - - wf = pe.Workflow(name=name) - wf.connect( - [(inputnode, slice_dwi, [('dwi', 'in_file'), ('mask', 'in_mask')]), - (slice_dwi, xfibres, [('out_files', 'dwi'), ('out_masks', 'mask')]), - (inputnode, xfibres, - [('bvecs', 'bvecs'), ('bvals', 'bvals')]), (inputnode, mrg_dyads, [ - ('mask', 'in_reference') - ]), (xfibres, mrg_dyads, - [(('dyads', transpose), 'in_files')]), (slice_dwi, mrg_dyads, [ - ('out_index', 'in_index') - ]), (inputnode, mrg_fsamp, - [('mask', 'in_reference')]), (xfibres, mrg_fsamp, [ - (('mean_fsamples', transpose), 'in_files') - ]), (slice_dwi, mrg_fsamp, [('out_index', 'in_index')]), - (mrg_dyads, outputnode, - [('merged_file', 'dyads')]), (mrg_fsamp, outputnode, - [('merged_file', 'fsamples')])]) - - if compute_all_outputs: - make_dyads = pe.MapNode( - fsl.MakeDyadicVectors(), - name="Make_dyads", - iterfield=['theta_vol', 'phi_vol']) - - wf.connect([(inputnode, make_dyads, [('mask', 'mask')])]) - mms = {} - for k in ['thsamples', 'phsamples', 'fsamples']: - mms[k] = merge_and_mean_parallel(k) - wf.connect( - [(slice_dwi, mms[k], [('out_index', 'inputnode.in_index')]), - (inputnode, mms[k], [('mask', 'inputnode.in_reference')]), - (xfibres, mms[k], [(k, 'inputnode.in_files')]), - (mms[k], outputnode, [('outputnode.merged', 'merged_%s' % k), - ('outputnode.mean', 'mean_%s' % k)])]) - - # m_mdsamples = pe.Node(fsl.Merge(dimension="z"), - # name="merge_mean_dsamples") - wf.connect([ - (mms['thsamples'], make_dyads, [('outputnode.merged', - 'theta_vol')]), - (mms['phsamples'], make_dyads, [('outputnode.merged', 'phi_vol')]), - # (xfibres, m_mdsamples, [('mean_dsamples', 'in_files')]), - (make_dyads, outputnode, 
[('dispersion', 'dyads_disp')]) - ]) - - return wf - - -def merge_and_mean_parallel(name='mm'): - inputnode = pe.Node( - niu.IdentityInterface(fields=['in_files', 'in_reference', 'in_index']), - name='inputnode') - outputnode = pe.Node( - niu.IdentityInterface(fields=['merged', 'mean']), name='outputnode') - merge = pe.MapNode(misc.MergeROIs(), name='Merge', iterfield=['in_files']) - mean = pe.MapNode( - fsl.ImageMaths(op_string='-Tmean'), name='Mean', iterfield=['in_file']) - - wf = pe.Workflow(name=name) - wf.connect([(inputnode, merge, - [(('in_files', transpose), 'in_files'), - ('in_reference', 'in_reference'), ('in_index', 'in_index')]), - (merge, mean, [('merged_file', 'in_file')]), - (merge, outputnode, - [('merged_file', 'merged')]), (mean, outputnode, [('out_file', - 'mean')])]) - return wf diff --git a/nipype/workflows/dmri/fsl/epi.py b/nipype/workflows/dmri/fsl/epi.py deleted file mode 100644 index 3bd88a99b7..0000000000 --- a/nipype/workflows/dmri/fsl/epi.py +++ /dev/null @@ -1,885 +0,0 @@ -# -*- coding: utf-8 -*- -# coding: utf-8 -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import open, str - -import warnings - -from ....pipeline import engine as pe -from ....interfaces import utility as niu -from ....interfaces import fsl as fsl - - -def create_dmri_preprocessing(name='dMRI_preprocessing', - use_fieldmap=True, - fieldmap_registration=False): - """ - Creates a workflow that chains the necessary pipelines to - correct for motion, eddy currents, and, if selected, susceptibility - artifacts in EPI dMRI sequences. - - .. deprecated:: 0.9.3 - Use :func:`nipype.workflows.dmri.preprocess.epi.all_fmb_pipeline` or - :func:`nipype.workflows.dmri.preprocess.epi.all_peb_pipeline` instead. - - - .. warning:: This workflow rotates the b-vectors, so please be - advised that not all the dicom converters ensure the consistency between the resulting - nifti orientation and the b matrix table (e.g. dcm2nii checks it). - - - Example - ------- - - >>> nipype_dmri_preprocess = create_dmri_preprocessing('nipype_dmri_prep') - >>> nipype_dmri_preprocess.inputs.inputnode.in_file = 'diffusion.nii' - >>> nipype_dmri_preprocess.inputs.inputnode.in_bvec = 'diffusion.bvec' - >>> nipype_dmri_preprocess.inputs.inputnode.ref_num = 0 - >>> nipype_dmri_preprocess.inputs.inputnode.fieldmap_mag = 'magnitude.nii' - >>> nipype_dmri_preprocess.inputs.inputnode.fieldmap_pha = 'phase.nii' - >>> nipype_dmri_preprocess.inputs.inputnode.te_diff = 2.46 - >>> nipype_dmri_preprocess.inputs.inputnode.epi_echospacing = 0.77 - >>> nipype_dmri_preprocess.inputs.inputnode.epi_rev_encoding = False - >>> nipype_dmri_preprocess.inputs.inputnode.pi_accel_factor = True - >>> nipype_dmri_preprocess.run() # doctest: +SKIP - - - Inputs:: - - inputnode.in_file - The diffusion data - inputnode.in_bvec - The b-matrix file, in FSL format and consistent with the in_file orientation - inputnode.ref_num - The reference volume (a b=0 volume in dMRI) - inputnode.fieldmap_mag - The magnitude of the fieldmap - inputnode.fieldmap_pha - The phase difference of the fieldmap - inputnode.te_diff - TE increment used (in msec.) on the fieldmap acquisition (generally 2.46ms for 3T scanners) - inputnode.epi_echospacing - The EPI EchoSpacing parameter (in msec.) - inputnode.epi_rev_encoding - True if reverse encoding was used (generally False) - inputnode.pi_accel_factor - Parallel imaging factor (aka GRAPPA acceleration factor) - inputnode.vsm_sigma - Sigma (in mm.) 
of the gaussian kernel used for in-slice smoothing of the deformation field (voxel shift map, vsm) - - - Outputs:: - - outputnode.dmri_corrected - outputnode.bvec_rotated - - - Optional arguments:: - - use_fieldmap - True if there are fieldmap files that should be used (default True) - fieldmap_registration - True if registration to fieldmap should be performed (default False) - - - """ - - warnings.warn( - ('This workflow is deprecated from v.1.0.0, use of available ' - 'nipype.workflows.dmri.preprocess.epi.all_*'), DeprecationWarning) - - pipeline = pe.Workflow(name=name) - - inputnode = pe.Node( - niu.IdentityInterface(fields=[ - 'in_file', 'in_bvec', 'ref_num', 'fieldmap_mag', 'fieldmap_pha', - 'te_diff', 'epi_echospacing', 'epi_rev_encoding', - 'pi_accel_factor', 'vsm_sigma' - ]), - name='inputnode') - - outputnode = pe.Node( - niu.IdentityInterface(fields=['dmri_corrected', 'bvec_rotated']), - name='outputnode') - - motion = create_motion_correct_pipeline() - eddy = create_eddy_correct_pipeline() - - if use_fieldmap: # we have a fieldmap, so lets use it (yay!) - susceptibility = create_epidewarp_pipeline( - fieldmap_registration=fieldmap_registration) - - pipeline.connect( - [(inputnode, motion, [('in_file', 'inputnode.in_file'), - ('in_bvec', 'inputnode.in_bvec'), - ('ref_num', 'inputnode.ref_num')]), - (inputnode, eddy, - [('ref_num', 'inputnode.ref_num')]), (motion, eddy, [ - ('outputnode.motion_corrected', 'inputnode.in_file') - ]), (eddy, susceptibility, - [('outputnode.eddy_corrected', 'inputnode.in_file')]), - (inputnode, susceptibility, - [('ref_num', 'inputnode.ref_num'), ('fieldmap_mag', - 'inputnode.fieldmap_mag'), - ('fieldmap_pha', 'inputnode.fieldmap_pha'), - ('te_diff', 'inputnode.te_diff'), ('epi_echospacing', - 'inputnode.epi_echospacing'), - ('epi_rev_encoding', - 'inputnode.epi_rev_encoding'), ('pi_accel_factor', - 'inputnode.pi_accel_factor'), - ('vsm_sigma', 'inputnode.vsm_sigma')]), (motion, outputnode, [ - ('outputnode.out_bvec', 'bvec_rotated') - ]), (susceptibility, outputnode, [('outputnode.epi_corrected', - 'dmri_corrected')])]) - else: # we don't have a fieldmap, so we just carry on without it :( - pipeline.connect([(inputnode, motion, [ - ('in_file', 'inputnode.in_file'), ('in_bvec', 'inputnode.in_bvec'), - ('ref_num', 'inputnode.ref_num') - ]), (inputnode, eddy, [('ref_num', 'inputnode.ref_num')]), - (motion, eddy, [('outputnode.motion_corrected', - 'inputnode.in_file')]), - (motion, outputnode, - [('outputnode.out_bvec', - 'bvec_rotated')]), (eddy, outputnode, - [('outputnode.eddy_corrected', - 'dmri_corrected')])]) - - return pipeline - - -def create_motion_correct_pipeline(name='motion_correct'): - """Creates a pipeline that corrects for motion artifact in dMRI sequences. - It takes a series of diffusion weighted images and rigidly co-registers - them to one reference image. Finally, the b-matrix is rotated accordingly - (Leemans et al. 2009 - http://www.ncbi.nlm.nih.gov/pubmed/19319973), - making use of the rotation matrix obtained by FLIRT. - - - .. deprecated:: 0.9.3 - Use :func:`nipype.workflows.dmri.preprocess.epi.hmc_pipeline` instead. - - - .. warning:: This workflow rotates the b-vectors, so please be adviced - that not all the dicom converters ensure the consistency between the resulting - nifti orientation and the b matrix table (e.g. dcm2nii checks it). 
- - - Example - ------- - - >>> nipype_motioncorrect = create_motion_correct_pipeline('nipype_motioncorrect') - >>> nipype_motioncorrect.inputs.inputnode.in_file = 'diffusion.nii' - >>> nipype_motioncorrect.inputs.inputnode.in_bvec = 'diffusion.bvec' - >>> nipype_motioncorrect.inputs.inputnode.ref_num = 0 - >>> nipype_motioncorrect.run() # doctest: +SKIP - - Inputs:: - - inputnode.in_file - inputnode.ref_num - inputnode.in_bvec - - Outputs:: - - outputnode.motion_corrected - outputnode.out_bvec - - """ - - warnings.warn( - ('This workflow is deprecated from v.1.0.0, use ' - 'nipype.workflows.dmri.preprocess.epi.hmc_pipeline instead'), - DeprecationWarning) - - inputnode = pe.Node( - niu.IdentityInterface(fields=['in_file', 'ref_num', 'in_bvec']), - name='inputnode') - - pipeline = pe.Workflow(name=name) - - split = pe.Node(fsl.Split(dimension='t'), name='split') - pick_ref = pe.Node(niu.Select(), name='pick_ref') - coregistration = pe.MapNode( - fsl.FLIRT(no_search=True, interp='spline', padding_size=1, dof=6), - name='coregistration', - iterfield=['in_file']) - rotate_bvecs = pe.Node( - niu.Function( - input_names=['in_bvec', 'in_matrix'], - output_names=['out_file'], - function=_rotate_bvecs), - name='rotate_b_matrix') - merge = pe.Node(fsl.Merge(dimension='t'), name='merge') - outputnode = pe.Node( - niu.IdentityInterface(fields=['motion_corrected', 'out_bvec']), - name='outputnode') - - pipeline.connect( - [(inputnode, split, [('in_file', 'in_file')]), - (split, pick_ref, [('out_files', 'inlist')]), (inputnode, pick_ref, [ - ('ref_num', 'index') - ]), (split, coregistration, - [('out_files', 'in_file')]), (inputnode, rotate_bvecs, - [('in_bvec', 'in_bvec')]), - (coregistration, rotate_bvecs, - [('out_matrix_file', 'in_matrix')]), (pick_ref, coregistration, - [('out', 'reference')]), - (coregistration, merge, - [('out_file', 'in_files')]), (merge, outputnode, [ - ('merged_file', 'motion_corrected') - ]), (rotate_bvecs, outputnode, [('out_file', 'out_bvec')])]) - - return pipeline - - -def create_eddy_correct_pipeline(name='eddy_correct'): - """ - - .. deprecated:: 0.9.3 - Use :func:`nipype.workflows.dmri.preprocess.epi.ecc_pipeline` instead. - - - Creates a pipeline that replaces eddy_correct script in FSL. It takes a - series of diffusion weighted images and linearly co-registers them to one - reference image. No rotation of the B-matrix is performed, so this pipeline - should be executed after the motion correction pipeline. 
- - Example - ------- - - >>> nipype_eddycorrect = create_eddy_correct_pipeline('nipype_eddycorrect') - >>> nipype_eddycorrect.inputs.inputnode.in_file = 'diffusion.nii' - >>> nipype_eddycorrect.inputs.inputnode.ref_num = 0 - >>> nipype_eddycorrect.run() # doctest: +SKIP - - Inputs:: - - inputnode.in_file - inputnode.ref_num - - Outputs:: - - outputnode.eddy_corrected - """ - - warnings.warn( - ('This workflow is deprecated from v.1.0.0, use ' - 'nipype.workflows.dmri.preprocess.epi.ecc_pipeline instead'), - DeprecationWarning) - - inputnode = pe.Node( - niu.IdentityInterface(fields=['in_file', 'ref_num']), name='inputnode') - - pipeline = pe.Workflow(name=name) - - split = pe.Node(fsl.Split(dimension='t'), name='split') - pick_ref = pe.Node(niu.Select(), name='pick_ref') - coregistration = pe.MapNode( - fsl.FLIRT(no_search=True, padding_size=1, interp='trilinear'), - name='coregistration', - iterfield=['in_file']) - merge = pe.Node(fsl.Merge(dimension='t'), name='merge') - outputnode = pe.Node( - niu.IdentityInterface(fields=['eddy_corrected']), name='outputnode') - - pipeline.connect([(inputnode, split, [('in_file', 'in_file')]), - (split, pick_ref, - [('out_files', 'inlist')]), (inputnode, pick_ref, - [('ref_num', 'index')]), - (split, coregistration, - [('out_files', 'in_file')]), (pick_ref, coregistration, - [('out', 'reference')]), - (coregistration, merge, - [('out_file', 'in_files')]), (merge, outputnode, - [('merged_file', - 'eddy_corrected')])]) - return pipeline - - -def fieldmap_correction(name='fieldmap_correction', nocheck=False): - """ - - .. deprecated:: 0.9.3 - Use :func:`nipype.workflows.dmri.preprocess.epi.sdc_fmb` instead. - - - Fieldmap-based retrospective correction of EPI images for the susceptibility distortion - artifact (Jezzard et al., 1995). Fieldmap images are assumed to be already registered - to EPI data, and a brain mask is required. - - Replaces the former workflow, still available as create_epidewarp_pipeline(). The difference - with respect to the epidewarp pipeline is that the workflow now uses the new fsl_prepare_fieldmap - available as of FSL 5.0. - - - Example - ------- - - >>> nipype_epicorrect = fieldmap_correction('nipype_epidewarp') - >>> nipype_epicorrect.inputs.inputnode.in_file = 'diffusion.nii' - >>> nipype_epicorrect.inputs.inputnode.in_mask = 'brainmask.nii' - >>> nipype_epicorrect.inputs.inputnode.fieldmap_pha = 'phase.nii' - >>> nipype_epicorrect.inputs.inputnode.fieldmap_mag = 'magnitude.nii' - >>> nipype_epicorrect.inputs.inputnode.te_diff = 2.46 - >>> nipype_epicorrect.inputs.inputnode.epi_echospacing = 0.77 - >>> nipype_epicorrect.inputs.inputnode.encoding_direction = 'y' - >>> nipype_epicorrect.run() # doctest: +SKIP - - Inputs:: - - inputnode.in_file - The volume acquired with the EPI sequence - inputnode.in_mask - A brain mask - inputnode.fieldmap_pha - The phase difference map from the fieldmapping, registered to in_file - inputnode.fieldmap_mag - The magnitude maps (usually 4D, one magnitude per GRE scan) - from the fieldmapping, registered to in_file - inputnode.te_diff - Time difference (in msec.) between the TEs of the fieldmapping (usually a GRE sequence). - inputnode.epi_echospacing - The effective echo spacing (aka dwell time) in msec. of the EPI sequence. If - EPI was acquired with parallel imaging, then the effective echo spacing is - eff_es = es / acc_factor.
- inputnode.encoding_direction - The phase encoding direction in EPI acquisition (default y) - inputnode.vsm_sigma - Sigma value of the gaussian smoothing filter applied to the vsm (voxel shift map) - - - Outputs:: - - outputnode.epi_corrected - outputnode.out_vsm - - """ - - warnings.warn(('This workflow is deprecated from v.1.0.0, use ' - 'nipype.workflows.dmri.preprocess.epi.sdc_fmb instead'), - DeprecationWarning) - - inputnode = pe.Node( - niu.IdentityInterface(fields=[ - 'in_file', 'in_mask', 'fieldmap_pha', 'fieldmap_mag', 'te_diff', - 'epi_echospacing', 'vsm_sigma', 'encoding_direction' - ]), - name='inputnode') - - pipeline = pe.Workflow(name=name) - - # Keep first frame from magnitude - select_mag = pe.Node( - fsl.utils.ExtractROI(t_size=1, t_min=0), name='select_magnitude') - - # Mask magnitude (it is required by PreparedFieldMap) - mask_mag = pe.Node(fsl.maths.ApplyMask(), name='mask_magnitude') - - # Run fsl_prepare_fieldmap - fslprep = pe.Node(fsl.PrepareFieldmap(), name='prepare_fieldmap') - - if nocheck: - fslprep.inputs.nocheck = True - - # Use FUGUE to generate the voxel shift map (vsm) - vsm = pe.Node(fsl.FUGUE(save_shift=True), name='generate_vsm') - - # VSM demean is not anymore present in the epi_reg script - # vsm_mean = pe.Node(niu.Function(input_names=['in_file', 'mask_file', 'in_unwarped'], output_names=[ - # 'out_file'], function=_vsm_remove_mean), name='vsm_mean_shift') - - # fugue_epi - dwi_split = pe.Node( - niu.Function( - input_names=['in_file'], - output_names=['out_files'], - function=_split_dwi), - name='dwi_split') - - # 'fugue -i %s -u %s --loadshift=%s --mask=%s' % ( vol_name, out_vol_name, vsm_name, mask_name ) - dwi_applyxfm = pe.MapNode( - fsl.FUGUE(icorr=True, save_shift=False), - iterfield=['in_file'], - name='dwi_fugue') - # Merge back all volumes - dwi_merge = pe.Node(fsl.utils.Merge(dimension='t'), name='dwi_merge') - - outputnode = pe.Node( - niu.IdentityInterface(fields=['epi_corrected', 'out_vsm']), - name='outputnode') - - pipeline.connect( - [(inputnode, select_mag, - [('fieldmap_mag', 'in_file')]), (inputnode, fslprep, [ - ('fieldmap_pha', 'in_phase'), ('te_diff', 'delta_TE') - ]), (inputnode, mask_mag, - [('in_mask', 'mask_file')]), (select_mag, mask_mag, - [('roi_file', 'in_file')]), - (mask_mag, fslprep, [('out_file', 'in_magnitude')]), (fslprep, vsm, [ - ('out_fieldmap', 'phasemap_in_file') - ]), (inputnode, - vsm, [('fieldmap_mag', - 'in_file'), ('encoding_direction', 'unwarp_direction'), - (('te_diff', _ms2sec), 'asym_se_time'), - ('vsm_sigma', 'smooth2d'), (('epi_echospacing', _ms2sec), - 'dwell_time')]), - (mask_mag, vsm, [('out_file', 'mask_file')]), (inputnode, dwi_split, [ - ('in_file', 'in_file') - ]), (dwi_split, dwi_applyxfm, - [('out_files', 'in_file')]), (mask_mag, dwi_applyxfm, - [('out_file', 'mask_file')]), - (vsm, dwi_applyxfm, - [('shift_out_file', 'shift_in_file')]), (inputnode, dwi_applyxfm, [ - ('encoding_direction', 'unwarp_direction') - ]), (dwi_applyxfm, dwi_merge, - [('unwarped_file', 'in_files')]), (dwi_merge, outputnode, [ - ('merged_file', 'epi_corrected') - ]), (vsm, outputnode, [('shift_out_file', 'out_vsm')])]) - - return pipeline - - -def topup_correction(name='topup_correction'): - """ - - .. deprecated:: 0.9.3 - Use :func:`nipype.workflows.dmri.preprocess.epi.sdc_peb` instead. 
- - - Corrects for susceptibility distortion of EPI images when one reverse-encoded dataset has - been acquired. - - - Example - ------- - - >>> nipype_epicorrect = topup_correction('nipype_topup') - >>> nipype_epicorrect.inputs.inputnode.in_file_dir = 'epi.nii' - >>> nipype_epicorrect.inputs.inputnode.in_file_rev = 'epi_rev.nii' - >>> nipype_epicorrect.inputs.inputnode.encoding_direction = ['y', 'y-'] - >>> nipype_epicorrect.inputs.inputnode.ref_num = 0 - >>> nipype_epicorrect.run() # doctest: +SKIP - - - Inputs:: - - inputnode.in_file_dir - EPI volume acquired in 'forward' phase encoding - inputnode.in_file_rev - EPI volume acquired in 'reversed' phase encoding - inputnode.encoding_direction - Encoding direction of in_file_dir - inputnode.ref_num - Identifier of the reference volumes (usually B0 volume) - - - Outputs:: - - outputnode.epi_corrected - - - """ - - warnings.warn(('This workflow is deprecated from v.1.0.0, use ' - 'nipype.workflows.dmri.preprocess.epi.sdc_peb instead'), - DeprecationWarning) - - pipeline = pe.Workflow(name=name) - - inputnode = pe.Node( - niu.IdentityInterface(fields=[ - 'in_file_dir', 'in_file_rev', 'encoding_direction', - 'readout_times', 'ref_num' - ]), - name='inputnode') - - outputnode = pe.Node( - niu.IdentityInterface(fields=[ - 'out_fieldcoef', 'out_movpar', 'out_enc_file', 'epi_corrected' - ]), - name='outputnode') - - b0_dir = pe.Node(fsl.ExtractROI(t_size=1), name='b0_1') - b0_rev = pe.Node(fsl.ExtractROI(t_size=1), name='b0_2') - combin = pe.Node(niu.Merge(2), name='merge') - combin2 = pe.Node(niu.Merge(2), name='merge2') - merged = pe.Node(fsl.Merge(dimension='t'), name='b0_comb') - - topup = pe.Node(fsl.TOPUP(), name='topup') - applytopup = pe.Node(fsl.ApplyTOPUP(in_index=[1, 2]), name='applytopup') - - pipeline.connect( - [(inputnode, b0_dir, [('in_file_dir', 'in_file'), ('ref_num', - 't_min')]), - (inputnode, b0_rev, - [('in_file_rev', - 'in_file'), ('ref_num', 't_min')]), (inputnode, combin2, [ - ('in_file_dir', 'in1'), ('in_file_rev', 'in2') - ]), (b0_dir, combin, [('roi_file', 'in1')]), (b0_rev, combin, [ - ('roi_file', 'in2') - ]), (combin, merged, [('out', 'in_files')]), - (merged, topup, [('merged_file', 'in_file')]), (inputnode, topup, [ - ('encoding_direction', 'encoding_direction'), ('readout_times', - 'readout_times') - ]), (topup, applytopup, [('out_fieldcoef', 'in_topup_fieldcoef'), - ('out_movpar', 'in_topup_movpar'), - ('out_enc_file', 'encoding_file')]), - (combin2, applytopup, [('out', 'in_files')]), (topup, outputnode, [ - ('out_fieldcoef', 'out_fieldcoef'), ('out_movpar', 'out_movpar'), - ('out_enc_file', 'out_enc_file') - ]), (applytopup, outputnode, [('out_corrected', 'epi_corrected')])]) - - return pipeline - - -def create_epidewarp_pipeline(name='epidewarp', fieldmap_registration=False): - """ - Replaces the epidewarp.fsl script (http://www.nmr.mgh.harvard.edu/~greve/fbirn/b0/epidewarp.fsl) - for susceptibility distortion correction of dMRI & fMRI acquired with EPI sequences and the fieldmap - information (Jezzard et al., 1995) using FSL's FUGUE. The registration to the (warped) fieldmap - (strictly following the original script) is available using fieldmap_registration=True. - - - .. warning:: This workflow makes use of ``epidewarp.fsl``, an FSL script deprecated a long - time ago. The use of this workflow is not recommended; use - :func:`nipype.workflows.dmri.preprocess.epi.sdc_fmb` instead.
- - - Example - ------- - - >>> nipype_epicorrect = create_epidewarp_pipeline('nipype_epidewarp', fieldmap_registration=False) - >>> nipype_epicorrect.inputs.inputnode.in_file = 'diffusion.nii' - >>> nipype_epicorrect.inputs.inputnode.fieldmap_mag = 'magnitude.nii' - >>> nipype_epicorrect.inputs.inputnode.fieldmap_pha = 'phase.nii' - >>> nipype_epicorrect.inputs.inputnode.te_diff = 2.46 - >>> nipype_epicorrect.inputs.inputnode.epi_echospacing = 0.77 - >>> nipype_epicorrect.inputs.inputnode.epi_rev_encoding = False - >>> nipype_epicorrect.inputs.inputnode.ref_num = 0 - >>> nipype_epicorrect.inputs.inputnode.pi_accel_factor = 1.0 - >>> nipype_epicorrect.run() # doctest: +SKIP - - Inputs:: - - inputnode.in_file - The volume acquired with EPI sequence - inputnode.fieldmap_mag - The magnitude of the fieldmap - inputnode.fieldmap_pha - The phase difference of the fieldmap - inputnode.te_diff - Time difference between TE in ms. - inputnode.epi_echospacing - The echo spacing (aka dwell time) in the EPI sequence - inputnode.epi_ph_encoding_dir - The phase encoding direction in EPI acquisition (default y) - inputnode.epi_rev_encoding - True if it is acquired with reverse encoding - inputnode.pi_accel_factor - Acceleration factor used for EPI parallel imaging (GRAPPA) - inputnode.vsm_sigma - Sigma value of the gaussian smoothing filter applied to the vsm (voxel shift map) - inputnode.ref_num - The reference volume (B=0 in dMRI or a central frame in fMRI) - - - Outputs:: - - outputnode.epi_corrected - - - Optional arguments:: - - fieldmap_registration - True if registration to fieldmap should be done (default False) - - """ - - warnings.warn(('This workflow reproduces a deprecated FSL script.'), - DeprecationWarning) - - inputnode = pe.Node( - niu.IdentityInterface(fields=[ - 'in_file', 'fieldmap_mag', 'fieldmap_pha', 'te_diff', - 'epi_echospacing', 'epi_ph_encoding_dir', 'epi_rev_encoding', - 'pi_accel_factor', 'vsm_sigma', 'ref_num', 'unwarp_direction' - ]), - name='inputnode') - - pipeline = pe.Workflow(name=name) - - # Keep first frame from magnitude - select_mag = pe.Node( - fsl.utils.ExtractROI(t_size=1, t_min=0), name='select_magnitude') - - # mask_brain - mask_mag = pe.Node(fsl.BET(mask=True), name='mask_magnitude') - mask_mag_dil = pe.Node( - niu.Function( - input_names=['in_file'], - output_names=['out_file'], - function=_dilate_mask), - name='mask_dilate') - - # Compute dwell time - dwell_time = pe.Node( - niu.Function( - input_names=['dwell_time', 'pi_factor', 'is_reverse_encoding'], - output_names=['dwell_time'], - function=_compute_dwelltime), - name='dwell_time') - - # Normalize phase diff to be [-pi, pi) - norm_pha = pe.Node( - niu.Function( - input_names=['in_file'], - output_names=['out_file'], - function=_prepare_phasediff), - name='normalize_phasediff') - # Execute FSL PRELUDE: prelude -p %s -a %s -o %s -f -v -m %s - prelude = pe.Node(fsl.PRELUDE(process3d=True), name='phase_unwrap') - fill_phase = pe.Node( - niu.Function( - input_names=['in_file'], - output_names=['out_file'], - function=_fill_phase), - name='fill_phasediff') - - # to assure that vsm is same dimension as mag. The input only affects the output dimension. - # The content of the input has no effect on the vsm. 
The de-warped mag volume is - # meaningless and will be thrown away - # fugue -i %s -u %s -p %s --dwell=%s --asym=%s --mask=%s --saveshift=%s % - # ( mag_name, magdw_name, ph_name, esp, tediff, mask_name, vsmmag_name) - vsm = pe.Node(fsl.FUGUE(save_shift=True), name='generate_vsm') - vsm_mean = pe.Node( - niu.Function( - input_names=['in_file', 'mask_file', 'in_unwarped'], - output_names=['out_file'], - function=_vsm_remove_mean), - name='vsm_mean_shift') - - # fugue_epi - dwi_split = pe.Node( - niu.Function( - input_names=['in_file'], - output_names=['out_files'], - function=_split_dwi), - name='dwi_split') - # 'fugue -i %s -u %s --loadshift=%s --mask=%s' % ( vol_name, out_vol_name, vsm_name, mask_name ) - dwi_applyxfm = pe.MapNode( - fsl.FUGUE(icorr=True, save_shift=False), - iterfield=['in_file'], - name='dwi_fugue') - # Merge back all volumes - dwi_merge = pe.Node(fsl.utils.Merge(dimension='t'), name='dwi_merge') - - outputnode = pe.Node( - niu.IdentityInterface(fields=['epi_corrected']), name='outputnode') - - pipeline.connect( - [(inputnode, dwell_time, - [('epi_echospacing', 'dwell_time'), ('pi_accel_factor', 'pi_factor'), - ('epi_rev_encoding', - 'is_reverse_encoding')]), (inputnode, select_mag, [('fieldmap_mag', - 'in_file')]), - (inputnode, norm_pha, [('fieldmap_pha', - 'in_file')]), (select_mag, mask_mag, - [('roi_file', 'in_file')]), - (mask_mag, mask_mag_dil, - [('mask_file', 'in_file')]), (select_mag, prelude, [ - ('roi_file', 'magnitude_file') - ]), (norm_pha, prelude, - [('out_file', 'phase_file')]), (mask_mag_dil, prelude, [ - ('out_file', 'mask_file') - ]), (prelude, fill_phase, - [('unwrapped_phase_file', 'in_file')]), (inputnode, vsm, [ - ('fieldmap_mag', 'in_file') - ]), (fill_phase, vsm, [('out_file', 'phasemap_in_file')]), - (inputnode, vsm, [(('te_diff', _ms2sec), 'asym_se_time'), - ('vsm_sigma', 'smooth2d')]), (dwell_time, vsm, [ - (('dwell_time', _ms2sec), 'dwell_time') - ]), (mask_mag_dil, vsm, [('out_file', - 'mask_file')]), - (mask_mag_dil, vsm_mean, - [('out_file', 'mask_file')]), (vsm, vsm_mean, [ - ('unwarped_file', 'in_unwarped'), ('shift_out_file', 'in_file') - ]), (inputnode, dwi_split, - [('in_file', 'in_file')]), (dwi_split, dwi_applyxfm, [ - ('out_files', 'in_file') - ]), (dwi_applyxfm, dwi_merge, - [('unwarped_file', 'in_files')]), (dwi_merge, outputnode, - [('merged_file', - 'epi_corrected')])]) - - if fieldmap_registration: - """ Register magfw to example epi. There are some parameters here that may need to be tweaked. Should probably strip the mag - Pre-condition: forward warp the mag in order to reg with func. What does mask do here? 
- """ - # Select reference volume from EPI (B0 in dMRI and a middle frame in - # fMRI) - select_epi = pe.Node(fsl.utils.ExtractROI(t_size=1), name='select_epi') - - # fugue -i %s -w %s --loadshift=%s --mask=%s % ( mag_name, magfw_name, - # vsmmag_name, mask_name ), log ) # Forward Map - vsm_fwd = pe.Node(fsl.FUGUE(forward_warping=True), name='vsm_fwd') - vsm_reg = pe.Node( - fsl.FLIRT( - bins=256, - cost='corratio', - dof=6, - interp='spline', - searchr_x=[-10, 10], - searchr_y=[-10, 10], - searchr_z=[-10, 10]), - name='vsm_registration') - # 'flirt -in %s -ref %s -out %s -init %s -applyxfm' % ( vsmmag_name, ref_epi, vsmmag_name, magfw_mat_out ) - vsm_applyxfm = pe.Node( - fsl.ApplyXfm(interp='spline'), name='vsm_apply_xfm') - # 'flirt -in %s -ref %s -out %s -init %s -applyxfm' % ( mask_name, ref_epi, mask_name, magfw_mat_out ) - msk_applyxfm = pe.Node( - fsl.ApplyXfm(interp='nearestneighbour'), name='msk_apply_xfm') - - pipeline.connect( - [(inputnode, select_epi, - [('in_file', 'in_file'), - ('ref_num', 't_min')]), (select_epi, vsm_reg, [('roi_file', - 'reference')]), - (vsm, vsm_fwd, [('shift_out_file', 'shift_in_file')]), - (mask_mag_dil, vsm_fwd, - [('out_file', 'mask_file')]), (inputnode, vsm_fwd, [ - ('fieldmap_mag', 'in_file') - ]), (vsm_fwd, vsm_reg, - [('warped_file', 'in_file')]), (vsm_reg, msk_applyxfm, [ - ('out_matrix_file', 'in_matrix_file') - ]), (select_epi, msk_applyxfm, [('roi_file', 'reference')]), - (mask_mag_dil, msk_applyxfm, - [('out_file', 'in_file')]), (vsm_reg, vsm_applyxfm, [ - ('out_matrix_file', 'in_matrix_file') - ]), (select_epi, vsm_applyxfm, - [('roi_file', 'reference')]), (vsm_mean, vsm_applyxfm, - [('out_file', 'in_file')]), - (msk_applyxfm, dwi_applyxfm, - [('out_file', 'mask_file')]), (vsm_applyxfm, dwi_applyxfm, - [('out_file', 'shift_in_file')])]) - else: - pipeline.connect( - [(mask_mag_dil, dwi_applyxfm, [('out_file', 'mask_file')]), - (vsm_mean, dwi_applyxfm, [('out_file', 'shift_in_file')])]) - - return pipeline - - -def _rotate_bvecs(in_bvec, in_matrix): - import os - import numpy as np - - name, fext = os.path.splitext(os.path.basename(in_bvec)) - if fext == '.gz': - name, _ = os.path.splitext(name) - out_file = os.path.abspath('./%s_rotated.bvec' % name) - bvecs = np.loadtxt(in_bvec) - new_bvecs = np.zeros( - shape=bvecs.T.shape) # pre-initialise array, 3 col format - - for i, vol_matrix in enumerate(in_matrix[0::]): # start index at 0 - bvec = np.matrix(bvecs[:, i]) - rot = np.matrix(np.loadtxt(vol_matrix)[0:3, 0:3]) - new_bvecs[i] = (np.array( - rot * bvec.T).T)[0] # fill each volume with x,y,z as we go along - np.savetxt(out_file, np.array(new_bvecs).T, fmt=b'%0.15f') - return out_file - - -def _cat_logs(in_files): - import shutil - import os - - name, fext = os.path.splitext(os.path.basename(in_files[0])) - if fext == '.gz': - name, _ = os.path.splitext(name) - out_file = os.path.abspath('./%s_ecclog.log' % name) - with open(out_file, 'wb') as totallog: - for i, fname in enumerate(in_files): - totallog.write('\n\npreprocessing %d\n' % i) - with open(fname) as inlog: - for line in inlog: - totallog.write(line) - return out_file - - -def _compute_dwelltime(dwell_time=0.68, - pi_factor=1.0, - is_reverse_encoding=False): - dwell_time *= (1.0 / pi_factor) - - if is_reverse_encoding: - dwell_time *= -1.0 - - return dwell_time - - -def _effective_echospacing(dwell_time, pi_factor=1.0): - dwelltime = 1.0e-3 * dwell_time * (1.0 / pi_factor) - return dwelltime - - -def _prepare_phasediff(in_file): - import nibabel as nb - import os - import numpy as 
np - from nipype.utils import NUMPY_MMAP - img = nb.load(in_file, mmap=NUMPY_MMAP) - max_diff = np.max(img.get_data().reshape(-1)) - min_diff = np.min(img.get_data().reshape(-1)) - A = (2.0 * np.pi) / (max_diff - min_diff) - B = np.pi - (A * max_diff) - diff_norm = img.get_data() * A + B - - name, fext = os.path.splitext(os.path.basename(in_file)) - if fext == '.gz': - name, _ = os.path.splitext(name) - out_file = os.path.abspath('./%s_2pi.nii.gz' % name) - nb.save(nb.Nifti1Image(diff_norm, img.affine, img.header), out_file) - return out_file - - -def _dilate_mask(in_file, iterations=4): - import nibabel as nb - import scipy.ndimage as ndimage - import os - from nipype.utils import NUMPY_MMAP - img = nb.load(in_file, mmap=NUMPY_MMAP) - dilated_img = img.__class__( - ndimage.binary_dilation(img.get_data(), iterations=iterations), - img.affine, img.header) - - name, fext = os.path.splitext(os.path.basename(in_file)) - if fext == '.gz': - name, _ = os.path.splitext(name) - out_file = os.path.abspath('./%s_dil.nii.gz' % name) - nb.save(dilated_img, out_file) - return out_file - - -def _fill_phase(in_file): - import nibabel as nb - import os - import numpy as np - from nipype.utils import NUMPY_MMAP - img = nb.load(in_file, mmap=NUMPY_MMAP) - dumb_img = nb.Nifti1Image(np.zeros(img.shape), img.affine, img.header) - out_nii = nb.funcs.concat_images((img, dumb_img)) - name, fext = os.path.splitext(os.path.basename(in_file)) - if fext == '.gz': - name, _ = os.path.splitext(name) - out_file = os.path.abspath('./%s_fill.nii.gz' % name) - nb.save(out_nii, out_file) - return out_file - - -def _vsm_remove_mean(in_file, mask_file, in_unwarped): - import nibabel as nb - import os - import numpy as np - import numpy.ma as ma - from nipype.utils import NUMPY_MMAP - img = nb.load(in_file, mmap=NUMPY_MMAP) - msk = nb.load(mask_file, mmap=NUMPY_MMAP).get_data() - img_data = img.get_data() - img_data[msk == 0] = 0 - vsmmag_masked = ma.masked_values(img_data.reshape(-1), 0.0) - vsmmag_masked = vsmmag_masked - vsmmag_masked.mean() - masked_img = img.__class__( - vsmmag_masked.reshape(img.shape), img.affine, img.header) - name, fext = os.path.splitext(os.path.basename(in_file)) - if fext == '.gz': - name, _ = os.path.splitext(name) - out_file = os.path.abspath('./%s_demeaned.nii.gz' % name) - nb.save(masked_img, out_file) - return out_file - - -def _ms2sec(val): - return val * 1e-3 - - -def _split_dwi(in_file): - import nibabel as nb - import os - from nipype.utils import NUMPY_MMAP - out_files = [] - frames = nb.funcs.four_to_three(nb.load(in_file, mmap=NUMPY_MMAP)) - name, fext = os.path.splitext(os.path.basename(in_file)) - if fext == '.gz': - name, _ = os.path.splitext(name) - for i, frame in enumerate(frames): - out_file = os.path.abspath('./%s_%03d.nii.gz' % (name, i)) - nb.save(frame, out_file) - out_files.append(out_file) - return out_files diff --git a/nipype/workflows/dmri/fsl/tbss.py b/nipype/workflows/dmri/fsl/tbss.py deleted file mode 100644 index 3aef3e734a..0000000000 --- a/nipype/workflows/dmri/fsl/tbss.py +++ /dev/null @@ -1,590 +0,0 @@ -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: - -import os -from warnings import warn - -from ....pipeline import engine as pe -from ....interfaces import utility as util -from ....interfaces import fsl as fsl - - -def tbss1_op_string(in_files): - import nibabel as nb - from nipype.utils import NUMPY_MMAP - op_strings = [] - for infile in in_files: - img = nb.load(infile, 
mmap=NUMPY_MMAP) - dimtup = tuple(d - 2 for d in img.shape) - dimtup = dimtup[0:3] - op_str = '-min 1 -ero -roi 1 %d 1 %d 1 %d 0 1' % dimtup - op_strings.append(op_str) - return op_strings - - -def create_tbss_1_preproc(name='tbss_1_preproc'): - """Preprocess FA data for TBSS: erodes the FA images slightly, zeroes the end slices, and - creates masks (for use in FLIRT & FNIRT from FSL). - A pipeline that does the same as the tbss_1_preproc script in FSL. - - Example - ------- - - >>> from nipype.workflows.dmri.fsl import tbss - >>> tbss1 = tbss.create_tbss_1_preproc() - >>> tbss1.inputs.inputnode.fa_list = ['s1_FA.nii', 's2_FA.nii', 's3_FA.nii'] - - Inputs:: - - inputnode.fa_list - - Outputs:: - - outputnode.fa_list - outputnode.mask_list - outputnode.slices - - """ - - # Define the inputnode - inputnode = pe.Node( - interface=util.IdentityInterface(fields=["fa_list"]), name="inputnode") - - # Prep the FA images - prepfa = pe.MapNode( - fsl.ImageMaths(suffix="_prep"), - name="prepfa", - iterfield=['in_file', 'op_string']) - - # Slicer - slicer = pe.MapNode( - fsl.Slicer(all_axial=True, image_width=1280), - name='slicer', - iterfield=['in_file']) - - # Create a mask - getmask1 = pe.MapNode( - fsl.ImageMaths(op_string="-bin", suffix="_mask"), - name="getmask1", - iterfield=['in_file']) - getmask2 = pe.MapNode( - fsl.MultiImageMaths(op_string="-dilD -dilD -sub 1 -abs -add %s"), - name="getmask2", - iterfield=['in_file', 'operand_files']) - - # $FSLDIR/bin/fslmaths FA/${f}_FA_mask -dilD -dilD -sub 1 -abs -add FA/${f}_FA_mask FA/${f}_FA_mask -odt char - # Define the tbss1 workflow - tbss1 = pe.Workflow(name=name) - tbss1.connect([ - (inputnode, prepfa, [("fa_list", "in_file")]), - (inputnode, prepfa, [(("fa_list", tbss1_op_string), "op_string")]), - (prepfa, getmask1, [("out_file", "in_file")]), - (getmask1, getmask2, [("out_file", "in_file"), ("out_file", - "operand_files")]), - (prepfa, slicer, [('out_file', 'in_file')]), - ]) - - # Define the outputnode - outputnode = pe.Node( - interface=util.IdentityInterface( - fields=["fa_list", "mask_list", "slices"]), - name="outputnode") - tbss1.connect([(prepfa, outputnode, [("out_file", "fa_list")]), - (getmask2, outputnode, [("out_file", "mask_list")]), - (slicer, outputnode, [('out_file', 'slices')])]) - return tbss1 - - -def create_tbss_2_reg(name="tbss_2_reg"): - """TBSS nonlinear registration: - A pipeline that does the same as the 'tbss_2_reg -t' script in FSL. The '-n' option - is not supported at the moment.
- - Example - ------- - - >>> from nipype.workflows.dmri.fsl import tbss - >>> tbss2 = create_tbss_2_reg(name="tbss2") - >>> tbss2.inputs.inputnode.target = fsl.Info.standard_image("FMRIB58_FA_1mm.nii.gz") # doctest: +SKIP - >>> tbss2.inputs.inputnode.fa_list = ['s1_FA.nii', 's2_FA.nii', 's3_FA.nii'] - >>> tbss2.inputs.inputnode.mask_list = ['s1_mask.nii', 's2_mask.nii', 's3_mask.nii'] - - Inputs:: - - inputnode.fa_list - inputnode.mask_list - inputnode.target - - Outputs:: - - outputnode.field_list - - """ - - # Define the inputnode - inputnode = pe.Node( - interface=util.IdentityInterface( - fields=["fa_list", "mask_list", "target"]), - name="inputnode") - - # Flirt the FA image to the target - flirt = pe.MapNode( - interface=fsl.FLIRT(dof=12), - iterfield=['in_file', 'in_weight'], - name="flirt") - - fnirt = pe.MapNode( - interface=fsl.FNIRT(fieldcoeff_file=True), - iterfield=['in_file', 'inmask_file', 'affine_file'], - name="fnirt") - # Fnirt the FA image to the target - if fsl.no_fsl(): - warn('NO FSL found') - else: - config_file = os.path.join(os.environ["FSLDIR"], - "etc/flirtsch/FA_2_FMRIB58_1mm.cnf") - fnirt.inputs.config_file = config_file - - # Define the registration workflow - tbss2 = pe.Workflow(name=name) - - # Connect up the registration workflow - tbss2.connect([ - (inputnode, flirt, [("fa_list", "in_file"), ("target", "reference"), - ("mask_list", "in_weight")]), - (inputnode, fnirt, [("fa_list", "in_file"), - ("mask_list", "inmask_file"), ("target", - "ref_file")]), - (flirt, fnirt, [("out_matrix_file", "affine_file")]), - ]) - - # Define the outputnode - outputnode = pe.Node( - interface=util.IdentityInterface(fields=['field_list']), - name="outputnode") - - tbss2.connect([(fnirt, outputnode, [('fieldcoeff_file', 'field_list')])]) - return tbss2 - - -def create_tbss_3_postreg(name='tbss_3_postreg', estimate_skeleton=True): - """Post-registration processing: derive mean_FA and mean_FA_skeleton from - mean of all subjects in study. Target is assumed to be FMRIB58_FA_1mm. - A pipeline that does the same as 'tbss_3_postreg -S' script from FSL - Setting 'estimate_skeleton to False will use precomputed FMRIB58_FA-skeleton_1mm - skeleton (same as 'tbss_3_postreg -T'). 
- - Example - ------- - - >>> from nipype.workflows.dmri.fsl import tbss - >>> tbss3 = tbss.create_tbss_3_postreg() - >>> tbss3.inputs.inputnode.fa_list = ['s1_wrapped_FA.nii', 's2_wrapped_FA.nii', 's3_wrapped_FA.nii'] - - Inputs:: - - inputnode.field_list - inputnode.fa_list - - Outputs:: - - outputnode.groupmask - outputnode.skeleton_file - outputnode.meanfa_file - outputnode.mergefa_file - - """ - - # Create the inputnode - inputnode = pe.Node( - interface=util.IdentityInterface(fields=['field_list', 'fa_list']), - name='inputnode') - - # Apply the warpfield to the masked FA image - applywarp = pe.MapNode( - interface=fsl.ApplyWarp(), - iterfield=['in_file', 'field_file'], - name="applywarp") - if fsl.no_fsl(): - warn('NO FSL found') - else: - applywarp.inputs.ref_file = fsl.Info.standard_image( - "FMRIB58_FA_1mm.nii.gz") - - # Merge the FA files into a 4D file - mergefa = pe.Node(fsl.Merge(dimension="t"), name="mergefa") - - # Get a group mask - groupmask = pe.Node( - fsl.ImageMaths( - op_string="-max 0 -Tmin -bin", - out_data_type="char", - suffix="_mask"), - name="groupmask") - - maskgroup = pe.Node( - fsl.ImageMaths(op_string="-mas", suffix="_masked"), name="maskgroup") - - tbss3 = pe.Workflow(name=name) - tbss3.connect([ - (inputnode, applywarp, [("fa_list", "in_file"), ("field_list", - "field_file")]), - (applywarp, mergefa, [("out_file", "in_files")]), - (mergefa, groupmask, [("merged_file", "in_file")]), - (mergefa, maskgroup, [("merged_file", "in_file")]), - (groupmask, maskgroup, [("out_file", "in_file2")]), - ]) - - # Create outputnode - outputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - 'groupmask', 'skeleton_file', 'meanfa_file', 'mergefa_file' - ]), - name='outputnode') - - if estimate_skeleton: - # Take the mean over the fourth dimension - meanfa = pe.Node( - fsl.ImageMaths(op_string="-Tmean", suffix="_mean"), name="meanfa") - - # Use the mean FA volume to generate a tract skeleton - makeskeleton = pe.Node( - fsl.TractSkeleton(skeleton_file=True), name="makeskeleton") - tbss3.connect( - [(maskgroup, meanfa, [("out_file", "in_file")]), - (meanfa, makeskeleton, - [("out_file", "in_file")]), (groupmask, outputnode, - [('out_file', 'groupmask')]), - (makeskeleton, outputnode, - [('skeleton_file', 'skeleton_file')]), (meanfa, outputnode, [ - ('out_file', 'meanfa_file') - ]), (maskgroup, outputnode, [('out_file', 'mergefa_file')])]) - else: - # $FSLDIR/bin/fslmaths $FSLDIR/data/standard/FMRIB58_FA_1mm -mas mean_FA_mask mean_FA - maskstd = pe.Node( - fsl.ImageMaths(op_string="-mas", suffix="_masked"), name="maskstd") - if fsl.no_fsl(): - warn('NO FSL found') - else: - maskstd.inputs.in_file = fsl.Info.standard_image( - "FMRIB58_FA_1mm.nii.gz") - - # $FSLDIR/bin/fslmaths mean_FA -bin mean_FA_mask - binmaskstd = pe.Node( - fsl.ImageMaths(op_string="-bin"), name="binmaskstd") - - # $FSLDIR/bin/fslmaths all_FA -mas mean_FA_mask all_FA - maskgroup2 = pe.Node( - fsl.ImageMaths(op_string="-mas", suffix="_masked"), - name="maskgroup2") - - tbss3.connect([(groupmask, maskstd, [("out_file", "in_file2")]), - (maskstd, binmaskstd, [("out_file", "in_file")]), - (maskgroup, maskgroup2, [("out_file", "in_file")]), - (binmaskstd, maskgroup2, [("out_file", "in_file2")])]) - - if fsl.no_fsl(): - warn('NO FSL found') - else: - outputnode.inputs.skeleton_file = fsl.Info.standard_image( - "FMRIB58_FA-skeleton_1mm.nii.gz") - tbss3.connect([(binmaskstd, outputnode, [('out_file', 'groupmask')]), - (maskstd, outputnode, [('out_file', 'meanfa_file')]), - (maskgroup2, outputnode, 
[('out_file', - 'mergefa_file')])]) - return tbss3 - - -def tbss4_op_string(skeleton_thresh): - op_string = "-thr %.1f -bin" % skeleton_thresh - return op_string - - -def create_tbss_4_prestats(name='tbss_4_prestats'): - """Post-registration processing: creates a skeleton mask using a threshold and - projects all FA data onto the skeleton. - A pipeline that does the same as the tbss_4_prestats script from FSL. - - Example - ------- - - >>> from nipype.workflows.dmri.fsl import tbss - >>> tbss4 = tbss.create_tbss_4_prestats(name='tbss4') - >>> tbss4.inputs.inputnode.skeleton_thresh = 0.2 - - Inputs:: - - inputnode.skeleton_thresh - inputnode.groupmask - inputnode.skeleton_file - inputnode.meanfa_file - inputnode.mergefa_file - - Outputs:: - - outputnode.projectedfa_file - outputnode.skeleton_mask - outputnode.distance_map - outputnode.skeleton_file - - """ - # Create inputnode - inputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - 'groupmask', 'skeleton_file', 'meanfa_file', 'mergefa_file', - 'skeleton_thresh' - ]), - name='inputnode') - - # Mask the skeleton at the threshold - skeletonmask = pe.Node(fsl.ImageMaths(suffix="_mask"), name="skeletonmask") - - # Invert the brainmask then add in the tract skeleton - invertmask = pe.Node( - fsl.ImageMaths(suffix="_inv", op_string="-mul -1 -add 1 -add"), - name="invertmask") - - # Generate a distance map with the tract skeleton - distancemap = pe.Node(fsl.DistanceMap(), name="distancemap") - - # Project the FA values onto the skeleton - projectfa = pe.Node( - fsl.TractSkeleton( - project_data=True, skeleton_file=True, use_cingulum_mask=True), - name="projectfa") - - # Create tbss4 workflow - tbss4 = pe.Workflow(name=name) - tbss4.connect([ - (inputnode, invertmask, [("groupmask", "in_file")]), - (inputnode, skeletonmask, [("skeleton_file", "in_file"), - (('skeleton_thresh', tbss4_op_string), - 'op_string')]), - (inputnode, projectfa, [('skeleton_thresh', 'threshold'), - ("meanfa_file", "in_file"), ("mergefa_file", - "data_file")]), - (skeletonmask, invertmask, [("out_file", "in_file2")]), - (invertmask, distancemap, [("out_file", "in_file")]), - (distancemap, projectfa, [("distance_map", "distance_map")]), - ]) - - # Create the outputnode - outputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - 'projectedfa_file', 'skeleton_mask', 'distance_map', - 'skeleton_file' - ]), - name='outputnode') - - tbss4.connect( - [(projectfa, outputnode, [('projected_data', 'projectedfa_file'), - ('skeleton_file', 'skeleton_file')]), - (distancemap, outputnode, [('distance_map', 'distance_map')]), - (skeletonmask, outputnode, [('out_file', 'skeleton_mask')])]) - - return tbss4 - - -def create_tbss_all(name='tbss_all', estimate_skeleton=True): - """Create a pipeline that combines the create_tbss_* pipelines. - - Example - ------- - - >>> from nipype.workflows.dmri.fsl import tbss - >>> tbss_wf = tbss.create_tbss_all('tbss', estimate_skeleton=True) - >>> tbss_wf.inputs.inputnode.skeleton_thresh = 0.2 - >>> tbss_wf.inputs.inputnode.fa_list = ['s1_wrapped_FA.nii', 's2_wrapped_FA.nii', 's3_wrapped_FA.nii'] - - >>> tbss_wf = tbss.create_tbss_all('tbss', estimate_skeleton=False) - >>> tbss_wf.inputs.inputnode.skeleton_thresh = 0.2 - >>> tbss_wf.inputs.inputnode.fa_list = ['s1_wrapped_FA.nii', 's2_wrapped_FA.nii', 's3_wrapped_FA.nii'] - - - Inputs:: - - inputnode.fa_list - inputnode.skeleton_thresh - - Outputs:: - - outputnode.meanfa_file - outputnode.projectedfa_file - outputnode.skeleton_file - outputnode.skeleton_mask - - """ - - # Define the
inputnode - inputnode = pe.Node( - interface=util.IdentityInterface( - fields=['fa_list', 'skeleton_thresh']), - name='inputnode') - - tbss1 = create_tbss_1_preproc(name='tbss1') - tbss2 = create_tbss_2_reg(name='tbss2') - if fsl.no_fsl(): - warn('NO FSL found') - else: - tbss2.inputs.inputnode.target = fsl.Info.standard_image( - "FMRIB58_FA_1mm.nii.gz") - tbss3 = create_tbss_3_postreg( - name='tbss3', estimate_skeleton=estimate_skeleton) - tbss4 = create_tbss_4_prestats(name='tbss4') - - tbss_all = pe.Workflow(name=name) - tbss_all.connect( - [(inputnode, tbss1, [('fa_list', 'inputnode.fa_list')]), - (inputnode, tbss4, - [('skeleton_thresh', 'inputnode.skeleton_thresh')]), (tbss1, tbss2, [ - ('outputnode.fa_list', 'inputnode.fa_list'), - ('outputnode.mask_list', 'inputnode.mask_list') - ]), (tbss1, tbss3, [('outputnode.fa_list', 'inputnode.fa_list')]), - (tbss2, tbss3, [('outputnode.field_list', 'inputnode.field_list')]), - (tbss3, tbss4, [('outputnode.groupmask', - 'inputnode.groupmask'), ('outputnode.skeleton_file', - 'inputnode.skeleton_file'), - ('outputnode.meanfa_file', 'inputnode.meanfa_file'), - ('outputnode.mergefa_file', - 'inputnode.mergefa_file')])]) - - # Define the outputnode - outputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - 'groupmask', 'skeleton_file3', 'meanfa_file', 'mergefa_file', - 'projectedfa_file', 'skeleton_file4', 'skeleton_mask', - 'distance_map' - ]), - name='outputnode') - outputall_node = pe.Node( - interface=util.IdentityInterface(fields=[ - 'fa_list1', 'mask_list1', 'field_list2', 'groupmask3', - 'skeleton_file3', 'meanfa_file3', 'mergefa_file3', - 'projectedfa_file4', 'skeleton_mask4', 'distance_map4' - ]), - name='outputall_node') - - tbss_all.connect([ - (tbss3, outputnode, [ - ('outputnode.meanfa_file', 'meanfa_file'), - ('outputnode.mergefa_file', 'mergefa_file'), - ('outputnode.groupmask', 'groupmask'), - ('outputnode.skeleton_file', 'skeleton_file3'), - ]), - (tbss4, outputnode, [ - ('outputnode.projectedfa_file', 'projectedfa_file'), - ('outputnode.skeleton_file', 'skeleton_file4'), - ('outputnode.skeleton_mask', 'skeleton_mask'), - ('outputnode.distance_map', 'distance_map'), - ]), - (tbss1, outputall_node, [ - ('outputnode.fa_list', 'fa_list1'), - ('outputnode.mask_list', 'mask_list1'), - ]), - (tbss2, outputall_node, [ - ('outputnode.field_list', 'field_list2'), - ]), - (tbss3, outputall_node, [ - ('outputnode.meanfa_file', 'meanfa_file3'), - ('outputnode.mergefa_file', 'mergefa_file3'), - ('outputnode.groupmask', 'groupmask3'), - ('outputnode.skeleton_file', 'skeleton_file3'), - ]), - (tbss4, outputall_node, [ - ('outputnode.projectedfa_file', 'projectedfa_file4'), - ('outputnode.skeleton_mask', 'skeleton_mask4'), - ('outputnode.distance_map', 'distance_map4'), - ]), - ]) - return tbss_all - - -def create_tbss_non_FA(name='tbss_non_FA'): - """ - A pipeline that implement tbss_non_FA in FSL - - Example - ------- - - >>> from nipype.workflows.dmri.fsl import tbss - >>> tbss_MD = tbss.create_tbss_non_FA() - >>> tbss_MD.inputs.inputnode.file_list = [] - >>> tbss_MD.inputs.inputnode.field_list = [] - >>> tbss_MD.inputs.inputnode.skeleton_thresh = 0.2 - >>> tbss_MD.inputs.inputnode.groupmask = './xxx' - >>> tbss_MD.inputs.inputnode.meanfa_file = './xxx' - >>> tbss_MD.inputs.inputnode.distance_map = [] - >>> tbss_MD.inputs.inputnode.all_FA_file = './xxx' - - Inputs:: - - inputnode.file_list - inputnode.field_list - inputnode.skeleton_thresh - inputnode.groupmask - inputnode.meanfa_file - inputnode.distance_map - inputnode.all_FA_file 
- - Outputs:: - - outputnode.projected_nonFA_file - - """ - - # Define the inputnode - inputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - 'file_list', 'field_list', 'skeleton_thresh', 'groupmask', - 'meanfa_file', 'distance_map', 'all_FA_file' - ]), - name='inputnode') - - # Apply the warpfield to the non FA image - applywarp = pe.MapNode( - interface=fsl.ApplyWarp(), - iterfield=['in_file', 'field_file'], - name="applywarp") - if fsl.no_fsl(): - warn('NO FSL found') - else: - applywarp.inputs.ref_file = fsl.Info.standard_image( - "FMRIB58_FA_1mm.nii.gz") - # Merge the non FA files into a 4D file - merge = pe.Node(fsl.Merge(dimension="t"), name="merge") - # merged_file="all_FA.nii.gz" - maskgroup = pe.Node( - fsl.ImageMaths(op_string="-mas", suffix="_masked"), name="maskgroup") - projectfa = pe.Node( - fsl.TractSkeleton( - project_data=True, - # projected_data = 'test.nii.gz', - use_cingulum_mask=True), - name="projectfa") - - tbss_non_FA = pe.Workflow(name=name) - tbss_non_FA.connect([ - (inputnode, applywarp, [ - ('file_list', 'in_file'), - ('field_list', 'field_file'), - ]), - (applywarp, merge, [("out_file", "in_files")]), - (merge, maskgroup, [("merged_file", "in_file")]), - (inputnode, maskgroup, [('groupmask', 'in_file2')]), - (maskgroup, projectfa, [('out_file', 'alt_data_file')]), - (inputnode, projectfa, - [('skeleton_thresh', 'threshold'), ("meanfa_file", "in_file"), - ("distance_map", "distance_map"), ("all_FA_file", 'data_file')]), - ]) - - # Define the outputnode - outputnode = pe.Node( - interface=util.IdentityInterface(fields=['projected_nonFA_file']), - name='outputnode') - tbss_non_FA.connect([ - (projectfa, outputnode, [ - ('projected_data', 'projected_nonFA_file'), - ]), - ]) - return tbss_non_FA diff --git a/nipype/workflows/dmri/fsl/tests/__init__.py b/nipype/workflows/dmri/fsl/tests/__init__.py deleted file mode 100644 index 99fb243f19..0000000000 --- a/nipype/workflows/dmri/fsl/tests/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: diff --git a/nipype/workflows/dmri/fsl/tests/test_dti.py b/nipype/workflows/dmri/fsl/tests/test_dti.py deleted file mode 100644 index 23cd8f37d8..0000000000 --- a/nipype/workflows/dmri/fsl/tests/test_dti.py +++ /dev/null @@ -1,85 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import unicode_literals, print_function, absolute_import -import os - -import pytest -import nipype.interfaces.fsl as fsl -import nipype.interfaces.utility as util -from nipype.interfaces.fsl import no_fsl, no_fsl_course_data - -import nipype.pipeline.engine as pe -import warnings -from nipype.workflows.dmri.fsl.dti import create_bedpostx_pipeline -from nipype.utils.filemanip import simplify_list - - -@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") -@pytest.mark.skipif(no_fsl_course_data(), reason="fsl data not available") -def test_create_bedpostx_pipeline(tmpdir): - fsl_course_dir = os.path.abspath(os.environ['FSL_COURSE_DATA']) - - mask_file = os.path.join(fsl_course_dir, - "fdt2/subj1.bedpostX/nodif_brain_mask.nii.gz") - bvecs_file = os.path.join(fsl_course_dir, "fdt2/subj1/bvecs") - bvals_file = os.path.join(fsl_course_dir, "fdt2/subj1/bvals") - dwi_file = os.path.join(fsl_course_dir, "fdt2/subj1/data.nii.gz") - z_min = 62 - z_size = 2 - - slice_mask = pe.Node( - fsl.ExtractROI( - x_min=0, x_size=-1, y_min=0, y_size=-1, z_min=z_min, - z_size=z_size), - name="slice_mask") - slice_mask.inputs.in_file 
= mask_file - - slice_dwi = pe.Node( - fsl.ExtractROI( - x_min=0, x_size=-1, y_min=0, y_size=-1, z_min=z_min, - z_size=z_size), - name="slice_dwi") - slice_dwi.inputs.in_file = dwi_file - - nipype_bedpostx = create_bedpostx_pipeline("nipype_bedpostx") - nipype_bedpostx.inputs.inputnode.bvecs = bvecs_file - nipype_bedpostx.inputs.inputnode.bvals = bvals_file - nipype_bedpostx.inputs.xfibres.n_fibres = 1 - nipype_bedpostx.inputs.xfibres.fudge = 1 - nipype_bedpostx.inputs.xfibres.burn_in = 0 - nipype_bedpostx.inputs.xfibres.n_jumps = 1 - nipype_bedpostx.inputs.xfibres.sample_every = 1 - nipype_bedpostx.inputs.xfibres.cnlinear = True - nipype_bedpostx.inputs.xfibres.seed = 0 - nipype_bedpostx.inputs.xfibres.model = 2 - - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - original_bedpostx = pe.Node( - interface=fsl.BEDPOSTX(), name="original_bedpostx") - original_bedpostx.inputs.bvecs = bvecs_file - original_bedpostx.inputs.bvals = bvals_file - original_bedpostx.inputs.environ['FSLPARALLEL'] = "" - original_bedpostx.inputs.n_fibres = 1 - original_bedpostx.inputs.fudge = 1 - original_bedpostx.inputs.burn_in = 0 - original_bedpostx.inputs.n_jumps = 1 - original_bedpostx.inputs.sample_every = 1 - original_bedpostx.inputs.seed = 0 - original_bedpostx.inputs.model = 2 - - test_f1 = pe.Node(util.AssertEqual(), name="mean_f1_test") - - pipeline = pe.Workflow(name="test_bedpostx") - pipeline.base_dir = tmpdir.mkdir("nipype_test_bedpostx_").strpath - - pipeline.connect([ - (slice_mask, original_bedpostx, [("roi_file", "mask")]), - (slice_mask, nipype_bedpostx, [("roi_file", "inputnode.mask")]), - (slice_dwi, original_bedpostx, [("roi_file", "dwi")]), - (slice_dwi, nipype_bedpostx, [("roi_file", "inputnode.dwi")]), - (nipype_bedpostx, test_f1, [(("outputnode.mean_fsamples", - simplify_list), "volume1")]), - (original_bedpostx, test_f1, [("mean_fsamples", "volume2")]), - ]) - - pipeline.run(plugin='Linear') diff --git a/nipype/workflows/dmri/fsl/tests/test_epi.py b/nipype/workflows/dmri/fsl/tests/test_epi.py deleted file mode 100644 index 24400d0747..0000000000 --- a/nipype/workflows/dmri/fsl/tests/test_epi.py +++ /dev/null @@ -1,46 +0,0 @@ -# -*- coding: utf-8 -*- -import os - -import pytest -import nipype.workflows.fmri.fsl as fsl_wf -import nipype.interfaces.fsl as fsl -import nipype.interfaces.utility as util -from nipype.interfaces.fsl import no_fsl, no_fsl_course_data - -import nipype.pipeline.engine as pe -import warnings -from nipype.workflows.dmri.fsl.epi import create_eddy_correct_pipeline - - -@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") -@pytest.mark.skipif(no_fsl_course_data(), reason="fsl data not available") -def test_create_eddy_correct_pipeline(tmpdir): - fsl_course_dir = os.path.abspath(os.environ['FSL_COURSE_DATA']) - - dwi_file = os.path.join(fsl_course_dir, "fdt1/subj1/data.nii.gz") - - trim_dwi = pe.Node(fsl.ExtractROI(t_min=0, t_size=2), name="trim_dwi") - trim_dwi.inputs.in_file = dwi_file - - nipype_eddycorrect = create_eddy_correct_pipeline("nipype_eddycorrect") - nipype_eddycorrect.inputs.inputnode.ref_num = 0 - - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - original_eddycorrect = pe.Node( - interface=fsl.EddyCorrect(), name="original_eddycorrect") - original_eddycorrect.inputs.ref_num = 0 - - test = pe.Node(util.AssertEqual(), name="eddy_corrected_dwi_test") - - pipeline = pe.Workflow(name="test_eddycorrect") - pipeline.base_dir = tmpdir.mkdir("nipype_test_eddycorrect_").strpath - - pipeline.connect([ - (trim_dwi, 
original_eddycorrect, [("roi_file", "in_file")]), - (trim_dwi, nipype_eddycorrect, [("roi_file", "inputnode.in_file")]), - (nipype_eddycorrect, test, [("outputnode.eddy_corrected", "volume1")]), - (original_eddycorrect, test, [("eddy_corrected", "volume2")]), - ]) - - pipeline.run(plugin='Linear') diff --git a/nipype/workflows/dmri/fsl/tests/test_tbss.py b/nipype/workflows/dmri/fsl/tests/test_tbss.py deleted file mode 100644 index 34b49a1f86..0000000000 --- a/nipype/workflows/dmri/fsl/tests/test_tbss.py +++ /dev/null @@ -1,211 +0,0 @@ -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: - -import os -from nipype.interfaces.fsl.base import no_fsl, no_fsl_course_data -import nipype.pipeline.engine as pe -import nipype.interfaces.utility as util -import pytest -import tempfile -import shutil -from subprocess import call -from nipype.workflows.dmri.fsl.tbss import create_tbss_all -import nipype.interfaces.io as nio -from nipype.interfaces import fsl - - -def _tbss_test_helper(estimate_skeleton): - fsl_course_dir = os.path.abspath(os.environ['FSL_COURSE_DATA']) - fsl.FSLCommand.set_default_output_type('NIFTI_GZ') - test_dir = tempfile.mkdtemp(prefix="nipype_test_tbss_") - tbss_orig_dir = os.path.join(test_dir, "tbss_all_original") - os.mkdir(tbss_orig_dir) - old_dir = os.getcwd() - os.chdir(tbss_orig_dir) - - subjects = ['1260', '1549'] - FA_list = [ - os.path.join(fsl_course_dir, 'tbss', subject_id + '.nii.gz') - for subject_id in subjects - ] - for f in FA_list: - shutil.copy(f, os.getcwd()) - - call( - ['tbss_1_preproc'] + - [subject_id + '.nii.gz' for subject_id in subjects], - env=os.environ.update({ - 'FSLOUTPUTTYPE': 'NIFTI_GZ' - })) - tbss1_orig_dir = os.path.join(test_dir, "tbss1_original") - shutil.copytree(tbss_orig_dir, tbss1_orig_dir) - - call( - ['tbss_2_reg', '-T'], - env=os.environ.update({ - 'FSLOUTPUTTYPE': 'NIFTI_GZ' - })) - tbss2_orig_dir = os.path.join(test_dir, "tbss2_original") - shutil.copytree(tbss_orig_dir, tbss2_orig_dir) - - if estimate_skeleton: - call( - ['tbss_3_postreg', '-S'], - env=os.environ.update({ - 'FSLOUTPUTTYPE': 'NIFTI_GZ' - })) - else: - call( - ['tbss_3_postreg', '-T'], - env=os.environ.update({ - 'FSLOUTPUTTYPE': 'NIFTI_GZ' - })) - tbss3_orig_dir = os.path.join(test_dir, "tbss3_original") - shutil.copytree(tbss_orig_dir, tbss3_orig_dir) - - call( - ['tbss_4_prestats', '0.2'], - env=os.environ.update({ - 'FSLOUTPUTTYPE': 'NIFTI_GZ' - })) - tbss4_orig_dir = os.path.join(test_dir, "tbss4_original") - shutil.copytree(tbss_orig_dir, tbss4_orig_dir) - - pipeline = pe.Workflow(name="test_tbss") - pipeline.base_dir = os.path.join(test_dir, "tbss_nipype") - - tbss = create_tbss_all(estimate_skeleton=estimate_skeleton) - tbss.inputs.inputnode.fa_list = FA_list - tbss.inputs.inputnode.skeleton_thresh = 0.2 - - tbss1_original_datasource = pe.Node( - nio.DataGrabber( - outfields=['fa_list', 'mask_list'], sort_filelist=False), - name='tbss1_original_datasource') - tbss1_original_datasource.inputs.base_directory = tbss1_orig_dir - tbss1_original_datasource.inputs.template = 'FA/%s_FA%s.nii.gz' - tbss1_original_datasource.inputs.template_args = dict( - fa_list=[[subjects, '']], mask_list=[[subjects, '_mask']]) - - tbss1_test_fa = pe.MapNode( - util.AssertEqual(), - name="tbss1_fa_test", - iterfield=['volume1', 'volume2']) - tbss1_test_mask = pe.MapNode( - util.AssertEqual(), - name="tbss1_mask_test", - iterfield=['volume1', 'volume2']) - - pipeline.connect(tbss, 
'tbss1.outputnode.fa_list', tbss1_test_fa, - 'volume1') - pipeline.connect(tbss, 'tbss1.outputnode.mask_list', tbss1_test_mask, - 'volume1') - pipeline.connect(tbss1_original_datasource, 'fa_list', tbss1_test_fa, - 'volume2') - pipeline.connect(tbss1_original_datasource, 'mask_list', tbss1_test_mask, - 'volume2') - tbss2_original_datasource = pe.Node( - nio.DataGrabber(outfields=['field_list'], sort_filelist=False), - name='tbss2_original_datasource') - - tbss2_original_datasource.inputs.base_directory = tbss2_orig_dir - tbss2_original_datasource.inputs.template = 'FA/%s_FA%s.nii.gz' - tbss2_original_datasource.inputs.template_args = dict( - field_list=[[subjects, '_to_target_warp']]) - tbss2_test_field = pe.MapNode( - util.AssertEqual(), - name="tbss2_test_field", - iterfield=['volume1', 'volume2']) - - pipeline.connect(tbss, 'tbss2.outputnode.field_list', tbss2_test_field, - 'volume1') - pipeline.connect(tbss2_original_datasource, 'field_list', tbss2_test_field, - 'volume2') - - tbss3_original_datasource = pe.Node( - nio.DataGrabber( - outfields=[ - 'groupmask', 'skeleton_file', 'meanfa_file', 'mergefa_file' - ], - sort_filelist=False), - name='tbss3_original_datasource') - tbss3_original_datasource.inputs.base_directory = tbss3_orig_dir - tbss3_original_datasource.inputs.template = 'stats/%s.nii.gz' - tbss3_original_datasource.inputs.template_args = dict( - groupmask=[['mean_FA_mask']], - skeleton_file=[['mean_FA_skeleton']], - meanfa_file=[['mean_FA']], - mergefa_file=[['all_FA']]) - - tbss3_test_groupmask = pe.Node( - util.AssertEqual(), name="tbss3_test_groupmask") - tbss3_test_skeleton_file = pe.Node( - util.AssertEqual(), name="tbss3_test_skeleton_file") - tbss3_test_meanfa_file = pe.Node( - util.AssertEqual(), name="tbss3_test_meanfa_file") - tbss3_test_mergefa_file = pe.Node( - util.AssertEqual(), name="tbss3_test_mergefa_file") - - pipeline.connect(tbss, 'tbss3.outputnode.groupmask', tbss3_test_groupmask, - 'volume1') - pipeline.connect(tbss3_original_datasource, 'groupmask', - tbss3_test_groupmask, 'volume2') - pipeline.connect(tbss, 'tbss3.outputnode.skeleton_file', - tbss3_test_skeleton_file, 'volume1') - pipeline.connect(tbss3_original_datasource, 'skeleton_file', - tbss3_test_skeleton_file, 'volume2') - pipeline.connect(tbss, 'tbss3.outputnode.meanfa_file', - tbss3_test_meanfa_file, 'volume1') - pipeline.connect(tbss3_original_datasource, 'meanfa_file', - tbss3_test_meanfa_file, 'volume2') - pipeline.connect(tbss, 'tbss3.outputnode.mergefa_file', - tbss3_test_mergefa_file, 'volume1') - pipeline.connect(tbss3_original_datasource, 'mergefa_file', - tbss3_test_mergefa_file, 'volume2') - - tbss4_original_datasource = pe.Node( - nio.DataGrabber( - outfields=['all_FA_skeletonised', 'mean_FA_skeleton_mask'], - sort_filelist=False), - name='tbss4_original_datasource') - tbss4_original_datasource.inputs.base_directory = tbss4_orig_dir - tbss4_original_datasource.inputs.template = 'stats/%s.nii.gz' - tbss4_original_datasource.inputs.template_args = dict( - all_FA_skeletonised=[['all_FA_skeletonised']], - mean_FA_skeleton_mask=[['mean_FA_skeleton_mask']]) - tbss4_test_all_FA_skeletonised = pe.Node( - util.AssertEqual(), name="tbss4_test_all_FA_skeletonised") - tbss4_test_mean_FA_skeleton_mask = pe.Node( - util.AssertEqual(), name="tbss4_test_mean_FA_skeleton_mask") - - pipeline.connect(tbss, 'tbss4.outputnode.projectedfa_file', - tbss4_test_all_FA_skeletonised, 'volume1') - pipeline.connect(tbss4_original_datasource, 'all_FA_skeletonised', - tbss4_test_all_FA_skeletonised, 'volume2') 
- pipeline.connect(tbss, 'tbss4.outputnode.skeleton_mask', - tbss4_test_mean_FA_skeleton_mask, 'volume1') - pipeline.connect(tbss4_original_datasource, 'mean_FA_skeleton_mask', - tbss4_test_mean_FA_skeleton_mask, 'volume2') - - pipeline.run(plugin='Linear') - os.chdir(old_dir) - shutil.rmtree(test_dir) - - -# this test is disabled until we figure out what is wrong with TBSS in 5.0.9 - - -@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") -@pytest.mark.skipif(no_fsl_course_data(), reason="fsl data not available") -def test_disabled_tbss_est_skeleton(): - _tbss_test_helper(True) - - -# this test is disabled until we figure out what is wrong with TBSS in 5.0.9 - - -@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") -@pytest.mark.skipif(no_fsl_course_data(), reason="fsl data not available") -def test_disabled_tbss_est_skeleton_use_precomputed_skeleton(): - _tbss_test_helper(False) diff --git a/nipype/workflows/dmri/fsl/utils.py b/nipype/workflows/dmri/fsl/utils.py deleted file mode 100644 index bd53f5cb55..0000000000 --- a/nipype/workflows/dmri/fsl/utils.py +++ /dev/null @@ -1,847 +0,0 @@ -# -*- coding: utf-8 -*- -# coding: utf-8 -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: - -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import zip, next, range, str - -from ....pipeline import engine as pe -from ....interfaces import utility as niu -from ....interfaces import fsl -from ....interfaces import ants - - -def cleanup_edge_pipeline(name='Cleanup'): - """ - Perform some de-spiking filtering to clean up the edge of the fieldmap - (copied from fsl_prepare_fieldmap) - """ - inputnode = pe.Node( - niu.IdentityInterface(fields=['in_file', 'in_mask']), name='inputnode') - outputnode = pe.Node( - niu.IdentityInterface(fields=['out_file']), name='outputnode') - - fugue = pe.Node( - fsl.FUGUE( - save_fmap=True, despike_2dfilter=True, despike_threshold=2.1), - name='Despike') - erode = pe.Node( - fsl.maths.MathsCommand(nan2zeros=True, args='-kernel 2D -ero'), - name='MskErode') - newmsk = pe.Node( - fsl.MultiImageMaths(op_string='-sub %s -thr 0.5 -bin'), name='NewMask') - applymsk = pe.Node(fsl.ApplyMask(nan2zeros=True), name='ApplyMask') - join = pe.Node(niu.Merge(2), name='Merge') - addedge = pe.Node( - fsl.MultiImageMaths(op_string='-mas %s -add %s'), name='AddEdge') - - wf = pe.Workflow(name=name) - wf.connect([(inputnode, fugue, [ - ('in_file', 'fmap_in_file'), ('in_mask', 'mask_file') - ]), (inputnode, erode, [('in_mask', 'in_file')]), (inputnode, newmsk, [ - ('in_mask', 'in_file') - ]), (erode, newmsk, [('out_file', 'operand_files')]), (fugue, applymsk, [ - ('fmap_out_file', 'in_file') - ]), (newmsk, applymsk, - [('out_file', 'mask_file')]), (erode, join, [('out_file', 'in1')]), - (applymsk, join, [('out_file', 'in2')]), (inputnode, addedge, [ - ('in_file', 'in_file') - ]), (join, addedge, [('out', 'operand_files')]), - (addedge, outputnode, [('out_file', 'out_file')])]) - return wf - - -def vsm2warp(name='Shiftmap2Warping'): - """ - Converts a voxel shift map (vsm) to a displacements field (warp). 
- """ - inputnode = pe.Node( - niu.IdentityInterface( - fields=['in_vsm', 'in_ref', 'scaling', 'enc_dir']), - name='inputnode') - outputnode = pe.Node( - niu.IdentityInterface(fields=['out_warp']), name='outputnode') - fixhdr = pe.Node( - niu.Function( - input_names=['in_file', 'in_file_hdr'], - output_names=['out_file'], - function=copy_hdr), - name='Fix_hdr') - vsm = pe.Node(fsl.maths.BinaryMaths(operation='mul'), name='ScaleField') - vsm2dfm = pe.Node( - fsl.ConvertWarp(relwarp=True, out_relwarp=True), name='vsm2dfm') - - wf = pe.Workflow(name=name) - wf.connect([(inputnode, fixhdr, [('in_vsm', 'in_file'), ('in_ref', - 'in_file_hdr')]), - (inputnode, vsm, - [('scaling', 'operand_value')]), (fixhdr, vsm, [('out_file', - 'in_file')]), - (vsm, vsm2dfm, - [('out_file', 'shift_in_file')]), (inputnode, vsm2dfm, [ - ('in_ref', 'reference'), ('enc_dir', 'shift_direction') - ]), (vsm2dfm, outputnode, [('out_file', 'out_warp')])]) - return wf - - -def dwi_flirt(name='DWICoregistration', excl_nodiff=False, flirt_param={}): - """ - Generates a workflow for linear registration of dwi volumes - """ - inputnode = pe.Node( - niu.IdentityInterface( - fields=['reference', 'in_file', 'ref_mask', 'in_xfms', 'in_bval']), - name='inputnode') - - initmat = pe.Node( - niu.Function( - input_names=['in_bval', 'in_xfms', 'excl_nodiff'], - output_names=['init_xfms'], - function=_checkinitxfm), - name='InitXforms') - initmat.inputs.excl_nodiff = excl_nodiff - dilate = pe.Node( - fsl.maths.MathsCommand(nan2zeros=True, args='-kernel sphere 5 -dilM'), - name='MskDilate') - split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs') - n4 = pe.Node(ants.N4BiasFieldCorrection(dimension=3), name='Bias') - enhb0 = pe.Node( - niu.Function( - input_names=['in_file', 'in_mask', 'clip_limit'], - output_names=['out_file'], - function=enhance), - name='B0Equalize') - enhb0.inputs.clip_limit = 0.015 - enhdw = pe.MapNode( - niu.Function( - input_names=['in_file', 'in_mask'], - output_names=['out_file'], - function=enhance), - name='DWEqualize', - iterfield=['in_file']) - flirt = pe.MapNode( - fsl.FLIRT(**flirt_param), - name='CoRegistration', - iterfield=['in_file', 'in_matrix_file']) - apply_xfms = pe.MapNode( - fsl.ApplyXFM( - apply_xfm=True, - interp='spline', - bgvalue=0), - name='ApplyXFMs', - iterfield=['in_file', 'in_matrix_file'] - ) - thres = pe.MapNode( - fsl.Threshold(thresh=0.0), - iterfield=['in_file'], - name='RemoveNegative') - merge = pe.Node(fsl.Merge(dimension='t'), name='MergeDWIs') - outputnode = pe.Node( - niu.IdentityInterface(fields=['out_file', 'out_xfms']), - name='outputnode') - wf = pe.Workflow(name=name) - wf.connect([ - (inputnode, split, [('in_file', 'in_file')]), - (inputnode, dilate, [('ref_mask', 'in_file')]), - (inputnode, enhb0, [('ref_mask', 'in_mask')]), - (inputnode, initmat, [('in_xfms', 'in_xfms'), - ('in_bval', 'in_bval')]), - (inputnode, n4, [('reference', 'input_image'), - ('ref_mask', 'mask_image')]), - (dilate, flirt, [('out_file', 'ref_weight'), - ('out_file', 'in_weight')]), - (n4, enhb0, [('output_image', 'in_file')]), - (split, enhdw, [('out_files', 'in_file')]), - (split, apply_xfms, [('out_files', 'in_file')]), - (dilate, enhdw, [('out_file', 'in_mask')]), - (enhb0, flirt, [('out_file', 'reference')]), - (enhb0, apply_xfms, [('out_file', 'reference')]), - (enhdw, flirt, [('out_file', 'in_file')]), - (initmat, flirt, [('init_xfms', 'in_matrix_file')]), - (flirt, apply_xfms, [('out_matrix_file', 'in_matrix_file')]), - (apply_xfms, thres, [('out_file', 'in_file')]), - (thres, merge, 
[('out_file', 'in_files')]), - (merge, outputnode, [('merged_file', 'out_file')]), - (flirt, outputnode, [('out_matrix_file', 'out_xfms')]) - ]) - return wf - - -def apply_all_corrections(name='UnwarpArtifacts'): - """ - Combines two lists of linear transforms with the deformation field - map obtained typically after the SDC process. - Additionally, computes the corresponding bspline coefficients and - the map of determinants of the jacobian. - """ - - inputnode = pe.Node( - niu.IdentityInterface(fields=['in_sdc', 'in_hmc', 'in_ecc', 'in_dwi']), - name='inputnode') - outputnode = pe.Node( - niu.IdentityInterface( - fields=['out_file', 'out_warp', 'out_coeff', 'out_jacobian']), - name='outputnode') - warps = pe.MapNode( - fsl.ConvertWarp(relwarp=True), - iterfield=['premat', 'postmat'], - name='ConvertWarp') - - selref = pe.Node(niu.Select(index=[0]), name='Reference') - - split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs') - unwarp = pe.MapNode( - fsl.ApplyWarp(), - iterfield=['in_file', 'field_file'], - name='UnwarpDWIs') - - coeffs = pe.MapNode( - fsl.WarpUtils(out_format='spline'), - iterfield=['in_file'], - name='CoeffComp') - jacobian = pe.MapNode( - fsl.WarpUtils(write_jacobian=True), - iterfield=['in_file'], - name='JacobianComp') - jacmult = pe.MapNode( - fsl.MultiImageMaths(op_string='-mul %s'), - iterfield=['in_file', 'operand_files'], - name='ModulateDWIs') - - thres = pe.MapNode( - fsl.Threshold(thresh=0.0), - iterfield=['in_file'], - name='RemoveNegative') - merge = pe.Node(fsl.Merge(dimension='t'), name='MergeDWIs') - - wf = pe.Workflow(name=name) - wf.connect([(inputnode, warps, [ - ('in_sdc', 'warp1'), ('in_hmc', 'premat'), ('in_ecc', 'postmat'), - ('in_dwi', 'reference') - ]), (inputnode, split, [('in_dwi', 'in_file')]), (split, selref, [ - ('out_files', 'inlist') - ]), (warps, unwarp, [('out_file', 'field_file')]), (split, unwarp, [ - ('out_files', 'in_file') - ]), (selref, unwarp, [('out', 'ref_file')]), (selref, coeffs, [ - ('out', 'reference') - ]), (warps, coeffs, [('out_file', 'in_file')]), (selref, jacobian, [ - ('out', 'reference') - ]), (coeffs, jacobian, [('out_file', 'in_file')]), (unwarp, jacmult, [ - ('out_file', 'in_file') - ]), (jacobian, jacmult, [('out_jacobian', 'operand_files')]), - (jacmult, thres, [('out_file', 'in_file')]), (thres, merge, [ - ('out_file', 'in_files') - ]), (warps, outputnode, [('out_file', 'out_warp')]), - (coeffs, outputnode, - [('out_file', 'out_coeff')]), (jacobian, outputnode, [ - ('out_jacobian', 'out_jacobian') - ]), (merge, outputnode, [('merged_file', 'out_file')])]) - return wf - - -def extract_bval(in_dwi, in_bval, b=0, out_file=None): - """ - Writes an image containing only the volumes with b-value specified at - input - """ - import numpy as np - import nibabel as nb - import os.path as op - from nipype.utils import NUMPY_MMAP - - if out_file is None: - fname, ext = op.splitext(op.basename(in_dwi)) - if ext == ".gz": - fname, ext2 = op.splitext(fname) - ext = ext2 + ext - out_file = op.abspath("%s_tsoi%s" % (fname, ext)) - - im = nb.load(in_dwi, mmap=NUMPY_MMAP) - dwidata = im.get_data() - bvals = np.loadtxt(in_bval) - - if b == 'diff': - selection = np.where(bvals != 0) - elif b == 'nodiff': - selection = np.where(bvals == 0) - else: - selection = np.where(bvals == b) - - extdata = np.squeeze(dwidata.take(selection, axis=3)) - hdr = im.header.copy() - hdr.set_data_shape(extdata.shape) - nb.Nifti1Image(extdata, im.affine, hdr).to_filename(out_file) - return out_file - - -def hmc_split(in_file, in_bval, ref_num=0, 
lowbval=5.0): - """ - Selects the reference and moving volumes from a dwi dataset - for the purpose of HMC. - """ - import numpy as np - import nibabel as nb - import os.path as op - from nipype.interfaces.base import isdefined - from nipype.utils import NUMPY_MMAP - - im = nb.load(in_file, mmap=NUMPY_MMAP) - data = im.get_data() - hdr = im.header.copy() - bval = np.loadtxt(in_bval) - - lowbs = np.where(bval <= lowbval)[0] - - volid = lowbs[0] - if (isdefined(ref_num) and (ref_num < len(lowbs))): - volid = ref_num - - if volid == 0: - data = data[..., 1:] - bval = bval[1:] - elif volid == (data.shape[-1] - 1): - data = data[..., :-1] - bval = bval[:-1] - else: - data = np.concatenate( - (data[..., :volid], data[..., (volid + 1):]), axis=3) - bval = np.hstack((bval[:volid], bval[(volid + 1):])) - - out_ref = op.abspath('hmc_ref.nii.gz') - out_mov = op.abspath('hmc_mov.nii.gz') - out_bval = op.abspath('bval_split.txt') - - refdata = data[..., volid] - hdr.set_data_shape(refdata.shape) - nb.Nifti1Image(refdata, im.affine, hdr).to_filename(out_ref) - - hdr.set_data_shape(data.shape) - nb.Nifti1Image(data, im.affine, hdr).to_filename(out_mov) - np.savetxt(out_bval, bval) - return [out_ref, out_mov, out_bval, volid] - - -def remove_comp(in_file, in_bval, volid=0, out_file=None): - """ - Removes the volume ``volid`` from the 4D nifti file - """ - import numpy as np - import nibabel as nb - import os.path as op - from nipype.utils import NUMPY_MMAP - - if out_file is None: - fname, ext = op.splitext(op.basename(in_file)) - if ext == ".gz": - fname, ext2 = op.splitext(fname) - ext = ext2 + ext - out_file = op.abspath("%s_extract%s" % (fname, ext)) - - im = nb.load(in_file, mmap=NUMPY_MMAP) - data = im.get_data() - hdr = im.header.copy() - bval = np.loadtxt(in_bval) - - if volid == 0: - data = data[..., 1:] - bval = bval[1:] - elif volid == (data.shape[-1] - 1): - data = data[..., :-1] - bval = bval[:-1] - else: - data = np.concatenate( - (data[..., :volid], data[..., (volid + 1):]), axis=3) - bval = np.hstack((bval[:volid], bval[(volid + 1):])) - hdr.set_data_shape(data.shape) - nb.Nifti1Image(data, im.affine, hdr).to_filename(out_file) - - out_bval = op.abspath('bval_extract.txt') - np.savetxt(out_bval, bval) - return out_file, out_bval - - -def insert_mat(inlist, volid=0): - import numpy as np - import os.path as op - idfname = op.abspath('identity.mat') - out = inlist - np.savetxt(idfname, np.eye(4)) - out.insert(volid, idfname) - return out - - -def recompose_dwi(in_dwi, in_bval, in_corrected, out_file=None): - """ - Recompose back the dMRI data accordingly the b-values table after EC - correction - """ - import numpy as np - import nibabel as nb - import os.path as op - from nipype.utils import NUMPY_MMAP - - if out_file is None: - fname, ext = op.splitext(op.basename(in_dwi)) - if ext == ".gz": - fname, ext2 = op.splitext(fname) - ext = ext2 + ext - out_file = op.abspath("%s_eccorrect%s" % (fname, ext)) - - im = nb.load(in_dwi, mmap=NUMPY_MMAP) - dwidata = im.get_data() - bvals = np.loadtxt(in_bval) - dwis = np.where(bvals != 0)[0].tolist() - - if len(dwis) != len(in_corrected): - raise RuntimeError(('Length of DWIs in b-values table and after' - 'correction should match')) - - for bindex, dwi in zip(dwis, in_corrected): - dwidata[..., bindex] = nb.load(dwi, mmap=NUMPY_MMAP).get_data() - - nb.Nifti1Image(dwidata, im.affine, im.header).to_filename(out_file) - return out_file - - -def recompose_xfm(in_bval, in_xfms): - """ - Insert identity transformation matrices in b0 volumes to build up a 
list - """ - import numpy as np - import os.path as op - - bvals = np.loadtxt(in_bval) - xfms = iter([np.loadtxt(xfm) for xfm in in_xfms]) - out_files = [] - - for i, b in enumerate(bvals): - if b == 0.0: - mat = np.eye(4) - else: - mat = next(xfms) - - out_name = op.abspath('eccor_%04d.mat' % i) - out_files.append(out_name) - np.savetxt(out_name, mat) - - return out_files - - -def time_avg(in_file, index=[0], out_file=None): - """ - Average the input time-series, selecting the indices given in index - - .. warning:: time steps should be already registered (corrected for - head motion artifacts). - - """ - import numpy as np - import nibabel as nb - import os.path as op - from nipype.utils import NUMPY_MMAP - - if out_file is None: - fname, ext = op.splitext(op.basename(in_file)) - if ext == ".gz": - fname, ext2 = op.splitext(fname) - ext = ext2 + ext - out_file = op.abspath("%s_baseline%s" % (fname, ext)) - - index = np.atleast_1d(index).tolist() - - imgs = np.array(nb.four_to_three(nb.load(in_file, mmap=NUMPY_MMAP)))[index] - if len(index) == 1: - data = imgs[0].get_data().astype(np.float32) - else: - data = np.average( - np.array([im.get_data().astype(np.float32) for im in imgs]), - axis=0) - - hdr = imgs[0].header.copy() - hdr.set_data_shape(data.shape) - hdr.set_xyzt_units('mm') - hdr.set_data_dtype(np.float32) - nb.Nifti1Image(data, imgs[0].affine, hdr).to_filename(out_file) - return out_file - - -def b0_indices(in_bval, max_b=10.0): - """ - Extract the indices of slices in a b-values file with a low b value - """ - import numpy as np - bval = np.loadtxt(in_bval) - return np.argwhere(bval <= max_b).flatten().tolist() - - -def b0_average(in_dwi, in_bval, max_b=10.0, out_file=None): - """ - A function that averages the *b0* volumes from a DWI dataset. - As current dMRI data are being acquired with all b-values > 0.0, - the *lowb* volumes are selected by specifying the parameter max_b. - - .. warning:: *b0* should be already registered (head motion artifact should - be corrected). - - """ - import numpy as np - import nibabel as nb - import os.path as op - from nipype.utils import NUMPY_MMAP - - if out_file is None: - fname, ext = op.splitext(op.basename(in_dwi)) - if ext == ".gz": - fname, ext2 = op.splitext(fname) - ext = ext2 + ext - out_file = op.abspath("%s_avg_b0%s" % (fname, ext)) - - imgs = np.array(nb.four_to_three(nb.load(in_dwi, mmap=NUMPY_MMAP))) - bval = np.loadtxt(in_bval) - index = np.argwhere(bval <= max_b).flatten().tolist() - - b0s = [im.get_data().astype(np.float32) for im in imgs[index]] - b0 = np.average(np.array(b0s), axis=0) - - hdr = imgs[0].header.copy() - hdr.set_data_shape(b0.shape) - hdr.set_xyzt_units('mm') - hdr.set_data_dtype(np.float32) - nb.Nifti1Image(b0, imgs[0].affine, hdr).to_filename(out_file) - return out_file - - -def rotate_bvecs(in_bvec, in_matrix): - """ - Rotates the input bvec file accordingly with a list of matrices. - - .. note:: the input affine matrix transforms points in the destination - image to their corresponding coordinates in the original image. - Therefore, this matrix should be inverted first, as we want to know - the target position of :math:`\\vec{r}`. 
- - """ - import os - import numpy as np - - name, fext = os.path.splitext(os.path.basename(in_bvec)) - if fext == '.gz': - name, _ = os.path.splitext(name) - out_file = os.path.abspath('%s_rotated.bvec' % name) - bvecs = np.loadtxt(in_bvec).T - new_bvecs = [] - - if len(bvecs) != len(in_matrix): - raise RuntimeError(('Number of b-vectors (%d) and rotation ' - 'matrices (%d) should match.') % (len(bvecs), - len(in_matrix))) - - for bvec, mat in zip(bvecs, in_matrix): - if np.all(bvec == 0.0): - new_bvecs.append(bvec) - else: - invrot = np.linalg.inv(np.loadtxt(mat))[:3, :3] - newbvec = invrot.dot(bvec) - new_bvecs.append((newbvec / np.linalg.norm(newbvec))) - - np.savetxt(out_file, np.array(new_bvecs).T, fmt=b'%0.15f') - return out_file - - -def eddy_rotate_bvecs(in_bvec, eddy_params): - """ - Rotates the input bvec file accordingly with a list of parameters sourced - from ``eddy``, as explained `here - `_. - """ - import os - import numpy as np - from math import sin, cos - - name, fext = os.path.splitext(os.path.basename(in_bvec)) - if fext == '.gz': - name, _ = os.path.splitext(name) - out_file = os.path.abspath('%s_rotated.bvec' % name) - bvecs = np.loadtxt(in_bvec).T - new_bvecs = [] - - params = np.loadtxt(eddy_params) - - if len(bvecs) != len(params): - raise RuntimeError(('Number of b-vectors and rotation ' - 'matrices should match.')) - - for bvec, row in zip(bvecs, params): - if np.all(bvec == 0.0): - new_bvecs.append(bvec) - else: - ax = row[3] - ay = row[4] - az = row[5] - - Rx = np.array([[1.0, 0.0, 0.0], [0.0, cos(ax), -sin(ax)], - [0.0, sin(ax), cos(ax)]]) - Ry = np.array([[cos(ay), 0.0, sin(ay)], [0.0, 1.0, 0.0], - [-sin(ay), 0.0, cos(ay)]]) - Rz = np.array([[cos(az), -sin(az), 0.0], [sin(az), - cos(az), 0.0], - [0.0, 0.0, 1.0]]) - R = Rx.dot(Ry).dot(Rz) - - invrot = np.linalg.inv(R) - newbvec = invrot.dot(bvec) - new_bvecs.append(newbvec / np.linalg.norm(newbvec)) - - np.savetxt(out_file, np.array(new_bvecs).T, fmt=b'%0.15f') - return out_file - - -def compute_readout(params): - """ - Computes readout time from epi params (see `eddy documentation - `_). - - .. warning:: ``params['echospacing']`` should be in *sec* units. 
- - - """ - epi_factor = 1.0 - acc_factor = 1.0 - try: - if params['epi_factor'] > 1: - epi_factor = float(params['epi_factor'] - 1) - except: - pass - try: - if params['acc_factor'] > 1: - acc_factor = 1.0 / params['acc_factor'] - except: - pass - return acc_factor * epi_factor * params['echospacing'] - - -def siemens2rads(in_file, out_file=None): - """ - Converts input phase difference map to rads - """ - import numpy as np - import nibabel as nb - import os.path as op - import math - - if out_file is None: - fname, fext = op.splitext(op.basename(in_file)) - if fext == '.gz': - fname, _ = op.splitext(fname) - out_file = op.abspath('./%s_rads.nii.gz' % fname) - - in_file = np.atleast_1d(in_file).tolist() - im = nb.load(in_file[0]) - data = im.get_data().astype(np.float32) - hdr = im.header.copy() - - if len(in_file) == 2: - data = nb.load(in_file[1]).get_data().astype(np.float32) - data - elif (data.ndim == 4) and (data.shape[-1] == 2): - data = np.squeeze(data[..., 1] - data[..., 0]) - hdr.set_data_shape(data.shape[:3]) - - imin = data.min() - imax = data.max() - data = (2.0 * math.pi * (data - imin) / (imax - imin)) - math.pi - hdr.set_data_dtype(np.float32) - hdr.set_xyzt_units('mm') - hdr['datatype'] = 16 - nb.Nifti1Image(data, im.affine, hdr).to_filename(out_file) - return out_file - - -def rads2radsec(in_file, delta_te, out_file=None): - """ - Converts input phase difference map to rads - """ - import numpy as np - import nibabel as nb - import os.path as op - import math - from nipype.utils import NUMPY_MMAP - - if out_file is None: - fname, fext = op.splitext(op.basename(in_file)) - if fext == '.gz': - fname, _ = op.splitext(fname) - out_file = op.abspath('./%s_radsec.nii.gz' % fname) - - im = nb.load(in_file, mmap=NUMPY_MMAP) - data = im.get_data().astype(np.float32) * (1.0 / delta_te) - nb.Nifti1Image(data, im.affine, im.header).to_filename(out_file) - return out_file - - -def demean_image(in_file, in_mask=None, out_file=None): - """ - Demean image data inside mask - """ - import numpy as np - import nibabel as nb - import os.path as op - import math - from nipype.utils import NUMPY_MMAP - - if out_file is None: - fname, fext = op.splitext(op.basename(in_file)) - if fext == '.gz': - fname, _ = op.splitext(fname) - out_file = op.abspath('./%s_demean.nii.gz' % fname) - - im = nb.load(in_file, mmap=NUMPY_MMAP) - data = im.get_data().astype(np.float32) - msk = np.ones_like(data) - - if in_mask is not None: - msk = nb.load(in_mask, mmap=NUMPY_MMAP).get_data().astype(np.float32) - msk[msk > 0] = 1.0 - msk[msk < 1] = 0.0 - - mean = np.median(data[msk == 1].reshape(-1)) - data[msk == 1] = data[msk == 1] - mean - nb.Nifti1Image(data, im.affine, im.header).to_filename(out_file) - return out_file - - -def add_empty_vol(in_file, out_file=None): - """ - Adds an empty vol to the phase difference image - """ - import nibabel as nb - import os.path as op - import numpy as np - import math - from nipype.utils import NUMPY_MMAP - - if out_file is None: - fname, fext = op.splitext(op.basename(in_file)) - if fext == '.gz': - fname, _ = op.splitext(fname) - out_file = op.abspath('./%s_4D.nii.gz' % fname) - - im = nb.load(in_file, mmap=NUMPY_MMAP) - zim = nb.Nifti1Image(np.zeros_like(im.get_data()), im.affine, im.header) - nb.funcs.concat_images([im, zim]).to_filename(out_file) - return out_file - - -def reorient_bvecs(in_dwi, old_dwi, in_bvec): - """ - Checks reorientations of ``in_dwi`` w.r.t. ``old_dwi`` and - reorients the in_bvec table accordingly. 
- """ - import os - import numpy as np - import nibabel as nb - from nipype.utils import NUMPY_MMAP - - name, fext = os.path.splitext(os.path.basename(in_bvec)) - if fext == '.gz': - name, _ = os.path.splitext(name) - out_file = os.path.abspath('%s_reorient.bvec' % name) - bvecs = np.loadtxt(in_bvec).T - new_bvecs = [] - - N = nb.load(in_dwi, mmap=NUMPY_MMAP).affine - O = nb.load(old_dwi, mmap=NUMPY_MMAP).affine - RS = N.dot(np.linalg.inv(O))[:3, :3] - sc_idx = np.where((np.abs(RS) != 1) & (RS != 0)) - S = np.ones_like(RS) - S[sc_idx] = RS[sc_idx] - R = RS / S - - new_bvecs = [R.dot(b) for b in bvecs] - np.savetxt(out_file, np.array(new_bvecs).T, fmt=b'%0.15f') - return out_file - - -def copy_hdr(in_file, in_file_hdr, out_file=None): - import numpy as np - import nibabel as nb - import os.path as op - from nipype.utils import NUMPY_MMAP - - if out_file is None: - fname, fext = op.splitext(op.basename(in_file)) - if fext == '.gz': - fname, _ = op.splitext(fname) - out_file = op.abspath('./%s_fixhdr.nii.gz' % fname) - - imref = nb.load(in_file_hdr, mmap=NUMPY_MMAP) - hdr = imref.header.copy() - hdr.set_data_dtype(np.float32) - vsm = nb.load(in_file, mmap=NUMPY_MMAP).get_data().astype(np.float32) - hdr.set_data_shape(vsm.shape) - hdr.set_xyzt_units('mm') - nii = nb.Nifti1Image(vsm, imref.affine, hdr) - nii.to_filename(out_file) - return out_file - - -def enhance(in_file, clip_limit=0.010, in_mask=None, out_file=None): - import numpy as np - import nibabel as nb - import os.path as op - from skimage import exposure, img_as_int - from nipype.utils import NUMPY_MMAP - - if out_file is None: - fname, fext = op.splitext(op.basename(in_file)) - if fext == '.gz': - fname, _ = op.splitext(fname) - out_file = op.abspath('./%s_enh.nii.gz' % fname) - - im = nb.load(in_file, mmap=NUMPY_MMAP) - imdata = im.get_data() - imshape = im.shape - - if in_mask is not None: - msk = nb.load(in_mask, mmap=NUMPY_MMAP).get_data() - msk[msk > 0] = 1 - msk[msk < 1] = 0 - imdata = imdata * msk - - immin = imdata.min() - imdata = (imdata - immin).astype(np.uint16) - - adapted = exposure.equalize_adapthist( - imdata.reshape(imshape[0], -1), clip_limit=clip_limit) - - nb.Nifti1Image(adapted.reshape(imshape), im.affine, - im.header).to_filename(out_file) - - return out_file - - -def _checkinitxfm(in_bval, excl_nodiff, in_xfms=None): - from nipype.interfaces.base import isdefined - import numpy as np - import os.path as op - bvals = np.loadtxt(in_bval) - - gen_id = ((in_xfms is None) or (not isdefined(in_xfms)) - or (len(in_xfms) != len(bvals))) - - init_xfms = [] - if excl_nodiff: - dws = np.where(bvals != 0)[0].tolist() - else: - dws = list(range(len(bvals))) - - if gen_id: - for i in dws: - xfm_file = op.abspath('init_%04d.mat' % i) - np.savetxt(xfm_file, np.eye(4)) - init_xfms.append(xfm_file) - else: - init_xfms = [in_xfms[i] for i in dws] - - return init_xfms diff --git a/nipype/workflows/dmri/mrtrix/__init__.py b/nipype/workflows/dmri/mrtrix/__init__.py deleted file mode 100644 index 6851021111..0000000000 --- a/nipype/workflows/dmri/mrtrix/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import -from .diffusion import create_mrtrix_dti_pipeline -from .connectivity_mapping import create_connectivity_pipeline -from .group_connectivity import (create_group_connectivity_pipeline) diff --git a/nipype/workflows/dmri/mrtrix/connectivity_mapping.py b/nipype/workflows/dmri/mrtrix/connectivity_mapping.py deleted file mode 100644 index e47dcb9531..0000000000 --- 
a/nipype/workflows/dmri/mrtrix/connectivity_mapping.py +++ /dev/null @@ -1,639 +0,0 @@ -# -*- coding: utf-8 -*- -import inspect -import os.path as op # system functions - -from ....interfaces import io as nio # Data i/o -from ....interfaces import utility as util # utility -from ....pipeline import engine as pe # pypeline engine -from ....interfaces import fsl as fsl -from ....interfaces import freesurfer as fs # freesurfer -from ....interfaces import mrtrix as mrtrix -from ....interfaces import cmtk as cmtk -from ....interfaces import dipy as dipy -from ....algorithms import misc as misc -from ..fsl.epi import create_eddy_correct_pipeline -from ..connectivity.nx import create_networkx_pipeline, create_cmats_to_csv_pipeline -from ....interfaces.utility import Function -from ...misc.utils import select_aparc_annot - - -def create_connectivity_pipeline(name="connectivity", - parcellation_name='scale500'): - """Creates a pipeline that does the same connectivity processing as in the - :ref:`example_dmri_connectivity_advanced` example script. Given a subject id (and completed Freesurfer reconstruction) - diffusion-weighted image, b-values, and b-vectors, the workflow will return the subject's connectome - as a Connectome File Format (CFF) file for use in Connectome Viewer (http://www.cmtk.org). - - Example - ------- - - >>> from nipype.workflows.dmri.mrtrix.connectivity_mapping import create_connectivity_pipeline - >>> conmapper = create_connectivity_pipeline("nipype_conmap") - >>> conmapper.inputs.inputnode.subjects_dir = '.' - >>> conmapper.inputs.inputnode.subject_id = 'subj1' - >>> conmapper.inputs.inputnode.dwi = 'data.nii.gz' - >>> conmapper.inputs.inputnode.bvecs = 'bvecs' - >>> conmapper.inputs.inputnode.bvals = 'bvals' - >>> conmapper.run() # doctest: +SKIP - - Inputs:: - - inputnode.subject_id - inputnode.subjects_dir - inputnode.dwi - inputnode.bvecs - inputnode.bvals - inputnode.resolution_network_file - - Outputs:: - - outputnode.connectome - outputnode.cmatrix - outputnode.networks - outputnode.fa - outputnode.struct - outputnode.tracts - outputnode.rois - outputnode.odfs - outputnode.filtered_tractography - outputnode.tdi - outputnode.nxstatscff - outputnode.nxcsv - outputnode.cmatrices_csv - outputnode.mean_fiber_length - outputnode.median_fiber_length - outputnode.fiber_length_std - """ - - inputnode_within = pe.Node( - util.IdentityInterface(fields=[ - "subject_id", "dwi", "bvecs", "bvals", "subjects_dir", - "resolution_network_file" - ]), - name="inputnode_within") - - FreeSurferSource = pe.Node( - interface=nio.FreeSurferSource(), name='fssource') - FreeSurferSourceLH = pe.Node( - interface=nio.FreeSurferSource(), name='fssourceLH') - FreeSurferSourceLH.inputs.hemi = 'lh' - - FreeSurferSourceRH = pe.Node( - interface=nio.FreeSurferSource(), name='fssourceRH') - FreeSurferSourceRH.inputs.hemi = 'rh' - """ - Creating the workflow's nodes - ============================= - """ - """ - Conversion nodes - ---------------- - """ - """ - A number of conversion operations are required to obtain NIFTI files from the FreesurferSource for each subject. 
- Nodes are used to convert the following: - * Original structural image to NIFTI - * Pial, white, inflated, and spherical surfaces for both the left and right hemispheres are converted to GIFTI for visualization in ConnectomeViewer - * Parcellated annotation files for the left and right hemispheres are also converted to GIFTI - - """ - - mri_convert_Brain = pe.Node( - interface=fs.MRIConvert(), name='mri_convert_Brain') - mri_convert_Brain.inputs.out_type = 'nii' - mri_convert_ROI_scale500 = mri_convert_Brain.clone( - 'mri_convert_ROI_scale500') - - mris_convertLH = pe.Node(interface=fs.MRIsConvert(), name='mris_convertLH') - mris_convertLH.inputs.out_datatype = 'gii' - mris_convertRH = mris_convertLH.clone('mris_convertRH') - mris_convertRHwhite = mris_convertLH.clone('mris_convertRHwhite') - mris_convertLHwhite = mris_convertLH.clone('mris_convertLHwhite') - mris_convertRHinflated = mris_convertLH.clone('mris_convertRHinflated') - mris_convertLHinflated = mris_convertLH.clone('mris_convertLHinflated') - mris_convertRHsphere = mris_convertLH.clone('mris_convertRHsphere') - mris_convertLHsphere = mris_convertLH.clone('mris_convertLHsphere') - mris_convertLHlabels = mris_convertLH.clone('mris_convertLHlabels') - mris_convertRHlabels = mris_convertLH.clone('mris_convertRHlabels') - """ - Diffusion processing nodes - -------------------------- - - .. seealso:: - - dmri_mrtrix_dti.py - Tutorial that focuses solely on the MRtrix diffusion processing - - http://www.brain.org.au/software/mrtrix/index.html - MRtrix's online documentation - """ - """ - b-values and b-vectors stored in FSL's format are converted into a single encoding file for MRTrix. - """ - - fsl2mrtrix = pe.Node(interface=mrtrix.FSL2MRTrix(), name='fsl2mrtrix') - """ - Distortions induced by eddy currents are corrected prior to fitting the tensors. - The first image is used as a reference for which to warp the others. - """ - - eddycorrect = create_eddy_correct_pipeline(name='eddycorrect') - eddycorrect.inputs.inputnode.ref_num = 1 - """ - Tensors are fitted to each voxel in the diffusion-weighted image and from these three maps are created: - * Major eigenvector in each voxel - * Apparent diffusion coefficient - * Fractional anisotropy - """ - - dwi2tensor = pe.Node(interface=mrtrix.DWI2Tensor(), name='dwi2tensor') - tensor2vector = pe.Node( - interface=mrtrix.Tensor2Vector(), name='tensor2vector') - tensor2adc = pe.Node( - interface=mrtrix.Tensor2ApparentDiffusion(), name='tensor2adc') - tensor2fa = pe.Node( - interface=mrtrix.Tensor2FractionalAnisotropy(), name='tensor2fa') - MRconvert_fa = pe.Node(interface=mrtrix.MRConvert(), name='MRconvert_fa') - MRconvert_fa.inputs.extension = 'nii' - """ - - These nodes are used to create a rough brain mask from the b0 image. - The b0 image is extracted from the original diffusion-weighted image, - put through a simple thresholding routine, and smoothed using a 3x3 median filter. - """ - - MRconvert = pe.Node(interface=mrtrix.MRConvert(), name='MRconvert') - MRconvert.inputs.extract_at_axis = 3 - MRconvert.inputs.extract_at_coordinate = [0] - threshold_b0 = pe.Node(interface=mrtrix.Threshold(), name='threshold_b0') - median3d = pe.Node(interface=mrtrix.MedianFilter3D(), name='median3d') - """ - The brain mask is also used to help identify single-fiber voxels. - This is done by passing the brain mask through two erosion steps, - multiplying the remaining mask with the fractional anisotropy map, and - thresholding the result to obtain some highly anisotropic within-brain voxels. 
- """ - - erode_mask_firstpass = pe.Node( - interface=mrtrix.Erode(), name='erode_mask_firstpass') - erode_mask_secondpass = pe.Node( - interface=mrtrix.Erode(), name='erode_mask_secondpass') - MRmultiply = pe.Node(interface=mrtrix.MRMultiply(), name='MRmultiply') - MRmult_merge = pe.Node(interface=util.Merge(2), name='MRmultiply_merge') - threshold_FA = pe.Node(interface=mrtrix.Threshold(), name='threshold_FA') - threshold_FA.inputs.absolute_threshold_value = 0.7 - """ - For whole-brain tracking we also require a broad white-matter seed mask. - This is created by generating a white matter mask, given a brainmask, and - thresholding it at a reasonably high level. - """ - - bet = pe.Node(interface=fsl.BET(mask=True), name='bet_b0') - gen_WM_mask = pe.Node( - interface=mrtrix.GenerateWhiteMatterMask(), name='gen_WM_mask') - threshold_wmmask = pe.Node( - interface=mrtrix.Threshold(), name='threshold_wmmask') - threshold_wmmask.inputs.absolute_threshold_value = 0.4 - """ - The spherical deconvolution step depends on the estimate of the response function - in the highly anisotropic voxels we obtained above. - - .. warning:: - - For damaged or pathological brains one should take care to lower the maximum harmonic order of these steps. - - """ - - estimateresponse = pe.Node( - interface=mrtrix.EstimateResponseForSH(), name='estimateresponse') - estimateresponse.inputs.maximum_harmonic_order = 6 - csdeconv = pe.Node( - interface=mrtrix.ConstrainedSphericalDeconvolution(), name='csdeconv') - csdeconv.inputs.maximum_harmonic_order = 6 - """ - Finally, we track probabilistically using the orientation distribution functions obtained earlier. - The tracts are then used to generate a tract-density image, and they are also converted to TrackVis format. - """ - - probCSDstreamtrack = pe.Node( - interface=mrtrix.ProbabilisticSphericallyDeconvolutedStreamlineTrack(), - name='probCSDstreamtrack') - probCSDstreamtrack.inputs.inputmodel = 'SD_PROB' - probCSDstreamtrack.inputs.desired_number_of_tracks = 150000 - tracks2prob = pe.Node(interface=mrtrix.Tracks2Prob(), name='tracks2prob') - tracks2prob.inputs.colour = True - MRconvert_tracks2prob = MRconvert_fa.clone(name='MRconvert_tracks2prob') - tck2trk = pe.Node(interface=mrtrix.MRTrix2TrackVis(), name='tck2trk') - trk2tdi = pe.Node(interface=dipy.TrackDensityMap(), name='trk2tdi') - """ - Structural segmentation nodes - ----------------------------- - """ - """ - The following node identifies the transformation between the diffusion-weighted - image and the structural image. This transformation is then applied to the tracts - so that they are in the same space as the regions of interest. - """ - - coregister = pe.Node(interface=fsl.FLIRT(dof=6), name='coregister') - coregister.inputs.cost = ('normmi') - """ - Parcellation is performed given the aparc+aseg image from Freesurfer. - The CMTK Parcellation step subdivides these regions to return a higher-resolution parcellation scheme. - The parcellation used here is entitled "scale500" and returns 1015 regions. - """ - - parcellate = pe.Node(interface=cmtk.Parcellate(), name="Parcellate") - parcellate.inputs.parcellation_name = parcellation_name - """ - The CreateMatrix interface takes in the remapped aparc+aseg image as well as the label dictionary and fiber tracts - and outputs a number of different files. The most important of which is the connectivity network itself, which is stored - as a 'gpickle' and can be loaded using Python's NetworkX package (see CreateMatrix docstring). 
Also outputted are various - NumPy arrays containing detailed tract information, such as the start and endpoint regions, and statistics on the mean and - standard deviation for the fiber length of each connection. These matrices can be used in the ConnectomeViewer to plot the - specific tracts that connect between user-selected regions. - - Here we choose the Lausanne2008 parcellation scheme, since we are incorporating the CMTK parcellation step. - """ - - creatematrix = pe.Node(interface=cmtk.CreateMatrix(), name="CreateMatrix") - creatematrix.inputs.count_region_intersections = True - """ - Next we define the endpoint of this tutorial, which is the CFFConverter node, as well as a few nodes which use - the Nipype Merge utility. These are useful for passing lists of the files we want packaged in our CFF file. - The inspect.getfile command is used to package this script into the resulting CFF file, so that it is easy to - look back at the processing parameters that were used. - """ - - CFFConverter = pe.Node(interface=cmtk.CFFConverter(), name="CFFConverter") - CFFConverter.inputs.script_files = op.abspath( - inspect.getfile(inspect.currentframe())) - giftiSurfaces = pe.Node(interface=util.Merge(8), name="GiftiSurfaces") - giftiLabels = pe.Node(interface=util.Merge(2), name="GiftiLabels") - niftiVolumes = pe.Node(interface=util.Merge(3), name="NiftiVolumes") - fiberDataArrays = pe.Node(interface=util.Merge(4), name="FiberDataArrays") - """ - We also create a node to calculate several network metrics on our resulting file, and another CFF converter - which will be used to package these networks into a single file. - """ - - networkx = create_networkx_pipeline(name='networkx') - cmats_to_csv = create_cmats_to_csv_pipeline(name='cmats_to_csv') - nfibs_to_csv = pe.Node(interface=misc.Matlab2CSV(), name='nfibs_to_csv') - merge_nfib_csvs = pe.Node( - interface=misc.MergeCSVFiles(), name='merge_nfib_csvs') - merge_nfib_csvs.inputs.extra_column_heading = 'Subject' - merge_nfib_csvs.inputs.out_file = 'fibers.csv' - NxStatsCFFConverter = pe.Node( - interface=cmtk.CFFConverter(), name="NxStatsCFFConverter") - NxStatsCFFConverter.inputs.script_files = op.abspath( - inspect.getfile(inspect.currentframe())) - """ - Connecting the workflow - ======================= - Here we connect our processing pipeline. - """ - """ - Connecting the inputs, FreeSurfer nodes, and conversions - -------------------------------------------------------- - """ - - mapping = pe.Workflow(name='mapping') - """ - First, we connect the input node to the FreeSurfer input nodes. 
- """ - - mapping.connect([(inputnode_within, FreeSurferSource, [("subjects_dir", - "subjects_dir")])]) - mapping.connect([(inputnode_within, FreeSurferSource, [("subject_id", - "subject_id")])]) - - mapping.connect([(inputnode_within, FreeSurferSourceLH, - [("subjects_dir", "subjects_dir")])]) - mapping.connect([(inputnode_within, FreeSurferSourceLH, [("subject_id", - "subject_id")])]) - - mapping.connect([(inputnode_within, FreeSurferSourceRH, - [("subjects_dir", "subjects_dir")])]) - mapping.connect([(inputnode_within, FreeSurferSourceRH, [("subject_id", - "subject_id")])]) - - mapping.connect([(inputnode_within, parcellate, [("subjects_dir", - "subjects_dir")])]) - mapping.connect([(inputnode_within, parcellate, [("subject_id", - "subject_id")])]) - mapping.connect([(parcellate, mri_convert_ROI_scale500, [('roi_file', - 'in_file')])]) - """ - Nifti conversion for subject's stripped brain image from Freesurfer: - """ - - mapping.connect([(FreeSurferSource, mri_convert_Brain, [('brain', - 'in_file')])]) - """ - Surface conversions to GIFTI (pial, white, inflated, and sphere for both hemispheres) - """ - - mapping.connect([(FreeSurferSourceLH, mris_convertLH, [('pial', - 'in_file')])]) - mapping.connect([(FreeSurferSourceRH, mris_convertRH, [('pial', - 'in_file')])]) - mapping.connect([(FreeSurferSourceLH, mris_convertLHwhite, [('white', - 'in_file')])]) - mapping.connect([(FreeSurferSourceRH, mris_convertRHwhite, [('white', - 'in_file')])]) - mapping.connect([(FreeSurferSourceLH, mris_convertLHinflated, - [('inflated', 'in_file')])]) - mapping.connect([(FreeSurferSourceRH, mris_convertRHinflated, - [('inflated', 'in_file')])]) - mapping.connect([(FreeSurferSourceLH, mris_convertLHsphere, - [('sphere', 'in_file')])]) - mapping.connect([(FreeSurferSourceRH, mris_convertRHsphere, - [('sphere', 'in_file')])]) - """ - The annotation files are converted using the pial surface as a map via the MRIsConvert interface. - One of the functions defined earlier is used to select the lh.aparc.annot and rh.aparc.annot files - specifically (rather than e.g. rh.aparc.a2009s.annot) from the output list given by the FreeSurferSource. - """ - - mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels, - [('pial', 'in_file')])]) - mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels, - [('pial', 'in_file')])]) - mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels, - [(('annot', select_aparc_annot), 'annot_file')])]) - mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels, - [(('annot', select_aparc_annot), 'annot_file')])]) - """ - Diffusion Processing - -------------------- - Now we connect the tensor computations: - """ - - mapping.connect([(inputnode_within, fsl2mrtrix, [("bvecs", "bvec_file"), - ("bvals", "bval_file")])]) - mapping.connect([(inputnode_within, eddycorrect, [("dwi", - "inputnode.in_file")])]) - mapping.connect([(eddycorrect, dwi2tensor, [("outputnode.eddy_corrected", - "in_file")])]) - mapping.connect([(fsl2mrtrix, dwi2tensor, [("encoding_file", - "encoding_file")])]) - - mapping.connect([ - (dwi2tensor, tensor2vector, [['tensor', 'in_file']]), - (dwi2tensor, tensor2adc, [['tensor', 'in_file']]), - (dwi2tensor, tensor2fa, [['tensor', 'in_file']]), - ]) - mapping.connect([(tensor2fa, MRmult_merge, [("FA", "in1")])]) - mapping.connect([(tensor2fa, MRconvert_fa, [("FA", "in_file")])]) - """ - - This block creates the rough brain mask to be multiplied, mulitplies it with the - fractional anisotropy image, and thresholds it to get the single-fiber voxels. 
- """ - - mapping.connect([(eddycorrect, MRconvert, [("outputnode.eddy_corrected", - "in_file")])]) - mapping.connect([(MRconvert, threshold_b0, [("converted", "in_file")])]) - mapping.connect([(threshold_b0, median3d, [("out_file", "in_file")])]) - mapping.connect([(median3d, erode_mask_firstpass, [("out_file", - "in_file")])]) - mapping.connect([(erode_mask_firstpass, erode_mask_secondpass, - [("out_file", "in_file")])]) - mapping.connect([(erode_mask_secondpass, MRmult_merge, [("out_file", - "in2")])]) - mapping.connect([(MRmult_merge, MRmultiply, [("out", "in_files")])]) - mapping.connect([(MRmultiply, threshold_FA, [("out_file", "in_file")])]) - """ - Here the thresholded white matter mask is created for seeding the tractography. - """ - - mapping.connect([(eddycorrect, bet, [("outputnode.eddy_corrected", - "in_file")])]) - mapping.connect([(eddycorrect, gen_WM_mask, [("outputnode.eddy_corrected", - "in_file")])]) - mapping.connect([(bet, gen_WM_mask, [("mask_file", "binary_mask")])]) - mapping.connect([(fsl2mrtrix, gen_WM_mask, [("encoding_file", - "encoding_file")])]) - mapping.connect([(gen_WM_mask, threshold_wmmask, [("WMprobabilitymap", - "in_file")])]) - """ - Next we estimate the fiber response distribution. - """ - - mapping.connect([(eddycorrect, estimateresponse, - [("outputnode.eddy_corrected", "in_file")])]) - mapping.connect([(fsl2mrtrix, estimateresponse, [("encoding_file", - "encoding_file")])]) - mapping.connect([(threshold_FA, estimateresponse, [("out_file", - "mask_image")])]) - """ - Run constrained spherical deconvolution. - """ - - mapping.connect([(eddycorrect, csdeconv, [("outputnode.eddy_corrected", - "in_file")])]) - mapping.connect([(gen_WM_mask, csdeconv, [("WMprobabilitymap", - "mask_image")])]) - mapping.connect([(estimateresponse, csdeconv, [("response", - "response_file")])]) - mapping.connect([(fsl2mrtrix, csdeconv, [("encoding_file", - "encoding_file")])]) - """ - Connect the tractography and compute the tract density image. - """ - - mapping.connect([(threshold_wmmask, probCSDstreamtrack, [("out_file", - "seed_file")])]) - mapping.connect([(csdeconv, probCSDstreamtrack, - [("spherical_harmonics_image", "in_file")])]) - mapping.connect([(probCSDstreamtrack, tracks2prob, [("tracked", - "in_file")])]) - mapping.connect([(eddycorrect, tracks2prob, [("outputnode.eddy_corrected", - "template_file")])]) - mapping.connect([(tracks2prob, MRconvert_tracks2prob, [("tract_image", - "in_file")])]) - """ - Structural Processing - --------------------- - First, we coregister the diffusion image to the structural image - """ - - mapping.connect([(eddycorrect, coregister, [("outputnode.eddy_corrected", - "in_file")])]) - mapping.connect([(mri_convert_Brain, coregister, [('out_file', - 'reference')])]) - """ - The MRtrix-tracked fibers are converted to TrackVis format (with voxel and data dimensions grabbed from the DWI). - The connectivity matrix is created with the transformed .trk fibers and the parcellation file. 
- """ - - mapping.connect([(eddycorrect, tck2trk, [("outputnode.eddy_corrected", - "image_file")])]) - mapping.connect([(mri_convert_Brain, tck2trk, - [("out_file", "registration_image_file")])]) - mapping.connect([(coregister, tck2trk, [("out_matrix_file", - "matrix_file")])]) - mapping.connect([(probCSDstreamtrack, tck2trk, [("tracked", "in_file")])]) - mapping.connect([(tck2trk, creatematrix, [("out_file", "tract_file")])]) - mapping.connect([(tck2trk, trk2tdi, [("out_file", "in_file")])]) - mapping.connect(inputnode_within, 'resolution_network_file', creatematrix, - 'resolution_network_file') - mapping.connect([(inputnode_within, creatematrix, [("subject_id", - "out_matrix_file")])]) - mapping.connect([(inputnode_within, creatematrix, - [("subject_id", "out_matrix_mat_file")])]) - mapping.connect([(parcellate, creatematrix, [("roi_file", "roi_file")])]) - """ - The merge nodes defined earlier are used here to create lists of the files which are - destined for the CFFConverter. - """ - - mapping.connect([(mris_convertLH, giftiSurfaces, [("converted", "in1")])]) - mapping.connect([(mris_convertRH, giftiSurfaces, [("converted", "in2")])]) - mapping.connect([(mris_convertLHwhite, giftiSurfaces, [("converted", - "in3")])]) - mapping.connect([(mris_convertRHwhite, giftiSurfaces, [("converted", - "in4")])]) - mapping.connect([(mris_convertLHinflated, giftiSurfaces, [("converted", - "in5")])]) - mapping.connect([(mris_convertRHinflated, giftiSurfaces, [("converted", - "in6")])]) - mapping.connect([(mris_convertLHsphere, giftiSurfaces, [("converted", - "in7")])]) - mapping.connect([(mris_convertRHsphere, giftiSurfaces, [("converted", - "in8")])]) - - mapping.connect([(mris_convertLHlabels, giftiLabels, [("converted", - "in1")])]) - mapping.connect([(mris_convertRHlabels, giftiLabels, [("converted", - "in2")])]) - - mapping.connect([(parcellate, niftiVolumes, [("roi_file", "in1")])]) - mapping.connect([(eddycorrect, niftiVolumes, [("outputnode.eddy_corrected", - "in2")])]) - mapping.connect([(mri_convert_Brain, niftiVolumes, [("out_file", "in3")])]) - - mapping.connect([(creatematrix, fiberDataArrays, [("endpoint_file", - "in1")])]) - mapping.connect([(creatematrix, fiberDataArrays, [("endpoint_file_mm", - "in2")])]) - mapping.connect([(creatematrix, fiberDataArrays, [("fiber_length_file", - "in3")])]) - mapping.connect([(creatematrix, fiberDataArrays, [("fiber_label_file", - "in4")])]) - """ - This block actually connects the merged lists to the CFF converter. We pass the surfaces - and volumes that are to be included, as well as the tracts and the network itself. The currently - running pipeline (dmri_connectivity_advanced.py) is also scraped and included in the CFF file. This - makes it easy for the user to examine the entire processing pathway used to generate the end - product. - """ - - mapping.connect([(giftiSurfaces, CFFConverter, [("out", - "gifti_surfaces")])]) - mapping.connect([(giftiLabels, CFFConverter, [("out", "gifti_labels")])]) - mapping.connect([(creatematrix, CFFConverter, [("matrix_files", - "gpickled_networks")])]) - mapping.connect([(niftiVolumes, CFFConverter, [("out", "nifti_volumes")])]) - mapping.connect([(fiberDataArrays, CFFConverter, [("out", "data_files")])]) - mapping.connect([(creatematrix, CFFConverter, [("filtered_tractography", - "tract_files")])]) - mapping.connect([(inputnode_within, CFFConverter, [("subject_id", - "title")])]) - """ - The graph theoretical metrics which have been generated are placed into another CFF file. 
- """ - - mapping.connect([(inputnode_within, networkx, - [("subject_id", "inputnode.extra_field")])]) - mapping.connect([(creatematrix, networkx, [("intersection_matrix_file", - "inputnode.network_file")])]) - - mapping.connect([(networkx, NxStatsCFFConverter, - [("outputnode.network_files", "gpickled_networks")])]) - mapping.connect([(giftiSurfaces, NxStatsCFFConverter, - [("out", "gifti_surfaces")])]) - mapping.connect([(giftiLabels, NxStatsCFFConverter, [("out", - "gifti_labels")])]) - mapping.connect([(niftiVolumes, NxStatsCFFConverter, [("out", - "nifti_volumes")])]) - mapping.connect([(fiberDataArrays, NxStatsCFFConverter, [("out", - "data_files")])]) - mapping.connect([(inputnode_within, NxStatsCFFConverter, [("subject_id", - "title")])]) - - mapping.connect([(inputnode_within, cmats_to_csv, - [("subject_id", "inputnode.extra_field")])]) - mapping.connect([(creatematrix, cmats_to_csv, - [("matlab_matrix_files", - "inputnode.matlab_matrix_files")])]) - mapping.connect([(creatematrix, nfibs_to_csv, [("stats_file", - "in_file")])]) - mapping.connect([(nfibs_to_csv, merge_nfib_csvs, [("csv_files", - "in_files")])]) - mapping.connect([(inputnode_within, merge_nfib_csvs, [("subject_id", - "extra_field")])]) - """ - Create a higher-level workflow - -------------------------------------- - Finally, we create another higher-level workflow to connect our mapping workflow with the info and datagrabbing nodes - declared at the beginning. Our tutorial can is now extensible to any arbitrary number of subjects by simply adding - their names to the subject list and their data to the proper folders. - """ - - inputnode = pe.Node( - interface=util.IdentityInterface( - fields=["subject_id", "dwi", "bvecs", "bvals", "subjects_dir"]), - name="inputnode") - - outputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - "fa", "struct", "tracts", "tracks2prob", "connectome", - "nxstatscff", "nxmatlab", "nxcsv", "fiber_csv", "cmatrices_csv", - "nxmergedcsv", "cmatrix", "networks", "filtered_tracts", "rois", - "odfs", "tdi", "mean_fiber_length", "median_fiber_length", - "fiber_length_std" - ]), - name="outputnode") - - connectivity = pe.Workflow(name="connectivity") - connectivity.base_output_dir = name - connectivity.base_dir = name - - connectivity.connect([ - (inputnode, mapping, - [("dwi", "inputnode_within.dwi"), ("bvals", "inputnode_within.bvals"), - ("bvecs", "inputnode_within.bvecs"), ("subject_id", - "inputnode_within.subject_id"), - ("subjects_dir", "inputnode_within.subjects_dir")]) - ]) - - connectivity.connect( - [(mapping, outputnode, - [("tck2trk.out_file", - "tracts"), ("CFFConverter.connectome_file", - "connectome"), ("NxStatsCFFConverter.connectome_file", - "nxstatscff"), - ("CreateMatrix.matrix_mat_file", - "cmatrix"), ("CreateMatrix.mean_fiber_length_matrix_mat_file", - "mean_fiber_length"), - ("CreateMatrix.median_fiber_length_matrix_mat_file", - "median_fiber_length"), - ("CreateMatrix.fiber_length_std_matrix_mat_file", - "fiber_length_std"), ("CreateMatrix.matrix_files", "networks"), - ("CreateMatrix.filtered_tractographies", - "filtered_tracts"), ("merge_nfib_csvs.csv_file", "fiber_csv"), - ("mri_convert_ROI_scale500.out_file", - "rois"), ("trk2tdi.out_file", - "tdi"), ("csdeconv.spherical_harmonics_image", "odfs"), - ("mri_convert_Brain.out_file", - "struct"), ("MRconvert_fa.converted", - "fa"), ("MRconvert_tracks2prob.converted", - "tracks2prob")])]) - - connectivity.connect([(cmats_to_csv, outputnode, [("outputnode.csv_file", - "cmatrices_csv")])]) - 
connectivity.connect([(networkx, outputnode, [("outputnode.csv_files", - "nxcsv")])]) - return connectivity diff --git a/nipype/workflows/dmri/mrtrix/diffusion.py b/nipype/workflows/dmri/mrtrix/diffusion.py deleted file mode 100644 index a4305bf04e..0000000000 --- a/nipype/workflows/dmri/mrtrix/diffusion.py +++ /dev/null @@ -1,186 +0,0 @@ -# -*- coding: utf-8 -*- -from ....interfaces import utility as util # utility -from ....pipeline import engine as pe # pypeline engine -from ....interfaces import fsl as fsl -from ....interfaces import mrtrix as mrtrix - - -def create_mrtrix_dti_pipeline(name="dtiproc", - tractography_type='probabilistic'): - """Creates a pipeline that does the same diffusion processing as in the - :doc:`../../users/examples/dmri_mrtrix_dti` example script. Given a diffusion-weighted image, - b-values, and b-vectors, the workflow will return the tractography - computed from spherical deconvolution and probabilistic streamline tractography - - Example - ------- - - >>> dti = create_mrtrix_dti_pipeline("mrtrix_dti") - >>> dti.inputs.inputnode.dwi = 'data.nii' - >>> dti.inputs.inputnode.bvals = 'bvals' - >>> dti.inputs.inputnode.bvecs = 'bvecs' - >>> dti.run() # doctest: +SKIP - - Inputs:: - - inputnode.dwi - inputnode.bvecs - inputnode.bvals - - Outputs:: - - outputnode.fa - outputnode.tdi - outputnode.tracts_tck - outputnode.tracts_trk - outputnode.csdeconv - - """ - - inputnode = pe.Node( - interface=util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]), - name="inputnode") - - bet = pe.Node(interface=fsl.BET(), name="bet") - bet.inputs.mask = True - - fsl2mrtrix = pe.Node(interface=mrtrix.FSL2MRTrix(), name='fsl2mrtrix') - fsl2mrtrix.inputs.invert_y = True - - dwi2tensor = pe.Node(interface=mrtrix.DWI2Tensor(), name='dwi2tensor') - - tensor2vector = pe.Node( - interface=mrtrix.Tensor2Vector(), name='tensor2vector') - tensor2adc = pe.Node( - interface=mrtrix.Tensor2ApparentDiffusion(), name='tensor2adc') - tensor2fa = pe.Node( - interface=mrtrix.Tensor2FractionalAnisotropy(), name='tensor2fa') - - erode_mask_firstpass = pe.Node( - interface=mrtrix.Erode(), name='erode_mask_firstpass') - erode_mask_secondpass = pe.Node( - interface=mrtrix.Erode(), name='erode_mask_secondpass') - - threshold_b0 = pe.Node(interface=mrtrix.Threshold(), name='threshold_b0') - - threshold_FA = pe.Node(interface=mrtrix.Threshold(), name='threshold_FA') - threshold_FA.inputs.absolute_threshold_value = 0.7 - - threshold_wmmask = pe.Node( - interface=mrtrix.Threshold(), name='threshold_wmmask') - threshold_wmmask.inputs.absolute_threshold_value = 0.4 - - MRmultiply = pe.Node(interface=mrtrix.MRMultiply(), name='MRmultiply') - MRmult_merge = pe.Node(interface=util.Merge(2), name='MRmultiply_merge') - - median3d = pe.Node(interface=mrtrix.MedianFilter3D(), name='median3D') - - MRconvert = pe.Node(interface=mrtrix.MRConvert(), name='MRconvert') - MRconvert.inputs.extract_at_axis = 3 - MRconvert.inputs.extract_at_coordinate = [0] - - csdeconv = pe.Node( - interface=mrtrix.ConstrainedSphericalDeconvolution(), name='csdeconv') - - gen_WM_mask = pe.Node( - interface=mrtrix.GenerateWhiteMatterMask(), name='gen_WM_mask') - - estimateresponse = pe.Node( - interface=mrtrix.EstimateResponseForSH(), name='estimateresponse') - - if tractography_type == 'probabilistic': - CSDstreamtrack = pe.Node( - interface=mrtrix. 
- ProbabilisticSphericallyDeconvolutedStreamlineTrack(), - name='CSDstreamtrack') - else: - CSDstreamtrack = pe.Node( - interface=mrtrix.SphericallyDeconvolutedStreamlineTrack(), - name='CSDstreamtrack') - CSDstreamtrack.inputs.desired_number_of_tracks = 15000 - - tracks2prob = pe.Node(interface=mrtrix.Tracks2Prob(), name='tracks2prob') - tracks2prob.inputs.colour = True - tck2trk = pe.Node(interface=mrtrix.MRTrix2TrackVis(), name='tck2trk') - - workflow = pe.Workflow(name=name) - workflow.base_output_dir = name - - workflow.connect([(inputnode, fsl2mrtrix, [("bvecs", "bvec_file"), - ("bvals", "bval_file")])]) - workflow.connect([(inputnode, dwi2tensor, [("dwi", "in_file")])]) - workflow.connect([(fsl2mrtrix, dwi2tensor, [("encoding_file", - "encoding_file")])]) - - workflow.connect([ - (dwi2tensor, tensor2vector, [['tensor', 'in_file']]), - (dwi2tensor, tensor2adc, [['tensor', 'in_file']]), - (dwi2tensor, tensor2fa, [['tensor', 'in_file']]), - ]) - - workflow.connect([(inputnode, MRconvert, [("dwi", "in_file")])]) - workflow.connect([(MRconvert, threshold_b0, [("converted", "in_file")])]) - workflow.connect([(threshold_b0, median3d, [("out_file", "in_file")])]) - workflow.connect([(median3d, erode_mask_firstpass, [("out_file", - "in_file")])]) - workflow.connect([(erode_mask_firstpass, erode_mask_secondpass, - [("out_file", "in_file")])]) - - workflow.connect([(tensor2fa, MRmult_merge, [("FA", "in1")])]) - workflow.connect([(erode_mask_secondpass, MRmult_merge, [("out_file", - "in2")])]) - workflow.connect([(MRmult_merge, MRmultiply, [("out", "in_files")])]) - workflow.connect([(MRmultiply, threshold_FA, [("out_file", "in_file")])]) - workflow.connect([(threshold_FA, estimateresponse, [("out_file", - "mask_image")])]) - - workflow.connect([(inputnode, bet, [("dwi", "in_file")])]) - workflow.connect([(inputnode, gen_WM_mask, [("dwi", "in_file")])]) - workflow.connect([(bet, gen_WM_mask, [("mask_file", "binary_mask")])]) - workflow.connect([(fsl2mrtrix, gen_WM_mask, [("encoding_file", - "encoding_file")])]) - - workflow.connect([(inputnode, estimateresponse, [("dwi", "in_file")])]) - workflow.connect([(fsl2mrtrix, estimateresponse, [("encoding_file", - "encoding_file")])]) - - workflow.connect([(inputnode, csdeconv, [("dwi", "in_file")])]) - workflow.connect([(gen_WM_mask, csdeconv, [("WMprobabilitymap", - "mask_image")])]) - workflow.connect([(estimateresponse, csdeconv, [("response", - "response_file")])]) - workflow.connect([(fsl2mrtrix, csdeconv, [("encoding_file", - "encoding_file")])]) - - workflow.connect([(gen_WM_mask, threshold_wmmask, [("WMprobabilitymap", - "in_file")])]) - workflow.connect([(threshold_wmmask, CSDstreamtrack, [("out_file", - "seed_file")])]) - workflow.connect([(csdeconv, CSDstreamtrack, [("spherical_harmonics_image", - "in_file")])]) - - if tractography_type == 'probabilistic': - workflow.connect([(CSDstreamtrack, tracks2prob, [("tracked", - "in_file")])]) - workflow.connect([(inputnode, tracks2prob, [("dwi", - "template_file")])]) - - workflow.connect([(CSDstreamtrack, tck2trk, [("tracked", "in_file")])]) - workflow.connect([(inputnode, tck2trk, [("dwi", "image_file")])]) - - output_fields = ["fa", "tracts_trk", "csdeconv", "tracts_tck"] - if tractography_type == 'probabilistic': - output_fields.append("tdi") - outputnode = pe.Node( - interface=util.IdentityInterface(fields=output_fields), - name="outputnode") - - workflow.connect([(CSDstreamtrack, outputnode, - [("tracked", "tracts_tck")]), (csdeconv, outputnode, [ - ("spherical_harmonics_image", "csdeconv") - 
]), (tensor2fa, outputnode, [("FA", "fa")]), - (tck2trk, outputnode, [("out_file", "tracts_trk")])]) - if tractography_type == 'probabilistic': - workflow.connect([(tracks2prob, outputnode, [("tract_image", "tdi")])]) - - return workflow diff --git a/nipype/workflows/dmri/mrtrix/group_connectivity.py b/nipype/workflows/dmri/mrtrix/group_connectivity.py deleted file mode 100644 index 10d961a18c..0000000000 --- a/nipype/workflows/dmri/mrtrix/group_connectivity.py +++ /dev/null @@ -1,139 +0,0 @@ -# -*- coding: utf-8 -*- -import os.path as op -import warnings - -from ....interfaces import io as nio # Data i/o -from ....interfaces import utility as util # utility -from ....interfaces import cmtk as cmtk -from ....algorithms import misc as misc -from ....pipeline import engine as pe # pipeline engine -from ....utils.misc import package_check -from .connectivity_mapping import create_connectivity_pipeline - -try: - package_check('cmp') -except Exception as e: - warnings.warn('cmp not installed') -else: - import cmp - - -def create_group_connectivity_pipeline(group_list, - group_id, - data_dir, - subjects_dir, - output_dir, - template_args_dict=0): - """Creates a pipeline that performs MRtrix structural connectivity processing - on groups of subjects. Given a diffusion-weighted image, and text files containing - the associated b-values and b-vectors, the workflow will return each subjects' connectomes - in a Connectome File Format (CFF) file, for use in Connectome Viewer (http://www.cmtk.org). - - Example - ------- - - >>> import nipype.interfaces.freesurfer as fs - >>> import nipype.workflows.dmri.mrtrix.group_connectivity as groupwork - >>> import cmp # doctest: +SKIP - >>> from nipype.testing import example_data - >>> subjects_dir = '.' - >>> data_dir = '.' - >>> output_dir = '.' - >>> fs.FSCommand.set_default_subjects_dir(subjects_dir) - >>> group_list = {} - >>> group_list['group1'] = ['subj1', 'subj2'] - >>> group_list['group2'] = ['subj3', 'subj4'] - >>> template_args = dict(dwi=[['subject_id', 'dwi']], bvecs=[['subject_id', 'bvecs']], bvals=[['subject_id', 'bvals']]) - >>> group_id = 'group1' - >>> l1pipeline = groupwork.create_group_connectivity_pipeline(group_list, group_id, data_dir, subjects_dir, output_dir, template_args) - >>> parcellation_name = 'scale500' - >>> l1pipeline.inputs.connectivity.mapping.Parcellate.parcellation_name = parcellation_name - >>> cmp_config = cmp.configuration.PipelineConfiguration() # doctest: +SKIP - >>> cmp_config.parcellation_scheme = "Lausanne2008" # doctest: +SKIP - >>> l1pipeline.inputs.connectivity.mapping.inputnode_within.resolution_network_file = cmp_config._get_lausanne_parcellation('Lausanne2008')[parcellation_name]['node_information_graphml'] # doctest: +SKIP - >>> l1pipeline.run() # doctest: +SKIP - - - Inputs:: - - group_list: Dictionary of subject lists, keyed by group name - group_id: String containing the group name - data_dir: Path to the data directory - subjects_dir: Path to the Freesurfer 'subjects' directory - output_dir: Path for the output files - template_args_dict: Dictionary of template arguments for the connectivity pipeline datasource - e.g. 
info = dict(dwi=[['subject_id', 'dwi']], - bvecs=[['subject_id','bvecs']], - bvals=[['subject_id','bvals']]) - """ - group_infosource = pe.Node( - interface=util.IdentityInterface(fields=['group_id']), - name="group_infosource") - group_infosource.inputs.group_id = group_id - subject_list = group_list[group_id] - subj_infosource = pe.Node( - interface=util.IdentityInterface(fields=['subject_id']), - name="subj_infosource") - subj_infosource.iterables = ('subject_id', subject_list) - - if template_args_dict == 0: - info = dict( - dwi=[['subject_id', 'dwi']], - bvecs=[['subject_id', 'bvecs']], - bvals=[['subject_id', 'bvals']]) - else: - info = template_args_dict - - datasource = pe.Node( - interface=nio.DataGrabber( - infields=['subject_id'], outfields=list(info.keys())), - name='datasource') - - datasource.inputs.template = "%s/%s" - datasource.inputs.base_directory = data_dir - datasource.inputs.field_template = dict(dwi='%s/%s.nii') - datasource.inputs.template_args = info - datasource.inputs.sort_filelist = True - """ - Create a connectivity mapping workflow - """ - conmapper = create_connectivity_pipeline("nipype_conmap") - conmapper.inputs.inputnode.subjects_dir = subjects_dir - conmapper.base_dir = op.abspath('conmapper') - - datasink = pe.Node(interface=nio.DataSink(), name="datasink") - datasink.inputs.base_directory = output_dir - datasink.inputs.container = group_id - - l1pipeline = pe.Workflow(name="l1pipeline_" + group_id) - l1pipeline.base_dir = output_dir - l1pipeline.base_output_dir = group_id - l1pipeline.connect([(subj_infosource, conmapper, - [('subject_id', 'inputnode.subject_id')])]) - l1pipeline.connect([(subj_infosource, datasource, [('subject_id', - 'subject_id')])]) - l1pipeline.connect([(datasource, conmapper, [ - ("dwi", "inputnode.dwi"), - ("bvals", "inputnode.bvals"), - ("bvecs", "inputnode.bvecs"), - ])]) - l1pipeline.connect([(conmapper, datasink, [ - ("outputnode.connectome", "@l1output.cff"), - ("outputnode.nxstatscff", "@l1output.nxstatscff"), - ("outputnode.nxmatlab", "@l1output.nxmatlab"), - ("outputnode.nxcsv", "@l1output.nxcsv"), - ("outputnode.fiber_csv", "@l1output.fiber_csv"), - ("outputnode.cmatrices_csv", "@l1output.cmatrices_csv"), - ("outputnode.fa", "@l1output.fa"), - ("outputnode.filtered_tracts", "@l1output.filtered_tracts"), - ("outputnode.cmatrix", "@l1output.cmatrix"), - ("outputnode.rois", "@l1output.rois"), - ("outputnode.odfs", "@l1output.odfs"), - ("outputnode.struct", "@l1output.struct"), - ("outputnode.networks", "@l1output.networks"), - ("outputnode.mean_fiber_length", "@l1output.mean_fiber_length"), - ("outputnode.fiber_length_std", "@l1output.fiber_length_std"), - ])]) - l1pipeline.connect([(group_infosource, datasink, [('group_id', - '@group_id')])]) - return l1pipeline diff --git a/nipype/workflows/fmri/__init__.py b/nipype/workflows/fmri/__init__.py deleted file mode 100644 index 5523a0c412..0000000000 --- a/nipype/workflows/fmri/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from . 
import fsl, spm diff --git a/nipype/workflows/fmri/fsl/__init__.py b/nipype/workflows/fmri/fsl/__init__.py deleted file mode 100644 index 9f6ca78ee8..0000000000 --- a/nipype/workflows/fmri/fsl/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# -*- coding: utf-8 -*- -from .preprocess import (create_susan_smooth, create_fsl_fs_preproc, - create_parallelfeat_preproc, create_featreg_preproc, - create_reg_workflow) -from .estimate import create_modelfit_workflow, create_fixed_effects_flow - -# backwards compatibility -from ...rsfmri.fsl.resting import create_resting_preproc diff --git a/nipype/workflows/fmri/fsl/estimate.py b/nipype/workflows/fmri/fsl/estimate.py deleted file mode 100644 index 638e422bfc..0000000000 --- a/nipype/workflows/fmri/fsl/estimate.py +++ /dev/null @@ -1,298 +0,0 @@ -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -from builtins import range - -from ....interfaces import fsl as fsl # fsl -from ....interfaces import utility as util # utility -from ....pipeline import engine as pe # pypeline engine - -from .... import LooseVersion - - -def create_modelfit_workflow(name='modelfit', f_contrasts=False): - """Create an FSL individual modelfitting workflow - - Example - ------- - - >>> modelfit = create_modelfit_workflow() - >>> modelfit.base_dir = '.' - >>> info = dict() - >>> modelfit.inputs.inputspec.session_info = info - >>> modelfit.inputs.inputspec.interscan_interval = 3. - >>> modelfit.inputs.inputspec.film_threshold = 1000 - >>> modelfit.run() #doctest: +SKIP - - Inputs:: - - inputspec.session_info : info generated by modelgen.SpecifyModel - inputspec.interscan_interval : interscan interval - inputspec.contrasts : list of contrasts - inputspec.film_threshold : image threshold for FILM estimation - inputspec.model_serial_correlations - inputspec.bases - - Outputs:: - - outputspec.copes - outputspec.varcopes - outputspec.dof_file - outputspec.pfiles - outputspec.zfiles - outputspec.parameter_estimates - """ - - version = 0 - if fsl.Info.version() and \ - LooseVersion(fsl.Info.version()) > LooseVersion('5.0.6'): - version = 507 - - modelfit = pe.Workflow(name=name) - """ - Create the nodes - """ - - inputspec = pe.Node( - util.IdentityInterface(fields=[ - 'session_info', 'interscan_interval', 'contrasts', - 'film_threshold', 'functional_data', 'bases', - 'model_serial_correlations' - ]), - name='inputspec') - level1design = pe.Node(interface=fsl.Level1Design(), name="level1design") - modelgen = pe.MapNode( - interface=fsl.FEATModel(), - name='modelgen', - iterfield=['fsf_file', 'ev_files']) - if version < 507: - modelestimate = pe.MapNode( - interface=fsl.FILMGLS(smooth_autocorr=True, mask_size=5), - name='modelestimate', - iterfield=['design_file', 'in_file']) - else: - if f_contrasts: - iterfield = ['design_file', 'in_file', 'tcon_file', 'fcon_file'] - else: - iterfield = ['design_file', 'in_file', 'tcon_file'] - modelestimate = pe.MapNode( - interface=fsl.FILMGLS(smooth_autocorr=True, mask_size=5), - name='modelestimate', - iterfield=iterfield) - - if version < 507: - if f_contrasts: - iterfield = [ - 'tcon_file', 'fcon_file', 'param_estimates', 'sigmasquareds', - 'corrections', 'dof_file' - ] - else: - iterfield = [ - 'tcon_file', 'param_estimates', 'sigmasquareds', 'corrections', - 'dof_file' - ] - conestimate = pe.MapNode( - interface=fsl.ContrastMgr(), - name='conestimate', - iterfield=[ - 'tcon_file', 'fcon_file', 'param_estimates', 'sigmasquareds', - 'corrections', 'dof_file' - 
]) - - if f_contrasts: - iterfield = ['in1', 'in2'] - else: - iterfield = ['in1'] - merge_contrasts = pe.MapNode( - interface=util.Merge(2), name='merge_contrasts', iterfield=iterfield) - ztopval = pe.MapNode( - interface=fsl.ImageMaths(op_string='-ztop', suffix='_pval'), - nested=True, - name='ztop', - iterfield=['in_file']) - outputspec = pe.Node( - util.IdentityInterface(fields=[ - 'copes', 'varcopes', 'dof_file', 'pfiles', 'zfiles', - 'parameter_estimates' - ]), - name='outputspec') - """ - Setup the connections - """ - - modelfit.connect([ - (inputspec, level1design, - [('interscan_interval', 'interscan_interval'), - ('session_info', 'session_info'), ('contrasts', 'contrasts'), - ('bases', 'bases'), ('model_serial_correlations', - 'model_serial_correlations')]), - (inputspec, modelestimate, [('film_threshold', 'threshold'), - ('functional_data', 'in_file')]), - (level1design, modelgen, [('fsf_files', 'fsf_file'), ('ev_files', - 'ev_files')]), - (modelgen, modelestimate, [('design_file', 'design_file')]), - (merge_contrasts, ztopval, [('out', 'in_file')]), - (ztopval, outputspec, [('out_file', 'pfiles')]), - (merge_contrasts, outputspec, [('out', 'zfiles')]), - (modelestimate, outputspec, [('param_estimates', - 'parameter_estimates'), ('dof_file', - 'dof_file')]), - ]) - if version < 507: - modelfit.connect([ - (modelgen, conestimate, [('con_file', 'tcon_file'), - ('fcon_file', 'fcon_file')]), - (modelestimate, conestimate, - [('param_estimates', 'param_estimates'), ('sigmasquareds', - 'sigmasquareds'), - ('corrections', 'corrections'), ('dof_file', 'dof_file')]), - (conestimate, merge_contrasts, [('zstats', 'in1'), ('zfstats', - 'in2')]), - (conestimate, outputspec, [('copes', 'copes'), ('varcopes', - 'varcopes')]), - ]) - else: - modelfit.connect([ - (modelgen, modelestimate, [('con_file', 'tcon_file'), - ('fcon_file', 'fcon_file')]), - (modelestimate, merge_contrasts, [('zstats', 'in1'), ('zfstats', - 'in2')]), - (modelestimate, outputspec, [('copes', 'copes'), ('varcopes', - 'varcopes')]), - ]) - return modelfit - - -def create_overlay_workflow(name='overlay'): - """Setup overlay workflow - """ - - overlay = pe.Workflow(name='overlay') - overlaystats = pe.MapNode( - interface=fsl.Overlay(), name="overlaystats", iterfield=['stat_image']) - overlaystats.inputs.show_negative_stats = True - overlaystats.inputs.auto_thresh_bg = True - - slicestats = pe.MapNode( - interface=fsl.Slicer(), name="slicestats", iterfield=['in_file']) - slicestats.inputs.all_axial = True - slicestats.inputs.image_width = 512 - - overlay.connect(overlaystats, 'out_file', slicestats, 'in_file') - return overlay - - -def create_fixed_effects_flow(name='fixedfx'): - """Create a fixed-effects workflow - - This workflow is used to combine registered copes and varcopes across runs - for an individual subject - - Example - ------- - - >>> fixedfx = create_fixed_effects_flow() - >>> fixedfx.base_dir = '.' 
- >>> fixedfx.inputs.inputspec.copes = [['cope1run1.nii.gz', 'cope1run2.nii.gz'], ['cope2run1.nii.gz', 'cope2run2.nii.gz']] # per contrast - >>> fixedfx.inputs.inputspec.varcopes = [['varcope1run1.nii.gz', 'varcope1run2.nii.gz'], ['varcope2run1.nii.gz', 'varcope2run2.nii.gz']] # per contrast - >>> fixedfx.inputs.inputspec.dof_files = ['dofrun1', 'dofrun2'] # per run - >>> fixedfx.run() #doctest: +SKIP - - Inputs:: - - inputspec.copes : list of list of cope files (one list per contrast) - inputspec.varcopes : list of list of varcope files (one list per - contrast) - inputspec.dof_files : degrees of freedom files for each run - - Outputs:: - - outputspec.res4d : 4d residual time series - outputspec.copes : contrast parameter estimates - outputspec.varcopes : variance of contrast parameter estimates - outputspec.zstats : z statistics of contrasts - outputspec.tstats : t statistics of contrasts - """ - - fixed_fx = pe.Workflow(name=name) - - inputspec = pe.Node( - util.IdentityInterface(fields=['copes', 'varcopes', 'dof_files']), - name='inputspec') - """ - Use :class:`nipype.interfaces.fsl.Merge` to merge the copes and - varcopes for each condition - """ - - copemerge = pe.MapNode( - interface=fsl.Merge(dimension='t'), - iterfield=['in_files'], - name="copemerge") - - varcopemerge = pe.MapNode( - interface=fsl.Merge(dimension='t'), - iterfield=['in_files'], - name="varcopemerge") - """ - Use :class:`nipype.interfaces.fsl.L2Model` to generate subject and condition - specific level 2 model design files - """ - - level2model = pe.Node(interface=fsl.L2Model(), name='l2model') - """ - Use :class:`nipype.interfaces.fsl.FLAMEO` to estimate a second level model - """ - - flameo = pe.MapNode( - interface=fsl.FLAMEO(run_mode='fe'), - name="flameo", - iterfield=['cope_file', 'var_cope_file']) - - def get_dofvolumes(dof_files, cope_files): - import os - import nibabel as nb - import numpy as np - img = nb.load(cope_files[0]) - if len(img.shape) > 3: - out_data = np.zeros(img.shape) - else: - out_data = np.zeros(list(img.shape) + [1]) - for i in range(out_data.shape[-1]): - dof = np.loadtxt(dof_files[i]) - out_data[:, :, :, i] = dof - filename = os.path.join(os.getcwd(), 'dof_file.nii.gz') - newimg = nb.Nifti1Image(out_data, None, img.header) - newimg.to_filename(filename) - return filename - - gendof = pe.Node( - util.Function( - input_names=['dof_files', 'cope_files'], - output_names=['dof_volume'], - function=get_dofvolumes), - name='gendofvolume') - - outputspec = pe.Node( - util.IdentityInterface( - fields=['res4d', 'copes', 'varcopes', 'zstats', 'tstats']), - name='outputspec') - - fixed_fx.connect( - [(inputspec, copemerge, - [('copes', 'in_files')]), (inputspec, varcopemerge, [('varcopes', - 'in_files')]), - (inputspec, gendof, [('dof_files', 'dof_files')]), (copemerge, gendof, - [('merged_file', - 'cope_files')]), - (copemerge, flameo, - [('merged_file', 'cope_file')]), (varcopemerge, flameo, [ - ('merged_file', 'var_cope_file') - ]), (level2model, flameo, - [('design_mat', 'design_file'), ('design_con', 't_con_file'), - ('design_grp', 'cov_split_file')]), (gendof, flameo, - [('dof_volume', - 'dof_var_cope_file')]), - (flameo, outputspec, - [('res4d', 'res4d'), ('copes', 'copes'), ('var_copes', 'varcopes'), - ('zstats', 'zstats'), ('tstats', 'tstats')])]) - return fixed_fx diff --git a/nipype/workflows/fmri/fsl/preprocess.py b/nipype/workflows/fmri/fsl/preprocess.py deleted file mode 100644 index ac235bdba1..0000000000 --- a/nipype/workflows/fmri/fsl/preprocess.py +++ /dev/null @@ -1,1293 +0,0 @@ 
-# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import division - -import os -from ....interfaces import fsl as fsl # fsl -from ....interfaces import utility as util # utility -from ....pipeline import engine as pe # pypeline engine -from ....interfaces import freesurfer as fs # freesurfer -from ....interfaces import spm as spm -from ...smri.freesurfer.utils import create_getmask_flow -from .... import LooseVersion - - -def getthreshop(thresh): - return ['-thr %.10f -Tmin -bin' % (0.1 * val[1]) for val in thresh] - - -def pickrun(files, whichrun): - """pick file from list of files""" - - filemap = {'first': 0, 'last': -1, 'middle': len(files) // 2} - - if isinstance(files, list): - - # whichrun is given as integer - if isinstance(whichrun, int): - return files[whichrun] - # whichrun is given as string - elif isinstance(whichrun, str): - if whichrun not in filemap.keys(): - raise (KeyError, 'Sorry, whichrun must be either integer index' - 'or string in form of "first", "last" or "middle') - else: - return files[filemap[whichrun]] - else: - # in case single file name is given - return files - - -def pickfirst(files): - if isinstance(files, list): - return files[0] - else: - return files - - -def pickmiddle(files): - from nibabel import load - import numpy as np - from nipype.utils import NUMPY_MMAP - middlevol = [] - for f in files: - middlevol.append(int(np.ceil(load(f, mmap=NUMPY_MMAP).shape[3] / 2))) - return middlevol - - -def pickvol(filenames, fileidx, which): - from nibabel import load - import numpy as np - from nipype.utils import NUMPY_MMAP - if which.lower() == 'first': - idx = 0 - elif which.lower() == 'middle': - idx = int( - np.ceil(load(filenames[fileidx], mmap=NUMPY_MMAP).shape[3] / 2)) - elif which.lower() == 'last': - idx = load(filenames[fileidx]).shape[3] - 1 - else: - raise Exception('unknown value for volume selection : %s' % which) - return idx - - -def getbtthresh(medianvals): - return [0.75 * val for val in medianvals] - - -def chooseindex(fwhm): - if fwhm < 1: - return [0] - else: - return [1] - - -def getmeanscale(medianvals): - return ['-mul %.10f' % (10000. 
/ val) for val in medianvals] - - -def getusans(x): - return [[tuple([val[0], 0.75 * val[1]])] for val in x] - - -tolist = lambda x: [x] -highpass_operand = lambda x: '-bptf %.10f -1' % x - - -def create_parallelfeat_preproc(name='featpreproc', highpass=True): - """Preprocess each run with FSL independently of the others - - Parameters - ---------- - - :: - - name : name of workflow (default: featpreproc) - highpass : boolean (default: True) - - Inputs:: - - inputspec.func : functional runs (filename or list of filenames) - inputspec.fwhm : fwhm for smoothing with SUSAN - inputspec.highpass : HWHM in TRs (if created with highpass=True) - - Outputs:: - - outputspec.reference : volume to which runs are realigned - outputspec.motion_parameters : motion correction parameters - outputspec.realigned_files : motion corrected files - outputspec.motion_plots : plots of motion correction parameters - outputspec.mask : mask file used to mask the brain - outputspec.smoothed_files : smoothed functional data - outputspec.highpassed_files : highpassed functional data (if highpass=True) - outputspec.mean : mean file - - Example - ------- - - >>> preproc = create_parallelfeat_preproc() - >>> preproc.inputs.inputspec.func = ['f3.nii', 'f5.nii'] - >>> preproc.inputs.inputspec.fwhm = 5 - >>> preproc.inputs.inputspec.highpass = 128./(2*2.5) - >>> preproc.base_dir = '/tmp' - >>> preproc.run() # doctest: +SKIP - - >>> preproc = create_parallelfeat_preproc(highpass=False) - >>> preproc.inputs.inputspec.func = 'f3.nii' - >>> preproc.inputs.inputspec.fwhm = 5 - >>> preproc.base_dir = '/tmp' - >>> preproc.run() # doctest: +SKIP - """ - version = 0 - if fsl.Info.version() and \ - LooseVersion(fsl.Info.version()) > LooseVersion('5.0.6'): - version = 507 - - featpreproc = pe.Workflow(name=name) - """ - Set up a node to define all inputs required for the preprocessing workflow - - """ - - if highpass: - inputnode = pe.Node( - interface=util.IdentityInterface( - fields=['func', 'fwhm', 'highpass']), - name='inputspec') - outputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - 'reference', 'motion_parameters', 'realigned_files', - 'motion_plots', 'mask', 'smoothed_files', 'highpassed_files', - 'mean' - ]), - name='outputspec') - else: - inputnode = pe.Node( - interface=util.IdentityInterface(fields=['func', 'fwhm']), - name='inputspec') - outputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - 'reference', 'motion_parameters', 'realigned_files', - 'motion_plots', 'mask', 'smoothed_files', 'mean' - ]), - name='outputspec') - """ - Set up a node to define outputs for the preprocessing workflow - - """ - """ - Convert functional images to float representation. Since there can - be more than one functional run we use a MapNode to convert each - run. 
- """ - - img2float = pe.MapNode( - interface=fsl.ImageMaths( - out_data_type='float', op_string='', suffix='_dtype'), - iterfield=['in_file'], - name='img2float') - featpreproc.connect(inputnode, 'func', img2float, 'in_file') - """ - Extract the first volume of the first run as the reference - """ - - extract_ref = pe.MapNode( - interface=fsl.ExtractROI(t_size=1), - iterfield=['in_file', 't_min'], - name='extractref') - - featpreproc.connect(img2float, 'out_file', extract_ref, 'in_file') - featpreproc.connect(img2float, ('out_file', pickmiddle), extract_ref, - 't_min') - featpreproc.connect(extract_ref, 'roi_file', outputnode, 'reference') - """ - Realign the functional runs to the reference (1st volume of first run) - """ - - motion_correct = pe.MapNode( - interface=fsl.MCFLIRT(save_mats=True, save_plots=True), - name='realign', - iterfield=['in_file', 'ref_file']) - featpreproc.connect(img2float, 'out_file', motion_correct, 'in_file') - featpreproc.connect(extract_ref, 'roi_file', motion_correct, 'ref_file') - featpreproc.connect(motion_correct, 'par_file', outputnode, - 'motion_parameters') - featpreproc.connect(motion_correct, 'out_file', outputnode, - 'realigned_files') - """ - Plot the estimated motion parameters - """ - - plot_motion = pe.MapNode( - interface=fsl.PlotMotionParams(in_source='fsl'), - name='plot_motion', - iterfield=['in_file']) - plot_motion.iterables = ('plot_type', ['rotations', 'translations']) - featpreproc.connect(motion_correct, 'par_file', plot_motion, 'in_file') - featpreproc.connect(plot_motion, 'out_file', outputnode, 'motion_plots') - """ - Extract the mean volume of the first functional run - """ - - meanfunc = pe.MapNode( - interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'), - iterfield=['in_file'], - name='meanfunc') - featpreproc.connect(motion_correct, 'out_file', meanfunc, 'in_file') - """ - Strip the skull from the mean functional to generate a mask - """ - - meanfuncmask = pe.MapNode( - interface=fsl.BET(mask=True, no_output=True, frac=0.3), - iterfield=['in_file'], - name='meanfuncmask') - featpreproc.connect(meanfunc, 'out_file', meanfuncmask, 'in_file') - """ - Mask the functional runs with the extracted mask - """ - - maskfunc = pe.MapNode( - interface=fsl.ImageMaths(suffix='_bet', op_string='-mas'), - iterfield=['in_file', 'in_file2'], - name='maskfunc') - featpreproc.connect(motion_correct, 'out_file', maskfunc, 'in_file') - featpreproc.connect(meanfuncmask, 'mask_file', maskfunc, 'in_file2') - """ - Determine the 2nd and 98th percentile intensities of each functional run - """ - - getthresh = pe.MapNode( - interface=fsl.ImageStats(op_string='-p 2 -p 98'), - iterfield=['in_file'], - name='getthreshold') - featpreproc.connect(maskfunc, 'out_file', getthresh, 'in_file') - """ - Threshold the first run of the functional data at 10% of the 98th percentile - """ - - threshold = pe.MapNode( - interface=fsl.ImageMaths(out_data_type='char', suffix='_thresh'), - iterfield=['in_file', 'op_string'], - name='threshold') - featpreproc.connect(maskfunc, 'out_file', threshold, 'in_file') - """ - Define a function to get 10% of the intensity - """ - - featpreproc.connect(getthresh, ('out_stat', getthreshop), threshold, - 'op_string') - """ - Determine the median value of the functional runs using the mask - """ - - medianval = pe.MapNode( - interface=fsl.ImageStats(op_string='-k %s -p 50'), - iterfield=['in_file', 'mask_file'], - name='medianval') - featpreproc.connect(motion_correct, 'out_file', medianval, 'in_file') - 
featpreproc.connect(threshold, 'out_file', medianval, 'mask_file') - """ - Dilate the mask - """ - - dilatemask = pe.MapNode( - interface=fsl.ImageMaths(suffix='_dil', op_string='-dilF'), - iterfield=['in_file'], - name='dilatemask') - featpreproc.connect(threshold, 'out_file', dilatemask, 'in_file') - featpreproc.connect(dilatemask, 'out_file', outputnode, 'mask') - """ - Mask the motion corrected functional runs with the dilated mask - """ - - maskfunc2 = pe.MapNode( - interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'), - iterfield=['in_file', 'in_file2'], - name='maskfunc2') - featpreproc.connect(motion_correct, 'out_file', maskfunc2, 'in_file') - featpreproc.connect(dilatemask, 'out_file', maskfunc2, 'in_file2') - """ - Smooth each run using SUSAN with the brightness threshold set to 75% - of the median value for each run and a mask consituting the mean - functional - """ - - smooth = create_susan_smooth() - - featpreproc.connect(inputnode, 'fwhm', smooth, 'inputnode.fwhm') - featpreproc.connect(maskfunc2, 'out_file', smooth, 'inputnode.in_files') - featpreproc.connect(dilatemask, 'out_file', smooth, 'inputnode.mask_file') - """ - Mask the smoothed data with the dilated mask - """ - - maskfunc3 = pe.MapNode( - interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'), - iterfield=['in_file', 'in_file2'], - name='maskfunc3') - featpreproc.connect(smooth, 'outputnode.smoothed_files', maskfunc3, - 'in_file') - - featpreproc.connect(dilatemask, 'out_file', maskfunc3, 'in_file2') - - concatnode = pe.Node(interface=util.Merge(2), name='concat') - featpreproc.connect(maskfunc2, ('out_file', tolist), concatnode, 'in1') - featpreproc.connect(maskfunc3, ('out_file', tolist), concatnode, 'in2') - """ - The following nodes select smooth or unsmoothed data depending on the - fwhm. This is because SUSAN defaults to smoothing the data with about the - voxel size of the input data if the fwhm parameter is less than 1/3 of the - voxel size. 
- """ - selectnode = pe.Node(interface=util.Select(), name='select') - - featpreproc.connect(concatnode, 'out', selectnode, 'inlist') - - featpreproc.connect(inputnode, ('fwhm', chooseindex), selectnode, 'index') - featpreproc.connect(selectnode, 'out', outputnode, 'smoothed_files') - """ - Scale the median value of the run is set to 10000 - """ - - meanscale = pe.MapNode( - interface=fsl.ImageMaths(suffix='_gms'), - iterfield=['in_file', 'op_string'], - name='meanscale') - featpreproc.connect(selectnode, 'out', meanscale, 'in_file') - """ - Define a function to get the scaling factor for intensity normalization - """ - - featpreproc.connect(medianval, ('out_stat', getmeanscale), meanscale, - 'op_string') - """ - Perform temporal highpass filtering on the data - """ - - if highpass: - highpass = pe.MapNode( - interface=fsl.ImageMaths(suffix='_tempfilt'), - iterfield=['in_file'], - name='highpass') - featpreproc.connect(inputnode, ('highpass', highpass_operand), - highpass, 'op_string') - featpreproc.connect(meanscale, 'out_file', highpass, 'in_file') - - if version < 507: - featpreproc.connect(highpass, 'out_file', outputnode, - 'highpassed_files') - else: - """ - Add back the mean removed by the highpass filter operation as of FSL 5.0.7 - """ - meanfunc4 = pe.MapNode( - interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'), - iterfield=['in_file'], - name='meanfunc4') - - featpreproc.connect(meanscale, 'out_file', meanfunc4, 'in_file') - addmean = pe.MapNode( - interface=fsl.BinaryMaths(operation='add'), - iterfield=['in_file', 'operand_file'], - name='addmean') - featpreproc.connect(highpass, 'out_file', addmean, 'in_file') - featpreproc.connect(meanfunc4, 'out_file', addmean, 'operand_file') - featpreproc.connect(addmean, 'out_file', outputnode, - 'highpassed_files') - """ - Generate a mean functional image from the first run - """ - - meanfunc3 = pe.MapNode( - interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'), - iterfield=['in_file'], - name='meanfunc3') - - featpreproc.connect(meanscale, 'out_file', meanfunc3, 'in_file') - featpreproc.connect(meanfunc3, 'out_file', outputnode, 'mean') - - return featpreproc - - -def create_featreg_preproc(name='featpreproc', - highpass=True, - whichvol='middle', - whichrun=0): - """Create a FEAT preprocessing workflow with registration to one volume of the first run - - Parameters - ---------- - - :: - - name : name of workflow (default: featpreproc) - highpass : boolean (default: True) - whichvol : which volume of the first run to register to ('first', 'middle', 'last', 'mean') - whichrun : which run to draw reference volume from (integer index or 'first', 'middle', 'last') - - Inputs:: - - inputspec.func : functional runs (filename or list of filenames) - inputspec.fwhm : fwhm for smoothing with SUSAN - inputspec.highpass : HWHM in TRs (if created with highpass=True) - - Outputs:: - - outputspec.reference : volume to which runs are realigned - outputspec.motion_parameters : motion correction parameters - outputspec.realigned_files : motion corrected files - outputspec.motion_plots : plots of motion correction parameters - outputspec.mask : mask file used to mask the brain - outputspec.smoothed_files : smoothed functional data - outputspec.highpassed_files : highpassed functional data (if highpass=True) - outputspec.mean : mean file - - Example - ------- - - >>> preproc = create_featreg_preproc() - >>> preproc.inputs.inputspec.func = ['f3.nii', 'f5.nii'] - >>> preproc.inputs.inputspec.fwhm = 5 - >>> preproc.inputs.inputspec.highpass 
= 128./(2*2.5) - >>> preproc.base_dir = '/tmp' - >>> preproc.run() # doctest: +SKIP - - >>> preproc = create_featreg_preproc(highpass=False, whichvol='mean') - >>> preproc.inputs.inputspec.func = 'f3.nii' - >>> preproc.inputs.inputspec.fwhm = 5 - >>> preproc.base_dir = '/tmp' - >>> preproc.run() # doctest: +SKIP - """ - - version = 0 - if fsl.Info.version() and \ - LooseVersion(fsl.Info.version()) > LooseVersion('5.0.6'): - version = 507 - - featpreproc = pe.Workflow(name=name) - """ - Set up a node to define all inputs required for the preprocessing workflow - - """ - - if highpass: - inputnode = pe.Node( - interface=util.IdentityInterface( - fields=['func', 'fwhm', 'highpass']), - name='inputspec') - outputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - 'reference', 'motion_parameters', 'realigned_files', - 'motion_plots', 'mask', 'smoothed_files', 'highpassed_files', - 'mean' - ]), - name='outputspec') - else: - inputnode = pe.Node( - interface=util.IdentityInterface(fields=['func', 'fwhm']), - name='inputspec') - outputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - 'reference', 'motion_parameters', 'realigned_files', - 'motion_plots', 'mask', 'smoothed_files', 'mean' - ]), - name='outputspec') - """ - Set up a node to define outputs for the preprocessing workflow - - """ - """ - Convert functional images to float representation. Since there can - be more than one functional run we use a MapNode to convert each - run. - """ - - img2float = pe.MapNode( - interface=fsl.ImageMaths( - out_data_type='float', op_string='', suffix='_dtype'), - iterfield=['in_file'], - name='img2float') - featpreproc.connect(inputnode, 'func', img2float, 'in_file') - """ - Extract the middle (or what whichvol points to) volume of the first run as the reference - """ - - if whichvol != 'mean': - extract_ref = pe.Node( - interface=fsl.ExtractROI(t_size=1), - iterfield=['in_file'], - name='extractref') - featpreproc.connect(img2float, ('out_file', pickrun, whichrun), - extract_ref, 'in_file') - featpreproc.connect(img2float, ('out_file', pickvol, 0, whichvol), - extract_ref, 't_min') - featpreproc.connect(extract_ref, 'roi_file', outputnode, 'reference') - """ - Realign the functional runs to the reference (`whichvol` volume of first run) - """ - - motion_correct = pe.MapNode( - interface=fsl.MCFLIRT( - save_mats=True, save_plots=True, interpolation='spline'), - name='realign', - iterfield=['in_file']) - featpreproc.connect(img2float, 'out_file', motion_correct, 'in_file') - if whichvol != 'mean': - featpreproc.connect(extract_ref, 'roi_file', motion_correct, - 'ref_file') - else: - motion_correct.inputs.mean_vol = True - featpreproc.connect(motion_correct, ('mean_img', pickrun, whichrun), - outputnode, 'reference') - - featpreproc.connect(motion_correct, 'par_file', outputnode, - 'motion_parameters') - featpreproc.connect(motion_correct, 'out_file', outputnode, - 'realigned_files') - """ - Plot the estimated motion parameters - """ - - plot_motion = pe.MapNode( - interface=fsl.PlotMotionParams(in_source='fsl'), - name='plot_motion', - iterfield=['in_file']) - plot_motion.iterables = ('plot_type', ['rotations', 'translations']) - featpreproc.connect(motion_correct, 'par_file', plot_motion, 'in_file') - featpreproc.connect(plot_motion, 'out_file', outputnode, 'motion_plots') - """ - Extract the mean volume of the first functional run - """ - - meanfunc = pe.Node( - interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'), - name='meanfunc') - featpreproc.connect(motion_correct, 
('out_file', pickrun, whichrun), - meanfunc, 'in_file') - """ - Strip the skull from the mean functional to generate a mask - """ - - meanfuncmask = pe.Node( - interface=fsl.BET(mask=True, no_output=True, frac=0.3), - name='meanfuncmask') - featpreproc.connect(meanfunc, 'out_file', meanfuncmask, 'in_file') - """ - Mask the functional runs with the extracted mask - """ - - maskfunc = pe.MapNode( - interface=fsl.ImageMaths(suffix='_bet', op_string='-mas'), - iterfield=['in_file'], - name='maskfunc') - featpreproc.connect(motion_correct, 'out_file', maskfunc, 'in_file') - featpreproc.connect(meanfuncmask, 'mask_file', maskfunc, 'in_file2') - """ - Determine the 2nd and 98th percentile intensities of each functional run - """ - - getthresh = pe.MapNode( - interface=fsl.ImageStats(op_string='-p 2 -p 98'), - iterfield=['in_file'], - name='getthreshold') - featpreproc.connect(maskfunc, 'out_file', getthresh, 'in_file') - """ - Threshold the first run of the functional data at 10% of the 98th percentile - """ - - threshold = pe.MapNode( - interface=fsl.ImageMaths(out_data_type='char', suffix='_thresh'), - iterfield=['in_file', 'op_string'], - name='threshold') - featpreproc.connect(maskfunc, 'out_file', threshold, 'in_file') - """ - Define a function to get 10% of the intensity - """ - - featpreproc.connect(getthresh, ('out_stat', getthreshop), threshold, - 'op_string') - """ - Determine the median value of the functional runs using the mask - """ - - medianval = pe.MapNode( - interface=fsl.ImageStats(op_string='-k %s -p 50'), - iterfield=['in_file', 'mask_file'], - name='medianval') - featpreproc.connect(motion_correct, 'out_file', medianval, 'in_file') - featpreproc.connect(threshold, 'out_file', medianval, 'mask_file') - """ - Dilate the mask - """ - - dilatemask = pe.MapNode( - interface=fsl.ImageMaths(suffix='_dil', op_string='-dilF'), - iterfield=['in_file'], - name='dilatemask') - featpreproc.connect(threshold, 'out_file', dilatemask, 'in_file') - featpreproc.connect(dilatemask, 'out_file', outputnode, 'mask') - """ - Mask the motion corrected functional runs with the dilated mask - """ - - maskfunc2 = pe.MapNode( - interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'), - iterfield=['in_file', 'in_file2'], - name='maskfunc2') - featpreproc.connect(motion_correct, 'out_file', maskfunc2, 'in_file') - featpreproc.connect(dilatemask, 'out_file', maskfunc2, 'in_file2') - """ - Smooth each run using SUSAN with the brightness threshold set to 75% - of the median value for each run and a mask constituting the mean - functional - """ - - smooth = create_susan_smooth() - - featpreproc.connect(inputnode, 'fwhm', smooth, 'inputnode.fwhm') - featpreproc.connect(maskfunc2, 'out_file', smooth, 'inputnode.in_files') - featpreproc.connect(dilatemask, 'out_file', smooth, 'inputnode.mask_file') - """ - Mask the smoothed data with the dilated mask - """ - - maskfunc3 = pe.MapNode( - interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'), - iterfield=['in_file', 'in_file2'], - name='maskfunc3') - featpreproc.connect(smooth, 'outputnode.smoothed_files', maskfunc3, - 'in_file') - - featpreproc.connect(dilatemask, 'out_file', maskfunc3, 'in_file2') - - concatnode = pe.Node(interface=util.Merge(2), name='concat') - featpreproc.connect(maskfunc2, ('out_file', tolist), concatnode, 'in1') - featpreproc.connect(maskfunc3, ('out_file', tolist), concatnode, 'in2') - """ - The following nodes select smooth or unsmoothed data depending on the - fwhm. 
This is because SUSAN defaults to smoothing the data with about the - voxel size of the input data if the fwhm parameter is less than 1/3 of the - voxel size. - """ - selectnode = pe.Node(interface=util.Select(), name='select') - - featpreproc.connect(concatnode, 'out', selectnode, 'inlist') - - featpreproc.connect(inputnode, ('fwhm', chooseindex), selectnode, 'index') - featpreproc.connect(selectnode, 'out', outputnode, 'smoothed_files') - """ - Scale the median value of the run is set to 10000 - """ - - meanscale = pe.MapNode( - interface=fsl.ImageMaths(suffix='_gms'), - iterfield=['in_file', 'op_string'], - name='meanscale') - featpreproc.connect(selectnode, 'out', meanscale, 'in_file') - """ - Define a function to get the scaling factor for intensity normalization - """ - - featpreproc.connect(medianval, ('out_stat', getmeanscale), meanscale, - 'op_string') - """ - Generate a mean functional image from the first run - """ - - meanfunc3 = pe.Node( - interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'), - iterfield=['in_file'], - name='meanfunc3') - - featpreproc.connect(meanscale, ('out_file', pickrun, whichrun), meanfunc3, - 'in_file') - featpreproc.connect(meanfunc3, 'out_file', outputnode, 'mean') - """ - Perform temporal highpass filtering on the data - """ - - if highpass: - highpass = pe.MapNode( - interface=fsl.ImageMaths(suffix='_tempfilt'), - iterfield=['in_file'], - name='highpass') - featpreproc.connect(inputnode, ('highpass', highpass_operand), - highpass, 'op_string') - featpreproc.connect(meanscale, 'out_file', highpass, 'in_file') - - if version < 507: - featpreproc.connect(highpass, 'out_file', outputnode, - 'highpassed_files') - else: - """ - Add back the mean removed by the highpass filter operation as of FSL 5.0.7 - """ - meanfunc4 = pe.MapNode( - interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'), - iterfield=['in_file'], - name='meanfunc4') - - featpreproc.connect(meanscale, 'out_file', meanfunc4, 'in_file') - addmean = pe.MapNode( - interface=fsl.BinaryMaths(operation='add'), - iterfield=['in_file', 'operand_file'], - name='addmean') - featpreproc.connect(highpass, 'out_file', addmean, 'in_file') - featpreproc.connect(meanfunc4, 'out_file', addmean, 'operand_file') - featpreproc.connect(addmean, 'out_file', outputnode, - 'highpassed_files') - - return featpreproc - - -def create_susan_smooth(name="susan_smooth", separate_masks=True): - """Create a SUSAN smoothing workflow - - Parameters - ---------- - - :: - - name : name of workflow (default: susan_smooth) - separate_masks : separate masks for each run - - Inputs:: - - inputnode.in_files : functional runs (filename or list of filenames) - inputnode.fwhm : fwhm for smoothing with SUSAN (float or list of floats) - inputnode.mask_file : mask used for estimating SUSAN thresholds (but not for smoothing) - - Outputs:: - - outputnode.smoothed_files : functional runs (filename or list of filenames) - - Example - ------- - - >>> smooth = create_susan_smooth() - >>> smooth.inputs.inputnode.in_files = 'f3.nii' - >>> smooth.inputs.inputnode.fwhm = 5 - >>> smooth.inputs.inputnode.mask_file = 'mask.nii' - >>> smooth.run() # doctest: +SKIP - - """ - - # replaces the functionality of a "for loop" - def cartesian_product(fwhms, in_files, usans, btthresh): - from nipype.utils.filemanip import ensure_list - # ensure all inputs are lists - in_files = ensure_list(in_files) - fwhms = [fwhms] if isinstance(fwhms, (int, float)) else fwhms - # create cartesian product lists (s_ = single element of list) - cart_in_file = 
[ - s_in_file for s_in_file in in_files for s_fwhm in fwhms - ] - cart_fwhm = [s_fwhm for s_in_file in in_files for s_fwhm in fwhms] - cart_usans = [s_usans for s_usans in usans for s_fwhm in fwhms] - cart_btthresh = [ - s_btthresh for s_btthresh in btthresh for s_fwhm in fwhms - ] - - return cart_in_file, cart_fwhm, cart_usans, cart_btthresh - - susan_smooth = pe.Workflow(name=name) - """ - Set up a node to define all inputs required for the preprocessing workflow - - """ - - inputnode = pe.Node( - interface=util.IdentityInterface( - fields=['in_files', 'fwhm', 'mask_file']), - name='inputnode') - """ - Smooth each run using SUSAN with the brightness threshold set to 75% - of the median value for each run and a mask consituting the mean - functional - """ - - multi_inputs = pe.Node( - util.Function( - function=cartesian_product, - output_names=[ - 'cart_in_file', 'cart_fwhm', 'cart_usans', 'cart_btthresh' - ]), - name='multi_inputs') - - smooth = pe.MapNode( - interface=fsl.SUSAN(), - iterfield=['in_file', 'brightness_threshold', 'usans', 'fwhm'], - name='smooth') - """ - Determine the median value of the functional runs using the mask - """ - - if separate_masks: - median = pe.MapNode( - interface=fsl.ImageStats(op_string='-k %s -p 50'), - iterfield=['in_file', 'mask_file'], - name='median') - else: - median = pe.MapNode( - interface=fsl.ImageStats(op_string='-k %s -p 50'), - iterfield=['in_file'], - name='median') - susan_smooth.connect(inputnode, 'in_files', median, 'in_file') - susan_smooth.connect(inputnode, 'mask_file', median, 'mask_file') - """ - Mask the motion corrected functional runs with the dilated mask - """ - - if separate_masks: - mask = pe.MapNode( - interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'), - iterfield=['in_file', 'in_file2'], - name='mask') - else: - mask = pe.MapNode( - interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'), - iterfield=['in_file'], - name='mask') - susan_smooth.connect(inputnode, 'in_files', mask, 'in_file') - susan_smooth.connect(inputnode, 'mask_file', mask, 'in_file2') - """ - Determine the mean image from each functional run - """ - - meanfunc = pe.MapNode( - interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'), - iterfield=['in_file'], - name='meanfunc2') - susan_smooth.connect(mask, 'out_file', meanfunc, 'in_file') - """ - Merge the median values with the mean functional images into a coupled list - """ - - merge = pe.Node(interface=util.Merge(2, axis='hstack'), name='merge') - susan_smooth.connect(meanfunc, 'out_file', merge, 'in1') - susan_smooth.connect(median, 'out_stat', merge, 'in2') - """ - Define a function to get the brightness threshold for SUSAN - """ - - susan_smooth.connect([ - (inputnode, multi_inputs, [('in_files', 'in_files'), ('fwhm', - 'fwhms')]), - (median, multi_inputs, [(('out_stat', getbtthresh), 'btthresh')]), - (merge, multi_inputs, [(('out', getusans), 'usans')]), - (multi_inputs, smooth, - [('cart_in_file', 'in_file'), ('cart_fwhm', 'fwhm'), - ('cart_btthresh', 'brightness_threshold'), ('cart_usans', 'usans')]), - ]) - - outputnode = pe.Node( - interface=util.IdentityInterface(fields=['smoothed_files']), - name='outputnode') - - susan_smooth.connect(smooth, 'smoothed_file', outputnode, 'smoothed_files') - - return susan_smooth - - -def create_fsl_fs_preproc(name='preproc', highpass=True, whichvol='middle'): - """Create a FEAT preprocessing workflow together with freesurfer - - Parameters - ---------- - - :: - - name : name of workflow (default: preproc) - highpass : boolean (default: True) 
- whichvol : which volume of the first run to register to ('first', 'middle', 'mean') - - Inputs:: - - inputspec.func : functional runs (filename or list of filenames) - inputspec.fwhm : fwhm for smoothing with SUSAN - inputspec.highpass : HWHM in TRs (if created with highpass=True) - inputspec.subject_id : freesurfer subject id - inputspec.subjects_dir : freesurfer subjects dir - - Outputs:: - - outputspec.reference : volume to which runs are realigned - outputspec.motion_parameters : motion correction parameters - outputspec.realigned_files : motion corrected files - outputspec.motion_plots : plots of motion correction parameters - outputspec.mask_file : mask file used to mask the brain - outputspec.smoothed_files : smoothed functional data - outputspec.highpassed_files : highpassed functional data (if highpass=True) - outputspec.reg_file : bbregister registration files - outputspec.reg_cost : bbregister registration cost files - - Example - ------- - - >>> preproc = create_fsl_fs_preproc(whichvol='first') - >>> preproc.inputs.inputspec.highpass = 128./(2*2.5) - >>> preproc.inputs.inputspec.func = ['f3.nii', 'f5.nii'] - >>> preproc.inputs.inputspec.subjects_dir = '.' - >>> preproc.inputs.inputspec.subject_id = 's1' - >>> preproc.inputs.inputspec.fwhm = 6 - >>> preproc.run() # doctest: +SKIP - """ - - featpreproc = pe.Workflow(name=name) - """ - Set up a node to define all inputs required for the preprocessing workflow - - """ - - if highpass: - inputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - 'func', 'fwhm', 'subject_id', 'subjects_dir', 'highpass' - ]), - name='inputspec') - outputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - 'reference', 'motion_parameters', 'realigned_files', - 'motion_plots', 'mask_file', 'smoothed_files', - 'highpassed_files', 'reg_file', 'reg_cost' - ]), - name='outputspec') - else: - inputnode = pe.Node( - interface=util.IdentityInterface( - fields=['func', 'fwhm', 'subject_id', 'subjects_dir']), - name='inputspec') - outputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - 'reference', 'motion_parameters', 'realigned_files', - 'motion_plots', 'mask_file', 'smoothed_files', 'reg_file', - 'reg_cost' - ]), - name='outputspec') - """ - Set up a node to define outputs for the preprocessing workflow - - """ - """ - Convert functional images to float representation. Since there can - be more than one functional run we use a MapNode to convert each - run. 
- """ - - img2float = pe.MapNode( - interface=fsl.ImageMaths( - out_data_type='float', op_string='', suffix='_dtype'), - iterfield=['in_file'], - name='img2float') - featpreproc.connect(inputnode, 'func', img2float, 'in_file') - """ - Extract the first volume of the first run as the reference - """ - - if whichvol != 'mean': - extract_ref = pe.Node( - interface=fsl.ExtractROI(t_size=1), - iterfield=['in_file'], - name='extractref') - featpreproc.connect(img2float, ('out_file', pickfirst), extract_ref, - 'in_file') - featpreproc.connect(img2float, ('out_file', pickvol, 0, whichvol), - extract_ref, 't_min') - featpreproc.connect(extract_ref, 'roi_file', outputnode, 'reference') - """ - Realign the functional runs to the reference (1st volume of first run) - """ - - motion_correct = pe.MapNode( - interface=fsl.MCFLIRT( - save_mats=True, save_plots=True, interpolation='sinc'), - name='realign', - iterfield=['in_file']) - featpreproc.connect(img2float, 'out_file', motion_correct, 'in_file') - if whichvol != 'mean': - featpreproc.connect(extract_ref, 'roi_file', motion_correct, - 'ref_file') - else: - motion_correct.inputs.mean_vol = True - featpreproc.connect(motion_correct, 'mean_img', outputnode, - 'reference') - - featpreproc.connect(motion_correct, 'par_file', outputnode, - 'motion_parameters') - featpreproc.connect(motion_correct, 'out_file', outputnode, - 'realigned_files') - """ - Plot the estimated motion parameters - """ - - plot_motion = pe.MapNode( - interface=fsl.PlotMotionParams(in_source='fsl'), - name='plot_motion', - iterfield=['in_file']) - plot_motion.iterables = ('plot_type', ['rotations', 'translations']) - featpreproc.connect(motion_correct, 'par_file', plot_motion, 'in_file') - featpreproc.connect(plot_motion, 'out_file', outputnode, 'motion_plots') - """Get the mask from subject for each run - """ - - maskflow = create_getmask_flow() - featpreproc.connect([(inputnode, maskflow, - [('subject_id', 'inputspec.subject_id'), - ('subjects_dir', 'inputspec.subjects_dir')])]) - maskflow.inputs.inputspec.contrast_type = 't2' - if whichvol != 'mean': - featpreproc.connect(extract_ref, 'roi_file', maskflow, - 'inputspec.source_file') - else: - featpreproc.connect(motion_correct, ('mean_img', pickfirst), maskflow, - 'inputspec.source_file') - """ - Mask the functional runs with the extracted mask - """ - - maskfunc = pe.MapNode( - interface=fsl.ImageMaths(suffix='_bet', op_string='-mas'), - iterfield=['in_file'], - name='maskfunc') - featpreproc.connect(motion_correct, 'out_file', maskfunc, 'in_file') - featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst), - maskfunc, 'in_file2') - """ - Smooth each run using SUSAN with the brightness threshold set to 75% - of the median value for each run and a mask consituting the mean - functional - """ - - smooth = create_susan_smooth(separate_masks=False) - - featpreproc.connect(inputnode, 'fwhm', smooth, 'inputnode.fwhm') - featpreproc.connect(maskfunc, 'out_file', smooth, 'inputnode.in_files') - featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst), smooth, - 'inputnode.mask_file') - """ - Mask the smoothed data with the dilated mask - """ - - maskfunc3 = pe.MapNode( - interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'), - iterfield=['in_file'], - name='maskfunc3') - featpreproc.connect(smooth, 'outputnode.smoothed_files', maskfunc3, - 'in_file') - featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst), - maskfunc3, 'in_file2') - - concatnode = pe.Node(interface=util.Merge(2), name='concat') - 
featpreproc.connect(maskfunc, ('out_file', tolist), concatnode, 'in1') - featpreproc.connect(maskfunc3, ('out_file', tolist), concatnode, 'in2') - """ - The following nodes select smooth or unsmoothed data depending on the - fwhm. This is because SUSAN defaults to smoothing the data with about the - voxel size of the input data if the fwhm parameter is less than 1/3 of the - voxel size. - """ - selectnode = pe.Node(interface=util.Select(), name='select') - - featpreproc.connect(concatnode, 'out', selectnode, 'inlist') - - featpreproc.connect(inputnode, ('fwhm', chooseindex), selectnode, 'index') - featpreproc.connect(selectnode, 'out', outputnode, 'smoothed_files') - """ - Scale the median value of the run is set to 10000 - """ - - meanscale = pe.MapNode( - interface=fsl.ImageMaths(suffix='_gms'), - iterfield=['in_file', 'op_string'], - name='meanscale') - featpreproc.connect(selectnode, 'out', meanscale, 'in_file') - """ - Determine the median value of the functional runs using the mask - """ - - medianval = pe.MapNode( - interface=fsl.ImageStats(op_string='-k %s -p 50'), - iterfield=['in_file'], - name='medianval') - featpreproc.connect(motion_correct, 'out_file', medianval, 'in_file') - featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst), - medianval, 'mask_file') - """ - Define a function to get the scaling factor for intensity normalization - """ - - featpreproc.connect(medianval, ('out_stat', getmeanscale), meanscale, - 'op_string') - """ - Perform temporal highpass filtering on the data - """ - - if highpass: - highpass = pe.MapNode( - interface=fsl.ImageMaths(suffix='_tempfilt'), - iterfield=['in_file'], - name='highpass') - featpreproc.connect(inputnode, ('highpass', highpass_operand), - highpass, 'op_string') - featpreproc.connect(meanscale, 'out_file', highpass, 'in_file') - featpreproc.connect(highpass, 'out_file', outputnode, - 'highpassed_files') - - featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst), - outputnode, 'mask_file') - featpreproc.connect(maskflow, 'outputspec.reg_file', outputnode, - 'reg_file') - featpreproc.connect(maskflow, 'outputspec.reg_cost', outputnode, - 'reg_cost') - - return featpreproc - - -def create_reg_workflow(name='registration'): - """Create a FEAT preprocessing workflow - - Parameters - ---------- - - :: - - name : name of workflow (default: 'registration') - - Inputs:: - - inputspec.source_files : files (filename or list of filenames to register) - inputspec.mean_image : reference image to use - inputspec.anatomical_image : anatomical image to coregister to - inputspec.target_image : registration target - - Outputs:: - - outputspec.func2anat_transform : FLIRT transform - outputspec.anat2target_transform : FLIRT+FNIRT transform - outputspec.transformed_files : transformed files in target space - outputspec.transformed_mean : mean image in target space - - Example - ------- - - """ - - register = pe.Workflow(name=name) - - inputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - 'source_files', 'mean_image', 'anatomical_image', 'target_image', - 'target_image_brain', 'config_file' - ]), - name='inputspec') - outputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - 'func2anat_transform', - 'anat2target_transform', - 'transformed_files', - 'transformed_mean', - ]), - name='outputspec') - """ - Estimate the tissue classes from the anatomical image. But use spm's segment - as FSL appears to be breaking. 
- """ - - stripper = pe.Node(fsl.BET(), name='stripper') - register.connect(inputnode, 'anatomical_image', stripper, 'in_file') - fast = pe.Node(fsl.FAST(), name='fast') - register.connect(stripper, 'out_file', fast, 'in_files') - """ - Binarize the segmentation - """ - - binarize = pe.Node( - fsl.ImageMaths(op_string='-nan -thr 0.5 -bin'), name='binarize') - pickindex = lambda x, i: x[i] - register.connect(fast, ('partial_volume_files', pickindex, 2), binarize, - 'in_file') - """ - Calculate rigid transform from mean image to anatomical image - """ - - mean2anat = pe.Node(fsl.FLIRT(), name='mean2anat') - mean2anat.inputs.dof = 6 - register.connect(inputnode, 'mean_image', mean2anat, 'in_file') - register.connect(stripper, 'out_file', mean2anat, 'reference') - """ - Now use bbr cost function to improve the transform - """ - - mean2anatbbr = pe.Node(fsl.FLIRT(), name='mean2anatbbr') - mean2anatbbr.inputs.dof = 6 - mean2anatbbr.inputs.cost = 'bbr' - mean2anatbbr.inputs.schedule = os.path.join( - os.getenv('FSLDIR'), 'etc/flirtsch/bbr.sch') - register.connect(inputnode, 'mean_image', mean2anatbbr, 'in_file') - register.connect(binarize, 'out_file', mean2anatbbr, 'wm_seg') - register.connect(inputnode, 'anatomical_image', mean2anatbbr, 'reference') - register.connect(mean2anat, 'out_matrix_file', mean2anatbbr, - 'in_matrix_file') - """ - Calculate affine transform from anatomical to target - """ - - anat2target_affine = pe.Node(fsl.FLIRT(), name='anat2target_linear') - anat2target_affine.inputs.searchr_x = [-180, 180] - anat2target_affine.inputs.searchr_y = [-180, 180] - anat2target_affine.inputs.searchr_z = [-180, 180] - register.connect(stripper, 'out_file', anat2target_affine, 'in_file') - register.connect(inputnode, 'target_image_brain', anat2target_affine, - 'reference') - """ - Calculate nonlinear transform from anatomical to target - """ - - anat2target_nonlinear = pe.Node(fsl.FNIRT(), name='anat2target_nonlinear') - anat2target_nonlinear.inputs.fieldcoeff_file = True - register.connect(anat2target_affine, 'out_matrix_file', - anat2target_nonlinear, 'affine_file') - register.connect(inputnode, 'anatomical_image', anat2target_nonlinear, - 'in_file') - register.connect(inputnode, 'config_file', anat2target_nonlinear, - 'config_file') - register.connect(inputnode, 'target_image', anat2target_nonlinear, - 'ref_file') - """ - Transform the mean image. First to anatomical and then to target - """ - - warpmean = pe.Node(fsl.ApplyWarp(interp='spline'), name='warpmean') - register.connect(inputnode, 'mean_image', warpmean, 'in_file') - register.connect(mean2anatbbr, 'out_matrix_file', warpmean, 'premat') - register.connect(inputnode, 'target_image', warpmean, 'ref_file') - register.connect(anat2target_nonlinear, 'fieldcoeff_file', warpmean, - 'field_file') - """ - Transform the remaining images. 
First to anatomical and then to target - """ - - warpall = pe.MapNode( - fsl.ApplyWarp(interp='spline'), - iterfield=['in_file'], - nested=True, - name='warpall') - register.connect(inputnode, 'source_files', warpall, 'in_file') - register.connect(mean2anatbbr, 'out_matrix_file', warpall, 'premat') - register.connect(inputnode, 'target_image', warpall, 'ref_file') - register.connect(anat2target_nonlinear, 'fieldcoeff_file', warpall, - 'field_file') - """ - Assign all the output files - """ - - register.connect(warpmean, 'out_file', outputnode, 'transformed_mean') - register.connect(warpall, 'out_file', outputnode, 'transformed_files') - register.connect(mean2anatbbr, 'out_matrix_file', outputnode, - 'func2anat_transform') - register.connect(anat2target_nonlinear, 'fieldcoeff_file', outputnode, - 'anat2target_transform') - - return register diff --git a/nipype/workflows/fmri/fsl/tests/__init__.py b/nipype/workflows/fmri/fsl/tests/__init__.py deleted file mode 100644 index 2986294d9d..0000000000 --- a/nipype/workflows/fmri/fsl/tests/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# -*- coding: utf-8 -*- -__author__ = 'satra' diff --git a/nipype/workflows/fmri/fsl/tests/test_preprocess.py b/nipype/workflows/fmri/fsl/tests/test_preprocess.py deleted file mode 100644 index 4f382bdc1a..0000000000 --- a/nipype/workflows/fmri/fsl/tests/test_preprocess.py +++ /dev/null @@ -1,25 +0,0 @@ -__author__ = 'oliver' - -from ..preprocess import create_featreg_preproc, pickrun - - -def test_pickrun(): - files = ['1', '2', '3', '4'] - assert pickrun(files, 0) == '1' - assert pickrun(files, 'first') == '1' - assert pickrun(files, -1) == '4' - assert pickrun(files, 'last') == '4' - assert pickrun(files, 'middle') == '3' - - -def test_create_featreg_preproc(): - """smoke test""" - wf = create_featreg_preproc(whichrun=0) - - # test type - import nipype - assert type(wf) == nipype.pipeline.engine.Workflow - - # test methods - assert wf.get_node('extractref') - assert wf._get_dot() diff --git a/nipype/workflows/fmri/spm/__init__.py b/nipype/workflows/fmri/spm/__init__.py deleted file mode 100644 index f974a663db..0000000000 --- a/nipype/workflows/fmri/spm/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -from .preprocess import (create_spm_preproc, create_vbm_preproc, - create_DARTEL_template) diff --git a/nipype/workflows/fmri/spm/estimate.py b/nipype/workflows/fmri/spm/estimate.py deleted file mode 100644 index 99fb243f19..0000000000 --- a/nipype/workflows/fmri/spm/estimate.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: diff --git a/nipype/workflows/fmri/spm/preprocess.py b/nipype/workflows/fmri/spm/preprocess.py deleted file mode 100644 index f2957e4b03..0000000000 --- a/nipype/workflows/fmri/spm/preprocess.py +++ /dev/null @@ -1,332 +0,0 @@ -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: - -import os - -from ....algorithms import rapidart as ra -from ....interfaces import spm as spm -from ....interfaces import utility as niu -from ....pipeline import engine as pe -from ...smri.freesurfer.utils import create_getmask_flow - -from .... import logging -logger = logging.getLogger('nipype.workflow') - - -def create_spm_preproc(name='preproc'): - """Create an spm preprocessing workflow with freesurfer registration and - artifact detection. 
- - The workflow realigns and smooths and registers the functional images with - the subject's freesurfer space. - - Example - ------- - - >>> preproc = create_spm_preproc() - >>> preproc.base_dir = '.' - >>> preproc.inputs.inputspec.fwhm = 6 - >>> preproc.inputs.inputspec.subject_id = 's1' - >>> preproc.inputs.inputspec.subjects_dir = '.' - >>> preproc.inputs.inputspec.functionals = ['f3.nii', 'f5.nii'] - >>> preproc.inputs.inputspec.norm_threshold = 1 - >>> preproc.inputs.inputspec.zintensity_threshold = 3 - - Inputs:: - - inputspec.functionals : functional runs use 4d nifti - inputspec.subject_id : freesurfer subject id - inputspec.subjects_dir : freesurfer subjects dir - inputspec.fwhm : smoothing fwhm - inputspec.norm_threshold : norm threshold for outliers - inputspec.zintensity_threshold : intensity threshold in z-score - - Outputs:: - - outputspec.realignment_parameters : realignment parameter files - outputspec.smoothed_files : smoothed functional files - outputspec.outlier_files : list of outliers - outputspec.outlier_stats : statistics of outliers - outputspec.outlier_plots : images of outliers - outputspec.mask_file : binary mask file in reference image space - outputspec.reg_file : registration file that maps reference image to - freesurfer space - outputspec.reg_cost : cost of registration (useful for detecting misalignment) - """ - """ - Initialize the workflow - """ - - workflow = pe.Workflow(name=name) - """ - Define the inputs to this workflow - """ - - inputnode = pe.Node( - niu.IdentityInterface(fields=[ - 'functionals', 'subject_id', 'subjects_dir', 'fwhm', - 'norm_threshold', 'zintensity_threshold' - ]), - name='inputspec') - """ - Setup the processing nodes and create the mask generation and coregistration - workflow - """ - - poplist = lambda x: x.pop() - realign = pe.Node(spm.Realign(), name='realign') - workflow.connect(inputnode, 'functionals', realign, 'in_files') - maskflow = create_getmask_flow() - workflow.connect([(inputnode, maskflow, - [('subject_id', 'inputspec.subject_id'), - ('subjects_dir', 'inputspec.subjects_dir')])]) - maskflow.inputs.inputspec.contrast_type = 't2' - workflow.connect(realign, 'mean_image', maskflow, 'inputspec.source_file') - smooth = pe.Node(spm.Smooth(), name='smooth') - workflow.connect(inputnode, 'fwhm', smooth, 'fwhm') - workflow.connect(realign, 'realigned_files', smooth, 'in_files') - artdetect = pe.Node( - ra.ArtifactDetect( - mask_type='file', - parameter_source='SPM', - use_differences=[True, False], - use_norm=True, - save_plot=True), - name='artdetect') - workflow.connect([(inputnode, artdetect, - [('norm_threshold', 'norm_threshold'), - ('zintensity_threshold', 'zintensity_threshold')])]) - workflow.connect([(realign, artdetect, [('realigned_files', - 'realigned_files'), - ('realignment_parameters', - 'realignment_parameters')])]) - workflow.connect(maskflow, ('outputspec.mask_file', poplist), artdetect, - 'mask_file') - """ - Define the outputs of the workflow and connect the nodes to the outputnode - """ - - outputnode = pe.Node( - niu.IdentityInterface(fields=[ - "realignment_parameters", "smoothed_files", "mask_file", - "reg_file", "reg_cost", 'outlier_files', 'outlier_stats', - 'outlier_plots' - ]), - name="outputspec") - workflow.connect( - [(maskflow, outputnode, [("outputspec.reg_file", "reg_file")]), - (maskflow, outputnode, - [("outputspec.reg_cost", "reg_cost")]), (maskflow, outputnode, [ - (("outputspec.mask_file", poplist), "mask_file") - ]), (realign, outputnode, [('realignment_parameters', - 
'realignment_parameters')]), - (smooth, outputnode, [('smoothed_files', 'smoothed_files')]), - (artdetect, outputnode, [('outlier_files', 'outlier_files'), - ('statistic_files', 'outlier_stats'), - ('plot_files', 'outlier_plots')])]) - return workflow - - -def create_vbm_preproc(name='vbmpreproc'): - """Create a vbm workflow that generates DARTEL-based warps to MNI space - - Based on: http://www.fil.ion.ucl.ac.uk/~john/misc/VBMclass10.pdf - - Example - ------- - - >>> preproc = create_vbm_preproc() - >>> preproc.inputs.inputspec.fwhm = 8 - >>> preproc.inputs.inputspec.structural_files = [ - ... os.path.abspath('s1.nii'), os.path.abspath('s3.nii')] - >>> preproc.inputs.inputspec.template_prefix = 'Template' - >>> preproc.run() # doctest: +SKIP - - Inputs:: - - inputspec.structural_files : structural data to be used to create templates - inputspec.fwhm: single of triplet for smoothing when normalizing to MNI space - inputspec.template_prefix : prefix for dartel template - - Outputs:: - - outputspec.normalized_files : normalized gray matter files - outputspec.template_file : DARTEL template - outputspec.icv : intracranial volume (cc - assuming dimensions in mm) - - """ - - workflow = pe.Workflow(name=name) - """ - Define the inputs to this workflow - """ - - inputnode = pe.Node( - niu.IdentityInterface( - fields=['structural_files', 'fwhm', 'template_prefix']), - name='inputspec') - - dartel_template = create_DARTEL_template() - - workflow.connect(inputnode, 'template_prefix', dartel_template, - 'inputspec.template_prefix') - workflow.connect(inputnode, 'structural_files', dartel_template, - 'inputspec.structural_files') - - norm2mni = pe.Node(spm.DARTELNorm2MNI(modulate=True), name='norm2mni') - workflow.connect(dartel_template, 'outputspec.template_file', norm2mni, - 'template_file') - workflow.connect(dartel_template, 'outputspec.flow_fields', norm2mni, - 'flowfield_files') - - def getclass1images(class_images): - class1images = [] - for session in class_images: - class1images.extend(session[0]) - return class1images - - workflow.connect(dartel_template, - ('segment.native_class_images', getclass1images), - norm2mni, 'apply_to_files') - workflow.connect(inputnode, 'fwhm', norm2mni, 'fwhm') - - def compute_icv(class_images): - from nibabel import load - from numpy import prod - icv = [] - for session in class_images: - voxel_volume = prod(load(session[0][0]).header.get_zooms()) - img = load(session[0][0]).get_data() + \ - load(session[1][0]).get_data() + \ - load(session[2][0]).get_data() - img_icv = (img > 0.5).astype(int).sum() * voxel_volume * 1e-3 - icv.append(img_icv) - return icv - - calc_icv = pe.Node( - niu.Function( - function=compute_icv, - input_names=['class_images'], - output_names=['icv']), - name='calc_icv') - - workflow.connect(dartel_template, 'segment.native_class_images', calc_icv, - 'class_images') - """ - Define the outputs of the workflow and connect the nodes to the outputnode - """ - - outputnode = pe.Node( - niu.IdentityInterface( - fields=["normalized_files", "template_file", "icv"]), - name="outputspec") - workflow.connect([ - (dartel_template, outputnode, [('outputspec.template_file', - 'template_file')]), - (norm2mni, outputnode, [("normalized_files", "normalized_files")]), - (calc_icv, outputnode, [("icv", "icv")]), - ]) - - return workflow - - -def create_DARTEL_template(name='dartel_template'): - """Create a vbm workflow that generates DARTEL-based template - - - Example - ------- - - >>> preproc = create_DARTEL_template() - >>> 
preproc.inputs.inputspec.structural_files = [ - ... os.path.abspath('s1.nii'), os.path.abspath('s3.nii')] - >>> preproc.inputs.inputspec.template_prefix = 'Template' - >>> preproc.run() # doctest: +SKIP - - Inputs:: - - inputspec.structural_files : structural data to be used to create templates - inputspec.template_prefix : prefix for dartel template - - Outputs:: - - outputspec.template_file : DARTEL template - outputspec.flow_fields : warps from input struct files to the template - - """ - - workflow = pe.Workflow(name=name) - - inputnode = pe.Node( - niu.IdentityInterface(fields=['structural_files', 'template_prefix']), - name='inputspec') - - segment = pe.MapNode( - spm.NewSegment(), iterfield=['channel_files'], name='segment') - workflow.connect(inputnode, 'structural_files', segment, 'channel_files') - - spm_info = spm.Info.getinfo() - if spm_info: - spm_path = spm_info['path'] - if spm_info['name'] == 'SPM8': - tissue1 = ((os.path.join(spm_path, 'toolbox/Seg/TPM.nii'), 1), 2, - (True, True), (False, False)) - tissue2 = ((os.path.join(spm_path, 'toolbox/Seg/TPM.nii'), 2), 2, - (True, True), (False, False)) - tissue3 = ((os.path.join(spm_path, 'toolbox/Seg/TPM.nii'), 3), 2, - (True, False), (False, False)) - tissue4 = ((os.path.join(spm_path, 'toolbox/Seg/TPM.nii'), 4), 3, - (False, False), (False, False)) - tissue5 = ((os.path.join(spm_path, 'toolbox/Seg/TPM.nii'), 5), 4, - (False, False), (False, False)) - tissue6 = ((os.path.join(spm_path, 'toolbox/Seg/TPM.nii'), 6), 2, - (False, False), (False, False)) - elif spm_info['name'] == 'SPM12': - spm_path = spm_info['path'] - tissue1 = ((os.path.join(spm_path, 'tpm/TPM.nii'), 1), 1, - (True, True), (False, False)) - tissue2 = ((os.path.join(spm_path, 'tpm/TPM.nii'), 2), 1, - (True, True), (False, False)) - tissue3 = ((os.path.join(spm_path, 'tpm/TPM.nii'), 3), 2, - (True, False), (False, False)) - tissue4 = ((os.path.join(spm_path, 'tpm/TPM.nii'), 4), 3, - (False, False), (False, False)) - tissue5 = ((os.path.join(spm_path, 'tpm/TPM.nii'), 5), 4, - (False, False), (False, False)) - tissue6 = ((os.path.join(spm_path, 'tpm/TPM.nii'), 6), 2, - (False, False), (False, False)) - else: - logger.critical('Unsupported version of SPM') - - segment.inputs.tissues = [ - tissue1, tissue2, tissue3, tissue4, tissue5, tissue6 - ] - else: - logger.critical('SPM not found') - - dartel = pe.Node(spm.DARTEL(), name='dartel') - """Get the gray and white segmentation classes generated by NewSegment - """ - - def get2classes(dartel_files): - class1images = [] - class2images = [] - for session in dartel_files: - class1images.extend(session[0]) - class2images.extend(session[1]) - return [class1images, class2images] - - workflow.connect(segment, ('dartel_input_images', get2classes), dartel, - 'image_files') - workflow.connect(inputnode, 'template_prefix', dartel, 'template_prefix') - - outputnode = pe.Node( - niu.IdentityInterface(fields=["template_file", "flow_fields"]), - name="outputspec") - workflow.connect([ - (dartel, outputnode, [('final_template_file', 'template_file'), - ('dartel_flow_fields', 'flow_fields')]), - ]) - - return workflow diff --git a/nipype/workflows/fmri/spm/tests/__init__.py b/nipype/workflows/fmri/spm/tests/__init__.py deleted file mode 100644 index 2986294d9d..0000000000 --- a/nipype/workflows/fmri/spm/tests/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# -*- coding: utf-8 -*- -__author__ = 'satra' diff --git a/nipype/workflows/graph/__init__.py b/nipype/workflows/graph/__init__.py deleted file mode 100644 index ead6180dc8..0000000000 
--- a/nipype/workflows/graph/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) diff --git a/nipype/workflows/misc/__init__.py b/nipype/workflows/misc/__init__.py deleted file mode 100644 index 40a96afc6f..0000000000 --- a/nipype/workflows/misc/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# -*- coding: utf-8 -*- diff --git a/nipype/workflows/misc/utils.py b/nipype/workflows/misc/utils.py deleted file mode 100644 index b581ec8c54..0000000000 --- a/nipype/workflows/misc/utils.py +++ /dev/null @@ -1,91 +0,0 @@ -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) - -from builtins import map, range - - -def get_vox_dims(volume): - import nibabel as nb - from nipype.utils import NUMPY_MMAP - if isinstance(volume, list): - volume = volume[0] - nii = nb.load(volume, mmap=NUMPY_MMAP) - hdr = nii.header - voxdims = hdr.get_zooms() - return [float(voxdims[0]), float(voxdims[1]), float(voxdims[2])] - - -def get_data_dims(volume): - import nibabel as nb - from nipype.utils import NUMPY_MMAP - if isinstance(volume, list): - volume = volume[0] - nii = nb.load(volume, mmap=NUMPY_MMAP) - hdr = nii.header - datadims = hdr.get_data_shape() - return [int(datadims[0]), int(datadims[1]), int(datadims[2])] - - -def get_affine(volume): - import nibabel as nb - from nipype.utils import NUMPY_MMAP - nii = nb.load(volume, mmap=NUMPY_MMAP) - return nii.affine - - -def select_aparc(list_of_files): - for in_file in list_of_files: - if 'aparc+aseg.mgz' in in_file: - idx = list_of_files.index(in_file) - return list_of_files[idx] - - -def select_aparc_annot(list_of_files): - for in_file in list_of_files: - if '.aparc.annot' in in_file: - idx = list_of_files.index(in_file) - return list_of_files[idx] - - -def region_list_from_volume(in_file): - import nibabel as nb - import numpy as np - from nipype.utils import NUMPY_MMAP - segmentation = nb.load(in_file, mmap=NUMPY_MMAP) - segmentationdata = segmentation.get_data() - rois = np.unique(segmentationdata) - region_list = list(rois) - region_list.sort() - region_list.remove(0) - region_list = list(map(int, region_list)) - return region_list - - -def id_list_from_lookup_table(lookup_file, region_list): - import numpy as np - LUTlabelsRGBA = np.loadtxt( - lookup_file, - skiprows=4, - usecols=[0, 1, 2, 3, 4, 5], - comments='#', - dtype={ - 'names': ('index', 'label', 'R', 'G', 'B', 'A'), - 'formats': ('int', '|S30', 'int', 'int', 'int', 'int') - }) - numLUTLabels = np.size(LUTlabelsRGBA) - LUTlabelDict = {} - for labels in range(0, numLUTLabels): - LUTlabelDict[LUTlabelsRGBA[labels][0]] = [ - LUTlabelsRGBA[labels][1], LUTlabelsRGBA[labels][2], - LUTlabelsRGBA[labels][3], LUTlabelsRGBA[labels][4], - LUTlabelsRGBA[labels][5] - ] - id_list = [] - for region in region_list: - label = LUTlabelDict[region][0] - id_list.append(label) - id_list = list(map(str, id_list)) - return id_list diff --git a/nipype/workflows/rsfmri/__init__.py b/nipype/workflows/rsfmri/__init__.py deleted file mode 100644 index bd58039343..0000000000 --- a/nipype/workflows/rsfmri/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) - -from . 
import fsl diff --git a/nipype/workflows/rsfmri/fsl/__init__.py b/nipype/workflows/rsfmri/fsl/__init__.py deleted file mode 100644 index 2e17899066..0000000000 --- a/nipype/workflows/rsfmri/fsl/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# -*- coding: utf-8 -*- -from .resting import create_resting_preproc diff --git a/nipype/workflows/rsfmri/fsl/resting.py b/nipype/workflows/rsfmri/fsl/resting.py deleted file mode 100644 index 176a0ed6f7..0000000000 --- a/nipype/workflows/rsfmri/fsl/resting.py +++ /dev/null @@ -1,162 +0,0 @@ -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import (print_function, division, unicode_literals, - absolute_import) - -from ....interfaces import fsl as fsl # fsl -from ....interfaces import utility as util # utility -from ....pipeline import engine as pe # pypeline engine -from ....algorithms import confounds - - -def select_volume(filename, which): - """Return the middle index of a file - """ - from nibabel import load - import numpy as np - from nipype.utils import NUMPY_MMAP - - if which.lower() == 'first': - idx = 0 - elif which.lower() == 'middle': - idx = int(np.ceil(load(filename, mmap=NUMPY_MMAP).shape[3] / 2)) - else: - raise Exception('unknown value for volume selection : %s' % which) - return idx - - -def create_realign_flow(name='realign'): - """Realign a time series to the middle volume using spline interpolation - - Uses MCFLIRT to realign the time series and ApplyWarp to apply the rigid - body transformations using spline interpolation (unknown order). - - Example - ------- - - >>> wf = create_realign_flow() - >>> wf.inputs.inputspec.func = 'f3.nii' - >>> wf.run() # doctest: +SKIP - - """ - realignflow = pe.Workflow(name=name) - inputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - 'func', - ]), name='inputspec') - outputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - 'realigned_file', - ]), - name='outputspec') - realigner = pe.Node( - fsl.MCFLIRT(save_mats=True, stats_imgs=True), name='realigner') - splitter = pe.Node(fsl.Split(dimension='t'), name='splitter') - warper = pe.MapNode( - fsl.ApplyWarp(interp='spline'), - iterfield=['in_file', 'premat'], - name='warper') - joiner = pe.Node(fsl.Merge(dimension='t'), name='joiner') - - realignflow.connect(inputnode, 'func', realigner, 'in_file') - realignflow.connect(inputnode, ('func', select_volume, 'middle'), - realigner, 'ref_vol') - realignflow.connect(realigner, 'out_file', splitter, 'in_file') - realignflow.connect(realigner, 'mat_file', warper, 'premat') - realignflow.connect(realigner, 'variance_img', warper, 'ref_file') - realignflow.connect(splitter, 'out_files', warper, 'in_file') - realignflow.connect(warper, 'out_file', joiner, 'in_files') - realignflow.connect(joiner, 'merged_file', outputnode, 'realigned_file') - return realignflow - - -def create_resting_preproc(name='restpreproc', base_dir=None): - """Create a "resting" time series preprocessing workflow - - The noise removal is based on Behzadi et al. 
(2007) - - Parameters - ---------- - - name : name of workflow (default: restpreproc) - - Inputs:: - - inputspec.func : functional run (filename or list of filenames) - - Outputs:: - - outputspec.noise_mask_file : voxels used for PCA to derive noise - components - outputspec.filtered_file : bandpass filtered and noise-reduced time - series - - Example - ------- - - >>> TR = 3.0 - >>> wf = create_resting_preproc() - >>> wf.inputs.inputspec.func = 'f3.nii' - >>> wf.inputs.inputspec.num_noise_components = 6 - >>> wf.inputs.inputspec.highpass_sigma = 100/(2*TR) - >>> wf.inputs.inputspec.lowpass_sigma = 12.5/(2*TR) - >>> wf.run() # doctest: +SKIP - - """ - - restpreproc = pe.Workflow(name=name, base_dir=base_dir) - - # Define nodes - inputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - 'func', 'num_noise_components', 'highpass_sigma', 'lowpass_sigma' - ]), - name='inputspec') - outputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - 'noise_mask_file', - 'filtered_file', - ]), - name='outputspec') - slicetimer = pe.Node(fsl.SliceTimer(), name='slicetimer') - realigner = create_realign_flow() - tsnr = pe.Node(confounds.TSNR(regress_poly=2), name='tsnr') - getthresh = pe.Node( - interface=fsl.ImageStats(op_string='-p 98'), name='getthreshold') - threshold_stddev = pe.Node(fsl.Threshold(), name='threshold') - compcor = pe.Node( - confounds.ACompCor( - components_file="noise_components.txt", pre_filter=False), - name='compcor') - remove_noise = pe.Node( - fsl.FilterRegressor(filter_all=True), name='remove_noise') - bandpass_filter = pe.Node(fsl.TemporalFilter(), name='bandpass_filter') - - # Define connections - restpreproc.connect(inputnode, 'func', slicetimer, 'in_file') - restpreproc.connect(slicetimer, 'slice_time_corrected_file', realigner, - 'inputspec.func') - restpreproc.connect(realigner, 'outputspec.realigned_file', tsnr, - 'in_file') - restpreproc.connect(tsnr, 'stddev_file', threshold_stddev, 'in_file') - restpreproc.connect(tsnr, 'stddev_file', getthresh, 'in_file') - restpreproc.connect(getthresh, 'out_stat', threshold_stddev, 'thresh') - restpreproc.connect(realigner, 'outputspec.realigned_file', compcor, - 'realigned_file') - restpreproc.connect(threshold_stddev, 'out_file', compcor, 'mask_files') - restpreproc.connect(inputnode, 'num_noise_components', compcor, - 'num_components') - restpreproc.connect(tsnr, 'detrended_file', remove_noise, 'in_file') - restpreproc.connect(compcor, 'components_file', remove_noise, - 'design_file') - restpreproc.connect(inputnode, 'highpass_sigma', bandpass_filter, - 'highpass_sigma') - restpreproc.connect(inputnode, 'lowpass_sigma', bandpass_filter, - 'lowpass_sigma') - restpreproc.connect(remove_noise, 'out_file', bandpass_filter, 'in_file') - restpreproc.connect(threshold_stddev, 'out_file', outputnode, - 'noise_mask_file') - restpreproc.connect(bandpass_filter, 'out_file', outputnode, - 'filtered_file') - return restpreproc diff --git a/nipype/workflows/rsfmri/fsl/tests/__init__.py b/nipype/workflows/rsfmri/fsl/tests/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/nipype/workflows/rsfmri/fsl/tests/test_resting.py b/nipype/workflows/rsfmri/fsl/tests/test_resting.py deleted file mode 100644 index eba73a75b1..0000000000 --- a/nipype/workflows/rsfmri/fsl/tests/test_resting.py +++ /dev/null @@ -1,107 +0,0 @@ -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: - -import pytest -import os -import mock -import numpy as np - -from 
.....testing import utils -from .....interfaces import IdentityInterface -from .....pipeline.engine import Node, Workflow - -from ..resting import create_resting_preproc - -ALL_FIELDS = [ - 'func', 'in_file', 'slice_time_corrected_file', 'stddev_file', 'out_stat', - 'thresh', 'num_noise_components', 'detrended_file', 'design_file', - 'highpass_sigma', 'lowpass_sigma', 'out_file', 'noise_mask_file', - 'filtered_file' -] - - -def stub_node_factory(*args, **kwargs): - if 'name' not in kwargs.keys(): - raise Exception() - name = kwargs['name'] - if name == 'compcor': - return Node(*args, **kwargs) - else: # replace with an IdentityInterface - return Node(IdentityInterface(fields=ALL_FIELDS), name=name) - - -def stub_wf(*args, **kwargs): - wflow = Workflow(name='realigner') - inputnode = Node(IdentityInterface(fields=['func']), name='inputspec') - outputnode = Node( - interface=IdentityInterface(fields=['realigned_file']), - name='outputspec') - wflow.connect(inputnode, 'func', outputnode, 'realigned_file') - return wflow - - -class TestResting(): - - in_filenames = { - 'realigned_file': 'rsfmrifunc.nii', - 'mask_file': 'rsfmrimask.nii' - } - - out_filenames = { - 'components_file': 'restpreproc/compcor/noise_components.txt' - } - - num_noise_components = 6 - - @pytest.fixture(autouse=True) - def setup_class(self, tmpdir): - # setup temp folder - tmpdir.chdir() - self.in_filenames = { - key: os.path.abspath(value) - for key, value in self.in_filenames.items() - } - - # create&save input files - utils.save_toy_nii(self.fake_data, self.in_filenames['realigned_file']) - mask = np.zeros(self.fake_data.shape[:3]) - for i in range(mask.shape[0]): - for j in range(mask.shape[1]): - if i == j: - mask[i, j] = 1 - utils.save_toy_nii(mask, self.in_filenames['mask_file']) - - @mock.patch( - 'nipype.workflows.rsfmri.fsl.resting.create_realign_flow', - side_effect=stub_wf) - @mock.patch('nipype.pipeline.engine.Node', side_effect=stub_node_factory) - def test_create_resting_preproc(self, mock_node, mock_realign_wf): - wflow = create_resting_preproc(base_dir=os.getcwd()) - - wflow.inputs.inputspec.num_noise_components = self.num_noise_components - mask_in = wflow.get_node('threshold').inputs - mask_in.out_file = self.in_filenames['mask_file'] - func_in = wflow.get_node('slicetimer').inputs - func_in.slice_time_corrected_file = self.in_filenames['realigned_file'] - - wflow.run() - - # assert - expected_file = os.path.abspath(self.out_filenames['components_file']) - with open(expected_file, 'r') as components_file: - components_data = [line.rstrip().split() - for line in components_file] - num_got_components = len(components_data) - assert (num_got_components == self.num_noise_components or - num_got_components == self.fake_data.shape[3]) - first_two = [row[:2] for row in components_data[1:]] - assert first_two == [['-0.5172356654', '-0.6973053243'], - ['0.2574722644', '0.1645270737'], - ['-0.0806469590', '0.5156853779'], - ['0.7187176051', '-0.3235820287'], - ['-0.3783072450', '0.3406749013']] - - fake_data = np.array([[[[2, 4, 3, 9, 1], [3, 6, 4, 7, 4]], - [[8, 3, 4, 6, 2], [4, 0, 4, 4, 2]]], - [[[9, 7, 5, 5, 7], [7, 8, 4, 8, 4]], - [[0, 4, 7, 1, 7], [6, 8, 8, 8, 7]]]]) diff --git a/nipype/workflows/smri/__init__.py b/nipype/workflows/smri/__init__.py deleted file mode 100644 index b6d7bf5731..0000000000 --- a/nipype/workflows/smri/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) - -from . 
import ants -from . import freesurfer -from . import niftyreg diff --git a/nipype/workflows/smri/ants/ANTSBuildTemplate.py b/nipype/workflows/smri/ants/ANTSBuildTemplate.py deleted file mode 100644 index 5a43d47bac..0000000000 --- a/nipype/workflows/smri/ants/ANTSBuildTemplate.py +++ /dev/null @@ -1,388 +0,0 @@ -# -*- coding: utf-8 -*- -################################################################################# -# Program: Build Template Parallel -# Language: Python -## -# Authors: Jessica Forbes, Grace Murray, and Hans Johnson, University of Iowa -## -# This software is distributed WITHOUT ANY WARRANTY; without even -# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -# PURPOSE. -## -################################################################################# -from __future__ import print_function -from builtins import map -from builtins import zip -from builtins import range - -from ....pipeline import engine as pe -from ....interfaces import utility as util -from ....interfaces.utility import Function - -from ....interfaces.ants import (ANTS, WarpImageMultiTransform, AverageImages, - MultiplyImages, AverageAffineTransform) - - -def GetFirstListElement(this_list): - return this_list[0] - - -def MakeTransformListWithGradientWarps(averageAffineTranform, - gradientStepWarp): - return [ - averageAffineTranform, gradientStepWarp, gradientStepWarp, - gradientStepWarp, gradientStepWarp - ] - - -def RenestDeformedPassiveImages(deformedPassiveImages, - flattened_image_nametypes): - import os - """ Now make a list of lists of images where the outter list is per image type, - and the inner list is the same size as the number of subjects to be averaged. - In this case, the first element will be a list of all the deformed T2's, and - the second element will be a list of all deformed POSTERIOR_AIR, etc.. - """ - all_images_size = len(deformedPassiveImages) - image_dictionary_of_lists = dict() - nested_imagetype_list = list() - outputAverageImageName_list = list() - image_type_list = list() - # make empty_list, this is not efficient, but it works - for name in flattened_image_nametypes: - image_dictionary_of_lists[name] = list() - for index in range(0, all_images_size): - curr_name = flattened_image_nametypes[index] - curr_file = deformedPassiveImages[index] - image_dictionary_of_lists[curr_name].append(curr_file) - for image_type, image_list in list(image_dictionary_of_lists.items()): - nested_imagetype_list.append(image_list) - outputAverageImageName_list.append('AVG_' + image_type + '.nii.gz') - image_type_list.append('WARP_AVG_' + image_type) - print("\n" * 10) - print("HACK: ", nested_imagetype_list) - print("HACK: ", outputAverageImageName_list) - print("HACK: ", image_type_list) - return nested_imagetype_list, outputAverageImageName_list, image_type_list - - -# Utility Function -# This will make a list of list pairs for defining the concatenation of transforms -# wp=['wp1.nii','wp2.nii','wp3.nii'] -# af=['af1.mat','af2.mat','af3.mat'] -# ll=map(list,zip(af,wp)) -# ll -# #[['af1.mat', 'wp1.nii'], ['af2.mat', 'wp2.nii'], ['af3.mat', 'wp3.nii']] - - -def MakeListsOfTransformLists(warpTransformList, AffineTransformList): - return list(map(list, list(zip(warpTransformList, AffineTransformList)))) - - -# Flatten and return equal length transform and images lists. 
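Aside (illustrative sketch, not part of the deleted module): the helper defined just below flattens per-subject dictionaries of passive images, together with one subject-to-atlas transform per subject, into parallel flat lists so that a single MapNode can iterate over every image. With made-up file names, the reshaping it performs amounts to:

    passive_images = [
        {'T2': 's1_T2.nii', 'LABEL': 's1_label.nii'},   # subject 1
        {'T2': 's2_T2.nii', 'LABEL': 's2_label.nii'},   # subject 2
    ]
    transforms = ['s1_to_atlas_warp.nii', 's2_to_atlas_warp.nii']

    flat_images, flat_nametypes, flat_transforms = [], [], []
    for subj_dict, subj_tfm in zip(passive_images, transforms):
        for name, img in subj_dict.items():
            flat_images.append(img)           # every image of every subject
            flat_nametypes.append(name)       # its image type ('T2', 'LABEL', ...)
            flat_transforms.append(subj_tfm)  # the transform of its subject

    # The three lists stay index-aligned, which is what the downstream
    # WarpImageMultiTransform MapNode relies on.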
- - -def FlattenTransformAndImagesList(ListOfPassiveImagesDictionaries, - transformation_series): - import sys - print("HACK: DEBUG: ListOfPassiveImagesDictionaries\n{lpi}\n".format( - lpi=ListOfPassiveImagesDictionaries)) - subjCount = len(ListOfPassiveImagesDictionaries) - tranCount = len(transformation_series) - if subjCount != tranCount: - print("ERROR: subjCount must equal tranCount {0} != {1}".format( - subjCount, tranCount)) - sys.exit(-1) - flattened_images = list() - flattened_image_nametypes = list() - flattened_transforms = list() - passiveImagesCount = len(ListOfPassiveImagesDictionaries[0]) - for subjIndex in range(0, subjCount): - # if passiveImagesCount != len(ListOfPassiveImagesDictionaries[subjIndex]): - # print "ERROR: all image lengths must be equal {0} != {1}".format(passiveImagesCount,len(ListOfPassiveImagesDictionaries[subjIndex])) - # sys.exit(-1) - subjImgDictionary = ListOfPassiveImagesDictionaries[subjIndex] - subjToAtlasTransform = transformation_series[subjIndex] - for imgname, img in list(subjImgDictionary.items()): - flattened_images.append(img) - flattened_image_nametypes.append(imgname) - flattened_transforms.append(subjToAtlasTransform) - print("HACK: flattened images {0}\n".format(flattened_images)) - print("HACK: flattened nametypes {0}\n".format(flattened_image_nametypes)) - print("HACK: flattened txfms {0}\n".format(flattened_transforms)) - return flattened_images, flattened_transforms, flattened_image_nametypes - - -def ANTSTemplateBuildSingleIterationWF(iterationPhasePrefix=''): - """ - - Inputs:: - - inputspec.images : - inputspec.fixed_image : - inputspec.ListOfPassiveImagesDictionaries : - - Outputs:: - - outputspec.template : - outputspec.transforms_list : - outputspec.passive_deformed_templates : - """ - - TemplateBuildSingleIterationWF = pe.Workflow( - name='ANTSTemplateBuildSingleIterationWF_' + - str(str(iterationPhasePrefix))) - - inputSpec = pe.Node( - interface=util.IdentityInterface(fields=[ - 'images', 'fixed_image', 'ListOfPassiveImagesDictionaries' - ]), - run_without_submitting=True, - name='inputspec') - # HACK: TODO: Need to move all local functions to a common untility file, or at the top of the file so that - # they do not change due to re-indenting. Otherwise re-indenting for flow control will trigger - # their hash to change. - # HACK: TODO: REMOVE 'transforms_list' it is not used. That will change all the hashes - # HACK: TODO: Need to run all python files through the code beutifiers. It has gotten pretty ugly. - outputSpec = pe.Node( - interface=util.IdentityInterface(fields=[ - 'template', 'transforms_list', 'passive_deformed_templates' - ]), - run_without_submitting=True, - name='outputspec') - - # NOTE MAP NODE! 
warp each of the original images to the provided fixed_image as the template - BeginANTS = pe.MapNode( - interface=ANTS(), name='BeginANTS', iterfield=['moving_image']) - BeginANTS.inputs.dimension = 3 - BeginANTS.inputs.output_transform_prefix = str( - iterationPhasePrefix) + '_tfm' - BeginANTS.inputs.metric = ['CC'] - BeginANTS.inputs.metric_weight = [1.0] - BeginANTS.inputs.radius = [5] - BeginANTS.inputs.transformation_model = 'SyN' - BeginANTS.inputs.gradient_step_length = 0.25 - BeginANTS.inputs.number_of_iterations = [50, 35, 15] - BeginANTS.inputs.number_of_affine_iterations = [ - 10000, 10000, 10000, 10000, 10000 - ] - BeginANTS.inputs.use_histogram_matching = True - BeginANTS.inputs.mi_option = [32, 16000] - BeginANTS.inputs.regularization = 'Gauss' - BeginANTS.inputs.regularization_gradient_field_sigma = 3 - BeginANTS.inputs.regularization_deformation_field_sigma = 0 - TemplateBuildSingleIterationWF.connect(inputSpec, 'images', BeginANTS, - 'moving_image') - TemplateBuildSingleIterationWF.connect(inputSpec, 'fixed_image', BeginANTS, - 'fixed_image') - - MakeTransformsLists = pe.Node( - interface=util.Function( - function=MakeListsOfTransformLists, - input_names=['warpTransformList', 'AffineTransformList'], - output_names=['out']), - run_without_submitting=True, - name='MakeTransformsLists') - MakeTransformsLists.interface.ignore_exception = True - TemplateBuildSingleIterationWF.connect( - BeginANTS, 'warp_transform', MakeTransformsLists, 'warpTransformList') - TemplateBuildSingleIterationWF.connect(BeginANTS, 'affine_transform', - MakeTransformsLists, - 'AffineTransformList') - - # Now warp all the input_images images - wimtdeformed = pe.MapNode( - interface=WarpImageMultiTransform(), - iterfield=['transformation_series', 'input_image'], - name='wimtdeformed') - TemplateBuildSingleIterationWF.connect(inputSpec, 'images', wimtdeformed, - 'input_image') - TemplateBuildSingleIterationWF.connect( - MakeTransformsLists, 'out', wimtdeformed, 'transformation_series') - - # Shape Update Next ===== - # Now Average All input_images deformed images together to create an updated template average - AvgDeformedImages = pe.Node( - interface=AverageImages(), name='AvgDeformedImages') - AvgDeformedImages.inputs.dimension = 3 - AvgDeformedImages.inputs.output_average_image = str( - iterationPhasePrefix) + '.nii.gz' - AvgDeformedImages.inputs.normalize = True - TemplateBuildSingleIterationWF.connect(wimtdeformed, "output_image", - AvgDeformedImages, 'images') - - # Now average all affine transforms together - AvgAffineTransform = pe.Node( - interface=AverageAffineTransform(), name='AvgAffineTransform') - AvgAffineTransform.inputs.dimension = 3 - AvgAffineTransform.inputs.output_affine_transform = 'Avererage_' + str( - iterationPhasePrefix) + '_Affine.mat' - TemplateBuildSingleIterationWF.connect(BeginANTS, 'affine_transform', - AvgAffineTransform, 'transforms') - - # Now average the warp fields togther - AvgWarpImages = pe.Node(interface=AverageImages(), name='AvgWarpImages') - AvgWarpImages.inputs.dimension = 3 - AvgWarpImages.inputs.output_average_image = str( - iterationPhasePrefix) + 'warp.nii.gz' - AvgWarpImages.inputs.normalize = True - TemplateBuildSingleIterationWF.connect(BeginANTS, 'warp_transform', - AvgWarpImages, 'images') - - # Now average the images together - # TODO: For now GradientStep is set to 0.25 as a hard coded default value. 
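Aside (illustrative sketch, not part of the deleted module): the shape-update below scales the averaged warp field by -GradientStep via MultiplyImages, and MakeTransformListWithGradientWarps (defined above) then stacks that scaled warp four times on top of the inverted average affine, so the net correction is roughly -4 * 0.25 = -1 times the mean warp, re-centering the evolving template on the cohort's average shape. The arithmetic, with a stand-in array for the warp field:

    import numpy as np

    gradient_step = 0.25                           # same hard-coded value as below
    avg_warp = np.zeros((64, 64, 64, 3))           # stand-in for the AvgWarpImages output
    update_warp = -1.0 * gradient_step * avg_warp  # what MultiplyImages computes
    net_correction = 4 * update_warp               # applied 4x during the reshape step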
- GradientStep = 0.25 - GradientStepWarpImage = pe.Node( - interface=MultiplyImages(), name='GradientStepWarpImage') - GradientStepWarpImage.inputs.dimension = 3 - GradientStepWarpImage.inputs.second_input = -1.0 * GradientStep - GradientStepWarpImage.inputs.output_product_image = 'GradientStep0.25_' + str( - iterationPhasePrefix) + '_warp.nii.gz' - TemplateBuildSingleIterationWF.connect( - AvgWarpImages, 'output_average_image', GradientStepWarpImage, - 'first_input') - - # Now create the new template shape based on the average of all deformed images - UpdateTemplateShape = pe.Node( - interface=WarpImageMultiTransform(), name='UpdateTemplateShape') - UpdateTemplateShape.inputs.invert_affine = [1] - TemplateBuildSingleIterationWF.connect( - AvgDeformedImages, 'output_average_image', UpdateTemplateShape, - 'reference_image') - TemplateBuildSingleIterationWF.connect( - AvgAffineTransform, 'affine_transform', UpdateTemplateShape, - 'transformation_series') - TemplateBuildSingleIterationWF.connect(GradientStepWarpImage, - 'output_product_image', - UpdateTemplateShape, 'input_image') - - ApplyInvAverageAndFourTimesGradientStepWarpImage = pe.Node( - interface=util.Function( - function=MakeTransformListWithGradientWarps, - input_names=['averageAffineTranform', 'gradientStepWarp'], - output_names=['TransformListWithGradientWarps']), - run_without_submitting=True, - name='MakeTransformListWithGradientWarps') - ApplyInvAverageAndFourTimesGradientStepWarpImage.interface.ignore_exception = True - - TemplateBuildSingleIterationWF.connect( - AvgAffineTransform, 'affine_transform', - ApplyInvAverageAndFourTimesGradientStepWarpImage, - 'averageAffineTranform') - TemplateBuildSingleIterationWF.connect( - UpdateTemplateShape, 'output_image', - ApplyInvAverageAndFourTimesGradientStepWarpImage, 'gradientStepWarp') - - ReshapeAverageImageWithShapeUpdate = pe.Node( - interface=WarpImageMultiTransform(), - name='ReshapeAverageImageWithShapeUpdate') - ReshapeAverageImageWithShapeUpdate.inputs.invert_affine = [1] - ReshapeAverageImageWithShapeUpdate.inputs.out_postfix = '_Reshaped' - TemplateBuildSingleIterationWF.connect( - AvgDeformedImages, 'output_average_image', - ReshapeAverageImageWithShapeUpdate, 'input_image') - TemplateBuildSingleIterationWF.connect( - AvgDeformedImages, 'output_average_image', - ReshapeAverageImageWithShapeUpdate, 'reference_image') - TemplateBuildSingleIterationWF.connect( - ApplyInvAverageAndFourTimesGradientStepWarpImage, - 'TransformListWithGradientWarps', ReshapeAverageImageWithShapeUpdate, - 'transformation_series') - TemplateBuildSingleIterationWF.connect(ReshapeAverageImageWithShapeUpdate, - 'output_image', outputSpec, - 'template') - - ###### - ###### - # Process all the passive deformed images in a way similar to the main image used for registration - ###### - ###### - ###### - ############################################## - # Now warp all the ListOfPassiveImagesDictionaries images - FlattenTransformAndImagesListNode = pe.Node( - Function( - function=FlattenTransformAndImagesList, - input_names=[ - 'ListOfPassiveImagesDictionaries', 'transformation_series' - ], - output_names=[ - 'flattened_images', 'flattened_transforms', - 'flattened_image_nametypes' - ]), - run_without_submitting=True, - name="99_FlattenTransformAndImagesList") - TemplateBuildSingleIterationWF.connect( - inputSpec, 'ListOfPassiveImagesDictionaries', - FlattenTransformAndImagesListNode, 'ListOfPassiveImagesDictionaries') - TemplateBuildSingleIterationWF.connect(MakeTransformsLists, 'out', - 
FlattenTransformAndImagesListNode, - 'transformation_series') - wimtPassivedeformed = pe.MapNode( - interface=WarpImageMultiTransform(), - iterfield=['transformation_series', 'input_image'], - name='wimtPassivedeformed') - TemplateBuildSingleIterationWF.connect( - AvgDeformedImages, 'output_average_image', wimtPassivedeformed, - 'reference_image') - TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode, - 'flattened_images', - wimtPassivedeformed, 'input_image') - TemplateBuildSingleIterationWF.connect( - FlattenTransformAndImagesListNode, 'flattened_transforms', - wimtPassivedeformed, 'transformation_series') - - RenestDeformedPassiveImagesNode = pe.Node( - Function( - function=RenestDeformedPassiveImages, - input_names=['deformedPassiveImages', 'flattened_image_nametypes'], - output_names=[ - 'nested_imagetype_list', 'outputAverageImageName_list', - 'image_type_list' - ]), - run_without_submitting=True, - name="99_RenestDeformedPassiveImages") - TemplateBuildSingleIterationWF.connect(wimtPassivedeformed, 'output_image', - RenestDeformedPassiveImagesNode, - 'deformedPassiveImages') - TemplateBuildSingleIterationWF.connect( - FlattenTransformAndImagesListNode, 'flattened_image_nametypes', - RenestDeformedPassiveImagesNode, 'flattened_image_nametypes') - # Now Average All passive input_images deformed images together to create an updated template average - AvgDeformedPassiveImages = pe.MapNode( - interface=AverageImages(), - iterfield=['images', 'output_average_image'], - name='AvgDeformedPassiveImages') - AvgDeformedPassiveImages.inputs.dimension = 3 - AvgDeformedPassiveImages.inputs.normalize = False - TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode, - "nested_imagetype_list", - AvgDeformedPassiveImages, 'images') - TemplateBuildSingleIterationWF.connect( - RenestDeformedPassiveImagesNode, "outputAverageImageName_list", - AvgDeformedPassiveImages, 'output_average_image') - - # -- TODO: Now neeed to reshape all the passive images as well - ReshapeAveragePassiveImageWithShapeUpdate = pe.MapNode( - interface=WarpImageMultiTransform(), - iterfield=['input_image', 'reference_image', 'out_postfix'], - name='ReshapeAveragePassiveImageWithShapeUpdate') - ReshapeAveragePassiveImageWithShapeUpdate.inputs.invert_affine = [1] - TemplateBuildSingleIterationWF.connect( - RenestDeformedPassiveImagesNode, "image_type_list", - ReshapeAveragePassiveImageWithShapeUpdate, 'out_postfix') - TemplateBuildSingleIterationWF.connect( - AvgDeformedPassiveImages, 'output_average_image', - ReshapeAveragePassiveImageWithShapeUpdate, 'input_image') - TemplateBuildSingleIterationWF.connect( - AvgDeformedPassiveImages, 'output_average_image', - ReshapeAveragePassiveImageWithShapeUpdate, 'reference_image') - TemplateBuildSingleIterationWF.connect( - ApplyInvAverageAndFourTimesGradientStepWarpImage, - 'TransformListWithGradientWarps', - ReshapeAveragePassiveImageWithShapeUpdate, 'transformation_series') - TemplateBuildSingleIterationWF.connect( - ReshapeAveragePassiveImageWithShapeUpdate, 'output_image', outputSpec, - 'passive_deformed_templates') - - return TemplateBuildSingleIterationWF diff --git a/nipype/workflows/smri/ants/__init__.py b/nipype/workflows/smri/ants/__init__.py deleted file mode 100644 index 3cb140771c..0000000000 --- a/nipype/workflows/smri/ants/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -from .ANTSBuildTemplate import ANTSTemplateBuildSingleIterationWF -from .antsRegistrationBuildTemplate import 
antsRegistrationTemplateBuildSingleIterationWF diff --git a/nipype/workflows/smri/ants/antsRegistrationBuildTemplate.py b/nipype/workflows/smri/ants/antsRegistrationBuildTemplate.py deleted file mode 100644 index 3574935fc1..0000000000 --- a/nipype/workflows/smri/ants/antsRegistrationBuildTemplate.py +++ /dev/null @@ -1,535 +0,0 @@ -# -*- coding: utf-8 -*- -################################################################################# -# Program: Build Template Parallel -# Language: Python -## -# Authors: Jessica Forbes, Grace Murray, and Hans Johnson, University of Iowa -## -# This software is distributed WITHOUT ANY WARRANTY; without even -# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -# PURPOSE. -## -################################################################################# -from __future__ import print_function -from builtins import range - -from ....pipeline import engine as pe -from ....interfaces import utility as util -from ....interfaces.utility import Function - -from ....interfaces.ants import (Registration, ApplyTransforms, AverageImages, - MultiplyImages, AverageAffineTransform) - - -def makeListOfOneElement(inputFile): - outputList = [inputFile] - return outputList - - -def GetFirstListElement(this_list): - return this_list[0] - - -def MakeTransformListWithGradientWarps(averageAffineTranform, - gradientStepWarp): - return [ - averageAffineTranform, gradientStepWarp, gradientStepWarp, - gradientStepWarp, gradientStepWarp - ] - - -def RenestDeformedPassiveImages(deformedPassiveImages, - flattened_image_nametypes, - interpolationMapping): - import os - """ Now make a list of lists of images where the outter list is per image type, - and the inner list is the same size as the number of subjects to be averaged. - In this case, the first element will be a list of all the deformed T2's, and - the second element will be a list of all deformed POSTERIOR_AIR, etc.. - """ - all_images_size = len(deformedPassiveImages) - image_dictionary_of_lists = dict() - nested_imagetype_list = list() - outputAverageImageName_list = list() - image_type_list = list() - nested_interpolation_type = list() - # make empty_list, this is not efficient, but it works - for name in flattened_image_nametypes: - image_dictionary_of_lists[name] = list() - for index in range(0, all_images_size): - curr_name = flattened_image_nametypes[index] - curr_file = deformedPassiveImages[index] - image_dictionary_of_lists[curr_name].append(curr_file) - for image_type, image_list in list(image_dictionary_of_lists.items()): - nested_imagetype_list.append(image_list) - outputAverageImageName_list.append('AVG_' + image_type + '.nii.gz') - image_type_list.append('WARP_AVG_' + image_type) - if image_type in interpolationMapping: - nested_interpolation_type.append(interpolationMapping[image_type]) - else: - nested_interpolation_type.append( - 'Linear') # Linear is the default. 
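    # (Illustrative note, not part of the original function: at this point the
    # loop above has regrouped the flat per-image inputs into one list per image
    # type. With two subjects, image types 'T2' and 'LABEL', and an
    # interpolationMapping that assigns, say, 'NearestNeighbor' to label maps,
    # the outputs would look roughly like:
    #     nested_imagetype_list       = [['s1_T2_warped.nii', 's2_T2_warped.nii'],
    #                                    ['s1_label_warped.nii', 's2_label_warped.nii']]
    #     outputAverageImageName_list = ['AVG_T2.nii.gz', 'AVG_LABEL.nii.gz']
    #     image_type_list             = ['WARP_AVG_T2', 'WARP_AVG_LABEL']
    #     nested_interpolation_type   = ['Linear', 'NearestNeighbor']
    # so each entry drives one iteration of the AverageImages MapNode.)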
- print("\n" * 10) - print("HACK: ", nested_imagetype_list) - print("HACK: ", outputAverageImageName_list) - print("HACK: ", image_type_list) - print("HACK: ", nested_interpolation_type) - return nested_imagetype_list, outputAverageImageName_list, image_type_list, nested_interpolation_type - - -def SplitAffineAndWarpComponents(list_of_transforms_lists): - # Nota bene: The outputs will include the initial_moving_transform from Registration (which depends on what - # the invert_initial_moving_transform is set to) - affine_component_list = [] - warp_component_list = [] - for transform in list_of_transforms_lists: - affine_component_list.append(transform[0]) - warp_component_list.append(transform[1]) - print("HACK ", affine_component_list, " ", warp_component_list) - return affine_component_list, warp_component_list - - -# Flatten and return equal length transform and images lists. - - -def FlattenTransformAndImagesList(ListOfPassiveImagesDictionaries, transforms, - invert_transform_flags, - interpolationMapping): - import sys - print("HACK: DEBUG: ListOfPassiveImagesDictionaries\n{lpi}\n".format( - lpi=ListOfPassiveImagesDictionaries)) - subjCount = len(ListOfPassiveImagesDictionaries) - tranCount = len(transforms) - if subjCount != tranCount: - print("ERROR: subjCount must equal tranCount {0} != {1}".format( - subjCount, tranCount)) - sys.exit(-1) - invertTfmsFlagsCount = len(invert_transform_flags) - if subjCount != invertTfmsFlagsCount: - print("ERROR: subjCount must equal invertTfmsFlags {0} != {1}".format( - subjCount, invertTfmsFlagsCount)) - sys.exit(-1) - flattened_images = list() - flattened_image_nametypes = list() - flattened_transforms = list() - flattened_invert_transform_flags = list() - flattened_interpolation_type = list() - passiveImagesCount = len(ListOfPassiveImagesDictionaries[0]) - for subjIndex in range(0, subjCount): - # if passiveImagesCount != len(ListOfPassiveImagesDictionaries[subjIndex]): - # print "ERROR: all image lengths must be equal {0} != {1}".format(passiveImagesCount,len(ListOfPassiveImagesDictionaries[subjIndex])) - # sys.exit(-1) - subjImgDictionary = ListOfPassiveImagesDictionaries[subjIndex] - subjToAtlasTransform = transforms[subjIndex] - subjToAtlasInvertFlags = invert_transform_flags[subjIndex] - for imgname, img in list(subjImgDictionary.items()): - flattened_images.append(img) - flattened_image_nametypes.append(imgname) - flattened_transforms.append(subjToAtlasTransform) - flattened_invert_transform_flags.append(subjToAtlasInvertFlags) - if imgname in interpolationMapping: - flattened_interpolation_type.append( - interpolationMapping[imgname]) - else: - flattened_interpolation_type.append( - 'Linear') # Linear is the default. - print("HACK: flattened images {0}\n".format(flattened_images)) - print("HACK: flattened nametypes {0}\n".format(flattened_image_nametypes)) - print("HACK: flattened txfms {0}\n".format(flattened_transforms)) - print("HACK: flattened txfmsFlags{0}\n".format( - flattened_invert_transform_flags)) - return flattened_images, flattened_transforms, flattened_invert_transform_flags, flattened_image_nametypes, flattened_interpolation_type - - -def GetMovingImages(ListOfImagesDictionaries, registrationImageTypes, - interpolationMapping): - """ This currently ONLY works when registrationImageTypes has - length of exactly 1. When the new multi-variate registration - is introduced, it will be expanded. 
- """ - if len(registrationImageTypes) != 1: - print("ERROR: Multivariate imageing not supported yet!") - return [] - moving_images = [ - mdict[registrationImageTypes[0]] for mdict in ListOfImagesDictionaries - ] - moving_interpolation_type = interpolationMapping[registrationImageTypes[0]] - return moving_images, moving_interpolation_type - - -def GetPassiveImages(ListOfImagesDictionaries, registrationImageTypes): - if len(registrationImageTypes) != 1: - print("ERROR: Multivariate imageing not supported yet!") - return [dict()] - passive_images = list() - for mdict in ListOfImagesDictionaries: - ThisSubjectPassiveImages = dict() - for key, value in list(mdict.items()): - if key not in registrationImageTypes: - ThisSubjectPassiveImages[key] = value - passive_images.append(ThisSubjectPassiveImages) - return passive_images - - -## -# NOTE: The modes can be either 'SINGLE_IMAGE' or 'MULTI' -# 'SINGLE_IMAGE' is quick shorthand when you are building an atlas with a single subject, then registration can -# be short-circuted -# any other string indicates the normal mode that you would expect and replicates the shell script build_template_parallel.sh - - -def antsRegistrationTemplateBuildSingleIterationWF(iterationPhasePrefix=''): - """ - - Inputs:: - - inputspec.images : - inputspec.fixed_image : - inputspec.ListOfPassiveImagesDictionaries : - inputspec.interpolationMapping : - - Outputs:: - - outputspec.template : - outputspec.transforms_list : - outputspec.passive_deformed_templates : - """ - TemplateBuildSingleIterationWF = pe.Workflow( - name='antsRegistrationTemplateBuildSingleIterationWF_' + - str(iterationPhasePrefix)) - - inputSpec = pe.Node( - interface=util.IdentityInterface(fields=[ - 'ListOfImagesDictionaries', 'registrationImageTypes', - 'interpolationMapping', 'fixed_image' - ]), - run_without_submitting=True, - name='inputspec') - # HACK: TODO: Need to move all local functions to a common untility file, or at the top of the file so that - # they do not change due to re-indenting. Otherwise re-indenting for flow control will trigger - # their hash to change. - # HACK: TODO: REMOVE 'transforms_list' it is not used. That will change all the hashes - # HACK: TODO: Need to run all python files through the code beutifiers. It has gotten pretty ugly. - outputSpec = pe.Node( - interface=util.IdentityInterface(fields=[ - 'template', 'transforms_list', 'passive_deformed_templates' - ]), - run_without_submitting=True, - name='outputspec') - - # NOTE MAP NODE! 
warp each of the original images to the provided fixed_image as the template - BeginANTS = pe.MapNode( - interface=Registration(), name='BeginANTS', iterfield=['moving_image']) - BeginANTS.inputs.dimension = 3 - BeginANTS.inputs.output_transform_prefix = str( - iterationPhasePrefix) + '_tfm' - BeginANTS.inputs.transforms = ["Affine", "SyN"] - BeginANTS.inputs.transform_parameters = [[0.9], [0.25, 3.0, 0.0]] - BeginANTS.inputs.metric = ['Mattes', 'CC'] - BeginANTS.inputs.metric_weight = [1.0, 1.0] - BeginANTS.inputs.radius_or_number_of_bins = [32, 5] - BeginANTS.inputs.number_of_iterations = [[1000, 1000, 1000], [50, 35, 15]] - BeginANTS.inputs.use_histogram_matching = [True, True] - BeginANTS.inputs.use_estimate_learning_rate_once = [False, False] - BeginANTS.inputs.shrink_factors = [[3, 2, 1], [3, 2, 1]] - BeginANTS.inputs.smoothing_sigmas = [[3, 2, 0], [3, 2, 0]] - BeginANTS.inputs.sigma_units = ["vox"] * 2 - - GetMovingImagesNode = pe.Node( - interface=util.Function( - function=GetMovingImages, - input_names=[ - 'ListOfImagesDictionaries', 'registrationImageTypes', - 'interpolationMapping' - ], - output_names=['moving_images', 'moving_interpolation_type']), - run_without_submitting=True, - name='99_GetMovingImagesNode') - TemplateBuildSingleIterationWF.connect( - inputSpec, 'ListOfImagesDictionaries', GetMovingImagesNode, - 'ListOfImagesDictionaries') - TemplateBuildSingleIterationWF.connect(inputSpec, 'registrationImageTypes', - GetMovingImagesNode, - 'registrationImageTypes') - TemplateBuildSingleIterationWF.connect(inputSpec, 'interpolationMapping', - GetMovingImagesNode, - 'interpolationMapping') - - TemplateBuildSingleIterationWF.connect( - GetMovingImagesNode, 'moving_images', BeginANTS, 'moving_image') - TemplateBuildSingleIterationWF.connect(GetMovingImagesNode, - 'moving_interpolation_type', - BeginANTS, 'interpolation') - TemplateBuildSingleIterationWF.connect(inputSpec, 'fixed_image', BeginANTS, - 'fixed_image') - - # Now warp all the input_images images - wimtdeformed = pe.MapNode( - interface=ApplyTransforms(), - iterfield=['transforms', 'invert_transform_flags', 'input_image'], - name='wimtdeformed') - wimtdeformed.inputs.interpolation = 'Linear' - wimtdeformed.default_value = 0 - TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_transforms', - wimtdeformed, 'transforms') - TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_invert_flags', - wimtdeformed, - 'invert_transform_flags') - TemplateBuildSingleIterationWF.connect( - GetMovingImagesNode, 'moving_images', wimtdeformed, 'input_image') - TemplateBuildSingleIterationWF.connect(inputSpec, 'fixed_image', - wimtdeformed, 'reference_image') - - # Shape Update Next ===== - # Now Average All input_images deformed images together to create an updated template average - AvgDeformedImages = pe.Node( - interface=AverageImages(), name='AvgDeformedImages') - AvgDeformedImages.inputs.dimension = 3 - AvgDeformedImages.inputs.output_average_image = str( - iterationPhasePrefix) + '.nii.gz' - AvgDeformedImages.inputs.normalize = True - TemplateBuildSingleIterationWF.connect(wimtdeformed, "output_image", - AvgDeformedImages, 'images') - - # Now average all affine transforms together - AvgAffineTransform = pe.Node( - interface=AverageAffineTransform(), name='AvgAffineTransform') - AvgAffineTransform.inputs.dimension = 3 - AvgAffineTransform.inputs.output_affine_transform = 'Avererage_' + str( - iterationPhasePrefix) + '_Affine.mat' - - SplitAffineAndWarpsNode = pe.Node( - interface=util.Function( - 
function=SplitAffineAndWarpComponents, - input_names=['list_of_transforms_lists'], - output_names=['affine_component_list', 'warp_component_list']), - run_without_submitting=True, - name='99_SplitAffineAndWarpsNode') - TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_transforms', - SplitAffineAndWarpsNode, - 'list_of_transforms_lists') - TemplateBuildSingleIterationWF.connect(SplitAffineAndWarpsNode, - 'affine_component_list', - AvgAffineTransform, 'transforms') - - # Now average the warp fields togther - AvgWarpImages = pe.Node(interface=AverageImages(), name='AvgWarpImages') - AvgWarpImages.inputs.dimension = 3 - AvgWarpImages.inputs.output_average_image = str( - iterationPhasePrefix) + 'warp.nii.gz' - AvgWarpImages.inputs.normalize = True - TemplateBuildSingleIterationWF.connect(SplitAffineAndWarpsNode, - 'warp_component_list', - AvgWarpImages, 'images') - - # Now average the images together - # TODO: For now GradientStep is set to 0.25 as a hard coded default value. - GradientStep = 0.25 - GradientStepWarpImage = pe.Node( - interface=MultiplyImages(), name='GradientStepWarpImage') - GradientStepWarpImage.inputs.dimension = 3 - GradientStepWarpImage.inputs.second_input = -1.0 * GradientStep - GradientStepWarpImage.inputs.output_product_image = 'GradientStep0.25_' + str( - iterationPhasePrefix) + '_warp.nii.gz' - TemplateBuildSingleIterationWF.connect( - AvgWarpImages, 'output_average_image', GradientStepWarpImage, - 'first_input') - - # Now create the new template shape based on the average of all deformed images - UpdateTemplateShape = pe.Node( - interface=ApplyTransforms(), name='UpdateTemplateShape') - UpdateTemplateShape.inputs.invert_transform_flags = [True] - UpdateTemplateShape.inputs.interpolation = 'Linear' - UpdateTemplateShape.default_value = 0 - - TemplateBuildSingleIterationWF.connect( - AvgDeformedImages, 'output_average_image', UpdateTemplateShape, - 'reference_image') - TemplateBuildSingleIterationWF.connect([ - (AvgAffineTransform, UpdateTemplateShape, - [(('affine_transform', makeListOfOneElement), 'transforms')]), - ]) - TemplateBuildSingleIterationWF.connect(GradientStepWarpImage, - 'output_product_image', - UpdateTemplateShape, 'input_image') - - ApplyInvAverageAndFourTimesGradientStepWarpImage = pe.Node( - interface=util.Function( - function=MakeTransformListWithGradientWarps, - input_names=['averageAffineTranform', 'gradientStepWarp'], - output_names=['TransformListWithGradientWarps']), - run_without_submitting=True, - name='99_MakeTransformListWithGradientWarps') - ApplyInvAverageAndFourTimesGradientStepWarpImage.interface.ignore_exception = True - - TemplateBuildSingleIterationWF.connect( - AvgAffineTransform, 'affine_transform', - ApplyInvAverageAndFourTimesGradientStepWarpImage, - 'averageAffineTranform') - TemplateBuildSingleIterationWF.connect( - UpdateTemplateShape, 'output_image', - ApplyInvAverageAndFourTimesGradientStepWarpImage, 'gradientStepWarp') - - ReshapeAverageImageWithShapeUpdate = pe.Node( - interface=ApplyTransforms(), name='ReshapeAverageImageWithShapeUpdate') - ReshapeAverageImageWithShapeUpdate.inputs.invert_transform_flags = [ - True, False, False, False, False - ] - ReshapeAverageImageWithShapeUpdate.inputs.interpolation = 'Linear' - ReshapeAverageImageWithShapeUpdate.default_value = 0 - ReshapeAverageImageWithShapeUpdate.inputs.output_image = 'ReshapeAverageImageWithShapeUpdate.nii.gz' - TemplateBuildSingleIterationWF.connect( - AvgDeformedImages, 'output_average_image', - ReshapeAverageImageWithShapeUpdate, 'input_image') - 
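# --- Illustrative sketch (not from the nipype sources) ---
# The shape-update arithmetic wired above, reduced to plain NumPy on toy
# arrays: AvgWarpImages averages the forward warp fields, and
# GradientStepWarpImage scales that average by -1.0 * GradientStep (0.25).
# Array shapes and values below are made up purely for illustration.
import numpy as np

gradient_step = 0.25                                         # hard-coded default, as above
toy_warps = [np.random.rand(8, 8, 8, 3) for _ in range(3)]   # one toy displacement field per subject
avg_warp = np.mean(toy_warps, axis=0)                        # role of AvgWarpImages
update_field = -1.0 * gradient_step * avg_warp               # role of GradientStepWarpImage
# ReshapeAverageImageWithShapeUpdate then resamples the averaged template
# through the inverse of the average affine followed by this update field
# (the five invert flags above suggest one inverted affine plus four copies
# of the warp), nudging the template toward the mean shape of the cohort.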
TemplateBuildSingleIterationWF.connect( - AvgDeformedImages, 'output_average_image', - ReshapeAverageImageWithShapeUpdate, 'reference_image') - TemplateBuildSingleIterationWF.connect( - ApplyInvAverageAndFourTimesGradientStepWarpImage, - 'TransformListWithGradientWarps', ReshapeAverageImageWithShapeUpdate, - 'transforms') - TemplateBuildSingleIterationWF.connect(ReshapeAverageImageWithShapeUpdate, - 'output_image', outputSpec, - 'template') - - ###### - ###### - # Process all the passive deformed images in a way similar to the main image used for registration - ###### - ###### - ###### - ############################################## - # Now warp all the ListOfPassiveImagesDictionaries images - FlattenTransformAndImagesListNode = pe.Node( - Function( - function=FlattenTransformAndImagesList, - input_names=[ - 'ListOfPassiveImagesDictionaries', 'transforms', - 'invert_transform_flags', 'interpolationMapping' - ], - output_names=[ - 'flattened_images', 'flattened_transforms', - 'flattened_invert_transform_flags', - 'flattened_image_nametypes', 'flattened_interpolation_type' - ]), - run_without_submitting=True, - name="99_FlattenTransformAndImagesList") - - GetPassiveImagesNode = pe.Node( - interface=util.Function( - function=GetPassiveImages, - input_names=['ListOfImagesDictionaries', 'registrationImageTypes'], - output_names=['ListOfPassiveImagesDictionaries']), - run_without_submitting=True, - name='99_GetPassiveImagesNode') - TemplateBuildSingleIterationWF.connect( - inputSpec, 'ListOfImagesDictionaries', GetPassiveImagesNode, - 'ListOfImagesDictionaries') - TemplateBuildSingleIterationWF.connect(inputSpec, 'registrationImageTypes', - GetPassiveImagesNode, - 'registrationImageTypes') - - TemplateBuildSingleIterationWF.connect( - GetPassiveImagesNode, 'ListOfPassiveImagesDictionaries', - FlattenTransformAndImagesListNode, 'ListOfPassiveImagesDictionaries') - TemplateBuildSingleIterationWF.connect(inputSpec, 'interpolationMapping', - FlattenTransformAndImagesListNode, - 'interpolationMapping') - TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_transforms', - FlattenTransformAndImagesListNode, - 'transforms') - TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_invert_flags', - FlattenTransformAndImagesListNode, - 'invert_transform_flags') - wimtPassivedeformed = pe.MapNode( - interface=ApplyTransforms(), - iterfield=[ - 'transforms', 'invert_transform_flags', 'input_image', - 'interpolation' - ], - name='wimtPassivedeformed') - wimtPassivedeformed.default_value = 0 - TemplateBuildSingleIterationWF.connect( - AvgDeformedImages, 'output_average_image', wimtPassivedeformed, - 'reference_image') - TemplateBuildSingleIterationWF.connect( - FlattenTransformAndImagesListNode, 'flattened_interpolation_type', - wimtPassivedeformed, 'interpolation') - TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode, - 'flattened_images', - wimtPassivedeformed, 'input_image') - TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode, - 'flattened_transforms', - wimtPassivedeformed, 'transforms') - TemplateBuildSingleIterationWF.connect( - FlattenTransformAndImagesListNode, 'flattened_invert_transform_flags', - wimtPassivedeformed, 'invert_transform_flags') - - RenestDeformedPassiveImagesNode = pe.Node( - Function( - function=RenestDeformedPassiveImages, - input_names=[ - 'deformedPassiveImages', 'flattened_image_nametypes', - 'interpolationMapping' - ], - output_names=[ - 'nested_imagetype_list', 'outputAverageImageName_list', - 'image_type_list', 
'nested_interpolation_type' - ]), - run_without_submitting=True, - name="99_RenestDeformedPassiveImages") - TemplateBuildSingleIterationWF.connect(inputSpec, 'interpolationMapping', - RenestDeformedPassiveImagesNode, - 'interpolationMapping') - TemplateBuildSingleIterationWF.connect(wimtPassivedeformed, 'output_image', - RenestDeformedPassiveImagesNode, - 'deformedPassiveImages') - TemplateBuildSingleIterationWF.connect( - FlattenTransformAndImagesListNode, 'flattened_image_nametypes', - RenestDeformedPassiveImagesNode, 'flattened_image_nametypes') - # Now Average All passive input_images deformed images together to create an updated template average - AvgDeformedPassiveImages = pe.MapNode( - interface=AverageImages(), - iterfield=['images', 'output_average_image'], - name='AvgDeformedPassiveImages') - AvgDeformedPassiveImages.inputs.dimension = 3 - AvgDeformedPassiveImages.inputs.normalize = False - TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode, - "nested_imagetype_list", - AvgDeformedPassiveImages, 'images') - TemplateBuildSingleIterationWF.connect( - RenestDeformedPassiveImagesNode, "outputAverageImageName_list", - AvgDeformedPassiveImages, 'output_average_image') - - # -- TODO: Now neeed to reshape all the passive images as well - ReshapeAveragePassiveImageWithShapeUpdate = pe.MapNode( - interface=ApplyTransforms(), - iterfield=[ - 'input_image', 'reference_image', 'output_image', 'interpolation' - ], - name='ReshapeAveragePassiveImageWithShapeUpdate') - ReshapeAveragePassiveImageWithShapeUpdate.inputs.invert_transform_flags = [ - True, False, False, False, False - ] - ReshapeAveragePassiveImageWithShapeUpdate.default_value = 0 - TemplateBuildSingleIterationWF.connect( - RenestDeformedPassiveImagesNode, 'nested_interpolation_type', - ReshapeAveragePassiveImageWithShapeUpdate, 'interpolation') - TemplateBuildSingleIterationWF.connect( - RenestDeformedPassiveImagesNode, 'outputAverageImageName_list', - ReshapeAveragePassiveImageWithShapeUpdate, 'output_image') - TemplateBuildSingleIterationWF.connect( - AvgDeformedPassiveImages, 'output_average_image', - ReshapeAveragePassiveImageWithShapeUpdate, 'input_image') - TemplateBuildSingleIterationWF.connect( - AvgDeformedPassiveImages, 'output_average_image', - ReshapeAveragePassiveImageWithShapeUpdate, 'reference_image') - TemplateBuildSingleIterationWF.connect( - ApplyInvAverageAndFourTimesGradientStepWarpImage, - 'TransformListWithGradientWarps', - ReshapeAveragePassiveImageWithShapeUpdate, 'transforms') - TemplateBuildSingleIterationWF.connect( - ReshapeAveragePassiveImageWithShapeUpdate, 'output_image', outputSpec, - 'passive_deformed_templates') - - return TemplateBuildSingleIterationWF diff --git a/nipype/workflows/smri/freesurfer/__init__.py b/nipype/workflows/smri/freesurfer/__init__.py deleted file mode 100644 index caa854f9c9..0000000000 --- a/nipype/workflows/smri/freesurfer/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -*- coding: utf-8 -*- -from .utils import (create_getmask_flow, create_get_stats_flow, - create_tessellation_flow) -from .bem import create_bem_flow -from .recon import create_skullstripped_recon_flow, create_reconall_workflow diff --git a/nipype/workflows/smri/freesurfer/autorecon1.py b/nipype/workflows/smri/freesurfer/autorecon1.py deleted file mode 100644 index 0973e210a7..0000000000 --- a/nipype/workflows/smri/freesurfer/autorecon1.py +++ /dev/null @@ -1,512 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from 
....utils import NUMPY_MMAP -from ....pipeline import engine as pe -from ....interfaces.utility import Function, IdentityInterface -from ....interfaces.freesurfer import * -from .utils import copy_file - - -def checkT1s(T1_files, cw256=False): - """Verifying size of inputs and setting workflow parameters""" - import sys - import nibabel as nb - from nipype.utils.filemanip import ensure_list - - T1_files = ensure_list(T1_files) - if len(T1_files) == 0: - print("ERROR: No T1's Given") - sys.exit(-1) - - shape = nb.load(T1_files[0]).shape - for t1 in T1_files[1:]: - if nb.load(t1, mmap=NUMPY_MMAP).shape != shape: - print("ERROR: T1s not the same size. Cannot process {0} and {1} " - "together".format(T1_files[0], t1)) - sys.exit(-1) - - origvol_names = ["{0:03d}.mgz".format(i + 1) for i in range(len(T1_files))] - - # check if cw256 is set to crop the images if size is larger than 256 - if not cw256 and any(dim > 256 for dim in shape): - print("Setting MRI Convert to crop images to 256 FOV") - cw256 = True - - resample_type = 'cubic' if len(T1_files) > 1 else 'interpolate' - return T1_files, cw256, resample_type, origvol_names - - -def create_AutoRecon1(name="AutoRecon1", - longitudinal=False, - distance=None, - custom_atlas=None, - plugin_args=None, - shrink=None, - stop=None, - fsvernum=5.3): - """Creates the AutoRecon1 workflow in nipype. - - Inputs:: - inputspec.T1_files : T1 files (mandatory) - inputspec.T2_file : T2 file (optional) - inputspec.FLAIR_file : FLAIR file (optional) - inputspec.cw256 : Conform inputs to 256 FOV (optional) - inputspec.num_threads: Number of threads to use with EM Register (default=1) - Outpus:: - - """ - ar1_wf = pe.Workflow(name=name) - inputspec = pe.Node( - interface=IdentityInterface(fields=[ - 'T1_files', 'T2_file', 'FLAIR_file', 'cw256', 'num_threads', - 'reg_template_withskull', 'awk_file' - ]), - run_without_submitting=True, - name='inputspec') - - if not longitudinal: - # single session processing - verify_inputs = pe.Node( - Function(["T1_files", "cw256"], - ["T1_files", "cw256", "resample_type", "origvol_names"], - checkT1s), - name="Check_T1s") - ar1_wf.connect([(inputspec, verify_inputs, [('T1_files', 'T1_files'), - ('cw256', 'cw256')])]) - - # T1 image preparation - # For all T1's mri_convert ${InputVol} ${out_file} - T1_image_preparation = pe.MapNode( - MRIConvert(), iterfield=['in_file', 'out_file'], name="T1_prep") - - ar1_wf.connect([ - (verify_inputs, T1_image_preparation, - [('T1_files', 'in_file'), ('origvol_names', 'out_file')]), - ]) - - def convert_modalities(in_file=None, out_file=None): - """Returns an undefined output if the in_file is not defined""" - from nipype.interfaces.freesurfer import MRIConvert - import os - if in_file: - convert = MRIConvert() - convert.inputs.in_file = in_file - convert.inputs.out_file = out_file - convert.inputs.no_scale = True - out = convert.run() - out_file = os.path.abspath(out.outputs.out_file) - return out_file - - T2_convert = pe.Node( - Function(['in_file', 'out_file'], ['out_file'], - convert_modalities), - name="T2_Convert") - T2_convert.inputs.out_file = 'T2raw.mgz' - ar1_wf.connect([(inputspec, T2_convert, [('T2_file', 'in_file')])]) - - FLAIR_convert = pe.Node( - Function(['in_file', 'out_file'], ['out_file'], - convert_modalities), - name="FLAIR_Convert") - FLAIR_convert.inputs.out_file = 'FLAIRraw.mgz' - ar1_wf.connect([(inputspec, FLAIR_convert, [('FLAIR_file', - 'in_file')])]) - else: - # longitudinal inputs - inputspec = pe.Node( - interface=IdentityInterface(fields=[ - 'T1_files', 
'iscales', 'ltas', 'subj_to_template_lta', - 'template_talairach_xfm', 'template_brainmask' - ]), - run_without_submitting=True, - name='inputspec') - - def output_names(T1_files): - """Create file names that are dependent on the number of T1 inputs""" - iscale_names = list() - lta_names = list() - for i, t1 in enumerate(T1_files): - # assign an input number - file_num = str(i + 1) - while len(file_num) < 3: - file_num = '0' + file_num - iscale_names.append("{0}-iscale.txt".format(file_num)) - lta_names.append("{0}.lta".format(file_num)) - return iscale_names, lta_names - - filenames = pe.Node( - Function(['T1_files'], ['iscale_names', 'lta_names'], - output_names), - name="Longitudinal_Filenames") - ar1_wf.connect([(inputspec, filenames, [('T1_files', 'T1_files')])]) - - copy_ltas = pe.MapNode( - Function(['in_file', 'out_file'], ['out_file'], copy_file), - iterfield=['in_file', 'out_file'], - name='Copy_ltas') - ar1_wf.connect([(inputspec, copy_ltas, [('ltas', 'in_file')]), - (filenames, copy_ltas, [('lta_names', 'out_file')])]) - - copy_iscales = pe.MapNode( - Function(['in_file', 'out_file'], ['out_file'], copy_file), - iterfield=['in_file', 'out_file'], - name='Copy_iscales') - ar1_wf.connect([(inputspec, copy_iscales, [('iscales', 'in_file')]), - (filenames, copy_iscales, [('iscale_names', - 'out_file')])]) - - concatenate_lta = pe.MapNode( - ConcatenateLTA(), iterfield=['in_file'], name="Concatenate_ltas") - ar1_wf.connect([(copy_ltas, concatenate_lta, [('out_file', - 'in_file')]), - (inputspec, concatenate_lta, [('subj_to_template_lta', - 'subj_to_base')])]) - - # Motion Correction - """ - When there are multiple source volumes, this step will correct for small - motions between them and then average them together. The output of the - motion corrected average is mri/rawavg.mgz which is then conformed to - 255 cubed char images (1mm isotropic voxels) in mri/orig.mgz. - """ - - def createTemplate(in_files, out_file): - import os - import shutil - if len(in_files) == 1: - # if only 1 T1 scan given, no need to run RobustTemplate - print( - "WARNING: only one run found. 
This is OK, but motion correction " - + - "cannot be performed on one run, so I'll copy the run to rawavg " - + "and continue.") - shutil.copyfile(in_files[0], out_file) - intensity_scales = None - transforms = None - else: - from nipype.interfaces.freesurfer import RobustTemplate - # if multiple T1 scans are given run RobustTemplate - intensity_scales = [ - os.path.basename(f.replace('.mgz', '-iscale.txt')) - for f in in_files - ] - transforms = [ - os.path.basename(f.replace('.mgz', '.lta')) for f in in_files - ] - robtemp = RobustTemplate() - robtemp.inputs.in_files = in_files - robtemp.inputs.average_metric = 'median' - robtemp.inputs.out_file = out_file - robtemp.inputs.no_iteration = True - robtemp.inputs.fixed_timepoint = True - robtemp.inputs.auto_detect_sensitivity = True - robtemp.inputs.initial_timepoint = 1 - robtemp.inputs.scaled_intensity_outputs = intensity_scales - robtemp.inputs.transform_outputs = transforms - robtemp.inputs.subsample_threshold = 200 - robtemp.inputs.intensity_scaling = True - robtemp_result = robtemp.run() - # collect the outputs from RobustTemplate - out_file = robtemp_result.outputs.out_file - intensity_scales = [ - os.path.abspath(f) - for f in robtemp_result.outputs.scaled_intensity_outputs - ] - transforms = [ - os.path.abspath(f) - for f in robtemp_result.outputs.transform_outputs - ] - out_file = os.path.abspath(out_file) - return out_file, intensity_scales, transforms - - if not longitudinal: - create_template = pe.Node( - Function(['in_files', 'out_file'], - ['out_file', 'intensity_scales', 'transforms'], - createTemplate), - name="Robust_Template") - create_template.inputs.out_file = 'rawavg.mgz' - ar1_wf.connect([(T1_image_preparation, create_template, - [('out_file', 'in_files')])]) - else: - create_template = pe.Node(RobustTemplate(), name="Robust_Template") - create_template.inputs.average_metric = 'median' - create_template.inputs.out_file = 'rawavg.mgz' - create_template.inputs.no_iteration = True - ar1_wf.connect([(concatenate_lta, create_template, - [('out_file', 'initial_transforms')]), - (inputSpec, create_template, [('in_t1s', 'in_files')]), - (copy_iscales, create_template, - [('out_file', 'in_intensity_scales')])]) - - # mri_convert - conform_template = pe.Node(MRIConvert(), name='Conform_Template') - conform_template.inputs.out_file = 'orig.mgz' - if not longitudinal: - conform_template.inputs.conform = True - ar1_wf.connect([(verify_inputs, conform_template, - [('cw256', 'cw256'), ('resample_type', - 'resample_type')])]) - else: - conform_template.inputs.out_datatype = 'uchar' - - ar1_wf.connect([(create_template, conform_template, [('out_file', - 'in_file')])]) - - # Talairach - """ - This computes the affine transform from the orig volume to the MNI305 atlas using Avi Snyders 4dfp - suite of image registration tools, through a FreeSurfer script called talairach_avi. - Several of the downstream programs use talairach coordinates as seed points. 
- """ - - bias_correction = pe.Node(MNIBiasCorrection(), name="Bias_correction") - bias_correction.inputs.iterations = 1 - bias_correction.inputs.protocol_iterations = 1000 - bias_correction.inputs.distance = distance - if stop: - bias_correction.inputs.stop = stop - if shrink: - bias_correction.inputs.shrink = shrink - bias_correction.inputs.no_rescale = True - bias_correction.inputs.out_file = 'orig_nu.mgz' - - ar1_wf.connect([ - (conform_template, bias_correction, [('out_file', 'in_file')]), - ]) - - if not longitudinal: - # single session processing - talairach_avi = pe.Node(TalairachAVI(), name="Compute_Transform") - if custom_atlas is not None: - # allows to specify a custom atlas - talairach_avi.inputs.atlas = custom_atlas - talairach_avi.inputs.out_file = 'talairach.auto.xfm' - ar1_wf.connect([(bias_correction, talairach_avi, [('out_file', - 'in_file')])]) - else: - # longitudinal processing - # Just copy the template xfm - talairach_avi = pe.Node( - Function(['in_file', 'out_file'], ['out_file'], copy_file), - name='Copy_Template_Transform') - talairach_avi.inputs.out_file = 'talairach.auto.xfm' - - ar1_wf.connect([(inputspec, talairach_avi, [('template_talairach_xfm', - 'in_file')])]) - - copy_transform = pe.Node( - Function(['in_file', 'out_file'], ['out_file'], copy_file), - name='Copy_Transform') - copy_transform.inputs.out_file = 'talairach.xfm' - - ar1_wf.connect([(talairach_avi, copy_transform, [('out_file', - 'in_file')])]) - - # In recon-all the talairach.xfm is added to orig.mgz, even though - # it does not exist yet. This is a compromise to keep from - # having to change the time stamp of the orig volume after talairaching. - # Here we are going to add xfm to the header after the xfm has been created. - # This may mess up the timestamp. - - add_xform_to_orig = pe.Node( - AddXFormToHeader(), name="Add_Transform_to_Orig") - add_xform_to_orig.inputs.copy_name = True - add_xform_to_orig.inputs.out_file = conform_template.inputs.out_file - - ar1_wf.connect( - [(conform_template, add_xform_to_orig, [('out_file', 'in_file')]), - (copy_transform, add_xform_to_orig, [('out_file', 'transform')])]) - - # This node adds the transform to the orig_nu.mgz file. This step does not - # exist in the recon-all workflow, because that workflow adds the talairach - # to the orig.mgz file header before the talairach actually exists. - add_xform_to_orig_nu = pe.Node( - AddXFormToHeader(), name="Add_Transform_to_Orig_Nu") - add_xform_to_orig_nu.inputs.copy_name = True - add_xform_to_orig_nu.inputs.out_file = bias_correction.inputs.out_file - - ar1_wf.connect( - [(bias_correction, add_xform_to_orig_nu, [('out_file', 'in_file')]), - (copy_transform, add_xform_to_orig_nu, [('out_file', 'transform')])]) - - # check the alignment of the talairach - # TODO: Figure out how to read output from this node. - check_alignment = pe.Node( - CheckTalairachAlignment(), name="Check_Talairach_Alignment") - check_alignment.inputs.threshold = 0.005 - ar1_wf.connect([ - (copy_transform, check_alignment, [('out_file', 'in_file')]), - ]) - - if not longitudinal: - - def awkfile(in_file, log_file): - """ - This method uses 'awk' which must be installed prior to running the workflow and is not a - part of nipype or freesurfer. 
- """ - import subprocess - import os - command = ['awk', '-f', in_file, log_file] - print(''.join(command)) - subprocess.call(command) - log_file = os.path.abspath(log_file) - return log_file - - awk_logfile = pe.Node( - Function(['in_file', 'log_file'], ['log_file'], awkfile), - name='Awk') - - ar1_wf.connect([(talairach_avi, awk_logfile, [('out_log', - 'log_file')]), - (inputspec, awk_logfile, [('awk_file', 'in_file')])]) - - # TODO datasink the output from TalirachQC...not sure how to do this - tal_qc = pe.Node(TalairachQC(), name="Detect_Aligment_Failures") - ar1_wf.connect([(awk_logfile, tal_qc, [('log_file', 'log_file')])]) - - if fsvernum < 6: - # intensity correction is performed before normalization - intensity_correction = pe.Node( - MNIBiasCorrection(), name="Intensity_Correction") - intensity_correction.inputs.out_file = 'nu.mgz' - intensity_correction.inputs.iterations = 2 - ar1_wf.connect([(add_xform_to_orig, intensity_correction, - [('out_file', 'in_file')]), - (copy_transform, intensity_correction, - [('out_file', 'transform')])]) - - add_to_header_nu = pe.Node(AddXFormToHeader(), name="Add_XForm_to_NU") - add_to_header_nu.inputs.copy_name = True - add_to_header_nu.inputs.out_file = 'nu.mgz' - ar1_wf.connect([(intensity_correction, add_to_header_nu, [ - ('out_file', 'in_file'), - ]), (copy_transform, add_to_header_nu, [('out_file', 'transform')])]) - - # Intensity Normalization - # Performs intensity normalization of the orig volume and places the result in mri/T1.mgz. - # Attempts to correct for fluctuations in intensity that would otherwise make intensity-based - # segmentation much more difficult. Intensities for all voxels are scaled so that the mean - # intensity of the white matter is 110. - - mri_normalize = pe.Node(Normalize(), name="Normalize_T1") - mri_normalize.inputs.gradient = 1 - mri_normalize.inputs.out_file = 'T1.mgz' - - if fsvernum < 6: - ar1_wf.connect([(add_to_header_nu, mri_normalize, [('out_file', - 'in_file')])]) - else: - ar1_wf.connect([(add_xform_to_orig_nu, mri_normalize, [('out_file', - 'in_file')])]) - - ar1_wf.connect([(copy_transform, mri_normalize, [('out_file', - 'transform')])]) - - # Skull Strip - """ - Removes the skull from mri/T1.mgz and stores the result in - mri/brainmask.auto.mgz and mri/brainmask.mgz. Runs the mri_watershed program. 
- """ - if not longitudinal: - mri_em_register = pe.Node(EMRegister(), name="EM_Register") - mri_em_register.inputs.out_file = 'talairach_with_skull.lta' - mri_em_register.inputs.skull = True - if plugin_args: - mri_em_register.plugin_args = plugin_args - - if fsvernum < 6: - ar1_wf.connect(add_to_header_nu, 'out_file', mri_em_register, - 'in_file') - else: - ar1_wf.connect(add_xform_to_orig_nu, 'out_file', mri_em_register, - 'in_file') - - ar1_wf.connect([(inputspec, mri_em_register, - [('num_threads', 'num_threads'), - ('reg_template_withskull', 'template')])]) - - brainmask = pe.Node( - WatershedSkullStrip(), name='Watershed_Skull_Strip') - brainmask.inputs.t1 = True - brainmask.inputs.out_file = 'brainmask.auto.mgz' - ar1_wf.connect([(mri_normalize, brainmask, [('out_file', 'in_file')]), - (mri_em_register, brainmask, [('out_file', - 'transform')]), - (inputspec, brainmask, [('reg_template_withskull', - 'brain_atlas')])]) - else: - copy_template_brainmask = pe.Node( - Function(['in_file', 'out_file'], ['out_file'], copy_file), - name='Copy_Template_Brainmask') - copy_template_brainmask.inputs.out_file = 'brainmask_{0}.mgz'.format( - config['long_template']) - - ar1_wf.connect([(inputspec, copy_template_brainmask, - [('template_brainmask', 'in_file')])]) - - mask1 = pe.Node(ApplyMask(), name="ApplyMask1") - mask1.inputs.keep_mask_deletion_edits = True - mask1.inputs.out_file = 'brainmask.auto.mgz' - - ar1_wf.connect([(mri_normalize, mask1, [('out_file', 'in_file')]), - (copy_template_brainmask, mask1, [('out_file', - 'mask_file')])]) - - brainmask = pe.Node(ApplyMask(), name="ApplyMask2") - brainmask.inputs.keep_mask_deletion_edits = True - brainmask.inputs.transfer = 255 - brainmask.inputs.out_file = mask1.inputs.out_file - - ar1_wf.connect([(mask1, brainmask, [('out_file', 'in_file')]), - (copy_template_brainmask, brainmask, [('out_file', - 'mask_file')])]) - - copy_brainmask = pe.Node( - Function(['in_file', 'out_file'], ['out_file'], copy_file), - name='Copy_Brainmask') - copy_brainmask.inputs.out_file = 'brainmask.mgz' - - ar1_wf.connect([(brainmask, copy_brainmask, [('out_file', 'in_file')])]) - - outputs = [ - 'origvols', 't2_raw', 'flair', 'rawavg', 'orig_nu', 'orig', - 'talairach_auto', 'talairach', 't1', 'talskull', 'brainmask_auto', - 'brainmask', 'braintemplate' - ] - - if fsvernum < 6: - outputspec = pe.Node( - IdentityInterface(fields=outputs + ['nu']), name="outputspec") - ar1_wf.connect([(add_to_header_nu, outputspec, [('out_file', 'nu')])]) - else: - outputspec = pe.Node( - IdentityInterface(fields=outputs), name="outputspec") - - ar1_wf.connect([ - (T1_image_preparation, outputspec, [('out_file', 'origvols')]), - (T2_convert, outputspec, [('out_file', 't2_raw')]), - (FLAIR_convert, outputspec, [('out_file', 'flair')]), - (create_template, outputspec, [('out_file', 'rawavg')]), - (add_xform_to_orig, outputspec, [('out_file', 'orig')]), - (add_xform_to_orig_nu, outputspec, [('out_file', 'orig_nu')]), - (talairach_avi, outputspec, [('out_file', 'talairach_auto')]), - (copy_transform, outputspec, [('out_file', 'talairach')]), - (mri_normalize, outputspec, [('out_file', 't1')]), - (brainmask, outputspec, [('out_file', 'brainmask_auto')]), - (copy_brainmask, outputspec, [('out_file', 'brainmask')]), - ]) - - if not longitudinal: - ar1_wf.connect([ - (mri_em_register, outputspec, [('out_file', 'talskull')]), - ]) - else: - ar1_wf.connect([ - (copy_template_brainmask, outputspec, [('out_file', - 'braintemplate')]), - ]) - - return ar1_wf, outputs diff --git 
a/nipype/workflows/smri/freesurfer/autorecon2.py b/nipype/workflows/smri/freesurfer/autorecon2.py deleted file mode 100644 index a11587412d..0000000000 --- a/nipype/workflows/smri/freesurfer/autorecon2.py +++ /dev/null @@ -1,720 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from ....interfaces.utility import Function, IdentityInterface, Merge -from ....pipeline import engine as pe -from ....interfaces.freesurfer import * -from .utils import copy_file - - -def copy_ltas(in_file, subjects_dir, subject_id, long_template): - import os - out_file = copy_file(in_file, - os.path.basename(in_file).replace( - long_template, subject_id)) - return out_file - - -def create_AutoRecon2(name="AutoRecon2", - longitudinal=False, - plugin_args=None, - fsvernum=5.3, - stop=None, - shrink=None, - distance=None): - # AutoRecon2 - # Workflow - ar2_wf = pe.Workflow(name=name) - - inputspec = pe.Node( - IdentityInterface(fields=[ - 'orig', - 'nu', # version < 6 - 'brainmask', - 'transform', - 'subject_id', - 'template_talairach_lta', - 'template_talairach_m3z', - 'template_label_intensities', - 'template_aseg', - 'subj_to_template_lta', - 'alltps_to_template_ltas', - 'template_lh_white', - 'template_rh_white', - 'template_lh_pial', - 'template_rh_pial', - 'init_wm', - 'timepoints', - 'alltps_segs', - 'alltps_segs_noCC', - 'alltps_norms', - 'num_threads', - 'reg_template', - 'reg_template_withskull' - ]), - run_without_submitting=True, - name='inputspec') - - # Input node - if longitudinal: - # TODO: Work on longitudinal workflow - inputspec.inputs.timepoints = config['timepoints'] - - if fsvernum >= 6: - # NU Intensity Correction - """ - Non-parametric Non-uniform intensity Normalization (N3), corrects for - intensity non-uniformity in MR data, making relatively few assumptions about - the data. This runs the MINC tool 'nu_correct'. - """ - intensity_correction = pe.Node( - MNIBiasCorrection(), name="Intensity_Correction") - intensity_correction.inputs.out_file = 'nu.mgz' - ar2_wf.connect([(inputspec, intensity_correction, - [('orig', 'in_file'), ('brainmask', 'mask'), - ('transform', 'transform')])]) - - # intensity correction parameters are more specific in 6+ - intensity_correction.inputs.iterations = 1 - intensity_correction.inputs.protocol_iterations = 1000 - if stop: - intensity_correction.inputs.stop = stop - if shrink: - intensity_correction.inputs.shrink = shrink - intensity_correction.inputs.distance = distance - - add_to_header_nu = pe.Node(AddXFormToHeader(), name="Add_XForm_to_NU") - add_to_header_nu.inputs.copy_name = True - add_to_header_nu.inputs.out_file = 'nu.mgz' - ar2_wf.connect([(intensity_correction, add_to_header_nu, [ - ('out_file', 'in_file'), - ]), (inputspec, add_to_header_nu, [('transform', 'transform')])]) - - # EM Registration - """ - Computes the transform to align the mri/nu.mgz volume to the default GCA - atlas found in FREESURFER_HOME/average (see -gca flag for more info). 
- """ - if longitudinal: - align_transform = pe.Node( - Function(['in_file', 'out_file'], ['out_file'], copy_file), - name='Copy_Talairach_lta') - align_transform.inputs.out_file = 'talairach.lta' - - ar2_wf.connect([(inputspec, align_transform, - [('template_talairach_lta', 'in_file')])]) - else: - align_transform = pe.Node(EMRegister(), name="Align_Transform") - align_transform.inputs.out_file = 'talairach.lta' - align_transform.inputs.nbrspacing = 3 - if plugin_args: - align_transform.plugin_args = plugin_args - ar2_wf.connect([(inputspec, align_transform, - [('brainmask', 'mask'), ('reg_template', 'template'), - ('num_threads', 'num_threads')])]) - if fsvernum >= 6: - ar2_wf.connect([(add_to_header_nu, align_transform, - [('out_file', 'in_file')])]) - else: - ar2_wf.connect([(inputspec, align_transform, [('nu', 'in_file')])]) - - # CA Normalize - """ - Further normalization, based on GCA model. The normalization is based on an - estimate of the most certain segmentation voxels, which it then uses to - estimate the bias field/scalings. Creates mri/norm.mgz. - """ - ca_normalize = pe.Node(CANormalize(), name='CA_Normalize') - ca_normalize.inputs.out_file = 'norm.mgz' - if not longitudinal: - ca_normalize.inputs.control_points = 'ctrl_pts.mgz' - else: - copy_template_aseg = pe.Node( - Function(['in_file', 'out_file'], ['out_file'], copy_file), - name='Copy_Template_Aseg') - copy_template_aseg.inputs.out_file = 'aseg_{0}.mgz'.format( - config['long_template']) - - ar1_wf.connect( - [(inputspec, copy_template, [('template_aseg', 'in_file')]), - (copy_template, ca_normalize, [('out_file', 'long_file')])]) - - ar2_wf.connect([(align_transform, ca_normalize, [('out_file', - 'transform')]), - (inputspec, ca_normalize, [('brainmask', 'mask'), - ('reg_template', 'atlas')])]) - if fsvernum >= 6: - ar2_wf.connect([(add_to_header_nu, ca_normalize, [('out_file', - 'in_file')])]) - else: - ar2_wf.connect([(inputspec, ca_normalize, [('nu', 'in_file')])]) - - # CA Register - # Computes a nonlinear transform to align with GCA atlas. - ca_register = pe.Node(CARegister(), name='CA_Register') - ca_register.inputs.align = 'after' - ca_register.inputs.no_big_ventricles = True - ca_register.inputs.out_file = 'talairach.m3z' - if plugin_args: - ca_register.plugin_args = plugin_args - ar2_wf.connect([(ca_normalize, ca_register, [('out_file', 'in_file')]), - (inputspec, ca_register, - [('brainmask', 'mask'), ('num_threads', 'num_threads'), - ('reg_template', 'template')])]) - if not longitudinal: - ar2_wf.connect([(align_transform, ca_register, [('out_file', - 'transform')])]) - else: - ca_register.inputs.levels = 2 - ca_register.inputs.A = 1 - ar2_wf.connect([(inputspec, ca_register, [('template_talairach_m3z', - 'l_files')])]) - - # Remove Neck - """ - The neck region is removed from the NU-corrected volume mri/nu.mgz. Makes use - of transform computed from prior CA Register stage. - """ - remove_neck = pe.Node(RemoveNeck(), name='Remove_Neck') - remove_neck.inputs.radius = 25 - remove_neck.inputs.out_file = 'nu_noneck.mgz' - ar2_wf.connect([(ca_register, remove_neck, [('out_file', 'transform')]), - (inputspec, remove_neck, [('reg_template', 'template')])]) - if fsvernum >= 6: - ar2_wf.connect([(add_to_header_nu, remove_neck, [('out_file', - 'in_file')])]) - else: - ar2_wf.connect([(inputspec, remove_neck, [('nu', 'in_file')])]) - - # SkullLTA (EM Registration, with Skull) - # Computes transform to align volume mri/nu_noneck.mgz with GCA volume - # possessing the skull. 
- em_reg_withskull = pe.Node(EMRegister(), name='EM_Register_withSkull') - em_reg_withskull.inputs.skull = True - em_reg_withskull.inputs.out_file = 'talairach_with_skull_2.lta' - if plugin_args: - em_reg_withskull.plugin_args = plugin_args - ar2_wf.connect([(align_transform, em_reg_withskull, [('out_file', - 'transform')]), - (remove_neck, em_reg_withskull, [('out_file', 'in_file')]), - (inputspec, em_reg_withskull, - [('num_threads', 'num_threads'), - ('reg_template_withskull', 'template')])]) - - # SubCort Seg (CA Label) - # Labels subcortical structures, based in GCA model. - if longitudinal: - copy_long_ltas = pe.MapNode( - Function( - ['in_file', 'subjects_dir', 'subject_id', 'long_template'], - ['out_file'], copy_ltas), - iterfield=['in_file'], - name='Copy_long_ltas') - ar2_wf.connect([(inputspec, copy_long_ltas, - [('alltps_to_template_ltas', 'in_file'), - ('subjects_dir', 'subjects_dir'), ('subject_id', - 'subject_id')])]) - copy_long_ltas.inputs.long_template = config['long_template'] - - merge_norms = pe.Node(Merge(2), name="Merge_Norms") - - ar2_wf.connect([(inputspec, merge_norms, [('alltps_norms', 'in1')]), - (ca_normalize, merge_norms, [('out_file', 'in2')])]) - - fuse_segmentations = pe.Node( - FuseSegmentations(), name="Fuse_Segmentations") - - ar2_wf.connect([(inputspec, fuse_segmentations, [ - ('timepoints', 'timepoints'), ('alltps_segs', 'in_segmentations'), - ('alltps_segs_noCC', 'in_segmentations_noCC'), ('subject_id', - 'subject_id') - ]), (merge_norms, fuse_segmentations, [('out', 'in_norms')])]) - fuse_segmentations.inputs.out_file = 'aseg.fused.mgz' - - ca_label = pe.Node(CALabel(), name='CA_Label') - if fsvernum >= 6: - ca_label.inputs.relabel_unlikely = (9, .3) - ca_label.inputs.prior = 0.5 - ca_label.inputs.align = True - ca_label.inputs.out_file = 'aseg.auto_noCCseg.mgz' - if plugin_args: - ca_label.plugin_args = plugin_args - ar2_wf.connect([(ca_normalize, ca_label, [('out_file', 'in_file')]), - (ca_register, ca_label, [('out_file', 'transform')]), - (inputspec, ca_label, [('num_threads', 'num_threads'), - ('reg_template', 'template')])]) - - if longitudinal: - ar2_wf.connect([(fuse_segmentations, ca_label, [('out_file', - 'in_vol')]), - (inputspec, ca_label, [('template_label_intensities', - 'intensities')])]) - - # mri_cc - segments the corpus callosum into five separate labels in the - # subcortical segmentation volume 'aseg.mgz' - segment_cc = pe.Node(SegmentCC(), name="Segment_CorpusCallosum") - segment_cc.inputs.out_rotation = 'cc_up.lta' - segment_cc.inputs.out_file = 'aseg.auto.mgz' - segment_cc.inputs.copy_inputs = True - ar2_wf.connect([ - (ca_label, segment_cc, [('out_file', 'in_file')]), - (ca_normalize, segment_cc, [('out_file', 'in_norm')]), - ]) - - copy_cc = pe.Node( - Function(['in_file', 'out_file'], ['out_file'], copy_file), - name='Copy_CCSegmentation') - copy_cc.inputs.out_file = 'aseg.presurf.mgz' - - ar2_wf.connect([(segment_cc, copy_cc, [('out_file', 'in_file')])]) - - # Normalization2 - """ - Performs a second (major) intensity correction using only the brain volume a - s the input (so that it has to be done after the skull strip). Intensity - normalization works better when the skull has been removed. Creates a new - brain.mgz volume. The -autorecon2-cp stage begins here. 
- """ - normalization2 = pe.Node(Normalize(), name="Normalization2") - normalization2.inputs.out_file = 'brain.mgz' - ar2_wf.connect([(copy_cc, normalization2, [('out_file', 'segmentation')]), - (inputspec, normalization2, [('brainmask', 'mask')]), - (ca_normalize, normalization2, [('out_file', 'in_file')])]) - - # Mask Brain Final Surface - - # Applies brainmask.mgz to brain.mgz to create brain.finalsurfs.mgz. - mri_mask = pe.Node(ApplyMask(), name="Mask_Brain_Final_Surface") - mri_mask.inputs.mask_thresh = 5 - mri_mask.inputs.out_file = 'brain.finalsurfs.mgz' - - ar2_wf.connect([(normalization2, mri_mask, [('out_file', 'in_file')]), - (inputspec, mri_mask, [('brainmask', 'mask_file')])]) - - # WM Segmentation - """ - Attempts to separate white matter from everything else. The input is - mri/brain.mgz, and the output is mri/wm.mgz. Uses intensity, neighborhood, - and smoothness constraints. This is the volume that is edited when manually - fixing defects. Calls mri_segment, mri_edit_wm_with_aseg, and mri_pretess. - """ - - wm_seg = pe.Node(SegmentWM(), name="Segment_WM") - wm_seg.inputs.out_file = 'wm.seg.mgz' - ar2_wf.connect([(normalization2, wm_seg, [('out_file', 'in_file')])]) - - edit_wm = pe.Node(EditWMwithAseg(), name='Edit_WhiteMatter') - edit_wm.inputs.out_file = 'wm.asegedit.mgz' - edit_wm.inputs.keep_in = True - ar2_wf.connect([(wm_seg, edit_wm, [('out_file', 'in_file')]), - (copy_cc, edit_wm, [('out_file', 'seg_file')]), - (normalization2, edit_wm, [('out_file', 'brain_file')])]) - - pretess = pe.Node(MRIPretess(), name="MRI_Pretess") - pretess.inputs.out_file = 'wm.mgz' - pretess.inputs.label = 'wm' - ar2_wf.connect([(edit_wm, pretess, [('out_file', 'in_filled')]), - (ca_normalize, pretess, [('out_file', 'in_norm')])]) - - if longitudinal: - transfer_init_wm = pe.Node(ApplyMask(), name="Transfer_Initial_WM") - transfer_init_wm.inputs.transfer = 255 - transfer_init_wm.inputs.keep_mask_deletion_edits = True - transfer_init_wm.inputs.out_file = 'wm.mgz' - ar2_wf.connect([(pretess, transfer_init_wm, [('out_file', 'in_file')]), - (inputspec, transfer_init_wm, - [('init_wm', 'mask_file'), ('subj_to_template_lta', - 'xfm_file')])]) - # changing the pretess variable so that the rest of the connections still work!!! - pretess = transfer_init_wm - - # Fill - """ This creates the subcortical mass from which the orig surface is created. - The mid brain is cut from the cerebrum, and the hemispheres are cut from each - other. The left hemisphere is binarized to 255. The right hemisphere is binarized - to 127. The input is mri/wm.mgz and the output is mri/filled.mgz. Calls mri_fill. 
- """ - - fill = pe.Node(MRIFill(), name="Fill") - fill.inputs.log_file = 'ponscc.cut.log' - fill.inputs.out_file = 'filled.mgz' - - ar2_wf.connect([ - (pretess, fill, [('out_file', 'in_file')]), - (align_transform, fill, [('out_file', 'transform')]), - (ca_label, fill, [('out_file', 'segmentation')]), - ]) - - ar2_lh = pe.Workflow("AutoRecon2_Left") - ar2_rh = pe.Workflow("AutoRecon2_Right") - - # iterate by hemisphere - for hemisphere in ['lh', 'rh']: - if hemisphere == 'lh': - label = 255 - hemi_wf = ar2_lh - else: - label = 127 - hemi_wf = ar2_rh - - hemi_inputspec = pe.Node( - IdentityInterface(fields=[ - 'norm', 'filled', 'aseg', 't1', 'wm', 'brain', 'num_threads' - ]), - name="inputspec") - - if longitudinal: - # Make White Surf - # Copy files from longitudinal base - copy_template_white = pe.Node( - Function(['in_file', 'out_file'], ['out_file'], copy_file), - name='Copy_Template_White') - copy_template_white.inputs.out_file = '{0}.orig'.format(hemisphere) - - copy_template_orig_white = pe.Node( - Function(['in_file', 'out_file'], ['out_file'], copy_file), - name='Copy_Template_Orig_White') - copy_template_orig_white.inputs.out_file = '{0}.orig_white'.format( - hemisphere) - - copy_template_orig_pial = pe.Node( - Function(['in_file', 'out_file'], ['out_file'], copy_file), - name='Copy_Template_Orig_Pial') - copy_template_orig_pial.inputs.out_file = '{0}.orig_pial'.format( - hemisphere) - - # White - - # This function implicitly calls other inputs based on the subject_id - # wf attempts to make sure files are data sinked to the correct - # folders before calling - make_surfaces = pe.Node(MakeSurfaces(), name="Make_Surfaces") - make_surfaces.inputs.noaparc = True - make_surfaces.inputs.mgz = True - make_surfaces.inputs.white_only = True - make_surfaces.inputs.hemisphere = hemisphere - make_surfaces.inputs.maximum = 3.5 - make_surfaces.inputs.longitudinal = True - make_surfaces.inputs.copy_inputs = True - - hemi_wf.connect([(copy_template_orig_white, make_surfaces, - [('out_file', 'orig_white')]), - (copy_template_white, make_surfaces, - [('out_file', 'in_orig')])]) - - else: - # If running single session - # Tessellate by hemisphere - """ - This is the step where the orig surface (ie, surf/?h.orig.nofix) is created. - The surface is created by covering the filled hemisphere with triangles. - Runs mri_pretess to create a connected WM volume (neighboring voxels must - have faces in common) and then mri_tessellate to create the surface. The - places where the points of the triangles meet are called vertices. Creates - the file surf/?h.orig.nofix Note: the topology fixer will create the surface - ?h.orig. Finally mris_extract_main_component will remove small surface - components, not connected to the main body. 
- """ - pretess2 = pe.Node(MRIPretess(), name='Pretess2') - pretess2.inputs.out_file = 'filled-pretess{0}.mgz'.format(label) - pretess2.inputs.label = label - - hemi_wf.connect([(hemi_inputspec, pretess2, - [('norm', 'in_norm'), ('filled', 'in_filled')])]) - - tesselate = pe.Node(MRITessellate(), name="Tesselation") - tesselate.inputs.out_file = "{0}.orig.nofix".format(hemisphere) - tesselate.inputs.label_value = label - hemi_wf.connect([(pretess2, tesselate, [('out_file', 'in_file')])]) - - extract_main_component = pe.Node( - ExtractMainComponent(), name="Extract_Main_Component") - extract_main_component.inputs.out_file = "{0}.orig.nofix".format( - hemisphere) - hemi_wf.connect([(tesselate, extract_main_component, - [('surface', 'in_file')])]) - - copy_orig = pe.Node( - Function(['in_file', 'out_file'], ['out_file'], copy_file), - name='Copy_Orig') - copy_orig.inputs.out_file = '{0}.orig'.format(hemisphere) - hemi_wf.connect([(extract_main_component, copy_orig, - [('out_file', 'in_file')])]) - - # Orig Surface Smoothing 1 - """ - After tesselation, the orig surface is very jagged because each triangle is - on the edge of a voxel face and so are at right angles to each other. The - vertex positions are adjusted slightly here to reduce the angle. This is - only necessary for the inflation processes. Creates surf/?h.smoothwm(.nofix). - Calls mris_smooth. Smooth1 is the step just after tessellation. - """ - - smooth1 = pe.Node(SmoothTessellation(), name="Smooth1") - smooth1.inputs.disable_estimates = True - smooth1.inputs.seed = 1234 - smooth1.inputs.out_file = '{0}.smoothwm.nofix'.format(hemisphere) - hemi_wf.connect([(extract_main_component, smooth1, [('out_file', - 'in_file')])]) - - # Inflation 1 - """ - Inflation of the surf/?h.smoothwm(.nofix) surface to create surf/?h.inflated. - The inflation attempts to minimize metric distortion so that distances and - areas are preserved (ie, the surface is not stretched). In this sense, it is - like inflating a paper bag and not a balloon. Inflate1 is the step just after - tessellation. - """ - - inflate1 = pe.Node(MRIsInflate(), name="inflate1") - inflate1.inputs.no_save_sulc = True - inflate1.inputs.out_file = '{0}.inflated.nofix'.format(hemisphere) - - copy_inflate1 = pe.Node( - Function(['in_file', 'out_file'], ['out_file'], copy_file), - name='Copy_Inflate1') - copy_inflate1.inputs.out_file = '{0}.inflated'.format(hemisphere) - hemi_wf.connect([ - (smooth1, inflate1, [('surface', 'in_file')]), - (inflate1, copy_inflate1, [('out_file', 'in_file')]), - ]) - - # Sphere - """ - This is the initial step of automatic topology fixing. It is a - quasi-homeomorphic spherical transformation of the inflated surface designed - to localize topological defects for the subsequent automatic topology fixer. - Calls mris_sphere. - """ - - qsphere = pe.Node(Sphere(), name="Sphere") - qsphere.inputs.seed = 1234 - qsphere.inputs.magic = True - qsphere.inputs.out_file = '{0}.qsphere.nofix'.format(hemisphere) - if plugin_args: - qsphere.plugin_args = plugin_args - hemi_wf.connect([(inflate1, qsphere, [('out_file', 'in_file')]), - (hemi_inputspec, qsphere, [('num_threads', - 'num_threads')])]) - - # Automatic Topology Fixer - """ - Finds topological defects (ie, holes in a filled hemisphere) using - surf/?h.qsphere.nofix, and changes the orig surface (surf/?h.orig.nofix) to - remove the defects. Changes the number of vertices. All the defects will be - removed, but the user should check the orig surface in the volume to make - sure that it looks appropriate. 
- - This mris_fix_topology does not take in the {lh,rh}.orig file, but instead takes in the - subject ID and hemisphere and tries to find it from the subjects - directory. - """ - fix_topology = pe.Node(FixTopology(), name="Fix_Topology") - fix_topology.inputs.mgz = True - fix_topology.inputs.ga = True - fix_topology.inputs.seed = 1234 - fix_topology.inputs.hemisphere = hemisphere - fix_topology.inputs.copy_inputs = True - hemi_wf.connect([(copy_orig, fix_topology, - [('out_file', - 'in_orig')]), (copy_inflate1, fix_topology, - [('out_file', 'in_inflated')]), - (qsphere, fix_topology, [('out_file', 'sphere')]), - (hemi_inputspec, fix_topology, - [('wm', 'in_wm'), ('brain', 'in_brain')])]) - - # TODO: halt workflow for bad euler number - euler_number = pe.Node(EulerNumber(), name="Euler_Number") - - hemi_wf.connect([ - (fix_topology, euler_number, [('out_file', 'in_file')]), - ]) - - remove_intersection = pe.Node( - RemoveIntersection(), name="Remove_Intersection") - remove_intersection.inputs.out_file = "{0}.orig".format(hemisphere) - - hemi_wf.connect([(euler_number, remove_intersection, - [('out_file', 'in_file')])]) - - # White - - # This function implicitly calls other inputs based on the subject_id - # need to make sure files are data sinked to the correct folders before - # calling - make_surfaces = pe.Node(MakeSurfaces(), name="Make_Surfaces") - make_surfaces.inputs.noaparc = True - make_surfaces.inputs.mgz = True - make_surfaces.inputs.white_only = True - make_surfaces.inputs.hemisphere = hemisphere - make_surfaces.inputs.copy_inputs = True - hemi_wf.connect([(remove_intersection, make_surfaces, - [('out_file', 'in_orig')]), - (hemi_inputspec, make_surfaces, - [('aseg', 'in_aseg'), ('t1', 'in_T1'), - ('filled', 'in_filled'), ('wm', 'in_wm')])]) - # end of non-longitudinal specific steps - - # Orig Surface Smoothing 2 - """ - After tesselation, the orig surface is very jagged because each triangle is on - the edge of a voxel face and so are at right angles to each other. The vertex - positions are adjusted slightly here to reduce the angle. This is only necessary - for the inflation processes. Smooth2 is the step just after topology - fixing. - """ - smooth2 = pe.Node(SmoothTessellation(), name="Smooth2") - smooth2.inputs.disable_estimates = True - smooth2.inputs.smoothing_iterations = 3 - smooth2.inputs.seed = 1234 - smooth2.inputs.out_file = '{0}.smoothwm'.format(hemisphere) - hemi_wf.connect([(make_surfaces, smooth2, [('out_white', 'in_file')])]) - - # Inflation 2 - """ - Inflation of the surf/?h.smoothwm(.nofix) surface to create surf/?h.inflated. - The inflation attempts to minimize metric distortion so that distances and areas - are preserved (ie, the surface is not stretched). In this sense, it is like - inflating a paper bag and not a balloon. Inflate2 is the step just after - topology fixing. 
- """ - inflate2 = pe.Node(MRIsInflate(), name="inflate2") - inflate2.inputs.out_sulc = '{0}.sulc'.format(hemisphere) - inflate2.inputs.out_file = '{0}.inflated'.format(hemisphere) - hemi_wf.connect([ - (smooth2, inflate2, [('surface', 'in_file')]), - ]) - - # Compute Curvature - """No documentation on this step""" - - curvature1 = pe.Node(Curvature(), name="Curvature1") - curvature1.inputs.save = True - curvature1.inputs.copy_input = True - hemi_wf.connect([ - (make_surfaces, curvature1, [('out_white', 'in_file')]), - ]) - - curvature2 = pe.Node(Curvature(), name="Curvature2") - curvature2.inputs.threshold = .999 - curvature2.inputs.n = True - curvature2.inputs.averages = 5 - curvature2.inputs.save = True - curvature2.inputs.distances = (10, 10) - curvature1.inputs.copy_input = True - hemi_wf.connect([ - (inflate2, curvature2, [('out_file', 'in_file')]), - ]) - - curvature_stats = pe.Node(CurvatureStats(), name="Curvature_Stats") - curvature_stats.inputs.min_max = True - curvature_stats.inputs.write = True - curvature_stats.inputs.values = True - curvature_stats.inputs.hemisphere = hemisphere - curvature_stats.inputs.copy_inputs = True - curvature_stats.inputs.out_file = '{0}.curv.stats'.format(hemisphere) - hemi_wf.connect([ - (smooth2, curvature_stats, [('surface', 'surface')]), - (make_surfaces, curvature_stats, [('out_curv', 'curvfile1')]), - (inflate2, curvature_stats, [('out_sulc', 'curvfile2')]), - ]) - - if longitudinal: - ar2_wf.connect([(inputspec, hemi_wf, - [('template_{0}_white'.format(hemisphere), - 'Copy_Template_White.in_file'), - ('template_{0}_white'.format(hemisphere), - 'Copy_Template_Orig_White.in_file'), - ('template_{0}_pial'.format(hemisphere), - 'Copy_Template_Pial.in_file')])]) - - # Connect inputs for the hemisphere workflows - ar2_wf.connect( - [(ca_normalize, hemi_wf, - [('out_file', 'inputspec.norm')]), (fill, hemi_wf, [ - ('out_file', 'inputspec.filled') - ]), (copy_cc, hemi_wf, [('out_file', 'inputspec.aseg')]), - (mri_mask, hemi_wf, [('out_file', 'inputspec.t1')]), - (pretess, hemi_wf, [('out_file', - 'inputspec.wm')]), (normalization2, hemi_wf, - [('out_file', - 'inputspec.brain')]), - (inputspec, hemi_wf, [('num_threads', 'inputspec.num_threads')])]) - - # Outputs for hemisphere workflow - hemi_outputs = [ - 'orig_nofix', 'orig', 'smoothwm_nofix', 'inflated_nofix', - 'qsphere_nofix', 'white', 'curv', 'area', 'cortex', 'pial_auto', - 'thickness', 'smoothwm', 'sulc', 'inflated', 'white_H', 'white_K', - 'inflated_H', 'inflated_K', 'curv_stats' - ] - - hemi_outputspec = pe.Node( - IdentityInterface(fields=hemi_outputs), name="outputspec") - - hemi_wf.connect( - [(extract_main_component, hemi_outputspec, - [('out_file', 'orig_nofix')]), (inflate1, hemi_outputspec, [ - ('out_file', 'inflated_nofix') - ]), (smooth1, hemi_outputspec, [('surface', 'smoothwm_nofix')]), - (qsphere, hemi_outputspec, [('out_file', 'qsphere_nofix')]), - (remove_intersection, hemi_outputspec, - [('out_file', 'orig')]), (make_surfaces, hemi_outputspec, [ - ('out_white', 'white'), ('out_curv', 'curv'), - ('out_area', 'area'), ('out_cortex', 'cortex'), ('out_pial', - 'pial_auto') - ]), (smooth2, hemi_outputspec, - [('surface', 'smoothwm')]), (inflate2, hemi_outputspec, - [('out_sulc', 'sulc'), - ('out_file', 'inflated')]), - (curvature1, hemi_outputspec, - [('out_mean', 'white_H'), - ('out_gauss', 'white_K')]), (curvature2, hemi_outputspec, [ - ('out_mean', 'inflated_H'), ('out_gauss', 'inflated_K') - ]), (curvature_stats, hemi_outputspec, [('out_file', - 'curv_stats')])]) - - outputs = [ 
- 'nu', 'tal_lta', 'norm', 'ctrl_pts', 'tal_m3z', 'nu_noneck', - 'talskull2', 'aseg_noCC', 'cc_up', 'aseg_auto', 'aseg_presurf', - 'brain', 'brain_finalsurfs', 'wm_seg', 'wm_aseg', 'wm', 'ponscc_log', - 'filled' - ] - for hemi in ('lh', 'rh'): - for field in hemi_outputs: - outputs.append("{0}_".format(hemi) + field) - outputspec = pe.Node(IdentityInterface(fields=outputs), name="outputspec") - - if fsvernum >= 6: - ar2_wf.connect([(add_to_header_nu, outputspec, [('out_file', 'nu')])]) - else: - # add to outputspec to perserve datasinking - ar2_wf.connect([(inputspec, outputspec, [('nu', 'nu')])]) - - ar2_wf.connect([ - (align_transform, outputspec, [('out_file', 'tal_lta')]), - (ca_normalize, outputspec, [('out_file', 'norm')]), - (ca_normalize, outputspec, [('control_points', 'ctrl_pts')]), - (ca_register, outputspec, [('out_file', 'tal_m3z')]), - (remove_neck, outputspec, [('out_file', 'nu_noneck')]), - (em_reg_withskull, outputspec, [('out_file', 'talskull2')]), - (ca_label, outputspec, [('out_file', 'aseg_noCC')]), - (segment_cc, outputspec, [('out_rotation', 'cc_up'), ('out_file', - 'aseg_auto')]), - (copy_cc, outputspec, [('out_file', 'aseg_presurf')]), - (normalization2, outputspec, [('out_file', 'brain')]), - (mri_mask, outputspec, [('out_file', 'brain_finalsurfs')]), - (wm_seg, outputspec, [('out_file', 'wm_seg')]), - (edit_wm, outputspec, [('out_file', 'wm_aseg')]), - (pretess, outputspec, [('out_file', 'wm')]), - (fill, outputspec, [('out_file', 'filled'), ('log_file', - 'ponscc_log')]), - ]) - - for hemi, hemi_wf in [('lh', ar2_lh), ('rh', ar2_rh)]: - for field in hemi_outputs: - output = "{0}_".format(hemi) + field - ar2_wf.connect([(hemi_wf, outputspec, [("outputspec." + field, - output)])]) - - return ar2_wf, outputs diff --git a/nipype/workflows/smri/freesurfer/autorecon3.py b/nipype/workflows/smri/freesurfer/autorecon3.py deleted file mode 100644 index 477198d2da..0000000000 --- a/nipype/workflows/smri/freesurfer/autorecon3.py +++ /dev/null @@ -1,959 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from ....interfaces.utility import IdentityInterface, Merge, Function -from ....pipeline import engine as pe -from ....interfaces.freesurfer import * -from .ba_maps import create_ba_maps_wf -from ....interfaces.io import DataGrabber - - -def create_AutoRecon3(name="AutoRecon3", - qcache=False, - plugin_args=None, - th3=True, - exvivo=True, - entorhinal=True, - fsvernum=5.3): - - # AutoRecon3 - # Workflow - ar3_wf = pe.Workflow(name=name) - - # Input Node - inputspec = pe.Node( - IdentityInterface(fields=[ - 'lh_inflated', 'rh_inflated', 'lh_smoothwm', 'rh_smoothwm', - 'lh_white', 'rh_white', 'lh_white_H', 'rh_white_H', 'lh_white_K', - 'rh_white_K', 'lh_cortex_label', 'rh_cortex_label', 'lh_orig', - 'rh_orig', 'lh_sulc', 'rh_sulc', 'lh_area', 'rh_area', 'lh_curv', - 'rh_curv', 'lh_orig_nofix', 'rh_orig_nofix', 'aseg_presurf', - 'brain_finalsurfs', 'wm', 'filled', 'brainmask', 'transform', - 'orig_mgz', 'rawavg', 'norm', 'lh_atlas', 'rh_atlas', - 'lh_classifier1', 'rh_classifier1', 'lh_classifier2', - 'rh_classifier2', 'lh_classifier3', 'rh_classifier3', - 'lookup_table', 'wm_lookup_table', 'src_subject_id', - 'src_subject_dir', 'color_table', 'num_threads' - ]), - name='inputspec') - - ar3_lh_wf1 = pe.Workflow(name="AutoRecon3_Left_1") - ar3_rh_wf1 = pe.Workflow(name="AutoRecon3_Right_1") - for hemisphere, hemi_wf in [('lh', ar3_lh_wf1), ('rh', ar3_rh_wf1)]: - hemi_inputspec1 = pe.Node( - 
IdentityInterface(fields=[ - 'inflated', 'smoothwm', 'white', 'cortex_label', 'orig', - 'aseg_presurf', 'brain_finalsurfs', 'wm', 'filled', 'sphere', - 'sulc', 'area', 'curv', 'classifier', 'atlas', 'num_threads' - ]), - name="inputspec") - - # Spherical Inflation - - # Inflates the orig surface into a sphere while minimizing metric distortion. - # This step is necessary in order to register the surface to the spherical - # atlas (also known as the spherical morph). Calls mris_sphere. Creates - # surf/?h.sphere. The -autorecon3 stage begins here. - - ar3_sphere = pe.Node(Sphere(), name="Spherical_Inflation") - ar3_sphere.inputs.seed = 1234 - ar3_sphere.inputs.out_file = '{0}.sphere'.format(hemisphere) - if plugin_args: - ar3_sphere.plugin_args = plugin_args - hemi_wf.connect([(hemi_inputspec1, ar3_sphere, - [('inflated', 'in_file'), - ('smoothwm', 'in_smoothwm'), ('num_threads', - 'num_threads')])]) - - # Ipsilateral Surface Registation (Spherical Morph) - - # Registers the orig surface to the spherical atlas through surf/?h.sphere. - # The surfaces are first coarsely registered by aligning the large scale - # folding patterns found in ?h.sulc and then fine tuned using the small-scale - # patterns as in ?h.curv. Calls mris_register. Creates surf/?h.sphere.reg. - - ar3_surfreg = pe.Node(Register(), name="Surface_Registration") - ar3_surfreg.inputs.out_file = '{0}.sphere.reg'.format(hemisphere) - ar3_surfreg.inputs.curv = True - hemi_wf.connect([(ar3_sphere, ar3_surfreg, [('out_file', 'in_surf')]), - (hemi_inputspec1, ar3_surfreg, - [('smoothwm', 'in_smoothwm'), ('sulc', 'in_sulc'), - ('atlas', 'target')])]) - - # Jacobian - - # Computes how much the white surface was distorted in order to register to - # the spherical atlas during the -surfreg step. - - ar3_jacobian = pe.Node(Jacobian(), name="Jacobian") - ar3_jacobian.inputs.out_file = '{0}.jacobian_white'.format(hemisphere) - hemi_wf.connect( - [(hemi_inputspec1, ar3_jacobian, [('white', 'in_origsurf')]), - (ar3_surfreg, ar3_jacobian, [('out_file', 'in_mappedsurf')])]) - - # Average Curvature - - # Resamples the average curvature from the atlas to that of the subject. - # Allows the user to display activity on the surface of an individual - # with the folding pattern (ie, anatomy) of a group. - - ar3_paint = pe.Node(Paint(), name="Average_Curvature") - ar3_paint.inputs.averages = 5 - ar3_paint.inputs.template_param = 6 - ar3_paint.inputs.out_file = "{0}.avg_curv".format(hemisphere) - hemi_wf.connect([(ar3_surfreg, ar3_paint, [('out_file', 'in_surf')]), - (hemi_inputspec1, ar3_paint, [('atlas', - 'template')])]) - - # Cortical Parcellation - - # Assigns a neuroanatomical label to each location on the cortical - # surface. Incorporates both geometric information derived from the - # cortical model (sulcus and curvature), and neuroanatomical convention. 
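
The per-hemisphere steps above (spherical inflation, surface registration, jacobian, average curvature) are all wired the same way: each hemisphere gets its own sub-workflow with an IdentityInterface "inputspec", and the parent workflow fans its lh_*/rh_* fields into the two copies. The sketch below illustrates only that wiring pattern; it substitutes placeholder Function nodes (a hypothetical _passthrough helper) for the FreeSurfer interfaces so the graph can be built without FreeSurfer, and the node and field names are illustrative rather than the ones used here.

    from nipype.interfaces.utility import Function, IdentityInterface
    from nipype.pipeline import engine as pe


    def _passthrough(in_file):
        # stand-in for a FreeSurfer interface: one input file, one output file
        return in_file


    def make_hemi_wf(hemisphere):
        # per-hemisphere sub-workflow with its own inputspec/outputspec
        wf = pe.Workflow(name="hemi_{0}".format(hemisphere))
        inputspec = pe.Node(
            IdentityInterface(fields=['inflated', 'smoothwm', 'sulc']),
            name="inputspec")
        sphere = pe.Node(
            Function(['in_file'], ['out_file'], _passthrough),
            name="Spherical_Inflation")
        surfreg = pe.Node(
            Function(['in_file'], ['out_file'], _passthrough),
            name="Surface_Registration")
        outputspec = pe.Node(
            IdentityInterface(fields=['sphere', 'sphere_reg']),
            name="outputspec")
        wf.connect([
            (inputspec, sphere, [('inflated', 'in_file')]),
            (sphere, surfreg, [('out_file', 'in_file')]),
            (sphere, outputspec, [('out_file', 'sphere')]),
            (surfreg, outputspec, [('out_file', 'sphere_reg')]),
        ])
        return wf


    parent = pe.Workflow(name="AutoRecon3_sketch")
    parent_inputs = pe.Node(
        IdentityInterface(fields=['lh_inflated', 'rh_inflated']),
        name="inputspec")
    for hemi in ('lh', 'rh'):
        hemi_wf = make_hemi_wf(hemi)
        # fan the parent's lh_/rh_ inputs into the matching sub-workflow
        parent.connect([(parent_inputs, hemi_wf,
                         [('{0}_inflated'.format(hemi), 'inputspec.inflated')])])

Connecting parent fields to 'inputspec.<field>' targets on a sub-workflow is the same idiom the AutoRecon workflows use to share one set of inputs across both hemispheres.
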
- - ar3_parcellation = pe.Node(MRIsCALabel(), "Cortical_Parcellation") - ar3_parcellation.inputs.seed = 1234 - ar3_parcellation.inputs.hemisphere = hemisphere - ar3_parcellation.inputs.copy_inputs = True - ar3_parcellation.inputs.out_file = "{0}.aparc.annot".format(hemisphere) - if plugin_args: - ar3_parcellation.plugin_args = plugin_args - hemi_wf.connect( - [(hemi_inputspec1, ar3_parcellation, - [('smoothwm', 'smoothwm'), ('cortex_label', 'label'), - ('aseg_presurf', 'aseg'), ('classifier', 'classifier'), - ('curv', 'curv'), ('sulc', 'sulc'), ('num_threads', - 'num_threads')]), - (ar3_surfreg, ar3_parcellation, [('out_file', 'canonsurf')])]) - - # Pial Surface - - ar3_pial = pe.Node(MakeSurfaces(), name="Make_Pial_Surface") - ar3_pial.inputs.mgz = True - ar3_pial.inputs.hemisphere = hemisphere - ar3_pial.inputs.copy_inputs = True - - if fsvernum < 6: - ar3_pial.inputs.white = 'NOWRITE' - hemi_wf.connect(hemi_inputspec1, 'white', ar3_pial, 'in_white') - else: - ar3_pial.inputs.no_white = True - hemi_wf.connect([(hemi_inputspec1, ar3_pial, - [('white', 'orig_pial'), ('white', - 'orig_white')])]) - - hemi_wf.connect( - [(hemi_inputspec1, ar3_pial, - [('wm', 'in_wm'), ('orig', 'in_orig'), ('filled', 'in_filled'), - ('brain_finalsurfs', 'in_T1'), ('aseg_presurf', 'in_aseg')]), - (ar3_parcellation, ar3_pial, [('out_file', 'in_label')])]) - - # Surface Volume - """ - Creates the ?h.volume file by first creating the ?h.mid.area file by - adding ?h.area(.white) to ?h.area.pial, then dividing by two. Then ?h.volume - is created by multiplying ?.mid.area with ?h.thickness. - """ - - ar3_add = pe.Node(MRIsCalc(), name="Add_Pial_Area") - ar3_add.inputs.action = "add" - ar3_add.inputs.out_file = '{0}.area.mid'.format(hemisphere) - hemi_wf.connect([ - (ar3_pial, ar3_add, [('out_area', 'in_file2')]), - (hemi_inputspec1, ar3_add, [('area', 'in_file1')]), - ]) - - ar3_divide = pe.Node(MRIsCalc(), name="Mid_Pial") - ar3_divide.inputs.action = "div" - ar3_divide.inputs.in_int = 2 - ar3_divide.inputs.out_file = '{0}.area.mid'.format(hemisphere) - hemi_wf.connect([ - (ar3_add, ar3_divide, [('out_file', 'in_file1')]), - ]) - - ar3_volume = pe.Node(MRIsCalc(), name="Calculate_Volume") - ar3_volume.inputs.action = "mul" - ar3_volume.inputs.out_file = '{0}.volume'.format(hemisphere) - hemi_wf.connect([ - (ar3_divide, ar3_volume, [('out_file', 'in_file1')]), - (ar3_pial, ar3_volume, [('out_thickness', 'in_file2')]), - ]) - - # Connect the inputs - ar3_wf.connect( - [(inputspec, hemi_wf, - [('{0}_inflated'.format(hemisphere), 'inputspec.inflated'), - ('{0}_smoothwm'.format(hemisphere), - 'inputspec.smoothwm'), ('{0}_white'.format(hemisphere), - 'inputspec.white'), - ('{0}_cortex_label'.format(hemisphere), - 'inputspec.cortex_label'), ('{0}_orig'.format(hemisphere), - 'inputspec.orig'), - ('{0}_sulc'.format(hemisphere), - 'inputspec.sulc'), ('{0}_area'.format(hemisphere), - 'inputspec.area'), - ('{0}_curv'.format(hemisphere), - 'inputspec.curv'), ('aseg_presurf', 'inputspec.aseg_presurf'), - ('brain_finalsurfs', - 'inputspec.brain_finalsurfs'), ('wm', 'inputspec.wm'), - ('filled', 'inputspec.filled'), ('{0}_atlas'.format(hemisphere), - 'inputspec.atlas'), - ('{0}_classifier1'.format(hemisphere), - 'inputspec.classifier'), ('num_threads', - 'inputspec.num_threads')])]) - - # Workflow1 Outputs - hemi_outputs1 = [ - 'sphere', 'sphere_reg', 'jacobian_white', 'avg_curv', - 'aparc_annot', 'area_pial', 'curv_pial', 'pial', 'thickness_pial', - 'area_mid', 'volume' - ] - hemi_outputspec1 = pe.Node( - 
IdentityInterface(fields=hemi_outputs1), name="outputspec") - hemi_wf.connect([(ar3_pial, hemi_outputspec1, [ - ('out_pial', 'pial'), ('out_curv', 'curv_pial'), - ('out_area', 'area_pial'), ('out_thickness', 'thickness_pial') - ]), (ar3_divide, hemi_outputspec1, - [('out_file', 'area_mid')]), (ar3_volume, hemi_outputspec1, - [('out_file', 'volume')]), - (ar3_parcellation, hemi_outputspec1, - [('out_file', 'aparc_annot')]), - (ar3_jacobian, hemi_outputspec1, - [('out_file', - 'jacobian_white')]), (ar3_paint, hemi_outputspec1, - [('out_file', 'avg_curv')]), - (ar3_surfreg, hemi_outputspec1, - [('out_file', - 'sphere_reg')]), (ar3_sphere, hemi_outputspec1, - [('out_file', 'sphere')])]) - - # Cortical Ribbon Mask - """ - Creates binary volume masks of the cortical ribbon - ie, each voxel is either a 1 or 0 depending upon whether it falls in the ribbon or not. - """ - volume_mask = pe.Node(VolumeMask(), name="Mask_Ribbon") - volume_mask.inputs.left_whitelabel = 2 - volume_mask.inputs.left_ribbonlabel = 3 - volume_mask.inputs.right_whitelabel = 41 - volume_mask.inputs.right_ribbonlabel = 42 - volume_mask.inputs.save_ribbon = True - volume_mask.inputs.copy_inputs = True - - ar3_wf.connect([ - (inputspec, volume_mask, [('lh_white', 'lh_white'), ('rh_white', - 'rh_white')]), - (ar3_lh_wf1, volume_mask, [('outputspec.pial', 'lh_pial')]), - (ar3_rh_wf1, volume_mask, [('outputspec.pial', 'rh_pial')]), - ]) - - if fsvernum >= 6: - ar3_wf.connect([(inputspec, volume_mask, [('aseg_presurf', - 'in_aseg')])]) - else: - ar3_wf.connect([(inputspec, volume_mask, [('aseg_presurf', 'aseg')])]) - - ar3_lh_wf2 = pe.Workflow(name="AutoRecon3_Left_2") - ar3_rh_wf2 = pe.Workflow(name="AutoRecon3_Right_2") - - for hemisphere, hemiwf2 in [('lh', ar3_lh_wf2), ('rh', ar3_rh_wf2)]: - if hemisphere == 'lh': - hemiwf1 = ar3_lh_wf1 - else: - hemiwf1 = ar3_rh_wf1 - - hemi_inputs2 = [ - 'wm', - 'lh_white', - 'rh_white', - 'transform', - 'brainmask', - 'aseg_presurf', - 'cortex_label', - 'lh_pial', - 'rh_pial', - 'thickness', - 'aparc_annot', - 'ribbon', - 'smoothwm', - 'sphere_reg', - 'orig_mgz', - 'rawavg', - 'curv', - 'sulc', - 'classifier2', - 'classifier3', - ] - - hemi_inputspec2 = pe.Node( - IdentityInterface(fields=hemi_inputs2), name="inputspec") - - # Parcellation Statistics - """ - Runs mris_anatomical_stats to create a summary table of cortical parcellation statistics for each structure, including - structure name - number of vertices - total surface area (mm^2) - total gray matter volume (mm^3) - average cortical thickness (mm) - standard error of cortical thicknessr (mm) - integrated rectified mean curvature - integrated rectified Gaussian curvature - folding index - intrinsic curvature index. 
- """ - parcellation_stats_white = pe.Node( - ParcellationStats(), - name="Parcellation_Stats_{0}_White".format(hemisphere)) - parcellation_stats_white.inputs.mgz = True - parcellation_stats_white.inputs.th3 = th3 - parcellation_stats_white.inputs.tabular_output = True - parcellation_stats_white.inputs.surface = 'white' - parcellation_stats_white.inputs.hemisphere = hemisphere - parcellation_stats_white.inputs.out_color = 'aparc.annot.ctab' - parcellation_stats_white.inputs.out_table = '{0}.aparc.stats'.format( - hemisphere) - parcellation_stats_white.inputs.copy_inputs = True - - hemiwf2.connect([ - (hemi_inputspec2, parcellation_stats_white, [ - ('wm', 'wm'), - ('lh_white', 'lh_white'), - ('rh_white', 'rh_white'), - ('transform', 'transform'), - ('brainmask', 'brainmask'), - ('aseg_presurf', 'aseg'), - ('cortex_label', 'in_cortex'), - ('cortex_label', 'cortex_label'), - ('lh_pial', 'lh_pial'), - ('rh_pial', 'rh_pial'), - ('thickness', 'thickness'), - ('aparc_annot', 'in_annotation'), - ('ribbon', 'ribbon'), - ]), - ]) - - parcellation_stats_pial = pe.Node( - ParcellationStats(), - name="Parcellation_Stats_{0}_Pial".format(hemisphere)) - parcellation_stats_pial.inputs.mgz = True - parcellation_stats_pial.inputs.th3 = th3 - parcellation_stats_pial.inputs.tabular_output = True - parcellation_stats_pial.inputs.surface = 'pial' - parcellation_stats_pial.inputs.hemisphere = hemisphere - parcellation_stats_pial.inputs.copy_inputs = True - parcellation_stats_pial.inputs.out_color = 'aparc.annot.ctab' - parcellation_stats_pial.inputs.out_table = '{0}.aparc.pial.stats'.format( - hemisphere) - - hemiwf2.connect([ - (hemi_inputspec2, parcellation_stats_pial, [ - ('wm', 'wm'), - ('lh_white', 'lh_white'), - ('rh_white', 'rh_white'), - ('transform', 'transform'), - ('brainmask', 'brainmask'), - ('aseg_presurf', 'aseg'), - ('cortex_label', 'cortex_label'), - ('cortex_label', 'in_cortex'), - ('lh_pial', 'lh_pial'), - ('rh_pial', 'rh_pial'), - ('thickness', 'thickness'), - ('aparc_annot', 'in_annotation'), - ('ribbon', 'ribbon'), - ]), - ]) - - # Cortical Parcellation 2 - cortical_parcellation_2 = pe.Node( - MRIsCALabel(), - name="Cortical_Parcellation_{0}_2".format(hemisphere)) - cortical_parcellation_2.inputs.out_file = '{0}.aparc.a2009s.annot'.format( - hemisphere) - cortical_parcellation_2.inputs.seed = 1234 - cortical_parcellation_2.inputs.copy_inputs = True - cortical_parcellation_2.inputs.hemisphere = hemisphere - - hemiwf2.connect([(hemi_inputspec2, cortical_parcellation_2, - [('smoothwm', 'smoothwm'), ('aseg_presurf', 'aseg'), - ('cortex_label', 'label'), ('sphere_reg', - 'canonsurf'), ('curv', - 'curv'), - ('sulc', 'sulc'), ('classifier2', 'classifier')])]) - - # Parcellation Statistics 2 - parcellation_stats_white_2 = parcellation_stats_white.clone( - name="Parcellation_Statistics_{0}_2".format(hemisphere)) - parcellation_stats_white_2.inputs.hemisphere = hemisphere - parcellation_stats_white_2.inputs.out_color = 'aparc.annot.a2009s.ctab' - parcellation_stats_white_2.inputs.out_table = '{0}.aparc.a2009s.stats'.format( - hemisphere) - hemiwf2.connect([(hemi_inputspec2, parcellation_stats_white_2, [ - ('wm', 'wm'), - ('lh_white', 'lh_white'), - ('rh_white', 'rh_white'), - ('transform', 'transform'), - ('brainmask', 'brainmask'), - ('aseg_presurf', 'aseg'), - ('cortex_label', 'cortex_label'), - ('cortex_label', 'in_cortex'), - ('lh_pial', 'lh_pial'), - ('rh_pial', 'rh_pial'), - ('thickness', 'thickness'), - ('ribbon', 'ribbon'), - ]), (cortical_parcellation_2, parcellation_stats_white_2, - 
[('out_file', 'in_annotation')])]) - - # Cortical Parcellation 3 - cortical_parcellation_3 = pe.Node( - MRIsCALabel(), - name="Cortical_Parcellation_{0}_3".format(hemisphere)) - cortical_parcellation_3.inputs.out_file = '{0}.aparc.DKTatlas40.annot'.format( - hemisphere) - cortical_parcellation_3.inputs.hemisphere = hemisphere - cortical_parcellation_3.inputs.seed = 1234 - cortical_parcellation_3.inputs.copy_inputs = True - hemiwf2.connect([(hemi_inputspec2, cortical_parcellation_3, - [('smoothwm', 'smoothwm'), ('aseg_presurf', 'aseg'), - ('cortex_label', 'label'), ('sphere_reg', - 'canonsurf'), ('curv', - 'curv'), - ('sulc', 'sulc'), ('classifier3', 'classifier')])]) - - # Parcellation Statistics 3 - parcellation_stats_white_3 = parcellation_stats_white.clone( - name="Parcellation_Statistics_{0}_3".format(hemisphere)) - parcellation_stats_white_3.inputs.out_color = 'aparc.annot.DKTatlas40.ctab' - parcellation_stats_white_3.inputs.out_table = '{0}.aparc.DKTatlas40.stats'.format( - hemisphere) - parcellation_stats_white_3.inputs.hemisphere = hemisphere - - hemiwf2.connect([(hemi_inputspec2, parcellation_stats_white_3, [ - ('wm', 'wm'), - ('lh_white', 'lh_white'), - ('rh_white', 'rh_white'), - ('transform', 'transform'), - ('brainmask', 'brainmask'), - ('aseg_presurf', 'aseg'), - ('cortex_label', 'cortex_label'), - ('cortex_label', 'in_cortex'), - ('lh_pial', 'lh_pial'), - ('rh_pial', 'rh_pial'), - ('thickness', 'thickness'), - ('ribbon', 'ribbon'), - ]), (cortical_parcellation_3, parcellation_stats_white_3, - [('out_file', 'in_annotation')])]) - - # WM/GM Contrast - contrast = pe.Node( - Contrast(), name="WM_GM_Contrast_{0}".format(hemisphere)) - contrast.inputs.hemisphere = hemisphere - contrast.inputs.copy_inputs = True - - hemiwf2.connect([ - (hemi_inputspec2, contrast, [ - ('orig_mgz', 'orig'), - ('rawavg', 'rawavg'), - ('{0}_white'.format(hemisphere), 'white'), - ('cortex_label', 'cortex'), - ('aparc_annot', 'annotation'), - ('thickness', 'thickness'), - ]), - ]) - - hemi_outputs2 = [ - 'aparc_annot_ctab', - 'aparc_stats', - 'aparc_pial_stats', - 'aparc_a2009s_annot', - 'aparc_a2009s_annot_ctab', - 'aparc_a2009s_annot_stats', - 'aparc_DKTatlas40_annot', - 'aparc_DKTatlas40_annot_ctab', - 'aparc_DKTatlas40_annot_stats', - 'wg_pct_mgh', - 'wg_pct_stats', - 'pctsurfcon_log', - ] - hemi_outputspec2 = pe.Node( - IdentityInterface(fields=hemi_outputs2), name="outputspec") - - hemiwf2.connect([ - (contrast, hemi_outputspec2, - [('out_contrast', 'wg_pct_mgh'), ('out_stats', 'wg_pct_stats'), - ('out_log', 'pctsurfcon_log')]), - (parcellation_stats_white_3, hemi_outputspec2, - [('out_color', 'aparc_DKTatlas40_annot_ctab'), - ('out_table', 'aparc_DKTatlas40_annot_stats')]), - (cortical_parcellation_3, hemi_outputspec2, - [('out_file', 'aparc_DKTatlas40_annot')]), - (parcellation_stats_white_2, hemi_outputspec2, - [('out_color', 'aparc_a2009s_annot_ctab'), - ('out_table', 'aparc_a2009s_annot_stats')]), - (cortical_parcellation_2, hemi_outputspec2, - [('out_file', 'aparc_a2009s_annot')]), - (parcellation_stats_white, hemi_outputspec2, - [('out_color', 'aparc_annot_ctab'), ('out_table', - 'aparc_stats')]), - (parcellation_stats_pial, hemi_outputspec2, - [('out_table', 'aparc_pial_stats')]), - ]) - # connect inputs to hemisphere2 workflow - ar3_wf.connect([ - (inputspec, hemiwf2, [ - ('wm', 'inputspec.wm'), - ('lh_white', 'inputspec.lh_white'), - ('rh_white', 'inputspec.rh_white'), - ('transform', 'inputspec.transform'), - ('brainmask', 'inputspec.brainmask'), - ('aseg_presurf', 
'inputspec.aseg_presurf'), - ('{0}_cortex_label'.format(hemisphere), - 'inputspec.cortex_label'), - ('{0}_smoothwm'.format(hemisphere), 'inputspec.smoothwm'), - ('orig_mgz', 'inputspec.orig_mgz'), - ('rawavg', 'inputspec.rawavg'), - ('{0}_curv'.format(hemisphere), 'inputspec.curv'), - ('{0}_sulc'.format(hemisphere), 'inputspec.sulc'), - ('{0}_classifier2'.format(hemisphere), - 'inputspec.classifier2'), - ('{0}_classifier3'.format(hemisphere), - 'inputspec.classifier3'), - ]), - (ar3_lh_wf1, hemiwf2, [('outputspec.pial', 'inputspec.lh_pial')]), - (ar3_rh_wf1, hemiwf2, [('outputspec.pial', 'inputspec.rh_pial')]), - (hemiwf1, hemiwf2, - [('outputspec.thickness_pial', 'inputspec.thickness'), - ('outputspec.aparc_annot', 'inputspec.aparc_annot'), - ('outputspec.sphere_reg', 'inputspec.sphere_reg')]), - (volume_mask, hemiwf2, [('out_ribbon', 'inputspec.ribbon')]), - ]) - # End hemisphere2 workflow - - # APARC to ASEG - # Adds information from the ribbon into the aseg.mgz (volume parcellation). - aparc_2_aseg = pe.Node(Aparc2Aseg(), name="Aparc2Aseg") - aparc_2_aseg.inputs.volmask = True - aparc_2_aseg.inputs.copy_inputs = True - aparc_2_aseg.inputs.out_file = "aparc+aseg.mgz" - ar3_wf.connect([(inputspec, aparc_2_aseg, [ - ('lh_white', 'lh_white'), - ('rh_white', 'rh_white'), - ]), (ar3_lh_wf1, aparc_2_aseg, [ - ('outputspec.pial', 'lh_pial'), - ('outputspec.aparc_annot', 'lh_annotation'), - ]), (ar3_rh_wf1, aparc_2_aseg, [ - ('outputspec.pial', 'rh_pial'), - ('outputspec.aparc_annot', 'rh_annotation'), - ]), (volume_mask, aparc_2_aseg, [ - ('rh_ribbon', 'rh_ribbon'), - ('lh_ribbon', 'lh_ribbon'), - ('out_ribbon', 'ribbon'), - ])]) - if fsvernum < 6: - ar3_wf.connect([(inputspec, aparc_2_aseg, [('aseg_presurf', 'aseg')])]) - else: - # Relabel Hypointensities - relabel_hypos = pe.Node( - RelabelHypointensities(), name="Relabel_Hypointensities") - relabel_hypos.inputs.out_file = 'aseg.presurf.hypos.mgz' - ar3_wf.connect([(inputspec, relabel_hypos, - [('aseg_presurf', 'aseg'), ('lh_white', 'lh_white'), - ('rh_white', 'rh_white')])]) - ar3_wf.connect([(relabel_hypos, aparc_2_aseg, [('out_file', 'aseg')])]) - - aparc_2_aseg_2009 = pe.Node(Aparc2Aseg(), name="Aparc2Aseg_2009") - aparc_2_aseg_2009.inputs.volmask = True - aparc_2_aseg_2009.inputs.a2009s = True - aparc_2_aseg_2009.inputs.copy_inputs = True - aparc_2_aseg_2009.inputs.out_file = "aparc.a2009s+aseg.mgz" - ar3_wf.connect([(inputspec, aparc_2_aseg_2009, [ - ('lh_white', 'lh_white'), - ('rh_white', 'rh_white'), - ]), (ar3_lh_wf1, aparc_2_aseg_2009, [ - ('outputspec.pial', 'lh_pial'), - ]), (ar3_lh_wf2, aparc_2_aseg_2009, [('outputspec.aparc_a2009s_annot', - 'lh_annotation')]), - (ar3_rh_wf2, aparc_2_aseg_2009, - [('outputspec.aparc_a2009s_annot', - 'rh_annotation')]), (ar3_rh_wf1, aparc_2_aseg_2009, [ - ('outputspec.pial', 'rh_pial'), - ]), (volume_mask, aparc_2_aseg_2009, - [('rh_ribbon', 'rh_ribbon'), - ('lh_ribbon', 'lh_ribbon'), ('out_ribbon', - 'ribbon')])]) - - if fsvernum >= 6: - apas_2_aseg = pe.Node(Apas2Aseg(), name="Apas_2_Aseg") - ar3_wf.connect([(aparc_2_aseg, apas_2_aseg, [('out_file', 'in_file')]), - (relabel_hypos, aparc_2_aseg_2009, [('out_file', - 'aseg')])]) - else: - # aseg.mgz gets edited in place, so we'll copy and pass it to the - # outputspec once aparc_2_aseg has completed - def out_aseg(in_aparcaseg, in_aseg, out_file): - import shutil - import os - out_file = os.path.abspath(out_file) - shutil.copy(in_aseg, out_file) - return out_file - - apas_2_aseg = pe.Node( - Function(['in_aparcaseg', 'in_aseg', 'out_file'], 
['out_file'], - out_aseg), - name="Aseg") - ar3_wf.connect( - [(aparc_2_aseg, apas_2_aseg, [('out_file', 'in_aparcaseg')]), - (inputspec, apas_2_aseg, [('aseg_presurf', 'in_aseg')]), - (inputspec, aparc_2_aseg_2009, [('aseg_presurf', 'aseg')])]) - - apas_2_aseg.inputs.out_file = "aseg.mgz" - - # Segmentation Stats - """ - Computes statistics on the segmented subcortical structures found in - mri/aseg.mgz. Writes output to file stats/aseg.stats. - """ - - segstats = pe.Node(SegStatsReconAll(), name="Segmentation_Statistics") - segstats.inputs.empty = True - segstats.inputs.brain_vol = 'brain-vol-from-seg' - segstats.inputs.exclude_ctx_gm_wm = True - segstats.inputs.supratent = True - segstats.inputs.subcort_gm = True - segstats.inputs.etiv = True - segstats.inputs.wm_vol_from_surf = True - segstats.inputs.cortex_vol_from_surf = True - segstats.inputs.total_gray = True - segstats.inputs.euler = True - segstats.inputs.exclude_id = 0 - segstats.inputs.intensity_units = "MR" - segstats.inputs.summary_file = 'aseg.stats' - segstats.inputs.copy_inputs = True - - ar3_wf.connect([ - (apas_2_aseg, segstats, [('out_file', 'segmentation_file')]), - (inputspec, segstats, [ - ('lh_white', 'lh_white'), - ('rh_white', 'rh_white'), - ('transform', 'transform'), - ('norm', 'in_intensity'), - ('norm', 'partial_volume_file'), - ('brainmask', 'brainmask_file'), - ('lh_orig_nofix', 'lh_orig_nofix'), - ('rh_orig_nofix', 'rh_orig_nofix'), - ('lookup_table', 'color_table_file'), - ]), - (volume_mask, segstats, [('out_ribbon', 'ribbon')]), - (ar3_lh_wf1, segstats, [ - ('outputspec.pial', 'lh_pial'), - ]), - (ar3_rh_wf1, segstats, [ - ('outputspec.pial', 'rh_pial'), - ]), - ]) - - if fsvernum >= 6: - ar3_wf.connect(inputspec, 'aseg_presurf', segstats, 'presurf_seg') - else: - ar3_wf.connect(inputspec, 'aseg_presurf', segstats, 'aseg') - - # White Matter Parcellation - - # Adds WM Parcellation info into the aseg and computes stat. 
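
The out_aseg helper above is the standard nipype idiom for turning a small piece of Python into a pipeline node: the function is wrapped in a Function interface, and because nipype stores the function's source and executes it when the node runs, any modules the function needs must be imported inside its body. A minimal, self-contained sketch of that pattern follows; the node and file names are hypothetical.

    from nipype.interfaces.utility import Function
    from nipype.pipeline import engine as pe


    def copy_to(in_file, out_file):
        # imports live inside the function: only its source is carried
        # to the node's working directory at run time
        import os
        import shutil
        out_file = os.path.abspath(out_file)
        shutil.copy(in_file, out_file)
        return out_file


    copy_node = pe.Node(
        Function(['in_file', 'out_file'], ['out_file'], copy_to),
        name="Copy_Aseg_Sketch")  # hypothetical node name
    copy_node.inputs.out_file = 'aseg.mgz'
    # in_file is normally supplied by a workflow connection, e.g.
    #   wf.connect(aparc_2_aseg, 'out_file', copy_node, 'in_file')

In the workflow above this pattern is only needed on the FreeSurfer < 6 branch, where aseg.mgz is edited in place and a stable copy has to be handed to the outputspec.
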
- - wm_parcellation = pe.Node(Aparc2Aseg(), name="WM_Parcellation") - wm_parcellation.inputs.volmask = True - wm_parcellation.inputs.label_wm = True - wm_parcellation.inputs.hypo_wm = True - wm_parcellation.inputs.rip_unknown = True - wm_parcellation.inputs.copy_inputs = True - wm_parcellation.inputs.out_file = "wmparc.mgz" - - ar3_wf.connect([(inputspec, wm_parcellation, [ - ('lh_white', 'lh_white'), - ('rh_white', 'rh_white'), - ]), (ar3_lh_wf1, wm_parcellation, [ - ('outputspec.pial', 'lh_pial'), - ('outputspec.aparc_annot', 'lh_annotation'), - ]), (ar3_rh_wf1, wm_parcellation, [ - ('outputspec.pial', 'rh_pial'), - ('outputspec.aparc_annot', 'rh_annotation'), - ]), (volume_mask, wm_parcellation, [ - ('rh_ribbon', 'rh_ribbon'), - ('lh_ribbon', 'lh_ribbon'), - ('out_ribbon', 'ribbon'), - ]), (apas_2_aseg, wm_parcellation, [('out_file', 'aseg')]), - (aparc_2_aseg, wm_parcellation, [('out_file', 'ctxseg')])]) - - if fsvernum < 6: - ar3_wf.connect([(inputspec, wm_parcellation, [('filled', 'filled')])]) - - # White Matter Segmentation Stats - - wm_segstats = pe.Node( - SegStatsReconAll(), name="WM_Segmentation_Statistics") - wm_segstats.inputs.intensity_units = "MR" - wm_segstats.inputs.wm_vol_from_surf = True - wm_segstats.inputs.etiv = True - wm_segstats.inputs.copy_inputs = True - wm_segstats.inputs.exclude_id = 0 - wm_segstats.inputs.summary_file = "wmparc.stats" - - ar3_wf.connect([ - (wm_parcellation, wm_segstats, [('out_file', 'segmentation_file')]), - (inputspec, wm_segstats, [ - ('lh_white', 'lh_white'), - ('rh_white', 'rh_white'), - ('transform', 'transform'), - ('norm', 'in_intensity'), - ('norm', 'partial_volume_file'), - ('brainmask', 'brainmask_file'), - ('lh_orig_nofix', 'lh_orig_nofix'), - ('rh_orig_nofix', 'rh_orig_nofix'), - ('wm_lookup_table', 'color_table_file'), - ]), - (volume_mask, wm_segstats, [('out_ribbon', 'ribbon')]), - (ar3_lh_wf1, wm_segstats, [ - ('outputspec.pial', 'lh_pial'), - ]), - (ar3_rh_wf1, wm_segstats, [ - ('outputspec.pial', 'rh_pial'), - ]), - ]) - - if fsvernum >= 6: - ar3_wf.connect(inputspec, 'aseg_presurf', wm_segstats, 'presurf_seg') - else: - ar3_wf.connect(inputspec, 'aseg_presurf', wm_segstats, 'aseg') - - # add brodman area maps to the workflow - ba_WF, ba_outputs = create_ba_maps_wf( - th3=th3, exvivo=exvivo, entorhinal=entorhinal) - - ar3_wf.connect([(ar3_lh_wf1, ba_WF, [ - ('outputspec.sphere_reg', 'inputspec.lh_sphere_reg'), - ('outputspec.thickness_pial', 'inputspec.lh_thickness'), - ('outputspec.pial', 'inputspec.lh_pial'), - ]), (ar3_rh_wf1, ba_WF, [ - ('outputspec.sphere_reg', 'inputspec.rh_sphere_reg'), - ('outputspec.thickness_pial', 'inputspec.rh_thickness'), - ('outputspec.pial', 'inputspec.rh_pial'), - ]), (inputspec, ba_WF, [ - ('lh_white', 'inputspec.lh_white'), - ('rh_white', 'inputspec.rh_white'), - ('transform', 'inputspec.transform'), - ('aseg_presurf', 'inputspec.aseg'), - ('brainmask', 'inputspec.brainmask'), - ('wm', 'inputspec.wm'), - ('lh_orig', 'inputspec.lh_orig'), - ('rh_orig', 'inputspec.rh_orig'), - ('lh_cortex_label', 'inputspec.lh_cortex_label'), - ('rh_cortex_label', 'inputspec.rh_cortex_label'), - ('src_subject_dir', 'inputspec.src_subject_dir'), - ('src_subject_id', 'inputspec.src_subject_id'), - ('color_table', 'inputspec.color_table'), - ]), (volume_mask, ba_WF, [('out_ribbon', 'inputspec.ribbon')])]) - - if qcache: - source_inputs = ['lh_sphere_reg', 'rh_sphere_reg'] - source_subject = pe.Node( - DataGrabber(outfields=source_inputs), - name="{0}_srcsubject".format(hemisphere)) - 
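
The source_subject DataGrabber created here is configured in the lines that follow (template, sort_filelist and field_template). As a standalone illustration of how such a grabber resolves files under a source-subject directory, here is a minimal sketch; the base directory is set to the current working directory only so the example constructs, whereas the workflow above connects the source subject directory from its inputspec, and the node name is hypothetical.

    import os

    from nipype.interfaces.io import DataGrabber
    from nipype.pipeline import engine as pe

    grabber = pe.Node(
        DataGrabber(outfields=['lh_sphere_reg', 'rh_sphere_reg']),
        name="srcsubject_sketch")
    # base_directory must exist; in the real workflow it comes from a connection
    grabber.inputs.base_directory = os.getcwd()
    grabber.inputs.template = '*'
    grabber.inputs.sort_filelist = False
    # one template per requested output field, relative to base_directory
    grabber.inputs.field_template = dict(
        lh_sphere_reg='surf/lh.sphere.reg',
        rh_sphere_reg='surf/rh.sphere.reg')
    # running the node would expose the resolved paths as
    # outputs.lh_sphere_reg and outputs.rh_sphere_reg
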
source_subject.inputs.template = '*' - source_subject.inputs.sort_filelist = False - source_subject.inputs.field_template = dict( - lh_sphere_reg='surf/lh.sphere.reg', - rh_sphere_reg='surf/rh.sphere.reg') - - qcache_wf = pe.Workflow("QCache") - - measurements = [ - 'thickness', 'area', 'area.pial', 'volume', 'curv', 'sulc', - 'white.K', 'white.H', 'jacobian_white', 'w-g.pct.mgh' - ] - - qcache_inputs = list() - for source_file in source_inputs: - qcache_inputs.append('source_' + source_file) - qcache_config = dict() - qcache_outputs = list() - for hemisphere in ['lh', 'rh']: - qcache_config[hemisphere] = dict() - for meas_name in measurements: - qcache_config[hemisphere][meas_name] = dict() - - if meas_name == 'thickness': - meas_file = hemisphere + '_' + meas_name + '_pial' - else: - meas_file = hemisphere + '_' + meas_name.replace( - '.', '_').replace('-', '') - qcache_inputs.append(meas_file) - - preproc_name = "Preproc_{0}".format(meas_file) - preproc_out = '{0}.{1}.{2}.mgh'.format( - hemisphere, meas_name, config['src_subject_id']) - preproc_out_name = preproc_out.replace('.', '_') - qcache_config[hemisphere][meas_name]['preproc'] = dict( - infile=meas_file, - name=preproc_name, - out=preproc_out, - out_name=preproc_out_name) - qcache_outputs.append(preproc_out_name) - - qcache_config[hemisphere][meas_name]['smooth'] = dict() - for value in range(0, 26, 5): - smooth_name = "Smooth_{0}_{1}".format(meas_file, value) - smooth_out = "{0}.{1}.fwhm{2}.{3}.mgh".format( - hemisphere, meas_name, value, config['src_subject_id']) - smooth_out_name = smooth_out.replace('.', '_') - qcache_config[hemisphere][meas_name]['smooth'][ - value] = dict( - name=smooth_name, - out=smooth_out, - out_name=smooth_out_name) - qcache_outputs.append(smooth_out_name) - - qcache_inputs.append(hemisphere + '_sphere_reg') - - qcache_inputspec = pe.Node( - IdentityInterface(fields=qcache_inputs), name="inputspec") - - qcache_outputspec = pe.Node( - IdentityInterface(fields=qcache_outputs), name="outputspec") - - for hemi in qcache_config.iterkeys(): - for meas_config in qcache_config[hemi].itervalues(): - preprocess = pe.Node( - MRISPreprocReconAll(), name=meas_config['preproc']['name']) - target_id = config['src_subject_id'] - preprocess.inputs.out_file = meas_config['preproc']['out'] - preprocess.inputs.target = target_id - preprocess.inputs.hemi = hemi - preprocess.inputs.copy_inputs = True - - qcache_merge = pe.Node( - Merge(2), - name="Merge{0}".format(meas_config['preproc']['name'])) - - qcache_wf.connect([ - (qcache_inputspec, qcache_merge, - [('lh_sphere_reg', 'in1'), ('rh_sphere_reg', 'in2')]), - (qcache_inputspec, preprocess, - [(meas_config['preproc']['infile'], 'surf_measure_file'), - ('source_lh_sphere_reg', 'lh_surfreg_target'), - ('source_rh_sphere_reg', 'rh_surfreg_target')]), - (qcache_merge, preprocess, [('out', 'surfreg_files')]), - (preprocess, qcache_outputspec, - [('out_file', meas_config['preproc']['out_name'])]), - ]) - - for value, val_config in meas_config['smooth'].iteritems(): - surf2surf = pe.Node( - SurfaceSmooth(), name=val_config['name']) - surf2surf.inputs.fwhm = value - surf2surf.inputs.cortex = True - surf2surf.inputs.subject_id = target_id - surf2surf.inputs.hemi = hemisphere - surf2surf.inputs.out_file = val_config['out'] - qcache_wf.connect( - [(preprocess, surf2surf, [('out_file', 'in_file')]), - (surf2surf, qcache_outputspec, - [('out_file', val_config['out_name'])])]) - - # connect qcache inputs - ar3_wf.connect([ - (inputspec, qcache_wf, - [('lh_curv', 'inputspec.lh_curv'), 
('rh_curv', - 'inputspec.rh_curv'), - ('lh_sulc', 'inputspec.lh_sulc'), ('rh_sulc', - 'inputspec.rh_sulc'), - ('lh_white_K', 'inputspec.lh_white_K'), ('rh_white_K', - 'inputspec.rh_white_K'), - ('lh_area', 'inputspec.lh_area'), ('rh_area', - 'inputspec.rh_area')]), - (ar3_lh_wf1, qcache_wf, - [('outputspec.thickness_pial', 'inputspec.lh_thickness_pial'), - ('outputspec.area_pial', - 'inputspec.lh_area_pial'), ('outputspec.volume', - 'inputspec.lh_volume'), - ('outputspec.jacobian_white', - 'inputspec.lh_jacobian_white'), ('outputspec.sphere_reg', - 'inputspec.lh_sphere_reg')]), - (ar3_lh_wf2, qcache_wf, [('outputspec.wg_pct_mgh', - 'inputspec.lh_wg_pct_mgh')]), - (ar3_rh_wf1, qcache_wf, - [('outputspec.thickness_pial', 'inputspec.rh_thickness_pial'), - ('outputspec.area_pial', - 'inputspec.rh_area_pial'), ('outputspec.volume', - 'inputspec.rh_volume'), - ('outputspec.jacobian_white', - 'inputspec.rh_jacobian_white'), ('outputspec.sphere_reg', - 'inputspec.rh_sphere_reg')]), - (ar3_rh_wf2, qcache_wf, [('outputspec.wg_pct_mgh', - 'inputspec.rh_wg_pct_mgh')]), - ]) - for source_file in source_inputs: - ar3_wf.connect([(inputspec, source_subject, [('source_subject_dir', - 'base_directory')]), - (source_subject, qcache_wf, - [(source_file, - 'inputspec.source_' + source_file)])]) - # end qcache workflow - - # Add outputs to outputspec - ar3_outputs = [ - 'aseg', 'wmparc', 'wmparc_stats', 'aseg_stats', 'aparc_a2009s_aseg', - 'aparc_aseg', 'aseg_presurf_hypos', 'ribbon', 'rh_ribbon', 'lh_ribbon' - ] - for output in hemi_outputs1 + hemi_outputs2: - for hemi in ('lh_', 'rh_'): - ar3_outputs.append(hemi + output) - if qcache: - ar3_outputs.extend(qcache_outputs) - - ar3_outputs.extend(ba_outputs) - - outputspec = pe.Node( - IdentityInterface(fields=ar3_outputs), name="outputspec") - - ar3_wf.connect([(apas_2_aseg, outputspec, - [('out_file', 'aseg')]), (wm_parcellation, outputspec, - [('out_file', 'wmparc')]), - (wm_segstats, outputspec, - [('summary_file', - 'wmparc_stats')]), (segstats, outputspec, - [('summary_file', 'aseg_stats')]), - (aparc_2_aseg_2009, outputspec, - [('out_file', - 'aparc_a2009s_aseg')]), (aparc_2_aseg, outputspec, - [('out_file', 'aparc_aseg')]), - (volume_mask, outputspec, - [('out_ribbon', 'ribbon'), ('lh_ribbon', 'lh_ribbon'), - ('rh_ribbon', 'rh_ribbon')])]) - if fsvernum >= 6: - ar3_wf.connect([(relabel_hypos, outputspec, [('out_file', - 'aseg_presurf_hypos')])]) - - for i, outputs in enumerate([hemi_outputs1, hemi_outputs2]): - if i == 0: - lhwf = ar3_lh_wf1 - rhwf = ar3_rh_wf1 - else: - lhwf = ar3_lh_wf2 - rhwf = ar3_rh_wf2 - for output in outputs: - ar3_wf.connect([(lhwf, outputspec, [('outputspec.' + output, - 'lh_' + output)]), - (rhwf, outputspec, [('outputspec.' + output, - 'rh_' + output)])]) - - for output in ba_outputs: - ar3_wf.connect([(ba_WF, outputspec, [('outputspec.' + output, - output)])]) - - if qcache: - for output in qcache_outputs: - ar3_wf.connect([(qcache_wf, outputspec, [('outputspec.' 
+ output, - output)])]) - - return ar3_wf, ar3_outputs diff --git a/nipype/workflows/smri/freesurfer/ba_maps.py b/nipype/workflows/smri/freesurfer/ba_maps.py deleted file mode 100644 index 8a4ae6caf1..0000000000 --- a/nipype/workflows/smri/freesurfer/ba_maps.py +++ /dev/null @@ -1,172 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import (print_function, division, unicode_literals, - absolute_import) -import os -from ....interfaces.utility import Function, IdentityInterface -from ....pipeline import engine as pe # pypeline engine -from ....interfaces.freesurfer import Label2Label, Label2Annot, ParcellationStats -from ....interfaces.io import DataGrabber -from ....interfaces.utility import Merge - - -def create_ba_maps_wf(name="Brodmann_Area_Maps", - th3=True, - exvivo=True, - entorhinal=True): - # Brodmann Area Maps (BA Maps) and Hinds V1 Atlas - inputs = [ - 'lh_sphere_reg', 'rh_sphere_reg', 'lh_white', 'rh_white', 'lh_pial', - 'rh_pial', 'lh_orig', 'rh_orig', 'transform', 'lh_thickness', - 'rh_thickness', 'lh_cortex_label', 'rh_cortex_label', 'brainmask', - 'aseg', 'ribbon', 'wm', 'src_subject_id', 'src_subject_dir', - 'color_table' - ] - - inputspec = pe.Node(IdentityInterface(fields=inputs), name="inputspec") - - ba_WF = pe.Workflow(name=name) - - ba_outputs = [ - 'lh_BAMaps_stats', 'lh_color', 'lh_BAMaps_labels', - 'lh_BAMaps_annotation', 'lh_thresh_BAMaps_stats', 'lh_thresh_color', - 'lh_thresh_BAMaps_labels', 'lh_thresh_BAMaps_annotation', - 'rh_BAMaps_stats', 'rh_color', 'rh_BAMaps_labels', - 'rh_BAMaps_annotation', 'rh_thresh_BAMaps_stats', 'rh_thresh_color', - 'rh_thresh_BAMaps_labels', 'rh_thresh_BAMaps_annotation' - ] - - outputspec = pe.Node( - IdentityInterface(fields=ba_outputs), name="outputspec") - - labels = [ - "BA1", "BA2", "BA3a", "BA3b", "BA4a", "BA4p", "BA6", "BA44", "BA45", - "V1", "V2", "MT", "perirhinal" - ] - if entorhinal: - labels.insert(-1, 'entorhinal') - for hemisphere in ['lh', 'rh']: - for threshold in [True, False]: - field_template = dict( - sphere_reg='surf/{0}.sphere.reg'.format(hemisphere), - white='surf/{0}.white'.format(hemisphere)) - - out_files = list() - source_fields = list() - if threshold: - for label in labels: - if label == 'perirhinal' and not entorhinal: - # versions < 6.0 do not use thresh.perirhinal - continue - if exvivo: - out_file = '{0}.{1}_exvivo.thresh.label'.format( - hemisphere, label) - else: - out_file = '{0}.{1}.thresh.label'.format( - hemisphere, label) - out_files.append(out_file) - field_template[label] = 'label/' + out_file - source_fields.append(label) - node_name = 'BA_Maps_' + hemisphere + '_Thresh' - else: - for label in labels: - if exvivo: - out_file = '{0}.{1}_exvivo.label'.format( - hemisphere, label) - else: - out_file = '{0}.{1}.label'.format(hemisphere, label) - - out_files.append(out_file) - field_template[label] = 'label/' + out_file - source_fields.append(label) - node_name = 'BA_Maps_' + hemisphere - - source_subject = pe.Node( - DataGrabber(outfields=source_fields + ['sphere_reg', 'white']), - name=node_name + "_srcsubject") - source_subject.inputs.template = '*' - source_subject.inputs.sort_filelist = False - source_subject.inputs.field_template = field_template - ba_WF.connect([(inputspec, source_subject, [('src_subject_dir', - 'base_directory')])]) - - merge_labels = pe.Node( - Merge(len(out_files)), name=node_name + "_Merge") - for i, label in enumerate(source_fields): - ba_WF.connect([(source_subject, merge_labels, - [(label, 'in{0}'.format(i + 1))])]) - - node = pe.MapNode( - Label2Label(), - 
name=node_name + '_Label2Label', - iterfield=['source_label', 'out_file']) - node.inputs.hemisphere = hemisphere - node.inputs.out_file = out_files - node.inputs.copy_inputs = True - - ba_WF.connect( - [(merge_labels, node, [('out', 'source_label')]), - (source_subject, node, [('sphere_reg', 'source_sphere_reg'), - ('white', 'source_white')]), - (inputspec, node, [('src_subject_id', 'source_subject')])]) - - label2annot = pe.Node(Label2Annot(), name=node_name + '_2_Annot') - label2annot.inputs.hemisphere = hemisphere - label2annot.inputs.verbose_off = True - label2annot.inputs.keep_max = True - label2annot.inputs.copy_inputs = True - - stats_node = pe.Node( - ParcellationStats(), name=node_name + '_Stats') - stats_node.inputs.hemisphere = hemisphere - stats_node.inputs.mgz = True - stats_node.inputs.th3 = th3 - stats_node.inputs.surface = 'white' - stats_node.inputs.tabular_output = True - stats_node.inputs.copy_inputs = True - - if threshold: - label2annot.inputs.out_annot = "BA_exvivo.thresh" - ba_WF.connect( - [(stats_node, outputspec, - [('out_color', '{0}_thresh_color'.format(hemisphere)), - ('out_table', - '{0}_thresh_BAMaps_stats'.format(hemisphere))]), - (label2annot, outputspec, - [('out_file', - '{0}_thresh_BAMaps_annotation'.format(hemisphere))]), - (node, outputspec, - [('out_file', - '{0}_thresh_BAMaps_labels'.format(hemisphere))])]) - else: - label2annot.inputs.out_annot = "BA_exvivo" - ba_WF.connect( - [(stats_node, outputspec, - [('out_color', '{0}_color'.format(hemisphere)), - ('out_table', '{0}_BAMaps_stats'.format(hemisphere))]), - (label2annot, outputspec, - [('out_file', - '{0}_BAMaps_annotation'.format(hemisphere))]), - (node, outputspec, - [('out_file', '{0}_BAMaps_labels'.format(hemisphere))])]) - - ba_WF.connect( - [(inputspec, node, [ - ('{0}_sphere_reg'.format(hemisphere), 'sphere_reg'), - ('{0}_white'.format(hemisphere), 'white'), - ]), (node, label2annot, [('out_file', 'in_labels')]), - (inputspec, label2annot, - [('{0}_orig'.format(hemisphere), 'orig'), - ('color_table', 'color_table')]), (label2annot, stats_node, - [('out_file', - 'in_annotation')]), - (inputspec, stats_node, - [('{0}_thickness'.format(hemisphere), - 'thickness'), ('{0}_cortex_label'.format(hemisphere), - 'cortex_label'), ('lh_white', 'lh_white'), - ('rh_white', 'rh_white'), ('lh_pial', 'lh_pial'), - ('rh_pial', 'rh_pial'), ('transform', - 'transform'), ('brainmask', - 'brainmask'), - ('aseg', 'aseg'), ('wm', 'wm'), ('ribbon', 'ribbon')])]) - - return ba_WF, ba_outputs diff --git a/nipype/workflows/smri/freesurfer/bem.py b/nipype/workflows/smri/freesurfer/bem.py deleted file mode 100644 index b959de4852..0000000000 --- a/nipype/workflows/smri/freesurfer/bem.py +++ /dev/null @@ -1,81 +0,0 @@ -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: - -from ....pipeline import engine as pe -from ....interfaces import mne as mne -from ....interfaces import freesurfer as fs -from ....interfaces import utility as niu - - -def create_bem_flow(name='bem', out_format='stl'): - """Uses MNE's Watershed algorithm to create Boundary Element Meshes (BEM) - for a subject's brain, inner/outer skull, and skin. The surfaces are - returned in the desired (by default, stereolithic .stl) format. - - Example - ------- - >>> from nipype.workflows.smri.freesurfer import create_bem_flow - >>> bemflow = create_bem_flow() - >>> bemflow.inputs.inputspec.subject_id = 'subj1' - >>> bemflow.inputs.inputspec.subjects_dir = '.' 
-    >>> bemflow.run() # doctest: +SKIP
-
-
-    Inputs::
-
-           inputspec.subject_id : freesurfer subject id
-           inputspec.subjects_dir : freesurfer subjects directory
-
-    Outputs::
-
-           outputspec.meshes : output boundary element meshes in (by default)
-               stereolithographic (.stl) format
-    """
-    """
-    Initialize the workflow
-    """
-
-    bemflow = pe.Workflow(name=name)
-    """
-    Define the inputs to the workflow.
-    """
-
-    inputnode = pe.Node(
-        niu.IdentityInterface(fields=['subject_id', 'subjects_dir']),
-        name='inputspec')
-    """
-    Define all the nodes of the workflow:
-
-      watershed_bem : runs MNE's watershed algorithm to extract the BEM surfaces
-      surfconvert : converts the surfaces to stereolithographic (.stl) format
-
-    """
-
-    watershed_bem = pe.Node(interface=mne.WatershedBEM(), name='WatershedBEM')
-
-    surfconvert = pe.MapNode(
-        fs.MRIsConvert(out_datatype=out_format),
-        iterfield=['in_file'],
-        name='surfconvert')
-    """
-    Connect the nodes
-    """
-
-    bemflow.connect([
-        (inputnode, watershed_bem, [('subject_id', 'subject_id'),
-                                    ('subjects_dir', 'subjects_dir')]),
-        (watershed_bem, surfconvert, [('mesh_files', 'in_file')]),
-    ])
-    """
-    Set up an outputnode that exposes the relevant outputs of the workflow.
-    """
-
-    outputnode = pe.Node(
-        niu.IdentityInterface(fields=["meshes"]), name="outputspec")
-    bemflow.connect([
-        (surfconvert, outputnode, [("converted", "meshes")]),
-    ])
-    return bemflow
diff --git a/nipype/workflows/smri/freesurfer/recon.py b/nipype/workflows/smri/freesurfer/recon.py
deleted file mode 100644
index f7fa593a49..0000000000
--- a/nipype/workflows/smri/freesurfer/recon.py
+++ /dev/null
@@ -1,604 +0,0 @@
-# -*- coding: utf-8 -*-
-from __future__ import (print_function, division, unicode_literals,
-                        absolute_import)
-from ....pipeline import engine as pe
-from ....interfaces import freesurfer as fs
-from ....interfaces import utility as niu
-from .autorecon1 import create_AutoRecon1
-from .autorecon2 import create_AutoRecon2
-from .autorecon3 import create_AutoRecon3
-from ....interfaces.freesurfer import AddXFormToHeader, Info
-from ....interfaces.io import DataSink
-from .utils import getdefaultconfig
-from .... import logging
-
-logger = logging.getLogger('nipype.workflow')
-
-
-def create_skullstripped_recon_flow(name="skullstripped_recon_all"):
-    """Performs recon-all on volumes that are already skull-stripped.
-    FreeSurfer fails to perform skullstripping on some volumes (especially
-    MP2RAGE). This can be avoided by doing skullstripping before running
-    recon-all (using, for example, the SPECTRE algorithm).
- - Example - ------- - >>> from nipype.workflows.smri.freesurfer import create_skullstripped_recon_flow - >>> recon_flow = create_skullstripped_recon_flow() - >>> recon_flow.inputs.inputspec.subject_id = 'subj1' - >>> recon_flow.inputs.inputspec.T1_files = 'T1.nii.gz' - >>> recon_flow.run() # doctest: +SKIP - - - Inputs:: - inputspec.T1_files : skullstripped T1_files (mandatory) - inputspec.subject_id : freesurfer subject id (optional) - inputspec.subjects_dir : freesurfer subjects directory (optional) - - Outputs:: - - outputspec.subject_id : freesurfer subject id - outputspec.subjects_dir : freesurfer subjects directory - """ - wf = pe.Workflow(name=name) - - inputnode = pe.Node( - niu.IdentityInterface( - fields=['subject_id', 'subjects_dir', 'T1_files']), - name='inputspec') - - autorecon1 = pe.Node(fs.ReconAll(), name="autorecon1") - autorecon1.plugin_args = {'submit_specs': 'request_memory = 2500'} - autorecon1.inputs.directive = "autorecon1" - autorecon1.inputs.args = "-noskullstrip" - autorecon1._interface._can_resume = False - - wf.connect(inputnode, "T1_files", autorecon1, "T1_files") - wf.connect(inputnode, "subjects_dir", autorecon1, "subjects_dir") - wf.connect(inputnode, "subject_id", autorecon1, "subject_id") - - def link_masks(subjects_dir, subject_id): - import os - os.symlink( - os.path.join(subjects_dir, subject_id, "mri", "T1.mgz"), - os.path.join(subjects_dir, subject_id, "mri", - "brainmask.auto.mgz")) - os.symlink( - os.path.join(subjects_dir, subject_id, "mri", - "brainmask.auto.mgz"), - os.path.join(subjects_dir, subject_id, "mri", "brainmask.mgz")) - return subjects_dir, subject_id - - masks = pe.Node( - niu.Function( - input_names=['subjects_dir', 'subject_id'], - output_names=['subjects_dir', 'subject_id'], - function=link_masks), - name="link_masks") - - wf.connect(autorecon1, "subjects_dir", masks, "subjects_dir") - wf.connect(autorecon1, "subject_id", masks, "subject_id") - - autorecon_resume = pe.Node(fs.ReconAll(), name="autorecon_resume") - autorecon_resume.plugin_args = {'submit_specs': 'request_memory = 2500'} - autorecon_resume.inputs.args = "-no-isrunning" - wf.connect(masks, "subjects_dir", autorecon_resume, "subjects_dir") - wf.connect(masks, "subject_id", autorecon_resume, "subject_id") - - outputnode = pe.Node( - niu.IdentityInterface(fields=['subject_id', 'subjects_dir']), - name='outputspec') - - wf.connect(autorecon_resume, "subjects_dir", outputnode, "subjects_dir") - wf.connect(autorecon_resume, "subject_id", outputnode, "subject_id") - return wf - - -def create_reconall_workflow(name="ReconAll", plugin_args=None): - """Creates the ReconAll workflow in Nipype. This workflow is designed to - run the same commands as FreeSurfer's reconall script but with the added - features that a Nipype workflow provides. Before running this workflow, it - is necessary to have the FREESURFER_HOME environmental variable set to the - directory containing the version of FreeSurfer to be used in this workflow. - - Example - ------- - >>> from nipype.workflows.smri.freesurfer import create_reconall_workflow - >>> recon_all = create_reconall_workflow() - >>> recon_all.inputs.inputspec.subject_id = 'subj1' - >>> recon_all.inputs.inputspec.subjects_dir = '.' 
- >>> recon_all.inputs.inputspec.T1_files = 'T1.nii.gz' - >>> recon_all.run() # doctest: +SKIP - - - Inputs:: - inputspec.subjects_dir : subjects directory (mandatory) - inputspec.subject_id : name of subject (mandatory) - inputspec.T1_files : T1 files (mandatory) - inputspec.T2_file : T2 file (optional) - inputspec.FLAIR_file : FLAIR file (optional) - inputspec.cw256 : Conform inputs to 256 FOV (optional) - inputspec.num_threads: Number of threads on nodes that utilize OpenMP (default=1) - plugin_args : Dictionary of plugin args to set to nodes that utilize OpenMP (optional) - Outputs:: - postdatasink_outputspec.subject_id : name of the datasinked output folder in the subjects directory - - Note: - The input subject_id is not passed to the commands in the workflow. Commands - that require subject_id are reading implicit inputs from - {SUBJECTS_DIR}/{subject_id}. For those commands the subject_id is set to the - default value and SUBJECTS_DIR is set to the node directory. The implicit - inputs are then copied to the node directory in order to mimic a SUBJECTS_DIR - structure. For example, if the command implicitly reads in brainmask.mgz, the - interface would copy that input file to - {node_dir}/{subject_id}/mri/brainmask.mgz and set SUBJECTS_DIR to node_dir. - The workflow only uses the input subject_id to datasink the outputs to - {subjects_dir}/{subject_id}. - """ - reconall = pe.Workflow(name=name) - - inputspec = pe.Node( - niu.IdentityInterface(fields=[ - 'subject_id', 'subjects_dir', 'T1_files', 'T2_file', 'FLAIR_file', - 'num_threads', 'cw256', 'reg_template', 'reg_template_withskull', - 'lh_atlas', 'rh_atlas', 'lh_classifier1', 'rh_classifier1', - 'lh_classifier2', 'rh_classifier2', 'lh_classifier3', - 'rh_classifier3', 'lookup_table', 'wm_lookup_table', - 'src_subject_id', 'src_subject_dir', 'color_table', 'awk_file' - ]), - run_without_submitting=True, - name='inputspec') - - # check freesurfer version and set parameters - fs_version_full = Info.version() - if fs_version_full and ('v6.0' in fs_version_full - or 'dev' in fs_version_full): - # assuming that dev is 6.0 - fsvernum = 6.0 - fs_version = 'v6.0' - th3 = True - shrink = 2 - distance = 200 # 3T should be 50 - stop = 0.0001 - exvivo = True - entorhinal = True - rb_date = "2014-08-21" - else: - # 5.3 is default - fsvernum = 5.3 - if fs_version_full: - if 'v5.3' in fs_version_full: - fs_version = 'v5.3' - else: - fs_version = fs_version_full.split('-')[-1] - logger.info(("Warning: Workflow may not work properly if " - "FREESURFER_HOME environmental variable is not " - "set or if you are using an older version of " - "FreeSurfer")) - else: - fs_version = 5.3 # assume version 5.3 - th3 = False - shrink = None - distance = 50 - stop = None - exvivo = False - entorhinal = False - rb_date = "2008-03-26" - - logger.info("FreeSurfer Version: {0}".format(fs_version)) - - def setconfig(reg_template=None, - reg_template_withskull=None, - lh_atlas=None, - rh_atlas=None, - lh_classifier1=None, - rh_classifier1=None, - lh_classifier2=None, - rh_classifier2=None, - lh_classifier3=None, - rh_classifier3=None, - src_subject_id=None, - src_subject_dir=None, - color_table=None, - lookup_table=None, - wm_lookup_table=None, - awk_file=None, - rb_date=None): - """Set optional configurations to the default""" - - def checkarg(arg, default): - """Returns the value if defined; otherwise default""" - if arg: - return arg - else: - return default - - defaultconfig = getdefaultconfig(exitonfail=True, rb_date=rb_date) - # set the default template and 
classifier files - reg_template = checkarg(reg_template, - defaultconfig['registration_template']) - reg_template_withskull = checkarg( - reg_template_withskull, - defaultconfig['registration_template_withskull']) - lh_atlas = checkarg(lh_atlas, defaultconfig['lh_atlas']) - rh_atlas = checkarg(rh_atlas, defaultconfig['rh_atlas']) - lh_classifier1 = checkarg(lh_classifier1, - defaultconfig['lh_classifier']) - rh_classifier1 = checkarg(rh_classifier1, - defaultconfig['rh_classifier']) - lh_classifier2 = checkarg(lh_classifier2, - defaultconfig['lh_classifier2']) - rh_classifier2 = checkarg(rh_classifier2, - defaultconfig['rh_classifier2']) - lh_classifier3 = checkarg(lh_classifier3, - defaultconfig['lh_classifier3']) - rh_classifier3 = checkarg(rh_classifier3, - defaultconfig['rh_classifier3']) - src_subject_id = checkarg(src_subject_id, - defaultconfig['src_subject_id']) - src_subject_dir = checkarg(src_subject_dir, - defaultconfig['src_subject_dir']) - color_table = checkarg(color_table, defaultconfig['AvgColorTable']) - lookup_table = checkarg(lookup_table, defaultconfig['LookUpTable']) - wm_lookup_table = checkarg(wm_lookup_table, - defaultconfig['WMLookUpTable']) - awk_file = checkarg(awk_file, defaultconfig['awk_file']) - return reg_template, reg_template_withskull, lh_atlas, rh_atlas, \ - lh_classifier1, rh_classifier1, lh_classifier2, rh_classifier2, \ - lh_classifier3, rh_classifier3, src_subject_id, src_subject_dir, \ - color_table, lookup_table, wm_lookup_table, awk_file - - # list of params to check - params = [ - 'reg_template', 'reg_template_withskull', 'lh_atlas', 'rh_atlas', - 'lh_classifier1', 'rh_classifier1', 'lh_classifier2', 'rh_classifier2', - 'lh_classifier3', 'rh_classifier3', 'src_subject_id', - 'src_subject_dir', 'color_table', 'lookup_table', 'wm_lookup_table', - 'awk_file' - ] - - config_node = pe.Node( - niu.Function(params + ['rb_date'], params, setconfig), name="config") - - config_node.inputs.rb_date = rb_date - - for param in params: - reconall.connect(inputspec, param, config_node, param) - - # create AutoRecon1 - ar1_wf, ar1_outputs = create_AutoRecon1( - plugin_args=plugin_args, - stop=stop, - distance=distance, - shrink=shrink, - fsvernum=fsvernum) - # connect inputs for AutoRecon1 - reconall.connect([(inputspec, ar1_wf, [ - ('T1_files', 'inputspec.T1_files'), ('T2_file', 'inputspec.T2_file'), - ('FLAIR_file', 'inputspec.FLAIR_file'), - ('num_threads', 'inputspec.num_threads'), ('cw256', 'inputspec.cw256') - ]), (config_node, ar1_wf, [('reg_template_withskull', - 'inputspec.reg_template_withskull'), - ('awk_file', 'inputspec.awk_file')])]) - # create AutoRecon2 - ar2_wf, ar2_outputs = create_AutoRecon2( - plugin_args=plugin_args, - fsvernum=fsvernum, - stop=stop, - shrink=shrink, - distance=distance) - # connect inputs for AutoRecon2 - reconall.connect( - [(inputspec, ar2_wf, [('num_threads', 'inputspec.num_threads')]), - (config_node, ar2_wf, [('reg_template_withskull', - 'inputspec.reg_template_withskull'), - ('reg_template', 'inputspec.reg_template')]), - (ar1_wf, ar2_wf, [('outputspec.brainmask', 'inputspec.brainmask'), - ('outputspec.talairach', 'inputspec.transform'), - ('outputspec.orig', 'inputspec.orig')])]) - - if fsvernum < 6: - reconall.connect([(ar1_wf, ar2_wf, [('outputspec.nu', - 'inputspec.nu')])]) - - # create AutoRecon3 - ar3_wf, ar3_outputs = create_AutoRecon3( - plugin_args=plugin_args, - th3=th3, - exvivo=exvivo, - entorhinal=entorhinal, - fsvernum=fsvernum) - # connect inputs for AutoRecon3 - reconall.connect( - [(config_node, 
ar3_wf, - [('lh_atlas', 'inputspec.lh_atlas'), - ('rh_atlas', 'inputspec.rh_atlas'), ('lh_classifier1', - 'inputspec.lh_classifier1'), - ('rh_classifier1', - 'inputspec.rh_classifier1'), ('lh_classifier2', - 'inputspec.lh_classifier2'), - ('rh_classifier2', - 'inputspec.rh_classifier2'), ('lh_classifier3', - 'inputspec.lh_classifier3'), - ('rh_classifier3', - 'inputspec.rh_classifier3'), ('lookup_table', - 'inputspec.lookup_table'), - ('wm_lookup_table', - 'inputspec.wm_lookup_table'), ('src_subject_dir', - 'inputspec.src_subject_dir'), - ('src_subject_id', - 'inputspec.src_subject_id'), ('color_table', - 'inputspec.color_table')]), - (ar1_wf, ar3_wf, [('outputspec.brainmask', 'inputspec.brainmask'), - ('outputspec.talairach', 'inputspec.transform'), - ('outputspec.orig', - 'inputspec.orig_mgz'), ('outputspec.rawavg', - 'inputspec.rawavg')]), - (ar2_wf, ar3_wf, - [('outputspec.aseg_presurf', 'inputspec.aseg_presurf'), - ('outputspec.brain_finalsurfs', - 'inputspec.brain_finalsurfs'), ('outputspec.wm', 'inputspec.wm'), - ('outputspec.filled', 'inputspec.filled'), ('outputspec.norm', - 'inputspec.norm')])]) - for hemi in ('lh', 'rh'): - reconall.connect([(ar2_wf, ar3_wf, - [('outputspec.{0}_inflated'.format(hemi), - 'inputspec.{0}_inflated'.format(hemi)), - ('outputspec.{0}_smoothwm'.format(hemi), - 'inputspec.{0}_smoothwm'.format(hemi)), - ('outputspec.{0}_white'.format(hemi), - 'inputspec.{0}_white'.format(hemi)), - ('outputspec.{0}_cortex'.format(hemi), - 'inputspec.{0}_cortex_label'.format(hemi)), - ('outputspec.{0}_area'.format(hemi), - 'inputspec.{0}_area'.format(hemi)), - ('outputspec.{0}_curv'.format(hemi), - 'inputspec.{0}_curv'.format(hemi)), - ('outputspec.{0}_sulc'.format(hemi), - 'inputspec.{0}_sulc'.format(hemi)), - ('outputspec.{0}_orig_nofix'.format(hemi), - 'inputspec.{0}_orig_nofix'.format(hemi)), - ('outputspec.{0}_orig'.format(hemi), - 'inputspec.{0}_orig'.format(hemi)), - ('outputspec.{0}_white_H'.format(hemi), - 'inputspec.{0}_white_H'.format(hemi)), - ('outputspec.{0}_white_K'.format(hemi), - 'inputspec.{0}_white_K'.format(hemi))])]) - - # Add more outputs to outputspec - outputs = ar1_outputs + ar2_outputs + ar3_outputs - outputspec = pe.Node( - niu.IdentityInterface(fields=outputs, mandatory_inputs=True), - name="outputspec") - - for outfields, wf in [(ar1_outputs, ar1_wf), (ar2_outputs, ar2_wf), - (ar3_outputs, ar3_wf)]: - for field in outfields: - reconall.connect([(wf, outputspec, [('outputspec.' + field, - field)])]) - - # PreDataSink: Switch Transforms to datasinked transfrom - # The transforms in the header files of orig.mgz, orig_nu.mgz, and nu.mgz - # are all reference a transform in the cache directory. 
We need to rewrite the - # headers to reference the datasinked transform - - # get the filepath to where the transform will be datasinked - def getDSTransformPath(subjects_dir, subject_id): - import os - transform = os.path.join(subjects_dir, subject_id, 'mri', 'transforms', - 'talairach.xfm') - return transform - - dstransform = pe.Node( - niu.Function(['subjects_dir', 'subject_id'], ['transform'], - getDSTransformPath), - name="PreDataSink_GetTransformPath") - reconall.connect([(inputspec, dstransform, - [('subjects_dir', 'subjects_dir'), ('subject_id', - 'subject_id')])]) - # add the data sink transfrom location to the headers - predatasink_orig = pe.Node(AddXFormToHeader(), name="PreDataSink_Orig") - predatasink_orig.inputs.copy_name = True - predatasink_orig.inputs.out_file = 'orig.mgz' - reconall.connect([(outputspec, predatasink_orig, [('orig', 'in_file')]), - (dstransform, predatasink_orig, [('transform', - 'transform')])]) - predatasink_orig_nu = pe.Node( - AddXFormToHeader(), name="PreDataSink_Orig_Nu") - predatasink_orig_nu.inputs.copy_name = True - predatasink_orig_nu.inputs.out_file = 'orig_nu.mgz' - reconall.connect( - [(outputspec, predatasink_orig_nu, [('orig_nu', 'in_file')]), - (dstransform, predatasink_orig_nu, [('transform', 'transform')])]) - predatasink_nu = pe.Node(AddXFormToHeader(), name="PreDataSink_Nu") - predatasink_nu.inputs.copy_name = True - predatasink_nu.inputs.out_file = 'nu.mgz' - reconall.connect([(outputspec, predatasink_nu, [('nu', 'in_file')]), - (dstransform, predatasink_nu, [('transform', - 'transform')])]) - - # Datasink outputs - datasink = pe.Node(DataSink(), name="DataSink") - datasink.inputs.parameterization = False - - reconall.connect([(inputspec, datasink, - [('subjects_dir', 'base_directory'), ('subject_id', - 'container')])]) - - # assign datasink inputs - reconall.connect([ - (predatasink_orig, datasink, [('out_file', 'mri.@orig')]), - (predatasink_orig_nu, datasink, [('out_file', 'mri.@orig_nu')]), - (predatasink_nu, datasink, [('out_file', 'mri.@nu')]), - (outputspec, datasink, [ - ('origvols', 'mri.orig'), - ('t2_raw', 'mri.orig.@t2raw'), - ('flair', 'mri.orig.@flair'), - ('rawavg', 'mri.@rawavg'), - ('talairach_auto', 'mri.transforms.@tal_auto'), - ('talairach', 'mri.transforms.@tal'), - ('t1', 'mri.@t1'), - ('brainmask_auto', 'mri.@brainmask_auto'), - ('brainmask', 'mri.@brainmask'), - ('braintemplate', 'mri.@braintemplate'), - ('tal_lta', 'mri.transforms.@tal_lta'), - ('norm', 'mri.@norm'), - ('ctrl_pts', 'mri.@ctrl_pts'), - ('tal_m3z', 'mri.transforms.@tal_m3z'), - ('nu_noneck', 'mri.@nu_noneck'), - ('talskull2', 'mri.transforms.@talskull2'), - ('aseg_noCC', 'mri.@aseg_noCC'), - ('cc_up', 'mri.transforms.@cc_up'), - ('aseg_auto', 'mri.@aseg_auto'), - ('aseg_presurf', 'mri.@aseg_presuf'), - ('brain', 'mri.@brain'), - ('brain_finalsurfs', 'mri.@brain_finalsurfs'), - ('wm_seg', 'mri.@wm_seg'), - ('wm_aseg', 'mri.@wm_aseg'), - ('wm', 'mri.@wm'), - ('filled', 'mri.@filled'), - ('ponscc_log', 'mri.@ponscc_log'), - ('lh_orig_nofix', 'surf.@lh_orig_nofix'), - ('lh_orig', 'surf.@lh_orig'), - ('lh_smoothwm_nofix', 'surf.@lh_smoothwm_nofix'), - ('lh_inflated_nofix', 'surf.@lh_inflated_nofix'), - ('lh_qsphere_nofix', 'surf.@lh_qsphere_nofix'), - ('lh_white', 'surf.@lh_white'), - ('lh_curv', 'surf.@lh_curv'), - ('lh_area', 'surf.@lh_area'), - ('lh_cortex', 'label.@lh_cortex'), - ('lh_smoothwm', 'surf.@lh_smoothwm'), - ('lh_sulc', 'surf.@lh_sulc'), - ('lh_inflated', 'surf.@lh_inflated'), - ('lh_white_H', 'surf.@lh_white_H'), - ('lh_white_K', 
'surf.@lh_white_K'), - ('lh_inflated_H', 'surf.@lh_inflated_H'), - ('lh_inflated_K', 'surf.@lh_inflated_K'), - ('lh_curv_stats', 'stats.@lh_curv_stats'), - ('rh_orig_nofix', 'surf.@rh_orig_nofix'), - ('rh_orig', 'surf.@rh_orig'), - ('rh_smoothwm_nofix', 'surf.@rh_smoothwm_nofix'), - ('rh_inflated_nofix', 'surf.@rh_inflated_nofix'), - ('rh_qsphere_nofix', 'surf.@rh_qsphere_nofix'), - ('rh_white', 'surf.@rh_white'), - ('rh_curv', 'surf.@rh_curv'), - ('rh_area', 'surf.@rh_area'), - ('rh_cortex', 'label.@rh_cortex'), - ('rh_smoothwm', 'surf.@rh_smoothwm'), - ('rh_sulc', 'surf.@rh_sulc'), - ('rh_inflated', 'surf.@rh_inflated'), - ('rh_white_H', 'surf.@rh_white_H'), - ('rh_white_K', 'surf.@rh_white_K'), - ('rh_inflated_H', 'surf.@rh_inflated_H'), - ('rh_inflated_K', 'surf.@rh_inflated_K'), - ('rh_curv_stats', 'stats.@rh_curv_stats'), - ('lh_aparc_annot_ctab', 'label.@aparc_annot_ctab'), - ('aseg', 'mri.@aseg'), - ('wmparc', 'mri.@wmparc'), - ('wmparc_stats', 'stats.@wmparc_stats'), - ('aseg_stats', 'stats.@aseg_stats'), - ('aparc_a2009s_aseg', 'mri.@aparc_a2009s_aseg'), - ('aparc_aseg', 'mri.@aparc_aseg'), - ('aseg_presurf_hypos', 'mri.@aseg_presurf_hypos'), - ('ribbon', 'mri.@ribbon'), - ('rh_ribbon', 'mri.@rh_ribbon'), - ('lh_ribbon', 'mri.@lh_ribbon'), - ('lh_sphere', 'surf.@lh_sphere'), - ('rh_sphere', 'surf.@rh_sphere'), - ('lh_sphere_reg', 'surf.@lh_sphere_reg'), - ('rh_sphere_reg', 'surf.@rh_sphere_reg'), - ('lh_jacobian_white', 'surf.@lh_jacobian_white'), - ('rh_jacobian_white', 'surf.@rh_jacobian_white'), - ('lh_avg_curv', 'surf.@lh_avg_curv'), - ('rh_avg_curv', 'surf.@rh_avg_curv'), - ('lh_aparc_annot', 'label.@lh_aparc_annot'), - ('rh_aparc_annot', 'label.@rh_aparc_annot'), - ('lh_area_pial', 'surf.@lh_area_pial'), - ('rh_area_pial', 'surf.@rh_area_pial'), - ('lh_curv_pial', 'surf.@lh_curv_pial'), - ('rh_curv_pial', 'surf.@rh_curv_pial'), - ('lh_pial', 'surf.@lh_pial'), - ('rh_pial', 'surf.@rh_pial'), - ('lh_thickness_pial', 'surf.@lh_thickness_pial'), - ('rh_thickness_pial', 'surf.@rh_thickness_pial'), - ('lh_area_mid', 'surf.@lh_area_mid'), - ('rh_area_mid', 'surf.@rh_area_mid'), - ('lh_volume', 'surf.@lh_volume'), - ('rh_volume', 'surf.@rh_volume'), - ('lh_aparc_annot_ctab', 'label.@lh_aparc_annot_ctab'), - ('rh_aparc_annot_ctab', 'label.@rh_aparc_annot_ctab'), - ('lh_aparc_stats', 'stats.@lh_aparc_stats'), - ('rh_aparc_stats', 'stats.@rh_aparc_stats'), - ('lh_aparc_pial_stats', 'stats.@lh_aparc_pial_stats'), - ('rh_aparc_pial_stats', 'stats.@rh_aparc_pial_stats'), - ('lh_aparc_a2009s_annot', 'label.@lh_aparc_a2009s_annot'), - ('rh_aparc_a2009s_annot', 'label.@rh_aparc_a2009s_annot'), - ('lh_aparc_a2009s_annot_ctab', - 'label.@lh_aparc_a2009s_annot_ctab'), - ('rh_aparc_a2009s_annot_ctab', - 'label.@rh_aparc_a2009s_annot_ctab'), - ('lh_aparc_a2009s_annot_stats', - 'stats.@lh_aparc_a2009s_annot_stats'), - ('rh_aparc_a2009s_annot_stats', - 'stats.@rh_aparc_a2009s_annot_stats'), - ('lh_aparc_DKTatlas40_annot', 'label.@lh_aparc_DKTatlas40_annot'), - ('rh_aparc_DKTatlas40_annot', 'label.@rh_aparc_DKTatlas40_annot'), - ('lh_aparc_DKTatlas40_annot_ctab', - 'label.@lh_aparc_DKTatlas40_annot_ctab'), - ('rh_aparc_DKTatlas40_annot_ctab', - 'label.@rh_aparc_DKTatlas40_annot_ctab'), - ('lh_aparc_DKTatlas40_annot_stats', - 'stats.@lh_aparc_DKTatlas40_annot_stats'), - ('rh_aparc_DKTatlas40_annot_stats', - 'stats.@rh_aparc_DKTatlas40_annot_stats'), - ('lh_wg_pct_mgh', 'surf.@lh_wg_pct_mgh'), - ('rh_wg_pct_mgh', 'surf.@rh_wg_pct_mgh'), - ('lh_wg_pct_stats', 'stats.@lh_wg_pct_stats'), - 
('rh_wg_pct_stats', 'stats.@rh_wg_pct_stats'), - ('lh_pctsurfcon_log', 'log.@lh_pctsurfcon_log'), - ('rh_pctsurfcon_log', 'log.@rh_pctsurfcon_log'), - ('lh_BAMaps_stats', 'stats.@lh_BAMaps_stats'), - ('lh_color', 'label.@lh_color'), - ('lh_thresh_BAMaps_stats', 'stats.@lh_thresh_BAMaps_stats'), - ('lh_thresh_color', 'label.@lh_thresh_color'), - ('rh_BAMaps_stats', 'stats.@rh_BAMaps_stats'), - ('rh_color', 'label.@rh_color'), - ('rh_thresh_BAMaps_stats', 'stats.@rh_thresh_BAMaps_stats'), - ('rh_thresh_color', 'label.@rh_thresh_color'), - ('lh_BAMaps_labels', 'label.@lh_BAMaps_labels'), - ('lh_thresh_BAMaps_labels', 'label.@lh_thresh_BAMaps_labels'), - ('rh_BAMaps_labels', 'label.@rh_BAMaps_labels'), - ('rh_thresh_BAMaps_labels', 'label.@rh_thresh_BAMaps_labels'), - ('lh_BAMaps_annotation', 'label.@lh_BAMaps_annotation'), - ('lh_thresh_BAMaps_annotation', - 'label.@lh_thresh_BAMaps_annotation'), - ('rh_BAMaps_annotation', 'label.@rh_BAMaps_annotation'), - ('rh_thresh_BAMaps_annotation', - 'label.@rh_thresh_BAMaps_annotation'), - ]), - ]) - - # compeltion node - # since recon-all outputs so many files a completion node is added - # that will output the subject_id once the workflow has completed - def completemethod(datasinked_files, subject_id): - print("recon-all has finished executing for subject: {0}".format( - subject_id)) - return subject_id - - completion = pe.Node( - niu.Function(['datasinked_files', 'subject_id'], ['subject_id'], - completemethod), - name="Completion") - - # create a special identity interface for outputing the subject_id - - postds_outputspec = pe.Node( - niu.IdentityInterface(['subject_id']), name="postdatasink_outputspec") - - reconall.connect( - [(datasink, completion, [('out_file', 'datasinked_files')]), - (inputspec, completion, [('subject_id', 'subject_id')]), - (completion, postds_outputspec, [('subject_id', 'subject_id')])]) - - return reconall diff --git a/nipype/workflows/smri/freesurfer/utils.py b/nipype/workflows/smri/freesurfer/utils.py deleted file mode 100644 index 40f1f205b6..0000000000 --- a/nipype/workflows/smri/freesurfer/utils.py +++ /dev/null @@ -1,498 +0,0 @@ -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: - -from ....pipeline import engine as pe -from ....interfaces import fsl as fsl -from ....interfaces import freesurfer as fs -from ....interfaces import meshfix as mf -from ....interfaces import io as nio -from ....interfaces import utility as niu -from ....algorithms import misc as misc -from ....interfaces.utility import Function -from ....workflows.misc.utils import region_list_from_volume, id_list_from_lookup_table -import os - - -def get_aparc_aseg(files): - """Return the aparc+aseg.mgz file""" - for name in files: - if 'aparc+aseg' in name: - return name - raise ValueError('aparc+aseg.mgz not found') - - -def create_getmask_flow(name='getmask', dilate_mask=True): - """Registers a source file to freesurfer space and create a brain mask in - source space - - Requires fsl tools for initializing registration - - Parameters - ---------- - - name : string - name of workflow - dilate_mask : boolean - indicates whether to dilate mask or not - - Example - ------- - - >>> getmask = create_getmask_flow() - >>> getmask.inputs.inputspec.source_file = 'mean.nii' - >>> getmask.inputs.inputspec.subject_id = 's1' - >>> getmask.inputs.inputspec.subjects_dir = '.' 
- >>> getmask.inputs.inputspec.contrast_type = 't2' - - - Inputs:: - - inputspec.source_file : reference image for mask generation - inputspec.subject_id : freesurfer subject id - inputspec.subjects_dir : freesurfer subjects directory - inputspec.contrast_type : MR contrast of reference image - - Outputs:: - - outputspec.mask_file : binary mask file in reference image space - outputspec.reg_file : registration file that maps reference image to - freesurfer space - outputspec.reg_cost : cost of registration (useful for detecting misalignment) - """ - """ - Initialize the workflow - """ - - getmask = pe.Workflow(name=name) - """ - Define the inputs to the workflow. - """ - - inputnode = pe.Node( - niu.IdentityInterface(fields=[ - 'source_file', 'subject_id', 'subjects_dir', 'contrast_type' - ]), - name='inputspec') - """ - Define all the nodes of the workflow: - - fssource: used to retrieve aseg.mgz - threshold : binarize aseg - register : coregister source file to freesurfer space - voltransform: convert binarized aseg to source file space - """ - - fssource = pe.Node(nio.FreeSurferSource(), name='fssource') - threshold = pe.Node(fs.Binarize(min=0.5, out_type='nii'), name='threshold') - register = pe.MapNode( - fs.BBRegister(init='fsl'), iterfield=['source_file'], name='register') - voltransform = pe.MapNode( - fs.ApplyVolTransform(inverse=True), - iterfield=['source_file', 'reg_file'], - name='transform') - """ - Connect the nodes - """ - - getmask.connect([(inputnode, fssource, [ - ('subject_id', 'subject_id'), ('subjects_dir', 'subjects_dir') - ]), (inputnode, register, - [('source_file', 'source_file'), ('subject_id', 'subject_id'), - ('subjects_dir', 'subjects_dir'), - ('contrast_type', 'contrast_type')]), (inputnode, voltransform, [ - ('subjects_dir', 'subjects_dir'), ('source_file', 'source_file') - ]), (fssource, threshold, [(('aparc_aseg', get_aparc_aseg), - 'in_file')]), - (register, voltransform, [('out_reg_file', 'reg_file')]), - (threshold, voltransform, [('binary_file', - 'target_file')])]) - """ - Add remaining nodes and connections - - dilate : dilate the transformed file in source space - threshold2 : binarize transformed file - """ - - threshold2 = pe.MapNode( - fs.Binarize(min=0.5, out_type='nii'), - iterfield=['in_file'], - name='threshold2') - if dilate_mask: - threshold2.inputs.dilate = 1 - getmask.connect([(voltransform, threshold2, [('transformed_file', - 'in_file')])]) - """ - Setup an outputnode that defines relevant inputs of the workflow. 
- """ - - outputnode = pe.Node( - niu.IdentityInterface(fields=["mask_file", "reg_file", "reg_cost"]), - name="outputspec") - getmask.connect([ - (register, outputnode, [("out_reg_file", "reg_file")]), - (register, outputnode, [("min_cost_file", "reg_cost")]), - (threshold2, outputnode, [("binary_file", "mask_file")]), - ]) - return getmask - - -def create_get_stats_flow(name='getstats', withreg=False): - """Retrieves stats from labels - - Parameters - ---------- - - name : string - name of workflow - withreg : boolean - indicates whether to register source to label - - Example - ------- - - - Inputs:: - - inputspec.source_file : reference image for mask generation - inputspec.label_file : label file from which to get ROIs - - (optionally with registration) - inputspec.reg_file : bbreg file (assumes reg from source to label - inputspec.inverse : boolean whether to invert the registration - inputspec.subjects_dir : freesurfer subjects directory - - Outputs:: - - outputspec.stats_file : stats file - """ - """ - Initialize the workflow - """ - - getstats = pe.Workflow(name=name) - """ - Define the inputs to the workflow. - """ - - if withreg: - inputnode = pe.Node( - niu.IdentityInterface(fields=[ - 'source_file', 'label_file', 'reg_file', 'subjects_dir' - ]), - name='inputspec') - else: - inputnode = pe.Node( - niu.IdentityInterface(fields=['source_file', 'label_file']), - name='inputspec') - - statnode = pe.MapNode( - fs.SegStats(), - iterfield=['segmentation_file', 'in_file'], - name='segstats') - """ - Convert between source and label spaces if registration info is provided - - """ - if withreg: - voltransform = pe.MapNode( - fs.ApplyVolTransform(inverse=True), - iterfield=['source_file', 'reg_file'], - name='transform') - getstats.connect(inputnode, 'reg_file', voltransform, 'reg_file') - getstats.connect(inputnode, 'source_file', voltransform, 'source_file') - getstats.connect(inputnode, 'label_file', voltransform, 'target_file') - getstats.connect(inputnode, 'subjects_dir', voltransform, - 'subjects_dir') - - def switch_labels(inverse, transform_output, source_file, label_file): - if inverse: - return transform_output, source_file - else: - return label_file, transform_output - - chooser = pe.MapNode( - niu.Function( - input_names=[ - 'inverse', 'transform_output', 'source_file', 'label_file' - ], - output_names=['label_file', 'source_file'], - function=switch_labels), - iterfield=['transform_output', 'source_file'], - name='chooser') - getstats.connect(inputnode, 'source_file', chooser, 'source_file') - getstats.connect(inputnode, 'label_file', chooser, 'label_file') - getstats.connect(inputnode, 'inverse', chooser, 'inverse') - getstats.connect(voltransform, 'transformed_file', chooser, - 'transform_output') - getstats.connect(chooser, 'label_file', statnode, 'segmentation_file') - getstats.connect(chooser, 'source_file', statnode, 'in_file') - else: - getstats.connect(inputnode, 'label_file', statnode, - 'segmentation_file') - getstats.connect(inputnode, 'source_file', statnode, 'in_file') - """ - Setup an outputnode that defines relevant inputs of the workflow. 
- """ - - outputnode = pe.Node( - niu.IdentityInterface(fields=["stats_file"]), name="outputspec") - getstats.connect([ - (statnode, outputnode, [("summary_file", "stats_file")]), - ]) - return getstats - - -def create_tessellation_flow(name='tessellate', out_format='stl'): - """Tessellates the input subject's aseg.mgz volume and returns - the surfaces for each region in stereolithic (.stl) format - - Example - ------- - >>> from nipype.workflows.smri.freesurfer import create_tessellation_flow - >>> tessflow = create_tessellation_flow() - >>> tessflow.inputs.inputspec.subject_id = 'subj1' - >>> tessflow.inputs.inputspec.subjects_dir = '.' - >>> tessflow.inputs.inputspec.lookup_file = 'FreeSurferColorLUT.txt' # doctest: +SKIP - >>> tessflow.run() # doctest: +SKIP - - - Inputs:: - - inputspec.subject_id : freesurfer subject id - inputspec.subjects_dir : freesurfer subjects directory - inputspec.lookup_file : lookup file from freesurfer directory - - Outputs:: - - outputspec.meshes : output region meshes in (by default) stereolithographic (.stl) format - """ - """ - Initialize the workflow - """ - - tessflow = pe.Workflow(name=name) - """ - Define the inputs to the workflow. - """ - - inputnode = pe.Node( - niu.IdentityInterface( - fields=['subject_id', 'subjects_dir', 'lookup_file']), - name='inputspec') - """ - Define all the nodes of the workflow: - - fssource: used to retrieve aseg.mgz - mri_convert : converts aseg.mgz to aseg.nii - tessellate : tessellates regions in aseg.mgz - surfconvert : converts regions to stereolithographic (.stl) format - smoother: smooths the tessellated regions - - """ - - fssource = pe.Node(nio.FreeSurferSource(), name='fssource') - volconvert = pe.Node(fs.MRIConvert(out_type='nii'), name='volconvert') - tessellate = pe.MapNode( - fs.MRIMarchingCubes(), - iterfield=['label_value', 'out_file'], - name='tessellate') - surfconvert = pe.MapNode( - fs.MRIsConvert(out_datatype='stl'), - iterfield=['in_file'], - name='surfconvert') - smoother = pe.MapNode( - mf.MeshFix(), iterfield=['in_file1'], name='smoother') - if out_format == 'gii': - stl_to_gifti = pe.MapNode( - fs.MRIsConvert(out_datatype=out_format), - iterfield=['in_file'], - name='stl_to_gifti') - smoother.inputs.save_as_stl = True - smoother.inputs.laplacian_smoothing_steps = 1 - - region_list_from_volume_interface = Function( - input_names=["in_file"], - output_names=["region_list"], - function=region_list_from_volume) - - id_list_from_lookup_table_interface = Function( - input_names=["lookup_file", "region_list"], - output_names=["id_list"], - function=id_list_from_lookup_table) - - region_list_from_volume_node = pe.Node( - interface=region_list_from_volume_interface, - name='region_list_from_volume_node') - id_list_from_lookup_table_node = pe.Node( - interface=id_list_from_lookup_table_interface, - name='id_list_from_lookup_table_node') - """ - Connect the nodes - """ - - tessflow.connect([ - (inputnode, fssource, [('subject_id', 'subject_id'), - ('subjects_dir', 'subjects_dir')]), - (fssource, volconvert, [('aseg', 'in_file')]), - (volconvert, region_list_from_volume_node, [('out_file', 'in_file')]), - (region_list_from_volume_node, tessellate, [('region_list', - 'label_value')]), - (region_list_from_volume_node, id_list_from_lookup_table_node, - [('region_list', 'region_list')]), - (inputnode, id_list_from_lookup_table_node, [('lookup_file', - 'lookup_file')]), - (id_list_from_lookup_table_node, tessellate, [('id_list', - 'out_file')]), - (fssource, tessellate, [('aseg', 'in_file')]), - (tessellate, 
surfconvert, [('surface', 'in_file')]), - (surfconvert, smoother, [('converted', 'in_file1')]), - ]) - """ - Setup an outputnode that defines relevant inputs of the workflow. - """ - - outputnode = pe.Node( - niu.IdentityInterface(fields=["meshes"]), name="outputspec") - - if out_format == 'gii': - tessflow.connect([ - (smoother, stl_to_gifti, [("mesh_file", "in_file")]), - ]) - tessflow.connect([ - (stl_to_gifti, outputnode, [("converted", "meshes")]), - ]) - else: - tessflow.connect([ - (smoother, outputnode, [("mesh_file", "meshes")]), - ]) - return tessflow - - -def copy_files(in_files, out_files): - """ - Create a function to copy a file that can be modified by a following node - without changing the original file - """ - import shutil - import sys - if len(in_files) != len(out_files): - print( - "ERROR: Length of input files must be identical to the length of " - + "outrput files to be copied") - sys.exit(-1) - for i, in_file in enumerate(in_files): - out_file = out_files[i] - print("copying {0} to {1}".format(in_file, out_file)) - shutil.copy(in_file, out_file) - return out_files - - -def copy_file(in_file, out_file=None): - """ - Create a function to copy a file that can be modified by a following node - without changing the original file. - """ - import os - import shutil - if out_file is None: - out_file = os.path.join(os.getcwd(), os.path.basename(in_file)) - if type(in_file) is list and len(in_file) == 1: - in_file = in_file[0] - out_file = os.path.abspath(out_file) - in_file = os.path.abspath(in_file) - print("copying {0} to {1}".format(in_file, out_file)) - shutil.copy(in_file, out_file) - return out_file - - -def mkdir_p(path): - import errno - import os - try: - os.makedirs(path) - except OSError as exc: # Python >2.5 - if exc.errno == errno.EEXIST and os.path.isdir(path): - pass - else: - raise - - -def getdefaultconfig(exitonfail=False, rb_date="2014-08-21"): - config = { - 'custom_atlas': None, - 'cw256': False, - 'field_strength': '1.5T', - 'fs_home': checkenv(exitonfail), - 'longitudinal': False, - 'long_base': None, - 'openmp': None, - 'plugin_args': None, - 'qcache': False, - 'queue': None, - 'recoding_file': None, - 'src_subject_id': 'fsaverage', - 'th3': True - } - - config['src_subject_dir'] = os.path.join(config['fs_home'], 'subjects', - config['src_subject_id']) - config['awk_file'] = os.path.join(config['fs_home'], 'bin', - 'extract_talairach_avi_QA.awk') - config['registration_template'] = os.path.join( - config['fs_home'], 'average', 'RB_all_{0}.gca'.format(rb_date)) - config['registration_template_withskull'] = os.path.join( - config['fs_home'], 'average', - 'RB_all_withskull_{0}.gca'.format(rb_date)) - for hemi in ('lh', 'rh'): - config['{0}_atlas'.format(hemi)] = os.path.join( - config['fs_home'], 'average', - '{0}.average.curvature.filled.buckner40.tif'.format(hemi)) - config['{0}_classifier'.format(hemi)] = os.path.join( - config['fs_home'], 'average', - '{0}.curvature.buckner40.filled.desikan_killiany.2010-03-25.gcs'. 
- format(hemi)) - config['{0}_classifier2'.format(hemi)] = os.path.join( - config['fs_home'], 'average', - '{0}.destrieux.simple.2009-07-29.gcs'.format(hemi)) - config['{0}_classifier3'.format(hemi)] = os.path.join( - config['fs_home'], 'average', '{0}.DKTatlas40.gcs'.format(hemi)) - config['LookUpTable'] = os.path.join(config['fs_home'], 'ASegStatsLUT.txt') - config['WMLookUpTable'] = os.path.join(config['fs_home'], - 'WMParcStatsLUT.txt') - config['AvgColorTable'] = os.path.join(config['fs_home'], 'average', - 'colortable_BA.txt') - - return config - - -def checkenv(exitonfail=False): - """Check for the necessary FS environment variables""" - import sys - fs_home = os.environ.get('FREESURFER_HOME') - path = os.environ.get('PATH') - print("FREESURFER_HOME: {0}".format(fs_home)) - if fs_home is None: - msg = "please set FREESURFER_HOME before running the workflow" - elif not os.path.isdir(fs_home): - msg = "FREESURFER_HOME must be set to a valid directory before running this workflow" - elif os.path.join(fs_home, 'bin') not in path.replace('//', '/'): - print(path) - msg = "Could not find necessary executable in path" - setupscript = os.path.join(fs_home, 'SetUpFreeSurfer.sh') - if os.path.isfile(setupscript): - print("Please source the setup script before running the workflow:" - + "\nsource {0}".format(setupscript)) - else: - print( - "Please ensure that FREESURFER_HOME is set to a valid fs " + - "directory and source the necessary SetUpFreeSurfer.sh script before running " - + "this workflow") - else: - return fs_home - - if exitonfail: - print("ERROR: " + msg) - sys.exit(2) - else: - print("Warning: " + msg) diff --git a/nipype/workflows/smri/niftyreg/__init__.py b/nipype/workflows/smri/niftyreg/__init__.py deleted file mode 100644 index b9d0c9c85b..0000000000 --- a/nipype/workflows/smri/niftyreg/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: - -from .groupwise import (create_groupwise_average, create_nonlinear_gw_step, - create_linear_gw_step) diff --git a/nipype/workflows/smri/niftyreg/groupwise.py b/nipype/workflows/smri/niftyreg/groupwise.py deleted file mode 100644 index fd8d25541b..0000000000 --- a/nipype/workflows/smri/niftyreg/groupwise.py +++ /dev/null @@ -1,384 +0,0 @@ -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -Example of registration workflows using niftyreg, useful for a variety of -pipelines. Including linear and non-linear image co-registration -""" - -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import str, range -from ....interfaces import utility as niu -from ....interfaces import niftyreg as niftyreg -from ....pipeline import engine as pe - - -def create_linear_gw_step(name="linear_gw_niftyreg", - demean=True, - linear_options_hash=None, - use_mask=False, - verbose=False): - """ - Creates a workflow that performs linear co-registration of a set of images - using RegAladin, producing an average image and a set of affine - transformation matrices linking each of the floating images to the average. 
- - Inputs:: - - inputspec.in_files - The input files to be registered - inputspec.ref_file - The initial reference image that the input files - are registered to - inputspec.rmask_file - Mask of the reference image - inputspec.in_aff_files - Initial affine transformation files - - Outputs:: - - outputspec.average_image - The average image - outputspec.aff_files - The affine transformation files - - Optional arguments:: - - linear_options_hash - An options dictionary containing a list of - parameters for RegAladin that take - the same form as given in the interface (default None) - demean - Selects whether to demean the transformation matrices when - performing the averaging (default True) - initial_affines - Selects whether to iterate over initial affine - images, which we generally won't have (default False) - - Example - ------- - - >>> from nipype.workflows.smri.niftyreg import create_linear_gw_step - >>> lgw = create_linear_gw_step('my_linear_coreg') # doctest: +SKIP - >>> lgw.inputs.inputspec.in_files = [ - ... 'file1.nii.gz', 'file2.nii.gz'] # doctest: +SKIP - >>> lgw.inputs.inputspec.ref_file = ['ref.nii.gz'] # doctest: +SKIP - >>> lgw.run() # doctest: +SKIP - - """ - # Create the sub workflow - workflow = pe.Workflow(name=name) - workflow.base_output_dir = name - - # We need to create an input node for the workflow - inputnode = pe.Node( - niu.IdentityInterface(fields=['in_files', 'ref_file', 'rmask_file']), - name='inputspec') - - if linear_options_hash is None: - linear_options_hash = dict() - - # Rigidly register each of the images to the average - lin_reg = pe.MapNode( - interface=niftyreg.RegAladin(**linear_options_hash), - name="lin_reg", - iterfield=['flo_file']) - - if verbose is False: - lin_reg.inputs.verbosity_off_flag = True - - # Average the images - ave_ims = pe.Node(interface=niftyreg.RegAverage(), name="ave_ims") - - # We have a new average image and the affine - # transformations, which are returned as an output node. - outputnode = pe.Node( - niu.IdentityInterface(fields=['average_image', 'trans_files']), - name='outputspec') - - # Connect the inputs to the lin_reg node - workflow.connect([(inputnode, lin_reg, [('ref_file', 'ref_file')]), - (inputnode, lin_reg, [('in_files', 'flo_file')])]) - if use_mask: - workflow.connect(inputnode, 'rmask_file', lin_reg, 'rmask_file') - - if demean: - workflow.connect([(inputnode, ave_ims, [('ref_file', - 'demean1_ref_file')]), - (lin_reg, ave_ims, [('avg_output', 'warp_files')])]) - else: - workflow.connect(lin_reg, 'res_file', ave_ims, 'avg_files') - - # Connect up the output node - workflow.connect([(lin_reg, outputnode, [('aff_file', 'trans_files')]), - (ave_ims, outputnode, [('out_file', 'average_image')])]) - - return workflow - - -def create_nonlinear_gw_step(name="nonlinear_gw_niftyreg", - demean=True, - nonlinear_options_hash=None, - initial_affines=False, - use_mask=False, - verbose=False): - """ - Creates a workflow that perform non-linear co-registrations of a set of - images using RegF3d, producing an non-linear average image and a set of - cpp transformation linking each of the floating images to the average. 
- - Inputs:: - - inputspec.in_files - The input files to be registered - inputspec.ref_file - The initial reference image that the input files - are registered to - inputspec.rmask_file - Mask of the reference image - inputspec.in_trans_files - Initial transformation files (affine or - cpps) - - Outputs:: - - outputspec.average_image - The average image - outputspec.cpp_files - The bspline transformation files - - Optional arguments:: - - nonlinear_options_hash - An options dictionary containing a list of - parameters for RegAladin that take the - same form as given in the interface (default None) - initial_affines - Selects whether to iterate over initial affine - images, which we generally won't have (default False) - - Example - ------- - >>> from nipype.workflows.smri.niftyreg import create_nonlinear_gw_step - >>> nlc = create_nonlinear_gw_step('nonlinear_coreg') # doctest: +SKIP - >>> nlc.inputs.inputspec.in_files = [ - ... 'file1.nii.gz', 'file2.nii.gz'] # doctest: +SKIP - >>> nlc.inputs.inputspec.ref_file = ['ref.nii.gz'] # doctest: +SKIP - >>> nlc.run() # doctest: +SKIP - - """ - - # Create the workflow - workflow = pe.Workflow(name=name) - workflow.base_output_dir = name - - # We need to create an input node for the workflow - inputnode = pe.Node( - niu.IdentityInterface( - fields=['in_files', 'ref_file', 'rmask_file', 'input_aff_files']), - name='inputspec') - - if nonlinear_options_hash is None: - nonlinear_options_hash = dict() - - # non-rigidly register each of the images to the average - # flo_file can take a list of files - # Need to be able to iterate over input affine files, but what about the - # cases where we have no input affine files? - # Passing empty strings are not valid filenames, and undefined fields can - # not be iterated over. - # Current simple solution, as this is not generally required, is to use a - # flag which specifies wherther to iterate - if initial_affines: - nonlin_reg = pe.MapNode( - interface=niftyreg.RegF3D(**nonlinear_options_hash), - name="nonlin_reg", - iterfield=['flo_file', 'aff_file']) - else: - nonlin_reg = pe.MapNode( - interface=niftyreg.RegF3D(**nonlinear_options_hash), - name="nonlin_reg", - iterfield=['flo_file']) - - if verbose is False: - nonlin_reg.inputs.verbosity_off_flag = True - - # Average the images - ave_ims = pe.Node(interface=niftyreg.RegAverage(), name="ave_ims") - - # We have a new centered average image, the resampled original images and - # the affine transformations, which are returned as an output node. 
- outputnode = pe.Node( - niu.IdentityInterface(fields=['average_image', 'trans_files']), - name='outputspec') - - # Connect the inputs to the lin_reg node, which is split over in_files - workflow.connect([(inputnode, nonlin_reg, [('in_files', 'flo_file')]), - (inputnode, nonlin_reg, [('ref_file', 'ref_file')])]) - - if use_mask: - workflow.connect(inputnode, 'rmask_file', nonlin_reg, 'rmask_file') - - # If we have initial affine transforms, we need to connect them in - if initial_affines: - workflow.connect(inputnode, 'input_aff_files', nonlin_reg, 'aff_file') - - if demean: - if 'vel_flag' in list(nonlinear_options_hash.keys()) and \ - nonlinear_options_hash['vel_flag'] is True and \ - initial_affines: - workflow.connect(inputnode, 'ref_file', ave_ims, - 'demean3_ref_file') - else: - workflow.connect(inputnode, 'ref_file', ave_ims, - 'demean2_ref_file') - workflow.connect(nonlin_reg, 'avg_output', ave_ims, 'warp_files') - else: - workflow.connect(nonlin_reg, 'res_file', ave_ims, 'avg_files') - - # Connect up the output node - workflow.connect([(nonlin_reg, outputnode, [('cpp_file', 'trans_files')]), - (ave_ims, outputnode, [('out_file', 'average_image')])]) - - return workflow - - -# Creates an atlas image by iterative registration. An initial reference image -# can be provided, otherwise one will be made. -def create_groupwise_average(name="atlas_creation", - itr_rigid=3, - itr_affine=3, - itr_non_lin=5, - linear_options_hash=None, - nonlinear_options_hash=None, - use_mask=False, - verbose=False): - """ - Create the overall workflow that embeds all the rigid, affine and - non-linear components. - - Inputs:: - - inputspec.in_files - The input files to be registered - inputspec.ref_file - The initial reference image that the input files - are registered to - inputspec.rmask_file - Mask of the reference image - inputspec.in_trans_files - Initial transformation files (affine or - cpps) - - Outputs:: - - outputspec.average_image - The average image - outputspec.cpp_files - The bspline transformation files - - - Example - ------- - - >>> from nipype.workflows.smri.niftyreg import create_groupwise_average - >>> node = create_groupwise_average('groupwise_av') # doctest: +SKIP - >>> node.inputs.inputspec.in_files = [ - ... 
'file1.nii.gz', 'file2.nii.gz'] # doctest: +SKIP - >>> node.inputs.inputspec.ref_file = ['ref.nii.gz'] # doctest: +SKIP - >>> node.inputs.inputspec.rmask_file = ['mask.nii.gz'] # doctest: +SKIP - >>> node.run() # doctest: +SKIP - - """ - # Create workflow - workflow = pe.Workflow(name=name) - - if linear_options_hash is None: - linear_options_hash = dict() - - if nonlinear_options_hash is None: - nonlinear_options_hash = dict() - - # Create the input and output node - inputnode = pe.Node( - niu.IdentityInterface(fields=['in_files', 'ref_file', 'rmask_file']), - name='inputspec') - - outputnode = pe.Node( - niu.IdentityInterface(fields=['average_image', 'trans_files']), - name='outputspec') - - # Create lists to store the rigid, affine and non-linear sub-workflow - lin_workflows = [] - nonlin_workflows = [] - - # Create the linear groupwise registration sub-workflows - for i in range(itr_rigid + itr_affine): - # Define is the sub-workflow is rigid or affine - if i >= itr_rigid: - linear_options_hash['rig_only_flag'] = False - else: - linear_options_hash['rig_only_flag'] = True - - # Define if the average image should be demean to ensure we have a - # barycenter - if (i < itr_rigid) or (i == (itr_rigid + itr_affine - 1)): - demean_arg = False - else: - demean_arg = True - - # Create the rigid or affine sub-workflow and add it to the relevant - # list - wf = create_linear_gw_step( - name='lin_reg' + str(i), - linear_options_hash=linear_options_hash, - demean=demean_arg, - verbose=verbose) - lin_workflows.append(wf) - - # Connect up the input data to the workflow - workflow.connect(inputnode, 'in_files', wf, 'inputspec.in_files') - if use_mask: - workflow.connect(inputnode, 'rmask_file', wf, - 'inputspec.rmask_file') - # If it exist, connect the previous workflow to the current one - if i == 0: - workflow.connect(inputnode, 'ref_file', wf, 'inputspec.ref_file') - else: - workflow.connect(lin_workflows[i - 1], 'outputspec.average_image', - wf, 'inputspec.ref_file') - - demean_arg = True - - # Create the nonlinear groupwise registration sub-workflows - for i in range(itr_non_lin): - - if len(lin_workflows) > 0: - initial_affines_arg = True - if i == (itr_non_lin - 1): - demean_arg = False - - wf = create_nonlinear_gw_step( - name='nonlin' + str(i), - demean=demean_arg, - initial_affines=initial_affines_arg, - nonlinear_options_hash=nonlinear_options_hash, - verbose=verbose) - - # Connect up the input data to the workflows - workflow.connect(inputnode, 'in_files', wf, 'inputspec.in_files') - if use_mask: - workflow.connect(inputnode, 'rmask_file', wf, - 'inputspec.rmask_file') - - if initial_affines_arg: - # Take the final linear registration results and use them to - # initialise the NR - workflow.connect(lin_workflows[-1], 'outputspec.trans_files', wf, - 'inputspec.input_aff_files') - - if i == 0: - if len(lin_workflows) > 0: - workflow.connect(lin_workflows[-1], 'outputspec.average_image', - wf, 'inputspec.ref_file') - else: - workflow.connect(inputnode, 'ref_file', wf, - 'inputspec.ref_file') - else: - workflow.connect(nonlin_workflows[i - 1], - 'outputspec.average_image', wf, - 'inputspec.ref_file') - - nonlin_workflows.append(wf) - - # Set up the last workflow - lw = None - if len(nonlin_workflows) > 0: - lw = nonlin_workflows[-1] - elif len(lin_workflows) > 0: - lw = lin_workflows[-1] - - # Connect the data to return - workflow.connect( - [(lw, outputnode, [('outputspec.average_image', 'average_image')]), - (lw, outputnode, [('outputspec.trans_files', 'trans_files')])]) - - return workflow 
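The groupwise registration workflows deleted here remain available through the niflow-nipype1-workflows package, which a later commit in this series installs into the Docker image. A minimal usage sketch, assuming that package is installed, that niftyreg binaries are on the PATH, and that its import path mirrors the old nipype.workflows layout; the filenames follow the docstring example above and are placeholders:

    from niflow.nipype1.workflows.smri.niftyreg import create_groupwise_average

    # Build an average/template image with 3 rigid, 3 affine and 5 non-linear iterations
    wf = create_groupwise_average(name='groupwise_av', itr_rigid=3,
                                  itr_affine=3, itr_non_lin=5)
    wf.inputs.inputspec.in_files = ['file1.nii.gz', 'file2.nii.gz']
    wf.inputs.inputspec.ref_file = ['ref.nii.gz']
    wf.inputs.inputspec.rmask_file = ['mask.nii.gz']
    wf.run()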
diff --git a/nipype/workflows/warp/__init__.py b/nipype/workflows/warp/__init__.py deleted file mode 100644 index 40a96afc6f..0000000000 --- a/nipype/workflows/warp/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# -*- coding: utf-8 -*- From e41ca1ad63a3045c2557b4b2234dbe527b87fcfd Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 7 Oct 2019 13:01:17 -0400 Subject: [PATCH 0476/1665] ENH: Add niflow-nipype1-workflows to Docker file --- docker/generate_dockerfiles.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/generate_dockerfiles.sh b/docker/generate_dockerfiles.sh index 44173ee009..9c389873fa 100755 --- a/docker/generate_dockerfiles.sh +++ b/docker/generate_dockerfiles.sh @@ -94,7 +94,7 @@ function generate_main_dockerfile() { conda_install='python=${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR} libxml2 libxslt matplotlib mkl "numpy!=1.16.0" paramiko pandas psutil scikit-learn scipy traits' \ - pip_install="pytest-xdist" \ + pip_install="pytest-xdist niflow-nipype1-workflows" \ activate=true \ --copy docker/files/run_builddocs.sh docker/files/run_examples.sh \ docker/files/run_pytests.sh nipype/external/fsl_imglob.py /usr/bin/ \ From 5c9e5711180dc327de28c0b48256f8a26551b100 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 7 Oct 2019 14:19:09 -0400 Subject: [PATCH 0477/1665] CI: Install rdflib earlier to ensure neurdflib goes later --- docker/generate_dockerfiles.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/generate_dockerfiles.sh b/docker/generate_dockerfiles.sh index 9c389873fa..d6d880bfc5 100755 --- a/docker/generate_dockerfiles.sh +++ b/docker/generate_dockerfiles.sh @@ -93,7 +93,7 @@ function generate_main_dockerfile() { --miniconda create_env=neuro \ conda_install='python=${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR} libxml2 libxslt matplotlib mkl "numpy!=1.16.0" paramiko - pandas psutil scikit-learn scipy traits' \ + pandas psutil scikit-learn scipy traits rdflib' \ pip_install="pytest-xdist niflow-nipype1-workflows" \ activate=true \ --copy docker/files/run_builddocs.sh docker/files/run_examples.sh \ docker/files/run_pytests.sh nipype/external/fsl_imglob.py /usr/bin/ \ From e6a1cc71fbbc650f4e64d8fae78a4b801d23d386 Mon Sep 17 00:00:00 2001 From: oesteban Date: Mon, 7 Oct 2019 16:16:53 -0700 Subject: [PATCH 0478/1665] FIX: Restore ``AFNICommand._get_fname``, required by some interfaces In #2964 I broke some interfaces (@tsalo identified `Allineate` as one of those) by removing the ``_get_fname`` function from the base ``AFNICommand``. The intent was to migrate to a ``name_source``-based management of automatically generated names, but in the case of ``Allineate``, the use of `_get_fname` was a bit different (modifying the user-provided input value under certain conditions to follow AFNI's ``3dAllineate`` behavior). Also remove a definition of the same function for ``Deconvolve``. This PR restores the missing function. Closes #3070. --- nipype/interfaces/afni/base.py | 51 +++++++++++++++++++++++++++++++- nipype/interfaces/afni/model.py | 52 --------------------------------- 2 files changed, 50 insertions(+), 53 deletions(-) diff --git a/nipype/interfaces/afni/base.py b/nipype/interfaces/afni/base.py index 815f0619c8..1a21dcb2cf 100644 --- a/nipype/interfaces/afni/base.py +++ b/nipype/interfaces/afni/base.py @@ -11,7 +11,7 @@ from distutils import spawn from ...
import logging, LooseVersion -from ...utils.filemanip import split_filename +from ...utils.filemanip import split_filename, fname_presuffix from ..base import (CommandLine, traits, CommandLineInputSpec, isdefined, File, TraitedSpec, PackageInfo) from ...external.due import BibTeX @@ -237,6 +237,55 @@ def _list_outputs(self): outputs[name] = outputs[name] + "+orig.BRIK" return outputs + def _gen_fname(self, + basename, + cwd=None, + suffix=None, + change_ext=True, + ext=None): + """ + Generate a filename based on the given parameters. + + The filename will take the form: cwd/basename. + If change_ext is True, it will use the extentions specified in + intputs.output_type. + + Parameters + ---------- + basename : str + Filename to base the new filename on. + cwd : str + Path to prefix to the new filename. (default is os.getcwd()) + suffix : str + Suffix to add to the `basename`. (defaults is '' ) + change_ext : bool + Flag to change the filename extension to the FSL output type. + (default True) + + Returns + ------- + fname : str + New filename based on given parameters. + + """ + if not basename: + msg = 'Unable to generate filename for command %s. ' % self.cmd + msg += 'basename is not set!' + raise ValueError(msg) + + if cwd is None: + cwd = os.getcwd() + if ext is None: + ext = Info.output_type_to_ext(self.inputs.outputtype) + if change_ext: + suffix = ''.join((suffix, ext)) if suffix else ext + + if suffix is None: + suffix = '' + fname = fname_presuffix( + basename, suffix=suffix, use_ext=False, newpath=cwd) + return fname + def no_afni(): """Check whether AFNI is not available.""" diff --git a/nipype/interfaces/afni/model.py b/nipype/interfaces/afni/model.py index 96afbe7320..ff6994b374 100644 --- a/nipype/interfaces/afni/model.py +++ b/nipype/interfaces/afni/model.py @@ -309,58 +309,6 @@ def _list_outputs(self): return outputs - def _gen_fname(self, - basename, - cwd=None, - suffix=None, - change_ext=True, - ext=None): - """Generate a filename based on the given parameters. - - The filename will take the form: cwd/basename. - If change_ext is True, it will use the extentions specified in - intputs.output_type. - - Parameters - ---------- - basename : str - Filename to base the new filename on. - cwd : str - Path to prefix to the new filename. (default is os.getcwd()) - suffix : str - Suffix to add to the `basename`. (defaults is '' ) - change_ext : bool - Flag to change the filename extension to the FSL output type. - (default True) - - Returns - ------- - fname : str - New filename based on given parameters. - - """ - from nipype.utils.filemanip import fname_presuffix - - if basename == '': - msg = 'Unable to generate filename for command %s. ' % self.cmd - msg += 'basename is not set!' 
- raise ValueError(msg) - if cwd is None: - cwd = os.getcwd() - if ext is None: - ext = Info.output_type_to_ext(self.inputs.outputtype) - if change_ext: - if suffix: - suffix = ''.join((suffix, ext)) - else: - suffix = ext - if suffix is None: - suffix = '' - fname = fname_presuffix( - basename, suffix=suffix, use_ext=False, newpath=cwd) - return fname - - class RemlfitInputSpec(AFNICommandInputSpec): # mandatory files in_files = InputMultiPath( From 2514026726623a3827b4d12b4f4d4ae75378d30d Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 7 Oct 2019 20:43:03 -0400 Subject: [PATCH 0479/1665] Update .travis.yml Co-Authored-By: Oscar Esteban --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 5ffabcf979..f79018dd50 100644 --- a/.travis.yml +++ b/.travis.yml @@ -78,7 +78,7 @@ script: fi - | if [ "$CHECK_TYPE" = "specs" ]; then - make check-before-commit + make specs git add nipype test "$( git diff --cached | wc -l )" -eq 0 || ( git diff --cached && false ) fi From a7124255c0aa944100f63559382f25542429882b Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 7 Oct 2019 16:39:59 -0400 Subject: [PATCH 0480/1665] DOC: Update changelog --- doc/changelog/1.X.X-changelog | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/doc/changelog/1.X.X-changelog b/doc/changelog/1.X.X-changelog index 82747d9903..cfac06b090 100644 --- a/doc/changelog/1.X.X-changelog +++ b/doc/changelog/1.X.X-changelog @@ -1,3 +1,22 @@ +1.3.0 (To Be Determined) +======================== + +##### [Full changelog](https://github.com/nipy/nipype/milestone/34?closed=1) + + * FIX: Restore ``AFNICommand._get_fname``, required by some interfaces (https://github.com/nipy/nipype/pull/3071) + * FIX: Remove asynchronous chdir callback (https://github.com/nipy/nipype/pull/3060) + * FIX: Minimize scope for directory changes while loading results file (https://github.com/nipy/nipype/pull/3061) + * ENH: Add precommit information for contributors and pre-commit style (https://github.com/nipy/nipype/pull/3063) + * ENH: Delay etelemetry for non-interactive sessions, report bad versions (https://github.com/nipy/nipype/pull/3049) + * ENH: Run memoized check_version at REPL import, Node/Workflow/Interface init (https://github.com/nipy/nipype/pull/30) + * RF: Provide functions to augment old Path.mkdir, Path.resolve methods (https://github.com/nipy/nipype/pull/3050) + * RF: Redirect nipype.workflows to niflow.nipype1.workflows (https://github.com/nipy/nipype/pull/3067) + * TST: Skip dcm2niix test if data fails to download (https://github.com/nipy/nipype/pull/3059) + * TST: dcm2niix test fix (https://github.com/nipy/nipype/pull/3058) + * MAINT: Drop Python 3.4 support (https://github.com/nipy/nipype/pull/3062) + * CI: ``make specs`` on Travis (https://github.com/nipy/nipype/pull/3066) + + 1.2.3 (September 23, 2019) ========================== From 040b94270d708cf0945698f9e88bb04bef849ffc Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 7 Oct 2019 16:41:11 -0400 Subject: [PATCH 0481/1665] MNT: Version 1.3.0-rc1 --- doc/conf.py | 2 +- nipype/info.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index 10ac3c36db..c49f20e514 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -82,7 +82,7 @@ # The short X.Y version. version = nipype.__version__ # The full version, including alpha/beta/rc tags. -release = "1.2.3" +release = "1.3.0-rc1" # The language for content autogenerated by Sphinx. 
Refer to documentation # for a list of supported languages. diff --git a/nipype/info.py b/nipype/info.py index 9ba12193ea..aa8f460688 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -9,7 +9,7 @@ # nipype version information # Remove -dev for release -__version__ = '1.3.0-dev' +__version__ = '1.3.0-rc1' def get_nipype_gitversion(): From 215349b9c3a6a9925fbfbf2a7cfd6d01f66964ce Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 7 Oct 2019 16:59:41 -0400 Subject: [PATCH 0482/1665] CI: Use Python 3.6.5 environment for packaging --- .circleci/config.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index a5d59eb75b..3fc27bf663 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -339,6 +339,7 @@ jobs: - run: name: Check pypi preconditions command: | + pyenv local 3.6.5 pip install --upgrade pip twine future wheel readme_renderer setuptools python setup.py sdist bdist_wheel twine check dist/* @@ -353,7 +354,7 @@ jobs: - run: name: Validate Python 3 installation command: | - pyenv local 3.5.2 + pyenv local 3.6.5 pip install --upgrade pip pip install dist/nipype-*-py2.py3-none-any.whl # Futures should not install in Python 3 @@ -388,6 +389,7 @@ jobs: - run: name: Deploy to PyPI command: | + pyenv local 3.6.5 pip install --upgrade twine future wheel readme_renderer setuptools python setup.py check -r -s python setup.py sdist bdist_wheel From 50952d7483bef049dc565492815ac9fcaeb6fd88 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 8 Oct 2019 07:20:52 -0400 Subject: [PATCH 0483/1665] MNT: Update mailmap, tracking down ambiguous authors --- .mailmap | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.mailmap b/.mailmap index ced66b4099..edb9ca5a4d 100644 --- a/.mailmap +++ b/.mailmap @@ -70,6 +70,7 @@ Horea Christian Isaac Schwabacher Jakub Kaczmarzyk James Kent +James Kent Fred Mertz Janosch Linkersdörfer Jason Wong Jason Wong @@ -80,6 +81,7 @@ Joerg Stadler Joerg Stadler Joerg Stadler John A. Lee +John A. Lee Joke Durnez Josh Warner Junhao WEN @@ -151,6 +153,7 @@ Shariq Iqbal Shariq Iqbal Shoshana Berleant Shoshana Berleant Shoshana Berleant +Shoshana Berleant Ubuntu Simon Rothmei Simon Rothmei Siqi Liu From a0ee11c452db39fe951ee73d9b7ae1ac03575ed7 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Tue, 8 Oct 2019 07:21:51 -0400 Subject: [PATCH 0484/1665] FIX: Always propagate existing Zenodo authors --- tools/update_zenodo.py | 40 +++++++++++----------------------------- 1 file changed, 11 insertions(+), 29 deletions(-) diff --git a/tools/update_zenodo.py b/tools/update_zenodo.py index c83e14ff14..2833194cd1 100755 --- a/tools/update_zenodo.py +++ b/tools/update_zenodo.py @@ -7,28 +7,8 @@ from fuzzywuzzy import fuzz, process import subprocess as sp -# These ORCIDs should go last +# These names should go last CREATORS_LAST = ['Gorgolewski, Krzysztof J.', 'Ghosh, Satrajit'] -# for entries not found in line-contributions -MISSING_ENTRIES = [ - {"name": "Varada, Jan"}, - {"name": "Schwabacher, Isaac"}, - {"affiliation": "Child Mind Institute / Nathan Kline Institute", - "name": "Pellman, John", - "orcid": "0000-0001-6810-4461"}, - {"name": "Khanuja, Ranjeet"}, - {"affiliation": - "Medical Imaging & Biomarkers, Bioclinica, Newark, CA, USA.", - "name": "Pannetier, Nicolas", - "orcid": "0000-0002-0744-5155"}, - {"name": "McDermottroe, Conor"}, - {"affiliation": - "Max Planck Institute for Human Cognitive and Brain Sciences, " - "Leipzig, Germany.", - "name": "Mihai, Paul Glad", - "orcid": "0000-0001-5715-6442"}, - {"name": "Lai, Jeff"} -] if __name__ == '__main__': contrib_file = Path('line-contributors.txt') @@ -55,7 +35,7 @@ zenodo = json.loads(zenodo_file.read_text()) zen_names = [' '.join(val['name'].split(',')[::-1]).strip() for val in zenodo['creators']] - total_names = len(zen_names) + len(MISSING_ENTRIES) + total_names = len(zen_names) name_matches = [] position = 1 @@ -63,13 +43,14 @@ matches = process.extract(ele, zen_names, scorer=fuzz.token_sort_ratio, limit=2) # matches is a list [('First match', % Match), ('Second match', % Match)] - if matches[0][1] > 80: - val = zenodo['creators'][zen_names.index(matches[0][0])] - else: + if matches[0][1] <= 80: # skip unmatched names print("No entry to sort:", ele) continue + idx = zen_names.index(matches[0][0]) + val = zenodo['creators'][idx] + if val not in name_matches: if val['name'] not in CREATORS_LAST: val['position'] = position @@ -78,10 +59,11 @@ val['position'] = total_names + CREATORS_LAST.index(val['name']) name_matches.append(val) - for missing in MISSING_ENTRIES: - missing['position'] = position - position += 1 - name_matches.append(missing) + for missing in zenodo['creators']: + if 'position' not in missing: + missing['position'] = position + position += 1 + name_matches.append(missing) zenodo['creators'] = sorted(name_matches, key=lambda k: k['position']) # Remove position From 21ef45edb57e03cccdb24c88b97593f4d52be043 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Tue, 8 Oct 2019 07:32:37 -0400 Subject: [PATCH 0485/1665] DOC: Update .zenodo.json ordering --- .zenodo.json | 165 ++++++++++++++++++++++++++------------------------- 1 file changed, 84 insertions(+), 81 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index ac9972984b..d4600474db 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -11,18 +11,13 @@ "orcid": "0000-0002-6533-164X" }, { - "affiliation": "Independent", - "name": "Ziegler, Erik", + "affiliation": "MIT", + "name": "Jarecka, Dorota", "orcid": "0000-0003-1857-8129" }, { - "affiliation": "The University of Iowa", - "name": "Ellis, David Gage", - "orcid": "0000-0002-3718-6836" - }, - { - "affiliation": "MIT", - "name": "Jarecka, Dorota", + "affiliation": "Independent", + "name": "Ziegler, Erik", "orcid": "0000-0003-1857-8129" }, { @@ -30,11 +25,6 @@ "name": "Johnson, Hans", "orcid": "0000-0001-9513-2660" }, - { - "affiliation": "The Laboratory for Investigative Neurophysiology (The LINE), Department of Radiology and Department of Clinical Neurosciences, Lausanne, Switzerland; Center for Biomedical Imaging (CIBM), Lausanne, Switzerland", - "name": "Notter, Michael Philipp", - "orcid": "0000-0002-5866-047X" - }, { "name": "Burns, Christopher" }, @@ -47,9 +37,19 @@ "name": "Hamalainen, Carlo", "orcid": "0000-0001-7655-3830" }, + { + "affiliation": "The Laboratory for Investigative Neurophysiology (The LINE), Department of Radiology and Department of Clinical Neurosciences, Lausanne, Switzerland; Center for Biomedical Imaging (CIBM), Lausanne, Switzerland", + "name": "Notter, Michael Philipp", + "orcid": "0000-0002-5866-047X" + }, { "name": "Yvernault, Benjamin" }, + { + "affiliation": "The University of Iowa", + "name": "Ellis, David Gage", + "orcid": "0000-0002-3718-6836" + }, { "affiliation": "Florida International University", "name": "Salo, Taylor", @@ -74,7 +74,8 @@ "name": "Wong, Jason" }, { - "name": "Modat, Marc" + "affiliation": "Concordia University", + "name": "Benderoff, Erin" }, { "affiliation": "Developer", @@ -110,6 +111,9 @@ "affiliation": "National Institutes of Health", "name": "Clark, Michael G. 
" }, + { + "name": "Modat, Marc" + }, { "affiliation": "Mayo Clinic, Neurology, Rochester, MN, USA", "name": "Dayan, Michael", @@ -138,13 +142,6 @@ "name": "Gramfort, Alexandre", "orcid": "0000-0001-9791-4404" }, - { - "name": "Berleant, Shoshana" - }, - { - "affiliation": "Concordia University", - "name": "Benderoff, Erin" - }, { "affiliation": "Dartmouth College: Hanover, NH, United States", "name": "Halchenko, Yaroslav O.", @@ -155,16 +152,14 @@ "name": "Christian, Horea", "orcid": "0000-0001-7037-2449" }, + { + "name": "Berleant, Shoshana" + }, { "affiliation": "The Centre for Addiction and Mental Health", "name": "Joseph, Michael", "orcid": "0000-0002-0068-230X" }, - { - "affiliation": "UC San Diego", - "name": "Cipollini, Ben", - "orcid": "0000-0002-7782-0790" - }, { "affiliation": "ARAMIS LAB, Brain and Spine Institute (ICM), Paris, France.", "name": "Guillon, Je\u0301re\u0301my", @@ -181,13 +176,13 @@ }, { "affiliation": "Montreal Neurological Institute and Hospital", - "name": "DuPre, Elizabeth", - "orcid": "0000-0003-1358-196X" + "name": "Markello, Ross", + "orcid": "0000-0003-1057-1336" }, { "affiliation": "Montreal Neurological Institute and Hospital", - "name": "Markello, Ross", - "orcid": "0000-0003-1057-1336" + "name": "DuPre, Elizabeth", + "orcid": "0000-0003-1358-196X" }, { "affiliation": "MIT", @@ -197,6 +192,11 @@ { "name": "Moloney, Brendan" }, + { + "affiliation": "UC San Diego", + "name": "Cipollini, Ben", + "orcid": "0000-0002-7782-0790" + }, { "affiliation": "INRIA", "name": "Varoquaux, Gael", @@ -287,9 +287,6 @@ "name": "Bottenhorn, Katherine", "orcid": "0000-0002-7796-8795" }, - { - "name": "Kent, James" - }, { "name": "Watanabe, Aimi" }, @@ -343,6 +340,9 @@ "name": "Liem, Franz", "orcid": "0000-0003-0646-4810" }, + { + "name": "Kent, James" + }, { "affiliation": "UniversityHospital Heidelberg, Germany", "name": "Kleesiek, Jens" @@ -365,11 +365,6 @@ { "name": "Ghayoor, Ali" }, - { - "affiliation": "Otto-von-Guericke-University Magdeburg, Germany", - "name": "Contier, Oliver", - "orcid": "0000-0002-2983-4709" - }, { "name": "K\u00fcttner, Ren\u00e9" }, @@ -379,6 +374,9 @@ { "name": "Millman, Jarrod" }, + { + "name": "Lai, Jeff" + }, { "name": "Zhou, Dale" }, @@ -406,9 +404,9 @@ "orcid": "0000-0002-2127-0507" }, { - "affiliation": "Leibniz Institute for Neurobiology", - "name": "Stadler, J\u00f6rg", - "orcid": "0000-0003-4313-129X" + "affiliation": "Yale University; New Haven, CT, United States", + "name": "Sisk, Lucinda M.", + "orcid": "0000-0003-4900-9770" }, { "affiliation": "Korea Advanced Institute of Science and Technology", @@ -437,16 +435,6 @@ { "name": "Hallquist, Michael" }, - { - "affiliation": "Yale University; New Haven, CT, United States", - "name": "Sisk, Lucinda M.", - "orcid": "0000-0003-4900-9770" - }, - { - "affiliation": "TIB \u2013 Leibniz Information Centre for Science and Technology and University Library, Hannover, Germany", - "name": "Leinweber, Katrin", - "orcid": "0000-0001-5135-5758" - }, { "affiliation": "Donders Institute for Brain, Cognition and Behavior, Center for Cognitive Neuroimaging", "name": "Chetverikov, Andrey", @@ -495,9 +483,14 @@ "name": "Hinds, Oliver" }, { - "affiliation": "University of Newcastle, Australia", - "name": "Cooper, Gavin", - "orcid": "0000-0002-7186-5293" + "affiliation": "National Institute on Aging, Baltimore, MD, USA", + "name": "Bilgel, Murat", + "orcid": "0000-0001-5042-7422" + }, + { + "affiliation": "TIB \u2013 Leibniz Information Centre for Science and Technology and University Library, Hannover, 
Germany", + "name": "Leinweber, Katrin", + "orcid": "0000-0001-5135-5758" }, { "name": "Inati, Souheil" @@ -506,15 +499,6 @@ "affiliation": "Boston University", "name": "Perkins, L. Nathan" }, - { - "name": "Marina, Ana" - }, - { - "name": "Mattfeld, Aaron" - }, - { - "name": "Noel, Maxime" - }, { "affiliation": "University of Amsterdam", "name": "Lukas Snoek", @@ -524,14 +508,30 @@ "affiliation": "Institute of Imaging & Computer Vision, RWTH Aachen University, Germany", "name": "Weninger, Leon" }, + { + "affiliation": "University of Newcastle, Australia", + "name": "Cooper, Gavin", + "orcid": "0000-0002-7186-5293" + }, + { + "name": "Mattfeld, Aaron" + }, { "name": "Matsubara, K" }, + { + "name": "Noel, Maxime" + }, { "affiliation": "University of Pennsylvania", "name": "Junhao WEN", "orcid": "0000-0003-2077-3070" }, + { + "affiliation": "Leibniz Institute for Neurobiology", + "name": "Stadler, J\u00f6rg", + "orcid": "0000-0003-4313-129X" + }, { "name": "Cheung, Brian" }, @@ -563,10 +563,6 @@ "name": "Gerhard, Stephan", "orcid": "0000-0003-4454-6171" }, - { - "affiliation": "University of Illinois Urbana Champaign", - "name": "Sharp, Paul" - }, { "affiliation": "Technical University Munich", "name": "Molina-Romero, Miguel", @@ -578,14 +574,14 @@ { "name": "Weinstein, Alejandro" }, + { + "name": "Tambini, Arielle" + }, { "affiliation": "Duke University", "name": "Broderick, William", "orcid": "0000-0002-8999-9003" }, - { - "name": "Saase, Victor" - }, { "name": "Rothmei, Simon" }, @@ -594,17 +590,9 @@ "name": "Andberg, Sami Kristian", "orcid": "0000-0002-5650-3964" }, - { - "name": "Harms, Robbert" - }, { "name": "Khanuja, Ranjeet" }, - { - "affiliation": "National Institute on Aging, Baltimore, MD, USA", - "name": "Bilgel, Murat", - "orcid": "0000-0001-5042-7422" - }, { "name": "Schlamp, Kai" }, @@ -620,10 +608,11 @@ "name": "Tarbert, Claire" }, { - "name": "Tambini, Arielle" + "name": "Harms, Robbert" }, { - "name": "Nickson, Thomas" + "affiliation": "University of Illinois Urbana Champaign", + "name": "Sharp, Paul" }, { "name": "Crusoe, Michael R.", @@ -676,6 +665,20 @@ { "name": "Davison, Andrew" }, + { + "affiliation": "Otto-von-Guericke-University Magdeburg, Germany", + "name": "Contier, Oliver", + "orcid": "0000-0002-2983-4709" + }, + { + "name": "Marina, Ana" + }, + { + "name": "Saase, Victor" + }, + { + "name": "Nickson, Thomas" + }, { "name": "Varada, Jan" }, From b417e47d101440a2b88dc2d348d45f6b346ec01b Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 8 Oct 2019 07:33:24 -0400 Subject: [PATCH 0486/1665] DOC: Add Olivia Stanley to Zenodo --- .zenodo.json | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.zenodo.json b/.zenodo.json index d4600474db..85938865fe 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -538,6 +538,10 @@ { "name": "Urchs, Sebastian" }, + { + "affiliation": "Dept of Medical Biophysics, Univeristy of Western Ontario", + "name": "Stanley, Olivia" + }, { "affiliation": "Department of Psychology, Stanford University; Parietal, INRIA", "name": "Durnez, Joke", From f35d7a7fd4faf0336e1850719a7164cb1b4430ca Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Tue, 8 Oct 2019 08:14:25 -0400 Subject: [PATCH 0487/1665] DOC: Restore Zenodo entries that have been dropped over time --- .zenodo.json | 35 +++++++++++++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index 85938865fe..e65bbe084a 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -369,7 +369,9 @@ "name": "K\u00fcttner, Ren\u00e9" }, { - "name": "Perez-Guevara, Martin" + "affiliation": "Neurospin/Unicog/Inserm/CEA", + "name": "Perez-Guevara, Martin Felipe", + "orcid": "0000-0003-4497-861X" }, { "name": "Millman, Jarrod" @@ -501,7 +503,7 @@ }, { "affiliation": "University of Amsterdam", - "name": "Lukas Snoek", + "name": "Snoek, Lukas", "orcid": "0000-0001-8972-204X" }, { @@ -669,6 +671,35 @@ { "name": "Davison, Andrew" }, + { + "name": "Bielievtsov, Dmytro", + "orcid": "0000-0003-3846-7696" + }, + { + "affiliation": "MPI-CBS; McGill University", + "name": "Steele, Christopher John", + "orcid": "0000-0003-1656-7928" + }, + { + "affiliation": "State Key Laboratory of Cognitive Neuroscience and Learning & IDG/McGovern Institute for Brain Research, Beijing Normal University, Beijing, China", + "name": "Huang, Lijie", + "orcid": "0000-0002-9910-5069" + }, + { + "affiliation": "Athinoula A. Martinos Center for Biomedical Imaging, Department of Radiology, Massachusetts General Hospital, Charlestown, MA, USA", + "name": "Gonzalez, Ivan", + "orcid": "0000-0002-6451-6909" + }, + { + "affiliation": "Mayo Clinic", + "name": "Warner, Joshua", + "orcid": "0000-0003-3579-4835" + }, + { + "affiliation": "Max Planck Research Group for Neuroanatomy & Connectivity, Max Planck Institute for Human Cognitive and Brain Sciences, Leipzig, Germany", + "name": "Margulies, Daniel S.", + "orcid": "0000-0002-8880-9204" + }, { "affiliation": "Otto-von-Guericke-University Magdeburg, Germany", "name": "Contier, Oliver", From ab78dbb27495a75992f64c855dfdb76fc3a7facd Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 8 Oct 2019 09:06:57 -0400 Subject: [PATCH 0488/1665] MNT: Version 1.3.0-rc1.post-dev --- nipype/info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/info.py b/nipype/info.py index aa8f460688..b71cdfa9b9 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -9,7 +9,7 @@ # nipype version information # Remove -dev for release -__version__ = '1.3.0-rc1' +__version__ = '1.3.0-rc1.post-dev' def get_nipype_gitversion(): From 3e491099f8e5fd228db1f844c2b7fb3d038ec0a7 Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Tue, 8 Oct 2019 10:09:23 -0400 Subject: [PATCH 0489/1665] Allow nipype.cfg in cwd to be read even if ~/.nipype does not exist Fixes a bug where the nipype.cfg in the current directory would not be read if the folder ~/.nipype does not exist. 
--- nipype/utils/config.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nipype/utils/config.py b/nipype/utils/config.py index 219215e0c8..c028ea326f 100644 --- a/nipype/utils/config.py +++ b/nipype/utils/config.py @@ -104,9 +104,8 @@ def __init__(self, *args, **kwargs): self._display = None self._resource_monitor = None - if os.path.exists(config_dir): - self._config.read( - [os.path.join(config_dir, 'nipype.cfg'), 'nipype.cfg']) + self._config.read( + [os.path.join(config_dir, 'nipype.cfg'), 'nipype.cfg']) for option in CONFIG_DEPRECATIONS: for section in ['execution', 'logging', 'monitoring']: From 0704f7ec60c1b6c954f81f392587ce524aec3e66 Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Tue, 8 Oct 2019 10:22:16 -0400 Subject: [PATCH 0490/1665] Allow nipype configuration directory to be specified with NIPYPECONFDIR --- nipype/utils/config.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nipype/utils/config.py b/nipype/utils/config.py index 219215e0c8..228e9933e0 100644 --- a/nipype/utils/config.py +++ b/nipype/utils/config.py @@ -97,7 +97,9 @@ def __init__(self, *args, **kwargs): self._config = configparser.ConfigParser() self._cwd = None - config_dir = os.path.expanduser('~/.nipype') + config_dir = os.path.expanduser( + os.getenv('NIPYPECONFDIR', + default='~/.nipype')) self.data_file = os.path.join(config_dir, 'nipype.json') self.set_default_config() From edcb73cb931da3038d687121f56d801e3e066cba Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Tue, 8 Oct 2019 10:37:18 -0400 Subject: [PATCH 0491/1665] add FileExistsError to filemanipy for python 2 support --- nipype/interfaces/io.py | 6 +++--- nipype/utils/filemanip.py | 8 ++++++++ 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index f752f09252..ebf2ca5efc 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -28,12 +28,12 @@ import tempfile from os.path import join, dirname from warnings import warn -import errno from .. import config, logging from ..utils.filemanip import ( copyfile, simplify_list, ensure_list, - get_related_files, split_filename) + get_related_files, split_filename, + FileExistsError) from ..utils.misc import human_order_sorted, str2bool from .base import ( TraitedSpec, traits, Str, File, Directory, BaseInterface, InputMultiPath, @@ -2883,7 +2883,7 @@ class ExportFile(SimpleInterface): def _run_interface(self, runtime): if not self.inputs.clobber and op.exists(self.inputs.out_file): - raise FileExistsError(errno.EEXIST, 'File %s exists' % self.inputs.out_file) + raise FileExistsError(self.inputs.out_file) if not op.isabs(self.inputs.out_file): raise ValueError('Out_file must be an absolute path.') if (self.inputs.check_extension and diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 1cdd1e9676..583357c148 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -51,6 +51,14 @@ def __init__(self, path): super(FileNotFoundError, self).__init__( 2, 'No such file or directory', '%s' % path) + class FileExistsError(OSError): # noqa + """Defines the exception for Python 2.""" + + def __init__(self, path): + """Initialize the exception.""" + super(FileExistsError, self).__init__( + 17, 'File or directory exists', '%s' % path) + USING_PATHLIB2 = False try: From 72ac8a5a723c075c4c75a821f5ac80ea071680e1 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Tue, 8 Oct 2019 10:42:33 -0400 Subject: [PATCH 0492/1665] MNT: Missed Py2 cleanups --- nipype/interfaces/base/core.py | 1 - .../base/tests/test_traits_extension.py | 2 -- nipype/interfaces/slicer/generate_classes.py | 2 +- .../tests/test_auto_SEMLikeCommandLine.py | 18 ------------------ setup.py | 1 - 5 files changed, 1 insertion(+), 23 deletions(-) delete mode 100644 nipype/interfaces/tests/test_auto_SEMLikeCommandLine.py diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index c87be8c8d5..126fd51a8a 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -21,7 +21,6 @@ import sys import simplejson as json from dateutil.parser import parse as parseutc -from future import standard_library from traits.trait_errors import TraitError from ... import config, logging, LooseVersion diff --git a/nipype/interfaces/base/tests/test_traits_extension.py b/nipype/interfaces/base/tests/test_traits_extension.py index 8138d691f3..ac9c36adf7 100644 --- a/nipype/interfaces/base/tests/test_traits_extension.py +++ b/nipype/interfaces/base/tests/test_traits_extension.py @@ -1,8 +1,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Check the resolving/rebasing feature of ``BasePath``s.""" -from __future__ import print_function, unicode_literals - from ... import base as nib from ..traits_extension import rebase_path_traits, resolve_path_traits, Path diff --git a/nipype/interfaces/slicer/generate_classes.py b/nipype/interfaces/slicer/generate_classes.py index 08d0030317..371c957acd 100644 --- a/nipype/interfaces/slicer/generate_classes.py +++ b/nipype/interfaces/slicer/generate_classes.py @@ -36,7 +36,7 @@ def add_class_to_package(class_codes, class_names, module_name, package_dir): \"\"\"Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.\"\"\"\n\n""" ) - imports = """from __future__ import (print_function, division, unicode_literals, + imports = """\ from ..base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath) import os\n\n\n""" diff --git a/nipype/interfaces/tests/test_auto_SEMLikeCommandLine.py b/nipype/interfaces/tests/test_auto_SEMLikeCommandLine.py deleted file mode 100644 index 7777a8443e..0000000000 --- a/nipype/interfaces/tests/test_auto_SEMLikeCommandLine.py +++ /dev/null @@ -1,18 +0,0 @@ -# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals -from ..base import SEMLikeCommandLine - - -def test_SEMLikeCommandLine_inputs(): - input_map = dict(args=dict(argstr='%s', - ), - environ=dict(nohash=True, - usedefault=True, - ), - ) - inputs = SEMLikeCommandLine.input_spec() - - for key, metadata in list(input_map.items()): - for metakey, value in list(metadata.items()): - assert getattr(inputs.traits()[key], metakey) == value - diff --git a/setup.py b/setup.py index 573de9b663..b4239da30b 100755 --- a/setup.py +++ b/setup.py @@ -16,7 +16,6 @@ from glob import glob import os from os.path import join as pjoin -from io import open # Commit hash writing, and dependency checking from setuptools.command.build_py import build_py From 7b9c639327417e125f2f74776f58feb873287d1e Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Tue, 8 Oct 2019 10:42:47 -0400 Subject: [PATCH 0493/1665] MNT: Drop Py2 compatibility for tools/ --- tools/apigen.py | 4 ---- tools/build_interface_docs.py | 1 - tools/build_modref_templates.py | 2 -- tools/ex2rst | 4 ---- tools/github.py | 4 ---- tools/gitwash_dumper.py | 4 ---- tools/interfacedocgen.py | 3 --- tools/make_examples.py | 3 --- tools/run_examples.py | 1 - 9 files changed, 26 deletions(-) diff --git a/tools/apigen.py b/tools/apigen.py index 13609ba07f..8388dc7a59 100644 --- a/tools/apigen.py +++ b/tools/apigen.py @@ -20,10 +20,6 @@ PyMVPA project, which we've adapted for NIPY use. PyMVPA is an MIT-licensed project. """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import object, open - import os import re diff --git a/tools/build_interface_docs.py b/tools/build_interface_docs.py index 37b99cb476..7da2f40e0d 100755 --- a/tools/build_interface_docs.py +++ b/tools/build_interface_docs.py @@ -3,7 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Script to auto-generate interface docs. """ -from __future__ import print_function, unicode_literals # stdlib imports import os import sys diff --git a/tools/build_modref_templates.py b/tools/build_modref_templates.py index 18a0372824..0b06ea9235 100755 --- a/tools/build_modref_templates.py +++ b/tools/build_modref_templates.py @@ -3,8 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Script to auto-generate our API docs. """ - -from __future__ import print_function # stdlib imports import os import sys diff --git a/tools/ex2rst b/tools/ex2rst index 140b7d5aa5..df24df0340 100755 --- a/tools/ex2rst +++ b/tools/ex2rst @@ -11,10 +11,6 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Helper to automagically generate ReST versions of examples""" -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import open, str - import os import sys import re diff --git a/tools/github.py b/tools/github.py index f33beb48a3..14aa4a6620 100644 --- a/tools/github.py +++ b/tools/github.py @@ -1,8 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals -from future import standard_library -standard_library.install_aliases() -from builtins import open import http.client import inspect import simplejson diff --git a/tools/gitwash_dumper.py b/tools/gitwash_dumper.py index e5a26ba7a4..c86a0689bd 100755 --- a/tools/gitwash_dumper.py +++ b/tools/gitwash_dumper.py @@ -1,9 +1,5 @@ #!/usr/bin/env python ''' Checkout gitwash repo into directory and do search replace on name ''' -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import open - import os from os.path import join as pjoin import shutil diff --git a/tools/interfacedocgen.py b/tools/interfacedocgen.py index f73375827f..cf85949077 100644 --- a/tools/interfacedocgen.py +++ b/tools/interfacedocgen.py @@ -20,9 +20,6 @@ PyMVPA project, which we've adapted for NIPY use. PyMVPA is an MIT-licensed project.""" -from __future__ import print_function, unicode_literals -from builtins import object, open - # Stdlib imports import inspect import os diff --git a/tools/make_examples.py b/tools/make_examples.py index fe9f7880c8..32a7ca31b5 100755 --- a/tools/make_examples.py +++ b/tools/make_examples.py @@ -3,9 +3,6 @@ This also creates the index.rst file appropriately, makes figures, etc. 
""" -from __future__ import (print_function, division, unicode_literals, - absolute_import) -from builtins import open from past.builtins import execfile # ----------------------------------------------------------------------------- # Library imports diff --git a/tools/run_examples.py b/tools/run_examples.py index 96847cbe25..11b69db6c0 100644 --- a/tools/run_examples.py +++ b/tools/run_examples.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import print_function import os import sys from shutil import rmtree From 1725f5181fc9affcc266d387a6eb8f6137dc4969 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 8 Oct 2019 11:24:23 -0400 Subject: [PATCH 0494/1665] NEP29: Update numpy min version to 1.12 --- nipype/info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/info.py b/nipype/info.py index b71cdfa9b9..bd81ca482f 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -102,7 +102,7 @@ def get_nipype_gitversion(): NIBABEL_MIN_VERSION = '2.1.0' NETWORKX_MIN_VERSION = '1.9' NETWORKX_MAX_VERSION_27 = '2.2' -NUMPY_MIN_VERSION = '1.9.0' +NUMPY_MIN_VERSION = '1.12' # Numpy bug in python 3.7: # https://www.opensourceanswers.com/blog/you-shouldnt-use-python-37-for-data-science-right-now.html NUMPY_MIN_VERSION_37 = '1.15.3' From b25c9c3328e9f2c49383734fbd85aa16d970d54e Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Tue, 8 Oct 2019 17:15:04 -0400 Subject: [PATCH 0495/1665] bugfix. actually import FileExistsError --- nipype/utils/filemanip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 1e134a3333..d846ce4bca 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -41,7 +41,7 @@ PY3 = sys.version_info[0] >= 3 try: - from builtins import FileNotFoundError + from builtins import FileNotFoundError, FileExistsError except ImportError: # PY27 class FileNotFoundError(OSError): # noqa """Defines the exception for Python 2.""" From 006cdcfed7704fd553a236d74e7faa75793884a9 Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Wed, 9 Oct 2019 09:46:39 -0400 Subject: [PATCH 0496/1665] import FileExistsError in test --- nipype/interfaces/tests/test_io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/tests/test_io.py b/nipype/interfaces/tests/test_io.py index 5013f04942..1b83f1c3c6 100644 --- a/nipype/interfaces/tests/test_io.py +++ b/nipype/interfaces/tests/test_io.py @@ -20,7 +20,7 @@ import nipype.interfaces.io as nio from nipype.interfaces.base.traits_extension import isdefined from nipype.interfaces.base import Undefined, TraitError -from nipype.utils.filemanip import dist_is_editable +from nipype.utils.filemanip import dist_is_editable, FileExistsError # Check for boto noboto = False From 5737d212a277efcbe0a36226d0f2e59a98c81ebc Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Wed, 9 Oct 2019 11:14:13 -0400 Subject: [PATCH 0497/1665] Update nipype/interfaces/tests/test_io.py Co-Authored-By: Chris Markiewicz --- nipype/interfaces/tests/test_io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/tests/test_io.py b/nipype/interfaces/tests/test_io.py index 1b83f1c3c6..bd1af89c1d 100644 --- a/nipype/interfaces/tests/test_io.py +++ b/nipype/interfaces/tests/test_io.py @@ -687,7 +687,7 @@ def test_ExportFile(tmp_path): testin.write_text('test string') i = nio.ExportFile() i.inputs.in_file = testin - i.inputs.out_file = tmp_path / 'out.tsv' + i.inputs.out_file = str(tmp_path / 'out.tsv') 
i.inputs.check_extension = True with pytest.raises(RuntimeError): i.run() From 47bb8e8376d1847fc377d41ddf82a1911227ed79 Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Wed, 9 Oct 2019 11:14:22 -0400 Subject: [PATCH 0498/1665] Update nipype/interfaces/tests/test_io.py Co-Authored-By: Chris Markiewicz --- nipype/interfaces/tests/test_io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/tests/test_io.py b/nipype/interfaces/tests/test_io.py index bd1af89c1d..de0452d0a0 100644 --- a/nipype/interfaces/tests/test_io.py +++ b/nipype/interfaces/tests/test_io.py @@ -686,7 +686,7 @@ def test_ExportFile(tmp_path): testin = tmp_path / 'in.txt' testin.write_text('test string') i = nio.ExportFile() - i.inputs.in_file = testin + i.inputs.in_file = str(testin) i.inputs.out_file = str(tmp_path / 'out.tsv') i.inputs.check_extension = True with pytest.raises(RuntimeError): From 1ba9ca1480354f744cb97a76e343303a4254e305 Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Wed, 9 Oct 2019 14:26:40 -0400 Subject: [PATCH 0499/1665] add docstring to ExportFile --- nipype/interfaces/io.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index 6d2ac1d970..0eaa2f9726 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -2878,6 +2878,27 @@ class ExportFileOutputSpec(TraitedSpec): class ExportFile(SimpleInterface): + """Copy in_file to out_file. + + This interface copies an input file to a named output file. + This is useful to save files to a specified location + (as opposed to using DataSink). + + Examples + -------- + + A trivial example that copies temporary_file.nii.gz + to sub1_out.nii.gz. (A more realistic example would set + in_file as the output of another Node.) + + >>> from nipype.interfaces.io import ExportFile + >>> import os.path as op + >>> ef = Node(ExportFile(), "export") + >>> ef.inputs.in_file = "temporary_file.nii.gz" + >>> ef.inputs.out_file = op.abspath("output_folder/sub1_out.nii.gz") + >>> ef.run() + + """ input_spec = ExportFileInputSpec output_spec = ExportFileOutputSpec From d338ddd0a7475b6e4bd4d65f4e16fc4e6d6f8fc6 Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Fri, 11 Oct 2019 14:28:34 -0700 Subject: [PATCH 0500/1665] ENH: Minimize the number of calls to ``_load_results`` when populating inputs This PR attempts to alleviate #3014 by opening the result file of a source node only once when that node feeds into several inputs of the node collecting inputs. Before these changes, a call to ``_load_results`` was issued for every input field that needed to collect its inputs from a past node. Now, all the inputs comming from the same node are put together and the ``_load_results`` function is called just once. The PR also modifies the manner the ``AttributeError``s (#3014) were handled to make it easier to spot whether an error occured while loading results araises when gathering the inputs of a node-to-be-run or elsewhere. 
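In outline, the change buckets the entries of ``input_source`` by the results file they point to, so each source node's result is deserialized only once. A simplified sketch of that pattern (the names are illustrative; the real method also evaluates connect functions for tuple connections and handles ``Undefined`` values):

    from collections import defaultdict

    def gather_inputs(input_source, load_resultfile):
        # input_source maps an input name to (results_file, connection);
        # load_resultfile stands in for nipype's _load_resultfile helper.
        by_file = defaultdict(list)
        for key, (results_file, connection) in input_source.items():
            by_file[results_file].append((key, connection))

        gathered = {}
        for results_file, connections in by_file.items():
            # one load per source node instead of one per connected input
            outputs = load_resultfile(results_file).outputs
            for key, connection in connections:
                gathered[key] = getattr(outputs, connection)
        return gathered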
--- nipype/pipeline/engine/nodes.py | 72 ++++++++++++++++++--------------- 1 file changed, 39 insertions(+), 33 deletions(-) diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index bf58934f5b..3fab9cfc2f 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -9,7 +9,7 @@ absolute_import) from builtins import range, str, bytes, open -from collections import OrderedDict +from collections import OrderedDict, defaultdict import os import os.path as op @@ -510,7 +510,8 @@ def _get_hashval(self): return self._hashed_inputs, self._hashvalue def _get_inputs(self): - """Retrieve inputs from pointers to results file + """ + Retrieve inputs from pointers to results files. This mechanism can be easily extended/replaced to retrieve data from other data sources (e.g., XNAT, HTTP, etc.,.) @@ -518,41 +519,46 @@ def _get_inputs(self): if self._got_inputs: return - logger.debug('Setting node inputs') + prev_results = defaultdict(list) for key, info in list(self.input_source.items()): - logger.debug('input: %s', key) - results_file = info[0] - logger.debug('results file: %s', results_file) - outputs = _load_resultfile(results_file).outputs + prev_results[info[0]].append((key, info[1])) + + logger.debug('[Node] Setting %d connected inputs from %d previous nodes.', + len(self.input_source), len(prev_results)) + + for results_fname, connections in list(prev_results.items()): + outputs = None + try: + outputs = _load_resultfile(results_fname).outputs + except AttributeError as e: + logger.critical('%s', e) + if outputs is None: raise RuntimeError("""\ -Error populating the input "%s" of node "%s": the results file of the source node \ -(%s) does not contain any outputs.""" % (key, self.name, results_file)) - output_value = Undefined - if isinstance(info[1], tuple): - output_name = info[1][0] - value = getattr(outputs, output_name) - if isdefined(value): - output_value = evaluate_connect_function( - info[1][1], info[1][2], value) - else: - output_name = info[1] +Error populating the inpus of node "%s": the results file of the source node \ +(%s) does not contain any outputs.""" % (self.name, results_fname)) + + for key, conn in connections: + output_value = Undefined + if isinstance(conn, tuple): + value = getattr(outputs, conn[0]) + if isdefined(value): + output_value = evaluate_connect_function( + conn[1], conn[2], value) + else: + output_value = getattr(outputs, conn) + try: - output_value = outputs.trait_get()[output_name] - except AttributeError: - output_value = outputs.dictcopy()[output_name] - logger.debug('output: %s', output_name) - try: - self.set_input(key, deepcopy(output_value)) - except traits.TraitError as e: - msg = ( - e.args[0], '', 'Error setting node input:', - 'Node: %s' % self.name, 'input: %s' % key, - 'results_file: %s' % results_file, - 'value: %s' % str(output_value), - ) - e.args = ('\n'.join(msg), ) - raise + self.set_input(key, deepcopy(output_value)) + except traits.TraitError as e: + msg = ( + e.args[0], '', 'Error setting node input:', + 'Node: %s' % self.name, 'input: %s' % key, + 'results_file: %s' % results_fname, + 'value: %s' % str(output_value), + ) + e.args = ('\n'.join(msg), ) + raise # Successfully set inputs self._got_inputs = True From 2e5436d31a7eecf52fc94fee8d6e01539f773e46 Mon Sep 17 00:00:00 2001 From: oesteban Date: Fri, 11 Oct 2019 16:03:30 -0700 Subject: [PATCH 0501/1665] fix: one typo in error message, exit early if no connections are to be evaluated --- nipype/pipeline/engine/nodes.py | 13 
+++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index 3fab9cfc2f..eeb47f6d7a 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -516,15 +516,20 @@ def _get_inputs(self): This mechanism can be easily extended/replaced to retrieve data from other data sources (e.g., XNAT, HTTP, etc.,.) """ - if self._got_inputs: + if self._got_inputs: # Inputs cached + return + + if not self.input_source: # No previous nodes + self._got_inputs = True return prev_results = defaultdict(list) for key, info in list(self.input_source.items()): prev_results[info[0]].append((key, info[1])) - logger.debug('[Node] Setting %d connected inputs from %d previous nodes.', - len(self.input_source), len(prev_results)) + logger.debug( + '[Node] Setting %d connected inputs of node "%s" from %d previous nodes.', + len(self.input_source), self.name, len(prev_results)) for results_fname, connections in list(prev_results.items()): outputs = None @@ -535,7 +540,7 @@ def _get_inputs(self): if outputs is None: raise RuntimeError("""\ -Error populating the inpus of node "%s": the results file of the source node \ +Error populating the inputs of node "%s": the results file of the source node \ (%s) does not contain any outputs.""" % (self.name, results_fname)) for key, conn in connections: From ec023bfb1906ea6378593a7323e567e0f1e080ec Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Thu, 17 Oct 2019 16:45:25 -0400 Subject: [PATCH 0502/1665] Update nipype/interfaces/io.py Co-Authored-By: Chris Markiewicz --- nipype/interfaces/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index 0eaa2f9726..8dd59d60dc 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -2878,7 +2878,7 @@ class ExportFileOutputSpec(TraitedSpec): class ExportFile(SimpleInterface): - """Copy in_file to out_file. + """ Export a file to an absolute path This interface copies an input file to a named output file. This is useful to save files to a specified location From 1e0d7a1db2390e14f61a605fa8f27ee0b3d0fbd0 Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Thu, 17 Oct 2019 16:45:36 -0400 Subject: [PATCH 0503/1665] Update nipype/interfaces/io.py Co-Authored-By: Chris Markiewicz --- nipype/interfaces/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index 8dd59d60dc..dd66d1e3dd 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -2881,7 +2881,7 @@ class ExportFile(SimpleInterface): """ Export a file to an absolute path This interface copies an input file to a named output file. - This is useful to save files to a specified location + This is useful to save individual files to a specific location, (as opposed to using DataSink). Examples From 4246b029dd2e61b51ec56b609917da29e21834bd Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Thu, 17 Oct 2019 16:45:45 -0400 Subject: [PATCH 0504/1665] Update nipype/interfaces/io.py Co-Authored-By: Chris Markiewicz --- nipype/interfaces/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index dd66d1e3dd..fde8b84015 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -2882,7 +2882,7 @@ class ExportFile(SimpleInterface): This interface copies an input file to a named output file. 
This is useful to save individual files to a specific location, - (as opposed to using DataSink). + instead of more flexible interfaces like DataSink. Examples -------- From fe3dd0754338d69904a6e7e1882f63419d1dd2bd Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Thu, 17 Oct 2019 16:46:10 -0400 Subject: [PATCH 0505/1665] Update nipype/interfaces/io.py Co-Authored-By: Chris Markiewicz --- nipype/interfaces/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index fde8b84015..51b0b31904 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -2893,7 +2893,7 @@ class ExportFile(SimpleInterface): >>> from nipype.interfaces.io import ExportFile >>> import os.path as op - >>> ef = Node(ExportFile(), "export") + >>> ef = ExportFile() >>> ef.inputs.in_file = "temporary_file.nii.gz" >>> ef.inputs.out_file = op.abspath("output_folder/sub1_out.nii.gz") >>> ef.run() From 7ed854ff2abcf76a68b3e2b40124ea3342aa0903 Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Thu, 17 Oct 2019 16:55:31 -0400 Subject: [PATCH 0506/1665] bugfixes in docstring --- nipype/interfaces/io.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index 51b0b31904..801bc73d71 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -2887,14 +2887,11 @@ class ExportFile(SimpleInterface): Examples -------- - A trivial example that copies temporary_file.nii.gz - to sub1_out.nii.gz. (A more realistic example would set - in_file as the output of another Node.) - >>> from nipype.interfaces.io import ExportFile >>> import os.path as op >>> ef = ExportFile() - >>> ef.inputs.in_file = "temporary_file.nii.gz" + >>> ef.inputs.in_file = "T1.nii.gz" + >>> os.mkdir("output_folder") >>> ef.inputs.out_file = op.abspath("output_folder/sub1_out.nii.gz") >>> ef.run() From 7a3c7f61243bda06338b68907b80a66efbb85609 Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Mon, 21 Oct 2019 14:27:55 -0400 Subject: [PATCH 0507/1665] Update nipype/interfaces/io.py Co-Authored-By: Chris Markiewicz --- nipype/interfaces/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index 801bc73d71..46446f1a1c 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -2893,7 +2893,7 @@ class ExportFile(SimpleInterface): >>> ef.inputs.in_file = "T1.nii.gz" >>> os.mkdir("output_folder") >>> ef.inputs.out_file = op.abspath("output_folder/sub1_out.nii.gz") - >>> ef.run() + >>> res = ef.run() """ input_spec = ExportFileInputSpec From 1c26d1d0a31f0053d75643048999562e776ff9ef Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Mon, 21 Oct 2019 14:28:02 -0400 Subject: [PATCH 0508/1665] Update nipype/interfaces/tests/test_io.py Co-Authored-By: Chris Markiewicz --- nipype/interfaces/tests/test_io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/tests/test_io.py b/nipype/interfaces/tests/test_io.py index de0452d0a0..9cb7e05075 100644 --- a/nipype/interfaces/tests/test_io.py +++ b/nipype/interfaces/tests/test_io.py @@ -694,7 +694,7 @@ def test_ExportFile(tmp_path): i.inputs.check_extension = False i.run() assert (tmp_path / 'out.tsv').read_text() == 'test string' - i.inputs.out_file = tmp_path / 'out.txt' + i.inputs.out_file = str(tmp_path / 'out.txt') i.inputs.check_extension = True i.run() assert (tmp_path / 'out.txt').read_text() == 'test string' From a23e16da25b7d7e664cfd5702b5143fd6d80a4be Mon Sep 
17 00:00:00 2001 From: Steven Tilley Date: Mon, 21 Oct 2019 14:30:08 -0400 Subject: [PATCH 0509/1665] Add @effigies suggestion --- nipype/interfaces/io.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index 46446f1a1c..556d85db4d 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -2894,6 +2894,8 @@ class ExportFile(SimpleInterface): >>> os.mkdir("output_folder") >>> ef.inputs.out_file = op.abspath("output_folder/sub1_out.nii.gz") >>> res = ef.run() + >>> os.path.exists(res.outputs.out_file) + True """ input_spec = ExportFileInputSpec From 3695d36e1e66a57089e6b7ecfc402a9d2fe2c925 Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Tue, 22 Oct 2019 10:49:43 -0400 Subject: [PATCH 0510/1665] add test_auto_ExportFile.py --- .../interfaces/tests/test_auto_ExportFile.py | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 nipype/interfaces/tests/test_auto_ExportFile.py diff --git a/nipype/interfaces/tests/test_auto_ExportFile.py b/nipype/interfaces/tests/test_auto_ExportFile.py new file mode 100644 index 0000000000..335fe41372 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_ExportFile.py @@ -0,0 +1,30 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..io import ExportFile + + +def test_ExportFile_inputs(): + input_map = dict( + check_extension=dict(), + clobber=dict(), + in_file=dict( + extensions=None, + mandatory=True, + ), + out_file=dict( + extensions=None, + mandatory=True, + ), + ) + inputs = ExportFile.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ExportFile_outputs(): + output_map = dict(out_file=dict(extensions=None, ), ) + outputs = ExportFile.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value From 760f603bd3b3b303628aaa63a5aabd2a0d402185 Mon Sep 17 00:00:00 2001 From: Adam Kimbler Date: Wed, 23 Oct 2019 13:59:44 -0400 Subject: [PATCH 0511/1665] [FIX} Fixed typo in QWarpInputSpec Trait description Fixed a typo in QWarpInputSpecs trait description for 'noXdis', 'noYdis', and 'noZdis'. --- nipype/interfaces/afni/preprocess.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index d31541412f..a5a992452d 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -3387,11 +3387,11 @@ class QwarpInputSpec(AFNICommandInputSpec): exists=True, copyfile=False) noXdis = traits.Bool( - desc='Warp will not displace in x directoin', argstr='-noXdis') + desc='Warp will not displace in x direction', argstr='-noXdis') noYdis = traits.Bool( - desc='Warp will not displace in y directoin', argstr='-noYdis') + desc='Warp will not displace in y direction', argstr='-noYdis') noZdis = traits.Bool( - desc='Warp will not displace in z directoin', argstr='-noZdis') + desc='Warp will not displace in z direction', argstr='-noZdis') iniwarp = traits.List( File(exists=True, copyfile=False), desc='A dataset with an initial nonlinear warp to use.' 
From 3ea29c15d4574a43d25d3524431c817cc5786740 Mon Sep 17 00:00:00 2001 From: Adam Kimbler Date: Fri, 25 Oct 2019 13:33:05 -0400 Subject: [PATCH 0512/1665] Update preprocess.py --- nipype/interfaces/afni/preprocess.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index a5a992452d..96d695e101 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -24,7 +24,7 @@ class CentralityInputSpec(AFNICommandInputSpec): - """Common input spec class for all centrality-related commands + """Common input spec class for all centrality-related commands """ mask = File( From 9ffc797be5c65df3ddd41519de0e4b9dde3860b3 Mon Sep 17 00:00:00 2001 From: Adam Kimbler Date: Fri, 25 Oct 2019 13:33:19 -0400 Subject: [PATCH 0513/1665] Update preprocess.py --- nipype/interfaces/afni/preprocess.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index 96d695e101..a5a992452d 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -24,7 +24,7 @@ class CentralityInputSpec(AFNICommandInputSpec): - """Common input spec class for all centrality-related commands + """Common input spec class for all centrality-related commands """ mask = File( From d5d22be064f7e93a8423e452c0845ae735bf1c4e Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 26 Oct 2019 09:20:17 -0400 Subject: [PATCH 0514/1665] ENH: Always fsync pickle files --- nipype/utils/filemanip.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index d846ce4bca..057178f4e4 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -790,9 +790,9 @@ def read_stream(stream, logger=None, encoding=None): def savepkl(filename, record, versioning=False): - pklopen = gzip.open if filename.endswith('.pklz') else open with SoftFileLock('%s.lock' % filename): - with pklopen(filename, 'wb') as pkl_file: + with open(filename, 'wb') as fobj: + pkl_file = gzip.GzipFile(fileobj=fobj) if filename.endswith('.pklz') else fobj if versioning: from nipype import __version__ as version metadata = json.dumps({'version': version}) @@ -801,6 +801,9 @@ def savepkl(filename, record, versioning=False): pkl_file.write('\n'.encode('utf-8')) pickle.dump(record, pkl_file) + # Pickle files need to be available immediately, so force a sync + fobj.flush() + os.fsync(fobj.fileno()) rst_levels = ['=', '-', '~', '+'] From a07c138cff91b239bb292c729e9e07b0fa9fb4e2 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Mon, 28 Oct 2019 22:26:12 -0400 Subject: [PATCH 0515/1665] ENH: Add sync option to savepkl, sync when saving result file --- nipype/pipeline/engine/utils.py | 8 ++++---- nipype/utils/filemanip.py | 25 ++++++++++++------------- 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index a5245dda48..218c30850b 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -236,17 +236,17 @@ def save_resultfile(result, cwd, name, rebase=None): if result.outputs is None: logger.warning('Storing result file without outputs') - savepkl(resultsfile, result) + savepkl(resultsfile, result, sync=True) return try: output_names = result.outputs.copyable_trait_names() except AttributeError: logger.debug('Storing non-traited results, skipping rebase of paths') - savepkl(resultsfile, result) + savepkl(resultsfile, result, sync=True) return if not rebase: - savepkl(resultsfile, result) + savepkl(resultsfile, result, sync=True) return backup_traits = {} @@ -262,7 +262,7 @@ def save_resultfile(result, cwd, name, rebase=None): backup_traits[key] = old val = rebase_path_traits(result.outputs.trait(key), old, cwd) setattr(result.outputs, key, val) - savepkl(resultsfile, result) + savepkl(resultsfile, result, sync=True) finally: # Restore resolved paths from the outputs dict no matter what for key, val in list(backup_traits.items()): diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 057178f4e4..ca82778e7d 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -789,19 +789,18 @@ def read_stream(stream, logger=None, encoding=None): return out.splitlines() -def savepkl(filename, record, versioning=False): - with SoftFileLock('%s.lock' % filename): - with open(filename, 'wb') as fobj: - pkl_file = gzip.GzipFile(fileobj=fobj) if filename.endswith('.pklz') else fobj - if versioning: - from nipype import __version__ as version - metadata = json.dumps({'version': version}) - - pkl_file.write(metadata.encode('utf-8')) - pkl_file.write('\n'.encode('utf-8')) - - pickle.dump(record, pkl_file) - # Pickle files need to be available immediately, so force a sync +def savepkl(filename, record, versioning=False, sync=False): + with open(filename, 'wb') as fobj: + pkl_file = gzip.GzipFile(fileobj=fobj) if filename.endswith('.pklz') else fobj + if versioning: + from nipype import __version__ as version + metadata = json.dumps({'version': version}) + + pkl_file.write(metadata.encode('utf-8')) + pkl_file.write('\n'.encode('utf-8')) + + pickle.dump(record, pkl_file) + if sync: fobj.flush() os.fsync(fobj.fileno()) From e97b7f9bc637824090d8cb76174ef719c5a72c81 Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Tue, 5 Nov 2019 18:47:44 -0500 Subject: [PATCH 0516/1665] add delay to read results --- nipype/utils/filemanip.py | 329 +++++++++++++++++++++----------------- 1 file changed, 183 insertions(+), 146 deletions(-) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index ca82778e7d..21ba4cf5b8 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -3,8 +3,7 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Miscellaneous file manipulation functions """ -from __future__ import (print_function, division, unicode_literals, - absolute_import) +from __future__ import print_function, division, unicode_literals, absolute_import import sys import pickle @@ -21,35 +20,35 @@ import contextlib import posixpath import simplejson as json -from filelock import 
SoftFileLock +from time import sleep, time +from glob import glob from builtins import str, bytes, open from .. import logging, config from .misc import is_container from future import standard_library + standard_library.install_aliases() -fmlogger = logging.getLogger('nipype.utils') +fmlogger = logging.getLogger("nipype.utils") -related_filetype_sets = [ - ('.hdr', '.img', '.mat'), - ('.nii', '.mat'), - ('.BRIK', '.HEAD'), -] +related_filetype_sets = [(".hdr", ".img", ".mat"), (".nii", ".mat"), (".BRIK", ".HEAD")] PY3 = sys.version_info[0] >= 3 try: from builtins import FileNotFoundError, FileExistsError except ImportError: # PY27 + class FileNotFoundError(OSError): # noqa """Defines the exception for Python 2.""" def __init__(self, path): """Initialize the exception.""" super(FileNotFoundError, self).__init__( - 2, 'No such file or directory', '%s' % path) + 2, "No such file or directory", "%s" % path + ) class FileExistsError(OSError): # noqa """Defines the exception for Python 2.""" @@ -57,7 +56,8 @@ class FileExistsError(OSError): # noqa def __init__(self, path): """Initialize the exception.""" super(FileExistsError, self).__init__( - 17, 'File or directory exists', '%s' % path) + 17, "File or directory exists", "%s" % path + ) USING_PATHLIB2 = False @@ -65,6 +65,7 @@ def __init__(self, path): from pathlib import Path except ImportError: from pathlib2 import Path + USING_PATHLIB2 = True @@ -104,11 +105,12 @@ def path_mkdir(path, mode=0o777, parents=False, exist_ok=False): return os.mkdir(str(path), mode=mode) -if not hasattr(Path, 'write_text'): +if not hasattr(Path, "write_text"): # PY34 - Path does not have write_text def _write_text(self, text): - with open(str(self), 'w') as f: + with open(str(self), "w") as f: f.write(text) + Path.write_text = _write_text @@ -152,8 +154,7 @@ def split_filename(fname): ext = None for special_ext in special_extensions: ext_len = len(special_ext) - if (len(fname) > ext_len) and \ - (fname[-ext_len:].lower() == special_ext.lower()): + if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()): ext = fname[-ext_len:] fname = fname[:-ext_len] break @@ -181,11 +182,11 @@ def to_str_py27(value): """ if isinstance(value, dict): - entry = '{}: {}'.format - retval = '{' + entry = "{}: {}".format + retval = "{" for key, val in list(value.items()): if len(retval) > 1: - retval += ', ' + retval += ", " kenc = repr(key) if kenc.startswith(("u'", 'u"')): kenc = kenc[1:] @@ -193,12 +194,12 @@ def to_str_py27(value): if venc.startswith(("u'", 'u"')): venc = venc[1:] retval += entry(kenc, venc) - retval += '}' + retval += "}" return retval istuple = isinstance(value, tuple) if isinstance(value, (tuple, list)): - retval = '(' if istuple else '[' + retval = "(" if istuple else "[" nels = len(value) for i, v in enumerate(value): venc = to_str_py27(v) @@ -207,11 +208,11 @@ def to_str_py27(value): retval += venc if i < nels - 1: - retval += ', ' + retval += ", " if istuple and nels == 1: - retval += ',' - retval += ')' if istuple else ']' + retval += "," + retval += ")" if istuple else "]" return retval retval = repr(value).decode() @@ -220,7 +221,7 @@ def to_str_py27(value): return retval -def fname_presuffix(fname, prefix='', suffix='', newpath=None, use_ext=True): +def fname_presuffix(fname, prefix="", suffix="", newpath=None, use_ext=True): """Manipulates path and name of input filename Parameters @@ -254,7 +255,7 @@ def fname_presuffix(fname, prefix='', suffix='', newpath=None, use_ext=True): """ pth, fname, ext = split_filename(fname) if not 
use_ext: - ext = '' + ext = "" # No need for isdefined: bool(Undefined) evaluates to False if newpath: @@ -262,7 +263,7 @@ def fname_presuffix(fname, prefix='', suffix='', newpath=None, use_ext=True): return op.join(pth, prefix + fname + suffix + ext) -def fnames_presuffix(fnames, prefix='', suffix='', newpath=None, use_ext=True): +def fnames_presuffix(fnames, prefix="", suffix="", newpath=None, use_ext=True): """Calls fname_presuffix for a list of files. """ f2 = [] @@ -276,7 +277,7 @@ def hash_rename(filename, hashvalue): and sets path to output_directory """ path, name, ext = split_filename(filename) - newfilename = ''.join((name, '_0x', hashvalue, ext)) + newfilename = "".join((name, "_0x", hashvalue, ext)) return op.join(path, newfilename) @@ -285,15 +286,14 @@ def check_forhash(filename): if isinstance(filename, list): filename = filename[0] path, name = op.split(filename) - if re.search('(_0x[a-z0-9]{32})', name): - hashvalue = re.findall('(_0x[a-z0-9]{32})', name) + if re.search("(_0x[a-z0-9]{32})", name): + hashvalue = re.findall("(_0x[a-z0-9]{32})", name) return True, hashvalue else: return False, None -def hash_infile(afile, chunk_len=8192, crypto=hashlib.md5, - raise_notfound=False): +def hash_infile(afile, chunk_len=8192, crypto=hashlib.md5, raise_notfound=False): """ Computes hash of a file using 'crypto' module @@ -317,7 +317,7 @@ def hash_infile(afile, chunk_len=8192, crypto=hashlib.md5, return None crypto_obj = crypto() - with open(afile, 'rb') as fp: + with open(afile, "rb") as fp: while True: data = fp.read(chunk_len) if not data: @@ -352,19 +352,19 @@ def _parse_mount_table(exit_code, output): # ^^^^ ^^^^^ # OSX mount example: /dev/disk2 on / (hfs, local, journaled) # ^ ^^^ - pattern = re.compile(r'.*? on (/.*?) (?:type |\()([^\s,\)]+)') + pattern = re.compile(r".*? on (/.*?) (?:type |\()([^\s,\)]+)") # Keep line and match for error reporting (match == None on failure) # Ignore empty lines - matches = [(l, pattern.match(l)) - for l in output.strip().splitlines() if l] + matches = [(l, pattern.match(l)) for l in output.strip().splitlines() if l] # (path, fstype) tuples, sorted by path length (longest first) - mount_info = sorted((match.groups() for _, match in matches - if match is not None), - key=lambda x: len(x[0]), reverse=True) - cifs_paths = [path for path, fstype in mount_info - if fstype.lower() == 'cifs'] + mount_info = sorted( + (match.groups() for _, match in matches if match is not None), + key=lambda x: len(x[0]), + reverse=True, + ) + cifs_paths = [path for path, fstype in mount_info if fstype.lower() == "cifs"] # Report failures as warnings for line, match in matches: @@ -372,7 +372,8 @@ def _parse_mount_table(exit_code, output): fmlogger.debug("Cannot parse mount line: '%s'", line) return [ - mount for mount in mount_info + mount + for mount in mount_info if any(mount[0].startswith(path) for path in cifs_paths) ] @@ -410,17 +411,19 @@ def on_cifs(fname): # Only the first match (most recent parent) counts for fspath, fstype in _cifs_table: if fname.startswith(fspath): - return fstype == 'cifs' + return fstype == "cifs" return False -def copyfile(originalfile, - newfile, - copy=False, - create_new=False, - hashmethod=None, - use_hardlink=False, - copy_related_files=True): +def copyfile( + originalfile, + newfile, + copy=False, + create_new=False, + hashmethod=None, + use_hardlink=False, + copy_related_files=True, +): """Copy or link ``originalfile`` to ``newfile``. 
If ``use_hardlink`` is True, and the file can be hard-linked, then a @@ -457,7 +460,7 @@ def copyfile(originalfile, if create_new: while op.exists(newfile): base, fname, ext = split_filename(newfile) - s = re.search('_c[0-9]{4,4}$', fname) + s = re.search("_c[0-9]{4,4}$", fname) i = 0 if s: i = int(s.group()[2:]) + 1 @@ -467,7 +470,7 @@ def copyfile(originalfile, newfile = base + os.sep + fname + ext if hashmethod is None: - hashmethod = config.get('execution', 'hash_method').lower() + hashmethod = config.get("execution", "hash_method").lower() # Don't try creating symlinks on CIFS if copy is False and on_cifs(newfile): @@ -487,26 +490,33 @@ def copyfile(originalfile, keep = False if op.lexists(newfile): if op.islink(newfile): - if all((os.readlink(newfile) == op.realpath(originalfile), - not use_hardlink, not copy)): + if all( + ( + os.readlink(newfile) == op.realpath(originalfile), + not use_hardlink, + not copy, + ) + ): keep = True elif posixpath.samefile(newfile, originalfile): keep = True else: - if hashmethod == 'timestamp': + if hashmethod == "timestamp": hashfn = hash_timestamp - elif hashmethod == 'content': + elif hashmethod == "content": hashfn = hash_infile else: raise AttributeError("Unknown hash method found:", hashmethod) newhash = hashfn(newfile) - fmlogger.debug('File: %s already exists,%s, copy:%d', newfile, - newhash, copy) + fmlogger.debug( + "File: %s already exists,%s, copy:%d", newfile, newhash, copy + ) orighash = hashfn(originalfile) keep = newhash == orighash if keep: - fmlogger.debug('File: %s already exists, not overwriting, copy:%d', - newfile, copy) + fmlogger.debug( + "File: %s already exists, not overwriting, copy:%d", newfile, copy + ) else: os.unlink(newfile) @@ -517,7 +527,7 @@ def copyfile(originalfile, # ~hardlink & ~symlink => copy if not keep and use_hardlink: try: - fmlogger.debug('Linking File: %s->%s', newfile, originalfile) + fmlogger.debug("Linking File: %s->%s", newfile, originalfile) # Use realpath to avoid hardlinking symlinks os.link(op.realpath(originalfile), newfile) except OSError: @@ -525,9 +535,9 @@ def copyfile(originalfile, else: keep = True - if not keep and not copy and os.name == 'posix': + if not keep and not copy and os.name == "posix": try: - fmlogger.debug('Symlinking File: %s->%s', newfile, originalfile) + fmlogger.debug("Symlinking File: %s->%s", newfile, originalfile) os.symlink(originalfile, newfile) except OSError: copy = True # Disable symlink for associated files @@ -536,15 +546,17 @@ def copyfile(originalfile, if not keep: try: - fmlogger.debug('Copying File: %s->%s', newfile, originalfile) + fmlogger.debug("Copying File: %s->%s", newfile, originalfile) shutil.copyfile(originalfile, newfile) except shutil.Error as e: fmlogger.warning(e.message) # Associated files if copy_related_files: - related_file_pairs = (get_related_files(f, include_this_file=False) - for f in (originalfile, newfile)) + related_file_pairs = ( + get_related_files(f, include_this_file=False) + for f in (originalfile, newfile) + ) for alt_ofile, alt_nfile in zip(*related_file_pairs): if op.exists(alt_ofile): copyfile( @@ -553,7 +565,8 @@ def copyfile(originalfile, copy, hashmethod=hashmethod, use_hardlink=use_hardlink, - copy_related_files=False) + copy_related_files=False, + ) return newfile @@ -606,9 +619,7 @@ def copyfiles(filelist, dest, copy=False, create_new=False): newfiles = [] for i, f in enumerate(ensure_list(filelist)): if isinstance(f, list): - newfiles.insert(i, - copyfiles( - f, dest, copy=copy, create_new=create_new)) + 
newfiles.insert(i, copyfiles(f, dest, copy=copy, create_new=create_new)) else: if len(outfiles) > 1: destfile = outfiles[i] @@ -653,9 +664,9 @@ def check_depends(targets, dependencies): """ tgts = ensure_list(targets) deps = ensure_list(dependencies) - return all(map(op.exists, tgts)) and \ - min(map(op.getmtime, tgts)) > \ - max(list(map(op.getmtime, deps)) + [0]) + return all(map(op.exists, tgts)) and min(map(op.getmtime, tgts)) > max( + list(map(op.getmtime, deps)) + [0] + ) def save_json(filename, data): @@ -669,9 +680,9 @@ def save_json(filename, data): Dictionary to save in json file. """ - mode = 'w' + mode = "w" if sys.version_info[0] < 3: - mode = 'wb' + mode = "wb" with open(filename, mode) as fp: json.dump(data, fp, sort_keys=True, indent=4) @@ -690,32 +701,50 @@ def load_json(filename): """ - with open(filename, 'r') as fp: + with open(filename, "r") as fp: data = json.load(fp) return data def loadcrash(infile, *args): - if infile.endswith('pkl') or infile.endswith('pklz'): + if infile.endswith("pkl") or infile.endswith("pklz"): return loadpkl(infile) else: - raise ValueError('Only pickled crashfiles are supported') + raise ValueError("Only pickled crashfiles are supported") def loadpkl(infile): """Load a zipped or plain cPickled file.""" infile = Path(infile) - fmlogger.debug('Loading pkl: %s', infile) - pklopen = gzip.open if infile.suffix == '.pklz' else open + fmlogger.debug("Loading pkl: %s", infile) + pklopen = gzip.open if infile.suffix == ".pklz" else open - with SoftFileLock('%s.lock' % infile): - with pklopen(str(infile), 'rb') as pkl_file: - pkl_contents = pkl_file.read() + t = time() + timeout = float(config["execution"]["job_finished_timeout"]) + timed_out = True + while (time() - t) < timeout: + try: + glob(str(infile)).pop() + timed_out = False + break + except Exception as e: + fmlogger.debug(e) + sleep(2) + if timed_out: + error_message = ( + "Ressult file {0} expected, but " + "does not exist after ({1}) " + "seconds.".format(infile, timeout) + ) + raise IOError(error_message) + + with pklopen(str(infile), "rb") as pkl_file: + pkl_contents = pkl_file.read() pkl_metadata = None # Look if pkl file contains version metadata - idx = pkl_contents.find(b'\n') + idx = pkl_contents.find(b"\n") if idx >= 0: try: pkl_metadata = json.loads(pkl_contents[:idx]) @@ -724,7 +753,7 @@ def loadpkl(infile): pass else: # On success, skip JSON metadata - pkl_contents = pkl_contents[idx + 1:] + pkl_contents = pkl_contents[idx + 1 :] # Pickle files may contain relative paths that must be resolved relative # to the working directory, so use indirectory while attempting to load @@ -735,38 +764,45 @@ def loadpkl(infile): except UnicodeDecodeError: # Was this pickle created with Python 2.x? 
with indirectory(infile.parent): - unpkl = pickle.loads(pkl_contents, fix_imports=True, encoding='utf-8') - fmlogger.info('Successfully loaded pkl in compatibility mode.') + unpkl = pickle.loads(pkl_contents, fix_imports=True, encoding="utf-8") + fmlogger.info("Successfully loaded pkl in compatibility mode.") # Unpickling problems except Exception as e: - if pkl_metadata and 'version' in pkl_metadata: + if pkl_metadata and "version" in pkl_metadata: from nipype import __version__ as version - if pkl_metadata['version'] != version: - fmlogger.error("""\ + + if pkl_metadata["version"] != version: + fmlogger.error( + """\ Attempted to open a results file generated by Nipype version %s, \ -with an incompatible Nipype version (%s)""", pkl_metadata['version'], version) +with an incompatible Nipype version (%s)""", + pkl_metadata["version"], + version, + ) raise e - fmlogger.warning("""\ + fmlogger.warning( + """\ No metadata was found in the pkl file. Make sure you are currently using \ -the same Nipype version from the generated pkl.""") +the same Nipype version from the generated pkl.""" + ) raise e if unpkl is None: - raise ValueError('Loading %s resulted in None.' % infile) + raise ValueError("Loading %s resulted in None." % infile) return unpkl def crash2txt(filename, record): """ Write out plain text crash file """ - with open(filename, 'w') as fp: - if 'node' in record: - node = record['node'] - fp.write('Node: {}\n'.format(node.fullname)) - fp.write('Working directory: {}\n'.format(node.output_dir())) - fp.write('\n') - fp.write('Node inputs:\n{}\n'.format(node.inputs)) - fp.write(''.join(record['traceback'])) + with open(filename, "w") as fp: + if "node" in record: + node = record["node"] + fp.write("Node: {}\n".format(node.fullname)) + fp.write("Working directory: {}\n".format(node.output_dir())) + fp.write("\n") + fp.write("Node inputs:\n{}\n".format(node.inputs)) + fp.write("".join(record["traceback"])) def read_stream(stream, logger=None, encoding=None): @@ -779,25 +815,26 @@ def read_stream(stream, logger=None, encoding=None): """ - default_encoding = encoding or locale.getdefaultlocale()[1] or 'UTF-8' + default_encoding = encoding or locale.getdefaultlocale()[1] or "UTF-8" logger = logger or fmlogger try: out = stream.decode(default_encoding) except UnicodeDecodeError as err: - out = stream.decode(default_encoding, errors='replace') - logger.warning('Error decoding string: %s', err) + out = stream.decode(default_encoding, errors="replace") + logger.warning("Error decoding string: %s", err) return out.splitlines() def savepkl(filename, record, versioning=False, sync=False): - with open(filename, 'wb') as fobj: - pkl_file = gzip.GzipFile(fileobj=fobj) if filename.endswith('.pklz') else fobj + with open(filename, "wb") as fobj: + pkl_file = gzip.GzipFile(fileobj=fobj) if filename.endswith(".pklz") else fobj if versioning: from nipype import __version__ as version - metadata = json.dumps({'version': version}) - pkl_file.write(metadata.encode('utf-8')) - pkl_file.write('\n'.encode('utf-8')) + metadata = json.dumps({"version": version}) + + pkl_file.write(metadata.encode("utf-8")) + pkl_file.write("\n".encode("utf-8")) pickle.dump(record, pkl_file) if sync: @@ -805,26 +842,25 @@ def savepkl(filename, record, versioning=False, sync=False): os.fsync(fobj.fileno()) -rst_levels = ['=', '-', '~', '+'] +rst_levels = ["=", "-", "~", "+"] def write_rst_header(header, level=0): - return '\n'.join( - (header, ''.join([rst_levels[level] for _ in header]))) + '\n\n' + return "\n".join((header, 
"".join([rst_levels[level] for _ in header]))) + "\n\n" -def write_rst_list(items, prefix=''): +def write_rst_list(items, prefix=""): out = [] for item in items: - out.append('{} {}'.format(prefix, str(item))) - return '\n'.join(out) + '\n\n' + out.append("{} {}".format(prefix, str(item))) + return "\n".join(out) + "\n\n" -def write_rst_dict(info, prefix=''): +def write_rst_dict(info, prefix=""): out = [] for key, value in sorted(info.items()): - out.append('{}* {} : {}'.format(prefix, key, str(value))) - return '\n'.join(out) + '\n\n' + out.append("{}* {} : {}".format(prefix, key, str(value))) + return "\n".join(out) + "\n\n" def dist_is_editable(dist): @@ -838,7 +874,7 @@ def dist_is_editable(dist): # Borrowed from `pip`'s' API """ for path_item in sys.path: - egg_link = op.join(path_item, dist + '.egg-link') + egg_link = op.join(path_item, dist + ".egg-link") if op.isfile(egg_link): return True return False @@ -865,7 +901,7 @@ def makedirs(path, mode=0o777, exist_ok=False): except OSError: fmlogger.debug("Problem creating directory %s", path) if not op.exists(path): - raise OSError('Could not create directory %s' % path) + raise OSError("Could not create directory %s" % path) return path @@ -893,11 +929,12 @@ def emptydirs(path, noexist_ok=False): elcont = os.listdir(path) if ex.errno == errno.ENOTEMPTY and not elcont: fmlogger.warning( - 'An exception was raised trying to remove old %s, but the path' - ' seems empty. Is it an NFS mount?. Passing the exception.', - path) + "An exception was raised trying to remove old %s, but the path" + " seems empty. Is it an NFS mount?. Passing the exception.", + path, + ) elif ex.errno == errno.ENOTEMPTY and elcont: - fmlogger.debug('Folder %s contents (%d items).', path, len(elcont)) + fmlogger.debug("Folder %s contents (%d items).", path, len(elcont)) raise ex else: raise ex @@ -937,11 +974,11 @@ def which(cmd, env=None, pathext=None): """ if pathext is None: - pathext = os.getenv('PATHEXT', '').split(os.pathsep) - pathext.insert(0, '') + pathext = os.getenv("PATHEXT", "").split(os.pathsep) + pathext.insert(0, "") path = os.getenv("PATH", os.defpath) - if env and 'PATH' in env: + if env and "PATH" in env: path = env.get("PATH") if sys.version_info >= (3, 3): @@ -975,27 +1012,25 @@ def get_dependencies(name, environ): """ command = None - if sys.platform == 'darwin': - command = 'otool -L `which %s`' % name - elif 'linux' in sys.platform: - command = 'ldd `which %s`' % name + if sys.platform == "darwin": + command = "otool -L `which %s`" % name + elif "linux" in sys.platform: + command = "ldd `which %s`" % name else: - return 'Platform %s not supported' % sys.platform + return "Platform %s not supported" % sys.platform deps = None try: proc = sp.Popen( - command, - stdout=sp.PIPE, - stderr=sp.PIPE, - shell=True, - env=environ) + command, stdout=sp.PIPE, stderr=sp.PIPE, shell=True, env=environ + ) o, e = proc.communicate() deps = o.rstrip() except Exception as ex: deps = '"%s" failed' % command - fmlogger.warning('Could not get dependencies of %s. Error:\n%s', - name, ex.message) + fmlogger.warning( + "Could not get dependencies of %s. 
Error:\n%s", name, ex.message + ) return deps @@ -1015,7 +1050,7 @@ def canonicalize_env(env): Windows: environment dictionary with bytes keys and values Other: untouched input ``env`` """ - if os.name != 'nt': + if os.name != "nt": return env # convert unicode to string for python 2 @@ -1024,9 +1059,9 @@ def canonicalize_env(env): out_env = {} for key, val in env.items(): if not isinstance(key, bytes): - key = key.encode('utf-8') + key = key.encode("utf-8") if not isinstance(val, bytes): - val = val.encode('utf-8') + val = val.encode("utf-8") if not PY3: key = bytes_to_native_str(key) val = bytes_to_native_str(val) @@ -1052,11 +1087,13 @@ def relpath(path, start=None): unc_path, rest = op.splitunc(path) unc_start, rest = op.splitunc(start) if bool(unc_path) ^ bool(unc_start): - raise ValueError(("Cannot mix UNC and non-UNC paths " - "(%s and %s)") % (path, start)) + raise ValueError( + ("Cannot mix UNC and non-UNC paths " "(%s and %s)") % (path, start) + ) else: - raise ValueError("path is on drive %s, start on drive %s" % - (path_list[0], start_list[0])) + raise ValueError( + "path is on drive %s, start on drive %s" % (path_list[0], start_list[0]) + ) # Work out how much of the filepath is shared by start and path. for i in range(min(len(start_list), len(path_list))): if start_list[i].lower() != path_list[i].lower(): From ead903262f9a5aa772039500b37ca33425831b13 Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Tue, 5 Nov 2019 20:42:38 -0500 Subject: [PATCH 0517/1665] fix: config call and gzipped write --- nipype/utils/filemanip.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 21ba4cf5b8..40670a4f24 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -720,7 +720,7 @@ def loadpkl(infile): pklopen = gzip.open if infile.suffix == ".pklz" else open t = time() - timeout = float(config["execution"]["job_finished_timeout"]) + timeout = float(config.get("execution", "job_finished_timeout")) timed_out = True while (time() - t) < timeout: try: @@ -826,8 +826,8 @@ def read_stream(stream, logger=None, encoding=None): def savepkl(filename, record, versioning=False, sync=False): - with open(filename, "wb") as fobj: - pkl_file = gzip.GzipFile(fileobj=fobj) if filename.endswith(".pklz") else fobj + pkl_open = gzip.open if filename.endswith(".pklz") else open + with pkl_open(filename, "wb") as pkl_file: if versioning: from nipype import __version__ as version @@ -837,9 +837,10 @@ def savepkl(filename, record, versioning=False, sync=False): pkl_file.write("\n".encode("utf-8")) pickle.dump(record, pkl_file) + fmlogger.info("Finished saving: {}".format(filename)) if sync: - fobj.flush() - os.fsync(fobj.fileno()) + pkl_file.flush() + os.fsync(pkl_file.fileno()) rst_levels = ["=", "-", "~", "+"] From 78f03e9c64ea1b92b040074b247776da07390887 Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Wed, 6 Nov 2019 11:28:02 -0500 Subject: [PATCH 0518/1665] Update nipype/utils/filemanip.py Co-Authored-By: Chris Markiewicz --- nipype/utils/filemanip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 40670a4f24..3c31f68a0d 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -732,7 +732,7 @@ def loadpkl(infile): sleep(2) if timed_out: error_message = ( - "Ressult file {0} expected, but " + "Result file {0} expected, but " "does not exist after ({1}) " "seconds.".format(infile, timeout) ) From 
c956c8d81c63974c69db6d6b76963c343dee41df Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Wed, 6 Nov 2019 11:36:52 -0500 Subject: [PATCH 0519/1665] fix: replace glob with file exists check --- nipype/utils/filemanip.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 3c31f68a0d..655ee679cc 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -723,12 +723,10 @@ def loadpkl(infile): timeout = float(config.get("execution", "job_finished_timeout")) timed_out = True while (time() - t) < timeout: - try: - glob(str(infile)).pop() + if infile.exists(): timed_out = False break - except Exception as e: - fmlogger.debug(e) + fmlogger.debug("'{}' missing; waiting 2s".format(infile)) sleep(2) if timed_out: error_message = ( From e572991dbd26cceb624d01b71cb7801fde16b129 Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Wed, 6 Nov 2019 23:02:47 -0500 Subject: [PATCH 0520/1665] enh: refactor saving based on feedback --- nipype/pipeline/engine/utils.py | 763 ++++++++++++++++++-------------- nipype/utils/filemanip.py | 29 +- 2 files changed, 438 insertions(+), 354 deletions(-) diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index 218c30850b..5005eaa332 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -2,8 +2,7 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Utility routines for workflow graphs""" -from __future__ import (print_function, division, unicode_literals, - absolute_import) +from __future__ import print_function, division, unicode_literals, absolute_import from builtins import str, open, next, zip, range import os @@ -44,7 +43,12 @@ from ...utils.misc import str2bool from ...utils.functions import create_function_from_source from ...interfaces.base.traits_extension import ( - rebase_path_traits, resolve_path_traits, OutputMultiPath, isdefined, Undefined) + rebase_path_traits, + resolve_path_traits, + OutputMultiPath, + isdefined, + Undefined, +) from ...interfaces.base.support import Bunch, InterfaceResult from ...interfaces.base import CommandLine from ...interfaces.utility import IdentityInterface @@ -56,7 +60,7 @@ from funcsigs import signature standard_library.install_aliases() -logger = logging.getLogger('nipype.workflow') +logger = logging.getLogger("nipype.workflow") PY3 = sys.version_info[0] > 2 @@ -82,13 +86,12 @@ def save_hashfile(hashfile, hashed_inputs): # XXX - SG current workaround is to just # create the hashed file and not put anything # in it - with open(hashfile, 'wt') as fd: + with open(hashfile, "wt") as fd: fd.writelines(str(hashed_inputs)) - logger.debug('Unable to write a particular type to the json file') + logger.debug("Unable to write a particular type to the json file") else: - logger.critical('Unable to open the file in write mode: %s', - hashfile) + logger.critical("Unable to open the file in write mode: %s", hashfile) def nodelist_runner(nodes, updatehash=False, stop_first=False): @@ -108,49 +111,47 @@ def nodelist_runner(nodes, updatehash=False, stop_first=False): result = node.result err = [] - if result.runtime and hasattr(result.runtime, 'traceback'): + if result.runtime and hasattr(result.runtime, "traceback"): err = [result.runtime.traceback] err += format_exception(*sys.exc_info()) - err = '\n'.join(err) + err = "\n".join(err) finally: yield i, result, err def write_node_report(node, result=None, is_mapnode=False): 
"""Write a report file for a node.""" - if not str2bool(node.config['execution']['create_report']): + if not str2bool(node.config["execution"]["create_report"]): return cwd = node.output_dir() - report_file = Path(cwd) / '_report' / 'report.rst' + report_file = Path(cwd) / "_report" / "report.rst" path_mkdir(report_file.parent, exist_ok=True, parents=True) lines = [ - write_rst_header('Node: %s' % get_print_name(node), level=0), - write_rst_list( - ['Hierarchy : %s' % node.fullname, - 'Exec ID : %s' % node._id]), - write_rst_header('Original Inputs', level=1), + write_rst_header("Node: %s" % get_print_name(node), level=0), + write_rst_list(["Hierarchy : %s" % node.fullname, "Exec ID : %s" % node._id]), + write_rst_header("Original Inputs", level=1), write_rst_dict(node.inputs.trait_get()), ] if result is None: logger.debug('[Node] Writing pre-exec report to "%s"', report_file) - report_file.write_text('\n'.join(lines)) + report_file.write_text("\n".join(lines)) return logger.debug('[Node] Writing post-exec report to "%s"', report_file) lines += [ - write_rst_header('Execution Inputs', level=1), + write_rst_header("Execution Inputs", level=1), write_rst_dict(node.inputs.trait_get()), - write_rst_header('Execution Outputs', level=1) + write_rst_header("Execution Outputs", level=1), ] outputs = result.outputs if outputs is None: - lines += ['None'] - report_file.write_text('\n'.join(lines)) + lines += ["None"] + report_file.write_text("\n".join(lines)) return if isinstance(outputs, Bunch): @@ -158,95 +159,103 @@ def write_node_report(node, result=None, is_mapnode=False): elif outputs: lines.append(write_rst_dict(outputs.trait_get())) else: - lines += ['Outputs object was empty.'] + lines += ["Outputs object was empty."] if is_mapnode: - lines.append(write_rst_header('Subnode reports', level=1)) + lines.append(write_rst_header("Subnode reports", level=1)) nitems = len(ensure_list(getattr(node.inputs, node.iterfield[0]))) subnode_report_files = [] for i in range(nitems): - subnode_file = Path(cwd) / 'mapflow' / ( - '_%s%d' % (node.name, i)) / '_report' / 'report.rst' - subnode_report_files.append('subnode %d : %s' % (i, subnode_file)) + subnode_file = ( + Path(cwd) + / "mapflow" + / ("_%s%d" % (node.name, i)) + / "_report" + / "report.rst" + ) + subnode_report_files.append("subnode %d : %s" % (i, subnode_file)) lines.append(write_rst_list(subnode_report_files)) - report_file.write_text('\n'.join(lines)) + report_file.write_text("\n".join(lines)) return - lines.append(write_rst_header('Runtime info', level=1)) + lines.append(write_rst_header("Runtime info", level=1)) # Init rst dictionary of runtime stats rst_dict = { - 'hostname': result.runtime.hostname, - 'duration': result.runtime.duration, - 'working_dir': result.runtime.cwd, - 'prev_wd': getattr(result.runtime, 'prevcwd', ''), + "hostname": result.runtime.hostname, + "duration": result.runtime.duration, + "working_dir": result.runtime.cwd, + "prev_wd": getattr(result.runtime, "prevcwd", ""), } - for prop in ('cmdline', 'mem_peak_gb', 'cpu_percent'): + for prop in ("cmdline", "mem_peak_gb", "cpu_percent"): if hasattr(result.runtime, prop): rst_dict[prop] = getattr(result.runtime, prop) lines.append(write_rst_dict(rst_dict)) # Collect terminal output - if hasattr(result.runtime, 'merged'): + if hasattr(result.runtime, "merged"): lines += [ - write_rst_header('Terminal output', level=2), + write_rst_header("Terminal output", level=2), write_rst_list(result.runtime.merged), ] - if hasattr(result.runtime, 'stdout'): + if hasattr(result.runtime, 
"stdout"): lines += [ - write_rst_header('Terminal - standard output', level=2), + write_rst_header("Terminal - standard output", level=2), write_rst_list(result.runtime.stdout), ] - if hasattr(result.runtime, 'stderr'): + if hasattr(result.runtime, "stderr"): lines += [ - write_rst_header('Terminal - standard error', level=2), + write_rst_header("Terminal - standard error", level=2), write_rst_list(result.runtime.stderr), ] # Store environment - if hasattr(result.runtime, 'environ'): + if hasattr(result.runtime, "environ"): lines += [ - write_rst_header('Environment', level=2), + write_rst_header("Environment", level=2), write_rst_dict(result.runtime.environ), ] - report_file.write_text('\n'.join(lines)) + report_file.write_text("\n".join(lines)) def write_report(node, report_type=None, is_mapnode=False): """Write a report file for a node - DEPRECATED""" - if report_type not in ('preexec', 'postexec'): + if report_type not in ("preexec", "postexec"): logger.warning('[Node] Unknown report type "%s".', report_type) return - write_node_report(node, is_mapnode=is_mapnode, - result=node.result if report_type == 'postexec' else None) + write_node_report( + node, + is_mapnode=is_mapnode, + result=node.result if report_type == "postexec" else None, + ) def save_resultfile(result, cwd, name, rebase=None): """Save a result pklz file to ``cwd``.""" if rebase is None: - rebase = config.getboolean('execution', 'use_relative_paths') + rebase = config.getboolean("execution", "use_relative_paths") cwd = os.path.abspath(cwd) - resultsfile = os.path.join(cwd, 'result_%s.pklz' % name) + resultsfile = os.path.join(cwd, "result_%s.pklz" % name) logger.debug("Saving results file: '%s'", resultsfile) if result.outputs is None: - logger.warning('Storing result file without outputs') - savepkl(resultsfile, result, sync=True) + logger.warning("Storing result file without outputs") + savepkl(resultsfile, result) return try: output_names = result.outputs.copyable_trait_names() except AttributeError: - logger.debug('Storing non-traited results, skipping rebase of paths') - savepkl(resultsfile, result, sync=True) + logger.debug("Storing non-traited results, skipping rebase of paths") + savepkl(resultsfile, result) return if not rebase: - savepkl(resultsfile, result, sync=True) + savepkl(resultsfile, result) return backup_traits = {} @@ -258,11 +267,12 @@ def save_resultfile(result, cwd, name, rebase=None): if isdefined(old): if result.outputs.trait(key).is_trait_type(OutputMultiPath): old = result.outputs.trait(key).handler.get_value( - result.outputs, key) + result.outputs, key + ) backup_traits[key] = old val = rebase_path_traits(result.outputs.trait(key), old, cwd) setattr(result.outputs, key, val) - savepkl(resultsfile, result, sync=True) + savepkl(resultsfile, result) finally: # Restore resolved paths from the outputs dict no matter what for key, val in list(backup_traits.items()): @@ -298,17 +308,19 @@ def load_resultfile(results_file, resolve=True): try: outputs = result.outputs.get() except TypeError: # This is a Bunch - logger.debug('Outputs object of loaded result %s is a Bunch.', results_file) + logger.debug("Outputs object of loaded result %s is a Bunch.", results_file) return result - logger.debug('Resolving paths in outputs loaded from results file.') + logger.debug("Resolving paths in outputs loaded from results file.") for trait_name, old in list(outputs.items()): if isdefined(old): if result.outputs.trait(trait_name).is_trait_type(OutputMultiPath): old = 
result.outputs.trait(trait_name).handler.get_value( - result.outputs, trait_name) - value = resolve_path_traits(result.outputs.trait(trait_name), old, - results_file.parent) + result.outputs, trait_name + ) + value = resolve_path_traits( + result.outputs.trait(trait_name), old, results_file.parent + ) setattr(result.outputs, trait_name, value) return result @@ -320,13 +332,13 @@ def strip_temp(files, wd): if isinstance(f, list): out.append(strip_temp(f, wd)) else: - out.append(f.replace(os.path.join(wd, '_tempinput'), wd)) + out.append(f.replace(os.path.join(wd, "_tempinput"), wd)) return out def _write_inputs(node): lines = [] - nodename = node.fullname.replace('.', '_') + nodename = node.fullname.replace(".", "_") for key, _ in list(node.inputs.items()): val = getattr(node.inputs, key) if isdefined(val): @@ -337,65 +349,72 @@ def _write_inputs(node): lines.append("%s.inputs.%s = '%s'" % (nodename, key, val)) else: funcname = [ - name for name in func.__globals__ - if name != '__builtins__' + name for name in func.__globals__ if name != "__builtins__" ][0] lines.append(pickle.loads(val)) if funcname == nodename: - lines[-1] = lines[-1].replace(' %s(' % funcname, - ' %s_1(' % funcname) - funcname = '%s_1' % funcname + lines[-1] = lines[-1].replace( + " %s(" % funcname, " %s_1(" % funcname + ) + funcname = "%s_1" % funcname + lines.append("from nipype.utils.functions import getsource") lines.append( - 'from nipype.utils.functions import getsource') - lines.append("%s.inputs.%s = getsource(%s)" % - (nodename, key, funcname)) + "%s.inputs.%s = getsource(%s)" % (nodename, key, funcname) + ) else: - lines.append('%s.inputs.%s = %s' % (nodename, key, val)) + lines.append("%s.inputs.%s = %s" % (nodename, key, val)) return lines -def format_node(node, format='python', include_config=False): +def format_node(node, format="python", include_config=False): """Format a node in a given output syntax.""" from .nodes import MapNode + lines = [] - name = node.fullname.replace('.', '_') - if format == 'python': + name = node.fullname.replace(".", "_") + if format == "python": klass = node.interface - importline = 'from %s import %s' % (klass.__module__, - klass.__class__.__name__) - comment = '# Node: %s' % node.fullname + importline = "from %s import %s" % (klass.__module__, klass.__class__.__name__) + comment = "# Node: %s" % node.fullname spec = signature(node.interface.__init__) args = [p.name for p in list(spec.parameters.values())] args = args[1:] if args: filled_args = [] for arg in args: - if hasattr(node.interface, '_%s' % arg): - filled_args.append('%s=%s' % - (arg, - getattr(node.interface, '_%s' % arg))) - args = ', '.join(filled_args) + if hasattr(node.interface, "_%s" % arg): + filled_args.append( + "%s=%s" % (arg, getattr(node.interface, "_%s" % arg)) + ) + args = ", ".join(filled_args) else: - args = '' + args = "" klass_name = klass.__class__.__name__ if isinstance(node, MapNode): - nodedef = '%s = MapNode(%s(%s), iterfield=%s, name="%s")' \ - % (name, klass_name, args, node.iterfield, name) + nodedef = '%s = MapNode(%s(%s), iterfield=%s, name="%s")' % ( + name, + klass_name, + args, + node.iterfield, + name, + ) else: - nodedef = '%s = Node(%s(%s), name="%s")' \ - % (name, klass_name, args, name) + nodedef = '%s = Node(%s(%s), name="%s")' % (name, klass_name, args, name) lines = [importline, comment, nodedef] if include_config: lines = [ - importline, "from future import standard_library", + importline, + "from future import standard_library", "standard_library.install_aliases()", - "from 
collections import OrderedDict", comment, nodedef + "from collections import OrderedDict", + comment, + nodedef, ] - lines.append('%s.config = %s' % (name, node.config)) + lines.append("%s.config = %s" % (name, node.config)) if node.iterables is not None: - lines.append('%s.iterables = %s' % (name, node.iterables)) + lines.append("%s.iterables = %s" % (name, node.iterables)) lines.extend(_write_inputs(node)) return lines @@ -420,28 +439,26 @@ def modify_paths(object, relative=True, basedir=None): out = {} for key, val in sorted(object.items()): if isdefined(val): - out[key] = modify_paths( - val, relative=relative, basedir=basedir) + out[key] = modify_paths(val, relative=relative, basedir=basedir) elif isinstance(object, (list, tuple)): out = [] for val in object: if isdefined(val): - out.append( - modify_paths(val, relative=relative, basedir=basedir)) + out.append(modify_paths(val, relative=relative, basedir=basedir)) if isinstance(object, tuple): out = tuple(out) else: if isdefined(object): if isinstance(object, (str, bytes)) and os.path.isfile(object): if relative: - if config.getboolean('execution', 'use_relative_paths'): + if config.getboolean("execution", "use_relative_paths"): out = relpath(object, start=basedir) else: out = object else: out = os.path.abspath(os.path.join(basedir, object)) if not os.path.exists(out): - raise IOError('File %s not found' % out) + raise IOError("File %s not found" % out) else: out = object else: @@ -457,20 +474,20 @@ def get_print_name(node, simple_form=True): """ name = node.fullname - if hasattr(node, '_interface'): - pkglist = node.interface.__class__.__module__.split('.') + if hasattr(node, "_interface"): + pkglist = node.interface.__class__.__module__.split(".") interface = node.interface.__class__.__name__ - destclass = '' + destclass = "" if len(pkglist) > 2: - destclass = '.%s' % pkglist[2] + destclass = ".%s" % pkglist[2] if simple_form: name = node.fullname + destclass else: - name = '.'.join([node.fullname, interface]) + destclass + name = ".".join([node.fullname, interface]) + destclass if simple_form: - parts = name.split('.') + parts = name.split(".") if len(parts) > 2: - return ' ('.join(parts[1:]) + ')' + return " (".join(parts[1:]) + ")" elif len(parts) == 2: return parts[1] return name @@ -481,15 +498,16 @@ def _create_dot_graph(graph, show_connectinfo=False, simple_form=True): Ensures that edge info is pickleable. 
""" - logger.debug('creating dot graph') + logger.debug("creating dot graph") import networkx as nx + pklgraph = nx.DiGraph() for edge in graph.edges(): data = graph.get_edge_data(*edge) srcname = get_print_name(edge[0], simple_form=simple_form) destname = get_print_name(edge[1], simple_form=simple_form) if show_connectinfo: - pklgraph.add_edge(srcname, destname, l=str(data['connect'])) + pklgraph.add_edge(srcname, destname, l=str(data["connect"])) else: pklgraph.add_edge(srcname, destname) return pklgraph @@ -510,67 +528,86 @@ def _write_detailed_dot(graph, dotfilename): } """ import networkx as nx - text = ['digraph structs {', 'node [shape=record];'] + + text = ["digraph structs {", "node [shape=record];"] # write nodes edges = [] for n in nx.topological_sort(graph): nodename = n.itername inports = [] for u, v, d in graph.in_edges(nbunch=n, data=True): - for cd in d['connect']: + for cd in d["connect"]: if isinstance(cd[0], (str, bytes)): outport = cd[0] else: outport = cd[0][0] inport = cd[1] - ipstrip = 'in%s' % _replacefunk(inport) - opstrip = 'out%s' % _replacefunk(outport) + ipstrip = "in%s" % _replacefunk(inport) + opstrip = "out%s" % _replacefunk(outport) edges.append( - '%s:%s:e -> %s:%s:w;' % (u.itername.replace('.', ''), opstrip, - v.itername.replace('.', ''), ipstrip)) + "%s:%s:e -> %s:%s:w;" + % ( + u.itername.replace(".", ""), + opstrip, + v.itername.replace(".", ""), + ipstrip, + ) + ) if inport not in inports: inports.append(inport) - inputstr = ['{IN'] + [ - '| %s' % (_replacefunk(ip), ip) for ip in sorted(inports) - ] + ['}'] + inputstr = ( + ["{IN"] + + ["| %s" % (_replacefunk(ip), ip) for ip in sorted(inports)] + + ["}"] + ) outports = [] for u, v, d in graph.out_edges(nbunch=n, data=True): - for cd in d['connect']: + for cd in d["connect"]: if isinstance(cd[0], (str, bytes)): outport = cd[0] else: outport = cd[0][0] if outport not in outports: outports.append(outport) - outputstr = ['{OUT'] + [ - '| %s' % (_replacefunk(oport), oport) - for oport in sorted(outports) - ] + ['}'] - srcpackage = '' - if hasattr(n, '_interface'): - pkglist = n.interface.__class__.__module__.split('.') + outputstr = ( + ["{OUT"] + + [ + "| %s" % (_replacefunk(oport), oport) + for oport in sorted(outports) + ] + + ["}"] + ) + srcpackage = "" + if hasattr(n, "_interface"): + pkglist = n.interface.__class__.__module__.split(".") if len(pkglist) > 2: srcpackage = pkglist[2] - srchierarchy = '.'.join(nodename.split('.')[1:-1]) - nodenamestr = '{ %s | %s | %s }' % (nodename.split('.')[-1], - srcpackage, srchierarchy) + srchierarchy = ".".join(nodename.split(".")[1:-1]) + nodenamestr = "{ %s | %s | %s }" % ( + nodename.split(".")[-1], + srcpackage, + srchierarchy, + ) text += [ - '%s [label="%s|%s|%s"];' % - (nodename.replace('.', ''), ''.join(inputstr), nodenamestr, - ''.join(outputstr)) + '%s [label="%s|%s|%s"];' + % ( + nodename.replace(".", ""), + "".join(inputstr), + nodenamestr, + "".join(outputstr), + ) ] # write edges for edge in sorted(edges): text.append(edge) - text.append('}') - with open(dotfilename, 'wt') as filep: - filep.write('\n'.join(text)) + text.append("}") + with open(dotfilename, "wt") as filep: + filep.write("\n".join(text)) return text def _replacefunk(x): - return x.replace('_', '').replace('.', '').replace('@', '').replace( - '-', '') + return x.replace("_", "").replace(".", "").replace("@", "").replace("-", "") # Graph manipulations for iterable expansion @@ -582,9 +619,9 @@ def _get_valid_pathstr(pathstr): """ if not isinstance(pathstr, (str, bytes)): pathstr = 
to_str(pathstr) - pathstr = pathstr.replace(os.sep, '..') - pathstr = re.sub(r'''[][ (){}?:<>#!|"';]''', '', pathstr) - pathstr = pathstr.replace(',', '.') + pathstr = pathstr.replace(os.sep, "..") + pathstr = re.sub(r"""[][ (){}?:<>#!|"';]""", "", pathstr) + pathstr = pathstr.replace(",", ".") return pathstr @@ -657,8 +694,9 @@ def synchronize_iterables(iterables): True """ out_list = [] - iterable_items = [(field, iter(fvals())) - for field, fvals in sorted(iterables.items())] + iterable_items = [ + (field, iter(fvals())) for field, fvals in sorted(iterables.items()) + ] while True: cur_dict = {} for field, iter_values in iterable_items: @@ -679,17 +717,21 @@ def evaluate_connect_function(function_source, args, first_arg): try: output_value = func(first_arg, *list(args)) except NameError as e: - if e.args[0].startswith("global name") and \ - e.args[0].endswith("is not defined"): - e.args = (e.args[0], - ("Due to engine constraints all imports have to be done " - "inside each function definition")) + if e.args[0].startswith("global name") and e.args[0].endswith("is not defined"): + e.args = ( + e.args[0], + ( + "Due to engine constraints all imports have to be done " + "inside each function definition" + ), + ) raise e return output_value def get_levels(G): import networkx as nx + levels = {} for n in nx.topological_sort(G): levels[n] = 0 @@ -698,13 +740,9 @@ def get_levels(G): return levels -def _merge_graphs(supergraph, - nodes, - subgraph, - nodeid, - iterables, - prefix, - synchronize=False): +def _merge_graphs( + supergraph, nodes, subgraph, nodeid, iterables, prefix, synchronize=False +): """Merges two graphs that share a subset of nodes. If the subgraph needs to be replicated for multiple iterables, the @@ -740,8 +778,12 @@ def _merge_graphs(supergraph, # This should trap the problem of miswiring when multiple iterables are # used at the same level. The use of the template below for naming # updates to nodes is the general solution. - raise Exception(("Execution graph does not have a unique set of node " - "names. Please rerun the workflow")) + raise Exception( + ( + "Execution graph does not have a unique set of node " + "names. 
Please rerun the workflow" + ) + ) edgeinfo = {} for n in list(subgraph.nodes()): nidx = ids.index(n._hierarchy + n._id) @@ -751,7 +793,8 @@ def _merge_graphs(supergraph, if n._hierarchy + n._id not in list(edgeinfo.keys()): edgeinfo[n._hierarchy + n._id] = [] edgeinfo[n._hierarchy + n._id].append( - (edge[0], supergraph.get_edge_data(*edge))) + (edge[0], supergraph.get_edge_data(*edge)) + ) supergraph.remove_nodes_from(nodes) # Add copies of the subgraph depending on the number of iterables iterable_params = expand_iterables(iterables, synchronize) @@ -760,20 +803,21 @@ def _merge_graphs(supergraph, return supergraph # Make an iterable subgraph node id template count = len(iterable_params) - template = '.%s%%0%dd' % (prefix, np.ceil(np.log10(count))) + template = ".%s%%0%dd" % (prefix, np.ceil(np.log10(count))) # Copy the iterable subgraphs for i, params in enumerate(iterable_params): Gc = deepcopy(subgraph) ids = [n._hierarchy + n._id for n in Gc.nodes()] nodeidx = ids.index(nodeid) rootnode = list(Gc.nodes())[nodeidx] - paramstr = '' + paramstr = "" for key, val in sorted(params.items()): - paramstr = '{}_{}_{}'.format(paramstr, _get_valid_pathstr(key), - _get_valid_pathstr(val)) + paramstr = "{}_{}_{}".format( + paramstr, _get_valid_pathstr(key), _get_valid_pathstr(val) + ) rootnode.set_input(key, val) - logger.debug('Parameterization: paramstr=%s', paramstr) + logger.debug("Parameterization: paramstr=%s", paramstr) levels = get_levels(Gc) for n in Gc.nodes(): # update parameterization of the node to reflect the location of @@ -806,10 +850,10 @@ def _connect_nodes(graph, srcnode, destnode, connection_info): """ data = graph.get_edge_data(srcnode, destnode, default=None) if not data: - data = {'connect': connection_info} + data = {"connect": connection_info} graph.add_edges_from([(srcnode, destnode, data)]) else: - data['connect'].extend(connection_info) + data["connect"].extend(connection_info) def _remove_nonjoin_identity_nodes(graph, keep_iterables=False): @@ -821,7 +865,7 @@ def _remove_nonjoin_identity_nodes(graph, keep_iterables=False): # if keep_iterables is False, then include the iterable # and join nodes in the nodes to delete for node in _identity_nodes(graph, not keep_iterables): - if not hasattr(node, 'joinsource'): + if not hasattr(node, "joinsource"): _remove_identity_node(graph, node) return graph @@ -834,10 +878,12 @@ def _identity_nodes(graph, include_iterables): to True. 
""" import networkx as nx + return [ - node for node in nx.topological_sort(graph) - if isinstance(node.interface, IdentityInterface) and ( - include_iterables or getattr(node, 'iterables') is None) + node + for node in nx.topological_sort(graph) + if isinstance(node.interface, IdentityInterface) + and (include_iterables or getattr(node, "iterables") is None) ] @@ -847,8 +893,7 @@ def _remove_identity_node(graph, node): portinputs, portoutputs = _node_ports(graph, node) for field, connections in list(portoutputs.items()): if portinputs: - _propagate_internal_output(graph, node, field, connections, - portinputs) + _propagate_internal_output(graph, node, field, connections, portinputs) else: _propagate_root_output(graph, node, field, connections) graph.remove_nodes_from([node]) @@ -868,10 +913,10 @@ def _node_ports(graph, node): portinputs = {} portoutputs = {} for u, _, d in graph.in_edges(node, data=True): - for src, dest in d['connect']: + for src, dest in d["connect"]: portinputs[dest] = (u, src) for _, v, d in graph.out_edges(node, data=True): - for src, dest in d['connect']: + for src, dest in d["connect"]: if isinstance(src, tuple): srcport = src[0] else: @@ -902,25 +947,22 @@ def _propagate_internal_output(graph, node, field, connections, portinputs): if isinstance(srcport, tuple) and isinstance(src, tuple): src_func = srcport[1].split("\\n")[0] dst_func = src[1].split("\\n")[0] - raise ValueError("Does not support two inline functions " - "in series ('{}' and '{}'), found when " - "connecting {} to {}. Please use a Function " - "node.".format(src_func, dst_func, srcnode, - destnode)) - - connect = graph.get_edge_data( - srcnode, destnode, default={ - 'connect': [] - }) + raise ValueError( + "Does not support two inline functions " + "in series ('{}' and '{}'), found when " + "connecting {} to {}. Please use a Function " + "node.".format(src_func, dst_func, srcnode, destnode) + ) + + connect = graph.get_edge_data(srcnode, destnode, default={"connect": []}) if isinstance(src, tuple): - connect['connect'].append(((srcport, src[1], src[2]), inport)) + connect["connect"].append(((srcport, src[1], src[2]), inport)) else: - connect = {'connect': [(srcport, inport)]} + connect = {"connect": [(srcport, inport)]} old_connect = graph.get_edge_data( - srcnode, destnode, default={ - 'connect': [] - }) - old_connect['connect'] += connect['connect'] + srcnode, destnode, default={"connect": []} + ) + old_connect["connect"] += connect["connect"] graph.add_edges_from([(srcnode, destnode, old_connect)]) else: value = getattr(node.inputs, field) @@ -938,6 +980,7 @@ def generate_expanded_graph(graph_in): parameterized as (a=1,b=3), (a=1,b=4), (a=2,b=3) and (a=2,b=4). 
""" import networkx as nx + try: dfs_preorder = nx.dfs_preorder except AttributeError: @@ -949,7 +992,7 @@ def generate_expanded_graph(graph_in): for node in graph_in.nodes(): if node.iterables: _standardize_iterables(node) - allprefixes = list('abcdefghijklmnopqrstuvwxyz') + allprefixes = list("abcdefghijklmnopqrstuvwxyz") # the iterable nodes inodes = _iterable_nodes(graph_in) @@ -962,8 +1005,10 @@ def generate_expanded_graph(graph_in): # the join successor nodes of the current iterable node jnodes = [ - node for node in graph_in.nodes() - if hasattr(node, 'joinsource') and inode.name == node.joinsource + node + for node in graph_in.nodes() + if hasattr(node, "joinsource") + and inode.name == node.joinsource and nx.has_path(graph_in, inode, node) ] @@ -980,8 +1025,7 @@ def generate_expanded_graph(graph_in): for src, dest in edges2remove: graph_in.remove_edge(src, dest) - logger.debug("Excised the %s -> %s join node in-edge.", src, - dest) + logger.debug("Excised the %s -> %s join node in-edge.", src, dest) if inode.itersource: # the itersource is a (node name, fields) tuple @@ -991,22 +1035,24 @@ def generate_expanded_graph(graph_in): src_fields = [src_fields] # find the unique iterable source node in the graph try: - iter_src = next((node for node in graph_in.nodes() - if node.name == src_name - and nx.has_path(graph_in, node, inode))) + iter_src = next( + ( + node + for node in graph_in.nodes() + if node.name == src_name and nx.has_path(graph_in, node, inode) + ) + ) except StopIteration: - raise ValueError("The node %s itersource %s was not found" - " among the iterable predecessor nodes" % - (inode, src_name)) - logger.debug("The node %s has iterable source node %s", inode, - iter_src) + raise ValueError( + "The node %s itersource %s was not found" + " among the iterable predecessor nodes" % (inode, src_name) + ) + logger.debug("The node %s has iterable source node %s", inode, iter_src) # look up the iterables for this particular itersource descendant # using the iterable source ancestor values as a key iterables = {} # the source node iterables values - src_values = [ - getattr(iter_src.inputs, field) for field in src_fields - ] + src_values = [getattr(iter_src.inputs, field) for field in src_fields] # if there is one source field, then the key is the the source value, # otherwise the key is the tuple of source values if len(src_values) == 1: @@ -1016,9 +1062,13 @@ def generate_expanded_graph(graph_in): # The itersource iterables is a {field: lookup} dictionary, where the # lookup is a {source key: iteration list} dictionary. Look up the # current iterable value using the predecessor itersource input values. 
- iter_dict = dict([(field, lookup[key]) - for field, lookup in inode.iterables - if key in lookup]) + iter_dict = dict( + [ + (field, lookup[key]) + for field, lookup in inode.iterables + if key in lookup + ] + ) # convert the iterables to the standard {field: function} format @@ -1026,37 +1076,43 @@ def make_field_func(*pair): return pair[0], lambda: pair[1] iterables = dict( - [make_field_func(*pair) for pair in list(iter_dict.items())]) + [make_field_func(*pair) for pair in list(iter_dict.items())] + ) else: iterables = inode.iterables.copy() inode.iterables = None - logger.debug('node: %s iterables: %s', inode, iterables) + logger.debug("node: %s iterables: %s", inode, iterables) # collect the subnodes to expand subnodes = [s for s in dfs_preorder(graph_in, inode)] - prior_prefix = [re.findall(r'\.(.)I', s._id) for s in subnodes if s._id] + prior_prefix = [re.findall(r"\.(.)I", s._id) for s in subnodes if s._id] prior_prefix = sorted([l for item in prior_prefix for l in item]) if not prior_prefix: - iterable_prefix = 'a' + iterable_prefix = "a" else: - if prior_prefix[-1] == 'z': - raise ValueError('Too many iterables in the workflow') - iterable_prefix =\ - allprefixes[allprefixes.index(prior_prefix[-1]) + 1] - logger.debug(('subnodes:', subnodes)) + if prior_prefix[-1] == "z": + raise ValueError("Too many iterables in the workflow") + iterable_prefix = allprefixes[allprefixes.index(prior_prefix[-1]) + 1] + logger.debug(("subnodes:", subnodes)) # append a suffix to the iterable node id - inode._id += '.%sI' % iterable_prefix + inode._id += ".%sI" % iterable_prefix # merge the iterated subgraphs # dj: the behaviour of .copy changes in version 2 - if LooseVersion(nx.__version__) < LooseVersion('2'): + if LooseVersion(nx.__version__) < LooseVersion("2"): subgraph = graph_in.subgraph(subnodes) else: subgraph = graph_in.subgraph(subnodes).copy() - graph_in = _merge_graphs(graph_in, subnodes, subgraph, - inode._hierarchy + inode._id, iterables, - iterable_prefix, inode.synchronize) + graph_in = _merge_graphs( + graph_in, + subnodes, + subgraph, + inode._hierarchy + inode._id, + iterables, + iterable_prefix, + inode.synchronize, + ) # reconnect the join nodes for jnode in jnodes: @@ -1069,7 +1125,7 @@ def make_field_func(*pair): for src_id in list(old_edge_dict.keys()): # Drop the original JoinNodes; only concerned with # generated Nodes - if hasattr(node, 'joinfield') and node.itername == src_id: + if hasattr(node, "joinfield") and node.itername == src_id: continue # Patterns: # - src_id : Non-iterable node @@ -1078,12 +1134,17 @@ def make_field_func(*pair): # - src_id.[a-z]I.[a-z]\d+ : # Non-IdentityInterface w/ iterables # - src_idJ\d+ : JoinNode(IdentityInterface) - if re.match(src_id + r'((\.[a-z](I\.[a-z])?|J)\d+)?$', - node.itername): + if re.match( + src_id + r"((\.[a-z](I\.[a-z])?|J)\d+)?$", node.itername + ): expansions[src_id].append(node) for in_id, in_nodes in list(expansions.items()): - logger.debug("The join node %s input %s was expanded" - " to %d nodes.", jnode, in_id, len(in_nodes)) + logger.debug( + "The join node %s input %s was expanded" " to %d nodes.", + jnode, + in_id, + len(in_nodes), + ) # preserve the node iteration order by sorting on the node id for in_nodes in list(expansions.values()): in_nodes.sort(key=lambda node: node._id) @@ -1092,9 +1153,7 @@ def make_field_func(*pair): iter_cnt = count_iterables(iterables, inode.synchronize) # make new join node fields to connect to each replicated # join in-edge source node. 
- slot_dicts = [ - jnode._add_join_item_fields() for _ in range(iter_cnt) - ] + slot_dicts = [jnode._add_join_item_fields() for _ in range(iter_cnt)] # for each join in-edge, connect every expanded source node # which matches on the in-edge source name to the destination # join node. Qualify each edge connect join field name by @@ -1110,11 +1169,10 @@ def make_field_func(*pair): olddata = old_edge_dict[old_id] newdata = deepcopy(olddata) # the (source, destination) field tuples - connects = newdata['connect'] + connects = newdata["connect"] # the join fields connected to the source join_fields = [ - field for _, field in connects - if field in jnode.joinfield + field for _, field in connects if field in jnode.joinfield ] # the {field: slot fields} maps assigned to the input # node, e.g. {'image': 'imageJ3', 'mask': 'maskJ3'} @@ -1129,10 +1187,18 @@ def make_field_func(*pair): connects[con_idx] = (src_field, slot_field) logger.debug( "Qualified the %s -> %s join field %s as %s.", - in_node, jnode, dest_field, slot_field) + in_node, + jnode, + dest_field, + slot_field, + ) graph_in.add_edge(in_node, jnode, **newdata) - logger.debug("Connected the join node %s subgraph to the" - " expanded join point %s", jnode, in_node) + logger.debug( + "Connected the join node %s subgraph to the" + " expanded join point %s", + jnode, + in_node, + ) # nx.write_dot(graph_in, '%s_post.dot' % node) # the remaining iterable nodes @@ -1172,6 +1238,7 @@ def _iterable_nodes(graph_in): Return the iterable nodes list """ import networkx as nx + nodes = nx.topological_sort(graph_in) inodes = [node for node in nodes if node.iterables is not None] inodes_no_src = [node for node in inodes if not node.itersource] @@ -1194,8 +1261,9 @@ def _standardize_iterables(node): if node.synchronize: if len(iterables) == 2: first, last = iterables - if all((isinstance(item, (str, bytes)) and item in fields - for item in first)): + if all( + (isinstance(item, (str, bytes)) and item in fields for item in first) + ): iterables = _transpose_iterables(first, last) # Convert a tuple to a list @@ -1212,9 +1280,7 @@ def _standardize_iterables(node): def make_field_func(*pair): return pair[0], lambda: pair[1] - iter_items = [ - make_field_func(*field_value1) for field_value1 in iterables - ] + iter_items = [make_field_func(*field_value1) for field_value1 in iterables] iterables = dict(iter_items) node.iterables = iterables @@ -1231,20 +1297,25 @@ def _validate_iterables(node, iterables, fields): if isinstance(iterables, dict): iterables = list(iterables.items()) elif not isinstance(iterables, tuple) and not isinstance(iterables, list): - raise ValueError("The %s iterables type is not a list or a dictionary:" - " %s" % (node.name, iterables.__class__)) + raise ValueError( + "The %s iterables type is not a list or a dictionary:" + " %s" % (node.name, iterables.__class__) + ) for item in iterables: try: if len(item) != 2: - raise ValueError("The %s iterables is not a [(field, values)]" - " list" % node.name) + raise ValueError( + "The %s iterables is not a [(field, values)]" " list" % node.name + ) except TypeError as e: - raise TypeError("A %s iterables member is not iterable: %s" % - (node.name, e)) + raise TypeError( + "A %s iterables member is not iterable: %s" % (node.name, e) + ) field, _ = item if field not in fields: - raise ValueError("The %s iterables field is unrecognized: %s" % - (node.name, field)) + raise ValueError( + "The %s iterables field is unrecognized: %s" % (node.name, field) + ) def _transpose_iterables(fields, values): 
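# Illustrative sketch (hypothetical values, not part of the nipype sources):
# _standardize_iterables above normalizes a [(field, values)] iterables list
# into a {field: callable} dict, where each callable returns that field's values.
iterables_list = [("a", [1, 2]), ("b", [3, 4])]
standardized = {field: (lambda values=values: values) for field, values in iterables_list}
assert standardized["a"]() == [1, 2] and standardized["b"]() == [3, 4]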
@@ -1267,18 +1338,26 @@ def _transpose_iterables(fields, values): return list(transposed.items()) return list( - zip(fields, [[v for v in list(transpose) if v is not None] - for transpose in zip(*values)])) - - -def export_graph(graph_in, - base_dir=None, - show=False, - use_execgraph=False, - show_connectinfo=False, - dotfilename='graph.dot', - format='png', - simple_form=True): + zip( + fields, + [ + [v for v in list(transpose) if v is not None] + for transpose in zip(*values) + ], + ) + ) + + +def export_graph( + graph_in, + base_dir=None, + show=False, + use_execgraph=False, + show_connectinfo=False, + dotfilename="graph.dot", + format="png", + simple_form=True, +): """ Displays the graph layout of the pipeline This function requires that pygraphviz and matplotlib are available on @@ -1300,37 +1379,40 @@ def export_graph(graph_in, makes the graph rather cluttered. default [False] """ import networkx as nx + graph = deepcopy(graph_in) if use_execgraph: graph = generate_expanded_graph(graph) - logger.debug('using execgraph') + logger.debug("using execgraph") else: - logger.debug('using input graph') + logger.debug("using input graph") if base_dir is None: base_dir = os.getcwd() makedirs(base_dir, exist_ok=True) out_dot = fname_presuffix( - dotfilename, suffix='_detailed.dot', use_ext=False, newpath=base_dir) + dotfilename, suffix="_detailed.dot", use_ext=False, newpath=base_dir + ) _write_detailed_dot(graph, out_dot) # Convert .dot if format != 'dot' outfname, res = _run_dot(out_dot, format_ext=format) if res is not None and res.runtime.returncode: - logger.warning('dot2png: %s', res.runtime.stderr) + logger.warning("dot2png: %s", res.runtime.stderr) pklgraph = _create_dot_graph(graph, show_connectinfo, simple_form) simple_dot = fname_presuffix( - dotfilename, suffix='.dot', use_ext=False, newpath=base_dir) + dotfilename, suffix=".dot", use_ext=False, newpath=base_dir + ) nx.drawing.nx_pydot.write_dot(pklgraph, simple_dot) # Convert .dot if format != 'dot' simplefname, res = _run_dot(simple_dot, format_ext=format) if res is not None and res.runtime.returncode: - logger.warning('dot2png: %s', res.runtime.stderr) + logger.warning("dot2png: %s", res.runtime.stderr) if show: - pos = nx.graphviz_layout(pklgraph, prog='dot') + pos = nx.graphviz_layout(pklgraph, prog="dot") nx.draw(pklgraph, pos) if show_connectinfo: nx.draw_networkx_edge_labels(pklgraph, pos) @@ -1338,7 +1420,7 @@ def export_graph(graph_in, return simplefname if simple_form else outfname -def format_dot(dotfilename, format='png'): +def format_dot(dotfilename, format="png"): """Dump a directed graph (Linux only; install via `brew` on OSX)""" try: formatted_dot, _ = _run_dot(dotfilename, format_ext=format) @@ -1351,14 +1433,13 @@ def format_dot(dotfilename, format='png'): def _run_dot(dotfilename, format_ext): - if format_ext == 'dot': + if format_ext == "dot": return dotfilename, None - dot_base = os.path.splitext(dotfilename)[0] - formatted_dot = '{}.{}'.format(dot_base, format_ext) + dot_base = os.path.splitext(dotfilename)[0] + formatted_dot = "{}.{}".format(dot_base, format_ext) cmd = 'dot -T{} -o"{}" "{}"'.format(format_ext, formatted_dot, dotfilename) - res = CommandLine(cmd, terminal_output='allatonce', - resource_monitor=False).run() + res = CommandLine(cmd, terminal_output="allatonce", resource_monitor=False).run() return formatted_dot, res @@ -1387,9 +1468,9 @@ def walk_outputs(object): else: if isdefined(object) and isinstance(object, (str, bytes)): if os.path.islink(object) or os.path.isfile(object): - out = 
[(filename, 'f') for filename in get_all_files(object)] + out = [(filename, "f") for filename in get_all_files(object)] elif os.path.isdir(object): - out = [(object, 'd')] + out = [(object, "d")] return out @@ -1399,53 +1480,54 @@ def walk_files(cwd): yield os.path.join(path, f) -def clean_working_directory(outputs, - cwd, - inputs, - needed_outputs, - config, - files2keep=None, - dirs2keep=None): +def clean_working_directory( + outputs, cwd, inputs, needed_outputs, config, files2keep=None, dirs2keep=None +): """Removes all files not needed for further analysis from the directory """ if not outputs: return outputs_to_keep = list(outputs.trait_get().keys()) - if needed_outputs and \ - str2bool(config['execution']['remove_unnecessary_outputs']): + if needed_outputs and str2bool(config["execution"]["remove_unnecessary_outputs"]): outputs_to_keep = needed_outputs # build a list of needed files output_files = [] outputdict = outputs.trait_get() for output in outputs_to_keep: output_files.extend(walk_outputs(outputdict[output])) - needed_files = [path for path, type in output_files if type == 'f'] - if str2bool(config['execution']['keep_inputs']): + needed_files = [path for path, type in output_files if type == "f"] + if str2bool(config["execution"]["keep_inputs"]): input_files = [] inputdict = inputs.trait_get() input_files.extend(walk_outputs(inputdict)) - needed_files += [path for path, type in input_files if type == 'f'] + needed_files += [path for path, type in input_files if type == "f"] for extra in [ - '_0x*.json', 'provenance.*', 'pyscript*.m', 'pyjobs*.mat', - 'command.txt', 'result*.pklz', '_inputs.pklz', '_node.pklz', - '.proc-*', + "_0x*.json", + "provenance.*", + "pyscript*.m", + "pyjobs*.mat", + "command.txt", + "result*.pklz", + "_inputs.pklz", + "_node.pklz", + ".proc-*", ]: needed_files.extend(glob(os.path.join(cwd, extra))) if files2keep: needed_files.extend(ensure_list(files2keep)) - needed_dirs = [path for path, type in output_files if type == 'd'] + needed_dirs = [path for path, type in output_files if type == "d"] if dirs2keep: needed_dirs.extend(ensure_list(dirs2keep)) - for extra in ['_nipype', '_report']: + for extra in ["_nipype", "_report"]: needed_dirs.extend(glob(os.path.join(cwd, extra))) temp = [] for filename in needed_files: temp.extend(get_related_files(filename)) needed_files = temp - logger.debug('Needed files: %s', ';'.join(needed_files)) - logger.debug('Needed dirs: %s', ';'.join(needed_dirs)) + logger.debug("Needed files: %s", ";".join(needed_files)) + logger.debug("Needed dirs: %s", ";".join(needed_dirs)) files2remove = [] - if str2bool(config['execution']['remove_unnecessary_outputs']): + if str2bool(config["execution"]["remove_unnecessary_outputs"]): for f in walk_files(cwd): if f not in needed_files: if not needed_dirs: @@ -1453,15 +1535,15 @@ def clean_working_directory(outputs, elif not any([f.startswith(dname) for dname in needed_dirs]): files2remove.append(f) else: - if not str2bool(config['execution']['keep_inputs']): + if not str2bool(config["execution"]["keep_inputs"]): input_files = [] inputdict = inputs.trait_get() input_files.extend(walk_outputs(inputdict)) - input_files = [path for path, type in input_files if type == 'f'] + input_files = [path for path, type in input_files if type == "f"] for f in walk_files(cwd): if f in input_files and f not in needed_files: files2remove.append(f) - logger.debug('Removing files: %s', ';'.join(files2remove)) + logger.debug("Removing files: %s", ";".join(files2remove)) for f in files2remove: os.remove(f) 
for key in outputs.copyable_trait_names(): @@ -1515,11 +1597,11 @@ def merge_bundles(g1, g2): return g1 -def write_workflow_prov(graph, filename=None, format='all'): +def write_workflow_prov(graph, filename=None, format="all"): """Write W3C PROV Model JSON file """ if not filename: - filename = os.path.join(os.getcwd(), 'workflow_provenance') + filename = os.path.join(os.getcwd(), "workflow_provenance") ps = ProvStore() @@ -1531,16 +1613,15 @@ def write_workflow_prov(graph, filename=None, format='all'): _, hashval, _, _ = node.hash_exists() attrs = { pm.PROV["type"]: nipype_ns[classname], - pm.PROV["label"]: '_'.join((classname, node.name)), - nipype_ns['hashval']: hashval + pm.PROV["label"]: "_".join((classname, node.name)), + nipype_ns["hashval"]: hashval, } process = ps.g.activity(get_id(), None, None, attrs) if isinstance(result.runtime, list): process.add_attributes({pm.PROV["type"]: nipype_ns["MapNode"]}) # add info about sub processes for idx, runtime in enumerate(result.runtime): - subresult = InterfaceResult( - result.interface[idx], runtime, outputs={}) + subresult = InterfaceResult(result.interface[idx], runtime, outputs={}) if result.inputs: if idx < len(result.inputs): subresult.inputs = result.inputs[idx] @@ -1550,14 +1631,12 @@ def write_workflow_prov(graph, filename=None, format='all'): if isdefined(values) and idx < len(values): subresult.outputs[key] = values[idx] sub_doc = ProvStore().add_results(subresult) - sub_bundle = pm.ProvBundle( - sub_doc.get_records(), identifier=get_id()) + sub_bundle = pm.ProvBundle(sub_doc.get_records(), identifier=get_id()) ps.g.add_bundle(sub_bundle) bundle_entity = ps.g.entity( sub_bundle.identifier, - other_attributes={ - 'prov:type': pm.PROV_BUNDLE - }) + other_attributes={"prov:type": pm.PROV_BUNDLE}, + ) ps.g.wasGeneratedBy(bundle_entity, process) else: process.add_attributes({pm.PROV["type"]: nipype_ns["Node"]}) @@ -1565,14 +1644,11 @@ def write_workflow_prov(graph, filename=None, format='all'): prov_doc = result.provenance else: prov_doc = ProvStore().add_results(result) - result_bundle = pm.ProvBundle( - prov_doc.get_records(), identifier=get_id()) + result_bundle = pm.ProvBundle(prov_doc.get_records(), identifier=get_id()) ps.g.add_bundle(result_bundle) bundle_entity = ps.g.entity( - result_bundle.identifier, - other_attributes={ - 'prov:type': pm.PROV_BUNDLE - }) + result_bundle.identifier, other_attributes={"prov:type": pm.PROV_BUNDLE} + ) ps.g.wasGeneratedBy(bundle_entity, process) processes.append(process) @@ -1581,7 +1657,8 @@ def write_workflow_prov(graph, filename=None, format='all'): for idx, edgeinfo in enumerate(graph.in_edges()): ps.g.wasStartedBy( processes[list(nodes).index(edgeinfo[1])], - starter=processes[list(nodes).index(edgeinfo[0])]) + starter=processes[list(nodes).index(edgeinfo[0])], + ) # write provenance ps.write_provenance(filename, format=format) @@ -1596,46 +1673,49 @@ def write_workflow_resources(graph, filename=None, append=None): import simplejson as json # Overwrite filename if nipype config is set - filename = config.get('monitoring', 'summary_file', filename) + filename = config.get("monitoring", "summary_file", filename) # If filename still does not make sense, store in $PWD if not filename: - filename = os.path.join(os.getcwd(), 'resource_monitor.json') + filename = os.path.join(os.getcwd(), "resource_monitor.json") if append is None: - append = str2bool(config.get('monitoring', 'summary_append', 'true')) + append = str2bool(config.get("monitoring", "summary_append", "true")) big_dict = { - 
'time': [], - 'name': [], - 'interface': [], - 'rss_GiB': [], - 'vms_GiB': [], - 'cpus': [], - 'mapnode': [], - 'params': [], + "time": [], + "name": [], + "interface": [], + "rss_GiB": [], + "vms_GiB": [], + "cpus": [], + "mapnode": [], + "params": [], } # If file exists, just append new profile information # If we append different runs, then we will see different # "bursts" of timestamps corresponding to those executions. if append and os.path.isfile(filename): - with open(filename, 'r' if PY3 else 'rb') as rsf: + with open(filename, "r" if PY3 else "rb") as rsf: big_dict = json.load(rsf) for _, node in enumerate(graph.nodes()): nodename = node.fullname classname = node.interface.__class__.__name__ - params = '' + params = "" if node.parameterization: - params = '_'.join(['{}'.format(p) for p in node.parameterization]) + params = "_".join(["{}".format(p) for p in node.parameterization]) try: rt_list = node.result.runtime except Exception: - logger.warning('Could not access runtime info for node %s' - ' (%s interface)', nodename, classname) + logger.warning( + "Could not access runtime info for node %s" " (%s interface)", + nodename, + classname, + ) continue if not isinstance(rt_list, list): @@ -1643,22 +1723,26 @@ def write_workflow_resources(graph, filename=None, append=None): for subidx, runtime in enumerate(rt_list): try: - nsamples = len(runtime.prof_dict['time']) + nsamples = len(runtime.prof_dict["time"]) except AttributeError: logger.warning( 'Could not retrieve profiling information for node "%s" ' - '(mapflow %d/%d).', nodename, subidx + 1, len(rt_list)) + "(mapflow %d/%d).", + nodename, + subidx + 1, + len(rt_list), + ) continue - for key in ['time', 'cpus', 'rss_GiB', 'vms_GiB']: + for key in ["time", "cpus", "rss_GiB", "vms_GiB"]: big_dict[key] += runtime.prof_dict[key] - big_dict['interface'] += [classname] * nsamples - big_dict['name'] += [nodename] * nsamples - big_dict['mapnode'] += [subidx] * nsamples - big_dict['params'] += [params] * nsamples + big_dict["interface"] += [classname] * nsamples + big_dict["name"] += [nodename] * nsamples + big_dict["mapnode"] += [subidx] * nsamples + big_dict["params"] += [params] * nsamples - with open(filename, 'w' if PY3 else 'wb') as rsf: + with open(filename, "w" if PY3 else "wb") as rsf: json.dump(big_dict, rsf, ensure_ascii=False) return filename @@ -1668,6 +1752,7 @@ def topological_sort(graph, depth_first=False): """Returns a depth first sorted order if depth_first is True """ import networkx as nx + nodesort = list(nx.topological_sort(graph)) if not depth_first: return nodesort, None @@ -1685,8 +1770,8 @@ def topological_sort(graph, depth_first=False): for node in desc: indices.append(nodesort.index(node)) nodes.extend( - np.array(nodesort)[np.array(indices)[np.argsort(indices)]] - .tolist()) + np.array(nodesort)[np.array(indices)[np.argsort(indices)]].tolist() + ) for node in desc: nodesort.remove(node) groups.extend([group] * len(desc)) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 655ee679cc..6897beb19f 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -21,11 +21,10 @@ import posixpath import simplejson as json from time import sleep, time -from glob import glob from builtins import str, bytes, open -from .. import logging, config +from .. 
import logging, config, __version__ as version from .misc import is_container from future import standard_library @@ -823,22 +822,22 @@ def read_stream(stream, logger=None, encoding=None): return out.splitlines() -def savepkl(filename, record, versioning=False, sync=False): - pkl_open = gzip.open if filename.endswith(".pklz") else open - with pkl_open(filename, "wb") as pkl_file: - if versioning: - from nipype import __version__ as version +def savepkl(filename, record, versioning=False): + from io import BytesIO + with BytesIO() as f: + if versioning: metadata = json.dumps({"version": version}) + f.write(metadata.encode("utf-8")) + f.write("\n".encode("utf-8")) + pickle.dump(record, f) + content = f.getvalue() - pkl_file.write(metadata.encode("utf-8")) - pkl_file.write("\n".encode("utf-8")) - - pickle.dump(record, pkl_file) - fmlogger.info("Finished saving: {}".format(filename)) - if sync: - pkl_file.flush() - os.fsync(pkl_file.fileno()) + pkl_open = gzip.open if filename.endswith(".pklz") else open + tmpfile = filename + ".tmp" + with pkl_open(tmpfile, "wb") as pkl_file: + pkl_file.write(content) + os.rename(tmpfile, filename) rst_levels = ["=", "-", "~", "+"] From b9cdb285188f649a3e8133a03c8eb158d75895da Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 11 Nov 2019 17:32:41 -0500 Subject: [PATCH 0521/1665] MNT: Update changelog --- doc/changelog/1.X.X-changelog | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/doc/changelog/1.X.X-changelog b/doc/changelog/1.X.X-changelog index cfac06b090..80f853866b 100644 --- a/doc/changelog/1.X.X-changelog +++ b/doc/changelog/1.X.X-changelog @@ -1,11 +1,16 @@ -1.3.0 (To Be Determined) -======================== +1.3.0 (November 11, 2019) +========================= ##### [Full changelog](https://github.com/nipy/nipype/milestone/34?closed=1) + * FIX: Fixed typo in QwarpInputSpec Trait description (https://github.com/nipy/nipype/pull/3079) * FIX: Restore ``AFNICommand._get_fname``, required by some interfaces (https://github.com/nipy/nipype/pull/3071) * FIX: Remove asynchronous chdir callback (https://github.com/nipy/nipype/pull/3060) * FIX: Minimize scope for directory changes while loading results file (https://github.com/nipy/nipype/pull/3061) + * ENH: Minimize the number of calls to ``_load_results`` when populating inputs (https://github.com/nipy/nipype/pull/3075) + * ENH: Refactor savepkl/loadpkl - add a window for loadpkl to wait for the file (https://github.com/nipy/nipype/pull/3089) + * ENH: Add "ExportFile" interface as simple alternative to "DataSink" (https://github.com/nipy/nipype/pull/3054) + * ENH: Allow nipype.cfg in cwd to be read even if ~/.nipype does not exist (https://github.com/nipy/nipype/pull/3072) * ENH: Add precommit information for contributors and pre-commit style (https://github.com/nipy/nipype/pull/3063) * ENH: Delay etelemetry for non-interactive sessions, report bad versions (https://github.com/nipy/nipype/pull/3049) * ENH: Run memoized check_version at REPL import, Node/Workflow/Interface init (https://github.com/nipy/nipype/pull/30) From 81adf95d3db2a7d767b5cc9cb459254a9e5137a4 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Mon, 11 Nov 2019 18:02:00 -0500 Subject: [PATCH 0522/1665] Update Zenodo MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No entry to sort: Gio Piantoni No entry to sort: Victor Férat No entry to sort: Isaiah Norton No entry to sort: Niklas Förster No entry to sort: daniel glen No entry to sort: Kirstie Whitaker No entry to sort: hstojic No entry to sort: Jonathan R. Williford No entry to sort: Adam Kimbler No entry to sort: Kevin Sitek No entry to sort: Ami Tsuchida No entry to sort: Abel González No entry to sort: Michiel Cottaar No entry to sort: Daniel Brenner --- .zenodo.json | 84 ++++++++++++++++++++++++++-------------------------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index e65bbe084a..5598da914d 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -73,10 +73,6 @@ "affiliation": "Shattuck Lab, UCLA Brain Mapping Center", "name": "Wong, Jason" }, - { - "affiliation": "Concordia University", - "name": "Benderoff, Erin" - }, { "affiliation": "Developer", "name": "Clark, Daniel", @@ -142,6 +138,10 @@ "name": "Gramfort, Alexandre", "orcid": "0000-0001-9791-4404" }, + { + "affiliation": "Concordia University", + "name": "Benderoff, Erin" + }, { "affiliation": "Dartmouth College: Hanover, NH, United States", "name": "Halchenko, Yaroslav O.", @@ -274,6 +274,11 @@ { "name": "Dubois, Mathieu" }, + { + "affiliation": "Holland Bloorview Kids Rehabilitation Hospital", + "name": "Tilley II, Steven", + "orcid": "0000-0003-4853-5082" + }, { "affiliation": "Child Mind Institute", "name": "Frohlich, Caroline" @@ -313,10 +318,6 @@ "name": "Eshaghi, Arman", "orcid": "0000-0002-6652-3512" }, - { - "name": "Heinsfeld, Anibal S\u00f3lon", - "orcid": "0000-0002-2050-0614" - }, { "name": "Ginsburg, Daniel" }, @@ -330,6 +331,10 @@ "name": "Kastman, Erik", "orcid": "0000-0001-7221-9042" }, + { + "name": "Heinsfeld, Anibal S\u00f3lon", + "orcid": "0000-0002-2050-0614" + }, { "affiliation": "Washington University in St Louis", "name": "Acland, Benjamin", @@ -376,9 +381,6 @@ { "name": "Millman, Jarrod" }, - { - "name": "Lai, Jeff" - }, { "name": "Zhou, Dale" }, @@ -388,11 +390,6 @@ { "name": "Haselgrove, Christian" }, - { - "affiliation": "Holland Bloorview Kids Rehabilitation Hospital", - "name": "Tilley II, Steven", - "orcid": "0000-0003-4853-5082" - }, { "name": "Renfro, Mandy" }, @@ -405,11 +402,6 @@ "name": "Kahn, Ari E.", "orcid": "0000-0002-2127-0507" }, - { - "affiliation": "Yale University; New Haven, CT, United States", - "name": "Sisk, Lucinda M.", - "orcid": "0000-0003-4900-9770" - }, { "affiliation": "Korea Advanced Institute of Science and Technology", "name": "Kim, Sin", @@ -437,6 +429,11 @@ { "name": "Hallquist, Michael" }, + { + "affiliation": "Yale University; New Haven, CT, United States", + "name": "Sisk, Lucinda M.", + "orcid": "0000-0003-4900-9770" + }, { "affiliation": "Donders Institute for Brain, Cognition and Behavior, Center for Cognitive Neuroimaging", "name": "Chetverikov, Andrey", @@ -484,11 +481,6 @@ { "name": "Hinds, Oliver" }, - { - "affiliation": "National Institute on Aging, Baltimore, MD, USA", - "name": "Bilgel, Murat", - "orcid": "0000-0001-5042-7422" - }, { "affiliation": "TIB \u2013 Leibniz Information Centre for Science and Technology and University Library, Hannover, Germany", "name": "Leinweber, Katrin", @@ -501,15 +493,6 @@ "affiliation": "Boston University", "name": "Perkins, L. 
Nathan" }, - { - "affiliation": "University of Amsterdam", - "name": "Snoek, Lukas", - "orcid": "0000-0001-8972-204X" - }, - { - "affiliation": "Institute of Imaging & Computer Vision, RWTH Aachen University, Germany", - "name": "Weninger, Leon" - }, { "affiliation": "University of Newcastle, Australia", "name": "Cooper, Gavin", @@ -524,16 +507,20 @@ { "name": "Noel, Maxime" }, + { + "affiliation": "University of Amsterdam", + "name": "Snoek, Lukas", + "orcid": "0000-0001-8972-204X" + }, + { + "affiliation": "Institute of Imaging & Computer Vision, RWTH Aachen University, Germany", + "name": "Weninger, Leon" + }, { "affiliation": "University of Pennsylvania", "name": "Junhao WEN", "orcid": "0000-0003-2077-3070" }, - { - "affiliation": "Leibniz Institute for Neurobiology", - "name": "Stadler, J\u00f6rg", - "orcid": "0000-0003-4313-129X" - }, { "name": "Cheung, Brian" }, @@ -549,6 +536,11 @@ "name": "Durnez, Joke", "orcid": "0000-0001-9030-2202" }, + { + "affiliation": "Leibniz Institute for Neurobiology", + "name": "Stadler, J\u00f6rg", + "orcid": "0000-0003-4313-129X" + }, { "affiliation": "CNRS, UMS3552 IRMaGe", "name": "Condamine, Eric", @@ -600,10 +592,12 @@ "name": "Khanuja, Ranjeet" }, { - "name": "Schlamp, Kai" + "affiliation": "National Institute on Aging, Baltimore, MD, USA", + "name": "Bilgel, Murat", + "orcid": "0000-0001-5042-7422" }, { - "name": "Arias, Jaime" + "name": "Schlamp, Kai" }, { "affiliation": "CEA", @@ -671,6 +665,12 @@ { "name": "Davison, Andrew" }, + { + "name": "Lai, Jeff" + }, + { + "name": "Arias, Jaime" + }, { "name": "Bielievtsov, Dmytro", "orcid": "0000-0003-3846-7696" From 37851ae40e6f7c00a7dbda2678221c40e06f0624 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 11 Nov 2019 18:02:53 -0500 Subject: [PATCH 0523/1665] MNT: Verison 1.3.0 --- nipype/info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/info.py b/nipype/info.py index bd81ca482f..8a155fbc36 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -9,7 +9,7 @@ # nipype version information # Remove -dev for release -__version__ = '1.3.0-rc1.post-dev' +__version__ = '1.3.0' def get_nipype_gitversion(): From 8c781a6a8892054c9430f98b74fb44176bbc65fc Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 12 Nov 2019 09:36:29 -0500 Subject: [PATCH 0524/1665] MNT: 1.3.1-dev --- doc/documentation.rst | 2 +- nipype/info.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/documentation.rst b/doc/documentation.rst index 70b55d9a78..5b4216f8a7 100644 --- a/doc/documentation.rst +++ b/doc/documentation.rst @@ -9,7 +9,7 @@ Documentation :Release: |version| :Date: |today| -Previous versions: `1.2.3 `_ `1.2.2 `_ +Previous versions: `1.3.0 `_ `1.2.3 `_ .. container:: doc2 diff --git a/nipype/info.py b/nipype/info.py index 8a155fbc36..6fc6cf0099 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -9,7 +9,7 @@ # nipype version information # Remove -dev for release -__version__ = '1.3.0' +__version__ = '1.3.1-dev' def get_nipype_gitversion(): From 9e88a217a36072782768c1c4f69736a4b9bf741e Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Tue, 12 Nov 2019 09:37:31 -0500 Subject: [PATCH 0525/1665] MNT: 1.4.0-dev --- doc/documentation.rst | 2 +- nipype/info.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/documentation.rst b/doc/documentation.rst index 70b55d9a78..5b4216f8a7 100644 --- a/doc/documentation.rst +++ b/doc/documentation.rst @@ -9,7 +9,7 @@ Documentation :Release: |version| :Date: |today| -Previous versions: `1.2.3 `_ `1.2.2 `_ +Previous versions: `1.3.0 `_ `1.2.3 `_ .. container:: doc2 diff --git a/nipype/info.py b/nipype/info.py index 8a155fbc36..4e9055c7ea 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -9,7 +9,7 @@ # nipype version information # Remove -dev for release -__version__ = '1.3.0' +__version__ = '1.4.0-dev' def get_nipype_gitversion(): From 497b44d680eee0892fa59c6aaaae22a17d70a536 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 12 Nov 2019 09:43:34 -0500 Subject: [PATCH 0526/1665] STY: Black files pre-merge --- nipype/pipeline/engine/utils.py | 749 ++++++++++++++++++-------------- nipype/utils/filemanip.py | 271 ++++++------ 2 files changed, 561 insertions(+), 459 deletions(-) diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index cb68061f0d..78d9417f9f 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -35,7 +35,12 @@ from ...utils.misc import str2bool from ...utils.functions import create_function_from_source from ...interfaces.base.traits_extension import ( - rebase_path_traits, resolve_path_traits, OutputMultiPath, isdefined, Undefined) + rebase_path_traits, + resolve_path_traits, + OutputMultiPath, + isdefined, + Undefined, +) from ...interfaces.base.support import Bunch, InterfaceResult from ...interfaces.base import CommandLine from ...interfaces.utility import IdentityInterface @@ -43,7 +48,7 @@ from inspect import signature -logger = logging.getLogger('nipype.workflow') +logger = logging.getLogger("nipype.workflow") def _parameterization_dir(param): @@ -68,13 +73,12 @@ def save_hashfile(hashfile, hashed_inputs): # XXX - SG current workaround is to just # create the hashed file and not put anything # in it - with open(hashfile, 'wt') as fd: + with open(hashfile, "wt") as fd: fd.writelines(str(hashed_inputs)) - logger.debug('Unable to write a particular type to the json file') + logger.debug("Unable to write a particular type to the json file") else: - logger.critical('Unable to open the file in write mode: %s', - hashfile) + logger.critical("Unable to open the file in write mode: %s", hashfile) def nodelist_runner(nodes, updatehash=False, stop_first=False): @@ -94,49 +98,47 @@ def nodelist_runner(nodes, updatehash=False, stop_first=False): result = node.result err = [] - if result.runtime and hasattr(result.runtime, 'traceback'): + if result.runtime and hasattr(result.runtime, "traceback"): err = [result.runtime.traceback] err += format_exception(*sys.exc_info()) - err = '\n'.join(err) + err = "\n".join(err) finally: yield i, result, err def write_node_report(node, result=None, is_mapnode=False): """Write a report file for a node.""" - if not str2bool(node.config['execution']['create_report']): + if not str2bool(node.config["execution"]["create_report"]): return cwd = node.output_dir() - report_file = Path(cwd) / '_report' / 'report.rst' + report_file = Path(cwd) / "_report" / "report.rst" report_file.parent.mkdir(exist_ok=True, parents=True) lines = [ - write_rst_header('Node: %s' % get_print_name(node), level=0), - write_rst_list( - ['Hierarchy : %s' % 
node.fullname, - 'Exec ID : %s' % node._id]), - write_rst_header('Original Inputs', level=1), + write_rst_header("Node: %s" % get_print_name(node), level=0), + write_rst_list(["Hierarchy : %s" % node.fullname, "Exec ID : %s" % node._id]), + write_rst_header("Original Inputs", level=1), write_rst_dict(node.inputs.trait_get()), ] if result is None: logger.debug('[Node] Writing pre-exec report to "%s"', report_file) - report_file.write_text('\n'.join(lines)) + report_file.write_text("\n".join(lines)) return logger.debug('[Node] Writing post-exec report to "%s"', report_file) lines += [ - write_rst_header('Execution Inputs', level=1), + write_rst_header("Execution Inputs", level=1), write_rst_dict(node.inputs.trait_get()), - write_rst_header('Execution Outputs', level=1) + write_rst_header("Execution Outputs", level=1), ] outputs = result.outputs if outputs is None: - lines += ['None'] - report_file.write_text('\n'.join(lines)) + lines += ["None"] + report_file.write_text("\n".join(lines)) return if isinstance(outputs, Bunch): @@ -144,90 +146,98 @@ def write_node_report(node, result=None, is_mapnode=False): elif outputs: lines.append(write_rst_dict(outputs.trait_get())) else: - lines += ['Outputs object was empty.'] + lines += ["Outputs object was empty."] if is_mapnode: - lines.append(write_rst_header('Subnode reports', level=1)) + lines.append(write_rst_header("Subnode reports", level=1)) nitems = len(ensure_list(getattr(node.inputs, node.iterfield[0]))) subnode_report_files = [] for i in range(nitems): - subnode_file = Path(cwd) / 'mapflow' / ( - '_%s%d' % (node.name, i)) / '_report' / 'report.rst' - subnode_report_files.append('subnode %d : %s' % (i, subnode_file)) + subnode_file = ( + Path(cwd) + / "mapflow" + / ("_%s%d" % (node.name, i)) + / "_report" + / "report.rst" + ) + subnode_report_files.append("subnode %d : %s" % (i, subnode_file)) lines.append(write_rst_list(subnode_report_files)) - report_file.write_text('\n'.join(lines)) + report_file.write_text("\n".join(lines)) return - lines.append(write_rst_header('Runtime info', level=1)) + lines.append(write_rst_header("Runtime info", level=1)) # Init rst dictionary of runtime stats rst_dict = { - 'hostname': result.runtime.hostname, - 'duration': result.runtime.duration, - 'working_dir': result.runtime.cwd, - 'prev_wd': getattr(result.runtime, 'prevcwd', ''), + "hostname": result.runtime.hostname, + "duration": result.runtime.duration, + "working_dir": result.runtime.cwd, + "prev_wd": getattr(result.runtime, "prevcwd", ""), } - for prop in ('cmdline', 'mem_peak_gb', 'cpu_percent'): + for prop in ("cmdline", "mem_peak_gb", "cpu_percent"): if hasattr(result.runtime, prop): rst_dict[prop] = getattr(result.runtime, prop) lines.append(write_rst_dict(rst_dict)) # Collect terminal output - if hasattr(result.runtime, 'merged'): + if hasattr(result.runtime, "merged"): lines += [ - write_rst_header('Terminal output', level=2), + write_rst_header("Terminal output", level=2), write_rst_list(result.runtime.merged), ] - if hasattr(result.runtime, 'stdout'): + if hasattr(result.runtime, "stdout"): lines += [ - write_rst_header('Terminal - standard output', level=2), + write_rst_header("Terminal - standard output", level=2), write_rst_list(result.runtime.stdout), ] - if hasattr(result.runtime, 'stderr'): + if hasattr(result.runtime, "stderr"): lines += [ - write_rst_header('Terminal - standard error', level=2), + write_rst_header("Terminal - standard error", level=2), write_rst_list(result.runtime.stderr), ] # Store environment - if 
hasattr(result.runtime, 'environ'): + if hasattr(result.runtime, "environ"): lines += [ - write_rst_header('Environment', level=2), + write_rst_header("Environment", level=2), write_rst_dict(result.runtime.environ), ] - report_file.write_text('\n'.join(lines)) + report_file.write_text("\n".join(lines)) def write_report(node, report_type=None, is_mapnode=False): """Write a report file for a node - DEPRECATED""" - if report_type not in ('preexec', 'postexec'): + if report_type not in ("preexec", "postexec"): logger.warning('[Node] Unknown report type "%s".', report_type) return - write_node_report(node, is_mapnode=is_mapnode, - result=node.result if report_type == 'postexec' else None) + write_node_report( + node, + is_mapnode=is_mapnode, + result=node.result if report_type == "postexec" else None, + ) def save_resultfile(result, cwd, name, rebase=None): """Save a result pklz file to ``cwd``.""" if rebase is None: - rebase = config.getboolean('execution', 'use_relative_paths') + rebase = config.getboolean("execution", "use_relative_paths") cwd = os.path.abspath(cwd) - resultsfile = os.path.join(cwd, 'result_%s.pklz' % name) + resultsfile = os.path.join(cwd, "result_%s.pklz" % name) logger.debug("Saving results file: '%s'", resultsfile) if result.outputs is None: - logger.warning('Storing result file without outputs') + logger.warning("Storing result file without outputs") savepkl(resultsfile, result) return try: output_names = result.outputs.copyable_trait_names() except AttributeError: - logger.debug('Storing non-traited results, skipping rebase of paths') + logger.debug("Storing non-traited results, skipping rebase of paths") savepkl(resultsfile, result) return @@ -244,7 +254,8 @@ def save_resultfile(result, cwd, name, rebase=None): if isdefined(old): if result.outputs.trait(key).is_trait_type(OutputMultiPath): old = result.outputs.trait(key).handler.get_value( - result.outputs, key) + result.outputs, key + ) backup_traits[key] = old val = rebase_path_traits(result.outputs.trait(key), old, cwd) setattr(result.outputs, key, val) @@ -284,17 +295,19 @@ def load_resultfile(results_file, resolve=True): try: outputs = result.outputs.get() except TypeError: # This is a Bunch - logger.debug('Outputs object of loaded result %s is a Bunch.', results_file) + logger.debug("Outputs object of loaded result %s is a Bunch.", results_file) return result - logger.debug('Resolving paths in outputs loaded from results file.') + logger.debug("Resolving paths in outputs loaded from results file.") for trait_name, old in list(outputs.items()): if isdefined(old): if result.outputs.trait(trait_name).is_trait_type(OutputMultiPath): old = result.outputs.trait(trait_name).handler.get_value( - result.outputs, trait_name) - value = resolve_path_traits(result.outputs.trait(trait_name), old, - results_file.parent) + result.outputs, trait_name + ) + value = resolve_path_traits( + result.outputs.trait(trait_name), old, results_file.parent + ) setattr(result.outputs, trait_name, value) return result @@ -306,13 +319,13 @@ def strip_temp(files, wd): if isinstance(f, list): out.append(strip_temp(f, wd)) else: - out.append(f.replace(os.path.join(wd, '_tempinput'), wd)) + out.append(f.replace(os.path.join(wd, "_tempinput"), wd)) return out def _write_inputs(node): lines = [] - nodename = node.fullname.replace('.', '_') + nodename = node.fullname.replace(".", "_") for key, _ in list(node.inputs.items()): val = getattr(node.inputs, key) if isdefined(val): @@ -323,64 +336,70 @@ def _write_inputs(node): lines.append("%s.inputs.%s = 
'%s'" % (nodename, key, val)) else: funcname = [ - name for name in func.__globals__ - if name != '__builtins__' + name for name in func.__globals__ if name != "__builtins__" ][0] lines.append(pickle.loads(val)) if funcname == nodename: - lines[-1] = lines[-1].replace(' %s(' % funcname, - ' %s_1(' % funcname) - funcname = '%s_1' % funcname + lines[-1] = lines[-1].replace( + " %s(" % funcname, " %s_1(" % funcname + ) + funcname = "%s_1" % funcname + lines.append("from nipype.utils.functions import getsource") lines.append( - 'from nipype.utils.functions import getsource') - lines.append("%s.inputs.%s = getsource(%s)" % - (nodename, key, funcname)) + "%s.inputs.%s = getsource(%s)" % (nodename, key, funcname) + ) else: - lines.append('%s.inputs.%s = %s' % (nodename, key, val)) + lines.append("%s.inputs.%s = %s" % (nodename, key, val)) return lines -def format_node(node, format='python', include_config=False): +def format_node(node, format="python", include_config=False): """Format a node in a given output syntax.""" from .nodes import MapNode + lines = [] - name = node.fullname.replace('.', '_') - if format == 'python': + name = node.fullname.replace(".", "_") + if format == "python": klass = node.interface - importline = 'from %s import %s' % (klass.__module__, - klass.__class__.__name__) - comment = '# Node: %s' % node.fullname + importline = "from %s import %s" % (klass.__module__, klass.__class__.__name__) + comment = "# Node: %s" % node.fullname spec = signature(node.interface.__init__) args = [p.name for p in list(spec.parameters.values())] args = args[1:] if args: filled_args = [] for arg in args: - if hasattr(node.interface, '_%s' % arg): - filled_args.append('%s=%s' % - (arg, - getattr(node.interface, '_%s' % arg))) - args = ', '.join(filled_args) + if hasattr(node.interface, "_%s" % arg): + filled_args.append( + "%s=%s" % (arg, getattr(node.interface, "_%s" % arg)) + ) + args = ", ".join(filled_args) else: - args = '' + args = "" klass_name = klass.__class__.__name__ if isinstance(node, MapNode): - nodedef = '%s = MapNode(%s(%s), iterfield=%s, name="%s")' \ - % (name, klass_name, args, node.iterfield, name) + nodedef = '%s = MapNode(%s(%s), iterfield=%s, name="%s")' % ( + name, + klass_name, + args, + node.iterfield, + name, + ) else: - nodedef = '%s = Node(%s(%s), name="%s")' \ - % (name, klass_name, args, name) + nodedef = '%s = Node(%s(%s), name="%s")' % (name, klass_name, args, name) lines = [importline, comment, nodedef] if include_config: lines = [ importline, - "from collections import OrderedDict", comment, nodedef + "from collections import OrderedDict", + comment, + nodedef, ] - lines.append('%s.config = %s' % (name, node.config)) + lines.append("%s.config = %s" % (name, node.config)) if node.iterables is not None: - lines.append('%s.iterables = %s' % (name, node.iterables)) + lines.append("%s.iterables = %s" % (name, node.iterables)) lines.extend(_write_inputs(node)) return lines @@ -405,28 +424,26 @@ def modify_paths(object, relative=True, basedir=None): out = {} for key, val in sorted(object.items()): if isdefined(val): - out[key] = modify_paths( - val, relative=relative, basedir=basedir) + out[key] = modify_paths(val, relative=relative, basedir=basedir) elif isinstance(object, (list, tuple)): out = [] for val in object: if isdefined(val): - out.append( - modify_paths(val, relative=relative, basedir=basedir)) + out.append(modify_paths(val, relative=relative, basedir=basedir)) if isinstance(object, tuple): out = tuple(out) else: if isdefined(object): if 
isinstance(object, (str, bytes)) and os.path.isfile(object): if relative: - if config.getboolean('execution', 'use_relative_paths'): + if config.getboolean("execution", "use_relative_paths"): out = relpath(object, start=basedir) else: out = object else: out = os.path.abspath(os.path.join(basedir, object)) if not os.path.exists(out): - raise IOError('File %s not found' % out) + raise IOError("File %s not found" % out) else: out = object else: @@ -442,20 +459,20 @@ def get_print_name(node, simple_form=True): """ name = node.fullname - if hasattr(node, '_interface'): - pkglist = node.interface.__class__.__module__.split('.') + if hasattr(node, "_interface"): + pkglist = node.interface.__class__.__module__.split(".") interface = node.interface.__class__.__name__ - destclass = '' + destclass = "" if len(pkglist) > 2: - destclass = '.%s' % pkglist[2] + destclass = ".%s" % pkglist[2] if simple_form: name = node.fullname + destclass else: - name = '.'.join([node.fullname, interface]) + destclass + name = ".".join([node.fullname, interface]) + destclass if simple_form: - parts = name.split('.') + parts = name.split(".") if len(parts) > 2: - return ' ('.join(parts[1:]) + ')' + return " (".join(parts[1:]) + ")" elif len(parts) == 2: return parts[1] return name @@ -466,15 +483,16 @@ def _create_dot_graph(graph, show_connectinfo=False, simple_form=True): Ensures that edge info is pickleable. """ - logger.debug('creating dot graph') + logger.debug("creating dot graph") import networkx as nx + pklgraph = nx.DiGraph() for edge in graph.edges(): data = graph.get_edge_data(*edge) srcname = get_print_name(edge[0], simple_form=simple_form) destname = get_print_name(edge[1], simple_form=simple_form) if show_connectinfo: - pklgraph.add_edge(srcname, destname, l=str(data['connect'])) + pklgraph.add_edge(srcname, destname, l=str(data["connect"])) else: pklgraph.add_edge(srcname, destname) return pklgraph @@ -495,67 +513,86 @@ def _write_detailed_dot(graph, dotfilename): } """ import networkx as nx - text = ['digraph structs {', 'node [shape=record];'] + + text = ["digraph structs {", "node [shape=record];"] # write nodes edges = [] for n in nx.topological_sort(graph): nodename = n.itername inports = [] for u, v, d in graph.in_edges(nbunch=n, data=True): - for cd in d['connect']: + for cd in d["connect"]: if isinstance(cd[0], (str, bytes)): outport = cd[0] else: outport = cd[0][0] inport = cd[1] - ipstrip = 'in%s' % _replacefunk(inport) - opstrip = 'out%s' % _replacefunk(outport) + ipstrip = "in%s" % _replacefunk(inport) + opstrip = "out%s" % _replacefunk(outport) edges.append( - '%s:%s:e -> %s:%s:w;' % (u.itername.replace('.', ''), opstrip, - v.itername.replace('.', ''), ipstrip)) + "%s:%s:e -> %s:%s:w;" + % ( + u.itername.replace(".", ""), + opstrip, + v.itername.replace(".", ""), + ipstrip, + ) + ) if inport not in inports: inports.append(inport) - inputstr = ['{IN'] + [ - '| %s' % (_replacefunk(ip), ip) for ip in sorted(inports) - ] + ['}'] + inputstr = ( + ["{IN"] + + ["| %s" % (_replacefunk(ip), ip) for ip in sorted(inports)] + + ["}"] + ) outports = [] for u, v, d in graph.out_edges(nbunch=n, data=True): - for cd in d['connect']: + for cd in d["connect"]: if isinstance(cd[0], (str, bytes)): outport = cd[0] else: outport = cd[0][0] if outport not in outports: outports.append(outport) - outputstr = ['{OUT'] + [ - '| %s' % (_replacefunk(oport), oport) - for oport in sorted(outports) - ] + ['}'] - srcpackage = '' - if hasattr(n, '_interface'): - pkglist = n.interface.__class__.__module__.split('.') + outputstr 
= ( + ["{OUT"] + + [ + "| %s" % (_replacefunk(oport), oport) + for oport in sorted(outports) + ] + + ["}"] + ) + srcpackage = "" + if hasattr(n, "_interface"): + pkglist = n.interface.__class__.__module__.split(".") if len(pkglist) > 2: srcpackage = pkglist[2] - srchierarchy = '.'.join(nodename.split('.')[1:-1]) - nodenamestr = '{ %s | %s | %s }' % (nodename.split('.')[-1], - srcpackage, srchierarchy) + srchierarchy = ".".join(nodename.split(".")[1:-1]) + nodenamestr = "{ %s | %s | %s }" % ( + nodename.split(".")[-1], + srcpackage, + srchierarchy, + ) text += [ - '%s [label="%s|%s|%s"];' % - (nodename.replace('.', ''), ''.join(inputstr), nodenamestr, - ''.join(outputstr)) + '%s [label="%s|%s|%s"];' + % ( + nodename.replace(".", ""), + "".join(inputstr), + nodenamestr, + "".join(outputstr), + ) ] # write edges for edge in sorted(edges): text.append(edge) - text.append('}') - with open(dotfilename, 'wt') as filep: - filep.write('\n'.join(text)) + text.append("}") + with open(dotfilename, "wt") as filep: + filep.write("\n".join(text)) return text def _replacefunk(x): - return x.replace('_', '').replace('.', '').replace('@', '').replace( - '-', '') + return x.replace("_", "").replace(".", "").replace("@", "").replace("-", "") # Graph manipulations for iterable expansion @@ -567,9 +604,9 @@ def _get_valid_pathstr(pathstr): """ if not isinstance(pathstr, (str, bytes)): pathstr = str(pathstr) - pathstr = pathstr.replace(os.sep, '..') - pathstr = re.sub(r'''[][ (){}?:<>#!|"';]''', '', pathstr) - pathstr = pathstr.replace(',', '.') + pathstr = pathstr.replace(os.sep, "..") + pathstr = re.sub(r"""[][ (){}?:<>#!|"';]""", "", pathstr) + pathstr = pathstr.replace(",", ".") return pathstr @@ -642,8 +679,9 @@ def synchronize_iterables(iterables): True """ out_list = [] - iterable_items = [(field, iter(fvals())) - for field, fvals in sorted(iterables.items())] + iterable_items = [ + (field, iter(fvals())) for field, fvals in sorted(iterables.items()) + ] while True: cur_dict = {} for field, iter_values in iterable_items: @@ -664,17 +702,21 @@ def evaluate_connect_function(function_source, args, first_arg): try: output_value = func(first_arg, *list(args)) except NameError as e: - if e.args[0].startswith("global name") and \ - e.args[0].endswith("is not defined"): - e.args = (e.args[0], - ("Due to engine constraints all imports have to be done " - "inside each function definition")) + if e.args[0].startswith("global name") and e.args[0].endswith("is not defined"): + e.args = ( + e.args[0], + ( + "Due to engine constraints all imports have to be done " + "inside each function definition" + ), + ) raise e return output_value def get_levels(G): import networkx as nx + levels = {} for n in nx.topological_sort(G): levels[n] = 0 @@ -683,13 +725,9 @@ def get_levels(G): return levels -def _merge_graphs(supergraph, - nodes, - subgraph, - nodeid, - iterables, - prefix, - synchronize=False): +def _merge_graphs( + supergraph, nodes, subgraph, nodeid, iterables, prefix, synchronize=False +): """Merges two graphs that share a subset of nodes. If the subgraph needs to be replicated for multiple iterables, the @@ -725,8 +763,12 @@ def _merge_graphs(supergraph, # This should trap the problem of miswiring when multiple iterables are # used at the same level. The use of the template below for naming # updates to nodes is the general solution. - raise Exception(("Execution graph does not have a unique set of node " - "names. 
Please rerun the workflow")) + raise Exception( + ( + "Execution graph does not have a unique set of node " + "names. Please rerun the workflow" + ) + ) edgeinfo = {} for n in list(subgraph.nodes()): nidx = ids.index(n._hierarchy + n._id) @@ -736,7 +778,8 @@ def _merge_graphs(supergraph, if n._hierarchy + n._id not in list(edgeinfo.keys()): edgeinfo[n._hierarchy + n._id] = [] edgeinfo[n._hierarchy + n._id].append( - (edge[0], supergraph.get_edge_data(*edge))) + (edge[0], supergraph.get_edge_data(*edge)) + ) supergraph.remove_nodes_from(nodes) # Add copies of the subgraph depending on the number of iterables iterable_params = expand_iterables(iterables, synchronize) @@ -745,20 +788,21 @@ def _merge_graphs(supergraph, return supergraph # Make an iterable subgraph node id template count = len(iterable_params) - template = '.%s%%0%dd' % (prefix, np.ceil(np.log10(count))) + template = ".%s%%0%dd" % (prefix, np.ceil(np.log10(count))) # Copy the iterable subgraphs for i, params in enumerate(iterable_params): Gc = deepcopy(subgraph) ids = [n._hierarchy + n._id for n in Gc.nodes()] nodeidx = ids.index(nodeid) rootnode = list(Gc.nodes())[nodeidx] - paramstr = '' + paramstr = "" for key, val in sorted(params.items()): - paramstr = '{}_{}_{}'.format(paramstr, _get_valid_pathstr(key), - _get_valid_pathstr(val)) + paramstr = "{}_{}_{}".format( + paramstr, _get_valid_pathstr(key), _get_valid_pathstr(val) + ) rootnode.set_input(key, val) - logger.debug('Parameterization: paramstr=%s', paramstr) + logger.debug("Parameterization: paramstr=%s", paramstr) levels = get_levels(Gc) for n in Gc.nodes(): # update parameterization of the node to reflect the location of @@ -791,10 +835,10 @@ def _connect_nodes(graph, srcnode, destnode, connection_info): """ data = graph.get_edge_data(srcnode, destnode, default=None) if not data: - data = {'connect': connection_info} + data = {"connect": connection_info} graph.add_edges_from([(srcnode, destnode, data)]) else: - data['connect'].extend(connection_info) + data["connect"].extend(connection_info) def _remove_nonjoin_identity_nodes(graph, keep_iterables=False): @@ -806,7 +850,7 @@ def _remove_nonjoin_identity_nodes(graph, keep_iterables=False): # if keep_iterables is False, then include the iterable # and join nodes in the nodes to delete for node in _identity_nodes(graph, not keep_iterables): - if not hasattr(node, 'joinsource'): + if not hasattr(node, "joinsource"): _remove_identity_node(graph, node) return graph @@ -819,10 +863,12 @@ def _identity_nodes(graph, include_iterables): to True. 
""" import networkx as nx + return [ - node for node in nx.topological_sort(graph) - if isinstance(node.interface, IdentityInterface) and ( - include_iterables or getattr(node, 'iterables') is None) + node + for node in nx.topological_sort(graph) + if isinstance(node.interface, IdentityInterface) + and (include_iterables or getattr(node, "iterables") is None) ] @@ -832,8 +878,7 @@ def _remove_identity_node(graph, node): portinputs, portoutputs = _node_ports(graph, node) for field, connections in list(portoutputs.items()): if portinputs: - _propagate_internal_output(graph, node, field, connections, - portinputs) + _propagate_internal_output(graph, node, field, connections, portinputs) else: _propagate_root_output(graph, node, field, connections) graph.remove_nodes_from([node]) @@ -853,10 +898,10 @@ def _node_ports(graph, node): portinputs = {} portoutputs = {} for u, _, d in graph.in_edges(node, data=True): - for src, dest in d['connect']: + for src, dest in d["connect"]: portinputs[dest] = (u, src) for _, v, d in graph.out_edges(node, data=True): - for src, dest in d['connect']: + for src, dest in d["connect"]: if isinstance(src, tuple): srcport = src[0] else: @@ -887,25 +932,22 @@ def _propagate_internal_output(graph, node, field, connections, portinputs): if isinstance(srcport, tuple) and isinstance(src, tuple): src_func = srcport[1].split("\\n")[0] dst_func = src[1].split("\\n")[0] - raise ValueError("Does not support two inline functions " - "in series ('{}' and '{}'), found when " - "connecting {} to {}. Please use a Function " - "node.".format(src_func, dst_func, srcnode, - destnode)) - - connect = graph.get_edge_data( - srcnode, destnode, default={ - 'connect': [] - }) + raise ValueError( + "Does not support two inline functions " + "in series ('{}' and '{}'), found when " + "connecting {} to {}. Please use a Function " + "node.".format(src_func, dst_func, srcnode, destnode) + ) + + connect = graph.get_edge_data(srcnode, destnode, default={"connect": []}) if isinstance(src, tuple): - connect['connect'].append(((srcport, src[1], src[2]), inport)) + connect["connect"].append(((srcport, src[1], src[2]), inport)) else: - connect = {'connect': [(srcport, inport)]} + connect = {"connect": [(srcport, inport)]} old_connect = graph.get_edge_data( - srcnode, destnode, default={ - 'connect': [] - }) - old_connect['connect'] += connect['connect'] + srcnode, destnode, default={"connect": []} + ) + old_connect["connect"] += connect["connect"] graph.add_edges_from([(srcnode, destnode, old_connect)]) else: value = getattr(node.inputs, field) @@ -923,6 +965,7 @@ def generate_expanded_graph(graph_in): parameterized as (a=1,b=3), (a=1,b=4), (a=2,b=3) and (a=2,b=4). 
""" import networkx as nx + try: dfs_preorder = nx.dfs_preorder except AttributeError: @@ -934,7 +977,7 @@ def generate_expanded_graph(graph_in): for node in graph_in.nodes(): if node.iterables: _standardize_iterables(node) - allprefixes = list('abcdefghijklmnopqrstuvwxyz') + allprefixes = list("abcdefghijklmnopqrstuvwxyz") # the iterable nodes inodes = _iterable_nodes(graph_in) @@ -947,8 +990,10 @@ def generate_expanded_graph(graph_in): # the join successor nodes of the current iterable node jnodes = [ - node for node in graph_in.nodes() - if hasattr(node, 'joinsource') and inode.name == node.joinsource + node + for node in graph_in.nodes() + if hasattr(node, "joinsource") + and inode.name == node.joinsource and nx.has_path(graph_in, inode, node) ] @@ -965,8 +1010,7 @@ def generate_expanded_graph(graph_in): for src, dest in edges2remove: graph_in.remove_edge(src, dest) - logger.debug("Excised the %s -> %s join node in-edge.", src, - dest) + logger.debug("Excised the %s -> %s join node in-edge.", src, dest) if inode.itersource: # the itersource is a (node name, fields) tuple @@ -976,22 +1020,24 @@ def generate_expanded_graph(graph_in): src_fields = [src_fields] # find the unique iterable source node in the graph try: - iter_src = next((node for node in graph_in.nodes() - if node.name == src_name - and nx.has_path(graph_in, node, inode))) + iter_src = next( + ( + node + for node in graph_in.nodes() + if node.name == src_name and nx.has_path(graph_in, node, inode) + ) + ) except StopIteration: - raise ValueError("The node %s itersource %s was not found" - " among the iterable predecessor nodes" % - (inode, src_name)) - logger.debug("The node %s has iterable source node %s", inode, - iter_src) + raise ValueError( + "The node %s itersource %s was not found" + " among the iterable predecessor nodes" % (inode, src_name) + ) + logger.debug("The node %s has iterable source node %s", inode, iter_src) # look up the iterables for this particular itersource descendant # using the iterable source ancestor values as a key iterables = {} # the source node iterables values - src_values = [ - getattr(iter_src.inputs, field) for field in src_fields - ] + src_values = [getattr(iter_src.inputs, field) for field in src_fields] # if there is one source field, then the key is the the source value, # otherwise the key is the tuple of source values if len(src_values) == 1: @@ -1001,9 +1047,13 @@ def generate_expanded_graph(graph_in): # The itersource iterables is a {field: lookup} dictionary, where the # lookup is a {source key: iteration list} dictionary. Look up the # current iterable value using the predecessor itersource input values. 
- iter_dict = dict([(field, lookup[key]) - for field, lookup in inode.iterables - if key in lookup]) + iter_dict = dict( + [ + (field, lookup[key]) + for field, lookup in inode.iterables + if key in lookup + ] + ) # convert the iterables to the standard {field: function} format @@ -1011,37 +1061,43 @@ def make_field_func(*pair): return pair[0], lambda: pair[1] iterables = dict( - [make_field_func(*pair) for pair in list(iter_dict.items())]) + [make_field_func(*pair) for pair in list(iter_dict.items())] + ) else: iterables = inode.iterables.copy() inode.iterables = None - logger.debug('node: %s iterables: %s', inode, iterables) + logger.debug("node: %s iterables: %s", inode, iterables) # collect the subnodes to expand subnodes = [s for s in dfs_preorder(graph_in, inode)] - prior_prefix = [re.findall(r'\.(.)I', s._id) for s in subnodes if s._id] + prior_prefix = [re.findall(r"\.(.)I", s._id) for s in subnodes if s._id] prior_prefix = sorted([l for item in prior_prefix for l in item]) if not prior_prefix: - iterable_prefix = 'a' + iterable_prefix = "a" else: - if prior_prefix[-1] == 'z': - raise ValueError('Too many iterables in the workflow') - iterable_prefix =\ - allprefixes[allprefixes.index(prior_prefix[-1]) + 1] - logger.debug(('subnodes:', subnodes)) + if prior_prefix[-1] == "z": + raise ValueError("Too many iterables in the workflow") + iterable_prefix = allprefixes[allprefixes.index(prior_prefix[-1]) + 1] + logger.debug(("subnodes:", subnodes)) # append a suffix to the iterable node id - inode._id += '.%sI' % iterable_prefix + inode._id += ".%sI" % iterable_prefix # merge the iterated subgraphs # dj: the behaviour of .copy changes in version 2 - if LooseVersion(nx.__version__) < LooseVersion('2'): + if LooseVersion(nx.__version__) < LooseVersion("2"): subgraph = graph_in.subgraph(subnodes) else: subgraph = graph_in.subgraph(subnodes).copy() - graph_in = _merge_graphs(graph_in, subnodes, subgraph, - inode._hierarchy + inode._id, iterables, - iterable_prefix, inode.synchronize) + graph_in = _merge_graphs( + graph_in, + subnodes, + subgraph, + inode._hierarchy + inode._id, + iterables, + iterable_prefix, + inode.synchronize, + ) # reconnect the join nodes for jnode in jnodes: @@ -1054,7 +1110,7 @@ def make_field_func(*pair): for src_id in list(old_edge_dict.keys()): # Drop the original JoinNodes; only concerned with # generated Nodes - if hasattr(node, 'joinfield') and node.itername == src_id: + if hasattr(node, "joinfield") and node.itername == src_id: continue # Patterns: # - src_id : Non-iterable node @@ -1063,12 +1119,17 @@ def make_field_func(*pair): # - src_id.[a-z]I.[a-z]\d+ : # Non-IdentityInterface w/ iterables # - src_idJ\d+ : JoinNode(IdentityInterface) - if re.match(src_id + r'((\.[a-z](I\.[a-z])?|J)\d+)?$', - node.itername): + if re.match( + src_id + r"((\.[a-z](I\.[a-z])?|J)\d+)?$", node.itername + ): expansions[src_id].append(node) for in_id, in_nodes in list(expansions.items()): - logger.debug("The join node %s input %s was expanded" - " to %d nodes.", jnode, in_id, len(in_nodes)) + logger.debug( + "The join node %s input %s was expanded" " to %d nodes.", + jnode, + in_id, + len(in_nodes), + ) # preserve the node iteration order by sorting on the node id for in_nodes in list(expansions.values()): in_nodes.sort(key=lambda node: node._id) @@ -1077,9 +1138,7 @@ def make_field_func(*pair): iter_cnt = count_iterables(iterables, inode.synchronize) # make new join node fields to connect to each replicated # join in-edge source node. 
- slot_dicts = [ - jnode._add_join_item_fields() for _ in range(iter_cnt) - ] + slot_dicts = [jnode._add_join_item_fields() for _ in range(iter_cnt)] # for each join in-edge, connect every expanded source node # which matches on the in-edge source name to the destination # join node. Qualify each edge connect join field name by @@ -1095,11 +1154,10 @@ def make_field_func(*pair): olddata = old_edge_dict[old_id] newdata = deepcopy(olddata) # the (source, destination) field tuples - connects = newdata['connect'] + connects = newdata["connect"] # the join fields connected to the source join_fields = [ - field for _, field in connects - if field in jnode.joinfield + field for _, field in connects if field in jnode.joinfield ] # the {field: slot fields} maps assigned to the input # node, e.g. {'image': 'imageJ3', 'mask': 'maskJ3'} @@ -1114,10 +1172,18 @@ def make_field_func(*pair): connects[con_idx] = (src_field, slot_field) logger.debug( "Qualified the %s -> %s join field %s as %s.", - in_node, jnode, dest_field, slot_field) + in_node, + jnode, + dest_field, + slot_field, + ) graph_in.add_edge(in_node, jnode, **newdata) - logger.debug("Connected the join node %s subgraph to the" - " expanded join point %s", jnode, in_node) + logger.debug( + "Connected the join node %s subgraph to the" + " expanded join point %s", + jnode, + in_node, + ) # nx.write_dot(graph_in, '%s_post.dot' % node) # the remaining iterable nodes @@ -1157,6 +1223,7 @@ def _iterable_nodes(graph_in): Return the iterable nodes list """ import networkx as nx + nodes = nx.topological_sort(graph_in) inodes = [node for node in nodes if node.iterables is not None] inodes_no_src = [node for node in inodes if not node.itersource] @@ -1179,8 +1246,9 @@ def _standardize_iterables(node): if node.synchronize: if len(iterables) == 2: first, last = iterables - if all((isinstance(item, (str, bytes)) and item in fields - for item in first)): + if all( + (isinstance(item, (str, bytes)) and item in fields for item in first) + ): iterables = _transpose_iterables(first, last) # Convert a tuple to a list @@ -1197,9 +1265,7 @@ def _standardize_iterables(node): def make_field_func(*pair): return pair[0], lambda: pair[1] - iter_items = [ - make_field_func(*field_value1) for field_value1 in iterables - ] + iter_items = [make_field_func(*field_value1) for field_value1 in iterables] iterables = dict(iter_items) node.iterables = iterables @@ -1216,20 +1282,25 @@ def _validate_iterables(node, iterables, fields): if isinstance(iterables, dict): iterables = list(iterables.items()) elif not isinstance(iterables, tuple) and not isinstance(iterables, list): - raise ValueError("The %s iterables type is not a list or a dictionary:" - " %s" % (node.name, iterables.__class__)) + raise ValueError( + "The %s iterables type is not a list or a dictionary:" + " %s" % (node.name, iterables.__class__) + ) for item in iterables: try: if len(item) != 2: - raise ValueError("The %s iterables is not a [(field, values)]" - " list" % node.name) + raise ValueError( + "The %s iterables is not a [(field, values)]" " list" % node.name + ) except TypeError as e: - raise TypeError("A %s iterables member is not iterable: %s" % - (node.name, e)) + raise TypeError( + "A %s iterables member is not iterable: %s" % (node.name, e) + ) field, _ = item if field not in fields: - raise ValueError("The %s iterables field is unrecognized: %s" % - (node.name, field)) + raise ValueError( + "The %s iterables field is unrecognized: %s" % (node.name, field) + ) def _transpose_iterables(fields, values): 
@@ -1252,18 +1323,26 @@ def _transpose_iterables(fields, values): return list(transposed.items()) return list( - zip(fields, [[v for v in list(transpose) if v is not None] - for transpose in zip(*values)])) - - -def export_graph(graph_in, - base_dir=None, - show=False, - use_execgraph=False, - show_connectinfo=False, - dotfilename='graph.dot', - format='png', - simple_form=True): + zip( + fields, + [ + [v for v in list(transpose) if v is not None] + for transpose in zip(*values) + ], + ) + ) + + +def export_graph( + graph_in, + base_dir=None, + show=False, + use_execgraph=False, + show_connectinfo=False, + dotfilename="graph.dot", + format="png", + simple_form=True, +): """ Displays the graph layout of the pipeline This function requires that pygraphviz and matplotlib are available on @@ -1285,37 +1364,40 @@ def export_graph(graph_in, makes the graph rather cluttered. default [False] """ import networkx as nx + graph = deepcopy(graph_in) if use_execgraph: graph = generate_expanded_graph(graph) - logger.debug('using execgraph') + logger.debug("using execgraph") else: - logger.debug('using input graph') + logger.debug("using input graph") if base_dir is None: base_dir = os.getcwd() os.makedirs(base_dir, exist_ok=True) out_dot = fname_presuffix( - dotfilename, suffix='_detailed.dot', use_ext=False, newpath=base_dir) + dotfilename, suffix="_detailed.dot", use_ext=False, newpath=base_dir + ) _write_detailed_dot(graph, out_dot) # Convert .dot if format != 'dot' outfname, res = _run_dot(out_dot, format_ext=format) if res is not None and res.runtime.returncode: - logger.warning('dot2png: %s', res.runtime.stderr) + logger.warning("dot2png: %s", res.runtime.stderr) pklgraph = _create_dot_graph(graph, show_connectinfo, simple_form) simple_dot = fname_presuffix( - dotfilename, suffix='.dot', use_ext=False, newpath=base_dir) + dotfilename, suffix=".dot", use_ext=False, newpath=base_dir + ) nx.drawing.nx_pydot.write_dot(pklgraph, simple_dot) # Convert .dot if format != 'dot' simplefname, res = _run_dot(simple_dot, format_ext=format) if res is not None and res.runtime.returncode: - logger.warning('dot2png: %s', res.runtime.stderr) + logger.warning("dot2png: %s", res.runtime.stderr) if show: - pos = nx.graphviz_layout(pklgraph, prog='dot') + pos = nx.graphviz_layout(pklgraph, prog="dot") nx.draw(pklgraph, pos) if show_connectinfo: nx.draw_networkx_edge_labels(pklgraph, pos) @@ -1323,7 +1405,7 @@ def export_graph(graph_in, return simplefname if simple_form else outfname -def format_dot(dotfilename, format='png'): +def format_dot(dotfilename, format="png"): """Dump a directed graph (Linux only; install via `brew` on OSX)""" try: formatted_dot, _ = _run_dot(dotfilename, format_ext=format) @@ -1336,14 +1418,13 @@ def format_dot(dotfilename, format='png'): def _run_dot(dotfilename, format_ext): - if format_ext == 'dot': + if format_ext == "dot": return dotfilename, None - dot_base = os.path.splitext(dotfilename)[0] - formatted_dot = '{}.{}'.format(dot_base, format_ext) + dot_base = os.path.splitext(dotfilename)[0] + formatted_dot = "{}.{}".format(dot_base, format_ext) cmd = 'dot -T{} -o"{}" "{}"'.format(format_ext, formatted_dot, dotfilename) - res = CommandLine(cmd, terminal_output='allatonce', - resource_monitor=False).run() + res = CommandLine(cmd, terminal_output="allatonce", resource_monitor=False).run() return formatted_dot, res @@ -1372,9 +1453,9 @@ def walk_outputs(object): else: if isdefined(object) and isinstance(object, (str, bytes)): if os.path.islink(object) or os.path.isfile(object): - out = 
[(filename, 'f') for filename in get_all_files(object)] + out = [(filename, "f") for filename in get_all_files(object)] elif os.path.isdir(object): - out = [(object, 'd')] + out = [(object, "d")] return out @@ -1384,53 +1465,54 @@ def walk_files(cwd): yield os.path.join(path, f) -def clean_working_directory(outputs, - cwd, - inputs, - needed_outputs, - config, - files2keep=None, - dirs2keep=None): +def clean_working_directory( + outputs, cwd, inputs, needed_outputs, config, files2keep=None, dirs2keep=None +): """Removes all files not needed for further analysis from the directory """ if not outputs: return outputs_to_keep = list(outputs.trait_get().keys()) - if needed_outputs and \ - str2bool(config['execution']['remove_unnecessary_outputs']): + if needed_outputs and str2bool(config["execution"]["remove_unnecessary_outputs"]): outputs_to_keep = needed_outputs # build a list of needed files output_files = [] outputdict = outputs.trait_get() for output in outputs_to_keep: output_files.extend(walk_outputs(outputdict[output])) - needed_files = [path for path, type in output_files if type == 'f'] - if str2bool(config['execution']['keep_inputs']): + needed_files = [path for path, type in output_files if type == "f"] + if str2bool(config["execution"]["keep_inputs"]): input_files = [] inputdict = inputs.trait_get() input_files.extend(walk_outputs(inputdict)) - needed_files += [path for path, type in input_files if type == 'f'] + needed_files += [path for path, type in input_files if type == "f"] for extra in [ - '_0x*.json', 'provenance.*', 'pyscript*.m', 'pyjobs*.mat', - 'command.txt', 'result*.pklz', '_inputs.pklz', '_node.pklz', - '.proc-*', + "_0x*.json", + "provenance.*", + "pyscript*.m", + "pyjobs*.mat", + "command.txt", + "result*.pklz", + "_inputs.pklz", + "_node.pklz", + ".proc-*", ]: needed_files.extend(glob(os.path.join(cwd, extra))) if files2keep: needed_files.extend(ensure_list(files2keep)) - needed_dirs = [path for path, type in output_files if type == 'd'] + needed_dirs = [path for path, type in output_files if type == "d"] if dirs2keep: needed_dirs.extend(ensure_list(dirs2keep)) - for extra in ['_nipype', '_report']: + for extra in ["_nipype", "_report"]: needed_dirs.extend(glob(os.path.join(cwd, extra))) temp = [] for filename in needed_files: temp.extend(get_related_files(filename)) needed_files = temp - logger.debug('Needed files: %s', ';'.join(needed_files)) - logger.debug('Needed dirs: %s', ';'.join(needed_dirs)) + logger.debug("Needed files: %s", ";".join(needed_files)) + logger.debug("Needed dirs: %s", ";".join(needed_dirs)) files2remove = [] - if str2bool(config['execution']['remove_unnecessary_outputs']): + if str2bool(config["execution"]["remove_unnecessary_outputs"]): for f in walk_files(cwd): if f not in needed_files: if not needed_dirs: @@ -1438,15 +1520,15 @@ def clean_working_directory(outputs, elif not any([f.startswith(dname) for dname in needed_dirs]): files2remove.append(f) else: - if not str2bool(config['execution']['keep_inputs']): + if not str2bool(config["execution"]["keep_inputs"]): input_files = [] inputdict = inputs.trait_get() input_files.extend(walk_outputs(inputdict)) - input_files = [path for path, type in input_files if type == 'f'] + input_files = [path for path, type in input_files if type == "f"] for f in walk_files(cwd): if f in input_files and f not in needed_files: files2remove.append(f) - logger.debug('Removing files: %s', ';'.join(files2remove)) + logger.debug("Removing files: %s", ";".join(files2remove)) for f in files2remove: os.remove(f) 
for key in outputs.copyable_trait_names(): @@ -1500,11 +1582,11 @@ def merge_bundles(g1, g2): return g1 -def write_workflow_prov(graph, filename=None, format='all'): +def write_workflow_prov(graph, filename=None, format="all"): """Write W3C PROV Model JSON file """ if not filename: - filename = os.path.join(os.getcwd(), 'workflow_provenance') + filename = os.path.join(os.getcwd(), "workflow_provenance") ps = ProvStore() @@ -1516,16 +1598,15 @@ def write_workflow_prov(graph, filename=None, format='all'): _, hashval, _, _ = node.hash_exists() attrs = { pm.PROV["type"]: nipype_ns[classname], - pm.PROV["label"]: '_'.join((classname, node.name)), - nipype_ns['hashval']: hashval + pm.PROV["label"]: "_".join((classname, node.name)), + nipype_ns["hashval"]: hashval, } process = ps.g.activity(get_id(), None, None, attrs) if isinstance(result.runtime, list): process.add_attributes({pm.PROV["type"]: nipype_ns["MapNode"]}) # add info about sub processes for idx, runtime in enumerate(result.runtime): - subresult = InterfaceResult( - result.interface[idx], runtime, outputs={}) + subresult = InterfaceResult(result.interface[idx], runtime, outputs={}) if result.inputs: if idx < len(result.inputs): subresult.inputs = result.inputs[idx] @@ -1535,14 +1616,12 @@ def write_workflow_prov(graph, filename=None, format='all'): if isdefined(values) and idx < len(values): subresult.outputs[key] = values[idx] sub_doc = ProvStore().add_results(subresult) - sub_bundle = pm.ProvBundle( - sub_doc.get_records(), identifier=get_id()) + sub_bundle = pm.ProvBundle(sub_doc.get_records(), identifier=get_id()) ps.g.add_bundle(sub_bundle) bundle_entity = ps.g.entity( sub_bundle.identifier, - other_attributes={ - 'prov:type': pm.PROV_BUNDLE - }) + other_attributes={"prov:type": pm.PROV_BUNDLE}, + ) ps.g.wasGeneratedBy(bundle_entity, process) else: process.add_attributes({pm.PROV["type"]: nipype_ns["Node"]}) @@ -1550,14 +1629,11 @@ def write_workflow_prov(graph, filename=None, format='all'): prov_doc = result.provenance else: prov_doc = ProvStore().add_results(result) - result_bundle = pm.ProvBundle( - prov_doc.get_records(), identifier=get_id()) + result_bundle = pm.ProvBundle(prov_doc.get_records(), identifier=get_id()) ps.g.add_bundle(result_bundle) bundle_entity = ps.g.entity( - result_bundle.identifier, - other_attributes={ - 'prov:type': pm.PROV_BUNDLE - }) + result_bundle.identifier, other_attributes={"prov:type": pm.PROV_BUNDLE} + ) ps.g.wasGeneratedBy(bundle_entity, process) processes.append(process) @@ -1566,7 +1642,8 @@ def write_workflow_prov(graph, filename=None, format='all'): for idx, edgeinfo in enumerate(graph.in_edges()): ps.g.wasStartedBy( processes[list(nodes).index(edgeinfo[1])], - starter=processes[list(nodes).index(edgeinfo[0])]) + starter=processes[list(nodes).index(edgeinfo[0])], + ) # write provenance ps.write_provenance(filename, format=format) @@ -1581,46 +1658,49 @@ def write_workflow_resources(graph, filename=None, append=None): import simplejson as json # Overwrite filename if nipype config is set - filename = config.get('monitoring', 'summary_file', filename) + filename = config.get("monitoring", "summary_file", filename) # If filename still does not make sense, store in $PWD if not filename: - filename = os.path.join(os.getcwd(), 'resource_monitor.json') + filename = os.path.join(os.getcwd(), "resource_monitor.json") if append is None: - append = str2bool(config.get('monitoring', 'summary_append', 'true')) + append = str2bool(config.get("monitoring", "summary_append", "true")) big_dict = { - 
'time': [], - 'name': [], - 'interface': [], - 'rss_GiB': [], - 'vms_GiB': [], - 'cpus': [], - 'mapnode': [], - 'params': [], + "time": [], + "name": [], + "interface": [], + "rss_GiB": [], + "vms_GiB": [], + "cpus": [], + "mapnode": [], + "params": [], } # If file exists, just append new profile information # If we append different runs, then we will see different # "bursts" of timestamps corresponding to those executions. if append and os.path.isfile(filename): - with open(filename, 'r') as rsf: + with open(filename, "r") as rsf: big_dict = json.load(rsf) for _, node in enumerate(graph.nodes()): nodename = node.fullname classname = node.interface.__class__.__name__ - params = '' + params = "" if node.parameterization: - params = '_'.join(['{}'.format(p) for p in node.parameterization]) + params = "_".join(["{}".format(p) for p in node.parameterization]) try: rt_list = node.result.runtime except Exception: - logger.warning('Could not access runtime info for node %s' - ' (%s interface)', nodename, classname) + logger.warning( + "Could not access runtime info for node %s" " (%s interface)", + nodename, + classname, + ) continue if not isinstance(rt_list, list): @@ -1628,22 +1708,26 @@ def write_workflow_resources(graph, filename=None, append=None): for subidx, runtime in enumerate(rt_list): try: - nsamples = len(runtime.prof_dict['time']) + nsamples = len(runtime.prof_dict["time"]) except AttributeError: logger.warning( 'Could not retrieve profiling information for node "%s" ' - '(mapflow %d/%d).', nodename, subidx + 1, len(rt_list)) + "(mapflow %d/%d).", + nodename, + subidx + 1, + len(rt_list), + ) continue - for key in ['time', 'cpus', 'rss_GiB', 'vms_GiB']: + for key in ["time", "cpus", "rss_GiB", "vms_GiB"]: big_dict[key] += runtime.prof_dict[key] - big_dict['interface'] += [classname] * nsamples - big_dict['name'] += [nodename] * nsamples - big_dict['mapnode'] += [subidx] * nsamples - big_dict['params'] += [params] * nsamples + big_dict["interface"] += [classname] * nsamples + big_dict["name"] += [nodename] * nsamples + big_dict["mapnode"] += [subidx] * nsamples + big_dict["params"] += [params] * nsamples - with open(filename, 'w') as rsf: + with open(filename, "w") as rsf: json.dump(big_dict, rsf, ensure_ascii=False) return filename @@ -1653,6 +1737,7 @@ def topological_sort(graph, depth_first=False): """Returns a depth first sorted order if depth_first is True """ import networkx as nx + nodesort = list(nx.topological_sort(graph)) if not depth_first: return nodesort, None @@ -1670,8 +1755,8 @@ def topological_sort(graph, depth_first=False): for node in desc: indices.append(nodesort.index(node)) nodes.extend( - np.array(nodesort)[np.array(indices)[np.argsort(indices)]] - .tolist()) + np.array(nodesort)[np.array(indices)[np.argsort(indices)]].tolist() + ) for node in desc: nodesort.remove(node) groups.extend([group] * len(desc)) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 8a7ce6d9ea..c3346af8f3 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -24,12 +24,12 @@ from .. 
import logging, config from .misc import is_container -fmlogger = logging.getLogger('nipype.utils') +fmlogger = logging.getLogger("nipype.utils") related_filetype_sets = [ - ('.hdr', '.img', '.mat'), - ('.nii', '.mat'), - ('.BRIK', '.HEAD'), + (".hdr", ".img", ".mat"), + (".nii", ".mat"), + (".BRIK", ".HEAD"), ] @@ -99,8 +99,7 @@ def split_filename(fname): ext = None for special_ext in special_extensions: ext_len = len(special_ext) - if (len(fname) > ext_len) and \ - (fname[-ext_len:].lower() == special_ext.lower()): + if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()): ext = fname[-ext_len:] fname = fname[:-ext_len] break @@ -110,7 +109,7 @@ def split_filename(fname): return pth, fname, ext -def fname_presuffix(fname, prefix='', suffix='', newpath=None, use_ext=True): +def fname_presuffix(fname, prefix="", suffix="", newpath=None, use_ext=True): """Manipulates path and name of input filename Parameters @@ -144,7 +143,7 @@ def fname_presuffix(fname, prefix='', suffix='', newpath=None, use_ext=True): """ pth, fname, ext = split_filename(fname) if not use_ext: - ext = '' + ext = "" # No need for isdefined: bool(Undefined) evaluates to False if newpath: @@ -152,7 +151,7 @@ def fname_presuffix(fname, prefix='', suffix='', newpath=None, use_ext=True): return op.join(pth, prefix + fname + suffix + ext) -def fnames_presuffix(fnames, prefix='', suffix='', newpath=None, use_ext=True): +def fnames_presuffix(fnames, prefix="", suffix="", newpath=None, use_ext=True): """Calls fname_presuffix for a list of files. """ f2 = [] @@ -166,7 +165,7 @@ def hash_rename(filename, hashvalue): and sets path to output_directory """ path, name, ext = split_filename(filename) - newfilename = ''.join((name, '_0x', hashvalue, ext)) + newfilename = "".join((name, "_0x", hashvalue, ext)) return op.join(path, newfilename) @@ -175,15 +174,14 @@ def check_forhash(filename): if isinstance(filename, list): filename = filename[0] path, name = op.split(filename) - if re.search('(_0x[a-z0-9]{32})', name): - hashvalue = re.findall('(_0x[a-z0-9]{32})', name) + if re.search("(_0x[a-z0-9]{32})", name): + hashvalue = re.findall("(_0x[a-z0-9]{32})", name) return True, hashvalue else: return False, None -def hash_infile(afile, chunk_len=8192, crypto=hashlib.md5, - raise_notfound=False): +def hash_infile(afile, chunk_len=8192, crypto=hashlib.md5, raise_notfound=False): """ Computes hash of a file using 'crypto' module @@ -207,7 +205,7 @@ def hash_infile(afile, chunk_len=8192, crypto=hashlib.md5, return None crypto_obj = crypto() - with open(afile, 'rb') as fp: + with open(afile, "rb") as fp: while True: data = fp.read(chunk_len) if not data: @@ -242,19 +240,19 @@ def _parse_mount_table(exit_code, output): # ^^^^ ^^^^^ # OSX mount example: /dev/disk2 on / (hfs, local, journaled) # ^ ^^^ - pattern = re.compile(r'.*? on (/.*?) (?:type |\()([^\s,\)]+)') + pattern = re.compile(r".*? on (/.*?) 
(?:type |\()([^\s,\)]+)") # Keep line and match for error reporting (match == None on failure) # Ignore empty lines - matches = [(l, pattern.match(l)) - for l in output.strip().splitlines() if l] + matches = [(l, pattern.match(l)) for l in output.strip().splitlines() if l] # (path, fstype) tuples, sorted by path length (longest first) - mount_info = sorted((match.groups() for _, match in matches - if match is not None), - key=lambda x: len(x[0]), reverse=True) - cifs_paths = [path for path, fstype in mount_info - if fstype.lower() == 'cifs'] + mount_info = sorted( + (match.groups() for _, match in matches if match is not None), + key=lambda x: len(x[0]), + reverse=True, + ) + cifs_paths = [path for path, fstype in mount_info if fstype.lower() == "cifs"] # Report failures as warnings for line, match in matches: @@ -262,7 +260,8 @@ def _parse_mount_table(exit_code, output): fmlogger.debug("Cannot parse mount line: '%s'", line) return [ - mount for mount in mount_info + mount + for mount in mount_info if any(mount[0].startswith(path) for path in cifs_paths) ] @@ -300,17 +299,19 @@ def on_cifs(fname): # Only the first match (most recent parent) counts for fspath, fstype in _cifs_table: if fname.startswith(fspath): - return fstype == 'cifs' + return fstype == "cifs" return False -def copyfile(originalfile, - newfile, - copy=False, - create_new=False, - hashmethod=None, - use_hardlink=False, - copy_related_files=True): +def copyfile( + originalfile, + newfile, + copy=False, + create_new=False, + hashmethod=None, + use_hardlink=False, + copy_related_files=True, +): """Copy or link ``originalfile`` to ``newfile``. If ``use_hardlink`` is True, and the file can be hard-linked, then a @@ -347,7 +348,7 @@ def copyfile(originalfile, if create_new: while op.exists(newfile): base, fname, ext = split_filename(newfile) - s = re.search('_c[0-9]{4,4}$', fname) + s = re.search("_c[0-9]{4,4}$", fname) i = 0 if s: i = int(s.group()[2:]) + 1 @@ -357,7 +358,7 @@ def copyfile(originalfile, newfile = base + os.sep + fname + ext if hashmethod is None: - hashmethod = config.get('execution', 'hash_method').lower() + hashmethod = config.get("execution", "hash_method").lower() # Don't try creating symlinks on CIFS if copy is False and on_cifs(newfile): @@ -377,26 +378,33 @@ def copyfile(originalfile, keep = False if op.lexists(newfile): if op.islink(newfile): - if all((os.readlink(newfile) == op.realpath(originalfile), - not use_hardlink, not copy)): + if all( + ( + os.readlink(newfile) == op.realpath(originalfile), + not use_hardlink, + not copy, + ) + ): keep = True elif posixpath.samefile(newfile, originalfile): keep = True else: - if hashmethod == 'timestamp': + if hashmethod == "timestamp": hashfn = hash_timestamp - elif hashmethod == 'content': + elif hashmethod == "content": hashfn = hash_infile else: raise AttributeError("Unknown hash method found:", hashmethod) newhash = hashfn(newfile) - fmlogger.debug('File: %s already exists,%s, copy:%d', newfile, - newhash, copy) + fmlogger.debug( + "File: %s already exists,%s, copy:%d", newfile, newhash, copy + ) orighash = hashfn(originalfile) keep = newhash == orighash if keep: - fmlogger.debug('File: %s already exists, not overwriting, copy:%d', - newfile, copy) + fmlogger.debug( + "File: %s already exists, not overwriting, copy:%d", newfile, copy + ) else: os.unlink(newfile) @@ -407,7 +415,7 @@ def copyfile(originalfile, # ~hardlink & ~symlink => copy if not keep and use_hardlink: try: - fmlogger.debug('Linking File: %s->%s', newfile, originalfile) + 
fmlogger.debug("Linking File: %s->%s", newfile, originalfile) # Use realpath to avoid hardlinking symlinks os.link(op.realpath(originalfile), newfile) except OSError: @@ -415,9 +423,9 @@ def copyfile(originalfile, else: keep = True - if not keep and not copy and os.name == 'posix': + if not keep and not copy and os.name == "posix": try: - fmlogger.debug('Symlinking File: %s->%s', newfile, originalfile) + fmlogger.debug("Symlinking File: %s->%s", newfile, originalfile) os.symlink(originalfile, newfile) except OSError: copy = True # Disable symlink for associated files @@ -426,15 +434,17 @@ def copyfile(originalfile, if not keep: try: - fmlogger.debug('Copying File: %s->%s', newfile, originalfile) + fmlogger.debug("Copying File: %s->%s", newfile, originalfile) shutil.copyfile(originalfile, newfile) except shutil.Error as e: fmlogger.warning(e.message) # Associated files if copy_related_files: - related_file_pairs = (get_related_files(f, include_this_file=False) - for f in (originalfile, newfile)) + related_file_pairs = ( + get_related_files(f, include_this_file=False) + for f in (originalfile, newfile) + ) for alt_ofile, alt_nfile in zip(*related_file_pairs): if op.exists(alt_ofile): copyfile( @@ -443,7 +453,8 @@ def copyfile(originalfile, copy, hashmethod=hashmethod, use_hardlink=use_hardlink, - copy_related_files=False) + copy_related_files=False, + ) return newfile @@ -496,9 +507,7 @@ def copyfiles(filelist, dest, copy=False, create_new=False): newfiles = [] for i, f in enumerate(ensure_list(filelist)): if isinstance(f, list): - newfiles.insert(i, - copyfiles( - f, dest, copy=copy, create_new=create_new)) + newfiles.insert(i, copyfiles(f, dest, copy=copy, create_new=create_new)) else: if len(outfiles) > 1: destfile = outfiles[i] @@ -543,9 +552,9 @@ def check_depends(targets, dependencies): """ tgts = ensure_list(targets) deps = ensure_list(dependencies) - return all(map(op.exists, tgts)) and \ - min(map(op.getmtime, tgts)) > \ - max(list(map(op.getmtime, deps)) + [0]) + return all(map(op.exists, tgts)) and min(map(op.getmtime, tgts)) > max( + list(map(op.getmtime, deps)) + [0] + ) def save_json(filename, data): @@ -559,7 +568,7 @@ def save_json(filename, data): Dictionary to save in json file. 
""" - mode = 'w' + mode = "w" with open(filename, mode) as fp: json.dump(data, fp, sort_keys=True, indent=4) @@ -578,32 +587,32 @@ def load_json(filename): """ - with open(filename, 'r') as fp: + with open(filename, "r") as fp: data = json.load(fp) return data def loadcrash(infile, *args): - if infile.endswith('pkl') or infile.endswith('pklz'): + if infile.endswith("pkl") or infile.endswith("pklz"): return loadpkl(infile) else: - raise ValueError('Only pickled crashfiles are supported') + raise ValueError("Only pickled crashfiles are supported") def loadpkl(infile): """Load a zipped or plain cPickled file.""" infile = Path(infile) - fmlogger.debug('Loading pkl: %s', infile) - pklopen = gzip.open if infile.suffix == '.pklz' else open + fmlogger.debug("Loading pkl: %s", infile) + pklopen = gzip.open if infile.suffix == ".pklz" else open - with SoftFileLock('%s.lock' % infile): - with pklopen(str(infile), 'rb') as pkl_file: + with SoftFileLock("%s.lock" % infile): + with pklopen(str(infile), "rb") as pkl_file: pkl_contents = pkl_file.read() pkl_metadata = None # Look if pkl file contains version metadata - idx = pkl_contents.find(b'\n') + idx = pkl_contents.find(b"\n") if idx >= 0: try: pkl_metadata = json.loads(pkl_contents[:idx]) @@ -612,7 +621,7 @@ def loadpkl(infile): pass else: # On success, skip JSON metadata - pkl_contents = pkl_contents[idx + 1:] + pkl_contents = pkl_contents[idx + 1 :] # Pickle files may contain relative paths that must be resolved relative # to the working directory, so use indirectory while attempting to load @@ -623,38 +632,45 @@ def loadpkl(infile): except UnicodeDecodeError: # Was this pickle created with Python 2.x? with indirectory(infile.parent): - unpkl = pickle.loads(pkl_contents, fix_imports=True, encoding='utf-8') - fmlogger.info('Successfully loaded pkl in compatibility mode.') + unpkl = pickle.loads(pkl_contents, fix_imports=True, encoding="utf-8") + fmlogger.info("Successfully loaded pkl in compatibility mode.") # Unpickling problems except Exception as e: - if pkl_metadata and 'version' in pkl_metadata: + if pkl_metadata and "version" in pkl_metadata: from nipype import __version__ as version - if pkl_metadata['version'] != version: - fmlogger.error("""\ + + if pkl_metadata["version"] != version: + fmlogger.error( + """\ Attempted to open a results file generated by Nipype version %s, \ -with an incompatible Nipype version (%s)""", pkl_metadata['version'], version) +with an incompatible Nipype version (%s)""", + pkl_metadata["version"], + version, + ) raise e - fmlogger.warning("""\ + fmlogger.warning( + """\ No metadata was found in the pkl file. Make sure you are currently using \ -the same Nipype version from the generated pkl.""") +the same Nipype version from the generated pkl.""" + ) raise e if unpkl is None: - raise ValueError('Loading %s resulted in None.' % infile) + raise ValueError("Loading %s resulted in None." 
% infile) return unpkl def crash2txt(filename, record): """ Write out plain text crash file """ - with open(filename, 'w') as fp: - if 'node' in record: - node = record['node'] - fp.write('Node: {}\n'.format(node.fullname)) - fp.write('Working directory: {}\n'.format(node.output_dir())) - fp.write('\n') - fp.write('Node inputs:\n{}\n'.format(node.inputs)) - fp.write(''.join(record['traceback'])) + with open(filename, "w") as fp: + if "node" in record: + node = record["node"] + fp.write("Node: {}\n".format(node.fullname)) + fp.write("Working directory: {}\n".format(node.output_dir())) + fp.write("\n") + fp.write("Node inputs:\n{}\n".format(node.inputs)) + fp.write("".join(record["traceback"])) def read_stream(stream, logger=None, encoding=None): @@ -667,50 +683,50 @@ def read_stream(stream, logger=None, encoding=None): """ - default_encoding = encoding or locale.getdefaultlocale()[1] or 'UTF-8' + default_encoding = encoding or locale.getdefaultlocale()[1] or "UTF-8" logger = logger or fmlogger try: out = stream.decode(default_encoding) except UnicodeDecodeError as err: - out = stream.decode(default_encoding, errors='replace') - logger.warning('Error decoding string: %s', err) + out = stream.decode(default_encoding, errors="replace") + logger.warning("Error decoding string: %s", err) return out.splitlines() def savepkl(filename, record, versioning=False): - pklopen = gzip.open if filename.endswith('.pklz') else open - with SoftFileLock('%s.lock' % filename): - with pklopen(filename, 'wb') as pkl_file: + pklopen = gzip.open if filename.endswith(".pklz") else open + with SoftFileLock("%s.lock" % filename): + with pklopen(filename, "wb") as pkl_file: if versioning: from nipype import __version__ as version - metadata = json.dumps({'version': version}) - pkl_file.write(metadata.encode('utf-8')) - pkl_file.write('\n'.encode('utf-8')) + metadata = json.dumps({"version": version}) + + pkl_file.write(metadata.encode("utf-8")) + pkl_file.write("\n".encode("utf-8")) pickle.dump(record, pkl_file) -rst_levels = ['=', '-', '~', '+'] +rst_levels = ["=", "-", "~", "+"] def write_rst_header(header, level=0): - return '\n'.join( - (header, ''.join([rst_levels[level] for _ in header]))) + '\n\n' + return "\n".join((header, "".join([rst_levels[level] for _ in header]))) + "\n\n" -def write_rst_list(items, prefix=''): +def write_rst_list(items, prefix=""): out = [] for item in items: - out.append('{} {}'.format(prefix, str(item))) - return '\n'.join(out) + '\n\n' + out.append("{} {}".format(prefix, str(item))) + return "\n".join(out) + "\n\n" -def write_rst_dict(info, prefix=''): +def write_rst_dict(info, prefix=""): out = [] for key, value in sorted(info.items()): - out.append('{}* {} : {}'.format(prefix, key, str(value))) - return '\n'.join(out) + '\n\n' + out.append("{}* {} : {}".format(prefix, key, str(value))) + return "\n".join(out) + "\n\n" def dist_is_editable(dist): @@ -724,7 +740,7 @@ def dist_is_editable(dist): # Borrowed from `pip`'s' API """ for path_item in sys.path: - egg_link = op.join(path_item, dist + '.egg-link') + egg_link = op.join(path_item, dist + ".egg-link") if op.isfile(egg_link): return True return False @@ -754,11 +770,12 @@ def emptydirs(path, noexist_ok=False): elcont = os.listdir(path) if ex.errno == errno.ENOTEMPTY and not elcont: fmlogger.warning( - 'An exception was raised trying to remove old %s, but the path' - ' seems empty. Is it an NFS mount?. Passing the exception.', - path) + "An exception was raised trying to remove old %s, but the path" + " seems empty. 
Is it an NFS mount?. Passing the exception.", + path, + ) elif ex.errno == errno.ENOTEMPTY and elcont: - fmlogger.debug('Folder %s contents (%d items).', path, len(elcont)) + fmlogger.debug("Folder %s contents (%d items).", path, len(elcont)) raise ex else: raise ex @@ -798,11 +815,11 @@ def which(cmd, env=None, pathext=None): """ if pathext is None: - pathext = os.getenv('PATHEXT', '').split(os.pathsep) - pathext.insert(0, '') + pathext = os.getenv("PATHEXT", "").split(os.pathsep) + pathext.insert(0, "") path = os.getenv("PATH", os.defpath) - if env and 'PATH' in env: + if env and "PATH" in env: path = env.get("PATH") for ext in pathext: @@ -819,27 +836,25 @@ def get_dependencies(name, environ): """ command = None - if sys.platform == 'darwin': - command = 'otool -L `which %s`' % name - elif 'linux' in sys.platform: - command = 'ldd `which %s`' % name + if sys.platform == "darwin": + command = "otool -L `which %s`" % name + elif "linux" in sys.platform: + command = "ldd `which %s`" % name else: - return 'Platform %s not supported' % sys.platform + return "Platform %s not supported" % sys.platform deps = None try: proc = sp.Popen( - command, - stdout=sp.PIPE, - stderr=sp.PIPE, - shell=True, - env=environ) + command, stdout=sp.PIPE, stderr=sp.PIPE, shell=True, env=environ + ) o, e = proc.communicate() deps = o.rstrip() except Exception as ex: deps = '"%s" failed' % command - fmlogger.warning('Could not get dependencies of %s. Error:\n%s', - name, ex.message) + fmlogger.warning( + "Could not get dependencies of %s. Error:\n%s", name, ex.message + ) return deps @@ -859,15 +874,15 @@ def canonicalize_env(env): Windows: environment dictionary with bytes keys and values Other: untouched input ``env`` """ - if os.name != 'nt': + if os.name != "nt": return env out_env = {} for key, val in env.items(): if not isinstance(key, bytes): - key = key.encode('utf-8') + key = key.encode("utf-8") if not isinstance(val, bytes): - val = val.encode('utf-8') + val = val.encode("utf-8") out_env[key] = val return out_env @@ -890,11 +905,13 @@ def relpath(path, start=None): unc_path, rest = op.splitunc(path) unc_start, rest = op.splitunc(start) if bool(unc_path) ^ bool(unc_start): - raise ValueError(("Cannot mix UNC and non-UNC paths " - "(%s and %s)") % (path, start)) + raise ValueError( + ("Cannot mix UNC and non-UNC paths " "(%s and %s)") % (path, start) + ) else: - raise ValueError("path is on drive %s, start on drive %s" % - (path_list[0], start_list[0])) + raise ValueError( + "path is on drive %s, start on drive %s" % (path_list[0], start_list[0]) + ) # Work out how much of the filepath is shared by start and path. for i in range(min(len(start_list), len(path_list))): if start_list[i].lower() != path_list[i].lower(): From 39fbd5411a844ce7c023964d3295eb7643b95af5 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Tue, 12 Nov 2019 10:38:32 -0500 Subject: [PATCH 0527/1665] FIX: Missed merge issue --- nipype/interfaces/tests/test_io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/tests/test_io.py b/nipype/interfaces/tests/test_io.py index 8bdda68951..1b718f0533 100644 --- a/nipype/interfaces/tests/test_io.py +++ b/nipype/interfaces/tests/test_io.py @@ -15,7 +15,7 @@ import nipype.interfaces.io as nio from nipype.interfaces.base.traits_extension import isdefined from nipype.interfaces.base import Undefined, TraitError -from nipype.utils.filemanip import dist_is_editable, FileExistsError +from nipype.utils.filemanip import dist_is_editable # Check for boto noboto = False From 0557bc9c04fc4cd9d42864c6722e10a06541c744 Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Tue, 12 Nov 2019 15:32:26 -0500 Subject: [PATCH 0528/1665] fix: restore checking traits or bunch --- nipype/pipeline/engine/nodes.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index eeb47f6d7a..df133ec8d4 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -551,7 +551,12 @@ def _get_inputs(self): output_value = evaluate_connect_function( conn[1], conn[2], value) else: - output_value = getattr(outputs, conn) + output_name = conn + try: + output_value = outputs.trait_get()[output_name] + except AttributeError: + output_value = outputs.dictcopy()[output_name] + logger.debug("output: %s", output_name) try: self.set_input(key, deepcopy(output_value)) From 76e96fcf5737b59696767a4f5129efe3868cb64a Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Tue, 12 Nov 2019 15:38:21 -0500 Subject: [PATCH 0529/1665] doc: add 1.3.0 to bad version --- .et | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.et b/.et index 96ed9287ea..d271a1d060 100644 --- a/.et +++ b/.et @@ -1,3 +1,4 @@ { "bad_versions" : [ "1.2.1", - "1.2.3"] -} \ No newline at end of file + "1.2.3", + "1.3.0"] +} From 082369fbdfcf8a3353808bdca765a3210367f903 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Tue, 12 Nov 2019 16:46:00 -0500 Subject: [PATCH 0530/1665] REL: 1.3.1 --- doc/changelog/1.X.X-changelog | 6 ++++++ nipype/info.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/doc/changelog/1.X.X-changelog b/doc/changelog/1.X.X-changelog index 80f853866b..9282d261a2 100644 --- a/doc/changelog/1.X.X-changelog +++ b/doc/changelog/1.X.X-changelog @@ -1,3 +1,9 @@ +1.3.1 (November 12, 2019) +========================= + + * FIX: Restore checking traits or bunch (https://github.com/nipy/nipype/pull/3094) + + 1.3.0 (November 11, 2019) ========================= diff --git a/nipype/info.py b/nipype/info.py index 6fc6cf0099..4ec20470db 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -9,7 +9,7 @@ # nipype version information # Remove -dev for release -__version__ = '1.3.1-dev' +__version__ = '1.3.1' def get_nipype_gitversion(): From 881636526133d900f525ecaff79790fb7b7bbcf7 Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Tue, 12 Nov 2019 17:39:56 -0500 Subject: [PATCH 0531/1665] add skull_file output --- nipype/interfaces/fsl/preprocess.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nipype/interfaces/fsl/preprocess.py b/nipype/interfaces/fsl/preprocess.py index ce96763d43..3d247506f4 100644 --- a/nipype/interfaces/fsl/preprocess.py +++ b/nipype/interfaces/fsl/preprocess.py @@ -111,6 +111,7 @@ class BETOutputSpec(TraitedSpec): outskin_mesh_file = File( desc="path/name of outskin mesh outline (if generated)") skull_mask_file = File(desc="path/name of skull mask (if generated)") + skull_file = File(desc="path/name of skull file (if generated)") class BET(FSLCommand): @@ -181,6 +182,9 @@ def _list_outputs(self): outputs['out_file'], suffix='_outskin_mesh') outputs['skull_mask_file'] = self._gen_fname( outputs['out_file'], suffix='_skull_mask') + if isdefined(self.inputs.skull) and self.inputs.skull: + outputs['skull_file'] = self._gen_fname( + outputs['out_file'], suffix='_skull') if isdefined(self.inputs.no_output) and self.inputs.no_output: outputs['out_file'] = Undefined return outputs From a7d0f63f56124d3c6aaae3b5312798916af7bb74 Mon Sep 17 00:00:00 2001 From: Steven Tilley Date: Wed, 13 Nov 2019 09:56:39 -0500 Subject: [PATCH 0532/1665] run make specs --- nipype/interfaces/fsl/tests/test_auto_BET.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nipype/interfaces/fsl/tests/test_auto_BET.py b/nipype/interfaces/fsl/tests/test_auto_BET.py index ea155c249b..b4f1689b0c 100644 --- a/nipype/interfaces/fsl/tests/test_auto_BET.py +++ b/nipype/interfaces/fsl/tests/test_auto_BET.py @@ -94,6 +94,7 @@ def test_BET_outputs(): outskin_mesh_file=dict(extensions=None, ), outskull_mask_file=dict(extensions=None, ), outskull_mesh_file=dict(extensions=None, ), + skull_file=dict(extensions=None, ), skull_mask_file=dict(extensions=None, ), ) outputs = BET.output_spec() From 75653feadc6667d5313d83e9c62a5d5819771a9c Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Wed, 13 Nov 2019 23:41:15 -0500 Subject: [PATCH 0533/1665] STY: Black --- nipype/__init__.py | 66 +- nipype/algorithms/__init__.py | 2 +- nipype/algorithms/confounds.py | 945 +++-- nipype/algorithms/icc.py | 72 +- nipype/algorithms/mesh.py | 187 +- nipype/algorithms/metrics.py | 386 +- nipype/algorithms/misc.py | 703 ++-- nipype/algorithms/modelgen.py | 518 +-- nipype/algorithms/rapidart.py | 565 +-- nipype/algorithms/stats.py | 53 +- nipype/algorithms/tests/test_CompCor.py | 234 +- nipype/algorithms/tests/test_ErrorMap.py | 32 +- nipype/algorithms/tests/test_Overlap.py | 12 +- nipype/algorithms/tests/test_TSNR.py | 121 +- nipype/algorithms/tests/test_auto_ACompCor.py | 48 +- .../tests/test_auto_ActivationCount.py | 13 +- .../tests/test_auto_AddCSVColumn.py | 14 +- .../algorithms/tests/test_auto_AddCSVRow.py | 10 +- nipype/algorithms/tests/test_auto_AddNoise.py | 25 +- .../tests/test_auto_ArtifactDetect.py | 48 +- .../tests/test_auto_CalculateMedian.py | 8 +- .../test_auto_CalculateNormalizedMoments.py | 11 +- .../tests/test_auto_ComputeDVARS.py | 46 +- .../tests/test_auto_ComputeMeshWarp.py | 30 +- .../algorithms/tests/test_auto_CreateNifti.py | 14 +- nipype/algorithms/tests/test_auto_Distance.py | 21 +- .../tests/test_auto_FramewiseDisplacement.py | 33 +- .../tests/test_auto_FuzzyOverlap.py | 22 +- nipype/algorithms/tests/test_auto_Gunzip.py | 9 +- nipype/algorithms/tests/test_auto_ICC.py | 15 +- .../algorithms/tests/test_auto_Matlab2CSV.py | 11 +- .../tests/test_auto_MergeCSVFiles.py | 13 +- .../algorithms/tests/test_auto_MergeROIs.py | 8 +- .../tests/test_auto_MeshWarpMaths.py | 29 +- .../tests/test_auto_ModifyAffine.py | 7 +- .../tests/test_auto_NonSteadyStateDetector.py | 9 +- .../test_auto_NormalizeProbabilityMapSet.py | 9 +- .../algorithms/tests/test_auto_P2PDistance.py | 30 +- .../algorithms/tests/test_auto_PickAtlas.py | 17 +- .../algorithms/tests/test_auto_Similarity.py | 20 +- .../tests/test_auto_SimpleThreshold.py | 9 +- .../tests/test_auto_SpecifyModel.py | 32 +- .../tests/test_auto_SpecifySPMModel.py | 36 +- .../tests/test_auto_SpecifySparseModel.py | 46 +- .../algorithms/tests/test_auto_SplitROIs.py | 15 +- .../tests/test_auto_StimulusCorrelation.py | 15 +- nipype/algorithms/tests/test_auto_TCompCor.py | 50 +- .../algorithms/tests/test_auto_WarpPoints.py | 25 +- nipype/algorithms/tests/test_confounds.py | 34 +- nipype/algorithms/tests/test_icc_anova.py | 12 +- nipype/algorithms/tests/test_mesh_ops.py | 14 +- nipype/algorithms/tests/test_metrics.py | 32 +- nipype/algorithms/tests/test_misc.py | 5 +- nipype/algorithms/tests/test_modelgen.py | 256 +- nipype/algorithms/tests/test_moments.py | 18 +- .../algorithms/tests/test_normalize_tpms.py | 13 +- nipype/algorithms/tests/test_rapidart.py | 92 +- nipype/algorithms/tests/test_splitmerge.py | 4 +- nipype/algorithms/tests/test_stats.py | 35 +- nipype/caching/memory.py | 71 +- nipype/caching/tests/test_memory.py | 7 +- nipype/conftest.py | 9 +- nipype/external/cloghandler.py | 54 +- nipype/external/due.py | 10 +- nipype/external/fsl_imglob.py | 24 +- nipype/info.py | 175 +- nipype/interfaces/__init__.py | 2 +- nipype/interfaces/afni/__init__.py | 90 +- nipype/interfaces/afni/base.py | 157 +- nipype/interfaces/afni/model.py | 859 ++-- nipype/interfaces/afni/preprocess.py | 3670 +++++++++-------- nipype/interfaces/afni/svm.py | 145 +- .../afni/tests/test_auto_ABoverlap.py | 40 +- .../afni/tests/test_auto_AFNICommand.py | 18 +- .../afni/tests/test_auto_AFNICommandBase.py | 6 +- 
.../afni/tests/test_auto_AFNIPythonCommand.py | 18 +- .../afni/tests/test_auto_AFNItoNIFTI.py | 42 +- .../afni/tests/test_auto_AlignEpiAnatPy.py | 71 +- .../afni/tests/test_auto_Allineate.py | 161 +- .../afni/tests/test_auto_AutoTLRC.py | 23 +- .../afni/tests/test_auto_AutoTcorrelate.py | 46 +- .../afni/tests/test_auto_Autobox.py | 31 +- .../afni/tests/test_auto_Automask.py | 41 +- .../afni/tests/test_auto_Axialize.py | 47 +- .../afni/tests/test_auto_Bandpass.py | 73 +- .../afni/tests/test_auto_BlurInMask.py | 50 +- .../afni/tests/test_auto_BlurToFWHM.py | 44 +- .../afni/tests/test_auto_BrickStat.py | 41 +- .../interfaces/afni/tests/test_auto_Bucket.py | 28 +- .../interfaces/afni/tests/test_auto_Calc.py | 58 +- nipype/interfaces/afni/tests/test_auto_Cat.py | 62 +- .../afni/tests/test_auto_CatMatvec.py | 43 +- .../afni/tests/test_auto_CenterMass.py | 40 +- .../afni/tests/test_auto_ClipLevel.py | 36 +- .../afni/tests/test_auto_ConvertDset.py | 34 +- .../interfaces/afni/tests/test_auto_Copy.py | 30 +- .../afni/tests/test_auto_Deconvolve.py | 166 +- .../afni/tests/test_auto_DegreeCentrality.py | 46 +- .../afni/tests/test_auto_Despike.py | 28 +- .../afni/tests/test_auto_Detrend.py | 28 +- nipype/interfaces/afni/tests/test_auto_Dot.py | 54 +- nipype/interfaces/afni/tests/test_auto_ECM.py | 57 +- .../interfaces/afni/tests/test_auto_Edge3.py | 46 +- .../interfaces/afni/tests/test_auto_Eval.py | 58 +- .../interfaces/afni/tests/test_auto_FWHMx.py | 83 +- nipype/interfaces/afni/tests/test_auto_Fim.py | 39 +- .../afni/tests/test_auto_Fourier.py | 40 +- .../interfaces/afni/tests/test_auto_GCOR.py | 23 +- .../interfaces/afni/tests/test_auto_Hist.py | 46 +- .../interfaces/afni/tests/test_auto_LFCD.py | 41 +- .../afni/tests/test_auto_LocalBistat.py | 62 +- .../afni/tests/test_auto_Localstat.py | 66 +- .../afni/tests/test_auto_MaskTool.py | 55 +- .../afni/tests/test_auto_Maskave.py | 39 +- .../interfaces/afni/tests/test_auto_Means.py | 53 +- .../interfaces/afni/tests/test_auto_Merge.py | 36 +- .../interfaces/afni/tests/test_auto_Notes.py | 43 +- .../afni/tests/test_auto_NwarpAdjust.py | 31 +- .../afni/tests/test_auto_NwarpApply.py | 53 +- .../afni/tests/test_auto_NwarpCat.py | 41 +- .../afni/tests/test_auto_OneDToolPy.py | 45 +- .../afni/tests/test_auto_OutlierCount.py | 73 +- .../afni/tests/test_auto_QualityIndex.py | 59 +- .../interfaces/afni/tests/test_auto_Qwarp.py | 206 +- .../afni/tests/test_auto_QwarpPlusMinus.py | 210 +- .../afni/tests/test_auto_ROIStats.py | 69 +- .../interfaces/afni/tests/test_auto_ReHo.py | 57 +- .../interfaces/afni/tests/test_auto_Refit.py | 52 +- .../afni/tests/test_auto_Remlfit.py | 190 +- .../afni/tests/test_auto_Resample.py | 35 +- .../afni/tests/test_auto_Retroicor.py | 60 +- .../afni/tests/test_auto_SVMTest.py | 46 +- .../afni/tests/test_auto_SVMTrain.py | 81 +- nipype/interfaces/afni/tests/test_auto_Seg.py | 37 +- .../afni/tests/test_auto_SkullStrip.py | 24 +- .../afni/tests/test_auto_Synthesize.py | 43 +- .../interfaces/afni/tests/test_auto_TCat.py | 36 +- .../afni/tests/test_auto_TCatSubBrick.py | 34 +- .../afni/tests/test_auto_TCorr1D.py | 51 +- .../afni/tests/test_auto_TCorrMap.py | 181 +- .../afni/tests/test_auto_TCorrelate.py | 38 +- .../interfaces/afni/tests/test_auto_TNorm.py | 40 +- .../afni/tests/test_auto_TProject.py | 69 +- .../interfaces/afni/tests/test_auto_TShift.py | 61 +- .../afni/tests/test_auto_TSmooth.py | 49 +- .../interfaces/afni/tests/test_auto_TStat.py | 35 +- .../interfaces/afni/tests/test_auto_To3D.py | 38 +- 
.../interfaces/afni/tests/test_auto_Undump.py | 43 +- .../afni/tests/test_auto_Unifize.py | 52 +- .../interfaces/afni/tests/test_auto_Volreg.py | 73 +- .../interfaces/afni/tests/test_auto_Warp.py | 60 +- .../interfaces/afni/tests/test_auto_ZCutUp.py | 30 +- .../interfaces/afni/tests/test_auto_Zcat.py | 43 +- .../afni/tests/test_auto_Zeropad.py | 87 +- .../afni/tests/test_extra_Deconvolve.py | 9 +- nipype/interfaces/afni/utils.py | 2881 +++++++------ nipype/interfaces/ants/__init__.py | 43 +- nipype/interfaces/ants/base.py | 48 +- nipype/interfaces/ants/registration.py | 1237 +++--- nipype/interfaces/ants/resampling.py | 473 ++- nipype/interfaces/ants/segmentation.py | 1610 ++++---- .../interfaces/ants/tests/test_auto_ANTS.py | 106 +- .../ants/tests/test_auto_ANTSCommand.py | 12 +- .../ants/tests/test_auto_AffineInitializer.py | 67 +- .../ants/tests/test_auto_AntsJointFusion.py | 103 +- .../ants/tests/test_auto_ApplyTransforms.py | 60 +- .../test_auto_ApplyTransformsToPoints.py | 35 +- .../ants/tests/test_auto_Atropos.py | 77 +- .../tests/test_auto_AverageAffineTransform.py | 33 +- .../ants/tests/test_auto_AverageImages.py | 40 +- .../ants/tests/test_auto_BrainExtraction.py | 97 +- .../tests/test_auto_ComposeMultiTransform.py | 40 +- .../tests/test_auto_CompositeTransformUtil.py | 44 +- .../test_auto_ConvertScalarImageToRGB.py | 80 +- .../ants/tests/test_auto_CorticalThickness.py | 112 +- ...est_auto_CreateJacobianDeterminantImage.py | 44 +- .../ants/tests/test_auto_CreateTiledMosaic.py | 52 +- .../ants/tests/test_auto_DenoiseImage.py | 53 +- .../ants/tests/test_auto_JointFusion.py | 86 +- .../ants/tests/test_auto_KellyKapowski.py | 77 +- .../ants/tests/test_auto_LabelGeometry.py | 41 +- .../tests/test_auto_LaplacianThickness.py | 63 +- .../tests/test_auto_MeasureImageSimilarity.py | 66 +- .../ants/tests/test_auto_MultiplyImages.py | 40 +- .../tests/test_auto_N4BiasFieldCorrection.py | 79 +- .../ants/tests/test_auto_Registration.py | 156 +- .../tests/test_auto_RegistrationSynQuick.py | 66 +- .../test_auto_WarpImageMultiTransform.py | 63 +- ..._auto_WarpTimeSeriesImageMultiTransform.py | 57 +- .../ants/tests/test_extra_Registration.py | 8 +- .../interfaces/ants/tests/test_resampling.py | 64 +- .../ants/tests/test_segmentation.py | 26 +- .../ants/tests/test_spec_JointFusion.py | 69 +- nipype/interfaces/ants/utils.py | 244 +- nipype/interfaces/ants/visualization.py | 178 +- nipype/interfaces/base/__init__.py | 46 +- nipype/interfaces/base/core.py | 338 +- nipype/interfaces/base/specs.py | 162 +- nipype/interfaces/base/support.py | 137 +- .../base/tests/test_auto_CommandLine.py | 6 +- .../base/tests/test_auto_MpiCommandLine.py | 9 +- .../tests/test_auto_SEMLikeCommandLine.py | 6 +- .../base/tests/test_auto_StdOutCommandLine.py | 14 +- nipype/interfaces/base/tests/test_core.py | 261 +- .../base/tests/test_resource_monitor.py | 43 +- nipype/interfaces/base/tests/test_specs.py | 252 +- nipype/interfaces/base/tests/test_support.py | 23 +- .../base/tests/test_traits_extension.py | 254 +- nipype/interfaces/base/traits_extension.py | 158 +- nipype/interfaces/brainsuite/__init__.py | 20 +- nipype/interfaces/brainsuite/brainsuite.py | 1830 ++++---- .../brainsuite/tests/test_auto_BDP.py | 143 +- .../brainsuite/tests/test_auto_Bfc.py | 89 +- .../brainsuite/tests/test_auto_Bse.py | 96 +- .../brainsuite/tests/test_auto_Cerebro.py | 81 +- .../brainsuite/tests/test_auto_Cortex.py | 50 +- .../brainsuite/tests/test_auto_Dewisp.py | 31 +- .../brainsuite/tests/test_auto_Dfs.py | 66 +- 
.../brainsuite/tests/test_auto_Hemisplit.py | 62 +- .../brainsuite/tests/test_auto_Pialmesh.py | 85 +- .../brainsuite/tests/test_auto_Pvc.py | 44 +- .../brainsuite/tests/test_auto_SVReg.py | 61 +- .../brainsuite/tests/test_auto_Scrubmask.py | 39 +- .../brainsuite/tests/test_auto_Skullfinder.py | 49 +- .../brainsuite/tests/test_auto_Tca.py | 39 +- .../tests/test_auto_ThicknessPVC.py | 12 +- nipype/interfaces/bru2nii.py | 47 +- nipype/interfaces/c3.py | 132 +- nipype/interfaces/camino/__init__.py | 42 +- nipype/interfaces/camino/calib.py | 281 +- nipype/interfaces/camino/connectivity.py | 100 +- nipype/interfaces/camino/convert.py | 734 ++-- nipype/interfaces/camino/dti.py | 1236 +++--- nipype/interfaces/camino/odf.py | 395 +- .../camino/tests/test_auto_AnalyzeHeader.py | 123 +- .../tests/test_auto_ComputeEigensystem.py | 39 +- .../test_auto_ComputeFractionalAnisotropy.py | 37 +- .../tests/test_auto_ComputeMeanDiffusivity.py | 37 +- .../tests/test_auto_ComputeTensorTrace.py | 37 +- .../camino/tests/test_auto_Conmat.py | 53 +- .../camino/tests/test_auto_DT2NIfTI.py | 30 +- .../camino/tests/test_auto_DTIFit.py | 42 +- .../camino/tests/test_auto_DTLUTGen.py | 66 +- .../camino/tests/test_auto_DTMetric.py | 43 +- .../camino/tests/test_auto_FSL2Scheme.py | 53 +- .../camino/tests/test_auto_Image2Voxel.py | 29 +- .../camino/tests/test_auto_ImageStats.py | 34 +- .../camino/tests/test_auto_LinRecon.py | 48 +- .../interfaces/camino/tests/test_auto_MESD.py | 61 +- .../camino/tests/test_auto_ModelFit.py | 74 +- .../camino/tests/test_auto_NIfTIDT2Camino.py | 44 +- .../camino/tests/test_auto_PicoPDFs.py | 54 +- .../camino/tests/test_auto_ProcStreamlines.py | 157 +- .../camino/tests/test_auto_QBallMX.py | 49 +- .../camino/tests/test_auto_SFLUTGen.py | 58 +- .../camino/tests/test_auto_SFPICOCalibData.py | 82 +- .../camino/tests/test_auto_SFPeaks.py | 81 +- .../camino/tests/test_auto_Shredder.py | 43 +- .../camino/tests/test_auto_Track.py | 95 +- .../camino/tests/test_auto_TrackBallStick.py | 95 +- .../camino/tests/test_auto_TrackBayesDirac.py | 123 +- .../tests/test_auto_TrackBedpostxDeter.py | 105 +- .../tests/test_auto_TrackBedpostxProba.py | 110 +- .../camino/tests/test_auto_TrackBootstrap.py | 118 +- .../camino/tests/test_auto_TrackDT.py | 95 +- .../camino/tests/test_auto_TrackPICo.py | 102 +- .../camino/tests/test_auto_TractShredder.py | 43 +- .../camino/tests/test_auto_VtkStreamlines.py | 60 +- nipype/interfaces/camino/utils.py | 52 +- nipype/interfaces/camino2trackvis/convert.py | 83 +- .../tests/test_auto_Camino2Trackvis.py | 56 +- .../tests/test_auto_Trackvis2Camino.py | 31 +- nipype/interfaces/cmtk/base.py | 8 +- nipype/interfaces/cmtk/cmtk.py | 855 ++-- nipype/interfaces/cmtk/convert.py | 145 +- nipype/interfaces/cmtk/nbs.py | 127 +- nipype/interfaces/cmtk/nx.py | 485 ++- nipype/interfaces/cmtk/parcellation.py | 707 ++-- .../cmtk/tests/test_auto_AverageNetworks.py | 16 +- .../cmtk/tests/test_auto_CFFConverter.py | 13 +- .../cmtk/tests/test_auto_CreateMatrix.py | 84 +- .../cmtk/tests/test_auto_CreateNodes.py | 19 +- .../cmtk/tests/test_auto_MergeCNetworks.py | 11 +- .../tests/test_auto_NetworkBasedStatistic.py | 24 +- .../cmtk/tests/test_auto_NetworkXMetrics.py | 60 +- .../cmtk/tests/test_auto_Parcellate.py | 29 +- .../interfaces/cmtk/tests/test_auto_ROIGen.py | 29 +- nipype/interfaces/cmtk/tests/test_nbs.py | 7 +- nipype/interfaces/dcm2nii.py | 367 +- nipype/interfaces/dcmstack.py | 184 +- nipype/interfaces/diffusion_toolkit/base.py | 7 +- nipype/interfaces/diffusion_toolkit/dti.py | 258 +- 
nipype/interfaces/diffusion_toolkit/odf.py | 330 +- .../interfaces/diffusion_toolkit/postproc.py | 34 +- .../tests/test_auto_DTIRecon.py | 70 +- .../tests/test_auto_DTITracker.py | 75 +- .../tests/test_auto_HARDIMat.py | 51 +- .../tests/test_auto_ODFRecon.py | 74 +- .../tests/test_auto_ODFTracker.py | 88 +- .../tests/test_auto_SplineFilter.py | 31 +- .../tests/test_auto_TrackMerge.py | 24 +- nipype/interfaces/dipy/anisotropic_power.py | 18 +- nipype/interfaces/dipy/base.py | 105 +- nipype/interfaces/dipy/preprocess.py | 152 +- nipype/interfaces/dipy/reconstruction.py | 178 +- nipype/interfaces/dipy/registration.py | 16 +- nipype/interfaces/dipy/setup.py | 9 +- nipype/interfaces/dipy/simulate.py | 190 +- nipype/interfaces/dipy/stats.py | 16 +- nipype/interfaces/dipy/tensors.py | 23 +- .../dipy/tests/test_auto_APMQball.py | 23 +- nipype/interfaces/dipy/tests/test_auto_CSD.py | 34 +- nipype/interfaces/dipy/tests/test_auto_DTI.py | 33 +- .../dipy/tests/test_auto_Denoise.py | 24 +- .../tests/test_auto_DipyDiffusionInterface.py | 17 +- .../tests/test_auto_EstimateResponseSH.py | 49 +- .../dipy/tests/test_auto_RESTORE.py | 37 +- .../dipy/tests/test_auto_Resample.py | 14 +- .../tests/test_auto_SimulateMultiTensor.py | 61 +- .../tests/test_auto_StreamlineTractography.py | 55 +- .../dipy/tests/test_auto_TensorMode.py | 23 +- .../dipy/tests/test_auto_TrackDensityMap.py | 18 +- nipype/interfaces/dipy/tests/test_base.py | 145 +- nipype/interfaces/dipy/tracks.py | 219 +- nipype/interfaces/dtitk/__init__.py | 23 +- nipype/interfaces/dtitk/base.py | 35 +- nipype/interfaces/dtitk/registration.py | 499 ++- .../dtitk/tests/test_auto_AffScalarVol.py | 53 +- .../tests/test_auto_AffSymTensor3DVol.py | 58 +- .../dtitk/tests/test_auto_Affine.py | 48 +- .../dtitk/tests/test_auto_AffineTask.py | 48 +- .../dtitk/tests/test_auto_BinThresh.py | 52 +- .../dtitk/tests/test_auto_BinThreshTask.py | 52 +- .../dtitk/tests/test_auto_CommandLineDtitk.py | 6 +- .../dtitk/tests/test_auto_ComposeXfm.py | 29 +- .../dtitk/tests/test_auto_ComposeXfmTask.py | 29 +- .../dtitk/tests/test_auto_Diffeo.py | 52 +- .../dtitk/tests/test_auto_DiffeoScalarVol.py | 49 +- .../tests/test_auto_DiffeoSymTensor3DVol.py | 61 +- .../dtitk/tests/test_auto_DiffeoTask.py | 52 +- .../interfaces/dtitk/tests/test_auto_Rigid.py | 48 +- .../dtitk/tests/test_auto_RigidTask.py | 48 +- .../dtitk/tests/test_auto_SVAdjustVoxSp.py | 37 +- .../tests/test_auto_SVAdjustVoxSpTask.py | 37 +- .../dtitk/tests/test_auto_SVResample.py | 44 +- .../dtitk/tests/test_auto_SVResampleTask.py | 44 +- .../tests/test_auto_TVAdjustOriginTask.py | 37 +- .../dtitk/tests/test_auto_TVAdjustVoxSp.py | 37 +- .../tests/test_auto_TVAdjustVoxSpTask.py | 37 +- .../dtitk/tests/test_auto_TVResample.py | 46 +- .../dtitk/tests/test_auto_TVResampleTask.py | 46 +- .../dtitk/tests/test_auto_TVtool.py | 25 +- .../dtitk/tests/test_auto_TVtoolTask.py | 25 +- .../dtitk/tests/test_auto_affScalarVolTask.py | 53 +- .../tests/test_auto_affSymTensor3DVolTask.py | 58 +- .../tests/test_auto_diffeoScalarVolTask.py | 49 +- .../test_auto_diffeoSymTensor3DVolTask.py | 61 +- nipype/interfaces/dtitk/utils.py | 306 +- nipype/interfaces/dynamic_slicer.py | 132 +- nipype/interfaces/elastix/base.py | 18 +- nipype/interfaces/elastix/registration.py | 143 +- .../elastix/tests/test_auto_AnalyzeWarp.py | 54 +- .../elastix/tests/test_auto_ApplyWarp.py | 35 +- .../elastix/tests/test_auto_EditTransform.py | 22 +- .../elastix/tests/test_auto_PointsWarp.py | 35 +- .../elastix/tests/test_auto_Registration.py | 55 +- 
nipype/interfaces/elastix/utils.py | 138 +- nipype/interfaces/freesurfer/__init__.py | 108 +- nipype/interfaces/freesurfer/base.py | 107 +- nipype/interfaces/freesurfer/longitudinal.py | 155 +- nipype/interfaces/freesurfer/model.py | 1406 ++++--- nipype/interfaces/freesurfer/preprocess.py | 2510 ++++++----- nipype/interfaces/freesurfer/registration.py | 367 +- .../freesurfer/tests/test_BBRegister.py | 140 +- .../freesurfer/tests/test_FSSurfaceCommand.py | 16 +- .../tests/test_auto_AddXFormToHeader.py | 36 +- .../freesurfer/tests/test_auto_Aparc2Aseg.py | 93 +- .../freesurfer/tests/test_auto_Apas2Aseg.py | 26 +- .../freesurfer/tests/test_auto_ApplyMask.py | 56 +- .../tests/test_auto_ApplyVolTransform.py | 177 +- .../freesurfer/tests/test_auto_Binarize.py | 88 +- .../freesurfer/tests/test_auto_CALabel.py | 67 +- .../freesurfer/tests/test_auto_CANormalize.py | 54 +- .../freesurfer/tests/test_auto_CARegister.py | 56 +- .../test_auto_CheckTalairachAlignment.py | 27 +- .../freesurfer/tests/test_auto_Concatenate.py | 58 +- .../tests/test_auto_ConcatenateLTA.py | 49 +- .../freesurfer/tests/test_auto_Contrast.py | 56 +- .../freesurfer/tests/test_auto_Curvature.py | 28 +- .../tests/test_auto_CurvatureStats.py | 55 +- .../tests/test_auto_DICOMConvert.py | 21 +- .../freesurfer/tests/test_auto_EMRegister.py | 45 +- .../tests/test_auto_EditWMwithAseg.py | 41 +- .../freesurfer/tests/test_auto_EulerNumber.py | 18 +- .../tests/test_auto_ExtractMainComponent.py | 24 +- .../freesurfer/tests/test_auto_FSCommand.py | 7 +- .../tests/test_auto_FSCommandOpenMP.py | 7 +- .../tests/test_auto_FSScriptCommand.py | 7 +- .../freesurfer/tests/test_auto_FitMSParams.py | 27 +- .../freesurfer/tests/test_auto_FixTopology.py | 57 +- .../tests/test_auto_FuseSegmentations.py | 43 +- .../freesurfer/tests/test_auto_GLMFit.py | 194 +- .../freesurfer/tests/test_auto_ImageInfo.py | 17 +- .../freesurfer/tests/test_auto_Jacobian.py | 31 +- .../freesurfer/tests/test_auto_LTAConvert.py | 78 +- .../freesurfer/tests/test_auto_Label2Annot.py | 46 +- .../freesurfer/tests/test_auto_Label2Label.py | 66 +- .../freesurfer/tests/test_auto_Label2Vol.py | 83 +- .../tests/test_auto_MNIBiasCorrection.py | 48 +- .../freesurfer/tests/test_auto_MPRtoMNI305.py | 32 +- .../freesurfer/tests/test_auto_MRIConvert.py | 205 +- .../freesurfer/tests/test_auto_MRICoreg.py | 96 +- .../freesurfer/tests/test_auto_MRIFill.py | 43 +- .../tests/test_auto_MRIMarchingCubes.py | 37 +- .../freesurfer/tests/test_auto_MRIPretess.py | 44 +- .../freesurfer/tests/test_auto_MRISPreproc.py | 79 +- .../tests/test_auto_MRISPreprocReconAll.py | 97 +- .../tests/test_auto_MRITessellate.py | 35 +- .../freesurfer/tests/test_auto_MRIsCALabel.py | 71 +- .../freesurfer/tests/test_auto_MRIsCalc.py | 47 +- .../freesurfer/tests/test_auto_MRIsCombine.py | 23 +- .../freesurfer/tests/test_auto_MRIsConvert.py | 79 +- .../freesurfer/tests/test_auto_MRIsExpand.py | 56 +- .../freesurfer/tests/test_auto_MRIsInflate.py | 36 +- .../freesurfer/tests/test_auto_MS_LDA.py | 53 +- .../tests/test_auto_MakeAverageSubject.py | 23 +- .../tests/test_auto_MakeSurfaces.py | 97 +- .../freesurfer/tests/test_auto_Normalize.py | 38 +- .../tests/test_auto_OneSampleTTest.py | 194 +- .../freesurfer/tests/test_auto_Paint.py | 33 +- .../tests/test_auto_ParcellationStats.py | 116 +- .../tests/test_auto_ParseDICOMDir.py | 26 +- .../freesurfer/tests/test_auto_ReconAll.py | 290 +- .../freesurfer/tests/test_auto_Register.py | 47 +- .../tests/test_auto_RegisterAVItoTalairach.py | 44 +- 
.../tests/test_auto_RelabelHypointensities.py | 45 +- .../tests/test_auto_RemoveIntersection.py | 23 +- .../freesurfer/tests/test_auto_RemoveNeck.py | 40 +- .../freesurfer/tests/test_auto_Resample.py | 28 +- .../tests/test_auto_RobustRegister.py | 111 +- .../tests/test_auto_RobustTemplate.py | 49 +- .../tests/test_auto_SampleToSurface.py | 132 +- .../freesurfer/tests/test_auto_SegStats.py | 124 +- .../tests/test_auto_SegStatsReconAll.py | 178 +- .../freesurfer/tests/test_auto_SegmentCC.py | 42 +- .../freesurfer/tests/test_auto_SegmentWM.py | 25 +- .../freesurfer/tests/test_auto_Smooth.py | 53 +- .../tests/test_auto_SmoothTessellation.py | 54 +- .../freesurfer/tests/test_auto_Sphere.py | 32 +- .../tests/test_auto_SphericalAverage.py | 63 +- .../tests/test_auto_Surface2VolTransform.py | 59 +- .../tests/test_auto_SurfaceSmooth.py | 50 +- .../tests/test_auto_SurfaceSnapshots.py | 120 +- .../tests/test_auto_SurfaceTransform.py | 53 +- .../tests/test_auto_SynthesizeFLASH.py | 54 +- .../tests/test_auto_TalairachAVI.py | 29 +- .../freesurfer/tests/test_auto_TalairachQC.py | 21 +- .../freesurfer/tests/test_auto_Tkregister2.py | 79 +- .../tests/test_auto_UnpackSDICOMDir.py | 44 +- .../freesurfer/tests/test_auto_VolumeMask.py | 75 +- .../tests/test_auto_WatershedSkullStrip.py | 38 +- .../interfaces/freesurfer/tests/test_model.py | 25 +- .../freesurfer/tests/test_preprocess.py | 112 +- .../interfaces/freesurfer/tests/test_utils.py | 76 +- nipype/interfaces/freesurfer/utils.py | 2282 +++++----- nipype/interfaces/fsl/__init__.py | 137 +- nipype/interfaces/fsl/aroma.py | 146 +- nipype/interfaces/fsl/base.py | 108 +- nipype/interfaces/fsl/dti.py | 1265 +++--- nipype/interfaces/fsl/epi.py | 1251 +++--- nipype/interfaces/fsl/fix.py | 258 +- nipype/interfaces/fsl/maths.py | 159 +- nipype/interfaces/fsl/model.py | 2167 +++++----- nipype/interfaces/fsl/possum.py | 116 +- nipype/interfaces/fsl/preprocess.py | 2066 +++++----- nipype/interfaces/fsl/tests/test_FILMGLS.py | 163 +- .../fsl/tests/test_Level1Design_functions.py | 36 +- .../fsl/tests/test_auto_AR1Image.py | 45 +- .../fsl/tests/test_auto_AccuracyTester.py | 32 +- .../fsl/tests/test_auto_ApplyMask.py | 46 +- .../fsl/tests/test_auto_ApplyTOPUP.py | 48 +- .../fsl/tests/test_auto_ApplyWarp.py | 69 +- .../fsl/tests/test_auto_ApplyXFM.py | 203 +- .../interfaces/fsl/tests/test_auto_AvScale.py | 23 +- .../interfaces/fsl/tests/test_auto_B0Calc.py | 87 +- .../fsl/tests/test_auto_BEDPOSTX5.py | 123 +- nipype/interfaces/fsl/tests/test_auto_BET.py | 161 +- .../fsl/tests/test_auto_BinaryMaths.py | 54 +- .../fsl/tests/test_auto_ChangeDataType.py | 40 +- .../fsl/tests/test_auto_Classifier.py | 31 +- .../interfaces/fsl/tests/test_auto_Cleaner.py | 52 +- .../interfaces/fsl/tests/test_auto_Cluster.py | 136 +- .../interfaces/fsl/tests/test_auto_Complex.py | 192 +- .../fsl/tests/test_auto_ContrastMgr.py | 50 +- .../fsl/tests/test_auto_ConvertWarp.py | 87 +- .../fsl/tests/test_auto_ConvertXFM.py | 42 +- .../fsl/tests/test_auto_CopyGeom.py | 31 +- .../interfaces/fsl/tests/test_auto_DTIFit.py | 95 +- .../fsl/tests/test_auto_DilateImage.py | 61 +- .../fsl/tests/test_auto_DistanceMap.py | 35 +- .../fsl/tests/test_auto_DualRegression.py | 59 +- .../fsl/tests/test_auto_EPIDeWarp.py | 82 +- nipype/interfaces/fsl/tests/test_auto_Eddy.py | 141 +- .../fsl/tests/test_auto_EddyCorrect.py | 33 +- .../fsl/tests/test_auto_EddyQuad.py | 70 +- .../interfaces/fsl/tests/test_auto_EpiReg.py | 98 +- .../fsl/tests/test_auto_ErodeImage.py | 61 +- .../fsl/tests/test_auto_ExtractROI.py | 78 +- 
nipype/interfaces/fsl/tests/test_auto_FAST.py | 76 +- nipype/interfaces/fsl/tests/test_auto_FEAT.py | 18 +- .../fsl/tests/test_auto_FEATModel.py | 32 +- .../fsl/tests/test_auto_FEATRegister.py | 13 +- .../interfaces/fsl/tests/test_auto_FIRST.py | 53 +- .../interfaces/fsl/tests/test_auto_FLAMEO.py | 80 +- .../interfaces/fsl/tests/test_auto_FLIRT.py | 200 +- .../interfaces/fsl/tests/test_auto_FNIRT.py | 178 +- .../fsl/tests/test_auto_FSLCommand.py | 7 +- .../fsl/tests/test_auto_FSLXCommand.py | 121 +- .../interfaces/fsl/tests/test_auto_FUGUE.py | 122 +- .../fsl/tests/test_auto_FeatureExtractor.py | 21 +- .../fsl/tests/test_auto_FilterRegressor.py | 50 +- .../fsl/tests/test_auto_FindTheBiggest.py | 26 +- nipype/interfaces/fsl/tests/test_auto_GLM.py | 106 +- .../fsl/tests/test_auto_ICA_AROMA.py | 65 +- .../fsl/tests/test_auto_ImageMaths.py | 45 +- .../fsl/tests/test_auto_ImageMeants.py | 47 +- .../fsl/tests/test_auto_ImageStats.py | 40 +- .../interfaces/fsl/tests/test_auto_InvWarp.py | 49 +- .../fsl/tests/test_auto_IsotropicSmooth.py | 53 +- .../interfaces/fsl/tests/test_auto_L2Model.py | 10 +- .../fsl/tests/test_auto_Level1Design.py | 17 +- .../interfaces/fsl/tests/test_auto_MCFLIRT.py | 73 +- .../interfaces/fsl/tests/test_auto_MELODIC.py | 145 +- .../fsl/tests/test_auto_MakeDyadicVectors.py | 45 +- .../fsl/tests/test_auto_MathsCommand.py | 39 +- .../fsl/tests/test_auto_MaxImage.py | 45 +- .../fsl/tests/test_auto_MaxnImage.py | 45 +- .../fsl/tests/test_auto_MeanImage.py | 45 +- .../fsl/tests/test_auto_MedianImage.py | 45 +- .../interfaces/fsl/tests/test_auto_Merge.py | 34 +- .../fsl/tests/test_auto_MinImage.py | 45 +- .../fsl/tests/test_auto_MotionOutliers.py | 52 +- .../fsl/tests/test_auto_MultiImageMaths.py | 47 +- .../tests/test_auto_MultipleRegressDesign.py | 14 +- .../interfaces/fsl/tests/test_auto_Overlay.py | 80 +- .../interfaces/fsl/tests/test_auto_PRELUDE.py | 75 +- .../fsl/tests/test_auto_PercentileImage.py | 50 +- .../fsl/tests/test_auto_PlotMotionParams.py | 33 +- .../fsl/tests/test_auto_PlotTimeSeries.py | 74 +- .../fsl/tests/test_auto_PowerSpectrum.py | 24 +- .../fsl/tests/test_auto_PrepareFieldmap.py | 50 +- .../fsl/tests/test_auto_ProbTrackX.py | 134 +- .../fsl/tests/test_auto_ProbTrackX2.py | 187 +- .../fsl/tests/test_auto_ProjThresh.py | 23 +- .../fsl/tests/test_auto_Randomise.py | 89 +- .../fsl/tests/test_auto_Reorient2Std.py | 24 +- .../fsl/tests/test_auto_RobustFOV.py | 33 +- nipype/interfaces/fsl/tests/test_auto_SMM.py | 20 +- .../interfaces/fsl/tests/test_auto_SUSAN.py | 54 +- .../interfaces/fsl/tests/test_auto_SigLoss.py | 32 +- .../interfaces/fsl/tests/test_auto_Slice.py | 22 +- .../fsl/tests/test_auto_SliceTimer.py | 43 +- .../interfaces/fsl/tests/test_auto_Slicer.py | 107 +- .../interfaces/fsl/tests/test_auto_Smooth.py | 32 +- .../fsl/tests/test_auto_SmoothEstimate.py | 39 +- .../fsl/tests/test_auto_SpatialFilter.py | 61 +- .../interfaces/fsl/tests/test_auto_Split.py | 29 +- .../fsl/tests/test_auto_StdImage.py | 45 +- .../fsl/tests/test_auto_SwapDimensions.py | 30 +- .../interfaces/fsl/tests/test_auto_TOPUP.py | 116 +- .../fsl/tests/test_auto_TemporalFilter.py | 51 +- .../fsl/tests/test_auto_Threshold.py | 49 +- .../fsl/tests/test_auto_TractSkeleton.py | 50 +- .../fsl/tests/test_auto_Training.py | 27 +- .../fsl/tests/test_auto_TrainingSetCreator.py | 14 +- .../fsl/tests/test_auto_UnaryMaths.py | 45 +- .../interfaces/fsl/tests/test_auto_VecReg.py | 62 +- .../fsl/tests/test_auto_WarpPoints.py | 58 +- .../fsl/tests/test_auto_WarpPointsFromStd.py | 52 +- 
.../fsl/tests/test_auto_WarpPointsToStd.py | 65 +- .../fsl/tests/test_auto_WarpUtils.py | 48 +- .../fsl/tests/test_auto_XFibres5.py | 126 +- nipype/interfaces/fsl/tests/test_base.py | 49 +- nipype/interfaces/fsl/tests/test_dti.py | 357 +- nipype/interfaces/fsl/tests/test_epi.py | 11 +- nipype/interfaces/fsl/tests/test_maths.py | 113 +- nipype/interfaces/fsl/tests/test_model.py | 19 +- .../interfaces/fsl/tests/test_preprocess.py | 592 +-- nipype/interfaces/fsl/tests/test_utils.py | 183 +- nipype/interfaces/fsl/utils.py | 1876 +++++---- nipype/interfaces/image.py | 100 +- nipype/interfaces/io.py | 1435 ++++--- nipype/interfaces/matlab.py | 130 +- nipype/interfaces/meshfix.py | 182 +- nipype/interfaces/minc/__init__.py | 2 +- nipype/interfaces/minc/base.py | 51 +- nipype/interfaces/minc/minc.py | 3088 +++++++------- nipype/interfaces/minc/testdata.py | 13 +- .../minc/tests/test_auto_Average.py | 243 +- .../interfaces/minc/tests/test_auto_BBox.py | 47 +- .../interfaces/minc/tests/test_auto_Beast.py | 94 +- .../minc/tests/test_auto_BestLinReg.py | 45 +- .../minc/tests/test_auto_BigAverage.py | 44 +- .../interfaces/minc/tests/test_auto_Blob.py | 32 +- .../interfaces/minc/tests/test_auto_Blur.py | 71 +- .../interfaces/minc/tests/test_auto_Calc.py | 236 +- .../minc/tests/test_auto_Convert.py | 37 +- .../interfaces/minc/tests/test_auto_Copy.py | 34 +- .../interfaces/minc/tests/test_auto_Dump.py | 58 +- .../minc/tests/test_auto_Extract.py | 253 +- .../minc/tests/test_auto_Gennlxfm.py | 34 +- .../interfaces/minc/tests/test_auto_Math.py | 278 +- .../interfaces/minc/tests/test_auto_NlpFit.py | 58 +- .../interfaces/minc/tests/test_auto_Norm.py | 65 +- nipype/interfaces/minc/tests/test_auto_Pik.py | 96 +- .../minc/tests/test_auto_Resample.py | 358 +- .../minc/tests/test_auto_Reshape.py | 33 +- .../interfaces/minc/tests/test_auto_ToEcat.py | 41 +- .../interfaces/minc/tests/test_auto_ToRaw.py | 121 +- .../minc/tests/test_auto_VolSymm.py | 58 +- .../minc/tests/test_auto_Volcentre.py | 37 +- .../interfaces/minc/tests/test_auto_Voliso.py | 37 +- .../interfaces/minc/tests/test_auto_Volpad.py | 41 +- .../interfaces/minc/tests/test_auto_XfmAvg.py | 41 +- .../minc/tests/test_auto_XfmConcat.py | 34 +- .../minc/tests/test_auto_XfmInvert.py | 33 +- nipype/interfaces/mipav/__init__.py | 27 +- nipype/interfaces/mipav/developer.py | 989 ++--- nipype/interfaces/mipav/generate_classes.py | 55 +- .../test_auto_JistBrainMgdmSegmentation.py | 98 +- ...est_auto_JistBrainMp2rageDuraEstimation.py | 41 +- ...est_auto_JistBrainMp2rageSkullStripping.py | 70 +- .../test_auto_JistBrainPartialVolumeFilter.py | 36 +- ...est_auto_JistCortexSurfaceMeshInflation.py | 50 +- .../test_auto_JistIntensityMp2rageMasking.py | 69 +- .../test_auto_JistLaminarProfileCalculator.py | 39 +- .../test_auto_JistLaminarProfileGeometry.py | 40 +- .../test_auto_JistLaminarProfileSampling.py | 48 +- .../test_auto_JistLaminarROIAveraging.py | 44 +- ...test_auto_JistLaminarVolumetricLayering.py | 69 +- ...test_auto_MedicAlgorithmImageCalculator.py | 39 +- .../test_auto_MedicAlgorithmLesionToads.py | 143 +- .../test_auto_MedicAlgorithmMipavReorient.py | 49 +- .../mipav/tests/test_auto_MedicAlgorithmN3.py | 55 +- .../test_auto_MedicAlgorithmSPECTRE2010.py | 157 +- ...uto_MedicAlgorithmThresholdToBinaryMask.py | 36 +- .../mipav/tests/test_auto_RandomVol.py | 45 +- nipype/interfaces/mixins/__init__.py | 5 +- nipype/interfaces/mixins/reporting.py | 23 +- nipype/interfaces/mne/base.py | 107 +- .../mne/tests/test_auto_WatershedBEM.py | 62 +- 
nipype/interfaces/mrtrix/__init__.py | 42 +- nipype/interfaces/mrtrix/convert.py | 169 +- nipype/interfaces/mrtrix/preprocess.py | 547 +-- nipype/interfaces/mrtrix/tensors.py | 374 +- ..._auto_ConstrainedSphericalDeconvolution.py | 71 +- .../test_auto_DWI2SphericalHarmonicsImage.py | 37 +- .../mrtrix/tests/test_auto_DWI2Tensor.py | 53 +- ...est_auto_DiffusionTensorStreamlineTrack.py | 134 +- .../tests/test_auto_Directions2Amplitude.py | 42 +- .../mrtrix/tests/test_auto_Erode.py | 42 +- .../tests/test_auto_EstimateResponseForSH.py | 45 +- .../mrtrix/tests/test_auto_FSL2MRTrix.py | 25 +- .../mrtrix/tests/test_auto_FilterTracks.py | 67 +- .../mrtrix/tests/test_auto_FindShPeaks.py | 49 +- .../tests/test_auto_GenerateDirections.py | 33 +- .../test_auto_GenerateWhiteMatterMask.py | 37 +- .../mrtrix/tests/test_auto_MRConvert.py | 79 +- .../mrtrix/tests/test_auto_MRMultiply.py | 34 +- .../mrtrix/tests/test_auto_MRTransform.py | 67 +- .../mrtrix/tests/test_auto_MRTrix2TrackVis.py | 21 +- .../mrtrix/tests/test_auto_MRTrixInfo.py | 16 +- .../mrtrix/tests/test_auto_MRTrixViewer.py | 25 +- .../mrtrix/tests/test_auto_MedianFilter3D.py | 35 +- ...cSphericallyDeconvolutedStreamlineTrack.py | 131 +- ..._SphericallyDeconvolutedStreamlineTrack.py | 129 +- .../mrtrix/tests/test_auto_StreamlineTrack.py | 129 +- .../test_auto_Tensor2ApparentDiffusion.py | 35 +- .../test_auto_Tensor2FractionalAnisotropy.py | 35 +- .../mrtrix/tests/test_auto_Tensor2Vector.py | 35 +- .../mrtrix/tests/test_auto_Threshold.py | 49 +- .../mrtrix/tests/test_auto_Tracks2Prob.py | 58 +- nipype/interfaces/mrtrix/tracking.py | 375 +- nipype/interfaces/mrtrix3/__init__.py | 25 +- nipype/interfaces/mrtrix3/base.py | 87 +- nipype/interfaces/mrtrix3/connectivity.py | 258 +- nipype/interfaces/mrtrix3/preprocess.py | 265 +- nipype/interfaces/mrtrix3/reconst.py | 137 +- .../mrtrix3/tests/test_auto_ACTPrepareFSL.py | 24 +- .../mrtrix3/tests/test_auto_BrainMask.py | 49 +- .../tests/test_auto_BuildConnectome.py | 59 +- .../mrtrix3/tests/test_auto_ComputeTDI.py | 77 +- .../mrtrix3/tests/test_auto_DWIBiasCorrect.py | 74 +- .../mrtrix3/tests/test_auto_DWIDenoise.py | 66 +- .../mrtrix3/tests/test_auto_DWIExtract.py | 61 +- .../mrtrix3/tests/test_auto_EstimateFOD.py | 122 +- .../mrtrix3/tests/test_auto_FitTensor.py | 61 +- .../mrtrix3/tests/test_auto_Generate5tt.py | 56 +- .../mrtrix3/tests/test_auto_LabelConfig.py | 60 +- .../mrtrix3/tests/test_auto_LabelConvert.py | 47 +- .../mrtrix3/tests/test_auto_MRConvert.py | 69 +- .../mrtrix3/tests/test_auto_MRDeGibbs.py | 72 +- .../mrtrix3/tests/test_auto_MRMath.py | 58 +- .../mrtrix3/tests/test_auto_MRResize.py | 66 +- .../mrtrix3/tests/test_auto_MRTrix3Base.py | 6 +- .../mrtrix3/tests/test_auto_Mesh2PVE.py | 36 +- .../tests/test_auto_ReplaceFSwithFIRST.py | 37 +- .../mrtrix3/tests/test_auto_ResponseSD.py | 97 +- .../mrtrix3/tests/test_auto_TCK2VTK.py | 40 +- .../mrtrix3/tests/test_auto_TensorMetrics.py | 57 +- .../mrtrix3/tests/test_auto_Tractography.py | 157 +- nipype/interfaces/mrtrix3/tracking.py | 361 +- nipype/interfaces/mrtrix3/utils.py | 558 ++- nipype/interfaces/niftyfit/asl.py | 172 +- nipype/interfaces/niftyfit/base.py | 11 +- nipype/interfaces/niftyfit/dwi.py | 654 +-- nipype/interfaces/niftyfit/qt1.py | 202 +- nipype/interfaces/niftyfit/tests/test_asl.py | 31 +- .../niftyfit/tests/test_auto_DwiTool.py | 189 +- .../niftyfit/tests/test_auto_FitAsl.py | 126 +- .../niftyfit/tests/test_auto_FitDwi.py | 277 +- .../niftyfit/tests/test_auto_FitQt1.py | 186 +- .../tests/test_auto_NiftyFitCommand.py | 6 +- 
nipype/interfaces/niftyfit/tests/test_dwi.py | 72 +- nipype/interfaces/niftyfit/tests/test_qt1.py | 65 +- nipype/interfaces/niftyreg/__init__.py | 10 +- nipype/interfaces/niftyreg/base.py | 66 +- nipype/interfaces/niftyreg/reg.py | 361 +- nipype/interfaces/niftyreg/regutils.py | 740 ++-- .../tests/test_auto_NiftyRegCommand.py | 12 +- .../niftyreg/tests/test_auto_RegAladin.py | 95 +- .../niftyreg/tests/test_auto_RegAverage.py | 96 +- .../niftyreg/tests/test_auto_RegF3D.py | 150 +- .../niftyreg/tests/test_auto_RegJacobian.py | 39 +- .../niftyreg/tests/test_auto_RegMeasure.py | 39 +- .../niftyreg/tests/test_auto_RegResample.py | 57 +- .../niftyreg/tests/test_auto_RegTools.py | 64 +- .../niftyreg/tests/test_auto_RegTransform.py | 214 +- nipype/interfaces/niftyreg/tests/test_reg.py | 47 +- .../niftyreg/tests/test_regutils.py | 399 +- nipype/interfaces/niftyseg/__init__.py | 3 +- nipype/interfaces/niftyseg/base.py | 6 +- nipype/interfaces/niftyseg/em.py | 124 +- nipype/interfaces/niftyseg/label_fusion.py | 231 +- nipype/interfaces/niftyseg/lesions.py | 80 +- nipype/interfaces/niftyseg/maths.py | 298 +- nipype/interfaces/niftyseg/patchmatch.py | 53 +- nipype/interfaces/niftyseg/stats.py | 103 +- .../niftyseg/tests/test_auto_BinaryMaths.py | 47 +- .../tests/test_auto_BinaryMathsInteger.py | 41 +- .../niftyseg/tests/test_auto_BinaryStats.py | 44 +- .../niftyseg/tests/test_auto_CalcTopNCC.py | 41 +- .../interfaces/niftyseg/tests/test_auto_EM.py | 87 +- .../niftyseg/tests/test_auto_FillLesions.py | 58 +- .../niftyseg/tests/test_auto_LabelFusion.py | 68 +- .../niftyseg/tests/test_auto_MathsCommand.py | 29 +- .../niftyseg/tests/test_auto_Merge.py | 37 +- .../tests/test_auto_NiftySegCommand.py | 6 +- .../niftyseg/tests/test_auto_PatchMatch.py | 46 +- .../niftyseg/tests/test_auto_StatsCommand.py | 29 +- .../niftyseg/tests/test_auto_TupleMaths.py | 53 +- .../niftyseg/tests/test_auto_UnaryMaths.py | 35 +- .../niftyseg/tests/test_auto_UnaryStats.py | 35 +- .../niftyseg/tests/test_em_interfaces.py | 17 +- .../niftyseg/tests/test_extra_PatchMatch.py | 15 +- .../niftyseg/tests/test_label_fusion.py | 70 +- .../interfaces/niftyseg/tests/test_lesions.py | 13 +- .../interfaces/niftyseg/tests/test_maths.py | 90 +- .../interfaces/niftyseg/tests/test_stats.py | 23 +- nipype/interfaces/nilearn.py | 137 +- nipype/interfaces/nipy/base.py | 4 +- nipype/interfaces/nipy/model.py | 197 +- nipype/interfaces/nipy/preprocess.py | 148 +- .../nipy/tests/test_auto_ComputeMask.py | 11 +- .../nipy/tests/test_auto_EstimateContrast.py | 32 +- .../interfaces/nipy/tests/test_auto_FitGLM.py | 30 +- .../nipy/tests/test_auto_Similarity.py | 20 +- .../tests/test_auto_SpaceTimeRealigner.py | 16 +- .../interfaces/nipy/tests/test_auto_Trim.py | 17 +- nipype/interfaces/nipy/utils.py | 27 +- nipype/interfaces/nitime/__init__.py | 7 +- nipype/interfaces/nitime/analysis.py | 200 +- nipype/interfaces/nitime/base.py | 2 +- .../tests/test_auto_CoherenceAnalyzer.py | 27 +- nipype/interfaces/nitime/tests/test_nitime.py | 32 +- nipype/interfaces/petpvc.py | 156 +- nipype/interfaces/quickshear.py | 55 +- nipype/interfaces/semtools/brains/__init__.py | 7 +- nipype/interfaces/semtools/brains/classify.py | 46 +- .../semtools/brains/segmentation.py | 92 +- ...t_auto_BRAINSPosteriorToContinuousClass.py | 51 +- .../brains/tests/test_auto_BRAINSTalairach.py | 55 +- .../tests/test_auto_BRAINSTalairachMask.py | 35 +- .../tests/test_auto_GenerateEdgeMapImage.py | 38 +- .../tests/test_auto_GeneratePurePlugMask.py | 25 +- .../test_auto_HistogramMatchingFilter.py | 
44 +- .../brains/tests/test_auto_SimilarityIndex.py | 26 +- .../interfaces/semtools/brains/utilities.py | 126 +- nipype/interfaces/semtools/converters.py | 30 +- .../interfaces/semtools/diffusion/__init__.py | 34 +- .../semtools/diffusion/diffusion.py | 435 +- .../interfaces/semtools/diffusion/gtract.py | 1044 ++--- .../semtools/diffusion/maxcurvature.py | 24 +- .../diffusion/tests/test_auto_DWIConvert.py | 83 +- .../tests/test_auto_compareTractInclusion.py | 31 +- .../diffusion/tests/test_auto_dtiaverage.py | 22 +- .../diffusion/tests/test_auto_dtiestim.py | 77 +- .../diffusion/tests/test_auto_dtiprocess.py | 156 +- .../tests/test_auto_extractNrrdVectorIndex.py | 27 +- .../tests/test_auto_gtractAnisotropyMap.py | 25 +- .../tests/test_auto_gtractAverageBvalues.py | 27 +- .../tests/test_auto_gtractClipAnisotropy.py | 27 +- .../tests/test_auto_gtractCoRegAnatomy.py | 72 +- .../tests/test_auto_gtractConcatDwi.py | 22 +- .../test_auto_gtractCopyImageOrientation.py | 28 +- .../tests/test_auto_gtractCoregBvalues.py | 56 +- .../tests/test_auto_gtractCostFastMarching.py | 44 +- .../tests/test_auto_gtractCreateGuideFiber.py | 27 +- .../test_auto_gtractFastMarchingTracking.py | 50 +- .../tests/test_auto_gtractFiberTracking.py | 80 +- .../tests/test_auto_gtractImageConformity.py | 28 +- .../test_auto_gtractInvertBSplineTransform.py | 33 +- ...test_auto_gtractInvertDisplacementField.py | 30 +- .../test_auto_gtractInvertRigidTransform.py | 23 +- .../test_auto_gtractResampleAnisotropy.py | 31 +- .../tests/test_auto_gtractResampleB0.py | 35 +- .../test_auto_gtractResampleCodeImage.py | 35 +- .../test_auto_gtractResampleDWIInPlace.py | 51 +- .../tests/test_auto_gtractResampleFibers.py | 31 +- .../diffusion/tests/test_auto_gtractTensor.py | 49 +- ...auto_gtractTransformToDisplacementField.py | 26 +- .../diffusion/tests/test_auto_maxcurvature.py | 25 +- .../diffusion/tractography/commandlineonly.py | 18 +- .../diffusion/tractography/fiberprocess.py | 92 +- .../diffusion/tractography/fibertrack.py | 74 +- .../tests/test_auto_UKFTractography.py | 94 +- .../tests/test_auto_fiberprocess.py | 58 +- .../tests/test_auto_fiberstats.py | 16 +- .../tests/test_auto_fibertrack.py | 46 +- .../diffusion/tractography/ukftractography.py | 174 +- nipype/interfaces/semtools/featurecreator.py | 23 +- .../interfaces/semtools/filtering/__init__.py | 25 +- .../semtools/filtering/denoising.py | 56 +- .../semtools/filtering/featuredetection.py | 304 +- .../filtering/tests/test_auto_CannyEdge.py | 27 +- ...to_CannySegmentationLevelSetImageFilter.py | 42 +- .../filtering/tests/test_auto_DilateImage.py | 28 +- .../filtering/tests/test_auto_DilateMask.py | 30 +- .../filtering/tests/test_auto_DistanceMaps.py | 28 +- .../test_auto_DumpBinaryTrainingVectors.py | 19 +- .../filtering/tests/test_auto_ErodeImage.py | 28 +- .../tests/test_auto_FlippedDifference.py | 26 +- .../test_auto_GenerateBrainClippedImage.py | 28 +- .../test_auto_GenerateSummedGradientImage.py | 30 +- .../tests/test_auto_GenerateTestImage.py | 29 +- ...GradientAnisotropicDiffusionImageFilter.py | 27 +- .../tests/test_auto_HammerAttributeCreator.py | 30 +- .../tests/test_auto_NeighborhoodMean.py | 28 +- .../tests/test_auto_NeighborhoodMedian.py | 28 +- .../tests/test_auto_STAPLEAnalysis.py | 20 +- .../test_auto_TextureFromNoiseImageFilter.py | 23 +- .../tests/test_auto_TextureMeasureFilter.py | 30 +- .../tests/test_auto_UnbiasedNonLocalMeans.py | 42 +- .../semtools/legacy/registration.py | 43 +- .../legacy/tests/test_auto_scalartransform.py | 38 +- 
.../semtools/registration/__init__.py | 3 +- .../semtools/registration/brainsfit.py | 463 ++- .../semtools/registration/brainsresample.py | 71 +- .../semtools/registration/brainsresize.py | 33 +- .../semtools/registration/specialized.py | 464 ++- .../tests/test_auto_BRAINSDemonWarp.py | 140 +- .../registration/tests/test_auto_BRAINSFit.py | 220 +- .../tests/test_auto_BRAINSResample.py | 51 +- .../tests/test_auto_BRAINSResize.py | 25 +- .../test_auto_BRAINSTransformFromFiducials.py | 34 +- .../tests/test_auto_VBRAINSDemonWarp.py | 139 +- .../semtools/segmentation/__init__.py | 12 +- .../semtools/segmentation/specialized.py | 761 ++-- .../segmentation/tests/test_auto_BRAINSABC.py | 121 +- .../test_auto_BRAINSConstellationDetector.py | 165 +- ...BRAINSCreateLabelMapFromProbabilityMaps.py | 42 +- .../segmentation/tests/test_auto_BRAINSCut.py | 48 +- .../tests/test_auto_BRAINSMultiSTAPLE.py | 34 +- .../tests/test_auto_BRAINSROIAuto.py | 44 +- ...t_auto_BinaryMaskEditorBasedOnLandmarks.py | 38 +- .../segmentation/tests/test_auto_ESLR.py | 35 +- .../semtools/testing/featuredetection.py | 20 +- .../testing/generateaveragelmkfile.py | 30 +- .../semtools/testing/landmarkscompare.py | 25 +- .../semtools/tests/test_auto_DWICompare.py | 19 +- .../tests/test_auto_DWISimpleCompare.py | 21 +- ...o_GenerateCsfClippedFromClassifiedImage.py | 21 +- .../interfaces/semtools/utilities/__init__.py | 29 +- .../interfaces/semtools/utilities/brains.py | 747 ++-- .../tests/test_auto_BRAINSAlignMSP.py | 49 +- .../tests/test_auto_BRAINSClipInferior.py | 27 +- .../test_auto_BRAINSConstellationModeler.py | 50 +- .../tests/test_auto_BRAINSEyeDetector.py | 25 +- ...est_auto_BRAINSInitializedControlPoints.py | 35 +- .../test_auto_BRAINSLandmarkInitializer.py | 25 +- .../test_auto_BRAINSLinearModelerEPCA.py | 16 +- .../tests/test_auto_BRAINSLmkTransform.py | 41 +- .../utilities/tests/test_auto_BRAINSMush.py | 74 +- .../tests/test_auto_BRAINSSnapShotWriter.py | 34 +- .../tests/test_auto_BRAINSTransformConvert.py | 37 +- ...st_auto_BRAINSTrimForegroundInDirection.py | 33 +- .../tests/test_auto_CleanUpOverlapLabels.py | 16 +- .../tests/test_auto_FindCenterOfBrain.py | 75 +- ...auto_GenerateLabelMapFromProbabilityMap.py | 20 +- .../tests/test_auto_ImageRegionPlotter.py | 40 +- .../tests/test_auto_JointHistogram.py | 30 +- .../tests/test_auto_ShuffleVectorsModule.py | 19 +- .../utilities/tests/test_auto_fcsv_to_hdf5.py | 32 +- .../tests/test_auto_insertMidACPCpoint.py | 21 +- ...test_auto_landmarksConstellationAligner.py | 19 +- ...test_auto_landmarksConstellationWeights.py | 31 +- nipype/interfaces/slicer/__init__.py | 10 +- nipype/interfaces/slicer/converters.py | 62 +- .../interfaces/slicer/diffusion/__init__.py | 13 +- .../interfaces/slicer/diffusion/diffusion.py | 355 +- .../diffusion/tests/test_auto_DTIexport.py | 26 +- .../diffusion/tests/test_auto_DTIimport.py | 28 +- .../test_auto_DWIJointRicianLMMSEFilter.py | 40 +- .../tests/test_auto_DWIRicianLMMSEFilter.py | 52 +- .../tests/test_auto_DWIToDTIEstimation.py | 46 +- ..._auto_DiffusionTensorScalarMeasurements.py | 28 +- ...est_auto_DiffusionWeightedVolumeMasking.py | 41 +- .../tests/test_auto_ResampleDTIVolume.py | 96 +- .../test_auto_TractographyLabelMapSeeding.py | 63 +- .../interfaces/slicer/filtering/__init__.py | 22 +- .../interfaces/slicer/filtering/arithmetic.py | 114 +- .../slicer/filtering/checkerboardfilter.py | 30 +- .../interfaces/slicer/filtering/denoising.py | 96 +- .../slicer/filtering/extractskeleton.py | 40 +- .../slicer/filtering/histogrammatching.py | 
49 +- .../slicer/filtering/imagelabelcombine.py | 30 +- .../interfaces/slicer/filtering/morphology.py | 35 +- .../filtering/n4itkbiasfieldcorrection.py | 77 +- .../resamplescalarvectordwivolume.py | 120 +- .../tests/test_auto_AddScalarVolumes.py | 34 +- .../tests/test_auto_CastScalarVolume.py | 28 +- .../tests/test_auto_CheckerBoardFilter.py | 37 +- ...test_auto_CurvatureAnisotropicDiffusion.py | 32 +- .../tests/test_auto_ExtractSkeleton.py | 34 +- .../test_auto_GaussianBlurImageFilter.py | 28 +- .../test_auto_GradientAnisotropicDiffusion.py | 32 +- .../test_auto_GrayscaleFillHoleImageFilter.py | 26 +- ...test_auto_GrayscaleGrindPeakImageFilter.py | 26 +- .../tests/test_auto_HistogramMatching.py | 38 +- .../tests/test_auto_ImageLabelCombine.py | 34 +- .../tests/test_auto_MaskScalarVolume.py | 36 +- .../tests/test_auto_MedianImageFilter.py | 31 +- .../tests/test_auto_MultiplyScalarVolumes.py | 34 +- .../test_auto_N4ITKBiasFieldCorrection.py | 60 +- ...test_auto_ResampleScalarVectorDWIVolume.py | 92 +- .../tests/test_auto_SubtractScalarVolumes.py | 34 +- .../tests/test_auto_ThresholdScalarVolume.py | 36 +- ...auto_VotingBinaryHoleFillingImageFilter.py | 37 +- .../slicer/filtering/thresholdscalarvolume.py | 36 +- .../votingbinaryholefillingimagefilter.py | 39 +- nipype/interfaces/slicer/generate_classes.py | 470 ++- nipype/interfaces/slicer/legacy/__init__.py | 12 +- nipype/interfaces/slicer/legacy/converters.py | 18 +- .../slicer/legacy/diffusion/denoising.py | 51 +- ...est_auto_DWIUnbiasedNonLocalMeansFilter.py | 45 +- nipype/interfaces/slicer/legacy/filtering.py | 62 +- .../interfaces/slicer/legacy/registration.py | 520 +-- .../interfaces/slicer/legacy/segmentation.py | 48 +- .../tests/test_auto_AffineRegistration.py | 50 +- ...test_auto_BSplineDeformableRegistration.py | 59 +- .../test_auto_BSplineToDeformationField.py | 26 +- .../test_auto_ExpertAutomatedRegistration.py | 88 +- .../tests/test_auto_LinearRegistration.py | 58 +- ..._auto_MultiResolutionAffineRegistration.py | 51 +- .../test_auto_OtsuThresholdImageFilter.py | 32 +- .../test_auto_OtsuThresholdSegmentation.py | 34 +- .../tests/test_auto_ResampleScalarVolume.py | 33 +- .../tests/test_auto_RigidRegistration.py | 60 +- .../quantification/changequantification.py | 53 +- .../petstandarduptakevaluecomputation.py | 54 +- .../test_auto_IntensityDifferenceMetric.py | 49 +- ..._auto_PETStandardUptakeValueComputation.py | 43 +- .../slicer/registration/__init__.py | 8 +- .../slicer/registration/brainsfit.py | 415 +- .../slicer/registration/brainsresample.py | 66 +- .../slicer/registration/specialized.py | 486 ++- .../tests/test_auto_ACPCTransform.py | 22 +- .../tests/test_auto_BRAINSDemonWarp.py | 140 +- .../registration/tests/test_auto_BRAINSFit.py | 202 +- .../tests/test_auto_BRAINSResample.py | 51 +- .../tests/test_auto_FiducialRegistration.py | 26 +- .../tests/test_auto_VBRAINSDemonWarp.py | 139 +- .../slicer/segmentation/__init__.py | 3 +- .../simpleregiongrowingsegmentation.py | 53 +- .../slicer/segmentation/specialized.py | 236 +- .../tests/test_auto_BRAINSROIAuto.py | 39 +- .../tests/test_auto_EMSegmentCommandLine.py | 68 +- .../test_auto_RobustStatisticsSegmenter.py | 42 +- ...st_auto_SimpleRegionGrowingSegmentation.py | 40 +- nipype/interfaces/slicer/surface.py | 221 +- .../tests/test_auto_DicomToNrrdConverter.py | 31 +- ...test_auto_EMSegmentTransformToNewFormat.py | 23 +- .../tests/test_auto_GrayscaleModelMaker.py | 38 +- .../tests/test_auto_LabelMapSmoothing.py | 34 +- .../slicer/tests/test_auto_MergeModels.py | 32 +- 
.../slicer/tests/test_auto_ModelMaker.py | 60 +- .../slicer/tests/test_auto_ModelToLabelMap.py | 34 +- .../tests/test_auto_OrientScalarVolume.py | 28 +- .../tests/test_auto_ProbeVolumeWithModel.py | 32 +- .../tests/test_auto_SlicerCommandLine.py | 6 +- nipype/interfaces/slicer/utilities.py | 39 +- nipype/interfaces/spm/__init__.py | 47 +- nipype/interfaces/spm/base.py | 245 +- nipype/interfaces/spm/model.py | 822 ++-- nipype/interfaces/spm/preprocess.py | 2251 +++++----- .../spm/tests/test_auto_Analyze2nii.py | 23 +- .../spm/tests/test_auto_ApplyDeformations.py | 28 +- .../test_auto_ApplyInverseDeformation.py | 35 +- .../spm/tests/test_auto_ApplyTransform.py | 27 +- .../spm/tests/test_auto_CalcCoregAffine.py | 29 +- .../spm/tests/test_auto_Coregister.py | 50 +- .../spm/tests/test_auto_CreateWarped.py | 27 +- .../interfaces/spm/tests/test_auto_DARTEL.py | 28 +- .../spm/tests/test_auto_DARTELNorm2MNI.py | 33 +- .../spm/tests/test_auto_DicomImport.py | 36 +- .../spm/tests/test_auto_EstimateContrast.py | 33 +- .../spm/tests/test_auto_EstimateModel.py | 31 +- .../spm/tests/test_auto_FactorialDesign.py | 48 +- .../spm/tests/test_auto_FieldMap.py | 117 +- .../spm/tests/test_auto_Level1Design.py | 52 +- .../test_auto_MultipleRegressionDesign.py | 60 +- .../spm/tests/test_auto_NewSegment.py | 27 +- .../spm/tests/test_auto_Normalize.py | 68 +- .../spm/tests/test_auto_Normalize12.py | 59 +- .../tests/test_auto_OneSampleTTestDesign.py | 53 +- .../spm/tests/test_auto_PairedTTestDesign.py | 57 +- .../interfaces/spm/tests/test_auto_Realign.py | 54 +- .../spm/tests/test_auto_RealignUnwarp.py | 87 +- .../interfaces/spm/tests/test_auto_Reslice.py | 25 +- .../spm/tests/test_auto_ResliceToReference.py | 27 +- .../spm/tests/test_auto_SPMCommand.py | 7 +- .../interfaces/spm/tests/test_auto_Segment.py | 74 +- .../spm/tests/test_auto_SliceTiming.py | 47 +- .../interfaces/spm/tests/test_auto_Smooth.py | 28 +- .../spm/tests/test_auto_Threshold.py | 41 +- .../tests/test_auto_ThresholdStatistics.py | 27 +- .../tests/test_auto_TwoSampleTTestDesign.py | 62 +- .../spm/tests/test_auto_VBMSegment.py | 150 +- nipype/interfaces/spm/tests/test_base.py | 74 +- nipype/interfaces/spm/tests/test_model.py | 30 +- .../interfaces/spm/tests/test_preprocess.py | 81 +- nipype/interfaces/spm/tests/test_utils.py | 64 +- nipype/interfaces/spm/utils.py | 293 +- .../tests/test_auto_BIDSDataGrabber.py | 11 +- nipype/interfaces/tests/test_auto_Bru2.py | 30 +- nipype/interfaces/tests/test_auto_C3d.py | 50 +- .../tests/test_auto_C3dAffineTool.py | 40 +- nipype/interfaces/tests/test_auto_CopyMeta.py | 14 +- .../interfaces/tests/test_auto_DataFinder.py | 8 +- .../interfaces/tests/test_auto_DataGrabber.py | 10 +- nipype/interfaces/tests/test_auto_DataSink.py | 10 +- nipype/interfaces/tests/test_auto_Dcm2nii.py | 91 +- nipype/interfaces/tests/test_auto_Dcm2niix.py | 84 +- nipype/interfaces/tests/test_auto_DcmStack.py | 10 +- .../interfaces/tests/test_auto_ExportFile.py | 14 +- .../tests/test_auto_FreeSurferSource.py | 160 +- .../tests/test_auto_GroupAndStack.py | 10 +- .../tests/test_auto_JSONFileGrabber.py | 7 +- .../tests/test_auto_JSONFileSink.py | 10 +- .../interfaces/tests/test_auto_LookupMeta.py | 8 +- .../tests/test_auto_MatlabCommand.py | 49 +- .../interfaces/tests/test_auto_MergeNifti.py | 8 +- nipype/interfaces/tests/test_auto_MeshFix.py | 101 +- .../interfaces/tests/test_auto_MySQLSink.py | 14 +- nipype/interfaces/tests/test_auto_PETPVC.py | 75 +- .../interfaces/tests/test_auto_Quickshear.py | 36 +- 
nipype/interfaces/tests/test_auto_Reorient.py | 12 +- nipype/interfaces/tests/test_auto_Rescale.py | 16 +- .../tests/test_auto_S3DataGrabber.py | 16 +- .../interfaces/tests/test_auto_SQLiteSink.py | 7 +- .../tests/test_auto_SSHDataGrabber.py | 20 +- .../interfaces/tests/test_auto_SelectFiles.py | 8 +- .../tests/test_auto_SignalExtraction.py | 24 +- .../tests/test_auto_SlicerCommandLine.py | 9 +- .../interfaces/tests/test_auto_SplitNifti.py | 11 +- nipype/interfaces/tests/test_auto_XNATSink.py | 26 +- .../interfaces/tests/test_auto_XNATSource.py | 18 +- nipype/interfaces/tests/test_extra_dcm2nii.py | 19 +- nipype/interfaces/tests/test_image.py | 20 +- nipype/interfaces/tests/test_io.py | 572 +-- nipype/interfaces/tests/test_matlab.py | 83 +- nipype/interfaces/tests/test_nilearn.py | 172 +- nipype/interfaces/utility/__init__.py | 3 +- nipype/interfaces/utility/base.py | 134 +- nipype/interfaces/utility/csv.py | 25 +- .../utility/tests/test_auto_AssertEqual.py | 10 +- .../utility/tests/test_auto_CSVReader.py | 8 +- .../utility/tests/test_auto_Function.py | 4 +- .../tests/test_auto_IdentityInterface.py | 2 + .../utility/tests/test_auto_Merge.py | 10 +- .../utility/tests/test_auto_Rename.py | 13 +- .../utility/tests/test_auto_Select.py | 9 +- .../utility/tests/test_auto_Split.py | 8 +- nipype/interfaces/utility/tests/test_base.py | 46 +- nipype/interfaces/utility/tests/test_csv.py | 14 +- .../interfaces/utility/tests/test_wrappers.py | 80 +- nipype/interfaces/utility/wrappers.py | 70 +- nipype/interfaces/vista/__init__.py | 2 +- .../vista/tests/test_auto_Vnifti2Image.py | 30 +- .../vista/tests/test_auto_VtoMat.py | 24 +- nipype/interfaces/vista/vista.py | 39 +- nipype/interfaces/vtkbase.py | 24 +- nipype/interfaces/workbench/base.py | 10 +- nipype/interfaces/workbench/cifti.py | 52 +- nipype/interfaces/workbench/metric.py | 74 +- .../workbench/tests/test_auto_CiftiSmooth.py | 91 +- .../tests/test_auto_MetricResample.py | 87 +- .../workbench/tests/test_auto_WBCommand.py | 6 +- nipype/pipeline/__init__.py | 2 +- nipype/pipeline/engine/__init__.py | 2 +- nipype/pipeline/engine/base.py | 15 +- nipype/pipeline/engine/nodes.py | 579 +-- nipype/pipeline/engine/tests/test_base.py | 73 +- nipype/pipeline/engine/tests/test_engine.py | 588 +-- nipype/pipeline/engine/tests/test_join.py | 525 +-- nipype/pipeline/engine/tests/test_nodes.py | 149 +- nipype/pipeline/engine/tests/test_utils.py | 188 +- .../pipeline/engine/tests/test_workflows.py | 209 +- nipype/pipeline/engine/workflows.py | 648 +-- nipype/pipeline/plugins/base.py | 306 +- nipype/pipeline/plugins/condor.py | 77 +- nipype/pipeline/plugins/dagman.py | 126 +- nipype/pipeline/plugins/debug.py | 13 +- nipype/pipeline/plugins/ipython.py | 62 +- nipype/pipeline/plugins/legacymultiproc.py | 220 +- nipype/pipeline/plugins/linear.py | 21 +- nipype/pipeline/plugins/lsf.py | 95 +- nipype/pipeline/plugins/multiproc.py | 173 +- nipype/pipeline/plugins/oar.py | 110 +- nipype/pipeline/plugins/pbs.py | 83 +- nipype/pipeline/plugins/pbsgraph.py | 45 +- .../pipeline/plugins/semaphore_singleton.py | 1 + nipype/pipeline/plugins/sge.py | 286 +- nipype/pipeline/plugins/sgegraph.py | 126 +- nipype/pipeline/plugins/slurm.py | 110 +- nipype/pipeline/plugins/slurmgraph.py | 134 +- nipype/pipeline/plugins/somaflow.py | 13 +- nipype/pipeline/plugins/tests/test_base.py | 4 +- .../pipeline/plugins/tests/test_callback.py | 37 +- nipype/pipeline/plugins/tests/test_debug.py | 20 +- .../tests/test_legacymultiproc_nondaemon.py | 62 +- nipype/pipeline/plugins/tests/test_linear.py 
| 25 +- .../pipeline/plugins/tests/test_multiproc.py | 98 +- nipype/pipeline/plugins/tests/test_oar.py | 27 +- nipype/pipeline/plugins/tests/test_pbs.py | 27 +- .../pipeline/plugins/tests/test_somaflow.py | 25 +- nipype/pipeline/plugins/tests/test_tools.py | 28 +- nipype/pipeline/plugins/tools.py | 70 +- nipype/pkg_info.py | 39 +- nipype/scripts/cli.py | 145 +- nipype/scripts/crash_files.py | 17 +- nipype/scripts/instance.py | 3 +- nipype/scripts/utils.py | 44 +- nipype/sphinxext/plot_workflow.py | 282 +- nipype/testing/__init__.py | 10 +- nipype/testing/fixtures.py | 25 +- nipype/testing/tests/test_utils.py | 9 +- nipype/testing/utils.py | 26 +- nipype/tests/test_nipype.py | 8 +- nipype/utils/config.py | 161 +- nipype/utils/docparse.py | 77 +- nipype/utils/draw_gantt_chart.py | 332 +- nipype/utils/functions.py | 14 +- nipype/utils/logger.py | 61 +- nipype/utils/matlabtools.py | 7 +- nipype/utils/misc.py | 73 +- nipype/utils/nipype2boutiques.py | 436 +- nipype/utils/nipype_cmd.py | 25 +- nipype/utils/profiler.py | 94 +- nipype/utils/provenance.py | 215 +- nipype/utils/spm_docs.py | 6 +- nipype/utils/subprocess.py | 90 +- nipype/utils/tests/test_cmd.py | 28 +- nipype/utils/tests/test_config.py | 224 +- nipype/utils/tests/test_docparse.py | 8 +- nipype/utils/tests/test_filemanip.py | 330 +- nipype/utils/tests/test_functions.py | 8 +- nipype/utils/tests/test_misc.py | 46 +- nipype/utils/tests/test_nipype2boutiques.py | 55 +- nipype/utils/tests/test_provenance.py | 20 +- nipype/workflows/__init__.py | 22 +- 1191 files changed, 61298 insertions(+), 67657 deletions(-) diff --git a/nipype/__init__.py b/nipype/__init__.py index 0773845b89..76b2ba58f9 100644 --- a/nipype/__init__.py +++ b/nipype/__init__.py @@ -4,8 +4,12 @@ import os from distutils.version import LooseVersion -from .info import (LONG_DESCRIPTION as __doc__, URL as __url__, STATUS as - __status__, __version__) +from .info import ( + LONG_DESCRIPTION as __doc__, + URL as __url__, + STATUS as __status__, + __version__, +) from .utils.config import NipypeConfig from .utils.logger import Logging from .refs import due @@ -13,6 +17,7 @@ try: import faulthandler + faulthandler.enable() except (ImportError, IOError) as e: pass @@ -26,18 +31,16 @@ def __call__(self, doctests=True, parallel=False): try: import pytest except ImportError: - raise RuntimeError( - 'py.test not installed, run: pip install pytest') + raise RuntimeError("py.test not installed, run: pip install pytest") args = [] if not doctests: - args.extend(['-p', 'no:doctest']) + args.extend(["-p", "no:doctest"]) if parallel: try: import xdist except ImportError: - raise RuntimeError( - "pytest-xdist required for parallel run") - args.append('-n auto') + raise RuntimeError("pytest-xdist required for parallel run") + args.append("-n auto") args.append(os.path.dirname(__file__)) pytest.main(args=args) @@ -51,8 +54,16 @@ def get_info(): from .pipeline import Node, MapNode, JoinNode, Workflow -from .interfaces import (DataGrabber, DataSink, SelectFiles, IdentityInterface, - Rename, Function, Select, Merge) +from .interfaces import ( + DataGrabber, + DataSink, + SelectFiles, + IdentityInterface, + Rename, + Function, + Select, + Merge, +) def check_latest_version(raise_exception=False): @@ -63,11 +74,12 @@ def check_latest_version(raise_exception=False): Raise a RuntimeError if a bad version is being used """ import etelemetry - logger = logging.getLogger('nipype.utils') + + logger = logging.getLogger("nipype.utils") INIT_MSG = "Running {packname} version {version} (latest: 
{latest})".format - latest = {"version": 'Unknown', "bad_versions": []} + latest = {"version": "Unknown", "bad_versions": []} result = None try: result = etelemetry.get_project("nipy/nipype") @@ -77,24 +89,34 @@ def check_latest_version(raise_exception=False): if result: latest.update(**result) if LooseVersion(__version__) != LooseVersion(latest["version"]): - logger.info(INIT_MSG(packname='nipype', - version=__version__, - latest=latest["version"])) - if latest["bad_versions"] and \ - any([LooseVersion(__version__) == LooseVersion(ver) - for ver in latest["bad_versions"]]): - message = ('You are using a version of Nipype with a critical ' - 'bug. Please use a different version.') + logger.info( + INIT_MSG( + packname="nipype", version=__version__, latest=latest["version"] + ) + ) + if latest["bad_versions"] and any( + [ + LooseVersion(__version__) == LooseVersion(ver) + for ver in latest["bad_versions"] + ] + ): + message = ( + "You are using a version of Nipype with a critical " + "bug. Please use a different version." + ) if raise_exception: raise RuntimeError(message) else: logger.critical(message) return latest + # Run telemetry on import for interactive sessions, such as IPython, Jupyter notebooks, Python REPL -if config.getboolean('execution', 'check_version'): +if config.getboolean("execution", "check_version"): import __main__ - if not hasattr(__main__, '__file__'): + + if not hasattr(__main__, "__file__"): from .interfaces.base import BaseInterface + if BaseInterface._etelemetry_version_data is None: BaseInterface._etelemetry_version_data = check_latest_version() diff --git a/nipype/algorithms/__init__.py b/nipype/algorithms/__init__.py index a2909a3501..b28fc516d2 100644 --- a/nipype/algorithms/__init__.py +++ b/nipype/algorithms/__init__.py @@ -7,4 +7,4 @@ Exaples: artifactdetect """ -__docformat__ = 'restructuredtext' +__docformat__ = "restructuredtext" diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index 4e09a1700a..428812b842 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -1,9 +1,9 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -''' +""" Algorithms to compute confounds in :abbr:`fMRI (functional MRI)` -''' +""" import os import os.path as op from collections import OrderedDict @@ -15,14 +15,21 @@ from .. 
import config, logging from ..external.due import BibTeX -from ..interfaces.base import (traits, TraitedSpec, BaseInterface, - BaseInterfaceInputSpec, File, isdefined, - InputMultiPath, OutputMultiPath, - SimpleInterface) +from ..interfaces.base import ( + traits, + TraitedSpec, + BaseInterface, + BaseInterfaceInputSpec, + File, + isdefined, + InputMultiPath, + OutputMultiPath, + SimpleInterface, +) from ..utils import NUMPY_MMAP from ..utils.misc import normalize_mc_params -IFLOGGER = logging.getLogger('nipype.interface') +IFLOGGER = logging.getLogger("nipype.interface") def fallback_svd(a, full_matrices=True, compute_uv=True): @@ -32,69 +39,76 @@ def fallback_svd(a, full_matrices=True, compute_uv=True): pass from scipy.linalg import svd - return svd(a, full_matrices=full_matrices, compute_uv=compute_uv, lapack_driver='gesvd') + + return svd( + a, full_matrices=full_matrices, compute_uv=compute_uv, lapack_driver="gesvd" + ) class ComputeDVARSInputSpec(BaseInterfaceInputSpec): - in_file = File( - exists=True, mandatory=True, desc='functional data, after HMC') - in_mask = File(exists=True, mandatory=True, desc='a brain mask') + in_file = File(exists=True, mandatory=True, desc="functional data, after HMC") + in_mask = File(exists=True, mandatory=True, desc="a brain mask") remove_zerovariance = traits.Bool( - True, usedefault=True, desc='remove voxels with zero variance') - save_std = traits.Bool( - True, usedefault=True, desc='save standardized DVARS') - save_nstd = traits.Bool( - False, usedefault=True, desc='save non-standardized DVARS') + True, usedefault=True, desc="remove voxels with zero variance" + ) + save_std = traits.Bool(True, usedefault=True, desc="save standardized DVARS") + save_nstd = traits.Bool(False, usedefault=True, desc="save non-standardized DVARS") save_vxstd = traits.Bool( - False, usedefault=True, desc='save voxel-wise standardized DVARS') - save_all = traits.Bool(False, usedefault=True, desc='output all DVARS') + False, usedefault=True, desc="save voxel-wise standardized DVARS" + ) + save_all = traits.Bool(False, usedefault=True, desc="output all DVARS") - series_tr = traits.Float(desc='repetition time in sec.') - save_plot = traits.Bool(False, usedefault=True, desc='write DVARS plot') - figdpi = traits.Int(100, usedefault=True, desc='output dpi for the plot') + series_tr = traits.Float(desc="repetition time in sec.") + save_plot = traits.Bool(False, usedefault=True, desc="write DVARS plot") + figdpi = traits.Int(100, usedefault=True, desc="output dpi for the plot") figsize = traits.Tuple( traits.Float(11.7), traits.Float(2.3), usedefault=True, - desc='output figure size') + desc="output figure size", + ) figformat = traits.Enum( - 'png', 'pdf', 'svg', usedefault=True, desc='output format for figures') + "png", "pdf", "svg", usedefault=True, desc="output format for figures" + ) intensity_normalization = traits.Float( 1000.0, usedefault=True, - desc='Divide value in each voxel at each timepoint ' - 'by the median calculated across all voxels' - 'and timepoints within the mask (if specified)' - 'and then multiply by the value specified by' - 'this parameter. By using the default (1000)' - 'output DVARS will be expressed in ' - 'x10 % BOLD units compatible with Power et al.' - '2012. Set this to 0 to disable intensity' - 'normalization altogether.') + desc="Divide value in each voxel at each timepoint " + "by the median calculated across all voxels" + "and timepoints within the mask (if specified)" + "and then multiply by the value specified by" + "this parameter. 
By using the default (1000)" + "output DVARS will be expressed in " + "x10 % BOLD units compatible with Power et al." + "2012. Set this to 0 to disable intensity" + "normalization altogether.", + ) class ComputeDVARSOutputSpec(TraitedSpec): - out_std = File(exists=True, desc='output text file') - out_nstd = File(exists=True, desc='output text file') - out_vxstd = File(exists=True, desc='output text file') - out_all = File(exists=True, desc='output text file') + out_std = File(exists=True, desc="output text file") + out_nstd = File(exists=True, desc="output text file") + out_vxstd = File(exists=True, desc="output text file") + out_all = File(exists=True, desc="output text file") avg_std = traits.Float() avg_nstd = traits.Float() avg_vxstd = traits.Float() - fig_std = File(exists=True, desc='output DVARS plot') - fig_nstd = File(exists=True, desc='output DVARS plot') - fig_vxstd = File(exists=True, desc='output DVARS plot') + fig_std = File(exists=True, desc="output DVARS plot") + fig_nstd = File(exists=True, desc="output DVARS plot") + fig_vxstd = File(exists=True, desc="output DVARS plot") class ComputeDVARS(BaseInterface): """ Computes the DVARS. """ + input_spec = ComputeDVARSInputSpec output_spec = ComputeDVARSOutputSpec - references_ = [{ - 'entry': - BibTeX("""\ + references_ = [ + { + "entry": BibTeX( + """\ @techreport{nichols_notes_2013, address = {Coventry, UK}, title = {Notes on {Creating} a {Standardized} {Version} of {DVARS}}, @@ -104,11 +118,13 @@ class ComputeDVARS(BaseInterface): institution = {University of Warwick}, author = {Nichols, Thomas}, year = {2013} -}"""), - 'tags': ['method'] - }, { - 'entry': - BibTeX("""\ +}""" + ), + "tags": ["method"], + }, + { + "entry": BibTeX( + """\ @article{power_spurious_2012, title = {Spurious but systematic correlations in functional connectivity {MRI} networks \ arise from subject motion}, @@ -122,9 +138,11 @@ class ComputeDVARS(BaseInterface): year = {2012}, pages = {2142--2154}, } -"""), - 'tags': ['method'] - }] +""" + ), + "tags": ["method"], + }, + ] def __init__(self, **inputs): self._results = {} @@ -133,100 +151,107 @@ def __init__(self, **inputs): def _gen_fname(self, suffix, ext=None): fname, in_ext = op.splitext(op.basename(self.inputs.in_file)) - if in_ext == '.gz': + if in_ext == ".gz": fname, in_ext2 = op.splitext(fname) in_ext = in_ext2 + in_ext if ext is None: ext = in_ext - if ext.startswith('.'): + if ext.startswith("."): ext = ext[1:] - return op.abspath('{}_{}.{}'.format(fname, suffix, ext)) + return op.abspath("{}_{}.{}".format(fname, suffix, ext)) def _run_interface(self, runtime): dvars = compute_dvars( self.inputs.in_file, self.inputs.in_mask, remove_zerovariance=self.inputs.remove_zerovariance, - intensity_normalization=self.inputs.intensity_normalization) + intensity_normalization=self.inputs.intensity_normalization, + ) - (self._results['avg_std'], self._results['avg_nstd'], - self._results['avg_vxstd']) = np.mean( - dvars, axis=1).astype(float) + ( + self._results["avg_std"], + self._results["avg_nstd"], + self._results["avg_vxstd"], + ) = np.mean(dvars, axis=1).astype(float) tr = None if isdefined(self.inputs.series_tr): tr = self.inputs.series_tr if self.inputs.save_std: - out_file = self._gen_fname('dvars_std', ext='tsv') - np.savetxt(out_file, dvars[0], fmt=b'%0.6f') - self._results['out_std'] = out_file + out_file = self._gen_fname("dvars_std", ext="tsv") + np.savetxt(out_file, dvars[0], fmt=b"%0.6f") + self._results["out_std"] = out_file if self.inputs.save_plot: - self._results['fig_std'] = 
self._gen_fname( - 'dvars_std', ext=self.inputs.figformat) + self._results["fig_std"] = self._gen_fname( + "dvars_std", ext=self.inputs.figformat + ) fig = plot_confound( - dvars[0], - self.inputs.figsize, - 'Standardized DVARS', - series_tr=tr) + dvars[0], self.inputs.figsize, "Standardized DVARS", series_tr=tr + ) fig.savefig( - self._results['fig_std'], + self._results["fig_std"], dpi=float(self.inputs.figdpi), format=self.inputs.figformat, - bbox_inches='tight') + bbox_inches="tight", + ) fig.clf() if self.inputs.save_nstd: - out_file = self._gen_fname('dvars_nstd', ext='tsv') - np.savetxt(out_file, dvars[1], fmt=b'%0.6f') - self._results['out_nstd'] = out_file + out_file = self._gen_fname("dvars_nstd", ext="tsv") + np.savetxt(out_file, dvars[1], fmt=b"%0.6f") + self._results["out_nstd"] = out_file if self.inputs.save_plot: - self._results['fig_nstd'] = self._gen_fname( - 'dvars_nstd', ext=self.inputs.figformat) + self._results["fig_nstd"] = self._gen_fname( + "dvars_nstd", ext=self.inputs.figformat + ) fig = plot_confound( - dvars[1], self.inputs.figsize, 'DVARS', series_tr=tr) + dvars[1], self.inputs.figsize, "DVARS", series_tr=tr + ) fig.savefig( - self._results['fig_nstd'], + self._results["fig_nstd"], dpi=float(self.inputs.figdpi), format=self.inputs.figformat, - bbox_inches='tight') + bbox_inches="tight", + ) fig.clf() if self.inputs.save_vxstd: - out_file = self._gen_fname('dvars_vxstd', ext='tsv') - np.savetxt(out_file, dvars[2], fmt=b'%0.6f') - self._results['out_vxstd'] = out_file + out_file = self._gen_fname("dvars_vxstd", ext="tsv") + np.savetxt(out_file, dvars[2], fmt=b"%0.6f") + self._results["out_vxstd"] = out_file if self.inputs.save_plot: - self._results['fig_vxstd'] = self._gen_fname( - 'dvars_vxstd', ext=self.inputs.figformat) + self._results["fig_vxstd"] = self._gen_fname( + "dvars_vxstd", ext=self.inputs.figformat + ) fig = plot_confound( - dvars[2], - self.inputs.figsize, - 'Voxelwise std DVARS', - series_tr=tr) + dvars[2], self.inputs.figsize, "Voxelwise std DVARS", series_tr=tr + ) fig.savefig( - self._results['fig_vxstd'], + self._results["fig_vxstd"], dpi=float(self.inputs.figdpi), format=self.inputs.figformat, - bbox_inches='tight') + bbox_inches="tight", + ) fig.clf() if self.inputs.save_all: - out_file = self._gen_fname('dvars', ext='tsv') + out_file = self._gen_fname("dvars", ext="tsv") np.savetxt( out_file, np.vstack(dvars).T, - fmt=b'%0.8f', - delimiter=b'\t', - header='std DVARS\tnon-std DVARS\tvx-wise std DVARS', - comments='') - self._results['out_all'] = out_file + fmt=b"%0.8f", + delimiter=b"\t", + header="std DVARS\tnon-std DVARS\tvx-wise std DVARS", + comments="", + ) + self._results["out_all"] = out_file return runtime @@ -235,7 +260,7 @@ def _list_outputs(self): class FramewiseDisplacementInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc='motion parameters') + in_file = File(exists=True, mandatory=True, desc="motion parameters") parameter_source = traits.Enum( "FSL", "AFNI", @@ -243,33 +268,32 @@ class FramewiseDisplacementInputSpec(BaseInterfaceInputSpec): "FSFAST", "NIPY", desc="Source of movement parameters", - mandatory=True) + mandatory=True, + ) radius = traits.Float( 50, usedefault=True, - desc='radius in mm to calculate angular FDs, 50mm is the ' - 'default since it is used in Power et al. 
2012') - out_file = File( - 'fd_power_2012.txt', usedefault=True, desc='output file name') - out_figure = File( - 'fd_power_2012.pdf', usedefault=True, desc='output figure name') - series_tr = traits.Float(desc='repetition time in sec.') - save_plot = traits.Bool(False, usedefault=True, desc='write FD plot') - normalize = traits.Bool( - False, usedefault=True, desc='calculate FD in mm/s') - figdpi = traits.Int( - 100, usedefault=True, desc='output dpi for the FD plot') + desc="radius in mm to calculate angular FDs, 50mm is the " + "default since it is used in Power et al. 2012", + ) + out_file = File("fd_power_2012.txt", usedefault=True, desc="output file name") + out_figure = File("fd_power_2012.pdf", usedefault=True, desc="output figure name") + series_tr = traits.Float(desc="repetition time in sec.") + save_plot = traits.Bool(False, usedefault=True, desc="write FD plot") + normalize = traits.Bool(False, usedefault=True, desc="calculate FD in mm/s") + figdpi = traits.Int(100, usedefault=True, desc="output dpi for the FD plot") figsize = traits.Tuple( traits.Float(11.7), traits.Float(2.3), usedefault=True, - desc='output figure size') + desc="output figure size", + ) class FramewiseDisplacementOutputSpec(TraitedSpec): - out_file = File(desc='calculated FD per timestep') - out_figure = File(desc='output image file') - fd_average = traits.Float(desc='average FD') + out_file = File(desc="calculated FD per timestep") + out_figure = File(desc="output image file") + fd_average = traits.Float(desc="average FD") class FramewiseDisplacement(BaseInterface): @@ -288,9 +312,10 @@ class FramewiseDisplacement(BaseInterface): input_spec = FramewiseDisplacementInputSpec output_spec = FramewiseDisplacementOutputSpec - references_ = [{ - 'entry': - BibTeX("""\ + references_ = [ + { + "entry": BibTeX( + """\ @article{power_spurious_2012, title = {Spurious but systematic correlations in functional connectivity {MRI} networks \ arise from subject motion}, @@ -304,9 +329,11 @@ class FramewiseDisplacement(BaseInterface): year = {2012}, pages = {2142--2154}, } -"""), - 'tags': ['method'] - }] +""" + ), + "tags": ["method"], + } + ] def _run_interface(self, runtime): mpars = np.loadtxt(self.inputs.in_file) # mpars is N_t x 6 @@ -314,20 +341,19 @@ def _run_interface(self, runtime): func1d=normalize_mc_params, axis=1, arr=mpars, - source=self.inputs.parameter_source) + source=self.inputs.parameter_source, + ) diff = mpars[:-1, :6] - mpars[1:, :6] diff[:, 3:6] *= self.inputs.radius fd_res = np.abs(diff).sum(axis=1) self._results = { - 'out_file': op.abspath(self.inputs.out_file), - 'fd_average': float(fd_res.mean()) + "out_file": op.abspath(self.inputs.out_file), + "fd_average": float(fd_res.mean()), } np.savetxt( - self.inputs.out_file, - fd_res, - header='FramewiseDisplacement', - comments='') + self.inputs.out_file, fd_res, header="FramewiseDisplacement", comments="" + ) if self.inputs.save_plot: tr = None @@ -335,21 +361,23 @@ def _run_interface(self, runtime): tr = self.inputs.series_tr if self.inputs.normalize and tr is None: - IFLOGGER.warning('FD plot cannot be normalized if TR is not set') + IFLOGGER.warning("FD plot cannot be normalized if TR is not set") - self._results['out_figure'] = op.abspath(self.inputs.out_figure) + self._results["out_figure"] = op.abspath(self.inputs.out_figure) fig = plot_confound( fd_res, self.inputs.figsize, - 'FD', - units='mm', + "FD", + units="mm", series_tr=tr, - normalize=self.inputs.normalize) + normalize=self.inputs.normalize, + ) fig.savefig( - 
self._results['out_figure'], + self._results["out_figure"], dpi=float(self.inputs.figdpi), format=self.inputs.out_figure[-3:], - bbox_inches='tight') + bbox_inches="tight", + ) fig.clf() return runtime @@ -360,104 +388,135 @@ def _list_outputs(self): class CompCorInputSpec(BaseInterfaceInputSpec): realigned_file = File( - exists=True, mandatory=True, desc='already realigned brain image (4D)') + exists=True, mandatory=True, desc="already realigned brain image (4D)" + ) mask_files = InputMultiPath( File(exists=True), - desc=('One or more mask files that determines ' - 'ROI (3D). When more that one file is ' - 'provided `merge_method` or ' - '`merge_index` must be provided')) + desc=( + "One or more mask files that determines " + "ROI (3D). When more that one file is " + "provided `merge_method` or " + "`merge_index` must be provided" + ), + ) merge_method = traits.Enum( - 'union', - 'intersect', - 'none', - xor=['mask_index'], - requires=['mask_files'], - desc=('Merge method if multiple masks are ' - 'present - `union` uses voxels included in' - ' at least one input mask, `intersect` ' - 'uses only voxels present in all input ' - 'masks, `none` performs CompCor on ' - 'each mask individually')) + "union", + "intersect", + "none", + xor=["mask_index"], + requires=["mask_files"], + desc=( + "Merge method if multiple masks are " + "present - `union` uses voxels included in" + " at least one input mask, `intersect` " + "uses only voxels present in all input " + "masks, `none` performs CompCor on " + "each mask individually" + ), + ) mask_index = traits.Range( low=0, - xor=['merge_method'], - requires=['mask_files'], - desc=('Position of mask in `mask_files` to use - ' - 'first is the default.')) + xor=["merge_method"], + requires=["mask_files"], + desc=("Position of mask in `mask_files` to use - " "first is the default."), + ) mask_names = traits.List( traits.Str, - desc='Names for provided masks (for printing into metadata). ' - 'If provided, it must be as long as the final mask list ' - '(after any merge and indexing operations).') + desc="Names for provided masks (for printing into metadata). " + "If provided, it must be as long as the final mask list " + "(after any merge and indexing operations).", + ) components_file = traits.Str( - 'components_file.txt', + "components_file.txt", usedefault=True, - desc='Filename to store physiological components') + desc="Filename to store physiological components", + ) num_components = traits.Either( - 'all', traits.Range(low=1), xor=['variance_threshold'], - desc='Number of components to return from the decomposition. If ' - '`num_components` is `all`, then all components will be ' - 'retained.') + "all", + traits.Range(low=1), + xor=["variance_threshold"], + desc="Number of components to return from the decomposition. If " + "`num_components` is `all`, then all components will be " + "retained.", + ) # 6 for BOLD, 4 for ASL # automatically instantiated to 6 in CompCor below if neither # `num_components` nor `variance_threshold` is defined (for # backward compatibility) variance_threshold = traits.Range( - low=0.0, high=1.0, exclude_low=True, exclude_high=True, xor=['num_components'], - desc='Select the number of components to be returned automatically ' - 'based on their ability to explain variance in the dataset. 
' - '`variance_threshold` is a fractional value between 0 and 1; ' - 'the number of components retained will be equal to the minimum ' - 'number of components necessary to explain the provided ' - 'fraction of variance in the masked time series.') + low=0.0, + high=1.0, + exclude_low=True, + exclude_high=True, + xor=["num_components"], + desc="Select the number of components to be returned automatically " + "based on their ability to explain variance in the dataset. " + "`variance_threshold` is a fractional value between 0 and 1; " + "the number of components retained will be equal to the minimum " + "number of components necessary to explain the provided " + "fraction of variance in the masked time series.", + ) pre_filter = traits.Enum( - 'polynomial', - 'cosine', + "polynomial", + "cosine", False, usedefault=True, - desc='Detrend time series prior to component ' - 'extraction') + desc="Detrend time series prior to component " "extraction", + ) use_regress_poly = traits.Bool( - deprecated='0.15.0', - new_name='pre_filter', - desc=('use polynomial regression ' - 'pre-component extraction')) + deprecated="0.15.0", + new_name="pre_filter", + desc=("use polynomial regression " "pre-component extraction"), + ) regress_poly_degree = traits.Range( - low=1, value=1, usedefault=True, desc='the degree polynomial to use') + low=1, value=1, usedefault=True, desc="the degree polynomial to use" + ) header_prefix = traits.Str( - desc=('the desired header for the output tsv ' - 'file (one column). If undefined, will ' - 'default to "CompCor"')) + desc=( + "the desired header for the output tsv " + "file (one column). If undefined, will " + 'default to "CompCor"' + ) + ) high_pass_cutoff = traits.Float( - 128, - usedefault=True, - desc='Cutoff (in seconds) for "cosine" pre-filter') + 128, usedefault=True, desc='Cutoff (in seconds) for "cosine" pre-filter' + ) repetition_time = traits.Float( - desc='Repetition time (TR) of series - derived from image header if ' - 'unspecified') + desc="Repetition time (TR) of series - derived from image header if " + "unspecified" + ) save_pre_filter = traits.Either( - traits.Bool, File, default=False, usedefault=True, - desc='Save pre-filter basis as text file') + traits.Bool, + File, + default=False, + usedefault=True, + desc="Save pre-filter basis as text file", + ) save_metadata = traits.Either( - traits.Bool, File, default=False, usedefault=True, - desc='Save component metadata as text file') - ignore_initial_volumes = traits.Range( - low=0, + traits.Bool, + File, + default=False, usedefault=True, - desc='Number of volumes at start of series to ignore') + desc="Save component metadata as text file", + ) + ignore_initial_volumes = traits.Range( + low=0, usedefault=True, desc="Number of volumes at start of series to ignore" + ) failure_mode = traits.Enum( - 'error', 'NaN', + "error", + "NaN", usedefault=True, - desc='When no components are found or convergence fails, raise an error ' - 'or silently return columns of NaNs.') + desc="When no components are found or convergence fails, raise an error " + "or silently return columns of NaNs.", + ) class CompCorOutputSpec(TraitedSpec): components_file = File( - exists=True, desc='text file containing the noise components') - pre_filter_file = File(desc='text file containing high-pass filter basis') - metadata_file = File(desc='text file containing component metadata') + exists=True, desc="text file containing the noise components" + ) + pre_filter_file = File(desc="text file containing high-pass filter basis") + 
metadata_file = File(desc="text file containing component metadata") class CompCor(SimpleInterface): @@ -495,12 +554,14 @@ class CompCor(SimpleInterface): >>> ccinterface.inputs.regress_poly_degree = 2 """ + input_spec = CompCorInputSpec output_spec = CompCorOutputSpec - references_ = [{ - 'tags': ['method', 'implementation'], - 'entry': - BibTeX("""\ + references_ = [ + { + "tags": ["method", "implementation"], + "entry": BibTeX( + """\ @article{compcor_2007, title = {A component based noise correction method (CompCor) for BOLD and perfusion based}, volume = {37}, @@ -511,67 +572,82 @@ class CompCor(SimpleInterface): author = {Behzadi, Yashar and Restom, Khaled and Liau, Joy and Liu, Thomas T.}, year = {2007}, pages = {90-101} -}""")}] +}""" + ), + } + ] def __init__(self, *args, **kwargs): - ''' exactly the same as compcor except the header ''' + """ exactly the same as compcor except the header """ super(CompCor, self).__init__(*args, **kwargs) - self._header = 'CompCor' + self._header = "CompCor" def _run_interface(self, runtime): mask_images = [] if isdefined(self.inputs.mask_files): - mask_images = combine_mask_files(self.inputs.mask_files, - self.inputs.merge_method, - self.inputs.mask_index) + mask_images = combine_mask_files( + self.inputs.mask_files, self.inputs.merge_method, self.inputs.mask_index + ) if self.inputs.use_regress_poly: - self.inputs.pre_filter = 'polynomial' + self.inputs.pre_filter = "polynomial" # Degree 0 == remove mean; see compute_noise_components - degree = (self.inputs.regress_poly_degree - if self.inputs.pre_filter == 'polynomial' else 0) + degree = ( + self.inputs.regress_poly_degree + if self.inputs.pre_filter == "polynomial" + else 0 + ) imgseries = nb.load(self.inputs.realigned_file, mmap=NUMPY_MMAP) if len(imgseries.shape) != 4: - raise ValueError('{} expected a 4-D nifti file. Input {} has ' - '{} dimensions (shape {})'.format( - self._header, self.inputs.realigned_file, - len(imgseries.shape), imgseries.shape)) + raise ValueError( + "{} expected a 4-D nifti file. 
Input {} has " + "{} dimensions (shape {})".format( + self._header, + self.inputs.realigned_file, + len(imgseries.shape), + imgseries.shape, + ) + ) if len(mask_images) == 0: img = nb.Nifti1Image( np.ones(imgseries.shape[:3], dtype=np.bool), affine=imgseries.affine, - header=imgseries.header) + header=imgseries.header, + ) mask_images = [img] skip_vols = self.inputs.ignore_initial_volumes if skip_vols: imgseries = imgseries.__class__( - imgseries.get_data()[..., skip_vols:], imgseries.affine, - imgseries.header) + imgseries.get_data()[..., skip_vols:], + imgseries.affine, + imgseries.header, + ) mask_images = self._process_masks(mask_images, imgseries.get_data()) TR = 0 - if self.inputs.pre_filter == 'cosine': + if self.inputs.pre_filter == "cosine": if isdefined(self.inputs.repetition_time): TR = self.inputs.repetition_time else: # Derive TR from NIfTI header, if possible try: TR = imgseries.header.get_zooms()[3] - if imgseries.header.get_xyzt_units()[1] == 'msec': + if imgseries.header.get_xyzt_units()[1] == "msec": TR /= 1000 except (AttributeError, IndexError): TR = 0 if TR == 0: raise ValueError( - '{} cannot detect repetition time from image - ' - 'Set the repetition_time input'.format(self._header)) + "{} cannot detect repetition time from image - " + "Set the repetition_time input".format(self._header) + ) if isdefined(self.inputs.variance_threshold): components_criterion = self.inputs.variance_threshold @@ -579,91 +655,104 @@ def _run_interface(self, runtime): components_criterion = self.inputs.num_components else: components_criterion = 6 - IFLOGGER.warning('`num_components` and `variance_threshold` are ' - 'not defined. Setting number of components to 6 ' - 'for backward compatibility. Please set either ' - '`num_components` or `variance_threshold`, as ' - 'this feature may be deprecated in the future.') + IFLOGGER.warning( + "`num_components` and `variance_threshold` are " + "not defined. Setting number of components to 6 " + "for backward compatibility. Please set either " + "`num_components` or `variance_threshold`, as " + "this feature may be deprecated in the future." 
+ ) components, filter_basis, metadata = compute_noise_components( - imgseries.get_data(), mask_images, components_criterion, - self.inputs.pre_filter, degree, self.inputs.high_pass_cutoff, TR, - self.inputs.failure_mode, self.inputs.mask_names) + imgseries.get_data(), + mask_images, + components_criterion, + self.inputs.pre_filter, + degree, + self.inputs.high_pass_cutoff, + TR, + self.inputs.failure_mode, + self.inputs.mask_names, + ) if skip_vols: old_comp = components nrows = skip_vols + components.shape[0] - components = np.zeros( - (nrows, components.shape[1]), dtype=components.dtype) + components = np.zeros((nrows, components.shape[1]), dtype=components.dtype) components[skip_vols:] = old_comp - components_file = os.path.join(os.getcwd(), - self.inputs.components_file) + components_file = os.path.join(os.getcwd(), self.inputs.components_file) components_header = self._make_headers(components.shape[1]) np.savetxt( components_file, components, fmt=b"%.10f", - delimiter='\t', - header='\t'.join(components_header), - comments='') - self._results['components_file'] = os.path.join( - runtime.cwd, self.inputs.components_file) + delimiter="\t", + header="\t".join(components_header), + comments="", + ) + self._results["components_file"] = os.path.join( + runtime.cwd, self.inputs.components_file + ) save_pre_filter = False - if self.inputs.pre_filter in ['polynomial', 'cosine']: + if self.inputs.pre_filter in ["polynomial", "cosine"]: save_pre_filter = self.inputs.save_pre_filter if save_pre_filter: - self._results['pre_filter_file'] = save_pre_filter + self._results["pre_filter_file"] = save_pre_filter if save_pre_filter is True: - self._results['pre_filter_file'] = os.path.join( - runtime.cwd, 'pre_filter.tsv') + self._results["pre_filter_file"] = os.path.join( + runtime.cwd, "pre_filter.tsv" + ) - ftype = { - 'polynomial': 'Legendre', - 'cosine': 'Cosine' - }[self.inputs.pre_filter] + ftype = {"polynomial": "Legendre", "cosine": "Cosine"}[ + self.inputs.pre_filter + ] ncols = filter_basis.shape[1] if filter_basis.size > 0 else 0 - header = ['{}{:02d}'.format(ftype, i) for i in range(ncols)] + header = ["{}{:02d}".format(ftype, i) for i in range(ncols)] if skip_vols: old_basis = filter_basis # nrows defined above filter_basis = np.zeros( - (nrows, ncols + skip_vols), dtype=filter_basis.dtype) + (nrows, ncols + skip_vols), dtype=filter_basis.dtype + ) if old_basis.size > 0: filter_basis[skip_vols:, :ncols] = old_basis filter_basis[:skip_vols, -skip_vols:] = np.eye(skip_vols) - header.extend([ - 'NonSteadyStateOutlier{:02d}'.format(i) - for i in range(skip_vols) - ]) + header.extend( + ["NonSteadyStateOutlier{:02d}".format(i) for i in range(skip_vols)] + ) np.savetxt( - self._results['pre_filter_file'], + self._results["pre_filter_file"], filter_basis, - fmt=b'%.10f', - delimiter='\t', - header='\t'.join(header), - comments='') + fmt=b"%.10f", + delimiter="\t", + header="\t".join(header), + comments="", + ) metadata_file = self.inputs.save_metadata if metadata_file: - self._results['metadata_file'] = metadata_file + self._results["metadata_file"] = metadata_file if metadata_file is True: - self._results['metadata_file'] = ( - os.path.join(runtime.cwd, 'component_metadata.tsv')) - components_names = np.empty(len(metadata['mask']), - dtype='object_') - retained = np.where(metadata['retained']) - not_retained = np.where(np.logical_not(metadata['retained'])) + self._results["metadata_file"] = os.path.join( + runtime.cwd, "component_metadata.tsv" + ) + components_names = 
np.empty(len(metadata["mask"]), dtype="object_") + retained = np.where(metadata["retained"]) + not_retained = np.where(np.logical_not(metadata["retained"])) components_names[retained] = components_header - components_names[not_retained] = ([ - 'dropped{}'.format(i) for i in range(len(not_retained[0]))]) - with open(self._results['metadata_file'], 'w') as f: - f.write('\t'.join(['component'] + list(metadata.keys())) + '\n') + components_names[not_retained] = [ + "dropped{}".format(i) for i in range(len(not_retained[0])) + ] + with open(self._results["metadata_file"], "w") as f: + f.write("\t".join(["component"] + list(metadata.keys())) + "\n") for i in zip(components_names, *metadata.values()): - f.write('{0[0]}\t{0[1]}\t{0[2]:.10f}\t' - '{0[3]:.10f}\t{0[4]:.10f}\t{0[5]}\n'.format(i)) + f.write( + "{0[0]}\t{0[1]}\t{0[2]:.10f}\t" + "{0[3]:.10f}\t{0[4]:.10f}\t{0[5]}\n".format(i) + ) return runtime @@ -671,9 +760,12 @@ def _process_masks(self, mask_images, timeseries=None): return mask_images def _make_headers(self, num_col): - header = self.inputs.header_prefix if \ - isdefined(self.inputs.header_prefix) else self._header - headers = ['{}{:02d}'.format(header, i) for i in range(num_col)] + header = ( + self.inputs.header_prefix + if isdefined(self.inputs.header_prefix) + else self._header + ) + headers = ["{}{:02d}".format(header, i) for i in range(num_col)] return headers @@ -685,35 +777,35 @@ class ACompCor(CompCor): """ def __init__(self, *args, **kwargs): - ''' exactly the same as compcor except the header ''' + """ exactly the same as compcor except the header """ super(ACompCor, self).__init__(*args, **kwargs) - self._header = 'aCompCor' + self._header = "aCompCor" class TCompCorInputSpec(CompCorInputSpec): # and all the fields in CompCorInputSpec percentile_threshold = traits.Range( - low=0., - high=1., - value=.02, + low=0.0, + high=1.0, + value=0.02, exclude_low=True, exclude_high=True, usedefault=True, - desc='the percentile ' - 'used to select highest-variance ' - 'voxels, represented by a number ' - 'between 0 and 1, exclusive. By ' - 'default, this value is set to .02. ' - 'That is, the 2% of voxels ' - 'with the highest variance are used.') + desc="the percentile " + "used to select highest-variance " + "voxels, represented by a number " + "between 0 and 1, exclusive. By " + "default, this value is set to .02. " + "That is, the 2% of voxels " + "with the highest variance are used.", + ) class TCompCorOutputSpec(CompCorOutputSpec): # and all the fields in CompCorOutputSpec high_variance_masks = OutputMultiPath( - File(exists=True), - desc=(("voxels exceeding the variance" - " threshold"))) + File(exists=True), desc=(("voxels exceeding the variance" " threshold")) + ) class TCompCor(CompCor): @@ -737,9 +829,9 @@ class TCompCor(CompCor): output_spec = TCompCorOutputSpec def __init__(self, *args, **kwargs): - ''' exactly the same as compcor except the header ''' + """ exactly the same as compcor except the header """ super(TCompCor, self).__init__(*args, **kwargs) - self._header = 'tCompCor' + self._header = "tCompCor" self._mask_files = [] def _process_masks(self, mask_images, timeseries=None): @@ -752,25 +844,27 @@ def _process_masks(self, mask_images, timeseries=None): tSTD = _compute_tSTD(imgseries, 0, axis=-1) threshold_std = np.percentile( tSTD, - np.round(100. * - (1. 
- self.inputs.percentile_threshold)).astype(int)) + np.round(100.0 * (1.0 - self.inputs.percentile_threshold)).astype(int), + ) mask_data = np.zeros_like(mask) mask_data[mask != 0] = tSTD >= threshold_std - out_image = nb.Nifti1Image( - mask_data, affine=img.affine, header=img.header) + out_image = nb.Nifti1Image(mask_data, affine=img.affine, header=img.header) # save mask - mask_file = os.path.abspath('mask_{:03d}.nii.gz'.format(i)) + mask_file = os.path.abspath("mask_{:03d}.nii.gz".format(i)) out_image.to_filename(mask_file) - IFLOGGER.debug('tCompcor computed and saved mask of shape %s to ' - 'mask_file %s', str(mask.shape), mask_file) + IFLOGGER.debug( + "tCompcor computed and saved mask of shape %s to " "mask_file %s", + str(mask.shape), + mask_file, + ) self._mask_files.append(mask_file) out_images.append(out_image) return out_images def _list_outputs(self): outputs = super(TCompCor, self)._list_outputs() - outputs['high_variance_masks'] = self._mask_files + outputs["high_variance_masks"] = self._mask_files return outputs @@ -778,35 +872,31 @@ class TSNRInputSpec(BaseInterfaceInputSpec): in_file = InputMultiPath( File(exists=True), mandatory=True, - desc='realigned 4D file or a list of 3D files') - regress_poly = traits.Range(low=1, desc='Remove polynomials') + desc="realigned 4D file or a list of 3D files", + ) + regress_poly = traits.Range(low=1, desc="Remove polynomials") tsnr_file = File( - 'tsnr.nii.gz', - usedefault=True, - hash_files=False, - desc='output tSNR file') + "tsnr.nii.gz", usedefault=True, hash_files=False, desc="output tSNR file" + ) mean_file = File( - 'mean.nii.gz', - usedefault=True, - hash_files=False, - desc='output mean file') + "mean.nii.gz", usedefault=True, hash_files=False, desc="output mean file" + ) stddev_file = File( - 'stdev.nii.gz', - usedefault=True, - hash_files=False, - desc='output tSNR file') + "stdev.nii.gz", usedefault=True, hash_files=False, desc="output tSNR file" + ) detrended_file = File( - 'detrend.nii.gz', + "detrend.nii.gz", usedefault=True, hash_files=False, - desc='input file after detrending') + desc="input file after detrending", + ) class TSNROutputSpec(TraitedSpec): - tsnr_file = File(exists=True, desc='tsnr image file') - mean_file = File(exists=True, desc='mean image file') - stddev_file = File(exists=True, desc='std dev image file') - detrended_file = File(desc='detrended input file') + tsnr_file = File(exists=True, desc="tsnr image file") + mean_file = File(exists=True, desc="mean image file") + stddev_file = File(exists=True, desc="std dev image file") + detrended_file = File(desc="detrended input file") class TSNR(BaseInterface): @@ -823,6 +913,7 @@ class TSNR(BaseInterface): >>> res = tsnr.run() # doctest: +SKIP """ + input_spec = TSNRInputSpec output_spec = TSNROutputSpec @@ -830,33 +921,29 @@ def _run_interface(self, runtime): img = nb.load(self.inputs.in_file[0], mmap=NUMPY_MMAP) header = img.header.copy() vollist = [ - nb.load(filename, mmap=NUMPY_MMAP) - for filename in self.inputs.in_file + nb.load(filename, mmap=NUMPY_MMAP) for filename in self.inputs.in_file ] data = np.concatenate( - [ - vol.get_data().reshape(vol.shape[:3] + (-1, )) - for vol in vollist - ], - axis=3) + [vol.get_data().reshape(vol.shape[:3] + (-1,)) for vol in vollist], axis=3 + ) data = np.nan_to_num(data) - if data.dtype.kind == 'i': + if data.dtype.kind == "i": header.set_data_dtype(np.float32) data = data.astype(np.float32) if isdefined(self.inputs.regress_poly): - data = regress_poly( - self.inputs.regress_poly, data, remove_mean=False)[0] + 
data = regress_poly(self.inputs.regress_poly, data, remove_mean=False)[0] img = nb.Nifti1Image(data, img.affine, header) nb.save(img, op.abspath(self.inputs.detrended_file)) meanimg = np.mean(data, axis=3) stddevimg = np.std(data, axis=3) tsnr = np.zeros_like(meanimg) - stddevimg_nonzero = stddevimg > 1.e-3 - tsnr[stddevimg_nonzero] = meanimg[stddevimg_nonzero] / stddevimg[ - stddevimg_nonzero] + stddevimg_nonzero = stddevimg > 1.0e-3 + tsnr[stddevimg_nonzero] = ( + meanimg[stddevimg_nonzero] / stddevimg[stddevimg_nonzero] + ) img = nb.Nifti1Image(tsnr, img.affine, header) nb.save(img, op.abspath(self.inputs.tsnr_file)) img = nb.Nifti1Image(meanimg, img.affine, header) @@ -867,21 +954,23 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self._outputs().get() - for k in ['tsnr_file', 'mean_file', 'stddev_file']: + for k in ["tsnr_file", "mean_file", "stddev_file"]: outputs[k] = op.abspath(getattr(self.inputs, k)) if isdefined(self.inputs.regress_poly): - outputs['detrended_file'] = op.abspath(self.inputs.detrended_file) + outputs["detrended_file"] = op.abspath(self.inputs.detrended_file) return outputs class NonSteadyStateDetectorInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc='4D NIFTI EPI file') + in_file = File(exists=True, mandatory=True, desc="4D NIFTI EPI file") class NonSteadyStateDetectorOutputSpec(TraitedSpec): - n_volumes_to_discard = traits.Int(desc='Number of non-steady state volumes' - 'detected in the beginning of the scan.') + n_volumes_to_discard = traits.Int( + desc="Number of non-steady state volumes" + "detected in the beginning of the scan." + ) class NonSteadyStateDetector(BaseInterface): @@ -895,10 +984,11 @@ class NonSteadyStateDetector(BaseInterface): def _run_interface(self, runtime): in_nii = nb.load(self.inputs.in_file) - global_signal = in_nii.get_data()[:, :, :, :50].mean(axis=0).mean( - axis=0).mean(axis=0) + global_signal = ( + in_nii.get_data()[:, :, :, :50].mean(axis=0).mean(axis=0).mean(axis=0) + ) - self._results = {'n_volumes_to_discard': is_outlier(global_signal)} + self._results = {"n_volumes_to_discard": is_outlier(global_signal)} return runtime @@ -906,10 +996,9 @@ def _list_outputs(self): return self._results -def compute_dvars(in_file, - in_mask, - remove_zerovariance=False, - intensity_normalization=1000): +def compute_dvars( + in_file, in_mask, remove_zerovariance=False, intensity_normalization=1000 +): """ Compute the :abbr:`DVARS (D referring to temporal derivative of timecourses, VARS referring to RMS variance over voxels)` @@ -957,18 +1046,19 @@ def compute_dvars(in_file, # Robust standard deviation (we are using "lower" interpolation # because this is what FSL is doing - func_sd = (np.percentile(mfunc, 75, axis=1, interpolation="lower") - - np.percentile(mfunc, 25, axis=1, interpolation="lower")) / 1.349 + func_sd = ( + np.percentile(mfunc, 75, axis=1, interpolation="lower") + - np.percentile(mfunc, 25, axis=1, interpolation="lower") + ) / 1.349 if remove_zerovariance: mfunc = mfunc[func_sd != 0, :] func_sd = func_sd[func_sd != 0] # Compute (non-robust) estimate of lag-1 autocorrelation - ar1 = np.apply_along_axis(AR_est_YW, 1, - regress_poly(0, mfunc, - remove_mean=True)[0].astype( - np.float32), 1)[:, 0] + ar1 = np.apply_along_axis( + AR_est_YW, 1, regress_poly(0, mfunc, remove_mean=True)[0].astype(np.float32), 1 + )[:, 0] # Compute (predicted) standard deviation of temporal difference time series diff_sdhat = np.squeeze(np.sqrt(((1 - ar1) * 2).tolist())) * func_sd @@ -984,28 
+1074,25 @@ def compute_dvars(in_file, dvars_stdz = dvars_nstd / diff_sd_mean with warnings.catch_warnings(): # catch, e.g., divide by zero errors - warnings.filterwarnings('error') + warnings.filterwarnings("error") # voxelwise standardization diff_vx_stdz = np.square( - func_diff / np.array([diff_sdhat] * func_diff.shape[-1]).T) + func_diff / np.array([diff_sdhat] * func_diff.shape[-1]).T + ) dvars_vx_stdz = np.sqrt(diff_vx_stdz.mean(axis=0)) return (dvars_stdz, dvars_nstd, dvars_vx_stdz) -def plot_confound(tseries, - figsize, - name, - units=None, - series_tr=None, - normalize=False): +def plot_confound(tseries, figsize, name, units=None, series_tr=None, normalize=False): """ A helper function to plot :abbr:`fMRI (functional MRI)` confounds. """ import matplotlib - matplotlib.use(config.get('execution', 'matplotlib_backend')) + + matplotlib.use(config.get("execution", "matplotlib_backend")) import matplotlib.pyplot as plt from matplotlib.gridspec import GridSpec from matplotlib.backends.backend_pdf import FigureCanvasPdf as FigureCanvas @@ -1024,18 +1111,18 @@ def plot_confound(tseries, ax.set_xlim((0, len(tseries))) ylabel = name if units is not None: - ylabel += (' speed [{}/s]' if normalize else ' [{}]').format(units) + ylabel += (" speed [{}/s]" if normalize else " [{}]").format(units) ax.set_ylabel(ylabel) - xlabel = 'Frame #' + xlabel = "Frame #" if series_tr is not None: - xlabel = 'Frame # ({} sec TR)'.format(series_tr) + xlabel = "Frame # ({} sec TR)".format(series_tr) ax.set_xlabel(xlabel) ylim = ax.get_ylim() ax = fig.add_subplot(grid[0, -1]) sns.distplot(tseries, vertical=True, ax=ax) - ax.set_xlabel('Frames') + ax.set_xlabel("Frames") ax.set_ylim(ylim) ax.set_yticklabels([]) return fig @@ -1063,7 +1150,7 @@ def is_outlier(points, thresh=3.5): if len(points.shape) == 1: points = points[:, None] median = np.median(points, axis=0) - diff = np.sum((points - median)**2, axis=-1) + diff = np.sum((points - median) ** 2, axis=-1) diff = np.sqrt(diff) med_abs_deviation = np.median(diff) @@ -1079,11 +1166,12 @@ def is_outlier(points, thresh=3.5): return timepoints_to_discard -def cosine_filter(data, timestep, period_cut, remove_mean=True, axis=-1, - failure_mode='error'): +def cosine_filter( + data, timestep, period_cut, remove_mean=True, axis=-1, failure_mode="error" +): datashape = data.shape timepoints = datashape[axis] - if datashape[0] == 0 and failure_mode != 'error': + if datashape[0] == 0 and failure_mode != "error": return data, np.array([]) data = data.reshape((-1, timepoints)) @@ -1103,8 +1191,7 @@ def cosine_filter(data, timestep, period_cut, remove_mean=True, axis=-1, return residuals.reshape(datashape), non_constant_regressors -def regress_poly(degree, data, remove_mean=True, axis=-1, - failure_mode='error'): +def regress_poly(degree, data, remove_mean=True, axis=-1, failure_mode="error"): """ Returns data with degree polynomial regressed out. 
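Editor's note: for orientation, the standalone sketch below illustrates the idea behind this helper — least-squares removal of a Legendre-polynomial trend from each time series. It is illustrative only; the function and variable names are made up here and do not reproduce the nipype implementation.

    import numpy as np

    def detrend_poly_sketch(degree, data):
        """Remove a Legendre polynomial trend of the given degree from each
        row of ``data`` (voxels x timepoints) and return the residuals."""
        n_timepoints = data.shape[-1]
        # Legendre design matrix on a [-1, 1] time axis
        # (columns: constant, linear, ..., degree).
        basis = np.polynomial.legendre.legvander(
            np.linspace(-1, 1, n_timepoints), degree
        )
        # Ordinary least-squares fit per voxel, then subtract the fitted trend.
        betas, *_ = np.linalg.lstsq(basis, data.T, rcond=None)
        return data - (basis @ betas).T

    # Example: a linear drift plus noise is removed by a degree-1 fit.
    rng = np.random.default_rng(0)
    series = 0.05 * np.arange(120) + rng.normal(size=(3, 120))
    residuals = detrend_poly_sketch(1, series)
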
@@ -1112,12 +1199,13 @@ def regress_poly(degree, data, remove_mean=True, axis=-1, :param int axis: numpy array axes along which regression is performed """ - IFLOGGER.debug('Performing polynomial regression on data of shape %s', - str(data.shape)) + IFLOGGER.debug( + "Performing polynomial regression on data of shape %s", str(data.shape) + ) datashape = data.shape timepoints = datashape[axis] - if datashape[0] == 0 and failure_mode != 'error': + if datashape[0] == 0 and failure_mode != "error": return data, np.array([]) # Rearrange all voxel-wise time-series in rows @@ -1166,21 +1254,28 @@ def combine_mask_files(mask_files, mask_method=None, mask_index=None): if len(mask_files) == 1: mask_index = 0 else: - raise ValueError(('When more than one mask file is provided, ' - 'one of merge_method or mask_index must be ' - 'set')) + raise ValueError( + ( + "When more than one mask file is provided, " + "one of merge_method or mask_index must be " + "set" + ) + ) if mask_index < len(mask_files): mask = nb.load(mask_files[mask_index], mmap=NUMPY_MMAP) return [mask] - raise ValueError(('mask_index {0} must be less than number of mask ' - 'files {1}').format(mask_index, len(mask_files))) + raise ValueError( + ("mask_index {0} must be less than number of mask " "files {1}").format( + mask_index, len(mask_files) + ) + ) masks = [] - if mask_method == 'none': + if mask_method == "none": for filename in mask_files: masks.append(nb.load(filename, mmap=NUMPY_MMAP)) return masks - if mask_method == 'union': + if mask_method == "union": mask = None for filename in mask_files: img = nb.load(filename, mmap=NUMPY_MMAP) @@ -1190,7 +1285,7 @@ def combine_mask_files(mask_files, mask_method=None, mask_index=None): img = nb.Nifti1Image(mask, img.affine, header=img.header) return [img] - if mask_method == 'intersect': + if mask_method == "intersect": mask = None for filename in mask_files: img = nb.load(filename, mmap=NUMPY_MMAP) @@ -1201,10 +1296,17 @@ def combine_mask_files(mask_files, mask_method=None, mask_index=None): return [img] -def compute_noise_components(imgseries, mask_images, components_criterion=0.5, - filter_type=False, degree=0, period_cut=128, - repetition_time=None, failure_mode='error', - mask_names=None): +def compute_noise_components( + imgseries, + mask_images, + components_criterion=0.5, + filter_type=False, + degree=0, + period_cut=128, + repetition_time=None, + failure_mode="error", + mask_names=None, +): """Compute the noise components from the imgseries for each mask Parameters @@ -1260,7 +1362,7 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, cumulative explained variances. 
""" basis = np.array([]) - if components_criterion == 'all': + if components_criterion == "all": components_criterion = -1 mask_names = mask_names or range(len(mask_images)) @@ -1275,9 +1377,11 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, mask = nb.squeeze_image(img).get_data().astype(np.bool) if imgseries.shape[:3] != mask.shape: raise ValueError( - 'Inputs for CompCor, timeseries and mask, do not have ' - 'matching spatial dimensions ({} and {}, respectively)'.format( - imgseries.shape[:3], mask.shape)) + "Inputs for CompCor, timeseries and mask, do not have " + "matching spatial dimensions ({} and {}, respectively)".format( + imgseries.shape[:3], mask.shape + ) + ) voxel_timecourses = imgseries[mask, :] @@ -1286,19 +1390,22 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, # Currently support Legendre-polynomial or cosine or detrending # With no filter, the mean is nonetheless removed (poly w/ degree 0) - if filter_type == 'cosine': + if filter_type == "cosine": if repetition_time is None: - raise ValueError( - 'Repetition time must be provided for cosine filter') + raise ValueError("Repetition time must be provided for cosine filter") voxel_timecourses, basis = cosine_filter( - voxel_timecourses, repetition_time, period_cut, - failure_mode=failure_mode) - elif filter_type in ('polynomial', False): + voxel_timecourses, + repetition_time, + period_cut, + failure_mode=failure_mode, + ) + elif filter_type in ("polynomial", False): # from paper: # "The constant and linear trends of the columns in the matrix M were # removed [prior to ...]" - voxel_timecourses, basis = regress_poly(degree, voxel_timecourses, - failure_mode=failure_mode) + voxel_timecourses, basis = regress_poly( + degree, voxel_timecourses, failure_mode=failure_mode + ) # "Voxel time series from the noise ROI (either anatomical or tSTD) were # placed in a matrix M of size Nxm, with time along the row dimension @@ -1306,19 +1413,20 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, M = voxel_timecourses.T # "[... were removed] prior to column-wise variance normalization." - M = M / _compute_tSTD(M, 1.) + M = M / _compute_tSTD(M, 1.0) # "The covariance matrix C = MMT was constructed and decomposed into its # principal components using a singular value decomposition." 
try: u, s, _ = fallback_svd(M, full_matrices=False) except (np.linalg.LinAlgError, ValueError): - if failure_mode == 'error': + if failure_mode == "error": raise s = np.full(M.shape[0], np.nan, dtype=np.float32) if components_criterion >= 1: - u = np.full((M.shape[0], components_criterion), - np.nan, dtype=np.float32) + u = np.full( + (M.shape[0], components_criterion), np.nan, dtype=np.float32 + ) else: u = np.full((M.shape[0], 1), np.nan, dtype=np.float32) @@ -1327,8 +1435,9 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, num_components = int(components_criterion) if 0 < components_criterion < 1: - num_components = np.searchsorted(cumulative_variance_explained, - components_criterion) + 1 + num_components = ( + np.searchsorted(cumulative_variance_explained, components_criterion) + 1 + ) elif components_criterion == -1: num_components = len(s) @@ -1346,18 +1455,19 @@ def compute_noise_components(imgseries, mask_images, components_criterion=0.5, if len(components) > 0: components = np.hstack(components) else: - if failure_mode == 'error': - raise ValueError('No components found') - components = np.full((M.shape[0], num_components), - np.nan, dtype=np.float32) - - metadata = OrderedDict([ - ('mask', list(chain(*md_mask))), - ('singular_value', np.hstack(md_sv)), - ('variance_explained', np.hstack(md_var)), - ('cumulative_variance_explained', np.hstack(md_cumvar)), - ('retained', list(chain(*md_retained))) - ]) + if failure_mode == "error": + raise ValueError("No components found") + components = np.full((M.shape[0], num_components), np.nan, dtype=np.float32) + + metadata = OrderedDict( + [ + ("mask", list(chain(*md_mask))), + ("singular_value", np.hstack(md_sv)), + ("variance_explained", np.hstack(md_var)), + ("cumulative_variance_explained", np.hstack(md_cumvar)), + ("retained", list(chain(*md_retained))), + ] + ) return components, basis, metadata @@ -1395,7 +1505,7 @@ def _cosine_drift(period_cut, frametimes): """ len_tim = len(frametimes) n_times = np.arange(len_tim) - hfcut = 1. / period_cut # input parameter is the period + hfcut = 1.0 / period_cut # input parameter is the period # frametimes.max() should be (len_tim-1)*dt dt = frametimes[1] - frametimes[0] @@ -1406,10 +1516,9 @@ def _cosine_drift(period_cut, frametimes): nfct = np.sqrt(2.0 / len_tim) for k in range(1, order): - cdrift[:, k - 1] = nfct * np.cos( - (np.pi / len_tim) * (n_times + .5) * k) + cdrift[:, k - 1] = nfct * np.cos((np.pi / len_tim) * (n_times + 0.5) * k) - cdrift[:, order - 1] = 1. 
# or 1./sqrt(len_tim) to normalize + cdrift[:, order - 1] = 1.0 # or 1./sqrt(len_tim) to normalize return cdrift @@ -1433,7 +1542,7 @@ def _full_rank(X, cmax=1e15): c = smax / smin if c < cmax: return X, c - IFLOGGER.warning('Matrix is singular at working precision, regularizing...') + IFLOGGER.warning("Matrix is singular at working precision, regularizing...") lda = (smax - cmax * smin) / (cmax - 1) s = s + lda X = np.dot(U, np.dot(np.diag(s), V)) diff --git a/nipype/algorithms/icc.py b/nipype/algorithms/icc.py index ff57cc1fd4..a3ae57edf8 100644 --- a/nipype/algorithms/icc.py +++ b/nipype/algorithms/icc.py @@ -4,8 +4,13 @@ from numpy import ones, kron, mean, eye, hstack, dot, tile from numpy.linalg import pinv import nibabel as nb -from ..interfaces.base import BaseInterfaceInputSpec, TraitedSpec, \ - BaseInterface, traits, File +from ..interfaces.base import ( + BaseInterfaceInputSpec, + TraitedSpec, + BaseInterface, + traits, + File, +) from ..utils import NUMPY_MMAP @@ -13,7 +18,8 @@ class ICCInputSpec(BaseInterfaceInputSpec): subjects_sessions = traits.List( traits.List(File(exists=True)), desc="n subjects m sessions 3D stat files", - mandatory=True) + mandatory=True, + ) mask = File(exists=True, mandatory=True) @@ -24,27 +30,28 @@ class ICCOutputSpec(TraitedSpec): class ICC(BaseInterface): - ''' + """ Calculates Interclass Correlation Coefficient (3,1) as defined in P. E. Shrout & Joseph L. Fleiss (1979). "Intraclass Correlations: Uses in Assessing Rater Reliability". Psychological Bulletin 86 (2): 420-428. This particular implementation is aimed at relaibility (test-retest) studies. - ''' + """ + input_spec = ICCInputSpec output_spec = ICCOutputSpec def _run_interface(self, runtime): maskdata = nb.load(self.inputs.mask).get_data() - maskdata = np.logical_not( - np.logical_or(maskdata == 0, np.isnan(maskdata))) - - session_datas = [[ - nb.load(fname, mmap=NUMPY_MMAP).get_data()[maskdata].reshape( - -1, 1) for fname in sessions - ] for sessions in self.inputs.subjects_sessions] - list_of_sessions = [ - np.dstack(session_data) for session_data in session_datas + maskdata = np.logical_not(np.logical_or(maskdata == 0, np.isnan(maskdata))) + + session_datas = [ + [ + nb.load(fname, mmap=NUMPY_MMAP).get_data()[maskdata].reshape(-1, 1) + for fname in sessions + ] + for sessions in self.inputs.subjects_sessions ] + list_of_sessions = [np.dstack(session_data) for session_data in session_datas] all_data = np.hstack(list_of_sessions) icc = np.zeros(session_datas[0][0].shape) session_F = np.zeros(session_datas[0][0].shape) @@ -53,44 +60,45 @@ def _run_interface(self, runtime): for x in range(icc.shape[0]): Y = all_data[x, :, :] - icc[x], subject_var[x], session_var[x], session_F[ - x], _, _ = ICC_rep_anova(Y) + icc[x], subject_var[x], session_var[x], session_F[x], _, _ = ICC_rep_anova( + Y + ) nim = nb.load(self.inputs.subjects_sessions[0][0]) new_data = np.zeros(nim.shape) - new_data[maskdata] = icc.reshape(-1, ) + new_data[maskdata] = icc.reshape(-1,) new_img = nb.Nifti1Image(new_data, nim.affine, nim.header) - nb.save(new_img, 'icc_map.nii') + nb.save(new_img, "icc_map.nii") new_data = np.zeros(nim.shape) - new_data[maskdata] = session_var.reshape(-1, ) + new_data[maskdata] = session_var.reshape(-1,) new_img = nb.Nifti1Image(new_data, nim.affine, nim.header) - nb.save(new_img, 'session_var_map.nii') + nb.save(new_img, "session_var_map.nii") new_data = np.zeros(nim.shape) - new_data[maskdata] = subject_var.reshape(-1, ) + new_data[maskdata] = subject_var.reshape(-1,) new_img = 
nb.Nifti1Image(new_data, nim.affine, nim.header) - nb.save(new_img, 'subject_var_map.nii') + nb.save(new_img, "subject_var_map.nii") return runtime def _list_outputs(self): outputs = self._outputs().get() - outputs['icc_map'] = os.path.abspath('icc_map.nii') - outputs['session_var_map'] = os.path.abspath('session_var_map.nii') - outputs['subject_var_map'] = os.path.abspath('subject_var_map.nii') + outputs["icc_map"] = os.path.abspath("icc_map.nii") + outputs["session_var_map"] = os.path.abspath("session_var_map.nii") + outputs["subject_var_map"] = os.path.abspath("subject_var_map.nii") return outputs def ICC_rep_anova(Y): - ''' + """ the data Y are entered as a 'table' ie subjects are in rows and repeated measures in columns One Sample Repeated measure ANOVA Y = XB + E with X = [FaTor / Subjects] - ''' + """ [nb_subjects, nb_conditions] = Y.shape dfc = nb_conditions - 1 @@ -102,7 +110,7 @@ def ICC_rep_anova(Y): # Sum Square Total mean_Y = mean(Y) - SST = ((Y - mean_Y)**2).sum() + SST = ((Y - mean_Y) ** 2).sum() # create the design matrix for the different levels x = kron(eye(nb_conditions), ones((nb_subjects, 1))) # sessions @@ -110,16 +118,16 @@ def ICC_rep_anova(Y): X = hstack([x, x0]) # Sum Square Error - predicted_Y = dot(dot(dot(X, pinv(dot(X.T, X))), X.T), Y.flatten('F')) - residuals = Y.flatten('F') - predicted_Y - SSE = (residuals**2).sum() + predicted_Y = dot(dot(dot(X, pinv(dot(X.T, X))), X.T), Y.flatten("F")) + residuals = Y.flatten("F") - predicted_Y + SSE = (residuals ** 2).sum() residuals.shape = Y.shape MSE = SSE / dfe # Sum square session effect - between colums/sessions - SSC = ((mean(Y, 0) - mean_Y)**2).sum() * nb_subjects + SSC = ((mean(Y, 0) - mean_Y) ** 2).sum() * nb_subjects MSC = SSC / dfc / nb_subjects session_effect_F = MSC / MSE diff --git a/nipype/algorithms/mesh.py b/nipype/algorithms/mesh.py index 2d5c7d2d49..3732f88548 100644 --- a/nipype/algorithms/mesh.py +++ b/nipype/algorithms/mesh.py @@ -9,11 +9,17 @@ from numpy import linalg as nla from .. 
import logging -from ..interfaces.base import (BaseInterface, traits, TraitedSpec, File, - BaseInterfaceInputSpec) +from ..interfaces.base import ( + BaseInterface, + traits, + TraitedSpec, + File, + BaseInterfaceInputSpec, +) from ..interfaces.vtkbase import tvtk from ..interfaces import vtkbase as VTKInfo -IFLOGGER = logging.getLogger('nipype.interface') + +IFLOGGER = logging.getLogger("nipype.interface") class TVTKBaseInterface(BaseInterface): @@ -23,34 +29,34 @@ class TVTKBaseInterface(BaseInterface): def __init__(self, **inputs): if VTKInfo.no_tvtk(): - raise ImportError('This interface requires tvtk to run.') + raise ImportError("This interface requires tvtk to run.") super(TVTKBaseInterface, self).__init__(**inputs) class WarpPointsInputSpec(BaseInterfaceInputSpec): - points = File( - exists=True, mandatory=True, desc='file containing the point set') + points = File(exists=True, mandatory=True, desc="file containing the point set") warp = File( - exists=True, - mandatory=True, - desc='dense deformation field to be applied') + exists=True, mandatory=True, desc="dense deformation field to be applied" + ) interp = traits.Enum( - 'cubic', - 'nearest', - 'linear', + "cubic", + "nearest", + "linear", usedefault=True, mandatory=True, - desc='interpolation') + desc="interpolation", + ) out_points = File( - name_source='points', - name_template='%s_warped', - output_name='out_points', + name_source="points", + name_template="%s_warped", + output_name="out_points", keep_extension=True, - desc='the warped point set') + desc="the warped point set", + ) class WarpPointsOutputSpec(TraitedSpec): - out_points = File(desc='the warped point set') + out_points = File(desc="the warped point set") class WarpPoints(TVTKBaseInterface): @@ -70,22 +76,23 @@ class WarpPoints(TVTKBaseInterface): res = wp.run() """ + input_spec = WarpPointsInputSpec output_spec = WarpPointsOutputSpec - def _gen_fname(self, in_file, suffix='generated', ext=None): + def _gen_fname(self, in_file, suffix="generated", ext=None): fname, fext = op.splitext(op.basename(in_file)) - if fext == '.gz': + if fext == ".gz": fname, fext2 = op.splitext(fname) fext = fext2 + fext if ext is None: ext = fext - if ext[0] == '.': + if ext[0] == ".": ext = ext[1:] - return op.abspath('%s_%s.%s' % (fname, suffix, ext)) + return op.abspath("%s_%s.%s" % (fname, suffix, ext)) def _run_interface(self, runtime): import nibabel as nb @@ -111,7 +118,7 @@ def _run_interface(self, runtime): warp = ndimage.map_coordinates(wdata, voxpoints.transpose()) else: - warp = np.zeros((points.shape[0], )) + warp = np.zeros((points.shape[0],)) warps.append(warp) @@ -120,15 +127,15 @@ def _run_interface(self, runtime): mesh.points = newpoints w = tvtk.PolyDataWriter() VTKInfo.configure_input_data(w, mesh) - w.file_name = self._gen_fname( - self.inputs.points, suffix='warped', ext='.vtk') + w.file_name = self._gen_fname(self.inputs.points, suffix="warped", ext=".vtk") w.write() return runtime def _list_outputs(self): outputs = self._outputs().get() - outputs['out_points'] = self._gen_fname( - self.inputs.points, suffix='warped', ext='.vtk') + outputs["out_points"] = self._gen_fname( + self.inputs.points, suffix="warped", ext=".vtk" + ) return outputs @@ -136,43 +143,46 @@ class ComputeMeshWarpInputSpec(BaseInterfaceInputSpec): surface1 = File( exists=True, mandatory=True, - desc=('Reference surface (vtk format) to which compute ' - 'distance.')) + desc=("Reference surface (vtk format) to which compute " "distance."), + ) surface2 = File( exists=True, mandatory=True, - 
desc=('Test surface (vtk format) from which compute ' - 'distance.')) + desc=("Test surface (vtk format) from which compute " "distance."), + ) metric = traits.Enum( - 'euclidean', - 'sqeuclidean', - usedefault=True, - desc='norm used to report distance') + "euclidean", "sqeuclidean", usedefault=True, desc="norm used to report distance" + ) weighting = traits.Enum( - 'none', - 'area', + "none", + "area", usedefault=True, - desc=('"none": no weighting is performed, surface": edge distance is ' - 'weighted by the corresponding surface area')) + desc=( + '"none": no weighting is performed, surface": edge distance is ' + "weighted by the corresponding surface area" + ), + ) out_warp = File( - 'surfwarp.vtk', + "surfwarp.vtk", usedefault=True, - desc='vtk file based on surface1 and warpings mapping it ' - 'to surface2') + desc="vtk file based on surface1 and warpings mapping it " "to surface2", + ) out_file = File( - 'distance.npy', + "distance.npy", usedefault=True, - desc='numpy file keeping computed distances and weights') + desc="numpy file keeping computed distances and weights", + ) class ComputeMeshWarpOutputSpec(TraitedSpec): distance = traits.Float(desc="computed distance") out_warp = File( exists=True, - desc=('vtk file with the vertex-wise ' - 'mapping of surface1 to surface2')) + desc=("vtk file with the vertex-wise " "mapping of surface1 to surface2"), + ) out_file = File( - exists=True, desc='numpy file keeping computed distances and weights') + exists=True, desc="numpy file keeping computed distances and weights" + ) class ComputeMeshWarp(TVTKBaseInterface): @@ -216,7 +226,7 @@ def _run_interface(self, runtime): vtk2 = VTKInfo.vtk_output(r2) r1.update() r2.update() - assert (len(vtk1.points) == len(vtk2.points)) + assert len(vtk1.points) == len(vtk2.points) points1 = np.array(vtk1.points) points2 = np.array(vtk2.points) @@ -229,10 +239,10 @@ def _run_interface(self, runtime): except TypeError: # numpy < 1.9 errvector = np.apply_along_axis(nla.norm, 1, diff) - if self.inputs.metric == 'sqeuclidean': + if self.inputs.metric == "sqeuclidean": errvector **= 2 - if self.inputs.weighting == 'area': + if self.inputs.weighting == "area": faces = vtk1.polys.to_array().reshape(-1, 4).astype(int)[:, 1:] for i, p1 in enumerate(points2): @@ -254,9 +264,8 @@ def _run_interface(self, runtime): out_mesh.points = vtk1.points out_mesh.polys = vtk1.polys out_mesh.point_data.vectors = diff - out_mesh.point_data.vectors.name = 'warpings' - writer = tvtk.PolyDataWriter( - file_name=op.abspath(self.inputs.out_warp)) + out_mesh.point_data.vectors.name = "warpings" + writer = tvtk.PolyDataWriter(file_name=op.abspath(self.inputs.out_warp)) VTKInfo.configure_input_data(writer, out_mesh) writer.write() @@ -265,9 +274,9 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self._outputs().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) - outputs['out_warp'] = op.abspath(self.inputs.out_warp) - outputs['distance'] = self._distance + outputs["out_file"] = op.abspath(self.inputs.out_file) + outputs["out_warp"] = op.abspath(self.inputs.out_warp) + outputs["distance"] = self._distance return outputs @@ -275,11 +284,15 @@ class MeshWarpMathsInputSpec(BaseInterfaceInputSpec): in_surf = File( exists=True, mandatory=True, - desc=('Input surface in vtk format, with associated warp ' - 'field as point data (ie. from ComputeMeshWarp')) + desc=( + "Input surface in vtk format, with associated warp " + "field as point data (ie. 
from ComputeMeshWarp" + ), + ) float_trait = traits.Either( traits.Float(1.0), - traits.Tuple(traits.Float(1.0), traits.Float(1.0), traits.Float(1.0))) + traits.Tuple(traits.Float(1.0), traits.Float(1.0), traits.Float(1.0)), + ) operator = traits.Either( float_trait, @@ -287,31 +300,27 @@ class MeshWarpMathsInputSpec(BaseInterfaceInputSpec): default=1.0, usedefault=True, mandatory=True, - desc='image, float or tuple of floats to act as operator') + desc="image, float or tuple of floats to act as operator", + ) operation = traits.Enum( - 'sum', - 'sub', - 'mul', - 'div', - usedefault=True, - desc='operation to be performed') + "sum", "sub", "mul", "div", usedefault=True, desc="operation to be performed" + ) out_warp = File( - 'warp_maths.vtk', + "warp_maths.vtk", usedefault=True, - desc='vtk file based on in_surf and warpings mapping it ' - 'to out_file') - out_file = File( - 'warped_surf.vtk', usedefault=True, desc='vtk with surface warped') + desc="vtk file based on in_surf and warpings mapping it " "to out_file", + ) + out_file = File("warped_surf.vtk", usedefault=True, desc="vtk with surface warped") class MeshWarpMathsOutputSpec(TraitedSpec): out_warp = File( exists=True, - desc=('vtk file with the vertex-wise ' - 'mapping of surface1 to surface2')) - out_file = File(exists=True, desc='vtk with surface warped') + desc=("vtk file with the vertex-wise " "mapping of surface1 to surface2"), + ) + out_file = File(exists=True, desc="vtk with surface warped") class MeshWarpMaths(TVTKBaseInterface): @@ -346,7 +355,7 @@ def _run_interface(self, runtime): points1 = np.array(vtk1.points) if vtk1.point_data.vectors is None: - raise RuntimeError('No warping field was found in in_surf') + raise RuntimeError("No warping field was found in in_surf") operator = self.inputs.operator opfield = np.ones_like(points1) @@ -355,7 +364,7 @@ def _run_interface(self, runtime): r2 = tvtk.PolyDataReader(file_name=self.inputs.surface2) vtk2 = VTKInfo.vtk_output(r2) r2.update() - assert (len(points1) == len(vtk2.points)) + assert len(points1) == len(vtk2.points) opfield = vtk2.point_data.vectors @@ -363,7 +372,7 @@ def _run_interface(self, runtime): opfield = vtk2.point_data.scalars if opfield is None: - raise RuntimeError('No operator values found in operator file') + raise RuntimeError("No operator values found in operator file") opfield = np.array(opfield) @@ -375,33 +384,31 @@ def _run_interface(self, runtime): warping = np.array(vtk1.point_data.vectors) - if self.inputs.operation == 'sum': + if self.inputs.operation == "sum": warping += opfield - elif self.inputs.operation == 'sub': + elif self.inputs.operation == "sub": warping -= opfield - elif self.inputs.operation == 'mul': + elif self.inputs.operation == "mul": warping *= opfield - elif self.inputs.operation == 'div': + elif self.inputs.operation == "div": warping /= opfield vtk1.point_data.vectors = warping - writer = tvtk.PolyDataWriter( - file_name=op.abspath(self.inputs.out_warp)) + writer = tvtk.PolyDataWriter(file_name=op.abspath(self.inputs.out_warp)) VTKInfo.configure_input_data(writer, vtk1) writer.write() vtk1.point_data.vectors = None vtk1.points = points1 + warping - writer = tvtk.PolyDataWriter( - file_name=op.abspath(self.inputs.out_file)) + writer = tvtk.PolyDataWriter(file_name=op.abspath(self.inputs.out_file)) VTKInfo.configure_input_data(writer, vtk1) writer.write() return runtime def _list_outputs(self): outputs = self._outputs().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) - outputs['out_warp'] = 
op.abspath(self.inputs.out_warp) + outputs["out_file"] = op.abspath(self.inputs.out_file) + outputs["out_warp"] = op.abspath(self.inputs.out_warp) return outputs @@ -418,5 +425,7 @@ class P2PDistance(ComputeMeshWarp): def __init__(self, **inputs): super(P2PDistance, self).__init__(**inputs) - IFLOGGER.warning('This interface has been deprecated since 1.0, please ' - 'use ComputeMeshWarp') + IFLOGGER.warning( + "This interface has been deprecated since 1.0, please " + "use ComputeMeshWarp" + ) diff --git a/nipype/algorithms/metrics.py b/nipype/algorithms/metrics.py index d1075ec04e..7ee1ac5bfd 100644 --- a/nipype/algorithms/metrics.py +++ b/nipype/algorithms/metrics.py @@ -1,10 +1,10 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -''' +""" Image assessment algorithms. Typical overlap and error computation measures to evaluate results from other processing units. -''' +""" import os import os.path as op @@ -14,23 +14,27 @@ from .. import config, logging from ..interfaces.base import ( - SimpleInterface, BaseInterface, traits, TraitedSpec, File, - InputMultiPath, BaseInterfaceInputSpec, - isdefined) + SimpleInterface, + BaseInterface, + traits, + TraitedSpec, + File, + InputMultiPath, + BaseInterfaceInputSpec, + isdefined, +) from ..interfaces.nipy.base import NipyBaseInterface -iflogger = logging.getLogger('nipype.interface') +iflogger = logging.getLogger("nipype.interface") class DistanceInputSpec(BaseInterfaceInputSpec): volume1 = File( - exists=True, - mandatory=True, - desc="Has to have the same dimensions as volume2.") + exists=True, mandatory=True, desc="Has to have the same dimensions as volume2." + ) volume2 = File( - exists=True, - mandatory=True, - desc="Has to have the same dimensions as volume1.") + exists=True, mandatory=True, desc="Has to have the same dimensions as volume1." + ) method = traits.Enum( "eucl_min", "eucl_cog", @@ -46,21 +50,22 @@ class DistanceInputSpec(BaseInterfaceInputSpec): to volume1 weighted by their values\ "eucl_max": maximum over minimum Euclidian distances of all volume2\ voxels to volume1 (also known as the Hausdorff distance)', - usedefault=True) - mask_volume = File( - exists=True, desc="calculate overlap only within this mask.") + usedefault=True, + ) + mask_volume = File(exists=True, desc="calculate overlap only within this mask.") class DistanceOutputSpec(TraitedSpec): distance = traits.Float() - point1 = traits.Array(shape=(3, )) - point2 = traits.Array(shape=(3, )) + point1 = traits.Array(shape=(3,)) + point2 = traits.Array(shape=(3,)) histogram = File() class Distance(BaseInterface): """Calculates distance between two volumes. 
""" + input_spec = DistanceInputSpec output_spec = DistanceOutputSpec @@ -68,6 +73,7 @@ class Distance(BaseInterface): def _find_border(self, data): from scipy.ndimage.morphology import binary_erosion + eroded = binary_erosion(data) border = np.logical_and(data, np.logical_not(eroded)) return border @@ -82,6 +88,7 @@ def _get_coordinates(self, data, affine): def _eucl_min(self, nii1, nii2): from scipy.spatial.distance import cdist, euclidean + origdata1 = nii1.get_data().astype(np.bool) border1 = self._find_border(origdata1) @@ -93,30 +100,33 @@ def _eucl_min(self, nii1, nii2): set2_coordinates = self._get_coordinates(border2, nii2.affine) dist_matrix = cdist(set1_coordinates.T, set2_coordinates.T) - (point1, point2) = np.unravel_index( - np.argmin(dist_matrix), dist_matrix.shape) - return (euclidean(set1_coordinates.T[point1, :], - set2_coordinates.T[point2, :]), - set1_coordinates.T[point1, :], set2_coordinates.T[point2, :]) + (point1, point2) = np.unravel_index(np.argmin(dist_matrix), dist_matrix.shape) + return ( + euclidean(set1_coordinates.T[point1, :], set2_coordinates.T[point2, :]), + set1_coordinates.T[point1, :], + set2_coordinates.T[point2, :], + ) def _eucl_cog(self, nii1, nii2): from scipy.spatial.distance import cdist from scipy.ndimage.measurements import center_of_mass, label - origdata1 = np.logical_and(nii1.get_data() != 0, - np.logical_not(np.isnan(nii1.get_data()))) + + origdata1 = np.logical_and( + nii1.get_data() != 0, np.logical_not(np.isnan(nii1.get_data())) + ) cog_t = np.array(center_of_mass(origdata1.copy())).reshape(-1, 1) cog_t = np.vstack((cog_t, np.array([1]))) cog_t_coor = np.dot(nii1.affine, cog_t)[:3, :] - origdata2 = np.logical_and(nii2.get_data() != 0, - np.logical_not(np.isnan(nii2.get_data()))) + origdata2 = np.logical_and( + nii2.get_data() != 0, np.logical_not(np.isnan(nii2.get_data())) + ) (labeled_data, n_labels) = label(origdata2) cogs = np.ones((4, n_labels)) for i in range(n_labels): - cogs[:3, i] = np.array( - center_of_mass(origdata2, labeled_data, i + 1)) + cogs[:3, i] = np.array(center_of_mass(origdata2, labeled_data, i + 1)) cogs_coor = np.dot(nii2.affine, cogs)[:3, :] @@ -126,6 +136,7 @@ def _eucl_cog(self, nii1, nii2): def _eucl_mean(self, nii1, nii2, weighted=False): from scipy.spatial.distance import cdist + origdata1 = nii1.get_data().astype(np.bool) border1 = self._find_border(origdata1) @@ -137,33 +148,32 @@ def _eucl_mean(self, nii1, nii2, weighted=False): dist_matrix = cdist(set1_coordinates.T, set2_coordinates.T) min_dist_matrix = np.amin(dist_matrix, axis=0) import matplotlib - matplotlib.use(config.get('execution', 'matplotlib_backend')) + + matplotlib.use(config.get("execution", "matplotlib_backend")) import matplotlib.pyplot as plt + plt.figure() - plt.hist(min_dist_matrix, 50, normed=1, facecolor='green') + plt.hist(min_dist_matrix, 50, normed=1, facecolor="green") plt.savefig(self._hist_filename) plt.clf() plt.close() if weighted: - return np.average( - min_dist_matrix, weights=nii2.get_data()[origdata2].flat) + return np.average(min_dist_matrix, weights=nii2.get_data()[origdata2].flat) else: return np.mean(min_dist_matrix) def _eucl_max(self, nii1, nii2): from scipy.spatial.distance import cdist + origdata1 = nii1.get_data() - origdata1 = np.logical_not( - np.logical_or(origdata1 == 0, np.isnan(origdata1))) + origdata1 = np.logical_not(np.logical_or(origdata1 == 0, np.isnan(origdata1))) origdata2 = nii2.get_data() - origdata2 = np.logical_not( - np.logical_or(origdata2 == 0, np.isnan(origdata2))) + origdata2 = 
np.logical_not(np.logical_or(origdata2 == 0, np.isnan(origdata2))) if isdefined(self.inputs.mask_volume): maskdata = nb.load(self.inputs.mask_volume).get_data() - maskdata = np.logical_not( - np.logical_or(maskdata == 0, np.isnan(maskdata))) + maskdata = np.logical_not(np.logical_or(maskdata == 0, np.isnan(maskdata))) origdata1 = np.logical_and(maskdata, origdata1) origdata2 = np.logical_and(maskdata, origdata2) @@ -176,8 +186,7 @@ def _eucl_max(self, nii1, nii2): set1_coordinates = self._get_coordinates(border1, nii1.affine) set2_coordinates = self._get_coordinates(border2, nii2.affine) distances = cdist(set1_coordinates.T, set2_coordinates.T) - mins = np.concatenate((np.amin(distances, axis=0), - np.amin(distances, axis=1))) + mins = np.concatenate((np.amin(distances, axis=0), np.amin(distances, axis=1))) return np.max(mins) @@ -187,8 +196,7 @@ def _run_interface(self, runtime): nii2 = nb.load(self.inputs.volume2, mmap=False) if self.inputs.method == "eucl_min": - self._distance, self._point1, self._point2 = self._eucl_min( - nii1, nii2) + self._distance, self._point1, self._point2 = self._eucl_min(nii1, nii2) elif self.inputs.method == "eucl_cog": self._distance = self._eucl_cog(nii1, nii2) @@ -205,62 +213,55 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self._outputs().get() - outputs['distance'] = self._distance + outputs["distance"] = self._distance if self.inputs.method == "eucl_min": - outputs['point1'] = self._point1 - outputs['point2'] = self._point2 + outputs["point1"] = self._point1 + outputs["point2"] = self._point2 elif self.inputs.method in ["eucl_mean", "eucl_wmean"]: - outputs['histogram'] = os.path.abspath(self._hist_filename) + outputs["histogram"] = os.path.abspath(self._hist_filename) return outputs class OverlapInputSpec(BaseInterfaceInputSpec): volume1 = File( - exists=True, - mandatory=True, - desc='Has to have the same dimensions as volume2.') + exists=True, mandatory=True, desc="Has to have the same dimensions as volume2." + ) volume2 = File( - exists=True, - mandatory=True, - desc='Has to have the same dimensions as volume1.') - mask_volume = File( - exists=True, desc='calculate overlap only within this mask.') + exists=True, mandatory=True, desc="Has to have the same dimensions as volume1." + ) + mask_volume = File(exists=True, desc="calculate overlap only within this mask.") bg_overlap = traits.Bool( - False, - usedefault=True, - mandatory=True, - desc='consider zeros as a label') - out_file = File('diff.nii', usedefault=True) + False, usedefault=True, mandatory=True, desc="consider zeros as a label" + ) + out_file = File("diff.nii", usedefault=True) weighting = traits.Enum( - 'none', - 'volume', - 'squared_vol', + "none", + "volume", + "squared_vol", usedefault=True, - desc=('\'none\': no class-overlap weighting is ' - 'performed. \'volume\': computed class-' - 'overlaps are weighted by class volume ' - '\'squared_vol\': computed class-overlaps ' - 'are weighted by the squared volume of ' - 'the class')) + desc=( + "'none': no class-overlap weighting is " + "performed. 
'volume': computed class-" + "overlaps are weighted by class volume " + "'squared_vol': computed class-overlaps " + "are weighted by the squared volume of " + "the class" + ), + ) vol_units = traits.Enum( - 'voxel', - 'mm', - mandatory=True, - usedefault=True, - desc='units for volumes') + "voxel", "mm", mandatory=True, usedefault=True, desc="units for volumes" + ) class OverlapOutputSpec(TraitedSpec): - jaccard = traits.Float(desc='averaged jaccard index') - dice = traits.Float(desc='averaged dice index') - roi_ji = traits.List( - traits.Float(), desc=('the Jaccard index (JI) per ROI')) - roi_di = traits.List(traits.Float(), desc=('the Dice index (DI) per ROI')) - volume_difference = traits.Float(desc=('averaged volume difference')) - roi_voldiff = traits.List( - traits.Float(), desc=('volume differences of ROIs')) - labels = traits.List(traits.Int(), desc=('detected labels')) - diff_file = File(exists=True, desc='error map of differences') + jaccard = traits.Float(desc="averaged jaccard index") + dice = traits.Float(desc="averaged dice index") + roi_ji = traits.List(traits.Float(), desc=("the Jaccard index (JI) per ROI")) + roi_di = traits.List(traits.Float(), desc=("the Dice index (DI) per ROI")) + volume_difference = traits.Float(desc=("averaged volume difference")) + roi_voldiff = traits.List(traits.Float(), desc=("volume differences of ROIs")) + labels = traits.List(traits.Int(), desc=("detected labels")) + diff_file = File(exists=True, desc="error map of differences") class Overlap(BaseInterface): @@ -282,12 +283,14 @@ class Overlap(BaseInterface): >>> res = overlap.run() # doctest: +SKIP """ + input_spec = OverlapInputSpec output_spec = OverlapOutputSpec def _bool_vec_dissimilarity(self, booldata1, booldata2, method): from scipy.spatial.distance import dice, jaccard - methods = {'dice': dice, 'jaccard': jaccard} + + methods = {"dice": dice, "jaccard": jaccard} if not (np.any(booldata1) or np.any(booldata2)): return 0 return 1 - methods[method](booldata1.flat, booldata2.flat) @@ -298,7 +301,7 @@ def _run_interface(self, runtime): scale = 1.0 - if self.inputs.vol_units == 'mm': + if self.inputs.vol_units == "mm": voxvol = nii1.header.get_zooms() for i in range(nii1.get_data().ndim - 1): scale = scale * voxvol[i] @@ -326,51 +329,50 @@ def _run_interface(self, runtime): for l in labels: res.append( - self._bool_vec_dissimilarity( - data1 == l, data2 == l, method='jaccard')) + self._bool_vec_dissimilarity(data1 == l, data2 == l, method="jaccard") + ) volumes1.append(scale * len(data1[data1 == l])) volumes2.append(scale * len(data2[data2 == l])) results = dict(jaccard=[], dice=[]) - results['jaccard'] = np.array(res) - results['dice'] = 2.0 * results['jaccard'] / (results['jaccard'] + 1.0) + results["jaccard"] = np.array(res) + results["dice"] = 2.0 * results["jaccard"] / (results["jaccard"] + 1.0) - weights = np.ones((len(volumes1), ), dtype=np.float32) - if self.inputs.weighting != 'none': + weights = np.ones((len(volumes1),), dtype=np.float32) + if self.inputs.weighting != "none": weights = weights / np.array(volumes1) - if self.inputs.weighting == 'squared_vol': - weights = weights**2 + if self.inputs.weighting == "squared_vol": + weights = weights ** 2 weights = weights / np.sum(weights) both_data = np.zeros(data1.shape) both_data[(data1 - data2) != 0] = 1 nb.save( - nb.Nifti1Image(both_data, nii1.affine, nii1.header), - self.inputs.out_file) + nb.Nifti1Image(both_data, nii1.affine, nii1.header), self.inputs.out_file + ) self._labels = labels self._ove_rois = results - 
self._vol_rois = ( - np.array(volumes1) - np.array(volumes2)) / np.array(volumes1) + self._vol_rois = (np.array(volumes1) - np.array(volumes2)) / np.array(volumes1) - self._dice = round(np.sum(weights * results['dice']), 5) - self._jaccard = round(np.sum(weights * results['jaccard']), 5) + self._dice = round(np.sum(weights * results["dice"]), 5) + self._jaccard = round(np.sum(weights * results["jaccard"]), 5) self._volume = np.sum(weights * self._vol_rois) return runtime def _list_outputs(self): outputs = self._outputs().get() - outputs['labels'] = self._labels - outputs['jaccard'] = self._jaccard - outputs['dice'] = self._dice - outputs['volume_difference'] = self._volume - - outputs['roi_ji'] = self._ove_rois['jaccard'].tolist() - outputs['roi_di'] = self._ove_rois['dice'].tolist() - outputs['roi_voldiff'] = self._vol_rois.tolist() - outputs['diff_file'] = os.path.abspath(self.inputs.out_file) + outputs["labels"] = self._labels + outputs["jaccard"] = self._jaccard + outputs["dice"] = self._dice + outputs["volume_difference"] = self._volume + + outputs["roi_ji"] = self._ove_rois["jaccard"].tolist() + outputs["roi_di"] = self._ove_rois["dice"].tolist() + outputs["roi_voldiff"] = self._vol_rois.tolist() + outputs["diff_file"] = os.path.abspath(self.inputs.out_file) return outputs @@ -378,38 +380,44 @@ class FuzzyOverlapInputSpec(BaseInterfaceInputSpec): in_ref = InputMultiPath( File(exists=True), mandatory=True, - desc='Reference image. Requires the same dimensions as in_tst.') + desc="Reference image. Requires the same dimensions as in_tst.", + ) in_tst = InputMultiPath( File(exists=True), mandatory=True, - desc='Test image. Requires the same dimensions as in_ref.') - in_mask = File(exists=True, desc='calculate overlap only within mask') + desc="Test image. Requires the same dimensions as in_ref.", + ) + in_mask = File(exists=True, desc="calculate overlap only within mask") weighting = traits.Enum( - 'none', - 'volume', - 'squared_vol', + "none", + "volume", + "squared_vol", usedefault=True, - desc=('\'none\': no class-overlap weighting is ' - 'performed. \'volume\': computed class-' - 'overlaps are weighted by class volume ' - '\'squared_vol\': computed class-overlaps ' - 'are weighted by the squared volume of ' - 'the class')) + desc=( + "'none': no class-overlap weighting is " + "performed. 
'volume': computed class-" + "overlaps are weighted by class volume " + "'squared_vol': computed class-overlaps " + "are weighted by the squared volume of " + "the class" + ), + ) out_file = File( - 'diff.nii', - desc='alternative name for resulting difference-map', - usedefault=True) + "diff.nii", + desc="alternative name for resulting difference-map", + usedefault=True, + ) class FuzzyOverlapOutputSpec(TraitedSpec): - jaccard = traits.Float(desc='Fuzzy Jaccard Index (fJI), all the classes') - dice = traits.Float(desc='Fuzzy Dice Index (fDI), all the classes') + jaccard = traits.Float(desc="Fuzzy Jaccard Index (fJI), all the classes") + dice = traits.Float(desc="Fuzzy Dice Index (fDI), all the classes") class_fji = traits.List( - traits.Float(), - desc='Array containing the fJIs of each computed class') + traits.Float(), desc="Array containing the fJIs of each computed class" + ) class_fdi = traits.List( - traits.Float(), - desc='Array containing the fDIs of each computed class') + traits.Float(), desc="Array containing the fDIs of each computed class" + ) class FuzzyOverlap(SimpleInterface): @@ -443,8 +451,9 @@ def _run_interface(self, runtime): # Data must have same shape if not refdata.shape == tstdata.shape: raise RuntimeError( - 'Size of "in_tst" %s must match that of "in_ref" %s.' % - (tstdata.shape, refdata.shape)) + 'Size of "in_tst" %s must match that of "in_ref" %s.' + % (tstdata.shape, refdata.shape) + ) ncomp = refdata.shape[-1] @@ -461,29 +470,33 @@ def _run_interface(self, runtime): tstdata = tstdata[mask] if np.any(refdata < 0.0): - iflogger.warning('Negative values encountered in "in_ref" input, ' - 'taking absolute values.') + iflogger.warning( + 'Negative values encountered in "in_ref" input, ' + "taking absolute values." + ) refdata = np.abs(refdata) if np.any(tstdata < 0.0): - iflogger.warning('Negative values encountered in "in_tst" input, ' - 'taking absolute values.') + iflogger.warning( + 'Negative values encountered in "in_tst" input, ' + "taking absolute values." + ) tstdata = np.abs(tstdata) if np.any(refdata > 1.0): - iflogger.warning('Values greater than 1.0 found in "in_ref" input, ' - 'scaling values.') + iflogger.warning( + 'Values greater than 1.0 found in "in_ref" input, ' "scaling values." + ) refdata /= refdata.max() if np.any(tstdata > 1.0): - iflogger.warning('Values greater than 1.0 found in "in_tst" input, ' - 'scaling values.') + iflogger.warning( + 'Values greater than 1.0 found in "in_tst" input, ' "scaling values." 
+ ) tstdata /= tstdata.max() - numerators = np.atleast_2d( - np.minimum(refdata, tstdata).reshape((-1, ncomp))) - denominators = np.atleast_2d( - np.maximum(refdata, tstdata).reshape((-1, ncomp))) + numerators = np.atleast_2d(np.minimum(refdata, tstdata).reshape((-1, ncomp))) + denominators = np.atleast_2d(np.maximum(refdata, tstdata).reshape((-1, ncomp))) jaccards = numerators.sum(axis=0) / denominators.sum(axis=0) @@ -493,16 +506,16 @@ def _run_interface(self, runtime): volumes = np.sum((refdata + tstdata) > 0, axis=1).reshape((-1, ncomp)) weights = 1.0 / volumes if self.inputs.weighting == "squared_vol": - weights = weights**2 + weights = weights ** 2 weights = weights / np.sum(weights) dices = 2.0 * jaccards / (jaccards + 1.0) # Fill-in the results object - self._results['jaccard'] = float(weights.dot(jaccards)) - self._results['dice'] = float(weights.dot(dices)) - self._results['class_fji'] = [float(v) for v in jaccards] - self._results['class_fdi'] = [float(v) for v in dices] + self._results["jaccard"] = float(weights.dot(jaccards)) + self._results["dice"] = float(weights.dot(dices)) + self._results["class_fji"] = [float(v) for v in jaccards] + self._results["class_fdi"] = [float(v) for v in dices] return runtime @@ -510,18 +523,21 @@ class ErrorMapInputSpec(BaseInterfaceInputSpec): in_ref = File( exists=True, mandatory=True, - desc="Reference image. Requires the same dimensions as in_tst.") + desc="Reference image. Requires the same dimensions as in_tst.", + ) in_tst = File( exists=True, mandatory=True, - desc="Test image. Requires the same dimensions as in_ref.") + desc="Test image. Requires the same dimensions as in_ref.", + ) mask = File(exists=True, desc="calculate overlap only within this mask.") metric = traits.Enum( "sqeuclidean", "euclidean", - desc='error map metric (as implemented in scipy cdist)', + desc="error map metric (as implemented in scipy cdist)", usedefault=True, - mandatory=True) + mandatory=True, + ) out_map = File(desc="Name for the output file") @@ -541,31 +557,34 @@ class ErrorMap(BaseInterface): >>> errormap.inputs.in_tst = 'cont2.nii' >>> res = errormap.run() # doctest: +SKIP """ + input_spec = ErrorMapInputSpec output_spec = ErrorMapOutputSpec - _out_file = '' + _out_file = "" def _run_interface(self, runtime): # Get two numpy data matrices nii_ref = nb.load(self.inputs.in_ref) ref_data = np.squeeze(nii_ref.get_data()) tst_data = np.squeeze(nb.load(self.inputs.in_tst).get_data()) - assert (ref_data.ndim == tst_data.ndim) + assert ref_data.ndim == tst_data.ndim # Load mask comps = 1 mapshape = ref_data.shape - if (ref_data.ndim == 4): + if ref_data.ndim == 4: comps = ref_data.shape[-1] mapshape = ref_data.shape[:-1] if isdefined(self.inputs.mask): msk = nb.load(self.inputs.mask).get_data() - if (mapshape != msk.shape): - raise RuntimeError("Mask should match volume shape, \ - mask is %s and volumes are %s" % - (list(msk.shape), list(mapshape))) + if mapshape != msk.shape: + raise RuntimeError( + "Mask should match volume shape, \ + mask is %s and volumes are %s" + % (list(msk.shape), list(mapshape)) + ) else: msk = np.ones(shape=mapshape) @@ -574,51 +593,52 @@ def _run_interface(self, runtime): msk_idxs = np.where(mskvector == 1) refvector = ref_data.reshape(-1, comps)[msk_idxs].astype(np.float32) tstvector = tst_data.reshape(-1, comps)[msk_idxs].astype(np.float32) - diffvector = (refvector - tstvector) + diffvector = refvector - tstvector # Scale the difference - if self.inputs.metric == 'sqeuclidean': - errvector = diffvector**2 - if (comps > 1): + if 
self.inputs.metric == "sqeuclidean": + errvector = diffvector ** 2 + if comps > 1: errvector = np.sum(errvector, axis=1) else: errvector = np.squeeze(errvector) - elif self.inputs.metric == 'euclidean': + elif self.inputs.metric == "euclidean": errvector = np.linalg.norm(diffvector, axis=1) errvectorexp = np.zeros_like( - mskvector, dtype=np.float32) # The default type is uint8 + mskvector, dtype=np.float32 + ) # The default type is uint8 errvectorexp[msk_idxs] = errvector # Get averaged error - self._distance = np.average( - errvector) # Only average the masked voxels + self._distance = np.average(errvector) # Only average the masked voxels errmap = errvectorexp.reshape(mapshape) hdr = nii_ref.header.copy() hdr.set_data_dtype(np.float32) - hdr['data_type'] = 16 + hdr["data_type"] = 16 hdr.set_data_shape(mapshape) if not isdefined(self.inputs.out_map): fname, ext = op.splitext(op.basename(self.inputs.in_tst)) - if ext == '.gz': + if ext == ".gz": fname, ext2 = op.splitext(fname) ext = ext2 + ext self._out_file = op.abspath(fname + "_errmap" + ext) else: self._out_file = self.inputs.out_map - nb.Nifti1Image(errmap.astype(np.float32), nii_ref.affine, - hdr).to_filename(self._out_file) + nb.Nifti1Image(errmap.astype(np.float32), nii_ref.affine, hdr).to_filename( + self._out_file + ) return runtime def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_map'] = self._out_file - outputs['distance'] = self._distance + outputs["out_map"] = self._out_file + outputs["distance"] = self._distance return outputs @@ -628,7 +648,7 @@ class SimilarityInputSpec(BaseInterfaceInputSpec): mask1 = File(exists=True, desc="3D volume") mask2 = File(exists=True, desc="3D volume") metric = traits.Either( - traits.Enum('cc', 'cr', 'crl1', 'mi', 'nmi', 'slr'), + traits.Enum("cc", "cr", "crl1", "mi", "nmi", "slr"), traits.Callable(), desc="""str or callable Cost-function for assessing image similarity. If a string, @@ -638,12 +658,14 @@ class SimilarityInputSpec(BaseInterfaceInputSpec): supervised log-likelihood ratio. 
If a callable, it should take a two-dimensional array representing the image joint histogram as an input and return a float.""", - usedefault=True) + usedefault=True, + ) class SimilarityOutputSpec(TraitedSpec): similarity = traits.List( - traits.Float(desc="Similarity between volume 1 and 2, frame by frame")) + traits.Float(desc="Similarity between volume 1 and 2, frame by frame") + ) class Similarity(NipyBaseInterface): @@ -671,7 +693,9 @@ class Similarity(NipyBaseInterface): output_spec = SimilarityOutputSpec def _run_interface(self, runtime): - from nipy.algorithms.registration.histogram_registration import HistogramRegistration + from nipy.algorithms.registration.histogram_registration import ( + HistogramRegistration, + ) from nipy.algorithms.registration.affine import Affine vol1_nii = nb.load(self.inputs.volume1) @@ -688,7 +712,8 @@ def _run_interface(self, runtime): if dims < 2 or dims > 4: raise RuntimeError( - 'Image dimensions not supported (detected %dD file)' % dims) + "Image dimensions not supported (detected %dD file)" % dims + ) if isdefined(self.inputs.mask1): mask1 = nb.load(self.inputs.mask1).get_data() == 1 @@ -708,12 +733,13 @@ def _run_interface(self, runtime): to_img=ts2, similarity=self.inputs.metric, from_mask=mask1, - to_mask=mask2) + to_mask=mask2, + ) self._similarity.append(histreg.eval(Affine())) return runtime def _list_outputs(self): outputs = self._outputs().get() - outputs['similarity'] = self._similarity + outputs["similarity"] = self._similarity return outputs diff --git a/nipype/algorithms/misc.py b/nipype/algorithms/misc.py index fc44e8738f..723b4d83d4 100644 --- a/nipype/algorithms/misc.py +++ b/nipype/algorithms/misc.py @@ -1,9 +1,9 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -''' +""" Miscellaneous algorithms -''' +""" import os import os.path as op @@ -16,36 +16,49 @@ from .. import logging from . import metrics as nam from ..interfaces.base import ( - BaseInterface, traits, TraitedSpec, File, InputMultiPath, OutputMultiPath, - BaseInterfaceInputSpec, isdefined, DynamicTraitedSpec, Undefined) + BaseInterface, + traits, + TraitedSpec, + File, + InputMultiPath, + OutputMultiPath, + BaseInterfaceInputSpec, + isdefined, + DynamicTraitedSpec, + Undefined, +) from ..utils.filemanip import fname_presuffix, split_filename, ensure_list from ..utils import NUMPY_MMAP from . import confounds -iflogger = logging.getLogger('nipype.interface') +iflogger = logging.getLogger("nipype.interface") class PickAtlasInputSpec(BaseInterfaceInputSpec): atlas = File( - exists=True, - desc="Location of the atlas that will be used.", - mandatory=True) + exists=True, desc="Location of the atlas that will be used.", mandatory=True + ) labels = traits.Either( traits.Int, traits.List(traits.Int), - desc=("Labels of regions that will be included in the mask. Must be\ - compatible with the atlas used."), - mandatory=True) + desc=( + "Labels of regions that will be included in the mask. Must be\ + compatible with the atlas used." 
+ ), + mandatory=True, + ) hemi = traits.Enum( - 'both', - 'left', - 'right', + "both", + "left", + "right", desc="Restrict the mask to only one hemisphere: left or right", - usedefault=True) + usedefault=True, + ) dilation_size = traits.Int( usedefault=True, - desc="Defines how much the mask will be dilated (expanded in 3D).") + desc="Defines how much the mask will be dilated (expanded in 3D).", + ) output_file = File(desc="Where to store the output mask.") @@ -73,7 +86,8 @@ def _gen_output_filename(self): fname=self.inputs.atlas, suffix="_mask", newpath=os.getcwd(), - use_ext=True) + use_ext=True, + ) else: output = os.path.realpath(self.inputs.output_file) return output @@ -89,43 +103,50 @@ def _get_brodmann_area(self): labels = self.inputs.labels for lab in labels: newdata[origdata == lab] = 1 - if self.inputs.hemi == 'right': - newdata[int(floor(float(origdata.shape[0]) / 2)):, :, :] = 0 - elif self.inputs.hemi == 'left': - newdata[:int(ceil(float(origdata.shape[0]) / 2)), :, :] = 0 + if self.inputs.hemi == "right": + newdata[int(floor(float(origdata.shape[0]) / 2)) :, :, :] = 0 + elif self.inputs.hemi == "left": + newdata[: int(ceil(float(origdata.shape[0]) / 2)), :, :] = 0 if self.inputs.dilation_size != 0: from scipy.ndimage.morphology import grey_dilation - newdata = grey_dilation(newdata, - (2 * self.inputs.dilation_size + 1, - 2 * self.inputs.dilation_size + 1, - 2 * self.inputs.dilation_size + 1)) + + newdata = grey_dilation( + newdata, + ( + 2 * self.inputs.dilation_size + 1, + 2 * self.inputs.dilation_size + 1, + 2 * self.inputs.dilation_size + 1, + ), + ) return nb.Nifti1Image(newdata, nii.affine, nii.header) def _list_outputs(self): outputs = self._outputs().get() - outputs['mask_file'] = self._gen_output_filename() + outputs["mask_file"] = self._gen_output_filename() return outputs class SimpleThresholdInputSpec(BaseInterfaceInputSpec): volumes = InputMultiPath( - File(exists=True), desc='volumes to be thresholded', mandatory=True) + File(exists=True), desc="volumes to be thresholded", mandatory=True + ) threshold = traits.Float( - desc='volumes to be thresholdedeverything below this value will be set\ - to zero', - mandatory=True) + desc="volumes to be thresholdedeverything below this value will be set\ + to zero", + mandatory=True, + ) class SimpleThresholdOutputSpec(TraitedSpec): - thresholded_volumes = OutputMultiPath( - File(exists=True), desc="thresholded volumes") + thresholded_volumes = OutputMultiPath(File(exists=True), desc="thresholded volumes") class SimpleThreshold(BaseInterface): """Applies a threshold to input volumes """ + input_spec = SimpleThresholdInputSpec output_spec = SimpleThresholdOutputSpec @@ -141,7 +162,7 @@ def _run_interface(self, runtime): new_img = nb.Nifti1Image(thresholded_map, img.affine, img.header) _, base, _ = split_filename(fname) - nb.save(new_img, base + '_thresholded.nii') + nb.save(new_img, base + "_thresholded.nii") return runtime @@ -151,21 +172,24 @@ def _list_outputs(self): for fname in self.inputs.volumes: _, base, _ = split_filename(fname) outputs["thresholded_volumes"].append( - os.path.abspath(base + '_thresholded.nii')) + os.path.abspath(base + "_thresholded.nii") + ) return outputs class ModifyAffineInputSpec(BaseInterfaceInputSpec): volumes = InputMultiPath( File(exists=True), - desc='volumes which affine matrices will be modified', - mandatory=True) + desc="volumes which affine matrices will be modified", + mandatory=True, + ) transformation_matrix = traits.Array( value=np.eye(4), shape=(4, 4), desc="transformation 
matrix that will be left multiplied by the\ affine matrix", - usedefault=True) + usedefault=True, + ) class ModifyAffineOutputSpec(TraitedSpec): @@ -176,6 +200,7 @@ class ModifyAffine(BaseInterface): """Left multiplies the affine matrix with a specified values. Saves the volume as a nifti file. """ + input_spec = ModifyAffineInputSpec output_spec = ModifyAffineOutputSpec @@ -192,23 +217,24 @@ def _run_interface(self, runtime): nb.save( nb.Nifti1Image(img.get_data(), affine, img.header), - self._gen_output_filename(fname)) + self._gen_output_filename(fname), + ) return runtime def _list_outputs(self): outputs = self._outputs().get() - outputs['transformed_volumes'] = [] + outputs["transformed_volumes"] = [] for fname in self.inputs.volumes: - outputs['transformed_volumes'].append( - self._gen_output_filename(fname)) + outputs["transformed_volumes"].append(self._gen_output_filename(fname)) return outputs class CreateNiftiInputSpec(BaseInterfaceInputSpec): data_file = File(exists=True, mandatory=True, desc="ANALYZE img file") header_file = File( - exists=True, mandatory=True, desc="corresponding ANALYZE hdr file") + exists=True, mandatory=True, desc="corresponding ANALYZE hdr file" + ) affine = traits.Array(desc="affine transformation array") @@ -219,6 +245,7 @@ class CreateNiftiOutputSpec(TraitedSpec): class CreateNifti(BaseInterface): """Creates a nifti volume """ + input_spec = CreateNiftiInputSpec output_spec = CreateNiftiOutputSpec @@ -227,7 +254,7 @@ def _gen_output_file_name(self): return os.path.abspath(base + ".nii") def _run_interface(self, runtime): - with open(self.inputs.header_file, 'rb') as hdr_file: + with open(self.inputs.header_file, "rb") as hdr_file: hdr = nb.AnalyzeHeader.from_fileobj(hdr_file) if isdefined(self.inputs.affine): @@ -235,7 +262,7 @@ def _run_interface(self, runtime): else: affine = None - with open(self.inputs.data_file, 'rb') as data_file: + with open(self.inputs.data_file, "rb") as data_file: data = hdr.data_from_fileobj(data_file) img = nb.Nifti1Image(data, affine, hdr) @@ -245,7 +272,7 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self._outputs().get() - outputs['nifti_file'] = self._gen_output_file_name() + outputs["nifti_file"] = self._gen_output_file_name() return outputs @@ -270,6 +297,7 @@ class Gunzip(BaseInterface): >>> os.unlink('tpms_msk.nii') """ + input_spec = GunzipInputSpec output_spec = GunzipOutputSpec @@ -282,14 +310,15 @@ def _gen_output_file_name(self): def _run_interface(self, runtime): import gzip import shutil - with gzip.open(self.inputs.in_file, 'rb') as in_file: - with open(self._gen_output_file_name(), 'wb') as out_file: + + with gzip.open(self.inputs.in_file, "rb") as in_file: + with open(self._gen_output_file_name(), "wb") as out_file: shutil.copyfileobj(in_file, out_file) return runtime def _list_outputs(self): outputs = self._outputs().get() - outputs['out_file'] = self._gen_output_file_name() + outputs["out_file"] = self._gen_output_file_name() return outputs @@ -307,27 +336,31 @@ def matlab2csv(in_array, name, reshape): if reshape: if len(np.shape(output_array)) > 1: output_array = np.reshape( - output_array, - (np.shape(output_array)[0] * np.shape(output_array)[1], 1)) + output_array, (np.shape(output_array)[0] * np.shape(output_array)[1], 1) + ) iflogger.info(np.shape(output_array)) - output_name = op.abspath(name + '.csv') - np.savetxt(output_name, output_array, delimiter=',') + output_name = op.abspath(name + ".csv") + np.savetxt(output_name, output_array, delimiter=",") return output_name 
class Matlab2CSVInputSpec(TraitedSpec): - in_file = File(exists=True, mandatory=True, desc='Input MATLAB .mat file') + in_file = File(exists=True, mandatory=True, desc="Input MATLAB .mat file") reshape_matrix = traits.Bool( True, usedefault=True, - desc='The output of this interface is meant for R, so matrices will be\ - reshaped to vectors by default.') + desc="The output of this interface is meant for R, so matrices will be\ + reshaped to vectors by default.", + ) class Matlab2CSVOutputSpec(TraitedSpec): csv_files = OutputMultiPath( - File(desc='Output CSV files for each variable saved in the input .mat\ - file')) + File( + desc="Output CSV files for each variable saved in the input .mat\ + file" + ) + ) class Matlab2CSV(BaseInterface): @@ -345,11 +378,13 @@ class Matlab2CSV(BaseInterface): >>> mat2csv.inputs.in_file = 'cmatrix.mat' >>> mat2csv.run() # doctest: +SKIP """ + input_spec = Matlab2CSVInputSpec output_spec = Matlab2CSVOutputSpec def _run_interface(self, runtime): import scipy.io as sio + in_dict = sio.loadmat(op.abspath(self.inputs.in_file)) # Check if the file has multiple variables in it. If it does, loop @@ -359,87 +394,100 @@ def _run_interface(self, runtime): saved_variables = list() for key in list(in_dict.keys()): - if not key.startswith('__'): + if not key.startswith("__"): if isinstance(in_dict[key][0], np.ndarray): saved_variables.append(key) else: - iflogger.info('One of the keys in the input file, %s, is ' - 'not a Numpy array', key) + iflogger.info( + "One of the keys in the input file, %s, is " + "not a Numpy array", + key, + ) if len(saved_variables) > 1: - iflogger.info('%i variables found:', len(saved_variables)) + iflogger.info("%i variables found:", len(saved_variables)) iflogger.info(saved_variables) for variable in saved_variables: - iflogger.info('...Converting %s - type %s - to CSV', variable, - type(in_dict[variable])) - matlab2csv(in_dict[variable], variable, - self.inputs.reshape_matrix) + iflogger.info( + "...Converting %s - type %s - to CSV", + variable, + type(in_dict[variable]), + ) + matlab2csv(in_dict[variable], variable, self.inputs.reshape_matrix) elif len(saved_variables) == 1: _, name, _ = split_filename(self.inputs.in_file) variable = saved_variables[0] - iflogger.info('Single variable found %s, type %s:', variable, - type(in_dict[variable])) - iflogger.info('...Converting %s to CSV from %s', variable, - self.inputs.in_file) + iflogger.info( + "Single variable found %s, type %s:", variable, type(in_dict[variable]) + ) + iflogger.info( + "...Converting %s to CSV from %s", variable, self.inputs.in_file + ) matlab2csv(in_dict[variable], name, self.inputs.reshape_matrix) else: - iflogger.error('No values in the MATLAB file?!') + iflogger.error("No values in the MATLAB file?!") return runtime def _list_outputs(self): import scipy.io as sio + outputs = self.output_spec().get() in_dict = sio.loadmat(op.abspath(self.inputs.in_file)) saved_variables = list() for key in list(in_dict.keys()): - if not key.startswith('__'): + if not key.startswith("__"): if isinstance(in_dict[key][0], np.ndarray): saved_variables.append(key) else: - iflogger.error('One of the keys in the input file, %s, is ' - 'not a Numpy array', key) + iflogger.error( + "One of the keys in the input file, %s, is " + "not a Numpy array", + key, + ) if len(saved_variables) > 1: - outputs['csv_files'] = replaceext(saved_variables, '.csv') + outputs["csv_files"] = replaceext(saved_variables, ".csv") elif len(saved_variables) == 1: _, name, ext = split_filename(self.inputs.in_file) - 
outputs['csv_files'] = op.abspath(name + '.csv') + outputs["csv_files"] = op.abspath(name + ".csv") else: - iflogger.error('No values in the MATLAB file?!') + iflogger.error("No values in the MATLAB file?!") return outputs def merge_csvs(in_list): for idx, in_file in enumerate(in_list): try: - in_array = np.loadtxt(in_file, delimiter=',') + in_array = np.loadtxt(in_file, delimiter=",") except ValueError: try: - in_array = np.loadtxt(in_file, delimiter=',', skiprows=1) + in_array = np.loadtxt(in_file, delimiter=",", skiprows=1) except ValueError: - with open(in_file, 'r') as first: + with open(in_file, "r") as first: header_line = first.readline() - header_list = header_line.split(',') + header_list = header_line.split(",") n_cols = len(header_list) try: in_array = np.loadtxt( in_file, - delimiter=',', + delimiter=",", skiprows=1, - usecols=list(range(1, n_cols))) + usecols=list(range(1, n_cols)), + ) except ValueError: in_array = np.loadtxt( in_file, - delimiter=',', + delimiter=",", skiprows=1, - usecols=list(range(1, n_cols - 1))) + usecols=list(range(1, n_cols - 1)), + ) if idx == 0: out_array = in_array else: out_array = np.dstack((out_array, in_array)) out_array = np.squeeze(out_array) - iflogger.info('Final output array shape:') + iflogger.info("Final output array shape:") iflogger.info(np.shape(out_array)) return out_array @@ -447,16 +495,17 @@ def merge_csvs(in_list): def remove_identical_paths(in_files): import os.path as op from ..utils.filemanip import split_filename + if len(in_files) > 1: out_names = list() commonprefix = op.commonprefix(in_files) - lastslash = commonprefix.rfind('/') - commonpath = commonprefix[0:(lastslash + 1)] + lastslash = commonprefix.rfind("/") + commonpath = commonprefix[0 : (lastslash + 1)] for fileidx, in_file in enumerate(in_files): path, name, ext = split_filename(in_file) in_file = op.join(path, name) - name = in_file.replace(commonpath, '') - name = name.replace('_subject_id_', '') + name = in_file.replace(commonpath, "") + name = name.replace("_subject_id_", "") out_names.append(name) else: path, name, ext = split_filename(in_files[0]) @@ -467,7 +516,7 @@ def remove_identical_paths(in_files): def maketypelist(rowheadings, shape, extraheadingBool, extraheading): typelist = [] if rowheadings: - typelist.append(('heading', 'a40')) + typelist.append(("heading", "a40")) if len(shape) > 1: for idx in range(1, (min(shape) + 1)): typelist.append((str(idx), float)) @@ -475,29 +524,28 @@ def maketypelist(rowheadings, shape, extraheadingBool, extraheading): for idx in range(1, (shape[0] + 1)): typelist.append((str(idx), float)) if extraheadingBool: - typelist.append((extraheading, 'a40')) + typelist.append((extraheading, "a40")) iflogger.info(typelist) return typelist -def makefmtlist(output_array, typelist, rowheadingsBool, shape, - extraheadingBool): +def makefmtlist(output_array, typelist, rowheadingsBool, shape, extraheadingBool): fmtlist = [] if rowheadingsBool: - fmtlist.append('%s') + fmtlist.append("%s") if len(shape) > 1: output = np.zeros(max(shape), typelist) for idx in range(1, min(shape) + 1): output[str(idx)] = output_array[:, idx - 1] - fmtlist.append('%f') + fmtlist.append("%f") else: output = np.zeros(1, typelist) for idx in range(1, len(output_array) + 1): output[str(idx)] = output_array[idx - 1] - fmtlist.append('%f') + fmtlist.append("%f") if extraheadingBool: - fmtlist.append('%s') - fmt = ','.join(fmtlist) + fmtlist.append("%s") + fmt = ",".join(fmtlist) return fmt, output @@ -505,34 +553,37 @@ class 
MergeCSVFilesInputSpec(TraitedSpec): in_files = InputMultiPath( File(exists=True), mandatory=True, - desc='Input comma-separated value (CSV) files') + desc="Input comma-separated value (CSV) files", + ) out_file = File( - 'merged.csv', - usedefault=True, - desc='Output filename for merged CSV file') + "merged.csv", usedefault=True, desc="Output filename for merged CSV file" + ) column_headings = traits.List( traits.Str, - desc='List of column headings to save in merged CSV file\ + desc="List of column headings to save in merged CSV file\ (must be equal to number of input files). If left undefined, these\ - will be pulled from the input filenames.') + will be pulled from the input filenames.", + ) row_headings = traits.List( traits.Str, - desc='List of row headings to save in merged CSV file\ - (must be equal to number of rows in the input files).') + desc="List of row headings to save in merged CSV file\ + (must be equal to number of rows in the input files).", + ) row_heading_title = traits.Str( - 'label', + "label", usedefault=True, - desc='Column heading for the row headings\ - added') - extra_column_heading = traits.Str( - desc='New heading to add for the added field.') + desc="Column heading for the row headings\ + added", + ) + extra_column_heading = traits.Str(desc="New heading to add for the added field.") extra_field = traits.Str( - desc='New field to add to each row. This is useful for saving the\ - group or subject ID in the file.') + desc="New field to add to each row. This is useful for saving the\ + group or subject ID in the file." + ) class MergeCSVFilesOutputSpec(TraitedSpec): - csv_file = File(desc='Output CSV file containing columns ') + csv_file = File(desc="Output CSV file containing columns ") class MergeCSVFiles(BaseInterface): @@ -553,51 +604,49 @@ class MergeCSVFiles(BaseInterface): >>> mat2csv.inputs.column_headings = ['degree','clustering'] >>> mat2csv.run() # doctest: +SKIP """ + input_spec = MergeCSVFilesInputSpec output_spec = MergeCSVFilesOutputSpec def _run_interface(self, runtime): extraheadingBool = False - extraheading = '' + extraheading = "" rowheadingsBool = False """ This block defines the column headings. """ if isdefined(self.inputs.column_headings): - iflogger.info('Column headings have been provided:') + iflogger.info("Column headings have been provided:") headings = self.inputs.column_headings else: - iflogger.info( - 'Column headings not provided! Pulled from input filenames:') + iflogger.info("Column headings not provided! Pulled from input filenames:") headings = remove_identical_paths(self.inputs.in_files) if isdefined(self.inputs.extra_field): if isdefined(self.inputs.extra_column_heading): extraheading = self.inputs.extra_column_heading - iflogger.info('Extra column heading provided: %s', - extraheading) + iflogger.info("Extra column heading provided: %s", extraheading) else: - extraheading = 'type' - iflogger.info( - 'Extra column heading was not defined. Using "type"') + extraheading = "type" + iflogger.info('Extra column heading was not defined. Using "type"') headings.append(extraheading) extraheadingBool = True if len(self.inputs.in_files) == 1: - iflogger.warning('Only one file input!') + iflogger.warning("Only one file input!") if isdefined(self.inputs.row_headings): - iflogger.info('Row headings have been provided. Adding "labels"' - 'column header.') + iflogger.info( + 'Row headings have been provided. Adding "labels"' "column header." 
+ ) prefix = '"{p}","'.format(p=self.inputs.row_heading_title) - csv_headings = prefix + '","'.join( - itertools.chain(headings)) + '"\n' + csv_headings = prefix + '","'.join(itertools.chain(headings)) + '"\n' rowheadingsBool = True else: - iflogger.info('Row headings have not been provided.') + iflogger.info("Row headings have not been provided.") csv_headings = '"' + '","'.join(itertools.chain(headings)) + '"\n' - iflogger.info('Final Headings:') + iflogger.info("Final Headings:") iflogger.info(csv_headings) """ Next we merge the arrays and define the output text file @@ -605,18 +654,18 @@ def _run_interface(self, runtime): output_array = merge_csvs(self.inputs.in_files) _, name, ext = split_filename(self.inputs.out_file) - if not ext == '.csv': - ext = '.csv' + if not ext == ".csv": + ext = ".csv" out_file = op.abspath(name + ext) - with open(out_file, 'w') as file_handle: + with open(out_file, "w") as file_handle: file_handle.write(csv_headings) shape = np.shape(output_array) - typelist = maketypelist(rowheadingsBool, shape, extraheadingBool, - extraheading) - fmt, output = makefmtlist(output_array, typelist, rowheadingsBool, - shape, extraheadingBool) + typelist = maketypelist(rowheadingsBool, shape, extraheadingBool, extraheading) + fmt, output = makefmtlist( + output_array, typelist, rowheadingsBool, shape, extraheadingBool + ) if rowheadingsBool: row_heading_list = self.inputs.row_headings @@ -624,8 +673,8 @@ def _run_interface(self, runtime): for row_heading in row_heading_list: row_heading_with_quotes = '"' + row_heading + '"' row_heading_list_with_quotes.append(row_heading_with_quotes) - row_headings = np.array(row_heading_list_with_quotes, dtype='|S40') - output['heading'] = row_headings + row_headings = np.array(row_heading_list_with_quotes, dtype="|S40") + output["heading"] = row_headings if isdefined(self.inputs.extra_field): extrafieldlist = [] @@ -639,39 +688,37 @@ def _run_interface(self, runtime): output[extraheading] = extrafieldlist iflogger.info(output) iflogger.info(fmt) - with open(out_file, 'a') as file_handle: - np.savetxt(file_handle, output, fmt, delimiter=',') + with open(out_file, "a") as file_handle: + np.savetxt(file_handle, output, fmt, delimiter=",") return runtime def _list_outputs(self): outputs = self.output_spec().get() _, name, ext = split_filename(self.inputs.out_file) - if not ext == '.csv': - ext = '.csv' + if not ext == ".csv": + ext = ".csv" out_file = op.abspath(name + ext) - outputs['csv_file'] = out_file + outputs["csv_file"] = out_file return outputs class AddCSVColumnInputSpec(TraitedSpec): in_file = File( - exists=True, - mandatory=True, - desc='Input comma-separated value (CSV) files') + exists=True, mandatory=True, desc="Input comma-separated value (CSV) files" + ) out_file = File( - 'extra_heading.csv', - usedefault=True, - desc='Output filename for merged CSV file') - extra_column_heading = traits.Str( - desc='New heading to add for the added field.') + "extra_heading.csv", usedefault=True, desc="Output filename for merged CSV file" + ) + extra_column_heading = traits.Str(desc="New heading to add for the added field.") extra_field = traits.Str( - desc='New field to add to each row. This is useful for saving the\ - group or subject ID in the file.') + desc="New field to add to each row. This is useful for saving the\ + group or subject ID in the file." 
+ ) class AddCSVColumnOutputSpec(TraitedSpec): - csv_file = File(desc='Output CSV file containing columns ') + csv_file = File(desc="Output CSV file containing columns ") class AddCSVColumn(BaseInterface): @@ -687,25 +734,25 @@ class AddCSVColumn(BaseInterface): >>> addcol.inputs.extra_field = 'male' >>> addcol.run() # doctest: +SKIP """ + input_spec = AddCSVColumnInputSpec output_spec = AddCSVColumnOutputSpec def _run_interface(self, runtime): - in_file = open(self.inputs.in_file, 'r') + in_file = open(self.inputs.in_file, "r") _, name, ext = split_filename(self.inputs.out_file) - if not ext == '.csv': - ext = '.csv' + if not ext == ".csv": + ext = ".csv" out_file = op.abspath(name + ext) - out_file = open(out_file, 'w') + out_file = open(out_file, "w") firstline = in_file.readline() - firstline = firstline.replace('\n', '') - new_firstline = firstline + ',"' + \ - self.inputs.extra_column_heading + '"\n' + firstline = firstline.replace("\n", "") + new_firstline = firstline + ',"' + self.inputs.extra_column_heading + '"\n' out_file.write(new_firstline) for line in in_file: - new_line = line.replace('\n', '') - new_line = new_line + ',' + self.inputs.extra_field + '\n' + new_line = line.replace("\n", "") + new_line = new_line + "," + self.inputs.extra_field + "\n" out_file.write(new_line) in_file.close() out_file.close() @@ -714,16 +761,15 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self.output_spec().get() _, name, ext = split_filename(self.inputs.out_file) - if not ext == '.csv': - ext = '.csv' + if not ext == ".csv": + ext = ".csv" out_file = op.abspath(name + ext) - outputs['csv_file'] = out_file + outputs["csv_file"] = out_file return outputs class AddCSVRowInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): - in_file = File( - mandatory=True, desc='Input comma-separated value (CSV) files') + in_file = File(mandatory=True, desc="Input comma-separated value (CSV) files") _outputs = traits.Dict(traits.Any, value={}, usedefault=True) def __setattr__(self, key, value): @@ -738,7 +784,7 @@ def __setattr__(self, key, value): class AddCSVRowOutputSpec(TraitedSpec): - csv_file = File(desc='Output CSV file containing rows ') + csv_file = File(desc="Output CSV file containing rows ") class AddCSVRow(BaseInterface): @@ -765,6 +811,7 @@ class AddCSVRow(BaseInterface): >>> addrow.inputs.list_of_values = [ 0.4, 0.7, 0.3 ] >>> addrow.run() # doctest: +SKIP """ + input_spec = AddCSVRowInputSpec output_spec = AddCSVRowOutputSpec @@ -789,34 +836,40 @@ def _run_interface(self, runtime): try: import pandas as pd except ImportError as e: - raise ImportError('This interface requires pandas ' - '(http://pandas.pydata.org/) to run.') from e + raise ImportError( + "This interface requires pandas " "(http://pandas.pydata.org/) to run." 
+ ) from e try: from filelock import SoftFileLock + self._have_lock = True except ImportError: from warnings import warn - warn(('Python module filelock was not found: AddCSVRow will not be' - ' thread-safe in multi-processor execution')) + + warn( + ( + "Python module filelock was not found: AddCSVRow will not be" + " thread-safe in multi-processor execution" + ) + ) input_dict = {} for key, val in list(self.inputs._outputs.items()): # expand lists to several columns - if key == 'trait_added' and val in self.inputs.copyable_trait_names( - ): + if key == "trait_added" and val in self.inputs.copyable_trait_names(): continue if isinstance(val, list): for i, v in enumerate(val): - input_dict['%s_%d' % (key, i)] = v + input_dict["%s_%d" % (key, i)] = v else: input_dict[key] = val df = pd.DataFrame([input_dict]) if self._have_lock: - self._lock = SoftFileLock('%s.lock' % self.inputs.in_file) + self._lock = SoftFileLock("%s.lock" % self.inputs.in_file) # Acquire lock self._lock.acquire() @@ -825,7 +878,7 @@ def _run_interface(self, runtime): formerdf = pd.read_csv(self.inputs.in_file, index_col=0) df = pd.concat([formerdf, df], ignore_index=True) - with open(self.inputs.in_file, 'w') as f: + with open(self.inputs.in_file, "w") as f: df.to_csv(f) if self._have_lock: @@ -835,7 +888,7 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self.output_spec().get() - outputs['csv_file'] = self.inputs.in_file + outputs["csv_file"] = self.inputs.in_file return outputs def _outputs(self): @@ -849,16 +902,18 @@ class CalculateNormalizedMomentsInputSpec(TraitedSpec): timeseries_file = File( exists=True, mandatory=True, - desc='Text file with timeseries in columns and timepoints in rows,\ - whitespace separated') + desc="Text file with timeseries in columns and timepoints in rows,\ + whitespace separated", + ) moment = traits.Int( mandatory=True, desc="Define which moment should be calculated, 3 for skewness, 4 for\ - kurtosis.") + kurtosis.", + ) class CalculateNormalizedMomentsOutputSpec(TraitedSpec): - moments = traits.List(traits.Float(), desc='Moments') + moments = traits.List(traits.Float(), desc="Moments") class CalculateNormalizedMoments(BaseInterface): @@ -873,18 +928,18 @@ class CalculateNormalizedMoments(BaseInterface): >>> skew.inputs.timeseries_file = 'timeseries.txt' >>> skew.run() # doctest: +SKIP """ + input_spec = CalculateNormalizedMomentsInputSpec output_spec = CalculateNormalizedMomentsOutputSpec def _run_interface(self, runtime): - self._moments = calc_moments(self.inputs.timeseries_file, - self.inputs.moment) + self._moments = calc_moments(self.inputs.timeseries_file, self.inputs.moment) return runtime def _list_outputs(self): outputs = self.output_spec().get() - outputs['skewness'] = self._moments + outputs["skewness"] = self._moments return outputs @@ -897,42 +952,45 @@ def calc_moments(timeseries_file, moment): """ import scipy.stats as stats + timeseries = np.genfromtxt(timeseries_file) m2 = stats.moment(timeseries, 2, axis=0) m3 = stats.moment(timeseries, moment, axis=0) - zero = (m2 == 0) - return np.where(zero, 0, m3 / m2**(moment / 2.0)) + zero = m2 == 0 + return np.where(zero, 0, m3 / m2 ** (moment / 2.0)) class AddNoiseInputSpec(TraitedSpec): in_file = File( exists=True, mandatory=True, - desc='input image that will be corrupted with noise') + desc="input image that will be corrupted with noise", + ) in_mask = File( exists=True, - desc=('input mask, voxels outside this mask ' - 'will be considered background')) - snr = traits.Float(10.0, desc='desired 
output SNR in dB', usedefault=True) + desc=("input mask, voxels outside this mask " "will be considered background"), + ) + snr = traits.Float(10.0, desc="desired output SNR in dB", usedefault=True) dist = traits.Enum( - 'normal', - 'rician', + "normal", + "rician", usedefault=True, mandatory=True, - desc=('desired noise distribution')) + desc=("desired noise distribution"), + ) bg_dist = traits.Enum( - 'normal', - 'rayleigh', + "normal", + "rayleigh", usedefault=True, mandatory=True, - desc=('desired noise distribution, currently ' - 'only normal is implemented')) - out_file = File(desc='desired output filename') + desc=("desired noise distribution, currently " "only normal is implemented"), + ) + out_file = File(desc="desired output filename") class AddNoiseOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='corrupted image') + out_file = File(exists=True, desc="corrupted image") class AddNoise(BaseInterface): @@ -950,6 +1008,7 @@ class AddNoise(BaseInterface): >>> noise.run() # doctest: +SKIP """ + input_spec = AddNoiseInputSpec output_spec = AddNoiseOutputSpec @@ -968,7 +1027,8 @@ def _run_interface(self, runtime): mask=in_mask, snr_db=snr, dist=self.inputs.dist, - bg_dist=self.inputs.bg_dist) + bg_dist=self.inputs.bg_dist, + ) res_im = nb.Nifti1Image(result, in_image.affine, in_image.header) res_im.to_filename(self._gen_output_filename()) return runtime @@ -976,8 +1036,7 @@ def _run_interface(self, runtime): def _gen_output_filename(self): if not isdefined(self.inputs.out_file): _, base, ext = split_filename(self.inputs.in_file) - out_file = os.path.abspath('%s_SNR%03.2f%s' % - (base, self.inputs.snr, ext)) + out_file = os.path.abspath("%s_SNR%03.2f%s" % (base, self.inputs.snr, ext)) else: out_file = self.inputs.out_file @@ -985,20 +1044,16 @@ def _gen_output_filename(self): def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = self._gen_output_filename() + outputs["out_file"] = self._gen_output_filename() return outputs - def gen_noise(self, - image, - mask=None, - snr_db=10.0, - dist='normal', - bg_dist='normal'): + def gen_noise(self, image, mask=None, snr_db=10.0, dist="normal", bg_dist="normal"): """ Generates a copy of an image with a certain amount of added gaussian noise (rayleigh for background in mask) """ from math import sqrt + snr = sqrt(np.power(10.0, snr_db / 10.0)) if mask is None: @@ -1012,36 +1067,37 @@ def gen_noise(self, signal = image[mask > 0].reshape(-1) - if dist == 'normal': + if dist == "normal": signal = signal - signal.mean() sigma_n = sqrt(signal.var() / snr) noise = np.random.normal(size=image.shape, scale=sigma_n) - if (np.any(mask == 0)) and (bg_dist == 'rayleigh'): + if (np.any(mask == 0)) and (bg_dist == "rayleigh"): bg_noise = np.random.rayleigh(size=image.shape, scale=sigma_n) noise[mask == 0] = bg_noise[mask == 0] im_noise = image + noise - elif dist == 'rician': + elif dist == "rician": sigma_n = signal.mean() / snr n_1 = np.random.normal(size=image.shape, scale=sigma_n) n_2 = np.random.normal(size=image.shape, scale=sigma_n) stde_1 = n_1 / sqrt(2.0) stde_2 = n_2 / sqrt(2.0) - im_noise = np.sqrt((image + stde_1)**2 + (stde_2)**2) + im_noise = np.sqrt((image + stde_1) ** 2 + (stde_2) ** 2) else: - raise NotImplementedError(('Only normal and rician distributions ' - 'are supported')) + raise NotImplementedError( + ("Only normal and rician distributions " "are supported") + ) return im_noise class NormalizeProbabilityMapSetInputSpec(TraitedSpec): in_files = InputMultiPath( - File(exists=True, mandatory=True, 
desc='The tpms to be normalized')) - in_mask = File( - exists=True, desc='Masked voxels must sum up 1.0, 0.0 otherwise.') + File(exists=True, mandatory=True, desc="The tpms to be normalized") + ) + in_mask = File(exists=True, desc="Masked voxels must sum up 1.0, 0.0 otherwise.") class NormalizeProbabilityMapSetOutputSpec(TraitedSpec): @@ -1065,6 +1121,7 @@ class NormalizeProbabilityMapSet(BaseInterface): >>> normalize.inputs.in_mask = 'tpms_msk.nii.gz' >>> normalize.run() # doctest: +SKIP """ + input_spec = NormalizeProbabilityMapSetInputSpec output_spec = NormalizeProbabilityMapSetOutputSpec @@ -1079,23 +1136,24 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_files'] = self._out_filenames + outputs["out_files"] = self._out_filenames return outputs class SplitROIsInputSpec(TraitedSpec): - in_file = File(exists=True, mandatory=True, desc='file to be splitted') - in_mask = File(exists=True, desc='only process files inside mask') - roi_size = traits.Tuple( - traits.Int, traits.Int, traits.Int, desc='desired ROI size') + in_file = File(exists=True, mandatory=True, desc="file to be splitted") + in_mask = File(exists=True, desc="only process files inside mask") + roi_size = traits.Tuple(traits.Int, traits.Int, traits.Int, desc="desired ROI size") class SplitROIsOutputSpec(TraitedSpec): - out_files = OutputMultiPath(File(exists=True), desc='the resulting ROIs') + out_files = OutputMultiPath(File(exists=True), desc="the resulting ROIs") out_masks = OutputMultiPath( - File(exists=True), desc='a mask indicating valid values') + File(exists=True), desc="a mask indicating valid values" + ) out_index = OutputMultiPath( - File(exists=True), desc='arrays keeping original locations') + File(exists=True), desc="arrays keeping original locations" + ) class SplitROIs(BaseInterface): @@ -1113,6 +1171,7 @@ class SplitROIs(BaseInterface): >>> rois.run() # doctest: +SKIP """ + input_spec = SplitROIsInputSpec output_spec = SplitROIsOutputSpec @@ -1127,9 +1186,9 @@ def _run_interface(self, runtime): roisize = self.inputs.roi_size res = split_rois(self.inputs.in_file, mask, roisize) - self._outnames['out_files'] = res[0] - self._outnames['out_masks'] = res[1] - self._outnames['out_index'] = res[2] + self._outnames["out_files"] = res[0] + self._outnames["out_masks"] = res[1] + self._outnames["out_index"] = res[2] return runtime def _list_outputs(self): @@ -1141,15 +1200,16 @@ def _list_outputs(self): class MergeROIsInputSpec(TraitedSpec): in_files = InputMultiPath( - File(exists=True, mandatory=True, desc='files to be re-merged')) + File(exists=True, mandatory=True, desc="files to be re-merged") + ) in_index = InputMultiPath( - File(exists=True, mandatory=True), - desc='array keeping original locations') - in_reference = File(exists=True, desc='reference file') + File(exists=True, mandatory=True), desc="array keeping original locations" + ) + in_reference = File(exists=True, desc="reference file") class MergeROIsOutputSpec(TraitedSpec): - merged_file = File(exists=True, desc='the recomposed file') + merged_file = File(exists=True, desc="the recomposed file") class MergeROIs(BaseInterface): @@ -1168,18 +1228,20 @@ class MergeROIs(BaseInterface): >>> rois.run() # doctest: +SKIP """ + input_spec = MergeROIsInputSpec output_spec = MergeROIsOutputSpec def _run_interface(self, runtime): - res = merge_rois(self.inputs.in_files, self.inputs.in_index, - self.inputs.in_reference) + res = merge_rois( + self.inputs.in_files, self.inputs.in_index, 
self.inputs.in_reference + ) self._merged = res return runtime def _list_outputs(self): outputs = self.output_spec().get() - outputs['merged_file'] = self._merged + outputs["merged_file"] = self._merged return outputs @@ -1200,11 +1262,11 @@ def normalize_tpms(in_files, in_mask=None, out_files=None): if len(out_files) != len(in_files): for i, finname in enumerate(in_files): fname, fext = op.splitext(op.basename(finname)) - if fext == '.gz': + if fext == ".gz": fname, fext2 = op.splitext(fname) fext = fext2 + fext - out_file = op.abspath('%s_norm_%02d%s' % (fname, i, fext)) + out_file = op.abspath("%s_norm_%02d%s" % (fname, i, fext)) out_files += [out_file] imgs = [nb.load(fim, mmap=NUMPY_MMAP) for fim in in_files] @@ -1213,11 +1275,12 @@ def normalize_tpms(in_files, in_mask=None, out_files=None): img_data = imgs[0].get_data() img_data[img_data > 0.0] = 1.0 hdr = imgs[0].header.copy() - hdr['data_type'] = 16 + hdr["data_type"] = 16 hdr.set_data_dtype(np.float32) nb.save( nb.Nifti1Image(img_data.astype(np.float32), imgs[0].affine, hdr), - out_files[0]) + out_files[0], + ) return out_files[0] img_data = np.array([im.get_data() for im in imgs]).astype(np.float32) @@ -1239,11 +1302,11 @@ def normalize_tpms(in_files, in_mask=None, out_files=None): data = np.ma.masked_equal(img_data[i], 0) probmap = data / weights hdr = imgs[i].header.copy() - hdr['data_type'] = 16 - hdr.set_data_dtype('float32') + hdr["data_type"] = 16 + hdr.set_data_dtype("float32") nb.save( - nb.Nifti1Image(probmap.astype(np.float32), imgs[i].affine, hdr), - out_file) + nb.Nifti1Image(probmap.astype(np.float32), imgs[i].affine, hdr), out_file + ) return out_files @@ -1283,9 +1346,10 @@ def split_rois(in_file, mask=None, roishape=None): data = np.squeeze(data.take(nzels, axis=0)) nvols = data.shape[-1] - roidefname = op.abspath('onesmask.nii.gz') - nb.Nifti1Image(np.ones(roishape, dtype=np.uint8), None, - None).to_filename(roidefname) + roidefname = op.abspath("onesmask.nii.gz") + nb.Nifti1Image(np.ones(roishape, dtype=np.uint8), None, None).to_filename( + roidefname + ) out_files = [] out_mask = [] @@ -1301,24 +1365,25 @@ def split_rois(in_file, mask=None, roishape=None): last = els droi = data[first:last, ...] 
- iname = op.abspath('roi%010d_idx' % i) - out_idxs.append(iname + '.npz') - np.savez(iname, (nzels[0][first:last], )) + iname = op.abspath("roi%010d_idx" % i) + out_idxs.append(iname + ".npz") + np.savez(iname, (nzels[0][first:last],)) if fill > 0: - droi = np.vstack((droi, - np.zeros( - (int(fill), int(nvols)), dtype=np.float32))) - partialmsk = np.ones((roisize, ), dtype=np.uint8) - partialmsk[-int(fill):] = 0 - partname = op.abspath('partialmask.nii.gz') - nb.Nifti1Image(partialmsk.reshape(roishape), None, - None).to_filename(partname) + droi = np.vstack( + (droi, np.zeros((int(fill), int(nvols)), dtype=np.float32)) + ) + partialmsk = np.ones((roisize,), dtype=np.uint8) + partialmsk[-int(fill) :] = 0 + partname = op.abspath("partialmask.nii.gz") + nb.Nifti1Image(partialmsk.reshape(roishape), None, None).to_filename( + partname + ) out_mask.append(partname) else: out_mask.append(roidefname) - fname = op.abspath('roi%010d.nii.gz' % i) + fname = op.abspath("roi%010d.nii.gz" % i) nb.Nifti1Image(droi.reshape(droishape), None, None).to_filename(fname) out_files.append(fname) return out_files, out_mask, out_idxs @@ -1334,17 +1399,17 @@ def merge_rois(in_files, in_idxs, in_ref, dtype=None, out_file=None): import subprocess as sp if out_file is None: - out_file = op.abspath('merged.nii.gz') + out_file = op.abspath("merged.nii.gz") if dtype is None: dtype = np.float32 # if file is compressed, uncompress using os # to avoid memory errors - if op.splitext(in_ref)[1] == '.gz': + if op.splitext(in_ref)[1] == ".gz": try: - iflogger.info('uncompress %i', in_ref) - sp.check_call(['gunzip', in_ref], stdout=sp.PIPE, shell=True) + iflogger.info("uncompress %i", in_ref) + sp.check_call(["gunzip", in_ref], stdout=sp.PIPE, shell=True) in_ref = op.splitext(in_ref)[0] except: pass @@ -1363,50 +1428,54 @@ def merge_rois(in_files, in_idxs, in_ref, dtype=None, out_file=None): ndirs = 1 newshape = (rsh[0], rsh[1], rsh[2], ndirs) hdr.set_data_dtype(dtype) - hdr.set_xyzt_units('mm', 'sec') + hdr.set_xyzt_units("mm", "sec") if ndirs < 300: data = np.zeros((npix, ndirs)) for cname, iname in zip(in_files, in_idxs): f = np.load(iname) - idxs = np.squeeze(f['arr_0']) - cdata = nb.load( - cname, mmap=NUMPY_MMAP).get_data().reshape(-1, ndirs) + idxs = np.squeeze(f["arr_0"]) + cdata = nb.load(cname, mmap=NUMPY_MMAP).get_data().reshape(-1, ndirs) nels = len(idxs) - idata = (idxs, ) + idata = (idxs,) try: data[idata, ...] = cdata[0:nels, ...] 
except: - print(('Consistency between indexes and chunks was ' - 'lost: data=%s, chunk=%s') % (str(data.shape), - str(cdata.shape))) + print( + ( + "Consistency between indexes and chunks was " + "lost: data=%s, chunk=%s" + ) + % (str(data.shape), str(cdata.shape)) + ) raise hdr.set_data_shape(newshape) - nb.Nifti1Image(data.reshape(newshape).astype(dtype), aff, - hdr).to_filename(out_file) + nb.Nifti1Image(data.reshape(newshape).astype(dtype), aff, hdr).to_filename( + out_file + ) else: hdr.set_data_shape(rsh[:3]) nii = [] for d in range(ndirs): - fname = op.abspath('vol%06d.nii' % d) + fname = op.abspath("vol%06d.nii" % d) nb.Nifti1Image(np.zeros(rsh[:3]), aff, hdr).to_filename(fname) nii.append(fname) for cname, iname in zip(in_files, in_idxs): f = np.load(iname) - idxs = np.squeeze(f['arr_0']) + idxs = np.squeeze(f["arr_0"]) for d, fname in enumerate(nii): data = nb.load(fname, mmap=NUMPY_MMAP).get_data().reshape(-1) - cdata = nb.load( - cname, mmap=NUMPY_MMAP).get_data().reshape(-1, ndirs)[:, d] + cdata = ( + nb.load(cname, mmap=NUMPY_MMAP).get_data().reshape(-1, ndirs)[:, d] + ) nels = len(idxs) - idata = (idxs, ) + idata = (idxs,) data[idata] = cdata[0:nels] - nb.Nifti1Image(data.reshape(rsh[:3]), aff, - hdr).to_filename(fname) + nb.Nifti1Image(data.reshape(rsh[:3]), aff, hdr).to_filename(fname) imgs = [nb.load(im, mmap=NUMPY_MMAP) for im in nii] allim = nb.concat_images(imgs) @@ -1420,15 +1489,17 @@ class CalculateMedianInputSpec(BaseInterfaceInputSpec): File( exists=True, mandatory=True, - desc="One or more realigned Nifti 4D timeseries")) + desc="One or more realigned Nifti 4D timeseries", + ) + ) median_file = traits.Str(desc="Filename prefix to store median images") median_per_file = traits.Bool( - False, usedefault=True, desc="Calculate a median file for each Nifti") + False, usedefault=True, desc="Calculate a median file for each Nifti" + ) class CalculateMedianOutputSpec(TraitedSpec): - median_files = OutputMultiPath( - File(exists=True), desc="One or more median images") + median_files = OutputMultiPath(File(exists=True), desc="One or more median images") class CalculateMedian(BaseInterface): @@ -1444,6 +1515,7 @@ class CalculateMedian(BaseInterface): >>> mean.run() # doctest: +SKIP """ + input_spec = CalculateMedianInputSpec output_spec = CalculateMedianOutputSpec @@ -1460,20 +1532,20 @@ def _gen_fname(self, suffix, idx=None, ext=None): else: in_file = self.inputs.in_files fname, in_ext = op.splitext(op.basename(in_file)) - if in_ext == '.gz': + if in_ext == ".gz": fname, in_ext2 = op.splitext(fname) in_ext = in_ext2 + in_ext if ext is None: ext = in_ext - if ext.startswith('.'): + if ext.startswith("."): ext = ext[1:] if self.inputs.median_file: outname = self.inputs.median_file else: - outname = '{}_{}'.format(fname, suffix) + outname = "{}_{}".format(fname, suffix) if idx: outname += str(idx) - return op.abspath('{}.{}'.format(outname, ext)) + return op.abspath("{}.{}".format(outname, ext)) def _run_interface(self, runtime): total = None @@ -1494,16 +1566,15 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self._outputs().get() - outputs['median_files'] = self._median_files + outputs["median_files"] = self._median_files return outputs - def _write_nifti(self, img, data, idx, suffix='median'): + def _write_nifti(self, img, data, idx, suffix="median"): if self.inputs.median_per_file: median_img = nb.Nifti1Image(data, img.affine, img.header) filename = self._gen_fname(suffix, idx=idx) else: - median_img = nb.Nifti1Image(data / (idx + 1), img.affine, 
- img.header) + median_img = nb.Nifti1Image(data / (idx + 1), img.affine, img.header) filename = self._gen_fname(suffix) median_img.to_filename(filename) return filename @@ -1521,9 +1592,13 @@ class Distance(nam.Distance): def __init__(self, **inputs): super(nam.Distance, self).__init__(**inputs) - warnings.warn(("This interface has been deprecated since 0.10.0," - " please use nipype.algorithms.metrics.Distance"), - DeprecationWarning) + warnings.warn( + ( + "This interface has been deprecated since 0.10.0," + " please use nipype.algorithms.metrics.Distance" + ), + DeprecationWarning, + ) class Overlap(nam.Overlap): @@ -1535,9 +1610,13 @@ class Overlap(nam.Overlap): def __init__(self, **inputs): super(nam.Overlap, self).__init__(**inputs) - warnings.warn(("This interface has been deprecated since 0.10.0," - " please use nipype.algorithms.metrics.Overlap"), - DeprecationWarning) + warnings.warn( + ( + "This interface has been deprecated since 0.10.0," + " please use nipype.algorithms.metrics.Overlap" + ), + DeprecationWarning, + ) class FuzzyOverlap(nam.FuzzyOverlap): @@ -1550,9 +1629,13 @@ class FuzzyOverlap(nam.FuzzyOverlap): def __init__(self, **inputs): super(nam.FuzzyOverlap, self).__init__(**inputs) - warnings.warn(("This interface has been deprecated since 0.10.0," - " please use nipype.algorithms.metrics.FuzzyOverlap"), - DeprecationWarning) + warnings.warn( + ( + "This interface has been deprecated since 0.10.0," + " please use nipype.algorithms.metrics.FuzzyOverlap" + ), + DeprecationWarning, + ) class TSNR(confounds.TSNR): @@ -1563,6 +1646,10 @@ class TSNR(confounds.TSNR): def __init__(self, **inputs): super(confounds.TSNR, self).__init__(**inputs) - warnings.warn(("This interface has been moved since 0.12.0," - " please use nipype.algorithms.confounds.TSNR"), - UserWarning) + warnings.warn( + ( + "This interface has been moved since 0.12.0," + " please use nipype.algorithms.confounds.TSNR" + ), + UserWarning, + ) diff --git a/nipype/algorithms/modelgen.py b/nipype/algorithms/modelgen.py index 9beb0f031d..2457fe8d2f 100644 --- a/nipype/algorithms/modelgen.py +++ b/nipype/algorithms/modelgen.py @@ -18,13 +18,21 @@ import numpy as np from ..utils import NUMPY_MMAP -from ..interfaces.base import (BaseInterface, TraitedSpec, InputMultiPath, - traits, File, Bunch, BaseInterfaceInputSpec, - isdefined) +from ..interfaces.base import ( + BaseInterface, + TraitedSpec, + InputMultiPath, + traits, + File, + Bunch, + BaseInterfaceInputSpec, + isdefined, +) from ..utils.filemanip import ensure_list from ..utils.misc import normalize_mc_params from .. 
import config, logging -iflogger = logging.getLogger('nipype.interface') + +iflogger = logging.getLogger("nipype.interface") def gcd(a, b): @@ -80,17 +88,22 @@ def spm_hrf(RT, P=None, fMRI_T=16): """ from scipy.special import gammaln + p = np.array([6, 16, 1, 1, 6, 0, 32], dtype=float) if P is not None: - p[0:len(P)] = P + p[0 : len(P)] = P - _spm_Gpdf = lambda x, h, l: np.exp(h * np.log(l) + (h - 1) * np.log(x) - (l * x) - gammaln(h)) + _spm_Gpdf = lambda x, h, l: np.exp( + h * np.log(l) + (h - 1) * np.log(x) - (l * x) - gammaln(h) + ) # modelled hemodynamic response function - {mixture of Gammas} dt = RT / float(fMRI_T) u = np.arange(0, int(p[6] / dt + 1)) - p[5] / dt - with np.errstate(divide='ignore'): # Known division-by-zero - hrf = _spm_Gpdf(u, p[0] / p[2], dt / p[2]) - _spm_Gpdf( - u, p[1] / p[3], dt / p[3]) / p[4] + with np.errstate(divide="ignore"): # Known division-by-zero + hrf = ( + _spm_Gpdf(u, p[0] / p[2], dt / p[2]) + - _spm_Gpdf(u, p[1] / p[3], dt / p[3]) / p[4] + ) idx = np.arange(0, int((p[6] / RT) + 1)) * fMRI_T hrf = hrf[idx] hrf = hrf / np.sum(hrf) @@ -130,21 +143,20 @@ def scale_timings(timelist, input_units, output_units, time_repetition): """ if input_units == output_units: - _scalefactor = 1. + _scalefactor = 1.0 - if (input_units == 'scans') and (output_units == 'secs'): + if (input_units == "scans") and (output_units == "secs"): _scalefactor = time_repetition - if (input_units == 'secs') and (output_units == 'scans'): - _scalefactor = 1. / time_repetition - timelist = [np.max([0., _scalefactor * t]) for t in timelist] + if (input_units == "secs") and (output_units == "scans"): + _scalefactor = 1.0 / time_repetition + timelist = [np.max([0.0, _scalefactor * t]) for t in timelist] return timelist -def bids_gen_info(bids_event_files, - condition_column='', - amplitude_column=None, - time_repetition=False, - ): + +def bids_gen_info( + bids_event_files, condition_column="", amplitude_column=None, time_repetition=False, +): """Generate subject_info structure from a list of BIDS .tsv event files. 
Parameters @@ -169,22 +181,22 @@ def bids_gen_info(bids_event_files, info = [] for bids_event_file in bids_event_files: with open(bids_event_file) as f: - f_events = csv.DictReader(f, skipinitialspace=True, delimiter='\t') + f_events = csv.DictReader(f, skipinitialspace=True, delimiter="\t") events = [{k: v for k, v in row.items()} for row in f_events] if not condition_column: - condition_column = '_trial_type' + condition_column = "_trial_type" for i in events: - i.update({condition_column: 'ev0'}) + i.update({condition_column: "ev0"}) conditions = sorted(set([i[condition_column] for i in events])) runinfo = Bunch(conditions=[], onsets=[], durations=[], amplitudes=[]) for condition in conditions: - selected_events = [i for i in events if i[condition_column]==condition] - onsets = [float(i['onset']) for i in selected_events] - durations = [float(i['duration']) for i in selected_events] + selected_events = [i for i in events if i[condition_column] == condition] + onsets = [float(i["onset"]) for i in selected_events] + durations = [float(i["duration"]) for i in selected_events] if time_repetition: decimals = math.ceil(-math.log10(time_repetition)) onsets = [np.round(i, decimals) for i in onsets] - durations = [np.round(i ,decimals) for i in durations] + durations = [np.round(i, decimals) for i in durations] runinfo.conditions.append(condition) runinfo.onsets.append(onsets) runinfo.durations.append(durations) @@ -205,10 +217,10 @@ def gen_info(run_event_files): runinfo = Bunch(conditions=[], onsets=[], durations=[], amplitudes=[]) for event_file in event_files: _, name = os.path.split(event_file) - if '.run' in name: - name, _ = name.split('.run%03d' % (i + 1)) - elif '.txt' in name: - name, _ = name.split('.txt') + if ".run" in name: + name, _ = name.split(".run%03d" % (i + 1)) + elif ".txt" in name: + name, _ = name.split(".txt") runinfo.conditions.append(name) event_info = np.atleast_2d(np.loadtxt(event_file)) @@ -221,7 +233,7 @@ def gen_info(run_event_files): if event_info.shape[1] > 2: runinfo.amplitudes.append(event_info[:, 2].tolist()) else: - delattr(runinfo, 'amplitudes') + delattr(runinfo, "amplitudes") info.append(runinfo) return info @@ -230,38 +242,43 @@ class SpecifyModelInputSpec(BaseInterfaceInputSpec): subject_info = InputMultiPath( Bunch, mandatory=True, - xor=['subject_info', 'event_files', 'bids_event_file'], - desc='Bunch or List(Bunch) subject-specific ' - 'condition information. see ' - ':ref:`SpecifyModel` or ' - 'SpecifyModel.__doc__ for details') + xor=["subject_info", "event_files", "bids_event_file"], + desc="Bunch or List(Bunch) subject-specific " + "condition information. 
see " + ":ref:`SpecifyModel` or " + "SpecifyModel.__doc__ for details", + ) event_files = InputMultiPath( traits.List(File(exists=True)), mandatory=True, - xor=['subject_info', 'event_files', 'bids_event_file'], - desc='List of event description files 1, 2 or 3 ' - 'column format corresponding to onsets, ' - 'durations and amplitudes') + xor=["subject_info", "event_files", "bids_event_file"], + desc="List of event description files 1, 2 or 3 " + "column format corresponding to onsets, " + "durations and amplitudes", + ) bids_event_file = InputMultiPath( File(exists=True), mandatory=True, - xor=['subject_info', 'event_files', 'bids_event_file'], - desc='TSV event file containing common BIDS fields: `onset`,' - '`duration`, and categorization and amplitude columns') + xor=["subject_info", "event_files", "bids_event_file"], + desc="TSV event file containing common BIDS fields: `onset`," + "`duration`, and categorization and amplitude columns", + ) bids_condition_column = traits.Str( - default_value='trial_type', + default_value="trial_type", usedefault=True, - desc='Column of the file passed to `bids_event_file` to the ' - 'unique values of which events will be assigned' - 'to regressors') + desc="Column of the file passed to `bids_event_file` to the " + "unique values of which events will be assigned" + "to regressors", + ) bids_amplitude_column = traits.Str( - desc='Column of the file passed to `bids_event_file` ' - 'according to which to assign amplitudes to events') + desc="Column of the file passed to `bids_event_file` " + "according to which to assign amplitudes to events" + ) realignment_parameters = InputMultiPath( File(exists=True), - desc='Realignment parameters returned ' - 'by motion correction algorithm', - copyfile=False) + desc="Realignment parameters returned " "by motion correction algorithm", + copyfile=False, + ) parameter_source = traits.Enum( "SPM", "FSL", @@ -269,38 +286,43 @@ class SpecifyModelInputSpec(BaseInterfaceInputSpec): "FSFAST", "NIPY", usedefault=True, - desc="Source of motion parameters") + desc="Source of motion parameters", + ) outlier_files = InputMultiPath( File(exists=True), - desc='Files containing scan outlier indices ' - 'that should be tossed', - copyfile=False) + desc="Files containing scan outlier indices " "that should be tossed", + copyfile=False, + ) functional_runs = InputMultiPath( traits.Either(traits.List(File(exists=True)), File(exists=True)), mandatory=True, - desc='Data files for model. List of 4D ' - 'files or list of list of 3D ' - 'files per session', - copyfile=False) + desc="Data files for model. List of 4D " + "files or list of list of 3D " + "files per session", + copyfile=False, + ) input_units = traits.Enum( - 'secs', - 'scans', + "secs", + "scans", mandatory=True, - desc='Units of event onsets and durations (secs ' - 'or scans). Output units are always in secs') + desc="Units of event onsets and durations (secs " + "or scans). 
Output units are always in secs", + ) high_pass_filter_cutoff = traits.Float( - mandatory=True, desc='High-pass filter cutoff in secs') + mandatory=True, desc="High-pass filter cutoff in secs" + ) time_repetition = traits.Float( mandatory=True, - desc='Time between the start of one volume ' - 'to the start of the next image volume.') + desc="Time between the start of one volume " + "to the start of the next image volume.", + ) # Not implemented yet # polynomial_order = traits.Range(0, low=0, # desc ='Number of polynomial functions to model high pass filter.') class SpecifyModelOutputSpec(TraitedSpec): - session_info = traits.Any(desc='Session info for level1designs') + session_info = traits.Any(desc="Session info for level1designs") class SpecifyModel(BaseInterface): @@ -368,107 +390,109 @@ class SpecifyModel(BaseInterface): >>> s.inputs.subject_info = [evs_run2, evs_run3] """ + input_spec = SpecifyModelInputSpec output_spec = SpecifyModelOutputSpec - def _generate_standard_design(self, - infolist, - functional_runs=None, - realignment_parameters=None, - outliers=None): + def _generate_standard_design( + self, infolist, functional_runs=None, realignment_parameters=None, outliers=None + ): """ Generates a standard design matrix paradigm given information about each run """ sessinfo = [] - output_units = 'secs' - if 'output_units' in self.inputs.traits(): + output_units = "secs" + if "output_units" in self.inputs.traits(): output_units = self.inputs.output_units for i, info in enumerate(infolist): sessinfo.insert(i, dict(cond=[])) if isdefined(self.inputs.high_pass_filter_cutoff): - sessinfo[i]['hpf'] = \ - np.float(self.inputs.high_pass_filter_cutoff) + sessinfo[i]["hpf"] = np.float(self.inputs.high_pass_filter_cutoff) - if hasattr(info, 'conditions') and info.conditions is not None: + if hasattr(info, "conditions") and info.conditions is not None: for cid, cond in enumerate(info.conditions): - sessinfo[i]['cond'].insert(cid, dict()) - sessinfo[i]['cond'][cid]['name'] = info.conditions[cid] + sessinfo[i]["cond"].insert(cid, dict()) + sessinfo[i]["cond"][cid]["name"] = info.conditions[cid] scaled_onset = scale_timings( - info.onsets[cid], self.inputs.input_units, - output_units, self.inputs.time_repetition) - sessinfo[i]['cond'][cid]['onset'] = scaled_onset + info.onsets[cid], + self.inputs.input_units, + output_units, + self.inputs.time_repetition, + ) + sessinfo[i]["cond"][cid]["onset"] = scaled_onset scaled_duration = scale_timings( - info.durations[cid], self.inputs.input_units, - output_units, self.inputs.time_repetition) - sessinfo[i]['cond'][cid]['duration'] = scaled_duration - if hasattr(info, 'amplitudes') and info.amplitudes: - sessinfo[i]['cond'][cid]['amplitudes'] = \ - info.amplitudes[cid] - - if hasattr(info, 'tmod') and info.tmod and \ - len(info.tmod) > cid: - sessinfo[i]['cond'][cid]['tmod'] = info.tmod[cid] - - if hasattr(info, 'pmod') and info.pmod and \ - len(info.pmod) > cid: + info.durations[cid], + self.inputs.input_units, + output_units, + self.inputs.time_repetition, + ) + sessinfo[i]["cond"][cid]["duration"] = scaled_duration + if hasattr(info, "amplitudes") and info.amplitudes: + sessinfo[i]["cond"][cid]["amplitudes"] = info.amplitudes[cid] + + if hasattr(info, "tmod") and info.tmod and len(info.tmod) > cid: + sessinfo[i]["cond"][cid]["tmod"] = info.tmod[cid] + + if hasattr(info, "pmod") and info.pmod and len(info.pmod) > cid: if info.pmod[cid]: - sessinfo[i]['cond'][cid]['pmod'] = [] + sessinfo[i]["cond"][cid]["pmod"] = [] for j, name in 
enumerate(info.pmod[cid].name): - sessinfo[i]['cond'][cid]['pmod'].insert(j, {}) - sessinfo[i]['cond'][cid]['pmod'][j]['name'] = \ - name - sessinfo[i]['cond'][cid]['pmod'][j]['poly'] = \ - info.pmod[cid].poly[j] - sessinfo[i]['cond'][cid]['pmod'][j]['param'] = \ - info.pmod[cid].param[j] - - sessinfo[i]['regress'] = [] - if hasattr(info, 'regressors') and info.regressors is not None: + sessinfo[i]["cond"][cid]["pmod"].insert(j, {}) + sessinfo[i]["cond"][cid]["pmod"][j]["name"] = name + sessinfo[i]["cond"][cid]["pmod"][j]["poly"] = info.pmod[ + cid + ].poly[j] + sessinfo[i]["cond"][cid]["pmod"][j][ + "param" + ] = info.pmod[cid].param[j] + + sessinfo[i]["regress"] = [] + if hasattr(info, "regressors") and info.regressors is not None: for j, r in enumerate(info.regressors): - sessinfo[i]['regress'].insert(j, dict(name='', val=[])) - if hasattr(info, 'regressor_names') and \ - info.regressor_names is not None: - sessinfo[i]['regress'][j]['name'] = \ - info.regressor_names[j] + sessinfo[i]["regress"].insert(j, dict(name="", val=[])) + if ( + hasattr(info, "regressor_names") + and info.regressor_names is not None + ): + sessinfo[i]["regress"][j]["name"] = info.regressor_names[j] else: - sessinfo[i]['regress'][j]['name'] = 'UR%d' % (j + 1) - sessinfo[i]['regress'][j]['val'] = info.regressors[j] - sessinfo[i]['scans'] = functional_runs[i] + sessinfo[i]["regress"][j]["name"] = "UR%d" % (j + 1) + sessinfo[i]["regress"][j]["val"] = info.regressors[j] + sessinfo[i]["scans"] = functional_runs[i] if realignment_parameters is not None: for i, rp in enumerate(realignment_parameters): mc = realignment_parameters[i] for col in range(mc.shape[1]): - colidx = len(sessinfo[i]['regress']) - sessinfo[i]['regress'].insert(colidx, dict( - name='', val=[])) - sessinfo[i]['regress'][colidx]['name'] = 'Realign%d' % ( - col + 1) - sessinfo[i]['regress'][colidx]['val'] = mc[:, col].tolist() + colidx = len(sessinfo[i]["regress"]) + sessinfo[i]["regress"].insert(colidx, dict(name="", val=[])) + sessinfo[i]["regress"][colidx]["name"] = "Realign%d" % (col + 1) + sessinfo[i]["regress"][colidx]["val"] = mc[:, col].tolist() if outliers is not None: for i, out in enumerate(outliers): numscans = 0 - for f in ensure_list(sessinfo[i]['scans']): + for f in ensure_list(sessinfo[i]["scans"]): shape = load(f, mmap=NUMPY_MMAP).shape if len(shape) == 3 or shape[3] == 1: - iflogger.warning('You are using 3D instead of 4D ' - 'files. Are you sure this was ' - 'intended?') + iflogger.warning( + "You are using 3D instead of 4D " + "files. Are you sure this was " + "intended?" 
+ ) numscans += 1 else: numscans += shape[3] for j, scanno in enumerate(out): - colidx = len(sessinfo[i]['regress']) - sessinfo[i]['regress'].insert(colidx, dict( - name='', val=[])) - sessinfo[i]['regress'][colidx]['name'] = 'Outlier%d' % ( - j + 1) - sessinfo[i]['regress'][colidx]['val'] = \ - np.zeros((1, numscans))[0].tolist() - sessinfo[i]['regress'][colidx]['val'][int(scanno)] = 1 + colidx = len(sessinfo[i]["regress"]) + sessinfo[i]["regress"].insert(colidx, dict(name="", val=[])) + sessinfo[i]["regress"][colidx]["name"] = "Outlier%d" % (j + 1) + sessinfo[i]["regress"][colidx]["val"] = np.zeros((1, numscans))[ + 0 + ].tolist() + sessinfo[i]["regress"][colidx]["val"][int(scanno)] = 1 return sessinfo def _generate_design(self, infolist=None): @@ -482,7 +506,9 @@ def _generate_design(self, infolist=None): func1d=normalize_mc_params, axis=1, arr=np.loadtxt(parfile), - source=self.inputs.parameter_source)) + source=self.inputs.parameter_source, + ) + ) outliers = [] if isdefined(self.inputs.outlier_files): for filename in self.inputs.outlier_files: @@ -507,12 +533,13 @@ def _generate_design(self, infolist=None): self.inputs.bids_condition_column, self.inputs.bids_amplitude_column, self.inputs.time_repetition, - ) + ) self._sessinfo = self._generate_standard_design( infolist, functional_runs=self.inputs.functional_runs, realignment_parameters=realignment_parameters, - outliers=outliers) + outliers=outliers, + ) def _run_interface(self, runtime): """ @@ -523,9 +550,9 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self._outputs().get() - if not hasattr(self, '_sessinfo'): + if not hasattr(self, "_sessinfo"): self._generate_design() - outputs['session_info'] = self._sessinfo + outputs["session_info"] = self._sessinfo return outputs @@ -534,14 +561,14 @@ class SpecifySPMModelInputSpec(SpecifyModelInputSpec): concatenate_runs = traits.Bool( False, usedefault=True, - desc='Concatenate all runs to look like a ' - 'single session.') + desc="Concatenate all runs to look like a " "single session.", + ) output_units = traits.Enum( - 'secs', - 'scans', + "secs", + "scans", usedefault=True, - desc='Units of design event onsets and durations ' - '(secs or scans)') + desc="Units of design event onsets and durations " "(secs or scans)", + ) class SpecifySPMModel(SpecifyModel): @@ -580,7 +607,7 @@ def _concatenate_info(self, infolist): img = load(f, mmap=NUMPY_MMAP) numscans = img.shape[3] else: - raise Exception('Functional input not specified correctly') + raise Exception("Functional input not specified correctly") nscans.insert(i, numscans) # now combine all fields into 1 @@ -589,63 +616,68 @@ def _concatenate_info(self, infolist): infoout = infolist[0] for j, val in enumerate(infolist[0].durations): if len(infolist[0].onsets[j]) > 1 and len(val) == 1: - infoout.durations[j] = ( - infolist[0].durations[j] * len(infolist[0].onsets[j])) + infoout.durations[j] = infolist[0].durations[j] * len( + infolist[0].onsets[j] + ) for i, info in enumerate(infolist[1:]): # info.[conditions, tmod] remain the same if info.onsets: for j, val in enumerate(info.onsets): - if self.inputs.input_units == 'secs': - onsets = np.array(info.onsets[j]) +\ - self.inputs.time_repetition * \ - sum(nscans[0:(i + 1)]) + if self.inputs.input_units == "secs": + onsets = np.array( + info.onsets[j] + ) + self.inputs.time_repetition * sum(nscans[0 : (i + 1)]) infoout.onsets[j].extend(onsets.tolist()) else: - onsets = np.array(info.onsets[j]) + \ - sum(nscans[0:(i + 1)]) + onsets = np.array(info.onsets[j]) + 
sum(nscans[0 : (i + 1)]) infoout.onsets[j].extend(onsets.tolist()) for j, val in enumerate(info.durations): if len(info.onsets[j]) > 1 and len(val) == 1: infoout.durations[j].extend( - info.durations[j] * len(info.onsets[j])) + info.durations[j] * len(info.onsets[j]) + ) elif len(info.onsets[j]) == len(val): infoout.durations[j].extend(info.durations[j]) else: - raise ValueError('Mismatch in number of onsets and \ + raise ValueError( + "Mismatch in number of onsets and \ durations for run {0}, condition \ - {1}'.format(i + 2, j + 1)) + {1}".format( + i + 2, j + 1 + ) + ) - if hasattr(info, 'amplitudes') and info.amplitudes: + if hasattr(info, "amplitudes") and info.amplitudes: for j, val in enumerate(info.amplitudes): infoout.amplitudes[j].extend(info.amplitudes[j]) - if hasattr(info, 'pmod') and info.pmod: + if hasattr(info, "pmod") and info.pmod: for j, val in enumerate(info.pmod): if val: for key, data in enumerate(val.param): infoout.pmod[j].param[key].extend(data) - if hasattr(info, 'regressors') and info.regressors: + if hasattr(info, "regressors") and info.regressors: # assumes same ordering of regressors across different # runs and the same names for the regressors for j, v in enumerate(info.regressors): infoout.regressors[j].extend(info.regressors[j]) # insert session regressors - if not hasattr(infoout, 'regressors') or not infoout.regressors: + if not hasattr(infoout, "regressors") or not infoout.regressors: infoout.regressors = [] onelist = np.zeros((1, sum(nscans))) - onelist[0, sum(nscans[0:i]):sum(nscans[0:(i + 1)])] = 1 - infoout.regressors.insert( - len(infoout.regressors), - onelist.tolist()[0]) + onelist[0, sum(nscans[0:i]) : sum(nscans[0 : (i + 1)])] = 1 + infoout.regressors.insert(len(infoout.regressors), onelist.tolist()[0]) return [infoout], nscans def _generate_design(self, infolist=None): - if not isdefined(self.inputs.concatenate_runs) or \ - not self.inputs.concatenate_runs: + if ( + not isdefined(self.inputs.concatenate_runs) + or not self.inputs.concatenate_runs + ): super(SpecifySPMModel, self)._generate_design(infolist=infolist) return @@ -663,12 +695,14 @@ def _generate_design(self, infolist=None): func1d=normalize_mc_params, axis=1, arr=np.loadtxt(parfile), - source=self.inputs.parameter_source) + source=self.inputs.parameter_source, + ) if not realignment_parameters: realignment_parameters.insert(0, mc) else: - realignment_parameters[0] = \ - np.concatenate((realignment_parameters[0], mc)) + realignment_parameters[0] = np.concatenate( + (realignment_parameters[0], mc) + ) outliers = [] if isdefined(self.inputs.outlier_files): outliers = [[]] @@ -676,12 +710,13 @@ def _generate_design(self, infolist=None): try: out = np.loadtxt(filename) except IOError: - iflogger.warning('Error reading outliers file %s', filename) + iflogger.warning("Error reading outliers file %s", filename) out = np.array([]) if out.size > 0: - iflogger.debug('fname=%s, out=%s, nscans=%d', filename, - out, sum(nscans[0:i])) + iflogger.debug( + "fname=%s, out=%s, nscans=%d", filename, out, sum(nscans[0:i]) + ) sumscans = out.astype(int) + sum(nscans[0:i]) if out.size == 1: @@ -693,38 +728,39 @@ def _generate_design(self, infolist=None): concatlist, functional_runs=functional_runs, realignment_parameters=realignment_parameters, - outliers=outliers) + outliers=outliers, + ) class SpecifySparseModelInputSpec(SpecifyModelInputSpec): time_acquisition = traits.Float( - 0, - mandatory=True, - desc='Time in seconds to acquire a single ' - 'image volume') + 0, mandatory=True, desc="Time in seconds 
to acquire a single " "image volume" + ) volumes_in_cluster = traits.Range( - 1, usedefault=True, desc='Number of scan volumes in a cluster') - model_hrf = traits.Bool(desc='Model sparse events with hrf') + 1, usedefault=True, desc="Number of scan volumes in a cluster" + ) + model_hrf = traits.Bool(desc="Model sparse events with hrf") stimuli_as_impulses = traits.Bool( - True, desc='Treat each stimulus to be impulse-like', usedefault=True) + True, desc="Treat each stimulus to be impulse-like", usedefault=True + ) use_temporal_deriv = traits.Bool( - requires=['model_hrf'], - desc='Create a temporal derivative in ' - 'addition to regular regressor') + requires=["model_hrf"], + desc="Create a temporal derivative in " "addition to regular regressor", + ) scale_regressors = traits.Bool( - True, desc='Scale regressors by the peak', usedefault=True) + True, desc="Scale regressors by the peak", usedefault=True + ) scan_onset = traits.Float( - 0.0, - desc='Start of scanning relative to onset of run in secs', - usedefault=True) + 0.0, desc="Start of scanning relative to onset of run in secs", usedefault=True + ) save_plot = traits.Bool( - desc=('Save plot of sparse design calculation ' - '(requires matplotlib)')) + desc=("Save plot of sparse design calculation " "(requires matplotlib)") + ) class SpecifySparseModelOutputSpec(SpecifyModelOutputSpec): - sparse_png_file = File(desc='PNG file showing sparse design') - sparse_svg_file = File(desc='SVG file showing sparse design') + sparse_png_file = File(desc="PNG file showing sparse design") + sparse_svg_file = File(desc="SVG file showing sparse design") class SpecifySparseModel(SpecifyModel): @@ -756,6 +792,7 @@ class SpecifySparseModel(SpecifyModel): >>> s.inputs.subject_info = [evs_run2, evs_run3] """ + input_spec = SpecifySparseModelInputSpec output_spec = SpecifySparseModelOutputSpec @@ -766,7 +803,8 @@ def _gen_regress(self, i_onsets, i_durations, i_amplitudes, nscans): if isdefined(self.inputs.save_plot) and self.inputs.save_plot: bplot = True import matplotlib - matplotlib.use(config.get('execution', 'matplotlib_backend')) + + matplotlib.use(config.get("execution", "matplotlib_backend")) import matplotlib.pyplot as plt TR = np.round(self.inputs.time_repetition * 1000) # in ms @@ -789,8 +827,8 @@ def _gen_regress(self, i_onsets, i_durations, i_amplitudes, nscans): dt = float(gcd(dttemp, dt)) if dt < 1: - raise Exception('Time multiple less than 1 ms') - iflogger.info('Setting dt = %d ms\n', dt) + raise Exception("Time multiple less than 1 ms") + iflogger.info("Setting dt = %d ms\n", dt) npts = int(np.ceil(total_time / dt)) times = np.arange(0, total_time, dt) * 1e-3 timeline = np.zeros((npts)) @@ -804,14 +842,15 @@ def _gen_regress(self, i_onsets, i_durations, i_amplitudes, nscans): boxcar[int(1.0 * 1e3 / dt)] = 1.0 reg_scale = float(TA / dt) else: - boxcar[int(1.0 * 1e3 / dt):int(2.0 * 1e3 / dt)] = 1.0 + boxcar[int(1.0 * 1e3 / dt) : int(2.0 * 1e3 / dt)] = 1.0 if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf: response = np.convolve(boxcar, hrf) reg_scale = 1.0 / response.max() - iflogger.info('response sum: %.4f max: %.4f', response.sum(), - response.max()) - iflogger.info('reg_scale: %.4f', reg_scale) + iflogger.info( + "response sum: %.4f max: %.4f", response.sum(), response.max() + ) + iflogger.info("reg_scale: %.4f", reg_scale) for i, t in enumerate(onsets): idx = int(np.round(t / dt)) @@ -831,7 +870,7 @@ def _gen_regress(self, i_onsets, i_durations, i_amplitudes, nscans): if durations[i] == 0: durations[i] = TA * nvol stimdur = 
np.ones((int(durations[i] / dt))) - timeline2 = np.convolve(timeline2, stimdur)[0:len(timeline2)] + timeline2 = np.convolve(timeline2, stimdur)[0 : len(timeline2)] timeline += timeline2 timeline2[:] = 0 @@ -840,17 +879,21 @@ def _gen_regress(self, i_onsets, i_durations, i_amplitudes, nscans): plt.plot(times, timeline) if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf: - timeline = np.convolve(timeline, hrf)[0:len(timeline)] - if isdefined(self.inputs.use_temporal_deriv) and \ - self.inputs.use_temporal_deriv: + timeline = np.convolve(timeline, hrf)[0 : len(timeline)] + if ( + isdefined(self.inputs.use_temporal_deriv) + and self.inputs.use_temporal_deriv + ): # create temporal deriv timederiv = np.concatenate(([0], np.diff(timeline))) if bplot: plt.subplot(4, 1, 3) plt.plot(times, timeline) - if isdefined(self.inputs.use_temporal_deriv) and \ - self.inputs.use_temporal_deriv: + if ( + isdefined(self.inputs.use_temporal_deriv) + and self.inputs.use_temporal_deriv + ): plt.plot(times, timederiv) # sample timeline timeline2 = np.zeros((npts)) @@ -861,13 +904,14 @@ def _gen_regress(self, i_onsets, i_durations, i_amplitudes, nscans): scanidx = scanstart + np.arange(int(TA / dt)) timeline2[scanidx] = np.max(timeline) reg.insert(i, np.mean(timeline[scanidx]) * reg_scale) - if isdefined(self.inputs.use_temporal_deriv) and \ - self.inputs.use_temporal_deriv: + if ( + isdefined(self.inputs.use_temporal_deriv) + and self.inputs.use_temporal_deriv + ): regderiv.insert(i, np.mean(timederiv[scanidx]) * reg_scale) - if isdefined(self.inputs.use_temporal_deriv) and \ - self.inputs.use_temporal_deriv: - iflogger.info('orthoganlizing derivative w.r.t. main regressor') + if isdefined(self.inputs.use_temporal_deriv) and self.inputs.use_temporal_deriv: + iflogger.info("orthoganlizing derivative w.r.t. 
main regressor") regderiv = orth(reg, regderiv) if bplot: @@ -875,8 +919,8 @@ def _gen_regress(self, i_onsets, i_durations, i_amplitudes, nscans): plt.plot(times, timeline2) plt.subplot(4, 1, 4) plt.bar(np.arange(len(reg)), reg, width=0.5) - plt.savefig('sparse.png') - plt.savefig('sparse.svg') + plt.savefig("sparse.png") + plt.savefig("sparse.svg") if regderiv: return [reg, regderiv] @@ -889,23 +933,32 @@ def _cond_to_regress(self, info, nscans): reg = [] regnames = [] for i, cond in enumerate(info.conditions): - if hasattr(info, 'amplitudes') and info.amplitudes: + if hasattr(info, "amplitudes") and info.amplitudes: amplitudes = info.amplitudes[i] else: amplitudes = None regnames.insert(len(regnames), cond) - scaled_onsets = scale_timings(info.onsets[i], - self.inputs.input_units, 'secs', - self.inputs.time_repetition) - scaled_durations = scale_timings(info.durations[i], - self.inputs.input_units, 'secs', - self.inputs.time_repetition) - regressor = self._gen_regress(scaled_onsets, scaled_durations, - amplitudes, nscans) - if isdefined(self.inputs.use_temporal_deriv) and \ - self.inputs.use_temporal_deriv: + scaled_onsets = scale_timings( + info.onsets[i], + self.inputs.input_units, + "secs", + self.inputs.time_repetition, + ) + scaled_durations = scale_timings( + info.durations[i], + self.inputs.input_units, + "secs", + self.inputs.time_repetition, + ) + regressor = self._gen_regress( + scaled_onsets, scaled_durations, amplitudes, nscans + ) + if ( + isdefined(self.inputs.use_temporal_deriv) + and self.inputs.use_temporal_deriv + ): reg.insert(len(reg), regressor[0]) - regnames.insert(len(regnames), cond + '_D') + regnames.insert(len(regnames), cond + "_D") reg.insert(len(reg), regressor[1]) else: reg.insert(len(reg), regressor) @@ -917,7 +970,7 @@ def _cond_to_regress(self, info, nscans): treg = np.zeros((nscans / nvol, nvol)) treg[:, i] = 1 reg.insert(len(reg), treg.ravel().tolist()) - regnames.insert(len(regnames), 'T1effect_%d' % i) + regnames.insert(len(regnames), "T1effect_%d" % i) return reg, regnames def _generate_clustered_design(self, infolist): @@ -934,10 +987,11 @@ def _generate_clustered_design(self, infolist): img = load(self.inputs.functional_runs[i], mmap=NUMPY_MMAP) nscans = img.shape[3] reg, regnames = self._cond_to_regress(info, nscans) - if hasattr(infoout[i], 'regressors') and infoout[i].regressors: + if hasattr(infoout[i], "regressors") and infoout[i].regressors: if not infoout[i].regressor_names: - infoout[i].regressor_names = \ - ['R%d' % j for j in range(len(infoout[i].regressors))] + infoout[i].regressor_names = [ + "R%d" % j for j in range(len(infoout[i].regressors)) + ] else: infoout[i].regressors = [] infoout[i].regressor_names = [] @@ -958,13 +1012,11 @@ def _generate_design(self, infolist=None): def _list_outputs(self): outputs = self._outputs().get() - if not hasattr(self, '_sessinfo'): + if not hasattr(self, "_sessinfo"): self._generate_design() - outputs['session_info'] = self._sessinfo + outputs["session_info"] = self._sessinfo if isdefined(self.inputs.save_plot) and self.inputs.save_plot: - outputs['sparse_png_file'] = os.path.join(os.getcwd(), - 'sparse.png') - outputs['sparse_svg_file'] = os.path.join(os.getcwd(), - 'sparse.svg') + outputs["sparse_png_file"] = os.path.join(os.getcwd(), "sparse.png") + outputs["sparse_svg_file"] = os.path.join(os.getcwd(), "sparse.svg") return outputs diff --git a/nipype/algorithms/rapidart.py b/nipype/algorithms/rapidart.py index 6371458ca3..3f02ca8d29 100644 --- a/nipype/algorithms/rapidart.py +++ 
b/nipype/algorithms/rapidart.py @@ -19,13 +19,21 @@ import numpy as np from ..utils import NUMPY_MMAP -from ..interfaces.base import (BaseInterface, traits, InputMultiPath, - OutputMultiPath, TraitedSpec, File, - BaseInterfaceInputSpec, isdefined) +from ..interfaces.base import ( + BaseInterface, + traits, + InputMultiPath, + OutputMultiPath, + TraitedSpec, + File, + BaseInterfaceInputSpec, + isdefined, +) from ..utils.filemanip import ensure_list, save_json, split_filename from ..utils.misc import find_indices, normalize_mc_params from .. import logging, config -iflogger = logging.getLogger('nipype.interface') + +iflogger = logging.getLogger("nipype.interface") def _get_affine_matrix(params, source): @@ -35,19 +43,19 @@ def _get_affine_matrix(params, source): source : the package that generated the parameters supports SPM, AFNI, FSFAST, FSL, NIPY """ - if source == 'NIPY': + if source == "NIPY": # nipy does not store typical euler angles, use nipy to convert from nipy.algorithms.registration import to_matrix44 + return to_matrix44(params) params = normalize_mc_params(params, source) # process for FSL, SPM, AFNI and FSFAST - rotfunc = lambda x: np.array([[np.cos(x), np.sin(x)], - [-np.sin(x), np.cos(x)]]) + rotfunc = lambda x: np.array([[np.cos(x), np.sin(x)], [-np.sin(x), np.cos(x)]]) q = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0]) if len(params) < 12: - params = np.hstack((params, q[len(params):])) - params.shape = (len(params), ) + params = np.hstack((params, q[len(params) :])) + params.shape = (len(params),) # Translation T = np.eye(4) T[0:3, -1] = params[0:3] @@ -64,7 +72,7 @@ def _get_affine_matrix(params, source): # Shear Sh = np.eye(4) Sh[(0, 0, 1), (1, 2, 2)] = params[9:12] - if source in ('AFNI', 'FSFAST'): + if source in ("AFNI", "FSFAST"): return np.dot(T, np.dot(Ry, np.dot(Rx, np.dot(Rz, np.dot(S, Sh))))) return np.dot(T, np.dot(Rx, np.dot(Ry, np.dot(Rz, np.dot(S, Sh))))) @@ -88,9 +96,7 @@ def _calc_norm(mc, use_differences, source, brain_pts=None): """ - affines = [ - _get_affine_matrix(mc[i, :], source) for i in range(mc.shape[0]) - ] + affines = [_get_affine_matrix(mc[i, :], source) for i in range(mc.shape[0])] return _calc_norm_affine(affines, use_differences, brain_pts) @@ -129,24 +135,34 @@ def _calc_norm_affine(affines, use_differences, brain_pts=None): displacement[i, :] = np.sqrt( np.sum( np.power( - np.reshape(newpos[i, :], - (3, all_pts.shape[1])) - all_pts[0:3, :], - 2), - axis=0)) + np.reshape(newpos[i, :], (3, all_pts.shape[1])) + - all_pts[0:3, :], + 2, + ), + axis=0, + ) + ) # np.savez('displacement.npz', newpos=newpos, pts=all_pts) normdata = np.zeros(len(affines)) if use_differences: newpos = np.concatenate( - (np.zeros((1, n_pts)), np.diff(newpos, n=1, axis=0)), axis=0) + (np.zeros((1, n_pts)), np.diff(newpos, n=1, axis=0)), axis=0 + ) for i in range(newpos.shape[0]): - normdata[i] = \ - np.max(np.sqrt(np.sum( - np.reshape(np.power(np.abs(newpos[i, :]), 2), - (3, all_pts.shape[1])), - axis=0))) + normdata[i] = np.max( + np.sqrt( + np.sum( + np.reshape( + np.power(np.abs(newpos[i, :]), 2), (3, all_pts.shape[1]) + ), + axis=0, + ) + ) + ) else: from scipy.signal import detrend - newpos = np.abs(detrend(newpos, axis=0, type='constant')) + + newpos = np.abs(detrend(newpos, axis=0, type="constant")) normdata = np.sqrt(np.mean(np.power(newpos, 2), axis=1)) return normdata, displacement @@ -154,15 +170,18 @@ def _calc_norm_affine(affines, use_differences, brain_pts=None): class ArtifactDetectInputSpec(BaseInterfaceInputSpec): realigned_files = InputMultiPath( 
File(exists=True), - desc=("Names of realigned functional data " - "files"), - mandatory=True) + desc=("Names of realigned functional data " "files"), + mandatory=True, + ) realignment_parameters = InputMultiPath( File(exists=True), mandatory=True, - desc=("Names of realignment " - "parameters corresponding to " - "the functional data files")) + desc=( + "Names of realignment " + "parameters corresponding to " + "the functional data files" + ), + ) parameter_source = traits.Enum( "SPM", "FSL", @@ -170,131 +189,167 @@ class ArtifactDetectInputSpec(BaseInterfaceInputSpec): "NiPy", "FSFAST", desc="Source of movement parameters", - mandatory=True) + mandatory=True, + ) use_differences = traits.ListBool( [True, False], minlen=2, maxlen=2, usedefault=True, - desc=("Use differences between successive" - " motion (first element) and " - "intensity parameter (second " - "element) estimates in order to " - "determine outliers. " - "(default is [True, False])")) + desc=( + "Use differences between successive" + " motion (first element) and " + "intensity parameter (second " + "element) estimates in order to " + "determine outliers. " + "(default is [True, False])" + ), + ) use_norm = traits.Bool( True, usedefault=True, - requires=['norm_threshold'], - desc=("Uses a composite of the motion parameters in " - "order to determine outliers.")) + requires=["norm_threshold"], + desc=( + "Uses a composite of the motion parameters in " + "order to determine outliers." + ), + ) norm_threshold = traits.Float( - xor=['rotation_threshold', 'translation_threshold'], + xor=["rotation_threshold", "translation_threshold"], mandatory=True, - desc=("Threshold to use to detect motion-rela" - "ted outliers when composite motion is " - "being used")) + desc=( + "Threshold to use to detect motion-rela" + "ted outliers when composite motion is " + "being used" + ), + ) rotation_threshold = traits.Float( mandatory=True, - xor=['norm_threshold'], - desc=("Threshold (in radians) to use to " - "detect rotation-related outliers")) + xor=["norm_threshold"], + desc=("Threshold (in radians) to use to " "detect rotation-related outliers"), + ) translation_threshold = traits.Float( mandatory=True, - xor=['norm_threshold'], - desc=("Threshold (in mm) to use to " - "detect translation-related " - "outliers")) + xor=["norm_threshold"], + desc=("Threshold (in mm) to use to " "detect translation-related " "outliers"), + ) zintensity_threshold = traits.Float( mandatory=True, - desc=("Intensity Z-threshold use to " - "detection images that deviate " - "from the mean")) + desc=( + "Intensity Z-threshold use to " + "detection images that deviate " + "from the mean" + ), + ) mask_type = traits.Enum( - 'spm_global', - 'file', - 'thresh', + "spm_global", + "file", + "thresh", mandatory=True, - desc=("Type of mask that should be used to mask the" - " functional data. *spm_global* uses an " - "spm_global like calculation to determine the" - " brain mask. *file* specifies a brain mask " - "file (should be an image file consisting of " - "0s and 1s). *thresh* specifies a threshold " - "to use. By default all voxels are used," - "unless one of these mask types are defined")) - mask_file = File( - exists=True, desc="Mask file to be used if mask_type is 'file'.") + desc=( + "Type of mask that should be used to mask the" + " functional data. *spm_global* uses an " + "spm_global like calculation to determine the" + " brain mask. *file* specifies a brain mask " + "file (should be an image file consisting of " + "0s and 1s). 
*thresh* specifies a threshold " + "to use. By default all voxels are used," + "unless one of these mask types are defined" + ), + ) + mask_file = File(exists=True, desc="Mask file to be used if mask_type is 'file'.") mask_threshold = traits.Float( - desc=("Mask threshold to be used if mask_type" - " is 'thresh'.")) + desc=("Mask threshold to be used if mask_type" " is 'thresh'.") + ) intersect_mask = traits.Bool( - True, usedefault=True, - desc=("Intersect the masks when computed from " - "spm_global.")) + True, + usedefault=True, + desc=("Intersect the masks when computed from " "spm_global."), + ) save_plot = traits.Bool( - True, desc="save plots containing outliers", usedefault=True) + True, desc="save plots containing outliers", usedefault=True + ) plot_type = traits.Enum( - 'png', - 'svg', - 'eps', - 'pdf', + "png", + "svg", + "eps", + "pdf", desc="file type of the outlier plot", - usedefault=True) + usedefault=True, + ) bound_by_brainmask = traits.Bool( False, - desc=("use the brain mask to " - "determine bounding box" - "for composite norm (works" - "for SPM and Nipy - currently" - "inaccurate for FSL, AFNI"), - usedefault=True) + desc=( + "use the brain mask to " + "determine bounding box" + "for composite norm (works" + "for SPM and Nipy - currently" + "inaccurate for FSL, AFNI" + ), + usedefault=True, + ) global_threshold = traits.Float( 8.0, - desc=("use this threshold when mask " - "type equal's spm_global"), - usedefault=True) + desc=("use this threshold when mask " "type equal's spm_global"), + usedefault=True, + ) class ArtifactDetectOutputSpec(TraitedSpec): outlier_files = OutputMultiPath( File(exists=True), - desc=("One file for each functional run " - "containing a list of 0-based indices" - " corresponding to outlier volumes")) + desc=( + "One file for each functional run " + "containing a list of 0-based indices" + " corresponding to outlier volumes" + ), + ) intensity_files = OutputMultiPath( File(exists=True), - desc=("One file for each functional run " - "containing the global intensity " - "values determined from the " - "brainmask")) + desc=( + "One file for each functional run " + "containing the global intensity " + "values determined from the " + "brainmask" + ), + ) norm_files = OutputMultiPath( - File, - desc=("One file for each functional run " - "containing the composite norm")) + File, desc=("One file for each functional run " "containing the composite norm") + ) statistic_files = OutputMultiPath( File(exists=True), - desc=("One file for each functional run " - "containing information about the " - "different types of artifacts and " - "if design info is provided then " - "details of stimulus correlated " - "motion and a listing or artifacts " - "by event type.")) + desc=( + "One file for each functional run " + "containing information about the " + "different types of artifacts and " + "if design info is provided then " + "details of stimulus correlated " + "motion and a listing or artifacts " + "by event type." 
+ ), + ) plot_files = OutputMultiPath( File, - desc=("One image file for each functional run " - "containing the detected outliers")) + desc=( + "One image file for each functional run " "containing the detected outliers" + ), + ) mask_files = OutputMultiPath( File, - desc=("One image file for each functional run " - "containing the mask used for global " - "signal calculation")) + desc=( + "One image file for each functional run " + "containing the mask used for global " + "signal calculation" + ), + ) displacement_files = OutputMultiPath( File, - desc=("One image file for each " - "functional run containing the " - "voxel displacement timeseries")) + desc=( + "One image file for each " + "functional run containing the " + "voxel displacement timeseries" + ), + ) class ArtifactDetect(BaseInterface): @@ -344,62 +399,79 @@ def _get_output_filenames(self, motionfile, output_dir): else: raise Exception("Unknown type of file") _, filename, ext = split_filename(infile) - artifactfile = os.path.join(output_dir, ''.join(('art.', filename, - '_outliers.txt'))) - intensityfile = os.path.join(output_dir, ''.join(('global_intensity.', - filename, '.txt'))) - statsfile = os.path.join(output_dir, ''.join(('stats.', filename, - '.txt'))) - normfile = os.path.join(output_dir, ''.join(('norm.', filename, - '.txt'))) - plotfile = os.path.join(output_dir, ''.join(('plot.', filename, '.', - self.inputs.plot_type))) - displacementfile = os.path.join(output_dir, ''.join(('disp.', filename, - ext))) - maskfile = os.path.join(output_dir, ''.join(('mask.', filename, ext))) - return (artifactfile, intensityfile, statsfile, normfile, plotfile, - displacementfile, maskfile) + artifactfile = os.path.join( + output_dir, "".join(("art.", filename, "_outliers.txt")) + ) + intensityfile = os.path.join( + output_dir, "".join(("global_intensity.", filename, ".txt")) + ) + statsfile = os.path.join(output_dir, "".join(("stats.", filename, ".txt"))) + normfile = os.path.join(output_dir, "".join(("norm.", filename, ".txt"))) + plotfile = os.path.join( + output_dir, "".join(("plot.", filename, ".", self.inputs.plot_type)) + ) + displacementfile = os.path.join(output_dir, "".join(("disp.", filename, ext))) + maskfile = os.path.join(output_dir, "".join(("mask.", filename, ext))) + return ( + artifactfile, + intensityfile, + statsfile, + normfile, + plotfile, + displacementfile, + maskfile, + ) def _list_outputs(self): outputs = self._outputs().get() - outputs['outlier_files'] = [] - outputs['intensity_files'] = [] - outputs['statistic_files'] = [] - outputs['mask_files'] = [] + outputs["outlier_files"] = [] + outputs["intensity_files"] = [] + outputs["statistic_files"] = [] + outputs["mask_files"] = [] if isdefined(self.inputs.use_norm) and self.inputs.use_norm: - outputs['norm_files'] = [] + outputs["norm_files"] = [] if self.inputs.bound_by_brainmask: - outputs['displacement_files'] = [] + outputs["displacement_files"] = [] if isdefined(self.inputs.save_plot) and self.inputs.save_plot: - outputs['plot_files'] = [] + outputs["plot_files"] = [] for i, f in enumerate(ensure_list(self.inputs.realigned_files)): - (outlierfile, intensityfile, statsfile, normfile, plotfile, - displacementfile, maskfile) = \ - self._get_output_filenames(f, os.getcwd()) - outputs['outlier_files'].insert(i, outlierfile) - outputs['intensity_files'].insert(i, intensityfile) - outputs['statistic_files'].insert(i, statsfile) - outputs['mask_files'].insert(i, maskfile) + ( + outlierfile, + intensityfile, + statsfile, + normfile, + plotfile, + 
displacementfile, + maskfile, + ) = self._get_output_filenames(f, os.getcwd()) + outputs["outlier_files"].insert(i, outlierfile) + outputs["intensity_files"].insert(i, intensityfile) + outputs["statistic_files"].insert(i, statsfile) + outputs["mask_files"].insert(i, maskfile) if isdefined(self.inputs.use_norm) and self.inputs.use_norm: - outputs['norm_files'].insert(i, normfile) + outputs["norm_files"].insert(i, normfile) if self.inputs.bound_by_brainmask: - outputs['displacement_files'].insert(i, displacementfile) + outputs["displacement_files"].insert(i, displacementfile) if isdefined(self.inputs.save_plot) and self.inputs.save_plot: - outputs['plot_files'].insert(i, plotfile) + outputs["plot_files"].insert(i, plotfile) return outputs def _plot_outliers_with_wave(self, wave, outliers, name): import matplotlib + matplotlib.use(config.get("execution", "matplotlib_backend")) import matplotlib.pyplot as plt + plt.plot(wave) plt.ylim([wave.min(), wave.max()]) plt.xlim([0, len(wave) - 1]) if len(outliers): plt.plot( np.tile(outliers[:, None], (1, 2)).T, - np.tile([wave.min(), wave.max()], (len(outliers), 1)).T, 'r') - plt.xlabel('Scans - 0-based') + np.tile([wave.min(), wave.max()], (len(outliers), 1)).T, + "r", + ) + plt.xlabel("Scans - 0-based") plt.ylabel(name) def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None): @@ -407,6 +479,7 @@ def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None): Core routine for detecting outliers """ from scipy import signal + if not cwd: cwd = os.getcwd() @@ -427,16 +500,15 @@ def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None): affine = nim.affine g = np.zeros((timepoints, 1)) masktype = self.inputs.mask_type - if masktype == 'spm_global': # spm_global like calculation - iflogger.debug('art: using spm global') + if masktype == "spm_global": # spm_global like calculation + iflogger.debug("art: using spm global") intersect_mask = self.inputs.intersect_mask if intersect_mask: mask = np.ones((x, y, z), dtype=bool) for t0 in range(timepoints): vol = data[:, :, :, t0] # Use an SPM like approach - mask_tmp = vol > \ - (np.nanmean(vol) / self.inputs.global_threshold) + mask_tmp = vol > (np.nanmean(vol) / self.inputs.global_threshold) mask = mask * mask_tmp for t0 in range(timepoints): vol = data[:, :, :, t0] @@ -445,15 +517,14 @@ def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None): intersect_mask = False g = np.zeros((timepoints, 1)) if not intersect_mask: - iflogger.info('not intersect_mask is True') + iflogger.info("not intersect_mask is True") mask = np.zeros((x, y, z, timepoints)) for t0 in range(timepoints): vol = data[:, :, :, t0] - mask_tmp = vol > \ - (np.nanmean(vol) / self.inputs.global_threshold) + mask_tmp = vol > (np.nanmean(vol) / self.inputs.global_threshold) mask[:, :, :, t0] = mask_tmp g[t0] = np.nansum(vol * mask_tmp) / np.nansum(mask_tmp) - elif masktype == 'file': # uses a mask image to determine intensity + elif masktype == "file": # uses a mask image to determine intensity maskimg = load(self.inputs.mask_file, mmap=NUMPY_MMAP) mask = maskimg.get_data() affine = maskimg.affine @@ -461,7 +532,7 @@ def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None): for t0 in range(timepoints): vol = data[:, :, :, t0] g[t0] = np.nanmean(vol[mask]) - elif masktype == 'thresh': # uses a fixed signal threshold + elif masktype == "thresh": # uses a fixed signal threshold for t0 in range(timepoints): vol = data[:, :, :, t0] mask = vol > self.inputs.mask_threshold @@ -473,8 +544,7 
@@ def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None): # compute normalized intensity values gz = signal.detrend(g, axis=0) # detrend the signal if self.inputs.use_differences[1]: - gz = np.concatenate( - (np.zeros((1, 1)), np.diff(gz, n=1, axis=0)), axis=0) + gz = np.concatenate((np.zeros((1, 1)), np.diff(gz, n=1, axis=0)), axis=0) gz = (gz - np.mean(gz)) / np.std(gz) # normalize the detrended signal iidx = find_indices(abs(gz) > self.inputs.zintensity_threshold) @@ -482,9 +552,15 @@ def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None): mc_in = np.loadtxt(motionfile) mc = deepcopy(mc_in) - (artifactfile, intensityfile, statsfile, normfile, plotfile, - displacementfile, maskfile) = self._get_output_filenames( - imgfile, cwd) + ( + artifactfile, + intensityfile, + statsfile, + normfile, + plotfile, + displacementfile, + maskfile, + ) = self._get_output_filenames(imgfile, cwd) mask_img = Nifti1Image(mask.astype(np.uint8), affine) mask_img.to_filename(maskfile) @@ -492,122 +568,122 @@ def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None): brain_pts = None if self.inputs.bound_by_brainmask: voxel_coords = np.nonzero(mask) - coords = np.vstack((voxel_coords[0], - np.vstack((voxel_coords[1], - voxel_coords[2])))).T - brain_pts = np.dot(affine, - np.hstack((coords, - np.ones((coords.shape[0], - 1)))).T) + coords = np.vstack( + (voxel_coords[0], np.vstack((voxel_coords[1], voxel_coords[2]))) + ).T + brain_pts = np.dot( + affine, np.hstack((coords, np.ones((coords.shape[0], 1)))).T + ) # calculate the norm of the motion parameters normval, displacement = _calc_norm( mc, self.inputs.use_differences[0], self.inputs.parameter_source, - brain_pts=brain_pts) + brain_pts=brain_pts, + ) tidx = find_indices(normval > self.inputs.norm_threshold) ridx = find_indices(normval < 0) if displacement is not None: dmap = np.zeros((x, y, z, timepoints), dtype=np.float) for i in range(timepoints): - dmap[voxel_coords[0], voxel_coords[1], voxel_coords[2], - i] = displacement[i, :] + dmap[ + voxel_coords[0], voxel_coords[1], voxel_coords[2], i + ] = displacement[i, :] dimg = Nifti1Image(dmap, affine) dimg.to_filename(displacementfile) else: if self.inputs.use_differences[0]: mc = np.concatenate( - (np.zeros((1, 6)), np.diff(mc_in, n=1, axis=0)), axis=0) + (np.zeros((1, 6)), np.diff(mc_in, n=1, axis=0)), axis=0 + ) traval = mc[:, 0:3] # translation parameters (mm) rotval = mc[:, 3:6] # rotation parameters (rad) tidx = find_indices( - np.sum(abs(traval) > self.inputs.translation_threshold, 1) > 0) + np.sum(abs(traval) > self.inputs.translation_threshold, 1) > 0 + ) ridx = find_indices( - np.sum(abs(rotval) > self.inputs.rotation_threshold, 1) > 0) + np.sum(abs(rotval) > self.inputs.rotation_threshold, 1) > 0 + ) outliers = np.unique(np.union1d(iidx, np.union1d(tidx, ridx))) # write output to outputfile - np.savetxt(artifactfile, outliers, fmt=b'%d', delimiter=' ') - np.savetxt(intensityfile, g, fmt=b'%.2f', delimiter=' ') + np.savetxt(artifactfile, outliers, fmt=b"%d", delimiter=" ") + np.savetxt(intensityfile, g, fmt=b"%.2f", delimiter=" ") if self.inputs.use_norm: - np.savetxt(normfile, normval, fmt=b'%.4f', delimiter=' ') + np.savetxt(normfile, normval, fmt=b"%.4f", delimiter=" ") if isdefined(self.inputs.save_plot) and self.inputs.save_plot: import matplotlib + matplotlib.use(config.get("execution", "matplotlib_backend")) import matplotlib.pyplot as plt + fig = plt.figure() if isdefined(self.inputs.use_norm) and self.inputs.use_norm: plt.subplot(211) else: 
plt.subplot(311) - self._plot_outliers_with_wave(gz, iidx, 'Intensity') + self._plot_outliers_with_wave(gz, iidx, "Intensity") if isdefined(self.inputs.use_norm) and self.inputs.use_norm: plt.subplot(212) - self._plot_outliers_with_wave(normval, np.union1d(tidx, ridx), - 'Norm (mm)') + self._plot_outliers_with_wave( + normval, np.union1d(tidx, ridx), "Norm (mm)" + ) else: - diff = '' + diff = "" if self.inputs.use_differences[0]: - diff = 'diff' + diff = "diff" plt.subplot(312) - self._plot_outliers_with_wave(traval, tidx, - 'Translation (mm)' + diff) + self._plot_outliers_with_wave(traval, tidx, "Translation (mm)" + diff) plt.subplot(313) - self._plot_outliers_with_wave(rotval, ridx, - 'Rotation (rad)' + diff) + self._plot_outliers_with_wave(rotval, ridx, "Rotation (rad)" + diff) plt.savefig(plotfile) plt.close(fig) motion_outliers = np.union1d(tidx, ridx) stats = [ + {"motion_file": motionfile, "functional_file": imgfile}, { - 'motion_file': motionfile, - 'functional_file': imgfile + "common_outliers": len(np.intersect1d(iidx, motion_outliers)), + "intensity_outliers": len(np.setdiff1d(iidx, motion_outliers)), + "motion_outliers": len(np.setdiff1d(motion_outliers, iidx)), }, { - 'common_outliers': len(np.intersect1d(iidx, motion_outliers)), - 'intensity_outliers': len(np.setdiff1d(iidx, motion_outliers)), - 'motion_outliers': len(np.setdiff1d(motion_outliers, iidx)), - }, - { - 'motion': [ - { - 'using differences': self.inputs.use_differences[0] - }, + "motion": [ + {"using differences": self.inputs.use_differences[0]}, { - 'mean': np.mean(mc_in, axis=0).tolist(), - 'min': np.min(mc_in, axis=0).tolist(), - 'max': np.max(mc_in, axis=0).tolist(), - 'std': np.std(mc_in, axis=0).tolist() + "mean": np.mean(mc_in, axis=0).tolist(), + "min": np.min(mc_in, axis=0).tolist(), + "max": np.max(mc_in, axis=0).tolist(), + "std": np.std(mc_in, axis=0).tolist(), }, ] }, { - 'intensity': [ - { - 'using differences': self.inputs.use_differences[1] - }, + "intensity": [ + {"using differences": self.inputs.use_differences[1]}, { - 'mean': np.mean(gz, axis=0).tolist(), - 'min': np.min(gz, axis=0).tolist(), - 'max': np.max(gz, axis=0).tolist(), - 'std': np.std(gz, axis=0).tolist() + "mean": np.mean(gz, axis=0).tolist(), + "min": np.min(gz, axis=0).tolist(), + "max": np.max(gz, axis=0).tolist(), + "std": np.std(gz, axis=0).tolist(), }, ] }, ] if self.inputs.use_norm: stats.insert( - 3, { - 'motion_norm': { - 'mean': np.mean(normval, axis=0).tolist(), - 'min': np.min(normval, axis=0).tolist(), - 'max': np.max(normval, axis=0).tolist(), - 'std': np.std(normval, axis=0).tolist(), + 3, + { + "motion_norm": { + "mean": np.mean(normval, axis=0).tolist(), + "min": np.min(normval, axis=0).tolist(), + "max": np.max(normval, axis=0).tolist(), + "std": np.std(normval, axis=0).tolist(), } - }) + }, + ) save_json(statsfile, stats) def _run_interface(self, runtime): @@ -616,8 +692,7 @@ def _run_interface(self, runtime): funcfilelist = ensure_list(self.inputs.realigned_files) motparamlist = ensure_list(self.inputs.realignment_parameters) for i, imgf in enumerate(funcfilelist): - self._detect_outliers_core( - imgf, motparamlist[i], i, cwd=os.getcwd()) + self._detect_outliers_core(imgf, motparamlist[i], i, cwd=os.getcwd()) return runtime @@ -625,29 +700,30 @@ class StimCorrInputSpec(BaseInterfaceInputSpec): realignment_parameters = InputMultiPath( File(exists=True), mandatory=True, - desc=("Names of realignment " - "parameters corresponding to " - "the functional data files")) + desc=( + "Names of realignment " + "parameters 
corresponding to " + "the functional data files" + ), + ) intensity_values = InputMultiPath( File(exists=True), mandatory=True, - desc=("Name of file containing intensity " - "values")) + desc=("Name of file containing intensity " "values"), + ) spm_mat_file = File( - exists=True, - mandatory=True, - desc="SPM mat file (use pre-estimate SPM.mat file)") + exists=True, mandatory=True, desc="SPM mat file (use pre-estimate SPM.mat file)" + ) concatenated_design = traits.Bool( mandatory=True, - desc=("state if the design matrix " - "contains concatenated sessions")) + desc=("state if the design matrix " "contains concatenated sessions"), + ) class StimCorrOutputSpec(TraitedSpec): stimcorr_files = OutputMultiPath( - File(exists=True), - desc=("List of files containing " - "correlation values")) + File(exists=True), desc=("List of files containing " "correlation values") + ) class StimulusCorrelation(BaseInterface): @@ -688,12 +764,10 @@ def _get_output_filenames(self, motionfile, output_dir): """ (_, filename) = os.path.split(motionfile) (filename, _) = os.path.splitext(filename) - corrfile = os.path.join(output_dir, ''.join(('qa.', filename, - '_stimcorr.txt'))) + corrfile = os.path.join(output_dir, "".join(("qa.", filename, "_stimcorr.txt"))) return corrfile - def _stimcorr_core(self, motionfile, intensityfile, designmatrix, - cwd=None): + def _stimcorr_core(self, motionfile, intensityfile, designmatrix, cwd=None): """ Core routine for determining stimulus correlation @@ -710,14 +784,14 @@ def _stimcorr_core(self, motionfile, intensityfile, designmatrix, cm = np.corrcoef(concat_matrix, rowvar=0) corrfile = self._get_output_filenames(motionfile, cwd) # write output to outputfile - file = open(corrfile, 'w') + file = open(corrfile, "w") file.write("Stats for:\n") file.write("Stimulus correlated motion:\n%s\n" % motionfile) for i in range(dcol): file.write("SCM.%d:" % i) for v in cm[i, dcol + np.arange(mccol)]: file.write(" %.2f" % v) - file.write('\n') + file.write("\n") file.write("Stimulus correlated intensity:\n%s\n" % intensityfile) for i in range(dcol): file.write("SCI.%d: %.2f\n" % (i, cm[i, -1])) @@ -732,21 +806,19 @@ def _get_spm_submatrix(self, spmmat, sessidx, rows=None): sessidx: int index to session that needs to be extracted. """ - designmatrix = spmmat['SPM'][0][0].xX[0][0].X - U = spmmat['SPM'][0][0].Sess[0][sessidx].U[0] + designmatrix = spmmat["SPM"][0][0].xX[0][0].X + U = spmmat["SPM"][0][0].Sess[0][sessidx].U[0] if rows is None: - rows = spmmat['SPM'][0][0].Sess[0][sessidx].row[0] - 1 - cols = (spmmat['SPM'][0][0].Sess[0][sessidx].col[0][list( - range(len(U)))] - 1) - outmatrix = designmatrix.take( - rows.tolist(), axis=0).take( - cols.tolist(), axis=1) + rows = spmmat["SPM"][0][0].Sess[0][sessidx].row[0] - 1 + cols = spmmat["SPM"][0][0].Sess[0][sessidx].col[0][list(range(len(U)))] - 1 + outmatrix = designmatrix.take(rows.tolist(), axis=0).take(cols.tolist(), axis=1) return outmatrix def _run_interface(self, runtime): """Execute this module. 
""" import scipy.io as sio + motparamlist = self.inputs.realignment_parameters intensityfiles = self.inputs.intensity_values spmmat = sio.loadmat(self.inputs.spm_mat_file, struct_as_record=False) @@ -760,8 +832,7 @@ def _run_interface(self, runtime): rows = np.sum(nrows) + np.arange(mc_in.shape[0]) nrows.append(mc_in.shape[0]) matrix = self._get_spm_submatrix(spmmat, sessidx, rows) - self._stimcorr_core(motparamlist[i], intensityfiles[i], matrix, - os.getcwd()) + self._stimcorr_core(motparamlist[i], intensityfiles[i], matrix, os.getcwd()) return runtime def _list_outputs(self): @@ -770,5 +841,5 @@ def _list_outputs(self): for i, f in enumerate(self.inputs.realignment_parameters): files.insert(i, self._get_output_filenames(f, os.getcwd())) if files: - outputs['stimcorr_files'] = files + outputs["stimcorr_files"] = files return outputs diff --git a/nipype/algorithms/stats.py b/nipype/algorithms/stats.py index 29a45f5844..2a8b00f614 100644 --- a/nipype/algorithms/stats.py +++ b/nipype/algorithms/stats.py @@ -9,24 +9,33 @@ import numpy as np from ..interfaces.base import ( - BaseInterfaceInputSpec, TraitedSpec, SimpleInterface, - traits, InputMultiPath, File + BaseInterfaceInputSpec, + TraitedSpec, + SimpleInterface, + traits, + InputMultiPath, + File, ) from ..utils.filemanip import split_filename class ActivationCountInputSpec(BaseInterfaceInputSpec): - in_files = InputMultiPath(File(exists=True), mandatory=True, - desc='input file, generally a list of z-stat maps') + in_files = InputMultiPath( + File(exists=True), + mandatory=True, + desc="input file, generally a list of z-stat maps", + ) threshold = traits.Float( - mandatory=True, desc='binarization threshold. E.g. a threshold of 1.65 ' - 'corresponds to a two-sided Z-test of p<.10') + mandatory=True, + desc="binarization threshold. E.g. 
a threshold of 1.65 " + "corresponds to a two-sided Z-test of p<.10", + ) class ActivationCountOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='output activation count map') - acm_pos = File(exists=True, desc='positive activation count map') - acm_neg = File(exists=True, desc='negative activation count map') + out_file = File(exists=True, desc="output activation count map") + acm_pos = File(exists=True, desc="positive activation count map") + acm_neg = File(exists=True, desc="negative activation count map") class ActivationCount(SimpleInterface): @@ -36,31 +45,35 @@ class ActivationCount(SimpleInterface): Adapted from: https://github.com/poldracklab/CNP_task_analysis/\ blob/61c27f5992db9d8800884f8ffceb73e6957db8af/CNP_2nd_level_ACM.py """ + input_spec = ActivationCountInputSpec output_spec = ActivationCountOutputSpec def _run_interface(self, runtime): allmaps = nb.concat_images(self.inputs.in_files).get_data() - acm_pos = np.mean(allmaps > self.inputs.threshold, - axis=3, dtype=np.float32) - acm_neg = np.mean(allmaps < -1.0 * self.inputs.threshold, - axis=3, dtype=np.float32) + acm_pos = np.mean(allmaps > self.inputs.threshold, axis=3, dtype=np.float32) + acm_neg = np.mean( + allmaps < -1.0 * self.inputs.threshold, axis=3, dtype=np.float32 + ) acm_diff = acm_pos - acm_neg template_fname = self.inputs.in_files[0] ext = split_filename(template_fname)[2] - fname_fmt = os.path.join(runtime.cwd, 'acm_{}' + ext).format + fname_fmt = os.path.join(runtime.cwd, "acm_{}" + ext).format - self._results['out_file'] = fname_fmt('diff') - self._results['acm_pos'] = fname_fmt('pos') - self._results['acm_neg'] = fname_fmt('neg') + self._results["out_file"] = fname_fmt("diff") + self._results["acm_pos"] = fname_fmt("pos") + self._results["acm_neg"] = fname_fmt("neg") img = nb.load(template_fname) img.__class__(acm_diff, img.affine, img.header).to_filename( - self._results['out_file']) + self._results["out_file"] + ) img.__class__(acm_pos, img.affine, img.header).to_filename( - self._results['acm_pos']) + self._results["acm_pos"] + ) img.__class__(acm_neg, img.affine, img.header).to_filename( - self._results['acm_neg']) + self._results["acm_neg"] + ) return runtime diff --git a/nipype/algorithms/tests/test_CompCor.py b/nipype/algorithms/tests/test_CompCor.py index 3aa535dc19..762d8a0889 100644 --- a/nipype/algorithms/tests/test_CompCor.py +++ b/nipype/algorithms/tests/test_CompCor.py @@ -10,14 +10,14 @@ from ..confounds import CompCor, TCompCor, ACompCor -class TestCompCor(): - ''' Note: Tests currently do a poor job of testing functionality ''' +class TestCompCor: + """ Note: Tests currently do a poor job of testing functionality """ filenames = { - 'functionalnii': 'compcorfunc.nii', - 'masknii': 'compcormask.nii', - 'masknii2': 'compcormask2.nii', - 'components_file': None + "functionalnii": "compcorfunc.nii", + "masknii": "compcormask.nii", + "masknii2": "compcormask2.nii", + "components_file": None, } @pytest.fixture(autouse=True) @@ -26,32 +26,38 @@ def setup_class(self, tmpdir): tmpdir.chdir() noise = np.fromfunction(self.fake_noise_fun, self.fake_data.shape) self.realigned_file = utils.save_toy_nii( - self.fake_data + noise, self.filenames['functionalnii']) + self.fake_data + noise, self.filenames["functionalnii"] + ) mask = np.ones(self.fake_data.shape[:3]) mask[0, 0, 0] = 0 mask[0, 0, 1] = 0 - mask1 = utils.save_toy_nii(mask, self.filenames['masknii']) + mask1 = utils.save_toy_nii(mask, self.filenames["masknii"]) other_mask = np.ones(self.fake_data.shape[:3]) other_mask[0, 1, 0] = 0 
other_mask[1, 1, 0] = 0 - mask2 = utils.save_toy_nii(other_mask, self.filenames['masknii2']) + mask2 = utils.save_toy_nii(other_mask, self.filenames["masknii2"]) self.mask_files = [mask1, mask2] def test_compcor(self): - expected_components = [['-0.1989607212', '-0.5753813646'], [ - '0.5692369697', '0.5674945949' - ], ['-0.6662573243', - '0.4675843432'], ['0.4206466244', '-0.3361270124'], - ['-0.1246655485', '-0.1235705610']] + expected_components = [ + ["-0.1989607212", "-0.5753813646"], + ["0.5692369697", "0.5674945949"], + ["-0.6662573243", "0.4675843432"], + ["0.4206466244", "-0.3361270124"], + ["-0.1246655485", "-0.1235705610"], + ] self.run_cc( CompCor( num_components=6, realigned_file=self.realigned_file, mask_files=self.mask_files, - mask_index=0), expected_components) + mask_index=0, + ), + expected_components, + ) self.run_cc( ACompCor( @@ -59,50 +65,66 @@ def test_compcor(self): realigned_file=self.realigned_file, mask_files=self.mask_files, mask_index=0, - components_file='acc_components_file'), expected_components, - 'aCompCor') + components_file="acc_components_file", + ), + expected_components, + "aCompCor", + ) def test_compcor_variance_threshold_and_metadata(self): - expected_components = [['-0.2027150345', '-0.4954813834'], - ['0.2565929051', '0.7866217875'], - ['-0.3550986008', '-0.0089784905'], - ['0.7512786244', '-0.3599828482'], - ['-0.4500578942', '0.0778209345']] + expected_components = [ + ["-0.2027150345", "-0.4954813834"], + ["0.2565929051", "0.7866217875"], + ["-0.3550986008", "-0.0089784905"], + ["0.7512786244", "-0.3599828482"], + ["-0.4500578942", "0.0778209345"], + ] expected_metadata = { - 'component': 'CompCor00', - 'mask': 'mask', - 'singular_value': '4.0720553036', - 'variance_explained': '0.5527211465', - 'cumulative_variance_explained': '0.5527211465', - 'retained': 'True', + "component": "CompCor00", + "mask": "mask", + "singular_value": "4.0720553036", + "variance_explained": "0.5527211465", + "cumulative_variance_explained": "0.5527211465", + "retained": "True", } ccinterface = CompCor( - variance_threshold=0.7, - realigned_file=self.realigned_file, - mask_files=self.mask_files, - mask_names=['mask'], - mask_index=1, - save_metadata=True) - self.run_cc(ccinterface=ccinterface, - expected_components=expected_components, - expected_n_components=2, - expected_metadata=expected_metadata) + variance_threshold=0.7, + realigned_file=self.realigned_file, + mask_files=self.mask_files, + mask_names=["mask"], + mask_index=1, + save_metadata=True, + ) + self.run_cc( + ccinterface=ccinterface, + expected_components=expected_components, + expected_n_components=2, + expected_metadata=expected_metadata, + ) def test_tcompcor(self): - ccinterface = TCompCor(num_components=6, - realigned_file=self.realigned_file, percentile_threshold=0.75) - self.run_cc(ccinterface, [['-0.1114536190', '-0.4632908609'], [ - '0.4566907310', '0.6983205193' - ], ['-0.7132557407', '0.1340170559'], [ - '0.5022537643', '-0.5098322262' - ], ['-0.1342351356', '0.1407855119']], 'tCompCor') + ccinterface = TCompCor( + num_components=6, + realigned_file=self.realigned_file, + percentile_threshold=0.75, + ) + self.run_cc( + ccinterface, + [ + ["-0.1114536190", "-0.4632908609"], + ["0.4566907310", "0.6983205193"], + ["-0.7132557407", "0.1340170559"], + ["0.5022537643", "-0.5098322262"], + ["-0.1342351356", "0.1407855119"], + ], + "tCompCor", + ) def test_tcompcor_no_percentile(self): - ccinterface = TCompCor(num_components=6, - realigned_file=self.realigned_file) + ccinterface = 
TCompCor(num_components=6, realigned_file=self.realigned_file) ccinterface.run() - mask = nb.load('mask_000.nii.gz').get_data() + mask = nb.load("mask_000.nii.gz").get_data() num_nonmasked_voxels = np.count_nonzero(mask) assert num_nonmasked_voxels == 1 @@ -113,20 +135,25 @@ def test_compcor_no_regress_poly(self): realigned_file=self.realigned_file, mask_files=self.mask_files, mask_index=0, - pre_filter=False), [['0.4451946442', '-0.7683311482'], [ - '-0.4285129505', '-0.0926034137' - ], ['0.5721540256', '0.5608764842'], [ - '-0.5367548139', '0.0059943226' - ], ['-0.0520809054', '0.2940637551']]) + pre_filter=False, + ), + [ + ["0.4451946442", "-0.7683311482"], + ["-0.4285129505", "-0.0926034137"], + ["0.5721540256", "0.5608764842"], + ["-0.5367548139", "0.0059943226"], + ["-0.0520809054", "0.2940637551"], + ], + ) def test_tcompcor_asymmetric_dim(self): asymmetric_shape = (2, 3, 4, 5) asymmetric_data = utils.save_toy_nii( - np.zeros(asymmetric_shape), 'asymmetric.nii') + np.zeros(asymmetric_shape), "asymmetric.nii" + ) TCompCor(realigned_file=asymmetric_data).run() - assert nb.load( - 'mask_000.nii.gz').get_data().shape == asymmetric_shape[:3] + assert nb.load("mask_000.nii.gz").get_data().shape == asymmetric_shape[:3] def test_compcor_bad_input_shapes(self): # dim 0 is < dim 0 of self.mask_files (2) @@ -135,76 +162,81 @@ def test_compcor_bad_input_shapes(self): shape_more_than = (3, 3, 3, 5) for data_shape in (shape_less_than, shape_more_than): - data_file = utils.save_toy_nii(np.zeros(data_shape), 'temp.nii') - interface = CompCor( - realigned_file=data_file, mask_files=self.mask_files[0]) + data_file = utils.save_toy_nii(np.zeros(data_shape), "temp.nii") + interface = CompCor(realigned_file=data_file, mask_files=self.mask_files[0]) with pytest.raises(ValueError): interface.run() # Dimension mismatch def test_tcompcor_bad_input_dim(self): bad_dims = (2, 2, 2) - data_file = utils.save_toy_nii(np.zeros(bad_dims), 'temp.nii') + data_file = utils.save_toy_nii(np.zeros(bad_dims), "temp.nii") interface = TCompCor(realigned_file=data_file) with pytest.raises(ValueError): interface.run() # Not a 4D file def test_tcompcor_merge_intersect_masks(self): - for method in ['union', 'intersect']: + for method in ["union", "intersect"]: TCompCor( realigned_file=self.realigned_file, mask_files=self.mask_files, - merge_method=method).run() - if method == 'union': + merge_method=method, + ).run() + if method == "union": assert np.array_equal( - nb.load('mask_000.nii.gz').get_data(), - ([[[0, 0], [0, 0]], [[0, 0], [1, 0]]])) - if method == 'intersect': + nb.load("mask_000.nii.gz").get_data(), + ([[[0, 0], [0, 0]], [[0, 0], [1, 0]]]), + ) + if method == "intersect": assert np.array_equal( - nb.load('mask_000.nii.gz').get_data(), - ([[[0, 0], [0, 0]], [[0, 1], [0, 0]]])) + nb.load("mask_000.nii.gz").get_data(), + ([[[0, 0], [0, 0]], [[0, 1], [0, 0]]]), + ) def test_tcompcor_index_mask(self): TCompCor( - realigned_file=self.realigned_file, - mask_files=self.mask_files, - mask_index=1).run() + realigned_file=self.realigned_file, mask_files=self.mask_files, mask_index=1 + ).run() assert np.array_equal( - nb.load('mask_000.nii.gz').get_data(), - ([[[0, 0], [0, 0]], [[0, 1], [0, 0]]])) + nb.load("mask_000.nii.gz").get_data(), + ([[[0, 0], [0, 0]], [[0, 1], [0, 0]]]), + ) def test_tcompcor_multi_mask_no_index(self): interface = TCompCor( - realigned_file=self.realigned_file, mask_files=self.mask_files) + realigned_file=self.realigned_file, mask_files=self.mask_files + ) with pytest.raises(ValueError): 
interface.run() # more than one mask file - def run_cc(self, - ccinterface, - expected_components, - expected_header='CompCor', - expected_n_components=None, - expected_metadata=None): + def run_cc( + self, + ccinterface, + expected_components, + expected_header="CompCor", + expected_n_components=None, + expected_metadata=None, + ): # run ccresult = ccinterface.run() # assert - expected_file = ccinterface._list_outputs()['components_file'] + expected_file = ccinterface._list_outputs()["components_file"] assert ccresult.outputs.components_file == expected_file assert os.path.exists(expected_file) assert os.path.getsize(expected_file) > 0 - with open(ccresult.outputs.components_file, 'r') as components_file: + with open(ccresult.outputs.components_file, "r") as components_file: if expected_n_components is None: - expected_n_components = min(ccinterface.inputs.num_components, - self.fake_data.shape[3]) + expected_n_components = min( + ccinterface.inputs.num_components, self.fake_data.shape[3] + ) - components_data = [line.rstrip().split('\t') - for line in components_file] + components_data = [line.rstrip().split("\t") for line in components_file] # the first item will be '#', we can throw it out header = components_data.pop(0) expected_header = [ - expected_header + '{:02d}'.format(i) + expected_header + "{:02d}".format(i) for i in range(expected_n_components) ] for i, heading in enumerate(header): @@ -213,22 +245,22 @@ def run_cc(self, num_got_timepoints = len(components_data) assert num_got_timepoints == self.fake_data.shape[3] for index, timepoint in enumerate(components_data): - assert (len(timepoint) == expected_n_components) + assert len(timepoint) == expected_n_components assert timepoint[:2] == expected_components[index] if ccinterface.inputs.save_metadata: - expected_metadata_file = ( - ccinterface._list_outputs()['metadata_file']) + expected_metadata_file = ccinterface._list_outputs()["metadata_file"] assert ccresult.outputs.metadata_file == expected_metadata_file assert os.path.exists(expected_metadata_file) assert os.path.getsize(expected_metadata_file) > 0 - with open(ccresult.outputs.metadata_file, 'r') as metadata_file: - components_metadata = [line.rstrip().split('\t') - for line in metadata_file] - components_metadata = {i: j for i, j in - zip(components_metadata[0], - components_metadata[1])} + with open(ccresult.outputs.metadata_file, "r") as metadata_file: + components_metadata = [ + line.rstrip().split("\t") for line in metadata_file + ] + components_metadata = { + i: j for i, j in zip(components_metadata[0], components_metadata[1]) + } assert components_metadata == expected_metadata return ccresult @@ -237,7 +269,9 @@ def run_cc(self, def fake_noise_fun(i, j, l, m): return m * i + l - j - fake_data = np.array([[[[8, 5, 3, 8, 0], [6, 7, 4, 7, 1]], - [[7, 9, 1, 6, 5], [0, 7, 4, 7, 7]]], - [[[2, 4, 5, 7, 0], [1, 7, 0, 5, 4]], - [[7, 3, 9, 0, 4], [9, 4, 1, 5, 0]]]]) + fake_data = np.array( + [ + [[[8, 5, 3, 8, 0], [6, 7, 4, 7, 1]], [[7, 9, 1, 6, 5], [0, 7, 4, 7, 7]]], + [[[2, 4, 5, 7, 0], [1, 7, 0, 5, 4]], [[7, 3, 9, 0, 4], [9, 4, 1, 5, 0]]], + ] + ) diff --git a/nipype/algorithms/tests/test_ErrorMap.py b/nipype/algorithms/tests/test_ErrorMap.py index cfd30b0b74..4ae811008f 100644 --- a/nipype/algorithms/tests/test_ErrorMap.py +++ b/nipype/algorithms/tests/test_ErrorMap.py @@ -23,30 +23,30 @@ def test_errormap(tmpdir): img2 = nb.Nifti1Image(volume2, np.eye(4)) maskimg = nb.Nifti1Image(mask, np.eye(4)) - nb.save(img1, tmpdir.join('von.nii.gz').strpath) - nb.save(img2, 
tmpdir.join('alan.nii.gz').strpath) - nb.save(maskimg, tmpdir.join('mask.nii.gz').strpath) + nb.save(img1, tmpdir.join("von.nii.gz").strpath) + nb.save(img2, tmpdir.join("alan.nii.gz").strpath) + nb.save(maskimg, tmpdir.join("mask.nii.gz").strpath) # Default metric errmap = ErrorMap() - errmap.inputs.in_tst = tmpdir.join('von.nii.gz').strpath - errmap.inputs.in_ref = tmpdir.join('alan.nii.gz').strpath - errmap.out_map = tmpdir.join('out_map.nii.gz').strpath + errmap.inputs.in_tst = tmpdir.join("von.nii.gz").strpath + errmap.inputs.in_ref = tmpdir.join("alan.nii.gz").strpath + errmap.out_map = tmpdir.join("out_map.nii.gz").strpath result = errmap.run() assert result.outputs.distance == 1.125 # Square metric - errmap.inputs.metric = 'sqeuclidean' + errmap.inputs.metric = "sqeuclidean" result = errmap.run() assert result.outputs.distance == 1.125 # Linear metric - errmap.inputs.metric = 'euclidean' + errmap.inputs.metric = "euclidean" result = errmap.run() assert result.outputs.distance == 0.875 # Masked - errmap.inputs.mask = tmpdir.join('mask.nii.gz').strpath + errmap.inputs.mask = tmpdir.join("mask.nii.gz").strpath result = errmap.run() assert result.outputs.distance == 1.0 @@ -64,15 +64,15 @@ def test_errormap(tmpdir): msvolume2[:, :, :, 1] = volume1 msimg2 = nb.Nifti1Image(msvolume2, np.eye(4)) - nb.save(msimg1, tmpdir.join('von-ray.nii.gz').strpath) - nb.save(msimg2, tmpdir.join('alan-ray.nii.gz').strpath) + nb.save(msimg1, tmpdir.join("von-ray.nii.gz").strpath) + nb.save(msimg2, tmpdir.join("alan-ray.nii.gz").strpath) - errmap.inputs.in_tst = tmpdir.join('von-ray.nii.gz').strpath - errmap.inputs.in_ref = tmpdir.join('alan-ray.nii.gz').strpath - errmap.inputs.metric = 'sqeuclidean' + errmap.inputs.in_tst = tmpdir.join("von-ray.nii.gz").strpath + errmap.inputs.in_ref = tmpdir.join("alan-ray.nii.gz").strpath + errmap.inputs.metric = "sqeuclidean" result = errmap.run() assert result.outputs.distance == 5.5 - errmap.inputs.metric = 'euclidean' + errmap.inputs.metric = "euclidean" result = errmap.run() - assert result.outputs.distance == np.float32(1.25 * (2**0.5)) + assert result.outputs.distance == np.float32(1.25 * (2 ** 0.5)) diff --git a/nipype/algorithms/tests/test_Overlap.py b/nipype/algorithms/tests/test_Overlap.py index 786a7328b8..ea3b5a3f5d 100644 --- a/nipype/algorithms/tests/test_Overlap.py +++ b/nipype/algorithms/tests/test_Overlap.py @@ -5,7 +5,7 @@ import os -from nipype.testing import (example_data) +from nipype.testing import example_data import numpy as np @@ -15,10 +15,11 @@ def test_overlap(tmpdir): def check_close(val1, val2): import numpy.testing as npt + return npt.assert_almost_equal(val1, val2, decimal=3) - in1 = example_data('segmentation0.nii.gz') - in2 = example_data('segmentation1.nii.gz') + in1 = example_data("segmentation0.nii.gz") + in2 = example_data("segmentation1.nii.gz") tmpdir.chdir() overlap = Overlap() @@ -36,8 +37,7 @@ def check_close(val1, val2): overlap = Overlap() overlap.inputs.volume1 = in1 overlap.inputs.volume2 = in2 - overlap.inputs.vol_units = 'mm' + overlap.inputs.vol_units = "mm" res = overlap.run() check_close(res.outputs.jaccard, 0.99705) - check_close(res.outputs.roi_voldiff, np.array([0.0063086, -0.0025506, - 0.0])) + check_close(res.outputs.roi_voldiff, np.array([0.0063086, -0.0025506, 0.0])) diff --git a/nipype/algorithms/tests/test_TSNR.py b/nipype/algorithms/tests/test_TSNR.py index 1d192ec056..e00bf35e05 100644 --- a/nipype/algorithms/tests/test_TSNR.py +++ b/nipype/algorithms/tests/test_TSNR.py @@ -13,18 +13,18 @@ import os -class 
TestTSNR(): - ''' Note: Tests currently do a poor job of testing functionality ''' +class TestTSNR: + """ Note: Tests currently do a poor job of testing functionality """ in_filenames = { - 'in_file': 'tsnrinfile.nii', + "in_file": "tsnrinfile.nii", } out_filenames = { # default output file names - 'detrended_file': 'detrend.nii.gz', - 'mean_file': 'mean.nii.gz', - 'stddev_file': 'stdev.nii.gz', - 'tsnr_file': 'tsnr.nii.gz' + "detrended_file": "detrend.nii.gz", + "mean_file": "mean.nii.gz", + "stddev_file": "stdev.nii.gz", + "tsnr_file": "tsnr.nii.gz", } @pytest.fixture(autouse=True) @@ -32,78 +32,84 @@ def setup_class(self, tmpdir): # setup temp folder tmpdir.chdir() - utils.save_toy_nii(self.fake_data, self.in_filenames['in_file']) + utils.save_toy_nii(self.fake_data, self.in_filenames["in_file"]) def test_tsnr(self): # run - tsnrresult = TSNR(in_file=self.in_filenames['in_file']).run() + tsnrresult = TSNR(in_file=self.in_filenames["in_file"]).run() # assert self.assert_expected_outputs( - tsnrresult, { - 'mean_file': (2.8, 7.4), - 'stddev_file': (0.8, 2.9), - 'tsnr_file': (1.3, 9.25) - }) + tsnrresult, + { + "mean_file": (2.8, 7.4), + "stddev_file": (0.8, 2.9), + "tsnr_file": (1.3, 9.25), + }, + ) def test_tsnr_withpoly1(self): # run - tsnrresult = TSNR( - in_file=self.in_filenames['in_file'], regress_poly=1).run() + tsnrresult = TSNR(in_file=self.in_filenames["in_file"], regress_poly=1).run() # assert self.assert_expected_outputs_poly( - tsnrresult, { - 'detrended_file': (-0.1, 8.7), - 'mean_file': (2.8, 7.4), - 'stddev_file': (0.75, 2.75), - 'tsnr_file': (1.4, 9.9) - }) + tsnrresult, + { + "detrended_file": (-0.1, 8.7), + "mean_file": (2.8, 7.4), + "stddev_file": (0.75, 2.75), + "tsnr_file": (1.4, 9.9), + }, + ) def test_tsnr_withpoly2(self): # run - tsnrresult = TSNR( - in_file=self.in_filenames['in_file'], regress_poly=2).run() + tsnrresult = TSNR(in_file=self.in_filenames["in_file"], regress_poly=2).run() # assert self.assert_expected_outputs_poly( - tsnrresult, { - 'detrended_file': (-0.22, 8.55), - 'mean_file': (2.8, 7.7), - 'stddev_file': (0.21, 2.4), - 'tsnr_file': (1.7, 35.9) - }) + tsnrresult, + { + "detrended_file": (-0.22, 8.55), + "mean_file": (2.8, 7.7), + "stddev_file": (0.21, 2.4), + "tsnr_file": (1.7, 35.9), + }, + ) def test_tsnr_withpoly3(self): # run - tsnrresult = TSNR( - in_file=self.in_filenames['in_file'], regress_poly=3).run() + tsnrresult = TSNR(in_file=self.in_filenames["in_file"], regress_poly=3).run() # assert self.assert_expected_outputs_poly( - tsnrresult, { - 'detrended_file': (1.8, 7.95), - 'mean_file': (2.8, 7.7), - 'stddev_file': (0.1, 1.7), - 'tsnr_file': (2.6, 57.3) - }) - - @mock.patch('warnings.warn') + tsnrresult, + { + "detrended_file": (1.8, 7.95), + "mean_file": (2.8, 7.7), + "stddev_file": (0.1, 1.7), + "tsnr_file": (2.6, 57.3), + }, + ) + + @mock.patch("warnings.warn") def test_warning(self, mock_warn): - ''' test that usage of misc.TSNR trips a warning to use - confounds.TSNR instead ''' + """ test that usage of misc.TSNR trips a warning to use + confounds.TSNR instead """ # run - misc.TSNR(in_file=self.in_filenames['in_file']) + misc.TSNR(in_file=self.in_filenames["in_file"]) # assert assert True in [ - args[0].count('confounds') > 0 - for _, args, _ in mock_warn.mock_calls + args[0].count("confounds") > 0 for _, args, _ in mock_warn.mock_calls ] def assert_expected_outputs_poly(self, tsnrresult, expected_ranges): - assert os.path.basename(tsnrresult.outputs.detrended_file) == \ - self.out_filenames['detrended_file'] + assert ( + 
os.path.basename(tsnrresult.outputs.detrended_file) + == self.out_filenames["detrended_file"] + ) self.assert_expected_outputs(tsnrresult, expected_ranges) def assert_expected_outputs(self, tsnrresult, expected_ranges): @@ -111,12 +117,11 @@ def assert_expected_outputs(self, tsnrresult, expected_ranges): self.assert_unchanged(expected_ranges) def assert_default_outputs(self, outputs): - assert os.path.basename(outputs.mean_file) == \ - self.out_filenames['mean_file'] - assert os.path.basename(outputs.stddev_file) == \ - self.out_filenames['stddev_file'] - assert os.path.basename(outputs.tsnr_file) == \ - self.out_filenames['tsnr_file'] + assert os.path.basename(outputs.mean_file) == self.out_filenames["mean_file"] + assert ( + os.path.basename(outputs.stddev_file) == self.out_filenames["stddev_file"] + ) + assert os.path.basename(outputs.tsnr_file) == self.out_filenames["tsnr_file"] def assert_unchanged(self, expected_ranges): for key, (min_, max_) in expected_ranges.items(): @@ -124,7 +129,9 @@ def assert_unchanged(self, expected_ranges): npt.assert_almost_equal(np.amin(data), min_, decimal=1) npt.assert_almost_equal(np.amax(data), max_, decimal=1) - fake_data = np.array([[[[2, 4, 3, 9, 1], [3, 6, 4, 7, 4]], - [[8, 3, 4, 6, 2], [4, 0, 4, 4, 2]]], - [[[9, 7, 5, 5, 7], [7, 8, 4, 8, 4]], - [[0, 4, 7, 1, 7], [6, 8, 8, 8, 7]]]]) + fake_data = np.array( + [ + [[[2, 4, 3, 9, 1], [3, 6, 4, 7, 4]], [[8, 3, 4, 6, 2], [4, 0, 4, 4, 2]]], + [[[9, 7, 5, 5, 7], [7, 8, 4, 8, 4]], [[0, 4, 7, 1, 7], [6, 8, 8, 8, 7]]], + ] + ) diff --git a/nipype/algorithms/tests/test_auto_ACompCor.py b/nipype/algorithms/tests/test_auto_ACompCor.py index 598bbd7c5a..e2788e97d5 100644 --- a/nipype/algorithms/tests/test_auto_ACompCor.py +++ b/nipype/algorithms/tests/test_auto_ACompCor.py @@ -4,47 +4,37 @@ def test_ACompCor_inputs(): input_map = dict( - components_file=dict(usedefault=True, ), - failure_mode=dict(usedefault=True, ), + components_file=dict(usedefault=True,), + failure_mode=dict(usedefault=True,), header_prefix=dict(), - high_pass_cutoff=dict(usedefault=True, ), - ignore_initial_volumes=dict(usedefault=True, ), + high_pass_cutoff=dict(usedefault=True,), + ignore_initial_volumes=dict(usedefault=True,), mask_files=dict(), - mask_index=dict( - requires=['mask_files'], - xor=['merge_method'], - ), + mask_index=dict(requires=["mask_files"], xor=["merge_method"],), mask_names=dict(), - merge_method=dict( - requires=['mask_files'], - xor=['mask_index'], - ), - num_components=dict(xor=['variance_threshold'], ), - pre_filter=dict(usedefault=True, ), - realigned_file=dict( - extensions=None, - mandatory=True, - ), - regress_poly_degree=dict(usedefault=True, ), + merge_method=dict(requires=["mask_files"], xor=["mask_index"],), + num_components=dict(xor=["variance_threshold"],), + pre_filter=dict(usedefault=True,), + realigned_file=dict(extensions=None, mandatory=True,), + regress_poly_degree=dict(usedefault=True,), repetition_time=dict(), - save_metadata=dict(usedefault=True, ), - save_pre_filter=dict(usedefault=True, ), - use_regress_poly=dict( - deprecated='0.15.0', - new_name='pre_filter', - ), - variance_threshold=dict(xor=['num_components'], ), + save_metadata=dict(usedefault=True,), + save_pre_filter=dict(usedefault=True,), + use_regress_poly=dict(deprecated="0.15.0", new_name="pre_filter",), + variance_threshold=dict(xor=["num_components"],), ) inputs = ACompCor.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == 
value + + def test_ACompCor_outputs(): output_map = dict( - components_file=dict(extensions=None, ), - metadata_file=dict(extensions=None, ), - pre_filter_file=dict(extensions=None, ), + components_file=dict(extensions=None,), + metadata_file=dict(extensions=None,), + pre_filter_file=dict(extensions=None,), ) outputs = ACompCor.output_spec() diff --git a/nipype/algorithms/tests/test_auto_ActivationCount.py b/nipype/algorithms/tests/test_auto_ActivationCount.py index f3b8d77f23..6fc50301ac 100644 --- a/nipype/algorithms/tests/test_auto_ActivationCount.py +++ b/nipype/algorithms/tests/test_auto_ActivationCount.py @@ -3,20 +3,19 @@ def test_ActivationCount_inputs(): - input_map = dict( - in_files=dict(mandatory=True, ), - threshold=dict(mandatory=True, ), - ) + input_map = dict(in_files=dict(mandatory=True,), threshold=dict(mandatory=True,),) inputs = ActivationCount.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ActivationCount_outputs(): output_map = dict( - acm_neg=dict(extensions=None, ), - acm_pos=dict(extensions=None, ), - out_file=dict(extensions=None, ), + acm_neg=dict(extensions=None,), + acm_pos=dict(extensions=None,), + out_file=dict(extensions=None,), ) outputs = ActivationCount.output_spec() diff --git a/nipype/algorithms/tests/test_auto_AddCSVColumn.py b/nipype/algorithms/tests/test_auto_AddCSVColumn.py index b761ad25f5..a2d82b6eec 100644 --- a/nipype/algorithms/tests/test_auto_AddCSVColumn.py +++ b/nipype/algorithms/tests/test_auto_AddCSVColumn.py @@ -6,22 +6,18 @@ def test_AddCSVColumn_inputs(): input_map = dict( extra_column_heading=dict(), extra_field=dict(), - in_file=dict( - extensions=None, - mandatory=True, - ), - out_file=dict( - extensions=None, - usedefault=True, - ), + in_file=dict(extensions=None, mandatory=True,), + out_file=dict(extensions=None, usedefault=True,), ) inputs = AddCSVColumn.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_AddCSVColumn_outputs(): - output_map = dict(csv_file=dict(extensions=None, ), ) + output_map = dict(csv_file=dict(extensions=None,),) outputs = AddCSVColumn.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_AddCSVRow.py b/nipype/algorithms/tests/test_auto_AddCSVRow.py index 13d3da48b9..39d6d40abb 100644 --- a/nipype/algorithms/tests/test_auto_AddCSVRow.py +++ b/nipype/algorithms/tests/test_auto_AddCSVRow.py @@ -4,19 +4,17 @@ def test_AddCSVRow_inputs(): input_map = dict( - _outputs=dict(usedefault=True, ), - in_file=dict( - extensions=None, - mandatory=True, - ), + _outputs=dict(usedefault=True,), in_file=dict(extensions=None, mandatory=True,), ) inputs = AddCSVRow.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_AddCSVRow_outputs(): - output_map = dict(csv_file=dict(extensions=None, ), ) + output_map = dict(csv_file=dict(extensions=None,),) outputs = AddCSVRow.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_AddNoise.py b/nipype/algorithms/tests/test_auto_AddNoise.py index e98a761ca7..ad1e8734e8 100644 --- a/nipype/algorithms/tests/test_auto_AddNoise.py +++ b/nipype/algorithms/tests/test_auto_AddNoise.py @@ -4,29 +4,22 @@ def test_AddNoise_inputs(): 
input_map = dict( - bg_dist=dict( - mandatory=True, - usedefault=True, - ), - dist=dict( - mandatory=True, - usedefault=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), - in_mask=dict(extensions=None, ), - out_file=dict(extensions=None, ), - snr=dict(usedefault=True, ), + bg_dist=dict(mandatory=True, usedefault=True,), + dist=dict(mandatory=True, usedefault=True,), + in_file=dict(extensions=None, mandatory=True,), + in_mask=dict(extensions=None,), + out_file=dict(extensions=None,), + snr=dict(usedefault=True,), ) inputs = AddNoise.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_AddNoise_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = AddNoise.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_ArtifactDetect.py b/nipype/algorithms/tests/test_auto_ArtifactDetect.py index 6d49a4bd27..9340982472 100644 --- a/nipype/algorithms/tests/test_auto_ArtifactDetect.py +++ b/nipype/algorithms/tests/test_auto_ArtifactDetect.py @@ -4,45 +4,33 @@ def test_ArtifactDetect_inputs(): input_map = dict( - bound_by_brainmask=dict(usedefault=True, ), - global_threshold=dict(usedefault=True, ), - intersect_mask=dict(usedefault=True, ), - mask_file=dict(extensions=None, ), + bound_by_brainmask=dict(usedefault=True,), + global_threshold=dict(usedefault=True,), + intersect_mask=dict(usedefault=True,), + mask_file=dict(extensions=None,), mask_threshold=dict(), - mask_type=dict(mandatory=True, ), + mask_type=dict(mandatory=True,), norm_threshold=dict( - mandatory=True, - xor=['rotation_threshold', 'translation_threshold'], + mandatory=True, xor=["rotation_threshold", "translation_threshold"], ), - parameter_source=dict(mandatory=True, ), - plot_type=dict(usedefault=True, ), - realigned_files=dict(mandatory=True, ), - realignment_parameters=dict(mandatory=True, ), - rotation_threshold=dict( - mandatory=True, - xor=['norm_threshold'], - ), - save_plot=dict(usedefault=True, ), - translation_threshold=dict( - mandatory=True, - xor=['norm_threshold'], - ), - use_differences=dict( - maxlen=2, - minlen=2, - usedefault=True, - ), - use_norm=dict( - requires=['norm_threshold'], - usedefault=True, - ), - zintensity_threshold=dict(mandatory=True, ), + parameter_source=dict(mandatory=True,), + plot_type=dict(usedefault=True,), + realigned_files=dict(mandatory=True,), + realignment_parameters=dict(mandatory=True,), + rotation_threshold=dict(mandatory=True, xor=["norm_threshold"],), + save_plot=dict(usedefault=True,), + translation_threshold=dict(mandatory=True, xor=["norm_threshold"],), + use_differences=dict(maxlen=2, minlen=2, usedefault=True,), + use_norm=dict(requires=["norm_threshold"], usedefault=True,), + zintensity_threshold=dict(mandatory=True,), ) inputs = ArtifactDetect.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ArtifactDetect_outputs(): output_map = dict( displacement_files=dict(), diff --git a/nipype/algorithms/tests/test_auto_CalculateMedian.py b/nipype/algorithms/tests/test_auto_CalculateMedian.py index 1da36b096f..ff8d9edd7a 100644 --- a/nipype/algorithms/tests/test_auto_CalculateMedian.py +++ b/nipype/algorithms/tests/test_auto_CalculateMedian.py @@ -4,17 +4,17 @@ def test_CalculateMedian_inputs(): input_map 
= dict( - in_files=dict(), - median_file=dict(), - median_per_file=dict(usedefault=True, ), + in_files=dict(), median_file=dict(), median_per_file=dict(usedefault=True,), ) inputs = CalculateMedian.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CalculateMedian_outputs(): - output_map = dict(median_files=dict(), ) + output_map = dict(median_files=dict(),) outputs = CalculateMedian.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py b/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py index 06ad3c3508..102ec2c205 100644 --- a/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py +++ b/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py @@ -4,19 +4,18 @@ def test_CalculateNormalizedMoments_inputs(): input_map = dict( - moment=dict(mandatory=True, ), - timeseries_file=dict( - extensions=None, - mandatory=True, - ), + moment=dict(mandatory=True,), + timeseries_file=dict(extensions=None, mandatory=True,), ) inputs = CalculateNormalizedMoments.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CalculateNormalizedMoments_outputs(): - output_map = dict(moments=dict(), ) + output_map = dict(moments=dict(),) outputs = CalculateNormalizedMoments.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_ComputeDVARS.py b/nipype/algorithms/tests/test_auto_ComputeDVARS.py index 3c68d0bd98..ed51de0b0a 100644 --- a/nipype/algorithms/tests/test_auto_ComputeDVARS.py +++ b/nipype/algorithms/tests/test_auto_ComputeDVARS.py @@ -4,24 +4,18 @@ def test_ComputeDVARS_inputs(): input_map = dict( - figdpi=dict(usedefault=True, ), - figformat=dict(usedefault=True, ), - figsize=dict(usedefault=True, ), - in_file=dict( - extensions=None, - mandatory=True, - ), - in_mask=dict( - extensions=None, - mandatory=True, - ), - intensity_normalization=dict(usedefault=True, ), - remove_zerovariance=dict(usedefault=True, ), - save_all=dict(usedefault=True, ), - save_nstd=dict(usedefault=True, ), - save_plot=dict(usedefault=True, ), - save_std=dict(usedefault=True, ), - save_vxstd=dict(usedefault=True, ), + figdpi=dict(usedefault=True,), + figformat=dict(usedefault=True,), + figsize=dict(usedefault=True,), + in_file=dict(extensions=None, mandatory=True,), + in_mask=dict(extensions=None, mandatory=True,), + intensity_normalization=dict(usedefault=True,), + remove_zerovariance=dict(usedefault=True,), + save_all=dict(usedefault=True,), + save_nstd=dict(usedefault=True,), + save_plot=dict(usedefault=True,), + save_std=dict(usedefault=True,), + save_vxstd=dict(usedefault=True,), series_tr=dict(), ) inputs = ComputeDVARS.input_spec() @@ -29,18 +23,20 @@ def test_ComputeDVARS_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ComputeDVARS_outputs(): output_map = dict( avg_nstd=dict(), avg_std=dict(), avg_vxstd=dict(), - fig_nstd=dict(extensions=None, ), - fig_std=dict(extensions=None, ), - fig_vxstd=dict(extensions=None, ), - out_all=dict(extensions=None, ), - out_nstd=dict(extensions=None, ), - out_std=dict(extensions=None, ), - out_vxstd=dict(extensions=None, ), + fig_nstd=dict(extensions=None,), + 
fig_std=dict(extensions=None,), + fig_vxstd=dict(extensions=None,), + out_all=dict(extensions=None,), + out_nstd=dict(extensions=None,), + out_std=dict(extensions=None,), + out_vxstd=dict(extensions=None,), ) outputs = ComputeDVARS.output_spec() diff --git a/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py b/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py index cf76cd7467..871564b817 100644 --- a/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py +++ b/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py @@ -4,35 +4,25 @@ def test_ComputeMeshWarp_inputs(): input_map = dict( - metric=dict(usedefault=True, ), - out_file=dict( - extensions=None, - usedefault=True, - ), - out_warp=dict( - extensions=None, - usedefault=True, - ), - surface1=dict( - extensions=None, - mandatory=True, - ), - surface2=dict( - extensions=None, - mandatory=True, - ), - weighting=dict(usedefault=True, ), + metric=dict(usedefault=True,), + out_file=dict(extensions=None, usedefault=True,), + out_warp=dict(extensions=None, usedefault=True,), + surface1=dict(extensions=None, mandatory=True,), + surface2=dict(extensions=None, mandatory=True,), + weighting=dict(usedefault=True,), ) inputs = ComputeMeshWarp.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ComputeMeshWarp_outputs(): output_map = dict( distance=dict(), - out_file=dict(extensions=None, ), - out_warp=dict(extensions=None, ), + out_file=dict(extensions=None,), + out_warp=dict(extensions=None,), ) outputs = ComputeMeshWarp.output_spec() diff --git a/nipype/algorithms/tests/test_auto_CreateNifti.py b/nipype/algorithms/tests/test_auto_CreateNifti.py index af9853227c..d4989386b4 100644 --- a/nipype/algorithms/tests/test_auto_CreateNifti.py +++ b/nipype/algorithms/tests/test_auto_CreateNifti.py @@ -5,22 +5,18 @@ def test_CreateNifti_inputs(): input_map = dict( affine=dict(), - data_file=dict( - extensions=None, - mandatory=True, - ), - header_file=dict( - extensions=None, - mandatory=True, - ), + data_file=dict(extensions=None, mandatory=True,), + header_file=dict(extensions=None, mandatory=True,), ) inputs = CreateNifti.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CreateNifti_outputs(): - output_map = dict(nifti_file=dict(extensions=None, ), ) + output_map = dict(nifti_file=dict(extensions=None,),) outputs = CreateNifti.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_Distance.py b/nipype/algorithms/tests/test_auto_Distance.py index 254a6b23d2..e334e3a0f1 100644 --- a/nipype/algorithms/tests/test_auto_Distance.py +++ b/nipype/algorithms/tests/test_auto_Distance.py @@ -4,28 +4,21 @@ def test_Distance_inputs(): input_map = dict( - mask_volume=dict(extensions=None, ), - method=dict(usedefault=True, ), - volume1=dict( - extensions=None, - mandatory=True, - ), - volume2=dict( - extensions=None, - mandatory=True, - ), + mask_volume=dict(extensions=None,), + method=dict(usedefault=True,), + volume1=dict(extensions=None, mandatory=True,), + volume2=dict(extensions=None, mandatory=True,), ) inputs = Distance.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Distance_outputs(): output_map = dict( - distance=dict(), - 
histogram=dict(extensions=None, ), - point1=dict(), - point2=dict(), + distance=dict(), histogram=dict(extensions=None,), point1=dict(), point2=dict(), ) outputs = Distance.output_spec() diff --git a/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py b/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py index e529b891e7..1bc46fba64 100644 --- a/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py +++ b/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py @@ -4,24 +4,15 @@ def test_FramewiseDisplacement_inputs(): input_map = dict( - figdpi=dict(usedefault=True, ), - figsize=dict(usedefault=True, ), - in_file=dict( - extensions=None, - mandatory=True, - ), - normalize=dict(usedefault=True, ), - out_figure=dict( - extensions=None, - usedefault=True, - ), - out_file=dict( - extensions=None, - usedefault=True, - ), - parameter_source=dict(mandatory=True, ), - radius=dict(usedefault=True, ), - save_plot=dict(usedefault=True, ), + figdpi=dict(usedefault=True,), + figsize=dict(usedefault=True,), + in_file=dict(extensions=None, mandatory=True,), + normalize=dict(usedefault=True,), + out_figure=dict(extensions=None, usedefault=True,), + out_file=dict(extensions=None, usedefault=True,), + parameter_source=dict(mandatory=True,), + radius=dict(usedefault=True,), + save_plot=dict(usedefault=True,), series_tr=dict(), ) inputs = FramewiseDisplacement.input_spec() @@ -29,11 +20,13 @@ def test_FramewiseDisplacement_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FramewiseDisplacement_outputs(): output_map = dict( fd_average=dict(), - out_figure=dict(extensions=None, ), - out_file=dict(extensions=None, ), + out_figure=dict(extensions=None,), + out_file=dict(extensions=None,), ) outputs = FramewiseDisplacement.output_spec() diff --git a/nipype/algorithms/tests/test_auto_FuzzyOverlap.py b/nipype/algorithms/tests/test_auto_FuzzyOverlap.py index 80513bd850..877f864bee 100644 --- a/nipype/algorithms/tests/test_auto_FuzzyOverlap.py +++ b/nipype/algorithms/tests/test_auto_FuzzyOverlap.py @@ -4,27 +4,21 @@ def test_FuzzyOverlap_inputs(): input_map = dict( - in_mask=dict(extensions=None, ), - in_ref=dict(mandatory=True, ), - in_tst=dict(mandatory=True, ), - out_file=dict( - extensions=None, - usedefault=True, - ), - weighting=dict(usedefault=True, ), + in_mask=dict(extensions=None,), + in_ref=dict(mandatory=True,), + in_tst=dict(mandatory=True,), + out_file=dict(extensions=None, usedefault=True,), + weighting=dict(usedefault=True,), ) inputs = FuzzyOverlap.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FuzzyOverlap_outputs(): - output_map = dict( - class_fdi=dict(), - class_fji=dict(), - dice=dict(), - jaccard=dict(), - ) + output_map = dict(class_fdi=dict(), class_fji=dict(), dice=dict(), jaccard=dict(),) outputs = FuzzyOverlap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_Gunzip.py b/nipype/algorithms/tests/test_auto_Gunzip.py index 765a1a3f68..40a1f44531 100644 --- a/nipype/algorithms/tests/test_auto_Gunzip.py +++ b/nipype/algorithms/tests/test_auto_Gunzip.py @@ -3,17 +3,16 @@ def test_Gunzip_inputs(): - input_map = dict(in_file=dict( - extensions=None, - mandatory=True, - ), ) + input_map = dict(in_file=dict(extensions=None, mandatory=True,),) inputs = Gunzip.input_spec() for key, 
metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Gunzip_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Gunzip.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_ICC.py b/nipype/algorithms/tests/test_auto_ICC.py index 2056ad000e..9797fdb4af 100644 --- a/nipype/algorithms/tests/test_auto_ICC.py +++ b/nipype/algorithms/tests/test_auto_ICC.py @@ -4,22 +4,21 @@ def test_ICC_inputs(): input_map = dict( - mask=dict( - extensions=None, - mandatory=True, - ), - subjects_sessions=dict(mandatory=True, ), + mask=dict(extensions=None, mandatory=True,), + subjects_sessions=dict(mandatory=True,), ) inputs = ICC.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ICC_outputs(): output_map = dict( - icc_map=dict(extensions=None, ), - session_var_map=dict(extensions=None, ), - subject_var_map=dict(extensions=None, ), + icc_map=dict(extensions=None,), + session_var_map=dict(extensions=None,), + subject_var_map=dict(extensions=None,), ) outputs = ICC.output_spec() diff --git a/nipype/algorithms/tests/test_auto_Matlab2CSV.py b/nipype/algorithms/tests/test_auto_Matlab2CSV.py index b783f317f7..665dbc3fed 100644 --- a/nipype/algorithms/tests/test_auto_Matlab2CSV.py +++ b/nipype/algorithms/tests/test_auto_Matlab2CSV.py @@ -4,19 +4,18 @@ def test_Matlab2CSV_inputs(): input_map = dict( - in_file=dict( - extensions=None, - mandatory=True, - ), - reshape_matrix=dict(usedefault=True, ), + in_file=dict(extensions=None, mandatory=True,), + reshape_matrix=dict(usedefault=True,), ) inputs = Matlab2CSV.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Matlab2CSV_outputs(): - output_map = dict(csv_files=dict(), ) + output_map = dict(csv_files=dict(),) outputs = Matlab2CSV.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_MergeCSVFiles.py b/nipype/algorithms/tests/test_auto_MergeCSVFiles.py index aeacb0eaf5..f4f7bc54a0 100644 --- a/nipype/algorithms/tests/test_auto_MergeCSVFiles.py +++ b/nipype/algorithms/tests/test_auto_MergeCSVFiles.py @@ -7,12 +7,9 @@ def test_MergeCSVFiles_inputs(): column_headings=dict(), extra_column_heading=dict(), extra_field=dict(), - in_files=dict(mandatory=True, ), - out_file=dict( - extensions=None, - usedefault=True, - ), - row_heading_title=dict(usedefault=True, ), + in_files=dict(mandatory=True,), + out_file=dict(extensions=None, usedefault=True,), + row_heading_title=dict(usedefault=True,), row_headings=dict(), ) inputs = MergeCSVFiles.input_spec() @@ -20,8 +17,10 @@ def test_MergeCSVFiles_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MergeCSVFiles_outputs(): - output_map = dict(csv_file=dict(extensions=None, ), ) + output_map = dict(csv_file=dict(extensions=None,),) outputs = MergeCSVFiles.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_MergeROIs.py b/nipype/algorithms/tests/test_auto_MergeROIs.py index a588955342..7f56b9d08c 100644 --- 
a/nipype/algorithms/tests/test_auto_MergeROIs.py +++ b/nipype/algorithms/tests/test_auto_MergeROIs.py @@ -4,17 +4,17 @@ def test_MergeROIs_inputs(): input_map = dict( - in_files=dict(), - in_index=dict(), - in_reference=dict(extensions=None, ), + in_files=dict(), in_index=dict(), in_reference=dict(extensions=None,), ) inputs = MergeROIs.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MergeROIs_outputs(): - output_map = dict(merged_file=dict(extensions=None, ), ) + output_map = dict(merged_file=dict(extensions=None,),) outputs = MergeROIs.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_MeshWarpMaths.py b/nipype/algorithms/tests/test_auto_MeshWarpMaths.py index 887f5a54e7..be0de9e541 100644 --- a/nipype/algorithms/tests/test_auto_MeshWarpMaths.py +++ b/nipype/algorithms/tests/test_auto_MeshWarpMaths.py @@ -5,34 +5,21 @@ def test_MeshWarpMaths_inputs(): input_map = dict( float_trait=dict(), - in_surf=dict( - extensions=None, - mandatory=True, - ), - operation=dict(usedefault=True, ), - operator=dict( - mandatory=True, - usedefault=True, - ), - out_file=dict( - extensions=None, - usedefault=True, - ), - out_warp=dict( - extensions=None, - usedefault=True, - ), + in_surf=dict(extensions=None, mandatory=True,), + operation=dict(usedefault=True,), + operator=dict(mandatory=True, usedefault=True,), + out_file=dict(extensions=None, usedefault=True,), + out_warp=dict(extensions=None, usedefault=True,), ) inputs = MeshWarpMaths.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MeshWarpMaths_outputs(): - output_map = dict( - out_file=dict(extensions=None, ), - out_warp=dict(extensions=None, ), - ) + output_map = dict(out_file=dict(extensions=None,), out_warp=dict(extensions=None,),) outputs = MeshWarpMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_ModifyAffine.py b/nipype/algorithms/tests/test_auto_ModifyAffine.py index d524a3cf58..a4a441e662 100644 --- a/nipype/algorithms/tests/test_auto_ModifyAffine.py +++ b/nipype/algorithms/tests/test_auto_ModifyAffine.py @@ -4,16 +4,17 @@ def test_ModifyAffine_inputs(): input_map = dict( - transformation_matrix=dict(usedefault=True, ), - volumes=dict(mandatory=True, ), + transformation_matrix=dict(usedefault=True,), volumes=dict(mandatory=True,), ) inputs = ModifyAffine.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ModifyAffine_outputs(): - output_map = dict(transformed_volumes=dict(), ) + output_map = dict(transformed_volumes=dict(),) outputs = ModifyAffine.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py b/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py index 453f7c17f0..5d42bcf0e7 100644 --- a/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py +++ b/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py @@ -3,17 +3,16 @@ def test_NonSteadyStateDetector_inputs(): - input_map = dict(in_file=dict( - extensions=None, - mandatory=True, - ), ) + input_map = dict(in_file=dict(extensions=None, mandatory=True,),) inputs = NonSteadyStateDetector.input_spec() for key, 
metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_NonSteadyStateDetector_outputs(): - output_map = dict(n_volumes_to_discard=dict(), ) + output_map = dict(n_volumes_to_discard=dict(),) outputs = NonSteadyStateDetector.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py b/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py index da95a8b2a2..41b8cc030d 100644 --- a/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py +++ b/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py @@ -3,17 +3,16 @@ def test_NormalizeProbabilityMapSet_inputs(): - input_map = dict( - in_files=dict(), - in_mask=dict(extensions=None, ), - ) + input_map = dict(in_files=dict(), in_mask=dict(extensions=None,),) inputs = NormalizeProbabilityMapSet.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_NormalizeProbabilityMapSet_outputs(): - output_map = dict(out_files=dict(), ) + output_map = dict(out_files=dict(),) outputs = NormalizeProbabilityMapSet.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_P2PDistance.py b/nipype/algorithms/tests/test_auto_P2PDistance.py index a10b6e4b58..0c11648576 100644 --- a/nipype/algorithms/tests/test_auto_P2PDistance.py +++ b/nipype/algorithms/tests/test_auto_P2PDistance.py @@ -4,35 +4,25 @@ def test_P2PDistance_inputs(): input_map = dict( - metric=dict(usedefault=True, ), - out_file=dict( - extensions=None, - usedefault=True, - ), - out_warp=dict( - extensions=None, - usedefault=True, - ), - surface1=dict( - extensions=None, - mandatory=True, - ), - surface2=dict( - extensions=None, - mandatory=True, - ), - weighting=dict(usedefault=True, ), + metric=dict(usedefault=True,), + out_file=dict(extensions=None, usedefault=True,), + out_warp=dict(extensions=None, usedefault=True,), + surface1=dict(extensions=None, mandatory=True,), + surface2=dict(extensions=None, mandatory=True,), + weighting=dict(usedefault=True,), ) inputs = P2PDistance.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_P2PDistance_outputs(): output_map = dict( distance=dict(), - out_file=dict(extensions=None, ), - out_warp=dict(extensions=None, ), + out_file=dict(extensions=None,), + out_warp=dict(extensions=None,), ) outputs = P2PDistance.output_spec() diff --git a/nipype/algorithms/tests/test_auto_PickAtlas.py b/nipype/algorithms/tests/test_auto_PickAtlas.py index 7fbe81307e..71a76aba5c 100644 --- a/nipype/algorithms/tests/test_auto_PickAtlas.py +++ b/nipype/algorithms/tests/test_auto_PickAtlas.py @@ -4,22 +4,21 @@ def test_PickAtlas_inputs(): input_map = dict( - atlas=dict( - extensions=None, - mandatory=True, - ), - dilation_size=dict(usedefault=True, ), - hemi=dict(usedefault=True, ), - labels=dict(mandatory=True, ), - output_file=dict(extensions=None, ), + atlas=dict(extensions=None, mandatory=True,), + dilation_size=dict(usedefault=True,), + hemi=dict(usedefault=True,), + labels=dict(mandatory=True,), + output_file=dict(extensions=None,), ) inputs = PickAtlas.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], 
metakey) == value + + def test_PickAtlas_outputs(): - output_map = dict(mask_file=dict(extensions=None, ), ) + output_map = dict(mask_file=dict(extensions=None,),) outputs = PickAtlas.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_Similarity.py b/nipype/algorithms/tests/test_auto_Similarity.py index f138e36295..3a851d0d30 100644 --- a/nipype/algorithms/tests/test_auto_Similarity.py +++ b/nipype/algorithms/tests/test_auto_Similarity.py @@ -4,25 +4,21 @@ def test_Similarity_inputs(): input_map = dict( - mask1=dict(extensions=None, ), - mask2=dict(extensions=None, ), - metric=dict(usedefault=True, ), - volume1=dict( - extensions=None, - mandatory=True, - ), - volume2=dict( - extensions=None, - mandatory=True, - ), + mask1=dict(extensions=None,), + mask2=dict(extensions=None,), + metric=dict(usedefault=True,), + volume1=dict(extensions=None, mandatory=True,), + volume2=dict(extensions=None, mandatory=True,), ) inputs = Similarity.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Similarity_outputs(): - output_map = dict(similarity=dict(), ) + output_map = dict(similarity=dict(),) outputs = Similarity.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_SimpleThreshold.py b/nipype/algorithms/tests/test_auto_SimpleThreshold.py index 2c66dc9f76..7a1c531c3d 100644 --- a/nipype/algorithms/tests/test_auto_SimpleThreshold.py +++ b/nipype/algorithms/tests/test_auto_SimpleThreshold.py @@ -3,17 +3,16 @@ def test_SimpleThreshold_inputs(): - input_map = dict( - threshold=dict(mandatory=True, ), - volumes=dict(mandatory=True, ), - ) + input_map = dict(threshold=dict(mandatory=True,), volumes=dict(mandatory=True,),) inputs = SimpleThreshold.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SimpleThreshold_outputs(): - output_map = dict(thresholded_volumes=dict(), ) + output_map = dict(thresholded_volumes=dict(),) outputs = SimpleThreshold.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_SpecifyModel.py b/nipype/algorithms/tests/test_auto_SpecifyModel.py index 4c856de1bb..fd583f42bc 100644 --- a/nipype/algorithms/tests/test_auto_SpecifyModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifyModel.py @@ -5,37 +5,33 @@ def test_SpecifyModel_inputs(): input_map = dict( bids_amplitude_column=dict(), - bids_condition_column=dict(usedefault=True, ), + bids_condition_column=dict(usedefault=True,), bids_event_file=dict( - mandatory=True, - xor=['subject_info', 'event_files', 'bids_event_file'], + mandatory=True, xor=["subject_info", "event_files", "bids_event_file"], ), event_files=dict( - mandatory=True, - xor=['subject_info', 'event_files', 'bids_event_file'], + mandatory=True, xor=["subject_info", "event_files", "bids_event_file"], ), - functional_runs=dict( - copyfile=False, - mandatory=True, - ), - high_pass_filter_cutoff=dict(mandatory=True, ), - input_units=dict(mandatory=True, ), - outlier_files=dict(copyfile=False, ), - parameter_source=dict(usedefault=True, ), - realignment_parameters=dict(copyfile=False, ), + functional_runs=dict(copyfile=False, mandatory=True,), + high_pass_filter_cutoff=dict(mandatory=True,), + input_units=dict(mandatory=True,), + outlier_files=dict(copyfile=False,), + 
parameter_source=dict(usedefault=True,), + realignment_parameters=dict(copyfile=False,), subject_info=dict( - mandatory=True, - xor=['subject_info', 'event_files', 'bids_event_file'], + mandatory=True, xor=["subject_info", "event_files", "bids_event_file"], ), - time_repetition=dict(mandatory=True, ), + time_repetition=dict(mandatory=True,), ) inputs = SpecifyModel.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SpecifyModel_outputs(): - output_map = dict(session_info=dict(), ) + output_map = dict(session_info=dict(),) outputs = SpecifyModel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_SpecifySPMModel.py b/nipype/algorithms/tests/test_auto_SpecifySPMModel.py index a0f2c5a1a6..cb8c5f7a17 100644 --- a/nipype/algorithms/tests/test_auto_SpecifySPMModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifySPMModel.py @@ -5,39 +5,35 @@ def test_SpecifySPMModel_inputs(): input_map = dict( bids_amplitude_column=dict(), - bids_condition_column=dict(usedefault=True, ), + bids_condition_column=dict(usedefault=True,), bids_event_file=dict( - mandatory=True, - xor=['subject_info', 'event_files', 'bids_event_file'], + mandatory=True, xor=["subject_info", "event_files", "bids_event_file"], ), - concatenate_runs=dict(usedefault=True, ), + concatenate_runs=dict(usedefault=True,), event_files=dict( - mandatory=True, - xor=['subject_info', 'event_files', 'bids_event_file'], + mandatory=True, xor=["subject_info", "event_files", "bids_event_file"], ), - functional_runs=dict( - copyfile=False, - mandatory=True, - ), - high_pass_filter_cutoff=dict(mandatory=True, ), - input_units=dict(mandatory=True, ), - outlier_files=dict(copyfile=False, ), - output_units=dict(usedefault=True, ), - parameter_source=dict(usedefault=True, ), - realignment_parameters=dict(copyfile=False, ), + functional_runs=dict(copyfile=False, mandatory=True,), + high_pass_filter_cutoff=dict(mandatory=True,), + input_units=dict(mandatory=True,), + outlier_files=dict(copyfile=False,), + output_units=dict(usedefault=True,), + parameter_source=dict(usedefault=True,), + realignment_parameters=dict(copyfile=False,), subject_info=dict( - mandatory=True, - xor=['subject_info', 'event_files', 'bids_event_file'], + mandatory=True, xor=["subject_info", "event_files", "bids_event_file"], ), - time_repetition=dict(mandatory=True, ), + time_repetition=dict(mandatory=True,), ) inputs = SpecifySPMModel.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SpecifySPMModel_outputs(): - output_map = dict(session_info=dict(), ) + output_map = dict(session_info=dict(),) outputs = SpecifySPMModel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_SpecifySparseModel.py b/nipype/algorithms/tests/test_auto_SpecifySparseModel.py index 839cba7e60..ad116a86b1 100644 --- a/nipype/algorithms/tests/test_auto_SpecifySparseModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifySparseModel.py @@ -5,48 +5,44 @@ def test_SpecifySparseModel_inputs(): input_map = dict( bids_amplitude_column=dict(), - bids_condition_column=dict(usedefault=True, ), + bids_condition_column=dict(usedefault=True,), bids_event_file=dict( - mandatory=True, - xor=['subject_info', 'event_files', 'bids_event_file'], + mandatory=True, 
xor=["subject_info", "event_files", "bids_event_file"], ), event_files=dict( - mandatory=True, - xor=['subject_info', 'event_files', 'bids_event_file'], + mandatory=True, xor=["subject_info", "event_files", "bids_event_file"], ), - functional_runs=dict( - copyfile=False, - mandatory=True, - ), - high_pass_filter_cutoff=dict(mandatory=True, ), - input_units=dict(mandatory=True, ), + functional_runs=dict(copyfile=False, mandatory=True,), + high_pass_filter_cutoff=dict(mandatory=True,), + input_units=dict(mandatory=True,), model_hrf=dict(), - outlier_files=dict(copyfile=False, ), - parameter_source=dict(usedefault=True, ), - realignment_parameters=dict(copyfile=False, ), + outlier_files=dict(copyfile=False,), + parameter_source=dict(usedefault=True,), + realignment_parameters=dict(copyfile=False,), save_plot=dict(), - scale_regressors=dict(usedefault=True, ), - scan_onset=dict(usedefault=True, ), - stimuli_as_impulses=dict(usedefault=True, ), + scale_regressors=dict(usedefault=True,), + scan_onset=dict(usedefault=True,), + stimuli_as_impulses=dict(usedefault=True,), subject_info=dict( - mandatory=True, - xor=['subject_info', 'event_files', 'bids_event_file'], + mandatory=True, xor=["subject_info", "event_files", "bids_event_file"], ), - time_acquisition=dict(mandatory=True, ), - time_repetition=dict(mandatory=True, ), - use_temporal_deriv=dict(requires=['model_hrf'], ), - volumes_in_cluster=dict(usedefault=True, ), + time_acquisition=dict(mandatory=True,), + time_repetition=dict(mandatory=True,), + use_temporal_deriv=dict(requires=["model_hrf"],), + volumes_in_cluster=dict(usedefault=True,), ) inputs = SpecifySparseModel.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SpecifySparseModel_outputs(): output_map = dict( session_info=dict(), - sparse_png_file=dict(extensions=None, ), - sparse_svg_file=dict(extensions=None, ), + sparse_png_file=dict(extensions=None,), + sparse_svg_file=dict(extensions=None,), ) outputs = SpecifySparseModel.output_spec() diff --git a/nipype/algorithms/tests/test_auto_SplitROIs.py b/nipype/algorithms/tests/test_auto_SplitROIs.py index acaef12eee..a9f3844775 100644 --- a/nipype/algorithms/tests/test_auto_SplitROIs.py +++ b/nipype/algorithms/tests/test_auto_SplitROIs.py @@ -4,11 +4,8 @@ def test_SplitROIs_inputs(): input_map = dict( - in_file=dict( - extensions=None, - mandatory=True, - ), - in_mask=dict(extensions=None, ), + in_file=dict(extensions=None, mandatory=True,), + in_mask=dict(extensions=None,), roi_size=dict(), ) inputs = SplitROIs.input_spec() @@ -16,12 +13,10 @@ def test_SplitROIs_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SplitROIs_outputs(): - output_map = dict( - out_files=dict(), - out_index=dict(), - out_masks=dict(), - ) + output_map = dict(out_files=dict(), out_index=dict(), out_masks=dict(),) outputs = SplitROIs.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_StimulusCorrelation.py b/nipype/algorithms/tests/test_auto_StimulusCorrelation.py index c39fa0bc97..2e95175ca8 100644 --- a/nipype/algorithms/tests/test_auto_StimulusCorrelation.py +++ b/nipype/algorithms/tests/test_auto_StimulusCorrelation.py @@ -4,21 +4,20 @@ def test_StimulusCorrelation_inputs(): input_map = dict( - concatenated_design=dict(mandatory=True, ), - 
intensity_values=dict(mandatory=True, ), - realignment_parameters=dict(mandatory=True, ), - spm_mat_file=dict( - extensions=None, - mandatory=True, - ), + concatenated_design=dict(mandatory=True,), + intensity_values=dict(mandatory=True,), + realignment_parameters=dict(mandatory=True,), + spm_mat_file=dict(extensions=None, mandatory=True,), ) inputs = StimulusCorrelation.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_StimulusCorrelation_outputs(): - output_map = dict(stimcorr_files=dict(), ) + output_map = dict(stimcorr_files=dict(),) outputs = StimulusCorrelation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_TCompCor.py b/nipype/algorithms/tests/test_auto_TCompCor.py index 9fe97f946b..d8cb0d7ae6 100644 --- a/nipype/algorithms/tests/test_auto_TCompCor.py +++ b/nipype/algorithms/tests/test_auto_TCompCor.py @@ -4,49 +4,39 @@ def test_TCompCor_inputs(): input_map = dict( - components_file=dict(usedefault=True, ), - failure_mode=dict(usedefault=True, ), + components_file=dict(usedefault=True,), + failure_mode=dict(usedefault=True,), header_prefix=dict(), - high_pass_cutoff=dict(usedefault=True, ), - ignore_initial_volumes=dict(usedefault=True, ), + high_pass_cutoff=dict(usedefault=True,), + ignore_initial_volumes=dict(usedefault=True,), mask_files=dict(), - mask_index=dict( - requires=['mask_files'], - xor=['merge_method'], - ), + mask_index=dict(requires=["mask_files"], xor=["merge_method"],), mask_names=dict(), - merge_method=dict( - requires=['mask_files'], - xor=['mask_index'], - ), - num_components=dict(xor=['variance_threshold'], ), - percentile_threshold=dict(usedefault=True, ), - pre_filter=dict(usedefault=True, ), - realigned_file=dict( - extensions=None, - mandatory=True, - ), - regress_poly_degree=dict(usedefault=True, ), + merge_method=dict(requires=["mask_files"], xor=["mask_index"],), + num_components=dict(xor=["variance_threshold"],), + percentile_threshold=dict(usedefault=True,), + pre_filter=dict(usedefault=True,), + realigned_file=dict(extensions=None, mandatory=True,), + regress_poly_degree=dict(usedefault=True,), repetition_time=dict(), - save_metadata=dict(usedefault=True, ), - save_pre_filter=dict(usedefault=True, ), - use_regress_poly=dict( - deprecated='0.15.0', - new_name='pre_filter', - ), - variance_threshold=dict(xor=['num_components'], ), + save_metadata=dict(usedefault=True,), + save_pre_filter=dict(usedefault=True,), + use_regress_poly=dict(deprecated="0.15.0", new_name="pre_filter",), + variance_threshold=dict(xor=["num_components"],), ) inputs = TCompCor.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TCompCor_outputs(): output_map = dict( - components_file=dict(extensions=None, ), + components_file=dict(extensions=None,), high_variance_masks=dict(), - metadata_file=dict(extensions=None, ), - pre_filter_file=dict(extensions=None, ), + metadata_file=dict(extensions=None,), + pre_filter_file=dict(extensions=None,), ) outputs = TCompCor.output_spec() diff --git a/nipype/algorithms/tests/test_auto_WarpPoints.py b/nipype/algorithms/tests/test_auto_WarpPoints.py index 3c18186db7..493f9fb26f 100644 --- a/nipype/algorithms/tests/test_auto_WarpPoints.py +++ b/nipype/algorithms/tests/test_auto_WarpPoints.py @@ -4,33 +4,26 @@ def test_WarpPoints_inputs(): input_map 
= dict( - interp=dict( - mandatory=True, - usedefault=True, - ), + interp=dict(mandatory=True, usedefault=True,), out_points=dict( extensions=None, keep_extension=True, - name_source='points', - name_template='%s_warped', - output_name='out_points', - ), - points=dict( - extensions=None, - mandatory=True, - ), - warp=dict( - extensions=None, - mandatory=True, + name_source="points", + name_template="%s_warped", + output_name="out_points", ), + points=dict(extensions=None, mandatory=True,), + warp=dict(extensions=None, mandatory=True,), ) inputs = WarpPoints.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_WarpPoints_outputs(): - output_map = dict(out_points=dict(extensions=None, ), ) + output_map = dict(out_points=dict(extensions=None,),) outputs = WarpPoints.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_confounds.py b/nipype/algorithms/tests/test_confounds.py index c9ef93f49f..29f18c9221 100644 --- a/nipype/algorithms/tests/test_confounds.py +++ b/nipype/algorithms/tests/test_confounds.py @@ -4,13 +4,13 @@ import pytest from nipype.testing import example_data -from nipype.algorithms.confounds import FramewiseDisplacement, ComputeDVARS, \ - is_outlier +from nipype.algorithms.confounds import FramewiseDisplacement, ComputeDVARS, is_outlier import numpy as np nonitime = True try: import nitime + nonitime = False except ImportError: pass @@ -18,31 +18,34 @@ def test_fd(tmpdir): tempdir = tmpdir.strpath - ground_truth = np.loadtxt(example_data('fsl_motion_outliers_fd.txt')) + ground_truth = np.loadtxt(example_data("fsl_motion_outliers_fd.txt")) fdisplacement = FramewiseDisplacement( - in_file=example_data('fsl_mcflirt_movpar.txt'), - out_file=tempdir + '/fd.txt', - parameter_source="FSL") + in_file=example_data("fsl_mcflirt_movpar.txt"), + out_file=tempdir + "/fd.txt", + parameter_source="FSL", + ) res = fdisplacement.run() with open(res.outputs.out_file) as all_lines: for line in all_lines: - assert 'FramewiseDisplacement' in line + assert "FramewiseDisplacement" in line break assert np.allclose( - ground_truth, np.loadtxt(res.outputs.out_file, skiprows=1), atol=.16) + ground_truth, np.loadtxt(res.outputs.out_file, skiprows=1), atol=0.16 + ) assert np.abs(ground_truth.mean() - res.outputs.fd_average) < 1e-2 @pytest.mark.skipif(nonitime, reason="nitime is not installed") def test_dvars(tmpdir): - ground_truth = np.loadtxt(example_data('ds003_sub-01_mc.DVARS')) + ground_truth = np.loadtxt(example_data("ds003_sub-01_mc.DVARS")) dvars = ComputeDVARS( - in_file=example_data('ds003_sub-01_mc.nii.gz'), - in_mask=example_data('ds003_sub-01_mc_brainmask.nii.gz'), + in_file=example_data("ds003_sub-01_mc.nii.gz"), + in_mask=example_data("ds003_sub-01_mc_brainmask.nii.gz"), save_all=True, - intensity_normalization=0) + intensity_normalization=0, + ) tmpdir.chdir() res = dvars.run() @@ -54,9 +57,10 @@ def test_dvars(tmpdir): assert (np.abs(dv1[:, 2] - ground_truth[:, 2]).sum() / len(dv1)) < 0.05 dvars = ComputeDVARS( - in_file=example_data('ds003_sub-01_mc.nii.gz'), - in_mask=example_data('ds003_sub-01_mc_brainmask.nii.gz'), - save_all=True) + in_file=example_data("ds003_sub-01_mc.nii.gz"), + in_mask=example_data("ds003_sub-01_mc_brainmask.nii.gz"), + save_all=True, + ) res = dvars.run() dv1 = np.loadtxt(res.outputs.out_all, skiprows=1) diff --git a/nipype/algorithms/tests/test_icc_anova.py 
b/nipype/algorithms/tests/test_icc_anova.py index 79ed312f40..bd0fe3525b 100644 --- a/nipype/algorithms/tests/test_icc_anova.py +++ b/nipype/algorithms/tests/test_icc_anova.py @@ -7,8 +7,16 @@ def test_ICC_rep_anova(): # see table 2 in P. E. Shrout & Joseph L. Fleiss (1979). "Intraclass # Correlations: Uses in Assessing Rater Reliability". Psychological # Bulletin 86 (2): 420-428 - Y = np.array([[9, 2, 5, 8], [6, 1, 3, 2], [8, 4, 6, 8], [7, 1, 2, 6], - [10, 5, 6, 9], [6, 2, 4, 7]]) + Y = np.array( + [ + [9, 2, 5, 8], + [6, 1, 3, 2], + [8, 4, 6, 8], + [7, 1, 2, 6], + [10, 5, 6, 9], + [6, 2, 4, 7], + ] + ) icc, r_var, e_var, _, dfc, dfe = ICC_rep_anova(Y) # see table 4 diff --git a/nipype/algorithms/tests/test_mesh_ops.py b/nipype/algorithms/tests/test_mesh_ops.py index a08a5a97c3..8be59e08c0 100644 --- a/nipype/algorithms/tests/test_mesh_ops.py +++ b/nipype/algorithms/tests/test_mesh_ops.py @@ -17,15 +17,15 @@ def test_ident_distances(tmpdir): tmpdir.chdir() - in_surf = example_data('surf01.vtk') + in_surf = example_data("surf01.vtk") dist_ident = m.ComputeMeshWarp() dist_ident.inputs.surface1 = in_surf dist_ident.inputs.surface2 = in_surf - dist_ident.inputs.out_file = tmpdir.join('distance.npy').strpath + dist_ident.inputs.out_file = tmpdir.join("distance.npy").strpath res = dist_ident.run() assert res.outputs.distance == 0.0 - dist_ident.inputs.weighting = 'area' + dist_ident.inputs.weighting = "area" res = dist_ident.run() assert res.outputs.distance == 0.0 @@ -34,8 +34,8 @@ def test_ident_distances(tmpdir): def test_trans_distances(tmpdir): from ...interfaces.vtkbase import tvtk - in_surf = example_data('surf01.vtk') - warped_surf = tmpdir.join('warped.vtk').strpath + in_surf = example_data("surf01.vtk") + warped_surf = tmpdir.join("warped.vtk").strpath inc = np.array([0.7, 0.3, -0.2]) @@ -51,10 +51,10 @@ def test_trans_distances(tmpdir): dist = m.ComputeMeshWarp() dist.inputs.surface1 = in_surf dist.inputs.surface2 = warped_surf - dist.inputs.out_file = tmpdir.join('distance.npy').strpath + dist.inputs.out_file = tmpdir.join("distance.npy").strpath res = dist.run() assert np.allclose(res.outputs.distance, np.linalg.norm(inc), 4) - dist.inputs.weighting = 'area' + dist.inputs.weighting = "area" res = dist.run() assert np.allclose(res.outputs.distance, np.linalg.norm(inc), 4) diff --git a/nipype/algorithms/tests/test_metrics.py b/nipype/algorithms/tests/test_metrics.py index fb876b3c72..ad7502992e 100644 --- a/nipype/algorithms/tests/test_metrics.py +++ b/nipype/algorithms/tests/test_metrics.py @@ -11,24 +11,21 @@ def test_fuzzy_overlap(tmpdir): tmpdir.chdir() # Tests with tissue probability maps - in_mask = example_data('tpms_msk.nii.gz') - tpms = [example_data('tpm_%02d.nii.gz' % i) for i in range(3)] + in_mask = example_data("tpms_msk.nii.gz") + tpms = [example_data("tpm_%02d.nii.gz" % i) for i in range(3)] out = FuzzyOverlap(in_ref=tpms[0], in_tst=tpms[0]).run().outputs assert out.dice == 1 - out = FuzzyOverlap( - in_mask=in_mask, in_ref=tpms[0], in_tst=tpms[0]).run().outputs + out = FuzzyOverlap(in_mask=in_mask, in_ref=tpms[0], in_tst=tpms[0]).run().outputs assert out.dice == 1 - out = FuzzyOverlap( - in_mask=in_mask, in_ref=tpms[0], in_tst=tpms[1]).run().outputs + out = FuzzyOverlap(in_mask=in_mask, in_ref=tpms[0], in_tst=tpms[1]).run().outputs assert 0 < out.dice < 1 out = FuzzyOverlap(in_ref=tpms, in_tst=tpms).run().outputs assert out.dice == 1.0 - out = FuzzyOverlap( - in_mask=in_mask, in_ref=tpms, in_tst=tpms).run().outputs + out = FuzzyOverlap(in_mask=in_mask, in_ref=tpms, 
in_tst=tpms).run().outputs assert out.dice == 1.0 # Tests with synthetic 3x3x3 images @@ -36,14 +33,14 @@ def test_fuzzy_overlap(tmpdir): data[0, 0, 0] = 0.5 data[2, 2, 2] = 0.25 data[1, 1, 1] = 0.3 - nb.Nifti1Image(data, np.eye(4)).to_filename('test1.nii.gz') + nb.Nifti1Image(data, np.eye(4)).to_filename("test1.nii.gz") data = np.zeros((3, 3, 3), dtype=float) data[0, 0, 0] = 0.6 data[1, 1, 1] = 0.3 - nb.Nifti1Image(data, np.eye(4)).to_filename('test2.nii.gz') + nb.Nifti1Image(data, np.eye(4)).to_filename("test2.nii.gz") - out = FuzzyOverlap(in_ref='test1.nii.gz', in_tst='test2.nii.gz').run().outputs + out = FuzzyOverlap(in_ref="test1.nii.gz", in_tst="test2.nii.gz").run().outputs assert np.allclose(out.dice, 0.82051) # Just considering the mask, the central pixel @@ -51,8 +48,13 @@ def test_fuzzy_overlap(tmpdir): data = np.zeros((3, 3, 3), dtype=int) data[0, 0, 0] = 1 data[2, 2, 2] = 1 - nb.Nifti1Image(data, np.eye(4)).to_filename('mask.nii.gz') - - out = FuzzyOverlap(in_ref='test1.nii.gz', in_tst='test2.nii.gz', - in_mask='mask.nii.gz').run().outputs + nb.Nifti1Image(data, np.eye(4)).to_filename("mask.nii.gz") + + out = ( + FuzzyOverlap( + in_ref="test1.nii.gz", in_tst="test2.nii.gz", in_mask="mask.nii.gz" + ) + .run() + .outputs + ) assert np.allclose(out.dice, 0.74074) diff --git a/nipype/algorithms/tests/test_misc.py b/nipype/algorithms/tests/test_misc.py index e9d5cbdb3c..40aab24b2a 100644 --- a/nipype/algorithms/tests/test_misc.py +++ b/nipype/algorithms/tests/test_misc.py @@ -26,7 +26,8 @@ def test_CreateNifti(create_analyze_pair_file_in_directory): # .inputs based parameters setting create_nifti.inputs.header_file = filelist[0] create_nifti.inputs.data_file = fname_presuffix( - filelist[0], '', '.img', use_ext=False) + filelist[0], "", ".img", use_ext=False + ) result = create_nifti.run() @@ -41,7 +42,7 @@ def test_CalculateMedian(create_analyze_pair_file_in_directory): with pytest.raises(TypeError): mean.run() - mean.inputs.in_files = example_data('ds003_sub-01_mc.nii.gz') + mean.inputs.in_files = example_data("ds003_sub-01_mc.nii.gz") eg = mean.run() assert os.path.exists(eg.outputs.median_files) diff --git a/nipype/algorithms/tests/test_modelgen.py b/nipype/algorithms/tests/test_modelgen.py index 759e53b0a4..a2c85f6747 100644 --- a/nipype/algorithms/tests/test_modelgen.py +++ b/nipype/algorithms/tests/test_modelgen.py @@ -12,234 +12,234 @@ import numpy.testing as npt from nipype.testing import example_data from nipype.interfaces.base import Bunch, TraitError -from nipype.algorithms.modelgen import (bids_gen_info, SpecifyModel, - SpecifySparseModel, SpecifySPMModel) +from nipype.algorithms.modelgen import ( + bids_gen_info, + SpecifyModel, + SpecifySparseModel, + SpecifySPMModel, +) def test_bids_gen_info(): - fname = example_data('events.tsv') + fname = example_data("events.tsv") res = bids_gen_info([fname]) - assert res[0].onsets == [[183.75, 313.75, 483.75, 633.75, 783.75, 933.75, 1083.75, 1233.75]] + assert res[0].onsets == [ + [183.75, 313.75, 483.75, 633.75, 783.75, 933.75, 1083.75, 1233.75] + ] assert res[0].durations == [[20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0]] - assert res[0].amplitudes ==[[1, 1, 1, 1, 1, 1, 1, 1]] - assert res[0].conditions == ['ev0'] + assert res[0].amplitudes == [[1, 1, 1, 1, 1, 1, 1, 1]] + assert res[0].conditions == ["ev0"] def test_modelgen1(tmpdir): - filename1 = tmpdir.join('test1.nii').strpath - filename2 = tmpdir.join('test2.nii').strpath - Nifti1Image(np.random.rand(10, 10, 10, 200), - np.eye(4)).to_filename(filename1) - 
Nifti1Image(np.random.rand(10, 10, 10, 200), - np.eye(4)).to_filename(filename2) + filename1 = tmpdir.join("test1.nii").strpath + filename2 = tmpdir.join("test2.nii").strpath + Nifti1Image(np.random.rand(10, 10, 10, 200), np.eye(4)).to_filename(filename1) + Nifti1Image(np.random.rand(10, 10, 10, 200), np.eye(4)).to_filename(filename2) s = SpecifyModel() - s.inputs.input_units = 'scans' - set_output_units = lambda: setattr(s.inputs, 'output_units', 'scans') + s.inputs.input_units = "scans" + set_output_units = lambda: setattr(s.inputs, "output_units", "scans") with pytest.raises(TraitError): set_output_units() s.inputs.functional_runs = [filename1, filename2] s.inputs.time_repetition = 6 - s.inputs.high_pass_filter_cutoff = 128. + s.inputs.high_pass_filter_cutoff = 128.0 info = [ Bunch( - conditions=['cond1'], + conditions=["cond1"], onsets=[[2, 50, 100, 180]], durations=[[1]], amplitudes=None, pmod=None, regressors=None, regressor_names=None, - tmod=None), + tmod=None, + ), Bunch( - conditions=['cond1'], + conditions=["cond1"], onsets=[[30, 40, 100, 150]], durations=[[1]], amplitudes=None, pmod=None, regressors=None, regressor_names=None, - tmod=None) + tmod=None, + ), ] s.inputs.subject_info = info res = s.run() assert len(res.outputs.session_info) == 2 - assert len(res.outputs.session_info[0]['regress']) == 0 - assert len(res.outputs.session_info[0]['cond']) == 1 + assert len(res.outputs.session_info[0]["regress"]) == 0 + assert len(res.outputs.session_info[0]["cond"]) == 1 npt.assert_almost_equal( - np.array(res.outputs.session_info[0]['cond'][0]['onset']), - np.array([12, 300, 600, 1080])) + np.array(res.outputs.session_info[0]["cond"][0]["onset"]), + np.array([12, 300, 600, 1080]), + ) info = [ - Bunch(conditions=['cond1'], onsets=[[2]], durations=[[1]]), - Bunch(conditions=['cond1'], onsets=[[3]], durations=[[1]]) + Bunch(conditions=["cond1"], onsets=[[2]], durations=[[1]]), + Bunch(conditions=["cond1"], onsets=[[3]], durations=[[1]]), ] s.inputs.subject_info = deepcopy(info) res = s.run() npt.assert_almost_equal( - np.array(res.outputs.session_info[0]['cond'][0]['duration']), - np.array([6.])) + np.array(res.outputs.session_info[0]["cond"][0]["duration"]), np.array([6.0]) + ) npt.assert_almost_equal( - np.array(res.outputs.session_info[1]['cond'][0]['duration']), - np.array([6.])) + np.array(res.outputs.session_info[1]["cond"][0]["duration"]), np.array([6.0]) + ) info = [ Bunch( - conditions=['cond1', 'cond2'], - onsets=[[2, 3], [2]], - durations=[[1, 1], [1]]), + conditions=["cond1", "cond2"], onsets=[[2, 3], [2]], durations=[[1, 1], [1]] + ), Bunch( - conditions=['cond1', 'cond2'], + conditions=["cond1", "cond2"], onsets=[[2, 3], [2, 4]], - durations=[[1, 1], [1, 1]]) + durations=[[1, 1], [1, 1]], + ), ] s.inputs.subject_info = deepcopy(info) - s.inputs.input_units = 'scans' + s.inputs.input_units = "scans" res = s.run() npt.assert_almost_equal( - np.array(res.outputs.session_info[0]['cond'][0]['duration']), - np.array([6., 6.])) + np.array(res.outputs.session_info[0]["cond"][0]["duration"]), + np.array([6.0, 6.0]), + ) npt.assert_almost_equal( - np.array(res.outputs.session_info[0]['cond'][1]['duration']), - np.array([ - 6., - ])) + np.array(res.outputs.session_info[0]["cond"][1]["duration"]), np.array([6.0,]) + ) npt.assert_almost_equal( - np.array(res.outputs.session_info[1]['cond'][1]['duration']), - np.array([6., 6.])) + np.array(res.outputs.session_info[1]["cond"][1]["duration"]), + np.array([6.0, 6.0]), + ) def test_modelgen_spm_concat(tmpdir): - filename1 = 
tmpdir.join('test1.nii').strpath - filename2 = tmpdir.join('test2.nii').strpath - Nifti1Image(np.random.rand(10, 10, 10, 30), - np.eye(4)).to_filename(filename1) - Nifti1Image(np.random.rand(10, 10, 10, 30), - np.eye(4)).to_filename(filename2) + filename1 = tmpdir.join("test1.nii").strpath + filename2 = tmpdir.join("test2.nii").strpath + Nifti1Image(np.random.rand(10, 10, 10, 30), np.eye(4)).to_filename(filename1) + Nifti1Image(np.random.rand(10, 10, 10, 30), np.eye(4)).to_filename(filename2) # Test case when only one duration is passed, as being the same for all onsets. s = SpecifySPMModel() - s.inputs.input_units = 'secs' + s.inputs.input_units = "secs" s.inputs.concatenate_runs = True - setattr(s.inputs, 'output_units', 'secs') - assert s.inputs.output_units == 'secs' + setattr(s.inputs, "output_units", "secs") + assert s.inputs.output_units == "secs" s.inputs.functional_runs = [filename1, filename2] s.inputs.time_repetition = 6 - s.inputs.high_pass_filter_cutoff = 128. + s.inputs.high_pass_filter_cutoff = 128.0 info = [ - Bunch( - conditions=['cond1'], onsets=[[2, 50, 100, 170]], durations=[[1]]), - Bunch( - conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]]) + Bunch(conditions=["cond1"], onsets=[[2, 50, 100, 170]], durations=[[1]]), + Bunch(conditions=["cond1"], onsets=[[30, 40, 100, 150]], durations=[[1]]), ] s.inputs.subject_info = deepcopy(info) res = s.run() assert len(res.outputs.session_info) == 1 - assert len(res.outputs.session_info[0]['regress']) == 1 - assert np.sum(res.outputs.session_info[0]['regress'][0]['val']) == 30 - assert len(res.outputs.session_info[0]['cond']) == 1 + assert len(res.outputs.session_info[0]["regress"]) == 1 + assert np.sum(res.outputs.session_info[0]["regress"][0]["val"]) == 30 + assert len(res.outputs.session_info[0]["cond"]) == 1 npt.assert_almost_equal( - np.array(res.outputs.session_info[0]['cond'][0]['onset']), - np.array([2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0])) + np.array(res.outputs.session_info[0]["cond"][0]["onset"]), + np.array([2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0]), + ) npt.assert_almost_equal( - np.array(res.outputs.session_info[0]['cond'][0]['duration']), - np.array([1., 1., 1., 1., 1., 1., 1., 1.])) + np.array(res.outputs.session_info[0]["cond"][0]["duration"]), + np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]), + ) # Test case of scans as output units instead of seconds - setattr(s.inputs, 'output_units', 'scans') - assert s.inputs.output_units == 'scans' + setattr(s.inputs, "output_units", "scans") + assert s.inputs.output_units == "scans" s.inputs.subject_info = deepcopy(info) res = s.run() npt.assert_almost_equal( - np.array(res.outputs.session_info[0]['cond'][0]['onset']), - np.array([2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0]) / 6) + np.array(res.outputs.session_info[0]["cond"][0]["onset"]), + np.array([2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0]) / 6, + ) # Test case for no concatenation with seconds as output units s.inputs.concatenate_runs = False s.inputs.subject_info = deepcopy(info) - s.inputs.output_units = 'secs' + s.inputs.output_units = "secs" res = s.run() npt.assert_almost_equal( - np.array(res.outputs.session_info[0]['cond'][0]['onset']), - np.array([2.0, 50.0, 100.0, 170.0])) + np.array(res.outputs.session_info[0]["cond"][0]["onset"]), + np.array([2.0, 50.0, 100.0, 170.0]), + ) # Test case for variable number of events in separate runs, sometimes unique. 
- filename3 = tmpdir.join('test3.nii').strpath - Nifti1Image(np.random.rand(10, 10, 10, 30), - np.eye(4)).to_filename(filename3) + filename3 = tmpdir.join("test3.nii").strpath + Nifti1Image(np.random.rand(10, 10, 10, 30), np.eye(4)).to_filename(filename3) s.inputs.functional_runs = [filename1, filename2, filename3] info = [ Bunch( - conditions=['cond1', 'cond2'], - onsets=[[2, 3], [2]], - durations=[[1, 1], [1]]), + conditions=["cond1", "cond2"], onsets=[[2, 3], [2]], durations=[[1, 1], [1]] + ), Bunch( - conditions=['cond1', 'cond2'], + conditions=["cond1", "cond2"], onsets=[[2, 3], [2, 4]], - durations=[[1, 1], [1, 1]]), + durations=[[1, 1], [1, 1]], + ), Bunch( - conditions=['cond1', 'cond2'], - onsets=[[2, 3], [2]], - durations=[[1, 1], [1]]) + conditions=["cond1", "cond2"], onsets=[[2, 3], [2]], durations=[[1, 1], [1]] + ), ] s.inputs.subject_info = deepcopy(info) res = s.run() npt.assert_almost_equal( - np.array(res.outputs.session_info[0]['cond'][0]['duration']), - np.array([1., 1.])) + np.array(res.outputs.session_info[0]["cond"][0]["duration"]), + np.array([1.0, 1.0]), + ) npt.assert_almost_equal( - np.array(res.outputs.session_info[0]['cond'][1]['duration']), - np.array([ - 1., - ])) + np.array(res.outputs.session_info[0]["cond"][1]["duration"]), np.array([1.0,]) + ) npt.assert_almost_equal( - np.array(res.outputs.session_info[1]['cond'][1]['duration']), - np.array([1., 1.])) + np.array(res.outputs.session_info[1]["cond"][1]["duration"]), + np.array([1.0, 1.0]), + ) npt.assert_almost_equal( - np.array(res.outputs.session_info[2]['cond'][1]['duration']), - np.array([ - 1., - ])) + np.array(res.outputs.session_info[2]["cond"][1]["duration"]), np.array([1.0,]) + ) # Test case for variable number of events in concatenated runs, sometimes unique. 
s.inputs.concatenate_runs = True info = [ Bunch( - conditions=['cond1', 'cond2'], - onsets=[[2, 3], [2]], - durations=[[1, 1], [1]]), + conditions=["cond1", "cond2"], onsets=[[2, 3], [2]], durations=[[1, 1], [1]] + ), Bunch( - conditions=['cond1', 'cond2'], + conditions=["cond1", "cond2"], onsets=[[2, 3], [2, 4]], - durations=[[1, 1], [1, 1]]), + durations=[[1, 1], [1, 1]], + ), Bunch( - conditions=['cond1', 'cond2'], - onsets=[[2, 3], [2]], - durations=[[1, 1], [1]]) + conditions=["cond1", "cond2"], onsets=[[2, 3], [2]], durations=[[1, 1], [1]] + ), ] s.inputs.subject_info = deepcopy(info) res = s.run() npt.assert_almost_equal( - np.array(res.outputs.session_info[0]['cond'][0]['duration']), - np.array([1., 1., 1., 1., 1., 1.])) + np.array(res.outputs.session_info[0]["cond"][0]["duration"]), + np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0]), + ) npt.assert_almost_equal( - np.array(res.outputs.session_info[0]['cond'][1]['duration']), - np.array([1., 1., 1., 1.])) + np.array(res.outputs.session_info[0]["cond"][1]["duration"]), + np.array([1.0, 1.0, 1.0, 1.0]), + ) def test_modelgen_sparse(tmpdir): - filename1 = tmpdir.join('test1.nii').strpath - filename2 = tmpdir.join('test2.nii').strpath - Nifti1Image(np.random.rand(10, 10, 10, 50), - np.eye(4)).to_filename(filename1) - Nifti1Image(np.random.rand(10, 10, 10, 50), - np.eye(4)).to_filename(filename2) + filename1 = tmpdir.join("test1.nii").strpath + filename2 = tmpdir.join("test2.nii").strpath + Nifti1Image(np.random.rand(10, 10, 10, 50), np.eye(4)).to_filename(filename1) + Nifti1Image(np.random.rand(10, 10, 10, 50), np.eye(4)).to_filename(filename2) s = SpecifySparseModel() - s.inputs.input_units = 'secs' + s.inputs.input_units = "secs" s.inputs.functional_runs = [filename1, filename2] s.inputs.time_repetition = 6 info = [ - Bunch( - conditions=['cond1'], onsets=[[0, 50, 100, 180]], durations=[[2]]), - Bunch( - conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]]) + Bunch(conditions=["cond1"], onsets=[[0, 50, 100, 180]], durations=[[2]]), + Bunch(conditions=["cond1"], onsets=[[30, 40, 100, 150]], durations=[[1]]), ] s.inputs.subject_info = info s.inputs.volumes_in_cluster = 1 @@ -247,26 +247,26 @@ def test_modelgen_sparse(tmpdir): s.inputs.high_pass_filter_cutoff = np.inf res = s.run() assert len(res.outputs.session_info) == 2 - assert len(res.outputs.session_info[0]['regress']) == 1 - assert len(res.outputs.session_info[0]['cond']) == 0 + assert len(res.outputs.session_info[0]["regress"]) == 1 + assert len(res.outputs.session_info[0]["cond"]) == 0 s.inputs.stimuli_as_impulses = False res = s.run() - assert res.outputs.session_info[0]['regress'][0]['val'][0] == 1.0 + assert res.outputs.session_info[0]["regress"][0]["val"][0] == 1.0 s.inputs.model_hrf = True res = s.run() npt.assert_almost_equal( - res.outputs.session_info[0]['regress'][0]['val'][0], - 0.016675298129743384) - assert len(res.outputs.session_info[0]['regress']) == 1 + res.outputs.session_info[0]["regress"][0]["val"][0], 0.016675298129743384 + ) + assert len(res.outputs.session_info[0]["regress"]) == 1 s.inputs.use_temporal_deriv = True res = s.run() - assert len(res.outputs.session_info[0]['regress']) == 2 + assert len(res.outputs.session_info[0]["regress"]) == 2 npt.assert_almost_equal( - res.outputs.session_info[0]['regress'][0]['val'][0], - 0.016675298129743384) + res.outputs.session_info[0]["regress"][0]["val"][0], 0.016675298129743384 + ) npt.assert_almost_equal( - res.outputs.session_info[1]['regress'][1]['val'][5], - 0.007671459162258378) + 
res.outputs.session_info[1]["regress"][1]["val"][5], 0.007671459162258378 + ) diff --git a/nipype/algorithms/tests/test_moments.py b/nipype/algorithms/tests/test_moments.py index fa174a79e4..91e6313193 100644 --- a/nipype/algorithms/tests/test_moments.py +++ b/nipype/algorithms/tests/test_moments.py @@ -130,8 +130,16 @@ def test_skew(tmpdir): skewness = calc_moments(f.strpath, 3) assert np.allclose( skewness, - np.array([ - -0.23418937314622, 0.2946365564954823, -0.05781002053540932, - -0.3512508282578762, -0.07035664150233077, -0.01935867699166935, - 0.00483863369427428, 0.21879460029850167 - ])) + np.array( + [ + -0.23418937314622, + 0.2946365564954823, + -0.05781002053540932, + -0.3512508282578762, + -0.07035664150233077, + -0.01935867699166935, + 0.00483863369427428, + 0.21879460029850167, + ] + ), + ) diff --git a/nipype/algorithms/tests/test_normalize_tpms.py b/nipype/algorithms/tests/test_normalize_tpms.py index 31eb2b96dd..7ff482f23f 100644 --- a/nipype/algorithms/tests/test_normalize_tpms.py +++ b/nipype/algorithms/tests/test_normalize_tpms.py @@ -18,7 +18,7 @@ def test_normalize_tpms(tmpdir): - in_mask = example_data('tpms_msk.nii.gz') + in_mask = example_data("tpms_msk.nii.gz") mskdata = nb.load(in_mask, mmap=NUMPY_MMAP).get_data() mskdata[mskdata > 0.0] = 1.0 @@ -27,16 +27,17 @@ def test_normalize_tpms(tmpdir): out_files = [] for i in range(3): - mapname = example_data('tpm_%02d.nii.gz' % i) - filename = tmpdir.join('modtpm_%02d.nii.gz' % i).strpath - out_files.append(tmpdir.join('normtpm_%02d.nii.gz' % i).strpath) + mapname = example_data("tpm_%02d.nii.gz" % i) + filename = tmpdir.join("modtpm_%02d.nii.gz" % i).strpath + out_files.append(tmpdir.join("normtpm_%02d.nii.gz" % i).strpath) im = nb.load(mapname, mmap=NUMPY_MMAP) data = im.get_data() mapdata.append(data.copy()) - nb.Nifti1Image(2.0 * (data * mskdata), im.affine, - im.header).to_filename(filename) + nb.Nifti1Image(2.0 * (data * mskdata), im.affine, im.header).to_filename( + filename + ) in_files.append(filename) normalize_tpms(in_files, in_mask, out_files=out_files) diff --git a/nipype/algorithms/tests/test_rapidart.py b/nipype/algorithms/tests/test_rapidart.py index fbdce11819..fdf0716805 100644 --- a/nipype/algorithms/tests/test_rapidart.py +++ b/nipype/algorithms/tests/test_rapidart.py @@ -16,58 +16,79 @@ def test_ad_init(): def test_ad_output_filenames(): ad = ra.ArtifactDetect() - outputdir = '/tmp' - f = 'motion.nii' - (outlierfile, intensityfile, statsfile, normfile, plotfile, - displacementfile, maskfile) = ad._get_output_filenames(f, outputdir) - assert outlierfile == '/tmp/art.motion_outliers.txt' - assert intensityfile == '/tmp/global_intensity.motion.txt' - assert statsfile == '/tmp/stats.motion.txt' - assert normfile == '/tmp/norm.motion.txt' - assert plotfile == '/tmp/plot.motion.png' - assert displacementfile == '/tmp/disp.motion.nii' - assert maskfile == '/tmp/mask.motion.nii' + outputdir = "/tmp" + f = "motion.nii" + ( + outlierfile, + intensityfile, + statsfile, + normfile, + plotfile, + displacementfile, + maskfile, + ) = ad._get_output_filenames(f, outputdir) + assert outlierfile == "/tmp/art.motion_outliers.txt" + assert intensityfile == "/tmp/global_intensity.motion.txt" + assert statsfile == "/tmp/stats.motion.txt" + assert normfile == "/tmp/norm.motion.txt" + assert plotfile == "/tmp/plot.motion.png" + assert displacementfile == "/tmp/disp.motion.nii" + assert maskfile == "/tmp/mask.motion.nii" def test_ad_get_affine_matrix(): - matrix = ra._get_affine_matrix(np.array([0]), 'SPM') + matrix = 
ra._get_affine_matrix(np.array([0]), "SPM") npt.assert_equal(matrix, np.eye(4)) # test translation params = [1, 2, 3] - matrix = ra._get_affine_matrix(params, 'SPM') + matrix = ra._get_affine_matrix(params, "SPM") out = np.eye(4) out[0:3, 3] = params npt.assert_equal(matrix, out) # test rotation params = np.array([0, 0, 0, np.pi / 2, np.pi / 2, np.pi / 2]) - matrix = ra._get_affine_matrix(params, 'SPM') - out = np.array([0, 0, 1, 0, 0, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1]).reshape( - (4, 4)) + matrix = ra._get_affine_matrix(params, "SPM") + out = np.array([0, 0, 1, 0, 0, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1]).reshape((4, 4)) npt.assert_almost_equal(matrix, out) # test scaling params = np.array([0, 0, 0, 0, 0, 0, 1, 2, 3]) - matrix = ra._get_affine_matrix(params, 'SPM') - out = np.array([1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 1]).reshape( - (4, 4)) + matrix = ra._get_affine_matrix(params, "SPM") + out = np.array([1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 1]).reshape((4, 4)) npt.assert_equal(matrix, out) # test shear params = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 3]) - matrix = ra._get_affine_matrix(params, 'SPM') - out = np.array([1, 1, 2, 0, 0, 1, 3, 0, 0, 0, 1, 0, 0, 0, 0, 1]).reshape( - (4, 4)) + matrix = ra._get_affine_matrix(params, "SPM") + out = np.array([1, 1, 2, 0, 0, 1, 3, 0, 0, 0, 1, 0, 0, 0, 0, 1]).reshape((4, 4)) npt.assert_equal(matrix, out) def test_ad_get_norm(): - params = np.array([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, np.pi / 4, np.pi / 4, np.pi / 4, 0, 0, 0, - -np.pi / 4, -np.pi / 4, -np.pi / 4 - ]).reshape((3, 6)) - norm, _ = ra._calc_norm(params, False, 'SPM') - npt.assert_almost_equal(norm, - np.array([18.86436316, 37.74610158, 31.29780829])) - norm, _ = ra._calc_norm(params, True, 'SPM') - npt.assert_almost_equal(norm, np.array([0., 143.72192614, 173.92527131])) + params = np.array( + [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + np.pi / 4, + np.pi / 4, + np.pi / 4, + 0, + 0, + 0, + -np.pi / 4, + -np.pi / 4, + -np.pi / 4, + ] + ).reshape((3, 6)) + norm, _ = ra._calc_norm(params, False, "SPM") + npt.assert_almost_equal(norm, np.array([18.86436316, 37.74610158, 31.29780829])) + norm, _ = ra._calc_norm(params, True, "SPM") + npt.assert_almost_equal(norm, np.array([0.0, 143.72192614, 173.92527131])) def test_sc_init(): @@ -81,13 +102,14 @@ def test_sc_populate_inputs(): realignment_parameters=None, intensity_values=None, spm_mat_file=None, - concatenated_design=None) + concatenated_design=None, + ) assert set(sc.inputs.__dict__.keys()) == set(inputs.__dict__.keys()) def test_sc_output_filenames(): sc = ra.StimulusCorrelation() - outputdir = '/tmp' - f = 'motion.nii' + outputdir = "/tmp" + f = "motion.nii" corrfile = sc._get_output_filenames(f, outputdir) - assert corrfile == '/tmp/qa.motion_stimcorr.txt' + assert corrfile == "/tmp/qa.motion_stimcorr.txt" diff --git a/nipype/algorithms/tests/test_splitmerge.py b/nipype/algorithms/tests/test_splitmerge.py index f05d291028..96e60c6cbb 100644 --- a/nipype/algorithms/tests/test_splitmerge.py +++ b/nipype/algorithms/tests/test_splitmerge.py @@ -13,8 +13,8 @@ def test_split_and_merge(tmpdir): from nipype.algorithms.misc import split_rois, merge_rois - in_mask = example_data('tpms_msk.nii.gz') - dwfile = tmpdir.join('dwi.nii.gz').strpath + in_mask = example_data("tpms_msk.nii.gz") + dwfile = tmpdir.join("dwi.nii.gz").strpath mskdata = nb.load(in_mask, mmap=NUMPY_MMAP).get_data() aff = nb.load(in_mask, mmap=NUMPY_MMAP).affine diff --git a/nipype/algorithms/tests/test_stats.py b/nipype/algorithms/tests/test_stats.py index 
9a4c7525b5..29305e7a7a 100644 --- a/nipype/algorithms/tests/test_stats.py +++ b/nipype/algorithms/tests/test_stats.py @@ -10,10 +10,9 @@ def test_ActivationCount(tmpdir): tmpdir.chdir() - in_files = ['{:d}.nii'.format(i) for i in range(3)] + in_files = ["{:d}.nii".format(i) for i in range(3)] for fname in in_files: - nb.Nifti1Image(np.random.normal(size=(5, 5, 5)), - np.eye(4)).to_filename(fname) + nb.Nifti1Image(np.random.normal(size=(5, 5, 5)), np.eye(4)).to_filename(fname) acm = ActivationCount(in_files=in_files, threshold=1.65) res = acm.run() @@ -23,23 +22,29 @@ def test_ActivationCount(tmpdir): assert np.allclose(diff.get_data(), pos.get_data() - neg.get_data()) -@pytest.mark.parametrize("threshold, above_thresh", [ - (1, 15.865), # above one standard deviation (one side) - (2, 2.275), # above two standard deviations (one side) - (3, 0.135) # above three standard deviations (one side) -]) +@pytest.mark.parametrize( + "threshold, above_thresh", + [ + (1, 15.865), # above one standard deviation (one side) + (2, 2.275), # above two standard deviations (one side) + (3, 0.135), # above three standard deviations (one side) + ], +) def test_ActivationCount_normaldistr(tmpdir, threshold, above_thresh): tmpdir.chdir() - in_files = ['{:d}.nii'.format(i) for i in range(3)] + in_files = ["{:d}.nii".format(i) for i in range(3)] for fname in in_files: - nb.Nifti1Image(np.random.normal(size=(100, 100, 100)), - np.eye(4)).to_filename(fname) + nb.Nifti1Image(np.random.normal(size=(100, 100, 100)), np.eye(4)).to_filename( + fname + ) acm = ActivationCount(in_files=in_files, threshold=threshold) res = acm.run() pos = nb.load(res.outputs.acm_pos) neg = nb.load(res.outputs.acm_neg) - assert np.isclose(pos.get_data().mean(), - above_thresh * 1.e-2, rtol=0.1, atol=1.e-4) - assert np.isclose(neg.get_data().mean(), - above_thresh * 1.e-2, rtol=0.1, atol=1.e-4) + assert np.isclose( + pos.get_data().mean(), above_thresh * 1.0e-2, rtol=0.1, atol=1.0e-4 + ) + assert np.isclose( + neg.get_data().mean(), above_thresh * 1.0e-2, rtol=0.1, atol=1.0e-4 + ) diff --git a/nipype/caching/memory.py b/nipype/caching/memory.py index 53bf92cce7..4f773f0c3c 100644 --- a/nipype/caching/memory.py +++ b/nipype/caching/memory.py @@ -42,18 +42,18 @@ def __init__(self, interface, base_dir, callback=None): An optional callable called each time after the function is called. """ - if not (isinstance(interface, type) - and issubclass(interface, BaseInterface)): - raise ValueError('the interface argument should be a nipype ' - 'interface class, but %s (type %s) was passed.' % - (interface, type(interface))) + if not (isinstance(interface, type) and issubclass(interface, BaseInterface)): + raise ValueError( + "the interface argument should be a nipype " + "interface class, but %s (type %s) was passed." 
+ % (interface, type(interface)) + ) self.interface = interface base_dir = os.path.abspath(base_dir) if not os.path.exists(base_dir) and os.path.isdir(base_dir): - raise ValueError('base_dir should be an existing directory') + raise ValueError("base_dir should be an existing directory") self.base_dir = base_dir - doc = '%s\n%s' % (self.interface.__doc__, - self.interface.help(returnhelp=True)) + doc = "%s\n%s" % (self.interface.__doc__, self.interface.help(returnhelp=True)) self.__doc__ = doc self.callback = callback @@ -64,10 +64,12 @@ def __call__(self, **kwargs): interface.inputs.trait_set(**kwargs) # Make a name for our node inputs = interface.inputs.get_hashval() - hasher = hashlib.new('md5') + hasher = hashlib.new("md5") hasher.update(pickle.dumps(inputs)) - dir_name = '%s-%s' % (interface.__class__.__module__.replace('.', '-'), - interface.__class__.__name__) + dir_name = "%s-%s" % ( + interface.__class__.__module__.replace(".", "-"), + interface.__class__.__name__, + ) job_name = hasher.hexdigest() node = Node(interface, name=job_name) node.base_dir = os.path.join(self.base_dir, dir_name) @@ -84,9 +86,12 @@ def __call__(self, **kwargs): return out def __repr__(self): - return '{}({}.{}), base_dir={})'.format( - self.__class__.__name__, self.interface.__module__, - self.interface.__name__, self.base_dir) + return "{}({}.{}), base_dir={})".format( + self.__class__.__name__, + self.interface.__module__, + self.interface.__name__, + self.base_dir, + ) ############################################################################### @@ -98,9 +103,9 @@ def read_log(filename, run_dict=None): if run_dict is None: run_dict = dict() - with open(filename, 'r') as logfile: + with open(filename, "r") as logfile: for line in logfile: - dir_name, job_name = line[:-1].split('/') + dir_name, job_name = line[:-1].split("/") jobs = run_dict.get(dir_name, set()) jobs.add(job_name) run_dict[dir_name] = jobs @@ -122,13 +127,13 @@ def rm_all_but(base_dir, dirs_to_keep, warn=False): except OSError: "Dir has been deleted" return - all_dirs = [d for d in all_dirs if not d.startswith('log.')] + all_dirs = [d for d in all_dirs if not d.startswith("log.")] dirs_to_rm = list(dirs_to_keep.symmetric_difference(all_dirs)) for dir_name in dirs_to_rm: dir_name = os.path.join(base_dir, dir_name) if os.path.exists(dir_name): if warn: - print('removing directory: %s' % dir_name) + print("removing directory: %s" % dir_name) shutil.rmtree(dir_name) @@ -163,13 +168,13 @@ class Memory(object): """ def __init__(self, base_dir): - base_dir = os.path.join(os.path.abspath(base_dir), 'nipype_mem') + base_dir = os.path.join(os.path.abspath(base_dir), "nipype_mem") if not os.path.exists(base_dir): os.mkdir(base_dir) elif not os.path.isdir(base_dir): - raise ValueError('base_dir should be a directory') + raise ValueError("base_dir should be a directory") self.base_dir = base_dir - open(os.path.join(base_dir, 'log.current'), 'a').close() + open(os.path.join(base_dir, "log.current"), "a").close() def cache(self, interface): """ Returns a callable that caches the output of an interface @@ -219,24 +224,23 @@ def _log_name(self, dir_name, job_name): # Every counter is a file opened in append mode and closed # immediately to avoid race conditions in parallel computing: # file appends are atomic - with open(os.path.join(base_dir, 'log.current'), 'a') as currentlog: - currentlog.write('%s/%s\n' % (dir_name, job_name)) + with open(os.path.join(base_dir, "log.current"), "a") as currentlog: + currentlog.write("%s/%s\n" % (dir_name, job_name)) 
t = time.localtime() - year_dir = os.path.join(base_dir, 'log.%i' % t.tm_year) + year_dir = os.path.join(base_dir, "log.%i" % t.tm_year) try: os.mkdir(year_dir) except OSError: "Dir exists" - month_dir = os.path.join(year_dir, '%02i' % t.tm_mon) + month_dir = os.path.join(year_dir, "%02i" % t.tm_mon) try: os.mkdir(month_dir) except OSError: "Dir exists" - with open(os.path.join(month_dir, '%02i.log' % t.tm_mday), - 'a') as rotatefile: - rotatefile.write('%s/%s\n' % (dir_name, job_name)) + with open(os.path.join(month_dir, "%02i.log" % t.tm_mday), "a") as rotatefile: + rotatefile.write("%s/%s\n" % (dir_name, job_name)) def clear_previous_runs(self, warn=True): """ Remove all the cache that where not used in the latest run of @@ -250,7 +254,7 @@ def clear_previous_runs(self, warn=True): removed """ base_dir = self.base_dir - latest_runs = read_log(os.path.join(base_dir, 'log.current')) + latest_runs = read_log(os.path.join(base_dir, "log.current")) self._clear_all_but(latest_runs, warn=warn) def clear_runs_since(self, day=None, month=None, year=None, warn=True): @@ -271,10 +275,10 @@ def clear_runs_since(self, day=None, month=None, year=None, warn=True): month = month if month is not None else t.tm_mon year = year if year is not None else t.tm_year base_dir = self.base_dir - cut_off_file = '%s/log.%i/%02i/%02i.log' % (base_dir, year, month, day) + cut_off_file = "%s/log.%i/%02i/%02i.log" % (base_dir, year, month, day) logs_to_flush = list() recent_runs = dict() - for log_name in glob.glob('%s/log.*/*/*.log' % base_dir): + for log_name in glob.glob("%s/log.*/*/*.log" % base_dir): if log_name < cut_off_file: logs_to_flush.append(log_name) else: @@ -289,8 +293,7 @@ def _clear_all_but(self, runs, warn=True): """ rm_all_but(self.base_dir, set(runs.keys()), warn=warn) for dir_name, job_names in list(runs.items()): - rm_all_but( - os.path.join(self.base_dir, dir_name), job_names, warn=warn) + rm_all_but(os.path.join(self.base_dir, dir_name), job_names, warn=warn) def __repr__(self): - return '{}(base_dir={})'.format(self.__class__.__name__, self.base_dir) + return "{}(base_dir={})".format(self.__class__.__name__, self.base_dir) diff --git a/nipype/caching/tests/test_memory.py b/nipype/caching/tests/test_memory.py index 642fee363d..ef80869f03 100644 --- a/nipype/caching/tests/test_memory.py +++ b/nipype/caching/tests/test_memory.py @@ -6,6 +6,7 @@ from ...pipeline.engine.tests.test_engine import EngineTestInterface from ... 
import config + config.set_default_config() nb_runs = 0 @@ -19,10 +20,10 @@ def _run_interface(self, runtime): def test_caching(tmpdir): - old_rerun = config.get('execution', 'stop_on_first_rerun') + old_rerun = config.get("execution", "stop_on_first_rerun") try: # Prevent rerun to check that evaluation is computed only once - config.set('execution', 'stop_on_first_rerun', 'true') + config.set("execution", "stop_on_first_rerun", "true") mem = Memory(tmpdir.strpath) first_nb_run = nb_runs results = mem.cache(SideEffectInterface)(input1=2, input2=1) @@ -37,4 +38,4 @@ def test_caching(tmpdir): assert nb_runs == first_nb_run + 2 assert results.outputs.output1 == [1, 1] finally: - config.set('execution', 'stop_on_first_rerun', old_rerun) + config.set("execution", "stop_on_first_rerun", old_rerun) diff --git a/nipype/conftest.py b/nipype/conftest.py index 9a9175ce28..b099fd0078 100644 --- a/nipype/conftest.py +++ b/nipype/conftest.py @@ -6,16 +6,17 @@ import py.path as pp NIPYPE_DATADIR = os.path.realpath( - os.path.join(os.path.dirname(__file__), 'testing/data')) + os.path.join(os.path.dirname(__file__), "testing/data") +) temp_folder = mkdtemp() -data_dir = os.path.join(temp_folder, 'data') +data_dir = os.path.join(temp_folder, "data") shutil.copytree(NIPYPE_DATADIR, data_dir) @pytest.fixture(autouse=True) def add_np(doctest_namespace): - doctest_namespace['np'] = numpy - doctest_namespace['os'] = os + doctest_namespace["np"] = numpy + doctest_namespace["os"] = os doctest_namespace["datadir"] = data_dir diff --git a/nipype/external/cloghandler.py b/nipype/external/cloghandler.py index 05e28968dd..c5ee9d7a6f 100644 --- a/nipype/external/cloghandler.py +++ b/nipype/external/cloghandler.py @@ -74,14 +74,16 @@ class ConcurrentRotatingFileHandler(BaseRotatingHandler): exceed the given size. """ - def __init__(self, - filename, - mode='a', - maxBytes=0, - backupCount=0, - encoding=None, - debug=True, - supress_abs_warn=False): + def __init__( + self, + filename, + mode="a", + maxBytes=0, + backupCount=0, + encoding=None, + debug=True, + supress_abs_warn=False, + ): """ Open the specified file and use it as the stream for logging. @@ -139,15 +141,16 @@ def __init__(self, # if the given filename contains no path, we make an absolute path if not os.path.isabs(filename): - if FORCE_ABSOLUTE_PATH or \ - not os.path.split(filename)[0]: + if FORCE_ABSOLUTE_PATH or not os.path.split(filename)[0]: filename = os.path.abspath(filename) elif not supress_abs_warn: from warnings import warn + warn( "The given 'filename' should be an absolute path. If your " "application calls os.chdir(), your logs may get messed up. " - "Use 'supress_abs_warn=True' to hide this message.") + "Use 'supress_abs_warn=True' to hide this message." + ) try: BaseRotatingHandler.__init__(self, filename, mode, encoding) except TypeError: # Due to a different logging release without encoding support (Python 2.4.1 and earlier?) @@ -158,7 +161,7 @@ def __init__(self, self.maxBytes = maxBytes self.backupCount = backupCount # Prevent multiple extensions on the lock file (Only handles the normal "*.log" case.) - self.lock_file = '%s.lock' % filename + self.lock_file = "%s.lock" % filename self.stream_lock = SoftFileLock(self.lock_file) # For debug mode, swap out the "_degrade()" method with a more a verbose one. 
@@ -231,13 +234,17 @@ def _degrade_debug(self, degrade, msg, *args): """ if degrade: if not self._rotateFailed: - sys.stderr.write("Degrade mode - ENTERING - (pid=%d) %s\n" % - (os.getpid(), msg % args)) + sys.stderr.write( + "Degrade mode - ENTERING - (pid=%d) %s\n" + % (os.getpid(), msg % args) + ) self._rotateFailed = True else: if self._rotateFailed: - sys.stderr.write("Degrade mode - EXITING - (pid=%d) %s\n" % - (os.getpid(), msg % args)) + sys.stderr.write( + "Degrade mode - EXITING - (pid=%d) %s\n" + % (os.getpid(), msg % args) + ) self._rotateFailed = False def doRollover(self): @@ -255,15 +262,15 @@ def doRollover(self): # Attempt to rename logfile to tempname: There is a slight race-condition here, but it seems unavoidable tmpname = None while not tmpname or os.path.exists(tmpname): - tmpname = "%s.rotate.%08d" % (self.baseFilename, - randint(0, 99999999)) + tmpname = "%s.rotate.%08d" % (self.baseFilename, randint(0, 99999999)) try: # Do a rename test to determine if we can successfully rename the log file os.rename(self.baseFilename, tmpname) except (IOError, OSError): exc_value = sys.exc_info()[1] - self._degrade(True, "rename failed. File in use? " - "exception=%s", exc_value) + self._degrade( + True, "rename failed. File in use? " "exception=%s", exc_value + ) return # Q: Is there some way to protect this code from a KeboardInterupt? @@ -311,19 +318,18 @@ def shouldRollover(self, record): def _shouldRollover(self): if self.maxBytes > 0: # are we rolling over? try: - self.stream.seek( - 0, 2) # due to non-posix-compliant Windows feature + self.stream.seek(0, 2) # due to non-posix-compliant Windows feature except IOError: return True if self.stream.tell() >= self.maxBytes: return True else: - self._degrade(False, - "Rotation done or not needed at this time") + self._degrade(False, "Rotation done or not needed at this time") return False # Publish this class to the "logging.handlers" module so that it can be use # from a logging config file via logging.config.fileConfig(). import logging.handlers + logging.handlers.ConcurrentRotatingFileHandler = ConcurrentRotatingFileHandler diff --git a/nipype/external/due.py b/nipype/external/due.py index c360435bae..fc436d5d45 100644 --- a/nipype/external/due.py +++ b/nipype/external/due.py @@ -24,7 +24,7 @@ License: BSD-2 """ -__version__ = '0.0.5' +__version__ = "0.0.5" class InactiveDueCreditCollector(object): @@ -45,7 +45,7 @@ def nondecorating_decorator(func): cite = load = add = _donothing def __repr__(self): - return '{}()'.format(self.__class__.__name__) + return "{}()".format(self.__class__.__name__) def _donothing_func(*args, **kwargs): @@ -55,9 +55,9 @@ def _donothing_func(*args, **kwargs): try: from duecredit import due, BibTeX, Doi, Url - if 'due' in locals() and not hasattr(due, 'cite'): - raise RuntimeError( - "Imported due lacks .cite. DueCredit is now disabled") + + if "due" in locals() and not hasattr(due, "cite"): + raise RuntimeError("Imported due lacks .cite. 
DueCredit is now disabled") except ImportError: # Initiate due stub due = InactiveDueCreditCollector() diff --git a/nipype/external/fsl_imglob.py b/nipype/external/fsl_imglob.py index ea4cfe245a..3707e4750d 100755 --- a/nipype/external/fsl_imglob.py +++ b/nipype/external/fsl_imglob.py @@ -79,9 +79,9 @@ def usage(): # basename and extension pair ) def isImage(input, allExtensions): for extension in allExtensions: - if input[-len(extension):] == extension: - return True, input[:-len(extension)], extension - return False, input, '' + if input[-len(extension) :] == extension: + return True, input[: -len(extension)], extension + return False, input, "" def removeImageExtension(input, allExtensions): @@ -95,13 +95,14 @@ def main(): if sys.version_info < (2, 4): import sets from sets import Set + setAvailable = False else: setAvailable = True deleteExtensions = True - primaryExtensions = ['.nii.gz', '.nii', '.hdr.gz', '.hdr'] - secondaryExtensions = ['.img.gz', '.img'] + primaryExtensions = [".nii.gz", ".nii", ".hdr.gz", ".hdr"] + secondaryExtensions = [".img.gz", ".img"] allExtensions = primaryExtensions + secondaryExtensions validExtensions = primaryExtensions startingArg = 1 @@ -125,13 +126,14 @@ def main(): for currentExtension in validExtensions: filelist.extend( glob.glob( - removeImageExtension(sys.argv[arg], allExtensions) + - currentExtension)) + removeImageExtension(sys.argv[arg], allExtensions) + + currentExtension + ) + ) if deleteExtensions: for file in range(0, len(filelist)): - filelist[file] = removeImageExtension(filelist[file], - allExtensions) + filelist[file] = removeImageExtension(filelist[file], allExtensions) if setAvailable: filelist = list(set(filelist)) else: @@ -139,9 +141,9 @@ def main(): filelist.sort() for file in range(0, len(filelist)): - print(filelist[file], end=' ') + print(filelist[file], end=" ") if file < len(filelist) - 1: - print(" ", end=' ') + print(" ", end=" ") if __name__ == "__main__": diff --git a/nipype/info.py b/nipype/info.py index 9858e045dc..c39e328f26 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -5,7 +5,7 @@ # nipype version information # Remove -dev for release -__version__ = '1.4.0-dev' +__version__ = "1.4.0-dev" def get_nipype_gitversion(): @@ -18,46 +18,50 @@ def get_nipype_gitversion(): """ import os import subprocess + try: import nipype + gitpath = os.path.realpath( - os.path.join(os.path.dirname(nipype.__file__), os.path.pardir)) + os.path.join(os.path.dirname(nipype.__file__), os.path.pardir) + ) except: gitpath = os.getcwd() - gitpathgit = os.path.join(gitpath, '.git') + gitpathgit = os.path.join(gitpath, ".git") if not os.path.exists(gitpathgit): return None ver = None try: o, _ = subprocess.Popen( - 'git describe', shell=True, cwd=gitpath, - stdout=subprocess.PIPE).communicate() + "git describe", shell=True, cwd=gitpath, stdout=subprocess.PIPE + ).communicate() except Exception: pass else: - ver = o.decode().strip().split('-')[-1] + ver = o.decode().strip().split("-")[-1] return ver -if __version__.endswith('-dev'): +if __version__.endswith("-dev"): gitversion = get_nipype_gitversion() if gitversion: - __version__ = '{}+{}'.format(__version__, gitversion) + __version__ = "{}+{}".format(__version__, gitversion) CLASSIFIERS = [ - 'Development Status :: 5 - Production/Stable', 'Environment :: Console', - 'Intended Audience :: Science/Research', - 'License :: OSI Approved :: Apache Software License', - 'Operating System :: MacOS :: MacOS X', - 'Operating System :: POSIX :: Linux', - 'Programming Language :: Python :: 3.5', - 
'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Topic :: Scientific/Engineering' + "Development Status :: 5 - Production/Stable", + "Environment :: Console", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Operating System :: MacOS :: MacOS X", + "Operating System :: POSIX :: Linux", + "Programming Language :: Python :: 3.5", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Topic :: Scientific/Engineering", ] PYTHON_REQUIRES = ">= 3.5" -description = 'Neuroimaging in Python: Pipelines and Interfaces' +description = "Neuroimaging in Python: Pipelines and Interfaces" # Note: this long_description is actually a copy/paste from the top-level # README.txt, so that it shows up nicely on PyPI. So please remember to edit @@ -94,86 +98,88 @@ def get_nipype_gitversion(): """ # versions -NIBABEL_MIN_VERSION = '2.1.0' -NETWORKX_MIN_VERSION = '1.9' -NETWORKX_MAX_VERSION_27 = '2.2' -NUMPY_MIN_VERSION = '1.12' +NIBABEL_MIN_VERSION = "2.1.0" +NETWORKX_MIN_VERSION = "1.9" +NETWORKX_MAX_VERSION_27 = "2.2" +NUMPY_MIN_VERSION = "1.12" # Numpy bug in python 3.7: # https://www.opensourceanswers.com/blog/you-shouldnt-use-python-37-for-data-science-right-now.html -NUMPY_MIN_VERSION_37 = '1.15.3' -SCIPY_MIN_VERSION = '0.14' +NUMPY_MIN_VERSION_37 = "1.15.3" +SCIPY_MIN_VERSION = "0.14" # Scipy drops 2.7 and 3.4 support in 1.3 -SCIPY_MAX_VERSION_34 = '1.3.0' -TRAITS_MIN_VERSION = '4.6' -DATEUTIL_MIN_VERSION = '2.2' -FUTURE_MIN_VERSION = '0.16.0' -SIMPLEJSON_MIN_VERSION = '3.8.0' -PROV_VERSION = '1.5.2' -CLICK_MIN_VERSION = '6.6.0' -PYDOT_MIN_VERSION = '1.2.3' - -NAME = 'nipype' -MAINTAINER = 'nipype developers' -MAINTAINER_EMAIL = 'neuroimaging@python.org' +SCIPY_MAX_VERSION_34 = "1.3.0" +TRAITS_MIN_VERSION = "4.6" +DATEUTIL_MIN_VERSION = "2.2" +FUTURE_MIN_VERSION = "0.16.0" +SIMPLEJSON_MIN_VERSION = "3.8.0" +PROV_VERSION = "1.5.2" +CLICK_MIN_VERSION = "6.6.0" +PYDOT_MIN_VERSION = "1.2.3" + +NAME = "nipype" +MAINTAINER = "nipype developers" +MAINTAINER_EMAIL = "neuroimaging@python.org" DESCRIPTION = description LONG_DESCRIPTION = long_description -URL = 'http://nipy.org/nipype' -DOWNLOAD_URL = 'http://github.com/nipy/nipype/archives/master' -LICENSE = 'Apache License, 2.0' -AUTHOR = 'nipype developers' -AUTHOR_EMAIL = 'neuroimaging@python.org' -PLATFORMS = 'OS Independent' -MAJOR = __version__.split('.')[0] -MINOR = __version__.split('.')[1] -MICRO = __version__.replace('-', '.').split('.')[2] -ISRELEASE = (len(__version__.replace('-', '.').split('.')) == 3 - or 'post' in __version__.replace('-', '.').split('.')[-1]) +URL = "http://nipy.org/nipype" +DOWNLOAD_URL = "http://github.com/nipy/nipype/archives/master" +LICENSE = "Apache License, 2.0" +AUTHOR = "nipype developers" +AUTHOR_EMAIL = "neuroimaging@python.org" +PLATFORMS = "OS Independent" +MAJOR = __version__.split(".")[0] +MINOR = __version__.split(".")[1] +MICRO = __version__.replace("-", ".").split(".")[2] +ISRELEASE = ( + len(__version__.replace("-", ".").split(".")) == 3 + or "post" in __version__.replace("-", ".").split(".")[-1] +) VERSION = __version__ -PROVIDES = ['nipype'] +PROVIDES = ["nipype"] REQUIRES = [ - 'click>=%s' % CLICK_MIN_VERSION, - 'funcsigs', - 'future>=%s' % FUTURE_MIN_VERSION, - 'networkx>=%s' % NETWORKX_MIN_VERSION, - 'nibabel>=%s' % NIBABEL_MIN_VERSION, + "click>=%s" % CLICK_MIN_VERSION, + "funcsigs", + "future>=%s" % FUTURE_MIN_VERSION, + "networkx>=%s" % NETWORKX_MIN_VERSION, + "nibabel>=%s" % 
NIBABEL_MIN_VERSION, 'numpy>=%s ; python_version < "3.7"' % NUMPY_MIN_VERSION, 'numpy>=%s ; python_version >= "3.7"' % NUMPY_MIN_VERSION_37, - 'packaging', - 'prov>=%s' % PROV_VERSION, - 'pydot>=%s' % PYDOT_MIN_VERSION, - 'pydotplus', - 'python-dateutil>=%s' % DATEUTIL_MIN_VERSION, - 'scipy>=%s' % SCIPY_MIN_VERSION, - 'simplejson>=%s' % SIMPLEJSON_MIN_VERSION, - 'traits>=%s,!=5.0' % TRAITS_MIN_VERSION, - 'filelock>=3.0.0', - 'etelemetry', + "packaging", + "prov>=%s" % PROV_VERSION, + "pydot>=%s" % PYDOT_MIN_VERSION, + "pydotplus", + "python-dateutil>=%s" % DATEUTIL_MIN_VERSION, + "scipy>=%s" % SCIPY_MIN_VERSION, + "simplejson>=%s" % SIMPLEJSON_MIN_VERSION, + "traits>=%s,!=5.0" % TRAITS_MIN_VERSION, + "filelock>=3.0.0", + "etelemetry", ] # neurdflib has to come after prov # https://github.com/nipy/nipype/pull/2961#issuecomment-512035484 -REQUIRES += ['neurdflib'] +REQUIRES += ["neurdflib"] TESTS_REQUIRES = [ - 'codecov', - 'coverage<5', - 'mock', - 'pytest', - 'pytest-cov', - 'pytest-env', + "codecov", + "coverage<5", + "mock", + "pytest", + "pytest-cov", + "pytest-env", ] EXTRA_REQUIRES = { - 'data': ['datalad'], - 'doc': ['Sphinx>=1.4', 'numpydoc', 'matplotlib', 'pydotplus', 'pydot>=1.2.3'], - 'duecredit': ['duecredit'], - 'nipy': ['nitime', 'nilearn<0.5.0', 'dipy', 'nipy', 'matplotlib'], - 'profiler': ['psutil>=5.0'], - 'pybids': ['pybids>=0.7.0'], - 'specs': ['yapf>=0.27'], - 'ssh': ['paramiko'], - 'tests': TESTS_REQUIRES, - 'xvfbwrapper': ['xvfbwrapper'], + "data": ["datalad"], + "doc": ["Sphinx>=1.4", "numpydoc", "matplotlib", "pydotplus", "pydot>=1.2.3"], + "duecredit": ["duecredit"], + "nipy": ["nitime", "nilearn<0.5.0", "dipy", "nipy", "matplotlib"], + "profiler": ["psutil>=5.0"], + "pybids": ["pybids>=0.7.0"], + "specs": ["yapf>=0.27"], + "ssh": ["paramiko"], + "tests": TESTS_REQUIRES, + "xvfbwrapper": ["xvfbwrapper"], # 'mesh': ['mayavi'] # Enable when it works } @@ -183,9 +189,10 @@ def _list_union(iterable): # Enable a handle to install all extra dependencies at once -EXTRA_REQUIRES['all'] = _list_union(EXTRA_REQUIRES.values()) +EXTRA_REQUIRES["all"] = _list_union(EXTRA_REQUIRES.values()) # dev = doc + tests + specs -EXTRA_REQUIRES['dev'] = _list_union(val for key, val in EXTRA_REQUIRES.items() - if key in ('doc', 'tests', 'specs')) +EXTRA_REQUIRES["dev"] = _list_union( + val for key, val in EXTRA_REQUIRES.items() if key in ("doc", "tests", "specs") +) -STATUS = 'stable' +STATUS = "stable" diff --git a/nipype/interfaces/__init__.py b/nipype/interfaces/__init__.py index 7ad7166476..fe1bf9c9e5 100644 --- a/nipype/interfaces/__init__.py +++ b/nipype/interfaces/__init__.py @@ -6,7 +6,7 @@ Requires Packages to be installed """ -__docformat__ = 'restructuredtext' +__docformat__ = "restructuredtext" from .io import DataGrabber, DataSink, SelectFiles, BIDSDataGrabber from .utility import IdentityInterface, Rename, Function, Select, Merge diff --git a/nipype/interfaces/afni/__init__.py b/nipype/interfaces/afni/__init__.py index 015f17df73..f795e347a3 100644 --- a/nipype/interfaces/afni/__init__.py +++ b/nipype/interfaces/afni/__init__.py @@ -9,16 +9,82 @@ from .base import Info from .preprocess import ( - AlignEpiAnatPy, Allineate, Automask, AutoTcorrelate, AutoTLRC, Bandpass, - BlurInMask, BlurToFWHM, ClipLevel, DegreeCentrality, Despike, Detrend, ECM, - Fim, Fourier, Hist, LFCD, Maskave, Means, OutlierCount, QualityIndex, - ROIStats, Retroicor, Seg, SkullStrip, TCorr1D, TCorrMap, TCorrelate, TNorm, - TProject, TShift, TSmooth, Volreg, Warp, QwarpPlusMinus, Qwarp) -from .svm import 
(SVMTest, SVMTrain) + AlignEpiAnatPy, + Allineate, + Automask, + AutoTcorrelate, + AutoTLRC, + Bandpass, + BlurInMask, + BlurToFWHM, + ClipLevel, + DegreeCentrality, + Despike, + Detrend, + ECM, + Fim, + Fourier, + Hist, + LFCD, + Maskave, + Means, + OutlierCount, + QualityIndex, + ROIStats, + Retroicor, + Seg, + SkullStrip, + TCorr1D, + TCorrMap, + TCorrelate, + TNorm, + TProject, + TShift, + TSmooth, + Volreg, + Warp, + QwarpPlusMinus, + Qwarp, +) +from .svm import SVMTest, SVMTrain from .utils import ( - ABoverlap, AFNItoNIFTI, Autobox, Axialize, BrickStat, Bucket, Calc, Cat, - CatMatvec, CenterMass, ConvertDset, Copy, Dot, Edge3, Eval, FWHMx, - LocalBistat, Localstat, MaskTool, Merge, Notes, NwarpApply, NwarpAdjust, - NwarpCat, OneDToolPy, Refit, ReHo, Resample, TCat, TCatSubBrick, TStat, - To3D, Unifize, Undump, ZCutUp, GCOR, Zcat, Zeropad) -from .model import (Deconvolve, Remlfit, Synthesize) + ABoverlap, + AFNItoNIFTI, + Autobox, + Axialize, + BrickStat, + Bucket, + Calc, + Cat, + CatMatvec, + CenterMass, + ConvertDset, + Copy, + Dot, + Edge3, + Eval, + FWHMx, + LocalBistat, + Localstat, + MaskTool, + Merge, + Notes, + NwarpApply, + NwarpAdjust, + NwarpCat, + OneDToolPy, + Refit, + ReHo, + Resample, + TCat, + TCatSubBrick, + TStat, + To3D, + Unifize, + Undump, + ZCutUp, + GCOR, + Zcat, + Zeropad, +) +from .model import Deconvolve, Remlfit, Synthesize diff --git a/nipype/interfaces/afni/base.py b/nipype/interfaces/afni/base.py index f83b40c4be..c1b181b85d 100644 --- a/nipype/interfaces/afni/base.py +++ b/nipype/interfaces/afni/base.py @@ -8,33 +8,40 @@ from ... import logging, LooseVersion from ...utils.filemanip import split_filename, fname_presuffix -from ..base import (CommandLine, traits, CommandLineInputSpec, isdefined, File, - TraitedSpec, PackageInfo) +from ..base import ( + CommandLine, + traits, + CommandLineInputSpec, + isdefined, + File, + TraitedSpec, + PackageInfo, +) from ...external.due import BibTeX # Use nipype's logging system -IFLOGGER = logging.getLogger('nipype.interface') +IFLOGGER = logging.getLogger("nipype.interface") class Info(PackageInfo): """Handle afni output type and version information.""" - __outputtype = 'AFNI' - ftypes = {'NIFTI': '.nii', 'AFNI': '', 'NIFTI_GZ': '.nii.gz'} - version_cmd = 'afni --version' + __outputtype = "AFNI" + ftypes = {"NIFTI": ".nii", "AFNI": "", "NIFTI_GZ": ".nii.gz"} + version_cmd = "afni --version" @staticmethod def parse_version(raw_info): """Check and parse AFNI's version.""" - version_stamp = raw_info.split('\n')[0].split('Version ')[1] - if version_stamp.startswith('AFNI'): - version_stamp = version_stamp.split('AFNI_')[1] - elif version_stamp.startswith('Debian'): - version_stamp = version_stamp.split('Debian-')[1].split('~')[0] + version_stamp = raw_info.split("\n")[0].split("Version ")[1] + if version_stamp.startswith("AFNI"): + version_stamp = version_stamp.split("AFNI_")[1] + elif version_stamp.startswith("Debian"): + version_stamp = version_stamp.split("Debian-")[1].split("~")[0] else: return None - version = LooseVersion(version_stamp.replace('_', '.')).version[:3] + version = LooseVersion(version_stamp.replace("_", ".")).version[:3] if version[0] < 1000: version[0] = version[0] + 2000 return tuple(version) @@ -58,7 +65,7 @@ def output_type_to_ext(cls, outputtype): try: return cls.ftypes[outputtype] except KeyError as e: - msg = 'Invalid AFNIOUTPUTTYPE: ', outputtype + msg = "Invalid AFNIOUTPUTTYPE: ", outputtype raise KeyError(msg) from e @classmethod @@ -75,7 +82,7 @@ def outputtype(cls): None """ - return 
'AFNI' + return "AFNI" @staticmethod def standard_image(img_name): @@ -86,10 +93,11 @@ def standard_image(img_name): """ clout = CommandLine( - 'which afni', + "which afni", ignore_exception=True, resource_monitor=False, - terminal_output='allatonce').run() + terminal_output="allatonce", + ).run() if clout.runtime.returncode is not 0: return None @@ -106,25 +114,28 @@ class AFNICommandBase(CommandLine): """ def _run_interface(self, runtime): - if platform == 'darwin': - runtime.environ['DYLD_FALLBACK_LIBRARY_PATH'] = '/usr/local/afni/' + if platform == "darwin": + runtime.environ["DYLD_FALLBACK_LIBRARY_PATH"] = "/usr/local/afni/" return super(AFNICommandBase, self)._run_interface(runtime) class AFNICommandInputSpec(CommandLineInputSpec): num_threads = traits.Int( - 1, usedefault=True, nohash=True, desc='set number of threads') + 1, usedefault=True, nohash=True, desc="set number of threads" + ) outputtype = traits.Enum( - 'AFNI', list(Info.ftypes.keys()), desc='AFNI output filetype') + "AFNI", list(Info.ftypes.keys()), desc="AFNI output filetype" + ) out_file = File( name_template="%s_afni", - desc='output image file name', - argstr='-prefix %s', - name_source=["in_file"]) + desc="output image file name", + argstr="-prefix %s", + name_source=["in_file"], + ) class AFNICommandOutputSpec(TraitedSpec): - out_file = File(desc='output file', exists=True) + out_file = File(desc="output file", exists=True) class AFNICommand(AFNICommandBase): @@ -133,34 +144,39 @@ class AFNICommand(AFNICommandBase): input_spec = AFNICommandInputSpec _outputtype = None - references_ = [{ - 'entry': - BibTeX('@article{Cox1996,' - 'author={R.W. Cox},' - 'title={AFNI: software for analysis and ' - 'visualization of functional magnetic ' - 'resonance neuroimages},' - 'journal={Computers and Biomedical research},' - 'volume={29},' - 'number={3},' - 'pages={162-173},' - 'year={1996},' - '}'), - 'tags': ['implementation'], - }, { - 'entry': - BibTeX('@article{CoxHyde1997,' - 'author={R.W. Cox and J.S. Hyde},' - 'title={Software tools for analysis and ' - 'visualization of fMRI data},' - 'journal={NMR in Biomedicine},' - 'volume={10},' - 'number={45},' - 'pages={171-178},' - 'year={1997},' - '}'), - 'tags': ['implementation'], - }] + references_ = [ + { + "entry": BibTeX( + "@article{Cox1996," + "author={R.W. Cox}," + "title={AFNI: software for analysis and " + "visualization of functional magnetic " + "resonance neuroimages}," + "journal={Computers and Biomedical research}," + "volume={29}," + "number={3}," + "pages={162-173}," + "year={1996}," + "}" + ), + "tags": ["implementation"], + }, + { + "entry": BibTeX( + "@article{CoxHyde1997," + "author={R.W. Cox and J.S. 
Hyde}," + "title={Software tools for analysis and " + "visualization of fMRI data}," + "journal={NMR in Biomedicine}," + "volume={10}," + "number={45}," + "pages={171-178}," + "year={1997}," + "}" + ), + "tags": ["implementation"], + }, + ] @property def num_threads(self): @@ -184,15 +200,15 @@ def set_default_output_type(cls, outputtype): if outputtype in Info.ftypes: cls._outputtype = outputtype else: - raise AttributeError('Invalid AFNI outputtype: %s' % outputtype) + raise AttributeError("Invalid AFNI outputtype: %s" % outputtype) def __init__(self, **inputs): """Instantiate an AFNI command tool wrapper.""" super(AFNICommand, self).__init__(**inputs) - self.inputs.on_trait_change(self._output_update, 'outputtype') + self.inputs.on_trait_change(self._output_update, "outputtype") - if hasattr(self.inputs, 'num_threads'): - self.inputs.on_trait_change(self._nthreads_update, 'num_threads') + if hasattr(self.inputs, "num_threads"): + self.inputs.on_trait_change(self._nthreads_update, "num_threads") if self._outputtype is None: self._outputtype = Info.outputtype() @@ -204,7 +220,7 @@ def __init__(self, **inputs): def _nthreads_update(self): """Update environment with new number of threads.""" - self.inputs.environ['OMP_NUM_THREADS'] = '%d' % self.inputs.num_threads + self.inputs.environ["OMP_NUM_THREADS"] = "%d" % self.inputs.num_threads def _output_update(self): """ @@ -219,7 +235,8 @@ def _output_update(self): def _overload_extension(self, value, name=None): path, base, _ = split_filename(value) return os.path.join( - path, base + Info.output_type_to_ext(self.inputs.outputtype)) + path, base + Info.output_type_to_ext(self.inputs.outputtype) + ) def _list_outputs(self): outputs = super(AFNICommand, self)._list_outputs() @@ -233,12 +250,7 @@ def _list_outputs(self): outputs[name] = outputs[name] + "+orig.BRIK" return outputs - def _gen_fname(self, - basename, - cwd=None, - suffix=None, - change_ext=True, - ext=None): + def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True, ext=None): """ Generate a filename based on the given parameters. @@ -265,8 +277,8 @@ def _gen_fname(self, """ if not basename: - msg = 'Unable to generate filename for command %s. ' % self.cmd - msg += 'basename is not set!' + msg = "Unable to generate filename for command %s. " % self.cmd + msg += "basename is not set!" 
raise ValueError(msg) if cwd is None: @@ -274,12 +286,11 @@ def _gen_fname(self, if ext is None: ext = Info.output_type_to_ext(self.inputs.outputtype) if change_ext: - suffix = ''.join((suffix, ext)) if suffix else ext + suffix = "".join((suffix, ext)) if suffix else ext if suffix is None: - suffix = '' - fname = fname_presuffix( - basename, suffix=suffix, use_ext=False, newpath=cwd) + suffix = "" + fname = fname_presuffix(basename, suffix=suffix, use_ext=False, newpath=cwd) return fname @@ -292,9 +303,11 @@ def no_afni(): class AFNIPythonCommandInputSpec(CommandLineInputSpec): outputtype = traits.Enum( - 'AFNI', list(Info.ftypes.keys()), desc='AFNI output filetype') + "AFNI", list(Info.ftypes.keys()), desc="AFNI output filetype" + ) py27_path = traits.Either( - 'python2', File(exists=True), usedefault=True, default='python2') + "python2", File(exists=True), usedefault=True, default="python2" + ) class AFNIPythonCommand(AFNICommand): diff --git a/nipype/interfaces/afni/model.py b/nipype/interfaces/afni/model.py index efc4bf7b59..2e6d2fc15a 100644 --- a/nipype/interfaces/afni/model.py +++ b/nipype/interfaces/afni/model.py @@ -12,220 +12,263 @@ import os -from ..base import (CommandLineInputSpec, CommandLine, Directory, TraitedSpec, - traits, isdefined, File, InputMultiPath, Undefined, Str) +from ..base import ( + CommandLineInputSpec, + CommandLine, + Directory, + TraitedSpec, + traits, + isdefined, + File, + InputMultiPath, + Undefined, + Str, +) from ...external.due import BibTeX -from .base import (AFNICommandBase, AFNICommand, AFNICommandInputSpec, - AFNICommandOutputSpec, Info) +from .base import ( + AFNICommandBase, + AFNICommand, + AFNICommandInputSpec, + AFNICommandOutputSpec, + Info, +) class DeconvolveInputSpec(AFNICommandInputSpec): in_files = InputMultiPath( File(exists=True), - desc='filenames of 3D+time input datasets. More than one filename can ' - 'be given and the datasets will be auto-catenated in time. ' - 'You can input a 1D time series file here, but the time axis ' - 'should run along the ROW direction, not the COLUMN direction as ' - 'in the \'input1D\' option.', - argstr='-input %s', + desc="filenames of 3D+time input datasets. More than one filename can " + "be given and the datasets will be auto-catenated in time. " + "You can input a 1D time series file here, but the time axis " + "should run along the ROW direction, not the COLUMN direction as " + "in the 'input1D' option.", + argstr="-input %s", copyfile=False, sep=" ", - position=1) + position=1, + ) sat = traits.Bool( - desc='check the dataset time series for initial saturation transients,' - ' which should normally have been excised before data analysis.', - argstr='-sat', - xor=['trans']) + desc="check the dataset time series for initial saturation transients," + " which should normally have been excised before data analysis.", + argstr="-sat", + xor=["trans"], + ) trans = traits.Bool( - desc='check the dataset time series for initial saturation transients,' - ' which should normally have been excised before data analysis.', - argstr='-trans', - xor=['sat']) + desc="check the dataset time series for initial saturation transients," + " which should normally have been excised before data analysis.", + argstr="-trans", + xor=["sat"], + ) noblock = traits.Bool( - desc='normally, if you input multiple datasets with \'input\', then ' - 'the separate datasets are taken to be separate image runs that ' - 'get separate baseline models. 
Use this options if you want to ' - 'have the program consider these to be all one big run.' - '* If any of the input dataset has only 1 sub-brick, then this ' - 'option is automatically invoked!' - '* If the auto-catenation feature isn\'t used, then this option ' - 'has no effect, no how, no way.', - argstr='-noblock') + desc="normally, if you input multiple datasets with 'input', then " + "the separate datasets are taken to be separate image runs that " + "get separate baseline models. Use this options if you want to " + "have the program consider these to be all one big run." + "* If any of the input dataset has only 1 sub-brick, then this " + "option is automatically invoked!" + "* If the auto-catenation feature isn't used, then this option " + "has no effect, no how, no way.", + argstr="-noblock", + ) force_TR = traits.Float( - desc='use this value instead of the TR in the \'input\' ' - 'dataset. (It\'s better to fix the input using Refit.)', - argstr='-force_TR %f', - position=0) + desc="use this value instead of the TR in the 'input' " + "dataset. (It's better to fix the input using Refit.)", + argstr="-force_TR %f", + position=0, + ) input1D = File( - desc='filename of single (fMRI) .1D time series where time runs down ' - 'the column.', - argstr='-input1D %s', - exists=True) + desc="filename of single (fMRI) .1D time series where time runs down " + "the column.", + argstr="-input1D %s", + exists=True, + ) TR_1D = traits.Float( - desc='TR to use with \'input1D\'. This option has no effect if you do ' - 'not also use \'input1D\'.', - argstr='-TR_1D %f') + desc="TR to use with 'input1D'. This option has no effect if you do " + "not also use 'input1D'.", + argstr="-TR_1D %f", + ) legendre = traits.Bool( - desc='use Legendre polynomials for null hypothesis (baseline model)', - argstr='-legendre') + desc="use Legendre polynomials for null hypothesis (baseline model)", + argstr="-legendre", + ) nolegendre = traits.Bool( - desc='use power polynomials for null hypotheses. Don\'t do this ' - 'unless you are crazy!', - argstr='-nolegendre') + desc="use power polynomials for null hypotheses. 
Don't do this " + "unless you are crazy!", + argstr="-nolegendre", + ) nodmbase = traits.Bool( - desc='don\'t de-mean baseline time series', argstr='-nodmbase') + desc="don't de-mean baseline time series", argstr="-nodmbase" + ) dmbase = traits.Bool( - desc='de-mean baseline time series (default if \'polort\' >= 0)', - argstr='-dmbase') + desc="de-mean baseline time series (default if 'polort' >= 0)", argstr="-dmbase" + ) svd = traits.Bool( - desc='use SVD instead of Gaussian elimination (default)', - argstr='-svd') - nosvd = traits.Bool( - desc='use Gaussian elimination instead of SVD', argstr='-nosvd') + desc="use SVD instead of Gaussian elimination (default)", argstr="-svd" + ) + nosvd = traits.Bool(desc="use Gaussian elimination instead of SVD", argstr="-nosvd") rmsmin = traits.Float( - desc='minimum rms error to reject reduced model (default = 0; don\'t ' - 'use this option normally!)', - argstr='-rmsmin %f') + desc="minimum rms error to reject reduced model (default = 0; don't " + "use this option normally!)", + argstr="-rmsmin %f", + ) nocond = traits.Bool( - desc='DON\'T calculate matrix condition number', argstr='-nocond') + desc="DON'T calculate matrix condition number", argstr="-nocond" + ) singvals = traits.Bool( - desc='print out the matrix singular values', argstr='-singvals') + desc="print out the matrix singular values", argstr="-singvals" + ) goforit = traits.Int( - desc='use this to proceed even if the matrix has bad problems (e.g., ' - 'duplicate columns, large condition number, etc.).', - argstr='-GOFORIT %i') + desc="use this to proceed even if the matrix has bad problems (e.g., " + "duplicate columns, large condition number, etc.).", + argstr="-GOFORIT %i", + ) allzero_OK = traits.Bool( - desc='don\'t consider all zero matrix columns to be the type of error ' - 'that \'gotforit\' is needed to ignore.', - argstr='-allzero_OK') + desc="don't consider all zero matrix columns to be the type of error " + "that 'gotforit' is needed to ignore.", + argstr="-allzero_OK", + ) dname = traits.Tuple( - Str, - Str, - desc='set environmental variable to provided value', - argstr='-D%s=%s') + Str, Str, desc="set environmental variable to provided value", argstr="-D%s=%s" + ) mask = File( - desc='filename of 3D mask dataset; only data time series from within ' - 'the mask will be analyzed; results for voxels outside the mask ' - 'will be set to zero.', - argstr='-mask %s', - exists=True) + desc="filename of 3D mask dataset; only data time series from within " + "the mask will be analyzed; results for voxels outside the mask " + "will be set to zero.", + argstr="-mask %s", + exists=True, + ) automask = traits.Bool( - desc='build a mask automatically from input data (will be slow for ' - 'long time series datasets)', - argstr='-automask') + desc="build a mask automatically from input data (will be slow for " + "long time series datasets)", + argstr="-automask", + ) STATmask = File( - desc='build a mask from provided file, and use this mask for the ' - 'purpose of reporting truncation-to float issues AND for ' - 'computing the FDR curves. The actual results ARE not masked ' - 'with this option (only with \'mask\' or \'automask\' options).', - argstr='-STATmask %s', - exists=True) + desc="build a mask from provided file, and use this mask for the " + "purpose of reporting truncation-to float issues AND for " + "computing the FDR curves. 
The actual results ARE not masked " + "with this option (only with 'mask' or 'automask' options).", + argstr="-STATmask %s", + exists=True, + ) censor = File( - desc='filename of censor .1D time series. This is a file of 1s and ' - '0s, indicating which time points are to be included (1) and ' - 'which are to be excluded (0).', - argstr='-censor %s', - exists=True) + desc="filename of censor .1D time series. This is a file of 1s and " + "0s, indicating which time points are to be included (1) and " + "which are to be excluded (0).", + argstr="-censor %s", + exists=True, + ) polort = traits.Int( - desc='degree of polynomial corresponding to the null hypothesis ' - '[default: 1]', - argstr='-polort %d') + desc="degree of polynomial corresponding to the null hypothesis " + "[default: 1]", + argstr="-polort %d", + ) ortvec = traits.Tuple( - File(desc='filename', exists=True), - Str(desc='label'), - desc='this option lets you input a rectangular array of 1 or more ' - 'baseline vectors from a file. This method is a fast way to ' - 'include a lot of baseline regressors in one step. ', - argstr='-ortvec %s %s') - x1D = File(desc='specify name for saved X matrix', argstr='-x1D %s') + File(desc="filename", exists=True), + Str(desc="label"), + desc="this option lets you input a rectangular array of 1 or more " + "baseline vectors from a file. This method is a fast way to " + "include a lot of baseline regressors in one step. ", + argstr="-ortvec %s %s", + ) + x1D = File(desc="specify name for saved X matrix", argstr="-x1D %s") x1D_stop = traits.Bool( - desc='stop running after writing .xmat.1D file', argstr='-x1D_stop') + desc="stop running after writing .xmat.1D file", argstr="-x1D_stop" + ) cbucket = traits.Str( - desc='Name for dataset in which to save the regression ' - 'coefficients (no statistics). This dataset ' - 'will be used in a -xrestore run [not yet implemented] ' - 'instead of the bucket dataset, if possible.', - argstr='-cbucket %s') - out_file = File(desc='output statistics file', argstr='-bucket %s') + desc="Name for dataset in which to save the regression " + "coefficients (no statistics). 
This dataset " + "will be used in a -xrestore run [not yet implemented] " + "instead of the bucket dataset, if possible.", + argstr="-cbucket %s", + ) + out_file = File(desc="output statistics file", argstr="-bucket %s") num_threads = traits.Int( - desc='run the program with provided number of sub-processes', - argstr='-jobs %d', - nohash=True) - fout = traits.Bool( - desc='output F-statistic for each stimulus', argstr='-fout') + desc="run the program with provided number of sub-processes", + argstr="-jobs %d", + nohash=True, + ) + fout = traits.Bool(desc="output F-statistic for each stimulus", argstr="-fout") rout = traits.Bool( - desc='output the R^2 statistic for each stimulus', argstr='-rout') - tout = traits.Bool( - desc='output the T-statistic for each stimulus', argstr='-tout') + desc="output the R^2 statistic for each stimulus", argstr="-rout" + ) + tout = traits.Bool(desc="output the T-statistic for each stimulus", argstr="-tout") vout = traits.Bool( - desc='output the sample variance (MSE) for each stimulus', - argstr='-vout') + desc="output the sample variance (MSE) for each stimulus", argstr="-vout" + ) nofdr = traits.Bool( - desc="Don't compute the statistic-vs-FDR curves for the bucket " - "dataset.", - argstr='-noFDR') + desc="Don't compute the statistic-vs-FDR curves for the bucket " "dataset.", + argstr="-noFDR", + ) global_times = traits.Bool( - desc='use global timing for stimulus timing files', - argstr='-global_times', - xor=['local_times']) + desc="use global timing for stimulus timing files", + argstr="-global_times", + xor=["local_times"], + ) local_times = traits.Bool( - desc='use local timing for stimulus timing files', - argstr='-local_times', - xor=['global_times']) + desc="use local timing for stimulus timing files", + argstr="-local_times", + xor=["global_times"], + ) num_stimts = traits.Int( - desc='number of stimulus timing files', - argstr='-num_stimts %d', - position=-6) + desc="number of stimulus timing files", argstr="-num_stimts %d", position=-6 + ) stim_times = traits.List( traits.Tuple( - traits.Int(desc='k-th response model'), - File(desc='stimulus timing file', exists=True), - Str(desc='model')), - desc='generate a response model from a set of stimulus times' - ' given in file.', - argstr='-stim_times %d %s \'%s\'...', - position=-5) + traits.Int(desc="k-th response model"), + File(desc="stimulus timing file", exists=True), + Str(desc="model"), + ), + desc="generate a response model from a set of stimulus times" " given in file.", + argstr="-stim_times %d %s '%s'...", + position=-5, + ) stim_label = traits.List( traits.Tuple( - traits.Int(desc='k-th input stimulus'), - Str(desc='stimulus label')), - desc='label for kth input stimulus (e.g., Label1)', - argstr='-stim_label %d %s...', - requires=['stim_times'], - position=-4) + traits.Int(desc="k-th input stimulus"), Str(desc="stimulus label") + ), + desc="label for kth input stimulus (e.g., Label1)", + argstr="-stim_label %d %s...", + requires=["stim_times"], + position=-4, + ) stim_times_subtract = traits.Float( - desc='this option means to subtract specified seconds from each time ' - 'encountered in any \'stim_times\' option. The purpose of this ' - 'option is to make it simple to adjust timing files for the ' - 'removal of images from the start of each imaging run.', - argstr='-stim_times_subtract %f') + desc="this option means to subtract specified seconds from each time " + "encountered in any 'stim_times' option. 
The purpose of this " + "option is to make it simple to adjust timing files for the " + "removal of images from the start of each imaging run.", + argstr="-stim_times_subtract %f", + ) num_glt = traits.Int( - desc='number of general linear tests (i.e., contrasts)', - argstr='-num_glt %d', - position=-3) + desc="number of general linear tests (i.e., contrasts)", + argstr="-num_glt %d", + position=-3, + ) gltsym = traits.List( - Str(desc='symbolic general linear test'), - desc='general linear tests (i.e., contrasts) using symbolic ' - 'conventions (e.g., \'+Label1 -Label2\')', - argstr='-gltsym \'SYM: %s\'...', - position=-2) + Str(desc="symbolic general linear test"), + desc="general linear tests (i.e., contrasts) using symbolic " + "conventions (e.g., '+Label1 -Label2')", + argstr="-gltsym 'SYM: %s'...", + position=-2, + ) glt_label = traits.List( traits.Tuple( - traits.Int(desc='k-th general linear test'), - Str(desc='GLT label')), - desc='general linear test (i.e., contrast) labels', - argstr='-glt_label %d %s...', - requires=['gltsym'], - position=-1) + traits.Int(desc="k-th general linear test"), Str(desc="GLT label") + ), + desc="general linear test (i.e., contrast) labels", + argstr="-glt_label %d %s...", + requires=["gltsym"], + position=-1, + ) class DeconvolveOutputSpec(TraitedSpec): - out_file = File(desc='output statistics file', exists=True) + out_file = File(desc="output statistics file", exists=True) reml_script = File( - desc='automatical generated script to run 3dREMLfit', exists=True) - x1D = File(desc='save out X matrix', exists=True) - cbucket = File(desc='output regression coefficients file (if generated)') + desc="automatical generated script to run 3dREMLfit", exists=True + ) + x1D = File(desc="save out X matrix", exists=True) + cbucket = File(desc="output regression coefficients file (if generated)") class Deconvolve(AFNICommand): @@ -252,28 +295,27 @@ class Deconvolve(AFNICommand): >>> res = deconvolve.run() # doctest: +SKIP """ - _cmd = '3dDeconvolve' + _cmd = "3dDeconvolve" input_spec = DeconvolveInputSpec output_spec = DeconvolveOutputSpec def _format_arg(self, name, trait_spec, value): - if name == 'gltsym': + if name == "gltsym": for n, val in enumerate(value): - if val.startswith('SYM: '): - value[n] = val.lstrip('SYM: ') + if val.startswith("SYM: "): + value[n] = val.lstrip("SYM: ") return super(Deconvolve, self)._format_arg(name, trait_spec, value) def _parse_inputs(self, skip=None): if skip is None: skip = [] - if len(self.inputs.stim_times) and not isdefined( - self.inputs.num_stimts): + if len(self.inputs.stim_times) and not isdefined(self.inputs.num_stimts): self.inputs.num_stimts = len(self.inputs.stim_times) if len(self.inputs.gltsym) and not isdefined(self.inputs.num_glt): self.inputs.num_glt = len(self.inputs.gltsym) if not isdefined(self.inputs.out_file): - self.inputs.out_file = 'Decon.nii' + self.inputs.out_file = "Decon.nii" return super(Deconvolve, self)._parse_inputs(skip) @@ -281,28 +323,26 @@ def _list_outputs(self): outputs = self.output_spec().get() _gen_fname_opts = {} - _gen_fname_opts['basename'] = self.inputs.out_file - _gen_fname_opts['cwd'] = os.getcwd() + _gen_fname_opts["basename"] = self.inputs.out_file + _gen_fname_opts["cwd"] = os.getcwd() if isdefined(self.inputs.x1D): - if not self.inputs.x1D.endswith('.xmat.1D'): - outputs['x1D'] = os.path.abspath(self.inputs.x1D + '.xmat.1D') + if not self.inputs.x1D.endswith(".xmat.1D"): + outputs["x1D"] = os.path.abspath(self.inputs.x1D + ".xmat.1D") else: - outputs['x1D'] = 
os.path.abspath(self.inputs.x1D) + outputs["x1D"] = os.path.abspath(self.inputs.x1D) else: - outputs['x1D'] = self._gen_fname( - suffix='.xmat.1D', **_gen_fname_opts) + outputs["x1D"] = self._gen_fname(suffix=".xmat.1D", **_gen_fname_opts) if isdefined(self.inputs.cbucket): - outputs['cbucket'] = os.path.abspath(self.inputs.cbucket) + outputs["cbucket"] = os.path.abspath(self.inputs.cbucket) - outputs['reml_script'] = self._gen_fname( - suffix='.REML_cmd', **_gen_fname_opts) + outputs["reml_script"] = self._gen_fname(suffix=".REML_cmd", **_gen_fname_opts) # remove out_file from outputs if x1d_stop set to True if self.inputs.x1D_stop: - del outputs['out_file'], outputs['cbucket'] + del outputs["out_file"], outputs["cbucket"] else: - outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) return outputs @@ -311,239 +351,261 @@ class RemlfitInputSpec(AFNICommandInputSpec): # mandatory files in_files = InputMultiPath( File(exists=True), - desc='Read time series dataset', + desc="Read time series dataset", argstr='-input "%s"', mandatory=True, copyfile=False, - sep=" ") + sep=" ", + ) matrix = File( - desc='the design matrix file, which should have been output from ' - 'Deconvolve via the \'x1D\' option', - argstr='-matrix %s', - mandatory=True) + desc="the design matrix file, which should have been output from " + "Deconvolve via the 'x1D' option", + argstr="-matrix %s", + mandatory=True, + ) # "Semi-Hidden Alternative Ways to Define the Matrix" polort = traits.Int( - desc='if no \'matrix\' option is given, AND no \'matim\' option, ' - 'create a matrix with Legendre polynomial regressors' - 'up to the specified order. The default value is 0, which' - 'produces a matrix with a single column of all ones', - argstr='-polort %d', - xor=['matrix']) + desc="if no 'matrix' option is given, AND no 'matim' option, " + "create a matrix with Legendre polynomial regressors" + "up to the specified order. The default value is 0, which" + "produces a matrix with a single column of all ones", + argstr="-polort %d", + xor=["matrix"], + ) matim = File( - desc='read a standard file as the matrix. You can use only Col as ' - 'a name in GLTs with these nonstandard matrix input methods, ' - 'since the other names come from the \'matrix\' file. ' - 'These mutually exclusive options are ignored if \'matrix\' ' - 'is used.', - argstr='-matim %s', - xor=['matrix']) + desc="read a standard file as the matrix. You can use only Col as " + "a name in GLTs with these nonstandard matrix input methods, " + "since the other names come from the 'matrix' file. 
" + "These mutually exclusive options are ignored if 'matrix' " + "is used.", + argstr="-matim %s", + xor=["matrix"], + ) # Other arguments mask = File( - desc='filename of 3D mask dataset; only data time series from within ' - 'the mask will be analyzed; results for voxels outside the mask ' - 'will be set to zero.', - argstr='-mask %s', - exists=True) + desc="filename of 3D mask dataset; only data time series from within " + "the mask will be analyzed; results for voxels outside the mask " + "will be set to zero.", + argstr="-mask %s", + exists=True, + ) automask = traits.Bool( usedefault=True, - argstr='-automask', - desc='build a mask automatically from input data (will be slow for ' - 'long time series datasets)') + argstr="-automask", + desc="build a mask automatically from input data (will be slow for " + "long time series datasets)", + ) STATmask = File( - desc='filename of 3D mask dataset to be used for the purpose ' - 'of reporting truncation-to float issues AND for computing the ' - 'FDR curves. The actual results ARE not masked with this option ' - '(only with \'mask\' or \'automask\' options).', - argstr='-STATmask %s', - exists=True) + desc="filename of 3D mask dataset to be used for the purpose " + "of reporting truncation-to float issues AND for computing the " + "FDR curves. The actual results ARE not masked with this option " + "(only with 'mask' or 'automask' options).", + argstr="-STATmask %s", + exists=True, + ) addbase = InputMultiPath( - File( - exists=True, - desc='file containing columns to add to regression matrix'), - desc='file(s) to add baseline model columns to the matrix with this ' - 'option. Each column in the specified file(s) will be appended ' - 'to the matrix. File(s) must have at least as many rows as the ' - 'matrix does.', + File(exists=True, desc="file containing columns to add to regression matrix"), + desc="file(s) to add baseline model columns to the matrix with this " + "option. Each column in the specified file(s) will be appended " + "to the matrix. File(s) must have at least as many rows as the " + "matrix does.", copyfile=False, sep=" ", - argstr='-addbase %s') + argstr="-addbase %s", + ) slibase = InputMultiPath( - File( - exists=True, - desc='file containing columns to add to regression matrix'), - desc='similar to \'addbase\' in concept, BUT each specified file ' - 'must have an integer multiple of the number of slices ' - 'in the input dataset(s); then, separate regression ' - 'matrices are generated for each slice, with the ' - 'first column of the file appended to the matrix for ' - 'the first slice of the dataset, the second column of the file ' - 'appended to the matrix for the first slice of the dataset, ' - 'and so on. Intended to help model physiological noise in FMRI, ' - 'or other effects you want to regress out that might ' - 'change significantly in the inter-slice time intervals. This ' - 'will slow the program down, and make it use a lot more memory ' - '(to hold all the matrix stuff).', - argstr='-slibase %s') + File(exists=True, desc="file containing columns to add to regression matrix"), + desc="similar to 'addbase' in concept, BUT each specified file " + "must have an integer multiple of the number of slices " + "in the input dataset(s); then, separate regression " + "matrices are generated for each slice, with the " + "first column of the file appended to the matrix for " + "the first slice of the dataset, the second column of the file " + "appended to the matrix for the first slice of the dataset, " + "and so on. 
Intended to help model physiological noise in FMRI, " + "or other effects you want to regress out that might " + "change significantly in the inter-slice time intervals. This " + "will slow the program down, and make it use a lot more memory " + "(to hold all the matrix stuff).", + argstr="-slibase %s", + ) slibase_sm = InputMultiPath( - File( - exists=True, - desc='file containing columns to add to regression matrix'), - desc='similar to \'slibase\', BUT each file much be in slice major ' - 'order (i.e. all slice0 columns come first, then all slice1 ' - 'columns, etc).', - argstr='-slibase_sm %s') + File(exists=True, desc="file containing columns to add to regression matrix"), + desc="similar to 'slibase', BUT each file much be in slice major " + "order (i.e. all slice0 columns come first, then all slice1 " + "columns, etc).", + argstr="-slibase_sm %s", + ) usetemp = traits.Bool( - desc='write intermediate stuff to disk, to economize on RAM. ' - 'Using this option might be necessary to run with ' - '\'slibase\' and with \'Grid\' values above the default, ' - 'since the program has to store a large number of ' - 'matrices for such a problem: two for every slice and ' - 'for every (a,b) pair in the ARMA parameter grid. Temporary ' - 'files are written to the directory given in environment ' - 'variable TMPDIR, or in /tmp, or in ./ (preference is in that ' - 'order)', - argstr='-usetemp') + desc="write intermediate stuff to disk, to economize on RAM. " + "Using this option might be necessary to run with " + "'slibase' and with 'Grid' values above the default, " + "since the program has to store a large number of " + "matrices for such a problem: two for every slice and " + "for every (a,b) pair in the ARMA parameter grid. Temporary " + "files are written to the directory given in environment " + "variable TMPDIR, or in /tmp, or in ./ (preference is in that " + "order)", + argstr="-usetemp", + ) nodmbase = traits.Bool( - desc='by default, baseline columns added to the matrix via ' - '\'addbase\' or \'slibase\' or \'dsort\' will each have their ' - 'mean removed (as is done in Deconvolve); this option turns this ' - 'centering off', - argstr='-nodmbase', - requires=['addbase', 'dsort']) + desc="by default, baseline columns added to the matrix via " + "'addbase' or 'slibase' or 'dsort' will each have their " + "mean removed (as is done in Deconvolve); this option turns this " + "centering off", + argstr="-nodmbase", + requires=["addbase", "dsort"], + ) dsort = File( - desc='4D dataset to be used as voxelwise baseline regressor', + desc="4D dataset to be used as voxelwise baseline regressor", exists=True, copyfile=False, - argstr='-dsort %s') + argstr="-dsort %s", + ) dsort_nods = traits.Bool( - desc='if \'dsort\' option is used, this command will output ' - 'additional results files excluding the \'dsort\' file', - argstr='-dsort_nods', - requires=['dsort']) - fout = traits.Bool( - desc='output F-statistic for each stimulus', argstr='-fout') + desc="if 'dsort' option is used, this command will output " + "additional results files excluding the 'dsort' file", + argstr="-dsort_nods", + requires=["dsort"], + ) + fout = traits.Bool(desc="output F-statistic for each stimulus", argstr="-fout") rout = traits.Bool( - desc='output the R^2 statistic for each stimulus', argstr='-rout') + desc="output the R^2 statistic for each stimulus", argstr="-rout" + ) tout = traits.Bool( - desc='output the T-statistic for each stimulus; if you use ' - '\'out_file\' and do not give any of \'fout\', \'tout\',' - 'or 
\'rout\', then the program assumes \'fout\' is activated.', - argstr='-tout') + desc="output the T-statistic for each stimulus; if you use " + "'out_file' and do not give any of 'fout', 'tout'," + "or 'rout', then the program assumes 'fout' is activated.", + argstr="-tout", + ) nofdr = traits.Bool( - desc='do NOT add FDR curve data to bucket datasets; FDR curves can ' - 'take a long time if \'tout\' is used', - argstr='-noFDR') + desc="do NOT add FDR curve data to bucket datasets; FDR curves can " + "take a long time if 'tout' is used", + argstr="-noFDR", + ) nobout = traits.Bool( - desc='do NOT add baseline (null hypothesis) regressor betas ' - 'to the \'rbeta_file\' and/or \'obeta_file\' output datasets.', - argstr='-nobout') + desc="do NOT add baseline (null hypothesis) regressor betas " + "to the 'rbeta_file' and/or 'obeta_file' output datasets.", + argstr="-nobout", + ) gltsym = traits.List( traits.Either( - traits.Tuple(File(exists=True), Str()), traits.Tuple(Str(), - Str())), - desc='read a symbolic GLT from input file and associate it with a ' - 'label. As in Deconvolve, you can also use the \'SYM:\' method ' - 'to provide the definition of the GLT directly as a string ' - '(e.g., with \'SYM: +Label1 -Label2\'). Unlike Deconvolve, you ' - 'MUST specify \'SYM: \' if providing the GLT directly as a ' - 'string instead of from a file', - argstr='-gltsym "%s" %s...') + traits.Tuple(File(exists=True), Str()), traits.Tuple(Str(), Str()) + ), + desc="read a symbolic GLT from input file and associate it with a " + "label. As in Deconvolve, you can also use the 'SYM:' method " + "to provide the definition of the GLT directly as a string " + "(e.g., with 'SYM: +Label1 -Label2'). Unlike Deconvolve, you " + "MUST specify 'SYM: ' if providing the GLT directly as a " + "string instead of from a file", + argstr='-gltsym "%s" %s...', + ) out_file = File( - desc='output dataset for beta + statistics from the REML estimation; ' - 'also contains the results of any GLT analysis requested ' - 'in the Deconvolve setup, similar to the \'bucket\' output ' - 'from Deconvolve. This dataset does NOT get the betas ' - '(or statistics) of those regressors marked as \'baseline\' ' - 'in the matrix file.', - argstr='-Rbuck %s') + desc="output dataset for beta + statistics from the REML estimation; " + "also contains the results of any GLT analysis requested " + "in the Deconvolve setup, similar to the 'bucket' output " + "from Deconvolve. This dataset does NOT get the betas " + "(or statistics) of those regressors marked as 'baseline' " + "in the matrix file.", + argstr="-Rbuck %s", + ) var_file = File( - desc='output dataset for REML variance parameters', argstr='-Rvar %s') + desc="output dataset for REML variance parameters", argstr="-Rvar %s" + ) rbeta_file = File( - desc='output dataset for beta weights from the REML estimation, ' - 'similar to the \'cbucket\' output from Deconvolve. This dataset ' - 'will contain all the beta weights, for baseline and stimulus ' - 'regressors alike, unless the \'-nobout\' option is given -- ' - 'in that case, this dataset will only get the betas for the ' - 'stimulus regressors.', - argstr='-Rbeta %s') + desc="output dataset for beta weights from the REML estimation, " + "similar to the 'cbucket' output from Deconvolve. 
This dataset " + "will contain all the beta weights, for baseline and stimulus " + "regressors alike, unless the '-nobout' option is given -- " + "in that case, this dataset will only get the betas for the " + "stimulus regressors.", + argstr="-Rbeta %s", + ) glt_file = File( - desc='output dataset for beta + statistics from the REML estimation, ' - 'but ONLY for the GLTs added on the REMLfit command line itself ' - 'via \'gltsym\'; GLTs from Deconvolve\'s command line will NOT ' - 'be included.', - argstr='-Rglt %s') - fitts_file = File( - desc='ouput dataset for REML fitted model', argstr='-Rfitts %s') + desc="output dataset for beta + statistics from the REML estimation, " + "but ONLY for the GLTs added on the REMLfit command line itself " + "via 'gltsym'; GLTs from Deconvolve's command line will NOT " + "be included.", + argstr="-Rglt %s", + ) + fitts_file = File(desc="ouput dataset for REML fitted model", argstr="-Rfitts %s") errts_file = File( - desc='output dataset for REML residuals = data - fitted model', - argstr='-Rerrts %s') + desc="output dataset for REML residuals = data - fitted model", + argstr="-Rerrts %s", + ) wherr_file = File( - desc='dataset for REML residual, whitened using the estimated ' - 'ARMA(1,1) correlation matrix of the noise', - argstr='-Rwherr %s') - quiet = traits.Bool( - desc='turn off most progress messages', argstr='-quiet') + desc="dataset for REML residual, whitened using the estimated " + "ARMA(1,1) correlation matrix of the noise", + argstr="-Rwherr %s", + ) + quiet = traits.Bool(desc="turn off most progress messages", argstr="-quiet") verb = traits.Bool( - desc='turns on more progress messages, including memory usage ' - 'progress reports at various stages', - argstr='-verb') + desc="turns on more progress messages, including memory usage " + "progress reports at various stages", + argstr="-verb", + ) goforit = traits.Bool( - desc='With potential issues flagged in the design matrix, an attempt ' - 'will nevertheless be made to fit the model', - argstr='-GOFORIT') + desc="With potential issues flagged in the design matrix, an attempt " + "will nevertheless be made to fit the model", + argstr="-GOFORIT", + ) ovar = File( - desc='dataset for OLSQ st.dev. parameter (kind of boring)', - argstr='-Ovar %s') + desc="dataset for OLSQ st.dev. 
parameter (kind of boring)", argstr="-Ovar %s" + ) obeta = File( - desc='dataset for beta weights from the OLSQ estimation', - argstr='-Obeta %s') + desc="dataset for beta weights from the OLSQ estimation", argstr="-Obeta %s" + ) obuck = File( - desc='dataset for beta + statistics from the OLSQ estimation', - argstr='-Obuck %s') + desc="dataset for beta + statistics from the OLSQ estimation", + argstr="-Obuck %s", + ) oglt = File( - desc='dataset for beta + statistics from \'gltsym\' options', - argstr='-Oglt %s') - ofitts = File(desc='dataset for OLSQ fitted model', argstr='-Ofitts %s') + desc="dataset for beta + statistics from 'gltsym' options", argstr="-Oglt %s" + ) + ofitts = File(desc="dataset for OLSQ fitted model", argstr="-Ofitts %s") oerrts = File( - desc='dataset for OLSQ residuals (data - fitted model)', - argstr='-Oerrts %s') + desc="dataset for OLSQ residuals (data - fitted model)", argstr="-Oerrts %s" + ) class RemlfitOutputSpec(AFNICommandOutputSpec): out_file = File( - desc='dataset for beta + statistics from the REML estimation (if ' - 'generated') - var_file = File(desc='dataset for REML variance parameters (if generated)') + desc="dataset for beta + statistics from the REML estimation (if " "generated" + ) + var_file = File(desc="dataset for REML variance parameters (if generated)") rbeta_file = File( - desc='dataset for beta weights from the REML estimation (if ' - 'generated)') + desc="dataset for beta weights from the REML estimation (if " "generated)" + ) rbeta_file = File( - desc='output dataset for beta weights from the REML estimation (if ' - 'generated') + desc="output dataset for beta weights from the REML estimation (if " "generated" + ) glt_file = File( - desc='output dataset for beta + statistics from the REML estimation, ' - 'but ONLY for the GLTs added on the REMLfit command ' - 'line itself via \'gltsym\' (if generated)') - fitts_file = File( - desc='ouput dataset for REML fitted model (if generated)') + desc="output dataset for beta + statistics from the REML estimation, " + "but ONLY for the GLTs added on the REMLfit command " + "line itself via 'gltsym' (if generated)" + ) + fitts_file = File(desc="ouput dataset for REML fitted model (if generated)") errts_file = File( - desc='output dataset for REML residuals = data - fitted model (if ' - 'generated') + desc="output dataset for REML residuals = data - fitted model (if " "generated" + ) wherr_file = File( - desc='dataset for REML residual, whitened using the estimated ' - 'ARMA(1,1) correlation matrix of the noise (if generated)') - ovar = File(desc='dataset for OLSQ st.dev. parameter (if generated)') - obeta = File(desc='dataset for beta weights from the OLSQ estimation (if ' - 'generated)') + desc="dataset for REML residual, whitened using the estimated " + "ARMA(1,1) correlation matrix of the noise (if generated)" + ) + ovar = File(desc="dataset for OLSQ st.dev. 
parameter (if generated)") + obeta = File( + desc="dataset for beta weights from the OLSQ estimation (if " "generated)" + ) obuck = File( - desc='dataset for beta + statistics from the OLSQ estimation (if ' - 'generated)') + desc="dataset for beta + statistics from the OLSQ estimation (if " "generated)" + ) oglt = File( - desc='dataset for beta + statistics from \'gltsym\' options (if ' - 'generated') - ofitts = File(desc='dataset for OLSQ fitted model (if generated)') - oerrts = File(desc='dataset for OLSQ residuals = data - fitted model (if ' - 'generated') + desc="dataset for beta + statistics from 'gltsym' options (if " "generated" + ) + ofitts = File(desc="dataset for OLSQ fitted model (if generated)") + oerrts = File( + desc="dataset for OLSQ residuals = data - fitted model (if " "generated" + ) class Remlfit(AFNICommand): @@ -568,7 +630,7 @@ class Remlfit(AFNICommand): >>> res = remlfit.run() # doctest: +SKIP """ - _cmd = '3dREMLfit' + _cmd = "3dREMLfit" input_spec = RemlfitInputSpec output_spec = RemlfitOutputSpec @@ -589,50 +651,53 @@ def _list_outputs(self): class SynthesizeInputSpec(AFNICommandInputSpec): cbucket = File( - desc='Read the dataset output from ' - '3dDeconvolve via the \'-cbucket\' option.', - argstr='-cbucket %s', + desc="Read the dataset output from " "3dDeconvolve via the '-cbucket' option.", + argstr="-cbucket %s", copyfile=False, - mandatory=True) + mandatory=True, + ) matrix = File( - desc='Read the matrix output from ' - '3dDeconvolve via the \'-x1D\' option.', - argstr='-matrix %s', + desc="Read the matrix output from " "3dDeconvolve via the '-x1D' option.", + argstr="-matrix %s", copyfile=False, - mandatory=True) + mandatory=True, + ) select = traits.List( - Str(desc='selected columns to synthesize'), - argstr='-select %s', - desc='A list of selected columns from the matrix (and the ' - 'corresponding coefficient sub-bricks from the ' - 'cbucket). Valid types include \'baseline\', ' - ' \'polort\', \'allfunc\', \'allstim\', \'all\', ' - 'Can also provide \'something\' where something matches ' - 'a stim_label from 3dDeconvolve, and \'digits\' where digits ' - 'are the numbers of the select matrix columns by ' - 'numbers (starting at 0), or number ranges of the form ' - '\'3..7\' and \'3-7\'.', - mandatory=True) + Str(desc="selected columns to synthesize"), + argstr="-select %s", + desc="A list of selected columns from the matrix (and the " + "corresponding coefficient sub-bricks from the " + "cbucket). Valid types include 'baseline', " + " 'polort', 'allfunc', 'allstim', 'all', " + "Can also provide 'something' where something matches " + "a stim_label from 3dDeconvolve, and 'digits' where digits " + "are the numbers of the select matrix columns by " + "numbers (starting at 0), or number ranges of the form " + "'3..7' and '3-7'.", + mandatory=True, + ) out_file = File( - name_template='syn', - desc='output dataset prefix name (default \'syn\')', - argstr='-prefix %s') + name_template="syn", + desc="output dataset prefix name (default 'syn')", + argstr="-prefix %s", + ) dry_run = traits.Bool( - desc='Don\'t compute the output, just ' - 'check the inputs.', - argstr='-dry') + desc="Don't compute the output, just " "check the inputs.", argstr="-dry" + ) TR = traits.Float( - desc='TR to set in the output. The default value of ' - 'TR is read from the header of the matrix file.', - argstr='-TR %f') + desc="TR to set in the output. 
The default value of " + "TR is read from the header of the matrix file.", + argstr="-TR %f", + ) cenfill = traits.Enum( - 'zero', - 'nbhr', - 'none', - argstr='-cenfill %s', - desc='Determines how censored time points from the ' - '3dDeconvolve run will be filled. Valid types ' - 'are \'zero\', \'nbhr\' and \'none\'.') + "zero", + "nbhr", + "none", + argstr="-cenfill %s", + desc="Determines how censored time points from the " + "3dDeconvolve run will be filled. Valid types " + "are 'zero', 'nbhr' and 'none'.", + ) class Synthesize(AFNICommand): @@ -656,7 +721,7 @@ class Synthesize(AFNICommand): >>> syn = synthesize.run() # doctest: +SKIP """ - _cmd = '3dSynthesize' + _cmd = "3dSynthesize" input_spec = SynthesizeInputSpec output_spec = AFNICommandOutputSpec diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index 17b23a2491..3e0c02eda7 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -6,108 +6,134 @@ import os import os.path as op -from ...utils.filemanip import (load_json, save_json, split_filename, - fname_presuffix) -from ..base import (CommandLineInputSpec, CommandLine, TraitedSpec, traits, - isdefined, File, InputMultiPath, Undefined, Str, - InputMultiObject) - -from .base import (AFNICommandBase, AFNICommand, AFNICommandInputSpec, - AFNICommandOutputSpec, AFNIPythonCommandInputSpec, - AFNIPythonCommand, Info, no_afni) - -from ...import logging -iflogger = logging.getLogger('nipype.interface') +from ...utils.filemanip import load_json, save_json, split_filename, fname_presuffix +from ..base import ( + CommandLineInputSpec, + CommandLine, + TraitedSpec, + traits, + isdefined, + File, + InputMultiPath, + Undefined, + Str, + InputMultiObject, +) + +from .base import ( + AFNICommandBase, + AFNICommand, + AFNICommandInputSpec, + AFNICommandOutputSpec, + AFNIPythonCommandInputSpec, + AFNIPythonCommand, + Info, + no_afni, +) + +from ... 
import logging + +iflogger = logging.getLogger("nipype.interface") class CentralityInputSpec(AFNICommandInputSpec): """Common input spec class for all centrality-related commands """ - mask = File( - desc='mask file to mask input data', argstr='-mask %s', exists=True) + mask = File(desc="mask file to mask input data", argstr="-mask %s", exists=True) thresh = traits.Float( - desc='threshold to exclude connections where corr <= thresh', - argstr='-thresh %f') - polort = traits.Int(desc='', argstr='-polort %d') + desc="threshold to exclude connections where corr <= thresh", + argstr="-thresh %f", + ) + polort = traits.Int(desc="", argstr="-polort %d") autoclip = traits.Bool( - desc='Clip off low-intensity regions in the dataset', - argstr='-autoclip') + desc="Clip off low-intensity regions in the dataset", argstr="-autoclip" + ) automask = traits.Bool( - desc='Mask the dataset to target brain-only voxels', - argstr='-automask') + desc="Mask the dataset to target brain-only voxels", argstr="-automask" + ) class AlignEpiAnatPyInputSpec(AFNIPythonCommandInputSpec): in_file = File( - desc='EPI dataset to align', - argstr='-epi %s', + desc="EPI dataset to align", + argstr="-epi %s", mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) anat = File( - desc='name of structural dataset', - argstr='-anat %s', + desc="name of structural dataset", + argstr="-anat %s", mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) epi_base = traits.Either( traits.Range(low=0), - traits.Enum('mean', 'median', 'max'), - desc='the epi base used in alignment' - 'should be one of (0/mean/median/max/subbrick#)', + traits.Enum("mean", "median", "max"), + desc="the epi base used in alignment" + "should be one of (0/mean/median/max/subbrick#)", mandatory=True, - argstr='-epi_base %s') + argstr="-epi_base %s", + ) anat2epi = traits.Bool( - desc='align anatomical to EPI dataset (default)', argstr='-anat2epi') - epi2anat = traits.Bool( - desc='align EPI to anatomical dataset', argstr='-epi2anat') + desc="align anatomical to EPI dataset (default)", argstr="-anat2epi" + ) + epi2anat = traits.Bool(desc="align EPI to anatomical dataset", argstr="-epi2anat") save_skullstrip = traits.Bool( - desc='save skull-stripped (not aligned)', argstr='-save_skullstrip') + desc="save skull-stripped (not aligned)", argstr="-save_skullstrip" + ) suffix = traits.Str( - '_al', - desc='append suffix to the original anat/epi dataset to use' + "_al", + desc="append suffix to the original anat/epi dataset to use" 'in the resulting dataset names (default is "_al")', usedefault=True, - argstr='-suffix %s') + argstr="-suffix %s", + ) epi_strip = traits.Enum( - ('3dSkullStrip', '3dAutomask', 'None'), - desc='method to mask brain in EPI data' - 'should be one of[3dSkullStrip]/3dAutomask/None)', - argstr='-epi_strip %s') + ("3dSkullStrip", "3dAutomask", "None"), + desc="method to mask brain in EPI data" + "should be one of[3dSkullStrip]/3dAutomask/None)", + argstr="-epi_strip %s", + ) volreg = traits.Enum( - 'on', - 'off', + "on", + "off", usedefault=True, - desc='do volume registration on EPI dataset before alignment' - 'should be \'on\' or \'off\', defaults to \'on\'', - argstr='-volreg %s') + desc="do volume registration on EPI dataset before alignment" + "should be 'on' or 'off', defaults to 'on'", + argstr="-volreg %s", + ) tshift = traits.Enum( - 'on', - 'off', + "on", + "off", usedefault=True, - desc='do time shifting of EPI dataset before alignment' - 'should be \'on\' or \'off\', defaults to \'on\'', - argstr='-tshift 
%s') + desc="do time shifting of EPI dataset before alignment" + "should be 'on' or 'off', defaults to 'on'", + argstr="-tshift %s", + ) class AlignEpiAnatPyOutputSpec(TraitedSpec): - anat_al_orig = File( - desc="A version of the anatomy that is aligned to the EPI") - epi_al_orig = File( - desc="A version of the EPI dataset aligned to the anatomy") + anat_al_orig = File(desc="A version of the anatomy that is aligned to the EPI") + epi_al_orig = File(desc="A version of the EPI dataset aligned to the anatomy") epi_tlrc_al = File( - desc="A version of the EPI dataset aligned to a standard template") + desc="A version of the EPI dataset aligned to a standard template" + ) anat_al_mat = File(desc="matrix to align anatomy to the EPI") epi_al_mat = File(desc="matrix to align EPI to anatomy") epi_vr_al_mat = File(desc="matrix to volume register EPI") - epi_reg_al_mat = File( - desc="matrix to volume register and align epi to anatomy") - epi_al_tlrc_mat = File(desc="matrix to volume register and align epi" - "to anatomy and put into standard space") - epi_vr_motion = File(desc="motion parameters from EPI time-series" - "registration (tsh included in name if slice" - "timing correction is also included).") + epi_reg_al_mat = File(desc="matrix to volume register and align epi to anatomy") + epi_al_tlrc_mat = File( + desc="matrix to volume register and align epi" + "to anatomy and put into standard space" + ) + epi_vr_motion = File( + desc="motion parameters from EPI time-series" + "registration (tsh included in name if slice" + "timing correction is also included)." + ) skullstrip = File(desc="skull-stripped (not aligned) volume") @@ -158,7 +184,8 @@ class AlignEpiAnatPy(AFNIPythonCommand): 'python2 ...align_epi_anat.py -anat structural.nii -epi_base 0 -epi_strip 3dAutomask -epi functional.nii -save_skullstrip -suffix _al -tshift off -volreg off' >>> res = allineate.run() # doctest: +SKIP """ - _cmd = 'align_epi_anat.py' + + _cmd = "align_epi_anat.py" input_spec = AlignEpiAnatPyInputSpec output_spec = AlignEpiAnatPyOutputSpec @@ -166,288 +193,350 @@ def _list_outputs(self): outputs = self.output_spec().get() anat_prefix = self._gen_fname(self.inputs.anat) epi_prefix = self._gen_fname(self.inputs.in_file) - if '+' in anat_prefix: - anat_prefix = ''.join(anat_prefix.split('+')[:-1]) - if '+' in epi_prefix: - epi_prefix = ''.join(epi_prefix.split('+')[:-1]) + if "+" in anat_prefix: + anat_prefix = "".join(anat_prefix.split("+")[:-1]) + if "+" in epi_prefix: + epi_prefix = "".join(epi_prefix.split("+")[:-1]) outputtype = self.inputs.outputtype - if outputtype == 'AFNI': - ext = '.HEAD' + if outputtype == "AFNI": + ext = ".HEAD" else: ext = Info.output_type_to_ext(outputtype) - matext = '.1D' + matext = ".1D" suffix = self.inputs.suffix if self.inputs.anat2epi: - outputs['anat_al_orig'] = self._gen_fname( - anat_prefix, suffix=suffix + '+orig', ext=ext) - outputs['anat_al_mat'] = self._gen_fname( - anat_prefix, suffix=suffix + '_mat.aff12', ext=matext) + outputs["anat_al_orig"] = self._gen_fname( + anat_prefix, suffix=suffix + "+orig", ext=ext + ) + outputs["anat_al_mat"] = self._gen_fname( + anat_prefix, suffix=suffix + "_mat.aff12", ext=matext + ) if self.inputs.epi2anat: - outputs['epi_al_orig'] = self._gen_fname( - epi_prefix, suffix=suffix + '+orig', ext=ext) - outputs['epi_al_mat'] = self._gen_fname( - epi_prefix, suffix=suffix + '_mat.aff12', ext=matext) - if self.inputs.volreg == 'on': - outputs['epi_vr_al_mat'] = self._gen_fname( - epi_prefix, suffix='_vr' + suffix + '_mat.aff12', ext=matext) - 
if self.inputs.tshift == 'on': - outputs['epi_vr_motion'] = self._gen_fname( - epi_prefix, suffix='tsh_vr_motion', ext=matext) - elif self.inputs.tshift == 'off': - outputs['epi_vr_motion'] = self._gen_fname( - epi_prefix, suffix='vr_motion', ext=matext) - if self.inputs.volreg == 'on' and self.inputs.epi2anat: - outputs['epi_reg_al_mat'] = self._gen_fname( - epi_prefix, suffix='_reg' + suffix + '_mat.aff12', ext=matext) + outputs["epi_al_orig"] = self._gen_fname( + epi_prefix, suffix=suffix + "+orig", ext=ext + ) + outputs["epi_al_mat"] = self._gen_fname( + epi_prefix, suffix=suffix + "_mat.aff12", ext=matext + ) + if self.inputs.volreg == "on": + outputs["epi_vr_al_mat"] = self._gen_fname( + epi_prefix, suffix="_vr" + suffix + "_mat.aff12", ext=matext + ) + if self.inputs.tshift == "on": + outputs["epi_vr_motion"] = self._gen_fname( + epi_prefix, suffix="tsh_vr_motion", ext=matext + ) + elif self.inputs.tshift == "off": + outputs["epi_vr_motion"] = self._gen_fname( + epi_prefix, suffix="vr_motion", ext=matext + ) + if self.inputs.volreg == "on" and self.inputs.epi2anat: + outputs["epi_reg_al_mat"] = self._gen_fname( + epi_prefix, suffix="_reg" + suffix + "_mat.aff12", ext=matext + ) if self.inputs.save_skullstrip: outputs.skullstrip = self._gen_fname( - anat_prefix, suffix='_ns' + '+orig', ext=ext) + anat_prefix, suffix="_ns" + "+orig", ext=ext + ) return outputs class AllineateInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dAllineate', - argstr='-source %s', + desc="input file to 3dAllineate", + argstr="-source %s", mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) reference = File( exists=True, - argstr='-base %s', - desc='file to be used as reference, the first volume will be used if ' - 'not given the reference will be the first volume of in_file.') + argstr="-base %s", + desc="file to be used as reference, the first volume will be used if " + "not given the reference will be the first volume of in_file.", + ) out_file = File( - desc='output file from 3dAllineate', - argstr='-prefix %s', - name_template='%s_allineate', - name_source='in_file', + desc="output file from 3dAllineate", + argstr="-prefix %s", + name_template="%s_allineate", + name_source="in_file", hash_files=False, - xor=['allcostx']) + xor=["allcostx"], + ) out_param_file = File( - argstr='-1Dparam_save %s', - desc='Save the warp parameters in ASCII (.1D) format.', - xor=['in_param_file', 'allcostx']) + argstr="-1Dparam_save %s", + desc="Save the warp parameters in ASCII (.1D) format.", + xor=["in_param_file", "allcostx"], + ) in_param_file = File( exists=True, - argstr='-1Dparam_apply %s', - desc='Read warp parameters from file and apply them to ' - 'the source dataset, and produce a new dataset', - xor=['out_param_file']) + argstr="-1Dparam_apply %s", + desc="Read warp parameters from file and apply them to " + "the source dataset, and produce a new dataset", + xor=["out_param_file"], + ) out_matrix = File( - argstr='-1Dmatrix_save %s', - desc='Save the transformation matrix for each volume.', - xor=['in_matrix', 'allcostx']) + argstr="-1Dmatrix_save %s", + desc="Save the transformation matrix for each volume.", + xor=["in_matrix", "allcostx"], + ) in_matrix = File( - desc='matrix to align input file', - argstr='-1Dmatrix_apply %s', + desc="matrix to align input file", + argstr="-1Dmatrix_apply %s", position=-3, - xor=['out_matrix']) + xor=["out_matrix"], + ) overwrite = traits.Bool( - desc='overwrite output file if it already exists', argstr='-overwrite') + desc="overwrite 
output file if it already exists", argstr="-overwrite" + ) allcostx = File( - desc= - 'Compute and print ALL available cost functionals for the un-warped inputs' - 'AND THEN QUIT. If you use this option none of the other expected outputs will be produced', - argstr='-allcostx |& tee %s', + desc="Compute and print ALL available cost functionals for the un-warped inputs" + "AND THEN QUIT. If you use this option none of the other expected outputs will be produced", + argstr="-allcostx |& tee %s", position=-1, - xor=['out_file', 'out_matrix', 'out_param_file', 'out_weight_file']) + xor=["out_file", "out_matrix", "out_param_file", "out_weight_file"], + ) _cost_funcs = [ - 'leastsq', 'ls', 'mutualinfo', 'mi', 'corratio_mul', 'crM', - 'norm_mutualinfo', 'nmi', 'hellinger', 'hel', 'corratio_add', 'crA', - 'corratio_uns', 'crU' + "leastsq", + "ls", + "mutualinfo", + "mi", + "corratio_mul", + "crM", + "norm_mutualinfo", + "nmi", + "hellinger", + "hel", + "corratio_add", + "crA", + "corratio_uns", + "crU", ] cost = traits.Enum( *_cost_funcs, - argstr='-cost %s', - desc='Defines the \'cost\' function that defines the matching between ' - 'the source and the base') - _interp_funcs = [ - 'nearestneighbour', 'linear', 'cubic', 'quintic', 'wsinc5' - ] + argstr="-cost %s", + desc="Defines the 'cost' function that defines the matching between " + "the source and the base" + ) + _interp_funcs = ["nearestneighbour", "linear", "cubic", "quintic", "wsinc5"] interpolation = traits.Enum( *_interp_funcs[:-1], - argstr='-interp %s', - desc='Defines interpolation method to use during matching') + argstr="-interp %s", + desc="Defines interpolation method to use during matching" + ) final_interpolation = traits.Enum( *_interp_funcs, - argstr='-final %s', - desc='Defines interpolation method used to create the output dataset') + argstr="-final %s", + desc="Defines interpolation method used to create the output dataset" + ) # TECHNICAL OPTIONS (used for fine control of the program): nmatch = traits.Int( - argstr='-nmatch %d', - desc='Use at most n scattered points to match the datasets.') + argstr="-nmatch %d", + desc="Use at most n scattered points to match the datasets.", + ) no_pad = traits.Bool( - argstr='-nopad', desc='Do not use zero-padding on the base image.') + argstr="-nopad", desc="Do not use zero-padding on the base image." + ) zclip = traits.Bool( - argstr='-zclip', - desc='Replace negative values in the input datasets (source & base) ' - 'with zero.') + argstr="-zclip", + desc="Replace negative values in the input datasets (source & base) " + "with zero.", + ) convergence = traits.Float( - argstr='-conv %f', - desc='Convergence test in millimeters (default 0.05mm).') - usetemp = traits.Bool(argstr='-usetemp', desc='temporary file use') + argstr="-conv %f", desc="Convergence test in millimeters (default 0.05mm)." + ) + usetemp = traits.Bool(argstr="-usetemp", desc="temporary file use") check = traits.List( traits.Enum(*_cost_funcs), - argstr='-check %s', - desc='After cost functional optimization is done, start at the final ' - 'parameters and RE-optimize using this new cost functions. If ' - 'the results are too different, a warning message will be ' - 'printed. However, the final parameters from the original ' - 'optimization will be used to create the output dataset.') + argstr="-check %s", + desc="After cost functional optimization is done, start at the final " + "parameters and RE-optimize using this new cost functions. If " + "the results are too different, a warning message will be " + "printed. 
However, the final parameters from the original " + "optimization will be used to create the output dataset.", + ) # ** PARAMETERS THAT AFFECT THE COST OPTIMIZATION STRATEGY ** one_pass = traits.Bool( - argstr='-onepass', - desc='Use only the refining pass -- do not try a coarse resolution ' - 'pass first. Useful if you know that only small amounts of ' - 'image alignment are needed.') + argstr="-onepass", + desc="Use only the refining pass -- do not try a coarse resolution " + "pass first. Useful if you know that only small amounts of " + "image alignment are needed.", + ) two_pass = traits.Bool( - argstr='-twopass', - desc='Use a two pass alignment strategy for all volumes, searching ' - 'for a large rotation+shift and then refining the alignment.') + argstr="-twopass", + desc="Use a two pass alignment strategy for all volumes, searching " + "for a large rotation+shift and then refining the alignment.", + ) two_blur = traits.Float( - argstr='-twoblur %f', - desc='Set the blurring radius for the first pass in mm.') + argstr="-twoblur %f", desc="Set the blurring radius for the first pass in mm." + ) two_first = traits.Bool( - argstr='-twofirst', - desc='Use -twopass on the first image to be registered, and ' - 'then on all subsequent images from the source dataset, ' - 'use results from the first image\'s coarse pass to start ' - 'the fine pass.') + argstr="-twofirst", + desc="Use -twopass on the first image to be registered, and " + "then on all subsequent images from the source dataset, " + "use results from the first image's coarse pass to start " + "the fine pass.", + ) two_best = traits.Int( - argstr='-twobest %d', - desc='In the coarse pass, use the best \'bb\' set of initial' - 'points to search for the starting point for the fine' - 'pass. If bb==0, then no search is made for the best' - 'starting point, and the identity transformation is' - 'used as the starting point. [Default=5; min=0 max=11]') + argstr="-twobest %d", + desc="In the coarse pass, use the best 'bb' set of initial" + "points to search for the starting point for the fine" + "pass. If bb==0, then no search is made for the best" + "starting point, and the identity transformation is" + "used as the starting point. [Default=5; min=0 max=11]", + ) fine_blur = traits.Float( - argstr='-fineblur %f', - desc='Set the blurring radius to use in the fine resolution ' - 'pass to \'x\' mm. A small amount (1-2 mm?) of blurring at ' - 'the fine step may help with convergence, if there is ' - 'some problem, especially if the base volume is very noisy. ' - '[Default == 0 mm = no blurring at the final alignment pass]') + argstr="-fineblur %f", + desc="Set the blurring radius to use in the fine resolution " + "pass to 'x' mm. A small amount (1-2 mm?) of blurring at " + "the fine step may help with convergence, if there is " + "some problem, especially if the base volume is very noisy. 
" + "[Default == 0 mm = no blurring at the final alignment pass]", + ) center_of_mass = Str( - argstr='-cmass%s', - desc='Use the center-of-mass calculation to bracket the shifts.') + argstr="-cmass%s", + desc="Use the center-of-mass calculation to bracket the shifts.", + ) autoweight = Str( - argstr='-autoweight%s', - desc='Compute a weight function using the 3dAutomask ' - 'algorithm plus some blurring of the base image.') + argstr="-autoweight%s", + desc="Compute a weight function using the 3dAutomask " + "algorithm plus some blurring of the base image.", + ) automask = traits.Int( - argstr='-automask+%d', - desc='Compute a mask function, set a value for dilation or 0.') + argstr="-automask+%d", + desc="Compute a mask function, set a value for dilation or 0.", + ) autobox = traits.Bool( - argstr='-autobox', - desc='Expand the -automask function to enclose a rectangular ' - 'box that holds the irregular mask.') + argstr="-autobox", + desc="Expand the -automask function to enclose a rectangular " + "box that holds the irregular mask.", + ) nomask = traits.Bool( - argstr='-nomask', - desc='Don\'t compute the autoweight/mask; if -weight is not ' - 'also used, then every voxel will be counted equally.') + argstr="-nomask", + desc="Don't compute the autoweight/mask; if -weight is not " + "also used, then every voxel will be counted equally.", + ) weight_file = File( - argstr='-weight %s', + argstr="-weight %s", exists=True, - deprecated='1.0.0', - new_name='weight', - desc='Set the weighting for each voxel in the base dataset; ' - 'larger weights mean that voxel count more in the cost function. ' - 'Must be defined on the same grid as the base dataset') + deprecated="1.0.0", + new_name="weight", + desc="Set the weighting for each voxel in the base dataset; " + "larger weights mean that voxel count more in the cost function. " + "Must be defined on the same grid as the base dataset", + ) weight = traits.Either( File(exists=True), traits.Float(), - argstr='-weight %s', - desc='Set the weighting for each voxel in the base dataset; ' - 'larger weights mean that voxel count more in the cost function. ' - 'If an image file is given, the volume must be defined on the ' - 'same grid as the base dataset') + argstr="-weight %s", + desc="Set the weighting for each voxel in the base dataset; " + "larger weights mean that voxel count more in the cost function. 
" + "If an image file is given, the volume must be defined on the " + "same grid as the base dataset", + ) out_weight_file = File( - argstr='-wtprefix %s', - desc='Write the weight volume to disk as a dataset', - xor=['allcostx']) + argstr="-wtprefix %s", + desc="Write the weight volume to disk as a dataset", + xor=["allcostx"], + ) source_mask = File( - exists=True, argstr='-source_mask %s', desc='mask the input dataset') + exists=True, argstr="-source_mask %s", desc="mask the input dataset" + ) source_automask = traits.Int( - argstr='-source_automask+%d', - desc='Automatically mask the source dataset with dilation or 0.') + argstr="-source_automask+%d", + desc="Automatically mask the source dataset with dilation or 0.", + ) warp_type = traits.Enum( - 'shift_only', - 'shift_rotate', - 'shift_rotate_scale', - 'affine_general', - argstr='-warp %s', - desc='Set the warp type.') + "shift_only", + "shift_rotate", + "shift_rotate_scale", + "affine_general", + argstr="-warp %s", + desc="Set the warp type.", + ) warpfreeze = traits.Bool( - argstr='-warpfreeze', - desc='Freeze the non-rigid body parameters after first volume.') + argstr="-warpfreeze", + desc="Freeze the non-rigid body parameters after first volume.", + ) replacebase = traits.Bool( - argstr='-replacebase', - desc='If the source has more than one volume, then after the first ' - 'volume is aligned to the base.') + argstr="-replacebase", + desc="If the source has more than one volume, then after the first " + "volume is aligned to the base.", + ) replacemeth = traits.Enum( *_cost_funcs, - argstr='-replacemeth %s', - desc='After first volume is aligned, switch method for later volumes. ' - 'For use with \'-replacebase\'.') + argstr="-replacemeth %s", + desc="After first volume is aligned, switch method for later volumes. " + "For use with '-replacebase'." + ) epi = traits.Bool( - argstr='-EPI', - desc='Treat the source dataset as being composed of warped ' - 'EPI slices, and the base as comprising anatomically ' - '\'true\' images. Only phase-encoding direction image ' - 'shearing and scaling will be allowed with this option.') + argstr="-EPI", + desc="Treat the source dataset as being composed of warped " + "EPI slices, and the base as comprising anatomically " + "'true' images. Only phase-encoding direction image " + "shearing and scaling will be allowed with this option.", + ) maxrot = traits.Float( - argstr='-maxrot %f', desc='Maximum allowed rotation in degrees.') - maxshf = traits.Float( - argstr='-maxshf %f', desc='Maximum allowed shift in mm.') - maxscl = traits.Float( - argstr='-maxscl %f', desc='Maximum allowed scaling factor.') - maxshr = traits.Float( - argstr='-maxshr %f', desc='Maximum allowed shearing factor.') + argstr="-maxrot %f", desc="Maximum allowed rotation in degrees." 
+ ) + maxshf = traits.Float(argstr="-maxshf %f", desc="Maximum allowed shift in mm.") + maxscl = traits.Float(argstr="-maxscl %f", desc="Maximum allowed scaling factor.") + maxshr = traits.Float(argstr="-maxshr %f", desc="Maximum allowed shearing factor.") master = File( exists=True, - argstr='-master %s', - desc='Write the output dataset on the same grid as this file.') + argstr="-master %s", + desc="Write the output dataset on the same grid as this file.", + ) newgrid = traits.Float( - argstr='-newgrid %f', - desc='Write the output dataset using isotropic grid spacing in mm.') + argstr="-newgrid %f", + desc="Write the output dataset using isotropic grid spacing in mm.", + ) # Non-linear experimental _nwarp_types = [ - 'bilinear', 'cubic', 'quintic', 'heptic', 'nonic', 'poly3', 'poly5', - 'poly7', 'poly9' + "bilinear", + "cubic", + "quintic", + "heptic", + "nonic", + "poly3", + "poly5", + "poly7", + "poly9", ] # same non-hellenistic nwarp = traits.Enum( *_nwarp_types, - argstr='-nwarp %s', - desc='Experimental nonlinear warping: bilinear or legendre poly.') - _dirs = ['X', 'Y', 'Z', 'I', 'J', 'K'] + argstr="-nwarp %s", + desc="Experimental nonlinear warping: bilinear or legendre poly." + ) + _dirs = ["X", "Y", "Z", "I", "J", "K"] nwarp_fixmot = traits.List( traits.Enum(*_dirs), - argstr='-nwarp_fixmot%s...', - desc='To fix motion along directions.') + argstr="-nwarp_fixmot%s...", + desc="To fix motion along directions.", + ) nwarp_fixdep = traits.List( traits.Enum(*_dirs), - argstr='-nwarp_fixdep%s...', - desc='To fix non-linear warp dependency along directions.') - verbose = traits.Bool( - argstr='-verb', desc='Print out verbose progress reports.') + argstr="-nwarp_fixdep%s...", + desc="To fix non-linear warp dependency along directions.", + ) + verbose = traits.Bool(argstr="-verb", desc="Print out verbose progress reports.") quiet = traits.Bool( - argstr='-quiet', desc="Don't print out verbose progress reports.") + argstr="-quiet", desc="Don't print out verbose progress reports." 
+ ) class AllineateOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='output image file name') - out_matrix = File(exists=True, desc='matrix to align input file') - out_param_file = File(exists=True, desc='warp parameters') - out_weight_file = File(exists=True, desc='weight volume') + out_file = File(exists=True, desc="output image file name") + out_matrix = File(exists=True, desc="matrix to align input file") + out_param_file = File(exists=True, desc="warp parameters") + out_weight_file = File(exists=True, desc="weight volume") allcostx = File( - desc= - 'Compute and print ALL available cost functionals for the un-warped inputs' + desc="Compute and print ALL available cost functionals for the un-warped inputs" ) @@ -486,7 +575,7 @@ class Allineate(AFNICommand): >>> res = allineate.run() # doctest: +SKIP """ - _cmd = '3dAllineate' + _cmd = "3dAllineate" input_spec = AllineateInputSpec output_spec = AllineateOutputSpec @@ -494,58 +583,63 @@ def _list_outputs(self): outputs = super(Allineate, self)._list_outputs() if self.inputs.out_weight_file: - outputs['out_weight_file'] = op.abspath( - self.inputs.out_weight_file) + outputs["out_weight_file"] = op.abspath(self.inputs.out_weight_file) if self.inputs.out_matrix: path, base, ext = split_filename(self.inputs.out_matrix) - if ext.lower() not in ['.1d', '.1D']: - outputs['out_matrix'] = self._gen_fname( - self.inputs.out_matrix, suffix='.aff12.1D') + if ext.lower() not in [".1d", ".1D"]: + outputs["out_matrix"] = self._gen_fname( + self.inputs.out_matrix, suffix=".aff12.1D" + ) else: - outputs['out_matrix'] = op.abspath(self.inputs.out_matrix) + outputs["out_matrix"] = op.abspath(self.inputs.out_matrix) if self.inputs.out_param_file: path, base, ext = split_filename(self.inputs.out_param_file) - if ext.lower() not in ['.1d', '.1D']: - outputs['out_param_file'] = self._gen_fname( - self.inputs.out_param_file, suffix='.param.1D') + if ext.lower() not in [".1d", ".1D"]: + outputs["out_param_file"] = self._gen_fname( + self.inputs.out_param_file, suffix=".param.1D" + ) else: - outputs['out_param_file'] = op.abspath( - self.inputs.out_param_file) + outputs["out_param_file"] = op.abspath(self.inputs.out_param_file) if self.inputs.allcostx: - outputs['allcostX'] = os.path.abspath(self.inputs.allcostx) + outputs["allcostX"] = os.path.abspath(self.inputs.allcostx) return outputs class AutoTcorrelateInputSpec(AFNICommandInputSpec): in_file = File( - desc='timeseries x space (volume or surface) file', - argstr='%s', + desc="timeseries x space (volume or surface) file", + argstr="%s", position=-1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) polort = traits.Int( - desc='Remove polynomical trend of order m or -1 for no detrending', - argstr='-polort %d') - eta2 = traits.Bool(desc='eta^2 similarity', argstr='-eta2') - mask = File(exists=True, desc='mask of voxels', argstr='-mask %s') + desc="Remove polynomical trend of order m or -1 for no detrending", + argstr="-polort %d", + ) + eta2 = traits.Bool(desc="eta^2 similarity", argstr="-eta2") + mask = File(exists=True, desc="mask of voxels", argstr="-mask %s") mask_only_targets = traits.Bool( - desc='use mask only on targets voxels', - argstr='-mask_only_targets', - xor=['mask_source']) + desc="use mask only on targets voxels", + argstr="-mask_only_targets", + xor=["mask_source"], + ) mask_source = File( exists=True, - desc='mask for source voxels', - argstr='-mask_source %s', - xor=['mask_only_targets']) + desc="mask for source voxels", + argstr="-mask_source %s", + 
xor=["mask_only_targets"], + ) out_file = File( - name_template='%s_similarity_matrix.1D', - desc='output image file name', - argstr='-prefix %s', - name_source='in_file') + name_template="%s_similarity_matrix.1D", + desc="output image file name", + argstr="-prefix %s", + name_source="in_file", + ) class AutoTcorrelate(AFNICommand): @@ -573,44 +667,48 @@ class AutoTcorrelate(AFNICommand): input_spec = AutoTcorrelateInputSpec output_spec = AFNICommandOutputSpec - _cmd = '3dAutoTcorrelate' + _cmd = "3dAutoTcorrelate" def _overload_extension(self, value, name=None): path, base, ext = split_filename(value) - if ext.lower() not in ['.1d', '.1D', '.nii.gz', '.nii']: - ext = ext + '.1D' + if ext.lower() not in [".1d", ".1D", ".nii.gz", ".nii"]: + ext = ext + ".1D" return os.path.join(path, base + ext) class AutomaskInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dAutomask', - argstr='%s', + desc="input file to 3dAutomask", + argstr="%s", position=-1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_mask', - desc='output image file name', - argstr='-prefix %s', - name_source='in_file') + name_template="%s_mask", + desc="output image file name", + argstr="-prefix %s", + name_source="in_file", + ) brain_file = File( - name_template='%s_masked', - desc='output file from 3dAutomask', - argstr='-apply_prefix %s', - name_source='in_file') + name_template="%s_masked", + desc="output file from 3dAutomask", + argstr="-apply_prefix %s", + name_source="in_file", + ) clfrac = traits.Float( - desc='sets the clip level fraction (must be 0.1-0.9). A small value ' - 'will tend to make the mask larger [default = 0.5].', - argstr='-clfrac %s') - dilate = traits.Int(desc='dilate the mask outwards', argstr='-dilate %s') - erode = traits.Int(desc='erode the mask inwards', argstr='-erode %s') + desc="sets the clip level fraction (must be 0.1-0.9). A small value " + "will tend to make the mask larger [default = 0.5].", + argstr="-clfrac %s", + ) + dilate = traits.Int(desc="dilate the mask outwards", argstr="-dilate %s") + erode = traits.Int(desc="erode the mask inwards", argstr="-erode %s") class AutomaskOutputSpec(TraitedSpec): - out_file = File(desc='mask file', exists=True) - brain_file = File(desc='brain file (skull stripped)', exists=True) + out_file = File(desc="mask file", exists=True) + brain_file = File(desc="brain file (skull stripped)", exists=True) class Automask(AFNICommand): @@ -633,81 +731,85 @@ class Automask(AFNICommand): """ - _cmd = '3dAutomask' + _cmd = "3dAutomask" input_spec = AutomaskInputSpec output_spec = AutomaskOutputSpec class AutoTLRCInputSpec(CommandLineInputSpec): outputtype = traits.Enum( - 'AFNI', list(Info.ftypes.keys()), desc='AFNI output filetype') + "AFNI", list(Info.ftypes.keys()), desc="AFNI output filetype" + ) in_file = File( - desc='Original anatomical volume (+orig).' - 'The skull is removed by this script' - 'unless instructed otherwise (-no_ss).', - argstr='-input %s', + desc="Original anatomical volume (+orig)." + "The skull is removed by this script" + "unless instructed otherwise (-no_ss).", + argstr="-input %s", mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) base = traits.Str( - desc=' Reference anatomical volume' - ' Usually this volume is in some standard space like' - ' TLRC or MNI space and with afni dataset view of' - ' (+tlrc).' - ' Preferably, this reference volume should have had' - ' the skull removed but that is not mandatory.' 
- ' AFNI\'s distribution contains several templates.' + desc=" Reference anatomical volume" + " Usually this volume is in some standard space like" + " TLRC or MNI space and with afni dataset view of" + " (+tlrc)." + " Preferably, this reference volume should have had" + " the skull removed but that is not mandatory." + " AFNI's distribution contains several templates." ' For a longer list, use "whereami -show_templates"' - 'TT_N27+tlrc --> Single subject, skull stripped volume.' - ' This volume is also known as ' - ' N27_SurfVol_NoSkull+tlrc elsewhere in ' - ' AFNI and SUMA land.' - ' (www.loni.ucla.edu, www.bic.mni.mcgill.ca)' - ' This template has a full set of FreeSurfer' - ' (surfer.nmr.mgh.harvard.edu)' - ' surface models that can be used in SUMA. ' - ' For details, see Talairach-related link:' - ' https://afni.nimh.nih.gov/afni/suma' - 'TT_icbm452+tlrc --> Average volume of 452 normal brains.' - ' Skull Stripped. (www.loni.ucla.edu)' - 'TT_avg152T1+tlrc --> Average volume of 152 normal brains.' - ' Skull Stripped.(www.bic.mni.mcgill.ca)' - 'TT_EPI+tlrc --> EPI template from spm2, masked as TT_avg152T1' - ' TT_avg152 and TT_EPI volume sources are from' - ' SPM\'s distribution. (www.fil.ion.ucl.ac.uk/spm/)' - 'If you do not specify a path for the template, the script' - 'will attempt to locate the template AFNI\'s binaries directory.' - 'NOTE: These datasets have been slightly modified from' - ' their original size to match the standard TLRC' - ' dimensions (Jean Talairach and Pierre Tournoux' - ' Co-Planar Stereotaxic Atlas of the Human Brain' - ' Thieme Medical Publishers, New York, 1988). ' - ' That was done for internal consistency in AFNI.' - ' You may use the original form of these' - ' volumes if you choose but your TLRC coordinates' - ' will not be consistent with AFNI\'s TLRC database' - ' (San Antonio Talairach Daemon database), for example.', + "TT_N27+tlrc --> Single subject, skull stripped volume." + " This volume is also known as " + " N27_SurfVol_NoSkull+tlrc elsewhere in " + " AFNI and SUMA land." + " (www.loni.ucla.edu, www.bic.mni.mcgill.ca)" + " This template has a full set of FreeSurfer" + " (surfer.nmr.mgh.harvard.edu)" + " surface models that can be used in SUMA. " + " For details, see Talairach-related link:" + " https://afni.nimh.nih.gov/afni/suma" + "TT_icbm452+tlrc --> Average volume of 452 normal brains." + " Skull Stripped. (www.loni.ucla.edu)" + "TT_avg152T1+tlrc --> Average volume of 152 normal brains." + " Skull Stripped.(www.bic.mni.mcgill.ca)" + "TT_EPI+tlrc --> EPI template from spm2, masked as TT_avg152T1" + " TT_avg152 and TT_EPI volume sources are from" + " SPM's distribution. (www.fil.ion.ucl.ac.uk/spm/)" + "If you do not specify a path for the template, the script" + "will attempt to locate the template AFNI's binaries directory." + "NOTE: These datasets have been slightly modified from" + " their original size to match the standard TLRC" + " dimensions (Jean Talairach and Pierre Tournoux" + " Co-Planar Stereotaxic Atlas of the Human Brain" + " Thieme Medical Publishers, New York, 1988). " + " That was done for internal consistency in AFNI." 
+ " You may use the original form of these" + " volumes if you choose but your TLRC coordinates" + " will not be consistent with AFNI's TLRC database" + " (San Antonio Talairach Daemon database), for example.", mandatory=True, - argstr='-base %s') + argstr="-base %s", + ) no_ss = traits.Bool( - desc='Do not strip skull of input data set' - '(because skull has already been removed' - 'or because template still has the skull)' - 'NOTE: The -no_ss option is not all that optional.' - ' Here is a table of when you should and should not use -no_ss' - ' Template Template' - ' WITH skull WITHOUT skull' - ' Dset.' - ' WITH skull -no_ss xxx ' - ' ' - ' WITHOUT skull No Cigar -no_ss' - ' ' - ' Template means: Your template of choice' - ' Dset. means: Your anatomical dataset' - ' -no_ss means: Skull stripping should not be attempted on Dset' - ' xxx means: Don\'t put anything, the script will strip Dset' - ' No Cigar means: Don\'t try that combination, it makes no sense.', - argstr='-no_ss') + desc="Do not strip skull of input data set" + "(because skull has already been removed" + "or because template still has the skull)" + "NOTE: The -no_ss option is not all that optional." + " Here is a table of when you should and should not use -no_ss" + " Template Template" + " WITH skull WITHOUT skull" + " Dset." + " WITH skull -no_ss xxx " + " " + " WITHOUT skull No Cigar -no_ss" + " " + " Template means: Your template of choice" + " Dset. means: Your anatomical dataset" + " -no_ss means: Skull stripping should not be attempted on Dset" + " xxx means: Don't put anything, the script will strip Dset" + " No Cigar means: Don't try that combination, it makes no sense.", + argstr="-no_ss", + ) class AutoTLRC(AFNICommand): @@ -728,85 +830,97 @@ class AutoTLRC(AFNICommand): >>> res = autoTLRC.run() # doctest: +SKIP """ - _cmd = '@auto_tlrc' + + _cmd = "@auto_tlrc" input_spec = AutoTLRCInputSpec output_spec = AFNICommandOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - ext = '.HEAD' - outputs['out_file'] = os.path.abspath( - self._gen_fname(self.inputs.in_file, suffix='+tlrc') + ext) + ext = ".HEAD" + outputs["out_file"] = os.path.abspath( + self._gen_fname(self.inputs.in_file, suffix="+tlrc") + ext + ) return outputs class BandpassInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dBandpass', - argstr='%s', + desc="input file to 3dBandpass", + argstr="%s", position=-1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_bp', - desc='output file from 3dBandpass', - argstr='-prefix %s', + name_template="%s_bp", + desc="output file from 3dBandpass", + argstr="-prefix %s", position=1, - name_source='in_file') - lowpass = traits.Float( - desc='lowpass', argstr='%f', position=-2, mandatory=True) - highpass = traits.Float( - desc='highpass', argstr='%f', position=-3, mandatory=True) - mask = File(desc='mask file', position=2, argstr='-mask %s', exists=True) + name_source="in_file", + ) + lowpass = traits.Float(desc="lowpass", argstr="%f", position=-2, mandatory=True) + highpass = traits.Float(desc="highpass", argstr="%f", position=-3, mandatory=True) + mask = File(desc="mask file", position=2, argstr="-mask %s", exists=True) despike = traits.Bool( - argstr='-despike', - desc='Despike each time series before other processing. Hopefully, ' - 'you don\'t actually need to do this, which is why it is ' - 'optional.') + argstr="-despike", + desc="Despike each time series before other processing. 
Hopefully, " + "you don't actually need to do this, which is why it is " + "optional.", + ) orthogonalize_file = InputMultiPath( File(exists=True), - argstr='-ort %s', - desc='Also orthogonalize input to columns in f.1D. Multiple \'-ort\' ' - 'options are allowed.') + argstr="-ort %s", + desc="Also orthogonalize input to columns in f.1D. Multiple '-ort' " + "options are allowed.", + ) orthogonalize_dset = File( exists=True, - argstr='-dsort %s', - desc='Orthogonalize each voxel to the corresponding voxel time series ' - 'in dataset \'fset\', which must have the same spatial and ' - 'temporal grid structure as the main input dataset. At present, ' - 'only one \'-dsort\' option is allowed.') + argstr="-dsort %s", + desc="Orthogonalize each voxel to the corresponding voxel time series " + "in dataset 'fset', which must have the same spatial and " + "temporal grid structure as the main input dataset. At present, " + "only one '-dsort' option is allowed.", + ) no_detrend = traits.Bool( - argstr='-nodetrend', - desc='Skip the quadratic detrending of the input that occurs before ' - 'the FFT-based bandpassing. You would only want to do this if ' - 'the dataset had been detrended already in some other program.') + argstr="-nodetrend", + desc="Skip the quadratic detrending of the input that occurs before " + "the FFT-based bandpassing. You would only want to do this if " + "the dataset had been detrended already in some other program.", + ) tr = traits.Float( - argstr='-dt %f', - desc='Set time step (TR) in sec [default=from dataset header].') + argstr="-dt %f", desc="Set time step (TR) in sec [default=from dataset header]." + ) nfft = traits.Int( - argstr='-nfft %d', desc='Set the FFT length [must be a legal value].') + argstr="-nfft %d", desc="Set the FFT length [must be a legal value]." + ) normalize = traits.Bool( - argstr='-norm', - desc='Make all output time series have L2 norm = 1 (i.e., sum of ' - 'squares = 1).') + argstr="-norm", + desc="Make all output time series have L2 norm = 1 (i.e., sum of " + "squares = 1).", + ) automask = traits.Bool( - argstr='-automask', desc='Create a mask from the input dataset.') + argstr="-automask", desc="Create a mask from the input dataset." + ) blur = traits.Float( - argstr='-blur %f', - desc='Blur (inside the mask only) with a filter width (FWHM) of ' - '\'fff\' millimeters.') + argstr="-blur %f", + desc="Blur (inside the mask only) with a filter width (FWHM) of " + "'fff' millimeters.", + ) localPV = traits.Float( - argstr='-localPV %f', - desc='Replace each vector by the local Principal Vector (AKA first ' - 'singular vector) from a neighborhood of radius \'rrr\' ' - 'millimeters. Note that the PV time series is L2 normalized. ' - 'This option is mostly for Bob Cox to have fun with.') + argstr="-localPV %f", + desc="Replace each vector by the local Principal Vector (AKA first " + "singular vector) from a neighborhood of radius 'rrr' " + "millimeters. Note that the PV time series is L2 normalized. " + "This option is mostly for Bob Cox to have fun with.", + ) notrans = traits.Bool( - argstr='-notrans', - desc='Don\'t check for initial positive transients in the data. ' - 'The test is a little slow, so skipping it is OK, if you KNOW ' - 'the data time series are transient-free.') + argstr="-notrans", + desc="Don't check for initial positive transients in the data. 
" + "The test is a little slow, so skipping it is OK, if you KNOW " + "the data time series are transient-free.", + ) class Bandpass(AFNICommand): @@ -831,46 +945,52 @@ class Bandpass(AFNICommand): """ - _cmd = '3dBandpass' + _cmd = "3dBandpass" input_spec = BandpassInputSpec output_spec = AFNICommandOutputSpec class BlurInMaskInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dSkullStrip', - argstr='-input %s', + desc="input file to 3dSkullStrip", + argstr="-input %s", position=1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_blur', - desc='output to the file', - argstr='-prefix %s', - name_source='in_file', - position=-1) + name_template="%s_blur", + desc="output to the file", + argstr="-prefix %s", + name_source="in_file", + position=-1, + ) mask = File( - desc='Mask dataset, if desired. Blurring will occur only within the ' - 'mask. Voxels NOT in the mask will be set to zero in the output.', - argstr='-mask %s') + desc="Mask dataset, if desired. Blurring will occur only within the " + "mask. Voxels NOT in the mask will be set to zero in the output.", + argstr="-mask %s", + ) multimask = File( - desc='Multi-mask dataset -- each distinct nonzero value in dataset ' - 'will be treated as a separate mask for blurring purposes.', - argstr='-Mmask %s') + desc="Multi-mask dataset -- each distinct nonzero value in dataset " + "will be treated as a separate mask for blurring purposes.", + argstr="-Mmask %s", + ) automask = traits.Bool( - desc='Create an automask from the input dataset.', argstr='-automask') - fwhm = traits.Float( - desc='fwhm kernel size', argstr='-FWHM %f', mandatory=True) + desc="Create an automask from the input dataset.", argstr="-automask" + ) + fwhm = traits.Float(desc="fwhm kernel size", argstr="-FWHM %f", mandatory=True) preserve = traits.Bool( - desc='Normally, voxels not in the mask will be set to zero in the ' - 'output. If you want the original values in the dataset to be ' - 'preserved in the output, use this option.', - argstr='-preserve') + desc="Normally, voxels not in the mask will be set to zero in the " + "output. 
If you want the original values in the dataset to be " + "preserved in the output, use this option.", + argstr="-preserve", + ) float_out = traits.Bool( - desc='Save dataset as floats, no matter what the input data type is.', - argstr='-float') - options = Str(desc='options', argstr='%s', position=2) + desc="Save dataset as floats, no matter what the input data type is.", + argstr="-float", + ) + options = Str(desc="options", argstr="%s", position=2) class BlurInMask(AFNICommand): @@ -893,34 +1013,39 @@ class BlurInMask(AFNICommand): """ - _cmd = '3dBlurInMask' + _cmd = "3dBlurInMask" input_spec = BlurInMaskInputSpec output_spec = AFNICommandOutputSpec class BlurToFWHMInputSpec(AFNICommandInputSpec): in_file = File( - desc='The dataset that will be smoothed', - argstr='-input %s', + desc="The dataset that will be smoothed", + argstr="-input %s", mandatory=True, - exists=True) + exists=True, + ) automask = traits.Bool( - desc='Create an automask from the input dataset.', argstr='-automask') + desc="Create an automask from the input dataset.", argstr="-automask" + ) fwhm = traits.Float( - desc='Blur until the 3D FWHM reaches this value (in mm)', - argstr='-FWHM %f') + desc="Blur until the 3D FWHM reaches this value (in mm)", argstr="-FWHM %f" + ) fwhmxy = traits.Float( - desc='Blur until the 2D (x,y)-plane FWHM reaches this value (in mm)', - argstr='-FWHMxy %f') + desc="Blur until the 2D (x,y)-plane FWHM reaches this value (in mm)", + argstr="-FWHMxy %f", + ) blurmaster = File( - desc='The dataset whose smoothness controls the process.', - argstr='-blurmaster %s', - exists=True) + desc="The dataset whose smoothness controls the process.", + argstr="-blurmaster %s", + exists=True, + ) mask = File( - desc='Mask dataset, if desired. Voxels NOT in mask will be set to zero ' - 'in output.', - argstr='-mask %s', - exists=True) + desc="Mask dataset, if desired. 
Voxels NOT in mask will be set to zero " + "in output.", + argstr="-mask %s", + exists=True, + ) class BlurToFWHM(AFNICommand): @@ -942,37 +1067,42 @@ class BlurToFWHM(AFNICommand): >>> res = blur.run() # doctest: +SKIP """ - _cmd = '3dBlurToFWHM' + + _cmd = "3dBlurToFWHM" input_spec = BlurToFWHMInputSpec output_spec = AFNICommandOutputSpec class ClipLevelInputSpec(CommandLineInputSpec): in_file = File( - desc='input file to 3dClipLevel', - argstr='%s', + desc="input file to 3dClipLevel", + argstr="%s", position=-1, mandatory=True, - exists=True) + exists=True, + ) mfrac = traits.Float( - desc='Use the number ff instead of 0.50 in the algorithm', - argstr='-mfrac %s', - position=2) + desc="Use the number ff instead of 0.50 in the algorithm", + argstr="-mfrac %s", + position=2, + ) doall = traits.Bool( - desc='Apply the algorithm to each sub-brick separately.', - argstr='-doall', + desc="Apply the algorithm to each sub-brick separately.", + argstr="-doall", position=3, - xor=('grad')) + xor=("grad"), + ) grad = File( - desc='Also compute a \'gradual\' clip level as a function of voxel ' - 'position, and output that to a dataset.', - argstr='-grad %s', + desc="Also compute a 'gradual' clip level as a function of voxel " + "position, and output that to a dataset.", + argstr="-grad %s", position=3, - xor=('doall')) + xor=("doall"), + ) class ClipLevelOutputSpec(TraitedSpec): - clip_val = traits.Float(desc='output') + clip_val = traits.Float(desc="output") class ClipLevel(AFNICommandBase): @@ -993,7 +1123,8 @@ class ClipLevel(AFNICommandBase): >>> res = cliplevel.run() # doctest: +SKIP """ - _cmd = '3dClipLevel' + + _cmd = "3dClipLevel" input_spec = ClipLevelInputSpec output_spec = ClipLevelOutputSpec @@ -1001,16 +1132,16 @@ def aggregate_outputs(self, runtime=None, needed_outputs=None): outputs = self._outputs() - outfile = os.path.join(os.getcwd(), 'stat_result.json') + outfile = os.path.join(os.getcwd(), "stat_result.json") if runtime is None: try: - clip_val = load_json(outfile)['stat'] + clip_val = load_json(outfile)["stat"] except IOError: return self.run().outputs else: clip_val = [] - for line in runtime.stdout.split('\n'): + for line in runtime.stdout.split("\n"): if line: values = line.split() if len(values) > 1: @@ -1031,17 +1162,19 @@ class DegreeCentralityInputSpec(CentralityInputSpec): """ in_file = File( - desc='input file to 3dDegreeCentrality', - argstr='%s', + desc="input file to 3dDegreeCentrality", + argstr="%s", position=-1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) sparsity = traits.Float( - desc='only take the top percent of connections', argstr='-sparsity %f') + desc="only take the top percent of connections", argstr="-sparsity %f" + ) oned_file = Str( - desc='output filepath to text dump of correlation matrix', - argstr='-out1D %s') + desc="output filepath to text dump of correlation matrix", argstr="-out1D %s" + ) class DegreeCentralityOutputSpec(AFNICommandOutputSpec): @@ -1049,9 +1182,10 @@ class DegreeCentralityOutputSpec(AFNICommandOutputSpec): """ oned_file = File( - desc='The text output of the similarity matrix computed after ' - 'thresholding with one-dimensional and ijk voxel indices, ' - 'correlations, image extents, and affine matrix.') + desc="The text output of the similarity matrix computed after " + "thresholding with one-dimensional and ijk voxel indices, " + "correlations, image extents, and affine matrix." 
+ ) class DegreeCentrality(AFNICommand): @@ -1076,7 +1210,7 @@ class DegreeCentrality(AFNICommand): """ - _cmd = '3dDegreeCentrality' + _cmd = "3dDegreeCentrality" input_spec = DegreeCentralityInputSpec output_spec = DegreeCentralityOutputSpec @@ -1088,24 +1222,26 @@ def _list_outputs(self): # Update outputs dictionary if oned file is defined outputs = super(DegreeCentrality, self)._list_outputs() if self.inputs.oned_file: - outputs['oned_file'] = os.path.abspath(self.inputs.oned_file) + outputs["oned_file"] = os.path.abspath(self.inputs.oned_file) return outputs class DespikeInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dDespike', - argstr='%s', + desc="input file to 3dDespike", + argstr="%s", position=-1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_despike', - desc='output image file name', - argstr='-prefix %s', - name_source='in_file') + name_template="%s_despike", + desc="output image file name", + argstr="-prefix %s", + name_source="in_file", + ) class Despike(AFNICommand): @@ -1126,24 +1262,26 @@ class Despike(AFNICommand): """ - _cmd = '3dDespike' + _cmd = "3dDespike" input_spec = DespikeInputSpec output_spec = AFNICommandOutputSpec class DetrendInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dDetrend', - argstr='%s', + desc="input file to 3dDetrend", + argstr="%s", position=-1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_detrend', - desc='output image file name', - argstr='-prefix %s', - name_source='in_file') + name_template="%s_detrend", + desc="output image file name", + argstr="-prefix %s", + name_source="in_file", + ) class Detrend(AFNICommand): @@ -1167,7 +1305,7 @@ class Detrend(AFNICommand): """ - _cmd = '3dDetrend' + _cmd = "3dDetrend" input_spec = DetrendInputSpec output_spec = AFNICommandOutputSpec @@ -1177,43 +1315,52 @@ class ECMInputSpec(CentralityInputSpec): """ in_file = File( - desc='input file to 3dECM', - argstr='%s', + desc="input file to 3dECM", + argstr="%s", position=-1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) sparsity = traits.Float( - desc='only take the top percent of connections', argstr='-sparsity %f') + desc="only take the top percent of connections", argstr="-sparsity %f" + ) full = traits.Bool( - desc='Full power method; enables thresholding; automatically selected ' - 'if -thresh or -sparsity are set', - argstr='-full') + desc="Full power method; enables thresholding; automatically selected " + "if -thresh or -sparsity are set", + argstr="-full", + ) fecm = traits.Bool( - desc='Fast centrality method; substantial speed increase but cannot ' - 'accomodate thresholding; automatically selected if -thresh or ' - '-sparsity are not set', - argstr='-fecm') + desc="Fast centrality method; substantial speed increase but cannot " + "accomodate thresholding; automatically selected if -thresh or " + "-sparsity are not set", + argstr="-fecm", + ) shift = traits.Float( - desc='shift correlation coefficients in similarity matrix to enforce ' - 'non-negativity, s >= 0.0; default = 0.0 for -full, 1.0 for -fecm', - argstr='-shift %f') + desc="shift correlation coefficients in similarity matrix to enforce " + "non-negativity, s >= 0.0; default = 0.0 for -full, 1.0 for -fecm", + argstr="-shift %f", + ) scale = traits.Float( - desc='scale correlation coefficients in similarity matrix to after ' - 'shifting, x >= 0.0; default = 1.0 for -full, 0.5 for -fecm', - 
argstr='-scale %f') + desc="scale correlation coefficients in similarity matrix to after " + "shifting, x >= 0.0; default = 1.0 for -full, 0.5 for -fecm", + argstr="-scale %f", + ) eps = traits.Float( - desc='sets the stopping criterion for the power iteration; ' - 'l2|v_old - v_new| < eps*|v_old|; default = 0.001', - argstr='-eps %f') + desc="sets the stopping criterion for the power iteration; " + "l2|v_old - v_new| < eps*|v_old|; default = 0.001", + argstr="-eps %f", + ) max_iter = traits.Int( - desc='sets the maximum number of iterations to use in the power ' - 'iteration; default = 1000', - argstr='-max_iter %d') + desc="sets the maximum number of iterations to use in the power " + "iteration; default = 1000", + argstr="-max_iter %d", + ) memory = traits.Float( - desc='Limit memory consumption on system by setting the amount of GB ' - 'to limit the algorithm to; default = 2GB', - argstr='-memory %f') + desc="Limit memory consumption on system by setting the amount of GB " + "to limit the algorithm to; default = 2GB", + argstr="-memory %f", + ) class ECM(AFNICommand): @@ -1238,38 +1385,39 @@ class ECM(AFNICommand): """ - _cmd = '3dECM' + _cmd = "3dECM" input_spec = ECMInputSpec output_spec = AFNICommandOutputSpec class FimInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dfim+', - argstr='-input %s', + desc="input file to 3dfim+", + argstr="-input %s", position=1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_fim', - desc='output image file name', - argstr='-bucket %s', - name_source='in_file') + name_template="%s_fim", + desc="output image file name", + argstr="-bucket %s", + name_source="in_file", + ) ideal_file = File( - desc='ideal time series file name', - argstr='-ideal_file %s', + desc="ideal time series file name", + argstr="-ideal_file %s", position=2, mandatory=True, - exists=True) + exists=True, + ) fim_thr = traits.Float( - desc='fim internal mask threshold value', - argstr='-fim_thr %f', - position=3) + desc="fim internal mask threshold value", argstr="-fim_thr %f", position=3 + ) out = Str( - desc='Flag to output the specified parameter', - argstr='-out %s', - position=4) + desc="Flag to output the specified parameter", argstr="-out %s", position=4 + ) class Fim(AFNICommand): @@ -1295,32 +1443,33 @@ class Fim(AFNICommand): """ - _cmd = '3dfim+' + _cmd = "3dfim+" input_spec = FimInputSpec output_spec = AFNICommandOutputSpec class FourierInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dFourier', - argstr='%s', + desc="input file to 3dFourier", + argstr="%s", position=-1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_fourier', - desc='output image file name', - argstr='-prefix %s', - name_source='in_file') - lowpass = traits.Float( - desc='lowpass', argstr='-lowpass %f', mandatory=True) - highpass = traits.Float( - desc='highpass', argstr='-highpass %f', mandatory=True) + name_template="%s_fourier", + desc="output image file name", + argstr="-prefix %s", + name_source="in_file", + ) + lowpass = traits.Float(desc="lowpass", argstr="-lowpass %f", mandatory=True) + highpass = traits.Float(desc="highpass", argstr="-highpass %f", mandatory=True) retrend = traits.Bool( - desc='Any mean and linear trend are removed before filtering. This ' - 'will restore the trend after filtering.', - argstr='-retrend') + desc="Any mean and linear trend are removed before filtering. 
This " + "will restore the trend after filtering.", + argstr="-retrend", + ) class Fourier(AFNICommand): @@ -1345,48 +1494,48 @@ class Fourier(AFNICommand): """ - _cmd = '3dFourier' + _cmd = "3dFourier" input_spec = FourierInputSpec output_spec = AFNICommandOutputSpec class HistInputSpec(CommandLineInputSpec): in_file = File( - desc='input file to 3dHist', - argstr='-input %s', + desc="input file to 3dHist", + argstr="-input %s", position=1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - desc='Write histogram to niml file with this prefix', - name_template='%s_hist', + desc="Write histogram to niml file with this prefix", + name_template="%s_hist", keep_extension=False, - argstr='-prefix %s', - name_source=['in_file']) + argstr="-prefix %s", + name_source=["in_file"], + ) showhist = traits.Bool( - False, - usedefault=True, - desc='write a text visual histogram', - argstr='-showhist') + False, usedefault=True, desc="write a text visual histogram", argstr="-showhist" + ) out_show = File( - name_template='%s_hist.out', - desc='output image file name', + name_template="%s_hist.out", + desc="output image file name", keep_extension=False, - argstr='> %s', - name_source='in_file', - position=-1) - mask = File( - desc='matrix to align input file', argstr='-mask %s', exists=True) - nbin = traits.Int(desc='number of bins', argstr='-nbin %d') - max_value = traits.Float(argstr='-max %f', desc='maximum intensity value') - min_value = traits.Float(argstr='-min %f', desc='minimum intensity value') - bin_width = traits.Float(argstr='-binwidth %f', desc='bin width') + argstr="> %s", + name_source="in_file", + position=-1, + ) + mask = File(desc="matrix to align input file", argstr="-mask %s", exists=True) + nbin = traits.Int(desc="number of bins", argstr="-nbin %d") + max_value = traits.Float(argstr="-max %f", desc="maximum intensity value") + min_value = traits.Float(argstr="-min %f", desc="minimum intensity value") + bin_width = traits.Float(argstr="-binwidth %f", desc="bin width") class HistOutputSpec(TraitedSpec): - out_file = File(desc='output file', exists=True) - out_show = File(desc='output visual histogram') + out_file = File(desc="output file", exists=True) + out_show = File(desc="output visual histogram") class Hist(AFNICommandBase): @@ -1408,7 +1557,7 @@ class Hist(AFNICommandBase): """ - _cmd = '3dHist' + _cmd = "3dHist" input_spec = HistInputSpec output_spec = HistOutputSpec _redirect_x = True @@ -1426,14 +1575,14 @@ def _parse_inputs(self, skip=None): if not self.inputs.showhist: if skip is None: skip = [] - skip += ['out_show'] + skip += ["out_show"] return super(Hist, self)._parse_inputs(skip=skip) def _list_outputs(self): outputs = super(Hist, self)._list_outputs() - outputs['out_file'] += '.niml.hist' + outputs["out_file"] += ".niml.hist" if not self.inputs.showhist: - outputs['out_show'] = Undefined + outputs["out_show"] = Undefined return outputs @@ -1442,12 +1591,13 @@ class LFCDInputSpec(CentralityInputSpec): """ in_file = File( - desc='input file to 3dLFCD', - argstr='%s', + desc="input file to 3dLFCD", + argstr="%s", position=-1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) class LFCD(AFNICommand): @@ -1471,33 +1621,32 @@ class LFCD(AFNICommand): >>> res = lfcd.run() # doctest: +SKIP """ - _cmd = '3dLFCD' + _cmd = "3dLFCD" input_spec = LFCDInputSpec output_spec = AFNICommandOutputSpec class MaskaveInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dmaskave', - argstr='%s', + desc="input file to 
3dmaskave", + argstr="%s", position=-2, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_maskave.1D', - desc='output image file name', + name_template="%s_maskave.1D", + desc="output image file name", keep_extension=True, - argstr='> %s', - name_source='in_file', - position=-1) + argstr="> %s", + name_source="in_file", + position=-1, + ) mask = File( - desc='matrix to align input file', - argstr='-mask %s', - position=1, - exists=True) - quiet = traits.Bool( - desc='matrix to align input file', argstr='-quiet', position=2) + desc="matrix to align input file", argstr="-mask %s", position=1, exists=True + ) + quiet = traits.Bool(desc="matrix to align input file", argstr="-quiet", position=2) class Maskave(AFNICommand): @@ -1521,40 +1670,39 @@ class Maskave(AFNICommand): """ - _cmd = '3dmaskave' + _cmd = "3dmaskave" input_spec = MaskaveInputSpec output_spec = AFNICommandOutputSpec class MeansInputSpec(AFNICommandInputSpec): in_file_a = File( - desc='input file to 3dMean', - argstr='%s', + desc="input file to 3dMean", + argstr="%s", position=-2, mandatory=True, - exists=True) + exists=True, + ) in_file_b = File( - desc='another input file to 3dMean', - argstr='%s', - position=-1, - exists=True) + desc="another input file to 3dMean", argstr="%s", position=-1, exists=True + ) datum = traits.Str( - desc='Sets the data type of the output dataset', argstr='-datum %s') + desc="Sets the data type of the output dataset", argstr="-datum %s" + ) out_file = File( - name_template='%s_mean', - desc='output image file name', - argstr='-prefix %s', - name_source='in_file_a') - scale = Str(desc='scaling of output', argstr='-%sscale') - non_zero = traits.Bool(desc='use only non-zero values', argstr='-non_zero') - std_dev = traits.Bool(desc='calculate std dev', argstr='-stdev') - sqr = traits.Bool(desc='mean square instead of value', argstr='-sqr') - summ = traits.Bool(desc='take sum, (not average)', argstr='-sum') - count = traits.Bool( - desc='compute count of non-zero voxels', argstr='-count') - mask_inter = traits.Bool( - desc='create intersection mask', argstr='-mask_inter') - mask_union = traits.Bool(desc='create union mask', argstr='-mask_union') + name_template="%s_mean", + desc="output image file name", + argstr="-prefix %s", + name_source="in_file_a", + ) + scale = Str(desc="scaling of output", argstr="-%sscale") + non_zero = traits.Bool(desc="use only non-zero values", argstr="-non_zero") + std_dev = traits.Bool(desc="calculate std dev", argstr="-stdev") + sqr = traits.Bool(desc="mean square instead of value", argstr="-sqr") + summ = traits.Bool(desc="take sum, (not average)", argstr="-sum") + count = traits.Bool(desc="compute count of non-zero voxels", argstr="-count") + mask_inter = traits.Bool(desc="create intersection mask", argstr="-mask_inter") + mask_union = traits.Bool(desc="create union mask", argstr="-mask_union") class Means(AFNICommand): @@ -1586,81 +1734,82 @@ class Means(AFNICommand): """ - _cmd = '3dMean' + _cmd = "3dMean" input_spec = MeansInputSpec output_spec = AFNICommandOutputSpec class OutlierCountInputSpec(CommandLineInputSpec): in_file = File( - argstr='%s', - mandatory=True, - exists=True, - position=-2, - desc='input dataset') + argstr="%s", mandatory=True, exists=True, position=-2, desc="input dataset" + ) mask = File( exists=True, - argstr='-mask %s', - xor=['autoclip', 'automask'], - desc='only count voxels within the given mask') + argstr="-mask %s", + xor=["autoclip", "automask"], + desc="only count voxels within 
the given mask", + ) qthr = traits.Range( value=1e-3, low=0.0, high=1.0, usedefault=True, - argstr='-qthr %.5f', - desc='indicate a value for q to compute alpha') + argstr="-qthr %.5f", + desc="indicate a value for q to compute alpha", + ) autoclip = traits.Bool( False, usedefault=True, - argstr='-autoclip', - xor=['mask'], - desc='clip off small voxels') + argstr="-autoclip", + xor=["mask"], + desc="clip off small voxels", + ) automask = traits.Bool( False, usedefault=True, - argstr='-automask', - xor=['mask'], - desc='clip off small voxels') + argstr="-automask", + xor=["mask"], + desc="clip off small voxels", + ) fraction = traits.Bool( False, usedefault=True, - argstr='-fraction', - desc='write out the fraction of masked voxels which are outliers at ' - 'each timepoint') + argstr="-fraction", + desc="write out the fraction of masked voxels which are outliers at " + "each timepoint", + ) interval = traits.Bool( False, usedefault=True, - argstr='-range', - desc='write out the median + 3.5 MAD of outlier count with each ' - 'timepoint') - save_outliers = traits.Bool( - False, usedefault=True, desc='enables out_file option') + argstr="-range", + desc="write out the median + 3.5 MAD of outlier count with each " "timepoint", + ) + save_outliers = traits.Bool(False, usedefault=True, desc="enables out_file option") outliers_file = File( - name_template='%s_outliers', - argstr='-save %s', - name_source=['in_file'], - output_name='out_outliers', + name_template="%s_outliers", + argstr="-save %s", + name_source=["in_file"], + output_name="out_outliers", keep_extension=True, - desc='output image file name') + desc="output image file name", + ) polort = traits.Int( - argstr='-polort %d', - desc='detrend each voxel timeseries with polynomials') + argstr="-polort %d", desc="detrend each voxel timeseries with polynomials" + ) legendre = traits.Bool( - False, - usedefault=True, - argstr='-legendre', - desc='use Legendre polynomials') + False, usedefault=True, argstr="-legendre", desc="use Legendre polynomials" + ) out_file = File( - name_template='%s_outliers', - name_source=['in_file'], + name_template="%s_outliers", + name_source=["in_file"], keep_extension=False, - desc='capture standard output') + desc="capture standard output", + ) class OutlierCountOutputSpec(TraitedSpec): - out_outliers = File(exists=True, desc='output image file name') - out_file = File(desc='capture standard output') + out_outliers = File(exists=True, desc="output image file name") + out_file = File(desc="capture standard output") class OutlierCount(CommandLine): @@ -1682,10 +1831,10 @@ class OutlierCount(CommandLine): """ - _cmd = '3dToutcount' + _cmd = "3dToutcount" input_spec = OutlierCountInputSpec output_spec = OutlierCountOutputSpec - _terminal_output = 'file_split' + _terminal_output = "file_split" def _parse_inputs(self, skip=None): if skip is None: @@ -1693,84 +1842,87 @@ def _parse_inputs(self, skip=None): # This is not strictly an input, but needs be # set before run() is called. 
- if self.terminal_output == 'none': - self.terminal_output = 'file_split' + if self.terminal_output == "none": + self.terminal_output = "file_split" if not self.inputs.save_outliers: - skip += ['outliers_file'] + skip += ["outliers_file"] return super(OutlierCount, self)._parse_inputs(skip) def _run_interface(self, runtime): runtime = super(OutlierCount, self)._run_interface(runtime) # Read from runtime.stdout or runtime.merged - with open(op.abspath(self.inputs.out_file), 'w') as outfh: + with open(op.abspath(self.inputs.out_file), "w") as outfh: outfh.write(runtime.stdout or runtime.merged) return runtime def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) + outputs["out_file"] = op.abspath(self.inputs.out_file) if self.inputs.save_outliers: - outputs['out_outliers'] = op.abspath(self.inputs.outliers_file) + outputs["out_outliers"] = op.abspath(self.inputs.outliers_file) return outputs class QualityIndexInputSpec(CommandLineInputSpec): in_file = File( - argstr='%s', - mandatory=True, - exists=True, - position=-2, - desc='input dataset') + argstr="%s", mandatory=True, exists=True, position=-2, desc="input dataset" + ) mask = File( exists=True, - argstr='-mask %s', - xor=['autoclip', 'automask'], - desc='compute correlation only across masked voxels') + argstr="-mask %s", + xor=["autoclip", "automask"], + desc="compute correlation only across masked voxels", + ) spearman = traits.Bool( False, usedefault=True, - argstr='-spearman', - desc='Quality index is 1 minus the Spearman (rank) correlation ' - 'coefficient of each sub-brick with the median sub-brick. ' - '(default).') + argstr="-spearman", + desc="Quality index is 1 minus the Spearman (rank) correlation " + "coefficient of each sub-brick with the median sub-brick. 
" + "(default).", + ) quadrant = traits.Bool( False, usedefault=True, - argstr='-quadrant', - desc='Similar to -spearman, but using 1 minus the quadrant correlation ' - 'coefficient as the quality index.') + argstr="-quadrant", + desc="Similar to -spearman, but using 1 minus the quadrant correlation " + "coefficient as the quality index.", + ) autoclip = traits.Bool( False, usedefault=True, - argstr='-autoclip', - xor=['mask'], - desc='clip off small voxels') + argstr="-autoclip", + xor=["mask"], + desc="clip off small voxels", + ) automask = traits.Bool( False, usedefault=True, - argstr='-automask', - xor=['mask'], - desc='clip off small voxels') - clip = traits.Float(argstr='-clip %f', desc='clip off values below') + argstr="-automask", + xor=["mask"], + desc="clip off small voxels", + ) + clip = traits.Float(argstr="-clip %f", desc="clip off values below") interval = traits.Bool( False, usedefault=True, - argstr='-range', - desc='write out the median + 3.5 MAD of outlier count with each ' - 'timepoint') + argstr="-range", + desc="write out the median + 3.5 MAD of outlier count with each " "timepoint", + ) out_file = File( - name_template='%s_tqual', - name_source=['in_file'], - argstr='> %s', + name_template="%s_tqual", + name_source=["in_file"], + argstr="> %s", keep_extension=False, position=-1, - desc='capture standard output') + desc="capture standard output", + ) class QualityIndexOutputSpec(TraitedSpec): - out_file = File(desc='file containing the captured standard output') + out_file = File(desc="file containing the captured standard output") class QualityIndex(CommandLine): @@ -1792,110 +1944,130 @@ class QualityIndex(CommandLine): >>> res = tqual.run() # doctest: +SKIP """ - _cmd = '3dTqual' + + _cmd = "3dTqual" input_spec = QualityIndexInputSpec output_spec = QualityIndexOutputSpec class ROIStatsInputSpec(CommandLineInputSpec): in_file = File( - desc='input dataset', - argstr='%s', - position=-2, - mandatory=True, - exists=True) - mask = File(desc='input mask', argstr='-mask %s', position=3, exists=True, - deprecated='1.1.4', new_name='mask_file') - mask_file = File(desc='input mask', argstr='-mask %s', exists=True) + desc="input dataset", argstr="%s", position=-2, mandatory=True, exists=True + ) + mask = File( + desc="input mask", + argstr="-mask %s", + position=3, + exists=True, + deprecated="1.1.4", + new_name="mask_file", + ) + mask_file = File(desc="input mask", argstr="-mask %s", exists=True) mask_f2short = traits.Bool( - desc='Tells the program to convert a float mask to short integers, ' - 'by simple rounding.', - argstr='-mask_f2short') + desc="Tells the program to convert a float mask to short integers, " + "by simple rounding.", + argstr="-mask_f2short", + ) num_roi = traits.Int( - desc='Forces the assumption that the mask dataset\'s ROIs are ' - 'denoted by 1 to n inclusive. Normally, the program ' - 'figures out the ROIs on its own. This option is ' - 'useful if a) you are certain that the mask dataset ' - 'has no values outside the range [0 n], b) there may ' - 'be some ROIs missing between [1 n] in the mask data-' - 'set and c) you want those columns in the output any-' - 'way so the output lines up with the output from other ' - 'invocations of 3dROIstats.', - argstr='-numroi %s') + desc="Forces the assumption that the mask dataset's ROIs are " + "denoted by 1 to n inclusive. Normally, the program " + "figures out the ROIs on its own. 
This option is " + "useful if a) you are certain that the mask dataset " + "has no values outside the range [0 n], b) there may " + "be some ROIs missing between [1 n] in the mask data-" + "set and c) you want those columns in the output any-" + "way so the output lines up with the output from other " + "invocations of 3dROIstats.", + argstr="-numroi %s", + ) zerofill = traits.Str( - requires=['num_roi'], - desc='For ROI labels not found, use the provided string instead of ' - 'a \'0\' in the output file. Only active if `num_roi` is ' - 'enabled.', - argstr='-zerofill %s') + requires=["num_roi"], + desc="For ROI labels not found, use the provided string instead of " + "a '0' in the output file. Only active if `num_roi` is " + "enabled.", + argstr="-zerofill %s", + ) roisel = File( exists=True, - desc='Only considers ROIs denoted by values found in the specified ' - 'file. Note that the order of the ROIs as specified in the file ' - 'is not preserved. So an SEL.1D of \'2 8 20\' produces the same ' - 'output as \'8 20 2\'', - argstr='-roisel %s') - debug = traits.Bool( - desc='print debug information', - argstr='-debug') - quiet = traits.Bool( - desc='execute quietly', - argstr='-quiet') + desc="Only considers ROIs denoted by values found in the specified " + "file. Note that the order of the ROIs as specified in the file " + "is not preserved. So an SEL.1D of '2 8 20' produces the same " + "output as '8 20 2'", + argstr="-roisel %s", + ) + debug = traits.Bool(desc="print debug information", argstr="-debug") + quiet = traits.Bool(desc="execute quietly", argstr="-quiet") nomeanout = traits.Bool( - desc='Do not include the (zero-inclusive) mean among computed stats', - argstr='-nomeanout') + desc="Do not include the (zero-inclusive) mean among computed stats", + argstr="-nomeanout", + ) nobriklab = traits.Bool( - desc='Do not print the sub-brick label next to its index', - argstr='-nobriklab') + desc="Do not print the sub-brick label next to its index", argstr="-nobriklab" + ) format1D = traits.Bool( - xor=['format1DR'], - desc='Output results in a 1D format that includes commented labels', - argstr='-1Dformat') + xor=["format1DR"], + desc="Output results in a 1D format that includes commented labels", + argstr="-1Dformat", + ) format1DR = traits.Bool( - xor=['format1D'], - desc='Output results in a 1D format that includes uncommented ' - 'labels. May not work optimally with typical 1D functions, ' - 'but is useful for R functions.', - argstr='-1DRformat') - _stat_names = ['mean', 'sum', 'voxels', 'minmax', 'sigma', 'median', - 'mode', 'summary', 'zerominmax', 'zerosigma', 'zeromedian', - 'zeromode'] + xor=["format1D"], + desc="Output results in a 1D format that includes uncommented " + "labels. May not work optimally with typical 1D functions, " + "but is useful for R functions.", + argstr="-1DRformat", + ) + _stat_names = [ + "mean", + "sum", + "voxels", + "minmax", + "sigma", + "median", + "mode", + "summary", + "zerominmax", + "zerosigma", + "zeromedian", + "zeromode", + ] stat = InputMultiObject( traits.Enum(_stat_names), - desc='statistics to compute. Options include: ' - ' * mean = Compute the mean using only non_zero voxels.' - ' Implies the opposite for the mean computed ' - ' by default.\n' - ' * median = Compute the median of nonzero voxels\n' - ' * mode = Compute the mode of nonzero voxels.' 
- ' (integral valued sets only)\n' - ' * minmax = Compute the min/max of nonzero voxels\n' - ' * sum = Compute the sum using only nonzero voxels.\n' - ' * voxels = Compute the number of nonzero voxels\n' - ' * sigma = Compute the standard deviation of nonzero' - ' voxels\n' - 'Statistics that include zero-valued voxels:\n' - ' * zerominmax = Compute the min/max of all voxels.\n' - ' * zerosigma = Compute the standard deviation of all' - ' voxels.\n' - ' * zeromedian = Compute the median of all voxels.\n' - ' * zeromode = Compute the mode of all voxels.\n' - ' * summary = Only output a summary line with the grand ' - ' mean across all briks in the input dataset.' - ' This option cannot be used with nomeanout.\n' - 'More that one option can be specified.', - argstr='%s...') + desc="statistics to compute. Options include: " + " * mean = Compute the mean using only non_zero voxels." + " Implies the opposite for the mean computed " + " by default.\n" + " * median = Compute the median of nonzero voxels\n" + " * mode = Compute the mode of nonzero voxels." + " (integral valued sets only)\n" + " * minmax = Compute the min/max of nonzero voxels\n" + " * sum = Compute the sum using only nonzero voxels.\n" + " * voxels = Compute the number of nonzero voxels\n" + " * sigma = Compute the standard deviation of nonzero" + " voxels\n" + "Statistics that include zero-valued voxels:\n" + " * zerominmax = Compute the min/max of all voxels.\n" + " * zerosigma = Compute the standard deviation of all" + " voxels.\n" + " * zeromedian = Compute the median of all voxels.\n" + " * zeromode = Compute the mode of all voxels.\n" + " * summary = Only output a summary line with the grand " + " mean across all briks in the input dataset." + " This option cannot be used with nomeanout.\n" + "More that one option can be specified.", + argstr="%s...", + ) out_file = File( - name_template='%s_roistat.1D', - desc='output file', + name_template="%s_roistat.1D", + desc="output file", keep_extension=False, - argstr='> %s', - name_source='in_file', - position=-1) + argstr="> %s", + name_source="in_file", + position=-1, + ) class ROIStatsOutputSpec(TraitedSpec): - out_file = File(desc='output tab-separated values file', exists=True) + out_file = File(desc="output tab-separated values file", exists=True) class ROIStats(AFNICommandBase): @@ -1918,75 +2090,84 @@ class ROIStats(AFNICommandBase): >>> res = roistats.run() # doctest: +SKIP """ - _cmd = '3dROIstats' - _terminal_output = 'allatonce' + + _cmd = "3dROIstats" + _terminal_output = "allatonce" input_spec = ROIStatsInputSpec output_spec = ROIStatsOutputSpec def _format_arg(self, name, spec, value): _stat_dict = { - 'mean': '-nzmean', - 'median': '-nzmedian', - 'mode': '-nzmode', - 'minmax': '-nzminmax', - 'sigma': '-nzsigma', - 'voxels': '-nzvoxels', - 'sum': '-nzsum', - 'summary': '-summary', - 'zerominmax': '-minmax', - 'zeromedian': '-median', - 'zerosigma': '-sigma', - 'zeromode': '-mode' - } - if name == 'stat': + "mean": "-nzmean", + "median": "-nzmedian", + "mode": "-nzmode", + "minmax": "-nzminmax", + "sigma": "-nzsigma", + "voxels": "-nzvoxels", + "sum": "-nzsum", + "summary": "-summary", + "zerominmax": "-minmax", + "zeromedian": "-median", + "zerosigma": "-sigma", + "zeromode": "-mode", + } + if name == "stat": value = [_stat_dict[v] for v in value] return super(ROIStats, self)._format_arg(name, spec, value) class RetroicorInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dretroicor', - argstr='%s', + desc="input file to 3dretroicor", + 
argstr="%s", position=-1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_retroicor', - name_source=['in_file'], - desc='output image file name', - argstr='-prefix %s', - position=1) + name_template="%s_retroicor", + name_source=["in_file"], + desc="output image file name", + argstr="-prefix %s", + position=1, + ) card = File( - desc='1D cardiac data file for cardiac correction', - argstr='-card %s', + desc="1D cardiac data file for cardiac correction", + argstr="-card %s", position=-2, - exists=True) + exists=True, + ) resp = File( - desc='1D respiratory waveform data for correction', - argstr='-resp %s', + desc="1D respiratory waveform data for correction", + argstr="-resp %s", position=-3, - exists=True) + exists=True, + ) threshold = traits.Int( - desc='Threshold for detection of R-wave peaks in input (Make sure it ' - 'is above the background noise level, Try 3/4 or 4/5 times range ' - 'plus minimum)', - argstr='-threshold %d', - position=-4) + desc="Threshold for detection of R-wave peaks in input (Make sure it " + "is above the background noise level, Try 3/4 or 4/5 times range " + "plus minimum)", + argstr="-threshold %d", + position=-4, + ) order = traits.Int( - desc='The order of the correction (2 is typical)', - argstr='-order %s', - position=-5) + desc="The order of the correction (2 is typical)", + argstr="-order %s", + position=-5, + ) cardphase = File( - desc='Filename for 1D cardiac phase output', - argstr='-cardphase %s', + desc="Filename for 1D cardiac phase output", + argstr="-cardphase %s", position=-6, - hash_files=False) + hash_files=False, + ) respphase = File( - desc='Filename for 1D resp phase output', - argstr='-respphase %s', + desc="Filename for 1D resp phase output", + argstr="-respphase %s", position=-7, - hash_files=False) + hash_files=False, + ) class Retroicor(AFNICommand): @@ -2024,67 +2205,75 @@ class Retroicor(AFNICommand): """ - _cmd = '3dretroicor' + _cmd = "3dretroicor" input_spec = RetroicorInputSpec output_spec = AFNICommandOutputSpec def _format_arg(self, name, trait_spec, value): - if name == 'in_file': - if not isdefined(self.inputs.card) and not isdefined( - self.inputs.resp): + if name == "in_file": + if not isdefined(self.inputs.card) and not isdefined(self.inputs.resp): return None return super(Retroicor, self)._format_arg(name, trait_spec, value) class SegInputSpec(CommandLineInputSpec): in_file = File( - desc='ANAT is the volume to segment', - argstr='-anat %s', + desc="ANAT is the volume to segment", + argstr="-anat %s", position=-1, mandatory=True, exists=True, - copyfile=True) + copyfile=True, + ) mask = traits.Either( - traits.Enum('AUTO'), + traits.Enum("AUTO"), File(exists=True), - desc='only non-zero voxels in mask are analyzed. mask can either be a ' + desc="only non-zero voxels in mask are analyzed. 
mask can either be a " 'dataset or the string "AUTO" which would use AFNI\'s automask ' - 'function to create the mask.', - argstr='-mask %s', + "function to create the mask.", + argstr="-mask %s", position=-2, - mandatory=True) + mandatory=True, + ) blur_meth = traits.Enum( - 'BFT', - 'BIM', - argstr='-blur_meth %s', - desc='set the blurring method for bias field estimation') + "BFT", + "BIM", + argstr="-blur_meth %s", + desc="set the blurring method for bias field estimation", + ) bias_fwhm = traits.Float( - desc='The amount of blurring used when estimating the field bias with ' - 'the Wells method', - argstr='-bias_fwhm %f') + desc="The amount of blurring used when estimating the field bias with " + "the Wells method", + argstr="-bias_fwhm %f", + ) classes = Str( - desc='CLASS_STRING is a semicolon delimited string of class labels', - argstr='-classes %s') + desc="CLASS_STRING is a semicolon delimited string of class labels", + argstr="-classes %s", + ) bmrf = traits.Float( - desc='Weighting factor controlling spatial homogeneity of the ' - 'classifications', - argstr='-bmrf %f') + desc="Weighting factor controlling spatial homogeneity of the " + "classifications", + argstr="-bmrf %f", + ) bias_classes = Str( - desc='A semicolon delimited string of classes that contribute to the ' - 'estimation of the bias field', - argstr='-bias_classes %s') + desc="A semicolon delimited string of classes that contribute to the " + "estimation of the bias field", + argstr="-bias_classes %s", + ) prefix = Str( - desc='the prefix for the output folder containing all output volumes', - argstr='-prefix %s') + desc="the prefix for the output folder containing all output volumes", + argstr="-prefix %s", + ) mixfrac = Str( - desc='MIXFRAC sets up the volume-wide (within mask) tissue fractions ' - 'while initializing the segmentation (see IGNORE for exception)', - argstr='-mixfrac %s') + desc="MIXFRAC sets up the volume-wide (within mask) tissue fractions " + "while initializing the segmentation (see IGNORE for exception)", + argstr="-mixfrac %s", + ) mixfloor = traits.Float( - desc='Set the minimum value for any class\'s mixing fraction', - argstr='-mixfloor %f') - main_N = traits.Int( - desc='Number of iterations to perform.', argstr='-main_N %d') + desc="Set the minimum value for any class's mixing fraction", + argstr="-mixfloor %f", + ) + main_N = traits.Int(desc="Number of iterations to perform.", argstr="-main_N %d") class Seg(AFNICommandBase): @@ -2108,7 +2297,7 @@ class Seg(AFNICommandBase): """ - _cmd = '3dSeg' + _cmd = "3dSeg" input_spec = SegInputSpec output_spec = AFNICommandOutputSpec @@ -2119,10 +2308,9 @@ def aggregate_outputs(self, runtime=None, needed_outputs=None): outputs = self._outputs() if isdefined(self.inputs.prefix): - outfile = os.path.join(os.getcwd(), self.inputs.prefix, - 'Classes+*.BRIK') + outfile = os.path.join(os.getcwd(), self.inputs.prefix, "Classes+*.BRIK") else: - outfile = os.path.join(os.getcwd(), 'Segsy', 'Classes+*.BRIK') + outfile = os.path.join(os.getcwd(), "Segsy", "Classes+*.BRIK") outputs.out_file = glob.glob(outfile)[0] @@ -2131,17 +2319,19 @@ def aggregate_outputs(self, runtime=None, needed_outputs=None): class SkullStripInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dSkullStrip', - argstr='-input %s', + desc="input file to 3dSkullStrip", + argstr="-input %s", position=1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_skullstrip', - desc='output image file name', - argstr='-prefix 
%s', - name_source='in_file') + name_template="%s_skullstrip", + desc="output image file name", + argstr="-prefix %s", + name_source="in_file", + ) class SkullStrip(AFNICommand): @@ -2164,7 +2354,8 @@ class SkullStrip(AFNICommand): >>> res = skullstrip.run() # doctest: +SKIP """ - _cmd = '3dSkullStrip' + + _cmd = "3dSkullStrip" _redirect_x = True input_spec = SkullStripInputSpec output_spec = AFNICommandOutputSpec @@ -2182,48 +2373,55 @@ def __init__(self, **inputs): class TCorr1DInputSpec(AFNICommandInputSpec): xset = File( - desc='3d+time dataset input', - argstr=' %s', + desc="3d+time dataset input", + argstr=" %s", position=-2, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) y_1d = File( - desc='1D time series file input', - argstr=' %s', + desc="1D time series file input", + argstr=" %s", position=-1, mandatory=True, - exists=True) + exists=True, + ) out_file = File( - desc='output filename prefix', - name_template='%s_correlation.nii.gz', - argstr='-prefix %s', - name_source='xset', - keep_extension=True) + desc="output filename prefix", + name_template="%s_correlation.nii.gz", + argstr="-prefix %s", + name_source="xset", + keep_extension=True, + ) pearson = traits.Bool( - desc='Correlation is the normal Pearson correlation coefficient', - argstr=' -pearson', - xor=['spearman', 'quadrant', 'ktaub'], - position=1) + desc="Correlation is the normal Pearson correlation coefficient", + argstr=" -pearson", + xor=["spearman", "quadrant", "ktaub"], + position=1, + ) spearman = traits.Bool( - desc='Correlation is the Spearman (rank) correlation coefficient', - argstr=' -spearman', - xor=['pearson', 'quadrant', 'ktaub'], - position=1) + desc="Correlation is the Spearman (rank) correlation coefficient", + argstr=" -spearman", + xor=["pearson", "quadrant", "ktaub"], + position=1, + ) quadrant = traits.Bool( - desc='Correlation is the quadrant correlation coefficient', - argstr=' -quadrant', - xor=['pearson', 'spearman', 'ktaub'], - position=1) + desc="Correlation is the quadrant correlation coefficient", + argstr=" -quadrant", + xor=["pearson", "spearman", "ktaub"], + position=1, + ) ktaub = traits.Bool( - desc='Correlation is the Kendall\'s tau_b correlation coefficient', - argstr=' -ktaub', - xor=['pearson', 'spearman', 'quadrant'], - position=1) + desc="Correlation is the Kendall's tau_b correlation coefficient", + argstr=" -ktaub", + xor=["pearson", "spearman", "quadrant"], + position=1, + ) class TCorr1DOutputSpec(TraitedSpec): - out_file = File(desc='output file containing correlations', exists=True) + out_file = File(desc="output file containing correlations", exists=True) class TCorr1D(AFNICommand): @@ -2243,73 +2441,69 @@ class TCorr1D(AFNICommand): """ - _cmd = '3dTcorr1D' + _cmd = "3dTcorr1D" input_spec = TCorr1DInputSpec output_spec = TCorr1DOutputSpec class TCorrMapInputSpec(AFNICommandInputSpec): - in_file = File( - exists=True, argstr='-input %s', mandatory=True, copyfile=False) - seeds = File(exists=True, argstr='-seed %s', xor=('seeds_width')) - mask = File(exists=True, argstr='-mask %s') - automask = traits.Bool(argstr='-automask') - polort = traits.Int(argstr='-polort %d') - bandpass = traits.Tuple( - (traits.Float(), traits.Float()), argstr='-bpass %f %f') - regress_out_timeseries = File(exists=True, argstr='-ort %s') - blur_fwhm = traits.Float(argstr='-Gblur %f') - seeds_width = traits.Float(argstr='-Mseed %f', xor=('seeds')) + in_file = File(exists=True, argstr="-input %s", mandatory=True, copyfile=False) + seeds = File(exists=True, argstr="-seed %s", 
xor=("seeds_width")) + mask = File(exists=True, argstr="-mask %s") + automask = traits.Bool(argstr="-automask") + polort = traits.Int(argstr="-polort %d") + bandpass = traits.Tuple((traits.Float(), traits.Float()), argstr="-bpass %f %f") + regress_out_timeseries = File(exists=True, argstr="-ort %s") + blur_fwhm = traits.Float(argstr="-Gblur %f") + seeds_width = traits.Float(argstr="-Mseed %f", xor=("seeds")) # outputs - mean_file = File(argstr='-Mean %s', suffix='_mean', name_source='in_file') - zmean = File(argstr='-Zmean %s', suffix='_zmean', name_source='in_file') - qmean = File(argstr='-Qmean %s', suffix='_qmean', name_source='in_file') - pmean = File(argstr='-Pmean %s', suffix='_pmean', name_source='in_file') - - _thresh_opts = ('absolute_threshold', 'var_absolute_threshold', - 'var_absolute_threshold_normalize') + mean_file = File(argstr="-Mean %s", suffix="_mean", name_source="in_file") + zmean = File(argstr="-Zmean %s", suffix="_zmean", name_source="in_file") + qmean = File(argstr="-Qmean %s", suffix="_qmean", name_source="in_file") + pmean = File(argstr="-Pmean %s", suffix="_pmean", name_source="in_file") + + _thresh_opts = ( + "absolute_threshold", + "var_absolute_threshold", + "var_absolute_threshold_normalize", + ) thresholds = traits.List(traits.Int()) absolute_threshold = File( - argstr='-Thresh %f %s', - suffix='_thresh', - name_source='in_file', - xor=_thresh_opts) + argstr="-Thresh %f %s", + suffix="_thresh", + name_source="in_file", + xor=_thresh_opts, + ) var_absolute_threshold = File( - argstr='-VarThresh %f %f %f %s', - suffix='_varthresh', - name_source='in_file', - xor=_thresh_opts) + argstr="-VarThresh %f %f %f %s", + suffix="_varthresh", + name_source="in_file", + xor=_thresh_opts, + ) var_absolute_threshold_normalize = File( - argstr='-VarThreshN %f %f %f %s', - suffix='_varthreshn', - name_source='in_file', - xor=_thresh_opts) + argstr="-VarThreshN %f %f %f %s", + suffix="_varthreshn", + name_source="in_file", + xor=_thresh_opts, + ) - correlation_maps = File(argstr='-CorrMap %s', name_source='in_file') - correlation_maps_masked = File( - argstr='-CorrMask %s', name_source='in_file') + correlation_maps = File(argstr="-CorrMap %s", name_source="in_file") + correlation_maps_masked = File(argstr="-CorrMask %s", name_source="in_file") - _expr_opts = ('average_expr', 'average_expr_nonzero', 'sum_expr') + _expr_opts = ("average_expr", "average_expr_nonzero", "sum_expr") expr = Str() average_expr = File( - argstr='-Aexpr %s %s', - suffix='_aexpr', - name_source='in_file', - xor=_expr_opts) + argstr="-Aexpr %s %s", suffix="_aexpr", name_source="in_file", xor=_expr_opts + ) average_expr_nonzero = File( - argstr='-Cexpr %s %s', - suffix='_cexpr', - name_source='in_file', - xor=_expr_opts) + argstr="-Cexpr %s %s", suffix="_cexpr", name_source="in_file", xor=_expr_opts + ) sum_expr = File( - argstr='-Sexpr %s %s', - suffix='_sexpr', - name_source='in_file', - xor=_expr_opts) + argstr="-Sexpr %s %s", suffix="_sexpr", name_source="in_file", xor=_expr_opts + ) histogram_bin_numbers = traits.Int() - histogram = File( - name_source='in_file', argstr='-Hist %d %s', suffix='_hist') + histogram = File(name_source="in_file", argstr="-Hist %d %s", suffix="_hist") class TCorrMapOutputSpec(TraitedSpec): @@ -2350,48 +2544,50 @@ class TCorrMap(AFNICommand): """ - _cmd = '3dTcorrMap' + _cmd = "3dTcorrMap" input_spec = TCorrMapInputSpec output_spec = TCorrMapOutputSpec - _additional_metadata = ['suffix'] + _additional_metadata = ["suffix"] def _format_arg(self, name, trait_spec, value): if 
name in self.inputs._thresh_opts: return trait_spec.argstr % self.inputs.thresholds + [value] elif name in self.inputs._expr_opts: return trait_spec.argstr % (self.inputs.expr, value) - elif name == 'histogram': - return trait_spec.argstr % (self.inputs.histogram_bin_numbers, - value) + elif name == "histogram": + return trait_spec.argstr % (self.inputs.histogram_bin_numbers, value) else: return super(TCorrMap, self)._format_arg(name, trait_spec, value) class TCorrelateInputSpec(AFNICommandInputSpec): xset = File( - desc='input xset', - argstr='%s', + desc="input xset", + argstr="%s", position=-2, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) yset = File( - desc='input yset', - argstr='%s', + desc="input yset", + argstr="%s", position=-1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_tcorr', - desc='output image file name', - argstr='-prefix %s', - name_source='xset') + name_template="%s_tcorr", + desc="output image file name", + argstr="-prefix %s", + name_source="xset", + ) pearson = traits.Bool( - desc='Correlation is the normal Pearson correlation coefficient', - argstr='-pearson') - polort = traits.Int( - desc='Remove polynomical trend of order m', argstr='-polort %d') + desc="Correlation is the normal Pearson correlation coefficient", + argstr="-pearson", + ) + polort = traits.Int(desc="Remove polynomical trend of order m", argstr="-polort %d") class TCorrelate(AFNICommand): @@ -2417,44 +2613,50 @@ class TCorrelate(AFNICommand): """ - _cmd = '3dTcorrelate' + _cmd = "3dTcorrelate" input_spec = TCorrelateInputSpec output_spec = AFNICommandOutputSpec class TNormInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dTNorm', - argstr='%s', + desc="input file to 3dTNorm", + argstr="%s", position=-1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_tnorm', - desc='output image file name', - argstr='-prefix %s', - name_source='in_file') + name_template="%s_tnorm", + desc="output image file name", + argstr="-prefix %s", + name_source="in_file", + ) norm2 = traits.Bool( - desc='L2 normalize (sum of squares = 1) [DEFAULT]', argstr='-norm2') + desc="L2 normalize (sum of squares = 1) [DEFAULT]", argstr="-norm2" + ) normR = traits.Bool( - desc= - 'normalize so sum of squares = number of time points * e.g., so RMS = 1.', - argstr='-normR') + desc="normalize so sum of squares = number of time points * e.g., so RMS = 1.", + argstr="-normR", + ) norm1 = traits.Bool( - desc='L1 normalize (sum of absolute values = 1)', argstr='-norm1') + desc="L1 normalize (sum of absolute values = 1)", argstr="-norm1" + ) normx = traits.Bool( - desc='Scale so max absolute value = 1 (L_infinity norm)', - argstr='-normx') + desc="Scale so max absolute value = 1 (L_infinity norm)", argstr="-normx" + ) polort = traits.Int( desc="""Detrend with polynomials of order p before normalizing [DEFAULT = don't do this] * Use '-polort 0' to remove the mean, for example""", - argstr='-polort %s') + argstr="-polort %s", + ) L1fit = traits.Bool( desc="""Detrend with L1 regression (L2 is the default) * This option is here just for the hell of it""", - argstr='-L1fit') + argstr="-L1fit", + ) class TNorm(AFNICommand): @@ -2477,32 +2679,36 @@ class TNorm(AFNICommand): >>> res = tshift.run() # doctest: +SKIP """ - _cmd = '3dTnorm' + + _cmd = "3dTnorm" input_spec = TNormInputSpec output_spec = AFNICommandOutputSpec class TProjectInputSpec(AFNICommandInputSpec): in_file = File( - 
desc='input file to 3dTproject', - argstr='-input %s', + desc="input file to 3dTproject", + argstr="-input %s", position=1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_tproject', - desc='output image file name', + name_template="%s_tproject", + desc="output image file name", position=-1, - argstr='-prefix %s', - name_source='in_file') + argstr="-prefix %s", + name_source="in_file", + ) censor = File( desc="""filename of censor .1D time series * This is a file of 1s and 0s, indicating which time points are to be included (1) and which are to be excluded (0).""", argstr="-censor %s", - exists=True) + exists=True, + ) censortr = traits.List( traits.Str(), desc="""list of strings that specify time indexes @@ -2519,10 +2725,13 @@ class TProjectInputSpec(AFNICommandInputSpec): +N.B.: 2:37,47 means index #37 in run #2 and global time index 47; it does NOT mean index #37 in run #2 AND index #47 in run #2.""", - argstr="-CENSORTR %s") + argstr="-CENSORTR %s", + ) cenmode = traits.Enum( - 'KILL', 'ZERO', 'NTRP', - desc="""specifies how censored time points are treated in + "KILL", + "ZERO", + "NTRP", + desc="""specifies how censored time points are treated in the output dataset: + mode = ZERO ==> put zero values in their place ==> output datset is same length as input @@ -2535,7 +2744,8 @@ class TProjectInputSpec(AFNICommandInputSpec): of any time points -- this feature is to keep the Spanish Inquisition happy. * The default mode is KILL !!!""", - argstr='-cenmode %s') + argstr="-cenmode %s", + ) concat = File( desc="""The catenation file, as in 3dDeconvolve, containing the TR indexes of the start points for each contiguous run @@ -2556,18 +2766,21 @@ class TProjectInputSpec(AFNICommandInputSpec): from the ort files via the '{...}' selector for the 1D files and the '[...]' selector for the datasets.""", exists=True, - argstr='-concat %s') + argstr="-concat %s", + ) noblock = traits.Bool( desc="""Also as in 3dDeconvolve, if you want the program to treat an auto-catenated dataset as one long run, use this option. ++ However, '-noblock' will not affect catenation if you use the '-concat' option.""", - argstr='-noblock') + argstr="-noblock", + ) ort = File( desc="""Remove each column in file ++ Each column will have its mean removed.""", exists=True, - argstr="-ort %s") + argstr="-ort %s", + ) polort = traits.Int( desc="""Remove polynomials up to and including degree pp. ++ Default value is 2. @@ -2578,49 +2791,56 @@ class TProjectInputSpec(AFNICommandInputSpec): ++ Use of -polort -1 is not advised (if data mean != 0), even if -ort contains constant terms, as all means are removed.""", - argstr="-polort %d") + argstr="-polort %d", + ) dsort = InputMultiObject( - File( - exists=True, - copyfile=False), + File(exists=True, copyfile=False), argstr="-dsort %s...", desc="""Remove the 3D+time time series in dataset fset. ++ That is, 'fset' contains a different nuisance time series for each voxel (e.g., from AnatICOR). 
- ++ Multiple -dsort options are allowed.""") + ++ Multiple -dsort options are allowed.""", + ) bandpass = traits.Tuple( - traits.Float, traits.Float, + traits.Float, + traits.Float, desc="""Remove all frequencies EXCEPT those in the range""", - argstr='-bandpass %g %g') + argstr="-bandpass %g %g", + ) stopband = traits.Tuple( - traits.Float, traits.Float, + traits.Float, + traits.Float, desc="""Remove all frequencies in the range""", - argstr='-stopband %g %g') + argstr="-stopband %g %g", + ) TR = traits.Float( desc="""Use time step dd for the frequency calculations, rather than the value stored in the dataset header.""", - argstr='-TR %g') + argstr="-TR %g", + ) mask = File( exists=True, desc="""Only operate on voxels nonzero in the mset dataset. ++ Voxels outside the mask will be filled with zeros. ++ If no masking option is given, then all voxels will be processed.""", - argstr='-mask %s') + argstr="-mask %s", + ) automask = traits.Bool( - desc="""Generate a mask automatically""", - xor=['mask'], - argstr='-automask') + desc="""Generate a mask automatically""", xor=["mask"], argstr="-automask" + ) blur = traits.Float( desc="""Blur (inside the mask only) with a filter that has width (FWHM) of fff millimeters. ++ Spatial blurring (if done) is after the time series filtering.""", - argstr='-blur %g') + argstr="-blur %g", + ) norm = traits.Bool( desc="""Normalize each output time series to have sum of squares = 1. This is the LAST operation.""", - argstr='-norm') + argstr="-norm", + ) class TProject(AFNICommand): @@ -2652,76 +2872,92 @@ class TProject(AFNICommand): >>> res = tproject.run() # doctest: +SKIP """ - _cmd = '3dTproject' + + _cmd = "3dTproject" input_spec = TProjectInputSpec output_spec = AFNICommandOutputSpec - class TShiftInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dTshift', - argstr='%s', + desc="input file to 3dTshift", + argstr="%s", position=-1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_tshift', - desc='output image file name', - argstr='-prefix %s', - name_source='in_file') + name_template="%s_tshift", + desc="output image file name", + argstr="-prefix %s", + name_source="in_file", + ) tr = Str( desc='manually set the TR. 
You can attach suffix "s" for seconds ' 'or "ms" for milliseconds.', - argstr='-TR %s') + argstr="-TR %s", + ) tzero = traits.Float( - desc='align each slice to given time offset', - argstr='-tzero %s', - xor=['tslice']) + desc="align each slice to given time offset", argstr="-tzero %s", xor=["tslice"] + ) tslice = traits.Int( - desc='align each slice to time offset of given slice', - argstr='-slice %s', - xor=['tzero']) + desc="align each slice to time offset of given slice", + argstr="-slice %s", + xor=["tzero"], + ) ignore = traits.Int( - desc='ignore the first set of points specified', argstr='-ignore %s') + desc="ignore the first set of points specified", argstr="-ignore %s" + ) interp = traits.Enum( - ('Fourier', 'linear', 'cubic', 'quintic', 'heptic'), - desc='different interpolation methods (see 3dTshift for details) ' - 'default = Fourier', - argstr='-%s') + ("Fourier", "linear", "cubic", "quintic", "heptic"), + desc="different interpolation methods (see 3dTshift for details) " + "default = Fourier", + argstr="-%s", + ) tpattern = traits.Either( - traits.Enum('alt+z', 'altplus', # Synonyms - 'alt+z2', - 'alt-z', 'altminus', # Synonyms - 'alt-z2', - 'seq+z', 'seqplus', # Synonyms - 'seq-z', 'seqminus'), # Synonyms + traits.Enum( + "alt+z", + "altplus", # Synonyms + "alt+z2", + "alt-z", + "altminus", # Synonyms + "alt-z2", + "seq+z", + "seqplus", # Synonyms + "seq-z", + "seqminus", + ), # Synonyms Str, # For backwards compatibility - desc='use specified slice time pattern rather than one in header', - argstr='-tpattern %s', - xor=['slice_timing']) + desc="use specified slice time pattern rather than one in header", + argstr="-tpattern %s", + xor=["slice_timing"], + ) slice_timing = traits.Either( File(exists=True), traits.List(traits.Float), - desc='time offsets from the volume acquisition onset for each slice', - argstr='-tpattern @%s', - xor=['tpattern']) + desc="time offsets from the volume acquisition onset for each slice", + argstr="-tpattern @%s", + xor=["tpattern"], + ) slice_encoding_direction = traits.Enum( - 'k', 'k-', + "k", + "k-", usedefault=True, - desc='Direction in which slice_timing is specified (default: k). If negative,' - 'slice_timing is defined in reverse order, that is, the first entry ' - 'corresponds to the slice with the largest index, and the final entry ' - 'corresponds to slice index zero. Only in effect when slice_timing is ' - 'passed as list, not when it is passed as file.',) + desc="Direction in which slice_timing is specified (default: k). If negative," + "slice_timing is defined in reverse order, that is, the first entry " + "corresponds to the slice with the largest index, and the final entry " + "corresponds to slice index zero. 
Only in effect when slice_timing is " + "passed as list, not when it is passed as file.", + ) rlt = traits.Bool( - desc='Before shifting, remove the mean and linear trend', - argstr='-rlt') + desc="Before shifting, remove the mean and linear trend", argstr="-rlt" + ) rltplus = traits.Bool( - desc='Before shifting, remove the mean and linear trend and later put ' - 'back the mean', - argstr='-rlt+') + desc="Before shifting, remove the mean and linear trend and later put " + "back the mean", + argstr="-rlt+", + ) class TShiftOutputSpec(AFNICommandOutputSpec): @@ -2818,15 +3054,18 @@ class TShift(AFNICommand): >>> res = tshift.run() # doctest: +SKIP """ - _cmd = '3dTshift' + + _cmd = "3dTshift" input_spec = TShiftInputSpec output_spec = TShiftOutputSpec def _format_arg(self, name, trait_spec, value): - if name == 'tpattern' and value.startswith('@'): - iflogger.warning('Passing a file prefixed by "@" will be deprecated' - '; please use the `slice_timing` input') - elif name == 'slice_timing' and isinstance(value, list): + if name == "tpattern" and value.startswith("@"): + iflogger.warning( + 'Passing a file prefixed by "@" will be deprecated' + "; please use the `slice_timing` input" + ) + elif name == "slice_timing" and isinstance(value, list): value = self._write_slice_timing() return super(TShift, self)._format_arg(name, trait_spec, value) @@ -2835,67 +3074,72 @@ def _write_slice_timing(self): if self.inputs.slice_encoding_direction.endswith("-"): slice_timing.reverse() - fname = 'slice_timing.1D' - with open(fname, 'w') as fobj: - fobj.write('\t'.join(map(str, slice_timing))) + fname = "slice_timing.1D" + with open(fname, "w") as fobj: + fobj.write("\t".join(map(str, slice_timing))) return fname def _list_outputs(self): outputs = super(TShift, self)._list_outputs() if isdefined(self.inputs.slice_timing): if isinstance(self.inputs.slice_timing, list): - outputs['timing_file'] = os.path.abspath('slice_timing.1D') + outputs["timing_file"] = os.path.abspath("slice_timing.1D") else: - outputs['timing_file'] = os.path.abspath(self.inputs.slice_timing) + outputs["timing_file"] = os.path.abspath(self.inputs.slice_timing) return outputs class TSmoothInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dTSmooth', - argstr='%s', + desc="input file to 3dTSmooth", + argstr="%s", position=-1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_smooth', - desc='output file from 3dTSmooth', - argstr='-prefix %s', - name_source='in_file') + name_template="%s_smooth", + desc="output file from 3dTSmooth", + argstr="-prefix %s", + name_source="in_file", + ) datum = traits.Str( - desc='Sets the data type of the output dataset', - argstr='-datum %s') + desc="Sets the data type of the output dataset", argstr="-datum %s" + ) lin = traits.Bool( - desc='3 point linear filter: 0.15*a + 0.70*b + 0.15*c' - '[This is the default smoother]', - argstr='-lin') - med = traits.Bool( - desc='3 point median filter: median(a,b,c)', - argstr='-med') + desc="3 point linear filter: 0.15*a + 0.70*b + 0.15*c" + "[This is the default smoother]", + argstr="-lin", + ) + med = traits.Bool(desc="3 point median filter: median(a,b,c)", argstr="-med") osf = traits.Bool( - desc='3 point order statistics filter:' - '0.15*min(a,b,c) + 0.70*median(a,b,c) + 0.15*max(a,b,c)', - argstr='-osf') + desc="3 point order statistics filter:" + "0.15*min(a,b,c) + 0.70*median(a,b,c) + 0.15*max(a,b,c)", + argstr="-osf", + ) lin3 = traits.Int( - desc='3 point linear filter: 
0.5*(1-m)*a + m*b + 0.5*(1-m)*c' + desc="3 point linear filter: 0.5*(1-m)*a + m*b + 0.5*(1-m)*c" "Here, 'm' is a number strictly between 0 and 1.", - argstr='-3lin %d') + argstr="-3lin %d", + ) hamming = traits.Int( - argstr='-hamming %d', - desc='Use N point Hamming windows.' - '(N must be odd and bigger than 1.)') + argstr="-hamming %d", + desc="Use N point Hamming windows." "(N must be odd and bigger than 1.)", + ) blackman = traits.Int( - argstr='-blackman %d', - desc='Use N point Blackman windows.' - '(N must be odd and bigger than 1.)') + argstr="-blackman %d", + desc="Use N point Blackman windows." "(N must be odd and bigger than 1.)", + ) custom = File( - argstr='-custom %s', - desc='odd # of coefficients must be in a single column in ASCII file') + argstr="-custom %s", + desc="odd # of coefficients must be in a single column in ASCII file", + ) adaptive = traits.Int( - argstr='-adaptive %d', - desc='use adaptive mean filtering of width N ' - '(where N must be odd and bigger than 3).') + argstr="-adaptive %d", + desc="use adaptive mean filtering of width N " + "(where N must be odd and bigger than 3).", + ) class TSmooth(AFNICommand): @@ -2918,76 +3162,87 @@ class TSmooth(AFNICommand): >>> res = smooth.run() # doctest: +SKIP """ - _cmd = '3dTsmooth' + + _cmd = "3dTsmooth" input_spec = TSmoothInputSpec output_spec = AFNICommandOutputSpec class VolregInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dvolreg', - argstr='%s', + desc="input file to 3dvolreg", + argstr="%s", position=-1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) in_weight_volume = traits.Either( traits.Tuple(File(exists=True), traits.Int), File(exists=True), - desc='weights for each voxel specified by a file with an ' - 'optional volume number (defaults to 0)', - argstr="-weight '%s[%d]'") + desc="weights for each voxel specified by a file with an " + "optional volume number (defaults to 0)", + argstr="-weight '%s[%d]'", + ) out_file = File( - name_template='%s_volreg', - desc='output image file name', - argstr='-prefix %s', - name_source='in_file') + name_template="%s_volreg", + desc="output image file name", + argstr="-prefix %s", + name_source="in_file", + ) basefile = File( - desc='base file for registration', - argstr='-base %s', - position=-6, - exists=True) + desc="base file for registration", argstr="-base %s", position=-6, exists=True + ) zpad = traits.Int( - desc='Zeropad around the edges by \'n\' voxels during rotations', - argstr='-zpad %d', - position=-5) + desc="Zeropad around the edges by 'n' voxels during rotations", + argstr="-zpad %d", + position=-5, + ) md1d_file = File( - name_template='%s_md.1D', - desc='max displacement output file', - argstr='-maxdisp1D %s', - name_source='in_file', + name_template="%s_md.1D", + desc="max displacement output file", + argstr="-maxdisp1D %s", + name_source="in_file", keep_extension=True, - position=-4) + position=-4, + ) oned_file = File( - name_template='%s.1D', - desc='1D movement parameters output file', - argstr='-1Dfile %s', - name_source='in_file', - keep_extension=True) + name_template="%s.1D", + desc="1D movement parameters output file", + argstr="-1Dfile %s", + name_source="in_file", + keep_extension=True, + ) verbose = traits.Bool( - desc='more detailed description of the process', argstr='-verbose') + desc="more detailed description of the process", argstr="-verbose" + ) timeshift = traits.Bool( - desc='time shift to mean slice time offset', argstr='-tshift 0') + desc="time shift to mean slice time offset", 
argstr="-tshift 0" + ) copyorigin = traits.Bool( - desc='copy base file origin coords to output', argstr='-twodup') + desc="copy base file origin coords to output", argstr="-twodup" + ) oned_matrix_save = File( - name_template='%s.aff12.1D', - desc='Save the matrix transformation', - argstr='-1Dmatrix_save %s', + name_template="%s.aff12.1D", + desc="Save the matrix transformation", + argstr="-1Dmatrix_save %s", keep_extension=True, - name_source='in_file') + name_source="in_file", + ) interp = traits.Enum( - ('Fourier', 'cubic', 'heptic', 'quintic', 'linear'), - desc='spatial interpolation methods [default = heptic]', - argstr='-%s') + ("Fourier", "cubic", "heptic", "quintic", "linear"), + desc="spatial interpolation methods [default = heptic]", + argstr="-%s", + ) class VolregOutputSpec(TraitedSpec): - out_file = File(desc='registered file', exists=True) - md1d_file = File(desc='max displacement info file', exists=True) - oned_file = File(desc='movement parameters info file', exists=True) + out_file = File(desc="registered file", exists=True) + md1d_file = File(desc="max displacement info file", exists=True) + oned_file = File(desc="movement parameters info file", exists=True) oned_matrix_save = File( - desc='matrix transformation from base to input', exists=True) + desc="matrix transformation from base to input", exists=True + ) class Volreg(AFNICommand): @@ -3025,67 +3280,73 @@ class Volreg(AFNICommand): """ - _cmd = '3dvolreg' + _cmd = "3dvolreg" input_spec = VolregInputSpec output_spec = VolregOutputSpec def _format_arg(self, name, trait_spec, value): - if name == 'in_weight_volume' and not isinstance(value, tuple): + if name == "in_weight_volume" and not isinstance(value, tuple): value = (value, 0) return super(Volreg, self)._format_arg(name, trait_spec, value) class WarpInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dWarp', - argstr='%s', + desc="input file to 3dWarp", + argstr="%s", position=-1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_warp', - desc='output image file name', - argstr='-prefix %s', - name_source='in_file', - keep_extension=True) + name_template="%s_warp", + desc="output image file name", + argstr="-prefix %s", + name_source="in_file", + keep_extension=True, + ) tta2mni = traits.Bool( - desc='transform dataset from Talairach to MNI152', argstr='-tta2mni') + desc="transform dataset from Talairach to MNI152", argstr="-tta2mni" + ) mni2tta = traits.Bool( - desc='transform dataset from MNI152 to Talaraich', argstr='-mni2tta') + desc="transform dataset from MNI152 to Talaraich", argstr="-mni2tta" + ) matparent = File( - desc='apply transformation from 3dWarpDrive', - argstr='-matparent %s', - exists=True) + desc="apply transformation from 3dWarpDrive", + argstr="-matparent %s", + exists=True, + ) oblique_parent = File( - desc='Read in the oblique transformation matrix from an oblique ' - 'dataset and make cardinal dataset oblique to match', - argstr='-oblique_parent %s', - exists=True) + desc="Read in the oblique transformation matrix from an oblique " + "dataset and make cardinal dataset oblique to match", + argstr="-oblique_parent %s", + exists=True, + ) deoblique = traits.Bool( - desc='transform dataset from oblique to cardinal', argstr='-deoblique') + desc="transform dataset from oblique to cardinal", argstr="-deoblique" + ) interp = traits.Enum( - ('linear', 'cubic', 'NN', 'quintic'), - desc='spatial interpolation methods [default = linear]', - argstr='-%s') + ("linear", 
"cubic", "NN", "quintic"), + desc="spatial interpolation methods [default = linear]", + argstr="-%s", + ) gridset = File( - desc='copy grid of specified dataset', - argstr='-gridset %s', - exists=True) - newgrid = traits.Float( - desc='specify grid of this size (mm)', argstr='-newgrid %f') + desc="copy grid of specified dataset", argstr="-gridset %s", exists=True + ) + newgrid = traits.Float(desc="specify grid of this size (mm)", argstr="-newgrid %f") zpad = traits.Int( - desc='pad input dataset with N planes of zero on all sides.', - argstr='-zpad %d') + desc="pad input dataset with N planes of zero on all sides.", argstr="-zpad %d" + ) verbose = traits.Bool( - desc='Print out some information along the way.', argstr='-verb') - save_warp = traits.Bool( - desc='save warp as .mat file', requires=['verbose']) + desc="Print out some information along the way.", argstr="-verb" + ) + save_warp = traits.Bool(desc="save warp as .mat file", requires=["verbose"]) class WarpOutputSpec(TraitedSpec): - out_file = File(desc='Warped file.', exists=True) - warp_file = File(desc='warp transform .mat file') + out_file = File(desc="Warped file.", exists=True) + warp_file = File(desc="warp transform .mat file") class Warp(AFNICommand): @@ -3115,7 +3376,8 @@ class Warp(AFNICommand): >>> res = warp_2.run() # doctest: +SKIP """ - _cmd = '3dWarp' + + _cmd = "3dWarp" input_spec = WarpInputSpec output_spec = WarpOutputSpec @@ -3124,39 +3386,40 @@ def _run_interface(self, runtime): if self.inputs.save_warp: import numpy as np - warp_file = self._list_outputs()['warp_file'] - np.savetxt(warp_file, [runtime.stdout], fmt=str('%s')) + + warp_file = self._list_outputs()["warp_file"] + np.savetxt(warp_file, [runtime.stdout], fmt=str("%s")) return runtime def _list_outputs(self): outputs = super(Warp, self)._list_outputs() if self.inputs.save_warp: - outputs['warp_file'] = fname_presuffix(outputs['out_file'], - suffix='_transform.mat', - use_ext=False) + outputs["warp_file"] = fname_presuffix( + outputs["out_file"], suffix="_transform.mat", use_ext=False + ) return outputs class QwarpInputSpec(AFNICommandInputSpec): in_file = File( - desc= - 'Source image (opposite phase encoding direction than base image).', - argstr='-source %s', + desc="Source image (opposite phase encoding direction than base image).", + argstr="-source %s", mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) base_file = File( - desc= - 'Base image (opposite phase encoding direction than source image).', - argstr='-base %s', + desc="Base image (opposite phase encoding direction than source image).", + argstr="-base %s", mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - argstr='-prefix %s', - name_template='ppp_%s', - name_source=['in_file'], + argstr="-prefix %s", + name_template="ppp_%s", + name_source=["in_file"], desc="""\ Sets the prefix/suffix for the output datasets. * The source dataset is warped to match the base @@ -3190,482 +3453,514 @@ class QwarpInputSpec(AFNICommandInputSpec): * You can easily compute the inverse later, say by a command like 3dNwarpCat -prefix Z_WARPINV 'INV(Z_WARP+tlrc)' or the inverse can be computed as needed in 3dNwarpApply, like - 3dNwarpApply -nwarp 'INV(Z_WARP+tlrc)' -source Dataset.nii ...""") + 3dNwarpApply -nwarp 'INV(Z_WARP+tlrc)' -source Dataset.nii ...""", + ) resample = traits.Bool( - desc='This option simply resamples the source dataset to match the' - 'base dataset grid. 
You can use this if the two datasets' - 'overlap well (as seen in the AFNI GUI), but are not on the' - 'same 3D grid.' - '* If they don\'t overlap well, allineate them first' - '* The reampling here is done with the' - '\'wsinc5\' method, which has very little blurring artifact.' - '* If the base and source datasets ARE on the same 3D grid,' - 'then the -resample option will be ignored.' - '* You CAN use -resample with these 3dQwarp options:' - '-plusminus -inilev -iniwarp -duplo', - argstr='-resample') + desc="This option simply resamples the source dataset to match the" + "base dataset grid. You can use this if the two datasets" + "overlap well (as seen in the AFNI GUI), but are not on the" + "same 3D grid." + "* If they don't overlap well, allineate them first" + "* The reampling here is done with the" + "'wsinc5' method, which has very little blurring artifact." + "* If the base and source datasets ARE on the same 3D grid," + "then the -resample option will be ignored." + "* You CAN use -resample with these 3dQwarp options:" + "-plusminus -inilev -iniwarp -duplo", + argstr="-resample", + ) allineate = traits.Bool( - desc='This option will make 3dQwarp run 3dAllineate first, to align ' - 'the source dataset to the base with an affine transformation. ' - 'It will then use that alignment as a starting point for the ' - 'nonlinear warping.', - argstr='-allineate') + desc="This option will make 3dQwarp run 3dAllineate first, to align " + "the source dataset to the base with an affine transformation. " + "It will then use that alignment as a starting point for the " + "nonlinear warping.", + argstr="-allineate", + ) allineate_opts = traits.Str( - desc='add extra options to the 3dAllineate command to be run by ' - '3dQwarp.', - argstr='-allineate_opts %s', - requires=['allineate']) - nowarp = traits.Bool(desc='Do not save the _WARP file.', argstr='-nowarp') + desc="add extra options to the 3dAllineate command to be run by " "3dQwarp.", + argstr="-allineate_opts %s", + requires=["allineate"], + ) + nowarp = traits.Bool(desc="Do not save the _WARP file.", argstr="-nowarp") iwarp = traits.Bool( - desc='Do compute and save the _WARPINV file.', - argstr='-iwarp', - xor=['plusminus']) + desc="Do compute and save the _WARPINV file.", + argstr="-iwarp", + xor=["plusminus"], + ) pear = traits.Bool( - desc='Use strict Pearson correlation for matching.' - '* Not usually recommended, since the \'clipped Pearson\' method' - 'used by default will reduce the impact of outlier values.', - argstr='-pear') + desc="Use strict Pearson correlation for matching." + "* Not usually recommended, since the 'clipped Pearson' method" + "used by default will reduce the impact of outlier values.", + argstr="-pear", + ) noneg = traits.Bool( - desc='Replace negative values in either input volume with 0.' - '* If there ARE negative input values, and you do NOT use -noneg,' - 'then strict Pearson correlation will be used, since the \'clipped\'' - 'method only is implemented for non-negative volumes.' - '* \'-noneg\' is not the default, since there might be situations where' - 'you want to align datasets with positive and negative values mixed.' - '* But, in many cases, the negative values in a dataset are just the' - 'result of interpolation artifacts (or other peculiarities), and so' - 'they should be ignored. That is what \'-noneg\' is for.', - argstr='-noneg') + desc="Replace negative values in either input volume with 0." 
+ "* If there ARE negative input values, and you do NOT use -noneg," + "then strict Pearson correlation will be used, since the 'clipped'" + "method only is implemented for non-negative volumes." + "* '-noneg' is not the default, since there might be situations where" + "you want to align datasets with positive and negative values mixed." + "* But, in many cases, the negative values in a dataset are just the" + "result of interpolation artifacts (or other peculiarities), and so" + "they should be ignored. That is what '-noneg' is for.", + argstr="-noneg", + ) nopenalty = traits.Bool( - desc='Replace negative values in either input volume with 0.' - '* If there ARE negative input values, and you do NOT use -noneg,' - 'then strict Pearson correlation will be used, since the \'clipped\'' - 'method only is implemented for non-negative volumes.' - '* \'-noneg\' is not the default, since there might be situations where' - 'you want to align datasets with positive and negative values mixed.' - '* But, in many cases, the negative values in a dataset are just the' - 'result of interpolation artifacts (or other peculiarities), and so' - 'they should be ignored. That is what \'-noneg\' is for.', - argstr='-nopenalty') + desc="Replace negative values in either input volume with 0." + "* If there ARE negative input values, and you do NOT use -noneg," + "then strict Pearson correlation will be used, since the 'clipped'" + "method only is implemented for non-negative volumes." + "* '-noneg' is not the default, since there might be situations where" + "you want to align datasets with positive and negative values mixed." + "* But, in many cases, the negative values in a dataset are just the" + "result of interpolation artifacts (or other peculiarities), and so" + "they should be ignored. That is what '-noneg' is for.", + argstr="-nopenalty", + ) penfac = traits.Float( - desc='Use this value to weight the penalty.' - 'The default value is 1.Larger values mean the' - 'penalty counts more, reducing grid distortions,' - 'insha\'Allah; \'-nopenalty\' is the same as \'-penfac 0\'.' - ' -->>* [23 Sep 2013] -- Zhark increased the default value of' - ' the penalty by a factor of 5, and also made it get' - ' progressively larger with each level of refinement.' - ' Thus, warping results will vary from earlier instances' - ' of 3dQwarp.' - ' * The progressive increase in the penalty at higher levels' - ' means that the \'cost function\' can actually look like the' - ' alignment is getting worse when the levels change.' - ' * IF you wish to turn off this progression, for whatever' - ' reason (e.g., to keep compatibility with older results),' - ' use the option \'-penold\'.To be completely compatible with' - ' the older 3dQwarp, you\'ll also have to use \'-penfac 0.2\'.', - argstr='-penfac %f') + desc="Use this value to weight the penalty." + "The default value is 1.Larger values mean the" + "penalty counts more, reducing grid distortions," + "insha'Allah; '-nopenalty' is the same as '-penfac 0'." + " -->>* [23 Sep 2013] -- Zhark increased the default value of" + " the penalty by a factor of 5, and also made it get" + " progressively larger with each level of refinement." + " Thus, warping results will vary from earlier instances" + " of 3dQwarp." + " * The progressive increase in the penalty at higher levels" + " means that the 'cost function' can actually look like the" + " alignment is getting worse when the levels change." 
+ " * IF you wish to turn off this progression, for whatever" + " reason (e.g., to keep compatibility with older results)," + " use the option '-penold'.To be completely compatible with" + " the older 3dQwarp, you'll also have to use '-penfac 0.2'.", + argstr="-penfac %f", + ) noweight = traits.Bool( - desc='If you want a binary weight (the old default), use this option.' - 'That is, each voxel in the base volume automask will be' - 'weighted the same in the computation of the cost functional.', - argstr='-noweight') + desc="If you want a binary weight (the old default), use this option." + "That is, each voxel in the base volume automask will be" + "weighted the same in the computation of the cost functional.", + argstr="-noweight", + ) weight = File( - desc='Instead of computing the weight from the base dataset,' - 'directly input the weight volume from dataset \'www\'.' - '* Useful if you know what over parts of the base image you' - 'want to emphasize or de-emphasize the matching functional.', - argstr='-weight %s', - exists=True) + desc="Instead of computing the weight from the base dataset," + "directly input the weight volume from dataset 'www'." + "* Useful if you know what over parts of the base image you" + "want to emphasize or de-emphasize the matching functional.", + argstr="-weight %s", + exists=True, + ) wball = traits.List( traits.Int(), - desc='-wball x y z r f' - 'Enhance automatic weight from \'-useweight\' by a factor' - 'of 1+f*Gaussian(FWHM=r) centered in the base image at' - 'DICOM coordinates (x,y,z) and with radius \'r\'. The' - 'goal of this option is to try and make the alignment' - 'better in a specific part of the brain.' - '* Example: -wball 0 14 6 30 40' - 'to emphasize the thalamic area (in MNI/Talairach space).' - '* The \'r\' parameter must be positive!' - '* The \'f\' parameter must be between 1 and 100 (inclusive).' - '* \'-wball\' does nothing if you input your own weight' - 'with the \'-weight\' option.' - '* \'-wball\' does change the binary weight created by' - 'the \'-noweight\' option.' - '* You can only use \'-wball\' once in a run of 3dQwarp.' - '*** The effect of \'-wball\' is not dramatic. The example' - 'above makes the average brain image across a collection' - 'of subjects a little sharper in the thalamic area, which' - 'might have some small value. If you care enough about' - 'alignment to use \'-wball\', then you should examine the' - 'results from 3dQwarp for each subject, to see if the' - 'alignments are good enough for your purposes.', - argstr='-wball %s', + desc="-wball x y z r f" + "Enhance automatic weight from '-useweight' by a factor" + "of 1+f*Gaussian(FWHM=r) centered in the base image at" + "DICOM coordinates (x,y,z) and with radius 'r'. The" + "goal of this option is to try and make the alignment" + "better in a specific part of the brain." + "* Example: -wball 0 14 6 30 40" + "to emphasize the thalamic area (in MNI/Talairach space)." + "* The 'r' parameter must be positive!" + "* The 'f' parameter must be between 1 and 100 (inclusive)." + "* '-wball' does nothing if you input your own weight" + "with the '-weight' option." + "* '-wball' does change the binary weight created by" + "the '-noweight' option." + "* You can only use '-wball' once in a run of 3dQwarp." + "*** The effect of '-wball' is not dramatic. The example" + "above makes the average brain image across a collection" + "of subjects a little sharper in the thalamic area, which" + "might have some small value. 
If you care enough about" + "alignment to use '-wball', then you should examine the" + "results from 3dQwarp for each subject, to see if the" + "alignments are good enough for your purposes.", + argstr="-wball %s", minlen=5, - maxlen=5) - traits.Tuple((traits.Float(), traits.Float()), argstr='-bpass %f %f') + maxlen=5, + ) + traits.Tuple((traits.Float(), traits.Float()), argstr="-bpass %f %f") wmask = traits.Tuple( (File(exists=True), traits.Float()), - desc='-wmask ws f' - 'Similar to \'-wball\', but here, you provide a dataset \'ws\'' - 'that indicates where to increase the weight.' - '* The \'ws\' dataset must be on the same 3D grid as the base dataset.' - '* \'ws\' is treated as a mask -- it only matters where it' - 'is nonzero -- otherwise, the values inside are not used.' - '* After \'ws\' comes the factor \'f\' by which to increase the' - 'automatically computed weight. Where \'ws\' is nonzero,' - 'the weighting will be multiplied by (1+f).' - '* As with \'-wball\', the factor \'f\' should be between 1 and 100.' - '* You cannot use \'-wball\' and \'-wmask\' together!', - argstr='-wpass %s %f') + desc="-wmask ws f" + "Similar to '-wball', but here, you provide a dataset 'ws'" + "that indicates where to increase the weight." + "* The 'ws' dataset must be on the same 3D grid as the base dataset." + "* 'ws' is treated as a mask -- it only matters where it" + "is nonzero -- otherwise, the values inside are not used." + "* After 'ws' comes the factor 'f' by which to increase the" + "automatically computed weight. Where 'ws' is nonzero," + "the weighting will be multiplied by (1+f)." + "* As with '-wball', the factor 'f' should be between 1 and 100." + "* You cannot use '-wball' and '-wmask' together!", + argstr="-wpass %s %f", + ) out_weight_file = File( - argstr='-wtprefix %s', - desc='Write the weight volume to disk as a dataset') + argstr="-wtprefix %s", desc="Write the weight volume to disk as a dataset" + ) blur = traits.List( traits.Float(), - desc='Gaussian blur the input images by \'bb\' (FWHM) voxels before' - 'doing the alignment (the output dataset will not be blurred).' - 'The default is 2.345 (for no good reason).' - '* Optionally, you can provide 2 values for \'bb\', and then' - 'the first one is applied to the base volume, the second' - 'to the source volume.' - '-->>* e.g., \'-blur 0 3\' to skip blurring the base image' - '(if the base is a blurry template, for example).' - '* A negative blur radius means to use 3D median filtering,' - 'rather than Gaussian blurring. This type of filtering will' - 'better preserve edges, which can be important in alignment.' - '* If the base is a template volume that is already blurry,' - 'you probably don\'t want to blur it again, but blurring' - 'the source volume a little is probably a good idea, to' - 'help the program avoid trying to match tiny features.' - '* Note that -duplo will blur the volumes some extra' - 'amount for the initial small-scale warping, to make' - 'that phase of the program converge more rapidly.', - argstr='-blur %s', + desc="Gaussian blur the input images by 'bb' (FWHM) voxels before" + "doing the alignment (the output dataset will not be blurred)." + "The default is 2.345 (for no good reason)." + "* Optionally, you can provide 2 values for 'bb', and then" + "the first one is applied to the base volume, the second" + "to the source volume." + "-->>* e.g., '-blur 0 3' to skip blurring the base image" + "(if the base is a blurry template, for example)." 
+ "* A negative blur radius means to use 3D median filtering," + "rather than Gaussian blurring. This type of filtering will" + "better preserve edges, which can be important in alignment." + "* If the base is a template volume that is already blurry," + "you probably don't want to blur it again, but blurring" + "the source volume a little is probably a good idea, to" + "help the program avoid trying to match tiny features." + "* Note that -duplo will blur the volumes some extra" + "amount for the initial small-scale warping, to make" + "that phase of the program converge more rapidly.", + argstr="-blur %s", minlen=1, - maxlen=2) + maxlen=2, + ) pblur = traits.List( traits.Float(), - desc='Use progressive blurring; that is, for larger patch sizes,' - 'the amount of blurring is larger. The general idea is to' - 'avoid trying to match finer details when the patch size' - 'and incremental warps are coarse. When \'-blur\' is used' - 'as well, it sets a minimum amount of blurring that will' - 'be used. [06 Aug 2014 -- \'-pblur\' may become the default someday].' - '* You can optionally give the fraction of the patch size that' - 'is used for the progressive blur by providing a value between' - '0 and 0.25 after \'-pblur\'. If you provide TWO values, the' - 'the first fraction is used for progressively blurring the' - 'base image and the second for the source image. The default' - 'parameters when just \'-pblur\' is given is the same as giving' - 'the options as \'-pblur 0.09 0.09\'.' - '* \'-pblur\' is useful when trying to match 2 volumes with high' - 'amounts of detail; e.g, warping one subject\'s brain image to' - 'match another\'s, or trying to warp to match a detailed template.' - '* Note that using negative values with \'-blur\' means that the' - 'progressive blurring will be done with median filters, rather' - 'than Gaussian linear blurring.' - '-->>*** The combination of the -allineate and -pblur options will make' - 'the results of using 3dQwarp to align to a template somewhat' - 'less sensitive to initial head position and scaling.', - argstr='-pblur %s', + desc="Use progressive blurring; that is, for larger patch sizes," + "the amount of blurring is larger. The general idea is to" + "avoid trying to match finer details when the patch size" + "and incremental warps are coarse. When '-blur' is used" + "as well, it sets a minimum amount of blurring that will" + "be used. [06 Aug 2014 -- '-pblur' may become the default someday]." + "* You can optionally give the fraction of the patch size that" + "is used for the progressive blur by providing a value between" + "0 and 0.25 after '-pblur'. If you provide TWO values, the" + "the first fraction is used for progressively blurring the" + "base image and the second for the source image. The default" + "parameters when just '-pblur' is given is the same as giving" + "the options as '-pblur 0.09 0.09'." + "* '-pblur' is useful when trying to match 2 volumes with high" + "amounts of detail; e.g, warping one subject's brain image to" + "match another's, or trying to warp to match a detailed template." + "* Note that using negative values with '-blur' means that the" + "progressive blurring will be done with median filters, rather" + "than Gaussian linear blurring." 
+ "-->>*** The combination of the -allineate and -pblur options will make" + "the results of using 3dQwarp to align to a template somewhat" + "less sensitive to initial head position and scaling.", + argstr="-pblur %s", minlen=1, - maxlen=2) + maxlen=2, + ) emask = File( - desc='Here, \'ee\' is a dataset to specify a mask of voxels' - 'to EXCLUDE from the analysis -- all voxels in \'ee\'' - 'that are NONZERO will not be used in the alignment.' - '* The base image always automasked -- the emask is' - 'extra, to indicate voxels you definitely DON\'T want' - 'included in the matching process, even if they are' - 'inside the brain.', - argstr='-emask %s', + desc="Here, 'ee' is a dataset to specify a mask of voxels" + "to EXCLUDE from the analysis -- all voxels in 'ee'" + "that are NONZERO will not be used in the alignment." + "* The base image always automasked -- the emask is" + "extra, to indicate voxels you definitely DON'T want" + "included in the matching process, even if they are" + "inside the brain.", + argstr="-emask %s", exists=True, - copyfile=False) - noXdis = traits.Bool( - desc='Warp will not displace in x direction', argstr='-noXdis') - noYdis = traits.Bool( - desc='Warp will not displace in y direction', argstr='-noYdis') - noZdis = traits.Bool( - desc='Warp will not displace in z direction', argstr='-noZdis') + copyfile=False, + ) + noXdis = traits.Bool(desc="Warp will not displace in x direction", argstr="-noXdis") + noYdis = traits.Bool(desc="Warp will not displace in y direction", argstr="-noYdis") + noZdis = traits.Bool(desc="Warp will not displace in z direction", argstr="-noZdis") iniwarp = traits.List( File(exists=True, copyfile=False), - desc='A dataset with an initial nonlinear warp to use.' - '* If this option is not used, the initial warp is the identity.' - '* You can specify a catenation of warps (in quotes) here, as in' - 'program 3dNwarpApply.' - '* As a special case, if you just input an affine matrix in a .1D' - 'file, that will work also -- it is treated as giving the initial' + desc="A dataset with an initial nonlinear warp to use." + "* If this option is not used, the initial warp is the identity." + "* You can specify a catenation of warps (in quotes) here, as in" + "program 3dNwarpApply." + "* As a special case, if you just input an affine matrix in a .1D" + "file, that will work also -- it is treated as giving the initial" 'warp via the string "IDENT(base_dataset) matrix_file.aff12.1D".' - '* You CANNOT use this option with -duplo !!' - '* -iniwarp is usually used with -inilev to re-start 3dQwarp from' - 'a previous stopping point.', - argstr='-iniwarp %s', - xor=['duplo']) + "* You CANNOT use this option with -duplo !!" + "* -iniwarp is usually used with -inilev to re-start 3dQwarp from" + "a previous stopping point.", + argstr="-iniwarp %s", + xor=["duplo"], + ) inilev = traits.Int( - desc='The initial refinement \'level\' at which to start.' - '* Usually used with -iniwarp; CANNOT be used with -duplo.' - '* The combination of -inilev and -iniwarp lets you take the' - 'results of a previous 3dQwarp run and refine them further:' - 'Note that the source dataset in the second run is the SAME as' - 'in the first run. If you don\'t see why this is necessary,' - 'then you probably need to seek help from an AFNI guru.', - argstr='-inilev %d', - xor=['duplo']) + desc="The initial refinement 'level' at which to start." + "* Usually used with -iniwarp; CANNOT be used with -duplo." 
+ "* The combination of -inilev and -iniwarp lets you take the" + "results of a previous 3dQwarp run and refine them further:" + "Note that the source dataset in the second run is the SAME as" + "in the first run. If you don't see why this is necessary," + "then you probably need to seek help from an AFNI guru.", + argstr="-inilev %d", + xor=["duplo"], + ) minpatch = traits.Int( - desc='* The value of mm should be an odd integer.' - '* The default value of mm is 25.' - '* For more accurate results than mm=25, try 19 or 13.' - '* The smallest allowed patch size is 5.' - '* You may want stop at a larger patch size (say 7 or 9) and use' - 'the -Qfinal option to run that final level with quintic warps,' - 'which might run faster and provide the same degree of warp detail.' - '* Trying to make two different brain volumes match in fine detail' - 'is usually a waste of time, especially in humans. There is too' - 'much variability in anatomy to match gyrus to gyrus accurately.' - 'For this reason, the default minimum patch size is 25 voxels.' - 'Using a smaller \'-minpatch\' might try to force the warp to' - 'match features that do not match, and the result can be useless' - 'image distortions -- another reason to LOOK AT THE RESULTS.', - argstr='-minpatch %d') + desc="* The value of mm should be an odd integer." + "* The default value of mm is 25." + "* For more accurate results than mm=25, try 19 or 13." + "* The smallest allowed patch size is 5." + "* You may want stop at a larger patch size (say 7 or 9) and use" + "the -Qfinal option to run that final level with quintic warps," + "which might run faster and provide the same degree of warp detail." + "* Trying to make two different brain volumes match in fine detail" + "is usually a waste of time, especially in humans. There is too" + "much variability in anatomy to match gyrus to gyrus accurately." + "For this reason, the default minimum patch size is 25 voxels." + "Using a smaller '-minpatch' might try to force the warp to" + "match features that do not match, and the result can be useless" + "image distortions -- another reason to LOOK AT THE RESULTS.", + argstr="-minpatch %d", + ) maxlev = traits.Int( - desc='The initial refinement \'level\' at which to start.' - '* Usually used with -iniwarp; CANNOT be used with -duplo.' - '* The combination of -inilev and -iniwarp lets you take the' - 'results of a previous 3dQwarp run and refine them further:' - 'Note that the source dataset in the second run is the SAME as' - 'in the first run. If you don\'t see why this is necessary,' - 'then you probably need to seek help from an AFNI guru.', - argstr='-maxlev %d', - xor=['duplo'], - position=-1) + desc="The initial refinement 'level' at which to start." + "* Usually used with -iniwarp; CANNOT be used with -duplo." + "* The combination of -inilev and -iniwarp lets you take the" + "results of a previous 3dQwarp run and refine them further:" + "Note that the source dataset in the second run is the SAME as" + "in the first run. If you don't see why this is necessary," + "then you probably need to seek help from an AFNI guru.", + argstr="-maxlev %d", + xor=["duplo"], + position=-1, + ) gridlist = File( - desc='This option provides an alternate way to specify the patch' - 'grid sizes used in the warp optimization process. \'gl\' is' - 'a 1D file with a list of patches to use -- in most cases,' - 'you will want to use it in the following form:' - '-gridlist \'1D: 0 151 101 75 51\'' - '* Here, a 0 patch size means the global domain. 
Patch sizes' - 'otherwise should be odd integers >= 5.' - '* If you use the \'0\' patch size again after the first position,' - 'you will actually get an iteration at the size of the' - 'default patch level 1, where the patch sizes are 75% of' - 'the volume dimension. There is no way to force the program' - 'to literally repeat the sui generis step of lev=0.' - '* You cannot use -gridlist with -duplo or -plusminus!', - argstr='-gridlist %s', + desc="This option provides an alternate way to specify the patch" + "grid sizes used in the warp optimization process. 'gl' is" + "a 1D file with a list of patches to use -- in most cases," + "you will want to use it in the following form:" + "-gridlist '1D: 0 151 101 75 51'" + "* Here, a 0 patch size means the global domain. Patch sizes" + "otherwise should be odd integers >= 5." + "* If you use the '0' patch size again after the first position," + "you will actually get an iteration at the size of the" + "default patch level 1, where the patch sizes are 75% of" + "the volume dimension. There is no way to force the program" + "to literally repeat the sui generis step of lev=0." + "* You cannot use -gridlist with -duplo or -plusminus!", + argstr="-gridlist %s", exists=True, copyfile=False, - xor=['duplo', 'plusminus']) + xor=["duplo", "plusminus"], + ) allsave = traits.Bool( - desc='This option lets you save the output warps from each level' - 'of the refinement process. Mostly used for experimenting.' - '* Cannot be used with -nopadWARP, -duplo, or -plusminus.' - '* Will only save all the outputs if the program terminates' - 'normally -- if it crashes, or freezes, then all these' - 'warps are lost.', - argstr='-allsave', - xor=['nopadWARP', 'duplo', 'plusminus']) + desc="This option lets you save the output warps from each level" + "of the refinement process. Mostly used for experimenting." + "* Cannot be used with -nopadWARP, -duplo, or -plusminus." + "* Will only save all the outputs if the program terminates" + "normally -- if it crashes, or freezes, then all these" + "warps are lost.", + argstr="-allsave", + xor=["nopadWARP", "duplo", "plusminus"], + ) duplo = traits.Bool( - desc='Start off with 1/2 scale versions of the volumes,' - 'for getting a speedy coarse first alignment.' - '* Then scales back up to register the full volumes.' - 'The goal is greater speed, and it seems to help this' - 'positively piggish program to be more expeditious.' - '* However, accuracy is somewhat lower with \'-duplo\',' - 'for reasons that currenly elude Zhark; for this reason,' - 'the Emperor does not usually use \'-duplo\'.', - argstr='-duplo', - xor=[ - 'gridlist', 'maxlev', 'inilev', 'iniwarp', 'plusminus', 'allsave' - ]) + desc="Start off with 1/2 scale versions of the volumes," + "for getting a speedy coarse first alignment." + "* Then scales back up to register the full volumes." + "The goal is greater speed, and it seems to help this" + "positively piggish program to be more expeditious." + "* However, accuracy is somewhat lower with '-duplo'," + "for reasons that currenly elude Zhark; for this reason," + "the Emperor does not usually use '-duplo'.", + argstr="-duplo", + xor=["gridlist", "maxlev", "inilev", "iniwarp", "plusminus", "allsave"], + ) workhard = traits.Bool( - desc='Iterate more times, which can help when the volumes are' - 'hard to align at all, or when you hope to get a more precise' - 'alignment.' - '* Slows the program down (possibly a lot), of course.' 
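Aside: the -iniwarp/-inilev combination described above (refining the results of a previous 3dQwarp run) could be driven from this interface roughly as follows; the source, base, and saved-warp file names are placeholders.

>>> from nipype.interfaces import afni
>>> refine = afni.Qwarp()
>>> refine.inputs.in_file = 'sub-01_T1w.nii.gz'           # same source as the first run
>>> refine.inputs.base_file = 'template.nii.gz'           # same base as the first run
>>> refine.inputs.iniwarp = ['first_run_WARP+tlrc.HEAD']  # warp saved by the first run (placeholder path)
>>> refine.inputs.inilev = 7                              # resume at a finer refinement level
>>> refine.inputs.minpatch = 13                           # smaller final patch for more detail
>>> res = refine.run()  # doctest: +SKIP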
- '* When you combine \'-workhard\' with \'-duplo\', only the' - 'full size volumes get the extra iterations.' - '* For finer control over which refinement levels work hard,' - 'you can use this option in the form (for example)' - ' -workhard:4:7' - 'which implies the extra iterations will be done at levels' - '4, 5, 6, and 7, but not otherwise.' - '* You can also use \'-superhard\' to iterate even more, but' - 'this extra option will REALLY slow things down.' - '-->>* Under most circumstances, you should not need to use either' - '-workhard or -superhard.' - '-->>* The fastest way to register to a template image is via the' - '-duplo option, and without the -workhard or -superhard options.' - '-->>* If you use this option in the form \'-Workhard\' (first letter' - 'in upper case), then the second iteration at each level is' - 'done with quintic polynomial warps.', - argstr='-workhard', - xor=['boxopt', 'ballopt']) + desc="Iterate more times, which can help when the volumes are" + "hard to align at all, or when you hope to get a more precise" + "alignment." + "* Slows the program down (possibly a lot), of course." + "* When you combine '-workhard' with '-duplo', only the" + "full size volumes get the extra iterations." + "* For finer control over which refinement levels work hard," + "you can use this option in the form (for example)" + " -workhard:4:7" + "which implies the extra iterations will be done at levels" + "4, 5, 6, and 7, but not otherwise." + "* You can also use '-superhard' to iterate even more, but" + "this extra option will REALLY slow things down." + "-->>* Under most circumstances, you should not need to use either" + "-workhard or -superhard." + "-->>* The fastest way to register to a template image is via the" + "-duplo option, and without the -workhard or -superhard options." + "-->>* If you use this option in the form '-Workhard' (first letter" + "in upper case), then the second iteration at each level is" + "done with quintic polynomial warps.", + argstr="-workhard", + xor=["boxopt", "ballopt"], + ) Qfinal = traits.Bool( - desc='At the finest patch size (the final level), use Hermite' - 'quintic polynomials for the warp instead of cubic polynomials.' - '* In a 3D \'patch\', there are 2x2x2x3=24 cubic polynomial basis' - 'function parameters over which to optimize (2 polynomials' - 'dependent on each of the x,y,z directions, and 3 different' - 'directions of displacement).' - '* There are 3x3x3x3=81 quintic polynomial parameters per patch.' - '* With -Qfinal, the final level will have more detail in' - 'the allowed warps, at the cost of yet more CPU time.' - '* However, no patch below 7x7x7 in size will be done with quintic' - 'polynomials.' - '* This option is also not usually needed, and is experimental.', - argstr='-Qfinal') + desc="At the finest patch size (the final level), use Hermite" + "quintic polynomials for the warp instead of cubic polynomials." + "* In a 3D 'patch', there are 2x2x2x3=24 cubic polynomial basis" + "function parameters over which to optimize (2 polynomials" + "dependent on each of the x,y,z directions, and 3 different" + "directions of displacement)." + "* There are 3x3x3x3=81 quintic polynomial parameters per patch." + "* With -Qfinal, the final level will have more detail in" + "the allowed warps, at the cost of yet more CPU time." + "* However, no patch below 7x7x7 in size will be done with quintic" + "polynomials." 
+ "* This option is also not usually needed, and is experimental.", + argstr="-Qfinal", + ) Qonly = traits.Bool( - desc='Use Hermite quintic polynomials at all levels.' - '* Very slow (about 4 times longer). Also experimental.' - '* Will produce a (discrete representation of a) C2 warp.', - argstr='-Qonly') + desc="Use Hermite quintic polynomials at all levels." + "* Very slow (about 4 times longer). Also experimental." + "* Will produce a (discrete representation of a) C2 warp.", + argstr="-Qonly", + ) plusminus = traits.Bool( - desc='Normally, the warp displacements dis(x) are defined to match' - 'base(x) to source(x+dis(x)). With this option, the match' - 'is between base(x-dis(x)) and source(x+dis(x)) -- the two' - 'images \'meet in the middle\'.' - '* One goal is to mimic the warping done to MRI EPI data by' - 'field inhomogeneities, when registering between a \'blip up\'' - 'and a \'blip down\' down volume, which will have opposite' - 'distortions.' - '* Define Wp(x) = x+dis(x) and Wm(x) = x-dis(x). Then since' - 'base(Wm(x)) matches source(Wp(x)), by substituting INV(Wm(x))' - 'wherever we see x, we have base(x) matches source(Wp(INV(Wm(x))));' - 'that is, the warp V(x) that one would get from the \'usual\' way' - 'of running 3dQwarp is V(x) = Wp(INV(Wm(x))).' - '* Conversely, we can calculate Wp(x) in terms of V(x) as follows:' - 'If V(x) = x + dv(x), define Vh(x) = x + dv(x)/2;' - 'then Wp(x) = V(INV(Vh(x)))' - '* With the above formulas, it is possible to compute Wp(x) from' - 'V(x) and vice-versa, using program 3dNwarpCalc. The requisite' - 'commands are left as an exercise for the aspiring AFNI Jedi Master.' - '* You can use the semi-secret \'-pmBASE\' option to get the V(x)' - 'warp and the source dataset warped to base space, in addition to' - 'the Wp(x) \'_PLUS\' and Wm(x) \'_MINUS\' warps.' - '-->>* Alas: -plusminus does not work with -duplo or -allineate :-(' - '* However, you can use -iniwarp with -plusminus :-)' - '-->>* The outputs have _PLUS (from the source dataset) and _MINUS' - '(from the base dataset) in their filenames, in addition to' - 'the prefix. The -iwarp option, if present, will be ignored.', - argstr='-plusminus', - xor=['duplo', 'allsave', 'iwarp']) + desc="Normally, the warp displacements dis(x) are defined to match" + "base(x) to source(x+dis(x)). With this option, the match" + "is between base(x-dis(x)) and source(x+dis(x)) -- the two" + "images 'meet in the middle'." + "* One goal is to mimic the warping done to MRI EPI data by" + "field inhomogeneities, when registering between a 'blip up'" + "and a 'blip down' down volume, which will have opposite" + "distortions." + "* Define Wp(x) = x+dis(x) and Wm(x) = x-dis(x). Then since" + "base(Wm(x)) matches source(Wp(x)), by substituting INV(Wm(x))" + "wherever we see x, we have base(x) matches source(Wp(INV(Wm(x))));" + "that is, the warp V(x) that one would get from the 'usual' way" + "of running 3dQwarp is V(x) = Wp(INV(Wm(x)))." + "* Conversely, we can calculate Wp(x) in terms of V(x) as follows:" + "If V(x) = x + dv(x), define Vh(x) = x + dv(x)/2;" + "then Wp(x) = V(INV(Vh(x)))" + "* With the above formulas, it is possible to compute Wp(x) from" + "V(x) and vice-versa, using program 3dNwarpCalc. The requisite" + "commands are left as an exercise for the aspiring AFNI Jedi Master." + "* You can use the semi-secret '-pmBASE' option to get the V(x)" + "warp and the source dataset warped to base space, in addition to" + "the Wp(x) '_PLUS' and Wm(x) '_MINUS' warps." 
+ "-->>* Alas: -plusminus does not work with -duplo or -allineate :-(" + "* However, you can use -iniwarp with -plusminus :-)" + "-->>* The outputs have _PLUS (from the source dataset) and _MINUS" + "(from the base dataset) in their filenames, in addition to" + "the prefix. The -iwarp option, if present, will be ignored.", + argstr="-plusminus", + xor=["duplo", "allsave", "iwarp"], + ) nopad = traits.Bool( - desc='Do NOT use zero-padding on the 3D base and source images.' - '[Default == zero-pad, if needed]' - '* The underlying model for deformations goes to zero at the' - 'edge of the volume being warped. However, if there is' - 'significant data near an edge of the volume, then it won\'t' - 'get displaced much, and so the results might not be good.' - '* Zero padding is designed as a way to work around this potential' - 'problem. You should NOT need the \'-nopad\' option for any' - 'reason that Zhark can think of, but it is here to be symmetrical' - 'with 3dAllineate.' - '* Note that the output (warped from source) dataset will be on the' - 'base dataset grid whether or not zero-padding is allowed. However,' - 'unless you use the following option, allowing zero-padding (i.e.,' - 'the default operation) will make the output WARP dataset(s) be' - 'on a larger grid (also see \'-expad\' below).', - argstr='-nopad') + desc="Do NOT use zero-padding on the 3D base and source images." + "[Default == zero-pad, if needed]" + "* The underlying model for deformations goes to zero at the" + "edge of the volume being warped. However, if there is" + "significant data near an edge of the volume, then it won't" + "get displaced much, and so the results might not be good." + "* Zero padding is designed as a way to work around this potential" + "problem. You should NOT need the '-nopad' option for any" + "reason that Zhark can think of, but it is here to be symmetrical" + "with 3dAllineate." + "* Note that the output (warped from source) dataset will be on the" + "base dataset grid whether or not zero-padding is allowed. However," + "unless you use the following option, allowing zero-padding (i.e.," + "the default operation) will make the output WARP dataset(s) be" + "on a larger grid (also see '-expad' below).", + argstr="-nopad", + ) nopadWARP = traits.Bool( - desc='If for some reason you require the warp volume to' - 'match the base volume, then use this option to have the output' - 'WARP dataset(s) truncated.', - argstr='-nopadWARP', - xor=['allsave', 'expad']) + desc="If for some reason you require the warp volume to" + "match the base volume, then use this option to have the output" + "WARP dataset(s) truncated.", + argstr="-nopadWARP", + xor=["allsave", "expad"], + ) expad = traits.Int( - desc='This option instructs the program to pad the warp by an extra' - '\'EE\' voxels (and then 3dQwarp starts optimizing it).' - '* This option is seldom needed, but can be useful if you' - 'might later catenate the nonlinear warp -- via 3dNwarpCat --' - 'with an affine transformation that contains a large shift.' - 'Under that circumstance, the nonlinear warp might be shifted' - 'partially outside its original grid, so expanding that grid' - 'can avoid this problem.' - '* Note that this option perforce turns off \'-nopadWARP\'.', - argstr='-expad %d', - xor=['nopadWARP']) + desc="This option instructs the program to pad the warp by an extra" + "'EE' voxels (and then 3dQwarp starts optimizing it)." 
+ "* This option is seldom needed, but can be useful if you" + "might later catenate the nonlinear warp -- via 3dNwarpCat --" + "with an affine transformation that contains a large shift." + "Under that circumstance, the nonlinear warp might be shifted" + "partially outside its original grid, so expanding that grid" + "can avoid this problem." + "* Note that this option perforce turns off '-nopadWARP'.", + argstr="-expad %d", + xor=["nopadWARP"], + ) ballopt = traits.Bool( - desc='Normally, the incremental warp parameters are optimized inside' - 'a rectangular \'box\' (24 dimensional for cubic patches, 81 for' - 'quintic patches), whose limits define the amount of distortion' - 'allowed at each step. Using \'-ballopt\' switches these limits' - 'to be applied to a \'ball\' (interior of a hypersphere), which' - 'can allow for larger incremental displacements. Use this' - 'option if you think things need to be able to move farther.', - argstr='-ballopt', - xor=['workhard', 'boxopt']) + desc="Normally, the incremental warp parameters are optimized inside" + "a rectangular 'box' (24 dimensional for cubic patches, 81 for" + "quintic patches), whose limits define the amount of distortion" + "allowed at each step. Using '-ballopt' switches these limits" + "to be applied to a 'ball' (interior of a hypersphere), which" + "can allow for larger incremental displacements. Use this" + "option if you think things need to be able to move farther.", + argstr="-ballopt", + xor=["workhard", "boxopt"], + ) baxopt = traits.Bool( - desc='Use the \'box\' optimization limits instead of the \'ball\'' - '[this is the default at present].' - '* Note that if \'-workhard\' is used, then ball and box optimization' - 'are alternated in the different iterations at each level, so' - 'these two options have no effect in that case.', - argstr='-boxopt', - xor=['workhard', 'ballopt']) + desc="Use the 'box' optimization limits instead of the 'ball'" + "[this is the default at present]." + "* Note that if '-workhard' is used, then ball and box optimization" + "are alternated in the different iterations at each level, so" + "these two options have no effect in that case.", + argstr="-boxopt", + xor=["workhard", "ballopt"], + ) verb = traits.Bool( - desc='more detailed description of the process', - argstr='-verb', - xor=['quiet']) + desc="more detailed description of the process", argstr="-verb", xor=["quiet"] + ) quiet = traits.Bool( - desc='Cut out most of the fun fun fun progress messages :-(', - argstr='-quiet', - xor=['verb']) + desc="Cut out most of the fun fun fun progress messages :-(", + argstr="-quiet", + xor=["verb"], + ) # Hidden and semi-hidden options - overwrite = traits.Bool(desc='Overwrite outputs', argstr='-overwrite') + overwrite = traits.Bool(desc="Overwrite outputs", argstr="-overwrite") lpc = traits.Bool( - desc='Local Pearson minimization (i.e., EPI-T1 registration)' - 'This option has not be extensively tested' - 'If you use \'-lpc\', then \'-maxlev 0\' is automatically set.' - 'If you want to go to more refined levels, you can set \'-maxlev\'' - 'This should be set up to have lpc as the second to last argument' - 'and maxlev as the second to last argument, as needed by AFNI' - 'Using maxlev > 1 is not recommended for EPI-T1 alignment.', - argstr='-lpc', - xor=['nmi', 'mi', 'hel', 'lpa', 'pear'], - position=-2) + desc="Local Pearson minimization (i.e., EPI-T1 registration)" + "This option has not be extensively tested" + "If you use '-lpc', then '-maxlev 0' is automatically set." 
+ "If you want to go to more refined levels, you can set '-maxlev'" + "This should be set up to have lpc as the second to last argument" + "and maxlev as the second to last argument, as needed by AFNI" + "Using maxlev > 1 is not recommended for EPI-T1 alignment.", + argstr="-lpc", + xor=["nmi", "mi", "hel", "lpa", "pear"], + position=-2, + ) lpa = traits.Bool( - desc='Local Pearson maximization' - 'This option has not be extensively tested', - argstr='-lpa', - xor=['nmi', 'mi', 'lpc', 'hel', 'pear']) + desc="Local Pearson maximization" "This option has not be extensively tested", + argstr="-lpa", + xor=["nmi", "mi", "lpc", "hel", "pear"], + ) hel = traits.Bool( - desc='Hellinger distance: a matching function for the adventurous' - 'This option has NOT be extensively tested for usefullness' - 'and should be considered experimental at this infundibulum.', - argstr='-hel', - xor=['nmi', 'mi', 'lpc', 'lpa', 'pear']) + desc="Hellinger distance: a matching function for the adventurous" + "This option has NOT be extensively tested for usefullness" + "and should be considered experimental at this infundibulum.", + argstr="-hel", + xor=["nmi", "mi", "lpc", "lpa", "pear"], + ) mi = traits.Bool( - desc='Mutual Information: a matching function for the adventurous' - 'This option has NOT be extensively tested for usefullness' - 'and should be considered experimental at this infundibulum.', - argstr='-mi', - xor=['mi', 'hel', 'lpc', 'lpa', 'pear']) + desc="Mutual Information: a matching function for the adventurous" + "This option has NOT be extensively tested for usefullness" + "and should be considered experimental at this infundibulum.", + argstr="-mi", + xor=["mi", "hel", "lpc", "lpa", "pear"], + ) nmi = traits.Bool( - desc= - 'Normalized Mutual Information: a matching function for the adventurous' - 'This option has NOT be extensively tested for usefullness' - 'and should be considered experimental at this infundibulum.', - argstr='-nmi', - xor=['nmi', 'hel', 'lpc', 'lpa', 'pear']) + desc="Normalized Mutual Information: a matching function for the adventurous" + "This option has NOT be extensively tested for usefullness" + "and should be considered experimental at this infundibulum.", + argstr="-nmi", + xor=["nmi", "hel", "lpc", "lpa", "pear"], + ) class QwarpOutputSpec(TraitedSpec): warped_source = File( - desc='Warped source file. If plusminus is used, this is the undistorted' - 'source file.') - warped_base = File(desc='Undistorted base file.') + desc="Warped source file. If plusminus is used, this is the undistorted" + "source file." + ) + warped_base = File(desc="Undistorted base file.") source_warp = File( desc="Displacement in mm for the source image." "If plusminus is used this is the field suceptibility correction" - "warp (in 'mm') for source image.") + "warp (in 'mm') for source image." + ) base_warp = File( desc="Displacement in mm for the base image." "If plus minus is used, this is the field suceptibility correction" "warp (in 'mm') for base image. 
This is only output if plusminus" - "or iwarp options are passed") + "or iwarp options are passed" + ) weights = File(desc="Auto-computed weight volume.") @@ -3756,12 +4051,13 @@ class Qwarp(AFNICommand): "3dQwarp -allineate -allineate_opts '-cose lpa -verb' -base mni.nii -source structural.nii \ -prefix ppp_structural" >>> res3 = qwarp3.run() # doctest: +SKIP """ - _cmd = '3dQwarp' + + _cmd = "3dQwarp" input_spec = QwarpInputSpec output_spec = QwarpOutputSpec def _format_arg(self, name, spec, value): - if name == 'allineate_opts': + if name == "allineate_opts": return spec.argstr % ("'" + value + "'") return super(Qwarp, self)._format_arg(name, spec, value) @@ -3769,87 +4065,111 @@ def _list_outputs(self): outputs = self.output_spec().get() if not isdefined(self.inputs.out_file): - prefix = self._gen_fname(self.inputs.in_file, suffix='_QW') + prefix = self._gen_fname(self.inputs.in_file, suffix="_QW") outputtype = self.inputs.outputtype - if outputtype == 'AFNI': - ext = '.HEAD' - suffix = '+tlrc' + if outputtype == "AFNI": + ext = ".HEAD" + suffix = "+tlrc" else: ext = Info.output_type_to_ext(outputtype) - suffix = '' + suffix = "" else: prefix = self.inputs.out_file - ext_ind = max([ - prefix.lower().rfind('.nii.gz'), - prefix.lower().rfind('.nii') - ]) + ext_ind = max( + [prefix.lower().rfind(".nii.gz"), prefix.lower().rfind(".nii")] + ) if ext_ind == -1: - ext = '.HEAD' - suffix = '+tlrc' + ext = ".HEAD" + suffix = "+tlrc" else: ext = prefix[ext_ind:] - suffix = '' + suffix = "" # All outputs should be in the same directory as the prefix out_dir = os.path.dirname(os.path.abspath(prefix)) - outputs['warped_source'] = fname_presuffix( - prefix, suffix=suffix, use_ext=False, newpath=out_dir) + ext + outputs["warped_source"] = ( + fname_presuffix(prefix, suffix=suffix, use_ext=False, newpath=out_dir) + ext + ) if not self.inputs.nowarp: - outputs['source_warp'] = fname_presuffix( - prefix, suffix='_WARP' + suffix, use_ext=False, - newpath=out_dir) + ext + outputs["source_warp"] = ( + fname_presuffix( + prefix, suffix="_WARP" + suffix, use_ext=False, newpath=out_dir + ) + + ext + ) if self.inputs.iwarp: - outputs['base_warp'] = fname_presuffix( - prefix, suffix='_WARPINV' + suffix, use_ext=False, - newpath=out_dir) + ext + outputs["base_warp"] = ( + fname_presuffix( + prefix, suffix="_WARPINV" + suffix, use_ext=False, newpath=out_dir + ) + + ext + ) if isdefined(self.inputs.out_weight_file): - outputs['weights'] = os.path.abspath(self.inputs.out_weight_file) + outputs["weights"] = os.path.abspath(self.inputs.out_weight_file) if self.inputs.plusminus: - outputs['warped_source'] = fname_presuffix( - prefix, suffix='_PLUS' + suffix, use_ext=False, - newpath=out_dir) + ext - outputs['warped_base'] = fname_presuffix( - prefix, suffix='_MINUS' + suffix, use_ext=False, - newpath=out_dir) + ext - outputs['source_warp'] = fname_presuffix( - prefix, suffix='_PLUS_WARP' + suffix, use_ext=False, - newpath=out_dir) + ext - outputs['base_warp'] = fname_presuffix( - prefix, suffix='_MINUS_WARP' + suffix, use_ext=False, - newpath=out_dir) + ext + outputs["warped_source"] = ( + fname_presuffix( + prefix, suffix="_PLUS" + suffix, use_ext=False, newpath=out_dir + ) + + ext + ) + outputs["warped_base"] = ( + fname_presuffix( + prefix, suffix="_MINUS" + suffix, use_ext=False, newpath=out_dir + ) + + ext + ) + outputs["source_warp"] = ( + fname_presuffix( + prefix, suffix="_PLUS_WARP" + suffix, use_ext=False, newpath=out_dir + ) + + ext + ) + outputs["base_warp"] = ( + fname_presuffix( + prefix, + 
suffix="_MINUS_WARP" + suffix, + use_ext=False, + newpath=out_dir, + ) + + ext + ) return outputs def _gen_filename(self, name): - if name == 'out_file': - return self._gen_fname(self.inputs.in_file, suffix='_QW') + if name == "out_file": + return self._gen_fname(self.inputs.in_file, suffix="_QW") class QwarpPlusMinusInputSpec(QwarpInputSpec): source_file = File( - desc='Source image (opposite phase encoding direction than base image)', - argstr='-source %s', + desc="Source image (opposite phase encoding direction than base image)", + argstr="-source %s", exists=True, - deprecated='1.1.2', - new_name='in_file', - copyfile=False) + deprecated="1.1.2", + new_name="in_file", + copyfile=False, + ) out_file = File( - 'Qwarp.nii.gz', - argstr='-prefix %s', + "Qwarp.nii.gz", + argstr="-prefix %s", position=0, usedefault=True, - desc="Output file") + desc="Output file", + ) plusminus = traits.Bool( True, usedefault=True, position=1, - desc='Normally, the warp displacements dis(x) are defined to match' - 'base(x) to source(x+dis(x)). With this option, the match' - 'is between base(x-dis(x)) and source(x+dis(x)) -- the two' - 'images \'meet in the middle\'. For more info, view Qwarp` interface', - argstr='-plusminus', - xor=['duplo', 'allsave', 'iwarp']) + desc="Normally, the warp displacements dis(x) are defined to match" + "base(x) to source(x+dis(x)). With this option, the match" + "is between base(x-dis(x)) and source(x+dis(x)) -- the two" + "images 'meet in the middle'. For more info, view Qwarp` interface", + argstr="-plusminus", + xor=["duplo", "allsave", "iwarp"], + ) class QwarpPlusMinus(Qwarp): diff --git a/nipype/interfaces/afni/svm.py b/nipype/interfaces/afni/svm.py index 0453778642..13c83af51c 100644 --- a/nipype/interfaces/afni/svm.py +++ b/nipype/interfaces/afni/svm.py @@ -10,68 +10,74 @@ class SVMTrainInputSpec(AFNICommandInputSpec): # training options ttype = traits.Str( - desc='tname: classification or regression', - argstr='-type %s', - mandatory=True) + desc="tname: classification or regression", argstr="-type %s", mandatory=True + ) in_file = File( - desc='A 3D+t AFNI brik dataset to be used for training.', - argstr='-trainvol %s', + desc="A 3D+t AFNI brik dataset to be used for training.", + argstr="-trainvol %s", mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( name_template="%s_vectors", - desc='output sum of weighted linear support vectors file name', - argstr='-bucket %s', - suffix='_bucket', - name_source="in_file") + desc="output sum of weighted linear support vectors file name", + argstr="-bucket %s", + suffix="_bucket", + name_source="in_file", + ) model = File( name_template="%s_model", - desc='basename for the brik containing the SVM model', - argstr='-model %s', - suffix='_model', - name_source="in_file") + desc="basename for the brik containing the SVM model", + argstr="-model %s", + suffix="_model", + name_source="in_file", + ) alphas = File( name_template="%s_alphas", - desc='output alphas file name', - argstr='-alpha %s', - suffix='_alphas', - name_source="in_file") + desc="output alphas file name", + argstr="-alpha %s", + suffix="_alphas", + name_source="in_file", + ) mask = File( - desc='byte-format brik file used to mask voxels in the analysis', - argstr='-mask %s', + desc="byte-format brik file used to mask voxels in the analysis", + argstr="-mask %s", position=-1, exists=True, - copyfile=False) + copyfile=False, + ) nomodelmask = traits.Bool( - desc='Flag to enable the omission of a mask file', - argstr='-nomodelmask') + 
desc="Flag to enable the omission of a mask file", argstr="-nomodelmask" + ) trainlabels = File( - desc= - '.1D labels corresponding to the stimulus paradigm for the training data.', - argstr='-trainlabels %s', - exists=True) + desc=".1D labels corresponding to the stimulus paradigm for the training data.", + argstr="-trainlabels %s", + exists=True, + ) censor = File( - desc= - '.1D censor file that allows the user to ignore certain samples in the training data.', - argstr='-censor %s', - exists=True) + desc=".1D censor file that allows the user to ignore certain samples in the training data.", + argstr="-censor %s", + exists=True, + ) kernel = traits.Str( - desc= - 'string specifying type of kernel function:linear, polynomial, rbf, sigmoid', - argstr='-kernel %s') + desc="string specifying type of kernel function:linear, polynomial, rbf, sigmoid", + argstr="-kernel %s", + ) max_iterations = traits.Int( - desc='Specify the maximum number of iterations for the optimization.', - argstr='-max_iterations %d') + desc="Specify the maximum number of iterations for the optimization.", + argstr="-max_iterations %d", + ) w_out = traits.Bool( - desc='output sum of weighted linear support vectors', argstr='-wout') - options = traits.Str(desc='additional options for SVM-light', argstr='%s') + desc="output sum of weighted linear support vectors", argstr="-wout" + ) + options = traits.Str(desc="additional options for SVM-light", argstr="%s") class SVMTrainOutputSpec(TraitedSpec): - out_file = File(desc='sum of weighted linear support vectors file name') - model = File(desc='brik containing the SVM model file name') - alphas = File(desc='output alphas file name') + out_file = File(desc="sum of weighted linear support vectors file name") + model = File(desc="brik containing the SVM model file name") + alphas = File(desc="output alphas file name") class SVMTrain(AFNICommand): @@ -95,10 +101,10 @@ class SVMTrain(AFNICommand): """ - _cmd = '3dsvm' + _cmd = "3dsvm" input_spec = SVMTrainInputSpec output_spec = SVMTrainOutputSpec - _additional_metadata = ['suffix'] + _additional_metadata = ["suffix"] def _format_arg(self, name, trait_spec, value): return super(SVMTrain, self)._format_arg(name, trait_spec, value) @@ -107,39 +113,43 @@ def _format_arg(self, name, trait_spec, value): class SVMTestInputSpec(AFNICommandInputSpec): # testing options model = traits.Str( - desc='modname is the basename for the brik containing the SVM model', - argstr='-model %s', - mandatory=True) + desc="modname is the basename for the brik containing the SVM model", + argstr="-model %s", + mandatory=True, + ) in_file = File( - desc='A 3D or 3D+t AFNI brik dataset to be used for testing.', - argstr='-testvol %s', + desc="A 3D or 3D+t AFNI brik dataset to be used for testing.", + argstr="-testvol %s", exists=True, - mandatory=True) + mandatory=True, + ) out_file = File( name_template="%s_predictions", - desc='filename for .1D prediction file(s).', - argstr='-predictions %s') + desc="filename for .1D prediction file(s).", + argstr="-predictions %s", + ) testlabels = File( - desc= - '*true* class category .1D labels for the test dataset. It is used to calculate the prediction accuracy performance', + desc="*true* class category .1D labels for the test dataset. 
It is used to calculate the prediction accuracy performance", exists=True, - argstr='-testlabels %s') + argstr="-testlabels %s", + ) classout = traits.Bool( - desc= - 'Flag to specify that pname files should be integer-valued, corresponding to class category decisions.', - argstr='-classout') + desc="Flag to specify that pname files should be integer-valued, corresponding to class category decisions.", + argstr="-classout", + ) nopredcensord = traits.Bool( - desc= - 'Flag to prevent writing predicted values for censored time-points', - argstr='-nopredcensord') + desc="Flag to prevent writing predicted values for censored time-points", + argstr="-nopredcensord", + ) nodetrend = traits.Bool( - desc= - 'Flag to specify that pname files should not be linearly detrended', - argstr='-nodetrend') + desc="Flag to specify that pname files should not be linearly detrended", + argstr="-nodetrend", + ) multiclass = traits.Bool( - desc='Specifies multiclass algorithm for classification', - argstr='-multiclass %s') - options = traits.Str(desc='additional options for SVM-light', argstr='%s') + desc="Specifies multiclass algorithm for classification", + argstr="-multiclass %s", + ) + options = traits.Str(desc="additional options for SVM-light", argstr="%s") class SVMTest(AFNICommand): @@ -160,6 +170,7 @@ class SVMTest(AFNICommand): >>> res = svmTest.run() # doctest: +SKIP """ - _cmd = '3dsvm' + + _cmd = "3dsvm" input_spec = SVMTestInputSpec output_spec = AFNICommandOutputSpec diff --git a/nipype/interfaces/afni/tests/test_auto_ABoverlap.py b/nipype/interfaces/afni/tests/test_auto_ABoverlap.py index 5fe7d2efd7..63e2e8d652 100644 --- a/nipype/interfaces/afni/tests/test_auto_ABoverlap.py +++ b/nipype/interfaces/afni/tests/test_auto_ABoverlap.py @@ -4,46 +4,30 @@ def test_ABoverlap_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), in_file_a=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-3, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-3, ), in_file_b=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-2, - ), - no_automask=dict(argstr='-no_automask', ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_file=dict( - argstr=' |& tee %s', - extensions=None, - position=-1, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-2, ), + no_automask=dict(argstr="-no_automask",), + num_threads=dict(nohash=True, usedefault=True,), + out_file=dict(argstr=" |& tee %s", extensions=None, position=-1,), outputtype=dict(), - quiet=dict(argstr='-quiet', ), - verb=dict(argstr='-verb', ), + quiet=dict(argstr="-quiet",), + verb=dict(argstr="-verb",), ) inputs = ABoverlap.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ABoverlap_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = ABoverlap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_AFNICommand.py b/nipype/interfaces/afni/tests/test_auto_AFNICommand.py index 17f152c304..0764b4947b 100644 --- a/nipype/interfaces/afni/tests/test_auto_AFNICommand.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNICommand.py @@ -4,20 +4,14 @@ def 
test_AFNICommand_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source=['in_file'], - name_template='%s_afni', + name_source=["in_file"], + name_template="%s_afni", ), outputtype=dict(), ) diff --git a/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py b/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py index ce0a85708c..58de0b425a 100644 --- a/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py @@ -4,11 +4,7 @@ def test_AFNICommandBase_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), environ=dict(nohash=True, usedefault=True,), ) inputs = AFNICommandBase.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py b/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py index d9e3508113..e437676286 100644 --- a/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py @@ -4,20 +4,14 @@ def test_AFNIPythonCommand_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source=['in_file'], - name_template='%s_afni', + name_source=["in_file"], + name_template="%s_afni", ), outputtype=dict(), ) diff --git a/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py b/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py index df58646223..19012ff364 100644 --- a/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py @@ -4,48 +4,34 @@ def test_AFNItoNIFTI_inputs(): input_map = dict( - args=dict(argstr='%s', ), - denote=dict(argstr='-denote', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + denote=dict(argstr="-denote",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - newid=dict( - argstr='-newid', - xor=['oldid'], - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - oldid=dict( - argstr='-oldid', - xor=['newid'], + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, ), + newid=dict(argstr="-newid", xor=["oldid"],), + num_threads=dict(nohash=True, usedefault=True,), + oldid=dict(argstr="-oldid", xor=["newid"],), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, hash_files=False, - name_source='in_file', - name_template='%s.nii', + name_source="in_file", + name_template="%s.nii", ), outputtype=dict(), - pure=dict(argstr='-pure', ), + pure=dict(argstr="-pure",), ) inputs = AFNItoNIFTI.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_AFNItoNIFTI_outputs(): - output_map = dict(out_file=dict(extensions=None, ), 
) + output_map = dict(out_file=dict(extensions=None,),) outputs = AFNItoNIFTI.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py b/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py index cb2389e67b..e2b8f5a3c9 100644 --- a/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py +++ b/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py @@ -4,63 +4,42 @@ def test_AlignEpiAnatPy_inputs(): input_map = dict( - anat=dict( - argstr='-anat %s', - copyfile=False, - extensions=None, - mandatory=True, - ), - anat2epi=dict(argstr='-anat2epi', ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - epi2anat=dict(argstr='-epi2anat', ), - epi_base=dict( - argstr='-epi_base %s', - mandatory=True, - ), - epi_strip=dict(argstr='-epi_strip %s', ), + anat=dict(argstr="-anat %s", copyfile=False, extensions=None, mandatory=True,), + anat2epi=dict(argstr="-anat2epi",), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + epi2anat=dict(argstr="-epi2anat",), + epi_base=dict(argstr="-epi_base %s", mandatory=True,), + epi_strip=dict(argstr="-epi_strip %s",), in_file=dict( - argstr='-epi %s', - copyfile=False, - extensions=None, - mandatory=True, + argstr="-epi %s", copyfile=False, extensions=None, mandatory=True, ), outputtype=dict(), - py27_path=dict(usedefault=True, ), - save_skullstrip=dict(argstr='-save_skullstrip', ), - suffix=dict( - argstr='-suffix %s', - usedefault=True, - ), - tshift=dict( - argstr='-tshift %s', - usedefault=True, - ), - volreg=dict( - argstr='-volreg %s', - usedefault=True, - ), + py27_path=dict(usedefault=True,), + save_skullstrip=dict(argstr="-save_skullstrip",), + suffix=dict(argstr="-suffix %s", usedefault=True,), + tshift=dict(argstr="-tshift %s", usedefault=True,), + volreg=dict(argstr="-volreg %s", usedefault=True,), ) inputs = AlignEpiAnatPy.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_AlignEpiAnatPy_outputs(): output_map = dict( - anat_al_mat=dict(extensions=None, ), - anat_al_orig=dict(extensions=None, ), - epi_al_mat=dict(extensions=None, ), - epi_al_orig=dict(extensions=None, ), - epi_al_tlrc_mat=dict(extensions=None, ), - epi_reg_al_mat=dict(extensions=None, ), - epi_tlrc_al=dict(extensions=None, ), - epi_vr_al_mat=dict(extensions=None, ), - epi_vr_motion=dict(extensions=None, ), - skullstrip=dict(extensions=None, ), + anat_al_mat=dict(extensions=None,), + anat_al_orig=dict(extensions=None,), + epi_al_mat=dict(extensions=None,), + epi_al_orig=dict(extensions=None,), + epi_al_tlrc_mat=dict(extensions=None,), + epi_reg_al_mat=dict(extensions=None,), + epi_tlrc_al=dict(extensions=None,), + epi_vr_al_mat=dict(extensions=None,), + epi_vr_motion=dict(extensions=None,), + skullstrip=dict(extensions=None,), ) outputs = AlignEpiAnatPy.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Allineate.py b/nipype/interfaces/afni/tests/test_auto_Allineate.py index 0891e60621..356e7d52a0 100644 --- a/nipype/interfaces/afni/tests/test_auto_Allineate.py +++ b/nipype/interfaces/afni/tests/test_auto_Allineate.py @@ -5,132 +5,103 @@ def test_Allineate_inputs(): input_map = dict( allcostx=dict( - argstr='-allcostx |& tee %s', + argstr="-allcostx |& tee %s", extensions=None, position=-1, - xor=[ - 'out_file', 'out_matrix', 'out_param_file', 'out_weight_file' - ], + xor=["out_file", "out_matrix", "out_param_file", 
"out_weight_file"], ), - args=dict(argstr='%s', ), - autobox=dict(argstr='-autobox', ), - automask=dict(argstr='-automask+%d', ), - autoweight=dict(argstr='-autoweight%s', ), - center_of_mass=dict(argstr='-cmass%s', ), - check=dict(argstr='-check %s', ), - convergence=dict(argstr='-conv %f', ), - cost=dict(argstr='-cost %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - epi=dict(argstr='-EPI', ), - final_interpolation=dict(argstr='-final %s', ), - fine_blur=dict(argstr='-fineblur %f', ), + args=dict(argstr="%s",), + autobox=dict(argstr="-autobox",), + automask=dict(argstr="-automask+%d",), + autoweight=dict(argstr="-autoweight%s",), + center_of_mass=dict(argstr="-cmass%s",), + check=dict(argstr="-check %s",), + convergence=dict(argstr="-conv %f",), + cost=dict(argstr="-cost %s",), + environ=dict(nohash=True, usedefault=True,), + epi=dict(argstr="-EPI",), + final_interpolation=dict(argstr="-final %s",), + fine_blur=dict(argstr="-fineblur %f",), in_file=dict( - argstr='-source %s', - copyfile=False, - extensions=None, - mandatory=True, + argstr="-source %s", copyfile=False, extensions=None, mandatory=True, ), in_matrix=dict( - argstr='-1Dmatrix_apply %s', + argstr="-1Dmatrix_apply %s", extensions=None, position=-3, - xor=['out_matrix'], + xor=["out_matrix"], ), in_param_file=dict( - argstr='-1Dparam_apply %s', - extensions=None, - xor=['out_param_file'], - ), - interpolation=dict(argstr='-interp %s', ), - master=dict( - argstr='-master %s', - extensions=None, - ), - maxrot=dict(argstr='-maxrot %f', ), - maxscl=dict(argstr='-maxscl %f', ), - maxshf=dict(argstr='-maxshf %f', ), - maxshr=dict(argstr='-maxshr %f', ), - newgrid=dict(argstr='-newgrid %f', ), - nmatch=dict(argstr='-nmatch %d', ), - no_pad=dict(argstr='-nopad', ), - nomask=dict(argstr='-nomask', ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="-1Dparam_apply %s", extensions=None, xor=["out_param_file"], ), - nwarp=dict(argstr='-nwarp %s', ), - nwarp_fixdep=dict(argstr='-nwarp_fixdep%s...', ), - nwarp_fixmot=dict(argstr='-nwarp_fixmot%s...', ), - one_pass=dict(argstr='-onepass', ), + interpolation=dict(argstr="-interp %s",), + master=dict(argstr="-master %s", extensions=None,), + maxrot=dict(argstr="-maxrot %f",), + maxscl=dict(argstr="-maxscl %f",), + maxshf=dict(argstr="-maxshf %f",), + maxshr=dict(argstr="-maxshr %f",), + newgrid=dict(argstr="-newgrid %f",), + nmatch=dict(argstr="-nmatch %d",), + no_pad=dict(argstr="-nopad",), + nomask=dict(argstr="-nomask",), + num_threads=dict(nohash=True, usedefault=True,), + nwarp=dict(argstr="-nwarp %s",), + nwarp_fixdep=dict(argstr="-nwarp_fixdep%s...",), + nwarp_fixmot=dict(argstr="-nwarp_fixmot%s...",), + one_pass=dict(argstr="-onepass",), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, hash_files=False, - name_source='in_file', - name_template='%s_allineate', - xor=['allcostx'], + name_source="in_file", + name_template="%s_allineate", + xor=["allcostx"], ), out_matrix=dict( - argstr='-1Dmatrix_save %s', - extensions=None, - xor=['in_matrix', 'allcostx'], + argstr="-1Dmatrix_save %s", extensions=None, xor=["in_matrix", "allcostx"], ), out_param_file=dict( - argstr='-1Dparam_save %s', - extensions=None, - xor=['in_param_file', 'allcostx'], - ), - out_weight_file=dict( - argstr='-wtprefix %s', + argstr="-1Dparam_save %s", extensions=None, - xor=['allcostx'], + xor=["in_param_file", "allcostx"], ), + out_weight_file=dict(argstr="-wtprefix %s", extensions=None, xor=["allcostx"],), outputtype=dict(), - 
overwrite=dict(argstr='-overwrite', ), - quiet=dict(argstr='-quiet', ), - reference=dict( - argstr='-base %s', - extensions=None, - ), - replacebase=dict(argstr='-replacebase', ), - replacemeth=dict(argstr='-replacemeth %s', ), - source_automask=dict(argstr='-source_automask+%d', ), - source_mask=dict( - argstr='-source_mask %s', - extensions=None, - ), - two_best=dict(argstr='-twobest %d', ), - two_blur=dict(argstr='-twoblur %f', ), - two_first=dict(argstr='-twofirst', ), - two_pass=dict(argstr='-twopass', ), - usetemp=dict(argstr='-usetemp', ), - verbose=dict(argstr='-verb', ), - warp_type=dict(argstr='-warp %s', ), - warpfreeze=dict(argstr='-warpfreeze', ), - weight=dict(argstr='-weight %s', ), + overwrite=dict(argstr="-overwrite",), + quiet=dict(argstr="-quiet",), + reference=dict(argstr="-base %s", extensions=None,), + replacebase=dict(argstr="-replacebase",), + replacemeth=dict(argstr="-replacemeth %s",), + source_automask=dict(argstr="-source_automask+%d",), + source_mask=dict(argstr="-source_mask %s", extensions=None,), + two_best=dict(argstr="-twobest %d",), + two_blur=dict(argstr="-twoblur %f",), + two_first=dict(argstr="-twofirst",), + two_pass=dict(argstr="-twopass",), + usetemp=dict(argstr="-usetemp",), + verbose=dict(argstr="-verb",), + warp_type=dict(argstr="-warp %s",), + warpfreeze=dict(argstr="-warpfreeze",), + weight=dict(argstr="-weight %s",), weight_file=dict( - argstr='-weight %s', - deprecated='1.0.0', - extensions=None, - new_name='weight', + argstr="-weight %s", deprecated="1.0.0", extensions=None, new_name="weight", ), - zclip=dict(argstr='-zclip', ), + zclip=dict(argstr="-zclip",), ) inputs = Allineate.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Allineate_outputs(): output_map = dict( - allcostx=dict(extensions=None, ), - out_file=dict(extensions=None, ), - out_matrix=dict(extensions=None, ), - out_param_file=dict(extensions=None, ), - out_weight_file=dict(extensions=None, ), + allcostx=dict(extensions=None,), + out_file=dict(extensions=None,), + out_matrix=dict(extensions=None,), + out_param_file=dict(extensions=None,), + out_weight_file=dict(extensions=None,), ) outputs = Allineate.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py b/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py index 337835e5d1..a2b13596e6 100644 --- a/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py +++ b/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py @@ -4,22 +4,13 @@ def test_AutoTLRC_inputs(): input_map = dict( - args=dict(argstr='%s', ), - base=dict( - argstr='-base %s', - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + base=dict(argstr="-base %s", mandatory=True,), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='-input %s', - copyfile=False, - extensions=None, - mandatory=True, + argstr="-input %s", copyfile=False, extensions=None, mandatory=True, ), - no_ss=dict(argstr='-no_ss', ), + no_ss=dict(argstr="-no_ss",), outputtype=dict(), ) inputs = AutoTLRC.input_spec() @@ -27,8 +18,10 @@ def test_AutoTLRC_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_AutoTLRC_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = AutoTLRC.output_spec() for key, metadata 
in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py b/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py index 3ddf4ef69c..344ec503ce 100644 --- a/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py +++ b/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py @@ -4,52 +4,36 @@ def test_AutoTcorrelate_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - eta2=dict(argstr='-eta2', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + eta2=dict(argstr="-eta2",), in_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - mask=dict( - argstr='-mask %s', - extensions=None, - ), - mask_only_targets=dict( - argstr='-mask_only_targets', - xor=['mask_source'], + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, ), + mask=dict(argstr="-mask %s", extensions=None,), + mask_only_targets=dict(argstr="-mask_only_targets", xor=["mask_source"],), mask_source=dict( - argstr='-mask_source %s', - extensions=None, - xor=['mask_only_targets'], - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="-mask_source %s", extensions=None, xor=["mask_only_targets"], ), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_file', - name_template='%s_similarity_matrix.1D', + name_source="in_file", + name_template="%s_similarity_matrix.1D", ), outputtype=dict(), - polort=dict(argstr='-polort %d', ), + polort=dict(argstr="-polort %d",), ) inputs = AutoTcorrelate.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_AutoTcorrelate_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = AutoTcorrelate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Autobox.py b/nipype/interfaces/afni/tests/test_auto_Autobox.py index 4837cf2d50..91eca4d811 100644 --- a/nipype/interfaces/afni/tests/test_auto_Autobox.py +++ b/nipype/interfaces/afni/tests/test_auto_Autobox.py @@ -4,39 +4,32 @@ def test_Autobox_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='-input %s', - copyfile=False, - extensions=None, - mandatory=True, - ), - no_clustering=dict(argstr='-noclust', ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="-input %s", copyfile=False, extensions=None, mandatory=True, ), + no_clustering=dict(argstr="-noclust",), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_file', - name_template='%s_autobox', + name_source="in_file", + name_template="%s_autobox", ), outputtype=dict(), - padding=dict(argstr='-npad %d', ), + padding=dict(argstr="-npad %d",), ) inputs = Autobox.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Autobox_outputs(): output_map = dict( - out_file=dict(extensions=None, ), + out_file=dict(extensions=None,), x_max=dict(), x_min=dict(), y_max=dict(), diff --git 
a/nipype/interfaces/afni/tests/test_auto_Automask.py b/nipype/interfaces/afni/tests/test_auto_Automask.py index 052c143f79..27f8040695 100644 --- a/nipype/interfaces/afni/tests/test_auto_Automask.py +++ b/nipype/interfaces/afni/tests/test_auto_Automask.py @@ -4,36 +4,26 @@ def test_Automask_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), brain_file=dict( - argstr='-apply_prefix %s', + argstr="-apply_prefix %s", extensions=None, - name_source='in_file', - name_template='%s_masked', + name_source="in_file", + name_template="%s_masked", ), - clfrac=dict(argstr='-clfrac %s', ), - dilate=dict(argstr='-dilate %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - erode=dict(argstr='-erode %s', ), + clfrac=dict(argstr="-clfrac %s",), + dilate=dict(argstr="-dilate %s",), + environ=dict(nohash=True, usedefault=True,), + erode=dict(argstr="-erode %s",), in_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, ), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_file', - name_template='%s_mask', + name_source="in_file", + name_template="%s_mask", ), outputtype=dict(), ) @@ -42,10 +32,11 @@ def test_Automask_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Automask_outputs(): output_map = dict( - brain_file=dict(extensions=None, ), - out_file=dict(extensions=None, ), + brain_file=dict(extensions=None,), out_file=dict(extensions=None,), ) outputs = Automask.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Axialize.py b/nipype/interfaces/afni/tests/test_auto_Axialize.py index 0b14be386d..2c4fafbb5b 100644 --- a/nipype/interfaces/afni/tests/test_auto_Axialize.py +++ b/nipype/interfaces/afni/tests/test_auto_Axialize.py @@ -4,51 +4,34 @@ def test_Axialize_inputs(): input_map = dict( - args=dict(argstr='%s', ), - axial=dict( - argstr='-axial', - xor=['coronal', 'sagittal'], - ), - coronal=dict( - argstr='-coronal', - xor=['sagittal', 'axial'], - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + axial=dict(argstr="-axial", xor=["coronal", "sagittal"],), + coronal=dict(argstr="-coronal", xor=["sagittal", "axial"],), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-2, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-2, ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - orientation=dict(argstr='-orient %s', ), + num_threads=dict(nohash=True, usedefault=True,), + orientation=dict(argstr="-orient %s",), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_file', - name_template='%s_axialize', + name_source="in_file", + name_template="%s_axialize", ), outputtype=dict(), - sagittal=dict( - argstr='-sagittal', - xor=['coronal', 'axial'], - ), - verb=dict(argstr='-verb', ), + sagittal=dict(argstr="-sagittal", xor=["coronal", "axial"],), + verb=dict(argstr="-verb",), ) inputs = Axialize.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def 
test_Axialize_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Axialize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Bandpass.py b/nipype/interfaces/afni/tests/test_auto_Bandpass.py index d8e54e0211..b17a8433ca 100644 --- a/nipype/interfaces/afni/tests/test_auto_Bandpass.py +++ b/nipype/interfaces/afni/tests/test_auto_Bandpass.py @@ -4,67 +4,44 @@ def test_Bandpass_inputs(): input_map = dict( - args=dict(argstr='%s', ), - automask=dict(argstr='-automask', ), - blur=dict(argstr='-blur %f', ), - despike=dict(argstr='-despike', ), - environ=dict( - nohash=True, - usedefault=True, - ), - highpass=dict( - argstr='%f', - mandatory=True, - position=-3, - ), + args=dict(argstr="%s",), + automask=dict(argstr="-automask",), + blur=dict(argstr="-blur %f",), + despike=dict(argstr="-despike",), + environ=dict(nohash=True, usedefault=True,), + highpass=dict(argstr="%f", mandatory=True, position=-3,), in_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - localPV=dict(argstr='-localPV %f', ), - lowpass=dict( - argstr='%f', - mandatory=True, - position=-2, - ), - mask=dict( - argstr='-mask %s', - extensions=None, - position=2, - ), - nfft=dict(argstr='-nfft %d', ), - no_detrend=dict(argstr='-nodetrend', ), - normalize=dict(argstr='-norm', ), - notrans=dict(argstr='-notrans', ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - orthogonalize_dset=dict( - argstr='-dsort %s', - extensions=None, - ), - orthogonalize_file=dict(argstr='-ort %s', ), + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, + ), + localPV=dict(argstr="-localPV %f",), + lowpass=dict(argstr="%f", mandatory=True, position=-2,), + mask=dict(argstr="-mask %s", extensions=None, position=2,), + nfft=dict(argstr="-nfft %d",), + no_detrend=dict(argstr="-nodetrend",), + normalize=dict(argstr="-norm",), + notrans=dict(argstr="-notrans",), + num_threads=dict(nohash=True, usedefault=True,), + orthogonalize_dset=dict(argstr="-dsort %s", extensions=None,), + orthogonalize_file=dict(argstr="-ort %s",), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_file', - name_template='%s_bp', + name_source="in_file", + name_template="%s_bp", position=1, ), outputtype=dict(), - tr=dict(argstr='-dt %f', ), + tr=dict(argstr="-dt %f",), ) inputs = Bandpass.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Bandpass_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Bandpass.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_BlurInMask.py b/nipype/interfaces/afni/tests/test_auto_BlurInMask.py index 86c62f56eb..ec44fed3ee 100644 --- a/nipype/interfaces/afni/tests/test_auto_BlurInMask.py +++ b/nipype/interfaces/afni/tests/test_auto_BlurInMask.py @@ -4,57 +4,41 @@ def test_BlurInMask_inputs(): input_map = dict( - args=dict(argstr='%s', ), - automask=dict(argstr='-automask', ), - environ=dict( - nohash=True, - usedefault=True, - ), - float_out=dict(argstr='-float', ), - fwhm=dict( - argstr='-FWHM %f', - mandatory=True, - ), + args=dict(argstr="%s",), + automask=dict(argstr="-automask",), + environ=dict(nohash=True, usedefault=True,), 
+ float_out=dict(argstr="-float",), + fwhm=dict(argstr="-FWHM %f", mandatory=True,), in_file=dict( - argstr='-input %s', + argstr="-input %s", copyfile=False, extensions=None, mandatory=True, position=1, ), - mask=dict( - argstr='-mask %s', - extensions=None, - ), - multimask=dict( - argstr='-Mmask %s', - extensions=None, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - options=dict( - argstr='%s', - position=2, - ), + mask=dict(argstr="-mask %s", extensions=None,), + multimask=dict(argstr="-Mmask %s", extensions=None,), + num_threads=dict(nohash=True, usedefault=True,), + options=dict(argstr="%s", position=2,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_file', - name_template='%s_blur', + name_source="in_file", + name_template="%s_blur", position=-1, ), outputtype=dict(), - preserve=dict(argstr='-preserve', ), + preserve=dict(argstr="-preserve",), ) inputs = BlurInMask.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BlurInMask_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = BlurInMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py b/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py index 35f8b2bb80..87788b8b3d 100644 --- a/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py +++ b/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py @@ -4,36 +4,20 @@ def test_BlurToFWHM_inputs(): input_map = dict( - args=dict(argstr='%s', ), - automask=dict(argstr='-automask', ), - blurmaster=dict( - argstr='-blurmaster %s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fwhm=dict(argstr='-FWHM %f', ), - fwhmxy=dict(argstr='-FWHMxy %f', ), - in_file=dict( - argstr='-input %s', - extensions=None, - mandatory=True, - ), - mask=dict( - argstr='-mask %s', - extensions=None, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + automask=dict(argstr="-automask",), + blurmaster=dict(argstr="-blurmaster %s", extensions=None,), + environ=dict(nohash=True, usedefault=True,), + fwhm=dict(argstr="-FWHM %f",), + fwhmxy=dict(argstr="-FWHMxy %f",), + in_file=dict(argstr="-input %s", extensions=None, mandatory=True,), + mask=dict(argstr="-mask %s", extensions=None,), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source=['in_file'], - name_template='%s_afni', + name_source=["in_file"], + name_template="%s_afni", ), outputtype=dict(), ) @@ -42,8 +26,10 @@ def test_BlurToFWHM_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BlurToFWHM_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = BlurToFWHM.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_BrickStat.py b/nipype/interfaces/afni/tests/test_auto_BrickStat.py index d1119bc1e5..0d3bc11c21 100644 --- a/nipype/interfaces/afni/tests/test_auto_BrickStat.py +++ b/nipype/interfaces/afni/tests/test_auto_BrickStat.py @@ -4,40 +4,27 @@ def test_BrickStat_inputs(): input_map = dict( - args=dict(argstr='%s', ), 
- environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - ), - mask=dict( - argstr='-mask %s', - extensions=None, - position=2, - ), - max=dict(argstr='-max', ), - mean=dict(argstr='-mean', ), - min=dict( - argstr='-min', - position=1, - ), - percentile=dict(argstr='-percentile %.3f %.3f %.3f', ), - slow=dict(argstr='-slow', ), - sum=dict(argstr='-sum', ), - var=dict(argstr='-var', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), + mask=dict(argstr="-mask %s", extensions=None, position=2,), + max=dict(argstr="-max",), + mean=dict(argstr="-mean",), + min=dict(argstr="-min", position=1,), + percentile=dict(argstr="-percentile %.3f %.3f %.3f",), + slow=dict(argstr="-slow",), + sum=dict(argstr="-sum",), + var=dict(argstr="-var",), ) inputs = BrickStat.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BrickStat_outputs(): - output_map = dict(min_val=dict(), ) + output_map = dict(min_val=dict(),) outputs = BrickStat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Bucket.py b/nipype/interfaces/afni/tests/test_auto_Bucket.py index 7410a5c70b..de301feaed 100644 --- a/nipype/interfaces/afni/tests/test_auto_Bucket.py +++ b/nipype/interfaces/afni/tests/test_auto_Bucket.py @@ -4,25 +4,11 @@ def test_Bucket_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - mandatory=True, - position=-1, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_file=dict( - argstr='-prefix %s', - extensions=None, - name_template='buck', - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", mandatory=True, position=-1,), + num_threads=dict(nohash=True, usedefault=True,), + out_file=dict(argstr="-prefix %s", extensions=None, name_template="buck",), outputtype=dict(), ) inputs = Bucket.input_spec() @@ -30,8 +16,10 @@ def test_Bucket_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Bucket_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Bucket.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Calc.py b/nipype/interfaces/afni/tests/test_auto_Calc.py index f7da2d3a5e..5b49623773 100644 --- a/nipype/interfaces/afni/tests/test_auto_Calc.py +++ b/nipype/interfaces/afni/tests/test_auto_Calc.py @@ -4,59 +4,35 @@ def test_Calc_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - expr=dict( - argstr='-expr "%s"', - mandatory=True, - position=3, - ), - in_file_a=dict( - argstr='-a %s', - extensions=None, - mandatory=True, - position=0, - ), - in_file_b=dict( - argstr='-b %s', - extensions=None, - position=1, - ), - in_file_c=dict( - argstr='-c %s', - extensions=None, - position=2, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - other=dict( - argstr='', - extensions=None, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + expr=dict(argstr='-expr 
"%s"', mandatory=True, position=3,), + in_file_a=dict(argstr="-a %s", extensions=None, mandatory=True, position=0,), + in_file_b=dict(argstr="-b %s", extensions=None, position=1,), + in_file_c=dict(argstr="-c %s", extensions=None, position=2,), + num_threads=dict(nohash=True, usedefault=True,), + other=dict(argstr="", extensions=None,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_file_a', - name_template='%s_calc', + name_source="in_file_a", + name_template="%s_calc", ), outputtype=dict(), - overwrite=dict(argstr='-overwrite', ), + overwrite=dict(argstr="-overwrite",), single_idx=dict(), - start_idx=dict(requires=['stop_idx'], ), - stop_idx=dict(requires=['start_idx'], ), + start_idx=dict(requires=["stop_idx"],), + stop_idx=dict(requires=["start_idx"],), ) inputs = Calc.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Calc_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Calc.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Cat.py b/nipype/interfaces/afni/tests/test_auto_Cat.py index 01bfc29781..07a1d2979c 100644 --- a/nipype/interfaces/afni/tests/test_auto_Cat.py +++ b/nipype/interfaces/afni/tests/test_auto_Cat.py @@ -4,69 +4,55 @@ def test_Cat_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr='%s', - mandatory=True, - position=-2, + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_files=dict(argstr="%s", mandatory=True, position=-2,), + keepfree=dict(argstr="-nonfixed",), + num_threads=dict(nohash=True, usedefault=True,), + omitconst=dict(argstr="-nonconst",), + out_cint=dict( + xor=["out_format", "out_nice", "out_double", "out_fint", "out_int"], ), - keepfree=dict(argstr='-nonfixed', ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - omitconst=dict(argstr='-nonconst', ), - out_cint=dict(xor=[ - 'out_format', 'out_nice', 'out_double', 'out_fint', 'out_int' - ], ), out_double=dict( - argstr='-d', - xor=['out_format', 'out_nice', 'out_int', 'out_fint', 'out_cint'], + argstr="-d", + xor=["out_format", "out_nice", "out_int", "out_fint", "out_cint"], ), out_file=dict( - argstr='> %s', + argstr="> %s", extensions=None, mandatory=True, position=-1, usedefault=True, ), out_fint=dict( - argstr='-f', - xor=[ - 'out_format', 'out_nice', 'out_double', 'out_int', 'out_cint' - ], + argstr="-f", + xor=["out_format", "out_nice", "out_double", "out_int", "out_cint"], ), out_format=dict( - argstr='-form %s', - xor=['out_int', 'out_nice', 'out_double', 'out_fint', 'out_cint'], + argstr="-form %s", + xor=["out_int", "out_nice", "out_double", "out_fint", "out_cint"], ), out_int=dict( - argstr='-i', - xor=[ - 'out_format', 'out_nice', 'out_double', 'out_fint', 'out_cint' - ], + argstr="-i", + xor=["out_format", "out_nice", "out_double", "out_fint", "out_cint"], ), out_nice=dict( - argstr='-n', - xor=[ - 'out_format', 'out_int', 'out_double', 'out_fint', 'out_cint' - ], + argstr="-n", + xor=["out_format", "out_int", "out_double", "out_fint", "out_cint"], ), outputtype=dict(), - sel=dict(argstr='-sel %s', ), - stack=dict(argstr='-stack', ), + sel=dict(argstr="-sel %s",), + stack=dict(argstr="-stack",), ) inputs = Cat.input_spec() for key, metadata in 
list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Cat_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Cat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_CatMatvec.py b/nipype/interfaces/afni/tests/test_auto_CatMatvec.py index 117b64d0ea..f1716f6084 100644 --- a/nipype/interfaces/afni/tests/test_auto_CatMatvec.py +++ b/nipype/interfaces/afni/tests/test_auto_CatMatvec.py @@ -4,39 +4,20 @@ def test_CatMatvec_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fourxfour=dict( - argstr='-4x4', - xor=['matrix', 'oneline'], - ), - in_file=dict( - argstr='%s', - mandatory=True, - position=-2, - ), - matrix=dict( - argstr='-MATRIX', - xor=['oneline', 'fourxfour'], - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - oneline=dict( - argstr='-ONELINE', - xor=['matrix', 'fourxfour'], - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + fourxfour=dict(argstr="-4x4", xor=["matrix", "oneline"],), + in_file=dict(argstr="%s", mandatory=True, position=-2,), + matrix=dict(argstr="-MATRIX", xor=["oneline", "fourxfour"],), + num_threads=dict(nohash=True, usedefault=True,), + oneline=dict(argstr="-ONELINE", xor=["matrix", "fourxfour"],), out_file=dict( - argstr=' > %s', + argstr=" > %s", extensions=None, keep_extension=False, mandatory=True, - name_source='in_file', - name_template='%s_cat.aff12.1D', + name_source="in_file", + name_template="%s_cat.aff12.1D", position=-1, ), outputtype=dict(), @@ -46,8 +27,10 @@ def test_CatMatvec_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CatMatvec_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = CatMatvec.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_CenterMass.py b/nipype/interfaces/afni/tests/test_auto_CenterMass.py index 9fe4acf7db..07975af563 100644 --- a/nipype/interfaces/afni/tests/test_auto_CenterMass.py +++ b/nipype/interfaces/afni/tests/test_auto_CenterMass.py @@ -4,47 +4,37 @@ def test_CenterMass_inputs(): input_map = dict( - all_rois=dict(argstr='-all_rois', ), - args=dict(argstr='%s', ), - automask=dict(argstr='-automask', ), + all_rois=dict(argstr="-all_rois",), + args=dict(argstr="%s",), + automask=dict(argstr="-automask",), cm_file=dict( - argstr='> %s', + argstr="> %s", extensions=None, hash_files=False, keep_extension=False, - name_source='in_file', - name_template='%s_cm.out', + name_source="in_file", + name_template="%s_cm.out", position=-1, ), - environ=dict( - nohash=True, - usedefault=True, - ), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='%s', - copyfile=True, - extensions=None, - mandatory=True, - position=-2, - ), - local_ijk=dict(argstr='-local_ijk', ), - mask_file=dict( - argstr='-mask %s', - extensions=None, + argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-2, ), - roi_vals=dict(argstr='-roi_vals %s', ), - set_cm=dict(argstr='-set %f %f %f', ), + local_ijk=dict(argstr="-local_ijk",), + mask_file=dict(argstr="-mask %s", extensions=None,), + roi_vals=dict(argstr="-roi_vals %s",), + 
set_cm=dict(argstr="-set %f %f %f",), ) inputs = CenterMass.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CenterMass_outputs(): output_map = dict( - cm=dict(), - cm_file=dict(extensions=None, ), - out_file=dict(extensions=None, ), + cm=dict(), cm_file=dict(extensions=None,), out_file=dict(extensions=None,), ) outputs = CenterMass.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_ClipLevel.py b/nipype/interfaces/afni/tests/test_auto_ClipLevel.py index 95152081ca..576ea68f89 100644 --- a/nipype/interfaces/afni/tests/test_auto_ClipLevel.py +++ b/nipype/interfaces/afni/tests/test_auto_ClipLevel.py @@ -4,40 +4,22 @@ def test_ClipLevel_inputs(): input_map = dict( - args=dict(argstr='%s', ), - doall=dict( - argstr='-doall', - position=3, - xor='grad', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - grad=dict( - argstr='-grad %s', - extensions=None, - position=3, - xor='doall', - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - ), - mfrac=dict( - argstr='-mfrac %s', - position=2, - ), + args=dict(argstr="%s",), + doall=dict(argstr="-doall", position=3, xor="grad",), + environ=dict(nohash=True, usedefault=True,), + grad=dict(argstr="-grad %s", extensions=None, position=3, xor="doall",), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), + mfrac=dict(argstr="-mfrac %s", position=2,), ) inputs = ClipLevel.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ClipLevel_outputs(): - output_map = dict(clip_val=dict(), ) + output_map = dict(clip_val=dict(),) outputs = ClipLevel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_ConvertDset.py b/nipype/interfaces/afni/tests/test_auto_ConvertDset.py index 41f816dfc7..249fe47843 100644 --- a/nipype/interfaces/afni/tests/test_auto_ConvertDset.py +++ b/nipype/interfaces/afni/tests/test_auto_ConvertDset.py @@ -4,32 +4,14 @@ def test_ConvertDset_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-input %s', - extensions=None, - mandatory=True, - position=-2, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-input %s", extensions=None, mandatory=True, position=-2,), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', - extensions=None, - mandatory=True, - position=-1, - ), - out_type=dict( - argstr='-o_%s', - mandatory=True, - position=0, + argstr="-prefix %s", extensions=None, mandatory=True, position=-1, ), + out_type=dict(argstr="-o_%s", mandatory=True, position=0,), outputtype=dict(), ) inputs = ConvertDset.input_spec() @@ -37,8 +19,10 @@ def test_ConvertDset_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ConvertDset_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = ConvertDset.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Copy.py 
b/nipype/interfaces/afni/tests/test_auto_Copy.py index 9a716722e1..a601202c48 100644 --- a/nipype/interfaces/afni/tests/test_auto_Copy.py +++ b/nipype/interfaces/afni/tests/test_auto_Copy.py @@ -4,39 +4,31 @@ def test_Copy_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-2, - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-2, ), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, - name_source='in_file', - name_template='%s_copy', + name_source="in_file", + name_template="%s_copy", position=-1, ), outputtype=dict(), - verbose=dict(argstr='-verb', ), + verbose=dict(argstr="-verb",), ) inputs = Copy.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Copy_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Copy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Deconvolve.py b/nipype/interfaces/afni/tests/test_auto_Deconvolve.py index c6969bbbe6..c03fafcf46 100644 --- a/nipype/interfaces/afni/tests/test_auto_Deconvolve.py +++ b/nipype/interfaces/afni/tests/test_auto_Deconvolve.py @@ -4,129 +4,69 @@ def test_Deconvolve_inputs(): input_map = dict( - STATmask=dict( - argstr='-STATmask %s', - extensions=None, - ), - TR_1D=dict(argstr='-TR_1D %f', ), - allzero_OK=dict(argstr='-allzero_OK', ), - args=dict(argstr='%s', ), - automask=dict(argstr='-automask', ), - cbucket=dict(argstr='-cbucket %s', ), - censor=dict( - argstr='-censor %s', - extensions=None, - ), - dmbase=dict(argstr='-dmbase', ), - dname=dict(argstr='-D%s=%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - force_TR=dict( - argstr='-force_TR %f', - position=0, - ), - fout=dict(argstr='-fout', ), - global_times=dict( - argstr='-global_times', - xor=['local_times'], - ), - glt_label=dict( - argstr='-glt_label %d %s...', - position=-1, - requires=['gltsym'], - ), - gltsym=dict( - argstr="-gltsym 'SYM: %s'...", - position=-2, - ), - goforit=dict(argstr='-GOFORIT %i', ), - in_files=dict( - argstr='-input %s', - copyfile=False, - position=1, - sep=' ', - ), - input1D=dict( - argstr='-input1D %s', - extensions=None, - ), - legendre=dict(argstr='-legendre', ), - local_times=dict( - argstr='-local_times', - xor=['global_times'], - ), - mask=dict( - argstr='-mask %s', - extensions=None, - ), - noblock=dict(argstr='-noblock', ), - nocond=dict(argstr='-nocond', ), - nodmbase=dict(argstr='-nodmbase', ), - nofdr=dict(argstr='-noFDR', ), - nolegendre=dict(argstr='-nolegendre', ), - nosvd=dict(argstr='-nosvd', ), - num_glt=dict( - argstr='-num_glt %d', - position=-3, - ), - num_stimts=dict( - argstr='-num_stimts %d', - position=-6, - ), - num_threads=dict( - argstr='-jobs %d', - nohash=True, - ), - ortvec=dict(argstr='-ortvec %s %s', ), - out_file=dict( - argstr='-bucket %s', - extensions=None, - ), + STATmask=dict(argstr="-STATmask %s", extensions=None,), + TR_1D=dict(argstr="-TR_1D %f",), + allzero_OK=dict(argstr="-allzero_OK",), + args=dict(argstr="%s",), + automask=dict(argstr="-automask",), + 
cbucket=dict(argstr="-cbucket %s",), + censor=dict(argstr="-censor %s", extensions=None,), + dmbase=dict(argstr="-dmbase",), + dname=dict(argstr="-D%s=%s",), + environ=dict(nohash=True, usedefault=True,), + force_TR=dict(argstr="-force_TR %f", position=0,), + fout=dict(argstr="-fout",), + global_times=dict(argstr="-global_times", xor=["local_times"],), + glt_label=dict(argstr="-glt_label %d %s...", position=-1, requires=["gltsym"],), + gltsym=dict(argstr="-gltsym 'SYM: %s'...", position=-2,), + goforit=dict(argstr="-GOFORIT %i",), + in_files=dict(argstr="-input %s", copyfile=False, position=1, sep=" ",), + input1D=dict(argstr="-input1D %s", extensions=None,), + legendre=dict(argstr="-legendre",), + local_times=dict(argstr="-local_times", xor=["global_times"],), + mask=dict(argstr="-mask %s", extensions=None,), + noblock=dict(argstr="-noblock",), + nocond=dict(argstr="-nocond",), + nodmbase=dict(argstr="-nodmbase",), + nofdr=dict(argstr="-noFDR",), + nolegendre=dict(argstr="-nolegendre",), + nosvd=dict(argstr="-nosvd",), + num_glt=dict(argstr="-num_glt %d", position=-3,), + num_stimts=dict(argstr="-num_stimts %d", position=-6,), + num_threads=dict(argstr="-jobs %d", nohash=True,), + ortvec=dict(argstr="-ortvec %s %s",), + out_file=dict(argstr="-bucket %s", extensions=None,), outputtype=dict(), - polort=dict(argstr='-polort %d', ), - rmsmin=dict(argstr='-rmsmin %f', ), - rout=dict(argstr='-rout', ), - sat=dict( - argstr='-sat', - xor=['trans'], - ), - singvals=dict(argstr='-singvals', ), + polort=dict(argstr="-polort %d",), + rmsmin=dict(argstr="-rmsmin %f",), + rout=dict(argstr="-rout",), + sat=dict(argstr="-sat", xor=["trans"],), + singvals=dict(argstr="-singvals",), stim_label=dict( - argstr='-stim_label %d %s...', - position=-4, - requires=['stim_times'], - ), - stim_times=dict( - argstr="-stim_times %d %s '%s'...", - position=-5, - ), - stim_times_subtract=dict(argstr='-stim_times_subtract %f', ), - svd=dict(argstr='-svd', ), - tout=dict(argstr='-tout', ), - trans=dict( - argstr='-trans', - xor=['sat'], - ), - vout=dict(argstr='-vout', ), - x1D=dict( - argstr='-x1D %s', - extensions=None, - ), - x1D_stop=dict(argstr='-x1D_stop', ), + argstr="-stim_label %d %s...", position=-4, requires=["stim_times"], + ), + stim_times=dict(argstr="-stim_times %d %s '%s'...", position=-5,), + stim_times_subtract=dict(argstr="-stim_times_subtract %f",), + svd=dict(argstr="-svd",), + tout=dict(argstr="-tout",), + trans=dict(argstr="-trans", xor=["sat"],), + vout=dict(argstr="-vout",), + x1D=dict(argstr="-x1D %s", extensions=None,), + x1D_stop=dict(argstr="-x1D_stop",), ) inputs = Deconvolve.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Deconvolve_outputs(): output_map = dict( - cbucket=dict(extensions=None, ), - out_file=dict(extensions=None, ), - reml_script=dict(extensions=None, ), - x1D=dict(extensions=None, ), + cbucket=dict(extensions=None,), + out_file=dict(extensions=None,), + reml_script=dict(extensions=None,), + x1D=dict(extensions=None,), ) outputs = Deconvolve.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py b/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py index 183745d2cb..0fb10a0713 100644 --- a/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py +++ b/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py @@ -4,49 +4,37 @@ def test_DegreeCentrality_inputs(): input_map = dict( - args=dict(argstr='%s', ), - 
autoclip=dict(argstr='-autoclip', ), - automask=dict(argstr='-automask', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + autoclip=dict(argstr="-autoclip",), + automask=dict(argstr="-automask",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - mask=dict( - argstr='-mask %s', - extensions=None, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - oned_file=dict(argstr='-out1D %s', ), + mask=dict(argstr="-mask %s", extensions=None,), + num_threads=dict(nohash=True, usedefault=True,), + oned_file=dict(argstr="-out1D %s",), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source=['in_file'], - name_template='%s_afni', + name_source=["in_file"], + name_template="%s_afni", ), outputtype=dict(), - polort=dict(argstr='-polort %d', ), - sparsity=dict(argstr='-sparsity %f', ), - thresh=dict(argstr='-thresh %f', ), + polort=dict(argstr="-polort %d",), + sparsity=dict(argstr="-sparsity %f",), + thresh=dict(argstr="-thresh %f",), ) inputs = DegreeCentrality.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DegreeCentrality_outputs(): output_map = dict( - oned_file=dict(extensions=None, ), - out_file=dict(extensions=None, ), + oned_file=dict(extensions=None,), out_file=dict(extensions=None,), ) outputs = DegreeCentrality.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Despike.py b/nipype/interfaces/afni/tests/test_auto_Despike.py index 9d955e11d2..00a6666894 100644 --- a/nipype/interfaces/afni/tests/test_auto_Despike.py +++ b/nipype/interfaces/afni/tests/test_auto_Despike.py @@ -4,27 +4,17 @@ def test_Despike_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, ), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_file', - name_template='%s_despike', + name_source="in_file", + name_template="%s_despike", ), outputtype=dict(), ) @@ -33,8 +23,10 @@ def test_Despike_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Despike_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Despike.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Detrend.py b/nipype/interfaces/afni/tests/test_auto_Detrend.py index 227f7585ac..275c45208b 100644 --- a/nipype/interfaces/afni/tests/test_auto_Detrend.py +++ b/nipype/interfaces/afni/tests/test_auto_Detrend.py @@ -4,27 +4,17 @@ def test_Detrend_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='%s', - 
copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, ), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_file', - name_template='%s_detrend', + name_source="in_file", + name_template="%s_detrend", ), outputtype=dict(), ) @@ -33,8 +23,10 @@ def test_Detrend_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Detrend_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Detrend.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Dot.py b/nipype/interfaces/afni/tests/test_auto_Dot.py index 561be9cd93..d1c7812cdd 100644 --- a/nipype/interfaces/afni/tests/test_auto_Dot.py +++ b/nipype/interfaces/afni/tests/test_auto_Dot.py @@ -4,48 +4,34 @@ def test_Dot_inputs(): input_map = dict( - args=dict(argstr='%s', ), - demean=dict(argstr='-demean', ), - docoef=dict(argstr='-docoef', ), - docor=dict(argstr='-docor', ), - dodice=dict(argstr='-dodice', ), - dodot=dict(argstr='-dodot', ), - doeta2=dict(argstr='-doeta2', ), - dosums=dict(argstr='-dosums', ), - environ=dict( - nohash=True, - usedefault=True, - ), - full=dict(argstr='-full', ), - in_files=dict( - argstr='%s ...', - position=-2, - ), - mask=dict( - argstr='-mask %s', - extensions=None, - ), - mrange=dict(argstr='-mrange %s %s', ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_file=dict( - argstr=' |& tee %s', - extensions=None, - position=-1, - ), + args=dict(argstr="%s",), + demean=dict(argstr="-demean",), + docoef=dict(argstr="-docoef",), + docor=dict(argstr="-docor",), + dodice=dict(argstr="-dodice",), + dodot=dict(argstr="-dodot",), + doeta2=dict(argstr="-doeta2",), + dosums=dict(argstr="-dosums",), + environ=dict(nohash=True, usedefault=True,), + full=dict(argstr="-full",), + in_files=dict(argstr="%s ...", position=-2,), + mask=dict(argstr="-mask %s", extensions=None,), + mrange=dict(argstr="-mrange %s %s",), + num_threads=dict(nohash=True, usedefault=True,), + out_file=dict(argstr=" |& tee %s", extensions=None, position=-1,), outputtype=dict(), - show_labels=dict(argstr='-show_labels', ), - upper=dict(argstr='-upper', ), + show_labels=dict(argstr="-show_labels",), + upper=dict(argstr="-upper",), ) inputs = Dot.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Dot_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Dot.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_ECM.py b/nipype/interfaces/afni/tests/test_auto_ECM.py index 0bcc8f005e..83d5508bae 100644 --- a/nipype/interfaces/afni/tests/test_auto_ECM.py +++ b/nipype/interfaces/afni/tests/test_auto_ECM.py @@ -4,53 +4,42 @@ def test_ECM_inputs(): input_map = dict( - args=dict(argstr='%s', ), - autoclip=dict(argstr='-autoclip', ), - automask=dict(argstr='-automask', ), - environ=dict( - nohash=True, - usedefault=True, - ), - eps=dict(argstr='-eps %f', ), - fecm=dict(argstr='-fecm', ), - full=dict(argstr='-full', 
), + args=dict(argstr="%s",), + autoclip=dict(argstr="-autoclip",), + automask=dict(argstr="-automask",), + environ=dict(nohash=True, usedefault=True,), + eps=dict(argstr="-eps %f",), + fecm=dict(argstr="-fecm",), + full=dict(argstr="-full",), in_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - mask=dict( - argstr='-mask %s', - extensions=None, - ), - max_iter=dict(argstr='-max_iter %d', ), - memory=dict(argstr='-memory %f', ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, ), + mask=dict(argstr="-mask %s", extensions=None,), + max_iter=dict(argstr="-max_iter %d",), + memory=dict(argstr="-memory %f",), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source=['in_file'], - name_template='%s_afni', + name_source=["in_file"], + name_template="%s_afni", ), outputtype=dict(), - polort=dict(argstr='-polort %d', ), - scale=dict(argstr='-scale %f', ), - shift=dict(argstr='-shift %f', ), - sparsity=dict(argstr='-sparsity %f', ), - thresh=dict(argstr='-thresh %f', ), + polort=dict(argstr="-polort %d",), + scale=dict(argstr="-scale %f",), + shift=dict(argstr="-shift %f",), + sparsity=dict(argstr="-sparsity %f",), + thresh=dict(argstr="-thresh %f",), ) inputs = ECM.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ECM_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = ECM.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Edge3.py b/nipype/interfaces/afni/tests/test_auto_Edge3.py index e15dc131b8..484920d596 100644 --- a/nipype/interfaces/afni/tests/test_auto_Edge3.py +++ b/nipype/interfaces/afni/tests/test_auto_Edge3.py @@ -4,54 +4,36 @@ def test_Edge3_inputs(): input_map = dict( - args=dict(argstr='%s', ), - datum=dict(argstr='-datum %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fscale=dict( - argstr='-fscale', - xor=['gscale', 'nscale', 'scale_floats'], - ), - gscale=dict( - argstr='-gscale', - xor=['fscale', 'nscale', 'scale_floats'], - ), + args=dict(argstr="%s",), + datum=dict(argstr="-datum %s",), + environ=dict(nohash=True, usedefault=True,), + fscale=dict(argstr="-fscale", xor=["gscale", "nscale", "scale_floats"],), + gscale=dict(argstr="-gscale", xor=["fscale", "nscale", "scale_floats"],), in_file=dict( - argstr='-input %s', + argstr="-input %s", copyfile=False, extensions=None, mandatory=True, position=0, ), - nscale=dict( - argstr='-nscale', - xor=['fscale', 'gscale', 'scale_floats'], - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_file=dict( - argstr='-prefix %s', - extensions=None, - position=-1, - ), + nscale=dict(argstr="-nscale", xor=["fscale", "gscale", "scale_floats"],), + num_threads=dict(nohash=True, usedefault=True,), + out_file=dict(argstr="-prefix %s", extensions=None, position=-1,), outputtype=dict(), scale_floats=dict( - argstr='-scale_floats %f', - xor=['fscale', 'gscale', 'nscale'], + argstr="-scale_floats %f", xor=["fscale", "gscale", "nscale"], ), - verbose=dict(argstr='-verbose', ), + verbose=dict(argstr="-verbose",), ) inputs = Edge3.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert 
getattr(inputs.traits()[key], metakey) == value + + def test_Edge3_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Edge3.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Eval.py b/nipype/interfaces/afni/tests/test_auto_Eval.py index 475898c32a..1bc22fcb6c 100644 --- a/nipype/interfaces/afni/tests/test_auto_Eval.py +++ b/nipype/interfaces/afni/tests/test_auto_Eval.py @@ -4,59 +4,35 @@ def test_Eval_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - expr=dict( - argstr='-expr "%s"', - mandatory=True, - position=3, - ), - in_file_a=dict( - argstr='-a %s', - extensions=None, - mandatory=True, - position=0, - ), - in_file_b=dict( - argstr='-b %s', - extensions=None, - position=1, - ), - in_file_c=dict( - argstr='-c %s', - extensions=None, - position=2, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - other=dict( - argstr='', - extensions=None, - ), - out1D=dict(argstr='-1D', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + expr=dict(argstr='-expr "%s"', mandatory=True, position=3,), + in_file_a=dict(argstr="-a %s", extensions=None, mandatory=True, position=0,), + in_file_b=dict(argstr="-b %s", extensions=None, position=1,), + in_file_c=dict(argstr="-c %s", extensions=None, position=2,), + num_threads=dict(nohash=True, usedefault=True,), + other=dict(argstr="", extensions=None,), + out1D=dict(argstr="-1D",), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_file_a', - name_template='%s_calc', + name_source="in_file_a", + name_template="%s_calc", ), outputtype=dict(), single_idx=dict(), - start_idx=dict(requires=['stop_idx'], ), - stop_idx=dict(requires=['start_idx'], ), + start_idx=dict(requires=["stop_idx"],), + stop_idx=dict(requires=["start_idx"],), ) inputs = Eval.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Eval_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Eval.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_FWHMx.py b/nipype/interfaces/afni/tests/test_auto_FWHMx.py index 8d7d7d234c..1785a675b5 100644 --- a/nipype/interfaces/afni/tests/test_auto_FWHMx.py +++ b/nipype/interfaces/afni/tests/test_auto_FWHMx.py @@ -4,84 +4,57 @@ def test_FWHMx_inputs(): input_map = dict( - acf=dict( - argstr='-acf', - usedefault=True, - ), - args=dict(argstr='%s', ), - arith=dict( - argstr='-arith', - xor=['geom'], - ), - automask=dict( - argstr='-automask', - usedefault=True, - ), - combine=dict(argstr='-combine', ), - compat=dict(argstr='-compat', ), - demed=dict( - argstr='-demed', - xor=['detrend'], - ), - detrend=dict( - argstr='-detrend', - usedefault=True, - xor=['demed'], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - geom=dict( - argstr='-geom', - xor=['arith'], - ), - in_file=dict( - argstr='-input %s', - extensions=None, - mandatory=True, - ), - mask=dict( - argstr='-mask %s', - extensions=None, - ), + acf=dict(argstr="-acf", usedefault=True,), + args=dict(argstr="%s",), + arith=dict(argstr="-arith", xor=["geom"],), + automask=dict(argstr="-automask", usedefault=True,), + combine=dict(argstr="-combine",), + 
compat=dict(argstr="-compat",), + demed=dict(argstr="-demed", xor=["detrend"],), + detrend=dict(argstr="-detrend", usedefault=True, xor=["demed"],), + environ=dict(nohash=True, usedefault=True,), + geom=dict(argstr="-geom", xor=["arith"],), + in_file=dict(argstr="-input %s", extensions=None, mandatory=True,), + mask=dict(argstr="-mask %s", extensions=None,), out_detrend=dict( - argstr='-detprefix %s', + argstr="-detprefix %s", extensions=None, keep_extension=False, - name_source='in_file', - name_template='%s_detrend', + name_source="in_file", + name_template="%s_detrend", ), out_file=dict( - argstr='> %s', + argstr="> %s", extensions=None, keep_extension=False, - name_source='in_file', - name_template='%s_fwhmx.out', + name_source="in_file", + name_template="%s_fwhmx.out", position=-1, ), out_subbricks=dict( - argstr='-out %s', + argstr="-out %s", extensions=None, keep_extension=False, - name_source='in_file', - name_template='%s_subbricks.out', + name_source="in_file", + name_template="%s_subbricks.out", ), - unif=dict(argstr='-unif', ), + unif=dict(argstr="-unif",), ) inputs = FWHMx.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FWHMx_outputs(): output_map = dict( acf_param=dict(), fwhm=dict(), - out_acf=dict(extensions=None, ), - out_detrend=dict(extensions=None, ), - out_file=dict(extensions=None, ), - out_subbricks=dict(extensions=None, ), + out_acf=dict(extensions=None,), + out_detrend=dict(extensions=None,), + out_file=dict(extensions=None,), + out_subbricks=dict(extensions=None,), ) outputs = FWHMx.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Fim.py b/nipype/interfaces/afni/tests/test_auto_Fim.py index 49cf009096..6a07de12e5 100644 --- a/nipype/interfaces/afni/tests/test_auto_Fim.py +++ b/nipype/interfaces/afni/tests/test_auto_Fim.py @@ -4,41 +4,26 @@ def test_Fim_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fim_thr=dict( - argstr='-fim_thr %f', - position=3, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + fim_thr=dict(argstr="-fim_thr %f", position=3,), ideal_file=dict( - argstr='-ideal_file %s', - extensions=None, - mandatory=True, - position=2, + argstr="-ideal_file %s", extensions=None, mandatory=True, position=2, ), in_file=dict( - argstr='-input %s', + argstr="-input %s", copyfile=False, extensions=None, mandatory=True, position=1, ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out=dict( - argstr='-out %s', - position=4, - ), + num_threads=dict(nohash=True, usedefault=True,), + out=dict(argstr="-out %s", position=4,), out_file=dict( - argstr='-bucket %s', + argstr="-bucket %s", extensions=None, - name_source='in_file', - name_template='%s_fim', + name_source="in_file", + name_template="%s_fim", ), outputtype=dict(), ) @@ -47,8 +32,10 @@ def test_Fim_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Fim_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Fim.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Fourier.py b/nipype/interfaces/afni/tests/test_auto_Fourier.py index 1f1ef8bc4f..0c648fb149 100644 --- a/nipype/interfaces/afni/tests/test_auto_Fourier.py 
+++ b/nipype/interfaces/afni/tests/test_auto_Fourier.py @@ -4,46 +4,32 @@ def test_Fourier_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - highpass=dict( - argstr='-highpass %f', - mandatory=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + highpass=dict(argstr="-highpass %f", mandatory=True,), in_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - lowpass=dict( - argstr='-lowpass %f', - mandatory=True, - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, ), + lowpass=dict(argstr="-lowpass %f", mandatory=True,), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_file', - name_template='%s_fourier', + name_source="in_file", + name_template="%s_fourier", ), outputtype=dict(), - retrend=dict(argstr='-retrend', ), + retrend=dict(argstr="-retrend",), ) inputs = Fourier.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Fourier_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Fourier.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_GCOR.py b/nipype/interfaces/afni/tests/test_auto_GCOR.py index 98708d79fa..b63b269d6a 100644 --- a/nipype/interfaces/afni/tests/test_auto_GCOR.py +++ b/nipype/interfaces/afni/tests/test_auto_GCOR.py @@ -4,33 +4,28 @@ def test_GCOR_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='-input %s', + argstr="-input %s", copyfile=False, extensions=None, mandatory=True, position=-1, ), - mask=dict( - argstr='-mask %s', - copyfile=False, - extensions=None, - ), - nfirst=dict(argstr='-nfirst %d', ), - no_demean=dict(argstr='-no_demean', ), + mask=dict(argstr="-mask %s", copyfile=False, extensions=None,), + nfirst=dict(argstr="-nfirst %d",), + no_demean=dict(argstr="-no_demean",), ) inputs = GCOR.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_GCOR_outputs(): - output_map = dict(out=dict(), ) + output_map = dict(out=dict(),) outputs = GCOR.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Hist.py b/nipype/interfaces/afni/tests/test_auto_Hist.py index 92ce044053..30443b3a44 100644 --- a/nipype/interfaces/afni/tests/test_auto_Hist.py +++ b/nipype/interfaces/afni/tests/test_auto_Hist.py @@ -4,56 +4,46 @@ def test_Hist_inputs(): input_map = dict( - args=dict(argstr='%s', ), - bin_width=dict(argstr='-binwidth %f', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + bin_width=dict(argstr="-binwidth %f",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='-input %s', + argstr="-input %s", copyfile=False, extensions=None, mandatory=True, position=1, ), - mask=dict( - argstr='-mask %s', - extensions=None, - ), - max_value=dict(argstr='-max %f', ), - min_value=dict(argstr='-min %f', ), - 
nbin=dict(argstr='-nbin %d', ), + mask=dict(argstr="-mask %s", extensions=None,), + max_value=dict(argstr="-max %f",), + min_value=dict(argstr="-min %f",), + nbin=dict(argstr="-nbin %d",), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, keep_extension=False, - name_source=['in_file'], - name_template='%s_hist', + name_source=["in_file"], + name_template="%s_hist", ), out_show=dict( - argstr='> %s', + argstr="> %s", extensions=None, keep_extension=False, - name_source='in_file', - name_template='%s_hist.out', + name_source="in_file", + name_template="%s_hist.out", position=-1, ), - showhist=dict( - argstr='-showhist', - usedefault=True, - ), + showhist=dict(argstr="-showhist", usedefault=True,), ) inputs = Hist.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Hist_outputs(): - output_map = dict( - out_file=dict(extensions=None, ), - out_show=dict(extensions=None, ), - ) + output_map = dict(out_file=dict(extensions=None,), out_show=dict(extensions=None,),) outputs = Hist.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_LFCD.py b/nipype/interfaces/afni/tests/test_auto_LFCD.py index 11c6857ccc..aebe0dcded 100644 --- a/nipype/interfaces/afni/tests/test_auto_LFCD.py +++ b/nipype/interfaces/afni/tests/test_auto_LFCD.py @@ -4,45 +4,34 @@ def test_LFCD_inputs(): input_map = dict( - args=dict(argstr='%s', ), - autoclip=dict(argstr='-autoclip', ), - automask=dict(argstr='-automask', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + autoclip=dict(argstr="-autoclip",), + automask=dict(argstr="-automask",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - mask=dict( - argstr='-mask %s', - extensions=None, - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, ), + mask=dict(argstr="-mask %s", extensions=None,), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source=['in_file'], - name_template='%s_afni', + name_source=["in_file"], + name_template="%s_afni", ), outputtype=dict(), - polort=dict(argstr='-polort %d', ), - thresh=dict(argstr='-thresh %f', ), + polort=dict(argstr="-polort %d",), + thresh=dict(argstr="-thresh %f",), ) inputs = LFCD.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_LFCD_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = LFCD.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_LocalBistat.py b/nipype/interfaces/afni/tests/test_auto_LocalBistat.py index 91e96d9eed..ea718b9d1c 100644 --- a/nipype/interfaces/afni/tests/test_auto_LocalBistat.py +++ b/nipype/interfaces/afni/tests/test_auto_LocalBistat.py @@ -4,65 +4,35 @@ def test_LocalBistat_inputs(): input_map = dict( - args=dict(argstr='%s', ), - automask=dict( - argstr='-automask', - xor=['weight_file'], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file1=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - 
in_file2=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - ), - mask_file=dict( - argstr='-mask %s', - extensions=None, - ), - neighborhood=dict( - argstr="-nbhd '%s(%s)'", - mandatory=True, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + automask=dict(argstr="-automask", xor=["weight_file"],), + environ=dict(nohash=True, usedefault=True,), + in_file1=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + in_file2=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), + mask_file=dict(argstr="-mask %s", extensions=None,), + neighborhood=dict(argstr="-nbhd '%s(%s)'", mandatory=True,), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, keep_extension=True, - name_source='in_file1', - name_template='%s_bistat', + name_source="in_file1", + name_template="%s_bistat", position=0, ), outputtype=dict(), - stat=dict( - argstr='-stat %s...', - mandatory=True, - ), - weight_file=dict( - argstr='-weight %s', - extensions=None, - xor=['automask'], - ), + stat=dict(argstr="-stat %s...", mandatory=True,), + weight_file=dict(argstr="-weight %s", extensions=None, xor=["automask"],), ) inputs = LocalBistat.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_LocalBistat_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = LocalBistat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Localstat.py b/nipype/interfaces/afni/tests/test_auto_Localstat.py index e7b4089474..4b036e6d84 100644 --- a/nipype/interfaces/afni/tests/test_auto_Localstat.py +++ b/nipype/interfaces/afni/tests/test_auto_Localstat.py @@ -4,70 +4,46 @@ def test_Localstat_inputs(): input_map = dict( - args=dict(argstr='%s', ), - automask=dict(argstr='-automask', ), - environ=dict( - nohash=True, - usedefault=True, - ), - grid_rmode=dict( - argstr='-grid_rmode %s', - requires=['reduce_restore_grid'], - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - ), - mask_file=dict( - argstr='-mask %s', - extensions=None, - ), - neighborhood=dict( - argstr="-nbhd '%s(%s)'", - mandatory=True, - ), - nonmask=dict(argstr='-use_nonmask', ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + automask=dict(argstr="-automask",), + environ=dict(nohash=True, usedefault=True,), + grid_rmode=dict(argstr="-grid_rmode %s", requires=["reduce_restore_grid"],), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), + mask_file=dict(argstr="-mask %s", extensions=None,), + neighborhood=dict(argstr="-nbhd '%s(%s)'", mandatory=True,), + nonmask=dict(argstr="-use_nonmask",), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_localstat', + name_source="in_file", + name_template="%s_localstat", position=0, ), outputtype=dict(), - overwrite=dict(argstr='-overwrite', ), - quiet=dict(argstr='-quiet', ), + overwrite=dict(argstr="-overwrite",), + quiet=dict(argstr="-quiet",), reduce_grid=dict( - argstr='-reduce_grid %s', - xor=['reduce_restore_grid', 'reduce_max_vox'], + argstr="-reduce_grid %s", xor=["reduce_restore_grid", 
"reduce_max_vox"], ), reduce_max_vox=dict( - argstr='-reduce_max_vox %s', - xor=['reduce_restore_grid', 'reduce_grid'], + argstr="-reduce_max_vox %s", xor=["reduce_restore_grid", "reduce_grid"], ), reduce_restore_grid=dict( - argstr='-reduce_restore_grid %s', - xor=['reduce_max_vox', 'reduce_grid'], - ), - stat=dict( - argstr='-stat %s...', - mandatory=True, + argstr="-reduce_restore_grid %s", xor=["reduce_max_vox", "reduce_grid"], ), + stat=dict(argstr="-stat %s...", mandatory=True,), ) inputs = Localstat.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Localstat_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Localstat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_MaskTool.py b/nipype/interfaces/afni/tests/test_auto_MaskTool.py index 1b3be1de0e..dffe07ff32 100644 --- a/nipype/interfaces/afni/tests/test_auto_MaskTool.py +++ b/nipype/interfaces/afni/tests/test_auto_MaskTool.py @@ -4,52 +4,37 @@ def test_MaskTool_inputs(): input_map = dict( - args=dict(argstr='%s', ), - count=dict( - argstr='-count', - position=2, - ), - datum=dict(argstr='-datum %s', ), - dilate_inputs=dict(argstr='-dilate_inputs %s', ), - dilate_results=dict(argstr='-dilate_results %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fill_dirs=dict( - argstr='-fill_dirs %s', - requires=['fill_holes'], - ), - fill_holes=dict(argstr='-fill_holes', ), - frac=dict(argstr='-frac %s', ), - in_file=dict( - argstr='-input %s', - copyfile=False, - mandatory=True, - position=-1, - ), - inter=dict(argstr='-inter', ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + count=dict(argstr="-count", position=2,), + datum=dict(argstr="-datum %s",), + dilate_inputs=dict(argstr="-dilate_inputs %s",), + dilate_results=dict(argstr="-dilate_results %s",), + environ=dict(nohash=True, usedefault=True,), + fill_dirs=dict(argstr="-fill_dirs %s", requires=["fill_holes"],), + fill_holes=dict(argstr="-fill_holes",), + frac=dict(argstr="-frac %s",), + in_file=dict(argstr="-input %s", copyfile=False, mandatory=True, position=-1,), + inter=dict(argstr="-inter",), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_file', - name_template='%s_mask', + name_source="in_file", + name_template="%s_mask", ), outputtype=dict(), - union=dict(argstr='-union', ), - verbose=dict(argstr='-verb %s', ), + union=dict(argstr="-union",), + verbose=dict(argstr="-verb %s",), ) inputs = MaskTool.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MaskTool_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MaskTool.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Maskave.py b/nipype/interfaces/afni/tests/test_auto_Maskave.py index f322d55fa8..b882cfdba9 100644 --- a/nipype/interfaces/afni/tests/test_auto_Maskave.py +++ b/nipype/interfaces/afni/tests/test_auto_Maskave.py @@ -4,48 +4,33 @@ def test_Maskave_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - 
usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-2, - ), - mask=dict( - argstr='-mask %s', - extensions=None, - position=1, - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-2, ), + mask=dict(argstr="-mask %s", extensions=None, position=1,), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='> %s', + argstr="> %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_maskave.1D', + name_source="in_file", + name_template="%s_maskave.1D", position=-1, ), outputtype=dict(), - quiet=dict( - argstr='-quiet', - position=2, - ), + quiet=dict(argstr="-quiet", position=2,), ) inputs = Maskave.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Maskave_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Maskave.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Means.py b/nipype/interfaces/afni/tests/test_auto_Means.py index 9a6d759123..b88957fef3 100644 --- a/nipype/interfaces/afni/tests/test_auto_Means.py +++ b/nipype/interfaces/afni/tests/test_auto_Means.py @@ -4,50 +4,37 @@ def test_Means_inputs(): input_map = dict( - args=dict(argstr='%s', ), - count=dict(argstr='-count', ), - datum=dict(argstr='-datum %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file_a=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - in_file_b=dict( - argstr='%s', - extensions=None, - position=-1, - ), - mask_inter=dict(argstr='-mask_inter', ), - mask_union=dict(argstr='-mask_union', ), - non_zero=dict(argstr='-non_zero', ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + count=dict(argstr="-count",), + datum=dict(argstr="-datum %s",), + environ=dict(nohash=True, usedefault=True,), + in_file_a=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + in_file_b=dict(argstr="%s", extensions=None, position=-1,), + mask_inter=dict(argstr="-mask_inter",), + mask_union=dict(argstr="-mask_union",), + non_zero=dict(argstr="-non_zero",), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_file_a', - name_template='%s_mean', + name_source="in_file_a", + name_template="%s_mean", ), outputtype=dict(), - scale=dict(argstr='-%sscale', ), - sqr=dict(argstr='-sqr', ), - std_dev=dict(argstr='-stdev', ), - summ=dict(argstr='-sum', ), + scale=dict(argstr="-%sscale",), + sqr=dict(argstr="-sqr",), + std_dev=dict(argstr="-stdev",), + summ=dict(argstr="-sum",), ) inputs = Means.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Means_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Means.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Merge.py b/nipype/interfaces/afni/tests/test_auto_Merge.py index 30b838620e..9ccaf8d4d4 100644 --- 
a/nipype/interfaces/afni/tests/test_auto_Merge.py +++ b/nipype/interfaces/afni/tests/test_auto_Merge.py @@ -4,31 +4,17 @@ def test_Merge_inputs(): input_map = dict( - args=dict(argstr='%s', ), - blurfwhm=dict( - argstr='-1blur_fwhm %d', - units='mm', - ), - doall=dict(argstr='-doall', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr='%s', - copyfile=False, - mandatory=True, - position=-1, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + blurfwhm=dict(argstr="-1blur_fwhm %d", units="mm",), + doall=dict(argstr="-doall",), + environ=dict(nohash=True, usedefault=True,), + in_files=dict(argstr="%s", copyfile=False, mandatory=True, position=-1,), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_files', - name_template='%s_merge', + name_source="in_files", + name_template="%s_merge", ), outputtype=dict(), ) @@ -37,8 +23,10 @@ def test_Merge_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Merge_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Merge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Notes.py b/nipype/interfaces/afni/tests/test_auto_Notes.py index 65365e0829..a51727578d 100644 --- a/nipype/interfaces/afni/tests/test_auto_Notes.py +++ b/nipype/interfaces/afni/tests/test_auto_Notes.py @@ -4,46 +4,29 @@ def test_Notes_inputs(): input_map = dict( - add=dict(argstr='-a "%s"', ), - add_history=dict( - argstr='-h "%s"', - xor=['rep_history'], - ), - args=dict(argstr='%s', ), - delete=dict(argstr='-d %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), + add=dict(argstr='-a "%s"',), + add_history=dict(argstr='-h "%s"', xor=["rep_history"],), + args=dict(argstr="%s",), + delete=dict(argstr="-d %d",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_file=dict( - argstr='%s', - extensions=None, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, ), + num_threads=dict(nohash=True, usedefault=True,), + out_file=dict(argstr="%s", extensions=None,), outputtype=dict(), - rep_history=dict( - argstr='-HH "%s"', - xor=['add_history'], - ), - ses=dict(argstr='-ses', ), + rep_history=dict(argstr='-HH "%s"', xor=["add_history"],), + ses=dict(argstr="-ses",), ) inputs = Notes.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Notes_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Notes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py b/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py index e3882d3559..265e3720a2 100644 --- a/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py +++ b/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py @@ -4,37 +4,30 @@ def test_NwarpAdjust_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - 
in_files=dict(argstr='-source %s', ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_files=dict(argstr="-source %s",), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, keep_extension=True, - name_source='in_files', - name_template='%s_NwarpAdjust', - requires=['in_files'], + name_source="in_files", + name_template="%s_NwarpAdjust", + requires=["in_files"], ), outputtype=dict(), - warps=dict( - argstr='-nwarp %s', - mandatory=True, - ), + warps=dict(argstr="-nwarp %s", mandatory=True,), ) inputs = NwarpAdjust.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_NwarpAdjust_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = NwarpAdjust.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpApply.py b/nipype/interfaces/afni/tests/test_auto_NwarpApply.py index 490f296d3b..727b210c4e 100644 --- a/nipype/interfaces/afni/tests/test_auto_NwarpApply.py +++ b/nipype/interfaces/afni/tests/test_auto_NwarpApply.py @@ -4,52 +4,33 @@ def test_NwarpApply_inputs(): input_map = dict( - ainterp=dict(argstr='-ainterp %s', ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-source %s', - mandatory=True, - ), - interp=dict( - argstr='-interp %s', - usedefault=True, - ), - inv_warp=dict(argstr='-iwarp', ), - master=dict( - argstr='-master %s', - extensions=None, - ), + ainterp=dict(argstr="-ainterp %s",), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-source %s", mandatory=True,), + interp=dict(argstr="-interp %s", usedefault=True,), + inv_warp=dict(argstr="-iwarp",), + master=dict(argstr="-master %s", extensions=None,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_file', - name_template='%s_Nwarp', - ), - quiet=dict( - argstr='-quiet', - xor=['verb'], - ), - short=dict(argstr='-short', ), - verb=dict( - argstr='-verb', - xor=['quiet'], - ), - warp=dict( - argstr='-nwarp %s', - mandatory=True, + name_source="in_file", + name_template="%s_Nwarp", ), + quiet=dict(argstr="-quiet", xor=["verb"],), + short=dict(argstr="-short",), + verb=dict(argstr="-verb", xor=["quiet"],), + warp=dict(argstr="-nwarp %s", mandatory=True,), ) inputs = NwarpApply.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_NwarpApply_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = NwarpApply.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpCat.py b/nipype/interfaces/afni/tests/test_auto_NwarpCat.py index 666de06590..82b1e6a125 100644 --- a/nipype/interfaces/afni/tests/test_auto_NwarpCat.py +++ b/nipype/interfaces/afni/tests/test_auto_NwarpCat.py @@ -4,43 +4,32 @@ def test_NwarpCat_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - expad=dict(argstr='-expad %d', ), - in_files=dict( - argstr='%s', - mandatory=True, - position=-1, - 
), - interp=dict( - argstr='-interp %s', - usedefault=True, - ), - inv_warp=dict(argstr='-iwarp', ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + expad=dict(argstr="-expad %d",), + in_files=dict(argstr="%s", mandatory=True, position=-1,), + interp=dict(argstr="-interp %s", usedefault=True,), + inv_warp=dict(argstr="-iwarp",), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_files', - name_template='%s_NwarpCat', + name_source="in_files", + name_template="%s_NwarpCat", ), outputtype=dict(), - space=dict(argstr='-space %s', ), - verb=dict(argstr='-verb', ), + space=dict(argstr="-space %s",), + verb=dict(argstr="-verb",), ) inputs = NwarpCat.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_NwarpCat_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = NwarpCat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py b/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py index 3024c438c8..e9ae2472be 100644 --- a/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py +++ b/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py @@ -4,46 +4,39 @@ def test_OneDToolPy_inputs(): input_map = dict( - args=dict(argstr='%s', ), - censor_motion=dict(argstr='-censor_motion %f %s', ), - censor_prev_TR=dict(argstr='-censor_prev_TR', ), - demean=dict(argstr='-demean', ), - derivative=dict(argstr='-derivative', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-infile %s', - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s",), + censor_motion=dict(argstr="-censor_motion %f %s",), + censor_prev_TR=dict(argstr="-censor_prev_TR",), + demean=dict(argstr="-demean",), + derivative=dict(argstr="-derivative",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-infile %s", extensions=None, mandatory=True,), out_file=dict( - argstr='-write %s', - extensions=None, - xor=['show_cormat_warnings'], + argstr="-write %s", extensions=None, xor=["show_cormat_warnings"], ), outputtype=dict(), - py27_path=dict(usedefault=True, ), - set_nruns=dict(argstr='-set_nruns %d', ), - show_censor_count=dict(argstr='-show_censor_count', ), + py27_path=dict(usedefault=True,), + set_nruns=dict(argstr="-set_nruns %d",), + show_censor_count=dict(argstr="-show_censor_count",), show_cormat_warnings=dict( - argstr='-show_cormat_warnings |& tee %s', + argstr="-show_cormat_warnings |& tee %s", extensions=None, position=-1, - xor=['out_file'], + xor=["out_file"], ), - show_indices_interest=dict(argstr='-show_indices_interest', ), - show_trs_run=dict(argstr='-show_trs_run %d', ), - show_trs_uncensored=dict(argstr='-show_trs_uncensored %s', ), + show_indices_interest=dict(argstr="-show_indices_interest",), + show_trs_run=dict(argstr="-show_trs_run %d",), + show_trs_uncensored=dict(argstr="-show_trs_uncensored %s",), ) inputs = OneDToolPy.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_OneDToolPy_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) 
outputs = OneDToolPy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_OutlierCount.py b/nipype/interfaces/afni/tests/test_auto_OutlierCount.py index f630daf9f6..3b90e076d7 100644 --- a/nipype/interfaces/afni/tests/test_auto_OutlierCount.py +++ b/nipype/interfaces/afni/tests/test_auto_OutlierCount.py @@ -4,74 +4,43 @@ def test_OutlierCount_inputs(): input_map = dict( - args=dict(argstr='%s', ), - autoclip=dict( - argstr='-autoclip', - usedefault=True, - xor=['mask'], - ), - automask=dict( - argstr='-automask', - usedefault=True, - xor=['mask'], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fraction=dict( - argstr='-fraction', - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - interval=dict( - argstr='-range', - usedefault=True, - ), - legendre=dict( - argstr='-legendre', - usedefault=True, - ), - mask=dict( - argstr='-mask %s', - extensions=None, - xor=['autoclip', 'automask'], - ), + args=dict(argstr="%s",), + autoclip=dict(argstr="-autoclip", usedefault=True, xor=["mask"],), + automask=dict(argstr="-automask", usedefault=True, xor=["mask"],), + environ=dict(nohash=True, usedefault=True,), + fraction=dict(argstr="-fraction", usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + interval=dict(argstr="-range", usedefault=True,), + legendre=dict(argstr="-legendre", usedefault=True,), + mask=dict(argstr="-mask %s", extensions=None, xor=["autoclip", "automask"],), out_file=dict( extensions=None, keep_extension=False, - name_source=['in_file'], - name_template='%s_outliers', + name_source=["in_file"], + name_template="%s_outliers", ), outliers_file=dict( - argstr='-save %s', + argstr="-save %s", extensions=None, keep_extension=True, - name_source=['in_file'], - name_template='%s_outliers', - output_name='out_outliers', + name_source=["in_file"], + name_template="%s_outliers", + output_name="out_outliers", ), - polort=dict(argstr='-polort %d', ), - qthr=dict( - argstr='-qthr %.5f', - usedefault=True, - ), - save_outliers=dict(usedefault=True, ), + polort=dict(argstr="-polort %d",), + qthr=dict(argstr="-qthr %.5f", usedefault=True,), + save_outliers=dict(usedefault=True,), ) inputs = OutlierCount.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_OutlierCount_outputs(): output_map = dict( - out_file=dict(extensions=None, ), - out_outliers=dict(extensions=None, ), + out_file=dict(extensions=None,), out_outliers=dict(extensions=None,), ) outputs = OutlierCount.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_QualityIndex.py b/nipype/interfaces/afni/tests/test_auto_QualityIndex.py index 209353e4dc..359743b19f 100644 --- a/nipype/interfaces/afni/tests/test_auto_QualityIndex.py +++ b/nipype/interfaces/afni/tests/test_auto_QualityIndex.py @@ -4,61 +4,34 @@ def test_QualityIndex_inputs(): input_map = dict( - args=dict(argstr='%s', ), - autoclip=dict( - argstr='-autoclip', - usedefault=True, - xor=['mask'], - ), - automask=dict( - argstr='-automask', - usedefault=True, - xor=['mask'], - ), - clip=dict(argstr='-clip %f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - interval=dict( - argstr='-range', - usedefault=True, - ), - mask=dict( - argstr='-mask %s', - extensions=None, - 
xor=['autoclip', 'automask'], - ), + args=dict(argstr="%s",), + autoclip=dict(argstr="-autoclip", usedefault=True, xor=["mask"],), + automask=dict(argstr="-automask", usedefault=True, xor=["mask"],), + clip=dict(argstr="-clip %f",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + interval=dict(argstr="-range", usedefault=True,), + mask=dict(argstr="-mask %s", extensions=None, xor=["autoclip", "automask"],), out_file=dict( - argstr='> %s', + argstr="> %s", extensions=None, keep_extension=False, - name_source=['in_file'], - name_template='%s_tqual', + name_source=["in_file"], + name_template="%s_tqual", position=-1, ), - quadrant=dict( - argstr='-quadrant', - usedefault=True, - ), - spearman=dict( - argstr='-spearman', - usedefault=True, - ), + quadrant=dict(argstr="-quadrant", usedefault=True,), + spearman=dict(argstr="-spearman", usedefault=True,), ) inputs = QualityIndex.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_QualityIndex_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = QualityIndex.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Qwarp.py b/nipype/interfaces/afni/tests/test_auto_Qwarp.py index 877dd82fb2..14ff6192d5 100644 --- a/nipype/interfaces/afni/tests/test_auto_Qwarp.py +++ b/nipype/interfaces/afni/tests/test_auto_Qwarp.py @@ -4,171 +4,89 @@ def test_Qwarp_inputs(): input_map = dict( - Qfinal=dict(argstr='-Qfinal', ), - Qonly=dict(argstr='-Qonly', ), - allineate=dict(argstr='-allineate', ), - allineate_opts=dict( - argstr='-allineate_opts %s', - requires=['allineate'], - ), - allsave=dict( - argstr='-allsave', - xor=['nopadWARP', 'duplo', 'plusminus'], - ), - args=dict(argstr='%s', ), - ballopt=dict( - argstr='-ballopt', - xor=['workhard', 'boxopt'], - ), + Qfinal=dict(argstr="-Qfinal",), + Qonly=dict(argstr="-Qonly",), + allineate=dict(argstr="-allineate",), + allineate_opts=dict(argstr="-allineate_opts %s", requires=["allineate"],), + allsave=dict(argstr="-allsave", xor=["nopadWARP", "duplo", "plusminus"],), + args=dict(argstr="%s",), + ballopt=dict(argstr="-ballopt", xor=["workhard", "boxopt"],), base_file=dict( - argstr='-base %s', - copyfile=False, - extensions=None, - mandatory=True, + argstr="-base %s", copyfile=False, extensions=None, mandatory=True, ), - baxopt=dict( - argstr='-boxopt', - xor=['workhard', 'ballopt'], - ), - blur=dict(argstr='-blur %s', ), + baxopt=dict(argstr="-boxopt", xor=["workhard", "ballopt"],), + blur=dict(argstr="-blur %s",), duplo=dict( - argstr='-duplo', - xor=[ - 'gridlist', 'maxlev', 'inilev', 'iniwarp', 'plusminus', - 'allsave' - ], - ), - emask=dict( - argstr='-emask %s', - copyfile=False, - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - expad=dict( - argstr='-expad %d', - xor=['nopadWARP'], + argstr="-duplo", + xor=["gridlist", "maxlev", "inilev", "iniwarp", "plusminus", "allsave"], ), + emask=dict(argstr="-emask %s", copyfile=False, extensions=None,), + environ=dict(nohash=True, usedefault=True,), + expad=dict(argstr="-expad %d", xor=["nopadWARP"],), gridlist=dict( - argstr='-gridlist %s', + argstr="-gridlist %s", copyfile=False, extensions=None, - xor=['duplo', 'plusminus'], - ), - hel=dict( - argstr='-hel', - xor=['nmi', 'mi', 'lpc', 'lpa', 'pear'], + 
xor=["duplo", "plusminus"], ), + hel=dict(argstr="-hel", xor=["nmi", "mi", "lpc", "lpa", "pear"],), in_file=dict( - argstr='-source %s', - copyfile=False, - extensions=None, - mandatory=True, - ), - inilev=dict( - argstr='-inilev %d', - xor=['duplo'], - ), - iniwarp=dict( - argstr='-iniwarp %s', - xor=['duplo'], - ), - iwarp=dict( - argstr='-iwarp', - xor=['plusminus'], - ), - lpa=dict( - argstr='-lpa', - xor=['nmi', 'mi', 'lpc', 'hel', 'pear'], - ), - lpc=dict( - argstr='-lpc', - position=-2, - xor=['nmi', 'mi', 'hel', 'lpa', 'pear'], - ), - maxlev=dict( - argstr='-maxlev %d', - position=-1, - xor=['duplo'], - ), - mi=dict( - argstr='-mi', - xor=['mi', 'hel', 'lpc', 'lpa', 'pear'], - ), - minpatch=dict(argstr='-minpatch %d', ), - nmi=dict( - argstr='-nmi', - xor=['nmi', 'hel', 'lpc', 'lpa', 'pear'], - ), - noXdis=dict(argstr='-noXdis', ), - noYdis=dict(argstr='-noYdis', ), - noZdis=dict(argstr='-noZdis', ), - noneg=dict(argstr='-noneg', ), - nopad=dict(argstr='-nopad', ), - nopadWARP=dict( - argstr='-nopadWARP', - xor=['allsave', 'expad'], - ), - nopenalty=dict(argstr='-nopenalty', ), - nowarp=dict(argstr='-nowarp', ), - noweight=dict(argstr='-noweight', ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + argstr="-source %s", copyfile=False, extensions=None, mandatory=True, + ), + inilev=dict(argstr="-inilev %d", xor=["duplo"],), + iniwarp=dict(argstr="-iniwarp %s", xor=["duplo"],), + iwarp=dict(argstr="-iwarp", xor=["plusminus"],), + lpa=dict(argstr="-lpa", xor=["nmi", "mi", "lpc", "hel", "pear"],), + lpc=dict(argstr="-lpc", position=-2, xor=["nmi", "mi", "hel", "lpa", "pear"],), + maxlev=dict(argstr="-maxlev %d", position=-1, xor=["duplo"],), + mi=dict(argstr="-mi", xor=["mi", "hel", "lpc", "lpa", "pear"],), + minpatch=dict(argstr="-minpatch %d",), + nmi=dict(argstr="-nmi", xor=["nmi", "hel", "lpc", "lpa", "pear"],), + noXdis=dict(argstr="-noXdis",), + noYdis=dict(argstr="-noYdis",), + noZdis=dict(argstr="-noZdis",), + noneg=dict(argstr="-noneg",), + nopad=dict(argstr="-nopad",), + nopadWARP=dict(argstr="-nopadWARP", xor=["allsave", "expad"],), + nopenalty=dict(argstr="-nopenalty",), + nowarp=dict(argstr="-nowarp",), + noweight=dict(argstr="-noweight",), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', - extensions=None, - name_source=['in_file'], - name_template='ppp_%s', - ), - out_weight_file=dict( - argstr='-wtprefix %s', + argstr="-prefix %s", extensions=None, + name_source=["in_file"], + name_template="ppp_%s", ), + out_weight_file=dict(argstr="-wtprefix %s", extensions=None,), outputtype=dict(), - overwrite=dict(argstr='-overwrite', ), - pblur=dict(argstr='-pblur %s', ), - pear=dict(argstr='-pear', ), - penfac=dict(argstr='-penfac %f', ), - plusminus=dict( - argstr='-plusminus', - xor=['duplo', 'allsave', 'iwarp'], - ), - quiet=dict( - argstr='-quiet', - xor=['verb'], - ), - resample=dict(argstr='-resample', ), - verb=dict( - argstr='-verb', - xor=['quiet'], - ), - wball=dict(argstr='-wball %s', ), - weight=dict( - argstr='-weight %s', - extensions=None, - ), - wmask=dict(argstr='-wpass %s %f', ), - workhard=dict( - argstr='-workhard', - xor=['boxopt', 'ballopt'], - ), + overwrite=dict(argstr="-overwrite",), + pblur=dict(argstr="-pblur %s",), + pear=dict(argstr="-pear",), + penfac=dict(argstr="-penfac %f",), + plusminus=dict(argstr="-plusminus", xor=["duplo", "allsave", "iwarp"],), + quiet=dict(argstr="-quiet", xor=["verb"],), + resample=dict(argstr="-resample",), + verb=dict(argstr="-verb", xor=["quiet"],), + 
wball=dict(argstr="-wball %s",), + weight=dict(argstr="-weight %s", extensions=None,), + wmask=dict(argstr="-wpass %s %f",), + workhard=dict(argstr="-workhard", xor=["boxopt", "ballopt"],), ) inputs = Qwarp.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Qwarp_outputs(): output_map = dict( - base_warp=dict(extensions=None, ), - source_warp=dict(extensions=None, ), - warped_base=dict(extensions=None, ), - warped_source=dict(extensions=None, ), - weights=dict(extensions=None, ), + base_warp=dict(extensions=None,), + source_warp=dict(extensions=None,), + warped_base=dict(extensions=None,), + warped_source=dict(extensions=None,), + weights=dict(extensions=None,), ) outputs = Qwarp.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py b/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py index 5066483014..6b56af1006 100644 --- a/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py +++ b/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py @@ -4,180 +4,98 @@ def test_QwarpPlusMinus_inputs(): input_map = dict( - Qfinal=dict(argstr='-Qfinal', ), - Qonly=dict(argstr='-Qonly', ), - allineate=dict(argstr='-allineate', ), - allineate_opts=dict( - argstr='-allineate_opts %s', - requires=['allineate'], - ), - allsave=dict( - argstr='-allsave', - xor=['nopadWARP', 'duplo', 'plusminus'], - ), - args=dict(argstr='%s', ), - ballopt=dict( - argstr='-ballopt', - xor=['workhard', 'boxopt'], - ), + Qfinal=dict(argstr="-Qfinal",), + Qonly=dict(argstr="-Qonly",), + allineate=dict(argstr="-allineate",), + allineate_opts=dict(argstr="-allineate_opts %s", requires=["allineate"],), + allsave=dict(argstr="-allsave", xor=["nopadWARP", "duplo", "plusminus"],), + args=dict(argstr="%s",), + ballopt=dict(argstr="-ballopt", xor=["workhard", "boxopt"],), base_file=dict( - argstr='-base %s', - copyfile=False, - extensions=None, - mandatory=True, + argstr="-base %s", copyfile=False, extensions=None, mandatory=True, ), - baxopt=dict( - argstr='-boxopt', - xor=['workhard', 'ballopt'], - ), - blur=dict(argstr='-blur %s', ), + baxopt=dict(argstr="-boxopt", xor=["workhard", "ballopt"],), + blur=dict(argstr="-blur %s",), duplo=dict( - argstr='-duplo', - xor=[ - 'gridlist', 'maxlev', 'inilev', 'iniwarp', 'plusminus', - 'allsave' - ], - ), - emask=dict( - argstr='-emask %s', - copyfile=False, - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - expad=dict( - argstr='-expad %d', - xor=['nopadWARP'], + argstr="-duplo", + xor=["gridlist", "maxlev", "inilev", "iniwarp", "plusminus", "allsave"], ), + emask=dict(argstr="-emask %s", copyfile=False, extensions=None,), + environ=dict(nohash=True, usedefault=True,), + expad=dict(argstr="-expad %d", xor=["nopadWARP"],), gridlist=dict( - argstr='-gridlist %s', + argstr="-gridlist %s", copyfile=False, extensions=None, - xor=['duplo', 'plusminus'], - ), - hel=dict( - argstr='-hel', - xor=['nmi', 'mi', 'lpc', 'lpa', 'pear'], + xor=["duplo", "plusminus"], ), + hel=dict(argstr="-hel", xor=["nmi", "mi", "lpc", "lpa", "pear"],), in_file=dict( - argstr='-source %s', - copyfile=False, - extensions=None, - mandatory=True, - ), - inilev=dict( - argstr='-inilev %d', - xor=['duplo'], - ), - iniwarp=dict( - argstr='-iniwarp %s', - xor=['duplo'], - ), - iwarp=dict( - argstr='-iwarp', - xor=['plusminus'], - ), - lpa=dict( - argstr='-lpa', - xor=['nmi', 'mi', 'lpc', 'hel', 'pear'], - ), - lpc=dict( - argstr='-lpc', - 
position=-2, - xor=['nmi', 'mi', 'hel', 'lpa', 'pear'], - ), - maxlev=dict( - argstr='-maxlev %d', - position=-1, - xor=['duplo'], - ), - mi=dict( - argstr='-mi', - xor=['mi', 'hel', 'lpc', 'lpa', 'pear'], - ), - minpatch=dict(argstr='-minpatch %d', ), - nmi=dict( - argstr='-nmi', - xor=['nmi', 'hel', 'lpc', 'lpa', 'pear'], - ), - noXdis=dict(argstr='-noXdis', ), - noYdis=dict(argstr='-noYdis', ), - noZdis=dict(argstr='-noZdis', ), - noneg=dict(argstr='-noneg', ), - nopad=dict(argstr='-nopad', ), - nopadWARP=dict( - argstr='-nopadWARP', - xor=['allsave', 'expad'], - ), - nopenalty=dict(argstr='-nopenalty', ), - nowarp=dict(argstr='-nowarp', ), - noweight=dict(argstr='-noweight', ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + argstr="-source %s", copyfile=False, extensions=None, mandatory=True, + ), + inilev=dict(argstr="-inilev %d", xor=["duplo"],), + iniwarp=dict(argstr="-iniwarp %s", xor=["duplo"],), + iwarp=dict(argstr="-iwarp", xor=["plusminus"],), + lpa=dict(argstr="-lpa", xor=["nmi", "mi", "lpc", "hel", "pear"],), + lpc=dict(argstr="-lpc", position=-2, xor=["nmi", "mi", "hel", "lpa", "pear"],), + maxlev=dict(argstr="-maxlev %d", position=-1, xor=["duplo"],), + mi=dict(argstr="-mi", xor=["mi", "hel", "lpc", "lpa", "pear"],), + minpatch=dict(argstr="-minpatch %d",), + nmi=dict(argstr="-nmi", xor=["nmi", "hel", "lpc", "lpa", "pear"],), + noXdis=dict(argstr="-noXdis",), + noYdis=dict(argstr="-noYdis",), + noZdis=dict(argstr="-noZdis",), + noneg=dict(argstr="-noneg",), + nopad=dict(argstr="-nopad",), + nopadWARP=dict(argstr="-nopadWARP", xor=["allsave", "expad"],), + nopenalty=dict(argstr="-nopenalty",), + nowarp=dict(argstr="-nowarp",), + noweight=dict(argstr="-noweight",), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', - extensions=None, - position=0, - usedefault=True, - ), - out_weight_file=dict( - argstr='-wtprefix %s', - extensions=None, + argstr="-prefix %s", extensions=None, position=0, usedefault=True, ), + out_weight_file=dict(argstr="-wtprefix %s", extensions=None,), outputtype=dict(), - overwrite=dict(argstr='-overwrite', ), - pblur=dict(argstr='-pblur %s', ), - pear=dict(argstr='-pear', ), - penfac=dict(argstr='-penfac %f', ), + overwrite=dict(argstr="-overwrite",), + pblur=dict(argstr="-pblur %s",), + pear=dict(argstr="-pear",), + penfac=dict(argstr="-penfac %f",), plusminus=dict( - argstr='-plusminus', + argstr="-plusminus", position=1, usedefault=True, - xor=['duplo', 'allsave', 'iwarp'], - ), - quiet=dict( - argstr='-quiet', - xor=['verb'], + xor=["duplo", "allsave", "iwarp"], ), - resample=dict(argstr='-resample', ), + quiet=dict(argstr="-quiet", xor=["verb"],), + resample=dict(argstr="-resample",), source_file=dict( - argstr='-source %s', + argstr="-source %s", copyfile=False, - deprecated='1.1.2', + deprecated="1.1.2", extensions=None, - new_name='in_file', - ), - verb=dict( - argstr='-verb', - xor=['quiet'], - ), - wball=dict(argstr='-wball %s', ), - weight=dict( - argstr='-weight %s', - extensions=None, - ), - wmask=dict(argstr='-wpass %s %f', ), - workhard=dict( - argstr='-workhard', - xor=['boxopt', 'ballopt'], + new_name="in_file", ), + verb=dict(argstr="-verb", xor=["quiet"],), + wball=dict(argstr="-wball %s",), + weight=dict(argstr="-weight %s", extensions=None,), + wmask=dict(argstr="-wpass %s %f",), + workhard=dict(argstr="-workhard", xor=["boxopt", "ballopt"],), ) inputs = QwarpPlusMinus.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert 
getattr(inputs.traits()[key], metakey) == value + + def test_QwarpPlusMinus_outputs(): output_map = dict( - base_warp=dict(extensions=None, ), - source_warp=dict(extensions=None, ), - warped_base=dict(extensions=None, ), - warped_source=dict(extensions=None, ), - weights=dict(extensions=None, ), + base_warp=dict(extensions=None,), + source_warp=dict(extensions=None,), + warped_base=dict(extensions=None,), + warped_source=dict(extensions=None,), + weights=dict(extensions=None,), ) outputs = QwarpPlusMinus.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_ROIStats.py b/nipype/interfaces/afni/tests/test_auto_ROIStats.py index f71c3158cd..026e3ff076 100644 --- a/nipype/interfaces/afni/tests/test_auto_ROIStats.py +++ b/nipype/interfaces/afni/tests/test_auto_ROIStats.py @@ -4,67 +4,46 @@ def test_ROIStats_inputs(): input_map = dict( - args=dict(argstr='%s', ), - debug=dict(argstr='-debug', ), - environ=dict( - nohash=True, - usedefault=True, - ), - format1D=dict( - argstr='-1Dformat', - xor=['format1DR'], - ), - format1DR=dict( - argstr='-1DRformat', - xor=['format1D'], - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s",), + debug=dict(argstr="-debug",), + environ=dict(nohash=True, usedefault=True,), + format1D=dict(argstr="-1Dformat", xor=["format1DR"],), + format1DR=dict(argstr="-1DRformat", xor=["format1D"],), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), mask=dict( - argstr='-mask %s', - deprecated='1.1.4', + argstr="-mask %s", + deprecated="1.1.4", extensions=None, - new_name='mask_file', + new_name="mask_file", position=3, ), - mask_f2short=dict(argstr='-mask_f2short', ), - mask_file=dict( - argstr='-mask %s', - extensions=None, - ), - nobriklab=dict(argstr='-nobriklab', ), - nomeanout=dict(argstr='-nomeanout', ), - num_roi=dict(argstr='-numroi %s', ), + mask_f2short=dict(argstr="-mask_f2short",), + mask_file=dict(argstr="-mask %s", extensions=None,), + nobriklab=dict(argstr="-nobriklab",), + nomeanout=dict(argstr="-nomeanout",), + num_roi=dict(argstr="-numroi %s",), out_file=dict( - argstr='> %s', + argstr="> %s", extensions=None, keep_extension=False, - name_source='in_file', - name_template='%s_roistat.1D', + name_source="in_file", + name_template="%s_roistat.1D", position=-1, ), - quiet=dict(argstr='-quiet', ), - roisel=dict( - argstr='-roisel %s', - extensions=None, - ), - stat=dict(argstr='%s...', ), - zerofill=dict( - argstr='-zerofill %s', - requires=['num_roi'], - ), + quiet=dict(argstr="-quiet",), + roisel=dict(argstr="-roisel %s", extensions=None,), + stat=dict(argstr="%s...",), + zerofill=dict(argstr="-zerofill %s", requires=["num_roi"],), ) inputs = ROIStats.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ROIStats_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = ROIStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_ReHo.py b/nipype/interfaces/afni/tests/test_auto_ReHo.py index 2e0c3fb6eb..1200399a1d 100644 --- a/nipype/interfaces/afni/tests/test_auto_ReHo.py +++ b/nipype/interfaces/afni/tests/test_auto_ReHo.py @@ -4,58 +4,37 @@ def test_ReHo_inputs(): input_map = dict( - args=dict(argstr='%s', ), - chi_sq=dict(argstr='-chi_sq', ), + args=dict(argstr="%s",), + chi_sq=dict(argstr="-chi_sq",), 
ellipsoid=dict( - argstr='-neigh_X %s -neigh_Y %s -neigh_Z %s', - xor=['sphere', 'neighborhood'], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-inset %s', - extensions=None, - mandatory=True, - position=1, - ), - label_set=dict( - argstr='-in_rois %s', - extensions=None, - ), - mask_file=dict( - argstr='-mask %s', - extensions=None, - ), - neighborhood=dict( - argstr='-nneigh %s', - xor=['sphere', 'ellipsoid'], - ), + argstr="-neigh_X %s -neigh_Y %s -neigh_Z %s", + xor=["sphere", "neighborhood"], + ), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-inset %s", extensions=None, mandatory=True, position=1,), + label_set=dict(argstr="-in_rois %s", extensions=None,), + mask_file=dict(argstr="-mask %s", extensions=None,), + neighborhood=dict(argstr="-nneigh %s", xor=["sphere", "ellipsoid"],), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_reho', + name_source="in_file", + name_template="%s_reho", position=0, ), - overwrite=dict(argstr='-overwrite', ), - sphere=dict( - argstr='-neigh_RAD %s', - xor=['neighborhood', 'ellipsoid'], - ), + overwrite=dict(argstr="-overwrite",), + sphere=dict(argstr="-neigh_RAD %s", xor=["neighborhood", "ellipsoid"],), ) inputs = ReHo.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ReHo_outputs(): - output_map = dict( - out_file=dict(extensions=None, ), - out_vals=dict(extensions=None, ), - ) + output_map = dict(out_file=dict(extensions=None,), out_vals=dict(extensions=None,),) outputs = ReHo.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Refit.py b/nipype/interfaces/afni/tests/test_auto_Refit.py index fd7688dce4..65952d85fa 100644 --- a/nipype/interfaces/afni/tests/test_auto_Refit.py +++ b/nipype/interfaces/afni/tests/test_auto_Refit.py @@ -4,45 +4,37 @@ def test_Refit_inputs(): input_map = dict( - args=dict(argstr='%s', ), - atrcopy=dict(argstr='-atrcopy %s %s', ), - atrfloat=dict(argstr='-atrfloat %s %s', ), - atrint=dict(argstr='-atrint %s %s', ), - atrstring=dict(argstr='-atrstring %s %s', ), - deoblique=dict(argstr='-deoblique', ), - duporigin_file=dict( - argstr='-duporigin %s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + atrcopy=dict(argstr="-atrcopy %s %s",), + atrfloat=dict(argstr="-atrfloat %s %s",), + atrint=dict(argstr="-atrint %s %s",), + atrstring=dict(argstr="-atrstring %s %s",), + deoblique=dict(argstr="-deoblique",), + duporigin_file=dict(argstr="-duporigin %s", extensions=None,), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='%s', - copyfile=True, - extensions=None, - mandatory=True, - position=-1, + argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-1, ), - nosaveatr=dict(argstr='-nosaveatr', ), - saveatr=dict(argstr='-saveatr', ), - space=dict(argstr='-space %s', ), - xdel=dict(argstr='-xdel %f', ), - xorigin=dict(argstr='-xorigin %s', ), - xyzscale=dict(argstr='-xyzscale %f', ), - ydel=dict(argstr='-ydel %f', ), - yorigin=dict(argstr='-yorigin %s', ), - zdel=dict(argstr='-zdel %f', ), - zorigin=dict(argstr='-zorigin %s', ), + nosaveatr=dict(argstr="-nosaveatr",), + saveatr=dict(argstr="-saveatr",), + space=dict(argstr="-space %s",), + xdel=dict(argstr="-xdel %f",), + xorigin=dict(argstr="-xorigin 
%s",), + xyzscale=dict(argstr="-xyzscale %f",), + ydel=dict(argstr="-ydel %f",), + yorigin=dict(argstr="-yorigin %s",), + zdel=dict(argstr="-zdel %f",), + zorigin=dict(argstr="-zorigin %s",), ) inputs = Refit.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Refit_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Refit.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Remlfit.py b/nipype/interfaces/afni/tests/test_auto_Remlfit.py index 0ace6a1b04..3040c1f48e 100644 --- a/nipype/interfaces/afni/tests/test_auto_Remlfit.py +++ b/nipype/interfaces/afni/tests/test_auto_Remlfit.py @@ -4,151 +4,69 @@ def test_Remlfit_inputs(): input_map = dict( - STATmask=dict( - argstr='-STATmask %s', - extensions=None, - ), - addbase=dict( - argstr='-addbase %s', - copyfile=False, - sep=' ', - ), - args=dict(argstr='%s', ), - automask=dict( - argstr='-automask', - usedefault=True, - ), - dsort=dict( - argstr='-dsort %s', - copyfile=False, - extensions=None, - ), - dsort_nods=dict( - argstr='-dsort_nods', - requires=['dsort'], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - errts_file=dict( - argstr='-Rerrts %s', - extensions=None, - ), - fitts_file=dict( - argstr='-Rfitts %s', - extensions=None, - ), - fout=dict(argstr='-fout', ), - glt_file=dict( - argstr='-Rglt %s', - extensions=None, - ), - gltsym=dict(argstr='-gltsym "%s" %s...', ), - goforit=dict(argstr='-GOFORIT', ), - in_files=dict( - argstr='-input "%s"', - copyfile=False, - mandatory=True, - sep=' ', - ), - mask=dict( - argstr='-mask %s', - extensions=None, - ), - matim=dict( - argstr='-matim %s', - extensions=None, - xor=['matrix'], - ), - matrix=dict( - argstr='-matrix %s', - extensions=None, - mandatory=True, - ), - nobout=dict(argstr='-nobout', ), - nodmbase=dict( - argstr='-nodmbase', - requires=['addbase', 'dsort'], - ), - nofdr=dict(argstr='-noFDR', ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - obeta=dict( - argstr='-Obeta %s', - extensions=None, - ), - obuck=dict( - argstr='-Obuck %s', - extensions=None, - ), - oerrts=dict( - argstr='-Oerrts %s', - extensions=None, - ), - ofitts=dict( - argstr='-Ofitts %s', - extensions=None, - ), - oglt=dict( - argstr='-Oglt %s', - extensions=None, - ), - out_file=dict( - argstr='-Rbuck %s', - extensions=None, - ), + STATmask=dict(argstr="-STATmask %s", extensions=None,), + addbase=dict(argstr="-addbase %s", copyfile=False, sep=" ",), + args=dict(argstr="%s",), + automask=dict(argstr="-automask", usedefault=True,), + dsort=dict(argstr="-dsort %s", copyfile=False, extensions=None,), + dsort_nods=dict(argstr="-dsort_nods", requires=["dsort"],), + environ=dict(nohash=True, usedefault=True,), + errts_file=dict(argstr="-Rerrts %s", extensions=None,), + fitts_file=dict(argstr="-Rfitts %s", extensions=None,), + fout=dict(argstr="-fout",), + glt_file=dict(argstr="-Rglt %s", extensions=None,), + gltsym=dict(argstr='-gltsym "%s" %s...',), + goforit=dict(argstr="-GOFORIT",), + in_files=dict(argstr='-input "%s"', copyfile=False, mandatory=True, sep=" ",), + mask=dict(argstr="-mask %s", extensions=None,), + matim=dict(argstr="-matim %s", extensions=None, xor=["matrix"],), + matrix=dict(argstr="-matrix %s", extensions=None, mandatory=True,), + nobout=dict(argstr="-nobout",), + nodmbase=dict(argstr="-nodmbase", requires=["addbase", 
"dsort"],), + nofdr=dict(argstr="-noFDR",), + num_threads=dict(nohash=True, usedefault=True,), + obeta=dict(argstr="-Obeta %s", extensions=None,), + obuck=dict(argstr="-Obuck %s", extensions=None,), + oerrts=dict(argstr="-Oerrts %s", extensions=None,), + ofitts=dict(argstr="-Ofitts %s", extensions=None,), + oglt=dict(argstr="-Oglt %s", extensions=None,), + out_file=dict(argstr="-Rbuck %s", extensions=None,), outputtype=dict(), - ovar=dict( - argstr='-Ovar %s', - extensions=None, - ), - polort=dict( - argstr='-polort %d', - xor=['matrix'], - ), - quiet=dict(argstr='-quiet', ), - rbeta_file=dict( - argstr='-Rbeta %s', - extensions=None, - ), - rout=dict(argstr='-rout', ), - slibase=dict(argstr='-slibase %s', ), - slibase_sm=dict(argstr='-slibase_sm %s', ), - tout=dict(argstr='-tout', ), - usetemp=dict(argstr='-usetemp', ), - var_file=dict( - argstr='-Rvar %s', - extensions=None, - ), - verb=dict(argstr='-verb', ), - wherr_file=dict( - argstr='-Rwherr %s', - extensions=None, - ), + ovar=dict(argstr="-Ovar %s", extensions=None,), + polort=dict(argstr="-polort %d", xor=["matrix"],), + quiet=dict(argstr="-quiet",), + rbeta_file=dict(argstr="-Rbeta %s", extensions=None,), + rout=dict(argstr="-rout",), + slibase=dict(argstr="-slibase %s",), + slibase_sm=dict(argstr="-slibase_sm %s",), + tout=dict(argstr="-tout",), + usetemp=dict(argstr="-usetemp",), + var_file=dict(argstr="-Rvar %s", extensions=None,), + verb=dict(argstr="-verb",), + wherr_file=dict(argstr="-Rwherr %s", extensions=None,), ) inputs = Remlfit.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Remlfit_outputs(): output_map = dict( - errts_file=dict(extensions=None, ), - fitts_file=dict(extensions=None, ), - glt_file=dict(extensions=None, ), - obeta=dict(extensions=None, ), - obuck=dict(extensions=None, ), - oerrts=dict(extensions=None, ), - ofitts=dict(extensions=None, ), - oglt=dict(extensions=None, ), - out_file=dict(extensions=None, ), - ovar=dict(extensions=None, ), - rbeta_file=dict(extensions=None, ), - var_file=dict(extensions=None, ), - wherr_file=dict(extensions=None, ), + errts_file=dict(extensions=None,), + fitts_file=dict(extensions=None,), + glt_file=dict(extensions=None,), + obeta=dict(extensions=None,), + obuck=dict(extensions=None,), + oerrts=dict(extensions=None,), + ofitts=dict(extensions=None,), + oglt=dict(extensions=None,), + out_file=dict(extensions=None,), + ovar=dict(extensions=None,), + rbeta_file=dict(extensions=None,), + var_file=dict(extensions=None,), + wherr_file=dict(extensions=None,), ) outputs = Remlfit.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Resample.py b/nipype/interfaces/afni/tests/test_auto_Resample.py index f2c7456a6a..3053112816 100644 --- a/nipype/interfaces/afni/tests/test_auto_Resample.py +++ b/nipype/interfaces/afni/tests/test_auto_Resample.py @@ -4,44 +4,37 @@ def test_Resample_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='-inset %s', + argstr="-inset %s", copyfile=False, extensions=None, mandatory=True, position=-1, ), - master=dict( - argstr='-master %s', - extensions=None, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - orientation=dict(argstr='-orient %s', ), + master=dict(argstr="-master %s", extensions=None,), + num_threads=dict(nohash=True, usedefault=True,), + 
orientation=dict(argstr="-orient %s",), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_file', - name_template='%s_resample', + name_source="in_file", + name_template="%s_resample", ), outputtype=dict(), - resample_mode=dict(argstr='-rmode %s', ), - voxel_size=dict(argstr='-dxyz %f %f %f', ), + resample_mode=dict(argstr="-rmode %s",), + voxel_size=dict(argstr="-dxyz %f %f %f",), ) inputs = Resample.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Resample_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Resample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Retroicor.py b/nipype/interfaces/afni/tests/test_auto_Retroicor.py index 9e3c10631c..9c95116d09 100644 --- a/nipype/interfaces/afni/tests/test_auto_Retroicor.py +++ b/nipype/interfaces/afni/tests/test_auto_Retroicor.py @@ -4,68 +4,40 @@ def test_Retroicor_inputs(): input_map = dict( - args=dict(argstr='%s', ), - card=dict( - argstr='-card %s', - extensions=None, - position=-2, - ), + args=dict(argstr="%s",), + card=dict(argstr="-card %s", extensions=None, position=-2,), cardphase=dict( - argstr='-cardphase %s', - extensions=None, - hash_files=False, - position=-6, - ), - environ=dict( - nohash=True, - usedefault=True, + argstr="-cardphase %s", extensions=None, hash_files=False, position=-6, ), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - order=dict( - argstr='-order %s', - position=-5, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, ), + num_threads=dict(nohash=True, usedefault=True,), + order=dict(argstr="-order %s", position=-5,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source=['in_file'], - name_template='%s_retroicor', + name_source=["in_file"], + name_template="%s_retroicor", position=1, ), outputtype=dict(), - resp=dict( - argstr='-resp %s', - extensions=None, - position=-3, - ), + resp=dict(argstr="-resp %s", extensions=None, position=-3,), respphase=dict( - argstr='-respphase %s', - extensions=None, - hash_files=False, - position=-7, - ), - threshold=dict( - argstr='-threshold %d', - position=-4, + argstr="-respphase %s", extensions=None, hash_files=False, position=-7, ), + threshold=dict(argstr="-threshold %d", position=-4,), ) inputs = Retroicor.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Retroicor_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Retroicor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_SVMTest.py b/nipype/interfaces/afni/tests/test_auto_SVMTest.py index 79f33834e9..e4ab1e00bc 100644 --- a/nipype/interfaces/afni/tests/test_auto_SVMTest.py +++ b/nipype/interfaces/afni/tests/test_auto_SVMTest.py @@ -4,47 +4,31 @@ def test_SVMTest_inputs(): input_map = dict( - args=dict(argstr='%s', ), - classout=dict(argstr='-classout', ), - environ=dict( - nohash=True, - usedefault=True, - ), - 
in_file=dict( - argstr='-testvol %s', - extensions=None, - mandatory=True, - ), - model=dict( - argstr='-model %s', - mandatory=True, - ), - multiclass=dict(argstr='-multiclass %s', ), - nodetrend=dict(argstr='-nodetrend', ), - nopredcensord=dict(argstr='-nopredcensord', ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - options=dict(argstr='%s', ), + args=dict(argstr="%s",), + classout=dict(argstr="-classout",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-testvol %s", extensions=None, mandatory=True,), + model=dict(argstr="-model %s", mandatory=True,), + multiclass=dict(argstr="-multiclass %s",), + nodetrend=dict(argstr="-nodetrend",), + nopredcensord=dict(argstr="-nopredcensord",), + num_threads=dict(nohash=True, usedefault=True,), + options=dict(argstr="%s",), out_file=dict( - argstr='-predictions %s', - extensions=None, - name_template='%s_predictions', + argstr="-predictions %s", extensions=None, name_template="%s_predictions", ), outputtype=dict(), - testlabels=dict( - argstr='-testlabels %s', - extensions=None, - ), + testlabels=dict(argstr="-testlabels %s", extensions=None,), ) inputs = SVMTest.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SVMTest_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = SVMTest.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_SVMTrain.py b/nipype/interfaces/afni/tests/test_auto_SVMTrain.py index 307b8628f8..5edf36b7fa 100644 --- a/nipype/interfaces/afni/tests/test_auto_SVMTrain.py +++ b/nipype/interfaces/afni/tests/test_auto_SVMTrain.py @@ -5,76 +5,55 @@ def test_SVMTrain_inputs(): input_map = dict( alphas=dict( - argstr='-alpha %s', + argstr="-alpha %s", extensions=None, - name_source='in_file', - name_template='%s_alphas', - suffix='_alphas', - ), - args=dict(argstr='%s', ), - censor=dict( - argstr='-censor %s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, + name_source="in_file", + name_template="%s_alphas", + suffix="_alphas", ), + args=dict(argstr="%s",), + censor=dict(argstr="-censor %s", extensions=None,), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='-trainvol %s', - copyfile=False, - extensions=None, - mandatory=True, - ), - kernel=dict(argstr='-kernel %s', ), - mask=dict( - argstr='-mask %s', - copyfile=False, - extensions=None, - position=-1, + argstr="-trainvol %s", copyfile=False, extensions=None, mandatory=True, ), - max_iterations=dict(argstr='-max_iterations %d', ), + kernel=dict(argstr="-kernel %s",), + mask=dict(argstr="-mask %s", copyfile=False, extensions=None, position=-1,), + max_iterations=dict(argstr="-max_iterations %d",), model=dict( - argstr='-model %s', + argstr="-model %s", extensions=None, - name_source='in_file', - name_template='%s_model', - suffix='_model', + name_source="in_file", + name_template="%s_model", + suffix="_model", ), - nomodelmask=dict(argstr='-nomodelmask', ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - options=dict(argstr='%s', ), + nomodelmask=dict(argstr="-nomodelmask",), + num_threads=dict(nohash=True, usedefault=True,), + options=dict(argstr="%s",), out_file=dict( - argstr='-bucket %s', + argstr="-bucket %s", extensions=None, - name_source='in_file', - name_template='%s_vectors', - suffix='_bucket', + name_source="in_file", + 
name_template="%s_vectors", + suffix="_bucket", ), outputtype=dict(), - trainlabels=dict( - argstr='-trainlabels %s', - extensions=None, - ), - ttype=dict( - argstr='-type %s', - mandatory=True, - ), - w_out=dict(argstr='-wout', ), + trainlabels=dict(argstr="-trainlabels %s", extensions=None,), + ttype=dict(argstr="-type %s", mandatory=True,), + w_out=dict(argstr="-wout",), ) inputs = SVMTrain.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SVMTrain_outputs(): output_map = dict( - alphas=dict(extensions=None, ), - model=dict(extensions=None, ), - out_file=dict(extensions=None, ), + alphas=dict(extensions=None,), + model=dict(extensions=None,), + out_file=dict(extensions=None,), ) outputs = SVMTrain.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Seg.py b/nipype/interfaces/afni/tests/test_auto_Seg.py index a5d475d9a0..f243aac8c7 100644 --- a/nipype/interfaces/afni/tests/test_auto_Seg.py +++ b/nipype/interfaces/afni/tests/test_auto_Seg.py @@ -4,40 +4,35 @@ def test_Seg_inputs(): input_map = dict( - args=dict(argstr='%s', ), - bias_classes=dict(argstr='-bias_classes %s', ), - bias_fwhm=dict(argstr='-bias_fwhm %f', ), - blur_meth=dict(argstr='-blur_meth %s', ), - bmrf=dict(argstr='-bmrf %f', ), - classes=dict(argstr='-classes %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + bias_classes=dict(argstr="-bias_classes %s",), + bias_fwhm=dict(argstr="-bias_fwhm %f",), + blur_meth=dict(argstr="-blur_meth %s",), + bmrf=dict(argstr="-bmrf %f",), + classes=dict(argstr="-classes %s",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='-anat %s', + argstr="-anat %s", copyfile=True, extensions=None, mandatory=True, position=-1, ), - main_N=dict(argstr='-main_N %d', ), - mask=dict( - argstr='-mask %s', - mandatory=True, - position=-2, - ), - mixfloor=dict(argstr='-mixfloor %f', ), - mixfrac=dict(argstr='-mixfrac %s', ), - prefix=dict(argstr='-prefix %s', ), + main_N=dict(argstr="-main_N %d",), + mask=dict(argstr="-mask %s", mandatory=True, position=-2,), + mixfloor=dict(argstr="-mixfloor %f",), + mixfrac=dict(argstr="-mixfrac %s",), + prefix=dict(argstr="-prefix %s",), ) inputs = Seg.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Seg_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Seg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_SkullStrip.py b/nipype/interfaces/afni/tests/test_auto_SkullStrip.py index c198b0172a..4fbf078da5 100644 --- a/nipype/interfaces/afni/tests/test_auto_SkullStrip.py +++ b/nipype/interfaces/afni/tests/test_auto_SkullStrip.py @@ -4,27 +4,21 @@ def test_SkullStrip_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='-input %s', + argstr="-input %s", copyfile=False, extensions=None, mandatory=True, position=1, ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_file', - name_template='%s_skullstrip', + 
name_source="in_file", + name_template="%s_skullstrip", ), outputtype=dict(), ) @@ -33,8 +27,10 @@ def test_SkullStrip_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SkullStrip_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = SkullStrip.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Synthesize.py b/nipype/interfaces/afni/tests/test_auto_Synthesize.py index 2c71bd41b7..df23d9923b 100644 --- a/nipype/interfaces/afni/tests/test_auto_Synthesize.py +++ b/nipype/interfaces/afni/tests/test_auto_Synthesize.py @@ -4,48 +4,31 @@ def test_Synthesize_inputs(): input_map = dict( - TR=dict(argstr='-TR %f', ), - args=dict(argstr='%s', ), + TR=dict(argstr="-TR %f",), + args=dict(argstr="%s",), cbucket=dict( - argstr='-cbucket %s', - copyfile=False, - extensions=None, - mandatory=True, - ), - cenfill=dict(argstr='-cenfill %s', ), - dry_run=dict(argstr='-dry', ), - environ=dict( - nohash=True, - usedefault=True, + argstr="-cbucket %s", copyfile=False, extensions=None, mandatory=True, ), + cenfill=dict(argstr="-cenfill %s",), + dry_run=dict(argstr="-dry",), + environ=dict(nohash=True, usedefault=True,), matrix=dict( - argstr='-matrix %s', - copyfile=False, - extensions=None, - mandatory=True, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_file=dict( - argstr='-prefix %s', - extensions=None, - name_template='syn', + argstr="-matrix %s", copyfile=False, extensions=None, mandatory=True, ), + num_threads=dict(nohash=True, usedefault=True,), + out_file=dict(argstr="-prefix %s", extensions=None, name_template="syn",), outputtype=dict(), - select=dict( - argstr='-select %s', - mandatory=True, - ), + select=dict(argstr="-select %s", mandatory=True,), ) inputs = Synthesize.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Synthesize_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Synthesize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TCat.py b/nipype/interfaces/afni/tests/test_auto_TCat.py index acd3ec7b27..b9a4a16054 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCat.py +++ b/nipype/interfaces/afni/tests/test_auto_TCat.py @@ -4,41 +4,29 @@ def test_TCat_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr=' %s', - copyfile=False, - mandatory=True, - position=-1, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_files=dict(argstr=" %s", copyfile=False, mandatory=True, position=-1,), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_files', - name_template='%s_tcat', + name_source="in_files", + name_template="%s_tcat", ), outputtype=dict(), - rlt=dict( - argstr='-rlt%s', - position=1, - ), - verbose=dict(argstr='-verb', ), + rlt=dict(argstr="-rlt%s", position=1,), + verbose=dict(argstr="-verb",), ) inputs = TCat.input_spec() for key, metadata in list(input_map.items()): for metakey, 
value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TCat_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = TCat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py b/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py index 516c2d15d0..7644b191a6 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py +++ b/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py @@ -4,39 +4,23 @@ def test_TCatSubBrick_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr='%s%s ...', - copyfile=False, - mandatory=True, - position=-1, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_file=dict( - argstr='-prefix %s', - extensions=None, - genfile=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_files=dict(argstr="%s%s ...", copyfile=False, mandatory=True, position=-1,), + num_threads=dict(nohash=True, usedefault=True,), + out_file=dict(argstr="-prefix %s", extensions=None, genfile=True,), outputtype=dict(), - rlt=dict( - argstr='-rlt%s', - position=1, - ), + rlt=dict(argstr="-rlt%s", position=1,), ) inputs = TCatSubBrick.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TCatSubBrick_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = TCatSubBrick.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TCorr1D.py b/nipype/interfaces/afni/tests/test_auto_TCorr1D.py index a87efb29da..0a306ceca3 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCorr1D.py +++ b/nipype/interfaces/afni/tests/test_auto_TCorr1D.py @@ -4,64 +4,43 @@ def test_TCorr1D_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), ktaub=dict( - argstr=' -ktaub', - position=1, - xor=['pearson', 'spearman', 'quadrant'], - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr=" -ktaub", position=1, xor=["pearson", "spearman", "quadrant"], ), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, keep_extension=True, - name_source='xset', - name_template='%s_correlation.nii.gz', + name_source="xset", + name_template="%s_correlation.nii.gz", ), outputtype=dict(), pearson=dict( - argstr=' -pearson', - position=1, - xor=['spearman', 'quadrant', 'ktaub'], + argstr=" -pearson", position=1, xor=["spearman", "quadrant", "ktaub"], ), quadrant=dict( - argstr=' -quadrant', - position=1, - xor=['pearson', 'spearman', 'ktaub'], + argstr=" -quadrant", position=1, xor=["pearson", "spearman", "ktaub"], ), spearman=dict( - argstr=' -spearman', - position=1, - xor=['pearson', 'quadrant', 'ktaub'], + argstr=" -spearman", position=1, xor=["pearson", "quadrant", "ktaub"], ), xset=dict( - argstr=' %s', - copyfile=False, - extensions=None, - mandatory=True, - position=-2, - ), - y_1d=dict( - argstr=' %s', - extensions=None, - mandatory=True, - position=-1, + argstr=" %s", copyfile=False, extensions=None, mandatory=True, position=-2, ), + 
y_1d=dict(argstr=" %s", extensions=None, mandatory=True, position=-1,), ) inputs = TCorr1D.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TCorr1D_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = TCorr1D.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TCorrMap.py b/nipype/interfaces/afni/tests/test_auto_TCorrMap.py index 364a19af41..5a03aa3732 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCorrMap.py +++ b/nipype/interfaces/afni/tests/test_auto_TCorrMap.py @@ -5,135 +5,106 @@ def test_TCorrMap_inputs(): input_map = dict( absolute_threshold=dict( - argstr='-Thresh %f %s', - extensions=None, - name_source='in_file', - suffix='_thresh', - xor=('absolute_threshold', 'var_absolute_threshold', - 'var_absolute_threshold_normalize'), - ), - args=dict(argstr='%s', ), - automask=dict(argstr='-automask', ), + argstr="-Thresh %f %s", + extensions=None, + name_source="in_file", + suffix="_thresh", + xor=( + "absolute_threshold", + "var_absolute_threshold", + "var_absolute_threshold_normalize", + ), + ), + args=dict(argstr="%s",), + automask=dict(argstr="-automask",), average_expr=dict( - argstr='-Aexpr %s %s', + argstr="-Aexpr %s %s", extensions=None, - name_source='in_file', - suffix='_aexpr', - xor=('average_expr', 'average_expr_nonzero', 'sum_expr'), + name_source="in_file", + suffix="_aexpr", + xor=("average_expr", "average_expr_nonzero", "sum_expr"), ), average_expr_nonzero=dict( - argstr='-Cexpr %s %s', + argstr="-Cexpr %s %s", extensions=None, - name_source='in_file', - suffix='_cexpr', - xor=('average_expr', 'average_expr_nonzero', 'sum_expr'), + name_source="in_file", + suffix="_cexpr", + xor=("average_expr", "average_expr_nonzero", "sum_expr"), ), - bandpass=dict(argstr='-bpass %f %f', ), - blur_fwhm=dict(argstr='-Gblur %f', ), + bandpass=dict(argstr="-bpass %f %f",), + blur_fwhm=dict(argstr="-Gblur %f",), correlation_maps=dict( - argstr='-CorrMap %s', - extensions=None, - name_source='in_file', + argstr="-CorrMap %s", extensions=None, name_source="in_file", ), correlation_maps_masked=dict( - argstr='-CorrMask %s', - extensions=None, - name_source='in_file', - ), - environ=dict( - nohash=True, - usedefault=True, + argstr="-CorrMask %s", extensions=None, name_source="in_file", ), + environ=dict(nohash=True, usedefault=True,), expr=dict(), histogram=dict( - argstr='-Hist %d %s', + argstr="-Hist %d %s", extensions=None, - name_source='in_file', - suffix='_hist', + name_source="in_file", + suffix="_hist", ), histogram_bin_numbers=dict(), in_file=dict( - argstr='-input %s', - copyfile=False, - extensions=None, - mandatory=True, - ), - mask=dict( - argstr='-mask %s', - extensions=None, + argstr="-input %s", copyfile=False, extensions=None, mandatory=True, ), + mask=dict(argstr="-mask %s", extensions=None,), mean_file=dict( - argstr='-Mean %s', - extensions=None, - name_source='in_file', - suffix='_mean', - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="-Mean %s", extensions=None, name_source="in_file", suffix="_mean", ), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source=['in_file'], - name_template='%s_afni', + name_source=["in_file"], + name_template="%s_afni", ), outputtype=dict(), pmean=dict( - argstr='-Pmean 
%s', - extensions=None, - name_source='in_file', - suffix='_pmean', + argstr="-Pmean %s", extensions=None, name_source="in_file", suffix="_pmean", ), - polort=dict(argstr='-polort %d', ), + polort=dict(argstr="-polort %d",), qmean=dict( - argstr='-Qmean %s', - extensions=None, - name_source='in_file', - suffix='_qmean', - ), - regress_out_timeseries=dict( - argstr='-ort %s', - extensions=None, - ), - seeds=dict( - argstr='-seed %s', - extensions=None, - xor='seeds_width', - ), - seeds_width=dict( - argstr='-Mseed %f', - xor='seeds', + argstr="-Qmean %s", extensions=None, name_source="in_file", suffix="_qmean", ), + regress_out_timeseries=dict(argstr="-ort %s", extensions=None,), + seeds=dict(argstr="-seed %s", extensions=None, xor="seeds_width",), + seeds_width=dict(argstr="-Mseed %f", xor="seeds",), sum_expr=dict( - argstr='-Sexpr %s %s', + argstr="-Sexpr %s %s", extensions=None, - name_source='in_file', - suffix='_sexpr', - xor=('average_expr', 'average_expr_nonzero', 'sum_expr'), + name_source="in_file", + suffix="_sexpr", + xor=("average_expr", "average_expr_nonzero", "sum_expr"), ), thresholds=dict(), var_absolute_threshold=dict( - argstr='-VarThresh %f %f %f %s', + argstr="-VarThresh %f %f %f %s", extensions=None, - name_source='in_file', - suffix='_varthresh', - xor=('absolute_threshold', 'var_absolute_threshold', - 'var_absolute_threshold_normalize'), + name_source="in_file", + suffix="_varthresh", + xor=( + "absolute_threshold", + "var_absolute_threshold", + "var_absolute_threshold_normalize", + ), ), var_absolute_threshold_normalize=dict( - argstr='-VarThreshN %f %f %f %s', + argstr="-VarThreshN %f %f %f %s", extensions=None, - name_source='in_file', - suffix='_varthreshn', - xor=('absolute_threshold', 'var_absolute_threshold', - 'var_absolute_threshold_normalize'), + name_source="in_file", + suffix="_varthreshn", + xor=( + "absolute_threshold", + "var_absolute_threshold", + "var_absolute_threshold_normalize", + ), ), zmean=dict( - argstr='-Zmean %s', - extensions=None, - name_source='in_file', - suffix='_zmean', + argstr="-Zmean %s", extensions=None, name_source="in_file", suffix="_zmean", ), ) inputs = TCorrMap.input_spec() @@ -141,21 +112,23 @@ def test_TCorrMap_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TCorrMap_outputs(): output_map = dict( - absolute_threshold=dict(extensions=None, ), - average_expr=dict(extensions=None, ), - average_expr_nonzero=dict(extensions=None, ), - correlation_maps=dict(extensions=None, ), - correlation_maps_masked=dict(extensions=None, ), - histogram=dict(extensions=None, ), - mean_file=dict(extensions=None, ), - pmean=dict(extensions=None, ), - qmean=dict(extensions=None, ), - sum_expr=dict(extensions=None, ), - var_absolute_threshold=dict(extensions=None, ), - var_absolute_threshold_normalize=dict(extensions=None, ), - zmean=dict(extensions=None, ), + absolute_threshold=dict(extensions=None,), + average_expr=dict(extensions=None,), + average_expr_nonzero=dict(extensions=None,), + correlation_maps=dict(extensions=None,), + correlation_maps_masked=dict(extensions=None,), + histogram=dict(extensions=None,), + mean_file=dict(extensions=None,), + pmean=dict(extensions=None,), + qmean=dict(extensions=None,), + sum_expr=dict(extensions=None,), + var_absolute_threshold=dict(extensions=None,), + var_absolute_threshold_normalize=dict(extensions=None,), + zmean=dict(extensions=None,), ) outputs = TCorrMap.output_spec() diff --git 
a/nipype/interfaces/afni/tests/test_auto_TCorrelate.py b/nipype/interfaces/afni/tests/test_auto_TCorrelate.py index 3e75c7d099..0bf794886b 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCorrelate.py +++ b/nipype/interfaces/afni/tests/test_auto_TCorrelate.py @@ -4,37 +4,23 @@ def test_TCorrelate_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='xset', - name_template='%s_tcorr', + name_source="xset", + name_template="%s_tcorr", ), outputtype=dict(), - pearson=dict(argstr='-pearson', ), - polort=dict(argstr='-polort %d', ), + pearson=dict(argstr="-pearson",), + polort=dict(argstr="-polort %d",), xset=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-2, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-2, ), yset=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-1, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, ), ) inputs = TCorrelate.input_spec() @@ -42,8 +28,10 @@ def test_TCorrelate_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TCorrelate_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = TCorrelate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TNorm.py b/nipype/interfaces/afni/tests/test_auto_TNorm.py index 144e0fdeba..8a8493f927 100644 --- a/nipype/interfaces/afni/tests/test_auto_TNorm.py +++ b/nipype/interfaces/afni/tests/test_auto_TNorm.py @@ -4,43 +4,35 @@ def test_TNorm_inputs(): input_map = dict( - L1fit=dict(argstr='-L1fit', ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + L1fit=dict(argstr="-L1fit",), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - norm1=dict(argstr='-norm1', ), - norm2=dict(argstr='-norm2', ), - normR=dict(argstr='-normR', ), - normx=dict(argstr='-normx', ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, ), + norm1=dict(argstr="-norm1",), + norm2=dict(argstr="-norm2",), + normR=dict(argstr="-normR",), + normx=dict(argstr="-normx",), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_file', - name_template='%s_tnorm', + name_source="in_file", + name_template="%s_tnorm", ), outputtype=dict(), - polort=dict(argstr='-polort %s', ), + polort=dict(argstr="-polort %s",), ) inputs = TNorm.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TNorm_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = TNorm.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/afni/tests/test_auto_TProject.py b/nipype/interfaces/afni/tests/test_auto_TProject.py index 897b18c853..e300cb16be 100644 --- a/nipype/interfaces/afni/tests/test_auto_TProject.py +++ b/nipype/interfaces/afni/tests/test_auto_TProject.py @@ -4,68 +4,49 @@ def test_TProject_inputs(): input_map = dict( - TR=dict(argstr='-TR %g', ), - args=dict(argstr='%s', ), - automask=dict( - argstr='-automask', - xor=['mask'], - ), - bandpass=dict(argstr='-bandpass %g %g', ), - blur=dict(argstr='-blur %g', ), - cenmode=dict(argstr='-cenmode %s', ), - censor=dict( - argstr='-censor %s', - extensions=None, - ), - censortr=dict(argstr='-CENSORTR %s', ), - concat=dict( - argstr='-concat %s', - extensions=None, - ), - dsort=dict(argstr='-dsort %s...', ), - environ=dict( - nohash=True, - usedefault=True, - ), + TR=dict(argstr="-TR %g",), + args=dict(argstr="%s",), + automask=dict(argstr="-automask", xor=["mask"],), + bandpass=dict(argstr="-bandpass %g %g",), + blur=dict(argstr="-blur %g",), + cenmode=dict(argstr="-cenmode %s",), + censor=dict(argstr="-censor %s", extensions=None,), + censortr=dict(argstr="-CENSORTR %s",), + concat=dict(argstr="-concat %s", extensions=None,), + dsort=dict(argstr="-dsort %s...",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='-input %s', + argstr="-input %s", copyfile=False, extensions=None, mandatory=True, position=1, ), - mask=dict( - argstr='-mask %s', - extensions=None, - ), - noblock=dict(argstr='-noblock', ), - norm=dict(argstr='-norm', ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - ort=dict( - argstr='-ort %s', - extensions=None, - ), + mask=dict(argstr="-mask %s", extensions=None,), + noblock=dict(argstr="-noblock",), + norm=dict(argstr="-norm",), + num_threads=dict(nohash=True, usedefault=True,), + ort=dict(argstr="-ort %s", extensions=None,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_file', - name_template='%s_tproject', + name_source="in_file", + name_template="%s_tproject", position=-1, ), outputtype=dict(), - polort=dict(argstr='-polort %d', ), - stopband=dict(argstr='-stopband %g %g', ), + polort=dict(argstr="-polort %d",), + stopband=dict(argstr="-stopband %g %g",), ) inputs = TProject.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TProject_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = TProject.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TShift.py b/nipype/interfaces/afni/tests/test_auto_TShift.py index 7b6589df5f..9f1e6c3342 100644 --- a/nipype/interfaces/afni/tests/test_auto_TShift.py +++ b/nipype/interfaces/afni/tests/test_auto_TShift.py @@ -4,61 +4,40 @@ def test_TShift_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - ignore=dict(argstr='-ignore %s', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + ignore=dict(argstr="-ignore %s",), in_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - interp=dict(argstr='-%s', ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, ), + interp=dict(argstr="-%s",), + num_threads=dict(nohash=True, usedefault=True,), 
out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_file', - name_template='%s_tshift', + name_source="in_file", + name_template="%s_tshift", ), outputtype=dict(), - rlt=dict(argstr='-rlt', ), - rltplus=dict(argstr='-rlt+', ), - slice_encoding_direction=dict(usedefault=True, ), - slice_timing=dict( - argstr='-tpattern @%s', - xor=['tpattern'], - ), - tpattern=dict( - argstr='-tpattern %s', - xor=['slice_timing'], - ), - tr=dict(argstr='-TR %s', ), - tslice=dict( - argstr='-slice %s', - xor=['tzero'], - ), - tzero=dict( - argstr='-tzero %s', - xor=['tslice'], - ), + rlt=dict(argstr="-rlt",), + rltplus=dict(argstr="-rlt+",), + slice_encoding_direction=dict(usedefault=True,), + slice_timing=dict(argstr="-tpattern @%s", xor=["tpattern"],), + tpattern=dict(argstr="-tpattern %s", xor=["slice_timing"],), + tr=dict(argstr="-TR %s",), + tslice=dict(argstr="-slice %s", xor=["tzero"],), + tzero=dict(argstr="-tzero %s", xor=["tslice"],), ) inputs = TShift.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TShift_outputs(): output_map = dict( - out_file=dict(extensions=None, ), - timing_file=dict(extensions=None, ), + out_file=dict(extensions=None,), timing_file=dict(extensions=None,), ) outputs = TShift.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_TSmooth.py b/nipype/interfaces/afni/tests/test_auto_TSmooth.py index 67c4821ed2..1223686b83 100644 --- a/nipype/interfaces/afni/tests/test_auto_TSmooth.py +++ b/nipype/interfaces/afni/tests/test_auto_TSmooth.py @@ -4,39 +4,26 @@ def test_TSmooth_inputs(): input_map = dict( - adaptive=dict(argstr='-adaptive %d', ), - args=dict(argstr='%s', ), - blackman=dict(argstr='-blackman %d', ), - custom=dict( - argstr='-custom %s', - extensions=None, - ), - datum=dict(argstr='-datum %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - hamming=dict(argstr='-hamming %d', ), + adaptive=dict(argstr="-adaptive %d",), + args=dict(argstr="%s",), + blackman=dict(argstr="-blackman %d",), + custom=dict(argstr="-custom %s", extensions=None,), + datum=dict(argstr="-datum %s",), + environ=dict(nohash=True, usedefault=True,), + hamming=dict(argstr="-hamming %d",), in_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-1, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, ), - lin=dict(argstr='-lin', ), - lin3=dict(argstr='-3lin %d', ), - med=dict(argstr='-med', ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - osf=dict(argstr='-osf', ), + lin=dict(argstr="-lin",), + lin3=dict(argstr="-3lin %d",), + med=dict(argstr="-med",), + num_threads=dict(nohash=True, usedefault=True,), + osf=dict(argstr="-osf",), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_file', - name_template='%s_smooth', + name_source="in_file", + name_template="%s_smooth", ), outputtype=dict(), ) @@ -45,8 +32,10 @@ def test_TSmooth_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TSmooth_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = TSmooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TStat.py 
b/nipype/interfaces/afni/tests/test_auto_TStat.py index 1b0949ce40..0a7a99da76 100644 --- a/nipype/interfaces/afni/tests/test_auto_TStat.py +++ b/nipype/interfaces/afni/tests/test_auto_TStat.py @@ -4,32 +4,19 @@ def test_TStat_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - mask=dict( - argstr='-mask %s', - extensions=None, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - options=dict(argstr='%s', ), + mask=dict(argstr="-mask %s", extensions=None,), + num_threads=dict(nohash=True, usedefault=True,), + options=dict(argstr="%s",), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_file', - name_template='%s_tstat', + name_source="in_file", + name_template="%s_tstat", ), outputtype=dict(), ) @@ -38,8 +25,10 @@ def test_TStat_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TStat_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = TStat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_To3D.py b/nipype/interfaces/afni/tests/test_auto_To3D.py index 86d64804a7..ffed9ba623 100644 --- a/nipype/interfaces/afni/tests/test_auto_To3D.py +++ b/nipype/interfaces/afni/tests/test_auto_To3D.py @@ -4,40 +4,32 @@ def test_To3D_inputs(): input_map = dict( - args=dict(argstr='%s', ), - assumemosaic=dict(argstr='-assume_dicom_mosaic', ), - datatype=dict(argstr='-datum %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - filetype=dict(argstr='-%s', ), - funcparams=dict(argstr='-time:zt %s alt+z2', ), - in_folder=dict( - argstr='%s/*.dcm', - mandatory=True, - position=-1, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + assumemosaic=dict(argstr="-assume_dicom_mosaic",), + datatype=dict(argstr="-datum %s",), + environ=dict(nohash=True, usedefault=True,), + filetype=dict(argstr="-%s",), + funcparams=dict(argstr="-time:zt %s alt+z2",), + in_folder=dict(argstr="%s/*.dcm", mandatory=True, position=-1,), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source=['in_folder'], - name_template='%s', + name_source=["in_folder"], + name_template="%s", ), outputtype=dict(), - skipoutliers=dict(argstr='-skip_outliers', ), + skipoutliers=dict(argstr="-skip_outliers",), ) inputs = To3D.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_To3D_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = To3D.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Undump.py b/nipype/interfaces/afni/tests/test_auto_Undump.py index d5c2dabdfc..3679a118fe 100644 --- a/nipype/interfaces/afni/tests/test_auto_Undump.py +++ b/nipype/interfaces/afni/tests/test_auto_Undump.py @@ -4,47 +4,36 @@ def test_Undump_inputs(): 
input_map = dict( - args=dict(argstr='%s', ), - coordinates_specification=dict(argstr='-%s', ), - datatype=dict(argstr='-datum %s', ), - default_value=dict(argstr='-dval %f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fill_value=dict(argstr='-fval %f', ), - head_only=dict(argstr='-head_only', ), + args=dict(argstr="%s",), + coordinates_specification=dict(argstr="-%s",), + datatype=dict(argstr="-datum %s",), + default_value=dict(argstr="-dval %f",), + environ=dict(nohash=True, usedefault=True,), + fill_value=dict(argstr="-fval %f",), + head_only=dict(argstr="-head_only",), in_file=dict( - argstr='-master %s', + argstr="-master %s", copyfile=False, extensions=None, mandatory=True, position=-1, ), - mask_file=dict( - argstr='-mask %s', - extensions=None, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - orient=dict(argstr='-orient %s', ), - out_file=dict( - argstr='-prefix %s', - extensions=None, - name_source='in_file', - ), + mask_file=dict(argstr="-mask %s", extensions=None,), + num_threads=dict(nohash=True, usedefault=True,), + orient=dict(argstr="-orient %s",), + out_file=dict(argstr="-prefix %s", extensions=None, name_source="in_file",), outputtype=dict(), - srad=dict(argstr='-srad %f', ), + srad=dict(argstr="-srad %f",), ) inputs = Undump.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Undump_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Undump.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Unifize.py b/nipype/interfaces/afni/tests/test_auto_Unifize.py index 73156ba847..71331215b7 100644 --- a/nipype/interfaces/afni/tests/test_auto_Unifize.py +++ b/nipype/interfaces/afni/tests/test_auto_Unifize.py @@ -4,56 +4,44 @@ def test_Unifize_inputs(): input_map = dict( - args=dict(argstr='%s', ), - cl_frac=dict(argstr='-clfrac %f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - epi=dict( - argstr='-EPI', - requires=['no_duplo', 't2'], - xor=['gm'], - ), - gm=dict(argstr='-GM', ), + args=dict(argstr="%s",), + cl_frac=dict(argstr="-clfrac %f",), + environ=dict(nohash=True, usedefault=True,), + epi=dict(argstr="-EPI", requires=["no_duplo", "t2"], xor=["gm"],), + gm=dict(argstr="-GM",), in_file=dict( - argstr='-input %s', + argstr="-input %s", copyfile=False, extensions=None, mandatory=True, position=-1, ), - no_duplo=dict(argstr='-noduplo', ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + no_duplo=dict(argstr="-noduplo",), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_file', - name_template='%s_unifized', + name_source="in_file", + name_template="%s_unifized", ), outputtype=dict(), - quiet=dict(argstr='-quiet', ), - rbt=dict(argstr='-rbt %f %f %f', ), - scale_file=dict( - argstr='-ssave %s', - extensions=None, - ), - t2=dict(argstr='-T2', ), - t2_up=dict(argstr='-T2up %f', ), - urad=dict(argstr='-Urad %s', ), + quiet=dict(argstr="-quiet",), + rbt=dict(argstr="-rbt %f %f %f",), + scale_file=dict(argstr="-ssave %s", extensions=None,), + t2=dict(argstr="-T2",), + t2_up=dict(argstr="-T2up %f",), + urad=dict(argstr="-Urad %s",), ) inputs = Unifize.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert 
getattr(inputs.traits()[key], metakey) == value + + def test_Unifize_outputs(): output_map = dict( - out_file=dict(extensions=None, ), - scale_file=dict(extensions=None, ), + out_file=dict(extensions=None,), scale_file=dict(extensions=None,), ) outputs = Unifize.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Volreg.py b/nipype/interfaces/afni/tests/test_auto_Volreg.py index 6f6f816a5d..9d7c6aa69b 100644 --- a/nipype/interfaces/afni/tests/test_auto_Volreg.py +++ b/nipype/interfaces/afni/tests/test_auto_Volreg.py @@ -4,77 +4,62 @@ def test_Volreg_inputs(): input_map = dict( - args=dict(argstr='%s', ), - basefile=dict( - argstr='-base %s', - extensions=None, - position=-6, - ), - copyorigin=dict(argstr='-twodup', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + basefile=dict(argstr="-base %s", extensions=None, position=-6,), + copyorigin=dict(argstr="-twodup",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-1, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, ), - in_weight_volume=dict(argstr="-weight '%s[%d]'", ), - interp=dict(argstr='-%s', ), + in_weight_volume=dict(argstr="-weight '%s[%d]'",), + interp=dict(argstr="-%s",), md1d_file=dict( - argstr='-maxdisp1D %s', + argstr="-maxdisp1D %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_md.1D', + name_source="in_file", + name_template="%s_md.1D", position=-4, ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + num_threads=dict(nohash=True, usedefault=True,), oned_file=dict( - argstr='-1Dfile %s', + argstr="-1Dfile %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s.1D', + name_source="in_file", + name_template="%s.1D", ), oned_matrix_save=dict( - argstr='-1Dmatrix_save %s', + argstr="-1Dmatrix_save %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s.aff12.1D', + name_source="in_file", + name_template="%s.aff12.1D", ), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_file', - name_template='%s_volreg', + name_source="in_file", + name_template="%s_volreg", ), outputtype=dict(), - timeshift=dict(argstr='-tshift 0', ), - verbose=dict(argstr='-verbose', ), - zpad=dict( - argstr='-zpad %d', - position=-5, - ), + timeshift=dict(argstr="-tshift 0",), + verbose=dict(argstr="-verbose",), + zpad=dict(argstr="-zpad %d", position=-5,), ) inputs = Volreg.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Volreg_outputs(): output_map = dict( - md1d_file=dict(extensions=None, ), - oned_file=dict(extensions=None, ), - oned_matrix_save=dict(extensions=None, ), - out_file=dict(extensions=None, ), + md1d_file=dict(extensions=None,), + oned_file=dict(extensions=None,), + oned_matrix_save=dict(extensions=None,), + out_file=dict(extensions=None,), ) outputs = Volreg.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Warp.py b/nipype/interfaces/afni/tests/test_auto_Warp.py index 1adc2c1a1d..5b5e9ded71 100644 --- a/nipype/interfaces/afni/tests/test_auto_Warp.py +++ b/nipype/interfaces/afni/tests/test_auto_Warp.py @@ -4,60 +4,42 @@ def test_Warp_inputs(): input_map = dict( - args=dict(argstr='%s', ), - deoblique=dict(argstr='-deoblique', ), - environ=dict( - nohash=True, - usedefault=True, 
- ), - gridset=dict( - argstr='-gridset %s', - extensions=None, - ), + args=dict(argstr="%s",), + deoblique=dict(argstr="-deoblique",), + environ=dict(nohash=True, usedefault=True,), + gridset=dict(argstr="-gridset %s", extensions=None,), in_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - interp=dict(argstr='-%s', ), - matparent=dict( - argstr='-matparent %s', - extensions=None, - ), - mni2tta=dict(argstr='-mni2tta', ), - newgrid=dict(argstr='-newgrid %f', ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - oblique_parent=dict( - argstr='-oblique_parent %s', - extensions=None, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, ), + interp=dict(argstr="-%s",), + matparent=dict(argstr="-matparent %s", extensions=None,), + mni2tta=dict(argstr="-mni2tta",), + newgrid=dict(argstr="-newgrid %f",), + num_threads=dict(nohash=True, usedefault=True,), + oblique_parent=dict(argstr="-oblique_parent %s", extensions=None,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_warp', + name_source="in_file", + name_template="%s_warp", ), outputtype=dict(), - save_warp=dict(requires=['verbose'], ), - tta2mni=dict(argstr='-tta2mni', ), - verbose=dict(argstr='-verb', ), - zpad=dict(argstr='-zpad %d', ), + save_warp=dict(requires=["verbose"],), + tta2mni=dict(argstr="-tta2mni",), + verbose=dict(argstr="-verb",), + zpad=dict(argstr="-zpad %d",), ) inputs = Warp.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Warp_outputs(): output_map = dict( - out_file=dict(extensions=None, ), - warp_file=dict(extensions=None, ), + out_file=dict(extensions=None,), warp_file=dict(extensions=None,), ) outputs = Warp.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_ZCutUp.py b/nipype/interfaces/afni/tests/test_auto_ZCutUp.py index 5451e057c9..d234da5a50 100644 --- a/nipype/interfaces/afni/tests/test_auto_ZCutUp.py +++ b/nipype/interfaces/afni/tests/test_auto_ZCutUp.py @@ -4,28 +4,18 @@ def test_ZCutUp_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - keep=dict(argstr='-keep %s', ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, ), + keep=dict(argstr="-keep %s",), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_file', - name_template='%s_zcutup', + name_source="in_file", + name_template="%s_zcutup", ), outputtype=dict(), ) @@ -34,8 +24,10 @@ def test_ZCutUp_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ZCutUp_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = ZCutUp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Zcat.py b/nipype/interfaces/afni/tests/test_auto_Zcat.py index b226cf4a3a..81251acfe8 100644 --- 
a/nipype/interfaces/afni/tests/test_auto_Zcat.py +++ b/nipype/interfaces/afni/tests/test_auto_Zcat.py @@ -4,46 +4,31 @@ def test_Zcat_inputs(): input_map = dict( - args=dict(argstr='%s', ), - datum=dict(argstr='-datum %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fscale=dict( - argstr='-fscale', - xor=['nscale'], - ), - in_files=dict( - argstr='%s', - copyfile=False, - mandatory=True, - position=-1, - ), - nscale=dict( - argstr='-nscale', - xor=['fscale'], - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + datum=dict(argstr="-datum %s",), + environ=dict(nohash=True, usedefault=True,), + fscale=dict(argstr="-fscale", xor=["nscale"],), + in_files=dict(argstr="%s", copyfile=False, mandatory=True, position=-1,), + nscale=dict(argstr="-nscale", xor=["fscale"],), + num_threads=dict(nohash=True, usedefault=True,), out_file=dict( - argstr='-prefix %s', + argstr="-prefix %s", extensions=None, - name_source='in_files', - name_template='%s_zcat', + name_source="in_files", + name_template="%s_zcat", ), outputtype=dict(), - verb=dict(argstr='-verb', ), + verb=dict(argstr="-verb",), ) inputs = Zcat.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Zcat_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Zcat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Zeropad.py b/nipype/interfaces/afni/tests/test_auto_Zeropad.py index 3f9352c567..6f59445034 100644 --- a/nipype/interfaces/afni/tests/test_auto_Zeropad.py +++ b/nipype/interfaces/afni/tests/test_auto_Zeropad.py @@ -4,85 +4,40 @@ def test_Zeropad_inputs(): input_map = dict( - A=dict( - argstr='-A %i', - xor=['master'], - ), - AP=dict( - argstr='-AP %i', - xor=['master'], - ), - I=dict( - argstr='-I %i', - xor=['master'], - ), - IS=dict( - argstr='-IS %i', - xor=['master'], - ), - L=dict( - argstr='-L %i', - xor=['master'], - ), - P=dict( - argstr='-P %i', - xor=['master'], - ), - R=dict( - argstr='-R %i', - xor=['master'], - ), - RL=dict( - argstr='-RL %i', - xor=['master'], - ), - S=dict( - argstr='-S %i', - xor=['master'], - ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + A=dict(argstr="-A %i", xor=["master"],), + AP=dict(argstr="-AP %i", xor=["master"],), + I=dict(argstr="-I %i", xor=["master"],), + IS=dict(argstr="-IS %i", xor=["master"],), + L=dict(argstr="-L %i", xor=["master"],), + P=dict(argstr="-P %i", xor=["master"],), + R=dict(argstr="-R %i", xor=["master"],), + RL=dict(argstr="-RL %i", xor=["master"],), + S=dict(argstr="-S %i", xor=["master"],), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), in_files=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-1, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, ), master=dict( - argstr='-master %s', + argstr="-master %s", extensions=None, - xor=['I', 'S', 'A', 'P', 'L', 'R', 'z', 'RL', 'AP', 'IS', 'mm'], - ), - mm=dict( - argstr='-mm', - xor=['master'], - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_file=dict( - argstr='-prefix %s', - extensions=None, - name_template='zeropad', + xor=["I", "S", "A", "P", "L", "R", "z", "RL", "AP", "IS", "mm"], ), + mm=dict(argstr="-mm", xor=["master"],), + num_threads=dict(nohash=True, 
usedefault=True,), + out_file=dict(argstr="-prefix %s", extensions=None, name_template="zeropad",), outputtype=dict(), - z=dict( - argstr='-z %i', - xor=['master'], - ), + z=dict(argstr="-z %i", xor=["master"],), ) inputs = Zeropad.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Zeropad_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Zeropad.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_extra_Deconvolve.py b/nipype/interfaces/afni/tests/test_extra_Deconvolve.py index 93adc3b748..1efa14c66b 100644 --- a/nipype/interfaces/afni/tests/test_extra_Deconvolve.py +++ b/nipype/interfaces/afni/tests/test_extra_Deconvolve.py @@ -2,10 +2,11 @@ from ..model import Deconvolve + def test_x1dstop(): deconv = Deconvolve() - deconv.inputs.out_file = 'file.nii' - assert 'out_file' in deconv._list_outputs() + deconv.inputs.out_file = "file.nii" + assert "out_file" in deconv._list_outputs() deconv.inputs.x1D_stop = True - assert 'out_file' not in deconv._list_outputs() - assert 'cbucket' not in deconv._list_outputs() + assert "out_file" not in deconv._list_outputs() + assert "cbucket" not in deconv._list_outputs() diff --git a/nipype/interfaces/afni/utils.py b/nipype/interfaces/afni/utils.py index eb897ac110..61287b934e 100644 --- a/nipype/interfaces/afni/utils.py +++ b/nipype/interfaces/afni/utils.py @@ -14,40 +14,58 @@ import re import numpy as np -from ...utils.filemanip import (load_json, save_json, split_filename) -from ..base import (CommandLineInputSpec, CommandLine, Directory, TraitedSpec, - traits, isdefined, File, InputMultiObject, InputMultiPath, - Undefined, Str) +from ...utils.filemanip import load_json, save_json, split_filename +from ..base import ( + CommandLineInputSpec, + CommandLine, + Directory, + TraitedSpec, + traits, + isdefined, + File, + InputMultiObject, + InputMultiPath, + Undefined, + Str, +) from ...external.due import BibTeX -from .base import (AFNICommandBase, AFNICommand, AFNICommandInputSpec, - AFNICommandOutputSpec, AFNIPythonCommandInputSpec, - AFNIPythonCommand) +from .base import ( + AFNICommandBase, + AFNICommand, + AFNICommandInputSpec, + AFNICommandOutputSpec, + AFNIPythonCommandInputSpec, + AFNIPythonCommand, +) class ABoverlapInputSpec(AFNICommandInputSpec): in_file_a = File( - desc='input file A', - argstr='%s', + desc="input file A", + argstr="%s", position=-3, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) in_file_b = File( - desc='input file B', - argstr='%s', + desc="input file B", + argstr="%s", position=-2, mandatory=True, exists=True, - copyfile=False) - out_file = File( - desc='collect output to a file', argstr=' |& tee %s', position=-1) + copyfile=False, + ) + out_file = File(desc="collect output to a file", argstr=" |& tee %s", position=-1) no_automask = traits.Bool( - desc='consider input datasets as masks', argstr='-no_automask') + desc="consider input datasets as masks", argstr="-no_automask" + ) quiet = traits.Bool( - desc='be as quiet as possible (without being entirely mute)', - argstr='-quiet') + desc="be as quiet as possible (without being entirely mute)", argstr="-quiet" + ) verb = traits.Bool( - desc='print out some progress reports (to stderr)', argstr='-verb') + desc="print out some progress reports (to stderr)", argstr="-verb" + ) class 
ABoverlap(AFNICommand): @@ -71,50 +89,56 @@ class ABoverlap(AFNICommand): """ - _cmd = '3dABoverlap' + _cmd = "3dABoverlap" input_spec = ABoverlapInputSpec output_spec = AFNICommandOutputSpec class AFNItoNIFTIInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dAFNItoNIFTI', - argstr='%s', + desc="input file to 3dAFNItoNIFTI", + argstr="%s", position=-1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s.nii', - desc='output image file name', - argstr='-prefix %s', - name_source='in_file', - hash_files=False) + name_template="%s.nii", + desc="output image file name", + argstr="-prefix %s", + name_source="in_file", + hash_files=False, + ) float_ = traits.Bool( - desc='Force the output dataset to be 32-bit floats. This option ' - 'should be used when the input AFNI dataset has different float ' - 'scale factors for different sub-bricks, an option that ' - 'NIfTI-1.1 does not support.', - argstr='-float') + desc="Force the output dataset to be 32-bit floats. This option " + "should be used when the input AFNI dataset has different float " + "scale factors for different sub-bricks, an option that " + "NIfTI-1.1 does not support.", + argstr="-float", + ) pure = traits.Bool( - desc='Do NOT write an AFNI extension field into the output file. Only ' - 'use this option if needed. You can also use the \'nifti_tool\' ' - 'program to strip extensions from a file.', - argstr='-pure') + desc="Do NOT write an AFNI extension field into the output file. Only " + "use this option if needed. You can also use the 'nifti_tool' " + "program to strip extensions from a file.", + argstr="-pure", + ) denote = traits.Bool( - desc='When writing the AFNI extension field, remove text notes that ' - 'might contain subject identifying information.', - argstr='-denote') + desc="When writing the AFNI extension field, remove text notes that " + "might contain subject identifying information.", + argstr="-denote", + ) oldid = traits.Bool( - desc='Give the new dataset the input dataset' - 's AFNI ID code.', - argstr='-oldid', - xor=['newid']) + desc="Give the new dataset the input dataset" "s AFNI ID code.", + argstr="-oldid", + xor=["newid"], + ) newid = traits.Bool( - desc='Give the new dataset a new AFNI ID code, to distinguish it from ' - 'the input dataset.', - argstr='-newid', - xor=['oldid']) + desc="Give the new dataset a new AFNI ID code, to distinguish it from " + "the input dataset.", + argstr="-newid", + xor=["oldid"], + ) class AFNItoNIFTI(AFNICommand): @@ -137,14 +161,14 @@ class AFNItoNIFTI(AFNICommand): """ - _cmd = '3dAFNItoNIFTI' + _cmd = "3dAFNItoNIFTI" input_spec = AFNItoNIFTIInputSpec output_spec = AFNICommandOutputSpec def _overload_extension(self, value, name=None): path, base, ext = split_filename(value) - if ext.lower() not in ['.nii', '.nii.gz', '.1d', '.1D']: - ext += '.nii' + if ext.lower() not in [".nii", ".nii.gz", ".1d", ".1D"]: + ext += ".nii" return os.path.join(path, base + ext) def _gen_filename(self, name): @@ -155,20 +179,23 @@ class AutoboxInputSpec(AFNICommandInputSpec): in_file = File( exists=True, mandatory=True, - argstr='-input %s', - desc='input file', - copyfile=False) + argstr="-input %s", + desc="input file", + copyfile=False, + ) padding = traits.Int( - argstr='-npad %d', - desc='Number of extra voxels to pad on each side of box') + argstr="-npad %d", desc="Number of extra voxels to pad on each side of box" + ) out_file = File( - argstr='-prefix %s', name_source='in_file', name_template='%s_autobox') + 
argstr="-prefix %s", name_source="in_file", name_template="%s_autobox" + ) no_clustering = traits.Bool( - argstr='-noclust', - desc='Don\'t do any clustering to find box. Any non-zero voxel will ' - 'be preserved in the cropped volume. The default method uses ' - 'some clustering to find the cropping box, and will clip off ' - 'small isolated blobs.') + argstr="-noclust", + desc="Don't do any clustering to find box. Any non-zero voxel will " + "be preserved in the cropped volume. The default method uses " + "some clustering to find the cropping box, and will clip off " + "small isolated blobs.", + ) class AutoboxOutputSpec(TraitedSpec): # out_file not mandatory @@ -179,7 +206,7 @@ class AutoboxOutputSpec(TraitedSpec): # out_file not mandatory z_min = traits.Int() z_max = traits.Int() - out_file = File(desc='output file') + out_file = File(desc="output file") class Autobox(AFNICommand): @@ -202,17 +229,18 @@ class Autobox(AFNICommand): """ - _cmd = '3dAutobox' + _cmd = "3dAutobox" input_spec = AutoboxInputSpec output_spec = AutoboxOutputSpec def aggregate_outputs(self, runtime=None, needed_outputs=None): - outputs = super(Autobox, self).aggregate_outputs( - runtime, needed_outputs) - pattern = r'x=(?P-?\d+)\.\.(?P-?\d+) '\ - r'y=(?P-?\d+)\.\.(?P-?\d+) '\ - r'z=(?P-?\d+)\.\.(?P-?\d+)' - for line in runtime.stderr.split('\n'): + outputs = super(Autobox, self).aggregate_outputs(runtime, needed_outputs) + pattern = ( + r"x=(?P-?\d+)\.\.(?P-?\d+) " + r"y=(?P-?\d+)\.\.(?P-?\d+) " + r"z=(?P-?\d+)\.\.(?P-?\d+)" + ) + for line in runtime.stderr.split("\n"): m = re.search(pattern, line) if m: d = m.groupdict() @@ -222,40 +250,41 @@ def aggregate_outputs(self, runtime=None, needed_outputs=None): class BrickStatInputSpec(CommandLineInputSpec): in_file = File( - desc='input file to 3dmaskave', - argstr='%s', + desc="input file to 3dmaskave", + argstr="%s", position=-1, mandatory=True, - exists=True) + exists=True, + ) mask = File( - desc='-mask dset = use dset as mask to include/exclude voxels', - argstr='-mask %s', + desc="-mask dset = use dset as mask to include/exclude voxels", + argstr="-mask %s", position=2, - exists=True) + exists=True, + ) min = traits.Bool( - desc='print the minimum value in dataset', argstr='-min', position=1) + desc="print the minimum value in dataset", argstr="-min", position=1 + ) slow = traits.Bool( - desc='read the whole dataset to find the min and max values', - argstr='-slow') - max = traits.Bool( - desc='print the maximum value in the dataset', argstr='-max') - mean = traits.Bool( - desc='print the mean value in the dataset', argstr='-mean') - sum = traits.Bool( - desc='print the sum of values in the dataset', argstr='-sum') - var = traits.Bool(desc='print the variance in the dataset', argstr='-var') + desc="read the whole dataset to find the min and max values", argstr="-slow" + ) + max = traits.Bool(desc="print the maximum value in the dataset", argstr="-max") + mean = traits.Bool(desc="print the mean value in the dataset", argstr="-mean") + sum = traits.Bool(desc="print the sum of values in the dataset", argstr="-sum") + var = traits.Bool(desc="print the variance in the dataset", argstr="-var") percentile = traits.Tuple( traits.Float, traits.Float, traits.Float, - desc='p0 ps p1 write the percentile values starting ' - 'at p0% and ending at p1% at a step of ps%. ' - 'only one sub-brick is accepted.', - argstr='-percentile %.3f %.3f %.3f') + desc="p0 ps p1 write the percentile values starting " + "at p0% and ending at p1% at a step of ps%. 
" + "only one sub-brick is accepted.", + argstr="-percentile %.3f %.3f %.3f", + ) class BrickStatOutputSpec(TraitedSpec): - min_val = traits.Float(desc='output') + min_val = traits.Float(desc="output") class BrickStat(AFNICommandBase): @@ -278,7 +307,8 @@ class BrickStat(AFNICommandBase): >>> res = brickstat.run() # doctest: +SKIP """ - _cmd = '3dBrickStat' + + _cmd = "3dBrickStat" input_spec = BrickStatInputSpec output_spec = BrickStatOutputSpec @@ -286,16 +316,16 @@ def aggregate_outputs(self, runtime=None, needed_outputs=None): outputs = self._outputs() - outfile = os.path.join(os.getcwd(), 'stat_result.json') + outfile = os.path.join(os.getcwd(), "stat_result.json") if runtime is None: try: - min_val = load_json(outfile)['stat'] + min_val = load_json(outfile)["stat"] except IOError: return self.run().outputs else: min_val = [] - for line in runtime.stdout.split('\n'): + for line in runtime.stdout.split("\n"): if line: values = line.split() if len(values) > 1: @@ -315,51 +345,53 @@ class BucketInputSpec(AFNICommandInputSpec): in_file = traits.List( traits.Tuple( (File(exists=True, copyfile=False), traits.Str(argstr="'%s'")), - artstr="%s%s"), + artstr="%s%s", + ), position=-1, mandatory=True, argstr="%s", - desc='List of tuples of input datasets and subbrick selection strings' - 'as described in more detail in the following afni help string' - 'Input dataset specified using one of these forms:' - ' \'prefix+view\', \'prefix+view.HEAD\', or \'prefix+view.BRIK\'.' - 'You can also add a sub-brick selection list after the end of the' - 'dataset name. This allows only a subset of the sub-bricks to be' - 'included into the output (by default, all of the input dataset' - 'is copied into the output). A sub-brick selection list looks like' - 'one of the following forms:' - ' fred+orig[5] ==> use only sub-brick #5' - ' fred+orig[5,9,17] ==> use #5, #9, and #17' - ' fred+orig[5..8] or [5-8] ==> use #5, #6, #7, and #8' - ' fred+orig[5..13(2)] or [5-13(2)] ==> use #5, #7, #9, #11, and #13' - 'Sub-brick indexes start at 0. You can use the character \'$\'' - 'to indicate the last sub-brick in a dataset; for example, you' - 'can select every third sub-brick by using the selection list' - ' fred+orig[0..$(3)]' - 'N.B.: The sub-bricks are output in the order specified, which may' - ' not be the order in the original datasets. For example, using' - ' fred+orig[0..$(2),1..$(2)]' - ' will cause the sub-bricks in fred+orig to be output into the' - ' new dataset in an interleaved fashion. Using' - ' fred+orig[$..0]' - ' will reverse the order of the sub-bricks in the output.' - 'N.B.: Bucket datasets have multiple sub-bricks, but do NOT have' - ' a time dimension. You can input sub-bricks from a 3D+time dataset' - ' into a bucket dataset. You can use the \'3dinfo\' program to see' - ' how many sub-bricks a 3D+time or a bucket dataset contains.' - 'N.B.: In non-bucket functional datasets (like the \'fico\' datasets' - ' output by FIM, or the \'fitt\' datasets output by 3dttest), sub-brick' - ' [0] is the \'intensity\' and sub-brick [1] is the statistical parameter' - ' used as a threshold. Thus, to create a bucket dataset using the' - ' intensity from dataset A and the threshold from dataset B, and' - ' calling the output dataset C, you would type' - ' 3dbucket -prefix C -fbuc \'A+orig[0]\' -fbuc \'B+orig[1]\'' - 'WARNING: using this program, it is possible to create a dataset that' - ' has different basic datum types for different sub-bricks' - ' (e.g., shorts for brick 0, floats for brick 1).' 
- ' Do NOT do this! Very few AFNI programs will work correctly' - ' with such datasets!') - out_file = File(argstr='-prefix %s', name_template='buck') + desc="List of tuples of input datasets and subbrick selection strings" + "as described in more detail in the following afni help string" + "Input dataset specified using one of these forms:" + " 'prefix+view', 'prefix+view.HEAD', or 'prefix+view.BRIK'." + "You can also add a sub-brick selection list after the end of the" + "dataset name. This allows only a subset of the sub-bricks to be" + "included into the output (by default, all of the input dataset" + "is copied into the output). A sub-brick selection list looks like" + "one of the following forms:" + " fred+orig[5] ==> use only sub-brick #5" + " fred+orig[5,9,17] ==> use #5, #9, and #17" + " fred+orig[5..8] or [5-8] ==> use #5, #6, #7, and #8" + " fred+orig[5..13(2)] or [5-13(2)] ==> use #5, #7, #9, #11, and #13" + "Sub-brick indexes start at 0. You can use the character '$'" + "to indicate the last sub-brick in a dataset; for example, you" + "can select every third sub-brick by using the selection list" + " fred+orig[0..$(3)]" + "N.B.: The sub-bricks are output in the order specified, which may" + " not be the order in the original datasets. For example, using" + " fred+orig[0..$(2),1..$(2)]" + " will cause the sub-bricks in fred+orig to be output into the" + " new dataset in an interleaved fashion. Using" + " fred+orig[$..0]" + " will reverse the order of the sub-bricks in the output." + "N.B.: Bucket datasets have multiple sub-bricks, but do NOT have" + " a time dimension. You can input sub-bricks from a 3D+time dataset" + " into a bucket dataset. You can use the '3dinfo' program to see" + " how many sub-bricks a 3D+time or a bucket dataset contains." + "N.B.: In non-bucket functional datasets (like the 'fico' datasets" + " output by FIM, or the 'fitt' datasets output by 3dttest), sub-brick" + " [0] is the 'intensity' and sub-brick [1] is the statistical parameter" + " used as a threshold. Thus, to create a bucket dataset using the" + " intensity from dataset A and the threshold from dataset B, and" + " calling the output dataset C, you would type" + " 3dbucket -prefix C -fbuc 'A+orig[0]' -fbuc 'B+orig[1]'" + "WARNING: using this program, it is possible to create a dataset that" + " has different basic datum types for different sub-bricks" + " (e.g., shorts for brick 0, floats for brick 1)." + " Do NOT do this! 
Very few AFNI programs will work correctly" + " with such datasets!", + ) + out_file = File(argstr="-prefix %s", name_template="buck") class Bucket(AFNICommand): @@ -382,41 +414,42 @@ class Bucket(AFNICommand): """ - _cmd = '3dbucket' + _cmd = "3dbucket" input_spec = BucketInputSpec output_spec = AFNICommandOutputSpec def _format_arg(self, name, spec, value): - if name == 'in_file': - return spec.argstr % ( - ' '.join([i[0] + "'" + i[1] + "'" for i in value])) + if name == "in_file": + return spec.argstr % (" ".join([i[0] + "'" + i[1] + "'" for i in value])) return super(Bucket, self)._format_arg(name, spec, value) class CalcInputSpec(AFNICommandInputSpec): in_file_a = File( - desc='input file to 3dcalc', - argstr='-a %s', + desc="input file to 3dcalc", + argstr="-a %s", position=0, mandatory=True, - exists=True) + exists=True, + ) in_file_b = File( - desc='operand file to 3dcalc', argstr='-b %s', position=1, exists=True) + desc="operand file to 3dcalc", argstr="-b %s", position=1, exists=True + ) in_file_c = File( - desc='operand file to 3dcalc', argstr='-c %s', position=2, exists=True) + desc="operand file to 3dcalc", argstr="-c %s", position=2, exists=True + ) out_file = File( - name_template='%s_calc', - desc='output image file name', - argstr='-prefix %s', - name_source='in_file_a') - expr = Str(desc='expr', argstr='-expr "%s"', position=3, mandatory=True) - start_idx = traits.Int( - desc='start index for in_file_a', requires=['stop_idx']) - stop_idx = traits.Int( - desc='stop index for in_file_a', requires=['start_idx']) - single_idx = traits.Int(desc='volume index for in_file_a') - overwrite = traits.Bool(desc='overwrite output', argstr='-overwrite') - other = File(desc='other options', argstr='') + name_template="%s_calc", + desc="output image file name", + argstr="-prefix %s", + name_source="in_file_a", + ) + expr = Str(desc="expr", argstr='-expr "%s"', position=3, mandatory=True) + start_idx = traits.Int(desc="start index for in_file_a", requires=["stop_idx"]) + stop_idx = traits.Int(desc="stop index for in_file_a", requires=["start_idx"]) + single_idx = traits.Int(desc="volume index for in_file_a") + overwrite = traits.Bool(desc="overwrite output", argstr="-overwrite") + other = File(desc="other options", argstr="") class Calc(AFNICommand): @@ -451,82 +484,89 @@ class Calc(AFNICommand): """ - _cmd = '3dcalc' + _cmd = "3dcalc" input_spec = CalcInputSpec output_spec = AFNICommandOutputSpec def _format_arg(self, name, trait_spec, value): - if name == 'in_file_a': + if name == "in_file_a": arg = trait_spec.argstr % value if isdefined(self.inputs.start_idx): - arg += '[%d..%d]' % (self.inputs.start_idx, - self.inputs.stop_idx) + arg += "[%d..%d]" % (self.inputs.start_idx, self.inputs.stop_idx) if isdefined(self.inputs.single_idx): - arg += '[%d]' % (self.inputs.single_idx) + arg += "[%d]" % (self.inputs.single_idx) return arg return super(Calc, self)._format_arg(name, trait_spec, value) def _parse_inputs(self, skip=None): """Skip the arguments without argstr metadata """ - return super( - Calc, self)._parse_inputs(skip=('start_idx', 'stop_idx', 'other')) + return super(Calc, self)._parse_inputs(skip=("start_idx", "stop_idx", "other")) class CatInputSpec(AFNICommandInputSpec): - in_files = traits.List( - File(exists=True), argstr="%s", mandatory=True, position=-2) + in_files = traits.List(File(exists=True), argstr="%s", mandatory=True, position=-2) out_file = File( - argstr='> %s', - value='catout.1d', + argstr="> %s", + value="catout.1d", usedefault=True, - desc='output 
(concatenated) file name', + desc="output (concatenated) file name", position=-1, - mandatory=True) + mandatory=True, + ) omitconst = traits.Bool( - desc='Omit columns that are identically constant from output.', - argstr='-nonconst') + desc="Omit columns that are identically constant from output.", + argstr="-nonconst", + ) keepfree = traits.Bool( - desc='Keep only columns that are marked as \'free\' in the ' - '3dAllineate header from \'-1Dparam_save\'. ' - 'If there is no such header, all columns are kept.', - argstr='-nonfixed') + desc="Keep only columns that are marked as 'free' in the " + "3dAllineate header from '-1Dparam_save'. " + "If there is no such header, all columns are kept.", + argstr="-nonfixed", + ) out_format = traits.Enum( - 'int', - 'nice', - 'double', - 'fint', - 'cint', - argstr='-form %s', - desc='specify data type for output. Valid types are \'int\', ' - '\'nice\', \'double\', \'fint\', and \'cint\'.', - xor=['out_int', 'out_nice', 'out_double', 'out_fint', 'out_cint']) + "int", + "nice", + "double", + "fint", + "cint", + argstr="-form %s", + desc="specify data type for output. Valid types are 'int', " + "'nice', 'double', 'fint', and 'cint'.", + xor=["out_int", "out_nice", "out_double", "out_fint", "out_cint"], + ) stack = traits.Bool( - desc='Stack the columns of the resultant matrix in the output.', - argstr='-stack') + desc="Stack the columns of the resultant matrix in the output.", argstr="-stack" + ) sel = traits.Str( - desc='Apply the same column/row selection string to all filenames ' - 'on the command line.', - argstr='-sel %s') + desc="Apply the same column/row selection string to all filenames " + "on the command line.", + argstr="-sel %s", + ) out_int = traits.Bool( - desc='specifiy int data type for output', - argstr='-i', - xor=['out_format', 'out_nice', 'out_double', 'out_fint', 'out_cint']) + desc="specifiy int data type for output", + argstr="-i", + xor=["out_format", "out_nice", "out_double", "out_fint", "out_cint"], + ) out_nice = traits.Bool( - desc='specifiy nice data type for output', - argstr='-n', - xor=['out_format', 'out_int', 'out_double', 'out_fint', 'out_cint']) + desc="specifiy nice data type for output", + argstr="-n", + xor=["out_format", "out_int", "out_double", "out_fint", "out_cint"], + ) out_double = traits.Bool( - desc='specifiy double data type for output', - argstr='-d', - xor=['out_format', 'out_nice', 'out_int', 'out_fint', 'out_cint']) + desc="specifiy double data type for output", + argstr="-d", + xor=["out_format", "out_nice", "out_int", "out_fint", "out_cint"], + ) out_fint = traits.Bool( - desc='specifiy int, rounded down, data type for output', - argstr='-f', - xor=['out_format', 'out_nice', 'out_double', 'out_int', 'out_cint']) + desc="specifiy int, rounded down, data type for output", + argstr="-f", + xor=["out_format", "out_nice", "out_double", "out_int", "out_cint"], + ) out_cint = traits.Bool( - desc='specifiy int, rounded up, data type for output', - xor=['out_format', 'out_nice', 'out_double', 'out_fint', 'out_int']) + desc="specifiy int, rounded up, data type for output", + xor=["out_format", "out_nice", "out_double", "out_fint", "out_int"], + ) class Cat(AFNICommand): @@ -551,7 +591,7 @@ class Cat(AFNICommand): """ - _cmd = '1dcat' + _cmd = "1dcat" input_spec = CatInputSpec output_spec = AFNICommandOutputSpec @@ -562,32 +602,37 @@ class CatMatvecInputSpec(AFNICommandInputSpec): desc="list of tuples of mfiles and associated opkeys", mandatory=True, argstr="%s", - position=-2) + position=-2, + ) out_file = File( 
argstr=" > %s", - name_template='%s_cat.aff12.1D', - name_source='in_file', + name_template="%s_cat.aff12.1D", + name_source="in_file", keep_extension=False, desc="File to write concattenated matvecs to", position=-1, - mandatory=True) + mandatory=True, + ) matrix = traits.Bool( desc="indicates that the resulting matrix will" "be written to outfile in the 'MATRIX(...)' format (FORM 3)." "This feature could be used, with clever scripting, to input" "a matrix directly on the command line to program 3dWarp.", argstr="-MATRIX", - xor=['oneline', 'fourxfour']) + xor=["oneline", "fourxfour"], + ) oneline = traits.Bool( desc="indicates that the resulting matrix" "will simply be written as 12 numbers on one line.", argstr="-ONELINE", - xor=['matrix', 'fourxfour']) + xor=["matrix", "fourxfour"], + ) fourxfour = traits.Bool( desc="Output matrix in augmented form (last row is 0 0 0 1)" "This option does not work with -MATRIX or -ONELINE", argstr="-4x4", - xor=['matrix', 'oneline']) + xor=["matrix", "oneline"], + ) class CatMatvec(AFNICommand): @@ -609,68 +654,75 @@ class CatMatvec(AFNICommand): """ - _cmd = 'cat_matvec' + _cmd = "cat_matvec" input_spec = CatMatvecInputSpec output_spec = AFNICommandOutputSpec def _format_arg(self, name, spec, value): - if name == 'in_file': + if name == "in_file": # Concatenate a series of filenames, with optional opkeys - return ' '.join('%s -%s' % (mfile, opkey) if opkey else mfile - for mfile, opkey in value) + return " ".join( + "%s -%s" % (mfile, opkey) if opkey else mfile for mfile, opkey in value + ) return super(CatMatvec, self)._format_arg(name, spec, value) class CenterMassInputSpec(CommandLineInputSpec): in_file = File( - desc='input file to 3dCM', - argstr='%s', + desc="input file to 3dCM", + argstr="%s", position=-2, mandatory=True, exists=True, - copyfile=True) + copyfile=True, + ) cm_file = File( - name_source='in_file', - name_template='%s_cm.out', + name_source="in_file", + name_template="%s_cm.out", hash_files=False, keep_extension=False, desc="File to write center of mass to", argstr="> %s", - position=-1) + position=-1, + ) mask_file = File( - desc='Only voxels with nonzero values in the provided mask will be ' - 'averaged.', - argstr='-mask %s', - exists=True) - automask = traits.Bool( - desc='Generate the mask automatically', argstr='-automask') + desc="Only voxels with nonzero values in the provided mask will be " + "averaged.", + argstr="-mask %s", + exists=True, + ) + automask = traits.Bool(desc="Generate the mask automatically", argstr="-automask") set_cm = traits.Tuple( (traits.Float(), traits.Float(), traits.Float()), - desc='After computing the center of mass, set the origin fields in ' - 'the header so that the center of mass will be at (x,y,z) in ' - 'DICOM coords.', - argstr='-set %f %f %f') + desc="After computing the center of mass, set the origin fields in " + "the header so that the center of mass will be at (x,y,z) in " + "DICOM coords.", + argstr="-set %f %f %f", + ) local_ijk = traits.Bool( - desc='Output values as (i,j,k) in local orienation', - argstr='-local_ijk') + desc="Output values as (i,j,k) in local orienation", argstr="-local_ijk" + ) roi_vals = traits.List( traits.Int, - desc='Compute center of mass for each blob with voxel value of v0, ' - 'v1, v2, etc. This option is handy for getting ROI centers of ' - 'mass.', - argstr='-roi_vals %s') + desc="Compute center of mass for each blob with voxel value of v0, " + "v1, v2, etc. 
This option is handy for getting ROI centers of " + "mass.", + argstr="-roi_vals %s", + ) all_rois = traits.Bool( - desc='Don\'t bother listing the values of ROIs you want: The program ' - 'will find all of them and produce a full list', - argstr='-all_rois') + desc="Don't bother listing the values of ROIs you want: The program " + "will find all of them and produce a full list", + argstr="-all_rois", + ) class CenterMassOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='output file') - cm_file = File(desc='file with the center of mass coordinates') + out_file = File(exists=True, desc="output file") + cm_file = File(desc="file with the center of mass coordinates") cm = traits.List( traits.Tuple(traits.Float(), traits.Float(), traits.Float()), - desc='center of mass') + desc="center of mass", + ) class CenterMass(AFNICommandBase): @@ -698,41 +750,53 @@ class CenterMass(AFNICommandBase): >>> res = 3dcm.run() # doctest: +SKIP """ - _cmd = '3dCM' + _cmd = "3dCM" input_spec = CenterMassInputSpec output_spec = CenterMassOutputSpec def _list_outputs(self): outputs = super(CenterMass, self)._list_outputs() - outputs['out_file'] = os.path.abspath(self.inputs.in_file) - outputs['cm_file'] = os.path.abspath(self.inputs.cm_file) - sout = np.loadtxt(outputs['cm_file'], ndmin=2) - outputs['cm'] = [tuple(s) for s in sout] + outputs["out_file"] = os.path.abspath(self.inputs.in_file) + outputs["cm_file"] = os.path.abspath(self.inputs.cm_file) + sout = np.loadtxt(outputs["cm_file"], ndmin=2) + outputs["cm"] = [tuple(s) for s in sout] return outputs class ConvertDsetInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to ConvertDset', - argstr='-input %s', + desc="input file to ConvertDset", + argstr="-input %s", position=-2, mandatory=True, - exists=True) + exists=True, + ) out_file = File( - desc='output file for ConvertDset', - argstr='-prefix %s', + desc="output file for ConvertDset", + argstr="-prefix %s", position=-1, - mandatory=True) + mandatory=True, + ) out_type = traits.Enum( - ('niml', 'niml_asc', 'niml_bi', - '1D', '1Dp', '1Dpt', - 'gii', 'gii_asc', 'gii_b64', 'gii_b64gz'), - desc='output type', - argstr='-o_%s', + ( + "niml", + "niml_asc", + "niml_bi", + "1D", + "1Dp", + "1Dpt", + "gii", + "gii_asc", + "gii_b64", + "gii_b64gz", + ), + desc="output type", + argstr="-o_%s", mandatory=True, - position=0) + position=0, + ) class ConvertDset(AFNICommandBase): @@ -754,31 +818,33 @@ class ConvertDset(AFNICommandBase): >>> res = convertdset.run() # doctest: +SKIP """ - _cmd = 'ConvertDset' + _cmd = "ConvertDset" input_spec = ConvertDsetInputSpec output_spec = AFNICommandOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) + outputs["out_file"] = op.abspath(self.inputs.out_file) return outputs class CopyInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dcopy', - argstr='%s', + desc="input file to 3dcopy", + argstr="%s", position=-2, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_copy', - desc='output image file name', - argstr='%s', + name_template="%s_copy", + desc="output image file name", + argstr="%s", position=-1, - name_source='in_file') - verbose = traits.Bool(desc='print progress reports', argstr='-verb') + name_source="in_file", + ) + verbose = traits.Bool(desc="print progress reports", argstr="-verb") class Copy(AFNICommand): @@ -819,7 +885,7 @@ class Copy(AFNICommand): """ - _cmd = '3dcopy' + _cmd = "3dcopy" 
input_spec = CopyInputSpec output_spec = AFNICommandOutputSpec @@ -829,47 +895,48 @@ class DotInputSpec(AFNICommandInputSpec): (File()), desc="list of input files, possibly with subbrick selectors", argstr="%s ...", - position=-2) - out_file = File( - desc='collect output to a file', argstr=' |& tee %s', position=-1) - mask = File(desc='Use this dataset as a mask', argstr='-mask %s') + position=-2, + ) + out_file = File(desc="collect output to a file", argstr=" |& tee %s", position=-1) + mask = File(desc="Use this dataset as a mask", argstr="-mask %s") mrange = traits.Tuple( (traits.Float(), traits.Float()), - desc='Means to further restrict the voxels from \'mset\' so that' - 'only those mask values within this range (inclusive) willbe used.', - argstr='-mrange %s %s') + desc="Means to further restrict the voxels from 'mset' so that" + "only those mask values within this range (inclusive) willbe used.", + argstr="-mrange %s %s", + ) demean = traits.Bool( - desc= - 'Remove the mean from each volume prior to computing the correlation', - argstr='-demean') + desc="Remove the mean from each volume prior to computing the correlation", + argstr="-demean", + ) docor = traits.Bool( - desc='Return the correlation coefficient (default).', argstr='-docor') - dodot = traits.Bool( - desc='Return the dot product (unscaled).', argstr='-dodot') + desc="Return the correlation coefficient (default).", argstr="-docor" + ) + dodot = traits.Bool(desc="Return the dot product (unscaled).", argstr="-dodot") docoef = traits.Bool( - desc= - 'Return the least square fit coefficients {{a,b}} so that dset2 is approximately a + b*dset1', - argstr='-docoef') + desc="Return the least square fit coefficients {{a,b}} so that dset2 is approximately a + b*dset1", + argstr="-docoef", + ) dosums = traits.Bool( - desc= - 'Return the 6 numbers xbar= ybar= <(x-xbar)^2> <(y-ybar)^2> <(x-xbar)(y-ybar)> and the correlation coefficient.', - argstr='-dosums') + desc="Return the 6 numbers xbar= ybar= <(x-xbar)^2> <(y-ybar)^2> <(x-xbar)(y-ybar)> and the correlation coefficient.", + argstr="-dosums", + ) dodice = traits.Bool( - desc='Return the Dice coefficient (the Sorensen-Dice index).', - argstr='-dodice') + desc="Return the Dice coefficient (the Sorensen-Dice index).", argstr="-dodice" + ) doeta2 = traits.Bool( - desc='Return eta-squared (Cohen, NeuroImage 2008).', argstr='-doeta2') + desc="Return eta-squared (Cohen, NeuroImage 2008).", argstr="-doeta2" + ) full = traits.Bool( - desc= - 'Compute the whole matrix. A waste of time, but handy for parsing.', - argstr='-full') + desc="Compute the whole matrix. A waste of time, but handy for parsing.", + argstr="-full", + ) show_labels = traits.Bool( - desc= - 'Print sub-brick labels to help identify what is being correlated. This option is useful when' - 'you have more than 2 sub-bricks at input.', - argstr='-show_labels') - upper = traits.Bool( - desc='Compute upper triangular matrix', argstr='-upper') + desc="Print sub-brick labels to help identify what is being correlated. 
This option is useful when" + "you have more than 2 sub-bricks at input.", + argstr="-show_labels", + ) + upper = traits.Bool(desc="Compute upper triangular matrix", argstr="-upper") class Dot(AFNICommand): @@ -890,53 +957,60 @@ class Dot(AFNICommand): >>> res = copy3d.run() # doctest: +SKIP """ - _cmd = '3dDot' + + _cmd = "3dDot" input_spec = DotInputSpec output_spec = AFNICommandOutputSpec class Edge3InputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dedge3', - argstr='-input %s', + desc="input file to 3dedge3", + argstr="-input %s", position=0, mandatory=True, exists=True, - copyfile=False) - out_file = File( - desc='output image file name', position=-1, argstr='-prefix %s') + copyfile=False, + ) + out_file = File(desc="output image file name", position=-1, argstr="-prefix %s") datum = traits.Enum( - 'byte', - 'short', - 'float', - argstr='-datum %s', - desc='specify data type for output. Valid types are \'byte\', ' - '\'short\' and \'float\'.') + "byte", + "short", + "float", + argstr="-datum %s", + desc="specify data type for output. Valid types are 'byte', " + "'short' and 'float'.", + ) fscale = traits.Bool( - desc='Force scaling of the output to the maximum integer range.', - argstr='-fscale', - xor=['gscale', 'nscale', 'scale_floats']) + desc="Force scaling of the output to the maximum integer range.", + argstr="-fscale", + xor=["gscale", "nscale", "scale_floats"], + ) gscale = traits.Bool( - desc='Same as \'-fscale\', but also forces each output sub-brick to ' - 'to get the same scaling factor.', - argstr='-gscale', - xor=['fscale', 'nscale', 'scale_floats']) + desc="Same as '-fscale', but also forces each output sub-brick to " + "to get the same scaling factor.", + argstr="-gscale", + xor=["fscale", "nscale", "scale_floats"], + ) nscale = traits.Bool( - desc='Don\'t do any scaling on output to byte or short datasets.', - argstr='-nscale', - xor=['fscale', 'gscale', 'scale_floats']) + desc="Don't do any scaling on output to byte or short datasets.", + argstr="-nscale", + xor=["fscale", "gscale", "scale_floats"], + ) scale_floats = traits.Float( - desc='Multiply input by VAL, but only if the input datum is ' - 'float. This is needed when the input dataset ' - 'has a small range, like 0 to 2.0 for instance. ' - 'With such a range, very few edges are detected due to ' - 'what I suspect to be truncation problems. ' - 'Multiplying such a dataset by 10000 fixes the problem ' - 'and the scaling is undone at the output.', - argstr='-scale_floats %f', - xor=['fscale', 'gscale', 'nscale']) + desc="Multiply input by VAL, but only if the input datum is " + "float. This is needed when the input dataset " + "has a small range, like 0 to 2.0 for instance. " + "With such a range, very few edges are detected due to " + "what I suspect to be truncation problems. 
" + "Multiplying such a dataset by 10000 fixes the problem " + "and the scaling is undone at the output.", + argstr="-scale_floats %f", + xor=["fscale", "gscale", "nscale"], + ) verbose = traits.Bool( - desc='Print out some information along the way.', argstr='-verbose') + desc="Print out some information along the way.", argstr="-verbose" + ) class Edge3(AFNICommand): @@ -982,35 +1056,37 @@ class Edge3(AFNICommand): """ - _cmd = '3dedge3' + _cmd = "3dedge3" input_spec = Edge3InputSpec output_spec = AFNICommandOutputSpec class EvalInputSpec(AFNICommandInputSpec): in_file_a = File( - desc='input file to 1deval', - argstr='-a %s', + desc="input file to 1deval", + argstr="-a %s", position=0, mandatory=True, - exists=True) + exists=True, + ) in_file_b = File( - desc='operand file to 1deval', argstr='-b %s', position=1, exists=True) + desc="operand file to 1deval", argstr="-b %s", position=1, exists=True + ) in_file_c = File( - desc='operand file to 1deval', argstr='-c %s', position=2, exists=True) + desc="operand file to 1deval", argstr="-c %s", position=2, exists=True + ) out_file = File( - name_template='%s_calc', - desc='output image file name', - argstr='-prefix %s', - name_source='in_file_a') - out1D = traits.Bool(desc='output in 1D', argstr='-1D') - expr = Str(desc='expr', argstr='-expr "%s"', position=3, mandatory=True) - start_idx = traits.Int( - desc='start index for in_file_a', requires=['stop_idx']) - stop_idx = traits.Int( - desc='stop index for in_file_a', requires=['start_idx']) - single_idx = traits.Int(desc='volume index for in_file_a') - other = File(desc='other options', argstr='') + name_template="%s_calc", + desc="output image file name", + argstr="-prefix %s", + name_source="in_file_a", + ) + out1D = traits.Bool(desc="output in 1D", argstr="-1D") + expr = Str(desc="expr", argstr='-expr "%s"', position=3, mandatory=True) + start_idx = traits.Int(desc="start index for in_file_a", requires=["stop_idx"]) + stop_idx = traits.Int(desc="stop index for in_file_a", requires=["start_idx"]) + single_idx = traits.Int(desc="volume index for in_file_a") + other = File(desc="other options", argstr="") class Eval(AFNICommand): @@ -1036,125 +1112,132 @@ class Eval(AFNICommand): """ - _cmd = '1deval' + _cmd = "1deval" input_spec = EvalInputSpec output_spec = AFNICommandOutputSpec def _format_arg(self, name, trait_spec, value): - if name == 'in_file_a': + if name == "in_file_a": arg = trait_spec.argstr % value if isdefined(self.inputs.start_idx): - arg += '[%d..%d]' % (self.inputs.start_idx, - self.inputs.stop_idx) + arg += "[%d..%d]" % (self.inputs.start_idx, self.inputs.stop_idx) if isdefined(self.inputs.single_idx): - arg += '[%d]' % (self.inputs.single_idx) + arg += "[%d]" % (self.inputs.single_idx) return arg return super(Eval, self)._format_arg(name, trait_spec, value) def _parse_inputs(self, skip=None): """Skip the arguments without argstr metadata """ - return super( - Eval, self)._parse_inputs(skip=('start_idx', 'stop_idx', 'other')) + return super(Eval, self)._parse_inputs(skip=("start_idx", "stop_idx", "other")) class FWHMxInputSpec(CommandLineInputSpec): in_file = File( - desc='input dataset', argstr='-input %s', mandatory=True, exists=True) + desc="input dataset", argstr="-input %s", mandatory=True, exists=True + ) out_file = File( - argstr='> %s', - name_source='in_file', - name_template='%s_fwhmx.out', + argstr="> %s", + name_source="in_file", + name_template="%s_fwhmx.out", position=-1, keep_extension=False, - desc='output file') + desc="output file", + ) out_subbricks = 
File( - argstr='-out %s', - name_source='in_file', - name_template='%s_subbricks.out', + argstr="-out %s", + name_source="in_file", + name_template="%s_subbricks.out", keep_extension=False, - desc='output file listing the subbricks FWHM') + desc="output file listing the subbricks FWHM", + ) mask = File( - desc='use only voxels that are nonzero in mask', - argstr='-mask %s', - exists=True) + desc="use only voxels that are nonzero in mask", argstr="-mask %s", exists=True + ) automask = traits.Bool( False, usedefault=True, - argstr='-automask', - desc='compute a mask from THIS dataset, a la 3dAutomask') + argstr="-automask", + desc="compute a mask from THIS dataset, a la 3dAutomask", + ) detrend = traits.Either( traits.Bool(), traits.Int(), default=False, - argstr='-detrend', - xor=['demed'], + argstr="-detrend", + xor=["demed"], usedefault=True, - desc='instead of demed (0th order detrending), detrend to the ' - 'specified order. If order is not given, the program picks ' - 'q=NT/30. -detrend disables -demed, and includes -unif.') + desc="instead of demed (0th order detrending), detrend to the " + "specified order. If order is not given, the program picks " + "q=NT/30. -detrend disables -demed, and includes -unif.", + ) demed = traits.Bool( False, - argstr='-demed', - xor=['detrend'], - desc='If the input dataset has more than one sub-brick (e.g., has a ' - 'time axis), then subtract the median of each voxel\'s time ' - 'series before processing FWHM. This will tend to remove ' - 'intrinsic spatial structure and leave behind the noise.') + argstr="-demed", + xor=["detrend"], + desc="If the input dataset has more than one sub-brick (e.g., has a " + "time axis), then subtract the median of each voxel's time " + "series before processing FWHM. This will tend to remove " + "intrinsic spatial structure and leave behind the noise.", + ) unif = traits.Bool( False, - argstr='-unif', - desc='If the input dataset has more than one sub-brick, then ' - 'normalize each voxel\'s time series to have the same MAD before ' - 'processing FWHM.') + argstr="-unif", + desc="If the input dataset has more than one sub-brick, then " + "normalize each voxel's time series to have the same MAD before " + "processing FWHM.", + ) out_detrend = File( - argstr='-detprefix %s', - name_source='in_file', - name_template='%s_detrend', + argstr="-detprefix %s", + name_source="in_file", + name_template="%s_detrend", keep_extension=False, - desc='Save the detrended file into a dataset') + desc="Save the detrended file into a dataset", + ) geom = traits.Bool( - argstr='-geom', - xor=['arith'], - desc='if in_file has more than one sub-brick, compute the final ' - 'estimate as the geometric mean of the individual sub-brick FWHM ' - 'estimates') + argstr="-geom", + xor=["arith"], + desc="if in_file has more than one sub-brick, compute the final " + "estimate as the geometric mean of the individual sub-brick FWHM " + "estimates", + ) arith = traits.Bool( - argstr='-arith', - xor=['geom'], - desc='if in_file has more than one sub-brick, compute the final ' - 'estimate as the arithmetic mean of the individual sub-brick ' - 'FWHM estimates') + argstr="-arith", + xor=["geom"], + desc="if in_file has more than one sub-brick, compute the final " + "estimate as the arithmetic mean of the individual sub-brick " + "FWHM estimates", + ) combine = traits.Bool( - argstr='-combine', - desc='combine the final measurements along each axis') - compat = traits.Bool( - argstr='-compat', desc='be compatible with the older 3dFWHM') + argstr="-combine", 
desc="combine the final measurements along each axis" + ) + compat = traits.Bool(argstr="-compat", desc="be compatible with the older 3dFWHM") acf = traits.Either( traits.Bool(), File(), traits.Tuple(File(exists=True), traits.Float()), default=False, usedefault=True, - argstr='-acf', - desc='computes the spatial autocorrelation') + argstr="-acf", + desc="computes the spatial autocorrelation", + ) class FWHMxOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='output file') - out_subbricks = File(exists=True, desc='output file (subbricks)') - out_detrend = File(desc='output file, detrended') + out_file = File(exists=True, desc="output file") + out_subbricks = File(exists=True, desc="output file (subbricks)") + out_detrend = File(desc="output file, detrended") fwhm = traits.Either( traits.Tuple(traits.Float(), traits.Float(), traits.Float()), - traits.Tuple(traits.Float(), traits.Float(), traits.Float(), - traits.Float()), - desc='FWHM along each axis') + traits.Tuple(traits.Float(), traits.Float(), traits.Float(), traits.Float()), + desc="FWHM along each axis", + ) acf_param = traits.Either( traits.Tuple(traits.Float(), traits.Float(), traits.Float()), - traits.Tuple(traits.Float(), traits.Float(), traits.Float(), - traits.Float()), - desc='fitted ACF model parameters') - out_acf = File(exists=True, desc='output acf file') + traits.Tuple(traits.Float(), traits.Float(), traits.Float(), traits.Float()), + desc="fitted ACF model parameters", + ) + out_acf = File(exists=True, desc="output acf file") class FWHMx(AFNICommandBase): @@ -1259,20 +1342,22 @@ class FWHMx(AFNICommandBase): """ - _cmd = '3dFWHMx' + + _cmd = "3dFWHMx" input_spec = FWHMxInputSpec output_spec = FWHMxOutputSpec references_ = [ { - 'entry': - BibTeX('@article{CoxReynoldsTaylor2016,' - 'author={R.W. Cox, R.C. Reynolds, and P.A. Taylor},' - 'title={AFNI and clustering: false positive rates redux},' - 'journal={bioRxiv},' - 'year={2016},' - '}'), - 'tags': ['method'], + "entry": BibTeX( + "@article{CoxReynoldsTaylor2016," + "author={R.W. Cox, R.C. Reynolds, and P.A. 
Taylor}," + "title={AFNI and clustering: false positive rates redux}," + "journal={bioRxiv}," + "year={2016}," + "}" + ), + "tags": ["method"], }, ] _acf = True @@ -1281,20 +1366,20 @@ def _parse_inputs(self, skip=None): if not self.inputs.detrend: if skip is None: skip = [] - skip += ['out_detrend'] + skip += ["out_detrend"] return super(FWHMx, self)._parse_inputs(skip=skip) def _format_arg(self, name, trait_spec, value): - if name == 'detrend': + if name == "detrend": if isinstance(value, bool): if value: return trait_spec.argstr else: return None elif isinstance(value, int): - return trait_spec.argstr + ' %d' % value + return trait_spec.argstr + " %d" % value - if name == 'acf': + if name == "acf": if isinstance(value, bool): if value: return trait_spec.argstr @@ -1302,9 +1387,9 @@ def _format_arg(self, name, trait_spec, value): self._acf = False return None elif isinstance(value, tuple): - return trait_spec.argstr + ' %s %f' % value + return trait_spec.argstr + " %s %f" % value elif isinstance(value, (str, bytes)): - return trait_spec.argstr + ' ' + value + return trait_spec.argstr + " " + value return super(FWHMx, self)._format_arg(name, trait_spec, value) def _list_outputs(self): @@ -1312,28 +1397,28 @@ def _list_outputs(self): if self.inputs.detrend: fname, ext = op.splitext(self.inputs.in_file) - if '.gz' in ext: + if ".gz" in ext: _, ext2 = op.splitext(fname) ext = ext2 + ext - outputs['out_detrend'] += ext + outputs["out_detrend"] += ext else: - outputs['out_detrend'] = Undefined + outputs["out_detrend"] = Undefined - sout = np.loadtxt(outputs['out_file']) + sout = np.loadtxt(outputs["out_file"]) # handle newer versions of AFNI if sout.size == 8: - outputs['fwhm'] = tuple(sout[0, :]) + outputs["fwhm"] = tuple(sout[0, :]) else: - outputs['fwhm'] = tuple(sout) + outputs["fwhm"] = tuple(sout) if self._acf: assert sout.size == 8, "Wrong number of elements in %s" % str(sout) - outputs['acf_param'] = tuple(sout[1]) + outputs["acf_param"] = tuple(sout[1]) - outputs['out_acf'] = op.abspath('3dFWHMx.1D') + outputs["out_acf"] = op.abspath("3dFWHMx.1D") if isinstance(self.inputs.acf, (str, bytes)): - outputs['out_acf'] = op.abspath(self.inputs.acf) + outputs["out_acf"] = op.abspath(self.inputs.acf) return outputs @@ -1342,81 +1427,103 @@ class LocalBistatInputSpec(AFNICommandInputSpec): in_file1 = File( exists=True, mandatory=True, - argstr='%s', + argstr="%s", position=-2, - desc='Filename of the first image') + desc="Filename of the first image", + ) in_file2 = File( exists=True, mandatory=True, - argstr='%s', + argstr="%s", position=-1, - desc='Filename of the second image') + desc="Filename of the second image", + ) neighborhood = traits.Either( - traits.Tuple(traits.Enum('SPHERE', 'RHDD', 'TOHD'), traits.Float()), - traits.Tuple(traits.Enum('RECT'), traits.Tuple(traits.Float(), - traits.Float(), - traits.Float())), + traits.Tuple(traits.Enum("SPHERE", "RHDD", "TOHD"), traits.Float()), + traits.Tuple( + traits.Enum("RECT"), + traits.Tuple(traits.Float(), traits.Float(), traits.Float()), + ), mandatory=True, - desc='The region around each voxel that will be extracted for ' - 'the statistics calculation. 
Possible regions are: ' - '\'SPHERE\', \'RHDD\' (rhombic dodecahedron), \'TOHD\' ' - '(truncated octahedron) with a given radius in mm or ' - '\'RECT\' (rectangular block) with dimensions to specify in mm.', - argstr="-nbhd '%s(%s)'") - _stat_names = ['pearson', 'spearman', 'quadrant', 'mutinfo', 'normuti', - 'jointent', 'hellinger', 'crU', 'crM', 'crA', 'L2slope', - 'L1slope', 'num', 'ALL'] + desc="The region around each voxel that will be extracted for " + "the statistics calculation. Possible regions are: " + "'SPHERE', 'RHDD' (rhombic dodecahedron), 'TOHD' " + "(truncated octahedron) with a given radius in mm or " + "'RECT' (rectangular block) with dimensions to specify in mm.", + argstr="-nbhd '%s(%s)'", + ) + _stat_names = [ + "pearson", + "spearman", + "quadrant", + "mutinfo", + "normuti", + "jointent", + "hellinger", + "crU", + "crM", + "crA", + "L2slope", + "L1slope", + "num", + "ALL", + ] stat = InputMultiPath( traits.Enum(_stat_names), mandatory=True, - desc='statistics to compute. Possible names are :' - ' * pearson = Pearson correlation coefficient' - ' * spearman = Spearman correlation coefficient' - ' * quadrant = Quadrant correlation coefficient' - ' * mutinfo = Mutual Information' - ' * normuti = Normalized Mutual Information' - ' * jointent = Joint entropy' - ' * hellinger= Hellinger metric' - ' * crU = Correlation ratio (Unsymmetric)' - ' * crM = Correlation ratio (symmetrized by Multiplication)' - ' * crA = Correlation ratio (symmetrized by Addition)' - ' * L2slope = slope of least-squares (L2) linear regression of ' - ' the data from dataset1 vs. the dataset2 ' - ' (i.e., d2 = a + b*d1 ==> this is \'b\')' - ' * L1slope = slope of least-absolute-sum (L1) linear ' - ' regression of the data from dataset1 vs. ' - ' the dataset2' - ' * num = number of the values in the region: ' - ' with the use of -mask or -automask, ' - ' the size of the region around any given ' - ' voxel will vary; this option lets you ' - ' map that size.' - ' * ALL = all of the above, in that order' - 'More than one option can be used.', - argstr='-stat %s...') + desc="statistics to compute. Possible names are :" + " * pearson = Pearson correlation coefficient" + " * spearman = Spearman correlation coefficient" + " * quadrant = Quadrant correlation coefficient" + " * mutinfo = Mutual Information" + " * normuti = Normalized Mutual Information" + " * jointent = Joint entropy" + " * hellinger= Hellinger metric" + " * crU = Correlation ratio (Unsymmetric)" + " * crM = Correlation ratio (symmetrized by Multiplication)" + " * crA = Correlation ratio (symmetrized by Addition)" + " * L2slope = slope of least-squares (L2) linear regression of " + " the data from dataset1 vs. the dataset2 " + " (i.e., d2 = a + b*d1 ==> this is 'b')" + " * L1slope = slope of least-absolute-sum (L1) linear " + " regression of the data from dataset1 vs. " + " the dataset2" + " * num = number of the values in the region: " + " with the use of -mask or -automask, " + " the size of the region around any given " + " voxel will vary; this option lets you " + " map that size." + " * ALL = all of the above, in that order" + "More than one option can be used.", + argstr="-stat %s...", + ) mask_file = File( exists=True, - desc='mask image file name. Voxels NOT in the mask will not be used ' - 'in the neighborhood of any voxel. Also, a voxel NOT in the mask ' - 'will have its statistic(s) computed as zero (0).', - argstr='-mask %s') + desc="mask image file name. Voxels NOT in the mask will not be used " + "in the neighborhood of any voxel. 
Also, a voxel NOT in the mask " + "will have its statistic(s) computed as zero (0).", + argstr="-mask %s", + ) automask = traits.Bool( - desc='Compute the mask as in program 3dAutomask.', - argstr='-automask', - xor=['weight_file']) + desc="Compute the mask as in program 3dAutomask.", + argstr="-automask", + xor=["weight_file"], + ) weight_file = File( exists=True, - desc='File name of an image to use as a weight. Only applies to ' - '\'pearson\' statistics.', - argstr='-weight %s', - xor=['automask']) + desc="File name of an image to use as a weight. Only applies to " + "'pearson' statistics.", + argstr="-weight %s", + xor=["automask"], + ) out_file = File( - desc='Output dataset.', - argstr='-prefix %s', - name_source='in_file1', - name_template='%s_bistat', + desc="Output dataset.", + argstr="-prefix %s", + name_source="in_file1", + name_template="%s_bistat", keep_extension=True, - position=0) + position=0, + ) class LocalBistat(AFNICommand): @@ -1442,157 +1549,184 @@ class LocalBistat(AFNICommand): """ - _cmd = '3dLocalBistat' + _cmd = "3dLocalBistat" input_spec = LocalBistatInputSpec output_spec = AFNICommandOutputSpec def _format_arg(self, name, spec, value): - if name == 'neighborhood' and value[0] == 'RECT': - value = ('RECT', '%s,%s,%s' % value[1]) + if name == "neighborhood" and value[0] == "RECT": + value = ("RECT", "%s,%s,%s" % value[1]) return super(LocalBistat, self)._format_arg(name, spec, value) class LocalstatInputSpec(AFNICommandInputSpec): in_file = File( - exists=True, - mandatory=True, - argstr='%s', - position=-1, - desc='input dataset') + exists=True, mandatory=True, argstr="%s", position=-1, desc="input dataset" + ) neighborhood = traits.Either( - traits.Tuple(traits.Enum('SPHERE', 'RHDD', 'TOHD'), traits.Float()), - traits.Tuple(traits.Enum('RECT'), traits.Tuple(traits.Float(), - traits.Float(), - traits.Float())), + traits.Tuple(traits.Enum("SPHERE", "RHDD", "TOHD"), traits.Float()), + traits.Tuple( + traits.Enum("RECT"), + traits.Tuple(traits.Float(), traits.Float(), traits.Float()), + ), mandatory=True, - desc='The region around each voxel that will be extracted for ' - 'the statistics calculation. Possible regions are: ' - '\'SPHERE\', \'RHDD\' (rhombic dodecahedron), \'TOHD\' ' - '(truncated octahedron) with a given radius in mm or ' - '\'RECT\' (rectangular block) with dimensions to specify in mm.', - argstr="-nbhd '%s(%s)'") - _stat_names = ['mean', 'stdev', 'var', 'cvar', 'median', 'MAD', 'min', - 'max', 'absmax', 'num', 'sum', 'FWHM', 'FWHMbar', 'rank', - 'frank', 'P2skew', 'ALL', 'mMP2s', 'mmMP2s'] + desc="The region around each voxel that will be extracted for " + "the statistics calculation. Possible regions are: " + "'SPHERE', 'RHDD' (rhombic dodecahedron), 'TOHD' " + "(truncated octahedron) with a given radius in mm or " + "'RECT' (rectangular block) with dimensions to specify in mm.", + argstr="-nbhd '%s(%s)'", + ) + _stat_names = [ + "mean", + "stdev", + "var", + "cvar", + "median", + "MAD", + "min", + "max", + "absmax", + "num", + "sum", + "FWHM", + "FWHMbar", + "rank", + "frank", + "P2skew", + "ALL", + "mMP2s", + "mmMP2s", + ] stat = InputMultiObject( traits.Either( - traits.Enum(_stat_names), - traits.Tuple(traits.Enum('perc'), - traits.Tuple(traits.Float, traits.Float, traits.Float))), + traits.Enum(_stat_names), + traits.Tuple( + traits.Enum("perc"), + traits.Tuple(traits.Float, traits.Float, traits.Float), + ), + ), mandatory=True, - desc='statistics to compute. 
Possible names are :\n' - ' * mean = average of the values\n' - ' * stdev = standard deviation\n' - ' * var = variance (stdev*stdev)\n' - ' * cvar = coefficient of variation = stdev/fabs(mean)\n' - ' * median = median of the values\n' - ' * MAD = median absolute deviation\n' - ' * min = minimum\n' - ' * max = maximum\n' - ' * absmax = maximum of the absolute values\n' - ' * num = number of the values in the region:\n' - ' with the use of -mask or -automask,' - ' the size of the region around any given' - ' voxel will vary; this option lets you' - ' map that size. It may be useful if you' - ' plan to compute a t-statistic (say) from' - ' the mean and stdev outputs.\n' - ' * sum = sum of the values in the region\n' - ' * FWHM = compute (like 3dFWHM) image smoothness' - ' inside each voxel\'s neighborhood. Results' - ' are in 3 sub-bricks: FWHMx, FHWMy, and FWHMz.' - ' Places where an output is -1 are locations' - ' where the FWHM value could not be computed' - ' (e.g., outside the mask).\n' - ' * FWHMbar= Compute just the average of the 3 FWHM values' - ' (normally would NOT do this with FWHM also).\n' - ' * perc:P0:P1:Pstep = \n' - ' Compute percentiles between P0 and P1 with a ' - ' step of Pstep.\n' - ' Default P1 is equal to P0 and default P2 = 1\n' - ' * rank = rank of the voxel\'s intensity\n' - ' * frank = rank / number of voxels in neighborhood\n' - ' * P2skew = Pearson\'s second skewness coefficient' - ' 3 * (mean - median) / stdev\n' - ' * ALL = all of the above, in that order ' - ' (except for FWHMbar and perc).\n' - ' * mMP2s = Exactly the same output as:' - ' median, MAD, P2skew,' - ' but a little faster\n' - ' * mmMP2s = Exactly the same output as:' - ' mean, median, MAD, P2skew\n' - 'More than one option can be used.', - argstr='-stat %s...') + desc="statistics to compute. Possible names are :\n" + " * mean = average of the values\n" + " * stdev = standard deviation\n" + " * var = variance (stdev*stdev)\n" + " * cvar = coefficient of variation = stdev/fabs(mean)\n" + " * median = median of the values\n" + " * MAD = median absolute deviation\n" + " * min = minimum\n" + " * max = maximum\n" + " * absmax = maximum of the absolute values\n" + " * num = number of the values in the region:\n" + " with the use of -mask or -automask," + " the size of the region around any given" + " voxel will vary; this option lets you" + " map that size. It may be useful if you" + " plan to compute a t-statistic (say) from" + " the mean and stdev outputs.\n" + " * sum = sum of the values in the region\n" + " * FWHM = compute (like 3dFWHM) image smoothness" + " inside each voxel's neighborhood. Results" + " are in 3 sub-bricks: FWHMx, FHWMy, and FWHMz." 
+ " Places where an output is -1 are locations" + " where the FWHM value could not be computed" + " (e.g., outside the mask).\n" + " * FWHMbar= Compute just the average of the 3 FWHM values" + " (normally would NOT do this with FWHM also).\n" + " * perc:P0:P1:Pstep = \n" + " Compute percentiles between P0 and P1 with a " + " step of Pstep.\n" + " Default P1 is equal to P0 and default P2 = 1\n" + " * rank = rank of the voxel's intensity\n" + " * frank = rank / number of voxels in neighborhood\n" + " * P2skew = Pearson's second skewness coefficient" + " 3 * (mean - median) / stdev\n" + " * ALL = all of the above, in that order " + " (except for FWHMbar and perc).\n" + " * mMP2s = Exactly the same output as:" + " median, MAD, P2skew," + " but a little faster\n" + " * mmMP2s = Exactly the same output as:" + " mean, median, MAD, P2skew\n" + "More than one option can be used.", + argstr="-stat %s...", + ) mask_file = File( exists=True, - desc='Mask image file name. Voxels NOT in the mask will not be used ' - 'in the neighborhood of any voxel. Also, a voxel NOT in the ' - 'mask will have its statistic(s) computed as zero (0) unless ' - 'the parameter \'nonmask\' is set to true.', - argstr='-mask %s') + desc="Mask image file name. Voxels NOT in the mask will not be used " + "in the neighborhood of any voxel. Also, a voxel NOT in the " + "mask will have its statistic(s) computed as zero (0) unless " + "the parameter 'nonmask' is set to true.", + argstr="-mask %s", + ) automask = traits.Bool( - desc='Compute the mask as in program 3dAutomask.', - argstr='-automask') + desc="Compute the mask as in program 3dAutomask.", argstr="-automask" + ) nonmask = traits.Bool( - desc='Voxels not in the mask WILL have their local statistics ' - 'computed from all voxels in their neighborhood that ARE in ' - 'the mask.\n' - ' * For instance, this option can be used to compute the ' - ' average local white matter time series, even at non-WM ' - ' voxels.', - argstr='-use_nonmask') + desc="Voxels not in the mask WILL have their local statistics " + "computed from all voxels in their neighborhood that ARE in " + "the mask.\n" + " * For instance, this option can be used to compute the " + " average local white matter time series, even at non-WM " + " voxels.", + argstr="-use_nonmask", + ) reduce_grid = traits.Either( traits.Float, traits.Tuple(traits.Float, traits.Float, traits.Float), - argstr='-reduce_grid %s', - xor=['reduce_restore_grid', 'reduce_max_vox'], - desc='Compute output on a grid that is reduced by the specified ' - 'factors. If a single value is passed, output is resampled ' - 'to the specified isotropic grid. Otherwise, the 3 inputs ' - 'describe the reduction in the X, Y, and Z directions. This ' - 'option speeds up computations at the expense of resolution. ' - 'It should only be used when the nbhd is quite large with ' - 'respect to the input\'s resolution, and the resultant stats ' - 'are expected to be smooth.') + argstr="-reduce_grid %s", + xor=["reduce_restore_grid", "reduce_max_vox"], + desc="Compute output on a grid that is reduced by the specified " + "factors. If a single value is passed, output is resampled " + "to the specified isotropic grid. Otherwise, the 3 inputs " + "describe the reduction in the X, Y, and Z directions. This " + "option speeds up computations at the expense of resolution. 
" + "It should only be used when the nbhd is quite large with " + "respect to the input's resolution, and the resultant stats " + "are expected to be smooth.", + ) reduce_restore_grid = traits.Either( traits.Float, traits.Tuple(traits.Float, traits.Float, traits.Float), - argstr='-reduce_restore_grid %s', - xor=['reduce_max_vox', 'reduce_grid'], - desc='Like reduce_grid, but also resample output back to input' - 'grid.') + argstr="-reduce_restore_grid %s", + xor=["reduce_max_vox", "reduce_grid"], + desc="Like reduce_grid, but also resample output back to input" "grid.", + ) reduce_max_vox = traits.Float( - argstr='-reduce_max_vox %s', - xor=['reduce_restore_grid', 'reduce_grid'], - desc='Like reduce_restore_grid, but automatically set Rx Ry Rz so' - 'that the computation grid is at a resolution of nbhd/MAX_VOX' - 'voxels.') + argstr="-reduce_max_vox %s", + xor=["reduce_restore_grid", "reduce_grid"], + desc="Like reduce_restore_grid, but automatically set Rx Ry Rz so" + "that the computation grid is at a resolution of nbhd/MAX_VOX" + "voxels.", + ) grid_rmode = traits.Enum( - 'NN', - 'Li', - 'Cu', - 'Bk', - argstr='-grid_rmode %s', - requires=['reduce_restore_grid'], - desc='Interpolant to use when resampling the output with the' - 'reduce_restore_grid option. The resampling method string ' - 'RESAM should come from the set {\'NN\', \'Li\', \'Cu\', ' - '\'Bk\'}. These stand for \'Nearest Neighbor\', \'Linear\', ' - '\'Cubic\', and \'Blocky\' interpolation, respectively.') + "NN", + "Li", + "Cu", + "Bk", + argstr="-grid_rmode %s", + requires=["reduce_restore_grid"], + desc="Interpolant to use when resampling the output with the" + "reduce_restore_grid option. The resampling method string " + "RESAM should come from the set {'NN', 'Li', 'Cu', " + "'Bk'}. These stand for 'Nearest Neighbor', 'Linear', " + "'Cubic', and 'Blocky' interpolation, respectively.", + ) quiet = traits.Bool( - argstr='-quiet', - desc='Stop the highly informative progress reports.') + argstr="-quiet", desc="Stop the highly informative progress reports." 
+ ) overwrite = traits.Bool( - desc='overwrite output file if it already exists', - argstr='-overwrite') + desc="overwrite output file if it already exists", argstr="-overwrite" + ) out_file = File( - desc='Output dataset.', - argstr='-prefix %s', - name_source='in_file', - name_template='%s_localstat', + desc="Output dataset.", + argstr="-prefix %s", + name_source="in_file", + name_template="%s_localstat", keep_extension=True, - position=0) + position=0, + ) class Localstat(AFNICommand): @@ -1618,18 +1752,19 @@ class Localstat(AFNICommand): >>> res = localstat.run() # doctest: +SKIP """ - _cmd = '3dLocalstat' + + _cmd = "3dLocalstat" input_spec = LocalstatInputSpec output_spec = AFNICommandOutputSpec def _format_arg(self, name, spec, value): - if name == 'neighborhood' and value[0] == 'RECT': - value = ('RECT', '%s,%s,%s' % value[1]) - if name == 'stat': - value = ['perc:%s:%s:%s' % v[1] if len(v) == 2 else v for v in value] - if name == 'reduce_grid' or name == 'reduce_restore_grid': + if name == "neighborhood" and value[0] == "RECT": + value = ("RECT", "%s,%s,%s" % value[1]) + if name == "stat": + value = ["perc:%s:%s:%s" % v[1] if len(v) == 2 else v for v in value] + if name == "reduce_grid" or name == "reduce_restore_grid": if len(value) == 3: - value = '%s %s %s' % value + value = "%s %s %s" % value return super(Localstat, self)._format_arg(name, spec, value) @@ -1637,61 +1772,68 @@ def _format_arg(self, name, spec, value): class MaskToolInputSpec(AFNICommandInputSpec): in_file = InputMultiPath( File(exists=True), - desc='input file or files to 3dmask_tool', - argstr='-input %s', + desc="input file or files to 3dmask_tool", + argstr="-input %s", position=-1, mandatory=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_mask', - desc='output image file name', - argstr='-prefix %s', - name_source='in_file') + name_template="%s_mask", + desc="output image file name", + argstr="-prefix %s", + name_source="in_file", + ) count = traits.Bool( - desc='Instead of created a binary 0/1 mask dataset, create one with ' - 'counts of voxel overlap, i.e., each voxel will contain the ' - 'number of masks that it is set in.', - argstr='-count', - position=2) + desc="Instead of created a binary 0/1 mask dataset, create one with " + "counts of voxel overlap, i.e., each voxel will contain the " + "number of masks that it is set in.", + argstr="-count", + position=2, + ) datum = traits.Enum( - 'byte', - 'short', - 'float', - argstr='-datum %s', - desc='specify data type for output. Valid types are \'byte\', ' - '\'short\' and \'float\'.') + "byte", + "short", + "float", + argstr="-datum %s", + desc="specify data type for output. Valid types are 'byte', " + "'short' and 'float'.", + ) dilate_inputs = Str( - desc='Use this option to dilate and/or erode datasets as they are ' - 'read. ex. \'5 -5\' to dilate and erode 5 times', - argstr='-dilate_inputs %s') + desc="Use this option to dilate and/or erode datasets as they are " + "read. ex. 
'5 -5' to dilate and erode 5 times", + argstr="-dilate_inputs %s", + ) dilate_results = Str( - desc='dilate and/or erode combined mask at the given levels.', - argstr='-dilate_results %s') + desc="dilate and/or erode combined mask at the given levels.", + argstr="-dilate_results %s", + ) frac = traits.Float( - desc='When combining masks (across datasets and sub-bricks), use ' - 'this option to restrict the result to a certain fraction of the ' - 'set of volumes', - argstr='-frac %s') - inter = traits.Bool( - desc='intersection, this means -frac 1.0', argstr='-inter') - union = traits.Bool(desc='union, this means -frac 0', argstr='-union') + desc="When combining masks (across datasets and sub-bricks), use " + "this option to restrict the result to a certain fraction of the " + "set of volumes", + argstr="-frac %s", + ) + inter = traits.Bool(desc="intersection, this means -frac 1.0", argstr="-inter") + union = traits.Bool(desc="union, this means -frac 0", argstr="-union") fill_holes = traits.Bool( - desc='This option can be used to fill holes in the resulting mask, ' - 'i.e. after all other processing has been done.', - argstr='-fill_holes') + desc="This option can be used to fill holes in the resulting mask, " + "i.e. after all other processing has been done.", + argstr="-fill_holes", + ) fill_dirs = Str( - desc='fill holes only in the given directions. This option is for use ' - 'with -fill holes. should be a single string that specifies ' - '1-3 of the axes using {x,y,z} labels (i.e. dataset axis order), ' - 'or using the labels in {R,L,A,P,I,S}.', - argstr='-fill_dirs %s', - requires=['fill_holes']) - verbose = traits.Int( - desc='specify verbosity level, for 0 to 3', argstr='-verb %s') + desc="fill holes only in the given directions. This option is for use " + "with -fill holes. should be a single string that specifies " + "1-3 of the axes using {x,y,z} labels (i.e. 
dataset axis order), " + "or using the labels in {R,L,A,P,I,S}.", + argstr="-fill_dirs %s", + requires=["fill_holes"], + ) + verbose = traits.Int(desc="specify verbosity level, for 0 to 3", argstr="-verb %s") class MaskToolOutputSpec(TraitedSpec): - out_file = File(desc='mask file', exists=True) + out_file = File(desc="mask file", exists=True) class MaskTool(AFNICommand): @@ -1713,27 +1855,31 @@ class MaskTool(AFNICommand): """ - _cmd = '3dmask_tool' + _cmd = "3dmask_tool" input_spec = MaskToolInputSpec output_spec = MaskToolOutputSpec class MergeInputSpec(AFNICommandInputSpec): in_files = InputMultiPath( - File(desc='input file to 3dmerge', exists=True), - argstr='%s', + File(desc="input file to 3dmerge", exists=True), + argstr="%s", position=-1, mandatory=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_merge', - desc='output image file name', - argstr='-prefix %s', - name_source='in_files') + name_template="%s_merge", + desc="output image file name", + argstr="-prefix %s", + name_source="in_files", + ) doall = traits.Bool( - desc='apply options to all sub-bricks in dataset', argstr='-doall') + desc="apply options to all sub-bricks in dataset", argstr="-doall" + ) blurfwhm = traits.Int( - desc='FWHM blur value (mm)', argstr='-1blur_fwhm %d', units='mm') + desc="FWHM blur value (mm)", argstr="-1blur_fwhm %d", units="mm" + ) class Merge(AFNICommand): @@ -1757,29 +1903,32 @@ class Merge(AFNICommand): """ - _cmd = '3dmerge' + _cmd = "3dmerge" input_spec = MergeInputSpec output_spec = AFNICommandOutputSpec class NotesInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dNotes', - argstr='%s', + desc="input file to 3dNotes", + argstr="%s", position=-1, mandatory=True, exists=True, - copyfile=False) - add = Str(desc='note to add', argstr='-a "%s"') + copyfile=False, + ) + add = Str(desc="note to add", argstr='-a "%s"') add_history = Str( - desc='note to add to history', argstr='-h "%s"', xor=['rep_history']) + desc="note to add to history", argstr='-h "%s"', xor=["rep_history"] + ) rep_history = Str( - desc='note with which to replace history', + desc="note with which to replace history", argstr='-HH "%s"', - xor=['add_history']) - delete = traits.Int(desc='delete note number num', argstr='-d %d') - ses = traits.Bool(desc='print to stdout the expanded notes', argstr='-ses') - out_file = File(desc='output image file name', argstr='%s') + xor=["add_history"], + ) + delete = traits.Int(desc="delete note number num", argstr="-d %d") + ses = traits.Bool(desc="print to stdout the expanded notes", argstr="-ses") + out_file = File(desc="output image file name", argstr="%s") class Notes(CommandLine): @@ -1801,13 +1950,13 @@ class Notes(CommandLine): >>> res = notes.run() # doctest: +SKIP """ - _cmd = '3dNotes' + _cmd = "3dNotes" input_spec = NotesInputSpec output_spec = AFNICommandOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = os.path.abspath(self.inputs.in_file) + outputs["out_file"] = os.path.abspath(self.inputs.in_file) return outputs @@ -1816,24 +1965,27 @@ class NwarpAdjustInputSpec(AFNICommandInputSpec): File(exists=True), minlen=5, mandatory=True, - argstr='-nwarp %s', - desc='List of input 3D warp datasets') + argstr="-nwarp %s", + desc="List of input 3D warp datasets", + ) in_files = InputMultiPath( File(exists=True), minlen=5, - argstr='-source %s', - desc='List of input 3D datasets to be warped by the adjusted warp ' - 'datasets. 
There must be exactly as many of these datasets as ' - 'there are input warps.') + argstr="-source %s", + desc="List of input 3D datasets to be warped by the adjusted warp " + "datasets. There must be exactly as many of these datasets as " + "there are input warps.", + ) out_file = File( - desc='Output mean dataset, only needed if in_files are also given. ' - 'The output dataset will be on the common grid shared by the ' - 'source datasets.', - argstr='-prefix %s', - name_source='in_files', - name_template='%s_NwarpAdjust', + desc="Output mean dataset, only needed if in_files are also given. " + "The output dataset will be on the common grid shared by the " + "source datasets.", + argstr="-prefix %s", + name_source="in_files", + name_template="%s_NwarpAdjust", keep_extension=True, - requires=['in_files']) + requires=["in_files"], + ) class NwarpAdjust(AFNICommandBase): @@ -1858,7 +2010,8 @@ class NwarpAdjust(AFNICommandBase): >>> res = adjust.run() # doctest: +SKIP """ - _cmd = '3dNwarpAdjust' + + _cmd = "3dNwarpAdjust" input_spec = NwarpAdjustInputSpec output_spec = AFNICommandOutputSpec @@ -1866,7 +2019,7 @@ def _parse_inputs(self, skip=None): if not self.inputs.in_files: if skip is None: skip = [] - skip += ['out_file'] + skip += ["out_file"] return super(NwarpAdjust, self)._parse_inputs(skip=skip) def _list_outputs(self): @@ -1874,15 +2027,16 @@ def _list_outputs(self): if self.inputs.in_files: if self.inputs.out_file: - outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) else: basename = os.path.basename(self.inputs.in_files[0]) basename_noext, ext = op.splitext(basename) - if '.gz' in ext: + if ".gz" in ext: basename_noext, ext2 = op.splitext(basename_noext) ext = ext2 + ext - outputs['out_file'] = os.path.abspath( - basename_noext + '_NwarpAdjust' + ext) + outputs["out_file"] = os.path.abspath( + basename_noext + "_NwarpAdjust" + ext + ) return outputs @@ -1891,62 +2045,67 @@ class NwarpApplyInputSpec(CommandLineInputSpec): File(exists=True), traits.List(File(exists=True)), mandatory=True, - argstr='-source %s', - desc='the name of the dataset to be warped ' - 'can be multiple datasets') + argstr="-source %s", + desc="the name of the dataset to be warped " "can be multiple datasets", + ) warp = traits.String( - desc='the name of the warp dataset. ' - 'multiple warps can be concatenated (make sure they exist)', - argstr='-nwarp %s', - mandatory=True) + desc="the name of the warp dataset. 
" + "multiple warps can be concatenated (make sure they exist)", + argstr="-nwarp %s", + mandatory=True, + ) inv_warp = traits.Bool( - desc='After the warp specified in \'-nwarp\' is computed, invert it', - argstr='-iwarp') + desc="After the warp specified in '-nwarp' is computed, invert it", + argstr="-iwarp", + ) master = File( exists=True, - desc='the name of the master dataset, which defines the output grid', - argstr='-master %s') + desc="the name of the master dataset, which defines the output grid", + argstr="-master %s", + ) interp = traits.Enum( - 'wsinc5', - 'NN', - 'nearestneighbour', - 'nearestneighbor', - 'linear', - 'trilinear', - 'cubic', - 'tricubic', - 'quintic', - 'triquintic', - desc='defines interpolation method to use during warp', - argstr='-interp %s', - usedefault=True) + "wsinc5", + "NN", + "nearestneighbour", + "nearestneighbor", + "linear", + "trilinear", + "cubic", + "tricubic", + "quintic", + "triquintic", + desc="defines interpolation method to use during warp", + argstr="-interp %s", + usedefault=True, + ) ainterp = traits.Enum( - 'NN', - 'nearestneighbour', - 'nearestneighbor', - 'linear', - 'trilinear', - 'cubic', - 'tricubic', - 'quintic', - 'triquintic', - 'wsinc5', - desc='specify a different interpolation method than might ' - 'be used for the warp', - argstr='-ainterp %s') + "NN", + "nearestneighbour", + "nearestneighbor", + "linear", + "trilinear", + "cubic", + "tricubic", + "quintic", + "triquintic", + "wsinc5", + desc="specify a different interpolation method than might " + "be used for the warp", + argstr="-ainterp %s", + ) out_file = File( - name_template='%s_Nwarp', - desc='output image file name', - argstr='-prefix %s', - name_source='in_file') + name_template="%s_Nwarp", + desc="output image file name", + argstr="-prefix %s", + name_source="in_file", + ) short = traits.Bool( - desc='Write output dataset using 16-bit short integers, rather than ' - 'the usual 32-bit floats.', - argstr='-short') - quiet = traits.Bool( - desc='don\'t be verbose :(', argstr='-quiet', xor=['verb']) - verb = traits.Bool( - desc='be extra verbose :)', argstr='-verb', xor=['quiet']) + desc="Write output dataset using 16-bit short integers, rather than " + "the usual 32-bit floats.", + argstr="-short", + ) + quiet = traits.Bool(desc="don't be verbose :(", argstr="-quiet", xor=["verb"]) + verb = traits.Bool(desc="be extra verbose :)", argstr="-verb", xor=["quiet"]) class NwarpApply(AFNICommandBase): @@ -1970,46 +2129,49 @@ class NwarpApply(AFNICommandBase): >>> res = nwarp.run() # doctest: +SKIP """ - _cmd = '3dNwarpApply' + + _cmd = "3dNwarpApply" input_spec = NwarpApplyInputSpec output_spec = AFNICommandOutputSpec class NwarpCatInputSpec(AFNICommandInputSpec): in_files = traits.List( - traits.Either(File(), - traits.Tuple( - traits.Enum('IDENT', 'INV', 'SQRT', 'SQRTINV'), - File())), + traits.Either( + File(), traits.Tuple(traits.Enum("IDENT", "INV", "SQRT", "SQRTINV"), File()) + ), desc="list of tuples of 3D warps and associated functions", mandatory=True, argstr="%s", - position=-1) + position=-1, + ) space = traits.String( - desc='string to attach to the output dataset as its atlas space ' - 'marker.', - argstr='-space %s') - inv_warp = traits.Bool( - desc='invert the final warp before output', argstr='-iwarp') + desc="string to attach to the output dataset as its atlas space " "marker.", + argstr="-space %s", + ) + inv_warp = traits.Bool(desc="invert the final warp before output", argstr="-iwarp") interp = traits.Enum( - 'wsinc5', - 'linear', - 'quintic', - 
desc='specify a different interpolation method than might ' - 'be used for the warp', - argstr='-interp %s', - usedefault=True) + "wsinc5", + "linear", + "quintic", + desc="specify a different interpolation method than might " + "be used for the warp", + argstr="-interp %s", + usedefault=True, + ) expad = traits.Int( - desc='Pad the nonlinear warps by the given number of voxels voxels in ' - 'all directions. The warp displacements are extended by linear ' - 'extrapolation from the faces of the input grid..', - argstr='-expad %d') + desc="Pad the nonlinear warps by the given number of voxels voxels in " + "all directions. The warp displacements are extended by linear " + "extrapolation from the faces of the input grid..", + argstr="-expad %d", + ) out_file = File( - name_template='%s_NwarpCat', - desc='output image file name', - argstr='-prefix %s', - name_source='in_files') - verb = traits.Bool(desc='be verbose', argstr='-verb') + name_template="%s_NwarpCat", + desc="output image file name", + argstr="-prefix %s", + name_source="in_files", + ) + verb = traits.Bool(desc="be verbose", argstr="-verb") class NwarpCat(AFNICommand): @@ -2064,91 +2226,99 @@ class NwarpCat(AFNICommand): >>> res = nwarpcat.run() # doctest: +SKIP """ - _cmd = '3dNwarpCat' + + _cmd = "3dNwarpCat" input_spec = NwarpCatInputSpec output_spec = AFNICommandOutputSpec def _format_arg(self, name, spec, value): - if name == 'in_files': - return spec.argstr % (' '.join([ - "'" + v[0] + "(" + v[1] + ")'" if isinstance(v, tuple) else v - for v in value - ])) + if name == "in_files": + return spec.argstr % ( + " ".join( + [ + "'" + v[0] + "(" + v[1] + ")'" if isinstance(v, tuple) else v + for v in value + ] + ) + ) return super(NwarpCat, self)._format_arg(name, spec, value) def _gen_filename(self, name): - if name == 'out_file': - return self._gen_fname( - self.inputs.in_files[0][0], suffix='_NwarpCat') + if name == "out_file": + return self._gen_fname(self.inputs.in_files[0][0], suffix="_NwarpCat") def _list_outputs(self): outputs = self.output_spec().get() if isdefined(self.inputs.out_file): - outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) else: - outputs['out_file'] = os.path.abspath( + outputs["out_file"] = os.path.abspath( self._gen_fname( - self.inputs.in_files[0], - suffix='_NwarpCat+tlrc', - ext='.HEAD')) + self.inputs.in_files[0], suffix="_NwarpCat+tlrc", ext=".HEAD" + ) + ) return outputs class OneDToolPyInputSpec(AFNIPythonCommandInputSpec): in_file = File( - desc='input file to OneDTool', - argstr='-infile %s', - mandatory=True, - exists=True) + desc="input file to OneDTool", argstr="-infile %s", mandatory=True, exists=True + ) set_nruns = traits.Int( - desc='treat the input data as if it has nruns', argstr='-set_nruns %d') + desc="treat the input data as if it has nruns", argstr="-set_nruns %d" + ) derivative = traits.Bool( - desc= - 'take the temporal derivative of each vector (done as first backward difference)', - argstr='-derivative') + desc="take the temporal derivative of each vector (done as first backward difference)", + argstr="-derivative", + ) demean = traits.Bool( - desc='demean each run (new mean of each run = 0.0)', argstr='-demean') + desc="demean each run (new mean of each run = 0.0)", argstr="-demean" + ) out_file = File( - desc='write the current 1D data to FILE', - argstr='-write %s', - xor=['show_cormat_warnings']) + desc="write the current 1D data to FILE", + argstr="-write %s", + xor=["show_cormat_warnings"], + ) 
show_censor_count = traits.Bool( - desc= - 'display the total number of censored TRs Note : if input is a valid xmat.1D dataset, ' - 'then the count will come from the header. Otherwise the input is assumed to be a binary censor' - 'file, and zeros are simply counted.', - argstr="-show_censor_count") + desc="display the total number of censored TRs Note : if input is a valid xmat.1D dataset, " + "then the count will come from the header. Otherwise the input is assumed to be a binary censor" + "file, and zeros are simply counted.", + argstr="-show_censor_count", + ) censor_motion = traits.Tuple( (traits.Float(), File()), - desc= - 'Tuple of motion limit and outfile prefix. need to also set set_nruns -r set_run_lengths', - argstr="-censor_motion %f %s") + desc="Tuple of motion limit and outfile prefix. need to also set set_nruns -r set_run_lengths", + argstr="-censor_motion %f %s", + ) censor_prev_TR = traits.Bool( - desc='for each censored TR, also censor previous', - argstr='-censor_prev_TR') + desc="for each censored TR, also censor previous", argstr="-censor_prev_TR" + ) show_trs_uncensored = traits.Enum( - 'comma', - 'space', - 'encoded', - 'verbose', - desc= - 'display a list of TRs which were not censored in the specified style', - argstr='-show_trs_uncensored %s') + "comma", + "space", + "encoded", + "verbose", + desc="display a list of TRs which were not censored in the specified style", + argstr="-show_trs_uncensored %s", + ) show_cormat_warnings = File( - desc='Write cormat warnings to a file', + desc="Write cormat warnings to a file", argstr="-show_cormat_warnings |& tee %s", position=-1, - xor=['out_file']) + xor=["out_file"], + ) show_indices_interest = traits.Bool( desc="display column indices for regs of interest", - argstr="-show_indices_interest") + argstr="-show_indices_interest", + ) show_trs_run = traits.Int( desc="restrict -show_trs_[un]censored to the given 1-based run", - argstr="-show_trs_run %d") + argstr="-show_trs_run %d", + ) class OneDToolPyOutputSpec(AFNICommandOutputSpec): - out_file = File(desc='output of 1D_tool.py') + out_file = File(desc="output of 1D_tool.py") class OneDToolPy(AFNIPythonCommand): @@ -2166,7 +2336,7 @@ class OneDToolPy(AFNIPythonCommand): >>> res = odt.run() # doctest: +SKIP """ - _cmd = '1d_tool.py' + _cmd = "1d_tool.py" input_spec = OneDToolPyInputSpec output_spec = OneDToolPyOutputSpec @@ -2175,95 +2345,102 @@ def _list_outputs(self): outputs = self.output_spec().get() if isdefined(self.inputs.out_file): - outputs['out_file'] = os.path.join(os.getcwd(), - self.inputs.out_file) + outputs["out_file"] = os.path.join(os.getcwd(), self.inputs.out_file) if isdefined(self.inputs.show_cormat_warnings): - outputs['out_file'] = os.path.join( - os.getcwd(), self.inputs.show_cormat_warnings) + outputs["out_file"] = os.path.join( + os.getcwd(), self.inputs.show_cormat_warnings + ) if isdefined(self.inputs.censor_motion): - outputs['out_file'] = os.path.join(os.getcwd(), - self.inputs.censor_motion[1] + - '_censor.1D') + outputs["out_file"] = os.path.join( + os.getcwd(), self.inputs.censor_motion[1] + "_censor.1D" + ) return outputs class RefitInputSpec(CommandLineInputSpec): in_file = File( - desc='input file to 3drefit', - argstr='%s', + desc="input file to 3drefit", + argstr="%s", position=-1, mandatory=True, exists=True, - copyfile=True) + copyfile=True, + ) deoblique = traits.Bool( - desc='replace current transformation matrix with cardinal matrix', - argstr='-deoblique') - xorigin = Str( - desc='x distance for edge voxel offset', 
argstr='-xorigin %s') - yorigin = Str( - desc='y distance for edge voxel offset', argstr='-yorigin %s') - zorigin = Str( - desc='z distance for edge voxel offset', argstr='-zorigin %s') + desc="replace current transformation matrix with cardinal matrix", + argstr="-deoblique", + ) + xorigin = Str(desc="x distance for edge voxel offset", argstr="-xorigin %s") + yorigin = Str(desc="y distance for edge voxel offset", argstr="-yorigin %s") + zorigin = Str(desc="z distance for edge voxel offset", argstr="-zorigin %s") duporigin_file = File( - argstr='-duporigin %s', + argstr="-duporigin %s", exists=True, - desc='Copies the xorigin, yorigin, and zorigin values from the header ' - 'of the given dataset') - xdel = traits.Float(desc='new x voxel dimension in mm', argstr='-xdel %f') - ydel = traits.Float(desc='new y voxel dimension in mm', argstr='-ydel %f') - zdel = traits.Float(desc='new z voxel dimension in mm', argstr='-zdel %f') + desc="Copies the xorigin, yorigin, and zorigin values from the header " + "of the given dataset", + ) + xdel = traits.Float(desc="new x voxel dimension in mm", argstr="-xdel %f") + ydel = traits.Float(desc="new y voxel dimension in mm", argstr="-ydel %f") + zdel = traits.Float(desc="new z voxel dimension in mm", argstr="-zdel %f") xyzscale = traits.Float( - desc='Scale the size of the dataset voxels by the given factor', - argstr='-xyzscale %f') + desc="Scale the size of the dataset voxels by the given factor", + argstr="-xyzscale %f", + ) space = traits.Enum( - 'TLRC', - 'MNI', - 'ORIG', - argstr='-space %s', - desc='Associates the dataset with a specific template type, e.g. ' - 'TLRC, MNI, ORIG') + "TLRC", + "MNI", + "ORIG", + argstr="-space %s", + desc="Associates the dataset with a specific template type, e.g. " + "TLRC, MNI, ORIG", + ) atrcopy = traits.Tuple( File(exists=True), traits.Str(), - argstr='-atrcopy %s %s', - desc='Copy AFNI header attribute from the given file into the header ' - 'of the dataset(s) being modified. For more information on AFNI ' - 'header attributes, see documentation file README.attributes. ' - 'More than one \'-atrcopy\' option can be used. For AFNI ' - 'advanced users only. Do NOT use -atrcopy or -atrstring with ' - 'other modification options. See also -copyaux.') + argstr="-atrcopy %s %s", + desc="Copy AFNI header attribute from the given file into the header " + "of the dataset(s) being modified. For more information on AFNI " + "header attributes, see documentation file README.attributes. " + "More than one '-atrcopy' option can be used. For AFNI " + "advanced users only. Do NOT use -atrcopy or -atrstring with " + "other modification options. See also -copyaux.", + ) atrstring = traits.Tuple( traits.Str(), traits.Str(), - argstr='-atrstring %s %s', - desc='Copy the last given string into the dataset(s) being modified, ' - 'giving it the attribute name given by the last string.' - 'To be safe, the last string should be in quotes.') + argstr="-atrstring %s %s", + desc="Copy the last given string into the dataset(s) being modified, " + "giving it the attribute name given by the last string." + "To be safe, the last string should be in quotes.", + ) atrfloat = traits.Tuple( traits.Str(), traits.Str(), - argstr='-atrfloat %s %s', - desc='Create or modify floating point attributes. 
' - 'The input values may be specified as a single string in quotes ' - 'or as a 1D filename or string, example ' - '\'1 0.2 0 0 -0.2 1 0 0 0 0 1 0\' or ' - 'flipZ.1D or \'1D:1,0.2,2@0,-0.2,1,2@0,2@0,1,0\'') + argstr="-atrfloat %s %s", + desc="Create or modify floating point attributes. " + "The input values may be specified as a single string in quotes " + "or as a 1D filename or string, example " + "'1 0.2 0 0 -0.2 1 0 0 0 0 1 0' or " + "flipZ.1D or '1D:1,0.2,2@0,-0.2,1,2@0,2@0,1,0'", + ) atrint = traits.Tuple( traits.Str(), traits.Str(), - argstr='-atrint %s %s', - desc='Create or modify integer attributes. ' - 'The input values may be specified as a single string in quotes ' - 'or as a 1D filename or string, example ' - '\'1 0 0 0 0 1 0 0 0 0 1 0\' or ' - 'flipZ.1D or \'1D:1,0,2@0,-0,1,2@0,2@0,1,0\'') + argstr="-atrint %s %s", + desc="Create or modify integer attributes. " + "The input values may be specified as a single string in quotes " + "or as a 1D filename or string, example " + "'1 0 0 0 0 1 0 0 0 0 1 0' or " + "flipZ.1D or '1D:1,0,2@0,-0,1,2@0,2@0,1,0'", + ) saveatr = traits.Bool( - argstr='-saveatr', - desc='(default) Copy the attributes that are known to AFNI into ' - 'the dset->dblk structure thereby forcing changes to known ' - 'attributes to be present in the output. This option only makes ' - 'sense with -atrcopy.') - nosaveatr = traits.Bool(argstr='-nosaveatr', desc='Opposite of -saveatr') + argstr="-saveatr", + desc="(default) Copy the attributes that are known to AFNI into " + "the dset->dblk structure thereby forcing changes to known " + "attributes to be present in the output. This option only makes " + "sense with -atrcopy.", + ) + nosaveatr = traits.Bool(argstr="-nosaveatr", desc="Opposite of -saveatr") class Refit(AFNICommandBase): @@ -2290,93 +2467,101 @@ class Refit(AFNICommandBase): "3drefit -atrfloat IJK_TO_DICOM_REAL '1 0.2 0 0 -0.2 1 0 0 0 0 1 0' structural.nii" >>> res = refit_2.run() # doctest: +SKIP """ - _cmd = '3drefit' + + _cmd = "3drefit" input_spec = RefitInputSpec output_spec = AFNICommandOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = os.path.abspath(self.inputs.in_file) + outputs["out_file"] = os.path.abspath(self.inputs.in_file) return outputs class ReHoInputSpec(CommandLineInputSpec): in_file = File( - desc='input dataset', - argstr='-inset %s', + desc="input dataset", + argstr="-inset %s", position=1, mandatory=True, - exists=True) + exists=True, + ) out_file = File( - desc='Output dataset.', - argstr='-prefix %s', - name_source='in_file', - name_template='%s_reho', + desc="Output dataset.", + argstr="-prefix %s", + name_source="in_file", + name_template="%s_reho", keep_extension=True, - position=0) + position=0, + ) chi_sq = traits.Bool( - argstr='-chi_sq', - desc='Output the Friedman chi-squared value in addition to the ' - 'Kendall\'s W. This option is currently compatible only with ' - 'the AFNI (BRIK/HEAD) output type; the chi-squared value will ' - 'be the second sub-brick of the output dataset.') + argstr="-chi_sq", + desc="Output the Friedman chi-squared value in addition to the " + "Kendall's W. 
This option is currently compatible only with " + "the AFNI (BRIK/HEAD) output type; the chi-squared value will " + "be the second sub-brick of the output dataset.", + ) mask_file = File( - desc='Mask within which ReHo should be calculated voxelwise', - argstr='-mask %s') + desc="Mask within which ReHo should be calculated voxelwise", argstr="-mask %s" + ) neighborhood = traits.Enum( - 'faces', - 'edges', - 'vertices', - xor=['sphere', 'ellipsoid'], - argstr='-nneigh %s', - desc='voxels in neighborhood. can be: ' - '* faces (for voxel and 6 facewise neighbors, only),\n' - '* edges (for voxel and 18 face- and edge-wise neighbors),\n' - '* vertices (for voxel and 26 face-, edge-, and node-wise ' - 'neighbors).\n') + "faces", + "edges", + "vertices", + xor=["sphere", "ellipsoid"], + argstr="-nneigh %s", + desc="voxels in neighborhood. can be: " + "* faces (for voxel and 6 facewise neighbors, only),\n" + "* edges (for voxel and 18 face- and edge-wise neighbors),\n" + "* vertices (for voxel and 26 face-, edge-, and node-wise " + "neighbors).\n", + ) sphere = traits.Float( - argstr='-neigh_RAD %s', - xor=['neighborhood', 'ellipsoid'], - desc='for additional voxelwise neighborhood control, the ' - 'radius R of a desired neighborhood can be put in; R is ' - 'a floating point number, and must be >1. Examples of ' - 'the numbers of voxels in a given radius are as follows ' - '(you can roughly approximate with the ol\' 4*PI*(R^3)/3 ' - 'thing):\n' - ' R=2.0 -> V=33,\n' - ' R=2.3 -> V=57, \n' - ' R=2.9 -> V=93, \n' - ' R=3.1 -> V=123, \n' - ' R=3.9 -> V=251, \n' - ' R=4.5 -> V=389, \n' - ' R=6.1 -> V=949, \n' - 'but you can choose most any value.') + argstr="-neigh_RAD %s", + xor=["neighborhood", "ellipsoid"], + desc="for additional voxelwise neighborhood control, the " + "radius R of a desired neighborhood can be put in; R is " + "a floating point number, and must be >1. Examples of " + "the numbers of voxels in a given radius are as follows " + "(you can roughly approximate with the ol' 4*PI*(R^3)/3 " + "thing):\n" + " R=2.0 -> V=33,\n" + " R=2.3 -> V=57, \n" + " R=2.9 -> V=93, \n" + " R=3.1 -> V=123, \n" + " R=3.9 -> V=251, \n" + " R=4.5 -> V=389, \n" + " R=6.1 -> V=949, \n" + "but you can choose most any value.", + ) ellipsoid = traits.Tuple( traits.Float, traits.Float, traits.Float, - xor=['sphere', 'neighborhood'], - argstr='-neigh_X %s -neigh_Y %s -neigh_Z %s', - desc='Tuple indicating the x, y, and z radius of an ellipsoid ' - 'defining the neighbourhood of each voxel.\n' - 'The \'hood is then made according to the following relation:' - '(i/A)^2 + (j/B)^2 + (k/C)^2 <=1.\n' - 'which will have approx. V=4*PI*A*B*C/3. The impetus for ' - 'this freedom was for use with data having anisotropic ' - 'voxel edge lengths.') + xor=["sphere", "neighborhood"], + argstr="-neigh_X %s -neigh_Y %s -neigh_Z %s", + desc="Tuple indicating the x, y, and z radius of an ellipsoid " + "defining the neighbourhood of each voxel.\n" + "The 'hood is then made according to the following relation:" + "(i/A)^2 + (j/B)^2 + (k/C)^2 <=1.\n" + "which will have approx. V=4*PI*A*B*C/3. The impetus for " + "this freedom was for use with data having anisotropic " + "voxel edge lengths.", + ) label_set = File( exists=True, - argstr='-in_rois %s', - desc='a set of ROIs, each labelled with distinct ' - 'integers. ReHo will then be calculated per ROI.') + argstr="-in_rois %s", + desc="a set of ROIs, each labelled with distinct " + "integers. 
ReHo will then be calculated per ROI.", + ) overwrite = traits.Bool( - desc='overwrite output file if it already exists', - argstr='-overwrite') + desc="overwrite output file if it already exists", argstr="-overwrite" + ) class ReHoOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='Voxelwise regional homogeneity map') - out_vals = File(desc='Table of labelwise regional homogenity values') + out_file = File(exists=True, desc="Voxelwise regional homogeneity map") + out_vals = File(desc="Table of labelwise regional homogenity values") class ReHo(AFNICommandBase): @@ -2399,23 +2584,24 @@ class ReHo(AFNICommandBase): >>> res = reho.run() # doctest: +SKIP """ - _cmd = '3dReHo' + + _cmd = "3dReHo" input_spec = ReHoInputSpec output_spec = ReHoOutputSpec def _list_outputs(self): outputs = super(ReHo, self)._list_outputs() if self.inputs.label_set: - outputs['out_vals'] = outputs['out_file'] + '_ROI_reho.vals' + outputs["out_vals"] = outputs["out_file"] + "_ROI_reho.vals" return outputs def _format_arg(self, name, spec, value): _neigh_dict = { - 'faces': 7, - 'edges': 19, - 'vertices': 27, - } - if name == 'neighborhood': + "faces": 7, + "edges": 19, + "vertices": 27, + } + if name == "neighborhood": value = _neigh_dict[value] return super(ReHo, self)._format_arg(name, spec, value) @@ -2423,33 +2609,36 @@ def _format_arg(self, name, spec, value): class ResampleInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dresample', - argstr='-inset %s', + desc="input file to 3dresample", + argstr="-inset %s", position=-1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_resample', - desc='output image file name', - argstr='-prefix %s', - name_source='in_file') - orientation = Str(desc='new orientation code', argstr='-orient %s') + name_template="%s_resample", + desc="output image file name", + argstr="-prefix %s", + name_source="in_file", + ) + orientation = Str(desc="new orientation code", argstr="-orient %s") resample_mode = traits.Enum( - 'NN', - 'Li', - 'Cu', - 'Bk', - argstr='-rmode %s', + "NN", + "Li", + "Cu", + "Bk", + argstr="-rmode %s", desc='resampling method from set {"NN", "Li", "Cu", "Bk"}. These are ' 'for "Nearest Neighbor", "Linear", "Cubic" and "Blocky"' - 'interpolation, respectively. Default is NN.') + "interpolation, respectively. 
Default is NN.", + ) voxel_size = traits.Tuple( *[traits.Float()] * 3, - argstr='-dxyz %f %f %f', - desc='resample to new dx, dy and dz') - master = File( - argstr='-master %s', desc='align dataset grid to a reference file') + argstr="-dxyz %f %f %f", + desc="resample to new dx, dy and dz" + ) + master = File(argstr="-master %s", desc="align dataset grid to a reference file") class Resample(AFNICommand): @@ -2472,7 +2661,7 @@ class Resample(AFNICommand): """ - _cmd = '3dresample' + _cmd = "3dresample" input_spec = ResampleInputSpec output_spec = AFNICommandOutputSpec @@ -2480,29 +2669,33 @@ class Resample(AFNICommand): class TCatInputSpec(AFNICommandInputSpec): in_files = InputMultiPath( File(exists=True), - desc='input file to 3dTcat', - argstr=' %s', + desc="input file to 3dTcat", + argstr=" %s", position=-1, mandatory=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_tcat', - desc='output image file name', - argstr='-prefix %s', - name_source='in_files') + name_template="%s_tcat", + desc="output image file name", + argstr="-prefix %s", + name_source="in_files", + ) rlt = traits.Enum( - '', - '+', - '++', - argstr='-rlt%s', - desc='Remove linear trends in each voxel time series loaded from each ' - 'input dataset, SEPARATELY. Option -rlt removes the least squares ' - 'fit of \'a+b*t\' to each voxel time series. Option -rlt+ adds ' - 'dataset mean back in. Option -rlt++ adds overall mean of all ' - 'dataset timeseries back in.', - position=1) + "", + "+", + "++", + argstr="-rlt%s", + desc="Remove linear trends in each voxel time series loaded from each " + "input dataset, SEPARATELY. Option -rlt removes the least squares " + "fit of 'a+b*t' to each voxel time series. Option -rlt+ adds " + "dataset mean back in. Option -rlt++ adds overall mean of all " + "dataset timeseries back in.", + position=1, + ) verbose = traits.Bool( - desc='Print out some verbose output as the program', argstr='-verb') + desc="Print out some verbose output as the program", argstr="-verb" + ) class TCat(AFNICommand): @@ -2528,7 +2721,7 @@ class TCat(AFNICommand): """ - _cmd = '3dTcat' + _cmd = "3dTcat" input_spec = TCatInputSpec output_spec = AFNICommandOutputSpec @@ -2536,26 +2729,27 @@ class TCat(AFNICommand): class TCatSBInputSpec(AFNICommandInputSpec): in_files = traits.List( traits.Tuple(File(exists=True), Str()), - desc='List of tuples of file names and subbrick selectors as strings.' - 'Don\'t forget to protect the single quotes in the subbrick selector' - 'so the contents are protected from the command line interpreter.', - argstr='%s%s ...', + desc="List of tuples of file names and subbrick selectors as strings." + "Don't forget to protect the single quotes in the subbrick selector" + "so the contents are protected from the command line interpreter.", + argstr="%s%s ...", position=-1, mandatory=True, - copyfile=False) - out_file = File( - desc='output image file name', argstr='-prefix %s', genfile=True) + copyfile=False, + ) + out_file = File(desc="output image file name", argstr="-prefix %s", genfile=True) rlt = traits.Enum( - '', - '+', - '++', - argstr='-rlt%s', - desc='Remove linear trends in each voxel time series loaded from each ' - 'input dataset, SEPARATELY. Option -rlt removes the least squares ' - 'fit of \'a+b*t\' to each voxel time series. Option -rlt+ adds ' - 'dataset mean back in. 
Option -rlt++ adds overall mean of all ' - 'dataset timeseries back in.', - position=1) + "", + "+", + "++", + argstr="-rlt%s", + desc="Remove linear trends in each voxel time series loaded from each " + "input dataset, SEPARATELY. Option -rlt removes the least squares " + "fit of 'a+b*t' to each voxel time series. Option -rlt+ adds " + "dataset mean back in. Option -rlt++ adds overall mean of all " + "dataset timeseries back in.", + position=1, + ) class TCatSubBrick(AFNICommand): @@ -2579,30 +2773,32 @@ class TCatSubBrick(AFNICommand): """ - _cmd = '3dTcat' + _cmd = "3dTcat" input_spec = TCatSBInputSpec output_spec = AFNICommandOutputSpec def _gen_filename(self, name): - if name == 'out_file': - return self._gen_fname(self.inputs.in_files[0][0], suffix='_tcat') + if name == "out_file": + return self._gen_fname(self.inputs.in_files[0][0], suffix="_tcat") class TStatInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dTstat', - argstr='%s', + desc="input file to 3dTstat", + argstr="%s", position=-1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_tstat', - desc='output image file name', - argstr='-prefix %s', - name_source='in_file') - mask = File(desc='mask file', argstr='-mask %s', exists=True) - options = Str(desc='selected statistical output', argstr='%s') + name_template="%s_tstat", + desc="output image file name", + argstr="-prefix %s", + name_source="in_file", + ) + mask = File(desc="mask file", argstr="-mask %s", exists=True) + options = Str(desc="selected statistical output", argstr="%s") class TStat(AFNICommand): @@ -2625,64 +2821,66 @@ class TStat(AFNICommand): """ - _cmd = '3dTstat' + _cmd = "3dTstat" input_spec = TStatInputSpec output_spec = AFNICommandOutputSpec class To3DInputSpec(AFNICommandInputSpec): out_file = File( - name_template='%s', - desc='output image file name', - argstr='-prefix %s', - name_source=['in_folder']) + name_template="%s", + desc="output image file name", + argstr="-prefix %s", + name_source=["in_folder"], + ) in_folder = Directory( - desc='folder with DICOM images to convert', - argstr='%s/*.dcm', + desc="folder with DICOM images to convert", + argstr="%s/*.dcm", position=-1, mandatory=True, - exists=True) + exists=True, + ) filetype = traits.Enum( - 'spgr', - 'fse', - 'epan', - 'anat', - 'ct', - 'spct', - 'pet', - 'mra', - 'bmap', - 'diff', - 'omri', - 'abuc', - 'fim', - 'fith', - 'fico', - 'fitt', - 'fift', - 'fizt', - 'fict', - 'fibt', - 'fibn', - 'figt', - 'fipt', - 'fbuc', - argstr='-%s', - desc='type of datafile being converted') - skipoutliers = traits.Bool( - desc='skip the outliers check', argstr='-skip_outliers') + "spgr", + "fse", + "epan", + "anat", + "ct", + "spct", + "pet", + "mra", + "bmap", + "diff", + "omri", + "abuc", + "fim", + "fith", + "fico", + "fitt", + "fift", + "fizt", + "fict", + "fibt", + "fibn", + "figt", + "fipt", + "fbuc", + argstr="-%s", + desc="type of datafile being converted", + ) + skipoutliers = traits.Bool(desc="skip the outliers check", argstr="-skip_outliers") assumemosaic = traits.Bool( - desc='assume that Siemens image is mosaic', - argstr='-assume_dicom_mosaic') + desc="assume that Siemens image is mosaic", argstr="-assume_dicom_mosaic" + ) datatype = traits.Enum( - 'short', - 'float', - 'byte', - 'complex', - desc='set output file datatype', - argstr='-datum %s') - funcparams = Str( - desc='parameters for functional data', argstr='-time:zt %s alt+z2') + "short", + "float", + "byte", + "complex", + desc="set output file datatype", + 
argstr="-datum %s", + ) + funcparams = Str(desc="parameters for functional data", argstr="-time:zt %s alt+z2") class To3D(AFNICommand): @@ -2706,76 +2904,80 @@ class To3D(AFNICommand): """ - _cmd = 'to3d' + _cmd = "to3d" input_spec = To3DInputSpec output_spec = AFNICommandOutputSpec class UndumpInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dUndump, whose geometry will determine' - 'the geometry of the output', - argstr='-master %s', + desc="input file to 3dUndump, whose geometry will determine" + "the geometry of the output", + argstr="-master %s", position=-1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - desc='output image file name', - argstr='-prefix %s', - name_source='in_file') + desc="output image file name", argstr="-prefix %s", name_source="in_file" + ) mask_file = File( - desc='mask image file name. Only voxels that are nonzero in the mask ' - 'can be set.', - argstr='-mask %s') + desc="mask image file name. Only voxels that are nonzero in the mask " + "can be set.", + argstr="-mask %s", + ) datatype = traits.Enum( - 'short', - 'float', - 'byte', - desc='set output file datatype', - argstr='-datum %s') + "short", "float", "byte", desc="set output file datatype", argstr="-datum %s" + ) default_value = traits.Float( - desc='default value stored in each input voxel that does not have ' - 'a value supplied in the input file', - argstr='-dval %f') + desc="default value stored in each input voxel that does not have " + "a value supplied in the input file", + argstr="-dval %f", + ) fill_value = traits.Float( - desc='value, used for each voxel in the output dataset that is NOT ' - 'listed in the input file', - argstr='-fval %f') + desc="value, used for each voxel in the output dataset that is NOT " + "listed in the input file", + argstr="-fval %f", + ) coordinates_specification = traits.Enum( - 'ijk', - 'xyz', - desc='Coordinates in the input file as index triples (i, j, k) ' - 'or spatial coordinates (x, y, z) in mm', - argstr='-%s') + "ijk", + "xyz", + desc="Coordinates in the input file as index triples (i, j, k) " + "or spatial coordinates (x, y, z) in mm", + argstr="-%s", + ) srad = traits.Float( - desc='radius in mm of the sphere that will be filled about each input ' - '(x,y,z) or (i,j,k) voxel. If the radius is not given, or is 0, ' - 'then each input data line sets the value in only one voxel.', - argstr='-srad %f') + desc="radius in mm of the sphere that will be filled about each input " + "(x,y,z) or (i,j,k) voxel. If the radius is not given, or is 0, " + "then each input data line sets the value in only one voxel.", + argstr="-srad %f", + ) orient = traits.Tuple( - traits.Enum('R', 'L'), - traits.Enum('A', 'P'), - traits.Enum('I', 'S'), - desc='Specifies the coordinate order used by -xyz. ' - 'The code must be 3 letters, one each from the pairs ' - '{R,L} {A,P} {I,S}. The first letter gives the ' - 'orientation of the x-axis, the second the orientation ' - 'of the y-axis, the third the z-axis: ' - 'R = right-to-left L = left-to-right ' - 'A = anterior-to-posterior P = posterior-to-anterior ' - 'I = inferior-to-superior S = superior-to-inferior ' - 'If -orient isn\'t used, then the coordinate order of the ' - '-master (in_file) dataset is used to interpret (x,y,z) inputs.', - argstr='-orient %s') + traits.Enum("R", "L"), + traits.Enum("A", "P"), + traits.Enum("I", "S"), + desc="Specifies the coordinate order used by -xyz. " + "The code must be 3 letters, one each from the pairs " + "{R,L} {A,P} {I,S}. 
The first letter gives the " + "orientation of the x-axis, the second the orientation " + "of the y-axis, the third the z-axis: " + "R = right-to-left L = left-to-right " + "A = anterior-to-posterior P = posterior-to-anterior " + "I = inferior-to-superior S = superior-to-inferior " + "If -orient isn't used, then the coordinate order of the " + "-master (in_file) dataset is used to interpret (x,y,z) inputs.", + argstr="-orient %s", + ) head_only = traits.Bool( - desc='create only the .HEAD file which gets exploited by ' - 'the AFNI matlab library function New_HEAD.m', - argstr='-head_only') + desc="create only the .HEAD file which gets exploited by " + "the AFNI matlab library function New_HEAD.m", + argstr="-head_only", + ) class UndumpOutputSpec(TraitedSpec): - out_file = File(desc='assembled file', exists=True) + out_file = File(desc="assembled file", exists=True) class Undump(AFNICommand): @@ -2815,89 +3017,99 @@ class Undump(AFNICommand): """ - _cmd = '3dUndump' + _cmd = "3dUndump" input_spec = UndumpInputSpec output_spec = UndumpOutputSpec class UnifizeInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dUnifize', - argstr='-input %s', + desc="input file to 3dUnifize", + argstr="-input %s", position=-1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_unifized', - desc='output image file name', - argstr='-prefix %s', - name_source='in_file') + name_template="%s_unifized", + desc="output image file name", + argstr="-prefix %s", + name_source="in_file", + ) t2 = traits.Bool( - desc='Treat the input as if it were T2-weighted, rather than ' - 'T1-weighted. This processing is done simply by inverting ' - 'the image contrast, processing it as if that result were ' - 'T1-weighted, and then re-inverting the results ' - 'counts of voxel overlap, i.e., each voxel will contain the ' - 'number of masks that it is set in.', - argstr='-T2') + desc="Treat the input as if it were T2-weighted, rather than " + "T1-weighted. This processing is done simply by inverting " + "the image contrast, processing it as if that result were " + "T1-weighted, and then re-inverting the results " + "counts of voxel overlap, i.e., each voxel will contain the " + "number of masks that it is set in.", + argstr="-T2", + ) gm = traits.Bool( - desc='Also scale to unifize \'gray matter\' = lower intensity voxels ' - '(to aid in registering images from different scanners).', - argstr='-GM') + desc="Also scale to unifize 'gray matter' = lower intensity voxels " + "(to aid in registering images from different scanners).", + argstr="-GM", + ) urad = traits.Float( - desc='Sets the radius (in voxels) of the ball used for the sneaky ' - 'trick. Default value is 18.3, and should be changed ' - 'proportionally if the dataset voxel size differs significantly ' - 'from 1 mm.', - argstr='-Urad %s') + desc="Sets the radius (in voxels) of the ball used for the sneaky " + "trick. 
Default value is 18.3, and should be changed " + "proportionally if the dataset voxel size differs significantly " + "from 1 mm.", + argstr="-Urad %s", + ) scale_file = File( - desc='output file name to save the scale factor used at each voxel ', - argstr='-ssave %s') + desc="output file name to save the scale factor used at each voxel ", + argstr="-ssave %s", + ) no_duplo = traits.Bool( - desc='Do NOT use the \'duplo down\' step; this can be useful for ' - 'lower resolution datasets.', - argstr='-noduplo') + desc="Do NOT use the 'duplo down' step; this can be useful for " + "lower resolution datasets.", + argstr="-noduplo", + ) epi = traits.Bool( - desc='Assume the input dataset is a T2 (or T2*) weighted EPI time ' - 'series. After computing the scaling, apply it to ALL volumes ' - '(TRs) in the input dataset. That is, a given voxel will be ' - 'scaled by the same factor at each TR. ' - 'This option also implies \'-noduplo\' and \'-T2\'.' - 'This option turns off \'-GM\' if you turned it on.', - argstr='-EPI', - requires=['no_duplo', 't2'], - xor=['gm']) + desc="Assume the input dataset is a T2 (or T2*) weighted EPI time " + "series. After computing the scaling, apply it to ALL volumes " + "(TRs) in the input dataset. That is, a given voxel will be " + "scaled by the same factor at each TR. " + "This option also implies '-noduplo' and '-T2'." + "This option turns off '-GM' if you turned it on.", + argstr="-EPI", + requires=["no_duplo", "t2"], + xor=["gm"], + ) rbt = traits.Tuple( traits.Float(), traits.Float(), traits.Float(), - desc='Option for AFNI experts only.' - 'Specify the 3 parameters for the algorithm:\n' - 'R = radius; same as given by option \'-Urad\', [default=18.3]\n' - 'b = bottom percentile of normalizing data range, [default=70.0]\n' - 'r = top percentile of normalizing data range, [default=80.0]\n', - argstr='-rbt %f %f %f') + desc="Option for AFNI experts only." + "Specify the 3 parameters for the algorithm:\n" + "R = radius; same as given by option '-Urad', [default=18.3]\n" + "b = bottom percentile of normalizing data range, [default=70.0]\n" + "r = top percentile of normalizing data range, [default=80.0]\n", + argstr="-rbt %f %f %f", + ) t2_up = traits.Float( - desc='Option for AFNI experts only.' - 'Set the upper percentile point used for T2-T1 inversion. ' - 'Allowed to be anything between 90 and 100 (inclusive), with ' - 'default to 98.5 (for no good reason).', - argstr='-T2up %f') + desc="Option for AFNI experts only." + "Set the upper percentile point used for T2-T1 inversion. " + "Allowed to be anything between 90 and 100 (inclusive), with " + "default to 98.5 (for no good reason).", + argstr="-T2up %f", + ) cl_frac = traits.Float( - desc='Option for AFNI experts only.' - 'Set the automask \'clip level fraction\'. Must be between ' - '0.1 and 0.9. A small fraction means to make the initial ' - 'threshold for clipping (a la 3dClipLevel) smaller, which ' - 'will tend to make the mask larger. [default=0.1]', - argstr='-clfrac %f') - quiet = traits.Bool( - desc='Don\'t print the progress messages.', argstr='-quiet') + desc="Option for AFNI experts only." + "Set the automask 'clip level fraction'. Must be between " + "0.1 and 0.9. A small fraction means to make the initial " + "threshold for clipping (a la 3dClipLevel) smaller, which " + "will tend to make the mask larger. 
[default=0.1]", + argstr="-clfrac %f", + ) + quiet = traits.Bool(desc="Don't print the progress messages.", argstr="-quiet") class UnifizeOutputSpec(TraitedSpec): - scale_file = File(desc='scale factor file') - out_file = File(desc='unifized file', exists=True) + scale_file = File(desc="scale factor file") + out_file = File(desc="unifized file", exists=True) class Unifize(AFNICommand): @@ -2944,25 +3156,27 @@ class Unifize(AFNICommand): """ - _cmd = '3dUnifize' + _cmd = "3dUnifize" input_spec = UnifizeInputSpec output_spec = UnifizeOutputSpec class ZCutUpInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3dZcutup', - argstr='%s', + desc="input file to 3dZcutup", + argstr="%s", position=-1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_zcutup', - desc='output image file name', - argstr='-prefix %s', - name_source='in_file') - keep = Str(desc='slice range to keep in output', argstr='-keep %s') + name_template="%s_zcutup", + desc="output image file name", + argstr="-prefix %s", + name_source="in_file", + ) + keep = Str(desc="slice range to keep in output", argstr="-keep %s") class ZCutUp(AFNICommand): @@ -2985,36 +3199,38 @@ class ZCutUp(AFNICommand): """ - _cmd = '3dZcutup' + _cmd = "3dZcutup" input_spec = ZCutUpInputSpec output_spec = AFNICommandOutputSpec class GCORInputSpec(CommandLineInputSpec): in_file = File( - desc='input dataset to compute the GCOR over', - argstr='-input %s', + desc="input dataset to compute the GCOR over", + argstr="-input %s", position=-1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) mask = File( - desc='mask dataset, for restricting the computation', - argstr='-mask %s', + desc="mask dataset, for restricting the computation", + argstr="-mask %s", exists=True, - copyfile=False) + copyfile=False, + ) nfirst = traits.Int( - 0, argstr='-nfirst %d', desc='specify number of initial TRs to ignore') + 0, argstr="-nfirst %d", desc="specify number of initial TRs to ignore" + ) no_demean = traits.Bool( - False, - argstr='-no_demean', - desc='do not (need to) demean as first step') + False, argstr="-no_demean", desc="do not (need to) demean as first step" + ) class GCOROutputSpec(TraitedSpec): - out = traits.Float(desc='global correlation value') + out = traits.Float(desc="global correlation value") class GCOR(CommandLine): @@ -3039,7 +3255,7 @@ class GCOR(CommandLine): """ - _cmd = '@compute_gcor' + _cmd = "@compute_gcor" input_spec = GCORInputSpec output_spec = GCOROutputSpec @@ -3047,48 +3263,54 @@ def _run_interface(self, runtime): runtime = super(GCOR, self)._run_interface(runtime) gcor_line = [ - line.strip() for line in runtime.stdout.split('\n') - if line.strip().startswith('GCOR = ') + line.strip() + for line in runtime.stdout.split("\n") + if line.strip().startswith("GCOR = ") ][-1] - setattr(self, '_gcor', float(gcor_line[len('GCOR = '):])) + setattr(self, "_gcor", float(gcor_line[len("GCOR = ") :])) return runtime def _list_outputs(self): - return {'out': getattr(self, '_gcor')} + return {"out": getattr(self, "_gcor")} class AxializeInputSpec(AFNICommandInputSpec): in_file = File( - desc='input file to 3daxialize', - argstr='%s', + desc="input file to 3daxialize", + argstr="%s", position=-2, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_axialize', - desc='output image file name', - argstr='-prefix %s', - name_source='in_file') - verb = traits.Bool(desc='Print out a progerss report', argstr='-verb') + 
name_template="%s_axialize", + desc="output image file name", + argstr="-prefix %s", + name_source="in_file", + ) + verb = traits.Bool(desc="Print out a progerss report", argstr="-verb") sagittal = traits.Bool( - desc='Do sagittal slice order [-orient ASL]', - argstr='-sagittal', - xor=['coronal', 'axial']) + desc="Do sagittal slice order [-orient ASL]", + argstr="-sagittal", + xor=["coronal", "axial"], + ) coronal = traits.Bool( - desc='Do coronal slice order [-orient RSA]', - argstr='-coronal', - xor=['sagittal', 'axial']) + desc="Do coronal slice order [-orient RSA]", + argstr="-coronal", + xor=["sagittal", "axial"], + ) axial = traits.Bool( - desc='Do axial slice order [-orient RAI]' - 'This is the default AFNI axial order, and' - 'is the one currently required by the' - 'volume rendering plugin; this is also' - 'the default orientation output by this' + desc="Do axial slice order [-orient RAI]" + "This is the default AFNI axial order, and" + "is the one currently required by the" + "volume rendering plugin; this is also" + "the default orientation output by this" "program (hence the program's name).", - argstr='-axial', - xor=['coronal', 'sagittal']) - orientation = Str(desc='new orientation code', argstr='-orient %s') + argstr="-axial", + xor=["coronal", "sagittal"], + ) + orientation = Str(desc="new orientation code", argstr="-orient %s") class Axialize(AFNICommand): @@ -3110,48 +3332,53 @@ class Axialize(AFNICommand): """ - _cmd = '3daxialize' + _cmd = "3daxialize" input_spec = AxializeInputSpec output_spec = AFNICommandOutputSpec class ZcatInputSpec(AFNICommandInputSpec): in_files = InputMultiPath( - File(desc='input files to 3dZcat', exists=True), - argstr='%s', + File(desc="input files to 3dZcat", exists=True), + argstr="%s", position=-1, mandatory=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='%s_zcat', - desc='output dataset prefix name (default \'zcat\')', - argstr='-prefix %s', - name_source='in_files') + name_template="%s_zcat", + desc="output dataset prefix name (default 'zcat')", + argstr="-prefix %s", + name_source="in_files", + ) datum = traits.Enum( - 'byte', - 'short', - 'float', - argstr='-datum %s', - desc='specify data type for output. Valid types are \'byte\', ' - '\'short\' and \'float\'.') + "byte", + "short", + "float", + argstr="-datum %s", + desc="specify data type for output. Valid types are 'byte', " + "'short' and 'float'.", + ) verb = traits.Bool( - desc='print out some verbositiness as the program proceeds.', - argstr='-verb') + desc="print out some verbositiness as the program proceeds.", argstr="-verb" + ) fscale = traits.Bool( - desc='Force scaling of the output to the maximum integer ' - 'range. This only has effect if the output datum is ' - 'byte or short (either forced or defaulted). This ' - 'option is sometimes necessary to eliminate ' - 'unpleasant truncation artifacts.', - argstr='-fscale', - xor=['nscale']) + desc="Force scaling of the output to the maximum integer " + "range. This only has effect if the output datum is " + "byte or short (either forced or defaulted). This " + "option is sometimes necessary to eliminate " + "unpleasant truncation artifacts.", + argstr="-fscale", + xor=["nscale"], + ) nscale = traits.Bool( - desc='Don\'t do any scaling on output to byte or short ' - 'datasets. 
This may be especially useful when ' - 'operating on mask datasets whose output values ' - 'are only 0\'s and 1\'s.', - argstr='-nscale', - xor=['fscale']) + desc="Don't do any scaling on output to byte or short " + "datasets. This may be especially useful when " + "operating on mask datasets whose output values " + "are only 0's and 1's.", + argstr="-nscale", + xor=["fscale"], + ) class Zcat(AFNICommand): @@ -3173,88 +3400,98 @@ class Zcat(AFNICommand): >>> res = zcat.run() # doctest: +SKIP """ - _cmd = '3dZcat' + _cmd = "3dZcat" input_spec = ZcatInputSpec output_spec = AFNICommandOutputSpec class ZeropadInputSpec(AFNICommandInputSpec): in_files = File( - desc='input dataset', - argstr='%s', + desc="input dataset", + argstr="%s", position=-1, mandatory=True, exists=True, - copyfile=False) + copyfile=False, + ) out_file = File( - name_template='zeropad', - desc='output dataset prefix name (default \'zeropad\')', - argstr='-prefix %s') + name_template="zeropad", + desc="output dataset prefix name (default 'zeropad')", + argstr="-prefix %s", + ) I = traits.Int( - desc='adds \'n\' planes of zero at the Inferior edge', - argstr='-I %i', - xor=['master']) + desc="adds 'n' planes of zero at the Inferior edge", + argstr="-I %i", + xor=["master"], + ) S = traits.Int( - desc='adds \'n\' planes of zero at the Superior edge', - argstr='-S %i', - xor=['master']) + desc="adds 'n' planes of zero at the Superior edge", + argstr="-S %i", + xor=["master"], + ) A = traits.Int( - desc='adds \'n\' planes of zero at the Anterior edge', - argstr='-A %i', - xor=['master']) + desc="adds 'n' planes of zero at the Anterior edge", + argstr="-A %i", + xor=["master"], + ) P = traits.Int( - desc='adds \'n\' planes of zero at the Posterior edge', - argstr='-P %i', - xor=['master']) + desc="adds 'n' planes of zero at the Posterior edge", + argstr="-P %i", + xor=["master"], + ) L = traits.Int( - desc='adds \'n\' planes of zero at the Left edge', - argstr='-L %i', - xor=['master']) + desc="adds 'n' planes of zero at the Left edge", argstr="-L %i", xor=["master"] + ) R = traits.Int( - desc='adds \'n\' planes of zero at the Right edge', - argstr='-R %i', - xor=['master']) + desc="adds 'n' planes of zero at the Right edge", argstr="-R %i", xor=["master"] + ) z = traits.Int( - desc='adds \'n\' planes of zero on EACH of the ' - 'dataset z-axis (slice-direction) faces', - argstr='-z %i', - xor=['master']) + desc="adds 'n' planes of zero on EACH of the " + "dataset z-axis (slice-direction) faces", + argstr="-z %i", + xor=["master"], + ) RL = traits.Int( - desc='specify that planes should be added or cut ' - 'symmetrically to make the resulting volume have' - 'N slices in the right-left direction', - argstr='-RL %i', - xor=['master']) + desc="specify that planes should be added or cut " + "symmetrically to make the resulting volume have" + "N slices in the right-left direction", + argstr="-RL %i", + xor=["master"], + ) AP = traits.Int( - desc='specify that planes should be added or cut ' - 'symmetrically to make the resulting volume have' - 'N slices in the anterior-posterior direction', - argstr='-AP %i', - xor=['master']) + desc="specify that planes should be added or cut " + "symmetrically to make the resulting volume have" + "N slices in the anterior-posterior direction", + argstr="-AP %i", + xor=["master"], + ) IS = traits.Int( - desc='specify that planes should be added or cut ' - 'symmetrically to make the resulting volume have' - 'N slices in the inferior-superior direction', - argstr='-IS %i', - xor=['master']) + 
desc="specify that planes should be added or cut " + "symmetrically to make the resulting volume have" + "N slices in the inferior-superior direction", + argstr="-IS %i", + xor=["master"], + ) mm = traits.Bool( - desc='pad counts \'n\' are in mm instead of slices, ' - 'where each \'n\' is an integer and at least \'n\' ' - 'mm of slices will be added/removed; e.g., n = 3 ' - 'and slice thickness = 2.5 mm ==> 2 slices added', - argstr='-mm', - xor=['master']) + desc="pad counts 'n' are in mm instead of slices, " + "where each 'n' is an integer and at least 'n' " + "mm of slices will be added/removed; e.g., n = 3 " + "and slice thickness = 2.5 mm ==> 2 slices added", + argstr="-mm", + xor=["master"], + ) master = File( - desc='match the volume described in dataset ' - '\'mset\', where mset must have the same ' - 'orientation and grid spacing as dataset to be ' - 'padded. the goal of -master is to make the ' - 'output dataset from 3dZeropad match the ' - 'spatial \'extents\' of mset by adding or ' - 'subtracting slices as needed. You can\'t use ' - '-I,-S,..., or -mm with -master', - argstr='-master %s', - xor=['I', 'S', 'A', 'P', 'L', 'R', 'z', 'RL', 'AP', 'IS', 'mm']) + desc="match the volume described in dataset " + "'mset', where mset must have the same " + "orientation and grid spacing as dataset to be " + "padded. the goal of -master is to make the " + "output dataset from 3dZeropad match the " + "spatial 'extents' of mset by adding or " + "subtracting slices as needed. You can't use " + "-I,-S,..., or -mm with -master", + argstr="-master %s", + xor=["I", "S", "A", "P", "L", "R", "z", "RL", "AP", "IS", "mm"], + ) class Zeropad(AFNICommand): @@ -3281,6 +3518,6 @@ class Zeropad(AFNICommand): >>> res = zeropad.run() # doctest: +SKIP """ - _cmd = '3dZeropad' + _cmd = "3dZeropad" input_spec = ZeropadInputSpec output_spec = AFNICommandOutputSpec diff --git a/nipype/interfaces/ants/__init__.py b/nipype/interfaces/ants/__init__.py index fa441944a2..389a5f1371 100644 --- a/nipype/interfaces/ants/__init__.py +++ b/nipype/interfaces/ants/__init__.py @@ -4,23 +4,44 @@ """Top-level namespace for ants.""" # Registraiton programs -from .registration import (ANTS, Registration, RegistrationSynQuick, - CompositeTransformUtil, MeasureImageSimilarity) +from .registration import ( + ANTS, + Registration, + RegistrationSynQuick, + CompositeTransformUtil, + MeasureImageSimilarity, +) # Resampling Programs -from .resampling import (ApplyTransforms, ApplyTransformsToPoints, - WarpImageMultiTransform, - WarpTimeSeriesImageMultiTransform) +from .resampling import ( + ApplyTransforms, + ApplyTransformsToPoints, + WarpImageMultiTransform, + WarpTimeSeriesImageMultiTransform, +) # Segmentation Programs -from .segmentation import (Atropos, LaplacianThickness, N4BiasFieldCorrection, - JointFusion, CorticalThickness, BrainExtraction, - DenoiseImage, AntsJointFusion) +from .segmentation import ( + Atropos, + LaplacianThickness, + N4BiasFieldCorrection, + JointFusion, + CorticalThickness, + BrainExtraction, + DenoiseImage, + AntsJointFusion, +) # Visualization Programs from .visualization import ConvertScalarImageToRGB, CreateTiledMosaic # Utility Programs -from .utils import (AverageAffineTransform, AverageImages, MultiplyImages, - CreateJacobianDeterminantImage, AffineInitializer, - ComposeMultiTransform, LabelGeometry) +from .utils import ( + AverageAffineTransform, + AverageImages, + MultiplyImages, + CreateJacobianDeterminantImage, + AffineInitializer, + ComposeMultiTransform, + LabelGeometry, +) diff --git 
a/nipype/interfaces/ants/base.py b/nipype/interfaces/ants/base.py index e27cfc0920..4b5e5ef8db 100644 --- a/nipype/interfaces/ants/base.py +++ b/nipype/interfaces/ants/base.py @@ -6,9 +6,9 @@ # Local imports from ... import logging, LooseVersion -from ..base import (CommandLine, CommandLineInputSpec, traits, isdefined, - PackageInfo) -iflogger = logging.getLogger('nipype.interface') +from ..base import CommandLine, CommandLineInputSpec, traits, isdefined, PackageInfo + +iflogger = logging.getLogger("nipype.interface") # -Using -1 gives primary responsibilty to ITKv4 to do the correct # thread limitings. @@ -22,32 +22,34 @@ # ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS takes precidence. # This behavior states that you the user explicitly specifies # num_threads, then respect that no matter what SGE tries to limit. -PREFERED_ITKv4_THREAD_LIMIT_VARIABLE = 'NSLOTS' -ALT_ITKv4_THREAD_LIMIT_VARIABLE = 'ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS' +PREFERED_ITKv4_THREAD_LIMIT_VARIABLE = "NSLOTS" +ALT_ITKv4_THREAD_LIMIT_VARIABLE = "ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS" class Info(PackageInfo): - version_cmd = os.path.join(os.getenv('ANTSPATH', ''), - 'antsRegistration') + ' --version' + version_cmd = ( + os.path.join(os.getenv("ANTSPATH", ""), "antsRegistration") + " --version" + ) @staticmethod def parse_version(raw_info): for line in raw_info.splitlines(): - if line.startswith('ANTs Version: '): + if line.startswith("ANTs Version: "): v_string = line.split()[2] break else: return None # -githash may or may not be appended - v_string = v_string.split('-')[0] + v_string = v_string.split("-")[0] # 2.2.0-equivalent version string - if 'post' in v_string and \ - LooseVersion(v_string) >= LooseVersion('2.1.0.post789'): - return '2.2.0' + if "post" in v_string and LooseVersion(v_string) >= LooseVersion( + "2.1.0.post789" + ): + return "2.2.0" else: - return '.'.join(v_string.split('.')[:3]) + return ".".join(v_string.split(".")[:3]) class ANTSCommandInputSpec(CommandLineInputSpec): @@ -58,7 +60,8 @@ class ANTSCommandInputSpec(CommandLineInputSpec): LOCAL_DEFAULT_NUMBER_OF_THREADS, usedefault=True, nohash=True, - desc="Number of ITK threads to use") + desc="Number of ITK threads to use", + ) class ANTSCommand(CommandLine): @@ -70,7 +73,7 @@ class ANTSCommand(CommandLine): def __init__(self, **inputs): super(ANTSCommand, self).__init__(**inputs) - self.inputs.on_trait_change(self._num_threads_update, 'num_threads') + self.inputs.on_trait_change(self._num_threads_update, "num_threads") if not isdefined(self.inputs.num_threads): self.inputs.num_threads = self._num_threads @@ -88,22 +91,21 @@ def _num_threads_update(self): # default behavior should be the one specified by ITKv4 rules # (i.e. 
respect SGE $NSLOTS or environmental variables of threads, or # user environmental settings) - if (self.inputs.num_threads == -1): - if (ALT_ITKv4_THREAD_LIMIT_VARIABLE in self.inputs.environ): + if self.inputs.num_threads == -1: + if ALT_ITKv4_THREAD_LIMIT_VARIABLE in self.inputs.environ: del self.inputs.environ[ALT_ITKv4_THREAD_LIMIT_VARIABLE] - if (PREFERED_ITKv4_THREAD_LIMIT_VARIABLE in self.inputs.environ): + if PREFERED_ITKv4_THREAD_LIMIT_VARIABLE in self.inputs.environ: del self.inputs.environ[PREFERED_ITKv4_THREAD_LIMIT_VARIABLE] else: - self.inputs.environ.update({ - PREFERED_ITKv4_THREAD_LIMIT_VARIABLE: - '%s' % self.inputs.num_threads - }) + self.inputs.environ.update( + {PREFERED_ITKv4_THREAD_LIMIT_VARIABLE: "%s" % self.inputs.num_threads} + ) @staticmethod def _format_xarray(val): """ Convenience method for converting input arrays [1,2,3] to commandline format '1x2x3' """ - return 'x'.join([str(x) for x in val]) + return "x".join([str(x) for x in val]) @classmethod def set_default_num_threads(cls, num_threads): diff --git a/nipype/interfaces/ants/registration.py b/nipype/interfaces/ants/registration.py index 2b85ea5923..6aee26655e 100644 --- a/nipype/interfaces/ants/registration.py +++ b/nipype/interfaces/ants/registration.py @@ -11,23 +11,23 @@ class ANTSInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( - 3, - 2, - argstr='%d', - position=1, - desc='image dimension (2 or 3)') + 3, 2, argstr="%d", position=1, desc="image dimension (2 or 3)" + ) fixed_image = InputMultiPath( File(exists=True), mandatory=True, - desc=('image to which the moving image is ' - 'warped')) + desc=("image to which the moving image is " "warped"), + ) moving_image = InputMultiPath( File(exists=True), - argstr='%s', + argstr="%s", mandatory=True, - desc=('image to apply transformation to ' - '(generally a coregistered' - 'functional)')) + desc=( + "image to apply transformation to " + "(generally a coregistered" + "functional)" + ), + ) # Not all metrics are appropriate for all modalities. Also, not all metrics # are efficeint or appropriate at all resolution levels, Some metrics @@ -50,75 +50,80 @@ class ANTSInputSpec(ANTSCommandInputSpec): # # Cost = Sum_i ( metricweight[i] Metric_i ( fixedimage[i], movingimage[i]) ) metric = traits.List( - traits.Enum('CC', 'MI', 'SMI', 'PR', 'SSD', 'MSQ', 'PSE'), + traits.Enum("CC", "MI", "SMI", "PR", "SSD", "MSQ", "PSE"), mandatory=True, - desc='') + desc="", + ) metric_weight = traits.List( traits.Float(), value=[1.0], usedefault=True, - requires=['metric'], + requires=["metric"], mandatory=True, - desc='the metric weight(s) for each stage. ' - 'The weights must sum to 1 per stage.') + desc="the metric weight(s) for each stage. " + "The weights must sum to 1 per stage.", + ) radius = traits.List( traits.Int(), - requires=['metric'], + requires=["metric"], mandatory=True, - desc='radius of the region (i.e. number of layers around a voxel/pixel)' - ' that is used for computing cross correlation') + desc="radius of the region (i.e. 
number of layers around a voxel/pixel)" + " that is used for computing cross correlation", + ) output_transform_prefix = Str( - 'out', - usedefault=True, - argstr='--output-naming %s', - mandatory=True, - desc='') + "out", usedefault=True, argstr="--output-naming %s", mandatory=True, desc="" + ) transformation_model = traits.Enum( - 'Diff', - 'Elast', - 'Exp', - 'Greedy Exp', - 'SyN', - argstr='%s', + "Diff", + "Elast", + "Exp", + "Greedy Exp", + "SyN", + argstr="%s", mandatory=True, - desc='') - gradient_step_length = traits.Float( - requires=['transformation_model'], desc='') - number_of_time_steps = traits.Float( - requires=['gradient_step_length'], desc='') - delta_time = traits.Float(requires=['number_of_time_steps'], desc='') - symmetry_type = traits.Float(requires=['delta_time'], desc='') + desc="", + ) + gradient_step_length = traits.Float(requires=["transformation_model"], desc="") + number_of_time_steps = traits.Float(requires=["gradient_step_length"], desc="") + delta_time = traits.Float(requires=["number_of_time_steps"], desc="") + symmetry_type = traits.Float(requires=["delta_time"], desc="") use_histogram_matching = traits.Bool( - argstr='%s', default_value=True, usedefault=True) + argstr="%s", default_value=True, usedefault=True + ) number_of_iterations = traits.List( - traits.Int(), argstr='--number-of-iterations %s', sep='x') + traits.Int(), argstr="--number-of-iterations %s", sep="x" + ) smoothing_sigmas = traits.List( - traits.Int(), argstr='--gaussian-smoothing-sigmas %s', sep='x') + traits.Int(), argstr="--gaussian-smoothing-sigmas %s", sep="x" + ) subsampling_factors = traits.List( - traits.Int(), argstr='--subsampling-factors %s', sep='x') - affine_gradient_descent_option = traits.List(traits.Float(), argstr='%s') + traits.Int(), argstr="--subsampling-factors %s", sep="x" + ) + affine_gradient_descent_option = traits.List(traits.Float(), argstr="%s") - mi_option = traits.List(traits.Int(), argstr='--MI-option %s', sep='x') - regularization = traits.Enum('Gauss', 'DMFFD', argstr='%s', desc='') + mi_option = traits.List(traits.Int(), argstr="--MI-option %s", sep="x") + regularization = traits.Enum("Gauss", "DMFFD", argstr="%s", desc="") regularization_gradient_field_sigma = traits.Float( - requires=['regularization'], desc='') + requires=["regularization"], desc="" + ) regularization_deformation_field_sigma = traits.Float( - requires=['regularization'], desc='') + requires=["regularization"], desc="" + ) number_of_affine_iterations = traits.List( - traits.Int(), argstr='--number-of-affine-iterations %s', sep='x') + traits.Int(), argstr="--number-of-affine-iterations %s", sep="x" + ) class ANTSOutputSpec(TraitedSpec): - affine_transform = File(exists=True, desc='Affine transform file') - warp_transform = File(exists=True, desc='Warping deformation field') - inverse_warp_transform = File( - exists=True, desc='Inverse warping deformation field') - metaheader = File(exists=True, desc='VTK metaheader .mhd file') - metaheader_raw = File(exists=True, desc='VTK metaheader .raw file') + affine_transform = File(exists=True, desc="Affine transform file") + warp_transform = File(exists=True, desc="Warping deformation field") + inverse_warp_transform = File(exists=True, desc="Inverse warping deformation field") + metaheader = File(exists=True, desc="VTK metaheader .mhd file") + metaheader_raw = File(exists=True, desc="VTK metaheader .raw file") class ANTS(ANTSCommand): @@ -151,26 +156,32 @@ class ANTS(ANTSCommand): 10000x10000x10000x10000x10000 --number-of-iterations 50x35x15 
--output-naming MY --regularization Gauss[3.0,0.0] \ --transformation-model SyN[0.25] --use-Histogram-Matching 1' """ - _cmd = 'ANTS' + + _cmd = "ANTS" input_spec = ANTSInputSpec output_spec = ANTSOutputSpec def _image_metric_constructor(self): retval = [] - intensity_based = ['CC', 'MI', 'SMI', 'PR', 'SSD', 'MSQ'] - point_set_based = ['PSE', 'JTB'] + intensity_based = ["CC", "MI", "SMI", "PR", "SSD", "MSQ"] + point_set_based = ["PSE", "JTB"] for ii in range(len(self.inputs.moving_image)): if self.inputs.metric[ii] in intensity_based: retval.append( - '--image-metric %s[ %s, %s, %g, %d ]' % - (self.inputs.metric[ii], self.inputs.fixed_image[ii], - self.inputs.moving_image[ii], - self.inputs.metric_weight[ii], self.inputs.radius[ii])) + "--image-metric %s[ %s, %s, %g, %d ]" + % ( + self.inputs.metric[ii], + self.inputs.fixed_image[ii], + self.inputs.moving_image[ii], + self.inputs.metric_weight[ii], + self.inputs.radius[ii], + ) + ) elif self.inputs.metric[ii] == point_set_based: pass # retval.append('--image-metric %s[%s, %s, ...'.format(self.inputs.metric[ii], # self.inputs.fixed_image[ii], self.inputs.moving_image[ii], ...)) - return ' '.join(retval) + return " ".join(retval) def _transformation_constructor(self): model = self.inputs.transformation_model @@ -178,62 +189,67 @@ def _transformation_constructor(self): time_step = self.inputs.number_of_time_steps delta_time = self.inputs.delta_time symmetry_type = self.inputs.symmetry_type - retval = ['--transformation-model %s' % model] + retval = ["--transformation-model %s" % model] parameters = [] for elem in (step_length, time_step, delta_time, symmetry_type): if elem is not traits.Undefined: - parameters.append('%#.2g' % elem) + parameters.append("%#.2g" % elem) if len(parameters) > 0: if len(parameters) > 1: - parameters = ','.join(parameters) + parameters = ",".join(parameters) else: - parameters = ''.join(parameters) - retval.append('[%s]' % parameters) - return ''.join(retval) + parameters = "".join(parameters) + retval.append("[%s]" % parameters) + return "".join(retval) def _regularization_constructor(self): - return '--regularization {0}[{1},{2}]'.format( + return "--regularization {0}[{1},{2}]".format( self.inputs.regularization, self.inputs.regularization_gradient_field_sigma, - self.inputs.regularization_deformation_field_sigma) + self.inputs.regularization_deformation_field_sigma, + ) def _affine_gradient_descent_option_constructor(self): values = self.inputs.affine_gradient_descent_option - defaults = [0.1, 0.5, 1.e-4, 1.e-4] + defaults = [0.1, 0.5, 1.0e-4, 1.0e-4] for ii in range(len(defaults)): try: defaults[ii] = values[ii] except IndexError: break parameters = self._format_xarray( - [('%g' % defaults[index]) for index in range(4)]) - retval = ['--affine-gradient-descent-option', parameters] - return ' '.join(retval) + [("%g" % defaults[index]) for index in range(4)] + ) + retval = ["--affine-gradient-descent-option", parameters] + return " ".join(retval) def _format_arg(self, opt, spec, val): - if opt == 'moving_image': + if opt == "moving_image": return self._image_metric_constructor() - elif opt == 'transformation_model': + elif opt == "transformation_model": return self._transformation_constructor() - elif opt == 'regularization': + elif opt == "regularization": return self._regularization_constructor() - elif opt == 'affine_gradient_descent_option': + elif opt == "affine_gradient_descent_option": return self._affine_gradient_descent_option_constructor() - elif opt == 'use_histogram_matching': + elif opt == 
"use_histogram_matching": if self.inputs.use_histogram_matching: - return '--use-Histogram-Matching 1' + return "--use-Histogram-Matching 1" else: - return '--use-Histogram-Matching 0' + return "--use-Histogram-Matching 0" return super(ANTS, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() - outputs['affine_transform'] = os.path.abspath( - self.inputs.output_transform_prefix + 'Affine.txt') - outputs['warp_transform'] = os.path.abspath( - self.inputs.output_transform_prefix + 'Warp.nii.gz') - outputs['inverse_warp_transform'] = os.path.abspath( - self.inputs.output_transform_prefix + 'InverseWarp.nii.gz') + outputs["affine_transform"] = os.path.abspath( + self.inputs.output_transform_prefix + "Affine.txt" + ) + outputs["warp_transform"] = os.path.abspath( + self.inputs.output_transform_prefix + "Warp.nii.gz" + ) + outputs["inverse_warp_transform"] = os.path.abspath( + self.inputs.output_transform_prefix + "InverseWarp.nii.gz" + ) # outputs['metaheader'] = os.path.abspath(self.inputs.output_transform_prefix + 'velocity.mhd') # outputs['metaheader_raw'] = os.path.abspath(self.inputs.output_transform_prefix + 'velocity.raw') return outputs @@ -243,198 +259,231 @@ class RegistrationInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( 3, 2, - argstr='--dimensionality %d', + argstr="--dimensionality %d", usedefault=True, - desc='image dimension (2 or 3)') + desc="image dimension (2 or 3)", + ) fixed_image = InputMultiPath( File(exists=True), mandatory=True, - desc='Image to which the moving_image should be transformed' - '(usually a structural image)') + desc="Image to which the moving_image should be transformed" + "(usually a structural image)", + ) fixed_image_mask = File( exists=True, - argstr='%s', - max_ver='2.1.0', - xor=['fixed_image_masks'], - desc='Mask used to limit metric sampling region of the fixed image' - 'in all stages') + argstr="%s", + max_ver="2.1.0", + xor=["fixed_image_masks"], + desc="Mask used to limit metric sampling region of the fixed image" + "in all stages", + ) fixed_image_masks = InputMultiPath( - traits.Either('NULL', File(exists=True)), - min_ver='2.2.0', - xor=['fixed_image_mask'], - desc= - 'Masks used to limit metric sampling region of the fixed image, defined per registration stage' - '(Use "NULL" to omit a mask at a given stage)') + traits.Either("NULL", File(exists=True)), + min_ver="2.2.0", + xor=["fixed_image_mask"], + desc="Masks used to limit metric sampling region of the fixed image, defined per registration stage" + '(Use "NULL" to omit a mask at a given stage)', + ) moving_image = InputMultiPath( File(exists=True), mandatory=True, - desc= - 'Image that will be registered to the space of fixed_image. This is the' - 'image on which the transformations will be applied to') + desc="Image that will be registered to the space of fixed_image. 
This is the" + "image on which the transformations will be applied to", + ) moving_image_mask = File( exists=True, - requires=['fixed_image_mask'], - max_ver='2.1.0', - xor=['moving_image_masks'], - desc='mask used to limit metric sampling region of the moving image' - 'in all stages') + requires=["fixed_image_mask"], + max_ver="2.1.0", + xor=["moving_image_masks"], + desc="mask used to limit metric sampling region of the moving image" + "in all stages", + ) moving_image_masks = InputMultiPath( - traits.Either('NULL', File(exists=True)), - min_ver='2.2.0', - xor=['moving_image_mask'], - desc= - 'Masks used to limit metric sampling region of the moving image, defined per registration stage' - '(Use "NULL" to omit a mask at a given stage)') + traits.Either("NULL", File(exists=True)), + min_ver="2.2.0", + xor=["moving_image_mask"], + desc="Masks used to limit metric sampling region of the moving image, defined per registration stage" + '(Use "NULL" to omit a mask at a given stage)', + ) save_state = File( - argstr='--save-state %s', + argstr="--save-state %s", exists=False, - desc= - 'Filename for saving the internal restorable state of the registration' + desc="Filename for saving the internal restorable state of the registration", ) restore_state = File( - argstr='--restore-state %s', + argstr="--restore-state %s", exists=True, - desc= - 'Filename for restoring the internal restorable state of the registration' + desc="Filename for restoring the internal restorable state of the registration", ) initial_moving_transform = InputMultiPath( File(exists=True), - argstr='%s', - desc='A transform or a list of transforms that should be applied ' - 'before the registration begins. Note that, when a list is given, ' - 'the transformations are applied in reverse order.', - xor=['initial_moving_transform_com']) + argstr="%s", + desc="A transform or a list of transforms that should be applied " + "before the registration begins. Note that, when a list is given, " + "the transformations are applied in reverse order.", + xor=["initial_moving_transform_com"], + ) invert_initial_moving_transform = InputMultiPath( traits.Bool(), requires=["initial_moving_transform"], - desc='One boolean or a list of booleans that indicate' - 'whether the inverse(s) of the transform(s) defined' - 'in initial_moving_transform should be used.', - xor=['initial_moving_transform_com']) + desc="One boolean or a list of booleans that indicate" + "whether the inverse(s) of the transform(s) defined" + "in initial_moving_transform should be used.", + xor=["initial_moving_transform_com"], + ) initial_moving_transform_com = traits.Enum( 0, 1, 2, - argstr='%s', - xor=['initial_moving_transform'], + argstr="%s", + xor=["initial_moving_transform"], desc="Align the moving_image and fixed_image before registration using " "the geometric center of the images (=0), the image intensities (=1), " - "or the origin of the images (=2).") - metric_item_trait = traits.Enum("CC", "MeanSquares", "Demons", "GC", "MI", - "Mattes") - metric_stage_trait = traits.Either(metric_item_trait, - traits.List(metric_item_trait)) + "or the origin of the images (=2).", + ) + metric_item_trait = traits.Enum("CC", "MeanSquares", "Demons", "GC", "MI", "Mattes") + metric_stage_trait = traits.Either( + metric_item_trait, traits.List(metric_item_trait) + ) metric = traits.List( metric_stage_trait, mandatory=True, - desc='the metric(s) to use for each stage. 
' - 'Note that multiple metrics per stage are not supported ' - 'in ANTS 1.9.1 and earlier.') + desc="the metric(s) to use for each stage. " + "Note that multiple metrics per stage are not supported " + "in ANTS 1.9.1 and earlier.", + ) metric_weight_item_trait = traits.Float(1.0, usedefault=True) metric_weight_stage_trait = traits.Either( - metric_weight_item_trait, traits.List(metric_weight_item_trait)) + metric_weight_item_trait, traits.List(metric_weight_item_trait) + ) metric_weight = traits.List( metric_weight_stage_trait, value=[1.0], usedefault=True, - requires=['metric'], + requires=["metric"], mandatory=True, - desc='the metric weight(s) for each stage. ' - 'The weights must sum to 1 per stage.') + desc="the metric weight(s) for each stage. " + "The weights must sum to 1 per stage.", + ) radius_bins_item_trait = traits.Int(5, usedefault=True) radius_bins_stage_trait = traits.Either( - radius_bins_item_trait, traits.List(radius_bins_item_trait)) + radius_bins_item_trait, traits.List(radius_bins_item_trait) + ) radius_or_number_of_bins = traits.List( radius_bins_stage_trait, value=[5], usedefault=True, - requires=['metric_weight'], - desc='the number of bins in each stage for the MI and Mattes metric, ' - 'the radius for other metrics') - sampling_strategy_item_trait = traits.Enum("None", "Regular", "Random", - None) + requires=["metric_weight"], + desc="the number of bins in each stage for the MI and Mattes metric, " + "the radius for other metrics", + ) + sampling_strategy_item_trait = traits.Enum("None", "Regular", "Random", None) sampling_strategy_stage_trait = traits.Either( - sampling_strategy_item_trait, - traits.List(sampling_strategy_item_trait)) + sampling_strategy_item_trait, traits.List(sampling_strategy_item_trait) + ) sampling_strategy = traits.List( trait=sampling_strategy_stage_trait, - requires=['metric_weight'], - desc='the metric sampling strategy (strategies) for each stage') + requires=["metric_weight"], + desc="the metric sampling strategy (strategies) for each stage", + ) sampling_percentage_item_trait = traits.Either( - traits.Range(low=0.0, high=1.0), None) + traits.Range(low=0.0, high=1.0), None + ) sampling_percentage_stage_trait = traits.Either( - sampling_percentage_item_trait, - traits.List(sampling_percentage_item_trait)) + sampling_percentage_item_trait, traits.List(sampling_percentage_item_trait) + ) sampling_percentage = traits.List( trait=sampling_percentage_stage_trait, - requires=['sampling_strategy'], - desc="the metric sampling percentage(s) to use for each stage") - use_estimate_learning_rate_once = traits.List(traits.Bool(), desc='') + requires=["sampling_strategy"], + desc="the metric sampling percentage(s) to use for each stage", + ) + use_estimate_learning_rate_once = traits.List(traits.Bool(), desc="") use_histogram_matching = traits.Either( traits.Bool, - traits.List(traits.Bool(argstr='%s')), + traits.List(traits.Bool(argstr="%s")), default=True, usedefault=True, - desc='Histogram match the images before registration.') + desc="Histogram match the images before registration.", + ) interpolation = traits.Enum( - 'Linear', - 'NearestNeighbor', - 'CosineWindowedSinc', - 'WelchWindowedSinc', - 'HammingWindowedSinc', - 'LanczosWindowedSinc', - 'BSpline', - 'MultiLabel', - 'Gaussian', - argstr='%s', - usedefault=True) + "Linear", + "NearestNeighbor", + "CosineWindowedSinc", + "WelchWindowedSinc", + "HammingWindowedSinc", + "LanczosWindowedSinc", + "BSpline", + "MultiLabel", + "Gaussian", + argstr="%s", + usedefault=True, + ) 
interpolation_parameters = traits.Either( traits.Tuple(traits.Int()), # BSpline (order) traits.Tuple( - traits.Float(), # Gaussian/MultiLabel (sigma, alpha) - traits.Float())) + traits.Float(), traits.Float() # Gaussian/MultiLabel (sigma, alpha) + ), + ) write_composite_transform = traits.Bool( - argstr='--write-composite-transform %d', + argstr="--write-composite-transform %d", default_value=False, usedefault=True, - desc='') + desc="", + ) collapse_output_transforms = traits.Bool( - argstr='--collapse-output-transforms %d', + argstr="--collapse-output-transforms %d", default_value=True, usedefault=True, # This should be true for explicit completeness - desc=('Collapse output transforms. Specifically, enabling this option ' - 'combines all adjacent linear transforms and composes all ' - 'adjacent displacement field transforms before writing the ' - 'results to disk.')) + desc=( + "Collapse output transforms. Specifically, enabling this option " + "combines all adjacent linear transforms and composes all " + "adjacent displacement field transforms before writing the " + "results to disk." + ), + ) initialize_transforms_per_stage = traits.Bool( - argstr='--initialize-transforms-per-stage %d', + argstr="--initialize-transforms-per-stage %d", default_value=False, usedefault=True, # This should be true for explicit completeness - desc= - ('Initialize linear transforms from the previous stage. By enabling this option, ' - 'the current linear stage transform is directly intialized from the previous ' - 'stages linear transform; this allows multiple linear stages to be run where ' - 'each stage directly updates the estimated linear transform from the previous ' - 'stage. (e.g. Translation -> Rigid -> Affine). ')) + desc=( + "Initialize linear transforms from the previous stage. By enabling this option, " + "the current linear stage transform is directly intialized from the previous " + "stages linear transform; this allows multiple linear stages to be run where " + "each stage directly updates the estimated linear transform from the previous " + "stage. (e.g. Translation -> Rigid -> Affine). " + ), + ) # NOTE: Even though only 0=False and 1=True are allowed, ants uses integer # values instead of booleans float = traits.Bool( - argstr='--float %d', + argstr="--float %d", default_value=False, - desc='Use float instead of double for computations.') + desc="Use float instead of double for computations.", + ) transforms = traits.List( - traits.Enum('Rigid', 'Affine', 'CompositeAffine', 'Similarity', - 'Translation', 'BSpline', 'GaussianDisplacementField', - 'TimeVaryingVelocityField', - 'TimeVaryingBSplineVelocityField', 'SyN', 'BSplineSyN', - 'Exponential', 'BSplineExponential'), - argstr='%s', - mandatory=True) + traits.Enum( + "Rigid", + "Affine", + "CompositeAffine", + "Similarity", + "Translation", + "BSpline", + "GaussianDisplacementField", + "TimeVaryingVelocityField", + "TimeVaryingBSplineVelocityField", + "SyN", + "BSplineSyN", + "Exponential", + "BSplineExponential", + ), + argstr="%s", + mandatory=True, + ) # TODO: input checking and allow defaults # All parameters must be specified for BSplineDisplacementField, TimeVaryingBSplineVelocityField, BSplineSyN, # Exponential, and BSplineExponential. EVEN DEFAULTS! 
@@ -445,111 +494,119 @@ class RegistrationInputSpec(ANTSCommandInputSpec): traits.Tuple( traits.Float(), # GaussianDisplacementField, SyN traits.Float(), - traits.Float()), + traits.Float(), + ), traits.Tuple( traits.Float(), # BSplineSyn, traits.Int(), # BSplineDisplacementField, traits.Int(), # TimeVaryingBSplineVelocityField - traits.Int()), + traits.Int(), + ), traits.Tuple( traits.Float(), # TimeVaryingVelocityField traits.Int(), traits.Float(), traits.Float(), traits.Float(), - traits.Float()), + traits.Float(), + ), traits.Tuple( traits.Float(), # Exponential traits.Float(), traits.Float(), - traits.Int()), + traits.Int(), + ), traits.Tuple( traits.Float(), # BSplineExponential traits.Int(), traits.Int(), traits.Int(), - traits.Int()), - )) + traits.Int(), + ), + ) + ) restrict_deformation = traits.List( traits.List(traits.Enum(0, 1)), - desc=("This option allows the user to restrict the optimization of " - "the displacement field, translation, rigid or affine transform " - "on a per-component basis. For example, if one wants to limit " - "the deformation or rotation of 3-D volume to the first two " - "dimensions, this is possible by specifying a weight vector of " - "'1x1x0' for a deformation field or '1x1x0x1x1x0' for a rigid " - "transformation. Low-dimensional restriction only works if " - "there are no preceding transformations.")) + desc=( + "This option allows the user to restrict the optimization of " + "the displacement field, translation, rigid or affine transform " + "on a per-component basis. For example, if one wants to limit " + "the deformation or rotation of 3-D volume to the first two " + "dimensions, this is possible by specifying a weight vector of " + "'1x1x0' for a deformation field or '1x1x0x1x1x0' for a rigid " + "transformation. Low-dimensional restriction only works if " + "there are no preceding transformations." 
+ ), + ) # Convergence flags number_of_iterations = traits.List(traits.List(traits.Int())) smoothing_sigmas = traits.List(traits.List(traits.Float()), mandatory=True) sigma_units = traits.List( - traits.Enum('mm', 'vox'), - requires=['smoothing_sigmas'], - desc="units for smoothing sigmas") + traits.Enum("mm", "vox"), + requires=["smoothing_sigmas"], + desc="units for smoothing sigmas", + ) shrink_factors = traits.List(traits.List(traits.Int()), mandatory=True) convergence_threshold = traits.List( trait=traits.Float(), value=[1e-6], minlen=1, - requires=['number_of_iterations'], - usedefault=True) + requires=["number_of_iterations"], + usedefault=True, + ) convergence_window_size = traits.List( trait=traits.Int(), value=[10], minlen=1, - requires=['convergence_threshold'], - usedefault=True) + requires=["convergence_threshold"], + usedefault=True, + ) # Output flags - output_transform_prefix = Str( - "transform", usedefault=True, argstr="%s", desc="") - output_warped_image = traits.Either( - traits.Bool, File(), hash_files=False, desc="") + output_transform_prefix = Str("transform", usedefault=True, argstr="%s", desc="") + output_warped_image = traits.Either(traits.Bool, File(), hash_files=False, desc="") output_inverse_warped_image = traits.Either( - traits.Bool, - File(), - hash_files=False, - requires=['output_warped_image'], - desc="") + traits.Bool, File(), hash_files=False, requires=["output_warped_image"], desc="" + ) winsorize_upper_quantile = traits.Range( low=0.0, high=1.0, value=1.0, - argstr='%s', + argstr="%s", usedefault=True, - desc="The Upper quantile to clip image ranges") + desc="The Upper quantile to clip image ranges", + ) winsorize_lower_quantile = traits.Range( low=0.0, high=1.0, value=0.0, - argstr='%s', + argstr="%s", usedefault=True, - desc="The Lower quantile to clip image ranges") + desc="The Lower quantile to clip image ranges", + ) - verbose = traits.Bool(argstr='-v', default_value=False, usedefault=True) + verbose = traits.Bool(argstr="-v", default_value=False, usedefault=True) class RegistrationOutputSpec(TraitedSpec): forward_transforms = traits.List( - File(exists=True), - desc='List of output transforms for forward registration') + File(exists=True), desc="List of output transforms for forward registration" + ) reverse_transforms = traits.List( - File(exists=True), - desc='List of output transforms for reverse registration') + File(exists=True), desc="List of output transforms for reverse registration" + ) forward_invert_flags = traits.List( - traits.Bool(), - desc='List of flags corresponding to the forward transforms') + traits.Bool(), desc="List of flags corresponding to the forward transforms" + ) reverse_invert_flags = traits.List( - traits.Bool(), - desc='List of flags corresponding to the reverse transforms') - composite_transform = File(exists=True, desc='Composite transform file') - inverse_composite_transform = File(desc='Inverse composite transform file') + traits.Bool(), desc="List of flags corresponding to the reverse transforms" + ) + composite_transform = File(exists=True, desc="Composite transform file") + inverse_composite_transform = File(desc="Inverse composite transform file") warped_image = File(desc="Outputs warped image") inverse_warped_image = File(desc="Outputs the inverse of the warped image") save_state = File(desc="The saved registration state to be restored") - metric_value = traits.Float(desc='the final value of metric') - elapsed_time = traits.Float( - desc='the total elapsed time as reported by ANTs') + metric_value = 
traits.Float(desc="the final value of metric") + elapsed_time = traits.Float(desc="the total elapsed time as reported by ANTs") class Registration(ANTSCommand): @@ -917,15 +974,20 @@ class Registration(ANTSCommand): --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] \ --write-composite-transform 1' """ - DEF_SAMPLING_STRATEGY = 'None' + + DEF_SAMPLING_STRATEGY = "None" """The default sampling strategy argument.""" - _cmd = 'antsRegistration' + _cmd = "antsRegistration" input_spec = RegistrationInputSpec output_spec = RegistrationOutputSpec _quantilesDone = False _linear_transform_names = [ - 'Rigid', 'Affine', 'Translation', 'CompositeAffine', 'Similarity' + "Rigid", + "Affine", + "Translation", + "CompositeAffine", + "Similarity", ] def __init__(self, **inputs): @@ -933,20 +995,21 @@ def __init__(self, **inputs): self._elapsed_time = None self._metric_value = None - def _run_interface(self, runtime, correct_return_codes=(0, )): + def _run_interface(self, runtime, correct_return_codes=(0,)): runtime = super(Registration, self)._run_interface(runtime) # Parse some profiling info output = runtime.stdout or runtime.merged if output: - lines = output.split('\n') + lines = output.split("\n") for l in lines[::-1]: # This should be the last line - if l.strip().startswith('Total elapsed time:'): - self._elapsed_time = float(l.strip().replace( - 'Total elapsed time: ', '')) - elif 'DIAGNOSTIC' in l: - self._metric_value = float(l.split(',')[2]) + if l.strip().startswith("Total elapsed time:"): + self._elapsed_time = float( + l.strip().replace("Total elapsed time: ", "") + ) + elif "DIAGNOSTIC" in l: + self._metric_value = float(l.split(",")[2]) break return runtime @@ -968,18 +1031,20 @@ def _format_metric(self, index): metric=name_input, weight=self.inputs.metric_weight[index], radius_or_bins=self.inputs.radius_or_number_of_bins[index], - optional=self.inputs.radius_or_number_of_bins[index]) + optional=self.inputs.radius_or_number_of_bins[index], + ) # The optional sampling strategy and percentage. - if isdefined(self.inputs.sampling_strategy - ) and self.inputs.sampling_strategy: + if isdefined(self.inputs.sampling_strategy) and self.inputs.sampling_strategy: sampling_strategy = self.inputs.sampling_strategy[index] if sampling_strategy: - stage_inputs['sampling_strategy'] = sampling_strategy - if isdefined(self.inputs.sampling_percentage - ) and self.inputs.sampling_percentage: + stage_inputs["sampling_strategy"] = sampling_strategy + if ( + isdefined(self.inputs.sampling_percentage) + and self.inputs.sampling_percentage + ): sampling_percentage = self.inputs.sampling_percentage[index] if sampling_percentage: - stage_inputs['sampling_percentage'] = sampling_percentage + stage_inputs["sampling_percentage"] = sampling_percentage # Make a list of metric specifications, one per -m command line # argument for the current stage. @@ -1013,16 +1078,18 @@ def _format_metric(self, index): @staticmethod def _format_metric_argument(**kwargs): - retval = '%s[ %s, %s, %g, %d' % (kwargs['metric'], - kwargs['fixed_image'], - kwargs['moving_image'], - kwargs['weight'], - kwargs['radius_or_bins']) + retval = "%s[ %s, %s, %g, %d" % ( + kwargs["metric"], + kwargs["fixed_image"], + kwargs["moving_image"], + kwargs["weight"], + kwargs["radius_or_bins"], + ) # The optional sampling strategy. 
- if 'sampling_strategy' in kwargs: - sampling_strategy = kwargs['sampling_strategy'] - elif 'sampling_percentage' in kwargs: + if "sampling_strategy" in kwargs: + sampling_strategy = kwargs["sampling_strategy"] + elif "sampling_percentage" in kwargs: # The sampling percentage is specified but not the # sampling strategy. Use the default strategy. sampling_strategy = Registration.DEF_SAMPLING_STRATEGY @@ -1030,44 +1097,53 @@ def _format_metric_argument(**kwargs): sampling_strategy = None # Format the optional sampling arguments. if sampling_strategy: - retval += ', %s' % sampling_strategy - if 'sampling_percentage' in kwargs: - retval += ', %g' % kwargs['sampling_percentage'] + retval += ", %s" % sampling_strategy + if "sampling_percentage" in kwargs: + retval += ", %g" % kwargs["sampling_percentage"] - retval += ' ]' + retval += " ]" return retval def _format_transform(self, index): retval = [] - retval.append('%s[ ' % self.inputs.transforms[index]) - parameters = ', '.join([ - str(element) for element in self.inputs.transform_parameters[index] - ]) - retval.append('%s' % parameters) - retval.append(' ]') + retval.append("%s[ " % self.inputs.transforms[index]) + parameters = ", ".join( + [str(element) for element in self.inputs.transform_parameters[index]] + ) + retval.append("%s" % parameters) + retval.append(" ]") return "".join(retval) def _format_registration(self): retval = [] for ii in range(len(self.inputs.transforms)): - retval.append('--transform %s' % (self._format_transform(ii))) + retval.append("--transform %s" % (self._format_transform(ii))) for metric in self._format_metric(ii): - retval.append('--metric %s' % metric) - retval.append('--convergence %s' % self._format_convergence(ii)) + retval.append("--metric %s" % metric) + retval.append("--convergence %s" % self._format_convergence(ii)) if isdefined(self.inputs.sigma_units): retval.append( - '--smoothing-sigmas %s%s' % - (self._format_xarray(self.inputs.smoothing_sigmas[ii]), - self.inputs.sigma_units[ii])) + "--smoothing-sigmas %s%s" + % ( + self._format_xarray(self.inputs.smoothing_sigmas[ii]), + self.inputs.sigma_units[ii], + ) + ) else: - retval.append('--smoothing-sigmas %s' % self._format_xarray( - self.inputs.smoothing_sigmas[ii])) - retval.append('--shrink-factors %s' % self._format_xarray( - self.inputs.shrink_factors[ii])) + retval.append( + "--smoothing-sigmas %s" + % self._format_xarray(self.inputs.smoothing_sigmas[ii]) + ) + retval.append( + "--shrink-factors %s" + % self._format_xarray(self.inputs.shrink_factors[ii]) + ) if isdefined(self.inputs.use_estimate_learning_rate_once): - retval.append('--use-estimate-learning-rate-once %d' % - self.inputs.use_estimate_learning_rate_once[ii]) + retval.append( + "--use-estimate-learning-rate-once %d" + % self.inputs.use_estimate_learning_rate_once[ii] + ) if isdefined(self.inputs.use_histogram_matching): # use_histogram_matching is either a common flag for all transforms # or a list of transform-specific flags @@ -1075,50 +1151,59 @@ def _format_registration(self): histval = self.inputs.use_histogram_matching else: histval = self.inputs.use_histogram_matching[ii] - retval.append('--use-histogram-matching %d' % histval) + retval.append("--use-histogram-matching %d" % histval) if isdefined(self.inputs.restrict_deformation): retval.append( - '--restrict-deformation %s' % self._format_xarray( - self.inputs.restrict_deformation[ii])) - if any((isdefined(self.inputs.fixed_image_masks), - isdefined(self.inputs.moving_image_masks))): + "--restrict-deformation %s" + % 
self._format_xarray(self.inputs.restrict_deformation[ii]) + ) + if any( + ( + isdefined(self.inputs.fixed_image_masks), + isdefined(self.inputs.moving_image_masks), + ) + ): if isdefined(self.inputs.fixed_image_masks): - fixed_masks = ensure_list( - self.inputs.fixed_image_masks) + fixed_masks = ensure_list(self.inputs.fixed_image_masks) fixed_mask = fixed_masks[ii if len(fixed_masks) > 1 else 0] else: - fixed_mask = 'NULL' + fixed_mask = "NULL" if isdefined(self.inputs.moving_image_masks): - moving_masks = ensure_list( - self.inputs.moving_image_masks) - moving_mask = moving_masks[ii - if len(moving_masks) > 1 else 0] + moving_masks = ensure_list(self.inputs.moving_image_masks) + moving_mask = moving_masks[ii if len(moving_masks) > 1 else 0] else: - moving_mask = 'NULL' - retval.append('--masks [ %s, %s ]' % (fixed_mask, moving_mask)) + moving_mask = "NULL" + retval.append("--masks [ %s, %s ]" % (fixed_mask, moving_mask)) return " ".join(retval) def _get_outputfilenames(self, inverse=False): output_filename = None if not inverse: - if isdefined(self.inputs.output_warped_image) and \ - self.inputs.output_warped_image: + if ( + isdefined(self.inputs.output_warped_image) + and self.inputs.output_warped_image + ): output_filename = self.inputs.output_warped_image if isinstance(output_filename, bool): - output_filename = '%s_Warped.nii.gz' % self.inputs.output_transform_prefix + output_filename = ( + "%s_Warped.nii.gz" % self.inputs.output_transform_prefix + ) return output_filename inv_output_filename = None - if isdefined(self.inputs.output_inverse_warped_image) and \ - self.inputs.output_inverse_warped_image: + if ( + isdefined(self.inputs.output_inverse_warped_image) + and self.inputs.output_inverse_warped_image + ): inv_output_filename = self.inputs.output_inverse_warped_image if isinstance(inv_output_filename, bool): - inv_output_filename = '%s_InverseWarped.nii.gz' % self.inputs.output_transform_prefix + inv_output_filename = ( + "%s_InverseWarped.nii.gz" % self.inputs.output_transform_prefix + ) return inv_output_filename def _format_convergence(self, ii): - convergence_iter = self._format_xarray( - self.inputs.number_of_iterations[ii]) + convergence_iter = self._format_xarray(self.inputs.number_of_iterations[ii]) if len(self.inputs.convergence_threshold) > ii: convergence_value = self.inputs.convergence_threshold[ii] else: @@ -1127,19 +1212,25 @@ def _format_convergence(self, ii): convergence_ws = self.inputs.convergence_window_size[ii] else: convergence_ws = self.inputs.convergence_window_size[0] - return '[ %s, %g, %d ]' % (convergence_iter, convergence_value, - convergence_ws) + return "[ %s, %g, %d ]" % (convergence_iter, convergence_value, convergence_ws) def _format_winsorize_image_intensities(self): - if not self.inputs.winsorize_upper_quantile > self.inputs.winsorize_lower_quantile: + if ( + not self.inputs.winsorize_upper_quantile + > self.inputs.winsorize_lower_quantile + ): raise RuntimeError( - "Upper bound MUST be more than lower bound: %g > %g" % - (self.inputs.winsorize_upper_quantile, - self.inputs.winsorize_lower_quantile)) + "Upper bound MUST be more than lower bound: %g > %g" + % ( + self.inputs.winsorize_upper_quantile, + self.inputs.winsorize_lower_quantile, + ) + ) self._quantilesDone = True - return '--winsorize-image-intensities [ %s, %s ]' % ( + return "--winsorize-image-intensities [ %s, %s ]" % ( self.inputs.winsorize_lower_quantile, - self.inputs.winsorize_upper_quantile) + self.inputs.winsorize_upper_quantile, + ) def 
_get_initial_transform_filenames(self): n_transforms = len(self.inputs.initial_moving_transform) @@ -1150,60 +1241,76 @@ def _get_initial_transform_filenames(self): if len(self.inputs.invert_initial_moving_transform) != n_transforms: raise Exception( 'Inputs "initial_moving_transform" and "invert_initial_moving_transform"' - 'should have the same length.') + "should have the same length." + ) invert_flags = self.inputs.invert_initial_moving_transform retval = [ - "[ %s, %d ]" % (xfm, int(flag)) for xfm, flag in zip( - self.inputs.initial_moving_transform, invert_flags) + "[ %s, %d ]" % (xfm, int(flag)) + for xfm, flag in zip(self.inputs.initial_moving_transform, invert_flags) ] - return " ".join(['--initial-moving-transform'] + retval) + return " ".join(["--initial-moving-transform"] + retval) def _format_arg(self, opt, spec, val): - if opt == 'fixed_image_mask': + if opt == "fixed_image_mask": if isdefined(self.inputs.moving_image_mask): - return '--masks [ %s, %s ]' % (self.inputs.fixed_image_mask, - self.inputs.moving_image_mask) + return "--masks [ %s, %s ]" % ( + self.inputs.fixed_image_mask, + self.inputs.moving_image_mask, + ) else: - return '--masks %s' % self.inputs.fixed_image_mask - elif opt == 'transforms': + return "--masks %s" % self.inputs.fixed_image_mask + elif opt == "transforms": return self._format_registration() - elif opt == 'initial_moving_transform': + elif opt == "initial_moving_transform": return self._get_initial_transform_filenames() - elif opt == 'initial_moving_transform_com': - do_center_of_mass_init = self.inputs.initial_moving_transform_com \ - if isdefined(self.inputs.initial_moving_transform_com) else 0 # Just do the default behavior - return '--initial-moving-transform [ %s, %s, %d ]' % ( - self.inputs.fixed_image[0], self.inputs.moving_image[0], - do_center_of_mass_init) - elif opt == 'interpolation': - if self.inputs.interpolation in ['BSpline', 'MultiLabel', 'Gaussian'] and \ - isdefined(self.inputs.interpolation_parameters): - return '--interpolation %s[ %s ]' % ( - self.inputs.interpolation, ', '.join([ - str(param) - for param in self.inputs.interpolation_parameters - ])) + elif opt == "initial_moving_transform_com": + do_center_of_mass_init = ( + self.inputs.initial_moving_transform_com + if isdefined(self.inputs.initial_moving_transform_com) + else 0 + ) # Just do the default behavior + return "--initial-moving-transform [ %s, %s, %d ]" % ( + self.inputs.fixed_image[0], + self.inputs.moving_image[0], + do_center_of_mass_init, + ) + elif opt == "interpolation": + if self.inputs.interpolation in [ + "BSpline", + "MultiLabel", + "Gaussian", + ] and isdefined(self.inputs.interpolation_parameters): + return "--interpolation %s[ %s ]" % ( + self.inputs.interpolation, + ", ".join( + [str(param) for param in self.inputs.interpolation_parameters] + ), + ) else: - return '--interpolation %s' % self.inputs.interpolation - elif opt == 'output_transform_prefix': + return "--interpolation %s" % self.inputs.interpolation + elif opt == "output_transform_prefix": out_filename = self._get_outputfilenames(inverse=False) inv_out_filename = self._get_outputfilenames(inverse=True) if out_filename and inv_out_filename: - return '--output [ %s, %s, %s ]' % ( - self.inputs.output_transform_prefix, out_filename, - inv_out_filename) + return "--output [ %s, %s, %s ]" % ( + self.inputs.output_transform_prefix, + out_filename, + inv_out_filename, + ) elif out_filename: - return '--output [ %s, %s ]' % ( - self.inputs.output_transform_prefix, out_filename) + return "--output 
[ %s, %s ]" % ( + self.inputs.output_transform_prefix, + out_filename, + ) else: - return '--output %s' % self.inputs.output_transform_prefix - elif opt == 'winsorize_upper_quantile' or opt == 'winsorize_lower_quantile': + return "--output %s" % self.inputs.output_transform_prefix + elif opt == "winsorize_upper_quantile" or opt == "winsorize_lower_quantile": if not self._quantilesDone: return self._format_winsorize_image_intensities() else: self._quantilesDone = False - return '' # Must return something for argstr! + return "" # Must return something for argstr! # This feature was removed from recent versions of antsRegistration due to corrupt outputs. # elif opt == 'collapse_linear_transforms_to_fixed_image_header': # return self._formatCollapseLinearTransformsToFixedImageHeader() @@ -1211,14 +1318,14 @@ def _format_arg(self, opt, spec, val): def _output_filenames(self, prefix, count, transform, inverse=False): self.low_dimensional_transform_map = { - 'Rigid': 'Rigid.mat', - 'Affine': 'Affine.mat', - 'GenericAffine': 'GenericAffine.mat', - 'CompositeAffine': 'Affine.mat', - 'Similarity': 'Similarity.mat', - 'Translation': 'Translation.mat', - 'BSpline': 'BSpline.txt', - 'Initial': 'DerivedInitialMovingTranslation.mat' + "Rigid": "Rigid.mat", + "Affine": "Affine.mat", + "GenericAffine": "GenericAffine.mat", + "CompositeAffine": "Affine.mat", + "Similarity": "Similarity.mat", + "Translation": "Translation.mat", + "BSpline": "BSpline.txt", + "Initial": "DerivedInitialMovingTranslation.mat", } if transform in list(self.low_dimensional_transform_map.keys()): suffix = self.low_dimensional_transform_map[transform] @@ -1226,126 +1333,145 @@ def _output_filenames(self, prefix, count, transform, inverse=False): else: inverse_mode = False # These are not analytically invertable if inverse: - suffix = 'InverseWarp.nii.gz' + suffix = "InverseWarp.nii.gz" else: - suffix = 'Warp.nii.gz' - return '%s%d%s' % (prefix, count, suffix), inverse_mode + suffix = "Warp.nii.gz" + return "%s%d%s" % (prefix, count, suffix), inverse_mode def _list_outputs(self): outputs = self._outputs().get() - outputs['forward_transforms'] = [] - outputs['forward_invert_flags'] = [] - outputs['reverse_transforms'] = [] - outputs['reverse_invert_flags'] = [] + outputs["forward_transforms"] = [] + outputs["forward_invert_flags"] = [] + outputs["reverse_transforms"] = [] + outputs["reverse_invert_flags"] = [] # invert_initial_moving_transform should be always defined, even if # there's no initial transform invert_initial_moving_transform = [False] * len( - self.inputs.initial_moving_transform) + self.inputs.initial_moving_transform + ) if isdefined(self.inputs.invert_initial_moving_transform): - invert_initial_moving_transform = self.inputs.invert_initial_moving_transform + invert_initial_moving_transform = ( + self.inputs.invert_initial_moving_transform + ) if self.inputs.write_composite_transform: - filename = self.inputs.output_transform_prefix + 'Composite.h5' - outputs['composite_transform'] = os.path.abspath(filename) - filename = self.inputs.output_transform_prefix + \ - 'InverseComposite.h5' - outputs['inverse_composite_transform'] = os.path.abspath(filename) + filename = self.inputs.output_transform_prefix + "Composite.h5" + outputs["composite_transform"] = os.path.abspath(filename) + filename = self.inputs.output_transform_prefix + "InverseComposite.h5" + outputs["inverse_composite_transform"] = os.path.abspath(filename) # If composite transforms are written, then individuals are not written (as of 2014-10-26 else: if not 
self.inputs.collapse_output_transforms: transform_count = 0 if isdefined(self.inputs.initial_moving_transform): outputs[ - 'forward_transforms'] += self.inputs.initial_moving_transform - outputs[ - 'forward_invert_flags'] += invert_initial_moving_transform - outputs['reverse_transforms'] = self.inputs.initial_moving_transform + \ - outputs['reverse_transforms'] - outputs['reverse_invert_flags'] = [ - not e for e in invert_initial_moving_transform - ] + outputs['reverse_invert_flags'] # Prepend - transform_count += len( - self.inputs.initial_moving_transform) + "forward_transforms" + ] += self.inputs.initial_moving_transform + outputs["forward_invert_flags"] += invert_initial_moving_transform + outputs["reverse_transforms"] = ( + self.inputs.initial_moving_transform + + outputs["reverse_transforms"] + ) + outputs["reverse_invert_flags"] = ( + [not e for e in invert_initial_moving_transform] + + outputs["reverse_invert_flags"] + ) # Prepend + transform_count += len(self.inputs.initial_moving_transform) elif isdefined(self.inputs.initial_moving_transform_com): forward_filename, forward_inversemode = self._output_filenames( - self.inputs.output_transform_prefix, transform_count, - 'Initial') + self.inputs.output_transform_prefix, transform_count, "Initial" + ) reverse_filename, reverse_inversemode = self._output_filenames( - self.inputs.output_transform_prefix, transform_count, - 'Initial', True) - outputs['forward_transforms'].append( - os.path.abspath(forward_filename)) - outputs['forward_invert_flags'].append(False) - outputs['reverse_transforms'].insert( - 0, os.path.abspath(reverse_filename)) - outputs['reverse_invert_flags'].insert(0, True) + self.inputs.output_transform_prefix, + transform_count, + "Initial", + True, + ) + outputs["forward_transforms"].append( + os.path.abspath(forward_filename) + ) + outputs["forward_invert_flags"].append(False) + outputs["reverse_transforms"].insert( + 0, os.path.abspath(reverse_filename) + ) + outputs["reverse_invert_flags"].insert(0, True) transform_count += 1 for count in range(len(self.inputs.transforms)): forward_filename, forward_inversemode = self._output_filenames( - self.inputs.output_transform_prefix, transform_count, - self.inputs.transforms[count]) + self.inputs.output_transform_prefix, + transform_count, + self.inputs.transforms[count], + ) reverse_filename, reverse_inversemode = self._output_filenames( - self.inputs.output_transform_prefix, transform_count, - self.inputs.transforms[count], True) - outputs['forward_transforms'].append( - os.path.abspath(forward_filename)) - outputs['forward_invert_flags'].append(forward_inversemode) - outputs['reverse_transforms'].insert( - 0, os.path.abspath(reverse_filename)) - outputs['reverse_invert_flags'].insert( - 0, reverse_inversemode) + self.inputs.output_transform_prefix, + transform_count, + self.inputs.transforms[count], + True, + ) + outputs["forward_transforms"].append( + os.path.abspath(forward_filename) + ) + outputs["forward_invert_flags"].append(forward_inversemode) + outputs["reverse_transforms"].insert( + 0, os.path.abspath(reverse_filename) + ) + outputs["reverse_invert_flags"].insert(0, reverse_inversemode) transform_count += 1 else: transform_count = 0 is_linear = [ - t in self._linear_transform_names - for t in self.inputs.transforms + t in self._linear_transform_names for t in self.inputs.transforms ] collapse_list = [] - if isdefined(self.inputs.initial_moving_transform) or \ - isdefined(self.inputs.initial_moving_transform_com): + if 
isdefined(self.inputs.initial_moving_transform) or isdefined( + self.inputs.initial_moving_transform_com + ): is_linear.insert(0, True) # Only files returned by collapse_output_transforms if any(is_linear): - collapse_list.append('GenericAffine') + collapse_list.append("GenericAffine") if not all(is_linear): - collapse_list.append('SyN') + collapse_list.append("SyN") for transform in collapse_list: forward_filename, forward_inversemode = self._output_filenames( self.inputs.output_transform_prefix, transform_count, transform, - inverse=False) + inverse=False, + ) reverse_filename, reverse_inversemode = self._output_filenames( self.inputs.output_transform_prefix, transform_count, transform, - inverse=True) - outputs['forward_transforms'].append( - os.path.abspath(forward_filename)) - outputs['forward_invert_flags'].append(forward_inversemode) - outputs['reverse_transforms'].append( - os.path.abspath(reverse_filename)) - outputs['reverse_invert_flags'].append(reverse_inversemode) + inverse=True, + ) + outputs["forward_transforms"].append( + os.path.abspath(forward_filename) + ) + outputs["forward_invert_flags"].append(forward_inversemode) + outputs["reverse_transforms"].append( + os.path.abspath(reverse_filename) + ) + outputs["reverse_invert_flags"].append(reverse_inversemode) transform_count += 1 out_filename = self._get_outputfilenames(inverse=False) inv_out_filename = self._get_outputfilenames(inverse=True) if out_filename: - outputs['warped_image'] = os.path.abspath(out_filename) + outputs["warped_image"] = os.path.abspath(out_filename) if inv_out_filename: - outputs['inverse_warped_image'] = os.path.abspath(inv_out_filename) + outputs["inverse_warped_image"] = os.path.abspath(inv_out_filename) if len(self.inputs.save_state): - outputs['save_state'] = os.path.abspath(self.inputs.save_state) + outputs["save_state"] = os.path.abspath(self.inputs.save_state) if self._metric_value: - outputs['metric_value'] = self._metric_value + outputs["metric_value"] = self._metric_value if self._elapsed_time: - outputs['elapsed_time'] = self._elapsed_time + outputs["elapsed_time"] = self._elapsed_time return outputs @@ -1354,20 +1480,17 @@ class MeasureImageSimilarityInputSpec(ANTSCommandInputSpec): 2, 3, 4, - argstr='--dimensionality %d', + argstr="--dimensionality %d", position=1, - desc='Dimensionality of the fixed/moving image pair', + desc="Dimensionality of the fixed/moving image pair", ) fixed_image = File( - exists=True, - mandatory=True, - desc='Image to which the moving image is warped', + exists=True, mandatory=True, desc="Image to which the moving image is warped", ) moving_image = File( exists=True, mandatory=True, - desc= - 'Image to apply transformation to (generally a coregistered functional)', + desc="Image to apply transformation to (generally a coregistered functional)", ) metric = traits.Enum( "CC", @@ -1380,41 +1503,42 @@ class MeasureImageSimilarityInputSpec(ANTSCommandInputSpec): mandatory=True, ) metric_weight = traits.Float( - requires=['metric'], + requires=["metric"], default_value=1.0, usedefault=True, desc='The "metricWeight" variable is not used.', ) radius_or_number_of_bins = traits.Int( - requires=['metric'], + requires=["metric"], mandatory=True, - desc='The number of bins in each stage for the MI and Mattes metric, ' - 'or the radius for other metrics', + desc="The number of bins in each stage for the MI and Mattes metric, " + "or the radius for other metrics", ) sampling_strategy = traits.Enum( "None", "Regular", "Random", - requires=['metric'], + requires=["metric"], 
usedefault=True, - desc='Manner of choosing point set over which to optimize the metric. ' - 'Defaults to "None" (i.e. a dense sampling of one sample per voxel).') + desc="Manner of choosing point set over which to optimize the metric. " + 'Defaults to "None" (i.e. a dense sampling of one sample per voxel).', + ) sampling_percentage = traits.Either( traits.Range(low=0.0, high=1.0), - requires=['metric'], + requires=["metric"], mandatory=True, - desc= - 'Percentage of points accessible to the sampling strategy over which ' - 'to optimize the metric.') + desc="Percentage of points accessible to the sampling strategy over which " + "to optimize the metric.", + ) fixed_image_mask = File( exists=True, - argstr='%s', - desc='mask used to limit metric sampling region of the fixed image', + argstr="%s", + desc="mask used to limit metric sampling region of the fixed image", ) moving_image_mask = File( exists=True, - requires=['fixed_image_mask'], - desc='mask used to limit metric sampling region of the moving image', + requires=["fixed_image_mask"], + desc="mask used to limit metric sampling region of the moving image", ) @@ -1445,14 +1569,15 @@ class MeasureImageSimilarity(ANTSCommand): 'MeasureImageSimilarity --dimensionality 3 --masks ["mask.nii","mask.nii.gz"] \ --metric MI["T1.nii","resting.nii",1.0,5,Regular,1.0]' """ - _cmd = 'MeasureImageSimilarity' + + _cmd = "MeasureImageSimilarity" input_spec = MeasureImageSimilarityInputSpec output_spec = MeasureImageSimilarityOutputSpec def _metric_constructor(self): - retval = '--metric {metric}["{fixed_image}","{moving_image}",{metric_weight},'\ - '{radius_or_number_of_bins},{sampling_strategy},{sampling_percentage}]'\ - .format( + retval = ( + '--metric {metric}["{fixed_image}","{moving_image}",{metric_weight},' + "{radius_or_number_of_bins},{sampling_strategy},{sampling_percentage}]".format( metric=self.inputs.metric, fixed_image=self.inputs.fixed_image, moving_image=self.inputs.moving_image, @@ -1461,50 +1586,74 @@ def _metric_constructor(self): sampling_strategy=self.inputs.sampling_strategy, sampling_percentage=self.inputs.sampling_percentage, ) + ) return retval def _mask_constructor(self): if self.inputs.moving_image_mask: - retval = '--masks ["{fixed_image_mask}","{moving_image_mask}"]'\ - .format( - fixed_image_mask=self.inputs.fixed_image_mask, - moving_image_mask=self.inputs.moving_image_mask, - ) + retval = '--masks ["{fixed_image_mask}","{moving_image_mask}"]'.format( + fixed_image_mask=self.inputs.fixed_image_mask, + moving_image_mask=self.inputs.moving_image_mask, + ) else: - retval = '--masks "{fixed_image_mask}"'\ - .format( - fixed_image_mask=self.inputs.fixed_image_mask, - ) + retval = '--masks "{fixed_image_mask}"'.format( + fixed_image_mask=self.inputs.fixed_image_mask, + ) return retval def _format_arg(self, opt, spec, val): - if opt == 'metric': + if opt == "metric": return self._metric_constructor() - elif opt == 'fixed_image_mask': + elif opt == "fixed_image_mask": return self._mask_constructor() return super(MeasureImageSimilarity, self)._format_arg(opt, spec, val) def aggregate_outputs(self, runtime=None, needed_outputs=None): outputs = self._outputs() - stdout = runtime.stdout.split('\n') + stdout = runtime.stdout.split("\n") outputs.similarity = float(stdout[0]) return outputs class RegistrationSynQuickInputSpec(ANTSCommandInputSpec): - dimension = traits.Enum(3, 2, argstr='-d %d', - usedefault=True, desc='image dimension (2 or 3)') - fixed_image = InputMultiPath(File(exists=True), mandatory=True, argstr='-f %s...', - 
desc='Fixed image or source image or reference image') - moving_image = InputMultiPath(File(exists=True), mandatory=True, argstr='-m %s...', - desc='Moving image or target image') - output_prefix = Str("transform", usedefault=True, argstr='-o %s', - desc="A prefix that is prepended to all output files") - num_threads = traits.Int(default_value=LOCAL_DEFAULT_NUMBER_OF_THREADS, usedefault=True, - desc='Number of threads (default = 1)', argstr='-n %d') - - transform_type = traits.Enum('s', 't', 'r', 'a', 'sr', 'b', 'br', argstr='-t %s', - desc=""" + dimension = traits.Enum( + 3, 2, argstr="-d %d", usedefault=True, desc="image dimension (2 or 3)" + ) + fixed_image = InputMultiPath( + File(exists=True), + mandatory=True, + argstr="-f %s...", + desc="Fixed image or source image or reference image", + ) + moving_image = InputMultiPath( + File(exists=True), + mandatory=True, + argstr="-m %s...", + desc="Moving image or target image", + ) + output_prefix = Str( + "transform", + usedefault=True, + argstr="-o %s", + desc="A prefix that is prepended to all output files", + ) + num_threads = traits.Int( + default_value=LOCAL_DEFAULT_NUMBER_OF_THREADS, + usedefault=True, + desc="Number of threads (default = 1)", + argstr="-n %d", + ) + + transform_type = traits.Enum( + "s", + "t", + "r", + "a", + "sr", + "b", + "br", + argstr="-t %s", + desc=""" transform type t: translation r: rigid @@ -1513,26 +1662,41 @@ class RegistrationSynQuickInputSpec(ANTSCommandInputSpec): sr: rigid + deformable syn b: rigid + affine + deformable b-spline syn br: rigid + deformable b-spline syn""", - usedefault=True) + usedefault=True, + ) - use_histogram_matching = traits.Bool(False, argstr='-j %d', - desc='use histogram matching') - histogram_bins = traits.Int(default_value=32, usedefault=True, argstr='-r %d', - desc='histogram bins for mutual information in SyN stage \ - (default = 32)') - spline_distance = traits.Int(default_value=26, usedefault=True, argstr='-s %d', - desc='spline distance for deformable B-spline SyN transform \ - (default = 26)') - precision_type = traits.Enum('double', 'float', argstr='-p %s', - desc='precision type (default = double)', usedefault=True) + use_histogram_matching = traits.Bool( + False, argstr="-j %d", desc="use histogram matching" + ) + histogram_bins = traits.Int( + default_value=32, + usedefault=True, + argstr="-r %d", + desc="histogram bins for mutual information in SyN stage \ + (default = 32)", + ) + spline_distance = traits.Int( + default_value=26, + usedefault=True, + argstr="-s %d", + desc="spline distance for deformable B-spline SyN transform \ + (default = 26)", + ) + precision_type = traits.Enum( + "double", + "float", + argstr="-p %s", + desc="precision type (default = double)", + usedefault=True, + ) class RegistrationSynQuickOutputSpec(TraitedSpec): warped_image = File(exists=True, desc="Warped image") inverse_warped_image = File(exists=True, desc="Inverse warped image") - out_matrix = File(exists=True, desc='Affine matrix') - forward_warp_field = File(exists=True, desc='Forward warp field') - inverse_warp_field = File(exists=True, desc='Inverse warp field') + out_matrix = File(exists=True, desc="Affine matrix") + forward_warp_field = File(exists=True, desc="Forward warp field") + inverse_warp_field = File(exists=True, desc="Inverse warp field") class RegistrationSynQuick(ANTSCommand): @@ -1566,7 +1730,7 @@ class RegistrationSynQuick(ANTSCommand): >>> reg.run() # doctest: +SKIP """ - _cmd = 'antsRegistrationSyNQuick.sh' + _cmd = "antsRegistrationSyNQuick.sh" input_spec = 
RegistrationSynQuickInputSpec output_spec = RegistrationSynQuickOutputSpec @@ -1578,39 +1742,60 @@ def _num_threads_update(self): pass def _format_arg(self, name, spec, value): - if name == 'precision_type': + if name == "precision_type": return spec.argstr % value[0] return super(RegistrationSynQuick, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self.output_spec().get() out_base = os.path.abspath(self.inputs.output_prefix) - outputs['warped_image'] = out_base + 'Warped.nii.gz' - outputs['inverse_warped_image'] = out_base + 'InverseWarped.nii.gz' - outputs['out_matrix'] = out_base + '0GenericAffine.mat' + outputs["warped_image"] = out_base + "Warped.nii.gz" + outputs["inverse_warped_image"] = out_base + "InverseWarped.nii.gz" + outputs["out_matrix"] = out_base + "0GenericAffine.mat" - if self.inputs.transform_type not in ('t', 'r', 'a'): - outputs['forward_warp_field'] = out_base + '1Warp.nii.gz' - outputs['inverse_warp_field'] = out_base + '1InverseWarp.nii.gz' + if self.inputs.transform_type not in ("t", "r", "a"): + outputs["forward_warp_field"] = out_base + "1Warp.nii.gz" + outputs["inverse_warp_field"] = out_base + "1InverseWarp.nii.gz" return outputs + class CompositeTransformUtilInputSpec(ANTSCommandInputSpec): - process = traits.Enum('assemble', 'disassemble', argstr='--%s', - position=1, usedefault=True, - desc='What to do with the transform inputs (assemble or disassemble)', - ) - out_file = File(exists=False, argstr='%s', position=2, - desc='Output file path (only used for disassembly).') - in_file = InputMultiPath(File(exists=True), mandatory=True, argstr='%s...', - position=3, desc='Input transform file(s)') - output_prefix = Str("transform", usedefault=True, argstr='%s', position=4, - desc="A prefix that is prepended to all output files (only used for assembly).") + process = traits.Enum( + "assemble", + "disassemble", + argstr="--%s", + position=1, + usedefault=True, + desc="What to do with the transform inputs (assemble or disassemble)", + ) + out_file = File( + exists=False, + argstr="%s", + position=2, + desc="Output file path (only used for disassembly).", + ) + in_file = InputMultiPath( + File(exists=True), + mandatory=True, + argstr="%s...", + position=3, + desc="Input transform file(s)", + ) + output_prefix = Str( + "transform", + usedefault=True, + argstr="%s", + position=4, + desc="A prefix that is prepended to all output files (only used for assembly).", + ) + class CompositeTransformUtilOutputSpec(TraitedSpec): affine_transform = File(desc="Affine transform component") displacement_field = File(desc="Displacement field component") out_file = File(desc="Compound transformation file") + class CompositeTransformUtil(ANTSCommand): """ ANTs utility which can combine or break apart transform files into their individual @@ -1639,7 +1824,7 @@ class CompositeTransformUtil(ANTSCommand): >>> tran.run() # doctest: +SKIP """ - _cmd = 'CompositeTransformUtil' + _cmd = "CompositeTransformUtil" input_spec = CompositeTransformUtilInputSpec output_spec = CompositeTransformUtilOutputSpec @@ -1651,19 +1836,23 @@ def _num_threads_update(self): pass def _format_arg(self, name, spec, value): - if name == 'output_prefix' and self.inputs.process == 'assemble': - return '' - if name == 'out_file' and self.inputs.process == 'disassemble': - return '' + if name == "output_prefix" and self.inputs.process == "assemble": + return "" + if name == "out_file" and self.inputs.process == "disassemble": + return "" return super(CompositeTransformUtil, 
self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self.output_spec().get() - if self.inputs.process == 'disassemble': - outputs['affine_transform'] = os.path.abspath( - '00_{}_AffineTransform.mat'.format(self.inputs.output_prefix)) - outputs['displacement_field'] = os.path.abspath( - '01_{}_DisplacementFieldTransform.nii.gz'.format(self.inputs.output_prefix)) - if self.inputs.process == 'assemble': - outputs['out_file'] = os.path.abspath(self.inputs.out_file) + if self.inputs.process == "disassemble": + outputs["affine_transform"] = os.path.abspath( + "00_{}_AffineTransform.mat".format(self.inputs.output_prefix) + ) + outputs["displacement_field"] = os.path.abspath( + "01_{}_DisplacementFieldTransform.nii.gz".format( + self.inputs.output_prefix + ) + ) + if self.inputs.process == "assemble": + outputs["out_file"] = os.path.abspath(self.inputs.out_file) return outputs diff --git a/nipype/interfaces/ants/resampling.py b/nipype/interfaces/ants/resampling.py index 94bccbdf7d..a5d6a52c04 100644 --- a/nipype/interfaces/ants/resampling.py +++ b/nipype/interfaces/ants/resampling.py @@ -10,62 +10,70 @@ class WarpTimeSeriesImageMultiTransformInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( - 4, - 3, - argstr='%d', - usedefault=True, - desc='image dimension (3 or 4)', - position=1) + 4, 3, argstr="%d", usedefault=True, desc="image dimension (3 or 4)", position=1 + ) input_image = File( - argstr='%s', + argstr="%s", mandatory=True, copyfile=True, - desc=('image to apply transformation to (generally a ' - 'coregistered functional)')) + desc=( + "image to apply transformation to (generally a " "coregistered functional)" + ), + ) out_postfix = traits.Str( - '_wtsimt', - argstr='%s', + "_wtsimt", + argstr="%s", usedefault=True, - desc=('Postfix that is prepended to all output ' - 'files (default = _wtsimt)')) + desc=("Postfix that is prepended to all output " "files (default = _wtsimt)"), + ) reference_image = File( - argstr='-R %s', - xor=['tightest_box'], - desc='reference image space that you wish to warp INTO') + argstr="-R %s", + xor=["tightest_box"], + desc="reference image space that you wish to warp INTO", + ) tightest_box = traits.Bool( - argstr='--tightest-bounding-box', - desc=('computes tightest bounding box (overrided by ' - 'reference_image if given)'), - xor=['reference_image']) + argstr="--tightest-bounding-box", + desc=( + "computes tightest bounding box (overrided by " "reference_image if given)" + ), + xor=["reference_image"], + ) reslice_by_header = traits.Bool( - argstr='--reslice-by-header', - desc=('Uses orientation matrix and origin encoded in ' - 'reference image file header. Not typically used ' - 'with additional transforms')) + argstr="--reslice-by-header", + desc=( + "Uses orientation matrix and origin encoded in " + "reference image file header. 
Not typically used " + "with additional transforms" + ), + ) use_nearest = traits.Bool( - argstr='--use-NN', desc='Use nearest neighbor interpolation') + argstr="--use-NN", desc="Use nearest neighbor interpolation" + ) use_bspline = traits.Bool( - argstr='--use-Bspline', desc='Use 3rd order B-Spline interpolation') + argstr="--use-Bspline", desc="Use 3rd order B-Spline interpolation" + ) transformation_series = InputMultiPath( File(exists=True), - argstr='%s', - desc='transformation file(s) to be applied', + argstr="%s", + desc="transformation file(s) to be applied", mandatory=True, - copyfile=False) + copyfile=False, + ) invert_affine = traits.List( traits.Int, desc=( - 'List of Affine transformations to invert.' - 'E.g.: [1,4,5] inverts the 1st, 4th, and 5th Affines ' - 'found in transformation_series. Note that indexing ' - 'starts with 1 and does not include warp fields. Affine ' - 'transformations are distinguished ' + "List of Affine transformations to invert." + "E.g.: [1,4,5] inverts the 1st, 4th, and 5th Affines " + "found in transformation_series. Note that indexing " + "starts with 1 and does not include warp fields. Affine " + "transformations are distinguished " 'from warp fields by the word "affine" included in their filenames.' - )) + ), + ) class WarpTimeSeriesImageMultiTransformOutputSpec(TraitedSpec): - output_image = File(exists=True, desc='Warped image') + output_image = File(exists=True, desc="Warped image") class WarpTimeSeriesImageMultiTransform(ANTSCommand): @@ -93,25 +101,23 @@ class WarpTimeSeriesImageMultiTransform(ANTSCommand): -i ants_Affine.txt' """ - _cmd = 'WarpTimeSeriesImageMultiTransform' + _cmd = "WarpTimeSeriesImageMultiTransform" input_spec = WarpTimeSeriesImageMultiTransformInputSpec output_spec = WarpTimeSeriesImageMultiTransformOutputSpec def _format_arg(self, opt, spec, val): - if opt == 'out_postfix': - _, name, ext = split_filename( - os.path.abspath(self.inputs.input_image)) + if opt == "out_postfix": + _, name, ext = split_filename(os.path.abspath(self.inputs.input_image)) return name + val + ext - if opt == 'transformation_series': + if opt == "transformation_series": series = [] affine_counter = 0 affine_invert = [] for transformation in val: - if 'Affine' in transformation and \ - isdefined(self.inputs.invert_affine): + if "Affine" in transformation and isdefined(self.inputs.invert_affine): affine_counter += 1 if affine_counter in self.inputs.invert_affine: - series += ['-i'] + series += ["-i"] affine_invert.append(affine_counter) series += [transformation] @@ -120,23 +126,26 @@ def _format_arg(self, opt, spec, val): if diff_inv: raise Exceptions( "Review invert_affine, not all indexes from invert_affine were used, " - "check the description for the full definition") + "check the description for the full definition" + ) - return ' '.join(series) + return " ".join(series) return super(WarpTimeSeriesImageMultiTransform, self)._format_arg( - opt, spec, val) + opt, spec, val + ) def _list_outputs(self): outputs = self._outputs().get() _, name, ext = split_filename(os.path.abspath(self.inputs.input_image)) - outputs['output_image'] = os.path.join(os.getcwd(), ''.join( - (name, self.inputs.out_postfix, ext))) + outputs["output_image"] = os.path.join( + os.getcwd(), "".join((name, self.inputs.out_postfix, ext)) + ) return outputs def _run_interface(self, runtime, correct_return_codes=[0]): - runtime = super(WarpTimeSeriesImageMultiTransform, - self)._run_interface( - runtime, correct_return_codes=[0, 1]) + runtime = 
super(WarpTimeSeriesImageMultiTransform, self)._run_interface( + runtime, correct_return_codes=[0, 1] + ) if "100 % complete" not in runtime.stdout: self.raise_exception(runtime) return runtime @@ -144,70 +153,79 @@ def _run_interface(self, runtime, correct_return_codes=[0]): class WarpImageMultiTransformInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( - 3, - 2, - argstr='%d', - usedefault=True, - desc='image dimension (2 or 3)', - position=1) + 3, 2, argstr="%d", usedefault=True, desc="image dimension (2 or 3)", position=1 + ) input_image = File( - argstr='%s', + argstr="%s", mandatory=True, - desc=('image to apply transformation to (generally a ' - 'coregistered functional)'), - position=2) + desc=( + "image to apply transformation to (generally a " "coregistered functional)" + ), + position=2, + ) output_image = File( genfile=True, hash_files=False, - argstr='%s', - desc='name of the output warped image', + argstr="%s", + desc="name of the output warped image", position=3, - xor=['out_postfix']) + xor=["out_postfix"], + ) out_postfix = File( "_wimt", usedefault=True, hash_files=False, - desc=('Postfix that is prepended to all output ' - 'files (default = _wimt)'), - xor=['output_image']) + desc=("Postfix that is prepended to all output " "files (default = _wimt)"), + xor=["output_image"], + ) reference_image = File( - argstr='-R %s', - xor=['tightest_box'], - desc='reference image space that you wish to warp INTO') + argstr="-R %s", + xor=["tightest_box"], + desc="reference image space that you wish to warp INTO", + ) tightest_box = traits.Bool( - argstr='--tightest-bounding-box', - desc=('computes tightest bounding box (overrided by ' - 'reference_image if given)'), - xor=['reference_image']) + argstr="--tightest-bounding-box", + desc=( + "computes tightest bounding box (overrided by " "reference_image if given)" + ), + xor=["reference_image"], + ) reslice_by_header = traits.Bool( - argstr='--reslice-by-header', - desc=('Uses orientation matrix and origin encoded in ' - 'reference image file header. Not typically used ' - 'with additional transforms')) + argstr="--reslice-by-header", + desc=( + "Uses orientation matrix and origin encoded in " + "reference image file header. Not typically used " + "with additional transforms" + ), + ) use_nearest = traits.Bool( - argstr='--use-NN', desc='Use nearest neighbor interpolation') + argstr="--use-NN", desc="Use nearest neighbor interpolation" + ) use_bspline = traits.Bool( - argstr='--use-BSpline', desc='Use 3rd order B-Spline interpolation') + argstr="--use-BSpline", desc="Use 3rd order B-Spline interpolation" + ) transformation_series = InputMultiPath( File(exists=True), - argstr='%s', - desc='transformation file(s) to be applied', + argstr="%s", + desc="transformation file(s) to be applied", mandatory=True, - position=-1) + position=-1, + ) invert_affine = traits.List( traits.Int, desc=( - 'List of Affine transformations to invert.' - 'E.g.: [1,4,5] inverts the 1st, 4th, and 5th Affines ' - 'found in transformation_series. Note that indexing ' - 'starts with 1 and does not include warp fields. Affine ' - 'transformations are distinguished ' + "List of Affine transformations to invert." + "E.g.: [1,4,5] inverts the 1st, 4th, and 5th Affines " + "found in transformation_series. Note that indexing " + "starts with 1 and does not include warp fields. Affine " + "transformations are distinguished " 'from warp fields by the word "affine" included in their filenames.' 
- )) + ), + ) class WarpImageMultiTransformOutputSpec(TraitedSpec): - output_image = File(exists=True, desc='Warped image') + output_image = File(exists=True, desc="Warped image") class WarpImageMultiTransform(ANTSCommand): @@ -237,28 +255,28 @@ class WarpImageMultiTransform(ANTSCommand): """ - _cmd = 'WarpImageMultiTransform' + _cmd = "WarpImageMultiTransform" input_spec = WarpImageMultiTransformInputSpec output_spec = WarpImageMultiTransformOutputSpec def _gen_filename(self, name): - if name == 'output_image': - _, name, ext = split_filename( - os.path.abspath(self.inputs.input_image)) - return ''.join((name, self.inputs.out_postfix, ext)) + if name == "output_image": + _, name, ext = split_filename(os.path.abspath(self.inputs.input_image)) + return "".join((name, self.inputs.out_postfix, ext)) return None def _format_arg(self, opt, spec, val): - if opt == 'transformation_series': + if opt == "transformation_series": series = [] affine_counter = 0 affine_invert = [] for transformation in val: - if "affine" in transformation.lower() and \ - isdefined(self.inputs.invert_affine): + if "affine" in transformation.lower() and isdefined( + self.inputs.invert_affine + ): affine_counter += 1 if affine_counter in self.inputs.invert_affine: - series += ['-i'] + series += ["-i"] affine_invert.append(affine_counter) series += [transformation] @@ -267,19 +285,21 @@ def _format_arg(self, opt, spec, val): if diff_inv: raise Exceptions( "Review invert_affine, not all indexes from invert_affine were used, " - "check the description for the full definition") + "check the description for the full definition" + ) - return ' '.join(series) + return " ".join(series) return super(WarpImageMultiTransform, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() if isdefined(self.inputs.output_image): - outputs['output_image'] = os.path.abspath(self.inputs.output_image) + outputs["output_image"] = os.path.abspath(self.inputs.output_image) else: - outputs['output_image'] = os.path.abspath( - self._gen_filename('output_image')) + outputs["output_image"] = os.path.abspath( + self._gen_filename("output_image") + ) return outputs @@ -288,81 +308,92 @@ class ApplyTransformsInputSpec(ANTSCommandInputSpec): 2, 3, 4, - argstr='--dimensionality %d', - desc=('This option forces the image to be treated ' - 'as a specified-dimensional image. If not ' - 'specified, antsWarp tries to infer the ' - 'dimensionality from the input image.')) + argstr="--dimensionality %d", + desc=( + "This option forces the image to be treated " + "as a specified-dimensional image. If not " + "specified, antsWarp tries to infer the " + "dimensionality from the input image." + ), + ) input_image_type = traits.Enum( 0, 1, 2, 3, - argstr='--input-image-type %d', - desc=('Option specifying the input image ' - 'type of scalar (default), vector, ' - 'tensor, or time series.')) + argstr="--input-image-type %d", + desc=( + "Option specifying the input image " + "type of scalar (default), vector, " + "tensor, or time series." 
+ ), + ) input_image = File( - argstr='--input %s', + argstr="--input %s", mandatory=True, - desc=('image to apply transformation to (generally a ' - 'coregistered functional)'), - exists=True) + desc=( + "image to apply transformation to (generally a " "coregistered functional)" + ), + exists=True, + ) output_image = traits.Str( - argstr='--output %s', - desc='output file name', - genfile=True, - hash_files=False) + argstr="--output %s", desc="output file name", genfile=True, hash_files=False + ) out_postfix = traits.Str( "_trans", usedefault=True, - desc=('Postfix that is appended to all output ' - 'files (default = _trans)')) + desc=("Postfix that is appended to all output " "files (default = _trans)"), + ) reference_image = File( - argstr='--reference-image %s', + argstr="--reference-image %s", mandatory=True, - desc='reference image space that you wish to warp INTO', - exists=True) + desc="reference image space that you wish to warp INTO", + exists=True, + ) interpolation = traits.Enum( - 'Linear', - 'NearestNeighbor', - 'CosineWindowedSinc', - 'WelchWindowedSinc', - 'HammingWindowedSinc', - 'LanczosWindowedSinc', - 'MultiLabel', - 'Gaussian', - 'BSpline', - argstr='%s', - usedefault=True) + "Linear", + "NearestNeighbor", + "CosineWindowedSinc", + "WelchWindowedSinc", + "HammingWindowedSinc", + "LanczosWindowedSinc", + "MultiLabel", + "Gaussian", + "BSpline", + argstr="%s", + usedefault=True, + ) interpolation_parameters = traits.Either( traits.Tuple(traits.Int()), # BSpline (order) traits.Tuple( - traits.Float(), # Gaussian/MultiLabel (sigma, alpha) - traits.Float())) + traits.Float(), traits.Float() # Gaussian/MultiLabel (sigma, alpha) + ), + ) transforms = traits.Either( InputMultiPath(File(exists=True)), - 'identity', - argstr='%s', + "identity", + argstr="%s", mandatory=True, - desc='transform files: will be applied in reverse order. For ' - 'example, the last specified transform will be applied first.') + desc="transform files: will be applied in reverse order. 
For " + "example, the last specified transform will be applied first.", + ) invert_transform_flags = InputMultiPath(traits.Bool()) - default_value = traits.Float( - 0.0, argstr='--default-value %g', usedefault=True) + default_value = traits.Float(0.0, argstr="--default-value %g", usedefault=True) print_out_composite_warp_file = traits.Bool( False, requires=["output_image"], - desc='output a composite warp file instead of a transformed image') + desc="output a composite warp file instead of a transformed image", + ) float = traits.Bool( - argstr='--float %d', + argstr="--float %d", default_value=False, usedefault=True, - desc='Use float instead of double for computations.') + desc="Use float instead of double for computations.", + ) class ApplyTransformsOutputSpec(TraitedSpec): - output_image = File(exists=True, desc='Warped image') + output_image = File(exists=True, desc="Warped image") class ApplyTransforms(ANTSCommand): @@ -411,12 +442,13 @@ class ApplyTransforms(ANTSCommand): --interpolation BSpline[ 5 ] --output deformed_moving1.nii --reference-image fixed1.nii \ --transform [ ants_Warp.nii.gz, 0 ] --transform [ trans.mat, 0 ]' """ - _cmd = 'antsApplyTransforms' + + _cmd = "antsApplyTransforms" input_spec = ApplyTransformsInputSpec output_spec = ApplyTransformsOutputSpec def _gen_filename(self, name): - if name == 'output_image': + if name == "output_image": output = self.inputs.output_image if not isdefined(output): _, name, ext = split_filename(self.inputs.input_image) @@ -429,15 +461,20 @@ def _get_transform_filenames(self): for ii in range(len(self.inputs.transforms)): if isdefined(self.inputs.invert_transform_flags): if len(self.inputs.transforms) == len( - self.inputs.invert_transform_flags): - invert_code = 1 if self.inputs.invert_transform_flags[ - ii] else 0 - retval.append("--transform [ %s, %d ]" % - (self.inputs.transforms[ii], invert_code)) + self.inputs.invert_transform_flags + ): + invert_code = 1 if self.inputs.invert_transform_flags[ii] else 0 + retval.append( + "--transform [ %s, %d ]" + % (self.inputs.transforms[ii], invert_code) + ) else: - raise Exception(( - "ERROR: The useInverse list must have the same number " - "of entries as the transformsFileName list.")) + raise Exception( + ( + "ERROR: The useInverse list must have the same number " + "of entries as the transformsFileName list." 
+ ) + ) else: retval.append("--transform %s" % self.inputs.transforms[ii]) return " ".join(retval) @@ -446,7 +483,8 @@ def _get_output_warped_filename(self): if isdefined(self.inputs.print_out_composite_warp_file): return "--output [ %s, %d ]" % ( self._gen_filename("output_image"), - int(self.inputs.print_out_composite_warp_file)) + int(self.inputs.print_out_composite_warp_file), + ) else: return "--output %s" % (self._gen_filename("output_image")) @@ -454,25 +492,28 @@ def _format_arg(self, opt, spec, val): if opt == "output_image": return self._get_output_warped_filename() elif opt == "transforms": - if val == 'identity': - return '-t identity' + if val == "identity": + return "-t identity" return self._get_transform_filenames() - elif opt == 'interpolation': - if self.inputs.interpolation in ['BSpline', 'MultiLabel', 'Gaussian'] and \ - isdefined(self.inputs.interpolation_parameters): - return '--interpolation %s[ %s ]' % ( - self.inputs.interpolation, ', '.join([ - str(param) - for param in self.inputs.interpolation_parameters - ])) + elif opt == "interpolation": + if self.inputs.interpolation in [ + "BSpline", + "MultiLabel", + "Gaussian", + ] and isdefined(self.inputs.interpolation_parameters): + return "--interpolation %s[ %s ]" % ( + self.inputs.interpolation, + ", ".join( + [str(param) for param in self.inputs.interpolation_parameters] + ), + ) else: - return '--interpolation %s' % self.inputs.interpolation + return "--interpolation %s" % self.inputs.interpolation return super(ApplyTransforms, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() - outputs['output_image'] = os.path.abspath( - self._gen_filename('output_image')) + outputs["output_image"] = os.path.abspath(self._gen_filename("output_image")) return outputs @@ -481,44 +522,50 @@ class ApplyTransformsToPointsInputSpec(ANTSCommandInputSpec): 2, 3, 4, - argstr='--dimensionality %d', - desc=('This option forces the image to be treated ' - 'as a specified-dimensional image. If not ' - 'specified, antsWarp tries to infer the ' - 'dimensionality from the input image.')) + argstr="--dimensionality %d", + desc=( + "This option forces the image to be treated " + "as a specified-dimensional image. If not " + "specified, antsWarp tries to infer the " + "dimensionality from the input image." + ), + ) input_file = File( - argstr='--input %s', + argstr="--input %s", mandatory=True, - desc= - ("Currently, the only input supported is a csv file with" - " columns including x,y (2D), x,y,z (3D) or x,y,z,t,label (4D) column headers." - " The points should be defined in physical space." - " If in doubt how to convert coordinates from your files to the space" - " required by antsApplyTransformsToPoints try creating/drawing a simple" - " label volume with only one voxel set to 1 and all others set to 0." - " Write down the voxel coordinates. Then use ImageMaths LabelStats to find" - " out what coordinates for this voxel antsApplyTransformsToPoints is" - " expecting."), - exists=True) + desc=( + "Currently, the only input supported is a csv file with" + " columns including x,y (2D), x,y,z (3D) or x,y,z,t,label (4D) column headers." + " The points should be defined in physical space." + " If in doubt how to convert coordinates from your files to the space" + " required by antsApplyTransformsToPoints try creating/drawing a simple" + " label volume with only one voxel set to 1 and all others set to 0." + " Write down the voxel coordinates. 
Then use ImageMaths LabelStats to find" + " out what coordinates for this voxel antsApplyTransformsToPoints is" + " expecting." + ), + exists=True, + ) output_file = traits.Str( - argstr='--output %s', - desc='Name of the output CSV file', - name_source=['input_file'], + argstr="--output %s", + desc="Name of the output CSV file", + name_source=["input_file"], hash_files=False, - name_template='%s_transformed.csv') + name_template="%s_transformed.csv", + ) transforms = traits.List( File(exists=True), - argstr='%s', + argstr="%s", mandatory=True, - desc='transforms that will be applied to the points') + desc="transforms that will be applied to the points", + ) invert_transform_flags = traits.List( - traits.Bool(), - desc='list indicating if a transform should be reversed') + traits.Bool(), desc="list indicating if a transform should be reversed" + ) class ApplyTransformsToPointsOutputSpec(TraitedSpec): - output_file = File( - exists=True, desc='csv file with transformed coordinates') + output_file = File(exists=True, desc="csv file with transformed coordinates") class ApplyTransformsToPoints(ANTSCommand): @@ -540,7 +587,8 @@ class ApplyTransformsToPoints(ANTSCommand): """ - _cmd = 'antsApplyTransformsToPoints' + + _cmd = "antsApplyTransformsToPoints" input_spec = ApplyTransformsToPointsInputSpec output_spec = ApplyTransformsToPointsOutputSpec @@ -549,15 +597,20 @@ def _get_transform_filenames(self): for ii in range(len(self.inputs.transforms)): if isdefined(self.inputs.invert_transform_flags): if len(self.inputs.transforms) == len( - self.inputs.invert_transform_flags): - invert_code = 1 if self.inputs.invert_transform_flags[ - ii] else 0 - retval.append("--transform [ %s, %d ]" % - (self.inputs.transforms[ii], invert_code)) + self.inputs.invert_transform_flags + ): + invert_code = 1 if self.inputs.invert_transform_flags[ii] else 0 + retval.append( + "--transform [ %s, %d ]" + % (self.inputs.transforms[ii], invert_code) + ) else: - raise Exception(( - "ERROR: The useInverse list must have the same number " - "of entries as the transformsFileName list.")) + raise Exception( + ( + "ERROR: The useInverse list must have the same number " + "of entries as the transformsFileName list." 
+ ) + ) else: retval.append("--transform %s" % self.inputs.transforms[ii]) return " ".join(retval) diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py index e9966bf612..e3fe579844 100644 --- a/nipype/interfaces/ants/segmentation.py +++ b/nipype/interfaces/ants/segmentation.py @@ -13,46 +13,50 @@ class AtroposInputSpec(ANTSCommandInputSpec): 3, 2, 4, - argstr='--image-dimensionality %d', + argstr="--image-dimensionality %d", usedefault=True, - desc='image dimension (2, 3, or 4)') + desc="image dimension (2, 3, or 4)", + ) intensity_images = InputMultiPath( - File(exists=True), argstr="--intensity-image %s...", mandatory=True) - mask_image = File(exists=True, argstr='--mask-image %s', mandatory=True) + File(exists=True), argstr="--intensity-image %s...", mandatory=True + ) + mask_image = File(exists=True, argstr="--mask-image %s", mandatory=True) initialization = traits.Enum( - 'Random', - 'Otsu', - 'KMeans', - 'PriorProbabilityImages', - 'PriorLabelImage', + "Random", + "Otsu", + "KMeans", + "PriorProbabilityImages", + "PriorLabelImage", argstr="%s", - requires=['number_of_tissue_classes'], - mandatory=True) + requires=["number_of_tissue_classes"], + mandatory=True, + ) prior_probability_images = InputMultiPath(File(exists=True)) number_of_tissue_classes = traits.Int(mandatory=True) prior_weighting = traits.Float() - prior_probability_threshold = traits.Float(requires=['prior_weighting']) + prior_probability_threshold = traits.Float(requires=["prior_weighting"]) likelihood_model = traits.Str(argstr="--likelihood-model %s") mrf_smoothing_factor = traits.Float(argstr="%s") - mrf_radius = traits.List(traits.Int(), requires=['mrf_smoothing_factor']) + mrf_radius = traits.List(traits.Int(), requires=["mrf_smoothing_factor"]) icm_use_synchronous_update = traits.Bool(argstr="%s") maximum_number_of_icm_terations = traits.Int( - requires=['icm_use_synchronous_update']) + requires=["icm_use_synchronous_update"] + ) n_iterations = traits.Int(argstr="%s") - convergence_threshold = traits.Float(requires=['n_iterations']) + convergence_threshold = traits.Float(requires=["n_iterations"]) posterior_formulation = traits.Str(argstr="%s") use_random_seed = traits.Bool( True, - argstr='--use-random-seed %d', - desc='use random seed value over constant', - usedefault=True) - use_mixture_model_proportions = traits.Bool( - requires=['posterior_formulation']) - out_classified_image_name = File( - argstr="%s", genfile=True, hash_files=False) + argstr="--use-random-seed %d", + desc="use random seed value over constant", + usedefault=True, + ) + use_mixture_model_proportions = traits.Bool(requires=["posterior_formulation"]) + out_classified_image_name = File(argstr="%s", genfile=True, hash_files=False) save_posteriors = traits.Bool() output_posteriors_name_template = traits.Str( - 'POSTERIOR_%02d.nii.gz', usedefault=True) + "POSTERIOR_%02d.nii.gz", usedefault=True + ) class AtroposOutputSpec(TraitedSpec): @@ -97,27 +101,33 @@ class Atropos(ANTSCommand): --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1] --use-random-seed 1' """ + input_spec = AtroposInputSpec output_spec = AtroposOutputSpec - _cmd = 'Atropos' + _cmd = "Atropos" def _format_arg(self, opt, spec, val): - if opt == 'initialization': + if opt == "initialization": retval = "--initialization %s[%d" % ( - val, self.inputs.number_of_tissue_classes) + val, + self.inputs.number_of_tissue_classes, + ) if val == "PriorProbabilityImages": - _, _, ext = split_filename( - 
self.inputs.prior_probability_images[0]) - retval += ",priors/priorProbImages%02d" + \ - ext + ",%g" % self.inputs.prior_weighting + _, _, ext = split_filename(self.inputs.prior_probability_images[0]) + retval += ( + ",priors/priorProbImages%02d" + + ext + + ",%g" % self.inputs.prior_weighting + ) if isdefined(self.inputs.prior_probability_threshold): retval += ",%g" % self.inputs.prior_probability_threshold return retval + "]" - if opt == 'mrf_smoothing_factor': + if opt == "mrf_smoothing_factor": retval = "--mrf [%g" % val if isdefined(self.inputs.mrf_radius): retval += ",%s" % self._format_xarray( - [str(s) for s in self.inputs.mrf_radius]) + [str(s) for s in self.inputs.mrf_radius] + ) return retval + "]" if opt == "icm_use_synchronous_update": retval = "--icm [%d" % val @@ -148,90 +158,105 @@ def _run_interface(self, runtime, correct_return_codes=[0]): os.makedirs(priors_directory) _, _, ext = split_filename(self.inputs.prior_probability_images[0]) for i, f in enumerate(self.inputs.prior_probability_images): - target = os.path.join(priors_directory, - 'priorProbImages%02d' % (i + 1) + ext) - if not (os.path.exists(target) - and os.path.realpath(target) == os.path.abspath(f)): + target = os.path.join( + priors_directory, "priorProbImages%02d" % (i + 1) + ext + ) + if not ( + os.path.exists(target) + and os.path.realpath(target) == os.path.abspath(f) + ): copyfile( os.path.abspath(f), - os.path.join(priors_directory, - 'priorProbImages%02d' % (i + 1) + ext)) + os.path.join( + priors_directory, "priorProbImages%02d" % (i + 1) + ext + ), + ) runtime = super(Atropos, self)._run_interface(runtime) return runtime def _gen_filename(self, name): - if name == 'out_classified_image_name': + if name == "out_classified_image_name": output = self.inputs.out_classified_image_name if not isdefined(output): _, name, ext = split_filename(self.inputs.intensity_images[0]) - output = name + '_labeled' + ext + output = name + "_labeled" + ext return output return None def _list_outputs(self): outputs = self._outputs().get() - outputs['classified_image'] = os.path.abspath( - self._gen_filename('out_classified_image_name')) - if isdefined( - self.inputs.save_posteriors) and self.inputs.save_posteriors: - outputs['posteriors'] = [] + outputs["classified_image"] = os.path.abspath( + self._gen_filename("out_classified_image_name") + ) + if isdefined(self.inputs.save_posteriors) and self.inputs.save_posteriors: + outputs["posteriors"] = [] for i in range(self.inputs.number_of_tissue_classes): - outputs['posteriors'].append( + outputs["posteriors"].append( os.path.abspath( - self.inputs.output_posteriors_name_template % (i + 1))) + self.inputs.output_posteriors_name_template % (i + 1) + ) + ) return outputs class LaplacianThicknessInputSpec(ANTSCommandInputSpec): input_wm = File( - argstr='%s', + argstr="%s", mandatory=True, copyfile=True, - desc='white matter segmentation image', - position=1) + desc="white matter segmentation image", + position=1, + ) input_gm = File( - argstr='%s', + argstr="%s", mandatory=True, copyfile=True, - desc='gray matter segmentation image', - position=2) + desc="gray matter segmentation image", + position=2, + ) output_image = File( - desc='name of output file', - argstr='%s', + desc="name of output file", + argstr="%s", position=3, - name_source=['input_wm'], - name_template='%s_thickness', + name_source=["input_wm"], + name_template="%s_thickness", keep_extension=True, - hash_files=False) + hash_files=False, + ) smooth_param = traits.Float( - argstr='%s', - desc='Sigma of the 
Laplacian Recursive Image Filter (defaults to 1)', - position=4) + argstr="%s", + desc="Sigma of the Laplacian Recursive Image Filter (defaults to 1)", + position=4, + ) prior_thickness = traits.Float( - argstr='%s', - desc='Prior thickness (defaults to 500)', - requires=['smooth_param'], - position=5) + argstr="%s", + desc="Prior thickness (defaults to 500)", + requires=["smooth_param"], + position=5, + ) dT = traits.Float( - argstr='%s', - desc='Time delta used during integration (defaults to 0.01)', - requires=['prior_thickness'], - position=6) + argstr="%s", + desc="Time delta used during integration (defaults to 0.01)", + requires=["prior_thickness"], + position=6, + ) sulcus_prior = traits.Float( - argstr='%s', - desc='Positive floating point number for sulcus prior. ' - 'Authors said that 0.15 might be a reasonable value', - requires=['dT'], - position=7) + argstr="%s", + desc="Positive floating point number for sulcus prior. " + "Authors said that 0.15 might be a reasonable value", + requires=["dT"], + position=7, + ) tolerance = traits.Float( - argstr='%s', - desc='Tolerance to reach during optimization (defaults to 0.001)', - requires=['sulcus_prior'], - position=8) + argstr="%s", + desc="Tolerance to reach during optimization (defaults to 0.001)", + requires=["sulcus_prior"], + position=8, + ) class LaplacianThicknessOutputSpec(TraitedSpec): - output_image = File(exists=True, desc='Cortical thickness') + output_image = File(exists=True, desc="Cortical thickness") class LaplacianThickness(ANTSCommand): @@ -253,81 +278,92 @@ class LaplacianThickness(ANTSCommand): """ - _cmd = 'LaplacianThickness' + _cmd = "LaplacianThickness" input_spec = LaplacianThicknessInputSpec output_spec = LaplacianThicknessOutputSpec class N4BiasFieldCorrectionInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( - 3, - 2, - 4, - argstr='-d %d', - usedefault=True, - desc='image dimension (2, 3 or 4)') + 3, 2, 4, argstr="-d %d", usedefault=True, desc="image dimension (2, 3 or 4)" + ) input_image = File( - argstr='--input-image %s', + argstr="--input-image %s", mandatory=True, - desc=('input for bias correction. Negative values or values close to ' - 'zero should be processed prior to correction')) + desc=( + "input for bias correction. Negative values or values close to " + "zero should be processed prior to correction" + ), + ) mask_image = File( - argstr='--mask-image %s', - desc=('image to specify region to perform final bias correction in')) + argstr="--mask-image %s", + desc=("image to specify region to perform final bias correction in"), + ) weight_image = File( - argstr='--weight-image %s', - desc=('image for relative weighting (e.g. probability map of the white ' - 'matter) of voxels during the B-spline fitting. ')) + argstr="--weight-image %s", + desc=( + "image for relative weighting (e.g. probability map of the white " + "matter) of voxels during the B-spline fitting. 
" + ), + ) output_image = traits.Str( - argstr='--output %s', - desc='output file name', - name_source=['input_image'], - name_template='%s_corrected', + argstr="--output %s", + desc="output file name", + name_source=["input_image"], + name_template="%s_corrected", keep_extension=True, - hash_files=False) + hash_files=False, + ) bspline_fitting_distance = traits.Float(argstr="--bspline-fitting %s") - bspline_order = traits.Int(requires=['bspline_fitting_distance']) + bspline_order = traits.Int(requires=["bspline_fitting_distance"]) shrink_factor = traits.Int(argstr="--shrink-factor %d") n_iterations = traits.List(traits.Int(), argstr="--convergence %s") - convergence_threshold = traits.Float(requires=['n_iterations']) + convergence_threshold = traits.Float(requires=["n_iterations"]) save_bias = traits.Bool( False, mandatory=True, usedefault=True, - desc=('True if the estimated bias should be saved to file.'), - xor=['bias_image']) - bias_image = File( - desc='Filename for the estimated bias.', hash_files=False) + desc=("True if the estimated bias should be saved to file."), + xor=["bias_image"], + ) + bias_image = File(desc="Filename for the estimated bias.", hash_files=False) copy_header = traits.Bool( False, mandatory=True, usedefault=True, - desc='copy headers of the original image into the ' - 'output (corrected) file') + desc="copy headers of the original image into the " "output (corrected) file", + ) rescale_intensities = traits.Bool( - False, usedefault=True, argstr='-r', min_ver='2.1.0', + False, + usedefault=True, + argstr="-r", + min_ver="2.1.0", desc="""\ [NOTE: Only ANTs>=2.1.0] At each iteration, a new intensity mapping is calculated and applied but there is nothing which constrains the new intensity range to be within certain values. The result is that the range can "drift" from the original at each iteration. This option rescales to the [min,max] range of the original image intensities -within the user-specified mask.""") +within the user-specified mask.""", + ) histogram_sharpening = traits.Tuple( (0.15, 0.01, 200), - traits.Float, traits.Float, traits.Int, - argstr='--histogram-sharpening [%g,%g,%d]', + traits.Float, + traits.Float, + traits.Int, + argstr="--histogram-sharpening [%g,%g,%d]", desc="""\ Three-values tuple of histogram sharpening parameters \ (FWHM, wienerNose, numberOfHistogramBins). These options describe the histogram sharpening parameters, i.e. the \ deconvolution step parameters described in the original N3 algorithm. 
-The default values have been shown to work fairly well.""") +The default values have been shown to work fairly well.""", + ) class N4BiasFieldCorrectionOutputSpec(TraitedSpec): - output_image = File(exists=True, desc='Warped image') - bias_image = File(exists=True, desc='Estimated bias') + output_image = File(exists=True, desc="Warped image") + bias_image = File(exists=True, desc="Estimated bias") class N4BiasFieldCorrection(ANTSCommand): @@ -398,7 +434,7 @@ class N4BiasFieldCorrection(ANTSCommand): """ - _cmd = 'N4BiasFieldCorrection' + _cmd = "N4BiasFieldCorrection" input_spec = N4BiasFieldCorrectionInputSpec output_spec = N4BiasFieldCorrectionOutputSpec @@ -408,38 +444,38 @@ def __init__(self, *args, **kwargs): super(N4BiasFieldCorrection, self).__init__(*args, **kwargs) def _format_arg(self, name, trait_spec, value): - if name == 'output_image' and self._out_bias_file: - newval = '[ %s, %s ]' % (value, self._out_bias_file) + if name == "output_image" and self._out_bias_file: + newval = "[ %s, %s ]" % (value, self._out_bias_file) return trait_spec.argstr % newval - if name == 'bspline_fitting_distance': + if name == "bspline_fitting_distance": if isdefined(self.inputs.bspline_order): - newval = '[ %g, %d ]' % (value, self.inputs.bspline_order) + newval = "[ %g, %d ]" % (value, self.inputs.bspline_order) else: - newval = '[ %g ]' % value + newval = "[ %g ]" % value return trait_spec.argstr % newval - if name == 'n_iterations': + if name == "n_iterations": if isdefined(self.inputs.convergence_threshold): - newval = '[ %s, %g ]' % ( + newval = "[ %s, %g ]" % ( self._format_xarray([str(elt) for elt in value]), - self.inputs.convergence_threshold) + self.inputs.convergence_threshold, + ) else: - newval = '[ %s ]' % self._format_xarray( - [str(elt) for elt in value]) + newval = "[ %s ]" % self._format_xarray([str(elt) for elt in value]) return trait_spec.argstr % newval - return super(N4BiasFieldCorrection, self)._format_arg( - name, trait_spec, value) + return super(N4BiasFieldCorrection, self)._format_arg(name, trait_spec, value) def _parse_inputs(self, skip=None): - skip = (skip or []) + ['save_bias', 'bias_image'] + skip = (skip or []) + ["save_bias", "bias_image"] self._out_bias_file = None if self.inputs.save_bias or isdefined(self.inputs.bias_image): bias_image = self.inputs.bias_image if not isdefined(bias_image): - bias_image = fname_presuffix(os.path.basename(self.inputs.input_image), - suffix='_bias') + bias_image = fname_presuffix( + os.path.basename(self.inputs.input_image), suffix="_bias" + ) self._out_bias_file = bias_image return super(N4BiasFieldCorrection, self)._parse_inputs(skip=skip) @@ -448,166 +484,202 @@ def _list_outputs(self): # Fix headers if self.inputs.copy_header: - self._copy_header(outputs['output_image']) + self._copy_header(outputs["output_image"]) if self._out_bias_file: - outputs['bias_image'] = os.path.abspath(self._out_bias_file) + outputs["bias_image"] = os.path.abspath(self._out_bias_file) if self.inputs.copy_header: - self._copy_header(outputs['bias_image']) + self._copy_header(outputs["bias_image"]) return outputs def _copy_header(self, fname): """Copy header from input image to an output image.""" import nibabel as nb + in_img = nb.load(self.inputs.input_image) out_img = nb.load(fname, mmap=False) - new_img = out_img.__class__(out_img.get_fdata(), in_img.affine, - in_img.header) + new_img = out_img.__class__(out_img.get_fdata(), in_img.affine, in_img.header) new_img.set_data_dtype(out_img.get_data_dtype()) new_img.to_filename(fname) class 
CorticalThicknessInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( - 3, 2, argstr='-d %d', usedefault=True, desc='image dimension (2 or 3)') + 3, 2, argstr="-d %d", usedefault=True, desc="image dimension (2 or 3)" + ) anatomical_image = File( exists=True, - argstr='-a %s', - desc=('Structural *intensity* image, typically T1.' - ' If more than one anatomical image is specified,' - ' subsequently specified images are used during the' - ' segmentation process. However, only the first' - ' image is used in the registration of priors.' - ' Our suggestion would be to specify the T1' - ' as the first image.'), - mandatory=True) + argstr="-a %s", + desc=( + "Structural *intensity* image, typically T1." + " If more than one anatomical image is specified," + " subsequently specified images are used during the" + " segmentation process. However, only the first" + " image is used in the registration of priors." + " Our suggestion would be to specify the T1" + " as the first image." + ), + mandatory=True, + ) brain_template = File( exists=True, - argstr='-e %s', - desc=('Anatomical *intensity* template (possibly created using a' - ' population data set with buildtemplateparallel.sh in ANTs).' - ' This template is *not* skull-stripped.'), - mandatory=True) + argstr="-e %s", + desc=( + "Anatomical *intensity* template (possibly created using a" + " population data set with buildtemplateparallel.sh in ANTs)." + " This template is *not* skull-stripped." + ), + mandatory=True, + ) brain_probability_mask = File( exists=True, - argstr='-m %s', - desc='brain probability mask in template space', + argstr="-m %s", + desc="brain probability mask in template space", copyfile=False, - mandatory=True) + mandatory=True, + ) segmentation_priors = InputMultiPath( - File(exists=True), argstr='-p %s', mandatory=True) + File(exists=True), argstr="-p %s", mandatory=True + ) out_prefix = traits.Str( - 'antsCT_', - argstr='-o %s', + "antsCT_", + argstr="-o %s", usedefault=True, - desc=('Prefix that is prepended to all output' - ' files (default = antsCT_)')) + desc=("Prefix that is prepended to all output" " files (default = antsCT_)"), + ) image_suffix = traits.Str( - 'nii.gz', - desc=('any of standard ITK formats,' - ' nii.gz is default'), - argstr='-s %s', - usedefault=True) + "nii.gz", + desc=("any of standard ITK formats," " nii.gz is default"), + argstr="-s %s", + usedefault=True, + ) t1_registration_template = File( exists=True, - desc=('Anatomical *intensity* template' - ' (assumed to be skull-stripped). A common' - ' case would be where this would be the same' - ' template as specified in the -e option which' - ' is not skull stripped.'), - argstr='-t %s', - mandatory=True) + desc=( + "Anatomical *intensity* template" + " (assumed to be skull-stripped). A common" + " case would be where this would be the same" + " template as specified in the -e option which" + " is not skull stripped." + ), + argstr="-t %s", + mandatory=True, + ) extraction_registration_mask = File( exists=True, - argstr='-f %s', - desc=('Mask (defined in the template space) used during' - ' registration for brain extraction.')) + argstr="-f %s", + desc=( + "Mask (defined in the template space) used during" + " registration for brain extraction." 
+ ), + ) keep_temporary_files = traits.Int( - argstr='-k %d', - desc='Keep brain extraction/segmentation warps, etc (default = 0).') + argstr="-k %d", + desc="Keep brain extraction/segmentation warps, etc (default = 0).", + ) max_iterations = traits.Int( - argstr='-i %d', - desc=('ANTS registration max iterations (default = 100x100x70x20)')) + argstr="-i %d", + desc=("ANTS registration max iterations (default = 100x100x70x20)"), + ) prior_segmentation_weight = traits.Float( - argstr='-w %f', - desc=('Atropos spatial prior *probability* weight for' - ' the segmentation')) + argstr="-w %f", + desc=("Atropos spatial prior *probability* weight for" " the segmentation"), + ) segmentation_iterations = traits.Int( - argstr='-n %d', - desc=('N4 -> Atropos -> N4 iterations during segmentation' - ' (default = 3)')) + argstr="-n %d", + desc=("N4 -> Atropos -> N4 iterations during segmentation" " (default = 3)"), + ) posterior_formulation = traits.Str( - argstr='-b %s', - desc=('Atropos posterior formulation and whether or not' - ' to use mixture model proportions.' - ''' e.g 'Socrates[1]' (default) or 'Aristotle[1]'.''' - ' Choose the latter if you' - ' want use the distance priors (see also the -l option' - ' for label propagation control).')) + argstr="-b %s", + desc=( + "Atropos posterior formulation and whether or not" + " to use mixture model proportions." + """ e.g 'Socrates[1]' (default) or 'Aristotle[1]'.""" + " Choose the latter if you" + " want use the distance priors (see also the -l option" + " for label propagation control)." + ), + ) use_floatingpoint_precision = traits.Enum( 0, 1, - argstr='-j %d', - desc=('Use floating point precision in registrations (default = 0)')) + argstr="-j %d", + desc=("Use floating point precision in registrations (default = 0)"), + ) use_random_seeding = traits.Enum( 0, 1, - argstr='-u %d', - desc=('Use random number generated from system clock in Atropos' - ' (default = 1)')) + argstr="-u %d", + desc=( + "Use random number generated from system clock in Atropos" " (default = 1)" + ), + ) b_spline_smoothing = traits.Bool( - argstr='-v', - desc=('Use B-spline SyN for registrations and B-spline' - ' exponential mapping in DiReCT.')) + argstr="-v", + desc=( + "Use B-spline SyN for registrations and B-spline" + " exponential mapping in DiReCT." + ), + ) cortical_label_image = File( - exists=True, desc='Cortical ROI labels to use as a prior for ATITH.') + exists=True, desc="Cortical ROI labels to use as a prior for ATITH." + ) label_propagation = traits.Str( - argstr='-l %s', - desc= - ('Incorporate a distance prior one the posterior formulation. Should be' - ''' of the form 'label[lambda,boundaryProbability]' where label''' - ' is a value of 1,2,3,... denoting label ID. The label' - ' probability for anything outside the current label' - ' = boundaryProbability * exp( -lambda * distanceFromBoundary )' - ' Intuitively, smaller lambda values will increase the spatial capture' - ' range of the distance prior. To apply to all label values, simply omit' - ' specifying the label, i.e. -l [lambda,boundaryProbability].')) + argstr="-l %s", + desc=( + "Incorporate a distance prior one the posterior formulation. Should be" + """ of the form 'label[lambda,boundaryProbability]' where label""" + " is a value of 1,2,3,... denoting label ID. 
The label" + " probability for anything outside the current label" + " = boundaryProbability * exp( -lambda * distanceFromBoundary )" + " Intuitively, smaller lambda values will increase the spatial capture" + " range of the distance prior. To apply to all label values, simply omit" + " specifying the label, i.e. -l [lambda,boundaryProbability]." + ), + ) quick_registration = traits.Bool( - argstr='-q 1', - desc= - ('If = 1, use antsRegistrationSyNQuick.sh as the basis for registration' - ' during brain extraction, brain segmentation, and' - ' (optional) normalization to a template.' - ' Otherwise use antsRegistrationSyN.sh (default = 0).')) + argstr="-q 1", + desc=( + "If = 1, use antsRegistrationSyNQuick.sh as the basis for registration" + " during brain extraction, brain segmentation, and" + " (optional) normalization to a template." + " Otherwise use antsRegistrationSyN.sh (default = 0)." + ), + ) debug = traits.Bool( - argstr='-z 1', + argstr="-z 1", desc=( - 'If > 0, runs a faster version of the script.' - ' Only for testing. Implies -u 0.' - ' Requires single thread computation for complete reproducibility.' - )) + "If > 0, runs a faster version of the script." + " Only for testing. Implies -u 0." + " Requires single thread computation for complete reproducibility." + ), + ) class CorticalThicknessOutputSpec(TraitedSpec): - BrainExtractionMask = File(exists=True, desc='brain extraction mask') - ExtractedBrainN4 = File(exists=True, desc='extracted brain from N4 image') - BrainSegmentation = File(exists=True, desc='brain segmentaion image') - BrainSegmentationN4 = File(exists=True, desc='N4 corrected image') + BrainExtractionMask = File(exists=True, desc="brain extraction mask") + ExtractedBrainN4 = File(exists=True, desc="extracted brain from N4 image") + BrainSegmentation = File(exists=True, desc="brain segmentaion image") + BrainSegmentationN4 = File(exists=True, desc="N4 corrected image") BrainSegmentationPosteriors = OutputMultiPath( - File(exists=True), desc='Posterior probability images') - CorticalThickness = File(exists=True, desc='cortical thickness file') + File(exists=True), desc="Posterior probability images" + ) + CorticalThickness = File(exists=True, desc="cortical thickness file") TemplateToSubject1GenericAffine = File( - exists=True, desc='Template to subject affine') - TemplateToSubject0Warp = File(exists=True, desc='Template to subject warp') - SubjectToTemplate1Warp = File( - exists=True, desc='Template to subject inverse warp') + exists=True, desc="Template to subject affine" + ) + TemplateToSubject0Warp = File(exists=True, desc="Template to subject warp") + SubjectToTemplate1Warp = File(exists=True, desc="Template to subject inverse warp") SubjectToTemplate0GenericAffine = File( - exists=True, desc='Template to subject inverse affine') + exists=True, desc="Template to subject inverse affine" + ) SubjectToTemplateLogJacobian = File( - exists=True, desc='Template to subject log jacobian') + exists=True, desc="Template to subject log jacobian" + ) CorticalThicknessNormedToTemplate = File( - exists=True, desc='Normalized cortical thickness') - BrainVolumes = File(exists=True, desc='Brain volumes as text') + exists=True, desc="Normalized cortical thickness" + ) + BrainVolumes = File(exists=True, desc="Brain volumes as text") class CorticalThickness(ANTSCommand): @@ -633,25 +705,25 @@ class CorticalThickness(ANTSCommand): input_spec = CorticalThicknessInputSpec output_spec = CorticalThicknessOutputSpec - _cmd = 'antsCorticalThickness.sh' + _cmd = 
"antsCorticalThickness.sh" def _format_arg(self, opt, spec, val): - if opt == 'anatomical_image': - retval = '-a %s' % val + if opt == "anatomical_image": + retval = "-a %s" % val return retval - if opt == 'brain_template': - retval = '-e %s' % val + if opt == "brain_template": + retval = "-e %s" % val return retval - if opt == 'brain_probability_mask': - retval = '-m %s' % val + if opt == "brain_probability_mask": + retval = "-m %s" % val return retval - if opt == 'out_prefix': - retval = '-o %s' % val + if opt == "out_prefix": + retval = "-o %s" % val return retval - if opt == 't1_registration_template': - retval = '-t %s' % val + if opt == "t1_registration_template": + retval = "-t %s" % val return retval - if opt == 'segmentation_priors': + if opt == "segmentation_priors": _, _, ext = split_filename(self.inputs.segmentation_priors[0]) retval = "-p nipype_priors/BrainSegmentationPrior%02d" + ext return retval @@ -663,154 +735,199 @@ def _run_interface(self, runtime, correct_return_codes=[0]): os.makedirs(priors_directory) _, _, ext = split_filename(self.inputs.segmentation_priors[0]) for i, f in enumerate(self.inputs.segmentation_priors): - target = os.path.join(priors_directory, - 'BrainSegmentationPrior%02d' % (i + 1) + ext) - if not (os.path.exists(target) - and os.path.realpath(target) == os.path.abspath(f)): + target = os.path.join( + priors_directory, "BrainSegmentationPrior%02d" % (i + 1) + ext + ) + if not ( + os.path.exists(target) + and os.path.realpath(target) == os.path.abspath(f) + ): copyfile(os.path.abspath(f), target) runtime = super(CorticalThickness, self)._run_interface(runtime) return runtime def _list_outputs(self): outputs = self._outputs().get() - outputs['BrainExtractionMask'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + 'BrainExtractionMask.' + - self.inputs.image_suffix) - outputs['ExtractedBrainN4'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + 'ExtractedBrain0N4.' + - self.inputs.image_suffix) - outputs['BrainSegmentation'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + 'BrainSegmentation.' + - self.inputs.image_suffix) - outputs['BrainSegmentationN4'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + 'BrainSegmentation0N4.' + - self.inputs.image_suffix) + outputs["BrainExtractionMask"] = os.path.join( + os.getcwd(), + self.inputs.out_prefix + "BrainExtractionMask." + self.inputs.image_suffix, + ) + outputs["ExtractedBrainN4"] = os.path.join( + os.getcwd(), + self.inputs.out_prefix + "ExtractedBrain0N4." + self.inputs.image_suffix, + ) + outputs["BrainSegmentation"] = os.path.join( + os.getcwd(), + self.inputs.out_prefix + "BrainSegmentation." + self.inputs.image_suffix, + ) + outputs["BrainSegmentationN4"] = os.path.join( + os.getcwd(), + self.inputs.out_prefix + "BrainSegmentation0N4." + self.inputs.image_suffix, + ) posteriors = [] for i in range(len(self.inputs.segmentation_priors)): posteriors.append( - os.path.join(os.getcwd(), self.inputs.out_prefix + - 'BrainSegmentationPosteriors%02d.' % - (i + 1) + self.inputs.image_suffix)) - outputs['BrainSegmentationPosteriors'] = posteriors - outputs['CorticalThickness'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + 'CorticalThickness.' + - self.inputs.image_suffix) - outputs['TemplateToSubject1GenericAffine'] = os.path.join( + os.path.join( + os.getcwd(), + self.inputs.out_prefix + + "BrainSegmentationPosteriors%02d." 
% (i + 1) + + self.inputs.image_suffix, + ) + ) + outputs["BrainSegmentationPosteriors"] = posteriors + outputs["CorticalThickness"] = os.path.join( + os.getcwd(), + self.inputs.out_prefix + "CorticalThickness." + self.inputs.image_suffix, + ) + outputs["TemplateToSubject1GenericAffine"] = os.path.join( + os.getcwd(), self.inputs.out_prefix + "TemplateToSubject1GenericAffine.mat" + ) + outputs["TemplateToSubject0Warp"] = os.path.join( + os.getcwd(), + self.inputs.out_prefix + + "TemplateToSubject0Warp." + + self.inputs.image_suffix, + ) + outputs["SubjectToTemplate1Warp"] = os.path.join( os.getcwd(), - self.inputs.out_prefix + 'TemplateToSubject1GenericAffine.mat') - outputs['TemplateToSubject0Warp'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + 'TemplateToSubject0Warp.' + - self.inputs.image_suffix) - outputs['SubjectToTemplate1Warp'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + 'SubjectToTemplate1Warp.' + - self.inputs.image_suffix) - outputs['SubjectToTemplate0GenericAffine'] = os.path.join( + self.inputs.out_prefix + + "SubjectToTemplate1Warp." + + self.inputs.image_suffix, + ) + outputs["SubjectToTemplate0GenericAffine"] = os.path.join( + os.getcwd(), self.inputs.out_prefix + "SubjectToTemplate0GenericAffine.mat" + ) + outputs["SubjectToTemplateLogJacobian"] = os.path.join( os.getcwd(), - self.inputs.out_prefix + 'SubjectToTemplate0GenericAffine.mat') - outputs['SubjectToTemplateLogJacobian'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + - 'SubjectToTemplateLogJacobian.' + self.inputs.image_suffix) - outputs['CorticalThicknessNormedToTemplate'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + 'CorticalThickness.' + - self.inputs.image_suffix) - outputs['BrainVolumes'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + 'brainvols.csv') + self.inputs.out_prefix + + "SubjectToTemplateLogJacobian." + + self.inputs.image_suffix, + ) + outputs["CorticalThicknessNormedToTemplate"] = os.path.join( + os.getcwd(), + self.inputs.out_prefix + "CorticalThickness." + self.inputs.image_suffix, + ) + outputs["BrainVolumes"] = os.path.join( + os.getcwd(), self.inputs.out_prefix + "brainvols.csv" + ) return outputs class BrainExtractionInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( - 3, 2, argstr='-d %d', usedefault=True, desc='image dimension (2 or 3)') + 3, 2, argstr="-d %d", usedefault=True, desc="image dimension (2 or 3)" + ) anatomical_image = File( exists=True, - argstr='-a %s', - desc=('Structural image, typically T1. If more than one' - ' anatomical image is specified, subsequently specified' - ' images are used during the segmentation process. However,' - ' only the first image is used in the registration of priors.' - ' Our suggestion would be to specify the T1 as the first image.' - ' Anatomical template created using e.g. LPBA40 data set with' - ' buildtemplateparallel.sh in ANTs.'), - mandatory=True) + argstr="-a %s", + desc=( + "Structural image, typically T1. If more than one" + " anatomical image is specified, subsequently specified" + " images are used during the segmentation process. However," + " only the first image is used in the registration of priors." + " Our suggestion would be to specify the T1 as the first image." + " Anatomical template created using e.g. LPBA40 data set with" + " buildtemplateparallel.sh in ANTs." + ), + mandatory=True, + ) brain_template = File( exists=True, - argstr='-e %s', - desc=('Anatomical template created using e.g. 
LPBA40 data set with' - ' buildtemplateparallel.sh in ANTs.'), - mandatory=True) + argstr="-e %s", + desc=( + "Anatomical template created using e.g. LPBA40 data set with" + " buildtemplateparallel.sh in ANTs." + ), + mandatory=True, + ) brain_probability_mask = File( exists=True, - argstr='-m %s', - desc=('Brain probability mask created using e.g. LPBA40 data set which' - ' have brain masks defined, and warped to anatomical template and' - ' averaged resulting in a probability image.'), + argstr="-m %s", + desc=( + "Brain probability mask created using e.g. LPBA40 data set which" + " have brain masks defined, and warped to anatomical template and" + " averaged resulting in a probability image." + ), copyfile=False, - mandatory=True) + mandatory=True, + ) out_prefix = traits.Str( - 'highres001_', - argstr='-o %s', + "highres001_", + argstr="-o %s", usedefault=True, - desc=('Prefix that is prepended to all output' - ' files (default = highress001_)')) + desc=( + "Prefix that is prepended to all output" " files (default = highress001_)" + ), + ) extraction_registration_mask = File( exists=True, - argstr='-f %s', - desc=('Mask (defined in the template space) used during' - ' registration for brain extraction.' - ' To limit the metric computation to a specific region.')) + argstr="-f %s", + desc=( + "Mask (defined in the template space) used during" + " registration for brain extraction." + " To limit the metric computation to a specific region." + ), + ) image_suffix = traits.Str( - 'nii.gz', - desc=('any of standard ITK formats,' - ' nii.gz is default'), - argstr='-s %s', - usedefault=True) + "nii.gz", + desc=("any of standard ITK formats," " nii.gz is default"), + argstr="-s %s", + usedefault=True, + ) use_random_seeding = traits.Enum( 0, 1, - argstr='-u %d', - desc=('Use random number generated from system clock in Atropos' - ' (default = 1)')) + argstr="-u %d", + desc=( + "Use random number generated from system clock in Atropos" " (default = 1)" + ), + ) keep_temporary_files = traits.Int( - argstr='-k %d', - desc='Keep brain extraction/segmentation warps, etc (default = 0).') + argstr="-k %d", + desc="Keep brain extraction/segmentation warps, etc (default = 0).", + ) use_floatingpoint_precision = traits.Enum( 0, 1, - argstr='-q %d', - desc=('Use floating point precision in registrations (default = 0)')) + argstr="-q %d", + desc=("Use floating point precision in registrations (default = 0)"), + ) debug = traits.Bool( - argstr='-z 1', + argstr="-z 1", desc=( - 'If > 0, runs a faster version of the script.' - ' Only for testing. Implies -u 0.' - ' Requires single thread computation for complete reproducibility.' - )) + "If > 0, runs a faster version of the script." + " Only for testing. Implies -u 0." + " Requires single thread computation for complete reproducibility." 
+ ), + ) class BrainExtractionOutputSpec(TraitedSpec): - BrainExtractionMask = File(exists=True, desc='brain extraction mask') - BrainExtractionBrain = File(exists=True, desc='brain extraction image') - BrainExtractionCSF = File( - exists=True, desc='segmentation mask with only CSF') + BrainExtractionMask = File(exists=True, desc="brain extraction mask") + BrainExtractionBrain = File(exists=True, desc="brain extraction image") + BrainExtractionCSF = File(exists=True, desc="segmentation mask with only CSF") BrainExtractionGM = File( - exists=True, desc='segmentation mask with only grey matter') - BrainExtractionInitialAffine = File(exists=True, desc='') - BrainExtractionInitialAffineFixed = File(exists=True, desc='') - BrainExtractionInitialAffineMoving = File(exists=True, desc='') - BrainExtractionLaplacian = File(exists=True, desc='') - BrainExtractionPrior0GenericAffine = File(exists=True, desc='') - BrainExtractionPrior1InverseWarp = File(exists=True, desc='') - BrainExtractionPrior1Warp = File(exists=True, desc='') - BrainExtractionPriorWarped = File(exists=True, desc='') + exists=True, desc="segmentation mask with only grey matter" + ) + BrainExtractionInitialAffine = File(exists=True, desc="") + BrainExtractionInitialAffineFixed = File(exists=True, desc="") + BrainExtractionInitialAffineMoving = File(exists=True, desc="") + BrainExtractionLaplacian = File(exists=True, desc="") + BrainExtractionPrior0GenericAffine = File(exists=True, desc="") + BrainExtractionPrior1InverseWarp = File(exists=True, desc="") + BrainExtractionPrior1Warp = File(exists=True, desc="") + BrainExtractionPriorWarped = File(exists=True, desc="") BrainExtractionSegmentation = File( - exists=True, desc='segmentation mask with CSF, GM, and WM') - BrainExtractionTemplateLaplacian = File(exists=True, desc='') - BrainExtractionTmp = File(exists=True, desc='') + exists=True, desc="segmentation mask with CSF, GM, and WM" + ) + BrainExtractionTemplateLaplacian = File(exists=True, desc="") + BrainExtractionTmp = File(exists=True, desc="") BrainExtractionWM = File( - exists=True, desc='segmenration mask with only white matter') - N4Corrected0 = File(exists=True, desc='N4 bias field corrected image') - N4Truncated0 = File(exists=True, desc='') + exists=True, desc="segmenration mask with only white matter" + ) + N4Corrected0 = File(exists=True, desc="N4 bias field corrected image") + N4Truncated0 = File(exists=True, desc="") class BrainExtraction(ANTSCommand): @@ -827,45 +944,46 @@ class BrainExtraction(ANTSCommand): 'antsBrainExtraction.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz -e study_template.nii.gz -d 3 \ -s nii.gz -o highres001_' """ + input_spec = BrainExtractionInputSpec output_spec = BrainExtractionOutputSpec - _cmd = 'antsBrainExtraction.sh' + _cmd = "antsBrainExtraction.sh" - def _run_interface(self, runtime, correct_return_codes=(0, )): + def _run_interface(self, runtime, correct_return_codes=(0,)): # antsBrainExtraction.sh requires ANTSPATH to be defined out_environ = self._get_environ() - ants_path = out_environ.get('ANTSPATH', None) or os.getenv( - 'ANTSPATH', None) + ants_path = out_environ.get("ANTSPATH", None) or os.getenv("ANTSPATH", None) if ants_path is None: # Check for antsRegistration, which is under bin/ (the $ANTSPATH) instead of # checking for antsBrainExtraction.sh which is under script/ - cmd_path = which('antsRegistration', env=runtime.environ) + cmd_path = which("antsRegistration", env=runtime.environ) if not cmd_path: raise RuntimeError( 'The environment variable $ANTSPATH is 
not defined in host "%s", ' - 'and Nipype could not determine it automatically.' % - runtime.hostname) + "and Nipype could not determine it automatically." + % runtime.hostname + ) ants_path = os.path.dirname(cmd_path) - self.inputs.environ.update({'ANTSPATH': ants_path}) - runtime.environ.update({'ANTSPATH': ants_path}) + self.inputs.environ.update({"ANTSPATH": ants_path}) + runtime.environ.update({"ANTSPATH": ants_path}) runtime = super(BrainExtraction, self)._run_interface(runtime) # Still, double-check if it didn't found N4 - if 'we cant find' in runtime.stdout: - for line in runtime.stdout.split('\n'): - if line.strip().startswith('we cant find'): - tool = line.strip().replace('we cant find the', - '').split(' ')[0] + if "we cant find" in runtime.stdout: + for line in runtime.stdout.split("\n"): + if line.strip().startswith("we cant find"): + tool = line.strip().replace("we cant find the", "").split(" ")[0] break errmsg = ( 'antsBrainExtraction.sh requires "%s" to be found in $ANTSPATH ' - '($ANTSPATH="%s").') % (tool, ants_path) + '($ANTSPATH="%s").' + ) % (tool, ants_path) if runtime.stderr is None: runtime.stderr = errmsg else: - runtime.stderr += '\n' + errmsg + runtime.stderr += "\n" + errmsg runtime.returncode = 1 self.raise_exception(runtime) @@ -873,64 +991,105 @@ def _run_interface(self, runtime, correct_return_codes=(0, )): def _list_outputs(self): outputs = self._outputs().get() - outputs['BrainExtractionMask'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + 'BrainExtractionMask.' + - self.inputs.image_suffix) - outputs['BrainExtractionBrain'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + 'BrainExtractionBrain.' + - self.inputs.image_suffix) - if isdefined(self.inputs.keep_temporary_files - ) and self.inputs.keep_temporary_files != 0: - outputs['BrainExtractionCSF'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + 'BrainExtractionCSF.' + - self.inputs.image_suffix) - outputs['BrainExtractionGM'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + 'BrainExtractionGM.' + - self.inputs.image_suffix) - outputs['BrainExtractionInitialAffine'] = os.path.join( + outputs["BrainExtractionMask"] = os.path.join( + os.getcwd(), + self.inputs.out_prefix + "BrainExtractionMask." + self.inputs.image_suffix, + ) + outputs["BrainExtractionBrain"] = os.path.join( + os.getcwd(), + self.inputs.out_prefix + "BrainExtractionBrain." + self.inputs.image_suffix, + ) + if ( + isdefined(self.inputs.keep_temporary_files) + and self.inputs.keep_temporary_files != 0 + ): + outputs["BrainExtractionCSF"] = os.path.join( + os.getcwd(), + self.inputs.out_prefix + + "BrainExtractionCSF." + + self.inputs.image_suffix, + ) + outputs["BrainExtractionGM"] = os.path.join( os.getcwd(), - self.inputs.out_prefix + 'BrainExtractionInitialAffine.mat') - outputs['BrainExtractionInitialAffineFixed'] = os.path.join( + self.inputs.out_prefix + + "BrainExtractionGM." + + self.inputs.image_suffix, + ) + outputs["BrainExtractionInitialAffine"] = os.path.join( + os.getcwd(), self.inputs.out_prefix + "BrainExtractionInitialAffine.mat" + ) + outputs["BrainExtractionInitialAffineFixed"] = os.path.join( os.getcwd(), - self.inputs.out_prefix + 'BrainExtractionInitialAffineFixed.' + - self.inputs.image_suffix) - outputs['BrainExtractionInitialAffineMoving'] = os.path.join( + self.inputs.out_prefix + + "BrainExtractionInitialAffineFixed." 
+ + self.inputs.image_suffix, + ) + outputs["BrainExtractionInitialAffineMoving"] = os.path.join( os.getcwd(), - self.inputs.out_prefix + 'BrainExtractionInitialAffineMoving.' - + self.inputs.image_suffix) - outputs['BrainExtractionLaplacian'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + - 'BrainExtractionLaplacian.' + self.inputs.image_suffix) - outputs['BrainExtractionPrior0GenericAffine'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + - 'BrainExtractionPrior0GenericAffine.mat') - outputs['BrainExtractionPrior1InverseWarp'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + - 'BrainExtractionPrior1InverseWarp.' + self.inputs.image_suffix) - outputs['BrainExtractionPrior1Warp'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + - 'BrainExtractionPrior1Warp.' + self.inputs.image_suffix) - outputs['BrainExtractionPriorWarped'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + - 'BrainExtractionPriorWarped.' + self.inputs.image_suffix) - outputs['BrainExtractionSegmentation'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + - 'BrainExtractionSegmentation.' + self.inputs.image_suffix) - outputs['BrainExtractionTemplateLaplacian'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + - 'BrainExtractionTemplateLaplacian.' + self.inputs.image_suffix) - outputs['BrainExtractionTmp'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + 'BrainExtractionTmp.' + - self.inputs.image_suffix) - outputs['BrainExtractionWM'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + 'BrainExtractionWM.' + - self.inputs.image_suffix) - outputs['N4Corrected0'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + 'N4Corrected0.' + - self.inputs.image_suffix) - outputs['N4Truncated0'] = os.path.join( - os.getcwd(), self.inputs.out_prefix + 'N4Truncated0.' + - self.inputs.image_suffix) + self.inputs.out_prefix + + "BrainExtractionInitialAffineMoving." + + self.inputs.image_suffix, + ) + outputs["BrainExtractionLaplacian"] = os.path.join( + os.getcwd(), + self.inputs.out_prefix + + "BrainExtractionLaplacian." + + self.inputs.image_suffix, + ) + outputs["BrainExtractionPrior0GenericAffine"] = os.path.join( + os.getcwd(), + self.inputs.out_prefix + "BrainExtractionPrior0GenericAffine.mat", + ) + outputs["BrainExtractionPrior1InverseWarp"] = os.path.join( + os.getcwd(), + self.inputs.out_prefix + + "BrainExtractionPrior1InverseWarp." + + self.inputs.image_suffix, + ) + outputs["BrainExtractionPrior1Warp"] = os.path.join( + os.getcwd(), + self.inputs.out_prefix + + "BrainExtractionPrior1Warp." + + self.inputs.image_suffix, + ) + outputs["BrainExtractionPriorWarped"] = os.path.join( + os.getcwd(), + self.inputs.out_prefix + + "BrainExtractionPriorWarped." + + self.inputs.image_suffix, + ) + outputs["BrainExtractionSegmentation"] = os.path.join( + os.getcwd(), + self.inputs.out_prefix + + "BrainExtractionSegmentation." + + self.inputs.image_suffix, + ) + outputs["BrainExtractionTemplateLaplacian"] = os.path.join( + os.getcwd(), + self.inputs.out_prefix + + "BrainExtractionTemplateLaplacian." + + self.inputs.image_suffix, + ) + outputs["BrainExtractionTmp"] = os.path.join( + os.getcwd(), + self.inputs.out_prefix + + "BrainExtractionTmp." + + self.inputs.image_suffix, + ) + outputs["BrainExtractionWM"] = os.path.join( + os.getcwd(), + self.inputs.out_prefix + + "BrainExtractionWM." + + self.inputs.image_suffix, + ) + outputs["N4Corrected0"] = os.path.join( + os.getcwd(), + self.inputs.out_prefix + "N4Corrected0." 
+ self.inputs.image_suffix, + ) + outputs["N4Truncated0"] = os.path.join( + os.getcwd(), + self.inputs.out_prefix + "N4Truncated0." + self.inputs.image_suffix, + ) return outputs @@ -940,75 +1099,79 @@ class JointFusionInputSpec(ANTSCommandInputSpec): 3, 2, 4, - argstr='%d', + argstr="%d", position=0, usedefault=True, mandatory=True, - desc='image dimension (2, 3, or 4)') + desc="image dimension (2, 3, or 4)", + ) modalities = traits.Int( - argstr='%d', - position=1, - mandatory=True, - desc='Number of modalities or features') + argstr="%d", position=1, mandatory=True, desc="Number of modalities or features" + ) warped_intensity_images = InputMultiPath( - File(exists=True), - argstr="-g %s...", - mandatory=True, - desc='Warped atlas images') + File(exists=True), argstr="-g %s...", mandatory=True, desc="Warped atlas images" + ) target_image = InputMultiPath( - File(exists=True), - argstr='-tg %s...', - mandatory=True, - desc='Target image(s)') + File(exists=True), argstr="-tg %s...", mandatory=True, desc="Target image(s)" + ) warped_label_images = InputMultiPath( File(exists=True), argstr="-l %s...", mandatory=True, - desc='Warped atlas segmentations') + desc="Warped atlas segmentations", + ) method = traits.Str( - default='Joint', - argstr='-m %s', + default="Joint", + argstr="-m %s", usedefault=True, - desc=('Select voting method. Options: Joint (Joint' - ' Label Fusion). May be followed by optional' - ' parameters in brackets, e.g., -m Joint[0.1,2]')) + desc=( + "Select voting method. Options: Joint (Joint" + " Label Fusion). May be followed by optional" + " parameters in brackets, e.g., -m Joint[0.1,2]" + ), + ) alpha = traits.Float( default=0.1, usedefault=True, - requires=['method'], - desc=('Regularization term added to matrix Mx for inverse')) + requires=["method"], + desc=("Regularization term added to matrix Mx for inverse"), + ) beta = traits.Int( default=2, usedefault=True, - requires=['method'], - desc=('Exponent for mapping intensity difference to joint error')) + requires=["method"], + desc=("Exponent for mapping intensity difference to joint error"), + ) output_label_image = File( - argstr='%s', + argstr="%s", mandatory=True, position=-1, - name_template='%s', - output_name='output_label_image', - desc='Output fusion label map image') + name_template="%s", + output_name="output_label_image", + desc="Output fusion label map image", + ) patch_radius = traits.ListInt( minlen=3, maxlen=3, - argstr='-rp %s', - desc=('Patch radius for similarity measures, ' - 'scalar or vector. Default: 2x2x2')) + argstr="-rp %s", + desc=( + "Patch radius for similarity measures, " "scalar or vector. Default: 2x2x2" + ), + ) search_radius = traits.ListInt( - minlen=3, - maxlen=3, - argstr='-rs %s', - desc='Local search radius. Default: 3x3x3') + minlen=3, maxlen=3, argstr="-rs %s", desc="Local search radius. 
Default: 3x3x3" + ) exclusion_region = File( exists=True, - argstr='-x %s', - desc=('Specify an exclusion region for the given label.')) + argstr="-x %s", + desc=("Specify an exclusion region for the given label."), + ) atlas_group_id = traits.ListInt( - argstr='-gp %d...', desc='Assign a group ID for each atlas') + argstr="-gp %d...", desc="Assign a group ID for each atlas" + ) atlas_group_weights = traits.ListInt( - argstr='-gpw %d...', - desc=('Assign the voting weights to each atlas group')) + argstr="-gpw %d...", desc=("Assign the voting weights to each atlas group") + ) class JointFusionOutputSpec(TraitedSpec): @@ -1047,33 +1210,36 @@ class JointFusion(ANTSCommand): 'jointfusion 3 1 -m Joint[0.5,1] -rp 3x2x1 -rs 1x2x3 -tg T1.nii -g im1.nii -g im2.nii -g im3.nii \ -l segmentation0.nii.gz -l segmentation1.nii.gz -l segmentation1.nii.gz fusion_labelimage_output.nii' """ + input_spec = JointFusionInputSpec output_spec = JointFusionOutputSpec - _cmd = 'jointfusion' + _cmd = "jointfusion" def _format_arg(self, opt, spec, val): - if opt == 'method': - if '[' in val: - retval = '-m {0}'.format(val) + if opt == "method": + if "[" in val: + retval = "-m {0}".format(val) else: - retval = '-m {0}[{1},{2}]'.format( - self.inputs.method, self.inputs.alpha, self.inputs.beta) - elif opt == 'patch_radius': - retval = '-rp {0}'.format(self._format_xarray(val)) - elif opt == 'search_radius': - retval = '-rs {0}'.format(self._format_xarray(val)) + retval = "-m {0}[{1},{2}]".format( + self.inputs.method, self.inputs.alpha, self.inputs.beta + ) + elif opt == "patch_radius": + retval = "-rp {0}".format(self._format_xarray(val)) + elif opt == "search_radius": + retval = "-rs {0}".format(self._format_xarray(val)) else: - if opt == 'warped_intensity_images': - assert len(val) == self.inputs.modalities * len(self.inputs.warped_label_images), \ - "Number of intensity images and label maps must be the same {0}!={1}".format( - len(val), len(self.inputs.warped_label_images)) + if opt == "warped_intensity_images": + assert len(val) == self.inputs.modalities * len( + self.inputs.warped_label_images + ), "Number of intensity images and label maps must be the same {0}!={1}".format( + len(val), len(self.inputs.warped_label_images) + ) return super(JointFusion, self)._format_arg(opt, spec, val) return retval def _list_outputs(self): outputs = self._outputs().get() - outputs['output_label_image'] = os.path.abspath( - self.inputs.output_label_image) + outputs["output_label_image"] = os.path.abspath(self.inputs.output_label_image) return outputs @@ -1082,52 +1248,61 @@ class DenoiseImageInputSpec(ANTSCommandInputSpec): 2, 3, 4, - argstr='-d %d', - desc='This option forces the image to be treated ' - 'as a specified-dimensional image. If not ' - 'specified, the program tries to infer the ' - 'dimensionality from the input image.') + argstr="-d %d", + desc="This option forces the image to be treated " + "as a specified-dimensional image. 
If not " + "specified, the program tries to infer the " + "dimensionality from the input image.", + ) input_image = File( exists=True, argstr="-i %s", mandatory=True, - desc='A scalar image is expected as input for noise correction.') + desc="A scalar image is expected as input for noise correction.", + ) noise_model = traits.Enum( - 'Gaussian', - 'Rician', - argstr='-n %s', + "Gaussian", + "Rician", + argstr="-n %s", usedefault=True, - desc=('Employ a Rician or Gaussian noise model.')) + desc=("Employ a Rician or Gaussian noise model."), + ) shrink_factor = traits.Int( default_value=1, usedefault=True, - argstr='-s %s', - desc=('Running noise correction on large images can' - ' be time consuming. To lessen computation time,' - ' the input image can be resampled. The shrink' - ' factor, specified as a single integer, describes' - ' this resampling. Shrink factor = 1 is the default.')) + argstr="-s %s", + desc=( + "Running noise correction on large images can" + " be time consuming. To lessen computation time," + " the input image can be resampled. The shrink" + " factor, specified as a single integer, describes" + " this resampling. Shrink factor = 1 is the default." + ), + ) output_image = File( argstr="-o %s", - name_source=['input_image'], + name_source=["input_image"], hash_files=False, keep_extension=True, - name_template='%s_noise_corrected', - desc='The output consists of the noise corrected' - ' version of the input image.') + name_template="%s_noise_corrected", + desc="The output consists of the noise corrected" + " version of the input image.", + ) save_noise = traits.Bool( False, mandatory=True, usedefault=True, - desc=('True if the estimated noise should be saved to file.'), - xor=['noise_image']) + desc=("True if the estimated noise should be saved to file."), + xor=["noise_image"], + ) noise_image = File( - name_source=['input_image'], + name_source=["input_image"], hash_files=False, keep_extension=True, - name_template='%s_noise', - desc='Filename for the estimated noise.') - verbose = traits.Bool(False, argstr="-v", desc=('Verbose output.')) + name_template="%s_noise", + desc="Filename for the estimated noise.", + ) + verbose = traits.Bool(False, argstr="-v", desc=("Verbose output.")) class DenoiseImageOutputSpec(TraitedSpec): @@ -1160,16 +1335,19 @@ class DenoiseImage(ANTSCommand): >>> denoise_3.cmdline 'DenoiseImage -i im1.nii -n Gaussian -o [ im1_noise_corrected.nii, im1_noise.nii ] -s 1' """ + input_spec = DenoiseImageInputSpec output_spec = DenoiseImageOutputSpec - _cmd = 'DenoiseImage' + _cmd = "DenoiseImage" def _format_arg(self, name, trait_spec, value): - if ((name == 'output_image') and - (self.inputs.save_noise or isdefined(self.inputs.noise_image))): - newval = '[ %s, %s ]' % ( - self._filename_from_source('output_image'), - self._filename_from_source('noise_image')) + if (name == "output_image") and ( + self.inputs.save_noise or isdefined(self.inputs.noise_image) + ): + newval = "[ %s, %s ]" % ( + self._filename_from_source("output_image"), + self._filename_from_source("noise_image"), + ) return trait_spec.argstr % newval return super(DenoiseImage, self)._format_arg(name, trait_spec, value) @@ -1180,121 +1358,145 @@ class AntsJointFusionInputSpec(ANTSCommandInputSpec): 3, 2, 4, - argstr='-d %d', - desc='This option forces the image to be treated ' - 'as a specified-dimensional image. 
If not ' - 'specified, the program tries to infer the ' - 'dimensionality from the input image.') + argstr="-d %d", + desc="This option forces the image to be treated " + "as a specified-dimensional image. If not " + "specified, the program tries to infer the " + "dimensionality from the input image.", + ) target_image = traits.List( InputMultiPath(File(exists=True)), - argstr='-t %s', + argstr="-t %s", mandatory=True, - desc='The target image (or ' - 'multimodal target images) assumed to be ' - 'aligned to a common image domain.') + desc="The target image (or " + "multimodal target images) assumed to be " + "aligned to a common image domain.", + ) atlas_image = traits.List( InputMultiPath(File(exists=True)), argstr="-g %s...", mandatory=True, - desc='The atlas image (or ' - 'multimodal atlas images) assumed to be ' - 'aligned to a common image domain.') + desc="The atlas image (or " + "multimodal atlas images) assumed to be " + "aligned to a common image domain.", + ) atlas_segmentation_image = InputMultiPath( File(exists=True), argstr="-l %s...", mandatory=True, - desc='The atlas segmentation ' - 'images. For performing label fusion the number ' - 'of specified segmentations should be identical ' - 'to the number of atlas image sets.') + desc="The atlas segmentation " + "images. For performing label fusion the number " + "of specified segmentations should be identical " + "to the number of atlas image sets.", + ) alpha = traits.Float( default_value=0.1, usedefault=True, - argstr='-a %s', + argstr="-a %s", desc=( - 'Regularization ' - 'term added to matrix Mx for calculating the inverse. Default = 0.1' - )) + "Regularization " + "term added to matrix Mx for calculating the inverse. Default = 0.1" + ), + ) beta = traits.Float( default_value=2.0, usedefault=True, - argstr='-b %s', - desc=('Exponent for mapping ' - 'intensity difference to the joint error. Default = 2.0')) + argstr="-b %s", + desc=( + "Exponent for mapping " + "intensity difference to the joint error. Default = 2.0" + ), + ) retain_label_posterior_images = traits.Bool( False, - argstr='-r', + argstr="-r", usedefault=True, - requires=['atlas_segmentation_image'], - desc=('Retain label posterior probability images. Requires ' - 'atlas segmentations to be specified. Default = false')) + requires=["atlas_segmentation_image"], + desc=( + "Retain label posterior probability images. Requires " + "atlas segmentations to be specified. Default = false" + ), + ) retain_atlas_voting_images = traits.Bool( False, - argstr='-f', + argstr="-f", usedefault=True, - desc=('Retain atlas voting images. Default = false')) + desc=("Retain atlas voting images. Default = false"), + ) constrain_nonnegative = traits.Bool( False, - argstr='-c', + argstr="-c", usedefault=True, - desc=('Constrain solution to non-negative weights.')) + desc=("Constrain solution to non-negative weights."), + ) patch_radius = traits.ListInt( minlen=3, maxlen=3, - argstr='-p %s', - desc=('Patch radius for similarity measures.' - 'Default: 2x2x2')) + argstr="-p %s", + desc=("Patch radius for similarity measures." "Default: 2x2x2"), + ) patch_metric = traits.Enum( - 'PC', - 'MSQ', - argstr='-m %s', - desc=('Metric to be used in determining the most similar ' - 'neighborhood patch. Options include Pearson\'s ' - 'correlation (PC) and mean squares (MSQ). Default = ' - 'PC (Pearson correlation).')) + "PC", + "MSQ", + argstr="-m %s", + desc=( + "Metric to be used in determining the most similar " + "neighborhood patch. 
Options include Pearson's " + "correlation (PC) and mean squares (MSQ). Default = " + "PC (Pearson correlation)." + ), + ) search_radius = traits.List( [3, 3, 3], minlen=1, maxlen=3, - argstr='-s %s', + argstr="-s %s", usedefault=True, - desc=('Search radius for similarity measures. Default = 3x3x3. ' - 'One can also specify an image where the value at the ' - 'voxel specifies the isotropic search radius at that voxel.')) + desc=( + "Search radius for similarity measures. Default = 3x3x3. " + "One can also specify an image where the value at the " + "voxel specifies the isotropic search radius at that voxel." + ), + ) exclusion_image_label = traits.List( traits.Str(), - argstr='-e %s', - requires=['exclusion_image'], - desc=('Specify a label for the exclusion region.')) + argstr="-e %s", + requires=["exclusion_image"], + desc=("Specify a label for the exclusion region."), + ) exclusion_image = traits.List( - File(exists=True), - desc=('Specify an exclusion region for the given label.')) + File(exists=True), desc=("Specify an exclusion region for the given label.") + ) mask_image = File( - argstr='-x %s', + argstr="-x %s", exists=True, - desc='If a mask image ' - 'is specified, fusion is only performed in the mask region.') + desc="If a mask image " + "is specified, fusion is only performed in the mask region.", + ) out_label_fusion = File( - argstr="%s", hash_files=False, desc='The output label fusion image.') + argstr="%s", hash_files=False, desc="The output label fusion image." + ) out_intensity_fusion_name_format = traits.Str( argstr="", - desc='Optional intensity fusion ' - 'image file name format. ' - '(e.g. "antsJointFusionIntensity_%d.nii.gz")') + desc="Optional intensity fusion " + "image file name format. " + '(e.g. "antsJointFusionIntensity_%d.nii.gz")', + ) out_label_post_prob_name_format = traits.Str( - 'antsJointFusionPosterior_%d.nii.gz', - requires=['out_label_fusion', 'out_intensity_fusion_name_format'], - desc='Optional label posterior probability ' - 'image file name format.') + "antsJointFusionPosterior_%d.nii.gz", + requires=["out_label_fusion", "out_intensity_fusion_name_format"], + desc="Optional label posterior probability " "image file name format.", + ) out_atlas_voting_weight_name_format = traits.Str( - 'antsJointFusionVotingWeight_%d.nii.gz', + "antsJointFusionVotingWeight_%d.nii.gz", requires=[ - 'out_label_fusion', 'out_intensity_fusion_name_format', - 'out_label_post_prob_name_format' + "out_label_fusion", + "out_intensity_fusion_name_format", + "out_label_post_prob_name_format", ], - desc='Optional atlas voting weight image ' - 'file name format.') - verbose = traits.Bool(False, argstr="-v", desc=('Verbose output.')) + desc="Optional atlas voting weight image " "file name format.", + ) + verbose = traits.Bool(False, argstr="-v", desc=("Verbose output.")) class AntsJointFusionOutputSpec(TraitedSpec): @@ -1364,69 +1566,79 @@ class AntsJointFusion(ANTSCommand): -p 3x2x1 -s mask.nii -t ['im1.nii', 'im2.nii'] -v" """ + input_spec = AntsJointFusionInputSpec output_spec = AntsJointFusionOutputSpec - _cmd = 'antsJointFusion' + _cmd = "antsJointFusion" def _format_arg(self, opt, spec, val): - if opt == 'exclusion_image_label': + if opt == "exclusion_image_label": retval = [] for ii in range(len(self.inputs.exclusion_image_label)): retval.append( - '-e {0}[{1}]'.format(self.inputs.exclusion_image_label[ii], - self.inputs.exclusion_image[ii])) - retval = ' '.join(retval) - elif opt == 'patch_radius': - retval = '-p {0}'.format(self._format_xarray(val)) - elif opt == 
'search_radius': - retval = '-s {0}'.format(self._format_xarray(val)) - elif opt == 'out_label_fusion': + "-e {0}[{1}]".format( + self.inputs.exclusion_image_label[ii], + self.inputs.exclusion_image[ii], + ) + ) + retval = " ".join(retval) + elif opt == "patch_radius": + retval = "-p {0}".format(self._format_xarray(val)) + elif opt == "search_radius": + retval = "-s {0}".format(self._format_xarray(val)) + elif opt == "out_label_fusion": if isdefined(self.inputs.out_intensity_fusion_name_format): if isdefined(self.inputs.out_label_post_prob_name_format): - if isdefined( - self.inputs.out_atlas_voting_weight_name_format): - retval = '-o [{0}, {1}, {2}, {3}]'.format( + if isdefined(self.inputs.out_atlas_voting_weight_name_format): + retval = "-o [{0}, {1}, {2}, {3}]".format( self.inputs.out_label_fusion, self.inputs.out_intensity_fusion_name_format, self.inputs.out_label_post_prob_name_format, - self.inputs.out_atlas_voting_weight_name_format) + self.inputs.out_atlas_voting_weight_name_format, + ) else: - retval = '-o [{0}, {1}, {2}]'.format( + retval = "-o [{0}, {1}, {2}]".format( self.inputs.out_label_fusion, self.inputs.out_intensity_fusion_name_format, - self.inputs.out_label_post_prob_name_format) + self.inputs.out_label_post_prob_name_format, + ) else: - retval = '-o [{0}, {1}]'.format( + retval = "-o [{0}, {1}]".format( self.inputs.out_label_fusion, - self.inputs.out_intensity_fusion_name_format) + self.inputs.out_intensity_fusion_name_format, + ) else: - retval = '-o {0}'.format(self.inputs.out_label_fusion) - elif opt == 'out_intensity_fusion_name_format': - retval = '' + retval = "-o {0}".format(self.inputs.out_label_fusion) + elif opt == "out_intensity_fusion_name_format": + retval = "" if not isdefined(self.inputs.out_label_fusion): - retval = '-o {0}'.format( - self.inputs.out_intensity_fusion_name_format) - elif opt == 'atlas_image': - atlas_image_cmd = " ".join([ - '-g [{0}]'.format(", ".join("'%s'" % fn for fn in ai)) - for ai in self.inputs.atlas_image - ]) + retval = "-o {0}".format(self.inputs.out_intensity_fusion_name_format) + elif opt == "atlas_image": + atlas_image_cmd = " ".join( + [ + "-g [{0}]".format(", ".join("'%s'" % fn for fn in ai)) + for ai in self.inputs.atlas_image + ] + ) retval = atlas_image_cmd - elif opt == 'target_image': - target_image_cmd = " ".join([ - '-t [{0}]'.format(", ".join("'%s'" % fn for fn in ai)) - for ai in self.inputs.target_image - ]) + elif opt == "target_image": + target_image_cmd = " ".join( + [ + "-t [{0}]".format(", ".join("'%s'" % fn for fn in ai)) + for ai in self.inputs.target_image + ] + ) retval = target_image_cmd - elif opt == 'atlas_segmentation_image': - assert len(val) == len(self.inputs.atlas_image), "Number of specified " \ - "segmentations should be identical to the number of atlas image " \ + elif opt == "atlas_segmentation_image": + assert len(val) == len(self.inputs.atlas_image), ( + "Number of specified " + "segmentations should be identical to the number of atlas image " "sets {0}!={1}".format(len(val), len(self.inputs.atlas_image)) + ) - atlas_segmentation_image_cmd = " ".join([ - '-l {0}'.format(fn) - for fn in self.inputs.atlas_segmentation_image - ]) + atlas_segmentation_image_cmd = " ".join( + ["-l {0}".format(fn) for fn in self.inputs.atlas_segmentation_image] + ) retval = atlas_segmentation_image_cmd else: @@ -1436,17 +1648,19 @@ def _format_arg(self, opt, spec, val): def _list_outputs(self): outputs = self._outputs().get() if isdefined(self.inputs.out_label_fusion): - outputs['out_label_fusion'] = 
os.path.abspath( - self.inputs.out_label_fusion) + outputs["out_label_fusion"] = os.path.abspath(self.inputs.out_label_fusion) if isdefined(self.inputs.out_intensity_fusion_name_format): - outputs['out_intensity_fusion_name_format'] = os.path.abspath( - self.inputs.out_intensity_fusion_name_format) + outputs["out_intensity_fusion_name_format"] = os.path.abspath( + self.inputs.out_intensity_fusion_name_format + ) if isdefined(self.inputs.out_label_post_prob_name_format): - outputs['out_label_post_prob_name_format'] = os.path.abspath( - self.inputs.out_label_post_prob_name_format) + outputs["out_label_post_prob_name_format"] = os.path.abspath( + self.inputs.out_label_post_prob_name_format + ) if isdefined(self.inputs.out_atlas_voting_weight_name_format): - outputs['out_atlas_voting_weight_name_format'] = os.path.abspath( - self.inputs.out_atlas_voting_weight_name_format) + outputs["out_atlas_voting_weight_name_format"] = os.path.abspath( + self.inputs.out_atlas_voting_weight_name_format + ) return outputs @@ -1455,54 +1669,52 @@ class KellyKapowskiInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( 3, 2, - argstr='--image-dimensionality %d', + argstr="--image-dimensionality %d", usedefault=True, - desc='image dimension (2 or 3)') + desc="image dimension (2 or 3)", + ) segmentation_image = File( exists=True, argstr='--segmentation-image "%s"', mandatory=True, - desc= - "A segmentation image must be supplied labeling the gray and white matters." + desc="A segmentation image must be supplied labeling the gray and white matters." " Default values = 2 and 3, respectively.", ) gray_matter_label = traits.Int( 2, usedefault=True, - desc= - "The label value for the gray matter label in the segmentation_image.") + desc="The label value for the gray matter label in the segmentation_image.", + ) white_matter_label = traits.Int( 3, usedefault=True, - desc= - "The label value for the white matter label in the segmentation_image." + desc="The label value for the white matter label in the segmentation_image.", ) gray_matter_prob_image = File( exists=True, argstr='--gray-matter-probability-image "%s"', - desc= - "In addition to the segmentation image, a gray matter probability image can be" + desc="In addition to the segmentation image, a gray matter probability image can be" " used. If no such image is supplied, one is created using the segmentation image" - " and a variance of 1.0 mm.") + " and a variance of 1.0 mm.", + ) white_matter_prob_image = File( exists=True, argstr='--white-matter-probability-image "%s"', - desc= - "In addition to the segmentation image, a white matter probability image can be" + desc="In addition to the segmentation image, a white matter probability image can be" " used. If no such image is supplied, one is created using the segmentation image" - " and a variance of 1.0 mm.") + " and a variance of 1.0 mm.", + ) convergence = traits.Str( default="[50,0.001,10]", argstr='--convergence "%s"', usedefault=True, - desc= - "Convergence is determined by fitting a line to the normalized energy profile of" + desc="Convergence is determined by fitting a line to the normalized energy profile of" " the last N iterations (where N is specified by the window size) and determining" " the slope which is then compared with the convergence threshold.", ) @@ -1511,68 +1723,80 @@ class KellyKapowskiInputSpec(ANTSCommandInputSpec): 10, usedefault=True, argstr="--thickness-prior-estimate %f", - desc= - "Provides a prior constraint on the final thickness measurement in mm." 
+ desc="Provides a prior constraint on the final thickness measurement in mm.", ) thickness_prior_image = File( exists=True, argstr='--thickness-prior-image "%s"', - desc="An image containing spatially varying prior thickness values.") + desc="An image containing spatially varying prior thickness values.", + ) gradient_step = traits.Float( 0.025, usedefault=True, argstr="--gradient-step %f", - desc="Gradient step size for the optimization.") + desc="Gradient step size for the optimization.", + ) smoothing_variance = traits.Float( - 1.0, usedefault=True, + 1.0, + usedefault=True, argstr="--smoothing-variance %f", - desc="Defines the Gaussian smoothing of the hit and total images.") + desc="Defines the Gaussian smoothing of the hit and total images.", + ) smoothing_velocity_field = traits.Float( - 1.5, usedefault=True, + 1.5, + usedefault=True, argstr="--smoothing-velocity-field-parameter %f", - desc= - "Defines the Gaussian smoothing of the velocity field (default = 1.5)." + desc="Defines the Gaussian smoothing of the velocity field (default = 1.5)." " If the b-spline smoothing option is chosen, then this defines the" - " isotropic mesh spacing for the smoothing spline (default = 15).") + " isotropic mesh spacing for the smoothing spline (default = 15).", + ) use_bspline_smoothing = traits.Bool( argstr="--use-bspline-smoothing 1", - desc="Sets the option for B-spline smoothing of the velocity field.") + desc="Sets the option for B-spline smoothing of the velocity field.", + ) number_integration_points = traits.Int( - 10, usedefault=True, + 10, + usedefault=True, argstr="--number-of-integration-points %d", - desc="Number of compositions of the diffeomorphism per iteration.") + desc="Number of compositions of the diffeomorphism per iteration.", + ) max_invert_displacement_field_iters = traits.Int( - 20, usedefault=True, + 20, + usedefault=True, argstr="--maximum-number-of-invert-displacement-field-iterations %d", desc="Maximum number of iterations for estimating the invert" - "displacement field.") + "displacement field.", + ) cortical_thickness = File( argstr='--output "%s"', keep_extension=True, name_source=["segmentation_image"], - name_template='%s_cortical_thickness', - desc='Filename for the cortical thickness.', - hash_files=False) + name_template="%s_cortical_thickness", + desc="Filename for the cortical thickness.", + hash_files=False, + ) warped_white_matter = File( name_source=["segmentation_image"], keep_extension=True, - name_template='%s_warped_white_matter', - desc='Filename for the warped white matter file.', - hash_files=False) + name_template="%s_warped_white_matter", + desc="Filename for the warped white matter file.", + hash_files=False, + ) class KellyKapowskiOutputSpec(TraitedSpec): cortical_thickness = File( - desc="A thickness map defined in the segmented gray matter.") + desc="A thickness map defined in the segmented gray matter." + ) warped_white_matter = File(desc="A warped white matter image.") @@ -1600,66 +1824,68 @@ class KellyKapowski(ANTSCommand): --smoothing-velocity-field-parameter 1.500000 --thickness-prior-estimate 10.000000' """ + _cmd = "KellyKapowski" input_spec = KellyKapowskiInputSpec output_spec = KellyKapowskiOutputSpec - references_ = [{ - 'entry': - BibTeX( - "@book{Das2009867," - "author={Sandhitsu R. Das and Brian B. Avants and Murray Grossman and James C. 
Gee}," - "title={Registration based cortical thickness measurement.}," - "journal={NeuroImage}," - "volume={45}," - "number={37}," - "pages={867--879}," - "year={2009}," - "issn={1053-8119}," - "url={http://www.sciencedirect.com/science/article/pii/S1053811908012780}," - "doi={https://doi.org/10.1016/j.neuroimage.2008.12.016}" - "}"), - 'description': - 'The details on the implementation of DiReCT.', - 'tags': ['implementation'], - }] + references_ = [ + { + "entry": BibTeX( + "@book{Das2009867," + "author={Sandhitsu R. Das and Brian B. Avants and Murray Grossman and James C. Gee}," + "title={Registration based cortical thickness measurement.}," + "journal={NeuroImage}," + "volume={45}," + "number={37}," + "pages={867--879}," + "year={2009}," + "issn={1053-8119}," + "url={http://www.sciencedirect.com/science/article/pii/S1053811908012780}," + "doi={https://doi.org/10.1016/j.neuroimage.2008.12.016}" + "}" + ), + "description": "The details on the implementation of DiReCT.", + "tags": ["implementation"], + } + ] def _parse_inputs(self, skip=None): if skip is None: skip = [] - skip += [ - 'warped_white_matter', 'gray_matter_label', 'white_matter_label' - ] + skip += ["warped_white_matter", "gray_matter_label", "white_matter_label"] return super(KellyKapowski, self)._parse_inputs(skip=skip) def _gen_filename(self, name): - if name == 'cortical_thickness': + if name == "cortical_thickness": output = self.inputs.cortical_thickness if not isdefined(output): _, name, ext = split_filename(self.inputs.segmentation_image) - output = name + '_cortical_thickness' + ext + output = name + "_cortical_thickness" + ext return output - if name == 'warped_white_matter': + if name == "warped_white_matter": output = self.inputs.warped_white_matter if not isdefined(output): _, name, ext = split_filename(self.inputs.segmentation_image) - output = name + '_warped_white_matter' + ext + output = name + "_warped_white_matter" + ext return output return None def _format_arg(self, opt, spec, val): if opt == "segmentation_image": - newval = '[{0},{1},{2}]'.format(self.inputs.segmentation_image, - self.inputs.gray_matter_label, - self.inputs.white_matter_label) + newval = "[{0},{1},{2}]".format( + self.inputs.segmentation_image, + self.inputs.gray_matter_label, + self.inputs.white_matter_label, + ) return spec.argstr % newval if opt == "cortical_thickness": ct = self._gen_filename("cortical_thickness") wm = self._gen_filename("warped_white_matter") - newval = '[{},{}]'.format(ct, wm) + newval = "[{},{}]".format(ct, wm) return spec.argstr % newval return super(KellyKapowski, self)._format_arg(opt, spec, val) diff --git a/nipype/interfaces/ants/tests/test_auto_ANTS.py b/nipype/interfaces/ants/tests/test_auto_ANTS.py index 3e762beaa9..f3e329f957 100644 --- a/nipype/interfaces/ants/tests/test_auto_ANTS.py +++ b/nipype/interfaces/ants/tests/test_auto_ANTS.py @@ -4,90 +4,50 @@ def test_ANTS_inputs(): input_map = dict( - affine_gradient_descent_option=dict(argstr='%s', ), - args=dict(argstr='%s', ), - delta_time=dict(requires=['number_of_time_steps'], ), - dimension=dict( - argstr='%d', - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixed_image=dict(mandatory=True, ), - gradient_step_length=dict(requires=['transformation_model'], ), - metric=dict(mandatory=True, ), - metric_weight=dict( - mandatory=True, - requires=['metric'], - usedefault=True, - ), - mi_option=dict( - argstr='--MI-option %s', - sep='x', - ), - moving_image=dict( - argstr='%s', - mandatory=True, - ), - num_threads=dict( - 
nohash=True, - usedefault=True, - ), + affine_gradient_descent_option=dict(argstr="%s",), + args=dict(argstr="%s",), + delta_time=dict(requires=["number_of_time_steps"],), + dimension=dict(argstr="%d", position=1,), + environ=dict(nohash=True, usedefault=True,), + fixed_image=dict(mandatory=True,), + gradient_step_length=dict(requires=["transformation_model"],), + metric=dict(mandatory=True,), + metric_weight=dict(mandatory=True, requires=["metric"], usedefault=True,), + mi_option=dict(argstr="--MI-option %s", sep="x",), + moving_image=dict(argstr="%s", mandatory=True,), + num_threads=dict(nohash=True, usedefault=True,), number_of_affine_iterations=dict( - argstr='--number-of-affine-iterations %s', - sep='x', + argstr="--number-of-affine-iterations %s", sep="x", ), - number_of_iterations=dict( - argstr='--number-of-iterations %s', - sep='x', - ), - number_of_time_steps=dict(requires=['gradient_step_length'], ), + number_of_iterations=dict(argstr="--number-of-iterations %s", sep="x",), + number_of_time_steps=dict(requires=["gradient_step_length"],), output_transform_prefix=dict( - argstr='--output-naming %s', - mandatory=True, - usedefault=True, - ), - radius=dict( - mandatory=True, - requires=['metric'], - ), - regularization=dict(argstr='%s', ), - regularization_deformation_field_sigma=dict( - requires=['regularization'], ), - regularization_gradient_field_sigma=dict(requires=['regularization' - ], ), - smoothing_sigmas=dict( - argstr='--gaussian-smoothing-sigmas %s', - sep='x', - ), - subsampling_factors=dict( - argstr='--subsampling-factors %s', - sep='x', - ), - symmetry_type=dict(requires=['delta_time'], ), - transformation_model=dict( - argstr='%s', - mandatory=True, - ), - use_histogram_matching=dict( - argstr='%s', - usedefault=True, - ), + argstr="--output-naming %s", mandatory=True, usedefault=True, + ), + radius=dict(mandatory=True, requires=["metric"],), + regularization=dict(argstr="%s",), + regularization_deformation_field_sigma=dict(requires=["regularization"],), + regularization_gradient_field_sigma=dict(requires=["regularization"],), + smoothing_sigmas=dict(argstr="--gaussian-smoothing-sigmas %s", sep="x",), + subsampling_factors=dict(argstr="--subsampling-factors %s", sep="x",), + symmetry_type=dict(requires=["delta_time"],), + transformation_model=dict(argstr="%s", mandatory=True,), + use_histogram_matching=dict(argstr="%s", usedefault=True,), ) inputs = ANTS.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ANTS_outputs(): output_map = dict( - affine_transform=dict(extensions=None, ), - inverse_warp_transform=dict(extensions=None, ), - metaheader=dict(extensions=None, ), - metaheader_raw=dict(extensions=None, ), - warp_transform=dict(extensions=None, ), + affine_transform=dict(extensions=None,), + inverse_warp_transform=dict(extensions=None,), + metaheader=dict(extensions=None,), + metaheader_raw=dict(extensions=None,), + warp_transform=dict(extensions=None,), ) outputs = ANTS.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py b/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py index 54d1effe3a..7a5ff5dec5 100644 --- a/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py +++ b/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py @@ -4,15 +4,9 @@ def test_ANTSCommand_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - num_threads=dict( - nohash=True, - 
usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + num_threads=dict(nohash=True, usedefault=True,), ) inputs = ANTSCommand.input_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py b/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py index 0ae65a48a2..f97fbe3352 100644 --- a/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py +++ b/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py @@ -4,66 +4,27 @@ def test_AffineInitializer_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dimension=dict( - argstr='%s', - position=0, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixed_image=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=1, - ), - local_search=dict( - argstr='%d', - position=7, - usedefault=True, - ), - moving_image=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_file=dict( - argstr='%s', - extensions=None, - position=3, - usedefault=True, - ), - principal_axes=dict( - argstr='%d', - position=6, - usedefault=True, - ), - radian_fraction=dict( - argstr='%f', - position=5, - usedefault=True, - ), - search_factor=dict( - argstr='%f', - position=4, - usedefault=True, - ), + args=dict(argstr="%s",), + dimension=dict(argstr="%s", position=0, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + fixed_image=dict(argstr="%s", extensions=None, mandatory=True, position=1,), + local_search=dict(argstr="%d", position=7, usedefault=True,), + moving_image=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + num_threads=dict(nohash=True, usedefault=True,), + out_file=dict(argstr="%s", extensions=None, position=3, usedefault=True,), + principal_axes=dict(argstr="%d", position=6, usedefault=True,), + radian_fraction=dict(argstr="%f", position=5, usedefault=True,), + search_factor=dict(argstr="%f", position=4, usedefault=True,), ) inputs = AffineInitializer.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_AffineInitializer_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = AffineInitializer.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py index e4e6721d82..d86f7f84cb 100644 --- a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py +++ b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py @@ -4,93 +4,52 @@ def test_AntsJointFusion_inputs(): input_map = dict( - alpha=dict( - argstr='-a %s', - usedefault=True, - ), - args=dict(argstr='%s', ), - atlas_image=dict( - argstr='-g %s...', - mandatory=True, - ), - atlas_segmentation_image=dict( - argstr='-l %s...', - mandatory=True, - ), - beta=dict( - argstr='-b %s', - usedefault=True, - ), - constrain_nonnegative=dict( - argstr='-c', - usedefault=True, - ), - dimension=dict(argstr='-d %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), + alpha=dict(argstr="-a %s", usedefault=True,), + args=dict(argstr="%s",), + atlas_image=dict(argstr="-g %s...", mandatory=True,), + atlas_segmentation_image=dict(argstr="-l %s...", mandatory=True,), + beta=dict(argstr="-b %s", usedefault=True,), + constrain_nonnegative=dict(argstr="-c", 
usedefault=True,), + dimension=dict(argstr="-d %d",), + environ=dict(nohash=True, usedefault=True,), exclusion_image=dict(), - exclusion_image_label=dict( - argstr='-e %s', - requires=['exclusion_image'], - ), - mask_image=dict( - argstr='-x %s', - extensions=None, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_atlas_voting_weight_name_format=dict(requires=[ - 'out_label_fusion', 'out_intensity_fusion_name_format', - 'out_label_post_prob_name_format' - ], ), - out_intensity_fusion_name_format=dict(argstr='', ), - out_label_fusion=dict( - argstr='%s', - extensions=None, - hash_files=False, - ), + exclusion_image_label=dict(argstr="-e %s", requires=["exclusion_image"],), + mask_image=dict(argstr="-x %s", extensions=None,), + num_threads=dict(nohash=True, usedefault=True,), + out_atlas_voting_weight_name_format=dict( + requires=[ + "out_label_fusion", + "out_intensity_fusion_name_format", + "out_label_post_prob_name_format", + ], + ), + out_intensity_fusion_name_format=dict(argstr="",), + out_label_fusion=dict(argstr="%s", extensions=None, hash_files=False,), out_label_post_prob_name_format=dict( - requires=['out_label_fusion', - 'out_intensity_fusion_name_format'], ), - patch_metric=dict(argstr='-m %s', ), - patch_radius=dict( - argstr='-p %s', - maxlen=3, - minlen=3, - ), - retain_atlas_voting_images=dict( - argstr='-f', - usedefault=True, + requires=["out_label_fusion", "out_intensity_fusion_name_format"], ), + patch_metric=dict(argstr="-m %s",), + patch_radius=dict(argstr="-p %s", maxlen=3, minlen=3,), + retain_atlas_voting_images=dict(argstr="-f", usedefault=True,), retain_label_posterior_images=dict( - argstr='-r', - requires=['atlas_segmentation_image'], - usedefault=True, - ), - search_radius=dict( - argstr='-s %s', - usedefault=True, + argstr="-r", requires=["atlas_segmentation_image"], usedefault=True, ), - target_image=dict( - argstr='-t %s', - mandatory=True, - ), - verbose=dict(argstr='-v', ), + search_radius=dict(argstr="-s %s", usedefault=True,), + target_image=dict(argstr="-t %s", mandatory=True,), + verbose=dict(argstr="-v",), ) inputs = AntsJointFusion.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_AntsJointFusion_outputs(): output_map = dict( out_atlas_voting_weight_name_format=dict(), out_intensity_fusion_name_format=dict(), - out_label_fusion=dict(extensions=None, ), + out_label_fusion=dict(extensions=None,), out_label_post_prob_name_format=dict(), ) outputs = AntsJointFusion.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py b/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py index 00a21dce13..18add39b6c 100644 --- a/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py +++ b/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py @@ -4,60 +4,34 @@ def test_ApplyTransforms_inputs(): input_map = dict( - args=dict(argstr='%s', ), - default_value=dict( - argstr='--default-value %g', - usedefault=True, - ), - dimension=dict(argstr='--dimensionality %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - float=dict( - argstr='--float %d', - usedefault=True, - ), - input_image=dict( - argstr='--input %s', - extensions=None, - mandatory=True, - ), - input_image_type=dict(argstr='--input-image-type %d', ), - interpolation=dict( - argstr='%s', - usedefault=True, - ), + args=dict(argstr="%s",), + default_value=dict(argstr="--default-value %g", usedefault=True,), + 
dimension=dict(argstr="--dimensionality %d",), + environ=dict(nohash=True, usedefault=True,), + float=dict(argstr="--float %d", usedefault=True,), + input_image=dict(argstr="--input %s", extensions=None, mandatory=True,), + input_image_type=dict(argstr="--input-image-type %d",), + interpolation=dict(argstr="%s", usedefault=True,), interpolation_parameters=dict(), invert_transform_flags=dict(), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_postfix=dict(usedefault=True, ), - output_image=dict( - argstr='--output %s', - genfile=True, - hash_files=False, - ), - print_out_composite_warp_file=dict(requires=['output_image'], ), + num_threads=dict(nohash=True, usedefault=True,), + out_postfix=dict(usedefault=True,), + output_image=dict(argstr="--output %s", genfile=True, hash_files=False,), + print_out_composite_warp_file=dict(requires=["output_image"],), reference_image=dict( - argstr='--reference-image %s', - extensions=None, - mandatory=True, - ), - transforms=dict( - argstr='%s', - mandatory=True, + argstr="--reference-image %s", extensions=None, mandatory=True, ), + transforms=dict(argstr="%s", mandatory=True,), ) inputs = ApplyTransforms.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ApplyTransforms_outputs(): - output_map = dict(output_image=dict(extensions=None, ), ) + output_map = dict(output_image=dict(extensions=None,),) outputs = ApplyTransforms.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py b/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py index 7ef77a6204..a0258471b8 100644 --- a/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py +++ b/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py @@ -4,40 +4,29 @@ def test_ApplyTransformsToPoints_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dimension=dict(argstr='--dimensionality %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_file=dict( - argstr='--input %s', - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s",), + dimension=dict(argstr="--dimensionality %d",), + environ=dict(nohash=True, usedefault=True,), + input_file=dict(argstr="--input %s", extensions=None, mandatory=True,), invert_transform_flags=dict(), - num_threads=dict( - nohash=True, - usedefault=True, - ), + num_threads=dict(nohash=True, usedefault=True,), output_file=dict( - argstr='--output %s', + argstr="--output %s", hash_files=False, - name_source=['input_file'], - name_template='%s_transformed.csv', - ), - transforms=dict( - argstr='%s', - mandatory=True, + name_source=["input_file"], + name_template="%s_transformed.csv", ), + transforms=dict(argstr="%s", mandatory=True,), ) inputs = ApplyTransformsToPoints.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ApplyTransformsToPoints_outputs(): - output_map = dict(output_file=dict(extensions=None, ), ) + output_map = dict(output_file=dict(extensions=None,),) outputs = ApplyTransformsToPoints.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_Atropos.py b/nipype/interfaces/ants/tests/test_auto_Atropos.py index bb7a2afe78..021348bba0 100644 --- a/nipype/interfaces/ants/tests/test_auto_Atropos.py +++ 
b/nipype/interfaces/ants/tests/test_auto_Atropos.py @@ -4,71 +4,44 @@ def test_Atropos_inputs(): input_map = dict( - args=dict(argstr='%s', ), - convergence_threshold=dict(requires=['n_iterations'], ), - dimension=dict( - argstr='--image-dimensionality %d', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - icm_use_synchronous_update=dict(argstr='%s', ), + args=dict(argstr="%s",), + convergence_threshold=dict(requires=["n_iterations"],), + dimension=dict(argstr="--image-dimensionality %d", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + icm_use_synchronous_update=dict(argstr="%s",), initialization=dict( - argstr='%s', - mandatory=True, - requires=['number_of_tissue_classes'], - ), - intensity_images=dict( - argstr='--intensity-image %s...', - mandatory=True, - ), - likelihood_model=dict(argstr='--likelihood-model %s', ), - mask_image=dict( - argstr='--mask-image %s', - extensions=None, - mandatory=True, - ), - maximum_number_of_icm_terations=dict( - requires=['icm_use_synchronous_update'], ), - mrf_radius=dict(requires=['mrf_smoothing_factor'], ), - mrf_smoothing_factor=dict(argstr='%s', ), - n_iterations=dict(argstr='%s', ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - number_of_tissue_classes=dict(mandatory=True, ), + argstr="%s", mandatory=True, requires=["number_of_tissue_classes"], + ), + intensity_images=dict(argstr="--intensity-image %s...", mandatory=True,), + likelihood_model=dict(argstr="--likelihood-model %s",), + mask_image=dict(argstr="--mask-image %s", extensions=None, mandatory=True,), + maximum_number_of_icm_terations=dict(requires=["icm_use_synchronous_update"],), + mrf_radius=dict(requires=["mrf_smoothing_factor"],), + mrf_smoothing_factor=dict(argstr="%s",), + n_iterations=dict(argstr="%s",), + num_threads=dict(nohash=True, usedefault=True,), + number_of_tissue_classes=dict(mandatory=True,), out_classified_image_name=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, + argstr="%s", extensions=None, genfile=True, hash_files=False, ), - output_posteriors_name_template=dict(usedefault=True, ), - posterior_formulation=dict(argstr='%s', ), + output_posteriors_name_template=dict(usedefault=True,), + posterior_formulation=dict(argstr="%s",), prior_probability_images=dict(), - prior_probability_threshold=dict(requires=['prior_weighting'], ), + prior_probability_threshold=dict(requires=["prior_weighting"],), prior_weighting=dict(), save_posteriors=dict(), - use_mixture_model_proportions=dict(requires=['posterior_formulation' - ], ), - use_random_seed=dict( - argstr='--use-random-seed %d', - usedefault=True, - ), + use_mixture_model_proportions=dict(requires=["posterior_formulation"],), + use_random_seed=dict(argstr="--use-random-seed %d", usedefault=True,), ) inputs = Atropos.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Atropos_outputs(): - output_map = dict( - classified_image=dict(extensions=None, ), - posteriors=dict(), - ) + output_map = dict(classified_image=dict(extensions=None,), posteriors=dict(),) outputs = Atropos.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py b/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py index 9247abda86..5d8b191931 100644 --- a/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py +++ 
b/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py @@ -4,39 +4,24 @@ def test_AverageAffineTransform_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dimension=dict( - argstr='%d', - mandatory=True, - position=0, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + dimension=dict(argstr="%d", mandatory=True, position=0,), + environ=dict(nohash=True, usedefault=True,), + num_threads=dict(nohash=True, usedefault=True,), output_affine_transform=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=1, - ), - transforms=dict( - argstr='%s', - mandatory=True, - position=3, + argstr="%s", extensions=None, mandatory=True, position=1, ), + transforms=dict(argstr="%s", mandatory=True, position=3,), ) inputs = AverageAffineTransform.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_AverageAffineTransform_outputs(): - output_map = dict(affine_transform=dict(extensions=None, ), ) + output_map = dict(affine_transform=dict(extensions=None,),) outputs = AverageAffineTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_AverageImages.py b/nipype/interfaces/ants/tests/test_auto_AverageImages.py index 206d27d4bf..572407efbc 100644 --- a/nipype/interfaces/ants/tests/test_auto_AverageImages.py +++ b/nipype/interfaces/ants/tests/test_auto_AverageImages.py @@ -4,36 +4,14 @@ def test_AverageImages_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dimension=dict( - argstr='%d', - mandatory=True, - position=0, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - images=dict( - argstr='%s', - mandatory=True, - position=3, - ), - normalize=dict( - argstr='%d', - mandatory=True, - position=2, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + dimension=dict(argstr="%d", mandatory=True, position=0,), + environ=dict(nohash=True, usedefault=True,), + images=dict(argstr="%s", mandatory=True, position=3,), + normalize=dict(argstr="%d", mandatory=True, position=2,), + num_threads=dict(nohash=True, usedefault=True,), output_average_image=dict( - argstr='%s', - extensions=None, - hash_files=False, - position=1, - usedefault=True, + argstr="%s", extensions=None, hash_files=False, position=1, usedefault=True, ), ) inputs = AverageImages.input_spec() @@ -41,8 +19,10 @@ def test_AverageImages_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_AverageImages_outputs(): - output_map = dict(output_average_image=dict(extensions=None, ), ) + output_map = dict(output_average_image=dict(extensions=None,),) outputs = AverageImages.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py b/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py index ba220afb9c..8eb0293313 100644 --- a/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py +++ b/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py @@ -4,77 +4,50 @@ def test_BrainExtraction_inputs(): input_map = dict( - anatomical_image=dict( - argstr='-a %s', - extensions=None, - mandatory=True, - ), - args=dict(argstr='%s', ), + anatomical_image=dict(argstr="-a %s", extensions=None, mandatory=True,), + 
args=dict(argstr="%s",), brain_probability_mask=dict( - argstr='-m %s', - copyfile=False, - extensions=None, - mandatory=True, - ), - brain_template=dict( - argstr='-e %s', - extensions=None, - mandatory=True, - ), - debug=dict(argstr='-z 1', ), - dimension=dict( - argstr='-d %d', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - extraction_registration_mask=dict( - argstr='-f %s', - extensions=None, - ), - image_suffix=dict( - argstr='-s %s', - usedefault=True, - ), - keep_temporary_files=dict(argstr='-k %d', ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_prefix=dict( - argstr='-o %s', - usedefault=True, - ), - use_floatingpoint_precision=dict(argstr='-q %d', ), - use_random_seeding=dict(argstr='-u %d', ), + argstr="-m %s", copyfile=False, extensions=None, mandatory=True, + ), + brain_template=dict(argstr="-e %s", extensions=None, mandatory=True,), + debug=dict(argstr="-z 1",), + dimension=dict(argstr="-d %d", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + extraction_registration_mask=dict(argstr="-f %s", extensions=None,), + image_suffix=dict(argstr="-s %s", usedefault=True,), + keep_temporary_files=dict(argstr="-k %d",), + num_threads=dict(nohash=True, usedefault=True,), + out_prefix=dict(argstr="-o %s", usedefault=True,), + use_floatingpoint_precision=dict(argstr="-q %d",), + use_random_seeding=dict(argstr="-u %d",), ) inputs = BrainExtraction.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BrainExtraction_outputs(): output_map = dict( - BrainExtractionBrain=dict(extensions=None, ), - BrainExtractionCSF=dict(extensions=None, ), - BrainExtractionGM=dict(extensions=None, ), - BrainExtractionInitialAffine=dict(extensions=None, ), - BrainExtractionInitialAffineFixed=dict(extensions=None, ), - BrainExtractionInitialAffineMoving=dict(extensions=None, ), - BrainExtractionLaplacian=dict(extensions=None, ), - BrainExtractionMask=dict(extensions=None, ), - BrainExtractionPrior0GenericAffine=dict(extensions=None, ), - BrainExtractionPrior1InverseWarp=dict(extensions=None, ), - BrainExtractionPrior1Warp=dict(extensions=None, ), - BrainExtractionPriorWarped=dict(extensions=None, ), - BrainExtractionSegmentation=dict(extensions=None, ), - BrainExtractionTemplateLaplacian=dict(extensions=None, ), - BrainExtractionTmp=dict(extensions=None, ), - BrainExtractionWM=dict(extensions=None, ), - N4Corrected0=dict(extensions=None, ), - N4Truncated0=dict(extensions=None, ), + BrainExtractionBrain=dict(extensions=None,), + BrainExtractionCSF=dict(extensions=None,), + BrainExtractionGM=dict(extensions=None,), + BrainExtractionInitialAffine=dict(extensions=None,), + BrainExtractionInitialAffineFixed=dict(extensions=None,), + BrainExtractionInitialAffineMoving=dict(extensions=None,), + BrainExtractionLaplacian=dict(extensions=None,), + BrainExtractionMask=dict(extensions=None,), + BrainExtractionPrior0GenericAffine=dict(extensions=None,), + BrainExtractionPrior1InverseWarp=dict(extensions=None,), + BrainExtractionPrior1Warp=dict(extensions=None,), + BrainExtractionPriorWarped=dict(extensions=None,), + BrainExtractionSegmentation=dict(extensions=None,), + BrainExtractionTemplateLaplacian=dict(extensions=None,), + BrainExtractionTmp=dict(extensions=None,), + BrainExtractionWM=dict(extensions=None,), + N4Corrected0=dict(extensions=None,), + N4Truncated0=dict(extensions=None,), ) outputs = BrainExtraction.output_spec() diff 
--git a/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py b/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py index 476f316a9f..ccc54c6eb8 100644 --- a/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py @@ -4,46 +4,30 @@ def test_ComposeMultiTransform_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dimension=dict( - argstr='%d', - position=0, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + dimension=dict(argstr="%d", position=0, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + num_threads=dict(nohash=True, usedefault=True,), output_transform=dict( - argstr='%s', + argstr="%s", extensions=None, keep_extension=True, - name_source=['transforms'], - name_template='%s_composed', + name_source=["transforms"], + name_template="%s_composed", position=1, ), - reference_image=dict( - argstr='%s', - extensions=None, - position=2, - ), - transforms=dict( - argstr='%s', - mandatory=True, - position=3, - ), + reference_image=dict(argstr="%s", extensions=None, position=2,), + transforms=dict(argstr="%s", mandatory=True, position=3,), ) inputs = ComposeMultiTransform.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ComposeMultiTransform_outputs(): - output_map = dict(output_transform=dict(extensions=None, ), ) + output_map = dict(output_transform=dict(extensions=None,),) outputs = ComposeMultiTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_CompositeTransformUtil.py b/nipype/interfaces/ants/tests/test_auto_CompositeTransformUtil.py index 9bedb990ec..125c69f141 100644 --- a/nipype/interfaces/ants/tests/test_auto_CompositeTransformUtil.py +++ b/nipype/interfaces/ants/tests/test_auto_CompositeTransformUtil.py @@ -4,46 +4,26 @@ def test_CompositeTransformUtil_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s...', - mandatory=True, - position=3, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_file=dict( - argstr='%s', - extensions=None, - position=2, - ), - output_prefix=dict( - argstr='%s', - position=4, - usedefault=True, - ), - process=dict( - argstr='--%s', - position=1, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s...", mandatory=True, position=3,), + num_threads=dict(nohash=True, usedefault=True,), + out_file=dict(argstr="%s", extensions=None, position=2,), + output_prefix=dict(argstr="%s", position=4, usedefault=True,), + process=dict(argstr="--%s", position=1, usedefault=True,), ) inputs = CompositeTransformUtil.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CompositeTransformUtil_outputs(): output_map = dict( - affine_transform=dict(extensions=None, ), - displacement_field=dict(extensions=None, ), - out_file=dict(extensions=None, ), + affine_transform=dict(extensions=None,), + displacement_field=dict(extensions=None,), + out_file=dict(extensions=None,), ) outputs = CompositeTransformUtil.output_spec() diff --git 
a/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py b/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py index b3944ed735..7e8c5605f7 100644 --- a/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py +++ b/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py @@ -4,77 +4,29 @@ def test_ConvertScalarImageToRGB_inputs(): input_map = dict( - args=dict(argstr='%s', ), - colormap=dict( - argstr='%s', - mandatory=True, - position=4, - usedefault=True, - ), - custom_color_map_file=dict( - argstr='%s', - position=5, - usedefault=True, - ), - dimension=dict( - argstr='%d', - mandatory=True, - position=0, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_image=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=1, - ), - mask_image=dict( - argstr='%s', - extensions=None, - position=3, - usedefault=True, - ), - maximum_RGB_output=dict( - argstr='%d', - position=9, - usedefault=True, - ), - maximum_input=dict( - argstr='%d', - mandatory=True, - position=7, - ), - minimum_RGB_output=dict( - argstr='%d', - position=8, - usedefault=True, - ), - minimum_input=dict( - argstr='%d', - mandatory=True, - position=6, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - output_image=dict( - argstr='%s', - position=2, - usedefault=True, - ), + args=dict(argstr="%s",), + colormap=dict(argstr="%s", mandatory=True, position=4, usedefault=True,), + custom_color_map_file=dict(argstr="%s", position=5, usedefault=True,), + dimension=dict(argstr="%d", mandatory=True, position=0, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + input_image=dict(argstr="%s", extensions=None, mandatory=True, position=1,), + mask_image=dict(argstr="%s", extensions=None, position=3, usedefault=True,), + maximum_RGB_output=dict(argstr="%d", position=9, usedefault=True,), + maximum_input=dict(argstr="%d", mandatory=True, position=7,), + minimum_RGB_output=dict(argstr="%d", position=8, usedefault=True,), + minimum_input=dict(argstr="%d", mandatory=True, position=6,), + num_threads=dict(nohash=True, usedefault=True,), + output_image=dict(argstr="%s", position=2, usedefault=True,), ) inputs = ConvertScalarImageToRGB.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ConvertScalarImageToRGB_outputs(): - output_map = dict(output_image=dict(extensions=None, ), ) + output_map = dict(output_image=dict(extensions=None,),) outputs = ConvertScalarImageToRGB.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py b/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py index d3cf218934..92a3f04b57 100644 --- a/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py +++ b/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py @@ -4,89 +4,55 @@ def test_CorticalThickness_inputs(): input_map = dict( - anatomical_image=dict( - argstr='-a %s', - extensions=None, - mandatory=True, - ), - args=dict(argstr='%s', ), - b_spline_smoothing=dict(argstr='-v', ), + anatomical_image=dict(argstr="-a %s", extensions=None, mandatory=True,), + args=dict(argstr="%s",), + b_spline_smoothing=dict(argstr="-v",), brain_probability_mask=dict( - argstr='-m %s', - copyfile=False, - extensions=None, - mandatory=True, - ), - brain_template=dict( - argstr='-e %s', - extensions=None, - mandatory=True, - ), - 
cortical_label_image=dict(extensions=None, ), - debug=dict(argstr='-z 1', ), - dimension=dict( - argstr='-d %d', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - extraction_registration_mask=dict( - argstr='-f %s', - extensions=None, - ), - image_suffix=dict( - argstr='-s %s', - usedefault=True, - ), - keep_temporary_files=dict(argstr='-k %d', ), - label_propagation=dict(argstr='-l %s', ), - max_iterations=dict(argstr='-i %d', ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_prefix=dict( - argstr='-o %s', - usedefault=True, - ), - posterior_formulation=dict(argstr='-b %s', ), - prior_segmentation_weight=dict(argstr='-w %f', ), - quick_registration=dict(argstr='-q 1', ), - segmentation_iterations=dict(argstr='-n %d', ), - segmentation_priors=dict( - argstr='-p %s', - mandatory=True, - ), - t1_registration_template=dict( - argstr='-t %s', - extensions=None, - mandatory=True, - ), - use_floatingpoint_precision=dict(argstr='-j %d', ), - use_random_seeding=dict(argstr='-u %d', ), + argstr="-m %s", copyfile=False, extensions=None, mandatory=True, + ), + brain_template=dict(argstr="-e %s", extensions=None, mandatory=True,), + cortical_label_image=dict(extensions=None,), + debug=dict(argstr="-z 1",), + dimension=dict(argstr="-d %d", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + extraction_registration_mask=dict(argstr="-f %s", extensions=None,), + image_suffix=dict(argstr="-s %s", usedefault=True,), + keep_temporary_files=dict(argstr="-k %d",), + label_propagation=dict(argstr="-l %s",), + max_iterations=dict(argstr="-i %d",), + num_threads=dict(nohash=True, usedefault=True,), + out_prefix=dict(argstr="-o %s", usedefault=True,), + posterior_formulation=dict(argstr="-b %s",), + prior_segmentation_weight=dict(argstr="-w %f",), + quick_registration=dict(argstr="-q 1",), + segmentation_iterations=dict(argstr="-n %d",), + segmentation_priors=dict(argstr="-p %s", mandatory=True,), + t1_registration_template=dict(argstr="-t %s", extensions=None, mandatory=True,), + use_floatingpoint_precision=dict(argstr="-j %d",), + use_random_seeding=dict(argstr="-u %d",), ) inputs = CorticalThickness.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CorticalThickness_outputs(): output_map = dict( - BrainExtractionMask=dict(extensions=None, ), - BrainSegmentation=dict(extensions=None, ), - BrainSegmentationN4=dict(extensions=None, ), + BrainExtractionMask=dict(extensions=None,), + BrainSegmentation=dict(extensions=None,), + BrainSegmentationN4=dict(extensions=None,), BrainSegmentationPosteriors=dict(), - BrainVolumes=dict(extensions=None, ), - CorticalThickness=dict(extensions=None, ), - CorticalThicknessNormedToTemplate=dict(extensions=None, ), - ExtractedBrainN4=dict(extensions=None, ), - SubjectToTemplate0GenericAffine=dict(extensions=None, ), - SubjectToTemplate1Warp=dict(extensions=None, ), - SubjectToTemplateLogJacobian=dict(extensions=None, ), - TemplateToSubject0Warp=dict(extensions=None, ), - TemplateToSubject1GenericAffine=dict(extensions=None, ), + BrainVolumes=dict(extensions=None,), + CorticalThickness=dict(extensions=None,), + CorticalThicknessNormedToTemplate=dict(extensions=None,), + ExtractedBrainN4=dict(extensions=None,), + SubjectToTemplate0GenericAffine=dict(extensions=None,), + SubjectToTemplate1Warp=dict(extensions=None,), + SubjectToTemplateLogJacobian=dict(extensions=None,), + 
TemplateToSubject0Warp=dict(extensions=None,), + TemplateToSubject1GenericAffine=dict(extensions=None,), ) outputs = CorticalThickness.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py b/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py index d5ac0c1d06..be694c055f 100644 --- a/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py +++ b/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py @@ -4,48 +4,26 @@ def test_CreateJacobianDeterminantImage_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), deformationField=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=1, - ), - doLogJacobian=dict( - argstr='%d', - position=3, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - imageDimension=dict( - argstr='%d', - mandatory=True, - position=0, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - outputImage=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - useGeometric=dict( - argstr='%d', - position=4, + argstr="%s", extensions=None, mandatory=True, position=1, ), + doLogJacobian=dict(argstr="%d", position=3,), + environ=dict(nohash=True, usedefault=True,), + imageDimension=dict(argstr="%d", mandatory=True, position=0,), + num_threads=dict(nohash=True, usedefault=True,), + outputImage=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + useGeometric=dict(argstr="%d", position=4,), ) inputs = CreateJacobianDeterminantImage.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CreateJacobianDeterminantImage_outputs(): - output_map = dict(jacobian_image=dict(extensions=None, ), ) + output_map = dict(jacobian_image=dict(extensions=None,),) outputs = CreateJacobianDeterminantImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py b/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py index e6a4142c90..3bb36c9d01 100644 --- a/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py +++ b/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py @@ -4,48 +4,30 @@ def test_CreateTiledMosaic_inputs(): input_map = dict( - alpha_value=dict(argstr='-a %.2f', ), - args=dict(argstr='%s', ), - direction=dict(argstr='-d %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - flip_slice=dict(argstr='-f %s', ), - input_image=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - ), - mask_image=dict( - argstr='-x %s', - extensions=None, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - output_image=dict( - argstr='-o %s', - usedefault=True, - ), - pad_or_crop=dict(argstr='-p %s', ), - permute_axes=dict(argstr='-g', ), - rgb_image=dict( - argstr='-r %s', - extensions=None, - mandatory=True, - ), - slices=dict(argstr='-s %s', ), - tile_geometry=dict(argstr='-t %s', ), + alpha_value=dict(argstr="-a %.2f",), + args=dict(argstr="%s",), + direction=dict(argstr="-d %d",), + environ=dict(nohash=True, usedefault=True,), + flip_slice=dict(argstr="-f %s",), + input_image=dict(argstr="-i %s", extensions=None, mandatory=True,), + mask_image=dict(argstr="-x %s", extensions=None,), + num_threads=dict(nohash=True, usedefault=True,), + output_image=dict(argstr="-o %s", usedefault=True,), + pad_or_crop=dict(argstr="-p %s",), + permute_axes=dict(argstr="-g",), + 
rgb_image=dict(argstr="-r %s", extensions=None, mandatory=True,), + slices=dict(argstr="-s %s",), + tile_geometry=dict(argstr="-t %s",), ) inputs = CreateTiledMosaic.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CreateTiledMosaic_outputs(): - output_map = dict(output_image=dict(extensions=None, ), ) + output_map = dict(output_image=dict(extensions=None,),) outputs = CreateTiledMosaic.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py b/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py index 0c65abc907..244b2ca778 100644 --- a/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py +++ b/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py @@ -4,60 +4,41 @@ def test_DenoiseImage_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dimension=dict(argstr='-d %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_image=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s",), + dimension=dict(argstr="-d %d",), + environ=dict(nohash=True, usedefault=True,), + input_image=dict(argstr="-i %s", extensions=None, mandatory=True,), noise_image=dict( extensions=None, hash_files=False, keep_extension=True, - name_source=['input_image'], - name_template='%s_noise', - ), - noise_model=dict( - argstr='-n %s', - usedefault=True, - ), - num_threads=dict( - nohash=True, - usedefault=True, + name_source=["input_image"], + name_template="%s_noise", ), + noise_model=dict(argstr="-n %s", usedefault=True,), + num_threads=dict(nohash=True, usedefault=True,), output_image=dict( - argstr='-o %s', + argstr="-o %s", extensions=None, hash_files=False, keep_extension=True, - name_source=['input_image'], - name_template='%s_noise_corrected', + name_source=["input_image"], + name_template="%s_noise_corrected", ), - save_noise=dict( - mandatory=True, - usedefault=True, - xor=['noise_image'], - ), - shrink_factor=dict( - argstr='-s %s', - usedefault=True, - ), - verbose=dict(argstr='-v', ), + save_noise=dict(mandatory=True, usedefault=True, xor=["noise_image"],), + shrink_factor=dict(argstr="-s %s", usedefault=True,), + verbose=dict(argstr="-v",), ) inputs = DenoiseImage.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DenoiseImage_outputs(): output_map = dict( - noise_image=dict(extensions=None, ), - output_image=dict(extensions=None, ), + noise_image=dict(extensions=None,), output_image=dict(extensions=None,), ) outputs = DenoiseImage.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_JointFusion.py b/nipype/interfaces/ants/tests/test_auto_JointFusion.py index b6639938bb..4919b27a2d 100644 --- a/nipype/interfaces/ants/tests/test_auto_JointFusion.py +++ b/nipype/interfaces/ants/tests/test_auto_JointFusion.py @@ -4,82 +4,40 @@ def test_JointFusion_inputs(): input_map = dict( - alpha=dict( - requires=['method'], - usedefault=True, - ), - args=dict(argstr='%s', ), - atlas_group_id=dict(argstr='-gp %d...', ), - atlas_group_weights=dict(argstr='-gpw %d...', ), - beta=dict( - requires=['method'], - usedefault=True, - ), - dimension=dict( - argstr='%d', - mandatory=True, - position=0, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - exclusion_region=dict( - argstr='-x %s', - extensions=None, - ), - 
method=dict( - argstr='-m %s', - usedefault=True, - ), - modalities=dict( - argstr='%d', - mandatory=True, - position=1, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + alpha=dict(requires=["method"], usedefault=True,), + args=dict(argstr="%s",), + atlas_group_id=dict(argstr="-gp %d...",), + atlas_group_weights=dict(argstr="-gpw %d...",), + beta=dict(requires=["method"], usedefault=True,), + dimension=dict(argstr="%d", mandatory=True, position=0, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + exclusion_region=dict(argstr="-x %s", extensions=None,), + method=dict(argstr="-m %s", usedefault=True,), + modalities=dict(argstr="%d", mandatory=True, position=1,), + num_threads=dict(nohash=True, usedefault=True,), output_label_image=dict( - argstr='%s', + argstr="%s", extensions=None, mandatory=True, - name_template='%s', - output_name='output_label_image', + name_template="%s", + output_name="output_label_image", position=-1, ), - patch_radius=dict( - argstr='-rp %s', - maxlen=3, - minlen=3, - ), - search_radius=dict( - argstr='-rs %s', - maxlen=3, - minlen=3, - ), - target_image=dict( - argstr='-tg %s...', - mandatory=True, - ), - warped_intensity_images=dict( - argstr='-g %s...', - mandatory=True, - ), - warped_label_images=dict( - argstr='-l %s...', - mandatory=True, - ), + patch_radius=dict(argstr="-rp %s", maxlen=3, minlen=3,), + search_radius=dict(argstr="-rs %s", maxlen=3, minlen=3,), + target_image=dict(argstr="-tg %s...", mandatory=True,), + warped_intensity_images=dict(argstr="-g %s...", mandatory=True,), + warped_label_images=dict(argstr="-l %s...", mandatory=True,), ) inputs = JointFusion.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_JointFusion_outputs(): - output_map = dict(output_label_image=dict(extensions=None, ), ) + output_map = dict(output_label_image=dict(extensions=None,),) outputs = JointFusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py b/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py index 881071500e..94ce9e9abf 100644 --- a/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py +++ b/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py @@ -4,82 +4,55 @@ def test_KellyKapowski_inputs(): input_map = dict( - args=dict(argstr='%s', ), - convergence=dict( - argstr='--convergence "%s"', - usedefault=True, - ), + args=dict(argstr="%s",), + convergence=dict(argstr='--convergence "%s"', usedefault=True,), cortical_thickness=dict( argstr='--output "%s"', extensions=None, hash_files=False, keep_extension=True, - name_source=['segmentation_image'], - name_template='%s_cortical_thickness', - ), - dimension=dict( - argstr='--image-dimensionality %d', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, + name_source=["segmentation_image"], + name_template="%s_cortical_thickness", ), - gradient_step=dict( - argstr='--gradient-step %f', - usedefault=True, - ), - gray_matter_label=dict(usedefault=True, ), + dimension=dict(argstr="--image-dimensionality %d", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + gradient_step=dict(argstr="--gradient-step %f", usedefault=True,), + gray_matter_label=dict(usedefault=True,), gray_matter_prob_image=dict( - argstr='--gray-matter-probability-image "%s"', - extensions=None, + argstr='--gray-matter-probability-image "%s"', extensions=None, ), 
max_invert_displacement_field_iters=dict( - argstr= - '--maximum-number-of-invert-displacement-field-iterations %d', - usedefault=True, - ), - num_threads=dict( - nohash=True, + argstr="--maximum-number-of-invert-displacement-field-iterations %d", usedefault=True, ), + num_threads=dict(nohash=True, usedefault=True,), number_integration_points=dict( - argstr='--number-of-integration-points %d', - usedefault=True, + argstr="--number-of-integration-points %d", usedefault=True, ), segmentation_image=dict( - argstr='--segmentation-image "%s"', - extensions=None, - mandatory=True, - ), - smoothing_variance=dict( - argstr='--smoothing-variance %f', - usedefault=True, + argstr='--segmentation-image "%s"', extensions=None, mandatory=True, ), + smoothing_variance=dict(argstr="--smoothing-variance %f", usedefault=True,), smoothing_velocity_field=dict( - argstr='--smoothing-velocity-field-parameter %f', - usedefault=True, + argstr="--smoothing-velocity-field-parameter %f", usedefault=True, ), thickness_prior_estimate=dict( - argstr='--thickness-prior-estimate %f', - usedefault=True, + argstr="--thickness-prior-estimate %f", usedefault=True, ), thickness_prior_image=dict( - argstr='--thickness-prior-image "%s"', - extensions=None, + argstr='--thickness-prior-image "%s"', extensions=None, ), - use_bspline_smoothing=dict(argstr='--use-bspline-smoothing 1', ), + use_bspline_smoothing=dict(argstr="--use-bspline-smoothing 1",), warped_white_matter=dict( extensions=None, hash_files=False, keep_extension=True, - name_source=['segmentation_image'], - name_template='%s_warped_white_matter', + name_source=["segmentation_image"], + name_template="%s_warped_white_matter", ), - white_matter_label=dict(usedefault=True, ), + white_matter_label=dict(usedefault=True,), white_matter_prob_image=dict( - argstr='--white-matter-probability-image "%s"', - extensions=None, + argstr='--white-matter-probability-image "%s"', extensions=None, ), ) inputs = KellyKapowski.input_spec() @@ -87,10 +60,12 @@ def test_KellyKapowski_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_KellyKapowski_outputs(): output_map = dict( - cortical_thickness=dict(extensions=None, ), - warped_white_matter=dict(extensions=None, ), + cortical_thickness=dict(extensions=None,), + warped_white_matter=dict(extensions=None,), ) outputs = KellyKapowski.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py b/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py index 73e789f885..dfc4e0ff60 100644 --- a/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py +++ b/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py @@ -4,37 +4,18 @@ def test_LabelGeometry_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dimension=dict( - argstr='%d', - position=0, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + dimension=dict(argstr="%d", position=0, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), intensity_image=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - usedefault=True, - ), - label_image=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=1, - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", extensions=None, mandatory=True, position=2, usedefault=True, ), + label_image=dict(argstr="%s", extensions=None, mandatory=True, position=1,), + num_threads=dict(nohash=True, 
usedefault=True,), output_file=dict( - argstr='%s', - name_source=['label_image'], - name_template='%s.csv', + argstr="%s", + name_source=["label_image"], + name_template="%s.csv", position=3, ), ) @@ -43,8 +24,10 @@ def test_LabelGeometry_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_LabelGeometry_outputs(): - output_map = dict(output_file=dict(extensions=None, ), ) + output_map = dict(output_file=dict(extensions=None,),) outputs = LabelGeometry.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py b/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py index 89fa00044c..3b18ca5d0f 100644 --- a/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py +++ b/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py @@ -4,70 +4,39 @@ def test_LaplacianThickness_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dT=dict( - argstr='%s', - position=6, - requires=['prior_thickness'], - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + dT=dict(argstr="%s", position=6, requires=["prior_thickness"],), + environ=dict(nohash=True, usedefault=True,), input_gm=dict( - argstr='%s', - copyfile=True, - extensions=None, - mandatory=True, - position=2, + argstr="%s", copyfile=True, extensions=None, mandatory=True, position=2, ), input_wm=dict( - argstr='%s', - copyfile=True, - extensions=None, - mandatory=True, - position=1, - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", copyfile=True, extensions=None, mandatory=True, position=1, ), + num_threads=dict(nohash=True, usedefault=True,), output_image=dict( - argstr='%s', + argstr="%s", extensions=None, hash_files=False, keep_extension=True, - name_source=['input_wm'], - name_template='%s_thickness', + name_source=["input_wm"], + name_template="%s_thickness", position=3, ), - prior_thickness=dict( - argstr='%s', - position=5, - requires=['smooth_param'], - ), - smooth_param=dict( - argstr='%s', - position=4, - ), - sulcus_prior=dict( - argstr='%s', - position=7, - requires=['dT'], - ), - tolerance=dict( - argstr='%s', - position=8, - requires=['sulcus_prior'], - ), + prior_thickness=dict(argstr="%s", position=5, requires=["smooth_param"],), + smooth_param=dict(argstr="%s", position=4,), + sulcus_prior=dict(argstr="%s", position=7, requires=["dT"],), + tolerance=dict(argstr="%s", position=8, requires=["sulcus_prior"],), ) inputs = LaplacianThickness.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_LaplacianThickness_outputs(): - output_map = dict(output_image=dict(extensions=None, ), ) + output_map = dict(output_image=dict(extensions=None,),) outputs = LaplacianThickness.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py b/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py index bc90711b69..2d15c49afa 100644 --- a/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py +++ b/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py @@ -4,63 +4,29 @@ def test_MeasureImageSimilarity_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dimension=dict( - argstr='--dimensionality %d', - position=1, - ), - environ=dict( - nohash=True, - 
usedefault=True, - ), - fixed_image=dict( - extensions=None, - mandatory=True, - ), - fixed_image_mask=dict( - argstr='%s', - extensions=None, - ), - metric=dict( - argstr='%s', - mandatory=True, - ), - metric_weight=dict( - requires=['metric'], - usedefault=True, - ), - moving_image=dict( - extensions=None, - mandatory=True, - ), - moving_image_mask=dict( - extensions=None, - requires=['fixed_image_mask'], - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - radius_or_number_of_bins=dict( - mandatory=True, - requires=['metric'], - ), - sampling_percentage=dict( - mandatory=True, - requires=['metric'], - ), - sampling_strategy=dict( - requires=['metric'], - usedefault=True, - ), + args=dict(argstr="%s",), + dimension=dict(argstr="--dimensionality %d", position=1,), + environ=dict(nohash=True, usedefault=True,), + fixed_image=dict(extensions=None, mandatory=True,), + fixed_image_mask=dict(argstr="%s", extensions=None,), + metric=dict(argstr="%s", mandatory=True,), + metric_weight=dict(requires=["metric"], usedefault=True,), + moving_image=dict(extensions=None, mandatory=True,), + moving_image_mask=dict(extensions=None, requires=["fixed_image_mask"],), + num_threads=dict(nohash=True, usedefault=True,), + radius_or_number_of_bins=dict(mandatory=True, requires=["metric"],), + sampling_percentage=dict(mandatory=True, requires=["metric"],), + sampling_strategy=dict(requires=["metric"], usedefault=True,), ) inputs = MeasureImageSimilarity.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MeasureImageSimilarity_outputs(): - output_map = dict(similarity=dict(), ) + output_map = dict(similarity=dict(),) outputs = MeasureImageSimilarity.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py b/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py index 5aad6c1722..8a8d0958eb 100644 --- a/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py +++ b/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py @@ -4,45 +4,25 @@ def test_MultiplyImages_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dimension=dict( - argstr='%d', - mandatory=True, - position=0, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - first_input=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=1, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + dimension=dict(argstr="%d", mandatory=True, position=0,), + environ=dict(nohash=True, usedefault=True,), + first_input=dict(argstr="%s", extensions=None, mandatory=True, position=1,), + num_threads=dict(nohash=True, usedefault=True,), output_product_image=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=3, - ), - second_input=dict( - argstr='%s', - mandatory=True, - position=2, + argstr="%s", extensions=None, mandatory=True, position=3, ), + second_input=dict(argstr="%s", mandatory=True, position=2,), ) inputs = MultiplyImages.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MultiplyImages_outputs(): - output_map = dict(output_product_image=dict(extensions=None, ), ) + output_map = dict(output_product_image=dict(extensions=None,),) outputs = MultiplyImages.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py b/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py index c7391afb71..2426660455 100644 --- a/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py +++ b/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py @@ -4,74 +4,41 @@ def test_N4BiasFieldCorrection_inputs(): input_map = dict( - args=dict(argstr='%s', ), - bias_image=dict( - extensions=None, - hash_files=False, - ), - bspline_fitting_distance=dict(argstr='--bspline-fitting %s', ), - bspline_order=dict(requires=['bspline_fitting_distance'], ), - convergence_threshold=dict(requires=['n_iterations'], ), - copy_header=dict( - mandatory=True, - usedefault=True, - ), - dimension=dict( - argstr='-d %d', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - histogram_sharpening=dict( - argstr='--histogram-sharpening [%g,%g,%d]', ), - input_image=dict( - argstr='--input-image %s', - extensions=None, - mandatory=True, - ), - mask_image=dict( - argstr='--mask-image %s', - extensions=None, - ), - n_iterations=dict(argstr='--convergence %s', ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + bias_image=dict(extensions=None, hash_files=False,), + bspline_fitting_distance=dict(argstr="--bspline-fitting %s",), + bspline_order=dict(requires=["bspline_fitting_distance"],), + convergence_threshold=dict(requires=["n_iterations"],), + copy_header=dict(mandatory=True, usedefault=True,), + dimension=dict(argstr="-d %d", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + histogram_sharpening=dict(argstr="--histogram-sharpening [%g,%g,%d]",), + input_image=dict(argstr="--input-image %s", extensions=None, mandatory=True,), + mask_image=dict(argstr="--mask-image %s", extensions=None,), + n_iterations=dict(argstr="--convergence %s",), + num_threads=dict(nohash=True, usedefault=True,), output_image=dict( - argstr='--output %s', + argstr="--output %s", hash_files=False, keep_extension=True, - name_source=['input_image'], - name_template='%s_corrected', - ), - rescale_intensities=dict( - argstr='-r', - min_ver='2.1.0', - usedefault=True, - ), - save_bias=dict( - mandatory=True, - usedefault=True, - xor=['bias_image'], - ), - shrink_factor=dict(argstr='--shrink-factor %d', ), - weight_image=dict( - argstr='--weight-image %s', - extensions=None, + name_source=["input_image"], + name_template="%s_corrected", ), + rescale_intensities=dict(argstr="-r", min_ver="2.1.0", usedefault=True,), + save_bias=dict(mandatory=True, usedefault=True, xor=["bias_image"],), + shrink_factor=dict(argstr="--shrink-factor %d",), + weight_image=dict(argstr="--weight-image %s", extensions=None,), ) inputs = N4BiasFieldCorrection.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_N4BiasFieldCorrection_outputs(): output_map = dict( - bias_image=dict(extensions=None, ), - output_image=dict(extensions=None, ), + bias_image=dict(extensions=None,), output_image=dict(extensions=None,), ) outputs = N4BiasFieldCorrection.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_Registration.py b/nipype/interfaces/ants/tests/test_auto_Registration.py index 1231b1d1a0..33921e8638 100644 --- a/nipype/interfaces/ants/tests/test_auto_Registration.py +++ b/nipype/interfaces/ants/tests/test_auto_Registration.py @@ -4,141 +4,81 @@ def test_Registration_inputs(): input_map = dict( - 
args=dict(argstr='%s', ), + args=dict(argstr="%s",), collapse_output_transforms=dict( - argstr='--collapse-output-transforms %d', - usedefault=True, - ), - convergence_threshold=dict( - requires=['number_of_iterations'], - usedefault=True, + argstr="--collapse-output-transforms %d", usedefault=True, ), + convergence_threshold=dict(requires=["number_of_iterations"], usedefault=True,), convergence_window_size=dict( - requires=['convergence_threshold'], - usedefault=True, - ), - dimension=dict( - argstr='--dimensionality %d', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, + requires=["convergence_threshold"], usedefault=True, ), - fixed_image=dict(mandatory=True, ), + dimension=dict(argstr="--dimensionality %d", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + fixed_image=dict(mandatory=True,), fixed_image_mask=dict( - argstr='%s', - extensions=None, - max_ver='2.1.0', - xor=['fixed_image_masks'], - ), - fixed_image_masks=dict( - min_ver='2.2.0', - xor=['fixed_image_mask'], + argstr="%s", extensions=None, max_ver="2.1.0", xor=["fixed_image_masks"], ), - float=dict(argstr='--float %d', ), + fixed_image_masks=dict(min_ver="2.2.0", xor=["fixed_image_mask"],), + float=dict(argstr="--float %d",), initial_moving_transform=dict( - argstr='%s', - xor=['initial_moving_transform_com'], + argstr="%s", xor=["initial_moving_transform_com"], ), initial_moving_transform_com=dict( - argstr='%s', - xor=['initial_moving_transform'], + argstr="%s", xor=["initial_moving_transform"], ), initialize_transforms_per_stage=dict( - argstr='--initialize-transforms-per-stage %d', - usedefault=True, - ), - interpolation=dict( - argstr='%s', - usedefault=True, + argstr="--initialize-transforms-per-stage %d", usedefault=True, ), + interpolation=dict(argstr="%s", usedefault=True,), interpolation_parameters=dict(), invert_initial_moving_transform=dict( - requires=['initial_moving_transform'], - xor=['initial_moving_transform_com'], + requires=["initial_moving_transform"], xor=["initial_moving_transform_com"], ), - metric=dict(mandatory=True, ), + metric=dict(mandatory=True,), metric_item_trait=dict(), metric_stage_trait=dict(), - metric_weight=dict( - mandatory=True, - requires=['metric'], - usedefault=True, - ), - metric_weight_item_trait=dict(usedefault=True, ), + metric_weight=dict(mandatory=True, requires=["metric"], usedefault=True,), + metric_weight_item_trait=dict(usedefault=True,), metric_weight_stage_trait=dict(), - moving_image=dict(mandatory=True, ), + moving_image=dict(mandatory=True,), moving_image_mask=dict( extensions=None, - max_ver='2.1.0', - requires=['fixed_image_mask'], - xor=['moving_image_masks'], - ), - moving_image_masks=dict( - min_ver='2.2.0', - xor=['moving_image_mask'], - ), - num_threads=dict( - nohash=True, - usedefault=True, + max_ver="2.1.0", + requires=["fixed_image_mask"], + xor=["moving_image_masks"], ), + moving_image_masks=dict(min_ver="2.2.0", xor=["moving_image_mask"],), + num_threads=dict(nohash=True, usedefault=True,), number_of_iterations=dict(), output_inverse_warped_image=dict( - hash_files=False, - requires=['output_warped_image'], + hash_files=False, requires=["output_warped_image"], ), - output_transform_prefix=dict( - argstr='%s', - usedefault=True, - ), - output_warped_image=dict(hash_files=False, ), - radius_bins_item_trait=dict(usedefault=True, ), + output_transform_prefix=dict(argstr="%s", usedefault=True,), + output_warped_image=dict(hash_files=False,), + radius_bins_item_trait=dict(usedefault=True,), 
radius_bins_stage_trait=dict(), - radius_or_number_of_bins=dict( - requires=['metric_weight'], - usedefault=True, - ), - restore_state=dict( - argstr='--restore-state %s', - extensions=None, - ), + radius_or_number_of_bins=dict(requires=["metric_weight"], usedefault=True,), + restore_state=dict(argstr="--restore-state %s", extensions=None,), restrict_deformation=dict(), - sampling_percentage=dict(requires=['sampling_strategy'], ), + sampling_percentage=dict(requires=["sampling_strategy"],), sampling_percentage_item_trait=dict(), sampling_percentage_stage_trait=dict(), - sampling_strategy=dict(requires=['metric_weight'], ), + sampling_strategy=dict(requires=["metric_weight"],), sampling_strategy_item_trait=dict(), sampling_strategy_stage_trait=dict(), - save_state=dict( - argstr='--save-state %s', - extensions=None, - ), - shrink_factors=dict(mandatory=True, ), - sigma_units=dict(requires=['smoothing_sigmas'], ), - smoothing_sigmas=dict(mandatory=True, ), + save_state=dict(argstr="--save-state %s", extensions=None,), + shrink_factors=dict(mandatory=True,), + sigma_units=dict(requires=["smoothing_sigmas"],), + smoothing_sigmas=dict(mandatory=True,), transform_parameters=dict(), - transforms=dict( - argstr='%s', - mandatory=True, - ), + transforms=dict(argstr="%s", mandatory=True,), use_estimate_learning_rate_once=dict(), - use_histogram_matching=dict(usedefault=True, ), - verbose=dict( - argstr='-v', - usedefault=True, - ), - winsorize_lower_quantile=dict( - argstr='%s', - usedefault=True, - ), - winsorize_upper_quantile=dict( - argstr='%s', - usedefault=True, - ), + use_histogram_matching=dict(usedefault=True,), + verbose=dict(argstr="-v", usedefault=True,), + winsorize_lower_quantile=dict(argstr="%s", usedefault=True,), + winsorize_upper_quantile=dict(argstr="%s", usedefault=True,), write_composite_transform=dict( - argstr='--write-composite-transform %d', - usedefault=True, + argstr="--write-composite-transform %d", usedefault=True, ), ) inputs = Registration.input_spec() @@ -146,19 +86,21 @@ def test_Registration_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Registration_outputs(): output_map = dict( - composite_transform=dict(extensions=None, ), + composite_transform=dict(extensions=None,), elapsed_time=dict(), forward_invert_flags=dict(), forward_transforms=dict(), - inverse_composite_transform=dict(extensions=None, ), - inverse_warped_image=dict(extensions=None, ), + inverse_composite_transform=dict(extensions=None,), + inverse_warped_image=dict(extensions=None,), metric_value=dict(), reverse_invert_flags=dict(), reverse_transforms=dict(), - save_state=dict(extensions=None, ), - warped_image=dict(extensions=None, ), + save_state=dict(extensions=None,), + warped_image=dict(extensions=None,), ) outputs = Registration.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py b/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py index 5448df5ed5..3bc1b8aa06 100644 --- a/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py +++ b/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py @@ -4,61 +4,33 @@ def test_RegistrationSynQuick_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dimension=dict( - argstr='-d %d', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixed_image=dict( - argstr='-f %s...', - mandatory=True, - ), - histogram_bins=dict( - argstr='-r %d', - 
usedefault=True, - ), - moving_image=dict( - argstr='-m %s...', - mandatory=True, - ), - num_threads=dict( - argstr='-n %d', - usedefault=True, - ), - output_prefix=dict( - argstr='-o %s', - usedefault=True, - ), - precision_type=dict( - argstr='-p %s', - usedefault=True, - ), - spline_distance=dict( - argstr='-s %d', - usedefault=True, - ), - transform_type=dict( - argstr='-t %s', - usedefault=True, - ), - use_histogram_matching=dict(argstr='-j %d', ), + args=dict(argstr="%s",), + dimension=dict(argstr="-d %d", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + fixed_image=dict(argstr="-f %s...", mandatory=True,), + histogram_bins=dict(argstr="-r %d", usedefault=True,), + moving_image=dict(argstr="-m %s...", mandatory=True,), + num_threads=dict(argstr="-n %d", usedefault=True,), + output_prefix=dict(argstr="-o %s", usedefault=True,), + precision_type=dict(argstr="-p %s", usedefault=True,), + spline_distance=dict(argstr="-s %d", usedefault=True,), + transform_type=dict(argstr="-t %s", usedefault=True,), + use_histogram_matching=dict(argstr="-j %d",), ) inputs = RegistrationSynQuick.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_RegistrationSynQuick_outputs(): output_map = dict( - forward_warp_field=dict(extensions=None, ), - inverse_warp_field=dict(extensions=None, ), - inverse_warped_image=dict(extensions=None, ), - out_matrix=dict(extensions=None, ), - warped_image=dict(extensions=None, ), + forward_warp_field=dict(extensions=None,), + inverse_warp_field=dict(extensions=None,), + inverse_warped_image=dict(extensions=None,), + out_matrix=dict(extensions=None,), + warped_image=dict(extensions=None,), ) outputs = RegistrationSynQuick.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py b/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py index e8385c9c6a..b9acca1442 100644 --- a/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py @@ -4,66 +4,39 @@ def test_WarpImageMultiTransform_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dimension=dict( - argstr='%d', - position=1, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_image=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), + args=dict(argstr="%s",), + dimension=dict(argstr="%d", position=1, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + input_image=dict(argstr="%s", extensions=None, mandatory=True, position=2,), invert_affine=dict(), - num_threads=dict( - nohash=True, - usedefault=True, - ), + num_threads=dict(nohash=True, usedefault=True,), out_postfix=dict( - extensions=None, - hash_files=False, - usedefault=True, - xor=['output_image'], + extensions=None, hash_files=False, usedefault=True, xor=["output_image"], ), output_image=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, hash_files=False, position=3, - xor=['out_postfix'], - ), - reference_image=dict( - argstr='-R %s', - extensions=None, - xor=['tightest_box'], - ), - reslice_by_header=dict(argstr='--reslice-by-header', ), - tightest_box=dict( - argstr='--tightest-bounding-box', - xor=['reference_image'], - ), - transformation_series=dict( - argstr='%s', - mandatory=True, - position=-1, - ), - use_bspline=dict(argstr='--use-BSpline', ), - use_nearest=dict(argstr='--use-NN', ), + 
xor=["out_postfix"], + ), + reference_image=dict(argstr="-R %s", extensions=None, xor=["tightest_box"],), + reslice_by_header=dict(argstr="--reslice-by-header",), + tightest_box=dict(argstr="--tightest-bounding-box", xor=["reference_image"],), + transformation_series=dict(argstr="%s", mandatory=True, position=-1,), + use_bspline=dict(argstr="--use-BSpline",), + use_nearest=dict(argstr="--use-NN",), ) inputs = WarpImageMultiTransform.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_WarpImageMultiTransform_outputs(): - output_map = dict(output_image=dict(extensions=None, ), ) + output_map = dict(output_image=dict(extensions=None,),) outputs = WarpImageMultiTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py b/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py index 0ab8a379de..e95d70c9ac 100644 --- a/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py @@ -4,56 +4,29 @@ def test_WarpTimeSeriesImageMultiTransform_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dimension=dict( - argstr='%d', - position=1, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_image=dict( - argstr='%s', - copyfile=True, - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s",), + dimension=dict(argstr="%d", position=1, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + input_image=dict(argstr="%s", copyfile=True, extensions=None, mandatory=True,), invert_affine=dict(), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_postfix=dict( - argstr='%s', - usedefault=True, - ), - reference_image=dict( - argstr='-R %s', - extensions=None, - xor=['tightest_box'], - ), - reslice_by_header=dict(argstr='--reslice-by-header', ), - tightest_box=dict( - argstr='--tightest-bounding-box', - xor=['reference_image'], - ), - transformation_series=dict( - argstr='%s', - copyfile=False, - mandatory=True, - ), - use_bspline=dict(argstr='--use-Bspline', ), - use_nearest=dict(argstr='--use-NN', ), + num_threads=dict(nohash=True, usedefault=True,), + out_postfix=dict(argstr="%s", usedefault=True,), + reference_image=dict(argstr="-R %s", extensions=None, xor=["tightest_box"],), + reslice_by_header=dict(argstr="--reslice-by-header",), + tightest_box=dict(argstr="--tightest-bounding-box", xor=["reference_image"],), + transformation_series=dict(argstr="%s", copyfile=False, mandatory=True,), + use_bspline=dict(argstr="--use-Bspline",), + use_nearest=dict(argstr="--use-NN",), ) inputs = WarpTimeSeriesImageMultiTransform.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_WarpTimeSeriesImageMultiTransform_outputs(): - output_map = dict(output_image=dict(extensions=None, ), ) + output_map = dict(output_image=dict(extensions=None,),) outputs = WarpTimeSeriesImageMultiTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_extra_Registration.py b/nipype/interfaces/ants/tests/test_extra_Registration.py index 1f38f3c61f..d134324253 100644 --- a/nipype/interfaces/ants/tests/test_extra_Registration.py +++ 
b/nipype/interfaces/ants/tests/test_extra_Registration.py @@ -8,13 +8,13 @@ def test_ants_mand(tmpdir): tmpdir.chdir() filepath = os.path.dirname(os.path.realpath(__file__)) - datadir = os.path.realpath(os.path.join(filepath, '../../../testing/data')) + datadir = os.path.realpath(os.path.join(filepath, "../../../testing/data")) ants = registration.ANTS() ants.inputs.transformation_model = "SyN" - ants.inputs.moving_image = [os.path.join(datadir, 'resting.nii')] - ants.inputs.fixed_image = [os.path.join(datadir, 'T1.nii')] - ants.inputs.metric = ['MI'] + ants.inputs.moving_image = [os.path.join(datadir, "resting.nii")] + ants.inputs.fixed_image = [os.path.join(datadir, "T1.nii")] + ants.inputs.metric = ["MI"] with pytest.raises(ValueError) as er: ants.run() diff --git a/nipype/interfaces/ants/tests/test_resampling.py b/nipype/interfaces/ants/tests/test_resampling.py index 14903f0137..3b1da9d3ee 100644 --- a/nipype/interfaces/ants/tests/test_resampling.py +++ b/nipype/interfaces/ants/tests/test_resampling.py @@ -1,7 +1,10 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from nipype.interfaces.ants import WarpImageMultiTransform, WarpTimeSeriesImageMultiTransform +from nipype.interfaces.ants import ( + WarpImageMultiTransform, + WarpTimeSeriesImageMultiTransform, +) import os import pytest @@ -10,7 +13,7 @@ def change_dir(request): orig_dir = os.getcwd() filepath = os.path.dirname(os.path.realpath(__file__)) - datadir = os.path.realpath(os.path.join(filepath, '../../../testing/data')) + datadir = os.path.realpath(os.path.join(filepath, "../../../testing/data")) os.chdir(datadir) def move2orig(): @@ -22,32 +25,43 @@ def move2orig(): @pytest.fixture() def create_wimt(): wimt = WarpImageMultiTransform() - wimt.inputs.input_image = 'diffusion_weighted.nii' - wimt.inputs.reference_image = 'functional.nii' + wimt.inputs.input_image = "diffusion_weighted.nii" + wimt.inputs.reference_image = "functional.nii" wimt.inputs.transformation_series = [ - 'func2anat_coreg_Affine.txt', 'func2anat_InverseWarp.nii.gz', - 'dwi2anat_Warp.nii.gz', 'dwi2anat_coreg_Affine.txt' + "func2anat_coreg_Affine.txt", + "func2anat_InverseWarp.nii.gz", + "dwi2anat_Warp.nii.gz", + "dwi2anat_coreg_Affine.txt", ] return wimt def test_WarpImageMultiTransform(change_dir, create_wimt): wimt = create_wimt - assert wimt.cmdline == 'WarpImageMultiTransform 3 diffusion_weighted.nii diffusion_weighted_wimt.nii -R functional.nii \ -func2anat_coreg_Affine.txt func2anat_InverseWarp.nii.gz dwi2anat_Warp.nii.gz dwi2anat_coreg_Affine.txt' + assert ( + wimt.cmdline + == "WarpImageMultiTransform 3 diffusion_weighted.nii diffusion_weighted_wimt.nii -R functional.nii \ +func2anat_coreg_Affine.txt func2anat_InverseWarp.nii.gz dwi2anat_Warp.nii.gz dwi2anat_coreg_Affine.txt" + ) def test_WarpImageMultiTransform_invaffine_1(change_dir, create_wimt): wimt = create_wimt wimt.inputs.invert_affine = [1] - assert wimt.cmdline == 'WarpImageMultiTransform 3 diffusion_weighted.nii diffusion_weighted_wimt.nii -R functional.nii \ --i func2anat_coreg_Affine.txt func2anat_InverseWarp.nii.gz dwi2anat_Warp.nii.gz dwi2anat_coreg_Affine.txt' + assert ( + wimt.cmdline + == "WarpImageMultiTransform 3 diffusion_weighted.nii diffusion_weighted_wimt.nii -R functional.nii \ +-i func2anat_coreg_Affine.txt func2anat_InverseWarp.nii.gz dwi2anat_Warp.nii.gz dwi2anat_coreg_Affine.txt" + ) def test_WarpImageMultiTransform_invaffine_2(change_dir, create_wimt): wimt = create_wimt wimt.inputs.invert_affine = [2] - 
assert wimt.cmdline == 'WarpImageMultiTransform 3 diffusion_weighted.nii diffusion_weighted_wimt.nii -R functional.nii func2anat_coreg_Affine.txt func2anat_InverseWarp.nii.gz dwi2anat_Warp.nii.gz -i dwi2anat_coreg_Affine.txt' + assert ( + wimt.cmdline + == "WarpImageMultiTransform 3 diffusion_weighted.nii diffusion_weighted_wimt.nii -R functional.nii func2anat_coreg_Affine.txt func2anat_InverseWarp.nii.gz dwi2anat_Warp.nii.gz -i dwi2anat_coreg_Affine.txt" + ) def test_WarpImageMultiTransform_invaffine_wrong(change_dir, create_wimt): @@ -60,30 +74,32 @@ def test_WarpImageMultiTransform_invaffine_wrong(change_dir, create_wimt): @pytest.fixture() def create_wtsimt(): wtsimt = WarpTimeSeriesImageMultiTransform() - wtsimt.inputs.input_image = 'resting.nii' - wtsimt.inputs.reference_image = 'ants_deformed.nii.gz' - wtsimt.inputs.transformation_series = [ - 'ants_Warp.nii.gz', 'ants_Affine.txt' - ] + wtsimt.inputs.input_image = "resting.nii" + wtsimt.inputs.reference_image = "ants_deformed.nii.gz" + wtsimt.inputs.transformation_series = ["ants_Warp.nii.gz", "ants_Affine.txt"] return wtsimt def test_WarpTimeSeriesImageMultiTransform(change_dir, create_wtsimt): wtsimt = create_wtsimt - assert wtsimt.cmdline == 'WarpTimeSeriesImageMultiTransform 4 resting.nii resting_wtsimt.nii \ --R ants_deformed.nii.gz ants_Warp.nii.gz ants_Affine.txt' + assert ( + wtsimt.cmdline + == "WarpTimeSeriesImageMultiTransform 4 resting.nii resting_wtsimt.nii \ +-R ants_deformed.nii.gz ants_Warp.nii.gz ants_Affine.txt" + ) -def test_WarpTimeSeriesImageMultiTransform_invaffine(change_dir, - create_wtsimt): +def test_WarpTimeSeriesImageMultiTransform_invaffine(change_dir, create_wtsimt): wtsimt = create_wtsimt wtsimt.inputs.invert_affine = [1] - assert wtsimt.cmdline == 'WarpTimeSeriesImageMultiTransform 4 resting.nii resting_wtsimt.nii \ --R ants_deformed.nii.gz ants_Warp.nii.gz -i ants_Affine.txt' + assert ( + wtsimt.cmdline + == "WarpTimeSeriesImageMultiTransform 4 resting.nii resting_wtsimt.nii \ +-R ants_deformed.nii.gz ants_Warp.nii.gz -i ants_Affine.txt" + ) -def test_WarpTimeSeriesImageMultiTransform_invaffine_wrong( - change_dir, create_wtsimt): +def test_WarpTimeSeriesImageMultiTransform_invaffine_wrong(change_dir, create_wtsimt): wtsimt = create_wtsimt wtsimt.inputs.invert_affine = [0] with pytest.raises(Exception): diff --git a/nipype/interfaces/ants/tests/test_segmentation.py b/nipype/interfaces/ants/tests/test_segmentation.py index e9a1443934..4fc22ee34a 100644 --- a/nipype/interfaces/ants/tests/test_segmentation.py +++ b/nipype/interfaces/ants/tests/test_segmentation.py @@ -12,7 +12,7 @@ def change_dir(request): orig_dir = os.getcwd() filepath = os.path.dirname(os.path.realpath(__file__)) - datadir = os.path.realpath(os.path.join(filepath, '../../../testing/data')) + datadir = os.path.realpath(os.path.join(filepath, "../../../testing/data")) os.chdir(datadir) def move2orig(): @@ -25,14 +25,14 @@ def move2orig(): def create_lt(): lt = LaplacianThickness() # we do not run, so I stick some not really proper files as input - lt.inputs.input_gm = 'diffusion_weighted.nii' - lt.inputs.input_wm = 'functional.nii' + lt.inputs.input_gm = "diffusion_weighted.nii" + lt.inputs.input_wm = "functional.nii" return lt def test_LaplacianThickness_defaults(change_dir, create_lt): lt = create_lt - base_cmd = 'LaplacianThickness functional.nii diffusion_weighted.nii functional_thickness.nii' + base_cmd = "LaplacianThickness functional.nii diffusion_weighted.nii functional_thickness.nii" assert lt.cmdline == base_cmd 
lt.inputs.smooth_param = 4.5 assert lt.cmdline == base_cmd + " 4.5" @@ -43,17 +43,25 @@ def test_LaplacianThickness_defaults(change_dir, create_lt): def test_LaplacianThickness_wrongargs(change_dir, create_lt): lt = create_lt lt.inputs.tolerance = 0.001 - with pytest.raises(ValueError, match=r".* requires a value for input 'sulcus_prior' .*"): + with pytest.raises( + ValueError, match=r".* requires a value for input 'sulcus_prior' .*" + ): lt.cmdline lt.inputs.sulcus_prior = 0.15 with pytest.raises(ValueError, match=r".* requires a value for input 'dT' .*"): lt.cmdline lt.inputs.dT = 0.01 - with pytest.raises(ValueError, match=r".* requires a value for input 'prior_thickness' .*"): + with pytest.raises( + ValueError, match=r".* requires a value for input 'prior_thickness' .*" + ): lt.cmdline lt.inputs.prior_thickness = 5.9 - with pytest.raises(ValueError, match=r".* requires a value for input 'smooth_param' .*"): + with pytest.raises( + ValueError, match=r".* requires a value for input 'smooth_param' .*" + ): lt.cmdline lt.inputs.smooth_param = 4.5 - assert lt.cmdline == 'LaplacianThickness functional.nii diffusion_weighted.nii ' \ - 'functional_thickness.nii 4.5 5.9 0.01 0.15 0.001' + assert ( + lt.cmdline == "LaplacianThickness functional.nii diffusion_weighted.nii " + "functional_thickness.nii 4.5 5.9 0.01 0.15 0.001" + ) diff --git a/nipype/interfaces/ants/tests/test_spec_JointFusion.py b/nipype/interfaces/ants/tests/test_spec_JointFusion.py index a1dc03cc40..a0276afbb0 100644 --- a/nipype/interfaces/ants/tests/test_spec_JointFusion.py +++ b/nipype/interfaces/ants/tests/test_spec_JointFusion.py @@ -8,7 +8,7 @@ def test_JointFusion_dimension(): at = JointFusion() - set_dimension = lambda d: setattr(at.inputs, 'dimension', int(d)) + set_dimension = lambda d: setattr(at.inputs, "dimension", int(d)) for d in range(2, 5): set_dimension(d) assert at.inputs.dimension == int(d) @@ -20,66 +20,73 @@ def test_JointFusion_dimension(): @pytest.mark.parametrize("m", range(1, 5)) def test_JointFusion_modalities(m): at = JointFusion() - setattr(at.inputs, 'modalities', int(m)) + setattr(at.inputs, "modalities", int(m)) assert at.inputs.modalities == int(m) -@pytest.mark.parametrize("a, b", - [(a, b) for a in range(10) for b in range(10)]) +@pytest.mark.parametrize("a, b", [(a, b) for a in range(10) for b in range(10)]) def test_JointFusion_method(a, b): at = JointFusion() - set_method = lambda a, b: setattr(at.inputs, 'method', 'Joint[%.1f,%d]'.format(a, b)) + set_method = lambda a, b: setattr( + at.inputs, "method", "Joint[%.1f,%d]".format(a, b) + ) _a = a / 10.0 set_method(_a, b) # set directly - assert at.inputs.method == 'Joint[%.1f,%d]'.format(_a, b) + assert at.inputs.method == "Joint[%.1f,%d]".format(_a, b) aprime = _a + 0.1 bprime = b + 1 at.inputs.alpha = aprime at.inputs.beta = bprime # set with alpha/beta - assert at.inputs.method == 'Joint[%.1f,%d]'.format(aprime, bprime) + assert at.inputs.method == "Joint[%.1f,%d]".format(aprime, bprime) -@pytest.mark.parametrize("attr, x", - [(attr, x) - for attr in ['patch_radius', 'search_radius'] - for x in range(5)]) +@pytest.mark.parametrize( + "attr, x", + [(attr, x) for attr in ["patch_radius", "search_radius"] for x in range(5)], +) def test_JointFusion_radius(attr, x): at = JointFusion() - setattr(at.inputs, attr, [x, x + 1, x**x]) - assert at._format_arg(attr, None, getattr( - at.inputs, attr))[4:] == '{0}x{1}x{2}'.format(x, x + 1, x**x) + setattr(at.inputs, attr, [x, x + 1, x ** x]) + assert at._format_arg(attr, None, getattr(at.inputs, 
attr))[ + 4: + ] == "{0}x{1}x{2}".format(x, x + 1, x ** x) def test_JointFusion_cmd(): at = JointFusion() at.inputs.dimension = 3 at.inputs.modalities = 1 - at.inputs.method = 'Joint[0.1,2]' - at.inputs.output_label_image = 'fusion_labelimage_output.nii' - warped_intensity_images = [ - example_data('im1.nii'), - example_data('im2.nii') - ] + at.inputs.method = "Joint[0.1,2]" + at.inputs.output_label_image = "fusion_labelimage_output.nii" + warped_intensity_images = [example_data("im1.nii"), example_data("im2.nii")] at.inputs.warped_intensity_images = warped_intensity_images segmentation_images = [ - example_data('segmentation0.nii.gz'), - example_data('segmentation1.nii.gz') + example_data("segmentation0.nii.gz"), + example_data("segmentation1.nii.gz"), ] at.inputs.warped_label_images = segmentation_images - T1_image = example_data('T1.nii') + T1_image = example_data("T1.nii") at.inputs.target_image = T1_image at.inputs.patch_radius = [3, 2, 1] at.inputs.search_radius = [1, 2, 3] - expected_command = ('jointfusion 3 1 -m Joint[0.1,2] -rp 3x2x1 -rs 1x2x3' - ' -tg %s -g %s -g %s -l %s -l %s' - ' fusion_labelimage_output.nii') % ( - T1_image, warped_intensity_images[0], - warped_intensity_images[1], segmentation_images[0], - segmentation_images[1]) + expected_command = ( + "jointfusion 3 1 -m Joint[0.1,2] -rp 3x2x1 -rs 1x2x3" + " -tg %s -g %s -g %s -l %s -l %s" + " fusion_labelimage_output.nii" + ) % ( + T1_image, + warped_intensity_images[0], + warped_intensity_images[1], + segmentation_images[0], + segmentation_images[1], + ) assert at.cmdline == expected_command # setting intensity or labels with unequal lengths raises error with pytest.raises(AssertionError): - at._format_arg('warped_intensity_images', InputMultiPath, - warped_intensity_images + [example_data('im3.nii')]) + at._format_arg( + "warped_intensity_images", + InputMultiPath, + warped_intensity_images + [example_data("im3.nii")], + ) diff --git a/nipype/interfaces/ants/utils.py b/nipype/interfaces/ants/utils.py index 0725f45edc..5497535609 100644 --- a/nipype/interfaces/ants/utils.py +++ b/nipype/interfaces/ants/utils.py @@ -10,27 +10,25 @@ class AverageAffineTransformInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( - 3, - 2, - argstr='%d', - mandatory=True, - position=0, - desc='image dimension (2 or 3)') + 3, 2, argstr="%d", mandatory=True, position=0, desc="image dimension (2 or 3)" + ) output_affine_transform = File( - argstr='%s', + argstr="%s", mandatory=True, position=1, - desc='Outputfname.txt: the name of the resulting transform.') + desc="Outputfname.txt: the name of the resulting transform.", + ) transforms = InputMultiPath( File(exists=True), - argstr='%s', + argstr="%s", mandatory=True, position=3, - desc='transforms to average') + desc="transforms to average", + ) class AverageAffineTransformOutputSpec(TraitedSpec): - affine_transform = File(exists=True, desc='average transform file') + affine_transform = File(exists=True, desc="average transform file") class AverageAffineTransform(ANTSCommand): @@ -45,7 +43,8 @@ class AverageAffineTransform(ANTSCommand): >>> avg.cmdline 'AverageAffineTransform 3 MYtemplatewarp.mat trans.mat func_to_struct.mat' """ - _cmd = 'AverageAffineTransform' + + _cmd = "AverageAffineTransform" input_spec = AverageAffineTransformInputSpec output_spec = AverageAffineTransformOutputSpec @@ -54,44 +53,42 @@ def _format_arg(self, opt, spec, val): def _list_outputs(self): outputs = self._outputs().get() - outputs['affine_transform'] = os.path.abspath( - 
self.inputs.output_affine_transform) + outputs["affine_transform"] = os.path.abspath( + self.inputs.output_affine_transform + ) return outputs class AverageImagesInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( - 3, - 2, - argstr='%d', - mandatory=True, - position=0, - desc='image dimension (2 or 3)') + 3, 2, argstr="%d", mandatory=True, position=0, desc="image dimension (2 or 3)" + ) output_average_image = File( "average.nii", - argstr='%s', + argstr="%s", position=1, usedefault=True, hash_files=False, - desc='the name of the resulting image.') + desc="the name of the resulting image.", + ) normalize = traits.Bool( argstr="%d", mandatory=True, position=2, - desc='Normalize: if true, the 2nd image is divided by its mean. ' - 'This will select the largest image to average into.') + desc="Normalize: if true, the 2nd image is divided by its mean. " + "This will select the largest image to average into.", + ) images = InputMultiPath( File(exists=True), - argstr='%s', + argstr="%s", mandatory=True, position=3, - desc= - 'image to apply transformation to (generally a coregistered functional)' + desc="image to apply transformation to (generally a coregistered functional)", ) class AverageImagesOutputSpec(TraitedSpec): - output_average_image = File(exists=True, desc='average image file') + output_average_image = File(exists=True, desc="average image file") class AverageImages(ANTSCommand): @@ -107,7 +104,8 @@ class AverageImages(ANTSCommand): >>> avg.cmdline 'AverageImages 3 average.nii.gz 1 rc1s1.nii rc1s1.nii' """ - _cmd = 'AverageImages' + + _cmd = "AverageImages" input_spec = AverageImagesInputSpec output_spec = AverageImagesOutputSpec @@ -116,37 +114,37 @@ def _format_arg(self, opt, spec, val): def _list_outputs(self): outputs = self._outputs().get() - outputs['output_average_image'] = os.path.realpath( - self.inputs.output_average_image) + outputs["output_average_image"] = os.path.realpath( + self.inputs.output_average_image + ) return outputs class MultiplyImagesInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( - 3, - 2, - argstr='%d', - mandatory=True, - position=0, - desc='image dimension (2 or 3)') + 3, 2, argstr="%d", mandatory=True, position=0, desc="image dimension (2 or 3)" + ) first_input = File( - argstr='%s', exists=True, mandatory=True, position=1, desc='image 1') + argstr="%s", exists=True, mandatory=True, position=1, desc="image 1" + ) second_input = traits.Either( File(exists=True), traits.Float, - argstr='%s', + argstr="%s", mandatory=True, position=2, - desc='image 2 or multiplication weight') + desc="image 2 or multiplication weight", + ) output_product_image = File( - argstr='%s', + argstr="%s", mandatory=True, position=3, - desc='Outputfname.nii.gz: the name of the resulting image.') + desc="Outputfname.nii.gz: the name of the resulting image.", + ) class MultiplyImagesOutputSpec(TraitedSpec): - output_product_image = File(exists=True, desc='average image file') + output_product_image = File(exists=True, desc="average image file") class MultiplyImages(ANTSCommand): @@ -162,7 +160,8 @@ class MultiplyImages(ANTSCommand): >>> test.cmdline 'MultiplyImages 3 moving2.nii 0.25 out.nii' """ - _cmd = 'MultiplyImages' + + _cmd = "MultiplyImages" input_spec = MultiplyImagesInputSpec output_spec = MultiplyImagesOutputSpec @@ -171,35 +170,34 @@ def _format_arg(self, opt, spec, val): def _list_outputs(self): outputs = self._outputs().get() - outputs['output_product_image'] = os.path.abspath( - self.inputs.output_product_image) + outputs["output_product_image"] = 
os.path.abspath( + self.inputs.output_product_image + ) return outputs class CreateJacobianDeterminantImageInputSpec(ANTSCommandInputSpec): imageDimension = traits.Enum( - 3, - 2, - argstr='%d', - mandatory=True, - position=0, - desc='image dimension (2 or 3)') + 3, 2, argstr="%d", mandatory=True, position=0, desc="image dimension (2 or 3)" + ) deformationField = File( - argstr='%s', + argstr="%s", exists=True, mandatory=True, position=1, - desc='deformation transformation file') - outputImage = File( - argstr='%s', mandatory=True, position=2, desc='output filename') + desc="deformation transformation file", + ) + outputImage = File(argstr="%s", mandatory=True, position=2, desc="output filename") doLogJacobian = traits.Enum( - 0, 1, argstr='%d', position=3, desc='return the log jacobian') + 0, 1, argstr="%d", position=3, desc="return the log jacobian" + ) useGeometric = traits.Enum( - 0, 1, argstr='%d', position=4, desc='return the geometric jacobian') + 0, 1, argstr="%d", position=4, desc="return the geometric jacobian" + ) class CreateJacobianDeterminantImageOutputSpec(TraitedSpec): - jacobian_image = File(exists=True, desc='jacobian image') + jacobian_image = File(exists=True, desc="jacobian image") class CreateJacobianDeterminantImage(ANTSCommand): @@ -215,76 +213,72 @@ class CreateJacobianDeterminantImage(ANTSCommand): 'CreateJacobianDeterminantImage 3 ants_Warp.nii.gz out_name.nii.gz' """ - _cmd = 'CreateJacobianDeterminantImage' + _cmd = "CreateJacobianDeterminantImage" input_spec = CreateJacobianDeterminantImageInputSpec output_spec = CreateJacobianDeterminantImageOutputSpec def _format_arg(self, opt, spec, val): - return super(CreateJacobianDeterminantImage, self)._format_arg( - opt, spec, val) + return super(CreateJacobianDeterminantImage, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() - outputs['jacobian_image'] = os.path.abspath(self.inputs.outputImage) + outputs["jacobian_image"] = os.path.abspath(self.inputs.outputImage) return outputs class AffineInitializerInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( - 3, 2, usedefault=True, position=0, argstr='%s', desc='dimension') + 3, 2, usedefault=True, position=0, argstr="%s", desc="dimension" + ) fixed_image = File( - exists=True, - mandatory=True, - position=1, - argstr='%s', - desc='reference image') + exists=True, mandatory=True, position=1, argstr="%s", desc="reference image" + ) moving_image = File( - exists=True, - mandatory=True, - position=2, - argstr='%s', - desc='moving image') + exists=True, mandatory=True, position=2, argstr="%s", desc="moving image" + ) out_file = File( - 'transform.mat', + "transform.mat", usedefault=True, position=3, - argstr='%s', - desc='output transform file') + argstr="%s", + desc="output transform file", + ) # Defaults in antsBrainExtraction.sh -> 15 0.1 0 10 search_factor = traits.Float( 15.0, usedefault=True, position=4, - argstr='%f', - desc='increments (degrees) for affine search') + argstr="%f", + desc="increments (degrees) for affine search", + ) radian_fraction = traits.Range( 0.0, 1.0, value=0.1, usedefault=True, position=5, - argstr='%f', - desc='search this arc +/- principal axes') + argstr="%f", + desc="search this arc +/- principal axes", + ) principal_axes = traits.Bool( False, usedefault=True, position=6, - argstr='%d', - desc= - 'whether the rotation is searched around an initial principal axis alignment.' 
+ argstr="%d", + desc="whether the rotation is searched around an initial principal axis alignment.", ) local_search = traits.Int( 10, usedefault=True, position=7, - argstr='%d', - desc= - ' determines if a local optimization is run at each search point for the set ' - 'number of iterations') + argstr="%d", + desc=" determines if a local optimization is run at each search point for the set " + "number of iterations", + ) class AffineInitializerOutputSpec(TraitedSpec): - out_file = File(desc='output transform file') + out_file = File(desc="output transform file") class AffineInitializer(ANTSCommand): @@ -299,43 +293,43 @@ class AffineInitializer(ANTSCommand): 'antsAffineInitializer 3 fixed1.nii moving1.nii transform.mat 15.000000 0.100000 0 10' """ - _cmd = 'antsAffineInitializer' + + _cmd = "antsAffineInitializer" input_spec = AffineInitializerInputSpec output_spec = AffineInitializerOutputSpec def _list_outputs(self): - return {'out_file': os.path.abspath(self.inputs.out_file)} + return {"out_file": os.path.abspath(self.inputs.out_file)} class ComposeMultiTransformInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( - 3, - 2, - argstr='%d', - usedefault=True, - position=0, - desc='image dimension (2 or 3)') + 3, 2, argstr="%d", usedefault=True, position=0, desc="image dimension (2 or 3)" + ) output_transform = File( - argstr='%s', + argstr="%s", position=1, - name_source=['transforms'], - name_template='%s_composed', + name_source=["transforms"], + name_template="%s_composed", keep_extension=True, - desc='the name of the resulting transform.') + desc="the name of the resulting transform.", + ) reference_image = File( - argstr='%s', + argstr="%s", position=2, - desc='Reference image (only necessary when output is warpfield)') + desc="Reference image (only necessary when output is warpfield)", + ) transforms = InputMultiPath( File(exists=True), - argstr='%s', + argstr="%s", mandatory=True, position=3, - desc='transforms to average') + desc="transforms to average", + ) class ComposeMultiTransformOutputSpec(TraitedSpec): - output_transform = File(exists=True, desc='Composed transform file') + output_transform = File(exists=True, desc="Composed transform file") class ComposeMultiTransform(ANTSCommand): @@ -352,43 +346,42 @@ class ComposeMultiTransform(ANTSCommand): 'ComposeMultiTransform 3 struct_to_template_composed.mat struct_to_template.mat func_to_struct.mat' """ - _cmd = 'ComposeMultiTransform' + + _cmd = "ComposeMultiTransform" input_spec = ComposeMultiTransformInputSpec output_spec = ComposeMultiTransformOutputSpec class LabelGeometryInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( - 3, - 2, - argstr='%d', - usedefault=True, - position=0, - desc='image dimension (2 or 3)') + 3, 2, argstr="%d", usedefault=True, position=0, desc="image dimension (2 or 3)" + ) label_image = File( - argstr='%s', + argstr="%s", position=1, mandatory=True, - desc='label image to use for extracting geometry measures') + desc="label image to use for extracting geometry measures", + ) intensity_image = File( - value='[]', + value="[]", exists=True, - argstr='%s', + argstr="%s", mandatory=True, usedefault=True, position=2, - desc='Intensity image to extract values from. ' - 'This is an optional input') + desc="Intensity image to extract values from. 
" "This is an optional input", + ) output_file = traits.Str( - name_source=['label_image'], - name_template='%s.csv', - argstr='%s', + name_source=["label_image"], + name_template="%s.csv", + argstr="%s", position=3, - desc='name of output file') + desc="name of output file", + ) class LabelGeometryOutputSpec(TraitedSpec): - output_file = File(exists=True, desc='CSV file of geometry measures') + output_file = File(exists=True, desc="CSV file of geometry measures") class LabelGeometry(ANTSCommand): @@ -409,6 +402,7 @@ class LabelGeometry(ANTSCommand): 'LabelGeometryMeasures 3 atlas.nii.gz ants_Warp.nii.gz atlas.csv' """ - _cmd = 'LabelGeometryMeasures' + + _cmd = "LabelGeometryMeasures" input_spec = LabelGeometryInputSpec output_spec = LabelGeometryOutputSpec diff --git a/nipype/interfaces/ants/visualization.py b/nipype/interfaces/ants/visualization.py index b5ab661889..3e3c75be50 100644 --- a/nipype/interfaces/ants/visualization.py +++ b/nipype/interfaces/ants/visualization.py @@ -12,56 +12,55 @@ class ConvertScalarImageToRGBInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( 3, 2, - argstr='%d', + argstr="%d", usedefault=True, - desc='image dimension (2 or 3)', + desc="image dimension (2 or 3)", mandatory=True, - position=0) + position=0, + ) input_image = File( - argstr='%s', + argstr="%s", exists=True, - desc='Main input is a 3-D grayscale image.', + desc="Main input is a 3-D grayscale image.", mandatory=True, - position=1) + position=1, + ) output_image = traits.Str( - 'rgb.nii.gz', - argstr='%s', - usedefault=True, - desc='rgb output image', - position=2) + "rgb.nii.gz", argstr="%s", usedefault=True, desc="rgb output image", position=2 + ) mask_image = File( - 'none', - argstr='%s', - exists=True, - desc='mask image', - position=3, - usedefault=True) + "none", argstr="%s", exists=True, desc="mask image", position=3, usedefault=True + ) colormap = traits.Str( - argstr='%s', + argstr="%s", usedefault=True, - desc=('Possible colormaps: grey, red, green, ' - 'blue, copper, jet, hsv, spring, summer, ' - 'autumn, winter, hot, cool, overunder, custom '), + desc=( + "Possible colormaps: grey, red, green, " + "blue, copper, jet, hsv, spring, summer, " + "autumn, winter, hot, cool, overunder, custom " + ), mandatory=True, - position=4) + position=4, + ) custom_color_map_file = traits.Str( - 'none', - argstr='%s', - usedefault=True, - desc='custom color map file', - position=5) + "none", argstr="%s", usedefault=True, desc="custom color map file", position=5 + ) minimum_input = traits.Int( - argstr='%d', desc='minimum input', mandatory=True, position=6) + argstr="%d", desc="minimum input", mandatory=True, position=6 + ) maximum_input = traits.Int( - argstr='%d', desc='maximum input', mandatory=True, position=7) + argstr="%d", desc="maximum input", mandatory=True, position=7 + ) minimum_RGB_output = traits.Int( - 0, usedefault=True, argstr='%d', desc='', position=8) + 0, usedefault=True, argstr="%d", desc="", position=8 + ) maximum_RGB_output = traits.Int( - 255, usedefault=True, argstr='%d', desc='', position=9) + 255, usedefault=True, argstr="%d", desc="", position=9 + ) class ConvertScalarImageToRGBOutputSpec(TraitedSpec): - output_image = File(exists=True, desc='converted RGB image') + output_image = File(exists=True, desc="converted RGB image") class ConvertScalarImageToRGB(ANTSCommand): @@ -78,7 +77,8 @@ class ConvertScalarImageToRGB(ANTSCommand): >>> converter.cmdline 'ConvertScalarImageToRGB 3 T1.nii.gz rgb.nii.gz none jet none 0 6 0 255' """ - _cmd = 'ConvertScalarImageToRGB' + + 
_cmd = "ConvertScalarImageToRGB" input_spec = ConvertScalarImageToRGBInputSpec output_spec = ConvertScalarImageToRGBOutputSpec @@ -87,73 +87,88 @@ def _format_arg(self, opt, spec, val): def _list_outputs(self): outputs = self._outputs().get() - outputs['output_image'] = os.path.join(os.getcwd(), - self.inputs.output_image) + outputs["output_image"] = os.path.join(os.getcwd(), self.inputs.output_image) return outputs class CreateTiledMosaicInputSpec(ANTSCommandInputSpec): input_image = File( - argstr='-i %s', + argstr="-i %s", exists=True, - desc='Main input is a 3-D grayscale image.', - mandatory=True) + desc="Main input is a 3-D grayscale image.", + mandatory=True, + ) rgb_image = File( - argstr='-r %s', + argstr="-r %s", exists=True, - desc=('An optional Rgb image can be added as an overlay.' - 'It must have the same image' - 'geometry as the input grayscale image.'), - mandatory=True) + desc=( + "An optional Rgb image can be added as an overlay." + "It must have the same image" + "geometry as the input grayscale image." + ), + mandatory=True, + ) mask_image = File( - argstr='-x %s', - exists=True, - desc='Specifies the ROI of the RGB voxels used.') + argstr="-x %s", exists=True, desc="Specifies the ROI of the RGB voxels used." + ) alpha_value = traits.Float( - argstr='-a %.2f', - desc=('If an Rgb image is provided, render the overlay ' - 'using the specified alpha parameter.')) + argstr="-a %.2f", + desc=( + "If an Rgb image is provided, render the overlay " + "using the specified alpha parameter." + ), + ) output_image = traits.Str( - 'output.png', - argstr='-o %s', - desc='The output consists of the tiled mosaic image.', - usedefault=True) + "output.png", + argstr="-o %s", + desc="The output consists of the tiled mosaic image.", + usedefault=True, + ) tile_geometry = traits.Str( - argstr='-t %s', + argstr="-t %s", desc=( - 'The tile geometry specifies the number of rows and columns' + "The tile geometry specifies the number of rows and columns" 'in the output image. For example, if the user specifies "5x10", ' - 'then 5 rows by 10 columns of slices are rendered. If R < 0 and C > ' - '0 (or vice versa), the negative value is selected' - 'based on direction.')) + "then 5 rows by 10 columns of slices are rendered. If R < 0 and C > " + "0 (or vice versa), the negative value is selected" + "based on direction." + ), + ) direction = traits.Int( - argstr='-d %d', - desc=('Specifies the direction of ' - 'the slices. If no direction is specified, the ' - 'direction with the coarsest spacing is chosen.')) + argstr="-d %d", + desc=( + "Specifies the direction of " + "the slices. If no direction is specified, the " + "direction with the coarsest spacing is chosen." + ), + ) pad_or_crop = traits.Str( - argstr='-p %s', - desc='argument passed to -p flag:' - '[padVoxelWidth,]' - '[lowerPadding[0]xlowerPadding[1],upperPadding[0]xupperPadding[1],' - 'constantValue]' - 'The user can specify whether to pad or crop a specified ' - 'voxel-width boundary of each individual slice. For this ' - 'program, cropping is simply padding with negative voxel-widths.' - 'If one pads (+), the user can also specify a constant pad ' - 'value (default = 0). If a mask is specified, the user can use ' + argstr="-p %s", + desc="argument passed to -p flag:" + "[padVoxelWidth,]" + "[lowerPadding[0]xlowerPadding[1],upperPadding[0]xupperPadding[1]," + "constantValue]" + "The user can specify whether to pad or crop a specified " + "voxel-width boundary of each individual slice. 
For this " + "program, cropping is simply padding with negative voxel-widths." + "If one pads (+), the user can also specify a constant pad " + "value (default = 0). If a mask is specified, the user can use " 'the mask to define the region, by using the keyword "mask"' - ' plus an offset, e.g. "-p mask+3".') + ' plus an offset, e.g. "-p mask+3".', + ) slices = traits.Str( - argstr='-s %s', - desc=('Number of slices to increment Slice1xSlice2xSlice3' - '[numberOfSlicesToIncrement,,]')) - flip_slice = traits.Str(argstr='-f %s', desc='flipXxflipY') - permute_axes = traits.Bool(argstr='-g', desc='doPermute') + argstr="-s %s", + desc=( + "Number of slices to increment Slice1xSlice2xSlice3" + "[numberOfSlicesToIncrement,,]" + ), + ) + flip_slice = traits.Str(argstr="-f %s", desc="flipXxflipY") + permute_axes = traits.Bool(argstr="-g", desc="doPermute") class CreateTiledMosaicOutputSpec(TraitedSpec): - output_image = File(exists=True, desc='image file') + output_image = File(exists=True, desc="image file") class CreateTiledMosaic(ANTSCommand): @@ -180,12 +195,11 @@ class CreateTiledMosaic(ANTSCommand): -r rgb.nii.gz -s [2 ,100 ,160]' """ - _cmd = 'CreateTiledMosaic' + _cmd = "CreateTiledMosaic" input_spec = CreateTiledMosaicInputSpec output_spec = CreateTiledMosaicOutputSpec def _list_outputs(self): outputs = self._outputs().get() - outputs['output_image'] = os.path.join(os.getcwd(), - self.inputs.output_image) + outputs["output_image"] = os.path.join(os.getcwd(), self.inputs.output_image) return outputs diff --git a/nipype/interfaces/base/__init__.py b/nipype/interfaces/base/__init__.py index a846794561..7c70f9768d 100644 --- a/nipype/interfaces/base/__init__.py +++ b/nipype/interfaces/base/__init__.py @@ -11,19 +11,41 @@ from traits.trait_handlers import TraitDictObject, TraitListObject from traits.trait_errors import TraitError -from .core import (Interface, BaseInterface, SimpleInterface, CommandLine, - StdOutCommandLine, MpiCommandLine, SEMLikeCommandLine, - LibraryBaseInterface, PackageInfo) +from .core import ( + Interface, + BaseInterface, + SimpleInterface, + CommandLine, + StdOutCommandLine, + MpiCommandLine, + SEMLikeCommandLine, + LibraryBaseInterface, + PackageInfo, +) -from .specs import (BaseTraitedSpec, TraitedSpec, DynamicTraitedSpec, - BaseInterfaceInputSpec, CommandLineInputSpec, - StdOutCommandLineInputSpec) +from .specs import ( + BaseTraitedSpec, + TraitedSpec, + DynamicTraitedSpec, + BaseInterfaceInputSpec, + CommandLineInputSpec, + StdOutCommandLineInputSpec, +) from .traits_extension import ( - traits, Undefined, isdefined, has_metadata, - File, ImageFile, Directory, - Str, DictStrStr, - OutputMultiObject, InputMultiObject, - OutputMultiPath, InputMultiPath) + traits, + Undefined, + isdefined, + has_metadata, + File, + ImageFile, + Directory, + Str, + DictStrStr, + OutputMultiObject, + InputMultiObject, + OutputMultiPath, + InputMultiPath, +) -from .support import (Bunch, InterfaceResult, NipypeInterfaceError) +from .support import Bunch, InterfaceResult, NipypeInterfaceError diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index 126fd51a8a..fd4c701fff 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -26,25 +26,33 @@ from ... 
import config, logging, LooseVersion from ...utils.provenance import write_provenance from ...utils.misc import str2bool, rgetcwd -from ...utils.filemanip import (split_filename, which, get_dependencies) +from ...utils.filemanip import split_filename, which, get_dependencies from ...utils.subprocess import run_command from ...external.due import due from .traits_extension import traits, isdefined -from .specs import (BaseInterfaceInputSpec, CommandLineInputSpec, - StdOutCommandLineInputSpec, MpiCommandLineInputSpec, - get_filecopy_info) -from .support import (Bunch, InterfaceResult, NipypeInterfaceError, - format_help) +from .specs import ( + BaseInterfaceInputSpec, + CommandLineInputSpec, + StdOutCommandLineInputSpec, + MpiCommandLineInputSpec, + get_filecopy_info, +) +from .support import Bunch, InterfaceResult, NipypeInterfaceError, format_help -iflogger = logging.getLogger('nipype.interface') +iflogger = logging.getLogger("nipype.interface") VALID_TERMINAL_OUTPUT = [ - 'stream', 'allatonce', 'file', 'file_split', 'file_stdout', 'file_stderr', - 'none' + "stream", + "allatonce", + "file", + "file_split", + "file_stdout", + "file_stderr", + "none", ] -__docformat__ = 'restructuredtext' +__docformat__ = "restructuredtext" class Interface(object): @@ -113,8 +121,9 @@ def _get_filecopy_info(cls): Necessary for pipeline operation """ iflogger.warning( - '_get_filecopy_info member of Interface was deprecated ' - 'in nipype-1.1.6 and will be removed in 1.2.0') + "_get_filecopy_info member of Interface was deprecated " + "in nipype-1.1.6 and will be removed in 1.2.0" + ) return get_filecopy_info(cls) @@ -149,6 +158,7 @@ class BaseInterface(Interface): """ + input_spec = BaseInterfaceInputSpec _version = None _additional_metadata = [] @@ -157,16 +167,17 @@ class BaseInterface(Interface): resource_monitor = True # Enabled for this interface IFF enabled in the config _etelemetry_version_data = None - def __init__(self, from_file=None, resource_monitor=None, - ignore_exception=False, **inputs): - if config.getboolean('execution', 'check_version'): + def __init__( + self, from_file=None, resource_monitor=None, ignore_exception=False, **inputs + ): + if config.getboolean("execution", "check_version"): from ... import check_latest_version + if BaseInterface._etelemetry_version_data is None: BaseInterface._etelemetry_version_data = check_latest_version() if not self.input_spec: - raise Exception( - 'No input_spec in class: %s' % self.__class__.__name__) + raise Exception("No input_spec in class: %s" % self.__class__.__name__) self.inputs = self.input_spec(**inputs) self.ignore_exception = ignore_exception @@ -194,34 +205,42 @@ def _check_requires(self, spec, name, value): """ if spec.requires: values = [ - not isdefined(getattr(self.inputs, field)) - for field in spec.requires + not isdefined(getattr(self.inputs, field)) for field in spec.requires ] if any(values) and isdefined(value): if len(values) > 1: - fmt = ("%s requires values for inputs %s because '%s' is set. " - "For a list of required inputs, see %s.help()") + fmt = ( + "%s requires values for inputs %s because '%s' is set. " + "For a list of required inputs, see %s.help()" + ) else: - fmt = ("%s requires a value for input %s because '%s' is set. " - "For a list of required inputs, see %s.help()") - msg = fmt % (self.__class__.__name__, - ', '.join("'%s'" % req for req in spec.requires), - name, - self.__class__.__name__) + fmt = ( + "%s requires a value for input %s because '%s' is set. 
" + "For a list of required inputs, see %s.help()" + ) + msg = fmt % ( + self.__class__.__name__, + ", ".join("'%s'" % req for req in spec.requires), + name, + self.__class__.__name__, + ) raise ValueError(msg) def _check_xor(self, spec, name, value): """ check if mutually exclusive inputs are satisfied """ if spec.xor: - values = [ - isdefined(getattr(self.inputs, field)) for field in spec.xor - ] + values = [isdefined(getattr(self.inputs, field)) for field in spec.xor] if not any(values) and not isdefined(value): - msg = ("%s requires a value for one of the inputs '%s'. " - "For a list of required inputs, see %s.help()" % - (self.__class__.__name__, ', '.join(spec.xor), - self.__class__.__name__)) + msg = ( + "%s requires a value for one of the inputs '%s'. " + "For a list of required inputs, see %s.help()" + % ( + self.__class__.__name__, + ", ".join(spec.xor), + self.__class__.__name__, + ) + ) raise ValueError(msg) def _check_mandatory_inputs(self): @@ -231,15 +250,17 @@ def _check_mandatory_inputs(self): value = getattr(self.inputs, name) self._check_xor(spec, name, value) if not isdefined(value) and spec.xor is None: - msg = ("%s requires a value for input '%s'. " - "For a list of required inputs, see %s.help()" % - (self.__class__.__name__, name, - self.__class__.__name__)) + msg = ( + "%s requires a value for input '%s'. " + "For a list of required inputs, see %s.help()" + % (self.__class__.__name__, name, self.__class__.__name__) + ) raise ValueError(msg) if isdefined(value): self._check_requires(spec, name, value) for name, spec in list( - self.inputs.traits(mandatory=None, transient=None).items()): + self.inputs.traits(mandatory=None, transient=None).items() + ): self._check_requires(spec, name, getattr(self.inputs, name)) def _check_version_requirements(self, trait_object, raise_exception=True): @@ -253,16 +274,16 @@ def _check_version_requirements(self, trait_object, raise_exception=True): if names and self.version: version = LooseVersion(str(self.version)) for name in names: - min_ver = LooseVersion( - str(trait_object.traits()[name].min_ver)) + min_ver = LooseVersion(str(trait_object.traits()[name].min_ver)) if min_ver > version: unavailable_traits.append(name) if not isdefined(getattr(trait_object, name)): continue if raise_exception: raise Exception( - 'Trait %s (%s) (version %s < required %s)' % - (name, self.__class__.__name__, version, min_ver)) + "Trait %s (%s) (version %s < required %s)" + % (name, self.__class__.__name__, version, min_ver) + ) # check maximum version check = dict(max_ver=lambda t: t is not None) @@ -270,16 +291,16 @@ def _check_version_requirements(self, trait_object, raise_exception=True): if names and self.version: version = LooseVersion(str(self.version)) for name in names: - max_ver = LooseVersion( - str(trait_object.traits()[name].max_ver)) + max_ver = LooseVersion(str(trait_object.traits()[name].max_ver)) if max_ver < version: unavailable_traits.append(name) if not isdefined(getattr(trait_object, name)): continue if raise_exception: raise Exception( - 'Trait %s (%s) (version %s > required %s)' % - (name, self.__class__.__name__, version, max_ver)) + "Trait %s (%s) (version %s > required %s)" + % (name, self.__class__.__name__, version, max_ver) + ) return unavailable_traits def _run_interface(self, runtime): @@ -291,7 +312,7 @@ def _duecredit_cite(self): """ Add the interface references to the duecredit citations """ for r in self.references_: - r['path'] = self.__module__ + r["path"] = self.__module__ due.cite(**r) def run(self, cwd=None, 
ignore_exception=None, **inputs): @@ -333,10 +354,11 @@ def run(self, cwd=None, ignore_exception=None, **inputs): # initialize provenance tracking store_provenance = str2bool( - config.get('execution', 'write_provenance', 'false')) + config.get("execution", "write_provenance", "false") + ) env = deepcopy(dict(os.environ)) if self._redirect_x: - env['DISPLAY'] = config.get_display() + env["DISPLAY"] = config.get_display() runtime = Bunch( cwd=cwd, @@ -348,17 +370,19 @@ def run(self, cwd=None, ignore_exception=None, **inputs): endTime=None, platform=platform.platform(), hostname=platform.node(), - version=self.version) + version=self.version, + ) runtime_attrs = set(runtime.dictcopy()) mon_sp = None if enable_rm: - mon_freq = float( - config.get('execution', 'resource_monitor_frequency', 1)) + mon_freq = float(config.get("execution", "resource_monitor_frequency", 1)) proc_pid = os.getpid() iflogger.debug( - 'Creating a ResourceMonitor on a %s interface, PID=%d.', - self.__class__.__name__, proc_pid) + "Creating a ResourceMonitor on a %s interface, PID=%d.", + self.__class__.__name__, + proc_pid, + ) mon_sp = ResourceMonitor(proc_pid, freq=mon_freq) mon_sp.start() @@ -373,38 +397,37 @@ def run(self, cwd=None, ignore_exception=None, **inputs): outputs = self.aggregate_outputs(runtime) except Exception as e: import traceback + # Retrieve the maximum info fast runtime.traceback = traceback.format_exc() # Gather up the exception arguments and append nipype info. - exc_args = e.args if getattr(e, 'args') else tuple() + exc_args = e.args if getattr(e, "args") else tuple() exc_args += ( - 'An exception of type %s occurred while running interface %s.' - % (type(e).__name__, self.__class__.__name__), ) - if config.get('logging', 'interface_level', - 'info').lower() == 'debug': - exc_args += ('Inputs: %s' % str(self.inputs), ) + "An exception of type %s occurred while running interface %s." 
+ % (type(e).__name__, self.__class__.__name__), + ) + if config.get("logging", "interface_level", "info").lower() == "debug": + exc_args += ("Inputs: %s" % str(self.inputs),) - runtime.traceback_args = ('\n'.join( - ['%s' % arg for arg in exc_args]), ) + runtime.traceback_args = ("\n".join(["%s" % arg for arg in exc_args]),) if not ignore_exception: raise finally: if runtime is None or runtime_attrs - set(runtime.dictcopy()): - raise RuntimeError("{} interface failed to return valid " - "runtime object".format( - interface.__class__.__name__)) + raise RuntimeError( + "{} interface failed to return valid " + "runtime object".format(interface.__class__.__name__) + ) # This needs to be done always runtime.endTime = dt.isoformat(dt.utcnow()) timediff = parseutc(runtime.endTime) - parseutc(runtime.startTime) - runtime.duration = (timediff.days * 86400 + timediff.seconds + - timediff.microseconds / 1e6) + runtime.duration = ( + timediff.days * 86400 + timediff.seconds + timediff.microseconds / 1e6 + ) results = InterfaceResult( - interface, - runtime, - inputs=inputs, - outputs=outputs, - provenance=None) + interface, runtime, inputs=inputs, outputs=outputs, provenance=None + ) # Add provenance (if required) if store_provenance: @@ -414,23 +437,24 @@ def run(self, cwd=None, ignore_exception=None, **inputs): # Make sure runtime profiler is shut down if enable_rm: import numpy as np + mon_sp.stop() runtime.mem_peak_gb = None runtime.cpu_percent = None # Read .prof file in and set runtime values - vals = np.loadtxt(mon_sp.fname, delimiter=',') + vals = np.loadtxt(mon_sp.fname, delimiter=",") if vals.size: vals = np.atleast_2d(vals) runtime.mem_peak_gb = vals[:, 2].max() / 1024 runtime.cpu_percent = vals[:, 1].max() runtime.prof_dict = { - 'time': vals[:, 0].tolist(), - 'cpus': vals[:, 1].tolist(), - 'rss_GiB': (vals[:, 2] / 1024).tolist(), - 'vms_GiB': (vals[:, 3] / 1024).tolist(), + "time": vals[:, 0].tolist(), + "cpus": vals[:, 1].tolist(), + "rss_GiB": (vals[:, 2] / 1024).tolist(), + "vms_GiB": (vals[:, 3] / 1024).tolist(), } os.chdir(syscwd) @@ -461,18 +485,23 @@ def aggregate_outputs(self, runtime=None, needed_outputs=None): na_names = aggregate_names.intersection(_na_outputs) if na_names: # XXX Change to TypeError in Nipype 2.0 - raise KeyError("""\ + raise KeyError( + """\ Output trait(s) %s not available in version %s of interface %s.\ -""" % (', '.join(na_names), self.version, self.__class__.__name__)) +""" + % (", ".join(na_names), self.version, self.__class__.__name__) + ) for key in aggregate_names: # Final aggregation val = predicted_outputs[key] try: setattr(outputs, key, val) except TraitError as error: - if 'an existing' in getattr(error, 'info', 'default'): - msg = "No such file or directory '%s' for output '%s' of a %s interface" % \ - (val, key, self.__class__.__name__) + if "an existing" in getattr(error, "info", "default"): + msg = ( + "No such file or directory '%s' for output '%s' of a %s interface" + % (val, key, self.__class__.__name__) + ) raise FileNotFoundError(msg) raise error return outputs @@ -480,9 +509,10 @@ def aggregate_outputs(self, runtime=None, needed_outputs=None): @property def version(self): if self._version is None: - if str2bool(config.get('execution', 'stop_on_unknown_version')): - raise ValueError('Interface %s has no version information' % - self.__class__.__name__) + if str2bool(config.get("execution", "stop_on_unknown_version")): + raise ValueError( + "Interface %s has no version information" % self.__class__.__name__ + ) return self._version def 
load_inputs_from_json(self, json_file, overwrite=True): @@ -507,8 +537,8 @@ def save_inputs_to_json(self, json_file): A convenient way to save current inputs to a JSON file. """ inputs = self.inputs.get_traitsfree() - iflogger.debug('saving inputs %s', inputs) - with open(json_file, 'w') as fhandle: + iflogger.debug("saving inputs %s", inputs) + with open(json_file, "w") as fhandle: json.dump(inputs, fhandle, indent=4, ensure_ascii=False) def _pre_run_hook(self, runtime): @@ -576,7 +606,8 @@ class SimpleInterface(BaseInterface): def __init__(self, from_file=None, resource_monitor=None, **inputs): super(SimpleInterface, self).__init__( - from_file=from_file, resource_monitor=resource_monitor, **inputs) + from_file=from_file, resource_monitor=resource_monitor, **inputs + ) self._results = {} def _list_outputs(self): @@ -617,11 +648,12 @@ class must be instantiated with a command argument '11c37f97649cd61627f4afe5136af8c0' """ + input_spec = CommandLineInputSpec - _cmd_prefix = '' + _cmd_prefix = "" _cmd = None _version = None - _terminal_output = 'stream' + _terminal_output = "stream" @classmethod def set_default_terminal_output(cls, output_type): @@ -636,18 +668,16 @@ def set_default_terminal_output(cls, output_type): if output_type in VALID_TERMINAL_OUTPUT: cls._terminal_output = output_type else: - raise AttributeError( - 'Invalid terminal output_type: %s' % output_type) + raise AttributeError("Invalid terminal output_type: %s" % output_type) def __init__(self, command=None, terminal_output=None, **inputs): super(CommandLine, self).__init__(**inputs) self._environ = None # Set command. Input argument takes precedence - self._cmd = command or getattr(self, '_cmd', None) + self._cmd = command or getattr(self, "_cmd", None) # Store dependencies in runtime object - self._ldd = str2bool( - config.get('execution', 'get_linked_libs', 'true')) + self._ldd = str2bool(config.get("execution", "get_linked_libs", "true")) if self._cmd is None: raise Exception("Missing command") @@ -660,8 +690,9 @@ def cmd(self): """sets base command, immutable""" if not self._cmd: raise NotImplementedError( - 'CommandLineInterface should wrap an executable, but ' - 'none has been set.') + "CommandLineInterface should wrap an executable, but " + "none has been set." + ) return self._cmd @property @@ -670,7 +701,7 @@ def cmdline(self): validates arguments and generates command line""" self._check_mandatory_inputs() allargs = [self._cmd_prefix + self.cmd] + self._parse_inputs() - return ' '.join(allargs) + return " ".join(allargs) @property def terminal_output(self): @@ -681,23 +712,26 @@ def terminal_output(self, value): if value not in VALID_TERMINAL_OUTPUT: raise RuntimeError( 'Setting invalid value "%s" for terminal_output. Valid values are ' - '%s.' % (value, - ', '.join(['"%s"' % v - for v in VALID_TERMINAL_OUTPUT]))) + "%s." 
% (value, ", ".join(['"%s"' % v for v in VALID_TERMINAL_OUTPUT])) + ) self._terminal_output = value def raise_exception(self, runtime): raise RuntimeError( - ('Command:\n{cmdline}\nStandard output:\n{stdout}\n' - 'Standard error:\n{stderr}\nReturn code: {returncode}' - ).format(**runtime.dictcopy())) + ( + "Command:\n{cmdline}\nStandard output:\n{stdout}\n" + "Standard error:\n{stderr}\nReturn code: {returncode}" + ).format(**runtime.dictcopy()) + ) def _get_environ(self): - return getattr(self.inputs, 'environ', {}) + return getattr(self.inputs, "environ", {}) - def version_from_command(self, flag='-v', cmd=None): - iflogger.warning('version_from_command member of CommandLine was ' - 'Deprecated in nipype-1.0.0 and deleted in 1.1.0') + def version_from_command(self, flag="-v", cmd=None): + iflogger.warning( + "version_from_command member of CommandLine was " + "Deprecated in nipype-1.0.0 and deleted in 1.1.0" + ) if cmd is None: cmd = self.cmd.split()[0] @@ -706,7 +740,7 @@ def version_from_command(self, flag='-v', cmd=None): out_environ = self._get_environ() env.update(out_environ) proc = sp.Popen( - ' '.join((cmd, flag)), + " ".join((cmd, flag)), shell=True, env=env, stdout=sp.PIPE, @@ -715,7 +749,7 @@ def version_from_command(self, flag='-v', cmd=None): o, e = proc.communicate() return o - def _run_interface(self, runtime, correct_return_codes=(0, )): + def _run_interface(self, runtime, correct_return_codes=(0,)): """Execute command via subprocess Parameters @@ -743,16 +777,18 @@ def _run_interface(self, runtime, correct_return_codes=(0, )): if cmd_path is None: raise IOError( 'No command "%s" found on host %s. Please check that the ' - 'corresponding package is installed.' % (executable_name, - runtime.hostname)) + "corresponding package is installed." + % (executable_name, runtime.hostname) + ) runtime.command_path = cmd_path - runtime.dependencies = (get_dependencies(executable_name, - runtime.environ) - if self._ldd else '') + runtime.dependencies = ( + get_dependencies(executable_name, runtime.environ) + if self._ldd + else "" + ) runtime = run_command(runtime, output=self.terminal_output) - if runtime.returncode is None or \ - runtime.returncode not in correct_return_codes: + if runtime.returncode is None or runtime.returncode not in correct_return_codes: self.raise_exception(runtime) return runtime @@ -763,15 +799,15 @@ def _format_arg(self, name, trait_spec, value): Formats a trait containing argstr metadata """ argstr = trait_spec.argstr - iflogger.debug('%s_%s', name, value) + iflogger.debug("%s_%s", name, value) if trait_spec.is_trait_type(traits.Bool) and "%" not in argstr: # Boolean options have no format string. Just append options if True. return argstr if value else None # traits.Either turns into traits.TraitCompound and does not have any # inner_traits - elif trait_spec.is_trait_type(traits.List) \ - or (trait_spec.is_trait_type(traits.TraitCompound) and - isinstance(value, list)): + elif trait_spec.is_trait_type(traits.List) or ( + trait_spec.is_trait_type(traits.TraitCompound) and isinstance(value, list) + ): # This is a bit simple-minded at present, and should be # construed as the default. If more sophisticated behavior # is needed, it can be accomplished with metadata (e.g. 
@@ -781,13 +817,13 @@ def _format_arg(self, name, trait_spec, value): # Depending on whether we stick with traitlets, and whether or # not we beef up traitlets.List, we may want to put some # type-checking code here as well - sep = trait_spec.sep if trait_spec.sep is not None else ' ' + sep = trait_spec.sep if trait_spec.sep is not None else " " - if argstr.endswith('...'): + if argstr.endswith("..."): # repeatable option # --id %d... will expand to # --id 1 --id 2 --id 3 etc.,. - argstr = argstr.replace('...', '') + argstr = argstr.replace("...", "") return sep.join([argstr % elt for elt in value]) else: return argstr % sep.join(str(elt) for elt in value) @@ -807,13 +843,16 @@ def _filename_from_source(self, name, chain=None): return retval # Do not generate filename when excluded by other inputs - if any(isdefined(getattr(self.inputs, field)) - for field in trait_spec.xor or ()): + if any( + isdefined(getattr(self.inputs, field)) for field in trait_spec.xor or () + ): return retval # Do not generate filename when required fields are missing - if not all(isdefined(getattr(self.inputs, field)) - for field in trait_spec.requires or ()): + if not all( + isdefined(getattr(self.inputs, field)) + for field in trait_spec.requires or () + ): return retval if isdefined(retval) and "%s" in retval: @@ -826,15 +865,14 @@ def _filename_from_source(self, name, chain=None): ns = trait_spec.name_source while isinstance(ns, (list, tuple)): if len(ns) > 1: - iflogger.warning( - 'Only one name_source per trait is allowed') + iflogger.warning("Only one name_source per trait is allowed") ns = ns[0] if not isinstance(ns, (str, bytes)): raise ValueError( - 'name_source of \'{}\' trait should be an input trait ' - 'name, but a type {} object was found'.format( - name, type(ns))) + "name_source of '{}' trait should be an input trait " + "name, but a type {} object was found".format(name, type(ns)) + ) if isdefined(getattr(self.inputs, ns)): name_source = ns @@ -849,8 +887,7 @@ def _filename_from_source(self, name, chain=None): base = source else: if name in chain: - raise NipypeInterfaceError( - 'Mutually pointing name_sources') + raise NipypeInterfaceError("Mutually pointing name_sources") chain.append(name) base = self._filename_from_source(ns, chain) @@ -938,7 +975,7 @@ class StdOutCommandLine(CommandLine): input_spec = StdOutCommandLineInputSpec def _gen_filename(self, name): - return self._gen_outfilename() if name == 'out_file' else None + return self._gen_outfilename() if name == "out_file" else None def _gen_outfilename(self): raise NotImplementedError @@ -961,6 +998,7 @@ class MpiCommandLine(CommandLine): >>> mpi_cli.cmdline 'mpiexec -n 8 my_mpi_prog -v' """ + input_spec = MpiCommandLineInputSpec @property @@ -968,11 +1006,11 @@ def cmdline(self): """Adds 'mpiexec' to begining of command""" result = [] if self.inputs.use_mpi: - result.append('mpiexec') + result.append("mpiexec") if self.inputs.n_procs: - result.append('-n %d' % self.inputs.n_procs) + result.append("-n %d" % self.inputs.n_procs) result.append(super(MpiCommandLine, self).cmdline) - return ' '.join(result) + return " ".join(result) class SEMLikeCommandLine(CommandLine): @@ -992,10 +1030,8 @@ def _outputs_from_inputs(self, outputs): for name in list(outputs.keys()): corresponding_input = getattr(self.inputs, name) if isdefined(corresponding_input): - if (isinstance(corresponding_input, bool) - and corresponding_input): - outputs[name] = \ - os.path.abspath(self._outputs_filenames[name]) + if isinstance(corresponding_input, bool) and 
corresponding_input: + outputs[name] = os.path.abspath(self._outputs_filenames[name]) else: if isinstance(corresponding_input, list): outputs[name] = [ @@ -1023,18 +1059,23 @@ def __init__(self, check_import=True, *args, **kwargs): super(LibraryBaseInterface, self).__init__(*args, **kwargs) if check_import: import pkgutil + failed_imports = [] for pkg in (self._pkg,) + tuple(self.imports): if pkgutil.find_loader(pkg) is None: failed_imports.append(pkg) if failed_imports: - iflogger.warning('Unable to import %s; %s interface may fail to ' - 'run', failed_imports, self.__class__.__name__) + iflogger.warning( + "Unable to import %s; %s interface may fail to " "run", + failed_imports, + self.__class__.__name__, + ) @property def version(self): if self._version is None: import importlib + try: self._version = importlib.import_module(self._pkg).__version__ except (ImportError, AttributeError): @@ -1055,14 +1096,15 @@ def version(klass): clout = CommandLine( command=klass.version_cmd, resource_monitor=False, - terminal_output='allatonce').run() + terminal_output="allatonce", + ).run() except IOError: return None raw_info = clout.runtime.stdout elif klass.version_file is not None: try: - with open(klass.version_file, 'rt') as fobj: + with open(klass.version_file, "rt") as fobj: raw_info = fobj.read() except OSError: return None diff --git a/nipype/interfaces/base/specs.py b/nipype/interfaces/base/specs.py index 4c9b36cc26..579f97def8 100644 --- a/nipype/interfaces/base/specs.py +++ b/nipype/interfaces/base/specs.py @@ -31,7 +31,7 @@ from ... import config, __version__ -FLOAT_FORMAT = '{:.10f}'.format +FLOAT_FORMAT = "{:.10f}".format nipype_version = Version(__version__) @@ -55,6 +55,7 @@ class BaseTraitedSpec(traits.HasTraits): XXX Reconsider this in the long run, but it seems like the best solution to move forward on the refactoring. """ + package_version = nipype_version def __init__(self, **kwargs): @@ -83,8 +84,8 @@ def __repr__(self): """ Return a well-formatted representation of the traits """ outstr = [] for name, value in sorted(self.trait_get().items()): - outstr.append('%s = %s' % (name, value)) - return '\n{}\n'.format('\n'.join(outstr)) + outstr.append("%s = %s" % (name, value)) + return "\n{}\n".format("\n".join(outstr)) def _generate_handlers(self): """Find all traits with the 'xor' metadata and attach an event @@ -111,11 +112,12 @@ def _xor_warn(self, obj, name, old, new): continue if isdefined(getattr(self, trait_name)): self.trait_set( - trait_change_notify=False, **{ - '%s' % name: Undefined - }) - msg = ('Input "%s" is mutually exclusive with input "%s", ' - 'which is already set') % (name, trait_name) + trait_change_notify=False, **{"%s" % name: Undefined} + ) + msg = ( + 'Input "%s" is mutually exclusive with input "%s", ' + "which is already set" + ) % (name, trait_name) raise IOError(msg) def _deprecated_warn(self, obj, name, old, new): @@ -123,32 +125,37 @@ def _deprecated_warn(self, obj, name, old, new): """ if isdefined(new): trait_spec = self.traits()[name] - msg1 = ('Input %s in interface %s is deprecated.' % - (name, self.__class__.__name__.split('InputSpec')[0])) - msg2 = ('Will be removed or raise an error as of release %s' % - trait_spec.deprecated) + msg1 = "Input %s in interface %s is deprecated." 
% ( + name, + self.__class__.__name__.split("InputSpec")[0], + ) + msg2 = ( + "Will be removed or raise an error as of release %s" + % trait_spec.deprecated + ) if trait_spec.new_name: if trait_spec.new_name not in self.copyable_trait_names(): - raise TraitError(msg1 + ' Replacement trait %s not found' % - trait_spec.new_name) - msg3 = 'It has been replaced by %s.' % trait_spec.new_name + raise TraitError( + msg1 + " Replacement trait %s not found" % trait_spec.new_name + ) + msg3 = "It has been replaced by %s." % trait_spec.new_name else: - msg3 = '' - msg = ' '.join((msg1, msg2, msg3)) + msg3 = "" + msg = " ".join((msg1, msg2, msg3)) if Version(str(trait_spec.deprecated)) < self.package_version: raise TraitError(msg) else: if trait_spec.new_name: - msg += 'Unsetting old value %s; setting new value %s.' % ( - name, trait_spec.new_name) + msg += "Unsetting old value %s; setting new value %s." % ( + name, + trait_spec.new_name, + ) warn(msg) if trait_spec.new_name: self.trait_set( trait_change_notify=False, - **{ - '%s' % name: Undefined, - '%s' % trait_spec.new_name: new - }) + **{"%s" % name: Undefined, "%s" % trait_spec.new_name: new} + ) def trait_get(self, **kwargs): """ Returns traited class as a dict @@ -184,8 +191,11 @@ def _clean_container(self, objekt, undefinedval=None, skipundefined=False): else: if not skipundefined: out[key] = undefinedval - elif (isinstance(objekt, TraitListObject) or isinstance(objekt, list) or - isinstance(objekt, tuple)): + elif ( + isinstance(objekt, TraitListObject) + or isinstance(objekt, list) + or isinstance(objekt, tuple) + ): out = [] for val in objekt: if isdefined(val): @@ -211,8 +221,7 @@ def has_metadata(self, name, metadata, value=None, recursive=True): Return has_metadata for the requested trait name in this interface """ - return has_metadata( - self.trait(name).trait_type, metadata, value, recursive) + return has_metadata(self.trait(name).trait_type, metadata, value, recursive) def get_hashval(self, hash_method=None): """Return a dictionary of our items with hashes for each file. 
@@ -241,36 +250,45 @@ def get_hashval(self, hash_method=None): # skip undefined traits and traits with nohash=True continue - hash_files = (not self.has_metadata(name, "hash_files", False) and - not self.has_metadata(name, "name_source")) - list_nofilename.append((name, - self._get_sorteddict( - val, - hash_method=hash_method, - hash_files=hash_files))) - list_withhash.append((name, - self._get_sorteddict( - val, - True, - hash_method=hash_method, - hash_files=hash_files))) + hash_files = not self.has_metadata( + name, "hash_files", False + ) and not self.has_metadata(name, "name_source") + list_nofilename.append( + ( + name, + self._get_sorteddict( + val, hash_method=hash_method, hash_files=hash_files + ), + ) + ) + list_withhash.append( + ( + name, + self._get_sorteddict( + val, True, hash_method=hash_method, hash_files=hash_files + ), + ) + ) return list_withhash, md5(str(list_nofilename).encode()).hexdigest() - def _get_sorteddict(self, - objekt, - dictwithhash=False, - hash_method=None, - hash_files=True): + def _get_sorteddict( + self, objekt, dictwithhash=False, hash_method=None, hash_files=True + ): if isinstance(objekt, dict): out = [] for key, val in sorted(objekt.items()): if isdefined(val): - out.append((key, - self._get_sorteddict( - val, - dictwithhash, - hash_method=hash_method, - hash_files=hash_files))) + out.append( + ( + key, + self._get_sorteddict( + val, + dictwithhash, + hash_method=hash_method, + hash_files=hash_files, + ), + ) + ) elif isinstance(objekt, (list, tuple)): out = [] for val in objekt: @@ -280,24 +298,28 @@ def _get_sorteddict(self, val, dictwithhash, hash_method=hash_method, - hash_files=hash_files)) + hash_files=hash_files, + ) + ) if isinstance(objekt, tuple): out = tuple(out) else: out = None if isdefined(objekt): - if (hash_files and isinstance(objekt, (str, bytes)) and - os.path.isfile(objekt)): + if ( + hash_files + and isinstance(objekt, (str, bytes)) + and os.path.isfile(objekt) + ): if hash_method is None: - hash_method = config.get('execution', 'hash_method') + hash_method = config.get("execution", "hash_method") - if hash_method.lower() == 'timestamp': + if hash_method.lower() == "timestamp": hash = hash_timestamp(objekt) - elif hash_method.lower() == 'content': + elif hash_method.lower() == "content": hash = hash_infile(objekt) else: - raise Exception( - "Unknown hash method: %s" % hash_method) + raise Exception("Unknown hash method: %s" % hash_method) if dictwithhash: out = (objekt, hash) else: @@ -344,6 +366,7 @@ class TraitedSpec(BaseTraitedSpec): This is used in 90% of the cases. """ + _ = traits.Disallow @@ -357,6 +380,7 @@ class DynamicTraitedSpec(BaseTraitedSpec): This class is a workaround for add_traits and clone_traits not functioning well together. """ + def __deepcopy__(self, memo): """ Replace the ``__deepcopy__`` member with a traits-friendly implementation. 
@@ -385,9 +409,10 @@ def __deepcopy__(self, memo): class CommandLineInputSpec(BaseInterfaceInputSpec): - args = Str(argstr='%s', desc='Additional parameters to the command') + args = Str(argstr="%s", desc="Additional parameters to the command") environ = traits.DictStrStr( - desc='Environment variables', usedefault=True, nohash=True) + desc="Environment variables", usedefault=True, nohash=True + ) class StdOutCommandLineInputSpec(CommandLineInputSpec): @@ -396,12 +421,13 @@ class StdOutCommandLineInputSpec(CommandLineInputSpec): class MpiCommandLineInputSpec(CommandLineInputSpec): use_mpi = traits.Bool( - False, - desc="Whether or not to run the command with mpiexec", - usedefault=True) - n_procs = traits.Int(desc="Num processors to specify to mpiexec. Do not " - "specify if this is managed externally (e.g. through " - "SGE)") + False, desc="Whether or not to run the command with mpiexec", usedefault=True + ) + n_procs = traits.Int( + desc="Num processors to specify to mpiexec. Do not " + "specify if this is managed externally (e.g. through " + "SGE)" + ) def get_filecopy_info(cls): @@ -412,7 +438,7 @@ def get_filecopy_info(cls): return None # normalize_filenames is not a classmethod, hence check first - if not isclass(cls) and hasattr(cls, 'normalize_filenames'): + if not isclass(cls) and hasattr(cls, "normalize_filenames"): cls.normalize_filenames() info = [] inputs = cls.input_spec() if isclass(cls) else cls.inputs diff --git a/nipype/interfaces/base/support.py b/nipype/interfaces/base/support.py index 18d2471c48..e3e1a229f6 100644 --- a/nipype/interfaces/base/support.py +++ b/nipype/interfaces/base/support.py @@ -15,10 +15,12 @@ from ... import logging from ...utils.misc import is_container from ...utils.filemanip import md5, hash_infile -iflogger = logging.getLogger('nipype.interface') + +iflogger = logging.getLogger("nipype.interface") HELP_LINEWIDTH = 70 + class NipypeInterfaceError(Exception): """Custom error for interfaces""" @@ -26,7 +28,7 @@ def __init__(self, value): self.value = value def __str__(self): - return '{}'.format(self.value) + return "{}".format(self.value) class Bunch(object): @@ -70,7 +72,7 @@ def items(self): def iteritems(self): """iterates over bunch attributes as key, value pairs""" - iflogger.warning('iteritems is deprecated, use items instead') + iflogger.warning("iteritems is deprecated, use items instead") return list(self.items()) def get(self, *args): @@ -95,22 +97,22 @@ def __repr__(self): needs setting or not. Till that mechanism changes, only alter this after careful consideration. """ - outstr = ['Bunch('] + outstr = ["Bunch("] first = True for k, v in sorted(self.items()): if not first: - outstr.append(', ') + outstr.append(", ") if isinstance(v, dict): pairs = [] for key, value in sorted(v.items()): pairs.append("'%s': %s" % (key, value)) - v = '{' + ', '.join(pairs) + '}' - outstr.append('%s=%s' % (k, v)) + v = "{" + ", ".join(pairs) + "}" + outstr.append("%s=%s" % (k, v)) else: - outstr.append('%s=%r' % (k, v)) + outstr.append("%s=%r" % (k, v)) first = False - outstr.append(')') - return ''.join(outstr) + outstr.append(")") + return "".join(outstr) def _get_bunch_hash(self): """Return a dictionary of our items with hashes for each file. 
@@ -143,7 +145,7 @@ def _get_bunch_hash(self): item = None else: if len(val) == 0: - raise AttributeError('%s attribute is empty' % key) + raise AttributeError("%s attribute is empty" % key) item = val[0] else: item = val @@ -167,18 +169,18 @@ def _get_bunch_hash(self): def _repr_pretty_(self, p, cycle): """Support for the pretty module from ipython.externals""" if cycle: - p.text('Bunch(...)') + p.text("Bunch(...)") else: - p.begin_group(6, 'Bunch(') + p.begin_group(6, "Bunch(") first = True for k, v in sorted(self.items()): if not first: - p.text(',') + p.text(",") p.breakable() - p.text(k + '=') + p.text(k + "=") p.pretty(v) first = False - p.end_group(6, ')') + p.end_group(6, ")") def _hash_bunch_dict(adict, key): @@ -216,12 +218,7 @@ class InterfaceResult(object): """ - def __init__(self, - interface, - runtime, - inputs=None, - outputs=None, - provenance=None): + def __init__(self, interface, runtime, inputs=None, outputs=None, provenance=None): self._version = 2.0 self.interface = interface self.runtime = runtime @@ -252,18 +249,20 @@ def format_help(cls): from ...utils.misc import trim docstring = [] - cmd = getattr(cls, '_cmd', None) + cmd = getattr(cls, "_cmd", None) if cmd: - docstring += ['Wraps the executable command ``%s``.' % cmd, ''] + docstring += ["Wraps the executable command ``%s``." % cmd, ""] if cls.__doc__: - docstring += trim(cls.__doc__).split('\n') + [''] - - allhelp = '\n'.join( - docstring + - _inputs_help(cls) + [''] + - _outputs_help(cls) + [''] + - _refs_help(cls) + docstring += trim(cls.__doc__).split("\n") + [""] + + allhelp = "\n".join( + docstring + + _inputs_help(cls) + + [""] + + _outputs_help(cls) + + [""] + + _refs_help(cls) ) return allhelp.expandtabs(8) @@ -277,7 +276,7 @@ def _inputs_help(cls): ['Inputs::', '', '\t[Mandatory]', '\tin_file: (a pathlike object or string... 
""" - helpstr = ['Inputs::'] + helpstr = ["Inputs::"] mandatory_keys = [] optional_items = [] @@ -285,19 +284,21 @@ def _inputs_help(cls): inputs = cls.input_spec() mandatory_items = list(inputs.traits(mandatory=True).items()) if mandatory_items: - helpstr += ['', '\t[Mandatory]'] + helpstr += ["", "\t[Mandatory]"] for name, spec in mandatory_items: helpstr += get_trait_desc(inputs, name, spec) mandatory_keys = {item[0] for item in mandatory_items} - optional_items = ['\n'.join(get_trait_desc(inputs, name, val)) - for name, val in inputs.traits(transient=None).items() - if name not in mandatory_keys] + optional_items = [ + "\n".join(get_trait_desc(inputs, name, val)) + for name, val in inputs.traits(transient=None).items() + if name not in mandatory_keys + ] if optional_items: - helpstr += ['', '\t[Optional]'] + optional_items + helpstr += ["", "\t[Optional]"] + optional_items if not mandatory_keys and not optional_items: - helpstr += ['', '\tNone'] + helpstr += ["", "\tNone"] return helpstr @@ -310,12 +311,13 @@ def _outputs_help(cls): ['Outputs::', '', '\tout: (a float)\n\t\tglobal correlation value'] """ - helpstr = ['Outputs::', '', '\tNone'] + helpstr = ["Outputs::", "", "\tNone"] if cls.output_spec: outputs = cls.output_spec() outhelpstr = [ - '\n'.join(get_trait_desc(outputs, name, spec)) - for name, spec in outputs.traits(transient=None).items()] + "\n".join(get_trait_desc(outputs, name, spec)) + for name, spec in outputs.traits(transient=None).items() + ] if outhelpstr: helpstr = helpstr[:-1] + outhelpstr return helpstr @@ -323,13 +325,13 @@ def _outputs_help(cls): def _refs_help(cls): """Prints interface references.""" - references = getattr(cls, 'references_', None) + references = getattr(cls, "references_", None) if not references: return [] - helpstr = ['References:', '-----------'] + helpstr = ["References:", "-----------"] for r in references: - helpstr += ['{}'.format(r['entry'])] + helpstr += ["{}".format(r["entry"])] return helpstr @@ -341,59 +343,62 @@ def get_trait_desc(inputs, name, spec): requires = spec.requires argstr = spec.argstr - manhelpstr = ['\t%s' % name] + manhelpstr = ["\t%s" % name] type_info = spec.full_info(inputs, name, None) - default = '' + default = "" if spec.usedefault: - default = ', nipype default value: %s' % str( - spec.default_value()[1]) + default = ", nipype default value: %s" % str(spec.default_value()[1]) line = "(%s%s)" % (type_info, default) manhelpstr = wrap( line, HELP_LINEWIDTH, - initial_indent=manhelpstr[0] + ': ', - subsequent_indent='\t\t ') + initial_indent=manhelpstr[0] + ": ", + subsequent_indent="\t\t ", + ) if desc: - for line in desc.split('\n'): + for line in desc.split("\n"): line = re.sub(r"\s+", " ", line) manhelpstr += wrap( - line, HELP_LINEWIDTH, - initial_indent='\t\t', - subsequent_indent='\t\t') + line, HELP_LINEWIDTH, initial_indent="\t\t", subsequent_indent="\t\t" + ) if argstr: pos = spec.position if pos is not None: manhelpstr += wrap( - 'argument: ``%s``, position: %s' % (argstr, pos), + "argument: ``%s``, position: %s" % (argstr, pos), HELP_LINEWIDTH, - initial_indent='\t\t', - subsequent_indent='\t\t') + initial_indent="\t\t", + subsequent_indent="\t\t", + ) else: manhelpstr += wrap( - 'argument: ``%s``' % argstr, + "argument: ``%s``" % argstr, HELP_LINEWIDTH, - initial_indent='\t\t', - subsequent_indent='\t\t') + initial_indent="\t\t", + subsequent_indent="\t\t", + ) if xor: - line = '%s' % ', '.join(xor) + line = "%s" % ", ".join(xor) manhelpstr += wrap( line, HELP_LINEWIDTH, - 
initial_indent='\t\tmutually_exclusive: ', - subsequent_indent='\t\t ') + initial_indent="\t\tmutually_exclusive: ", + subsequent_indent="\t\t ", + ) if requires: others = [field for field in requires if field != name] - line = '%s' % ', '.join(others) + line = "%s" % ", ".join(others) manhelpstr += wrap( line, HELP_LINEWIDTH, - initial_indent='\t\trequires: ', - subsequent_indent='\t\t ') + initial_indent="\t\trequires: ", + subsequent_indent="\t\t ", + ) return manhelpstr diff --git a/nipype/interfaces/base/tests/test_auto_CommandLine.py b/nipype/interfaces/base/tests/test_auto_CommandLine.py index 22d7406921..b003543a3e 100644 --- a/nipype/interfaces/base/tests/test_auto_CommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_CommandLine.py @@ -4,11 +4,7 @@ def test_CommandLine_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), environ=dict(nohash=True, usedefault=True,), ) inputs = CommandLine.input_spec() diff --git a/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py b/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py index 4084a19945..7ab181458f 100644 --- a/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py @@ -4,13 +4,10 @@ def test_MpiCommandLine_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), n_procs=dict(), - use_mpi=dict(usedefault=True, ), + use_mpi=dict(usedefault=True,), ) inputs = MpiCommandLine.input_spec() diff --git a/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py b/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py index 18e788f3a4..e17eed4db1 100644 --- a/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py @@ -4,11 +4,7 @@ def test_SEMLikeCommandLine_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), environ=dict(nohash=True, usedefault=True,), ) inputs = SEMLikeCommandLine.input_spec() diff --git a/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py b/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py index de6ef5450a..bc0771ac78 100644 --- a/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py @@ -4,17 +4,9 @@ def test_StdOutCommandLine_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), ) inputs = StdOutCommandLine.input_spec() diff --git a/nipype/interfaces/base/tests/test_core.py b/nipype/interfaces/base/tests/test_core.py index a265d8f8a9..d7e2620c9b 100644 --- a/nipype/interfaces/base/tests/test_core.py +++ b/nipype/interfaces/base/tests/test_core.py @@ -53,27 +53,27 @@ def __init__(self): def test_BaseInterface(): - config.set('monitoring', 'enable', '0') + config.set("monitoring", "enable", "0") assert nib.BaseInterface.help() is None class InputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int') - goo = nib.traits.Int(desc='a random int', mandatory=True) - moo = 
nib.traits.Int(desc='a random int', mandatory=False) - hoo = nib.traits.Int(desc='a random int', usedefault=True) - zoo = nib.File(desc='a file', copyfile=False) - woo = nib.File(desc='a file', copyfile=True) + foo = nib.traits.Int(desc="a random int") + goo = nib.traits.Int(desc="a random int", mandatory=True) + moo = nib.traits.Int(desc="a random int", mandatory=False) + hoo = nib.traits.Int(desc="a random int", usedefault=True) + zoo = nib.File(desc="a file", copyfile=False) + woo = nib.File(desc="a file", copyfile=True) class OutputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int') + foo = nib.traits.Int(desc="a random int") class DerivedInterface(nib.BaseInterface): input_spec = InputSpec resource_monitor = False assert DerivedInterface.help() is None - assert 'moo' in ''.join(_inputs_help(DerivedInterface)) + assert "moo" in "".join(_inputs_help(DerivedInterface)) assert DerivedInterface()._outputs() is None assert DerivedInterface().inputs.foo == nib.Undefined with pytest.raises(ValueError): @@ -103,7 +103,7 @@ def _run_interface(self, runtime): def test_BaseInterface_load_save_inputs(tmpdir): - tmp_json = tmpdir.join('settings.json').strpath + tmp_json = tmpdir.join("settings.json").strpath class InputSpec(nib.TraitedSpec): input1 = nib.traits.Int() @@ -117,7 +117,7 @@ class DerivedInterface(nib.BaseInterface): def __init__(self, **inputs): super(DerivedInterface, self).__init__(**inputs) - inputs_dict = {'input1': 12, 'input3': True, 'input4': 'some string'} + inputs_dict = {"input1": 12, "input3": True, "input4": "some string"} bif = DerivedInterface(**inputs_dict) bif.save_inputs_to_json(tmp_json) bif2 = DerivedInterface() @@ -128,22 +128,22 @@ def __init__(self, **inputs): assert bif3.inputs.get_traitsfree() == inputs_dict inputs_dict2 = inputs_dict.copy() - inputs_dict2.update({'input4': 'some other string'}) - bif4 = DerivedInterface(from_file=tmp_json, input4=inputs_dict2['input4']) + inputs_dict2.update({"input4": "some other string"}) + bif4 = DerivedInterface(from_file=tmp_json, input4=inputs_dict2["input4"]) assert bif4.inputs.get_traitsfree() == inputs_dict2 - bif5 = DerivedInterface(input4=inputs_dict2['input4']) + bif5 = DerivedInterface(input4=inputs_dict2["input4"]) bif5.load_inputs_from_json(tmp_json, overwrite=False) assert bif5.inputs.get_traitsfree() == inputs_dict2 - bif6 = DerivedInterface(input4=inputs_dict2['input4']) + bif6 = DerivedInterface(input4=inputs_dict2["input4"]) bif6.load_inputs_from_json(tmp_json) assert bif6.inputs.get_traitsfree() == inputs_dict # test get hashval in a complex interface from nipype.interfaces.ants import Registration - settings = example_data( - example_data('smri_ants_registration_settings.json')) + + settings = example_data(example_data("smri_ants_registration_settings.json")) with open(settings) as setf: data_dict = json.load(setf) @@ -154,15 +154,16 @@ def __init__(self, **inputs): tsthash2 = Registration(from_file=settings) assert {} == check_dict(data_dict, tsthash2.inputs.get_traitsfree()) - _, hashvalue = tsthash.inputs.get_hashval(hash_method='timestamp') - assert '8562a5623562a871115eb14822ee8d02' == hashvalue + _, hashvalue = tsthash.inputs.get_hashval(hash_method="timestamp") + assert "8562a5623562a871115eb14822ee8d02" == hashvalue class MinVerInputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int', min_ver='0.9') + foo = nib.traits.Int(desc="a random int", min_ver="0.9") + class MaxVerInputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int', max_ver='0.7') + foo = 
nib.traits.Int(desc="a random int", max_ver="0.7") def test_input_version_1(): @@ -172,7 +173,7 @@ class DerivedInterface1(nib.BaseInterface): obj = DerivedInterface1() obj._check_version_requirements(obj.inputs) - config.set('execution', 'stop_on_unknown_version', True) + config.set("execution", "stop_on_unknown_version", True) with pytest.raises(ValueError) as excinfo: obj._check_version_requirements(obj.inputs) @@ -184,7 +185,7 @@ class DerivedInterface1(nib.BaseInterface): def test_input_version_2(): class DerivedInterface1(nib.BaseInterface): input_spec = MinVerInputSpec - _version = '0.8' + _version = "0.8" obj = DerivedInterface1() obj.inputs.foo = 1 @@ -196,7 +197,7 @@ class DerivedInterface1(nib.BaseInterface): def test_input_version_3(): class DerivedInterface1(nib.BaseInterface): input_spec = MinVerInputSpec - _version = '0.10' + _version = "0.10" obj = DerivedInterface1() obj._check_version_requirements(obj.inputs) @@ -205,7 +206,7 @@ class DerivedInterface1(nib.BaseInterface): def test_input_version_4(): class DerivedInterface1(nib.BaseInterface): input_spec = MinVerInputSpec - _version = '0.9' + _version = "0.9" obj = DerivedInterface1() obj.inputs.foo = 1 @@ -215,7 +216,7 @@ class DerivedInterface1(nib.BaseInterface): def test_input_version_5(): class DerivedInterface2(nib.BaseInterface): input_spec = MaxVerInputSpec - _version = '0.8' + _version = "0.8" obj = DerivedInterface2() obj.inputs.foo = 1 @@ -227,7 +228,7 @@ class DerivedInterface2(nib.BaseInterface): def test_input_version_6(): class DerivedInterface1(nib.BaseInterface): input_spec = MaxVerInputSpec - _version = '0.7' + _version = "0.7" obj = DerivedInterface1() obj.inputs.foo = 1 @@ -236,52 +237,52 @@ class DerivedInterface1(nib.BaseInterface): def test_output_version(): class InputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int') + foo = nib.traits.Int(desc="a random int") class OutputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int', min_ver='0.9') + foo = nib.traits.Int(desc="a random int", min_ver="0.9") class DerivedInterface1(nib.BaseInterface): input_spec = InputSpec output_spec = OutputSpec - _version = '0.10' + _version = "0.10" resource_monitor = False obj = DerivedInterface1() assert obj._check_version_requirements(obj._outputs()) == [] class InputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int') + foo = nib.traits.Int(desc="a random int") class OutputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int', min_ver='0.11') + foo = nib.traits.Int(desc="a random int", min_ver="0.11") class DerivedInterface1(nib.BaseInterface): input_spec = InputSpec output_spec = OutputSpec - _version = '0.10' + _version = "0.10" resource_monitor = False obj = DerivedInterface1() - assert obj._check_version_requirements(obj._outputs()) == ['foo'] + assert obj._check_version_requirements(obj._outputs()) == ["foo"] class InputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int') + foo = nib.traits.Int(desc="a random int") class OutputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int', min_ver='0.11') + foo = nib.traits.Int(desc="a random int", min_ver="0.11") class DerivedInterface1(nib.BaseInterface): input_spec = InputSpec output_spec = OutputSpec - _version = '0.10' + _version = "0.10" resource_monitor = False def _run_interface(self, runtime): return runtime def _list_outputs(self): - return {'foo': 1} + return {"foo": 1} obj = DerivedInterface1() with pytest.raises(KeyError): @@ -291,51 +292,50 @@ def _list_outputs(self): def 
test_Commandline(): with pytest.raises(Exception): nib.CommandLine() - ci = nib.CommandLine(command='which') - assert ci.cmd == 'which' + ci = nib.CommandLine(command="which") + assert ci.cmd == "which" assert ci.inputs.args == nib.Undefined - ci2 = nib.CommandLine(command='which', args='ls') - assert ci2.cmdline == 'which ls' - ci3 = nib.CommandLine(command='echo') + ci2 = nib.CommandLine(command="which", args="ls") + assert ci2.cmdline == "which ls" + ci3 = nib.CommandLine(command="echo") ci3.resource_monitor = False - ci3.inputs.environ = {'MYENV': 'foo'} + ci3.inputs.environ = {"MYENV": "foo"} res = ci3.run() - assert res.runtime.environ['MYENV'] == 'foo' + assert res.runtime.environ["MYENV"] == "foo" assert res.outputs is None class CommandLineInputSpec1(nib.CommandLineInputSpec): - foo = nib.Str(argstr='%s', desc='a str') - goo = nib.traits.Bool(argstr='-g', desc='a bool', position=0) - hoo = nib.traits.List(argstr='-l %s', desc='a list') - moo = nib.traits.List( - argstr='-i %d...', desc='a repeated list', position=-1) - noo = nib.traits.Int(argstr='-x %d', desc='an int') - roo = nib.traits.Str(desc='not on command line') + foo = nib.Str(argstr="%s", desc="a str") + goo = nib.traits.Bool(argstr="-g", desc="a bool", position=0) + hoo = nib.traits.List(argstr="-l %s", desc="a list") + moo = nib.traits.List(argstr="-i %d...", desc="a repeated list", position=-1) + noo = nib.traits.Int(argstr="-x %d", desc="an int") + roo = nib.traits.Str(desc="not on command line") soo = nib.traits.Bool(argstr="-soo") nib.CommandLine.input_spec = CommandLineInputSpec1 - ci4 = nib.CommandLine(command='cmd') - ci4.inputs.foo = 'foo' + ci4 = nib.CommandLine(command="cmd") + ci4.inputs.foo = "foo" ci4.inputs.goo = True - ci4.inputs.hoo = ['a', 'b'] + ci4.inputs.hoo = ["a", "b"] ci4.inputs.moo = [1, 2, 3] ci4.inputs.noo = 0 - ci4.inputs.roo = 'hello' + ci4.inputs.roo = "hello" ci4.inputs.soo = False cmd = ci4._parse_inputs() - assert cmd[0] == '-g' - assert cmd[-1] == '-i 1 -i 2 -i 3' - assert 'hello' not in ' '.join(cmd) - assert '-soo' not in ' '.join(cmd) + assert cmd[0] == "-g" + assert cmd[-1] == "-i 1 -i 2 -i 3" + assert "hello" not in " ".join(cmd) + assert "-soo" not in " ".join(cmd) ci4.inputs.soo = True cmd = ci4._parse_inputs() - assert '-soo' in ' '.join(cmd) + assert "-soo" in " ".join(cmd) class CommandLineInputSpec2(nib.CommandLineInputSpec): - foo = nib.File(argstr='%s', desc='a str', genfile=True) + foo = nib.File(argstr="%s", desc="a str", genfile=True) nib.CommandLine.input_spec = CommandLineInputSpec2 - ci5 = nib.CommandLine(command='cmd') + ci5 = nib.CommandLine(command="cmd") with pytest.raises(NotImplementedError): ci5._parse_inputs() @@ -343,102 +343,106 @@ class DerivedClass(nib.CommandLine): input_spec = CommandLineInputSpec2 def _gen_filename(self, name): - return 'filename' + return "filename" - ci6 = DerivedClass(command='cmd') - assert ci6._parse_inputs()[0] == 'filename' + ci6 = DerivedClass(command="cmd") + assert ci6._parse_inputs()[0] == "filename" nib.CommandLine.input_spec = nib.CommandLineInputSpec def test_Commandline_environ(monkeypatch, tmpdir): from nipype import config + config.set_default_config() tmpdir.chdir() - monkeypatch.setitem(os.environ, 'DISPLAY', ':1') + monkeypatch.setitem(os.environ, "DISPLAY", ":1") # Test environment - ci3 = nib.CommandLine(command='echo') + ci3 = nib.CommandLine(command="echo") res = ci3.run() - assert res.runtime.environ['DISPLAY'] == ':1' + assert res.runtime.environ["DISPLAY"] == ":1" # Test display_variable option - 
monkeypatch.delitem(os.environ, 'DISPLAY', raising=False) - config.set('execution', 'display_variable', ':3') + monkeypatch.delitem(os.environ, "DISPLAY", raising=False) + config.set("execution", "display_variable", ":3") res = ci3.run() - assert 'DISPLAY' not in ci3.inputs.environ - assert 'DISPLAY' not in res.runtime.environ + assert "DISPLAY" not in ci3.inputs.environ + assert "DISPLAY" not in res.runtime.environ # If the interface has _redirect_x then yes, it should be set ci3._redirect_x = True res = ci3.run() - assert res.runtime.environ['DISPLAY'] == ':3' + assert res.runtime.environ["DISPLAY"] == ":3" # Test overwrite - monkeypatch.setitem(os.environ, 'DISPLAY', ':1') - ci3.inputs.environ = {'DISPLAY': ':2'} + monkeypatch.setitem(os.environ, "DISPLAY", ":1") + ci3.inputs.environ = {"DISPLAY": ":2"} res = ci3.run() - assert res.runtime.environ['DISPLAY'] == ':2' + assert res.runtime.environ["DISPLAY"] == ":2" def test_CommandLine_output(tmpdir): # Create one file tmpdir.chdir() - file = tmpdir.join('foo.txt') - file.write('123456\n') + file = tmpdir.join("foo.txt") + file.write("123456\n") name = os.path.basename(file.strpath) - ci = nib.CommandLine(command='ls -l') - ci.terminal_output = 'allatonce' + ci = nib.CommandLine(command="ls -l") + ci.terminal_output = "allatonce" res = ci.run() - assert res.runtime.merged == '' + assert res.runtime.merged == "" assert name in res.runtime.stdout # Check stdout is written - ci = nib.CommandLine(command='ls -l') - ci.terminal_output = 'file_stdout' + ci = nib.CommandLine(command="ls -l") + ci.terminal_output = "file_stdout" res = ci.run() - assert os.path.isfile('stdout.nipype') + assert os.path.isfile("stdout.nipype") assert name in res.runtime.stdout - tmpdir.join('stdout.nipype').remove(ignore_errors=True) + tmpdir.join("stdout.nipype").remove(ignore_errors=True) # Check stderr is written - ci = nib.CommandLine(command='ls -l') - ci.terminal_output = 'file_stderr' + ci = nib.CommandLine(command="ls -l") + ci.terminal_output = "file_stderr" res = ci.run() - assert os.path.isfile('stderr.nipype') - tmpdir.join('stderr.nipype').remove(ignore_errors=True) + assert os.path.isfile("stderr.nipype") + tmpdir.join("stderr.nipype").remove(ignore_errors=True) # Check outputs are thrown away - ci = nib.CommandLine(command='ls -l') - ci.terminal_output = 'none' + ci = nib.CommandLine(command="ls -l") + ci.terminal_output = "none" res = ci.run() - assert res.runtime.stdout == '' and \ - res.runtime.stderr == '' and \ - res.runtime.merged == '' + assert ( + res.runtime.stdout == "" + and res.runtime.stderr == "" + and res.runtime.merged == "" + ) # Check that new interfaces are set to default 'stream' - ci = nib.CommandLine(command='ls -l') + ci = nib.CommandLine(command="ls -l") res = ci.run() - assert ci.terminal_output == 'stream' - assert name in res.runtime.stdout and \ - res.runtime.stderr == '' + assert ci.terminal_output == "stream" + assert name in res.runtime.stdout and res.runtime.stderr == "" # Check only one file is generated - ci = nib.CommandLine(command='ls -l') - ci.terminal_output = 'file' + ci = nib.CommandLine(command="ls -l") + ci.terminal_output = "file" res = ci.run() - assert os.path.isfile('output.nipype') - assert name in res.runtime.merged and \ - res.runtime.stdout == '' and \ - res.runtime.stderr == '' - tmpdir.join('output.nipype').remove(ignore_errors=True) + assert os.path.isfile("output.nipype") + assert ( + name in res.runtime.merged + and res.runtime.stdout == "" + and res.runtime.stderr == "" + ) + 
tmpdir.join("output.nipype").remove(ignore_errors=True) # Check split files are generated - ci = nib.CommandLine(command='ls -l') - ci.terminal_output = 'file_split' + ci = nib.CommandLine(command="ls -l") + ci.terminal_output = "file_split" res = ci.run() - assert os.path.isfile('stdout.nipype') - assert os.path.isfile('stderr.nipype') + assert os.path.isfile("stdout.nipype") + assert os.path.isfile("stderr.nipype") assert name in res.runtime.stdout @@ -446,34 +450,34 @@ def test_global_CommandLine_output(tmpdir): """Ensures CommandLine.set_default_terminal_output works""" from nipype.interfaces.fsl import BET - ci = nib.CommandLine(command='ls -l') - assert ci.terminal_output == 'stream' # default case + ci = nib.CommandLine(command="ls -l") + assert ci.terminal_output == "stream" # default case ci = BET() - assert ci.terminal_output == 'stream' # default case + assert ci.terminal_output == "stream" # default case - nib.CommandLine.set_default_terminal_output('allatonce') - ci = nib.CommandLine(command='ls -l') - assert ci.terminal_output == 'allatonce' + nib.CommandLine.set_default_terminal_output("allatonce") + ci = nib.CommandLine(command="ls -l") + assert ci.terminal_output == "allatonce" - nib.CommandLine.set_default_terminal_output('file') - ci = nib.CommandLine(command='ls -l') - assert ci.terminal_output == 'file' + nib.CommandLine.set_default_terminal_output("file") + ci = nib.CommandLine(command="ls -l") + assert ci.terminal_output == "file" # Check default affects derived interfaces ci = BET() - assert ci.terminal_output == 'file' + assert ci.terminal_output == "file" def test_CommandLine_prefix(tmpdir): tmpdir.chdir() - oop = 'out/of/path' + oop = "out/of/path" os.makedirs(oop) - script_name = 'test_script.sh' + script_name = "test_script.sh" script_path = os.path.join(oop, script_name) - with open(script_path, 'w') as script_f: - script_f.write('#!/usr/bin/env bash\necho Success!') + with open(script_path, "w") as script_f: + script_f.write("#!/usr/bin/env bash\necho Success!") os.chmod(script_path, 0o755) ci = nib.CommandLine(command=script_name) @@ -481,19 +485,19 @@ def test_CommandLine_prefix(tmpdir): ci.run() class OOPCLI(nib.CommandLine): - _cmd_prefix = oop + '/' + _cmd_prefix = oop + "/" ci = OOPCLI(command=script_name) ci.run() class OOPShell(nib.CommandLine): - _cmd_prefix = 'bash {}/'.format(oop) + _cmd_prefix = "bash {}/".format(oop) ci = OOPShell(command=script_name) ci.run() class OOPBadShell(nib.CommandLine): - _cmd_prefix = 'shell_dne {}/'.format(oop) + _cmd_prefix = "shell_dne {}/".format(oop) ci = OOPBadShell(command=script_name) with pytest.raises(IOError): @@ -504,6 +508,7 @@ def test_runtime_checks(): class TestInterface(nib.BaseInterface): class input_spec(nib.TraitedSpec): a = nib.traits.Any() + class output_spec(nib.TraitedSpec): b = nib.traits.Any() @@ -516,7 +521,7 @@ def _run_interface(self, runtime): class BrokenRuntime(TestInterface): def _run_interface(self, runtime): - del runtime.__dict__['cwd'] + del runtime.__dict__["cwd"] return runtime with pytest.raises(RuntimeError): diff --git a/nipype/interfaces/base/tests/test_resource_monitor.py b/nipype/interfaces/base/tests/test_resource_monitor.py index a6c79b091b..47a515f64c 100644 --- a/nipype/interfaces/base/tests/test_resource_monitor.py +++ b/nipype/interfaces/base/tests/test_resource_monitor.py @@ -28,22 +28,26 @@ def use_resource_monitor(): class UseResourcesInputSpec(CommandLineInputSpec): mem_gb = traits.Float( - desc='Number of GB of RAM to use', argstr='-g %f', mandatory=True) + 
desc="Number of GB of RAM to use", argstr="-g %f", mandatory=True + ) n_procs = traits.Int( - desc='Number of threads to use', argstr='-p %d', mandatory=True) + desc="Number of threads to use", argstr="-p %d", mandatory=True + ) class UseResources(CommandLine): """ use_resources cmd interface """ + from nipype import __path__ + # Init attributes input_spec = UseResourcesInputSpec # Get path of executable exec_dir = os.path.realpath(__path__[0]) - exec_path = os.path.join(exec_dir, 'utils', 'tests', 'use_resources') + exec_path = os.path.join(exec_dir, "utils", "tests", "use_resources") # Init cmd _cmd = exec_path @@ -51,39 +55,41 @@ class UseResources(CommandLine): @pytest.mark.skip(reason="inconsistent readings") -@pytest.mark.skipif( - os.getenv('CI_SKIP_TEST', False), reason='disabled in CI tests') -@pytest.mark.parametrize("mem_gb,n_procs", [(0.5, 3), (2.2, 8), (0.8, 4), - (1.5, 1)]) +@pytest.mark.skipif(os.getenv("CI_SKIP_TEST", False), reason="disabled in CI tests") +@pytest.mark.parametrize("mem_gb,n_procs", [(0.5, 3), (2.2, 8), (0.8, 4), (1.5, 1)]) def test_cmdline_profiling(tmpdir, mem_gb, n_procs, use_resource_monitor): """ Test runtime profiler correctly records workflow RAM/CPUs consumption of a CommandLine-derived interface """ from nipype import config - config.set('monitoring', 'sample_frequency', '0.2') # Force sampling fast + + config.set("monitoring", "sample_frequency", "0.2") # Force sampling fast tmpdir.chdir() iface = UseResources(mem_gb=mem_gb, n_procs=n_procs) result = iface.run() - assert abs(mem_gb - result.runtime.mem_peak_gb - ) < 0.3, 'estimated memory error above .3GB' - assert int(result.runtime.cpu_percent / 100 + 0.2 - ) == n_procs, 'wrong number of threads estimated' + assert ( + abs(mem_gb - result.runtime.mem_peak_gb) < 0.3 + ), "estimated memory error above .3GB" + assert ( + int(result.runtime.cpu_percent / 100 + 0.2) == n_procs + ), "wrong number of threads estimated" @pytest.mark.skipif( - True, reason='test disabled temporarily, until funcion profiling works') -@pytest.mark.parametrize("mem_gb,n_procs", [(0.5, 3), (2.2, 8), (0.8, 4), - (1.5, 1)]) + True, reason="test disabled temporarily, until funcion profiling works" +) +@pytest.mark.parametrize("mem_gb,n_procs", [(0.5, 3), (2.2, 8), (0.8, 4), (1.5, 1)]) def test_function_profiling(tmpdir, mem_gb, n_procs, use_resource_monitor): """ Test runtime profiler correctly records workflow RAM/CPUs consumption of a Function interface """ from nipype import config - config.set('monitoring', 'sample_frequency', '0.2') # Force sampling fast + + config.set("monitoring", "sample_frequency", "0.2") # Force sampling fast tmpdir.chdir() iface = niu.Function(function=_use_resources) @@ -91,6 +97,7 @@ def test_function_profiling(tmpdir, mem_gb, n_procs, use_resource_monitor): iface.inputs.n_procs = n_procs result = iface.run() - assert abs(mem_gb - result.runtime.mem_peak_gb - ) < 0.3, 'estimated memory error above .3GB' + assert ( + abs(mem_gb - result.runtime.mem_peak_gb) < 0.3 + ), "estimated memory error above .3GB" assert int(result.runtime.cpu_percent / 100 + 0.2) >= n_procs diff --git a/nipype/interfaces/base/tests/test_specs.py b/nipype/interfaces/base/tests/test_specs.py index f1721fa226..d94f97ed1b 100644 --- a/nipype/interfaces/base/tests/test_specs.py +++ b/nipype/interfaces/base/tests/test_specs.py @@ -17,10 +17,10 @@ @pytest.fixture(scope="module") def setup_file(request, tmpdir_factory): - tmp_dir = tmpdir_factory.mktemp('files') - tmp_infile = tmp_dir.join('foo.txt') - with tmp_infile.open('w') as 
fp: - fp.writelines(['123456789']) + tmp_dir = tmpdir_factory.mktemp("files") + tmp_infile = tmp_dir.join("foo.txt") + with tmp_infile.open("w") as fp: + fp.writelines(["123456789"]) tmp_dir.chdir() @@ -29,7 +29,7 @@ def setup_file(request, tmpdir_factory): def test_TraitedSpec(): assert nib.TraitedSpec().get_hashval() - assert nib.TraitedSpec().__repr__() == '\n\n' + assert nib.TraitedSpec().__repr__() == "\n\n" class spec(nib.TraitedSpec): foo = nib.traits.Int @@ -41,14 +41,16 @@ class spec(nib.TraitedSpec): with pytest.raises(nib.traits.TraitError): specfunc(1) infields = spec(foo=1) - hashval = ([('foo', 1), ('goo', '0.0000000000')], - 'e89433b8c9141aa0fda2f8f4d662c047') + hashval = ( + [("foo", 1), ("goo", "0.0000000000")], + "e89433b8c9141aa0fda2f8f4d662c047", + ) assert infields.get_hashval() == hashval - assert infields.__repr__() == '\nfoo = 1\ngoo = 0.0\n' + assert infields.__repr__() == "\nfoo = 1\ngoo = 0.0\n" def test_TraitedSpec_tab_completion(): - bet_nd = Node(fsl.BET(), name='bet') + bet_nd = Node(fsl.BET(), name="bet") bet_interface = fsl.BET() bet_inputs = bet_nd.inputs.class_editable_traits() bet_outputs = bet_nd.outputs.class_editable_traits() @@ -64,15 +66,16 @@ def test_TraitedSpec_tab_completion(): @pytest.mark.skip def test_TraitedSpec_dynamic(): from pickle import dumps, loads + a = nib.BaseTraitedSpec() - a.add_trait('foo', nib.traits.Int) + a.add_trait("foo", nib.traits.Int) a.foo = 1 - assign_a = lambda: setattr(a, 'foo', 'a') + assign_a = lambda: setattr(a, "foo", "a") with pytest.raises(Exception): assign_a pkld_a = dumps(a) unpkld_a = loads(pkld_a) - assign_a_again = lambda: setattr(unpkld_a, 'foo', 'a') + assign_a_again = lambda: setattr(unpkld_a, "foo", "a") with pytest.raises(Exception): assign_a_again @@ -82,39 +85,43 @@ def extract_func(list_out): return list_out[0] # Define interface - func_interface = Function(input_names=["list_out"], - output_names=["out_file", "another_file"], - function=extract_func) + func_interface = Function( + input_names=["list_out"], + output_names=["out_file", "another_file"], + function=extract_func, + ) # Define node - list_extract = Node(Function( - input_names=["list_out"], output_names=["out_file"], - function=extract_func), name="list_extract") + list_extract = Node( + Function( + input_names=["list_out"], output_names=["out_file"], function=extract_func + ), + name="list_extract", + ) # Check __all__ for interface inputs expected_input = set(list_extract.inputs.editable_traits()) - assert(set(func_interface.inputs.__all__) == expected_input) + assert set(func_interface.inputs.__all__) == expected_input # Check __all__ for node inputs - assert(set(list_extract.inputs.__all__) == expected_input) + assert set(list_extract.inputs.__all__) == expected_input # Check __all__ for node outputs expected_output = set(list_extract.outputs.editable_traits()) - assert(set(list_extract.outputs.__all__) == expected_output) + assert set(list_extract.outputs.__all__) == expected_output # Add trait and retest - list_extract._interface._output_names.append('added_out_trait') - expected_output.add('added_out_trait') - assert(set(list_extract.outputs.__all__) == expected_output) + list_extract._interface._output_names.append("added_out_trait") + expected_output.add("added_out_trait") + assert set(list_extract.outputs.__all__) == expected_output def test_TraitedSpec_logic(): class spec3(nib.TraitedSpec): - _xor_inputs = ('foo', 'bar') + _xor_inputs = ("foo", "bar") - foo = nib.traits.Int(xor=_xor_inputs, desc='foo or bar, not both') - bar 
= nib.traits.Int(xor=_xor_inputs, desc='bar or foo, not both') - kung = nib.traits.Float( - requires=('foo', ), position=0, desc='kung foo') + foo = nib.traits.Int(xor=_xor_inputs, desc="foo or bar, not both") + bar = nib.traits.Int(xor=_xor_inputs, desc="bar or foo, not both") + kung = nib.traits.Float(requires=("foo",), position=0, desc="kung foo") class out3(nib.TraitedSpec): output = nib.traits.Int @@ -129,7 +136,7 @@ class MyInterface(nib.BaseInterface): # setattr(myif.inputs, 'kung', 10.0) myif.inputs.foo = 1 assert myif.inputs.foo == 1 - set_bar = lambda: setattr(myif.inputs, 'bar', 1) + set_bar = lambda: setattr(myif.inputs, "bar", 1) with pytest.raises(IOError): set_bar() assert myif.inputs.foo == 1 @@ -139,34 +146,34 @@ class MyInterface(nib.BaseInterface): def test_deprecation(): with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', UserWarning) + warnings.filterwarnings("always", "", UserWarning) class DeprecationSpec1(nib.TraitedSpec): - foo = nib.traits.Int(deprecated='0.1') + foo = nib.traits.Int(deprecated="0.1") spec_instance = DeprecationSpec1() - set_foo = lambda: setattr(spec_instance, 'foo', 1) + set_foo = lambda: setattr(spec_instance, "foo", 1) with pytest.raises(nib.TraitError): set_foo() - assert len(w) == 0, 'no warnings, just errors' + assert len(w) == 0, "no warnings, just errors" with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', UserWarning) + warnings.filterwarnings("always", "", UserWarning) class DeprecationSpec2(nib.TraitedSpec): - foo = nib.traits.Int(deprecated='100', new_name='bar') + foo = nib.traits.Int(deprecated="100", new_name="bar") spec_instance = DeprecationSpec2() - set_foo = lambda: setattr(spec_instance, 'foo', 1) + set_foo = lambda: setattr(spec_instance, "foo", 1) with pytest.raises(nib.TraitError): set_foo() - assert len(w) == 0, 'no warnings, just errors' + assert len(w) == 0, "no warnings, just errors" with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', UserWarning) + warnings.filterwarnings("always", "", UserWarning) class DeprecationSpec3(nib.TraitedSpec): - foo = nib.traits.Int(deprecated='1000', new_name='bar') + foo = nib.traits.Int(deprecated="1000", new_name="bar") bar = nib.traits.Int() spec_instance = DeprecationSpec3() @@ -176,14 +183,13 @@ class DeprecationSpec3(nib.TraitedSpec): except nib.TraitError: not_raised = False assert not_raised - assert len( - w) == 1, 'deprecated warning 1 %s' % [w1.message for w1 in w] + assert len(w) == 1, "deprecated warning 1 %s" % [w1.message for w1 in w] with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', UserWarning) + warnings.filterwarnings("always", "", UserWarning) class DeprecationSpec3(nib.TraitedSpec): - foo = nib.traits.Int(deprecated='1000', new_name='bar') + foo = nib.traits.Int(deprecated="1000", new_name="bar") bar = nib.traits.Int() spec_instance = DeprecationSpec3() @@ -195,8 +201,7 @@ class DeprecationSpec3(nib.TraitedSpec): assert not_raised assert spec_instance.foo == Undefined assert spec_instance.bar == 1 - assert len( - w) == 1, 'deprecated warning 2 %s' % [w1.message for w1 in w] + assert len(w) == 1, "deprecated warning 2 %s" % [w1.message for w1 in w] def test_namesource(setup_file): @@ -204,12 +209,10 @@ def test_namesource(setup_file): tmpd, nme, ext = split_filename(tmp_infile) class spec2(nib.CommandLineInputSpec): - moo = nib.File( - name_source=['doo'], hash_files=False, argstr="%s", position=2) + moo = 
nib.File(name_source=["doo"], hash_files=False, argstr="%s", position=2) doo = nib.File(exists=True, argstr="%s", position=1) goo = traits.Int(argstr="%d", position=4) - poo = nib.File( - name_source=['goo'], hash_files=False, argstr="%s", position=3) + poo = nib.File(name_source=["goo"], hash_files=False, argstr="%s", position=3) class TestName(nib.CommandLine): _cmd = "mycommand" @@ -218,10 +221,10 @@ class TestName(nib.CommandLine): testobj = TestName() testobj.inputs.doo = tmp_infile testobj.inputs.goo = 99 - assert '%s_generated' % nme in testobj.cmdline - assert '%d_generated' % testobj.inputs.goo in testobj.cmdline + assert "%s_generated" % nme in testobj.cmdline + assert "%d_generated" % testobj.inputs.goo in testobj.cmdline testobj.inputs.moo = "my_%s_template" - assert 'my_%s_template' % nme in testobj.cmdline + assert "my_%s_template" % nme in testobj.cmdline def test_chained_namesource(setup_file): @@ -231,13 +234,13 @@ def test_chained_namesource(setup_file): class spec2(nib.CommandLineInputSpec): doo = nib.File(exists=True, argstr="%s", position=1) moo = nib.File( - name_source=['doo'], + name_source=["doo"], hash_files=False, argstr="%s", position=2, - name_template='%s_mootpl') - poo = nib.File( - name_source=['moo'], hash_files=False, argstr="%s", position=3) + name_template="%s_mootpl", + ) + poo = nib.File(name_source=["moo"], hash_files=False, argstr="%s", position=3) class TestName(nib.CommandLine): _cmd = "mycommand" @@ -246,9 +249,9 @@ class TestName(nib.CommandLine): testobj = TestName() testobj.inputs.doo = tmp_infile res = testobj.cmdline - assert '%s' % tmp_infile in res - assert '%s_mootpl ' % nme in res - assert '%s_mootpl_generated' % nme in res + assert "%s" % tmp_infile in res + assert "%s_mootpl " % nme in res + assert "%s_mootpl_generated" % nme in res def test_cycle_namesource1(setup_file): @@ -257,15 +260,14 @@ def test_cycle_namesource1(setup_file): class spec3(nib.CommandLineInputSpec): moo = nib.File( - name_source=['doo'], + name_source=["doo"], hash_files=False, argstr="%s", position=1, - name_template='%s_mootpl') - poo = nib.File( - name_source=['moo'], hash_files=False, argstr="%s", position=2) - doo = nib.File( - name_source=['poo'], hash_files=False, argstr="%s", position=3) + name_template="%s_mootpl", + ) + poo = nib.File(name_source=["moo"], hash_files=False, argstr="%s", position=2) + doo = nib.File(name_source=["poo"], hash_files=False, argstr="%s", position=3) class TestCycle(nib.CommandLine): _cmd = "mycommand" @@ -287,15 +289,14 @@ def test_cycle_namesource2(setup_file): class spec3(nib.CommandLineInputSpec): moo = nib.File( - name_source=['doo'], + name_source=["doo"], hash_files=False, argstr="%s", position=1, - name_template='%s_mootpl') - poo = nib.File( - name_source=['moo'], hash_files=False, argstr="%s", position=2) - doo = nib.File( - name_source=['poo'], hash_files=False, argstr="%s", position=3) + name_template="%s_mootpl", + ) + poo = nib.File(name_source=["moo"], hash_files=False, argstr="%s", position=2) + doo = nib.File(name_source=["poo"], hash_files=False, argstr="%s", position=3) class TestCycle(nib.CommandLine): _cmd = "mycommand" @@ -313,9 +314,9 @@ class TestCycle(nib.CommandLine): print(res) assert not_raised - assert '%s' % tmp_infile in res - assert '%s_generated' % nme in res - assert '%s_generated_mootpl' % nme in res + assert "%s" % tmp_infile in res + assert "%s_generated" % nme in res + assert "%s_generated_mootpl" % nme in res def test_namesource_constraints(setup_file): @@ -324,30 +325,30 @@ def 
test_namesource_constraints(setup_file): class constrained_spec(nib.CommandLineInputSpec): in_file = nib.File(argstr="%s", position=1) - threshold = traits.Float( - argstr="%g", - xor=['mask_file'], - position=2) + threshold = traits.Float(argstr="%g", xor=["mask_file"], position=2) mask_file = nib.File( argstr="%s", - name_source=['in_file'], - name_template='%s_mask', + name_source=["in_file"], + name_template="%s_mask", keep_extension=True, - xor=['threshold'], - position=2) + xor=["threshold"], + position=2, + ) out_file1 = nib.File( argstr="%s", - name_source=['in_file'], - name_template='%s_out1', + name_source=["in_file"], + name_template="%s_out1", keep_extension=True, - position=3) + position=3, + ) out_file2 = nib.File( argstr="%s", - name_source=['in_file'], - name_template='%s_out2', + name_source=["in_file"], + name_template="%s_out2", keep_extension=True, - requires=['threshold'], - position=4) + requires=["threshold"], + position=4, + ) class TestConstrained(nib.CommandLine): _cmd = "mycommand" @@ -356,15 +357,15 @@ class TestConstrained(nib.CommandLine): tc = TestConstrained() # name_source undefined, so template traits remain undefined - assert tc.cmdline == 'mycommand' + assert tc.cmdline == "mycommand" # mask_file and out_file1 enabled by name_source definition tc.inputs.in_file = os.path.basename(tmp_infile) - assert tc.cmdline == 'mycommand foo.txt foo_mask.txt foo_out1.txt' + assert tc.cmdline == "mycommand foo.txt foo_mask.txt foo_out1.txt" # mask_file disabled by threshold, out_file2 enabled by threshold - tc.inputs.threshold = 10. - assert tc.cmdline == 'mycommand foo.txt 10 foo_out1.txt foo_out2.txt' + tc.inputs.threshold = 10.0 + assert tc.cmdline == "mycommand foo.txt 10 foo_out1.txt foo_out2.txt" def test_TraitedSpec_withFile(setup_file): @@ -377,8 +378,8 @@ class spec2(nib.TraitedSpec): doo = nib.traits.List(nib.File(exists=True)) infields = spec2(moo=tmp_infile, doo=[tmp_infile]) - hashval = infields.get_hashval(hash_method='content') - assert hashval[1] == 'a00e9ee24f5bfa9545a515b7a759886b' + hashval = infields.get_hashval(hash_method="content") + assert hashval[1] == "a00e9ee24f5bfa9545a515b7a759886b" def test_TraitedSpec_withNoFileHashing(setup_file): @@ -391,22 +392,22 @@ class spec2(nib.TraitedSpec): doo = nib.traits.List(nib.File(exists=True)) infields = spec2(moo=nme, doo=[tmp_infile]) - hashval = infields.get_hashval(hash_method='content') - assert hashval[1] == '8da4669ff5d72f670a46ea3e7a203215' + hashval = infields.get_hashval(hash_method="content") + assert hashval[1] == "8da4669ff5d72f670a46ea3e7a203215" class spec3(nib.TraitedSpec): moo = nib.File(exists=True, name_source="doo") doo = nib.traits.List(nib.File(exists=True)) infields = spec3(moo=nme, doo=[tmp_infile]) - hashval1 = infields.get_hashval(hash_method='content') + hashval1 = infields.get_hashval(hash_method="content") class spec4(nib.TraitedSpec): moo = nib.File(exists=True) doo = nib.traits.List(nib.File(exists=True)) infields = spec4(moo=nme, doo=[tmp_infile]) - hashval2 = infields.get_hashval(hash_method='content') + hashval2 = infields.get_hashval(hash_method="content") assert hashval1[1] != hashval2[1] @@ -414,30 +415,29 @@ def test_ImageFile(): x = nib.BaseInterface().inputs # setup traits - x.add_trait('nifti', nib.ImageFile(types=['nifti1', 'dicom'])) - x.add_trait('anytype', nib.ImageFile()) + x.add_trait("nifti", nib.ImageFile(types=["nifti1", "dicom"])) + x.add_trait("anytype", nib.ImageFile()) with pytest.raises(ValueError): - x.add_trait('newtype', 
nib.ImageFile(types=['nifti10'])) - x.add_trait('nocompress', - nib.ImageFile(types=['mgh'], allow_compressed=False)) + x.add_trait("newtype", nib.ImageFile(types=["nifti10"])) + x.add_trait("nocompress", nib.ImageFile(types=["mgh"], allow_compressed=False)) with pytest.raises(nib.TraitError): - x.nifti = 'test.mgz' - x.nifti = 'test.nii' - x.anytype = 'test.xml' + x.nifti = "test.mgz" + x.nifti = "test.nii" + x.anytype = "test.xml" with pytest.raises(nib.TraitError): - x.nocompress = 'test.mgz' - x.nocompress = 'test.mgh' + x.nocompress = "test.mgz" + x.nocompress = "test.mgh" def test_filecopy_info(): class InputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int') - goo = nib.traits.Int(desc='a random int', mandatory=True) - moo = nib.traits.Int(desc='a random int', mandatory=False) - hoo = nib.traits.Int(desc='a random int', usedefault=True) - zoo = nib.File(desc='a file', copyfile=False) - woo = nib.File(desc='a file', copyfile=True) + foo = nib.traits.Int(desc="a random int") + goo = nib.traits.Int(desc="a random int", mandatory=True) + moo = nib.traits.Int(desc="a random int", mandatory=False) + hoo = nib.traits.Int(desc="a random int", usedefault=True) + zoo = nib.File(desc="a file", copyfile=False) + woo = nib.File(desc="a file", copyfile=True) class DerivedInterface(nib.BaseInterface): input_spec = InputSpec @@ -445,16 +445,16 @@ class DerivedInterface(nib.BaseInterface): def normalize_filenames(self): """A mock normalize_filenames for freesurfer interfaces that have one""" - self.inputs.zoo = 'normalized_filename.ext' + self.inputs.zoo = "normalized_filename.ext" assert get_filecopy_info(nib.BaseInterface) == [] # Test on interface class, not instantiated info = get_filecopy_info(DerivedInterface) - assert info[0]['key'] == 'woo' - assert info[0]['copy'] - assert info[1]['key'] == 'zoo' - assert not info[1]['copy'] + assert info[0]["key"] == "woo" + assert info[0]["copy"] + assert info[1]["key"] == "zoo" + assert not info[1]["copy"] info = None # Test with instantiated interface @@ -464,9 +464,9 @@ def normalize_filenames(self): # After the first call to get_filecopy_info zoo is defined info = get_filecopy_info(derived) # Ensure that normalize_filenames was called - assert derived.inputs.zoo == 'normalized_filename.ext' + assert derived.inputs.zoo == "normalized_filename.ext" # Check the results are consistent - assert info[0]['key'] == 'woo' - assert info[0]['copy'] - assert info[1]['key'] == 'zoo' - assert not info[1]['copy'] + assert info[0]["key"] == "woo" + assert info[0]["copy"] + assert info[1]["key"] == "zoo" + assert not info[1]["copy"] diff --git a/nipype/interfaces/base/tests/test_support.py b/nipype/interfaces/base/tests/test_support.py index fbd6dcc209..878794b04f 100644 --- a/nipype/interfaces/base/tests/test_support.py +++ b/nipype/interfaces/base/tests/test_support.py @@ -10,7 +10,7 @@ from ... 
import base as nib -@pytest.mark.parametrize("args", [{}, {'a': 1, 'b': [2, 3]}]) +@pytest.mark.parametrize("args", [{}, {"a": 1, "b": [2, 3]}]) def test_bunch(args): b = nib.Bunch(**args) assert b.__dict__ == args @@ -33,25 +33,24 @@ def test_bunch_methods(): b.update(a=3) newb = b.dictcopy() assert b.a == 3 - assert b.get('a') == 3 - assert b.get('badkey', 'otherthing') == 'otherthing' + assert b.get("a") == 3 + assert b.get("badkey", "otherthing") == "otherthing" assert b != newb assert type(dict()) == type(newb) - assert newb['a'] == 3 + assert newb["a"] == 3 def test_bunch_hash(): # NOTE: Since the path to the json file is included in the Bunch, # the hash will be unique to each machine. - json_pth = pkgrf('nipype', - os.path.join('testing', 'data', 'realign_json.json')) + json_pth = pkgrf("nipype", os.path.join("testing", "data", "realign_json.json")) - b = nib.Bunch(infile=json_pth, otherthing='blue', yat=True) + b = nib.Bunch(infile=json_pth, otherthing="blue", yat=True) newbdict, bhash = b._get_bunch_hash() - assert bhash == 'd1f46750044c3de102efc847720fc35f' + assert bhash == "d1f46750044c3de102efc847720fc35f" # Make sure the hash stored in the json file for `infile` is correct. jshash = md5() - with open(json_pth, 'r') as fp: - jshash.update(fp.read().encode('utf-8')) - assert newbdict['infile'][0][1] == jshash.hexdigest() - assert newbdict['yat'] is True + with open(json_pth, "r") as fp: + jshash.update(fp.read().encode("utf-8")) + assert newbdict["infile"][0][1] == jshash.hexdigest() + assert newbdict["yat"] is True diff --git a/nipype/interfaces/base/tests/test_traits_extension.py b/nipype/interfaces/base/tests/test_traits_extension.py index ac9c36adf7..ec0574ad9c 100644 --- a/nipype/interfaces/base/tests/test_traits_extension.py +++ b/nipype/interfaces/base/tests/test_traits_extension.py @@ -16,8 +16,11 @@ class _test_spec(nib.TraitedSpec): g = nib.traits.Either(nib.File, nib.Str) h = nib.Str i = nib.traits.Either(nib.File, nib.traits.Tuple(nib.File, nib.traits.Int)) - j = nib.traits.Either(nib.File, nib.traits.Tuple(nib.File, nib.traits.Int), - nib.traits.Dict(nib.Str, nib.File())) + j = nib.traits.Either( + nib.File, + nib.traits.Tuple(nib.File, nib.traits.Int), + nib.traits.Dict(nib.Str, nib.File()), + ) k = nib.DictStrStr @@ -25,286 +28,295 @@ def test_rebase_resolve_path_traits(): """Check rebase_path_traits and resolve_path_traits and idempotence.""" spec = _test_spec() - v = '/some/path/f1.txt' - a = rebase_path_traits(spec.trait('a'), v, '/some/path') - assert a == Path('f1.txt') + v = "/some/path/f1.txt" + a = rebase_path_traits(spec.trait("a"), v, "/some/path") + assert a == Path("f1.txt") # Idempotence - assert rebase_path_traits(spec.trait('a'), a, '/some/path') == a + assert rebase_path_traits(spec.trait("a"), a, "/some/path") == a - a = resolve_path_traits(spec.trait('a'), a, '/some/path') + a = resolve_path_traits(spec.trait("a"), a, "/some/path") assert a == Path(v) # Idempotence - assert resolve_path_traits(spec.trait('a'), a, '/some/path') == a + assert resolve_path_traits(spec.trait("a"), a, "/some/path") == a - a = rebase_path_traits(spec.trait('a'), v, '/some/other/path') + a = rebase_path_traits(spec.trait("a"), v, "/some/other/path") assert a == Path(v) # Idempotence - assert rebase_path_traits(spec.trait('a'), a, '/some/other/path') == a + assert rebase_path_traits(spec.trait("a"), a, "/some/other/path") == a - a = resolve_path_traits(spec.trait('a'), a, '/some/path') + a = resolve_path_traits(spec.trait("a"), a, "/some/path") assert a == Path(v) # 
Idempotence - assert resolve_path_traits(spec.trait('a'), a, '/some/path') == a + assert resolve_path_traits(spec.trait("a"), a, "/some/path") == a - v = ('/some/path/f1.txt', '/some/path/f2.txt') - b = rebase_path_traits(spec.trait('b'), v, '/some/path') - assert b == (Path('f1.txt'), Path('f2.txt')) + v = ("/some/path/f1.txt", "/some/path/f2.txt") + b = rebase_path_traits(spec.trait("b"), v, "/some/path") + assert b == (Path("f1.txt"), Path("f2.txt")) # Idempotence - assert rebase_path_traits(spec.trait('b'), b, '/some/path') == b + assert rebase_path_traits(spec.trait("b"), b, "/some/path") == b - b = resolve_path_traits(spec.trait('b'), b, '/some/path') + b = resolve_path_traits(spec.trait("b"), b, "/some/path") assert b == (Path(v[0]), Path(v[1])) # Idempotence - assert resolve_path_traits(spec.trait('b'), b, '/some/path') == b + assert resolve_path_traits(spec.trait("b"), b, "/some/path") == b - v = ['/some/path/f1.txt', '/some/path/f2.txt', '/some/path/f3.txt'] - c = rebase_path_traits(spec.trait('c'), v, '/some/path') - assert c == [Path('f1.txt'), Path('f2.txt'), Path('f3.txt')] + v = ["/some/path/f1.txt", "/some/path/f2.txt", "/some/path/f3.txt"] + c = rebase_path_traits(spec.trait("c"), v, "/some/path") + assert c == [Path("f1.txt"), Path("f2.txt"), Path("f3.txt")] # Idempotence - assert rebase_path_traits(spec.trait('c'), c, '/some/path') == c + assert rebase_path_traits(spec.trait("c"), c, "/some/path") == c - c = resolve_path_traits(spec.trait('c'), c, '/some/path') + c = resolve_path_traits(spec.trait("c"), c, "/some/path") assert c == [Path(vp) for vp in v] # Idempotence - assert resolve_path_traits(spec.trait('c'), c, '/some/path') == c + assert resolve_path_traits(spec.trait("c"), c, "/some/path") == c v = 2.0 - d = rebase_path_traits(spec.trait('d'), v, '/some/path') + d = rebase_path_traits(spec.trait("d"), v, "/some/path") assert d == v - d = resolve_path_traits(spec.trait('d'), d, '/some/path') + d = resolve_path_traits(spec.trait("d"), d, "/some/path") assert d == v - v = '/some/path/either.txt' - d = rebase_path_traits(spec.trait('d'), v, '/some/path') - assert d == Path('either.txt') + v = "/some/path/either.txt" + d = rebase_path_traits(spec.trait("d"), v, "/some/path") + assert d == Path("either.txt") # Idempotence - assert rebase_path_traits(spec.trait('d'), d, '/some/path') == d + assert rebase_path_traits(spec.trait("d"), d, "/some/path") == d - d = resolve_path_traits(spec.trait('d'), d, '/some/path') + d = resolve_path_traits(spec.trait("d"), d, "/some/path") assert d == Path(v) # Idempotence - assert resolve_path_traits(spec.trait('d'), d, '/some/path') == d + assert resolve_path_traits(spec.trait("d"), d, "/some/path") == d - v = ['/some/path/f1.txt', '/some/path/f2.txt', '/some/path/f3.txt'] - e = rebase_path_traits(spec.trait('e'), v, '/some/path') - assert e == [Path('f1.txt'), Path('f2.txt'), Path('f3.txt')] + v = ["/some/path/f1.txt", "/some/path/f2.txt", "/some/path/f3.txt"] + e = rebase_path_traits(spec.trait("e"), v, "/some/path") + assert e == [Path("f1.txt"), Path("f2.txt"), Path("f3.txt")] # Idempotence - assert rebase_path_traits(spec.trait('e'), e, '/some/path') == e + assert rebase_path_traits(spec.trait("e"), e, "/some/path") == e - e = resolve_path_traits(spec.trait('e'), e, '/some/path') + e = resolve_path_traits(spec.trait("e"), e, "/some/path") assert e == [Path(vp) for vp in v] # Idempotence - assert resolve_path_traits(spec.trait('e'), e, '/some/path') == e + assert resolve_path_traits(spec.trait("e"), e, "/some/path") == e - v = 
[['/some/path/f1.txt', '/some/path/f2.txt'], [['/some/path/f3.txt']]] - e = rebase_path_traits(spec.trait('e'), v, '/some/path') - assert e == [[Path('f1.txt'), Path('f2.txt')], [[Path('f3.txt')]]] + v = [["/some/path/f1.txt", "/some/path/f2.txt"], [["/some/path/f3.txt"]]] + e = rebase_path_traits(spec.trait("e"), v, "/some/path") + assert e == [[Path("f1.txt"), Path("f2.txt")], [[Path("f3.txt")]]] # Idempotence - assert rebase_path_traits(spec.trait('e'), e, '/some/path') == e + assert rebase_path_traits(spec.trait("e"), e, "/some/path") == e - e = resolve_path_traits(spec.trait('e'), e, '/some/path') - assert e == [[[Path(vpp) for vpp in vp] if isinstance(vp, list) else Path(vp) for vp in inner] - for inner in v] + e = resolve_path_traits(spec.trait("e"), e, "/some/path") + assert e == [ + [ + [Path(vpp) for vpp in vp] if isinstance(vp, list) else Path(vp) + for vp in inner + ] + for inner in v + ] # Idempotence - assert resolve_path_traits(spec.trait('e'), e, '/some/path') == e + assert resolve_path_traits(spec.trait("e"), e, "/some/path") == e # These are Str - no rebasing/resolving should happen - v = [['/some/path/f1.txt', '/some/path/f2.txt'], [['/some/path/f3.txt']]] - ee = rebase_path_traits(spec.trait('ee'), v, '/some/path') + v = [["/some/path/f1.txt", "/some/path/f2.txt"], [["/some/path/f3.txt"]]] + ee = rebase_path_traits(spec.trait("ee"), v, "/some/path") assert ee == v # Idempotence - assert rebase_path_traits(spec.trait('ee'), ee, '/some/path') == ee + assert rebase_path_traits(spec.trait("ee"), ee, "/some/path") == ee - ee = resolve_path_traits(spec.trait('ee'), [['f1.txt', 'f2.txt'], [['f3.txt']]], '/some/path') - assert ee == [['f1.txt', 'f2.txt'], [['f3.txt']]] + ee = resolve_path_traits( + spec.trait("ee"), [["f1.txt", "f2.txt"], [["f3.txt"]]], "/some/path" + ) + assert ee == [["f1.txt", "f2.txt"], [["f3.txt"]]] # Idempotence - assert resolve_path_traits(spec.trait('ee'), ee, '/some/path') == ee + assert resolve_path_traits(spec.trait("ee"), ee, "/some/path") == ee - v = {'1': '/some/path/f1.txt'} - f = rebase_path_traits(spec.trait('f'), v, '/some') - assert f == {'1': Path('path/f1.txt')} + v = {"1": "/some/path/f1.txt"} + f = rebase_path_traits(spec.trait("f"), v, "/some") + assert f == {"1": Path("path/f1.txt")} # Idempotence - assert rebase_path_traits(spec.trait('f'), f, '/some') == f + assert rebase_path_traits(spec.trait("f"), f, "/some") == f - f = resolve_path_traits(spec.trait('f'), f, '/some') + f = resolve_path_traits(spec.trait("f"), f, "/some") assert f == {k: Path(val) for k, val in v.items()} # Idempotence - assert resolve_path_traits(spec.trait('f'), f, '/some') == f + assert resolve_path_traits(spec.trait("f"), f, "/some") == f # Either(Str, File): passing in path-like apply manipulation - v = '/some/path/either.txt' - g = rebase_path_traits(spec.trait('g'), v, '/some/path') - assert g == Path('either.txt') + v = "/some/path/either.txt" + g = rebase_path_traits(spec.trait("g"), v, "/some/path") + assert g == Path("either.txt") # Idempotence - assert rebase_path_traits(spec.trait('g'), g, '/some/path') == g + assert rebase_path_traits(spec.trait("g"), g, "/some/path") == g - g = resolve_path_traits(spec.trait('g'), g, '/some/path') + g = resolve_path_traits(spec.trait("g"), g, "/some/path") assert g == Path(v) # Idempotence - assert resolve_path_traits(spec.trait('g'), g, '/some/path') == g + assert resolve_path_traits(spec.trait("g"), g, "/some/path") == g - g = rebase_path_traits(spec.trait('g'), v, '/some') - assert g == Path('path/either.txt') + g 
= rebase_path_traits(spec.trait("g"), v, "/some") + assert g == Path("path/either.txt") # Idempotence - assert rebase_path_traits(spec.trait('g'), g, '/some/path') == g + assert rebase_path_traits(spec.trait("g"), g, "/some/path") == g - g = resolve_path_traits(spec.trait('g'), g, '/some') + g = resolve_path_traits(spec.trait("g"), g, "/some") assert g == Path(v) # Idempotence - assert resolve_path_traits(spec.trait('g'), g, '/some/path') == g + assert resolve_path_traits(spec.trait("g"), g, "/some/path") == g # Either(Str, File): passing str discards File - v = 'either.txt' - g = rebase_path_traits(spec.trait('g'), v, '/some/path') + v = "either.txt" + g = rebase_path_traits(spec.trait("g"), v, "/some/path") assert g == v # Idempotence - assert rebase_path_traits(spec.trait('g'), g, '/some/path') == g + assert rebase_path_traits(spec.trait("g"), g, "/some/path") == g # This is a problematic case, it is impossible to know whether this # was meant to be a string or a file. # In this implementation, strings take precedence - g = resolve_path_traits(spec.trait('g'), g, '/some/path') + g = resolve_path_traits(spec.trait("g"), g, "/some/path") assert g == v # Idempotence - assert resolve_path_traits(spec.trait('g'), g, '/some/path') == g + assert resolve_path_traits(spec.trait("g"), g, "/some/path") == g - v = 'string' - g = rebase_path_traits(spec.trait('g'), v, '/some') + v = "string" + g = rebase_path_traits(spec.trait("g"), v, "/some") assert g == v # Idempotence - assert rebase_path_traits(spec.trait('g'), g, '/some') == g + assert rebase_path_traits(spec.trait("g"), g, "/some") == g # This is a problematic case, it is impossible to know whether this # was meant to be a string or a file. - g = resolve_path_traits(spec.trait('g'), v, '/some') + g = resolve_path_traits(spec.trait("g"), v, "/some") assert g == v # Idempotence - assert resolve_path_traits(spec.trait('g'), g, '/some') == g + assert resolve_path_traits(spec.trait("g"), g, "/some") == g - g = rebase_path_traits(spec.trait('g'), v, '/some/path') + g = rebase_path_traits(spec.trait("g"), v, "/some/path") assert g == v # You dont want this one to be a Path # Idempotence - assert rebase_path_traits(spec.trait('g'), g, '/some/path') == g + assert rebase_path_traits(spec.trait("g"), g, "/some/path") == g # This is a problematic case, it is impossible to know whether this # was meant to be a string or a file. 
- g = resolve_path_traits(spec.trait('g'), g, '/some/path') + g = resolve_path_traits(spec.trait("g"), g, "/some/path") assert g == v # You dont want this one to be a Path # Idempotence - assert resolve_path_traits(spec.trait('g'), g, '/some/path') == g + assert resolve_path_traits(spec.trait("g"), g, "/some/path") == g - h = rebase_path_traits(spec.trait('h'), v, '/some/path') + h = rebase_path_traits(spec.trait("h"), v, "/some/path") assert h == v # Idempotence - assert rebase_path_traits(spec.trait('h'), h, '/some/path') == h + assert rebase_path_traits(spec.trait("h"), h, "/some/path") == h - h = resolve_path_traits(spec.trait('h'), h, '/some/path') + h = resolve_path_traits(spec.trait("h"), h, "/some/path") assert h == v # Idempotence - assert resolve_path_traits(spec.trait('h'), h, '/some/path') == h + assert resolve_path_traits(spec.trait("h"), h, "/some/path") == h - v = '/some/path/either/file.txt' - i = rebase_path_traits(spec.trait('i'), v, '/some/path') - assert i == Path('either/file.txt') + v = "/some/path/either/file.txt" + i = rebase_path_traits(spec.trait("i"), v, "/some/path") + assert i == Path("either/file.txt") # Idempotence - assert rebase_path_traits(spec.trait('i'), i, '/some/path') == i + assert rebase_path_traits(spec.trait("i"), i, "/some/path") == i - i = resolve_path_traits(spec.trait('i'), i, '/some/path') + i = resolve_path_traits(spec.trait("i"), i, "/some/path") assert i == Path(v) # Idempotence - assert resolve_path_traits(spec.trait('i'), i, '/some/path') == i + assert resolve_path_traits(spec.trait("i"), i, "/some/path") == i - v = ('/some/path/either/tuple/file.txt', 2) - i = rebase_path_traits(spec.trait('i'), v, '/some/path') - assert i == (Path('either/tuple/file.txt'), 2) + v = ("/some/path/either/tuple/file.txt", 2) + i = rebase_path_traits(spec.trait("i"), v, "/some/path") + assert i == (Path("either/tuple/file.txt"), 2) # Idempotence - assert rebase_path_traits(spec.trait('i'), i, '/some/path') == i + assert rebase_path_traits(spec.trait("i"), i, "/some/path") == i - i = resolve_path_traits(spec.trait('i'), i, '/some/path') + i = resolve_path_traits(spec.trait("i"), i, "/some/path") assert i == (Path(v[0]), v[1]) # Idempotence - assert resolve_path_traits(spec.trait('i'), i, '/some/path') == i + assert resolve_path_traits(spec.trait("i"), i, "/some/path") == i - v = '/some/path/either/file.txt' - j = rebase_path_traits(spec.trait('j'), v, '/some/path') - assert j == Path('either/file.txt') + v = "/some/path/either/file.txt" + j = rebase_path_traits(spec.trait("j"), v, "/some/path") + assert j == Path("either/file.txt") # Idempotence - assert rebase_path_traits(spec.trait('j'), j, '/some/path') == j + assert rebase_path_traits(spec.trait("j"), j, "/some/path") == j - j = resolve_path_traits(spec.trait('j'), j, '/some/path') + j = resolve_path_traits(spec.trait("j"), j, "/some/path") assert j == Path(v) # Idempotence - assert resolve_path_traits(spec.trait('j'), j, '/some/path') == j + assert resolve_path_traits(spec.trait("j"), j, "/some/path") == j - v = ('/some/path/either/tuple/file.txt', 2) - j = rebase_path_traits(spec.trait('j'), ('/some/path/either/tuple/file.txt', 2), '/some/path') - assert j == (Path('either/tuple/file.txt'), 2) + v = ("/some/path/either/tuple/file.txt", 2) + j = rebase_path_traits( + spec.trait("j"), ("/some/path/either/tuple/file.txt", 2), "/some/path" + ) + assert j == (Path("either/tuple/file.txt"), 2) # Idempotence - assert rebase_path_traits(spec.trait('j'), j, '/some/path') == j + assert 
rebase_path_traits(spec.trait("j"), j, "/some/path") == j - j = resolve_path_traits(spec.trait('j'), j, '/some/path') + j = resolve_path_traits(spec.trait("j"), j, "/some/path") assert j == (Path(v[0]), v[1]) # Idempotence - assert resolve_path_traits(spec.trait('j'), j, '/some/path') == j + assert resolve_path_traits(spec.trait("j"), j, "/some/path") == j - v = {'a': '/some/path/either/dict/file.txt'} - j = rebase_path_traits(spec.trait('j'), v, '/some/path') - assert j == {'a': Path('either/dict/file.txt')} + v = {"a": "/some/path/either/dict/file.txt"} + j = rebase_path_traits(spec.trait("j"), v, "/some/path") + assert j == {"a": Path("either/dict/file.txt")} # Idempotence - assert rebase_path_traits(spec.trait('j'), j, '/some/path') == j + assert rebase_path_traits(spec.trait("j"), j, "/some/path") == j - j = resolve_path_traits(spec.trait('j'), j, '/some/path') + j = resolve_path_traits(spec.trait("j"), j, "/some/path") assert j == {k: Path(val) for k, val in v.items()} # Idempotence - assert resolve_path_traits(spec.trait('j'), j, '/some/path') == j + assert resolve_path_traits(spec.trait("j"), j, "/some/path") == j - v = {'path': '/some/path/f1.txt'} - k = rebase_path_traits(spec.trait('k'), v, '/some/path') + v = {"path": "/some/path/f1.txt"} + k = rebase_path_traits(spec.trait("k"), v, "/some/path") assert k == v # Idempotence - assert rebase_path_traits(spec.trait('k'), k, '/some/path') == k + assert rebase_path_traits(spec.trait("k"), k, "/some/path") == k - k = resolve_path_traits(spec.trait('k'), k, '/some/path') + k = resolve_path_traits(spec.trait("k"), k, "/some/path") assert k == v diff --git a/nipype/interfaces/base/traits_extension.py b/nipype/interfaces/base/traits_extension.py index 2e176e8bd4..0ffab07a03 100644 --- a/nipype/interfaces/base/traits_extension.py +++ b/nipype/interfaces/base/traits_extension.py @@ -31,20 +31,20 @@ from pathlib import Path from ...utils.filemanip import path_resolve -if traits_version < '3.7.0': - raise ImportError('Traits version 3.7.0 or higher must be installed') +if traits_version < "3.7.0": + raise ImportError("Traits version 3.7.0 or higher must be installed") IMG_FORMATS = { - 'afni': ('.HEAD', '.BRIK'), - 'cifti2': ('.nii', '.nii.gz'), - 'dicom': ('.dcm', '.IMA', '.tar', '.tar.gz'), - 'gifti': ('.gii', '.gii.gz'), - 'mgh': ('.mgh', '.mgz', '.mgh.gz'), - 'nifti1': ('.nii', '.nii.gz', '.hdr', '.img', '.img.gz'), - 'nifti2': ('.nii', '.nii.gz'), - 'nrrd': ('.nrrd', '.nhdr'), + "afni": (".HEAD", ".BRIK"), + "cifti2": (".nii", ".nii.gz"), + "dicom": (".dcm", ".IMA", ".tar", ".tar.gz"), + "gifti": (".gii", ".gii.gz"), + "mgh": (".mgh", ".mgz", ".mgh.gz"), + "nifti1": (".nii", ".nii.gz", ".hdr", ".img", ".img.gz"), + "nifti2": (".nii", ".nii.gz"), + "nrrd": (".nrrd", ".nhdr"), } -IMG_ZIP_FMT = set(['.nii.gz', 'tar.gz', '.gii.gz', '.mgz', '.mgh.gz', 'img.gz']) +IMG_ZIP_FMT = set([".nii.gz", "tar.gz", ".gii.gz", ".mgz", ".mgh.gz", "img.gz"]) """ The functions that pop-up the Traits GUIs, edit_traits and @@ -98,17 +98,17 @@ class BasePath(TraitType): @property def info_text(self): """Create the trait's general description.""" - info_text = 'a pathlike object or string' + info_text = "a pathlike object or string" if any((self.exists, self._is_file, self._is_dir)): - info_text += ' representing a' + info_text += " representing a" if self.exists: - info_text += 'n existing' + info_text += "n existing" if self._is_file: - info_text += ' file' + info_text += " file" elif self._is_dir: - info_text += ' directory' + info_text += " directory" else: 
- info_text += ' file or directory' + info_text += " file or directory" return info_text def __init__(self, value=Undefined, exists=False, resolve=False, **metadata): @@ -142,6 +142,7 @@ def validate(self, objekt, name, value, return_pathlike=False): return value + class Directory(BasePath): """ Defines a trait whose value must be a directory path. @@ -284,8 +285,15 @@ class File(BasePath): _is_file = True _exts = None - def __init__(self, value=NoDefaultSpecified, exists=False, resolve=False, - allow_compressed=True, extensions=None, **metadata): + def __init__( + self, + value=NoDefaultSpecified, + exists=False, + resolve=False, + allow_compressed=True, + extensions=None, + **metadata + ): """Create a File trait.""" if extensions is not None: if isinstance(extensions, (bytes, str)): @@ -294,11 +302,22 @@ def __init__(self, value=NoDefaultSpecified, exists=False, resolve=False, if allow_compressed is False: extensions = list(set(extensions) - IMG_ZIP_FMT) - self._exts = sorted(set(['.%s' % ext if not ext.startswith('.') else ext - for ext in extensions])) - - super(File, self).__init__(value=value, exists=exists, resolve=resolve, - extensions=self._exts, **metadata) + self._exts = sorted( + set( + [ + ".%s" % ext if not ext.startswith(".") else ext + for ext in extensions + ] + ) + ) + + super(File, self).__init__( + value=value, + exists=exists, + resolve=resolve, + extensions=self._exts, + **metadata + ) def validate(self, objekt, name, value, return_pathlike=False): """Validate a value change.""" @@ -317,8 +336,14 @@ def validate(self, objekt, name, value, return_pathlike=False): class ImageFile(File): """Defines a trait whose value must be a known neuroimaging file.""" - def __init__(self, value=NoDefaultSpecified, exists=False, - resolve=False, types=None, **metadata): + def __init__( + self, + value=NoDefaultSpecified, + exists=False, + resolve=False, + types=None, + **metadata + ): """Create an ImageFile trait.""" extensions = None if types is not None: @@ -327,14 +352,21 @@ def __init__(self, value=NoDefaultSpecified, exists=False, if set(types) - set(IMG_FORMATS.keys()): invalid = set(types) - set(IMG_FORMATS.keys()) - raise ValueError("""\ + raise ValueError( + """\ Unknown value(s) %s for metadata type of an ImageFile input.\ -""" % ', '.join(['"%s"' % t for t in invalid])) +""" + % ", ".join(['"%s"' % t for t in invalid]) + ) extensions = [ext for t in types for ext in IMG_FORMATS[t]] super(ImageFile, self).__init__( - value=value, exists=exists, extensions=extensions, - resolve=resolve, **metadata) + value=value, + exists=exists, + extensions=extensions, + resolve=resolve, + **metadata + ) def isdefined(objekt): @@ -342,20 +374,21 @@ def isdefined(objekt): def has_metadata(trait, metadata, value=None, recursive=True): - ''' + """ Checks if a given trait has a metadata (and optionally if it is set to particular value) - ''' + """ count = 0 - if hasattr(trait, "_metadata") and metadata in list( - trait._metadata.keys()) and (trait._metadata[metadata] == value - or value is None): + if ( + hasattr(trait, "_metadata") + and metadata in list(trait._metadata.keys()) + and (trait._metadata[metadata] == value or value is None) + ): count += 1 if recursive: - if hasattr(trait, 'inner_traits'): + if hasattr(trait, "inner_traits"): for inner_trait in trait.inner_traits(): - count += has_metadata(inner_trait.trait_type, metadata, - recursive) - if hasattr(trait, 'handlers') and trait.handlers is not None: + count += has_metadata(inner_trait.trait_type, metadata, recursive) + if 
hasattr(trait, "handlers") and trait.handlers is not None: for handler in trait.handlers: count += has_metadata(handler, metadata, recursive) @@ -369,21 +402,20 @@ class MultiObject(traits.List): def validate(self, objekt, name, value): # want to treat range and other sequences (except str) as list - if not isinstance(value, (str, bytes)) and isinstance( - value, Sequence): + if not isinstance(value, (str, bytes)) and isinstance(value, Sequence): value = list(value) - if not isdefined(value) or \ - (isinstance(value, list) and len(value) == 0): + if not isdefined(value) or (isinstance(value, list) and len(value) == 0): return Undefined newvalue = value inner_trait = self.inner_traits()[0] - if not isinstance(value, list) \ - or (isinstance(inner_trait.trait_type, traits.List) and - not isinstance(inner_trait.trait_type, InputMultiObject) and - not isinstance(value[0], list)): + if not isinstance(value, list) or ( + isinstance(inner_trait.trait_type, traits.List) + and not isinstance(inner_trait.trait_type, InputMultiObject) + and not isinstance(value[0], list) + ): newvalue = [value] value = super(MultiObject, self).validate(objekt, name, newvalue) @@ -470,6 +502,7 @@ class InputMultiObject(MultiObject): ['/software/temp/foo.txt', '/software/temp/goo.txt'] """ + pass @@ -512,23 +545,34 @@ def _recurse_on_path_traits(func, thistrait, value, cwd): if thistrait.is_trait_type(BasePath): value = func(value, cwd) elif thistrait.is_trait_type(traits.List): - innertrait, = thistrait.inner_traits + (innertrait,) = thistrait.inner_traits if not isinstance(value, (list, tuple)): return _recurse_on_path_traits(func, innertrait, value, cwd) - value = [_recurse_on_path_traits(func, innertrait, v, cwd) - for v in value] + value = [_recurse_on_path_traits(func, innertrait, v, cwd) for v in value] elif isinstance(value, dict) and thistrait.is_trait_type(traits.Dict): _, innertrait = thistrait.inner_traits - value = {k: _recurse_on_path_traits(func, innertrait, v, cwd) - for k, v in value.items()} + value = { + k: _recurse_on_path_traits(func, innertrait, v, cwd) + for k, v in value.items() + } elif isinstance(value, tuple) and thistrait.is_trait_type(traits.Tuple): - value = tuple([_recurse_on_path_traits(func, subtrait, v, cwd) - for subtrait, v in zip(thistrait.handler.types, value)]) + value = tuple( + [ + _recurse_on_path_traits(func, subtrait, v, cwd) + for subtrait, v in zip(thistrait.handler.types, value) + ] + ) elif thistrait.is_trait_type(traits.TraitCompound): - is_str = [isinstance(f, (traits.String, traits.BaseStr, traits.BaseBytes, Str)) - for f in thistrait.handler.handlers] - if any(is_str) and isinstance(value, (bytes, str)) and not value.startswith('/'): + is_str = [ + isinstance(f, (traits.String, traits.BaseStr, traits.BaseBytes, Str)) + for f in thistrait.handler.handlers + ] + if ( + any(is_str) + and isinstance(value, (bytes, str)) + and not value.startswith("/") + ): return value for subtrait in thistrait.handler.handlers: diff --git a/nipype/interfaces/brainsuite/__init__.py b/nipype/interfaces/brainsuite/__init__.py index 6611aedff4..45bcf5fc65 100644 --- a/nipype/interfaces/brainsuite/__init__.py +++ b/nipype/interfaces/brainsuite/__init__.py @@ -1,4 +1,18 @@ # -*- coding: utf-8 -*- -from .brainsuite import (Bse, Bfc, Pvc, Cerebro, Cortex, Scrubmask, Tca, - Dewisp, Dfs, Pialmesh, Skullfinder, Hemisplit, SVReg, - BDP, ThicknessPVC) +from .brainsuite import ( + Bse, + Bfc, + Pvc, + Cerebro, + Cortex, + Scrubmask, + Tca, + Dewisp, + Dfs, + Pialmesh, + Skullfinder, + Hemisplit, + 
SVReg, + BDP, + ThicknessPVC, +) diff --git a/nipype/interfaces/brainsuite/brainsuite.py b/nipype/interfaces/brainsuite/brainsuite.py index 919e9aba1a..0d26017ea7 100644 --- a/nipype/interfaces/brainsuite/brainsuite.py +++ b/nipype/interfaces/brainsuite/brainsuite.py @@ -3,7 +3,15 @@ import os import re as regex -from ..base import TraitedSpec, CommandLineInputSpec, CommandLine, File, traits, isdefined +from ..base import ( + TraitedSpec, + CommandLineInputSpec, + CommandLine, + File, + traits, + isdefined, +) + """This script provides interfaces for BrainSuite command line tools. Please see brainsuite.org for more information. @@ -13,59 +21,60 @@ class BseInputSpec(CommandLineInputSpec): - inputMRIFile = File( - mandatory=True, argstr='-i %s', desc='input MRI volume') + inputMRIFile = File(mandatory=True, argstr="-i %s", desc="input MRI volume") outputMRIVolume = File( - desc= - 'output brain-masked MRI volume. If unspecified, output file name will be auto generated.', - argstr='-o %s', + desc="output brain-masked MRI volume. If unspecified, output file name will be auto generated.", + argstr="-o %s", hash_files=False, - genfile=True) + genfile=True, + ) outputMaskFile = File( - desc= - 'save smooth brain mask. If unspecified, output file name will be auto generated.', - argstr='--mask %s', + desc="save smooth brain mask. If unspecified, output file name will be auto generated.", + argstr="--mask %s", hash_files=False, - genfile=True) + genfile=True, + ) diffusionConstant = traits.Float( - 25, usedefault=True, desc='diffusion constant', argstr='-d %f') + 25, usedefault=True, desc="diffusion constant", argstr="-d %f" + ) diffusionIterations = traits.Int( - 3, usedefault=True, desc='diffusion iterations', argstr='-n %d') + 3, usedefault=True, desc="diffusion iterations", argstr="-n %d" + ) edgeDetectionConstant = traits.Float( - 0.64, usedefault=True, desc='edge detection constant', argstr='-s %f') + 0.64, usedefault=True, desc="edge detection constant", argstr="-s %f" + ) radius = traits.Float( - 1, - usedefault=True, - desc='radius of erosion/dilation filter', - argstr='-r %f') + 1, usedefault=True, desc="radius of erosion/dilation filter", argstr="-r %f" + ) dilateFinalMask = traits.Bool( - True, usedefault=True, desc='dilate final mask', argstr='-p') - trim = traits.Bool( - True, usedefault=True, desc='trim brainstem', argstr='--trim') + True, usedefault=True, desc="dilate final mask", argstr="-p" + ) + trim = traits.Bool(True, usedefault=True, desc="trim brainstem", argstr="--trim") outputDiffusionFilter = File( - desc='diffusion filter output', argstr='--adf %s', hash_files=False) - outputEdgeMap = File( - desc='edge map output', argstr='--edge %s', hash_files=False) + desc="diffusion filter output", argstr="--adf %s", hash_files=False + ) + outputEdgeMap = File(desc="edge map output", argstr="--edge %s", hash_files=False) outputDetailedBrainMask = File( - desc='save detailed brain mask', argstr='--hires %s', hash_files=False) - outputCortexFile = File( - desc='cortex file', argstr='--cortex %s', hash_files=False) + desc="save detailed brain mask", argstr="--hires %s", hash_files=False + ) + outputCortexFile = File(desc="cortex file", argstr="--cortex %s", hash_files=False) verbosityLevel = traits.Float( - 1, usedefault=True, desc=' verbosity level (0=silent)', argstr='-v %f') + 1, usedefault=True, desc=" verbosity level (0=silent)", argstr="-v %f" + ) noRotate = traits.Bool( - desc= - 'retain original orientation(default behavior will auto-rotate input NII files to LPI orientation)', 
- argstr='--norotate') - timer = traits.Bool(desc='show timing', argstr='--timer') + desc="retain original orientation(default behavior will auto-rotate input NII files to LPI orientation)", + argstr="--norotate", + ) + timer = traits.Bool(desc="show timing", argstr="--timer") class BseOutputSpec(TraitedSpec): - outputMRIVolume = File(desc='path/name of brain-masked MRI volume') - outputMaskFile = File(desc='path/name of smooth brain mask') - outputDiffusionFilter = File(desc='path/name of diffusion filter output') - outputEdgeMap = File(desc='path/name of edge map output') - outputDetailedBrainMask = File(desc='path/name of detailed brain mask') - outputCortexFile = File(desc='path/name of cortex file') + outputMRIVolume = File(desc="path/name of brain-masked MRI volume") + outputMaskFile = File(desc="path/name of smooth brain mask") + outputDiffusionFilter = File(desc="path/name of diffusion filter output") + outputEdgeMap = File(desc="path/name of edge map output") + outputDetailedBrainMask = File(desc="path/name of detailed brain mask") + outputCortexFile = File(desc="path/name of cortex file") class Bse(CommandLine): @@ -88,7 +97,7 @@ class Bse(CommandLine): input_spec = BseInputSpec output_spec = BseOutputSpec - _cmd = 'bse' + _cmd = "bse" def _gen_filename(self, name): inputs = self.inputs.get() @@ -96,8 +105,8 @@ def _gen_filename(self, name): return os.path.abspath(inputs[name]) fileToSuffixMap = { - 'outputMRIVolume': '.bse.nii.gz', - 'outputMaskFile': '.mask.nii.gz' + "outputMRIVolume": ".bse.nii.gz", + "outputMaskFile": ".mask.nii.gz", } if name in fileToSuffixMap: @@ -111,87 +120,90 @@ def _list_outputs(self): class BfcInputSpec(CommandLineInputSpec): inputMRIFile = File( - mandatory=True, desc='input skull-stripped MRI volume', argstr='-i %s') - inputMaskFile = File(desc='mask file', argstr='-m %s', hash_files=False) + mandatory=True, desc="input skull-stripped MRI volume", argstr="-i %s" + ) + inputMaskFile = File(desc="mask file", argstr="-m %s", hash_files=False) outputMRIVolume = File( - desc= - 'output bias-corrected MRI volume.If unspecified, output file name will be auto generated.', - argstr='-o %s', + desc="output bias-corrected MRI volume.If unspecified, output file name will be auto generated.", + argstr="-o %s", hash_files=False, - genfile=True) + genfile=True, + ) outputBiasField = File( - desc='save bias field estimate', argstr='--bias %s', hash_files=False) + desc="save bias field estimate", argstr="--bias %s", hash_files=False + ) outputMaskedBiasField = File( - desc='save bias field estimate (masked)', - argstr='--maskedbias %s', - hash_files=False) - histogramRadius = traits.Int( - desc='histogram radius (voxels)', argstr='-r %d') + desc="save bias field estimate (masked)", + argstr="--maskedbias %s", + hash_files=False, + ) + histogramRadius = traits.Int(desc="histogram radius (voxels)", argstr="-r %d") biasEstimateSpacing = traits.Int( - desc='bias sample spacing (voxels)', argstr='-s %d') + desc="bias sample spacing (voxels)", argstr="-s %d" + ) controlPointSpacing = traits.Int( - desc='control point spacing (voxels)', argstr='-c %d') + desc="control point spacing (voxels)", argstr="-c %d" + ) splineLambda = traits.Float( - desc='spline stiffness weighting parameter', argstr='-w %f') + desc="spline stiffness weighting parameter", argstr="-w %f" + ) histogramType = traits.Enum( - 'ellipse', - 'block', - desc= - 'Options for type of histogram\nellipse: use ellipsoid for ROI histogram\nblock :use block for ROI histogram', - argstr='%s') + "ellipse", + 
"block", + desc="Options for type of histogram\nellipse: use ellipsoid for ROI histogram\nblock :use block for ROI histogram", + argstr="%s", + ) iterativeMode = traits.Bool( - desc='iterative mode (overrides -r, -s, -c, -w settings)', - argstr='--iterate') - correctionScheduleFile = File( - desc='list of parameters ', argstr='--schedule %s') + desc="iterative mode (overrides -r, -s, -c, -w settings)", argstr="--iterate" + ) + correctionScheduleFile = File(desc="list of parameters ", argstr="--schedule %s") biasFieldEstimatesOutputPrefix = traits.Str( - desc='save iterative bias field estimates as .n.field.nii.gz', - argstr='--biasprefix %s') + desc="save iterative bias field estimates as .n.field.nii.gz", + argstr="--biasprefix %s", + ) correctedImagesOutputPrefix = traits.Str( - desc='save iterative corrected images as .n.bfc.nii.gz', - argstr='--prefix %s') + desc="save iterative corrected images as .n.bfc.nii.gz", + argstr="--prefix %s", + ) correctWholeVolume = traits.Bool( - desc='apply correction field to entire volume', argstr='--extrapolate') + desc="apply correction field to entire volume", argstr="--extrapolate" + ) minBias = traits.Float( - 0.5, - usedefault=True, - desc='minimum allowed bias value', - argstr='-L %f') + 0.5, usedefault=True, desc="minimum allowed bias value", argstr="-L %f" + ) maxBias = traits.Float( - 1.5, - usedefault=True, - desc='maximum allowed bias value', - argstr='-U %f') + 1.5, usedefault=True, desc="maximum allowed bias value", argstr="-U %f" + ) biasRange = traits.Enum( "low", "medium", "high", - desc= - 'Preset options for bias_model\n low: small bias model [0.95,1.05]\n' - 'medium: medium bias model [0.90,1.10]\n high: high bias model [0.80,1.20]', - argstr='%s') + desc="Preset options for bias_model\n low: small bias model [0.95,1.05]\n" + "medium: medium bias model [0.90,1.10]\n high: high bias model [0.80,1.20]", + argstr="%s", + ) intermediate_file_type = traits.Enum( "analyze", "nifti", "gzippedAnalyze", "gzippedNifti", - desc='Options for the format in which intermediate files are generated', - argstr='%s') - convergenceThreshold = traits.Float( - desc='convergence threshold', argstr='--eps %f') + desc="Options for the format in which intermediate files are generated", + argstr="%s", + ) + convergenceThreshold = traits.Float(desc="convergence threshold", argstr="--eps %f") biasEstimateConvergenceThreshold = traits.Float( - desc='bias estimate convergence threshold (values > 0.1 disable)', - argstr='--beps %f') - verbosityLevel = traits.Int( - desc='verbosity level (0=silent)', argstr='-v %d') - timer = traits.Bool(desc='display timing information', argstr='--timer') + desc="bias estimate convergence threshold (values > 0.1 disable)", + argstr="--beps %f", + ) + verbosityLevel = traits.Int(desc="verbosity level (0=silent)", argstr="-v %d") + timer = traits.Bool(desc="display timing information", argstr="--timer") class BfcOutputSpec(TraitedSpec): - outputMRIVolume = File(desc='path/name of output file') - outputBiasField = File(desc='path/name of bias field output file') - outputMaskedBiasField = File(desc='path/name of masked bias field output') - correctionScheduleFile = File(desc='path/name of schedule file') + outputMRIVolume = File(desc="path/name of output file") + outputBiasField = File(desc="path/name of bias field output file") + outputMaskedBiasField = File(desc="path/name of masked bias field output") + correctionScheduleFile = File(desc="path/name of schedule file") class Bfc(CommandLine): @@ -215,38 +227,37 @@ class 
Bfc(CommandLine): input_spec = BfcInputSpec output_spec = BfcOutputSpec - _cmd = 'bfc' + _cmd = "bfc" def _gen_filename(self, name): inputs = self.inputs.get() if isdefined(inputs[name]): return os.path.abspath(inputs[name]) - fileToSuffixMap = {'outputMRIVolume': '.bfc.nii.gz'} + fileToSuffixMap = {"outputMRIVolume": ".bfc.nii.gz"} if name in fileToSuffixMap: return getFileName(self.inputs.inputMRIFile, fileToSuffixMap[name]) return None def _format_arg(self, name, spec, value): - if name == 'histogramType': - return spec.argstr % { - "ellipse": "--ellipse", - "block": "--block" - }[value] - if name == 'biasRange': - return spec.argstr % { - "low": "--low", - "medium": "--medium", - "high": "--high" - }[value] - if name == 'intermediate_file_type': - return spec.argstr % { - "analyze": "--analyze", - "nifti": "--nifti", - "gzippedAnalyze": "--analyzegz", - "gzippedNifti": "--niftigz" - }[value] + if name == "histogramType": + return spec.argstr % {"ellipse": "--ellipse", "block": "--block"}[value] + if name == "biasRange": + return ( + spec.argstr + % {"low": "--low", "medium": "--medium", "high": "--high"}[value] + ) + if name == "intermediate_file_type": + return ( + spec.argstr + % { + "analyze": "--analyze", + "nifti": "--nifti", + "gzippedAnalyze": "--analyzegz", + "gzippedNifti": "--niftigz", + }[value] + ) return super(Bfc, self)._format_arg(name, spec, value) @@ -255,25 +266,27 @@ def _list_outputs(self): class PvcInputSpec(CommandLineInputSpec): - inputMRIFile = File(mandatory=True, desc='MRI file', argstr='-i %s') - inputMaskFile = File(desc='brain mask file', argstr='-m %s') + inputMRIFile = File(mandatory=True, desc="MRI file", argstr="-i %s") + inputMaskFile = File(desc="brain mask file", argstr="-m %s") outputLabelFile = File( - desc= - 'output label file. If unspecified, output file name will be auto generated.', - argstr='-o %s', - genfile=True) + desc="output label file. 
If unspecified, output file name will be auto generated.", + argstr="-o %s", + genfile=True, + ) outputTissueFractionFile = File( - desc='output tissue fraction file', argstr='-f %s', genfile=True) - spatialPrior = traits.Float(desc='spatial prior strength', argstr='-l %f') - verbosity = traits.Int(desc='verbosity level (0 = silent)', argstr='-v %d') + desc="output tissue fraction file", argstr="-f %s", genfile=True + ) + spatialPrior = traits.Float(desc="spatial prior strength", argstr="-l %f") + verbosity = traits.Int(desc="verbosity level (0 = silent)", argstr="-v %d") threeClassFlag = traits.Bool( - desc='use a three-class (CSF=0,GM=1,WM=2) labeling', argstr='-3') - timer = traits.Bool(desc='time processing', argstr='--timer') + desc="use a three-class (CSF=0,GM=1,WM=2) labeling", argstr="-3" + ) + timer = traits.Bool(desc="time processing", argstr="--timer") class PvcOutputSpec(TraitedSpec): - outputLabelFile = File(desc='path/name of label file') - outputTissueFractionFile = File(desc='path/name of tissue fraction file') + outputLabelFile = File(desc="path/name of label file") + outputTissueFractionFile = File(desc="path/name of tissue fraction file") class Pvc(CommandLine): @@ -298,7 +311,7 @@ class Pvc(CommandLine): input_spec = PvcInputSpec output_spec = PvcOutputSpec - _cmd = 'pvc' + _cmd = "pvc" def _gen_filename(self, name): inputs = self.inputs.get() @@ -306,8 +319,8 @@ def _gen_filename(self, name): return os.path.abspath(inputs[name]) fileToSuffixMap = { - 'outputLabelFile': '.pvc.label.nii.gz', - 'outputTissueFractionFile': '.pvc.frac.nii.gz' + "outputLabelFile": ".pvc.label.nii.gz", + "outputTissueFractionFile": ".pvc.frac.nii.gz", } if name in fileToSuffixMap: return getFileName(self.inputs.inputMRIFile, fileToSuffixMap[name]) @@ -319,53 +332,53 @@ def _list_outputs(self): class CerebroInputSpec(CommandLineInputSpec): - inputMRIFile = File( - mandatory=True, desc='input 3D MRI volume', argstr='-i %s') + inputMRIFile = File(mandatory=True, desc="input 3D MRI volume", argstr="-i %s") inputAtlasMRIFile = File( - mandatory=True, desc='atlas MRI volume', argstr='--atlas %s') + mandatory=True, desc="atlas MRI volume", argstr="--atlas %s" + ) inputAtlasLabelFile = File( - mandatory=True, desc='atlas labeling', argstr='--atlaslabels %s') - inputBrainMaskFile = File(desc='brain mask file', argstr='-m %s') + mandatory=True, desc="atlas labeling", argstr="--atlaslabels %s" + ) + inputBrainMaskFile = File(desc="brain mask file", argstr="-m %s") outputCerebrumMaskFile = File( - desc= - 'output cerebrum mask volume. If unspecified, output file name will be auto generated.', - argstr='-o %s', - genfile=True) + desc="output cerebrum mask volume. If unspecified, output file name will be auto generated.", + argstr="-o %s", + genfile=True, + ) outputLabelVolumeFile = File( - desc= - 'output labeled hemisphere/cerebrum volume. If unspecified, output file name will be auto generated.', - argstr='-l %s', - genfile=True) - costFunction = traits.Int(2, usedefault=True, desc='0,1,2', argstr='-c %d') + desc="output labeled hemisphere/cerebrum volume. 
If unspecified, output file name will be auto generated.", + argstr="-l %s", + genfile=True, + ) + costFunction = traits.Int(2, usedefault=True, desc="0,1,2", argstr="-c %d") useCentroids = traits.Bool( - desc='use centroids of data to initialize position', - argstr='--centroids') + desc="use centroids of data to initialize position", argstr="--centroids" + ) outputAffineTransformFile = File( - desc='save affine transform to file.', argstr='--air %s', genfile=True) + desc="save affine transform to file.", argstr="--air %s", genfile=True + ) outputWarpTransformFile = File( - desc='save warp transform to file.', argstr='--warp %s', genfile=True) - verbosity = traits.Int(desc='verbosity level (0=silent)', argstr='-v %d') - linearConvergence = traits.Float( - desc='linear convergence', argstr='--linconv %f') - warpLabel = traits.Int( - desc='warp order (2,3,4,5,6,7,8)', argstr='--warplevel %d') - warpConvergence = traits.Float( - desc='warp convergence', argstr='--warpconv %f') - keepTempFiles = traits.Bool( - desc="don't remove temporary files", argstr='--keep') + desc="save warp transform to file.", argstr="--warp %s", genfile=True + ) + verbosity = traits.Int(desc="verbosity level (0=silent)", argstr="-v %d") + linearConvergence = traits.Float(desc="linear convergence", argstr="--linconv %f") + warpLabel = traits.Int(desc="warp order (2,3,4,5,6,7,8)", argstr="--warplevel %d") + warpConvergence = traits.Float(desc="warp convergence", argstr="--warpconv %f") + keepTempFiles = traits.Bool(desc="don't remove temporary files", argstr="--keep") tempDirectory = traits.Str( - desc='specify directory to use for temporary files', - argstr='--tempdir %s') + desc="specify directory to use for temporary files", argstr="--tempdir %s" + ) tempDirectoryBase = traits.Str( - desc='create a temporary directory within this directory', - argstr='--tempdirbase %s') + desc="create a temporary directory within this directory", + argstr="--tempdirbase %s", + ) class CerebroOutputSpec(TraitedSpec): - outputCerebrumMaskFile = File(desc='path/name of cerebrum mask file') - outputLabelVolumeFile = File(desc='path/name of label mask file') - outputAffineTransformFile = File(desc='path/name of affine transform file') - outputWarpTransformFile = File(desc='path/name of warp transform file') + outputCerebrumMaskFile = File(desc="path/name of cerebrum mask file") + outputLabelVolumeFile = File(desc="path/name of label mask file") + outputAffineTransformFile = File(desc="path/name of affine transform file") + outputWarpTransformFile = File(desc="path/name of warp transform file") class Cerebro(CommandLine): @@ -393,7 +406,7 @@ class Cerebro(CommandLine): input_spec = CerebroInputSpec output_spec = CerebroOutputSpec - _cmd = 'cerebro' + _cmd = "cerebro" def _gen_filename(self, name): inputs = self.inputs.get() @@ -401,10 +414,10 @@ def _gen_filename(self, name): return os.path.abspath(inputs[name]) fileToSuffixMap = { - 'outputCerebrumMaskFile': '.cerebrum.mask.nii.gz', - 'outputLabelVolumeFile': '.hemi.label.nii.gz', - 'outputWarpTransformFile': '.warp', - 'outputAffineTransformFile': '.air' + "outputCerebrumMaskFile": ".cerebrum.mask.nii.gz", + "outputLabelVolumeFile": ".hemi.label.nii.gz", + "outputWarpTransformFile": ".warp", + "outputAffineTransformFile": ".air", } if name in fileToSuffixMap: return getFileName(self.inputs.inputMRIFile, fileToSuffixMap[name]) @@ -417,36 +430,38 @@ def _list_outputs(self): class CortexInputSpec(CommandLineInputSpec): inputHemisphereLabelFile = File( - mandatory=True, desc='hemisphere / 
lobe label volume', argstr='-h %s') + mandatory=True, desc="hemisphere / lobe label volume", argstr="-h %s" + ) outputCerebrumMask = File( - desc= - 'output structure mask. If unspecified, output file name will be auto generated.', - argstr='-o %s', - genfile=True) + desc="output structure mask. If unspecified, output file name will be auto generated.", + argstr="-o %s", + genfile=True, + ) inputTissueFractionFile = File( - mandatory=True, - desc='tissue fraction file (32-bit float)', - argstr='-f %s') + mandatory=True, desc="tissue fraction file (32-bit float)", argstr="-f %s" + ) tissueFractionThreshold = traits.Float( 50.0, usedefault=True, - desc='tissue fraction threshold (percentage)', - argstr='-p %f') + desc="tissue fraction threshold (percentage)", + argstr="-p %f", + ) computeWGBoundary = traits.Bool( - True, usedefault=True, desc='compute WM/GM boundary', argstr='-w') - computeGCBoundary = traits.Bool( - desc='compute GM/CSF boundary', argstr='-g') + True, usedefault=True, desc="compute WM/GM boundary", argstr="-w" + ) + computeGCBoundary = traits.Bool(desc="compute GM/CSF boundary", argstr="-g") includeAllSubcorticalAreas = traits.Bool( True, usedefault=True, - desc='include all subcortical areas in WM mask', - argstr='-a') - verbosity = traits.Int(desc='verbosity level', argstr='-v %d') - timer = traits.Bool(desc='timing function', argstr='--timer') + desc="include all subcortical areas in WM mask", + argstr="-a", + ) + verbosity = traits.Int(desc="verbosity level", argstr="-v %d") + timer = traits.Bool(desc="timing function", argstr="--timer") class CortexOutputSpec(TraitedSpec): - outputCerebrumMask = File(desc='path/name of cerebrum mask') + outputCerebrumMask = File(desc="path/name of cerebrum mask") class Cortex(CommandLine): @@ -471,16 +486,17 @@ class Cortex(CommandLine): input_spec = CortexInputSpec output_spec = CortexOutputSpec - _cmd = 'cortex' + _cmd = "cortex" def _gen_filename(self, name): inputs = self.inputs.get() if isdefined(inputs[name]): return os.path.abspath(inputs[name]) - if name == 'outputCerebrumMask': - return getFileName(self.inputs.inputHemisphereLabelFile, - '.init.cortex.mask.nii.gz') + if name == "outputCerebrumMask": + return getFileName( + self.inputs.inputHemisphereLabelFile, ".init.cortex.mask.nii.gz" + ) return None def _list_outputs(self): @@ -489,23 +505,26 @@ def _list_outputs(self): class ScrubmaskInputSpec(CommandLineInputSpec): inputMaskFile = File( - mandatory=True, desc='input structure mask file', argstr='-i %s') + mandatory=True, desc="input structure mask file", argstr="-i %s" + ) outputMaskFile = File( - desc= - 'output structure mask file. If unspecified, output file name will be auto generated.', - argstr='-o %s', - genfile=True) + desc="output structure mask file. 
If unspecified, output file name will be auto generated.", + argstr="-o %s", + genfile=True, + ) backgroundFillThreshold = traits.Int( - 2, usedefault=True, desc='background fill threshold', argstr='-b %d') + 2, usedefault=True, desc="background fill threshold", argstr="-b %d" + ) foregroundTrimThreshold = traits.Int( - 0, usedefault=True, desc='foreground trim threshold', argstr='-f %d') - numberIterations = traits.Int(desc='number of iterations', argstr='-n %d') - verbosity = traits.Int(desc='verbosity (0=silent)', argstr='-v %d') - timer = traits.Bool(desc='timing function', argstr='--timer') + 0, usedefault=True, desc="foreground trim threshold", argstr="-f %d" + ) + numberIterations = traits.Int(desc="number of iterations", argstr="-n %d") + verbosity = traits.Int(desc="verbosity (0=silent)", argstr="-v %d") + timer = traits.Bool(desc="timing function", argstr="--timer") class ScrubmaskOutputSpec(TraitedSpec): - outputMaskFile = File(desc='path/name of mask file') + outputMaskFile = File(desc="path/name of mask file") class Scrubmask(CommandLine): @@ -526,18 +545,20 @@ class Scrubmask(CommandLine): >>> results = scrubmask.run() #doctest: +SKIP """ + input_spec = ScrubmaskInputSpec output_spec = ScrubmaskOutputSpec - _cmd = 'scrubmask' + _cmd = "scrubmask" def _gen_filename(self, name): inputs = self.inputs.get() if isdefined(inputs[name]): return os.path.abspath(inputs[name]) - if name == 'outputMaskFile': - return getFileName(self.inputs.inputMaskFile, - '.cortex.scrubbed.mask.nii.gz') + if name == "outputMaskFile": + return getFileName( + self.inputs.inputMaskFile, ".cortex.scrubbed.mask.nii.gz" + ) return None @@ -546,25 +567,25 @@ def _list_outputs(self): class TcaInputSpec(CommandLineInputSpec): - inputMaskFile = File( - mandatory=True, desc='input mask volume', argstr='-i %s') + inputMaskFile = File(mandatory=True, desc="input mask volume", argstr="-i %s") outputMaskFile = File( - desc= - 'output mask volume. If unspecified, output file name will be auto generated.', - argstr='-o %s', - genfile=True) + desc="output mask volume. 
If unspecified, output file name will be auto generated.", + argstr="-o %s", + genfile=True, + ) minCorrectionSize = traits.Int( - 2500, usedefault=True, desc='maximum correction size', argstr='-m %d') - maxCorrectionSize = traits.Int( - desc='minimum correction size', argstr='-n %d') + 2500, usedefault=True, desc="maximum correction size", argstr="-m %d" + ) + maxCorrectionSize = traits.Int(desc="minimum correction size", argstr="-n %d") foregroundDelta = traits.Int( - 20, usedefault=True, desc='foreground delta', argstr='--delta %d') - verbosity = traits.Int(desc='verbosity (0 = quiet)', argstr='-v %d') - timer = traits.Bool(desc='timing function', argstr='--timer') + 20, usedefault=True, desc="foreground delta", argstr="--delta %d" + ) + verbosity = traits.Int(desc="verbosity (0 = quiet)", argstr="-v %d") + timer = traits.Bool(desc="timing function", argstr="--timer") class TcaOutputSpec(TraitedSpec): - outputMaskFile = File(desc='path/name of mask file') + outputMaskFile = File(desc="path/name of mask file") class Tca(CommandLine): @@ -583,18 +604,18 @@ class Tca(CommandLine): >>> results = tca.run() #doctest: +SKIP """ + input_spec = TcaInputSpec output_spec = TcaOutputSpec - _cmd = 'tca' + _cmd = "tca" def _gen_filename(self, name): inputs = self.inputs.get() if isdefined(inputs[name]): return os.path.abspath(inputs[name]) - if name == 'outputMaskFile': - return getFileName(self.inputs.inputMaskFile, - '.cortex.tca.mask.nii.gz') + if name == "outputMaskFile": + return getFileName(self.inputs.inputMaskFile, ".cortex.tca.mask.nii.gz") return None @@ -603,21 +624,20 @@ def _list_outputs(self): class DewispInputSpec(CommandLineInputSpec): - inputMaskFile = File(mandatory=True, desc='input file', argstr='-i %s') + inputMaskFile = File(mandatory=True, desc="input file", argstr="-i %s") outputMaskFile = File( - desc= - 'output file. If unspecified, output file name will be auto generated.', - argstr='-o %s', - genfile=True) - verbosity = traits.Int(desc='verbosity', argstr='-v %d') - sizeThreshold = traits.Int(desc='size threshold', argstr='-t %d') - maximumIterations = traits.Int( - desc='maximum number of iterations', argstr='-n %d') - timer = traits.Bool(desc='time processing', argstr='--timer') + desc="output file. 
If unspecified, output file name will be auto generated.", + argstr="-o %s", + genfile=True, + ) + verbosity = traits.Int(desc="verbosity", argstr="-v %d") + sizeThreshold = traits.Int(desc="size threshold", argstr="-t %d") + maximumIterations = traits.Int(desc="maximum number of iterations", argstr="-n %d") + timer = traits.Bool(desc="time processing", argstr="--timer") class DewispOutputSpec(TraitedSpec): - outputMaskFile = File(desc='path/name of mask file') + outputMaskFile = File(desc="path/name of mask file") class Dewisp(CommandLine): @@ -645,16 +665,15 @@ class Dewisp(CommandLine): input_spec = DewispInputSpec output_spec = DewispOutputSpec - _cmd = 'dewisp' + _cmd = "dewisp" def _gen_filename(self, name): inputs = self.inputs.get() if isdefined(inputs[name]): return os.path.abspath(inputs[name]) - if name == 'outputMaskFile': - return getFileName(self.inputs.inputMaskFile, - '.cortex.dewisp.mask.nii.gz') + if name == "outputMaskFile": + return getFileName(self.inputs.inputMaskFile, ".cortex.dewisp.mask.nii.gz") return None @@ -663,55 +682,59 @@ def _list_outputs(self): class DfsInputSpec(CommandLineInputSpec): - inputVolumeFile = File( - mandatory=True, desc='input 3D volume', argstr='-i %s') + inputVolumeFile = File(mandatory=True, desc="input 3D volume", argstr="-i %s") outputSurfaceFile = File( - desc= - 'output surface mesh file. If unspecified, output file name will be auto generated.', - argstr='-o %s', - genfile=True) + desc="output surface mesh file. If unspecified, output file name will be auto generated.", + argstr="-o %s", + genfile=True, + ) inputShadingVolume = File( - desc='shade surface model with data from image volume', argstr='-c %s') + desc="shade surface model with data from image volume", argstr="-c %s" + ) smoothingIterations = traits.Int( - 10, - usedefault=True, - desc='number of smoothing iterations', - argstr='-n %d') + 10, usedefault=True, desc="number of smoothing iterations", argstr="-n %d" + ) smoothingConstant = traits.Float( - 0.5, usedefault=True, desc='smoothing constant', argstr='-a %f') + 0.5, usedefault=True, desc="smoothing constant", argstr="-a %f" + ) curvatureWeighting = traits.Float( - 5.0, usedefault=True, desc='curvature weighting', argstr='-w %f') - scalingPercentile = traits.Float(desc='scaling percentile', argstr='-f %f') + 5.0, usedefault=True, desc="curvature weighting", argstr="-w %f" + ) + scalingPercentile = traits.Float(desc="scaling percentile", argstr="-f %f") nonZeroTessellation = traits.Bool( - desc='tessellate non-zero voxels', - argstr='-nz', - xor=('nonZeroTessellation', 'specialTessellation')) + desc="tessellate non-zero voxels", + argstr="-nz", + xor=("nonZeroTessellation", "specialTessellation"), + ) tessellationThreshold = traits.Float( - desc= - 'To be used with specialTessellation. Set this value first, then set specialTessellation value.\nUsage: tessellate voxels greater_than, less_than, or equal_to ', - argstr='%f') + desc="To be used with specialTessellation. Set this value first, then set specialTessellation value.\nUsage: tessellate voxels greater_than, less_than, or equal_to ", + argstr="%f", + ) specialTessellation = traits.Enum( - 'greater_than', - 'less_than', - 'equal_to', - desc= - 'To avoid throwing a UserWarning, set tessellationThreshold first. 
Then set this attribute.\nUsage: tessellate voxels greater_than, less_than, or equal_to ', - argstr='%s', - xor=('nonZeroTessellation', 'specialTessellation'), - requires=['tessellationThreshold'], - position=-1) + "greater_than", + "less_than", + "equal_to", + desc="To avoid throwing a UserWarning, set tessellationThreshold first. Then set this attribute.\nUsage: tessellate voxels greater_than, less_than, or equal_to ", + argstr="%s", + xor=("nonZeroTessellation", "specialTessellation"), + requires=["tessellationThreshold"], + position=-1, + ) zeroPadFlag = traits.Bool( - desc='zero-pad volume (avoids clipping at edges)', argstr='-z') + desc="zero-pad volume (avoids clipping at edges)", argstr="-z" + ) noNormalsFlag = traits.Bool( - desc='do not compute vertex normals', argstr='--nonormals') + desc="do not compute vertex normals", argstr="--nonormals" + ) postSmoothFlag = traits.Bool( - desc='smooth vertices after coloring', argstr='--postsmooth') - verbosity = traits.Int(desc='verbosity (0 = quiet)', argstr='-v %d') - timer = traits.Bool(desc='timing function', argstr='--timer') + desc="smooth vertices after coloring", argstr="--postsmooth" + ) + verbosity = traits.Int(desc="verbosity (0 = quiet)", argstr="-v %d") + timer = traits.Bool(desc="timing function", argstr="--timer") class DfsOutputSpec(TraitedSpec): - outputSurfaceFile = File(desc='path/name of surface file') + outputSurfaceFile = File(desc="path/name of surface file") class Dfs(CommandLine): @@ -734,18 +757,21 @@ class Dfs(CommandLine): input_spec = DfsInputSpec output_spec = DfsOutputSpec - _cmd = 'dfs' + _cmd = "dfs" def _format_arg(self, name, spec, value): - if name == 'tessellationThreshold': - return '' # blank argstr - if name == 'specialTessellation': + if name == "tessellationThreshold": + return "" # blank argstr + if name == "specialTessellation": threshold = self.inputs.tessellationThreshold - return spec.argstr % { - "greater_than": ''.join(("-gt %f" % threshold)), - "less_than": ''.join(("-lt %f" % threshold)), - "equal_to": ''.join(("-eq %f" % threshold)) - }[value] + return ( + spec.argstr + % { + "greater_than": "".join(("-gt %f" % threshold)), + "less_than": "".join(("-lt %f" % threshold)), + "equal_to": "".join(("-eq %f" % threshold)), + }[value] + ) return super(Dfs, self)._format_arg(name, spec, value) def _gen_filename(self, name): @@ -753,9 +779,8 @@ def _gen_filename(self, name): if isdefined(inputs[name]): return os.path.abspath(inputs[name]) - if name == 'outputSurfaceFile': - return getFileName(self.inputs.inputVolumeFile, - '.inner.cortex.dfs') + if name == "outputSurfaceFile": + return getFileName(self.inputs.inputVolumeFile, ".inner.cortex.dfs") return None @@ -764,60 +789,57 @@ def _list_outputs(self): class PialmeshInputSpec(CommandLineInputSpec): - inputSurfaceFile = File(mandatory=True, desc='input file', argstr='-i %s') + inputSurfaceFile = File(mandatory=True, desc="input file", argstr="-i %s") outputSurfaceFile = File( - desc= - 'output file. If unspecified, output file name will be auto generated.', - argstr='-o %s', - genfile=True) - verbosity = traits.Int(desc='verbosity', argstr='-v %d') + desc="output file. 
If unspecified, output file name will be auto generated.", + argstr="-o %s", + genfile=True, + ) + verbosity = traits.Int(desc="verbosity", argstr="-v %d") inputTissueFractionFile = File( - mandatory=True, - desc='floating point (32) tissue fraction image', - argstr='-f %s') + mandatory=True, desc="floating point (32) tissue fraction image", argstr="-f %s" + ) numIterations = traits.Int( - 100, usedefault=True, desc='number of iterations', argstr='-n %d') + 100, usedefault=True, desc="number of iterations", argstr="-n %d" + ) searchRadius = traits.Float( - 1, usedefault=True, desc='search radius', argstr='-r %f') - stepSize = traits.Float( - 0.4, usedefault=True, desc='step size', argstr='-s %f') + 1, usedefault=True, desc="search radius", argstr="-r %f" + ) + stepSize = traits.Float(0.4, usedefault=True, desc="step size", argstr="-s %f") inputMaskFile = File( - mandatory=True, - desc='restrict growth to mask file region', - argstr='-m %s') + mandatory=True, desc="restrict growth to mask file region", argstr="-m %s" + ) maxThickness = traits.Float( - 20, - usedefault=True, - desc='maximum allowed tissue thickness', - argstr='--max %f') + 20, usedefault=True, desc="maximum allowed tissue thickness", argstr="--max %f" + ) tissueThreshold = traits.Float( - 1.05, usedefault=True, desc='tissue threshold', argstr='-t %f') + 1.05, usedefault=True, desc="tissue threshold", argstr="-t %f" + ) # output interval is not an output -- it specifies how frequently the # output surfaces are generated outputInterval = traits.Int( - 10, usedefault=True, desc='output interval', argstr='--interval %d') + 10, usedefault=True, desc="output interval", argstr="--interval %d" + ) exportPrefix = traits.Str( - desc='prefix for exporting surfaces if interval is set', - argstr='--prefix %s') + desc="prefix for exporting surfaces if interval is set", argstr="--prefix %s" + ) laplacianSmoothing = traits.Float( - 0.025, - usedefault=True, - desc='apply Laplacian smoothing', - argstr='--smooth %f') - timer = traits.Bool(desc='show timing', argstr='--timer') + 0.025, usedefault=True, desc="apply Laplacian smoothing", argstr="--smooth %f" + ) + timer = traits.Bool(desc="show timing", argstr="--timer") recomputeNormals = traits.Bool( - desc='recompute normals at each iteration', argstr='--norm') + desc="recompute normals at each iteration", argstr="--norm" + ) normalSmoother = traits.Float( - 0.2, - usedefault=True, - desc='strength of normal smoother.', - argstr='--nc %f') + 0.2, usedefault=True, desc="strength of normal smoother.", argstr="--nc %f" + ) tangentSmoother = traits.Float( - desc='strength of tangential smoother.', argstr='--tc %f') + desc="strength of tangential smoother.", argstr="--tc %f" + ) class PialmeshOutputSpec(TraitedSpec): - outputSurfaceFile = File(desc='path/name of surface file') + outputSurfaceFile = File(desc="path/name of surface file") class Pialmesh(CommandLine): @@ -842,16 +864,15 @@ class Pialmesh(CommandLine): input_spec = PialmeshInputSpec output_spec = PialmeshOutputSpec - _cmd = 'pialmesh' + _cmd = "pialmesh" def _gen_filename(self, name): inputs = self.inputs.get() if isdefined(inputs[name]): return os.path.abspath(inputs[name]) - if name == 'outputSurfaceFile': - return getFileName(self.inputs.inputSurfaceFile, - '.pial.cortex.dfs') + if name == "outputSurfaceFile": + return getFileName(self.inputs.inputSurfaceFile, ".pial.cortex.dfs") return None @@ -860,42 +881,43 @@ def _list_outputs(self): class HemisplitInputSpec(CommandLineInputSpec): - inputSurfaceFile = File( - mandatory=True, 
desc='input surface', argstr='-i %s') + inputSurfaceFile = File(mandatory=True, desc="input surface", argstr="-i %s") inputHemisphereLabelFile = File( - mandatory=True, desc='input hemisphere label volume', argstr='-l %s') + mandatory=True, desc="input hemisphere label volume", argstr="-l %s" + ) outputLeftHemisphere = File( - desc= - 'output surface file, left hemisphere. If unspecified, output file name will be auto generated.', - argstr='--left %s', - genfile=True) + desc="output surface file, left hemisphere. If unspecified, output file name will be auto generated.", + argstr="--left %s", + genfile=True, + ) outputRightHemisphere = File( - desc= - 'output surface file, right hemisphere. If unspecified, output file name will be auto generated.', - argstr='--right %s', - genfile=True) + desc="output surface file, right hemisphere. If unspecified, output file name will be auto generated.", + argstr="--right %s", + genfile=True, + ) pialSurfaceFile = File( - desc='pial surface file -- must have same geometry as input surface', - argstr='-p %s') + desc="pial surface file -- must have same geometry as input surface", + argstr="-p %s", + ) outputLeftPialHemisphere = File( - desc= - 'output pial surface file, left hemisphere. If unspecified, output file name will be auto generated.', - argstr='-pl %s', - genfile=True) + desc="output pial surface file, left hemisphere. If unspecified, output file name will be auto generated.", + argstr="-pl %s", + genfile=True, + ) outputRightPialHemisphere = File( - desc= - 'output pial surface file, right hemisphere. If unspecified, output file name will be auto generated.', - argstr='-pr %s', - genfile=True) - verbosity = traits.Int(desc='verbosity (0 = silent)', argstr='-v %d') - timer = traits.Bool(desc='timing function', argstr='--timer') + desc="output pial surface file, right hemisphere. 
If unspecified, output file name will be auto generated.", + argstr="-pr %s", + genfile=True, + ) + verbosity = traits.Int(desc="verbosity (0 = silent)", argstr="-v %d") + timer = traits.Bool(desc="timing function", argstr="--timer") class HemisplitOutputSpec(TraitedSpec): - outputLeftHemisphere = File(desc='path/name of left hemisphere') - outputRightHemisphere = File(desc='path/name of right hemisphere') - outputLeftPialHemisphere = File(desc='path/name of left pial hemisphere') - outputRightPialHemisphere = File(desc='path/name of right pial hemisphere') + outputLeftHemisphere = File(desc="path/name of left hemisphere") + outputRightHemisphere = File(desc="path/name of right hemisphere") + outputLeftPialHemisphere = File(desc="path/name of left pial hemisphere") + outputRightPialHemisphere = File(desc="path/name of right pial hemisphere") class Hemisplit(CommandLine): @@ -920,7 +942,7 @@ class Hemisplit(CommandLine): input_spec = HemisplitInputSpec output_spec = HemisplitOutputSpec - _cmd = 'hemisplit' + _cmd = "hemisplit" def _gen_filename(self, name): inputs = self.inputs.get() @@ -928,14 +950,13 @@ def _gen_filename(self, name): return os.path.abspath(inputs[name]) fileToSuffixMap = { - 'outputLeftHemisphere': '.left.inner.cortex.dfs', - 'outputLeftPialHemisphere': '.left.pial.cortex.dfs', - 'outputRightHemisphere': '.right.inner.cortex.dfs', - 'outputRightPialHemisphere': '.right.pial.cortex.dfs' + "outputLeftHemisphere": ".left.inner.cortex.dfs", + "outputLeftPialHemisphere": ".left.pial.cortex.dfs", + "outputRightHemisphere": ".right.inner.cortex.dfs", + "outputRightPialHemisphere": ".right.pial.cortex.dfs", } if name in fileToSuffixMap: - return getFileName(self.inputs.inputSurfaceFile, - fileToSuffixMap[name]) + return getFileName(self.inputs.inputSurfaceFile, fileToSuffixMap[name]) return None @@ -944,42 +965,48 @@ def _list_outputs(self): class SkullfinderInputSpec(CommandLineInputSpec): - inputMRIFile = File(mandatory=True, desc='input file', argstr='-i %s') + inputMRIFile = File(mandatory=True, desc="input file", argstr="-i %s") inputMaskFile = File( mandatory=True, - desc='A brain mask file, 8-bit image (0=non-brain, 255=brain)', - argstr='-m %s') + desc="A brain mask file, 8-bit image (0=non-brain, 255=brain)", + argstr="-m %s", + ) outputLabelFile = File( - desc= - 'output multi-colored label volume segmenting brain, scalp, inner skull & outer skull ' - 'If unspecified, output file name will be auto generated.', - argstr='-o %s', - genfile=True) - verbosity = traits.Int(desc='verbosity', argstr='-v %d') - lowerThreshold = traits.Int( - desc='Lower threshold for segmentation', argstr='-l %d') - upperThreshold = traits.Int( - desc='Upper threshold for segmentation', argstr='-u %d') + desc="output multi-colored label volume segmenting brain, scalp, inner skull & outer skull " + "If unspecified, output file name will be auto generated.", + argstr="-o %s", + genfile=True, + ) + verbosity = traits.Int(desc="verbosity", argstr="-v %d") + lowerThreshold = traits.Int(desc="Lower threshold for segmentation", argstr="-l %d") + upperThreshold = traits.Int(desc="Upper threshold for segmentation", argstr="-u %d") surfaceFilePrefix = traits.Str( - desc='if specified, generate surface files for brain, skull, and scalp', - argstr='-s %s') + desc="if specified, generate surface files for brain, skull, and scalp", + argstr="-s %s", + ) bgLabelValue = traits.Int( - desc='background label value (0-255)', argstr='--bglabel %d') + desc="background label value (0-255)", argstr="--bglabel %d" + ) 
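(Editor's aside: the hunks above are a pure quote-style and layout reformat of the Dfs, Pialmesh, and Hemisplit specs; no behaviour changes. As a rough sketch of how these interfaces chain in practice, assuming the nipype.interfaces.brainsuite module, purely hypothetical file names, and a local BrainSuite installation on PATH:

    from nipype.interfaces import brainsuite

    # Inner cortical surface; the output name is auto-generated by _gen_filename.
    dfs = brainsuite.Dfs(inputVolumeFile="subj.cortex.dewisp.mask.nii.gz")  # hypothetical file
    inner_surface = dfs.run().outputs.outputSurfaceFile  # *.inner.cortex.dfs

    # Pial surface from the inner surface plus tissue-fraction image and cerebrum mask.
    pialmesh = brainsuite.Pialmesh(
        inputSurfaceFile=inner_surface,
        inputTissueFractionFile="subj.pvc.frac.nii.gz",  # hypothetical file
        inputMaskFile="subj.cerebrum.mask.nii.gz",       # hypothetical file
    )
    pial_surface = pialmesh.run().outputs.outputSurfaceFile  # *.pial.cortex.dfs

    # Split inner and pial surfaces into hemispheres; outputs are auto-named.
    hemisplit = brainsuite.Hemisplit(
        inputSurfaceFile=inner_surface,
        inputHemisphereLabelFile="subj.hemi.label.nii.gz",  # hypothetical file
        pialSurfaceFile=pial_surface,
    )
    hemisplit.run()

Any output left unspecified is derived by the interface's _gen_filename from the corresponding input, using the suffix maps shown in these hunks.)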
scalpLabelValue = traits.Int( - desc='scalp label value (0-255)', argstr='--scalplabel %d') + desc="scalp label value (0-255)", argstr="--scalplabel %d" + ) skullLabelValue = traits.Int( - desc='skull label value (0-255)', argstr='--skulllabel %d') + desc="skull label value (0-255)", argstr="--skulllabel %d" + ) spaceLabelValue = traits.Int( - desc='space label value (0-255)', argstr='--spacelabel %d') + desc="space label value (0-255)", argstr="--spacelabel %d" + ) brainLabelValue = traits.Int( - desc='brain label value (0-255)', argstr='--brainlabel %d') + desc="brain label value (0-255)", argstr="--brainlabel %d" + ) performFinalOpening = traits.Bool( - desc='perform a final opening operation on the scalp mask', - argstr='--finalOpening') + desc="perform a final opening operation on the scalp mask", + argstr="--finalOpening", + ) class SkullfinderOutputSpec(TraitedSpec): - outputLabelFile = File(desc='path/name of label file') + outputLabelFile = File(desc="path/name of label file") class Skullfinder(CommandLine): @@ -997,18 +1024,18 @@ class Skullfinder(CommandLine): >>> results = skullfinder.run() #doctest: +SKIP """ + input_spec = SkullfinderInputSpec output_spec = SkullfinderOutputSpec - _cmd = 'skullfinder' + _cmd = "skullfinder" def _gen_filename(self, name): inputs = self.inputs.get() if isdefined(inputs[name]): return os.path.abspath(inputs[name]) - if name == 'outputLabelFile': - return getFileName(self.inputs.inputMRIFile, - '.skullfinder.label.nii.gz') + if name == "outputLabelFile": + return getFileName(self.inputs.inputMRIFile, ".skullfinder.label.nii.gz") return None @@ -1018,116 +1045,117 @@ def _list_outputs(self): class SVRegInputSpec(CommandLineInputSpec): subjectFilePrefix = traits.Str( - argstr='\'%s\'', + argstr="'%s'", mandatory=True, position=0, - desc= - 'Absolute path and filename prefix of the subjects output from BrainSuite ' - 'Cortical Surface Extraction Sequence') + desc="Absolute path and filename prefix of the subjects output from BrainSuite " + "Cortical Surface Extraction Sequence", + ) dataSinkDelay = traits.List( traits.Str, - argstr='%s', - desc= - 'Connect datasink out_file to dataSinkDelay to delay execution of SVReg ' - 'until dataSink has finished sinking CSE outputs.' - 'For use with parallel processing workflows including Brainsuites Cortical ' - 'Surface Extraction sequence (SVReg requires certain files from Brainsuite ' - 'CSE, which must all be in the pathway specified by subjectFilePrefix. see ' - 'http://brainsuite.org/processing/svreg/usage/ for list of required inputs ' + argstr="%s", + desc="Connect datasink out_file to dataSinkDelay to delay execution of SVReg " + "until dataSink has finished sinking CSE outputs." + "For use with parallel processing workflows including Brainsuites Cortical " + "Surface Extraction sequence (SVReg requires certain files from Brainsuite " + "CSE, which must all be in the pathway specified by subjectFilePrefix. see " + "http://brainsuite.org/processing/svreg/usage/ for list of required inputs ", ) atlasFilePrefix = traits.Str( position=1, - argstr='\'%s\'', - desc= - 'Optional: Absolute Path and filename prefix of atlas files and labels to which ' - 'the subject will be registered. If unspecified, SVReg' - 'will use its own included atlas files') + argstr="'%s'", + desc="Optional: Absolute Path and filename prefix of atlas files and labels to which " + "the subject will be registered. 
If unspecified, SVReg" + "will use its own included atlas files", + ) iterations = traits.Int( - argstr='\'-H %d\'', - desc='Assigns a number of iterations in the intensity registration step.' - 'if unspecified, performs 100 iterations') + argstr="'-H %d'", + desc="Assigns a number of iterations in the intensity registration step." + "if unspecified, performs 100 iterations", + ) refineOutputs = traits.Bool( - argstr='\'-r\'', - desc='Refine outputs at the expense of more processing time.') + argstr="'-r'", desc="Refine outputs at the expense of more processing time." + ) skipToVolumeReg = traits.Bool( - argstr='\'-s\'', - desc= - 'If surface registration was already performed at an earlier time and the ' - 'user would not like to redo this step, then this flag may be used to skip ' - 'ahead to the volumetric registration. Necessary input files will need to ' - 'be present in the input directory called by the command.') + argstr="'-s'", + desc="If surface registration was already performed at an earlier time and the " + "user would not like to redo this step, then this flag may be used to skip " + "ahead to the volumetric registration. Necessary input files will need to " + "be present in the input directory called by the command.", + ) skipToIntensityReg = traits.Bool( - argstr='\'-p\'', - desc= - 'If the p-harmonic volumetric registration was already performed at an ' - 'earlier time and the user would not like to redo this step, then this ' - 'flag may be used to skip ahead to the intensity registration and ' - 'label transfer step.') + argstr="'-p'", + desc="If the p-harmonic volumetric registration was already performed at an " + "earlier time and the user would not like to redo this step, then this " + "flag may be used to skip ahead to the intensity registration and " + "label transfer step.", + ) useManualMaskFile = traits.Bool( - argstr='\'-cbm\'', - desc= - 'Can call a manually edited cerebrum mask to limit boundaries. Will ' - 'use file: subbasename.cerebrum.mask.nii.gz Make sure to correctly ' - 'replace your manually edited mask file in your input folder with the ' - 'correct subbasename.') + argstr="'-cbm'", + desc="Can call a manually edited cerebrum mask to limit boundaries. Will " + "use file: subbasename.cerebrum.mask.nii.gz Make sure to correctly " + "replace your manually edited mask file in your input folder with the " + "correct subbasename.", + ) curveMatchingInstructions = traits.Str( - argstr='\'-cur %s\'', - desc= - 'Used to take control of the curve matching process between the atlas ' - 'and subject. One can specify the name of the .dfc file and ' - 'the sulcal numbers <#sul> to be used as constraints. ' - 'example: curveMatchingInstructions = "subbasename.right.dfc 1 2 20"') + argstr="'-cur %s'", + desc="Used to take control of the curve matching process between the atlas " + "and subject. One can specify the name of the .dfc file and " + "the sulcal numbers <#sul> to be used as constraints. " + 'example: curveMatchingInstructions = "subbasename.right.dfc 1 2 20"', + ) useCerebrumMask = traits.Bool( - argstr='\'-C\'', - desc= - 'The cerebrum mask will be used for ' - 'masking the final labels instead of the default pial surface mask. ' - 'Every voxel will be labeled within the cerebrum mask regardless of ' - 'the boundaries of the pial surface.') + argstr="'-C'", + desc="The cerebrum mask will be used for " + "masking the final labels instead of the default pial surface mask. 
" + "Every voxel will be labeled within the cerebrum mask regardless of " + "the boundaries of the pial surface.", + ) pialSurfaceMaskDilation = traits.Int( - argstr='\'-D %d\'', - desc= - 'Cortical volume labels found in file output subbasename.svreg.label.nii.gz ' - 'find its boundaries by using the pial surface then dilating by 1 voxel. ' - 'Use this flag in order to control the number of pial surface mask dilation. ' - '(ie. -D 0 will assign no voxel dilation)') + argstr="'-D %d'", + desc="Cortical volume labels found in file output subbasename.svreg.label.nii.gz " + "find its boundaries by using the pial surface then dilating by 1 voxel. " + "Use this flag in order to control the number of pial surface mask dilation. " + "(ie. -D 0 will assign no voxel dilation)", + ) keepIntermediates = traits.Bool( - argstr='\'-k\'', - desc='Keep the intermediate files after the svreg sequence is complete.' + argstr="'-k'", + desc="Keep the intermediate files after the svreg sequence is complete.", ) - _XOR_verbosity = ('verbosity0', 'verbosity1', 'verbosity2') + _XOR_verbosity = ("verbosity0", "verbosity1", "verbosity2") verbosity0 = traits.Bool( - argstr='\'-v0\'', - xor=_XOR_verbosity, - desc='no messages will be reported') + argstr="'-v0'", xor=_XOR_verbosity, desc="no messages will be reported" + ) verbosity1 = traits.Bool( - argstr='\'-v1\'', + argstr="'-v1'", xor=_XOR_verbosity, - desc= - 'messages will be reported but not the iteration-wise detailed messages' + desc="messages will be reported but not the iteration-wise detailed messages", ) verbosity2 = traits.Bool( - argstr='\'v2\'', + argstr="'v2'", xor=_XOR_verbosity, - desc='all the messages, including per-iteration, will be displayed') + desc="all the messages, including per-iteration, will be displayed", + ) shortMessages = traits.Bool( - argstr='\'-gui\'', desc='Short messages instead of detailed messages') + argstr="'-gui'", desc="Short messages instead of detailed messages" + ) displayModuleName = traits.Bool( - argstr='\'-m\'', desc='Module name will be displayed in the messages') + argstr="'-m'", desc="Module name will be displayed in the messages" + ) displayTimestamps = traits.Bool( - argstr='\'-t\'', desc='Timestamps will be displayed in the messages') + argstr="'-t'", desc="Timestamps will be displayed in the messages" + ) skipVolumetricProcessing = traits.Bool( - argstr='\'-S\'', - desc= - 'Only surface registration and labeling will be performed. Volumetric ' - 'processing will be skipped.') + argstr="'-S'", + desc="Only surface registration and labeling will be performed. 
Volumetric " + "processing will be skipped.", + ) useMultiThreading = traits.Bool( - argstr='\'-P\'', - desc= - 'If multiple CPUs are present on the system, the code will try to use ' - 'multithreading to make the execution fast.') - useSingleThreading = traits.Bool( - argstr='\'-U\'', desc='Use single threaded mode.') + argstr="'-P'", + desc="If multiple CPUs are present on the system, the code will try to use " + "multithreading to make the execution fast.", + ) + useSingleThreading = traits.Bool(argstr="'-U'", desc="Use single threaded mode.") class SVReg(CommandLine): @@ -1157,64 +1185,66 @@ class SVReg(CommandLine): """ input_spec = SVRegInputSpec - _cmd = 'svreg.sh' + _cmd = "svreg.sh" def _format_arg(self, name, spec, value): - if name == 'subjectFilePrefix' or name == 'atlasFilePrefix' or name == 'curveMatchingInstructions': + if ( + name == "subjectFilePrefix" + or name == "atlasFilePrefix" + or name == "curveMatchingInstructions" + ): return spec.argstr % os.path.expanduser(value) - if name == 'dataSinkDelay': - return spec.argstr % '' + if name == "dataSinkDelay": + return spec.argstr % "" return super(SVReg, self)._format_arg(name, spec, value) class BDPInputSpec(CommandLineInputSpec): bfcFile = File( - argstr='%s', + argstr="%s", mandatory=True, position=0, - xor=['noStructuralRegistration'], - desc= - 'Specify absolute path to file produced by bfc. By default, bfc produces the file in ' - 'the format: prefix.bfc.nii.gz') + xor=["noStructuralRegistration"], + desc="Specify absolute path to file produced by bfc. By default, bfc produces the file in " + "the format: prefix.bfc.nii.gz", + ) noStructuralRegistration = traits.Bool( - argstr='--no-structural-registration', + argstr="--no-structural-registration", mandatory=True, position=0, - xor=['bfcFile'], - desc= - 'Allows BDP to work without any structural input. This can useful when ' - 'one is only interested in diffusion modelling part of BDP. With this ' - 'flag only fieldmap-based distortion correction is supported. ' - 'outPrefix can be used to specify fileprefix of the output ' - 'filenames. Change dwiMask to define region of interest ' - 'for diffusion modelling.') + xor=["bfcFile"], + desc="Allows BDP to work without any structural input. This can useful when " + "one is only interested in diffusion modelling part of BDP. With this " + "flag only fieldmap-based distortion correction is supported. " + "outPrefix can be used to specify fileprefix of the output " + "filenames. Change dwiMask to define region of interest " + "for diffusion modelling.", + ) inputDiffusionData = File( - argstr='--nii %s', + argstr="--nii %s", mandatory=True, position=-2, - desc= - 'Specifies the absolute path and filename of the input diffusion data in 4D NIfTI-1 ' - 'format. The flag must be followed by the filename. Only NIfTI-1 files ' - 'with extension .nii or .nii.gz are supported. Furthermore, either ' - 'bMatrixFile, or a combination of both bValueFile and diffusionGradientFile ' - 'must be used to provide the necessary b-matrices/b-values and gradient vectors. ' + desc="Specifies the absolute path and filename of the input diffusion data in 4D NIfTI-1 " + "format. The flag must be followed by the filename. Only NIfTI-1 files " + "with extension .nii or .nii.gz are supported. Furthermore, either " + "bMatrixFile, or a combination of both bValueFile and diffusionGradientFile " + "must be used to provide the necessary b-matrices/b-values and gradient vectors. 
", ) bMatrixFile = File( - argstr='--bmat %s', + argstr="--bmat %s", mandatory=True, - xor=['BVecBValPair'], + xor=["BVecBValPair"], position=-1, - desc= - 'Specifies the absolute path and filename of the file containing b-matrices for ' - 'diffusion-weighted scans. The flag must be followed by the filename. ' - 'This file must be a plain text file containing 3x3 matrices for each ' - 'diffusion encoding direction. It should contain zero matrices ' + desc="Specifies the absolute path and filename of the file containing b-matrices for " + "diffusion-weighted scans. The flag must be followed by the filename. " + "This file must be a plain text file containing 3x3 matrices for each " + "diffusion encoding direction. It should contain zero matrices " 'corresponding to b=0 images. This file usually has ".bmat" as its ' - 'extension, and can be used to provide BDP with the more-accurate ' - 'b-matrices as saved by some proprietary scanners. The b-matrices ' - 'specified by the file must be in the voxel coordinates of the input ' - 'diffusion weighted image (NIfTI file). In case b-matrices are not known/calculated, ' - 'bvec and .bval files can be used instead (see diffusionGradientFile and bValueFile). ' + "extension, and can be used to provide BDP with the more-accurate " + "b-matrices as saved by some proprietary scanners. The b-matrices " + "specified by the file must be in the voxel coordinates of the input " + "diffusion weighted image (NIfTI file). In case b-matrices are not known/calculated, " + "bvec and .bval files can be used instead (see diffusionGradientFile and bValueFile). ", ) BVecBValPair = traits.List( traits.Str, @@ -1222,452 +1252,465 @@ class BDPInputSpec(CommandLineInputSpec): maxlen=2, mandatory=True, position=-1, - xor=['bMatrixFile'], - argstr='--bvec %s --bval %s', - desc= - 'Must input a list containing first the BVector file, then the BValue file (both must be absolute paths)\n' - 'Example: bdp.inputs.BVecBValPair = [\'/directory/subdir/prefix.dwi.bvec\', \'/directory/subdir/prefix.dwi.bval\'] ' - 'The first item in the list specifies the filename of the file containing b-values for the ' - 'diffusion scan. The b-value file must be a plain-text file and usually has an ' - 'extension of .bval\n' - 'The second item in the list specifies the filename of the file containing the diffusion gradient ' - 'directions (specified in the voxel coordinates of the input ' - 'diffusion-weighted image)The b-vectors file must be a plain text file and ' - 'usually has an extension of .bvec ') + xor=["bMatrixFile"], + argstr="--bvec %s --bval %s", + desc="Must input a list containing first the BVector file, then the BValue file (both must be absolute paths)\n" + "Example: bdp.inputs.BVecBValPair = ['/directory/subdir/prefix.dwi.bvec', '/directory/subdir/prefix.dwi.bval'] " + "The first item in the list specifies the filename of the file containing b-values for the " + "diffusion scan. The b-value file must be a plain-text file and usually has an " + "extension of .bval\n" + "The second item in the list specifies the filename of the file containing the diffusion gradient " + "directions (specified in the voxel coordinates of the input " + "diffusion-weighted image)The b-vectors file must be a plain text file and " + "usually has an extension of .bvec ", + ) dataSinkDelay = traits.List( traits.Str, - argstr='%s', - desc= - 'For use in parallel processing workflows including Brainsuite Cortical ' - 'Surface Extraction sequence. 
Connect datasink out_file to dataSinkDelay ' - 'to delay execution of BDP until dataSink has finished sinking outputs. ' - 'In particular, BDP may be run after BFC has finished. For more information ' - 'see http://brainsuite.org/processing/diffusion/pipeline/') + argstr="%s", + desc="For use in parallel processing workflows including Brainsuite Cortical " + "Surface Extraction sequence. Connect datasink out_file to dataSinkDelay " + "to delay execution of BDP until dataSink has finished sinking outputs. " + "In particular, BDP may be run after BFC has finished. For more information " + "see http://brainsuite.org/processing/diffusion/pipeline/", + ) phaseEncodingDirection = traits.Enum( - 'x', - 'x-', - 'y', - 'y-', - 'z', - 'z-', - argstr='--dir=%s', - desc= - 'Specifies the phase-encoding direction of the EPI (diffusion) images. ' - 'It is same as the dominant direction of distortion in the images. This ' - 'information is used to constrain the distortion correction along the ' - 'specified direction. Directions are represented by any one of x, x-, y, ' + "x", + "x-", + "y", + "y-", + "z", + "z-", + argstr="--dir=%s", + desc="Specifies the phase-encoding direction of the EPI (diffusion) images. " + "It is same as the dominant direction of distortion in the images. This " + "information is used to constrain the distortion correction along the " + "specified direction. Directions are represented by any one of x, x-, y, " 'y-, z or z-. "x" direction increases towards the right side of the ' 'subject, while "x-" increases towards the left side of the subject. ' 'Similarly, "y" and "y-" are along the anterior-posterior direction of ' 'the subject, and "z" & "z-" are along the inferior-superior direction. ' 'When this flag is not used, BDP uses "y" as the default phase-encoding ' - 'direction. ') + "direction. ", + ) echoSpacing = traits.Float( - argstr='--echo-spacing=%f', - desc= - 'Sets the echo spacing to t seconds, which is used for fieldmap-based ' - 'distortion correction. This flag is required when using fieldmapCorrection' + argstr="--echo-spacing=%f", + desc="Sets the echo spacing to t seconds, which is used for fieldmap-based " + "distortion correction. This flag is required when using fieldmapCorrection", ) bValRatioThreshold = traits.Float( - argstr='--bval-ratio-threshold %f', - desc= - 'Sets a threshold which is used to determine b=0 images. When there are ' - 'no diffusion weighted image with b-value of zero, then BDP tries to use ' - 'diffusion weighted images with a low b-value in place of b=0 image. The ' - 'diffusion images with minimum b-value is used as b=0 image only if the ' - 'ratio of the maximum and minimum b-value is more than the specified ' - 'threshold. A lower value of threshold will allow diffusion images with ' - 'higher b-value to be used as b=0 image. The default value of this ' - 'threshold is set to 45, if this trait is not set. ') + argstr="--bval-ratio-threshold %f", + desc="Sets a threshold which is used to determine b=0 images. When there are " + "no diffusion weighted image with b-value of zero, then BDP tries to use " + "diffusion weighted images with a low b-value in place of b=0 image. The " + "diffusion images with minimum b-value is used as b=0 image only if the " + "ratio of the maximum and minimum b-value is more than the specified " + "threshold. A lower value of threshold will allow diffusion images with " + "higher b-value to be used as b=0 image. The default value of this " + "threshold is set to 45, if this trait is not set. 
", + ) estimateTensors = traits.Bool( - argstr='--tensors', - desc= - 'Estimates diffusion tensors using a weighted log-linear estimation and ' - 'saves derived diffusion tensor parameters (FA, MD, axial, radial, L2, ' - 'L3). This is the default behavior if no diffusion modeling flags are ' - 'specified. The estimated diffusion tensors can be visualized by loading ' - 'the saved *.eig.nii.gz file in BrainSuite. BDP reports diffusivity (MD, ' - 'axial, radial, L2 and L3) in a unit which is reciprocal inverse of the ' - 'unit of input b-value. ') + argstr="--tensors", + desc="Estimates diffusion tensors using a weighted log-linear estimation and " + "saves derived diffusion tensor parameters (FA, MD, axial, radial, L2, " + "L3). This is the default behavior if no diffusion modeling flags are " + "specified. The estimated diffusion tensors can be visualized by loading " + "the saved *.eig.nii.gz file in BrainSuite. BDP reports diffusivity (MD, " + "axial, radial, L2 and L3) in a unit which is reciprocal inverse of the " + "unit of input b-value. ", + ) estimateODF_FRACT = traits.Bool( - argstr='--FRACT', - desc= - 'Estimates ODFs using the Funk-Radon and Cosine Transformation (FRACT). ' + argstr="--FRACT", + desc="Estimates ODFs using the Funk-Radon and Cosine Transformation (FRACT). " 'The outputs are saved in a separate directory with name "FRACT" and the ' - 'ODFs can be visualized by loading the saved ".odf" file in BrainSuite. ' + 'ODFs can be visualized by loading the saved ".odf" file in BrainSuite. ', ) estimateODF_FRT = traits.Bool( - argstr='--FRT', - desc= - 'Estimates ODFs using Funk-Radon Transformation (FRT). The coefficient ' + argstr="--FRT", + desc="Estimates ODFs using Funk-Radon Transformation (FRT). The coefficient " 'maps for ODFs are saved in a separate directory with name "FRT" and the ' 'ODFs can be visualized by loading the saved ".odf" file in BrainSuite. ' - 'The derived generalized-FA (GFA) maps are also saved in the output ' - 'directory. ') + "The derived generalized-FA (GFA) maps are also saved in the output " + "directory. ", + ) estimateODF_3DShore = traits.Float( - argstr='--3dshore --diffusion_time_ms %f', - desc='Estimates ODFs using 3Dshore. Pass in diffusion time, in ms') + argstr="--3dshore --diffusion_time_ms %f", + desc="Estimates ODFs using 3Dshore. Pass in diffusion time, in ms", + ) odfLambta = traits.Bool( - argstr='--odf-lambda ', - desc= - 'Sets the regularization parameter, lambda, of the Laplace-Beltrami ' - 'operator while estimating ODFs. The default value is set to 0.006 . This ' - 'can be used to set the appropriate regularization for the input ' - 'diffusion data. ') + argstr="--odf-lambda ", + desc="Sets the regularization parameter, lambda, of the Laplace-Beltrami " + "operator while estimating ODFs. The default value is set to 0.006 . This " + "can be used to set the appropriate regularization for the input " + "diffusion data. ", + ) t1Mask = File( - argstr='--t1-mask %s', - desc= - 'Specifies the filename of the brain-mask file for input T1-weighted ' - 'image. This mask can be same as the brain mask generated during ' - 'BrainSuite extraction sequence. For best results, the mask should not ' - 'include any extra-meningial tissues from T1-weighted image. The mask ' - 'must be in the same coordinates as input T1-weighted image (i.e. should ' - 'overlay correctly with input .bfc.nii.gz file in ' - 'BrainSuite). This mask is used for co-registration and defining brain ' - 'boundary for statistics computation. 
The mask can be generated and/or ' - 'edited in BrainSuite. In case outputDiffusionCoordinates is also ' - 'used, this mask is first transformed to diffusion coordinate and the ' - 'transformed mask is used for defining brain boundary in diffusion ' - 'coordinates. When t1Mask is not set, BDP will try to use ' - 'fileprefix>.mask.nii.gz as brain-mask. If .mask.nii.gz is ' - 'not found, then BDP will use the input .bfc.nii.gz itself as ' - 'mask (i.e. all non-zero voxels in .bfc.nii.gz is assumed to ' - 'constitute brain mask). ') + argstr="--t1-mask %s", + desc="Specifies the filename of the brain-mask file for input T1-weighted " + "image. This mask can be same as the brain mask generated during " + "BrainSuite extraction sequence. For best results, the mask should not " + "include any extra-meningial tissues from T1-weighted image. The mask " + "must be in the same coordinates as input T1-weighted image (i.e. should " + "overlay correctly with input .bfc.nii.gz file in " + "BrainSuite). This mask is used for co-registration and defining brain " + "boundary for statistics computation. The mask can be generated and/or " + "edited in BrainSuite. In case outputDiffusionCoordinates is also " + "used, this mask is first transformed to diffusion coordinate and the " + "transformed mask is used for defining brain boundary in diffusion " + "coordinates. When t1Mask is not set, BDP will try to use " + "fileprefix>.mask.nii.gz as brain-mask. If .mask.nii.gz is " + "not found, then BDP will use the input .bfc.nii.gz itself as " + "mask (i.e. all non-zero voxels in .bfc.nii.gz is assumed to " + "constitute brain mask). ", + ) dwiMask = File( - argstr='--dwi-mask %s', - desc= - 'Specifies the filename of the brain-mask file for diffusion data. This ' - 'mask is used only for co-registration purposes and can affect overall ' - 'quality of co-registration (see t1Mask for definition of brain mask ' - 'for statistics computation). The mask must be a 3D volume and should be ' - 'in the same coordinates as input Diffusion file/data (i.e. should ' - 'overlay correctly with input diffusion data in BrainSuite). For best ' - 'results, the mask should include only brain voxels (CSF voxels around ' - 'brain is also acceptable). When this flag is not used, BDP will generate ' - 'a pseudo mask using first b=0 image volume and would save it as ' - 'fileprefix>.dwi.RSA.mask.nii.gz. In case co-registration is not ' - 'accurate with automatically generated pseudo mask, BDP should be re-run ' - 'with a refined diffusion mask. The mask can be generated and/or edited ' - 'in BrainSuite. ') + argstr="--dwi-mask %s", + desc="Specifies the filename of the brain-mask file for diffusion data. This " + "mask is used only for co-registration purposes and can affect overall " + "quality of co-registration (see t1Mask for definition of brain mask " + "for statistics computation). The mask must be a 3D volume and should be " + "in the same coordinates as input Diffusion file/data (i.e. should " + "overlay correctly with input diffusion data in BrainSuite). For best " + "results, the mask should include only brain voxels (CSF voxels around " + "brain is also acceptable). When this flag is not used, BDP will generate " + "a pseudo mask using first b=0 image volume and would save it as " + "fileprefix>.dwi.RSA.mask.nii.gz. In case co-registration is not " + "accurate with automatically generated pseudo mask, BDP should be re-run " + "with a refined diffusion mask. The mask can be generated and/or edited " + "in BrainSuite. 
", + ) rigidRegMeasure = traits.Enum( - 'MI', - 'INVERSION', - 'BDP', - argstr='--rigid-reg-measure %s', - desc='Defines the similarity measure to be used for rigid registration. ' + "MI", + "INVERSION", + "BDP", + argstr="--rigid-reg-measure %s", + desc="Defines the similarity measure to be used for rigid registration. " 'Possible measures are "MI", "INVERSION" and "BDP". MI measure uses ' - 'normalized mutual information based cost function. INVERSION measure ' - 'uses simpler cost function based on sum of squared difference by ' - 'exploiting the approximate inverse-contrast relationship in T1- and ' - 'T2-weighted images. BDP measure combines MI and INVERSION. It starts ' - 'with INVERSION measure and refines the result with MI measure. BDP is ' - 'the default measure when this trait is not set. ') + "normalized mutual information based cost function. INVERSION measure " + "uses simpler cost function based on sum of squared difference by " + "exploiting the approximate inverse-contrast relationship in T1- and " + "T2-weighted images. BDP measure combines MI and INVERSION. It starts " + "with INVERSION measure and refines the result with MI measure. BDP is " + "the default measure when this trait is not set. ", + ) dcorrRegMeasure = traits.Enum( - 'MI', - 'INVERSION-EPI', - 'INVERSION-T1', - 'INVERSION-BOTH', - 'BDP', - argstr='--dcorr-reg-method %s', - desc='Defines the method for registration-based distortion correction. ' + "MI", + "INVERSION-EPI", + "INVERSION-T1", + "INVERSION-BOTH", + "BDP", + argstr="--dcorr-reg-method %s", + desc="Defines the method for registration-based distortion correction. " 'Possible methods are "MI", "INVERSION-EPI", "INVERSION-T1", ' 'INVERSION-BOTH", and "BDP". MI method uses normalized mutual ' - 'information based cost-function while estimating the distortion field. ' - 'INVERSION-based method uses simpler cost function based on sum of ' - 'squared difference by exploiting the known approximate contrast ' - 'relationship in T1- and T2-weighted images. T2-weighted EPI is inverted ' - 'when INVERSION-EPI is used; T1-image is inverted when INVERSION-T1 is ' - 'used; and both are inverted when INVERSION-BOTH is used. BDP method add ' - 'the MI-based refinement after the correction using INVERSION-BOTH ' - 'method. BDP is the default method when this trait is not set. ') + "information based cost-function while estimating the distortion field. " + "INVERSION-based method uses simpler cost function based on sum of " + "squared difference by exploiting the known approximate contrast " + "relationship in T1- and T2-weighted images. T2-weighted EPI is inverted " + "when INVERSION-EPI is used; T1-image is inverted when INVERSION-T1 is " + "used; and both are inverted when INVERSION-BOTH is used. BDP method add " + "the MI-based refinement after the correction using INVERSION-BOTH " + "method. BDP is the default method when this trait is not set. ", + ) dcorrWeight = traits.Float( - argstr='--dcorr-regularization-wt %f', - desc= - 'Sets the (scalar) weighting parameter for regularization penalty in ' - 'registration-based distortion correction. Set this trait to a single, non-negative ' - 'number which specifies the weight. A large regularization weight encourages ' - 'smoother distortion field at the cost of low measure of image similarity ' - 'after distortion correction. On the other hand, a smaller regularization ' - 'weight can result into higher measure of image similarity but with ' - 'unrealistic and unsmooth distortion field. 
A weight of 0.5 would reduce ' - 'the penalty to half of the default regularization penalty (By default, this weight ' - 'is set to 1.0). Similarly, a weight of 2.0 ' - 'would increase the penalty to twice of the default penalty. ') + argstr="--dcorr-regularization-wt %f", + desc="Sets the (scalar) weighting parameter for regularization penalty in " + "registration-based distortion correction. Set this trait to a single, non-negative " + "number which specifies the weight. A large regularization weight encourages " + "smoother distortion field at the cost of low measure of image similarity " + "after distortion correction. On the other hand, a smaller regularization " + "weight can result into higher measure of image similarity but with " + "unrealistic and unsmooth distortion field. A weight of 0.5 would reduce " + "the penalty to half of the default regularization penalty (By default, this weight " + "is set to 1.0). Similarly, a weight of 2.0 " + "would increase the penalty to twice of the default penalty. ", + ) skipDistortionCorr = traits.Bool( - argstr='--no-distortion-correction', - desc='Skips distortion correction completely and performs only a rigid ' - 'registration of diffusion and T1-weighted image. This can be useful when ' - 'the input diffusion images do not have any distortion or they have been ' - 'corrected for distortion. ') + argstr="--no-distortion-correction", + desc="Skips distortion correction completely and performs only a rigid " + "registration of diffusion and T1-weighted image. This can be useful when " + "the input diffusion images do not have any distortion or they have been " + "corrected for distortion. ", + ) skipNonuniformityCorr = traits.Bool( - argstr='--no-nonuniformity-correction', - desc='Skips intensity non-uniformity correction in b=0 image for ' - 'registration-based distortion correction. The intensity non-uniformity ' - 'correction does not affect any diffusion modeling. ') + argstr="--no-nonuniformity-correction", + desc="Skips intensity non-uniformity correction in b=0 image for " + "registration-based distortion correction. The intensity non-uniformity " + "correction does not affect any diffusion modeling. ", + ) skipIntensityCorr = traits.Bool( - argstr='--no-intensity-correction', - xor=['fieldmapCorrectionMethod'], - desc= - 'Disables intensity correction when performing distortion correction. ' - 'Intensity correction can change the noise distribution in the corrected ' - 'image, but it does not affect estimated diffusion parameters like FA, ' - 'etc. ') + argstr="--no-intensity-correction", + xor=["fieldmapCorrectionMethod"], + desc="Disables intensity correction when performing distortion correction. " + "Intensity correction can change the noise distribution in the corrected " + "image, but it does not affect estimated diffusion parameters like FA, " + "etc. ", + ) fieldmapCorrection = File( - argstr='--fieldmap-correction %s', - requires=['echoSpacing'], - desc= - 'Use an acquired fieldmap for distortion correction. The fieldmap must ' - 'have units of radians/second. Specify the filename of the fieldmap file. ' - 'The field of view (FOV) of the fieldmap scan must cover the FOV of the diffusion ' - 'scan. BDP will try to check the overlap of the FOV of the two scans and ' + argstr="--fieldmap-correction %s", + requires=["echoSpacing"], + desc="Use an acquired fieldmap for distortion correction. The fieldmap must " + "have units of radians/second. Specify the filename of the fieldmap file. 
" + "The field of view (FOV) of the fieldmap scan must cover the FOV of the diffusion " + "scan. BDP will try to check the overlap of the FOV of the two scans and " 'will issue a warning/error if the diffusion scan"s FOV is not fully ' 'covered by the fieldmap"s FOV. BDP uses all of the information saved in ' - 'the NIfTI header to compute the FOV. If you get this error and think ' - 'that it is incorrect, then it can be suppressed using the flag ' - 'ignore-fieldmap-FOV. Neither the image matrix size nor the imaging ' - 'grid resolution of the fieldmap needs to be the same as that of the ' - 'diffusion scan, but the fieldmap must be pre-registred to the diffusion ' - 'scan. BDP does NOT align the fieldmap to the diffusion scan, nor does it ' - 'check the alignment of the fieldmap and diffusion scans. Only NIfTI ' - 'files with extension of .nii or .nii.gz are supported. Fieldmap-based ' - 'distortion correction also requires the echoSpacing. Also ' - 'fieldmapCorrectionMethod allows you to define method for ' - 'distortion correction. least squares is the default method. ') + "the NIfTI header to compute the FOV. If you get this error and think " + "that it is incorrect, then it can be suppressed using the flag " + "ignore-fieldmap-FOV. Neither the image matrix size nor the imaging " + "grid resolution of the fieldmap needs to be the same as that of the " + "diffusion scan, but the fieldmap must be pre-registred to the diffusion " + "scan. BDP does NOT align the fieldmap to the diffusion scan, nor does it " + "check the alignment of the fieldmap and diffusion scans. Only NIfTI " + "files with extension of .nii or .nii.gz are supported. Fieldmap-based " + "distortion correction also requires the echoSpacing. Also " + "fieldmapCorrectionMethod allows you to define method for " + "distortion correction. least squares is the default method. ", + ) fieldmapCorrectionMethod = traits.Enum( - 'pixelshift', - 'leastsq', - xor=['skipIntensityCorr'], - argstr='--fieldmap-correction-method %s', - desc='Defines the distortion correction method while using fieldmap. ' + "pixelshift", + "leastsq", + xor=["skipIntensityCorr"], + argstr="--fieldmap-correction-method %s", + desc="Defines the distortion correction method while using fieldmap. " 'Possible methods are "pixelshift" and "leastsq". leastsq is the default ' - 'method when this flag is not used. Pixel-shift (pixelshift) method uses ' - 'image interpolation to un-distort the distorted diffusion images. Least ' - 'squares (leastsq) method uses a physical model of distortion which is ' - 'more accurate (and more computationally expensive) than pixel-shift ' - 'method.') + "method when this flag is not used. Pixel-shift (pixelshift) method uses " + "image interpolation to un-distort the distorted diffusion images. Least " + "squares (leastsq) method uses a physical model of distortion which is " + "more accurate (and more computationally expensive) than pixel-shift " + "method.", + ) ignoreFieldmapFOV = traits.Bool( - argstr='--ignore-fieldmap-fov', - desc= - 'Supresses the error generated by an insufficient field of view of the ' - 'input fieldmap and continues with the processing. It is useful only when ' - 'used with fieldmap-based distortion correction. See ' - 'fieldmap-correction for a detailed explanation. ') + argstr="--ignore-fieldmap-fov", + desc="Supresses the error generated by an insufficient field of view of the " + "input fieldmap and continues with the processing. 
It is useful only when " + "used with fieldmap-based distortion correction. See " + "fieldmap-correction for a detailed explanation. ", + ) fieldmapSmooth = traits.Float( - argstr='--fieldmap-smooth3=%f', - desc='Applies 3D Gaussian smoothing with a standard deviation of S ' - 'millimeters (mm) to the input fieldmap before applying distortion ' - 'correction. This trait is only useful with ' - 'fieldmapCorrection. Skip this trait for no smoothing. ') + argstr="--fieldmap-smooth3=%f", + desc="Applies 3D Gaussian smoothing with a standard deviation of S " + "millimeters (mm) to the input fieldmap before applying distortion " + "correction. This trait is only useful with " + "fieldmapCorrection. Skip this trait for no smoothing. ", + ) transformDiffusionVolume = File( - argstr='--transform-diffusion-volume %s', - desc='This flag allows to define custom volumes in diffusion coordinate ' - 'which would be transformed into T1 coordinate in a rigid fashion. The ' - 'flag must be followed by the name of either a NIfTI file or of a folder ' - 'that contains one or more NIfTI files. All of the files must be in ' - 'diffusion coordinate, i.e. the files should overlay correctly with the ' - 'diffusion scan in BrainSuite. Only NIfTI files with an extension of .nii ' - 'or .nii.gz are supported. The transformed files are written to the ' + argstr="--transform-diffusion-volume %s", + desc="This flag allows to define custom volumes in diffusion coordinate " + "which would be transformed into T1 coordinate in a rigid fashion. The " + "flag must be followed by the name of either a NIfTI file or of a folder " + "that contains one or more NIfTI files. All of the files must be in " + "diffusion coordinate, i.e. the files should overlay correctly with the " + "diffusion scan in BrainSuite. Only NIfTI files with an extension of .nii " + "or .nii.gz are supported. The transformed files are written to the " 'output directory with suffix ".T1_coord" in the filename and will not be ' - 'corrected for distortion, if any. The trait transformInterpolation can ' - 'be used to define the type of interpolation that would be used (default ' - 'is set to linear). If you are attempting to transform a label file or ' + "corrected for distortion, if any. The trait transformInterpolation can " + "be used to define the type of interpolation that would be used (default " + "is set to linear). If you are attempting to transform a label file or " 'mask file, use "nearest" interpolation method with transformInterpolation. ' - 'See also transformT1Volume and transformInterpolation') + "See also transformT1Volume and transformInterpolation", + ) transformT1Volume = File( - argstr='--transform-t1-volume %s', - desc='Same as transformDiffusionVolume except that files specified must ' - 'be in T1 coordinate, i.e. the files should overlay correctly with the ' - 'input .bfc.nii.gz files in BrainSuite. BDP transforms these ' - 'data/images from T1 coordinate to diffusion coordinate. The transformed ' + argstr="--transform-t1-volume %s", + desc="Same as transformDiffusionVolume except that files specified must " + "be in T1 coordinate, i.e. the files should overlay correctly with the " + "input .bfc.nii.gz files in BrainSuite. BDP transforms these " + "data/images from T1 coordinate to diffusion coordinate. The transformed " 'files are written to the output directory with suffix ".D_coord" in the ' - 'filename. See also transformDiffusionVolume and transformInterpolation. ' + "filename. 
See also transformDiffusionVolume and transformInterpolation. ", ) transformInterpolation = traits.Enum( - 'linear', - 'nearest', - 'cubic', - 'spline', - argstr='--transform-interpolation %s', - desc= - 'Defines the type of interpolation method which would be used while ' - 'transforming volumes defined by transformT1Volume and ' + "linear", + "nearest", + "cubic", + "spline", + argstr="--transform-interpolation %s", + desc="Defines the type of interpolation method which would be used while " + "transforming volumes defined by transformT1Volume and " 'transformDiffusionVolume. Possible methods are "linear", "nearest", ' - '"cubic" and "spline". By default, "linear" interpolation is used. ') + '"cubic" and "spline". By default, "linear" interpolation is used. ', + ) transformT1Surface = File( - argstr='--transform-t1-surface %s', - desc='Similar to transformT1Volume, except that this flag allows ' - 'transforming surfaces (instead of volumes) in T1 coordinate into ' - 'diffusion coordinate in a rigid fashion. The flag must be followed by ' - 'the name of either a .dfs file or of a folder that contains one or more ' - 'dfs files. All of the files must be in T1 coordinate, i.e. the files ' - 'should overlay correctly with the T1-weighted scan in BrainSuite. The ' - 'transformed files are written to the output directory with suffix ' - 'D_coord" in the filename. ') + argstr="--transform-t1-surface %s", + desc="Similar to transformT1Volume, except that this flag allows " + "transforming surfaces (instead of volumes) in T1 coordinate into " + "diffusion coordinate in a rigid fashion. The flag must be followed by " + "the name of either a .dfs file or of a folder that contains one or more " + "dfs files. All of the files must be in T1 coordinate, i.e. the files " + "should overlay correctly with the T1-weighted scan in BrainSuite. The " + "transformed files are written to the output directory with suffix " + 'D_coord" in the filename. ', + ) transformDiffusionSurface = File( - argstr='--transform-diffusion-surface %s', - desc='Same as transformT1Volume, except that the .dfs files specified ' - 'must be in diffusion coordinate, i.e. the surface files should overlay ' - 'correctly with the diffusion scan in BrainSuite. The transformed files ' + argstr="--transform-diffusion-surface %s", + desc="Same as transformT1Volume, except that the .dfs files specified " + "must be in diffusion coordinate, i.e. the surface files should overlay " + "correctly with the diffusion scan in BrainSuite. The transformed files " 'are written to the output directory with suffix ".T1_coord" in the ' - 'filename. See also transformT1Volume. ') + "filename. See also transformT1Volume. ", + ) transformDataOnly = traits.Bool( - argstr='--transform-data-only', - desc= - 'Skip all of the processing (co-registration, distortion correction and ' - 'tensor/ODF estimation) and directly start transformation of defined ' - 'custom volumes, mask and labels (using transformT1Volume, ' - 'transformDiffusionVolume, transformT1Surface, ' - 'transformDiffusionSurface, customDiffusionLabel, ' - 'customT1Label). This flag is useful when BDP was previously run on a ' - 'subject (or ) and some more data (volumes, mask or labels) ' - 'need to be transformed across the T1-diffusion coordinate spaces. This ' - 'assumes that all the necessary files were generated earlier and all of ' - 'the other flags MUST be used in the same way as they were in the initial ' - 'BDP run that processed the data. 
') + argstr="--transform-data-only", + desc="Skip all of the processing (co-registration, distortion correction and " + "tensor/ODF estimation) and directly start transformation of defined " + "custom volumes, mask and labels (using transformT1Volume, " + "transformDiffusionVolume, transformT1Surface, " + "transformDiffusionSurface, customDiffusionLabel, " + "customT1Label). This flag is useful when BDP was previously run on a " + "subject (or ) and some more data (volumes, mask or labels) " + "need to be transformed across the T1-diffusion coordinate spaces. This " + "assumes that all the necessary files were generated earlier and all of " + "the other flags MUST be used in the same way as they were in the initial " + "BDP run that processed the data. ", + ) generateStats = traits.Bool( - argstr='--generate-stats', - desc= - 'Generate ROI-wise statistics of estimated diffusion tensor parameters. ' - 'Units of the reported statistics are same as that of the estimated ' - 'tensor parameters (see estimateTensors). Mean, variance, and voxel counts of ' - 'white matter(WM), grey matter(GM), and both WM and GM combined are ' - 'written for each estimated parameter in a separate comma-seperated value ' - 'csv) file. BDP uses the ROI labels generated by Surface-Volume ' - 'Registration (SVReg) in the BrainSuite extraction sequence. ' - 'Specifically, it looks for labels saved in either ' - 'fileprefix>.svreg.corr.label.nii.gz or .svreg.label.nii.gz. ' - 'In case both files are present, only the first file is used. Also see ' - 'customDiffusionLabel and customT1Label for specifying your own ' - 'ROIs. It is also possible to forgo computing the SVReg ROI-wise ' - 'statistics and only compute stats with custom labels if SVReg label is ' - 'missing. BDP also transfers (and saves) the label/mask files to ' - 'appropriate coordinates before computing statistics. Also see ' - 'outputDiffusionCoordinates for outputs in diffusion coordinate and ' - 'forcePartialROIStats for an important note about field of view of ' - 'diffusion and T1-weighted scans. ') + argstr="--generate-stats", + desc="Generate ROI-wise statistics of estimated diffusion tensor parameters. " + "Units of the reported statistics are same as that of the estimated " + "tensor parameters (see estimateTensors). Mean, variance, and voxel counts of " + "white matter(WM), grey matter(GM), and both WM and GM combined are " + "written for each estimated parameter in a separate comma-seperated value " + "csv) file. BDP uses the ROI labels generated by Surface-Volume " + "Registration (SVReg) in the BrainSuite extraction sequence. " + "Specifically, it looks for labels saved in either " + "fileprefix>.svreg.corr.label.nii.gz or .svreg.label.nii.gz. " + "In case both files are present, only the first file is used. Also see " + "customDiffusionLabel and customT1Label for specifying your own " + "ROIs. It is also possible to forgo computing the SVReg ROI-wise " + "statistics and only compute stats with custom labels if SVReg label is " + "missing. BDP also transfers (and saves) the label/mask files to " + "appropriate coordinates before computing statistics. Also see " + "outputDiffusionCoordinates for outputs in diffusion coordinate and " + "forcePartialROIStats for an important note about field of view of " + "diffusion and T1-weighted scans. 
", + ) onlyStats = traits.Bool( - argstr='--generate-only-stats', - desc= - 'Skip all of the processing (co-registration, distortion correction and ' - 'tensor/ODF estimation) and directly start computation of statistics. ' - 'This flag is useful when BDP was previously run on a subject (or ' - 'fileprefix>) and statistics need to be (re-)computed later. This ' - 'assumes that all the necessary files were generated earlier. All of the ' - 'other flags MUST be used in the same way as they were in the initial BDP ' - 'run that processed the data. ') + argstr="--generate-only-stats", + desc="Skip all of the processing (co-registration, distortion correction and " + "tensor/ODF estimation) and directly start computation of statistics. " + "This flag is useful when BDP was previously run on a subject (or " + "fileprefix>) and statistics need to be (re-)computed later. This " + "assumes that all the necessary files were generated earlier. All of the " + "other flags MUST be used in the same way as they were in the initial BDP " + "run that processed the data. ", + ) forcePartialROIStats = traits.Bool( - argstr='--force-partial-roi-stats', - desc= - 'The field of view (FOV) of the diffusion and T1-weighted scans may ' - 'differ significantly in some situations. This may result in partial ' - 'acquisitions of some ROIs in the diffusion scan. By default, BDP does ' - 'not compute statistics for partially acquired ROIs and shows warnings. ' - 'This flag forces computation of statistics for all ROIs, including those ' - 'which are partially acquired. When this flag is used, number of missing ' - 'voxels are also reported for each ROI in statistics files. Number of ' - 'missing voxels are reported in the same coordinate system as the ' - 'statistics file. ') + argstr="--force-partial-roi-stats", + desc="The field of view (FOV) of the diffusion and T1-weighted scans may " + "differ significantly in some situations. This may result in partial " + "acquisitions of some ROIs in the diffusion scan. By default, BDP does " + "not compute statistics for partially acquired ROIs and shows warnings. " + "This flag forces computation of statistics for all ROIs, including those " + "which are partially acquired. When this flag is used, number of missing " + "voxels are also reported for each ROI in statistics files. Number of " + "missing voxels are reported in the same coordinate system as the " + "statistics file. ", + ) customDiffusionLabel = File( - argstr='--custom-diffusion-label %s', - desc= - 'BDP supports custom ROIs in addition to those generated by BrainSuite ' - 'SVReg) for ROI-wise statistics calculation. The flag must be followed ' - 'by the name of either a file (custom ROI file) or of a folder that ' - 'contains one or more ROI files. All of the files must be in diffusion ' - 'coordinate, i.e. the label files should overlay correctly with the ' - 'diffusion scan in BrainSuite. These input label files are also ' - 'transferred (and saved) to T1 coordinate for statistics in T1 ' - 'coordinate. BDP uses nearest-neighborhood interpolation for this ' - 'transformation. Only NIfTI files, with an extension of .nii or .nii.gz ' - 'are supported. In order to avoid confusion with other ROI IDs in the ' - 'statistic files, a 5-digit ROI ID is generated for each custom label ' - 'found and the mapping of ID to label file is saved in the file ' - 'fileprefix>.BDP_ROI_MAP.xml. Custom label files can also be generated ' - 'by using the label painter tool in BrainSuite. 
See also ' - 'customLabelXML') + argstr="--custom-diffusion-label %s", + desc="BDP supports custom ROIs in addition to those generated by BrainSuite " + "SVReg) for ROI-wise statistics calculation. The flag must be followed " + "by the name of either a file (custom ROI file) or of a folder that " + "contains one or more ROI files. All of the files must be in diffusion " + "coordinate, i.e. the label files should overlay correctly with the " + "diffusion scan in BrainSuite. These input label files are also " + "transferred (and saved) to T1 coordinate for statistics in T1 " + "coordinate. BDP uses nearest-neighborhood interpolation for this " + "transformation. Only NIfTI files, with an extension of .nii or .nii.gz " + "are supported. In order to avoid confusion with other ROI IDs in the " + "statistic files, a 5-digit ROI ID is generated for each custom label " + "found and the mapping of ID to label file is saved in the file " + "fileprefix>.BDP_ROI_MAP.xml. Custom label files can also be generated " + "by using the label painter tool in BrainSuite. See also " + "customLabelXML", + ) customT1Label = File( - argstr='--custom-t1-label %s', - desc='Same as customDiffusionLabelexcept that the label files specified ' - 'must be in T1 coordinate, i.e. the label files should overlay correctly ' - 'with the T1-weighted scan in BrainSuite. If the trait ' - 'outputDiffusionCoordinates is also used then these input label files ' - 'are also transferred (and saved) to diffusion coordinate for statistics ' - 'in diffusion coordinate. BDP uses nearest-neighborhood interpolation for ' - 'this transformation. See also customLabelXML. ') + argstr="--custom-t1-label %s", + desc="Same as customDiffusionLabelexcept that the label files specified " + "must be in T1 coordinate, i.e. the label files should overlay correctly " + "with the T1-weighted scan in BrainSuite. If the trait " + "outputDiffusionCoordinates is also used then these input label files " + "are also transferred (and saved) to diffusion coordinate for statistics " + "in diffusion coordinate. BDP uses nearest-neighborhood interpolation for " + "this transformation. See also customLabelXML. ", + ) customLabelXML = File( - argstr='--custom-label-xml %s', - desc= - 'BrainSuite saves a descriptions of the SVReg labels (ROI name, ID, ' - 'color, and description) in an .xml file ' + argstr="--custom-label-xml %s", + desc="BrainSuite saves a descriptions of the SVReg labels (ROI name, ID, " + "color, and description) in an .xml file " 'brainsuite_labeldescription.xml). BDP uses the ROI ID"s from this xml ' - 'file to report statistics. This flag allows for the use of a custom ' - 'label description xml file. The flag must be followed by an xml ' - 'filename. This can be useful when you want to limit the ROIs for which ' - 'you compute statistics. You can also use custom xml files to name your ' + "file to report statistics. This flag allows for the use of a custom " + "label description xml file. The flag must be followed by an xml " + "filename. This can be useful when you want to limit the ROIs for which " + "you compute statistics. You can also use custom xml files to name your " 'own ROIs (assign ID"s) for custom labels. BrainSuite can save a label ' - 'description in .xml format after using the label painter tool to create ' + "description in .xml format after using the label painter tool to create " 'a ROI label. 
The xml file MUST be in the same format as BrainSuite"s ' - 'label description file (see brainsuite_labeldescription.xml for an ' - 'example). When this flag is used, NO 5-digit ROI ID is generated for ' - 'custom label files and NO Statistics will be calculated for ROIs not ' - 'identified in the custom xml file. See also customDiffusionLabel and ' - 'customT1Label.') + "label description file (see brainsuite_labeldescription.xml for an " + "example). When this flag is used, NO 5-digit ROI ID is generated for " + "custom label files and NO Statistics will be calculated for ROIs not " + "identified in the custom xml file. See also customDiffusionLabel and " + "customT1Label.", + ) outputSubdir = traits.Str( - argstr='--output-subdir %s', - desc= - 'By default, BDP writes out all the output (and intermediate) files in ' - 'the same directory (or folder) as the BFC file. This flag allows to ' - 'specify a sub-directory name in which output (and intermediate) files ' - 'would be written. BDP will create the sub-directory in the same ' - 'directory as BFC file. should be the name of the ' - 'sub-directory without any path. This can be useful to organize all ' - 'outputs generated by BDP in a separate sub-directory. ') + argstr="--output-subdir %s", + desc="By default, BDP writes out all the output (and intermediate) files in " + "the same directory (or folder) as the BFC file. This flag allows to " + "specify a sub-directory name in which output (and intermediate) files " + "would be written. BDP will create the sub-directory in the same " + "directory as BFC file. should be the name of the " + "sub-directory without any path. This can be useful to organize all " + "outputs generated by BDP in a separate sub-directory. ", + ) outputDiffusionCoordinates = traits.Bool( - argstr='--output-diffusion-coordinate', - desc= - 'Enables estimation of diffusion tensors and/or ODFs (and statistics if ' - 'applicable) in the native diffusion coordinate in addition to the ' - 'default T1-coordinate. All native diffusion coordinate files are saved ' + argstr="--output-diffusion-coordinate", + desc="Enables estimation of diffusion tensors and/or ODFs (and statistics if " + "applicable) in the native diffusion coordinate in addition to the " + "default T1-coordinate. All native diffusion coordinate files are saved " 'in a seperate folder named "diffusion_coord_outputs". In case statistics ' - 'computation is required, it will also transform/save all label/mask ' - 'files required to diffusion coordinate (see generateStats for ' - 'details). ') + "computation is required, it will also transform/save all label/mask " + "files required to diffusion coordinate (see generateStats for " + "details). ", + ) flagConfigFile = File( - argstr='--flag-conf-file %s', - desc= - 'Uses the defined file to specify BDP flags which can be useful for ' - 'batch processing. A flag configuration file is a plain text file which ' + argstr="--flag-conf-file %s", + desc="Uses the defined file to specify BDP flags which can be useful for " + "batch processing. A flag configuration file is a plain text file which " 'can contain any number of BDP"s optional flags (and their parameters) ' - 'separated by whitespace. Everything coming after # until end-of-line is ' - 'treated as comment and is ignored. If a flag is defined in configuration ' - 'file and is also specified in the command used to run BDP, then the ' - 'later get preference and overrides the definition in configuration ' - 'file. ') + "separated by whitespace. 
Everything coming after # until end-of-line is " + "treated as comment and is ignored. If a flag is defined in configuration " + "file and is also specified in the command used to run BDP, then the " + "later get preference and overrides the definition in configuration " + "file. ", + ) outPrefix = traits.Str( - argstr='--output-fileprefix %s', - desc='Specifies output fileprefix when noStructuralRegistration is ' - 'used. The fileprefix can not start with a dash (-) and should be a ' - 'simple string reflecting the absolute path to desired location, along with outPrefix. When this flag is ' - 'not specified (and noStructuralRegistration is used) then the output ' - 'files have same file-base as the input diffusion file. This trait is ' - 'ignored when noStructuralRegistration is not used. ') + argstr="--output-fileprefix %s", + desc="Specifies output fileprefix when noStructuralRegistration is " + "used. The fileprefix can not start with a dash (-) and should be a " + "simple string reflecting the absolute path to desired location, along with outPrefix. When this flag is " + "not specified (and noStructuralRegistration is used) then the output " + "files have same file-base as the input diffusion file. This trait is " + "ignored when noStructuralRegistration is not used. ", + ) threads = traits.Int( - argstr='--threads=%d', - desc='Sets the number of parallel process threads which can be used for ' - 'computations to N, where N must be an integer. Default value of N is ' - ' ') + argstr="--threads=%d", + desc="Sets the number of parallel process threads which can be used for " + "computations to N, where N must be an integer. Default value of N is " + " ", + ) lowMemory = traits.Bool( - argstr='--low-memory', - desc='Activates low-memory mode. This will run the registration-based ' - 'distortion correction at a lower resolution, which could result in a ' - 'less-accurate correction. This should only be used when no other ' - 'alternative is available. ') + argstr="--low-memory", + desc="Activates low-memory mode. This will run the registration-based " + "distortion correction at a lower resolution, which could result in a " + "less-accurate correction. This should only be used when no other " + "alternative is available. ", + ) ignoreMemory = traits.Bool( - argstr='--ignore-memory', - desc='Deactivates the inbuilt memory checks and forces BDP to run ' - 'registration-based distortion correction at its default resolution even ' - 'on machines with a low amount of memory. This may result in an ' - 'out-of-memory error when BDP cannot allocate sufficient memory. ') + argstr="--ignore-memory", + desc="Deactivates the inbuilt memory checks and forces BDP to run " + "registration-based distortion correction at its default resolution even " + "on machines with a low amount of memory. This may result in an " + "out-of-memory error when BDP cannot allocate sufficient memory. 
", + ) class BDP(CommandLine): @@ -1696,21 +1739,22 @@ class BDP(CommandLine): """ input_spec = BDPInputSpec - _cmd = 'bdp.sh' + _cmd = "bdp.sh" def _format_arg(self, name, spec, value): - if name == 'BVecBValPair': + if name == "BVecBValPair": return spec.argstr % (value[0], value[1]) - if name == 'dataSinkDelay': - return spec.argstr % '' + if name == "dataSinkDelay": + return spec.argstr % "" return super(BDP, self)._format_arg(name, spec, value) class ThicknessPVCInputSpec(CommandLineInputSpec): subjectFilePrefix = traits.Str( - argstr='%s', + argstr="%s", mandatory=True, - desc='Absolute path and filename prefix of the subject data') + desc="Absolute path and filename prefix of the subject data", + ) class ThicknessPVC(CommandLine): @@ -1737,7 +1781,7 @@ class ThicknessPVC(CommandLine): """ input_spec = ThicknessPVCInputSpec - _cmd = 'thicknessPVC.sh' + _cmd = "thicknessPVC.sh" # used to generate file names for outputs @@ -1748,7 +1792,7 @@ def getFileName(inputName, suffix): dotRegex = regex.compile("[^.]+") # extract between last slash and first period inputNoExtension = dotRegex.findall(fullInput)[0] - return os.path.abspath(''.join((inputNoExtension, suffix))) + return os.path.abspath("".join((inputNoExtension, suffix))) def l_outputs(self): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_BDP.py b/nipype/interfaces/brainsuite/tests/test_auto_BDP.py index ae25cb0598..94f95c5c2a 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_BDP.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_BDP.py @@ -5,123 +5,90 @@ def test_BDP_inputs(): input_map = dict( BVecBValPair=dict( - argstr='--bvec %s --bval %s', + argstr="--bvec %s --bval %s", mandatory=True, position=-1, - xor=['bMatrixFile'], + xor=["bMatrixFile"], ), - args=dict(argstr='%s', ), + args=dict(argstr="%s",), bMatrixFile=dict( - argstr='--bmat %s', + argstr="--bmat %s", extensions=None, mandatory=True, position=-1, - xor=['BVecBValPair'], + xor=["BVecBValPair"], ), - bValRatioThreshold=dict(argstr='--bval-ratio-threshold %f', ), + bValRatioThreshold=dict(argstr="--bval-ratio-threshold %f",), bfcFile=dict( - argstr='%s', + argstr="%s", extensions=None, mandatory=True, position=0, - xor=['noStructuralRegistration'], + xor=["noStructuralRegistration"], ), customDiffusionLabel=dict( - argstr='--custom-diffusion-label %s', - extensions=None, - ), - customLabelXML=dict( - argstr='--custom-label-xml %s', - extensions=None, - ), - customT1Label=dict( - argstr='--custom-t1-label %s', - extensions=None, - ), - dataSinkDelay=dict(argstr='%s', ), - dcorrRegMeasure=dict(argstr='--dcorr-reg-method %s', ), - dcorrWeight=dict(argstr='--dcorr-regularization-wt %f', ), - dwiMask=dict( - argstr='--dwi-mask %s', - extensions=None, - ), - echoSpacing=dict(argstr='--echo-spacing=%f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - estimateODF_3DShore=dict(argstr='--3dshore --diffusion_time_ms %f', ), - estimateODF_FRACT=dict(argstr='--FRACT', ), - estimateODF_FRT=dict(argstr='--FRT', ), - estimateTensors=dict(argstr='--tensors', ), + argstr="--custom-diffusion-label %s", extensions=None, + ), + customLabelXML=dict(argstr="--custom-label-xml %s", extensions=None,), + customT1Label=dict(argstr="--custom-t1-label %s", extensions=None,), + dataSinkDelay=dict(argstr="%s",), + dcorrRegMeasure=dict(argstr="--dcorr-reg-method %s",), + dcorrWeight=dict(argstr="--dcorr-regularization-wt %f",), + dwiMask=dict(argstr="--dwi-mask %s", extensions=None,), + echoSpacing=dict(argstr="--echo-spacing=%f",), + environ=dict(nohash=True, 
usedefault=True,), + estimateODF_3DShore=dict(argstr="--3dshore --diffusion_time_ms %f",), + estimateODF_FRACT=dict(argstr="--FRACT",), + estimateODF_FRT=dict(argstr="--FRT",), + estimateTensors=dict(argstr="--tensors",), fieldmapCorrection=dict( - argstr='--fieldmap-correction %s', + argstr="--fieldmap-correction %s", extensions=None, - requires=['echoSpacing'], + requires=["echoSpacing"], ), fieldmapCorrectionMethod=dict( - argstr='--fieldmap-correction-method %s', - xor=['skipIntensityCorr'], - ), - fieldmapSmooth=dict(argstr='--fieldmap-smooth3=%f', ), - flagConfigFile=dict( - argstr='--flag-conf-file %s', - extensions=None, - ), - forcePartialROIStats=dict(argstr='--force-partial-roi-stats', ), - generateStats=dict(argstr='--generate-stats', ), - ignoreFieldmapFOV=dict(argstr='--ignore-fieldmap-fov', ), - ignoreMemory=dict(argstr='--ignore-memory', ), + argstr="--fieldmap-correction-method %s", xor=["skipIntensityCorr"], + ), + fieldmapSmooth=dict(argstr="--fieldmap-smooth3=%f",), + flagConfigFile=dict(argstr="--flag-conf-file %s", extensions=None,), + forcePartialROIStats=dict(argstr="--force-partial-roi-stats",), + generateStats=dict(argstr="--generate-stats",), + ignoreFieldmapFOV=dict(argstr="--ignore-fieldmap-fov",), + ignoreMemory=dict(argstr="--ignore-memory",), inputDiffusionData=dict( - argstr='--nii %s', - extensions=None, - mandatory=True, - position=-2, + argstr="--nii %s", extensions=None, mandatory=True, position=-2, ), - lowMemory=dict(argstr='--low-memory', ), + lowMemory=dict(argstr="--low-memory",), noStructuralRegistration=dict( - argstr='--no-structural-registration', + argstr="--no-structural-registration", mandatory=True, position=0, - xor=['bfcFile'], - ), - odfLambta=dict(argstr='--odf-lambda ', ), - onlyStats=dict(argstr='--generate-only-stats', ), - outPrefix=dict(argstr='--output-fileprefix %s', ), - outputDiffusionCoordinates=dict( - argstr='--output-diffusion-coordinate', ), - outputSubdir=dict(argstr='--output-subdir %s', ), - phaseEncodingDirection=dict(argstr='--dir=%s', ), - rigidRegMeasure=dict(argstr='--rigid-reg-measure %s', ), - skipDistortionCorr=dict(argstr='--no-distortion-correction', ), + xor=["bfcFile"], + ), + odfLambta=dict(argstr="--odf-lambda ",), + onlyStats=dict(argstr="--generate-only-stats",), + outPrefix=dict(argstr="--output-fileprefix %s",), + outputDiffusionCoordinates=dict(argstr="--output-diffusion-coordinate",), + outputSubdir=dict(argstr="--output-subdir %s",), + phaseEncodingDirection=dict(argstr="--dir=%s",), + rigidRegMeasure=dict(argstr="--rigid-reg-measure %s",), + skipDistortionCorr=dict(argstr="--no-distortion-correction",), skipIntensityCorr=dict( - argstr='--no-intensity-correction', - xor=['fieldmapCorrectionMethod'], + argstr="--no-intensity-correction", xor=["fieldmapCorrectionMethod"], ), - skipNonuniformityCorr=dict(argstr='--no-nonuniformity-correction', ), - t1Mask=dict( - argstr='--t1-mask %s', - extensions=None, - ), - threads=dict(argstr='--threads=%d', ), - transformDataOnly=dict(argstr='--transform-data-only', ), + skipNonuniformityCorr=dict(argstr="--no-nonuniformity-correction",), + t1Mask=dict(argstr="--t1-mask %s", extensions=None,), + threads=dict(argstr="--threads=%d",), + transformDataOnly=dict(argstr="--transform-data-only",), transformDiffusionSurface=dict( - argstr='--transform-diffusion-surface %s', - extensions=None, + argstr="--transform-diffusion-surface %s", extensions=None, ), transformDiffusionVolume=dict( - argstr='--transform-diffusion-volume %s', - extensions=None, - ), - 
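The `_format_arg` override in the `BDP` class above handles two inputs specially: `BVecBValPair` expands a two-element value into the combined `--bvec %s --bval %s` argument string, and `dataSinkDelay` is rendered as an empty string so it never reaches the command line. Below is a minimal, self-contained sketch of that logic; the plain function is an illustration only and stands in for the nipype machinery, which the real method reaches via `super()._format_arg()` for every other input.

# Illustrative re-implementation of the two special cases in BDP._format_arg above.
# Sketch for readability only; not the nipype CommandLine base class.

def format_bdp_arg(name, argstr, value):
    if name == "BVecBValPair":
        # value is a (bvec_file, bval_file) pair filled into one combined argstr
        return argstr % (value[0], value[1])
    if name == "dataSinkDelay":
        # placeholder input used only for workflow sequencing; emits nothing
        return argstr % ""
    return argstr % value


assert (
    format_bdp_arg("BVecBValPair", "--bvec %s --bval %s", ["dwi.bvec", "dwi.bval"])
    == "--bvec dwi.bvec --bval dwi.bval"
)
assert format_bdp_arg("dataSinkDelay", "%s", ["unused"]) == ""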
transformInterpolation=dict(argstr='--transform-interpolation %s', ), - transformT1Surface=dict( - argstr='--transform-t1-surface %s', - extensions=None, - ), - transformT1Volume=dict( - argstr='--transform-t1-volume %s', - extensions=None, + argstr="--transform-diffusion-volume %s", extensions=None, ), + transformInterpolation=dict(argstr="--transform-interpolation %s",), + transformT1Surface=dict(argstr="--transform-t1-surface %s", extensions=None,), + transformT1Volume=dict(argstr="--transform-t1-volume %s", extensions=None,), ) inputs = BDP.input_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py b/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py index 3f41f15518..5e2588fd74 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py @@ -4,76 +4,49 @@ def test_Bfc_inputs(): input_map = dict( - args=dict(argstr='%s', ), - biasEstimateConvergenceThreshold=dict(argstr='--beps %f', ), - biasEstimateSpacing=dict(argstr='-s %d', ), - biasFieldEstimatesOutputPrefix=dict(argstr='--biasprefix %s', ), - biasRange=dict(argstr='%s', ), - controlPointSpacing=dict(argstr='-c %d', ), - convergenceThreshold=dict(argstr='--eps %f', ), - correctWholeVolume=dict(argstr='--extrapolate', ), - correctedImagesOutputPrefix=dict(argstr='--prefix %s', ), - correctionScheduleFile=dict( - argstr='--schedule %s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - histogramRadius=dict(argstr='-r %d', ), - histogramType=dict(argstr='%s', ), - inputMRIFile=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - ), - inputMaskFile=dict( - argstr='-m %s', - extensions=None, - hash_files=False, - ), - intermediate_file_type=dict(argstr='%s', ), - iterativeMode=dict(argstr='--iterate', ), - maxBias=dict( - argstr='-U %f', - usedefault=True, - ), - minBias=dict( - argstr='-L %f', - usedefault=True, - ), - outputBiasField=dict( - argstr='--bias %s', - extensions=None, - hash_files=False, - ), + args=dict(argstr="%s",), + biasEstimateConvergenceThreshold=dict(argstr="--beps %f",), + biasEstimateSpacing=dict(argstr="-s %d",), + biasFieldEstimatesOutputPrefix=dict(argstr="--biasprefix %s",), + biasRange=dict(argstr="%s",), + controlPointSpacing=dict(argstr="-c %d",), + convergenceThreshold=dict(argstr="--eps %f",), + correctWholeVolume=dict(argstr="--extrapolate",), + correctedImagesOutputPrefix=dict(argstr="--prefix %s",), + correctionScheduleFile=dict(argstr="--schedule %s", extensions=None,), + environ=dict(nohash=True, usedefault=True,), + histogramRadius=dict(argstr="-r %d",), + histogramType=dict(argstr="%s",), + inputMRIFile=dict(argstr="-i %s", extensions=None, mandatory=True,), + inputMaskFile=dict(argstr="-m %s", extensions=None, hash_files=False,), + intermediate_file_type=dict(argstr="%s",), + iterativeMode=dict(argstr="--iterate",), + maxBias=dict(argstr="-U %f", usedefault=True,), + minBias=dict(argstr="-L %f", usedefault=True,), + outputBiasField=dict(argstr="--bias %s", extensions=None, hash_files=False,), outputMRIVolume=dict( - argstr='-o %s', - extensions=None, - genfile=True, - hash_files=False, + argstr="-o %s", extensions=None, genfile=True, hash_files=False, ), outputMaskedBiasField=dict( - argstr='--maskedbias %s', - extensions=None, - hash_files=False, + argstr="--maskedbias %s", extensions=None, hash_files=False, ), - splineLambda=dict(argstr='-w %f', ), - timer=dict(argstr='--timer', ), - verbosityLevel=dict(argstr='-v %d', ), + splineLambda=dict(argstr="-w %f",), + 
timer=dict(argstr="--timer",), + verbosityLevel=dict(argstr="-v %d",), ) inputs = Bfc.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Bfc_outputs(): output_map = dict( - correctionScheduleFile=dict(extensions=None, ), - outputBiasField=dict(extensions=None, ), - outputMRIVolume=dict(extensions=None, ), - outputMaskedBiasField=dict(extensions=None, ), + correctionScheduleFile=dict(extensions=None,), + outputBiasField=dict(extensions=None,), + outputMRIVolume=dict(extensions=None,), + outputMaskedBiasField=dict(extensions=None,), ) outputs = Bfc.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Bse.py b/nipype/interfaces/brainsuite/tests/test_auto_Bse.py index 3476926f0e..a980010ef0 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Bse.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Bse.py @@ -4,92 +4,48 @@ def test_Bse_inputs(): input_map = dict( - args=dict(argstr='%s', ), - diffusionConstant=dict( - argstr='-d %f', - usedefault=True, - ), - diffusionIterations=dict( - argstr='-n %d', - usedefault=True, - ), - dilateFinalMask=dict( - argstr='-p', - usedefault=True, - ), - edgeDetectionConstant=dict( - argstr='-s %f', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputMRIFile=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - ), - noRotate=dict(argstr='--norotate', ), - outputCortexFile=dict( - argstr='--cortex %s', - extensions=None, - hash_files=False, - ), + args=dict(argstr="%s",), + diffusionConstant=dict(argstr="-d %f", usedefault=True,), + diffusionIterations=dict(argstr="-n %d", usedefault=True,), + dilateFinalMask=dict(argstr="-p", usedefault=True,), + edgeDetectionConstant=dict(argstr="-s %f", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + inputMRIFile=dict(argstr="-i %s", extensions=None, mandatory=True,), + noRotate=dict(argstr="--norotate",), + outputCortexFile=dict(argstr="--cortex %s", extensions=None, hash_files=False,), outputDetailedBrainMask=dict( - argstr='--hires %s', - extensions=None, - hash_files=False, + argstr="--hires %s", extensions=None, hash_files=False, ), outputDiffusionFilter=dict( - argstr='--adf %s', - extensions=None, - hash_files=False, - ), - outputEdgeMap=dict( - argstr='--edge %s', - extensions=None, - hash_files=False, + argstr="--adf %s", extensions=None, hash_files=False, ), + outputEdgeMap=dict(argstr="--edge %s", extensions=None, hash_files=False,), outputMRIVolume=dict( - argstr='-o %s', - extensions=None, - genfile=True, - hash_files=False, + argstr="-o %s", extensions=None, genfile=True, hash_files=False, ), outputMaskFile=dict( - argstr='--mask %s', - extensions=None, - genfile=True, - hash_files=False, - ), - radius=dict( - argstr='-r %f', - usedefault=True, - ), - timer=dict(argstr='--timer', ), - trim=dict( - argstr='--trim', - usedefault=True, - ), - verbosityLevel=dict( - argstr='-v %f', - usedefault=True, + argstr="--mask %s", extensions=None, genfile=True, hash_files=False, ), + radius=dict(argstr="-r %f", usedefault=True,), + timer=dict(argstr="--timer",), + trim=dict(argstr="--trim", usedefault=True,), + verbosityLevel=dict(argstr="-v %f", usedefault=True,), ) inputs = Bse.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Bse_outputs(): output_map = dict( - 
outputCortexFile=dict(extensions=None, ), - outputDetailedBrainMask=dict(extensions=None, ), - outputDiffusionFilter=dict(extensions=None, ), - outputEdgeMap=dict(extensions=None, ), - outputMRIVolume=dict(extensions=None, ), - outputMaskFile=dict(extensions=None, ), + outputCortexFile=dict(extensions=None,), + outputDetailedBrainMask=dict(extensions=None,), + outputDiffusionFilter=dict(extensions=None,), + outputEdgeMap=dict(extensions=None,), + outputMRIVolume=dict(extensions=None,), + outputMaskFile=dict(extensions=None,), ) outputs = Bse.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py b/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py index 91b610bbf2..e2ff64c071 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py @@ -4,74 +4,45 @@ def test_Cerebro_inputs(): input_map = dict( - args=dict(argstr='%s', ), - costFunction=dict( - argstr='-c %d', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + costFunction=dict(argstr="-c %d", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), inputAtlasLabelFile=dict( - argstr='--atlaslabels %s', - extensions=None, - mandatory=True, - ), - inputAtlasMRIFile=dict( - argstr='--atlas %s', - extensions=None, - mandatory=True, - ), - inputBrainMaskFile=dict( - argstr='-m %s', - extensions=None, - ), - inputMRIFile=dict( - argstr='-i %s', - extensions=None, - mandatory=True, + argstr="--atlaslabels %s", extensions=None, mandatory=True, ), - keepTempFiles=dict(argstr='--keep', ), - linearConvergence=dict(argstr='--linconv %f', ), + inputAtlasMRIFile=dict(argstr="--atlas %s", extensions=None, mandatory=True,), + inputBrainMaskFile=dict(argstr="-m %s", extensions=None,), + inputMRIFile=dict(argstr="-i %s", extensions=None, mandatory=True,), + keepTempFiles=dict(argstr="--keep",), + linearConvergence=dict(argstr="--linconv %f",), outputAffineTransformFile=dict( - argstr='--air %s', - extensions=None, - genfile=True, - ), - outputCerebrumMaskFile=dict( - argstr='-o %s', - extensions=None, - genfile=True, - ), - outputLabelVolumeFile=dict( - argstr='-l %s', - extensions=None, - genfile=True, + argstr="--air %s", extensions=None, genfile=True, ), + outputCerebrumMaskFile=dict(argstr="-o %s", extensions=None, genfile=True,), + outputLabelVolumeFile=dict(argstr="-l %s", extensions=None, genfile=True,), outputWarpTransformFile=dict( - argstr='--warp %s', - extensions=None, - genfile=True, - ), - tempDirectory=dict(argstr='--tempdir %s', ), - tempDirectoryBase=dict(argstr='--tempdirbase %s', ), - useCentroids=dict(argstr='--centroids', ), - verbosity=dict(argstr='-v %d', ), - warpConvergence=dict(argstr='--warpconv %f', ), - warpLabel=dict(argstr='--warplevel %d', ), + argstr="--warp %s", extensions=None, genfile=True, + ), + tempDirectory=dict(argstr="--tempdir %s",), + tempDirectoryBase=dict(argstr="--tempdirbase %s",), + useCentroids=dict(argstr="--centroids",), + verbosity=dict(argstr="-v %d",), + warpConvergence=dict(argstr="--warpconv %f",), + warpLabel=dict(argstr="--warplevel %d",), ) inputs = Cerebro.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Cerebro_outputs(): output_map = dict( - outputAffineTransformFile=dict(extensions=None, ), - outputCerebrumMaskFile=dict(extensions=None, ), - outputLabelVolumeFile=dict(extensions=None, ), - 
outputWarpTransformFile=dict(extensions=None, ), + outputAffineTransformFile=dict(extensions=None,), + outputCerebrumMaskFile=dict(extensions=None,), + outputLabelVolumeFile=dict(extensions=None,), + outputWarpTransformFile=dict(extensions=None,), ) outputs = Cerebro.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py b/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py index 607d98eb14..75015d79ab 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py @@ -4,49 +4,27 @@ def test_Cortex_inputs(): input_map = dict( - args=dict(argstr='%s', ), - computeGCBoundary=dict(argstr='-g', ), - computeWGBoundary=dict( - argstr='-w', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - includeAllSubcorticalAreas=dict( - argstr='-a', - usedefault=True, - ), - inputHemisphereLabelFile=dict( - argstr='-h %s', - extensions=None, - mandatory=True, - ), - inputTissueFractionFile=dict( - argstr='-f %s', - extensions=None, - mandatory=True, - ), - outputCerebrumMask=dict( - argstr='-o %s', - extensions=None, - genfile=True, - ), - timer=dict(argstr='--timer', ), - tissueFractionThreshold=dict( - argstr='-p %f', - usedefault=True, - ), - verbosity=dict(argstr='-v %d', ), + args=dict(argstr="%s",), + computeGCBoundary=dict(argstr="-g",), + computeWGBoundary=dict(argstr="-w", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + includeAllSubcorticalAreas=dict(argstr="-a", usedefault=True,), + inputHemisphereLabelFile=dict(argstr="-h %s", extensions=None, mandatory=True,), + inputTissueFractionFile=dict(argstr="-f %s", extensions=None, mandatory=True,), + outputCerebrumMask=dict(argstr="-o %s", extensions=None, genfile=True,), + timer=dict(argstr="--timer",), + tissueFractionThreshold=dict(argstr="-p %f", usedefault=True,), + verbosity=dict(argstr="-v %d",), ) inputs = Cortex.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Cortex_outputs(): - output_map = dict(outputCerebrumMask=dict(extensions=None, ), ) + output_map = dict(outputCerebrumMask=dict(extensions=None,),) outputs = Cortex.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py b/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py index d6eb0c6414..ed3b4c32f6 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py @@ -4,33 +4,24 @@ def test_Dewisp_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputMaskFile=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - ), - maximumIterations=dict(argstr='-n %d', ), - outputMaskFile=dict( - argstr='-o %s', - extensions=None, - genfile=True, - ), - sizeThreshold=dict(argstr='-t %d', ), - timer=dict(argstr='--timer', ), - verbosity=dict(argstr='-v %d', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputMaskFile=dict(argstr="-i %s", extensions=None, mandatory=True,), + maximumIterations=dict(argstr="-n %d",), + outputMaskFile=dict(argstr="-o %s", extensions=None, genfile=True,), + sizeThreshold=dict(argstr="-t %d",), + timer=dict(argstr="--timer",), + verbosity=dict(argstr="-v %d",), ) inputs = Dewisp.input_spec() for key, metadata in list(input_map.items()): for metakey, value in 
list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Dewisp_outputs(): - output_map = dict(outputMaskFile=dict(extensions=None, ), ) + output_map = dict(outputMaskFile=dict(extensions=None,),) outputs = Dewisp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py b/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py index f79d309262..00a35e8c82 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py @@ -4,62 +4,40 @@ def test_Dfs_inputs(): input_map = dict( - args=dict(argstr='%s', ), - curvatureWeighting=dict( - argstr='-w %f', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputShadingVolume=dict( - argstr='-c %s', - extensions=None, - ), - inputVolumeFile=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - ), - noNormalsFlag=dict(argstr='--nonormals', ), + args=dict(argstr="%s",), + curvatureWeighting=dict(argstr="-w %f", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + inputShadingVolume=dict(argstr="-c %s", extensions=None,), + inputVolumeFile=dict(argstr="-i %s", extensions=None, mandatory=True,), + noNormalsFlag=dict(argstr="--nonormals",), nonZeroTessellation=dict( - argstr='-nz', - xor=('nonZeroTessellation', 'specialTessellation'), - ), - outputSurfaceFile=dict( - argstr='-o %s', - extensions=None, - genfile=True, - ), - postSmoothFlag=dict(argstr='--postsmooth', ), - scalingPercentile=dict(argstr='-f %f', ), - smoothingConstant=dict( - argstr='-a %f', - usedefault=True, - ), - smoothingIterations=dict( - argstr='-n %d', - usedefault=True, + argstr="-nz", xor=("nonZeroTessellation", "specialTessellation"), ), + outputSurfaceFile=dict(argstr="-o %s", extensions=None, genfile=True,), + postSmoothFlag=dict(argstr="--postsmooth",), + scalingPercentile=dict(argstr="-f %f",), + smoothingConstant=dict(argstr="-a %f", usedefault=True,), + smoothingIterations=dict(argstr="-n %d", usedefault=True,), specialTessellation=dict( - argstr='%s', + argstr="%s", position=-1, - requires=['tessellationThreshold'], - xor=('nonZeroTessellation', 'specialTessellation'), + requires=["tessellationThreshold"], + xor=("nonZeroTessellation", "specialTessellation"), ), - tessellationThreshold=dict(argstr='%f', ), - timer=dict(argstr='--timer', ), - verbosity=dict(argstr='-v %d', ), - zeroPadFlag=dict(argstr='-z', ), + tessellationThreshold=dict(argstr="%f",), + timer=dict(argstr="--timer",), + verbosity=dict(argstr="-v %d",), + zeroPadFlag=dict(argstr="-z",), ) inputs = Dfs.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Dfs_outputs(): - output_map = dict(outputSurfaceFile=dict(extensions=None, ), ) + output_map = dict(outputSurfaceFile=dict(extensions=None,),) outputs = Dfs.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py b/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py index cfc74eb69a..761d049672 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py @@ -4,59 +4,31 @@ def test_Hemisplit_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputHemisphereLabelFile=dict( - argstr='-l %s', - extensions=None, - 
mandatory=True, - ), - inputSurfaceFile=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - ), - outputLeftHemisphere=dict( - argstr='--left %s', - extensions=None, - genfile=True, - ), - outputLeftPialHemisphere=dict( - argstr='-pl %s', - extensions=None, - genfile=True, - ), - outputRightHemisphere=dict( - argstr='--right %s', - extensions=None, - genfile=True, - ), - outputRightPialHemisphere=dict( - argstr='-pr %s', - extensions=None, - genfile=True, - ), - pialSurfaceFile=dict( - argstr='-p %s', - extensions=None, - ), - timer=dict(argstr='--timer', ), - verbosity=dict(argstr='-v %d', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputHemisphereLabelFile=dict(argstr="-l %s", extensions=None, mandatory=True,), + inputSurfaceFile=dict(argstr="-i %s", extensions=None, mandatory=True,), + outputLeftHemisphere=dict(argstr="--left %s", extensions=None, genfile=True,), + outputLeftPialHemisphere=dict(argstr="-pl %s", extensions=None, genfile=True,), + outputRightHemisphere=dict(argstr="--right %s", extensions=None, genfile=True,), + outputRightPialHemisphere=dict(argstr="-pr %s", extensions=None, genfile=True,), + pialSurfaceFile=dict(argstr="-p %s", extensions=None,), + timer=dict(argstr="--timer",), + verbosity=dict(argstr="-v %d",), ) inputs = Hemisplit.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Hemisplit_outputs(): output_map = dict( - outputLeftHemisphere=dict(extensions=None, ), - outputLeftPialHemisphere=dict(extensions=None, ), - outputRightHemisphere=dict(extensions=None, ), - outputRightPialHemisphere=dict(extensions=None, ), + outputLeftHemisphere=dict(extensions=None,), + outputLeftPialHemisphere=dict(extensions=None,), + outputRightHemisphere=dict(extensions=None,), + outputRightPialHemisphere=dict(extensions=None,), ) outputs = Hemisplit.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py b/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py index 561f6ef1d8..9da07862f8 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py @@ -4,76 +4,35 @@ def test_Pialmesh_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - exportPrefix=dict(argstr='--prefix %s', ), - inputMaskFile=dict( - argstr='-m %s', - extensions=None, - mandatory=True, - ), - inputSurfaceFile=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - ), - inputTissueFractionFile=dict( - argstr='-f %s', - extensions=None, - mandatory=True, - ), - laplacianSmoothing=dict( - argstr='--smooth %f', - usedefault=True, - ), - maxThickness=dict( - argstr='--max %f', - usedefault=True, - ), - normalSmoother=dict( - argstr='--nc %f', - usedefault=True, - ), - numIterations=dict( - argstr='-n %d', - usedefault=True, - ), - outputInterval=dict( - argstr='--interval %d', - usedefault=True, - ), - outputSurfaceFile=dict( - argstr='-o %s', - extensions=None, - genfile=True, - ), - recomputeNormals=dict(argstr='--norm', ), - searchRadius=dict( - argstr='-r %f', - usedefault=True, - ), - stepSize=dict( - argstr='-s %f', - usedefault=True, - ), - tangentSmoother=dict(argstr='--tc %f', ), - timer=dict(argstr='--timer', ), - tissueThreshold=dict( - argstr='-t %f', - usedefault=True, - ), - verbosity=dict(argstr='-v %d', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, 
usedefault=True,), + exportPrefix=dict(argstr="--prefix %s",), + inputMaskFile=dict(argstr="-m %s", extensions=None, mandatory=True,), + inputSurfaceFile=dict(argstr="-i %s", extensions=None, mandatory=True,), + inputTissueFractionFile=dict(argstr="-f %s", extensions=None, mandatory=True,), + laplacianSmoothing=dict(argstr="--smooth %f", usedefault=True,), + maxThickness=dict(argstr="--max %f", usedefault=True,), + normalSmoother=dict(argstr="--nc %f", usedefault=True,), + numIterations=dict(argstr="-n %d", usedefault=True,), + outputInterval=dict(argstr="--interval %d", usedefault=True,), + outputSurfaceFile=dict(argstr="-o %s", extensions=None, genfile=True,), + recomputeNormals=dict(argstr="--norm",), + searchRadius=dict(argstr="-r %f", usedefault=True,), + stepSize=dict(argstr="-s %f", usedefault=True,), + tangentSmoother=dict(argstr="--tc %f",), + timer=dict(argstr="--timer",), + tissueThreshold=dict(argstr="-t %f", usedefault=True,), + verbosity=dict(argstr="-v %d",), ) inputs = Pialmesh.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Pialmesh_outputs(): - output_map = dict(outputSurfaceFile=dict(extensions=None, ), ) + output_map = dict(outputSurfaceFile=dict(extensions=None,),) outputs = Pialmesh.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py b/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py index 5b93215b70..fa1c8bc8b7 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py @@ -4,44 +4,28 @@ def test_Pvc_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputMRIFile=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - ), - inputMaskFile=dict( - argstr='-m %s', - extensions=None, - ), - outputLabelFile=dict( - argstr='-o %s', - extensions=None, - genfile=True, - ), - outputTissueFractionFile=dict( - argstr='-f %s', - extensions=None, - genfile=True, - ), - spatialPrior=dict(argstr='-l %f', ), - threeClassFlag=dict(argstr='-3', ), - timer=dict(argstr='--timer', ), - verbosity=dict(argstr='-v %d', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputMRIFile=dict(argstr="-i %s", extensions=None, mandatory=True,), + inputMaskFile=dict(argstr="-m %s", extensions=None,), + outputLabelFile=dict(argstr="-o %s", extensions=None, genfile=True,), + outputTissueFractionFile=dict(argstr="-f %s", extensions=None, genfile=True,), + spatialPrior=dict(argstr="-l %f",), + threeClassFlag=dict(argstr="-3",), + timer=dict(argstr="--timer",), + verbosity=dict(argstr="-v %d",), ) inputs = Pvc.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Pvc_outputs(): output_map = dict( - outputLabelFile=dict(extensions=None, ), - outputTissueFractionFile=dict(extensions=None, ), + outputLabelFile=dict(extensions=None,), + outputTissueFractionFile=dict(extensions=None,), ) outputs = Pvc.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py b/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py index 3d861a0677..2c19934e7c 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py @@ -4,48 +4,33 @@ def test_SVReg_inputs(): 
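Every `test_auto_*_inputs()` function in the hunks above and below follows the same pattern: an `input_map` of expected trait metadata is compared field by field against the interface's `input_spec()`. The following small, self-contained sketch shows that check with toy stand-ins; the classes below are illustrative only, not the real nipype or traits types.

# Toy stand-ins for the structures the generated tests rely on; the real tests use
# the interface's input_spec() and its traits() accessor instead.

class _ToyTrait:
    def __init__(self, **metadata):
        for key, value in metadata.items():
            setattr(self, key, value)


class _ToyInputSpec:
    def __init__(self, trait_metadata):
        self._traits = {name: _ToyTrait(**md) for name, md in trait_metadata.items()}

    def traits(self):
        return self._traits


def check_input_map(inputs, input_map):
    # This double loop is exactly the assertion pattern used by the auto tests.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value


# Hypothetical example mirroring one entry from a generated test:
input_map = dict(
    inputMRIFile=dict(argstr="-i %s", extensions=None, mandatory=True),
    timer=dict(argstr="--timer"),
)
check_input_map(_ToyInputSpec(input_map), input_map)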
input_map = dict( - args=dict(argstr='%s', ), - atlasFilePrefix=dict( - argstr="'%s'", - position=1, - ), - curveMatchingInstructions=dict(argstr="'-cur %s'", ), - dataSinkDelay=dict(argstr='%s', ), - displayModuleName=dict(argstr="'-m'", ), - displayTimestamps=dict(argstr="'-t'", ), - environ=dict( - nohash=True, - usedefault=True, - ), - iterations=dict(argstr="'-H %d'", ), - keepIntermediates=dict(argstr="'-k'", ), - pialSurfaceMaskDilation=dict(argstr="'-D %d'", ), - refineOutputs=dict(argstr="'-r'", ), - shortMessages=dict(argstr="'-gui'", ), - skipToIntensityReg=dict(argstr="'-p'", ), - skipToVolumeReg=dict(argstr="'-s'", ), - skipVolumetricProcessing=dict(argstr="'-S'", ), - subjectFilePrefix=dict( - argstr="'%s'", - mandatory=True, - position=0, - ), - useCerebrumMask=dict(argstr="'-C'", ), - useManualMaskFile=dict(argstr="'-cbm'", ), - useMultiThreading=dict(argstr="'-P'", ), - useSingleThreading=dict(argstr="'-U'", ), + args=dict(argstr="%s",), + atlasFilePrefix=dict(argstr="'%s'", position=1,), + curveMatchingInstructions=dict(argstr="'-cur %s'",), + dataSinkDelay=dict(argstr="%s",), + displayModuleName=dict(argstr="'-m'",), + displayTimestamps=dict(argstr="'-t'",), + environ=dict(nohash=True, usedefault=True,), + iterations=dict(argstr="'-H %d'",), + keepIntermediates=dict(argstr="'-k'",), + pialSurfaceMaskDilation=dict(argstr="'-D %d'",), + refineOutputs=dict(argstr="'-r'",), + shortMessages=dict(argstr="'-gui'",), + skipToIntensityReg=dict(argstr="'-p'",), + skipToVolumeReg=dict(argstr="'-s'",), + skipVolumetricProcessing=dict(argstr="'-S'",), + subjectFilePrefix=dict(argstr="'%s'", mandatory=True, position=0,), + useCerebrumMask=dict(argstr="'-C'",), + useManualMaskFile=dict(argstr="'-cbm'",), + useMultiThreading=dict(argstr="'-P'",), + useSingleThreading=dict(argstr="'-U'",), verbosity0=dict( - argstr="'-v0'", - xor=('verbosity0', 'verbosity1', 'verbosity2'), + argstr="'-v0'", xor=("verbosity0", "verbosity1", "verbosity2"), ), verbosity1=dict( - argstr="'-v1'", - xor=('verbosity0', 'verbosity1', 'verbosity2'), - ), - verbosity2=dict( - argstr="'v2'", - xor=('verbosity0', 'verbosity1', 'verbosity2'), + argstr="'-v1'", xor=("verbosity0", "verbosity1", "verbosity2"), ), + verbosity2=dict(argstr="'v2'", xor=("verbosity0", "verbosity1", "verbosity2"),), ) inputs = SVReg.input_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py b/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py index c04b16d5d2..b356b0335e 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py @@ -4,40 +4,25 @@ def test_Scrubmask_inputs(): input_map = dict( - args=dict(argstr='%s', ), - backgroundFillThreshold=dict( - argstr='-b %d', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - foregroundTrimThreshold=dict( - argstr='-f %d', - usedefault=True, - ), - inputMaskFile=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - ), - numberIterations=dict(argstr='-n %d', ), - outputMaskFile=dict( - argstr='-o %s', - extensions=None, - genfile=True, - ), - timer=dict(argstr='--timer', ), - verbosity=dict(argstr='-v %d', ), + args=dict(argstr="%s",), + backgroundFillThreshold=dict(argstr="-b %d", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + foregroundTrimThreshold=dict(argstr="-f %d", usedefault=True,), + inputMaskFile=dict(argstr="-i %s", extensions=None, mandatory=True,), + numberIterations=dict(argstr="-n %d",), + outputMaskFile=dict(argstr="-o 
%s", extensions=None, genfile=True,), + timer=dict(argstr="--timer",), + verbosity=dict(argstr="-v %d",), ) inputs = Scrubmask.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Scrubmask_outputs(): - output_map = dict(outputMaskFile=dict(extensions=None, ), ) + output_map = dict(outputMaskFile=dict(extensions=None,),) outputs = Scrubmask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py b/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py index 11707befa3..06480f30e8 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py @@ -4,44 +4,31 @@ def test_Skullfinder_inputs(): input_map = dict( - args=dict(argstr='%s', ), - bgLabelValue=dict(argstr='--bglabel %d', ), - brainLabelValue=dict(argstr='--brainlabel %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputMRIFile=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - ), - inputMaskFile=dict( - argstr='-m %s', - extensions=None, - mandatory=True, - ), - lowerThreshold=dict(argstr='-l %d', ), - outputLabelFile=dict( - argstr='-o %s', - extensions=None, - genfile=True, - ), - performFinalOpening=dict(argstr='--finalOpening', ), - scalpLabelValue=dict(argstr='--scalplabel %d', ), - skullLabelValue=dict(argstr='--skulllabel %d', ), - spaceLabelValue=dict(argstr='--spacelabel %d', ), - surfaceFilePrefix=dict(argstr='-s %s', ), - upperThreshold=dict(argstr='-u %d', ), - verbosity=dict(argstr='-v %d', ), + args=dict(argstr="%s",), + bgLabelValue=dict(argstr="--bglabel %d",), + brainLabelValue=dict(argstr="--brainlabel %d",), + environ=dict(nohash=True, usedefault=True,), + inputMRIFile=dict(argstr="-i %s", extensions=None, mandatory=True,), + inputMaskFile=dict(argstr="-m %s", extensions=None, mandatory=True,), + lowerThreshold=dict(argstr="-l %d",), + outputLabelFile=dict(argstr="-o %s", extensions=None, genfile=True,), + performFinalOpening=dict(argstr="--finalOpening",), + scalpLabelValue=dict(argstr="--scalplabel %d",), + skullLabelValue=dict(argstr="--skulllabel %d",), + spaceLabelValue=dict(argstr="--spacelabel %d",), + surfaceFilePrefix=dict(argstr="-s %s",), + upperThreshold=dict(argstr="-u %d",), + verbosity=dict(argstr="-v %d",), ) inputs = Skullfinder.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Skullfinder_outputs(): - output_map = dict(outputLabelFile=dict(extensions=None, ), ) + output_map = dict(outputLabelFile=dict(extensions=None,),) outputs = Skullfinder.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Tca.py b/nipype/interfaces/brainsuite/tests/test_auto_Tca.py index b7767a1b01..66cb70ac8f 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Tca.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Tca.py @@ -4,40 +4,25 @@ def test_Tca_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - foregroundDelta=dict( - argstr='--delta %d', - usedefault=True, - ), - inputMaskFile=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - ), - maxCorrectionSize=dict(argstr='-n %d', ), - minCorrectionSize=dict( - argstr='-m %d', - usedefault=True, - ), - 
outputMaskFile=dict( - argstr='-o %s', - extensions=None, - genfile=True, - ), - timer=dict(argstr='--timer', ), - verbosity=dict(argstr='-v %d', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + foregroundDelta=dict(argstr="--delta %d", usedefault=True,), + inputMaskFile=dict(argstr="-i %s", extensions=None, mandatory=True,), + maxCorrectionSize=dict(argstr="-n %d",), + minCorrectionSize=dict(argstr="-m %d", usedefault=True,), + outputMaskFile=dict(argstr="-o %s", extensions=None, genfile=True,), + timer=dict(argstr="--timer",), + verbosity=dict(argstr="-v %d",), ) inputs = Tca.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Tca_outputs(): - output_map = dict(outputMaskFile=dict(extensions=None, ), ) + output_map = dict(outputMaskFile=dict(extensions=None,),) outputs = Tca.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py b/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py index cf2174690e..c0265e7e6c 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py @@ -4,15 +4,9 @@ def test_ThicknessPVC_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - subjectFilePrefix=dict( - argstr='%s', - mandatory=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + subjectFilePrefix=dict(argstr="%s", mandatory=True,), ) inputs = ThicknessPVC.input_spec() diff --git a/nipype/interfaces/bru2nii.py b/nipype/interfaces/bru2nii.py index 1b4574f00c..7ed9c77651 100644 --- a/nipype/interfaces/bru2nii.py +++ b/nipype/interfaces/bru2nii.py @@ -3,32 +3,38 @@ """ import os -from .base import (CommandLine, CommandLineInputSpec, traits, TraitedSpec, - isdefined, File, Directory) +from .base import ( + CommandLine, + CommandLineInputSpec, + traits, + TraitedSpec, + isdefined, + File, + Directory, +) class Bru2InputSpec(CommandLineInputSpec): input_dir = Directory( - desc="Input Directory", - exists=True, - mandatory=True, - position=-1, - argstr="%s") + desc="Input Directory", exists=True, mandatory=True, position=-1, argstr="%s" + ) actual_size = traits.Bool( - argstr='-a', - desc="Keep actual size - otherwise x10 scale so animals match human.") + argstr="-a", + desc="Keep actual size - otherwise x10 scale so animals match human.", + ) force_conversion = traits.Bool( - argstr='-f', - desc="Force conversion of localizers images (multiple slice " - "orientations).") - compress = traits.Bool( - argstr='-z', desc='gz compress images (".nii.gz").') + argstr="-f", + desc="Force conversion of localizers images (multiple slice " "orientations).", + ) + compress = traits.Bool(argstr="-z", desc='gz compress images (".nii.gz").') append_protocol_name = traits.Bool( - argstr='-p', desc="Append protocol name to output filename.") + argstr="-p", desc="Append protocol name to output filename." 
+ ) output_filename = traits.Str( argstr="-o %s", desc='Output filename (".nii" will be appended, or ".nii.gz" if the "-z" compress option is selected)', - genfile=True) + genfile=True, + ) class Bru2OutputSpec(TraitedSpec): @@ -47,6 +53,7 @@ class Bru2(CommandLine): >>> converter.cmdline # doctest: +ELLIPSIS 'Bru2 -o .../data/brukerdir brukerdir' """ + input_spec = Bru2InputSpec output_spec = Bru2OutputSpec _cmd = "Bru2" @@ -56,7 +63,7 @@ def _list_outputs(self): if isdefined(self.inputs.output_filename): output_filename1 = os.path.abspath(self.inputs.output_filename) else: - output_filename1 = self._gen_filename('output_filename') + output_filename1 = self._gen_filename("output_filename") if self.inputs.compress: outputs["nii_file"] = output_filename1 + ".nii.gz" else: @@ -64,8 +71,8 @@ def _list_outputs(self): return outputs def _gen_filename(self, name): - if name == 'output_filename': + if name == "output_filename": outfile = os.path.join( - os.getcwd(), - os.path.basename(os.path.normpath(self.inputs.input_dir))) + os.getcwd(), os.path.basename(os.path.normpath(self.inputs.input_dir)) + ) return outfile diff --git a/nipype/interfaces/c3.py b/nipype/interfaces/c3.py index db81fce55f..4eadb98207 100644 --- a/nipype/interfaces/c3.py +++ b/nipype/interfaces/c3.py @@ -5,9 +5,17 @@ import os from glob import glob -from .base import (CommandLineInputSpec, traits, TraitedSpec, File, - SEMLikeCommandLine, InputMultiPath, OutputMultiPath, - CommandLine, isdefined) +from .base import ( + CommandLineInputSpec, + traits, + TraitedSpec, + File, + SEMLikeCommandLine, + InputMultiPath, + OutputMultiPath, + CommandLine, + isdefined, +) from ..utils.filemanip import split_filename from .. import logging @@ -16,16 +24,17 @@ class C3dAffineToolInputSpec(CommandLineInputSpec): reference_file = File(exists=True, argstr="-ref %s", position=1) - source_file = File(exists=True, argstr='-src %s', position=2) - transform_file = File(exists=True, argstr='%s', position=3) + source_file = File(exists=True, argstr="-src %s", position=2) + transform_file = File(exists=True, argstr="%s", position=3) itk_transform = traits.Either( traits.Bool, File(), hash_files=False, desc="Export ITK transform.", argstr="-oitk %s", - position=5) - fsl2ras = traits.Bool(argstr='-fsl2ras', position=4) + position=5, + ) + fsl2ras = traits.Bool(argstr="-fsl2ras", position=4) class C3dAffineToolOutputSpec(TraitedSpec): @@ -46,11 +55,12 @@ class C3dAffineTool(SEMLikeCommandLine): >>> c3.cmdline 'c3d_affine_tool -src cmatrix.mat -fsl2ras -oitk affine.txt' """ + input_spec = C3dAffineToolInputSpec output_spec = C3dAffineToolOutputSpec - _cmd = 'c3d_affine_tool' - _outputs_filenames = {'itk_transform': 'affine.txt'} + _cmd = "c3d_affine_tool" + _outputs_filenames = {"itk_transform": "affine.txt"} class C3dInputSpec(CommandLineInputSpec): @@ -59,62 +69,97 @@ class C3dInputSpec(CommandLineInputSpec): position=1, argstr="%s", mandatory=True, - desc="Input file (wildcard and multiple are supported).") + desc="Input file (wildcard and multiple are supported).", + ) out_file = File( exists=False, argstr="-o %s", position=-1, xor=["out_files"], - desc="Output file of last image on the stack.") + desc="Output file of last image on the stack.", + ) out_files = InputMultiPath( File(), argstr="-oo %s", xor=["out_file"], position=-1, - desc=("Write all images on the convert3d stack as multiple files." 
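For the `Bru2` interface in the bru2nii.py hunk above, the default output name is derived from the input directory: `_gen_filename` joins the current working directory with the basename of `input_dir`, and `_list_outputs` appends `.nii.gz` when `compress` is set (plain `.nii` otherwise is an assumption of this sketch, since that branch falls outside the visible hunk). The standalone helper below is written for illustration; the real logic lives in the interface methods shown in the patch.

import os


def bru2_default_output(input_dir, compress=False):
    # Mirrors Bru2._gen_filename + Bru2._list_outputs: the NIfTI lands in the
    # current working directory, named after the Bruker input directory.
    base = os.path.join(os.getcwd(), os.path.basename(os.path.normpath(input_dir)))
    return base + (".nii.gz" if compress else ".nii")


# e.g. with cwd == "/tmp/work":
#   bru2_default_output("/data/brukerdir")                  -> "/tmp/work/brukerdir.nii"
#   bru2_default_output("/data/brukerdir/", compress=True)  -> "/tmp/work/brukerdir.nii.gz"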
- " Supports both list of output files or a pattern for the output" - " filenames (using %d substituion).")) + desc=( + "Write all images on the convert3d stack as multiple files." + " Supports both list of output files or a pattern for the output" + " filenames (using %d substituion)." + ), + ) pix_type = traits.Enum( - "float", "char", "uchar", "short", "ushort", "int", "uint", "double", + "float", + "char", + "uchar", + "short", + "ushort", + "int", + "uint", + "double", argstr="-type %s", - desc=("Specifies the pixel type for the output image. By default," - " images are written in floating point (float) format")) + desc=( + "Specifies the pixel type for the output image. By default," + " images are written in floating point (float) format" + ), + ) scale = traits.Either( - traits.Int(), traits.Float(), + traits.Int(), + traits.Float(), argstr="-scale %s", - desc=("Multiplies the intensity of each voxel in the last image on the" - " stack by the given factor.")) + desc=( + "Multiplies the intensity of each voxel in the last image on the" + " stack by the given factor." + ), + ) shift = traits.Either( - traits.Int(), traits.Float(), + traits.Int(), + traits.Float(), argstr="-shift %s", - desc='Adds the given constant to every voxel.') + desc="Adds the given constant to every voxel.", + ) interp = traits.Enum( - "Linear", "NearestNeighbor", "Cubic", "Sinc", "Gaussian", + "Linear", + "NearestNeighbor", + "Cubic", + "Sinc", + "Gaussian", argstr="-interpolation %s", - desc=("Specifies the interpolation used with -resample and other" - " commands. Default is Linear.")) + desc=( + "Specifies the interpolation used with -resample and other" + " commands. Default is Linear." + ), + ) resample = traits.Str( argstr="-resample %s", - desc=("Resamples the image, keeping the bounding box the same, but" - " changing the number of voxels in the image. The dimensions can be" - " specified as a percentage, for example to double the number of voxels" - " in each direction. The -interpolation flag affects how sampling is" - " performed.")) + desc=( + "Resamples the image, keeping the bounding box the same, but" + " changing the number of voxels in the image. The dimensions can be" + " specified as a percentage, for example to double the number of voxels" + " in each direction. The -interpolation flag affects how sampling is" + " performed." + ), + ) smooth = traits.Str( argstr="-smooth %s", - desc=("Applies Gaussian smoothing to the image. The parameter vector" - " specifies the standard deviation of the Gaussian kernel.")) + desc=( + "Applies Gaussian smoothing to the image. The parameter vector" + " specifies the standard deviation of the Gaussian kernel." 
+ ), + ) multicomp_split = traits.Bool( False, usedefault=True, argstr="-mcr", position=0, - desc="Enable reading of multi-component images.") + desc="Enable reading of multi-component images.", + ) is_4d = traits.Bool( False, usedefault=True, - desc=("Changes command to support 4D file operations (default is" - " false).")) + desc=("Changes command to support 4D file operations (default is" " false)."), + ) class C3dOutputSpec(TraitedSpec): @@ -147,6 +192,7 @@ class C3d(CommandLine): >>> c3.cmdline 'c4d epi.nii -type short -o epi.img' """ + input_spec = C3dInputSpec output_spec = C3dOutputSpec @@ -163,8 +209,7 @@ def _is_4d(self): def _run_interface(self, runtime): cmd = self._cmd - if (not isdefined(self.inputs.out_file) - and not isdefined(self.inputs.out_files)): + if not isdefined(self.inputs.out_file) and not isdefined(self.inputs.out_files): # Convert3d does not want to override file, by default # so we define a new output file self._gen_outfile() @@ -175,8 +220,9 @@ def _run_interface(self, runtime): def _gen_outfile(self): # if many infiles, raise exception if (len(self.inputs.in_file) > 1) or ("*" in self.inputs.in_file[0]): - raise AttributeError("Multiple in_files found - specify either" - " `out_file` or `out_files`.") + raise AttributeError( + "Multiple in_files found - specify either" " `out_file` or `out_files`." + ) _, fn, ext = split_filename(self.inputs.in_file[0]) self.inputs.out_file = fn + "_generated" + ext # if generated file will overwrite, raise error @@ -192,9 +238,11 @@ def _list_outputs(self): if len(self.inputs.out_files) == 1: _out_files = glob(os.path.abspath(self.inputs.out_files[0])) else: - _out_files = [os.path.abspath(f) for f in self.inputs.out_files - if os.path.exists(os.path.abspath(f))] + _out_files = [ + os.path.abspath(f) + for f in self.inputs.out_files + if os.path.exists(os.path.abspath(f)) + ] outputs["out_files"] = _out_files return outputs - diff --git a/nipype/interfaces/camino/__init__.py b/nipype/interfaces/camino/__init__.py index 0120732ef6..e90cc6f375 100644 --- a/nipype/interfaces/camino/__init__.py +++ b/nipype/interfaces/camino/__init__.py @@ -5,14 +5,36 @@ """ from .connectivity import Conmat -from .convert import (Image2Voxel, FSL2Scheme, VtkStreamlines, ProcStreamlines, - TractShredder, DT2NIfTI, NIfTIDT2Camino, AnalyzeHeader, - Shredder) -from .dti import (DTIFit, ModelFit, DTLUTGen, PicoPDFs, Track, TrackPICo, - TrackBayesDirac, TrackDT, TrackBallStick, TrackBootstrap, - TrackBedpostxDeter, TrackBedpostxProba, - ComputeFractionalAnisotropy, ComputeMeanDiffusivity, - ComputeTensorTrace, ComputeEigensystem, DTMetric) -from .calib import (SFPICOCalibData, SFLUTGen) -from .odf import (QBallMX, LinRecon, SFPeaks, MESD) +from .convert import ( + Image2Voxel, + FSL2Scheme, + VtkStreamlines, + ProcStreamlines, + TractShredder, + DT2NIfTI, + NIfTIDT2Camino, + AnalyzeHeader, + Shredder, +) +from .dti import ( + DTIFit, + ModelFit, + DTLUTGen, + PicoPDFs, + Track, + TrackPICo, + TrackBayesDirac, + TrackDT, + TrackBallStick, + TrackBootstrap, + TrackBedpostxDeter, + TrackBedpostxProba, + ComputeFractionalAnisotropy, + ComputeMeanDiffusivity, + ComputeTensorTrace, + ComputeEigensystem, + DTMetric, +) +from .calib import SFPICOCalibData, SFLUTGen +from .odf import QBallMX, LinRecon, SFPeaks, MESD from .utils import ImageStats diff --git a/nipype/interfaces/camino/calib.py b/nipype/interfaces/camino/calib.py index 24eb993bf6..a16dbd9149 100644 --- a/nipype/interfaces/camino/calib.py +++ b/nipype/interfaces/camino/calib.py @@ -3,95 +3,122 @@ 
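In `C3d._gen_outfile` above, when neither `out_file` nor `out_files` is set and there is exactly one input image, the interface writes to a file named after the input with `_generated` inserted before the extension. A rough sketch of that derivation follows; the simplified `split_filename` below is an assumption standing in for `nipype.utils.filemanip.split_filename`.

import os


def split_filename(fname):
    # Simplified stand-in: peel off compound NIfTI/Analyze extensions first.
    path = os.path.dirname(fname)
    base = os.path.basename(fname)
    for ext in (".nii.gz", ".img.gz", ".nii", ".img", ".hdr"):
        if base.endswith(ext):
            return path, base[: -len(ext)], ext
    root, ext = os.path.splitext(base)
    return path, root, ext


def default_c3d_output(in_file):
    # Mirrors the naming rule in C3d._gen_outfile.
    _, fn, ext = split_filename(in_file)
    return fn + "_generated" + ext


assert default_c3d_output("epi.nii") == "epi_generated.nii"
assert default_c3d_output("sub-01_T1w.nii.gz") == "sub-01_T1w_generated.nii.gz"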
import os from ...utils.filemanip import split_filename -from ..base import (traits, TraitedSpec, File, StdOutCommandLine, - StdOutCommandLineInputSpec) +from ..base import ( + traits, + TraitedSpec, + File, + StdOutCommandLine, + StdOutCommandLineInputSpec, +) class SFPICOCalibDataInputSpec(StdOutCommandLineInputSpec): snr = traits.Float( - argstr='-snr %f', - units='NA', - desc=('Specifies the signal-to-noise ratio of the ' - 'non-diffusion-weighted measurements to use in simulations.')) + argstr="-snr %f", + units="NA", + desc=( + "Specifies the signal-to-noise ratio of the " + "non-diffusion-weighted measurements to use in simulations." + ), + ) scheme_file = File( exists=True, - argstr='-schemefile %s', + argstr="-schemefile %s", mandatory=True, - desc='Specifies the scheme file for the diffusion MRI data') + desc="Specifies the scheme file for the diffusion MRI data", + ) info_file = File( - desc='The name to be given to the information output filename.', - argstr='-infooutputfile %s', + desc="The name to be given to the information output filename.", + argstr="-infooutputfile %s", mandatory=True, genfile=True, - hash_files=False) # Genfile and hash_files? + hash_files=False, + ) # Genfile and hash_files? trace = traits.Float( - argstr='-trace %f', - units='NA', - desc='Trace of the diffusion tensor(s) used in the test function.') + argstr="-trace %f", + units="NA", + desc="Trace of the diffusion tensor(s) used in the test function.", + ) onedtfarange = traits.List( traits.Float, - argstr='-onedtfarange %s', + argstr="-onedtfarange %s", minlen=2, maxlen=2, - units='NA', - desc=('Minimum and maximum FA for the single tensor ' - 'synthetic data.')) + units="NA", + desc=("Minimum and maximum FA for the single tensor " "synthetic data."), + ) onedtfastep = traits.Float( - argstr='-onedtfastep %f', - units='NA', - desc=('FA step size controlling how many steps there are ' - 'between the minimum and maximum FA settings.')) + argstr="-onedtfastep %f", + units="NA", + desc=( + "FA step size controlling how many steps there are " + "between the minimum and maximum FA settings." + ), + ) twodtfarange = traits.List( traits.Float, - argstr='-twodtfarange %s', + argstr="-twodtfarange %s", minlen=2, maxlen=2, - units='NA', - desc=('Minimum and maximum FA for the two tensor ' - 'synthetic data. FA is varied for both tensors ' - 'to give all the different permutations.')) + units="NA", + desc=( + "Minimum and maximum FA for the two tensor " + "synthetic data. FA is varied for both tensors " + "to give all the different permutations." + ), + ) twodtfastep = traits.Float( - argstr='-twodtfastep %f', - units='NA', - desc=('FA step size controlling how many steps there are ' - 'between the minimum and maximum FA settings ' - 'for the two tensor cases.')) + argstr="-twodtfastep %f", + units="NA", + desc=( + "FA step size controlling how many steps there are " + "between the minimum and maximum FA settings " + "for the two tensor cases." 
+ ), + ) twodtanglerange = traits.List( traits.Float, - argstr='-twodtanglerange %s', + argstr="-twodtanglerange %s", minlen=2, maxlen=2, - units='NA', - desc=('Minimum and maximum crossing angles ' - 'between the two fibres.')) + units="NA", + desc=("Minimum and maximum crossing angles " "between the two fibres."), + ) twodtanglestep = traits.Float( - argstr='-twodtanglestep %f', - units='NA', - desc=('Angle step size controlling how many steps there are ' - 'between the minimum and maximum crossing angles for ' - 'the two tensor cases.')) + argstr="-twodtanglestep %f", + units="NA", + desc=( + "Angle step size controlling how many steps there are " + "between the minimum and maximum crossing angles for " + "the two tensor cases." + ), + ) twodtmixmax = traits.Float( - argstr='-twodtmixmax %f', - units='NA', - desc= - ('Mixing parameter controlling the proportion of one fibre population ' - 'to the other. The minimum mixing parameter is (1 - twodtmixmax).')) + argstr="-twodtmixmax %f", + units="NA", + desc=( + "Mixing parameter controlling the proportion of one fibre population " + "to the other. The minimum mixing parameter is (1 - twodtmixmax)." + ), + ) twodtmixstep = traits.Float( - argstr='-twodtmixstep %f', - units='NA', - desc=('Mixing parameter step size for the two tensor cases. ' - 'Specify how many mixing parameter increments to use.')) + argstr="-twodtmixstep %f", + units="NA", + desc=( + "Mixing parameter step size for the two tensor cases. " + "Specify how many mixing parameter increments to use." + ), + ) seed = traits.Float( - argstr='-seed %f', - units='NA', - desc= - 'Specifies the random seed to use for noise generation in simulation trials.' + argstr="-seed %f", + units="NA", + desc="Specifies the random seed to use for noise generation in simulation trials.", ) class SFPICOCalibDataOutputSpec(TraitedSpec): - PICOCalib = File(exists=True, desc='Calibration dataset') - calib_info = File(exists=True, desc='Calibration dataset') + PICOCalib = File(exists=True, desc="Calibration dataset") + calib_info = File(exists=True, desc="Calibration dataset") class SFPICOCalibData(StdOutCommandLine): @@ -146,88 +173,103 @@ class SFPICOCalibData(StdOutCommandLine): two fibre cases. However, care should be taken to ensure that enough data is generated for calculating the LUT. # doctest: +SKIP """ - _cmd = 'sfpicocalibdata' + + _cmd = "sfpicocalibdata" input_spec = SFPICOCalibDataInputSpec output_spec = SFPICOCalibDataOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['PICOCalib'] = os.path.abspath(self._gen_outfilename()) - outputs['calib_info'] = os.path.abspath(self.inputs.info_file) + outputs["PICOCalib"] = os.path.abspath(self._gen_outfilename()) + outputs["calib_info"] = os.path.abspath(self.inputs.info_file) return outputs def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.scheme_file) - return name + '_PICOCalib.Bfloat' + return name + "_PICOCalib.Bfloat" class SFLUTGenInputSpec(StdOutCommandLineInputSpec): in_file = File( exists=True, - argstr='-inputfile %s', + argstr="-inputfile %s", mandatory=True, - desc='Voxel-order data of the spherical functions peaks.') + desc="Voxel-order data of the spherical functions peaks.", + ) info_file = File( - argstr='-infofile %s', + argstr="-infofile %s", mandatory=True, - desc=('The Info file that corresponds to the calibration ' - 'datafile used in the reconstruction.')) + desc=( + "The Info file that corresponds to the calibration " + "datafile used in the reconstruction." 
+ ), + ) outputstem = traits.Str( - 'LUT', - argstr='-outputstem %s', - desc= - ('Define the name of the generated luts. The form of the filenames will be ' - '[outputstem]_oneFibreSurfaceCoeffs.Bdouble and ' - '[outputstem]_twoFibreSurfaceCoeffs.Bdouble'), - usedefault=True) + "LUT", + argstr="-outputstem %s", + desc=( + "Define the name of the generated luts. The form of the filenames will be " + "[outputstem]_oneFibreSurfaceCoeffs.Bdouble and " + "[outputstem]_twoFibreSurfaceCoeffs.Bdouble" + ), + usedefault=True, + ) pdf = traits.Enum( - 'bingham', - 'watson', - argstr='-pdf %s', - desc= - ('Sets the distribution to use for the calibration. The default is the Bingham ' - 'distribution, which allows elliptical probability density contours. ' - 'Currently supported options are: ' - ' bingham - The Bingham distribution, which allows elliptical probability ' - ' density contours. ' - ' watson - The Watson distribution. This distribution is rotationally symmetric.' - ), - usedefault=True) + "bingham", + "watson", + argstr="-pdf %s", + desc=( + "Sets the distribution to use for the calibration. The default is the Bingham " + "distribution, which allows elliptical probability density contours. " + "Currently supported options are: " + " bingham - The Bingham distribution, which allows elliptical probability " + " density contours. " + " watson - The Watson distribution. This distribution is rotationally symmetric." + ), + usedefault=True, + ) binincsize = traits.Int( - argstr='-binincsize %d', - units='NA', - desc= - ('Sets the size of the bins. In the case of 2D histograms such as the ' - 'Bingham, the bins are always square. Default is 1.')) + argstr="-binincsize %d", + units="NA", + desc=( + "Sets the size of the bins. In the case of 2D histograms such as the " + "Bingham, the bins are always square. Default is 1." + ), + ) minvectsperbin = traits.Int( - argstr='-minvectsperbin %d', - units='NA', - desc= - ('Specifies the minimum number of fibre-orientation estimates a bin ' - 'must contain before it is used in the lut line/surface generation. ' - 'Default is 50. If you get the error "no fibre-orientation estimates ' - 'in histogram!", the calibration data set is too small to get enough ' - 'samples in any of the histogram bins. You can decrease the minimum ' - 'number per bin to get things running in quick tests, but the sta- ' - 'tistics will not be reliable and for serious applications, you need ' - 'to increase the size of the calibration data set until the error goes.' - )) + argstr="-minvectsperbin %d", + units="NA", + desc=( + "Specifies the minimum number of fibre-orientation estimates a bin " + "must contain before it is used in the lut line/surface generation. " + 'Default is 50. If you get the error "no fibre-orientation estimates ' + 'in histogram!", the calibration data set is too small to get enough ' + "samples in any of the histogram bins. You can decrease the minimum " + "number per bin to get things running in quick tests, but the sta- " + "tistics will not be reliable and for serious applications, you need " + "to increase the size of the calibration data set until the error goes." + ), + ) directmap = traits.Bool( - argstr='-directmap', - desc= - ('Use direct mapping between the eigenvalues and the distribution parameters ' - 'instead of the log of the eigenvalues.')) + argstr="-directmap", + desc=( + "Use direct mapping between the eigenvalues and the distribution parameters " + "instead of the log of the eigenvalues." 
+ ), + ) order = traits.Int( - argstr='-order %d', - units='NA', - desc= - ('The order of the polynomial fitting the surface. Order 1 is linear. ' - 'Order 2 (default) is quadratic.')) + argstr="-order %d", + units="NA", + desc=( + "The order of the polynomial fitting the surface. Order 1 is linear. " + "Order 2 (default) is quadratic." + ), + ) class SFLUTGenOutputSpec(TraitedSpec): - lut_one_fibre = File(exists=True, desc='PICo lut for one-fibre model') - lut_two_fibres = File(exists=True, desc='PICo lut for two-fibre model') + lut_one_fibre = File(exists=True, desc="PICo lut for one-fibre model") + lut_two_fibres = File(exists=True, desc="PICo lut for two-fibre model") class SFLUTGen(StdOutCommandLine): @@ -278,17 +320,20 @@ class SFLUTGen(StdOutCommandLine): >>> lutgen.inputs.info_file = 'PICO_calib.info' >>> lutgen.run() # doctest: +SKIP """ - _cmd = 'sflutgen' + + _cmd = "sflutgen" input_spec = SFLUTGenInputSpec output_spec = SFLUTGenOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs[ - 'lut_one_fibre'] = self.inputs.outputstem + '_oneFibreSurfaceCoeffs.Bdouble' - outputs[ - 'lut_two_fibres'] = self.inputs.outputstem + '_twoFibreSurfaceCoeffs.Bdouble' + outputs["lut_one_fibre"] = ( + self.inputs.outputstem + "_oneFibreSurfaceCoeffs.Bdouble" + ) + outputs["lut_two_fibres"] = ( + self.inputs.outputstem + "_twoFibreSurfaceCoeffs.Bdouble" + ) return outputs def _gen_outfilename(self): - return '/dev/null' + return "/dev/null" diff --git a/nipype/interfaces/camino/connectivity.py b/nipype/interfaces/camino/connectivity.py index 78172db9cc..5ec7fe8c63 100644 --- a/nipype/interfaces/camino/connectivity.py +++ b/nipype/interfaces/camino/connectivity.py @@ -2,43 +2,55 @@ import os from ...utils.filemanip import split_filename -from ..base import (traits, TraitedSpec, File, CommandLine, - CommandLineInputSpec, isdefined) +from ..base import ( + traits, + TraitedSpec, + File, + CommandLine, + CommandLineInputSpec, + isdefined, +) class ConmatInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='-inputfile %s', + argstr="-inputfile %s", mandatory=True, - desc='Streamlines as generated by the Track interface') + desc="Streamlines as generated by the Track interface", + ) target_file = File( exists=True, - argstr='-targetfile %s', + argstr="-targetfile %s", mandatory=True, - desc= - 'An image containing targets, as used in ProcStreamlines interface.') + desc="An image containing targets, as used in ProcStreamlines interface.", + ) scalar_file = File( exists=True, - argstr='-scalarfile %s', - desc=('Optional scalar file for computing tract-based statistics. ' - 'Must be in the same space as the target file.'), - requires=['tract_stat']) + argstr="-scalarfile %s", + desc=( + "Optional scalar file for computing tract-based statistics. " + "Must be in the same space as the target file." + ), + requires=["tract_stat"], + ) targetname_file = File( exists=True, - argstr='-targetnamefile %s', - desc= - ('Optional names of targets. This file should contain one entry per line, ' - 'with the target intensity followed by the name, separated by white space. ' - 'For example: ' - ' 1 some_brain_region ' - ' 2 some_other_region ' - 'These names will be used in the output. The names themselves should not ' - 'contain spaces or commas. The labels may be in any order but the output ' - 'matrices will be ordered by label intensity.')) + argstr="-targetnamefile %s", + desc=( + "Optional names of targets. 
This file should contain one entry per line, " + "with the target intensity followed by the name, separated by white space. " + "For example: " + " 1 some_brain_region " + " 2 some_other_region " + "These names will be used in the output. The names themselves should not " + "contain spaces or commas. The labels may be in any order but the output " + "matrices will be ordered by label intensity." + ), + ) tract_stat = traits.Enum( "mean", @@ -47,31 +59,38 @@ class ConmatInputSpec(CommandLineInputSpec): "sum", "median", "var", - argstr='-tractstat %s', - units='NA', + argstr="-tractstat %s", + units="NA", desc=("Tract statistic to use. See TractStats for other options."), - requires=['scalar_file'], - xor=['tract_prop']) + requires=["scalar_file"], + xor=["tract_prop"], + ) tract_prop = traits.Enum( "length", "endpointsep", - argstr='-tractstat %s', - units='NA', - xor=['tract_stat'], - desc=('Tract property average to compute in the connectivity matrix. ' - 'See TractStats for details.')) + argstr="-tractstat %s", + units="NA", + xor=["tract_stat"], + desc=( + "Tract property average to compute in the connectivity matrix. " + "See TractStats for details." + ), + ) output_root = File( - argstr='-outputroot %s', + argstr="-outputroot %s", genfile=True, - desc=('filename root prepended onto the names of the output files. ' - 'The extension will be determined from the input.')) + desc=( + "filename root prepended onto the names of the output files. " + "The extension will be determined from the input." + ), + ) class ConmatOutputSpec(TraitedSpec): - conmat_sc = File(exists=True, desc='Connectivity matrix in CSV file.') - conmat_ts = File(desc='Tract statistics in CSV file.') + conmat_sc = File(exists=True, desc="Connectivity matrix in CSV file.") + conmat_ts = File(desc="Tract statistics in CSV file.") class Conmat(CommandLine): @@ -149,15 +168,16 @@ class Conmat(CommandLine): >>> conmat.tract_stat = 'mean' >>> conmat.run() # doctest: +SKIP """ - _cmd = 'conmat' + + _cmd = "conmat" input_spec = ConmatInputSpec output_spec = ConmatOutputSpec def _list_outputs(self): outputs = self.output_spec().get() output_root = self._gen_outputroot() - outputs['conmat_sc'] = os.path.abspath(output_root + "sc.csv") - outputs['conmat_ts'] = os.path.abspath(output_root + "ts.csv") + outputs["conmat_sc"] = os.path.abspath(output_root + "sc.csv") + outputs["conmat_ts"] = os.path.abspath(output_root + "ts.csv") return outputs def _gen_outfilename(self): @@ -166,11 +186,11 @@ def _gen_outfilename(self): def _gen_outputroot(self): output_root = self.inputs.output_root if not isdefined(output_root): - output_root = self._gen_filename('output_root') + output_root = self._gen_filename("output_root") return output_root def _gen_filename(self, name): - if name == 'output_root': + if name == "output_root": _, filename, _ = split_filename(self.inputs.in_file) filename = filename + "_" return filename diff --git a/nipype/interfaces/camino/convert.py b/nipype/interfaces/camino/convert.py index 2ac96befaa..a5b4b70fb3 100644 --- a/nipype/interfaces/camino/convert.py +++ b/nipype/interfaces/camino/convert.py @@ -4,18 +4,27 @@ import glob from ...utils.filemanip import split_filename -from ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec, - File, StdOutCommandLine, OutputMultiPath, - StdOutCommandLineInputSpec, isdefined) +from ..base import ( + CommandLineInputSpec, + CommandLine, + traits, + TraitedSpec, + File, + StdOutCommandLine, + OutputMultiPath, + StdOutCommandLineInputSpec, + isdefined, +) class 
Image2VoxelInputSpec(StdOutCommandLineInputSpec): in_file = File( exists=True, - argstr='-4dimage %s', + argstr="-4dimage %s", mandatory=True, position=1, - desc='4d image file') + desc="4d image file", + ) # TODO convert list of files on the fly # imagelist = File(exists=True, argstr='-imagelist %s', # mandatory=True, position=1, @@ -31,16 +40,15 @@ class Image2VoxelInputSpec(StdOutCommandLineInputSpec): "int", "long", "double", - argstr='-outputdatatype %s', + argstr="-outputdatatype %s", position=2, - desc= - '"i.e. Bfloat". Can be "char", "short", "int", "long", "float" or "double"', - usedefault=True) + desc='"i.e. Bfloat". Can be "char", "short", "int", "long", "float" or "double"', + usedefault=True, + ) class Image2VoxelOutputSpec(TraitedSpec): - voxel_order = File( - exists=True, desc='path/name of 4D volume in voxel order') + voxel_order = File(exists=True, desc="path/name of 4D volume in voxel order") class Image2Voxel(StdOutCommandLine): @@ -59,71 +67,76 @@ class Image2Voxel(StdOutCommandLine): >>> img2vox.inputs.in_file = '4d_dwi.nii' >>> img2vox.run() # doctest: +SKIP """ - _cmd = 'image2voxel' + + _cmd = "image2voxel" input_spec = Image2VoxelInputSpec output_spec = Image2VoxelOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['voxel_order'] = os.path.abspath(self._gen_outfilename()) + outputs["voxel_order"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) - return name + '.B' + self.inputs.out_type + return name + ".B" + self.inputs.out_type class FSL2SchemeInputSpec(StdOutCommandLineInputSpec): bvec_file = File( exists=True, - argstr='-bvecfile %s', + argstr="-bvecfile %s", mandatory=True, position=1, - desc='b vector file') + desc="b vector file", + ) bval_file = File( exists=True, - argstr='-bvalfile %s', + argstr="-bvalfile %s", mandatory=True, position=2, - desc='b value file') + desc="b value file", + ) numscans = traits.Int( - argstr='-numscans %d', - units='NA', - desc= - "Output all measurements numerous (n) times, used when combining multiple scans from the same imaging session." + argstr="-numscans %d", + units="NA", + desc="Output all measurements numerous (n) times, used when combining multiple scans from the same imaging session.", ) interleave = traits.Bool( - argstr='-interleave', - desc="Interleave repeated scans. Only used with -numscans.") + argstr="-interleave", + desc="Interleave repeated scans. Only used with -numscans.", + ) bscale = traits.Float( - argstr='-bscale %d', - units='NA', - desc= - "Scaling factor to convert the b-values into different units. Default is 10^6." + argstr="-bscale %d", + units="NA", + desc="Scaling factor to convert the b-values into different units. Default is 10^6.", ) diffusiontime = traits.Float( - argstr='-diffusiontime %f', units='NA', desc="Diffusion time") + argstr="-diffusiontime %f", units="NA", desc="Diffusion time" + ) flipx = traits.Bool( - argstr='-flipx', desc="Negate the x component of all the vectors.") + argstr="-flipx", desc="Negate the x component of all the vectors." + ) flipy = traits.Bool( - argstr='-flipy', desc="Negate the y component of all the vectors.") + argstr="-flipy", desc="Negate the y component of all the vectors." + ) flipz = traits.Bool( - argstr='-flipz', desc="Negate the z component of all the vectors.") + argstr="-flipz", desc="Negate the z component of all the vectors." + ) usegradmod = traits.Bool( - argstr='-usegradmod', - desc= - "Use the gradient magnitude to scale b. 
This option has no effect if your gradient directions have unit magnitude." + argstr="-usegradmod", + desc="Use the gradient magnitude to scale b. This option has no effect if your gradient directions have unit magnitude.", ) class FSL2SchemeOutputSpec(TraitedSpec): - scheme = File(exists=True, desc='Scheme file') + scheme = File(exists=True, desc="Scheme file") class FSL2Scheme(StdOutCommandLine): @@ -140,80 +153,81 @@ class FSL2Scheme(StdOutCommandLine): >>> makescheme.run() # doctest: +SKIP """ - _cmd = 'fsl2scheme' + + _cmd = "fsl2scheme" input_spec = FSL2SchemeInputSpec output_spec = FSL2SchemeOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['scheme'] = os.path.abspath(self._gen_outfilename()) + outputs["scheme"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.bvec_file) - return name + '.scheme' + return name + ".scheme" class VtkStreamlinesInputSpec(StdOutCommandLineInputSpec): inputmodel = traits.Enum( - 'raw', - 'voxels', - argstr='-inputmodel %s', - desc='input model type (raw or voxels)', - usedefault=True) + "raw", + "voxels", + argstr="-inputmodel %s", + desc="input model type (raw or voxels)", + usedefault=True, + ) in_file = File( - exists=True, - argstr=' < %s', - mandatory=True, - position=-2, - desc='data file') + exists=True, argstr=" < %s", mandatory=True, position=-2, desc="data file" + ) voxeldims = traits.List( traits.Int, - desc='voxel dimensions in mm', - argstr='-voxeldims %s', + desc="voxel dimensions in mm", + argstr="-voxeldims %s", minlen=3, maxlen=3, position=4, - units='mm') + units="mm", + ) seed_file = File( exists=False, - argstr='-seedfile %s', + argstr="-seedfile %s", position=1, - desc='image containing seed points') + desc="image containing seed points", + ) target_file = File( exists=False, - argstr='-targetfile %s', + argstr="-targetfile %s", position=2, - desc='image containing integer-valued target regions') + desc="image containing integer-valued target regions", + ) scalar_file = File( exists=False, - argstr='-scalarfile %s', + argstr="-scalarfile %s", position=3, - desc='image that is in the same physical space as the tracts') + desc="image that is in the same physical space as the tracts", + ) colourorient = traits.Bool( - argstr='-colourorient', - desc= - "Each point on the streamline is coloured by the local orientation.") + argstr="-colourorient", + desc="Each point on the streamline is coloured by the local orientation.", + ) interpolatescalars = traits.Bool( - argstr='-interpolatescalars', - desc= - "the scalar value at each point on the streamline is calculated by trilinear interpolation" + argstr="-interpolatescalars", + desc="the scalar value at each point on the streamline is calculated by trilinear interpolation", ) interpolate = traits.Bool( - argstr='-interpolate', - desc= - "the scalar value at each point on the streamline is calculated by trilinear interpolation" + argstr="-interpolate", + desc="the scalar value at each point on the streamline is calculated by trilinear interpolation", ) class VtkStreamlinesOutputSpec(TraitedSpec): - vtk = File(exists=True, desc='Streamlines in VTK format') + vtk = File(exists=True, desc="Streamlines in VTK format") class VtkStreamlines(StdOutCommandLine): @@ -229,189 +243,183 @@ class VtkStreamlines(StdOutCommandLine): >>> vtk.inputs.voxeldims = [1,1,1] >>> vtk.run() # doctest: +SKIP """ - _cmd = 'vtkstreamlines' + + _cmd = "vtkstreamlines" input_spec = VtkStreamlinesInputSpec 
output_spec = VtkStreamlinesOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['vtk'] = os.path.abspath(self._gen_outfilename()) + outputs["vtk"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) - return name + '.vtk' + return name + ".vtk" class ProcStreamlinesInputSpec(StdOutCommandLineInputSpec): inputmodel = traits.Enum( - 'raw', - 'voxels', - argstr='-inputmodel %s', - desc='input model type (raw or voxels)', - usedefault=True) + "raw", + "voxels", + argstr="-inputmodel %s", + desc="input model type (raw or voxels)", + usedefault=True, + ) in_file = File( exists=True, - argstr='-inputfile %s', + argstr="-inputfile %s", mandatory=True, position=1, - desc='data file') + desc="data file", + ) maxtractpoints = traits.Int( - argstr='-maxtractpoints %d', - units='NA', - desc="maximum number of tract points") + argstr="-maxtractpoints %d", units="NA", desc="maximum number of tract points" + ) mintractpoints = traits.Int( - argstr='-mintractpoints %d', - units='NA', - desc="minimum number of tract points") + argstr="-mintractpoints %d", units="NA", desc="minimum number of tract points" + ) maxtractlength = traits.Int( - argstr='-maxtractlength %d', - units='mm', - desc="maximum length of tracts") + argstr="-maxtractlength %d", units="mm", desc="maximum length of tracts" + ) mintractlength = traits.Int( - argstr='-mintractlength %d', - units='mm', - desc="minimum length of tracts") + argstr="-mintractlength %d", units="mm", desc="minimum length of tracts" + ) datadims = traits.List( traits.Int, - desc='data dimensions in voxels', - argstr='-datadims %s', + desc="data dimensions in voxels", + argstr="-datadims %s", minlen=3, maxlen=3, - units='voxels') + units="voxels", + ) voxeldims = traits.List( traits.Int, - desc='voxel dimensions in mm', - argstr='-voxeldims %s', + desc="voxel dimensions in mm", + argstr="-voxeldims %s", minlen=3, maxlen=3, - units='mm') + units="mm", + ) seedpointmm = traits.List( traits.Int, - desc='The coordinates of a single seed point for tractography in mm', - argstr='-seedpointmm %s', + desc="The coordinates of a single seed point for tractography in mm", + argstr="-seedpointmm %s", minlen=3, maxlen=3, - units='mm') + units="mm", + ) seedpointvox = traits.List( traits.Int, - desc= - 'The coordinates of a single seed point for tractography in voxels', - argstr='-seedpointvox %s', + desc="The coordinates of a single seed point for tractography in voxels", + argstr="-seedpointvox %s", minlen=3, maxlen=3, - units='voxels') + units="voxels", + ) seedfile = File( - exists=False, - argstr='-seedfile %s', - desc='Image Containing Seed Points') + exists=False, argstr="-seedfile %s", desc="Image Containing Seed Points" + ) regionindex = traits.Int( - argstr='-regionindex %d', - units='mm', - desc="index of specific region to process") + argstr="-regionindex %d", units="mm", desc="index of specific region to process" + ) iterations = traits.Float( - argstr='-iterations %d', - units='NA', - desc= - "Number of streamlines generated for each seed. Not required when outputting streamlines, but needed to create PICo images. The default is 1 if the output is streamlines, and 5000 if the output is connection probability images." + argstr="-iterations %d", + units="NA", + desc="Number of streamlines generated for each seed. Not required when outputting streamlines, but needed to create PICo images. 
The default is 1 if the output is streamlines, and 5000 if the output is connection probability images.", ) targetfile = File( - exists=False, - argstr='-targetfile %s', - desc='Image containing target volumes.') + exists=False, argstr="-targetfile %s", desc="Image containing target volumes." + ) allowmultitargets = traits.Bool( - argstr='-allowmultitargets', - desc="Allows streamlines to connect to multiple target volumes.") + argstr="-allowmultitargets", + desc="Allows streamlines to connect to multiple target volumes.", + ) directional = traits.List( traits.Int, - desc= - 'Splits the streamlines at the seed point and computes separate connection probabilities for each segment. Streamline segments are grouped according to their dot product with the vector (X, Y, Z). The ideal vector will be tangential to the streamline trajectory at the seed, such that the streamline projects from the seed along (X, Y, Z) and -(X, Y, Z). However, it is only necessary for the streamline trajectory to not be orthogonal to (X, Y, Z).', - argstr='-directional %s', + desc="Splits the streamlines at the seed point and computes separate connection probabilities for each segment. Streamline segments are grouped according to their dot product with the vector (X, Y, Z). The ideal vector will be tangential to the streamline trajectory at the seed, such that the streamline projects from the seed along (X, Y, Z) and -(X, Y, Z). However, it is only necessary for the streamline trajectory to not be orthogonal to (X, Y, Z).", + argstr="-directional %s", minlen=3, maxlen=3, - units='NA') + units="NA", + ) waypointfile = File( exists=False, - argstr='-waypointfile %s', - desc= - 'Image containing waypoints. Waypoints are defined as regions of the image with the same intensity, where 0 is background and any value > 0 is a waypoint.' + argstr="-waypointfile %s", + desc="Image containing waypoints. Waypoints are defined as regions of the image with the same intensity, where 0 is background and any value > 0 is a waypoint.", ) truncateloops = traits.Bool( - argstr='-truncateloops', - desc= - "This option allows streamlines to enter a waypoint exactly once. After the streamline leaves the waypoint, it is truncated upon a second entry to the waypoint." + argstr="-truncateloops", + desc="This option allows streamlines to enter a waypoint exactly once. After the streamline leaves the waypoint, it is truncated upon a second entry to the waypoint.", ) discardloops = traits.Bool( - argstr='-discardloops', - desc= - "This option allows streamlines to enter a waypoint exactly once. After the streamline leaves the waypoint, the entire streamline is discarded upon a second entry to the waypoint." + argstr="-discardloops", + desc="This option allows streamlines to enter a waypoint exactly once. After the streamline leaves the waypoint, the entire streamline is discarded upon a second entry to the waypoint.", ) exclusionfile = File( exists=False, - argstr='-exclusionfile %s', - desc= - 'Image containing exclusion ROIs. This should be an Analyze 7.5 header / image file.hdr and file.img.' + argstr="-exclusionfile %s", + desc="Image containing exclusion ROIs. This should be an Analyze 7.5 header / image file.hdr and file.img.", ) truncateinexclusion = traits.Bool( - argstr='-truncateinexclusion', - desc="Retain segments of a streamline before entry to an exclusion ROI." 
+ argstr="-truncateinexclusion", + desc="Retain segments of a streamline before entry to an exclusion ROI.", ) endpointfile = File( exists=False, - argstr='-endpointfile %s', - desc= - 'Image containing endpoint ROIs. This should be an Analyze 7.5 header / image file.hdr and file.img.' + argstr="-endpointfile %s", + desc="Image containing endpoint ROIs. This should be an Analyze 7.5 header / image file.hdr and file.img.", ) resamplestepsize = traits.Float( - argstr='-resamplestepsize %d', - units='NA', - desc= - "Each point on a streamline is tested for entry into target, exclusion or waypoint volumes. If the length between points on a tract is not much smaller than the voxel length, then streamlines may pass through part of a voxel without being counted. To avoid this, the program resamples streamlines such that the step size is one tenth of the smallest voxel dimension in the image. This increases the size of raw or oogl streamline output and incurs some performance penalty. The resample resolution can be controlled with this option or disabled altogether by passing a negative step size or by passing the -noresample option." + argstr="-resamplestepsize %d", + units="NA", + desc="Each point on a streamline is tested for entry into target, exclusion or waypoint volumes. If the length between points on a tract is not much smaller than the voxel length, then streamlines may pass through part of a voxel without being counted. To avoid this, the program resamples streamlines such that the step size is one tenth of the smallest voxel dimension in the image. This increases the size of raw or oogl streamline output and incurs some performance penalty. The resample resolution can be controlled with this option or disabled altogether by passing a negative step size or by passing the -noresample option.", ) noresample = traits.Bool( - argstr='-noresample', - desc= - "Disables resampling of input streamlines. Resampling is automatically disabled if the input model is voxels." + argstr="-noresample", + desc="Disables resampling of input streamlines. Resampling is automatically disabled if the input model is voxels.", ) outputtracts = traits.Bool( - argstr='-outputtracts', - desc="Output streamlines in raw binary format.") + argstr="-outputtracts", desc="Output streamlines in raw binary format." 
+ ) outputroot = File( exists=False, - argstr='-outputroot %s', - desc='Prepended onto all output file names.') + argstr="-outputroot %s", + desc="Prepended onto all output file names.", + ) - gzip = traits.Bool( - argstr='-gzip', desc="save the output image in gzip format") + gzip = traits.Bool(argstr="-gzip", desc="save the output image in gzip format") outputcp = traits.Bool( - argstr='-outputcp', + argstr="-outputcp", desc="output the connection probability map (Analyze image, float)", - requires=['outputroot', 'seedfile']) + requires=["outputroot", "seedfile"], + ) outputsc = traits.Bool( - argstr='-outputsc', + argstr="-outputsc", desc="output the connection probability map (raw streamlines, int)", - requires=['outputroot', 'seedfile']) + requires=["outputroot", "seedfile"], + ) outputacm = traits.Bool( - argstr='-outputacm', - desc= - "output all tracts in a single connection probability map (Analyze image)", - requires=['outputroot', 'seedfile']) + argstr="-outputacm", + desc="output all tracts in a single connection probability map (Analyze image)", + requires=["outputroot", "seedfile"], + ) outputcbs = traits.Bool( - argstr='-outputcbs', - desc= - "outputs connectivity-based segmentation maps; requires target outputfile", - requires=['outputroot', 'targetfile', 'seedfile']) + argstr="-outputcbs", + desc="outputs connectivity-based segmentation maps; requires target outputfile", + requires=["outputroot", "targetfile", "seedfile"], + ) class ProcStreamlinesOutputSpec(TraitedSpec): - proc = File(exists=True, desc='Processed Streamlines') + proc = File(exists=True, desc="Processed Streamlines") outputroot_files = OutputMultiPath(File(exists=True)) @@ -430,18 +438,19 @@ class ProcStreamlines(StdOutCommandLine): >>> proc.inputs.in_file = 'tract_data.Bfloat' >>> proc.run() # doctest: +SKIP """ - _cmd = 'procstreamlines' + + _cmd = "procstreamlines" input_spec = ProcStreamlinesInputSpec output_spec = ProcStreamlinesOutputSpec def _format_arg(self, name, spec, value): - if name == 'outputroot': + if name == "outputroot": return spec.argstr % self._get_actual_outputroot(value) return super(ProcStreamlines, self)._format_arg(name, spec, value) def __init__(self, *args, **kwargs): - super(ProcStreamlines, self).__init__(*args, **kwargs) - self.outputroot_files = [] + super(ProcStreamlines, self).__init__(*args, **kwargs) + self.outputroot_files = [] def _run_interface(self, runtime): outputroot = self.inputs.outputroot @@ -452,53 +461,49 @@ def _run_interface(self, runtime): os.makedirs(base) new_runtime = super(ProcStreamlines, self)._run_interface(runtime) self.outputroot_files = glob.glob( - os.path.join(os.getcwd(), actual_outputroot + '*')) + os.path.join(os.getcwd(), actual_outputroot + "*") + ) return new_runtime else: new_runtime = super(ProcStreamlines, self)._run_interface(runtime) return new_runtime def _get_actual_outputroot(self, outputroot): - actual_outputroot = os.path.join('procstream_outfiles', outputroot) + actual_outputroot = os.path.join("procstream_outfiles", outputroot) return actual_outputroot def _list_outputs(self): outputs = self.output_spec().get() - outputs['proc'] = os.path.abspath(self._gen_outfilename()) - outputs['outputroot_files'] = self.outputroot_files + outputs["proc"] = os.path.abspath(self._gen_outfilename()) + outputs["outputroot_files"] = self.outputroot_files return outputs def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) - return name + '_proc' + return name + "_proc" class 
TractShredderInputSpec(StdOutCommandLineInputSpec): in_file = File( - exists=True, - argstr='< %s', - mandatory=True, - position=-2, - desc='tract file') + exists=True, argstr="< %s", mandatory=True, position=-2, desc="tract file" + ) offset = traits.Int( - argstr='%d', - units='NA', - desc='initial offset of offset tracts', - position=1) + argstr="%d", units="NA", desc="initial offset of offset tracts", position=1 + ) bunchsize = traits.Int( - argstr='%d', - units='NA', - desc='reads and outputs a group of bunchsize tracts', - position=2) + argstr="%d", + units="NA", + desc="reads and outputs a group of bunchsize tracts", + position=2, + ) - space = traits.Int( - argstr='%d', units='NA', desc='skips space tracts', position=3) + space = traits.Int(argstr="%d", units="NA", desc="skips space tracts", position=3) class TractShredderOutputSpec(TraitedSpec): - shredded = File(exists=True, desc='Shredded tract file') + shredded = File(exists=True, desc="Shredded tract file") class TractShredder(StdOutCommandLine): @@ -522,13 +527,14 @@ class TractShredder(StdOutCommandLine): >>> shred.inputs.space = 2 >>> shred.run() # doctest: +SKIP """ - _cmd = 'tractshredder' + + _cmd = "tractshredder" input_spec = TractShredderInputSpec output_spec = TractShredderOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['shredded'] = os.path.abspath(self._gen_outfilename()) + outputs["shredded"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): @@ -539,35 +545,38 @@ def _gen_outfilename(self): class DT2NIfTIInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='-inputfile %s', + argstr="-inputfile %s", mandatory=True, position=1, - desc='tract file') + desc="tract file", + ) output_root = File( - argstr='-outputroot %s', + argstr="-outputroot %s", position=2, genfile=True, - desc='filename root prepended onto the names of three output files.') + desc="filename root prepended onto the names of three output files.", + ) header_file = File( exists=True, - argstr='-header %s', + argstr="-header %s", mandatory=True, position=3, - desc=' A Nifti .nii or .hdr file containing the header information') + desc=" A Nifti .nii or .hdr file containing the header information", + ) class DT2NIfTIOutputSpec(TraitedSpec): - dt = File(exists=True, desc='diffusion tensors in NIfTI format') + dt = File(exists=True, desc="diffusion tensors in NIfTI format") exitcode = File( - exists=True, - desc='exit codes from Camino reconstruction in NIfTI format') + exists=True, desc="exit codes from Camino reconstruction in NIfTI format" + ) lns0 = File( - exists=True, - desc='estimated lns0 from Camino reconstruction in NIfTI format') + exists=True, desc="estimated lns0 from Camino reconstruction in NIfTI format" + ) class DT2NIfTI(CommandLine): @@ -576,7 +585,8 @@ class DT2NIfTI(CommandLine): Reads Camino diffusion tensors, and converts them to NIFTI format as three .nii files. 
""" - _cmd = 'dt2nii' + + _cmd = "dt2nii" input_spec = DT2NIfTIInputSpec output_spec = DT2NIfTIOutputSpec @@ -594,11 +604,11 @@ def _gen_outfilename(self): def _gen_outputroot(self): output_root = self.inputs.output_root if not isdefined(output_root): - output_root = self._gen_filename('output_root') + output_root = self._gen_filename("output_root") return output_root def _gen_filename(self, name): - if name == 'output_root': + if name == "output_root": _, filename, _ = split_filename(self.inputs.in_file) filename = filename + "_" return filename @@ -607,56 +617,55 @@ def _gen_filename(self, name): class NIfTIDT2CaminoInputSpec(StdOutCommandLineInputSpec): in_file = File( exists=True, - argstr='-inputfile %s', + argstr="-inputfile %s", mandatory=True, position=1, - desc= - 'A NIFTI-1 dataset containing diffusion tensors. The tensors are assumed to be ' - 'in lower-triangular order as specified by the NIFTI standard for the storage of ' - 'symmetric matrices. This file should be either a .nii or a .hdr file.' + desc="A NIFTI-1 dataset containing diffusion tensors. The tensors are assumed to be " + "in lower-triangular order as specified by the NIFTI standard for the storage of " + "symmetric matrices. This file should be either a .nii or a .hdr file.", ) s0_file = File( - argstr='-s0 %s', + argstr="-s0 %s", exists=True, - desc= - 'File containing the unweighted signal for each voxel, may be a raw binary ' - 'file (specify type with -inputdatatype) or a supported image file.') + desc="File containing the unweighted signal for each voxel, may be a raw binary " + "file (specify type with -inputdatatype) or a supported image file.", + ) lns0_file = File( - argstr='-lns0 %s', + argstr="-lns0 %s", exists=True, - desc= - 'File containing the log of the unweighted signal for each voxel, may be a ' - 'raw binary file (specify type with -inputdatatype) or a supported image file.' + desc="File containing the log of the unweighted signal for each voxel, may be a " + "raw binary file (specify type with -inputdatatype) or a supported image file.", ) bgmask = File( - argstr='-bgmask %s', + argstr="-bgmask %s", exists=True, - desc= - 'Binary valued brain / background segmentation, may be a raw binary file ' - '(specify type with -maskdatatype) or a supported image file.') + desc="Binary valued brain / background segmentation, may be a raw binary file " + "(specify type with -maskdatatype) or a supported image file.", + ) scaleslope = traits.Float( - argstr='-scaleslope %s', - desc='A value v in the diffusion tensor is scaled to v * s + i. This is ' - 'applied after any scaling specified by the input image. Default is 1.0.' + argstr="-scaleslope %s", + desc="A value v in the diffusion tensor is scaled to v * s + i. This is " + "applied after any scaling specified by the input image. Default is 1.0.", ) scaleinter = traits.Float( - argstr='-scaleinter %s', - desc='A value v in the diffusion tensor is scaled to v * s + i. This is ' - 'applied after any scaling specified by the input image. Default is 0.0.' + argstr="-scaleinter %s", + desc="A value v in the diffusion tensor is scaled to v * s + i. This is " + "applied after any scaling specified by the input image. 
Default is 0.0.", ) uppertriangular = traits.Bool( - argstr='-uppertriangular %s', - desc='Specifies input in upper-triangular (VTK style) order.') + argstr="-uppertriangular %s", + desc="Specifies input in upper-triangular (VTK style) order.", + ) class NIfTIDT2CaminoOutputSpec(TraitedSpec): - out_file = File(desc='diffusion tensors data in Camino format') + out_file = File(desc="diffusion tensors data in Camino format") class NIfTIDT2Camino(CommandLine): @@ -680,17 +689,18 @@ class NIfTIDT2Camino(CommandLine): to use the -uppertriangular option to convert these correctly. """ - _cmd = 'niftidt2camino' + + _cmd = "niftidt2camino" input_spec = NIfTIDT2CaminoInputSpec output_spec = NIfTIDT2CaminoOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs["out_file"] = self._gen_filename('out_file') + outputs["out_file"] = self._gen_filename("out_file") return outputs def _gen_filename(self, name): - if name == 'out_file': + if name == "out_file": _, filename, _ = split_filename(self.inputs.in_file) return filename @@ -698,33 +708,39 @@ def _gen_filename(self, name): class AnalyzeHeaderInputSpec(StdOutCommandLineInputSpec): in_file = File( exists=True, - argstr='< %s', + argstr="< %s", mandatory=True, position=1, - desc='Tensor-fitted data filename') + desc="Tensor-fitted data filename", + ) scheme_file = File( exists=True, - argstr='%s', + argstr="%s", position=2, - desc=('Camino scheme file (b values / vectors, ' - 'see camino.fsl2scheme)')) + desc=("Camino scheme file (b values / vectors, " "see camino.fsl2scheme)"), + ) readheader = File( exists=True, - argstr='-readheader %s', + argstr="-readheader %s", position=3, - desc=('Reads header information from file and prints to ' - 'stdout. If this option is not specified, then the ' - 'program writes a header based on the other ' - 'arguments.')) + desc=( + "Reads header information from file and prints to " + "stdout. If this option is not specified, then the " + "program writes a header based on the other " + "arguments." + ), + ) printimagedims = File( exists=True, - argstr='-printimagedims %s', + argstr="-printimagedims %s", position=3, - desc=('Prints image data and voxel dimensions as ' - 'Camino arguments and exits.')) + desc=( + "Prints image data and voxel dimensions as " "Camino arguments and exits." + ), + ) # How do we implement both file and enum (for the program) in one argument? # Is this option useful anyway? @@ -734,143 +750,170 @@ class AnalyzeHeaderInputSpec(StdOutCommandLineInputSpec): # vcthreshselect, pdview, track. printprogargs = File( exists=True, - argstr='-printprogargs %s', + argstr="-printprogargs %s", position=3, - desc=('Prints data dimension (and type, if relevant) ' - 'arguments for a specific Camino program, where ' - 'prog is one of shredder, scanner2voxel, ' - 'vcthreshselect, pdview, track.')) + desc=( + "Prints data dimension (and type, if relevant) " + "arguments for a specific Camino program, where " + "prog is one of shredder, scanner2voxel, " + "vcthreshselect, pdview, track." 
+ ), + ) printintelbyteorder = File( exists=True, - argstr='-printintelbyteorder %s', + argstr="-printintelbyteorder %s", position=3, - desc=('Prints 1 if the header is little-endian, ' - '0 otherwise.')) + desc=("Prints 1 if the header is little-endian, " "0 otherwise."), + ) printbigendian = File( exists=True, - argstr='-printbigendian %s', + argstr="-printbigendian %s", position=3, - desc=('Prints 1 if the header is big-endian, 0 ' - 'otherwise.')) + desc=("Prints 1 if the header is big-endian, 0 " "otherwise."), + ) initfromheader = File( exists=True, - argstr='-initfromheader %s', + argstr="-initfromheader %s", position=3, - desc=('Reads header information from file and ' - 'intializes a new header with the values read ' - 'from the file. You may replace any ' - 'combination of fields in the new header by ' - 'specifying subsequent options.')) + desc=( + "Reads header information from file and " + "intializes a new header with the values read " + "from the file. You may replace any " + "combination of fields in the new header by " + "specifying subsequent options." + ), + ) data_dims = traits.List( traits.Int, - desc='data dimensions in voxels', - argstr='-datadims %s', + desc="data dimensions in voxels", + argstr="-datadims %s", minlen=3, maxlen=3, - units='voxels') + units="voxels", + ) voxel_dims = traits.List( traits.Float, - desc='voxel dimensions in mm', - argstr='-voxeldims %s', + desc="voxel dimensions in mm", + argstr="-voxeldims %s", minlen=3, maxlen=3, - units='mm') + units="mm", + ) centre = traits.List( traits.Int, - argstr='-centre %s', + argstr="-centre %s", minlen=3, maxlen=3, - units='mm', - desc=('Voxel specifying origin of Talairach ' - 'coordinate system for SPM, default [0 0 0].')) + units="mm", + desc=( + "Voxel specifying origin of Talairach " + "coordinate system for SPM, default [0 0 0]." + ), + ) picoseed = traits.List( traits.Int, - argstr='-picoseed %s', + argstr="-picoseed %s", minlen=3, maxlen=3, - desc=('Voxel specifying the seed (for PICo maps), ' - 'default [0 0 0].'), - units='mm') + desc=("Voxel specifying the seed (for PICo maps), " "default [0 0 0]."), + units="mm", + ) nimages = traits.Int( - argstr='-nimages %d', - units='NA', - desc="Number of images in the img file. Default 1.") + argstr="-nimages %d", + units="NA", + desc="Number of images in the img file. Default 1.", + ) datatype = traits.Enum( - 'byte', - 'char', - '[u]short', - '[u]int', - 'float', - 'complex', - 'double', - argstr='-datatype %s', - desc=('The char datatype is 8 bit (not the 16 bit ' - 'char of Java), as specified by the Analyze ' - '7.5 standard. The byte, ushort and uint ' - 'types are not part of the Analyze ' - 'specification but are supported by SPM.'), - mandatory=True) + "byte", + "char", + "[u]short", + "[u]int", + "float", + "complex", + "double", + argstr="-datatype %s", + desc=( + "The char datatype is 8 bit (not the 16 bit " + "char of Java), as specified by the Analyze " + "7.5 standard. The byte, ushort and uint " + "types are not part of the Analyze " + "specification but are supported by SPM." + ), + mandatory=True, + ) offset = traits.Int( - argstr='-offset %d', - units='NA', - desc=('According to the Analyze 7.5 standard, this is ' - 'the byte offset in the .img file at which ' - 'voxels start. 
This value can be negative to ' - 'specify that the absolute value is applied for ' - 'every image in the file.')) + argstr="-offset %d", + units="NA", + desc=( + "According to the Analyze 7.5 standard, this is " + "the byte offset in the .img file at which " + "voxels start. This value can be negative to " + "specify that the absolute value is applied for " + "every image in the file." + ), + ) greylevels = traits.List( traits.Int, - argstr='-gl %s', + argstr="-gl %s", minlen=2, maxlen=2, - desc=('Minimum and maximum greylevels. Stored as ' - 'shorts in the header.'), - units='NA') + desc=("Minimum and maximum greylevels. Stored as " "shorts in the header."), + units="NA", + ) scaleslope = traits.Float( - argstr='-scaleslope %d', - units='NA', - desc=('Intensities in the image are scaled by ' - 'this factor by SPM and MRICro. Default is ' - '1.0.')) + argstr="-scaleslope %d", + units="NA", + desc=( + "Intensities in the image are scaled by " + "this factor by SPM and MRICro. Default is " + "1.0." + ), + ) scaleinter = traits.Float( - argstr='-scaleinter %d', - units='NA', - desc=('Constant to add to the image intensities. ' - 'Used by SPM and MRIcro.')) + argstr="-scaleinter %d", + units="NA", + desc=("Constant to add to the image intensities. " "Used by SPM and MRIcro."), + ) description = traits.String( - argstr='-description %s', - desc=('Short description - No spaces, max ' - 'length 79 bytes. Will be null ' - 'terminated automatically.')) + argstr="-description %s", + desc=( + "Short description - No spaces, max " + "length 79 bytes. Will be null " + "terminated automatically." + ), + ) intelbyteorder = traits.Bool( - argstr='-intelbyteorder', - desc=("Write header in intel byte order " - "(little-endian).")) + argstr="-intelbyteorder", + desc=("Write header in intel byte order " "(little-endian)."), + ) networkbyteorder = traits.Bool( - argstr='-networkbyteorder', - desc=("Write header in network byte order " - "(big-endian). This is the default " - "for new headers.")) + argstr="-networkbyteorder", + desc=( + "Write header in network byte order " + "(big-endian). This is the default " + "for new headers." 
+ ), + ) class AnalyzeHeaderOutputSpec(TraitedSpec): - header = File(exists=True, desc='Analyze header') + header = File(exists=True, desc="Analyze header") class AnalyzeHeader(StdOutCommandLine): @@ -897,13 +940,14 @@ class AnalyzeHeader(StdOutCommandLine): >>> hdr.inputs.voxel_dims = [1,1,1] >>> hdr.run() # doctest: +SKIP """ - _cmd = 'analyzeheader' + + _cmd = "analyzeheader" input_spec = AnalyzeHeaderInputSpec output_spec = AnalyzeHeaderOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['header'] = os.path.abspath(self._gen_outfilename()) + outputs["header"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): @@ -914,29 +958,28 @@ def _gen_outfilename(self): class ShredderInputSpec(StdOutCommandLineInputSpec): in_file = File( exists=True, - argstr='< %s', + argstr="< %s", mandatory=True, position=-2, - desc='raw binary data file') + desc="raw binary data file", + ) offset = traits.Int( - argstr='%d', - units='NA', - desc='initial offset of offset bytes', - position=1) + argstr="%d", units="NA", desc="initial offset of offset bytes", position=1 + ) chunksize = traits.Int( - argstr='%d', - units='NA', - desc='reads and outputs a chunk of chunksize bytes', - position=2) + argstr="%d", + units="NA", + desc="reads and outputs a chunk of chunksize bytes", + position=2, + ) - space = traits.Int( - argstr='%d', units='NA', desc='skips space bytes', position=3) + space = traits.Int(argstr="%d", units="NA", desc="skips space bytes", position=3) class ShredderOutputSpec(TraitedSpec): - shredded = File(exists=True, desc='Shredded binary data file') + shredded = File(exists=True, desc="Shredded binary data file") class Shredder(StdOutCommandLine): @@ -962,13 +1005,14 @@ class Shredder(StdOutCommandLine): >>> shred.inputs.space = 2 >>> shred.run() # doctest: +SKIP """ - _cmd = 'shredder' + + _cmd = "shredder" input_spec = ShredderInputSpec output_spec = ShredderOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['shredded_file'] = os.path.abspath(self._gen_outfilename()) + outputs["shredded_file"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): diff --git a/nipype/interfaces/camino/dti.py b/nipype/interfaces/camino/dti.py index 703b98f9fb..6a17271bcf 100644 --- a/nipype/interfaces/camino/dti.py +++ b/nipype/interfaces/camino/dti.py @@ -3,45 +3,56 @@ import os from ...utils.filemanip import split_filename -from ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec, - File, Directory, StdOutCommandLine, - StdOutCommandLineInputSpec, isdefined, InputMultiPath) +from ..base import ( + CommandLineInputSpec, + CommandLine, + traits, + TraitedSpec, + File, + Directory, + StdOutCommandLine, + StdOutCommandLineInputSpec, + isdefined, + InputMultiPath, +) class DTIFitInputSpec(StdOutCommandLineInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=1, - desc='voxel-order data filename') + desc="voxel-order data filename", + ) bgmask = File( - argstr='-bgmask %s', + argstr="-bgmask %s", exists=True, - desc= - ('Provides the name of a file containing a background mask computed using, ' - 'for example, FSL bet2 program. The mask file contains zero in background ' - 'voxels and non-zero in foreground.')) + desc=( + "Provides the name of a file containing a background mask computed using, " + "for example, FSL bet2 program. The mask file contains zero in background " + "voxels and non-zero in foreground." 
+ ), + ) scheme_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=2, - desc='Camino scheme file (b values / vectors, see camino.fsl2scheme)') + desc="Camino scheme file (b values / vectors, see camino.fsl2scheme)", + ) non_linear = traits.Bool( - argstr='-nonlinear', + argstr="-nonlinear", position=3, - desc= - "Use non-linear fitting instead of the default linear regression to the log measurements. " + desc="Use non-linear fitting instead of the default linear regression to the log measurements. ", ) class DTIFitOutputSpec(TraitedSpec): - tensor_fitted = File( - exists=True, desc='path/name of 4D volume in voxel order') + tensor_fitted = File(exists=True, desc="path/name of 4D volume in voxel order") class DTIFit(StdOutCommandLine): @@ -66,95 +77,108 @@ class DTIFit(StdOutCommandLine): >>> fit.inputs.in_file = 'tensor_fitted_data.Bdouble' >>> fit.run() # doctest: +SKIP """ - _cmd = 'dtfit' + + _cmd = "dtfit" input_spec = DTIFitInputSpec output_spec = DTIFitOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['tensor_fitted'] = os.path.abspath(self._gen_outfilename()) + outputs["tensor_fitted"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) - return name + '_DT.Bdouble' + return name + "_DT.Bdouble" class DTMetricInputSpec(CommandLineInputSpec): eigen_data = File( exists=True, - argstr='-inputfile %s', + argstr="-inputfile %s", mandatory=True, - desc='voxel-order data filename') + desc="voxel-order data filename", + ) metric = traits.Enum( - 'fa', - 'md', - 'rd', - 'l1', - 'l2', - 'l3', - 'tr', - 'ra', - '2dfa', - 'cl', - 'cp', - 'cs', - argstr='-stat %s', + "fa", + "md", + "rd", + "l1", + "l2", + "l3", + "tr", + "ra", + "2dfa", + "cl", + "cp", + "cs", + argstr="-stat %s", mandatory=True, - desc= - ('Specifies the metric to compute. Possible choices are: ' - '"fa", "md", "rd", "l1", "l2", "l3", "tr", "ra", "2dfa", "cl", "cp" or "cs".' - )) + desc=( + "Specifies the metric to compute. Possible choices are: " + '"fa", "md", "rd", "l1", "l2", "l3", "tr", "ra", "2dfa", "cl", "cp" or "cs".' + ), + ) inputdatatype = traits.Enum( - 'double', - 'float', - 'long', - 'int', - 'short', - 'char', - argstr='-inputdatatype %s', + "double", + "float", + "long", + "int", + "short", + "char", + argstr="-inputdatatype %s", usedefault=True, - desc=('Specifies the data type of the input data. ' - 'The data type can be any of the following strings: ' - '"char", "short", "int", "long", "float" or "double".' - 'Default is double data type')) + desc=( + "Specifies the data type of the input data. " + "The data type can be any of the following strings: " + '"char", "short", "int", "long", "float" or "double".' + "Default is double data type" + ), + ) outputdatatype = traits.Enum( - 'double', - 'float', - 'long', - 'int', - 'short', - 'char', - argstr='-outputdatatype %s', + "double", + "float", + "long", + "int", + "short", + "char", + argstr="-outputdatatype %s", usedefault=True, - desc=('Specifies the data type of the output data. ' - 'The data type can be any of the following strings: ' - '"char", "short", "int", "long", "float" or "double".' - 'Default is double data type')) + desc=( + "Specifies the data type of the output data. " + "The data type can be any of the following strings: " + '"char", "short", "int", "long", "float" or "double".' 
+ "Default is double data type" + ), + ) data_header = File( - argstr='-header %s', + argstr="-header %s", exists=True, desc=( - 'A Nifti .nii or .nii.gz file containing the header information. ' - 'Usually this will be the header of the raw data file from which ' - 'the diffusion tensors were reconstructed.')) + "A Nifti .nii or .nii.gz file containing the header information. " + "Usually this will be the header of the raw data file from which " + "the diffusion tensors were reconstructed." + ), + ) outputfile = File( - argstr='-outputfile %s', + argstr="-outputfile %s", genfile=True, - desc= - ('Output name. Output will be a .nii.gz file if data_header is provided and' - 'in voxel order with outputdatatype datatype (default: double) otherwise.' - )) + desc=( + "Output name. Output will be a .nii.gz file if data_header is provided and" + "in voxel order with outputdatatype datatype (default: double) otherwise." + ), + ) class DTMetricOutputSpec(TraitedSpec): metric_stats = File( - exists=True, desc='Diffusion Tensor statistics of the chosen metric') + exists=True, desc="Diffusion Tensor statistics of the chosen metric" + ) class DTMetric(CommandLine): @@ -192,13 +216,14 @@ class DTMetric(CommandLine): >>> dtmetric.inputs.outputdatatype = 'float' >>> dtmetric.run() # doctest: +SKIP """ - _cmd = 'dtshape' + + _cmd = "dtshape" input_spec = DTMetricInputSpec output_spec = DTMetricOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['metric_stats'] = os.path.abspath(self._gen_outfilename()) + outputs["metric_stats"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): @@ -207,18 +232,18 @@ def _gen_outfilename(self): def _gen_outputfile(self): outputfile = self.inputs.outputfile if not isdefined(outputfile): - outputfile = self._gen_filename('outputfile') + outputfile = self._gen_filename("outputfile") return outputfile def _gen_filename(self, name): - if name == 'outputfile': + if name == "outputfile": _, name, _ = split_filename(self.inputs.eigen_data) metric = self.inputs.metric datatype = self.inputs.outputdatatype if isdefined(self.inputs.data_header): - filename = name + '_' + metric + '.nii.gz' + filename = name + "_" + metric + ".nii.gz" else: - filename = name + '_' + metric + '.B' + datatype + filename = name + "_" + metric + ".B" + datatype return filename @@ -228,132 +253,129 @@ def _gen_model_options(): # @NoSelf Generate all possible permutations of < multi - tensor > < single - tensor > options """ - single_tensor = [ - 'dt', 'restore', 'algdt', 'nldt_pos', 'nldt', 'ldt_wtd' - ] + single_tensor = ["dt", "restore", "algdt", "nldt_pos", "nldt", "ldt_wtd"] multi_tensor = [ - 'cylcyl', 'cylcyl_eq', 'pospos', 'pospos_eq', 'poscyl', - 'poscyl_eq', 'cylcylcyl', 'cylcylcyl_eq', 'pospospos', - 'pospospos_eq', 'posposcyl', 'posposcyl_eq', 'poscylcyl', - 'poscylcyl_eq' + "cylcyl", + "cylcyl_eq", + "pospos", + "pospos_eq", + "poscyl", + "poscyl_eq", + "cylcylcyl", + "cylcylcyl_eq", + "pospospos", + "pospospos_eq", + "posposcyl", + "posposcyl_eq", + "poscylcyl", + "poscylcyl_eq", ] - other = ['adc', 'ball_stick'] + other = ["adc", "ball_stick"] model_list = single_tensor model_list.extend(other) - model_list.extend([ - multi + ' ' + single for multi in multi_tensor - for single in single_tensor - ]) + model_list.extend( + [multi + " " + single for multi in multi_tensor for single in single_tensor] + ) return model_list model = traits.Enum( _gen_model_options(), - argstr='-model %s', + argstr="-model %s", mandatory=True, - 
desc='Specifies the model to be fit to the data.') + desc="Specifies the model to be fit to the data.", + ) in_file = File( exists=True, - argstr='-inputfile %s', + argstr="-inputfile %s", mandatory=True, - desc='voxel-order data filename') + desc="voxel-order data filename", + ) inputdatatype = traits.Enum( - 'float', - 'char', - 'short', - 'int', - 'long', - 'double', - argstr='-inputdatatype %s', - desc= - 'Specifies the data type of the input file: "char", "short", "int", "long", "float" or "double". The input file must have BIG-ENDIAN ordering. By default, the input type is "float".' + "float", + "char", + "short", + "int", + "long", + "double", + argstr="-inputdatatype %s", + desc='Specifies the data type of the input file: "char", "short", "int", "long", "float" or "double". The input file must have BIG-ENDIAN ordering. By default, the input type is "float".', ) scheme_file = File( exists=True, - argstr='-schemefile %s', + argstr="-schemefile %s", mandatory=True, - desc='Camino scheme file (b values / vectors, see camino.fsl2scheme)') + desc="Camino scheme file (b values / vectors, see camino.fsl2scheme)", + ) - outputfile = File( - argstr='-outputfile %s', desc='Filename of the output file.') + outputfile = File(argstr="-outputfile %s", desc="Filename of the output file.") outlier = File( - argstr='-outliermap %s', + argstr="-outliermap %s", exists=True, - desc= - 'Specifies the name of the file to contain the outlier map generated by the RESTORE algorithm.' + desc="Specifies the name of the file to contain the outlier map generated by the RESTORE algorithm.", ) noisemap = File( - argstr='-noisemap %s', + argstr="-noisemap %s", exists=True, - desc= - 'Specifies the name of the file to contain the estimated noise variance on the diffusion-weighted signal, generated by a weighted tensor fit. The data type of this file is big-endian double.' + desc="Specifies the name of the file to contain the estimated noise variance on the diffusion-weighted signal, generated by a weighted tensor fit. The data type of this file is big-endian double.", ) residualmap = File( - argstr='-residualmap %s', + argstr="-residualmap %s", exists=True, - desc= - 'Specifies the name of the file to contain the weighted residual errors after computing a weighted linear tensor fit. One value is produced per measurement, in voxel order.The data type of this file is big-endian double. Images of the residuals for each measurement can be extracted with shredder.' + desc="Specifies the name of the file to contain the weighted residual errors after computing a weighted linear tensor fit. One value is produced per measurement, in voxel order.The data type of this file is big-endian double. Images of the residuals for each measurement can be extracted with shredder.", ) sigma = traits.Float( - argstr='-sigma %G', - desc= - 'Specifies the standard deviation of the noise in the data. Required by the RESTORE algorithm.' + argstr="-sigma %G", + desc="Specifies the standard deviation of the noise in the data. Required by the RESTORE algorithm.", ) bgthresh = traits.Float( - argstr='-bgthresh %G', - desc= - 'Sets a threshold on the average q=0 measurement to separate foreground and background. The program does not process background voxels, but outputs the same number of values in background voxels and foreground voxels. Each value is zero in background voxels apart from the exit code which is -1.' + argstr="-bgthresh %G", + desc="Sets a threshold on the average q=0 measurement to separate foreground and background. 
The program does not process background voxels, but outputs the same number of values in background voxels and foreground voxels. Each value is zero in background voxels apart from the exit code which is -1.", ) bgmask = File( - argstr='-bgmask %s', + argstr="-bgmask %s", exists=True, - desc= - 'Provides the name of a file containing a background mask computed using, for example, FSL\'s bet2 program. The mask file contains zero in background voxels and non-zero in foreground.' + desc="Provides the name of a file containing a background mask computed using, for example, FSL's bet2 program. The mask file contains zero in background voxels and non-zero in foreground.", ) cfthresh = traits.Float( - argstr='-csfthresh %G', - desc= - 'Sets a threshold on the average q=0 measurement to determine which voxels are CSF. This program does not treat CSF voxels any different to other voxels.' + argstr="-csfthresh %G", + desc="Sets a threshold on the average q=0 measurement to determine which voxels are CSF. This program does not treat CSF voxels any different to other voxels.", ) fixedmodq = traits.List( traits.Float, - argstr='-fixedmod %s', + argstr="-fixedmod %s", minlen=4, maxlen=4, - desc= - 'Specifies a spherical acquisition scheme with M measurements with q=0 and N measurements with |q|=Q and diffusion time tau. The N measurements with |q|=Q have unique directions. The program reads in the directions from the files in directory PointSets.' + desc="Specifies a spherical acquisition scheme with M measurements with q=0 and N measurements with |q|=Q and diffusion time tau. The N measurements with |q|=Q have unique directions. The program reads in the directions from the files in directory PointSets.", ) fixedbvalue = traits.List( traits.Float, - argstr='-fixedbvalue %s', + argstr="-fixedbvalue %s", minlen=3, maxlen=3, - desc= - 'As above, but specifies . The resulting scheme is the same whether you specify b directly or indirectly using -fixedmodq.' + desc="As above, but specifies . The resulting scheme is the same whether you specify b directly or indirectly using -fixedmodq.", ) tau = traits.Float( - argstr='-tau %G', - desc= - 'Sets the diffusion time separately. This overrides the diffusion time specified in a scheme file or by a scheme index for both the acquisition scheme and in the data synthesis.' + argstr="-tau %G", + desc="Sets the diffusion time separately. This overrides the diffusion time specified in a scheme file or by a scheme index for both the acquisition scheme and in the data synthesis.", ) class ModelFitOutputSpec(TraitedSpec): - fitted_data = File( - exists=True, desc='output file of 4D volume in voxel order') + fitted_data = File(exists=True, desc="output file of 4D volume in voxel order") class ModelFit(StdOutCommandLine): @@ -376,98 +398,99 @@ class ModelFit(StdOutCommandLine): >>> fit.inputs.in_file = 'tensor_fitted_data.Bdouble' >>> fit.run() # doctest: +SKIP """ - _cmd = 'modelfit' + + _cmd = "modelfit" input_spec = ModelFitInputSpec output_spec = ModelFitOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['fitted_data'] = os.path.abspath(self._gen_outfilename()) + outputs["fitted_data"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) - return name + '_fit.Bdouble' + return name + "_fit.Bdouble" class DTLUTGenInputSpec(StdOutCommandLineInputSpec): lrange = traits.List( traits.Float, - desc='Index to one-tensor LUTs. This is the ratio L1/L3 and L2 / L3.' 
- 'The LUT is square, with half the values calculated (because L2 / L3 cannot be less than L1 / L3 by definition).' - 'The minimum must be >= 1. For comparison, a ratio L1 / L3 = 10 with L2 / L3 = 1 corresponds to an FA of 0.891, ' - 'and L1 / L3 = 15 with L2 / L3 = 1 corresponds to an FA of 0.929. The default range is 1 to 10.', - argstr='-lrange %s', + desc="Index to one-tensor LUTs. This is the ratio L1/L3 and L2 / L3." + "The LUT is square, with half the values calculated (because L2 / L3 cannot be less than L1 / L3 by definition)." + "The minimum must be >= 1. For comparison, a ratio L1 / L3 = 10 with L2 / L3 = 1 corresponds to an FA of 0.891, " + "and L1 / L3 = 15 with L2 / L3 = 1 corresponds to an FA of 0.929. The default range is 1 to 10.", + argstr="-lrange %s", minlen=2, maxlen=2, position=1, - units='NA') + units="NA", + ) frange = traits.List( traits.Float, - desc='Index to two-tensor LUTs. This is the fractional anisotropy' - ' of the two tensors. The default is 0.3 to 0.94', - argstr='-frange %s', + desc="Index to two-tensor LUTs. This is the fractional anisotropy" + " of the two tensors. The default is 0.3 to 0.94", + argstr="-frange %s", minlen=2, maxlen=2, position=1, - units='NA') + units="NA", + ) step = traits.Float( - argstr='-step %f', - units='NA', - desc='Distance between points in the LUT.' - 'For example, if lrange is 1 to 10 and the step is 0.1, LUT entries will be computed ' - 'at L1 / L3 = 1, 1.1, 1.2 ... 10.0 and at L2 / L3 = 1.0, 1.1 ... L1 / L3.' - 'For single tensor LUTs, the default step is 0.2, for two-tensor LUTs it is 0.02.' + argstr="-step %f", + units="NA", + desc="Distance between points in the LUT." + "For example, if lrange is 1 to 10 and the step is 0.1, LUT entries will be computed " + "at L1 / L3 = 1, 1.1, 1.2 ... 10.0 and at L2 / L3 = 1.0, 1.1 ... L1 / L3." + "For single tensor LUTs, the default step is 0.2, for two-tensor LUTs it is 0.02.", ) samples = traits.Int( - argstr='-samples %d', - units='NA', - desc= - 'The number of synthetic measurements to generate at each point in the LUT. The default is 2000.' + argstr="-samples %d", + units="NA", + desc="The number of synthetic measurements to generate at each point in the LUT. The default is 2000.", ) snr = traits.Float( - argstr='-snr %f', - units='NA', - desc='The signal to noise ratio of the unweighted (q = 0) measurements.' - 'This should match the SNR (in white matter) of the images that the LUTs are used with.' + argstr="-snr %f", + units="NA", + desc="The signal to noise ratio of the unweighted (q = 0) measurements." + "This should match the SNR (in white matter) of the images that the LUTs are used with.", ) bingham = traits.Bool( - argstr='-bingham', - desc="Compute a LUT for the Bingham PDF. This is the default.") + argstr="-bingham", + desc="Compute a LUT for the Bingham PDF. This is the default.", + ) - acg = traits.Bool(argstr='-acg', desc="Compute a LUT for the ACG PDF.") + acg = traits.Bool(argstr="-acg", desc="Compute a LUT for the ACG PDF.") - watson = traits.Bool( - argstr='-watson', desc="Compute a LUT for the Watson PDF.") + watson = traits.Bool(argstr="-watson", desc="Compute a LUT for the Watson PDF.") inversion = traits.Int( - argstr='-inversion %d', - units='NA', - desc= - 'Index of the inversion to use. The default is 1 (linear single tensor inversion).' + argstr="-inversion %d", + units="NA", + desc="Index of the inversion to use. 
The default is 1 (linear single tensor inversion).", ) trace = traits.Float( - argstr='-trace %G', - units='NA', - desc= - 'Trace of the diffusion tensor(s) used in the test function in the LUT generation. The default is 2100E-12 m^2 s^-1.' + argstr="-trace %G", + units="NA", + desc="Trace of the diffusion tensor(s) used in the test function in the LUT generation. The default is 2100E-12 m^2 s^-1.", ) scheme_file = File( - argstr='-schemefile %s', + argstr="-schemefile %s", mandatory=True, position=2, - desc='The scheme file of the images to be processed using this LUT.') + desc="The scheme file of the images to be processed using this LUT.", + ) class DTLUTGenOutputSpec(TraitedSpec): - dtLUT = File(exists=True, desc='Lookup Table') + dtLUT = File(exists=True, desc="Lookup Table") class DTLUTGen(StdOutCommandLine): @@ -492,84 +515,87 @@ class DTLUTGen(StdOutCommandLine): >>> dtl.inputs.scheme_file = 'A.scheme' >>> dtl.run() # doctest: +SKIP """ - _cmd = 'dtlutgen' + + _cmd = "dtlutgen" input_spec = DTLUTGenInputSpec output_spec = DTLUTGenOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['dtLUT'] = os.path.abspath(self._gen_outfilename()) + outputs["dtLUT"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.scheme_file) - return name + '.dat' + return name + ".dat" class PicoPDFsInputSpec(StdOutCommandLineInputSpec): in_file = File( exists=True, - argstr='< %s', + argstr="< %s", mandatory=True, position=1, - desc='voxel-order data filename') + desc="voxel-order data filename", + ) inputmodel = traits.Enum( - 'dt', - 'multitensor', - 'pds', - argstr='-inputmodel %s', + "dt", + "multitensor", + "pds", + argstr="-inputmodel %s", position=2, - desc='input model type', - usedefault=True) + desc="input model type", + usedefault=True, + ) luts = InputMultiPath( File(exists=True), - argstr='-luts %s', + argstr="-luts %s", mandatory=True, - desc='Files containing the lookup tables.' - 'For tensor data, one lut must be specified for each type of inversion used in the image (one-tensor, two-tensor, three-tensor).' - 'For pds, the number of LUTs must match -numpds (it is acceptable to use the same LUT several times - see example, above).' - 'These LUTs may be generated with dtlutgen.') + desc="Files containing the lookup tables." + "For tensor data, one lut must be specified for each type of inversion used in the image (one-tensor, two-tensor, three-tensor)." + "For pds, the number of LUTs must match -numpds (it is acceptable to use the same LUT several times - see example, above)." + "These LUTs may be generated with dtlutgen.", + ) pdf = traits.Enum( - 'bingham', - 'watson', - 'acg', - argstr='-pdf %s', + "bingham", + "watson", + "acg", + argstr="-pdf %s", position=4, - desc=' Specifies the PDF to use. There are three choices:' - 'watson - The Watson distribution. This distribution is rotationally symmetric.' - 'bingham - The Bingham distributionn, which allows elliptical probability density contours.' - 'acg - The Angular Central Gaussian distribution, which also allows elliptical probability density contours', - usedefault=True) + desc=" Specifies the PDF to use. There are three choices:" + "watson - The Watson distribution. This distribution is rotationally symmetric." + "bingham - The Bingham distributionn, which allows elliptical probability density contours." 
+ "acg - The Angular Central Gaussian distribution, which also allows elliptical probability density contours", + usedefault=True, + ) directmap = traits.Bool( - argstr='-directmap', - desc= - "Only applicable when using pds as the inputmodel. Use direct mapping between the eigenvalues and the distribution parameters instead of the log of the eigenvalues." + argstr="-directmap", + desc="Only applicable when using pds as the inputmodel. Use direct mapping between the eigenvalues and the distribution parameters instead of the log of the eigenvalues.", ) maxcomponents = traits.Int( - argstr='-maxcomponents %d', - units='NA', - desc= - 'The maximum number of tensor components in a voxel (default 2) for multitensor data.' - 'Currently, only the default is supported, but future releases may allow the input of three-tensor data using this option.' + argstr="-maxcomponents %d", + units="NA", + desc="The maximum number of tensor components in a voxel (default 2) for multitensor data." + "Currently, only the default is supported, but future releases may allow the input of three-tensor data using this option.", ) numpds = traits.Int( - argstr='-numpds %d', - units='NA', - desc='The maximum number of PDs in a voxel (default 3) for PD data.' - 'This option determines the size of the input and output voxels.' - 'This means that the data file may be large enough to accomodate three or more PDs,' - 'but does not mean that any of the voxels are classified as containing three or more PDs.' + argstr="-numpds %d", + units="NA", + desc="The maximum number of PDs in a voxel (default 3) for PD data." + "This option determines the size of the input and output voxels." + "This means that the data file may be large enough to accomodate three or more PDs," + "but does not mean that any of the voxels are classified as containing three or more PDs.", ) class PicoPDFsOutputSpec(TraitedSpec): - pdfs = File(exists=True, desc='path/name of 4D volume in voxel order') + pdfs = File(exists=True, desc="path/name of 4D volume in voxel order") class PicoPDFs(StdOutCommandLine): @@ -586,210 +612,237 @@ class PicoPDFs(StdOutCommandLine): >>> pdf.inputs.in_file = 'voxel-order_data.Bfloat' >>> pdf.run() # doctest: +SKIP """ - _cmd = 'picopdfs' + + _cmd = "picopdfs" input_spec = PicoPDFsInputSpec output_spec = PicoPDFsOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['pdfs'] = os.path.abspath(self._gen_outfilename()) + outputs["pdfs"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) - return name + '_pdfs.Bdouble' + return name + "_pdfs.Bdouble" class TrackInputSpec(CommandLineInputSpec): in_file = File( - exists=True, - argstr='-inputfile %s', - position=1, - desc='input data file') + exists=True, argstr="-inputfile %s", position=1, desc="input data file" + ) - seed_file = File( - exists=True, argstr='-seedfile %s', position=2, desc='seed file') + seed_file = File(exists=True, argstr="-seedfile %s", position=2, desc="seed file") inputmodel = traits.Enum( - 'dt', - 'multitensor', - 'sfpeak', - 'pico', - 'repbs_dt', - 'repbs_multitensor', - 'ballstick', - 'wildbs_dt', - 'bayesdirac', - 'bayesdirac_dt', - 'bedpostx_dyad', - 'bedpostx', - argstr='-inputmodel %s', - desc='input model type', - usedefault=True) + "dt", + "multitensor", + "sfpeak", + "pico", + "repbs_dt", + "repbs_multitensor", + "ballstick", + "wildbs_dt", + "bayesdirac", + "bayesdirac_dt", + "bedpostx_dyad", + "bedpostx", + argstr="-inputmodel %s", + 
desc="input model type", + usedefault=True, + ) tracker = traits.Enum( - 'fact', - 'euler', - 'rk4', - argstr='-tracker %s', - desc=("The tracking algorithm controls streamlines are " - "generated from the data. The choices are: " - "- FACT, which follows the local fibre orientation " - "in each voxel. No interpolation is used." - "- EULER, which uses a fixed step size along the " - "local fibre orientation. With nearest-neighbour " - "interpolation, this method may be very similar to " - "FACT, except that the step size is fixed, whereas " - "FACT steps extend to the boundary of the next voxel " - "(distance variable depending on the entry and exit " - "points to the voxel)." - "- RK4: Fourth-order Runge-Kutta method. The step " - "size is fixed, however the eventual direction of " - "the step is determined by taking and averaging a " - "series of partial steps."), - usedefault=True) + "fact", + "euler", + "rk4", + argstr="-tracker %s", + desc=( + "The tracking algorithm controls streamlines are " + "generated from the data. The choices are: " + "- FACT, which follows the local fibre orientation " + "in each voxel. No interpolation is used." + "- EULER, which uses a fixed step size along the " + "local fibre orientation. With nearest-neighbour " + "interpolation, this method may be very similar to " + "FACT, except that the step size is fixed, whereas " + "FACT steps extend to the boundary of the next voxel " + "(distance variable depending on the entry and exit " + "points to the voxel)." + "- RK4: Fourth-order Runge-Kutta method. The step " + "size is fixed, however the eventual direction of " + "the step is determined by taking and averaging a " + "series of partial steps." + ), + usedefault=True, + ) interpolator = traits.Enum( - 'nn', - 'prob_nn', - 'linear', - argstr='-interpolator %s', - desc=("The interpolation algorithm determines how " - "the fiber orientation(s) are defined at a given " - "continuous point within the input image. " - "Interpolators are only used when the tracking " - "algorithm is not FACT. The choices are: " - "- NN: Nearest-neighbour interpolation, just " - "uses the local voxel data directly." - "- PROB_NN: Probabilistic nearest-neighbor " - "interpolation, similar to the method pro- " - "posed by Behrens et al [Magnetic Resonance " - "in Medicine, 50:1077-1088, 2003]. The data " - "is not interpolated, but at each point we " - "randomly choose one of the 8 voxels sur- " - "rounding a point. The probability of choosing " - "a particular voxel is based on how close the " - "point is to the centre of that voxel." - "- LINEAR: Linear interpolation of the vector " - "field containing the principal directions at " - "each point.")) + "nn", + "prob_nn", + "linear", + argstr="-interpolator %s", + desc=( + "The interpolation algorithm determines how " + "the fiber orientation(s) are defined at a given " + "continuous point within the input image. " + "Interpolators are only used when the tracking " + "algorithm is not FACT. The choices are: " + "- NN: Nearest-neighbour interpolation, just " + "uses the local voxel data directly." + "- PROB_NN: Probabilistic nearest-neighbor " + "interpolation, similar to the method pro- " + "posed by Behrens et al [Magnetic Resonance " + "in Medicine, 50:1077-1088, 2003]. The data " + "is not interpolated, but at each point we " + "randomly choose one of the 8 voxels sur- " + "rounding a point. The probability of choosing " + "a particular voxel is based on how close the " + "point is to the centre of that voxel." 
+ "- LINEAR: Linear interpolation of the vector " + "field containing the principal directions at " + "each point." + ), + ) stepsize = traits.Float( - argstr='-stepsize %f', - requires=['tracker'], - desc=('Step size for EULER and RK4 tracking. ' - 'The default is 1mm.')) + argstr="-stepsize %f", + requires=["tracker"], + desc=("Step size for EULER and RK4 tracking. " "The default is 1mm."), + ) inputdatatype = traits.Enum( - 'float', 'double', argstr='-inputdatatype %s', desc='input file type') + "float", "double", argstr="-inputdatatype %s", desc="input file type" + ) - gzip = traits.Bool( - argstr='-gzip', desc="save the output image in gzip format") + gzip = traits.Bool(argstr="-gzip", desc="save the output image in gzip format") maxcomponents = traits.Int( - argstr='-maxcomponents %d', - units='NA', - desc=("The maximum number of tensor components in a " - "voxel. This determines the size of the input " - "file and does not say anything about the " - "voxel classification. The default is 2 if " - "the input model is multitensor and 1 if the " - "input model is dt.")) + argstr="-maxcomponents %d", + units="NA", + desc=( + "The maximum number of tensor components in a " + "voxel. This determines the size of the input " + "file and does not say anything about the " + "voxel classification. The default is 2 if " + "the input model is multitensor and 1 if the " + "input model is dt." + ), + ) numpds = traits.Int( - argstr='-numpds %d', - units='NA', - desc=("The maximum number of PDs in a voxel for input " - "models sfpeak and pico. The default is 3 for input " - "model sfpeak and 1 for input model pico. This option " - "determines the size of the voxels in the input file " - "and does not affect tracking. For tensor data, use " - "the -maxcomponents option.")) + argstr="-numpds %d", + units="NA", + desc=( + "The maximum number of PDs in a voxel for input " + "models sfpeak and pico. The default is 3 for input " + "model sfpeak and 1 for input model pico. This option " + "determines the size of the voxels in the input file " + "and does not affect tracking. For tensor data, use " + "the -maxcomponents option." + ), + ) data_dims = traits.List( traits.Int, - desc='data dimensions in voxels', - argstr='-datadims %s', + desc="data dimensions in voxels", + argstr="-datadims %s", minlen=3, maxlen=3, - units='voxels') + units="voxels", + ) voxel_dims = traits.List( traits.Float, - desc='voxel dimensions in mm', - argstr='-voxeldims %s', + desc="voxel dimensions in mm", + argstr="-voxeldims %s", minlen=3, maxlen=3, - units='mm') + units="mm", + ) ipthresh = traits.Float( - argstr='-ipthresh %f', - desc=('Curvature threshold for tracking, expressed as ' - 'the minimum dot product between two streamline ' - 'orientations calculated over the length of a ' - 'voxel. If the dot product between the previous ' - 'and current directions is less than this ' - 'threshold, then the streamline terminates. The ' - 'default setting will terminate fibres that curve ' - 'by more than 80 degrees. Set this to -1.0 to ' - 'disable curvature checking completely.')) + argstr="-ipthresh %f", + desc=( + "Curvature threshold for tracking, expressed as " + "the minimum dot product between two streamline " + "orientations calculated over the length of a " + "voxel. If the dot product between the previous " + "and current directions is less than this " + "threshold, then the streamline terminates. The " + "default setting will terminate fibres that curve " + "by more than 80 degrees. 
Set this to -1.0 to " + "disable curvature checking completely." + ), + ) curvethresh = traits.Float( - argstr='-curvethresh %f', - desc=('Curvature threshold for tracking, expressed ' - 'as the maximum angle (in degrees) between ' - 'between two streamline orientations ' - 'calculated over the length of a voxel. If ' - 'the angle is greater than this, then the ' - 'streamline terminates.')) + argstr="-curvethresh %f", + desc=( + "Curvature threshold for tracking, expressed " + "as the maximum angle (in degrees) between " + "between two streamline orientations " + "calculated over the length of a voxel. If " + "the angle is greater than this, then the " + "streamline terminates." + ), + ) curveinterval = traits.Float( - argstr='-curveinterval %f', - requires=['curvethresh'], - desc=('Interval over which the curvature threshold ' - 'should be evaluated, in mm. The default is ' - '5mm. When using the default curvature ' - 'threshold of 90 degrees, this means that ' - 'streamlines will terminate if they curve by ' - 'more than 90 degrees over a path length ' - 'of 5mm.')) + argstr="-curveinterval %f", + requires=["curvethresh"], + desc=( + "Interval over which the curvature threshold " + "should be evaluated, in mm. The default is " + "5mm. When using the default curvature " + "threshold of 90 degrees, this means that " + "streamlines will terminate if they curve by " + "more than 90 degrees over a path length " + "of 5mm." + ), + ) anisthresh = traits.Float( - argstr='-anisthresh %f', - desc=('Terminate fibres that enter a voxel with lower ' - 'anisotropy than the threshold.')) + argstr="-anisthresh %f", + desc=( + "Terminate fibres that enter a voxel with lower " + "anisotropy than the threshold." + ), + ) anisfile = File( - argstr='-anisfile %s', + argstr="-anisfile %s", exists=True, - desc=('File containing the anisotropy map. This is required to ' - 'apply an anisotropy threshold with non tensor data. If ' - 'the map issupplied it is always used, even in tensor ' - 'data.')) + desc=( + "File containing the anisotropy map. This is required to " + "apply an anisotropy threshold with non tensor data. If " + "the map issupplied it is always used, even in tensor " + "data." 
+ ), + ) outputtracts = traits.Enum( - 'float', - 'double', - 'oogl', - argstr='-outputtracts %s', - desc='output tract file type') + "float", + "double", + "oogl", + argstr="-outputtracts %s", + desc="output tract file type", + ) out_file = File( - argstr='-outputfile %s', - position=-1, - genfile=True, - desc='output data file') + argstr="-outputfile %s", position=-1, genfile=True, desc="output data file" + ) output_root = File( exists=False, - argstr='-outputroot %s', + argstr="-outputroot %s", position=-1, - desc='root directory for output') + desc="root directory for output", + ) class TrackOutputSpec(TraitedSpec): - tracked = File( - exists=True, desc='output file containing reconstructed tracts') + tracked = File(exists=True, desc="output file containing reconstructed tracts") class Track(CommandLine): @@ -808,7 +861,7 @@ class Track(CommandLine): >>> track.run() # doctest: +SKIP """ - _cmd = 'track' + _cmd = "track" input_spec = TrackInputSpec output_spec = TrackOutputSpec @@ -819,11 +872,11 @@ def _list_outputs(self): out_file_path = os.path.abspath(self.inputs.out_file) else: out_file_path = os.path.abspath(self._gen_outfilename()) - outputs['tracked'] = out_file_path + outputs["tracked"] = out_file_path return outputs def _gen_filename(self, name): - if name == 'out_file': + if name == "out_file": return self._gen_outfilename() else: return None @@ -831,10 +884,10 @@ def _gen_filename(self, name): def _gen_outfilename(self): # Currently in_file is only undefined for bedpostx input if not isdefined(self.inputs.in_file): - name = 'bedpostx' + name = "bedpostx" else: _, name, _ = split_filename(self.inputs.in_file) - return name + '_tracked' + return name + "_tracked" class TrackDT(Track): @@ -858,18 +911,17 @@ def __init__(self, command=None, **inputs): class TrackPICoInputSpec(TrackInputSpec): pdf = traits.Enum( - 'bingham', - 'watson', - 'acg', - argstr='-pdf %s', - desc='Specifies the model for PICo parameters. The default is "bingham.' + "bingham", + "watson", + "acg", + argstr="-pdf %s", + desc='Specifies the model for PICo parameters. The default is "bingham.', ) iterations = traits.Int( - argstr='-iterations %d', - units='NA', - desc= - "Number of streamlines to generate at each seed point. The default is 5000." + argstr="-iterations %d", + units="NA", + desc="Number of streamlines to generate at each seed point. The default is 5000.", ) @@ -896,17 +948,21 @@ def __init__(self, command=None, **inputs): class TrackBedpostxDeterInputSpec(TrackInputSpec): bedpostxdir = Directory( - argstr='-bedpostxdir %s', + argstr="-bedpostxdir %s", mandatory=True, exists=True, - desc=('Directory containing bedpostx output')) + desc=("Directory containing bedpostx output"), + ) min_vol_frac = traits.Float( - argstr='-bedpostxminf %d', - units='NA', - desc=("Zeros out compartments in bedpostx data " - "with a mean volume fraction f of less than " - "min_vol_frac. The default is 0.01.")) + argstr="-bedpostxminf %d", + units="NA", + desc=( + "Zeros out compartments in bedpostx data " + "with a mean volume fraction f of less than " + "min_vol_frac. The default is 0.01." 
+ ), + ) class TrackBedpostxDeter(Track): @@ -942,23 +998,29 @@ def __init__(self, command=None, **inputs): class TrackBedpostxProbaInputSpec(TrackInputSpec): bedpostxdir = Directory( - argstr='-bedpostxdir %s', + argstr="-bedpostxdir %s", mandatory=True, exists=True, - desc=('Directory containing bedpostx output')) + desc=("Directory containing bedpostx output"), + ) min_vol_frac = traits.Float( - argstr='-bedpostxminf %d', - units='NA', - desc=("Zeros out compartments in bedpostx data " - "with a mean volume fraction f of less than " - "min_vol_frac. The default is 0.01.")) + argstr="-bedpostxminf %d", + units="NA", + desc=( + "Zeros out compartments in bedpostx data " + "with a mean volume fraction f of less than " + "min_vol_frac. The default is 0.01." + ), + ) iterations = traits.Int( - argstr='-iterations %d', - units='NA', - desc=("Number of streamlines to generate at each " - "seed point. The default is 1.")) + argstr="-iterations %d", + units="NA", + desc=( + "Number of streamlines to generate at each " "seed point. The default is 1." + ), + ) class TrackBedpostxProba(Track): @@ -999,65 +1061,63 @@ def __init__(self, command=None, **inputs): class TrackBayesDiracInputSpec(TrackInputSpec): scheme_file = File( - argstr='-schemefile %s', + argstr="-schemefile %s", mandatory=True, exists=True, - desc=('The scheme file corresponding to the data being ' - 'processed.')) + desc=("The scheme file corresponding to the data being " "processed."), + ) iterations = traits.Int( - argstr='-iterations %d', - units='NA', - desc=("Number of streamlines to generate at each " - "seed point. The default is 5000.")) + argstr="-iterations %d", + units="NA", + desc=( + "Number of streamlines to generate at each " + "seed point. The default is 5000." + ), + ) pdf = traits.Enum( - 'bingham', - 'watson', - 'acg', - argstr='-pdf %s', - desc= - 'Specifies the model for PICo priors (not the curvature priors). The default is "bingham".' + "bingham", + "watson", + "acg", + argstr="-pdf %s", + desc='Specifies the model for PICo priors (not the curvature priors). The default is "bingham".', ) pointset = traits.Int( - argstr='-pointset %s', - desc= - 'Index to the point set to use for Bayesian likelihood calculation. The index specifies a set of evenly distributed points on the unit sphere, where each point x defines two possible step directions (x or -x) for the streamline path. A larger number indexes a larger point set, which gives higher angular resolution at the expense of computation time. The default is index 1, which gives 1922 points, index 0 gives 1082 points, index 2 gives 3002 points.' + argstr="-pointset %s", + desc="Index to the point set to use for Bayesian likelihood calculation. The index specifies a set of evenly distributed points on the unit sphere, where each point x defines two possible step directions (x or -x) for the streamline path. A larger number indexes a larger point set, which gives higher angular resolution at the expense of computation time. The default is index 1, which gives 1922 points, index 0 gives 1082 points, index 2 gives 3002 points.", ) datamodel = traits.Enum( - 'cylsymmdt', - 'ballstick', - argstr='-datamodel %s', - desc= - 'Model of the data for Bayesian tracking. The default model is "cylsymmdt", a diffusion tensor with cylindrical symmetry about e_1, ie L1 >= L_2 = L_3. The other model is "ballstick", the partial volume model (see ballstickfit).' + "cylsymmdt", + "ballstick", + argstr="-datamodel %s", + desc='Model of the data for Bayesian tracking. 
The default model is "cylsymmdt", a diffusion tensor with cylindrical symmetry about e_1, ie L1 >= L_2 = L_3. The other model is "ballstick", the partial volume model (see ballstickfit).', ) curvepriork = traits.Float( - argstr='-curvepriork %G', - desc= - 'Concentration parameter for the prior distribution on fibre orientations given the fibre orientation at the previous step. Larger values of k make curvature less likely.' + argstr="-curvepriork %G", + desc="Concentration parameter for the prior distribution on fibre orientations given the fibre orientation at the previous step. Larger values of k make curvature less likely.", ) curvepriorg = traits.Float( - argstr='-curvepriorg %G', - desc= - 'Concentration parameter for the prior distribution on fibre orientations given the fibre orientation at the previous step. Larger values of g make curvature less likely.' + argstr="-curvepriorg %G", + desc="Concentration parameter for the prior distribution on fibre orientations given the fibre orientation at the previous step. Larger values of g make curvature less likely.", ) extpriorfile = File( exists=True, - argstr='-extpriorfile %s', - desc= - 'Path to a PICo image produced by picopdfs. The PDF in each voxel is used as a prior for the fibre orientation in Bayesian tracking. The prior image must be in the same space as the diffusion data.' + argstr="-extpriorfile %s", + desc="Path to a PICo image produced by picopdfs. The PDF in each voxel is used as a prior for the fibre orientation in Bayesian tracking. The prior image must be in the same space as the diffusion data.", ) extpriordatatype = traits.Enum( - 'float', - 'double', - argstr='-extpriordatatype %s', - desc='Datatype of the prior image. The default is "double".') + "float", + "double", + argstr="-extpriordatatype %s", + desc='Datatype of the prior image. The default is "double".', + ) class TrackBayesDirac(Track): @@ -1103,35 +1163,34 @@ def __init__(self, command=None, **inputs): class TrackBootstrapInputSpec(TrackInputSpec): scheme_file = File( - argstr='-schemefile %s', + argstr="-schemefile %s", mandatory=True, exists=True, - desc='The scheme file corresponding to the data being processed.') + desc="The scheme file corresponding to the data being processed.", + ) iterations = traits.Int( - argstr='-iterations %d', - units='NA', - desc="Number of streamlines to generate at each seed point.") + argstr="-iterations %d", + units="NA", + desc="Number of streamlines to generate at each seed point.", + ) inversion = traits.Int( - argstr='-inversion %s', - desc= - 'Tensor reconstruction algorithm for repetition bootstrapping. Default is 1 (linear reconstruction, single tensor).' + argstr="-inversion %s", + desc="Tensor reconstruction algorithm for repetition bootstrapping. Default is 1 (linear reconstruction, single tensor).", ) bsdatafiles = traits.List( File(exists=True), mandatory=True, - argstr='-bsdatafile %s', - desc= - 'Specifies files containing raw data for repetition bootstrapping. Use -inputfile for wild bootstrap data.' + argstr="-bsdatafile %s", + desc="Specifies files containing raw data for repetition bootstrapping. Use -inputfile for wild bootstrap data.", ) bgmask = File( - argstr='-bgmask %s', + argstr="-bgmask %s", exists=True, - desc= - 'Provides the name of a file containing a background mask computed using, for example, FSL\'s bet2 program. The mask file contains zero in background voxels and non-zero in foreground.' 
+ desc="Provides the name of a file containing a background mask computed using, for example, FSL's bet2 program. The mask file contains zero in background voxels and non-zero in foreground.", ) @@ -1160,59 +1219,59 @@ def __init__(self, command=None, **inputs): class ComputeMeanDiffusivityInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='< %s', + argstr="< %s", mandatory=True, position=1, - desc='Tensor-fitted data filename') + desc="Tensor-fitted data filename", + ) scheme_file = File( exists=True, - argstr='%s', + argstr="%s", position=2, - desc='Camino scheme file (b values / vectors, see camino.fsl2scheme)') + desc="Camino scheme file (b values / vectors, see camino.fsl2scheme)", + ) out_file = File(argstr="> %s", position=-1, genfile=True) inputmodel = traits.Enum( - 'dt', - 'twotensor', - 'threetensor', - argstr='-inputmodel %s', - desc= - 'Specifies the model that the input tensor data contains parameters for.' + "dt", + "twotensor", + "threetensor", + argstr="-inputmodel %s", + desc="Specifies the model that the input tensor data contains parameters for." 'Possible model types are: "dt" (diffusion-tensor data), "twotensor" (two-tensor data), ' '"threetensor" (three-tensor data). By default, the program assumes that the input data ' - 'contains a single diffusion tensor in each voxel.') + "contains a single diffusion tensor in each voxel.", + ) inputdatatype = traits.Enum( - 'char', - 'short', - 'int', - 'long', - 'float', - 'double', - argstr='-inputdatatype %s', - desc= - 'Specifies the data type of the input file. The data type can be any of the' - 'following strings: "char", "short", "int", "long", "float" or "double".' + "char", + "short", + "int", + "long", + "float", + "double", + argstr="-inputdatatype %s", + desc="Specifies the data type of the input file. The data type can be any of the" + 'following strings: "char", "short", "int", "long", "float" or "double".', ) outputdatatype = traits.Enum( - 'char', - 'short', - 'int', - 'long', - 'float', - 'double', - argstr='-outputdatatype %s', - desc= - 'Specifies the data type of the output data. The data type can be any of the' - 'following strings: "char", "short", "int", "long", "float" or "double".' + "char", + "short", + "int", + "long", + "float", + "double", + argstr="-outputdatatype %s", + desc="Specifies the data type of the output data. 
The data type can be any of the" + 'following strings: "char", "short", "int", "long", "float" or "double".', ) class ComputeMeanDiffusivityOutputSpec(TraitedSpec): - md = File(exists=True, desc='Mean Diffusivity Map') + md = File(exists=True, desc="Mean Diffusivity Map") class ComputeMeanDiffusivity(StdOutCommandLine): @@ -1228,7 +1287,8 @@ class ComputeMeanDiffusivity(StdOutCommandLine): >>> md.inputs.scheme_file = 'A.scheme' >>> md.run() # doctest: +SKIP """ - _cmd = 'md' + + _cmd = "md" input_spec = ComputeMeanDiffusivityInputSpec output_spec = ComputeMeanDiffusivityOutputSpec @@ -1245,58 +1305,58 @@ def _gen_outfilename(self): class ComputeFractionalAnisotropyInputSpec(StdOutCommandLineInputSpec): in_file = File( exists=True, - argstr='< %s', + argstr="< %s", mandatory=True, position=1, - desc='Tensor-fitted data filename') + desc="Tensor-fitted data filename", + ) scheme_file = File( exists=True, - argstr='%s', + argstr="%s", position=2, - desc='Camino scheme file (b values / vectors, see camino.fsl2scheme)') + desc="Camino scheme file (b values / vectors, see camino.fsl2scheme)", + ) inputmodel = traits.Enum( - 'dt', - 'twotensor', - 'threetensor', - 'multitensor', - argstr='-inputmodel %s', - desc= - 'Specifies the model that the input tensor data contains parameters for.' + "dt", + "twotensor", + "threetensor", + "multitensor", + argstr="-inputmodel %s", + desc="Specifies the model that the input tensor data contains parameters for." 'Possible model types are: "dt" (diffusion-tensor data), "twotensor" (two-tensor data), ' '"threetensor" (three-tensor data). By default, the program assumes that the input data ' - 'contains a single diffusion tensor in each voxel.') + "contains a single diffusion tensor in each voxel.", + ) inputdatatype = traits.Enum( - 'char', - 'short', - 'int', - 'long', - 'float', - 'double', - argstr='-inputdatatype %s', - desc= - 'Specifies the data type of the input file. The data type can be any of the' - 'following strings: "char", "short", "int", "long", "float" or "double".' + "char", + "short", + "int", + "long", + "float", + "double", + argstr="-inputdatatype %s", + desc="Specifies the data type of the input file. The data type can be any of the" + 'following strings: "char", "short", "int", "long", "float" or "double".', ) outputdatatype = traits.Enum( - 'char', - 'short', - 'int', - 'long', - 'float', - 'double', - argstr='-outputdatatype %s', - desc= - 'Specifies the data type of the output data. The data type can be any of the' - 'following strings: "char", "short", "int", "long", "float" or "double".' + "char", + "short", + "int", + "long", + "float", + "double", + argstr="-outputdatatype %s", + desc="Specifies the data type of the output data. 
The data type can be any of the" + 'following strings: "char", "short", "int", "long", "float" or "double".', ) class ComputeFractionalAnisotropyOutputSpec(TraitedSpec): - fa = File(exists=True, desc='Fractional Anisotropy Map') + fa = File(exists=True, desc="Fractional Anisotropy Map") class ComputeFractionalAnisotropy(StdOutCommandLine): @@ -1318,75 +1378,76 @@ class ComputeFractionalAnisotropy(StdOutCommandLine): >>> fa.inputs.scheme_file = 'A.scheme' >>> fa.run() # doctest: +SKIP """ - _cmd = 'fa' + + _cmd = "fa" input_spec = ComputeFractionalAnisotropyInputSpec output_spec = ComputeFractionalAnisotropyOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['fa'] = os.path.abspath(self._gen_outfilename()) + outputs["fa"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) - return name + '_FA.Bdouble' # Need to change to self.inputs.outputdatatype + return name + "_FA.Bdouble" # Need to change to self.inputs.outputdatatype class ComputeTensorTraceInputSpec(StdOutCommandLineInputSpec): in_file = File( exists=True, - argstr='< %s', + argstr="< %s", mandatory=True, position=1, - desc='Tensor-fitted data filename') + desc="Tensor-fitted data filename", + ) scheme_file = File( exists=True, - argstr='%s', + argstr="%s", position=2, - desc='Camino scheme file (b values / vectors, see camino.fsl2scheme)') + desc="Camino scheme file (b values / vectors, see camino.fsl2scheme)", + ) inputmodel = traits.Enum( - 'dt', - 'twotensor', - 'threetensor', - 'multitensor', - argstr='-inputmodel %s', - desc= - 'Specifies the model that the input tensor data contains parameters for.' + "dt", + "twotensor", + "threetensor", + "multitensor", + argstr="-inputmodel %s", + desc="Specifies the model that the input tensor data contains parameters for." 'Possible model types are: "dt" (diffusion-tensor data), "twotensor" (two-tensor data), ' '"threetensor" (three-tensor data). By default, the program assumes that the input data ' - 'contains a single diffusion tensor in each voxel.') + "contains a single diffusion tensor in each voxel.", + ) inputdatatype = traits.Enum( - 'char', - 'short', - 'int', - 'long', - 'float', - 'double', - argstr='-inputdatatype %s', - desc= - 'Specifies the data type of the input file. The data type can be any of the' - 'following strings: "char", "short", "int", "long", "float" or "double".' + "char", + "short", + "int", + "long", + "float", + "double", + argstr="-inputdatatype %s", + desc="Specifies the data type of the input file. The data type can be any of the" + 'following strings: "char", "short", "int", "long", "float" or "double".', ) outputdatatype = traits.Enum( - 'char', - 'short', - 'int', - 'long', - 'float', - 'double', - argstr='-outputdatatype %s', - desc= - 'Specifies the data type of the output data. The data type can be any of the' - 'following strings: "char", "short", "int", "long", "float" or "double".' + "char", + "short", + "int", + "long", + "float", + "double", + argstr="-outputdatatype %s", + desc="Specifies the data type of the output data. 
The data type can be any of the" + 'following strings: "char", "short", "int", "long", "float" or "double".', ) class ComputeTensorTraceOutputSpec(TraitedSpec): - trace = File(exists=True, desc='Trace of the diffusion tensor') + trace = File(exists=True, desc="Trace of the diffusion tensor") class ComputeTensorTrace(StdOutCommandLine): @@ -1410,73 +1471,79 @@ class ComputeTensorTrace(StdOutCommandLine): >>> trace.inputs.scheme_file = 'A.scheme' >>> trace.run() # doctest: +SKIP """ - _cmd = 'trd' + + _cmd = "trd" input_spec = ComputeTensorTraceInputSpec output_spec = ComputeTensorTraceOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['trace'] = os.path.abspath(self._gen_outfilename()) + outputs["trace"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) - return name + '_TrD.img' # Need to change to self.inputs.outputdatatype + return name + "_TrD.img" # Need to change to self.inputs.outputdatatype class ComputeEigensystemInputSpec(StdOutCommandLineInputSpec): in_file = File( exists=True, - argstr='< %s', + argstr="< %s", mandatory=True, position=1, - desc='Tensor-fitted data filename') + desc="Tensor-fitted data filename", + ) inputmodel = traits.Enum( - 'dt', - 'multitensor', - argstr='-inputmodel %s', - desc= - 'Specifies the model that the input data contains parameters for. Possible model types are: "dt" (diffusion-tensor data) and "multitensor"' + "dt", + "multitensor", + argstr="-inputmodel %s", + desc='Specifies the model that the input data contains parameters for. Possible model types are: "dt" (diffusion-tensor data) and "multitensor"', ) maxcomponents = traits.Int( - argstr='-maxcomponents %d', - desc= - 'The maximum number of tensor components in a voxel of the input data.' + argstr="-maxcomponents %d", + desc="The maximum number of tensor components in a voxel of the input data.", ) inputdatatype = traits.Enum( - 'double', - 'float', - 'long', - 'int', - 'short', - 'char', - argstr='-inputdatatype %s', + "double", + "float", + "long", + "int", + "short", + "char", + argstr="-inputdatatype %s", usedefault=True, - desc=('Specifies the data type of the input data. ' - 'The data type can be any of the following strings: ' - '"char", "short", "int", "long", "float" or "double".' - 'Default is double data type')) + desc=( + "Specifies the data type of the input data. " + "The data type can be any of the following strings: " + '"char", "short", "int", "long", "float" or "double".' + "Default is double data type" + ), + ) outputdatatype = traits.Enum( - 'double', - 'float', - 'long', - 'int', - 'short', - 'char', - argstr='-outputdatatype %s', + "double", + "float", + "long", + "int", + "short", + "char", + argstr="-outputdatatype %s", usedefault=True, - desc=('Specifies the data type of the output data. ' - 'The data type can be any of the following strings: ' - '"char", "short", "int", "long", "float" or "double".' - 'Default is double data type')) + desc=( + "Specifies the data type of the output data. " + "The data type can be any of the following strings: " + '"char", "short", "int", "long", "float" or "double".' 
+ "Default is double data type" + ), + ) class ComputeEigensystemOutputSpec(TraitedSpec): - eigen = File(exists=True, desc='Trace of the diffusion tensor') + eigen = File(exists=True, desc="Trace of the diffusion tensor") class ComputeEigensystem(StdOutCommandLine): @@ -1499,7 +1566,8 @@ class ComputeEigensystem(StdOutCommandLine): >>> dteig.inputs.in_file = 'tensor_fitted_data.Bdouble' >>> dteig.run() # doctest: +SKIP """ - _cmd = 'dteig' + + _cmd = "dteig" input_spec = ComputeEigensystemInputSpec output_spec = ComputeEigensystemOutputSpec @@ -1511,4 +1579,4 @@ def _list_outputs(self): def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) datatype = self.inputs.outputdatatype - return name + '_eig.B' + datatype + return name + "_eig.B" + datatype diff --git a/nipype/interfaces/camino/odf.py b/nipype/interfaces/camino/odf.py index 163c41fd87..f152f32762 100644 --- a/nipype/interfaces/camino/odf.py +++ b/nipype/interfaces/camino/odf.py @@ -3,52 +3,69 @@ import os from ...utils.filemanip import split_filename -from ..base import (traits, TraitedSpec, File, StdOutCommandLine, - StdOutCommandLineInputSpec) +from ..base import ( + traits, + TraitedSpec, + File, + StdOutCommandLine, + StdOutCommandLineInputSpec, +) class QBallMXInputSpec(StdOutCommandLineInputSpec): basistype = traits.Enum( - 'rbf', - 'sh', - argstr='-basistype %s', - desc=('Basis function type. "rbf" to use radial basis functions ' - '"sh" to use spherical harmonics'), - usedefault=True) + "rbf", + "sh", + argstr="-basistype %s", + desc=( + 'Basis function type. "rbf" to use radial basis functions ' + '"sh" to use spherical harmonics' + ), + usedefault=True, + ) scheme_file = File( exists=True, - argstr='-schemefile %s', + argstr="-schemefile %s", mandatory=True, - desc='Specifies the scheme file for the diffusion MRI data') + desc="Specifies the scheme file for the diffusion MRI data", + ) order = traits.Int( - argstr='-order %d', - units='NA', + argstr="-order %d", + units="NA", desc=( - 'Specific to sh. Maximum order of the spherical harmonic series. ' - 'Default is 4.')) + "Specific to sh. Maximum order of the spherical harmonic series. " + "Default is 4." + ), + ) rbfpointset = traits.Int( - argstr='-rbfpointset %d', - units='NA', + argstr="-rbfpointset %d", + units="NA", desc=( - 'Specific to rbf. Sets the number of radial basis functions to use. ' - 'The value specified must be present in the Pointsets directory. ' - 'The default value is 246.')) + "Specific to rbf. Sets the number of radial basis functions to use. " + "The value specified must be present in the Pointsets directory. " + "The default value is 246." + ), + ) rbfsigma = traits.Float( - argstr='-rbfsigma %f', - units='NA', - desc= - ('Specific to rbf. Sets the width of the interpolating basis functions. ' - 'The default value is 0.2618 (15 degrees).')) + argstr="-rbfsigma %f", + units="NA", + desc=( + "Specific to rbf. Sets the width of the interpolating basis functions. " + "The default value is 0.2618 (15 degrees)." + ), + ) smoothingsigma = traits.Float( - argstr='-smoothingsigma %f', - units='NA', + argstr="-smoothingsigma %f", + units="NA", desc=( - 'Specific to rbf. Sets the width of the smoothing basis functions. ' - 'The default value is 0.1309 (7.5 degrees).')) + "Specific to rbf. Sets the width of the smoothing basis functions. " + "The default value is 0.1309 (7.5 degrees)." 
+ ), + ) class QBallMXOutputSpec(TraitedSpec): - qmat = File(exists=True, desc='Q-Ball reconstruction matrix') + qmat = File(exists=True, desc="Q-Ball reconstruction matrix") class QBallMX(StdOutCommandLine): @@ -90,52 +107,61 @@ class QBallMX(StdOutCommandLine): >>> qballcoeffs.inputs.bgmask = 'brain_mask.nii' >>> qballcoeffs.run() # doctest: +SKIP """ - _cmd = 'qballmx' + + _cmd = "qballmx" input_spec = QBallMXInputSpec output_spec = QBallMXOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['qmat'] = os.path.abspath(self._gen_outfilename()) + outputs["qmat"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.scheme_file) - return name + '_qmat.Bdouble' + return name + "_qmat.Bdouble" class LinReconInputSpec(StdOutCommandLineInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=1, - desc='voxel-order data filename') + desc="voxel-order data filename", + ) scheme_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=2, - desc='Specifies the scheme file for the diffusion MRI data') + desc="Specifies the scheme file for the diffusion MRI data", + ) qball_mat = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=3, - desc='Linear transformation matrix.') + desc="Linear transformation matrix.", + ) normalize = traits.Bool( - argstr='-normalize', - desc=('Normalize the measurements and discard ' - 'the zero measurements before the linear transform.')) + argstr="-normalize", + desc=( + "Normalize the measurements and discard " + "the zero measurements before the linear transform." + ), + ) log = traits.Bool( - argstr='-log', - desc=('Transform the log measurements rather than the ' - 'measurements themselves')) - bgmask = File(exists=True, argstr='-bgmask %s', desc='background mask') + argstr="-log", + desc=( + "Transform the log measurements rather than the " "measurements themselves" + ), + ) + bgmask = File(exists=True, argstr="-bgmask %s", desc="background mask") class LinReconOutputSpec(TraitedSpec): - recon_data = File(exists=True, desc='Transformed data') + recon_data = File(exists=True, desc="Transformed data") class LinRecon(StdOutCommandLine): @@ -181,84 +207,96 @@ class LinRecon(StdOutCommandLine): >>> qballcoeffs.inputs.normalize = True >>> qballcoeffs.run() # doctest: +SKIP """ - _cmd = 'linrecon' + + _cmd = "linrecon" input_spec = LinReconInputSpec output_spec = LinReconOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['recon_data'] = os.path.abspath(self._gen_outfilename()) + outputs["recon_data"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.scheme_file) - return name + '_recondata.Bdouble' + return name + "_recondata.Bdouble" class MESDInputSpec(StdOutCommandLineInputSpec): in_file = File( exists=True, - argstr='-inputfile %s', + argstr="-inputfile %s", mandatory=True, position=1, - desc='voxel-order data filename') + desc="voxel-order data filename", + ) inverter = traits.Enum( - 'SPIKE', - 'PAS', - argstr='-filter %s', + "SPIKE", + "PAS", + argstr="-filter %s", position=2, mandatory=True, - desc= - ('The inversion index specifies the type of inversion to perform on the data.' 
- 'The currently available choices are:' - 'Inverter name | Inverter parameters' - '---------------|------------------' - 'SPIKE | bd (b-value x diffusivity along the fibre.)' - 'PAS | r')) + desc=( + "The inversion index specifies the type of inversion to perform on the data." + "The currently available choices are:" + "Inverter name | Inverter parameters" + "---------------|------------------" + "SPIKE | bd (b-value x diffusivity along the fibre.)" + "PAS | r" + ), + ) inverter_param = traits.Float( - argstr='%f', - units='NA', + argstr="%f", + units="NA", position=3, mandatory=True, - desc= - ('Parameter associated with the inverter. Cf. inverter description for' - 'more information.')) + desc=( + "Parameter associated with the inverter. Cf. inverter description for" + "more information." + ), + ) fastmesd = traits.Bool( - argstr='-fastmesd', - requires=['mepointset'], - desc= - ('Turns off numerical integration checks and fixes the integration point set size at that of' - 'the index specified by -basepointset..')) + argstr="-fastmesd", + requires=["mepointset"], + desc=( + "Turns off numerical integration checks and fixes the integration point set size at that of" + "the index specified by -basepointset.." + ), + ) mepointset = traits.Int( - argstr='-mepointset %d', - units='NA', - desc= - ('Use a set of directions other than those in the scheme file for the deconvolution kernel.' - 'The number refers to the number of directions on the unit sphere. For example, ' - '"-mepointset 54" uses the directions in "camino/PointSets/Elec054.txt".' - )) + argstr="-mepointset %d", + units="NA", + desc=( + "Use a set of directions other than those in the scheme file for the deconvolution kernel." + "The number refers to the number of directions on the unit sphere. For example, " + '"-mepointset 54" uses the directions in "camino/PointSets/Elec054.txt".' + ), + ) scheme_file = File( exists=True, - argstr='-schemefile %s', + argstr="-schemefile %s", mandatory=True, - desc='Specifies the scheme file for the diffusion MRI data') - bgmask = File(exists=True, argstr='-bgmask %s', desc='background mask') + desc="Specifies the scheme file for the diffusion MRI data", + ) + bgmask = File(exists=True, argstr="-bgmask %s", desc="background mask") inputdatatype = traits.Enum( - 'float', - 'char', - 'short', - 'int', - 'long', - 'double', - argstr='-inputdatatype %s', - desc= - ('Specifies the data type of the input file: "char", "short", "int", "long",' - '"float" or "double". The input file must have BIG-ENDIAN ordering.' - 'By default, the input type is "float".')) + "float", + "char", + "short", + "int", + "long", + "double", + argstr="-inputdatatype %s", + desc=( + 'Specifies the data type of the input file: "char", "short", "int", "long",' + '"float" or "double". The input file must have BIG-ENDIAN ordering.' + 'By default, the input type is "float".' 
+ ), + ) class MESDOutputSpec(TraitedSpec): - mesd_data = File(exists=True, desc='MESD data') + mesd_data = File(exists=True, desc="MESD data") class MESD(StdOutCommandLine): @@ -338,116 +376,128 @@ class MESD(StdOutCommandLine): >>> mesd.inputs.inverter_param = 1.4 >>> mesd.run() # doctest: +SKIP """ - _cmd = 'mesd' + + _cmd = "mesd" input_spec = MESDInputSpec output_spec = MESDOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['mesd_data'] = os.path.abspath(self._gen_outfilename()) + outputs["mesd_data"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.scheme_file) - return name + '_MESD.Bdouble' + return name + "_MESD.Bdouble" class SFPeaksInputSpec(StdOutCommandLineInputSpec): in_file = File( exists=True, - argstr='-inputfile %s', + argstr="-inputfile %s", mandatory=True, - desc='Voxel-order data of spherical functions') + desc="Voxel-order data of spherical functions", + ) inputmodel = traits.Enum( - 'sh', - 'maxent', - 'rbf', - argstr='-inputmodel %s', + "sh", + "maxent", + "rbf", + argstr="-inputmodel %s", mandatory=True, - desc= - ('Type of functions input via in_file. Currently supported options are: ' - ' sh - Spherical harmonic series. Specify the maximum order of the SH series ' - ' with the "order" attribute if different from the default of 4. ' - ' maxent - Maximum entropy representations output by MESD. The reconstruction ' - ' directions input to MESD must be specified. By default this is the ' - ' same set of gradient directions (excluding zero gradients) in the ' - ' scheme file, so specify the "schemefile" attribute unless the ' - ' "mepointset" attribute was set in MESD. ' - ' rbf - Sums of radial basis functions. Specify the pointset with the attribute ' - ' "rbfpointset" if different from the default. See QBallMX.')) + desc=( + "Type of functions input via in_file. Currently supported options are: " + " sh - Spherical harmonic series. Specify the maximum order of the SH series " + ' with the "order" attribute if different from the default of 4. ' + " maxent - Maximum entropy representations output by MESD. The reconstruction " + " directions input to MESD must be specified. By default this is the " + " same set of gradient directions (excluding zero gradients) in the " + ' scheme file, so specify the "schemefile" attribute unless the ' + ' "mepointset" attribute was set in MESD. ' + " rbf - Sums of radial basis functions. Specify the pointset with the attribute " + ' "rbfpointset" if different from the default. See QBallMX.' + ), + ) order = traits.Int( - argstr='-order %d', - units='NA', - desc='Specific to sh. Maximum order of the spherical harmonic series.') + argstr="-order %d", + units="NA", + desc="Specific to sh. Maximum order of the spherical harmonic series.", + ) scheme_file = File( - exists=True, - argstr='%s', - desc='Specific to maxent. Specifies the scheme file.') + exists=True, argstr="%s", desc="Specific to maxent. Specifies the scheme file." + ) rbfpointset = traits.Int( - argstr='-rbfpointset %d', - units='NA', + argstr="-rbfpointset %d", + units="NA", desc=( - 'Specific to rbf. Sets the number of radial basis functions to use. ' - 'The value specified must be present in the Pointsets directory. ' - 'The default value is 246.')) + "Specific to rbf. Sets the number of radial basis functions to use. " + "The value specified must be present in the Pointsets directory. " + "The default value is 246." 
+ ), + ) mepointset = traits.Int( - argstr='-mepointset %d', - units='NA', - desc= - ('Use a set of directions other than those in the scheme file for the deconvolution ' - 'kernel. The number refers to the number of directions on the unit sphere. ' - 'For example, "mepointset = 54" uses the directions in "camino/PointSets/Elec054.txt" ' - 'Use this option only if you told MESD to use a custom set of directions with the same ' - 'option. Otherwise, specify the scheme file with the "schemefile" attribute.' - )) + argstr="-mepointset %d", + units="NA", + desc=( + "Use a set of directions other than those in the scheme file for the deconvolution " + "kernel. The number refers to the number of directions on the unit sphere. " + 'For example, "mepointset = 54" uses the directions in "camino/PointSets/Elec054.txt" ' + "Use this option only if you told MESD to use a custom set of directions with the same " + 'option. Otherwise, specify the scheme file with the "schemefile" attribute.' + ), + ) numpds = traits.Int( - argstr='-numpds %d', - units='NA', - desc='The largest number of peak directions to output in each voxel.') + argstr="-numpds %d", + units="NA", + desc="The largest number of peak directions to output in each voxel.", + ) noconsistencycheck = traits.Bool( - argstr='-noconsistencycheck', - desc= - 'Turns off the consistency check. The output shows all consistencies as true.' + argstr="-noconsistencycheck", + desc="Turns off the consistency check. The output shows all consistencies as true.", ) searchradius = traits.Float( - argstr='-searchradius %f', - units='NA', - desc= - 'The search radius in the peak finding algorithm. The default is 0.4 (cf. "density")' + argstr="-searchradius %f", + units="NA", + desc='The search radius in the peak finding algorithm. The default is 0.4 (cf. "density")', ) density = traits.Int( - argstr='-density %d', - units='NA', - desc= - ('The number of randomly rotated icosahedra to use in constructing the set of points for ' - 'random sampling in the peak finding algorithm. Default is 1000, which works well for very ' - 'spiky maxent functions. For other types of function, it is reasonable to set the density ' - 'much lower and increase the search radius slightly, which speeds up the computation.' - )) + argstr="-density %d", + units="NA", + desc=( + "The number of randomly rotated icosahedra to use in constructing the set of points for " + "random sampling in the peak finding algorithm. Default is 1000, which works well for very " + "spiky maxent functions. For other types of function, it is reasonable to set the density " + "much lower and increase the search radius slightly, which speeds up the computation." + ), + ) pointset = traits.Int( - argstr='-pointset %d', - units='NA', - desc= - ('To sample using an evenly distributed set of points instead. The integer can be ' - '0, 1, ..., 7. Index 0 gives 1082 points, 1 gives 1922, 2 gives 3002, 3 gives 4322, ' - '4 gives 5882, 5 gives 8672, 6 gives 12002, 7 gives 15872.')) + argstr="-pointset %d", + units="NA", + desc=( + "To sample using an evenly distributed set of points instead. The integer can be " + "0, 1, ..., 7. Index 0 gives 1082 points, 1 gives 1922, 2 gives 3002, 3 gives 4322, " + "4 gives 5882, 5 gives 8672, 6 gives 12002, 7 gives 15872." + ), + ) pdthresh = traits.Float( - argstr='-pdthresh %f', - units='NA', - desc= - ('Base threshold on the actual peak direction strength divided by the mean of the ' - 'function. The default is 1.0 (the peak must be equal or greater than the mean).' 
- )) + argstr="-pdthresh %f", + units="NA", + desc=( + "Base threshold on the actual peak direction strength divided by the mean of the " + "function. The default is 1.0 (the peak must be equal or greater than the mean)." + ), + ) stdsfrommean = traits.Float( - argstr='-stdsfrommean %f', - units='NA', - desc= - ('This is the number of standard deviations of the function to be added to the ' - '"pdthresh" attribute in the peak directions pruning.')) + argstr="-stdsfrommean %f", + units="NA", + desc=( + "This is the number of standard deviations of the function to be added to the " + '"pdthresh" attribute in the peak directions pruning.' + ), + ) class SFPeaksOutputSpec(TraitedSpec): - peaks = File(exists=True, desc='Peaks of the spherical functions.') + peaks = File(exists=True, desc="Peaks of the spherical functions.") class SFPeaks(StdOutCommandLine): @@ -528,15 +578,16 @@ class SFPeaks(StdOutCommandLine): >>> sf_peaks.inputs.searchradius = 1.0 >>> sf_peaks.run() # doctest: +SKIP """ - _cmd = 'sfpeaks' + + _cmd = "sfpeaks" input_spec = SFPeaksInputSpec output_spec = SFPeaksOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['peaks'] = os.path.abspath(self._gen_outfilename()) + outputs["peaks"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) - return name + '_peaks.Bdouble' + return name + "_peaks.Bdouble" diff --git a/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py b/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py index 6341c5fb46..419330da13 100644 --- a/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py +++ b/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py @@ -4,109 +4,42 @@ def test_AnalyzeHeader_inputs(): input_map = dict( - args=dict(argstr='%s', ), - centre=dict( - argstr='-centre %s', - units='mm', - ), - data_dims=dict( - argstr='-datadims %s', - units='voxels', - ), - datatype=dict( - argstr='-datatype %s', - mandatory=True, - ), - description=dict(argstr='-description %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - greylevels=dict( - argstr='-gl %s', - units='NA', - ), - in_file=dict( - argstr='< %s', - extensions=None, - mandatory=True, - position=1, - ), - initfromheader=dict( - argstr='-initfromheader %s', - extensions=None, - position=3, - ), - intelbyteorder=dict(argstr='-intelbyteorder', ), - networkbyteorder=dict(argstr='-networkbyteorder', ), - nimages=dict( - argstr='-nimages %d', - units='NA', - ), - offset=dict( - argstr='-offset %d', - units='NA', - ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), - picoseed=dict( - argstr='-picoseed %s', - units='mm', - ), - printbigendian=dict( - argstr='-printbigendian %s', - extensions=None, - position=3, - ), - printimagedims=dict( - argstr='-printimagedims %s', - extensions=None, - position=3, - ), + args=dict(argstr="%s",), + centre=dict(argstr="-centre %s", units="mm",), + data_dims=dict(argstr="-datadims %s", units="voxels",), + datatype=dict(argstr="-datatype %s", mandatory=True,), + description=dict(argstr="-description %s",), + environ=dict(nohash=True, usedefault=True,), + greylevels=dict(argstr="-gl %s", units="NA",), + in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=1,), + initfromheader=dict(argstr="-initfromheader %s", extensions=None, position=3,), + intelbyteorder=dict(argstr="-intelbyteorder",), + networkbyteorder=dict(argstr="-networkbyteorder",), + nimages=dict(argstr="-nimages 
%d", units="NA",), + offset=dict(argstr="-offset %d", units="NA",), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + picoseed=dict(argstr="-picoseed %s", units="mm",), + printbigendian=dict(argstr="-printbigendian %s", extensions=None, position=3,), + printimagedims=dict(argstr="-printimagedims %s", extensions=None, position=3,), printintelbyteorder=dict( - argstr='-printintelbyteorder %s', - extensions=None, - position=3, - ), - printprogargs=dict( - argstr='-printprogargs %s', - extensions=None, - position=3, - ), - readheader=dict( - argstr='-readheader %s', - extensions=None, - position=3, - ), - scaleinter=dict( - argstr='-scaleinter %d', - units='NA', - ), - scaleslope=dict( - argstr='-scaleslope %d', - units='NA', - ), - scheme_file=dict( - argstr='%s', - extensions=None, - position=2, - ), - voxel_dims=dict( - argstr='-voxeldims %s', - units='mm', - ), + argstr="-printintelbyteorder %s", extensions=None, position=3, + ), + printprogargs=dict(argstr="-printprogargs %s", extensions=None, position=3,), + readheader=dict(argstr="-readheader %s", extensions=None, position=3,), + scaleinter=dict(argstr="-scaleinter %d", units="NA",), + scaleslope=dict(argstr="-scaleslope %d", units="NA",), + scheme_file=dict(argstr="%s", extensions=None, position=2,), + voxel_dims=dict(argstr="-voxeldims %s", units="mm",), ) inputs = AnalyzeHeader.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_AnalyzeHeader_outputs(): - output_map = dict(header=dict(extensions=None, ), ) + output_map = dict(header=dict(extensions=None,),) outputs = AnalyzeHeader.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py b/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py index 63c652319c..70e1603a33 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py @@ -4,41 +4,24 @@ def test_ComputeEigensystem_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='< %s', - extensions=None, - mandatory=True, - position=1, - ), - inputdatatype=dict( - argstr='-inputdatatype %s', - usedefault=True, - ), - inputmodel=dict(argstr='-inputmodel %s', ), - maxcomponents=dict(argstr='-maxcomponents %d', ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), - outputdatatype=dict( - argstr='-outputdatatype %s', - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=1,), + inputdatatype=dict(argstr="-inputdatatype %s", usedefault=True,), + inputmodel=dict(argstr="-inputmodel %s",), + maxcomponents=dict(argstr="-maxcomponents %d",), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + outputdatatype=dict(argstr="-outputdatatype %s", usedefault=True,), ) inputs = ComputeEigensystem.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ComputeEigensystem_outputs(): - output_map = dict(eigen=dict(extensions=None, ), ) + output_map = dict(eigen=dict(extensions=None,),) outputs = ComputeEigensystem.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py b/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py index 6475557c40..cecdcd6dd9 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py @@ -4,39 +4,24 @@ def test_ComputeFractionalAnisotropy_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='< %s', - extensions=None, - mandatory=True, - position=1, - ), - inputdatatype=dict(argstr='-inputdatatype %s', ), - inputmodel=dict(argstr='-inputmodel %s', ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), - outputdatatype=dict(argstr='-outputdatatype %s', ), - scheme_file=dict( - argstr='%s', - extensions=None, - position=2, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=1,), + inputdatatype=dict(argstr="-inputdatatype %s",), + inputmodel=dict(argstr="-inputmodel %s",), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + outputdatatype=dict(argstr="-outputdatatype %s",), + scheme_file=dict(argstr="%s", extensions=None, position=2,), ) inputs = ComputeFractionalAnisotropy.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ComputeFractionalAnisotropy_outputs(): - output_map = dict(fa=dict(extensions=None, ), ) + output_map = dict(fa=dict(extensions=None,),) outputs = ComputeFractionalAnisotropy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py b/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py index a068d7de24..692d900494 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py @@ -4,39 +4,24 @@ def test_ComputeMeanDiffusivity_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='< %s', - extensions=None, - mandatory=True, - position=1, - ), - inputdatatype=dict(argstr='-inputdatatype %s', ), - inputmodel=dict(argstr='-inputmodel %s', ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), - outputdatatype=dict(argstr='-outputdatatype %s', ), - scheme_file=dict( - argstr='%s', - extensions=None, - position=2, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=1,), + inputdatatype=dict(argstr="-inputdatatype %s",), + inputmodel=dict(argstr="-inputmodel %s",), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + outputdatatype=dict(argstr="-outputdatatype %s",), + scheme_file=dict(argstr="%s", extensions=None, position=2,), ) inputs = ComputeMeanDiffusivity.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ComputeMeanDiffusivity_outputs(): - output_map = dict(md=dict(extensions=None, ), ) + output_map = dict(md=dict(extensions=None,),) outputs = ComputeMeanDiffusivity.output_spec() 
for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py b/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py index a05dbf331c..3a7469378e 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py @@ -4,39 +4,24 @@ def test_ComputeTensorTrace_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='< %s', - extensions=None, - mandatory=True, - position=1, - ), - inputdatatype=dict(argstr='-inputdatatype %s', ), - inputmodel=dict(argstr='-inputmodel %s', ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), - outputdatatype=dict(argstr='-outputdatatype %s', ), - scheme_file=dict( - argstr='%s', - extensions=None, - position=2, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=1,), + inputdatatype=dict(argstr="-inputdatatype %s",), + inputmodel=dict(argstr="-inputmodel %s",), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + outputdatatype=dict(argstr="-outputdatatype %s",), + scheme_file=dict(argstr="%s", extensions=None, position=2,), ) inputs = ComputeTensorTrace.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ComputeTensorTrace_outputs(): - output_map = dict(trace=dict(extensions=None, ), ) + output_map = dict(trace=dict(extensions=None,),) outputs = ComputeTensorTrace.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_Conmat.py b/nipype/interfaces/camino/tests/test_auto_Conmat.py index 52a5b668d7..65c84dc64c 100644 --- a/nipype/interfaces/camino/tests/test_auto_Conmat.py +++ b/nipype/interfaces/camino/tests/test_auto_Conmat.py @@ -4,45 +4,21 @@ def test_Conmat_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-inputfile %s', - extensions=None, - mandatory=True, - ), - output_root=dict( - argstr='-outputroot %s', - extensions=None, - genfile=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-inputfile %s", extensions=None, mandatory=True,), + output_root=dict(argstr="-outputroot %s", extensions=None, genfile=True,), scalar_file=dict( - argstr='-scalarfile %s', - extensions=None, - requires=['tract_stat'], - ), - target_file=dict( - argstr='-targetfile %s', - extensions=None, - mandatory=True, - ), - targetname_file=dict( - argstr='-targetnamefile %s', - extensions=None, - ), - tract_prop=dict( - argstr='-tractstat %s', - units='NA', - xor=['tract_stat'], + argstr="-scalarfile %s", extensions=None, requires=["tract_stat"], ), + target_file=dict(argstr="-targetfile %s", extensions=None, mandatory=True,), + targetname_file=dict(argstr="-targetnamefile %s", extensions=None,), + tract_prop=dict(argstr="-tractstat %s", units="NA", xor=["tract_stat"],), tract_stat=dict( - argstr='-tractstat %s', - requires=['scalar_file'], - units='NA', - xor=['tract_prop'], + argstr="-tractstat %s", + requires=["scalar_file"], + units="NA", + xor=["tract_prop"], ), ) inputs = Conmat.input_spec() @@ -50,10 +26,11 @@ def test_Conmat_inputs(): for key, metadata in 
list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Conmat_outputs(): output_map = dict( - conmat_sc=dict(extensions=None, ), - conmat_ts=dict(extensions=None, ), + conmat_sc=dict(extensions=None,), conmat_ts=dict(extensions=None,), ) outputs = Conmat.output_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py b/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py index ab006820f8..fce7560dd2 100644 --- a/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py +++ b/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py @@ -4,28 +4,16 @@ def test_DT2NIfTI_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), header_file=dict( - argstr='-header %s', - extensions=None, - mandatory=True, - position=3, + argstr="-header %s", extensions=None, mandatory=True, position=3, ), in_file=dict( - argstr='-inputfile %s', - extensions=None, - mandatory=True, - position=1, + argstr="-inputfile %s", extensions=None, mandatory=True, position=1, ), output_root=dict( - argstr='-outputroot %s', - extensions=None, - genfile=True, - position=2, + argstr="-outputroot %s", extensions=None, genfile=True, position=2, ), ) inputs = DT2NIfTI.input_spec() @@ -33,11 +21,13 @@ def test_DT2NIfTI_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DT2NIfTI_outputs(): output_map = dict( - dt=dict(extensions=None, ), - exitcode=dict(extensions=None, ), - lns0=dict(extensions=None, ), + dt=dict(extensions=None,), + exitcode=dict(extensions=None,), + lns0=dict(extensions=None,), ) outputs = DT2NIfTI.output_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_DTIFit.py b/nipype/interfaces/camino/tests/test_auto_DTIFit.py index 9322028345..467e2d54ea 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTIFit.py +++ b/nipype/interfaces/camino/tests/test_auto_DTIFit.py @@ -4,45 +4,23 @@ def test_DTIFit_inputs(): input_map = dict( - args=dict(argstr='%s', ), - bgmask=dict( - argstr='-bgmask %s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=1, - ), - non_linear=dict( - argstr='-nonlinear', - position=3, - ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), - scheme_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), + args=dict(argstr="%s",), + bgmask=dict(argstr="-bgmask %s", extensions=None,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1,), + non_linear=dict(argstr="-nonlinear", position=3,), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + scheme_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), ) inputs = DTIFit.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DTIFit_outputs(): - output_map = dict(tensor_fitted=dict(extensions=None, ), ) + output_map = dict(tensor_fitted=dict(extensions=None,),) outputs = DTIFit.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py 
b/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py index 9a564cf37b..674d38a37b 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py +++ b/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py @@ -4,64 +4,32 @@ def test_DTLUTGen_inputs(): input_map = dict( - acg=dict(argstr='-acg', ), - args=dict(argstr='%s', ), - bingham=dict(argstr='-bingham', ), - environ=dict( - nohash=True, - usedefault=True, - ), - frange=dict( - argstr='-frange %s', - position=1, - units='NA', - ), - inversion=dict( - argstr='-inversion %d', - units='NA', - ), - lrange=dict( - argstr='-lrange %s', - position=1, - units='NA', - ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), - samples=dict( - argstr='-samples %d', - units='NA', - ), + acg=dict(argstr="-acg",), + args=dict(argstr="%s",), + bingham=dict(argstr="-bingham",), + environ=dict(nohash=True, usedefault=True,), + frange=dict(argstr="-frange %s", position=1, units="NA",), + inversion=dict(argstr="-inversion %d", units="NA",), + lrange=dict(argstr="-lrange %s", position=1, units="NA",), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + samples=dict(argstr="-samples %d", units="NA",), scheme_file=dict( - argstr='-schemefile %s', - extensions=None, - mandatory=True, - position=2, + argstr="-schemefile %s", extensions=None, mandatory=True, position=2, ), - snr=dict( - argstr='-snr %f', - units='NA', - ), - step=dict( - argstr='-step %f', - units='NA', - ), - trace=dict( - argstr='-trace %G', - units='NA', - ), - watson=dict(argstr='-watson', ), + snr=dict(argstr="-snr %f", units="NA",), + step=dict(argstr="-step %f", units="NA",), + trace=dict(argstr="-trace %G", units="NA",), + watson=dict(argstr="-watson",), ) inputs = DTLUTGen.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DTLUTGen_outputs(): - output_map = dict(dtLUT=dict(extensions=None, ), ) + output_map = dict(dtLUT=dict(extensions=None,),) outputs = DTLUTGen.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_DTMetric.py b/nipype/interfaces/camino/tests/test_auto_DTMetric.py index 2458921c8c..fd62a3d329 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTMetric.py +++ b/nipype/interfaces/camino/tests/test_auto_DTMetric.py @@ -4,45 +4,24 @@ def test_DTMetric_inputs(): input_map = dict( - args=dict(argstr='%s', ), - data_header=dict( - argstr='-header %s', - extensions=None, - ), - eigen_data=dict( - argstr='-inputfile %s', - extensions=None, - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputdatatype=dict( - argstr='-inputdatatype %s', - usedefault=True, - ), - metric=dict( - argstr='-stat %s', - mandatory=True, - ), - outputdatatype=dict( - argstr='-outputdatatype %s', - usedefault=True, - ), - outputfile=dict( - argstr='-outputfile %s', - extensions=None, - genfile=True, - ), + args=dict(argstr="%s",), + data_header=dict(argstr="-header %s", extensions=None,), + eigen_data=dict(argstr="-inputfile %s", extensions=None, mandatory=True,), + environ=dict(nohash=True, usedefault=True,), + inputdatatype=dict(argstr="-inputdatatype %s", usedefault=True,), + metric=dict(argstr="-stat %s", mandatory=True,), + outputdatatype=dict(argstr="-outputdatatype %s", usedefault=True,), + outputfile=dict(argstr="-outputfile %s", extensions=None, genfile=True,), ) inputs = DTMetric.input_spec() for key, metadata in 
list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DTMetric_outputs(): - output_map = dict(metric_stats=dict(extensions=None, ), ) + output_map = dict(metric_stats=dict(extensions=None,),) outputs = DTMetric.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py b/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py index cd06996609..00b11eb751 100644 --- a/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py +++ b/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py @@ -4,54 +4,33 @@ def test_FSL2Scheme_inputs(): input_map = dict( - args=dict(argstr='%s', ), - bscale=dict( - argstr='-bscale %d', - units='NA', - ), + args=dict(argstr="%s",), + bscale=dict(argstr="-bscale %d", units="NA",), bval_file=dict( - argstr='-bvalfile %s', - extensions=None, - mandatory=True, - position=2, + argstr="-bvalfile %s", extensions=None, mandatory=True, position=2, ), bvec_file=dict( - argstr='-bvecfile %s', - extensions=None, - mandatory=True, - position=1, - ), - diffusiontime=dict( - argstr='-diffusiontime %f', - units='NA', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - flipx=dict(argstr='-flipx', ), - flipy=dict(argstr='-flipy', ), - flipz=dict(argstr='-flipz', ), - interleave=dict(argstr='-interleave', ), - numscans=dict( - argstr='-numscans %d', - units='NA', + argstr="-bvecfile %s", extensions=None, mandatory=True, position=1, ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), - usegradmod=dict(argstr='-usegradmod', ), + diffusiontime=dict(argstr="-diffusiontime %f", units="NA",), + environ=dict(nohash=True, usedefault=True,), + flipx=dict(argstr="-flipx",), + flipy=dict(argstr="-flipy",), + flipz=dict(argstr="-flipz",), + interleave=dict(argstr="-interleave",), + numscans=dict(argstr="-numscans %d", units="NA",), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + usegradmod=dict(argstr="-usegradmod",), ) inputs = FSL2Scheme.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FSL2Scheme_outputs(): - output_map = dict(scheme=dict(extensions=None, ), ) + output_map = dict(scheme=dict(extensions=None,),) outputs = FSL2Scheme.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py b/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py index 13a3107b6d..47379c7f54 100644 --- a/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py +++ b/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py @@ -4,36 +4,23 @@ def test_Image2Voxel_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='-4dimage %s', - extensions=None, - mandatory=True, - position=1, - ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), - out_type=dict( - argstr='-outputdatatype %s', - position=2, - usedefault=True, + argstr="-4dimage %s", extensions=None, mandatory=True, position=1, ), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + out_type=dict(argstr="-outputdatatype %s", position=2, usedefault=True,), ) inputs = Image2Voxel.input_spec() for key, metadata in 
list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Image2Voxel_outputs(): - output_map = dict(voxel_order=dict(extensions=None, ), ) + output_map = dict(voxel_order=dict(extensions=None,),) outputs = Image2Voxel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ImageStats.py b/nipype/interfaces/camino/tests/test_auto_ImageStats.py index 77f23481fc..9d817f4ca9 100644 --- a/nipype/interfaces/camino/tests/test_auto_ImageStats.py +++ b/nipype/interfaces/camino/tests/test_auto_ImageStats.py @@ -4,38 +4,22 @@ def test_ImageStats_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr='-images %s', - mandatory=True, - position=-1, - ), - out_type=dict( - argstr='-outputdatatype %s', - usedefault=True, - ), - output_root=dict( - argstr='-outputroot %s', - extensions=None, - mandatory=True, - ), - stat=dict( - argstr='-stat %s', - mandatory=True, - units='NA', - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_files=dict(argstr="-images %s", mandatory=True, position=-1,), + out_type=dict(argstr="-outputdatatype %s", usedefault=True,), + output_root=dict(argstr="-outputroot %s", extensions=None, mandatory=True,), + stat=dict(argstr="-stat %s", mandatory=True, units="NA",), ) inputs = ImageStats.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ImageStats_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = ImageStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_LinRecon.py b/nipype/interfaces/camino/tests/test_auto_LinRecon.py index 8d7d991708..a8a7731b70 100644 --- a/nipype/interfaces/camino/tests/test_auto_LinRecon.py +++ b/nipype/interfaces/camino/tests/test_auto_LinRecon.py @@ -4,49 +4,25 @@ def test_LinRecon_inputs(): input_map = dict( - args=dict(argstr='%s', ), - bgmask=dict( - argstr='-bgmask %s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=1, - ), - log=dict(argstr='-log', ), - normalize=dict(argstr='-normalize', ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), - qball_mat=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=3, - ), - scheme_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), + args=dict(argstr="%s",), + bgmask=dict(argstr="-bgmask %s", extensions=None,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1,), + log=dict(argstr="-log",), + normalize=dict(argstr="-normalize",), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + qball_mat=dict(argstr="%s", extensions=None, mandatory=True, position=3,), + scheme_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), ) inputs = LinRecon.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_LinRecon_outputs(): - output_map = dict(recon_data=dict(extensions=None, ), ) + 
output_map = dict(recon_data=dict(extensions=None,),) outputs = LinRecon.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_MESD.py b/nipype/interfaces/camino/tests/test_auto_MESD.py index b41a8ed4ad..dd91241d0a 100644 --- a/nipype/interfaces/camino/tests/test_auto_MESD.py +++ b/nipype/interfaces/camino/tests/test_auto_MESD.py @@ -4,60 +4,29 @@ def test_MESD_inputs(): input_map = dict( - args=dict(argstr='%s', ), - bgmask=dict( - argstr='-bgmask %s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fastmesd=dict( - argstr='-fastmesd', - requires=['mepointset'], - ), + args=dict(argstr="%s",), + bgmask=dict(argstr="-bgmask %s", extensions=None,), + environ=dict(nohash=True, usedefault=True,), + fastmesd=dict(argstr="-fastmesd", requires=["mepointset"],), in_file=dict( - argstr='-inputfile %s', - extensions=None, - mandatory=True, - position=1, - ), - inputdatatype=dict(argstr='-inputdatatype %s', ), - inverter=dict( - argstr='-filter %s', - mandatory=True, - position=2, - ), - inverter_param=dict( - argstr='%f', - mandatory=True, - position=3, - units='NA', - ), - mepointset=dict( - argstr='-mepointset %d', - units='NA', - ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), - scheme_file=dict( - argstr='-schemefile %s', - extensions=None, - mandatory=True, - ), + argstr="-inputfile %s", extensions=None, mandatory=True, position=1, + ), + inputdatatype=dict(argstr="-inputdatatype %s",), + inverter=dict(argstr="-filter %s", mandatory=True, position=2,), + inverter_param=dict(argstr="%f", mandatory=True, position=3, units="NA",), + mepointset=dict(argstr="-mepointset %d", units="NA",), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + scheme_file=dict(argstr="-schemefile %s", extensions=None, mandatory=True,), ) inputs = MESD.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MESD_outputs(): - output_map = dict(mesd_data=dict(extensions=None, ), ) + output_map = dict(mesd_data=dict(extensions=None,),) outputs = MESD.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ModelFit.py b/nipype/interfaces/camino/tests/test_auto_ModelFit.py index ab2f6e1d73..ca5ba4a9d6 100644 --- a/nipype/interfaces/camino/tests/test_auto_ModelFit.py +++ b/nipype/interfaces/camino/tests/test_auto_ModelFit.py @@ -4,66 +4,34 @@ def test_ModelFit_inputs(): input_map = dict( - args=dict(argstr='%s', ), - bgmask=dict( - argstr='-bgmask %s', - extensions=None, - ), - bgthresh=dict(argstr='-bgthresh %G', ), - cfthresh=dict(argstr='-csfthresh %G', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixedbvalue=dict(argstr='-fixedbvalue %s', ), - fixedmodq=dict(argstr='-fixedmod %s', ), - in_file=dict( - argstr='-inputfile %s', - extensions=None, - mandatory=True, - ), - inputdatatype=dict(argstr='-inputdatatype %s', ), - model=dict( - argstr='-model %s', - mandatory=True, - ), - noisemap=dict( - argstr='-noisemap %s', - extensions=None, - ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), - outlier=dict( - argstr='-outliermap %s', - extensions=None, - ), - outputfile=dict( - argstr='-outputfile %s', - extensions=None, - ), - residualmap=dict( - argstr='-residualmap %s', - extensions=None, - ), - scheme_file=dict( - argstr='-schemefile 
%s', - extensions=None, - mandatory=True, - ), - sigma=dict(argstr='-sigma %G', ), - tau=dict(argstr='-tau %G', ), + args=dict(argstr="%s",), + bgmask=dict(argstr="-bgmask %s", extensions=None,), + bgthresh=dict(argstr="-bgthresh %G",), + cfthresh=dict(argstr="-csfthresh %G",), + environ=dict(nohash=True, usedefault=True,), + fixedbvalue=dict(argstr="-fixedbvalue %s",), + fixedmodq=dict(argstr="-fixedmod %s",), + in_file=dict(argstr="-inputfile %s", extensions=None, mandatory=True,), + inputdatatype=dict(argstr="-inputdatatype %s",), + model=dict(argstr="-model %s", mandatory=True,), + noisemap=dict(argstr="-noisemap %s", extensions=None,), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + outlier=dict(argstr="-outliermap %s", extensions=None,), + outputfile=dict(argstr="-outputfile %s", extensions=None,), + residualmap=dict(argstr="-residualmap %s", extensions=None,), + scheme_file=dict(argstr="-schemefile %s", extensions=None, mandatory=True,), + sigma=dict(argstr="-sigma %G",), + tau=dict(argstr="-tau %G",), ) inputs = ModelFit.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ModelFit_outputs(): - output_map = dict(fitted_data=dict(extensions=None, ), ) + output_map = dict(fitted_data=dict(extensions=None,),) outputs = ModelFit.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py b/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py index 9bdd2cc3ba..951e4bdc0e 100644 --- a/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py +++ b/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py @@ -4,46 +4,28 @@ def test_NIfTIDT2Camino_inputs(): input_map = dict( - args=dict(argstr='%s', ), - bgmask=dict( - argstr='-bgmask %s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + bgmask=dict(argstr="-bgmask %s", extensions=None,), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='-inputfile %s', - extensions=None, - mandatory=True, - position=1, - ), - lns0_file=dict( - argstr='-lns0 %s', - extensions=None, - ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, + argstr="-inputfile %s", extensions=None, mandatory=True, position=1, ), - s0_file=dict( - argstr='-s0 %s', - extensions=None, - ), - scaleinter=dict(argstr='-scaleinter %s', ), - scaleslope=dict(argstr='-scaleslope %s', ), - uppertriangular=dict(argstr='-uppertriangular %s', ), + lns0_file=dict(argstr="-lns0 %s", extensions=None,), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + s0_file=dict(argstr="-s0 %s", extensions=None,), + scaleinter=dict(argstr="-scaleinter %s",), + scaleslope=dict(argstr="-scaleslope %s",), + uppertriangular=dict(argstr="-uppertriangular %s",), ) inputs = NIfTIDT2Camino.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_NIfTIDT2Camino_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = NIfTIDT2Camino.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py b/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py index 7d118392dc..5a321dddba 100644 --- 
a/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py +++ b/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py @@ -4,54 +4,26 @@ def test_PicoPDFs_inputs(): input_map = dict( - args=dict(argstr='%s', ), - directmap=dict(argstr='-directmap', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='< %s', - extensions=None, - mandatory=True, - position=1, - ), - inputmodel=dict( - argstr='-inputmodel %s', - position=2, - usedefault=True, - ), - luts=dict( - argstr='-luts %s', - mandatory=True, - ), - maxcomponents=dict( - argstr='-maxcomponents %d', - units='NA', - ), - numpds=dict( - argstr='-numpds %d', - units='NA', - ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), - pdf=dict( - argstr='-pdf %s', - position=4, - usedefault=True, - ), + args=dict(argstr="%s",), + directmap=dict(argstr="-directmap",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=1,), + inputmodel=dict(argstr="-inputmodel %s", position=2, usedefault=True,), + luts=dict(argstr="-luts %s", mandatory=True,), + maxcomponents=dict(argstr="-maxcomponents %d", units="NA",), + numpds=dict(argstr="-numpds %d", units="NA",), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + pdf=dict(argstr="-pdf %s", position=4, usedefault=True,), ) inputs = PicoPDFs.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_PicoPDFs_outputs(): - output_map = dict(pdfs=dict(extensions=None, ), ) + output_map = dict(pdfs=dict(extensions=None,),) outputs = PicoPDFs.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py b/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py index e4e9cb4d4f..7ccd071c99 100644 --- a/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py +++ b/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py @@ -4,133 +4,54 @@ def test_ProcStreamlines_inputs(): input_map = dict( - allowmultitargets=dict(argstr='-allowmultitargets', ), - args=dict(argstr='%s', ), - datadims=dict( - argstr='-datadims %s', - units='voxels', - ), - directional=dict( - argstr='-directional %s', - units='NA', - ), - discardloops=dict(argstr='-discardloops', ), - endpointfile=dict( - argstr='-endpointfile %s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - exclusionfile=dict( - argstr='-exclusionfile %s', - extensions=None, - ), - gzip=dict(argstr='-gzip', ), + allowmultitargets=dict(argstr="-allowmultitargets",), + args=dict(argstr="%s",), + datadims=dict(argstr="-datadims %s", units="voxels",), + directional=dict(argstr="-directional %s", units="NA",), + discardloops=dict(argstr="-discardloops",), + endpointfile=dict(argstr="-endpointfile %s", extensions=None,), + environ=dict(nohash=True, usedefault=True,), + exclusionfile=dict(argstr="-exclusionfile %s", extensions=None,), + gzip=dict(argstr="-gzip",), in_file=dict( - argstr='-inputfile %s', - extensions=None, - mandatory=True, - position=1, - ), - inputmodel=dict( - argstr='-inputmodel %s', - usedefault=True, - ), - iterations=dict( - argstr='-iterations %d', - units='NA', - ), - maxtractlength=dict( - argstr='-maxtractlength %d', - units='mm', - ), - maxtractpoints=dict( - argstr='-maxtractpoints %d', - units='NA', - ), - mintractlength=dict( - argstr='-mintractlength %d', - units='mm', - 
), - mintractpoints=dict( - argstr='-mintractpoints %d', - units='NA', - ), - noresample=dict(argstr='-noresample', ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), - outputacm=dict( - argstr='-outputacm', - requires=['outputroot', 'seedfile'], - ), + argstr="-inputfile %s", extensions=None, mandatory=True, position=1, + ), + inputmodel=dict(argstr="-inputmodel %s", usedefault=True,), + iterations=dict(argstr="-iterations %d", units="NA",), + maxtractlength=dict(argstr="-maxtractlength %d", units="mm",), + maxtractpoints=dict(argstr="-maxtractpoints %d", units="NA",), + mintractlength=dict(argstr="-mintractlength %d", units="mm",), + mintractpoints=dict(argstr="-mintractpoints %d", units="NA",), + noresample=dict(argstr="-noresample",), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + outputacm=dict(argstr="-outputacm", requires=["outputroot", "seedfile"],), outputcbs=dict( - argstr='-outputcbs', - requires=['outputroot', 'targetfile', 'seedfile'], - ), - outputcp=dict( - argstr='-outputcp', - requires=['outputroot', 'seedfile'], - ), - outputroot=dict( - argstr='-outputroot %s', - extensions=None, - ), - outputsc=dict( - argstr='-outputsc', - requires=['outputroot', 'seedfile'], - ), - outputtracts=dict(argstr='-outputtracts', ), - regionindex=dict( - argstr='-regionindex %d', - units='mm', - ), - resamplestepsize=dict( - argstr='-resamplestepsize %d', - units='NA', - ), - seedfile=dict( - argstr='-seedfile %s', - extensions=None, - ), - seedpointmm=dict( - argstr='-seedpointmm %s', - units='mm', - ), - seedpointvox=dict( - argstr='-seedpointvox %s', - units='voxels', - ), - targetfile=dict( - argstr='-targetfile %s', - extensions=None, - ), - truncateinexclusion=dict(argstr='-truncateinexclusion', ), - truncateloops=dict(argstr='-truncateloops', ), - voxeldims=dict( - argstr='-voxeldims %s', - units='mm', - ), - waypointfile=dict( - argstr='-waypointfile %s', - extensions=None, - ), + argstr="-outputcbs", requires=["outputroot", "targetfile", "seedfile"], + ), + outputcp=dict(argstr="-outputcp", requires=["outputroot", "seedfile"],), + outputroot=dict(argstr="-outputroot %s", extensions=None,), + outputsc=dict(argstr="-outputsc", requires=["outputroot", "seedfile"],), + outputtracts=dict(argstr="-outputtracts",), + regionindex=dict(argstr="-regionindex %d", units="mm",), + resamplestepsize=dict(argstr="-resamplestepsize %d", units="NA",), + seedfile=dict(argstr="-seedfile %s", extensions=None,), + seedpointmm=dict(argstr="-seedpointmm %s", units="mm",), + seedpointvox=dict(argstr="-seedpointvox %s", units="voxels",), + targetfile=dict(argstr="-targetfile %s", extensions=None,), + truncateinexclusion=dict(argstr="-truncateinexclusion",), + truncateloops=dict(argstr="-truncateloops",), + voxeldims=dict(argstr="-voxeldims %s", units="mm",), + waypointfile=dict(argstr="-waypointfile %s", extensions=None,), ) inputs = ProcStreamlines.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ProcStreamlines_outputs(): - output_map = dict( - outputroot_files=dict(), - proc=dict(extensions=None, ), - ) + output_map = dict(outputroot_files=dict(), proc=dict(extensions=None,),) outputs = ProcStreamlines.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_QBallMX.py b/nipype/interfaces/camino/tests/test_auto_QBallMX.py index 6e30e8e019..180e5c6f83 
100644 --- a/nipype/interfaces/camino/tests/test_auto_QBallMX.py +++ b/nipype/interfaces/camino/tests/test_auto_QBallMX.py @@ -4,50 +4,25 @@ def test_QBallMX_inputs(): input_map = dict( - args=dict(argstr='%s', ), - basistype=dict( - argstr='-basistype %s', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - order=dict( - argstr='-order %d', - units='NA', - ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), - rbfpointset=dict( - argstr='-rbfpointset %d', - units='NA', - ), - rbfsigma=dict( - argstr='-rbfsigma %f', - units='NA', - ), - scheme_file=dict( - argstr='-schemefile %s', - extensions=None, - mandatory=True, - ), - smoothingsigma=dict( - argstr='-smoothingsigma %f', - units='NA', - ), + args=dict(argstr="%s",), + basistype=dict(argstr="-basistype %s", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + order=dict(argstr="-order %d", units="NA",), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + rbfpointset=dict(argstr="-rbfpointset %d", units="NA",), + rbfsigma=dict(argstr="-rbfsigma %f", units="NA",), + scheme_file=dict(argstr="-schemefile %s", extensions=None, mandatory=True,), + smoothingsigma=dict(argstr="-smoothingsigma %f", units="NA",), ) inputs = QBallMX.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_QBallMX_outputs(): - output_map = dict(qmat=dict(extensions=None, ), ) + output_map = dict(qmat=dict(extensions=None,),) outputs = QBallMX.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py b/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py index 1b16f7b7d5..96dd1c2e5e 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py +++ b/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py @@ -4,58 +4,28 @@ def test_SFLUTGen_inputs(): input_map = dict( - args=dict(argstr='%s', ), - binincsize=dict( - argstr='-binincsize %d', - units='NA', - ), - directmap=dict(argstr='-directmap', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-inputfile %s', - extensions=None, - mandatory=True, - ), - info_file=dict( - argstr='-infofile %s', - extensions=None, - mandatory=True, - ), - minvectsperbin=dict( - argstr='-minvectsperbin %d', - units='NA', - ), - order=dict( - argstr='-order %d', - units='NA', - ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), - outputstem=dict( - argstr='-outputstem %s', - usedefault=True, - ), - pdf=dict( - argstr='-pdf %s', - usedefault=True, - ), + args=dict(argstr="%s",), + binincsize=dict(argstr="-binincsize %d", units="NA",), + directmap=dict(argstr="-directmap",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-inputfile %s", extensions=None, mandatory=True,), + info_file=dict(argstr="-infofile %s", extensions=None, mandatory=True,), + minvectsperbin=dict(argstr="-minvectsperbin %d", units="NA",), + order=dict(argstr="-order %d", units="NA",), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + outputstem=dict(argstr="-outputstem %s", usedefault=True,), + pdf=dict(argstr="-pdf %s", usedefault=True,), ) inputs = SFLUTGen.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SFLUTGen_outputs(): 
output_map = dict( - lut_one_fibre=dict(extensions=None, ), - lut_two_fibres=dict(extensions=None, ), + lut_one_fibre=dict(extensions=None,), lut_two_fibres=dict(extensions=None,), ) outputs = SFLUTGen.output_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py b/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py index 9c89fef228..b6a032e66d 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py +++ b/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py @@ -4,83 +4,39 @@ def test_SFPICOCalibData_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), info_file=dict( - argstr='-infooutputfile %s', + argstr="-infooutputfile %s", extensions=None, genfile=True, hash_files=False, mandatory=True, ), - onedtfarange=dict( - argstr='-onedtfarange %s', - units='NA', - ), - onedtfastep=dict( - argstr='-onedtfastep %f', - units='NA', - ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), - scheme_file=dict( - argstr='-schemefile %s', - extensions=None, - mandatory=True, - ), - seed=dict( - argstr='-seed %f', - units='NA', - ), - snr=dict( - argstr='-snr %f', - units='NA', - ), - trace=dict( - argstr='-trace %f', - units='NA', - ), - twodtanglerange=dict( - argstr='-twodtanglerange %s', - units='NA', - ), - twodtanglestep=dict( - argstr='-twodtanglestep %f', - units='NA', - ), - twodtfarange=dict( - argstr='-twodtfarange %s', - units='NA', - ), - twodtfastep=dict( - argstr='-twodtfastep %f', - units='NA', - ), - twodtmixmax=dict( - argstr='-twodtmixmax %f', - units='NA', - ), - twodtmixstep=dict( - argstr='-twodtmixstep %f', - units='NA', - ), + onedtfarange=dict(argstr="-onedtfarange %s", units="NA",), + onedtfastep=dict(argstr="-onedtfastep %f", units="NA",), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + scheme_file=dict(argstr="-schemefile %s", extensions=None, mandatory=True,), + seed=dict(argstr="-seed %f", units="NA",), + snr=dict(argstr="-snr %f", units="NA",), + trace=dict(argstr="-trace %f", units="NA",), + twodtanglerange=dict(argstr="-twodtanglerange %s", units="NA",), + twodtanglestep=dict(argstr="-twodtanglestep %f", units="NA",), + twodtfarange=dict(argstr="-twodtfarange %s", units="NA",), + twodtfastep=dict(argstr="-twodtfastep %f", units="NA",), + twodtmixmax=dict(argstr="-twodtmixmax %f", units="NA",), + twodtmixstep=dict(argstr="-twodtmixstep %f", units="NA",), ) inputs = SFPICOCalibData.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SFPICOCalibData_outputs(): output_map = dict( - PICOCalib=dict(extensions=None, ), - calib_info=dict(extensions=None, ), + PICOCalib=dict(extensions=None,), calib_info=dict(extensions=None,), ) outputs = SFPICOCalibData.output_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_SFPeaks.py b/nipype/interfaces/camino/tests/test_auto_SFPeaks.py index ef382ff133..8012e56d5d 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFPeaks.py +++ b/nipype/interfaces/camino/tests/test_auto_SFPeaks.py @@ -4,75 +4,32 @@ def test_SFPeaks_inputs(): input_map = dict( - args=dict(argstr='%s', ), - density=dict( - argstr='-density %d', - units='NA', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-inputfile %s', - extensions=None, - 
mandatory=True, - ), - inputmodel=dict( - argstr='-inputmodel %s', - mandatory=True, - ), - mepointset=dict( - argstr='-mepointset %d', - units='NA', - ), - noconsistencycheck=dict(argstr='-noconsistencycheck', ), - numpds=dict( - argstr='-numpds %d', - units='NA', - ), - order=dict( - argstr='-order %d', - units='NA', - ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), - pdthresh=dict( - argstr='-pdthresh %f', - units='NA', - ), - pointset=dict( - argstr='-pointset %d', - units='NA', - ), - rbfpointset=dict( - argstr='-rbfpointset %d', - units='NA', - ), - scheme_file=dict( - argstr='%s', - extensions=None, - ), - searchradius=dict( - argstr='-searchradius %f', - units='NA', - ), - stdsfrommean=dict( - argstr='-stdsfrommean %f', - units='NA', - ), + args=dict(argstr="%s",), + density=dict(argstr="-density %d", units="NA",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-inputfile %s", extensions=None, mandatory=True,), + inputmodel=dict(argstr="-inputmodel %s", mandatory=True,), + mepointset=dict(argstr="-mepointset %d", units="NA",), + noconsistencycheck=dict(argstr="-noconsistencycheck",), + numpds=dict(argstr="-numpds %d", units="NA",), + order=dict(argstr="-order %d", units="NA",), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + pdthresh=dict(argstr="-pdthresh %f", units="NA",), + pointset=dict(argstr="-pointset %d", units="NA",), + rbfpointset=dict(argstr="-rbfpointset %d", units="NA",), + scheme_file=dict(argstr="%s", extensions=None,), + searchradius=dict(argstr="-searchradius %f", units="NA",), + stdsfrommean=dict(argstr="-stdsfrommean %f", units="NA",), ) inputs = SFPeaks.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SFPeaks_outputs(): - output_map = dict(peaks=dict(extensions=None, ), ) + output_map = dict(peaks=dict(extensions=None,),) outputs = SFPeaks.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_Shredder.py b/nipype/interfaces/camino/tests/test_auto_Shredder.py index 81a78f2feb..c7e82afbad 100644 --- a/nipype/interfaces/camino/tests/test_auto_Shredder.py +++ b/nipype/interfaces/camino/tests/test_auto_Shredder.py @@ -4,46 +4,23 @@ def test_Shredder_inputs(): input_map = dict( - args=dict(argstr='%s', ), - chunksize=dict( - argstr='%d', - position=2, - units='NA', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='< %s', - extensions=None, - mandatory=True, - position=-2, - ), - offset=dict( - argstr='%d', - position=1, - units='NA', - ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), - space=dict( - argstr='%d', - position=3, - units='NA', - ), + args=dict(argstr="%s",), + chunksize=dict(argstr="%d", position=2, units="NA",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=-2,), + offset=dict(argstr="%d", position=1, units="NA",), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + space=dict(argstr="%d", position=3, units="NA",), ) inputs = Shredder.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Shredder_outputs(): - output_map = dict(shredded=dict(extensions=None, ), ) + output_map = 
dict(shredded=dict(extensions=None,),) outputs = Shredder.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_Track.py b/nipype/interfaces/camino/tests/test_auto_Track.py index 2b08ecf619..99f42e95c7 100644 --- a/nipype/interfaces/camino/tests/test_auto_Track.py +++ b/nipype/interfaces/camino/tests/test_auto_Track.py @@ -4,83 +4,40 @@ def test_Track_inputs(): input_map = dict( - anisfile=dict( - argstr='-anisfile %s', - extensions=None, - ), - anisthresh=dict(argstr='-anisthresh %f', ), - args=dict(argstr='%s', ), - curveinterval=dict( - argstr='-curveinterval %f', - requires=['curvethresh'], - ), - curvethresh=dict(argstr='-curvethresh %f', ), - data_dims=dict( - argstr='-datadims %s', - units='voxels', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gzip=dict(argstr='-gzip', ), - in_file=dict( - argstr='-inputfile %s', - extensions=None, - position=1, - ), - inputdatatype=dict(argstr='-inputdatatype %s', ), - inputmodel=dict( - argstr='-inputmodel %s', - usedefault=True, - ), - interpolator=dict(argstr='-interpolator %s', ), - ipthresh=dict(argstr='-ipthresh %f', ), - maxcomponents=dict( - argstr='-maxcomponents %d', - units='NA', - ), - numpds=dict( - argstr='-numpds %d', - units='NA', - ), + anisfile=dict(argstr="-anisfile %s", extensions=None,), + anisthresh=dict(argstr="-anisthresh %f",), + args=dict(argstr="%s",), + curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"],), + curvethresh=dict(argstr="-curvethresh %f",), + data_dims=dict(argstr="-datadims %s", units="voxels",), + environ=dict(nohash=True, usedefault=True,), + gzip=dict(argstr="-gzip",), + in_file=dict(argstr="-inputfile %s", extensions=None, position=1,), + inputdatatype=dict(argstr="-inputdatatype %s",), + inputmodel=dict(argstr="-inputmodel %s", usedefault=True,), + interpolator=dict(argstr="-interpolator %s",), + ipthresh=dict(argstr="-ipthresh %f",), + maxcomponents=dict(argstr="-maxcomponents %d", units="NA",), + numpds=dict(argstr="-numpds %d", units="NA",), out_file=dict( - argstr='-outputfile %s', - extensions=None, - genfile=True, - position=-1, - ), - output_root=dict( - argstr='-outputroot %s', - extensions=None, - position=-1, - ), - outputtracts=dict(argstr='-outputtracts %s', ), - seed_file=dict( - argstr='-seedfile %s', - extensions=None, - position=2, - ), - stepsize=dict( - argstr='-stepsize %f', - requires=['tracker'], - ), - tracker=dict( - argstr='-tracker %s', - usedefault=True, - ), - voxel_dims=dict( - argstr='-voxeldims %s', - units='mm', - ), + argstr="-outputfile %s", extensions=None, genfile=True, position=-1, + ), + output_root=dict(argstr="-outputroot %s", extensions=None, position=-1,), + outputtracts=dict(argstr="-outputtracts %s",), + seed_file=dict(argstr="-seedfile %s", extensions=None, position=2,), + stepsize=dict(argstr="-stepsize %f", requires=["tracker"],), + tracker=dict(argstr="-tracker %s", usedefault=True,), + voxel_dims=dict(argstr="-voxeldims %s", units="mm",), ) inputs = Track.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Track_outputs(): - output_map = dict(tracked=dict(extensions=None, ), ) + output_map = dict(tracked=dict(extensions=None,),) outputs = Track.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py b/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py index 
d5514b301b..ff13fbe241 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py @@ -4,83 +4,40 @@ def test_TrackBallStick_inputs(): input_map = dict( - anisfile=dict( - argstr='-anisfile %s', - extensions=None, - ), - anisthresh=dict(argstr='-anisthresh %f', ), - args=dict(argstr='%s', ), - curveinterval=dict( - argstr='-curveinterval %f', - requires=['curvethresh'], - ), - curvethresh=dict(argstr='-curvethresh %f', ), - data_dims=dict( - argstr='-datadims %s', - units='voxels', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gzip=dict(argstr='-gzip', ), - in_file=dict( - argstr='-inputfile %s', - extensions=None, - position=1, - ), - inputdatatype=dict(argstr='-inputdatatype %s', ), - inputmodel=dict( - argstr='-inputmodel %s', - usedefault=True, - ), - interpolator=dict(argstr='-interpolator %s', ), - ipthresh=dict(argstr='-ipthresh %f', ), - maxcomponents=dict( - argstr='-maxcomponents %d', - units='NA', - ), - numpds=dict( - argstr='-numpds %d', - units='NA', - ), + anisfile=dict(argstr="-anisfile %s", extensions=None,), + anisthresh=dict(argstr="-anisthresh %f",), + args=dict(argstr="%s",), + curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"],), + curvethresh=dict(argstr="-curvethresh %f",), + data_dims=dict(argstr="-datadims %s", units="voxels",), + environ=dict(nohash=True, usedefault=True,), + gzip=dict(argstr="-gzip",), + in_file=dict(argstr="-inputfile %s", extensions=None, position=1,), + inputdatatype=dict(argstr="-inputdatatype %s",), + inputmodel=dict(argstr="-inputmodel %s", usedefault=True,), + interpolator=dict(argstr="-interpolator %s",), + ipthresh=dict(argstr="-ipthresh %f",), + maxcomponents=dict(argstr="-maxcomponents %d", units="NA",), + numpds=dict(argstr="-numpds %d", units="NA",), out_file=dict( - argstr='-outputfile %s', - extensions=None, - genfile=True, - position=-1, - ), - output_root=dict( - argstr='-outputroot %s', - extensions=None, - position=-1, - ), - outputtracts=dict(argstr='-outputtracts %s', ), - seed_file=dict( - argstr='-seedfile %s', - extensions=None, - position=2, - ), - stepsize=dict( - argstr='-stepsize %f', - requires=['tracker'], - ), - tracker=dict( - argstr='-tracker %s', - usedefault=True, - ), - voxel_dims=dict( - argstr='-voxeldims %s', - units='mm', - ), + argstr="-outputfile %s", extensions=None, genfile=True, position=-1, + ), + output_root=dict(argstr="-outputroot %s", extensions=None, position=-1,), + outputtracts=dict(argstr="-outputtracts %s",), + seed_file=dict(argstr="-seedfile %s", extensions=None, position=2,), + stepsize=dict(argstr="-stepsize %f", requires=["tracker"],), + tracker=dict(argstr="-tracker %s", usedefault=True,), + voxel_dims=dict(argstr="-voxeldims %s", units="mm",), ) inputs = TrackBallStick.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TrackBallStick_outputs(): - output_map = dict(tracked=dict(extensions=None, ), ) + output_map = dict(tracked=dict(extensions=None,),) outputs = TrackBallStick.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py b/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py index d2b4b27dd3..7f174486d5 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py @@ -4,102 +4,49 @@ 
def test_TrackBayesDirac_inputs(): input_map = dict( - anisfile=dict( - argstr='-anisfile %s', - extensions=None, - ), - anisthresh=dict(argstr='-anisthresh %f', ), - args=dict(argstr='%s', ), - curveinterval=dict( - argstr='-curveinterval %f', - requires=['curvethresh'], - ), - curvepriorg=dict(argstr='-curvepriorg %G', ), - curvepriork=dict(argstr='-curvepriork %G', ), - curvethresh=dict(argstr='-curvethresh %f', ), - data_dims=dict( - argstr='-datadims %s', - units='voxels', - ), - datamodel=dict(argstr='-datamodel %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - extpriordatatype=dict(argstr='-extpriordatatype %s', ), - extpriorfile=dict( - argstr='-extpriorfile %s', - extensions=None, - ), - gzip=dict(argstr='-gzip', ), - in_file=dict( - argstr='-inputfile %s', - extensions=None, - position=1, - ), - inputdatatype=dict(argstr='-inputdatatype %s', ), - inputmodel=dict( - argstr='-inputmodel %s', - usedefault=True, - ), - interpolator=dict(argstr='-interpolator %s', ), - ipthresh=dict(argstr='-ipthresh %f', ), - iterations=dict( - argstr='-iterations %d', - units='NA', - ), - maxcomponents=dict( - argstr='-maxcomponents %d', - units='NA', - ), - numpds=dict( - argstr='-numpds %d', - units='NA', - ), + anisfile=dict(argstr="-anisfile %s", extensions=None,), + anisthresh=dict(argstr="-anisthresh %f",), + args=dict(argstr="%s",), + curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"],), + curvepriorg=dict(argstr="-curvepriorg %G",), + curvepriork=dict(argstr="-curvepriork %G",), + curvethresh=dict(argstr="-curvethresh %f",), + data_dims=dict(argstr="-datadims %s", units="voxels",), + datamodel=dict(argstr="-datamodel %s",), + environ=dict(nohash=True, usedefault=True,), + extpriordatatype=dict(argstr="-extpriordatatype %s",), + extpriorfile=dict(argstr="-extpriorfile %s", extensions=None,), + gzip=dict(argstr="-gzip",), + in_file=dict(argstr="-inputfile %s", extensions=None, position=1,), + inputdatatype=dict(argstr="-inputdatatype %s",), + inputmodel=dict(argstr="-inputmodel %s", usedefault=True,), + interpolator=dict(argstr="-interpolator %s",), + ipthresh=dict(argstr="-ipthresh %f",), + iterations=dict(argstr="-iterations %d", units="NA",), + maxcomponents=dict(argstr="-maxcomponents %d", units="NA",), + numpds=dict(argstr="-numpds %d", units="NA",), out_file=dict( - argstr='-outputfile %s', - extensions=None, - genfile=True, - position=-1, - ), - output_root=dict( - argstr='-outputroot %s', - extensions=None, - position=-1, - ), - outputtracts=dict(argstr='-outputtracts %s', ), - pdf=dict(argstr='-pdf %s', ), - pointset=dict(argstr='-pointset %s', ), - scheme_file=dict( - argstr='-schemefile %s', - extensions=None, - mandatory=True, - ), - seed_file=dict( - argstr='-seedfile %s', - extensions=None, - position=2, - ), - stepsize=dict( - argstr='-stepsize %f', - requires=['tracker'], - ), - tracker=dict( - argstr='-tracker %s', - usedefault=True, - ), - voxel_dims=dict( - argstr='-voxeldims %s', - units='mm', - ), + argstr="-outputfile %s", extensions=None, genfile=True, position=-1, + ), + output_root=dict(argstr="-outputroot %s", extensions=None, position=-1,), + outputtracts=dict(argstr="-outputtracts %s",), + pdf=dict(argstr="-pdf %s",), + pointset=dict(argstr="-pointset %s",), + scheme_file=dict(argstr="-schemefile %s", extensions=None, mandatory=True,), + seed_file=dict(argstr="-seedfile %s", extensions=None, position=2,), + stepsize=dict(argstr="-stepsize %f", requires=["tracker"],), + tracker=dict(argstr="-tracker %s", usedefault=True,), + 
voxel_dims=dict(argstr="-voxeldims %s", units="mm",), ) inputs = TrackBayesDirac.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TrackBayesDirac_outputs(): - output_map = dict(tracked=dict(extensions=None, ), ) + output_map = dict(tracked=dict(extensions=None,),) outputs = TrackBayesDirac.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py index f137fda109..91489a1d84 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py @@ -4,91 +4,42 @@ def test_TrackBedpostxDeter_inputs(): input_map = dict( - anisfile=dict( - argstr='-anisfile %s', - extensions=None, - ), - anisthresh=dict(argstr='-anisthresh %f', ), - args=dict(argstr='%s', ), - bedpostxdir=dict( - argstr='-bedpostxdir %s', - mandatory=True, - ), - curveinterval=dict( - argstr='-curveinterval %f', - requires=['curvethresh'], - ), - curvethresh=dict(argstr='-curvethresh %f', ), - data_dims=dict( - argstr='-datadims %s', - units='voxels', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gzip=dict(argstr='-gzip', ), - in_file=dict( - argstr='-inputfile %s', - extensions=None, - position=1, - ), - inputdatatype=dict(argstr='-inputdatatype %s', ), - inputmodel=dict( - argstr='-inputmodel %s', - usedefault=True, - ), - interpolator=dict(argstr='-interpolator %s', ), - ipthresh=dict(argstr='-ipthresh %f', ), - maxcomponents=dict( - argstr='-maxcomponents %d', - units='NA', - ), - min_vol_frac=dict( - argstr='-bedpostxminf %d', - units='NA', - ), - numpds=dict( - argstr='-numpds %d', - units='NA', - ), + anisfile=dict(argstr="-anisfile %s", extensions=None,), + anisthresh=dict(argstr="-anisthresh %f",), + args=dict(argstr="%s",), + bedpostxdir=dict(argstr="-bedpostxdir %s", mandatory=True,), + curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"],), + curvethresh=dict(argstr="-curvethresh %f",), + data_dims=dict(argstr="-datadims %s", units="voxels",), + environ=dict(nohash=True, usedefault=True,), + gzip=dict(argstr="-gzip",), + in_file=dict(argstr="-inputfile %s", extensions=None, position=1,), + inputdatatype=dict(argstr="-inputdatatype %s",), + inputmodel=dict(argstr="-inputmodel %s", usedefault=True,), + interpolator=dict(argstr="-interpolator %s",), + ipthresh=dict(argstr="-ipthresh %f",), + maxcomponents=dict(argstr="-maxcomponents %d", units="NA",), + min_vol_frac=dict(argstr="-bedpostxminf %d", units="NA",), + numpds=dict(argstr="-numpds %d", units="NA",), out_file=dict( - argstr='-outputfile %s', - extensions=None, - genfile=True, - position=-1, - ), - output_root=dict( - argstr='-outputroot %s', - extensions=None, - position=-1, - ), - outputtracts=dict(argstr='-outputtracts %s', ), - seed_file=dict( - argstr='-seedfile %s', - extensions=None, - position=2, - ), - stepsize=dict( - argstr='-stepsize %f', - requires=['tracker'], - ), - tracker=dict( - argstr='-tracker %s', - usedefault=True, - ), - voxel_dims=dict( - argstr='-voxeldims %s', - units='mm', - ), + argstr="-outputfile %s", extensions=None, genfile=True, position=-1, + ), + output_root=dict(argstr="-outputroot %s", extensions=None, position=-1,), + outputtracts=dict(argstr="-outputtracts %s",), + seed_file=dict(argstr="-seedfile %s", extensions=None, position=2,), + 
stepsize=dict(argstr="-stepsize %f", requires=["tracker"],), + tracker=dict(argstr="-tracker %s", usedefault=True,), + voxel_dims=dict(argstr="-voxeldims %s", units="mm",), ) inputs = TrackBedpostxDeter.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TrackBedpostxDeter_outputs(): - output_map = dict(tracked=dict(extensions=None, ), ) + output_map = dict(tracked=dict(extensions=None,),) outputs = TrackBedpostxDeter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py index f23fb143d4..92f02879da 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py @@ -4,95 +4,43 @@ def test_TrackBedpostxProba_inputs(): input_map = dict( - anisfile=dict( - argstr='-anisfile %s', - extensions=None, - ), - anisthresh=dict(argstr='-anisthresh %f', ), - args=dict(argstr='%s', ), - bedpostxdir=dict( - argstr='-bedpostxdir %s', - mandatory=True, - ), - curveinterval=dict( - argstr='-curveinterval %f', - requires=['curvethresh'], - ), - curvethresh=dict(argstr='-curvethresh %f', ), - data_dims=dict( - argstr='-datadims %s', - units='voxels', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gzip=dict(argstr='-gzip', ), - in_file=dict( - argstr='-inputfile %s', - extensions=None, - position=1, - ), - inputdatatype=dict(argstr='-inputdatatype %s', ), - inputmodel=dict( - argstr='-inputmodel %s', - usedefault=True, - ), - interpolator=dict(argstr='-interpolator %s', ), - ipthresh=dict(argstr='-ipthresh %f', ), - iterations=dict( - argstr='-iterations %d', - units='NA', - ), - maxcomponents=dict( - argstr='-maxcomponents %d', - units='NA', - ), - min_vol_frac=dict( - argstr='-bedpostxminf %d', - units='NA', - ), - numpds=dict( - argstr='-numpds %d', - units='NA', - ), + anisfile=dict(argstr="-anisfile %s", extensions=None,), + anisthresh=dict(argstr="-anisthresh %f",), + args=dict(argstr="%s",), + bedpostxdir=dict(argstr="-bedpostxdir %s", mandatory=True,), + curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"],), + curvethresh=dict(argstr="-curvethresh %f",), + data_dims=dict(argstr="-datadims %s", units="voxels",), + environ=dict(nohash=True, usedefault=True,), + gzip=dict(argstr="-gzip",), + in_file=dict(argstr="-inputfile %s", extensions=None, position=1,), + inputdatatype=dict(argstr="-inputdatatype %s",), + inputmodel=dict(argstr="-inputmodel %s", usedefault=True,), + interpolator=dict(argstr="-interpolator %s",), + ipthresh=dict(argstr="-ipthresh %f",), + iterations=dict(argstr="-iterations %d", units="NA",), + maxcomponents=dict(argstr="-maxcomponents %d", units="NA",), + min_vol_frac=dict(argstr="-bedpostxminf %d", units="NA",), + numpds=dict(argstr="-numpds %d", units="NA",), out_file=dict( - argstr='-outputfile %s', - extensions=None, - genfile=True, - position=-1, - ), - output_root=dict( - argstr='-outputroot %s', - extensions=None, - position=-1, - ), - outputtracts=dict(argstr='-outputtracts %s', ), - seed_file=dict( - argstr='-seedfile %s', - extensions=None, - position=2, - ), - stepsize=dict( - argstr='-stepsize %f', - requires=['tracker'], - ), - tracker=dict( - argstr='-tracker %s', - usedefault=True, - ), - voxel_dims=dict( - argstr='-voxeldims %s', - units='mm', - ), + argstr="-outputfile %s", 
extensions=None, genfile=True, position=-1, + ), + output_root=dict(argstr="-outputroot %s", extensions=None, position=-1,), + outputtracts=dict(argstr="-outputtracts %s",), + seed_file=dict(argstr="-seedfile %s", extensions=None, position=2,), + stepsize=dict(argstr="-stepsize %f", requires=["tracker"],), + tracker=dict(argstr="-tracker %s", usedefault=True,), + voxel_dims=dict(argstr="-voxeldims %s", units="mm",), ) inputs = TrackBedpostxProba.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TrackBedpostxProba_outputs(): - output_map = dict(tracked=dict(extensions=None, ), ) + output_map = dict(tracked=dict(extensions=None,),) outputs = TrackBedpostxProba.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py b/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py index 589d7afe9c..8cd35bab8a 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py @@ -4,101 +4,45 @@ def test_TrackBootstrap_inputs(): input_map = dict( - anisfile=dict( - argstr='-anisfile %s', - extensions=None, - ), - anisthresh=dict(argstr='-anisthresh %f', ), - args=dict(argstr='%s', ), - bgmask=dict( - argstr='-bgmask %s', - extensions=None, - ), - bsdatafiles=dict( - argstr='-bsdatafile %s', - mandatory=True, - ), - curveinterval=dict( - argstr='-curveinterval %f', - requires=['curvethresh'], - ), - curvethresh=dict(argstr='-curvethresh %f', ), - data_dims=dict( - argstr='-datadims %s', - units='voxels', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gzip=dict(argstr='-gzip', ), - in_file=dict( - argstr='-inputfile %s', - extensions=None, - position=1, - ), - inputdatatype=dict(argstr='-inputdatatype %s', ), - inputmodel=dict( - argstr='-inputmodel %s', - usedefault=True, - ), - interpolator=dict(argstr='-interpolator %s', ), - inversion=dict(argstr='-inversion %s', ), - ipthresh=dict(argstr='-ipthresh %f', ), - iterations=dict( - argstr='-iterations %d', - units='NA', - ), - maxcomponents=dict( - argstr='-maxcomponents %d', - units='NA', - ), - numpds=dict( - argstr='-numpds %d', - units='NA', - ), + anisfile=dict(argstr="-anisfile %s", extensions=None,), + anisthresh=dict(argstr="-anisthresh %f",), + args=dict(argstr="%s",), + bgmask=dict(argstr="-bgmask %s", extensions=None,), + bsdatafiles=dict(argstr="-bsdatafile %s", mandatory=True,), + curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"],), + curvethresh=dict(argstr="-curvethresh %f",), + data_dims=dict(argstr="-datadims %s", units="voxels",), + environ=dict(nohash=True, usedefault=True,), + gzip=dict(argstr="-gzip",), + in_file=dict(argstr="-inputfile %s", extensions=None, position=1,), + inputdatatype=dict(argstr="-inputdatatype %s",), + inputmodel=dict(argstr="-inputmodel %s", usedefault=True,), + interpolator=dict(argstr="-interpolator %s",), + inversion=dict(argstr="-inversion %s",), + ipthresh=dict(argstr="-ipthresh %f",), + iterations=dict(argstr="-iterations %d", units="NA",), + maxcomponents=dict(argstr="-maxcomponents %d", units="NA",), + numpds=dict(argstr="-numpds %d", units="NA",), out_file=dict( - argstr='-outputfile %s', - extensions=None, - genfile=True, - position=-1, - ), - output_root=dict( - argstr='-outputroot %s', - extensions=None, - position=-1, - ), - outputtracts=dict(argstr='-outputtracts %s', ), - scheme_file=dict( - 
argstr='-schemefile %s', - extensions=None, - mandatory=True, - ), - seed_file=dict( - argstr='-seedfile %s', - extensions=None, - position=2, - ), - stepsize=dict( - argstr='-stepsize %f', - requires=['tracker'], - ), - tracker=dict( - argstr='-tracker %s', - usedefault=True, - ), - voxel_dims=dict( - argstr='-voxeldims %s', - units='mm', - ), + argstr="-outputfile %s", extensions=None, genfile=True, position=-1, + ), + output_root=dict(argstr="-outputroot %s", extensions=None, position=-1,), + outputtracts=dict(argstr="-outputtracts %s",), + scheme_file=dict(argstr="-schemefile %s", extensions=None, mandatory=True,), + seed_file=dict(argstr="-seedfile %s", extensions=None, position=2,), + stepsize=dict(argstr="-stepsize %f", requires=["tracker"],), + tracker=dict(argstr="-tracker %s", usedefault=True,), + voxel_dims=dict(argstr="-voxeldims %s", units="mm",), ) inputs = TrackBootstrap.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TrackBootstrap_outputs(): - output_map = dict(tracked=dict(extensions=None, ), ) + output_map = dict(tracked=dict(extensions=None,),) outputs = TrackBootstrap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackDT.py b/nipype/interfaces/camino/tests/test_auto_TrackDT.py index 4b95e8f53f..736dc5fc5f 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackDT.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackDT.py @@ -4,83 +4,40 @@ def test_TrackDT_inputs(): input_map = dict( - anisfile=dict( - argstr='-anisfile %s', - extensions=None, - ), - anisthresh=dict(argstr='-anisthresh %f', ), - args=dict(argstr='%s', ), - curveinterval=dict( - argstr='-curveinterval %f', - requires=['curvethresh'], - ), - curvethresh=dict(argstr='-curvethresh %f', ), - data_dims=dict( - argstr='-datadims %s', - units='voxels', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gzip=dict(argstr='-gzip', ), - in_file=dict( - argstr='-inputfile %s', - extensions=None, - position=1, - ), - inputdatatype=dict(argstr='-inputdatatype %s', ), - inputmodel=dict( - argstr='-inputmodel %s', - usedefault=True, - ), - interpolator=dict(argstr='-interpolator %s', ), - ipthresh=dict(argstr='-ipthresh %f', ), - maxcomponents=dict( - argstr='-maxcomponents %d', - units='NA', - ), - numpds=dict( - argstr='-numpds %d', - units='NA', - ), + anisfile=dict(argstr="-anisfile %s", extensions=None,), + anisthresh=dict(argstr="-anisthresh %f",), + args=dict(argstr="%s",), + curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"],), + curvethresh=dict(argstr="-curvethresh %f",), + data_dims=dict(argstr="-datadims %s", units="voxels",), + environ=dict(nohash=True, usedefault=True,), + gzip=dict(argstr="-gzip",), + in_file=dict(argstr="-inputfile %s", extensions=None, position=1,), + inputdatatype=dict(argstr="-inputdatatype %s",), + inputmodel=dict(argstr="-inputmodel %s", usedefault=True,), + interpolator=dict(argstr="-interpolator %s",), + ipthresh=dict(argstr="-ipthresh %f",), + maxcomponents=dict(argstr="-maxcomponents %d", units="NA",), + numpds=dict(argstr="-numpds %d", units="NA",), out_file=dict( - argstr='-outputfile %s', - extensions=None, - genfile=True, - position=-1, - ), - output_root=dict( - argstr='-outputroot %s', - extensions=None, - position=-1, - ), - outputtracts=dict(argstr='-outputtracts %s', ), - seed_file=dict( - argstr='-seedfile %s', - extensions=None, - 
position=2, - ), - stepsize=dict( - argstr='-stepsize %f', - requires=['tracker'], - ), - tracker=dict( - argstr='-tracker %s', - usedefault=True, - ), - voxel_dims=dict( - argstr='-voxeldims %s', - units='mm', - ), + argstr="-outputfile %s", extensions=None, genfile=True, position=-1, + ), + output_root=dict(argstr="-outputroot %s", extensions=None, position=-1,), + outputtracts=dict(argstr="-outputtracts %s",), + seed_file=dict(argstr="-seedfile %s", extensions=None, position=2,), + stepsize=dict(argstr="-stepsize %f", requires=["tracker"],), + tracker=dict(argstr="-tracker %s", usedefault=True,), + voxel_dims=dict(argstr="-voxeldims %s", units="mm",), ) inputs = TrackDT.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TrackDT_outputs(): - output_map = dict(tracked=dict(extensions=None, ), ) + output_map = dict(tracked=dict(extensions=None,),) outputs = TrackDT.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackPICo.py b/nipype/interfaces/camino/tests/test_auto_TrackPICo.py index b3c69bc0b7..37d4a95179 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackPICo.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackPICo.py @@ -4,88 +4,42 @@ def test_TrackPICo_inputs(): input_map = dict( - anisfile=dict( - argstr='-anisfile %s', - extensions=None, - ), - anisthresh=dict(argstr='-anisthresh %f', ), - args=dict(argstr='%s', ), - curveinterval=dict( - argstr='-curveinterval %f', - requires=['curvethresh'], - ), - curvethresh=dict(argstr='-curvethresh %f', ), - data_dims=dict( - argstr='-datadims %s', - units='voxels', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gzip=dict(argstr='-gzip', ), - in_file=dict( - argstr='-inputfile %s', - extensions=None, - position=1, - ), - inputdatatype=dict(argstr='-inputdatatype %s', ), - inputmodel=dict( - argstr='-inputmodel %s', - usedefault=True, - ), - interpolator=dict(argstr='-interpolator %s', ), - ipthresh=dict(argstr='-ipthresh %f', ), - iterations=dict( - argstr='-iterations %d', - units='NA', - ), - maxcomponents=dict( - argstr='-maxcomponents %d', - units='NA', - ), - numpds=dict( - argstr='-numpds %d', - units='NA', - ), + anisfile=dict(argstr="-anisfile %s", extensions=None,), + anisthresh=dict(argstr="-anisthresh %f",), + args=dict(argstr="%s",), + curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"],), + curvethresh=dict(argstr="-curvethresh %f",), + data_dims=dict(argstr="-datadims %s", units="voxels",), + environ=dict(nohash=True, usedefault=True,), + gzip=dict(argstr="-gzip",), + in_file=dict(argstr="-inputfile %s", extensions=None, position=1,), + inputdatatype=dict(argstr="-inputdatatype %s",), + inputmodel=dict(argstr="-inputmodel %s", usedefault=True,), + interpolator=dict(argstr="-interpolator %s",), + ipthresh=dict(argstr="-ipthresh %f",), + iterations=dict(argstr="-iterations %d", units="NA",), + maxcomponents=dict(argstr="-maxcomponents %d", units="NA",), + numpds=dict(argstr="-numpds %d", units="NA",), out_file=dict( - argstr='-outputfile %s', - extensions=None, - genfile=True, - position=-1, - ), - output_root=dict( - argstr='-outputroot %s', - extensions=None, - position=-1, - ), - outputtracts=dict(argstr='-outputtracts %s', ), - pdf=dict(argstr='-pdf %s', ), - seed_file=dict( - argstr='-seedfile %s', - extensions=None, - position=2, - ), - stepsize=dict( - argstr='-stepsize %f', - 
requires=['tracker'], - ), - tracker=dict( - argstr='-tracker %s', - usedefault=True, - ), - voxel_dims=dict( - argstr='-voxeldims %s', - units='mm', - ), + argstr="-outputfile %s", extensions=None, genfile=True, position=-1, + ), + output_root=dict(argstr="-outputroot %s", extensions=None, position=-1,), + outputtracts=dict(argstr="-outputtracts %s",), + pdf=dict(argstr="-pdf %s",), + seed_file=dict(argstr="-seedfile %s", extensions=None, position=2,), + stepsize=dict(argstr="-stepsize %f", requires=["tracker"],), + tracker=dict(argstr="-tracker %s", usedefault=True,), + voxel_dims=dict(argstr="-voxeldims %s", units="mm",), ) inputs = TrackPICo.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TrackPICo_outputs(): - output_map = dict(tracked=dict(extensions=None, ), ) + output_map = dict(tracked=dict(extensions=None,),) outputs = TrackPICo.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TractShredder.py b/nipype/interfaces/camino/tests/test_auto_TractShredder.py index 150333d0eb..b8a95c9569 100644 --- a/nipype/interfaces/camino/tests/test_auto_TractShredder.py +++ b/nipype/interfaces/camino/tests/test_auto_TractShredder.py @@ -4,46 +4,23 @@ def test_TractShredder_inputs(): input_map = dict( - args=dict(argstr='%s', ), - bunchsize=dict( - argstr='%d', - position=2, - units='NA', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='< %s', - extensions=None, - mandatory=True, - position=-2, - ), - offset=dict( - argstr='%d', - position=1, - units='NA', - ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), - space=dict( - argstr='%d', - position=3, - units='NA', - ), + args=dict(argstr="%s",), + bunchsize=dict(argstr="%d", position=2, units="NA",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=-2,), + offset=dict(argstr="%d", position=1, units="NA",), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + space=dict(argstr="%d", position=3, units="NA",), ) inputs = TractShredder.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TractShredder_outputs(): - output_map = dict(shredded=dict(extensions=None, ), ) + output_map = dict(shredded=dict(extensions=None,),) outputs = TractShredder.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py b/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py index 251c82009d..cee10a70db 100644 --- a/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py +++ b/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py @@ -4,58 +4,28 @@ def test_VtkStreamlines_inputs(): input_map = dict( - args=dict(argstr='%s', ), - colourorient=dict(argstr='-colourorient', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr=' < %s', - extensions=None, - mandatory=True, - position=-2, - ), - inputmodel=dict( - argstr='-inputmodel %s', - usedefault=True, - ), - interpolate=dict(argstr='-interpolate', ), - interpolatescalars=dict(argstr='-interpolatescalars', ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), - scalar_file=dict( - 
argstr='-scalarfile %s', - extensions=None, - position=3, - ), - seed_file=dict( - argstr='-seedfile %s', - extensions=None, - position=1, - ), - target_file=dict( - argstr='-targetfile %s', - extensions=None, - position=2, - ), - voxeldims=dict( - argstr='-voxeldims %s', - position=4, - units='mm', - ), + args=dict(argstr="%s",), + colourorient=dict(argstr="-colourorient",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr=" < %s", extensions=None, mandatory=True, position=-2,), + inputmodel=dict(argstr="-inputmodel %s", usedefault=True,), + interpolate=dict(argstr="-interpolate",), + interpolatescalars=dict(argstr="-interpolatescalars",), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + scalar_file=dict(argstr="-scalarfile %s", extensions=None, position=3,), + seed_file=dict(argstr="-seedfile %s", extensions=None, position=1,), + target_file=dict(argstr="-targetfile %s", extensions=None, position=2,), + voxeldims=dict(argstr="-voxeldims %s", position=4, units="mm",), ) inputs = VtkStreamlines.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_VtkStreamlines_outputs(): - output_map = dict(vtk=dict(extensions=None, ), ) + output_map = dict(vtk=dict(extensions=None,),) outputs = VtkStreamlines.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/utils.py b/nipype/interfaces/camino/utils.py index 6cfba21653..201e4e05d0 100644 --- a/nipype/interfaces/camino/utils.py +++ b/nipype/interfaces/camino/utils.py @@ -1,20 +1,29 @@ # -*- coding: utf-8 -*- import os -from ..base import (traits, TraitedSpec, File, CommandLine, - CommandLineInputSpec, InputMultiPath) +from ..base import ( + traits, + TraitedSpec, + File, + CommandLine, + CommandLineInputSpec, + InputMultiPath, +) from ...utils.filemanip import split_filename class ImageStatsInputSpec(CommandLineInputSpec): in_files = InputMultiPath( File(exists=True), - argstr='-images %s', + argstr="-images %s", mandatory=True, position=-1, - desc=('List of images to process. They must ' - 'be in the same space and have the same ' - 'dimensions.')) + desc=( + "List of images to process. They must " + "be in the same space and have the same " + "dimensions." + ), + ) stat = traits.Enum( "min", "max", @@ -23,10 +32,11 @@ class ImageStatsInputSpec(CommandLineInputSpec): "sum", "std", "var", - argstr='-stat %s', - units='NA', + argstr="-stat %s", + units="NA", mandatory=True, - desc="The statistic to compute.") + desc="The statistic to compute.", + ) out_type = traits.Enum( "float", @@ -35,21 +45,24 @@ class ImageStatsInputSpec(CommandLineInputSpec): "int", "long", "double", - argstr='-outputdatatype %s', + argstr="-outputdatatype %s", usedefault=True, - desc=('A Camino data type string, default is "float". ' - 'Type must be signed.')) + desc=('A Camino data type string, default is "float". ' "Type must be signed."), + ) output_root = File( - argstr='-outputroot %s', + argstr="-outputroot %s", mandatory=True, - desc=('Filename root prepended onto the names of the output ' - ' files. The extension will be determined from the input.')) + desc=( + "Filename root prepended onto the names of the output " + " files. The extension will be determined from the input." 
+ ), + ) class ImageStatsOutputSpec(TraitedSpec): out_file = File( - exists=True, - desc='Path of the file computed with the statistic chosen') + exists=True, desc="Path of the file computed with the statistic chosen" + ) class ImageStats(CommandLine): @@ -67,13 +80,14 @@ class ImageStats(CommandLine): >>> imstats.inputs.stat = 'max' >>> imstats.run() # doctest: +SKIP """ - _cmd = 'imagestats' + + _cmd = "imagestats" input_spec = ImageStatsInputSpec output_spec = ImageStatsOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = os.path.abspath(self._gen_outfilename()) + outputs["out_file"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): diff --git a/nipype/interfaces/camino2trackvis/convert.py b/nipype/interfaces/camino2trackvis/convert.py index 3f5664b975..f4e7e7dfd1 100644 --- a/nipype/interfaces/camino2trackvis/convert.py +++ b/nipype/interfaces/camino2trackvis/convert.py @@ -12,70 +12,74 @@ class Camino2TrackvisInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='-i %s', + argstr="-i %s", mandatory=True, position=1, - desc='The input .Bfloat (camino) file.') + desc="The input .Bfloat (camino) file.", + ) out_file = File( - argstr='-o %s', + argstr="-o %s", genfile=True, position=2, - desc='The filename to which to write the .trk (trackvis) file.') + desc="The filename to which to write the .trk (trackvis) file.", + ) min_length = traits.Float( - argstr='-l %d', + argstr="-l %d", position=3, - units='mm', - desc='The minimum length of tracts to output') + units="mm", + desc="The minimum length of tracts to output", + ) data_dims = traits.List( traits.Int, - argstr='-d %s', - sep=',', + argstr="-d %s", + sep=",", mandatory=True, position=4, minlen=3, maxlen=3, - desc= - 'Three comma-separated integers giving the number of voxels along each dimension of the source scans.' + desc="Three comma-separated integers giving the number of voxels along each dimension of the source scans.", ) voxel_dims = traits.List( traits.Float, - argstr='-x %s', - sep=',', + argstr="-x %s", + sep=",", mandatory=True, position=5, minlen=3, maxlen=3, - desc= - 'Three comma-separated numbers giving the size of each voxel in mm.') + desc="Three comma-separated numbers giving the size of each voxel in mm.", + ) # Change to enum with all combinations? i.e. LAS, LPI, RAS, etc.. voxel_order = File( - argstr='--voxel-order %s', + argstr="--voxel-order %s", mandatory=True, position=6, - desc='Set the order in which various directions were stored.\ + desc="Set the order in which various directions were stored.\ Specify with three letters consisting of one each \ from the pairs LR, AP, and SI. These stand for Left-Right, \ Anterior-Posterior, and Superior-Inferior. \ Whichever is specified in each position will \ be the direction of increasing order. \ - Read coordinate system from a NIfTI file.') + Read coordinate system from a NIfTI file.", + ) nifti_file = File( - argstr='--nifti %s', + argstr="--nifti %s", exists=True, position=7, - desc='Read coordinate system from a NIfTI file.') + desc="Read coordinate system from a NIfTI file.", + ) class Camino2TrackvisOutputSpec(TraitedSpec): trackvis = File( - exists=True, - desc='The filename to which to write the .trk (trackvis) file.') + exists=True, desc="The filename to which to write the .trk (trackvis) file." 
+ ) class Camino2Trackvis(CommandLine): @@ -97,24 +101,24 @@ class Camino2Trackvis(CommandLine): >>> c2t.run() # doctest: +SKIP """ - _cmd = 'camino_to_trackvis' + _cmd = "camino_to_trackvis" input_spec = Camino2TrackvisInputSpec output_spec = Camino2TrackvisOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['trackvis'] = os.path.abspath(self._gen_outfilename()) + outputs["trackvis"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_filename(self, name): - if name == 'out_file': + if name == "out_file": return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) - return name + '.trk' + return name + ".trk" class Trackvis2CaminoInputSpec(CommandLineInputSpec): @@ -134,46 +138,49 @@ class Trackvis2CaminoInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='-i %s', + argstr="-i %s", mandatory=True, position=1, - desc='The input .trk (trackvis) file.') + desc="The input .trk (trackvis) file.", + ) out_file = File( - argstr='-o %s', + argstr="-o %s", genfile=True, position=2, - desc='The filename to which to write the .Bfloat (camino).') + desc="The filename to which to write the .Bfloat (camino).", + ) append_file = File( exists=True, - argstr='-a %s', + argstr="-a %s", position=2, - desc='A file to which the append the .Bfloat data. ') + desc="A file to which the append the .Bfloat data. ", + ) class Trackvis2CaminoOutputSpec(TraitedSpec): camino = File( - exists=True, - desc='The filename to which to write the .Bfloat (camino).') + exists=True, desc="The filename to which to write the .Bfloat (camino)." + ) class Trackvis2Camino(CommandLine): - _cmd = 'trackvis_to_camino' + _cmd = "trackvis_to_camino" input_spec = Trackvis2CaminoInputSpec output_spec = Trackvis2CaminoOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['camino'] = os.path.abspath(self._gen_outfilename()) + outputs["camino"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_filename(self, name): - if name == 'out_file': + if name == "out_file": return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) - return name + '.Bfloat' + return name + ".Bfloat" diff --git a/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py b/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py index a78832f1a1..3cd618eb10 100644 --- a/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py +++ b/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py @@ -4,50 +4,16 @@ def test_Camino2Trackvis_inputs(): input_map = dict( - args=dict(argstr='%s', ), - data_dims=dict( - argstr='-d %s', - mandatory=True, - position=4, - sep=',', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - position=1, - ), - min_length=dict( - argstr='-l %d', - position=3, - units='mm', - ), - nifti_file=dict( - argstr='--nifti %s', - extensions=None, - position=7, - ), - out_file=dict( - argstr='-o %s', - extensions=None, - genfile=True, - position=2, - ), - voxel_dims=dict( - argstr='-x %s', - mandatory=True, - position=5, - sep=',', - ), + args=dict(argstr="%s",), + data_dims=dict(argstr="-d %s", mandatory=True, position=4, sep=",",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=1,), + min_length=dict(argstr="-l %d", 
position=3, units="mm",), + nifti_file=dict(argstr="--nifti %s", extensions=None, position=7,), + out_file=dict(argstr="-o %s", extensions=None, genfile=True, position=2,), + voxel_dims=dict(argstr="-x %s", mandatory=True, position=5, sep=",",), voxel_order=dict( - argstr='--voxel-order %s', - extensions=None, - mandatory=True, - position=6, + argstr="--voxel-order %s", extensions=None, mandatory=True, position=6, ), ) inputs = Camino2Trackvis.input_spec() @@ -55,8 +21,10 @@ def test_Camino2Trackvis_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Camino2Trackvis_outputs(): - output_map = dict(trackvis=dict(extensions=None, ), ) + output_map = dict(trackvis=dict(extensions=None,),) outputs = Camino2Trackvis.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py b/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py index 3d733966b4..b2869c08e3 100644 --- a/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py +++ b/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py @@ -4,36 +4,21 @@ def test_Trackvis2Camino_inputs(): input_map = dict( - append_file=dict( - argstr='-a %s', - extensions=None, - position=2, - ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - position=1, - ), - out_file=dict( - argstr='-o %s', - extensions=None, - genfile=True, - position=2, - ), + append_file=dict(argstr="-a %s", extensions=None, position=2,), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=1,), + out_file=dict(argstr="-o %s", extensions=None, genfile=True, position=2,), ) inputs = Trackvis2Camino.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Trackvis2Camino_outputs(): - output_map = dict(camino=dict(extensions=None, ), ) + output_map = dict(camino=dict(extensions=None,),) outputs = Trackvis2Camino.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/cmtk/base.py b/nipype/interfaces/cmtk/base.py index 4aedd56bdb..17d3070504 100644 --- a/nipype/interfaces/cmtk/base.py +++ b/nipype/interfaces/cmtk/base.py @@ -8,7 +8,7 @@ class CFFBaseInterface(LibraryBaseInterface): - _pkg = 'cfflib' + _pkg = "cfflib" # Originally set in convert, nbs, nx, parcellation @@ -16,18 +16,18 @@ class CFFBaseInterface(LibraryBaseInterface): # Remove in 2.0 have_cmp = True try: - package_check('cmp') + package_check("cmp") except ImportError: have_cmp = False have_cfflib = True try: - package_check('cfflib') + package_check("cfflib") except ImportError: have_cfflib = False have_cv = True try: - package_check('cviewer') + package_check("cviewer") except ImportError: have_cv = False diff --git a/nipype/interfaces/cmtk/cmtk.py b/nipype/interfaces/cmtk/cmtk.py index 8eb038e89c..c7b34aeae7 100644 --- a/nipype/interfaces/cmtk/cmtk.py +++ b/nipype/interfaces/cmtk/cmtk.py @@ -12,9 +12,18 @@ from ...utils.filemanip import split_filename from ...utils import NUMPY_MMAP -from ..base import (BaseInterface, BaseInterfaceInputSpec, traits, File, - TraitedSpec, Directory, OutputMultiPath, isdefined) -iflogger = 
logging.getLogger('nipype.interface') +from ..base import ( + BaseInterface, + BaseInterfaceInputSpec, + traits, + File, + TraitedSpec, + Directory, + OutputMultiPath, + isdefined, +) + +iflogger = logging.getLogger("nipype.interface") def length(xyz, along=False): @@ -56,7 +65,7 @@ def length(xyz, along=False): if along: return np.array([0]) return 0 - dists = np.sqrt((np.diff(xyz, axis=0)**2).sum(axis=1)) + dists = np.sqrt((np.diff(xyz, axis=0) ** 2).sum(axis=1)) if along: return np.cumsum(dists) return np.sum(dists) @@ -73,7 +82,8 @@ def get_rois_crossed(pointsmm, roiData, voxelSize): if not roiData[x, y, z] == 0: rois_crossed.append(roiData[x, y, z]) rois_crossed = list( - dict.fromkeys(rois_crossed).keys()) # Removed duplicates from the list + dict.fromkeys(rois_crossed).keys() + ) # Removed duplicates from the list return rois_crossed @@ -101,22 +111,25 @@ def create_allpoints_cmat(streamlines, roiData, voxelSize, n_rois): pcN = int(round(float(100 * i) / n_fib)) if pcN > pc and pcN % 1 == 0: pc = pcN - print('%4.0f%%' % (pc)) + print("%4.0f%%" % (pc)) rois_crossed = get_rois_crossed(fiber[0], roiData, voxelSize) if len(rois_crossed) > 0: list_of_roi_crossed_lists.append(list(rois_crossed)) final_fiber_ids.append(i) - connectivity_matrix = get_connectivity_matrix(n_rois, - list_of_roi_crossed_lists) + connectivity_matrix = get_connectivity_matrix(n_rois, list_of_roi_crossed_lists) dis = n_fib - len(final_fiber_ids) iflogger.info( - 'Found %i (%f percent out of %i fibers) fibers that start or ' - 'terminate in a voxel which is not labeled. (orphans)', dis, - dis * 100.0 / n_fib, n_fib) - iflogger.info('Valid fibers: %i (%f percent)', n_fib - dis, - 100 - dis * 100.0 / n_fib) - iflogger.info('Returning the intersecting point connectivity matrix') + "Found %i (%f percent out of %i fibers) fibers that start or " + "terminate in a voxel which is not labeled. (orphans)", + dis, + dis * 100.0 / n_fib, + n_fib, + ) + iflogger.info( + "Valid fibers: %i (%f percent)", n_fib - dis, 100 - dis * 100.0 / n_fib + ) + iflogger.info("Returning the intersecting point connectivity matrix") return connectivity_matrix, final_fiber_ids @@ -161,29 +174,31 @@ def create_endpoints_array(fib, voxelSize): endpoints[i, 1, 2] = int(endpoints[i, 1, 2] / float(voxelSize[2])) # Return the matrices - iflogger.info('Returning the endpoint matrix') + iflogger.info("Returning the endpoint matrix") return (endpoints, endpointsmm) -def cmat(track_file, - roi_file, - resolution_network_file, - matrix_name, - matrix_mat_name, - endpoint_name, - intersections=False): +def cmat( + track_file, + roi_file, + resolution_network_file, + matrix_name, + matrix_mat_name, + endpoint_name, + intersections=False, +): """ Create the connection matrix for each resolution using fibers and ROIs. 
""" import scipy.io as sio stats = {} - iflogger.info('Running cmat function') + iflogger.info("Running cmat function") # Identify the endpoints of each fiber - en_fname = op.abspath(endpoint_name + '_endpoints.npy') - en_fnamemm = op.abspath(endpoint_name + '_endpointsmm.npy') + en_fname = op.abspath(endpoint_name + "_endpoints.npy") + en_fnamemm = op.abspath(endpoint_name + "_endpointsmm.npy") - iflogger.info('Reading Trackvis file %s', track_file) + iflogger.info("Reading Trackvis file %s", track_file) fib, hdr = nb.trackvis.read(track_file, False) - stats['orig_n_fib'] = len(fib) + stats["orig_n_fib"] = len(fib) roi = nb.load(roi_file, mmap=NUMPY_MMAP) roiData = roi.get_data() @@ -191,13 +206,13 @@ def cmat(track_file, (endpoints, endpointsmm) = create_endpoints_array(fib, roiVoxelSize) # Output endpoint arrays - iflogger.info('Saving endpoint array: %s', en_fname) + iflogger.info("Saving endpoint array: %s", en_fname) np.save(en_fname, endpoints) - iflogger.info('Saving endpoint array in mm: %s', en_fnamemm) + iflogger.info("Saving endpoint array in mm: %s", en_fnamemm) np.save(en_fnamemm, endpointsmm) n = len(fib) - iflogger.info('Number of fibers: %i', n) + iflogger.info("Number of fibers: %i", n) # Create empty fiber label array fiberlabels = np.zeros((n, 2)) @@ -206,16 +221,16 @@ def cmat(track_file, # Add node information from specified parcellation scheme path, name, ext = split_filename(resolution_network_file) - if ext == '.pck': + if ext == ".pck": gp = nx.read_gpickle(resolution_network_file) - elif ext == '.graphml': + elif ext == ".graphml": gp = nx.read_graphml(resolution_network_file) else: raise TypeError("Unable to read file:", resolution_network_file) nROIs = len(gp.nodes()) # add node information from parcellation - if 'dn_position' in gp.nodes[list(gp.nodes())[0]]: + if "dn_position" in gp.nodes[list(gp.nodes())[0]]: G = gp.copy() else: G = nx.Graph() @@ -225,39 +240,48 @@ def cmat(track_file, # ROI in voxel coordinates (segmentation volume ) xyz = tuple( np.mean( - np.where( - np.flipud(roiData) == int(d["dn_correspondence_id"])), - axis=1)) - G.nodes[int(u)]['dn_position'] = tuple([xyz[0], xyz[2], -xyz[1]]) + np.where(np.flipud(roiData) == int(d["dn_correspondence_id"])), + axis=1, + ) + ) + G.nodes[int(u)]["dn_position"] = tuple([xyz[0], xyz[2], -xyz[1]]) if intersections: iflogger.info("Filtering tractography from intersections") intersection_matrix, final_fiber_ids = create_allpoints_cmat( - fib, roiData, roiVoxelSize, nROIs) + fib, roiData, roiVoxelSize, nROIs + ) finalfibers_fname = op.abspath( - endpoint_name + '_intersections_streamline_final.trk') - stats['intersections_n_fib'] = save_fibers(hdr, fib, finalfibers_fname, - final_fiber_ids) + endpoint_name + "_intersections_streamline_final.trk" + ) + stats["intersections_n_fib"] = save_fibers( + hdr, fib, finalfibers_fname, final_fiber_ids + ) intersection_matrix = np.matrix(intersection_matrix) I = G.copy() H = nx.from_numpy_matrix(np.matrix(intersection_matrix)) - H = nx.relabel_nodes( - H, lambda x: x + 1) # relabel nodes so they start at 1 + H = nx.relabel_nodes(H, lambda x: x + 1) # relabel nodes so they start at 1 I.add_weighted_edges_from( - ((u, v, d['weight']) for u, v, d in H.edges(data=True))) + ((u, v, d["weight"]) for u, v, d in H.edges(data=True)) + ) dis = 0 for i in range(endpoints.shape[0]): # ROI start => ROI end try: - startROI = int(roiData[endpoints[i, 0, 0], endpoints[i, 0, 1], - endpoints[i, 0, 2]]) - endROI = int(roiData[endpoints[i, 1, 0], endpoints[i, 1, 1], - endpoints[i, 1, 2]]) 
+ startROI = int( + roiData[endpoints[i, 0, 0], endpoints[i, 0, 1], endpoints[i, 0, 2]] + ) + endROI = int( + roiData[endpoints[i, 1, 0], endpoints[i, 1, 1], endpoints[i, 1, 2]] + ) except IndexError: - iflogger.error('AN INDEXERROR EXCEPTION OCCURED FOR FIBER %s. ' - 'PLEASE CHECK ENDPOINT GENERATION', i) + iflogger.error( + "AN INDEXERROR EXCEPTION OCCURED FOR FIBER %s. " + "PLEASE CHECK ENDPOINT GENERATION", + i, + ) break # Filter @@ -270,8 +294,7 @@ def cmat(track_file, iflogger.error( "Start or endpoint of fiber terminate in a voxel which is labeled higher" ) - iflogger.error( - "than is expected by the parcellation node information.") + iflogger.error("than is expected by the parcellation node information.") iflogger.error("Start ROI: %i, End ROI: %i", startROI, endROI) iflogger.error("This needs bugfixing!") continue @@ -290,9 +313,8 @@ def cmat(track_file, final_fibers_idx.append(i) # Add edge to graph - if G.has_edge(startROI, - endROI) and 'fiblist' in G.edge[startROI][endROI]: - G.edge[startROI][endROI]['fiblist'].append(i) + if G.has_edge(startROI, endROI) and "fiblist" in G.edge[startROI][endROI]: + G.edge[startROI][endROI]["fiblist"].append(i) else: G.add_edge(startROI, endROI, fiblist=[i]) @@ -314,10 +336,13 @@ def cmat(track_file, final_fiberlabels_array = np.array(final_fiberlabels, dtype=int) iflogger.info( - 'Found %i (%f percent out of %i fibers) fibers that start or ' - 'terminate in a voxel which is not labeled. (orphans)', dis, - dis * 100.0 / n, n) - iflogger.info('Valid fibers: %i (%f%%)', n - dis, 100 - dis * 100.0 / n) + "Found %i (%f percent out of %i fibers) fibers that start or " + "terminate in a voxel which is not labeled. (orphans)", + dis, + dis * 100.0 / n, + n, + ) + iflogger.info("Valid fibers: %i (%f%%)", n - dis, 100 - dis * 100.0 / n) numfib = nx.Graph() numfib.add_nodes_from(G) @@ -327,109 +352,108 @@ def cmat(track_file, for u, v, d in G.edges(data=True): G.remove_edge(u, v) di = {} - if 'fiblist' in d: - di['number_of_fibers'] = len(d['fiblist']) - idx = np.where((final_fiberlabels_array[:, 0] == int(u)) & - (final_fiberlabels_array[:, 1] == int(v)))[0] - di['fiber_length_mean'] = float( - np.mean(final_fiberlength_array[idx])) - di['fiber_length_median'] = float( - np.median(final_fiberlength_array[idx])) - di['fiber_length_std'] = float( - np.std(final_fiberlength_array[idx])) + if "fiblist" in d: + di["number_of_fibers"] = len(d["fiblist"]) + idx = np.where( + (final_fiberlabels_array[:, 0] == int(u)) + & (final_fiberlabels_array[:, 1] == int(v)) + )[0] + di["fiber_length_mean"] = float(np.mean(final_fiberlength_array[idx])) + di["fiber_length_median"] = float(np.median(final_fiberlength_array[idx])) + di["fiber_length_std"] = float(np.std(final_fiberlength_array[idx])) else: - di['number_of_fibers'] = 0 - di['fiber_length_mean'] = 0 - di['fiber_length_median'] = 0 - di['fiber_length_std'] = 0 + di["number_of_fibers"] = 0 + di["fiber_length_mean"] = 0 + di["fiber_length_median"] = 0 + di["fiber_length_std"] = 0 if not u == v: # Fix for self loop problem G.add_edge(u, v, **di) - if 'fiblist' in d: - numfib.add_edge(u, v, weight=di['number_of_fibers']) - fibmean.add_edge(u, v, weight=di['fiber_length_mean']) - fibmedian.add_edge(u, v, weight=di['fiber_length_median']) - fibdev.add_edge(u, v, weight=di['fiber_length_std']) + if "fiblist" in d: + numfib.add_edge(u, v, weight=di["number_of_fibers"]) + fibmean.add_edge(u, v, weight=di["fiber_length_mean"]) + fibmedian.add_edge(u, v, weight=di["fiber_length_median"]) + fibdev.add_edge(u, v, 
weight=di["fiber_length_std"]) - iflogger.info('Writing network as %s', matrix_name) + iflogger.info("Writing network as %s", matrix_name) nx.write_gpickle(G, op.abspath(matrix_name)) numfib_mlab = nx.to_numpy_matrix(numfib, dtype=int) - numfib_dict = {'number_of_fibers': numfib_mlab} + numfib_dict = {"number_of_fibers": numfib_mlab} fibmean_mlab = nx.to_numpy_matrix(fibmean, dtype=np.float64) - fibmean_dict = {'mean_fiber_length': fibmean_mlab} + fibmean_dict = {"mean_fiber_length": fibmean_mlab} fibmedian_mlab = nx.to_numpy_matrix(fibmedian, dtype=np.float64) - fibmedian_dict = {'median_fiber_length': fibmedian_mlab} + fibmedian_dict = {"median_fiber_length": fibmedian_mlab} fibdev_mlab = nx.to_numpy_matrix(fibdev, dtype=np.float64) - fibdev_dict = {'fiber_length_std': fibdev_mlab} + fibdev_dict = {"fiber_length_std": fibdev_mlab} if intersections: path, name, ext = split_filename(matrix_name) - intersection_matrix_name = op.abspath(name + '_intersections') + ext - iflogger.info('Writing intersection network as %s', - intersection_matrix_name) + intersection_matrix_name = op.abspath(name + "_intersections") + ext + iflogger.info("Writing intersection network as %s", intersection_matrix_name) nx.write_gpickle(I, intersection_matrix_name) path, name, ext = split_filename(matrix_mat_name) - if not ext == '.mat': - ext = '.mat' + if not ext == ".mat": + ext = ".mat" matrix_mat_name = matrix_mat_name + ext - iflogger.info('Writing matlab matrix as %s', matrix_mat_name) + iflogger.info("Writing matlab matrix as %s", matrix_mat_name) sio.savemat(matrix_mat_name, numfib_dict) if intersections: - intersect_dict = {'intersections': intersection_matrix} - intersection_matrix_mat_name = op.abspath(name + '_intersections') + ext - iflogger.info('Writing intersection matrix as %s', - intersection_matrix_mat_name) + intersect_dict = {"intersections": intersection_matrix} + intersection_matrix_mat_name = op.abspath(name + "_intersections") + ext + iflogger.info("Writing intersection matrix as %s", intersection_matrix_mat_name) sio.savemat(intersection_matrix_mat_name, intersect_dict) - mean_fiber_length_matrix_name = op.abspath( - name + '_mean_fiber_length') + ext - iflogger.info('Writing matlab mean fiber length matrix as %s', - mean_fiber_length_matrix_name) + mean_fiber_length_matrix_name = op.abspath(name + "_mean_fiber_length") + ext + iflogger.info( + "Writing matlab mean fiber length matrix as %s", mean_fiber_length_matrix_name + ) sio.savemat(mean_fiber_length_matrix_name, fibmean_dict) - median_fiber_length_matrix_name = op.abspath( - name + '_median_fiber_length') + ext - iflogger.info('Writing matlab median fiber length matrix as %s', - median_fiber_length_matrix_name) + median_fiber_length_matrix_name = op.abspath(name + "_median_fiber_length") + ext + iflogger.info( + "Writing matlab median fiber length matrix as %s", + median_fiber_length_matrix_name, + ) sio.savemat(median_fiber_length_matrix_name, fibmedian_dict) - fiber_length_std_matrix_name = op.abspath(name + '_fiber_length_std') + ext - iflogger.info('Writing matlab fiber length deviation matrix as %s', - fiber_length_std_matrix_name) + fiber_length_std_matrix_name = op.abspath(name + "_fiber_length_std") + ext + iflogger.info( + "Writing matlab fiber length deviation matrix as %s", + fiber_length_std_matrix_name, + ) sio.savemat(fiber_length_std_matrix_name, fibdev_dict) - fiberlengths_fname = op.abspath(endpoint_name + '_final_fiberslength.npy') - iflogger.info('Storing final fiber length array as %s', fiberlengths_fname) + 
fiberlengths_fname = op.abspath(endpoint_name + "_final_fiberslength.npy") + iflogger.info("Storing final fiber length array as %s", fiberlengths_fname) np.save(fiberlengths_fname, final_fiberlength_array) - fiberlabels_fname = op.abspath(endpoint_name + '_filtered_fiberslabel.npy') - iflogger.info('Storing all fiber labels (with orphans) as %s', - fiberlabels_fname) + fiberlabels_fname = op.abspath(endpoint_name + "_filtered_fiberslabel.npy") + iflogger.info("Storing all fiber labels (with orphans) as %s", fiberlabels_fname) np.save( - fiberlabels_fname, - np.array(fiberlabels, dtype=np.int32), + fiberlabels_fname, np.array(fiberlabels, dtype=np.int32), ) - fiberlabels_noorphans_fname = op.abspath( - endpoint_name + '_final_fiberslabels.npy') - iflogger.info('Storing final fiber labels (no orphans) as %s', - fiberlabels_noorphans_fname) + fiberlabels_noorphans_fname = op.abspath(endpoint_name + "_final_fiberslabels.npy") + iflogger.info( + "Storing final fiber labels (no orphans) as %s", fiberlabels_noorphans_fname + ) np.save(fiberlabels_noorphans_fname, final_fiberlabels_array) iflogger.info("Filtering tractography - keeping only no orphan fibers") - finalfibers_fname = op.abspath(endpoint_name + '_streamline_final.trk') - stats['endpoint_n_fib'] = save_fibers(hdr, fib, finalfibers_fname, - final_fibers_idx) - stats['endpoints_percent'] = float(stats['endpoint_n_fib']) / float( - stats['orig_n_fib']) * 100 - stats['intersections_percent'] = float( - stats['intersections_n_fib']) / float(stats['orig_n_fib']) * 100 - - out_stats_file = op.abspath(endpoint_name + '_statistics.mat') - iflogger.info('Saving matrix creation statistics as %s', out_stats_file) + finalfibers_fname = op.abspath(endpoint_name + "_streamline_final.trk") + stats["endpoint_n_fib"] = save_fibers(hdr, fib, finalfibers_fname, final_fibers_idx) + stats["endpoints_percent"] = ( + float(stats["endpoint_n_fib"]) / float(stats["orig_n_fib"]) * 100 + ) + stats["intersections_percent"] = ( + float(stats["intersections_n_fib"]) / float(stats["orig_n_fib"]) * 100 + ) + + out_stats_file = op.abspath(endpoint_name + "_statistics.mat") + iflogger.info("Saving matrix creation statistics as %s", out_stats_file) sio.savemat(out_stats_file, stats) @@ -440,114 +464,119 @@ def save_fibers(oldhdr, oldfib, fname, indices): for i in indices: outstreams.append(oldfib[i]) n_fib_out = len(outstreams) - hdrnew['n_count'] = n_fib_out - iflogger.info('Writing final non-orphan fibers as %s', fname) + hdrnew["n_count"] = n_fib_out + iflogger.info("Writing final non-orphan fibers as %s", fname) nb.trackvis.write(fname, outstreams, hdrnew) return n_fib_out class CreateMatrixInputSpec(TraitedSpec): - roi_file = File( - exists=True, mandatory=True, desc='Freesurfer aparc+aseg file') - tract_file = File(exists=True, mandatory=True, desc='Trackvis tract file') + roi_file = File(exists=True, mandatory=True, desc="Freesurfer aparc+aseg file") + tract_file = File(exists=True, mandatory=True, desc="Trackvis tract file") resolution_network_file = File( exists=True, mandatory=True, - desc='Parcellation files from Connectome Mapping Toolkit') + desc="Parcellation files from Connectome Mapping Toolkit", + ) count_region_intersections = traits.Bool( False, usedefault=True, - desc= - 'Counts all of the fiber-region traversals in the connectivity matrix (requires significantly more computational time)' + desc="Counts all of the fiber-region traversals in the connectivity matrix (requires significantly more computational time)", ) out_matrix_file = File( - 
genfile=True, desc='NetworkX graph describing the connectivity') + genfile=True, desc="NetworkX graph describing the connectivity" + ) out_matrix_mat_file = File( - 'cmatrix.mat', - usedefault=True, - desc='Matlab matrix describing the connectivity') + "cmatrix.mat", usedefault=True, desc="Matlab matrix describing the connectivity" + ) out_mean_fiber_length_matrix_mat_file = File( genfile=True, - desc= - 'Matlab matrix describing the mean fiber lengths between each node.') + desc="Matlab matrix describing the mean fiber lengths between each node.", + ) out_median_fiber_length_matrix_mat_file = File( genfile=True, - desc= - 'Matlab matrix describing the mean fiber lengths between each node.') + desc="Matlab matrix describing the mean fiber lengths between each node.", + ) out_fiber_length_std_matrix_mat_file = File( genfile=True, - desc= - 'Matlab matrix describing the deviation in fiber lengths connecting each node.' + desc="Matlab matrix describing the deviation in fiber lengths connecting each node.", ) out_intersection_matrix_mat_file = File( genfile=True, - desc= - 'Matlab connectivity matrix if all region/fiber intersections are counted.' + desc="Matlab connectivity matrix if all region/fiber intersections are counted.", ) out_endpoint_array_name = File( - genfile=True, desc='Name for the generated endpoint arrays') + genfile=True, desc="Name for the generated endpoint arrays" + ) class CreateMatrixOutputSpec(TraitedSpec): - matrix_file = File( - desc='NetworkX graph describing the connectivity', exists=True) + matrix_file = File(desc="NetworkX graph describing the connectivity", exists=True) intersection_matrix_file = File( - desc='NetworkX graph describing the connectivity', exists=True) + desc="NetworkX graph describing the connectivity", exists=True + ) matrix_files = OutputMultiPath( File( - desc='All of the gpickled network files output by this interface', - exists=True)) + desc="All of the gpickled network files output by this interface", + exists=True, + ) + ) matlab_matrix_files = OutputMultiPath( - File( - desc='All of the MATLAB .mat files output by this interface', - exists=True)) + File(desc="All of the MATLAB .mat files output by this interface", exists=True) + ) matrix_mat_file = File( - desc='Matlab matrix describing the connectivity', exists=True) + desc="Matlab matrix describing the connectivity", exists=True + ) intersection_matrix_mat_file = File( - desc= - 'Matlab matrix describing the mean fiber lengths between each node.', - exists=True) + desc="Matlab matrix describing the mean fiber lengths between each node.", + exists=True, + ) mean_fiber_length_matrix_mat_file = File( - desc= - 'Matlab matrix describing the mean fiber lengths between each node.', - exists=True) + desc="Matlab matrix describing the mean fiber lengths between each node.", + exists=True, + ) median_fiber_length_matrix_mat_file = File( - desc= - 'Matlab matrix describing the median fiber lengths between each node.', - exists=True) + desc="Matlab matrix describing the median fiber lengths between each node.", + exists=True, + ) fiber_length_std_matrix_mat_file = File( - desc= - 'Matlab matrix describing the deviation in fiber lengths connecting each node.', - exists=True) + desc="Matlab matrix describing the deviation in fiber lengths connecting each node.", + exists=True, + ) endpoint_file = File( - desc='Saved Numpy array with the endpoints of each fiber', exists=True) + desc="Saved Numpy array with the endpoints of each fiber", exists=True + ) endpoint_file_mm = File( - desc= - 'Saved Numpy 
array with the endpoints of each fiber (in millimeters)', - exists=True) + desc="Saved Numpy array with the endpoints of each fiber (in millimeters)", + exists=True, + ) fiber_length_file = File( - desc='Saved Numpy array with the lengths of each fiber', exists=True) + desc="Saved Numpy array with the lengths of each fiber", exists=True + ) fiber_label_file = File( - desc='Saved Numpy array with the labels for each fiber', exists=True) + desc="Saved Numpy array with the labels for each fiber", exists=True + ) fiber_labels_noorphans = File( - desc='Saved Numpy array with the labels for each non-orphan fiber', - exists=True) + desc="Saved Numpy array with the labels for each non-orphan fiber", exists=True + ) filtered_tractography = File( - desc= - 'TrackVis file containing only those fibers originate in one and terminate in another region', - exists=True) + desc="TrackVis file containing only those fibers originate in one and terminate in another region", + exists=True, + ) filtered_tractography_by_intersections = File( - desc='TrackVis file containing all fibers which connect two regions', - exists=True) + desc="TrackVis file containing all fibers which connect two regions", + exists=True, + ) filtered_tractographies = OutputMultiPath( File( - desc= - 'TrackVis file containing only those fibers originate in one and terminate in another region', - exists=True)) + desc="TrackVis file containing only those fibers originate in one and terminate in another region", + exists=True, + ) + ) stats_file = File( - desc= - 'Saved Matlab .mat file with the number of fibers saved at each stage', - exists=True) + desc="Saved Matlab .mat file with the number of fibers saved at each stage", + exists=True, + ) class CreateMatrix(BaseInterface): @@ -570,36 +599,42 @@ class CreateMatrix(BaseInterface): def _run_interface(self, runtime): if isdefined(self.inputs.out_matrix_file): path, name, _ = split_filename(self.inputs.out_matrix_file) - matrix_file = op.abspath(name + '.pck') + matrix_file = op.abspath(name + ".pck") else: - matrix_file = self._gen_outfilename('.pck') + matrix_file = self._gen_outfilename(".pck") matrix_mat_file = op.abspath(self.inputs.out_matrix_mat_file) path, name, ext = split_filename(matrix_mat_file) - if not ext == '.mat': - ext = '.mat' + if not ext == ".mat": + ext = ".mat" matrix_mat_file = matrix_mat_file + ext if isdefined(self.inputs.out_mean_fiber_length_matrix_mat_file): mean_fiber_length_matrix_mat_file = op.abspath( - self.inputs.out_mean_fiber_length_matrix_mat_file) + self.inputs.out_mean_fiber_length_matrix_mat_file + ) else: mean_fiber_length_matrix_name = op.abspath( - self._gen_outfilename('_mean_fiber_length.mat')) + self._gen_outfilename("_mean_fiber_length.mat") + ) if isdefined(self.inputs.out_median_fiber_length_matrix_mat_file): median_fiber_length_matrix_mat_file = op.abspath( - self.inputs.out_median_fiber_length_matrix_mat_file) + self.inputs.out_median_fiber_length_matrix_mat_file + ) else: median_fiber_length_matrix_name = op.abspath( - self._gen_outfilename('_median_fiber_length.mat')) + self._gen_outfilename("_median_fiber_length.mat") + ) if isdefined(self.inputs.out_fiber_length_std_matrix_mat_file): fiber_length_std_matrix_mat_file = op.abspath( - self.inputs.out_fiber_length_std_matrix_mat_file) + self.inputs.out_fiber_length_std_matrix_mat_file + ) else: fiber_length_std_matrix_name = op.abspath( - self._gen_outfilename('_fiber_length_std.mat')) + self._gen_outfilename("_fiber_length_std.mat") + ) if not 
isdefined(self.inputs.out_endpoint_array_name): _, endpoint_name, _ = split_filename(self.inputs.tract_file) @@ -607,118 +642,135 @@ def _run_interface(self, runtime): else: endpoint_name = op.abspath(self.inputs.out_endpoint_array_name) - cmat(self.inputs.tract_file, self.inputs.roi_file, - self.inputs.resolution_network_file, matrix_file, matrix_mat_file, - endpoint_name, self.inputs.count_region_intersections) + cmat( + self.inputs.tract_file, + self.inputs.roi_file, + self.inputs.resolution_network_file, + matrix_file, + matrix_mat_file, + endpoint_name, + self.inputs.count_region_intersections, + ) return runtime def _list_outputs(self): outputs = self.output_spec().get() if isdefined(self.inputs.out_matrix_file): path, name, _ = split_filename(self.inputs.out_matrix_file) - out_matrix_file = op.abspath(name + '.pck') - out_intersection_matrix_file = op.abspath( - name + '_intersections.pck') + out_matrix_file = op.abspath(name + ".pck") + out_intersection_matrix_file = op.abspath(name + "_intersections.pck") else: - out_matrix_file = op.abspath(self._gen_outfilename('.pck')) + out_matrix_file = op.abspath(self._gen_outfilename(".pck")) out_intersection_matrix_file = op.abspath( - self._gen_outfilename('_intersections.pck')) + self._gen_outfilename("_intersections.pck") + ) - outputs['matrix_file'] = out_matrix_file - outputs['intersection_matrix_file'] = out_intersection_matrix_file + outputs["matrix_file"] = out_matrix_file + outputs["intersection_matrix_file"] = out_intersection_matrix_file matrix_mat_file = op.abspath(self.inputs.out_matrix_mat_file) path, name, ext = split_filename(matrix_mat_file) - if not ext == '.mat': - ext = '.mat' + if not ext == ".mat": + ext = ".mat" matrix_mat_file = matrix_mat_file + ext - outputs['matrix_mat_file'] = matrix_mat_file + outputs["matrix_mat_file"] = matrix_mat_file if isdefined(self.inputs.out_mean_fiber_length_matrix_mat_file): - outputs['mean_fiber_length_matrix_mat_file'] = op.abspath( - self.inputs.out_mean_fiber_length_matrix_mat_file) + outputs["mean_fiber_length_matrix_mat_file"] = op.abspath( + self.inputs.out_mean_fiber_length_matrix_mat_file + ) else: - outputs['mean_fiber_length_matrix_mat_file'] = op.abspath( - self._gen_outfilename('_mean_fiber_length.mat')) + outputs["mean_fiber_length_matrix_mat_file"] = op.abspath( + self._gen_outfilename("_mean_fiber_length.mat") + ) if isdefined(self.inputs.out_median_fiber_length_matrix_mat_file): - outputs['median_fiber_length_matrix_mat_file'] = op.abspath( - self.inputs.out_median_fiber_length_matrix_mat_file) + outputs["median_fiber_length_matrix_mat_file"] = op.abspath( + self.inputs.out_median_fiber_length_matrix_mat_file + ) else: - outputs['median_fiber_length_matrix_mat_file'] = op.abspath( - self._gen_outfilename('_median_fiber_length.mat')) + outputs["median_fiber_length_matrix_mat_file"] = op.abspath( + self._gen_outfilename("_median_fiber_length.mat") + ) if isdefined(self.inputs.out_fiber_length_std_matrix_mat_file): - outputs['fiber_length_std_matrix_mat_file'] = op.abspath( - self.inputs.out_fiber_length_std_matrix_mat_file) + outputs["fiber_length_std_matrix_mat_file"] = op.abspath( + self.inputs.out_fiber_length_std_matrix_mat_file + ) else: - outputs['fiber_length_std_matrix_mat_file'] = op.abspath( - self._gen_outfilename('_fiber_length_std.mat')) + outputs["fiber_length_std_matrix_mat_file"] = op.abspath( + self._gen_outfilename("_fiber_length_std.mat") + ) if isdefined(self.inputs.out_intersection_matrix_mat_file): - outputs['intersection_matrix_mat_file'] = 
op.abspath( - self.inputs.out_intersection_matrix_mat_file) + outputs["intersection_matrix_mat_file"] = op.abspath( + self.inputs.out_intersection_matrix_mat_file + ) else: - outputs['intersection_matrix_mat_file'] = op.abspath( - self._gen_outfilename('_intersections.mat')) + outputs["intersection_matrix_mat_file"] = op.abspath( + self._gen_outfilename("_intersections.mat") + ) if isdefined(self.inputs.out_endpoint_array_name): endpoint_name = self.inputs.out_endpoint_array_name - outputs['endpoint_file'] = op.abspath( - self.inputs.out_endpoint_array_name + '_endpoints.npy') - outputs['endpoint_file_mm'] = op.abspath( - self.inputs.out_endpoint_array_name + '_endpointsmm.npy') - outputs['fiber_length_file'] = op.abspath( - self.inputs.out_endpoint_array_name + '_final_fiberslength.npy' + outputs["endpoint_file"] = op.abspath( + self.inputs.out_endpoint_array_name + "_endpoints.npy" ) - outputs['fiber_label_file'] = op.abspath( - self.inputs.out_endpoint_array_name + - '_filtered_fiberslabel.npy') - outputs['fiber_labels_noorphans'] = op.abspath( - self.inputs.out_endpoint_array_name + '_final_fiberslabels.npy' + outputs["endpoint_file_mm"] = op.abspath( + self.inputs.out_endpoint_array_name + "_endpointsmm.npy" + ) + outputs["fiber_length_file"] = op.abspath( + self.inputs.out_endpoint_array_name + "_final_fiberslength.npy" + ) + outputs["fiber_label_file"] = op.abspath( + self.inputs.out_endpoint_array_name + "_filtered_fiberslabel.npy" + ) + outputs["fiber_labels_noorphans"] = op.abspath( + self.inputs.out_endpoint_array_name + "_final_fiberslabels.npy" ) else: _, endpoint_name, _ = split_filename(self.inputs.tract_file) - outputs['endpoint_file'] = op.abspath( - endpoint_name + '_endpoints.npy') - outputs['endpoint_file_mm'] = op.abspath( - endpoint_name + '_endpointsmm.npy') - outputs['fiber_length_file'] = op.abspath( - endpoint_name + '_final_fiberslength.npy') - outputs['fiber_label_file'] = op.abspath( - endpoint_name + '_filtered_fiberslabel.npy') - outputs['fiber_labels_noorphans'] = op.abspath( - endpoint_name + '_final_fiberslabels.npy') + outputs["endpoint_file"] = op.abspath(endpoint_name + "_endpoints.npy") + outputs["endpoint_file_mm"] = op.abspath(endpoint_name + "_endpointsmm.npy") + outputs["fiber_length_file"] = op.abspath( + endpoint_name + "_final_fiberslength.npy" + ) + outputs["fiber_label_file"] = op.abspath( + endpoint_name + "_filtered_fiberslabel.npy" + ) + outputs["fiber_labels_noorphans"] = op.abspath( + endpoint_name + "_final_fiberslabels.npy" + ) if self.inputs.count_region_intersections: - outputs['matrix_files'] = [ - out_matrix_file, out_intersection_matrix_file - ] - outputs['matlab_matrix_files'] = [ - outputs['matrix_mat_file'], - outputs['mean_fiber_length_matrix_mat_file'], - outputs['median_fiber_length_matrix_mat_file'], - outputs['fiber_length_std_matrix_mat_file'], - outputs['intersection_matrix_mat_file'] + outputs["matrix_files"] = [out_matrix_file, out_intersection_matrix_file] + outputs["matlab_matrix_files"] = [ + outputs["matrix_mat_file"], + outputs["mean_fiber_length_matrix_mat_file"], + outputs["median_fiber_length_matrix_mat_file"], + outputs["fiber_length_std_matrix_mat_file"], + outputs["intersection_matrix_mat_file"], ] else: - outputs['matrix_files'] = [out_matrix_file] - outputs['matlab_matrix_files'] = [ - outputs['matrix_mat_file'], - outputs['mean_fiber_length_matrix_mat_file'], - outputs['median_fiber_length_matrix_mat_file'], - outputs['fiber_length_std_matrix_mat_file'] + outputs["matrix_files"] = [out_matrix_file] + 
outputs["matlab_matrix_files"] = [ + outputs["matrix_mat_file"], + outputs["mean_fiber_length_matrix_mat_file"], + outputs["median_fiber_length_matrix_mat_file"], + outputs["fiber_length_std_matrix_mat_file"], ] - outputs['filtered_tractography'] = op.abspath( - endpoint_name + '_streamline_final.trk') - outputs['filtered_tractography_by_intersections'] = op.abspath( - endpoint_name + '_intersections_streamline_final.trk') - outputs['filtered_tractographies'] = [ - outputs['filtered_tractography'], - outputs['filtered_tractography_by_intersections'] + outputs["filtered_tractography"] = op.abspath( + endpoint_name + "_streamline_final.trk" + ) + outputs["filtered_tractography_by_intersections"] = op.abspath( + endpoint_name + "_intersections_streamline_final.trk" + ) + outputs["filtered_tractographies"] = [ + outputs["filtered_tractography"], + outputs["filtered_tractography_by_intersections"], ] - outputs['stats_file'] = op.abspath(endpoint_name + '_statistics.mat') + outputs["stats_file"] = op.abspath(endpoint_name + "_statistics.mat") return outputs def _gen_outfilename(self, ext): @@ -733,27 +785,29 @@ def _gen_outfilename(self, ext): class ROIGenInputSpec(BaseInterfaceInputSpec): aparc_aseg_file = File( - exists=True, mandatory=True, desc='Freesurfer aparc+aseg file') + exists=True, mandatory=True, desc="Freesurfer aparc+aseg file" + ) LUT_file = File( exists=True, - xor=['use_freesurfer_LUT'], - desc='Custom lookup table (cf. FreeSurferColorLUT.txt)') + xor=["use_freesurfer_LUT"], + desc="Custom lookup table (cf. FreeSurferColorLUT.txt)", + ) use_freesurfer_LUT = traits.Bool( - xor=['LUT_file'], - desc= - 'Boolean value; Set to True to use default Freesurfer LUT, False for custom LUT' + xor=["LUT_file"], + desc="Boolean value; Set to True to use default Freesurfer LUT, False for custom LUT", ) freesurfer_dir = Directory( - requires=['use_freesurfer_LUT'], desc='Freesurfer main directory') + requires=["use_freesurfer_LUT"], desc="Freesurfer main directory" + ) out_roi_file = File( - genfile=True, desc='Region of Interest file for connectivity mapping') - out_dict_file = File( - genfile=True, desc='Label dictionary saved in Pickle format') + genfile=True, desc="Region of Interest file for connectivity mapping" + ) + out_dict_file = File(genfile=True, desc="Label dictionary saved in Pickle format") class ROIGenOutputSpec(TraitedSpec): - roi_file = File(desc='Region of Interest file for connectivity mapping') - dict_file = File(desc='Label dictionary saved in Pickle format') + roi_file = File(desc="Region of Interest file for connectivity mapping") + dict_file = File(desc="Label dictionary saved in Pickle format") class ROIGen(BaseInterface): @@ -784,104 +838,165 @@ class ROIGen(BaseInterface): def _run_interface(self, runtime): aparc_aseg_file = self.inputs.aparc_aseg_file aparcpath, aparcname, aparcext = split_filename(aparc_aseg_file) - iflogger.info('Using Aparc+Aseg file: %s', aparcname + aparcext) + iflogger.info("Using Aparc+Aseg file: %s", aparcname + aparcext) niiAPARCimg = nb.load(aparc_aseg_file, mmap=NUMPY_MMAP) niiAPARCdata = niiAPARCimg.get_data() niiDataLabels = np.unique(niiAPARCdata) numDataLabels = np.size(niiDataLabels) - iflogger.info('Number of labels in image: %s', numDataLabels) + iflogger.info("Number of labels in image: %s", numDataLabels) write_dict = True if self.inputs.use_freesurfer_LUT: - self.LUT_file = self.inputs.freesurfer_dir + '/FreeSurferColorLUT.txt' - iflogger.info('Using Freesurfer LUT: %s', self.LUT_file) - prefix = 'fsLUT' - elif not 
self.inputs.use_freesurfer_LUT and isdefined( - self.inputs.LUT_file): + self.LUT_file = self.inputs.freesurfer_dir + "/FreeSurferColorLUT.txt" + iflogger.info("Using Freesurfer LUT: %s", self.LUT_file) + prefix = "fsLUT" + elif not self.inputs.use_freesurfer_LUT and isdefined(self.inputs.LUT_file): self.LUT_file = op.abspath(self.inputs.LUT_file) lutpath, lutname, lutext = split_filename(self.LUT_file) - iflogger.info('Using Custom LUT file: %s', lutname + lutext) + iflogger.info("Using Custom LUT file: %s", lutname + lutext) prefix = lutname else: - prefix = 'hardcoded' + prefix = "hardcoded" write_dict = False if isdefined(self.inputs.out_roi_file): roi_file = op.abspath(self.inputs.out_roi_file) else: - roi_file = op.abspath(prefix + '_' + aparcname + '.nii') + roi_file = op.abspath(prefix + "_" + aparcname + ".nii") if isdefined(self.inputs.out_dict_file): dict_file = op.abspath(self.inputs.out_dict_file) else: - dict_file = op.abspath(prefix + '_' + aparcname + '.pck') + dict_file = op.abspath(prefix + "_" + aparcname + ".pck") if write_dict: - iflogger.info('Lookup table: %s', op.abspath(self.LUT_file)) + iflogger.info("Lookup table: %s", op.abspath(self.LUT_file)) LUTlabelsRGBA = np.loadtxt( self.LUT_file, skiprows=4, usecols=[0, 1, 2, 3, 4, 5], - comments='#', + comments="#", dtype={ - 'names': ('index', 'label', 'R', 'G', 'B', 'A'), - 'formats': ('int', '|S30', 'int', 'int', 'int', 'int') - }) + "names": ("index", "label", "R", "G", "B", "A"), + "formats": ("int", "|S30", "int", "int", "int", "int"), + }, + ) numLUTLabels = np.size(LUTlabelsRGBA) if numLUTLabels < numDataLabels: iflogger.error( - 'LUT file provided does not contain all of the regions in the image' + "LUT file provided does not contain all of the regions in the image" ) - iflogger.error('Removing unmapped regions') - iflogger.info('Number of labels in LUT: %s', numLUTLabels) + iflogger.error("Removing unmapped regions") + iflogger.info("Number of labels in LUT: %s", numLUTLabels) LUTlabelDict = {} """ Create dictionary for input LUT table""" for labels in range(0, numLUTLabels): LUTlabelDict[LUTlabelsRGBA[labels][0]] = [ - LUTlabelsRGBA[labels][1], LUTlabelsRGBA[labels][2], - LUTlabelsRGBA[labels][3], LUTlabelsRGBA[labels][4], - LUTlabelsRGBA[labels][5] + LUTlabelsRGBA[labels][1], + LUTlabelsRGBA[labels][2], + LUTlabelsRGBA[labels][3], + LUTlabelsRGBA[labels][4], + LUTlabelsRGBA[labels][5], ] - iflogger.info('Printing LUT label dictionary') + iflogger.info("Printing LUT label dictionary") iflogger.info(LUTlabelDict) mapDict = {} - MAPPING = [[1, 2012], [2, 2019], [3, 2032], [4, 2014], [5, 2020], [ - 6, 2018 - ], [7, 2027], [8, 2028], [9, 2003], [10, 2024], [11, 2017], [12, 2026], - [13, 2002], [14, 2023], [15, 2010], [16, 2022], [17, 2031], - [18, 2029], [19, 2008], [20, 2025], [21, 2005], [22, 2021], - [23, 2011], [24, 2013], [25, 2007], [26, 2016], [27, 2006], - [28, 2033], [29, 2009], [30, 2015], [31, 2001], [32, 2030], - [33, 2034], [34, 2035], [35, 49], [36, 50], [37, 51], [ - 38, 52 - ], [39, 58], [40, 53], [41, 54], [42, 1012], [43, 1019], [ - 44, 1032 - ], [45, 1014], [46, 1020], [47, 1018], [48, 1027], [ - 49, 1028 - ], [50, 1003], [51, 1024], [52, 1017], [53, 1026], [ - 54, 1002 - ], [55, 1023], [56, 1010], [57, 1022], [58, 1031], [ - 59, 1029 - ], [60, 1008], [61, 1025], [62, 1005], [63, 1021], [ - 64, 1011 - ], [65, 1013], [66, 1007], [67, 1016], [68, 1006], [ - 69, 1033 - ], [70, 1009], [71, 1015], [72, 1001], [73, 1030], [ - 74, 1034 - ], [75, 1035], [76, 10], [77, 11], [78, 12], [79, 13], [ - 80, 26 
- ], [81, 17], [82, 18], [83, 16]] + MAPPING = [ + [1, 2012], + [2, 2019], + [3, 2032], + [4, 2014], + [5, 2020], + [6, 2018], + [7, 2027], + [8, 2028], + [9, 2003], + [10, 2024], + [11, 2017], + [12, 2026], + [13, 2002], + [14, 2023], + [15, 2010], + [16, 2022], + [17, 2031], + [18, 2029], + [19, 2008], + [20, 2025], + [21, 2005], + [22, 2021], + [23, 2011], + [24, 2013], + [25, 2007], + [26, 2016], + [27, 2006], + [28, 2033], + [29, 2009], + [30, 2015], + [31, 2001], + [32, 2030], + [33, 2034], + [34, 2035], + [35, 49], + [36, 50], + [37, 51], + [38, 52], + [39, 58], + [40, 53], + [41, 54], + [42, 1012], + [43, 1019], + [44, 1032], + [45, 1014], + [46, 1020], + [47, 1018], + [48, 1027], + [49, 1028], + [50, 1003], + [51, 1024], + [52, 1017], + [53, 1026], + [54, 1002], + [55, 1023], + [56, 1010], + [57, 1022], + [58, 1031], + [59, 1029], + [60, 1008], + [61, 1025], + [62, 1005], + [63, 1021], + [64, 1011], + [65, 1013], + [66, 1007], + [67, 1016], + [68, 1006], + [69, 1033], + [70, 1009], + [71, 1015], + [72, 1001], + [73, 1030], + [74, 1034], + [75, 1035], + [76, 10], + [77, 11], + [78, 12], + [79, 13], + [80, 26], + [81, 17], + [82, 18], + [83, 16], + ] """ Create empty grey matter mask, Populate with only those regions defined in the mapping.""" niiGM = np.zeros(niiAPARCdata.shape, dtype=np.uint) for ma in MAPPING: niiGM[niiAPARCdata == ma[1]] = ma[0] mapDict[ma[0]] = ma[1] - iflogger.info('Grey matter mask created') + iflogger.info("Grey matter mask created") greyMaskLabels = np.unique(niiGM) numGMLabels = np.size(greyMaskLabels) - iflogger.info('Number of grey matter labels: %s', numGMLabels) + iflogger.info("Number of grey matter labels: %s", numGMLabels) labelDict = {} GMlabelDict = {} @@ -889,55 +1004,53 @@ def _run_interface(self, runtime): try: mapDict[label] if write_dict: - GMlabelDict['originalID'] = mapDict[label] + GMlabelDict["originalID"] = mapDict[label] except: - iflogger.info('Label %s not in provided mapping', label) + iflogger.info("Label %s not in provided mapping", label) if write_dict: del GMlabelDict GMlabelDict = {} - GMlabelDict['labels'] = LUTlabelDict[label][0] - GMlabelDict['colors'] = [ - LUTlabelDict[label][1], LUTlabelDict[label][2], - LUTlabelDict[label][3] + GMlabelDict["labels"] = LUTlabelDict[label][0] + GMlabelDict["colors"] = [ + LUTlabelDict[label][1], + LUTlabelDict[label][2], + LUTlabelDict[label][3], ] - GMlabelDict['a'] = LUTlabelDict[label][4] + GMlabelDict["a"] = LUTlabelDict[label][4] labelDict[label] = GMlabelDict - roi_image = nb.Nifti1Image(niiGM, niiAPARCimg.affine, - niiAPARCimg.header) - iflogger.info('Saving ROI File to %s', roi_file) + roi_image = nb.Nifti1Image(niiGM, niiAPARCimg.affine, niiAPARCimg.header) + iflogger.info("Saving ROI File to %s", roi_file) nb.save(roi_image, roi_file) if write_dict: - iflogger.info('Saving Dictionary File to %s in Pickle format', - dict_file) - with open(dict_file, 'w') as f: + iflogger.info("Saving Dictionary File to %s in Pickle format", dict_file) + with open(dict_file, "w") as f: pickle.dump(labelDict, f) return runtime def _list_outputs(self): outputs = self._outputs().get() if isdefined(self.inputs.out_roi_file): - outputs['roi_file'] = op.abspath(self.inputs.out_roi_file) + outputs["roi_file"] = op.abspath(self.inputs.out_roi_file) else: - outputs['roi_file'] = op.abspath(self._gen_outfilename('nii')) + outputs["roi_file"] = op.abspath(self._gen_outfilename("nii")) if isdefined(self.inputs.out_dict_file): - outputs['dict_file'] = op.abspath(self.inputs.out_dict_file) + outputs["dict_file"] 
= op.abspath(self.inputs.out_dict_file) else: - outputs['dict_file'] = op.abspath(self._gen_outfilename('pck')) + outputs["dict_file"] = op.abspath(self._gen_outfilename("pck")) return outputs def _gen_outfilename(self, ext): _, name, _ = split_filename(self.inputs.aparc_aseg_file) if self.inputs.use_freesurfer_LUT: - prefix = 'fsLUT' - elif not self.inputs.use_freesurfer_LUT and isdefined( - self.inputs.LUT_file): + prefix = "fsLUT" + elif not self.inputs.use_freesurfer_LUT and isdefined(self.inputs.LUT_file): lutpath, lutname, lutext = split_filename(self.inputs.LUT_file) prefix = lutname else: - prefix = 'hardcoded' - return prefix + '_' + name + '.' + ext + prefix = "hardcoded" + return prefix + "_" + name + "." + ext def create_nodes(roi_file, resolution_network_file, out_filename): @@ -949,28 +1062,30 @@ def create_nodes(roi_file, resolution_network_file, out_filename): G.add_node(int(u), **d) xyz = tuple( np.mean( - np.where(np.flipud(roiData) == int(d["dn_correspondence_id"])), - axis=1)) - G.nodes[int(u)]['dn_position'] = tuple([xyz[0], xyz[2], -xyz[1]]) + np.where(np.flipud(roiData) == int(d["dn_correspondence_id"])), axis=1 + ) + ) + G.nodes[int(u)]["dn_position"] = tuple([xyz[0], xyz[2], -xyz[1]]) nx.write_gpickle(G, out_filename) return out_filename class CreateNodesInputSpec(BaseInterfaceInputSpec): - roi_file = File( - exists=True, mandatory=True, desc='Region of interest file') + roi_file = File(exists=True, mandatory=True, desc="Region of interest file") resolution_network_file = File( exists=True, mandatory=True, - desc='Parcellation file from Connectome Mapping Toolkit') + desc="Parcellation file from Connectome Mapping Toolkit", + ) out_filename = File( - 'nodenetwork.pck', + "nodenetwork.pck", usedefault=True, - desc='Output gpickled network with the nodes defined.') + desc="Output gpickled network with the nodes defined.", + ) class CreateNodesOutputSpec(TraitedSpec): - node_network = File(desc='Output gpickled network with the nodes defined.') + node_network = File(desc="Output gpickled network with the nodes defined.") class CreateNodes(BaseInterface): @@ -991,14 +1106,16 @@ class CreateNodes(BaseInterface): output_spec = CreateNodesOutputSpec def _run_interface(self, runtime): - iflogger.info('Creating nodes...') - create_nodes(self.inputs.roi_file, self.inputs.resolution_network_file, - self.inputs.out_filename) - iflogger.info('Saving node network to %s', - op.abspath(self.inputs.out_filename)) + iflogger.info("Creating nodes...") + create_nodes( + self.inputs.roi_file, + self.inputs.resolution_network_file, + self.inputs.out_filename, + ) + iflogger.info("Saving node network to %s", op.abspath(self.inputs.out_filename)) return runtime def _list_outputs(self): outputs = self._outputs().get() - outputs['node_network'] = op.abspath(self.inputs.out_filename) + outputs["node_network"] = op.abspath(self.inputs.out_filename) return outputs diff --git a/nipype/interfaces/cmtk/convert.py b/nipype/interfaces/cmtk/convert.py index 170ca44366..a0f956b6f9 100644 --- a/nipype/interfaces/cmtk/convert.py +++ b/nipype/interfaces/cmtk/convert.py @@ -7,54 +7,58 @@ import networkx as nx from ...utils.filemanip import split_filename -from ..base import (BaseInterfaceInputSpec, traits, File, - TraitedSpec, InputMultiPath, isdefined) +from ..base import ( + BaseInterfaceInputSpec, + traits, + File, + TraitedSpec, + InputMultiPath, + isdefined, +) from .base import CFFBaseInterface, have_cfflib class CFFConverterInputSpec(BaseInterfaceInputSpec): graphml_networks = InputMultiPath( - 
File(exists=True), desc='list of graphML networks') + File(exists=True), desc="list of graphML networks" + ) gpickled_networks = InputMultiPath( - File(exists=True), desc='list of gpickled Networkx graphs') + File(exists=True), desc="list of gpickled Networkx graphs" + ) - gifti_surfaces = InputMultiPath( - File(exists=True), desc='list of GIFTI surfaces') - gifti_labels = InputMultiPath( - File(exists=True), desc='list of GIFTI labels') - nifti_volumes = InputMultiPath( - File(exists=True), desc='list of NIFTI volumes') - tract_files = InputMultiPath( - File(exists=True), desc='list of Trackvis fiber files') + gifti_surfaces = InputMultiPath(File(exists=True), desc="list of GIFTI surfaces") + gifti_labels = InputMultiPath(File(exists=True), desc="list of GIFTI labels") + nifti_volumes = InputMultiPath(File(exists=True), desc="list of NIFTI volumes") + tract_files = InputMultiPath(File(exists=True), desc="list of Trackvis fiber files") timeseries_files = InputMultiPath( - File(exists=True), desc='list of HDF5 timeseries files') + File(exists=True), desc="list of HDF5 timeseries files" + ) script_files = InputMultiPath( - File(exists=True), desc='list of script files to include') + File(exists=True), desc="list of script files to include" + ) data_files = InputMultiPath( - File(exists=True), - desc='list of external data files (i.e. Numpy, HD5, XML) ') - - title = traits.Str(desc='Connectome Title') - creator = traits.Str(desc='Creator') - email = traits.Str(desc='Email address') - publisher = traits.Str(desc='Publisher') - license = traits.Str(desc='License') - rights = traits.Str(desc='Rights') - references = traits.Str(desc='References') - relation = traits.Str(desc='Relation') - species = traits.Str('Homo sapiens', desc='Species', usedefault=True) + File(exists=True), desc="list of external data files (i.e. Numpy, HD5, XML) " + ) + + title = traits.Str(desc="Connectome Title") + creator = traits.Str(desc="Creator") + email = traits.Str(desc="Email address") + publisher = traits.Str(desc="Publisher") + license = traits.Str(desc="License") + rights = traits.Str(desc="Rights") + references = traits.Str(desc="References") + relation = traits.Str(desc="Relation") + species = traits.Str("Homo sapiens", desc="Species", usedefault=True) description = traits.Str( - 'Created with the Nipype CFF converter', - desc='Description', - usedefault=True) + "Created with the Nipype CFF converter", desc="Description", usedefault=True + ) - out_file = File( - 'connectome.cff', usedefault=True, desc='Output connectome file') + out_file = File("connectome.cff", usedefault=True, desc="Output connectome file") class CFFConverterOutputSpec(TraitedSpec): - connectome_file = File(exists=True, desc='Output connectome file') + connectome_file = File(exists=True, desc="Output connectome file") class CFFConverter(CFFBaseInterface): @@ -78,6 +82,7 @@ class CFFConverter(CFFBaseInterface): def _run_interface(self, runtime): import cfflib as cf + a = cf.connectome() if isdefined(self.inputs.title): @@ -89,7 +94,7 @@ def _run_interface(self, runtime): a.connectome_meta.set_creator(self.inputs.creator) else: # Probably only works on some OSes... - a.connectome_meta.set_creator(os.getenv('USER')) + a.connectome_meta.set_creator(os.getenv("USER")) if isdefined(self.inputs.email): a.connectome_meta.set_email(self.inputs.email) @@ -122,7 +127,7 @@ def _run_interface(self, runtime): for ntwk in self.inputs.graphml_networks: # There must be a better way to deal with the unique name problem # (i.e. 
tracks and networks can't use the same name, and previously we were pulling them both from the input files) - ntwk_name = 'Network {cnt}'.format(cnt=count) + ntwk_name = "Network {cnt}".format(cnt=count) a.add_connectome_network_from_graphml(ntwk_name, ntwk) count += 1 @@ -148,10 +153,11 @@ def _run_interface(self, runtime): if isdefined(self.inputs.gifti_surfaces): for surf in self.inputs.gifti_surfaces: _, surf_name, _ = split_filename(surf) - csurf = cf.CSurface.create_from_gifti("Surface %d - %s" % - (count, surf_name), surf) - csurf.fileformat = 'Gifti' - csurf.dtype = 'Surfaceset' + csurf = cf.CSurface.create_from_gifti( + "Surface %d - %s" % (count, surf_name), surf + ) + csurf.fileformat = "Gifti" + csurf.dtype = "Surfaceset" a.add_connectome_surface(csurf) count += 1 @@ -160,9 +166,10 @@ def _run_interface(self, runtime): for label in self.inputs.gifti_labels: _, label_name, _ = split_filename(label) csurf = cf.CSurface.create_from_gifti( - "Surface Label %d - %s" % (count, label_name), label) - csurf.fileformat = 'Gifti' - csurf.dtype = 'Labels' + "Surface Label %d - %s" % (count, label_name), label + ) + csurf.fileformat = "Gifti" + csurf.dtype = "Labels" a.add_connectome_surface(csurf) count += 1 @@ -181,19 +188,19 @@ def _run_interface(self, runtime): if isdefined(self.inputs.data_files): for data in self.inputs.data_files: _, data_name, _ = split_filename(data) - cda = cf.CData(name=data_name, src=data, fileformat='NumPy') - if not string.find(data_name, 'lengths') == -1: - cda.dtype = 'FinalFiberLengthArray' - if not string.find(data_name, 'endpoints') == -1: - cda.dtype = 'FiberEndpoints' - if not string.find(data_name, 'labels') == -1: - cda.dtype = 'FinalFiberLabels' + cda = cf.CData(name=data_name, src=data, fileformat="NumPy") + if not string.find(data_name, "lengths") == -1: + cda.dtype = "FinalFiberLengthArray" + if not string.find(data_name, "endpoints") == -1: + cda.dtype = "FiberEndpoints" + if not string.find(data_name, "labels") == -1: + cda.dtype = "FinalFiberLabels" a.add_connectome_data(cda) a.print_summary() _, name, ext = split_filename(self.inputs.out_file) - if not ext == '.cff': - ext = '.cff' + if not ext == ".cff": + ext = ".cff" cf.save_to_cff(a, op.abspath(name + ext)) return runtime @@ -201,9 +208,9 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self._outputs().get() _, name, ext = split_filename(self.inputs.out_file) - if not ext == '.cff': - ext = '.cff' - outputs['connectome_file'] = op.abspath(name + ext) + if not ext == ".cff": + ext = ".cff" + outputs["connectome_file"] = op.abspath(name + ext) return outputs @@ -211,16 +218,19 @@ class MergeCNetworksInputSpec(BaseInterfaceInputSpec): in_files = InputMultiPath( File(exists=True), mandatory=True, - desc='List of CFF files to extract networks from') + desc="List of CFF files to extract networks from", + ) out_file = File( - 'merged_network_connectome.cff', + "merged_network_connectome.cff", usedefault=True, - desc='Output CFF file with all the networks added') + desc="Output CFF file with all the networks added", + ) class MergeCNetworksOutputSpec(TraitedSpec): connectome_file = File( - exists=True, desc='Output CFF file with all the networks added') + exists=True, desc="Output CFF file with all the networks added" + ) class MergeCNetworks(CFFBaseInterface): @@ -235,11 +245,13 @@ class MergeCNetworks(CFFBaseInterface): >>> mrg.run() # doctest: +SKIP """ + input_spec = MergeCNetworksInputSpec output_spec = MergeCNetworksOutputSpec def _run_interface(self, runtime): import 
cfflib as cf + extracted_networks = [] for i, con in enumerate(self.inputs.in_files): @@ -250,21 +262,22 @@ def _run_interface(self, runtime): # metadata information ne.load() contitle = mycon.get_connectome_meta().get_title() - ne.set_name(str(i) + ': ' + contitle + ' - ' + ne.get_name()) + ne.set_name(str(i) + ": " + contitle + " - " + ne.get_name()) ne.set_src(ne.get_name()) extracted_networks.append(ne) # Add networks to new connectome newcon = cf.connectome( - title='All CNetworks', connectome_network=extracted_networks) + title="All CNetworks", connectome_network=extracted_networks + ) # Setting additional metadata metadata = newcon.get_connectome_meta() - metadata.set_creator('My Name') - metadata.set_email('My Email') + metadata.set_creator("My Name") + metadata.set_email("My Email") _, name, ext = split_filename(self.inputs.out_file) - if not ext == '.cff': - ext = '.cff' + if not ext == ".cff": + ext = ".cff" cf.save_to_cff(newcon, op.abspath(name + ext)) return runtime @@ -272,7 +285,7 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self._outputs().get() _, name, ext = split_filename(self.inputs.out_file) - if not ext == '.cff': - ext = '.cff' - outputs['connectome_file'] = op.abspath(name + ext) + if not ext == ".cff": + ext = ".cff" + outputs["connectome_file"] = op.abspath(name + ext) return outputs diff --git a/nipype/interfaces/cmtk/nbs.py b/nipype/interfaces/cmtk/nbs.py index 19425d2977..e224daa082 100644 --- a/nipype/interfaces/cmtk/nbs.py +++ b/nipype/interfaces/cmtk/nbs.py @@ -8,10 +8,19 @@ import networkx as nx from ... import logging -from ..base import (LibraryBaseInterface, BaseInterfaceInputSpec, traits, File, - TraitedSpec, InputMultiPath, OutputMultiPath, isdefined) +from ..base import ( + LibraryBaseInterface, + BaseInterfaceInputSpec, + traits, + File, + TraitedSpec, + InputMultiPath, + OutputMultiPath, + isdefined, +) from .base import have_cv -iflogger = logging.getLogger('nipype.interface') + +iflogger = logging.getLogger("nipype.interface") def ntwks_to_matrices(in_files, edge_key): @@ -23,12 +32,13 @@ def ntwks_to_matrices(in_files, edge_key): graph = nx.read_gpickle(name) for u, v, d in graph.edges(data=True): try: - graph[u][v]['weight'] = d[ - edge_key] # Setting the edge requested edge value as weight value + graph[u][v]["weight"] = d[ + edge_key + ] # Setting the edge requested edge value as weight value except: raise KeyError( - "the graph edges do not have {} attribute".format( - edge_key)) + "the graph edges do not have {} attribute".format(edge_key) + ) matrix[:, :, idx] = nx.to_numpy_matrix(graph) # Retrieve the matrix return matrix @@ -37,49 +47,50 @@ class NetworkBasedStatisticInputSpec(BaseInterfaceInputSpec): in_group1 = InputMultiPath( File(exists=True), mandatory=True, - desc='Networks for the first group of subjects') + desc="Networks for the first group of subjects", + ) in_group2 = InputMultiPath( File(exists=True), mandatory=True, - desc='Networks for the second group of subjects') + desc="Networks for the second group of subjects", + ) node_position_network = File( - desc= - 'An optional network used to position the nodes for the output networks' + desc="An optional network used to position the nodes for the output networks" ) number_of_permutations = traits.Int( - 1000, usedefault=True, desc='Number of permutations to perform') - threshold = traits.Float(3, usedefault=True, desc='T-statistic threshold') + 1000, usedefault=True, desc="Number of permutations to perform" + ) + threshold = traits.Float(3, 
usedefault=True, desc="T-statistic threshold") t_tail = traits.Enum( - 'left', - 'right', - 'both', + "left", + "right", + "both", usedefault=True, - desc='Can be one of "left", "right", or "both"') + desc='Can be one of "left", "right", or "both"', + ) edge_key = traits.Str( - 'number_of_fibers', + "number_of_fibers", usedefault=True, - desc= - 'Usually "number_of_fibers, "fiber_length_mean", "fiber_length_std" for matrices made with CMTK' - 'Sometimes "weight" or "value" for functional networks.') - out_nbs_network = File( - desc='Output network with edges identified by the NBS') + desc='Usually "number_of_fibers, "fiber_length_mean", "fiber_length_std" for matrices made with CMTK' + 'Sometimes "weight" or "value" for functional networks.', + ) + out_nbs_network = File(desc="Output network with edges identified by the NBS") out_nbs_pval_network = File( - desc= - 'Output network with p-values to weight the edges identified by the NBS' + desc="Output network with p-values to weight the edges identified by the NBS" ) class NetworkBasedStatisticOutputSpec(TraitedSpec): nbs_network = File( - exists=True, desc='Output network with edges identified by the NBS') + exists=True, desc="Output network with edges identified by the NBS" + ) nbs_pval_network = File( exists=True, - desc= - 'Output network with p-values to weight the edges identified by the NBS' + desc="Output network with p-values to weight the edges identified by the NBS", ) network_files = OutputMultiPath( - File(exists=True), - desc='Output network with edges identified by the NBS') + File(exists=True), desc="Output network with edges identified by the NBS" + ) class NetworkBasedStatistic(LibraryBaseInterface): @@ -99,9 +110,10 @@ class NetworkBasedStatistic(LibraryBaseInterface): >>> nbs.inputs.in_group2 = ['pat1.pck', 'pat2.pck'] # doctest: +SKIP >>> nbs.run() # doctest: +SKIP """ + input_spec = NetworkBasedStatisticInputSpec output_spec = NetworkBasedStatisticOutputSpec - _pkg = 'cviewer' + _pkg = "cviewer" def _run_interface(self, runtime): from cviewer.libs.pyconto.groupstatistics import nbs @@ -110,8 +122,16 @@ def _run_interface(self, runtime): K = self.inputs.number_of_permutations TAIL = self.inputs.t_tail edge_key = self.inputs.edge_key - details = edge_key + '-thresh-' + str(THRESH) + '-k-' + str( - K) + '-tail-' + TAIL + '.pck' + details = ( + edge_key + + "-thresh-" + + str(THRESH) + + "-k-" + + str(K) + + "-tail-" + + TAIL + + ".pck" + ) # Fill in the data from the networks X = ntwks_to_matrices(self.inputs.in_group1, edge_key) @@ -119,7 +139,7 @@ def _run_interface(self, runtime): PVAL, ADJ, _ = nbs.compute_nbs(X, Y, THRESH, K, TAIL) - iflogger.info('p-values:') + iflogger.info("p-values:") iflogger.info(PVAL) pADJ = ADJ.copy() @@ -141,22 +161,23 @@ def _run_interface(self, runtime): node_ntwk_name = self.inputs.in_group1[0] node_network = nx.read_gpickle(node_ntwk_name) - iflogger.info('Populating node dictionaries with attributes from %s', - node_ntwk_name) + iflogger.info( + "Populating node dictionaries with attributes from %s", node_ntwk_name + ) for nid, ndata in node_network.nodes(data=True): nbsgraph.nodes[nid] = ndata nbs_pval_graph.nodes[nid] = ndata - path = op.abspath('NBS_Result_' + details) + path = op.abspath("NBS_Result_" + details) iflogger.info(path) nx.write_gpickle(nbsgraph, path) - iflogger.info('Saving output NBS edge network as %s', path) + iflogger.info("Saving output NBS edge network as %s", path) - pval_path = op.abspath('NBS_P_vals_' + details) + pval_path = op.abspath("NBS_P_vals_" + 
details) iflogger.info(pval_path) nx.write_gpickle(nbs_pval_graph, pval_path) - iflogger.info('Saving output p-value network as %s', pval_path) + iflogger.info("Saving output p-value network as %s", pval_path) return runtime def _list_outputs(self): @@ -166,15 +187,23 @@ def _list_outputs(self): K = self.inputs.number_of_permutations TAIL = self.inputs.t_tail edge_key = self.inputs.edge_key - details = edge_key + '-thresh-' + str(THRESH) + '-k-' + str( - K) + '-tail-' + TAIL + '.pck' - path = op.abspath('NBS_Result_' + details) - pval_path = op.abspath('NBS_P_vals_' + details) - - outputs['nbs_network'] = path - outputs['nbs_pval_network'] = pval_path - outputs['network_files'] = [path, pval_path] + details = ( + edge_key + + "-thresh-" + + str(THRESH) + + "-k-" + + str(K) + + "-tail-" + + TAIL + + ".pck" + ) + path = op.abspath("NBS_Result_" + details) + pval_path = op.abspath("NBS_P_vals_" + details) + + outputs["nbs_network"] = path + outputs["nbs_pval_network"] = pval_path + outputs["network_files"] = [path, pval_path] return outputs def _gen_outfilename(self, name, ext): - return name + '.' + ext + return name + "." + ext diff --git a/nipype/interfaces/cmtk/nx.py b/nipype/interfaces/cmtk/nx.py index 7fb47da6ac..c34d372a7e 100644 --- a/nipype/interfaces/cmtk/nx.py +++ b/nipype/interfaces/cmtk/nx.py @@ -9,19 +9,27 @@ from ... import logging from ...utils.filemanip import split_filename -from ..base import (BaseInterface, BaseInterfaceInputSpec, traits, File, - TraitedSpec, InputMultiPath, OutputMultiPath, isdefined) +from ..base import ( + BaseInterface, + BaseInterfaceInputSpec, + traits, + File, + TraitedSpec, + InputMultiPath, + OutputMultiPath, + isdefined, +) from .base import have_cmp -iflogger = logging.getLogger('nipype.interface') +iflogger = logging.getLogger("nipype.interface") def read_unknown_ntwk(ntwk): if not isinstance(ntwk, nx.classes.graph.Graph): _, _, ext = split_filename(ntwk) - if ext == '.pck': + if ext == ".pck": ntwk = nx.read_gpickle(ntwk) - elif ext == '.graphml': + elif ext == ".graphml": ntwk = nx.read_graphml(ntwk) return ntwk @@ -39,34 +47,36 @@ def fix_keys_for_gexf(orig): GEXF Networks can be read in Gephi, however, the keys for the node and edge IDs must be converted to strings """ import networkx as nx + ntwk = nx.Graph() nodes = list(orig.nodes()) edges = list(orig.edges()) for node in nodes: newnodedata = {} newnodedata.update(orig.nodes[node]) - if 'dn_fsname' in orig.nodes[node]: - newnodedata['label'] = orig.nodes[node]['dn_fsname'] + if "dn_fsname" in orig.nodes[node]: + newnodedata["label"] = orig.nodes[node]["dn_fsname"] ntwk.add_node(str(node), **newnodedata) - if 'dn_position' in ntwk.nodes[str( - node)] and 'dn_position' in newnodedata: - ntwk.nodes[str(node)]['dn_position'] = str( - newnodedata['dn_position']) + if "dn_position" in ntwk.nodes[str(node)] and "dn_position" in newnodedata: + ntwk.nodes[str(node)]["dn_position"] = str(newnodedata["dn_position"]) for edge in edges: data = {} data = orig.edge[edge[0]][edge[1]] ntwk.add_edge(str(edge[0]), str(edge[1]), **data) - if 'fiber_length_mean' in ntwk.edge[str(edge[0])][str(edge[1])]: - ntwk.edge[str(edge[0])][str(edge[1])]['fiber_length_mean'] = str( - data['fiber_length_mean']) - if 'fiber_length_std' in ntwk.edge[str(edge[0])][str(edge[1])]: - ntwk.edge[str(edge[0])][str(edge[1])]['fiber_length_std'] = str( - data['fiber_length_std']) - if 'number_of_fibers' in ntwk.edge[str(edge[0])][str(edge[1])]: - ntwk.edge[str(edge[0])][str(edge[1])]['number_of_fibers'] = str( - 
data['number_of_fibers']) - if 'value' in ntwk.edge[str(edge[0])][str(edge[1])]: - ntwk.edge[str(edge[0])][str(edge[1])]['value'] = str(data['value']) + if "fiber_length_mean" in ntwk.edge[str(edge[0])][str(edge[1])]: + ntwk.edge[str(edge[0])][str(edge[1])]["fiber_length_mean"] = str( + data["fiber_length_mean"] + ) + if "fiber_length_std" in ntwk.edge[str(edge[0])][str(edge[1])]: + ntwk.edge[str(edge[0])][str(edge[1])]["fiber_length_std"] = str( + data["fiber_length_std"] + ) + if "number_of_fibers" in ntwk.edge[str(edge[0])][str(edge[1])]: + ntwk.edge[str(edge[0])][str(edge[1])]["number_of_fibers"] = str( + data["number_of_fibers"] + ) + if "value" in ntwk.edge[str(edge[0])][str(edge[1])]: + ntwk.edge[str(edge[0])][str(edge[1])]["value"] = str(data["value"]) return ntwk @@ -90,30 +100,34 @@ def average_networks(in_files, ntwk_res_file, group_id): import networkx as nx import os.path as op import scipy.io as sio - iflogger.info('Creating average network for group: %s', group_id) + + iflogger.info("Creating average network for group: %s", group_id) matlab_network_list = [] if len(in_files) == 1: avg_ntwk = read_unknown_ntwk(in_files[0]) else: count_to_keep_edge = np.round(len(in_files) / 2.0) - iflogger.info('Number of networks: %i, an edge must occur in at ' - 'least %i to remain in the average network', - len(in_files), count_to_keep_edge) + iflogger.info( + "Number of networks: %i, an edge must occur in at " + "least %i to remain in the average network", + len(in_files), + count_to_keep_edge, + ) ntwk_res_file = read_unknown_ntwk(ntwk_res_file) - iflogger.info('%i nodes found in network resolution file', - ntwk_res_file.number_of_nodes()) + iflogger.info( + "%i nodes found in network resolution file", ntwk_res_file.number_of_nodes() + ) ntwk = remove_all_edges(ntwk_res_file) counting_ntwk = ntwk.copy() # Sums all the relevant variables for index, subject in enumerate(in_files): tmp = nx.read_gpickle(subject) - iflogger.info('File %s has %i edges', subject, - tmp.number_of_edges()) + iflogger.info("File %s has %i edges", subject, tmp.number_of_edges()) edges = list(tmp.edges()) for edge in edges: data = {} data = tmp.edge[edge[0]][edge[1]] - data['count'] = 1 + data["count"] = 1 if ntwk.has_edge(edge[0], edge[1]): current = {} current = ntwk.edge[edge[0]][edge[1]] @@ -123,67 +137,75 @@ def average_networks(in_files, ntwk_res_file, group_id): for node in nodes: data = {} data = ntwk.nodes[node] - if 'value' in tmp.nodes[node]: - data['value'] = data['value'] + tmp.nodes[node]['value'] + if "value" in tmp.nodes[node]: + data["value"] = data["value"] + tmp.nodes[node]["value"] ntwk.add_node(node, **data) # Divides each value by the number of files nodes = list(ntwk.nodes()) edges = list(ntwk.edges()) - iflogger.info('Total network has %i edges', ntwk.number_of_edges()) + iflogger.info("Total network has %i edges", ntwk.number_of_edges()) avg_ntwk = nx.Graph() newdata = {} for node in nodes: data = ntwk.nodes[node] newdata = data - if 'value' in data: - newdata['value'] = data['value'] / len(in_files) - ntwk.nodes[node]['value'] = newdata + if "value" in data: + newdata["value"] = data["value"] / len(in_files) + ntwk.nodes[node]["value"] = newdata avg_ntwk.add_node(node, **newdata) edge_dict = {} - edge_dict['count'] = np.zeros((avg_ntwk.number_of_nodes(), - avg_ntwk.number_of_nodes())) + edge_dict["count"] = np.zeros( + (avg_ntwk.number_of_nodes(), avg_ntwk.number_of_nodes()) + ) for edge in edges: data = ntwk.edge[edge[0]][edge[1]] - if ntwk.edge[edge[0]][edge[1]]['count'] >= 
count_to_keep_edge: + if ntwk.edge[edge[0]][edge[1]]["count"] >= count_to_keep_edge: for key in list(data.keys()): - if not key == 'count': + if not key == "count": data[key] = data[key] / len(in_files) ntwk.edge[edge[0]][edge[1]] = data avg_ntwk.add_edge(edge[0], edge[1], **data) - edge_dict['count'][edge[0] - 1][edge[1] - 1] = ntwk.edge[edge[0]][ - edge[1]]['count'] + edge_dict["count"][edge[0] - 1][edge[1] - 1] = ntwk.edge[edge[0]][edge[1]][ + "count" + ] - iflogger.info('After thresholding, the average network has %i edges', - avg_ntwk.number_of_edges()) + iflogger.info( + "After thresholding, the average network has %i edges", + avg_ntwk.number_of_edges(), + ) avg_edges = avg_ntwk.edges() for edge in avg_edges: data = avg_ntwk.edge[edge[0]][edge[1]] for key in list(data.keys()): - if not key == 'count': - edge_dict[key] = np.zeros((avg_ntwk.number_of_nodes(), - avg_ntwk.number_of_nodes())) + if not key == "count": + edge_dict[key] = np.zeros( + (avg_ntwk.number_of_nodes(), avg_ntwk.number_of_nodes()) + ) edge_dict[key][edge[0] - 1][edge[1] - 1] = data[key] for key in list(edge_dict.keys()): tmp = {} - network_name = group_id + '_' + key + '_average.mat' + network_name = group_id + "_" + key + "_average.mat" matlab_network_list.append(op.abspath(network_name)) tmp[key] = edge_dict[key] sio.savemat(op.abspath(network_name), tmp) - iflogger.info('Saving average network for key: %s as %s', key, - op.abspath(network_name)) + iflogger.info( + "Saving average network for key: %s as %s", + key, + op.abspath(network_name), + ) # Writes the networks and returns the name - network_name = group_id + '_average.pck' + network_name = group_id + "_average.pck" nx.write_gpickle(avg_ntwk, op.abspath(network_name)) - iflogger.info('Saving average network as %s', op.abspath(network_name)) + iflogger.info("Saving average network as %s", op.abspath(network_name)) avg_ntwk = fix_keys_for_gexf(avg_ntwk) - network_name = group_id + '_average.gexf' + network_name = group_id + "_average.gexf" nx.write_gexf(avg_ntwk, op.abspath(network_name)) - iflogger.info('Saving average network as %s', op.abspath(network_name)) + iflogger.info("Saving average network as %s", op.abspath(network_name)) return network_name, matlab_network_list @@ -191,44 +213,46 @@ def compute_node_measures(ntwk, calculate_cliques=False): """ These return node-based measures """ - iflogger.info('Computing node measures:') + iflogger.info("Computing node measures:") measures = {} - iflogger.info('...Computing degree...') - measures['degree'] = np.array(list(ntwk.degree().values())) - iflogger.info('...Computing load centrality...') - measures['load_centrality'] = np.array( - list(nx.load_centrality(ntwk).values())) - iflogger.info('...Computing betweenness centrality...') - measures['betweenness_centrality'] = np.array( - list(nx.betweenness_centrality(ntwk).values())) - iflogger.info('...Computing degree centrality...') - measures['degree_centrality'] = np.array( - list(nx.degree_centrality(ntwk).values())) - iflogger.info('...Computing closeness centrality...') - measures['closeness_centrality'] = np.array( - list(nx.closeness_centrality(ntwk).values())) + iflogger.info("...Computing degree...") + measures["degree"] = np.array(list(ntwk.degree().values())) + iflogger.info("...Computing load centrality...") + measures["load_centrality"] = np.array(list(nx.load_centrality(ntwk).values())) + iflogger.info("...Computing betweenness centrality...") + measures["betweenness_centrality"] = np.array( + list(nx.betweenness_centrality(ntwk).values()) + ) 
+    iflogger.info("...Computing degree centrality...")
+    measures["degree_centrality"] = np.array(list(nx.degree_centrality(ntwk).values()))
+    iflogger.info("...Computing closeness centrality...")
+    measures["closeness_centrality"] = np.array(
+        list(nx.closeness_centrality(ntwk).values())
+    )
     # iflogger.info('...Computing eigenvector centrality...')
     # measures['eigenvector_centrality'] = np.array(nx.eigenvector_centrality(ntwk, max_iter=100000).values())
-    iflogger.info('...Computing triangles...')
-    measures['triangles'] = np.array(list(nx.triangles(ntwk).values()))
-    iflogger.info('...Computing clustering...')
-    measures['clustering'] = np.array(list(nx.clustering(ntwk).values()))
-    iflogger.info('...Computing k-core number')
-    measures['core_number'] = np.array(list(nx.core_number(ntwk).values()))
-    iflogger.info('...Identifying network isolates...')
+    iflogger.info("...Computing triangles...")
+    measures["triangles"] = np.array(list(nx.triangles(ntwk).values()))
+    iflogger.info("...Computing clustering...")
+    measures["clustering"] = np.array(list(nx.clustering(ntwk).values()))
+    iflogger.info("...Computing k-core number...")
+    measures["core_number"] = np.array(list(nx.core_number(ntwk).values()))
+    iflogger.info("...Identifying network isolates...")
     isolate_list = nx.isolates(ntwk)
     binarized = np.zeros((ntwk.number_of_nodes(), 1))
     for value in isolate_list:
         value = value - 1  # Zero indexing
         binarized[value] = 1
-    measures['isolates'] = binarized
+    measures["isolates"] = binarized

     if calculate_cliques:
-        iflogger.info('...Calculating node clique number')
-        measures['node_clique_number'] = np.array(
-            list(nx.node_clique_number(ntwk).values()))
-        iflogger.info('...Computing number of cliques for each node...')
-        measures['number_of_cliques'] = np.array(
-            list(nx.number_of_cliques(ntwk).values()))
+        iflogger.info("...Calculating node clique number...")
+        measures["node_clique_number"] = np.array(
+            list(nx.node_clique_number(ntwk).values())
+        )
+        iflogger.info("...Computing number of cliques for each node...")
+        measures["number_of_cliques"] = np.array(
+            list(nx.number_of_cliques(ntwk).values())
+        )

     return measures


@@ -236,7 +260,7 @@ def compute_edge_measures(ntwk):
     """
     These return edge-based measures
     """
-    iflogger.info('Computing edge measures:')
+    iflogger.info("Computing edge measures:")
     measures = {}
     # iflogger.info('...Computing google matrix...'
#Makes really large networks (500k+ edges)) # measures['google_matrix'] = nx.google_matrix(ntwk) @@ -251,60 +275,56 @@ def compute_dict_measures(ntwk): """ Returns a dictionary """ - iflogger.info('Computing measures which return a dictionary:') + iflogger.info("Computing measures which return a dictionary:") measures = {} - iflogger.info('...Computing rich club coefficient...') - measures['rich_club_coef'] = nx.rich_club_coefficient(ntwk) + iflogger.info("...Computing rich club coefficient...") + measures["rich_club_coef"] = nx.rich_club_coefficient(ntwk) return measures -def compute_singlevalued_measures(ntwk, weighted=True, - calculate_cliques=False): +def compute_singlevalued_measures(ntwk, weighted=True, calculate_cliques=False): """ Returns a single value per network """ - iflogger.info('Computing single valued measures:') + iflogger.info("Computing single valued measures:") measures = {} - iflogger.info('...Computing degree assortativity (pearson number) ...') + iflogger.info("...Computing degree assortativity (pearson number) ...") try: - measures['degree_pearsonr'] = nx.degree_pearsonr(ntwk) + measures["degree_pearsonr"] = nx.degree_pearsonr(ntwk) except AttributeError: # For NetworkX 1.6 - measures[ - 'degree_pearsonr'] = nx.degree_pearson_correlation_coefficient( - ntwk) - iflogger.info('...Computing degree assortativity...') + measures["degree_pearsonr"] = nx.degree_pearson_correlation_coefficient(ntwk) + iflogger.info("...Computing degree assortativity...") try: - measures['degree_assortativity'] = nx.degree_assortativity(ntwk) + measures["degree_assortativity"] = nx.degree_assortativity(ntwk) except AttributeError: - measures['degree_assortativity'] = nx.degree_assortativity_coefficient( - ntwk) - iflogger.info('...Computing transitivity...') - measures['transitivity'] = nx.transitivity(ntwk) - iflogger.info('...Computing number of connected_components...') - measures['number_connected_components'] = nx.number_connected_components( - ntwk) - iflogger.info('...Computing graph density...') - measures['graph_density'] = nx.density(ntwk) - iflogger.info('...Recording number of edges...') - measures['number_of_edges'] = nx.number_of_edges(ntwk) - iflogger.info('...Recording number of nodes...') - measures['number_of_nodes'] = nx.number_of_nodes(ntwk) - iflogger.info('...Computing average clustering...') - measures['average_clustering'] = nx.average_clustering(ntwk) + measures["degree_assortativity"] = nx.degree_assortativity_coefficient(ntwk) + iflogger.info("...Computing transitivity...") + measures["transitivity"] = nx.transitivity(ntwk) + iflogger.info("...Computing number of connected_components...") + measures["number_connected_components"] = nx.number_connected_components(ntwk) + iflogger.info("...Computing graph density...") + measures["graph_density"] = nx.density(ntwk) + iflogger.info("...Recording number of edges...") + measures["number_of_edges"] = nx.number_of_edges(ntwk) + iflogger.info("...Recording number of nodes...") + measures["number_of_nodes"] = nx.number_of_nodes(ntwk) + iflogger.info("...Computing average clustering...") + measures["average_clustering"] = nx.average_clustering(ntwk) if nx.is_connected(ntwk): - iflogger.info('...Calculating average shortest path length...') - measures[ - 'average_shortest_path_length'] = nx.average_shortest_path_length( - ntwk, weighted) + iflogger.info("...Calculating average shortest path length...") + measures["average_shortest_path_length"] = nx.average_shortest_path_length( + ntwk, weighted + ) else: - 
iflogger.info('...Calculating average shortest path length...') - measures[ - 'average_shortest_path_length'] = nx.average_shortest_path_length( - nx.connected_component_subgraphs(ntwk)[0], weighted) + iflogger.info("...Calculating average shortest path length...") + measures["average_shortest_path_length"] = nx.average_shortest_path_length( + nx.connected_component_subgraphs(ntwk)[0], weighted + ) if calculate_cliques: - iflogger.info('...Computing graph clique number...') - measures['graph_clique_number'] = nx.graph_clique_number( - ntwk) # out of memory error + iflogger.info("...Computing graph clique number...") + measures["graph_clique_number"] = nx.graph_clique_number( + ntwk + ) # out of memory error return measures @@ -324,7 +344,7 @@ def add_node_data(node_array, ntwk): newdata = {} for idx, data in ntwk.nodes(data=True): if not int(idx) == 0: - newdata['value'] = node_array[int(idx) - 1] + newdata["value"] = node_array[int(idx) - 1] data.update(newdata) node_ntwk.add_node(int(idx), **data) return node_ntwk @@ -336,8 +356,8 @@ def add_edge_data(edge_array, ntwk, above=0, below=0): for x, row in enumerate(edge_array): for y in range(0, np.max(np.shape(edge_array[x]))): if not edge_array[x, y] == 0: - data['value'] = edge_array[x, y] - if data['value'] <= below or data['value'] >= above: + data["value"] = edge_array[x, y] + if data["value"] <= below or data["value"] >= above: if edge_ntwk.has_edge(x + 1, y + 1): old_edge_dict = edge_ntwk.edge[x + 1][y + 1] edge_ntwk.remove_edge(x + 1, y + 1) @@ -347,79 +367,78 @@ def add_edge_data(edge_array, ntwk, above=0, below=0): class NetworkXMetricsInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc='Input network') + in_file = File(exists=True, mandatory=True, desc="Input network") out_k_core = File( - 'k_core', + "k_core", usedefault=True, - desc='Computed k-core network stored as a NetworkX pickle.') + desc="Computed k-core network stored as a NetworkX pickle.", + ) out_k_shell = File( - 'k_shell', + "k_shell", usedefault=True, - desc='Computed k-shell network stored as a NetworkX pickle.') + desc="Computed k-shell network stored as a NetworkX pickle.", + ) out_k_crust = File( - 'k_crust', + "k_crust", usedefault=True, - desc='Computed k-crust network stored as a NetworkX pickle.') + desc="Computed k-crust network stored as a NetworkX pickle.", + ) treat_as_weighted_graph = traits.Bool( True, usedefault=True, - desc= - 'Some network metrics can be calculated while considering only a binarized version of the graph' + desc="Some network metrics can be calculated while considering only a binarized version of the graph", ) compute_clique_related_measures = traits.Bool( False, usedefault=True, - desc= - 'Computing clique-related measures (e.g. node clique number) can be very time consuming' + desc="Computing clique-related measures (e.g. 
node clique number) can be very time consuming",
     )
     out_global_metrics_matlab = File(
-        genfile=True, desc='Output node metrics in MATLAB .mat format')
+        genfile=True, desc="Output global metrics in MATLAB .mat format"
+    )
     out_node_metrics_matlab = File(
-        genfile=True, desc='Output node metrics in MATLAB .mat format')
+        genfile=True, desc="Output node metrics in MATLAB .mat format"
+    )
     out_edge_metrics_matlab = File(
-        genfile=True, desc='Output edge metrics in MATLAB .mat format')
+        genfile=True, desc="Output edge metrics in MATLAB .mat format"
+    )
     out_pickled_extra_measures = File(
-        'extra_measures',
+        "extra_measures",
         usedefault=True,
-        desc=
-        'Network measures for group 1 that return dictionaries stored as a Pickle.'
+        desc="Network measures for group 1 that return dictionaries stored as a Pickle.",
     )


 class NetworkXMetricsOutputSpec(TraitedSpec):
-    gpickled_network_files = OutputMultiPath(
-        File(desc='Output gpickled network files'))
+    gpickled_network_files = OutputMultiPath(File(desc="Output gpickled network files"))
     matlab_matrix_files = OutputMultiPath(
-        File(desc='Output network metrics in MATLAB .mat format'))
-    global_measures_matlab = File(
-        desc='Output global metrics in MATLAB .mat format')
-    node_measures_matlab = File(
-        desc='Output node metrics in MATLAB .mat format')
-    edge_measures_matlab = File(
-        desc='Output edge metrics in MATLAB .mat format')
+        File(desc="Output network metrics in MATLAB .mat format")
+    )
+    global_measures_matlab = File(desc="Output global metrics in MATLAB .mat format")
+    node_measures_matlab = File(desc="Output node metrics in MATLAB .mat format")
+    edge_measures_matlab = File(desc="Output edge metrics in MATLAB .mat format")
     node_measure_networks = OutputMultiPath(
-        File(desc='Output gpickled network files for all node-based measures'))
+        File(desc="Output gpickled network files for all node-based measures")
+    )
     edge_measure_networks = OutputMultiPath(
-        File(desc='Output gpickled network files for all edge-based measures'))
+        File(desc="Output gpickled network files for all edge-based measures")
+    )
     k_networks = OutputMultiPath(
         File(
-            desc=
-            'Output gpickled network files for the k-core, k-shell, and k-crust networks'
-        ))
-    k_core = File(desc='Computed k-core network stored as a NetworkX pickle.')
-    k_shell = File(
-        desc='Computed k-shell network stored as a NetworkX pickle.')
-    k_crust = File(
-        desc='Computed k-crust network stored as a NetworkX pickle.')
+            desc="Output gpickled network files for the k-core, k-shell, and k-crust networks"
+        )
+    )
+    k_core = File(desc="Computed k-core network stored as a NetworkX pickle.")
+    k_shell = File(desc="Computed k-shell network stored as a NetworkX pickle.")
+    k_crust = File(desc="Computed k-crust network stored as a NetworkX pickle.")
     pickled_extra_measures = File(
-        desc=
-        'Network measures for the group that return dictionaries, stored as a Pickle.'
+        desc="Network measures for the group that return dictionaries, stored as a Pickle."
     )
     matlab_dict_measures = OutputMultiPath(
         File(
-            desc=
-            'Network measures for the group that return dictionaries, stored as matlab matrices.'
+            desc="Network measures for the group that return dictionaries, stored as matlab matrices."
+ ) + ) class NetworkXMetrics(BaseInterface): @@ -434,11 +453,13 @@ class NetworkXMetrics(BaseInterface): >>> nxmetrics.inputs.in_file = 'subj1.pck' >>> nxmetrics.run() # doctest: +SKIP """ + input_spec = NetworkXMetricsInputSpec output_spec = NetworkXMetricsOutputSpec def _run_interface(self, runtime): import scipy.io as sio + global gpickled, nodentwks, edgentwks, kntwks, matlab gpickled = list() nodentwks = list() @@ -455,70 +476,73 @@ def _run_interface(self, runtime): weighted = self.inputs.treat_as_weighted_graph global_measures = compute_singlevalued_measures( - ntwk, weighted, calculate_cliques) + ntwk, weighted, calculate_cliques + ) if isdefined(self.inputs.out_global_metrics_matlab): global_out_file = op.abspath(self.inputs.out_global_metrics_matlab) else: - global_out_file = op.abspath( - self._gen_outfilename('globalmetrics', 'mat')) - sio.savemat(global_out_file, global_measures, oned_as='column') + global_out_file = op.abspath(self._gen_outfilename("globalmetrics", "mat")) + sio.savemat(global_out_file, global_measures, oned_as="column") matlab.append(global_out_file) node_measures = compute_node_measures(ntwk, calculate_cliques) for key in list(node_measures.keys()): newntwk = add_node_data(node_measures[key], ntwk) - out_file = op.abspath(self._gen_outfilename(key, 'pck')) + out_file = op.abspath(self._gen_outfilename(key, "pck")) nx.write_gpickle(newntwk, out_file) nodentwks.append(out_file) if isdefined(self.inputs.out_node_metrics_matlab): node_out_file = op.abspath(self.inputs.out_node_metrics_matlab) else: - node_out_file = op.abspath( - self._gen_outfilename('nodemetrics', 'mat')) - sio.savemat(node_out_file, node_measures, oned_as='column') + node_out_file = op.abspath(self._gen_outfilename("nodemetrics", "mat")) + sio.savemat(node_out_file, node_measures, oned_as="column") matlab.append(node_out_file) gpickled.extend(nodentwks) edge_measures = compute_edge_measures(ntwk) for key in list(edge_measures.keys()): newntwk = add_edge_data(edge_measures[key], ntwk) - out_file = op.abspath(self._gen_outfilename(key, 'pck')) + out_file = op.abspath(self._gen_outfilename(key, "pck")) nx.write_gpickle(newntwk, out_file) edgentwks.append(out_file) if isdefined(self.inputs.out_edge_metrics_matlab): edge_out_file = op.abspath(self.inputs.out_edge_metrics_matlab) else: - edge_out_file = op.abspath( - self._gen_outfilename('edgemetrics', 'mat')) - sio.savemat(edge_out_file, edge_measures, oned_as='column') + edge_out_file = op.abspath(self._gen_outfilename("edgemetrics", "mat")) + sio.savemat(edge_out_file, edge_measures, oned_as="column") matlab.append(edge_out_file) gpickled.extend(edgentwks) ntwk_measures = compute_network_measures(ntwk) for key in list(ntwk_measures.keys()): - if key == 'k_core': + if key == "k_core": out_file = op.abspath( - self._gen_outfilename(self.inputs.out_k_core, 'pck')) - if key == 'k_shell': + self._gen_outfilename(self.inputs.out_k_core, "pck") + ) + if key == "k_shell": out_file = op.abspath( - self._gen_outfilename(self.inputs.out_k_shell, 'pck')) - if key == 'k_crust': + self._gen_outfilename(self.inputs.out_k_shell, "pck") + ) + if key == "k_crust": out_file = op.abspath( - self._gen_outfilename(self.inputs.out_k_crust, 'pck')) + self._gen_outfilename(self.inputs.out_k_crust, "pck") + ) nx.write_gpickle(ntwk_measures[key], out_file) kntwks.append(out_file) gpickled.extend(kntwks) out_pickled_extra_measures = op.abspath( - self._gen_outfilename(self.inputs.out_pickled_extra_measures, - 'pck')) + 
self._gen_outfilename(self.inputs.out_pickled_extra_measures, "pck")
+        )
         dict_measures = compute_dict_measures(ntwk)
-        iflogger.info('Saving extra measure file to %s in Pickle format',
-                      op.abspath(out_pickled_extra_measures))
-        with open(out_pickled_extra_measures, 'w') as fo:
+        iflogger.info(
+            "Saving extra measure file to %s in Pickle format",
+            op.abspath(out_pickled_extra_measures),
+        )
+        # pickle.dump writes bytes, so the file must be opened in binary mode on Python 3
+        with open(out_pickled_extra_measures, "wb") as fo:
             pickle.dump(dict_measures, fo)
-        iflogger.info('Saving MATLAB measures as %s', matlab)
+        iflogger.info("Saving MATLAB measures as %s", matlab)

         # Loops through the measures which return a dictionary,
         # converts the keys and values to a Numpy array,
@@ -535,67 +559,72 @@ def _run_interface(self, runtime):
                 values = np.array(dict_measures[key][keyd])
                 nparrayvalues = np.append(nparrayvalues, values)
             nparray = np.vstack((nparraykeys, nparrayvalues))
-            out_file = op.abspath(self._gen_outfilename(key, 'mat'))
+            out_file = op.abspath(self._gen_outfilename(key, "mat"))
             npdict = {}
             npdict[key] = nparray
-            sio.savemat(out_file, npdict, oned_as='column')
+            sio.savemat(out_file, npdict, oned_as="column")
             dicts.append(out_file)
         return runtime

     def _list_outputs(self):
         outputs = self.output_spec().get()
         outputs["k_core"] = op.abspath(
-            self._gen_outfilename(self.inputs.out_k_core, 'pck'))
+            self._gen_outfilename(self.inputs.out_k_core, "pck")
+        )
         outputs["k_shell"] = op.abspath(
-            self._gen_outfilename(self.inputs.out_k_shell, 'pck'))
+            self._gen_outfilename(self.inputs.out_k_shell, "pck")
+        )
         outputs["k_crust"] = op.abspath(
-            self._gen_outfilename(self.inputs.out_k_crust, 'pck'))
+            self._gen_outfilename(self.inputs.out_k_crust, "pck")
+        )
         outputs["gpickled_network_files"] = gpickled
         outputs["k_networks"] = kntwks
         outputs["node_measure_networks"] = nodentwks
         outputs["edge_measure_networks"] = edgentwks
         outputs["matlab_dict_measures"] = dicts
         outputs["global_measures_matlab"] = op.abspath(
-            self._gen_outfilename('globalmetrics', 'mat'))
+            self._gen_outfilename("globalmetrics", "mat")
+        )
         outputs["node_measures_matlab"] = op.abspath(
-            self._gen_outfilename('nodemetrics', 'mat'))
+            self._gen_outfilename("nodemetrics", "mat")
+        )
         outputs["edge_measures_matlab"] = op.abspath(
-            self._gen_outfilename('edgemetrics', 'mat'))
+            self._gen_outfilename("edgemetrics", "mat")
+        )
         outputs["matlab_matrix_files"] = [
-            outputs["global_measures_matlab"], outputs["node_measures_matlab"],
-            outputs["edge_measures_matlab"]
+            outputs["global_measures_matlab"],
+            outputs["node_measures_matlab"],
+            outputs["edge_measures_matlab"],
         ]
         outputs["pickled_extra_measures"] = op.abspath(
-            self._gen_outfilename(self.inputs.out_pickled_extra_measures,
-                                  'pck'))
+            self._gen_outfilename(self.inputs.out_pickled_extra_measures, "pck")
+        )
         return outputs

     def _gen_outfilename(self, name, ext):
-        return name + '.' + ext
+        return name + "." + ext


 class AverageNetworksInputSpec(BaseInterfaceInputSpec):
     in_files = InputMultiPath(
-        File(exists=True),
-        mandatory=True,
-        desc='Networks for a group of subjects')
+        File(exists=True), mandatory=True, desc="Networks for a group of subjects"
+    )
     resolution_network_file = File(
         exists=True,
-        desc=
-        'Parcellation files from Connectome Mapping Toolkit. This is not necessary'
-        ', but if included, the interface will output the statistical maps as networkx graphs.'
+        desc="Parcellation files from Connectome Mapping Toolkit. This is not necessary"
+        ", but if included, the interface will output the statistical maps as networkx graphs.",
     )
-    group_id = traits.Str('group1', usedefault=True, desc='ID for group')
-    out_gpickled_groupavg = File(
-        desc='Average network saved as a NetworkX .pck')
-    out_gexf_groupavg = File(desc='Average network saved as a .gexf file')
+    group_id = traits.Str("group1", usedefault=True, desc="ID for group")
+    out_gpickled_groupavg = File(desc="Average network saved as a NetworkX .pck")
+    out_gexf_groupavg = File(desc="Average network saved as a .gexf file")


 class AverageNetworksOutputSpec(TraitedSpec):
-    gpickled_groupavg = File(desc='Average network saved as a NetworkX .pck')
-    gexf_groupavg = File(desc='Average network saved as a .gexf file')
+    gpickled_groupavg = File(desc="Average network saved as a NetworkX .pck")
+    gexf_groupavg = File(desc="Average network saved as a .gexf file")
     matlab_groupavgs = OutputMultiPath(
-        File(desc='Average network saved as a .gexf file'))
+        File(desc="Average networks saved as MATLAB .mat files")
+    )


 class AverageNetworks(BaseInterface):
@@ -614,6 +643,7 @@ class AverageNetworks(BaseInterface):
     >>> avg.run() # doctest: +SKIP

     """
+
     input_spec = AverageNetworksInputSpec
     output_spec = AverageNetworksOutputSpec

@@ -625,29 +655,28 @@ def _run_interface(self, runtime):
         global matlab_network_list
         network_name, matlab_network_list = average_networks(
-            self.inputs.in_files, ntwk_res_file, self.inputs.group_id)
+            self.inputs.in_files, ntwk_res_file, self.inputs.group_id
+        )

         return runtime

     def _list_outputs(self):
         outputs = self.output_spec().get()
         if not isdefined(self.inputs.out_gpickled_groupavg):
             outputs["gpickled_groupavg"] = op.abspath(
-                self._gen_outfilename(self.inputs.group_id + '_average',
-                                      'pck'))
+                self._gen_outfilename(self.inputs.group_id + "_average", "pck")
+            )
         else:
-            outputs["gpickled_groupavg"] = op.abspath(
-                self.inputs.out_gpickled_groupavg)
+            outputs["gpickled_groupavg"] = op.abspath(self.inputs.out_gpickled_groupavg)

         if not isdefined(self.inputs.out_gexf_groupavg):
             outputs["gexf_groupavg"] = op.abspath(
-                self._gen_outfilename(self.inputs.group_id + '_average',
-                                      'gexf'))
+                self._gen_outfilename(self.inputs.group_id + "_average", "gexf")
+            )
         else:
-            outputs["gexf_groupavg"] = op.abspath(
-                self.inputs.out_gexf_groupavg)
+            outputs["gexf_groupavg"] = op.abspath(self.inputs.out_gexf_groupavg)

         outputs["matlab_groupavgs"] = matlab_network_list
         return outputs

     def _gen_outfilename(self, name, ext):
-        return name + '.' + ext
+        return name + "." + ext
diff --git a/nipype/interfaces/cmtk/parcellation.py b/nipype/interfaces/cmtk/parcellation.py
index a80c4e895f..60ef0445b3 100644
--- a/nipype/interfaces/cmtk/parcellation.py
+++ b/nipype/interfaces/cmtk/parcellation.py
@@ -10,151 +10,321 @@
 import networkx as nx
 from ...
import logging -from ..base import (BaseInterface, LibraryBaseInterface, - BaseInterfaceInputSpec, traits, File, - TraitedSpec, Directory, isdefined) +from ..base import ( + BaseInterface, + LibraryBaseInterface, + BaseInterfaceInputSpec, + traits, + File, + TraitedSpec, + Directory, + isdefined, +) from .base import have_cmp -iflogger = logging.getLogger('nipype.interface') + +iflogger = logging.getLogger("nipype.interface") def create_annot_label(subject_id, subjects_dir, fs_dir, parcellation_name): import cmp from cmp.util import runCmd + iflogger.info("Create the cortical labels necessary for our ROIs") iflogger.info("=================================================") - fs_label_dir = op.join(op.join(subjects_dir, subject_id), 'label') + fs_label_dir = op.join(op.join(subjects_dir, subject_id), "label") output_dir = op.abspath(op.curdir) paths = [] cmp_config = cmp.configuration.PipelineConfiguration() cmp_config.parcellation_scheme = "Lausanne2008" - for hemi in ['lh', 'rh']: - spath = cmp_config._get_lausanne_parcellation('Lausanne2008')[ - parcellation_name]['fs_label_subdir_name'] % hemi + for hemi in ["lh", "rh"]: + spath = ( + cmp_config._get_lausanne_parcellation("Lausanne2008")[parcellation_name][ + "fs_label_subdir_name" + ] + % hemi + ) paths.append(spath) for p in paths: try: - os.makedirs(op.join('.', p)) + os.makedirs(op.join(".", p)) except: pass - if '33' in parcellation_name: + if "33" in parcellation_name: comp = [ - ('rh', 'myatlas_36_rh.gcs', 'rh.myaparc_36.annot', - 'regenerated_rh_36', 'myaparc_36'), - ('rh', 'myatlas_60_rh.gcs', 'rh.myaparc_60.annot', - 'regenerated_rh_60', 'myaparc_60'), - ('lh', 'myatlas_36_lh.gcs', 'lh.myaparc_36.annot', - 'regenerated_lh_36', 'myaparc_36'), - ('lh', 'myatlas_60_lh.gcs', 'lh.myaparc_60.annot', - 'regenerated_lh_60', 'myaparc_60'), + ( + "rh", + "myatlas_36_rh.gcs", + "rh.myaparc_36.annot", + "regenerated_rh_36", + "myaparc_36", + ), + ( + "rh", + "myatlas_60_rh.gcs", + "rh.myaparc_60.annot", + "regenerated_rh_60", + "myaparc_60", + ), + ( + "lh", + "myatlas_36_lh.gcs", + "lh.myaparc_36.annot", + "regenerated_lh_36", + "myaparc_36", + ), + ( + "lh", + "myatlas_60_lh.gcs", + "lh.myaparc_60.annot", + "regenerated_lh_60", + "myaparc_60", + ), ] - elif '60' in parcellation_name: + elif "60" in parcellation_name: comp = [ - ('rh', 'myatlas_60_rh.gcs', 'rh.myaparc_60.annot', - 'regenerated_rh_60', 'myaparc_60'), - ('lh', 'myatlas_60_lh.gcs', 'lh.myaparc_60.annot', - 'regenerated_lh_60', 'myaparc_60'), + ( + "rh", + "myatlas_60_rh.gcs", + "rh.myaparc_60.annot", + "regenerated_rh_60", + "myaparc_60", + ), + ( + "lh", + "myatlas_60_lh.gcs", + "lh.myaparc_60.annot", + "regenerated_lh_60", + "myaparc_60", + ), ] - elif '125' in parcellation_name: + elif "125" in parcellation_name: comp = [ - ('rh', 'myatlas_125_rh.gcs', 'rh.myaparc_125.annot', - 'regenerated_rh_125', 'myaparc_125'), - ('rh', 'myatlas_60_rh.gcs', 'rh.myaparc_60.annot', - 'regenerated_rh_60', 'myaparc_60'), - ('lh', 'myatlas_125_lh.gcs', 'lh.myaparc_125.annot', - 'regenerated_lh_125', 'myaparc_125'), - ('lh', 'myatlas_60_lh.gcs', 'lh.myaparc_60.annot', - 'regenerated_lh_60', 'myaparc_60'), + ( + "rh", + "myatlas_125_rh.gcs", + "rh.myaparc_125.annot", + "regenerated_rh_125", + "myaparc_125", + ), + ( + "rh", + "myatlas_60_rh.gcs", + "rh.myaparc_60.annot", + "regenerated_rh_60", + "myaparc_60", + ), + ( + "lh", + "myatlas_125_lh.gcs", + "lh.myaparc_125.annot", + "regenerated_lh_125", + "myaparc_125", + ), + ( + "lh", + "myatlas_60_lh.gcs", + "lh.myaparc_60.annot", + 
"regenerated_lh_60", + "myaparc_60", + ), ] - elif '250' in parcellation_name: + elif "250" in parcellation_name: comp = [ - ('rh', 'myatlas_250_rh.gcs', 'rh.myaparc_250.annot', - 'regenerated_rh_250', 'myaparc_250'), - ('rh', 'myatlas_60_rh.gcs', 'rh.myaparc_60.annot', - 'regenerated_rh_60', 'myaparc_60'), - ('lh', 'myatlas_250_lh.gcs', 'lh.myaparc_250.annot', - 'regenerated_lh_250', 'myaparc_250'), - ('lh', 'myatlas_60_lh.gcs', 'lh.myaparc_60.annot', - 'regenerated_lh_60', 'myaparc_60'), + ( + "rh", + "myatlas_250_rh.gcs", + "rh.myaparc_250.annot", + "regenerated_rh_250", + "myaparc_250", + ), + ( + "rh", + "myatlas_60_rh.gcs", + "rh.myaparc_60.annot", + "regenerated_rh_60", + "myaparc_60", + ), + ( + "lh", + "myatlas_250_lh.gcs", + "lh.myaparc_250.annot", + "regenerated_lh_250", + "myaparc_250", + ), + ( + "lh", + "myatlas_60_lh.gcs", + "lh.myaparc_60.annot", + "regenerated_lh_60", + "myaparc_60", + ), ] else: comp = [ - ('rh', 'myatlas_36_rh.gcs', 'rh.myaparc_36.annot', - 'regenerated_rh_36', 'myaparc_36'), - ('rh', 'myatlasP1_16_rh.gcs', 'rh.myaparcP1_16.annot', - 'regenerated_rh_500', 'myaparcP1_16'), - ('rh', 'myatlasP17_28_rh.gcs', 'rh.myaparcP17_28.annot', - 'regenerated_rh_500', 'myaparcP17_28'), - ('rh', 'myatlasP29_36_rh.gcs', 'rh.myaparcP29_36.annot', - 'regenerated_rh_500', 'myaparcP29_36'), - ('rh', 'myatlas_60_rh.gcs', 'rh.myaparc_60.annot', - 'regenerated_rh_60', 'myaparc_60'), - ('rh', 'myatlas_125_rh.gcs', 'rh.myaparc_125.annot', - 'regenerated_rh_125', 'myaparc_125'), - ('rh', 'myatlas_250_rh.gcs', 'rh.myaparc_250.annot', - 'regenerated_rh_250', 'myaparc_250'), - ('lh', 'myatlas_36_lh.gcs', 'lh.myaparc_36.annot', - 'regenerated_lh_36', 'myaparc_36'), - ('lh', 'myatlasP1_16_lh.gcs', 'lh.myaparcP1_16.annot', - 'regenerated_lh_500', 'myaparcP1_16'), - ('lh', 'myatlasP17_28_lh.gcs', 'lh.myaparcP17_28.annot', - 'regenerated_lh_500', 'myaparcP17_28'), - ('lh', 'myatlasP29_36_lh.gcs', 'lh.myaparcP29_36.annot', - 'regenerated_lh_500', 'myaparcP29_36'), - ('lh', 'myatlas_60_lh.gcs', 'lh.myaparc_60.annot', - 'regenerated_lh_60', 'myaparc_60'), - ('lh', 'myatlas_125_lh.gcs', 'lh.myaparc_125.annot', - 'regenerated_lh_125', 'myaparc_125'), - ('lh', 'myatlas_250_lh.gcs', 'lh.myaparc_250.annot', - 'regenerated_lh_250', 'myaparc_250'), + ( + "rh", + "myatlas_36_rh.gcs", + "rh.myaparc_36.annot", + "regenerated_rh_36", + "myaparc_36", + ), + ( + "rh", + "myatlasP1_16_rh.gcs", + "rh.myaparcP1_16.annot", + "regenerated_rh_500", + "myaparcP1_16", + ), + ( + "rh", + "myatlasP17_28_rh.gcs", + "rh.myaparcP17_28.annot", + "regenerated_rh_500", + "myaparcP17_28", + ), + ( + "rh", + "myatlasP29_36_rh.gcs", + "rh.myaparcP29_36.annot", + "regenerated_rh_500", + "myaparcP29_36", + ), + ( + "rh", + "myatlas_60_rh.gcs", + "rh.myaparc_60.annot", + "regenerated_rh_60", + "myaparc_60", + ), + ( + "rh", + "myatlas_125_rh.gcs", + "rh.myaparc_125.annot", + "regenerated_rh_125", + "myaparc_125", + ), + ( + "rh", + "myatlas_250_rh.gcs", + "rh.myaparc_250.annot", + "regenerated_rh_250", + "myaparc_250", + ), + ( + "lh", + "myatlas_36_lh.gcs", + "lh.myaparc_36.annot", + "regenerated_lh_36", + "myaparc_36", + ), + ( + "lh", + "myatlasP1_16_lh.gcs", + "lh.myaparcP1_16.annot", + "regenerated_lh_500", + "myaparcP1_16", + ), + ( + "lh", + "myatlasP17_28_lh.gcs", + "lh.myaparcP17_28.annot", + "regenerated_lh_500", + "myaparcP17_28", + ), + ( + "lh", + "myatlasP29_36_lh.gcs", + "lh.myaparcP29_36.annot", + "regenerated_lh_500", + "myaparcP29_36", + ), + ( + "lh", + "myatlas_60_lh.gcs", + "lh.myaparc_60.annot", + 
"regenerated_lh_60", + "myaparc_60", + ), + ( + "lh", + "myatlas_125_lh.gcs", + "lh.myaparc_125.annot", + "regenerated_lh_125", + "myaparc_125", + ), + ( + "lh", + "myatlas_250_lh.gcs", + "lh.myaparc_250.annot", + "regenerated_lh_250", + "myaparc_250", + ), ] log = cmp_config.get_logger() for out in comp: mris_cmd = 'mris_ca_label %s %s "%s/surf/%s.sphere.reg" "%s" "%s" ' % ( - subject_id, out[0], op.join(subjects_dir, subject_id), out[0], + subject_id, + out[0], + op.join(subjects_dir, subject_id), + out[0], cmp_config.get_lausanne_atlas(out[1]), - op.join(fs_label_dir, out[2])) + op.join(fs_label_dir, out[2]), + ) runCmd(mris_cmd, log) - iflogger.info('-----------') + iflogger.info("-----------") annot = '--annotation "%s"' % out[4] mri_an_cmd = 'mri_annotation2label --subject %s --hemi %s --outdir "%s" %s' % ( - subject_id, out[0], op.join(output_dir, out[3]), annot) + subject_id, + out[0], + op.join(output_dir, out[3]), + annot, + ) iflogger.info(mri_an_cmd) runCmd(mri_an_cmd, log) - iflogger.info('-----------') - iflogger.info(os.environ['SUBJECTS_DIR']) + iflogger.info("-----------") + iflogger.info(os.environ["SUBJECTS_DIR"]) # extract cc and unknown to add to tractography mask, we do not want this as a region of interest # in FS 5.0, unknown and corpuscallosum are not available for the 35 scale (why?), # but for the other scales only, take the ones from _60 - rhun = op.join(output_dir, 'rh.unknown.label') - lhun = op.join(output_dir, 'lh.unknown.label') - rhco = op.join(output_dir, 'rh.corpuscallosum.label') - lhco = op.join(output_dir, 'lh.corpuscallosum.label') - shutil.copy( - op.join(output_dir, 'regenerated_rh_60', 'rh.unknown.label'), rhun) - shutil.copy( - op.join(output_dir, 'regenerated_lh_60', 'lh.unknown.label'), lhun) + rhun = op.join(output_dir, "rh.unknown.label") + lhun = op.join(output_dir, "lh.unknown.label") + rhco = op.join(output_dir, "rh.corpuscallosum.label") + lhco = op.join(output_dir, "lh.corpuscallosum.label") + shutil.copy(op.join(output_dir, "regenerated_rh_60", "rh.unknown.label"), rhun) + shutil.copy(op.join(output_dir, "regenerated_lh_60", "lh.unknown.label"), lhun) shutil.copy( - op.join(output_dir, 'regenerated_rh_60', 'rh.corpuscallosum.label'), - rhco) + op.join(output_dir, "regenerated_rh_60", "rh.corpuscallosum.label"), rhco + ) shutil.copy( - op.join(output_dir, 'regenerated_lh_60', 'lh.corpuscallosum.label'), - lhco) + op.join(output_dir, "regenerated_lh_60", "lh.corpuscallosum.label"), lhco + ) - mri_cmd = """mri_label2vol --label "%s" --label "%s" --label "%s" --label "%s" --temp "%s" --o "%s" --identity """ % ( - rhun, lhun, rhco, lhco, - op.join(op.join(subjects_dir, subject_id), 'mri', 'orig.mgz'), - op.join(fs_label_dir, 'cc_unknown.nii.gz')) + mri_cmd = ( + """mri_label2vol --label "%s" --label "%s" --label "%s" --label "%s" --temp "%s" --o "%s" --identity """ + % ( + rhun, + lhun, + rhco, + lhco, + op.join(op.join(subjects_dir, subject_id), "mri", "orig.mgz"), + op.join(fs_label_dir, "cc_unknown.nii.gz"), + ) + ) runCmd(mri_cmd, log) - runCmd('mris_volmask %s' % subject_id, log) + runCmd("mris_volmask %s" % subject_id, log) mri_cmd = 'mri_convert -i "%s/mri/ribbon.mgz" -o "%s/mri/ribbon.nii.gz"' % ( - op.join(subjects_dir, subject_id), op.join(subjects_dir, subject_id)) + op.join(subjects_dir, subject_id), + op.join(subjects_dir, subject_id), + ) runCmd(mri_cmd, log) mri_cmd = 'mri_convert -i "%s/mri/aseg.mgz" -o "%s/mri/aseg.nii.gz"' % ( - op.join(subjects_dir, subject_id), op.join(subjects_dir, subject_id)) + op.join(subjects_dir, 
subject_id), + op.join(subjects_dir, subject_id), + ) runCmd(mri_cmd, log) iflogger.info("[ DONE ]") @@ -165,16 +335,16 @@ def create_roi(subject_id, subjects_dir, fs_dir, parcellation_name, dilation): from networks. Iteratively create volume. """ import cmp from cmp.util import runCmd + iflogger.info("Create the ROIs:") output_dir = op.abspath(op.curdir) fs_dir = op.join(subjects_dir, subject_id) cmp_config = cmp.configuration.PipelineConfiguration() cmp_config.parcellation_scheme = "Lausanne2008" log = cmp_config.get_logger() - parval = cmp_config._get_lausanne_parcellation('Lausanne2008')[ - parcellation_name] - pgpath = parval['node_information_graphml'] - aseg = nb.load(op.join(fs_dir, 'mri', 'aseg.nii.gz')) + parval = cmp_config._get_lausanne_parcellation("Lausanne2008")[parcellation_name] + pgpath = parval["node_information_graphml"] + aseg = nb.load(op.join(fs_dir, "mri", "aseg.nii.gz")) asegd = aseg.get_data() # identify cortical voxels, right (3) and left (42) hemispheres @@ -189,7 +359,7 @@ def create_roi(subject_id, subjects_dir, fs_dir, parcellation_name, dilation): shape = (25, 25, 25) center = np.array(shape) // 2 # dist: distances from the center of the neighbourhood - dist = np.zeros(shape, dtype='float32') + dist = np.zeros(shape, dtype="float32") for x in range(shape[0]): for y in range(shape[1]): for z in range(shape[2]): @@ -198,8 +368,8 @@ def create_roi(subject_id, subjects_dir, fs_dir, parcellation_name, dilation): iflogger.info("Working on parcellation: ") iflogger.info( - cmp_config._get_lausanne_parcellation('Lausanne2008')[ - parcellation_name]) + cmp_config._get_lausanne_parcellation("Lausanne2008")[parcellation_name] + ) iflogger.info("========================") pg = nx.read_graphml(pgpath) # each node represents a brain region @@ -211,52 +381,53 @@ def create_roi(subject_id, subjects_dir, fs_dir, parcellation_name, dilation): count = count + 1 iflogger.info(brv) iflogger.info(brk) - if brv['dn_hemisphere'] == 'left': - hemi = 'lh' - elif brv['dn_hemisphere'] == 'right': - hemi = 'rh' - if brv['dn_region'] == 'subcortical': + if brv["dn_hemisphere"] == "left": + hemi = "lh" + elif brv["dn_hemisphere"] == "right": + hemi = "rh" + if brv["dn_region"] == "subcortical": iflogger.info(brv) - iflogger.info('---------------------') - iflogger.info('Work on brain region: %s', brv['dn_region']) - iflogger.info('Freesurfer Name: %s', brv['dn_fsname']) - iflogger.info('Region %s of %s', count, pg.number_of_nodes()) - iflogger.info('---------------------') + iflogger.info("---------------------") + iflogger.info("Work on brain region: %s", brv["dn_region"]) + iflogger.info("Freesurfer Name: %s", brv["dn_fsname"]) + iflogger.info("Region %s of %s", count, pg.number_of_nodes()) + iflogger.info("---------------------") # if it is subcortical, retrieve roi from aseg - idx = np.where(asegd == int(brv['dn_fs_aseg_val'])) - rois[idx] = int(brv['dn_correspondence_id']) + idx = np.where(asegd == int(brv["dn_fs_aseg_val"])) + rois[idx] = int(brv["dn_correspondence_id"]) - elif brv['dn_region'] == 'cortical': + elif brv["dn_region"] == "cortical": iflogger.info(brv) - iflogger.info('---------------------') - iflogger.info('Work on brain region: %s', brv['dn_region']) - iflogger.info('Freesurfer Name: %s', brv['dn_fsname']) - iflogger.info('Region %s of %s', count, pg.number_of_nodes()) - iflogger.info('---------------------') - - labelpath = op.join(output_dir, - parval['fs_label_subdir_name'] % hemi) + iflogger.info("---------------------") + iflogger.info("Work on brain region: 
%s", brv["dn_region"]) + iflogger.info("Freesurfer Name: %s", brv["dn_fsname"]) + iflogger.info("Region %s of %s", count, pg.number_of_nodes()) + iflogger.info("---------------------") + + labelpath = op.join(output_dir, parval["fs_label_subdir_name"] % hemi) # construct .label file name - fname = '%s.%s.label' % (hemi, brv['dn_fsname']) + fname = "%s.%s.label" % (hemi, brv["dn_fsname"]) # execute fs mri_label2vol to generate volume roi from the label file # store it in temporary file to be overwritten for each region mri_cmd = 'mri_label2vol --label "%s" --temp "%s" --o "%s" --identity' % ( - op.join(labelpath, fname), op.join(fs_dir, 'mri', 'orig.mgz'), - op.join(output_dir, 'tmp.nii.gz')) + op.join(labelpath, fname), + op.join(fs_dir, "mri", "orig.mgz"), + op.join(output_dir, "tmp.nii.gz"), + ) runCmd(mri_cmd, log) - tmp = nb.load(op.join(output_dir, 'tmp.nii.gz')) + tmp = nb.load(op.join(output_dir, "tmp.nii.gz")) tmpd = tmp.get_data() # find voxel and set them to intensityvalue in rois idx = np.where(tmpd == 1) - rois[idx] = int(brv['dn_correspondence_id']) + rois[idx] = int(brv["dn_correspondence_id"]) # store volume eg in ROI_scale33.nii.gz - out_roi = op.abspath('ROI_%s.nii.gz' % parcellation_name) + out_roi = op.abspath("ROI_%s.nii.gz" % parcellation_name) # update the header hdr = aseg.header @@ -274,22 +445,20 @@ def create_roi(subject_id, subjects_dir, fs_dir, parcellation_name, dilation): # loop throughout all the voxels belonging to the aseg GM volume for j in range(xx.size): if rois[xx[j], yy[j], zz[j]] == 0: - local = extract( - rois, shape, position=(xx[j], yy[j], zz[j]), fill=0) + local = extract(rois, shape, position=(xx[j], yy[j], zz[j]), fill=0) mask = local.copy() mask[np.nonzero(local > 0)] = 1 thisdist = np.multiply(dist, mask) thisdist[np.nonzero(thisdist == 0)] = np.amax(thisdist) - value = np.int_( - local[np.nonzero(thisdist == np.amin(thisdist))]) + value = np.int_(local[np.nonzero(thisdist == np.amin(thisdist))]) if value.size > 1: counts = np.bincount(value) value = np.argmax(counts) rois[xx[j], yy[j], zz[j]] = value # store volume eg in ROIv_scale33.nii.gz - out_roi = op.abspath('ROIv_%s.nii.gz' % parcellation_name) - iflogger.info('Save output image to %s', out_roi) + out_roi = op.abspath("ROIv_%s.nii.gz" % parcellation_name) + iflogger.info("Save output image to %s", out_roi) img = nb.Nifti1Image(rois, aseg.affine, hdr2) nb.save(img, out_roi) @@ -299,14 +468,16 @@ def create_roi(subject_id, subjects_dir, fs_dir, parcellation_name, dilation): def create_wm_mask(subject_id, subjects_dir, fs_dir, parcellation_name): import cmp import scipy.ndimage.morphology as nd + iflogger.info("Create white matter mask") fs_dir = op.join(subjects_dir, subject_id) cmp_config = cmp.configuration.PipelineConfiguration() cmp_config.parcellation_scheme = "Lausanne2008" - pgpath = cmp_config._get_lausanne_parcellation('Lausanne2008')[ - parcellation_name]['node_information_graphml'] + pgpath = cmp_config._get_lausanne_parcellation("Lausanne2008")[parcellation_name][ + "node_information_graphml" + ] # load ribbon as basis for white matter mask - fsmask = nb.load(op.join(fs_dir, 'mri', 'ribbon.nii.gz')) + fsmask = nb.load(op.join(fs_dir, "mri", "ribbon.nii.gz")) fsmaskd = fsmask.get_data() wmmask = np.zeros(fsmaskd.shape) @@ -318,7 +489,7 @@ def create_wm_mask(subject_id, subjects_dir, fs_dir, parcellation_name): wmmask[idx_rh] = 1 # remove subcortical nuclei from white matter mask - aseg = nb.load(op.join(fs_dir, 'mri', 'aseg.nii.gz')) + aseg = nb.load(op.join(fs_dir, "mri", 
"aseg.nii.gz")) asegd = aseg.get_data() # need binary erosion function @@ -340,21 +511,36 @@ def create_wm_mask(subject_id, subjects_dir, fs_dir, parcellation_name): # lateral ventricles, thalamus proper and caudate # the latter two removed for better erosion, but put back afterwards - idx = np.where((asegd == 4) | (asegd == 43) | (asegd == 11) | (asegd == 50) - | (asegd == 31) | (asegd == 63) | (asegd == 10) - | (asegd == 49)) + idx = np.where( + (asegd == 4) + | (asegd == 43) + | (asegd == 11) + | (asegd == 50) + | (asegd == 31) + | (asegd == 63) + | (asegd == 10) + | (asegd == 49) + ) csfA[idx] = 1 csfA = imerode(imerode(csfA, se1), se) # thalmus proper and cuadate are put back because they are not lateral ventricles - idx = np.where((asegd == 11) | (asegd == 50) | (asegd == 10) - | (asegd == 49)) + idx = np.where((asegd == 11) | (asegd == 50) | (asegd == 10) | (asegd == 49)) csfA[idx] = 0 # REST CSF, IE 3RD AND 4TH VENTRICULE AND EXTRACEREBRAL CSF - idx = np.where((asegd == 5) | (asegd == 14) | (asegd == 15) | (asegd == 24) - | (asegd == 44) | (asegd == 72) | (asegd == 75) - | (asegd == 76) | (asegd == 213) | (asegd == 221)) + idx = np.where( + (asegd == 5) + | (asegd == 14) + | (asegd == 15) + | (asegd == 24) + | (asegd == 44) + | (asegd == 72) + | (asegd == 75) + | (asegd == 76) + | (asegd == 213) + | (asegd == 221) + ) # 43 ??, 4?? 213?, 221? # more to discuss. for i in [5, 14, 15, 24, 44, 72, 75, 76, 213, 221]: @@ -389,75 +575,88 @@ def create_wm_mask(subject_id, subjects_dir, fs_dir, parcellation_name): remaining[idx] = 1 # now remove all the structures from the white matter - idx = np.where((csfA != 0) | (csfB != 0) | (gr_ncl != 0) - | (remaining != 0)) + idx = np.where((csfA != 0) | (csfB != 0) | (gr_ncl != 0) | (remaining != 0)) wmmask[idx] = 0 iflogger.info( "Removing lateral ventricles and eroded grey nuclei and brainstem from white matter mask" ) # ADD voxels from 'cc_unknown.nii.gz' dataset - ccun = nb.load(op.join(fs_dir, 'label', 'cc_unknown.nii.gz')) + ccun = nb.load(op.join(fs_dir, "label", "cc_unknown.nii.gz")) ccund = ccun.get_data() idx = np.where(ccund != 0) iflogger.info("Add corpus callosum and unknown to wm mask") wmmask[idx] = 1 # check if we should subtract the cortical rois from this parcellation - iflogger.info('Loading ROI_%s.nii.gz to subtract cortical ROIs from white ' - 'matter mask', parcellation_name) - roi = nb.load(op.join(op.curdir, 'ROI_%s.nii.gz' % parcellation_name)) + iflogger.info( + "Loading ROI_%s.nii.gz to subtract cortical ROIs from white " "matter mask", + parcellation_name, + ) + roi = nb.load(op.join(op.curdir, "ROI_%s.nii.gz" % parcellation_name)) roid = roi.get_data() assert roid.shape[0] == wmmask.shape[0] pg = nx.read_graphml(pgpath) for brk, brv in pg.nodes(data=True): - if brv['dn_region'] == 'cortical': - iflogger.info('Subtracting region %s with intensity value %s', - brv['dn_region'], brv['dn_correspondence_id']) - idx = np.where(roid == int(brv['dn_correspondence_id'])) + if brv["dn_region"] == "cortical": + iflogger.info( + "Subtracting region %s with intensity value %s", + brv["dn_region"], + brv["dn_correspondence_id"], + ) + idx = np.where(roid == int(brv["dn_correspondence_id"])) wmmask[idx] = 0 # output white matter mask. 
crop and move it afterwards - wm_out = op.join(fs_dir, 'mri', 'fsmask_1mm.nii.gz') + wm_out = op.join(fs_dir, "mri", "fsmask_1mm.nii.gz") img = nb.Nifti1Image(wmmask, fsmask.affine, fsmask.header) - iflogger.info('Save white matter mask: %s', wm_out) + iflogger.info("Save white matter mask: %s", wm_out) nb.save(img, wm_out) -def crop_and_move_datasets(subject_id, subjects_dir, fs_dir, parcellation_name, - out_roi_file, dilation): +def crop_and_move_datasets( + subject_id, subjects_dir, fs_dir, parcellation_name, out_roi_file, dilation +): from cmp.util import runCmd + fs_dir = op.join(subjects_dir, subject_id) cmp_config = cmp.configuration.PipelineConfiguration() cmp_config.parcellation_scheme = "Lausanne2008" log = cmp_config.get_logger() output_dir = op.abspath(op.curdir) - iflogger.info('Cropping and moving datasets to %s', output_dir) - ds = [(op.join(fs_dir, 'mri', 'aseg.nii.gz'), - op.abspath('aseg.nii.gz')), (op.join(fs_dir, 'mri', - 'ribbon.nii.gz'), - op.abspath('ribbon.nii.gz')), - (op.join(fs_dir, 'mri', 'fsmask_1mm.nii.gz'), - op.abspath('fsmask_1mm.nii.gz')), (op.join(fs_dir, 'label', - 'cc_unknown.nii.gz'), - op.abspath('cc_unknown.nii.gz'))] - - ds.append((op.abspath('ROI_%s.nii.gz' % parcellation_name), - op.abspath('ROI_HR_th.nii.gz'))) + iflogger.info("Cropping and moving datasets to %s", output_dir) + ds = [ + (op.join(fs_dir, "mri", "aseg.nii.gz"), op.abspath("aseg.nii.gz")), + (op.join(fs_dir, "mri", "ribbon.nii.gz"), op.abspath("ribbon.nii.gz")), + (op.join(fs_dir, "mri", "fsmask_1mm.nii.gz"), op.abspath("fsmask_1mm.nii.gz")), + ( + op.join(fs_dir, "label", "cc_unknown.nii.gz"), + op.abspath("cc_unknown.nii.gz"), + ), + ] + + ds.append( + ( + op.abspath("ROI_%s.nii.gz" % parcellation_name), + op.abspath("ROI_HR_th.nii.gz"), + ) + ) if dilation is True: - ds.append((op.abspath('ROIv_%s.nii.gz' % parcellation_name), - op.abspath('ROIv_HR_th.nii.gz'))) - orig = op.join(fs_dir, 'mri', 'orig', '001.mgz') + ds.append( + ( + op.abspath("ROIv_%s.nii.gz" % parcellation_name), + op.abspath("ROIv_HR_th.nii.gz"), + ) + ) + orig = op.join(fs_dir, "mri", "orig", "001.mgz") for d in ds: - iflogger.info('Processing %s:', d[0]) + iflogger.info("Processing %s:", d[0]) if not op.exists(d[0]): - raise Exception('File %s does not exist.' % d[0]) + raise Exception("File %s does not exist." 
% d[0]) # reslice to original volume because the roi creation with freesurfer # changed to 256x256x256 resolution - mri_cmd = 'mri_convert -rl "%s" -rt nearest "%s" -nc "%s"' % (orig, - d[0], - d[1]) + mri_cmd = 'mri_convert -rl "%s" -rt nearest "%s" -nc "%s"' % (orig, d[0], d[1]) runCmd(mri_cmd, log) @@ -473,68 +672,71 @@ def extract(Z, shape, position, fill): ------- R: the neighbourhood of the specified point in Z """ - R = np.ones(shape, dtype=Z.dtype) * \ - fill # initialize output block to the fill value - P = np.array(list(position)).astype( - int) # position coordinates(numpy array) - Rs = np.array(list(R.shape)).astype( - int) # output block dimensions (numpy array) - Zs = np.array(list(Z.shape)).astype( - int) # original volume dimensions (numpy array) + R = ( + np.ones(shape, dtype=Z.dtype) * fill + ) # initialize output block to the fill value + P = np.array(list(position)).astype(int) # position coordinates(numpy array) + Rs = np.array(list(R.shape)).astype(int) # output block dimensions (numpy array) + Zs = np.array(list(Z.shape)).astype(int) # original volume dimensions (numpy array) R_start = np.zeros(len(shape)).astype(int) R_stop = np.array(list(shape)).astype(int) - Z_start = (P - Rs // 2) + Z_start = P - Rs // 2 Z_start_cor = (np.maximum(Z_start, 0)).tolist() # handle borders R_start = R_start + (Z_start_cor - Z_start) Z_stop = (P + Rs // 2) + Rs % 2 Z_stop_cor = (np.minimum(Z_stop, Zs)).tolist() # handle borders R_stop = R_stop - (Z_stop - Z_stop_cor) - R[R_start[0]:R_stop[0], R_start[1]:R_stop[1], R_start[2]:R_stop[ - 2]] = Z[Z_start_cor[0]:Z_stop_cor[0], Z_start_cor[1]:Z_stop_cor[1], - Z_start_cor[2]:Z_stop_cor[2]] + R[R_start[0] : R_stop[0], R_start[1] : R_stop[1], R_start[2] : R_stop[2]] = Z[ + Z_start_cor[0] : Z_stop_cor[0], + Z_start_cor[1] : Z_stop_cor[1], + Z_start_cor[2] : Z_stop_cor[2], + ] return R class ParcellateInputSpec(BaseInterfaceInputSpec): - subject_id = traits.String(mandatory=True, desc='Subject ID') + subject_id = traits.String(mandatory=True, desc="Subject ID") parcellation_name = traits.Enum( - 'scale500', ['scale33', 'scale60', 'scale125', 'scale250', 'scale500'], - usedefault=True) - freesurfer_dir = Directory(exists=True, desc='Freesurfer main directory') - subjects_dir = Directory(exists=True, desc='Freesurfer subjects directory') + "scale500", + ["scale33", "scale60", "scale125", "scale250", "scale500"], + usedefault=True, + ) + freesurfer_dir = Directory(exists=True, desc="Freesurfer main directory") + subjects_dir = Directory(exists=True, desc="Freesurfer subjects directory") out_roi_file = File( - genfile=True, desc='Region of Interest file for connectivity mapping') + genfile=True, desc="Region of Interest file for connectivity mapping" + ) dilation = traits.Bool( False, usedefault=True, - desc='Dilate cortical parcels? Useful for fMRI connectivity') + desc="Dilate cortical parcels? 
Useful for fMRI connectivity", + ) class ParcellateOutputSpec(TraitedSpec): roi_file = File( - exists=True, desc='Region of Interest file for connectivity mapping') - roiv_file = File( - desc='Region of Interest file for fMRI connectivity mapping') - white_matter_mask_file = File(exists=True, desc='White matter mask file') + exists=True, desc="Region of Interest file for connectivity mapping" + ) + roiv_file = File(desc="Region of Interest file for fMRI connectivity mapping") + white_matter_mask_file = File(exists=True, desc="White matter mask file") cc_unknown_file = File( - desc='Image file with regions labelled as unknown cortical structures', - exists=True) - ribbon_file = File( - desc='Image file detailing the cortical ribbon', exists=True) + desc="Image file with regions labelled as unknown cortical structures", + exists=True, + ) + ribbon_file = File(desc="Image file detailing the cortical ribbon", exists=True) aseg_file = File( - desc= - 'Automated segmentation file converted from Freesurfer "subjects" directory', - exists=True) + desc='Automated segmentation file converted from Freesurfer "subjects" directory', + exists=True, + ) roi_file_in_structural_space = File( - desc= - 'ROI image resliced to the dimensions of the original structural image', - exists=True) + desc="ROI image resliced to the dimensions of the original structural image", + exists=True, + ) dilated_roi_file_in_structural_space = File( - desc= - 'dilated ROI image resliced to the dimensions of the original structural image' + desc="dilated ROI image resliced to the dimensions of the original structural image" ) @@ -560,53 +762,66 @@ class Parcellate(LibraryBaseInterface): input_spec = ParcellateInputSpec output_spec = ParcellateOutputSpec - _pkg = 'cmp' - imports = ('scipy', ) + _pkg = "cmp" + imports = ("scipy",) def _run_interface(self, runtime): if self.inputs.subjects_dir: - os.environ.update({'SUBJECTS_DIR': self.inputs.subjects_dir}) + os.environ.update({"SUBJECTS_DIR": self.inputs.subjects_dir}) if not os.path.exists( - op.join(self.inputs.subjects_dir, self.inputs.subject_id)): + op.join(self.inputs.subjects_dir, self.inputs.subject_id) + ): raise Exception iflogger.info("ROI_HR_th.nii.gz / fsmask_1mm.nii.gz CREATION") iflogger.info("=============================================") - create_annot_label(self.inputs.subject_id, self.inputs.subjects_dir, - self.inputs.freesurfer_dir, - self.inputs.parcellation_name) - create_roi(self.inputs.subject_id, self.inputs.subjects_dir, - self.inputs.freesurfer_dir, self.inputs.parcellation_name, - self.inputs.dilation) - create_wm_mask(self.inputs.subject_id, self.inputs.subjects_dir, - self.inputs.freesurfer_dir, - self.inputs.parcellation_name) + create_annot_label( + self.inputs.subject_id, + self.inputs.subjects_dir, + self.inputs.freesurfer_dir, + self.inputs.parcellation_name, + ) + create_roi( + self.inputs.subject_id, + self.inputs.subjects_dir, + self.inputs.freesurfer_dir, + self.inputs.parcellation_name, + self.inputs.dilation, + ) + create_wm_mask( + self.inputs.subject_id, + self.inputs.subjects_dir, + self.inputs.freesurfer_dir, + self.inputs.parcellation_name, + ) crop_and_move_datasets( - self.inputs.subject_id, self.inputs.subjects_dir, - self.inputs.freesurfer_dir, self.inputs.parcellation_name, - self.inputs.out_roi_file, self.inputs.dilation) + self.inputs.subject_id, + self.inputs.subjects_dir, + self.inputs.freesurfer_dir, + self.inputs.parcellation_name, + self.inputs.out_roi_file, + self.inputs.dilation, + ) return runtime def 
_list_outputs(self): outputs = self._outputs().get() if isdefined(self.inputs.out_roi_file): - outputs['roi_file'] = op.abspath(self.inputs.out_roi_file) + outputs["roi_file"] = op.abspath(self.inputs.out_roi_file) else: - outputs['roi_file'] = op.abspath( - self._gen_outfilename('nii.gz', 'ROI')) + outputs["roi_file"] = op.abspath(self._gen_outfilename("nii.gz", "ROI")) if self.inputs.dilation is True: - outputs['roiv_file'] = op.abspath( - self._gen_outfilename('nii.gz', 'ROIv')) - outputs['white_matter_mask_file'] = op.abspath('fsmask_1mm.nii.gz') - outputs['cc_unknown_file'] = op.abspath('cc_unknown.nii.gz') - outputs['ribbon_file'] = op.abspath('ribbon.nii.gz') - outputs['aseg_file'] = op.abspath('aseg.nii.gz') - outputs['roi_file_in_structural_space'] = op.abspath( - 'ROI_HR_th.nii.gz') + outputs["roiv_file"] = op.abspath(self._gen_outfilename("nii.gz", "ROIv")) + outputs["white_matter_mask_file"] = op.abspath("fsmask_1mm.nii.gz") + outputs["cc_unknown_file"] = op.abspath("cc_unknown.nii.gz") + outputs["ribbon_file"] = op.abspath("ribbon.nii.gz") + outputs["aseg_file"] = op.abspath("aseg.nii.gz") + outputs["roi_file_in_structural_space"] = op.abspath("ROI_HR_th.nii.gz") if self.inputs.dilation is True: - outputs['dilated_roi_file_in_structural_space'] = op.abspath( - 'ROIv_HR_th.nii.gz') + outputs["dilated_roi_file_in_structural_space"] = op.abspath( + "ROIv_HR_th.nii.gz" + ) return outputs - def _gen_outfilename(self, ext, prefix='ROI'): - return prefix + '_' + self.inputs.parcellation_name + '.' + ext + def _gen_outfilename(self, ext, prefix="ROI"): + return prefix + "_" + self.inputs.parcellation_name + "." + ext diff --git a/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py b/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py index 5ed036dabd..41d3f6ecce 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py +++ b/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py @@ -4,21 +4,23 @@ def test_AverageNetworks_inputs(): input_map = dict( - group_id=dict(usedefault=True, ), - in_files=dict(mandatory=True, ), - out_gexf_groupavg=dict(extensions=None, ), - out_gpickled_groupavg=dict(extensions=None, ), - resolution_network_file=dict(extensions=None, ), + group_id=dict(usedefault=True,), + in_files=dict(mandatory=True,), + out_gexf_groupavg=dict(extensions=None,), + out_gpickled_groupavg=dict(extensions=None,), + resolution_network_file=dict(extensions=None,), ) inputs = AverageNetworks.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_AverageNetworks_outputs(): output_map = dict( - gexf_groupavg=dict(extensions=None, ), - gpickled_groupavg=dict(extensions=None, ), + gexf_groupavg=dict(extensions=None,), + gpickled_groupavg=dict(extensions=None,), matlab_groupavgs=dict(), ) outputs = AverageNetworks.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py b/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py index 7a3f837709..43240defab 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py +++ b/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py @@ -6,7 +6,7 @@ def test_CFFConverter_inputs(): input_map = dict( creator=dict(), data_files=dict(), - description=dict(usedefault=True, ), + description=dict(usedefault=True,), email=dict(), gifti_labels=dict(), gifti_surfaces=dict(), @@ -14,16 +14,13 @@ def test_CFFConverter_inputs(): graphml_networks=dict(), license=dict(), 
nifti_volumes=dict(), - out_file=dict( - extensions=None, - usedefault=True, - ), + out_file=dict(extensions=None, usedefault=True,), publisher=dict(), references=dict(), relation=dict(), rights=dict(), script_files=dict(), - species=dict(usedefault=True, ), + species=dict(usedefault=True,), timeseries_files=dict(), title=dict(), tract_files=dict(), @@ -33,8 +30,10 @@ def test_CFFConverter_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CFFConverter_outputs(): - output_map = dict(connectome_file=dict(extensions=None, ), ) + output_map = dict(connectome_file=dict(extensions=None,),) outputs = CFFConverter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py b/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py index 4939b9301a..3e68292557 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py +++ b/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py @@ -4,73 +4,45 @@ def test_CreateMatrix_inputs(): input_map = dict( - count_region_intersections=dict(usedefault=True, ), - out_endpoint_array_name=dict( - extensions=None, - genfile=True, - ), - out_fiber_length_std_matrix_mat_file=dict( - extensions=None, - genfile=True, - ), - out_intersection_matrix_mat_file=dict( - extensions=None, - genfile=True, - ), - out_matrix_file=dict( - extensions=None, - genfile=True, - ), - out_matrix_mat_file=dict( - extensions=None, - usedefault=True, - ), - out_mean_fiber_length_matrix_mat_file=dict( - extensions=None, - genfile=True, - ), - out_median_fiber_length_matrix_mat_file=dict( - extensions=None, - genfile=True, - ), - resolution_network_file=dict( - extensions=None, - mandatory=True, - ), - roi_file=dict( - extensions=None, - mandatory=True, - ), - tract_file=dict( - extensions=None, - mandatory=True, - ), + count_region_intersections=dict(usedefault=True,), + out_endpoint_array_name=dict(extensions=None, genfile=True,), + out_fiber_length_std_matrix_mat_file=dict(extensions=None, genfile=True,), + out_intersection_matrix_mat_file=dict(extensions=None, genfile=True,), + out_matrix_file=dict(extensions=None, genfile=True,), + out_matrix_mat_file=dict(extensions=None, usedefault=True,), + out_mean_fiber_length_matrix_mat_file=dict(extensions=None, genfile=True,), + out_median_fiber_length_matrix_mat_file=dict(extensions=None, genfile=True,), + resolution_network_file=dict(extensions=None, mandatory=True,), + roi_file=dict(extensions=None, mandatory=True,), + tract_file=dict(extensions=None, mandatory=True,), ) inputs = CreateMatrix.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CreateMatrix_outputs(): output_map = dict( - endpoint_file=dict(extensions=None, ), - endpoint_file_mm=dict(extensions=None, ), - fiber_label_file=dict(extensions=None, ), - fiber_labels_noorphans=dict(extensions=None, ), - fiber_length_file=dict(extensions=None, ), - fiber_length_std_matrix_mat_file=dict(extensions=None, ), + endpoint_file=dict(extensions=None,), + endpoint_file_mm=dict(extensions=None,), + fiber_label_file=dict(extensions=None,), + fiber_labels_noorphans=dict(extensions=None,), + fiber_length_file=dict(extensions=None,), + fiber_length_std_matrix_mat_file=dict(extensions=None,), filtered_tractographies=dict(), - filtered_tractography=dict(extensions=None, ), - 
filtered_tractography_by_intersections=dict(extensions=None, ), - intersection_matrix_file=dict(extensions=None, ), - intersection_matrix_mat_file=dict(extensions=None, ), + filtered_tractography=dict(extensions=None,), + filtered_tractography_by_intersections=dict(extensions=None,), + intersection_matrix_file=dict(extensions=None,), + intersection_matrix_mat_file=dict(extensions=None,), matlab_matrix_files=dict(), - matrix_file=dict(extensions=None, ), + matrix_file=dict(extensions=None,), matrix_files=dict(), - matrix_mat_file=dict(extensions=None, ), - mean_fiber_length_matrix_mat_file=dict(extensions=None, ), - median_fiber_length_matrix_mat_file=dict(extensions=None, ), - stats_file=dict(extensions=None, ), + matrix_mat_file=dict(extensions=None,), + mean_fiber_length_matrix_mat_file=dict(extensions=None,), + median_fiber_length_matrix_mat_file=dict(extensions=None,), + stats_file=dict(extensions=None,), ) outputs = CreateMatrix.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py b/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py index 95b810459c..95023590d2 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py +++ b/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py @@ -4,26 +4,19 @@ def test_CreateNodes_inputs(): input_map = dict( - out_filename=dict( - extensions=None, - usedefault=True, - ), - resolution_network_file=dict( - extensions=None, - mandatory=True, - ), - roi_file=dict( - extensions=None, - mandatory=True, - ), + out_filename=dict(extensions=None, usedefault=True,), + resolution_network_file=dict(extensions=None, mandatory=True,), + roi_file=dict(extensions=None, mandatory=True,), ) inputs = CreateNodes.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CreateNodes_outputs(): - output_map = dict(node_network=dict(extensions=None, ), ) + output_map = dict(node_network=dict(extensions=None,),) outputs = CreateNodes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py b/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py index 3ba3588882..30aae80243 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py +++ b/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py @@ -4,19 +4,18 @@ def test_MergeCNetworks_inputs(): input_map = dict( - in_files=dict(mandatory=True, ), - out_file=dict( - extensions=None, - usedefault=True, - ), + in_files=dict(mandatory=True,), + out_file=dict(extensions=None, usedefault=True,), ) inputs = MergeCNetworks.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MergeCNetworks_outputs(): - output_map = dict(connectome_file=dict(extensions=None, ), ) + output_map = dict(connectome_file=dict(extensions=None,),) outputs = MergeCNetworks.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py b/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py index fe805cb134..af1c68fca2 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py +++ b/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py @@ -4,25 +4,27 @@ def test_NetworkBasedStatistic_inputs(): input_map = dict( - edge_key=dict(usedefault=True, ), - in_group1=dict(mandatory=True, ), - 
in_group2=dict(mandatory=True, ), - node_position_network=dict(extensions=None, ), - number_of_permutations=dict(usedefault=True, ), - out_nbs_network=dict(extensions=None, ), - out_nbs_pval_network=dict(extensions=None, ), - t_tail=dict(usedefault=True, ), - threshold=dict(usedefault=True, ), + edge_key=dict(usedefault=True,), + in_group1=dict(mandatory=True,), + in_group2=dict(mandatory=True,), + node_position_network=dict(extensions=None,), + number_of_permutations=dict(usedefault=True,), + out_nbs_network=dict(extensions=None,), + out_nbs_pval_network=dict(extensions=None,), + t_tail=dict(usedefault=True,), + threshold=dict(usedefault=True,), ) inputs = NetworkBasedStatistic.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_NetworkBasedStatistic_outputs(): output_map = dict( - nbs_network=dict(extensions=None, ), - nbs_pval_network=dict(extensions=None, ), + nbs_network=dict(extensions=None,), + nbs_pval_network=dict(extensions=None,), network_files=dict(), ) outputs = NetworkBasedStatistic.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py b/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py index 638fe596f5..820b447885 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py +++ b/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py @@ -4,61 +4,39 @@ def test_NetworkXMetrics_inputs(): input_map = dict( - compute_clique_related_measures=dict(usedefault=True, ), - in_file=dict( - extensions=None, - mandatory=True, - ), - out_edge_metrics_matlab=dict( - extensions=None, - genfile=True, - ), - out_global_metrics_matlab=dict( - extensions=None, - genfile=True, - ), - out_k_core=dict( - extensions=None, - usedefault=True, - ), - out_k_crust=dict( - extensions=None, - usedefault=True, - ), - out_k_shell=dict( - extensions=None, - usedefault=True, - ), - out_node_metrics_matlab=dict( - extensions=None, - genfile=True, - ), - out_pickled_extra_measures=dict( - extensions=None, - usedefault=True, - ), - treat_as_weighted_graph=dict(usedefault=True, ), + compute_clique_related_measures=dict(usedefault=True,), + in_file=dict(extensions=None, mandatory=True,), + out_edge_metrics_matlab=dict(extensions=None, genfile=True,), + out_global_metrics_matlab=dict(extensions=None, genfile=True,), + out_k_core=dict(extensions=None, usedefault=True,), + out_k_crust=dict(extensions=None, usedefault=True,), + out_k_shell=dict(extensions=None, usedefault=True,), + out_node_metrics_matlab=dict(extensions=None, genfile=True,), + out_pickled_extra_measures=dict(extensions=None, usedefault=True,), + treat_as_weighted_graph=dict(usedefault=True,), ) inputs = NetworkXMetrics.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_NetworkXMetrics_outputs(): output_map = dict( edge_measure_networks=dict(), - edge_measures_matlab=dict(extensions=None, ), - global_measures_matlab=dict(extensions=None, ), + edge_measures_matlab=dict(extensions=None,), + global_measures_matlab=dict(extensions=None,), gpickled_network_files=dict(), - k_core=dict(extensions=None, ), - k_crust=dict(extensions=None, ), + k_core=dict(extensions=None,), + k_crust=dict(extensions=None,), k_networks=dict(), - k_shell=dict(extensions=None, ), + k_shell=dict(extensions=None,), matlab_dict_measures=dict(), matlab_matrix_files=dict(), 
node_measure_networks=dict(), - node_measures_matlab=dict(extensions=None, ), - pickled_extra_measures=dict(extensions=None, ), + node_measures_matlab=dict(extensions=None,), + pickled_extra_measures=dict(extensions=None,), ) outputs = NetworkXMetrics.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py b/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py index f4d8eb8141..8c380c5704 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py +++ b/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py @@ -4,14 +4,11 @@ def test_Parcellate_inputs(): input_map = dict( - dilation=dict(usedefault=True, ), + dilation=dict(usedefault=True,), freesurfer_dir=dict(), - out_roi_file=dict( - extensions=None, - genfile=True, - ), - parcellation_name=dict(usedefault=True, ), - subject_id=dict(mandatory=True, ), + out_roi_file=dict(extensions=None, genfile=True,), + parcellation_name=dict(usedefault=True,), + subject_id=dict(mandatory=True,), subjects_dir=dict(), ) inputs = Parcellate.input_spec() @@ -19,16 +16,18 @@ def test_Parcellate_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Parcellate_outputs(): output_map = dict( - aseg_file=dict(extensions=None, ), - cc_unknown_file=dict(extensions=None, ), - dilated_roi_file_in_structural_space=dict(extensions=None, ), - ribbon_file=dict(extensions=None, ), - roi_file=dict(extensions=None, ), - roi_file_in_structural_space=dict(extensions=None, ), - roiv_file=dict(extensions=None, ), - white_matter_mask_file=dict(extensions=None, ), + aseg_file=dict(extensions=None,), + cc_unknown_file=dict(extensions=None,), + dilated_roi_file_in_structural_space=dict(extensions=None,), + ribbon_file=dict(extensions=None,), + roi_file=dict(extensions=None,), + roi_file_in_structural_space=dict(extensions=None,), + roiv_file=dict(extensions=None,), + white_matter_mask_file=dict(extensions=None,), ) outputs = Parcellate.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py b/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py index da70979685..2191f940ac 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py +++ b/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py @@ -4,34 +4,23 @@ def test_ROIGen_inputs(): input_map = dict( - LUT_file=dict( - extensions=None, - xor=['use_freesurfer_LUT'], - ), - aparc_aseg_file=dict( - extensions=None, - mandatory=True, - ), - freesurfer_dir=dict(requires=['use_freesurfer_LUT'], ), - out_dict_file=dict( - extensions=None, - genfile=True, - ), - out_roi_file=dict( - extensions=None, - genfile=True, - ), - use_freesurfer_LUT=dict(xor=['LUT_file'], ), + LUT_file=dict(extensions=None, xor=["use_freesurfer_LUT"],), + aparc_aseg_file=dict(extensions=None, mandatory=True,), + freesurfer_dir=dict(requires=["use_freesurfer_LUT"],), + out_dict_file=dict(extensions=None, genfile=True,), + out_roi_file=dict(extensions=None, genfile=True,), + use_freesurfer_LUT=dict(xor=["LUT_file"],), ) inputs = ROIGen.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ROIGen_outputs(): output_map = dict( - dict_file=dict(extensions=None, ), - roi_file=dict(extensions=None, ), + dict_file=dict(extensions=None,), roi_file=dict(extensions=None,), ) outputs = ROIGen.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_nbs.py b/nipype/interfaces/cmtk/tests/test_nbs.py 
index a03b00de0f..46da939f1a 100644 --- a/nipype/interfaces/cmtk/tests/test_nbs.py +++ b/nipype/interfaces/cmtk/tests/test_nbs.py @@ -6,7 +6,7 @@ have_cv = True try: - package_check('cviewer') + package_check("cviewer") except Exception as e: have_cv = False @@ -18,15 +18,14 @@ def creating_graphs(tmpdir): for idx, name in enumerate(graphnames): graph = np.random.rand(10, 10) G = nx.from_numpy_matrix(graph) - out_file = tmpdir.strpath + graphnames[idx] + '.pck' + out_file = tmpdir.strpath + graphnames[idx] + ".pck" # Save as pck file nx.write_gpickle(G, out_file) graphlist.append(out_file) return graphlist -@pytest.mark.skipif( - have_cv, reason="tests for import error, cviewer available") +@pytest.mark.skipif(have_cv, reason="tests for import error, cviewer available") def test_importerror(creating_graphs, tmpdir): tmpdir.chdir() graphlist = creating_graphs diff --git a/nipype/interfaces/dcm2nii.py b/nipype/interfaces/dcm2nii.py index 0e7c120df8..87d9b7b3df 100644 --- a/nipype/interfaces/dcm2nii.py +++ b/nipype/interfaces/dcm2nii.py @@ -8,19 +8,28 @@ from glob import iglob from ..utils.filemanip import split_filename -from .base import (CommandLine, CommandLineInputSpec, InputMultiPath, traits, - TraitedSpec, OutputMultiPath, isdefined, File, Directory, - PackageInfo) +from .base import ( + CommandLine, + CommandLineInputSpec, + InputMultiPath, + traits, + TraitedSpec, + OutputMultiPath, + isdefined, + File, + Directory, + PackageInfo, +) class Info(PackageInfo): """Handle dcm2niix version information""" - version_cmd = 'dcm2niix' + version_cmd = "dcm2niix" @staticmethod def parse_version(raw_info): - m = re.search(r'version (\S+)', raw_info) + m = re.search(r"version (\S+)", raw_info) return m.groups()[0] if m else None @@ -31,64 +40,63 @@ class Dcm2niiInputSpec(CommandLineInputSpec): position=-1, copyfile=False, mandatory=True, - xor=['source_dir']) + xor=["source_dir"], + ) source_dir = Directory( - exists=True, - argstr="%s", - position=-1, - mandatory=True, - xor=['source_names']) + exists=True, argstr="%s", position=-1, mandatory=True, xor=["source_names"] + ) anonymize = traits.Bool( - True, - argstr='-a', - usedefault=True, - desc="Remove identifying information") + True, argstr="-a", usedefault=True, desc="Remove identifying information" + ) config_file = File( exists=True, argstr="-b %s", genfile=True, - desc="Load settings from specified inifile") + desc="Load settings from specified inifile", + ) collapse_folders = traits.Bool( - True, argstr='-c', usedefault=True, desc="Collapse input folders") + True, argstr="-c", usedefault=True, desc="Collapse input folders" + ) date_in_filename = traits.Bool( - True, argstr='-d', usedefault=True, desc="Date in filename") + True, argstr="-d", usedefault=True, desc="Date in filename" + ) events_in_filename = traits.Bool( - True, - argstr='-e', - usedefault=True, - desc="Events (series/acq) in filename") + True, argstr="-e", usedefault=True, desc="Events (series/acq) in filename" + ) source_in_filename = traits.Bool( - False, argstr='-f', usedefault=True, desc="Source filename") + False, argstr="-f", usedefault=True, desc="Source filename" + ) gzip_output = traits.Bool( - False, argstr='-g', usedefault=True, desc="Gzip output (.gz)") + False, argstr="-g", usedefault=True, desc="Gzip output (.gz)" + ) id_in_filename = traits.Bool( - False, argstr='-i', usedefault=True, desc="ID in filename") + False, argstr="-i", usedefault=True, desc="ID in filename" + ) nii_output = traits.Bool( True, - argstr='-n', + argstr="-n", usedefault=True, - 
desc="Save as .nii - if no, create .hdr/.img pair") + desc="Save as .nii - if no, create .hdr/.img pair", + ) output_dir = Directory( exists=True, - argstr='-o %s', + argstr="-o %s", genfile=True, - desc="Output dir - if unspecified, source directory is used") + desc="Output dir - if unspecified, source directory is used", + ) protocol_in_filename = traits.Bool( - True, argstr='-p', usedefault=True, desc="Protocol in filename") - reorient = traits.Bool( - argstr='-r', desc="Reorient image to nearest orthogonal") + True, argstr="-p", usedefault=True, desc="Protocol in filename" + ) + reorient = traits.Bool(argstr="-r", desc="Reorient image to nearest orthogonal") spm_analyze = traits.Bool( - argstr='-s', xor=['nii_output'], desc="SPM2/Analyze not SPM5/NIfTI") + argstr="-s", xor=["nii_output"], desc="SPM2/Analyze not SPM5/NIfTI" + ) convert_all_pars = traits.Bool( - True, - argstr='-v', - usedefault=True, - desc="Convert every image in directory") + True, argstr="-v", usedefault=True, desc="Convert every image in directory" + ) reorient_and_crop = traits.Bool( - False, - argstr='-x', - usedefault=True, - desc="Reorient and crop 3D images") + False, argstr="-x", usedefault=True, desc="Reorient and crop 3D images" + ) class Dcm2niiOutputSpec(TraitedSpec): @@ -116,34 +124,46 @@ class Dcm2nii(CommandLine): input_spec = Dcm2niiInputSpec output_spec = Dcm2niiOutputSpec - _cmd = 'dcm2nii' + _cmd = "dcm2nii" def _format_arg(self, opt, spec, val): if opt in [ - 'anonymize', 'collapse_folders', 'date_in_filename', - 'events_in_filename', 'source_in_filename', 'gzip_output', - 'id_in_filename', 'nii_output', 'protocol_in_filename', - 'reorient', 'spm_analyze', 'convert_all_pars', - 'reorient_and_crop' + "anonymize", + "collapse_folders", + "date_in_filename", + "events_in_filename", + "source_in_filename", + "gzip_output", + "id_in_filename", + "nii_output", + "protocol_in_filename", + "reorient", + "spm_analyze", + "convert_all_pars", + "reorient_and_crop", ]: spec = deepcopy(spec) if val: - spec.argstr += ' y' + spec.argstr += " y" else: - spec.argstr += ' n' + spec.argstr += " n" val = True - if opt == 'source_names': + if opt == "source_names": return spec.argstr % val[0] return super(Dcm2nii, self)._format_arg(opt, spec, val) def _run_interface(self, runtime): self._config_created = False new_runtime = super(Dcm2nii, self)._run_interface(runtime) - (self.output_files, self.reoriented_files, - self.reoriented_and_cropped_files, self.bvecs, - self.bvals) = self._parse_stdout(new_runtime.stdout) + ( + self.output_files, + self.reoriented_files, + self.reoriented_and_cropped_files, + self.bvecs, + self.bvals, + ) = self._parse_stdout(new_runtime.stdout) if self._config_created: - os.remove('config.ini') + os.remove("config.ini") return new_runtime def _parse_stdout(self, stdout): @@ -158,12 +178,11 @@ def _parse_stdout(self, stdout): if not skip: out_file = None if line.startswith("Saving "): - out_file = line[len("Saving "):] + out_file = line[len("Saving ") :] elif line.startswith("GZip..."): # for gzipped output files are not absolute - fname = line[len("GZip..."):] - if len(files) and os.path.basename( - files[-1]) == fname[:-3]: + fname = line[len("GZip...") :] + if len(files) and os.path.basename(files[-1]) == fname[:-3]: # we are seeing a previously reported conversion # as being saved in gzipped form -- remove the # obsolete, uncompressed file @@ -171,7 +190,7 @@ def _parse_stdout(self, stdout): if isdefined(self.inputs.output_dir): output_dir = self.inputs.output_dir else: - output_dir = 
self._gen_filename('output_dir') + output_dir = self._gen_filename("output_dir") out_file = os.path.abspath(os.path.join(output_dir, fname)) elif line.startswith("Number of diffusion directions "): if last_added_file: @@ -183,15 +202,15 @@ def _parse_stdout(self, stdout): # just above for l in (bvecs, bvals): l[-1] = os.path.join( - os.path.dirname(l[-1]), - 'x%s' % (os.path.basename(l[-1]), )) - elif re.search('.*->(.*)', line): - val = re.search('.*->(.*)', line) + os.path.dirname(l[-1]), "x%s" % (os.path.basename(l[-1]),) + ) + elif re.search(".*->(.*)", line): + val = re.search(".*->(.*)", line) val = val.groups()[0] if isdefined(self.inputs.output_dir): output_dir = self.inputs.output_dir else: - output_dir = self._gen_filename('output_dir') + output_dir = self._gen_filename("output_dir") val = os.path.join(output_dir, val) if os.path.exists(val): out_file = val @@ -203,18 +222,22 @@ def _parse_stdout(self, stdout): continue if line.startswith("Reorienting as "): - reoriented_files.append(line[len("Reorienting as "):]) + reoriented_files.append(line[len("Reorienting as ") :]) skip = True continue elif line.startswith("Cropping NIfTI/Analyze image "): base, filename = os.path.split( - line[len("Cropping NIfTI/Analyze image "):]) + line[len("Cropping NIfTI/Analyze image ") :] + ) filename = "c" + filename - if os.path.exists(os.path.join( - base, filename)) or self.inputs.reorient_and_crop: + if ( + os.path.exists(os.path.join(base, filename)) + or self.inputs.reorient_and_crop + ): # if reorient&crop is true but the file doesn't exist, this errors when setting outputs reoriented_and_cropped_files.append( - os.path.join(base, filename)) + os.path.join(base, filename) + ) skip = True continue @@ -223,18 +246,17 @@ def _parse_stdout(self, stdout): def _list_outputs(self): outputs = self.output_spec().get() - outputs['converted_files'] = self.output_files - outputs['reoriented_files'] = self.reoriented_files - outputs[ - 'reoriented_and_cropped_files'] = self.reoriented_and_cropped_files - outputs['bvecs'] = self.bvecs - outputs['bvals'] = self.bvals + outputs["converted_files"] = self.output_files + outputs["reoriented_files"] = self.reoriented_files + outputs["reoriented_and_cropped_files"] = self.reoriented_and_cropped_files + outputs["bvecs"] = self.bvecs + outputs["bvals"] = self.bvals return outputs def _gen_filename(self, name): - if name == 'output_dir': + if name == "output_dir": return os.getcwd() - elif name == 'config_file': + elif name == "config_file": self._config_created = True config_file = "config.ini" with open(config_file, "w") as f: @@ -251,103 +273,103 @@ class Dcm2niixInputSpec(CommandLineInputSpec): position=-1, copyfile=False, mandatory=True, - desc=('A set of filenames to be converted. Note that the current ' - 'version (1.0.20180328) of dcm2niix converts any files in the ' - 'directory. To only convert specific files they should be in an ' - 'isolated directory'), - xor=['source_dir']) + desc=( + "A set of filenames to be converted. Note that the current " + "version (1.0.20180328) of dcm2niix converts any files in the " + "directory. 
To only convert specific files they should be in an " + "isolated directory" + ), + xor=["source_dir"], + ) source_dir = Directory( exists=True, argstr="%s", position=-1, mandatory=True, - desc='A directory containing dicom files to be converted', - xor=['source_names']) + desc="A directory containing dicom files to be converted", + xor=["source_names"], + ) out_filename = traits.Str( argstr="-f %s", desc="Output filename template (" - "%a=antenna (coil) number, " - "%c=comments, " - "%d=description, " - "%e=echo number, " - "%f=folder name, " - "%i=ID of patient, " - "%j=seriesInstanceUID, " - "%k=studyInstanceUID, " - "%m=manufacturer, " - "%n=name of patient, " - "%p=protocol, " - "%s=series number, " - "%t=time, " - "%u=acquisition number, " - "%v=vendor, " - "%x=study ID; " - "%z=sequence name)") + "%a=antenna (coil) number, " + "%c=comments, " + "%d=description, " + "%e=echo number, " + "%f=folder name, " + "%i=ID of patient, " + "%j=seriesInstanceUID, " + "%k=studyInstanceUID, " + "%m=manufacturer, " + "%n=name of patient, " + "%p=protocol, " + "%s=series number, " + "%t=time, " + "%u=acquisition number, " + "%v=vendor, " + "%x=study ID; " + "%z=sequence name)", + ) output_dir = Directory( - ".", - usedefault=True, - exists=True, - argstr='-o %s', - desc="Output directory") + ".", usedefault=True, exists=True, argstr="-o %s", desc="Output directory" + ) bids_format = traits.Bool( - True, - argstr='-b', - usedefault=True, - desc="Create a BIDS sidecar file") + True, argstr="-b", usedefault=True, desc="Create a BIDS sidecar file" + ) anon_bids = traits.Bool( - argstr='-ba', - requires=["bids_format"], - desc="Anonymize BIDS") + argstr="-ba", requires=["bids_format"], desc="Anonymize BIDS" + ) compress = traits.Enum( - 'y', 'i', 'n', '3', - argstr='-z %s', + "y", + "i", + "n", + "3", + argstr="-z %s", usedefault=True, - desc="Gzip compress images - [y=pigz, i=internal, n=no, 3=no,3D]") + desc="Gzip compress images - [y=pigz, i=internal, n=no, 3=no,3D]", + ) merge_imgs = traits.Bool( - False, - argstr='-m', - usedefault=True, - desc="merge 2D slices from same series") + False, argstr="-m", usedefault=True, desc="merge 2D slices from same series" + ) single_file = traits.Bool( - False, - argstr='-s', - usedefault=True, - desc="Single file mode") - verbose = traits.Bool( - False, - argstr='-v', - usedefault=True, - desc="Verbose output") + False, argstr="-s", usedefault=True, desc="Single file mode" + ) + verbose = traits.Bool(False, argstr="-v", usedefault=True, desc="Verbose output") crop = traits.Bool( - False, - argstr='-x', - usedefault=True, - desc="Crop 3D T1 acquisitions") + False, argstr="-x", usedefault=True, desc="Crop 3D T1 acquisitions" + ) has_private = traits.Bool( False, - argstr='-t', + argstr="-t", usedefault=True, - desc="Text notes including private patient details") + desc="Text notes including private patient details", + ) compression = traits.Enum( - 1, 2, 3, 4, 5, 6, 7, 8, 9, - argstr='-%d', - desc="Gz compression level (1=fastest, 9=smallest)") - comment = traits.Str( - argstr='-c %s', - desc="Comment stored as NIfTI aux_file") + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + argstr="-%d", + desc="Gz compression level (1=fastest, 9=smallest)", + ) + comment = traits.Str(argstr="-c %s", desc="Comment stored as NIfTI aux_file") ignore_deriv = traits.Bool( - argstr='-i', - desc="Ignore derived, localizer and 2D images") + argstr="-i", desc="Ignore derived, localizer and 2D images" + ) series_numbers = InputMultiPath( traits.Str(), - argstr='-n %s...', - 
desc="Selectively convert by series number - can be used up to 16 times") + argstr="-n %s...", + desc="Selectively convert by series number - can be used up to 16 times", + ) philips_float = traits.Bool( - argstr='-p', - desc="Philips precise float (not display) scaling") - to_nrrd = traits.Bool( - argstr="-e", - desc="Export as NRRD instead of NIfTI") + argstr="-p", desc="Philips precise float (not display) scaling" + ) + to_nrrd = traits.Bool(argstr="-e", desc="Export as NRRD instead of NIfTI") class Dcm2niixOutputSpec(TraitedSpec): @@ -388,7 +410,7 @@ class Dcm2niix(CommandLine): input_spec = Dcm2niixInputSpec output_spec = Dcm2niixOutputSpec - _cmd = 'dcm2niix' + _cmd = "dcm2niix" @property def version(self): @@ -396,25 +418,33 @@ def version(self): def _format_arg(self, opt, spec, val): bools = [ - 'bids_format', 'merge_imgs', 'single_file', 'verbose', 'crop', - 'has_private', 'anon_bids', 'ignore_deriv', 'philips_float', - 'to_nrrd', + "bids_format", + "merge_imgs", + "single_file", + "verbose", + "crop", + "has_private", + "anon_bids", + "ignore_deriv", + "philips_float", + "to_nrrd", ] if opt in bools: spec = deepcopy(spec) if val: - spec.argstr += ' y' + spec.argstr += " y" else: - spec.argstr += ' n' + spec.argstr += " n" val = True - if opt == 'source_names': - return spec.argstr % (os.path.dirname(val[0]) or '.') + if opt == "source_names": + return spec.argstr % (os.path.dirname(val[0]) or ".") return super(Dcm2niix, self)._format_arg(opt, spec, val) def _run_interface(self, runtime): # may use return code 1 despite conversion runtime = super(Dcm2niix, self)._run_interface( - runtime, correct_return_codes=(0, 1, )) + runtime, correct_return_codes=(0, 1,) + ) self._parse_files(self._parse_stdout(runtime.stdout)) return runtime @@ -422,7 +452,7 @@ def _parse_stdout(self, stdout): filenames = [] for line in stdout.split("\n"): if line.startswith("Convert "): # output - fname = str(re.search(r'\S+/\S+', line).group(0)) + fname = str(re.search(r"\S+/\S+", line).group(0)) filenames.append(os.path.abspath(fname)) return filenames @@ -438,10 +468,10 @@ def _parse_files(self, filenames): # search for relevant files, and sort accordingly for fl in search_files(filename, outtypes): if ( - fl.endswith(".nii") or - fl.endswith(".gz") or - fl.endswith(".nrrd") or - fl.endswith(".nhdr") + fl.endswith(".nii") + or fl.endswith(".gz") + or fl.endswith(".nrrd") + or fl.endswith(".nhdr") ): outfiles.append(fl) elif fl.endswith(".bval"): @@ -457,12 +487,13 @@ def _parse_files(self, filenames): def _list_outputs(self): outputs = self.output_spec().get() - outputs['converted_files'] = self.output_files - outputs['bvecs'] = self.bvecs - outputs['bvals'] = self.bvals - outputs['bids'] = self.bids + outputs["converted_files"] = self.output_files + outputs["bvecs"] = self.bvecs + outputs["bvals"] = self.bvals + outputs["bids"] = self.bids return outputs + # https://stackoverflow.com/a/4829130 def search_files(prefix, outtypes): return it.chain.from_iterable(iglob(prefix + outtype) for outtype in outtypes) diff --git a/nipype/interfaces/dcmstack.py b/nipype/interfaces/dcmstack.py index 711d84920f..d7223468c8 100644 --- a/nipype/interfaces/dcmstack.py +++ b/nipype/interfaces/dcmstack.py @@ -11,8 +11,17 @@ import nibabel as nb import imghdr -from .base import (TraitedSpec, DynamicTraitedSpec, InputMultiPath, File, - Directory, traits, BaseInterface, isdefined, Undefined) +from .base import ( + TraitedSpec, + DynamicTraitedSpec, + InputMultiPath, + File, + Directory, + traits, + BaseInterface, + isdefined, 
+ Undefined, +) from ..utils import NUMPY_MMAP have_dcmstack = True @@ -27,28 +36,28 @@ def sanitize_path_comp(path_comp): result = [] for char in path_comp: - if char not in string.letters + string.digits + '-_.': - result.append('_') + if char not in string.letters + string.digits + "-_.": + result.append("_") else: result.append(char) - return ''.join(result) + return "".join(result) class NiftiGeneratorBaseInputSpec(TraitedSpec): - out_format = traits.Str(desc="String which can be formatted with " - "meta data to create the output filename(s)") - out_ext = traits.Str( - '.nii.gz', usedefault=True, desc="Determines output file type") - out_path = Directory( - desc='output path, current working directory if not set') + out_format = traits.Str( + desc="String which can be formatted with " + "meta data to create the output filename(s)" + ) + out_ext = traits.Str(".nii.gz", usedefault=True, desc="Determines output file type") + out_path = Directory(desc="output path, current working directory if not set") class NiftiGeneratorBase(BaseInterface): - '''Base class for interfaces that produce Nifti files, potentially with - embedded meta data.''' + """Base class for interfaces that produce Nifti files, potentially with + embedded meta data.""" def _get_out_path(self, meta, idx=None): - '''Return the output path for the gernerated Nifti.''' + """Return the output path for the gernerated Nifti.""" if self.inputs.out_format: out_fmt = self.inputs.out_format else: @@ -56,16 +65,16 @@ def _get_out_path(self, meta, idx=None): # with the provided meta data. out_fmt = [] if idx is not None: - out_fmt.append('%03d' % idx) - if 'SeriesNumber' in meta: - out_fmt.append('%(SeriesNumber)03d') - if 'ProtocolName' in meta: - out_fmt.append('%(ProtocolName)s') - elif 'SeriesDescription' in meta: - out_fmt.append('%(SeriesDescription)s') + out_fmt.append("%03d" % idx) + if "SeriesNumber" in meta: + out_fmt.append("%(SeriesNumber)03d") + if "ProtocolName" in meta: + out_fmt.append("%(ProtocolName)s") + elif "SeriesDescription" in meta: + out_fmt.append("%(SeriesDescription)s") else: - out_fmt.append('sequence') - out_fmt = '-'.join(out_fmt) + out_fmt.append("sequence") + out_fmt = "-".join(out_fmt) out_fn = (out_fmt % meta) + self.inputs.out_ext out_fn = sanitize_path_comp(out_fn) @@ -90,16 +99,18 @@ class DcmStackInputSpec(NiftiGeneratorBaseInputSpec): InputMultiPath(File(exists=True)), Directory(exists=True), traits.Str(), - mandatory=True) + mandatory=True, + ) embed_meta = traits.Bool(desc="Embed DICOM meta data into result") - exclude_regexes = traits.List(desc="Meta data to exclude, suplementing " - "any default exclude filters") - include_regexes = traits.List(desc="Meta data to include, overriding any " - "exclude filters") + exclude_regexes = traits.List( + desc="Meta data to exclude, suplementing " "any default exclude filters" + ) + include_regexes = traits.List( + desc="Meta data to include, overriding any " "exclude filters" + ) force_read = traits.Bool( - True, - usedefault=True, - desc=('Force reading files without DICM marker')) + True, usedefault=True, desc=("Force reading files without DICM marker") + ) class DcmStackOutputSpec(TraitedSpec): @@ -107,7 +118,7 @@ class DcmStackOutputSpec(TraitedSpec): class DcmStack(NiftiGeneratorBase): - '''Create one Nifti file from a set of DICOM files. Can optionally embed + """Create one Nifti file from a set of DICOM files. Can optionally embed meta data. 
Example @@ -119,14 +130,15 @@ class DcmStack(NiftiGeneratorBase): >>> stacker.run() # doctest: +SKIP >>> result.outputs.out_file # doctest: +SKIP '/path/to/cwd/sequence.nii.gz' - ''' + """ + input_spec = DcmStackInputSpec output_spec = DcmStackOutputSpec def _get_filelist(self, trait_input): if isinstance(trait_input, (str, bytes)): if op.isdir(trait_input): - return glob(op.join(trait_input, '*.dcm')) + return glob(op.join(trait_input, "*.dcm")) else: return glob(trait_input) @@ -140,18 +152,17 @@ def _run_interface(self, runtime): exclude_regexes = dcmstack.default_key_excl_res if isdefined(self.inputs.exclude_regexes): exclude_regexes += self.inputs.exclude_regexes - meta_filter = dcmstack.make_key_regex_filter(exclude_regexes, - include_regexes) + meta_filter = dcmstack.make_key_regex_filter(exclude_regexes, include_regexes) stack = dcmstack.DicomStack(meta_filter=meta_filter) for src_path in src_paths: if not imghdr.what(src_path) == "gif": - src_dcm = dicom.read_file( - src_path, force=self.inputs.force_read) + src_dcm = dicom.read_file(src_path, force=self.inputs.force_read) stack.add_dcm(src_dcm) nii = stack.to_nifti(embed_meta=True) nw = NiftiWrapper(nii) - self.out_path = \ - self._get_out_path(nw.meta_ext.get_class_dict(('global', 'const'))) + self.out_path = self._get_out_path( + nw.meta_ext.get_class_dict(("global", "const")) + ) if not self.inputs.embed_meta: nw.remove_extension() nb.save(nii, self.out_path) @@ -168,8 +179,9 @@ class GroupAndStackOutputSpec(TraitedSpec): class GroupAndStack(DcmStack): - '''Create (potentially) multiple Nifti files for a set of DICOM files. - ''' + """Create (potentially) multiple Nifti files for a set of DICOM files. + """ + input_spec = DcmStackInputSpec output_spec = GroupAndStackOutputSpec @@ -180,7 +192,7 @@ def _run_interface(self, runtime): self.out_list = [] for key, stack in list(stacks.items()): nw = NiftiWrapper(stack.to_nifti(embed_meta=True)) - const_meta = nw.meta_ext.get_class_dict(('global', 'const')) + const_meta = nw.meta_ext.get_class_dict(("global", "const")) out_path = self._get_out_path(const_meta) if not self.inputs.embed_meta: nw.remove_extension() @@ -196,19 +208,22 @@ def _list_outputs(self): class LookupMetaInputSpec(TraitedSpec): - in_file = File(mandatory=True, exists=True, desc='The input Nifti file') + in_file = File(mandatory=True, exists=True, desc="The input Nifti file") meta_keys = traits.Either( traits.List(), traits.Dict(), mandatory=True, - desc=("List of meta data keys to lookup, or a " - "dict where keys specify the meta data " - "keys to lookup and the values specify " - "the output names")) + desc=( + "List of meta data keys to lookup, or a " + "dict where keys specify the meta data " + "keys to lookup and the values specify " + "the output names" + ), + ) class LookupMeta(BaseInterface): - '''Lookup meta data values from a Nifti with embedded meta data. + """Lookup meta data values from a Nifti with embedded meta data. Example ------- @@ -223,7 +238,8 @@ class LookupMeta(BaseInterface): 9500.0 >>> result.outputs.TE # doctest: +SKIP 95.0 - ''' + """ + input_spec = LookupMetaInputSpec output_spec = DynamicTraitedSpec @@ -267,11 +283,14 @@ def _list_outputs(self): class CopyMetaInputSpec(TraitedSpec): src_file = File(mandatory=True, exists=True) dest_file = File(mandatory=True, exists=True) - include_classes = traits.List(desc="List of specific meta data " - "classifications to include. 
If not " - "specified include everything.") - exclude_classes = traits.List(desc="List of meta data " - "classifications to exclude") + include_classes = traits.List( + desc="List of specific meta data " + "classifications to include. If not " + "specified include everything." + ) + exclude_classes = traits.List( + desc="List of meta data " "classifications to exclude" + ) class CopyMetaOutputSpec(TraitedSpec): @@ -279,8 +298,9 @@ class CopyMetaOutputSpec(TraitedSpec): class CopyMeta(BaseInterface): - '''Copy meta data from one Nifti file to another. Useful for preserving - meta data after some processing steps.''' + """Copy meta data from one Nifti file to another. Useful for preserving + meta data after some processing steps.""" + input_spec = CopyMetaInputSpec output_spec = CopyMetaOutputSpec @@ -291,14 +311,9 @@ def _run_interface(self, runtime): dest = NiftiWrapper(dest_nii, make_empty=True) classes = src.meta_ext.get_valid_classes() if self.inputs.include_classes: - classes = [ - cls for cls in classes if cls in self.inputs.include_classes - ] + classes = [cls for cls in classes if cls in self.inputs.include_classes] if self.inputs.exclude_classes: - classes = [ - cls for cls in classes - if cls not in self.inputs.exclude_classes - ] + classes = [cls for cls in classes if cls not in self.inputs.exclude_classes] for cls in classes: src_dict = src.meta_ext.get_class_dict(cls) @@ -309,15 +324,14 @@ def _run_interface(self, runtime): dest.meta_ext.slice_dim = src.meta_ext.slice_dim dest.meta_ext.shape = src.meta_ext.shape - self.out_path = op.join(os.getcwd(), op.basename( - self.inputs.dest_file)) + self.out_path = op.join(os.getcwd(), op.basename(self.inputs.dest_file)) dest.to_filename(self.out_path) return runtime def _list_outputs(self): outputs = self._outputs().get() - outputs['dest_file'] = self.out_path + outputs["dest_file"] = self.out_path return outputs @@ -326,11 +340,13 @@ class MergeNiftiInputSpec(NiftiGeneratorBaseInputSpec): sort_order = traits.Either( traits.Str(), traits.List(), - desc="One or more meta data keys to " - "sort files by.") - merge_dim = traits.Int(desc="Dimension to merge along. If not " - "specified, the last singular or " - "non-existant dimension is used.") + desc="One or more meta data keys to " "sort files by.", + ) + merge_dim = traits.Int( + desc="Dimension to merge along. If not " + "specified, the last singular or " + "non-existant dimension is used." + ) class MergeNiftiOutputSpec(TraitedSpec): @@ -346,8 +362,9 @@ def key_func(src_nii): class MergeNifti(NiftiGeneratorBase): - '''Merge multiple Nifti files into one. Merges together meta data - extensions as well.''' + """Merge multiple Nifti files into one. Merges together meta data + extensions as well.""" + input_spec = MergeNiftiInputSpec output_spec = MergeNiftiOutputSpec @@ -364,21 +381,23 @@ def _run_interface(self, runtime): else: merge_dim = self.inputs.merge_dim merged = NiftiWrapper.from_sequence(nws, merge_dim) - const_meta = merged.meta_ext.get_class_dict(('global', 'const')) + const_meta = merged.meta_ext.get_class_dict(("global", "const")) self.out_path = self._get_out_path(const_meta) nb.save(merged.nii_img, self.out_path) return runtime def _list_outputs(self): outputs = self._outputs().get() - outputs['out_file'] = self.out_path + outputs["out_file"] = self.out_path return outputs class SplitNiftiInputSpec(NiftiGeneratorBaseInputSpec): in_file = File(exists=True, mandatory=True, desc="Nifti file to split") - split_dim = traits.Int(desc="Dimension to split along. 
If not " - "specified, the last dimension is used.") + split_dim = traits.Int( + desc="Dimension to split along. If not " + "specified, the last dimension is used." + ) class SplitNiftiOutputSpec(TraitedSpec): @@ -386,10 +405,11 @@ class SplitNiftiOutputSpec(TraitedSpec): class SplitNifti(NiftiGeneratorBase): - ''' + """ Split one Nifti file into many along the specified dimension. Each result has an updated meta data extension as well. - ''' + """ + input_spec = SplitNiftiInputSpec output_spec = SplitNiftiOutputSpec @@ -403,7 +423,7 @@ def _run_interface(self, runtime): else: split_dim = self.inputs.split_dim for split_idx, split_nw in enumerate(nw.split(split_dim)): - const_meta = split_nw.meta_ext.get_class_dict(('global', 'const')) + const_meta = split_nw.meta_ext.get_class_dict(("global", "const")) out_path = self._get_out_path(const_meta, idx=split_idx) nb.save(split_nw.nii_img, out_path) self.out_list.append(out_path) @@ -412,5 +432,5 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self._outputs().get() - outputs['out_list'] = self.out_list + outputs["out_list"] = self.out_list return outputs diff --git a/nipype/interfaces/diffusion_toolkit/base.py b/nipype/interfaces/diffusion_toolkit/base.py index bac8e781d1..02c7deceb1 100644 --- a/nipype/interfaces/diffusion_toolkit/base.py +++ b/nipype/interfaces/diffusion_toolkit/base.py @@ -16,7 +16,7 @@ import re from ..base import CommandLine -__docformat__ = 'restructuredtext' +__docformat__ = "restructuredtext" class Info(object): @@ -45,13 +45,12 @@ def version(): Version number as string or None if FSL not found """ - clout = CommandLine( - command='dti_recon', terminal_output='allatonce').run() + clout = CommandLine(command="dti_recon", terminal_output="allatonce").run() if clout.runtime.returncode is not 0: return None dtirecon = clout.runtime.stdout - result = re.search('dti_recon (.*)\n', dtirecon) + result = re.search("dti_recon (.*)\n", dtirecon) version = result.group(0).split()[1] return version diff --git a/nipype/interfaces/diffusion_toolkit/dti.py b/nipype/interfaces/diffusion_toolkit/dti.py index 793641a5d7..765ef6d8b9 100644 --- a/nipype/interfaces/diffusion_toolkit/dti.py +++ b/nipype/interfaces/diffusion_toolkit/dti.py @@ -7,37 +7,41 @@ import re from ...utils.filemanip import fname_presuffix, split_filename, copyfile -from ..base import (TraitedSpec, File, traits, CommandLine, - CommandLineInputSpec, isdefined) +from ..base import ( + TraitedSpec, + File, + traits, + CommandLine, + CommandLineInputSpec, + isdefined, +) -__docformat__ = 'restructuredtext' +__docformat__ = "restructuredtext" class DTIReconInputSpec(CommandLineInputSpec): DWI = File( - desc='Input diffusion volume', - argstr='%s', + desc="Input diffusion volume", + argstr="%s", exists=True, mandatory=True, - position=1) + position=1, + ) out_prefix = traits.Str( - "dti", - desc='Output file prefix', - argstr='%s', - usedefault=True, - position=2) + "dti", desc="Output file prefix", argstr="%s", usedefault=True, position=2 + ) output_type = traits.Enum( - 'nii', - 'analyze', - 'ni1', - 'nii.gz', - argstr='-ot %s', - desc='output file type', - usedefault=True) - bvecs = File( - exists=True, desc='b vectors file', argstr='-gm %s', mandatory=True) - bvals = File(exists=True, desc='b values file', mandatory=True) - n_averages = traits.Int(desc='Number of averages', argstr='-nex %s') + "nii", + "analyze", + "ni1", + "nii.gz", + argstr="-ot %s", + desc="output file type", + usedefault=True, + ) + bvecs = File(exists=True, desc="b vectors 
file", argstr="-gm %s", mandatory=True) + bvals = File(exists=True, desc="b values file", mandatory=True) + n_averages = traits.Int(desc="Number of averages", argstr="-nex %s") image_orientation_vectors = traits.List( traits.Float(), minlen=6, @@ -49,18 +53,20 @@ class DTIReconInputSpec(CommandLineInputSpec): one automatically. this information will be used to determine image orientation, as well as to adjust gradient vectors with oblique angle when""", - argstr="-iop %f") + argstr="-iop %f", + ) oblique_correction = traits.Bool( desc="""when oblique angle(s) applied, some SIEMENS dti protocols do not adjust gradient accordingly, thus it requires adjustment for correct diffusion tensor calculation""", - argstr="-oc") + argstr="-oc", + ) b0_threshold = traits.Float( - desc= - """program will use b0 image with the given threshold to mask out high + desc="""program will use b0 image with the given threshold to mask out high background of fa/adc maps. by default it will calculate threshold automatically. but if it failed, you need to set it manually.""", - argstr="-b0_th") + argstr="-b0_th", + ) class DTIReconOutputSpec(TraitedSpec): @@ -85,28 +91,27 @@ class DTIRecon(CommandLine): input_spec = DTIReconInputSpec output_spec = DTIReconOutputSpec - _cmd = 'dti_recon' + _cmd = "dti_recon" def _create_gradient_matrix(self, bvecs_file, bvals_file): - _gradient_matrix_file = 'gradient_matrix.txt' + _gradient_matrix_file = "gradient_matrix.txt" with open(bvals_file) as fbvals: - bvals = [val for val in re.split(r'\s+', fbvals.readline().strip())] + bvals = [val for val in re.split(r"\s+", fbvals.readline().strip())] with open(bvecs_file) as fbvecs: - bvecs_x = fbvecs.readline().split() - bvecs_y = fbvecs.readline().split() - bvecs_z = fbvecs.readline().split() + bvecs_x = fbvecs.readline().split() + bvecs_y = fbvecs.readline().split() + bvecs_z = fbvecs.readline().split() - with open(_gradient_matrix_file, 'w') as gradient_matrix_f: + with open(_gradient_matrix_file, "w") as gradient_matrix_f: for i in range(len(bvals)): - gradient_matrix_f.write("%s, %s, %s, %s\n" % - (bvecs_x[i], bvecs_y[i], bvecs_z[i], - bvals[i])) + gradient_matrix_f.write( + "%s, %s, %s, %s\n" % (bvecs_x[i], bvecs_y[i], bvecs_z[i], bvals[i]) + ) return _gradient_matrix_file def _format_arg(self, name, spec, value): if name == "bvecs": - new_val = self._create_gradient_matrix(self.inputs.bvecs, - self.inputs.bvals) + new_val = self._create_gradient_matrix(self.inputs.bvecs, self.inputs.bvals) return super(DTIRecon, self)._format_arg("bvecs", spec, new_val) return super(DTIRecon, self)._format_arg(name, spec, value) @@ -115,42 +120,42 @@ def _list_outputs(self): output_type = self.inputs.output_type outputs = self.output_spec().get() - outputs['ADC'] = os.path.abspath( - fname_presuffix( - "", prefix=out_prefix, suffix='_adc.' + output_type)) - outputs['B0'] = os.path.abspath( - fname_presuffix( - "", prefix=out_prefix, suffix='_b0.' + output_type)) - outputs['L1'] = os.path.abspath( - fname_presuffix( - "", prefix=out_prefix, suffix='_e1.' + output_type)) - outputs['L2'] = os.path.abspath( - fname_presuffix( - "", prefix=out_prefix, suffix='_e2.' + output_type)) - outputs['L3'] = os.path.abspath( - fname_presuffix( - "", prefix=out_prefix, suffix='_e3.' + output_type)) - outputs['exp'] = os.path.abspath( - fname_presuffix( - "", prefix=out_prefix, suffix='_exp.' + output_type)) - outputs['FA'] = os.path.abspath( - fname_presuffix( - "", prefix=out_prefix, suffix='_fa.' 
+ output_type)) - outputs['FA_color'] = os.path.abspath( - fname_presuffix( - "", prefix=out_prefix, suffix='_fa_color.' + output_type)) - outputs['tensor'] = os.path.abspath( - fname_presuffix( - "", prefix=out_prefix, suffix='_tensor.' + output_type)) - outputs['V1'] = os.path.abspath( - fname_presuffix( - "", prefix=out_prefix, suffix='_v1.' + output_type)) - outputs['V2'] = os.path.abspath( - fname_presuffix( - "", prefix=out_prefix, suffix='_v2.' + output_type)) - outputs['V3'] = os.path.abspath( - fname_presuffix( - "", prefix=out_prefix, suffix='_v3.' + output_type)) + outputs["ADC"] = os.path.abspath( + fname_presuffix("", prefix=out_prefix, suffix="_adc." + output_type) + ) + outputs["B0"] = os.path.abspath( + fname_presuffix("", prefix=out_prefix, suffix="_b0." + output_type) + ) + outputs["L1"] = os.path.abspath( + fname_presuffix("", prefix=out_prefix, suffix="_e1." + output_type) + ) + outputs["L2"] = os.path.abspath( + fname_presuffix("", prefix=out_prefix, suffix="_e2." + output_type) + ) + outputs["L3"] = os.path.abspath( + fname_presuffix("", prefix=out_prefix, suffix="_e3." + output_type) + ) + outputs["exp"] = os.path.abspath( + fname_presuffix("", prefix=out_prefix, suffix="_exp." + output_type) + ) + outputs["FA"] = os.path.abspath( + fname_presuffix("", prefix=out_prefix, suffix="_fa." + output_type) + ) + outputs["FA_color"] = os.path.abspath( + fname_presuffix("", prefix=out_prefix, suffix="_fa_color." + output_type) + ) + outputs["tensor"] = os.path.abspath( + fname_presuffix("", prefix=out_prefix, suffix="_tensor." + output_type) + ) + outputs["V1"] = os.path.abspath( + fname_presuffix("", prefix=out_prefix, suffix="_v1." + output_type) + ) + outputs["V2"] = os.path.abspath( + fname_presuffix("", prefix=out_prefix, suffix="_v2." + output_type) + ) + outputs["V3"] = os.path.abspath( + fname_presuffix("", prefix=out_prefix, suffix="_v3." + output_type) + ) return outputs @@ -158,89 +163,87 @@ def _list_outputs(self): class DTITrackerInputSpec(CommandLineInputSpec): tensor_file = File(exists=True, desc="reconstructed tensor file") input_type = traits.Enum( - 'nii', - 'analyze', - 'ni1', - 'nii.gz', + "nii", + "analyze", + "ni1", + "nii.gz", desc="""input and output file type. accepted values are: analyze -> analyze format 7.5 ni1 -> nifti format saved in seperate .hdr and .img file nii -> nifti format with one .nii file nii.gz -> nifti format with compression default type is 'nii'""", - argstr="-it %s") + argstr="-it %s", + ) tracking_method = traits.Enum( - 'fact', - 'rk2', - 'tl', - 'sl', + "fact", + "rk2", + "tl", + "sl", desc="""fact -> use FACT method for tracking. this is the default method. rk2 -> use 2nd order runge-kutta method for tracking. tl -> use tensorline method for tracking. sl -> use interpolated streamline method with fixed step-length""", - argstr="-%s") + argstr="-%s", + ) step_length = traits.Float( desc="""set step length, in the unit of minimum voxel size. default value is 0.5 for interpolated streamline method and 0.1 for other methods""", - argstr="-l %f") + argstr="-l %f", + ) angle_threshold = traits.Float( - desc="set angle threshold. default value is 35 degree", - argstr="-at %f") + desc="set angle threshold. default value is 35 degree", argstr="-at %f" + ) angle_threshold_weight = traits.Float( - desc= - "set angle threshold weighting factor. weighting will be be applied \ + desc="set angle threshold weighting factor. 
weighting will be be applied \ on top of the angle_threshold", - argstr="-atw %f") + argstr="-atw %f", + ) random_seed = traits.Int( desc="use random location in a voxel instead of the center of the voxel \ to seed. can also define number of seed per voxel. default is 1", - argstr="-rseed %d") - invert_x = traits.Bool( - desc="invert x component of the vector", argstr="-ix") - invert_y = traits.Bool( - desc="invert y component of the vector", argstr="-iy") - invert_z = traits.Bool( - desc="invert z component of the vector", argstr="-iz") - swap_xy = traits.Bool( - desc="swap x & y vectors while tracking", argstr="-sxy") - swap_yz = traits.Bool( - desc="swap y & z vectors while tracking", argstr="-syz") - swap_zx = traits.Bool( - desc="swap x & z vectors while tracking", argstr="-szx") + argstr="-rseed %d", + ) + invert_x = traits.Bool(desc="invert x component of the vector", argstr="-ix") + invert_y = traits.Bool(desc="invert y component of the vector", argstr="-iy") + invert_z = traits.Bool(desc="invert z component of the vector", argstr="-iz") + swap_xy = traits.Bool(desc="swap x & y vectors while tracking", argstr="-sxy") + swap_yz = traits.Bool(desc="swap y & z vectors while tracking", argstr="-syz") + swap_zx = traits.Bool(desc="swap x & z vectors while tracking", argstr="-szx") mask1_file = File( - desc="first mask image", mandatory=True, argstr="-m %s", position=2) + desc="first mask image", mandatory=True, argstr="-m %s", position=2 + ) mask1_threshold = traits.Float( - desc= - "threshold value for the first mask image, if not given, the program will \ + desc="threshold value for the first mask image, if not given, the program will \ try automatically find the threshold", - position=3) + position=3, + ) mask2_file = File(desc="second mask image", argstr="-m2 %s", position=4) mask2_threshold = traits.Float( - desc= - "threshold value for the second mask image, if not given, the program will \ + desc="threshold value for the second mask image, if not given, the program will \ try automatically find the threshold", - position=5) + position=5, + ) input_data_prefix = traits.Str( "dti", desc="for internal naming use only", position=0, argstr="%s", - usedefault=True) + usedefault=True, + ) output_file = File( - "tracks.trk", - "file containing tracks", - argstr="%s", - position=1, - usedefault=True) + "tracks.trk", "file containing tracks", argstr="%s", position=1, usedefault=True + ) output_mask = File( - desc="output a binary mask file in analyze format", argstr="-om %s") + desc="output a binary mask file in analyze format", argstr="-om %s" + ) primary_vector = traits.Enum( - 'v2', - 'v3', - desc= - "which vector to use for fibre tracking: v2 or v3. If not set use v1", - argstr="-%s") + "v2", + "v3", + desc="which vector to use for fibre tracking: v2 or v3. 
If not set use v1", + argstr="-%s", + ) class DTITrackerOutputSpec(TraitedSpec): @@ -252,21 +255,22 @@ class DTITracker(CommandLine): input_spec = DTITrackerInputSpec output_spec = DTITrackerOutputSpec - _cmd = 'dti_tracker' + _cmd = "dti_tracker" def _run_interface(self, runtime): _, _, ext = split_filename(self.inputs.tensor_file) copyfile( self.inputs.tensor_file, os.path.abspath(self.inputs.input_data_prefix + "_tensor" + ext), - copy=False) + copy=False, + ) return super(DTITracker, self)._run_interface(runtime) def _list_outputs(self): outputs = self.output_spec().get() - outputs['track_file'] = os.path.abspath(self.inputs.output_file) + outputs["track_file"] = os.path.abspath(self.inputs.output_file) if isdefined(self.inputs.output_mask) and self.inputs.output_mask: - outputs['mask_file'] = os.path.abspath(self.inputs.output_mask) + outputs["mask_file"] = os.path.abspath(self.inputs.output_mask) return outputs diff --git a/nipype/interfaces/diffusion_toolkit/odf.py b/nipype/interfaces/diffusion_toolkit/odf.py index 9c2d6d2505..705a4a5d33 100644 --- a/nipype/interfaces/diffusion_toolkit/odf.py +++ b/nipype/interfaces/diffusion_toolkit/odf.py @@ -7,54 +7,59 @@ import re from ...utils.filemanip import fname_presuffix, split_filename, copyfile -from ..base import (TraitedSpec, File, traits, CommandLine, - CommandLineInputSpec, isdefined) +from ..base import ( + TraitedSpec, + File, + traits, + CommandLine, + CommandLineInputSpec, + isdefined, +) -__docformat__ = 'restructuredtext' +__docformat__ = "restructuredtext" class HARDIMatInputSpec(CommandLineInputSpec): bvecs = File( - exists=True, - desc='b vectors file', - argstr='%s', - position=1, - mandatory=True) - bvals = File(exists=True, desc='b values file', mandatory=True) + exists=True, desc="b vectors file", argstr="%s", position=1, mandatory=True + ) + bvals = File(exists=True, desc="b values file", mandatory=True) out_file = File( "recon_mat.dat", - desc='output matrix file', - argstr='%s', + desc="output matrix file", + argstr="%s", usedefault=True, - position=2) + position=2, + ) order = traits.Int( - argstr='-order %s', - desc= - """maximum order of spherical harmonics. must be even number. default - is 4""") + argstr="-order %s", + desc="""maximum order of spherical harmonics. must be even number. default + is 4""", + ) odf_file = File( exists=True, - argstr='-odf %s', - desc= - """filename that contains the reconstruction points on a HEMI-sphere. - use the pre-set 181 points by default""") + argstr="-odf %s", + desc="""filename that contains the reconstruction points on a HEMI-sphere. + use the pre-set 181 points by default""", + ) reference_file = File( exists=True, - argstr='-ref %s', - desc= - """provide a dicom or nifti image as the reference for the program to + argstr="-ref %s", + desc="""provide a dicom or nifti image as the reference for the program to figure out the image orientation information. if no such info was found in the given image header, the next 5 options -info, etc., will be used if provided. if image orientation info can be found in the given reference, all other 5 image orientation options will - be IGNORED""") + be IGNORED""", + ) image_info = File( exists=True, - argstr='-info %s', + argstr="-info %s", desc="""specify image information file. the image info file is generated from original dicom image by diff_unpack program and contains image orientation and other information needed for reconstruction and - tracking. by default will look into the image folder for .info file""") + tracking. 
by default will look into the image folder for .info file""", + ) image_orientation_vectors = traits.List( traits.Float(), minlen=6, @@ -66,112 +71,107 @@ class HARDIMatInputSpec(CommandLineInputSpec): one automatically. this information will be used to determine image orientation, as well as to adjust gradient vectors with oblique angle when""", - argstr="-iop %f") + argstr="-iop %f", + ) oblique_correction = traits.Bool( desc="""when oblique angle(s) applied, some SIEMENS dti protocols do not adjust gradient accordingly, thus it requires adjustment for correct diffusion tensor calculation""", - argstr="-oc") + argstr="-oc", + ) class HARDIMatOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='output matrix file') + out_file = File(exists=True, desc="output matrix file") class HARDIMat(CommandLine): """Use hardi_mat to calculate a reconstruction matrix from a gradient table """ + input_spec = HARDIMatInputSpec output_spec = HARDIMatOutputSpec - _cmd = 'hardi_mat' + _cmd = "hardi_mat" def _create_gradient_matrix(self, bvecs_file, bvals_file): - _gradient_matrix_file = 'gradient_matrix.txt' - bvals = [ - val for val in re.split('\s+', - open(bvals_file).readline().strip()) - ] + _gradient_matrix_file = "gradient_matrix.txt" + bvals = [val for val in re.split("\s+", open(bvals_file).readline().strip())] bvecs_f = open(bvecs_file) - bvecs_x = [val for val in re.split('\s+', bvecs_f.readline().strip())] - bvecs_y = [val for val in re.split('\s+', bvecs_f.readline().strip())] - bvecs_z = [val for val in re.split('\s+', bvecs_f.readline().strip())] + bvecs_x = [val for val in re.split("\s+", bvecs_f.readline().strip())] + bvecs_y = [val for val in re.split("\s+", bvecs_f.readline().strip())] + bvecs_z = [val for val in re.split("\s+", bvecs_f.readline().strip())] bvecs_f.close() - gradient_matrix_f = open(_gradient_matrix_file, 'w') + gradient_matrix_f = open(_gradient_matrix_file, "w") for i in range(len(bvals)): if int(bvals[i]) == 0: continue - gradient_matrix_f.write("%s %s %s\n" % (bvecs_x[i], bvecs_y[i], - bvecs_z[i])) + gradient_matrix_f.write("%s %s %s\n" % (bvecs_x[i], bvecs_y[i], bvecs_z[i])) gradient_matrix_f.close() return _gradient_matrix_file def _format_arg(self, name, spec, value): if name == "bvecs": - new_val = self._create_gradient_matrix(self.inputs.bvecs, - self.inputs.bvals) + new_val = self._create_gradient_matrix(self.inputs.bvecs, self.inputs.bvals) return super(HARDIMat, self)._format_arg("bvecs", spec, new_val) return super(HARDIMat, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) return outputs class ODFReconInputSpec(CommandLineInputSpec): DWI = File( - desc='Input raw data', - argstr='%s', - exists=True, - mandatory=True, - position=1) + desc="Input raw data", argstr="%s", exists=True, mandatory=True, position=1 + ) n_directions = traits.Int( - desc='Number of directions', argstr='%s', mandatory=True, position=2) + desc="Number of directions", argstr="%s", mandatory=True, position=2 + ) n_output_directions = traits.Int( - desc='Number of output directions', - argstr='%s', - mandatory=True, - position=3) + desc="Number of output directions", argstr="%s", mandatory=True, position=3 + ) out_prefix = traits.Str( - "odf", - desc='Output file prefix', - argstr='%s', - usedefault=True, - position=4) + "odf", desc="Output file prefix", argstr="%s", usedefault=True, position=4 + ) matrix = File( 
- argstr='-mat %s', + argstr="-mat %s", exists=True, desc="""use given file as reconstruction matrix.""", - mandatory=True) + mandatory=True, + ) n_b0 = traits.Int( - argstr='-b0 %s', + argstr="-b0 %s", desc="""number of b0 scans. by default the program gets this information from the number of directions and number of volumes in the raw data. useful when dealing with incomplete raw data set or only using part of raw data set to reconstruct""", - mandatory=True) + mandatory=True, + ) output_type = traits.Enum( - 'nii', - 'analyze', - 'ni1', - 'nii.gz', - argstr='-ot %s', - desc='output file type', - usedefault=True) + "nii", + "analyze", + "ni1", + "nii.gz", + argstr="-ot %s", + desc="output file type", + usedefault=True, + ) sharpness = traits.Float( desc="""smooth or sharpen the raw data. factor > 0 is smoothing. factor < 0 is sharpening. default value is 0 NOTE: this option applies to DSI study only""", - argstr='-s %f') + argstr="-s %f", + ) filter = traits.Bool( - desc="""apply a filter (e.g. high pass) to the raw image""", - argstr='-f') + desc="""apply a filter (e.g. high pass) to the raw image""", argstr="-f" + ) subtract_background = traits.Bool( - desc="""subtract the background value before reconstruction""", - argstr='-bg') - dsi = traits.Bool(desc="""indicates that the data is dsi""", argstr='-dsi') - output_entropy = traits.Bool(desc="""output entropy map""", argstr='-oe') + desc="""subtract the background value before reconstruction""", argstr="-bg" + ) + dsi = traits.Bool(desc="""indicates that the data is dsi""", argstr="-dsi") + output_entropy = traits.Bool(desc="""output entropy map""", argstr="-oe") image_orientation_vectors = traits.List( traits.Float(), minlen=6, @@ -183,12 +183,14 @@ class ODFReconInputSpec(CommandLineInputSpec): one automatically. this information will be used to determine image orientation, as well as to adjust gradient vectors with oblique angle when""", - argstr="-iop %f") + argstr="-iop %f", + ) oblique_correction = traits.Bool( desc="""when oblique angle(s) applied, some SIEMENS dti protocols do not adjust gradient accordingly, thus it requires adjustment for correct diffusion tensor calculation""", - argstr="-oc") + argstr="-oc", + ) class ODFReconOutputSpec(TraitedSpec): @@ -206,29 +208,29 @@ class ODFRecon(CommandLine): input_spec = ODFReconInputSpec output_spec = ODFReconOutputSpec - _cmd = 'odf_recon' + _cmd = "odf_recon" def _list_outputs(self): out_prefix = self.inputs.out_prefix output_type = self.inputs.output_type outputs = self.output_spec().get() - outputs['B0'] = os.path.abspath( - fname_presuffix( - "", prefix=out_prefix, suffix='_b0.' + output_type)) - outputs['DWI'] = os.path.abspath( - fname_presuffix( - "", prefix=out_prefix, suffix='_dwi.' + output_type)) - outputs['max'] = os.path.abspath( - fname_presuffix( - "", prefix=out_prefix, suffix='_max.' + output_type)) - outputs['ODF'] = os.path.abspath( - fname_presuffix( - "", prefix=out_prefix, suffix='_odf.' + output_type)) + outputs["B0"] = os.path.abspath( + fname_presuffix("", prefix=out_prefix, suffix="_b0." + output_type) + ) + outputs["DWI"] = os.path.abspath( + fname_presuffix("", prefix=out_prefix, suffix="_dwi." + output_type) + ) + outputs["max"] = os.path.abspath( + fname_presuffix("", prefix=out_prefix, suffix="_max." + output_type) + ) + outputs["ODF"] = os.path.abspath( + fname_presuffix("", prefix=out_prefix, suffix="_odf." 
+ output_type) + ) if isdefined(self.inputs.output_entropy): - outputs['entropy'] = os.path.abspath( - fname_presuffix( - "", prefix=out_prefix, suffix='_entropy.' + output_type)) + outputs["entropy"] = os.path.abspath( + fname_presuffix("", prefix=out_prefix, suffix="_entropy." + output_type) + ) return outputs @@ -237,77 +239,73 @@ class ODFTrackerInputSpec(CommandLineInputSpec): max = File(exists=True, mandatory=True) ODF = File(exists=True, mandatory=True) input_data_prefix = traits.Str( - "odf", - desc='recon data prefix', - argstr='%s', - usedefault=True, - position=0) + "odf", desc="recon data prefix", argstr="%s", usedefault=True, position=0 + ) out_file = File( - "tracks.trk", - desc='output track file', - argstr='%s', - usedefault=True, - position=1) + "tracks.trk", desc="output track file", argstr="%s", usedefault=True, position=1 + ) input_output_type = traits.Enum( - 'nii', - 'analyze', - 'ni1', - 'nii.gz', - argstr='-it %s', - desc='input and output file type', - usedefault=True) + "nii", + "analyze", + "ni1", + "nii.gz", + argstr="-it %s", + desc="input and output file type", + usedefault=True, + ) runge_kutta2 = traits.Bool( - argstr='-rk2', + argstr="-rk2", desc="""use 2nd order runge-kutta method for tracking. - default tracking method is non-interpolate streamline""") + default tracking method is non-interpolate streamline""", + ) step_length = traits.Float( - argstr='-l %f', + argstr="-l %f", desc="""set step length, in the unit of minimum voxel size. - default value is 0.1.""") + default value is 0.1.""", + ) angle_threshold = traits.Float( - argstr='-at %f', + argstr="-at %f", desc="""set angle threshold. default value is 35 degree for - default tracking method and 25 for rk2""") + default tracking method and 25 for rk2""", + ) random_seed = traits.Int( - argstr='-rseed %s', + argstr="-rseed %s", desc="""use random location in a voxel instead of the center of the voxel - to seed. can also define number of seed per voxel. default is 1""") - invert_x = traits.Bool( - argstr='-ix', desc='invert x component of the vector') - invert_y = traits.Bool( - argstr='-iy', desc='invert y component of the vector') - invert_z = traits.Bool( - argstr='-iz', desc='invert z component of the vector') - swap_xy = traits.Bool( - argstr='-sxy', desc='swap x and y vectors while tracking') - swap_yz = traits.Bool( - argstr='-syz', desc='swap y and z vectors while tracking') - swap_zx = traits.Bool( - argstr='-szx', desc='swap x and z vectors while tracking') - disc = traits.Bool(argstr='-disc', desc='use disc tracking') + to seed. can also define number of seed per voxel. 
default is 1""", + ) + invert_x = traits.Bool(argstr="-ix", desc="invert x component of the vector") + invert_y = traits.Bool(argstr="-iy", desc="invert y component of the vector") + invert_z = traits.Bool(argstr="-iz", desc="invert z component of the vector") + swap_xy = traits.Bool(argstr="-sxy", desc="swap x and y vectors while tracking") + swap_yz = traits.Bool(argstr="-syz", desc="swap y and z vectors while tracking") + swap_zx = traits.Bool(argstr="-szx", desc="swap x and z vectors while tracking") + disc = traits.Bool(argstr="-disc", desc="use disc tracking") mask1_file = File( - desc="first mask image", mandatory=True, argstr="-m %s", position=2) + desc="first mask image", mandatory=True, argstr="-m %s", position=2 + ) mask1_threshold = traits.Float( - desc= - "threshold value for the first mask image, if not given, the program will \ + desc="threshold value for the first mask image, if not given, the program will \ try automatically find the threshold", - position=3) + position=3, + ) mask2_file = File(desc="second mask image", argstr="-m2 %s", position=4) mask2_threshold = traits.Float( - desc= - "threshold value for the second mask image, if not given, the program will \ + desc="threshold value for the second mask image, if not given, the program will \ try automatically find the threshold", - position=5) + position=5, + ) limit = traits.Int( - argstr='-limit %d', + argstr="-limit %d", desc="""in some special case, such as heart data, some track may go into infinite circle and take long time to stop. this option allows - setting a limit for the longest tracking steps (voxels)""") + setting a limit for the longest tracking steps (voxels)""", + ) dsi = traits.Bool( - argstr='-dsi', + argstr="-dsi", desc=""" specify the input odf data is dsi. because dsi recon uses fixed pre-calculated matrix, some special orientation patch needs to - be applied to keep dti/dsi/q-ball consistent.""") + be applied to keep dti/dsi/q-ball consistent.""", + ) image_orientation_vectors = traits.List( traits.Float(), minlen=6, @@ -319,24 +317,23 @@ class ODFTrackerInputSpec(CommandLineInputSpec): one automatically. this information will be used to determine image orientation, as well as to adjust gradient vectors with oblique angle when""", - argstr="-iop %f") + argstr="-iop %f", + ) slice_order = traits.Int( - argstr='-sorder %d', - desc= - 'set the slice order. 1 means normal, -1 means reversed. default value is 1' + argstr="-sorder %d", + desc="set the slice order. 1 means normal, -1 means reversed. default value is 1", ) voxel_order = traits.Enum( - 'RAS', - 'RPS', - 'RAI', - 'RPI', - 'LAI', - 'LAS', - 'LPS', - 'LPI', - argstr='-vorder %s', - desc= - """specify the voxel order in RL/AP/IS (human brain) reference. must be + "RAS", + "RPS", + "RAI", + "RPI", + "LAI", + "LAS", + "LPS", + "LPI", + argstr="-vorder %s", + desc="""specify the voxel order in RL/AP/IS (human brain) reference. must be 3 letters with no space in between. for example, RAS means the voxel row is from L->R, the column is from P->A and the slice order is from I->S. @@ -346,11 +343,12 @@ class ODFTrackerInputSpec(CommandLineInputSpec): sagittal image is PIL. 
this information also is NOT needed for tracking but will be saved in the track file and is essential for track display to map onto - the right coordinates""") + the right coordinates""", + ) class ODFTrackerOutputSpec(TraitedSpec): - track_file = File(exists=True, desc='output track file') + track_file = File(exists=True, desc="output track file") class ODFTracker(CommandLine): @@ -360,24 +358,26 @@ class ODFTracker(CommandLine): input_spec = ODFTrackerInputSpec output_spec = ODFTrackerOutputSpec - _cmd = 'odf_tracker' + _cmd = "odf_tracker" def _run_interface(self, runtime): _, _, ext = split_filename(self.inputs.max) copyfile( self.inputs.max, os.path.abspath(self.inputs.input_data_prefix + "_max" + ext), - copy=False) + copy=False, + ) _, _, ext = split_filename(self.inputs.ODF) copyfile( self.inputs.ODF, os.path.abspath(self.inputs.input_data_prefix + "_odf" + ext), - copy=False) + copy=False, + ) return super(ODFTracker, self)._run_interface(runtime) def _list_outputs(self): outputs = self.output_spec().get() - outputs['track_file'] = os.path.abspath(self.inputs.out_file) + outputs["track_file"] = os.path.abspath(self.inputs.out_file) return outputs diff --git a/nipype/interfaces/diffusion_toolkit/postproc.py b/nipype/interfaces/diffusion_toolkit/postproc.py index 19cafe8d7d..534b747a0d 100644 --- a/nipype/interfaces/diffusion_toolkit/postproc.py +++ b/nipype/interfaces/diffusion_toolkit/postproc.py @@ -5,10 +5,16 @@ """ import os -from ..base import (TraitedSpec, File, traits, CommandLine, InputMultiPath, - CommandLineInputSpec) +from ..base import ( + TraitedSpec, + File, + traits, + CommandLine, + InputMultiPath, + CommandLineInputSpec, +) -__docformat__ = 'restructuredtext' +__docformat__ = "restructuredtext" class SplineFilterInputSpec(CommandLineInputSpec): @@ -17,18 +23,21 @@ class SplineFilterInputSpec(CommandLineInputSpec): desc="file containing tracks to be filtered", position=0, argstr="%s", - mandatory=True) + mandatory=True, + ) step_length = traits.Float( desc="in the unit of minimum voxel size", position=1, argstr="%f", - mandatory=True) + mandatory=True, + ) output_file = File( "spline_tracks.trk", desc="target file for smoothed tracks", position=2, argstr="%s", - usedefault=True) + usedefault=True, + ) class SplineFilterOutputSpec(TraitedSpec): @@ -53,6 +62,7 @@ class SplineFilter(CommandLine): >>> filt.inputs.step_length = 0.5 >>> filt.run() # doctest: +SKIP """ + input_spec = SplineFilterInputSpec output_spec = SplineFilterOutputSpec @@ -60,8 +70,7 @@ class SplineFilter(CommandLine): def _list_outputs(self): outputs = self.output_spec().get() - outputs['smoothed_track_file'] = os.path.abspath( - self.inputs.output_file) + outputs["smoothed_track_file"] = os.path.abspath(self.inputs.output_file) return outputs @@ -71,13 +80,15 @@ class TrackMergeInputSpec(CommandLineInputSpec): desc="file containing tracks to be filtered", position=0, argstr="%s...", - mandatory=True) + mandatory=True, + ) output_file = File( "merged_tracks.trk", desc="target file for merged tracks", position=-1, argstr="%s", - usedefault=True) + usedefault=True, + ) class TrackMergeOutputSpec(TraitedSpec): @@ -104,6 +115,7 @@ class TrackMerge(CommandLine): >>> mrg.inputs.track_files = ['track1.trk','track2.trk'] >>> mrg.run() # doctest: +SKIP """ + input_spec = TrackMergeInputSpec output_spec = TrackMergeOutputSpec @@ -111,5 +123,5 @@ class TrackMerge(CommandLine): def _list_outputs(self): outputs = self.output_spec().get() - outputs['track_file'] = os.path.abspath(self.inputs.output_file) + 
outputs["track_file"] = os.path.abspath(self.inputs.output_file) return outputs diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py index 8623872a7e..91e164f35a 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py @@ -4,59 +4,39 @@ def test_DTIRecon_inputs(): input_map = dict( - DWI=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=1, - ), - args=dict(argstr='%s', ), - b0_threshold=dict(argstr='-b0_th', ), - bvals=dict( - extensions=None, - mandatory=True, - ), - bvecs=dict( - argstr='-gm %s', - extensions=None, - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - image_orientation_vectors=dict(argstr='-iop %f', ), - n_averages=dict(argstr='-nex %s', ), - oblique_correction=dict(argstr='-oc', ), - out_prefix=dict( - argstr='%s', - position=2, - usedefault=True, - ), - output_type=dict( - argstr='-ot %s', - usedefault=True, - ), + DWI=dict(argstr="%s", extensions=None, mandatory=True, position=1,), + args=dict(argstr="%s",), + b0_threshold=dict(argstr="-b0_th",), + bvals=dict(extensions=None, mandatory=True,), + bvecs=dict(argstr="-gm %s", extensions=None, mandatory=True,), + environ=dict(nohash=True, usedefault=True,), + image_orientation_vectors=dict(argstr="-iop %f",), + n_averages=dict(argstr="-nex %s",), + oblique_correction=dict(argstr="-oc",), + out_prefix=dict(argstr="%s", position=2, usedefault=True,), + output_type=dict(argstr="-ot %s", usedefault=True,), ) inputs = DTIRecon.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DTIRecon_outputs(): output_map = dict( - ADC=dict(extensions=None, ), - B0=dict(extensions=None, ), - FA=dict(extensions=None, ), - FA_color=dict(extensions=None, ), - L1=dict(extensions=None, ), - L2=dict(extensions=None, ), - L3=dict(extensions=None, ), - V1=dict(extensions=None, ), - V2=dict(extensions=None, ), - V3=dict(extensions=None, ), - exp=dict(extensions=None, ), - tensor=dict(extensions=None, ), + ADC=dict(extensions=None,), + B0=dict(extensions=None,), + FA=dict(extensions=None,), + FA_color=dict(extensions=None,), + L1=dict(extensions=None,), + L2=dict(extensions=None,), + L3=dict(extensions=None,), + V1=dict(extensions=None,), + V2=dict(extensions=None,), + V3=dict(extensions=None,), + exp=dict(extensions=None,), + tensor=dict(extensions=None,), ) outputs = DTIRecon.output_spec() diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py index 2f4f3417ef..d1fd3bd1e8 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py @@ -4,63 +4,40 @@ def test_DTITracker_inputs(): input_map = dict( - angle_threshold=dict(argstr='-at %f', ), - angle_threshold_weight=dict(argstr='-atw %f', ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_data_prefix=dict( - argstr='%s', - position=0, - usedefault=True, - ), - input_type=dict(argstr='-it %s', ), - invert_x=dict(argstr='-ix', ), - invert_y=dict(argstr='-iy', ), - invert_z=dict(argstr='-iz', ), - mask1_file=dict( - argstr='-m %s', - extensions=None, - mandatory=True, - position=2, - ), - mask1_threshold=dict(position=3, ), - mask2_file=dict( - 
argstr='-m2 %s', - extensions=None, - position=4, - ), - mask2_threshold=dict(position=5, ), - output_file=dict( - argstr='%s', - extensions=None, - position=1, - usedefault=True, - ), - output_mask=dict( - argstr='-om %s', - extensions=None, - ), - primary_vector=dict(argstr='-%s', ), - random_seed=dict(argstr='-rseed %d', ), - step_length=dict(argstr='-l %f', ), - swap_xy=dict(argstr='-sxy', ), - swap_yz=dict(argstr='-syz', ), - swap_zx=dict(argstr='-szx', ), - tensor_file=dict(extensions=None, ), - tracking_method=dict(argstr='-%s', ), + angle_threshold=dict(argstr="-at %f",), + angle_threshold_weight=dict(argstr="-atw %f",), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + input_data_prefix=dict(argstr="%s", position=0, usedefault=True,), + input_type=dict(argstr="-it %s",), + invert_x=dict(argstr="-ix",), + invert_y=dict(argstr="-iy",), + invert_z=dict(argstr="-iz",), + mask1_file=dict(argstr="-m %s", extensions=None, mandatory=True, position=2,), + mask1_threshold=dict(position=3,), + mask2_file=dict(argstr="-m2 %s", extensions=None, position=4,), + mask2_threshold=dict(position=5,), + output_file=dict(argstr="%s", extensions=None, position=1, usedefault=True,), + output_mask=dict(argstr="-om %s", extensions=None,), + primary_vector=dict(argstr="-%s",), + random_seed=dict(argstr="-rseed %d",), + step_length=dict(argstr="-l %f",), + swap_xy=dict(argstr="-sxy",), + swap_yz=dict(argstr="-syz",), + swap_zx=dict(argstr="-szx",), + tensor_file=dict(extensions=None,), + tracking_method=dict(argstr="-%s",), ) inputs = DTITracker.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DTITracker_outputs(): output_map = dict( - mask_file=dict(extensions=None, ), - track_file=dict(extensions=None, ), + mask_file=dict(extensions=None,), track_file=dict(extensions=None,), ) outputs = DTITracker.output_spec() diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py index d043890f9d..6cddb31fdc 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py @@ -4,50 +4,27 @@ def test_HARDIMat_inputs(): input_map = dict( - args=dict(argstr='%s', ), - bvals=dict( - extensions=None, - mandatory=True, - ), - bvecs=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - image_info=dict( - argstr='-info %s', - extensions=None, - ), - image_orientation_vectors=dict(argstr='-iop %f', ), - oblique_correction=dict(argstr='-oc', ), - odf_file=dict( - argstr='-odf %s', - extensions=None, - ), - order=dict(argstr='-order %s', ), - out_file=dict( - argstr='%s', - extensions=None, - position=2, - usedefault=True, - ), - reference_file=dict( - argstr='-ref %s', - extensions=None, - ), + args=dict(argstr="%s",), + bvals=dict(extensions=None, mandatory=True,), + bvecs=dict(argstr="%s", extensions=None, mandatory=True, position=1,), + environ=dict(nohash=True, usedefault=True,), + image_info=dict(argstr="-info %s", extensions=None,), + image_orientation_vectors=dict(argstr="-iop %f",), + oblique_correction=dict(argstr="-oc",), + odf_file=dict(argstr="-odf %s", extensions=None,), + order=dict(argstr="-order %s",), + out_file=dict(argstr="%s", extensions=None, position=2, usedefault=True,), + reference_file=dict(argstr="-ref %s", 
extensions=None,), ) inputs = HARDIMat.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_HARDIMat_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = HARDIMat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py index e1f6ad47b6..0e8132a7f6 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py @@ -4,65 +4,37 @@ def test_ODFRecon_inputs(): input_map = dict( - DWI=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=1, - ), - args=dict(argstr='%s', ), - dsi=dict(argstr='-dsi', ), - environ=dict( - nohash=True, - usedefault=True, - ), - filter=dict(argstr='-f', ), - image_orientation_vectors=dict(argstr='-iop %f', ), - matrix=dict( - argstr='-mat %s', - extensions=None, - mandatory=True, - ), - n_b0=dict( - argstr='-b0 %s', - mandatory=True, - ), - n_directions=dict( - argstr='%s', - mandatory=True, - position=2, - ), - n_output_directions=dict( - argstr='%s', - mandatory=True, - position=3, - ), - oblique_correction=dict(argstr='-oc', ), - out_prefix=dict( - argstr='%s', - position=4, - usedefault=True, - ), - output_entropy=dict(argstr='-oe', ), - output_type=dict( - argstr='-ot %s', - usedefault=True, - ), - sharpness=dict(argstr='-s %f', ), - subtract_background=dict(argstr='-bg', ), + DWI=dict(argstr="%s", extensions=None, mandatory=True, position=1,), + args=dict(argstr="%s",), + dsi=dict(argstr="-dsi",), + environ=dict(nohash=True, usedefault=True,), + filter=dict(argstr="-f",), + image_orientation_vectors=dict(argstr="-iop %f",), + matrix=dict(argstr="-mat %s", extensions=None, mandatory=True,), + n_b0=dict(argstr="-b0 %s", mandatory=True,), + n_directions=dict(argstr="%s", mandatory=True, position=2,), + n_output_directions=dict(argstr="%s", mandatory=True, position=3,), + oblique_correction=dict(argstr="-oc",), + out_prefix=dict(argstr="%s", position=4, usedefault=True,), + output_entropy=dict(argstr="-oe",), + output_type=dict(argstr="-ot %s", usedefault=True,), + sharpness=dict(argstr="-s %f",), + subtract_background=dict(argstr="-bg",), ) inputs = ODFRecon.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ODFRecon_outputs(): output_map = dict( - B0=dict(extensions=None, ), - DWI=dict(extensions=None, ), - ODF=dict(extensions=None, ), - entropy=dict(extensions=None, ), - max=dict(extensions=None, ), + B0=dict(extensions=None,), + DWI=dict(extensions=None,), + ODF=dict(extensions=None,), + entropy=dict(extensions=None,), + max=dict(extensions=None,), ) outputs = ODFRecon.output_spec() diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py index 805c28831e..233aea3e3a 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py @@ -4,71 +4,43 @@ def test_ODFTracker_inputs(): input_map = dict( - ODF=dict( - extensions=None, - mandatory=True, - ), - angle_threshold=dict(argstr='-at %f', ), - args=dict(argstr='%s', ), - 
disc=dict(argstr='-disc', ), - dsi=dict(argstr='-dsi', ), - environ=dict( - nohash=True, - usedefault=True, - ), - image_orientation_vectors=dict(argstr='-iop %f', ), - input_data_prefix=dict( - argstr='%s', - position=0, - usedefault=True, - ), - input_output_type=dict( - argstr='-it %s', - usedefault=True, - ), - invert_x=dict(argstr='-ix', ), - invert_y=dict(argstr='-iy', ), - invert_z=dict(argstr='-iz', ), - limit=dict(argstr='-limit %d', ), - mask1_file=dict( - argstr='-m %s', - extensions=None, - mandatory=True, - position=2, - ), - mask1_threshold=dict(position=3, ), - mask2_file=dict( - argstr='-m2 %s', - extensions=None, - position=4, - ), - mask2_threshold=dict(position=5, ), - max=dict( - extensions=None, - mandatory=True, - ), - out_file=dict( - argstr='%s', - extensions=None, - position=1, - usedefault=True, - ), - random_seed=dict(argstr='-rseed %s', ), - runge_kutta2=dict(argstr='-rk2', ), - slice_order=dict(argstr='-sorder %d', ), - step_length=dict(argstr='-l %f', ), - swap_xy=dict(argstr='-sxy', ), - swap_yz=dict(argstr='-syz', ), - swap_zx=dict(argstr='-szx', ), - voxel_order=dict(argstr='-vorder %s', ), + ODF=dict(extensions=None, mandatory=True,), + angle_threshold=dict(argstr="-at %f",), + args=dict(argstr="%s",), + disc=dict(argstr="-disc",), + dsi=dict(argstr="-dsi",), + environ=dict(nohash=True, usedefault=True,), + image_orientation_vectors=dict(argstr="-iop %f",), + input_data_prefix=dict(argstr="%s", position=0, usedefault=True,), + input_output_type=dict(argstr="-it %s", usedefault=True,), + invert_x=dict(argstr="-ix",), + invert_y=dict(argstr="-iy",), + invert_z=dict(argstr="-iz",), + limit=dict(argstr="-limit %d",), + mask1_file=dict(argstr="-m %s", extensions=None, mandatory=True, position=2,), + mask1_threshold=dict(position=3,), + mask2_file=dict(argstr="-m2 %s", extensions=None, position=4,), + mask2_threshold=dict(position=5,), + max=dict(extensions=None, mandatory=True,), + out_file=dict(argstr="%s", extensions=None, position=1, usedefault=True,), + random_seed=dict(argstr="-rseed %s",), + runge_kutta2=dict(argstr="-rk2",), + slice_order=dict(argstr="-sorder %d",), + step_length=dict(argstr="-l %f",), + swap_xy=dict(argstr="-sxy",), + swap_yz=dict(argstr="-syz",), + swap_zx=dict(argstr="-szx",), + voxel_order=dict(argstr="-vorder %s",), ) inputs = ODFTracker.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ODFTracker_outputs(): - output_map = dict(track_file=dict(extensions=None, ), ) + output_map = dict(track_file=dict(extensions=None,),) outputs = ODFTracker.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py index 53074aab81..ddff69b5de 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py @@ -4,36 +4,21 @@ def test_SplineFilter_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - output_file=dict( - argstr='%s', - extensions=None, - position=2, - usedefault=True, - ), - step_length=dict( - argstr='%f', - mandatory=True, - position=1, - ), - track_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=0, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + 
output_file=dict(argstr="%s", extensions=None, position=2, usedefault=True,), + step_length=dict(argstr="%f", mandatory=True, position=1,), + track_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), ) inputs = SplineFilter.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SplineFilter_outputs(): - output_map = dict(smoothed_track_file=dict(extensions=None, ), ) + output_map = dict(smoothed_track_file=dict(extensions=None,),) outputs = SplineFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py index 9fea4d1b5e..1c274533f0 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py @@ -4,30 +4,20 @@ def test_TrackMerge_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - output_file=dict( - argstr='%s', - extensions=None, - position=-1, - usedefault=True, - ), - track_files=dict( - argstr='%s...', - mandatory=True, - position=0, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + output_file=dict(argstr="%s", extensions=None, position=-1, usedefault=True,), + track_files=dict(argstr="%s...", mandatory=True, position=0,), ) inputs = TrackMerge.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TrackMerge_outputs(): - output_map = dict(track_file=dict(extensions=None, ), ) + output_map = dict(track_file=dict(extensions=None,),) outputs = TrackMerge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/anisotropic_power.py b/nipype/interfaces/dipy/anisotropic_power.py index 21208326f4..3982ed7106 100644 --- a/nipype/interfaces/dipy/anisotropic_power.py +++ b/nipype/interfaces/dipy/anisotropic_power.py @@ -6,11 +6,11 @@ from ..base import TraitedSpec, File, isdefined from .base import DipyDiffusionInterface, DipyBaseInterfaceInputSpec -IFLOGGER = logging.getLogger('nipype.interface') +IFLOGGER = logging.getLogger("nipype.interface") class APMQballInputSpec(DipyBaseInterfaceInputSpec): - mask_file = File(exists=True, desc='An optional brain mask') + mask_file = File(exists=True, desc="An optional brain mask") class APMQballOutputSpec(TraitedSpec): @@ -31,6 +31,7 @@ class APMQball(DipyDiffusionInterface): >>> apm.inputs.in_bval = 'bvals' >>> apm.run() # doctest: +SKIP """ + input_spec = APMQballInputSpec output_spec = APMQballOutputSpec @@ -50,23 +51,24 @@ def _run_interface(self, runtime): # Fit it model = shm.QballModel(gtab, 8) - sphere = get_sphere('symmetric724') + sphere = get_sphere("symmetric724") peaks = peaks_from_model( model=model, data=data, - relative_peak_threshold=.5, + relative_peak_threshold=0.5, min_separation_angle=25, sphere=sphere, - mask=mask) + mask=mask, + ) apm = shm.anisotropic_power(peaks.shm_coeff) - out_file = self._gen_filename('apm') + out_file = self._gen_filename("apm") nb.Nifti1Image(apm.astype("float32"), affine).to_filename(out_file) - IFLOGGER.info('APM qball image saved as %s', out_file) + IFLOGGER.info("APM qball image saved as %s", out_file) return runtime def _list_outputs(self): outputs = self._outputs().get() - 
outputs['out_file'] = self._gen_filename('apm') + outputs["out_file"] = self._gen_filename("apm") return outputs diff --git a/nipype/interfaces/dipy/base.py b/nipype/interfaces/dipy/base.py index 93546b69ac..1b168b5732 100644 --- a/nipype/interfaces/dipy/base.py +++ b/nipype/interfaces/dipy/base.py @@ -5,11 +5,17 @@ import inspect import numpy as np from ... import logging -from ..base import (traits, File, isdefined, LibraryBaseInterface, - BaseInterfaceInputSpec, TraitedSpec) +from ..base import ( + traits, + File, + isdefined, + LibraryBaseInterface, + BaseInterfaceInputSpec, + TraitedSpec, +) # List of workflows to ignore -SKIP_WORKFLOWS_LIST = ['Workflow', 'CombinedWorkflow'] +SKIP_WORKFLOWS_LIST = ["Workflow", "CombinedWorkflow"] HAVE_DIPY = True @@ -38,27 +44,30 @@ class DipyBaseInterface(LibraryBaseInterface): """ A base interface for py:mod:`dipy` computations """ - _pkg = 'dipy' + + _pkg = "dipy" class DipyBaseInterfaceInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc=('input diffusion data')) - in_bval = File(exists=True, mandatory=True, desc=('input b-values table')) - in_bvec = File(exists=True, mandatory=True, desc=('input b-vectors table')) - b0_thres = traits.Int(700, usedefault=True, desc=('b0 threshold')) - out_prefix = traits.Str(desc=('output prefix for file names')) + in_file = File(exists=True, mandatory=True, desc=("input diffusion data")) + in_bval = File(exists=True, mandatory=True, desc=("input b-values table")) + in_bvec = File(exists=True, mandatory=True, desc=("input b-vectors table")) + b0_thres = traits.Int(700, usedefault=True, desc=("b0 threshold")) + out_prefix = traits.Str(desc=("output prefix for file names")) class DipyDiffusionInterface(DipyBaseInterface): """ A base interface for py:mod:`dipy` computations """ + input_spec = DipyBaseInterfaceInputSpec def _get_gradient_table(self): bval = np.loadtxt(self.inputs.in_bval) bvec = np.loadtxt(self.inputs.in_bvec).T from dipy.core.gradients import gradient_table + gtab = gradient_table(bval, bvec) gtab.b0_threshold = self.inputs.b0_thres @@ -66,7 +75,7 @@ def _get_gradient_table(self): def _gen_filename(self, name, ext=None): fname, fext = op.splitext(op.basename(self.inputs.in_file)) - if fext == '.gz': + if fext == ".gz": fname, fext2 = op.splitext(fname) fext = fext2 + fext @@ -78,7 +87,7 @@ def _gen_filename(self, name, ext=None): if ext is None: ext = fext - return out_prefix + '_' + name + ext + return out_prefix + "_" + name + ext def convert_to_traits_type(dipy_type, is_file=False): @@ -108,8 +117,10 @@ def convert_to_traits_type(dipy_type, is_file=False): elif "complex" in dipy_type: return traits.Complex, is_mandatory else: - msg = "Error during convert_to_traits_type({0}).".format(dipy_type) + \ - "Unknown DIPY type." + msg = ( + "Error during convert_to_traits_type({0}).".format(dipy_type) + + "Unknown DIPY type." 
+ ) raise IOError(msg) @@ -136,22 +147,21 @@ def create_interface_specs(class_name, params=None, BaseClass=TraitedSpec): for p in params: name, dipy_type, desc = p[0], p[1], p[2] is_file = bool("files" in name or "out_" in name) - traits_type, is_mandatory = convert_to_traits_type(dipy_type, - is_file) + traits_type, is_mandatory = convert_to_traits_type(dipy_type, is_file) # print(name, dipy_type, desc, is_file, traits_type, is_mandatory) if BaseClass.__name__ == BaseInterfaceInputSpec.__name__: if len(p) > 3: - attr[name] = traits_type(p[3], desc=desc[-1], - usedefault=True, - mandatory=is_mandatory) + attr[name] = traits_type( + p[3], desc=desc[-1], usedefault=True, mandatory=is_mandatory + ) else: - attr[name] = traits_type(desc=desc[-1], - mandatory=is_mandatory) + attr[name] = traits_type(desc=desc[-1], mandatory=is_mandatory) else: - attr[name] = traits_type(p[3], desc=desc[-1], exists=True, - usedefault=True,) + attr[name] = traits_type( + p[3], desc=desc[-1], exists=True, usedefault=True, + ) - newclass = type(str(class_name), (BaseClass, ), attr) + newclass = type(str(class_name), (BaseClass,), attr) return newclass @@ -180,19 +190,26 @@ def dipy_to_nipype_interface(cls_name, dipy_flow, BaseClass=DipyBaseInterface): flow = dipy_flow() parser.add_workflow(flow) default_values = inspect.getargspec(flow.run).defaults - optional_params = [args + (val,) for args, val in zip(parser.optional_parameters, default_values)] + optional_params = [ + args + (val,) for args, val in zip(parser.optional_parameters, default_values) + ] start = len(parser.optional_parameters) - len(parser.output_parameters) - output_parameters = [args + (val,) for args, val in zip(parser.output_parameters, default_values[start:])] + output_parameters = [ + args + (val,) + for args, val in zip(parser.output_parameters, default_values[start:]) + ] input_parameters = parser.positional_parameters + optional_params - input_spec = create_interface_specs("{}InputSpec".format(cls_name), - input_parameters, - BaseClass=BaseInterfaceInputSpec) + input_spec = create_interface_specs( + "{}InputSpec".format(cls_name), + input_parameters, + BaseClass=BaseInterfaceInputSpec, + ) - output_spec = create_interface_specs("{}OutputSpec".format(cls_name), - output_parameters, - BaseClass=TraitedSpec) + output_spec = create_interface_specs( + "{}OutputSpec".format(cls_name), output_parameters, BaseClass=TraitedSpec + ) def _run_interface(self, runtime): flow = dipy_flow() @@ -207,11 +224,16 @@ def _list_outputs(self): return outputs - newclass = type(str(cls_name), (BaseClass, ), - {"input_spec": input_spec, - "output_spec": output_spec, - "_run_interface": _run_interface, - "_list_outputs:": _list_outputs}) + newclass = type( + str(cls_name), + (BaseClass,), + { + "input_spec": input_spec, + "output_spec": output_spec, + "_run_interface": _run_interface, + "_list_outputs:": _list_outputs, + }, + ) return newclass @@ -235,7 +257,10 @@ def get_dipy_workflows(module): >>> get_dipy_workflows(align) # doctest: +SKIP """ - return [(m, obj) for m, obj in inspect.getmembers(module) - if inspect.isclass(obj) and - issubclass(obj, module.Workflow) and - m not in SKIP_WORKFLOWS_LIST] + return [ + (m, obj) + for m, obj in inspect.getmembers(module) + if inspect.isclass(obj) + and issubclass(obj, module.Workflow) + and m not in SKIP_WORKFLOWS_LIST + ] diff --git a/nipype/interfaces/dipy/preprocess.py b/nipype/interfaces/dipy/preprocess.py index 03686e0258..97d43e9220 100644 --- a/nipype/interfaces/dipy/preprocess.py +++ 
b/nipype/interfaces/dipy/preprocess.py @@ -8,44 +8,54 @@ from ...utils import NUMPY_MMAP from ... import logging -from ..base import (traits, TraitedSpec, File, isdefined) -from .base import (HAVE_DIPY, dipy_version, dipy_to_nipype_interface, - get_dipy_workflows, DipyBaseInterface) - -IFLOGGER = logging.getLogger('nipype.interface') - -if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('0.15'): +from ..base import traits, TraitedSpec, File, isdefined +from .base import ( + HAVE_DIPY, + dipy_version, + dipy_to_nipype_interface, + get_dipy_workflows, + DipyBaseInterface, +) + +IFLOGGER = logging.getLogger("nipype.interface") + +if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion("0.15"): from dipy.workflows import denoise, mask l_wkflw = get_dipy_workflows(denoise) + get_dipy_workflows(mask) for name, obj in l_wkflw: - new_name = name.replace('Flow', '') + new_name = name.replace("Flow", "") globals()[new_name] = dipy_to_nipype_interface(new_name, obj) del l_wkflw else: - IFLOGGER.info("We advise you to upgrade DIPY version. This upgrade will" - " open access to more function") + IFLOGGER.info( + "We advise you to upgrade DIPY version. This upgrade will" + " open access to more function" + ) class ResampleInputSpec(TraitedSpec): in_file = File( - exists=True, - mandatory=True, - desc='The input 4D diffusion-weighted image file') + exists=True, mandatory=True, desc="The input 4D diffusion-weighted image file" + ) vox_size = traits.Tuple( traits.Float, traits.Float, traits.Float, - desc=('specify the new voxel zooms. If no vox_size' - ' is set, then isotropic regridding will ' - 'be performed, with spacing equal to the ' - 'smallest current zoom.')) + desc=( + "specify the new voxel zooms. If no vox_size" + " is set, then isotropic regridding will " + "be performed, with spacing equal to the " + "smallest current zoom." 
+ ), + ) interp = traits.Int( 1, mandatory=True, usedefault=True, - desc=('order of the interpolator (0 = nearest, 1 = linear, etc.')) + desc=("order of the interpolator (0 = nearest, 1 = linear, etc."), + ) class ResampleOutputSpec(TraitedSpec): @@ -66,6 +76,7 @@ class Resample(DipyBaseInterface): >>> reslice.inputs.in_file = 'diffusion.nii' >>> reslice.run() # doctest: +SKIP """ + input_spec = ResampleInputSpec output_spec = ResampleOutputSpec @@ -78,50 +89,47 @@ def _run_interface(self, runtime): out_file = op.abspath(self._gen_outfilename()) resample_proxy( - self.inputs.in_file, - order=order, - new_zooms=vox_size, - out_file=out_file) + self.inputs.in_file, order=order, new_zooms=vox_size, out_file=out_file + ) - IFLOGGER.info('Resliced image saved as %s', out_file) + IFLOGGER.info("Resliced image saved as %s", out_file) return runtime def _list_outputs(self): outputs = self._outputs().get() - outputs['out_file'] = op.abspath(self._gen_outfilename()) + outputs["out_file"] = op.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): fname, fext = op.splitext(op.basename(self.inputs.in_file)) - if fext == '.gz': + if fext == ".gz": fname, fext2 = op.splitext(fname) fext = fext2 + fext - return op.abspath('%s_reslice%s' % (fname, fext)) + return op.abspath("%s_reslice%s" % (fname, fext)) class DenoiseInputSpec(TraitedSpec): in_file = File( - exists=True, - mandatory=True, - desc='The input 4D diffusion-weighted image file') - in_mask = File(exists=True, desc='brain mask') + exists=True, mandatory=True, desc="The input 4D diffusion-weighted image file" + ) + in_mask = File(exists=True, desc="brain mask") noise_model = traits.Enum( - 'rician', - 'gaussian', + "rician", + "gaussian", mandatory=True, usedefault=True, - desc=('noise distribution model')) + desc=("noise distribution model"), + ) signal_mask = File( - desc=('mask in which the mean signal ' - 'will be computed'), - exists=True) + desc=("mask in which the mean signal " "will be computed"), exists=True + ) noise_mask = File( - desc=('mask in which the standard deviation of noise ' - 'will be computed'), - exists=True) - patch_radius = traits.Int(1, usedefault=True, desc='patch radius') - block_radius = traits.Int(5, usedefault=True, desc='block_radius') - snr = traits.Float(desc='manually set an SNR') + desc=("mask in which the standard deviation of noise " "will be computed"), + exists=True, + ) + patch_radius = traits.Int(1, usedefault=True, desc="patch radius") + block_radius = traits.Int(5, usedefault=True, desc="block_radius") + snr = traits.Float(desc="manually set an SNR") class DenoiseOutputSpec(TraitedSpec): @@ -148,23 +156,23 @@ class Denoise(DipyBaseInterface): >>> denoise.inputs.in_file = 'diffusion.nii' >>> denoise.run() # doctest: +SKIP """ + input_spec = DenoiseInputSpec output_spec = DenoiseOutputSpec def _run_interface(self, runtime): out_file = op.abspath(self._gen_outfilename()) - settings = dict( - mask=None, rician=(self.inputs.noise_model == 'rician')) + settings = dict(mask=None, rician=(self.inputs.noise_model == "rician")) if isdefined(self.inputs.in_mask): - settings['mask'] = nb.load(self.inputs.in_mask).get_data() + settings["mask"] = nb.load(self.inputs.in_mask).get_data() if isdefined(self.inputs.patch_radius): - settings['patch_radius'] = self.inputs.patch_radius + settings["patch_radius"] = self.inputs.patch_radius if isdefined(self.inputs.block_radius): - settings['block_radius'] = self.inputs.block_radius + settings["block_radius"] = self.inputs.block_radius snr = None if 
isdefined(self.inputs.snr): @@ -183,22 +191,22 @@ def _run_interface(self, runtime): snr=snr, smask=signal_mask, nmask=noise_mask, - out_file=out_file) - IFLOGGER.info('Denoised image saved as %s, estimated SNR=%s', out_file, - str(s)) + out_file=out_file, + ) + IFLOGGER.info("Denoised image saved as %s, estimated SNR=%s", out_file, str(s)) return runtime def _list_outputs(self): outputs = self._outputs().get() - outputs['out_file'] = op.abspath(self._gen_outfilename()) + outputs["out_file"] = op.abspath(self._gen_outfilename()) return outputs def _gen_outfilename(self): fname, fext = op.splitext(op.basename(self.inputs.in_file)) - if fext == '.gz': + if fext == ".gz": fname, fext2 = op.splitext(fname) fext = fext2 + fext - return op.abspath('%s_denoise%s' % (fname, fext)) + return op.abspath("%s_denoise%s" % (fname, fext)) def resample_proxy(in_file, order=3, new_zooms=None, out_file=None): @@ -209,10 +217,10 @@ def resample_proxy(in_file, order=3, new_zooms=None, out_file=None): if out_file is None: fname, fext = op.splitext(op.basename(in_file)) - if fext == '.gz': + if fext == ".gz": fname, fext2 = op.splitext(fname) fext = fext2 + fext - out_file = op.abspath('./%s_reslice%s' % (fname, fext)) + out_file = op.abspath("./%s_reslice%s" % (fname, fext)) img = nb.load(in_file, mmap=NUMPY_MMAP) hdr = img.header.copy() @@ -222,7 +230,7 @@ def resample_proxy(in_file, order=3, new_zooms=None, out_file=None): if new_zooms is None: minzoom = np.array(im_zooms).min() - new_zooms = tuple(np.ones((3, )) * minzoom) + new_zooms = tuple(np.ones((3,)) * minzoom) if np.all(im_zooms == new_zooms): return in_file @@ -232,18 +240,14 @@ def resample_proxy(in_file, order=3, new_zooms=None, out_file=None): tmp_zooms[:3] = new_zooms[0] hdr.set_zooms(tuple(tmp_zooms)) hdr.set_data_shape(data2.shape) - hdr.set_xyzt_units('mm') - nb.Nifti1Image(data2.astype(hdr.get_data_dtype()), affine2, - hdr).to_filename(out_file) + hdr.set_xyzt_units("mm") + nb.Nifti1Image(data2.astype(hdr.get_data_dtype()), affine2, hdr).to_filename( + out_file + ) return out_file, new_zooms -def nlmeans_proxy(in_file, - settings, - snr=None, - smask=None, - nmask=None, - out_file=None): +def nlmeans_proxy(in_file, settings, snr=None, smask=None, nmask=None, out_file=None): """ Uses non-local means to denoise 4D datasets """ @@ -253,10 +257,10 @@ def nlmeans_proxy(in_file, if out_file is None: fname, fext = op.splitext(op.basename(in_file)) - if fext == '.gz': + if fext == ".gz": fname, fext2 = op.splitext(fname) fext = fext2 + fext - out_file = op.abspath('./%s_denoise%s' % (fname, fext)) + out_file = op.abspath("./%s_denoise%s" % (fname, fext)) img = nb.load(in_file, mmap=NUMPY_MMAP) hdr = img.header @@ -269,25 +273,24 @@ def nlmeans_proxy(in_file, data = np.nan_to_num(data) if data.max() < 1.0e-4: - raise RuntimeError('There is no signal in the image') + raise RuntimeError("There is no signal in the image") df = 1.0 if data.max() < 1000.0: - df = 1000. 
/ data.max() + df = 1000.0 / data.max() data *= df b0 = data[..., 0] if smask is None: smask = np.zeros_like(b0) - smask[b0 > np.percentile(b0, 85.)] = 1 + smask[b0 > np.percentile(b0, 85.0)] = 1 - smask = binary_erosion( - smask.astype(np.uint8), iterations=2).astype(np.uint8) + smask = binary_erosion(smask.astype(np.uint8), iterations=2).astype(np.uint8) if nmask is None: nmask = np.ones_like(b0, dtype=np.uint8) - bmask = settings['mask'] + bmask = settings["mask"] if bmask is None: bmask = np.zeros_like(b0) bmask[b0 > np.percentile(b0[b0 > 0], 10)] = 1 @@ -326,6 +329,5 @@ def nlmeans_proxy(in_file, den = np.squeeze(den) den /= df - nb.Nifti1Image(den.astype(hdr.get_data_dtype()), aff, - hdr).to_filename(out_file) + nb.Nifti1Image(den.astype(hdr.get_data_dtype()), aff, hdr).to_filename(out_file) return out_file, snr diff --git a/nipype/interfaces/dipy/reconstruction.py b/nipype/interfaces/dipy/reconstruction.py index 9351d1e400..ae60aab143 100644 --- a/nipype/interfaces/dipy/reconstruction.py +++ b/nipype/interfaces/dipy/reconstruction.py @@ -11,46 +11,49 @@ from ... import logging from ..base import TraitedSpec, File, traits, isdefined -from .base import (DipyDiffusionInterface, DipyBaseInterfaceInputSpec, - HAVE_DIPY, dipy_version, dipy_to_nipype_interface, - get_dipy_workflows) +from .base import ( + DipyDiffusionInterface, + DipyBaseInterfaceInputSpec, + HAVE_DIPY, + dipy_version, + dipy_to_nipype_interface, + get_dipy_workflows, +) -IFLOGGER = logging.getLogger('nipype.interface') +IFLOGGER = logging.getLogger("nipype.interface") -if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('0.15'): +if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion("0.15"): from dipy.workflows import reconst l_wkflw = get_dipy_workflows(reconst) for name, obj in l_wkflw: - new_name = name.replace('Flow', '') + new_name = name.replace("Flow", "") globals()[new_name] = dipy_to_nipype_interface(new_name, obj) del l_wkflw else: - IFLOGGER.info("We advise you to upgrade DIPY version. This upgrade will" - " open access to more models") + IFLOGGER.info( + "We advise you to upgrade DIPY version. 
This upgrade will" + " open access to more models" + ) class RESTOREInputSpec(DipyBaseInterfaceInputSpec): - in_mask = File(exists=True, desc=('input mask in which compute tensors')) - noise_mask = File( - exists=True, desc=('input mask in which compute noise variance')) + in_mask = File(exists=True, desc=("input mask in which compute tensors")) + noise_mask = File(exists=True, desc=("input mask in which compute noise variance")) class RESTOREOutputSpec(TraitedSpec): - fa = File(desc='output fractional anisotropy (FA) map computed from ' - 'the fitted DTI') - md = File(desc='output mean diffusivity (MD) map computed from the ' - 'fitted DTI') - rd = File(desc='output radial diffusivity (RD) map computed from ' - 'the fitted DTI') - mode = File(desc=('output mode (MO) map computed from the fitted DTI')) - trace = File( - desc=('output the tensor trace map computed from the ' - 'fitted DTI')) - evals = File(desc=('output the eigenvalues of the fitted DTI')) - evecs = File(desc=('output the eigenvectors of the fitted DTI')) + fa = File( + desc="output fractional anisotropy (FA) map computed from " "the fitted DTI" + ) + md = File(desc="output mean diffusivity (MD) map computed from the " "fitted DTI") + rd = File(desc="output radial diffusivity (RD) map computed from " "the fitted DTI") + mode = File(desc=("output mode (MO) map computed from the fitted DTI")) + trace = File(desc=("output the tensor trace map computed from the " "fitted DTI")) + evals = File(desc=("output the eigenvalues of the fitted DTI")) + evecs = File(desc=("output the eigenvectors of the fitted DTI")) class RESTORE(DipyDiffusionInterface): @@ -77,6 +80,7 @@ class RESTORE(DipyDiffusionInterface): """ + input_spec = RESTOREInputSpec output_spec = RESTOREOutputSpec @@ -104,7 +108,7 @@ def _run_interface(self, runtime): noise_msk = noise_msk.astype(np.uint8) try_b0 = False elif np.all(data[msk == 0, 0] == 0): - IFLOGGER.info('Input data are masked.') + IFLOGGER.info("Input data are masked.") noise_msk = msk.reshape(-1).astype(np.uint8) else: noise_msk = (1 - msk).reshape(-1).astype(np.uint8) @@ -113,8 +117,9 @@ def _run_interface(self, runtime): dsample = data.reshape(-1, data.shape[-1]) if try_b0 and (nb0 > 1): - noise_data = dsample.take( - np.where(gtab.b0s_mask), axis=-1)[noise_msk == 0, ...] + noise_data = dsample.take(np.where(gtab.b0s_mask), axis=-1)[ + noise_msk == 0, ... + ] n = nb0 else: nodiff = np.where(~gtab.b0s_mask) @@ -126,22 +131,25 @@ def _run_interface(self, runtime): # Estimate sigma required by RESTORE mean_std = np.median(noise_data.std(-1)) try: - bias = (1. - np.sqrt(2. / (n - 1)) * (gamma(n / 2.) / gamma( - (n - 1) / 2.))) + bias = 1.0 - np.sqrt(2.0 / (n - 1)) * ( + gamma(n / 2.0) / gamma((n - 1) / 2.0) + ) except: - bias = .0 + bias = 0.0 pass sigma = mean_std * (1 + bias) if sigma == 0: - IFLOGGER.warning('Noise std is 0.0, looks like data was masked and ' - 'noise cannot be estimated correctly. Using default ' - 'tensor model instead of RESTORE.') + IFLOGGER.warning( + "Noise std is 0.0, looks like data was masked and " + "noise cannot be estimated correctly. Using default " + "tensor model instead of RESTORE." 
+ ) dti = TensorModel(gtab) else: - IFLOGGER.info('Performing RESTORE with noise std=%.4f.', sigma) - dti = TensorModel(gtab, fit_method='RESTORE', sigma=sigma) + IFLOGGER.info("Performing RESTORE with noise std=%.4f.", sigma) + dti = TensorModel(gtab, fit_method="RESTORE", sigma=sigma) try: fit_restore = dti.fit(data, msk) @@ -150,13 +158,14 @@ def _run_interface(self, runtime): fit_restore = dti.fit(data, msk) hdr.set_data_dtype(np.float32) - hdr['data_type'] = 16 + hdr["data_type"] = 16 for k in self._outputs().get(): scalar = getattr(fit_restore, k) hdr.set_data_shape(np.shape(scalar)) nb.Nifti1Image(scalar.astype(np.float32), affine, hdr).to_filename( - self._gen_filename(k)) + self._gen_filename(k) + ) return runtime @@ -168,25 +177,25 @@ def _list_outputs(self): class EstimateResponseSHInputSpec(DipyBaseInterfaceInputSpec): - in_evals = File( - exists=True, mandatory=True, desc=('input eigenvalues file')) - in_mask = File( - exists=True, desc=('input mask in which we find single fibers')) - fa_thresh = traits.Float(0.7, usedefault=True, desc=('FA threshold')) + in_evals = File(exists=True, mandatory=True, desc=("input eigenvalues file")) + in_mask = File(exists=True, desc=("input mask in which we find single fibers")) + fa_thresh = traits.Float(0.7, usedefault=True, desc=("FA threshold")) roi_radius = traits.Int( - 10, usedefault=True, desc=('ROI radius to be used in auto_response')) + 10, usedefault=True, desc=("ROI radius to be used in auto_response") + ) auto = traits.Bool( - xor=['recursive'], desc='use the auto_response estimator from dipy') + xor=["recursive"], desc="use the auto_response estimator from dipy" + ) recursive = traits.Bool( - xor=['auto'], desc='use the recursive response estimator from dipy') - response = File( - 'response.txt', usedefault=True, desc=('the output response file')) - out_mask = File('wm_mask.nii.gz', usedefault=True, desc='computed wm mask') + xor=["auto"], desc="use the recursive response estimator from dipy" + ) + response = File("response.txt", usedefault=True, desc=("the output response file")) + out_mask = File("wm_mask.nii.gz", usedefault=True, desc="computed wm mask") class EstimateResponseSHOutputSpec(TraitedSpec): - response = File(exists=True, desc=('the response file')) - out_mask = File(exists=True, desc=('output wm mask')) + response = File(exists=True, desc=("the response file")) + out_mask = File(exists=True, desc=("output wm mask")) class EstimateResponseSH(DipyDiffusionInterface): @@ -209,6 +218,7 @@ class EstimateResponseSH(DipyDiffusionInterface): """ + input_spec = EstimateResponseSHInputSpec output_spec = EstimateResponseSHOutputSpec @@ -242,12 +252,14 @@ def _run_interface(self, runtime): gtab, data, roi_radius=self.inputs.roi_radius, - fa_thr=self.inputs.fa_thresh) + fa_thr=self.inputs.fa_thresh, + ) response = response[0].tolist() + [S0] elif self.inputs.recursive: MD = np.nan_to_num(mean_diffusivity(evals)) * msk - indices = np.logical_or(FA >= 0.4, - (np.logical_and(FA >= 0.15, MD >= 0.0011))) + indices = np.logical_or( + FA >= 0.4, (np.logical_and(FA >= 0.15, MD >= 0.0011)) + ) data = nb.load(self.inputs.in_file).get_data() response = recursive_response( gtab, @@ -259,7 +271,8 @@ def _run_interface(self, runtime): init_trace=0.0021, iter=8, convergence=0.001, - parallel=True) + parallel=True, + ) ratio = abs(response[1] / response[0]) else: lambdas = evals[indices] @@ -269,42 +282,44 @@ def _run_interface(self, runtime): ratio = abs(response[1] / response[0]) if ratio > 0.25: - IFLOGGER.warning('Estimated response is not 
prolate enough. ' - 'Ratio=%0.3f.', ratio) - elif ratio < 1.e-5 or np.any(np.isnan(response)): - response = np.array([1.8e-3, 3.6e-4, 3.6e-4, S0]) IFLOGGER.warning( - 'Estimated response is not valid, using a default one') + "Estimated response is not prolate enough. " "Ratio=%0.3f.", ratio + ) + elif ratio < 1.0e-5 or np.any(np.isnan(response)): + response = np.array([1.8e-3, 3.6e-4, 3.6e-4, S0]) + IFLOGGER.warning("Estimated response is not valid, using a default one") else: - IFLOGGER.info('Estimated response: %s', str(response[:3])) + IFLOGGER.info("Estimated response: %s", str(response[:3])) np.savetxt(op.abspath(self.inputs.response), response) wm_mask = np.zeros_like(FA) wm_mask[indices] = 1 nb.Nifti1Image(wm_mask.astype(np.uint8), affine, None).to_filename( - op.abspath(self.inputs.out_mask)) + op.abspath(self.inputs.out_mask) + ) return runtime def _list_outputs(self): outputs = self._outputs().get() - outputs['response'] = op.abspath(self.inputs.response) - outputs['out_mask'] = op.abspath(self.inputs.out_mask) + outputs["response"] = op.abspath(self.inputs.response) + outputs["out_mask"] = op.abspath(self.inputs.out_mask) return outputs class CSDInputSpec(DipyBaseInterfaceInputSpec): - in_mask = File(exists=True, desc=('input mask in which compute tensors')) - response = File(exists=True, desc=('single fiber estimated response')) + in_mask = File(exists=True, desc=("input mask in which compute tensors")) + response = File(exists=True, desc=("single fiber estimated response")) sh_order = traits.Int( - 8, usedefault=True, desc=('maximal shperical harmonics order')) - save_fods = traits.Bool(True, usedefault=True, desc=('save fODFs in file')) - out_fods = File(desc=('fODFs output file name')) + 8, usedefault=True, desc=("maximal shperical harmonics order") + ) + save_fods = traits.Bool(True, usedefault=True, desc=("save fODFs in file")) + out_fods = File(desc=("fODFs output file name")) class CSDOutputSpec(TraitedSpec): - model = File(desc='Python pickled object of the CSD model fitted.') - out_fods = File(desc=('fODFs output file name')) + model = File(desc="Python pickled object of the CSD model fitted.") + out_fods = File(desc=("fODFs output file name")) class CSD(DipyDiffusionInterface): @@ -328,12 +343,14 @@ class CSD(DipyDiffusionInterface): >>> csd.inputs.in_bvec = 'bvecs' >>> res = csd.run() # doctest: +SKIP """ + input_spec = CSDInputSpec output_spec = CSDOutputSpec def _run_interface(self, runtime): from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel from dipy.data import get_sphere + # import marshal as pickle import pickle as pickle import gzip @@ -355,30 +372,33 @@ def _run_interface(self, runtime): ratio = response[0][1] / response[0][0] if abs(ratio - 0.2) > 0.1: - IFLOGGER.warning('Estimated response is not prolate enough. ' - 'Ratio=%0.3f.', ratio) + IFLOGGER.warning( + "Estimated response is not prolate enough. 
" "Ratio=%0.3f.", ratio + ) csd_model = ConstrainedSphericalDeconvModel( - gtab, response, sh_order=self.inputs.sh_order) + gtab, response, sh_order=self.inputs.sh_order + ) - IFLOGGER.info('Fitting CSD model') + IFLOGGER.info("Fitting CSD model") csd_fit = csd_model.fit(data, msk) - f = gzip.open(self._gen_filename('csdmodel', ext='.pklz'), 'wb') + f = gzip.open(self._gen_filename("csdmodel", ext=".pklz"), "wb") pickle.dump(csd_model, f, -1) f.close() if self.inputs.save_fods: - sphere = get_sphere('symmetric724') + sphere = get_sphere("symmetric724") fods = csd_fit.odf(sphere) - nb.Nifti1Image(fods.astype(np.float32), img.affine, - None).to_filename(self._gen_filename('fods')) + nb.Nifti1Image(fods.astype(np.float32), img.affine, None).to_filename( + self._gen_filename("fods") + ) return runtime def _list_outputs(self): outputs = self._outputs().get() - outputs['model'] = self._gen_filename('csdmodel', ext='.pklz') + outputs["model"] = self._gen_filename("csdmodel", ext=".pklz") if self.inputs.save_fods: - outputs['out_fods'] = self._gen_filename('fods') + outputs["out_fods"] = self._gen_filename("fods") return outputs diff --git a/nipype/interfaces/dipy/registration.py b/nipype/interfaces/dipy/registration.py index e2e5c1e7ec..f70c566194 100644 --- a/nipype/interfaces/dipy/registration.py +++ b/nipype/interfaces/dipy/registration.py @@ -1,21 +1,21 @@ - from distutils.version import LooseVersion from ... import logging -from .base import (HAVE_DIPY, dipy_version, dipy_to_nipype_interface, - get_dipy_workflows) +from .base import HAVE_DIPY, dipy_version, dipy_to_nipype_interface, get_dipy_workflows -IFLOGGER = logging.getLogger('nipype.interface') +IFLOGGER = logging.getLogger("nipype.interface") -if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('0.15'): +if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion("0.15"): from dipy.workflows import align l_wkflw = get_dipy_workflows(align) for name, obj in l_wkflw: - new_name = name.replace('Flow', '') + new_name = name.replace("Flow", "") globals()[new_name] = dipy_to_nipype_interface(new_name, obj) del l_wkflw else: - IFLOGGER.info("We advise you to upgrade DIPY version. This upgrade will" - " open access to more function") + IFLOGGER.info( + "We advise you to upgrade DIPY version. This upgrade will" + " open access to more function" + ) diff --git a/nipype/interfaces/dipy/setup.py b/nipype/interfaces/dipy/setup.py index 408d7af731..082d88f841 100644 --- a/nipype/interfaces/dipy/setup.py +++ b/nipype/interfaces/dipy/setup.py @@ -3,15 +3,16 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: -def configuration(parent_package='', top_path=None): +def configuration(parent_package="", top_path=None): from numpy.distutils.misc_util import Configuration - config = Configuration('dipy', parent_package, top_path) + config = Configuration("dipy", parent_package, top_path) # config.add_data_dir('tests') return config -if __name__ == '__main__': +if __name__ == "__main__": from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) + + setup(**configuration(top_path="").todict()) diff --git a/nipype/interfaces/dipy/simulate.py b/nipype/interfaces/dipy/simulate.py index dfab8b2118..d9f0ed1023 100644 --- a/nipype/interfaces/dipy/simulate.py +++ b/nipype/interfaces/dipy/simulate.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -from multiprocessing import (Pool, cpu_count) +from multiprocessing import Pool, cpu_count import os.path as op import numpy as np @@ -7,73 +7,84 @@ from ... 
import logging from ...utils import NUMPY_MMAP -from ..base import (traits, TraitedSpec, BaseInterfaceInputSpec, File, - InputMultiPath, isdefined) +from ..base import ( + traits, + TraitedSpec, + BaseInterfaceInputSpec, + File, + InputMultiPath, + isdefined, +) from .base import DipyBaseInterface -IFLOGGER = logging.getLogger('nipype.interface') + +IFLOGGER = logging.getLogger("nipype.interface") class SimulateMultiTensorInputSpec(BaseInterfaceInputSpec): in_dirs = InputMultiPath( - File(exists=True), - mandatory=True, - desc='list of fibers (principal directions)') + File(exists=True), mandatory=True, desc="list of fibers (principal directions)" + ) in_frac = InputMultiPath( - File(exists=True), - mandatory=True, - desc=('volume fraction of each fiber')) + File(exists=True), mandatory=True, desc=("volume fraction of each fiber") + ) in_vfms = InputMultiPath( File(exists=True), mandatory=True, - desc=('volume fractions of isotropic ' - 'compartiments')) - in_mask = File(exists=True, desc='mask to simulate data') + desc=("volume fractions of isotropic " "compartiments"), + ) + in_mask = File(exists=True, desc="mask to simulate data") diff_iso = traits.List( [3000e-6, 960e-6, 680e-6], traits.Float, usedefault=True, - desc='Diffusivity of isotropic compartments') + desc="Diffusivity of isotropic compartments", + ) diff_sf = traits.Tuple( (1700e-6, 200e-6, 200e-6), traits.Float, traits.Float, traits.Float, usedefault=True, - desc='Single fiber tensor') - - n_proc = traits.Int(0, usedefault=True, desc='number of processes') - baseline = File(exists=True, mandatory=True, desc='baseline T2 signal') - gradients = File(exists=True, desc='gradients file') - in_bvec = File(exists=True, desc='input bvecs file') - in_bval = File(exists=True, desc='input bvals file') + desc="Single fiber tensor", + ) + + n_proc = traits.Int(0, usedefault=True, desc="number of processes") + baseline = File(exists=True, mandatory=True, desc="baseline T2 signal") + gradients = File(exists=True, desc="gradients file") + in_bvec = File(exists=True, desc="input bvecs file") + in_bval = File(exists=True, desc="input bvals file") num_dirs = traits.Int( 32, usedefault=True, - desc=('number of gradient directions (when table ' - 'is automatically generated)')) + desc=( + "number of gradient directions (when table " "is automatically generated)" + ), + ) bvalues = traits.List( traits.Int, value=[1000, 3000], usedefault=True, - desc=('list of b-values (when table ' - 'is automatically generated)')) + desc=("list of b-values (when table " "is automatically generated)"), + ) out_file = File( - 'sim_dwi.nii.gz', + "sim_dwi.nii.gz", usedefault=True, - desc='output file with fractions to be simluated') + desc="output file with fractions to be simluated", + ) out_mask = File( - 'sim_msk.nii.gz', usedefault=True, desc='file with the mask simulated') - out_bvec = File('bvec.sim', usedefault=True, desc='simulated b vectors') - out_bval = File('bval.sim', usedefault=True, desc='simulated b values') - snr = traits.Int(0, usedefault=True, desc='signal-to-noise ratio (dB)') + "sim_msk.nii.gz", usedefault=True, desc="file with the mask simulated" + ) + out_bvec = File("bvec.sim", usedefault=True, desc="simulated b vectors") + out_bval = File("bval.sim", usedefault=True, desc="simulated b values") + snr = traits.Int(0, usedefault=True, desc="signal-to-noise ratio (dB)") class SimulateMultiTensorOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='simulated DWIs') - out_mask = File(exists=True, desc='mask file') - out_bvec = 
File(exists=True, desc='simulated b vectors') - out_bval = File(exists=True, desc='simulated b values') + out_file = File(exists=True, desc="simulated DWIs") + out_mask = File(exists=True, desc="mask file") + out_bvec = File(exists=True, desc="simulated b vectors") + out_bval = File(exists=True, desc="simulated b values") class SimulateMultiTensor(DipyBaseInterface): @@ -95,6 +106,7 @@ class SimulateMultiTensor(DipyBaseInterface): >>> sim.inputs.in_bval = 'bvals' >>> sim.run() # doctest: +SKIP """ + input_spec = SimulateMultiTensorInputSpec output_spec = SimulateMultiTensorOutputSpec @@ -108,8 +120,7 @@ def _run_interface(self, runtime): bvecs = np.loadtxt(self.inputs.in_bvec).T gtab = gradient_table(bvals, bvecs) else: - gtab = _generate_gradients(self.inputs.num_dirs, - self.inputs.bvalues) + gtab = _generate_gradients(self.inputs.num_dirs, self.inputs.bvalues) ndirs = len(gtab.bvals) np.savetxt(op.abspath(self.inputs.out_bvec), gtab.bvecs.T) np.savetxt(op.abspath(self.inputs.out_bval), gtab.bvals) @@ -123,15 +134,17 @@ def _run_interface(self, runtime): # Check and load sticks and their volume fractions nsticks = len(self.inputs.in_dirs) if len(self.inputs.in_frac) != nsticks: - raise RuntimeError(('Number of sticks and their volume fractions' - ' must match.')) + raise RuntimeError( + ("Number of sticks and their volume fractions" " must match.") + ) # Volume fractions of isotropic compartments nballs = len(self.inputs.in_vfms) vfs = np.squeeze( - nb.concat_images([ - nb.load(f, mmap=NUMPY_MMAP) for f in self.inputs.in_vfms - ]).get_data()) + nb.concat_images( + [nb.load(f, mmap=NUMPY_MMAP) for f in self.inputs.in_vfms] + ).get_data() + ) if nballs == 1: vfs = vfs[..., np.newaxis] total_vf = np.sum(vfs, axis=3) @@ -150,9 +163,10 @@ def _run_interface(self, runtime): # Fiber fractions ffsim = nb.concat_images( - [nb.load(f, mmap=NUMPY_MMAP) for f in self.inputs.in_frac]) + [nb.load(f, mmap=NUMPY_MMAP) for f in self.inputs.in_frac] + ) ffs = np.nan_to_num(np.squeeze(ffsim.get_data())) # fiber fractions - ffs = np.clip(ffs, 0., 1.) + ffs = np.clip(ffs, 0.0, 1.0) if nsticks == 1: ffs = ffs[..., np.newaxis] @@ -172,19 +186,19 @@ def _run_interface(self, runtime): for i in range(vfs.shape[-1]): vfs[..., i] -= total_ff - vfs = np.clip(vfs, 0., 1.) + vfs = np.clip(vfs, 0.0, 1.0) fractions = np.concatenate((ffs, vfs), axis=3) - nb.Nifti1Image(fractions, aff, None).to_filename('fractions.nii.gz') - nb.Nifti1Image(np.sum(fractions, axis=3), aff, - None).to_filename('total_vf.nii.gz') + nb.Nifti1Image(fractions, aff, None).to_filename("fractions.nii.gz") + nb.Nifti1Image(np.sum(fractions, axis=3), aff, None).to_filename( + "total_vf.nii.gz" + ) mhdr = hdr.copy() mhdr.set_data_dtype(np.uint8) - mhdr.set_xyzt_units('mm', 'sec') - nb.Nifti1Image(msk, aff, mhdr).to_filename( - op.abspath(self.inputs.out_mask)) + mhdr.set_xyzt_units("mm", "sec") + nb.Nifti1Image(msk, aff, mhdr).to_filename(op.abspath(self.inputs.out_mask)) # Initialize stack of args fracs = fractions[msk > 0] @@ -206,7 +220,7 @@ def _run_interface(self, runtime): for d in range(nballs): fd = np.random.randn(nvox, 3) w = np.linalg.norm(fd, axis=1) - fd[w < np.finfo(float).eps, ...] = np.array([1., 0., 0.]) + fd[w < np.finfo(float).eps, ...] 
= np.array([1.0, 0.0, 0.0]) w[w < np.finfo(float).eps] = 1.0 fd /= w[..., np.newaxis] dirs = np.hstack((dirs, fd)) @@ -214,26 +228,23 @@ def _run_interface(self, runtime): sf_evals = list(self.inputs.diff_sf) ba_evals = list(self.inputs.diff_iso) - mevals = [sf_evals] * nsticks + \ - [[ba_evals[d]] * 3 for d in range(nballs)] + mevals = [sf_evals] * nsticks + [[ba_evals[d]] * 3 for d in range(nballs)] b0 = b0_im.get_data()[msk > 0] args = [] for i in range(nvox): - args.append({ - 'fractions': - fracs[i, ...].tolist(), - 'sticks': - [tuple(dirs[i, j:j + 3]) for j in range(nsticks + nballs)], - 'gradients': - gtab, - 'mevals': - mevals, - 'S0': - b0[i], - 'snr': - self.inputs.snr - }) + args.append( + { + "fractions": fracs[i, ...].tolist(), + "sticks": [ + tuple(dirs[i, j : j + 3]) for j in range(nsticks + nballs) + ], + "gradients": gtab, + "mevals": mevals, + "S0": b0[i], + "snr": self.inputs.snr, + } + ) n_proc = self.inputs.n_proc if n_proc == 0: @@ -246,30 +257,34 @@ def _run_interface(self, runtime): # Simulate sticks using dipy IFLOGGER.info( - 'Starting simulation of %d voxels, %d diffusion directions.', - len(args), ndirs) + "Starting simulation of %d voxels, %d diffusion directions.", + len(args), + ndirs, + ) result = np.array(pool.map(_compute_voxel, args)) if np.shape(result)[1] != ndirs: - raise RuntimeError(('Computed directions do not match number' - 'of b-values.')) + raise RuntimeError( + ("Computed directions do not match number" "of b-values.") + ) signal = np.zeros((shape[0], shape[1], shape[2], ndirs)) signal[msk > 0] = result simhdr = hdr.copy() simhdr.set_data_dtype(np.float32) - simhdr.set_xyzt_units('mm', 'sec') + simhdr.set_xyzt_units("mm", "sec") nb.Nifti1Image(signal.astype(np.float32), aff, simhdr).to_filename( - op.abspath(self.inputs.out_file)) + op.abspath(self.inputs.out_file) + ) return runtime def _list_outputs(self): outputs = self._outputs().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) - outputs['out_mask'] = op.abspath(self.inputs.out_mask) - outputs['out_bvec'] = op.abspath(self.inputs.out_bvec) - outputs['out_bval'] = op.abspath(self.inputs.out_bval) + outputs["out_file"] = op.abspath(self.inputs.out_file) + outputs["out_mask"] = op.abspath(self.inputs.out_mask) + outputs["out_bvec"] = op.abspath(self.inputs.out_bvec) + outputs["out_bval"] = op.abspath(self.inputs.out_bval) return outputs @@ -291,24 +306,25 @@ def _compute_voxel(args): """ from dipy.sims.voxel import multi_tensor - ffs = args['fractions'] - gtab = args['gradients'] + ffs = args["fractions"] + gtab = args["gradients"] signal = np.zeros_like(gtab.bvals, dtype=np.float32) # Simulate dwi signal sf_vf = np.sum(ffs) if sf_vf > 0.0: - ffs = ((np.array(ffs) / sf_vf) * 100) - snr = args['snr'] if args['snr'] > 0 else None + ffs = (np.array(ffs) / sf_vf) * 100 + snr = args["snr"] if args["snr"] > 0 else None try: signal, _ = multi_tensor( gtab, - args['mevals'], - S0=args['S0'], - angles=args['sticks'], + args["mevals"], + S0=args["S0"], + angles=args["sticks"], fractions=ffs, - snr=snr) + snr=snr, + ) except Exception: pass @@ -322,7 +338,7 @@ def _generate_gradients(ndirs=64, values=[1000, 3000], nb0s=1): """ import numpy as np - from dipy.core.sphere import (disperse_charges, Sphere, HemiSphere) + from dipy.core.sphere import disperse_charges, Sphere, HemiSphere from dipy.core.gradients import gradient_table theta = np.pi * np.random.rand(ndirs) diff --git a/nipype/interfaces/dipy/stats.py b/nipype/interfaces/dipy/stats.py index 8f55b3322a..fff0184a56 100644 --- 
a/nipype/interfaces/dipy/stats.py +++ b/nipype/interfaces/dipy/stats.py @@ -1,20 +1,20 @@ - from distutils.version import LooseVersion from ... import logging -from .base import (HAVE_DIPY, dipy_version, dipy_to_nipype_interface, - get_dipy_workflows) +from .base import HAVE_DIPY, dipy_version, dipy_to_nipype_interface, get_dipy_workflows -IFLOGGER = logging.getLogger('nipype.interface') +IFLOGGER = logging.getLogger("nipype.interface") -if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('0.16'): +if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion("0.16"): from dipy.workflows import stats l_wkflw = get_dipy_workflows(stats) for name, obj in l_wkflw: - new_name = name.replace('Flow', '') + new_name = name.replace("Flow", "") globals()[new_name] = dipy_to_nipype_interface(new_name, obj) del l_wkflw else: - IFLOGGER.info("We advise you to upgrade DIPY version. This upgrade will" - " open access to more function") + IFLOGGER.info( + "We advise you to upgrade DIPY version. This upgrade will" + " open access to more function" + ) diff --git a/nipype/interfaces/dipy/tensors.py b/nipype/interfaces/dipy/tensors.py index 3dc06c0356..2f9ad95f5b 100644 --- a/nipype/interfaces/dipy/tensors.py +++ b/nipype/interfaces/dipy/tensors.py @@ -6,11 +6,11 @@ from ..base import TraitedSpec, File, isdefined from .base import DipyDiffusionInterface, DipyBaseInterfaceInputSpec -IFLOGGER = logging.getLogger('nipype.interface') +IFLOGGER = logging.getLogger("nipype.interface") class DTIInputSpec(DipyBaseInterfaceInputSpec): - mask_file = File(exists=True, desc='An optional white matter mask') + mask_file = File(exists=True, desc="An optional white matter mask") class DTIOutputSpec(TraitedSpec): @@ -36,12 +36,14 @@ class DTI(DipyDiffusionInterface): >>> dti.inputs.in_bval = 'bvals' >>> dti.run() # doctest: +SKIP """ + input_spec = DTIInputSpec output_spec = DTIOutputSpec def _run_interface(self, runtime): from dipy.reconst import dti from dipy.io.utils import nifti1_symmat + gtab = self._get_gradient_table() img = nb.load(self.inputs.in_file) @@ -56,22 +58,22 @@ def _run_interface(self, runtime): ten_fit = tenmodel.fit(data, mask) lower_triangular = ten_fit.lower_triangular() img = nifti1_symmat(lower_triangular, affine) - out_file = self._gen_filename('dti') + out_file = self._gen_filename("dti") nb.save(img, out_file) - IFLOGGER.info('DTI parameters image saved as %s', out_file) + IFLOGGER.info("DTI parameters image saved as %s", out_file) # FA MD RD and AD for metric in ["fa", "md", "rd", "ad", "color_fa"]: data = getattr(ten_fit, metric).astype("float32") out_name = self._gen_filename(metric) nb.Nifti1Image(data, affine).to_filename(out_name) - IFLOGGER.info('DTI %s image saved as %s', metric, out_name) + IFLOGGER.info("DTI %s image saved as %s", metric, out_name) return runtime def _list_outputs(self): outputs = self._outputs().get() - outputs['out_file'] = self._gen_filename('dti') + outputs["out_file"] = self._gen_filename("dti") for metric in ["fa", "md", "rd", "ad", "color_fa"]: outputs["{}_file".format(metric)] = self._gen_filename(metric) @@ -80,7 +82,7 @@ def _list_outputs(self): class TensorModeInputSpec(DipyBaseInterfaceInputSpec): - mask_file = File(exists=True, desc='An optional white matter mask') + mask_file = File(exists=True, desc="An optional white matter mask") class TensorModeOutputSpec(TraitedSpec): @@ -109,6 +111,7 @@ class TensorMode(DipyDiffusionInterface): >>> mode.inputs.in_bval = 'bvals' >>> mode.run() # doctest: +SKIP """ + input_spec = TensorModeInputSpec 
output_spec = TensorModeOutputSpec @@ -136,12 +139,12 @@ def _run_interface(self, runtime): # Write as a 3D Nifti image with the original affine img = nb.Nifti1Image(mode_data, affine) - out_file = self._gen_filename('mode') + out_file = self._gen_filename("mode") nb.save(img, out_file) - IFLOGGER.info('Tensor mode image saved as %s', out_file) + IFLOGGER.info("Tensor mode image saved as %s", out_file) return runtime def _list_outputs(self): outputs = self._outputs().get() - outputs['out_file'] = self._gen_filename('mode') + outputs["out_file"] = self._gen_filename("mode") return outputs diff --git a/nipype/interfaces/dipy/tests/test_auto_APMQball.py b/nipype/interfaces/dipy/tests/test_auto_APMQball.py index 35a73b8d87..fcc97ebf70 100644 --- a/nipype/interfaces/dipy/tests/test_auto_APMQball.py +++ b/nipype/interfaces/dipy/tests/test_auto_APMQball.py @@ -4,20 +4,11 @@ def test_APMQball_inputs(): input_map = dict( - b0_thres=dict(usedefault=True, ), - in_bval=dict( - extensions=None, - mandatory=True, - ), - in_bvec=dict( - extensions=None, - mandatory=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), - mask_file=dict(extensions=None, ), + b0_thres=dict(usedefault=True,), + in_bval=dict(extensions=None, mandatory=True,), + in_bvec=dict(extensions=None, mandatory=True,), + in_file=dict(extensions=None, mandatory=True,), + mask_file=dict(extensions=None,), out_prefix=dict(), ) inputs = APMQball.input_spec() @@ -25,8 +16,10 @@ def test_APMQball_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_APMQball_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = APMQball.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_CSD.py b/nipype/interfaces/dipy/tests/test_auto_CSD.py index dde8f52295..a9c92b02b1 100644 --- a/nipype/interfaces/dipy/tests/test_auto_CSD.py +++ b/nipype/interfaces/dipy/tests/test_auto_CSD.py @@ -4,36 +4,26 @@ def test_CSD_inputs(): input_map = dict( - b0_thres=dict(usedefault=True, ), - in_bval=dict( - extensions=None, - mandatory=True, - ), - in_bvec=dict( - extensions=None, - mandatory=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), - in_mask=dict(extensions=None, ), - out_fods=dict(extensions=None, ), + b0_thres=dict(usedefault=True,), + in_bval=dict(extensions=None, mandatory=True,), + in_bvec=dict(extensions=None, mandatory=True,), + in_file=dict(extensions=None, mandatory=True,), + in_mask=dict(extensions=None,), + out_fods=dict(extensions=None,), out_prefix=dict(), - response=dict(extensions=None, ), - save_fods=dict(usedefault=True, ), - sh_order=dict(usedefault=True, ), + response=dict(extensions=None,), + save_fods=dict(usedefault=True,), + sh_order=dict(usedefault=True,), ) inputs = CSD.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CSD_outputs(): - output_map = dict( - model=dict(extensions=None, ), - out_fods=dict(extensions=None, ), - ) + output_map = dict(model=dict(extensions=None,), out_fods=dict(extensions=None,),) outputs = CSD.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_DTI.py b/nipype/interfaces/dipy/tests/test_auto_DTI.py index be2b3375af..8e2482b129 100644 --- 
a/nipype/interfaces/dipy/tests/test_auto_DTI.py +++ b/nipype/interfaces/dipy/tests/test_auto_DTI.py @@ -4,20 +4,11 @@ def test_DTI_inputs(): input_map = dict( - b0_thres=dict(usedefault=True, ), - in_bval=dict( - extensions=None, - mandatory=True, - ), - in_bvec=dict( - extensions=None, - mandatory=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), - mask_file=dict(extensions=None, ), + b0_thres=dict(usedefault=True,), + in_bval=dict(extensions=None, mandatory=True,), + in_bvec=dict(extensions=None, mandatory=True,), + in_file=dict(extensions=None, mandatory=True,), + mask_file=dict(extensions=None,), out_prefix=dict(), ) inputs = DTI.input_spec() @@ -25,14 +16,16 @@ def test_DTI_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DTI_outputs(): output_map = dict( - ad_file=dict(extensions=None, ), - color_fa_file=dict(extensions=None, ), - fa_file=dict(extensions=None, ), - md_file=dict(extensions=None, ), - out_file=dict(extensions=None, ), - rd_file=dict(extensions=None, ), + ad_file=dict(extensions=None,), + color_fa_file=dict(extensions=None,), + fa_file=dict(extensions=None,), + md_file=dict(extensions=None,), + out_file=dict(extensions=None,), + rd_file=dict(extensions=None,), ) outputs = DTI.output_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_Denoise.py b/nipype/interfaces/dipy/tests/test_auto_Denoise.py index c76fc9b18a..453e794f39 100644 --- a/nipype/interfaces/dipy/tests/test_auto_Denoise.py +++ b/nipype/interfaces/dipy/tests/test_auto_Denoise.py @@ -4,19 +4,13 @@ def test_Denoise_inputs(): input_map = dict( - block_radius=dict(usedefault=True, ), - in_file=dict( - extensions=None, - mandatory=True, - ), - in_mask=dict(extensions=None, ), - noise_mask=dict(extensions=None, ), - noise_model=dict( - mandatory=True, - usedefault=True, - ), - patch_radius=dict(usedefault=True, ), - signal_mask=dict(extensions=None, ), + block_radius=dict(usedefault=True,), + in_file=dict(extensions=None, mandatory=True,), + in_mask=dict(extensions=None,), + noise_mask=dict(extensions=None,), + noise_model=dict(mandatory=True, usedefault=True,), + patch_radius=dict(usedefault=True,), + signal_mask=dict(extensions=None,), snr=dict(), ) inputs = Denoise.input_spec() @@ -24,8 +18,10 @@ def test_Denoise_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Denoise_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Denoise.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py b/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py index 48940061f8..657128b050 100644 --- a/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py +++ b/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py @@ -4,19 +4,10 @@ def test_DipyDiffusionInterface_inputs(): input_map = dict( - b0_thres=dict(usedefault=True, ), - in_bval=dict( - extensions=None, - mandatory=True, - ), - in_bvec=dict( - extensions=None, - mandatory=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), + b0_thres=dict(usedefault=True,), + in_bval=dict(extensions=None, mandatory=True,), + in_bvec=dict(extensions=None, mandatory=True,), + in_file=dict(extensions=None, mandatory=True,), 
out_prefix=dict(), ) inputs = DipyDiffusionInterface.input_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py b/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py index a3ca7819a9..9b9cf49d6f 100644 --- a/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py +++ b/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py @@ -4,48 +4,29 @@ def test_EstimateResponseSH_inputs(): input_map = dict( - auto=dict(xor=['recursive'], ), - b0_thres=dict(usedefault=True, ), - fa_thresh=dict(usedefault=True, ), - in_bval=dict( - extensions=None, - mandatory=True, - ), - in_bvec=dict( - extensions=None, - mandatory=True, - ), - in_evals=dict( - extensions=None, - mandatory=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), - in_mask=dict(extensions=None, ), - out_mask=dict( - extensions=None, - usedefault=True, - ), + auto=dict(xor=["recursive"],), + b0_thres=dict(usedefault=True,), + fa_thresh=dict(usedefault=True,), + in_bval=dict(extensions=None, mandatory=True,), + in_bvec=dict(extensions=None, mandatory=True,), + in_evals=dict(extensions=None, mandatory=True,), + in_file=dict(extensions=None, mandatory=True,), + in_mask=dict(extensions=None,), + out_mask=dict(extensions=None, usedefault=True,), out_prefix=dict(), - recursive=dict(xor=['auto'], ), - response=dict( - extensions=None, - usedefault=True, - ), - roi_radius=dict(usedefault=True, ), + recursive=dict(xor=["auto"],), + response=dict(extensions=None, usedefault=True,), + roi_radius=dict(usedefault=True,), ) inputs = EstimateResponseSH.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_EstimateResponseSH_outputs(): - output_map = dict( - out_mask=dict(extensions=None, ), - response=dict(extensions=None, ), - ) + output_map = dict(out_mask=dict(extensions=None,), response=dict(extensions=None,),) outputs = EstimateResponseSH.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_RESTORE.py b/nipype/interfaces/dipy/tests/test_auto_RESTORE.py index 0795f4ea16..a172847174 100644 --- a/nipype/interfaces/dipy/tests/test_auto_RESTORE.py +++ b/nipype/interfaces/dipy/tests/test_auto_RESTORE.py @@ -4,21 +4,12 @@ def test_RESTORE_inputs(): input_map = dict( - b0_thres=dict(usedefault=True, ), - in_bval=dict( - extensions=None, - mandatory=True, - ), - in_bvec=dict( - extensions=None, - mandatory=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), - in_mask=dict(extensions=None, ), - noise_mask=dict(extensions=None, ), + b0_thres=dict(usedefault=True,), + in_bval=dict(extensions=None, mandatory=True,), + in_bvec=dict(extensions=None, mandatory=True,), + in_file=dict(extensions=None, mandatory=True,), + in_mask=dict(extensions=None,), + noise_mask=dict(extensions=None,), out_prefix=dict(), ) inputs = RESTORE.input_spec() @@ -26,15 +17,17 @@ def test_RESTORE_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_RESTORE_outputs(): output_map = dict( - evals=dict(extensions=None, ), - evecs=dict(extensions=None, ), - fa=dict(extensions=None, ), - md=dict(extensions=None, ), - mode=dict(extensions=None, ), - rd=dict(extensions=None, ), - trace=dict(extensions=None, ), + evals=dict(extensions=None,), + evecs=dict(extensions=None,), + fa=dict(extensions=None,), + 
md=dict(extensions=None,), + mode=dict(extensions=None,), + rd=dict(extensions=None,), + trace=dict(extensions=None,), ) outputs = RESTORE.output_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_Resample.py b/nipype/interfaces/dipy/tests/test_auto_Resample.py index 044fdbe8f7..ac1b6ce9cd 100644 --- a/nipype/interfaces/dipy/tests/test_auto_Resample.py +++ b/nipype/interfaces/dipy/tests/test_auto_Resample.py @@ -4,14 +4,8 @@ def test_Resample_inputs(): input_map = dict( - in_file=dict( - extensions=None, - mandatory=True, - ), - interp=dict( - mandatory=True, - usedefault=True, - ), + in_file=dict(extensions=None, mandatory=True,), + interp=dict(mandatory=True, usedefault=True,), vox_size=dict(), ) inputs = Resample.input_spec() @@ -19,8 +13,10 @@ def test_Resample_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Resample_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Resample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py b/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py index 12eea9e961..3202306026 100644 --- a/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py +++ b/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py @@ -4,51 +4,38 @@ def test_SimulateMultiTensor_inputs(): input_map = dict( - baseline=dict( - extensions=None, - mandatory=True, - ), - bvalues=dict(usedefault=True, ), - diff_iso=dict(usedefault=True, ), - diff_sf=dict(usedefault=True, ), - gradients=dict(extensions=None, ), - in_bval=dict(extensions=None, ), - in_bvec=dict(extensions=None, ), - in_dirs=dict(mandatory=True, ), - in_frac=dict(mandatory=True, ), - in_mask=dict(extensions=None, ), - in_vfms=dict(mandatory=True, ), - n_proc=dict(usedefault=True, ), - num_dirs=dict(usedefault=True, ), - out_bval=dict( - extensions=None, - usedefault=True, - ), - out_bvec=dict( - extensions=None, - usedefault=True, - ), - out_file=dict( - extensions=None, - usedefault=True, - ), - out_mask=dict( - extensions=None, - usedefault=True, - ), - snr=dict(usedefault=True, ), + baseline=dict(extensions=None, mandatory=True,), + bvalues=dict(usedefault=True,), + diff_iso=dict(usedefault=True,), + diff_sf=dict(usedefault=True,), + gradients=dict(extensions=None,), + in_bval=dict(extensions=None,), + in_bvec=dict(extensions=None,), + in_dirs=dict(mandatory=True,), + in_frac=dict(mandatory=True,), + in_mask=dict(extensions=None,), + in_vfms=dict(mandatory=True,), + n_proc=dict(usedefault=True,), + num_dirs=dict(usedefault=True,), + out_bval=dict(extensions=None, usedefault=True,), + out_bvec=dict(extensions=None, usedefault=True,), + out_file=dict(extensions=None, usedefault=True,), + out_mask=dict(extensions=None, usedefault=True,), + snr=dict(usedefault=True,), ) inputs = SimulateMultiTensor.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SimulateMultiTensor_outputs(): output_map = dict( - out_bval=dict(extensions=None, ), - out_bvec=dict(extensions=None, ), - out_file=dict(extensions=None, ), - out_mask=dict(extensions=None, ), + out_bval=dict(extensions=None,), + out_bvec=dict(extensions=None,), + out_file=dict(extensions=None,), + out_mask=dict(extensions=None,), ) outputs = 
SimulateMultiTensor.output_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py b/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py index c88795ca1c..bbe4abce94 100644 --- a/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py +++ b/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py @@ -4,52 +4,33 @@ def test_StreamlineTractography_inputs(): input_map = dict( - gfa_thresh=dict( - mandatory=True, - usedefault=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), - in_model=dict(extensions=None, ), - in_peaks=dict(extensions=None, ), - min_angle=dict( - mandatory=True, - usedefault=True, - ), - multiprocess=dict( - mandatory=True, - usedefault=True, - ), - num_seeds=dict( - mandatory=True, - usedefault=True, - ), + gfa_thresh=dict(mandatory=True, usedefault=True,), + in_file=dict(extensions=None, mandatory=True,), + in_model=dict(extensions=None,), + in_peaks=dict(extensions=None,), + min_angle=dict(mandatory=True, usedefault=True,), + multiprocess=dict(mandatory=True, usedefault=True,), + num_seeds=dict(mandatory=True, usedefault=True,), out_prefix=dict(), - peak_threshold=dict( - mandatory=True, - usedefault=True, - ), - save_seeds=dict( - mandatory=True, - usedefault=True, - ), - seed_coord=dict(extensions=None, ), - seed_mask=dict(extensions=None, ), - tracking_mask=dict(extensions=None, ), + peak_threshold=dict(mandatory=True, usedefault=True,), + save_seeds=dict(mandatory=True, usedefault=True,), + seed_coord=dict(extensions=None,), + seed_mask=dict(extensions=None,), + tracking_mask=dict(extensions=None,), ) inputs = StreamlineTractography.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_StreamlineTractography_outputs(): output_map = dict( - gfa=dict(extensions=None, ), - odf_peaks=dict(extensions=None, ), - out_seeds=dict(extensions=None, ), - tracks=dict(extensions=None, ), + gfa=dict(extensions=None,), + odf_peaks=dict(extensions=None,), + out_seeds=dict(extensions=None,), + tracks=dict(extensions=None,), ) outputs = StreamlineTractography.output_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_TensorMode.py b/nipype/interfaces/dipy/tests/test_auto_TensorMode.py index 274c9dbc77..29a01e4a75 100644 --- a/nipype/interfaces/dipy/tests/test_auto_TensorMode.py +++ b/nipype/interfaces/dipy/tests/test_auto_TensorMode.py @@ -4,20 +4,11 @@ def test_TensorMode_inputs(): input_map = dict( - b0_thres=dict(usedefault=True, ), - in_bval=dict( - extensions=None, - mandatory=True, - ), - in_bvec=dict( - extensions=None, - mandatory=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), - mask_file=dict(extensions=None, ), + b0_thres=dict(usedefault=True,), + in_bval=dict(extensions=None, mandatory=True,), + in_bvec=dict(extensions=None, mandatory=True,), + in_file=dict(extensions=None, mandatory=True,), + mask_file=dict(extensions=None,), out_prefix=dict(), ) inputs = TensorMode.input_spec() @@ -25,8 +16,10 @@ def test_TensorMode_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TensorMode_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = TensorMode.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py b/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py index a52b40e0fa..06265ffd0f 100644 --- a/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py +++ b/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py @@ -5,16 +5,10 @@ def test_TrackDensityMap_inputs(): input_map = dict( data_dims=dict(), - in_file=dict( - extensions=None, - mandatory=True, - ), - out_filename=dict( - extensions=None, - usedefault=True, - ), - points_space=dict(usedefault=True, ), - reference=dict(extensions=None, ), + in_file=dict(extensions=None, mandatory=True,), + out_filename=dict(extensions=None, usedefault=True,), + points_space=dict(usedefault=True,), + reference=dict(extensions=None,), voxel_dims=dict(), ) inputs = TrackDensityMap.input_spec() @@ -22,8 +16,10 @@ def test_TrackDensityMap_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TrackDensityMap_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = TrackDensityMap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_base.py b/nipype/interfaces/dipy/tests/test_base.py index 740057bcd3..38922ba0b7 100644 --- a/nipype/interfaces/dipy/tests/test_base.py +++ b/nipype/interfaces/dipy/tests/test_base.py @@ -1,50 +1,72 @@ import pytest from collections import namedtuple from ...base import traits, File, TraitedSpec, BaseInterfaceInputSpec -from ..base import (convert_to_traits_type, create_interface_specs, - dipy_to_nipype_interface, DipyBaseInterface, no_dipy, - get_dipy_workflows) +from ..base import ( + convert_to_traits_type, + create_interface_specs, + dipy_to_nipype_interface, + DipyBaseInterface, + no_dipy, + get_dipy_workflows, +) def test_convert_to_traits_type(): Params = namedtuple("Params", "traits_type is_file") Res = namedtuple("Res", "traits_type is_mandatory") - l_entries = [Params('variable string', False), - Params('variable int', False), - Params('variable float', False), - Params('variable bool', False), - Params('variable complex', False), - Params('variable int, optional', False), - Params('variable string, optional', False), - Params('variable float, optional', False), - Params('variable bool, optional', False), - Params('variable complex, optional', False), - Params('string', False), Params('int', False), - Params('string', True), Params('float', False), - Params('bool', False), Params('complex', False), - Params('string, optional', False), - Params('int, optional', False), - Params('string, optional', True), - Params('float, optional', False), - Params('bool, optional', False), - Params('complex, optional', False), - ] - l_expected = [Res(traits.ListStr, True), Res(traits.ListInt, True), - Res(traits.ListFloat, True), Res(traits.ListBool, True), - Res(traits.ListComplex, True), Res(traits.ListInt, False), - Res(traits.ListStr, False), Res(traits.ListFloat, False), - Res(traits.ListBool, False), Res(traits.ListComplex, False), - Res(traits.Str, True), Res(traits.Int, True), - Res(File, True), Res(traits.Float, True), - Res(traits.Bool, True), Res(traits.Complex, True), - Res(traits.Str, False), Res(traits.Int, False), - Res(File, False), Res(traits.Float, False), - Res(traits.Bool, False), Res(traits.Complex, False), - ] + l_entries = [ + Params("variable string", False), + Params("variable int", False), + 
Params("variable float", False), + Params("variable bool", False), + Params("variable complex", False), + Params("variable int, optional", False), + Params("variable string, optional", False), + Params("variable float, optional", False), + Params("variable bool, optional", False), + Params("variable complex, optional", False), + Params("string", False), + Params("int", False), + Params("string", True), + Params("float", False), + Params("bool", False), + Params("complex", False), + Params("string, optional", False), + Params("int, optional", False), + Params("string, optional", True), + Params("float, optional", False), + Params("bool, optional", False), + Params("complex, optional", False), + ] + l_expected = [ + Res(traits.ListStr, True), + Res(traits.ListInt, True), + Res(traits.ListFloat, True), + Res(traits.ListBool, True), + Res(traits.ListComplex, True), + Res(traits.ListInt, False), + Res(traits.ListStr, False), + Res(traits.ListFloat, False), + Res(traits.ListBool, False), + Res(traits.ListComplex, False), + Res(traits.Str, True), + Res(traits.Int, True), + Res(File, True), + Res(traits.Float, True), + Res(traits.Bool, True), + Res(traits.Complex, True), + Res(traits.Str, False), + Res(traits.Int, False), + Res(File, False), + Res(traits.Float, False), + Res(traits.Bool, False), + Res(traits.Complex, False), + ] for entry, res in zip(l_entries, l_expected): - traits_type, is_mandatory = convert_to_traits_type(entry.traits_type, - entry.is_file) + traits_type, is_mandatory = convert_to_traits_type( + entry.traits_type, entry.is_file + ) assert traits_type == res.traits_type assert is_mandatory == res.is_mandatory @@ -60,28 +82,34 @@ def test_create_interface_specs(): assert new_interface.__name__ == "MyInterface" assert not new_interface().get() - new_interface = create_interface_specs("MyInterface", - BaseClass=BaseInterfaceInputSpec) + new_interface = create_interface_specs( + "MyInterface", BaseClass=BaseInterfaceInputSpec + ) assert new_interface.__base__ == BaseInterfaceInputSpec assert isinstance(new_interface(), BaseInterfaceInputSpec) assert new_interface.__name__ == "MyInterface" assert not new_interface().get() - params = [("params1", "string", ["my description"]), ("params2_files", "string", ["my description @"]), - ("params3", "int, optional", ["useful option"]), ("out_params", "string", ["my out description"])] + params = [ + ("params1", "string", ["my description"]), + ("params2_files", "string", ["my description @"]), + ("params3", "int, optional", ["useful option"]), + ("out_params", "string", ["my out description"]), + ] - new_interface = create_interface_specs("MyInterface", params=params, - BaseClass=BaseInterfaceInputSpec) + new_interface = create_interface_specs( + "MyInterface", params=params, BaseClass=BaseInterfaceInputSpec + ) assert new_interface.__base__ == BaseInterfaceInputSpec assert isinstance(new_interface(), BaseInterfaceInputSpec) assert new_interface.__name__ == "MyInterface" current_params = new_interface().get() assert len(current_params) == 4 - assert 'params1' in current_params.keys() - assert 'params2_files' in current_params.keys() - assert 'params3' in current_params.keys() - assert 'out_params' in current_params.keys() + assert "params1" in current_params.keys() + assert "params2_files" in current_params.keys() + assert "params3" in current_params.keys() + assert "out_params" in current_params.keys() @pytest.mark.skipif(no_dipy(), reason="DIPY is not installed") @@ -89,12 +117,11 @@ def test_dipy_to_nipype_interface(): from 
dipy.workflows.workflow import Workflow class DummyWorkflow(Workflow): - @classmethod def get_short_name(cls): - return 'dwf1' + return "dwf1" - def run(self, in_files, param1=1, out_dir='', out_ref='out1.txt'): + def run(self, in_files, param1=1, out_dir="", out_ref="out1.txt"): """Workflow used to test basic workflows. Parameters @@ -119,19 +146,19 @@ def run(self, in_files, param1=1, out_dir='', out_ref='out1.txt'): assert new_specs.__base__ == DipyBaseInterface assert isinstance(new_specs(), DipyBaseInterface) assert new_specs.__name__ == "MyModelSpec" - assert hasattr(new_specs, 'input_spec') + assert hasattr(new_specs, "input_spec") assert new_specs().input_spec.__base__ == BaseInterfaceInputSpec - assert hasattr(new_specs, 'output_spec') + assert hasattr(new_specs, "output_spec") assert new_specs().output_spec.__base__ == TraitedSpec - assert hasattr(new_specs, '_run_interface') - assert hasattr(new_specs, '_list_outputs') + assert hasattr(new_specs, "_run_interface") + assert hasattr(new_specs, "_list_outputs") params_in = new_specs().inputs.get() params_out = new_specs()._outputs().get() assert len(params_in) == 4 - assert 'in_files' in params_in.keys() - assert 'param1' in params_in.keys() - assert 'out_dir' in params_out.keys() - assert 'out_ref' in params_out.keys() + assert "in_files" in params_in.keys() + assert "param1" in params_in.keys() + assert "out_dir" in params_out.keys() + assert "out_ref" in params_out.keys() with pytest.raises(ValueError): new_specs().run() @@ -143,7 +170,7 @@ def test_get_dipy_workflows(): l_wkflw = get_dipy_workflows(align) for name, obj in l_wkflw: - assert name.endswith('Flow') + assert name.endswith("Flow") assert issubclass(obj, align.Workflow) diff --git a/nipype/interfaces/dipy/tracks.py b/nipype/interfaces/dipy/tracks.py index 8688b8d106..947bf22121 100644 --- a/nipype/interfaces/dipy/tracks.py +++ b/nipype/interfaces/dipy/tracks.py @@ -6,60 +6,65 @@ from distutils.version import LooseVersion from ... 
import logging -from ..base import (TraitedSpec, BaseInterfaceInputSpec, File, isdefined, - traits) -from .base import (DipyBaseInterface, HAVE_DIPY, dipy_version, - dipy_to_nipype_interface, get_dipy_workflows) +from ..base import TraitedSpec, BaseInterfaceInputSpec, File, isdefined, traits +from .base import ( + DipyBaseInterface, + HAVE_DIPY, + dipy_version, + dipy_to_nipype_interface, + get_dipy_workflows, +) -IFLOGGER = logging.getLogger('nipype.interface') +IFLOGGER = logging.getLogger("nipype.interface") -if HAVE_DIPY and (LooseVersion('0.15') >= LooseVersion(dipy_version()) >= LooseVersion('0.16')): +if HAVE_DIPY and ( + LooseVersion("0.15") >= LooseVersion(dipy_version()) >= LooseVersion("0.16") +): try: from dipy.workflows.tracking import LocalFiberTrackingPAMFlow as DetTrackFlow except ImportError: # different name in 0.15 from dipy.workflows.tracking import DetTrackPAMFlow as DetTrackFlow - DeterministicTracking = dipy_to_nipype_interface("DeterministicTracking", - DetTrackFlow) + DeterministicTracking = dipy_to_nipype_interface( + "DeterministicTracking", DetTrackFlow + ) -if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion('0.15'): +if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion("0.15"): from dipy.workflows import segment, tracking l_wkflw = get_dipy_workflows(segment) + get_dipy_workflows(tracking) for name, obj in l_wkflw: - new_name = name.replace('Flow', '') + new_name = name.replace("Flow", "") globals()[new_name] = dipy_to_nipype_interface(new_name, obj) del l_wkflw else: - IFLOGGER.info("We advise you to upgrade DIPY version. This upgrade will" - " open access to more function") + IFLOGGER.info( + "We advise you to upgrade DIPY version. This upgrade will" + " open access to more function" + ) class TrackDensityMapInputSpec(BaseInterfaceInputSpec): - in_file = File( - exists=True, mandatory=True, desc='The input TrackVis track file') + in_file = File(exists=True, mandatory=True, desc="The input TrackVis track file") reference = File( - exists=True, desc='A reference file to define RAS coordinates space') + exists=True, desc="A reference file to define RAS coordinates space" + ) points_space = traits.Enum( - 'rasmm', - 'voxel', - None, - usedefault=True, - desc='coordinates of trk file') + "rasmm", "voxel", None, usedefault=True, desc="coordinates of trk file" + ) voxel_dims = traits.List( - traits.Float, minlen=3, maxlen=3, desc='The size of each voxel in mm.') + traits.Float, minlen=3, maxlen=3, desc="The size of each voxel in mm." + ) data_dims = traits.List( - traits.Int, - minlen=3, - maxlen=3, - desc='The size of the image in voxels.') + traits.Int, minlen=3, maxlen=3, desc="The size of the image in voxels." + ) out_filename = File( - 'tdi.nii', + "tdi.nii", usedefault=True, - desc='The output filename for the tracks in TrackVis ' - '(.trk) format') + desc="The output filename for the tracks in TrackVis " "(.trk) format", + ) class TrackDensityMapOutputSpec(TraitedSpec): @@ -80,6 +85,7 @@ class TrackDensityMap(DipyBaseInterface): >>> trk2tdi.run() # doctest: +SKIP """ + input_spec = TrackDensityMapInputSpec output_spec = TrackDensityMapOutputSpec @@ -97,19 +103,21 @@ def _run_interface(self, runtime): data_dims = refnii.shape[:3] kwargs = dict(affine=affine) else: - IFLOGGER.warning('voxel_dims and data_dims are deprecated as of dipy ' - '0.7.1. Please use reference input instead') + IFLOGGER.warning( + "voxel_dims and data_dims are deprecated as of dipy " + "0.7.1. 
Please use reference input instead" + ) if not isdefined(self.inputs.data_dims): - data_dims = header['dim'] + data_dims = header["dim"] else: data_dims = self.inputs.data_dims if not isdefined(self.inputs.voxel_dims): - voxel_size = header['voxel_size'] + voxel_size = header["voxel_size"] else: voxel_size = self.inputs.voxel_dims - affine = header['vox_to_ras'] + affine = header["vox_to_ras"] kwargs = dict(voxel_size=voxel_size) data = density_map(streams, data_dims, **kwargs) @@ -118,68 +126,72 @@ def _run_interface(self, runtime): out_file = op.abspath(self.inputs.out_filename) nb.save(img, out_file) - IFLOGGER.info('Track density map saved as %s, size=%s, dimensions=%s', - out_file, img.shape, img.header.get_zooms()) + IFLOGGER.info( + "Track density map saved as %s, size=%s, dimensions=%s", + out_file, + img.shape, + img.header.get_zooms(), + ) return runtime def _list_outputs(self): outputs = self._outputs().get() - outputs['out_file'] = op.abspath(self.inputs.out_filename) + outputs["out_file"] = op.abspath(self.inputs.out_filename) return outputs class StreamlineTractographyInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc=('input diffusion data')) - in_model = File(exists=True, desc=('input f/d-ODF model extracted from.')) - tracking_mask = File( - exists=True, desc=('input mask within which perform tracking')) - seed_mask = File( - exists=True, desc=('input mask within which perform seeding')) - in_peaks = File(exists=True, desc=('peaks computed from the odf')) + in_file = File(exists=True, mandatory=True, desc=("input diffusion data")) + in_model = File(exists=True, desc=("input f/d-ODF model extracted from.")) + tracking_mask = File(exists=True, desc=("input mask within which perform tracking")) + seed_mask = File(exists=True, desc=("input mask within which perform seeding")) + in_peaks = File(exists=True, desc=("peaks computed from the odf")) seed_coord = File( exists=True, - desc=('file containing the list of seed voxel ' - 'coordinates (N,3)')) + desc=("file containing the list of seed voxel " "coordinates (N,3)"), + ) gfa_thresh = traits.Float( 0.2, mandatory=True, usedefault=True, - desc=('GFA threshold to compute tracking mask')) + desc=("GFA threshold to compute tracking mask"), + ) peak_threshold = traits.Float( 0.5, mandatory=True, usedefault=True, - desc=('threshold to consider peaks from model')) + desc=("threshold to consider peaks from model"), + ) min_angle = traits.Float( - 25.0, - mandatory=True, - usedefault=True, - desc=('minimum separation angle')) + 25.0, mandatory=True, usedefault=True, desc=("minimum separation angle") + ) multiprocess = traits.Bool( - True, mandatory=True, usedefault=True, desc=('use multiprocessing')) + True, mandatory=True, usedefault=True, desc=("use multiprocessing") + ) save_seeds = traits.Bool( - False, - mandatory=True, - usedefault=True, - desc=('save seeding voxels coordinates')) + False, mandatory=True, usedefault=True, desc=("save seeding voxels coordinates") + ) num_seeds = traits.Int( 10000, mandatory=True, usedefault=True, - desc=('desired number of tracks in tractography')) - out_prefix = traits.Str(desc=('output prefix for file names')) + desc=("desired number of tracks in tractography"), + ) + out_prefix = traits.Str(desc=("output prefix for file names")) class StreamlineTractographyOutputSpec(TraitedSpec): - tracks = File(desc='TrackVis file containing extracted streamlines') + tracks = File(desc="TrackVis file containing extracted streamlines") gfa = File( - desc=('The resulting GFA 
(generalized FA) computed using the ' - 'peaks of the ODF')) - odf_peaks = File(desc=('peaks computed from the odf')) + desc=( + "The resulting GFA (generalized FA) computed using the " "peaks of the ODF" + ) + ) + odf_peaks = File(desc=("peaks computed from the odf")) out_seeds = File( - desc=('file containing the (N,3) *voxel* coordinates used' - ' in seeding.')) + desc=("file containing the (N,3) *voxel* coordinates used" " in seeding.") + ) class StreamlineTractography(DipyBaseInterface): @@ -199,6 +211,7 @@ class StreamlineTractography(DipyBaseInterface): >>> track.inputs.tracking_mask = 'dilated_wm_mask.nii' >>> res = track.run() # doctest: +SKIP """ + input_spec = StreamlineTractographyInputSpec output_spec = StreamlineTractographyOutputSpec @@ -206,14 +219,15 @@ def _run_interface(self, runtime): from dipy.reconst.peaks import peaks_from_model from dipy.tracking.eudx import EuDX from dipy.data import get_sphere + # import marshal as pickle import pickle as pickle import gzip - if (not (isdefined(self.inputs.in_model) - or isdefined(self.inputs.in_peaks))): - raise RuntimeError(('At least one of in_model or in_peaks should ' - 'be supplied')) + if not (isdefined(self.inputs.in_model) or isdefined(self.inputs.in_peaks)): + raise RuntimeError( + ("At least one of in_model or in_peaks should " "be supplied") + ) img = nb.load(self.inputs.in_file) imref = nb.four_to_three(img)[0] @@ -222,20 +236,20 @@ def _run_interface(self, runtime): data = img.get_data().astype(np.float32) hdr = imref.header.copy() hdr.set_data_dtype(np.float32) - hdr['data_type'] = 16 + hdr["data_type"] = 16 - sphere = get_sphere('symmetric724') + sphere = get_sphere("symmetric724") self._save_peaks = False if isdefined(self.inputs.in_peaks): - IFLOGGER.info('Peaks file found, skipping ODF peaks search...') - f = gzip.open(self.inputs.in_peaks, 'rb') + IFLOGGER.info("Peaks file found, skipping ODF peaks search...") + f = gzip.open(self.inputs.in_peaks, "rb") peaks = pickle.load(f) f.close() else: self._save_peaks = True - IFLOGGER.info('Loading model and computing ODF peaks') - f = gzip.open(self.inputs.in_model, 'rb') + IFLOGGER.info("Loading model and computing ODF peaks") + f = gzip.open(self.inputs.in_model, "rb") odf_model = pickle.load(f) f.close() @@ -245,17 +259,19 @@ def _run_interface(self, runtime): sphere=sphere, relative_peak_threshold=self.inputs.peak_threshold, min_separation_angle=self.inputs.min_angle, - parallel=self.inputs.multiprocess) + parallel=self.inputs.multiprocess, + ) - f = gzip.open(self._gen_filename('peaks', ext='.pklz'), 'wb') + f = gzip.open(self._gen_filename("peaks", ext=".pklz"), "wb") pickle.dump(peaks, f, -1) f.close() hdr.set_data_shape(peaks.gfa.shape) nb.Nifti1Image(peaks.gfa.astype(np.float32), affine, hdr).to_filename( - self._gen_filename('gfa')) + self._gen_filename("gfa") + ) - IFLOGGER.info('Performing tractography') + IFLOGGER.info("Performing tractography") if isdefined(self.inputs.tracking_mask): msk = nb.load(self.inputs.tracking_mask).get_data() @@ -272,26 +288,31 @@ def _run_interface(self, runtime): elif isdefined(self.inputs.seed_mask): seedmsk = nb.load(self.inputs.seed_mask).get_data() - assert (seedmsk.shape == data.shape[:3]) + assert seedmsk.shape == data.shape[:3] seedmsk[seedmsk > 0] = 1 seedmsk[seedmsk < 1] = 0 seedps = np.array(np.where(seedmsk == 1), dtype=np.float32).T vseeds = seedps.shape[0] nsperv = (seeds // vseeds) + 1 - IFLOGGER.info('Seed mask is provided (%d voxels inside ' - 'mask), computing seeds (%d seeds/voxel).', vseeds, - nsperv) + 
IFLOGGER.info( + "Seed mask is provided (%d voxels inside " + "mask), computing seeds (%d seeds/voxel).", + vseeds, + nsperv, + ) if nsperv > 1: - IFLOGGER.info('Needed %d seeds per selected voxel (total %d).', - nsperv, vseeds) + IFLOGGER.info( + "Needed %d seeds per selected voxel (total %d).", nsperv, vseeds + ) seedps = np.vstack(np.array([seedps] * nsperv)) voxcoord = seedps + np.random.uniform(-1, 1, size=seedps.shape) nseeds = voxcoord.shape[0] - seeds = affine.dot( - np.vstack((voxcoord.T, np.ones((1, nseeds)))))[:3, :].T + seeds = affine.dot(np.vstack((voxcoord.T, np.ones((1, nseeds)))))[ + :3, : + ].T if self.inputs.save_seeds: - np.savetxt(self._gen_filename('seeds', ext='.txt'), seeds) + np.savetxt(self._gen_filename("seeds", ext=".txt"), seeds) if isdefined(self.inputs.tracking_mask): tmask = msk @@ -306,34 +327,34 @@ def _run_interface(self, runtime): seeds=seeds, affine=affine, odf_vertices=sphere.vertices, - a_low=a_low) + a_low=a_low, + ) ss_mm = [np.array(s) for s in eu] trkfilev = nb.trackvis.TrackvisFile( - [(s, None, None) for s in ss_mm], - points_space='rasmm', - affine=np.eye(4)) - trkfilev.to_file(self._gen_filename('tracked', ext='.trk')) + [(s, None, None) for s in ss_mm], points_space="rasmm", affine=np.eye(4) + ) + trkfilev.to_file(self._gen_filename("tracked", ext=".trk")) return runtime def _list_outputs(self): outputs = self._outputs().get() - outputs['tracks'] = self._gen_filename('tracked', ext='.trk') - outputs['gfa'] = self._gen_filename('gfa') + outputs["tracks"] = self._gen_filename("tracked", ext=".trk") + outputs["gfa"] = self._gen_filename("gfa") if self._save_peaks: - outputs['odf_peaks'] = self._gen_filename('peaks', ext='.pklz') + outputs["odf_peaks"] = self._gen_filename("peaks", ext=".pklz") if self.inputs.save_seeds: if isdefined(self.inputs.seed_coord): - outputs['out_seeds'] = self.inputs.seed_coord + outputs["out_seeds"] = self.inputs.seed_coord else: - outputs['out_seeds'] = self._gen_filename('seeds', ext='.txt') + outputs["out_seeds"] = self._gen_filename("seeds", ext=".txt") return outputs def _gen_filename(self, name, ext=None): fname, fext = op.splitext(op.basename(self.inputs.in_file)) - if fext == '.gz': + if fext == ".gz": fname, fext2 = op.splitext(fname) fext = fext2 + fext @@ -345,4 +366,4 @@ def _gen_filename(self, name, ext=None): if ext is None: ext = fext - return out_prefix + '_' + name + ext + return out_prefix + "_" + name + ext diff --git a/nipype/interfaces/dtitk/__init__.py b/nipype/interfaces/dtitk/__init__.py index a41c09e588..6c9569114c 100644 --- a/nipype/interfaces/dtitk/__init__.py +++ b/nipype/interfaces/dtitk/__init__.py @@ -6,8 +6,21 @@ """ # from .base import () -from .registration import (Rigid, Affine, Diffeo, - ComposeXfm, DiffeoSymTensor3DVol, AffSymTensor3DVol, - AffScalarVol, DiffeoScalarVol) -from .utils import (TVAdjustVoxSp, SVAdjustVoxSp, TVResample, SVResample, - TVtool, BinThresh) +from .registration import ( + Rigid, + Affine, + Diffeo, + ComposeXfm, + DiffeoSymTensor3DVol, + AffSymTensor3DVol, + AffScalarVol, + DiffeoScalarVol, +) +from .utils import ( + TVAdjustVoxSp, + SVAdjustVoxSp, + TVResample, + SVResample, + TVtool, + BinThresh, +) diff --git a/nipype/interfaces/dtitk/base.py b/nipype/interfaces/dtitk/base.py index 32289e5217..aad1b4d521 100644 --- a/nipype/interfaces/dtitk/base.py +++ b/nipype/interfaces/dtitk/base.py @@ -33,28 +33,28 @@ from nipype.interfaces.fsl.base import Info import warnings -LOGGER = logging.getLogger('nipype.interface') +LOGGER = 
logging.getLogger("nipype.interface") class DTITKRenameMixin(object): def __init__(self, *args, **kwargs): classes = [cls.__name__ for cls in self.__class__.mro()] dep_name = classes[0] - rename_idx = classes.index('DTITKRenameMixin') + rename_idx = classes.index("DTITKRenameMixin") new_name = classes[rename_idx + 1] - warnings.warn('The {} interface has been renamed to {}\n' - 'Please see the documentation for DTI-TK ' - 'interfaces, as some inputs have been ' - 'added or renamed for clarity.' - ''.format(dep_name, new_name), - DeprecationWarning) + warnings.warn( + "The {} interface has been renamed to {}\n" + "Please see the documentation for DTI-TK " + "interfaces, as some inputs have been " + "added or renamed for clarity." + "".format(dep_name, new_name), + DeprecationWarning, + ) super(DTITKRenameMixin, self).__init__(*args, **kwargs) class CommandLineDtitk(CommandLine): - - def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True, - ext=None): + def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True, ext=None): """Generate a filename based on the given parameters. The filename will take the form: cwd/basename. @@ -80,9 +80,9 @@ def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True, """ - if basename == '': - msg = 'Unable to generate filename for command %s. ' % self.cmd - msg += 'basename is not set!' + if basename == "": + msg = "Unable to generate filename for command %s. " % self.cmd + msg += "basename is not set!" raise ValueError(msg) if cwd is None: cwd = os.getcwd() @@ -90,11 +90,10 @@ def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True, ext = Info.output_type_to_ext(self.inputs.output_type) if change_ext: if suffix: - suffix = ''.join((suffix, ext)) + suffix = "".join((suffix, ext)) else: suffix = ext if suffix is None: - suffix = '' - fname = fname_presuffix(basename, suffix=suffix, - use_ext=False, newpath=cwd) + suffix = "" + fname = fname_presuffix(basename, suffix=suffix, use_ext=False, newpath=cwd) return fname diff --git a/nipype/interfaces/dtitk/registration.py b/nipype/interfaces/dtitk/registration.py index 6aa40d4201..4a50d5b1ad 100644 --- a/nipype/interfaces/dtitk/registration.py +++ b/nipype/interfaces/dtitk/registration.py @@ -29,25 +29,60 @@ from .base import CommandLineDtitk, DTITKRenameMixin import os -__docformat__ = 'restructuredtext' +__docformat__ = "restructuredtext" class RigidInputSpec(CommandLineInputSpec): - fixed_file = File(desc="fixed tensor volume", exists=True, - mandatory=True, position=0, argstr="%s", copyfile=False) - moving_file = File(desc="moving tensor volume", exists=True, - mandatory=True, position=1, argstr="%s", copyfile=False) - similarity_metric = traits.Enum('EDS', 'GDS', 'DDS', 'NMI', - mandatory=True, position=2, argstr="%s", - desc="similarity metric", usedefault=True) - sampling_xyz = traits.Tuple((4, 4, 4), mandatory=True, position=3, - argstr="%g %g %g", usedefault=True, - desc="dist between samp points (mm) (x,y,z)") - ftol = traits.Float(mandatory=True, position=4, argstr="%g", - desc="cost function tolerance", default_value=0.01, - usedefault=True) - initialize_xfm = File(copyfile=True, desc="Initialize w/DTITK-FORMAT" - "affine", position=5, argstr="%s", exists=True) + fixed_file = File( + desc="fixed tensor volume", + exists=True, + mandatory=True, + position=0, + argstr="%s", + copyfile=False, + ) + moving_file = File( + desc="moving tensor volume", + exists=True, + mandatory=True, + position=1, + argstr="%s", + copyfile=False, + ) + similarity_metric = 
traits.Enum( + "EDS", + "GDS", + "DDS", + "NMI", + mandatory=True, + position=2, + argstr="%s", + desc="similarity metric", + usedefault=True, + ) + sampling_xyz = traits.Tuple( + (4, 4, 4), + mandatory=True, + position=3, + argstr="%g %g %g", + usedefault=True, + desc="dist between samp points (mm) (x,y,z)", + ) + ftol = traits.Float( + mandatory=True, + position=4, + argstr="%g", + desc="cost function tolerance", + default_value=0.01, + usedefault=True, + ) + initialize_xfm = File( + copyfile=True, + desc="Initialize w/DTITK-FORMAT" "affine", + position=5, + argstr="%s", + exists=True, + ) class RigidOutputSpec(TraitedSpec): @@ -72,27 +107,27 @@ class Rigid(CommandLineDtitk): 'dti_rigid_reg im1.nii im2.nii EDS 4 4 4 0.01' >>> node.run() # doctest: +SKIP """ + input_spec = RigidInputSpec output_spec = RigidOutputSpec - _cmd = 'dti_rigid_reg' + _cmd = "dti_rigid_reg" - '''def _format_arg(self, name, spec, value): + """def _format_arg(self, name, spec, value): if name == 'initialize_xfm': value = 1 - return super(Rigid, self)._format_arg(name, spec, value)''' + return super(Rigid, self)._format_arg(name, spec, value)""" def _run_interface(self, runtime): runtime = super(Rigid, self)._run_interface(runtime) - if '''.aff doesn't exist or can't be opened''' in runtime.stderr: + if """.aff doesn't exist or can't be opened""" in runtime.stderr: self.raise_exception(runtime) return runtime def _list_outputs(self): outputs = self.output_spec().get() moving = self.inputs.moving_file - outputs['out_file_xfm'] = fname_presuffix(moving, suffix='.aff', - use_ext=False) - outputs['out_file'] = fname_presuffix(moving, suffix='_aff') + outputs["out_file_xfm"] = fname_presuffix(moving, suffix=".aff", use_ext=False) + outputs["out_file"] = fname_presuffix(moving, suffix="_aff") return outputs @@ -114,24 +149,44 @@ class Affine(Rigid): 'dti_affine_reg im1.nii im2.nii EDS 4 4 4 0.01 im_affine.aff' >>> node.run() # doctest: +SKIP """ - _cmd = 'dti_affine_reg' + + _cmd = "dti_affine_reg" class DiffeoInputSpec(CommandLineInputSpec): - fixed_file = File(desc="fixed tensor volume", - exists=True, position=0, argstr="%s") - moving_file = File(desc="moving tensor volume", - exists=True, position=1, argstr="%s", copyfile=False) - mask_file = File(desc="mask", exists=True, position=2, argstr="%s") - legacy = traits.Enum(1, desc="legacy parameter; always set to 1", - usedefault=True, mandatory=True, - position=3, argstr="%d") - n_iters = traits.Int(6, desc="number of iterations", - mandatory=True, - position=4, argstr="%d", usedefault=True) - ftol = traits.Float(0.002, desc="iteration for the optimization to stop", - mandatory=True, position=5, argstr="%g", - usedefault=True) + fixed_file = File(desc="fixed tensor volume", exists=True, position=0, argstr="%s") + moving_file = File( + desc="moving tensor volume", + exists=True, + position=1, + argstr="%s", + copyfile=False, + ) + mask_file = File(desc="mask", exists=True, position=2, argstr="%s") + legacy = traits.Enum( + 1, + desc="legacy parameter; always set to 1", + usedefault=True, + mandatory=True, + position=3, + argstr="%d", + ) + n_iters = traits.Int( + 6, + desc="number of iterations", + mandatory=True, + position=4, + argstr="%d", + usedefault=True, + ) + ftol = traits.Float( + 0.002, + desc="iteration for the optimization to stop", + mandatory=True, + position=5, + argstr="%g", + usedefault=True, + ) class DiffeoOutputSpec(TraitedSpec): @@ -157,25 +212,27 @@ class Diffeo(CommandLineDtitk): 'dti_diffeomorphic_reg im1.nii im2.nii mask.nii 1 6 0.002' >>> 
node.run() # doctest: +SKIP """ + input_spec = DiffeoInputSpec output_spec = DiffeoOutputSpec - _cmd = 'dti_diffeomorphic_reg' + _cmd = "dti_diffeomorphic_reg" def _list_outputs(self): outputs = self.output_spec().get() moving = self.inputs.moving_file - outputs['out_file_xfm'] = fname_presuffix(moving, suffix='_diffeo.df') - outputs['out_file'] = fname_presuffix(moving, suffix='_diffeo') + outputs["out_file_xfm"] = fname_presuffix(moving, suffix="_diffeo.df") + outputs["out_file"] = fname_presuffix(moving, suffix="_diffeo") return outputs class ComposeXfmInputSpec(CommandLineInputSpec): - in_df = File(desc='diffeomorphic warp file', exists=True, - argstr="-df %s", mandatory=True) - in_aff = File(desc='affine transform file', exists=True, - argstr="-aff %s", mandatory=True) - out_file = File(desc='output path', - argstr="-out %s", genfile=True) + in_df = File( + desc="diffeomorphic warp file", exists=True, argstr="-df %s", mandatory=True + ) + in_aff = File( + desc="affine transform file", exists=True, argstr="-aff %s", mandatory=True + ) + out_file = File(desc="output path", argstr="-out %s", genfile=True) class ComposeXfmOutputSpec(TraitedSpec): @@ -198,60 +255,89 @@ class ComposeXfm(CommandLineDtitk): im_warp_affdf.df.nii' >>> node.run() # doctest: +SKIP """ + input_spec = ComposeXfmInputSpec output_spec = ComposeXfmOutputSpec - _cmd = 'dfRightComposeAffine' + _cmd = "dfRightComposeAffine" def _list_outputs(self): outputs = self._outputs().get() out_file = self.inputs.out_file if not isdefined(out_file): - out_file = self._gen_filename('out_file') - outputs['out_file'] = os.path.abspath(out_file) + out_file = self._gen_filename("out_file") + outputs["out_file"] = os.path.abspath(out_file) return outputs def _gen_filename(self, name): - if name != 'out_file': + if name != "out_file": return path, base, ext = split_filename(self.inputs.in_df) - suffix = '_affdf' - if base.endswith('.df'): - suffix += '.df' + suffix = "_affdf" + if base.endswith(".df"): + suffix += ".df" base = base[:-3] return fname_presuffix(base, suffix=suffix + ext, use_ext=False) class AffSymTensor3DVolInputSpec(CommandLineInputSpec): - in_file = File(desc='moving tensor volume', exists=True, - argstr="-in %s", mandatory=True) - out_file = File(desc='output filename', - argstr="-out %s", name_source="in_file", - name_template="%s_affxfmd", keep_extension=True) - transform = File(exists=True, argstr="-trans %s", - xor=['target', 'translation', 'euler', 'deformation'], - desc='transform to apply: specify an input transformation' - ' file; parameters input will be ignored',) - interpolation = traits.Enum('LEI', 'EI', usedefault=True, - argstr="-interp %s", - desc='Log Euclidean/Euclidean Interpolation') - reorient = traits.Enum('PPD', 'NO', 'FS', argstr='-reorient %s', - usedefault=True, desc='Reorientation strategy: ' - 'preservation of principal direction, no ' - 'reorientation, or finite strain') - target = File(exists=True, argstr="-target %s", xor=['transform'], - desc='output volume specification read from the target ' - 'volume if specified') - translation = traits.Tuple((traits.Float(), traits.Float(), - traits.Float()), - desc='translation (x,y,z) in mm', - argstr='-translation %g %g %g', - xor=['transform']) - euler = traits.Tuple((traits.Float(), traits.Float(), traits.Float()), - desc='(theta, phi, psi) in degrees', - xor=['transform'], argstr='-euler %g %g %g') - deformation = traits.Tuple((traits.Float(),) * 6, - desc='(xx,yy,zz,xy,yz,xz)', xor=['transform'], - argstr='-deformation %g %g %g %g %g %g') + 
in_file = File( + desc="moving tensor volume", exists=True, argstr="-in %s", mandatory=True + ) + out_file = File( + desc="output filename", + argstr="-out %s", + name_source="in_file", + name_template="%s_affxfmd", + keep_extension=True, + ) + transform = File( + exists=True, + argstr="-trans %s", + xor=["target", "translation", "euler", "deformation"], + desc="transform to apply: specify an input transformation" + " file; parameters input will be ignored", + ) + interpolation = traits.Enum( + "LEI", + "EI", + usedefault=True, + argstr="-interp %s", + desc="Log Euclidean/Euclidean Interpolation", + ) + reorient = traits.Enum( + "PPD", + "NO", + "FS", + argstr="-reorient %s", + usedefault=True, + desc="Reorientation strategy: " + "preservation of principal direction, no " + "reorientation, or finite strain", + ) + target = File( + exists=True, + argstr="-target %s", + xor=["transform"], + desc="output volume specification read from the target " "volume if specified", + ) + translation = traits.Tuple( + (traits.Float(), traits.Float(), traits.Float()), + desc="translation (x,y,z) in mm", + argstr="-translation %g %g %g", + xor=["transform"], + ) + euler = traits.Tuple( + (traits.Float(), traits.Float(), traits.Float()), + desc="(theta, phi, psi) in degrees", + xor=["transform"], + argstr="-euler %g %g %g", + ) + deformation = traits.Tuple( + (traits.Float(),) * 6, + desc="(xx,yy,zz,xy,yz,xz)", + xor=["transform"], + argstr="-deformation %g %g %g %g %g %g", + ) class AffSymTensor3DVolOutputSpec(TraitedSpec): @@ -274,43 +360,65 @@ class AffSymTensor3DVol(CommandLineDtitk): -reorient PPD -trans im_affine.aff' >>> node.run() # doctest: +SKIP """ + input_spec = AffSymTensor3DVolInputSpec output_spec = AffSymTensor3DVolOutputSpec - _cmd = 'affineSymTensor3DVolume' + _cmd = "affineSymTensor3DVolume" class AffScalarVolInputSpec(CommandLineInputSpec): - in_file = File(desc='moving scalar volume', exists=True, - argstr="-in %s", mandatory=True) - out_file = File(desc='output filename', - argstr="-out %s", name_source="in_file", - name_template="%s_affxfmd", keep_extension=True) - transform = File(exists=True, argstr="-trans %s", - xor=['target', 'translation', 'euler', 'deformation'], - desc='transform to apply: specify an input transformation' - ' file; parameters input will be ignored',) - interpolation = traits.Enum('trilinear', 'NN', - usedefault=True, argstr="-interp %s", - desc='trilinear or nearest neighbor' - ' interpolation') - target = File(exists=True, argstr="-target %s", xor=['transform'], - desc='output volume specification read from the target ' - 'volume if specified') - translation = traits.Tuple((traits.Float(), traits.Float(), - traits.Float()), - desc='translation (x,y,z) in mm', - argstr='-translation %g %g %g', - xor=['transform']) - euler = traits.Tuple((traits.Float(), traits.Float(), traits.Float()), - desc='(theta, phi, psi) in degrees', - xor=['transform'], argstr='-euler %g %g %g') - deformation = traits.Tuple((traits.Float(),) * 6, - desc='(xx,yy,zz,xy,yz,xz)', xor=['transform'], - argstr='-deformation %g %g %g %g %g %g') + in_file = File( + desc="moving scalar volume", exists=True, argstr="-in %s", mandatory=True + ) + out_file = File( + desc="output filename", + argstr="-out %s", + name_source="in_file", + name_template="%s_affxfmd", + keep_extension=True, + ) + transform = File( + exists=True, + argstr="-trans %s", + xor=["target", "translation", "euler", "deformation"], + desc="transform to apply: specify an input transformation" + " file; parameters input will be 
ignored", + ) + interpolation = traits.Enum( + "trilinear", + "NN", + usedefault=True, + argstr="-interp %s", + desc="trilinear or nearest neighbor" " interpolation", + ) + target = File( + exists=True, + argstr="-target %s", + xor=["transform"], + desc="output volume specification read from the target " "volume if specified", + ) + translation = traits.Tuple( + (traits.Float(), traits.Float(), traits.Float()), + desc="translation (x,y,z) in mm", + argstr="-translation %g %g %g", + xor=["transform"], + ) + euler = traits.Tuple( + (traits.Float(), traits.Float(), traits.Float()), + desc="(theta, phi, psi) in degrees", + xor=["transform"], + argstr="-euler %g %g %g", + ) + deformation = traits.Tuple( + (traits.Float(),) * 6, + desc="(xx,yy,zz,xy,yz,xz)", + xor=["transform"], + argstr="-deformation %g %g %g %g %g %g", + ) class AffScalarVolOutputSpec(TraitedSpec): - out_file = File(desc='moved volume', exists=True) + out_file = File(desc="moved volume", exists=True) class AffScalarVol(CommandLineDtitk): @@ -329,43 +437,69 @@ class AffScalarVol(CommandLineDtitk): im_affine.aff' >>> node.run() # doctest: +SKIP """ + input_spec = AffScalarVolInputSpec output_spec = AffScalarVolOutputSpec - _cmd = 'affineScalarVolume' + _cmd = "affineScalarVolume" def _format_arg(self, name, spec, value): - if name == 'interpolation': - value = {'trilinear': 0, 'NN': 1}[value] + if name == "interpolation": + value = {"trilinear": 0, "NN": 1}[value] return super(AffScalarVol, self)._format_arg(name, spec, value) class DiffeoSymTensor3DVolInputSpec(CommandLineInputSpec): - in_file = File(desc='moving tensor volume', exists=True, - argstr="-in %s", mandatory=True) - out_file = File(desc='output filename', - argstr="-out %s", name_source="in_file", - name_template="%s_diffeoxfmd", keep_extension=True) - transform = File(exists=True, argstr="-trans %s", - mandatory=True, desc='transform to apply') - df = traits.Str('FD', argstr="-df %s", usedefault=True) - interpolation = traits.Enum('LEI', 'EI', usedefault=True, - argstr="-interp %s", - desc='Log Euclidean/Euclidean Interpolation') - reorient = traits.Enum('PPD', 'FS', argstr='-reorient %s', - usedefault=True, desc='Reorientation strategy: ' - 'preservation of principal direction or finite ' - 'strain') - target = File(exists=True, argstr="-target %s", xor=['voxel_size'], - desc='output volume specification read from the target ' - 'volume if specified') - voxel_size = traits.Tuple((traits.Float(), traits.Float(), traits.Float()), - desc='xyz voxel size (superseded by target)', - argstr="-vsize %g %g %g", xor=['target']) - flip = traits.Tuple((traits.Int(), traits.Int(), traits.Int()), - argstr="-flip %d %d %d") - resampling_type = traits.Enum('backward', 'forward', - desc='use backward or forward resampling', - argstr="-type %s") + in_file = File( + desc="moving tensor volume", exists=True, argstr="-in %s", mandatory=True + ) + out_file = File( + desc="output filename", + argstr="-out %s", + name_source="in_file", + name_template="%s_diffeoxfmd", + keep_extension=True, + ) + transform = File( + exists=True, argstr="-trans %s", mandatory=True, desc="transform to apply" + ) + df = traits.Str("FD", argstr="-df %s", usedefault=True) + interpolation = traits.Enum( + "LEI", + "EI", + usedefault=True, + argstr="-interp %s", + desc="Log Euclidean/Euclidean Interpolation", + ) + reorient = traits.Enum( + "PPD", + "FS", + argstr="-reorient %s", + usedefault=True, + desc="Reorientation strategy: " + "preservation of principal direction or finite " + "strain", + ) + target = 
File( + exists=True, + argstr="-target %s", + xor=["voxel_size"], + desc="output volume specification read from the target " "volume if specified", + ) + voxel_size = traits.Tuple( + (traits.Float(), traits.Float(), traits.Float()), + desc="xyz voxel size (superseded by target)", + argstr="-vsize %g %g %g", + xor=["target"], + ) + flip = traits.Tuple( + (traits.Int(), traits.Int(), traits.Int()), argstr="-flip %d %d %d" + ) + resampling_type = traits.Enum( + "backward", + "forward", + desc="use backward or forward resampling", + argstr="-type %s", + ) class DiffeoSymTensor3DVolOutputSpec(TraitedSpec): @@ -391,41 +525,60 @@ class DiffeoSymTensor3DVol(CommandLineDtitk): input_spec = DiffeoSymTensor3DVolInputSpec output_spec = DiffeoSymTensor3DVolOutputSpec - _cmd = 'deformationSymTensor3DVolume' + _cmd = "deformationSymTensor3DVolume" def _format_arg(self, name, spec, value): - if name == 'resampling_type': - value = {'forward': 0, 'backward': 1}[value] + if name == "resampling_type": + value = {"forward": 0, "backward": 1}[value] return super(DiffeoSymTensor3DVol, self)._format_arg(name, spec, value) class DiffeoScalarVolInputSpec(CommandLineInputSpec): - in_file = File(desc='moving scalar volume', exists=True, - argstr="-in %s", mandatory=True) - out_file = File(desc='output filename', - argstr="-out %s", name_source="in_file", - name_template="%s_diffeoxfmd", keep_extension=True) - transform = File(exists=True, argstr="-trans %s", - mandatory=True, desc='transform to apply') - target = File(exists=True, argstr="-target %s", xor=['voxel_size'], - desc='output volume specification read from the target ' - 'volume if specified') - voxel_size = traits.Tuple((traits.Float(), traits.Float(), traits.Float()), - desc='xyz voxel size (superseded by target)', - argstr="-vsize %g %g %g", xor=['target']) - flip = traits.Tuple((traits.Int(), traits.Int(), traits.Int()), - argstr="-flip %d %d %d") - resampling_type = traits.Enum('backward', 'forward', - desc='use backward or forward resampling', - argstr="-type %s") - interpolation = traits.Enum('trilinear', 'NN', - desc='trilinear, or nearest neighbor', - argstr="-interp %s", - usedefault=True) + in_file = File( + desc="moving scalar volume", exists=True, argstr="-in %s", mandatory=True + ) + out_file = File( + desc="output filename", + argstr="-out %s", + name_source="in_file", + name_template="%s_diffeoxfmd", + keep_extension=True, + ) + transform = File( + exists=True, argstr="-trans %s", mandatory=True, desc="transform to apply" + ) + target = File( + exists=True, + argstr="-target %s", + xor=["voxel_size"], + desc="output volume specification read from the target " "volume if specified", + ) + voxel_size = traits.Tuple( + (traits.Float(), traits.Float(), traits.Float()), + desc="xyz voxel size (superseded by target)", + argstr="-vsize %g %g %g", + xor=["target"], + ) + flip = traits.Tuple( + (traits.Int(), traits.Int(), traits.Int()), argstr="-flip %d %d %d" + ) + resampling_type = traits.Enum( + "backward", + "forward", + desc="use backward or forward resampling", + argstr="-type %s", + ) + interpolation = traits.Enum( + "trilinear", + "NN", + desc="trilinear, or nearest neighbor", + argstr="-interp %s", + usedefault=True, + ) class DiffeoScalarVolOutputSpec(TraitedSpec): - out_file = File(desc='moved volume', exists=True) + out_file = File(desc="moved volume", exists=True) class DiffeoScalarVol(CommandLineDtitk): @@ -447,13 +600,13 @@ class DiffeoScalarVol(CommandLineDtitk): input_spec = DiffeoScalarVolInputSpec output_spec = 
DiffeoScalarVolOutputSpec - _cmd = 'deformationScalarVolume' + _cmd = "deformationScalarVolume" def _format_arg(self, name, spec, value): - if name == 'resampling_type': - value = {'forward': 0, 'backward': 1}[value] - elif name == 'interpolation': - value = {'trilinear': 0, 'NN': 1}[value] + if name == "resampling_type": + value = {"forward": 0, "backward": 1}[value] + elif name == "interpolation": + value = {"trilinear": 0, "NN": 1}[value] return super(DiffeoScalarVol, self)._format_arg(name, spec, value) diff --git a/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py b/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py index b6a6128a5f..e48312d3f2 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py +++ b/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py @@ -4,57 +4,36 @@ def test_AffScalarVol_inputs(): input_map = dict( - args=dict(argstr='%s', ), - deformation=dict( - argstr='-deformation %g %g %g %g %g %g', - xor=['transform'], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - euler=dict( - argstr='-euler %g %g %g', - xor=['transform'], - ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - ), - interpolation=dict( - argstr='-interp %s', - usedefault=True, - ), + args=dict(argstr="%s",), + deformation=dict(argstr="-deformation %g %g %g %g %g %g", xor=["transform"],), + environ=dict(nohash=True, usedefault=True,), + euler=dict(argstr="-euler %g %g %g", xor=["transform"],), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), + interpolation=dict(argstr="-interp %s", usedefault=True,), out_file=dict( - argstr='-out %s', + argstr="-out %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_affxfmd', - ), - target=dict( - argstr='-target %s', - extensions=None, - xor=['transform'], + name_source="in_file", + name_template="%s_affxfmd", ), + target=dict(argstr="-target %s", extensions=None, xor=["transform"],), transform=dict( - argstr='-trans %s', + argstr="-trans %s", extensions=None, - xor=['target', 'translation', 'euler', 'deformation'], - ), - translation=dict( - argstr='-translation %g %g %g', - xor=['transform'], + xor=["target", "translation", "euler", "deformation"], ), + translation=dict(argstr="-translation %g %g %g", xor=["transform"],), ) inputs = AffScalarVol.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_AffScalarVol_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = AffScalarVol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py b/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py index 658f8c7baa..54a562e8be 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py +++ b/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py @@ -4,61 +4,37 @@ def test_AffSymTensor3DVol_inputs(): input_map = dict( - args=dict(argstr='%s', ), - deformation=dict( - argstr='-deformation %g %g %g %g %g %g', - xor=['transform'], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - euler=dict( - argstr='-euler %g %g %g', - xor=['transform'], - ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - ), - interpolation=dict( - argstr='-interp %s', - usedefault=True, - ), + args=dict(argstr="%s",), + deformation=dict(argstr="-deformation 
%g %g %g %g %g %g", xor=["transform"],), + environ=dict(nohash=True, usedefault=True,), + euler=dict(argstr="-euler %g %g %g", xor=["transform"],), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), + interpolation=dict(argstr="-interp %s", usedefault=True,), out_file=dict( - argstr='-out %s', + argstr="-out %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_affxfmd', - ), - reorient=dict( - argstr='-reorient %s', - usedefault=True, - ), - target=dict( - argstr='-target %s', - extensions=None, - xor=['transform'], + name_source="in_file", + name_template="%s_affxfmd", ), + reorient=dict(argstr="-reorient %s", usedefault=True,), + target=dict(argstr="-target %s", extensions=None, xor=["transform"],), transform=dict( - argstr='-trans %s', + argstr="-trans %s", extensions=None, - xor=['target', 'translation', 'euler', 'deformation'], - ), - translation=dict( - argstr='-translation %g %g %g', - xor=['transform'], + xor=["target", "translation", "euler", "deformation"], ), + translation=dict(argstr="-translation %g %g %g", xor=["transform"],), ) inputs = AffSymTensor3DVol.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_AffSymTensor3DVol_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = AffSymTensor3DVol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_Affine.py b/nipype/interfaces/dtitk/tests/test_auto_Affine.py index 4469168d12..e095ce3922 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_Affine.py +++ b/nipype/interfaces/dtitk/tests/test_auto_Affine.py @@ -4,48 +4,21 @@ def test_Affine_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), fixed_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=0, - ), - ftol=dict( - argstr='%g', - mandatory=True, - position=4, - usedefault=True, - ), - initialize_xfm=dict( - argstr='%s', - copyfile=True, - extensions=None, - position=5, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0, ), + ftol=dict(argstr="%g", mandatory=True, position=4, usedefault=True,), + initialize_xfm=dict(argstr="%s", copyfile=True, extensions=None, position=5,), moving_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=1, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=1, ), sampling_xyz=dict( - argstr='%g %g %g', - mandatory=True, - position=3, - usedefault=True, + argstr="%g %g %g", mandatory=True, position=3, usedefault=True, ), similarity_metric=dict( - argstr='%s', - mandatory=True, - position=2, - usedefault=True, + argstr="%s", mandatory=True, position=2, usedefault=True, ), ) inputs = Affine.input_spec() @@ -53,10 +26,11 @@ def test_Affine_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Affine_outputs(): output_map = dict( - out_file=dict(extensions=None, ), - out_file_xfm=dict(extensions=None, ), + out_file=dict(extensions=None,), out_file_xfm=dict(extensions=None,), ) outputs = Affine.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py 
b/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py index 873c92e7ca..31820fc56d 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py @@ -4,48 +4,21 @@ def test_AffineTask_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), fixed_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=0, - ), - ftol=dict( - argstr='%g', - mandatory=True, - position=4, - usedefault=True, - ), - initialize_xfm=dict( - argstr='%s', - copyfile=True, - extensions=None, - position=5, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0, ), + ftol=dict(argstr="%g", mandatory=True, position=4, usedefault=True,), + initialize_xfm=dict(argstr="%s", copyfile=True, extensions=None, position=5,), moving_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=1, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=1, ), sampling_xyz=dict( - argstr='%g %g %g', - mandatory=True, - position=3, - usedefault=True, + argstr="%g %g %g", mandatory=True, position=3, usedefault=True, ), similarity_metric=dict( - argstr='%s', - mandatory=True, - position=2, - usedefault=True, + argstr="%s", mandatory=True, position=2, usedefault=True, ), ) inputs = AffineTask.input_spec() @@ -53,10 +26,11 @@ def test_AffineTask_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_AffineTask_outputs(): output_map = dict( - out_file=dict(extensions=None, ), - out_file_xfm=dict(extensions=None, ), + out_file=dict(extensions=None,), out_file_xfm=dict(extensions=None,), ) outputs = AffineTask.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py b/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py index cfd748dcb2..d1780ffe5c 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py +++ b/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py @@ -4,57 +4,31 @@ def test_BinThresh_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=0, - ), - inside_value=dict( - argstr='%g', - mandatory=True, - position=4, - usedefault=True, - ), - lower_bound=dict( - argstr='%g', - mandatory=True, - position=2, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), + inside_value=dict(argstr="%g", mandatory=True, position=4, usedefault=True,), + lower_bound=dict(argstr="%g", mandatory=True, position=2, usedefault=True,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_thrbin', + name_source="in_file", + name_template="%s_thrbin", position=1, ), - outside_value=dict( - argstr='%g', - mandatory=True, - position=5, - usedefault=True, - ), - upper_bound=dict( - argstr='%g', - mandatory=True, - position=3, - usedefault=True, - ), + outside_value=dict(argstr="%g", mandatory=True, position=5, usedefault=True,), + upper_bound=dict(argstr="%g", mandatory=True, position=3, usedefault=True,), ) inputs = BinThresh.input_spec() for key, metadata in list(input_map.items()): for metakey, 
value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BinThresh_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = BinThresh.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py b/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py index 155c5a8406..b8e03f023b 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py @@ -4,57 +4,31 @@ def test_BinThreshTask_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=0, - ), - inside_value=dict( - argstr='%g', - mandatory=True, - position=4, - usedefault=True, - ), - lower_bound=dict( - argstr='%g', - mandatory=True, - position=2, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), + inside_value=dict(argstr="%g", mandatory=True, position=4, usedefault=True,), + lower_bound=dict(argstr="%g", mandatory=True, position=2, usedefault=True,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_thrbin', + name_source="in_file", + name_template="%s_thrbin", position=1, ), - outside_value=dict( - argstr='%g', - mandatory=True, - position=5, - usedefault=True, - ), - upper_bound=dict( - argstr='%g', - mandatory=True, - position=3, - usedefault=True, - ), + outside_value=dict(argstr="%g", mandatory=True, position=5, usedefault=True,), + upper_bound=dict(argstr="%g", mandatory=True, position=3, usedefault=True,), ) inputs = BinThreshTask.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BinThreshTask_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = BinThreshTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_CommandLineDtitk.py b/nipype/interfaces/dtitk/tests/test_auto_CommandLineDtitk.py index 2e064a3150..ba76666b1f 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_CommandLineDtitk.py +++ b/nipype/interfaces/dtitk/tests/test_auto_CommandLineDtitk.py @@ -4,11 +4,7 @@ def test_CommandLineDtitk_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), environ=dict(nohash=True, usedefault=True,), ) inputs = CommandLineDtitk.input_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py index 17b31df738..88dac765f2 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py +++ b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py @@ -4,34 +4,21 @@ def test_ComposeXfm_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_aff=dict( - argstr='-aff %s', - extensions=None, - mandatory=True, - ), - in_df=dict( - argstr='-df %s', - extensions=None, - mandatory=True, - ), - out_file=dict( - argstr='-out %s', - extensions=None, - genfile=True, - ), + 
args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_aff=dict(argstr="-aff %s", extensions=None, mandatory=True,), + in_df=dict(argstr="-df %s", extensions=None, mandatory=True,), + out_file=dict(argstr="-out %s", extensions=None, genfile=True,), ) inputs = ComposeXfm.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ComposeXfm_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = ComposeXfm.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py index 2ad8ce96ab..3f573a1815 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py @@ -4,34 +4,21 @@ def test_ComposeXfmTask_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_aff=dict( - argstr='-aff %s', - extensions=None, - mandatory=True, - ), - in_df=dict( - argstr='-df %s', - extensions=None, - mandatory=True, - ), - out_file=dict( - argstr='-out %s', - extensions=None, - genfile=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_aff=dict(argstr="-aff %s", extensions=None, mandatory=True,), + in_df=dict(argstr="-df %s", extensions=None, mandatory=True,), + out_file=dict(argstr="-out %s", extensions=None, genfile=True,), ) inputs = ComposeXfmTask.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ComposeXfmTask_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = ComposeXfmTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py b/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py index cc960e36d5..cc354b0cde 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py +++ b/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py @@ -4,55 +4,25 @@ def test_Diffeo_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixed_file=dict( - argstr='%s', - extensions=None, - position=0, - ), - ftol=dict( - argstr='%g', - mandatory=True, - position=5, - usedefault=True, - ), - legacy=dict( - argstr='%d', - mandatory=True, - position=3, - usedefault=True, - ), - mask_file=dict( - argstr='%s', - extensions=None, - position=2, - ), - moving_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - position=1, - ), - n_iters=dict( - argstr='%d', - mandatory=True, - position=4, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + fixed_file=dict(argstr="%s", extensions=None, position=0,), + ftol=dict(argstr="%g", mandatory=True, position=5, usedefault=True,), + legacy=dict(argstr="%d", mandatory=True, position=3, usedefault=True,), + mask_file=dict(argstr="%s", extensions=None, position=2,), + moving_file=dict(argstr="%s", copyfile=False, extensions=None, position=1,), + n_iters=dict(argstr="%d", mandatory=True, position=4, usedefault=True,), ) inputs = Diffeo.input_spec() for key, metadata in list(input_map.items()): for metakey, value in 
list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Diffeo_outputs(): output_map = dict( - out_file=dict(extensions=None, ), - out_file_xfm=dict(extensions=None, ), + out_file=dict(extensions=None,), out_file_xfm=dict(extensions=None,), ) outputs = Diffeo.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py b/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py index 9924e1b8e3..cc85e03870 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py +++ b/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py @@ -4,51 +4,32 @@ def test_DiffeoScalarVol_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - flip=dict(argstr='-flip %d %d %d', ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - ), - interpolation=dict( - argstr='-interp %s', - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + flip=dict(argstr="-flip %d %d %d",), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), + interpolation=dict(argstr="-interp %s", usedefault=True,), out_file=dict( - argstr='-out %s', + argstr="-out %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_diffeoxfmd', - ), - resampling_type=dict(argstr='-type %s', ), - target=dict( - argstr='-target %s', - extensions=None, - xor=['voxel_size'], - ), - transform=dict( - argstr='-trans %s', - extensions=None, - mandatory=True, - ), - voxel_size=dict( - argstr='-vsize %g %g %g', - xor=['target'], + name_source="in_file", + name_template="%s_diffeoxfmd", ), + resampling_type=dict(argstr="-type %s",), + target=dict(argstr="-target %s", extensions=None, xor=["voxel_size"],), + transform=dict(argstr="-trans %s", extensions=None, mandatory=True,), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target"],), ) inputs = DiffeoScalarVol.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DiffeoScalarVol_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = DiffeoScalarVol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py b/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py index a51cbd2314..67626f3ce6 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py +++ b/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py @@ -4,59 +4,34 @@ def test_DiffeoSymTensor3DVol_inputs(): input_map = dict( - args=dict(argstr='%s', ), - df=dict( - argstr='-df %s', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - flip=dict(argstr='-flip %d %d %d', ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - ), - interpolation=dict( - argstr='-interp %s', - usedefault=True, - ), + args=dict(argstr="%s",), + df=dict(argstr="-df %s", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + flip=dict(argstr="-flip %d %d %d",), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), + interpolation=dict(argstr="-interp %s", usedefault=True,), out_file=dict( - argstr='-out %s', + argstr="-out %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_diffeoxfmd', - ), - reorient=dict( - 
argstr='-reorient %s', - usedefault=True, - ), - resampling_type=dict(argstr='-type %s', ), - target=dict( - argstr='-target %s', - extensions=None, - xor=['voxel_size'], - ), - transform=dict( - argstr='-trans %s', - extensions=None, - mandatory=True, - ), - voxel_size=dict( - argstr='-vsize %g %g %g', - xor=['target'], - ), + name_source="in_file", + name_template="%s_diffeoxfmd", + ), + reorient=dict(argstr="-reorient %s", usedefault=True,), + resampling_type=dict(argstr="-type %s",), + target=dict(argstr="-target %s", extensions=None, xor=["voxel_size"],), + transform=dict(argstr="-trans %s", extensions=None, mandatory=True,), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target"],), ) inputs = DiffeoSymTensor3DVol.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DiffeoSymTensor3DVol_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = DiffeoSymTensor3DVol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py b/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py index 5ba67405ea..0eb20d64d6 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py @@ -4,55 +4,25 @@ def test_DiffeoTask_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixed_file=dict( - argstr='%s', - extensions=None, - position=0, - ), - ftol=dict( - argstr='%g', - mandatory=True, - position=5, - usedefault=True, - ), - legacy=dict( - argstr='%d', - mandatory=True, - position=3, - usedefault=True, - ), - mask_file=dict( - argstr='%s', - extensions=None, - position=2, - ), - moving_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - position=1, - ), - n_iters=dict( - argstr='%d', - mandatory=True, - position=4, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + fixed_file=dict(argstr="%s", extensions=None, position=0,), + ftol=dict(argstr="%g", mandatory=True, position=5, usedefault=True,), + legacy=dict(argstr="%d", mandatory=True, position=3, usedefault=True,), + mask_file=dict(argstr="%s", extensions=None, position=2,), + moving_file=dict(argstr="%s", copyfile=False, extensions=None, position=1,), + n_iters=dict(argstr="%d", mandatory=True, position=4, usedefault=True,), ) inputs = DiffeoTask.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DiffeoTask_outputs(): output_map = dict( - out_file=dict(extensions=None, ), - out_file_xfm=dict(extensions=None, ), + out_file=dict(extensions=None,), out_file_xfm=dict(extensions=None,), ) outputs = DiffeoTask.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_Rigid.py b/nipype/interfaces/dtitk/tests/test_auto_Rigid.py index 4d968093c4..629fd5b780 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_Rigid.py +++ b/nipype/interfaces/dtitk/tests/test_auto_Rigid.py @@ -4,48 +4,21 @@ def test_Rigid_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), fixed_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=0, 
- ), - ftol=dict( - argstr='%g', - mandatory=True, - position=4, - usedefault=True, - ), - initialize_xfm=dict( - argstr='%s', - copyfile=True, - extensions=None, - position=5, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0, ), + ftol=dict(argstr="%g", mandatory=True, position=4, usedefault=True,), + initialize_xfm=dict(argstr="%s", copyfile=True, extensions=None, position=5,), moving_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=1, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=1, ), sampling_xyz=dict( - argstr='%g %g %g', - mandatory=True, - position=3, - usedefault=True, + argstr="%g %g %g", mandatory=True, position=3, usedefault=True, ), similarity_metric=dict( - argstr='%s', - mandatory=True, - position=2, - usedefault=True, + argstr="%s", mandatory=True, position=2, usedefault=True, ), ) inputs = Rigid.input_spec() @@ -53,10 +26,11 @@ def test_Rigid_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Rigid_outputs(): output_map = dict( - out_file=dict(extensions=None, ), - out_file_xfm=dict(extensions=None, ), + out_file=dict(extensions=None,), out_file_xfm=dict(extensions=None,), ) outputs = Rigid.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py b/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py index 93658d5d40..ec280e06a9 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py @@ -4,48 +4,21 @@ def test_RigidTask_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), fixed_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=0, - ), - ftol=dict( - argstr='%g', - mandatory=True, - position=4, - usedefault=True, - ), - initialize_xfm=dict( - argstr='%s', - copyfile=True, - extensions=None, - position=5, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0, ), + ftol=dict(argstr="%g", mandatory=True, position=4, usedefault=True,), + initialize_xfm=dict(argstr="%s", copyfile=True, extensions=None, position=5,), moving_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=1, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=1, ), sampling_xyz=dict( - argstr='%g %g %g', - mandatory=True, - position=3, - usedefault=True, + argstr="%g %g %g", mandatory=True, position=3, usedefault=True, ), similarity_metric=dict( - argstr='%s', - mandatory=True, - position=2, - usedefault=True, + argstr="%s", mandatory=True, position=2, usedefault=True, ), ) inputs = RigidTask.input_spec() @@ -53,10 +26,11 @@ def test_RigidTask_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_RigidTask_outputs(): output_map = dict( - out_file=dict(extensions=None, ), - out_file_xfm=dict(extensions=None, ), + out_file=dict(extensions=None,), out_file_xfm=dict(extensions=None,), ) outputs = RigidTask.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py index 9091868546..c01e08a66e 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py +++ 
b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py @@ -4,44 +4,31 @@ def test_SVAdjustVoxSp_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - ), - origin=dict( - argstr='-origin %g %g %g', - xor=['target_file'], - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), + origin=dict(argstr="-origin %g %g %g", xor=["target_file"],), out_file=dict( - argstr='-out %s', + argstr="-out %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_avs', + name_source="in_file", + name_template="%s_avs", ), target_file=dict( - argstr='-target %s', - extensions=None, - xor=['voxel_size', 'origin'], - ), - voxel_size=dict( - argstr='-vsize %g %g %g', - xor=['target_file'], + argstr="-target %s", extensions=None, xor=["voxel_size", "origin"], ), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"],), ) inputs = SVAdjustVoxSp.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SVAdjustVoxSp_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = SVAdjustVoxSp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py index c1b4dc619a..0ca4e416ae 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py @@ -4,44 +4,31 @@ def test_SVAdjustVoxSpTask_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - ), - origin=dict( - argstr='-origin %g %g %g', - xor=['target_file'], - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), + origin=dict(argstr="-origin %g %g %g", xor=["target_file"],), out_file=dict( - argstr='-out %s', + argstr="-out %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_avs', + name_source="in_file", + name_template="%s_avs", ), target_file=dict( - argstr='-target %s', - extensions=None, - xor=['voxel_size', 'origin'], - ), - voxel_size=dict( - argstr='-vsize %g %g %g', - xor=['target_file'], + argstr="-target %s", extensions=None, xor=["voxel_size", "origin"], ), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"],), ) inputs = SVAdjustVoxSpTask.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SVAdjustVoxSpTask_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = SVAdjustVoxSpTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVResample.py b/nipype/interfaces/dtitk/tests/test_auto_SVResample.py index 6aecbc29c9..c08df4bfbb 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_SVResample.py +++ b/nipype/interfaces/dtitk/tests/test_auto_SVResample.py @@ -4,49 +4,35 @@ def 
test_SVResample_inputs(): input_map = dict( - align=dict(argstr='-align %s', ), - args=dict(argstr='%s', ), - array_size=dict( - argstr='-size %d %d %d', - xor=['target_file'], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - ), - origin=dict( - argstr='-origin %g %g %g', - xor=['target_file'], - ), + align=dict(argstr="-align %s",), + args=dict(argstr="%s",), + array_size=dict(argstr="-size %d %d %d", xor=["target_file"],), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), + origin=dict(argstr="-origin %g %g %g", xor=["target_file"],), out_file=dict( - argstr='-out %s', + argstr="-out %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_resampled', + name_source="in_file", + name_template="%s_resampled", ), target_file=dict( - argstr='-target %s', + argstr="-target %s", extensions=None, - xor=['array_size', 'voxel_size', 'origin'], - ), - voxel_size=dict( - argstr='-vsize %g %g %g', - xor=['target_file'], + xor=["array_size", "voxel_size", "origin"], ), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"],), ) inputs = SVResample.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SVResample_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = SVResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py b/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py index 92efe23e44..467163504b 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py @@ -4,49 +4,35 @@ def test_SVResampleTask_inputs(): input_map = dict( - align=dict(argstr='-align %s', ), - args=dict(argstr='%s', ), - array_size=dict( - argstr='-size %d %d %d', - xor=['target_file'], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - ), - origin=dict( - argstr='-origin %g %g %g', - xor=['target_file'], - ), + align=dict(argstr="-align %s",), + args=dict(argstr="%s",), + array_size=dict(argstr="-size %d %d %d", xor=["target_file"],), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), + origin=dict(argstr="-origin %g %g %g", xor=["target_file"],), out_file=dict( - argstr='-out %s', + argstr="-out %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_resampled', + name_source="in_file", + name_template="%s_resampled", ), target_file=dict( - argstr='-target %s', + argstr="-target %s", extensions=None, - xor=['array_size', 'voxel_size', 'origin'], - ), - voxel_size=dict( - argstr='-vsize %g %g %g', - xor=['target_file'], + xor=["array_size", "voxel_size", "origin"], ), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"],), ) inputs = SVResampleTask.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SVResampleTask_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = SVResampleTask.output_spec() for key, 
metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py index 406ffcb418..d22aa78c9c 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py @@ -4,44 +4,31 @@ def test_TVAdjustOriginTask_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - ), - origin=dict( - argstr='-origin %g %g %g', - xor=['target_file'], - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), + origin=dict(argstr="-origin %g %g %g", xor=["target_file"],), out_file=dict( - argstr='-out %s', + argstr="-out %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_avs', + name_source="in_file", + name_template="%s_avs", ), target_file=dict( - argstr='-target %s', - extensions=None, - xor=['voxel_size', 'origin'], - ), - voxel_size=dict( - argstr='-vsize %g %g %g', - xor=['target_file'], + argstr="-target %s", extensions=None, xor=["voxel_size", "origin"], ), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"],), ) inputs = TVAdjustOriginTask.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TVAdjustOriginTask_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = TVAdjustOriginTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py index 1d2a9fa6b5..70dc59c5a3 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py @@ -4,44 +4,31 @@ def test_TVAdjustVoxSp_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - ), - origin=dict( - argstr='-origin %g %g %g', - xor=['target_file'], - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), + origin=dict(argstr="-origin %g %g %g", xor=["target_file"],), out_file=dict( - argstr='-out %s', + argstr="-out %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_avs', + name_source="in_file", + name_template="%s_avs", ), target_file=dict( - argstr='-target %s', - extensions=None, - xor=['voxel_size', 'origin'], - ), - voxel_size=dict( - argstr='-vsize %g %g %g', - xor=['target_file'], + argstr="-target %s", extensions=None, xor=["voxel_size", "origin"], ), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"],), ) inputs = TVAdjustVoxSp.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TVAdjustVoxSp_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = TVAdjustVoxSp.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py index 6bd1f4601f..a23056e502 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py @@ -4,44 +4,31 @@ def test_TVAdjustVoxSpTask_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - ), - origin=dict( - argstr='-origin %g %g %g', - xor=['target_file'], - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), + origin=dict(argstr="-origin %g %g %g", xor=["target_file"],), out_file=dict( - argstr='-out %s', + argstr="-out %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_avs', + name_source="in_file", + name_template="%s_avs", ), target_file=dict( - argstr='-target %s', - extensions=None, - xor=['voxel_size', 'origin'], - ), - voxel_size=dict( - argstr='-vsize %g %g %g', - xor=['target_file'], + argstr="-target %s", extensions=None, xor=["voxel_size", "origin"], ), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"],), ) inputs = TVAdjustVoxSpTask.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TVAdjustVoxSpTask_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = TVAdjustVoxSpTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVResample.py b/nipype/interfaces/dtitk/tests/test_auto_TVResample.py index f8101d07ab..13142572f9 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVResample.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVResample.py @@ -4,50 +4,36 @@ def test_TVResample_inputs(): input_map = dict( - align=dict(argstr='-align %s', ), - args=dict(argstr='%s', ), - array_size=dict( - argstr='-size %d %d %d', - xor=['target_file'], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - ), - interpolation=dict(argstr='-interp %s', ), - origin=dict( - argstr='-origin %g %g %g', - xor=['target_file'], - ), + align=dict(argstr="-align %s",), + args=dict(argstr="%s",), + array_size=dict(argstr="-size %d %d %d", xor=["target_file"],), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), + interpolation=dict(argstr="-interp %s",), + origin=dict(argstr="-origin %g %g %g", xor=["target_file"],), out_file=dict( - argstr='-out %s', + argstr="-out %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_resampled', + name_source="in_file", + name_template="%s_resampled", ), target_file=dict( - argstr='-target %s', + argstr="-target %s", extensions=None, - xor=['array_size', 'voxel_size', 'origin'], - ), - voxel_size=dict( - argstr='-vsize %g %g %g', - xor=['target_file'], + xor=["array_size", "voxel_size", "origin"], ), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"],), ) inputs = TVResample.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def 
test_TVResample_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = TVResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py index d181ae6bc9..3bf6221d24 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py @@ -4,50 +4,36 @@ def test_TVResampleTask_inputs(): input_map = dict( - align=dict(argstr='-align %s', ), - args=dict(argstr='%s', ), - array_size=dict( - argstr='-size %d %d %d', - xor=['target_file'], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - ), - interpolation=dict(argstr='-interp %s', ), - origin=dict( - argstr='-origin %g %g %g', - xor=['target_file'], - ), + align=dict(argstr="-align %s",), + args=dict(argstr="%s",), + array_size=dict(argstr="-size %d %d %d", xor=["target_file"],), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), + interpolation=dict(argstr="-interp %s",), + origin=dict(argstr="-origin %g %g %g", xor=["target_file"],), out_file=dict( - argstr='-out %s', + argstr="-out %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_resampled', + name_source="in_file", + name_template="%s_resampled", ), target_file=dict( - argstr='-target %s', + argstr="-target %s", extensions=None, - xor=['array_size', 'voxel_size', 'origin'], - ), - voxel_size=dict( - argstr='-vsize %g %g %g', - xor=['target_file'], + xor=["array_size", "voxel_size", "origin"], ), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"],), ) inputs = TVResampleTask.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TVResampleTask_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = TVResampleTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVtool.py b/nipype/interfaces/dtitk/tests/test_auto_TVtool.py index 322e226612..812049f83b 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVtool.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVtool.py @@ -4,30 +4,21 @@ def test_TVtool_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - ), - in_flag=dict(argstr='-%s', ), - out_file=dict( - argstr='-out %s', - extensions=None, - genfile=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), + in_flag=dict(argstr="-%s",), + out_file=dict(argstr="-out %s", extensions=None, genfile=True,), ) inputs = TVtool.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TVtool_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = TVtool.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py index 0699b69687..a26d2e76f2 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py @@ -4,30 +4,21 @@ def test_TVtoolTask_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - ), - in_flag=dict(argstr='-%s', ), - out_file=dict( - argstr='-out %s', - extensions=None, - genfile=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), + in_flag=dict(argstr="-%s",), + out_file=dict(argstr="-out %s", extensions=None, genfile=True,), ) inputs = TVtoolTask.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TVtoolTask_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = TVtoolTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py index df6eeaed84..67b1d0efbb 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py @@ -4,57 +4,36 @@ def test_affScalarVolTask_inputs(): input_map = dict( - args=dict(argstr='%s', ), - deformation=dict( - argstr='-deformation %g %g %g %g %g %g', - xor=['transform'], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - euler=dict( - argstr='-euler %g %g %g', - xor=['transform'], - ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - ), - interpolation=dict( - argstr='-interp %s', - usedefault=True, - ), + args=dict(argstr="%s",), + deformation=dict(argstr="-deformation %g %g %g %g %g %g", xor=["transform"],), + environ=dict(nohash=True, usedefault=True,), + euler=dict(argstr="-euler %g %g %g", xor=["transform"],), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), + interpolation=dict(argstr="-interp %s", usedefault=True,), out_file=dict( - argstr='-out %s', + argstr="-out %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_affxfmd', - ), - target=dict( - argstr='-target %s', - extensions=None, - xor=['transform'], + name_source="in_file", + name_template="%s_affxfmd", ), + target=dict(argstr="-target %s", extensions=None, xor=["transform"],), transform=dict( - argstr='-trans %s', + argstr="-trans %s", extensions=None, - xor=['target', 'translation', 'euler', 'deformation'], - ), - translation=dict( - argstr='-translation %g %g %g', - xor=['transform'], + xor=["target", "translation", "euler", "deformation"], ), + translation=dict(argstr="-translation %g %g %g", xor=["transform"],), ) inputs = affScalarVolTask.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_affScalarVolTask_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = affScalarVolTask.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py index 9d1f491e8e..33778b661e 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py @@ -4,61 +4,37 @@ def test_affSymTensor3DVolTask_inputs(): input_map = dict( - args=dict(argstr='%s', ), - deformation=dict( - argstr='-deformation %g %g %g %g %g %g', - xor=['transform'], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - euler=dict( - argstr='-euler %g %g %g', - xor=['transform'], - ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - ), - interpolation=dict( - argstr='-interp %s', - usedefault=True, - ), + args=dict(argstr="%s",), + deformation=dict(argstr="-deformation %g %g %g %g %g %g", xor=["transform"],), + environ=dict(nohash=True, usedefault=True,), + euler=dict(argstr="-euler %g %g %g", xor=["transform"],), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), + interpolation=dict(argstr="-interp %s", usedefault=True,), out_file=dict( - argstr='-out %s', + argstr="-out %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_affxfmd', - ), - reorient=dict( - argstr='-reorient %s', - usedefault=True, - ), - target=dict( - argstr='-target %s', - extensions=None, - xor=['transform'], + name_source="in_file", + name_template="%s_affxfmd", ), + reorient=dict(argstr="-reorient %s", usedefault=True,), + target=dict(argstr="-target %s", extensions=None, xor=["transform"],), transform=dict( - argstr='-trans %s', + argstr="-trans %s", extensions=None, - xor=['target', 'translation', 'euler', 'deformation'], - ), - translation=dict( - argstr='-translation %g %g %g', - xor=['transform'], + xor=["target", "translation", "euler", "deformation"], ), + translation=dict(argstr="-translation %g %g %g", xor=["transform"],), ) inputs = affSymTensor3DVolTask.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_affSymTensor3DVolTask_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = affSymTensor3DVolTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py index 062af0c769..e1e2468b5c 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py @@ -4,51 +4,32 @@ def test_diffeoScalarVolTask_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - flip=dict(argstr='-flip %d %d %d', ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - ), - interpolation=dict( - argstr='-interp %s', - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + flip=dict(argstr="-flip %d %d %d",), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), + interpolation=dict(argstr="-interp %s", usedefault=True,), out_file=dict( - argstr='-out %s', + argstr="-out %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_diffeoxfmd', - ), - resampling_type=dict(argstr='-type %s', ), - target=dict( - argstr='-target %s', - extensions=None, - 
xor=['voxel_size'], - ), - transform=dict( - argstr='-trans %s', - extensions=None, - mandatory=True, - ), - voxel_size=dict( - argstr='-vsize %g %g %g', - xor=['target'], + name_source="in_file", + name_template="%s_diffeoxfmd", ), + resampling_type=dict(argstr="-type %s",), + target=dict(argstr="-target %s", extensions=None, xor=["voxel_size"],), + transform=dict(argstr="-trans %s", extensions=None, mandatory=True,), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target"],), ) inputs = diffeoScalarVolTask.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_diffeoScalarVolTask_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = diffeoScalarVolTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py index 1b7a8f03bb..0956ec615a 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py @@ -4,59 +4,34 @@ def test_diffeoSymTensor3DVolTask_inputs(): input_map = dict( - args=dict(argstr='%s', ), - df=dict( - argstr='-df %s', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - flip=dict(argstr='-flip %d %d %d', ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - ), - interpolation=dict( - argstr='-interp %s', - usedefault=True, - ), + args=dict(argstr="%s",), + df=dict(argstr="-df %s", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + flip=dict(argstr="-flip %d %d %d",), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), + interpolation=dict(argstr="-interp %s", usedefault=True,), out_file=dict( - argstr='-out %s', + argstr="-out %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_diffeoxfmd', - ), - reorient=dict( - argstr='-reorient %s', - usedefault=True, - ), - resampling_type=dict(argstr='-type %s', ), - target=dict( - argstr='-target %s', - extensions=None, - xor=['voxel_size'], - ), - transform=dict( - argstr='-trans %s', - extensions=None, - mandatory=True, - ), - voxel_size=dict( - argstr='-vsize %g %g %g', - xor=['target'], - ), + name_source="in_file", + name_template="%s_diffeoxfmd", + ), + reorient=dict(argstr="-reorient %s", usedefault=True,), + resampling_type=dict(argstr="-type %s",), + target=dict(argstr="-target %s", extensions=None, xor=["voxel_size"],), + transform=dict(argstr="-trans %s", extensions=None, mandatory=True,), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target"],), ) inputs = diffeoSymTensor3DVolTask.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_diffeoSymTensor3DVolTask_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = diffeoSymTensor3DVolTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/utils.py b/nipype/interfaces/dtitk/utils.py index 3ed6e61395..e959fd8f0c 100644 --- a/nipype/interfaces/dtitk/utils.py +++ b/nipype/interfaces/dtitk/utils.py @@ -29,25 +29,35 @@ from .base import CommandLineDtitk, DTITKRenameMixin import os 
-__docformat__ = 'restructuredtext' +__docformat__ = "restructuredtext" class TVAdjustVoxSpInputSpec(CommandLineInputSpec): - in_file = File(desc="tensor volume to modify", exists=True, - mandatory=True, argstr="-in %s") - out_file = File(desc='output path', - argstr="-out %s", name_source='in_file', - name_template='%s_avs', keep_extension=True) - target_file = File(desc='target volume to match', - argstr="-target %s", - xor=['voxel_size', 'origin']) - voxel_size = traits.Tuple((traits.Float(), traits.Float(), traits.Float()), - desc='xyz voxel size (superseded by target)', - argstr="-vsize %g %g %g", xor=['target_file']) - origin = traits.Tuple((traits.Float(), traits.Float(), traits.Float()), - desc='xyz origin (superseded by target)', - argstr='-origin %g %g %g', - xor=['target_file']) + in_file = File( + desc="tensor volume to modify", exists=True, mandatory=True, argstr="-in %s" + ) + out_file = File( + desc="output path", + argstr="-out %s", + name_source="in_file", + name_template="%s_avs", + keep_extension=True, + ) + target_file = File( + desc="target volume to match", argstr="-target %s", xor=["voxel_size", "origin"] + ) + voxel_size = traits.Tuple( + (traits.Float(), traits.Float(), traits.Float()), + desc="xyz voxel size (superseded by target)", + argstr="-vsize %g %g %g", + xor=["target_file"], + ) + origin = traits.Tuple( + (traits.Float(), traits.Float(), traits.Float()), + desc="xyz origin (superseded by target)", + argstr="-origin %g %g %g", + xor=["target_file"], + ) class TVAdjustVoxSpOutputSpec(TraitedSpec): @@ -69,26 +79,38 @@ class TVAdjustVoxSp(CommandLineDtitk): 'TVAdjustVoxelspace -in im1.nii -out im1_avs.nii -target im2.nii' >>> node.run() # doctest: +SKIP """ + input_spec = TVAdjustVoxSpInputSpec output_spec = TVAdjustVoxSpOutputSpec - _cmd = 'TVAdjustVoxelspace' + _cmd = "TVAdjustVoxelspace" class SVAdjustVoxSpInputSpec(CommandLineInputSpec): - in_file = File(desc="scalar volume to modify", exists=True, - mandatory=True, argstr="-in %s") - out_file = File(desc='output path', argstr="-out %s", - name_source="in_file", name_template='%s_avs', - keep_extension=True) - target_file = File(desc='target volume to match', - argstr="-target %s", xor=['voxel_size', 'origin']) - voxel_size = traits.Tuple((traits.Float(), traits.Float(), traits.Float()), - desc='xyz voxel size (superseded by target)', - argstr="-vsize %g %g %g", xor=['target_file']) - origin = traits.Tuple((traits.Float(), traits.Float(), traits.Float()), - desc='xyz origin (superseded by target)', - argstr='-origin %g %g %g', - xor=['target_file']) + in_file = File( + desc="scalar volume to modify", exists=True, mandatory=True, argstr="-in %s" + ) + out_file = File( + desc="output path", + argstr="-out %s", + name_source="in_file", + name_template="%s_avs", + keep_extension=True, + ) + target_file = File( + desc="target volume to match", argstr="-target %s", xor=["voxel_size", "origin"] + ) + voxel_size = traits.Tuple( + (traits.Float(), traits.Float(), traits.Float()), + desc="xyz voxel size (superseded by target)", + argstr="-vsize %g %g %g", + xor=["target_file"], + ) + origin = traits.Tuple( + (traits.Float(), traits.Float(), traits.Float()), + desc="xyz origin (superseded by target)", + argstr="-origin %g %g %g", + xor=["target_file"], + ) class SVAdjustVoxSpOutputSpec(TraitedSpec): @@ -110,33 +132,55 @@ class SVAdjustVoxSp(CommandLineDtitk): 'SVAdjustVoxelspace -in im1.nii -out im1_avs.nii -target im2.nii' >>> node.run() # doctest: +SKIP """ + input_spec = SVAdjustVoxSpInputSpec output_spec = 
SVAdjustVoxSpOutputSpec - _cmd = 'SVAdjustVoxelspace' + _cmd = "SVAdjustVoxelspace" class TVResampleInputSpec(CommandLineInputSpec): - in_file = File(desc="tensor volume to resample", exists=True, - mandatory=True, argstr="-in %s") - out_file = File(desc='output path', - name_source="in_file", name_template="%s_resampled", - keep_extension=True, argstr="-out %s") - target_file = File(desc='specs read from the target volume', - argstr="-target %s", - xor=['array_size', 'voxel_size', 'origin']) - align = traits.Enum('center', 'origin', argstr="-align %s", - desc='how to align output volume to input volume') - interpolation = traits.Enum('LEI', 'EI', argstr="-interp %s", - desc='Log Euclidean Euclidean Interpolation') - array_size = traits.Tuple((traits.Int(), traits.Int(), traits.Int()), - desc='resampled array size', xor=['target_file'], - argstr="-size %d %d %d") - voxel_size = traits.Tuple((traits.Float(), traits.Float(), traits.Float()), - desc='resampled voxel size', xor=['target_file'], - argstr="-vsize %g %g %g") - origin = traits.Tuple((traits.Float(), traits.Float(), traits.Float()), - desc='xyz origin', xor=['target_file'], - argstr='-origin %g %g %g') + in_file = File( + desc="tensor volume to resample", exists=True, mandatory=True, argstr="-in %s" + ) + out_file = File( + desc="output path", + name_source="in_file", + name_template="%s_resampled", + keep_extension=True, + argstr="-out %s", + ) + target_file = File( + desc="specs read from the target volume", + argstr="-target %s", + xor=["array_size", "voxel_size", "origin"], + ) + align = traits.Enum( + "center", + "origin", + argstr="-align %s", + desc="how to align output volume to input volume", + ) + interpolation = traits.Enum( + "LEI", "EI", argstr="-interp %s", desc="Log Euclidean Euclidean Interpolation" + ) + array_size = traits.Tuple( + (traits.Int(), traits.Int(), traits.Int()), + desc="resampled array size", + xor=["target_file"], + argstr="-size %d %d %d", + ) + voxel_size = traits.Tuple( + (traits.Float(), traits.Float(), traits.Float()), + desc="resampled voxel size", + xor=["target_file"], + argstr="-vsize %g %g %g", + ) + origin = traits.Tuple( + (traits.Float(), traits.Float(), traits.Float()), + desc="xyz origin", + xor=["target_file"], + argstr="-origin %g %g %g", + ) class TVResampleOutputSpec(TraitedSpec): @@ -158,31 +202,52 @@ class TVResample(CommandLineDtitk): 'TVResample -in im1.nii -out im1_resampled.nii -target im2.nii' >>> node.run() # doctest: +SKIP """ + input_spec = TVResampleInputSpec output_spec = TVResampleOutputSpec - _cmd = 'TVResample' + _cmd = "TVResample" class SVResampleInputSpec(CommandLineInputSpec): - in_file = File(desc="image to resample", exists=True, - mandatory=True, argstr="-in %s") - out_file = File(desc='output path', - name_source="in_file", name_template="%s_resampled", - keep_extension=True, argstr="-out %s") - target_file = File(desc='specs read from the target volume', - argstr="-target %s", - xor=['array_size', 'voxel_size', 'origin']) - align = traits.Enum('center', 'origin', argstr="-align %s", - desc='how to align output volume to input volume') - array_size = traits.Tuple((traits.Int(), traits.Int(), traits.Int()), - desc='resampled array size', xor=['target_file'], - argstr="-size %d %d %d") - voxel_size = traits.Tuple((traits.Float(), traits.Float(), traits.Float()), - desc='resampled voxel size', xor=['target_file'], - argstr="-vsize %g %g %g") - origin = traits.Tuple((traits.Float(), traits.Float(), traits.Float()), - desc='xyz origin', xor=['target_file'], - 
argstr='-origin %g %g %g') + in_file = File( + desc="image to resample", exists=True, mandatory=True, argstr="-in %s" + ) + out_file = File( + desc="output path", + name_source="in_file", + name_template="%s_resampled", + keep_extension=True, + argstr="-out %s", + ) + target_file = File( + desc="specs read from the target volume", + argstr="-target %s", + xor=["array_size", "voxel_size", "origin"], + ) + align = traits.Enum( + "center", + "origin", + argstr="-align %s", + desc="how to align output volume to input volume", + ) + array_size = traits.Tuple( + (traits.Int(), traits.Int(), traits.Int()), + desc="resampled array size", + xor=["target_file"], + argstr="-size %d %d %d", + ) + voxel_size = traits.Tuple( + (traits.Float(), traits.Float(), traits.Float()), + desc="resampled voxel size", + xor=["target_file"], + argstr="-vsize %g %g %g", + ) + origin = traits.Tuple( + (traits.Float(), traits.Float(), traits.Float()), + desc="xyz origin", + xor=["target_file"], + argstr="-origin %g %g %g", + ) class SVResampleOutputSpec(TraitedSpec): @@ -204,17 +269,18 @@ class SVResample(CommandLineDtitk): 'SVResample -in im1.nii -out im1_resampled.nii -target im2.nii' >>> node.run() # doctest: +SKIP """ + input_spec = SVResampleInputSpec output_spec = SVResampleOutputSpec - _cmd = 'SVResample' + _cmd = "SVResample" class TVtoolInputSpec(CommandLineInputSpec): - in_file = File(desc="scalar volume to resample", exists=True, - argstr="-in %s", mandatory=True) - '''NOTE: there are a lot more options here; not implementing all of them''' - in_flag = traits.Enum('fa', 'tr', 'ad', 'rd', 'pd', 'rgb', - argstr="-%s", desc='') + in_file = File( + desc="scalar volume to resample", exists=True, argstr="-in %s", mandatory=True + ) + """NOTE: there are a lot more options here; not implementing all of them""" + in_flag = traits.Enum("fa", "tr", "ad", "rd", "pd", "rgb", argstr="-%s", desc="") out_file = File(argstr="-out %s", genfile=True) @@ -237,46 +303,78 @@ class TVtool(CommandLineDtitk): 'TVtool -in im1.nii -fa -out im1_fa.nii' >>> node.run() # doctest: +SKIP """ + input_spec = TVtoolInputSpec output_spec = TVtoolOutputSpec - _cmd = 'TVtool' + _cmd = "TVtool" def _list_outputs(self): outputs = self._outputs().get() out_file = self.inputs.out_file if not isdefined(out_file): - out_file = self._gen_filename('out_file') - outputs['out_file'] = os.path.abspath(out_file) + out_file = self._gen_filename("out_file") + outputs["out_file"] = os.path.abspath(out_file) return outputs def _gen_filename(self, name): - if name != 'out_file': + if name != "out_file": return - return fname_presuffix(os.path.basename(self.inputs.in_file), - suffix='_' + self.inputs.in_flag) + return fname_presuffix( + os.path.basename(self.inputs.in_file), suffix="_" + self.inputs.in_flag + ) -'''Note: SVTool not implemented at this time''' +"""Note: SVTool not implemented at this time""" class BinThreshInputSpec(CommandLineInputSpec): - in_file = File(desc='Image to threshold/binarize', exists=True, - position=0, argstr="%s", mandatory=True) - out_file = File(desc='output path', position=1, argstr="%s", - keep_extension=True, name_source='in_file', - name_template='%s_thrbin') - lower_bound = traits.Float(0.01, usedefault=True, - position=2, argstr="%g", mandatory=True, - desc='lower bound of binarization range') - upper_bound = traits.Float(100, usedefault=True, - position=3, argstr="%g", mandatory=True, - desc='upper bound of binarization range') - inside_value = traits.Float(1, position=4, argstr="%g", usedefault=True, - mandatory=True, 
desc='value for voxels in ' - 'binarization range') - outside_value = traits.Float(0, position=5, argstr="%g", usedefault=True, - mandatory=True, desc='value for voxels' - 'outside of binarization range') + in_file = File( + desc="Image to threshold/binarize", + exists=True, + position=0, + argstr="%s", + mandatory=True, + ) + out_file = File( + desc="output path", + position=1, + argstr="%s", + keep_extension=True, + name_source="in_file", + name_template="%s_thrbin", + ) + lower_bound = traits.Float( + 0.01, + usedefault=True, + position=2, + argstr="%g", + mandatory=True, + desc="lower bound of binarization range", + ) + upper_bound = traits.Float( + 100, + usedefault=True, + position=3, + argstr="%g", + mandatory=True, + desc="upper bound of binarization range", + ) + inside_value = traits.Float( + 1, + position=4, + argstr="%g", + usedefault=True, + mandatory=True, + desc="value for voxels in " "binarization range", + ) + outside_value = traits.Float( + 0, + position=5, + argstr="%g", + usedefault=True, + mandatory=True, + desc="value for voxels" "outside of binarization range", + ) class BinThreshOutputSpec(TraitedSpec): @@ -304,7 +402,7 @@ class BinThresh(CommandLineDtitk): input_spec = BinThreshInputSpec output_spec = BinThreshOutputSpec - _cmd = 'BinaryThresholdImageFilter' + _cmd = "BinaryThresholdImageFilter" class BinThreshTask(DTITKRenameMixin, BinThresh): diff --git a/nipype/interfaces/dynamic_slicer.py b/nipype/interfaces/dynamic_slicer.py index db238127c9..8404aad802 100644 --- a/nipype/interfaces/dynamic_slicer.py +++ b/nipype/interfaces/dynamic_slicer.py @@ -6,28 +6,34 @@ import warnings import xml.dom.minidom -from .base import (CommandLine, CommandLineInputSpec, DynamicTraitedSpec, - traits, Undefined, File, isdefined) +from .base import ( + CommandLine, + CommandLineInputSpec, + DynamicTraitedSpec, + traits, + Undefined, + File, + isdefined, +) class SlicerCommandLineInputSpec(DynamicTraitedSpec, CommandLineInputSpec): - module = traits.Str( - desc="name of the Slicer command line module you want to use") + module = traits.Str(desc="name of the Slicer command line module you want to use") class SlicerCommandLine(CommandLine): """Experimental Slicer wrapper. Work in progress. 
""" + _cmd = "Slicer3" input_spec = SlicerCommandLineInputSpec output_spec = DynamicTraitedSpec def _grab_xml(self, module): cmd = CommandLine( - command="Slicer3", - resource_monitor=False, - args="--launch %s --xml" % module) + command="Slicer3", resource_monitor=False, args="--launch %s --xml" % module + ) ret = cmd.run() if ret.runtime.returncode == 0: return xml.dom.minidom.parseString(ret.runtime.stdout) @@ -38,8 +44,8 @@ def _outputs(self): base = super(SlicerCommandLine, self)._outputs() undefined_output_traits = {} for key in [ - node.getElementsByTagName('name')[0].firstChild.nodeValue - for node in self._outputs_nodes + node.getElementsByTagName("name")[0].firstChild.nodeValue + for node in self._outputs_nodes ]: base.add_trait(key, File(exists=True)) undefined_output_traits[key] = Undefined @@ -48,9 +54,10 @@ def _outputs(self): return base def __init__(self, module, **inputs): - warnings.warn('slicer is Not fully implemented', RuntimeWarning) + warnings.warn("slicer is Not fully implemented", RuntimeWarning) super(SlicerCommandLine, self).__init__( - command="Slicer3 --launch %s " % module, name=module, **inputs) + command="Slicer3 --launch %s " % module, name=module, **inputs + ) dom = self._grab_xml(module) self._outputs_filenames = {} @@ -60,95 +67,89 @@ def __init__(self, module, **inputs): for paramGroup in dom.getElementsByTagName("parameters"): for param in paramGroup.childNodes: - if param.nodeName in [ - 'label', 'description', '#text', '#comment' - ]: + if param.nodeName in ["label", "description", "#text", "#comment"]: continue traitsParams = {} - name = param.getElementsByTagName('name')[ - 0].firstChild.nodeValue + name = param.getElementsByTagName("name")[0].firstChild.nodeValue - longFlagNode = param.getElementsByTagName('longflag') + longFlagNode = param.getElementsByTagName("longflag") if longFlagNode: - traitsParams[ - "argstr"] = "--" + longFlagNode[0].firstChild.nodeValue + " " + traitsParams["argstr"] = ( + "--" + longFlagNode[0].firstChild.nodeValue + " " + ) else: traitsParams["argstr"] = "--" + name + " " argsDict = { - 'file': '%s', - 'integer': "%d", - 'double': "%f", - 'float': "%f", - 'image': "%s", - 'transform': "%s", - 'boolean': '', - 'string-enumeration': '%s', - 'string': "%s" + "file": "%s", + "integer": "%d", + "double": "%f", + "float": "%f", + "image": "%s", + "transform": "%s", + "boolean": "", + "string-enumeration": "%s", + "string": "%s", } - if param.nodeName.endswith('-vector'): + if param.nodeName.endswith("-vector"): traitsParams["argstr"] += argsDict[param.nodeName[:-7]] else: traitsParams["argstr"] += argsDict[param.nodeName] - index = param.getElementsByTagName('index') + index = param.getElementsByTagName("index") if index: traitsParams["position"] = index[0].firstChild.nodeValue - desc = param.getElementsByTagName('description') + desc = param.getElementsByTagName("description") if index: traitsParams["desc"] = desc[0].firstChild.nodeValue - name = param.getElementsByTagName('name')[ - 0].firstChild.nodeValue + name = param.getElementsByTagName("name")[0].firstChild.nodeValue typesDict = { - 'integer': traits.Int, - 'double': traits.Float, - 'float': traits.Float, - 'image': File, - 'transform': File, - 'boolean': traits.Bool, - 'string': traits.Str, - 'file': File + "integer": traits.Int, + "double": traits.Float, + "float": traits.Float, + "image": File, + "transform": File, + "boolean": traits.Bool, + "string": traits.Str, + "file": File, } - if param.nodeName == 'string-enumeration': + if param.nodeName == 
"string-enumeration": type = traits.Enum values = [ el.firstChild.nodeValue - for el in param.getElementsByTagName('element') + for el in param.getElementsByTagName("element") ] - elif param.nodeName.endswith('-vector'): + elif param.nodeName.endswith("-vector"): type = traits.List values = [typesDict[param.nodeName[:-7]]] - traitsParams["sep"] = ',' + traitsParams["sep"] = "," else: values = [] type = typesDict[param.nodeName] - if param.nodeName in [ - 'file', 'directory', 'image', 'transform' - ] and param.getElementsByTagName( - 'channel')[0].firstChild.nodeValue == 'output': - self.inputs.add_trait(name, - traits.Either( - traits.Bool, File, - **traitsParams)) + if ( + param.nodeName in ["file", "directory", "image", "transform"] + and param.getElementsByTagName("channel")[0].firstChild.nodeValue + == "output" + ): + self.inputs.add_trait( + name, traits.Either(traits.Bool, File, **traitsParams) + ) undefined_traits[name] = Undefined # traitsParams["exists"] = True - self._outputs_filenames[ - name] = self._gen_filename_from_param(param) + self._outputs_filenames[name] = self._gen_filename_from_param(param) # undefined_output_traits[name] = Undefined # self._outputs().add_trait(name, File(*values, **traitsParams)) self._outputs_nodes.append(param) else: - if param.nodeName in [ - 'file', 'directory', 'image', 'transform' - ]: + if param.nodeName in ["file", "directory", "image", "transform"]: traitsParams["exists"] = True self.inputs.add_trait(name, type(*values, **traitsParams)) undefined_traits[name] = Undefined @@ -164,23 +165,18 @@ def _gen_filename(self, name): return None def _gen_filename_from_param(self, param): - base = param.getElementsByTagName('name')[0].firstChild.nodeValue + base = param.getElementsByTagName("name")[0].firstChild.nodeValue fileExtensions = param.getAttribute("fileExtensions") if fileExtensions: ext = fileExtensions else: - ext = { - 'image': '.nii', - 'transform': '.txt', - 'file': '' - }[param.nodeName] + ext = {"image": ".nii", "transform": ".txt", "file": ""}[param.nodeName] return base + ext def _list_outputs(self): outputs = self.output_spec().get() for output_node in self._outputs_nodes: - name = output_node.getElementsByTagName('name')[ - 0].firstChild.nodeValue + name = output_node.getElementsByTagName("name")[0].firstChild.nodeValue outputs[name] = getattr(self.inputs, name) if isdefined(outputs[name]) and isinstance(outputs[name], bool): if outputs[name]: @@ -191,8 +187,8 @@ def _list_outputs(self): def _format_arg(self, name, spec, value): if name in [ - output_node.getElementsByTagName('name')[0] - .firstChild.nodeValue for output_node in self._outputs_nodes + output_node.getElementsByTagName("name")[0].firstChild.nodeValue + for output_node in self._outputs_nodes ]: if isinstance(value, bool): fname = self._gen_filename(name) diff --git a/nipype/interfaces/elastix/base.py b/nipype/interfaces/elastix/base.py index 748f69f44d..6e26937793 100644 --- a/nipype/interfaces/elastix/base.py +++ b/nipype/interfaces/elastix/base.py @@ -12,19 +12,23 @@ from ... 
import logging from ..base import CommandLineInputSpec, Directory, traits -iflogger = logging.getLogger('nipype.interface') + +iflogger = logging.getLogger("nipype.interface") class ElastixBaseInputSpec(CommandLineInputSpec): output_path = Directory( - './', + "./", exists=True, mandatory=True, usedefault=True, - argstr='-out %s', - desc='output directory') + argstr="-out %s", + desc="output directory", + ) num_threads = traits.Int( - 1, usedefault=True, - argstr='-threads %01d', + 1, + usedefault=True, + argstr="-threads %01d", nohash=True, - desc='set the maximum number of threads of elastix') + desc="set the maximum number of threads of elastix", + ) diff --git a/nipype/interfaces/elastix/registration.py b/nipype/interfaces/elastix/registration.py index 539534aada..9c6074014b 100644 --- a/nipype/interfaces/elastix/registration.py +++ b/nipype/interfaces/elastix/registration.py @@ -14,38 +14,37 @@ from .base import ElastixBaseInputSpec from ..base import CommandLine, TraitedSpec, File, traits, InputMultiPath -iflogger = logging.getLogger('nipype.interface') +iflogger = logging.getLogger("nipype.interface") class RegistrationInputSpec(ElastixBaseInputSpec): - fixed_image = File( - exists=True, mandatory=True, argstr='-f %s', desc='fixed image') + fixed_image = File(exists=True, mandatory=True, argstr="-f %s", desc="fixed image") moving_image = File( - exists=True, mandatory=True, argstr='-m %s', desc='moving image') + exists=True, mandatory=True, argstr="-m %s", desc="moving image" + ) parameters = InputMultiPath( File(exists=True), mandatory=True, - argstr='-p %s...', - desc='parameter file, elastix handles 1 or more -p') - fixed_mask = File( - exists=True, argstr='-fMask %s', desc='mask for fixed image') - moving_mask = File( - exists=True, argstr='-mMask %s', desc='mask for moving image') + argstr="-p %s...", + desc="parameter file, elastix handles 1 or more -p", + ) + fixed_mask = File(exists=True, argstr="-fMask %s", desc="mask for fixed image") + moving_mask = File(exists=True, argstr="-mMask %s", desc="mask for moving image") initial_transform = File( - exists=True, - argstr='-t0 %s', - desc='parameter file for initial transform') + exists=True, argstr="-t0 %s", desc="parameter file for initial transform" + ) class RegistrationOutputSpec(TraitedSpec): - transform = InputMultiPath(File(exists=True), desc='output transform') - warped_file = File(desc='input moving image warped to fixed image') + transform = InputMultiPath(File(exists=True), desc="output transform") + warped_file = File(desc="input moving image warped to fixed image") warped_files = InputMultiPath( File(exists=False), - desc=('input moving image warped to fixed image at each level')) + desc=("input moving image warped to fixed image at each level"), + ) warped_files_flags = traits.List( - traits.Bool(False), - desc='flag indicating if warped image was generated') + traits.Bool(False), desc="flag indicating if warped image was generated" + ) class Registration(CommandLine): @@ -66,7 +65,7 @@ class Registration(CommandLine): """ - _cmd = 'elastix' + _cmd = "elastix" input_spec = RegistrationInputSpec output_spec = RegistrationOutputSpec @@ -75,37 +74,39 @@ def _list_outputs(self): out_dir = op.abspath(self.inputs.output_path) - regex = re.compile(r'^\((\w+)\s(.+)\)$') + regex = re.compile(r"^\((\w+)\s(.+)\)$") - outputs['transform'] = [] - outputs['warped_files'] = [] - outputs['warped_files_flags'] = [] + outputs["transform"] = [] + outputs["warped_files"] = [] + outputs["warped_files_flags"] = [] for i, params in 
enumerate(self.inputs.parameters): config = {} - with open(params, 'r') as f: + with open(params, "r") as f: for line in f.readlines(): line = line.strip() - if not line.startswith('//') and line: + if not line.startswith("//") and line: m = regex.search(line) if m: value = self._cast(m.group(2).strip()) config[m.group(1).strip()] = value - outputs['transform'].append( - op.join(out_dir, 'TransformParameters.%01d.txt' % i)) + outputs["transform"].append( + op.join(out_dir, "TransformParameters.%01d.txt" % i) + ) warped_file = None - if config['WriteResultImage']: - warped_file = op.join(out_dir, 'result.%01d.%s' % - (i, config['ResultImageFormat'])) + if config["WriteResultImage"]: + warped_file = op.join( + out_dir, "result.%01d.%s" % (i, config["ResultImageFormat"]) + ) - outputs['warped_files'].append(warped_file) - outputs['warped_files_flags'].append(config['WriteResultImage']) + outputs["warped_files"].append(warped_file) + outputs["warped_files_flags"].append(config["WriteResultImage"]) - if outputs['warped_files_flags'][-1]: - outputs['warped_file'] = outputs['warped_files'][-1] + if outputs["warped_files_flags"][-1]: + outputs["warped_file"] = outputs["warped_files"][-1] return outputs @@ -131,18 +132,17 @@ class ApplyWarpInputSpec(ElastixBaseInputSpec): transform_file = File( exists=True, mandatory=True, - argstr='-tp %s', - desc='transform-parameter file, only 1') + argstr="-tp %s", + desc="transform-parameter file, only 1", + ) moving_image = File( - exists=True, - argstr='-in %s', - mandatory=True, - desc='input image to deform') + exists=True, argstr="-in %s", mandatory=True, desc="input image to deform" + ) class ApplyWarpOutputSpec(TraitedSpec): - warped_file = File(desc='input moving image warped to fixed image') + warped_file = File(desc="input moving image warped to fixed image") class ApplyWarp(CommandLine): @@ -163,44 +163,47 @@ class ApplyWarp(CommandLine): """ - _cmd = 'transformix' + _cmd = "transformix" input_spec = ApplyWarpInputSpec output_spec = ApplyWarpOutputSpec def _list_outputs(self): outputs = self._outputs().get() out_dir = op.abspath(self.inputs.output_path) - outputs['warped_file'] = op.join(out_dir, 'result.nii.gz') + outputs["warped_file"] = op.join(out_dir, "result.nii.gz") return outputs class AnalyzeWarpInputSpec(ApplyWarpInputSpec): points = traits.Enum( - 'all', + "all", usedefault=True, position=0, - argstr='-def %s', - desc='transform all points from the input-image, which effectively' - ' generates a deformation field.') + argstr="-def %s", + desc="transform all points from the input-image, which effectively" + " generates a deformation field.", + ) jac = traits.Enum( - 'all', + "all", usedefault=True, - argstr='-jac %s', - desc='generate an image with the determinant of the spatial Jacobian') + argstr="-jac %s", + desc="generate an image with the determinant of the spatial Jacobian", + ) jacmat = traits.Enum( - 'all', + "all", usedefault=True, - argstr='-jacmat %s', - desc='generate an image with the spatial Jacobian matrix at each voxel') + argstr="-jacmat %s", + desc="generate an image with the spatial Jacobian matrix at each voxel", + ) moving_image = File( - exists=True, - argstr='-in %s', - desc='input image to deform (not used)') + exists=True, argstr="-in %s", desc="input image to deform (not used)" + ) + class AnalyzeWarpOutputSpec(TraitedSpec): - disp_field = File(desc='displacements field') - jacdet_map = File(desc='det(Jacobian) map') - jacmat_map = File(desc='Jacobian matrix map') + disp_field = File(desc="displacements field") + 
jacdet_map = File(desc="det(Jacobian) map") + jacmat_map = File(desc="Jacobian matrix map") class AnalyzeWarp(ApplyWarp): @@ -227,27 +230,29 @@ class AnalyzeWarp(ApplyWarp): def _list_outputs(self): outputs = self._outputs().get() out_dir = op.abspath(self.inputs.output_path) - outputs['disp_field'] = op.join(out_dir, 'deformationField.nii.gz') - outputs['jacdet_map'] = op.join(out_dir, 'spatialJacobian.nii.gz') - outputs['jacmat_map'] = op.join(out_dir, 'fullSpatialJacobian.nii.gz') + outputs["disp_field"] = op.join(out_dir, "deformationField.nii.gz") + outputs["jacdet_map"] = op.join(out_dir, "spatialJacobian.nii.gz") + outputs["jacmat_map"] = op.join(out_dir, "fullSpatialJacobian.nii.gz") return outputs class PointsWarpInputSpec(ElastixBaseInputSpec): points_file = File( exists=True, - argstr='-def %s', + argstr="-def %s", mandatory=True, - desc='input points (accepts .vtk triangular meshes).') + desc="input points (accepts .vtk triangular meshes).", + ) transform_file = File( exists=True, mandatory=True, - argstr='-tp %s', - desc='transform-parameter file, only 1') + argstr="-tp %s", + desc="transform-parameter file, only 1", + ) class PointsWarpOutputSpec(TraitedSpec): - warped_file = File(desc='input points displaced in fixed image domain') + warped_file = File(desc="input points displaced in fixed image domain") class PointsWarp(CommandLine): @@ -267,7 +272,7 @@ class PointsWarp(CommandLine): """ - _cmd = 'transformix' + _cmd = "transformix" input_spec = PointsWarpInputSpec output_spec = PointsWarpOutputSpec @@ -277,5 +282,5 @@ def _list_outputs(self): fname, ext = op.splitext(op.basename(self.inputs.points_file)) - outputs['warped_file'] = op.join(out_dir, 'outputpoints%s' % ext) + outputs["warped_file"] = op.join(out_dir, "outputpoints%s" % ext) return outputs diff --git a/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py b/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py index b8eb8686d0..509c0a5a77 100644 --- a/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py +++ b/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py @@ -4,54 +4,28 @@ def test_AnalyzeWarp_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - jac=dict( - argstr='-jac %s', - usedefault=True, - ), - jacmat=dict( - argstr='-jacmat %s', - usedefault=True, - ), - moving_image=dict( - argstr='-in %s', - extensions=None, - ), - num_threads=dict( - argstr='-threads %01d', - nohash=True, - usedefault=True, - ), - output_path=dict( - argstr='-out %s', - mandatory=True, - usedefault=True, - ), - points=dict( - argstr='-def %s', - position=0, - usedefault=True, - ), - transform_file=dict( - argstr='-tp %s', - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + jac=dict(argstr="-jac %s", usedefault=True,), + jacmat=dict(argstr="-jacmat %s", usedefault=True,), + moving_image=dict(argstr="-in %s", extensions=None,), + num_threads=dict(argstr="-threads %01d", nohash=True, usedefault=True,), + output_path=dict(argstr="-out %s", mandatory=True, usedefault=True,), + points=dict(argstr="-def %s", position=0, usedefault=True,), + transform_file=dict(argstr="-tp %s", extensions=None, mandatory=True,), ) inputs = AnalyzeWarp.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_AnalyzeWarp_outputs(): output_map = dict( - disp_field=dict(extensions=None, ), - 
jacdet_map=dict(extensions=None, ), - jacmat_map=dict(extensions=None, ), + disp_field=dict(extensions=None,), + jacdet_map=dict(extensions=None,), + jacmat_map=dict(extensions=None,), ) outputs = AnalyzeWarp.output_spec() diff --git a/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py b/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py index 902928ae02..fd77478270 100644 --- a/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py +++ b/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py @@ -4,39 +4,22 @@ def test_ApplyWarp_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - moving_image=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - ), - num_threads=dict( - argstr='-threads %01d', - nohash=True, - usedefault=True, - ), - output_path=dict( - argstr='-out %s', - mandatory=True, - usedefault=True, - ), - transform_file=dict( - argstr='-tp %s', - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + moving_image=dict(argstr="-in %s", extensions=None, mandatory=True,), + num_threads=dict(argstr="-threads %01d", nohash=True, usedefault=True,), + output_path=dict(argstr="-out %s", mandatory=True, usedefault=True,), + transform_file=dict(argstr="-tp %s", extensions=None, mandatory=True,), ) inputs = ApplyWarp.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ApplyWarp_outputs(): - output_map = dict(warped_file=dict(extensions=None, ), ) + output_map = dict(warped_file=dict(extensions=None,),) outputs = ApplyWarp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/elastix/tests/test_auto_EditTransform.py b/nipype/interfaces/elastix/tests/test_auto_EditTransform.py index 2720e14f44..687906f8b0 100644 --- a/nipype/interfaces/elastix/tests/test_auto_EditTransform.py +++ b/nipype/interfaces/elastix/tests/test_auto_EditTransform.py @@ -4,26 +4,22 @@ def test_EditTransform_inputs(): input_map = dict( - interpolation=dict( - argstr='FinalBSplineInterpolationOrder', - usedefault=True, - ), - output_file=dict(extensions=None, ), - output_format=dict(argstr='ResultImageFormat', ), - output_type=dict(argstr='ResultImagePixelType', ), - reference_image=dict(extensions=None, ), - transform_file=dict( - extensions=None, - mandatory=True, - ), + interpolation=dict(argstr="FinalBSplineInterpolationOrder", usedefault=True,), + output_file=dict(extensions=None,), + output_format=dict(argstr="ResultImageFormat",), + output_type=dict(argstr="ResultImagePixelType",), + reference_image=dict(extensions=None,), + transform_file=dict(extensions=None, mandatory=True,), ) inputs = EditTransform.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_EditTransform_outputs(): - output_map = dict(output_file=dict(extensions=None, ), ) + output_map = dict(output_file=dict(extensions=None,),) outputs = EditTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py b/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py index 8047d31ecb..83e3092f28 100644 --- a/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py +++ b/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py @@ -4,39 +4,22 @@ def test_PointsWarp_inputs(): 
input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - num_threads=dict( - argstr='-threads %01d', - nohash=True, - usedefault=True, - ), - output_path=dict( - argstr='-out %s', - mandatory=True, - usedefault=True, - ), - points_file=dict( - argstr='-def %s', - extensions=None, - mandatory=True, - ), - transform_file=dict( - argstr='-tp %s', - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + num_threads=dict(argstr="-threads %01d", nohash=True, usedefault=True,), + output_path=dict(argstr="-out %s", mandatory=True, usedefault=True,), + points_file=dict(argstr="-def %s", extensions=None, mandatory=True,), + transform_file=dict(argstr="-tp %s", extensions=None, mandatory=True,), ) inputs = PointsWarp.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_PointsWarp_outputs(): - output_map = dict(warped_file=dict(extensions=None, ), ) + output_map = dict(warped_file=dict(extensions=None,),) outputs = PointsWarp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/elastix/tests/test_auto_Registration.py b/nipype/interfaces/elastix/tests/test_auto_Registration.py index 095d1cd521..18ad8f93a2 100644 --- a/nipype/interfaces/elastix/tests/test_auto_Registration.py +++ b/nipype/interfaces/elastix/tests/test_auto_Registration.py @@ -4,57 +4,28 @@ def test_Registration_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixed_image=dict( - argstr='-f %s', - extensions=None, - mandatory=True, - ), - fixed_mask=dict( - argstr='-fMask %s', - extensions=None, - ), - initial_transform=dict( - argstr='-t0 %s', - extensions=None, - ), - moving_image=dict( - argstr='-m %s', - extensions=None, - mandatory=True, - ), - moving_mask=dict( - argstr='-mMask %s', - extensions=None, - ), - num_threads=dict( - argstr='-threads %01d', - nohash=True, - usedefault=True, - ), - output_path=dict( - argstr='-out %s', - mandatory=True, - usedefault=True, - ), - parameters=dict( - argstr='-p %s...', - mandatory=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + fixed_image=dict(argstr="-f %s", extensions=None, mandatory=True,), + fixed_mask=dict(argstr="-fMask %s", extensions=None,), + initial_transform=dict(argstr="-t0 %s", extensions=None,), + moving_image=dict(argstr="-m %s", extensions=None, mandatory=True,), + moving_mask=dict(argstr="-mMask %s", extensions=None,), + num_threads=dict(argstr="-threads %01d", nohash=True, usedefault=True,), + output_path=dict(argstr="-out %s", mandatory=True, usedefault=True,), + parameters=dict(argstr="-p %s...", mandatory=True,), ) inputs = Registration.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Registration_outputs(): output_map = dict( transform=dict(), - warped_file=dict(extensions=None, ), + warped_file=dict(extensions=None,), warped_files=dict(), warped_files_flags=dict(), ) diff --git a/nipype/interfaces/elastix/utils.py b/nipype/interfaces/elastix/utils.py index 52fd7023a2..3628416b1e 100644 --- a/nipype/interfaces/elastix/utils.py +++ b/nipype/interfaces/elastix/utils.py @@ -10,49 +10,60 @@ import os.path as op from ... 
import logging -from ..base import (BaseInterface, BaseInterfaceInputSpec, isdefined, - TraitedSpec, File, traits) -iflogger = logging.getLogger('nipype.interface') +from ..base import ( + BaseInterface, + BaseInterfaceInputSpec, + isdefined, + TraitedSpec, + File, + traits, +) + +iflogger = logging.getLogger("nipype.interface") class EditTransformInputSpec(BaseInterfaceInputSpec): transform_file = File( - exists=True, mandatory=True, desc='transform-parameter file, only 1') + exists=True, mandatory=True, desc="transform-parameter file, only 1" + ) reference_image = File( exists=True, - desc=('set a new reference image to change the ' - 'target coordinate system.')) + desc=("set a new reference image to change the " "target coordinate system."), + ) interpolation = traits.Enum( - 'cubic', - 'linear', - 'nearest', + "cubic", + "linear", + "nearest", usedefault=True, - argstr='FinalBSplineInterpolationOrder', - desc='set a new interpolator for transformation') + argstr="FinalBSplineInterpolationOrder", + desc="set a new interpolator for transformation", + ) output_type = traits.Enum( - 'float', - 'unsigned char', - 'unsigned short', - 'short', - 'unsigned long', - 'long', - 'double', - argstr='ResultImagePixelType', - desc='set a new output pixel type for resampled images') + "float", + "unsigned char", + "unsigned short", + "short", + "unsigned long", + "long", + "double", + argstr="ResultImagePixelType", + desc="set a new output pixel type for resampled images", + ) output_format = traits.Enum( - 'nii.gz', - 'nii', - 'mhd', - 'hdr', - 'vtk', - argstr='ResultImageFormat', - desc='set a new image format for resampled images') - output_file = File(desc='the filename for the resulting transform file') + "nii.gz", + "nii", + "mhd", + "hdr", + "vtk", + argstr="ResultImageFormat", + desc="set a new image format for resampled images", + ) + output_file = File(desc="the filename for the resulting transform file") class EditTransformOutputSpec(TraitedSpec): - output_file = File(exists=True, desc='output transform file') + output_file = File(exists=True, desc="output transform file") class EditTransform(BaseInterface): @@ -73,38 +84,42 @@ class EditTransform(BaseInterface): input_spec = EditTransformInputSpec output_spec = EditTransformOutputSpec - _out_file = '' - _pattern = '\((?P<entry>%s\s\"?)([-\.\s\w]+)(\"?\))' + _out_file = "" + _pattern = '\((?P<entry>%s\s"?)([-\.\s\w]+)("?\))' - _interp = {'nearest': 0, 'linear': 1, 'cubic': 3} + _interp = {"nearest": 0, "linear": 1, "cubic": 3} def _run_interface(self, runtime): import re import nibabel as nb import numpy as np - contents = '' + contents = "" - with open(self.inputs.transform_file, 'r') as f: + with open(self.inputs.transform_file, "r") as f: contents = f.read() if isdefined(self.inputs.output_type): - p = re.compile((self._pattern % - 'ResultImagePixelType').decode('string-escape')) - rep = '(\g<entry>%s\g<3>' % self.inputs.output_type + p = re.compile( + (self._pattern % "ResultImagePixelType").decode("string-escape") + ) + rep = "(\g<entry>%s\g<3>" % self.inputs.output_type contents = p.sub(rep, contents) if isdefined(self.inputs.output_format): p = re.compile( - (self._pattern % 'ResultImageFormat').decode('string-escape')) - rep = '(\g<entry>%s\g<3>' % self.inputs.output_format + (self._pattern % "ResultImageFormat").decode("string-escape") + ) + rep = "(\g<entry>%s\g<3>" % self.inputs.output_format contents = p.sub(rep, contents) if isdefined(self.inputs.interpolation): p = re.compile( - (self._pattern % - 'FinalBSplineInterpolationOrder').decode('string-escape')) - rep = 
'(\g<entry>%s\g<3>' % self._interp[self.inputs.interpolation] + (self._pattern % "FinalBSplineInterpolationOrder").decode( + "string-escape" + ) + ) + rep = "(\g<entry>%s\g<3>" % self._interp[self.inputs.interpolation] contents = p.sub(rep, contents) if isdefined(self.inputs.reference_image): @@ -113,19 +128,19 @@ def _run_interface(self, runtime): if len(im.header.get_zooms()) == 4: im = nb.func.four_to_three(im)[0] - size = ' '.join(["%01d" % s for s in im.shape]) - p = re.compile((self._pattern % 'Size').decode('string-escape')) - rep = '(\g<entry>%s\g<3>' % size + size = " ".join(["%01d" % s for s in im.shape]) + p = re.compile((self._pattern % "Size").decode("string-escape")) + rep = "(\g<entry>%s\g<3>" % size contents = p.sub(rep, contents) - index = ' '.join(["0" for s in im.shape]) - p = re.compile((self._pattern % 'Index').decode('string-escape')) - rep = '(\g<entry>%s\g<3>' % index + index = " ".join(["0" for s in im.shape]) + p = re.compile((self._pattern % "Index").decode("string-escape")) + rep = "(\g<entry>%s\g<3>" % index contents = p.sub(rep, contents) - spacing = ' '.join(["%0.4f" % f for f in im.header.get_zooms()]) - p = re.compile((self._pattern % 'Spacing').decode('string-escape')) - rep = '(\g<entry>%s\g<3>' % spacing + spacing = " ".join(["%0.4f" % f for f in im.header.get_zooms()]) + p = re.compile((self._pattern % "Spacing").decode("string-escape")) + rep = "(\g<entry>%s\g<3>" % spacing contents = p.sub(rep, contents) itkmat = np.eye(4) @@ -133,37 +148,36 @@ def _run_interface(self, runtime): itkmat[1, 1] = -1 affine = np.dot(itkmat, im.affine) - dirs = ' '.join( - ['%0.4f' % f for f in affine[0:3, 0:3].reshape(-1)]) - orig = ' '.join(['%0.4f' % f for f in affine[0:3, 3].reshape(-1)]) + dirs = " ".join(["%0.4f" % f for f in affine[0:3, 0:3].reshape(-1)]) + orig = " ".join(["%0.4f" % f for f in affine[0:3, 3].reshape(-1)]) # p = re.compile((self._pattern % 'Direction').decode('string-escape')) # rep = '(\g<entry>%s\g<3>' % dirs # contents = p.sub(rep, contents) - p = re.compile((self._pattern % 'Origin').decode('string-escape')) - rep = '(\g<entry>%s\g<3>' % orig + p = re.compile((self._pattern % "Origin").decode("string-escape")) + rep = "(\g<entry>%s\g<3>" % orig contents = p.sub(rep, contents) - with open(self._get_outfile(), 'w') as of: + with open(self._get_outfile(), "w") as of: of.write(contents) return runtime def _list_outputs(self): outputs = self.output_spec().get() - outputs['output_file'] = getattr(self, '_out_file') + outputs["output_file"] = getattr(self, "_out_file") return outputs def _get_outfile(self): - val = getattr(self, '_out_file') - if val is not None and val != '': + val = getattr(self, "_out_file") + if val is not None and val != "": return val if isdefined(self.inputs.output_file): - setattr(self, '_out_file', self.inputs.output_file) + setattr(self, "_out_file", self.inputs.output_file) return self.inputs.output_file out_file = op.abspath(op.basename(self.inputs.transform_file)) - setattr(self, '_out_file', out_file) + setattr(self, "_out_file", out_file) return out_file diff --git a/nipype/interfaces/freesurfer/__init__.py b/nipype/interfaces/freesurfer/__init__.py index 44c939706e..803ac571cb 100644 --- a/nipype/interfaces/freesurfer/__init__.py +++ b/nipype/interfaces/freesurfer/__init__.py @@ -5,25 +5,91 @@ from .base import Info, FSCommand, no_freesurfer from .preprocess import ( - ParseDICOMDir, UnpackSDICOMDir, MRIConvert, Resample, ReconAll, BBRegister, - ApplyVolTransform, Smooth, DICOMConvert, RobustRegister, FitMSParams, - SynthesizeFLASH, MNIBiasCorrection, WatershedSkullStrip, Normalize, - 
CANormalize, CARegister, CALabel, MRIsCALabel, SegmentCC, SegmentWM, - EditWMwithAseg, ConcatenateLTA) -from .model import (MRISPreproc, MRISPreprocReconAll, GLMFit, OneSampleTTest, - Binarize, Concatenate, SegStats, SegStatsReconAll, - Label2Vol, MS_LDA, Label2Label, Label2Annot, - SphericalAverage) + ParseDICOMDir, + UnpackSDICOMDir, + MRIConvert, + Resample, + ReconAll, + BBRegister, + ApplyVolTransform, + Smooth, + DICOMConvert, + RobustRegister, + FitMSParams, + SynthesizeFLASH, + MNIBiasCorrection, + WatershedSkullStrip, + Normalize, + CANormalize, + CARegister, + CALabel, + MRIsCALabel, + SegmentCC, + SegmentWM, + EditWMwithAseg, + ConcatenateLTA, +) +from .model import ( + MRISPreproc, + MRISPreprocReconAll, + GLMFit, + OneSampleTTest, + Binarize, + Concatenate, + SegStats, + SegStatsReconAll, + Label2Vol, + MS_LDA, + Label2Label, + Label2Annot, + SphericalAverage, +) from .utils import ( - SampleToSurface, SurfaceSmooth, SurfaceTransform, Surface2VolTransform, - SurfaceSnapshots, ApplyMask, MRIsConvert, MRITessellate, MRIPretess, - MRIMarchingCubes, SmoothTessellation, MakeAverageSubject, - ExtractMainComponent, Tkregister2, AddXFormToHeader, - CheckTalairachAlignment, TalairachAVI, TalairachQC, RemoveNeck, MRIFill, - MRIsInflate, Sphere, FixTopology, EulerNumber, RemoveIntersection, - MakeSurfaces, Curvature, CurvatureStats, Jacobian, MRIsCalc, VolumeMask, - ParcellationStats, Contrast, RelabelHypointensities, Aparc2Aseg, Apas2Aseg, - MRIsExpand, MRIsCombine) -from .longitudinal import (RobustTemplate, FuseSegmentations) -from .registration import (MPRtoMNI305, RegisterAVItoTalairach, EMRegister, - Register, Paint, MRICoreg) + SampleToSurface, + SurfaceSmooth, + SurfaceTransform, + Surface2VolTransform, + SurfaceSnapshots, + ApplyMask, + MRIsConvert, + MRITessellate, + MRIPretess, + MRIMarchingCubes, + SmoothTessellation, + MakeAverageSubject, + ExtractMainComponent, + Tkregister2, + AddXFormToHeader, + CheckTalairachAlignment, + TalairachAVI, + TalairachQC, + RemoveNeck, + MRIFill, + MRIsInflate, + Sphere, + FixTopology, + EulerNumber, + RemoveIntersection, + MakeSurfaces, + Curvature, + CurvatureStats, + Jacobian, + MRIsCalc, + VolumeMask, + ParcellationStats, + Contrast, + RelabelHypointensities, + Aparc2Aseg, + Apas2Aseg, + MRIsExpand, + MRIsCombine, +) +from .longitudinal import RobustTemplate, FuseSegmentations +from .registration import ( + MPRtoMNI305, + RegisterAVItoTalairach, + EMRegister, + Register, + Paint, + MRICoreg, +) diff --git a/nipype/interfaces/freesurfer/base.py b/nipype/interfaces/freesurfer/base.py index 3e82624cce..66023386da 100644 --- a/nipype/interfaces/freesurfer/base.py +++ b/nipype/interfaces/freesurfer/base.py @@ -18,10 +18,18 @@ from ... 
import LooseVersion from ...utils.filemanip import fname_presuffix -from ..base import (CommandLine, Directory, CommandLineInputSpec, isdefined, - traits, TraitedSpec, File, PackageInfo) +from ..base import ( + CommandLine, + Directory, + CommandLineInputSpec, + isdefined, + traits, + TraitedSpec, + File, + PackageInfo, +) -__docformat__ = 'restructuredtext' +__docformat__ = "restructuredtext" class Info(PackageInfo): @@ -35,9 +43,9 @@ class Info(PackageInfo): >>> Info.subjectsdir() # doctest: +SKIP """ - if os.getenv('FREESURFER_HOME'): - version_file = os.path.join( - os.getenv('FREESURFER_HOME'), 'build-stamp.txt') + + if os.getenv("FREESURFER_HOME"): + version_file = os.path.join(os.getenv("FREESURFER_HOME"), "build-stamp.txt") @staticmethod def parse_version(raw_info): @@ -51,30 +59,30 @@ def looseversion(cls): """ ver = cls.version() if ver is None: - return LooseVersion('0.0.0') + return LooseVersion("0.0.0") - vinfo = ver.rstrip().split('-') + vinfo = ver.rstrip().split("-") try: int(vinfo[-1], 16) except ValueError: - githash = '' + githash = "" else: - githash = '.' + vinfo[-1] + githash = "." + vinfo[-1] # As of FreeSurfer v6.0.0, the final component is a githash if githash: - if vinfo[3] == 'dev': + if vinfo[3] == "dev": # This will need updating when v6.0.1 comes out - vstr = '6.0.0-dev' + githash - elif vinfo[5][0] == 'v': + vstr = "6.0.0-dev" + githash + elif vinfo[5][0] == "v": vstr = vinfo[5][1:] else: - raise RuntimeError('Unknown version string: ' + ver) + raise RuntimeError("Unknown version string: " + ver) # Retain pre-6.0.0 heuristics - elif 'dev' in ver: - vstr = vinfo[-1] + '-dev' + elif "dev" in ver: + vstr = vinfo[-1] + "-dev" else: - vstr = ver.rstrip().split('-v')[-1] + vstr = ver.rstrip().split("-v")[-1] return LooseVersion(vstr) @@ -96,12 +104,12 @@ def subjectsdir(cls): """ if cls.version(): - return os.environ['SUBJECTS_DIR'] + return os.environ["SUBJECTS_DIR"] return None class FSTraitedSpec(CommandLineInputSpec): - subjects_dir = Directory(exists=True, desc='subjects directory') + subjects_dir = Directory(exists=True, desc="subjects directory") class FSCommand(CommandLine): @@ -116,7 +124,7 @@ class FSCommand(CommandLine): def __init__(self, **inputs): super(FSCommand, self).__init__(**inputs) - self.inputs.on_trait_change(self._subjects_dir_update, 'subjects_dir') + self.inputs.on_trait_change(self._subjects_dir_update, "subjects_dir") if not self._subjects_dir: self._subjects_dir = Info.subjectsdir() if not isdefined(self.inputs.subjects_dir) and self._subjects_dir: @@ -125,27 +133,20 @@ def __init__(self, **inputs): def _subjects_dir_update(self): if self.inputs.subjects_dir: - self.inputs.environ.update({ - 'SUBJECTS_DIR': self.inputs.subjects_dir - }) + self.inputs.environ.update({"SUBJECTS_DIR": self.inputs.subjects_dir}) @classmethod def set_default_subjects_dir(cls, subjects_dir): cls._subjects_dir = subjects_dir def run(self, **inputs): - if 'subjects_dir' in inputs: - self.inputs.subjects_dir = inputs['subjects_dir'] + if "subjects_dir" in inputs: + self.inputs.subjects_dir = inputs["subjects_dir"] self._subjects_dir_update() return super(FSCommand, self).run(**inputs) - def _gen_fname(self, - basename, - fname=None, - cwd=None, - suffix='_fs', - use_ext=True): - '''Define a generic mapping for a single outfile + def _gen_fname(self, basename, fname=None, cwd=None, suffix="_fs", use_ext=True): + """Define a generic mapping for a single outfile The filename is potentially autogenerated by suffixing inputs.infile @@ -159,15 +160,14 @@ def 
_gen_fname(self, prefix paths with cwd, otherwise os.getcwd() suffix : string default suffix - ''' - if basename == '': - msg = 'Unable to generate filename for command %s. ' % self.cmd - msg += 'basename is not set!' + """ + if basename == "": + msg = "Unable to generate filename for command %s. " % self.cmd + msg += "basename is not set!" raise ValueError(msg) if cwd is None: cwd = os.getcwd() - fname = fname_presuffix( - basename, suffix=suffix, use_ext=use_ext, newpath=cwd) + fname = fname_presuffix(basename, suffix=suffix, use_ext=use_ext, newpath=cwd) return fname @property @@ -196,9 +196,9 @@ def _associated_file(in_file, out_name): inspecting the surface data structure. """ path, base = os.path.split(out_name) - if path == '': + if path == "": path, in_file = os.path.split(in_file) - hemis = ('lh.', 'rh.') + hemis = ("lh.", "rh.") if in_file[:3] in hemis and base[:3] not in hemis: base = in_file[:3] + base return os.path.join(path, base) @@ -207,22 +207,24 @@ def _associated_file(in_file, out_name): class FSScriptCommand(FSCommand): """ Support for Freesurfer script commands with log terminal_output """ - _terminal_output = 'file' + + _terminal_output = "file" _always_run = False def _list_outputs(self): outputs = self._outputs().get() - outputs['log_file'] = os.path.abspath('output.nipype') + outputs["log_file"] = os.path.abspath("output.nipype") return outputs class FSScriptOutputSpec(TraitedSpec): log_file = File( - 'output.nipype', usedefault=True, exists=True, desc="The output log") + "output.nipype", usedefault=True, exists=True, desc="The output log" + ) class FSTraitedSpecOpenMP(FSTraitedSpec): - num_threads = traits.Int(desc='allows for specifying more threads') + num_threads = traits.Int(desc="allows for specifying more threads") class FSCommandOpenMP(FSCommand): @@ -238,25 +240,24 @@ class FSCommandOpenMP(FSCommand): def __init__(self, **inputs): super(FSCommandOpenMP, self).__init__(**inputs) - self.inputs.on_trait_change(self._num_threads_update, 'num_threads') + self.inputs.on_trait_change(self._num_threads_update, "num_threads") if not self._num_threads: - self._num_threads = os.environ.get('OMP_NUM_THREADS', None) + self._num_threads = os.environ.get("OMP_NUM_THREADS", None) if not self._num_threads: - self._num_threads = os.environ.get('NSLOTS', None) + self._num_threads = os.environ.get("NSLOTS", None) if not isdefined(self.inputs.num_threads) and self._num_threads: self.inputs.num_threads = int(self._num_threads) self._num_threads_update() def _num_threads_update(self): if self.inputs.num_threads: - self.inputs.environ.update({ - 'OMP_NUM_THREADS': - str(self.inputs.num_threads) - }) + self.inputs.environ.update( + {"OMP_NUM_THREADS": str(self.inputs.num_threads)} + ) def run(self, **inputs): - if 'num_threads' in inputs: - self.inputs.num_threads = inputs['num_threads'] + if "num_threads" in inputs: + self.inputs.num_threads = inputs["num_threads"] self._num_threads_update() return super(FSCommandOpenMP, self).run(**inputs) diff --git a/nipype/interfaces/freesurfer/longitudinal.py b/nipype/interfaces/freesurfer/longitudinal.py index eed32173b4..aa5f928550 100644 --- a/nipype/interfaces/freesurfer/longitudinal.py +++ b/nipype/interfaces/freesurfer/longitudinal.py @@ -7,13 +7,11 @@ import os from ... 
import logging -from ..base import (TraitedSpec, File, traits, InputMultiPath, OutputMultiPath, - isdefined) -from .base import (FSCommand, FSTraitedSpec, FSCommandOpenMP, - FSTraitedSpecOpenMP) +from ..base import TraitedSpec, File, traits, InputMultiPath, OutputMultiPath, isdefined +from .base import FSCommand, FSTraitedSpec, FSCommandOpenMP, FSTraitedSpecOpenMP -__docformat__ = 'restructuredtext' -iflogger = logging.getLogger('nipype.interface') +__docformat__ = "restructuredtext" +iflogger = logging.getLogger("nipype.interface") class RobustTemplateInputSpec(FSTraitedSpecOpenMP): @@ -21,78 +19,92 @@ class RobustTemplateInputSpec(FSTraitedSpecOpenMP): in_files = InputMultiPath( File(exists=True), mandatory=True, - argstr='--mov %s', - desc='input movable volumes to be aligned to common mean/median ' - 'template') + argstr="--mov %s", + desc="input movable volumes to be aligned to common mean/median " "template", + ) out_file = File( - 'mri_robust_template_out.mgz', + "mri_robust_template_out.mgz", mandatory=True, usedefault=True, - argstr='--template %s', - desc='output template volume (final mean/median image)') + argstr="--template %s", + desc="output template volume (final mean/median image)", + ) auto_detect_sensitivity = traits.Bool( - argstr='--satit', - xor=['outlier_sensitivity'], + argstr="--satit", + xor=["outlier_sensitivity"], mandatory=True, - desc='auto-detect good sensitivity (recommended for head or full ' - 'brain scans)') + desc="auto-detect good sensitivity (recommended for head or full " + "brain scans)", + ) outlier_sensitivity = traits.Float( - argstr='--sat %.4f', - xor=['auto_detect_sensitivity'], + argstr="--sat %.4f", + xor=["auto_detect_sensitivity"], mandatory=True, desc='set outlier sensitivity manually (e.g. "--sat 4.685" ). 
Higher ' - 'values mean less sensitivity.') + "values mean less sensitivity.", + ) # optional transform_outputs = traits.Either( InputMultiPath(File(exists=False)), traits.Bool, - argstr='--lta %s', - desc='output xforms to template (for each input)') + argstr="--lta %s", + desc="output xforms to template (for each input)", + ) intensity_scaling = traits.Bool( default_value=False, - argstr='--iscale', - desc='allow also intensity scaling (default off)') + argstr="--iscale", + desc="allow also intensity scaling (default off)", + ) scaled_intensity_outputs = traits.Either( InputMultiPath(File(exists=False)), traits.Bool, - argstr='--iscaleout %s', - desc='final intensity scales (will activate --iscale)') + argstr="--iscaleout %s", + desc="final intensity scales (will activate --iscale)", + ) subsample_threshold = traits.Int( - argstr='--subsample %d', - desc='subsample if dim > # on all axes (default no subs.)') + argstr="--subsample %d", + desc="subsample if dim > # on all axes (default no subs.)", + ) average_metric = traits.Enum( - 'median', - 'mean', - argstr='--average %d', - desc='construct template from: 0 Mean, 1 Median (default)') + "median", + "mean", + argstr="--average %d", + desc="construct template from: 0 Mean, 1 Median (default)", + ) initial_timepoint = traits.Int( - argstr='--inittp %d', - desc='use TP# for spacial init (default random), 0: no init') + argstr="--inittp %d", + desc="use TP# for spacial init (default random), 0: no init", + ) fixed_timepoint = traits.Bool( default_value=False, - argstr='--fixtp', - desc='map everthing to init TP# (init TP is not resampled)') + argstr="--fixtp", + desc="map everthing to init TP# (init TP is not resampled)", + ) no_iteration = traits.Bool( default_value=False, - argstr='--noit', - desc='do not iterate, just create first template') + argstr="--noit", + desc="do not iterate, just create first template", + ) initial_transforms = InputMultiPath( File(exists=True), - argstr='--ixforms %s', - desc='use initial transforms (lta) on source') + argstr="--ixforms %s", + desc="use initial transforms (lta) on source", + ) in_intensity_scales = InputMultiPath( - File(exists=True), - argstr='--iscalein %s', - desc='use initial intensity scales') + File(exists=True), argstr="--iscalein %s", desc="use initial intensity scales" + ) class RobustTemplateOutputSpec(TraitedSpec): out_file = File( - exists=True, desc='output template volume (final mean/median image)') + exists=True, desc="output template volume (final mean/median image)" + ) transform_outputs = OutputMultiPath( - File(exists=True), desc="output xform files from moving to template") + File(exists=True), desc="output xform files from moving to template" + ) scaled_intensity_outputs = OutputMultiPath( - File(exists=True), desc="output final intensity scales") + File(exists=True), desc="output final intensity scales" + ) class RobustTemplate(FSCommandOpenMP): @@ -135,76 +147,73 @@ class RobustTemplate(FSCommandOpenMP): """ - _cmd = 'mri_robust_template' + _cmd = "mri_robust_template" input_spec = RobustTemplateInputSpec output_spec = RobustTemplateOutputSpec def _format_arg(self, name, spec, value): - if name == 'average_metric': + if name == "average_metric": # return enumeration value return spec.argstr % {"mean": 0, "median": 1}[value] - if name in ('transform_outputs', 'scaled_intensity_outputs'): + if name in ("transform_outputs", "scaled_intensity_outputs"): value = self._list_outputs()[name] return super(RobustTemplate, self)._format_arg(name, spec, value) def _list_outputs(self): 
outputs = self.output_spec().get() - outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) n_files = len(self.inputs.in_files) - fmt = '{}{:02d}.{}' if n_files > 9 else '{}{:d}.{}' + fmt = "{}{:02d}.{}" if n_files > 9 else "{}{:d}.{}" if isdefined(self.inputs.transform_outputs): fnames = self.inputs.transform_outputs if fnames is True: - fnames = [ - fmt.format('tp', i + 1, 'lta') for i in range(n_files) - ] - outputs['transform_outputs'] = [os.path.abspath(x) for x in fnames] + fnames = [fmt.format("tp", i + 1, "lta") for i in range(n_files)] + outputs["transform_outputs"] = [os.path.abspath(x) for x in fnames] if isdefined(self.inputs.scaled_intensity_outputs): fnames = self.inputs.scaled_intensity_outputs if fnames is True: - fnames = [ - fmt.format('is', i + 1, 'txt') for i in range(n_files) - ] - outputs['scaled_intensity_outputs'] = [ - os.path.abspath(x) for x in fnames - ] + fnames = [fmt.format("is", i + 1, "txt") for i in range(n_files)] + outputs["scaled_intensity_outputs"] = [os.path.abspath(x) for x in fnames] return outputs class FuseSegmentationsInputSpec(FSTraitedSpec): # required subject_id = traits.String( - argstr='%s', position=-3, desc="subject_id being processed") + argstr="%s", position=-3, desc="subject_id being processed" + ) timepoints = InputMultiPath( traits.String(), mandatory=True, - argstr='%s', + argstr="%s", position=-2, - desc='subject_ids or timepoints to be processed') + desc="subject_ids or timepoints to be processed", + ) out_file = File( - exists=False, - mandatory=True, - position=-1, - desc="output fused segmentation file") + exists=False, mandatory=True, position=-1, desc="output fused segmentation file" + ) in_segmentations = InputMultiPath( File(exists=True), argstr="-a %s", mandatory=True, desc="name of aseg file to use (default: aseg.mgz) \ - must include the aseg files for all the given timepoints") + must include the aseg files for all the given timepoints", + ) in_segmentations_noCC = InputMultiPath( File(exists=True), argstr="-c %s", mandatory=True, desc="name of aseg file w/o CC labels (default: aseg.auto_noCCseg.mgz) \ - must include the corresponding file for all the given timepoints") + must include the corresponding file for all the given timepoints", + ) in_norms = InputMultiPath( File(exists=True), argstr="-n %s", mandatory=True, desc="-n - name of norm file to use (default: norm.mgs) \ must include the corresponding norm file for all given timepoints \ - as well as for the current subject") + as well as for the current subject", + ) class FuseSegmentationsOutputSpec(TraitedSpec): @@ -228,17 +237,17 @@ class FuseSegmentations(FSCommand): 'mri_fuse_segmentations -n norm.mgz -a aseg.mgz -c aseg.mgz tp.long.A.template tp1 tp2' """ - _cmd = 'mri_fuse_segmentations' + _cmd = "mri_fuse_segmentations" input_spec = FuseSegmentationsInputSpec output_spec = FuseSegmentationsOutputSpec def _format_arg(self, name, spec, value): - if name in ('in_segmentations', 'in_segmentations_noCC', 'in_norms'): + if name in ("in_segmentations", "in_segmentations_noCC", "in_norms"): # return enumeration value return spec.argstr % os.path.basename(value[0]) return super(FuseSegmentations, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) return outputs diff --git a/nipype/interfaces/freesurfer/model.py 
b/nipype/interfaces/freesurfer/model.py index 526f0a9919..8bf7918ae4 100644 --- a/nipype/interfaces/freesurfer/model.py +++ b/nipype/interfaces/freesurfer/model.py @@ -8,84 +8,106 @@ import os from ...utils.filemanip import fname_presuffix, split_filename -from ..base import (TraitedSpec, File, traits, InputMultiPath, OutputMultiPath, - Directory, isdefined) +from ..base import ( + TraitedSpec, + File, + traits, + InputMultiPath, + OutputMultiPath, + Directory, + isdefined, +) from .base import FSCommand, FSTraitedSpec from .utils import copy2subjdir -__docformat__ = 'restructuredtext' +__docformat__ = "restructuredtext" class MRISPreprocInputSpec(FSTraitedSpec): - out_file = File(argstr='--out %s', genfile=True, desc='output filename') + out_file = File(argstr="--out %s", genfile=True, desc="output filename") target = traits.Str( - argstr='--target %s', mandatory=True, desc='target subject name') + argstr="--target %s", mandatory=True, desc="target subject name" + ) hemi = traits.Enum( - 'lh', - 'rh', - argstr='--hemi %s', + "lh", + "rh", + argstr="--hemi %s", mandatory=True, - desc='hemisphere for source and target') + desc="hemisphere for source and target", + ) surf_measure = traits.Str( - argstr='--meas %s', - xor=('surf_measure', 'surf_measure_file', 'surf_area'), - desc='Use subject/surf/hemi.surf_measure as input') + argstr="--meas %s", + xor=("surf_measure", "surf_measure_file", "surf_area"), + desc="Use subject/surf/hemi.surf_measure as input", + ) surf_area = traits.Str( - argstr='--area %s', - xor=('surf_measure', 'surf_measure_file', 'surf_area'), - desc= - 'Extract vertex area from subject/surf/hemi.surfname to use as input.') + argstr="--area %s", + xor=("surf_measure", "surf_measure_file", "surf_area"), + desc="Extract vertex area from subject/surf/hemi.surfname to use as input.", + ) subjects = traits.List( - argstr='--s %s...', - xor=('subjects', 'fsgd_file', 'subject_file'), - desc='subjects from who measures are calculated') + argstr="--s %s...", + xor=("subjects", "fsgd_file", "subject_file"), + desc="subjects from who measures are calculated", + ) fsgd_file = File( exists=True, - argstr='--fsgd %s', - xor=('subjects', 'fsgd_file', 'subject_file'), - desc='specify subjects using fsgd file') + argstr="--fsgd %s", + xor=("subjects", "fsgd_file", "subject_file"), + desc="specify subjects using fsgd file", + ) subject_file = File( exists=True, - argstr='--f %s', - xor=('subjects', 'fsgd_file', 'subject_file'), - desc='file specifying subjects separated by white space') + argstr="--f %s", + xor=("subjects", "fsgd_file", "subject_file"), + desc="file specifying subjects separated by white space", + ) surf_measure_file = InputMultiPath( File(exists=True), - argstr='--is %s...', - xor=('surf_measure', 'surf_measure_file', 'surf_area'), - desc='file alternative to surfmeas, still requires list of subjects') - source_format = traits.Str(argstr='--srcfmt %s', desc='source format') + argstr="--is %s...", + xor=("surf_measure", "surf_measure_file", "surf_area"), + desc="file alternative to surfmeas, still requires list of subjects", + ) + source_format = traits.Str(argstr="--srcfmt %s", desc="source format") surf_dir = traits.Str( - argstr='--surfdir %s', desc='alternative directory (instead of surf)') + argstr="--surfdir %s", desc="alternative directory (instead of surf)" + ) vol_measure_file = InputMultiPath( traits.Tuple(File(exists=True), File(exists=True)), - argstr='--iv %s %s...', - desc='list of volume measure and reg file tuples') + argstr="--iv %s %s...", + desc="list of 
volume measure and reg file tuples", + ) proj_frac = traits.Float( - argstr='--projfrac %s', desc='projection fraction for vol2surf') + argstr="--projfrac %s", desc="projection fraction for vol2surf" + ) fwhm = traits.Float( - argstr='--fwhm %f', - xor=['num_iters'], - desc='smooth by fwhm mm on the target surface') + argstr="--fwhm %f", + xor=["num_iters"], + desc="smooth by fwhm mm on the target surface", + ) num_iters = traits.Int( - argstr='--niters %d', - xor=['fwhm'], - desc='niters : smooth by niters on the target surface') + argstr="--niters %d", + xor=["fwhm"], + desc="niters : smooth by niters on the target surface", + ) fwhm_source = traits.Float( - argstr='--fwhm-src %f', - xor=['num_iters_source'], - desc='smooth by fwhm mm on the source surface') + argstr="--fwhm-src %f", + xor=["num_iters_source"], + desc="smooth by fwhm mm on the source surface", + ) num_iters_source = traits.Int( - argstr='--niterssrc %d', - xor=['fwhm_source'], - desc='niters : smooth by niters on the source surface') + argstr="--niterssrc %d", + xor=["fwhm_source"], + desc="niters : smooth by niters on the source surface", + ) smooth_cortex_only = traits.Bool( - argstr='--smooth-cortex-only', - desc='only smooth cortex (ie, exclude medial wall)') + argstr="--smooth-cortex-only", + desc="only smooth cortex (ie, exclude medial wall)", + ) class MRISPreprocOutputSpec(TraitedSpec): - out_file = File(desc='preprocessed output file') + out_file = File(desc="preprocessed output file") class MRISPreproc(FSCommand): @@ -106,22 +128,22 @@ class MRISPreproc(FSCommand): """ - _cmd = 'mris_preproc' + _cmd = "mris_preproc" input_spec = MRISPreprocInputSpec output_spec = MRISPreprocOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outfile = self.inputs.out_file - outputs['out_file'] = outfile + outputs["out_file"] = outfile if not isdefined(outfile): - outputs['out_file'] = os.path.join( - os.getcwd(), 'concat_%s_%s.mgz' % (self.inputs.hemi, - self.inputs.target)) + outputs["out_file"] = os.path.join( + os.getcwd(), "concat_%s_%s.mgz" % (self.inputs.hemi, self.inputs.target) + ) return outputs def _gen_filename(self, name): - if name == 'out_file': + if name == "out_file": return self._list_outputs()[name] return None @@ -129,29 +151,34 @@ def _gen_filename(self, name): class MRISPreprocReconAllInputSpec(MRISPreprocInputSpec): surf_measure_file = File( exists=True, - argstr='--meas %s', - xor=('surf_measure', 'surf_measure_file', 'surf_area'), - desc='file necessary for surfmeas') + argstr="--meas %s", + xor=("surf_measure", "surf_measure_file", "surf_area"), + desc="file necessary for surfmeas", + ) surfreg_files = InputMultiPath( File(exists=True), argstr="--surfreg %s", - requires=['lh_surfreg_target', 'rh_surfreg_target'], - desc="lh and rh input surface registration files") + requires=["lh_surfreg_target", "rh_surfreg_target"], + desc="lh and rh input surface registration files", + ) lh_surfreg_target = File( - desc="Implicit target surface registration file", - requires=['surfreg_files']) + desc="Implicit target surface registration file", requires=["surfreg_files"] + ) rh_surfreg_target = File( - desc="Implicit target surface registration file", - requires=['surfreg_files']) + desc="Implicit target surface registration file", requires=["surfreg_files"] + ) subject_id = traits.String( - 'subject_id', - argstr='--s %s', + "subject_id", + argstr="--s %s", usedefault=True, - xor=('subjects', 'fsgd_file', 'subject_file', 'subject_id'), - desc='subject from whom measures are calculated') + 
xor=("subjects", "fsgd_file", "subject_file", "subject_id"), + desc="subject from whom measures are calculated", + ) copy_inputs = traits.Bool( - desc="If running as a node, set this to True " + - "this will copy some implicit inputs to the " + "node directory.") + desc="If running as a node, set this to True " + + "this will copy some implicit inputs to the " + + "node directory." + ) class MRISPreprocReconAll(MRISPreproc): @@ -174,30 +201,32 @@ class MRISPreprocReconAll(MRISPreproc): def run(self, **inputs): if self.inputs.copy_inputs: self.inputs.subjects_dir = os.getcwd() - if 'subjects_dir' in inputs: - inputs['subjects_dir'] = self.inputs.subjects_dir + if "subjects_dir" in inputs: + inputs["subjects_dir"] = self.inputs.subjects_dir if isdefined(self.inputs.surf_dir): folder = self.inputs.surf_dir else: - folder = 'surf' + folder = "surf" if isdefined(self.inputs.surfreg_files): for surfreg in self.inputs.surfreg_files: basename = os.path.basename(surfreg) copy2subjdir(self, surfreg, folder, basename) - if basename.startswith('lh.'): + if basename.startswith("lh."): copy2subjdir( self, self.inputs.lh_surfreg_target, folder, basename, - subject_id=self.inputs.target) + subject_id=self.inputs.target, + ) else: copy2subjdir( self, self.inputs.rh_surfreg_target, folder, basename, - subject_id=self.inputs.target) + subject_id=self.inputs.target, + ) if isdefined(self.inputs.surf_measure_file): copy2subjdir(self, self.inputs.surf_measure_file, folder) @@ -206,173 +235,194 @@ def run(self, **inputs): def _format_arg(self, name, spec, value): # mris_preproc looks for these files in the surf dir - if name == 'surfreg_files': + if name == "surfreg_files": basename = os.path.basename(value[0]) - return spec.argstr % basename.lstrip('rh.').lstrip('lh.') + return spec.argstr % basename.lstrip("rh.").lstrip("lh.") if name == "surf_measure_file": basename = os.path.basename(value) - return spec.argstr % basename.lstrip('rh.').lstrip('lh.') + return spec.argstr % basename.lstrip("rh.").lstrip("lh.") return super(MRISPreprocReconAll, self)._format_arg(name, spec, value) class GLMFitInputSpec(FSTraitedSpec): - glm_dir = traits.Str( - argstr='--glmdir %s', desc='save outputs to dir', genfile=True) + glm_dir = traits.Str(argstr="--glmdir %s", desc="save outputs to dir", genfile=True) in_file = File( - desc='input 4D file', argstr='--y %s', mandatory=True, copyfile=False) - _design_xor = ('fsgd', 'design', 'one_sample') + desc="input 4D file", argstr="--y %s", mandatory=True, copyfile=False + ) + _design_xor = ("fsgd", "design", "one_sample") fsgd = traits.Tuple( File(exists=True), - traits.Enum('doss', 'dods'), - argstr='--fsgd %s %s', + traits.Enum("doss", "dods"), + argstr="--fsgd %s %s", xor=_design_xor, - desc='freesurfer descriptor file') + desc="freesurfer descriptor file", + ) design = File( - exists=True, - argstr='--X %s', - xor=_design_xor, - desc='design matrix file') + exists=True, argstr="--X %s", xor=_design_xor, desc="design matrix file" + ) contrast = InputMultiPath( - File(exists=True), argstr='--C %s...', desc='contrast file') + File(exists=True), argstr="--C %s...", desc="contrast file" + ) one_sample = traits.Bool( - argstr='--osgm', - xor=('one_sample', 'fsgd', 'design', 'contrast'), - desc='construct X and C as a one-sample group mean') + argstr="--osgm", + xor=("one_sample", "fsgd", "design", "contrast"), + desc="construct X and C as a one-sample group mean", + ) no_contrast_ok = traits.Bool( - argstr='--no-contrasts-ok', - desc='do not fail if no contrasts specified') + 
argstr="--no-contrasts-ok", desc="do not fail if no contrasts specified" + ) per_voxel_reg = InputMultiPath( - File(exists=True), argstr='--pvr %s...', desc='per-voxel regressors') + File(exists=True), argstr="--pvr %s...", desc="per-voxel regressors" + ) self_reg = traits.Tuple( traits.Int, traits.Int, traits.Int, - argstr='--selfreg %d %d %d', - desc='self-regressor from index col row slice') + argstr="--selfreg %d %d %d", + desc="self-regressor from index col row slice", + ) weighted_ls = File( exists=True, - argstr='--wls %s', - xor=('weight_file', 'weight_inv', 'weight_sqrt'), - desc='weighted least squares') + argstr="--wls %s", + xor=("weight_file", "weight_inv", "weight_sqrt"), + desc="weighted least squares", + ) fixed_fx_var = File( - exists=True, argstr='--yffxvar %s', desc='for fixed effects analysis') + exists=True, argstr="--yffxvar %s", desc="for fixed effects analysis" + ) fixed_fx_dof = traits.Int( - argstr='--ffxdof %d', - xor=['fixed_fx_dof_file'], - desc='dof for fixed effects analysis') + argstr="--ffxdof %d", + xor=["fixed_fx_dof_file"], + desc="dof for fixed effects analysis", + ) fixed_fx_dof_file = File( - argstr='--ffxdofdat %d', - xor=['fixed_fx_dof'], - desc='text file with dof for fixed effects analysis') + argstr="--ffxdofdat %d", + xor=["fixed_fx_dof"], + desc="text file with dof for fixed effects analysis", + ) weight_file = File( - exists=True, - xor=['weighted_ls'], - desc='weight for each input at each voxel') + exists=True, xor=["weighted_ls"], desc="weight for each input at each voxel" + ) weight_inv = traits.Bool( - argstr='--w-inv', desc='invert weights', xor=['weighted_ls']) + argstr="--w-inv", desc="invert weights", xor=["weighted_ls"] + ) weight_sqrt = traits.Bool( - argstr='--w-sqrt', desc='sqrt of weights', xor=['weighted_ls']) - fwhm = traits.Range( - low=0.0, argstr='--fwhm %f', desc='smooth input by fwhm') + argstr="--w-sqrt", desc="sqrt of weights", xor=["weighted_ls"] + ) + fwhm = traits.Range(low=0.0, argstr="--fwhm %f", desc="smooth input by fwhm") var_fwhm = traits.Range( - low=0.0, argstr='--var-fwhm %f', desc='smooth variance by fwhm') + low=0.0, argstr="--var-fwhm %f", desc="smooth variance by fwhm" + ) no_mask_smooth = traits.Bool( - argstr='--no-mask-smooth', desc='do not mask when smoothing') + argstr="--no-mask-smooth", desc="do not mask when smoothing" + ) no_est_fwhm = traits.Bool( - argstr='--no-est-fwhm', desc='turn off FWHM output estimation') - mask_file = File(exists=True, argstr='--mask %s', desc='binary mask') + argstr="--no-est-fwhm", desc="turn off FWHM output estimation" + ) + mask_file = File(exists=True, argstr="--mask %s", desc="binary mask") label_file = File( exists=True, - argstr='--label %s', - xor=['cortex'], - desc='use label as mask, surfaces only') + argstr="--label %s", + xor=["cortex"], + desc="use label as mask, surfaces only", + ) cortex = traits.Bool( - argstr='--cortex', - xor=['label_file'], - desc='use subjects ?h.cortex.label as label') - invert_mask = traits.Bool(argstr='--mask-inv', desc='invert mask') + argstr="--cortex", + xor=["label_file"], + desc="use subjects ?h.cortex.label as label", + ) + invert_mask = traits.Bool(argstr="--mask-inv", desc="invert mask") prune = traits.Bool( - argstr='--prune', - desc= - 'remove voxels that do not have a non-zero value at each frame (def)') + argstr="--prune", + desc="remove voxels that do not have a non-zero value at each frame (def)", + ) no_prune = traits.Bool( - argstr='--no-prune', xor=['prunethresh'], desc='do not prune') + argstr="--no-prune", 
xor=["prunethresh"], desc="do not prune" + ) prune_thresh = traits.Float( - argstr='--prune_thr %f', - xor=['noprune'], - desc='prune threshold. Default is FLT_MIN') + argstr="--prune_thr %f", + xor=["noprune"], + desc="prune threshold. Default is FLT_MIN", + ) compute_log_y = traits.Bool( - argstr='--logy', desc='compute natural log of y prior to analysis') + argstr="--logy", desc="compute natural log of y prior to analysis" + ) save_estimate = traits.Bool( - argstr='--yhat-save', desc='save signal estimate (yhat)') - save_residual = traits.Bool( - argstr='--eres-save', desc='save residual error (eres)') + argstr="--yhat-save", desc="save signal estimate (yhat)" + ) + save_residual = traits.Bool(argstr="--eres-save", desc="save residual error (eres)") save_res_corr_mtx = traits.Bool( - argstr='--eres-scm', - desc='save residual error spatial correlation matrix (eres.scm). Big!') + argstr="--eres-scm", + desc="save residual error spatial correlation matrix (eres.scm). Big!", + ) surf = traits.Bool( argstr="--surf %s %s %s", requires=["subject_id", "hemi"], - desc="analysis is on a surface mesh") + desc="analysis is on a surface mesh", + ) subject_id = traits.Str(desc="subject id for surface geometry") hemi = traits.Enum("lh", "rh", desc="surface hemisphere") surf_geo = traits.Str( - "white", - usedefault=True, - desc="surface geometry name (e.g. white, pial)") + "white", usedefault=True, desc="surface geometry name (e.g. white, pial)" + ) simulation = traits.Tuple( - traits.Enum('perm', 'mc-full', 'mc-z'), + traits.Enum("perm", "mc-full", "mc-z"), traits.Int(min=1), traits.Float, traits.Str, - argstr='--sim %s %d %f %s', - desc='nulltype nsim thresh csdbasename') + argstr="--sim %s %d %f %s", + desc="nulltype nsim thresh csdbasename", + ) sim_sign = traits.Enum( - 'abs', 'pos', 'neg', argstr='--sim-sign %s', desc='abs, pos, or neg') + "abs", "pos", "neg", argstr="--sim-sign %s", desc="abs, pos, or neg" + ) uniform = traits.Tuple( traits.Float, traits.Float, - argstr='--uniform %f %f', - desc='use uniform distribution instead of gaussian') - pca = traits.Bool( - argstr='--pca', desc='perform pca/svd analysis on residual') + argstr="--uniform %f %f", + desc="use uniform distribution instead of gaussian", + ) + pca = traits.Bool(argstr="--pca", desc="perform pca/svd analysis on residual") calc_AR1 = traits.Bool( - argstr='--tar1', desc='compute and save temporal AR1 of residual') + argstr="--tar1", desc="compute and save temporal AR1 of residual" + ) save_cond = traits.Bool( - argstr='--save-cond', - desc='flag to save design matrix condition at each voxel') + argstr="--save-cond", desc="flag to save design matrix condition at each voxel" + ) vox_dump = traits.Tuple( traits.Int, traits.Int, traits.Int, - argstr='--voxdump %d %d %d', - desc='dump voxel GLM and exit') - seed = traits.Int(argstr='--seed %d', desc='used for synthesizing noise') - synth = traits.Bool(argstr='--synth', desc='replace input with gaussian') - resynth_test = traits.Int( - argstr='--resynthtest %d', desc='test GLM by resynthsis') - profile = traits.Int(argstr='--profile %d', desc='niters : test speed') + argstr="--voxdump %d %d %d", + desc="dump voxel GLM and exit", + ) + seed = traits.Int(argstr="--seed %d", desc="used for synthesizing noise") + synth = traits.Bool(argstr="--synth", desc="replace input with gaussian") + resynth_test = traits.Int(argstr="--resynthtest %d", desc="test GLM by resynthsis") + profile = traits.Int(argstr="--profile %d", desc="niters : test speed") force_perm = traits.Bool( - 
argstr='--perm-force', - desc='force perumtation test, even when design matrix is not orthog') - diag = traits.Int(argstr='--diag %d', desc='Gdiag_no : set diagnositc level') + argstr="--perm-force", + desc="force perumtation test, even when design matrix is not orthog", + ) + diag = traits.Int(argstr="--diag %d", desc="Gdiag_no : set diagnositc level") diag_cluster = traits.Bool( - argstr='--diag-cluster', - desc='save sig volume and exit from first sim loop') - debug = traits.Bool(argstr='--debug', desc='turn on debugging') + argstr="--diag-cluster", desc="save sig volume and exit from first sim loop" + ) + debug = traits.Bool(argstr="--debug", desc="turn on debugging") check_opts = traits.Bool( - argstr='--checkopts', - desc="don't run anything, just check options and exit") + argstr="--checkopts", desc="don't run anything, just check options and exit" + ) allow_repeated_subjects = traits.Bool( - argstr='--allowsubjrep', - desc= - 'allow subject names to repeat in the fsgd file (must appear before --fsgd' + argstr="--allowsubjrep", + desc="allow subject names to repeat in the fsgd file (must appear before --fsgd", ) allow_ill_cond = traits.Bool( - argstr='--illcond', desc='allow ill-conditioned design matrices') + argstr="--illcond", desc="allow ill-conditioned design matrices" + ) sim_done_file = File( - argstr='--sim-done %s', desc='create file when simulation finished') + argstr="--sim-done %s", desc="create file when simulation finished" + ) class GLMFitOutputSpec(TraitedSpec): @@ -385,18 +435,13 @@ class GLMFitOutputSpec(TraitedSpec): estimate_file = File(desc="map of the estimated Y values") mask_file = File(desc="map of the mask used in the analysis") fwhm_file = File(desc="text file with estimated smoothness") - dof_file = File( - desc="text file with effective degrees-of-freedom for the analysis") - gamma_file = OutputMultiPath( - desc="map of contrast of regression coefficients") - gamma_var_file = OutputMultiPath( - desc="map of regression contrast variance") + dof_file = File(desc="text file with effective degrees-of-freedom for the analysis") + gamma_file = OutputMultiPath(desc="map of contrast of regression coefficients") + gamma_var_file = OutputMultiPath(desc="map of regression contrast variance") sig_file = OutputMultiPath(desc="map of F-test significance (in -log10p)") ftest_file = OutputMultiPath(desc="map of test statistic values") - spatial_eigenvectors = File( - desc="map of spatial eigenvectors from residual PCA") - frame_eigenvectors = File( - desc="matrix of frame eigenvectors from residual PCA") + spatial_eigenvectors = File(desc="map of spatial eigenvectors from residual PCA") + frame_eigenvectors = File(desc="matrix of frame eigenvectors from residual PCA") singular_values = File(desc="matrix singular values from residual PCA") svd_stats_file = File(desc="text file summarizing the residual PCA") @@ -415,7 +460,7 @@ class GLMFit(FSCommand): """ - _cmd = 'mri_glmfit' + _cmd = "mri_glmfit" input_spec = GLMFitInputSpec output_spec = GLMFitOutputSpec @@ -459,12 +504,8 @@ def _list_outputs(self): contrasts = ["osgm"] # Add in the contrast images - outputs["sig_file"] = [ - os.path.join(glmdir, c, "sig.mgh") for c in contrasts - ] - outputs["ftest_file"] = [ - os.path.join(glmdir, c, "F.mgh") for c in contrasts - ] + outputs["sig_file"] = [os.path.join(glmdir, c, "sig.mgh") for c in contrasts] + outputs["ftest_file"] = [os.path.join(glmdir, c, "F.mgh") for c in contrasts] outputs["gamma_file"] = [ os.path.join(glmdir, c, "gamma.mgh") for c in contrasts ] @@ -483,7 
+524,7 @@ def _list_outputs(self): return outputs def _gen_filename(self, name): - if name == 'glm_dir': + if name == "glm_dir": return os.getcwd() return None @@ -497,76 +538,75 @@ def __init__(self, **kwargs): class BinarizeInputSpec(FSTraitedSpec): in_file = File( exists=True, - argstr='--i %s', + argstr="--i %s", mandatory=True, copyfile=False, - desc='input volume') - min = traits.Float( - argstr='--min %f', xor=['wm_ven_csf'], desc='min thresh') - max = traits.Float( - argstr='--max %f', xor=['wm_ven_csf'], desc='max thresh') - rmin = traits.Float( - argstr='--rmin %f', desc='compute min based on rmin*globalmean') - rmax = traits.Float( - argstr='--rmax %f', desc='compute max based on rmax*globalmean') + desc="input volume", + ) + min = traits.Float(argstr="--min %f", xor=["wm_ven_csf"], desc="min thresh") + max = traits.Float(argstr="--max %f", xor=["wm_ven_csf"], desc="max thresh") + rmin = traits.Float(argstr="--rmin %f", desc="compute min based on rmin*globalmean") + rmax = traits.Float(argstr="--rmax %f", desc="compute max based on rmax*globalmean") match = traits.List( - traits.Int, argstr='--match %d...', desc='match instead of threshold') + traits.Int, argstr="--match %d...", desc="match instead of threshold" + ) wm = traits.Bool( - argstr='--wm', - desc='set match vals to 2 and 41 (aseg for cerebral WM)') + argstr="--wm", desc="set match vals to 2 and 41 (aseg for cerebral WM)" + ) ventricles = traits.Bool( - argstr='--ventricles', - desc='set match vals those for aseg ventricles+choroid (not 4th)') + argstr="--ventricles", + desc="set match vals those for aseg ventricles+choroid (not 4th)", + ) wm_ven_csf = traits.Bool( - argstr='--wm+vcsf', - xor=['min', 'max'], - desc='WM and ventricular CSF, including choroid (not 4th)') - binary_file = File( - argstr='--o %s', genfile=True, desc='binary output volume') - out_type = traits.Enum( - 'nii', 'nii.gz', 'mgz', argstr='', desc='output file type') + argstr="--wm+vcsf", + xor=["min", "max"], + desc="WM and ventricular CSF, including choroid (not 4th)", + ) + binary_file = File(argstr="--o %s", genfile=True, desc="binary output volume") + out_type = traits.Enum("nii", "nii.gz", "mgz", argstr="", desc="output file type") count_file = traits.Either( traits.Bool, File, - argstr='--count %s', - desc='save number of hits in ascii file (hits, ntotvox, pct)') + argstr="--count %s", + desc="save number of hits in ascii file (hits, ntotvox, pct)", + ) bin_val = traits.Int( - argstr='--binval %d', - desc='set vox within thresh to val (default is 1)') + argstr="--binval %d", desc="set vox within thresh to val (default is 1)" + ) bin_val_not = traits.Int( - argstr='--binvalnot %d', - desc='set vox outside range to val (default is 0)') - invert = traits.Bool(argstr='--inv', desc='set binval=0, binvalnot=1') + argstr="--binvalnot %d", desc="set vox outside range to val (default is 0)" + ) + invert = traits.Bool(argstr="--inv", desc="set binval=0, binvalnot=1") frame_no = traits.Int( - argstr='--frame %s', desc='use 0-based frame of input (default is 0)') - merge_file = File( - exists=True, argstr='--merge %s', desc='merge with mergevol') - mask_file = File( - exists=True, argstr='--mask maskvol', desc='must be within mask') - mask_thresh = traits.Float( - argstr='--mask-thresh %f', desc='set thresh for mask') + argstr="--frame %s", desc="use 0-based frame of input (default is 0)" + ) + merge_file = File(exists=True, argstr="--merge %s", desc="merge with mergevol") + mask_file = File(exists=True, argstr="--mask maskvol", desc="must be within 
mask") + mask_thresh = traits.Float(argstr="--mask-thresh %f", desc="set thresh for mask") abs = traits.Bool( - argstr='--abs', desc='take abs of invol first (ie, make unsigned)') + argstr="--abs", desc="take abs of invol first (ie, make unsigned)" + ) bin_col_num = traits.Bool( - argstr='--bincol', - desc='set binarized voxel value to its column number') - zero_edges = traits.Bool( - argstr='--zero-edges', desc='zero the edge voxels') + argstr="--bincol", desc="set binarized voxel value to its column number" + ) + zero_edges = traits.Bool(argstr="--zero-edges", desc="zero the edge voxels") zero_slice_edge = traits.Bool( - argstr='--zero-slice-edges', desc='zero the edge slice voxels') - dilate = traits.Int( - argstr='--dilate %d', desc='niters: dilate binarization in 3D') + argstr="--zero-slice-edges", desc="zero the edge slice voxels" + ) + dilate = traits.Int(argstr="--dilate %d", desc="niters: dilate binarization in 3D") erode = traits.Int( - argstr='--erode %d', - desc='nerode: erode binarization in 3D (after any dilation)') + argstr="--erode %d", + desc="nerode: erode binarization in 3D (after any dilation)", + ) erode2d = traits.Int( - argstr='--erode2d %d', - desc='nerode2d: erode binarization in 2D (after any 3D erosion)') + argstr="--erode2d %d", + desc="nerode2d: erode binarization in 2D (after any 3D erosion)", + ) class BinarizeOutputSpec(TraitedSpec): - binary_file = File(exists=True, desc='binarized output volume') - count_file = File(desc='ascii file containing number of hits') + binary_file = File(exists=True, desc="binarized output volume") + count_file = File(desc="ascii file containing number of hits") class Binarize(FSCommand): @@ -581,7 +621,7 @@ class Binarize(FSCommand): """ - _cmd = 'mri_binarize' + _cmd = "mri_binarize" input_spec = BinarizeInputSpec output_spec = BinarizeOutputSpec @@ -593,38 +633,41 @@ def _list_outputs(self): outfile = fname_presuffix( self.inputs.in_file, newpath=os.getcwd(), - suffix='.'.join(('_thresh', self.inputs.out_type)), - use_ext=False) + suffix=".".join(("_thresh", self.inputs.out_type)), + use_ext=False, + ) else: outfile = fname_presuffix( - self.inputs.in_file, newpath=os.getcwd(), suffix='_thresh') - outputs['binary_file'] = os.path.abspath(outfile) + self.inputs.in_file, newpath=os.getcwd(), suffix="_thresh" + ) + outputs["binary_file"] = os.path.abspath(outfile) value = self.inputs.count_file if isdefined(value): if isinstance(value, bool): if value: - outputs['count_file'] = fname_presuffix( + outputs["count_file"] = fname_presuffix( self.inputs.in_file, - suffix='_count.txt', + suffix="_count.txt", newpath=os.getcwd(), - use_ext=False) + use_ext=False, + ) else: - outputs['count_file'] = value + outputs["count_file"] = value return outputs def _format_arg(self, name, spec, value): - if name == 'count_file': + if name == "count_file": if isinstance(value, bool): fname = self._list_outputs()[name] else: fname = value return spec.argstr % fname - if name == 'out_type': - return '' + if name == "out_type": + return "" return super(Binarize, self)._format_arg(name, spec, value) def _gen_filename(self, name): - if name == 'binary_file': + if name == "binary_file": return self._list_outputs()[name] return None @@ -632,73 +675,77 @@ def _gen_filename(self, name): class ConcatenateInputSpec(FSTraitedSpec): in_files = InputMultiPath( File(exists=True), - desc='Individual volumes to be concatenated', - argstr='--i %s...', - mandatory=True) - concatenated_file = File( - desc='Output volume', argstr='--o %s', genfile=True) + 
desc="Individual volumes to be concatenated", + argstr="--i %s...", + mandatory=True, + ) + concatenated_file = File(desc="Output volume", argstr="--o %s", genfile=True) sign = traits.Enum( - 'abs', - 'pos', - 'neg', - argstr='--%s', - desc='Take only pos or neg voxles from input, or take abs') + "abs", + "pos", + "neg", + argstr="--%s", + desc="Take only pos or neg voxles from input, or take abs", + ) stats = traits.Enum( - 'sum', - 'var', - 'std', - 'max', - 'min', - 'mean', - argstr='--%s', - desc='Compute the sum, var, std, max, min or mean of the input volumes' + "sum", + "var", + "std", + "max", + "min", + "mean", + argstr="--%s", + desc="Compute the sum, var, std, max, min or mean of the input volumes", ) paired_stats = traits.Enum( - 'sum', - 'avg', - 'diff', - 'diff-norm', - 'diff-norm1', - 'diff-norm2', - argstr='--paired-%s', - desc='Compute paired sum, avg, or diff') + "sum", + "avg", + "diff", + "diff-norm", + "diff-norm1", + "diff-norm2", + argstr="--paired-%s", + desc="Compute paired sum, avg, or diff", + ) gmean = traits.Int( - argstr='--gmean %d', - desc='create matrix to average Ng groups, Nper=Ntot/Ng') + argstr="--gmean %d", desc="create matrix to average Ng groups, Nper=Ntot/Ng" + ) mean_div_n = traits.Bool( - argstr='--mean-div-n', desc='compute mean/nframes (good for var)') + argstr="--mean-div-n", desc="compute mean/nframes (good for var)" + ) multiply_by = traits.Float( - argstr='--mul %f', desc='Multiply input volume by some amount') + argstr="--mul %f", desc="Multiply input volume by some amount" + ) add_val = traits.Float( - argstr='--add %f', desc='Add some amount to the input volume') + argstr="--add %f", desc="Add some amount to the input volume" + ) multiply_matrix_file = File( - exists=True, - argstr='--mtx %s', - desc='Multiply input by an ascii matrix in file') + exists=True, argstr="--mtx %s", desc="Multiply input by an ascii matrix in file" + ) combine = traits.Bool( - argstr='--combine', - desc='Combine non-zero values into single frame volume') + argstr="--combine", desc="Combine non-zero values into single frame volume" + ) keep_dtype = traits.Bool( - argstr='--keep-datatype', - desc='Keep voxelwise precision type (default is float') + argstr="--keep-datatype", desc="Keep voxelwise precision type (default is float" + ) max_bonfcor = traits.Bool( - argstr='--max-bonfcor', - desc='Compute max and bonferroni correct (assumes -log10(ps))') + argstr="--max-bonfcor", + desc="Compute max and bonferroni correct (assumes -log10(ps))", + ) max_index = traits.Bool( - argstr='--max-index', - desc='Compute the index of max voxel in concatenated volumes') - mask_file = File( - exists=True, argstr='--mask %s', desc='Mask input with a volume') + argstr="--max-index", + desc="Compute the index of max voxel in concatenated volumes", + ) + mask_file = File(exists=True, argstr="--mask %s", desc="Mask input with a volume") vote = traits.Bool( - argstr='--vote', - desc='Most frequent value at each voxel and fraction of occurances') - sort = traits.Bool( - argstr='--sort', desc='Sort each voxel by ascending frame value') + argstr="--vote", + desc="Most frequent value at each voxel and fraction of occurances", + ) + sort = traits.Bool(argstr="--sort", desc="Sort each voxel by ascending frame value") class ConcatenateOutputSpec(TraitedSpec): - concatenated_file = File( - exists=True, desc='Path/name of the output volume') + concatenated_file = File(exists=True, desc="Path/name of the output volume") class Concatenate(FSCommand): @@ -719,7 +766,7 @@ class 
Concatenate(FSCommand): """ - _cmd = 'mri_concat' + _cmd = "mri_concat" input_spec = ConcatenateInputSpec output_spec = ConcatenateOutputSpec @@ -728,181 +775,198 @@ def _list_outputs(self): fname = self.inputs.concatenated_file if not isdefined(fname): - fname = 'concat_output.nii.gz' - outputs['concatenated_file'] = os.path.join(os.getcwd(), fname) + fname = "concat_output.nii.gz" + outputs["concatenated_file"] = os.path.join(os.getcwd(), fname) return outputs def _gen_filename(self, name): - if name == 'concatenated_file': + if name == "concatenated_file": return self._list_outputs()[name] return None class SegStatsInputSpec(FSTraitedSpec): - _xor_inputs = ('segmentation_file', 'annot', 'surf_label') + _xor_inputs = ("segmentation_file", "annot", "surf_label") segmentation_file = File( exists=True, - argstr='--seg %s', + argstr="--seg %s", xor=_xor_inputs, mandatory=True, - desc='segmentation volume path') + desc="segmentation volume path", + ) annot = traits.Tuple( traits.Str, - traits.Enum('lh', 'rh'), + traits.Enum("lh", "rh"), traits.Str, - argstr='--annot %s %s %s', + argstr="--annot %s %s %s", xor=_xor_inputs, mandatory=True, - desc='subject hemi parc : use surface parcellation') + desc="subject hemi parc : use surface parcellation", + ) surf_label = traits.Tuple( traits.Str, - traits.Enum('lh', 'rh'), + traits.Enum("lh", "rh"), traits.Str, - argstr='--slabel %s %s %s', + argstr="--slabel %s %s %s", xor=_xor_inputs, mandatory=True, - desc='subject hemi label : use surface label') + desc="subject hemi label : use surface label", + ) summary_file = File( - argstr='--sum %s', + argstr="--sum %s", genfile=True, position=-1, - desc='Segmentation stats summary table file') + desc="Segmentation stats summary table file", + ) partial_volume_file = File( - exists=True, argstr='--pv %s', desc='Compensate for partial voluming') + exists=True, argstr="--pv %s", desc="Compensate for partial voluming" + ) in_file = File( exists=True, - argstr='--i %s', - desc='Use the segmentation to report stats on this volume') + argstr="--i %s", + desc="Use the segmentation to report stats on this volume", + ) frame = traits.Int( - argstr='--frame %d', desc='Report stats on nth frame of input volume') - multiply = traits.Float(argstr='--mul %f', desc='multiply input by val') + argstr="--frame %d", desc="Report stats on nth frame of input volume" + ) + multiply = traits.Float(argstr="--mul %f", desc="multiply input by val") calc_snr = traits.Bool( - argstr='--snr', desc='save mean/std as extra column in output table') + argstr="--snr", desc="save mean/std as extra column in output table" + ) calc_power = traits.Enum( - 'sqr', - 'sqrt', - argstr='--%s', - desc='Compute either the sqr or the sqrt of the input') - _ctab_inputs = ('color_table_file', 'default_color_table', - 'gca_color_table') + "sqr", + "sqrt", + argstr="--%s", + desc="Compute either the sqr or the sqrt of the input", + ) + _ctab_inputs = ("color_table_file", "default_color_table", "gca_color_table") color_table_file = File( exists=True, - argstr='--ctab %s', + argstr="--ctab %s", xor=_ctab_inputs, - desc='color table file with seg id names') + desc="color table file with seg id names", + ) default_color_table = traits.Bool( - argstr='--ctab-default', + argstr="--ctab-default", xor=_ctab_inputs, - desc='use $FREESURFER_HOME/FreeSurferColorLUT.txt') + desc="use $FREESURFER_HOME/FreeSurferColorLUT.txt", + ) gca_color_table = File( exists=True, - argstr='--ctab-gca %s', + argstr="--ctab-gca %s", xor=_ctab_inputs, - desc='get color table from GCA 
(CMA)') + desc="get color table from GCA (CMA)", + ) segment_id = traits.List( - argstr='--id %s...', desc='Manually specify segmentation ids') - exclude_id = traits.Int( - argstr='--excludeid %d', desc='Exclude seg id from report') + argstr="--id %s...", desc="Manually specify segmentation ids" + ) + exclude_id = traits.Int(argstr="--excludeid %d", desc="Exclude seg id from report") exclude_ctx_gm_wm = traits.Bool( - argstr='--excl-ctxgmwm', desc='exclude cortical gray and white matter') + argstr="--excl-ctxgmwm", desc="exclude cortical gray and white matter" + ) wm_vol_from_surf = traits.Bool( - argstr='--surf-wm-vol', desc='Compute wm volume from surf') + argstr="--surf-wm-vol", desc="Compute wm volume from surf" + ) cortex_vol_from_surf = traits.Bool( - argstr='--surf-ctx-vol', desc='Compute cortex volume from surf') + argstr="--surf-ctx-vol", desc="Compute cortex volume from surf" + ) non_empty_only = traits.Bool( - argstr='--nonempty', desc='Only report nonempty segmentations') + argstr="--nonempty", desc="Only report nonempty segmentations" + ) empty = traits.Bool( - argstr="--empty", - desc="Report on segmentations listed in the color table") + argstr="--empty", desc="Report on segmentations listed in the color table" + ) mask_file = File( - exists=True, argstr='--mask %s', desc='Mask volume (same size as seg') + exists=True, argstr="--mask %s", desc="Mask volume (same size as seg" + ) mask_thresh = traits.Float( - argstr='--maskthresh %f', - desc='binarize mask with this threshold <0.5>') + argstr="--maskthresh %f", desc="binarize mask with this threshold <0.5>" + ) mask_sign = traits.Enum( - 'abs', - 'pos', - 'neg', - '--masksign %s', - desc='Sign for mask threshold: pos, neg, or abs') + "abs", + "pos", + "neg", + "--masksign %s", + desc="Sign for mask threshold: pos, neg, or abs", + ) mask_frame = traits.Int( - '--maskframe %d', - requires=['mask_file'], - desc='Mask with this (0 based) frame of the mask volume') + "--maskframe %d", + requires=["mask_file"], + desc="Mask with this (0 based) frame of the mask volume", + ) mask_invert = traits.Bool( - argstr='--maskinvert', desc='Invert binarized mask volume') - mask_erode = traits.Int( - argstr='--maskerode %d', desc='Erode mask by some amount') + argstr="--maskinvert", desc="Invert binarized mask volume" + ) + mask_erode = traits.Int(argstr="--maskerode %d", desc="Erode mask by some amount") brain_vol = traits.Enum( - 'brain-vol-from-seg', - 'brainmask', - argstr='--%s', - desc= - 'Compute brain volume either with ``brainmask`` or ``brain-vol-from-seg``' + "brain-vol-from-seg", + "brainmask", + argstr="--%s", + desc="Compute brain volume either with ``brainmask`` or ``brain-vol-from-seg``", ) brainmask_file = File( argstr="--brainmask %s", exists=True, - desc= - "Load brain mask and compute the volume of the brain as the non-zero voxels in this volume" + desc="Load brain mask and compute the volume of the brain as the non-zero voxels in this volume", ) - etiv = traits.Bool( - argstr='--etiv', desc='Compute ICV from talairach transform') + etiv = traits.Bool(argstr="--etiv", desc="Compute ICV from talairach transform") etiv_only = traits.Enum( - 'etiv', - 'old-etiv', - '--%s-only', - desc='Compute etiv and exit. Use ``etiv`` or ``old-etiv``') + "etiv", + "old-etiv", + "--%s-only", + desc="Compute etiv and exit. 
Use ``etiv`` or ``old-etiv``", + ) avgwf_txt_file = traits.Either( traits.Bool, File, - argstr='--avgwf %s', - desc='Save average waveform into file (bool or filename)') + argstr="--avgwf %s", + desc="Save average waveform into file (bool or filename)", + ) avgwf_file = traits.Either( traits.Bool, File, - argstr='--avgwfvol %s', - desc='Save as binary volume (bool or filename)') + argstr="--avgwfvol %s", + desc="Save as binary volume (bool or filename)", + ) sf_avg_file = traits.Either( - traits.Bool, - File, - argstr='--sfavg %s', - desc='Save mean across space and time') + traits.Bool, File, argstr="--sfavg %s", desc="Save mean across space and time" + ) vox = traits.List( traits.Int, - argstr='--vox %s', - desc='Replace seg with all 0s except at C R S (three int inputs)') - supratent = traits.Bool( - argstr="--supratent", desc="Undocumented input flag") + argstr="--vox %s", + desc="Replace seg with all 0s except at C R S (three int inputs)", + ) + supratent = traits.Bool(argstr="--supratent", desc="Undocumented input flag") subcort_gm = traits.Bool( - argstr="--subcortgray", - desc="Compute volume of subcortical gray matter") + argstr="--subcortgray", desc="Compute volume of subcortical gray matter" + ) total_gray = traits.Bool( - argstr="--totalgray", desc="Compute volume of total gray matter") + argstr="--totalgray", desc="Compute volume of total gray matter" + ) euler = traits.Bool( argstr="--euler", - desc= - "Write out number of defect holes in orig.nofix based on the euler number" + desc="Write out number of defect holes in orig.nofix based on the euler number", ) in_intensity = File( - argstr="--in %s --in-intensity-name %s", - desc="Undocumented input norm.mgz file") + argstr="--in %s --in-intensity-name %s", desc="Undocumented input norm.mgz file" + ) intensity_units = traits.Enum( - 'MR', + "MR", argstr="--in-intensity-units %s", requires=["in_intensity"], - desc="Intensity units") + desc="Intensity units", + ) class SegStatsOutputSpec(TraitedSpec): - summary_file = File( - exists=True, desc='Segmentation summary statistics table') + summary_file = File(exists=True, desc="Segmentation summary statistics table") avgwf_txt_file = File( - desc='Text file with functional statistics averaged over segs') - avgwf_file = File( - desc='Volume with functional statistics averaged over segs') + desc="Text file with functional statistics averaged over segs" + ) + avgwf_file = File(desc="Volume with functional statistics averaged over segs") sf_avg_file = File( - desc='Text file with func statistics averaged over segs and framss') + desc="Text file with func statistics averaged over segs and framss" + ) class SegStats(FSCommand): @@ -923,56 +987,58 @@ class SegStats(FSCommand): """ - _cmd = 'mri_segstats' + _cmd = "mri_segstats" input_spec = SegStatsInputSpec output_spec = SegStatsOutputSpec def _list_outputs(self): outputs = self.output_spec().get() if isdefined(self.inputs.summary_file): - outputs['summary_file'] = os.path.abspath(self.inputs.summary_file) + outputs["summary_file"] = os.path.abspath(self.inputs.summary_file) else: - outputs['summary_file'] = os.path.join(os.getcwd(), - 'summary.stats') + outputs["summary_file"] = os.path.join(os.getcwd(), "summary.stats") suffices = dict( - avgwf_txt_file='_avgwf.txt', - avgwf_file='_avgwf.nii.gz', - sf_avg_file='sfavg.txt') + avgwf_txt_file="_avgwf.txt", + avgwf_file="_avgwf.nii.gz", + sf_avg_file="sfavg.txt", + ) if isdefined(self.inputs.segmentation_file): _, src = os.path.split(self.inputs.segmentation_file) if 
isdefined(self.inputs.annot): - src = '_'.join(self.inputs.annot) + src = "_".join(self.inputs.annot) if isdefined(self.inputs.surf_label): - src = '_'.join(self.inputs.surf_label) + src = "_".join(self.inputs.surf_label) for name, suffix in list(suffices.items()): value = getattr(self.inputs, name) if isdefined(value): if isinstance(value, bool): outputs[name] = fname_presuffix( - src, suffix=suffix, newpath=os.getcwd(), use_ext=False) + src, suffix=suffix, newpath=os.getcwd(), use_ext=False + ) else: outputs[name] = os.path.abspath(value) return outputs def _format_arg(self, name, spec, value): - if name in ('summary_file', 'avgwf_txt_file'): + if name in ("summary_file", "avgwf_txt_file"): if not isinstance(value, bool): if not os.path.isabs(value): - value = os.path.join('.', value) - if name in ['avgwf_txt_file', 'avgwf_file', 'sf_avg_file']: + value = os.path.join(".", value) + if name in ["avgwf_txt_file", "avgwf_file", "sf_avg_file"]: if isinstance(value, bool): fname = self._list_outputs()[name] else: fname = value return spec.argstr % fname - elif name == 'in_intensity': - intensity_name = os.path.basename( - self.inputs.in_intensity).replace('.mgz', '') + elif name == "in_intensity": + intensity_name = os.path.basename(self.inputs.in_intensity).replace( + ".mgz", "" + ) return spec.argstr % (value, intensity_name) return super(SegStats, self)._format_arg(name, spec, value) def _gen_filename(self, name): - if name == 'summary_file': + if name == "summary_file": return self._list_outputs()[name] return None @@ -980,40 +1046,40 @@ def _gen_filename(self, name): class SegStatsReconAllInputSpec(SegStatsInputSpec): # recon-all input requirements subject_id = traits.String( - 'subject_id', + "subject_id", usedefault=True, argstr="--subject %s", mandatory=True, - desc="Subject id being processed") + desc="Subject id being processed", + ) # implicit - ribbon = File( - mandatory=True, exists=True, desc="Input file mri/ribbon.mgz") + ribbon = File(mandatory=True, exists=True, desc="Input file mri/ribbon.mgz") presurf_seg = File(exists=True, desc="Input segmentation volume") transform = File(mandatory=True, exists=True, desc="Input transform file") - lh_orig_nofix = File( - mandatory=True, exists=True, desc="Input lh.orig.nofix") - rh_orig_nofix = File( - mandatory=True, exists=True, desc="Input rh.orig.nofix") + lh_orig_nofix = File(mandatory=True, exists=True, desc="Input lh.orig.nofix") + rh_orig_nofix = File(mandatory=True, exists=True, desc="Input rh.orig.nofix") lh_white = File( mandatory=True, exists=True, - desc="Input file must be /surf/lh.white") + desc="Input file must be /surf/lh.white", + ) rh_white = File( mandatory=True, exists=True, - desc="Input file must be /surf/rh.white") + desc="Input file must be /surf/rh.white", + ) lh_pial = File( - mandatory=True, - exists=True, - desc="Input file must be /surf/lh.pial") + mandatory=True, exists=True, desc="Input file must be /surf/lh.pial" + ) rh_pial = File( - mandatory=True, - exists=True, - desc="Input file must be /surf/rh.pial") + mandatory=True, exists=True, desc="Input file must be /surf/rh.pial" + ) aseg = File(exists=True, desc="Mandatory implicit input in 5.3") - copy_inputs = traits.Bool(desc="If running as a node, set this to True " + - "otherwise, this will copy the implicit inputs " - + "to the node directory.") + copy_inputs = traits.Bool( + desc="If running as a node, set this to True " + + "otherwise, this will copy the implicit inputs " + + "to the node directory." 
+ ) class SegStatsReconAll(SegStats): @@ -1053,117 +1119,128 @@ class SegStatsReconAll(SegStats): >>> segstatsreconall.cmdline 'mri_segstats --annot PWS04 lh aparc --avgwf ./avgwf.txt --brain-vol-from-seg --surf-ctx-vol --empty --etiv --euler --excl-ctxgmwm --excludeid 0 --subcortgray --subject 10335 --supratent --totalgray --surf-wm-vol --sum ./summary.stats' """ + input_spec = SegStatsReconAllInputSpec output_spec = SegStatsOutputSpec def _format_arg(self, name, spec, value): - if name == 'brainmask_file': + if name == "brainmask_file": return spec.argstr % os.path.basename(value) return super(SegStatsReconAll, self)._format_arg(name, spec, value) def run(self, **inputs): if self.inputs.copy_inputs: self.inputs.subjects_dir = os.getcwd() - if 'subjects_dir' in inputs: - inputs['subjects_dir'] = self.inputs.subjects_dir - copy2subjdir(self, self.inputs.lh_orig_nofix, 'surf', - 'lh.orig.nofix') - copy2subjdir(self, self.inputs.rh_orig_nofix, 'surf', - 'rh.orig.nofix') - copy2subjdir(self, self.inputs.lh_white, 'surf', 'lh.white') - copy2subjdir(self, self.inputs.rh_white, 'surf', 'rh.white') - copy2subjdir(self, self.inputs.lh_pial, 'surf', 'lh.pial') - copy2subjdir(self, self.inputs.rh_pial, 'surf', 'rh.pial') - copy2subjdir(self, self.inputs.ribbon, 'mri', 'ribbon.mgz') - copy2subjdir(self, self.inputs.presurf_seg, 'mri', - 'aseg.presurf.mgz') - copy2subjdir(self, self.inputs.aseg, 'mri', 'aseg.mgz') - copy2subjdir(self, self.inputs.transform, - os.path.join('mri', 'transforms'), 'talairach.xfm') - copy2subjdir(self, self.inputs.in_intensity, 'mri') - copy2subjdir(self, self.inputs.brainmask_file, 'mri') + if "subjects_dir" in inputs: + inputs["subjects_dir"] = self.inputs.subjects_dir + copy2subjdir(self, self.inputs.lh_orig_nofix, "surf", "lh.orig.nofix") + copy2subjdir(self, self.inputs.rh_orig_nofix, "surf", "rh.orig.nofix") + copy2subjdir(self, self.inputs.lh_white, "surf", "lh.white") + copy2subjdir(self, self.inputs.rh_white, "surf", "rh.white") + copy2subjdir(self, self.inputs.lh_pial, "surf", "lh.pial") + copy2subjdir(self, self.inputs.rh_pial, "surf", "rh.pial") + copy2subjdir(self, self.inputs.ribbon, "mri", "ribbon.mgz") + copy2subjdir(self, self.inputs.presurf_seg, "mri", "aseg.presurf.mgz") + copy2subjdir(self, self.inputs.aseg, "mri", "aseg.mgz") + copy2subjdir( + self, + self.inputs.transform, + os.path.join("mri", "transforms"), + "talairach.xfm", + ) + copy2subjdir(self, self.inputs.in_intensity, "mri") + copy2subjdir(self, self.inputs.brainmask_file, "mri") return super(SegStatsReconAll, self).run(**inputs) class Label2VolInputSpec(FSTraitedSpec): label_file = InputMultiPath( File(exists=True), - argstr='--label %s...', - xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), + argstr="--label %s...", + xor=("label_file", "annot_file", "seg_file", "aparc_aseg"), copyfile=False, mandatory=True, - desc='list of label files') + desc="list of label files", + ) annot_file = File( exists=True, - argstr='--annot %s', - xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), - requires=('subject_id', 'hemi'), + argstr="--annot %s", + xor=("label_file", "annot_file", "seg_file", "aparc_aseg"), + requires=("subject_id", "hemi"), mandatory=True, copyfile=False, - desc='surface annotation file') + desc="surface annotation file", + ) seg_file = File( exists=True, - argstr='--seg %s', - xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), + argstr="--seg %s", + xor=("label_file", "annot_file", "seg_file", "aparc_aseg"), mandatory=True, copyfile=False, - 
desc='segmentation file') + desc="segmentation file", + ) aparc_aseg = traits.Bool( - argstr='--aparc+aseg', - xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), + argstr="--aparc+aseg", + xor=("label_file", "annot_file", "seg_file", "aparc_aseg"), mandatory=True, - desc='use aparc+aseg.mgz in subjectdir as seg') + desc="use aparc+aseg.mgz in subjectdir as seg", + ) template_file = File( - exists=True, - argstr='--temp %s', - mandatory=True, - desc='output template volume') + exists=True, argstr="--temp %s", mandatory=True, desc="output template volume" + ) reg_file = File( exists=True, - argstr='--reg %s', - xor=('reg_file', 'reg_header', 'identity'), - desc='tkregister style matrix VolXYZ = R*LabelXYZ') + argstr="--reg %s", + xor=("reg_file", "reg_header", "identity"), + desc="tkregister style matrix VolXYZ = R*LabelXYZ", + ) reg_header = File( exists=True, - argstr='--regheader %s', - xor=('reg_file', 'reg_header', 'identity'), - desc='label template volume') + argstr="--regheader %s", + xor=("reg_file", "reg_header", "identity"), + desc="label template volume", + ) identity = traits.Bool( - argstr='--identity', - xor=('reg_file', 'reg_header', 'identity'), - desc='set R=I') + argstr="--identity", xor=("reg_file", "reg_header", "identity"), desc="set R=I" + ) invert_mtx = traits.Bool( - argstr='--invertmtx', desc='Invert the registration matrix') + argstr="--invertmtx", desc="Invert the registration matrix" + ) fill_thresh = traits.Range( - 0., 1., argstr='--fillthresh %g', desc='thresh : between 0 and 1') + 0.0, 1.0, argstr="--fillthresh %g", desc="thresh : between 0 and 1" + ) label_voxel_volume = traits.Float( - argstr='--labvoxvol %f', desc='volume of each label point (def 1mm3)') + argstr="--labvoxvol %f", desc="volume of each label point (def 1mm3)" + ) proj = traits.Tuple( - traits.Enum('abs', 'frac'), + traits.Enum("abs", "frac"), traits.Float, traits.Float, traits.Float, - argstr='--proj %s %f %f %f', - requires=('subject_id', 'hemi'), - desc='project along surface normal') - subject_id = traits.Str(argstr='--subject %s', desc='subject id') + argstr="--proj %s %f %f %f", + requires=("subject_id", "hemi"), + desc="project along surface normal", + ) + subject_id = traits.Str(argstr="--subject %s", desc="subject id") hemi = traits.Enum( - 'lh', 'rh', argstr='--hemi %s', desc='hemisphere to use lh or rh') - surface = traits.Str( - argstr='--surf %s', desc='use surface instead of white') - vol_label_file = File(argstr='--o %s', genfile=True, desc='output volume') + "lh", "rh", argstr="--hemi %s", desc="hemisphere to use lh or rh" + ) + surface = traits.Str(argstr="--surf %s", desc="use surface instead of white") + vol_label_file = File(argstr="--o %s", genfile=True, desc="output volume") label_hit_file = File( - argstr='--hits %s', desc='file with each frame is nhits for a label') + argstr="--hits %s", desc="file with each frame is nhits for a label" + ) map_label_stat = File( - argstr='--label-stat %s', - desc='map the label stats field into the vol') + argstr="--label-stat %s", desc="map the label stats field into the vol" + ) native_vox2ras = traits.Bool( - argstr='--native-vox2ras', - desc='use native vox2ras xform instead of tkregister-style') + argstr="--native-vox2ras", + desc="use native vox2ras xform instead of tkregister-style", + ) class Label2VolOutputSpec(TraitedSpec): - vol_label_file = File(exists=True, desc='output volume') + vol_label_file = File(exists=True, desc="output volume") class Label2Vol(FSCommand): @@ -1178,7 +1255,7 @@ class Label2Vol(FSCommand): 
""" - _cmd = 'mri_label2vol' + _cmd = "mri_label2vol" input_spec = Label2VolInputSpec output_spec = Label2VolOutputSpec @@ -1186,21 +1263,22 @@ def _list_outputs(self): outputs = self.output_spec().get() outfile = self.inputs.vol_label_file if not isdefined(outfile): - for key in ['label_file', 'annot_file', 'seg_file']: + for key in ["label_file", "annot_file", "seg_file"]: if isdefined(getattr(self.inputs, key)): path = getattr(self.inputs, key) if isinstance(path, list): path = path[0] _, src = os.path.split(path) if isdefined(self.inputs.aparc_aseg): - src = 'aparc+aseg.mgz' + src = "aparc+aseg.mgz" outfile = fname_presuffix( - src, suffix='_vol.nii.gz', newpath=os.getcwd(), use_ext=False) - outputs['vol_label_file'] = outfile + src, suffix="_vol.nii.gz", newpath=os.getcwd(), use_ext=False + ) + outputs["vol_label_file"] = outfile return outputs def _gen_filename(self, name): - if name == 'vol_label_file': + if name == "vol_label_file": return self._list_outputs()[name] return None @@ -1208,51 +1286,53 @@ def _gen_filename(self, name): class MS_LDAInputSpec(FSTraitedSpec): lda_labels = traits.List( traits.Int(), - argstr='-lda %s', + argstr="-lda %s", mandatory=True, minlen=2, maxlen=2, - sep=' ', - desc='pair of class labels to optimize') + sep=" ", + desc="pair of class labels to optimize", + ) weight_file = File( - argstr='-weight %s', + argstr="-weight %s", mandatory=True, - desc='filename for the LDA weights (input or output)') + desc="filename for the LDA weights (input or output)", + ) vol_synth_file = File( exists=False, - argstr='-synth %s', + argstr="-synth %s", mandatory=True, - desc=('filename for the synthesized output ' - 'volume')) + desc=("filename for the synthesized output " "volume"), + ) label_file = File( - exists=True, argstr='-label %s', desc='filename of the label volume') + exists=True, argstr="-label %s", desc="filename of the label volume" + ) mask_file = File( - exists=True, - argstr='-mask %s', - desc='filename of the brain mask volume') + exists=True, argstr="-mask %s", desc="filename of the brain mask volume" + ) shift = traits.Int( - argstr='-shift %d', - desc='shift all values equal to the given value to zero') + argstr="-shift %d", desc="shift all values equal to the given value to zero" + ) conform = traits.Bool( - argstr='-conform', - desc=('Conform the input volumes (brain mask ' - 'typically already conformed)')) + argstr="-conform", + desc=("Conform the input volumes (brain mask " "typically already conformed)"), + ) use_weights = traits.Bool( - argstr='-W', - desc=('Use the weights from a previously ' - 'generated weight file')) + argstr="-W", desc=("Use the weights from a previously " "generated weight file") + ) images = InputMultiPath( File(exists=True), - argstr='%s', + argstr="%s", mandatory=True, copyfile=False, - desc='list of input FLASH images', - position=-1) + desc="list of input FLASH images", + position=-1, + ) class MS_LDAOutputSpec(TraitedSpec): - weight_file = File(exists=True, desc='') - vol_synth_file = File(exists=True, desc='') + weight_file = File(exists=True, desc="") + vol_synth_file = File(exists=True, desc="") class MS_LDA(FSCommand): @@ -1273,34 +1353,32 @@ class MS_LDA(FSCommand): 'mri_ms_LDA -conform -label label.mgz -lda 2 3 -shift 1 -W -synth synth_out.mgz -weight weights.txt FLASH1.mgz FLASH2.mgz FLASH3.mgz' """ - _cmd = 'mri_ms_LDA' + _cmd = "mri_ms_LDA" input_spec = MS_LDAInputSpec output_spec = MS_LDAOutputSpec def _list_outputs(self): outputs = self._outputs().get() if isdefined(self.inputs.output_synth): - 
outputs['vol_synth_file'] = os.path.abspath( - self.inputs.output_synth) + outputs["vol_synth_file"] = os.path.abspath(self.inputs.output_synth) else: - outputs['vol_synth_file'] = os.path.abspath( - self.inputs.vol_synth_file) - if not isdefined( - self.inputs.use_weights) or self.inputs.use_weights is False: - outputs['weight_file'] = os.path.abspath(self.inputs.weight_file) + outputs["vol_synth_file"] = os.path.abspath(self.inputs.vol_synth_file) + if not isdefined(self.inputs.use_weights) or self.inputs.use_weights is False: + outputs["weight_file"] = os.path.abspath(self.inputs.weight_file) return outputs def _verify_weights_file_exists(self): if not os.path.exists(os.path.abspath(self.inputs.weight_file)): raise traits.TraitError( - "MS_LDA: use_weights must accompany an existing weights file") + "MS_LDA: use_weights must accompany an existing weights file" + ) def _format_arg(self, name, spec, value): - if name == 'use_weights': + if name == "use_weights": if self.inputs.use_weights is True: self._verify_weights_file_exists() else: - return '' + return "" # TODO: Fix bug when boolean values are set explicitly to false return super(MS_LDA, self)._format_arg(name, spec, value) @@ -1310,57 +1388,56 @@ def _gen_filename(self, name): class Label2LabelInputSpec(FSTraitedSpec): hemisphere = traits.Enum( - 'lh', - 'rh', - argstr="--hemi %s", - mandatory=True, - desc="Input hemisphere") + "lh", "rh", argstr="--hemi %s", mandatory=True, desc="Input hemisphere" + ) subject_id = traits.String( - 'subject_id', + "subject_id", usedefault=True, argstr="--trgsubject %s", mandatory=True, - desc="Target subject") + desc="Target subject", + ) sphere_reg = File( - mandatory=True, - exists=True, - desc="Implicit input .sphere.reg") - white = File( - mandatory=True, exists=True, desc="Implicit input .white") + mandatory=True, exists=True, desc="Implicit input .sphere.reg" + ) + white = File(mandatory=True, exists=True, desc="Implicit input .white") source_sphere_reg = File( - mandatory=True, - exists=True, - desc="Implicit input .sphere.reg") + mandatory=True, exists=True, desc="Implicit input .sphere.reg" + ) source_white = File( - mandatory=True, exists=True, desc="Implicit input .white") + mandatory=True, exists=True, desc="Implicit input .white" + ) source_label = File( - argstr="--srclabel %s", - mandatory=True, - exists=True, - desc="Source label") + argstr="--srclabel %s", mandatory=True, exists=True, desc="Source label" + ) source_subject = traits.String( - argstr="--srcsubject %s", mandatory=True, desc="Source subject name") + argstr="--srcsubject %s", mandatory=True, desc="Source subject name" + ) # optional out_file = File( argstr="--trglabel %s", - name_source=['source_label'], - name_template='%s_converted', + name_source=["source_label"], + name_template="%s_converted", hash_files=False, keep_extension=True, - desc="Target label") + desc="Target label", + ) registration_method = traits.Enum( - 'surface', - 'volume', + "surface", + "volume", usedefault=True, argstr="--regmethod %s", - desc="Registration method") + desc="Registration method", + ) copy_inputs = traits.Bool( - desc="If running as a node, set this to True." + - "This will copy the input files to the node " + "directory.") + desc="If running as a node, set this to True." + + "This will copy the input files to the node " + + "directory." 
+ ) class Label2LabelOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='Output label') + out_file = File(exists=True, desc="Output label") class Label2Label(FSCommand): @@ -1391,43 +1468,49 @@ class Label2Label(FSCommand): 'mri_label2label --hemi lh --trglabel lh-pial_converted.stl --regmethod surface --srclabel lh-pial.stl --srcsubject fsaverage --trgsubject 10335' """ - _cmd = 'mri_label2label' + _cmd = "mri_label2label" input_spec = Label2LabelInputSpec output_spec = Label2LabelOutputSpec def _list_outputs(self): outputs = self._outputs().get() - outputs['out_file'] = os.path.join(self.inputs.subjects_dir, - self.inputs.subject_id, 'label', - self.inputs.out_file) + outputs["out_file"] = os.path.join( + self.inputs.subjects_dir, + self.inputs.subject_id, + "label", + self.inputs.out_file, + ) return outputs def run(self, **inputs): if self.inputs.copy_inputs: self.inputs.subjects_dir = os.getcwd() - if 'subjects_dir' in inputs: - inputs['subjects_dir'] = self.inputs.subjects_dir + if "subjects_dir" in inputs: + inputs["subjects_dir"] = self.inputs.subjects_dir hemi = self.inputs.hemisphere - copy2subjdir(self, self.inputs.sphere_reg, 'surf', - '{0}.sphere.reg'.format(hemi)) - copy2subjdir(self, self.inputs.white, 'surf', - '{0}.white'.format(hemi)) + copy2subjdir( + self, self.inputs.sphere_reg, "surf", "{0}.sphere.reg".format(hemi) + ) + copy2subjdir(self, self.inputs.white, "surf", "{0}.white".format(hemi)) copy2subjdir( self, self.inputs.source_sphere_reg, - 'surf', - '{0}.sphere.reg'.format(hemi), - subject_id=self.inputs.source_subject) + "surf", + "{0}.sphere.reg".format(hemi), + subject_id=self.inputs.source_subject, + ) copy2subjdir( self, self.inputs.source_white, - 'surf', - '{0}.white'.format(hemi), - subject_id=self.inputs.source_subject) + "surf", + "{0}.white".format(hemi), + subject_id=self.inputs.source_subject, + ) # label dir must exist in order for output file to be written - label_dir = os.path.join(self.inputs.subjects_dir, - self.inputs.subject_id, 'label') + label_dir = os.path.join( + self.inputs.subjects_dir, self.inputs.subject_id, "label" + ) if not os.path.isdir(label_dir): os.makedirs(label_dir) @@ -1437,42 +1520,41 @@ def run(self, **inputs): class Label2AnnotInputSpec(FSTraitedSpec): # required hemisphere = traits.Enum( - 'lh', - 'rh', - argstr="--hemi %s", - mandatory=True, - desc="Input hemisphere") + "lh", "rh", argstr="--hemi %s", mandatory=True, desc="Input hemisphere" + ) subject_id = traits.String( - 'subject_id', + "subject_id", usedefault=True, argstr="--s %s", mandatory=True, - desc="Subject name/ID") + desc="Subject name/ID", + ) in_labels = traits.List( - argstr="--l %s...", mandatory=True, desc="List of input label files") + argstr="--l %s...", mandatory=True, desc="List of input label files" + ) out_annot = traits.String( - argstr="--a %s", - mandatory=True, - desc="Name of the annotation to create") + argstr="--a %s", mandatory=True, desc="Name of the annotation to create" + ) orig = File(exists=True, mandatory=True, desc="implicit {hemisphere}.orig") # optional keep_max = traits.Bool( - argstr="--maxstatwinner", desc="Keep label with highest 'stat' value") + argstr="--maxstatwinner", desc="Keep label with highest 'stat' value" + ) verbose_off = traits.Bool( - argstr="--noverbose", - desc="Turn off overlap and stat override messages") + argstr="--noverbose", desc="Turn off overlap and stat override messages" + ) color_table = File( argstr="--ctab %s", exists=True, - desc= - "File that defines the structure names, their indices, 
and their color" + desc="File that defines the structure names, their indices, and their color", ) copy_inputs = traits.Bool( - desc="copy implicit inputs and create a temp subjects_dir") + desc="copy implicit inputs and create a temp subjects_dir" + ) class Label2AnnotOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='Output annotation file') + out_file = File(exists=True, desc="Output annotation file") class Label2Annot(FSCommand): @@ -1492,23 +1574,25 @@ class Label2Annot(FSCommand): 'mris_label2annot --hemi lh --l lh.aparc.label --a test --s 10335' """ - _cmd = 'mris_label2annot' + _cmd = "mris_label2annot" input_spec = Label2AnnotInputSpec output_spec = Label2AnnotOutputSpec def run(self, **inputs): if self.inputs.copy_inputs: self.inputs.subjects_dir = os.getcwd() - if 'subjects_dir' in inputs: - inputs['subjects_dir'] = self.inputs.subjects_dir + if "subjects_dir" in inputs: + inputs["subjects_dir"] = self.inputs.subjects_dir copy2subjdir( self, self.inputs.orig, - folder='surf', - basename='{0}.orig'.format(self.inputs.hemisphere)) + folder="surf", + basename="{0}.orig".format(self.inputs.hemisphere), + ) # label dir must exist in order for output file to be written - label_dir = os.path.join(self.inputs.subjects_dir, - self.inputs.subject_id, 'label') + label_dir = os.path.join( + self.inputs.subjects_dir, self.inputs.subject_id, "label" + ) if not os.path.isdir(label_dir): os.makedirs(label_dir) return super(Label2Annot, self).run(**inputs) @@ -1516,39 +1600,27 @@ def run(self, **inputs): def _list_outputs(self): outputs = self._outputs().get() outputs["out_file"] = os.path.join( - str(self.inputs.subjects_dir), str(self.inputs.subject_id), - 'label', - str(self.inputs.hemisphere) + '.' + str(self.inputs.out_annot) + - '.annot') + str(self.inputs.subjects_dir), + str(self.inputs.subject_id), + "label", + str(self.inputs.hemisphere) + "." + str(self.inputs.out_annot) + ".annot", + ) return outputs class SphericalAverageInputSpec(FSTraitedSpec): out_file = File( - argstr="%s", - genfile=True, - exists=False, - position=-1, - desc="Output filename") + argstr="%s", genfile=True, exists=False, position=-1, desc="Output filename" + ) in_average = Directory( - argstr="%s", - exists=True, - genfile=True, - position=-2, - desc="Average subject") + argstr="%s", exists=True, genfile=True, position=-2, desc="Average subject" + ) in_surf = File( - argstr="%s", - mandatory=True, - exists=True, - position=-3, - desc="Input surface file") + argstr="%s", mandatory=True, exists=True, position=-3, desc="Input surface file" + ) hemisphere = traits.Enum( - 'lh', - 'rh', - argstr="%s", - mandatory=True, - position=-4, - desc="Input hemisphere") + "lh", "rh", argstr="%s", mandatory=True, position=-4, desc="Input hemisphere" + ) fname = traits.String( argstr="%s", mandatory=True, @@ -1558,28 +1630,28 @@ class SphericalAverageInputSpec(FSTraitedSpec): filename, set fname to 'rh.entorhinal' and which to 'label'. 
The program will then search for '{in_average}/label/rh.entorhinal.label' - """) + """, + ) which = traits.Enum( - 'coords', - 'label', - 'vals', - 'curv', - 'area', + "coords", + "label", + "vals", + "curv", + "area", argstr="%s", mandatory=True, position=-6, - desc="No documentation") - subject_id = traits.String( - argstr="-o %s", mandatory=True, desc="Output subject id") + desc="No documentation", + ) + subject_id = traits.String(argstr="-o %s", mandatory=True, desc="Output subject id") # optional erode = traits.Int(argstr="-erode %d", desc="Undocumented") - in_orig = File( - argstr="-orig %s", exists=True, desc="Original surface filename") + in_orig = File(argstr="-orig %s", exists=True, desc="Original surface filename") threshold = traits.Float(argstr="-t %.1f", desc="Undocumented") class SphericalAverageOutputSpec(TraitedSpec): - out_file = File(exists=False, desc='Output label') + out_file = File(exists=False, desc="Output label") class SphericalAverage(FSCommand): @@ -1603,26 +1675,26 @@ class SphericalAverage(FSCommand): 'mris_spherical_average -erode 2 -o 10335 -t 5.0 label lh.entorhinal lh pial . test.out' """ - _cmd = 'mris_spherical_average' + _cmd = "mris_spherical_average" input_spec = SphericalAverageInputSpec output_spec = SphericalAverageOutputSpec def _format_arg(self, name, spec, value): - if name == 'in_orig' or name == 'in_surf': + if name == "in_orig" or name == "in_surf": surf = os.path.basename(value) - for item in ['lh.', 'rh.']: - surf = surf.replace(item, '') + for item in ["lh.", "rh."]: + surf = surf.replace(item, "") return spec.argstr % surf return super(SphericalAverage, self)._format_arg(name, spec, value) def _gen_filename(self, name): - if name == 'in_average': - avg_subject = str(self.inputs.hemisphere) + '.EC_average' + if name == "in_average": + avg_subject = str(self.inputs.hemisphere) + ".EC_average" avg_directory = os.path.join(self.inputs.subjects_dir, avg_subject) if not os.path.isdir(avg_directory): - fs_home = os.path.abspath(os.environ.get('FREESURFER_HOME')) + fs_home = os.path.abspath(os.environ.get("FREESURFER_HOME")) return avg_subject - elif name == 'out_file': + elif name == "out_file": return self._list_outputs()[name] else: return None @@ -1630,15 +1702,15 @@ def _gen_filename(self, name): def _list_outputs(self): outputs = self._outputs().get() if isdefined(self.inputs.out_file): - outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) else: - out_dir = os.path.join(self.inputs.subjects_dir, - self.inputs.subject_id, 'label') + out_dir = os.path.join( + self.inputs.subjects_dir, self.inputs.subject_id, "label" + ) if isdefined(self.inputs.in_average): basename = os.path.basename(self.inputs.in_average) - basename = basename.replace('_', '_exvivo_') + '.label' + basename = basename.replace("_", "_exvivo_") + ".label" else: - basename = str( - self.inputs.hemisphere) + '.EC_exvivo_average.label' - outputs['out_file'] = os.path.join(out_dir, basename) + basename = str(self.inputs.hemisphere) + ".EC_exvivo_average.label" + outputs["out_file"] = os.path.join(out_dir, basename) return outputs diff --git a/nipype/interfaces/freesurfer/preprocess.py b/nipype/interfaces/freesurfer/preprocess.py index 020d1b7899..f7e09f7629 100644 --- a/nipype/interfaces/freesurfer/preprocess.py +++ b/nipype/interfaces/freesurfer/preprocess.py @@ -14,15 +14,22 @@ from ... 
import logging, LooseVersion from ...utils.filemanip import fname_presuffix, check_depends from ..io import FreeSurferSource -from ..base import (TraitedSpec, File, traits, Directory, InputMultiPath, - OutputMultiPath, CommandLine, CommandLineInputSpec, - isdefined) -from .base import (FSCommand, FSTraitedSpec, FSTraitedSpecOpenMP, - FSCommandOpenMP, Info) +from ..base import ( + TraitedSpec, + File, + traits, + Directory, + InputMultiPath, + OutputMultiPath, + CommandLine, + CommandLineInputSpec, + isdefined, +) +from .base import FSCommand, FSTraitedSpec, FSTraitedSpecOpenMP, FSCommandOpenMP, Info from .utils import copy2subjdir -__docformat__ = 'restructuredtext' -iflogger = logging.getLogger('nipype.interface') +__docformat__ = "restructuredtext" +iflogger = logging.getLogger("nipype.interface") # Keeping this to avoid breaking external programs that depend on it, but # this should not be used internally @@ -32,22 +39,24 @@ class ParseDICOMDirInputSpec(FSTraitedSpec): dicom_dir = Directory( exists=True, - argstr='--d %s', + argstr="--d %s", mandatory=True, - desc='path to siemens dicom directory') + desc="path to siemens dicom directory", + ) dicom_info_file = File( - 'dicominfo.txt', - argstr='--o %s', + "dicominfo.txt", + argstr="--o %s", usedefault=True, - desc='file to which results are written') - sortbyrun = traits.Bool(argstr='--sortbyrun', desc='assign run numbers') + desc="file to which results are written", + ) + sortbyrun = traits.Bool(argstr="--sortbyrun", desc="assign run numbers") summarize = traits.Bool( - argstr='--summarize', desc='only print out info for run leaders') + argstr="--summarize", desc="only print out info for run leaders" + ) class ParseDICOMDirOutputSpec(TraitedSpec): - dicom_info_file = File( - exists=True, desc='text file containing dicom information') + dicom_info_file = File(exists=True, desc="text file containing dicom information") class ParseDICOMDir(FSCommand): @@ -66,66 +75,72 @@ class ParseDICOMDir(FSCommand): """ - _cmd = 'mri_parse_sdcmdir' + _cmd = "mri_parse_sdcmdir" input_spec = ParseDICOMDirInputSpec output_spec = ParseDICOMDirOutputSpec def _list_outputs(self): outputs = self.output_spec().get() if isdefined(self.inputs.dicom_info_file): - outputs['dicom_info_file'] = os.path.join( - os.getcwd(), self.inputs.dicom_info_file) + outputs["dicom_info_file"] = os.path.join( + os.getcwd(), self.inputs.dicom_info_file + ) return outputs class UnpackSDICOMDirInputSpec(FSTraitedSpec): source_dir = Directory( exists=True, - argstr='-src %s', + argstr="-src %s", mandatory=True, - desc='directory with the DICOM files') + desc="directory with the DICOM files", + ) output_dir = Directory( - argstr='-targ %s', - desc='top directory into which the files will be unpacked') + argstr="-targ %s", desc="top directory into which the files will be unpacked" + ) run_info = traits.Tuple( traits.Int, traits.Str, traits.Str, traits.Str, mandatory=True, - argstr='-run %d %s %s %s', - xor=('run_info', 'config', 'seq_config'), - desc='runno subdir format name : spec unpacking rules on cmdline') + argstr="-run %d %s %s %s", + xor=("run_info", "config", "seq_config"), + desc="runno subdir format name : spec unpacking rules on cmdline", + ) config = File( exists=True, - argstr='-cfg %s', + argstr="-cfg %s", mandatory=True, - xor=('run_info', 'config', 'seq_config'), - desc='specify unpacking rules in file') + xor=("run_info", "config", "seq_config"), + desc="specify unpacking rules in file", + ) seq_config = File( exists=True, - argstr='-seqcfg %s', + argstr="-seqcfg 
%s", mandatory=True, - xor=('run_info', 'config', 'seq_config'), - desc='specify unpacking rules based on sequence') + xor=("run_info", "config", "seq_config"), + desc="specify unpacking rules based on sequence", + ) dir_structure = traits.Enum( - 'fsfast', - 'generic', - argstr='-%s', - desc='unpack to specified directory structures') - no_info_dump = traits.Bool( - argstr='-noinfodump', desc='do not create infodump file') + "fsfast", + "generic", + argstr="-%s", + desc="unpack to specified directory structures", + ) + no_info_dump = traits.Bool(argstr="-noinfodump", desc="do not create infodump file") scan_only = File( exists=True, - argstr='-scanonly %s', - desc='only scan the directory and put result in file') - log_file = File( - exists=True, argstr='-log %s', desc='explicilty set log file') + argstr="-scanonly %s", + desc="only scan the directory and put result in file", + ) + log_file = File(exists=True, argstr="-log %s", desc="explicilty set log file") spm_zeropad = traits.Int( - argstr='-nspmzeropad %d', - desc='set frame number zero padding width for SPM') + argstr="-nspmzeropad %d", desc="set frame number zero padding width for SPM" + ) no_unpack_err = traits.Bool( - argstr='-no-unpackerr', desc='do not try to unpack runs with errors') + argstr="-no-unpackerr", desc="do not try to unpack runs with errors" + ) class UnpackSDICOMDir(FSCommand): @@ -146,272 +161,344 @@ class UnpackSDICOMDir(FSCommand): >>> unpack.cmdline 'unpacksdcmdir -generic -targ . -run 5 mprage nii struct -src .' """ - _cmd = 'unpacksdcmdir' + + _cmd = "unpacksdcmdir" input_spec = UnpackSDICOMDirInputSpec class MRIConvertInputSpec(FSTraitedSpec): - read_only = traits.Bool(argstr='--read_only', desc='read the input volume') - no_write = traits.Bool(argstr='--no_write', desc='do not write output') - in_info = traits.Bool(argstr='--in_info', desc='display input info') - out_info = traits.Bool(argstr='--out_info', desc='display output info') - in_stats = traits.Bool(argstr='--in_stats', desc='display input stats') - out_stats = traits.Bool(argstr='--out_stats', desc='display output stats') - in_matrix = traits.Bool(argstr='--in_matrix', desc='display input matrix') - out_matrix = traits.Bool( - argstr='--out_matrix', desc='display output matrix') - in_i_size = traits.Int(argstr='--in_i_size %d', desc='input i size') - in_j_size = traits.Int(argstr='--in_j_size %d', desc='input j size') - in_k_size = traits.Int(argstr='--in_k_size %d', desc='input k size') + read_only = traits.Bool(argstr="--read_only", desc="read the input volume") + no_write = traits.Bool(argstr="--no_write", desc="do not write output") + in_info = traits.Bool(argstr="--in_info", desc="display input info") + out_info = traits.Bool(argstr="--out_info", desc="display output info") + in_stats = traits.Bool(argstr="--in_stats", desc="display input stats") + out_stats = traits.Bool(argstr="--out_stats", desc="display output stats") + in_matrix = traits.Bool(argstr="--in_matrix", desc="display input matrix") + out_matrix = traits.Bool(argstr="--out_matrix", desc="display output matrix") + in_i_size = traits.Int(argstr="--in_i_size %d", desc="input i size") + in_j_size = traits.Int(argstr="--in_j_size %d", desc="input j size") + in_k_size = traits.Int(argstr="--in_k_size %d", desc="input k size") force_ras = traits.Bool( - argstr='--force_ras_good', - desc='use default when orientation info absent') + argstr="--force_ras_good", desc="use default when orientation info absent" + ) in_i_dir = traits.Tuple( traits.Float, traits.Float, traits.Float, - 
argstr='--in_i_direction %f %f %f', - desc=' ') + argstr="--in_i_direction %f %f %f", + desc=" ", + ) in_j_dir = traits.Tuple( traits.Float, traits.Float, traits.Float, - argstr='--in_j_direction %f %f %f', - desc=' ') + argstr="--in_j_direction %f %f %f", + desc=" ", + ) in_k_dir = traits.Tuple( traits.Float, traits.Float, traits.Float, - argstr='--in_k_direction %f %f %f', - desc=' ') + argstr="--in_k_direction %f %f %f", + desc=" ", + ) _orientations = [ - 'LAI', 'LIA', 'ALI', 'AIL', 'ILA', 'IAL', 'LAS', 'LSA', 'ALS', 'ASL', - 'SLA', 'SAL', 'LPI', 'LIP', 'PLI', 'PIL', 'ILP', 'IPL', 'LPS', 'LSP', - 'PLS', 'PSL', 'SLP', 'SPL', 'RAI', 'RIA', 'ARI', 'AIR', 'IRA', 'IAR', - 'RAS', 'RSA', 'ARS', 'ASR', 'SRA', 'SAR', 'RPI', 'RIP', 'PRI', 'PIR', - 'IRP', 'IPR', 'RPS', 'RSP', 'PRS', 'PSR', 'SRP', 'SPR' + "LAI", + "LIA", + "ALI", + "AIL", + "ILA", + "IAL", + "LAS", + "LSA", + "ALS", + "ASL", + "SLA", + "SAL", + "LPI", + "LIP", + "PLI", + "PIL", + "ILP", + "IPL", + "LPS", + "LSP", + "PLS", + "PSL", + "SLP", + "SPL", + "RAI", + "RIA", + "ARI", + "AIR", + "IRA", + "IAR", + "RAS", + "RSA", + "ARS", + "ASR", + "SRA", + "SAR", + "RPI", + "RIP", + "PRI", + "PIR", + "IRP", + "IPR", + "RPS", + "RSP", + "PRS", + "PSR", + "SRP", + "SPR", ] # _orientations = [comb for comb in itertools.chain(*[[''.join(c) for c in itertools.permutations(s)] for s in [a+b+c for a in 'LR' for b in 'AP' for c in 'IS']])] in_orientation = traits.Enum( _orientations, - argstr='--in_orientation %s', - desc='specify the input orientation') + argstr="--in_orientation %s", + desc="specify the input orientation", + ) in_center = traits.List( traits.Float, maxlen=3, - argstr='--in_center %s', - desc=' ') - sphinx = traits.Bool( - argstr='--sphinx', desc='change orientation info to sphinx') + argstr="--in_center %s", + desc=" ", + ) + sphinx = traits.Bool(argstr="--sphinx", desc="change orientation info to sphinx") out_i_count = traits.Int( - argstr='--out_i_count %d', desc='some count ?? in i direction') + argstr="--out_i_count %d", desc="some count ?? in i direction" + ) out_j_count = traits.Int( - argstr='--out_j_count %d', desc='some count ?? in j direction') + argstr="--out_j_count %d", desc="some count ?? in j direction" + ) out_k_count = traits.Int( - argstr='--out_k_count %d', desc='some count ?? in k direction') + argstr="--out_k_count %d", desc="some count ?? 
in k direction" + ) vox_size = traits.Tuple( traits.Float, traits.Float, traits.Float, - argstr='-voxsize %f %f %f', - desc= - ' specify the size (mm) - useful for upsampling or downsampling' + argstr="-voxsize %f %f %f", + desc=" specify the size (mm) - useful for upsampling or downsampling", ) - out_i_size = traits.Int(argstr='--out_i_size %d', desc='output i size') - out_j_size = traits.Int(argstr='--out_j_size %d', desc='output j size') - out_k_size = traits.Int(argstr='--out_k_size %d', desc='output k size') + out_i_size = traits.Int(argstr="--out_i_size %d", desc="output i size") + out_j_size = traits.Int(argstr="--out_j_size %d", desc="output j size") + out_k_size = traits.Int(argstr="--out_k_size %d", desc="output k size") out_i_dir = traits.Tuple( traits.Float, traits.Float, traits.Float, - argstr='--out_i_direction %f %f %f', - desc=' ') + argstr="--out_i_direction %f %f %f", + desc=" ", + ) out_j_dir = traits.Tuple( traits.Float, traits.Float, traits.Float, - argstr='--out_j_direction %f %f %f', - desc=' ') + argstr="--out_j_direction %f %f %f", + desc=" ", + ) out_k_dir = traits.Tuple( traits.Float, traits.Float, traits.Float, - argstr='--out_k_direction %f %f %f', - desc=' ') + argstr="--out_k_direction %f %f %f", + desc=" ", + ) out_orientation = traits.Enum( _orientations, - argstr='--out_orientation %s', - desc='specify the output orientation') + argstr="--out_orientation %s", + desc="specify the output orientation", + ) out_center = traits.Tuple( traits.Float, traits.Float, traits.Float, - argstr='--out_center %f %f %f', - desc=' ') + argstr="--out_center %f %f %f", + desc=" ", + ) out_datatype = traits.Enum( - 'uchar', - 'short', - 'int', - 'float', - argstr='--out_data_type %s', - desc='output data type ') + "uchar", + "short", + "int", + "float", + argstr="--out_data_type %s", + desc="output data type ", + ) resample_type = traits.Enum( - 'interpolate', - 'weighted', - 'nearest', - 'sinc', - 'cubic', - argstr='--resample_type %s', - desc= - ' (default is interpolate)') - no_scale = traits.Bool( - argstr='--no_scale 1', desc='dont rescale values for COR') + "interpolate", + "weighted", + "nearest", + "sinc", + "cubic", + argstr="--resample_type %s", + desc=" (default is interpolate)", + ) + no_scale = traits.Bool(argstr="--no_scale 1", desc="dont rescale values for COR") no_change = traits.Bool( - argstr='--nochange', - desc="don't change type of input to that of template") - tr = traits.Int(argstr='-tr %d', desc='TR in msec') - te = traits.Int(argstr='-te %d', desc='TE in msec') - ti = traits.Int(argstr='-ti %d', desc='TI in msec (note upper case flag)') + argstr="--nochange", desc="don't change type of input to that of template" + ) + tr = traits.Int(argstr="-tr %d", desc="TR in msec") + te = traits.Int(argstr="-te %d", desc="TE in msec") + ti = traits.Int(argstr="-ti %d", desc="TI in msec (note upper case flag)") autoalign_matrix = File( - exists=True, - argstr='--autoalign %s', - desc='text file with autoalign matrix') + exists=True, argstr="--autoalign %s", desc="text file with autoalign matrix" + ) unwarp_gradient = traits.Bool( - argstr='--unwarp_gradient_nonlinearity', - desc='unwarp gradient nonlinearity') + argstr="--unwarp_gradient_nonlinearity", desc="unwarp gradient nonlinearity" + ) apply_transform = File( - exists=True, argstr='--apply_transform %s', desc='apply xfm file') + exists=True, argstr="--apply_transform %s", desc="apply xfm file" + ) apply_inv_transform = File( exists=True, - argstr='--apply_inverse_transform %s', - desc='apply inverse 
transformation xfm file') - devolve_transform = traits.Str(argstr='--devolvexfm %s', desc='subject id') + argstr="--apply_inverse_transform %s", + desc="apply inverse transformation xfm file", + ) + devolve_transform = traits.Str(argstr="--devolvexfm %s", desc="subject id") crop_center = traits.Tuple( traits.Int, traits.Int, traits.Int, - argstr='--crop %d %d %d', - desc=' crop to 256 around center (x, y, z)') + argstr="--crop %d %d %d", + desc=" crop to 256 around center (x, y, z)", + ) crop_size = traits.Tuple( traits.Int, traits.Int, traits.Int, - argstr='--cropsize %d %d %d', - desc=' crop to size ') + argstr="--cropsize %d %d %d", + desc=" crop to size ", + ) cut_ends = traits.Int( - argstr='--cutends %d', desc='remove ncut slices from the ends') + argstr="--cutends %d", desc="remove ncut slices from the ends" + ) slice_crop = traits.Tuple( traits.Int, traits.Int, - argstr='--slice-crop %d %d', - desc='s_start s_end : keep slices s_start to s_end') + argstr="--slice-crop %d %d", + desc="s_start s_end : keep slices s_start to s_end", + ) slice_reverse = traits.Bool( - argstr='--slice-reverse', - desc='reverse order of slices, update vox2ras') + argstr="--slice-reverse", desc="reverse order of slices, update vox2ras" + ) slice_bias = traits.Float( - argstr='--slice-bias %f', desc='apply half-cosine bias field') - fwhm = traits.Float( - argstr='--fwhm %f', desc='smooth input volume by fwhm mm') + argstr="--slice-bias %f", desc="apply half-cosine bias field" + ) + fwhm = traits.Float(argstr="--fwhm %f", desc="smooth input volume by fwhm mm") _filetypes = [ - 'cor', 'mgh', 'mgz', 'minc', 'analyze', 'analyze4d', 'spm', 'afni', - 'brik', 'bshort', 'bfloat', 'sdt', 'outline', 'otl', 'gdf', 'nifti1', - 'nii', 'niigz' - ] - _infiletypes = [ - 'ge', 'gelx', 'lx', 'ximg', 'siemens', 'dicom', 'siemens_dicom' + "cor", + "mgh", + "mgz", + "minc", + "analyze", + "analyze4d", + "spm", + "afni", + "brik", + "bshort", + "bfloat", + "sdt", + "outline", + "otl", + "gdf", + "nifti1", + "nii", + "niigz", ] + _infiletypes = ["ge", "gelx", "lx", "ximg", "siemens", "dicom", "siemens_dicom"] in_type = traits.Enum( - _filetypes + _infiletypes, - argstr='--in_type %s', - desc='input file type') - out_type = traits.Enum( - _filetypes, argstr='--out_type %s', desc='output file type') + _filetypes + _infiletypes, argstr="--in_type %s", desc="input file type" + ) + out_type = traits.Enum(_filetypes, argstr="--out_type %s", desc="output file type") ascii = traits.Bool( - argstr='--ascii', desc='save output as ascii col>row>slice>frame') + argstr="--ascii", desc="save output as ascii col>row>slice>frame" + ) reorder = traits.Tuple( traits.Int, traits.Int, traits.Int, - argstr='--reorder %d %d %d', - desc='olddim1 olddim2 olddim3') + argstr="--reorder %d %d %d", + desc="olddim1 olddim2 olddim3", + ) invert_contrast = traits.Float( - argstr='--invert_contrast %f', - desc='threshold for inversting contrast') + argstr="--invert_contrast %f", desc="threshold for inversting contrast" + ) in_file = File( exists=True, mandatory=True, position=-2, - argstr='--input_volume %s', - desc='File to read/convert') + argstr="--input_volume %s", + desc="File to read/convert", + ) out_file = File( - argstr='--output_volume %s', + argstr="--output_volume %s", position=-1, genfile=True, - desc='output filename or True to generate one') + desc="output filename or True to generate one", + ) conform = traits.Bool( - argstr='--conform', - desc= - 'conform to 1mm voxel size in coronal slice direction with 256^3 or more' + argstr="--conform", + 
desc="conform to 1mm voxel size in coronal slice direction with 256^3 or more", ) - conform_min = traits.Bool( - argstr='--conform_min', desc='conform to smallest size') + conform_min = traits.Bool(argstr="--conform_min", desc="conform to smallest size") conform_size = traits.Float( - argstr='--conform_size %s', desc='conform to size_in_mm') - cw256 = traits.Bool( - argstr='--cw256', desc='confrom to dimensions of 256^3') - parse_only = traits.Bool(argstr='--parse_only', desc='parse input only') - subject_name = traits.Str( - argstr='--subject_name %s', desc='subject name ???') + argstr="--conform_size %s", desc="conform to size_in_mm" + ) + cw256 = traits.Bool(argstr="--cw256", desc="confrom to dimensions of 256^3") + parse_only = traits.Bool(argstr="--parse_only", desc="parse input only") + subject_name = traits.Str(argstr="--subject_name %s", desc="subject name ???") reslice_like = File( - exists=True, - argstr='--reslice_like %s', - desc='reslice output to match file') + exists=True, argstr="--reslice_like %s", desc="reslice output to match file" + ) template_type = traits.Enum( _filetypes + _infiletypes, - argstr='--template_type %s', - desc='template file type') + argstr="--template_type %s", + desc="template file type", + ) split = traits.Bool( - argstr='--split', - desc='split output frames into separate output files.') - frame = traits.Int( - argstr='--frame %d', desc='keep only 0-based frame number') - midframe = traits.Bool( - argstr='--mid-frame', desc='keep only the middle frame') - skip_n = traits.Int(argstr='--nskip %d', desc='skip the first n frames') - drop_n = traits.Int(argstr='--ndrop %d', desc='drop the last n frames') + argstr="--split", desc="split output frames into separate output files." + ) + frame = traits.Int(argstr="--frame %d", desc="keep only 0-based frame number") + midframe = traits.Bool(argstr="--mid-frame", desc="keep only the middle frame") + skip_n = traits.Int(argstr="--nskip %d", desc="skip the first n frames") + drop_n = traits.Int(argstr="--ndrop %d", desc="drop the last n frames") frame_subsample = traits.Tuple( traits.Int, traits.Int, traits.Int, - argstr='--fsubsample %d %d %d', - desc='start delta end : frame subsampling (end = -1 for end)') - in_scale = traits.Float( - argstr='--scale %f', desc='input intensity scale factor') + argstr="--fsubsample %d %d %d", + desc="start delta end : frame subsampling (end = -1 for end)", + ) + in_scale = traits.Float(argstr="--scale %f", desc="input intensity scale factor") out_scale = traits.Float( - argstr='--out-scale %d', desc='output intensity scale factor') - in_like = File(exists=True, argstr='--in_like %s', desc='input looks like') + argstr="--out-scale %d", desc="output intensity scale factor" + ) + in_like = File(exists=True, argstr="--in_like %s", desc="input looks like") fill_parcellation = traits.Bool( - argstr='--fill_parcellation', desc='fill parcellation') + argstr="--fill_parcellation", desc="fill parcellation" + ) smooth_parcellation = traits.Bool( - argstr='--smooth_parcellation', desc='smooth parcellation') - zero_outlines = traits.Bool(argstr='--zero_outlines', desc='zero outlines') - color_file = File(exists=True, argstr='--color_file %s', desc='color file') - no_translate = traits.Bool(argstr='--no_translate', desc='???') - status_file = File( - argstr='--status %s', desc='status file for DICOM conversion') + argstr="--smooth_parcellation", desc="smooth parcellation" + ) + zero_outlines = traits.Bool(argstr="--zero_outlines", desc="zero outlines") + color_file = File(exists=True, 
argstr="--color_file %s", desc="color file") + no_translate = traits.Bool(argstr="--no_translate", desc="???") + status_file = File(argstr="--status %s", desc="status file for DICOM conversion") sdcm_list = File( - exists=True, - argstr='--sdcmlist %s', - desc='list of DICOM files for conversion') + exists=True, argstr="--sdcmlist %s", desc="list of DICOM files for conversion" + ) template_info = traits.Bool( - argstr='--template_info', desc='dump info about template') - crop_gdf = traits.Bool(argstr='--crop_gdf', desc='apply GDF cropping') + argstr="--template_info", desc="dump info about template" + ) + crop_gdf = traits.Bool(argstr="--crop_gdf", desc="apply GDF cropping") zero_ge_z_offset = traits.Bool( - argstr='--zero_ge_z_offset', desc='zero ge z offset ???') + argstr="--zero_ge_z_offset", desc="zero ge z offset ???" + ) class MRIConvertOutputSpec(TraitedSpec): - out_file = OutputMultiPath(File(exists=True), desc='converted output file') + out_file = OutputMultiPath(File(exists=True), desc="converted output file") class MRIConvert(FSCommand): @@ -431,44 +518,44 @@ class MRIConvert(FSCommand): 'mri_convert --out_type mgz --input_volume structural.nii --output_volume outfile.mgz' """ - _cmd = 'mri_convert' + + _cmd = "mri_convert" input_spec = MRIConvertInputSpec output_spec = MRIConvertOutputSpec filemap = dict( - cor='cor', - mgh='mgh', - mgz='mgz', - minc='mnc', - afni='brik', - brik='brik', - bshort='bshort', - spm='img', - analyze='img', - analyze4d='img', - bfloat='bfloat', - nifti1='img', - nii='nii', - niigz='nii.gz') + cor="cor", + mgh="mgh", + mgz="mgz", + minc="mnc", + afni="brik", + brik="brik", + bshort="bshort", + spm="img", + analyze="img", + analyze4d="img", + bfloat="bfloat", + nifti1="img", + nii="nii", + niigz="nii.gz", + ) def _format_arg(self, name, spec, value): - if name in ['in_type', 'out_type', 'template_type']: - if value == 'niigz': - return spec.argstr % 'nii' + if name in ["in_type", "out_type", "template_type"]: + if value == "niigz": + return spec.argstr % "nii" return super(MRIConvert, self)._format_arg(name, spec, value) def _get_outfilename(self): outfile = self.inputs.out_file if not isdefined(outfile): if isdefined(self.inputs.out_type): - suffix = '_out.' + self.filemap[self.inputs.out_type] + suffix = "_out." + self.filemap[self.inputs.out_type] else: - suffix = '_out.nii.gz' + suffix = "_out.nii.gz" outfile = fname_presuffix( - self.inputs.in_file, - newpath=os.getcwd(), - suffix=suffix, - use_ext=False) + self.inputs.in_file, newpath=os.getcwd(), suffix=suffix, use_ext=False + ) return os.path.abspath(outfile) def _list_outputs(self): @@ -480,20 +567,20 @@ def _list_outputs(self): tp = 1 else: tp = size[-1] - if outfile.endswith('.mgz'): - stem = outfile.split('.mgz')[0] - ext = '.mgz' - elif outfile.endswith('.nii.gz'): - stem = outfile.split('.nii.gz')[0] - ext = '.nii.gz' + if outfile.endswith(".mgz"): + stem = outfile.split(".mgz")[0] + ext = ".mgz" + elif outfile.endswith(".nii.gz"): + stem = outfile.split(".nii.gz")[0] + ext = ".nii.gz" else: - stem = '.'.join(outfile.split('.')[:-1]) - ext = '.' + outfile.split('.')[-1] + stem = ".".join(outfile.split(".")[:-1]) + ext = "." 
+ outfile.split(".")[-1] outfile = [] for idx in range(0, tp): - outfile.append(stem + '%04d' % idx + ext) + outfile.append(stem + "%04d" % idx + ext) if isdefined(self.inputs.out_type): - if self.inputs.out_type in ['spm', 'analyze']: + if self.inputs.out_type in ["spm", "analyze"]: # generate all outputs size = load(self.inputs.in_file).shape if len(size) == 3: @@ -502,19 +589,18 @@ def _list_outputs(self): tp = size[-1] # have to take care of all the frame manipulations raise Exception( - 'Not taking frame manipulations into account- please warn the developers' + "Not taking frame manipulations into account- please warn the developers" ) outfiles = [] outfile = self._get_outfilename() for i in range(tp): - outfiles.append( - fname_presuffix(outfile, suffix='%03d' % (i + 1))) + outfiles.append(fname_presuffix(outfile, suffix="%03d" % (i + 1))) outfile = outfiles - outputs['out_file'] = outfile + outputs["out_file"] = outfile return outputs def _gen_filename(self, name): - if name == 'out_file': + if name == "out_file": return self._get_outfilename() return None @@ -523,31 +609,36 @@ class DICOMConvertInputSpec(FSTraitedSpec): dicom_dir = Directory( exists=True, mandatory=True, - desc='dicom directory from which to convert dicom files') + desc="dicom directory from which to convert dicom files", + ) base_output_dir = Directory( - mandatory=True, - desc='directory in which subject directories are created') + mandatory=True, desc="directory in which subject directories are created" + ) subject_dir_template = traits.Str( - 'S.%04d', usedefault=True, desc='template for subject directory name') - subject_id = traits.Any(desc='subject identifier to insert into template') + "S.%04d", usedefault=True, desc="template for subject directory name" + ) + subject_id = traits.Any(desc="subject identifier to insert into template") file_mapping = traits.List( traits.Tuple(traits.Str, traits.Str), - desc='defines the output fields of interface') + desc="defines the output fields of interface", + ) out_type = traits.Enum( - 'niigz', + "niigz", MRIConvertInputSpec._filetypes, usedefault=True, - desc='defines the type of output file produced') + desc="defines the type of output file produced", + ) dicom_info = File( - exists=True, - desc='File containing summary information from mri_parse_sdcmdir') + exists=True, desc="File containing summary information from mri_parse_sdcmdir" + ) seq_list = traits.List( traits.Str, - requires=['dicom_info'], - desc='list of pulse sequence names to be converted.') + requires=["dicom_info"], + desc="list of pulse sequence names to be converted.", + ) ignore_single_slice = traits.Bool( - requires=['dicom_info'], - desc='ignore volumes containing a single slice') + requires=["dicom_info"], desc="ignore volumes containing a single slice" + ) class DICOMConvert(FSCommand): @@ -562,27 +653,27 @@ class DICOMConvert(FSCommand): >>> cvt.inputs.file_mapping = [('nifti', '*.nii'), ('info', 'dicom*.txt'), ('dti', '*dti.bv*')] """ - _cmd = 'mri_convert' + + _cmd = "mri_convert" input_spec = DICOMConvertInputSpec def _get_dicomfiles(self): """validate fsl bet options if set to None ignore """ - return glob( - os.path.abspath(os.path.join(self.inputs.dicom_dir, '*-1.dcm'))) + return glob(os.path.abspath(os.path.join(self.inputs.dicom_dir, "*-1.dcm"))) def _get_outdir(self): """returns output directory""" subjid = self.inputs.subject_id if not isdefined(subjid): path, fname = os.path.split(self._get_dicomfiles()[0]) - subjid = int(fname.split('-')[0]) + subjid = int(fname.split("-")[0]) if 
isdefined(self.inputs.subject_dir_template): subjid = self.inputs.subject_dir_template % subjid basedir = self.inputs.base_output_dir if not isdefined(basedir): - basedir = os.path.abspath('.') + basedir = os.path.abspath(".") outdir = os.path.abspath(os.path.join(basedir, subjid)) return outdir @@ -598,11 +689,11 @@ def _get_runs(self): if self.inputs.seq_list: if self.inputs.ignore_single_slice: if (int(s[8]) > 1) and any( - [s[12].startswith(sn) for sn in self.inputs.seq_list]): + [s[12].startswith(sn) for sn in self.inputs.seq_list] + ): runs.append(int(s[2])) else: - if any( - [s[12].startswith(sn) for sn in self.inputs.seq_list]): + if any([s[12].startswith(sn) for sn in self.inputs.seq_list]): runs.append(int(s[2])) else: runs.append(int(s[2])) @@ -614,11 +705,12 @@ def _get_filelist(self, outdir): for f in self._get_dicomfiles(): head, fname = os.path.split(f) fname, ext = os.path.splitext(fname) - fileparts = fname.split('-') + fileparts = fname.split("-") runno = int(fileparts[1]) out_type = MRIConvert.filemap[self.inputs.out_type] - outfile = os.path.join(outdir, '.'.join( - ('%s-%02d' % (fileparts[0], runno), out_type))) + outfile = os.path.join( + outdir, ".".join(("%s-%02d" % (fileparts[0], runno), out_type)) + ) filemap[runno] = (f, outfile) if self.inputs.dicom_info: files = [filemap[r] for r in self._get_runs()] @@ -634,43 +726,48 @@ def cmdline(self): outdir = self._get_outdir() cmd = [] if not os.path.exists(outdir): - cmdstr = 'python -c "import os; os.makedirs(\'%s\')"' % outdir + cmdstr = "python -c \"import os; os.makedirs('%s')\"" % outdir cmd.extend([cmdstr]) - infofile = os.path.join(outdir, 'shortinfo.txt') + infofile = os.path.join(outdir, "shortinfo.txt") if not os.path.exists(infofile): - cmdstr = 'dcmdir-info-mgh %s > %s' % (self.inputs.dicom_dir, - infofile) + cmdstr = "dcmdir-info-mgh %s > %s" % (self.inputs.dicom_dir, infofile) cmd.extend([cmdstr]) files = self._get_filelist(outdir) for infile, outfile in files: if not os.path.exists(outfile): - single_cmd = '%s%s %s %s' % (self._cmd_prefix, self.cmd, - infile, os.path.join(outdir, - outfile)) + single_cmd = "%s%s %s %s" % ( + self._cmd_prefix, + self.cmd, + infile, + os.path.join(outdir, outfile), + ) cmd.extend([single_cmd]) - return '; '.join(cmd) + return "; ".join(cmd) class ResampleInputSpec(FSTraitedSpec): in_file = File( exists=True, - argstr='-i %s', + argstr="-i %s", mandatory=True, - desc='file to resample', - position=-2) + desc="file to resample", + position=-2, + ) resampled_file = File( - argstr='-o %s', desc='output filename', genfile=True, position=-1) + argstr="-o %s", desc="output filename", genfile=True, position=-1 + ) voxel_size = traits.Tuple( traits.Float, traits.Float, traits.Float, - argstr='-vs %.2f %.2f %.2f', - desc='triplet of output voxel sizes', - mandatory=True) + argstr="-vs %.2f %.2f %.2f", + desc="triplet of output voxel sizes", + mandatory=True, + ) class ResampleOutputSpec(TraitedSpec): - resampled_file = File(exists=True, desc='output filename') + resampled_file = File(exists=True, desc="output filename") class Resample(FSCommand): @@ -689,7 +786,7 @@ class Resample(FSCommand): """ - _cmd = 'mri_convert' + _cmd = "mri_convert" input_spec = ResampleInputSpec output_spec = ResampleOutputSpec @@ -698,179 +795,212 @@ def _get_outfilename(self): outfile = self.inputs.resampled_file else: outfile = fname_presuffix( - self.inputs.in_file, newpath=os.getcwd(), suffix='_resample') + self.inputs.in_file, newpath=os.getcwd(), suffix="_resample" + ) return outfile def 
_list_outputs(self): outputs = self.output_spec().get() - outputs['resampled_file'] = self._get_outfilename() + outputs["resampled_file"] = self._get_outfilename() return outputs def _gen_filename(self, name): - if name == 'resampled_file': + if name == "resampled_file": return self._get_outfilename() return None class ReconAllInputSpec(CommandLineInputSpec): subject_id = traits.Str( - "recon_all", argstr='-subjid %s', desc='subject name', usedefault=True) + "recon_all", argstr="-subjid %s", desc="subject name", usedefault=True + ) directive = traits.Enum( - 'all', - 'autorecon1', + "all", + "autorecon1", # autorecon2 variants - 'autorecon2', - 'autorecon2-volonly', - 'autorecon2-perhemi', - 'autorecon2-inflate1', - 'autorecon2-cp', - 'autorecon2-wm', + "autorecon2", + "autorecon2-volonly", + "autorecon2-perhemi", + "autorecon2-inflate1", + "autorecon2-cp", + "autorecon2-wm", # autorecon3 variants - 'autorecon3', - 'autorecon3-T2pial', + "autorecon3", + "autorecon3-T2pial", # Mix of autorecon2 and autorecon3 steps - 'autorecon-pial', - 'autorecon-hemi', + "autorecon-pial", + "autorecon-hemi", # Not "multi-stage flags" - 'localGI', - 'qcache', - argstr='-%s', - desc='process directive', + "localGI", + "qcache", + argstr="-%s", + desc="process directive", usedefault=True, - position=0) - hemi = traits.Enum( - 'lh', 'rh', desc='hemisphere to process', argstr="-hemi %s") + position=0, + ) + hemi = traits.Enum("lh", "rh", desc="hemisphere to process", argstr="-hemi %s") T1_files = InputMultiPath( - File(exists=True), - argstr='-i %s...', - desc='name of T1 file to process') + File(exists=True), argstr="-i %s...", desc="name of T1 file to process" + ) T2_file = File( exists=True, argstr="-T2 %s", - min_ver='5.3.0', - desc='Convert T2 image to orig directory') + min_ver="5.3.0", + desc="Convert T2 image to orig directory", + ) FLAIR_file = File( exists=True, argstr="-FLAIR %s", - min_ver='5.3.0', - desc='Convert FLAIR image to orig directory') + min_ver="5.3.0", + desc="Convert FLAIR image to orig directory", + ) use_T2 = traits.Bool( argstr="-T2pial", - min_ver='5.3.0', - xor=['use_FLAIR'], - desc='Use T2 image to refine the pial surface') + min_ver="5.3.0", + xor=["use_FLAIR"], + desc="Use T2 image to refine the pial surface", + ) use_FLAIR = traits.Bool( argstr="-FLAIRpial", - min_ver='5.3.0', - xor=['use_T2'], - desc='Use FLAIR image to refine the pial surface') + min_ver="5.3.0", + xor=["use_T2"], + desc="Use FLAIR image to refine the pial surface", + ) openmp = traits.Int( - argstr="-openmp %d", desc="Number of processors to use in parallel") - parallel = traits.Bool( - argstr="-parallel", desc="Enable parallel execution") + argstr="-openmp %d", desc="Number of processors to use in parallel" + ) + parallel = traits.Bool(argstr="-parallel", desc="Enable parallel execution") hires = traits.Bool( argstr="-hires", - min_ver='6.0.0', - desc="Conform to minimum voxel size (for voxels < 1mm)") + min_ver="6.0.0", + desc="Conform to minimum voxel size (for voxels < 1mm)", + ) mprage = traits.Bool( - argstr='-mprage', - desc=('Assume scan parameters are MGH MP-RAGE ' - 'protocol, which produces darker gray matter')) + argstr="-mprage", + desc=( + "Assume scan parameters are MGH MP-RAGE " + "protocol, which produces darker gray matter" + ), + ) big_ventricles = traits.Bool( - argstr='-bigventricles', - desc=('For use in subjects with enlarged ' - 'ventricles')) + argstr="-bigventricles", + desc=("For use in subjects with enlarged " "ventricles"), + ) brainstem = traits.Bool( - 
argstr='-brainstem-structures', desc='Segment brainstem structures') + argstr="-brainstem-structures", desc="Segment brainstem structures" + ) hippocampal_subfields_T1 = traits.Bool( - argstr='-hippocampal-subfields-T1', - min_ver='6.0.0', - desc='segment hippocampal subfields using input T1 scan') + argstr="-hippocampal-subfields-T1", + min_ver="6.0.0", + desc="segment hippocampal subfields using input T1 scan", + ) hippocampal_subfields_T2 = traits.Tuple( File(exists=True), traits.Str(), - argstr='-hippocampal-subfields-T2 %s %s', - min_ver='6.0.0', - desc=('segment hippocampal subfields using T2 scan, identified by ' - 'ID (may be combined with hippocampal_subfields_T1)')) + argstr="-hippocampal-subfields-T2 %s %s", + min_ver="6.0.0", + desc=( + "segment hippocampal subfields using T2 scan, identified by " + "ID (may be combined with hippocampal_subfields_T1)" + ), + ) expert = File( - exists=True, - argstr='-expert %s', - desc="Set parameters using expert file") + exists=True, argstr="-expert %s", desc="Set parameters using expert file" + ) xopts = traits.Enum( "use", "clean", "overwrite", - argstr='-xopts-%s', - desc="Use, delete or overwrite existing expert options file") + argstr="-xopts-%s", + desc="Use, delete or overwrite existing expert options file", + ) subjects_dir = Directory( exists=True, - argstr='-sd %s', + argstr="-sd %s", hash_files=False, - desc='path to subjects directory', - genfile=True) - flags = InputMultiPath( - traits.Str, argstr='%s', desc='additional parameters') + desc="path to subjects directory", + genfile=True, + ) + flags = InputMultiPath(traits.Str, argstr="%s", desc="additional parameters") # Expert options - talairach = traits.Str( - desc="Flags to pass to talairach commands", xor=['expert']) + talairach = traits.Str(desc="Flags to pass to talairach commands", xor=["expert"]) mri_normalize = traits.Str( - desc="Flags to pass to mri_normalize commands", xor=['expert']) + desc="Flags to pass to mri_normalize commands", xor=["expert"] + ) mri_watershed = traits.Str( - desc="Flags to pass to mri_watershed commands", xor=['expert']) + desc="Flags to pass to mri_watershed commands", xor=["expert"] + ) mri_em_register = traits.Str( - desc="Flags to pass to mri_em_register commands", xor=['expert']) + desc="Flags to pass to mri_em_register commands", xor=["expert"] + ) mri_ca_normalize = traits.Str( - desc="Flags to pass to mri_ca_normalize commands", xor=['expert']) + desc="Flags to pass to mri_ca_normalize commands", xor=["expert"] + ) mri_ca_register = traits.Str( - desc="Flags to pass to mri_ca_register commands", xor=['expert']) + desc="Flags to pass to mri_ca_register commands", xor=["expert"] + ) mri_remove_neck = traits.Str( - desc="Flags to pass to mri_remove_neck commands", xor=['expert']) + desc="Flags to pass to mri_remove_neck commands", xor=["expert"] + ) mri_ca_label = traits.Str( - desc="Flags to pass to mri_ca_label commands", xor=['expert']) + desc="Flags to pass to mri_ca_label commands", xor=["expert"] + ) mri_segstats = traits.Str( - desc="Flags to pass to mri_segstats commands", xor=['expert']) - mri_mask = traits.Str( - desc="Flags to pass to mri_mask commands", xor=['expert']) + desc="Flags to pass to mri_segstats commands", xor=["expert"] + ) + mri_mask = traits.Str(desc="Flags to pass to mri_mask commands", xor=["expert"]) mri_segment = traits.Str( - desc="Flags to pass to mri_segment commands", xor=['expert']) + desc="Flags to pass to mri_segment commands", xor=["expert"] + ) mri_edit_wm_with_aseg = traits.Str( - desc="Flags to pass 
to mri_edit_wm_with_aseg commands", xor=['expert']) + desc="Flags to pass to mri_edit_wm_with_aseg commands", xor=["expert"] + ) mri_pretess = traits.Str( - desc="Flags to pass to mri_pretess commands", xor=['expert']) - mri_fill = traits.Str( - desc="Flags to pass to mri_fill commands", xor=['expert']) + desc="Flags to pass to mri_pretess commands", xor=["expert"] + ) + mri_fill = traits.Str(desc="Flags to pass to mri_fill commands", xor=["expert"]) mri_tessellate = traits.Str( - desc="Flags to pass to mri_tessellate commands", xor=['expert']) + desc="Flags to pass to mri_tessellate commands", xor=["expert"] + ) mris_smooth = traits.Str( - desc="Flags to pass to mri_smooth commands", xor=['expert']) + desc="Flags to pass to mri_smooth commands", xor=["expert"] + ) mris_inflate = traits.Str( - desc="Flags to pass to mri_inflate commands", xor=['expert']) + desc="Flags to pass to mri_inflate commands", xor=["expert"] + ) mris_sphere = traits.Str( - desc="Flags to pass to mris_sphere commands", xor=['expert']) + desc="Flags to pass to mris_sphere commands", xor=["expert"] + ) mris_fix_topology = traits.Str( - desc="Flags to pass to mris_fix_topology commands", xor=['expert']) + desc="Flags to pass to mris_fix_topology commands", xor=["expert"] + ) mris_make_surfaces = traits.Str( - desc="Flags to pass to mris_make_surfaces commands", xor=['expert']) + desc="Flags to pass to mris_make_surfaces commands", xor=["expert"] + ) mris_surf2vol = traits.Str( - desc="Flags to pass to mris_surf2vol commands", xor=['expert']) + desc="Flags to pass to mris_surf2vol commands", xor=["expert"] + ) mris_register = traits.Str( - desc="Flags to pass to mris_register commands", xor=['expert']) + desc="Flags to pass to mris_register commands", xor=["expert"] + ) mrisp_paint = traits.Str( - desc="Flags to pass to mrisp_paint commands", xor=['expert']) + desc="Flags to pass to mrisp_paint commands", xor=["expert"] + ) mris_ca_label = traits.Str( - desc="Flags to pass to mris_ca_label commands", xor=['expert']) + desc="Flags to pass to mris_ca_label commands", xor=["expert"] + ) mris_anatomical_stats = traits.Str( - desc="Flags to pass to mris_anatomical_stats commands", xor=['expert']) + desc="Flags to pass to mris_anatomical_stats commands", xor=["expert"] + ) mri_aparc2aseg = traits.Str( - desc="Flags to pass to mri_aparc2aseg commands", xor=['expert']) + desc="Flags to pass to mri_aparc2aseg commands", xor=["expert"] + ) class ReconAllOutputSpec(FreeSurferSource.output_spec): - subjects_dir = Directory( - exists=True, desc='Freesurfer subjects directory.') - subject_id = traits.Str(desc='Subject name for whom to retrieve data') + subjects_dir = Directory(exists=True, desc="Freesurfer subjects directory.") + subject_id = traits.Str(desc="Subject name for whom to retrieve data") class ReconAll(CommandLine): @@ -928,8 +1058,8 @@ class ReconAll(CommandLine): 'recon-all -all -i structural.nii -hippocampal-subfields-T2 structural.nii test -subjid foo -sd .' 
""" - _cmd = 'recon-all' - _additional_metadata = ['loc', 'altkey'] + _cmd = "recon-all" + _additional_metadata = ["loc", "altkey"] input_spec = ReconAllInputSpec output_spec = ReconAllOutputSpec _can_resume = True @@ -948,214 +1078,309 @@ class ReconAll(CommandLine): # [0] https://surfer.nmr.mgh.harvard.edu/fswiki/ReconAllTableStableV5.3 # [1] https://surfer.nmr.mgh.harvard.edu/fswiki/ReconAllTableStableV6.0 _autorecon1_steps = [ - ('motioncor', ['mri/rawavg.mgz', 'mri/orig.mgz'], []), + ("motioncor", ["mri/rawavg.mgz", "mri/orig.mgz"], []), ( - 'talairach', + "talairach", [ - 'mri/orig_nu.mgz', - 'mri/transforms/talairach.auto.xfm', - 'mri/transforms/talairach.xfm', + "mri/orig_nu.mgz", + "mri/transforms/talairach.auto.xfm", + "mri/transforms/talairach.xfm", # 'mri/transforms/talairach_avi.log', ], - []), - ('nuintensitycor', ['mri/nu.mgz'], []), - ('normalization', ['mri/T1.mgz'], []), - ('skullstrip', [ - 'mri/transforms/talairach_with_skull.lta', - 'mri/brainmask.auto.mgz', 'mri/brainmask.mgz' - ], []), + [], + ), + ("nuintensitycor", ["mri/nu.mgz"], []), + ("normalization", ["mri/T1.mgz"], []), + ( + "skullstrip", + [ + "mri/transforms/talairach_with_skull.lta", + "mri/brainmask.auto.mgz", + "mri/brainmask.mgz", + ], + [], + ), ] if Info.looseversion() < LooseVersion("6.0.0"): _autorecon2_volonly_steps = [ - ('gcareg', ['mri/transforms/talairach.lta'], []), - ('canorm', ['mri/norm.mgz'], []), - ('careg', ['mri/transforms/talairach.m3z'], []), - ('careginv', [ - 'mri/transforms/talairach.m3z.inv.x.mgz', - 'mri/transforms/talairach.m3z.inv.y.mgz', - 'mri/transforms/talairach.m3z.inv.z.mgz', - ], []), - ('rmneck', ['mri/nu_noneck.mgz'], []), - ('skull-lta', ['mri/transforms/talairach_with_skull_2.lta'], []), - ('calabel', [ - 'mri/aseg.auto_noCCseg.mgz', 'mri/aseg.auto.mgz', - 'mri/aseg.mgz' - ], []), - ('normalization2', ['mri/brain.mgz'], []), - ('maskbfs', ['mri/brain.finalsurfs.mgz'], []), - ('segmentation', - ['mri/wm.seg.mgz', 'mri/wm.asegedit.mgz', 'mri/wm.mgz'], []), + ("gcareg", ["mri/transforms/talairach.lta"], []), + ("canorm", ["mri/norm.mgz"], []), + ("careg", ["mri/transforms/talairach.m3z"], []), + ( + "careginv", + [ + "mri/transforms/talairach.m3z.inv.x.mgz", + "mri/transforms/talairach.m3z.inv.y.mgz", + "mri/transforms/talairach.m3z.inv.z.mgz", + ], + [], + ), + ("rmneck", ["mri/nu_noneck.mgz"], []), + ("skull-lta", ["mri/transforms/talairach_with_skull_2.lta"], []), + ( + "calabel", + ["mri/aseg.auto_noCCseg.mgz", "mri/aseg.auto.mgz", "mri/aseg.mgz"], + [], + ), + ("normalization2", ["mri/brain.mgz"], []), + ("maskbfs", ["mri/brain.finalsurfs.mgz"], []), + ( + "segmentation", + ["mri/wm.seg.mgz", "mri/wm.asegedit.mgz", "mri/wm.mgz"], + [], + ), ( - 'fill', + "fill", [ - 'mri/filled.mgz', + "mri/filled.mgz", # 'scripts/ponscc.cut.log', ], - []), + [], + ), ] _autorecon2_lh_steps = [ - ('tessellate', ['surf/lh.orig.nofix'], []), - ('smooth1', ['surf/lh.smoothwm.nofix'], []), - ('inflate1', ['surf/lh.inflated.nofix'], []), - ('qsphere', ['surf/lh.qsphere.nofix'], []), - ('fix', ['surf/lh.orig'], []), - ('white', [ - 'surf/lh.white', 'surf/lh.curv', 'surf/lh.area', - 'label/lh.cortex.label' - ], []), - ('smooth2', ['surf/lh.smoothwm'], []), - ('inflate2', [ - 'surf/lh.inflated', 'surf/lh.sulc', 'surf/lh.inflated.H', - 'surf/lh.inflated.K' - ], []), + ("tessellate", ["surf/lh.orig.nofix"], []), + ("smooth1", ["surf/lh.smoothwm.nofix"], []), + ("inflate1", ["surf/lh.inflated.nofix"], []), + ("qsphere", ["surf/lh.qsphere.nofix"], []), + ("fix", ["surf/lh.orig"], []), + ( + 
"white", + [ + "surf/lh.white", + "surf/lh.curv", + "surf/lh.area", + "label/lh.cortex.label", + ], + [], + ), + ("smooth2", ["surf/lh.smoothwm"], []), + ( + "inflate2", + [ + "surf/lh.inflated", + "surf/lh.sulc", + "surf/lh.inflated.H", + "surf/lh.inflated.K", + ], + [], + ), # Undocumented in ReconAllTableStableV5.3 - ('curvstats', ['stats/lh.curv.stats'], []), + ("curvstats", ["stats/lh.curv.stats"], []), ] _autorecon3_lh_steps = [ - ('sphere', ['surf/lh.sphere'], []), - ('surfreg', ['surf/lh.sphere.reg'], []), - ('jacobian_white', ['surf/lh.jacobian_white'], []), - ('avgcurv', ['surf/lh.avg_curv'], []), - ('cortparc', ['label/lh.aparc.annot'], []), - ('pial', [ - 'surf/lh.pial', 'surf/lh.curv.pial', 'surf/lh.area.pial', - 'surf/lh.thickness' - ], []), + ("sphere", ["surf/lh.sphere"], []), + ("surfreg", ["surf/lh.sphere.reg"], []), + ("jacobian_white", ["surf/lh.jacobian_white"], []), + ("avgcurv", ["surf/lh.avg_curv"], []), + ("cortparc", ["label/lh.aparc.annot"], []), + ( + "pial", + [ + "surf/lh.pial", + "surf/lh.curv.pial", + "surf/lh.area.pial", + "surf/lh.thickness", + ], + [], + ), # Misnamed outputs in ReconAllTableStableV5.3: ?h.w-c.pct.mgz - ('pctsurfcon', ['surf/lh.w-g.pct.mgh'], []), - ('parcstats', ['stats/lh.aparc.stats'], []), - ('cortparc2', ['label/lh.aparc.a2009s.annot'], []), - ('parcstats2', ['stats/lh.aparc.a2009s.stats'], []), + ("pctsurfcon", ["surf/lh.w-g.pct.mgh"], []), + ("parcstats", ["stats/lh.aparc.stats"], []), + ("cortparc2", ["label/lh.aparc.a2009s.annot"], []), + ("parcstats2", ["stats/lh.aparc.a2009s.stats"], []), # Undocumented in ReconAllTableStableV5.3 - ('cortparc3', ['label/lh.aparc.DKTatlas40.annot'], []), + ("cortparc3", ["label/lh.aparc.DKTatlas40.annot"], []), # Undocumented in ReconAllTableStableV5.3 - ('parcstats3', ['stats/lh.aparc.a2009s.stats'], []), - ('label-exvivo-ec', ['label/lh.entorhinal_exvivo.label'], []), + ("parcstats3", ["stats/lh.aparc.a2009s.stats"], []), + ("label-exvivo-ec", ["label/lh.entorhinal_exvivo.label"], []), ] _autorecon3_added_steps = [ - ('cortribbon', - ['mri/lh.ribbon.mgz', 'mri/rh.ribbon.mgz', 'mri/ribbon.mgz'], []), - ('segstats', ['stats/aseg.stats'], []), - ('aparc2aseg', ['mri/aparc+aseg.mgz', 'mri/aparc.a2009s+aseg.mgz'], - []), - ('wmparc', ['mri/wmparc.mgz', 'stats/wmparc.stats'], []), - ('balabels', ['label/BA.ctab', 'label/BA.thresh.ctab'], []), + ( + "cortribbon", + ["mri/lh.ribbon.mgz", "mri/rh.ribbon.mgz", "mri/ribbon.mgz"], + [], + ), + ("segstats", ["stats/aseg.stats"], []), + ("aparc2aseg", ["mri/aparc+aseg.mgz", "mri/aparc.a2009s+aseg.mgz"], []), + ("wmparc", ["mri/wmparc.mgz", "stats/wmparc.stats"], []), + ("balabels", ["label/BA.ctab", "label/BA.thresh.ctab"], []), ] else: _autorecon2_volonly_steps = [ - ('gcareg', ['mri/transforms/talairach.lta'], []), - ('canorm', ['mri/norm.mgz'], []), - ('careg', ['mri/transforms/talairach.m3z'], []), - ('calabel', [ - 'mri/aseg.auto_noCCseg.mgz', 'mri/aseg.auto.mgz', - 'mri/aseg.mgz' - ], []), - ('normalization2', ['mri/brain.mgz'], []), - ('maskbfs', ['mri/brain.finalsurfs.mgz'], []), - ('segmentation', - ['mri/wm.seg.mgz', 'mri/wm.asegedit.mgz', 'mri/wm.mgz'], []), + ("gcareg", ["mri/transforms/talairach.lta"], []), + ("canorm", ["mri/norm.mgz"], []), + ("careg", ["mri/transforms/talairach.m3z"], []), + ( + "calabel", + ["mri/aseg.auto_noCCseg.mgz", "mri/aseg.auto.mgz", "mri/aseg.mgz"], + [], + ), + ("normalization2", ["mri/brain.mgz"], []), + ("maskbfs", ["mri/brain.finalsurfs.mgz"], []), + ( + "segmentation", + ["mri/wm.seg.mgz", 
"mri/wm.asegedit.mgz", "mri/wm.mgz"], + [], + ), ( - 'fill', + "fill", [ - 'mri/filled.mgz', + "mri/filled.mgz", # 'scripts/ponscc.cut.log', ], - []), + [], + ), ] _autorecon2_lh_steps = [ - ('tessellate', ['surf/lh.orig.nofix'], []), - ('smooth1', ['surf/lh.smoothwm.nofix'], []), - ('inflate1', ['surf/lh.inflated.nofix'], []), - ('qsphere', ['surf/lh.qsphere.nofix'], []), - ('fix', ['surf/lh.orig'], []), - ('white', [ - 'surf/lh.white.preaparc', 'surf/lh.curv', 'surf/lh.area', - 'label/lh.cortex.label' - ], []), - ('smooth2', ['surf/lh.smoothwm'], []), - ('inflate2', ['surf/lh.inflated', 'surf/lh.sulc'], []), - ('curvHK', [ - 'surf/lh.white.H', 'surf/lh.white.K', 'surf/lh.inflated.H', - 'surf/lh.inflated.K' - ], []), - ('curvstats', ['stats/lh.curv.stats'], []), + ("tessellate", ["surf/lh.orig.nofix"], []), + ("smooth1", ["surf/lh.smoothwm.nofix"], []), + ("inflate1", ["surf/lh.inflated.nofix"], []), + ("qsphere", ["surf/lh.qsphere.nofix"], []), + ("fix", ["surf/lh.orig"], []), + ( + "white", + [ + "surf/lh.white.preaparc", + "surf/lh.curv", + "surf/lh.area", + "label/lh.cortex.label", + ], + [], + ), + ("smooth2", ["surf/lh.smoothwm"], []), + ("inflate2", ["surf/lh.inflated", "surf/lh.sulc"], []), + ( + "curvHK", + [ + "surf/lh.white.H", + "surf/lh.white.K", + "surf/lh.inflated.H", + "surf/lh.inflated.K", + ], + [], + ), + ("curvstats", ["stats/lh.curv.stats"], []), ] _autorecon3_lh_steps = [ - ('sphere', ['surf/lh.sphere'], []), - ('surfreg', ['surf/lh.sphere.reg'], []), - ('jacobian_white', ['surf/lh.jacobian_white'], []), - ('avgcurv', ['surf/lh.avg_curv'], []), - ('cortparc', ['label/lh.aparc.annot'], []), - ('pial', [ - 'surf/lh.pial', 'surf/lh.curv.pial', 'surf/lh.area.pial', - 'surf/lh.thickness', 'surf/lh.white' - ], []), - ('parcstats', ['stats/lh.aparc.stats'], []), - ('cortparc2', ['label/lh.aparc.a2009s.annot'], []), - ('parcstats2', ['stats/lh.aparc.a2009s.stats'], []), - ('cortparc3', ['label/lh.aparc.DKTatlas.annot'], []), - ('parcstats3', ['stats/lh.aparc.DKTatlas.stats'], []), - ('pctsurfcon', ['surf/lh.w-g.pct.mgh'], []), + ("sphere", ["surf/lh.sphere"], []), + ("surfreg", ["surf/lh.sphere.reg"], []), + ("jacobian_white", ["surf/lh.jacobian_white"], []), + ("avgcurv", ["surf/lh.avg_curv"], []), + ("cortparc", ["label/lh.aparc.annot"], []), + ( + "pial", + [ + "surf/lh.pial", + "surf/lh.curv.pial", + "surf/lh.area.pial", + "surf/lh.thickness", + "surf/lh.white", + ], + [], + ), + ("parcstats", ["stats/lh.aparc.stats"], []), + ("cortparc2", ["label/lh.aparc.a2009s.annot"], []), + ("parcstats2", ["stats/lh.aparc.a2009s.stats"], []), + ("cortparc3", ["label/lh.aparc.DKTatlas.annot"], []), + ("parcstats3", ["stats/lh.aparc.DKTatlas.stats"], []), + ("pctsurfcon", ["surf/lh.w-g.pct.mgh"], []), ] _autorecon3_added_steps = [ - ('cortribbon', - ['mri/lh.ribbon.mgz', 'mri/rh.ribbon.mgz', 'mri/ribbon.mgz'], []), - ('hyporelabel', ['mri/aseg.presurf.hypos.mgz'], []), - ('aparc2aseg', [ - 'mri/aparc+aseg.mgz', 'mri/aparc.a2009s+aseg.mgz', - 'mri/aparc.DKTatlas+aseg.mgz' - ], []), - ('apas2aseg', ['mri/aseg.mgz'], ['mri/aparc+aseg.mgz']), - ('segstats', ['stats/aseg.stats'], []), - ('wmparc', ['mri/wmparc.mgz', 'stats/wmparc.stats'], []), + ( + "cortribbon", + ["mri/lh.ribbon.mgz", "mri/rh.ribbon.mgz", "mri/ribbon.mgz"], + [], + ), + ("hyporelabel", ["mri/aseg.presurf.hypos.mgz"], []), + ( + "aparc2aseg", + [ + "mri/aparc+aseg.mgz", + "mri/aparc.a2009s+aseg.mgz", + "mri/aparc.DKTatlas+aseg.mgz", + ], + [], + ), + ("apas2aseg", ["mri/aseg.mgz"], ["mri/aparc+aseg.mgz"]), + ("segstats", 
["stats/aseg.stats"], []), + ("wmparc", ["mri/wmparc.mgz", "stats/wmparc.stats"], []), # Note that this is a very incomplete list; however the ctab # files are last to be touched, so this should be reasonable - ('balabels', [ - 'label/BA_exvivo.ctab', 'label/BA_exvivo.thresh.ctab', - 'label/lh.entorhinal_exvivo.label', - 'label/rh.entorhinal_exvivo.label' - ], []), + ( + "balabels", + [ + "label/BA_exvivo.ctab", + "label/BA_exvivo.thresh.ctab", + "label/lh.entorhinal_exvivo.label", + "label/rh.entorhinal_exvivo.label", + ], + [], + ), ] # Fill out autorecon2 steps - _autorecon2_rh_steps = [(step, [out.replace('lh', 'rh') - for out in outs], ins) - for step, outs, ins in _autorecon2_lh_steps] - _autorecon2_perhemi_steps = [(step, [ - of for out in outs for of in (out, out.replace('lh', 'rh')) - ], ins) for step, outs, ins in _autorecon2_lh_steps] + _autorecon2_rh_steps = [ + (step, [out.replace("lh", "rh") for out in outs], ins) + for step, outs, ins in _autorecon2_lh_steps + ] + _autorecon2_perhemi_steps = [ + (step, [of for out in outs for of in (out, out.replace("lh", "rh"))], ins) + for step, outs, ins in _autorecon2_lh_steps + ] _autorecon2_steps = _autorecon2_volonly_steps + _autorecon2_perhemi_steps # Fill out autorecon3 steps - _autorecon3_rh_steps = [(step, [out.replace('lh', 'rh') - for out in outs], ins) - for step, outs, ins in _autorecon3_lh_steps] - _autorecon3_perhemi_steps = [(step, [ - of for out in outs for of in (out, out.replace('lh', 'rh')) - ], ins) for step, outs, ins in _autorecon3_lh_steps] + _autorecon3_rh_steps = [ + (step, [out.replace("lh", "rh") for out in outs], ins) + for step, outs, ins in _autorecon3_lh_steps + ] + _autorecon3_perhemi_steps = [ + (step, [of for out in outs for of in (out, out.replace("lh", "rh"))], ins) + for step, outs, ins in _autorecon3_lh_steps + ] _autorecon3_steps = _autorecon3_perhemi_steps + _autorecon3_added_steps # Fill out autorecon-hemi lh/rh steps - _autorecon_lh_steps = (_autorecon2_lh_steps + _autorecon3_lh_steps) - _autorecon_rh_steps = (_autorecon2_rh_steps + _autorecon3_rh_steps) + _autorecon_lh_steps = _autorecon2_lh_steps + _autorecon3_lh_steps + _autorecon_rh_steps = _autorecon2_rh_steps + _autorecon3_rh_steps _steps = _autorecon1_steps + _autorecon2_steps + _autorecon3_steps _binaries = [ - 'talairach', 'mri_normalize', 'mri_watershed', 'mri_em_register', - 'mri_ca_normalize', 'mri_ca_register', 'mri_remove_neck', - 'mri_ca_label', 'mri_segstats', 'mri_mask', 'mri_segment', - 'mri_edit_wm_with_aseg', 'mri_pretess', 'mri_fill', 'mri_tessellate', - 'mris_smooth', 'mris_inflate', 'mris_sphere', 'mris_fix_topology', - 'mris_make_surfaces', 'mris_surf2vol', 'mris_register', 'mrisp_paint', - 'mris_ca_label', 'mris_anatomical_stats', 'mri_aparc2aseg' + "talairach", + "mri_normalize", + "mri_watershed", + "mri_em_register", + "mri_ca_normalize", + "mri_ca_register", + "mri_remove_neck", + "mri_ca_label", + "mri_segstats", + "mri_mask", + "mri_segment", + "mri_edit_wm_with_aseg", + "mri_pretess", + "mri_fill", + "mri_tessellate", + "mris_smooth", + "mris_inflate", + "mris_sphere", + "mris_fix_topology", + "mris_make_surfaces", + "mris_surf2vol", + "mris_register", + "mrisp_paint", + "mris_ca_label", + "mris_anatomical_stats", + "mri_aparc2aseg", ] def _gen_subjects_dir(self): return os.getcwd() def _gen_filename(self, name): - if name == 'subjects_dir': + if name == "subjects_dir": return self._gen_subjects_dir() return None @@ -1171,47 +1396,57 @@ def _list_outputs(self): if isdefined(self.inputs.hemi): hemi = self.inputs.hemi 
else: - hemi = 'both' + hemi = "both" outputs = self._outputs().get() outputs.update( FreeSurferSource( - subject_id=self.inputs.subject_id, - subjects_dir=subjects_dir, - hemi=hemi)._list_outputs()) - outputs['subject_id'] = self.inputs.subject_id - outputs['subjects_dir'] = subjects_dir + subject_id=self.inputs.subject_id, subjects_dir=subjects_dir, hemi=hemi + )._list_outputs() + ) + outputs["subject_id"] = self.inputs.subject_id + outputs["subjects_dir"] = subjects_dir return outputs def _is_resuming(self): subjects_dir = self.inputs.subjects_dir if not isdefined(subjects_dir): subjects_dir = self._gen_subjects_dir() - if os.path.isdir( - os.path.join(subjects_dir, self.inputs.subject_id, 'mri')): + if os.path.isdir(os.path.join(subjects_dir, self.inputs.subject_id, "mri")): return True return False def _format_arg(self, name, trait_spec, value): - if name == 'T1_files': + if name == "T1_files": if self._is_resuming(): return None - if name == 'hippocampal_subfields_T1' and \ - isdefined(self.inputs.hippocampal_subfields_T2): + if name == "hippocampal_subfields_T1" and isdefined( + self.inputs.hippocampal_subfields_T2 + ): return None - if all((name == 'hippocampal_subfields_T2', + if all( + ( + name == "hippocampal_subfields_T2", isdefined(self.inputs.hippocampal_subfields_T1) - and self.inputs.hippocampal_subfields_T1)): - argstr = trait_spec.argstr.replace('T2', 'T1T2') + and self.inputs.hippocampal_subfields_T1, + ) + ): + argstr = trait_spec.argstr.replace("T2", "T1T2") return argstr % value - if name == 'directive' and value == 'autorecon-hemi': + if name == "directive" and value == "autorecon-hemi": if not isdefined(self.inputs.hemi): - raise ValueError("Directive 'autorecon-hemi' requires hemi " - "input to be set") - value += ' ' + self.inputs.hemi - if all((name == 'hemi', isdefined(self.inputs.directive) - and self.inputs.directive == 'autorecon-hemi')): + raise ValueError( + "Directive 'autorecon-hemi' requires hemi " "input to be set" + ) + value += " " + self.inputs.hemi + if all( + ( + name == "hemi", + isdefined(self.inputs.directive) + and self.inputs.directive == "autorecon-hemi", + ) + ): return None return super(ReconAll, self)._format_arg(name, trait_spec, value) @@ -1233,28 +1468,26 @@ def cmdline(self): directive = self.inputs.directive if not isdefined(directive): steps = [] - elif directive == 'autorecon1': + elif directive == "autorecon1": steps = self._autorecon1_steps - elif directive == 'autorecon2-volonly': + elif directive == "autorecon2-volonly": steps = self._autorecon2_volonly_steps - elif directive == 'autorecon2-perhemi': + elif directive == "autorecon2-perhemi": steps = self._autorecon2_perhemi_steps - elif directive.startswith('autorecon2'): + elif directive.startswith("autorecon2"): if isdefined(self.inputs.hemi): - if self.inputs.hemi == 'lh': - steps = (self._autorecon2_volonly_steps + - self._autorecon2_lh_steps) + if self.inputs.hemi == "lh": + steps = self._autorecon2_volonly_steps + self._autorecon2_lh_steps else: - steps = (self._autorecon2_volonly_steps + - self._autorecon2_rh_steps) + steps = self._autorecon2_volonly_steps + self._autorecon2_rh_steps else: steps = self._autorecon2_steps - elif directive == 'autorecon-hemi': - if self.inputs.hemi == 'lh': + elif directive == "autorecon-hemi": + if self.inputs.hemi == "lh": steps = self._autorecon_lh_steps else: steps = self._autorecon_rh_steps - elif directive == 'autorecon3': + elif directive == "autorecon3": steps = self._autorecon3_steps else: steps = self._steps @@ -1262,8 +1495,8 @@ 
def cmdline(self): no_run = True flags = [] for step, outfiles, infiles in steps: - flag = '-{}'.format(step) - noflag = '-no{}'.format(step) + flag = "-{}".format(step) + noflag = "-no{}".format(step) if noflag in cmd: continue elif flag in cmd: @@ -1271,42 +1504,43 @@ def cmdline(self): continue subj_dir = os.path.join(subjects_dir, self.inputs.subject_id) - if check_depends([os.path.join(subj_dir, f) for f in outfiles], - [os.path.join(subj_dir, f) for f in infiles]): + if check_depends( + [os.path.join(subj_dir, f) for f in outfiles], + [os.path.join(subj_dir, f) for f in infiles], + ): flags.append(noflag) else: no_run = False if no_run and not self.force_run: - iflogger.info('recon-all complete : Not running') + iflogger.info("recon-all complete : Not running") return "echo recon-all: nothing to do" - cmd += ' ' + ' '.join(flags) - iflogger.info('resume recon-all : %s', cmd) + cmd += " " + " ".join(flags) + iflogger.info("resume recon-all : %s", cmd) return cmd def _prep_expert_file(self): if isdefined(self.inputs.expert): - return '' + return "" lines = [] for binary in self._binaries: args = getattr(self.inputs, binary) if isdefined(args): - lines.append('{} {}\n'.format(binary, args)) + lines.append("{} {}\n".format(binary, args)) if lines == []: - return '' + return "" - contents = ''.join(lines) - if not isdefined(self.inputs.xopts) and \ - self._get_expert_file() == contents: - return ' -xopts-use' + contents = "".join(lines) + if not isdefined(self.inputs.xopts) and self._get_expert_file() == contents: + return " -xopts-use" - expert_fname = os.path.abspath('expert.opts') - with open(expert_fname, 'w') as fobj: + expert_fname = os.path.abspath("expert.opts") + with open(expert_fname, "w") as fobj: fobj.write(contents) - return ' -expert {}'.format(expert_fname) + return " -expert {}".format(expert_fname) def _get_expert_file(self): # Read pre-existing options file, if it exists @@ -1315,11 +1549,12 @@ def _get_expert_file(self): else: subjects_dir = self._gen_subjects_dir() - xopts_file = os.path.join(subjects_dir, self.inputs.subject_id, - 'scripts', 'expert-options') + xopts_file = os.path.join( + subjects_dir, self.inputs.subject_id, "scripts", "expert-options" + ) if not os.path.exists(xopts_file): - return '' - with open(xopts_file, 'r') as fobj: + return "" + with open(xopts_file, "r") as fobj: return fobj.read() @property @@ -1331,110 +1566,123 @@ def version(self): class BBRegisterInputSpec(FSTraitedSpec): subject_id = traits.Str( - argstr='--s %s', desc='freesurfer subject id', mandatory=True) + argstr="--s %s", desc="freesurfer subject id", mandatory=True + ) source_file = File( - argstr='--mov %s', - desc='source file to be registered', + argstr="--mov %s", + desc="source file to be registered", mandatory=True, - copyfile=False) + copyfile=False, + ) init = traits.Enum( - 'spm', - 'fsl', - 'header', - argstr='--init-%s', + "spm", + "fsl", + "header", + argstr="--init-%s", mandatory=True, - xor=['init_reg_file'], - desc='initialize registration spm, fsl, header') + xor=["init_reg_file"], + desc="initialize registration spm, fsl, header", + ) init_reg_file = File( exists=True, - argstr='--init-reg %s', - desc='existing registration file', - xor=['init'], - mandatory=True) + argstr="--init-reg %s", + desc="existing registration file", + xor=["init"], + mandatory=True, + ) contrast_type = traits.Enum( - 't1', - 't2', - 'bold', - 'dti', - argstr='--%s', - desc='contrast type of image', - mandatory=True) + "t1", + "t2", + "bold", + "dti", + argstr="--%s", + desc="contrast 
type of image", + mandatory=True, + ) intermediate_file = File( exists=True, argstr="--int %s", - desc="Intermediate image, e.g. in case of partial FOV") + desc="Intermediate image, e.g. in case of partial FOV", + ) reg_frame = traits.Int( argstr="--frame %d", xor=["reg_middle_frame"], - desc="0-based frame index for 4D source file") + desc="0-based frame index for 4D source file", + ) reg_middle_frame = traits.Bool( argstr="--mid-frame", xor=["reg_frame"], - desc="Register middle frame of 4D source file") + desc="Register middle frame of 4D source file", + ) out_reg_file = File( - argstr='--reg %s', desc='output registration file', genfile=True) + argstr="--reg %s", desc="output registration file", genfile=True + ) spm_nifti = traits.Bool( - argstr="--spm-nii", - desc="force use of nifti rather than analyze with SPM") + argstr="--spm-nii", desc="force use of nifti rather than analyze with SPM" + ) epi_mask = traits.Bool( - argstr="--epi-mask", desc="mask out B0 regions in stages 1 and 2") + argstr="--epi-mask", desc="mask out B0 regions in stages 1 and 2" + ) dof = traits.Enum( - 6, 9, 12, argstr='--%d', desc='number of transform degrees of freedom') + 6, 9, 12, argstr="--%d", desc="number of transform degrees of freedom" + ) fsldof = traits.Int( - argstr='--fsl-dof %d', - desc='degrees of freedom for initial registration (FSL)') + argstr="--fsl-dof %d", desc="degrees of freedom for initial registration (FSL)" + ) out_fsl_file = traits.Either( traits.Bool, File, argstr="--fslmat %s", - desc="write the transformation matrix in FSL FLIRT format") + desc="write the transformation matrix in FSL FLIRT format", + ) out_lta_file = traits.Either( traits.Bool, File, argstr="--lta %s", - min_ver='5.2.0', - desc="write the transformation matrix in LTA format") + min_ver="5.2.0", + desc="write the transformation matrix in LTA format", + ) registered_file = traits.Either( traits.Bool, File, - argstr='--o %s', - desc='output warped sourcefile either True or filename') + argstr="--o %s", + desc="output warped sourcefile either True or filename", + ) init_cost_file = traits.Either( traits.Bool, File, - argstr='--initcost %s', - desc='output initial registration cost file') + argstr="--initcost %s", + desc="output initial registration cost file", + ) class BBRegisterInputSpec6(BBRegisterInputSpec): init = traits.Enum( - 'coreg', - 'rr', - 'spm', - 'fsl', - 'header', - 'best', - argstr='--init-%s', - xor=['init_reg_file'], - desc='initialize registration with mri_coreg, spm, fsl, or header') + "coreg", + "rr", + "spm", + "fsl", + "header", + "best", + argstr="--init-%s", + xor=["init_reg_file"], + desc="initialize registration with mri_coreg, spm, fsl, or header", + ) init_reg_file = File( exists=True, - argstr='--init-reg %s', - desc='existing registration file', - xor=['init']) + argstr="--init-reg %s", + desc="existing registration file", + xor=["init"], + ) class BBRegisterOutputSpec(TraitedSpec): - out_reg_file = File(exists=True, desc='Output registration file') - out_fsl_file = File( - exists=True, desc='Output FLIRT-style registration file') - out_lta_file = File(exists=True, desc='Output LTA-style registration file') - min_cost_file = File( - exists=True, desc='Output registration minimum cost file') - init_cost_file = File( - exists=True, desc='Output initial registration cost file') - registered_file = File( - exists=True, desc='Registered and resampled source file') + out_reg_file = File(exists=True, desc="Output registration file") + out_fsl_file = File(exists=True, desc="Output FLIRT-style 
registration file") + out_lta_file = File(exists=True, desc="Output LTA-style registration file") + min_cost_file = File(exists=True, desc="Output registration minimum cost file") + init_cost_file = File(exists=True, desc="Output initial registration cost file") + registered_file = File(exists=True, desc="Registered and resampled source file") class BBRegister(FSCommand): @@ -1454,8 +1702,8 @@ class BBRegister(FSCommand): """ - _cmd = 'bbregister' - if LooseVersion('0.0.0') < Info.looseversion() < LooseVersion("6.0.0"): + _cmd = "bbregister" + if LooseVersion("0.0.0") < Info.looseversion() < LooseVersion("6.0.0"): input_spec = BBRegisterInputSpec else: input_spec = BBRegisterInputSpec6 @@ -1467,56 +1715,63 @@ def _list_outputs(self): _in = self.inputs if isdefined(_in.out_reg_file): - outputs['out_reg_file'] = op.abspath(_in.out_reg_file) + outputs["out_reg_file"] = op.abspath(_in.out_reg_file) elif _in.source_file: - suffix = '_bbreg_%s.dat' % _in.subject_id - outputs['out_reg_file'] = fname_presuffix( - _in.source_file, suffix=suffix, use_ext=False) + suffix = "_bbreg_%s.dat" % _in.subject_id + outputs["out_reg_file"] = fname_presuffix( + _in.source_file, suffix=suffix, use_ext=False + ) if isdefined(_in.registered_file): if isinstance(_in.registered_file, bool): - outputs['registered_file'] = fname_presuffix( - _in.source_file, suffix='_bbreg') + outputs["registered_file"] = fname_presuffix( + _in.source_file, suffix="_bbreg" + ) else: - outputs['registered_file'] = op.abspath(_in.registered_file) + outputs["registered_file"] = op.abspath(_in.registered_file) if isdefined(_in.out_lta_file): if isinstance(_in.out_lta_file, bool): - suffix = '_bbreg_%s.lta' % _in.subject_id + suffix = "_bbreg_%s.lta" % _in.subject_id out_lta_file = fname_presuffix( - _in.source_file, suffix=suffix, use_ext=False) - outputs['out_lta_file'] = out_lta_file + _in.source_file, suffix=suffix, use_ext=False + ) + outputs["out_lta_file"] = out_lta_file else: - outputs['out_lta_file'] = op.abspath(_in.out_lta_file) + outputs["out_lta_file"] = op.abspath(_in.out_lta_file) if isdefined(_in.out_fsl_file): if isinstance(_in.out_fsl_file, bool): - suffix = '_bbreg_%s.mat' % _in.subject_id + suffix = "_bbreg_%s.mat" % _in.subject_id out_fsl_file = fname_presuffix( - _in.source_file, suffix=suffix, use_ext=False) - outputs['out_fsl_file'] = out_fsl_file + _in.source_file, suffix=suffix, use_ext=False + ) + outputs["out_fsl_file"] = out_fsl_file else: - outputs['out_fsl_file'] = op.abspath(_in.out_fsl_file) + outputs["out_fsl_file"] = op.abspath(_in.out_fsl_file) if isdefined(_in.init_cost_file): if isinstance(_in.out_fsl_file, bool): - outputs[ - 'init_cost_file'] = outputs['out_reg_file'] + '.initcost' + outputs["init_cost_file"] = outputs["out_reg_file"] + ".initcost" else: - outputs['init_cost_file'] = op.abspath(_in.init_cost_file) + outputs["init_cost_file"] = op.abspath(_in.init_cost_file) - outputs['min_cost_file'] = outputs['out_reg_file'] + '.mincost' + outputs["min_cost_file"] = outputs["out_reg_file"] + ".mincost" return outputs def _format_arg(self, name, spec, value): - if name in ('registered_file', 'out_fsl_file', 'out_lta_file', - 'init_cost_file') and isinstance(value, bool): + if name in ( + "registered_file", + "out_fsl_file", + "out_lta_file", + "init_cost_file", + ) and isinstance(value, bool): value = self._list_outputs()[name] return super(BBRegister, self)._format_arg(name, spec, value) def _gen_filename(self, name): - if name == 'out_reg_file': + if name == "out_reg_file": return 
self._list_outputs()[name] return None @@ -1524,123 +1779,150 @@ def _gen_filename(self, name): class ApplyVolTransformInputSpec(FSTraitedSpec): source_file = File( exists=True, - argstr='--mov %s', + argstr="--mov %s", copyfile=False, mandatory=True, - desc='Input volume you wish to transform') - transformed_file = File( - desc='Output volume', argstr='--o %s', genfile=True) - _targ_xor = ('target_file', 'tal', 'fs_target') + desc="Input volume you wish to transform", + ) + transformed_file = File(desc="Output volume", argstr="--o %s", genfile=True) + _targ_xor = ("target_file", "tal", "fs_target") target_file = File( exists=True, - argstr='--targ %s', + argstr="--targ %s", xor=_targ_xor, - desc='Output template volume', - mandatory=True) + desc="Output template volume", + mandatory=True, + ) tal = traits.Bool( - argstr='--tal', + argstr="--tal", xor=_targ_xor, mandatory=True, - desc='map to a sub FOV of MNI305 (with --reg only)') + desc="map to a sub FOV of MNI305 (with --reg only)", + ) tal_resolution = traits.Float( - argstr="--talres %.10f", desc="Resolution to sample when using tal") + argstr="--talres %.10f", desc="Resolution to sample when using tal" + ) fs_target = traits.Bool( - argstr='--fstarg', + argstr="--fstarg", xor=_targ_xor, mandatory=True, - requires=['reg_file'], - desc='use orig.mgz from subject in regfile as target') - _reg_xor = ('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', - 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject') + requires=["reg_file"], + desc="use orig.mgz from subject in regfile as target", + ) + _reg_xor = ( + "reg_file", + "lta_file", + "lta_inv_file", + "fsl_reg_file", + "xfm_reg_file", + "reg_header", + "mni_152_reg", + "subject", + ) reg_file = File( exists=True, xor=_reg_xor, - argstr='--reg %s', + argstr="--reg %s", mandatory=True, - desc='tkRAS-to-tkRAS matrix (tkregister2 format)') + desc="tkRAS-to-tkRAS matrix (tkregister2 format)", + ) lta_file = File( exists=True, xor=_reg_xor, - argstr='--lta %s', + argstr="--lta %s", mandatory=True, - desc='Linear Transform Array file') + desc="Linear Transform Array file", + ) lta_inv_file = File( exists=True, xor=_reg_xor, - argstr='--lta-inv %s', + argstr="--lta-inv %s", mandatory=True, - desc='LTA, invert') + desc="LTA, invert", + ) reg_file = File( exists=True, xor=_reg_xor, - argstr='--reg %s', + argstr="--reg %s", mandatory=True, - desc='tkRAS-to-tkRAS matrix (tkregister2 format)') + desc="tkRAS-to-tkRAS matrix (tkregister2 format)", + ) fsl_reg_file = File( exists=True, xor=_reg_xor, - argstr='--fsl %s', + argstr="--fsl %s", mandatory=True, - desc='fslRAS-to-fslRAS matrix (FSL format)') + desc="fslRAS-to-fslRAS matrix (FSL format)", + ) xfm_reg_file = File( exists=True, xor=_reg_xor, - argstr='--xfm %s', + argstr="--xfm %s", mandatory=True, - desc='ScannerRAS-to-ScannerRAS matrix (MNI format)') + desc="ScannerRAS-to-ScannerRAS matrix (MNI format)", + ) reg_header = traits.Bool( xor=_reg_xor, - argstr='--regheader', + argstr="--regheader", mandatory=True, - desc='ScannerRAS-to-ScannerRAS matrix = identity') + desc="ScannerRAS-to-ScannerRAS matrix = identity", + ) mni_152_reg = traits.Bool( - xor=_reg_xor, - argstr='--regheader', - mandatory=True, - desc='target MNI152 space') + xor=_reg_xor, argstr="--regheader", mandatory=True, desc="target MNI152 space" + ) subject = traits.Str( xor=_reg_xor, - argstr='--s %s', + argstr="--s %s", mandatory=True, - desc='set matrix = identity and use subject for any templates') - inverse = traits.Bool(desc='sample from target to source', 
argstr='--inv') + desc="set matrix = identity and use subject for any templates", + ) + inverse = traits.Bool(desc="sample from target to source", argstr="--inv") interp = traits.Enum( - 'trilin', - 'nearest', - 'cubic', - argstr='--interp %s', - desc='Interpolation method ( or nearest)') + "trilin", + "nearest", + "cubic", + argstr="--interp %s", + desc="Interpolation method ( or nearest)", + ) no_resample = traits.Bool( - desc='Do not resample; just change vox2ras matrix', - argstr='--no-resample') + desc="Do not resample; just change vox2ras matrix", argstr="--no-resample" + ) m3z_file = File( argstr="--m3z %s", - desc=('This is the morph to be applied to the volume. ' - 'Unless the morph is in mri/transforms (eg.: for ' - 'talairach.m3z computed by reconall), you will need ' - 'to specify the full path to this morph and use the ' - '--noDefM3zPath flag.')) + desc=( + "This is the morph to be applied to the volume. " + "Unless the morph is in mri/transforms (eg.: for " + "talairach.m3z computed by reconall), you will need " + "to specify the full path to this morph and use the " + "--noDefM3zPath flag." + ), + ) no_ded_m3z_path = traits.Bool( argstr="--noDefM3zPath", - requires=['m3z_file'], - desc=('To be used with the m3z flag. ' - 'Instructs the code not to look for the' - 'm3z morph in the default location ' - '(SUBJECTS_DIR/subj/mri/transforms), ' - 'but instead just use the path ' - 'indicated in --m3z.')) + requires=["m3z_file"], + desc=( + "To be used with the m3z flag. " + "Instructs the code not to look for the" + "m3z morph in the default location " + "(SUBJECTS_DIR/subj/mri/transforms), " + "but instead just use the path " + "indicated in --m3z." + ), + ) invert_morph = traits.Bool( argstr="--inv-morph", - requires=['m3z_file'], - desc=('Compute and use the inverse of the ' - 'non-linear morph to resample the input ' - 'volume. To be used by --m3z.')) + requires=["m3z_file"], + desc=( + "Compute and use the inverse of the " + "non-linear morph to resample the input " + "volume. To be used by --m3z." 
+ ), + ) class ApplyVolTransformOutputSpec(TraitedSpec): - transformed_file = File( - exists=True, desc='Path to output file if used normally') + transformed_file = File(exists=True, desc="Path to output file if used normally") class ApplyVolTransform(FSCommand): @@ -1660,7 +1942,7 @@ class ApplyVolTransform(FSCommand): """ - _cmd = 'mri_vol2vol' + _cmd = "mri_vol2vol" input_spec = ApplyVolTransformInputSpec output_spec = ApplyVolTransformOutputSpec @@ -1669,67 +1951,69 @@ def _get_outfile(self): if not isdefined(outfile): if self.inputs.inverse is True: if self.inputs.fs_target is True: - src = 'orig.mgz' + src = "orig.mgz" else: src = self.inputs.target_file else: src = self.inputs.source_file - outfile = fname_presuffix( - src, newpath=os.getcwd(), suffix='_warped') + outfile = fname_presuffix(src, newpath=os.getcwd(), suffix="_warped") return outfile def _list_outputs(self): outputs = self.output_spec().get() - outputs['transformed_file'] = os.path.abspath(self._get_outfile()) + outputs["transformed_file"] = os.path.abspath(self._get_outfile()) return outputs def _gen_filename(self, name): - if name == 'transformed_file': + if name == "transformed_file": return self._get_outfile() return None class SmoothInputSpec(FSTraitedSpec): - in_file = File( - exists=True, desc='source volume', argstr='--i %s', mandatory=True) + in_file = File(exists=True, desc="source volume", argstr="--i %s", mandatory=True) reg_file = File( - desc='registers volume to surface anatomical ', - argstr='--reg %s', + desc="registers volume to surface anatomical ", + argstr="--reg %s", mandatory=True, - exists=True) - smoothed_file = File(desc='output volume', argstr='--o %s', genfile=True) + exists=True, + ) + smoothed_file = File(desc="output volume", argstr="--o %s", genfile=True) proj_frac_avg = traits.Tuple( traits.Float, traits.Float, traits.Float, - xor=['proj_frac'], - desc='average a long normal min max delta', - argstr='--projfrac-avg %.2f %.2f %.2f') + xor=["proj_frac"], + desc="average a long normal min max delta", + argstr="--projfrac-avg %.2f %.2f %.2f", + ) proj_frac = traits.Float( - desc='project frac of thickness a long surface normal', - xor=['proj_frac_avg'], - argstr='--projfrac %s') + desc="project frac of thickness a long surface normal", + xor=["proj_frac_avg"], + argstr="--projfrac %s", + ) surface_fwhm = traits.Range( low=0.0, - requires=['reg_file'], + requires=["reg_file"], mandatory=True, - xor=['num_iters'], - desc='surface FWHM in mm', - argstr='--fwhm %f') + xor=["num_iters"], + desc="surface FWHM in mm", + argstr="--fwhm %f", + ) num_iters = traits.Range( low=1, - xor=['surface_fwhm'], + xor=["surface_fwhm"], mandatory=True, - argstr='--niters %d', - desc='number of iterations instead of fwhm') + argstr="--niters %d", + desc="number of iterations instead of fwhm", + ) vol_fwhm = traits.Range( - low=0.0, - argstr='--vol-fwhm %f', - desc='volume smoothing outside of surface') + low=0.0, argstr="--vol-fwhm %f", desc="volume smoothing outside of surface" + ) class SmoothOutputSpec(TraitedSpec): - smoothed_file = File(exists=True, desc='smoothed input volume') + smoothed_file = File(exists=True, desc="smoothed input volume") class Smooth(FSCommand): @@ -1754,7 +2038,7 @@ class Smooth(FSCommand): """ - _cmd = 'mris_volsmooth' + _cmd = "mris_volsmooth" input_spec = SmoothInputSpec output_spec = SmoothOutputSpec @@ -1762,12 +2046,12 @@ def _list_outputs(self): outputs = self.output_spec().get() outfile = self.inputs.smoothed_file if not isdefined(outfile): - outfile = 
self._gen_fname(self.inputs.in_file, suffix='_smooth') - outputs['smoothed_file'] = outfile + outfile = self._gen_fname(self.inputs.in_file, suffix="_smooth") + outputs["smoothed_file"] = outfile return outputs def _gen_filename(self, name): - if name == 'smoothed_file': + if name == "smoothed_file": return self._list_outputs()[name] return None @@ -1775,130 +2059,139 @@ def _gen_filename(self, name): class RobustRegisterInputSpec(FSTraitedSpec): source_file = File( - exists=True, - mandatory=True, - argstr='--mov %s', - desc='volume to be registered') + exists=True, mandatory=True, argstr="--mov %s", desc="volume to be registered" + ) target_file = File( exists=True, mandatory=True, - argstr='--dst %s', - desc='target volume for the registration') + argstr="--dst %s", + desc="target volume for the registration", + ) out_reg_file = traits.Either( True, File, default=True, usedefault=True, - argstr='--lta %s', - desc='registration file; either True or filename') + argstr="--lta %s", + desc="registration file; either True or filename", + ) registered_file = traits.Either( traits.Bool, File, - argstr='--warp %s', - desc='registered image; either True or filename') + argstr="--warp %s", + desc="registered image; either True or filename", + ) weights_file = traits.Either( traits.Bool, File, - argstr='--weights %s', - desc='weights image to write; either True or filename') + argstr="--weights %s", + desc="weights image to write; either True or filename", + ) est_int_scale = traits.Bool( - argstr='--iscale', - desc='estimate intensity scale (recommended for unnormalized images)') + argstr="--iscale", + desc="estimate intensity scale (recommended for unnormalized images)", + ) trans_only = traits.Bool( - argstr='--transonly', desc='find 3 parameter translation only') + argstr="--transonly", desc="find 3 parameter translation only" + ) in_xfm_file = File( - exists=True, - argstr='--transform', - desc='use initial transform on source') + exists=True, argstr="--transform", desc="use initial transform on source" + ) half_source = traits.Either( traits.Bool, File, - argstr='--halfmov %s', - desc="write source volume mapped to halfway space") + argstr="--halfmov %s", + desc="write source volume mapped to halfway space", + ) half_targ = traits.Either( traits.Bool, File, argstr="--halfdst %s", - desc="write target volume mapped to halfway space") + desc="write target volume mapped to halfway space", + ) half_weights = traits.Either( traits.Bool, File, argstr="--halfweights %s", - desc="write weights volume mapped to halfway space") + desc="write weights volume mapped to halfway space", + ) half_source_xfm = traits.Either( traits.Bool, File, argstr="--halfmovlta %s", - desc="write transform from source to halfway space") + desc="write transform from source to halfway space", + ) half_targ_xfm = traits.Either( traits.Bool, File, argstr="--halfdstlta %s", - desc="write transform from target to halfway space") + desc="write transform from target to halfway space", + ) auto_sens = traits.Bool( - argstr='--satit', - xor=['outlier_sens'], + argstr="--satit", + xor=["outlier_sens"], mandatory=True, - desc='auto-detect good sensitivity') + desc="auto-detect good sensitivity", + ) outlier_sens = traits.Float( - argstr='--sat %.4f', - xor=['auto_sens'], + argstr="--sat %.4f", + xor=["auto_sens"], mandatory=True, - desc='set outlier sensitivity explicitly') + desc="set outlier sensitivity explicitly", + ) least_squares = traits.Bool( - argstr='--leastsquares', - desc='use least squares instead of robust estimator') - 
no_init = traits.Bool(argstr='--noinit', desc='skip transform init') + argstr="--leastsquares", desc="use least squares instead of robust estimator" + ) + no_init = traits.Bool(argstr="--noinit", desc="skip transform init") init_orient = traits.Bool( - argstr='--initorient', - desc='use moments for initial orient (recommended for stripped brains)' + argstr="--initorient", + desc="use moments for initial orient (recommended for stripped brains)", ) max_iterations = traits.Int( - argstr='--maxit %d', desc='maximum # of times on each resolution') + argstr="--maxit %d", desc="maximum # of times on each resolution" + ) high_iterations = traits.Int( - argstr='--highit %d', desc='max # of times on highest resolution') + argstr="--highit %d", desc="max # of times on highest resolution" + ) iteration_thresh = traits.Float( - argstr='--epsit %.3f', desc='stop iterations when below threshold') + argstr="--epsit %.3f", desc="stop iterations when below threshold" + ) subsample_thresh = traits.Int( - argstr='--subsample %d', - desc='subsample if dimension is above threshold size') + argstr="--subsample %d", desc="subsample if dimension is above threshold size" + ) outlier_limit = traits.Float( - argstr='--wlimit %.3f', desc='set maximal outlier limit in satit') + argstr="--wlimit %.3f", desc="set maximal outlier limit in satit" + ) write_vo2vox = traits.Bool( - argstr='--vox2vox', desc='output vox2vox matrix (default is RAS2RAS)') - no_multi = traits.Bool( - argstr='--nomulti', desc='work on highest resolution') + argstr="--vox2vox", desc="output vox2vox matrix (default is RAS2RAS)" + ) + no_multi = traits.Bool(argstr="--nomulti", desc="work on highest resolution") mask_source = File( - exists=True, - argstr='--maskmov %s', - desc='image to mask source volume with') + exists=True, argstr="--maskmov %s", desc="image to mask source volume with" + ) mask_target = File( - exists=True, - argstr='--maskdst %s', - desc='image to mask target volume with') + exists=True, argstr="--maskdst %s", desc="image to mask target volume with" + ) force_double = traits.Bool( - argstr='--doubleprec', desc='use double-precision intensities') - force_float = traits.Bool( - argstr='--floattype', desc='use float intensities') + argstr="--doubleprec", desc="use double-precision intensities" + ) + force_float = traits.Bool(argstr="--floattype", desc="use float intensities") class RobustRegisterOutputSpec(TraitedSpec): out_reg_file = File(exists=True, desc="output registration file") - registered_file = File( - exists=True, desc="output image with registration applied") + registered_file = File(exists=True, desc="output image with registration applied") weights_file = File(exists=True, desc="image of weights used") - half_source = File( - exists=True, desc="source image mapped to halfway space") + half_source = File(exists=True, desc="source image mapped to halfway space") half_targ = File(exists=True, desc="target image mapped to halfway space") - half_weights = File( - exists=True, desc="weights image mapped to halfway space") + half_weights = File(exists=True, desc="weights image mapped to halfway space") half_source_xfm = File( - exists=True, - desc="transform file to map source image to halfway space") + exists=True, desc="transform file to map source image to halfway space" + ) half_targ_xfm = File( - exists=True, - desc="transform file to map target image to halfway space") + exists=True, desc="transform file to map target image to halfway space" + ) class RobustRegister(FSCommand): @@ -1923,14 +2216,21 @@ class 
RobustRegister(FSCommand): """ - _cmd = 'mri_robust_register' + _cmd = "mri_robust_register" input_spec = RobustRegisterInputSpec output_spec = RobustRegisterOutputSpec def _format_arg(self, name, spec, value): - options = ("out_reg_file", "registered_file", "weights_file", - "half_source", "half_targ", "half_weights", - "half_source_xfm", "half_targ_xfm") + options = ( + "out_reg_file", + "registered_file", + "weights_file", + "half_source", + "half_targ", + "half_weights", + "half_source_xfm", + "half_targ_xfm", + ) if name in options and isinstance(value, bool): value = self._list_outputs()[name] return super(RobustRegister, self)._format_arg(name, spec, value) @@ -1938,8 +2238,7 @@ def _format_arg(self, name, spec, value): def _list_outputs(self): outputs = self.output_spec().get() cwd = os.getcwd() - prefices = dict( - src=self.inputs.source_file, trg=self.inputs.target_file) + prefices = dict(src=self.inputs.source_file, trg=self.inputs.target_file) suffices = dict( out_reg_file=("src", "_robustreg.lta", False), registered_file=("src", "_robustreg", True), @@ -1948,7 +2247,8 @@ def _list_outputs(self): half_targ=("trg", "_halfway", True), half_weights=("src", "_halfweights", True), half_source_xfm=("src", "_robustxfm.lta", False), - half_targ_xfm=("trg", "_robustxfm.lta", False)) + half_targ_xfm=("trg", "_robustxfm.lta", False), + ) for name, sufftup in list(suffices.items()): value = getattr(self.inputs, name) if value: @@ -1957,7 +2257,8 @@ def _list_outputs(self): prefices[sufftup[0]], suffix=sufftup[1], newpath=cwd, - use_ext=sufftup[2]) + use_ext=sufftup[2], + ) else: outputs[name] = os.path.abspath(value) return outputs @@ -1970,29 +2271,23 @@ class FitMSParamsInputSpec(FSTraitedSpec): argstr="%s", position=-2, mandatory=True, - desc="list of FLASH images (must be in mgh format)") - tr_list = traits.List( - traits.Int, desc="list of TRs of the input files (in msec)") - te_list = traits.List( - traits.Float, desc="list of TEs of the input files (in msec)") - flip_list = traits.List( - traits.Int, desc="list of flip angles of the input files") + desc="list of FLASH images (must be in mgh format)", + ) + tr_list = traits.List(traits.Int, desc="list of TRs of the input files (in msec)") + te_list = traits.List(traits.Float, desc="list of TEs of the input files (in msec)") + flip_list = traits.List(traits.Int, desc="list of flip angles of the input files") xfm_list = traits.List( - File(exists=True), - desc="list of transform files to apply to each FLASH image") + File(exists=True), desc="list of transform files to apply to each FLASH image" + ) out_dir = Directory( - argstr="%s", - position=-1, - genfile=True, - desc="directory to store output in") + argstr="%s", position=-1, genfile=True, desc="directory to store output in" + ) class FitMSParamsOutputSpec(TraitedSpec): - t1_image = File( - exists=True, desc="image of estimated T1 relaxation values") - pd_image = File( - exists=True, desc="image of estimated proton density values") + t1_image = File(exists=True, desc="image of estimated T1 relaxation values") + pd_image = File(exists=True, desc="image of estimated proton density values") t2star_image = File(exists=True, desc="image of estimated T2* values") @@ -2009,6 +2304,7 @@ class FitMSParams(FSCommand): 'mri_ms_fitparms flash_05.mgz flash_30.mgz flash_parameters' """ + _cmd = "mri_ms_fitparms" input_spec = FitMSParamsInputSpec output_spec = FitMSParamsOutputSpec @@ -2022,8 +2318,7 @@ def _format_arg(self, name, spec, value): if isdefined(self.inputs.te_list): cmd = " 
".join((cmd, "-te %.3f" % self.inputs.te_list[i])) if isdefined(self.inputs.flip_list): - cmd = " ".join((cmd, - "-fa %.1f" % self.inputs.flip_list[i])) + cmd = " ".join((cmd, "-fa %.1f" % self.inputs.flip_list[i])) if isdefined(self.inputs.xfm_list): cmd = " ".join((cmd, "-at %s" % self.inputs.xfm_list[i])) cmd = " ".join((cmd, file)) @@ -2052,31 +2347,27 @@ class SynthesizeFLASHInputSpec(FSTraitedSpec): fixed_weighting = traits.Bool( position=1, argstr="-w", - desc="use a fixed weighting to generate optimal gray/white contrast") + desc="use a fixed weighting to generate optimal gray/white contrast", + ) tr = traits.Float( - mandatory=True, - position=2, - argstr="%.2f", - desc="repetition time (in msec)") + mandatory=True, position=2, argstr="%.2f", desc="repetition time (in msec)" + ) flip_angle = traits.Float( - mandatory=True, - position=3, - argstr="%.2f", - desc="flip angle (in degrees)") + mandatory=True, position=3, argstr="%.2f", desc="flip angle (in degrees)" + ) te = traits.Float( - mandatory=True, position=4, argstr="%.3f", desc="echo time (in msec)") + mandatory=True, position=4, argstr="%.3f", desc="echo time (in msec)" + ) t1_image = File( - exists=True, - mandatory=True, - position=5, - argstr="%s", - desc="image of T1 values") + exists=True, mandatory=True, position=5, argstr="%s", desc="image of T1 values" + ) pd_image = File( exists=True, mandatory=True, position=6, argstr="%s", - desc="image of proton density values") + desc="image of proton density values", + ) out_file = File(genfile=True, argstr="%s", desc="image to write") @@ -2099,6 +2390,7 @@ class SynthesizeFLASH(FSCommand): 'mri_synthesize 20.00 30.00 3.000 T1.mgz PD.mgz flash_30syn.mgz' """ + _cmd = "mri_synthesize" input_spec = SynthesizeFLASHInputSpec output_spec = SynthesizeFLASHOutputSpec @@ -2109,7 +2401,8 @@ def _list_outputs(self): outputs["out_file"] = self.inputs.out_file else: outputs["out_file"] = self._gen_fname( - "synth-flash_%02d.mgz" % self.inputs.flip_angle, suffix="") + "synth-flash_%02d.mgz" % self.inputs.flip_angle, suffix="" + ) return outputs def _gen_filename(self, name): @@ -2124,55 +2417,53 @@ class MNIBiasCorrectionInputSpec(FSTraitedSpec): exists=True, mandatory=True, argstr="--i %s", - desc="input volume. Input can be any format accepted by mri_convert.") + desc="input volume. Input can be any format accepted by mri_convert.", + ) # optional out_file = File( argstr="--o %s", - name_source=['in_file'], - name_template='%s_output', + name_source=["in_file"], + name_template="%s_output", hash_files=False, keep_extension=True, desc="output volume. Output can be any format accepted by mri_convert. " - + "If the output format is COR, then the directory must exist.") + + "If the output format is COR, then the directory must exist.", + ) iterations = traits.Int( - 4, usedefault=True, + 4, + usedefault=True, argstr="--n %d", - desc= - "Number of iterations to run nu_correct. Default is 4. This is the number of times " - + - "that nu_correct is repeated (ie, using the output from the previous run as the input for " - + - "the next). This is different than the -iterations option to nu_correct." + desc="Number of iterations to run nu_correct. Default is 4. This is the number of times " + + "that nu_correct is repeated (ie, using the output from the previous run as the input for " + + "the next). This is different than the -iterations option to nu_correct.", ) protocol_iterations = traits.Int( argstr="--proto-iters %d", - desc= - "Passes Np as argument of the -iterations flag of nu_correct. 
This is different " - + - "than the --n flag above. Default is not to pass nu_correct the -iterations flag." + desc="Passes Np as argument of the -iterations flag of nu_correct. This is different " + + "than the --n flag above. Default is not to pass nu_correct the -iterations flag.", ) distance = traits.Int(argstr="--distance %d", desc="N3 -distance option") no_rescale = traits.Bool( argstr="--no-rescale", - desc="do not rescale so that global mean of output == input global mean" + desc="do not rescale so that global mean of output == input global mean", ) mask = File( exists=True, argstr="--mask %s", - desc= - "brainmask volume. Input can be any format accepted by mri_convert.") + desc="brainmask volume. Input can be any format accepted by mri_convert.", + ) transform = File( exists=True, argstr="--uchar %s", - desc="tal.xfm. Use mri_make_uchar instead of conforming") + desc="tal.xfm. Use mri_make_uchar instead of conforming", + ) stop = traits.Float( argstr="--stop %f", - desc= - "Convergence threshold below which iteration stops (suggest 0.01 to 0.0001)" + desc="Convergence threshold below which iteration stops (suggest 0.01 to 0.0001)", ) shrink = traits.Int( - argstr="--shrink %d", - desc="Shrink parameter for finer sampling (default is 4)") + argstr="--shrink %d", desc="Shrink parameter for finer sampling (default is 4)" + ) class MNIBiasCorrectionOutputSpec(TraitedSpec): @@ -2207,6 +2498,7 @@ class MNIBiasCorrection(FSCommand): [https://github.com/BIC-MNI/N3] """ + _cmd = "mri_nu_correct.mni" input_spec = MNIBiasCorrectionInputSpec output_spec = MNIBiasCorrectionOutputSpec @@ -2215,26 +2507,21 @@ class MNIBiasCorrection(FSCommand): class WatershedSkullStripInputSpec(FSTraitedSpec): # required in_file = File( - argstr="%s", - exists=True, - mandatory=True, - position=-2, - desc="input volume") + argstr="%s", exists=True, mandatory=True, position=-2, desc="input volume" + ) out_file = File( - 'brainmask.auto.mgz', + "brainmask.auto.mgz", argstr="%s", exists=False, mandatory=True, position=-1, usedefault=True, - desc="output volume") + desc="output volume", + ) # optional - t1 = traits.Bool( - argstr="-T1", desc="specify T1 input volume (T1 grey value = 110)") - brain_atlas = File( - argstr="-brain_atlas %s", exists=True, position=-4, desc="") - transform = File( - argstr="%s", exists=False, position=-3, desc="undocumented") + t1 = traits.Bool(argstr="-T1", desc="specify T1 input volume (T1 grey value = 110)") + brain_atlas = File(argstr="-brain_atlas %s", exists=True, position=-4, desc="") + transform = File(argstr="%s", exists=False, position=-3, desc="undocumented") class WatershedSkullStripOutputSpec(TraitedSpec): @@ -2266,46 +2553,48 @@ class WatershedSkullStrip(FSCommand): >>> skullstrip.cmdline 'mri_watershed -T1 transforms/talairach_with_skull.lta T1.mgz brainmask.auto.mgz' """ - _cmd = 'mri_watershed' + + _cmd = "mri_watershed" input_spec = WatershedSkullStripInputSpec output_spec = WatershedSkullStripOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) return outputs class NormalizeInputSpec(FSTraitedSpec): # required in_file = File( - argstr='%s', + argstr="%s", exists=True, mandatory=True, position=-2, - desc="The input file for Normalize") + desc="The input file for Normalize", + ) out_file = File( - argstr='%s', + argstr="%s", position=-1, - name_source=['in_file'], - name_template='%s_norm', + name_source=["in_file"], + 
name_template="%s_norm", hash_files=False, keep_extension=True, - desc="The output file for Normalize") + desc="The output file for Normalize", + ) # optional gradient = traits.Int( - argstr="-g %d", - desc="use max intensity/mm gradient g (default=1)") + argstr="-g %d", desc="use max intensity/mm gradient g (default=1)" + ) mask = File( - argstr="-mask %s", - exists=True, - desc="The input mask file for Normalize") + argstr="-mask %s", exists=True, desc="The input mask file for Normalize" + ) segmentation = File( - argstr="-aseg %s", - exists=True, - desc="The input segmentation for Normalize") + argstr="-aseg %s", exists=True, desc="The input segmentation for Normalize" + ) transform = File( - exists=True, desc="Tranform file from the header of the input file") + exists=True, desc="Tranform file from the header of the input file" + ) class NormalizeOutputSpec(TraitedSpec): @@ -2327,57 +2616,61 @@ class Normalize(FSCommand): >>> normalize.cmdline 'mri_normalize -g 1 T1.mgz T1_norm.mgz' """ + _cmd = "mri_normalize" input_spec = NormalizeInputSpec output_spec = NormalizeOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) return outputs class CANormalizeInputSpec(FSTraitedSpec): in_file = File( - argstr='%s', + argstr="%s", exists=True, mandatory=True, position=-4, - desc="The input file for CANormalize") + desc="The input file for CANormalize", + ) out_file = File( - argstr='%s', + argstr="%s", position=-1, - name_source=['in_file'], - name_template='%s_norm', + name_source=["in_file"], + name_template="%s_norm", hash_files=False, keep_extension=True, - desc="The output file for CANormalize") + desc="The output file for CANormalize", + ) atlas = File( - argstr='%s', + argstr="%s", exists=True, mandatory=True, position=-3, - desc="The atlas file in gca format") + desc="The atlas file in gca format", + ) transform = File( - argstr='%s', + argstr="%s", exists=True, mandatory=True, position=-2, - desc="The tranform file in lta format") + desc="The tranform file in lta format", + ) # optional - mask = File( - argstr='-mask %s', exists=True, desc="Specifies volume to use as mask") + mask = File(argstr="-mask %s", exists=True, desc="Specifies volume to use as mask") control_points = File( - argstr='-c %s', desc="File name for the output control points") + argstr="-c %s", desc="File name for the output control points" + ) long_file = File( - argstr='-long %s', - desc='undocumented flag used in longitudinal processing') + argstr="-long %s", desc="undocumented flag used in longitudinal processing" + ) class CANormalizeOutputSpec(TraitedSpec): out_file = File(exists=False, desc="The output file for Normalize") - control_points = File( - exists=False, desc="The output control points for Normalize") + control_points = File(exists=False, desc="The output control points for Normalize") class CANormalize(FSCommand): @@ -2397,62 +2690,59 @@ class CANormalize(FSCommand): >>> ca_normalize.cmdline 'mri_ca_normalize T1.mgz atlas.nii.gz trans.mat T1_norm.mgz' """ + _cmd = "mri_ca_normalize" input_spec = CANormalizeInputSpec output_spec = CANormalizeOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = os.path.abspath(self.inputs.out_file) - outputs['control_points'] = os.path.abspath(self.inputs.control_points) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) + outputs["control_points"] = 
os.path.abspath(self.inputs.control_points) return outputs class CARegisterInputSpec(FSTraitedSpecOpenMP): # required in_file = File( - argstr='%s', + argstr="%s", exists=True, mandatory=True, position=-3, - desc="The input volume for CARegister") + desc="The input volume for CARegister", + ) out_file = File( - argstr='%s', - position=-1, - genfile=True, - desc="The output volume for CARegister") + argstr="%s", position=-1, genfile=True, desc="The output volume for CARegister" + ) template = File( - argstr='%s', - exists=True, - position=-2, - desc="The template file in gca format") + argstr="%s", exists=True, position=-2, desc="The template file in gca format" + ) # optional - mask = File( - argstr='-mask %s', exists=True, desc="Specifies volume to use as mask") + mask = File(argstr="-mask %s", exists=True, desc="Specifies volume to use as mask") invert_and_save = traits.Bool( - argstr='-invert-and-save', + argstr="-invert-and-save", position=-4, - desc= - "Invert and save the .m3z multi-dimensional talaraich transform to x, y, and z .mgz files" + desc="Invert and save the .m3z multi-dimensional talaraich transform to x, y, and z .mgz files", ) - no_big_ventricles = traits.Bool( - argstr='-nobigventricles', desc="No big ventricles") + no_big_ventricles = traits.Bool(argstr="-nobigventricles", desc="No big ventricles") transform = File( - argstr='-T %s', exists=True, desc="Specifies transform in lta format") + argstr="-T %s", exists=True, desc="Specifies transform in lta format" + ) align = traits.String( - argstr='-align-%s', desc="Specifies when to perform alignment") + argstr="-align-%s", desc="Specifies when to perform alignment" + ) levels = traits.Int( - argstr='-levels %d', - desc= - "defines how many surrounding voxels will be used in interpolations, default is 6" + argstr="-levels %d", + desc="defines how many surrounding voxels will be used in interpolations, default is 6", ) A = traits.Int( - argstr='-A %d', - desc='undocumented flag used in longitudinal processing') + argstr="-A %d", desc="undocumented flag used in longitudinal processing" + ) l_files = InputMultiPath( File(exists=False), - argstr='-l %s', - desc='undocumented flag used in longitudinal processing') + argstr="-l %s", + desc="undocumented flag used in longitudinal processing", + ) class CARegisterOutputSpec(TraitedSpec): @@ -2473,23 +2763,24 @@ class CARegister(FSCommandOpenMP): >>> ca_register.cmdline 'mri_ca_register norm.mgz talairach.m3z' """ + _cmd = "mri_ca_register" input_spec = CARegisterInputSpec output_spec = CARegisterOutputSpec def _format_arg(self, name, spec, value): if name == "l_files" and len(value) == 1: - value.append('identity.nofile') + value.append("identity.nofile") return super(CARegister, self)._format_arg(name, spec, value) def _gen_fname(self, name): - if name == 'out_file': - return os.path.abspath('talairach.m3z') + if name == "out_file": + return os.path.abspath("talairach.m3z") return None def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) return outputs @@ -2500,53 +2791,58 @@ class CALabelInputSpec(FSTraitedSpecOpenMP): position=-4, mandatory=True, exists=True, - desc="Input volume for CALabel") + desc="Input volume for CALabel", + ) out_file = File( argstr="%s", position=-1, mandatory=True, exists=False, - desc="Output file for CALabel") + desc="Output file for CALabel", + ) transform = File( argstr="%s", position=-3, mandatory=True, exists=True, - 
desc="Input transform for CALabel") + desc="Input transform for CALabel", + ) template = File( argstr="%s", position=-2, mandatory=True, exists=True, - desc="Input template for CALabel") + desc="Input template for CALabel", + ) # optional in_vol = File(argstr="-r %s", exists=True, desc="set input volume") intensities = File( argstr="-r %s", exists=True, - desc="input label intensities file(used in longitudinal processing)") - no_big_ventricles = traits.Bool( - argstr="-nobigventricles", desc="No big ventricles") + desc="input label intensities file(used in longitudinal processing)", + ) + no_big_ventricles = traits.Bool(argstr="-nobigventricles", desc="No big ventricles") align = traits.Bool(argstr="-align", desc="Align CALabel") prior = traits.Float(argstr="-prior %.1f", desc="Prior for CALabel") relabel_unlikely = traits.Tuple( traits.Int, traits.Float, argstr="-relabel_unlikely %d %.1f", - desc=("Reclassify voxels at least some std" - " devs from the mean using some size" - " Gaussian window")) + desc=( + "Reclassify voxels at least some std" + " devs from the mean using some size" + " Gaussian window" + ), + ) label = File( argstr="-l %s", exists=True, - desc= - "Undocumented flag. Autorecon3 uses ../label/{hemisphere}.cortex.label as input file" + desc="Undocumented flag. Autorecon3 uses ../label/{hemisphere}.cortex.label as input file", ) aseg = File( argstr="-aseg %s", exists=True, - desc= - "Undocumented flag. Autorecon3 uses ../mri/aseg.presurf.mgz as input file" + desc="Undocumented flag. Autorecon3 uses ../mri/aseg.presurf.mgz as input file", ) @@ -2570,78 +2866,81 @@ class CALabel(FSCommandOpenMP): >>> ca_label.cmdline 'mri_ca_label norm.mgz trans.mat Template_6.nii out.mgz' """ + _cmd = "mri_ca_label" input_spec = CALabelInputSpec output_spec = CALabelOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) return outputs class MRIsCALabelInputSpec(FSTraitedSpecOpenMP): # required subject_id = traits.String( - 'subject_id', + "subject_id", argstr="%s", position=-5, usedefault=True, mandatory=True, - desc="Subject name or ID") + desc="Subject name or ID", + ) hemisphere = traits.Enum( - 'lh', - 'rh', + "lh", + "rh", argstr="%s", position=-4, mandatory=True, - desc="Hemisphere ('lh' or 'rh')") + desc="Hemisphere ('lh' or 'rh')", + ) canonsurf = File( argstr="%s", position=-3, mandatory=True, exists=True, - desc="Input canonical surface file") + desc="Input canonical surface file", + ) classifier = File( argstr="%s", position=-2, mandatory=True, exists=True, - desc="Classifier array input file") + desc="Classifier array input file", + ) smoothwm = File( - mandatory=True, - exists=True, - desc="implicit input {hemisphere}.smoothwm") - curv = File( - mandatory=True, exists=True, desc="implicit input {hemisphere}.curv") - sulc = File( - mandatory=True, exists=True, desc="implicit input {hemisphere}.sulc") + mandatory=True, exists=True, desc="implicit input {hemisphere}.smoothwm" + ) + curv = File(mandatory=True, exists=True, desc="implicit input {hemisphere}.curv") + sulc = File(mandatory=True, exists=True, desc="implicit input {hemisphere}.sulc") out_file = File( argstr="%s", position=-1, exists=False, - name_source=['hemisphere'], + name_source=["hemisphere"], keep_extension=True, hash_files=False, name_template="%s.aparc.annot", - desc="Annotated surface output file") + desc="Annotated surface output file", + ) # optional label = File( 
argstr="-l %s", exists=True, - desc= - "Undocumented flag. Autorecon3 uses ../label/{hemisphere}.cortex.label as input file" + desc="Undocumented flag. Autorecon3 uses ../label/{hemisphere}.cortex.label as input file", ) aseg = File( argstr="-aseg %s", exists=True, - desc= - "Undocumented flag. Autorecon3 uses ../mri/aseg.presurf.mgz as input file" + desc="Undocumented flag. Autorecon3 uses ../mri/aseg.presurf.mgz as input file", ) seed = traits.Int(argstr="-seed %d", desc="") - copy_inputs = traits.Bool(desc="Copies implicit inputs to node directory " - + "and creates a temp subjects_directory. " + - "Use this when running as a node") + copy_inputs = traits.Bool( + desc="Copies implicit inputs to node directory " + + "and creates a temp subjects_directory. " + + "Use this when running as a node" + ) class MRIsCALabelOutputSpec(TraitedSpec): @@ -2674,6 +2973,7 @@ class MRIsCALabel(FSCommandOpenMP): >>> ca_label.cmdline 'mris_ca_label test lh lh.pial im1.nii lh.aparc.annot' """ + _cmd = "mris_ca_label" input_spec = MRIsCALabelInputSpec output_spec = MRIsCALabelOutputSpec @@ -2681,28 +2981,32 @@ class MRIsCALabel(FSCommandOpenMP): def run(self, **inputs): if self.inputs.copy_inputs: self.inputs.subjects_dir = os.getcwd() - if 'subjects_dir' in inputs: - inputs['subjects_dir'] = self.inputs.subjects_dir - copy2subjdir(self, self.inputs.canonsurf, folder='surf') + if "subjects_dir" in inputs: + inputs["subjects_dir"] = self.inputs.subjects_dir + copy2subjdir(self, self.inputs.canonsurf, folder="surf") copy2subjdir( self, self.inputs.smoothwm, - folder='surf', - basename='{0}.smoothwm'.format(self.inputs.hemisphere)) + folder="surf", + basename="{0}.smoothwm".format(self.inputs.hemisphere), + ) copy2subjdir( self, self.inputs.curv, - folder='surf', - basename='{0}.curv'.format(self.inputs.hemisphere)) + folder="surf", + basename="{0}.curv".format(self.inputs.hemisphere), + ) copy2subjdir( self, self.inputs.sulc, - folder='surf', - basename='{0}.sulc'.format(self.inputs.hemisphere)) + folder="surf", + basename="{0}.sulc".format(self.inputs.hemisphere), + ) # The label directory must exist in order for an output to be written - label_dir = os.path.join(self.inputs.subjects_dir, - self.inputs.subject_id, 'label') + label_dir = os.path.join( + self.inputs.subjects_dir, self.inputs.subject_id, "label" + ) if not os.path.isdir(label_dir): os.makedirs(label_dir) @@ -2711,9 +3015,9 @@ def run(self, **inputs): def _list_outputs(self): outputs = self.output_spec().get() out_basename = os.path.basename(self.inputs.out_file) - outputs['out_file'] = os.path.join(self.inputs.subjects_dir, - self.inputs.subject_id, 'label', - out_basename) + outputs["out_file"] = os.path.join( + self.inputs.subjects_dir, self.inputs.subject_id, "label", out_basename + ) return outputs @@ -2722,39 +3026,45 @@ class SegmentCCInputSpec(FSTraitedSpec): argstr="-aseg %s", mandatory=True, exists=True, - desc="Input aseg file to read from subjects directory") + desc="Input aseg file to read from subjects directory", + ) in_norm = File( mandatory=True, exists=True, - desc="Required undocumented input {subject}/mri/norm.mgz") + desc="Required undocumented input {subject}/mri/norm.mgz", + ) out_file = File( argstr="-o %s", exists=False, - name_source=['in_file'], - name_template='%s.auto.mgz', + name_source=["in_file"], + name_template="%s.auto.mgz", hash_files=False, keep_extension=False, - desc="Filename to write aseg including CC") + desc="Filename to write aseg including CC", + ) out_rotation = File( argstr="-lta %s", 
mandatory=True, exists=False, - desc="Global filepath for writing rotation lta") + desc="Global filepath for writing rotation lta", + ) subject_id = traits.String( - 'subject_id', + "subject_id", argstr="%s", mandatory=True, position=-1, usedefault=True, - desc="Subject name") + desc="Subject name", + ) copy_inputs = traits.Bool( - desc="If running as a node, set this to True." + - "This will copy the input files to the node " + "directory.") + desc="If running as a node, set this to True." + + "This will copy the input files to the node " + + "directory." + ) class SegmentCCOutputSpec(TraitedSpec): - out_file = File( - exists=False, desc="Output segmentation uncluding corpus collosum") + out_file = File(exists=False, desc="Output segmentation uncluding corpus collosum") out_rotation = File(exists=False, desc="Output lta rotation file") @@ -2798,38 +3108,37 @@ def _format_arg(self, name, spec, value): def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = os.path.abspath(self.inputs.out_file) - outputs['out_rotation'] = os.path.abspath(self.inputs.out_rotation) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) + outputs["out_rotation"] = os.path.abspath(self.inputs.out_rotation) return outputs def run(self, **inputs): if self.inputs.copy_inputs: self.inputs.subjects_dir = os.getcwd() - if 'subjects_dir' in inputs: - inputs['subjects_dir'] = self.inputs.subjects_dir + if "subjects_dir" in inputs: + inputs["subjects_dir"] = self.inputs.subjects_dir for originalfile in [self.inputs.in_file, self.inputs.in_norm]: - copy2subjdir(self, originalfile, folder='mri') + copy2subjdir(self, originalfile, folder="mri") return super(SegmentCC, self).run(**inputs) def aggregate_outputs(self, runtime=None, needed_outputs=None): # it is necessary to find the output files and move # them to the correct loacation predicted_outputs = self._list_outputs() - for name in ['out_file', 'out_rotation']: + for name in ["out_file", "out_rotation"]: out_file = predicted_outputs[name] if not os.path.isfile(out_file): out_base = os.path.basename(out_file) if isdefined(self.inputs.subjects_dir): - subj_dir = os.path.join(self.inputs.subjects_dir, - self.inputs.subject_id) + subj_dir = os.path.join( + self.inputs.subjects_dir, self.inputs.subject_id + ) else: - subj_dir = os.path.join(os.getcwd(), - self.inputs.subject_id) - if name == 'out_file': - out_tmp = os.path.join(subj_dir, 'mri', out_base) - elif name == 'out_rotation': - out_tmp = os.path.join(subj_dir, 'mri', 'transforms', - out_base) + subj_dir = os.path.join(os.getcwd(), self.inputs.subject_id) + if name == "out_file": + out_tmp = os.path.join(subj_dir, "mri", out_base) + elif name == "out_rotation": + out_tmp = os.path.join(subj_dir, "mri", "transforms", out_base) else: out_tmp = None # move the file to correct location @@ -2837,8 +3146,7 @@ def aggregate_outputs(self, runtime=None, needed_outputs=None): if not os.path.isdir(os.path.dirname(out_tmp)): os.makedirs(os.path.dirname(out_tmp)) shutil.move(out_tmp, out_file) - return super(SegmentCC, self).aggregate_outputs( - runtime, needed_outputs) + return super(SegmentCC, self).aggregate_outputs(runtime, needed_outputs) class SegmentWMInputSpec(FSTraitedSpec): @@ -2847,13 +3155,15 @@ class SegmentWMInputSpec(FSTraitedSpec): exists=True, mandatory=True, position=-2, - desc="Input file for SegmentWM") + desc="Input file for SegmentWM", + ) out_file = File( argstr="%s", exists=False, mandatory=True, position=-1, - desc="File to be written as output for SegmentWM") + 
desc="File to be written as output for SegmentWM", + ) class SegmentWMOutputSpec(TraitedSpec): @@ -2883,7 +3193,7 @@ class SegmentWM(FSCommand): def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) return outputs @@ -2893,28 +3203,31 @@ class EditWMwithAsegInputSpec(FSTraitedSpec): position=-4, mandatory=True, exists=True, - desc="Input white matter segmentation file") + desc="Input white matter segmentation file", + ) brain_file = File( argstr="%s", position=-3, mandatory=True, exists=True, - desc="Input brain/T1 file") + desc="Input brain/T1 file", + ) seg_file = File( argstr="%s", position=-2, mandatory=True, exists=True, - desc="Input presurf segmentation file") + desc="Input presurf segmentation file", + ) out_file = File( argstr="%s", position=-1, mandatory=True, exists=False, - desc="File to be written as output") + desc="File to be written as output", + ) # optional - keep_in = traits.Bool( - argstr="-keep-in", desc="Keep edits as found in input volume") + keep_in = traits.Bool(argstr="-keep-in", desc="Keep edits as found in input volume") class EditWMwithAsegOutputSpec(TraitedSpec): @@ -2937,13 +3250,14 @@ class EditWMwithAseg(FSCommand): >>> editwm.cmdline 'mri_edit_wm_with_aseg -keep-in T1.mgz norm.mgz aseg.mgz wm.asegedit.mgz' """ - _cmd = 'mri_edit_wm_with_aseg' + + _cmd = "mri_edit_wm_with_aseg" input_spec = EditWMwithAsegInputSpec output_spec = EditWMwithAsegOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) return outputs @@ -2952,57 +3266,61 @@ class ConcatenateLTAInputSpec(FSTraitedSpec): in_lta1 = File( exists=True, mandatory=True, - argstr='%s', + argstr="%s", position=-3, - desc='maps some src1 to dst1') + desc="maps some src1 to dst1", + ) in_lta2 = traits.Either( File(exists=True), - 'identity.nofile', - argstr='%s', + "identity.nofile", + argstr="%s", position=-2, mandatory=True, - desc='maps dst1(src2) to dst2') + desc="maps dst1(src2) to dst2", + ) out_file = File( position=-1, - argstr='%s', + argstr="%s", hash_files=False, - name_source=['in_lta1'], - name_template='%s_concat', + name_source=["in_lta1"], + name_template="%s_concat", keep_extension=True, - desc='the combined LTA maps: src1 to dst2 = LTA2*LTA1') + desc="the combined LTA maps: src1 to dst2 = LTA2*LTA1", + ) # Inversion and transform type - invert_1 = traits.Bool( - argstr='-invert1', desc='invert in_lta1 before applying it') - invert_2 = traits.Bool( - argstr='-invert2', desc='invert in_lta2 before applying it') - invert_out = traits.Bool(argstr='-invertout', desc='invert output LTA') + invert_1 = traits.Bool(argstr="-invert1", desc="invert in_lta1 before applying it") + invert_2 = traits.Bool(argstr="-invert2", desc="invert in_lta2 before applying it") + invert_out = traits.Bool(argstr="-invertout", desc="invert output LTA") out_type = traits.Enum( - 'VOX2VOX', 'RAS2RAS', argstr='-out_type %d', desc='set final LTA type') + "VOX2VOX", "RAS2RAS", argstr="-out_type %d", desc="set final LTA type" + ) # Talairach options tal_source_file = File( exists=True, - argstr='-tal %s', + argstr="-tal %s", position=-5, - requires=['tal_template_file'], - desc='if in_lta2 is talairach.xfm, specify source for talairach') + requires=["tal_template_file"], + desc="if in_lta2 is talairach.xfm, specify source for talairach", + ) tal_template_file = 
File( exists=True, - argstr='%s', + argstr="%s", position=-4, - requires=['tal_source_file'], - desc='if in_lta2 is talairach.xfm, specify template for talairach') + requires=["tal_source_file"], + desc="if in_lta2 is talairach.xfm, specify template for talairach", + ) - subject = traits.Str( - argstr='-subject %s', desc='set subject in output LTA') + subject = traits.Str(argstr="-subject %s", desc="set subject in output LTA") # Note rmsdiff would be xor out_file, and would be most easily dealt with # in a new interface. -CJM 2017.10.05 class ConcatenateLTAOutputSpec(TraitedSpec): out_file = File( - exists=False, desc='the combined LTA maps: src1 to dst2 = LTA2*LTA1') + exists=False, desc="the combined LTA maps: src1 to dst2 = LTA2*LTA1" + ) class ConcatenateLTA(FSCommand): @@ -3035,11 +3353,11 @@ class ConcatenateLTA(FSCommand): 'mri_concatenate_lta -invert1 -out_type 1 lta1.lta identity.nofile inv1.lta' """ - _cmd = 'mri_concatenate_lta' + _cmd = "mri_concatenate_lta" input_spec = ConcatenateLTAInputSpec output_spec = ConcatenateLTAOutputSpec def _format_arg(self, name, spec, value): - if name == 'out_type': - value = {'VOX2VOX': 0, 'RAS2RAS': 1}[value] + if name == "out_type": + value = {"VOX2VOX": 0, "RAS2RAS": 1}[value] return super(ConcatenateLTA, self)._format_arg(name, spec, value) diff --git a/nipype/interfaces/freesurfer/registration.py b/nipype/interfaces/freesurfer/registration.py index 1b919485e2..c93f813088 100644 --- a/nipype/interfaces/freesurfer/registration.py +++ b/nipype/interfaces/freesurfer/registration.py @@ -10,32 +10,37 @@ from ... import logging from ...utils.filemanip import split_filename, copyfile -from .base import (FSCommand, FSTraitedSpec, FSScriptCommand, - FSScriptOutputSpec, FSCommandOpenMP, FSTraitedSpecOpenMP) -from ..base import (isdefined, TraitedSpec, File, traits, Directory) +from .base import ( + FSCommand, + FSTraitedSpec, + FSScriptCommand, + FSScriptOutputSpec, + FSCommandOpenMP, + FSTraitedSpecOpenMP, +) +from ..base import isdefined, TraitedSpec, File, traits, Directory -__docformat__ = 'restructuredtext' -iflogger = logging.getLogger('nipype.interface') +__docformat__ = "restructuredtext" +iflogger = logging.getLogger("nipype.interface") class MPRtoMNI305InputSpec(FSTraitedSpec): # environment variables, required # usedefault=True is hack for on_trait_change in __init__ reference_dir = Directory( - "", exists=True, mandatory=True, usedefault=True, desc="TODO") - target = traits.String( - "", mandatory=True, usedefault=True, desc="input atlas file") + "", exists=True, mandatory=True, usedefault=True, desc="TODO" + ) + target = traits.String("", mandatory=True, usedefault=True, desc="input atlas file") # required in_file = File( - argstr='%s', - usedefault=True, - desc="the input file prefix for MPRtoMNI305") + argstr="%s", usedefault=True, desc="the input file prefix for MPRtoMNI305" + ) class MPRtoMNI305OutputSpec(FSScriptOutputSpec): out_file = File( - exists=False, - desc="The output file '_to__t4_vox2vox.txt'") + exists=False, desc="The output file '_to__t4_vox2vox.txt'" + ) class MPRtoMNI305(FSScriptCommand): @@ -61,26 +66,25 @@ class MPRtoMNI305(FSScriptCommand): >>> mprtomni305.run() # doctest: +SKIP """ + _cmd = "mpr2mni305" input_spec = MPRtoMNI305InputSpec output_spec = MPRtoMNI305OutputSpec def __init__(self, **inputs): super(MPRtoMNI305, self).__init__(**inputs) - self.inputs.on_trait_change(self._environ_update, 'target') - self.inputs.on_trait_change(self._environ_update, 'reference_dir') + 
self.inputs.on_trait_change(self._environ_update, "target") + self.inputs.on_trait_change(self._environ_update, "reference_dir") def _format_arg(self, opt, spec, val): - if opt in ['target', 'reference_dir']: + if opt in ["target", "reference_dir"]: return "" - elif opt == 'in_file': + elif opt == "in_file": _, retval, ext = split_filename(val) # Need to copy file to working cache directory! copyfile( - val, - os.path.abspath(retval + ext), - copy=True, - hashmethod='content') + val, os.path.abspath(retval + ext), copy=True, hashmethod="content" + ) return retval return super(MPRtoMNI305, self)._format_arg(opt, spec, val) @@ -88,7 +92,7 @@ def _environ_update(self): # refdir = os.path.join(Info.home(), val) refdir = self.inputs.reference_dir target = self.inputs.target - self.inputs.environ['MPR2MNI305_TARGET'] = target + self.inputs.environ["MPR2MNI305_TARGET"] = target self.inputs.environ["REFDIR"] = refdir def _get_fname(self, fname): @@ -96,44 +100,40 @@ def _get_fname(self, fname): def _list_outputs(self): outputs = super(MPRtoMNI305, self)._list_outputs() - fullname = "_".join([ - self._get_fname(self.inputs.in_file), "to", self.inputs.target, - "t4", "vox2vox.txt" - ]) - outputs['out_file'] = os.path.abspath(fullname) + fullname = "_".join( + [ + self._get_fname(self.inputs.in_file), + "to", + self.inputs.target, + "t4", + "vox2vox.txt", + ] + ) + outputs["out_file"] = os.path.abspath(fullname) return outputs class RegisterAVItoTalairachInputSpec(FSTraitedSpec): in_file = File( - argstr='%s', - exists=True, - mandatory=True, - position=0, - desc="The input file") + argstr="%s", exists=True, mandatory=True, position=0, desc="The input file" + ) target = File( - argstr='%s', - exists=True, - mandatory=True, - position=1, - desc="The target file") + argstr="%s", exists=True, mandatory=True, position=1, desc="The target file" + ) vox2vox = File( - argstr='%s', - exists=True, - mandatory=True, - position=2, - desc="The vox2vox file") + argstr="%s", exists=True, mandatory=True, position=2, desc="The vox2vox file" + ) out_file = File( - 'talairach.auto.xfm', + "talairach.auto.xfm", usedefault=True, - argstr='%s', + argstr="%s", position=3, - desc="The transform output") + desc="The transform output", + ) class RegisterAVItoTalairachOutputSpec(FSScriptOutputSpec): - out_file = File( - exists=False, desc="The output file for RegisterAVItoTalairach") + out_file = File(exists=False, desc="The output file for RegisterAVItoTalairach") class RegisterAVItoTalairach(FSScriptCommand): @@ -168,50 +168,43 @@ class RegisterAVItoTalairach(FSScriptCommand): >>> register.run() # doctest: +SKIP """ + _cmd = "avi2talxfm" input_spec = RegisterAVItoTalairachInputSpec output_spec = RegisterAVItoTalairachOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) return outputs class EMRegisterInputSpec(FSTraitedSpecOpenMP): # required in_file = File( - argstr="%s", - exists=True, - mandatory=True, - position=-3, - desc="in brain volume") + argstr="%s", exists=True, mandatory=True, position=-3, desc="in brain volume" + ) template = File( - argstr="%s", - exists=True, - mandatory=True, - position=-2, - desc="template gca") + argstr="%s", exists=True, mandatory=True, position=-2, desc="template gca" + ) out_file = File( argstr="%s", exists=False, - name_source=['in_file'], + name_source=["in_file"], name_template="%s_transform.lta", hash_files=False, keep_extension=False, 
position=-1, - desc="output transform") + desc="output transform", + ) # optional - skull = traits.Bool( - argstr="-skull", desc="align to atlas containing skull (uns=5)") + skull = traits.Bool(argstr="-skull", desc="align to atlas containing skull (uns=5)") mask = File(argstr="-mask %s", exists=True, desc="use volume as a mask") nbrspacing = traits.Int( argstr="-uns %d", - desc= - "align to atlas containing skull setting unknown_nbr_spacing = nbrspacing" + desc="align to atlas containing skull setting unknown_nbr_spacing = nbrspacing", ) - transform = File( - argstr="-t %s", exists=True, desc="Previously computed transform") + transform = File(argstr="-t %s", exists=True, desc="Previously computed transform") class EMRegisterOutputSpec(TraitedSpec): @@ -233,13 +226,14 @@ class EMRegister(FSCommandOpenMP): >>> register.cmdline 'mri_em_register -uns 9 -skull norm.mgz aseg.mgz norm_transform.lta' """ - _cmd = 'mri_em_register' + + _cmd = "mri_em_register" input_spec = EMRegisterInputSpec output_spec = EMRegisterOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) return outputs @@ -251,42 +245,44 @@ class RegisterInputSpec(FSTraitedSpec): mandatory=True, position=-3, copyfile=True, - desc="Surface to register, often {hemi}.sphere") + desc="Surface to register, often {hemi}.sphere", + ) target = File( argstr="%s", exists=True, mandatory=True, position=-2, - desc="The data to register to. In normal recon-all usage, " + - "this is a template file for average surface.") + desc="The data to register to. In normal recon-all usage, " + + "this is a template file for average surface.", + ) in_sulc = File( exists=True, mandatory=True, copyfile=True, - desc= - "Undocumented mandatory input file ${SUBJECTS_DIR}/surf/{hemisphere}.sulc " + desc="Undocumented mandatory input file ${SUBJECTS_DIR}/surf/{hemisphere}.sulc ", ) out_file = File( argstr="%s", exists=False, position=-1, genfile=True, - desc="Output surface file to capture registration") + desc="Output surface file to capture registration", + ) # optional curv = traits.Bool( argstr="-curv", - requires=['in_smoothwm'], - desc="Use smoothwm curvature for final alignment") + requires=["in_smoothwm"], + desc="Use smoothwm curvature for final alignment", + ) in_smoothwm = File( exists=True, copyfile=True, - desc= - "Undocumented input file ${SUBJECTS_DIR}/surf/{hemisphere}.smoothwm ") + desc="Undocumented input file ${SUBJECTS_DIR}/surf/{hemisphere}.smoothwm ", + ) class RegisterOutputSpec(TraitedSpec): - out_file = File( - exists=False, desc="Output surface file to capture registration") + out_file = File(exists=False, desc="Output surface file to capture registration") class Register(FSCommand): @@ -306,26 +302,26 @@ class Register(FSCommand): 'mris_register -curv lh.pial aseg.mgz lh.pial.reg' """ - _cmd = 'mris_register' + _cmd = "mris_register" input_spec = RegisterInputSpec output_spec = RegisterOutputSpec def _format_arg(self, opt, spec, val): - if opt == 'curv': + if opt == "curv": return spec.argstr return super(Register, self)._format_arg(opt, spec, val) def _gen_filename(self, name): - if name == 'out_file': + if name == "out_file": return self._list_outputs()[name] return None def _list_outputs(self): outputs = self.output_spec().get() if isdefined(self.inputs.out_file): - outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) else: - 
outputs['out_file'] = os.path.abspath(self.inputs.in_surf) + '.reg' + outputs["out_file"] = os.path.abspath(self.inputs.in_surf) + ".reg" return outputs @@ -336,14 +332,12 @@ class PaintInputSpec(FSTraitedSpec): exists=True, mandatory=True, position=-2, - desc="Surface file with grid (vertices) onto which the " + - "template data is to be sampled or 'painted'") + desc="Surface file with grid (vertices) onto which the " + + "template data is to be sampled or 'painted'", + ) template = File( - argstr="%s", - exists=True, - mandatory=True, - position=-3, - desc="Template file") + argstr="%s", exists=True, mandatory=True, position=-3, desc="Template file" + ) # optional template_param = traits.Int(desc="Frame number of the input template") averages = traits.Int(argstr="-a %d", desc="Average curvature patterns") @@ -353,17 +347,17 @@ class PaintInputSpec(FSTraitedSpec): position=-1, name_template="%s.avg_curv", hash_files=False, - name_source=['in_surf'], + name_source=["in_surf"], keep_extension=False, - desc="File containing a surface-worth of per-vertex values, " + - "saved in 'curvature' format.") + desc="File containing a surface-worth of per-vertex values, " + + "saved in 'curvature' format.", + ) class PaintOutputSpec(TraitedSpec): out_file = File( exists=False, - desc= - "File containing a surface-worth of per-vertex values, saved in 'curvature' format." + desc="File containing a surface-worth of per-vertex values, saved in 'curvature' format.", ) @@ -387,150 +381,166 @@ class Paint(FSCommand): 'mrisp_paint -a 5 aseg.mgz lh.pial lh.avg_curv' """ - _cmd = 'mrisp_paint' + _cmd = "mrisp_paint" input_spec = PaintInputSpec output_spec = PaintOutputSpec def _format_arg(self, opt, spec, val): - if opt == 'template': + if opt == "template": if isdefined(self.inputs.template_param): - return spec.argstr % ( - val + '#' + str(self.inputs.template_param)) + return spec.argstr % (val + "#" + str(self.inputs.template_param)) return super(Paint, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) return outputs class MRICoregInputSpec(FSTraitedSpec): source_file = File( - argstr='--mov %s', - desc='source file to be registered', + argstr="--mov %s", + desc="source file to be registered", mandatory=True, - copyfile=False) + copyfile=False, + ) reference_file = File( - argstr='--ref %s', - desc='reference (target) file', + argstr="--ref %s", + desc="reference (target) file", mandatory=True, copyfile=False, - xor=['subject_id']) + xor=["subject_id"], + ) out_lta_file = traits.Either( True, File, - argstr='--lta %s', + argstr="--lta %s", default=True, usedefault=True, - desc='output registration file (LTA format)') + desc="output registration file (LTA format)", + ) out_reg_file = traits.Either( - True, - File, - argstr='--regdat %s', - desc='output registration file (REG format)') + True, File, argstr="--regdat %s", desc="output registration file (REG format)" + ) out_params_file = traits.Either( - True, File, argstr='--params %s', desc='output parameters file') + True, File, argstr="--params %s", desc="output parameters file" + ) subjects_dir = Directory( - exists=True, argstr='--sd %s', desc='FreeSurfer SUBJECTS_DIR') + exists=True, argstr="--sd %s", desc="FreeSurfer SUBJECTS_DIR" + ) subject_id = traits.Str( - argstr='--s %s', + argstr="--s %s", position=1, mandatory=True, - xor=['reference_file'], - requires=['subjects_dir'], - 
desc='freesurfer subject ID (implies ``reference_mask == ' - 'aparc+aseg.mgz`` unless otherwise specified)') + xor=["reference_file"], + requires=["subjects_dir"], + desc="freesurfer subject ID (implies ``reference_mask == " + "aparc+aseg.mgz`` unless otherwise specified)", + ) dof = traits.Enum( - 6, - 9, - 12, - argstr='--dof %d', - desc='number of transform degrees of freedom') + 6, 9, 12, argstr="--dof %d", desc="number of transform degrees of freedom" + ) reference_mask = traits.Either( False, traits.Str, - argstr='--ref-mask %s', + argstr="--ref-mask %s", position=2, - desc='mask reference volume with given mask, or None if ``False``') + desc="mask reference volume with given mask, or None if ``False``", + ) source_mask = traits.Str( - argstr='--mov-mask', desc='mask source file with given mask') - num_threads = traits.Int( - argstr='--threads %d', desc='number of OpenMP threads') + argstr="--mov-mask", desc="mask source file with given mask" + ) + num_threads = traits.Int(argstr="--threads %d", desc="number of OpenMP threads") no_coord_dithering = traits.Bool( - argstr='--no-coord-dither', desc='turn off coordinate dithering') + argstr="--no-coord-dither", desc="turn off coordinate dithering" + ) no_intensity_dithering = traits.Bool( - argstr='--no-intensity-dither', desc='turn off intensity dithering') + argstr="--no-intensity-dither", desc="turn off intensity dithering" + ) sep = traits.List( - argstr='--sep %s...', + argstr="--sep %s...", minlen=1, maxlen=2, - desc='set spatial scales, in voxels (default [2, 4])') + desc="set spatial scales, in voxels (default [2, 4])", + ) initial_translation = traits.Tuple( traits.Float, traits.Float, traits.Float, - argstr='--trans %g %g %g', - desc='initial translation in mm (implies no_cras0)') + argstr="--trans %g %g %g", + desc="initial translation in mm (implies no_cras0)", + ) initial_rotation = traits.Tuple( traits.Float, traits.Float, traits.Float, - argstr='--rot %g %g %g', - desc='initial rotation in degrees') + argstr="--rot %g %g %g", + desc="initial rotation in degrees", + ) initial_scale = traits.Tuple( traits.Float, traits.Float, traits.Float, - argstr='--scale %g %g %g', - desc='initial scale') + argstr="--scale %g %g %g", + desc="initial scale", + ) initial_shear = traits.Tuple( traits.Float, traits.Float, traits.Float, - argstr='--shear %g %g %g', - desc='initial shear (Hxy, Hxz, Hyz)') + argstr="--shear %g %g %g", + desc="initial shear (Hxy, Hxz, Hyz)", + ) no_cras0 = traits.Bool( - argstr='--no-cras0', - desc='do not set translation parameters to align ' - 'centers of source and reference files') + argstr="--no-cras0", + desc="do not set translation parameters to align " + "centers of source and reference files", + ) max_iters = traits.Range( - low=1, argstr='--nitersmax %d', desc='maximum iterations (default: 4)') + low=1, argstr="--nitersmax %d", desc="maximum iterations (default: 4)" + ) ftol = traits.Float( - argstr='--ftol %e', desc='floating-point tolerance (default=1e-7)') - linmintol = traits.Float(argstr='--linmintol %e') + argstr="--ftol %e", desc="floating-point tolerance (default=1e-7)" + ) + linmintol = traits.Float(argstr="--linmintol %e") saturation_threshold = traits.Range( low=0.0, high=100.0, - argstr='--sat %g', - desc='saturation threshold (default=9.999)') + argstr="--sat %g", + desc="saturation threshold (default=9.999)", + ) conform_reference = traits.Bool( - argstr='--conf-ref', desc='conform reference without rescaling') - no_brute_force = traits.Bool( - argstr='--no-bf', desc='do not brute force 
search') + argstr="--conf-ref", desc="conform reference without rescaling" + ) + no_brute_force = traits.Bool(argstr="--no-bf", desc="do not brute force search") brute_force_limit = traits.Float( - argstr='--bf-lim %g', - xor=['no_brute_force'], - desc='constrain brute force search to +/- lim') + argstr="--bf-lim %g", + xor=["no_brute_force"], + desc="constrain brute force search to +/- lim", + ) brute_force_samples = traits.Int( - argstr='--bf-nsamp %d', - xor=['no_brute_force'], - desc='number of samples in brute force search') + argstr="--bf-nsamp %d", + xor=["no_brute_force"], + desc="number of samples in brute force search", + ) no_smooth = traits.Bool( - argstr='--no-smooth', - desc='do not apply smoothing to either reference or source file') + argstr="--no-smooth", + desc="do not apply smoothing to either reference or source file", + ) ref_fwhm = traits.Float( - argstr='--ref-fwhm', desc='apply smoothing to reference file') + argstr="--ref-fwhm", desc="apply smoothing to reference file" + ) source_oob = traits.Bool( - argstr='--mov-oob', - desc='count source voxels that are out-of-bounds as 0') + argstr="--mov-oob", desc="count source voxels that are out-of-bounds as 0" + ) # Skipping mat2par class MRICoregOutputSpec(TraitedSpec): - out_reg_file = File(exists=True, desc='output registration file') - out_lta_file = File(exists=True, desc='output LTA-style registration file') - out_params_file = File(exists=True, desc='output parameters file') + out_reg_file = File(exists=True, desc="output registration file") + out_lta_file = File(exists=True, desc="output LTA-style registration file") + out_params_file = File(exists=True, desc="output parameters file") class MRICoreg(FSCommand): @@ -569,16 +579,15 @@ class MRICoreg(FSCommand): 'mri_coreg --s fsaverage --no-ref-mask --lta .../registration.lta --sep 4 --sep 5 --mov moving1.nii --sd .' 
""" - _cmd = 'mri_coreg' + _cmd = "mri_coreg" input_spec = MRICoregInputSpec output_spec = MRICoregOutputSpec def _format_arg(self, opt, spec, val): - if opt in ('out_reg_file', 'out_lta_file', - 'out_params_file') and val is True: + if opt in ("out_reg_file", "out_lta_file", "out_params_file") and val is True: val = self._list_outputs()[opt] - elif opt == 'reference_mask' and val is False: - return '--no-ref-mask' + elif opt == "reference_mask" and val is False: + return "--no-ref-mask" return super(MRICoreg, self)._format_arg(opt, spec, val) def _list_outputs(self): @@ -587,19 +596,19 @@ def _list_outputs(self): out_lta_file = self.inputs.out_lta_file if isdefined(out_lta_file): if out_lta_file is True: - out_lta_file = 'registration.lta' - outputs['out_lta_file'] = os.path.abspath(out_lta_file) + out_lta_file = "registration.lta" + outputs["out_lta_file"] = os.path.abspath(out_lta_file) out_reg_file = self.inputs.out_reg_file if isdefined(out_reg_file): if out_reg_file is True: - out_reg_file = 'registration.dat' - outputs['out_reg_file'] = os.path.abspath(out_reg_file) + out_reg_file = "registration.dat" + outputs["out_reg_file"] = os.path.abspath(out_reg_file) out_params_file = self.inputs.out_params_file if isdefined(out_params_file): if out_params_file is True: - out_params_file = 'registration.par' - outputs['out_params_file'] = os.path.abspath(out_params_file) + out_params_file = "registration.par" + outputs["out_params_file"] = os.path.abspath(out_params_file) return outputs diff --git a/nipype/interfaces/freesurfer/tests/test_BBRegister.py b/nipype/interfaces/freesurfer/tests/test_BBRegister.py index 81a67742e2..b9ed6a8bcd 100644 --- a/nipype/interfaces/freesurfer/tests/test_BBRegister.py +++ b/nipype/interfaces/freesurfer/tests/test_BBRegister.py @@ -3,111 +3,47 @@ def test_BBRegister_inputs(): input_map_5_3 = dict( - args=dict(argstr='%s', ), - contrast_type=dict( - argstr='--%s', - mandatory=True, - ), - dof=dict(argstr='--%d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - epi_mask=dict(argstr='--epi-mask', ), - fsldof=dict(argstr='--fsl-dof %d', ), - init=dict( - argstr='--init-%s', - mandatory=True, - xor=['init_reg_file'], - ), - init_cost_file=dict(argstr='--initcost %s', ), - init_reg_file=dict( - argstr='--init-reg %s', - mandatory=True, - xor=['init'], - ), - intermediate_file=dict(argstr='--int %s', ), - out_fsl_file=dict(argstr='--fslmat %s', ), - out_lta_file=dict( - argstr='--lta %s', - min_ver='5.2.0', - ), - out_reg_file=dict( - argstr='--reg %s', - genfile=True, - ), - reg_frame=dict( - argstr='--frame %d', - xor=['reg_middle_frame'], - ), - reg_middle_frame=dict( - argstr='--mid-frame', - xor=['reg_frame'], - ), - registered_file=dict(argstr='--o %s', ), - source_file=dict( - argstr='--mov %s', - copyfile=False, - mandatory=True, - ), - spm_nifti=dict(argstr='--spm-nii', ), - subject_id=dict( - argstr='--s %s', - mandatory=True, - ), + args=dict(argstr="%s",), + contrast_type=dict(argstr="--%s", mandatory=True,), + dof=dict(argstr="--%d",), + environ=dict(nohash=True, usedefault=True,), + epi_mask=dict(argstr="--epi-mask",), + fsldof=dict(argstr="--fsl-dof %d",), + init=dict(argstr="--init-%s", mandatory=True, xor=["init_reg_file"],), + init_cost_file=dict(argstr="--initcost %s",), + init_reg_file=dict(argstr="--init-reg %s", mandatory=True, xor=["init"],), + intermediate_file=dict(argstr="--int %s",), + out_fsl_file=dict(argstr="--fslmat %s",), + out_lta_file=dict(argstr="--lta %s", min_ver="5.2.0",), + out_reg_file=dict(argstr="--reg %s", 
genfile=True,), + reg_frame=dict(argstr="--frame %d", xor=["reg_middle_frame"],), + reg_middle_frame=dict(argstr="--mid-frame", xor=["reg_frame"],), + registered_file=dict(argstr="--o %s",), + source_file=dict(argstr="--mov %s", copyfile=False, mandatory=True,), + spm_nifti=dict(argstr="--spm-nii",), + subject_id=dict(argstr="--s %s", mandatory=True,), subjects_dir=dict(), ) input_map_6_0 = dict( - args=dict(argstr='%s', ), - contrast_type=dict( - argstr='--%s', - mandatory=True, - ), - dof=dict(argstr='--%d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - epi_mask=dict(argstr='--epi-mask', ), - fsldof=dict(argstr='--fsl-dof %d', ), - init=dict( - argstr='--init-%s', - xor=['init_reg_file'], - ), - init_reg_file=dict( - argstr='--init-reg %s', - xor=['init'], - ), - init_cost_file=dict(argstr='--initcost %s', ), - intermediate_file=dict(argstr='--int %s', ), - out_fsl_file=dict(argstr='--fslmat %s', ), - out_lta_file=dict( - argstr='--lta %s', - min_ver='5.2.0', - ), - out_reg_file=dict( - argstr='--reg %s', - genfile=True, - ), - reg_frame=dict( - argstr='--frame %d', - xor=['reg_middle_frame'], - ), - reg_middle_frame=dict( - argstr='--mid-frame', - xor=['reg_frame'], - ), - registered_file=dict(argstr='--o %s', ), - source_file=dict( - argstr='--mov %s', - copyfile=False, - mandatory=True, - ), - spm_nifti=dict(argstr='--spm-nii', ), - subject_id=dict( - argstr='--s %s', - mandatory=True, - ), + args=dict(argstr="%s",), + contrast_type=dict(argstr="--%s", mandatory=True,), + dof=dict(argstr="--%d",), + environ=dict(nohash=True, usedefault=True,), + epi_mask=dict(argstr="--epi-mask",), + fsldof=dict(argstr="--fsl-dof %d",), + init=dict(argstr="--init-%s", xor=["init_reg_file"],), + init_reg_file=dict(argstr="--init-reg %s", xor=["init"],), + init_cost_file=dict(argstr="--initcost %s",), + intermediate_file=dict(argstr="--int %s",), + out_fsl_file=dict(argstr="--fslmat %s",), + out_lta_file=dict(argstr="--lta %s", min_ver="5.2.0",), + out_reg_file=dict(argstr="--reg %s", genfile=True,), + reg_frame=dict(argstr="--frame %d", xor=["reg_middle_frame"],), + reg_middle_frame=dict(argstr="--mid-frame", xor=["reg_frame"],), + registered_file=dict(argstr="--o %s",), + source_file=dict(argstr="--mov %s", copyfile=False, mandatory=True,), + spm_nifti=dict(argstr="--spm-nii",), + subject_id=dict(argstr="--s %s", mandatory=True,), subjects_dir=dict(), ) diff --git a/nipype/interfaces/freesurfer/tests/test_FSSurfaceCommand.py b/nipype/interfaces/freesurfer/tests/test_FSSurfaceCommand.py index bc34a8d7aa..6c377c9579 100644 --- a/nipype/interfaces/freesurfer/tests/test_FSSurfaceCommand.py +++ b/nipype/interfaces/freesurfer/tests/test_FSSurfaceCommand.py @@ -8,7 +8,7 @@ def test_FSSurfaceCommand_inputs(): input_map = dict( - args=dict(argstr='%s'), + args=dict(argstr="%s"), environ=dict(nohash=True, usedefault=True), subjects_dir=dict(), ) @@ -22,21 +22,25 @@ def test_FSSurfaceCommand_inputs(): @pytest.mark.skipif(fs.no_freesurfer(), reason="freesurfer is not installed") def test_associated_file(tmpdir): fssrc = FreeSurferSource( - subjects_dir=fs.Info.subjectsdir(), subject_id='fsaverage', hemi='lh') + subjects_dir=fs.Info.subjectsdir(), subject_id="fsaverage", hemi="lh" + ) fssrc.base_dir = tmpdir.strpath fssrc.resource_monitor = False fsavginfo = fssrc.run().outputs.get() # Pairs of white/pial files in the same directories - for white, pial in [('lh.white', 'lh.pial'), ('./lh.white', './lh.pial'), - (fsavginfo['white'], fsavginfo['pial'])]: + for white, pial in [ + ("lh.white", 
"lh.pial"), + ("./lh.white", "./lh.pial"), + (fsavginfo["white"], fsavginfo["pial"]), + ]: # Unspecified paths, possibly with missing hemisphere information, # are equivalent to using the same directory and hemisphere - for name in ('pial', 'lh.pial', pial): + for name in ("pial", "lh.pial", pial): assert FSSurfaceCommand._associated_file(white, name) == pial # With path information, no changes are made - for name in ('./pial', './lh.pial', fsavginfo['pial']): + for name in ("./pial", "./lh.pial", fsavginfo["pial"]): assert FSSurfaceCommand._associated_file(white, name) == name diff --git a/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py b/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py index f3841d3fc2..499d85a437 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py @@ -4,40 +4,24 @@ def test_AddXFormToHeader_inputs(): input_map = dict( - args=dict(argstr='%s', ), - copy_name=dict(argstr='-c', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - out_file=dict( - argstr='%s', - extensions=None, - position=-1, - usedefault=True, - ), + args=dict(argstr="%s",), + copy_name=dict(argstr="-c",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + out_file=dict(argstr="%s", extensions=None, position=-1, usedefault=True,), subjects_dir=dict(), - transform=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), - verbose=dict(argstr='-v', ), + transform=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + verbose=dict(argstr="-v",), ) inputs = AddXFormToHeader.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_AddXFormToHeader_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = AddXFormToHeader.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py b/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py index 9135472d60..2c0d0ad5ef 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py @@ -4,84 +4,39 @@ def test_Aparc2Aseg_inputs(): input_map = dict( - a2009s=dict(argstr='--a2009s', ), - args=dict(argstr='%s', ), - aseg=dict( - argstr='--aseg %s', - extensions=None, - ), + a2009s=dict(argstr="--a2009s",), + args=dict(argstr="%s",), + aseg=dict(argstr="--aseg %s", extensions=None,), copy_inputs=dict(), - ctxseg=dict( - argstr='--ctxseg %s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - filled=dict(extensions=None, ), - hypo_wm=dict(argstr='--hypo-as-wm', ), - label_wm=dict(argstr='--labelwm', ), - lh_annotation=dict( - extensions=None, - mandatory=True, - ), - lh_pial=dict( - extensions=None, - mandatory=True, - ), - lh_ribbon=dict( - extensions=None, - mandatory=True, - ), - lh_white=dict( - extensions=None, - mandatory=True, - ), - out_file=dict( - argstr='--o %s', - extensions=None, - mandatory=True, - ), - rh_annotation=dict( - extensions=None, - mandatory=True, - ), - rh_pial=dict( - extensions=None, - mandatory=True, - ), - rh_ribbon=dict( - extensions=None, - 
mandatory=True, - ), - rh_white=dict( - extensions=None, - mandatory=True, - ), - ribbon=dict( - extensions=None, - mandatory=True, - ), - rip_unknown=dict(argstr='--rip-unknown', ), - subject_id=dict( - argstr='--s %s', - mandatory=True, - usedefault=True, - ), + ctxseg=dict(argstr="--ctxseg %s", extensions=None,), + environ=dict(nohash=True, usedefault=True,), + filled=dict(extensions=None,), + hypo_wm=dict(argstr="--hypo-as-wm",), + label_wm=dict(argstr="--labelwm",), + lh_annotation=dict(extensions=None, mandatory=True,), + lh_pial=dict(extensions=None, mandatory=True,), + lh_ribbon=dict(extensions=None, mandatory=True,), + lh_white=dict(extensions=None, mandatory=True,), + out_file=dict(argstr="--o %s", extensions=None, mandatory=True,), + rh_annotation=dict(extensions=None, mandatory=True,), + rh_pial=dict(extensions=None, mandatory=True,), + rh_ribbon=dict(extensions=None, mandatory=True,), + rh_white=dict(extensions=None, mandatory=True,), + ribbon=dict(extensions=None, mandatory=True,), + rip_unknown=dict(argstr="--rip-unknown",), + subject_id=dict(argstr="--s %s", mandatory=True, usedefault=True,), subjects_dir=dict(), - volmask=dict(argstr='--volmask', ), + volmask=dict(argstr="--volmask",), ) inputs = Aparc2Aseg.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Aparc2Aseg_outputs(): - output_map = dict(out_file=dict( - argstr='%s', - extensions=None, - ), ) + output_map = dict(out_file=dict(argstr="%s", extensions=None,),) outputs = Aparc2Aseg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py b/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py index 1fde211396..295a376884 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py @@ -4,21 +4,10 @@ def test_Apas2Aseg_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='--i %s', - extensions=None, - mandatory=True, - ), - out_file=dict( - argstr='--o %s', - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="--i %s", extensions=None, mandatory=True,), + out_file=dict(argstr="--o %s", extensions=None, mandatory=True,), subjects_dir=dict(), ) inputs = Apas2Aseg.input_spec() @@ -26,11 +15,10 @@ def test_Apas2Aseg_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Apas2Aseg_outputs(): - output_map = dict(out_file=dict( - argstr='%s', - extensions=None, - ), ) + output_map = dict(out_file=dict(argstr="%s", extensions=None,),) outputs = Apas2Aseg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py b/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py index fb3a8d956f..44bd9eba25 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py @@ -4,58 +4,38 @@ def test_ApplyMask_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), - invert_xfm=dict(argstr='-invert', ), - 
keep_mask_deletion_edits=dict(argstr='-keep_mask_deletion_edits', ), - mask_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - mask_thresh=dict(argstr='-T %.4f', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + invert_xfm=dict(argstr="-invert",), + keep_mask_deletion_edits=dict(argstr="-keep_mask_deletion_edits",), + mask_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + mask_thresh=dict(argstr="-T %.4f",), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, hash_files=True, keep_extension=True, - name_source=['in_file'], - name_template='%s_masked', + name_source=["in_file"], + name_template="%s_masked", position=-1, ), subjects_dir=dict(), - transfer=dict(argstr='-transfer %d', ), - use_abs=dict(argstr='-abs', ), - xfm_file=dict( - argstr='-xform %s', - extensions=None, - ), - xfm_source=dict( - argstr='-lta_src %s', - extensions=None, - ), - xfm_target=dict( - argstr='-lta_dst %s', - extensions=None, - ), + transfer=dict(argstr="-transfer %d",), + use_abs=dict(argstr="-abs",), + xfm_file=dict(argstr="-xform %s", extensions=None,), + xfm_source=dict(argstr="-lta_src %s", extensions=None,), + xfm_target=dict(argstr="-lta_dst %s", extensions=None,), ) inputs = ApplyMask.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ApplyMask_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = ApplyMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py b/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py index 40b59358f5..1eedade07a 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py @@ -4,108 +4,151 @@ def test_ApplyVolTransform_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), fs_target=dict( - argstr='--fstarg', + argstr="--fstarg", mandatory=True, - requires=['reg_file'], - xor=('target_file', 'tal', 'fs_target'), + requires=["reg_file"], + xor=("target_file", "tal", "fs_target"), ), fsl_reg_file=dict( - argstr='--fsl %s', + argstr="--fsl %s", extensions=None, mandatory=True, - xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', - 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), - ), - interp=dict(argstr='--interp %s', ), - inverse=dict(argstr='--inv', ), - invert_morph=dict( - argstr='--inv-morph', - requires=['m3z_file'], - ), + xor=( + "reg_file", + "lta_file", + "lta_inv_file", + "fsl_reg_file", + "xfm_reg_file", + "reg_header", + "mni_152_reg", + "subject", + ), + ), + interp=dict(argstr="--interp %s",), + inverse=dict(argstr="--inv",), + invert_morph=dict(argstr="--inv-morph", requires=["m3z_file"],), lta_file=dict( - argstr='--lta %s', + argstr="--lta %s", extensions=None, mandatory=True, - xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', - 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), + xor=( + "reg_file", + "lta_file", + "lta_inv_file", + "fsl_reg_file", + "xfm_reg_file", + "reg_header", + "mni_152_reg", + "subject", + ), ), lta_inv_file=dict( - 
argstr='--lta-inv %s', + argstr="--lta-inv %s", extensions=None, mandatory=True, - xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', - 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), - ), - m3z_file=dict( - argstr='--m3z %s', - extensions=None, - ), + xor=( + "reg_file", + "lta_file", + "lta_inv_file", + "fsl_reg_file", + "xfm_reg_file", + "reg_header", + "mni_152_reg", + "subject", + ), + ), + m3z_file=dict(argstr="--m3z %s", extensions=None,), mni_152_reg=dict( - argstr='--regheader', + argstr="--regheader", mandatory=True, - xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', - 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), - ), - no_ded_m3z_path=dict( - argstr='--noDefM3zPath', - requires=['m3z_file'], - ), - no_resample=dict(argstr='--no-resample', ), + xor=( + "reg_file", + "lta_file", + "lta_inv_file", + "fsl_reg_file", + "xfm_reg_file", + "reg_header", + "mni_152_reg", + "subject", + ), + ), + no_ded_m3z_path=dict(argstr="--noDefM3zPath", requires=["m3z_file"],), + no_resample=dict(argstr="--no-resample",), reg_file=dict( - argstr='--reg %s', + argstr="--reg %s", extensions=None, mandatory=True, - xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', - 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), + xor=( + "reg_file", + "lta_file", + "lta_inv_file", + "fsl_reg_file", + "xfm_reg_file", + "reg_header", + "mni_152_reg", + "subject", + ), ), reg_header=dict( - argstr='--regheader', + argstr="--regheader", mandatory=True, - xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', - 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), + xor=( + "reg_file", + "lta_file", + "lta_inv_file", + "fsl_reg_file", + "xfm_reg_file", + "reg_header", + "mni_152_reg", + "subject", + ), ), source_file=dict( - argstr='--mov %s', - copyfile=False, - extensions=None, - mandatory=True, + argstr="--mov %s", copyfile=False, extensions=None, mandatory=True, ), subject=dict( - argstr='--s %s', + argstr="--s %s", mandatory=True, - xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', - 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), + xor=( + "reg_file", + "lta_file", + "lta_inv_file", + "fsl_reg_file", + "xfm_reg_file", + "reg_header", + "mni_152_reg", + "subject", + ), ), subjects_dir=dict(), tal=dict( - argstr='--tal', - mandatory=True, - xor=('target_file', 'tal', 'fs_target'), + argstr="--tal", mandatory=True, xor=("target_file", "tal", "fs_target"), ), - tal_resolution=dict(argstr='--talres %.10f', ), + tal_resolution=dict(argstr="--talres %.10f",), target_file=dict( - argstr='--targ %s', + argstr="--targ %s", extensions=None, mandatory=True, - xor=('target_file', 'tal', 'fs_target'), - ), - transformed_file=dict( - argstr='--o %s', - extensions=None, - genfile=True, + xor=("target_file", "tal", "fs_target"), ), + transformed_file=dict(argstr="--o %s", extensions=None, genfile=True,), xfm_reg_file=dict( - argstr='--xfm %s', + argstr="--xfm %s", extensions=None, mandatory=True, - xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', - 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), + xor=( + "reg_file", + "lta_file", + "lta_inv_file", + "fsl_reg_file", + "xfm_reg_file", + "reg_header", + "mni_152_reg", + "subject", + ), ), ) inputs = ApplyVolTransform.input_spec() @@ -113,8 +156,10 @@ def test_ApplyVolTransform_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ApplyVolTransform_outputs(): - 
output_map = dict(transformed_file=dict(extensions=None, ), ) + output_map = dict(transformed_file=dict(extensions=None,),) outputs = ApplyVolTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py b/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py index 1e81688dcb..28211d63db 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py @@ -4,72 +4,46 @@ def test_Binarize_inputs(): input_map = dict( - abs=dict(argstr='--abs', ), - args=dict(argstr='%s', ), - bin_col_num=dict(argstr='--bincol', ), - bin_val=dict(argstr='--binval %d', ), - bin_val_not=dict(argstr='--binvalnot %d', ), - binary_file=dict( - argstr='--o %s', - extensions=None, - genfile=True, - ), - count_file=dict(argstr='--count %s', ), - dilate=dict(argstr='--dilate %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - erode=dict(argstr='--erode %d', ), - erode2d=dict(argstr='--erode2d %d', ), - frame_no=dict(argstr='--frame %s', ), - in_file=dict( - argstr='--i %s', - copyfile=False, - extensions=None, - mandatory=True, - ), - invert=dict(argstr='--inv', ), - mask_file=dict( - argstr='--mask maskvol', - extensions=None, - ), - mask_thresh=dict(argstr='--mask-thresh %f', ), - match=dict(argstr='--match %d...', ), - max=dict( - argstr='--max %f', - xor=['wm_ven_csf'], - ), - merge_file=dict( - argstr='--merge %s', - extensions=None, - ), - min=dict( - argstr='--min %f', - xor=['wm_ven_csf'], - ), - out_type=dict(argstr='', ), - rmax=dict(argstr='--rmax %f', ), - rmin=dict(argstr='--rmin %f', ), + abs=dict(argstr="--abs",), + args=dict(argstr="%s",), + bin_col_num=dict(argstr="--bincol",), + bin_val=dict(argstr="--binval %d",), + bin_val_not=dict(argstr="--binvalnot %d",), + binary_file=dict(argstr="--o %s", extensions=None, genfile=True,), + count_file=dict(argstr="--count %s",), + dilate=dict(argstr="--dilate %d",), + environ=dict(nohash=True, usedefault=True,), + erode=dict(argstr="--erode %d",), + erode2d=dict(argstr="--erode2d %d",), + frame_no=dict(argstr="--frame %s",), + in_file=dict(argstr="--i %s", copyfile=False, extensions=None, mandatory=True,), + invert=dict(argstr="--inv",), + mask_file=dict(argstr="--mask maskvol", extensions=None,), + mask_thresh=dict(argstr="--mask-thresh %f",), + match=dict(argstr="--match %d...",), + max=dict(argstr="--max %f", xor=["wm_ven_csf"],), + merge_file=dict(argstr="--merge %s", extensions=None,), + min=dict(argstr="--min %f", xor=["wm_ven_csf"],), + out_type=dict(argstr="",), + rmax=dict(argstr="--rmax %f",), + rmin=dict(argstr="--rmin %f",), subjects_dir=dict(), - ventricles=dict(argstr='--ventricles', ), - wm=dict(argstr='--wm', ), - wm_ven_csf=dict( - argstr='--wm+vcsf', - xor=['min', 'max'], - ), - zero_edges=dict(argstr='--zero-edges', ), - zero_slice_edge=dict(argstr='--zero-slice-edges', ), + ventricles=dict(argstr="--ventricles",), + wm=dict(argstr="--wm",), + wm_ven_csf=dict(argstr="--wm+vcsf", xor=["min", "max"],), + zero_edges=dict(argstr="--zero-edges",), + zero_slice_edge=dict(argstr="--zero-slice-edges",), ) inputs = Binarize.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Binarize_outputs(): output_map = dict( - binary_file=dict(extensions=None, ), - count_file=dict(extensions=None, ), + binary_file=dict(extensions=None,), count_file=dict(extensions=None,), ) outputs = 
Binarize.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py b/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py index 92af7daff1..dfc1f86d97 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py @@ -4,65 +4,32 @@ def test_CALabel_inputs(): input_map = dict( - align=dict(argstr='-align', ), - args=dict(argstr='%s', ), - aseg=dict( - argstr='-aseg %s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-4, - ), - in_vol=dict( - argstr='-r %s', - extensions=None, - ), - intensities=dict( - argstr='-r %s', - extensions=None, - ), - label=dict( - argstr='-l %s', - extensions=None, - ), - no_big_ventricles=dict(argstr='-nobigventricles', ), + align=dict(argstr="-align",), + args=dict(argstr="%s",), + aseg=dict(argstr="-aseg %s", extensions=None,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-4,), + in_vol=dict(argstr="-r %s", extensions=None,), + intensities=dict(argstr="-r %s", extensions=None,), + label=dict(argstr="-l %s", extensions=None,), + no_big_ventricles=dict(argstr="-nobigventricles",), num_threads=dict(), - out_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - ), - prior=dict(argstr='-prior %.1f', ), - relabel_unlikely=dict(argstr='-relabel_unlikely %d %.1f', ), + out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), + prior=dict(argstr="-prior %.1f",), + relabel_unlikely=dict(argstr="-relabel_unlikely %d %.1f",), subjects_dir=dict(), - template=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - transform=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), + template=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + transform=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), ) inputs = CALabel.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CALabel_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = CALabel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py b/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py index 086fc8326d..84018f9a01 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py @@ -4,61 +4,35 @@ def test_CANormalize_inputs(): input_map = dict( - args=dict(argstr='%s', ), - atlas=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), - control_points=dict( - argstr='-c %s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-4, - ), - long_file=dict( - argstr='-long %s', - extensions=None, - ), - mask=dict( - argstr='-mask %s', - extensions=None, - ), + args=dict(argstr="%s",), + atlas=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + control_points=dict(argstr="-c %s", extensions=None,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-4,), + 
long_file=dict(argstr="-long %s", extensions=None,), + mask=dict(argstr="-mask %s", extensions=None,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, hash_files=False, keep_extension=True, - name_source=['in_file'], - name_template='%s_norm', + name_source=["in_file"], + name_template="%s_norm", position=-1, ), subjects_dir=dict(), - transform=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), + transform=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), ) inputs = CANormalize.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CANormalize_outputs(): output_map = dict( - control_points=dict(extensions=None, ), - out_file=dict(extensions=None, ), + control_points=dict(extensions=None,), out_file=dict(extensions=None,), ) outputs = CANormalize.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py b/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py index e26b0951e2..5ce22ecfa0 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py @@ -4,55 +4,31 @@ def test_CARegister_inputs(): input_map = dict( - A=dict(argstr='-A %d', ), - align=dict(argstr='-align-%s', ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), - invert_and_save=dict( - argstr='-invert-and-save', - position=-4, - ), - l_files=dict(argstr='-l %s', ), - levels=dict(argstr='-levels %d', ), - mask=dict( - argstr='-mask %s', - extensions=None, - ), - no_big_ventricles=dict(argstr='-nobigventricles', ), + A=dict(argstr="-A %d",), + align=dict(argstr="-align-%s",), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + invert_and_save=dict(argstr="-invert-and-save", position=-4,), + l_files=dict(argstr="-l %s",), + levels=dict(argstr="-levels %d",), + mask=dict(argstr="-mask %s", extensions=None,), + no_big_ventricles=dict(argstr="-nobigventricles",), num_threads=dict(), - out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - position=-1, - ), + out_file=dict(argstr="%s", extensions=None, genfile=True, position=-1,), subjects_dir=dict(), - template=dict( - argstr='%s', - extensions=None, - position=-2, - ), - transform=dict( - argstr='-T %s', - extensions=None, - ), + template=dict(argstr="%s", extensions=None, position=-2,), + transform=dict(argstr="-T %s", extensions=None,), ) inputs = CARegister.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CARegister_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = CARegister.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py b/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py index ef75f59c2a..8dcede267e 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py @@ -4,37 +4,28 @@ def test_CheckTalairachAlignment_inputs(): input_map = dict( - 
args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='-xfm %s', + argstr="-xfm %s", extensions=None, mandatory=True, position=-1, - xor=['subject'], - ), - subject=dict( - argstr='-subj %s', - mandatory=True, - position=-1, - xor=['in_file'], + xor=["subject"], ), + subject=dict(argstr="-subj %s", mandatory=True, position=-1, xor=["in_file"],), subjects_dir=dict(), - threshold=dict( - argstr='-T %.3f', - usedefault=True, - ), + threshold=dict(argstr="-T %.3f", usedefault=True,), ) inputs = CheckTalairachAlignment.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CheckTalairachAlignment_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = CheckTalairachAlignment.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py b/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py index baee02187b..c841be04ad 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py @@ -4,50 +4,36 @@ def test_Concatenate_inputs(): input_map = dict( - add_val=dict(argstr='--add %f', ), - args=dict(argstr='%s', ), - combine=dict(argstr='--combine', ), - concatenated_file=dict( - argstr='--o %s', - extensions=None, - genfile=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gmean=dict(argstr='--gmean %d', ), - in_files=dict( - argstr='--i %s...', - mandatory=True, - ), - keep_dtype=dict(argstr='--keep-datatype', ), - mask_file=dict( - argstr='--mask %s', - extensions=None, - ), - max_bonfcor=dict(argstr='--max-bonfcor', ), - max_index=dict(argstr='--max-index', ), - mean_div_n=dict(argstr='--mean-div-n', ), - multiply_by=dict(argstr='--mul %f', ), - multiply_matrix_file=dict( - argstr='--mtx %s', - extensions=None, - ), - paired_stats=dict(argstr='--paired-%s', ), - sign=dict(argstr='--%s', ), - sort=dict(argstr='--sort', ), - stats=dict(argstr='--%s', ), + add_val=dict(argstr="--add %f",), + args=dict(argstr="%s",), + combine=dict(argstr="--combine",), + concatenated_file=dict(argstr="--o %s", extensions=None, genfile=True,), + environ=dict(nohash=True, usedefault=True,), + gmean=dict(argstr="--gmean %d",), + in_files=dict(argstr="--i %s...", mandatory=True,), + keep_dtype=dict(argstr="--keep-datatype",), + mask_file=dict(argstr="--mask %s", extensions=None,), + max_bonfcor=dict(argstr="--max-bonfcor",), + max_index=dict(argstr="--max-index",), + mean_div_n=dict(argstr="--mean-div-n",), + multiply_by=dict(argstr="--mul %f",), + multiply_matrix_file=dict(argstr="--mtx %s", extensions=None,), + paired_stats=dict(argstr="--paired-%s",), + sign=dict(argstr="--%s",), + sort=dict(argstr="--sort",), + stats=dict(argstr="--%s",), subjects_dir=dict(), - vote=dict(argstr='--vote', ), + vote=dict(argstr="--vote",), ) inputs = Concatenate.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Concatenate_outputs(): - output_map = dict(concatenated_file=dict(extensions=None, ), ) + output_map = dict(concatenated_file=dict(extensions=None,),) outputs = Concatenate.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py b/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py index 07ea2e0d2f..b9750aecbf 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py @@ -4,48 +4,33 @@ def test_ConcatenateLTA_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_lta1=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), - in_lta2=dict( - argstr='%s', - mandatory=True, - position=-2, - ), - invert_1=dict(argstr='-invert1', ), - invert_2=dict(argstr='-invert2', ), - invert_out=dict(argstr='-invertout', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_lta1=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + in_lta2=dict(argstr="%s", mandatory=True, position=-2,), + invert_1=dict(argstr="-invert1",), + invert_2=dict(argstr="-invert2",), + invert_out=dict(argstr="-invertout",), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, hash_files=False, keep_extension=True, - name_source=['in_lta1'], - name_template='%s_concat', + name_source=["in_lta1"], + name_template="%s_concat", position=-1, ), - out_type=dict(argstr='-out_type %d', ), - subject=dict(argstr='-subject %s', ), + out_type=dict(argstr="-out_type %d",), + subject=dict(argstr="-subject %s",), subjects_dir=dict(), tal_source_file=dict( - argstr='-tal %s', + argstr="-tal %s", extensions=None, position=-5, - requires=['tal_template_file'], + requires=["tal_template_file"], ), tal_template_file=dict( - argstr='%s', - extensions=None, - position=-4, - requires=['tal_source_file'], + argstr="%s", extensions=None, position=-4, requires=["tal_source_file"], ), ) inputs = ConcatenateLTA.input_spec() @@ -53,8 +38,10 @@ def test_ConcatenateLTA_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ConcatenateLTA_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = ConcatenateLTA.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py b/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py index d074b8b803..68e450315d 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py @@ -4,57 +4,31 @@ def test_Contrast_inputs(): input_map = dict( - annotation=dict( - extensions=None, - mandatory=True, - ), - args=dict(argstr='%s', ), + annotation=dict(extensions=None, mandatory=True,), + args=dict(argstr="%s",), copy_inputs=dict(), - cortex=dict( - extensions=None, - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - hemisphere=dict( - argstr='--%s-only', - mandatory=True, - ), - orig=dict( - extensions=None, - mandatory=True, - ), - rawavg=dict( - extensions=None, - mandatory=True, - ), - subject_id=dict( - argstr='--s %s', - mandatory=True, - usedefault=True, - ), + cortex=dict(extensions=None, mandatory=True,), + environ=dict(nohash=True, usedefault=True,), + hemisphere=dict(argstr="--%s-only", mandatory=True,), + orig=dict(extensions=None, mandatory=True,), + rawavg=dict(extensions=None, mandatory=True,), + subject_id=dict(argstr="--s %s", mandatory=True, usedefault=True,), 
subjects_dir=dict(), - thickness=dict( - extensions=None, - mandatory=True, - ), - white=dict( - extensions=None, - mandatory=True, - ), + thickness=dict(extensions=None, mandatory=True,), + white=dict(extensions=None, mandatory=True,), ) inputs = Contrast.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Contrast_outputs(): output_map = dict( - out_contrast=dict(extensions=None, ), - out_log=dict(extensions=None, ), - out_stats=dict(extensions=None, ), + out_contrast=dict(extensions=None,), + out_log=dict(extensions=None,), + out_stats=dict(extensions=None,), ) outputs = Contrast.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py b/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py index 60351cb36e..906d961740 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py @@ -4,35 +4,29 @@ def test_Curvature_inputs(): input_map = dict( - args=dict(argstr='%s', ), - averages=dict(argstr='-a %d', ), + args=dict(argstr="%s",), + averages=dict(argstr="-a %d",), copy_input=dict(), - distances=dict(argstr='-distances %d %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), + distances=dict(argstr="-distances %d %d",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='%s', - copyfile=True, - extensions=None, - mandatory=True, - position=-2, + argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-2, ), - n=dict(argstr='-n', ), - save=dict(argstr='-w', ), + n=dict(argstr="-n",), + save=dict(argstr="-w",), subjects_dir=dict(), - threshold=dict(argstr='-thresh %.3f', ), + threshold=dict(argstr="-thresh %.3f",), ) inputs = Curvature.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Curvature_outputs(): output_map = dict( - out_gauss=dict(extensions=None, ), - out_mean=dict(extensions=None, ), + out_gauss=dict(extensions=None,), out_mean=dict(extensions=None,), ) outputs = Curvature.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py b/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py index 5750487216..3b69b41def 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py @@ -4,58 +4,35 @@ def test_CurvatureStats_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), copy_inputs=dict(), - curvfile1=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - curvfile2=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - hemisphere=dict( - argstr='%s', - mandatory=True, - position=-3, - ), - min_max=dict(argstr='-m', ), + curvfile1=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + curvfile2=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), + environ=dict(nohash=True, usedefault=True,), + hemisphere=dict(argstr="%s", mandatory=True, position=-3,), + min_max=dict(argstr="-m",), out_file=dict( - argstr='-o %s', + argstr="-o %s", extensions=None, hash_files=False, - name_source=['hemisphere'], - name_template='%s.curv.stats', - ), - subject_id=dict( - argstr='%s', - mandatory=True, - position=-4, - usedefault=True, 
+ name_source=["hemisphere"], + name_template="%s.curv.stats", ), + subject_id=dict(argstr="%s", mandatory=True, position=-4, usedefault=True,), subjects_dir=dict(), - surface=dict( - argstr='-F %s', - extensions=None, - ), - values=dict(argstr='-G', ), - write=dict(argstr='--writeCurvatureFiles', ), + surface=dict(argstr="-F %s", extensions=None,), + values=dict(argstr="-G",), + write=dict(argstr="--writeCurvatureFiles",), ) inputs = CurvatureStats.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CurvatureStats_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = CurvatureStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py index 8a25f9b33a..3eae700dc6 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py @@ -4,19 +4,16 @@ def test_DICOMConvert_inputs(): input_map = dict( - args=dict(argstr='%s', ), - base_output_dir=dict(mandatory=True, ), - dicom_dir=dict(mandatory=True, ), - dicom_info=dict(extensions=None, ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + base_output_dir=dict(mandatory=True,), + dicom_dir=dict(mandatory=True,), + dicom_info=dict(extensions=None,), + environ=dict(nohash=True, usedefault=True,), file_mapping=dict(), - ignore_single_slice=dict(requires=['dicom_info'], ), - out_type=dict(usedefault=True, ), - seq_list=dict(requires=['dicom_info'], ), - subject_dir_template=dict(usedefault=True, ), + ignore_single_slice=dict(requires=["dicom_info"],), + out_type=dict(usedefault=True,), + seq_list=dict(requires=["dicom_info"],), + subject_dir_template=dict(usedefault=True,), subject_id=dict(), subjects_dir=dict(), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py b/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py index ee33e3fcf2..d87052cebc 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py @@ -4,52 +4,35 @@ def test_EMRegister_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), - mask=dict( - argstr='-mask %s', - extensions=None, - ), - nbrspacing=dict(argstr='-uns %d', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + mask=dict(argstr="-mask %s", extensions=None,), + nbrspacing=dict(argstr="-uns %d",), num_threads=dict(), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, hash_files=False, keep_extension=False, - name_source=['in_file'], - name_template='%s_transform.lta', + name_source=["in_file"], + name_template="%s_transform.lta", position=-1, ), - skull=dict(argstr='-skull', ), + skull=dict(argstr="-skull",), subjects_dir=dict(), - template=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - transform=dict( - argstr='-t %s', - extensions=None, - ), + template=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + transform=dict(argstr="-t %s", extensions=None,), ) inputs = 
EMRegister.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_EMRegister_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = EMRegister.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py b/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py index 2794f3960a..725980b7ab 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py @@ -4,36 +4,13 @@ def test_EditWMwithAseg_inputs(): input_map = dict( - args=dict(argstr='%s', ), - brain_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-4, - ), - keep_in=dict(argstr='-keep-in', ), - out_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - ), - seg_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s",), + brain_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-4,), + keep_in=dict(argstr="-keep-in",), + out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), + seg_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), subjects_dir=dict(), ) inputs = EditWMwithAseg.input_spec() @@ -41,8 +18,10 @@ def test_EditWMwithAseg_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_EditWMwithAseg_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = EditWMwithAseg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py b/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py index 47084fc1a3..d31c9278bc 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py @@ -4,17 +4,9 @@ def test_EulerNumber_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), subjects_dir=dict(), ) inputs = EulerNumber.input_spec() @@ -22,8 +14,10 @@ def test_EulerNumber_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_EulerNumber_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = EulerNumber.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py b/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py index e664c9feb9..424d6bdb23 100644 --- 
a/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py @@ -4,22 +4,14 @@ def test_ExtractMainComponent_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, - name_source='in_file', - name_template='%s.maincmp', + name_source="in_file", + name_template="%s.maincmp", position=2, ), ) @@ -28,8 +20,10 @@ def test_ExtractMainComponent_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ExtractMainComponent_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = ExtractMainComponent.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py b/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py index dc7678e7e6..87f836e34b 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py @@ -4,11 +4,8 @@ def test_FSCommand_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), subjects_dir=dict(), ) inputs = FSCommand.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py b/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py index b4c56e8aeb..165191e96c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py @@ -4,11 +4,8 @@ def test_FSCommandOpenMP_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), num_threads=dict(), subjects_dir=dict(), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py b/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py index 39b16b845e..162962f578 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py @@ -4,11 +4,8 @@ def test_FSScriptCommand_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), subjects_dir=dict(), ) inputs = FSScriptCommand.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py b/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py index 098f20f8b1..3133f52445 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py @@ -4,22 +4,11 @@ def test_FitMSParams_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), flip_list=dict(), - in_files=dict( - argstr='%s', - mandatory=True, - 
position=-2, - ), - out_dir=dict( - argstr='%s', - genfile=True, - position=-1, - ), + in_files=dict(argstr="%s", mandatory=True, position=-2,), + out_dir=dict(argstr="%s", genfile=True, position=-1,), subjects_dir=dict(), te_list=dict(), tr_list=dict(), @@ -30,11 +19,13 @@ def test_FitMSParams_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FitMSParams_outputs(): output_map = dict( - pd_image=dict(extensions=None, ), - t1_image=dict(extensions=None, ), - t2star_image=dict(extensions=None, ), + pd_image=dict(extensions=None,), + t1_image=dict(extensions=None,), + t2star_image=dict(extensions=None,), ) outputs = FitMSParams.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py b/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py index a95e5bbc5a..bfdb140216 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py @@ -4,46 +4,19 @@ def test_FixTopology_inputs(): input_map = dict( - args=dict(argstr='%s', ), - copy_inputs=dict(mandatory=True, ), - environ=dict( - nohash=True, - usedefault=True, - ), - ga=dict(argstr='-ga', ), - hemisphere=dict( - argstr='%s', - mandatory=True, - position=-1, - ), - in_brain=dict( - extensions=None, - mandatory=True, - ), - in_inflated=dict( - extensions=None, - mandatory=True, - ), - in_orig=dict( - extensions=None, - mandatory=True, - ), - in_wm=dict( - extensions=None, - mandatory=True, - ), - mgz=dict(argstr='-mgz', ), - seed=dict(argstr='-seed %d', ), - sphere=dict( - argstr='-sphere %s', - extensions=None, - ), - subject_id=dict( - argstr='%s', - mandatory=True, - position=-2, - usedefault=True, - ), + args=dict(argstr="%s",), + copy_inputs=dict(mandatory=True,), + environ=dict(nohash=True, usedefault=True,), + ga=dict(argstr="-ga",), + hemisphere=dict(argstr="%s", mandatory=True, position=-1,), + in_brain=dict(extensions=None, mandatory=True,), + in_inflated=dict(extensions=None, mandatory=True,), + in_orig=dict(extensions=None, mandatory=True,), + in_wm=dict(extensions=None, mandatory=True,), + mgz=dict(argstr="-mgz",), + seed=dict(argstr="-seed %d",), + sphere=dict(argstr="-sphere %s", extensions=None,), + subject_id=dict(argstr="%s", mandatory=True, position=-2, usedefault=True,), subjects_dir=dict(), ) inputs = FixTopology.input_spec() @@ -51,8 +24,10 @@ def test_FixTopology_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FixTopology_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = FixTopology.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py b/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py index 7b5c940935..2ffc84eada 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py @@ -4,46 +4,25 @@ def test_FuseSegmentations_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_norms=dict( - argstr='-n %s', - mandatory=True, - ), - in_segmentations=dict( - argstr='-a %s', - mandatory=True, - ), - in_segmentations_noCC=dict( - argstr='-c %s', - 
mandatory=True, - ), - out_file=dict( - extensions=None, - mandatory=True, - position=-1, - ), - subject_id=dict( - argstr='%s', - position=-3, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_norms=dict(argstr="-n %s", mandatory=True,), + in_segmentations=dict(argstr="-a %s", mandatory=True,), + in_segmentations_noCC=dict(argstr="-c %s", mandatory=True,), + out_file=dict(extensions=None, mandatory=True, position=-1,), + subject_id=dict(argstr="%s", position=-3,), subjects_dir=dict(), - timepoints=dict( - argstr='%s', - mandatory=True, - position=-2, - ), + timepoints=dict(argstr="%s", mandatory=True, position=-2,), ) inputs = FuseSegmentations.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FuseSegmentations_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = FuseSegmentations.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py b/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py index 1a4e966239..a883f39732 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py @@ -4,128 +4,72 @@ def test_GLMFit_inputs(): input_map = dict( - allow_ill_cond=dict(argstr='--illcond', ), - allow_repeated_subjects=dict(argstr='--allowsubjrep', ), - args=dict(argstr='%s', ), - calc_AR1=dict(argstr='--tar1', ), - check_opts=dict(argstr='--checkopts', ), - compute_log_y=dict(argstr='--logy', ), - contrast=dict(argstr='--C %s...', ), - cortex=dict( - argstr='--cortex', - xor=['label_file'], - ), - debug=dict(argstr='--debug', ), + allow_ill_cond=dict(argstr="--illcond",), + allow_repeated_subjects=dict(argstr="--allowsubjrep",), + args=dict(argstr="%s",), + calc_AR1=dict(argstr="--tar1",), + check_opts=dict(argstr="--checkopts",), + compute_log_y=dict(argstr="--logy",), + contrast=dict(argstr="--C %s...",), + cortex=dict(argstr="--cortex", xor=["label_file"],), + debug=dict(argstr="--debug",), design=dict( - argstr='--X %s', - extensions=None, - xor=('fsgd', 'design', 'one_sample'), - ), - diag=dict(argstr='--diag %d', ), - diag_cluster=dict(argstr='--diag-cluster', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixed_fx_dof=dict( - argstr='--ffxdof %d', - xor=['fixed_fx_dof_file'], + argstr="--X %s", extensions=None, xor=("fsgd", "design", "one_sample"), ), + diag=dict(argstr="--diag %d",), + diag_cluster=dict(argstr="--diag-cluster",), + environ=dict(nohash=True, usedefault=True,), + fixed_fx_dof=dict(argstr="--ffxdof %d", xor=["fixed_fx_dof_file"],), fixed_fx_dof_file=dict( - argstr='--ffxdofdat %d', - extensions=None, - xor=['fixed_fx_dof'], - ), - fixed_fx_var=dict( - argstr='--yffxvar %s', - extensions=None, - ), - force_perm=dict(argstr='--perm-force', ), - fsgd=dict( - argstr='--fsgd %s %s', - xor=('fsgd', 'design', 'one_sample'), - ), - fwhm=dict(argstr='--fwhm %f', ), - glm_dir=dict( - argstr='--glmdir %s', - genfile=True, + argstr="--ffxdofdat %d", extensions=None, xor=["fixed_fx_dof"], ), + fixed_fx_var=dict(argstr="--yffxvar %s", extensions=None,), + force_perm=dict(argstr="--perm-force",), + fsgd=dict(argstr="--fsgd %s %s", xor=("fsgd", "design", "one_sample"),), + fwhm=dict(argstr="--fwhm %f",), + glm_dir=dict(argstr="--glmdir %s", genfile=True,), hemi=dict(), - in_file=dict( - argstr='--y %s', - 
copyfile=False, - extensions=None, - mandatory=True, - ), - invert_mask=dict(argstr='--mask-inv', ), - label_file=dict( - argstr='--label %s', - extensions=None, - xor=['cortex'], - ), - mask_file=dict( - argstr='--mask %s', - extensions=None, - ), - no_contrast_ok=dict(argstr='--no-contrasts-ok', ), - no_est_fwhm=dict(argstr='--no-est-fwhm', ), - no_mask_smooth=dict(argstr='--no-mask-smooth', ), - no_prune=dict( - argstr='--no-prune', - xor=['prunethresh'], - ), + in_file=dict(argstr="--y %s", copyfile=False, extensions=None, mandatory=True,), + invert_mask=dict(argstr="--mask-inv",), + label_file=dict(argstr="--label %s", extensions=None, xor=["cortex"],), + mask_file=dict(argstr="--mask %s", extensions=None,), + no_contrast_ok=dict(argstr="--no-contrasts-ok",), + no_est_fwhm=dict(argstr="--no-est-fwhm",), + no_mask_smooth=dict(argstr="--no-mask-smooth",), + no_prune=dict(argstr="--no-prune", xor=["prunethresh"],), one_sample=dict( - argstr='--osgm', - xor=('one_sample', 'fsgd', 'design', 'contrast'), - ), - pca=dict(argstr='--pca', ), - per_voxel_reg=dict(argstr='--pvr %s...', ), - profile=dict(argstr='--profile %d', ), - prune=dict(argstr='--prune', ), - prune_thresh=dict( - argstr='--prune_thr %f', - xor=['noprune'], - ), - resynth_test=dict(argstr='--resynthtest %d', ), - save_cond=dict(argstr='--save-cond', ), - save_estimate=dict(argstr='--yhat-save', ), - save_res_corr_mtx=dict(argstr='--eres-scm', ), - save_residual=dict(argstr='--eres-save', ), - seed=dict(argstr='--seed %d', ), - self_reg=dict(argstr='--selfreg %d %d %d', ), - sim_done_file=dict( - argstr='--sim-done %s', - extensions=None, - ), - sim_sign=dict(argstr='--sim-sign %s', ), - simulation=dict(argstr='--sim %s %d %f %s', ), + argstr="--osgm", xor=("one_sample", "fsgd", "design", "contrast"), + ), + pca=dict(argstr="--pca",), + per_voxel_reg=dict(argstr="--pvr %s...",), + profile=dict(argstr="--profile %d",), + prune=dict(argstr="--prune",), + prune_thresh=dict(argstr="--prune_thr %f", xor=["noprune"],), + resynth_test=dict(argstr="--resynthtest %d",), + save_cond=dict(argstr="--save-cond",), + save_estimate=dict(argstr="--yhat-save",), + save_res_corr_mtx=dict(argstr="--eres-scm",), + save_residual=dict(argstr="--eres-save",), + seed=dict(argstr="--seed %d",), + self_reg=dict(argstr="--selfreg %d %d %d",), + sim_done_file=dict(argstr="--sim-done %s", extensions=None,), + sim_sign=dict(argstr="--sim-sign %s",), + simulation=dict(argstr="--sim %s %d %f %s",), subject_id=dict(), subjects_dir=dict(), - surf=dict( - argstr='--surf %s %s %s', - requires=['subject_id', 'hemi'], - ), - surf_geo=dict(usedefault=True, ), - synth=dict(argstr='--synth', ), - uniform=dict(argstr='--uniform %f %f', ), - var_fwhm=dict(argstr='--var-fwhm %f', ), - vox_dump=dict(argstr='--voxdump %d %d %d', ), - weight_file=dict( - extensions=None, - xor=['weighted_ls'], - ), - weight_inv=dict( - argstr='--w-inv', - xor=['weighted_ls'], - ), - weight_sqrt=dict( - argstr='--w-sqrt', - xor=['weighted_ls'], - ), + surf=dict(argstr="--surf %s %s %s", requires=["subject_id", "hemi"],), + surf_geo=dict(usedefault=True,), + synth=dict(argstr="--synth",), + uniform=dict(argstr="--uniform %f %f",), + var_fwhm=dict(argstr="--var-fwhm %f",), + vox_dump=dict(argstr="--voxdump %d %d %d",), + weight_file=dict(extensions=None, xor=["weighted_ls"],), + weight_inv=dict(argstr="--w-inv", xor=["weighted_ls"],), + weight_sqrt=dict(argstr="--w-sqrt", xor=["weighted_ls"],), weighted_ls=dict( - argstr='--wls %s', + argstr="--wls %s", extensions=None, - xor=('weight_file', 
'weight_inv', 'weight_sqrt'), + xor=("weight_file", "weight_inv", "weight_sqrt"), ), ) inputs = GLMFit.input_spec() @@ -133,25 +77,27 @@ def test_GLMFit_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_GLMFit_outputs(): output_map = dict( - beta_file=dict(extensions=None, ), - dof_file=dict(extensions=None, ), - error_file=dict(extensions=None, ), - error_stddev_file=dict(extensions=None, ), - error_var_file=dict(extensions=None, ), - estimate_file=dict(extensions=None, ), - frame_eigenvectors=dict(extensions=None, ), + beta_file=dict(extensions=None,), + dof_file=dict(extensions=None,), + error_file=dict(extensions=None,), + error_stddev_file=dict(extensions=None,), + error_var_file=dict(extensions=None,), + estimate_file=dict(extensions=None,), + frame_eigenvectors=dict(extensions=None,), ftest_file=dict(), - fwhm_file=dict(extensions=None, ), + fwhm_file=dict(extensions=None,), gamma_file=dict(), gamma_var_file=dict(), glm_dir=dict(), - mask_file=dict(extensions=None, ), + mask_file=dict(extensions=None,), sig_file=dict(), - singular_values=dict(extensions=None, ), - spatial_eigenvectors=dict(extensions=None, ), - svd_stats_file=dict(extensions=None, ), + singular_values=dict(extensions=None,), + spatial_eigenvectors=dict(extensions=None,), + svd_stats_file=dict(extensions=None,), ) outputs = GLMFit.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py b/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py index 9dc4b292dd..2a80c0743b 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py @@ -4,16 +4,9 @@ def test_ImageInfo_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - position=1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, position=1,), subjects_dir=dict(), ) inputs = ImageInfo.input_spec() @@ -21,6 +14,8 @@ def test_ImageInfo_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ImageInfo_outputs(): output_map = dict( TE=dict(), @@ -31,7 +26,7 @@ def test_ImageInfo_outputs(): file_format=dict(), info=dict(), orientation=dict(), - out_file=dict(extensions=None, ), + out_file=dict(extensions=None,), ph_enc_dir=dict(), vox_sizes=dict(), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py b/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py index 079e50968d..14cd9fa9f0 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py @@ -4,30 +4,17 @@ def test_Jacobian_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_mappedsurf=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - in_origsurf=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_mappedsurf=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + in_origsurf=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, 
hash_files=False, keep_extension=False, - name_source=['in_origsurf'], - name_template='%s.jacobian', + name_source=["in_origsurf"], + name_template="%s.jacobian", position=-1, ), subjects_dir=dict(), @@ -37,8 +24,10 @@ def test_Jacobian_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Jacobian_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Jacobian.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py index 82dd02fe0c..ab59b01867 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py @@ -4,84 +4,68 @@ def test_LTAConvert_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), in_fsl=dict( - argstr='--infsl %s', + argstr="--infsl %s", extensions=None, mandatory=True, - xor=('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', - 'in_itk'), + xor=("in_lta", "in_fsl", "in_mni", "in_reg", "in_niftyreg", "in_itk"), ), in_itk=dict( - argstr='--initk %s', + argstr="--initk %s", extensions=None, mandatory=True, - xor=('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', - 'in_itk'), + xor=("in_lta", "in_fsl", "in_mni", "in_reg", "in_niftyreg", "in_itk"), ), in_lta=dict( - argstr='--inlta %s', + argstr="--inlta %s", mandatory=True, - xor=('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', - 'in_itk'), + xor=("in_lta", "in_fsl", "in_mni", "in_reg", "in_niftyreg", "in_itk"), ), in_mni=dict( - argstr='--inmni %s', + argstr="--inmni %s", extensions=None, mandatory=True, - xor=('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', - 'in_itk'), + xor=("in_lta", "in_fsl", "in_mni", "in_reg", "in_niftyreg", "in_itk"), ), in_niftyreg=dict( - argstr='--inniftyreg %s', + argstr="--inniftyreg %s", extensions=None, mandatory=True, - xor=('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', - 'in_itk'), + xor=("in_lta", "in_fsl", "in_mni", "in_reg", "in_niftyreg", "in_itk"), ), in_reg=dict( - argstr='--inreg %s', + argstr="--inreg %s", extensions=None, mandatory=True, - xor=('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', - 'in_itk'), - ), - invert=dict(argstr='--invert', ), - ltavox2vox=dict( - argstr='--ltavox2vox', - requires=['out_lta'], - ), - out_fsl=dict(argstr='--outfsl %s', ), - out_itk=dict(argstr='--outitk %s', ), - out_lta=dict(argstr='--outlta %s', ), - out_mni=dict(argstr='--outmni %s', ), - out_reg=dict(argstr='--outreg %s', ), - source_file=dict( - argstr='--src %s', - extensions=None, - ), - target_conform=dict(argstr='--trgconform', ), - target_file=dict( - argstr='--trg %s', - extensions=None, + xor=("in_lta", "in_fsl", "in_mni", "in_reg", "in_niftyreg", "in_itk"), ), + invert=dict(argstr="--invert",), + ltavox2vox=dict(argstr="--ltavox2vox", requires=["out_lta"],), + out_fsl=dict(argstr="--outfsl %s",), + out_itk=dict(argstr="--outitk %s",), + out_lta=dict(argstr="--outlta %s",), + out_mni=dict(argstr="--outmni %s",), + out_reg=dict(argstr="--outreg %s",), + source_file=dict(argstr="--src %s", extensions=None,), + target_conform=dict(argstr="--trgconform",), + target_file=dict(argstr="--trg %s", extensions=None,), ) inputs = 
LTAConvert.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_LTAConvert_outputs(): output_map = dict( - out_fsl=dict(extensions=None, ), - out_itk=dict(extensions=None, ), - out_lta=dict(extensions=None, ), - out_mni=dict(extensions=None, ), - out_reg=dict(extensions=None, ), + out_fsl=dict(extensions=None,), + out_itk=dict(extensions=None,), + out_lta=dict(extensions=None,), + out_mni=dict(extensions=None,), + out_reg=dict(extensions=None,), ) outputs = LTAConvert.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py b/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py index 78f7076f92..f800c560f7 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py @@ -4,48 +4,28 @@ def test_Label2Annot_inputs(): input_map = dict( - args=dict(argstr='%s', ), - color_table=dict( - argstr='--ctab %s', - extensions=None, - ), + args=dict(argstr="%s",), + color_table=dict(argstr="--ctab %s", extensions=None,), copy_inputs=dict(), - environ=dict( - nohash=True, - usedefault=True, - ), - hemisphere=dict( - argstr='--hemi %s', - mandatory=True, - ), - in_labels=dict( - argstr='--l %s...', - mandatory=True, - ), - keep_max=dict(argstr='--maxstatwinner', ), - orig=dict( - extensions=None, - mandatory=True, - ), - out_annot=dict( - argstr='--a %s', - mandatory=True, - ), - subject_id=dict( - argstr='--s %s', - mandatory=True, - usedefault=True, - ), + environ=dict(nohash=True, usedefault=True,), + hemisphere=dict(argstr="--hemi %s", mandatory=True,), + in_labels=dict(argstr="--l %s...", mandatory=True,), + keep_max=dict(argstr="--maxstatwinner",), + orig=dict(extensions=None, mandatory=True,), + out_annot=dict(argstr="--a %s", mandatory=True,), + subject_id=dict(argstr="--s %s", mandatory=True, usedefault=True,), subjects_dir=dict(), - verbose_off=dict(argstr='--noverbose', ), + verbose_off=dict(argstr="--noverbose",), ) inputs = Label2Annot.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Label2Annot_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Label2Annot.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py b/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py index 934770875f..e5e227c5a9 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py @@ -4,67 +4,37 @@ def test_Label2Label_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), copy_inputs=dict(), - environ=dict( - nohash=True, - usedefault=True, - ), - hemisphere=dict( - argstr='--hemi %s', - mandatory=True, - ), + environ=dict(nohash=True, usedefault=True,), + hemisphere=dict(argstr="--hemi %s", mandatory=True,), out_file=dict( - argstr='--trglabel %s', + argstr="--trglabel %s", extensions=None, hash_files=False, keep_extension=True, - name_source=['source_label'], - name_template='%s_converted', - ), - registration_method=dict( - argstr='--regmethod %s', - usedefault=True, - ), - source_label=dict( - argstr='--srclabel %s', - extensions=None, - mandatory=True, - ), - source_sphere_reg=dict( - 
extensions=None, - mandatory=True, - ), - source_subject=dict( - argstr='--srcsubject %s', - mandatory=True, - ), - source_white=dict( - extensions=None, - mandatory=True, - ), - sphere_reg=dict( - extensions=None, - mandatory=True, - ), - subject_id=dict( - argstr='--trgsubject %s', - mandatory=True, - usedefault=True, - ), + name_source=["source_label"], + name_template="%s_converted", + ), + registration_method=dict(argstr="--regmethod %s", usedefault=True,), + source_label=dict(argstr="--srclabel %s", extensions=None, mandatory=True,), + source_sphere_reg=dict(extensions=None, mandatory=True,), + source_subject=dict(argstr="--srcsubject %s", mandatory=True,), + source_white=dict(extensions=None, mandatory=True,), + sphere_reg=dict(extensions=None, mandatory=True,), + subject_id=dict(argstr="--trgsubject %s", mandatory=True, usedefault=True,), subjects_dir=dict(), - white=dict( - extensions=None, - mandatory=True, - ), + white=dict(extensions=None, mandatory=True,), ) inputs = Label2Label.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Label2Label_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Label2Label.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py b/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py index 11a244e94b..dd890531c9 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py @@ -5,88 +5,67 @@ def test_Label2Vol_inputs(): input_map = dict( annot_file=dict( - argstr='--annot %s', + argstr="--annot %s", copyfile=False, extensions=None, mandatory=True, - requires=('subject_id', 'hemi'), - xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), + requires=("subject_id", "hemi"), + xor=("label_file", "annot_file", "seg_file", "aparc_aseg"), ), aparc_aseg=dict( - argstr='--aparc+aseg', + argstr="--aparc+aseg", mandatory=True, - xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), + xor=("label_file", "annot_file", "seg_file", "aparc_aseg"), ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fill_thresh=dict(argstr='--fillthresh %g', ), - hemi=dict(argstr='--hemi %s', ), - identity=dict( - argstr='--identity', - xor=('reg_file', 'reg_header', 'identity'), - ), - invert_mtx=dict(argstr='--invertmtx', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + fill_thresh=dict(argstr="--fillthresh %g",), + hemi=dict(argstr="--hemi %s",), + identity=dict(argstr="--identity", xor=("reg_file", "reg_header", "identity"),), + invert_mtx=dict(argstr="--invertmtx",), label_file=dict( - argstr='--label %s...', + argstr="--label %s...", copyfile=False, mandatory=True, - xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), - ), - label_hit_file=dict( - argstr='--hits %s', - extensions=None, - ), - label_voxel_volume=dict(argstr='--labvoxvol %f', ), - map_label_stat=dict( - argstr='--label-stat %s', - extensions=None, - ), - native_vox2ras=dict(argstr='--native-vox2ras', ), - proj=dict( - argstr='--proj %s %f %f %f', - requires=('subject_id', 'hemi'), + xor=("label_file", "annot_file", "seg_file", "aparc_aseg"), ), + label_hit_file=dict(argstr="--hits %s", extensions=None,), + label_voxel_volume=dict(argstr="--labvoxvol %f",), + 
map_label_stat=dict(argstr="--label-stat %s", extensions=None,), + native_vox2ras=dict(argstr="--native-vox2ras",), + proj=dict(argstr="--proj %s %f %f %f", requires=("subject_id", "hemi"),), reg_file=dict( - argstr='--reg %s', + argstr="--reg %s", extensions=None, - xor=('reg_file', 'reg_header', 'identity'), + xor=("reg_file", "reg_header", "identity"), ), reg_header=dict( - argstr='--regheader %s', + argstr="--regheader %s", extensions=None, - xor=('reg_file', 'reg_header', 'identity'), + xor=("reg_file", "reg_header", "identity"), ), seg_file=dict( - argstr='--seg %s', + argstr="--seg %s", copyfile=False, extensions=None, mandatory=True, - xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), + xor=("label_file", "annot_file", "seg_file", "aparc_aseg"), ), - subject_id=dict(argstr='--subject %s', ), + subject_id=dict(argstr="--subject %s",), subjects_dir=dict(), - surface=dict(argstr='--surf %s', ), - template_file=dict( - argstr='--temp %s', - extensions=None, - mandatory=True, - ), - vol_label_file=dict( - argstr='--o %s', - extensions=None, - genfile=True, - ), + surface=dict(argstr="--surf %s",), + template_file=dict(argstr="--temp %s", extensions=None, mandatory=True,), + vol_label_file=dict(argstr="--o %s", extensions=None, genfile=True,), ) inputs = Label2Vol.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Label2Vol_outputs(): - output_map = dict(vol_label_file=dict(extensions=None, ), ) + output_map = dict(vol_label_file=dict(extensions=None,),) outputs = Label2Vol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py b/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py index 6ba4926cd3..16ed15d093 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py @@ -4,50 +4,36 @@ def test_MNIBiasCorrection_inputs(): input_map = dict( - args=dict(argstr='%s', ), - distance=dict(argstr='--distance %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='--i %s', - extensions=None, - mandatory=True, - ), - iterations=dict( - argstr='--n %d', - usedefault=True, - ), - mask=dict( - argstr='--mask %s', - extensions=None, - ), - no_rescale=dict(argstr='--no-rescale', ), + args=dict(argstr="%s",), + distance=dict(argstr="--distance %d",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="--i %s", extensions=None, mandatory=True,), + iterations=dict(argstr="--n %d", usedefault=True,), + mask=dict(argstr="--mask %s", extensions=None,), + no_rescale=dict(argstr="--no-rescale",), out_file=dict( - argstr='--o %s', + argstr="--o %s", extensions=None, hash_files=False, keep_extension=True, - name_source=['in_file'], - name_template='%s_output', + name_source=["in_file"], + name_template="%s_output", ), - protocol_iterations=dict(argstr='--proto-iters %d', ), - shrink=dict(argstr='--shrink %d', ), - stop=dict(argstr='--stop %f', ), + protocol_iterations=dict(argstr="--proto-iters %d",), + shrink=dict(argstr="--shrink %d",), + stop=dict(argstr="--stop %f",), subjects_dir=dict(), - transform=dict( - argstr='--uchar %s', - extensions=None, - ), + transform=dict(argstr="--uchar %s", extensions=None,), ) inputs = MNIBiasCorrection.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): 
assert getattr(inputs.traits()[key], metakey) == value + + def test_MNIBiasCorrection_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MNIBiasCorrection.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py b/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py index 3e916cf7dc..ae81998809 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py @@ -4,38 +4,24 @@ def test_MPRtoMNI305_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - usedefault=True, - ), - reference_dir=dict( - mandatory=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, usedefault=True,), + reference_dir=dict(mandatory=True, usedefault=True,), subjects_dir=dict(), - target=dict( - mandatory=True, - usedefault=True, - ), + target=dict(mandatory=True, usedefault=True,), ) inputs = MPRtoMNI305.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MPRtoMNI305_outputs(): output_map = dict( - log_file=dict( - extensions=None, - usedefault=True, - ), - out_file=dict(extensions=None, ), + log_file=dict(extensions=None, usedefault=True,), + out_file=dict(extensions=None,), ) outputs = MPRtoMNI305.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py index f5ab4ff630..b75f338f31 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py @@ -5,136 +5,107 @@ def test_MRIConvert_inputs(): input_map = dict( apply_inv_transform=dict( - argstr='--apply_inverse_transform %s', - extensions=None, + argstr="--apply_inverse_transform %s", extensions=None, ), - apply_transform=dict( - argstr='--apply_transform %s', - extensions=None, - ), - args=dict(argstr='%s', ), - ascii=dict(argstr='--ascii', ), - autoalign_matrix=dict( - argstr='--autoalign %s', - extensions=None, - ), - color_file=dict( - argstr='--color_file %s', - extensions=None, - ), - conform=dict(argstr='--conform', ), - conform_min=dict(argstr='--conform_min', ), - conform_size=dict(argstr='--conform_size %s', ), - crop_center=dict(argstr='--crop %d %d %d', ), - crop_gdf=dict(argstr='--crop_gdf', ), - crop_size=dict(argstr='--cropsize %d %d %d', ), - cut_ends=dict(argstr='--cutends %d', ), - cw256=dict(argstr='--cw256', ), - devolve_transform=dict(argstr='--devolvexfm %s', ), - drop_n=dict(argstr='--ndrop %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fill_parcellation=dict(argstr='--fill_parcellation', ), - force_ras=dict(argstr='--force_ras_good', ), - frame=dict(argstr='--frame %d', ), - frame_subsample=dict(argstr='--fsubsample %d %d %d', ), - fwhm=dict(argstr='--fwhm %f', ), - in_center=dict(argstr='--in_center %s', ), + apply_transform=dict(argstr="--apply_transform %s", extensions=None,), + args=dict(argstr="%s",), + ascii=dict(argstr="--ascii",), + autoalign_matrix=dict(argstr="--autoalign %s", extensions=None,), + color_file=dict(argstr="--color_file %s", extensions=None,), + conform=dict(argstr="--conform",), + 
conform_min=dict(argstr="--conform_min",), + conform_size=dict(argstr="--conform_size %s",), + crop_center=dict(argstr="--crop %d %d %d",), + crop_gdf=dict(argstr="--crop_gdf",), + crop_size=dict(argstr="--cropsize %d %d %d",), + cut_ends=dict(argstr="--cutends %d",), + cw256=dict(argstr="--cw256",), + devolve_transform=dict(argstr="--devolvexfm %s",), + drop_n=dict(argstr="--ndrop %d",), + environ=dict(nohash=True, usedefault=True,), + fill_parcellation=dict(argstr="--fill_parcellation",), + force_ras=dict(argstr="--force_ras_good",), + frame=dict(argstr="--frame %d",), + frame_subsample=dict(argstr="--fsubsample %d %d %d",), + fwhm=dict(argstr="--fwhm %f",), + in_center=dict(argstr="--in_center %s",), in_file=dict( - argstr='--input_volume %s', - extensions=None, - mandatory=True, - position=-2, - ), - in_i_dir=dict(argstr='--in_i_direction %f %f %f', ), - in_i_size=dict(argstr='--in_i_size %d', ), - in_info=dict(argstr='--in_info', ), - in_j_dir=dict(argstr='--in_j_direction %f %f %f', ), - in_j_size=dict(argstr='--in_j_size %d', ), - in_k_dir=dict(argstr='--in_k_direction %f %f %f', ), - in_k_size=dict(argstr='--in_k_size %d', ), - in_like=dict( - argstr='--in_like %s', - extensions=None, + argstr="--input_volume %s", extensions=None, mandatory=True, position=-2, ), - in_matrix=dict(argstr='--in_matrix', ), - in_orientation=dict(argstr='--in_orientation %s', ), - in_scale=dict(argstr='--scale %f', ), - in_stats=dict(argstr='--in_stats', ), - in_type=dict(argstr='--in_type %s', ), - invert_contrast=dict(argstr='--invert_contrast %f', ), - midframe=dict(argstr='--mid-frame', ), - no_change=dict(argstr='--nochange', ), - no_scale=dict(argstr='--no_scale 1', ), - no_translate=dict(argstr='--no_translate', ), - no_write=dict(argstr='--no_write', ), - out_center=dict(argstr='--out_center %f %f %f', ), - out_datatype=dict(argstr='--out_data_type %s', ), + in_i_dir=dict(argstr="--in_i_direction %f %f %f",), + in_i_size=dict(argstr="--in_i_size %d",), + in_info=dict(argstr="--in_info",), + in_j_dir=dict(argstr="--in_j_direction %f %f %f",), + in_j_size=dict(argstr="--in_j_size %d",), + in_k_dir=dict(argstr="--in_k_direction %f %f %f",), + in_k_size=dict(argstr="--in_k_size %d",), + in_like=dict(argstr="--in_like %s", extensions=None,), + in_matrix=dict(argstr="--in_matrix",), + in_orientation=dict(argstr="--in_orientation %s",), + in_scale=dict(argstr="--scale %f",), + in_stats=dict(argstr="--in_stats",), + in_type=dict(argstr="--in_type %s",), + invert_contrast=dict(argstr="--invert_contrast %f",), + midframe=dict(argstr="--mid-frame",), + no_change=dict(argstr="--nochange",), + no_scale=dict(argstr="--no_scale 1",), + no_translate=dict(argstr="--no_translate",), + no_write=dict(argstr="--no_write",), + out_center=dict(argstr="--out_center %f %f %f",), + out_datatype=dict(argstr="--out_data_type %s",), out_file=dict( - argstr='--output_volume %s', - extensions=None, - genfile=True, - position=-1, + argstr="--output_volume %s", extensions=None, genfile=True, position=-1, ), - out_i_count=dict(argstr='--out_i_count %d', ), - out_i_dir=dict(argstr='--out_i_direction %f %f %f', ), - out_i_size=dict(argstr='--out_i_size %d', ), - out_info=dict(argstr='--out_info', ), - out_j_count=dict(argstr='--out_j_count %d', ), - out_j_dir=dict(argstr='--out_j_direction %f %f %f', ), - out_j_size=dict(argstr='--out_j_size %d', ), - out_k_count=dict(argstr='--out_k_count %d', ), - out_k_dir=dict(argstr='--out_k_direction %f %f %f', ), - out_k_size=dict(argstr='--out_k_size %d', ), - 
out_matrix=dict(argstr='--out_matrix', ), - out_orientation=dict(argstr='--out_orientation %s', ), - out_scale=dict(argstr='--out-scale %d', ), - out_stats=dict(argstr='--out_stats', ), - out_type=dict(argstr='--out_type %s', ), - parse_only=dict(argstr='--parse_only', ), - read_only=dict(argstr='--read_only', ), - reorder=dict(argstr='--reorder %d %d %d', ), - resample_type=dict(argstr='--resample_type %s', ), - reslice_like=dict( - argstr='--reslice_like %s', - extensions=None, - ), - sdcm_list=dict( - argstr='--sdcmlist %s', - extensions=None, - ), - skip_n=dict(argstr='--nskip %d', ), - slice_bias=dict(argstr='--slice-bias %f', ), - slice_crop=dict(argstr='--slice-crop %d %d', ), - slice_reverse=dict(argstr='--slice-reverse', ), - smooth_parcellation=dict(argstr='--smooth_parcellation', ), - sphinx=dict(argstr='--sphinx', ), - split=dict(argstr='--split', ), - status_file=dict( - argstr='--status %s', - extensions=None, - ), - subject_name=dict(argstr='--subject_name %s', ), + out_i_count=dict(argstr="--out_i_count %d",), + out_i_dir=dict(argstr="--out_i_direction %f %f %f",), + out_i_size=dict(argstr="--out_i_size %d",), + out_info=dict(argstr="--out_info",), + out_j_count=dict(argstr="--out_j_count %d",), + out_j_dir=dict(argstr="--out_j_direction %f %f %f",), + out_j_size=dict(argstr="--out_j_size %d",), + out_k_count=dict(argstr="--out_k_count %d",), + out_k_dir=dict(argstr="--out_k_direction %f %f %f",), + out_k_size=dict(argstr="--out_k_size %d",), + out_matrix=dict(argstr="--out_matrix",), + out_orientation=dict(argstr="--out_orientation %s",), + out_scale=dict(argstr="--out-scale %d",), + out_stats=dict(argstr="--out_stats",), + out_type=dict(argstr="--out_type %s",), + parse_only=dict(argstr="--parse_only",), + read_only=dict(argstr="--read_only",), + reorder=dict(argstr="--reorder %d %d %d",), + resample_type=dict(argstr="--resample_type %s",), + reslice_like=dict(argstr="--reslice_like %s", extensions=None,), + sdcm_list=dict(argstr="--sdcmlist %s", extensions=None,), + skip_n=dict(argstr="--nskip %d",), + slice_bias=dict(argstr="--slice-bias %f",), + slice_crop=dict(argstr="--slice-crop %d %d",), + slice_reverse=dict(argstr="--slice-reverse",), + smooth_parcellation=dict(argstr="--smooth_parcellation",), + sphinx=dict(argstr="--sphinx",), + split=dict(argstr="--split",), + status_file=dict(argstr="--status %s", extensions=None,), + subject_name=dict(argstr="--subject_name %s",), subjects_dir=dict(), - te=dict(argstr='-te %d', ), - template_info=dict(argstr='--template_info', ), - template_type=dict(argstr='--template_type %s', ), - ti=dict(argstr='-ti %d', ), - tr=dict(argstr='-tr %d', ), - unwarp_gradient=dict(argstr='--unwarp_gradient_nonlinearity', ), - vox_size=dict(argstr='-voxsize %f %f %f', ), - zero_ge_z_offset=dict(argstr='--zero_ge_z_offset', ), - zero_outlines=dict(argstr='--zero_outlines', ), + te=dict(argstr="-te %d",), + template_info=dict(argstr="--template_info",), + template_type=dict(argstr="--template_type %s",), + ti=dict(argstr="-ti %d",), + tr=dict(argstr="-tr %d",), + unwarp_gradient=dict(argstr="--unwarp_gradient_nonlinearity",), + vox_size=dict(argstr="-voxsize %f %f %f",), + zero_ge_z_offset=dict(argstr="--zero_ge_z_offset",), + zero_outlines=dict(argstr="--zero_outlines",), ) inputs = MRIConvert.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MRIConvert_outputs(): - output_map = dict(out_file=dict(), ) + output_map = 
dict(out_file=dict(),) outputs = MRIConvert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py b/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py index 39076d96a9..1cef259c82 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py @@ -4,81 +4,65 @@ def test_MRICoreg_inputs(): input_map = dict( - args=dict(argstr='%s', ), - brute_force_limit=dict( - argstr='--bf-lim %g', - xor=['no_brute_force'], - ), - brute_force_samples=dict( - argstr='--bf-nsamp %d', - xor=['no_brute_force'], - ), - conform_reference=dict(argstr='--conf-ref', ), - dof=dict(argstr='--dof %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - ftol=dict(argstr='--ftol %e', ), - initial_rotation=dict(argstr='--rot %g %g %g', ), - initial_scale=dict(argstr='--scale %g %g %g', ), - initial_shear=dict(argstr='--shear %g %g %g', ), - initial_translation=dict(argstr='--trans %g %g %g', ), - linmintol=dict(argstr='--linmintol %e', ), - max_iters=dict(argstr='--nitersmax %d', ), - no_brute_force=dict(argstr='--no-bf', ), - no_coord_dithering=dict(argstr='--no-coord-dither', ), - no_cras0=dict(argstr='--no-cras0', ), - no_intensity_dithering=dict(argstr='--no-intensity-dither', ), - no_smooth=dict(argstr='--no-smooth', ), - num_threads=dict(argstr='--threads %d', ), - out_lta_file=dict( - argstr='--lta %s', - usedefault=True, - ), - out_params_file=dict(argstr='--params %s', ), - out_reg_file=dict(argstr='--regdat %s', ), - ref_fwhm=dict(argstr='--ref-fwhm', ), + args=dict(argstr="%s",), + brute_force_limit=dict(argstr="--bf-lim %g", xor=["no_brute_force"],), + brute_force_samples=dict(argstr="--bf-nsamp %d", xor=["no_brute_force"],), + conform_reference=dict(argstr="--conf-ref",), + dof=dict(argstr="--dof %d",), + environ=dict(nohash=True, usedefault=True,), + ftol=dict(argstr="--ftol %e",), + initial_rotation=dict(argstr="--rot %g %g %g",), + initial_scale=dict(argstr="--scale %g %g %g",), + initial_shear=dict(argstr="--shear %g %g %g",), + initial_translation=dict(argstr="--trans %g %g %g",), + linmintol=dict(argstr="--linmintol %e",), + max_iters=dict(argstr="--nitersmax %d",), + no_brute_force=dict(argstr="--no-bf",), + no_coord_dithering=dict(argstr="--no-coord-dither",), + no_cras0=dict(argstr="--no-cras0",), + no_intensity_dithering=dict(argstr="--no-intensity-dither",), + no_smooth=dict(argstr="--no-smooth",), + num_threads=dict(argstr="--threads %d",), + out_lta_file=dict(argstr="--lta %s", usedefault=True,), + out_params_file=dict(argstr="--params %s",), + out_reg_file=dict(argstr="--regdat %s",), + ref_fwhm=dict(argstr="--ref-fwhm",), reference_file=dict( - argstr='--ref %s', + argstr="--ref %s", copyfile=False, extensions=None, mandatory=True, - xor=['subject_id'], - ), - reference_mask=dict( - argstr='--ref-mask %s', - position=2, + xor=["subject_id"], ), - saturation_threshold=dict(argstr='--sat %g', ), - sep=dict(argstr='--sep %s...', ), + reference_mask=dict(argstr="--ref-mask %s", position=2,), + saturation_threshold=dict(argstr="--sat %g",), + sep=dict(argstr="--sep %s...",), source_file=dict( - argstr='--mov %s', - copyfile=False, - extensions=None, - mandatory=True, + argstr="--mov %s", copyfile=False, extensions=None, mandatory=True, ), - source_mask=dict(argstr='--mov-mask', ), - source_oob=dict(argstr='--mov-oob', ), + source_mask=dict(argstr="--mov-mask",), + source_oob=dict(argstr="--mov-oob",), subject_id=dict( - argstr='--s %s', + 
argstr="--s %s", mandatory=True, position=1, - requires=['subjects_dir'], - xor=['reference_file'], + requires=["subjects_dir"], + xor=["reference_file"], ), - subjects_dir=dict(argstr='--sd %s', ), + subjects_dir=dict(argstr="--sd %s",), ) inputs = MRICoreg.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MRICoreg_outputs(): output_map = dict( - out_lta_file=dict(extensions=None, ), - out_params_file=dict(extensions=None, ), - out_reg_file=dict(extensions=None, ), + out_lta_file=dict(extensions=None,), + out_params_file=dict(extensions=None,), + out_reg_file=dict(extensions=None,), ) outputs = MRICoreg.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py index d70da67a50..c8a2f7090c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py @@ -4,47 +4,24 @@ def test_MRIFill_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - log_file=dict( - argstr='-a %s', - extensions=None, - ), - out_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - ), - segmentation=dict( - argstr='-segmentation %s', - extensions=None, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + log_file=dict(argstr="-a %s", extensions=None,), + out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), + segmentation=dict(argstr="-segmentation %s", extensions=None,), subjects_dir=dict(), - transform=dict( - argstr='-xform %s', - extensions=None, - ), + transform=dict(argstr="-xform %s", extensions=None,), ) inputs = MRIFill.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MRIFill_outputs(): - output_map = dict( - log_file=dict(extensions=None, ), - out_file=dict(extensions=None, ), - ) + output_map = dict(log_file=dict(extensions=None,), out_file=dict(extensions=None,),) outputs = MRIFill.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py index 2152aec1e5..25137a53a8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py @@ -4,33 +4,12 @@ def test_MRIMarchingCubes_inputs(): input_map = dict( - args=dict(argstr='%s', ), - connectivity_value=dict( - argstr='%d', - position=-1, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=1, - ), - label_value=dict( - argstr='%d', - mandatory=True, - position=2, - ), - out_file=dict( - argstr='./%s', - extensions=None, - genfile=True, - position=-2, - ), + args=dict(argstr="%s",), + connectivity_value=dict(argstr="%d", position=-1, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1,), + label_value=dict(argstr="%d", mandatory=True, position=2,), + 
out_file=dict(argstr="./%s", extensions=None, genfile=True, position=-2,), subjects_dir=dict(), ) inputs = MRIMarchingCubes.input_spec() @@ -38,8 +17,10 @@ def test_MRIMarchingCubes_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MRIMarchingCubes_outputs(): - output_map = dict(surface=dict(extensions=None, ), ) + output_map = dict(surface=dict(extensions=None,),) outputs = MRIMarchingCubes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py index 578d09d9fb..195472d4ad 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py @@ -4,49 +4,33 @@ def test_MRIPretess_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_filled=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-4, - ), - in_norm=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - keep=dict(argstr='-keep', ), - label=dict( - argstr='%s', - mandatory=True, - position=-3, - usedefault=True, - ), - nocorners=dict(argstr='-nocorners', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_filled=dict(argstr="%s", extensions=None, mandatory=True, position=-4,), + in_norm=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + keep=dict(argstr="-keep",), + label=dict(argstr="%s", mandatory=True, position=-3, usedefault=True,), + nocorners=dict(argstr="-nocorners",), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, keep_extension=True, - name_source=['in_filled'], - name_template='%s_pretesswm', + name_source=["in_filled"], + name_template="%s_pretesswm", position=-1, ), subjects_dir=dict(), - test=dict(argstr='-test', ), + test=dict(argstr="-test",), ) inputs = MRIPretess.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MRIPretess_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MRIPretess.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py index fab549d02f..03d9ccd2e4 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py @@ -4,80 +4,53 @@ def test_MRISPreproc_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), fsgd_file=dict( - argstr='--fsgd %s', - extensions=None, - xor=('subjects', 'fsgd_file', 'subject_file'), - ), - fwhm=dict( - argstr='--fwhm %f', - xor=['num_iters'], - ), - fwhm_source=dict( - argstr='--fwhm-src %f', - xor=['num_iters_source'], - ), - hemi=dict( - argstr='--hemi %s', - mandatory=True, - ), - num_iters=dict( - argstr='--niters %d', - xor=['fwhm'], - ), - num_iters_source=dict( - argstr='--niterssrc %d', - xor=['fwhm_source'], - ), - out_file=dict( - argstr='--out %s', + argstr="--fsgd %s", extensions=None, - genfile=True, - ), - 
proj_frac=dict(argstr='--projfrac %s', ), - smooth_cortex_only=dict(argstr='--smooth-cortex-only', ), - source_format=dict(argstr='--srcfmt %s', ), + xor=("subjects", "fsgd_file", "subject_file"), + ), + fwhm=dict(argstr="--fwhm %f", xor=["num_iters"],), + fwhm_source=dict(argstr="--fwhm-src %f", xor=["num_iters_source"],), + hemi=dict(argstr="--hemi %s", mandatory=True,), + num_iters=dict(argstr="--niters %d", xor=["fwhm"],), + num_iters_source=dict(argstr="--niterssrc %d", xor=["fwhm_source"],), + out_file=dict(argstr="--out %s", extensions=None, genfile=True,), + proj_frac=dict(argstr="--projfrac %s",), + smooth_cortex_only=dict(argstr="--smooth-cortex-only",), + source_format=dict(argstr="--srcfmt %s",), subject_file=dict( - argstr='--f %s', + argstr="--f %s", extensions=None, - xor=('subjects', 'fsgd_file', 'subject_file'), + xor=("subjects", "fsgd_file", "subject_file"), ), subjects=dict( - argstr='--s %s...', - xor=('subjects', 'fsgd_file', 'subject_file'), + argstr="--s %s...", xor=("subjects", "fsgd_file", "subject_file"), ), subjects_dir=dict(), surf_area=dict( - argstr='--area %s', - xor=('surf_measure', 'surf_measure_file', 'surf_area'), + argstr="--area %s", xor=("surf_measure", "surf_measure_file", "surf_area"), ), - surf_dir=dict(argstr='--surfdir %s', ), + surf_dir=dict(argstr="--surfdir %s",), surf_measure=dict( - argstr='--meas %s', - xor=('surf_measure', 'surf_measure_file', 'surf_area'), + argstr="--meas %s", xor=("surf_measure", "surf_measure_file", "surf_area"), ), surf_measure_file=dict( - argstr='--is %s...', - xor=('surf_measure', 'surf_measure_file', 'surf_area'), + argstr="--is %s...", xor=("surf_measure", "surf_measure_file", "surf_area"), ), - target=dict( - argstr='--target %s', - mandatory=True, - ), - vol_measure_file=dict(argstr='--iv %s %s...', ), + target=dict(argstr="--target %s", mandatory=True,), + vol_measure_file=dict(argstr="--iv %s %s...",), ) inputs = MRISPreproc.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MRISPreproc_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MRISPreproc.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py index d7a138015a..5a7a711263 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py @@ -4,99 +4,66 @@ def test_MRISPreprocReconAll_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), copy_inputs=dict(), - environ=dict( - nohash=True, - usedefault=True, - ), + environ=dict(nohash=True, usedefault=True,), fsgd_file=dict( - argstr='--fsgd %s', + argstr="--fsgd %s", extensions=None, - xor=('subjects', 'fsgd_file', 'subject_file'), - ), - fwhm=dict( - argstr='--fwhm %f', - xor=['num_iters'], - ), - fwhm_source=dict( - argstr='--fwhm-src %f', - xor=['num_iters_source'], - ), - hemi=dict( - argstr='--hemi %s', - mandatory=True, - ), - lh_surfreg_target=dict( - extensions=None, - requires=['surfreg_files'], - ), - num_iters=dict( - argstr='--niters %d', - xor=['fwhm'], - ), - num_iters_source=dict( - argstr='--niterssrc %d', - xor=['fwhm_source'], - ), - out_file=dict( - argstr='--out %s', - extensions=None, - genfile=True, - ), - 
proj_frac=dict(argstr='--projfrac %s', ), - rh_surfreg_target=dict( - extensions=None, - requires=['surfreg_files'], - ), - smooth_cortex_only=dict(argstr='--smooth-cortex-only', ), - source_format=dict(argstr='--srcfmt %s', ), + xor=("subjects", "fsgd_file", "subject_file"), + ), + fwhm=dict(argstr="--fwhm %f", xor=["num_iters"],), + fwhm_source=dict(argstr="--fwhm-src %f", xor=["num_iters_source"],), + hemi=dict(argstr="--hemi %s", mandatory=True,), + lh_surfreg_target=dict(extensions=None, requires=["surfreg_files"],), + num_iters=dict(argstr="--niters %d", xor=["fwhm"],), + num_iters_source=dict(argstr="--niterssrc %d", xor=["fwhm_source"],), + out_file=dict(argstr="--out %s", extensions=None, genfile=True,), + proj_frac=dict(argstr="--projfrac %s",), + rh_surfreg_target=dict(extensions=None, requires=["surfreg_files"],), + smooth_cortex_only=dict(argstr="--smooth-cortex-only",), + source_format=dict(argstr="--srcfmt %s",), subject_file=dict( - argstr='--f %s', + argstr="--f %s", extensions=None, - xor=('subjects', 'fsgd_file', 'subject_file'), + xor=("subjects", "fsgd_file", "subject_file"), ), subject_id=dict( - argstr='--s %s', + argstr="--s %s", usedefault=True, - xor=('subjects', 'fsgd_file', 'subject_file', 'subject_id'), + xor=("subjects", "fsgd_file", "subject_file", "subject_id"), ), subjects=dict( - argstr='--s %s...', - xor=('subjects', 'fsgd_file', 'subject_file'), + argstr="--s %s...", xor=("subjects", "fsgd_file", "subject_file"), ), subjects_dir=dict(), surf_area=dict( - argstr='--area %s', - xor=('surf_measure', 'surf_measure_file', 'surf_area'), + argstr="--area %s", xor=("surf_measure", "surf_measure_file", "surf_area"), ), - surf_dir=dict(argstr='--surfdir %s', ), + surf_dir=dict(argstr="--surfdir %s",), surf_measure=dict( - argstr='--meas %s', - xor=('surf_measure', 'surf_measure_file', 'surf_area'), + argstr="--meas %s", xor=("surf_measure", "surf_measure_file", "surf_area"), ), surf_measure_file=dict( - argstr='--meas %s', + argstr="--meas %s", extensions=None, - xor=('surf_measure', 'surf_measure_file', 'surf_area'), + xor=("surf_measure", "surf_measure_file", "surf_area"), ), surfreg_files=dict( - argstr='--surfreg %s', - requires=['lh_surfreg_target', 'rh_surfreg_target'], - ), - target=dict( - argstr='--target %s', - mandatory=True, + argstr="--surfreg %s", requires=["lh_surfreg_target", "rh_surfreg_target"], ), - vol_measure_file=dict(argstr='--iv %s %s...', ), + target=dict(argstr="--target %s", mandatory=True,), + vol_measure_file=dict(argstr="--iv %s %s...",), ) inputs = MRISPreprocReconAll.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MRISPreprocReconAll_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MRISPreprocReconAll.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py b/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py index 464eddb043..8bba694bf7 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py @@ -4,39 +4,24 @@ def test_MRITessellate_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), - label_value=dict( - 
argstr='%d', - mandatory=True, - position=-2, - ), - out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - position=-1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + label_value=dict(argstr="%d", mandatory=True, position=-2,), + out_file=dict(argstr="%s", extensions=None, genfile=True, position=-1,), subjects_dir=dict(), - tesselate_all_voxels=dict(argstr='-a', ), - use_real_RAS_coordinates=dict(argstr='-n', ), + tesselate_all_voxels=dict(argstr="-a",), + use_real_RAS_coordinates=dict(argstr="-n",), ) inputs = MRITessellate.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MRITessellate_outputs(): - output_map = dict(surface=dict(extensions=None, ), ) + output_map = dict(surface=dict(extensions=None,),) outputs = MRITessellate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py index 0bac91b343..560f7e4fce 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py @@ -4,75 +4,40 @@ def test_MRIsCALabel_inputs(): input_map = dict( - args=dict(argstr='%s', ), - aseg=dict( - argstr='-aseg %s', - extensions=None, - ), - canonsurf=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), - classifier=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s",), + aseg=dict(argstr="-aseg %s", extensions=None,), + canonsurf=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + classifier=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), copy_inputs=dict(), - curv=dict( - extensions=None, - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - hemisphere=dict( - argstr='%s', - mandatory=True, - position=-4, - ), - label=dict( - argstr='-l %s', - extensions=None, - ), + curv=dict(extensions=None, mandatory=True,), + environ=dict(nohash=True, usedefault=True,), + hemisphere=dict(argstr="%s", mandatory=True, position=-4,), + label=dict(argstr="-l %s", extensions=None,), num_threads=dict(), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, hash_files=False, keep_extension=True, - name_source=['hemisphere'], - name_template='%s.aparc.annot', + name_source=["hemisphere"], + name_template="%s.aparc.annot", position=-1, ), - seed=dict(argstr='-seed %d', ), - smoothwm=dict( - extensions=None, - mandatory=True, - ), - subject_id=dict( - argstr='%s', - mandatory=True, - position=-5, - usedefault=True, - ), + seed=dict(argstr="-seed %d",), + smoothwm=dict(extensions=None, mandatory=True,), + subject_id=dict(argstr="%s", mandatory=True, position=-5, usedefault=True,), subjects_dir=dict(), - sulc=dict( - extensions=None, - mandatory=True, - ), + sulc=dict(extensions=None, mandatory=True,), ) inputs = MRIsCALabel.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MRIsCALabel_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MRIsCALabel.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py index be72b4814f..521c1d5d6c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py @@ -4,43 +4,16 @@ def test_MRIsCalc_inputs(): input_map = dict( - action=dict( - argstr='%s', - mandatory=True, - position=-2, - ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file1=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), + action=dict(argstr="%s", mandatory=True, position=-2,), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file1=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), in_file2=dict( - argstr='%s', - extensions=None, - position=-1, - xor=['in_float', 'in_int'], - ), - in_float=dict( - argstr='%f', - position=-1, - xor=['in_file2', 'in_int'], - ), - in_int=dict( - argstr='%d', - position=-1, - xor=['in_file2', 'in_float'], - ), - out_file=dict( - argstr='-o %s', - extensions=None, - mandatory=True, + argstr="%s", extensions=None, position=-1, xor=["in_float", "in_int"], ), + in_float=dict(argstr="%f", position=-1, xor=["in_file2", "in_int"],), + in_int=dict(argstr="%d", position=-1, xor=["in_file2", "in_float"],), + out_file=dict(argstr="-o %s", extensions=None, mandatory=True,), subjects_dir=dict(), ) inputs = MRIsCalc.input_spec() @@ -48,8 +21,10 @@ def test_MRIsCalc_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MRIsCalc_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MRIsCalc.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py index 9dd2993621..56fd270efc 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py @@ -4,22 +4,11 @@ def test_MRIsCombine_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr='--combinesurfs %s', - mandatory=True, - position=1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_files=dict(argstr="--combinesurfs %s", mandatory=True, position=1,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - mandatory=True, - position=-1, + argstr="%s", extensions=None, genfile=True, mandatory=True, position=-1, ), subjects_dir=dict(), ) @@ -28,8 +17,10 @@ def test_MRIsCombine_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MRIsCombine_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MRIsCombine.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py index 4c84120d7e..6972ae4f33 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py @@ -4,73 +4,46 @@ def test_MRIsConvert_inputs(): 
input_map = dict( - annot_file=dict( - argstr='--annot %s', - extensions=None, - ), - args=dict(argstr='%s', ), - dataarray_num=dict(argstr='--da_num %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - functional_file=dict( - argstr='-f %s', - extensions=None, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - label_file=dict( - argstr='--label %s', - extensions=None, - ), - labelstats_outfile=dict( - argstr='--labelstats %s', - extensions=None, - ), - normal=dict(argstr='-n', ), - origname=dict(argstr='-o %s', ), - out_datatype=dict( - mandatory=True, - xor=['out_file'], - ), + annot_file=dict(argstr="--annot %s", extensions=None,), + args=dict(argstr="%s",), + dataarray_num=dict(argstr="--da_num %d",), + environ=dict(nohash=True, usedefault=True,), + functional_file=dict(argstr="-f %s", extensions=None,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + label_file=dict(argstr="--label %s", extensions=None,), + labelstats_outfile=dict(argstr="--labelstats %s", extensions=None,), + normal=dict(argstr="-n",), + origname=dict(argstr="-o %s",), + out_datatype=dict(mandatory=True, xor=["out_file"],), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, mandatory=True, position=-1, - xor=['out_datatype'], - ), - parcstats_file=dict( - argstr='--parcstats %s', - extensions=None, - ), - patch=dict(argstr='-p', ), - rescale=dict(argstr='-r', ), - scalarcurv_file=dict( - argstr='-c %s', - extensions=None, + xor=["out_datatype"], ), - scale=dict(argstr='-s %.3f', ), + parcstats_file=dict(argstr="--parcstats %s", extensions=None,), + patch=dict(argstr="-p",), + rescale=dict(argstr="-r",), + scalarcurv_file=dict(argstr="-c %s", extensions=None,), + scale=dict(argstr="-s %.3f",), subjects_dir=dict(), - talairachxfm_subjid=dict(argstr='-t %s', ), - to_scanner=dict(argstr='--to-scanner', ), - to_tkr=dict(argstr='--to-tkr', ), - vertex=dict(argstr='-v', ), - xyz_ascii=dict(argstr='-a', ), + talairachxfm_subjid=dict(argstr="-t %s",), + to_scanner=dict(argstr="--to-scanner",), + to_tkr=dict(argstr="--to-tkr",), + vertex=dict(argstr="-v",), + xyz_ascii=dict(argstr="-a",), ) inputs = MRIsConvert.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MRIsConvert_outputs(): - output_map = dict(converted=dict(extensions=None, ), ) + output_map = dict(converted=dict(extensions=None,),) outputs = MRIsConvert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py index 0b0297bcdd..b2d97f0d48 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py @@ -4,55 +4,33 @@ def test_MRIsExpand_inputs(): input_map = dict( - args=dict(argstr='%s', ), - distance=dict( - argstr='%g', - mandatory=True, - position=-2, - ), - dt=dict(argstr='-T %g', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + distance=dict(argstr="%g", mandatory=True, position=-2,), + dt=dict(argstr="-T %g",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=-3, - ), - nsurfaces=dict(argstr='-N %d', ), - out_name=dict( - argstr='%s', - position=-1, - usedefault=True, - ), - pial=dict( - 
argstr='-pial %s', - copyfile=False, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-3, ), - smooth_averages=dict(argstr='-A %d', ), - sphere=dict( - copyfile=False, - usedefault=True, - ), - spring=dict(argstr='-S %g', ), + nsurfaces=dict(argstr="-N %d",), + out_name=dict(argstr="%s", position=-1, usedefault=True,), + pial=dict(argstr="-pial %s", copyfile=False,), + smooth_averages=dict(argstr="-A %d",), + sphere=dict(copyfile=False, usedefault=True,), + spring=dict(argstr="-S %g",), subjects_dir=dict(), - thickness=dict(argstr='-thickness', ), - thickness_name=dict( - argstr='-thickness_name %s', - copyfile=False, - ), - write_iterations=dict(argstr='-W %d', ), + thickness=dict(argstr="-thickness",), + thickness_name=dict(argstr="-thickness_name %s", copyfile=False,), + write_iterations=dict(argstr="-W %d",), ) inputs = MRIsExpand.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MRIsExpand_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MRIsExpand.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py index 993d578485..aead890eff 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py @@ -4,35 +4,22 @@ def test_MRIsInflate_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='%s', - copyfile=True, - extensions=None, - mandatory=True, - position=-2, - ), - no_save_sulc=dict( - argstr='-no-save-sulc', - xor=['out_sulc'], + argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-2, ), + no_save_sulc=dict(argstr="-no-save-sulc", xor=["out_sulc"],), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, hash_files=False, keep_extension=True, - name_source=['in_file'], - name_template='%s.inflated', + name_source=["in_file"], + name_template="%s.inflated", position=-1, ), - out_sulc=dict( - extensions=None, - xor=['no_save_sulc'], - ), + out_sulc=dict(extensions=None, xor=["no_save_sulc"],), subjects_dir=dict(), ) inputs = MRIsInflate.input_spec() @@ -40,11 +27,10 @@ def test_MRIsInflate_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MRIsInflate_outputs(): - output_map = dict( - out_file=dict(extensions=None, ), - out_sulc=dict(extensions=None, ), - ) + output_map = dict(out_file=dict(extensions=None,), out_sulc=dict(extensions=None,),) outputs = MRIsInflate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py b/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py index eb1fbe4043..47575cf851 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py @@ -4,54 +4,29 @@ def test_MS_LDA_inputs(): input_map = dict( - args=dict(argstr='%s', ), - conform=dict(argstr='-conform', ), - environ=dict( - nohash=True, - usedefault=True, - ), - images=dict( - argstr='%s', - copyfile=False, - mandatory=True, - 
position=-1, - ), - label_file=dict( - argstr='-label %s', - extensions=None, - ), - lda_labels=dict( - argstr='-lda %s', - mandatory=True, - sep=' ', - ), - mask_file=dict( - argstr='-mask %s', - extensions=None, - ), - shift=dict(argstr='-shift %d', ), + args=dict(argstr="%s",), + conform=dict(argstr="-conform",), + environ=dict(nohash=True, usedefault=True,), + images=dict(argstr="%s", copyfile=False, mandatory=True, position=-1,), + label_file=dict(argstr="-label %s", extensions=None,), + lda_labels=dict(argstr="-lda %s", mandatory=True, sep=" ",), + mask_file=dict(argstr="-mask %s", extensions=None,), + shift=dict(argstr="-shift %d",), subjects_dir=dict(), - use_weights=dict(argstr='-W', ), - vol_synth_file=dict( - argstr='-synth %s', - extensions=None, - mandatory=True, - ), - weight_file=dict( - argstr='-weight %s', - extensions=None, - mandatory=True, - ), + use_weights=dict(argstr="-W",), + vol_synth_file=dict(argstr="-synth %s", extensions=None, mandatory=True,), + weight_file=dict(argstr="-weight %s", extensions=None, mandatory=True,), ) inputs = MS_LDA.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MS_LDA_outputs(): output_map = dict( - vol_synth_file=dict(extensions=None, ), - weight_file=dict(extensions=None, ), + vol_synth_file=dict(extensions=None,), weight_file=dict(extensions=None,), ) outputs = MS_LDA.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py b/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py index 48002e6051..3f7b6ac9ab 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py @@ -4,30 +4,21 @@ def test_MakeAverageSubject_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - out_name=dict( - argstr='--out %s', - extensions=None, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + out_name=dict(argstr="--out %s", extensions=None, usedefault=True,), subjects_dir=dict(), - subjects_ids=dict( - argstr='--subjects %s', - mandatory=True, - sep=' ', - ), + subjects_ids=dict(argstr="--subjects %s", mandatory=True, sep=" ",), ) inputs = MakeAverageSubject.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MakeAverageSubject_outputs(): - output_map = dict(average_subject_name=dict(), ) + output_map = dict(average_subject_name=dict(),) outputs = MakeAverageSubject.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py b/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py index 9cf56f1222..219150aef9 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py @@ -4,84 +4,45 @@ def test_MakeSurfaces_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), copy_inputs=dict(), - environ=dict( - nohash=True, - usedefault=True, - ), - fix_mtl=dict(argstr='-fix_mtl', ), - hemisphere=dict( - argstr='%s', - mandatory=True, - position=-1, - ), - in_T1=dict( - argstr='-T1 %s', - extensions=None, - ), - in_aseg=dict( - argstr='-aseg %s', - extensions=None, - ), - in_filled=dict( - 
extensions=None, - mandatory=True, - ), - in_label=dict( - extensions=None, - xor=['noaparc'], - ), - in_orig=dict( - argstr='-orig %s', - extensions=None, - mandatory=True, - ), - in_white=dict(extensions=None, ), - in_wm=dict( - extensions=None, - mandatory=True, - ), - longitudinal=dict(argstr='-long', ), - maximum=dict(argstr='-max %.1f', ), - mgz=dict(argstr='-mgz', ), - no_white=dict(argstr='-nowhite', ), - noaparc=dict( - argstr='-noaparc', - xor=['in_label'], - ), - orig_pial=dict( - argstr='-orig_pial %s', - extensions=None, - requires=['in_label'], - ), - orig_white=dict( - argstr='-orig_white %s', - extensions=None, - ), - subject_id=dict( - argstr='%s', - mandatory=True, - position=-2, - usedefault=True, - ), + environ=dict(nohash=True, usedefault=True,), + fix_mtl=dict(argstr="-fix_mtl",), + hemisphere=dict(argstr="%s", mandatory=True, position=-1,), + in_T1=dict(argstr="-T1 %s", extensions=None,), + in_aseg=dict(argstr="-aseg %s", extensions=None,), + in_filled=dict(extensions=None, mandatory=True,), + in_label=dict(extensions=None, xor=["noaparc"],), + in_orig=dict(argstr="-orig %s", extensions=None, mandatory=True,), + in_white=dict(extensions=None,), + in_wm=dict(extensions=None, mandatory=True,), + longitudinal=dict(argstr="-long",), + maximum=dict(argstr="-max %.1f",), + mgz=dict(argstr="-mgz",), + no_white=dict(argstr="-nowhite",), + noaparc=dict(argstr="-noaparc", xor=["in_label"],), + orig_pial=dict(argstr="-orig_pial %s", extensions=None, requires=["in_label"],), + orig_white=dict(argstr="-orig_white %s", extensions=None,), + subject_id=dict(argstr="%s", mandatory=True, position=-2, usedefault=True,), subjects_dir=dict(), - white=dict(argstr='-white %s', ), - white_only=dict(argstr='-whiteonly', ), + white=dict(argstr="-white %s",), + white_only=dict(argstr="-whiteonly",), ) inputs = MakeSurfaces.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MakeSurfaces_outputs(): output_map = dict( - out_area=dict(extensions=None, ), - out_cortex=dict(extensions=None, ), - out_curv=dict(extensions=None, ), - out_pial=dict(extensions=None, ), - out_thickness=dict(extensions=None, ), - out_white=dict(extensions=None, ), + out_area=dict(extensions=None,), + out_cortex=dict(extensions=None,), + out_curv=dict(extensions=None,), + out_pial=dict(extensions=None,), + out_thickness=dict(extensions=None,), + out_white=dict(extensions=None,), ) outputs = MakeSurfaces.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py b/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py index 905a8929cd..f639141960 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py @@ -4,45 +4,33 @@ def test_Normalize_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - gradient=dict(argstr='-g %d', ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - mask=dict( - argstr='-mask %s', - extensions=None, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + gradient=dict(argstr="-g %d",), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + mask=dict(argstr="-mask %s", extensions=None,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, hash_files=False, keep_extension=True, - name_source=['in_file'], - 
name_template='%s_norm', + name_source=["in_file"], + name_template="%s_norm", position=-1, ), - segmentation=dict( - argstr='-aseg %s', - extensions=None, - ), + segmentation=dict(argstr="-aseg %s", extensions=None,), subjects_dir=dict(), - transform=dict(extensions=None, ), + transform=dict(extensions=None,), ) inputs = Normalize.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Normalize_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Normalize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py b/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py index 712b4cfaac..da476e1cb3 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py @@ -4,128 +4,72 @@ def test_OneSampleTTest_inputs(): input_map = dict( - allow_ill_cond=dict(argstr='--illcond', ), - allow_repeated_subjects=dict(argstr='--allowsubjrep', ), - args=dict(argstr='%s', ), - calc_AR1=dict(argstr='--tar1', ), - check_opts=dict(argstr='--checkopts', ), - compute_log_y=dict(argstr='--logy', ), - contrast=dict(argstr='--C %s...', ), - cortex=dict( - argstr='--cortex', - xor=['label_file'], - ), - debug=dict(argstr='--debug', ), + allow_ill_cond=dict(argstr="--illcond",), + allow_repeated_subjects=dict(argstr="--allowsubjrep",), + args=dict(argstr="%s",), + calc_AR1=dict(argstr="--tar1",), + check_opts=dict(argstr="--checkopts",), + compute_log_y=dict(argstr="--logy",), + contrast=dict(argstr="--C %s...",), + cortex=dict(argstr="--cortex", xor=["label_file"],), + debug=dict(argstr="--debug",), design=dict( - argstr='--X %s', - extensions=None, - xor=('fsgd', 'design', 'one_sample'), - ), - diag=dict(argstr='--diag %d', ), - diag_cluster=dict(argstr='--diag-cluster', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixed_fx_dof=dict( - argstr='--ffxdof %d', - xor=['fixed_fx_dof_file'], + argstr="--X %s", extensions=None, xor=("fsgd", "design", "one_sample"), ), + diag=dict(argstr="--diag %d",), + diag_cluster=dict(argstr="--diag-cluster",), + environ=dict(nohash=True, usedefault=True,), + fixed_fx_dof=dict(argstr="--ffxdof %d", xor=["fixed_fx_dof_file"],), fixed_fx_dof_file=dict( - argstr='--ffxdofdat %d', - extensions=None, - xor=['fixed_fx_dof'], - ), - fixed_fx_var=dict( - argstr='--yffxvar %s', - extensions=None, - ), - force_perm=dict(argstr='--perm-force', ), - fsgd=dict( - argstr='--fsgd %s %s', - xor=('fsgd', 'design', 'one_sample'), - ), - fwhm=dict(argstr='--fwhm %f', ), - glm_dir=dict( - argstr='--glmdir %s', - genfile=True, + argstr="--ffxdofdat %d", extensions=None, xor=["fixed_fx_dof"], ), + fixed_fx_var=dict(argstr="--yffxvar %s", extensions=None,), + force_perm=dict(argstr="--perm-force",), + fsgd=dict(argstr="--fsgd %s %s", xor=("fsgd", "design", "one_sample"),), + fwhm=dict(argstr="--fwhm %f",), + glm_dir=dict(argstr="--glmdir %s", genfile=True,), hemi=dict(), - in_file=dict( - argstr='--y %s', - copyfile=False, - extensions=None, - mandatory=True, - ), - invert_mask=dict(argstr='--mask-inv', ), - label_file=dict( - argstr='--label %s', - extensions=None, - xor=['cortex'], - ), - mask_file=dict( - argstr='--mask %s', - extensions=None, - ), - no_contrast_ok=dict(argstr='--no-contrasts-ok', ), - 
no_est_fwhm=dict(argstr='--no-est-fwhm', ), - no_mask_smooth=dict(argstr='--no-mask-smooth', ), - no_prune=dict( - argstr='--no-prune', - xor=['prunethresh'], - ), + in_file=dict(argstr="--y %s", copyfile=False, extensions=None, mandatory=True,), + invert_mask=dict(argstr="--mask-inv",), + label_file=dict(argstr="--label %s", extensions=None, xor=["cortex"],), + mask_file=dict(argstr="--mask %s", extensions=None,), + no_contrast_ok=dict(argstr="--no-contrasts-ok",), + no_est_fwhm=dict(argstr="--no-est-fwhm",), + no_mask_smooth=dict(argstr="--no-mask-smooth",), + no_prune=dict(argstr="--no-prune", xor=["prunethresh"],), one_sample=dict( - argstr='--osgm', - xor=('one_sample', 'fsgd', 'design', 'contrast'), - ), - pca=dict(argstr='--pca', ), - per_voxel_reg=dict(argstr='--pvr %s...', ), - profile=dict(argstr='--profile %d', ), - prune=dict(argstr='--prune', ), - prune_thresh=dict( - argstr='--prune_thr %f', - xor=['noprune'], - ), - resynth_test=dict(argstr='--resynthtest %d', ), - save_cond=dict(argstr='--save-cond', ), - save_estimate=dict(argstr='--yhat-save', ), - save_res_corr_mtx=dict(argstr='--eres-scm', ), - save_residual=dict(argstr='--eres-save', ), - seed=dict(argstr='--seed %d', ), - self_reg=dict(argstr='--selfreg %d %d %d', ), - sim_done_file=dict( - argstr='--sim-done %s', - extensions=None, - ), - sim_sign=dict(argstr='--sim-sign %s', ), - simulation=dict(argstr='--sim %s %d %f %s', ), + argstr="--osgm", xor=("one_sample", "fsgd", "design", "contrast"), + ), + pca=dict(argstr="--pca",), + per_voxel_reg=dict(argstr="--pvr %s...",), + profile=dict(argstr="--profile %d",), + prune=dict(argstr="--prune",), + prune_thresh=dict(argstr="--prune_thr %f", xor=["noprune"],), + resynth_test=dict(argstr="--resynthtest %d",), + save_cond=dict(argstr="--save-cond",), + save_estimate=dict(argstr="--yhat-save",), + save_res_corr_mtx=dict(argstr="--eres-scm",), + save_residual=dict(argstr="--eres-save",), + seed=dict(argstr="--seed %d",), + self_reg=dict(argstr="--selfreg %d %d %d",), + sim_done_file=dict(argstr="--sim-done %s", extensions=None,), + sim_sign=dict(argstr="--sim-sign %s",), + simulation=dict(argstr="--sim %s %d %f %s",), subject_id=dict(), subjects_dir=dict(), - surf=dict( - argstr='--surf %s %s %s', - requires=['subject_id', 'hemi'], - ), - surf_geo=dict(usedefault=True, ), - synth=dict(argstr='--synth', ), - uniform=dict(argstr='--uniform %f %f', ), - var_fwhm=dict(argstr='--var-fwhm %f', ), - vox_dump=dict(argstr='--voxdump %d %d %d', ), - weight_file=dict( - extensions=None, - xor=['weighted_ls'], - ), - weight_inv=dict( - argstr='--w-inv', - xor=['weighted_ls'], - ), - weight_sqrt=dict( - argstr='--w-sqrt', - xor=['weighted_ls'], - ), + surf=dict(argstr="--surf %s %s %s", requires=["subject_id", "hemi"],), + surf_geo=dict(usedefault=True,), + synth=dict(argstr="--synth",), + uniform=dict(argstr="--uniform %f %f",), + var_fwhm=dict(argstr="--var-fwhm %f",), + vox_dump=dict(argstr="--voxdump %d %d %d",), + weight_file=dict(extensions=None, xor=["weighted_ls"],), + weight_inv=dict(argstr="--w-inv", xor=["weighted_ls"],), + weight_sqrt=dict(argstr="--w-sqrt", xor=["weighted_ls"],), weighted_ls=dict( - argstr='--wls %s', + argstr="--wls %s", extensions=None, - xor=('weight_file', 'weight_inv', 'weight_sqrt'), + xor=("weight_file", "weight_inv", "weight_sqrt"), ), ) inputs = OneSampleTTest.input_spec() @@ -133,25 +77,27 @@ def test_OneSampleTTest_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert 
getattr(inputs.traits()[key], metakey) == value + + def test_OneSampleTTest_outputs(): output_map = dict( - beta_file=dict(extensions=None, ), - dof_file=dict(extensions=None, ), - error_file=dict(extensions=None, ), - error_stddev_file=dict(extensions=None, ), - error_var_file=dict(extensions=None, ), - estimate_file=dict(extensions=None, ), - frame_eigenvectors=dict(extensions=None, ), + beta_file=dict(extensions=None,), + dof_file=dict(extensions=None,), + error_file=dict(extensions=None,), + error_stddev_file=dict(extensions=None,), + error_var_file=dict(extensions=None,), + estimate_file=dict(extensions=None,), + frame_eigenvectors=dict(extensions=None,), ftest_file=dict(), - fwhm_file=dict(extensions=None, ), + fwhm_file=dict(extensions=None,), gamma_file=dict(), gamma_var_file=dict(), glm_dir=dict(), - mask_file=dict(extensions=None, ), + mask_file=dict(extensions=None,), sig_file=dict(), - singular_values=dict(extensions=None, ), - spatial_eigenvectors=dict(extensions=None, ), - svd_stats_file=dict(extensions=None, ), + singular_values=dict(extensions=None,), + spatial_eigenvectors=dict(extensions=None,), + svd_stats_file=dict(extensions=None,), ) outputs = OneSampleTTest.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Paint.py b/nipype/interfaces/freesurfer/tests/test_auto_Paint.py index 33c76bbe12..0a93abec78 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Paint.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Paint.py @@ -4,34 +4,21 @@ def test_Paint_inputs(): input_map = dict( - args=dict(argstr='%s', ), - averages=dict(argstr='-a %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_surf=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s",), + averages=dict(argstr="-a %d",), + environ=dict(nohash=True, usedefault=True,), + in_surf=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, hash_files=False, keep_extension=False, - name_source=['in_surf'], - name_template='%s.avg_curv', + name_source=["in_surf"], + name_template="%s.avg_curv", position=-1, ), subjects_dir=dict(), - template=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), + template=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), template_param=dict(), ) inputs = Paint.input_spec() @@ -39,8 +26,10 @@ def test_Paint_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Paint_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Paint.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py b/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py index 85a8747bd0..cf42abe8b6 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py @@ -4,111 +4,49 @@ def test_ParcellationStats_inputs(): input_map = dict( - args=dict(argstr='%s', ), - aseg=dict( - extensions=None, - mandatory=True, - ), - brainmask=dict( - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s",), + aseg=dict(extensions=None, mandatory=True,), + brainmask=dict(extensions=None, mandatory=True,), copy_inputs=dict(), - cortex_label=dict(extensions=None, ), 
- environ=dict( - nohash=True, - usedefault=True, - ), - hemisphere=dict( - argstr='%s', - mandatory=True, - position=-2, - ), - in_annotation=dict( - argstr='-a %s', - extensions=None, - xor=['in_label'], - ), - in_cortex=dict( - argstr='-cortex %s', - extensions=None, - ), + cortex_label=dict(extensions=None,), + environ=dict(nohash=True, usedefault=True,), + hemisphere=dict(argstr="%s", mandatory=True, position=-2,), + in_annotation=dict(argstr="-a %s", extensions=None, xor=["in_label"],), + in_cortex=dict(argstr="-cortex %s", extensions=None,), in_label=dict( - argstr='-l %s', - extensions=None, - xor=['in_annotatoin', 'out_color'], - ), - lh_pial=dict( - extensions=None, - mandatory=True, - ), - lh_white=dict( - extensions=None, - mandatory=True, + argstr="-l %s", extensions=None, xor=["in_annotatoin", "out_color"], ), - mgz=dict(argstr='-mgz', ), + lh_pial=dict(extensions=None, mandatory=True,), + lh_white=dict(extensions=None, mandatory=True,), + mgz=dict(argstr="-mgz",), out_color=dict( - argstr='-c %s', - extensions=None, - genfile=True, - xor=['in_label'], + argstr="-c %s", extensions=None, genfile=True, xor=["in_label"], ), out_table=dict( - argstr='-f %s', - extensions=None, - genfile=True, - requires=['tabular_output'], - ), - rh_pial=dict( - extensions=None, - mandatory=True, - ), - rh_white=dict( - extensions=None, - mandatory=True, - ), - ribbon=dict( - extensions=None, - mandatory=True, - ), - subject_id=dict( - argstr='%s', - mandatory=True, - position=-3, - usedefault=True, + argstr="-f %s", extensions=None, genfile=True, requires=["tabular_output"], ), + rh_pial=dict(extensions=None, mandatory=True,), + rh_white=dict(extensions=None, mandatory=True,), + ribbon=dict(extensions=None, mandatory=True,), + subject_id=dict(argstr="%s", mandatory=True, position=-3, usedefault=True,), subjects_dir=dict(), - surface=dict( - argstr='%s', - position=-1, - ), - tabular_output=dict(argstr='-b', ), - th3=dict( - argstr='-th3', - requires=['cortex_label'], - ), - thickness=dict( - extensions=None, - mandatory=True, - ), - transform=dict( - extensions=None, - mandatory=True, - ), - wm=dict( - extensions=None, - mandatory=True, - ), + surface=dict(argstr="%s", position=-1,), + tabular_output=dict(argstr="-b",), + th3=dict(argstr="-th3", requires=["cortex_label"],), + thickness=dict(extensions=None, mandatory=True,), + transform=dict(extensions=None, mandatory=True,), + wm=dict(extensions=None, mandatory=True,), ) inputs = ParcellationStats.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ParcellationStats_outputs(): output_map = dict( - out_color=dict(extensions=None, ), - out_table=dict(extensions=None, ), + out_color=dict(extensions=None,), out_table=dict(extensions=None,), ) outputs = ParcellationStats.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py b/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py index 7fed3ad587..243e769266 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py @@ -4,31 +4,23 @@ def test_ParseDICOMDir_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dicom_dir=dict( - argstr='--d %s', - mandatory=True, - ), - dicom_info_file=dict( - argstr='--o %s', - extensions=None, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - sortbyrun=dict(argstr='--sortbyrun', ), + 
args=dict(argstr="%s",), + dicom_dir=dict(argstr="--d %s", mandatory=True,), + dicom_info_file=dict(argstr="--o %s", extensions=None, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + sortbyrun=dict(argstr="--sortbyrun",), subjects_dir=dict(), - summarize=dict(argstr='--summarize', ), + summarize=dict(argstr="--summarize",), ) inputs = ParseDICOMDir.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ParseDICOMDir_outputs(): - output_map = dict(dicom_info_file=dict(extensions=None, ), ) + output_map = dict(dicom_info_file=dict(extensions=None,),) outputs = ParseDICOMDir.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py index 2d4365e51e..29a9f0006b 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py @@ -4,221 +4,107 @@ def test_ReconAll_inputs(): input_map = dict( - FLAIR_file=dict( - argstr='-FLAIR %s', - extensions=None, - min_ver='5.3.0', - ), - T1_files=dict(argstr='-i %s...', ), - T2_file=dict( - argstr='-T2 %s', - extensions=None, - min_ver='5.3.0', - ), - args=dict(argstr='%s', ), - big_ventricles=dict(argstr='-bigventricles', ), - brainstem=dict(argstr='-brainstem-structures', ), - directive=dict( - argstr='-%s', - position=0, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - expert=dict( - argstr='-expert %s', - extensions=None, - ), - flags=dict(argstr='%s', ), - hemi=dict(argstr='-hemi %s', ), + FLAIR_file=dict(argstr="-FLAIR %s", extensions=None, min_ver="5.3.0",), + T1_files=dict(argstr="-i %s...",), + T2_file=dict(argstr="-T2 %s", extensions=None, min_ver="5.3.0",), + args=dict(argstr="%s",), + big_ventricles=dict(argstr="-bigventricles",), + brainstem=dict(argstr="-brainstem-structures",), + directive=dict(argstr="-%s", position=0, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + expert=dict(argstr="-expert %s", extensions=None,), + flags=dict(argstr="%s",), + hemi=dict(argstr="-hemi %s",), hippocampal_subfields_T1=dict( - argstr='-hippocampal-subfields-T1', - min_ver='6.0.0', + argstr="-hippocampal-subfields-T1", min_ver="6.0.0", ), hippocampal_subfields_T2=dict( - argstr='-hippocampal-subfields-T2 %s %s', - min_ver='6.0.0', - ), - hires=dict( - argstr='-hires', - min_ver='6.0.0', - ), - mprage=dict(argstr='-mprage', ), - mri_aparc2aseg=dict(xor=['expert'], ), - mri_ca_label=dict(xor=['expert'], ), - mri_ca_normalize=dict(xor=['expert'], ), - mri_ca_register=dict(xor=['expert'], ), - mri_edit_wm_with_aseg=dict(xor=['expert'], ), - mri_em_register=dict(xor=['expert'], ), - mri_fill=dict(xor=['expert'], ), - mri_mask=dict(xor=['expert'], ), - mri_normalize=dict(xor=['expert'], ), - mri_pretess=dict(xor=['expert'], ), - mri_remove_neck=dict(xor=['expert'], ), - mri_segment=dict(xor=['expert'], ), - mri_segstats=dict(xor=['expert'], ), - mri_tessellate=dict(xor=['expert'], ), - mri_watershed=dict(xor=['expert'], ), - mris_anatomical_stats=dict(xor=['expert'], ), - mris_ca_label=dict(xor=['expert'], ), - mris_fix_topology=dict(xor=['expert'], ), - mris_inflate=dict(xor=['expert'], ), - mris_make_surfaces=dict(xor=['expert'], ), - mris_register=dict(xor=['expert'], ), - mris_smooth=dict(xor=['expert'], ), - mris_sphere=dict(xor=['expert'], ), - mris_surf2vol=dict(xor=['expert'], 
), - mrisp_paint=dict(xor=['expert'], ), - openmp=dict(argstr='-openmp %d', ), - parallel=dict(argstr='-parallel', ), - subject_id=dict( - argstr='-subjid %s', - usedefault=True, - ), - subjects_dir=dict( - argstr='-sd %s', - genfile=True, - hash_files=False, - ), - talairach=dict(xor=['expert'], ), - use_FLAIR=dict( - argstr='-FLAIRpial', - min_ver='5.3.0', - xor=['use_T2'], - ), - use_T2=dict( - argstr='-T2pial', - min_ver='5.3.0', - xor=['use_FLAIR'], - ), - xopts=dict(argstr='-xopts-%s', ), + argstr="-hippocampal-subfields-T2 %s %s", min_ver="6.0.0", + ), + hires=dict(argstr="-hires", min_ver="6.0.0",), + mprage=dict(argstr="-mprage",), + mri_aparc2aseg=dict(xor=["expert"],), + mri_ca_label=dict(xor=["expert"],), + mri_ca_normalize=dict(xor=["expert"],), + mri_ca_register=dict(xor=["expert"],), + mri_edit_wm_with_aseg=dict(xor=["expert"],), + mri_em_register=dict(xor=["expert"],), + mri_fill=dict(xor=["expert"],), + mri_mask=dict(xor=["expert"],), + mri_normalize=dict(xor=["expert"],), + mri_pretess=dict(xor=["expert"],), + mri_remove_neck=dict(xor=["expert"],), + mri_segment=dict(xor=["expert"],), + mri_segstats=dict(xor=["expert"],), + mri_tessellate=dict(xor=["expert"],), + mri_watershed=dict(xor=["expert"],), + mris_anatomical_stats=dict(xor=["expert"],), + mris_ca_label=dict(xor=["expert"],), + mris_fix_topology=dict(xor=["expert"],), + mris_inflate=dict(xor=["expert"],), + mris_make_surfaces=dict(xor=["expert"],), + mris_register=dict(xor=["expert"],), + mris_smooth=dict(xor=["expert"],), + mris_sphere=dict(xor=["expert"],), + mris_surf2vol=dict(xor=["expert"],), + mrisp_paint=dict(xor=["expert"],), + openmp=dict(argstr="-openmp %d",), + parallel=dict(argstr="-parallel",), + subject_id=dict(argstr="-subjid %s", usedefault=True,), + subjects_dir=dict(argstr="-sd %s", genfile=True, hash_files=False,), + talairach=dict(xor=["expert"],), + use_FLAIR=dict(argstr="-FLAIRpial", min_ver="5.3.0", xor=["use_T2"],), + use_T2=dict(argstr="-T2pial", min_ver="5.3.0", xor=["use_FLAIR"],), + xopts=dict(argstr="-xopts-%s",), ) inputs = ReconAll.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ReconAll_outputs(): output_map = dict( - BA_stats=dict( - altkey='BA', - loc='stats', - ), - T1=dict( - extensions=None, - loc='mri', - ), - annot=dict( - altkey='*annot', - loc='label', - ), - aparc_a2009s_stats=dict( - altkey='aparc.a2009s', - loc='stats', - ), - aparc_aseg=dict( - altkey='aparc*aseg', - loc='mri', - ), - aparc_stats=dict( - altkey='aparc', - loc='stats', - ), - area_pial=dict( - altkey='area.pial', - loc='surf', - ), - aseg=dict( - extensions=None, - loc='mri', - ), - aseg_stats=dict( - altkey='aseg', - loc='stats', - ), - avg_curv=dict(loc='surf', ), - brain=dict( - extensions=None, - loc='mri', - ), - brainmask=dict( - extensions=None, - loc='mri', - ), - curv=dict(loc='surf', ), - curv_pial=dict( - altkey='curv.pial', - loc='surf', - ), - curv_stats=dict( - altkey='curv', - loc='stats', - ), - entorhinal_exvivo_stats=dict( - altkey='entorhinal_exvivo', - loc='stats', - ), - filled=dict( - extensions=None, - loc='mri', - ), - graymid=dict( - altkey=['graymid', 'midthickness'], - loc='surf', - ), - inflated=dict(loc='surf', ), - jacobian_white=dict(loc='surf', ), - label=dict( - altkey='*label', - loc='label', - ), - norm=dict( - extensions=None, - loc='mri', - ), - nu=dict( - extensions=None, - loc='mri', - ), - orig=dict( - extensions=None, - loc='mri', - ), - 
pial=dict(loc='surf', ), - rawavg=dict( - extensions=None, - loc='mri', - ), - ribbon=dict( - altkey='*ribbon', - loc='mri', - ), - smoothwm=dict(loc='surf', ), - sphere=dict(loc='surf', ), - sphere_reg=dict( - altkey='sphere.reg', - loc='surf', - ), + BA_stats=dict(altkey="BA", loc="stats",), + T1=dict(extensions=None, loc="mri",), + annot=dict(altkey="*annot", loc="label",), + aparc_a2009s_stats=dict(altkey="aparc.a2009s", loc="stats",), + aparc_aseg=dict(altkey="aparc*aseg", loc="mri",), + aparc_stats=dict(altkey="aparc", loc="stats",), + area_pial=dict(altkey="area.pial", loc="surf",), + aseg=dict(extensions=None, loc="mri",), + aseg_stats=dict(altkey="aseg", loc="stats",), + avg_curv=dict(loc="surf",), + brain=dict(extensions=None, loc="mri",), + brainmask=dict(extensions=None, loc="mri",), + curv=dict(loc="surf",), + curv_pial=dict(altkey="curv.pial", loc="surf",), + curv_stats=dict(altkey="curv", loc="stats",), + entorhinal_exvivo_stats=dict(altkey="entorhinal_exvivo", loc="stats",), + filled=dict(extensions=None, loc="mri",), + graymid=dict(altkey=["graymid", "midthickness"], loc="surf",), + inflated=dict(loc="surf",), + jacobian_white=dict(loc="surf",), + label=dict(altkey="*label", loc="label",), + norm=dict(extensions=None, loc="mri",), + nu=dict(extensions=None, loc="mri",), + orig=dict(extensions=None, loc="mri",), + pial=dict(loc="surf",), + rawavg=dict(extensions=None, loc="mri",), + ribbon=dict(altkey="*ribbon", loc="mri",), + smoothwm=dict(loc="surf",), + sphere=dict(loc="surf",), + sphere_reg=dict(altkey="sphere.reg", loc="surf",), subject_id=dict(), subjects_dir=dict(), - sulc=dict(loc='surf', ), - thickness=dict(loc='surf', ), - volume=dict(loc='surf', ), - white=dict(loc='surf', ), - wm=dict( - extensions=None, - loc='mri', - ), - wmparc=dict( - extensions=None, - loc='mri', - ), - wmparc_stats=dict( - altkey='wmparc', - loc='stats', - ), + sulc=dict(loc="surf",), + thickness=dict(loc="surf",), + volume=dict(loc="surf",), + white=dict(loc="surf",), + wm=dict(extensions=None, loc="mri",), + wmparc=dict(extensions=None, loc="mri",), + wmparc_stats=dict(altkey="wmparc", loc="stats",), ) outputs = ReconAll.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Register.py b/nipype/interfaces/freesurfer/tests/test_auto_Register.py index 2d940d131d..d2add3db5b 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Register.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Register.py @@ -4,52 +4,27 @@ def test_Register_inputs(): input_map = dict( - args=dict(argstr='%s', ), - curv=dict( - argstr='-curv', - requires=['in_smoothwm'], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_smoothwm=dict( - copyfile=True, - extensions=None, - ), - in_sulc=dict( - copyfile=True, - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s",), + curv=dict(argstr="-curv", requires=["in_smoothwm"],), + environ=dict(nohash=True, usedefault=True,), + in_smoothwm=dict(copyfile=True, extensions=None,), + in_sulc=dict(copyfile=True, extensions=None, mandatory=True,), in_surf=dict( - argstr='%s', - copyfile=True, - extensions=None, - mandatory=True, - position=-3, - ), - out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - position=-1, + argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-3, ), + out_file=dict(argstr="%s", extensions=None, genfile=True, position=-1,), subjects_dir=dict(), - target=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), + target=dict(argstr="%s", extensions=None, 
mandatory=True, position=-2,), ) inputs = Register.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Register_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Register.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py b/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py index 405a9e5c4f..39a7e754bb 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py @@ -4,49 +4,25 @@ def test_RegisterAVItoTalairach_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=0, - ), - out_file=dict( - argstr='%s', - extensions=None, - position=3, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), + out_file=dict(argstr="%s", extensions=None, position=3, usedefault=True,), subjects_dir=dict(), - target=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=1, - ), - vox2vox=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), + target=dict(argstr="%s", extensions=None, mandatory=True, position=1,), + vox2vox=dict(argstr="%s", extensions=None, mandatory=True, position=2,), ) inputs = RegisterAVItoTalairach.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_RegisterAVItoTalairach_outputs(): output_map = dict( - log_file=dict( - extensions=None, - usedefault=True, - ), - out_file=dict(extensions=None, ), + log_file=dict(extensions=None, usedefault=True,), + out_file=dict(extensions=None,), ) outputs = RegisterAVItoTalairach.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py b/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py index 6258e7b0b7..fd459f14a7 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py @@ -4,53 +4,32 @@ def test_RelabelHypointensities_inputs(): input_map = dict( - args=dict(argstr='%s', ), - aseg=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - lh_white=dict( - copyfile=True, - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s",), + aseg=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + environ=dict(nohash=True, usedefault=True,), + lh_white=dict(copyfile=True, extensions=None, mandatory=True,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, hash_files=False, keep_extension=False, - name_source=['aseg'], - name_template='%s.hypos.mgz', + name_source=["aseg"], + name_template="%s.hypos.mgz", position=-1, ), - rh_white=dict( - copyfile=True, - extensions=None, - mandatory=True, - ), + rh_white=dict(copyfile=True, extensions=None, mandatory=True,), subjects_dir=dict(), - surf_directory=dict( - argstr='%s', - position=-2, - usedefault=True, - ), + 
surf_directory=dict(argstr="%s", position=-2, usedefault=True,), ) inputs = RelabelHypointensities.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_RelabelHypointensities_outputs(): - output_map = dict(out_file=dict( - argstr='%s', - extensions=None, - ), ) + output_map = dict(out_file=dict(argstr="%s", extensions=None,),) outputs = RelabelHypointensities.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py b/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py index 2d4a67045b..d94124a82b 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py @@ -4,25 +4,18 @@ def test_RemoveIntersection_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='%s', - copyfile=True, - extensions=None, - mandatory=True, - position=-2, + argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-2, ), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, hash_files=False, keep_extension=True, - name_source=['in_file'], - name_template='%s', + name_source=["in_file"], + name_template="%s", position=-1, ), subjects_dir=dict(), @@ -32,8 +25,10 @@ def test_RemoveIntersection_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_RemoveIntersection_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = RemoveIntersection.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py b/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py index 18dc6f9311..4050db776b 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py @@ -4,48 +4,32 @@ def test_RemoveNeck_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-4, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-4,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, hash_files=False, keep_extension=True, - name_source=['in_file'], - name_template='%s_noneck', + name_source=["in_file"], + name_template="%s_noneck", position=-1, ), - radius=dict(argstr='-radius %d', ), + radius=dict(argstr="-radius %d",), subjects_dir=dict(), - template=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - transform=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), + template=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + transform=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), ) inputs = RemoveNeck.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_RemoveNeck_outputs(): - output_map = 
dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = RemoveNeck.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Resample.py b/nipype/interfaces/freesurfer/tests/test_auto_Resample.py index 7e771e10a0..0dccad303c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Resample.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Resample.py @@ -4,36 +4,24 @@ def test_Resample_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=-2,), resampled_file=dict( - argstr='-o %s', - extensions=None, - genfile=True, - position=-1, + argstr="-o %s", extensions=None, genfile=True, position=-1, ), subjects_dir=dict(), - voxel_size=dict( - argstr='-vs %.2f %.2f %.2f', - mandatory=True, - ), + voxel_size=dict(argstr="-vs %.2f %.2f %.2f", mandatory=True,), ) inputs = Resample.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Resample_outputs(): - output_map = dict(resampled_file=dict(extensions=None, ), ) + output_map = dict(resampled_file=dict(extensions=None,),) outputs = Resample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py b/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py index 98e7de44c0..7dde230eb7 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py @@ -4,85 +4,56 @@ def test_RobustRegister_inputs(): input_map = dict( - args=dict(argstr='%s', ), - auto_sens=dict( - argstr='--satit', - mandatory=True, - xor=['outlier_sens'], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - est_int_scale=dict(argstr='--iscale', ), - force_double=dict(argstr='--doubleprec', ), - force_float=dict(argstr='--floattype', ), - half_source=dict(argstr='--halfmov %s', ), - half_source_xfm=dict(argstr='--halfmovlta %s', ), - half_targ=dict(argstr='--halfdst %s', ), - half_targ_xfm=dict(argstr='--halfdstlta %s', ), - half_weights=dict(argstr='--halfweights %s', ), - high_iterations=dict(argstr='--highit %d', ), - in_xfm_file=dict( - argstr='--transform', - extensions=None, - ), - init_orient=dict(argstr='--initorient', ), - iteration_thresh=dict(argstr='--epsit %.3f', ), - least_squares=dict(argstr='--leastsquares', ), - mask_source=dict( - argstr='--maskmov %s', - extensions=None, - ), - mask_target=dict( - argstr='--maskdst %s', - extensions=None, - ), - max_iterations=dict(argstr='--maxit %d', ), - no_init=dict(argstr='--noinit', ), - no_multi=dict(argstr='--nomulti', ), - out_reg_file=dict( - argstr='--lta %s', - usedefault=True, - ), - outlier_limit=dict(argstr='--wlimit %.3f', ), - outlier_sens=dict( - argstr='--sat %.4f', - mandatory=True, - xor=['auto_sens'], - ), - registered_file=dict(argstr='--warp %s', ), - source_file=dict( - argstr='--mov %s', - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s",), + auto_sens=dict(argstr="--satit", mandatory=True, xor=["outlier_sens"],), + environ=dict(nohash=True, usedefault=True,), + est_int_scale=dict(argstr="--iscale",), 
+ force_double=dict(argstr="--doubleprec",), + force_float=dict(argstr="--floattype",), + half_source=dict(argstr="--halfmov %s",), + half_source_xfm=dict(argstr="--halfmovlta %s",), + half_targ=dict(argstr="--halfdst %s",), + half_targ_xfm=dict(argstr="--halfdstlta %s",), + half_weights=dict(argstr="--halfweights %s",), + high_iterations=dict(argstr="--highit %d",), + in_xfm_file=dict(argstr="--transform", extensions=None,), + init_orient=dict(argstr="--initorient",), + iteration_thresh=dict(argstr="--epsit %.3f",), + least_squares=dict(argstr="--leastsquares",), + mask_source=dict(argstr="--maskmov %s", extensions=None,), + mask_target=dict(argstr="--maskdst %s", extensions=None,), + max_iterations=dict(argstr="--maxit %d",), + no_init=dict(argstr="--noinit",), + no_multi=dict(argstr="--nomulti",), + out_reg_file=dict(argstr="--lta %s", usedefault=True,), + outlier_limit=dict(argstr="--wlimit %.3f",), + outlier_sens=dict(argstr="--sat %.4f", mandatory=True, xor=["auto_sens"],), + registered_file=dict(argstr="--warp %s",), + source_file=dict(argstr="--mov %s", extensions=None, mandatory=True,), subjects_dir=dict(), - subsample_thresh=dict(argstr='--subsample %d', ), - target_file=dict( - argstr='--dst %s', - extensions=None, - mandatory=True, - ), - trans_only=dict(argstr='--transonly', ), - weights_file=dict(argstr='--weights %s', ), - write_vo2vox=dict(argstr='--vox2vox', ), + subsample_thresh=dict(argstr="--subsample %d",), + target_file=dict(argstr="--dst %s", extensions=None, mandatory=True,), + trans_only=dict(argstr="--transonly",), + weights_file=dict(argstr="--weights %s",), + write_vo2vox=dict(argstr="--vox2vox",), ) inputs = RobustRegister.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_RobustRegister_outputs(): output_map = dict( - half_source=dict(extensions=None, ), - half_source_xfm=dict(extensions=None, ), - half_targ=dict(extensions=None, ), - half_targ_xfm=dict(extensions=None, ), - half_weights=dict(extensions=None, ), - out_reg_file=dict(extensions=None, ), - registered_file=dict(extensions=None, ), - weights_file=dict(extensions=None, ), + half_source=dict(extensions=None,), + half_source_xfm=dict(extensions=None,), + half_targ=dict(extensions=None,), + half_targ_xfm=dict(extensions=None,), + half_weights=dict(extensions=None,), + out_reg_file=dict(extensions=None,), + registered_file=dict(extensions=None,), + weights_file=dict(extensions=None,), ) outputs = RobustRegister.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py b/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py index d56a11c1b9..3ee33a567e 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py @@ -4,52 +4,41 @@ def test_RobustTemplate_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), auto_detect_sensitivity=dict( - argstr='--satit', - mandatory=True, - xor=['outlier_sensitivity'], + argstr="--satit", mandatory=True, xor=["outlier_sensitivity"], ), - average_metric=dict(argstr='--average %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixed_timepoint=dict(argstr='--fixtp', ), - in_files=dict( - argstr='--mov %s', - mandatory=True, - ), - in_intensity_scales=dict(argstr='--iscalein %s', ), - initial_timepoint=dict(argstr='--inittp %d', ), - initial_transforms=dict(argstr='--ixforms %s', ), - 
intensity_scaling=dict(argstr='--iscale', ), - no_iteration=dict(argstr='--noit', ), + average_metric=dict(argstr="--average %d",), + environ=dict(nohash=True, usedefault=True,), + fixed_timepoint=dict(argstr="--fixtp",), + in_files=dict(argstr="--mov %s", mandatory=True,), + in_intensity_scales=dict(argstr="--iscalein %s",), + initial_timepoint=dict(argstr="--inittp %d",), + initial_transforms=dict(argstr="--ixforms %s",), + intensity_scaling=dict(argstr="--iscale",), + no_iteration=dict(argstr="--noit",), num_threads=dict(), out_file=dict( - argstr='--template %s', - extensions=None, - mandatory=True, - usedefault=True, + argstr="--template %s", extensions=None, mandatory=True, usedefault=True, ), outlier_sensitivity=dict( - argstr='--sat %.4f', - mandatory=True, - xor=['auto_detect_sensitivity'], + argstr="--sat %.4f", mandatory=True, xor=["auto_detect_sensitivity"], ), - scaled_intensity_outputs=dict(argstr='--iscaleout %s', ), + scaled_intensity_outputs=dict(argstr="--iscaleout %s",), subjects_dir=dict(), - subsample_threshold=dict(argstr='--subsample %d', ), - transform_outputs=dict(argstr='--lta %s', ), + subsample_threshold=dict(argstr="--subsample %d",), + transform_outputs=dict(argstr="--lta %s",), ) inputs = RobustTemplate.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_RobustTemplate_outputs(): output_map = dict( - out_file=dict(extensions=None, ), + out_file=dict(extensions=None,), scaled_intensity_outputs=dict(), transform_outputs=dict(), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py b/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py index 44fd5885d2..67cc1fd244 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py @@ -4,116 +4,76 @@ def test_SampleToSurface_inputs(): input_map = dict( - apply_rot=dict(argstr='--rot %.3f %.3f %.3f', ), - apply_trans=dict(argstr='--trans %.3f %.3f %.3f', ), - args=dict(argstr='%s', ), - cortex_mask=dict( - argstr='--cortex', - xor=['mask_label'], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fix_tk_reg=dict(argstr='--fixtkreg', ), - float2int_method=dict(argstr='--float2int %s', ), - frame=dict(argstr='--frame %d', ), - hemi=dict( - argstr='--hemi %s', - mandatory=True, - ), - hits_file=dict(argstr='--srchit %s', ), - hits_type=dict(argstr='--srchit_type', ), - ico_order=dict( - argstr='--icoorder %d', - requires=['target_subject'], - ), - interp_method=dict(argstr='--interp %s', ), - mask_label=dict( - argstr='--mask %s', - extensions=None, - xor=['cortex_mask'], - ), + apply_rot=dict(argstr="--rot %.3f %.3f %.3f",), + apply_trans=dict(argstr="--trans %.3f %.3f %.3f",), + args=dict(argstr="%s",), + cortex_mask=dict(argstr="--cortex", xor=["mask_label"],), + environ=dict(nohash=True, usedefault=True,), + fix_tk_reg=dict(argstr="--fixtkreg",), + float2int_method=dict(argstr="--float2int %s",), + frame=dict(argstr="--frame %d",), + hemi=dict(argstr="--hemi %s", mandatory=True,), + hits_file=dict(argstr="--srchit %s",), + hits_type=dict(argstr="--srchit_type",), + ico_order=dict(argstr="--icoorder %d", requires=["target_subject"],), + interp_method=dict(argstr="--interp %s",), + mask_label=dict(argstr="--mask %s", extensions=None, xor=["cortex_mask"],), mni152reg=dict( - argstr='--mni152reg', + argstr="--mni152reg", mandatory=True, - xor=['reg_file', 'reg_header', 
'mni152reg'], - ), - no_reshape=dict( - argstr='--noreshape', - xor=['reshape'], - ), - out_file=dict( - argstr='--o %s', - extensions=None, - genfile=True, - ), - out_type=dict(argstr='--out_type %s', ), - override_reg_subj=dict( - argstr='--srcsubject %s', - requires=['subject_id'], - ), - projection_stem=dict( - mandatory=True, - xor=['sampling_method'], - ), - reference_file=dict( - argstr='--ref %s', - extensions=None, - ), + xor=["reg_file", "reg_header", "mni152reg"], + ), + no_reshape=dict(argstr="--noreshape", xor=["reshape"],), + out_file=dict(argstr="--o %s", extensions=None, genfile=True,), + out_type=dict(argstr="--out_type %s",), + override_reg_subj=dict(argstr="--srcsubject %s", requires=["subject_id"],), + projection_stem=dict(mandatory=True, xor=["sampling_method"],), + reference_file=dict(argstr="--ref %s", extensions=None,), reg_file=dict( - argstr='--reg %s', + argstr="--reg %s", extensions=None, mandatory=True, - xor=['reg_file', 'reg_header', 'mni152reg'], + xor=["reg_file", "reg_header", "mni152reg"], ), reg_header=dict( - argstr='--regheader %s', + argstr="--regheader %s", mandatory=True, - requires=['subject_id'], - xor=['reg_file', 'reg_header', 'mni152reg'], - ), - reshape=dict( - argstr='--reshape', - xor=['no_reshape'], + requires=["subject_id"], + xor=["reg_file", "reg_header", "mni152reg"], ), - reshape_slices=dict(argstr='--rf %d', ), + reshape=dict(argstr="--reshape", xor=["no_reshape"],), + reshape_slices=dict(argstr="--rf %d",), sampling_method=dict( - argstr='%s', + argstr="%s", mandatory=True, - requires=['sampling_range', 'sampling_units'], - xor=['projection_stem'], + requires=["sampling_range", "sampling_units"], + xor=["projection_stem"], ), sampling_range=dict(), sampling_units=dict(), - scale_input=dict(argstr='--scale %.3f', ), - smooth_surf=dict(argstr='--surf-fwhm %.3f', ), - smooth_vol=dict(argstr='--fwhm %.3f', ), - source_file=dict( - argstr='--mov %s', - extensions=None, - mandatory=True, - ), + scale_input=dict(argstr="--scale %.3f",), + smooth_surf=dict(argstr="--surf-fwhm %.3f",), + smooth_vol=dict(argstr="--fwhm %.3f",), + source_file=dict(argstr="--mov %s", extensions=None, mandatory=True,), subject_id=dict(), subjects_dir=dict(), - surf_reg=dict( - argstr='--surfreg %s', - requires=['target_subject'], - ), - surface=dict(argstr='--surf %s', ), - target_subject=dict(argstr='--trgsubject %s', ), - vox_file=dict(argstr='--nvox %s', ), + surf_reg=dict(argstr="--surfreg %s", requires=["target_subject"],), + surface=dict(argstr="--surf %s",), + target_subject=dict(argstr="--trgsubject %s",), + vox_file=dict(argstr="--nvox %s",), ) inputs = SampleToSurface.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SampleToSurface_outputs(): output_map = dict( - hits_file=dict(extensions=None, ), - out_file=dict(extensions=None, ), - vox_file=dict(extensions=None, ), + hits_file=dict(extensions=None,), + out_file=dict(extensions=None,), + vox_file=dict(extensions=None,), ) outputs = SampleToSurface.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py b/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py index b330c689e9..5cb7321d50 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py @@ -5,110 +5,90 @@ def test_SegStats_inputs(): input_map = dict( annot=dict( - argstr='--annot %s %s %s', + argstr="--annot %s %s %s", 
mandatory=True, - xor=('segmentation_file', 'annot', 'surf_label'), + xor=("segmentation_file", "annot", "surf_label"), ), - args=dict(argstr='%s', ), - avgwf_file=dict(argstr='--avgwfvol %s', ), - avgwf_txt_file=dict(argstr='--avgwf %s', ), - brain_vol=dict(argstr='--%s', ), - brainmask_file=dict( - argstr='--brainmask %s', - extensions=None, - ), - calc_power=dict(argstr='--%s', ), - calc_snr=dict(argstr='--snr', ), + args=dict(argstr="%s",), + avgwf_file=dict(argstr="--avgwfvol %s",), + avgwf_txt_file=dict(argstr="--avgwf %s",), + brain_vol=dict(argstr="--%s",), + brainmask_file=dict(argstr="--brainmask %s", extensions=None,), + calc_power=dict(argstr="--%s",), + calc_snr=dict(argstr="--snr",), color_table_file=dict( - argstr='--ctab %s', + argstr="--ctab %s", extensions=None, - xor=('color_table_file', 'default_color_table', 'gca_color_table'), + xor=("color_table_file", "default_color_table", "gca_color_table"), ), - cortex_vol_from_surf=dict(argstr='--surf-ctx-vol', ), + cortex_vol_from_surf=dict(argstr="--surf-ctx-vol",), default_color_table=dict( - argstr='--ctab-default', - xor=('color_table_file', 'default_color_table', 'gca_color_table'), + argstr="--ctab-default", + xor=("color_table_file", "default_color_table", "gca_color_table"), ), - empty=dict(argstr='--empty', ), - environ=dict( - nohash=True, - usedefault=True, - ), - etiv=dict(argstr='--etiv', ), + empty=dict(argstr="--empty",), + environ=dict(nohash=True, usedefault=True,), + etiv=dict(argstr="--etiv",), etiv_only=dict(), - euler=dict(argstr='--euler', ), - exclude_ctx_gm_wm=dict(argstr='--excl-ctxgmwm', ), - exclude_id=dict(argstr='--excludeid %d', ), - frame=dict(argstr='--frame %d', ), + euler=dict(argstr="--euler",), + exclude_ctx_gm_wm=dict(argstr="--excl-ctxgmwm",), + exclude_id=dict(argstr="--excludeid %d",), + frame=dict(argstr="--frame %d",), gca_color_table=dict( - argstr='--ctab-gca %s', - extensions=None, - xor=('color_table_file', 'default_color_table', 'gca_color_table'), - ), - in_file=dict( - argstr='--i %s', - extensions=None, - ), - in_intensity=dict( - argstr='--in %s --in-intensity-name %s', + argstr="--ctab-gca %s", extensions=None, + xor=("color_table_file", "default_color_table", "gca_color_table"), ), + in_file=dict(argstr="--i %s", extensions=None,), + in_intensity=dict(argstr="--in %s --in-intensity-name %s", extensions=None,), intensity_units=dict( - argstr='--in-intensity-units %s', - requires=['in_intensity'], + argstr="--in-intensity-units %s", requires=["in_intensity"], ), - mask_erode=dict(argstr='--maskerode %d', ), - mask_file=dict( - argstr='--mask %s', - extensions=None, - ), - mask_frame=dict(requires=['mask_file'], ), - mask_invert=dict(argstr='--maskinvert', ), + mask_erode=dict(argstr="--maskerode %d",), + mask_file=dict(argstr="--mask %s", extensions=None,), + mask_frame=dict(requires=["mask_file"],), + mask_invert=dict(argstr="--maskinvert",), mask_sign=dict(), - mask_thresh=dict(argstr='--maskthresh %f', ), - multiply=dict(argstr='--mul %f', ), - non_empty_only=dict(argstr='--nonempty', ), - partial_volume_file=dict( - argstr='--pv %s', - extensions=None, - ), - segment_id=dict(argstr='--id %s...', ), + mask_thresh=dict(argstr="--maskthresh %f",), + multiply=dict(argstr="--mul %f",), + non_empty_only=dict(argstr="--nonempty",), + partial_volume_file=dict(argstr="--pv %s", extensions=None,), + segment_id=dict(argstr="--id %s...",), segmentation_file=dict( - argstr='--seg %s', + argstr="--seg %s", extensions=None, mandatory=True, - xor=('segmentation_file', 'annot', 
'surf_label'), + xor=("segmentation_file", "annot", "surf_label"), ), - sf_avg_file=dict(argstr='--sfavg %s', ), - subcort_gm=dict(argstr='--subcortgray', ), + sf_avg_file=dict(argstr="--sfavg %s",), + subcort_gm=dict(argstr="--subcortgray",), subjects_dir=dict(), summary_file=dict( - argstr='--sum %s', - extensions=None, - genfile=True, - position=-1, + argstr="--sum %s", extensions=None, genfile=True, position=-1, ), - supratent=dict(argstr='--supratent', ), + supratent=dict(argstr="--supratent",), surf_label=dict( - argstr='--slabel %s %s %s', + argstr="--slabel %s %s %s", mandatory=True, - xor=('segmentation_file', 'annot', 'surf_label'), + xor=("segmentation_file", "annot", "surf_label"), ), - total_gray=dict(argstr='--totalgray', ), - vox=dict(argstr='--vox %s', ), - wm_vol_from_surf=dict(argstr='--surf-wm-vol', ), + total_gray=dict(argstr="--totalgray",), + vox=dict(argstr="--vox %s",), + wm_vol_from_surf=dict(argstr="--surf-wm-vol",), ) inputs = SegStats.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SegStats_outputs(): output_map = dict( - avgwf_file=dict(extensions=None, ), - avgwf_txt_file=dict(extensions=None, ), - sf_avg_file=dict(extensions=None, ), - summary_file=dict(extensions=None, ), + avgwf_file=dict(extensions=None,), + avgwf_txt_file=dict(extensions=None,), + sf_avg_file=dict(extensions=None,), + summary_file=dict(extensions=None,), ) outputs = SegStats.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py index 11d1972e16..2301087a3b 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py @@ -5,150 +5,102 @@ def test_SegStatsReconAll_inputs(): input_map = dict( annot=dict( - argstr='--annot %s %s %s', + argstr="--annot %s %s %s", mandatory=True, - xor=('segmentation_file', 'annot', 'surf_label'), - ), - args=dict(argstr='%s', ), - aseg=dict(extensions=None, ), - avgwf_file=dict(argstr='--avgwfvol %s', ), - avgwf_txt_file=dict(argstr='--avgwf %s', ), - brain_vol=dict(argstr='--%s', ), - brainmask_file=dict( - argstr='--brainmask %s', - extensions=None, - ), - calc_power=dict(argstr='--%s', ), - calc_snr=dict(argstr='--snr', ), + xor=("segmentation_file", "annot", "surf_label"), + ), + args=dict(argstr="%s",), + aseg=dict(extensions=None,), + avgwf_file=dict(argstr="--avgwfvol %s",), + avgwf_txt_file=dict(argstr="--avgwf %s",), + brain_vol=dict(argstr="--%s",), + brainmask_file=dict(argstr="--brainmask %s", extensions=None,), + calc_power=dict(argstr="--%s",), + calc_snr=dict(argstr="--snr",), color_table_file=dict( - argstr='--ctab %s', + argstr="--ctab %s", extensions=None, - xor=('color_table_file', 'default_color_table', 'gca_color_table'), + xor=("color_table_file", "default_color_table", "gca_color_table"), ), copy_inputs=dict(), - cortex_vol_from_surf=dict(argstr='--surf-ctx-vol', ), + cortex_vol_from_surf=dict(argstr="--surf-ctx-vol",), default_color_table=dict( - argstr='--ctab-default', - xor=('color_table_file', 'default_color_table', 'gca_color_table'), + argstr="--ctab-default", + xor=("color_table_file", "default_color_table", "gca_color_table"), ), - empty=dict(argstr='--empty', ), - environ=dict( - nohash=True, - usedefault=True, - ), - etiv=dict(argstr='--etiv', ), + empty=dict(argstr="--empty",), + environ=dict(nohash=True, 
usedefault=True,), + etiv=dict(argstr="--etiv",), etiv_only=dict(), - euler=dict(argstr='--euler', ), - exclude_ctx_gm_wm=dict(argstr='--excl-ctxgmwm', ), - exclude_id=dict(argstr='--excludeid %d', ), - frame=dict(argstr='--frame %d', ), + euler=dict(argstr="--euler",), + exclude_ctx_gm_wm=dict(argstr="--excl-ctxgmwm",), + exclude_id=dict(argstr="--excludeid %d",), + frame=dict(argstr="--frame %d",), gca_color_table=dict( - argstr='--ctab-gca %s', - extensions=None, - xor=('color_table_file', 'default_color_table', 'gca_color_table'), - ), - in_file=dict( - argstr='--i %s', - extensions=None, - ), - in_intensity=dict( - argstr='--in %s --in-intensity-name %s', + argstr="--ctab-gca %s", extensions=None, + xor=("color_table_file", "default_color_table", "gca_color_table"), ), + in_file=dict(argstr="--i %s", extensions=None,), + in_intensity=dict(argstr="--in %s --in-intensity-name %s", extensions=None,), intensity_units=dict( - argstr='--in-intensity-units %s', - requires=['in_intensity'], - ), - lh_orig_nofix=dict( - extensions=None, - mandatory=True, - ), - lh_pial=dict( - extensions=None, - mandatory=True, - ), - lh_white=dict( - extensions=None, - mandatory=True, - ), - mask_erode=dict(argstr='--maskerode %d', ), - mask_file=dict( - argstr='--mask %s', - extensions=None, - ), - mask_frame=dict(requires=['mask_file'], ), - mask_invert=dict(argstr='--maskinvert', ), + argstr="--in-intensity-units %s", requires=["in_intensity"], + ), + lh_orig_nofix=dict(extensions=None, mandatory=True,), + lh_pial=dict(extensions=None, mandatory=True,), + lh_white=dict(extensions=None, mandatory=True,), + mask_erode=dict(argstr="--maskerode %d",), + mask_file=dict(argstr="--mask %s", extensions=None,), + mask_frame=dict(requires=["mask_file"],), + mask_invert=dict(argstr="--maskinvert",), mask_sign=dict(), - mask_thresh=dict(argstr='--maskthresh %f', ), - multiply=dict(argstr='--mul %f', ), - non_empty_only=dict(argstr='--nonempty', ), - partial_volume_file=dict( - argstr='--pv %s', - extensions=None, - ), - presurf_seg=dict(extensions=None, ), - rh_orig_nofix=dict( - extensions=None, - mandatory=True, - ), - rh_pial=dict( - extensions=None, - mandatory=True, - ), - rh_white=dict( - extensions=None, - mandatory=True, - ), - ribbon=dict( - extensions=None, - mandatory=True, - ), - segment_id=dict(argstr='--id %s...', ), + mask_thresh=dict(argstr="--maskthresh %f",), + multiply=dict(argstr="--mul %f",), + non_empty_only=dict(argstr="--nonempty",), + partial_volume_file=dict(argstr="--pv %s", extensions=None,), + presurf_seg=dict(extensions=None,), + rh_orig_nofix=dict(extensions=None, mandatory=True,), + rh_pial=dict(extensions=None, mandatory=True,), + rh_white=dict(extensions=None, mandatory=True,), + ribbon=dict(extensions=None, mandatory=True,), + segment_id=dict(argstr="--id %s...",), segmentation_file=dict( - argstr='--seg %s', + argstr="--seg %s", extensions=None, mandatory=True, - xor=('segmentation_file', 'annot', 'surf_label'), - ), - sf_avg_file=dict(argstr='--sfavg %s', ), - subcort_gm=dict(argstr='--subcortgray', ), - subject_id=dict( - argstr='--subject %s', - mandatory=True, - usedefault=True, + xor=("segmentation_file", "annot", "surf_label"), ), + sf_avg_file=dict(argstr="--sfavg %s",), + subcort_gm=dict(argstr="--subcortgray",), + subject_id=dict(argstr="--subject %s", mandatory=True, usedefault=True,), subjects_dir=dict(), summary_file=dict( - argstr='--sum %s', - extensions=None, - genfile=True, - position=-1, + argstr="--sum %s", extensions=None, genfile=True, position=-1, ), - 
supratent=dict(argstr='--supratent', ), + supratent=dict(argstr="--supratent",), surf_label=dict( - argstr='--slabel %s %s %s', + argstr="--slabel %s %s %s", mandatory=True, - xor=('segmentation_file', 'annot', 'surf_label'), + xor=("segmentation_file", "annot", "surf_label"), ), - total_gray=dict(argstr='--totalgray', ), - transform=dict( - extensions=None, - mandatory=True, - ), - vox=dict(argstr='--vox %s', ), - wm_vol_from_surf=dict(argstr='--surf-wm-vol', ), + total_gray=dict(argstr="--totalgray",), + transform=dict(extensions=None, mandatory=True,), + vox=dict(argstr="--vox %s",), + wm_vol_from_surf=dict(argstr="--surf-wm-vol",), ) inputs = SegStatsReconAll.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SegStatsReconAll_outputs(): output_map = dict( - avgwf_file=dict(extensions=None, ), - avgwf_txt_file=dict(extensions=None, ), - sf_avg_file=dict(extensions=None, ), - summary_file=dict(extensions=None, ), + avgwf_file=dict(extensions=None,), + avgwf_txt_file=dict(extensions=None,), + sf_avg_file=dict(extensions=None,), + summary_file=dict(extensions=None,), ) outputs = SegStatsReconAll.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py b/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py index 25b1dd4aff..9343177c30 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py @@ -4,40 +4,21 @@ def test_SegmentCC_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), copy_inputs=dict(), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-aseg %s', - extensions=None, - mandatory=True, - ), - in_norm=dict( - extensions=None, - mandatory=True, - ), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-aseg %s", extensions=None, mandatory=True,), + in_norm=dict(extensions=None, mandatory=True,), out_file=dict( - argstr='-o %s', + argstr="-o %s", extensions=None, hash_files=False, keep_extension=False, - name_source=['in_file'], - name_template='%s.auto.mgz', - ), - out_rotation=dict( - argstr='-lta %s', - extensions=None, - mandatory=True, - ), - subject_id=dict( - argstr='%s', - mandatory=True, - position=-1, - usedefault=True, + name_source=["in_file"], + name_template="%s.auto.mgz", ), + out_rotation=dict(argstr="-lta %s", extensions=None, mandatory=True,), + subject_id=dict(argstr="%s", mandatory=True, position=-1, usedefault=True,), subjects_dir=dict(), ) inputs = SegmentCC.input_spec() @@ -45,10 +26,11 @@ def test_SegmentCC_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SegmentCC_outputs(): output_map = dict( - out_file=dict(extensions=None, ), - out_rotation=dict(extensions=None, ), + out_file=dict(extensions=None,), out_rotation=dict(extensions=None,), ) outputs = SegmentCC.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py b/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py index 275b710262..87f4af54c0 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py @@ -4,23 +4,10 @@ def test_SegmentWM_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - 
argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - out_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), subjects_dir=dict(), ) inputs = SegmentWM.input_spec() @@ -28,8 +15,10 @@ def test_SegmentWM_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SegmentWM_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = SegmentWM.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py b/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py index c6c4109728..bc180a00f6 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py @@ -4,55 +4,32 @@ def test_Smooth_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='--i %s', - extensions=None, - mandatory=True, - ), - num_iters=dict( - argstr='--niters %d', - mandatory=True, - xor=['surface_fwhm'], - ), - proj_frac=dict( - argstr='--projfrac %s', - xor=['proj_frac_avg'], - ), - proj_frac_avg=dict( - argstr='--projfrac-avg %.2f %.2f %.2f', - xor=['proj_frac'], - ), - reg_file=dict( - argstr='--reg %s', - extensions=None, - mandatory=True, - ), - smoothed_file=dict( - argstr='--o %s', - extensions=None, - genfile=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="--i %s", extensions=None, mandatory=True,), + num_iters=dict(argstr="--niters %d", mandatory=True, xor=["surface_fwhm"],), + proj_frac=dict(argstr="--projfrac %s", xor=["proj_frac_avg"],), + proj_frac_avg=dict(argstr="--projfrac-avg %.2f %.2f %.2f", xor=["proj_frac"],), + reg_file=dict(argstr="--reg %s", extensions=None, mandatory=True,), + smoothed_file=dict(argstr="--o %s", extensions=None, genfile=True,), subjects_dir=dict(), surface_fwhm=dict( - argstr='--fwhm %f', + argstr="--fwhm %f", mandatory=True, - requires=['reg_file'], - xor=['num_iters'], + requires=["reg_file"], + xor=["num_iters"], ), - vol_fwhm=dict(argstr='--vol-fwhm %f', ), + vol_fwhm=dict(argstr="--vol-fwhm %f",), ) inputs = Smooth.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Smooth_outputs(): - output_map = dict(smoothed_file=dict(extensions=None, ), ) + output_map = dict(smoothed_file=dict(extensions=None,),) outputs = Smooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py b/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py index 52dbbcf934..aed52899f5 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py @@ -4,51 +4,35 @@ def test_SmoothTessellation_inputs(): input_map = dict( - args=dict(argstr='%s', ), - curvature_averaging_iterations=dict(argstr='-a %d', ), - disable_estimates=dict(argstr='-nw', ), - environ=dict( - nohash=True, - 
usedefault=True, - ), - gaussian_curvature_norm_steps=dict(argstr='%d ', ), - gaussian_curvature_smoothing_steps=dict(argstr='%d', ), + args=dict(argstr="%s",), + curvature_averaging_iterations=dict(argstr="-a %d",), + disable_estimates=dict(argstr="-nw",), + environ=dict(nohash=True, usedefault=True,), + gaussian_curvature_norm_steps=dict(argstr="%d ",), + gaussian_curvature_smoothing_steps=dict(argstr="%d",), in_file=dict( - argstr='%s', - copyfile=True, - extensions=None, - mandatory=True, - position=-2, - ), - normalize_area=dict(argstr='-area', ), - out_area_file=dict( - argstr='-b %s', - extensions=None, - ), - out_curvature_file=dict( - argstr='-c %s', - extensions=None, + argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-2, ), - out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - position=-1, - ), - seed=dict(argstr='-seed %d', ), - smoothing_iterations=dict(argstr='-n %d', ), - snapshot_writing_iterations=dict(argstr='-w %d', ), + normalize_area=dict(argstr="-area",), + out_area_file=dict(argstr="-b %s", extensions=None,), + out_curvature_file=dict(argstr="-c %s", extensions=None,), + out_file=dict(argstr="%s", extensions=None, genfile=True, position=-1,), + seed=dict(argstr="-seed %d",), + smoothing_iterations=dict(argstr="-n %d",), + snapshot_writing_iterations=dict(argstr="-w %d",), subjects_dir=dict(), - use_gaussian_curvature_smoothing=dict(argstr='-g', ), - use_momentum=dict(argstr='-m', ), + use_gaussian_curvature_smoothing=dict(argstr="-g",), + use_momentum=dict(argstr="-m",), ) inputs = SmoothTessellation.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SmoothTessellation_outputs(): - output_map = dict(surface=dict(extensions=None, ), ) + output_map = dict(surface=dict(extensions=None,),) outputs = SmoothTessellation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py b/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py index 4ec5392c31..407354fbfe 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py @@ -4,33 +4,23 @@ def test_Sphere_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='%s', - copyfile=True, - extensions=None, - mandatory=True, - position=-2, - ), - in_smoothwm=dict( - copyfile=True, - extensions=None, + argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-2, ), - magic=dict(argstr='-q', ), + in_smoothwm=dict(copyfile=True, extensions=None,), + magic=dict(argstr="-q",), num_threads=dict(), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, hash_files=False, - name_source=['in_file'], - name_template='%s.sphere', + name_source=["in_file"], + name_template="%s.sphere", position=-1, ), - seed=dict(argstr='-seed %d', ), + seed=dict(argstr="-seed %d",), subjects_dir=dict(), ) inputs = Sphere.input_spec() @@ -38,8 +28,10 @@ def test_Sphere_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Sphere_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Sphere.output_spec() for 
key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py b/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py index 6fa42333a5..41c61ea0fa 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py @@ -4,62 +4,29 @@ def test_SphericalAverage_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - erode=dict(argstr='-erode %d', ), - fname=dict( - argstr='%s', - mandatory=True, - position=-5, - ), - hemisphere=dict( - argstr='%s', - mandatory=True, - position=-4, - ), - in_average=dict( - argstr='%s', - genfile=True, - position=-2, - ), - in_orig=dict( - argstr='-orig %s', - extensions=None, - ), - in_surf=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), - out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - position=-1, - ), - subject_id=dict( - argstr='-o %s', - mandatory=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + erode=dict(argstr="-erode %d",), + fname=dict(argstr="%s", mandatory=True, position=-5,), + hemisphere=dict(argstr="%s", mandatory=True, position=-4,), + in_average=dict(argstr="%s", genfile=True, position=-2,), + in_orig=dict(argstr="-orig %s", extensions=None,), + in_surf=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + out_file=dict(argstr="%s", extensions=None, genfile=True, position=-1,), + subject_id=dict(argstr="-o %s", mandatory=True,), subjects_dir=dict(), - threshold=dict(argstr='-t %.1f', ), - which=dict( - argstr='%s', - mandatory=True, - position=-6, - ), + threshold=dict(argstr="-t %.1f",), + which=dict(argstr="%s", mandatory=True, position=-6,), ) inputs = SphericalAverage.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SphericalAverage_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = SphericalAverage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py b/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py index d4fc2c2125..c9e20a00b5 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py @@ -4,56 +4,38 @@ def test_Surface2VolTransform_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - hemi=dict( - argstr='--hemi %s', - mandatory=True, - ), - mkmask=dict( - argstr='--mkmask', - xor=['source_file'], - ), - projfrac=dict(argstr='--projfrac %s', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + hemi=dict(argstr="--hemi %s", mandatory=True,), + mkmask=dict(argstr="--mkmask", xor=["source_file"],), + projfrac=dict(argstr="--projfrac %s",), reg_file=dict( - argstr='--volreg %s', - extensions=None, - mandatory=True, - xor=['subject_id'], + argstr="--volreg %s", extensions=None, mandatory=True, xor=["subject_id"], ), source_file=dict( - argstr='--surfval %s', + argstr="--surfval %s", copyfile=False, extensions=None, mandatory=True, - xor=['mkmask'], - ), - subject_id=dict( - argstr='--identity %s', - xor=['reg_file'], - ), - 
subjects_dir=dict(argstr='--sd %s', ), - surf_name=dict(argstr='--surf %s', ), - template_file=dict( - argstr='--template %s', - extensions=None, + xor=["mkmask"], ), + subject_id=dict(argstr="--identity %s", xor=["reg_file"],), + subjects_dir=dict(argstr="--sd %s",), + surf_name=dict(argstr="--surf %s",), + template_file=dict(argstr="--template %s", extensions=None,), transformed_file=dict( - argstr='--outvol %s', + argstr="--outvol %s", extensions=None, hash_files=False, - name_source=['source_file'], - name_template='%s_asVol.nii', + name_source=["source_file"], + name_template="%s_asVol.nii", ), vertexvol_file=dict( - argstr='--vtxvol %s', + argstr="--vtxvol %s", extensions=None, hash_files=False, - name_source=['source_file'], - name_template='%s_asVol_vertex.nii', + name_source=["source_file"], + name_template="%s_asVol_vertex.nii", ), ) inputs = Surface2VolTransform.input_spec() @@ -61,10 +43,11 @@ def test_Surface2VolTransform_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Surface2VolTransform_outputs(): output_map = dict( - transformed_file=dict(extensions=None, ), - vertexvol_file=dict(extensions=None, ), + transformed_file=dict(extensions=None,), vertexvol_file=dict(extensions=None,), ) outputs = Surface2VolTransform.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py index 56dc5ba071..020f7af7a8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py @@ -4,42 +4,16 @@ def test_SurfaceSmooth_inputs(): input_map = dict( - args=dict(argstr='%s', ), - cortex=dict( - argstr='--cortex', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fwhm=dict( - argstr='--fwhm %.4f', - xor=['smooth_iters'], - ), - hemi=dict( - argstr='--hemi %s', - mandatory=True, - ), - in_file=dict( - argstr='--sval %s', - extensions=None, - mandatory=True, - ), - out_file=dict( - argstr='--tval %s', - extensions=None, - genfile=True, - ), - reshape=dict(argstr='--reshape', ), - smooth_iters=dict( - argstr='--smooth %d', - xor=['fwhm'], - ), - subject_id=dict( - argstr='--s %s', - mandatory=True, - ), + args=dict(argstr="%s",), + cortex=dict(argstr="--cortex", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + fwhm=dict(argstr="--fwhm %.4f", xor=["smooth_iters"],), + hemi=dict(argstr="--hemi %s", mandatory=True,), + in_file=dict(argstr="--sval %s", extensions=None, mandatory=True,), + out_file=dict(argstr="--tval %s", extensions=None, genfile=True,), + reshape=dict(argstr="--reshape",), + smooth_iters=dict(argstr="--smooth %d", xor=["fwhm"],), + subject_id=dict(argstr="--s %s", mandatory=True,), subjects_dir=dict(), ) inputs = SurfaceSmooth.input_spec() @@ -47,8 +21,10 @@ def test_SurfaceSmooth_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SurfaceSmooth_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = SurfaceSmooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py index d3b37cc2b0..b4b3e8a5e2 100644 --- 
a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py @@ -4,107 +4,61 @@ def test_SurfaceSnapshots_inputs(): input_map = dict( - annot_file=dict( - argstr='-annotation %s', - extensions=None, - xor=['annot_name'], - ), - annot_name=dict( - argstr='-annotation %s', - xor=['annot_file'], - ), - args=dict(argstr='%s', ), - colortable=dict( - argstr='-colortable %s', - extensions=None, - ), - demean_overlay=dict(argstr='-zm', ), - environ=dict( - nohash=True, - usedefault=True, - ), - hemi=dict( - argstr='%s', - mandatory=True, - position=2, - ), + annot_file=dict(argstr="-annotation %s", extensions=None, xor=["annot_name"],), + annot_name=dict(argstr="-annotation %s", xor=["annot_file"],), + args=dict(argstr="%s",), + colortable=dict(argstr="-colortable %s", extensions=None,), + demean_overlay=dict(argstr="-zm",), + environ=dict(nohash=True, usedefault=True,), + hemi=dict(argstr="%s", mandatory=True, position=2,), identity_reg=dict( - argstr='-overlay-reg-identity', - xor=['overlay_reg', 'identity_reg', 'mni152_reg'], - ), - invert_overlay=dict(argstr='-invphaseflag 1', ), - label_file=dict( - argstr='-label %s', - extensions=None, - xor=['label_name'], - ), - label_name=dict( - argstr='-label %s', - xor=['label_file'], - ), - label_outline=dict(argstr='-label-outline', ), - label_under=dict(argstr='-labels-under', ), + argstr="-overlay-reg-identity", + xor=["overlay_reg", "identity_reg", "mni152_reg"], + ), + invert_overlay=dict(argstr="-invphaseflag 1",), + label_file=dict(argstr="-label %s", extensions=None, xor=["label_name"],), + label_name=dict(argstr="-label %s", xor=["label_file"],), + label_outline=dict(argstr="-label-outline",), + label_under=dict(argstr="-labels-under",), mni152_reg=dict( - argstr='-mni152reg', - xor=['overlay_reg', 'identity_reg', 'mni152_reg'], + argstr="-mni152reg", xor=["overlay_reg", "identity_reg", "mni152_reg"], ), - orig_suffix=dict(argstr='-orig %s', ), + orig_suffix=dict(argstr="-orig %s",), overlay=dict( - argstr='-overlay %s', - extensions=None, - requires=['overlay_range'], + argstr="-overlay %s", extensions=None, requires=["overlay_range"], ), - overlay_range=dict(argstr='%s', ), - overlay_range_offset=dict(argstr='-foffset %.3f', ), + overlay_range=dict(argstr="%s",), + overlay_range_offset=dict(argstr="-foffset %.3f",), overlay_reg=dict( - argstr='-overlay-reg %s', - extensions=None, - xor=['overlay_reg', 'identity_reg', 'mni152_reg'], - ), - patch_file=dict( - argstr='-patch %s', + argstr="-overlay-reg %s", extensions=None, + xor=["overlay_reg", "identity_reg", "mni152_reg"], ), - reverse_overlay=dict(argstr='-revphaseflag 1', ), + patch_file=dict(argstr="-patch %s", extensions=None,), + reverse_overlay=dict(argstr="-revphaseflag 1",), screenshot_stem=dict(), - show_color_scale=dict(argstr='-colscalebarflag 1', ), - show_color_text=dict(argstr='-colscaletext 1', ), - show_curv=dict( - argstr='-curv', - xor=['show_gray_curv'], - ), - show_gray_curv=dict( - argstr='-gray', - xor=['show_curv'], - ), + show_color_scale=dict(argstr="-colscalebarflag 1",), + show_color_text=dict(argstr="-colscaletext 1",), + show_curv=dict(argstr="-curv", xor=["show_gray_curv"],), + show_gray_curv=dict(argstr="-gray", xor=["show_curv"],), six_images=dict(), - sphere_suffix=dict(argstr='-sphere %s', ), - stem_template_args=dict(requires=['screenshot_stem'], ), - subject_id=dict( - argstr='%s', - mandatory=True, - position=1, - ), + sphere_suffix=dict(argstr="-sphere %s",), + 
stem_template_args=dict(requires=["screenshot_stem"],), + subject_id=dict(argstr="%s", mandatory=True, position=1,), subjects_dir=dict(), - surface=dict( - argstr='%s', - mandatory=True, - position=3, - ), - tcl_script=dict( - argstr='%s', - extensions=None, - genfile=True, - ), - truncate_overlay=dict(argstr='-truncphaseflag 1', ), + surface=dict(argstr="%s", mandatory=True, position=3,), + tcl_script=dict(argstr="%s", extensions=None, genfile=True,), + truncate_overlay=dict(argstr="-truncphaseflag 1",), ) inputs = SurfaceSnapshots.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SurfaceSnapshots_outputs(): - output_map = dict(snapshots=dict(), ) + output_map = dict(snapshots=dict(),) outputs = SurfaceSnapshots.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py index 67ea020def..d8bcf6eb28 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py @@ -4,57 +4,40 @@ def test_SurfaceTransform_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - hemi=dict( - argstr='--hemi %s', - mandatory=True, - ), - out_file=dict( - argstr='--tval %s', - extensions=None, - genfile=True, - ), - reshape=dict(argstr='--reshape', ), - reshape_factor=dict(argstr='--reshape-factor', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + hemi=dict(argstr="--hemi %s", mandatory=True,), + out_file=dict(argstr="--tval %s", extensions=None, genfile=True,), + reshape=dict(argstr="--reshape",), + reshape_factor=dict(argstr="--reshape-factor",), source_annot_file=dict( - argstr='--sval-annot %s', + argstr="--sval-annot %s", extensions=None, mandatory=True, - xor=['source_file'], + xor=["source_file"], ), source_file=dict( - argstr='--sval %s', + argstr="--sval %s", extensions=None, mandatory=True, - xor=['source_annot_file'], - ), - source_subject=dict( - argstr='--srcsubject %s', - mandatory=True, - ), - source_type=dict( - argstr='--sfmt %s', - requires=['source_file'], + xor=["source_annot_file"], ), + source_subject=dict(argstr="--srcsubject %s", mandatory=True,), + source_type=dict(argstr="--sfmt %s", requires=["source_file"],), subjects_dir=dict(), - target_ico_order=dict(argstr='--trgicoorder %d', ), - target_subject=dict( - argstr='--trgsubject %s', - mandatory=True, - ), - target_type=dict(argstr='--tfmt %s', ), + target_ico_order=dict(argstr="--trgicoorder %d",), + target_subject=dict(argstr="--trgsubject %s", mandatory=True,), + target_type=dict(argstr="--tfmt %s",), ) inputs = SurfaceTransform.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SurfaceTransform_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = SurfaceTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py b/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py index 73392602a4..396d46e1a9 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py +++ 
b/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py @@ -4,56 +4,26 @@ def test_SynthesizeFLASH_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixed_weighting=dict( - argstr='-w', - position=1, - ), - flip_angle=dict( - argstr='%.2f', - mandatory=True, - position=3, - ), - out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - ), - pd_image=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=6, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + fixed_weighting=dict(argstr="-w", position=1,), + flip_angle=dict(argstr="%.2f", mandatory=True, position=3,), + out_file=dict(argstr="%s", extensions=None, genfile=True,), + pd_image=dict(argstr="%s", extensions=None, mandatory=True, position=6,), subjects_dir=dict(), - t1_image=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=5, - ), - te=dict( - argstr='%.3f', - mandatory=True, - position=4, - ), - tr=dict( - argstr='%.2f', - mandatory=True, - position=2, - ), + t1_image=dict(argstr="%s", extensions=None, mandatory=True, position=5,), + te=dict(argstr="%.3f", mandatory=True, position=4,), + tr=dict(argstr="%.2f", mandatory=True, position=2,), ) inputs = SynthesizeFLASH.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SynthesizeFLASH_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = SynthesizeFLASH.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py b/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py index e5f6427678..41ad7fef4e 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py @@ -4,22 +4,11 @@ def test_TalairachAVI_inputs(): input_map = dict( - args=dict(argstr='%s', ), - atlas=dict(argstr='--atlas %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='--i %s', - extensions=None, - mandatory=True, - ), - out_file=dict( - argstr='--xfm %s', - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s",), + atlas=dict(argstr="--atlas %s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="--i %s", extensions=None, mandatory=True,), + out_file=dict(argstr="--xfm %s", extensions=None, mandatory=True,), subjects_dir=dict(), ) inputs = TalairachAVI.input_spec() @@ -27,11 +16,13 @@ def test_TalairachAVI_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TalairachAVI_outputs(): output_map = dict( - out_file=dict(extensions=None, ), - out_log=dict(extensions=None, ), - out_txt=dict(extensions=None, ), + out_file=dict(extensions=None,), + out_log=dict(extensions=None,), + out_txt=dict(extensions=None,), ) outputs = TalairachAVI.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py b/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py index ecf531879e..52b07074e1 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py @@ -4,17 +4,9 @@ def test_TalairachQC_inputs(): input_map = dict( - args=dict(argstr='%s', ), - 
environ=dict( - nohash=True, - usedefault=True, - ), - log_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=0, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + log_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), subjects_dir=dict(), ) inputs = TalairachQC.input_spec() @@ -22,11 +14,10 @@ def test_TalairachQC_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TalairachQC_outputs(): - output_map = dict(log_file=dict( - extensions=None, - usedefault=True, - ), ) + output_map = dict(log_file=dict(extensions=None, usedefault=True,),) outputs = TalairachQC.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py b/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py index 3074cc3934..0e80196220 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py @@ -4,73 +4,40 @@ def test_Tkregister2_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fsl_in_matrix=dict( - argstr='--fsl %s', - extensions=None, - ), - fsl_out=dict(argstr='--fslregout %s', ), - fstal=dict( - argstr='--fstal', - xor=['target_image', 'moving_image', 'reg_file'], - ), - fstarg=dict( - argstr='--fstarg', - xor=['target_image'], - ), - invert_lta_in=dict(requires=['lta_in'], ), - invert_lta_out=dict( - argstr='--ltaout-inv', - requires=['lta_in'], - ), - lta_in=dict( - argstr='--lta %s', - extensions=None, - ), - lta_out=dict(argstr='--ltaout %s', ), - moving_image=dict( - argstr='--mov %s', - extensions=None, - mandatory=True, - ), - movscale=dict(argstr='--movscale %f', ), - noedit=dict( - argstr='--noedit', - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + fsl_in_matrix=dict(argstr="--fsl %s", extensions=None,), + fsl_out=dict(argstr="--fslregout %s",), + fstal=dict(argstr="--fstal", xor=["target_image", "moving_image", "reg_file"],), + fstarg=dict(argstr="--fstarg", xor=["target_image"],), + invert_lta_in=dict(requires=["lta_in"],), + invert_lta_out=dict(argstr="--ltaout-inv", requires=["lta_in"],), + lta_in=dict(argstr="--lta %s", extensions=None,), + lta_out=dict(argstr="--ltaout %s",), + moving_image=dict(argstr="--mov %s", extensions=None, mandatory=True,), + movscale=dict(argstr="--movscale %f",), + noedit=dict(argstr="--noedit", usedefault=True,), reg_file=dict( - argstr='--reg %s', - extensions=None, - mandatory=True, - usedefault=True, + argstr="--reg %s", extensions=None, mandatory=True, usedefault=True, ), - reg_header=dict(argstr='--regheader', ), - subject_id=dict(argstr='--s %s', ), + reg_header=dict(argstr="--regheader",), + subject_id=dict(argstr="--s %s",), subjects_dir=dict(), - target_image=dict( - argstr='--targ %s', - extensions=None, - xor=['fstarg'], - ), - xfm=dict( - argstr='--xfm %s', - extensions=None, - ), + target_image=dict(argstr="--targ %s", extensions=None, xor=["fstarg"],), + xfm=dict(argstr="--xfm %s", extensions=None,), ) inputs = Tkregister2.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Tkregister2_outputs(): output_map = dict( - fsl_file=dict(extensions=None, ), - lta_file=dict(extensions=None, ), - 
reg_file=dict(extensions=None, ), + fsl_file=dict(extensions=None,), + lta_file=dict(extensions=None,), + reg_file=dict(extensions=None,), ) outputs = Tkregister2.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py b/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py index bd82d0c198..b4378f3cac 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py @@ -4,45 +4,33 @@ def test_UnpackSDICOMDir_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), config=dict( - argstr='-cfg %s', + argstr="-cfg %s", extensions=None, mandatory=True, - xor=('run_info', 'config', 'seq_config'), + xor=("run_info", "config", "seq_config"), ), - dir_structure=dict(argstr='-%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - log_file=dict( - argstr='-log %s', - extensions=None, - ), - no_info_dump=dict(argstr='-noinfodump', ), - no_unpack_err=dict(argstr='-no-unpackerr', ), - output_dir=dict(argstr='-targ %s', ), + dir_structure=dict(argstr="-%s",), + environ=dict(nohash=True, usedefault=True,), + log_file=dict(argstr="-log %s", extensions=None,), + no_info_dump=dict(argstr="-noinfodump",), + no_unpack_err=dict(argstr="-no-unpackerr",), + output_dir=dict(argstr="-targ %s",), run_info=dict( - argstr='-run %d %s %s %s', + argstr="-run %d %s %s %s", mandatory=True, - xor=('run_info', 'config', 'seq_config'), - ), - scan_only=dict( - argstr='-scanonly %s', - extensions=None, + xor=("run_info", "config", "seq_config"), ), + scan_only=dict(argstr="-scanonly %s", extensions=None,), seq_config=dict( - argstr='-seqcfg %s', + argstr="-seqcfg %s", extensions=None, mandatory=True, - xor=('run_info', 'config', 'seq_config'), - ), - source_dir=dict( - argstr='-src %s', - mandatory=True, + xor=("run_info", "config", "seq_config"), ), - spm_zeropad=dict(argstr='-nspmzeropad %d', ), + source_dir=dict(argstr="-src %s", mandatory=True,), + spm_zeropad=dict(argstr="-nspmzeropad %d",), subjects_dir=dict(), ) inputs = UnpackSDICOMDir.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py b/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py index 1a45d4cab3..cef9ddfedc 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py @@ -4,60 +4,21 @@ def test_VolumeMask_inputs(): input_map = dict( - args=dict(argstr='%s', ), - aseg=dict( - extensions=None, - xor=['in_aseg'], - ), + args=dict(argstr="%s",), + aseg=dict(extensions=None, xor=["in_aseg"],), copy_inputs=dict(), - environ=dict( - nohash=True, - usedefault=True, - ), - in_aseg=dict( - argstr='--aseg_name %s', - extensions=None, - xor=['aseg'], - ), - left_ribbonlabel=dict( - argstr='--label_left_ribbon %d', - mandatory=True, - ), - left_whitelabel=dict( - argstr='--label_left_white %d', - mandatory=True, - ), - lh_pial=dict( - extensions=None, - mandatory=True, - ), - lh_white=dict( - extensions=None, - mandatory=True, - ), - rh_pial=dict( - extensions=None, - mandatory=True, - ), - rh_white=dict( - extensions=None, - mandatory=True, - ), - right_ribbonlabel=dict( - argstr='--label_right_ribbon %d', - mandatory=True, - ), - right_whitelabel=dict( - argstr='--label_right_white %d', - mandatory=True, - ), - save_ribbon=dict(argstr='--save_ribbon', ), - subject_id=dict( - argstr='%s', - mandatory=True, - position=-1, - usedefault=True, - ), + environ=dict(nohash=True, usedefault=True,), + 
in_aseg=dict(argstr="--aseg_name %s", extensions=None, xor=["aseg"],), + left_ribbonlabel=dict(argstr="--label_left_ribbon %d", mandatory=True,), + left_whitelabel=dict(argstr="--label_left_white %d", mandatory=True,), + lh_pial=dict(extensions=None, mandatory=True,), + lh_white=dict(extensions=None, mandatory=True,), + rh_pial=dict(extensions=None, mandatory=True,), + rh_white=dict(extensions=None, mandatory=True,), + right_ribbonlabel=dict(argstr="--label_right_ribbon %d", mandatory=True,), + right_whitelabel=dict(argstr="--label_right_white %d", mandatory=True,), + save_ribbon=dict(argstr="--save_ribbon",), + subject_id=dict(argstr="%s", mandatory=True, position=-1, usedefault=True,), subjects_dir=dict(), ) inputs = VolumeMask.input_spec() @@ -65,11 +26,13 @@ def test_VolumeMask_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_VolumeMask_outputs(): output_map = dict( - lh_ribbon=dict(extensions=None, ), - out_ribbon=dict(extensions=None, ), - rh_ribbon=dict(extensions=None, ), + lh_ribbon=dict(extensions=None,), + out_ribbon=dict(extensions=None,), + rh_ribbon=dict(extensions=None,), ) outputs = VolumeMask.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py b/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py index d489d79295..649e4e497b 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py @@ -4,44 +4,26 @@ def test_WatershedSkullStrip_inputs(): input_map = dict( - args=dict(argstr='%s', ), - brain_atlas=dict( - argstr='-brain_atlas %s', - extensions=None, - position=-4, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s",), + brain_atlas=dict(argstr="-brain_atlas %s", extensions=None, position=-4,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), out_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - usedefault=True, + argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True, ), subjects_dir=dict(), - t1=dict(argstr='-T1', ), - transform=dict( - argstr='%s', - extensions=None, - position=-3, - ), + t1=dict(argstr="-T1",), + transform=dict(argstr="%s", extensions=None, position=-3,), ) inputs = WatershedSkullStrip.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_WatershedSkullStrip_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = WatershedSkullStrip.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_model.py b/nipype/interfaces/freesurfer/tests/test_model.py index cd8d129690..0fe0065af5 100644 --- a/nipype/interfaces/freesurfer/tests/test_model.py +++ b/nipype/interfaces/freesurfer/tests/test_model.py @@ -17,9 +17,9 @@ def test_concatenate(tmpdir): tmpdir.chdir() - in1 = tmpdir.join('cont1.nii').strpath - in2 = tmpdir.join('cont2.nii').strpath - out = 'bar.nii' + in1 = tmpdir.join("cont1.nii").strpath + in2 = tmpdir.join("cont2.nii").strpath + out = "bar.nii" data1 = np.zeros((3, 3, 3, 1), dtype=np.float32) 
data2 = np.ones((3, 3, 3, 5), dtype=np.float32) @@ -31,9 +31,8 @@ def test_concatenate(tmpdir): # Test default behavior res = model.Concatenate(in_files=[in1, in2]).run() - assert res.outputs.concatenated_file == tmpdir.join( - 'concat_output.nii.gz').strpath - assert np.allclose(nb.load('concat_output.nii.gz').get_data(), out_data) + assert res.outputs.concatenated_file == tmpdir.join("concat_output.nii.gz").strpath + assert np.allclose(nb.load("concat_output.nii.gz").get_data(), out_data) # Test specified concatenated_file res = model.Concatenate(in_files=[in1, in2], concatenated_file=out).run() @@ -41,17 +40,19 @@ def test_concatenate(tmpdir): assert np.allclose(nb.load(out, mmap=NUMPY_MMAP).get_data(), out_data) # Test in workflow - wf = pe.Workflow('test_concatenate', base_dir=tmpdir.strpath) + wf = pe.Workflow("test_concatenate", base_dir=tmpdir.strpath) concat = pe.Node( - model.Concatenate(in_files=[in1, in2], concatenated_file=out), - name='concat') + model.Concatenate(in_files=[in1, in2], concatenated_file=out), name="concat" + ) wf.add_nodes([concat]) wf.run() assert np.allclose( - nb.load(tmpdir.join('test_concatenate', 'concat', - out).strpath).get_data(), out_data) + nb.load(tmpdir.join("test_concatenate", "concat", out).strpath).get_data(), + out_data, + ) # Test a simple statistic res = model.Concatenate( - in_files=[in1, in2], concatenated_file=out, stats='mean').run() + in_files=[in1, in2], concatenated_file=out, stats="mean" + ).run() assert np.allclose(nb.load(out, mmap=NUMPY_MMAP).get_data(), mean_data) diff --git a/nipype/interfaces/freesurfer/tests/test_preprocess.py b/nipype/interfaces/freesurfer/tests/test_preprocess.py index b0a6db293a..518d60b9a5 100644 --- a/nipype/interfaces/freesurfer/tests/test_preprocess.py +++ b/nipype/interfaces/freesurfer/tests/test_preprocess.py @@ -11,8 +11,7 @@ from nipype import LooseVersion -@pytest.mark.skipif( - freesurfer.no_freesurfer(), reason="freesurfer is not installed") +@pytest.mark.skipif(freesurfer.no_freesurfer(), reason="freesurfer is not installed") def test_robustregister(create_files_in_directory): filelist, outdir = create_files_in_directory @@ -20,7 +19,7 @@ def test_robustregister(create_files_in_directory): cwd = os.getcwd() # make sure command gets called - assert reg.cmd == 'mri_robust_register' + assert reg.cmd == "mri_robust_register" # test raising error with mandatory args absent with pytest.raises(ValueError): @@ -30,32 +29,35 @@ def test_robustregister(create_files_in_directory): reg.inputs.source_file = filelist[0] reg.inputs.target_file = filelist[1] reg.inputs.auto_sens = True - assert reg.cmdline == ('mri_robust_register --satit --lta ' - '%s/%s_robustreg.lta --mov %s --dst %s' % - (cwd, filelist[0][:-4], filelist[0], filelist[1])) + assert reg.cmdline == ( + "mri_robust_register --satit --lta " + "%s/%s_robustreg.lta --mov %s --dst %s" + % (cwd, filelist[0][:-4], filelist[0], filelist[1]) + ) # constructor based parameter setting reg2 = freesurfer.RobustRegister( source_file=filelist[0], target_file=filelist[1], outlier_sens=3.0, - out_reg_file='foo.lta', - half_targ=True) + out_reg_file="foo.lta", + half_targ=True, + ) assert reg2.cmdline == ( - 'mri_robust_register --halfdst %s_halfway.nii --lta foo.lta ' - '--sat 3.0000 --mov %s --dst %s' % - (os.path.join(outdir, filelist[1][:-4]), filelist[0], filelist[1])) + "mri_robust_register --halfdst %s_halfway.nii --lta foo.lta " + "--sat 3.0000 --mov %s --dst %s" + % (os.path.join(outdir, filelist[1][:-4]), filelist[0], filelist[1]) + ) 
-@pytest.mark.skipif( - freesurfer.no_freesurfer(), reason="freesurfer is not installed") +@pytest.mark.skipif(freesurfer.no_freesurfer(), reason="freesurfer is not installed") def test_fitmsparams(create_files_in_directory): filelist, outdir = create_files_in_directory fit = freesurfer.FitMSParams() # make sure command gets called - assert fit.cmd == 'mri_ms_fitparms' + assert fit.cmd == "mri_ms_fitparms" # test raising error with mandatory args absent with pytest.raises(ValueError): @@ -64,29 +66,30 @@ def test_fitmsparams(create_files_in_directory): # .inputs based parameters setting fit.inputs.in_files = filelist fit.inputs.out_dir = outdir - assert fit.cmdline == 'mri_ms_fitparms %s %s %s' % (filelist[0], - filelist[1], outdir) + assert fit.cmdline == "mri_ms_fitparms %s %s %s" % ( + filelist[0], + filelist[1], + outdir, + ) # constructor based parameter setting fit2 = freesurfer.FitMSParams( - in_files=filelist, - te_list=[1.5, 3.5], - flip_list=[20, 30], - out_dir=outdir) + in_files=filelist, te_list=[1.5, 3.5], flip_list=[20, 30], out_dir=outdir + ) assert fit2.cmdline == ( - 'mri_ms_fitparms -te %.3f -fa %.1f %s -te %.3f -fa %.1f %s %s' % - (1.500, 20.0, filelist[0], 3.500, 30.0, filelist[1], outdir)) + "mri_ms_fitparms -te %.3f -fa %.1f %s -te %.3f -fa %.1f %s %s" + % (1.500, 20.0, filelist[0], 3.500, 30.0, filelist[1], outdir) + ) -@pytest.mark.skipif( - freesurfer.no_freesurfer(), reason="freesurfer is not installed") +@pytest.mark.skipif(freesurfer.no_freesurfer(), reason="freesurfer is not installed") def test_synthesizeflash(create_files_in_directory): filelist, outdir = create_files_in_directory syn = freesurfer.SynthesizeFLASH() # make sure command gets called - assert syn.cmd == 'mri_synthesize' + assert syn.cmd == "mri_synthesize" # test raising error with mandatory args absent with pytest.raises(ValueError): @@ -99,20 +102,22 @@ def test_synthesizeflash(create_files_in_directory): syn.inputs.te = 4.5 syn.inputs.tr = 20 - assert syn.cmdline == ('mri_synthesize 20.00 30.00 4.500 %s %s %s' % - (filelist[0], filelist[1], - os.path.join(outdir, 'synth-flash_30.mgz'))) + assert syn.cmdline == ( + "mri_synthesize 20.00 30.00 4.500 %s %s %s" + % (filelist[0], filelist[1], os.path.join(outdir, "synth-flash_30.mgz")) + ) # constructor based parameters setting syn2 = freesurfer.SynthesizeFLASH( - t1_image=filelist[0], pd_image=filelist[1], flip_angle=20, te=5, tr=25) - assert syn2.cmdline == ('mri_synthesize 25.00 20.00 5.000 %s %s %s' % - (filelist[0], filelist[1], - os.path.join(outdir, 'synth-flash_20.mgz'))) + t1_image=filelist[0], pd_image=filelist[1], flip_angle=20, te=5, tr=25 + ) + assert syn2.cmdline == ( + "mri_synthesize 25.00 20.00 5.000 %s %s %s" + % (filelist[0], filelist[1], os.path.join(outdir, "synth-flash_20.mgz")) + ) -@pytest.mark.skipif( - freesurfer.no_freesurfer(), reason="freesurfer is not installed") +@pytest.mark.skipif(freesurfer.no_freesurfer(), reason="freesurfer is not installed") def test_mandatory_outvol(create_files_in_directory): filelist, outdir = create_files_in_directory mni = freesurfer.MNIBiasCorrection() @@ -127,28 +132,30 @@ def test_mandatory_outvol(create_files_in_directory): # test with minimal args mni.inputs.in_file = filelist[0] base, ext = os.path.splitext(os.path.basename(filelist[0])) - if ext == '.gz': + if ext == ".gz": base, ext2 = os.path.splitext(base) ext = ext2 + ext - assert mni.cmdline == ('mri_nu_correct.mni --i %s --n 4 --o %s_output%s' % - (filelist[0], base, ext)) + assert mni.cmdline == ( + "mri_nu_correct.mni --i %s 
--n 4 --o %s_output%s" % (filelist[0], base, ext) + ) # test with custom outfile - mni.inputs.out_file = 'new_corrected_file.mgz' + mni.inputs.out_file = "new_corrected_file.mgz" assert mni.cmdline == ( - 'mri_nu_correct.mni --i %s --n 4 --o new_corrected_file.mgz' % (filelist[0])) + "mri_nu_correct.mni --i %s --n 4 --o new_corrected_file.mgz" % (filelist[0]) + ) # constructor based tests mni2 = freesurfer.MNIBiasCorrection( - in_file=filelist[0], out_file='bias_corrected_output', iterations=2) + in_file=filelist[0], out_file="bias_corrected_output", iterations=2 + ) assert mni2.cmdline == ( - 'mri_nu_correct.mni --i %s --n 2 --o bias_corrected_output' % - filelist[0]) + "mri_nu_correct.mni --i %s --n 2 --o bias_corrected_output" % filelist[0] + ) -@pytest.mark.skipif( - freesurfer.no_freesurfer(), reason="freesurfer is not installed") +@pytest.mark.skipif(freesurfer.no_freesurfer(), reason="freesurfer is not installed") def test_bbregister(create_files_in_directory): filelist, outdir = create_files_in_directory bbr = freesurfer.BBRegister() @@ -160,9 +167,9 @@ def test_bbregister(create_files_in_directory): with pytest.raises(ValueError): bbr.cmdline - bbr.inputs.subject_id = 'fsaverage' + bbr.inputs.subject_id = "fsaverage" bbr.inputs.source_file = filelist[0] - bbr.inputs.contrast_type = 't2' + bbr.inputs.contrast_type = "t2" # Check that 'init' is mandatory in FS < 6, but not in 6+ if Info.looseversion() < LooseVersion("6.0.0"): @@ -171,16 +178,17 @@ def test_bbregister(create_files_in_directory): else: bbr.cmdline - bbr.inputs.init = 'fsl' + bbr.inputs.init = "fsl" base, ext = os.path.splitext(os.path.basename(filelist[0])) - if ext == '.gz': + if ext == ".gz": base, _ = os.path.splitext(base) - assert bbr.cmdline == ('bbregister --t2 --init-fsl ' - '--reg {base}_bbreg_fsaverage.dat ' - '--mov {full} --s fsaverage'.format( - full=filelist[0], base=base)) + assert bbr.cmdline == ( + "bbregister --t2 --init-fsl " + "--reg {base}_bbreg_fsaverage.dat " + "--mov {full} --s fsaverage".format(full=filelist[0], base=base) + ) def test_FSVersion(): diff --git a/nipype/interfaces/freesurfer/tests/test_utils.py b/nipype/interfaces/freesurfer/tests/test_utils.py index 203b2483f8..52348bb5e0 100644 --- a/nipype/interfaces/freesurfer/tests/test_utils.py +++ b/nipype/interfaces/freesurfer/tests/test_utils.py @@ -4,8 +4,10 @@ import os import os.path as op import pytest -from nipype.testing.fixtures import (create_files_in_directory_plus_dummy_file, - create_surf_file_in_directory) +from nipype.testing.fixtures import ( + create_files_in_directory_plus_dummy_file, + create_surf_file_in_directory, +) from nipype.pipeline import engine as pe from nipype.interfaces import freesurfer as fs @@ -18,7 +20,7 @@ def test_sample2surf(create_files_in_directory_plus_dummy_file): s2s = fs.SampleToSurface() # Test underlying command - assert s2s.cmd == 'mri_vol2surf' + assert s2s.cmd == "mri_vol2surf" # Test mandatory args exception with pytest.raises(ValueError): @@ -32,29 +34,30 @@ def test_sample2surf(create_files_in_directory_plus_dummy_file): s2s.inputs.reference_file = files[1] s2s.inputs.hemi = "lh" s2s.inputs.reg_file = files[2] - s2s.inputs.sampling_range = .5 + s2s.inputs.sampling_range = 0.5 s2s.inputs.sampling_units = "frac" s2s.inputs.sampling_method = "point" # Test a basic command line assert s2s.cmdline == ( "mri_vol2surf " - "--hemi lh --o %s --ref %s --reg reg.dat --projfrac 0.500 --mov %s" % - (os.path.join(cwd, "lh.a.mgz"), files[1], files[0])) + "--hemi lh --o %s --ref %s --reg reg.dat 
--projfrac 0.500 --mov %s" + % (os.path.join(cwd, "lh.a.mgz"), files[1], files[0]) + ) # Test identity s2sish = fs.SampleToSurface( - source_file=files[1], reference_file=files[0], hemi="rh") + source_file=files[1], reference_file=files[0], hemi="rh" + ) assert s2s != s2sish # Test hits file name creation s2s.inputs.hits_file = True - assert s2s._get_outfilename("hits_file") == os.path.join( - cwd, "lh.a_hits.mgz") + assert s2s._get_outfilename("hits_file") == os.path.join(cwd, "lh.a_hits.mgz") # Test that a 2-tuple range raises an error def set_illegal_range(): - s2s.inputs.sampling_range = (.2, .5) + s2s.inputs.sampling_range = (0.2, 0.5) with pytest.raises(TraitError): set_illegal_range() @@ -83,9 +86,10 @@ def test_surfsmooth(create_surf_file_in_directory): smooth.inputs.hemi = "lh" # Test the command line - assert smooth.cmdline == \ - ("mri_surf2surf --cortex --fwhm 5.0000 --hemi lh --sval %s --tval %s/lh.a_smooth%d.nii --s fsaverage" % - (surf, cwd, fwhm)) + assert smooth.cmdline == ( + "mri_surf2surf --cortex --fwhm 5.0000 --hemi lh --sval %s --tval %s/lh.a_smooth%d.nii --s fsaverage" + % (surf, cwd, fwhm) + ) # Test identity shmooth = fs.SurfaceSmooth( @@ -93,7 +97,8 @@ def test_surfsmooth(create_surf_file_in_directory): fwhm=6, in_file=surf, hemi="lh", - out_file="lh.a_smooth.nii") + out_file="lh.a_smooth.nii", + ) assert smooth != shmooth @@ -119,16 +124,18 @@ def test_surfxfm(create_surf_file_in_directory): xfm.inputs.hemi = "lh" # Test the command line - assert xfm.cmdline == \ - ("mri_surf2surf --hemi lh --tval %s/lh.a.fsaverage.nii --sval %s --srcsubject my_subject --trgsubject fsaverage" % - (cwd, surf)) + assert xfm.cmdline == ( + "mri_surf2surf --hemi lh --tval %s/lh.a.fsaverage.nii --sval %s --srcsubject my_subject --trgsubject fsaverage" + % (cwd, surf) + ) # Test identity xfmish = fs.SurfaceTransform( source_subject="fsaverage", target_subject="my_subject", source_file=surf, - hemi="lh") + hemi="lh", + ) assert xfm != xfmish @@ -156,8 +163,7 @@ def test_surfshots(create_files_in_directory_plus_dummy_file): assert fotos.cmdline == "tksurfer fsaverage lh pial -tcl snapshots.tcl" # Test identity - schmotos = fs.SurfaceSnapshots( - subject_id="mysubject", hemi="rh", surface="white") + schmotos = fs.SurfaceSnapshots(subject_id="mysubject", hemi="rh", surface="white") assert fotos != schmotos # Test that the tcl script gets written @@ -183,25 +189,25 @@ def test_surfshots(create_files_in_directory_plus_dummy_file): @pytest.mark.skipif(fs.no_freesurfer(), reason="freesurfer is not installed") def test_mrisexpand(tmpdir): fssrc = FreeSurferSource( - subjects_dir=fs.Info.subjectsdir(), subject_id='fsaverage', hemi='lh') + subjects_dir=fs.Info.subjectsdir(), subject_id="fsaverage", hemi="lh" + ) fsavginfo = fssrc.run().outputs.get() # dt=60 to ensure very short runtime expand_if = fs.MRIsExpand( - in_file=fsavginfo['smoothwm'], out_name='expandtmp', distance=1, dt=60) + in_file=fsavginfo["smoothwm"], out_name="expandtmp", distance=1, dt=60 + ) expand_nd = pe.Node( fs.MRIsExpand( - in_file=fsavginfo['smoothwm'], - out_name='expandtmp', - distance=1, - dt=60), - name='expand_node') + in_file=fsavginfo["smoothwm"], out_name="expandtmp", distance=1, dt=60 + ), + name="expand_node", + ) # Interfaces should have same command line at instantiation - orig_cmdline = 'mris_expand -T 60 {} 1 expandtmp'.format( - fsavginfo['smoothwm']) + orig_cmdline = "mris_expand -T 60 {} 1 expandtmp".format(fsavginfo["smoothwm"]) assert expand_if.cmdline == orig_cmdline assert expand_nd.interface.cmdline 
== orig_cmdline @@ -209,16 +215,18 @@ def test_mrisexpand(tmpdir): nd_res = expand_nd.run() # Commandlines differ - node_cmdline = 'mris_expand -T 60 -pial {cwd}/lh.pial {cwd}/lh.smoothwm ' \ - '1 expandtmp'.format(cwd=nd_res.runtime.cwd) + node_cmdline = ( + "mris_expand -T 60 -pial {cwd}/lh.pial {cwd}/lh.smoothwm " + "1 expandtmp".format(cwd=nd_res.runtime.cwd) + ) assert nd_res.runtime.cmdline == node_cmdline # Check output - if_out_file = expand_if._list_outputs()['out_file'] - nd_out_file = nd_res.outputs.get()['out_file'] + if_out_file = expand_if._list_outputs()["out_file"] + nd_out_file = nd_res.outputs.get()["out_file"] # Same filename assert op.basename(if_out_file) == op.basename(nd_out_file) # Interface places output in source directory - assert op.dirname(if_out_file) == op.dirname(fsavginfo['smoothwm']) + assert op.dirname(if_out_file) == op.dirname(fsavginfo["smoothwm"]) # Node places output in working directory assert op.dirname(nd_out_file) == nd_res.runtime.cwd diff --git a/nipype/interfaces/freesurfer/utils.py b/nipype/interfaces/freesurfer/utils.py index 106938d54d..ab47dacbd4 100644 --- a/nipype/interfaces/freesurfer/utils.py +++ b/nipype/interfaces/freesurfer/utils.py @@ -9,37 +9,69 @@ from ... import logging from ...utils.filemanip import fname_presuffix, split_filename -from ..base import (TraitedSpec, Directory, File, traits, OutputMultiPath, isdefined, - CommandLine, CommandLineInputSpec) -from .base import (FSCommand, FSTraitedSpec, FSSurfaceCommand, FSScriptCommand, - FSScriptOutputSpec, FSTraitedSpecOpenMP, FSCommandOpenMP) -__docformat__ = 'restructuredtext' +from ..base import ( + TraitedSpec, + Directory, + File, + traits, + OutputMultiPath, + isdefined, + CommandLine, + CommandLineInputSpec, +) +from .base import ( + FSCommand, + FSTraitedSpec, + FSSurfaceCommand, + FSScriptCommand, + FSScriptOutputSpec, + FSTraitedSpecOpenMP, + FSCommandOpenMP, +) + +__docformat__ = "restructuredtext" filemap = dict( - cor='cor', - mgh='mgh', - mgz='mgz', - minc='mnc', - afni='brik', - brik='brik', - bshort='bshort', - spm='img', - analyze='img', - analyze4d='img', - bfloat='bfloat', - nifti1='img', - nii='nii', - niigz='nii.gz', - gii='gii') + cor="cor", + mgh="mgh", + mgz="mgz", + minc="mnc", + afni="brik", + brik="brik", + bshort="bshort", + spm="img", + analyze="img", + analyze4d="img", + bfloat="bfloat", + nifti1="img", + nii="nii", + niigz="nii.gz", + gii="gii", +) filetypes = [ - 'cor', 'mgh', 'mgz', 'minc', 'analyze', 'analyze4d', 'spm', 'afni', 'brik', - 'bshort', 'bfloat', 'sdt', 'outline', 'otl', 'gdf', 'nifti1', 'nii', - 'niigz' + "cor", + "mgh", + "mgz", + "minc", + "analyze", + "analyze4d", + "spm", + "afni", + "brik", + "bshort", + "bfloat", + "sdt", + "outline", + "otl", + "gdf", + "nifti1", + "nii", + "niigz", ] -implicit_filetypes = ['gii'] +implicit_filetypes = ["gii"] -logger = logging.getLogger('nipype.interface') +logger = logging.getLogger("nipype.interface") def copy2subjdir(cls, in_file, folder=None, basename=None, subject_id=None): @@ -57,7 +89,7 @@ def copy2subjdir(cls, in_file, folder=None, basename=None, subject_id=None): if isdefined(cls.inputs.subject_id): subject_id = cls.inputs.subject_id else: - subject_id = 'subject_id' # default + subject_id = "subject_id" # default # check for basename if basename is None: basename = os.path.basename(in_file) @@ -89,20 +121,18 @@ class SampleToSurfaceInputSpec(FSTraitedSpec): exists=True, mandatory=True, argstr="--mov %s", - desc="volume to sample values from") + desc="volume to sample values from", + ) 
reference_file = File( - exists=True, - argstr="--ref %s", - desc="reference volume (default is orig.mgz)") + exists=True, argstr="--ref %s", desc="reference volume (default is orig.mgz)" + ) hemi = traits.Enum( - "lh", - "rh", - mandatory=True, - argstr="--hemi %s", - desc="target hemisphere") + "lh", "rh", mandatory=True, argstr="--hemi %s", desc="target hemisphere" + ) surface = traits.String( - argstr="--surf %s", desc="target surface (default is white)") + argstr="--surf %s", desc="target surface (default is white)" + ) reg_xors = ["reg_file", "reg_header", "mni152reg"] reg_file = File( @@ -110,35 +140,41 @@ class SampleToSurfaceInputSpec(FSTraitedSpec): argstr="--reg %s", mandatory=True, xor=reg_xors, - desc="source-to-reference registration file") + desc="source-to-reference registration file", + ) reg_header = traits.Bool( argstr="--regheader %s", requires=["subject_id"], mandatory=True, xor=reg_xors, - desc="register based on header geometry") + desc="register based on header geometry", + ) mni152reg = traits.Bool( argstr="--mni152reg", mandatory=True, xor=reg_xors, - desc="source volume is in MNI152 space") + desc="source volume is in MNI152 space", + ) apply_rot = traits.Tuple( traits.Float, traits.Float, traits.Float, argstr="--rot %.3f %.3f %.3f", - desc="rotation angles (in degrees) to apply to reg matrix") + desc="rotation angles (in degrees) to apply to reg matrix", + ) apply_trans = traits.Tuple( traits.Float, traits.Float, traits.Float, argstr="--trans %.3f %.3f %.3f", - desc="translation (in mm) to apply to reg matrix") + desc="translation (in mm) to apply to reg matrix", + ) override_reg_subj = traits.Bool( argstr="--srcsubject %s", requires=["subject_id"], - desc="override the subject in the reg file header") + desc="override the subject in the reg file header", + ) sampling_method = traits.Enum( "point", @@ -148,106 +184,117 @@ class SampleToSurfaceInputSpec(FSTraitedSpec): argstr="%s", xor=["projection_stem"], requires=["sampling_range", "sampling_units"], - desc="how to sample -- at a point or at the max or average over a range" + desc="how to sample -- at a point or at the max or average over a range", ) sampling_range = traits.Either( traits.Float, traits.Tuple(traits.Float, traits.Float, traits.Float), - desc="sampling range - a point or a tuple of (min, max, step)") + desc="sampling range - a point or a tuple of (min, max, step)", + ) sampling_units = traits.Enum( - "mm", "frac", desc="sampling range type -- either 'mm' or 'frac'") + "mm", "frac", desc="sampling range type -- either 'mm' or 'frac'" + ) projection_stem = traits.String( mandatory=True, xor=["sampling_method"], - desc="stem for precomputed linear estimates and volume fractions") + desc="stem for precomputed linear estimates and volume fractions", + ) smooth_vol = traits.Float( - argstr="--fwhm %.3f", desc="smooth input volume (mm fwhm)") + argstr="--fwhm %.3f", desc="smooth input volume (mm fwhm)" + ) smooth_surf = traits.Float( - argstr="--surf-fwhm %.3f", desc="smooth output surface (mm fwhm)") + argstr="--surf-fwhm %.3f", desc="smooth output surface (mm fwhm)" + ) interp_method = traits.Enum( - "nearest", - "trilinear", - argstr="--interp %s", - desc="interpolation method") + "nearest", "trilinear", argstr="--interp %s", desc="interpolation method" + ) cortex_mask = traits.Bool( argstr="--cortex", xor=["mask_label"], - desc="mask the target surface with hemi.cortex.label") + desc="mask the target surface with hemi.cortex.label", + ) mask_label = File( exists=True, argstr="--mask %s", 
xor=["cortex_mask"], - desc="label file to mask output with") + desc="label file to mask output with", + ) float2int_method = traits.Enum( "round", "tkregister", argstr="--float2int %s", - desc="method to convert reg matrix values (default is round)") + desc="method to convert reg matrix values (default is round)", + ) fix_tk_reg = traits.Bool( - argstr="--fixtkreg", desc="make reg matrix round-compatible") + argstr="--fixtkreg", desc="make reg matrix round-compatible" + ) subject_id = traits.String(desc="subject id") target_subject = traits.String( argstr="--trgsubject %s", - desc="sample to surface of different subject than source") + desc="sample to surface of different subject than source", + ) surf_reg = traits.Either( traits.Bool, traits.Str(), argstr="--surfreg %s", requires=["target_subject"], - desc="use surface registration to target subject") + desc="use surface registration to target subject", + ) ico_order = traits.Int( argstr="--icoorder %d", requires=["target_subject"], - desc="icosahedron order when target_subject is 'ico'") + desc="icosahedron order when target_subject is 'ico'", + ) reshape = traits.Bool( argstr="--reshape", xor=["no_reshape"], - desc="reshape surface vector to fit in non-mgh format") + desc="reshape surface vector to fit in non-mgh format", + ) no_reshape = traits.Bool( argstr="--noreshape", xor=["reshape"], - desc="do not reshape surface vector (default)") + desc="do not reshape surface vector (default)", + ) reshape_slices = traits.Int( - argstr="--rf %d", desc="number of 'slices' for reshaping") + argstr="--rf %d", desc="number of 'slices' for reshaping" + ) scale_input = traits.Float( - argstr="--scale %.3f", desc="multiple all intensities by scale factor") - frame = traits.Int( - argstr="--frame %d", desc="save only one frame (0-based)") + argstr="--scale %.3f", desc="multiple all intensities by scale factor" + ) + frame = traits.Int(argstr="--frame %d", desc="save only one frame (0-based)") - out_file = File( - argstr="--o %s", genfile=True, desc="surface file to write") + out_file = File(argstr="--o %s", genfile=True, desc="surface file to write") out_type = traits.Enum( - filetypes + implicit_filetypes, - argstr="--out_type %s", - desc="output file type") + filetypes + implicit_filetypes, argstr="--out_type %s", desc="output file type" + ) hits_file = traits.Either( traits.Bool, File(exists=True), argstr="--srchit %s", - desc="save image with number of hits at each voxel") - hits_type = traits.Enum( - filetypes, argstr="--srchit_type", desc="hits file type") + desc="save image with number of hits at each voxel", + ) + hits_type = traits.Enum(filetypes, argstr="--srchit_type", desc="hits file type") vox_file = traits.Either( traits.Bool, File, argstr="--nvox %s", - desc="text file with the number of voxels intersecting the surface") + desc="text file with the number of voxels intersecting the surface", + ) class SampleToSurfaceOutputSpec(TraitedSpec): out_file = File(exists=True, desc="surface file") - hits_file = File( - exists=True, desc="image with number of hits at each voxel") + hits_file = File(exists=True, desc="image with number of hits at each voxel") vox_file = File( - exists=True, - desc="text file with the number of voxels intersecting the surface") + exists=True, desc="text file with the number of voxels intersecting the surface" + ) class SampleToSurface(FSCommand): @@ -280,6 +327,7 @@ class SampleToSurface(FSCommand): >>> res = sampler.run() # doctest: +SKIP """ + _cmd = "mri_vol2surf" input_spec = SampleToSurfaceInputSpec 
output_spec = SampleToSurfaceOutputSpec @@ -310,16 +358,22 @@ def _format_arg(self, name, spec, value): if ext in filemap.values(): raise ValueError( "Cannot create {} file with extension " - "{}".format(value, ext)) + "{}".format(value, ext) + ) else: - logger.warning('Creating %s file with extension %s: %s%s', - value, ext, base, ext) + logger.warning( + "Creating %s file with extension %s: %s%s", + value, + ext, + base, + ext, + ) if value in implicit_filetypes: return "" - if name == 'surf_reg': + if name == "surf_reg": if value is True: - return spec.argstr % 'sphere.reg' + return spec.argstr % "sphere.reg" return super(SampleToSurface, self)._format_arg(name, spec, value) @@ -328,19 +382,20 @@ def _get_outfilename(self, opt="out_file"): if not isdefined(outfile) or isinstance(outfile, bool): if isdefined(self.inputs.out_type): if opt == "hits_file": - suffix = '_hits.' + filemap[self.inputs.out_type] + suffix = "_hits." + filemap[self.inputs.out_type] else: - suffix = '.' + filemap[self.inputs.out_type] + suffix = "." + filemap[self.inputs.out_type] elif opt == "hits_file": suffix = "_hits.mgz" else: - suffix = '.mgz' + suffix = ".mgz" outfile = fname_presuffix( self.inputs.source_file, newpath=os.getcwd(), prefix=self.inputs.hemi + ".", suffix=suffix, - use_ext=False) + use_ext=False, + ) return outfile def _list_outputs(self): @@ -359,7 +414,8 @@ def _list_outputs(self): newpath=os.getcwd(), prefix=self.inputs.hemi + ".", suffix="_vox.txt", - use_ext=False) + use_ext=False, + ) outputs["vox_file"] = voxfile return outputs @@ -371,34 +427,31 @@ def _gen_filename(self, name): class SurfaceSmoothInputSpec(FSTraitedSpec): - in_file = File( - mandatory=True, argstr="--sval %s", desc="source surface file") + in_file = File(mandatory=True, argstr="--sval %s", desc="source surface file") subject_id = traits.String( - mandatory=True, argstr="--s %s", desc="subject id of surface file") + mandatory=True, argstr="--s %s", desc="subject id of surface file" + ) hemi = traits.Enum( - "lh", - "rh", - argstr="--hemi %s", - mandatory=True, - desc="hemisphere to operate on") + "lh", "rh", argstr="--hemi %s", mandatory=True, desc="hemisphere to operate on" + ) fwhm = traits.Float( argstr="--fwhm %.4f", xor=["smooth_iters"], - desc="effective FWHM of the smoothing process") + desc="effective FWHM of the smoothing process", + ) smooth_iters = traits.Int( - argstr="--smooth %d", - xor=["fwhm"], - desc="iterations of the smoothing process") + argstr="--smooth %d", xor=["fwhm"], desc="iterations of the smoothing process" + ) cortex = traits.Bool( True, argstr="--cortex", usedefault=True, - desc="only smooth within $hemi.cortex.label") + desc="only smooth within $hemi.cortex.label", + ) reshape = traits.Bool( - argstr="--reshape", - desc="reshape surface vector to fit in non-mgh format") - out_file = File( - argstr="--tval %s", genfile=True, desc="surface file to write") + argstr="--reshape", desc="reshape surface vector to fit in non-mgh format" + ) + out_file = File(argstr="--tval %s", genfile=True, desc="surface file to write") class SurfaceSmoothOutputSpec(TraitedSpec): @@ -434,6 +487,7 @@ class SurfaceSmooth(FSCommand): >>> smoother.run() # doctest: +SKIP """ + _cmd = "mri_surf2surf" input_spec = SurfaceSmoothInputSpec output_spec = SurfaceSmoothOutputSpec @@ -448,7 +502,8 @@ def _list_outputs(self): else: kernel = self.inputs.smooth_iters outputs["out_file"] = fname_presuffix( - in_file, suffix="_smooth%d" % kernel, newpath=os.getcwd()) + in_file, suffix="_smooth%d" % kernel, newpath=os.getcwd() + ) 
return outputs def _gen_filename(self, name): @@ -462,28 +517,25 @@ class SurfaceTransformInputSpec(FSTraitedSpec): exists=True, mandatory=True, argstr="--sval %s", - xor=['source_annot_file'], - desc="surface file with source values") + xor=["source_annot_file"], + desc="surface file with source values", + ) source_annot_file = File( exists=True, mandatory=True, argstr="--sval-annot %s", - xor=['source_file'], - desc="surface annotation file") + xor=["source_file"], + desc="surface annotation file", + ) source_subject = traits.String( - mandatory=True, - argstr="--srcsubject %s", - desc="subject id for source surface") + mandatory=True, argstr="--srcsubject %s", desc="subject id for source surface" + ) hemi = traits.Enum( - "lh", - "rh", - argstr="--hemi %s", - mandatory=True, - desc="hemisphere to transform") + "lh", "rh", argstr="--hemi %s", mandatory=True, desc="hemisphere to transform" + ) target_subject = traits.String( - mandatory=True, - argstr="--trgsubject %s", - desc="subject id of target surface") + mandatory=True, argstr="--trgsubject %s", desc="subject id of target surface" + ) target_ico_order = traits.Enum( 1, 2, @@ -493,24 +545,24 @@ class SurfaceTransformInputSpec(FSTraitedSpec): 6, 7, argstr="--trgicoorder %d", - desc=("order of the icosahedron if " - "target_subject is 'ico'")) + desc=("order of the icosahedron if " "target_subject is 'ico'"), + ) source_type = traits.Enum( filetypes, - argstr='--sfmt %s', - requires=['source_file'], - desc="source file format") + argstr="--sfmt %s", + requires=["source_file"], + desc="source file format", + ) target_type = traits.Enum( - filetypes + implicit_filetypes, - argstr='--tfmt %s', - desc="output format") + filetypes + implicit_filetypes, argstr="--tfmt %s", desc="output format" + ) reshape = traits.Bool( - argstr="--reshape", - desc="reshape output surface to conform with Nifti") + argstr="--reshape", desc="reshape output surface to conform with Nifti" + ) reshape_factor = traits.Int( - argstr="--reshape-factor", desc="number of slices in reshaped image") - out_file = File( - argstr="--tval %s", genfile=True, desc="surface file to write") + argstr="--reshape-factor", desc="number of slices in reshaped image" + ) + out_file = File(argstr="--tval %s", genfile=True, desc="surface file to write") class SurfaceTransformOutputSpec(TraitedSpec): @@ -536,6 +588,7 @@ class SurfaceTransform(FSCommand): >>> sxfm.run() # doctest: +SKIP """ + _cmd = "mri_surf2surf" input_spec = SurfaceTransformInputSpec output_spec = SurfaceTransformOutputSpec @@ -543,15 +596,21 @@ class SurfaceTransform(FSCommand): def _format_arg(self, name, spec, value): if name == "target_type": if isdefined(self.inputs.out_file): - _, base, ext = split_filename(self._list_outputs()['out_file']) + _, base, ext = split_filename(self._list_outputs()["out_file"]) if ext != filemap[value]: if ext in filemap.values(): raise ValueError( "Cannot create {} file with extension " - "{}".format(value, ext)) + "{}".format(value, ext) + ) else: - logger.warning('Creating %s file with extension %s: %s%s', - value, ext, base, ext) + logger.warning( + "Creating %s file with extension %s: %s%s", + value, + ext, + base, + ext, + ) if value in implicit_filetypes: return "" return super(SurfaceTransform, self)._format_arg(name, spec, value) @@ -568,10 +627,24 @@ def _list_outputs(self): # Some recon-all files don't have a proper extension (e.g. 
"lh.thickness") # so we have to account for that here bad_extensions = [ - ".%s" % e for e in [ - "area", "mid", "pial", "avg_curv", "curv", "inflated", - "jacobian_white", "orig", "nofix", "smoothwm", "crv", - "sphere", "sulc", "thickness", "volume", "white" + ".%s" % e + for e in [ + "area", + "mid", + "pial", + "avg_curv", + "curv", + "inflated", + "jacobian_white", + "orig", + "nofix", + "smoothwm", + "crv", + "sphere", + "sulc", + "thickness", + "volume", + "white", ] ] use_ext = True @@ -586,7 +659,8 @@ def _list_outputs(self): source, suffix=".%s%s" % (self.inputs.target_subject, ext), newpath=os.getcwd(), - use_ext=use_ext) + use_ext=use_ext, + ) else: outputs["out_file"] = os.path.abspath(self.inputs.out_file) return outputs @@ -600,55 +674,59 @@ def _gen_filename(self, name): class Surface2VolTransformInputSpec(FSTraitedSpec): source_file = File( exists=True, - argstr='--surfval %s', + argstr="--surfval %s", copyfile=False, mandatory=True, - xor=['mkmask'], - desc='This is the source of the surface values') - hemi = traits.Str( - argstr='--hemi %s', mandatory=True, desc='hemisphere of data') + xor=["mkmask"], + desc="This is the source of the surface values", + ) + hemi = traits.Str(argstr="--hemi %s", mandatory=True, desc="hemisphere of data") transformed_file = File( name_template="%s_asVol.nii", - desc='Output volume', - argstr='--outvol %s', - name_source=['source_file'], - hash_files=False) + desc="Output volume", + argstr="--outvol %s", + name_source=["source_file"], + hash_files=False, + ) reg_file = File( exists=True, - argstr='--volreg %s', + argstr="--volreg %s", mandatory=True, - desc='tkRAS-to-tkRAS matrix (tkregister2 format)', - xor=['subject_id']) + desc="tkRAS-to-tkRAS matrix (tkregister2 format)", + xor=["subject_id"], + ) template_file = File( - exists=True, argstr='--template %s', desc='Output template volume') + exists=True, argstr="--template %s", desc="Output template volume" + ) mkmask = traits.Bool( - desc='make a mask instead of loading surface values', - argstr='--mkmask', - xor=['source_file']) + desc="make a mask instead of loading surface values", + argstr="--mkmask", + xor=["source_file"], + ) vertexvol_file = File( name_template="%s_asVol_vertex.nii", - desc=('Path name of the vertex output volume, which ' - 'is the same as output volume except that the ' - 'value of each voxel is the vertex-id that is ' - 'mapped to that voxel.'), - argstr='--vtxvol %s', - name_source=['source_file'], - hash_files=False) - surf_name = traits.Str( - argstr='--surf %s', desc='surfname (default is white)') - projfrac = traits.Float(argstr='--projfrac %s', desc='thickness fraction') + desc=( + "Path name of the vertex output volume, which " + "is the same as output volume except that the " + "value of each voxel is the vertex-id that is " + "mapped to that voxel." 
+ ), + argstr="--vtxvol %s", + name_source=["source_file"], + hash_files=False, + ) + surf_name = traits.Str(argstr="--surf %s", desc="surfname (default is white)") + projfrac = traits.Float(argstr="--projfrac %s", desc="thickness fraction") subjects_dir = traits.Str( - argstr='--sd %s', - desc=('freesurfer subjects directory defaults to ' - '$SUBJECTS_DIR')) - subject_id = traits.Str( - argstr='--identity %s', desc='subject id', xor=['reg_file']) + argstr="--sd %s", + desc=("freesurfer subjects directory defaults to " "$SUBJECTS_DIR"), + ) + subject_id = traits.Str(argstr="--identity %s", desc="subject id", xor=["reg_file"]) class Surface2VolTransformOutputSpec(TraitedSpec): - transformed_file = File( - exists=True, desc='Path to output file if used normally') - vertexvol_file = File(desc='vertex map volume path id. Optional') + transformed_file = File(exists=True, desc="Path to output file if used normally") + vertexvol_file = File(desc="vertex map volume path id. Optional") class Surface2VolTransform(FSCommand): @@ -670,7 +748,7 @@ class Surface2VolTransform(FSCommand): """ - _cmd = 'mri_surf2vol' + _cmd = "mri_surf2vol" input_spec = Surface2VolTransformInputSpec output_spec = Surface2VolTransformOutputSpec @@ -682,44 +760,47 @@ class ApplyMaskInputSpec(FSTraitedSpec): mandatory=True, position=-3, argstr="%s", - desc="input image (will be masked)") + desc="input image (will be masked)", + ) mask_file = File( exists=True, mandatory=True, position=-2, argstr="%s", - desc="image defining mask space") + desc="image defining mask space", + ) out_file = File( - name_source=['in_file'], - name_template='%s_masked', + name_source=["in_file"], + name_template="%s_masked", hash_files=True, keep_extension=True, position=-1, argstr="%s", - desc="final image to write") + desc="final image to write", + ) xfm_file = File( exists=True, argstr="-xform %s", - desc="LTA-format transformation matrix to align mask with input") + desc="LTA-format transformation matrix to align mask with input", + ) invert_xfm = traits.Bool(argstr="-invert", desc="invert transformation") xfm_source = File( - exists=True, - argstr="-lta_src %s", - desc="image defining transform source space") + exists=True, argstr="-lta_src %s", desc="image defining transform source space" + ) xfm_target = File( - exists=True, - argstr="-lta_dst %s", - desc="image defining transform target space") + exists=True, argstr="-lta_dst %s", desc="image defining transform target space" + ) use_abs = traits.Bool( - argstr="-abs", desc="take absolute value of mask before applying") - mask_thresh = traits.Float( - argstr="-T %.4f", desc="threshold mask before applying") + argstr="-abs", desc="take absolute value of mask before applying" + ) + mask_thresh = traits.Float(argstr="-T %.4f", desc="threshold mask before applying") keep_mask_deletion_edits = traits.Bool( argstr="-keep_mask_deletion_edits", - desc="transfer voxel-deletion edits (voxels=1) from mask to out vol") + desc="transfer voxel-deletion edits (voxels=1) from mask to out vol", + ) transfer = traits.Int( - argstr="-transfer %d", - desc="transfer only voxel value # from mask to out") + argstr="-transfer %d", desc="transfer only voxel value # from mask to out" + ) class ApplyMaskOutputSpec(TraitedSpec): @@ -735,6 +816,7 @@ class ApplyMask(FSCommand): space with an LTA matrix. 
""" + _cmd = "mri_mask" input_spec = ApplyMaskInputSpec output_spec = ApplyMaskOutputSpec @@ -743,120 +825,142 @@ class ApplyMask(FSCommand): class SurfaceSnapshotsInputSpec(FSTraitedSpec): subject_id = traits.String( - position=1, argstr="%s", mandatory=True, desc="subject to visualize") + position=1, argstr="%s", mandatory=True, desc="subject to visualize" + ) hemi = traits.Enum( "lh", "rh", position=2, argstr="%s", mandatory=True, - desc="hemisphere to visualize") + desc="hemisphere to visualize", + ) surface = traits.String( - position=3, argstr="%s", mandatory=True, desc="surface to visualize") + position=3, argstr="%s", mandatory=True, desc="surface to visualize" + ) show_curv = traits.Bool( - argstr="-curv", desc="show curvature", xor=["show_gray_curv"]) + argstr="-curv", desc="show curvature", xor=["show_gray_curv"] + ) show_gray_curv = traits.Bool( - argstr="-gray", desc="show curvature in gray", xor=["show_curv"]) + argstr="-gray", desc="show curvature in gray", xor=["show_curv"] + ) overlay = File( exists=True, argstr="-overlay %s", desc="load an overlay volume/surface", - requires=["overlay_range"]) + requires=["overlay_range"], + ) reg_xors = ["overlay_reg", "identity_reg", "mni152_reg"] overlay_reg = File( exists=True, argstr="-overlay-reg %s", xor=reg_xors, - desc="registration matrix file to register overlay to surface") + desc="registration matrix file to register overlay to surface", + ) identity_reg = traits.Bool( argstr="-overlay-reg-identity", xor=reg_xors, - desc="use the identity matrix to register the overlay to the surface") + desc="use the identity matrix to register the overlay to the surface", + ) mni152_reg = traits.Bool( argstr="-mni152reg", xor=reg_xors, - desc="use to display a volume in MNI152 space on the average subject") + desc="use to display a volume in MNI152 space on the average subject", + ) overlay_range = traits.Either( traits.Float, traits.Tuple(traits.Float, traits.Float), traits.Tuple(traits.Float, traits.Float, traits.Float), desc="overlay range--either min, (min, max) or (min, mid, max)", - argstr="%s") + argstr="%s", + ) overlay_range_offset = traits.Float( argstr="-foffset %.3f", - desc="overlay range will be symettric around offset value") + desc="overlay range will be symettric around offset value", + ) truncate_overlay = traits.Bool( - argstr="-truncphaseflag 1", desc="truncate the overlay display") + argstr="-truncphaseflag 1", desc="truncate the overlay display" + ) reverse_overlay = traits.Bool( - argstr="-revphaseflag 1", desc="reverse the overlay display") + argstr="-revphaseflag 1", desc="reverse the overlay display" + ) invert_overlay = traits.Bool( - argstr="-invphaseflag 1", desc="invert the overlay display") + argstr="-invphaseflag 1", desc="invert the overlay display" + ) demean_overlay = traits.Bool(argstr="-zm", desc="remove mean from overlay") annot_file = File( exists=True, argstr="-annotation %s", xor=["annot_name"], - desc="path to annotation file to display") + desc="path to annotation file to display", + ) annot_name = traits.String( argstr="-annotation %s", xor=["annot_file"], - desc= - "name of annotation to display (must be in $subject/label directory") + desc="name of annotation to display (must be in $subject/label directory", + ) label_file = File( exists=True, argstr="-label %s", xor=["label_name"], - desc="path to label file to display") + desc="path to label file to display", + ) label_name = traits.String( argstr="-label %s", xor=["label_file"], - desc="name of label to display (must be in $subject/label 
directory") + desc="name of label to display (must be in $subject/label directory", + ) - colortable = File( - exists=True, argstr="-colortable %s", desc="load colortable file") + colortable = File(exists=True, argstr="-colortable %s", desc="load colortable file") label_under = traits.Bool( - argstr="-labels-under", desc="draw label/annotation under overlay") + argstr="-labels-under", desc="draw label/annotation under overlay" + ) label_outline = traits.Bool( - argstr="-label-outline", desc="draw label/annotation as outline") + argstr="-label-outline", desc="draw label/annotation as outline" + ) patch_file = File(exists=True, argstr="-patch %s", desc="load a patch") orig_suffix = traits.String( - argstr="-orig %s", desc="set the orig surface suffix string") + argstr="-orig %s", desc="set the orig surface suffix string" + ) sphere_suffix = traits.String( - argstr="-sphere %s", desc="set the sphere.reg suffix string") + argstr="-sphere %s", desc="set the sphere.reg suffix string" + ) show_color_scale = traits.Bool( - argstr="-colscalebarflag 1", desc="display the color scale bar") + argstr="-colscalebarflag 1", desc="display the color scale bar" + ) show_color_text = traits.Bool( - argstr="-colscaletext 1", desc="display text in the color scale bar") + argstr="-colscaletext 1", desc="display text in the color scale bar" + ) six_images = traits.Bool(desc="also take anterior and posterior snapshots") - screenshot_stem = traits.String( - desc="stem to use for screenshot file names") + screenshot_stem = traits.String(desc="stem to use for screenshot file names") stem_template_args = traits.List( traits.String, requires=["screenshot_stem"], - desc= - "input names to use as arguments for a string-formated stem template") + desc="input names to use as arguments for a string-formated stem template", + ) tcl_script = File( exists=True, argstr="%s", genfile=True, - desc="override default screenshot script") + desc="override default screenshot script", + ) class SurfaceSnapshotsOutputSpec(TraitedSpec): snapshots = OutputMultiPath( - File(exists=True), - desc="tiff images of the surface from different perspectives") + File(exists=True), desc="tiff images of the surface from different perspectives" + ) class SurfaceSnapshots(FSCommand): @@ -885,6 +989,7 @@ class SurfaceSnapshots(FSCommand): >>> res = shots.run() # doctest: +SKIP """ + _cmd = "tksurfer" input_spec = SurfaceSnapshotsInputSpec output_spec = SurfaceSnapshotsOutputSpec @@ -902,9 +1007,11 @@ def _format_arg(self, name, spec, value): if len(value) == 2: return "-fminmax %.3f %.3f" % value else: - return "-fminmax %.3f %.3f -fmid %.3f" % (value[0], - value[2], - value[1]) + return "-fminmax %.3f %.3f -fmid %.3f" % ( + value[0], + value[2], + value[1], + ) elif name == "annot_name" and isdefined(value): # Matching annot by name needs to strip the leading hemi and trailing # extension strings @@ -917,8 +1024,11 @@ def _format_arg(self, name, spec, value): def _run_interface(self, runtime): if not isdefined(self.inputs.screenshot_stem): - stem = "%s_%s_%s" % (self.inputs.subject_id, self.inputs.hemi, - self.inputs.surface) + stem = "%s_%s_%s" % ( + self.inputs.subject_id, + self.inputs.hemi, + self.inputs.surface, + ) else: stem = self.inputs.screenshot_stem stem_args = self.inputs.stem_template_args @@ -927,8 +1037,7 @@ def _run_interface(self, runtime): stem = stem % args # Check if the DISPLAY variable is set -- should avoid crashes (might not?) 
if "DISPLAY" not in os.environ: - raise RuntimeError( - "Graphics are not enabled -- cannot run tksurfer") + raise RuntimeError("Graphics are not enabled -- cannot run tksurfer") runtime.environ["_SNAPSHOT_STEM"] = stem self._write_tcl_script() runtime = super(SurfaceSnapshots, self)._run_interface(runtime) @@ -937,7 +1046,7 @@ def _run_interface(self, runtime): # better exception here if that happened. errors = [ "surfer: failed, no suitable display found", - "Fatal Error in tksurfer.bin: could not open display" + "Fatal Error in tksurfer.bin: could not open display", ] for err in errors: if err in runtime.stderr: @@ -950,21 +1059,33 @@ def _run_interface(self, runtime): def _write_tcl_script(self): fid = open("snapshots.tcl", "w") script = [ - "save_tiff $env(_SNAPSHOT_STEM)-lat.tif", "make_lateral_view", - "rotate_brain_y 180", "redraw", - "save_tiff $env(_SNAPSHOT_STEM)-med.tif", "make_lateral_view", - "rotate_brain_x 90", "redraw", - "save_tiff $env(_SNAPSHOT_STEM)-ven.tif", "make_lateral_view", - "rotate_brain_x -90", "redraw", - "save_tiff $env(_SNAPSHOT_STEM)-dor.tif" + "save_tiff $env(_SNAPSHOT_STEM)-lat.tif", + "make_lateral_view", + "rotate_brain_y 180", + "redraw", + "save_tiff $env(_SNAPSHOT_STEM)-med.tif", + "make_lateral_view", + "rotate_brain_x 90", + "redraw", + "save_tiff $env(_SNAPSHOT_STEM)-ven.tif", + "make_lateral_view", + "rotate_brain_x -90", + "redraw", + "save_tiff $env(_SNAPSHOT_STEM)-dor.tif", ] if isdefined(self.inputs.six_images) and self.inputs.six_images: - script.extend([ - "make_lateral_view", "rotate_brain_y 90", "redraw", - "save_tiff $env(_SNAPSHOT_STEM)-pos.tif", "make_lateral_view", - "rotate_brain_y -90", "redraw", - "save_tiff $env(_SNAPSHOT_STEM)-ant.tif" - ]) + script.extend( + [ + "make_lateral_view", + "rotate_brain_y 90", + "redraw", + "save_tiff $env(_SNAPSHOT_STEM)-pos.tif", + "make_lateral_view", + "rotate_brain_y -90", + "redraw", + "save_tiff $env(_SNAPSHOT_STEM)-ant.tif", + ] + ) script.append("exit") fid.write("\n".join(script)) @@ -973,8 +1094,11 @@ def _write_tcl_script(self): def _list_outputs(self): outputs = self._outputs().get() if not isdefined(self.inputs.screenshot_stem): - stem = "%s_%s_%s" % (self.inputs.subject_id, self.inputs.hemi, - self.inputs.surface) + stem = "%s_%s_%s" % ( + self.inputs.subject_id, + self.inputs.hemi, + self.inputs.surface, + ) else: stem = self.inputs.screenshot_stem stem_args = self.inputs.stem_template_args @@ -1062,73 +1186,77 @@ class MRIsConvertInputSpec(FSTraitedSpec): """ Uses Freesurfer's mris_convert to convert surface files to various formats """ + annot_file = File( - exists=True, - argstr="--annot %s", - desc="input is annotation or gifti label data") + exists=True, argstr="--annot %s", desc="input is annotation or gifti label data" + ) parcstats_file = File( exists=True, argstr="--parcstats %s", - desc="infile is name of text file containing label/val pairs") + desc="infile is name of text file containing label/val pairs", + ) label_file = File( exists=True, argstr="--label %s", - desc="infile is .label file, label is name of this label") + desc="infile is .label file, label is name of this label", + ) scalarcurv_file = File( exists=True, argstr="-c %s", - desc="input is scalar curv overlay file (must still specify surface)") + desc="input is scalar curv overlay file (must still specify surface)", + ) functional_file = File( exists=True, argstr="-f %s", - desc= - "input is functional time-series or other multi-frame data (must specify surface)" + desc="input is functional time-series or 
other multi-frame data (must specify surface)", ) labelstats_outfile = File( exists=False, argstr="--labelstats %s", - desc= - "outfile is name of gifti file to which label stats will be written") + desc="outfile is name of gifti file to which label stats will be written", + ) - patch = traits.Bool( - argstr="-p", desc="input is a patch, not a full surface") + patch = traits.Bool(argstr="-p", desc="input is a patch, not a full surface") rescale = traits.Bool( - argstr="-r", - desc="rescale vertex xyz so total area is same as group average") - normal = traits.Bool( - argstr="-n", desc="output is an ascii file where vertex data") - xyz_ascii = traits.Bool( - argstr="-a", desc="Print only surface xyz to ascii file") + argstr="-r", desc="rescale vertex xyz so total area is same as group average" + ) + normal = traits.Bool(argstr="-n", desc="output is an ascii file where vertex data") + xyz_ascii = traits.Bool(argstr="-a", desc="Print only surface xyz to ascii file") vertex = traits.Bool( - argstr="-v", desc="Writes out neighbors of a vertex in each row") + argstr="-v", desc="Writes out neighbors of a vertex in each row" + ) scale = traits.Float(argstr="-s %.3f", desc="scale vertex xyz by scale") dataarray_num = traits.Int( argstr="--da_num %d", - desc="if input is gifti, 'num' specifies which data array to use") + desc="if input is gifti, 'num' specifies which data array to use", + ) talairachxfm_subjid = traits.String( - argstr="-t %s", desc="apply talairach xfm of subject to vertex xyz") + argstr="-t %s", desc="apply talairach xfm of subject to vertex xyz" + ) origname = traits.String(argstr="-o %s", desc="read orig positions") in_file = File( exists=True, mandatory=True, position=-2, - argstr='%s', - desc='File to read/convert') + argstr="%s", + desc="File to read/convert", + ) out_file = File( - argstr='%s', + argstr="%s", position=-1, genfile=True, - xor=['out_datatype'], + xor=["out_datatype"], mandatory=True, - desc='output filename or True to generate one') + desc="output filename or True to generate one", + ) out_datatype = traits.Enum( "asc", @@ -1139,18 +1267,18 @@ class MRIsConvertInputSpec(FSTraitedSpec): "gii", "mgh", "mgz", - xor=['out_file'], + xor=["out_file"], mandatory=True, desc="These file formats are supported: ASCII: .asc" - "ICO: .ico, .tri GEO: .geo STL: .stl VTK: .vtk GIFTI: .gii MGH surface-encoded 'volume': .mgh, .mgz" + "ICO: .ico, .tri GEO: .geo STL: .stl VTK: .vtk GIFTI: .gii MGH surface-encoded 'volume': .mgh, .mgz", ) to_scanner = traits.Bool( argstr="--to-scanner", - desc="convert coordinates from native FS (tkr) coords to scanner coords" + desc="convert coordinates from native FS (tkr) coords to scanner coords", ) to_tkr = traits.Bool( argstr="--to-tkr", - desc="convert coordinates from scanner coords to native FS (tkr) coords" + desc="convert coordinates from scanner coords to native FS (tkr) coords", ) @@ -1158,7 +1286,8 @@ class MRIsConvertOutputSpec(TraitedSpec): """ Uses Freesurfer's mris_convert to convert surface files to various formats """ - converted = File(exists=True, desc='converted output surface') + + converted = File(exists=True, desc="converted output surface") class MRIsConvert(FSCommand): @@ -1174,7 +1303,8 @@ class MRIsConvert(FSCommand): >>> mris.inputs.out_datatype = 'gii' >>> mris.run() # doctest: +SKIP """ - _cmd = 'mris_convert' + + _cmd = "mris_convert" input_spec = MRIsConvertInputSpec output_spec = MRIsConvertOutputSpec @@ -1189,7 +1319,7 @@ def _list_outputs(self): return outputs def _gen_filename(self, name): - if name == 
'out_file': + if name == "out_file": return os.path.abspath(self._gen_outfilename()) else: return None @@ -1217,30 +1347,33 @@ class MRIsCombineInputSpec(FSTraitedSpec): """ Uses Freesurfer's mris_convert to combine two surface files into one. """ + in_files = traits.List( File(Exists=True), maxlen=2, minlen=2, mandatory=True, position=1, - argstr='--combinesurfs %s', - desc='Two surfaces to be combined.') + argstr="--combinesurfs %s", + desc="Two surfaces to be combined.", + ) out_file = File( - argstr='%s', + argstr="%s", position=-1, genfile=True, mandatory=True, - desc='Output filename. Combined surfaces from in_files.') + desc="Output filename. Combined surfaces from in_files.", + ) class MRIsCombineOutputSpec(TraitedSpec): """ Uses Freesurfer's mris_convert to combine two surface files into one. """ + out_file = File( - exists=True, - desc='Output filename. Combined surfaces from ' - 'in_files.') + exists=True, desc="Output filename. Combined surfaces from " "in_files." + ) class MRIsCombine(FSSurfaceCommand): @@ -1268,7 +1401,8 @@ class MRIsCombine(FSSurfaceCommand): 'mris_convert --combinesurfs lh.pial rh.pial bh.pial' >>> mris.run() # doctest: +SKIP """ - _cmd = 'mris_convert' + + _cmd = "mris_convert" input_spec = MRIsCombineInputSpec output_spec = MRIsCombineOutputSpec @@ -1279,9 +1413,9 @@ def _list_outputs(self): # regardless of input file names, except when path info is # specified path, base = os.path.split(self.inputs.out_file) - if path == '' and base[:3] not in ('lh.', 'rh.'): - base = 'lh.' + base - outputs['out_file'] = os.path.abspath(os.path.join(path, base)) + if path == "" and base[:3] not in ("lh.", "rh."): + base = "lh." + base + outputs["out_file"] = os.path.abspath(os.path.join(path, base)) return outputs @@ -1304,33 +1438,35 @@ class MRITessellateInputSpec(FSTraitedSpec): exists=True, mandatory=True, position=-3, - argstr='%s', - desc='Input volume to tesselate voxels from.') + argstr="%s", + desc="Input volume to tesselate voxels from.", + ) label_value = traits.Int( position=-2, - argstr='%d', + argstr="%d", mandatory=True, - desc= - 'Label value which to tesselate from the input volume. (integer, if input is "filled.mgz" volume, 127 is rh, 255 is lh)' + desc='Label value which to tesselate from the input volume. 
(integer, if input is "filled.mgz" volume, 127 is rh, 255 is lh)', ) out_file = File( - argstr='%s', + argstr="%s", position=-1, genfile=True, - desc='output filename or True to generate one') + desc="output filename or True to generate one", + ) tesselate_all_voxels = traits.Bool( - argstr='-a', - desc='Tessellate the surface of all voxels with different labels') + argstr="-a", desc="Tessellate the surface of all voxels with different labels" + ) use_real_RAS_coordinates = traits.Bool( - argstr='-n', - desc='Saves surface with real RAS coordinates where c_(r,a,s) != 0') + argstr="-n", desc="Saves surface with real RAS coordinates where c_(r,a,s) != 0" + ) class MRITessellateOutputSpec(TraitedSpec): """ Uses Freesurfer's mri_tessellate to create surfaces by tessellating a given input volume """ - surface = File(exists=True, desc='binary surface of the tessellation ') + + surface = File(exists=True, desc="binary surface of the tessellation ") class MRITessellate(FSCommand): @@ -1347,17 +1483,18 @@ class MRITessellate(FSCommand): >>> tess.inputs.out_file = 'lh.hippocampus' >>> tess.run() # doctest: +SKIP """ - _cmd = 'mri_tessellate' + + _cmd = "mri_tessellate" input_spec = MRITessellateInputSpec output_spec = MRITessellateOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['surface'] = os.path.abspath(self._gen_outfilename()) + outputs["surface"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_filename(self, name): - if name == 'out_file': + if name == "out_file": return self._gen_outfilename() else: return None @@ -1367,7 +1504,7 @@ def _gen_outfilename(self): return self.inputs.out_file else: _, name, ext = split_filename(self.inputs.in_file) - return name + ext + '_' + str(self.inputs.label_value) + return name + ext + "_" + str(self.inputs.label_value) class MRIPretessInputSpec(FSTraitedSpec): @@ -1375,49 +1512,57 @@ class MRIPretessInputSpec(FSTraitedSpec): exists=True, mandatory=True, position=-4, - argstr='%s', - desc=('filled volume, usually wm.mgz')) + argstr="%s", + desc=("filled volume, usually wm.mgz"), + ) label = traits.Either( - traits.Str('wm'), + traits.Str("wm"), traits.Int(1), - argstr='%s', - default='wm', + argstr="%s", + default="wm", mandatory=True, usedefault=True, position=-3, - desc=('label to be picked up, can be a Freesurfer\'s string like ' - '\'wm\' or a label value (e.g. 127 for rh or 255 for lh)')) + desc=( + "label to be picked up, can be a Freesurfer's string like " + "'wm' or a label value (e.g. 127 for rh or 255 for lh)" + ), + ) in_norm = File( exists=True, mandatory=True, position=-2, - argstr='%s', - desc=('the normalized, brain-extracted T1w image. Usually norm.mgz')) + argstr="%s", + desc=("the normalized, brain-extracted T1w image. 
Usually norm.mgz"), + ) out_file = File( position=-1, - argstr='%s', - name_source=['in_filled'], - name_template='%s_pretesswm', + argstr="%s", + name_source=["in_filled"], + name_template="%s_pretesswm", keep_extension=True, - desc='the output file after mri_pretess.') + desc="the output file after mri_pretess.", + ) nocorners = traits.Bool( False, - argstr='-nocorners', - desc=('do not remove corner configurations' - ' in addition to edge ones.')) - keep = traits.Bool(False, argstr='-keep', desc=('keep WM edits')) + argstr="-nocorners", + desc=("do not remove corner configurations" " in addition to edge ones."), + ) + keep = traits.Bool(False, argstr="-keep", desc=("keep WM edits")) test = traits.Bool( False, - argstr='-test', - desc= - ('adds a voxel that should be removed by ' - 'mri_pretess. The value of the voxel is set to that of an ON-edited WM, ' - 'so it should be kept with -keep. The output will NOT be saved.')) + argstr="-test", + desc=( + "adds a voxel that should be removed by " + "mri_pretess. The value of the voxel is set to that of an ON-edited WM, " + "so it should be kept with -keep. The output will NOT be saved." + ), + ) class MRIPretessOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='output file after mri_pretess') + out_file = File(exists=True, desc="output file after mri_pretess") class MRIPretess(FSCommand): @@ -1444,7 +1589,8 @@ class MRIPretess(FSCommand): >>> pretess.run() # doctest: +SKIP """ - _cmd = 'mri_pretess' + + _cmd = "mri_pretess" input_spec = MRIPretessInputSpec output_spec = MRIPretessOutputSpec @@ -1458,35 +1604,36 @@ class MRIMarchingCubesInputSpec(FSTraitedSpec): exists=True, mandatory=True, position=1, - argstr='%s', - desc='Input volume to tesselate voxels from.') + argstr="%s", + desc="Input volume to tesselate voxels from.", + ) label_value = traits.Int( position=2, - argstr='%d', + argstr="%d", mandatory=True, - desc= - 'Label value which to tesselate from the input volume. (integer, if input is "filled.mgz" volume, 127 is rh, 255 is lh)' + desc='Label value which to tesselate from the input volume. 
(integer, if input is "filled.mgz" volume, 127 is rh, 255 is lh)', ) connectivity_value = traits.Int( 1, position=-1, - argstr='%d', + argstr="%d", usedefault=True, - desc= - 'Alter the marching cubes connectivity: 1=6+,2=18,3=6,4=26 (default=1)' + desc="Alter the marching cubes connectivity: 1=6+,2=18,3=6,4=26 (default=1)", ) out_file = File( - argstr='./%s', + argstr="./%s", position=-2, genfile=True, - desc='output filename or True to generate one') + desc="output filename or True to generate one", + ) class MRIMarchingCubesOutputSpec(TraitedSpec): """ Uses Freesurfer's mri_mc to create surfaces by tessellating a given input volume """ - surface = File(exists=True, desc='binary surface of the tessellation ') + + surface = File(exists=True, desc="binary surface of the tessellation ") class MRIMarchingCubes(FSCommand): @@ -1503,17 +1650,18 @@ class MRIMarchingCubes(FSCommand): >>> mc.inputs.out_file = 'lh.hippocampus' >>> mc.run() # doctest: +SKIP """ - _cmd = 'mri_mc' + + _cmd = "mri_mc" input_spec = MRIMarchingCubesInputSpec output_spec = MRIMarchingCubesOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['surface'] = self._gen_outfilename() + outputs["surface"] = self._gen_outfilename() return outputs def _gen_filename(self, name): - if name == 'out_file': + if name == "out_file": return self._gen_outfilename() else: return None @@ -1523,8 +1671,7 @@ def _gen_outfilename(self): return os.path.abspath(self.inputs.out_file) else: _, name, ext = split_filename(self.inputs.in_file) - return os.path.abspath( - name + ext + '_' + str(self.inputs.label_value)) + return os.path.abspath(name + ext + "_" + str(self.inputs.label_value)) class SmoothTessellationInputSpec(FSTraitedSpec): @@ -1535,50 +1682,62 @@ class SmoothTessellationInputSpec(FSTraitedSpec): in_file = File( exists=True, mandatory=True, - argstr='%s', + argstr="%s", position=-2, copyfile=True, - desc='Input volume to tesselate voxels from.') + desc="Input volume to tesselate voxels from.", + ) curvature_averaging_iterations = traits.Int( - argstr='-a %d', - desc='Number of curvature averaging iterations (default=10)') + argstr="-a %d", desc="Number of curvature averaging iterations (default=10)" + ) smoothing_iterations = traits.Int( - argstr='-n %d', desc='Number of smoothing iterations (default=10)') + argstr="-n %d", desc="Number of smoothing iterations (default=10)" + ) snapshot_writing_iterations = traits.Int( - argstr='-w %d', desc='Write snapshot every "n" iterations') + argstr="-w %d", desc='Write snapshot every "n" iterations' + ) use_gaussian_curvature_smoothing = traits.Bool( - argstr='-g', desc='Use Gaussian curvature smoothing') + argstr="-g", desc="Use Gaussian curvature smoothing" + ) gaussian_curvature_norm_steps = traits.Int( - argstr='%d ', desc='Use Gaussian curvature smoothing') + argstr="%d ", desc="Use Gaussian curvature smoothing" + ) gaussian_curvature_smoothing_steps = traits.Int( - argstr='%d', desc='Use Gaussian curvature smoothing') + argstr="%d", desc="Use Gaussian curvature smoothing" + ) disable_estimates = traits.Bool( - argstr='-nw', - desc='Disables the writing of curvature and area estimates') + argstr="-nw", desc="Disables the writing of curvature and area estimates" + ) normalize_area = traits.Bool( - argstr='-area', desc='Normalizes the area after smoothing') - use_momentum = traits.Bool(argstr='-m', desc='Uses momentum') + argstr="-area", desc="Normalizes the area after smoothing" + ) + use_momentum = traits.Bool(argstr="-m", desc="Uses momentum") out_file = File( 
- argstr='%s', + argstr="%s", position=-1, genfile=True, - desc='output filename or True to generate one') + desc="output filename or True to generate one", + ) out_curvature_file = File( - argstr='-c %s', desc='Write curvature to ?h.curvname (default "curv")') + argstr="-c %s", desc='Write curvature to ?h.curvname (default "curv")' + ) out_area_file = File( - argstr='-b %s', desc='Write area to ?h.areaname (default "area")') + argstr="-b %s", desc='Write area to ?h.areaname (default "area")' + ) seed = traits.Int( - argstr="-seed %d", desc="Seed for setting random number generator") + argstr="-seed %d", desc="Seed for setting random number generator" + ) class SmoothTessellationOutputSpec(TraitedSpec): """ This program smooths the tessellation of a surface using 'mris_smooth' """ - surface = File(exists=True, desc='Smoothed surface file ') + + surface = File(exists=True, desc="Smoothed surface file ") class SmoothTessellation(FSCommand): @@ -1598,17 +1757,18 @@ class SmoothTessellation(FSCommand): >>> smooth.inputs.in_file = 'lh.hippocampus.stl' >>> smooth.run() # doctest: +SKIP """ - _cmd = 'mris_smooth' + + _cmd = "mris_smooth" input_spec = SmoothTessellationInputSpec output_spec = SmoothTessellationOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['surface'] = self._gen_outfilename() + outputs["surface"] = self._gen_outfilename() return outputs def _gen_filename(self, name): - if name == 'out_file': + if name == "out_file": return self._gen_outfilename() else: return None @@ -1618,7 +1778,7 @@ def _gen_outfilename(self): return os.path.abspath(self.inputs.out_file) else: _, name, ext = split_filename(self.inputs.in_file) - return os.path.abspath(name + '_smoothed' + ext) + return os.path.abspath(name + "_smoothed" + ext) def _run_interface(self, runtime): # The returncode is meaningless in BET. 
So check the output @@ -1633,19 +1793,21 @@ def _run_interface(self, runtime): class MakeAverageSubjectInputSpec(FSTraitedSpec): subjects_ids = traits.List( traits.Str(), - argstr='--subjects %s', - desc='freesurfer subjects ids to average', + argstr="--subjects %s", + desc="freesurfer subjects ids to average", mandatory=True, - sep=' ') + sep=" ", + ) out_name = File( - 'average', - argstr='--out %s', - desc='name for the average subject', - usedefault=True) + "average", + argstr="--out %s", + desc="name for the average subject", + usedefault=True, + ) class MakeAverageSubjectOutputSpec(TraitedSpec): - average_subject_name = traits.Str(desc='Output registration file') + average_subject_name = traits.Str(desc="Output registration file") class MakeAverageSubject(FSCommand): @@ -1661,33 +1823,31 @@ class MakeAverageSubject(FSCommand): """ - _cmd = 'make_average_subject' + _cmd = "make_average_subject" input_spec = MakeAverageSubjectInputSpec output_spec = MakeAverageSubjectOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['average_subject_name'] = self.inputs.out_name + outputs["average_subject_name"] = self.inputs.out_name return outputs class ExtractMainComponentInputSpec(CommandLineInputSpec): in_file = File( - exists=True, - mandatory=True, - argstr='%s', - position=1, - desc='input surface file') + exists=True, mandatory=True, argstr="%s", position=1, desc="input surface file" + ) out_file = File( - name_template='%s.maincmp', - name_source='in_file', - argstr='%s', + name_template="%s.maincmp", + name_source="in_file", + argstr="%s", position=2, - desc='surface containing main component') + desc="surface containing main component", + ) class ExtractMainComponentOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='surface containing main component') + out_file = File(exists=True, desc="surface containing main component") class ExtractMainComponent(CommandLine): @@ -1703,80 +1863,87 @@ class ExtractMainComponent(CommandLine): """ - _cmd = 'mris_extract_main_component' + _cmd = "mris_extract_main_component" input_spec = ExtractMainComponentInputSpec output_spec = ExtractMainComponentOutputSpec class Tkregister2InputSpec(FSTraitedSpec): target_image = File( - exists=True, argstr="--targ %s", xor=['fstarg'], desc='target volume') + exists=True, argstr="--targ %s", xor=["fstarg"], desc="target volume" + ) fstarg = traits.Bool( False, - argstr='--fstarg', - xor=['target_image'], - desc='use subject\'s T1 as reference') + argstr="--fstarg", + xor=["target_image"], + desc="use subject's T1 as reference", + ) moving_image = File( - exists=True, mandatory=True, argstr="--mov %s", desc='moving volume') + exists=True, mandatory=True, argstr="--mov %s", desc="moving volume" + ) # Input registration file options fsl_in_matrix = File( - exists=True, - argstr="--fsl %s", - desc='fsl-style registration input matrix') + exists=True, argstr="--fsl %s", desc="fsl-style registration input matrix" + ) xfm = File( exists=True, - argstr='--xfm %s', - desc='use a matrix in MNI coordinates as initial registration') + argstr="--xfm %s", + desc="use a matrix in MNI coordinates as initial registration", + ) lta_in = File( exists=True, - argstr='--lta %s', - desc='use a matrix in MNI coordinates as initial registration') + argstr="--lta %s", + desc="use a matrix in MNI coordinates as initial registration", + ) invert_lta_in = traits.Bool( - requires=['lta_in'], desc='Invert input LTA before applying') + requires=["lta_in"], desc="Invert input LTA before applying" + ) # Output 
registration file options fsl_out = traits.Either( True, File, - argstr='--fslregout %s', - desc='compute an FSL-compatible resgitration matrix') + argstr="--fslregout %s", + desc="compute an FSL-compatible resgitration matrix", + ) lta_out = traits.Either( - True, - File, - argstr='--ltaout %s', - desc='output registration file (LTA format)') + True, File, argstr="--ltaout %s", desc="output registration file (LTA format)" + ) invert_lta_out = traits.Bool( - argstr='--ltaout-inv', - requires=['lta_in'], - desc='Invert input LTA before applying') + argstr="--ltaout-inv", + requires=["lta_in"], + desc="Invert input LTA before applying", + ) - subject_id = traits.String(argstr="--s %s", desc='freesurfer subject ID') + subject_id = traits.String(argstr="--s %s", desc="freesurfer subject ID") noedit = traits.Bool( - True, - argstr="--noedit", - usedefault=True, - desc='do not open edit window (exit)') + True, argstr="--noedit", usedefault=True, desc="do not open edit window (exit)" + ) reg_file = File( - 'register.dat', + "register.dat", usedefault=True, mandatory=True, - argstr='--reg %s', - desc='freesurfer-style registration file') + argstr="--reg %s", + desc="freesurfer-style registration file", + ) reg_header = traits.Bool( - False, argstr='--regheader', desc='compute regstration from headers') + False, argstr="--regheader", desc="compute regstration from headers" + ) fstal = traits.Bool( False, - argstr='--fstal', - xor=['target_image', 'moving_image', 'reg_file'], - desc='set mov to be tal and reg to be tal xfm') + argstr="--fstal", + xor=["target_image", "moving_image", "reg_file"], + desc="set mov to be tal and reg to be tal xfm", + ) movscale = traits.Float( - argstr='--movscale %f', desc='adjust registration matrix to scale mov') + argstr="--movscale %f", desc="adjust registration matrix to scale mov" + ) class Tkregister2OutputSpec(TraitedSpec): - reg_file = File(exists=True, desc='freesurfer-style registration file') - fsl_file = File(desc='FSL-style registration file') - lta_file = File(desc='LTA-style registration file') + reg_file = File(exists=True, desc="freesurfer-style registration file") + fsl_file = File(desc="FSL-style registration file") + lta_file = File(desc="LTA-style registration file") class Tkregister2(FSCommand): @@ -1812,38 +1979,41 @@ class Tkregister2(FSCommand): 'tkregister2 --fsl flirt.mat --mov epi.nii --noedit --reg register.dat' >>> tk2.run() # doctest: +SKIP """ + _cmd = "tkregister2" input_spec = Tkregister2InputSpec output_spec = Tkregister2OutputSpec def _format_arg(self, name, spec, value): - if name == 'lta_in' and self.inputs.invert_lta_in: - spec = '--lta-inv %s' - if name in ('fsl_out', 'lta_out') and value is True: + if name == "lta_in" and self.inputs.invert_lta_in: + spec = "--lta-inv %s" + if name in ("fsl_out", "lta_out") and value is True: value = self._list_outputs()[name] return super(Tkregister2, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self._outputs().get() reg_file = os.path.abspath(self.inputs.reg_file) - outputs['reg_file'] = reg_file + outputs["reg_file"] = reg_file cwd = os.getcwd() fsl_out = self.inputs.fsl_out if isdefined(fsl_out): if fsl_out is True: - outputs['fsl_file'] = fname_presuffix( - reg_file, suffix='.mat', newpath=cwd, use_ext=False) + outputs["fsl_file"] = fname_presuffix( + reg_file, suffix=".mat", newpath=cwd, use_ext=False + ) else: - outputs['fsl_file'] = os.path.abspath(self.inputs.fsl_out) + outputs["fsl_file"] = os.path.abspath(self.inputs.fsl_out) lta_out = self.inputs.lta_out 
if isdefined(lta_out): if lta_out is True: - outputs['lta_file'] = fname_presuffix( - reg_file, suffix='.lta', newpath=cwd, use_ext=False) + outputs["lta_file"] = fname_presuffix( + reg_file, suffix=".lta", newpath=cwd, use_ext=False + ) else: - outputs['lta_file'] = os.path.abspath(self.inputs.lta_out) + outputs["lta_file"] = os.path.abspath(self.inputs.lta_out) return outputs def _gen_outfilename(self): @@ -1851,34 +2021,26 @@ def _gen_outfilename(self): return os.path.abspath(self.inputs.out_file) else: _, name, ext = split_filename(self.inputs.in_file) - return os.path.abspath(name + '_smoothed' + ext) + return os.path.abspath(name + "_smoothed" + ext) class AddXFormToHeaderInputSpec(FSTraitedSpec): # required in_file = File( - exists=True, - mandatory=True, - position=-2, - argstr="%s", - desc="input volume") + exists=True, mandatory=True, position=-2, argstr="%s", desc="input volume" + ) # transform file does NOT need to exist at the time if using copy_name transform = File( - exists=False, - mandatory=True, - position=-3, - argstr="%s", - desc="xfm file") + exists=False, mandatory=True, position=-3, argstr="%s", desc="xfm file" + ) out_file = File( - 'output.mgz', - position=-1, - argstr="%s", - usedefault=True, - desc="output volume") + "output.mgz", position=-1, argstr="%s", usedefault=True, desc="output volume" + ) # optional copy_name = traits.Bool( - argstr="-c", desc="do not try to load the xfmfile, just copy name") + argstr="-c", desc="do not try to load the xfmfile, just copy name" + ) verbose = traits.Bool(argstr="-v", desc="be verbose") @@ -1911,12 +2073,13 @@ class AddXFormToHeader(FSCommand): [https://surfer.nmr.mgh.harvard.edu/fswiki/mri_add_xform_to_header] """ + _cmd = "mri_add_xform_to_header" input_spec = AddXFormToHeaderInputSpec output_spec = AddXFormToHeaderOutputSpec def _format_arg(self, name, spec, value): - if name == 'transform': + if name == "transform": return value # os.path.abspath(value) # if name == 'copy_name' and value: # self.input_spec.transform @@ -1930,30 +2093,32 @@ def _list_outputs(self): class CheckTalairachAlignmentInputSpec(FSTraitedSpec): in_file = File( - argstr='-xfm %s', - xor=['subject'], + argstr="-xfm %s", + xor=["subject"], exists=True, mandatory=True, position=-1, - desc="specify the talairach.xfm file to check") + desc="specify the talairach.xfm file to check", + ) subject = traits.String( - argstr='-subj %s', - xor=['in_file'], + argstr="-subj %s", + xor=["in_file"], mandatory=True, position=-1, - desc="specify subject's name") + desc="specify subject's name", + ) # optional threshold = traits.Float( default_value=0.010, usedefault=True, - argstr='-T %.3f', - desc="Talairach transforms for subjects with p-values <= T " + - "are considered as very unlikely default=0.010") + argstr="-T %.3f", + desc="Talairach transforms for subjects with p-values <= T " + + "are considered as very unlikely default=0.010", + ) class CheckTalairachAlignmentOutputSpec(TraitedSpec): - out_file = File( - exists=True, desc="The input file for CheckTalairachAlignment") + out_file = File(exists=True, desc="The input file for CheckTalairachAlignment") class CheckTalairachAlignment(FSCommand): @@ -1973,37 +2138,32 @@ class CheckTalairachAlignment(FSCommand): >>> checker.run() # doctest: +SKIP """ + _cmd = "talairach_afd" input_spec = CheckTalairachAlignmentInputSpec output_spec = CheckTalairachAlignmentOutputSpec def _list_outputs(self): outputs = self._outputs().get() - outputs['out_file'] = self.inputs.in_file + outputs["out_file"] = self.inputs.in_file 
return outputs class TalairachAVIInputSpec(FSTraitedSpec): - in_file = File( - argstr='--i %s', exists=True, mandatory=True, desc="input volume") + in_file = File(argstr="--i %s", exists=True, mandatory=True, desc="input volume") out_file = File( - argstr='--xfm %s', - mandatory=True, - exists=False, - desc="output xfm file") + argstr="--xfm %s", mandatory=True, exists=False, desc="output xfm file" + ) # optional atlas = traits.String( - argstr='--atlas %s', - desc="alternate target atlas (in freesurfer/average dir)") + argstr="--atlas %s", desc="alternate target atlas (in freesurfer/average dir)" + ) class TalairachAVIOutputSpec(TraitedSpec): - out_file = File( - exists=False, desc="The output transform for TalairachAVI") - out_log = File( - exists=False, desc="The output log file for TalairachAVI") - out_txt = File( - exists=False, desc="The output text file for TaliarachAVI") + out_file = File(exists=False, desc="The output transform for TalairachAVI") + out_log = File(exists=False, desc="The output log file for TalairachAVI") + out_txt = File(exists=False, desc="The output text file for TaliarachAVI") class TalairachAVI(FSCommand): @@ -2026,27 +2186,30 @@ class TalairachAVI(FSCommand): >>> example.run() # doctest: +SKIP """ + _cmd = "talairach_avi" input_spec = TalairachAVIInputSpec output_spec = TalairachAVIOutputSpec def _list_outputs(self): outputs = self._outputs().get() - outputs['out_file'] = os.path.abspath(self.inputs.out_file) - outputs['out_log'] = os.path.abspath('talairach_avi.log') - outputs['out_txt'] = os.path.join( + outputs["out_file"] = os.path.abspath(self.inputs.out_file) + outputs["out_log"] = os.path.abspath("talairach_avi.log") + outputs["out_txt"] = os.path.join( os.path.dirname(self.inputs.out_file), - 'talsrcimg_to_' + str(self.inputs.atlas) + 't4_vox2vox.txt') + "talsrcimg_to_" + str(self.inputs.atlas) + "t4_vox2vox.txt", + ) return outputs class TalairachQCInputSpec(FSTraitedSpec): log_file = File( - argstr='%s', + argstr="%s", mandatory=True, exists=True, position=0, - desc="The log file for TalairachQC") + desc="The log file for TalairachQC", + ) class TalairachQC(FSScriptCommand): @@ -2060,6 +2223,7 @@ class TalairachQC(FSScriptCommand): >>> qc.cmdline 'tal_QC_AZS dirs.txt' """ + _cmd = "tal_QC_AZS" input_spec = TalairachQCInputSpec output_spec = FSScriptOutputSpec @@ -2071,28 +2235,32 @@ class RemoveNeckInputSpec(FSTraitedSpec): exists=True, mandatory=True, position=-4, - desc="Input file for RemoveNeck") + desc="Input file for RemoveNeck", + ) out_file = File( argstr="%s", exists=False, - name_source=['in_file'], + name_source=["in_file"], name_template="%s_noneck", hash_files=False, keep_extension=True, position=-1, - desc="Output file for RemoveNeck") + desc="Output file for RemoveNeck", + ) transform = File( argstr="%s", exists=True, mandatory=True, position=-3, - desc="Input transform file for RemoveNeck") + desc="Input transform file for RemoveNeck", + ) template = File( argstr="%s", exists=True, mandatory=True, position=-2, - desc="Input template file for RemoveNeck") + desc="Input template file for RemoveNeck", + ) # optional radius = traits.Int(argstr="-radius %d", desc="Radius") @@ -2116,13 +2284,14 @@ class RemoveNeck(FSCommand): >>> remove_neck.cmdline 'mri_remove_neck norm.mgz trans.mat trans.mat norm_noneck.mgz' """ + _cmd = "mri_remove_neck" input_spec = RemoveNeckInputSpec output_spec = RemoveNeckOutputSpec def _gen_fname(self, name): - if name == 'out_file': - return os.path.abspath('nu_noneck.mgz') + if name == "out_file": + return 
os.path.abspath("nu_noneck.mgz") return None def _list_outputs(self): @@ -2137,22 +2306,24 @@ class MRIFillInputSpec(FSTraitedSpec): mandatory=True, exists=True, position=-2, - desc="Input white matter file") + desc="Input white matter file", + ) out_file = File( argstr="%s", mandatory=True, exists=False, position=-1, - desc="Output filled volume file name for MRIFill") + desc="Output filled volume file name for MRIFill", + ) # optional segmentation = File( argstr="-segmentation %s", exists=True, - desc="Input segmentation file for MRIFill") + desc="Input segmentation file for MRIFill", + ) transform = File( - argstr="-xform %s", - exists=True, - desc="Input transform file for MRIFill") + argstr="-xform %s", exists=True, desc="Input transform file for MRIFill" + ) log_file = File(argstr="-a %s", desc="Output log file for MRIFill") @@ -2195,23 +2366,23 @@ class MRIsInflateInputSpec(FSTraitedSpec): mandatory=True, exists=True, copyfile=True, - desc="Input file for MRIsInflate") + desc="Input file for MRIsInflate", + ) out_file = File( argstr="%s", position=-1, exists=False, - name_source=['in_file'], + name_source=["in_file"], name_template="%s.inflated", hash_files=False, keep_extension=True, - desc="Output file for MRIsInflate") + desc="Output file for MRIsInflate", + ) # optional - out_sulc = File( - exists=False, xor=['no_save_sulc'], desc="Output sulc file") + out_sulc = File(exists=False, xor=["no_save_sulc"], desc="Output sulc file") no_save_sulc = traits.Bool( - argstr='-no-save-sulc', - xor=['out_sulc'], - desc="Do not save sulc file as output") + argstr="-no-save-sulc", xor=["out_sulc"], desc="Do not save sulc file as output" + ) class MRIsInflateOutputSpec(TraitedSpec): @@ -2233,7 +2404,7 @@ class MRIsInflate(FSCommand): 'mris_inflate -no-save-sulc lh.pial lh.inflated' """ - _cmd = 'mris_inflate' + _cmd = "mris_inflate" input_spec = MRIsInflateInputSpec output_spec = MRIsInflateOutputSpec @@ -2253,27 +2424,30 @@ class SphereInputSpec(FSTraitedSpecOpenMP): copyfile=True, mandatory=True, exists=True, - desc="Input file for Sphere") + desc="Input file for Sphere", + ) out_file = File( argstr="%s", position=-1, exists=False, - name_source=['in_file'], + name_source=["in_file"], hash_files=False, - name_template='%s.sphere', - desc="Output file for Sphere") + name_template="%s.sphere", + desc="Output file for Sphere", + ) # optional seed = traits.Int( - argstr="-seed %d", desc="Seed for setting random number generator") + argstr="-seed %d", desc="Seed for setting random number generator" + ) magic = traits.Bool( argstr="-q", - desc= - "No documentation. Direct questions to analysis-bugs@nmr.mgh.harvard.edu" + desc="No documentation. 
Direct questions to analysis-bugs@nmr.mgh.harvard.edu", ) in_smoothwm = File( exists=True, copyfile=True, - desc="Input surface required when -q flag is not selected") + desc="Input surface required when -q flag is not selected", + ) class SphereOutputSpec(TraitedSpec): @@ -2292,7 +2466,8 @@ class Sphere(FSCommandOpenMP): >>> sphere.cmdline 'mris_sphere lh.pial lh.sphere' """ - _cmd = 'mris_sphere' + + _cmd = "mris_sphere" input_spec = SphereInputSpec output_spec = SphereOutputSpec @@ -2304,45 +2479,44 @@ def _list_outputs(self): class FixTopologyInputSpec(FSTraitedSpec): in_orig = File( - exists=True, - mandatory=True, - desc="Undocumented input file .orig") + exists=True, mandatory=True, desc="Undocumented input file .orig" + ) in_inflated = File( exists=True, mandatory=True, - desc="Undocumented input file .inflated") - in_brain = File( - exists=True, mandatory=True, desc="Implicit input brain.mgz") + desc="Undocumented input file .inflated", + ) + in_brain = File(exists=True, mandatory=True, desc="Implicit input brain.mgz") in_wm = File(exists=True, mandatory=True, desc="Implicit input wm.mgz") hemisphere = traits.String( - position=-1, - argstr="%s", - mandatory=True, - desc="Hemisphere being processed") + position=-1, argstr="%s", mandatory=True, desc="Hemisphere being processed" + ) subject_id = traits.String( - 'subject_id', + "subject_id", position=-2, argstr="%s", mandatory=True, usedefault=True, - desc="Subject being processed") + desc="Subject being processed", + ) copy_inputs = traits.Bool( mandatory=True, - desc="If running as a node, set this to True " + - "otherwise, the topology fixing will be done " + "in place.") + desc="If running as a node, set this to True " + + "otherwise, the topology fixing will be done " + + "in place.", + ) # optional seed = traits.Int( - argstr="-seed %d", desc="Seed for setting random number generator") + argstr="-seed %d", desc="Seed for setting random number generator" + ) ga = traits.Bool( argstr="-ga", - desc= - "No documentation. Direct questions to analysis-bugs@nmr.mgh.harvard.edu" + desc="No documentation. Direct questions to analysis-bugs@nmr.mgh.harvard.edu", ) mgz = traits.Bool( argstr="-mgz", - desc= - "No documentation. Direct questions to analysis-bugs@nmr.mgh.harvard.edu" + desc="No documentation. 
Direct questions to analysis-bugs@nmr.mgh.harvard.edu", ) sphere = File(argstr="-sphere %s", desc="Sphere input file") @@ -2372,38 +2546,38 @@ class FixTopology(FSCommand): 'mris_fix_topology -ga -mgz -sphere qsphere.nofix 10335 lh' """ - _cmd = 'mris_fix_topology' + _cmd = "mris_fix_topology" input_spec = FixTopologyInputSpec output_spec = FixTopologyOutputSpec def run(self, **inputs): if self.inputs.copy_inputs: self.inputs.subjects_dir = os.getcwd() - if 'subjects_dir' in inputs: - inputs['subjects_dir'] = self.inputs.subjects_dir + if "subjects_dir" in inputs: + inputs["subjects_dir"] = self.inputs.subjects_dir hemi = self.inputs.hemisphere - copy2subjdir(self, self.inputs.sphere, folder='surf') + copy2subjdir(self, self.inputs.sphere, folder="surf") # the orig file is edited in place self.inputs.in_orig = copy2subjdir( self, self.inputs.in_orig, - folder='surf', - basename='{0}.orig'.format(hemi)) + folder="surf", + basename="{0}.orig".format(hemi), + ) copy2subjdir( self, self.inputs.in_inflated, - folder='surf', - basename='{0}.inflated'.format(hemi)) - copy2subjdir( - self, self.inputs.in_brain, folder='mri', basename='brain.mgz') - copy2subjdir( - self, self.inputs.in_wm, folder='mri', basename='wm.mgz') + folder="surf", + basename="{0}.inflated".format(hemi), + ) + copy2subjdir(self, self.inputs.in_brain, folder="mri", basename="brain.mgz") + copy2subjdir(self, self.inputs.in_wm, folder="mri", basename="wm.mgz") return super(FixTopology, self).run(**inputs) def _format_arg(self, name, spec, value): - if name == 'sphere': + if name == "sphere": # get the basename and take out the hemisphere - suffix = os.path.basename(value).split('.', 1)[1] + suffix = os.path.basename(value).split(".", 1)[1] return spec.argstr % suffix return super(FixTopology, self)._format_arg(name, spec, value) @@ -2419,7 +2593,8 @@ class EulerNumberInputSpec(FSTraitedSpec): position=-1, mandatory=True, exists=True, - desc="Input file for EulerNumber") + desc="Input file for EulerNumber", + ) class EulerNumberOutputSpec(TraitedSpec): @@ -2438,7 +2613,8 @@ class EulerNumber(FSCommand): >>> ft.cmdline 'mris_euler_number lh.pial' """ - _cmd = 'mris_euler_number' + + _cmd = "mris_euler_number" input_spec = EulerNumberInputSpec output_spec = EulerNumberOutputSpec @@ -2455,16 +2631,18 @@ class RemoveIntersectionInputSpec(FSTraitedSpec): mandatory=True, exists=True, copyfile=True, - desc="Input file for RemoveIntersection") + desc="Input file for RemoveIntersection", + ) out_file = File( argstr="%s", position=-1, exists=False, - name_source=['in_file'], - name_template='%s', + name_source=["in_file"], + name_template="%s", hash_files=False, keep_extension=True, - desc="Output file for RemoveIntersection") + desc="Output file for RemoveIntersection", + ) class RemoveIntersectionOutputSpec(TraitedSpec): @@ -2484,7 +2662,7 @@ class RemoveIntersection(FSCommand): 'mris_remove_intersection lh.pial lh.pial' """ - _cmd = 'mris_remove_intersection' + _cmd = "mris_remove_intersection" input_spec = RemoveIntersectionInputSpec output_spec = RemoveIntersectionOutputSpec @@ -2497,82 +2675,83 @@ def _list_outputs(self): class MakeSurfacesInputSpec(FSTraitedSpec): # required hemisphere = traits.Enum( - 'lh', - 'rh', + "lh", + "rh", position=-1, argstr="%s", mandatory=True, - desc="Hemisphere being processed") + desc="Hemisphere being processed", + ) subject_id = traits.String( - 'subject_id', + "subject_id", usedefault=True, position=-2, argstr="%s", mandatory=True, - desc="Subject being processed") + desc="Subject being 
processed", + ) # implicit in_orig = File( exists=True, mandatory=True, - argstr='-orig %s', - desc="Implicit input file .orig") - in_wm = File( - exists=True, mandatory=True, desc="Implicit input file wm.mgz") - in_filled = File( - exists=True, mandatory=True, desc="Implicit input file filled.mgz") + argstr="-orig %s", + desc="Implicit input file .orig", + ) + in_wm = File(exists=True, mandatory=True, desc="Implicit input file wm.mgz") + in_filled = File(exists=True, mandatory=True, desc="Implicit input file filled.mgz") # optional in_white = File(exists=True, desc="Implicit input that is sometimes used") in_label = File( exists=True, - xor=['noaparc'], - desc="Implicit input label/.aparc.annot") + xor=["noaparc"], + desc="Implicit input label/.aparc.annot", + ) orig_white = File( argstr="-orig_white %s", exists=True, - desc="Specify a white surface to start with") + desc="Specify a white surface to start with", + ) orig_pial = File( argstr="-orig_pial %s", exists=True, - requires=['in_label'], - desc="Specify a pial surface to start with") + requires=["in_label"], + desc="Specify a pial surface to start with", + ) fix_mtl = traits.Bool(argstr="-fix_mtl", desc="Undocumented flag") no_white = traits.Bool(argstr="-nowhite", desc="Undocumented flag") white_only = traits.Bool(argstr="-whiteonly", desc="Undocumented flage") - in_aseg = File( - argstr="-aseg %s", exists=True, desc="Input segmentation file") + in_aseg = File(argstr="-aseg %s", exists=True, desc="Input segmentation file") in_T1 = File(argstr="-T1 %s", exists=True, desc="Input brain or T1 file") mgz = traits.Bool( argstr="-mgz", - desc= - "No documentation. Direct questions to analysis-bugs@nmr.mgh.harvard.edu" + desc="No documentation. Direct questions to analysis-bugs@nmr.mgh.harvard.edu", ) noaparc = traits.Bool( argstr="-noaparc", - xor=['in_label'], - desc= - "No documentation. Direct questions to analysis-bugs@nmr.mgh.harvard.edu" + xor=["in_label"], + desc="No documentation. Direct questions to analysis-bugs@nmr.mgh.harvard.edu", ) maximum = traits.Float( - argstr="-max %.1f", - desc="No documentation (used for longitudinal processing)") + argstr="-max %.1f", desc="No documentation (used for longitudinal processing)" + ) longitudinal = traits.Bool( - argstr="-long", - desc="No documentation (used for longitudinal processing)") + argstr="-long", desc="No documentation (used for longitudinal processing)" + ) white = traits.String(argstr="-white %s", desc="White surface name") copy_inputs = traits.Bool( - desc="If running as a node, set this to True." + - "This will copy the input files to the node " + "directory.") + desc="If running as a node, set this to True." + + "This will copy the input files to the node " + + "directory." 
+ ) class MakeSurfacesOutputSpec(TraitedSpec): - out_white = File( - exists=False, desc="Output white matter hemisphere surface") + out_white = File(exists=False, desc="Output white matter hemisphere surface") out_curv = File(exists=False, desc="Output curv file for MakeSurfaces") out_area = File(exists=False, desc="Output area file for MakeSurfaces") out_cortex = File(exists=False, desc="Output cortex file for MakeSurfaces") out_pial = File(exists=False, desc="Output pial surface for MakeSurfaces") - out_thickness = File( - exists=False, desc="Output thickness file for MakeSurfaces") + out_thickness = File(exists=False, desc="Output thickness file for MakeSurfaces") class MakeSurfaces(FSCommand): @@ -2599,42 +2778,50 @@ class MakeSurfaces(FSCommand): 'mris_make_surfaces -T1 T1.mgz -orig pial -orig_pial pial 10335 lh' """ - _cmd = 'mris_make_surfaces' + _cmd = "mris_make_surfaces" input_spec = MakeSurfacesInputSpec output_spec = MakeSurfacesOutputSpec def run(self, **inputs): if self.inputs.copy_inputs: self.inputs.subjects_dir = os.getcwd() - if 'subjects_dir' in inputs: - inputs['subjects_dir'] = self.inputs.subjects_dir + if "subjects_dir" in inputs: + inputs["subjects_dir"] = self.inputs.subjects_dir + copy2subjdir(self, self.inputs.in_wm, folder="mri", basename="wm.mgz") copy2subjdir( - self, self.inputs.in_wm, folder='mri', basename='wm.mgz') + self, self.inputs.in_filled, folder="mri", basename="filled.mgz" + ) copy2subjdir( self, - self.inputs.in_filled, - folder='mri', - basename='filled.mgz') - copy2subjdir(self, self.inputs.in_white, 'surf', - '{0}.white'.format(self.inputs.hemisphere)) + self.inputs.in_white, + "surf", + "{0}.white".format(self.inputs.hemisphere), + ) for originalfile in [self.inputs.in_aseg, self.inputs.in_T1]: - copy2subjdir(self, originalfile, folder='mri') + copy2subjdir(self, originalfile, folder="mri") for originalfile in [ - self.inputs.orig_white, self.inputs.orig_pial, - self.inputs.in_orig + self.inputs.orig_white, + self.inputs.orig_pial, + self.inputs.in_orig, ]: - copy2subjdir(self, originalfile, folder='surf') + copy2subjdir(self, originalfile, folder="surf") if isdefined(self.inputs.in_label): - copy2subjdir(self, self.inputs.in_label, 'label', - '{0}.aparc.annot'.format(self.inputs.hemisphere)) + copy2subjdir( + self, + self.inputs.in_label, + "label", + "{0}.aparc.annot".format(self.inputs.hemisphere), + ) else: os.makedirs( - os.path.join(self.inputs.subjects_dir, - self.inputs.subject_id, 'label')) + os.path.join( + self.inputs.subjects_dir, self.inputs.subject_id, "label" + ) + ) return super(MakeSurfaces, self).run(**inputs) def _format_arg(self, name, spec, value): - if name in ['in_T1', 'in_aseg']: + if name in ["in_T1", "in_aseg"]: # These inputs do not take full paths as inputs or even basenames basename = os.path.basename(value) # whent the -mgz flag is specified, it assumes the mgz extension @@ -2642,63 +2829,65 @@ def _format_arg(self, name, spec, value): prefix = os.path.splitext(basename)[0] else: prefix = basename - if prefix == 'aseg': + if prefix == "aseg": return # aseg is already the default return spec.argstr % prefix - elif name in ['orig_white', 'orig_pial']: + elif name in ["orig_white", "orig_pial"]: # these inputs do take full file paths or even basenames basename = os.path.basename(value) - suffix = basename.split('.')[1] + suffix = basename.split(".")[1] return spec.argstr % suffix - elif name == 'in_orig': - if value.endswith('lh.orig') or value.endswith('rh.orig'): + elif name == "in_orig": + if 
value.endswith("lh.orig") or value.endswith("rh.orig"): # {lh,rh}.orig inputs are not sepcified on command line return else: # if the input orig file is different than lh.orig or rh.orig # these inputs do take full file paths or even basenames basename = os.path.basename(value) - suffix = basename.split('.')[1] + suffix = basename.split(".")[1] return spec.argstr % suffix return super(MakeSurfaces, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self._outputs().get() # Outputs are saved in the surf directory - dest_dir = os.path.join(self.inputs.subjects_dir, - self.inputs.subject_id, 'surf') + dest_dir = os.path.join( + self.inputs.subjects_dir, self.inputs.subject_id, "surf" + ) # labels are saved in the label directory - label_dir = os.path.join(self.inputs.subjects_dir, - self.inputs.subject_id, 'label') + label_dir = os.path.join( + self.inputs.subjects_dir, self.inputs.subject_id, "label" + ) if not self.inputs.no_white: outputs["out_white"] = os.path.join( - dest_dir, - str(self.inputs.hemisphere) + '.white') + dest_dir, str(self.inputs.hemisphere) + ".white" + ) # The curv and area files must have the hemisphere names as a prefix outputs["out_curv"] = os.path.join( - dest_dir, - str(self.inputs.hemisphere) + '.curv') + dest_dir, str(self.inputs.hemisphere) + ".curv" + ) outputs["out_area"] = os.path.join( - dest_dir, - str(self.inputs.hemisphere) + '.area') + dest_dir, str(self.inputs.hemisphere) + ".area" + ) # Something determines when a pial surface and thickness file is generated # but documentation doesn't say what. # The orig_pial input is just a guess - if isdefined(self.inputs.orig_pial) or self.inputs.white == 'NOWRITE': + if isdefined(self.inputs.orig_pial) or self.inputs.white == "NOWRITE": outputs["out_curv"] = outputs["out_curv"] + ".pial" outputs["out_area"] = outputs["out_area"] + ".pial" outputs["out_pial"] = os.path.join( - dest_dir, - str(self.inputs.hemisphere) + '.pial') + dest_dir, str(self.inputs.hemisphere) + ".pial" + ) outputs["out_thickness"] = os.path.join( - dest_dir, - str(self.inputs.hemisphere) + '.thickness') + dest_dir, str(self.inputs.hemisphere) + ".thickness" + ) else: # when a pial surface is generated, the cortex label file is not # generated outputs["out_cortex"] = os.path.join( - label_dir, - str(self.inputs.hemisphere) + '.cortex.label') + label_dir, str(self.inputs.hemisphere) + ".cortex.label" + ) return outputs @@ -2709,26 +2898,25 @@ class CurvatureInputSpec(FSTraitedSpec): mandatory=True, exists=True, copyfile=True, - desc="Input file for Curvature") + desc="Input file for Curvature", + ) # optional - threshold = traits.Float( - argstr="-thresh %.3f", desc="Undocumented input threshold") + threshold = traits.Float(argstr="-thresh %.3f", desc="Undocumented input threshold") n = traits.Bool(argstr="-n", desc="Undocumented boolean flag") averages = traits.Int( argstr="-a %d", - desc= - "Perform this number iterative averages of curvature measure before saving" + desc="Perform this number iterative averages of curvature measure before saving", ) save = traits.Bool( argstr="-w", - desc= - "Save curvature files (will only generate screen output without this option)" + desc="Save curvature files (will only generate screen output without this option)", ) distances = traits.Tuple( traits.Int, traits.Int, argstr="-distances %d %d", - desc="Undocumented input integer distances") + desc="Undocumented input integer distances", + ) copy_input = traits.Bool(desc="Copy input file to current directory") @@ -2753,13 +2941,13 @@ 
class Curvature(FSCommand): 'mris_curvature -w lh.pial' """ - _cmd = 'mris_curvature' + _cmd = "mris_curvature" input_spec = CurvatureInputSpec output_spec = CurvatureOutputSpec def _format_arg(self, name, spec, value): if self.inputs.copy_input: - if name == 'in_file': + if name == "in_file": basename = os.path.basename(value) return spec.argstr % basename return super(Curvature, self)._format_arg(name, spec, value) @@ -2770,60 +2958,66 @@ def _list_outputs(self): in_file = os.path.basename(self.inputs.in_file) else: in_file = self.inputs.in_file - outputs["out_mean"] = os.path.abspath(in_file) + '.H' - outputs["out_gauss"] = os.path.abspath(in_file) + '.K' + outputs["out_mean"] = os.path.abspath(in_file) + ".H" + outputs["out_gauss"] = os.path.abspath(in_file) + ".K" return outputs class CurvatureStatsInputSpec(FSTraitedSpec): surface = File( - argstr="-F %s", - exists=True, - desc="Specify surface file for CurvatureStats") + argstr="-F %s", exists=True, desc="Specify surface file for CurvatureStats" + ) curvfile1 = File( argstr="%s", position=-2, mandatory=True, exists=True, - desc="Input file for CurvatureStats") + desc="Input file for CurvatureStats", + ) curvfile2 = File( argstr="%s", position=-1, mandatory=True, exists=True, - desc="Input file for CurvatureStats") + desc="Input file for CurvatureStats", + ) hemisphere = traits.Enum( - 'lh', - 'rh', + "lh", + "rh", position=-3, argstr="%s", mandatory=True, - desc="Hemisphere being processed") + desc="Hemisphere being processed", + ) subject_id = traits.String( - 'subject_id', + "subject_id", usedefault=True, position=-4, argstr="%s", mandatory=True, - desc="Subject being processed") + desc="Subject being processed", + ) out_file = File( argstr="-o %s", exists=False, - name_source=['hemisphere'], - name_template='%s.curv.stats', + name_source=["hemisphere"], + name_template="%s.curv.stats", hash_files=False, - desc="Output curvature stats file") + desc="Output curvature stats file", + ) # optional min_max = traits.Bool( - argstr="-m", - desc="Output min / max information for the processed curvature.") + argstr="-m", desc="Output min / max information for the processed curvature." + ) values = traits.Bool( - argstr="-G", desc="Triggers a series of derived curvature values") - write = traits.Bool( - argstr="--writeCurvatureFiles", desc="Write curvature files") + argstr="-G", desc="Triggers a series of derived curvature values" + ) + write = traits.Bool(argstr="--writeCurvatureFiles", desc="Write curvature files") copy_inputs = traits.Bool( - desc="If running as a node, set this to True." + - "This will copy the input files to the node " + "directory.") + desc="If running as a node, set this to True." + + "This will copy the input files to the node " + + "directory." 
+ ) class CurvatureStatsOutputSpec(TraitedSpec): @@ -2869,13 +3063,13 @@ class CurvatureStats(FSCommand): 'mris_curvature_stats -m -o lh.curv.stats -F pial -G --writeCurvatureFiles subject_id lh pial pial' """ - _cmd = 'mris_curvature_stats' + _cmd = "mris_curvature_stats" input_spec = CurvatureStatsInputSpec output_spec = CurvatureStatsOutputSpec def _format_arg(self, name, spec, value): - if name in ['surface', 'curvfile1', 'curvfile2']: - prefix = os.path.basename(value).split('.')[1] + if name in ["surface", "curvfile1", "curvfile2"]: + prefix = os.path.basename(value).split(".")[1] return spec.argstr % prefix return super(CurvatureStats, self)._format_arg(name, spec, value) @@ -2887,43 +3081,37 @@ def _list_outputs(self): def run(self, **inputs): if self.inputs.copy_inputs: self.inputs.subjects_dir = os.getcwd() - if 'subjects_dir' in inputs: - inputs['subjects_dir'] = self.inputs.subjects_dir - copy2subjdir(self, self.inputs.surface, 'surf') - copy2subjdir(self, self.inputs.curvfile1, 'surf') - copy2subjdir(self, self.inputs.curvfile2, 'surf') + if "subjects_dir" in inputs: + inputs["subjects_dir"] = self.inputs.subjects_dir + copy2subjdir(self, self.inputs.surface, "surf") + copy2subjdir(self, self.inputs.curvfile1, "surf") + copy2subjdir(self, self.inputs.curvfile2, "surf") return super(CurvatureStats, self).run(**inputs) class JacobianInputSpec(FSTraitedSpec): # required in_origsurf = File( - argstr="%s", - position=-3, - mandatory=True, - exists=True, - desc="Original surface") + argstr="%s", position=-3, mandatory=True, exists=True, desc="Original surface" + ) in_mappedsurf = File( - argstr="%s", - position=-2, - mandatory=True, - exists=True, - desc="Mapped surface") + argstr="%s", position=-2, mandatory=True, exists=True, desc="Mapped surface" + ) # optional out_file = File( argstr="%s", exists=False, position=-1, - name_source=['in_origsurf'], + name_source=["in_origsurf"], hash_files=False, - name_template='%s.jacobian', + name_template="%s.jacobian", keep_extension=False, - desc="Output Jacobian of the surface mapping") + desc="Output Jacobian of the surface mapping", + ) class JacobianOutputSpec(TraitedSpec): - out_file = File( - exists=False, desc="Output Jacobian of the surface mapping") + out_file = File(exists=False, desc="Output Jacobian of the surface mapping") class Jacobian(FSCommand): @@ -2940,49 +3128,45 @@ class Jacobian(FSCommand): 'mris_jacobian lh.pial lh.pial lh.jacobian' """ - _cmd = 'mris_jacobian' + _cmd = "mris_jacobian" input_spec = JacobianInputSpec output_spec = JacobianOutputSpec def _list_outputs(self): outputs = self._outputs().get() - outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) return outputs class MRIsCalcInputSpec(FSTraitedSpec): # required in_file1 = File( - argstr="%s", - position=-3, - mandatory=True, - exists=True, - desc="Input file 1") + argstr="%s", position=-3, mandatory=True, exists=True, desc="Input file 1" + ) action = traits.String( argstr="%s", position=-2, mandatory=True, - desc="Action to perform on input file(s)") + desc="Action to perform on input file(s)", + ) out_file = File( - argstr="-o %s", mandatory=True, desc="Output file after calculation") + argstr="-o %s", mandatory=True, desc="Output file after calculation" + ) # optional in_file2 = File( argstr="%s", exists=True, position=-1, - xor=['in_float', 'in_int'], - desc="Input file 2") + xor=["in_float", "in_int"], + desc="Input file 2", + ) in_float = traits.Float( - argstr="%f", - position=-1, - 
xor=['in_file2', 'in_int'], - desc="Input float") + argstr="%f", position=-1, xor=["in_file2", "in_int"], desc="Input float" + ) in_int = traits.Int( - argstr="%d", - position=-1, - xor=['in_file2', 'in_float'], - desc="Input integer") + argstr="%d", position=-1, xor=["in_file2", "in_float"], desc="Input integer" + ) class MRIsCalcOutputSpec(TraitedSpec): @@ -3014,70 +3198,74 @@ class MRIsCalc(FSCommand): 'mris_calc -o lh.area.mid lh.area add lh.area.pial' """ - _cmd = 'mris_calc' + _cmd = "mris_calc" input_spec = MRIsCalcInputSpec output_spec = MRIsCalcOutputSpec def _list_outputs(self): outputs = self._outputs().get() - outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) return outputs class VolumeMaskInputSpec(FSTraitedSpec): left_whitelabel = traits.Int( - argstr="--label_left_white %d", - mandatory=True, - desc="Left white matter label") + argstr="--label_left_white %d", mandatory=True, desc="Left white matter label" + ) left_ribbonlabel = traits.Int( argstr="--label_left_ribbon %d", mandatory=True, - desc="Left cortical ribbon label") + desc="Left cortical ribbon label", + ) right_whitelabel = traits.Int( - argstr="--label_right_white %d", - mandatory=True, - desc="Right white matter label") + argstr="--label_right_white %d", mandatory=True, desc="Right white matter label" + ) right_ribbonlabel = traits.Int( argstr="--label_right_ribbon %d", mandatory=True, - desc="Right cortical ribbon label") - lh_pial = File( - mandatory=True, exists=True, desc="Implicit input left pial surface") + desc="Right cortical ribbon label", + ) + lh_pial = File(mandatory=True, exists=True, desc="Implicit input left pial surface") rh_pial = File( - mandatory=True, exists=True, desc="Implicit input right pial surface") + mandatory=True, exists=True, desc="Implicit input right pial surface" + ) lh_white = File( - mandatory=True, - exists=True, - desc="Implicit input left white matter surface") + mandatory=True, exists=True, desc="Implicit input left white matter surface" + ) rh_white = File( - mandatory=True, - exists=True, - desc="Implicit input right white matter surface") + mandatory=True, exists=True, desc="Implicit input right white matter surface" + ) aseg = File( exists=True, - xor=['in_aseg'], - desc="Implicit aseg.mgz segmentation. " + - "Specify a different aseg by using the 'in_aseg' input.") + xor=["in_aseg"], + desc="Implicit aseg.mgz segmentation. " + + "Specify a different aseg by using the 'in_aseg' input.", + ) subject_id = traits.String( - 'subject_id', + "subject_id", usedefault=True, position=-1, argstr="%s", mandatory=True, - desc="Subject being processed") + desc="Subject being processed", + ) # optional in_aseg = File( argstr="--aseg_name %s", exists=True, - xor=['aseg'], - desc="Input aseg file for VolumeMask") + xor=["aseg"], + desc="Input aseg file for VolumeMask", + ) save_ribbon = traits.Bool( argstr="--save_ribbon", - desc="option to save just the ribbon for the " + - "hemispheres in the format ?h.ribbon.mgz") + desc="option to save just the ribbon for the " + + "hemispheres in the format ?h.ribbon.mgz", + ) copy_inputs = traits.Bool( - desc="If running as a node, set this to True." + - "This will copy the implicit input files to the " + "node directory.") + desc="If running as a node, set this to True." + + "This will copy the implicit input files to the " + + "node directory." 
+ ) class VolumeMaskOutputSpec(TraitedSpec): @@ -3114,142 +3302,153 @@ class VolumeMask(FSCommand): 'mris_volmask --label_left_ribbon 3 --label_left_white 2 --label_right_ribbon 42 --label_right_white 41 --save_ribbon 10335' """ - _cmd = 'mris_volmask' + _cmd = "mris_volmask" input_spec = VolumeMaskInputSpec output_spec = VolumeMaskOutputSpec def run(self, **inputs): if self.inputs.copy_inputs: self.inputs.subjects_dir = os.getcwd() - if 'subjects_dir' in inputs: - inputs['subjects_dir'] = self.inputs.subjects_dir - copy2subjdir(self, self.inputs.lh_pial, 'surf', 'lh.pial') - copy2subjdir(self, self.inputs.rh_pial, 'surf', 'rh.pial') - copy2subjdir(self, self.inputs.lh_white, 'surf', 'lh.white') - copy2subjdir(self, self.inputs.rh_white, 'surf', 'rh.white') - copy2subjdir(self, self.inputs.in_aseg, 'mri') - copy2subjdir(self, self.inputs.aseg, 'mri', 'aseg.mgz') + if "subjects_dir" in inputs: + inputs["subjects_dir"] = self.inputs.subjects_dir + copy2subjdir(self, self.inputs.lh_pial, "surf", "lh.pial") + copy2subjdir(self, self.inputs.rh_pial, "surf", "rh.pial") + copy2subjdir(self, self.inputs.lh_white, "surf", "lh.white") + copy2subjdir(self, self.inputs.rh_white, "surf", "rh.white") + copy2subjdir(self, self.inputs.in_aseg, "mri") + copy2subjdir(self, self.inputs.aseg, "mri", "aseg.mgz") return super(VolumeMask, self).run(**inputs) def _format_arg(self, name, spec, value): - if name == 'in_aseg': - return spec.argstr % os.path.basename(value).rstrip('.mgz') + if name == "in_aseg": + return spec.argstr % os.path.basename(value).rstrip(".mgz") return super(VolumeMask, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self._outputs().get() - out_dir = os.path.join(self.inputs.subjects_dir, - self.inputs.subject_id, 'mri') - outputs["out_ribbon"] = os.path.join(out_dir, 'ribbon.mgz') + out_dir = os.path.join(self.inputs.subjects_dir, self.inputs.subject_id, "mri") + outputs["out_ribbon"] = os.path.join(out_dir, "ribbon.mgz") if self.inputs.save_ribbon: - outputs["rh_ribbon"] = os.path.join(out_dir, 'rh.ribbon.mgz') - outputs["lh_ribbon"] = os.path.join(out_dir, 'lh.ribbon.mgz') + outputs["rh_ribbon"] = os.path.join(out_dir, "rh.ribbon.mgz") + outputs["lh_ribbon"] = os.path.join(out_dir, "lh.ribbon.mgz") return outputs class ParcellationStatsInputSpec(FSTraitedSpec): # required subject_id = traits.String( - 'subject_id', + "subject_id", usedefault=True, position=-3, argstr="%s", mandatory=True, - desc="Subject being processed") + desc="Subject being processed", + ) hemisphere = traits.Enum( - 'lh', - 'rh', + "lh", + "rh", position=-2, argstr="%s", mandatory=True, - desc="Hemisphere being processed") + desc="Hemisphere being processed", + ) # implicit wm = File( - mandatory=True, - exists=True, - desc="Input file must be /mri/wm.mgz") + mandatory=True, exists=True, desc="Input file must be /mri/wm.mgz" + ) lh_white = File( mandatory=True, exists=True, - desc="Input file must be /surf/lh.white") + desc="Input file must be /surf/lh.white", + ) rh_white = File( mandatory=True, exists=True, - desc="Input file must be /surf/rh.white") + desc="Input file must be /surf/rh.white", + ) lh_pial = File( - mandatory=True, - exists=True, - desc="Input file must be /surf/lh.pial") + mandatory=True, exists=True, desc="Input file must be /surf/lh.pial" + ) rh_pial = File( - mandatory=True, - exists=True, - desc="Input file must be /surf/rh.pial") + mandatory=True, exists=True, desc="Input file must be /surf/rh.pial" + ) transform = File( mandatory=True, exists=True, - desc="Input file 
must be /mri/transforms/talairach.xfm") + desc="Input file must be /mri/transforms/talairach.xfm", + ) thickness = File( mandatory=True, exists=True, - desc="Input file must be /surf/?h.thickness") + desc="Input file must be /surf/?h.thickness", + ) brainmask = File( mandatory=True, exists=True, - desc="Input file must be /mri/brainmask.mgz") + desc="Input file must be /mri/brainmask.mgz", + ) aseg = File( mandatory=True, exists=True, - desc="Input file must be /mri/aseg.presurf.mgz") + desc="Input file must be /mri/aseg.presurf.mgz", + ) ribbon = File( mandatory=True, exists=True, - desc="Input file must be /mri/ribbon.mgz") - cortex_label = File( - exists=True, desc="implicit input file {hemi}.cortex.label") + desc="Input file must be /mri/ribbon.mgz", + ) + cortex_label = File(exists=True, desc="implicit input file {hemi}.cortex.label") # optional surface = traits.String( - position=-1, argstr="%s", desc="Input surface (e.g. 'white')") + position=-1, argstr="%s", desc="Input surface (e.g. 'white')" + ) mgz = traits.Bool(argstr="-mgz", desc="Look for mgz files") - in_cortex = File( - argstr="-cortex %s", exists=True, desc="Input cortex label") + in_cortex = File(argstr="-cortex %s", exists=True, desc="Input cortex label") in_annotation = File( argstr="-a %s", exists=True, - xor=['in_label'], - desc= - "compute properties for each label in the annotation file separately") + xor=["in_label"], + desc="compute properties for each label in the annotation file separately", + ) in_label = File( argstr="-l %s", exists=True, - xor=['in_annotatoin', 'out_color'], - desc="limit calculations to specified label") + xor=["in_annotatoin", "out_color"], + desc="limit calculations to specified label", + ) tabular_output = traits.Bool(argstr="-b", desc="Tabular output") out_table = File( argstr="-f %s", exists=False, genfile=True, - requires=['tabular_output'], - desc="Table output to tablefile") + requires=["tabular_output"], + desc="Table output to tablefile", + ) out_color = File( argstr="-c %s", exists=False, genfile=True, - xor=['in_label'], - desc="Output annotation files's colortable to text file") + xor=["in_label"], + desc="Output annotation files's colortable to text file", + ) copy_inputs = traits.Bool( - desc="If running as a node, set this to True." + - "This will copy the input files to the node " + "directory.") + desc="If running as a node, set this to True." + + "This will copy the input files to the node " + + "directory." 
+ ) th3 = traits.Bool( argstr="-th3", requires=["cortex_label"], - desc="turns on new vertex-wise volume calc for mris_anat_stats") + desc="turns on new vertex-wise volume calc for mris_anat_stats", + ) class ParcellationStatsOutputSpec(TraitedSpec): out_table = File(exists=False, desc="Table output to tablefile") out_color = File( - exists=False, desc="Output annotation files's colortable to text file") + exists=False, desc="Output annotation files's colortable to text file" + ) class ParcellationStats(FSCommand): @@ -3280,35 +3479,47 @@ class ParcellationStats(FSCommand): 'mris_anatomical_stats -c test.ctab -f lh.test.stats 10335 lh white' """ - _cmd = 'mris_anatomical_stats' + _cmd = "mris_anatomical_stats" input_spec = ParcellationStatsInputSpec output_spec = ParcellationStatsOutputSpec def run(self, **inputs): if self.inputs.copy_inputs: self.inputs.subjects_dir = os.getcwd() - if 'subjects_dir' in inputs: - inputs['subjects_dir'] = self.inputs.subjects_dir - copy2subjdir(self, self.inputs.lh_white, 'surf', 'lh.white') - copy2subjdir(self, self.inputs.lh_pial, 'surf', 'lh.pial') - copy2subjdir(self, self.inputs.rh_white, 'surf', 'rh.white') - copy2subjdir(self, self.inputs.rh_pial, 'surf', 'rh.pial') - copy2subjdir(self, self.inputs.wm, 'mri', 'wm.mgz') - copy2subjdir(self, self.inputs.transform, - os.path.join('mri', 'transforms'), 'talairach.xfm') - copy2subjdir(self, self.inputs.brainmask, 'mri', 'brainmask.mgz') - copy2subjdir(self, self.inputs.aseg, 'mri', 'aseg.presurf.mgz') - copy2subjdir(self, self.inputs.ribbon, 'mri', 'ribbon.mgz') - copy2subjdir(self, self.inputs.thickness, 'surf', - '{0}.thickness'.format(self.inputs.hemisphere)) + if "subjects_dir" in inputs: + inputs["subjects_dir"] = self.inputs.subjects_dir + copy2subjdir(self, self.inputs.lh_white, "surf", "lh.white") + copy2subjdir(self, self.inputs.lh_pial, "surf", "lh.pial") + copy2subjdir(self, self.inputs.rh_white, "surf", "rh.white") + copy2subjdir(self, self.inputs.rh_pial, "surf", "rh.pial") + copy2subjdir(self, self.inputs.wm, "mri", "wm.mgz") + copy2subjdir( + self, + self.inputs.transform, + os.path.join("mri", "transforms"), + "talairach.xfm", + ) + copy2subjdir(self, self.inputs.brainmask, "mri", "brainmask.mgz") + copy2subjdir(self, self.inputs.aseg, "mri", "aseg.presurf.mgz") + copy2subjdir(self, self.inputs.ribbon, "mri", "ribbon.mgz") + copy2subjdir( + self, + self.inputs.thickness, + "surf", + "{0}.thickness".format(self.inputs.hemisphere), + ) if isdefined(self.inputs.cortex_label): - copy2subjdir(self, self.inputs.cortex_label, 'label', - '{0}.cortex.label'.format(self.inputs.hemisphere)) + copy2subjdir( + self, + self.inputs.cortex_label, + "label", + "{0}.cortex.label".format(self.inputs.hemisphere), + ) createoutputdirs(self._list_outputs()) return super(ParcellationStats, self).run(**inputs) def _gen_filename(self, name): - if name in ['out_table', 'out_color']: + if name in ["out_table", "out_color"]: return self._list_outputs()[name] return None @@ -3318,103 +3529,110 @@ def _list_outputs(self): outputs["out_table"] = os.path.abspath(self.inputs.out_table) else: # subject stats directory - stats_dir = os.path.join(self.inputs.subjects_dir, - self.inputs.subject_id, 'stats') + stats_dir = os.path.join( + self.inputs.subjects_dir, self.inputs.subject_id, "stats" + ) if isdefined(self.inputs.in_annotation): # if out_table is not defined just tag .stats on the end # instead of .annot - if self.inputs.surface == 'pial': - basename = os.path.basename( - self.inputs.in_annotation).replace( - 
'.annot', '.pial.stats') + if self.inputs.surface == "pial": + basename = os.path.basename(self.inputs.in_annotation).replace( + ".annot", ".pial.stats" + ) else: - basename = os.path.basename( - self.inputs.in_annotation).replace('.annot', '.stats') + basename = os.path.basename(self.inputs.in_annotation).replace( + ".annot", ".stats" + ) elif isdefined(self.inputs.in_label): # if out_table is not defined just tag .stats on the end # instead of .label - if self.inputs.surface == 'pial': + if self.inputs.surface == "pial": basename = os.path.basename(self.inputs.in_label).replace( - '.label', '.pial.stats') + ".label", ".pial.stats" + ) else: basename = os.path.basename(self.inputs.in_label).replace( - '.label', '.stats') + ".label", ".stats" + ) else: - basename = str(self.inputs.hemisphere) + '.aparc.annot.stats' + basename = str(self.inputs.hemisphere) + ".aparc.annot.stats" outputs["out_table"] = os.path.join(stats_dir, basename) if isdefined(self.inputs.out_color): outputs["out_color"] = os.path.abspath(self.inputs.out_color) else: # subject label directory - out_dir = os.path.join(self.inputs.subjects_dir, - self.inputs.subject_id, 'label') + out_dir = os.path.join( + self.inputs.subjects_dir, self.inputs.subject_id, "label" + ) if isdefined(self.inputs.in_annotation): # find the annotation name (if it exists) basename = os.path.basename(self.inputs.in_annotation) - for item in ['lh.', 'rh.', 'aparc.', 'annot']: - basename = basename.replace(item, '') + for item in ["lh.", "rh.", "aparc.", "annot"]: + basename = basename.replace(item, "") annot = basename # if the out_color table is not defined, one with the annotation # name will be created - if 'BA' in annot: - outputs["out_color"] = os.path.join( - out_dir, annot + 'ctab') + if "BA" in annot: + outputs["out_color"] = os.path.join(out_dir, annot + "ctab") else: outputs["out_color"] = os.path.join( - out_dir, 'aparc.annot.' + annot + 'ctab') + out_dir, "aparc.annot." 
+ annot + "ctab" + ) else: - outputs["out_color"] = os.path.join(out_dir, - 'aparc.annot.ctab') + outputs["out_color"] = os.path.join(out_dir, "aparc.annot.ctab") return outputs class ContrastInputSpec(FSTraitedSpec): # required subject_id = traits.String( - 'subject_id', + "subject_id", argstr="--s %s", usedefault=True, mandatory=True, - desc="Subject being processed") + desc="Subject being processed", + ) hemisphere = traits.Enum( - 'lh', - 'rh', + "lh", + "rh", argstr="--%s-only", mandatory=True, - desc="Hemisphere being processed") + desc="Hemisphere being processed", + ) # implicit thickness = File( mandatory=True, exists=True, - desc="Input file must be /surf/?h.thickness") + desc="Input file must be /surf/?h.thickness", + ) white = File( mandatory=True, exists=True, - desc="Input file must be /surf/.white") + desc="Input file must be /surf/.white", + ) annotation = File( mandatory=True, exists=True, - desc= - "Input annotation file must be /label/.aparc.annot" + desc="Input annotation file must be /label/.aparc.annot", ) cortex = File( mandatory=True, exists=True, - desc= - "Input cortex label must be /label/.cortex.label" + desc="Input cortex label must be /label/.cortex.label", ) - orig = File( - exists=True, mandatory=True, desc="Implicit input file mri/orig.mgz") + orig = File(exists=True, mandatory=True, desc="Implicit input file mri/orig.mgz") rawavg = File( - exists=True, mandatory=True, desc="Implicit input file mri/rawavg.mgz") + exists=True, mandatory=True, desc="Implicit input file mri/rawavg.mgz" + ) copy_inputs = traits.Bool( - desc="If running as a node, set this to True." + - "This will copy the input files to the node " + "directory.") + desc="If running as a node, set this to True." + + "This will copy the input files to the node " + + "directory." 
+ ) class ContrastOutputSpec(TraitedSpec): - out_contrast = File( - exists=False, desc="Output contrast file from Contrast") + out_contrast = File(exists=False, desc="Output contrast file from Contrast") out_stats = File(exists=False, desc="Output stats file from Contrast") out_log = File(exists=True, desc="Output log from Contrast") @@ -3439,42 +3657,42 @@ class Contrast(FSCommand): 'pctsurfcon --lh-only --s 10335' """ - _cmd = 'pctsurfcon' + _cmd = "pctsurfcon" input_spec = ContrastInputSpec output_spec = ContrastOutputSpec def run(self, **inputs): if self.inputs.copy_inputs: self.inputs.subjects_dir = os.getcwd() - if 'subjects_dir' in inputs: - inputs['subjects_dir'] = self.inputs.subjects_dir + if "subjects_dir" in inputs: + inputs["subjects_dir"] = self.inputs.subjects_dir hemi = self.inputs.hemisphere - copy2subjdir(self, self.inputs.annotation, 'label', - '{0}.aparc.annot'.format(hemi)) - copy2subjdir(self, self.inputs.cortex, 'label', - '{0}.cortex.label'.format(hemi)) - copy2subjdir(self, self.inputs.white, 'surf', - '{0}.white'.format(hemi)) - copy2subjdir(self, self.inputs.thickness, 'surf', - '{0}.thickness'.format(hemi)) - copy2subjdir(self, self.inputs.orig, 'mri', 'orig.mgz') - copy2subjdir(self, self.inputs.rawavg, 'mri', 'rawavg.mgz') + copy2subjdir( + self, self.inputs.annotation, "label", "{0}.aparc.annot".format(hemi) + ) + copy2subjdir( + self, self.inputs.cortex, "label", "{0}.cortex.label".format(hemi) + ) + copy2subjdir(self, self.inputs.white, "surf", "{0}.white".format(hemi)) + copy2subjdir( + self, self.inputs.thickness, "surf", "{0}.thickness".format(hemi) + ) + copy2subjdir(self, self.inputs.orig, "mri", "orig.mgz") + copy2subjdir(self, self.inputs.rawavg, "mri", "rawavg.mgz") # need to create output directories createoutputdirs(self._list_outputs()) return super(Contrast, self).run(**inputs) def _list_outputs(self): outputs = self._outputs().get() - subject_dir = os.path.join(self.inputs.subjects_dir, - self.inputs.subject_id) + subject_dir = os.path.join(self.inputs.subjects_dir, self.inputs.subject_id) outputs["out_contrast"] = os.path.join( - subject_dir, 'surf', - str(self.inputs.hemisphere) + '.w-g.pct.mgh') + subject_dir, "surf", str(self.inputs.hemisphere) + ".w-g.pct.mgh" + ) outputs["out_stats"] = os.path.join( - subject_dir, 'stats', - str(self.inputs.hemisphere) + '.w-g.pct.stats') - outputs["out_log"] = os.path.join(subject_dir, 'scripts', - 'pctsurfcon.log') + subject_dir, "stats", str(self.inputs.hemisphere) + ".w-g.pct.stats" + ) + outputs["out_log"] = os.path.join(subject_dir, "scripts", "pctsurfcon.log") return outputs @@ -3484,34 +3702,35 @@ class RelabelHypointensitiesInputSpec(FSTraitedSpec): mandatory=True, exists=True, copyfile=True, - desc="Implicit input file must be lh.white") + desc="Implicit input file must be lh.white", + ) rh_white = File( mandatory=True, exists=True, copyfile=True, - desc="Implicit input file must be rh.white") + desc="Implicit input file must be rh.white", + ) aseg = File( - argstr="%s", - position=-3, - mandatory=True, - exists=True, - desc="Input aseg file") + argstr="%s", position=-3, mandatory=True, exists=True, desc="Input aseg file" + ) surf_directory = Directory( - '.', + ".", argstr="%s", position=-2, exists=True, usedefault=True, - desc="Directory containing lh.white and rh.white") + desc="Directory containing lh.white and rh.white", + ) out_file = File( argstr="%s", position=-1, exists=False, - name_source=['aseg'], - name_template='%s.hypos.mgz', + name_source=["aseg"], + name_template="%s.hypos.mgz", 
hash_files=False, keep_extension=False, - desc="Output aseg file") + desc="Output aseg file", + ) class RelabelHypointensitiesOutputSpec(TraitedSpec): @@ -3534,7 +3753,7 @@ class RelabelHypointensities(FSCommand): 'mri_relabel_hypointensities aseg.mgz . aseg.hypos.mgz' """ - _cmd = 'mri_relabel_hypointensities' + _cmd = "mri_relabel_hypointensities" input_spec = RelabelHypointensitiesInputSpec output_spec = RelabelHypointensitiesOutputSpec @@ -3547,57 +3766,64 @@ def _list_outputs(self): class Aparc2AsegInputSpec(FSTraitedSpec): # required subject_id = traits.String( - 'subject_id', + "subject_id", argstr="--s %s", usedefault=True, mandatory=True, - desc="Subject being processed") + desc="Subject being processed", + ) out_file = File( argstr="--o %s", exists=False, mandatory=True, - desc="Full path of file to save the output segmentation in") + desc="Full path of file to save the output segmentation in", + ) # implicit lh_white = File( mandatory=True, exists=True, - desc="Input file must be /surf/lh.white") + desc="Input file must be /surf/lh.white", + ) rh_white = File( mandatory=True, exists=True, - desc="Input file must be /surf/rh.white") + desc="Input file must be /surf/rh.white", + ) lh_pial = File( - mandatory=True, - exists=True, - desc="Input file must be /surf/lh.pial") + mandatory=True, exists=True, desc="Input file must be /surf/lh.pial" + ) rh_pial = File( - mandatory=True, - exists=True, - desc="Input file must be /surf/rh.pial") + mandatory=True, exists=True, desc="Input file must be /surf/rh.pial" + ) lh_ribbon = File( mandatory=True, exists=True, - desc="Input file must be /mri/lh.ribbon.mgz") + desc="Input file must be /mri/lh.ribbon.mgz", + ) rh_ribbon = File( mandatory=True, exists=True, - desc="Input file must be /mri/rh.ribbon.mgz") + desc="Input file must be /mri/rh.ribbon.mgz", + ) ribbon = File( mandatory=True, exists=True, - desc="Input file must be /mri/ribbon.mgz") + desc="Input file must be /mri/ribbon.mgz", + ) lh_annotation = File( mandatory=True, exists=True, - desc="Input file must be /label/lh.aparc.annot") + desc="Input file must be /label/lh.aparc.annot", + ) rh_annotation = File( mandatory=True, exists=True, - desc="Input file must be /label/rh.aparc.annot") + desc="Input file must be /label/rh.aparc.annot", + ) # optional filled = File( - exists=True, - desc="Implicit input filled file. Only required with FS v5.3.") + exists=True, desc="Implicit input filled file. Only required with FS v5.3." + ) aseg = File(argstr="--aseg %s", exists=True, desc="Input aseg file") volmask = traits.Bool(argstr="--volmask", desc="Volume mask flag") ctxseg = File(argstr="--ctxseg %s", exists=True, desc="") @@ -3607,16 +3833,18 @@ class Aparc2AsegInputSpec(FSTraitedSpec): For each voxel labeled as white matter in the aseg, re-assign its label to be that of the closest cortical point if its distance is less than dmaxctx - """) - hypo_wm = traits.Bool( - argstr="--hypo-as-wm", desc="Label hypointensities as WM") + """, + ) + hypo_wm = traits.Bool(argstr="--hypo-as-wm", desc="Label hypointensities as WM") rip_unknown = traits.Bool( - argstr="--rip-unknown", - desc="Do not label WM based on 'unknown' corical label") + argstr="--rip-unknown", desc="Do not label WM based on 'unknown' corical label" + ) a2009s = traits.Bool(argstr="--a2009s", desc="Using the a2009s atlas") copy_inputs = traits.Bool( - desc="If running as a node, set this to True." + - "This will copy the input files to the node " + "directory.") + desc="If running as a node, set this to True." 
+ + "This will copy the input files to the node " + + "directory." + ) class Aparc2AsegOutputSpec(TraitedSpec): @@ -3660,35 +3888,35 @@ class Aparc2Aseg(FSCommand): 'mri_aparc2aseg --labelwm --o aparc+aseg.mgz --rip-unknown --s subject_id' """ - _cmd = 'mri_aparc2aseg' + _cmd = "mri_aparc2aseg" input_spec = Aparc2AsegInputSpec output_spec = Aparc2AsegOutputSpec def run(self, **inputs): if self.inputs.copy_inputs: self.inputs.subjects_dir = os.getcwd() - if 'subjects_dir' in inputs: - inputs['subjects_dir'] = self.inputs.subjects_dir - copy2subjdir(self, self.inputs.lh_white, 'surf', 'lh.white') - copy2subjdir(self, self.inputs.lh_pial, 'surf', 'lh.pial') - copy2subjdir(self, self.inputs.rh_white, 'surf', 'rh.white') - copy2subjdir(self, self.inputs.rh_pial, 'surf', 'rh.pial') - copy2subjdir(self, self.inputs.lh_ribbon, 'mri', 'lh.ribbon.mgz') - copy2subjdir(self, self.inputs.rh_ribbon, 'mri', 'rh.ribbon.mgz') - copy2subjdir(self, self.inputs.ribbon, 'mri', 'ribbon.mgz') - copy2subjdir(self, self.inputs.aseg, 'mri') - copy2subjdir(self, self.inputs.filled, 'mri', 'filled.mgz') - copy2subjdir(self, self.inputs.lh_annotation, 'label') - copy2subjdir(self, self.inputs.rh_annotation, 'label') + if "subjects_dir" in inputs: + inputs["subjects_dir"] = self.inputs.subjects_dir + copy2subjdir(self, self.inputs.lh_white, "surf", "lh.white") + copy2subjdir(self, self.inputs.lh_pial, "surf", "lh.pial") + copy2subjdir(self, self.inputs.rh_white, "surf", "rh.white") + copy2subjdir(self, self.inputs.rh_pial, "surf", "rh.pial") + copy2subjdir(self, self.inputs.lh_ribbon, "mri", "lh.ribbon.mgz") + copy2subjdir(self, self.inputs.rh_ribbon, "mri", "rh.ribbon.mgz") + copy2subjdir(self, self.inputs.ribbon, "mri", "ribbon.mgz") + copy2subjdir(self, self.inputs.aseg, "mri") + copy2subjdir(self, self.inputs.filled, "mri", "filled.mgz") + copy2subjdir(self, self.inputs.lh_annotation, "label") + copy2subjdir(self, self.inputs.rh_annotation, "label") return super(Aparc2Aseg, self).run(**inputs) def _format_arg(self, name, spec, value): - if name == 'aseg': + if name == "aseg": # aseg does not take a full filename - basename = os.path.basename(value).replace('.mgz', '') + basename = os.path.basename(value).replace(".mgz", "") return spec.argstr % basename - elif name == 'out_file': + elif name == "out_file": return spec.argstr % os.path.abspath(value) return super(Aparc2Aseg, self)._format_arg(name, spec, value) @@ -3702,10 +3930,8 @@ def _list_outputs(self): class Apas2AsegInputSpec(FSTraitedSpec): # required in_file = File( - argstr="--i %s", - mandatory=True, - exists=True, - desc="Input aparc+aseg.mgz") + argstr="--i %s", mandatory=True, exists=True, desc="Input aparc+aseg.mgz" + ) out_file = File(argstr="--o %s", mandatory=True, desc="Output aseg file") @@ -3730,7 +3956,7 @@ class Apas2Aseg(FSCommand): 'apas2aseg --i aseg.mgz --o output.mgz' """ - _cmd = 'apas2aseg' + _cmd = "apas2aseg" input_spec = Apas2AsegInputSpec output_spec = Apas2AsegOutputSpec @@ -3746,52 +3972,66 @@ class MRIsExpandInputSpec(FSTraitedSpec): in_file = File( exists=True, mandatory=True, - argstr='%s', + argstr="%s", position=-3, copyfile=False, - desc='Surface to expand') + desc="Surface to expand", + ) distance = traits.Float( mandatory=True, - argstr='%g', + argstr="%g", position=-2, - desc='Distance in mm or fraction of cortical thickness') + desc="Distance in mm or fraction of cortical thickness", + ) out_name = traits.Str( - 'expanded', - argstr='%s', + "expanded", + argstr="%s", position=-1, usedefault=True, - desc=('Output surface 
file\n' - 'If no path, uses directory of `in_file`\n' - 'If no path AND missing "lh." or "rh.", derive from `in_file`')) + desc=( + "Output surface file\n" + "If no path, uses directory of `in_file`\n" + 'If no path AND missing "lh." or "rh.", derive from `in_file`' + ), + ) thickness = traits.Bool( - argstr='-thickness', - desc='Expand by fraction of cortical thickness, not mm') + argstr="-thickness", desc="Expand by fraction of cortical thickness, not mm" + ) thickness_name = traits.Str( argstr="-thickness_name %s", copyfile=False, - desc=('Name of thickness file (implicit: "thickness")\n' - 'If no path, uses directory of `in_file`\n' - 'If no path AND missing "lh." or "rh.", derive from `in_file`')) + desc=( + 'Name of thickness file (implicit: "thickness")\n' + "If no path, uses directory of `in_file`\n" + 'If no path AND missing "lh." or "rh.", derive from `in_file`' + ), + ) pial = traits.Str( - argstr='-pial %s', + argstr="-pial %s", copyfile=False, - desc=('Name of pial file (implicit: "pial")\n' - 'If no path, uses directory of `in_file`\n' - 'If no path AND missing "lh." or "rh.", derive from `in_file`')) + desc=( + 'Name of pial file (implicit: "pial")\n' + "If no path, uses directory of `in_file`\n" + 'If no path AND missing "lh." or "rh.", derive from `in_file`' + ), + ) sphere = traits.Str( - 'sphere', + "sphere", copyfile=False, usedefault=True, - desc='WARNING: Do not change this trait') - spring = traits.Float(argstr='-S %g', desc="Spring term (implicit: 0.05)") - dt = traits.Float(argstr='-T %g', desc='dt (implicit: 0.25)') + desc="WARNING: Do not change this trait", + ) + spring = traits.Float(argstr="-S %g", desc="Spring term (implicit: 0.05)") + dt = traits.Float(argstr="-T %g", desc="dt (implicit: 0.25)") write_iterations = traits.Int( - argstr='-W %d', desc='Write snapshots of expansion every N iterations') + argstr="-W %d", desc="Write snapshots of expansion every N iterations" + ) smooth_averages = traits.Int( - argstr='-A %d', - desc='Smooth surface with N iterations after expansion') + argstr="-A %d", desc="Smooth surface with N iterations after expansion" + ) nsurfaces = traits.Int( - argstr='-N %d', desc='Number of surfacces to write during expansion') + argstr="-N %d", desc="Number of surfacces to write during expansion" + ) # # Requires dev version - Re-add when min_ver/max_ver support this # # https://github.com/freesurfer/freesurfer/blob/9730cb9/mris_expand/mris_expand.c # navgs = traits.Tuple( @@ -3806,7 +4046,7 @@ class MRIsExpandInputSpec(FSTraitedSpec): class MRIsExpandOutputSpec(TraitedSpec): - out_file = File(desc='Output surface file') + out_file = File(desc="Output surface file") class MRIsExpand(FSSurfaceCommand): @@ -3825,14 +4065,16 @@ class MRIsExpand(FSSurfaceCommand): >>> mris_expand.cmdline 'mris_expand -thickness lh.white 0.5 graymid' """ - _cmd = 'mris_expand' + + _cmd = "mris_expand" input_spec = MRIsExpandInputSpec output_spec = MRIsExpandOutputSpec def _list_outputs(self): outputs = self._outputs().get() - outputs['out_file'] = self._associated_file(self.inputs.in_file, - self.inputs.out_name) + outputs["out_file"] = self._associated_file( + self.inputs.in_file, self.inputs.out_name + ) return outputs def normalize_filenames(self): @@ -3845,100 +4087,103 @@ def normalize_filenames(self): pial = self.inputs.pial if not isdefined(pial): - pial = 'pial' + pial = "pial" self.inputs.pial = self._associated_file(in_file, pial) if isdefined(self.inputs.thickness) and self.inputs.thickness: thickness_name = self.inputs.thickness_name if not 
isdefined(thickness_name): - thickness_name = 'thickness' - self.inputs.thickness_name = self._associated_file( - in_file, thickness_name) + thickness_name = "thickness" + self.inputs.thickness_name = self._associated_file(in_file, thickness_name) self.inputs.sphere = self._associated_file(in_file, self.inputs.sphere) class LTAConvertInputSpec(CommandLineInputSpec): # Inputs - _in_xor = ('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', 'in_itk') + _in_xor = ("in_lta", "in_fsl", "in_mni", "in_reg", "in_niftyreg", "in_itk") in_lta = traits.Either( File(exists=True), - 'identity.nofile', - argstr='--inlta %s', + "identity.nofile", + argstr="--inlta %s", mandatory=True, xor=_in_xor, - desc='input transform of LTA type') + desc="input transform of LTA type", + ) in_fsl = File( exists=True, - argstr='--infsl %s', + argstr="--infsl %s", mandatory=True, xor=_in_xor, - desc='input transform of FSL type') + desc="input transform of FSL type", + ) in_mni = File( exists=True, - argstr='--inmni %s', + argstr="--inmni %s", mandatory=True, xor=_in_xor, - desc='input transform of MNI/XFM type') + desc="input transform of MNI/XFM type", + ) in_reg = File( exists=True, - argstr='--inreg %s', + argstr="--inreg %s", mandatory=True, xor=_in_xor, - desc='input transform of TK REG type (deprecated format)') + desc="input transform of TK REG type (deprecated format)", + ) in_niftyreg = File( exists=True, - argstr='--inniftyreg %s', + argstr="--inniftyreg %s", mandatory=True, xor=_in_xor, - desc='input transform of Nifty Reg type (inverse RAS2RAS)') + desc="input transform of Nifty Reg type (inverse RAS2RAS)", + ) in_itk = File( exists=True, - argstr='--initk %s', + argstr="--initk %s", mandatory=True, xor=_in_xor, - desc='input transform of ITK type') + desc="input transform of ITK type", + ) # Outputs out_lta = traits.Either( traits.Bool, File, - argstr='--outlta %s', - desc='output linear transform (LTA Freesurfer format)') + argstr="--outlta %s", + desc="output linear transform (LTA Freesurfer format)", + ) out_fsl = traits.Either( - traits.Bool, - File, - argstr='--outfsl %s', - desc='output transform in FSL format') + traits.Bool, File, argstr="--outfsl %s", desc="output transform in FSL format" + ) out_mni = traits.Either( traits.Bool, File, - argstr='--outmni %s', - desc='output transform in MNI/XFM format') + argstr="--outmni %s", + desc="output transform in MNI/XFM format", + ) out_reg = traits.Either( traits.Bool, File, - argstr='--outreg %s', - desc='output transform in reg dat format') + argstr="--outreg %s", + desc="output transform in reg dat format", + ) out_itk = traits.Either( - traits.Bool, - File, - argstr='--outitk %s', - desc='output transform in ITK format') + traits.Bool, File, argstr="--outitk %s", desc="output transform in ITK format" + ) # Optional flags - invert = traits.Bool(argstr='--invert') - ltavox2vox = traits.Bool(argstr='--ltavox2vox', requires=['out_lta']) - source_file = File(exists=True, argstr='--src %s') - target_file = File(exists=True, argstr='--trg %s') - target_conform = traits.Bool(argstr='--trgconform') + invert = traits.Bool(argstr="--invert") + ltavox2vox = traits.Bool(argstr="--ltavox2vox", requires=["out_lta"]) + source_file = File(exists=True, argstr="--src %s") + target_file = File(exists=True, argstr="--trg %s") + target_conform = traits.Bool(argstr="--trgconform") class LTAConvertOutputSpec(TraitedSpec): - out_lta = File( - exists=True, desc='output linear transform (LTA Freesurfer format)') - out_fsl = File(exists=True, desc='output transform in FSL 
format') - out_mni = File(exists=True, desc='output transform in MNI/XFM format') - out_reg = File(exists=True, desc='output transform in reg dat format') - out_itk = File(exists=True, desc='output transform in ITK format') + out_lta = File(exists=True, desc="output linear transform (LTA Freesurfer format)") + out_fsl = File(exists=True, desc="output transform in FSL format") + out_mni = File(exists=True, desc="output transform in MNI/XFM format") + out_reg = File(exists=True, desc="output transform in reg dat format") + out_itk = File(exists=True, desc="output transform in ITK format") class LTAConvert(CommandLine): @@ -3949,20 +4194,25 @@ class LTAConvert(CommandLine): For complete details, see the `lta_convert documentation. `_ """ + input_spec = LTAConvertInputSpec output_spec = LTAConvertOutputSpec - _cmd = 'lta_convert' + _cmd = "lta_convert" def _format_arg(self, name, spec, value): - if name.startswith('out_') and value is True: + if name.startswith("out_") and value is True: value = self._list_outputs()[name] return super(LTAConvert, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self.output_spec().get() - for name, default in (('out_lta', 'out.lta'), ('out_fsl', 'out.mat'), - ('out_mni', 'out.xfm'), ('out_reg', 'out.dat'), - ('out_itk', 'out.txt')): + for name, default in ( + ("out_lta", "out.lta"), + ("out_fsl", "out.mat"), + ("out_mni", "out.xfm"), + ("out_reg", "out.dat"), + ("out_itk", "out.txt"), + ): attr = getattr(self.inputs, name) if attr: fname = default if attr is True else attr diff --git a/nipype/interfaces/fsl/__init__.py b/nipype/interfaces/fsl/__init__.py index c6de303307..dd7b3d76d7 100644 --- a/nipype/interfaces/fsl/__init__.py +++ b/nipype/interfaces/fsl/__init__.py @@ -7,30 +7,121 @@ Top-level namespace for fsl. 
""" -from .base import (FSLCommand, Info, check_fsl, no_fsl, no_fsl_course_data) -from .preprocess import (FAST, FLIRT, ApplyXFM, BET, MCFLIRT, FNIRT, ApplyWarp, - SliceTimer, SUSAN, PRELUDE, FUGUE, FIRST) -from .model import (Level1Design, FEAT, FEATModel, FILMGLS, FEATRegister, - FLAMEO, ContrastMgr, MultipleRegressDesign, L2Model, SMM, - MELODIC, SmoothEstimate, Cluster, Randomise, GLM) +from .base import FSLCommand, Info, check_fsl, no_fsl, no_fsl_course_data +from .preprocess import ( + FAST, + FLIRT, + ApplyXFM, + BET, + MCFLIRT, + FNIRT, + ApplyWarp, + SliceTimer, + SUSAN, + PRELUDE, + FUGUE, + FIRST, +) +from .model import ( + Level1Design, + FEAT, + FEATModel, + FILMGLS, + FEATRegister, + FLAMEO, + ContrastMgr, + MultipleRegressDesign, + L2Model, + SMM, + MELODIC, + SmoothEstimate, + Cluster, + Randomise, + GLM, +) from .utils import ( - AvScale, Smooth, Slice, Merge, ExtractROI, Split, ImageMaths, ImageMeants, - ImageStats, FilterRegressor, Overlay, Slicer, PlotTimeSeries, - PlotMotionParams, ConvertXFM, SwapDimensions, PowerSpectrum, Reorient2Std, - Complex, InvWarp, WarpUtils, ConvertWarp, WarpPoints, WarpPointsToStd, - WarpPointsFromStd, RobustFOV, CopyGeom, MotionOutliers) + AvScale, + Smooth, + Slice, + Merge, + ExtractROI, + Split, + ImageMaths, + ImageMeants, + ImageStats, + FilterRegressor, + Overlay, + Slicer, + PlotTimeSeries, + PlotMotionParams, + ConvertXFM, + SwapDimensions, + PowerSpectrum, + Reorient2Std, + Complex, + InvWarp, + WarpUtils, + ConvertWarp, + WarpPoints, + WarpPointsToStd, + WarpPointsFromStd, + RobustFOV, + CopyGeom, + MotionOutliers, +) -from .epi import (PrepareFieldmap, TOPUP, ApplyTOPUP, Eddy, EPIDeWarp, SigLoss, - EddyCorrect, EpiReg, EddyQuad) -from .dti import (BEDPOSTX, XFibres, DTIFit, ProbTrackX, ProbTrackX2, VecReg, - ProjThresh, FindTheBiggest, DistanceMap, TractSkeleton, - MakeDyadicVectors, BEDPOSTX5, XFibres5) -from .maths import (ChangeDataType, Threshold, MeanImage, ApplyMask, - IsotropicSmooth, TemporalFilter, DilateImage, ErodeImage, - SpatialFilter, UnaryMaths, BinaryMaths, MultiImageMaths, - MaxnImage, MinImage, MedianImage, PercentileImage, - AR1Image) +from .epi import ( + PrepareFieldmap, + TOPUP, + ApplyTOPUP, + Eddy, + EPIDeWarp, + SigLoss, + EddyCorrect, + EpiReg, + EddyQuad, +) +from .dti import ( + BEDPOSTX, + XFibres, + DTIFit, + ProbTrackX, + ProbTrackX2, + VecReg, + ProjThresh, + FindTheBiggest, + DistanceMap, + TractSkeleton, + MakeDyadicVectors, + BEDPOSTX5, + XFibres5, +) +from .maths import ( + ChangeDataType, + Threshold, + MeanImage, + ApplyMask, + IsotropicSmooth, + TemporalFilter, + DilateImage, + ErodeImage, + SpatialFilter, + UnaryMaths, + BinaryMaths, + MultiImageMaths, + MaxnImage, + MinImage, + MedianImage, + PercentileImage, + AR1Image, +) from .possum import B0Calc -from .fix import (AccuracyTester, Classifier, Cleaner, FeatureExtractor, - Training, TrainingSetCreator) +from .fix import ( + AccuracyTester, + Classifier, + Cleaner, + FeatureExtractor, + Training, + TrainingSetCreator, +) from .aroma import ICA_AROMA diff --git a/nipype/interfaces/fsl/aroma.py b/nipype/interfaces/fsl/aroma.py index ed0b85df90..c40a285989 100644 --- a/nipype/interfaces/fsl/aroma.py +++ b/nipype/interfaces/fsl/aroma.py @@ -5,8 +5,15 @@ `ICA-AROMA.py`_ command line tool. 
""" -from ..base import (TraitedSpec, CommandLineInputSpec, CommandLine, File, - Directory, traits, isdefined) +from ..base import ( + TraitedSpec, + CommandLineInputSpec, + CommandLine, + File, + Directory, + traits, + isdefined, +) import os @@ -14,83 +21,93 @@ class ICA_AROMAInputSpec(CommandLineInputSpec): feat_dir = Directory( exists=True, mandatory=True, - argstr='-feat %s', - xor=['in_file', 'mat_file', 'fnirt_warp_file', 'motion_parameters'], - desc='If a feat directory exists and temporal filtering ' - 'has not been run yet, ICA_AROMA can use the files in ' - 'this directory.') + argstr="-feat %s", + xor=["in_file", "mat_file", "fnirt_warp_file", "motion_parameters"], + desc="If a feat directory exists and temporal filtering " + "has not been run yet, ICA_AROMA can use the files in " + "this directory.", + ) in_file = File( exists=True, mandatory=True, - argstr='-i %s', - xor=['feat_dir'], - desc='volume to be denoised') + argstr="-i %s", + xor=["feat_dir"], + desc="volume to be denoised", + ) out_dir = Directory( - 'out', usedefault=True, mandatory=True, - argstr='-o %s', desc='output directory') + "out", usedefault=True, mandatory=True, argstr="-o %s", desc="output directory" + ) mask = File( - exists=True, - argstr='-m %s', - xor=['feat_dir'], - desc='path/name volume mask') + exists=True, argstr="-m %s", xor=["feat_dir"], desc="path/name volume mask" + ) dim = traits.Int( - argstr='-dim %d', - desc='Dimensionality reduction when running ' - 'MELODIC (defualt is automatic estimation)') + argstr="-dim %d", + desc="Dimensionality reduction when running " + "MELODIC (defualt is automatic estimation)", + ) TR = traits.Float( - argstr='-tr %.3f', - desc='TR in seconds. If this is not specified ' - 'the TR will be extracted from the ' - 'header of the fMRI nifti file.') + argstr="-tr %.3f", + desc="TR in seconds. If this is not specified " + "the TR will be extracted from the " + "header of the fMRI nifti file.", + ) melodic_dir = Directory( exists=True, - argstr='-meldir %s', - desc='path to MELODIC directory if MELODIC has already been run') + argstr="-meldir %s", + desc="path to MELODIC directory if MELODIC has already been run", + ) mat_file = File( exists=True, - argstr='-affmat %s', - xor=['feat_dir'], - desc='path/name of the mat-file describing the ' - 'affine registration (e.g. FSL FLIRT) of the ' - 'functional data to structural space (.mat file)') + argstr="-affmat %s", + xor=["feat_dir"], + desc="path/name of the mat-file describing the " + "affine registration (e.g. FSL FLIRT) of the " + "functional data to structural space (.mat file)", + ) fnirt_warp_file = File( exists=True, - argstr='-warp %s', - xor=['feat_dir'], - desc='File name of the warp-file describing ' - 'the non-linear registration (e.g. FSL FNIRT) ' - 'of the structural data to MNI152 space (.nii.gz)') + argstr="-warp %s", + xor=["feat_dir"], + desc="File name of the warp-file describing " + "the non-linear registration (e.g. FSL FNIRT) " + "of the structural data to MNI152 space (.nii.gz)", + ) motion_parameters = File( exists=True, mandatory=True, - argstr='-mc %s', - xor=['feat_dir'], - desc='motion parameters file') + argstr="-mc %s", + xor=["feat_dir"], + desc="motion parameters file", + ) denoise_type = traits.Enum( - 'nonaggr', - 'aggr', - 'both', - 'no', + "nonaggr", + "aggr", + "both", + "no", usedefault=True, mandatory=True, - argstr='-den %s', - desc='Type of denoising strategy:\n' - '-no: only classification, no denoising\n' - '-nonaggr (default): non-aggresssive denoising, i.e. 
partial component regression\n' - '-aggr: aggressive denoising, i.e. full component regression\n' - '-both: both aggressive and non-aggressive denoising (two outputs)') + argstr="-den %s", + desc="Type of denoising strategy:\n" + "-no: only classification, no denoising\n" + "-nonaggr (default): non-aggresssive denoising, i.e. partial component regression\n" + "-aggr: aggressive denoising, i.e. full component regression\n" + "-both: both aggressive and non-aggressive denoising (two outputs)", + ) class ICA_AROMAOutputSpec(TraitedSpec): aggr_denoised_file = File( - exists=True, desc='if generated: aggressively denoised volume') + exists=True, desc="if generated: aggressively denoised volume" + ) nonaggr_denoised_file = File( - exists=True, desc='if generated: non aggressively denoised volume') + exists=True, desc="if generated: non aggressively denoised volume" + ) out_dir = Directory( exists=True, - desc='directory contains (in addition to the denoised files): ' - 'melodic.ica + classified_motion_components + ' - 'classification_overview + feature_scores + melodic_ic_mni)') + desc="directory contains (in addition to the denoised files): " + "melodic.ica + classified_motion_components + " + "classification_overview + feature_scores + melodic_ic_mni)", + ) class ICA_AROMA(CommandLine): @@ -121,24 +138,27 @@ class ICA_AROMA(CommandLine): >>> AROMA_obj.cmdline # doctest: +ELLIPSIS 'ICA_AROMA.py -den both -warp warpfield.nii -i functional.nii -m mask.nii.gz -affmat func_to_struct.mat -mc fsl_mcflirt_movpar.txt -o .../ICA_testout' """ - _cmd = 'ICA_AROMA.py' + + _cmd = "ICA_AROMA.py" input_spec = ICA_AROMAInputSpec output_spec = ICA_AROMAOutputSpec def _format_arg(self, name, trait_spec, value): - if name == 'out_dir': + if name == "out_dir": return trait_spec.argstr % os.path.abspath(value) return super(ICA_AROMA, self)._format_arg(name, trait_spec, value) def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_dir'] = os.path.abspath(self.inputs.out_dir) - out_dir = outputs['out_dir'] + outputs["out_dir"] = os.path.abspath(self.inputs.out_dir) + out_dir = outputs["out_dir"] - if self.inputs.denoise_type in ('aggr', 'both'): - outputs['aggr_denoised_file'] = os.path.join( - out_dir, 'denoised_func_data_aggr.nii.gz') - if self.inputs.denoise_type in ('nonaggr', 'both'): - outputs['nonaggr_denoised_file'] = os.path.join( - out_dir, 'denoised_func_data_nonaggr.nii.gz') + if self.inputs.denoise_type in ("aggr", "both"): + outputs["aggr_denoised_file"] = os.path.join( + out_dir, "denoised_func_data_aggr.nii.gz" + ) + if self.inputs.denoise_type in ("nonaggr", "both"): + outputs["nonaggr_denoised_file"] = os.path.join( + out_dir, "denoised_func_data_nonaggr.nii.gz" + ) return outputs diff --git a/nipype/interfaces/fsl/base.py b/nipype/interfaces/fsl/base.py index 07ddc4d146..43ad7b9f9c 100644 --- a/nipype/interfaces/fsl/base.py +++ b/nipype/interfaces/fsl/base.py @@ -31,11 +31,10 @@ from ... 
import logging from ...utils.filemanip import fname_presuffix -from ..base import (traits, isdefined, CommandLine, CommandLineInputSpec, - PackageInfo) +from ..base import traits, isdefined, CommandLine, CommandLineInputSpec, PackageInfo from ...external.due import BibTeX -IFLOGGER = logging.getLogger('nipype.interface') +IFLOGGER = logging.getLogger("nipype.interface") class Info(PackageInfo): @@ -56,14 +55,14 @@ class Info(PackageInfo): """ ftypes = { - 'NIFTI': '.nii', - 'NIFTI_PAIR': '.img', - 'NIFTI_GZ': '.nii.gz', - 'NIFTI_PAIR_GZ': '.img.gz' + "NIFTI": ".nii", + "NIFTI_PAIR": ".img", + "NIFTI_GZ": ".nii.gz", + "NIFTI_PAIR_GZ": ".img.gz", } - if os.getenv('FSLDIR'): - version_file = os.path.join(os.getenv('FSLDIR'), 'etc', 'fslversion') + if os.getenv("FSLDIR"): + version_file = os.path.join(os.getenv("FSLDIR"), "etc", "fslversion") @staticmethod def parse_version(raw_info): @@ -87,7 +86,7 @@ def output_type_to_ext(cls, output_type): try: return cls.ftypes[output_type] except KeyError: - msg = 'Invalid FSLOUTPUTTYPE: ', output_type + msg = "Invalid FSLOUTPUTTYPE: ", output_type raise KeyError(msg) @classmethod @@ -103,28 +102,30 @@ def output_type(cls): Represents the current environment setting of FSLOUTPUTTYPE """ try: - return os.environ['FSLOUTPUTTYPE'] + return os.environ["FSLOUTPUTTYPE"] except KeyError: - IFLOGGER.warning('FSLOUTPUTTYPE environment variable is not set. ' - 'Setting FSLOUTPUTTYPE=NIFTI') - return 'NIFTI' + IFLOGGER.warning( + "FSLOUTPUTTYPE environment variable is not set. " + "Setting FSLOUTPUTTYPE=NIFTI" + ) + return "NIFTI" @staticmethod def standard_image(img_name=None): - '''Grab an image from the standard location. + """Grab an image from the standard location. Returns a list of standard images if called without arguments. - Could be made more fancy to allow for more relocatability''' + Could be made more fancy to allow for more relocatability""" try: - fsldir = os.environ['FSLDIR'] + fsldir = os.environ["FSLDIR"] except KeyError: - raise Exception('FSL environment variables not set') - stdpath = os.path.join(fsldir, 'data', 'standard') + raise Exception("FSL environment variables not set") + stdpath = os.path.join(fsldir, "data", "standard") if img_name is None: return [ - filename.replace(stdpath + '/', '') - for filename in glob(os.path.join(stdpath, '*nii*')) + filename.replace(stdpath + "/", "") + for filename in glob(os.path.join(stdpath, "*nii*")) ] return os.path.join(stdpath, img_name) @@ -140,8 +141,8 @@ class FSLCommandInputSpec(CommandLineInputSpec): ------- fsl.ExtractRoi(tmin=42, tsize=1, output_type='NIFTI') """ - output_type = traits.Enum( - 'NIFTI', list(Info.ftypes.keys()), desc='FSL output type') + + output_type = traits.Enum("NIFTI", list(Info.ftypes.keys()), desc="FSL output type") class FSLCommand(CommandLine): @@ -152,23 +153,26 @@ class FSLCommand(CommandLine): input_spec = FSLCommandInputSpec _output_type = None - references_ = [{ - 'entry': - BibTeX('@article{JenkinsonBeckmannBehrensWoolrichSmith2012,' - 'author={M. Jenkinson, C.F. Beckmann, T.E. Behrens, ' - 'M.W. Woolrich, and S.M. Smith},' - 'title={FSL},' - 'journal={NeuroImage},' - 'volume={62},' - 'pages={782-790},' - 'year={2012},' - '}'), - 'tags': ['implementation'], - }] + references_ = [ + { + "entry": BibTeX( + "@article{JenkinsonBeckmannBehrensWoolrichSmith2012," + "author={M. Jenkinson, C.F. Beckmann, T.E. Behrens, " + "M.W. Woolrich, and S.M. 
Smith}," + "title={FSL}," + "journal={NeuroImage}," + "volume={62}," + "pages={782-790}," + "year={2012}," + "}" + ), + "tags": ["implementation"], + } + ] def __init__(self, **inputs): super(FSLCommand, self).__init__(**inputs) - self.inputs.on_trait_change(self._output_update, 'output_type') + self.inputs.on_trait_change(self._output_update, "output_type") if self._output_type is None: self._output_type = Info.output_type() @@ -180,7 +184,7 @@ def __init__(self, **inputs): def _output_update(self): self._output_type = self.inputs.output_type - self.inputs.environ.update({'FSLOUTPUTTYPE': self.inputs.output_type}) + self.inputs.environ.update({"FSLOUTPUTTYPE": self.inputs.output_type}) @classmethod def set_default_output_type(cls, output_type): @@ -195,18 +199,13 @@ def set_default_output_type(cls, output_type): if output_type in Info.ftypes: cls._output_type = output_type else: - raise AttributeError('Invalid FSL output_type: %s' % output_type) + raise AttributeError("Invalid FSL output_type: %s" % output_type) @property def version(self): return Info.version() - def _gen_fname(self, - basename, - cwd=None, - suffix=None, - change_ext=True, - ext=None): + def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True, ext=None): """Generate a filename based on the given parameters. The filename will take the form: cwd/basename. @@ -232,9 +231,9 @@ def _gen_fname(self, """ - if basename == '': - msg = 'Unable to generate filename for command %s. ' % self.cmd - msg += 'basename is not set!' + if basename == "": + msg = "Unable to generate filename for command %s. " % self.cmd + msg += "basename is not set!" raise ValueError(msg) if cwd is None: cwd = os.getcwd() @@ -242,13 +241,12 @@ def _gen_fname(self, ext = Info.output_type_to_ext(self.inputs.output_type) if change_ext: if suffix: - suffix = ''.join((suffix, ext)) + suffix = "".join((suffix, ext)) else: suffix = ext if suffix is None: - suffix = '' - fname = fname_presuffix( - basename, suffix=suffix, use_ext=False, newpath=cwd) + suffix = "" + fname = fname_presuffix(basename, suffix=suffix, use_ext=False, newpath=cwd) return fname def _overload_extension(self, value, name=None): @@ -276,5 +274,7 @@ def no_fsl(): def no_fsl_course_data(): """check if fsl_course data is present""" - return not ('FSL_COURSE_DATA' in os.environ and os.path.isdir( - os.path.abspath(os.environ['FSL_COURSE_DATA']))) + return not ( + "FSL_COURSE_DATA" in os.environ + and os.path.isdir(os.path.abspath(os.environ["FSL_COURSE_DATA"])) + ) diff --git a/nipype/interfaces/fsl/dti.py b/nipype/interfaces/fsl/dti.py index 86ee527d5a..90f05e3bab 100644 --- a/nipype/interfaces/fsl/dti.py +++ b/nipype/interfaces/fsl/dti.py @@ -9,80 +9,83 @@ import warnings from ...utils.filemanip import fname_presuffix, split_filename, copyfile -from ..base import (TraitedSpec, isdefined, File, Directory, InputMultiPath, - OutputMultiPath, traits) -from .base import (FSLCommand, FSLCommandInputSpec, Info) +from ..base import ( + TraitedSpec, + isdefined, + File, + Directory, + InputMultiPath, + OutputMultiPath, + traits, +) +from .base import FSLCommand, FSLCommandInputSpec, Info class DTIFitInputSpec(FSLCommandInputSpec): dwi = File( exists=True, - desc='diffusion weighted image data file', - argstr='-k %s', + desc="diffusion weighted image data file", + argstr="-k %s", position=0, - mandatory=True) + mandatory=True, + ) base_name = traits.Str( "dtifit_", - desc=('base_name that all output files ' - 'will start with'), - argstr='-o %s', + desc=("base_name that all output files " 
"will start with"), + argstr="-o %s", position=1, - usedefault=True) + usedefault=True, + ) mask = File( exists=True, - desc='bet binary mask file', - argstr='-m %s', + desc="bet binary mask file", + argstr="-m %s", position=2, - mandatory=True) + mandatory=True, + ) bvecs = File( - exists=True, - desc='b vectors file', - argstr='-r %s', - position=3, - mandatory=True) + exists=True, desc="b vectors file", argstr="-r %s", position=3, mandatory=True + ) bvals = File( - exists=True, - desc='b values file', - argstr='-b %s', - position=4, - mandatory=True) - min_z = traits.Int(argstr='-z %d', desc='min z') - max_z = traits.Int(argstr='-Z %d', desc='max z') - min_y = traits.Int(argstr='-y %d', desc='min y') - max_y = traits.Int(argstr='-Y %d', desc='max y') - min_x = traits.Int(argstr='-x %d', desc='min x') - max_x = traits.Int(argstr='-X %d', desc='max x') + exists=True, desc="b values file", argstr="-b %s", position=4, mandatory=True + ) + min_z = traits.Int(argstr="-z %d", desc="min z") + max_z = traits.Int(argstr="-Z %d", desc="max z") + min_y = traits.Int(argstr="-y %d", desc="min y") + max_y = traits.Int(argstr="-Y %d", desc="max y") + min_x = traits.Int(argstr="-x %d", desc="min x") + max_x = traits.Int(argstr="-X %d", desc="max x") save_tensor = traits.Bool( - desc='save the elements of the tensor', argstr='--save_tensor') - sse = traits.Bool(desc='output sum of squared errors', argstr='--sse') - cni = File( - exists=True, desc='input counfound regressors', argstr='--cni=%s') + desc="save the elements of the tensor", argstr="--save_tensor" + ) + sse = traits.Bool(desc="output sum of squared errors", argstr="--sse") + cni = File(exists=True, desc="input counfound regressors", argstr="--cni=%s") little_bit = traits.Bool( - desc='only process small area of brain', argstr='--littlebit') + desc="only process small area of brain", argstr="--littlebit" + ) gradnonlin = File( - exists=True, argstr='--gradnonlin=%s', desc='gradient non linearities') + exists=True, argstr="--gradnonlin=%s", desc="gradient non linearities" + ) class DTIFitOutputSpec(TraitedSpec): - V1 = File(exists=True, desc='path/name of file with the 1st eigenvector') - V2 = File(exists=True, desc='path/name of file with the 2nd eigenvector') - V3 = File(exists=True, desc='path/name of file with the 3rd eigenvector') - L1 = File(exists=True, desc='path/name of file with the 1st eigenvalue') - L2 = File(exists=True, desc='path/name of file with the 2nd eigenvalue') - L3 = File(exists=True, desc='path/name of file with the 3rd eigenvalue') - MD = File(exists=True, desc='path/name of file with the mean diffusivity') - FA = File( - exists=True, desc='path/name of file with the fractional anisotropy') - MO = File( - exists=True, desc='path/name of file with the mode of anisotropy') + V1 = File(exists=True, desc="path/name of file with the 1st eigenvector") + V2 = File(exists=True, desc="path/name of file with the 2nd eigenvector") + V3 = File(exists=True, desc="path/name of file with the 3rd eigenvector") + L1 = File(exists=True, desc="path/name of file with the 1st eigenvalue") + L2 = File(exists=True, desc="path/name of file with the 2nd eigenvalue") + L3 = File(exists=True, desc="path/name of file with the 3rd eigenvalue") + MD = File(exists=True, desc="path/name of file with the mean diffusivity") + FA = File(exists=True, desc="path/name of file with the fractional anisotropy") + MO = File(exists=True, desc="path/name of file with the mode of anisotropy") S0 = File( exists=True, - desc=('path/name of file with the raw T2 signal 
with no ' - 'diffusion weighting')) - tensor = File( - exists=True, desc='path/name of file with the 4D tensor volume') - sse = File( - exists=True, desc='path/name of file with the summed squared error') + desc=( + "path/name of file with the raw T2 signal with no " "diffusion weighting" + ), + ) + tensor = File(exists=True, desc="path/name of file with the 4D tensor volume") + sse = File(exists=True, desc="path/name of file with the summed squared error") class DTIFit(FSLCommand): @@ -104,15 +107,14 @@ class DTIFit(FSLCommand): """ - _cmd = 'dtifit' + _cmd = "dtifit" input_spec = DTIFitInputSpec output_spec = DTIFitOutputSpec def _list_outputs(self): - keys_to_ignore = {'outputtype', 'environ', 'args'} + keys_to_ignore = {"outputtype", "environ", "args"} # Optional output: Map output name to input flag - opt_output = {'tensor': self.inputs.save_tensor, - 'sse': self.inputs.sse} + opt_output = {"tensor": self.inputs.save_tensor, "sse": self.inputs.sse} # Ignore optional output, whose corresponding input-flag is not defined # or set to False for output, input_flag in opt_output.items(): @@ -123,155 +125,166 @@ def _list_outputs(self): outputs = self.output_spec().get() for k in set(outputs.keys()) - keys_to_ignore: - outputs[k] = self._gen_fname(self.inputs.base_name, suffix='_' + k) + outputs[k] = self._gen_fname(self.inputs.base_name, suffix="_" + k) return outputs class FSLXCommandInputSpec(FSLCommandInputSpec): dwi = File( exists=True, - argstr='--data=%s', + argstr="--data=%s", mandatory=True, - desc='diffusion weighted image data file') + desc="diffusion weighted image data file", + ) mask = File( exists=True, - argstr='--mask=%s', + argstr="--mask=%s", mandatory=True, - desc='brain binary mask file (i.e. from BET)') + desc="brain binary mask file (i.e. 
from BET)", + ) bvecs = File( - exists=True, - argstr='--bvecs=%s', - mandatory=True, - desc='b vectors file') - bvals = File( - exists=True, argstr='--bvals=%s', mandatory=True, desc='b values file') + exists=True, argstr="--bvecs=%s", mandatory=True, desc="b vectors file" + ) + bvals = File(exists=True, argstr="--bvals=%s", mandatory=True, desc="b values file") - logdir = Directory('.', argstr='--logdir=%s', usedefault=True) + logdir = Directory(".", argstr="--logdir=%s", usedefault=True) n_fibres = traits.Range( usedefault=True, low=1, value=2, - argstr='--nfibres=%d', - desc=('Maximum number of fibres to fit in each voxel'), - mandatory=True) + argstr="--nfibres=%d", + desc=("Maximum number of fibres to fit in each voxel"), + mandatory=True, + ) model = traits.Enum( 1, 2, 3, - argstr='--model=%d', - desc=('use monoexponential (1, default, required for ' - 'single-shell) or multiexponential (2, multi-' - 'shell) model')) - fudge = traits.Int(argstr='--fudge=%d', desc='ARD fudge factor') + argstr="--model=%d", + desc=( + "use monoexponential (1, default, required for " + "single-shell) or multiexponential (2, multi-" + "shell) model" + ), + ) + fudge = traits.Int(argstr="--fudge=%d", desc="ARD fudge factor") n_jumps = traits.Int( - 5000, usedefault=True, - argstr='--njumps=%d', desc='Num of jumps to be made by MCMC') + 5000, + usedefault=True, + argstr="--njumps=%d", + desc="Num of jumps to be made by MCMC", + ) burn_in = traits.Range( low=0, value=0, usedefault=True, - argstr='--burnin=%d', - desc=('Total num of jumps at start of MCMC to be ' - 'discarded')) + argstr="--burnin=%d", + desc=("Total num of jumps at start of MCMC to be " "discarded"), + ) burn_in_no_ard = traits.Range( low=0, value=0, usedefault=True, - argstr='--burnin_noard=%d', - desc=('num of burnin jumps before the ard is' - ' imposed')) + argstr="--burnin_noard=%d", + desc=("num of burnin jumps before the ard is" " imposed"), + ) sample_every = traits.Range( low=0, value=1, usedefault=True, - argstr='--sampleevery=%d', - desc='Num of jumps for each sample (MCMC)') + argstr="--sampleevery=%d", + desc="Num of jumps for each sample (MCMC)", + ) update_proposal_every = traits.Range( low=1, value=40, usedefault=True, - argstr='--updateproposalevery=%d', - desc=('Num of jumps for each update ' - 'to the proposal density std ' - '(MCMC)')) + argstr="--updateproposalevery=%d", + desc=("Num of jumps for each update " "to the proposal density std " "(MCMC)"), + ) seed = traits.Int( - argstr='--seed=%d', desc='seed for pseudo random number generator') + argstr="--seed=%d", desc="seed for pseudo random number generator" + ) - _xor_inputs1 = ('no_ard', 'all_ard') + _xor_inputs1 = ("no_ard", "all_ard") no_ard = traits.Bool( - argstr='--noard', xor=_xor_inputs1, desc='Turn ARD off on all fibres') + argstr="--noard", xor=_xor_inputs1, desc="Turn ARD off on all fibres" + ) all_ard = traits.Bool( - argstr='--allard', xor=_xor_inputs1, desc='Turn ARD on on all fibres') + argstr="--allard", xor=_xor_inputs1, desc="Turn ARD on on all fibres" + ) - _xor_inputs2 = ('no_spat', 'non_linear', 'cnlinear') + _xor_inputs2 = ("no_spat", "non_linear", "cnlinear") no_spat = traits.Bool( - argstr='--nospat', + argstr="--nospat", xor=_xor_inputs2, - desc='Initialise with tensor, not spatially') + desc="Initialise with tensor, not spatially", + ) non_linear = traits.Bool( - argstr='--nonlinear', - xor=_xor_inputs2, - desc='Initialise with nonlinear fitting') + argstr="--nonlinear", xor=_xor_inputs2, desc="Initialise with nonlinear fitting" + ) cnlinear 
= traits.Bool( - argstr='--cnonlinear', + argstr="--cnonlinear", xor=_xor_inputs2, - desc=('Initialise with constrained nonlinear ' - 'fitting')) - rician = traits.Bool(argstr='--rician', desc=('use Rician noise modeling')) + desc=("Initialise with constrained nonlinear " "fitting"), + ) + rician = traits.Bool(argstr="--rician", desc=("use Rician noise modeling")) - _xor_inputs3 = ['f0_noard', 'f0_ard'] + _xor_inputs3 = ["f0_noard", "f0_ard"] f0_noard = traits.Bool( - argstr='--f0', + argstr="--f0", xor=_xor_inputs3, - desc=('Noise floor model: add to the model an ' - 'unattenuated signal compartment f0')) + desc=( + "Noise floor model: add to the model an " + "unattenuated signal compartment f0" + ), + ) f0_ard = traits.Bool( - argstr='--f0 --ardf0', - xor=_xor_inputs3 + ['all_ard'], - desc=('Noise floor model: add to the model an ' - 'unattenuated signal compartment f0')) + argstr="--f0 --ardf0", + xor=_xor_inputs3 + ["all_ard"], + desc=( + "Noise floor model: add to the model an " + "unattenuated signal compartment f0" + ), + ) force_dir = traits.Bool( True, - argstr='--forcedir', + argstr="--forcedir", usedefault=True, - desc=('use the actual directory name given ' - '(do not add + to make a new directory)')) + desc=( + "use the actual directory name given " + "(do not add + to make a new directory)" + ), + ) class FSLXCommandOutputSpec(TraitedSpec): dyads = OutputMultiPath( - File(exists=True), - desc=('Mean of PDD distribution' - ' in vector form.')) + File(exists=True), desc=("Mean of PDD distribution" " in vector form.") + ) fsamples = OutputMultiPath( - File(exists=True), - desc=('Samples from the ' - 'distribution on f ' - 'anisotropy')) - mean_dsamples = File( - exists=True, desc='Mean of distribution on diffusivity d') + File(exists=True), desc=("Samples from the " "distribution on f " "anisotropy") + ) + mean_dsamples = File(exists=True, desc="Mean of distribution on diffusivity d") mean_fsamples = OutputMultiPath( - File(exists=True), desc=('Mean of distribution on f ' - 'anisotropy')) + File(exists=True), desc=("Mean of distribution on f " "anisotropy") + ) mean_S0samples = File( - exists=True, - desc=('Mean of distribution on T2w' - 'baseline signal intensity S0')) + exists=True, desc=("Mean of distribution on T2w" "baseline signal intensity S0") + ) mean_tausamples = File( exists=True, - desc=('Mean of distribution on ' - 'tau samples (only with rician ' - 'noise)')) - phsamples = OutputMultiPath( - File(exists=True), desc=('phi samples, per fiber')) - thsamples = OutputMultiPath( - File(exists=True), desc=('theta samples, per fiber')) + desc=("Mean of distribution on " "tau samples (only with rician " "noise)"), + ) + phsamples = OutputMultiPath(File(exists=True), desc=("phi samples, per fiber")) + thsamples = OutputMultiPath(File(exists=True), desc=("theta samples, per fiber")) class FSLXCommand(FSLCommand): """ Base support for ``xfibres`` and ``bedpostx`` """ + input_spec = FSLXCommandInputSpec output_spec = FSLXCommandOutputSpec @@ -289,123 +302,120 @@ def _list_outputs(self, out_dir=None): if isdefined(self.inputs.logdir): out_dir = os.path.abspath(self.inputs.logdir) else: - out_dir = os.path.abspath('logdir') + out_dir = os.path.abspath("logdir") - multi_out = [ - 'dyads', 'fsamples', 'mean_fsamples', 'phsamples', 'thsamples' - ] - single_out = ['mean_dsamples', 'mean_S0samples'] + multi_out = ["dyads", "fsamples", "mean_fsamples", "phsamples", "thsamples"] + single_out = ["mean_dsamples", "mean_S0samples"] for k in single_out: outputs[k] = self._gen_fname(k, 
cwd=out_dir) if isdefined(self.inputs.rician) and self.inputs.rician: - outputs['mean_tausamples'] = self._gen_fname( - 'mean_tausamples', cwd=out_dir) + outputs["mean_tausamples"] = self._gen_fname("mean_tausamples", cwd=out_dir) for k in multi_out: outputs[k] = [] for i in range(1, n_fibres + 1): - outputs['fsamples'].append( - self._gen_fname('f%dsamples' % i, cwd=out_dir)) - outputs['mean_fsamples'].append( - self._gen_fname('mean_f%dsamples' % i, cwd=out_dir)) + outputs["fsamples"].append(self._gen_fname("f%dsamples" % i, cwd=out_dir)) + outputs["mean_fsamples"].append( + self._gen_fname("mean_f%dsamples" % i, cwd=out_dir) + ) for i in range(1, n_fibres + 1): - outputs['dyads'].append( - self._gen_fname('dyads%d' % i, cwd=out_dir)) - outputs['phsamples'].append( - self._gen_fname('ph%dsamples' % i, cwd=out_dir)) - outputs['thsamples'].append( - self._gen_fname('th%dsamples' % i, cwd=out_dir)) + outputs["dyads"].append(self._gen_fname("dyads%d" % i, cwd=out_dir)) + outputs["phsamples"].append(self._gen_fname("ph%dsamples" % i, cwd=out_dir)) + outputs["thsamples"].append(self._gen_fname("th%dsamples" % i, cwd=out_dir)) return outputs class BEDPOSTX5InputSpec(FSLXCommandInputSpec): - dwi = File( - exists=True, desc='diffusion weighted image data file', mandatory=True) - mask = File(exists=True, desc='bet binary mask file', mandatory=True) - bvecs = File(exists=True, desc='b vectors file', mandatory=True) - bvals = File(exists=True, desc='b values file', mandatory=True) - logdir = Directory(argstr='--logdir=%s') + dwi = File(exists=True, desc="diffusion weighted image data file", mandatory=True) + mask = File(exists=True, desc="bet binary mask file", mandatory=True) + bvecs = File(exists=True, desc="b vectors file", mandatory=True) + bvals = File(exists=True, desc="b values file", mandatory=True) + logdir = Directory(argstr="--logdir=%s") n_fibres = traits.Range( usedefault=True, low=1, value=2, - argstr='-n %d', - desc=('Maximum number of fibres to fit in each voxel'), - mandatory=True) + argstr="-n %d", + desc=("Maximum number of fibres to fit in each voxel"), + mandatory=True, + ) model = traits.Enum( 1, 2, 3, - argstr='-model %d', - desc=('use monoexponential (1, default, required for ' - 'single-shell) or multiexponential (2, multi-' - 'shell) model')) - fudge = traits.Int(argstr='-w %d', desc='ARD fudge factor') + argstr="-model %d", + desc=( + "use monoexponential (1, default, required for " + "single-shell) or multiexponential (2, multi-" + "shell) model" + ), + ) + fudge = traits.Int(argstr="-w %d", desc="ARD fudge factor") n_jumps = traits.Int( - 5000, usedefault=True, - argstr='-j %d', desc='Num of jumps to be made by MCMC') + 5000, usedefault=True, argstr="-j %d", desc="Num of jumps to be made by MCMC" + ) burn_in = traits.Range( low=0, value=0, usedefault=True, - argstr='-b %d', - desc=('Total num of jumps at start of MCMC to be ' - 'discarded')) + argstr="-b %d", + desc=("Total num of jumps at start of MCMC to be " "discarded"), + ) sample_every = traits.Range( low=0, value=1, usedefault=True, - argstr='-s %d', - desc='Num of jumps for each sample (MCMC)') + argstr="-s %d", + desc="Num of jumps for each sample (MCMC)", + ) out_dir = Directory( - 'bedpostx', + "bedpostx", mandatory=True, - desc='output directory', + desc="output directory", usedefault=True, position=1, - argstr='%s') + argstr="%s", + ) gradnonlin = traits.Bool( - False, - argstr='-g', - desc=('consider gradient nonlinearities, ' - 'default off')) - grad_dev = File( - exists=True, desc='grad_dev file, if 
gradnonlin, -g is True') - use_gpu = traits.Bool(False, desc='Use the GPU version of bedpostx') + False, argstr="-g", desc=("consider gradient nonlinearities, " "default off") + ) + grad_dev = File(exists=True, desc="grad_dev file, if gradnonlin, -g is True") + use_gpu = traits.Bool(False, desc="Use the GPU version of bedpostx") class BEDPOSTX5OutputSpec(TraitedSpec): - mean_dsamples = File( - exists=True, desc='Mean of distribution on diffusivity d') + mean_dsamples = File(exists=True, desc="Mean of distribution on diffusivity d") mean_fsamples = OutputMultiPath( - File(exists=True), desc=('Mean of distribution on f ' - 'anisotropy')) + File(exists=True), desc=("Mean of distribution on f " "anisotropy") + ) mean_S0samples = File( - exists=True, - desc=('Mean of distribution on T2w' - 'baseline signal intensity S0')) + exists=True, desc=("Mean of distribution on T2w" "baseline signal intensity S0") + ) mean_phsamples = OutputMultiPath( - File(exists=True), desc='Mean of distribution on phi') + File(exists=True), desc="Mean of distribution on phi" + ) mean_thsamples = OutputMultiPath( - File(exists=True), desc='Mean of distribution on theta') + File(exists=True), desc="Mean of distribution on theta" + ) merged_thsamples = OutputMultiPath( - File(exists=True), desc=('Samples from the distribution ' - 'on theta')) + File(exists=True), desc=("Samples from the distribution " "on theta") + ) merged_phsamples = OutputMultiPath( - File(exists=True), desc=('Samples from the distribution ' - 'on phi')) + File(exists=True), desc=("Samples from the distribution " "on phi") + ) merged_fsamples = OutputMultiPath( File(exists=True), - desc=('Samples from the distribution on ' - 'anisotropic volume fraction')) + desc=("Samples from the distribution on " "anisotropic volume fraction"), + ) dyads = OutputMultiPath( - File(exists=True), desc='Mean of PDD distribution in vector form.') - dyads_dispersion = OutputMultiPath(File(exists=True), desc=('Dispersion')) + File(exists=True), desc="Mean of PDD distribution in vector form." 
+ ) + dyads_dispersion = OutputMultiPath(File(exists=True), desc=("Dispersion")) class BEDPOSTX5(FSLXCommand): @@ -435,7 +445,7 @@ class BEDPOSTX5(FSLXCommand): """ - _cmd = 'bedpostx' + _cmd = "bedpostx" _default_cmd = _cmd input_spec = BEDPOSTX5InputSpec output_spec = BEDPOSTX5OutputSpec @@ -443,11 +453,11 @@ class BEDPOSTX5(FSLXCommand): def __init__(self, **inputs): super(BEDPOSTX5, self).__init__(**inputs) - self.inputs.on_trait_change(self._cuda_update, 'use_gpu') + self.inputs.on_trait_change(self._cuda_update, "use_gpu") def _cuda_update(self): if isdefined(self.inputs.use_gpu) and self.inputs.use_gpu: - self._cmd = 'bedpostx_gpu' + self._cmd = "bedpostx_gpu" else: self._cmd = self._default_cmd @@ -457,20 +467,18 @@ def _run_interface(self, runtime): if not os.path.exists(subjectdir): os.makedirs(subjectdir) _, _, ext = split_filename(self.inputs.mask) - copyfile(self.inputs.mask, - os.path.join(subjectdir, 'nodif_brain_mask' + ext)) + copyfile(self.inputs.mask, os.path.join(subjectdir, "nodif_brain_mask" + ext)) _, _, ext = split_filename(self.inputs.dwi) - copyfile(self.inputs.dwi, os.path.join(subjectdir, 'data' + ext)) - copyfile(self.inputs.bvals, os.path.join(subjectdir, 'bvals')) - copyfile(self.inputs.bvecs, os.path.join(subjectdir, 'bvecs')) + copyfile(self.inputs.dwi, os.path.join(subjectdir, "data" + ext)) + copyfile(self.inputs.bvals, os.path.join(subjectdir, "bvals")) + copyfile(self.inputs.bvecs, os.path.join(subjectdir, "bvecs")) if isdefined(self.inputs.grad_dev): _, _, ext = split_filename(self.inputs.grad_dev) - copyfile(self.inputs.grad_dev, - os.path.join(subjectdir, 'grad_dev' + ext)) + copyfile(self.inputs.grad_dev, os.path.join(subjectdir, "grad_dev" + ext)) retval = super(BEDPOSTX5, self)._run_interface(runtime) - self._out_dir = subjectdir + '.bedpostX' + self._out_dir = subjectdir + ".bedpostX" return retval def _list_outputs(self): @@ -478,12 +486,17 @@ def _list_outputs(self): n_fibres = self.inputs.n_fibres multi_out = [ - 'merged_thsamples', 'merged_fsamples', 'merged_phsamples', - 'mean_phsamples', 'mean_thsamples', 'mean_fsamples', - 'dyads_dispersion', 'dyads' + "merged_thsamples", + "merged_fsamples", + "merged_phsamples", + "mean_phsamples", + "mean_thsamples", + "mean_fsamples", + "dyads_dispersion", + "dyads", ] - single_out = ['mean_dsamples', 'mean_S0samples'] + single_out = ["mean_dsamples", "mean_S0samples"] for k in single_out: outputs[k] = self._gen_fname(k, cwd=self._out_dir) @@ -492,30 +505,37 @@ def _list_outputs(self): outputs[k] = [] for i in range(1, n_fibres + 1): - outputs['merged_thsamples'].append( - self._gen_fname('merged_th%dsamples' % i, cwd=self._out_dir)) - outputs['merged_fsamples'].append( - self._gen_fname('merged_f%dsamples' % i, cwd=self._out_dir)) - outputs['merged_phsamples'].append( - self._gen_fname('merged_ph%dsamples' % i, cwd=self._out_dir)) - outputs['mean_thsamples'].append( - self._gen_fname('mean_th%dsamples' % i, cwd=self._out_dir)) - outputs['mean_phsamples'].append( - self._gen_fname('mean_ph%dsamples' % i, cwd=self._out_dir)) - outputs['mean_fsamples'].append( - self._gen_fname('mean_f%dsamples' % i, cwd=self._out_dir)) - outputs['dyads'].append( - self._gen_fname('dyads%d' % i, cwd=self._out_dir)) - outputs['dyads_dispersion'].append( - self._gen_fname('dyads%d_dispersion' % i, cwd=self._out_dir)) + outputs["merged_thsamples"].append( + self._gen_fname("merged_th%dsamples" % i, cwd=self._out_dir) + ) + outputs["merged_fsamples"].append( + self._gen_fname("merged_f%dsamples" % i, cwd=self._out_dir) + ) + 
outputs["merged_phsamples"].append( + self._gen_fname("merged_ph%dsamples" % i, cwd=self._out_dir) + ) + outputs["mean_thsamples"].append( + self._gen_fname("mean_th%dsamples" % i, cwd=self._out_dir) + ) + outputs["mean_phsamples"].append( + self._gen_fname("mean_ph%dsamples" % i, cwd=self._out_dir) + ) + outputs["mean_fsamples"].append( + self._gen_fname("mean_f%dsamples" % i, cwd=self._out_dir) + ) + outputs["dyads"].append(self._gen_fname("dyads%d" % i, cwd=self._out_dir)) + outputs["dyads_dispersion"].append( + self._gen_fname("dyads%d_dispersion" % i, cwd=self._out_dir) + ) return outputs class XFibres5InputSpec(FSLXCommandInputSpec): gradnonlin = File( exists=True, - argstr='--gradnonlin=%s', - desc='gradient file corresponding to slice') + argstr="--gradnonlin=%s", + desc="gradient file corresponding to slice", + ) class XFibres5(FSLXCommand): @@ -523,7 +543,8 @@ class XFibres5(FSLXCommand): Perform model parameters estimation for local (voxelwise) diffusion parameters """ - _cmd = 'xfibres' + + _cmd = "xfibres" input_spec = XFibres5InputSpec output_spec = FSLXCommandOutputSpec @@ -538,143 +559,176 @@ class ProbTrackXBaseInputSpec(FSLCommandInputSpec): fsamples = InputMultiPath(File(exists=True), mandatory=True) samples_base_name = traits.Str( "merged", - desc=('the rootname/base_name for samples ' - 'files'), - argstr='--samples=%s', - usedefault=True) + desc=("the rootname/base_name for samples " "files"), + argstr="--samples=%s", + usedefault=True, + ) mask = File( exists=True, - desc='bet binary mask file in diffusion space', - argstr='-m %s', - mandatory=True) + desc="bet binary mask file in diffusion space", + argstr="-m %s", + mandatory=True, + ) seed = traits.Either( File(exists=True), traits.List(File(exists=True)), traits.List(traits.List(traits.Int(), minlen=3, maxlen=3)), - desc=('seed volume(s), or voxel(s) or freesurfer ' - 'label file'), - argstr='--seed=%s', - mandatory=True) + desc=("seed volume(s), or voxel(s) or freesurfer " "label file"), + argstr="--seed=%s", + mandatory=True, + ) target_masks = InputMultiPath( File(exits=True), - desc=('list of target masks - required for ' - 'seeds_to_targets classification'), - argstr='--targetmasks=%s') + desc=("list of target masks - required for " "seeds_to_targets classification"), + argstr="--targetmasks=%s", + ) waypoints = File( exists=True, - desc=('waypoint mask or ascii list of waypoint masks - ' - 'only keep paths going through ALL the masks'), - argstr='--waypoints=%s') + desc=( + "waypoint mask or ascii list of waypoint masks - " + "only keep paths going through ALL the masks" + ), + argstr="--waypoints=%s", + ) network = traits.Bool( - desc=('activate network mode - only keep paths ' - 'going through at least one seed mask ' - '(required if multiple seed masks)'), - argstr='--network') + desc=( + "activate network mode - only keep paths " + "going through at least one seed mask " + "(required if multiple seed masks)" + ), + argstr="--network", + ) seed_ref = File( exists=True, - desc=('reference vol to define seed space in simple mode ' - '- diffusion space assumed if absent'), - argstr='--seedref=%s') + desc=( + "reference vol to define seed space in simple mode " + "- diffusion space assumed if absent" + ), + argstr="--seedref=%s", + ) out_dir = Directory( exists=True, - argstr='--dir=%s', - desc='directory to put the final volumes in', - genfile=True) + argstr="--dir=%s", + desc="directory to put the final volumes in", + genfile=True, + ) force_dir = traits.Bool( True, - desc=('use the actual directory name 
given - i.e. ' - 'do not add + to make a new directory'), - argstr='--forcedir', - usedefault=True) + desc=( + "use the actual directory name given - i.e. " + "do not add + to make a new directory" + ), + argstr="--forcedir", + usedefault=True, + ) opd = traits.Bool( - True, - desc='outputs path distributions', - argstr='--opd', - usedefault=True) + True, desc="outputs path distributions", argstr="--opd", usedefault=True + ) correct_path_distribution = traits.Bool( - desc=('correct path distribution ' - 'for the length of the ' - 'pathways'), - argstr='--pd') - os2t = traits.Bool(desc='Outputs seeds to targets', argstr='--os2t') + desc=("correct path distribution " "for the length of the " "pathways"), + argstr="--pd", + ) + os2t = traits.Bool(desc="Outputs seeds to targets", argstr="--os2t") # paths_file = File('nipype_fdtpaths', usedefault=True, argstr='--out=%s', # desc='produces an output file (default is fdt_paths)') avoid_mp = File( exists=True, - desc=('reject pathways passing through locations given by ' - 'this mask'), - argstr='--avoid=%s') + desc=("reject pathways passing through locations given by " "this mask"), + argstr="--avoid=%s", + ) stop_mask = File( exists=True, - argstr='--stop=%s', - desc='stop tracking at locations given by this mask file') + argstr="--stop=%s", + desc="stop tracking at locations given by this mask file", + ) xfm = File( exists=True, - argstr='--xfm=%s', - desc=('transformation matrix taking seed space to DTI space ' - '(either FLIRT matrix or FNIRT warp_field) - default is ' - 'identity')) + argstr="--xfm=%s", + desc=( + "transformation matrix taking seed space to DTI space " + "(either FLIRT matrix or FNIRT warp_field) - default is " + "identity" + ), + ) inv_xfm = File( - argstr='--invxfm=%s', - desc=('transformation matrix taking DTI space to seed ' - 'space (compulsory when using a warp_field for ' - 'seeds_to_dti)')) + argstr="--invxfm=%s", + desc=( + "transformation matrix taking DTI space to seed " + "space (compulsory when using a warp_field for " + "seeds_to_dti)" + ), + ) n_samples = traits.Int( 5000, - argstr='--nsamples=%d', - desc='number of samples - default=5000', - usedefault=True) + argstr="--nsamples=%d", + desc="number of samples - default=5000", + usedefault=True, + ) n_steps = traits.Int( - argstr='--nsteps=%d', desc='number of steps per sample - default=2000') + argstr="--nsteps=%d", desc="number of steps per sample - default=2000" + ) dist_thresh = traits.Float( - argstr='--distthresh=%.3f', - desc=('discards samples shorter than this ' - 'threshold (in mm - default=0)')) + argstr="--distthresh=%.3f", + desc=("discards samples shorter than this " "threshold (in mm - default=0)"), + ) c_thresh = traits.Float( - argstr='--cthr=%.3f', desc='curvature threshold - default=0.2') + argstr="--cthr=%.3f", desc="curvature threshold - default=0.2" + ) sample_random_points = traits.Bool( - argstr='--sampvox', - desc=('sample random points within ' - 'seed voxels')) + argstr="--sampvox", desc=("sample random points within " "seed voxels") + ) step_length = traits.Float( - argstr='--steplength=%.3f', desc='step_length in mm - default=0.5') + argstr="--steplength=%.3f", desc="step_length in mm - default=0.5" + ) loop_check = traits.Bool( - argstr='--loopcheck', - desc=('perform loop_checks on paths - slower, ' - 'but allows lower curvature threshold')) + argstr="--loopcheck", + desc=( + "perform loop_checks on paths - slower, " + "but allows lower curvature threshold" + ), + ) use_anisotropy = traits.Bool( - argstr='--usef', desc='use 
anisotropy to constrain tracking') + argstr="--usef", desc="use anisotropy to constrain tracking" + ) rand_fib = traits.Enum( 0, 1, 2, 3, - argstr='--randfib=%d', - desc=('options: 0 - default, 1 - to randomly ' - 'sample initial fibres (with f > fibthresh), ' - '2 - to sample in proportion fibres (with ' - 'f>fibthresh) to f, 3 - to sample ALL ' - 'populations at random (even if ' - 'f fibthresh), " + "2 - to sample in proportion fibres (with " + "f>fibthresh) to f, 3 - to sample ALL " + "populations at random (even if " + "f>> pbx2.cmdline 'probtrackx2 --forcedir -m nodif_brain_mask.nii.gz --nsamples=3 --nsteps=10 --opd --dir=. --samples=merged --seed=seed_source.nii.gz' """ - _cmd = 'probtrackx2' + + _cmd = "probtrackx2" input_spec = ProbTrackX2InputSpec output_spec = ProbTrackX2OutputSpec @@ -980,87 +1076,101 @@ def _list_outputs(self): else: out_dir = self.inputs.out_dir - outputs['way_total'] = os.path.abspath( - os.path.join(out_dir, 'waytotal')) + outputs["way_total"] = os.path.abspath(os.path.join(out_dir, "waytotal")) if isdefined(self.inputs.omatrix1): - outputs['network_matrix'] = os.path.abspath( - os.path.join(out_dir, 'matrix_seeds_to_all_targets')) - outputs['matrix1_dot'] = os.path.abspath( - os.path.join(out_dir, 'fdt_matrix1.dot')) + outputs["network_matrix"] = os.path.abspath( + os.path.join(out_dir, "matrix_seeds_to_all_targets") + ) + outputs["matrix1_dot"] = os.path.abspath( + os.path.join(out_dir, "fdt_matrix1.dot") + ) if isdefined(self.inputs.omatrix2): - outputs['lookup_tractspace'] = os.path.abspath( - os.path.join(out_dir, 'lookup_tractspace_fdt_matrix2.nii.gz')) - outputs['matrix2_dot'] = os.path.abspath( - os.path.join(out_dir, 'fdt_matrix2.dot')) + outputs["lookup_tractspace"] = os.path.abspath( + os.path.join(out_dir, "lookup_tractspace_fdt_matrix2.nii.gz") + ) + outputs["matrix2_dot"] = os.path.abspath( + os.path.join(out_dir, "fdt_matrix2.dot") + ) if isdefined(self.inputs.omatrix3): - outputs['matrix3_dot'] = os.path.abspath( - os.path.join(out_dir, 'fdt_matrix3.dot')) + outputs["matrix3_dot"] = os.path.abspath( + os.path.join(out_dir, "fdt_matrix3.dot") + ) return outputs class VecRegInputSpec(FSLCommandInputSpec): in_file = File( exists=True, - argstr='-i %s', - desc='filename for input vector or tensor field', - mandatory=True) + argstr="-i %s", + desc="filename for input vector or tensor field", + mandatory=True, + ) out_file = File( - argstr='-o %s', - desc=('filename for output registered vector or tensor ' - 'field'), + argstr="-o %s", + desc=("filename for output registered vector or tensor " "field"), genfile=True, - hash_files=False) + hash_files=False, + ) ref_vol = File( exists=True, - argstr='-r %s', - desc='filename for reference (target) volume', - mandatory=True) + argstr="-r %s", + desc="filename for reference (target) volume", + mandatory=True, + ) affine_mat = File( - exists=True, - argstr='-t %s', - desc='filename for affine transformation matrix') + exists=True, argstr="-t %s", desc="filename for affine transformation matrix" + ) warp_field = File( exists=True, - argstr='-w %s', - desc=('filename for 4D warp field for nonlinear ' - 'registration')) + argstr="-w %s", + desc=("filename for 4D warp field for nonlinear " "registration"), + ) rotation_mat = File( exists=True, - argstr='--rotmat=%s', - desc=('filename for secondary affine matrix if set, ' - 'this will be used for the rotation of the ' - 'vector/tensor field')) + argstr="--rotmat=%s", + desc=( + "filename for secondary affine matrix if set, " + "this will be used for the 
rotation of the " + "vector/tensor field" + ), + ) rotation_warp = File( exists=True, - argstr='--rotwarp=%s', - desc=('filename for secondary warp field if set, ' - 'this will be used for the rotation of the ' - 'vector/tensor field')) + argstr="--rotwarp=%s", + desc=( + "filename for secondary warp field if set, " + "this will be used for the rotation of the " + "vector/tensor field" + ), + ) interpolation = traits.Enum( "nearestneighbour", "trilinear", "sinc", "spline", - argstr='--interp=%s', - desc=('interpolation method : ' - 'nearestneighbour, trilinear (default), ' - 'sinc or spline')) - mask = File(exists=True, argstr='-m %s', desc='brain mask in input space') + argstr="--interp=%s", + desc=( + "interpolation method : " + "nearestneighbour, trilinear (default), " + "sinc or spline" + ), + ) + mask = File(exists=True, argstr="-m %s", desc="brain mask in input space") ref_mask = File( exists=True, - argstr='--refmask=%s', - desc=('brain mask in output space (useful for speed up of ' - 'nonlinear reg)')) + argstr="--refmask=%s", + desc=("brain mask in output space (useful for speed up of " "nonlinear reg)"), + ) class VecRegOutputSpec(TraitedSpec): out_file = File( exists=True, - desc=('path/name of filename for the registered vector or ' - 'tensor field')) + desc=("path/name of filename for the registered vector or " "tensor field"), + ) class VecReg(FSLCommand): @@ -1081,7 +1191,7 @@ class VecReg(FSLCommand): """ - _cmd = 'vecreg' + _cmd = "vecreg" input_spec = VecRegInputSpec output_spec = VecRegOutputSpec @@ -1089,22 +1199,23 @@ def _run_interface(self, runtime): if not isdefined(self.inputs.out_file): pth, base_name = os.path.split(self.inputs.in_file) self.inputs.out_file = self._gen_fname( - base_name, cwd=os.path.abspath(pth), suffix='_vreg') + base_name, cwd=os.path.abspath(pth), suffix="_vreg" + ) return super(VecReg, self)._run_interface(runtime) def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = self.inputs.out_file - if (not isdefined(outputs['out_file']) - and isdefined(self.inputs.in_file)): + outputs["out_file"] = self.inputs.out_file + if not isdefined(outputs["out_file"]) and isdefined(self.inputs.in_file): pth, base_name = os.path.split(self.inputs.in_file) - outputs['out_file'] = self._gen_fname( - base_name, cwd=os.path.abspath(pth), suffix='_vreg') - outputs['out_file'] = os.path.abspath(outputs['out_file']) + outputs["out_file"] = self._gen_fname( + base_name, cwd=os.path.abspath(pth), suffix="_vreg" + ) + outputs["out_file"] = os.path.abspath(outputs["out_file"]) return outputs def _gen_filename(self, name): - if name == 'out_file': + if name == "out_file": return self._list_outputs()[name] else: return None @@ -1113,23 +1224,26 @@ def _gen_filename(self, name): class ProjThreshInputSpec(FSLCommandInputSpec): in_files = traits.List( File(exists=True), - argstr='%s', - desc='a list of input volumes', + argstr="%s", + desc="a list of input volumes", mandatory=True, - position=0) + position=0, + ) threshold = traits.Int( - argstr='%d', - desc=('threshold indicating minimum number of seed ' - 'voxels entering this mask region'), + argstr="%d", + desc=( + "threshold indicating minimum number of seed " + "voxels entering this mask region" + ), mandatory=True, - position=1) + position=1, + ) class ProjThreshOuputSpec(TraitedSpec): out_files = traits.List( - File(exists=True), - desc=('path/name of output volume after ' - 'thresholding')) + File(exists=True), desc=("path/name of output volume after " "thresholding") + ) class 
ProjThresh(FSLCommand): @@ -1148,44 +1262,46 @@ class ProjThresh(FSLCommand): """ - _cmd = 'proj_thresh' + _cmd = "proj_thresh" input_spec = ProjThreshInputSpec output_spec = ProjThreshOuputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_files'] = [] + outputs["out_files"] = [] for name in self.inputs.in_files: cwd, base_name = os.path.split(name) - outputs['out_files'].append( + outputs["out_files"].append( self._gen_fname( base_name, cwd=cwd, - suffix='_proj_seg_thr_{}'.format(self.inputs.threshold))) + suffix="_proj_seg_thr_{}".format(self.inputs.threshold), + ) + ) return outputs class FindTheBiggestInputSpec(FSLCommandInputSpec): in_files = traits.List( File(exists=True), - argstr='%s', - desc=('a list of input volumes or a ' - 'singleMatrixFile'), + argstr="%s", + desc=("a list of input volumes or a " "singleMatrixFile"), position=0, - mandatory=True) + mandatory=True, + ) out_file = File( - argstr='%s', - desc='file with the resulting segmentation', + argstr="%s", + desc="file with the resulting segmentation", position=2, genfile=True, - hash_files=False) + hash_files=False, + ) class FindTheBiggestOutputSpec(TraitedSpec): out_file = File( - exists=True, - argstr='%s', - desc='output file indexed in order of input files') + exists=True, argstr="%s", desc="output file indexed in order of input files" + ) class FindTheBiggest(FSLCommand): @@ -1205,27 +1321,26 @@ class FindTheBiggest(FSLCommand): 'find_the_biggest seeds_to_M1.nii seeds_to_M2.nii biggestSegmentation' """ - _cmd = 'find_the_biggest' + + _cmd = "find_the_biggest" input_spec = FindTheBiggestInputSpec output_spec = FindTheBiggestOutputSpec def _run_interface(self, runtime): if not isdefined(self.inputs.out_file): - self.inputs.out_file = self._gen_fname( - 'biggestSegmentation', suffix='') + self.inputs.out_file = self._gen_fname("biggestSegmentation", suffix="") return super(FindTheBiggest, self)._run_interface(runtime) def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = self.inputs.out_file - if not isdefined(outputs['out_file']): - outputs['out_file'] = self._gen_fname( - 'biggestSegmentation', suffix='') - outputs['out_file'] = os.path.abspath(outputs['out_file']) + outputs["out_file"] = self.inputs.out_file + if not isdefined(outputs["out_file"]): + outputs["out_file"] = self._gen_fname("biggestSegmentation", suffix="") + outputs["out_file"] = os.path.abspath(outputs["out_file"]) return outputs def _gen_filename(self, name): - if name == 'out_file': + if name == "out_file": return self._list_outputs()[name] else: return None @@ -1237,35 +1352,36 @@ class TractSkeletonInputSpec(FSLCommandInputSpec): exists=True, mandatory=True, argstr="-i %s", - desc="input image (typcially mean FA volume)") + desc="input image (typcially mean FA volume)", + ) _proj_inputs = ["threshold", "distance_map", "data_file"] project_data = traits.Bool( argstr="-p %.3f %s %s %s %s", requires=_proj_inputs, - desc="project data onto skeleton") + desc="project data onto skeleton", + ) threshold = traits.Float(desc="skeleton threshold value") distance_map = File(exists=True, desc="distance map image") search_mask_file = File( exists=True, xor=["use_cingulum_mask"], - desc="mask in which to use alternate search rule") + desc="mask in which to use alternate search rule", + ) use_cingulum_mask = traits.Bool( True, usedefault=True, xor=["search_mask_file"], - desc=("perform alternate search using " - "built-in cingulum mask")) - data_file = File( - exists=True, desc="4D data to project onto 
skeleton (usually FA)") + desc=("perform alternate search using " "built-in cingulum mask"), + ) + data_file = File(exists=True, desc="4D data to project onto skeleton (usually FA)") alt_data_file = File( - exists=True, - argstr="-a %s", - desc="4D non-FA data to project onto skeleton") - alt_skeleton = File( - exists=True, argstr="-s %s", desc="alternate skeleton to use") + exists=True, argstr="-a %s", desc="4D non-FA data to project onto skeleton" + ) + alt_skeleton = File(exists=True, argstr="-s %s", desc="alternate skeleton to use") projected_data = File(desc="input data projected onto skeleton") skeleton_file = traits.Either( - traits.Bool, File, argstr="-o %s", desc="write out skeleton image") + traits.Bool, File, argstr="-o %s", desc="write out skeleton image" + ) class TractSkeletonOutputSpec(TraitedSpec): @@ -1315,8 +1431,13 @@ def _format_arg(self, name, spec, value): proj_file = self._list_outputs()["projected_data"] else: proj_file = _si.projected_data - return spec.argstr % (_si.threshold, _si.distance_map, - mask_file, _si.data_file, proj_file) + return spec.argstr % ( + _si.threshold, + _si.distance_map, + mask_file, + _si.data_file, + proj_file, + ) elif name == "skeleton_file": if isinstance(value, bool): return spec.argstr % self._list_outputs()["skeleton_file"] @@ -1335,18 +1456,14 @@ def _list_outputs(self): if isdefined(_si.alt_data_file): stem = _si.alt_data_file outputs["projected_data"] = fname_presuffix( - stem, - suffix="_skeletonised", - newpath=os.getcwd(), - use_ext=True) + stem, suffix="_skeletonised", newpath=os.getcwd(), use_ext=True + ) if isdefined(_si.skeleton_file) and _si.skeleton_file: outputs["skeleton_file"] = _si.skeleton_file if isinstance(_si.skeleton_file, bool): outputs["skeleton_file"] = fname_presuffix( - _si.in_file, - suffix="_skeleton", - newpath=os.getcwd(), - use_ext=True) + _si.in_file, suffix="_skeleton", newpath=os.getcwd(), use_ext=True + ) return outputs @@ -1356,29 +1473,27 @@ class DistanceMapInputSpec(FSLCommandInputSpec): exists=True, mandatory=True, argstr="--in=%s", - desc="image to calculate distance values for") + desc="image to calculate distance values for", + ) mask_file = File( - exists=True, - argstr="--mask=%s", - desc="binary mask to contrain calculations") + exists=True, argstr="--mask=%s", desc="binary mask to contrain calculations" + ) invert_input = traits.Bool(argstr="--invert", desc="invert input image") local_max_file = traits.Either( traits.Bool, File, argstr="--localmax=%s", desc="write an image of the local maxima", - hash_files=False) + hash_files=False, + ) distance_map = File( - genfile=True, - argstr="--out=%s", - desc="distance map to write", - hash_files=False) + genfile=True, argstr="--out=%s", desc="distance map to write", hash_files=False + ) class DistanceMapOutputSpec(TraitedSpec): - distance_map = File( - exists=True, desc="value is distance to nearest nonzero voxels") + distance_map = File(exists=True, desc="value is distance to nearest nonzero voxels") local_max_file = File(desc="image of local maxima") @@ -1412,21 +1527,16 @@ def _list_outputs(self): outputs["distance_map"] = _si.distance_map if not isdefined(_si.distance_map): outputs["distance_map"] = fname_presuffix( - _si.in_file, - suffix="_dstmap", - use_ext=True, - newpath=os.getcwd()) + _si.in_file, suffix="_dstmap", use_ext=True, newpath=os.getcwd() + ) outputs["distance_map"] = os.path.abspath(outputs["distance_map"]) if isdefined(_si.local_max_file): outputs["local_max_file"] = _si.local_max_file if isinstance(_si.local_max_file, 
bool): outputs["local_max_file"] = fname_presuffix( - _si.in_file, - suffix="_lclmax", - use_ext=True, - newpath=os.getcwd()) - outputs["local_max_file"] = os.path.abspath( - outputs["local_max_file"]) + _si.in_file, suffix="_lclmax", use_ext=True, newpath=os.getcwd() + ) + outputs["local_max_file"] = os.path.abspath(outputs["local_max_file"]) return outputs def _gen_filename(self, name): @@ -1439,13 +1549,15 @@ class MakeDyadicVectorsInputSpec(FSLCommandInputSpec): theta_vol = File(exists=True, mandatory=True, position=0, argstr="%s") phi_vol = File(exists=True, mandatory=True, position=1, argstr="%s") mask = File(exists=True, position=2, argstr="%s") - output = File( - "dyads", position=3, usedefault=True, argstr="%s", hash_files=False) + output = File("dyads", position=3, usedefault=True, argstr="%s", hash_files=False) perc = traits.Float( - desc=("the {perc}% angle of the output cone of " - "uncertainty (output will be in degrees)"), + desc=( + "the {perc}% angle of the output cone of " + "uncertainty (output will be in degrees)" + ), position=4, - argstr="%f") + argstr="%f", + ) class MakeDyadicVectorsOutputSpec(TraitedSpec): @@ -1465,6 +1577,7 @@ def _list_outputs(self): outputs = self.output_spec().get() outputs["dyads"] = self._gen_fname(self.inputs.output) outputs["dispersion"] = self._gen_fname( - self.inputs.output, suffix="_dispersion") + self.inputs.output, suffix="_dispersion" + ) return outputs diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index a3dceb1902..e7f3ff4318 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -13,52 +13,56 @@ from ...utils.filemanip import split_filename from ...utils import NUMPY_MMAP -from ..base import (traits, TraitedSpec, InputMultiPath, File, isdefined) +from ..base import traits, TraitedSpec, InputMultiPath, File, isdefined from .base import FSLCommand, FSLCommandInputSpec, Info class PrepareFieldmapInputSpec(FSLCommandInputSpec): scanner = traits.String( - 'SIEMENS', - argstr='%s', - position=1, - desc='must be SIEMENS', - usedefault=True) + "SIEMENS", argstr="%s", position=1, desc="must be SIEMENS", usedefault=True + ) in_phase = File( exists=True, - argstr='%s', + argstr="%s", position=2, mandatory=True, - desc=('Phase difference map, in SIEMENS format range from ' - '0-4096 or 0-8192)')) + desc=( + "Phase difference map, in SIEMENS format range from " "0-4096 or 0-8192)" + ), + ) in_magnitude = File( exists=True, - argstr='%s', + argstr="%s", position=3, mandatory=True, - desc='Magnitude difference map, brain extracted') + desc="Magnitude difference map, brain extracted", + ) delta_TE = traits.Float( 2.46, usedefault=True, mandatory=True, - argstr='%f', + argstr="%f", position=-2, - desc=('echo time difference of the ' - 'fieldmap sequence in ms. (usually 2.46ms in' - ' Siemens)')) + desc=( + "echo time difference of the " + "fieldmap sequence in ms. 
(usually 2.46ms in" + " Siemens)" + ), + ) nocheck = traits.Bool( False, position=-1, - argstr='--nocheck', + argstr="--nocheck", usedefault=True, - desc=('do not perform sanity checks for image ' - 'size/range/dimensions')) + desc=("do not perform sanity checks for image " "size/range/dimensions"), + ) out_fieldmap = File( - argstr='%s', position=4, desc='output name for prepared fieldmap') + argstr="%s", position=4, desc="output name for prepared fieldmap" + ) class PrepareFieldmapOutputSpec(TraitedSpec): - out_fieldmap = File(exists=True, desc='output name for prepared fieldmap') + out_fieldmap = File(exists=True, desc="output name for prepared fieldmap") class PrepareFieldmap(FSLCommand): @@ -85,7 +89,8 @@ class PrepareFieldmap(FSLCommand): """ - _cmd = 'fsl_prepare_fieldmap' + + _cmd = "fsl_prepare_fieldmap" input_spec = PrepareFieldmapInputSpec output_spec = PrepareFieldmapOutputSpec @@ -95,16 +100,17 @@ def _parse_inputs(self, skip=None): if not isdefined(self.inputs.out_fieldmap): self.inputs.out_fieldmap = self._gen_fname( - self.inputs.in_phase, suffix='_fslprepared') + self.inputs.in_phase, suffix="_fslprepared" + ) if not isdefined(self.inputs.nocheck) or not self.inputs.nocheck: - skip += ['nocheck'] + skip += ["nocheck"] return super(PrepareFieldmap, self)._parse_inputs(skip=skip) def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_fieldmap'] = self.inputs.out_fieldmap + outputs["out_fieldmap"] = self.inputs.out_fieldmap return outputs def _run_interface(self, runtime): @@ -124,171 +130,192 @@ class TOPUPInputSpec(FSLCommandInputSpec): in_file = File( exists=True, mandatory=True, - desc='name of 4D file with images', - argstr='--imain=%s') + desc="name of 4D file with images", + argstr="--imain=%s", + ) encoding_file = File( exists=True, mandatory=True, - xor=['encoding_direction'], - desc='name of text file with PE directions/times', - argstr='--datain=%s') + xor=["encoding_direction"], + desc="name of text file with PE directions/times", + argstr="--datain=%s", + ) encoding_direction = traits.List( - traits.Enum('y', 'x', 'z', 'x-', 'y-', 'z-'), + traits.Enum("y", "x", "z", "x-", "y-", "z-"), mandatory=True, - xor=['encoding_file'], - requires=['readout_times'], - argstr='--datain=%s', - desc=('encoding direction for automatic ' - 'generation of encoding_file')) + xor=["encoding_file"], + requires=["readout_times"], + argstr="--datain=%s", + desc=("encoding direction for automatic " "generation of encoding_file"), + ) readout_times = InputMultiPath( traits.Float, - requires=['encoding_direction'], - xor=['encoding_file'], + requires=["encoding_direction"], + xor=["encoding_file"], mandatory=True, - desc=('readout times (dwell times by # ' - 'phase-encode steps minus 1)')) + desc=("readout times (dwell times by # " "phase-encode steps minus 1)"), + ) out_base = File( - desc=('base-name of output files (spline ' - 'coefficients (Hz) and movement parameters)'), - name_source=['in_file'], - name_template='%s_base', - argstr='--out=%s', - hash_files=False) + desc=( + "base-name of output files (spline " + "coefficients (Hz) and movement parameters)" + ), + name_source=["in_file"], + name_template="%s_base", + argstr="--out=%s", + hash_files=False, + ) out_field = File( - argstr='--fout=%s', + argstr="--fout=%s", hash_files=False, - name_source=['in_file'], - name_template='%s_field', - desc='name of image file with field (Hz)') + name_source=["in_file"], + name_template="%s_field", + desc="name of image file with field (Hz)", + ) out_warp_prefix = 
traits.Str( "warpfield", - argstr='--dfout=%s', + argstr="--dfout=%s", hash_files=False, - desc='prefix for the warpfield images (in mm)', - usedefault=True) + desc="prefix for the warpfield images (in mm)", + usedefault=True, + ) out_mat_prefix = traits.Str( "xfm", - argstr='--rbmout=%s', + argstr="--rbmout=%s", hash_files=False, - desc='prefix for the realignment matrices', - usedefault=True) + desc="prefix for the realignment matrices", + usedefault=True, + ) out_jac_prefix = traits.Str( "jac", - argstr='--jacout=%s', + argstr="--jacout=%s", hash_files=False, - desc='prefix for the warpfield images', - usedefault=True) + desc="prefix for the warpfield images", + usedefault=True, + ) out_corrected = File( - argstr='--iout=%s', + argstr="--iout=%s", hash_files=False, - name_source=['in_file'], - name_template='%s_corrected', - desc='name of 4D image file with unwarped images') + name_source=["in_file"], + name_template="%s_corrected", + desc="name of 4D image file with unwarped images", + ) out_logfile = File( - argstr='--logout=%s', - desc='name of log-file', - name_source=['in_file'], - name_template='%s_topup.log', + argstr="--logout=%s", + desc="name of log-file", + name_source=["in_file"], + name_template="%s_topup.log", keep_extension=True, - hash_files=False) + hash_files=False, + ) # TODO: the following traits admit values separated by commas, one value # per registration level inside topup. warp_res = traits.Float( - argstr='--warpres=%f', - desc=('(approximate) resolution (in mm) of warp ' - 'basis for the different sub-sampling levels')) - subsamp = traits.Int(argstr='--subsamp=%d', - desc='sub-sampling scheme') + argstr="--warpres=%f", + desc=( + "(approximate) resolution (in mm) of warp " + "basis for the different sub-sampling levels" + ), + ) + subsamp = traits.Int(argstr="--subsamp=%d", desc="sub-sampling scheme") fwhm = traits.Float( - argstr='--fwhm=%f', - desc='FWHM (in mm) of gaussian smoothing kernel') + argstr="--fwhm=%f", desc="FWHM (in mm) of gaussian smoothing kernel" + ) config = traits.String( - 'b02b0.cnf', - argstr='--config=%s', + "b02b0.cnf", + argstr="--config=%s", usedefault=True, - desc=('Name of config file specifying command line ' - 'arguments')) - max_iter = traits.Int( - argstr='--miter=%d', - desc='max # of non-linear iterations') + desc=("Name of config file specifying command line " "arguments"), + ) + max_iter = traits.Int(argstr="--miter=%d", desc="max # of non-linear iterations") reg_lambda = traits.Float( - argstr='--lambda=%0.f', - desc=('Weight of regularisation, default ' - 'depending on --ssqlambda and --regmod switches.')) + argstr="--lambda=%0.f", + desc=( + "Weight of regularisation, default " + "depending on --ssqlambda and --regmod switches." + ), + ) ssqlambda = traits.Enum( 1, 0, - argstr='--ssqlambda=%d', - desc=('Weight lambda by the current value of the ' - 'ssd. If used (=1), the effective weight of ' - 'regularisation term becomes higher for the ' - 'initial iterations, therefore initial steps' - ' are a little smoother than they would ' - 'without weighting. This reduces the ' - 'risk of finding a local minimum.')) + argstr="--ssqlambda=%d", + desc=( + "Weight lambda by the current value of the " + "ssd. If used (=1), the effective weight of " + "regularisation term becomes higher for the " + "initial iterations, therefore initial steps" + " are a little smoother than they would " + "without weighting. This reduces the " + "risk of finding a local minimum." 
+ ), + ) regmod = traits.Enum( - 'bending_energy', - 'membrane_energy', - argstr='--regmod=%s', - desc=('Regularisation term implementation. Defaults ' - 'to bending_energy. Note that the two functions' - ' have vastly different scales. The membrane ' - 'energy is based on the first derivatives and ' - 'the bending energy on the second derivatives. ' - 'The second derivatives will typically be much ' - 'smaller than the first derivatives, so input ' - 'lambda will have to be larger for ' - 'bending_energy to yield approximately the same' - ' level of regularisation.')) - estmov = traits.Enum( - 1, 0, argstr='--estmov=%d', desc='estimate movements if set') + "bending_energy", + "membrane_energy", + argstr="--regmod=%s", + desc=( + "Regularisation term implementation. Defaults " + "to bending_energy. Note that the two functions" + " have vastly different scales. The membrane " + "energy is based on the first derivatives and " + "the bending energy on the second derivatives. " + "The second derivatives will typically be much " + "smaller than the first derivatives, so input " + "lambda will have to be larger for " + "bending_energy to yield approximately the same" + " level of regularisation." + ), + ) + estmov = traits.Enum(1, 0, argstr="--estmov=%d", desc="estimate movements if set") minmet = traits.Enum( 0, 1, - argstr='--minmet=%d', - desc=('Minimisation method 0=Levenberg-Marquardt, ' - '1=Scaled Conjugate Gradient')) + argstr="--minmet=%d", + desc=( + "Minimisation method 0=Levenberg-Marquardt, " "1=Scaled Conjugate Gradient" + ), + ) splineorder = traits.Int( - argstr='--splineorder=%d', - desc=('order of spline, 2->Qadratic spline, ' - '3->Cubic spline')) + argstr="--splineorder=%d", + desc=("order of spline, 2->Qadratic spline, " "3->Cubic spline"), + ) numprec = traits.Enum( - 'double', - 'float', - argstr='--numprec=%s', - desc=('Precision for representing Hessian, double ' - 'or float.')) + "double", + "float", + argstr="--numprec=%s", + desc=("Precision for representing Hessian, double " "or float."), + ) interp = traits.Enum( - 'spline', - 'linear', - argstr='--interp=%s', - desc='Image interpolation model, linear or spline.') + "spline", + "linear", + argstr="--interp=%s", + desc="Image interpolation model, linear or spline.", + ) scale = traits.Enum( 0, 1, - argstr='--scale=%d', - desc=('If set (=1), the images are individually scaled' - ' to a common mean')) + argstr="--scale=%d", + desc=("If set (=1), the images are individually scaled" " to a common mean"), + ) regrid = traits.Enum( 1, 0, - argstr='--regrid=%d', - desc=('If set (=1), the calculations are done in a ' - 'different grid')) + argstr="--regrid=%d", + desc=("If set (=1), the calculations are done in a " "different grid"), + ) class TOPUPOutputSpec(TraitedSpec): - out_fieldcoef = File( - exists=True, desc='file containing the field coefficients') - out_movpar = File(exists=True, desc='movpar.txt output file') - out_enc_file = File(desc='encoding directions file output for applytopup') - out_field = File(desc='name of image file with field (Hz)') - out_warps = traits.List(File(exists=True), desc='warpfield images') - out_jacs = traits.List(File(exists=True), desc='Jacobian images') - out_mats = traits.List(File(exists=True), desc='realignment matrices') - out_corrected = File(desc='name of 4D image file with unwarped images') - out_logfile = File(desc='name of log-file') + out_fieldcoef = File(exists=True, desc="file containing the field coefficients") + out_movpar = File(exists=True, desc="movpar.txt output file") 
+ out_enc_file = File(desc="encoding directions file output for applytopup") + out_field = File(desc="name of image file with field (Hz)") + out_warps = traits.List(File(exists=True), desc="warpfield images") + out_jacs = traits.List(File(exists=True), desc="Jacobian images") + out_mats = traits.List(File(exists=True), desc="realignment matrices") + out_corrected = File(desc="name of 4D image file with unwarped images") + out_logfile = File(desc="name of log-file") class TOPUP(FSLCommand): @@ -317,59 +344,62 @@ class TOPUP(FSLCommand): >>> res = topup.run() # doctest: +SKIP """ - _cmd = 'topup' + + _cmd = "topup" input_spec = TOPUPInputSpec output_spec = TOPUPOutputSpec def _format_arg(self, name, trait_spec, value): - if name == 'encoding_direction': + if name == "encoding_direction": return trait_spec.argstr % self._generate_encfile() - if name == 'out_base': + if name == "out_base": path, name, ext = split_filename(value) - if path != '': + if path != "": if not os.path.exists(path): - raise ValueError('out_base path must exist if provided') + raise ValueError("out_base path must exist if provided") return super(TOPUP, self)._format_arg(name, trait_spec, value) def _list_outputs(self): outputs = super(TOPUP, self)._list_outputs() - del outputs['out_base'] + del outputs["out_base"] base_path = None if isdefined(self.inputs.out_base): base_path, base, _ = split_filename(self.inputs.out_base) - if base_path == '': + if base_path == "": base_path = None else: - base = split_filename(self.inputs.in_file)[1] + '_base' - outputs['out_fieldcoef'] = self._gen_fname( - base, suffix='_fieldcoef', cwd=base_path) - outputs['out_movpar'] = self._gen_fname( - base, suffix='_movpar', ext='.txt', cwd=base_path) + base = split_filename(self.inputs.in_file)[1] + "_base" + outputs["out_fieldcoef"] = self._gen_fname( + base, suffix="_fieldcoef", cwd=base_path + ) + outputs["out_movpar"] = self._gen_fname( + base, suffix="_movpar", ext=".txt", cwd=base_path + ) n_vols = nb.load(self.inputs.in_file).shape[-1] ext = Info.output_type_to_ext(self.inputs.output_type) - fmt = os.path.abspath('{prefix}_{i:02d}{ext}').format - outputs['out_warps'] = [ + fmt = os.path.abspath("{prefix}_{i:02d}{ext}").format + outputs["out_warps"] = [ fmt(prefix=self.inputs.out_warp_prefix, i=i, ext=ext) for i in range(1, n_vols + 1) ] - outputs['out_jacs'] = [ + outputs["out_jacs"] = [ fmt(prefix=self.inputs.out_jac_prefix, i=i, ext=ext) for i in range(1, n_vols + 1) ] - outputs['out_mats'] = [ + outputs["out_mats"] = [ fmt(prefix=self.inputs.out_mat_prefix, i=i, ext=".mat") for i in range(1, n_vols + 1) ] if isdefined(self.inputs.encoding_direction): - outputs['out_enc_file'] = self._get_encfilename() + outputs["out_enc_file"] = self._get_encfilename() return outputs def _get_encfilename(self): out_file = os.path.join( - os.getcwd(), - ('%s_encfile.txt' % split_filename(self.inputs.in_file)[1])) + os.getcwd(), ("%s_encfile.txt" % split_filename(self.inputs.in_file)[1]) + ) return out_file def _generate_encfile(self): @@ -379,25 +409,28 @@ def _generate_encfile(self): durations = self.inputs.readout_times if len(self.inputs.encoding_direction) != len(durations): if len(self.inputs.readout_times) != 1: - raise ValueError(('Readout time must be a float or match the' - 'length of encoding directions')) + raise ValueError( + ( + "Readout time must be a float or match the" + "length of encoding directions" + ) + ) durations = durations * len(self.inputs.encoding_direction) lines = [] for idx, encdir in 
enumerate(self.inputs.encoding_direction): direction = 1.0 - if encdir.endswith('-'): + if encdir.endswith("-"): direction = -1.0 line = [ - float(val[0] == encdir[0]) * direction - for val in ['x', 'y', 'z'] + float(val[0] == encdir[0]) * direction for val in ["x", "y", "z"] ] + [durations[idx]] lines.append(line) - np.savetxt(out_file, np.array(lines), fmt=b'%d %d %d %.8f') + np.savetxt(out_file, np.array(lines), fmt=b"%d %d %d %.8f") return out_file def _overload_extension(self, value, name=None): - if name == 'out_base': + if name == "out_base": return value return super(TOPUP, self)._overload_extension(value, name) @@ -406,61 +439,65 @@ class ApplyTOPUPInputSpec(FSLCommandInputSpec): in_files = InputMultiPath( File(exists=True), mandatory=True, - desc='name of file with images', - argstr='--imain=%s', - sep=',') + desc="name of file with images", + argstr="--imain=%s", + sep=",", + ) encoding_file = File( exists=True, mandatory=True, - desc='name of text file with PE directions/times', - argstr='--datain=%s') + desc="name of text file with PE directions/times", + argstr="--datain=%s", + ) in_index = traits.List( traits.Int, - argstr='--inindex=%s', - sep=',', - desc='comma separated list of indices corresponding to --datain') + argstr="--inindex=%s", + sep=",", + desc="comma separated list of indices corresponding to --datain", + ) in_topup_fieldcoef = File( exists=True, argstr="--topup=%s", copyfile=False, - requires=['in_topup_movpar'], - desc=('topup file containing the field ' - 'coefficients')) + requires=["in_topup_movpar"], + desc=("topup file containing the field " "coefficients"), + ) in_topup_movpar = File( exists=True, - requires=['in_topup_fieldcoef'], + requires=["in_topup_fieldcoef"], copyfile=False, - desc='topup movpar.txt file') + desc="topup movpar.txt file", + ) out_corrected = File( - desc='output (warped) image', - name_source=['in_files'], - name_template='%s_corrected', - argstr='--out=%s') + desc="output (warped) image", + name_source=["in_files"], + name_template="%s_corrected", + argstr="--out=%s", + ) method = traits.Enum( - 'jac', - 'lsr', - argstr='--method=%s', - desc=('use jacobian modulation (jac) or least-squares' - ' resampling (lsr)')) + "jac", + "lsr", + argstr="--method=%s", + desc=("use jacobian modulation (jac) or least-squares" " resampling (lsr)"), + ) interp = traits.Enum( - 'trilinear', - 'spline', - argstr='--interp=%s', - desc='interpolation method') + "trilinear", "spline", argstr="--interp=%s", desc="interpolation method" + ) datatype = traits.Enum( - 'char', - 'short', - 'int', - 'float', - 'double', - argstr='-d=%s', - desc='force output data type') + "char", + "short", + "int", + "float", + "double", + argstr="-d=%s", + desc="force output data type", + ) class ApplyTOPUPOutputSpec(TraitedSpec): out_corrected = File( - exists=True, desc=('name of 4D image file with ' - 'unwarped images')) + exists=True, desc=("name of 4D image file with " "unwarped images") + ) class ApplyTOPUP(FSLCommand): @@ -489,7 +526,8 @@ class ApplyTOPUP(FSLCommand): >>> res = applytopup.run() # doctest: +SKIP """ - _cmd = 'applytopup' + + _cmd = "applytopup" input_spec = ApplyTOPUPInputSpec output_spec = ApplyTOPUPOutputSpec @@ -500,15 +538,13 @@ def _parse_inputs(self, skip=None): # If not defined, assume index are the first N entries in the # parameters file, for N input images. 
if not isdefined(self.inputs.in_index): - self.inputs.in_index = list( - range(1, - len(self.inputs.in_files) + 1)) + self.inputs.in_index = list(range(1, len(self.inputs.in_files) + 1)) return super(ApplyTOPUP, self)._parse_inputs(skip=skip) def _format_arg(self, name, spec, value): - if name == 'in_topup_fieldcoef': - return spec.argstr % value.split('_fieldcoef')[0] + if name == "in_topup_fieldcoef": + return spec.argstr % value.split("_fieldcoef")[0] return super(ApplyTOPUP, self)._format_arg(name, spec, value) @@ -516,179 +552,196 @@ class EddyInputSpec(FSLCommandInputSpec): in_file = File( exists=True, mandatory=True, - argstr='--imain=%s', - desc=('File containing all the images to estimate ' - 'distortions for')) + argstr="--imain=%s", + desc=("File containing all the images to estimate " "distortions for"), + ) in_mask = File( - exists=True, - mandatory=True, - argstr='--mask=%s', - desc='Mask to indicate brain') + exists=True, mandatory=True, argstr="--mask=%s", desc="Mask to indicate brain" + ) in_index = File( exists=True, mandatory=True, - argstr='--index=%s', - desc=('File containing indices for all volumes in --imain ' - 'into --acqp and --topup')) + argstr="--index=%s", + desc=( + "File containing indices for all volumes in --imain " + "into --acqp and --topup" + ), + ) in_acqp = File( exists=True, mandatory=True, - argstr='--acqp=%s', - desc='File containing acquisition parameters') + argstr="--acqp=%s", + desc="File containing acquisition parameters", + ) in_bvec = File( exists=True, mandatory=True, - argstr='--bvecs=%s', - desc=('File containing the b-vectors for all volumes in ' - '--imain')) + argstr="--bvecs=%s", + desc=("File containing the b-vectors for all volumes in " "--imain"), + ) in_bval = File( exists=True, mandatory=True, - argstr='--bvals=%s', - desc=('File containing the b-values for all volumes in ' - '--imain')) + argstr="--bvals=%s", + desc=("File containing the b-values for all volumes in " "--imain"), + ) out_base = traits.Str( - 'eddy_corrected', - argstr='--out=%s', + "eddy_corrected", + argstr="--out=%s", usedefault=True, - desc=('basename for output (warped) image')) + desc=("basename for output (warped) image"), + ) session = File( exists=True, - argstr='--session=%s', - desc=('File containing session indices for all volumes in ' - '--imain')) + argstr="--session=%s", + desc=("File containing session indices for all volumes in " "--imain"), + ) in_topup_fieldcoef = File( exists=True, argstr="--topup=%s", - requires=['in_topup_movpar'], - desc=('topup file containing the field ' - 'coefficients')) + requires=["in_topup_movpar"], + desc=("topup file containing the field " "coefficients"), + ) in_topup_movpar = File( - exists=True, - requires=['in_topup_fieldcoef'], - desc='topup movpar.txt file') + exists=True, requires=["in_topup_fieldcoef"], desc="topup movpar.txt file" + ) flm = traits.Enum( - 'linear', - 'quadratic', - 'cubic', - argstr='--flm=%s', - desc='First level EC model') + "linear", "quadratic", "cubic", argstr="--flm=%s", desc="First level EC model" + ) slm = traits.Enum( - 'none', - 'linear', - 'quadratic', - argstr='--slm=%s', - desc='Second level EC model') + "none", "linear", "quadratic", argstr="--slm=%s", desc="Second level EC model" + ) fep = traits.Bool( - False, argstr='--fep', desc='Fill empty planes in x- or y-directions') + False, argstr="--fep", desc="Fill empty planes in x- or y-directions" + ) interp = traits.Enum( - 'spline', - 'trilinear', - argstr='--interp=%s', - desc='Interpolation model for estimation step') + 
"spline", + "trilinear", + argstr="--interp=%s", + desc="Interpolation model for estimation step", + ) nvoxhp = traits.Int( - 1000, usedefault=True, - argstr='--nvoxhp=%s', - desc=('# of voxels used to estimate the ' - 'hyperparameters')) + 1000, + usedefault=True, + argstr="--nvoxhp=%s", + desc=("# of voxels used to estimate the " "hyperparameters"), + ) fudge_factor = traits.Float( - 10.0, usedefault=True, - argstr='--ff=%s', - desc=('Fudge factor for hyperparameter ' - 'error variance')) + 10.0, + usedefault=True, + argstr="--ff=%s", + desc=("Fudge factor for hyperparameter " "error variance"), + ) dont_sep_offs_move = traits.Bool( False, - argstr='--dont_sep_offs_move', - desc=('Do NOT attempt to separate ' - 'field offset from subject ' - 'movement')) + argstr="--dont_sep_offs_move", + desc=("Do NOT attempt to separate " "field offset from subject " "movement"), + ) dont_peas = traits.Bool( False, - argstr='--dont_peas', - desc="Do NOT perform a post-eddy alignment of " - "shells") + argstr="--dont_peas", + desc="Do NOT perform a post-eddy alignment of " "shells", + ) fwhm = traits.Float( - desc=('FWHM for conditioning filter when estimating ' - 'the parameters'), - argstr='--fwhm=%s') + desc=("FWHM for conditioning filter when estimating " "the parameters"), + argstr="--fwhm=%s", + ) - niter = traits.Int(5, usedefault=True, - argstr='--niter=%s', desc='Number of iterations') + niter = traits.Int( + 5, usedefault=True, argstr="--niter=%s", desc="Number of iterations" + ) method = traits.Enum( - 'jac', - 'lsr', - argstr='--resamp=%s', - desc=('Final resampling method (jacobian/least ' - 'squares)')) + "jac", + "lsr", + argstr="--resamp=%s", + desc=("Final resampling method (jacobian/least " "squares)"), + ) repol = traits.Bool( - False, argstr='--repol', desc='Detect and replace outlier slices') + False, argstr="--repol", desc="Detect and replace outlier slices" + ) num_threads = traits.Int( - 1, - usedefault=True, - nohash=True, - desc="Number of openmp threads to use") + 1, usedefault=True, nohash=True, desc="Number of openmp threads to use" + ) is_shelled = traits.Bool( False, - argstr='--data_is_shelled', + argstr="--data_is_shelled", desc="Override internal check to ensure that " "date are acquired on a set of b-value " - "shells") + "shells", + ) field = traits.Str( - argstr='--field=%s', + argstr="--field=%s", desc="NonTOPUP fieldmap scaled in Hz - filename has " "to be provided without an extension. 
TOPUP is " - "strongly recommended") + "strongly recommended", + ) field_mat = File( exists=True, - argstr='--field_mat=%s', + argstr="--field_mat=%s", desc="Matrix that specifies the relative locations of " "the field specified by --field and first volume " - "in file --imain") + "in file --imain", + ) use_cuda = traits.Bool(False, desc="Run eddy using cuda gpu") cnr_maps = traits.Bool( - False, desc='Output CNR-Maps', argstr='--cnr_maps', min_ver='5.0.10') + False, desc="Output CNR-Maps", argstr="--cnr_maps", min_ver="5.0.10" + ) residuals = traits.Bool( - False, desc='Output Residuals', argstr='--residuals', min_ver='5.0.10') + False, desc="Output Residuals", argstr="--residuals", min_ver="5.0.10" + ) class EddyOutputSpec(TraitedSpec): out_corrected = File( - exists=True, desc='4D image file containing all the corrected volumes') + exists=True, desc="4D image file containing all the corrected volumes" + ) out_parameter = File( exists=True, - desc=('text file with parameters definining the field and' - 'movement for each scan')) + desc=( + "text file with parameters definining the field and" + "movement for each scan" + ), + ) out_rotated_bvecs = File( - exists=True, desc='File containing rotated b-values for all volumes') + exists=True, desc="File containing rotated b-values for all volumes" + ) out_movement_rms = File( - exists=True, desc='Summary of the "total movement" in each volume') + exists=True, desc='Summary of the "total movement" in each volume' + ) out_restricted_movement_rms = File( exists=True, - desc=('Summary of the "total movement" in each volume ' - 'disregarding translation in the PE direction')) + desc=( + 'Summary of the "total movement" in each volume ' + "disregarding translation in the PE direction" + ), + ) out_shell_alignment_parameters = File( exists=True, - desc=('File containing rigid body movement parameters ' - 'between the different shells as estimated by a ' - 'post-hoc mutual information based registration')) + desc=( + "File containing rigid body movement parameters " + "between the different shells as estimated by a " + "post-hoc mutual information based registration" + ), + ) out_outlier_report = File( exists=True, - desc=('Text-file with a plain language report on what ' - 'outlier slices eddy has found')) - out_cnr_maps = File( - exists=True, desc='path/name of file with the cnr_maps') - out_residuals = File( - exists=True, desc='path/name of file with the residuals') + desc=( + "Text-file with a plain language report on what " + "outlier slices eddy has found" + ), + ) + out_cnr_maps = File(exists=True, desc="path/name of file with the cnr_maps") + out_residuals = File(exists=True, desc="path/name of file with the residuals") class Eddy(FSLCommand): @@ -723,7 +776,8 @@ class Eddy(FSLCommand): >>> res = eddy.run() # doctest: +SKIP """ - _cmd = 'eddy_openmp' + + _cmd = "eddy_openmp" input_spec = EddyInputSpec output_spec = EddyOutputSpec @@ -731,34 +785,38 @@ class Eddy(FSLCommand): def __init__(self, **inputs): super(Eddy, self).__init__(**inputs) - self.inputs.on_trait_change(self._num_threads_update, 'num_threads') + self.inputs.on_trait_change(self._num_threads_update, "num_threads") if not isdefined(self.inputs.num_threads): self.inputs.num_threads = self._num_threads else: self._num_threads_update() - self.inputs.on_trait_change(self._use_cuda, 'use_cuda') + self.inputs.on_trait_change(self._use_cuda, "use_cuda") if isdefined(self.inputs.use_cuda): self._use_cuda() def _num_threads_update(self): self._num_threads = self.inputs.num_threads if 
not isdefined(self.inputs.num_threads): - if 'OMP_NUM_THREADS' in self.inputs.environ: - del self.inputs.environ['OMP_NUM_THREADS'] + if "OMP_NUM_THREADS" in self.inputs.environ: + del self.inputs.environ["OMP_NUM_THREADS"] else: - self.inputs.environ['OMP_NUM_THREADS'] = str( - self.inputs.num_threads) + self.inputs.environ["OMP_NUM_THREADS"] = str(self.inputs.num_threads) def _use_cuda(self): - self._cmd = 'eddy_cuda' if self.inputs.use_cuda else 'eddy_openmp' + self._cmd = "eddy_cuda" if self.inputs.use_cuda else "eddy_openmp" def _run_interface(self, runtime): # If 'eddy_openmp' is missing, use 'eddy' - FSLDIR = os.getenv('FSLDIR', '') + FSLDIR = os.getenv("FSLDIR", "") cmd = self._cmd - if all((FSLDIR != '', cmd == 'eddy_openmp', - not os.path.exists(os.path.join(FSLDIR, 'bin', cmd)))): - self._cmd = 'eddy' + if all( + ( + FSLDIR != "", + cmd == "eddy_openmp", + not os.path.exists(os.path.join(FSLDIR, "bin", cmd)), + ) + ): + self._cmd = "eddy" runtime = super(Eddy, self)._run_interface(runtime) # Restore command to avoid side-effects @@ -766,72 +824,77 @@ def _run_interface(self, runtime): return runtime def _format_arg(self, name, spec, value): - if name == 'in_topup_fieldcoef': - return spec.argstr % value.split('_fieldcoef')[0] - if name == 'out_base': + if name == "in_topup_fieldcoef": + return spec.argstr % value.split("_fieldcoef")[0] + if name == "out_base": return spec.argstr % os.path.abspath(value) return super(Eddy, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_corrected'] = os.path.abspath( - '%s.nii.gz' % self.inputs.out_base) - outputs['out_parameter'] = os.path.abspath( - '%s.eddy_parameters' % self.inputs.out_base) + outputs["out_corrected"] = os.path.abspath("%s.nii.gz" % self.inputs.out_base) + outputs["out_parameter"] = os.path.abspath( + "%s.eddy_parameters" % self.inputs.out_base + ) # File generation might depend on the version of EDDY out_rotated_bvecs = os.path.abspath( - '%s.eddy_rotated_bvecs' % self.inputs.out_base) + "%s.eddy_rotated_bvecs" % self.inputs.out_base + ) out_movement_rms = os.path.abspath( - '%s.eddy_movement_rms' % self.inputs.out_base) + "%s.eddy_movement_rms" % self.inputs.out_base + ) out_restricted_movement_rms = os.path.abspath( - '%s.eddy_restricted_movement_rms' % self.inputs.out_base) + "%s.eddy_restricted_movement_rms" % self.inputs.out_base + ) out_shell_alignment_parameters = os.path.abspath( - '%s.eddy_post_eddy_shell_alignment_parameters' % - self.inputs.out_base) + "%s.eddy_post_eddy_shell_alignment_parameters" % self.inputs.out_base + ) out_outlier_report = os.path.abspath( - '%s.eddy_outlier_report' % self.inputs.out_base) + "%s.eddy_outlier_report" % self.inputs.out_base + ) if isdefined(self.inputs.cnr_maps) and self.inputs.cnr_maps: out_cnr_maps = os.path.abspath( - '%s.eddy_cnr_maps.nii.gz' % self.inputs.out_base) + "%s.eddy_cnr_maps.nii.gz" % self.inputs.out_base + ) if os.path.exists(out_cnr_maps): - outputs['out_cnr_maps'] = out_cnr_maps + outputs["out_cnr_maps"] = out_cnr_maps if isdefined(self.inputs.residuals) and self.inputs.residuals: out_residuals = os.path.abspath( - '%s.eddy_residuals.nii.gz' % self.inputs.out_base) + "%s.eddy_residuals.nii.gz" % self.inputs.out_base + ) if os.path.exists(out_residuals): - outputs['out_residuals'] = out_residuals + outputs["out_residuals"] = out_residuals if os.path.exists(out_rotated_bvecs): - outputs['out_rotated_bvecs'] = out_rotated_bvecs + outputs["out_rotated_bvecs"] = out_rotated_bvecs if 
os.path.exists(out_movement_rms): - outputs['out_movement_rms'] = out_movement_rms + outputs["out_movement_rms"] = out_movement_rms if os.path.exists(out_restricted_movement_rms): - outputs['out_restricted_movement_rms'] = \ - out_restricted_movement_rms + outputs["out_restricted_movement_rms"] = out_restricted_movement_rms if os.path.exists(out_shell_alignment_parameters): - outputs['out_shell_alignment_parameters'] = \ - out_shell_alignment_parameters + outputs["out_shell_alignment_parameters"] = out_shell_alignment_parameters if os.path.exists(out_outlier_report): - outputs['out_outlier_report'] = out_outlier_report + outputs["out_outlier_report"] = out_outlier_report return outputs class SigLossInputSpec(FSLCommandInputSpec): - in_file = File( - mandatory=True, exists=True, argstr='-i %s', desc='b0 fieldmap file') + in_file = File(mandatory=True, exists=True, argstr="-i %s", desc="b0 fieldmap file") out_file = File( - argstr='-s %s', desc='output signal loss estimate file', genfile=True) + argstr="-s %s", desc="output signal loss estimate file", genfile=True + ) - mask_file = File(exists=True, argstr='-m %s', desc='brain mask file') - echo_time = traits.Float(argstr='--te=%f', desc='echo time in seconds') + mask_file = File(exists=True, argstr="-m %s", desc="brain mask file") + echo_time = traits.Float(argstr="--te=%f", desc="echo time in seconds") slice_direction = traits.Enum( - 'x', 'y', 'z', argstr='-d %s', desc='slicing direction') + "x", "y", "z", argstr="-d %s", desc="slicing direction" + ) class SigLossOuputSpec(TraitedSpec): - out_file = File(exists=True, desc='signal loss estimate file') + out_file = File(exists=True, desc="signal loss estimate file") class SigLoss(FSLCommand): @@ -852,118 +915,118 @@ class SigLoss(FSLCommand): """ + input_spec = SigLossInputSpec output_spec = SigLossOuputSpec - _cmd = 'sigloss' + _cmd = "sigloss" def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = self.inputs.out_file - if ((not isdefined(outputs['out_file'])) - and (isdefined(self.inputs.in_file))): - outputs['out_file'] = self._gen_fname( - self.inputs.in_file, suffix='_sigloss') + outputs["out_file"] = self.inputs.out_file + if (not isdefined(outputs["out_file"])) and (isdefined(self.inputs.in_file)): + outputs["out_file"] = self._gen_fname( + self.inputs.in_file, suffix="_sigloss" + ) return outputs def _gen_filename(self, name): - if name == 'out_file': - return self._list_outputs()['out_file'] + if name == "out_file": + return self._list_outputs()["out_file"] return None class EpiRegInputSpec(FSLCommandInputSpec): epi = File( - exists=True, - argstr='--epi=%s', - mandatory=True, - position=-4, - desc='EPI image') + exists=True, argstr="--epi=%s", mandatory=True, position=-4, desc="EPI image" + ) t1_head = File( exists=True, - argstr='--t1=%s', + argstr="--t1=%s", mandatory=True, position=-3, - desc='wholehead T1 image') + desc="wholehead T1 image", + ) t1_brain = File( exists=True, - argstr='--t1brain=%s', + argstr="--t1brain=%s", mandatory=True, position=-2, - desc='brain extracted T1 image') + desc="brain extracted T1 image", + ) out_base = traits.String( "epi2struct", - desc='output base name', - argstr='--out=%s', + desc="output base name", + argstr="--out=%s", position=-1, - usedefault=True) - fmap = File( - exists=True, argstr='--fmap=%s', desc='fieldmap image (in rad/s)') + usedefault=True, + ) + fmap = File(exists=True, argstr="--fmap=%s", desc="fieldmap image (in rad/s)") fmapmag = File( - exists=True, - argstr='--fmapmag=%s', - desc='fieldmap 
magnitude image - wholehead') + exists=True, argstr="--fmapmag=%s", desc="fieldmap magnitude image - wholehead" + ) fmapmagbrain = File( exists=True, - argstr='--fmapmagbrain=%s', - desc='fieldmap magnitude image - brain extracted') + argstr="--fmapmagbrain=%s", + desc="fieldmap magnitude image - brain extracted", + ) wmseg = File( exists=True, - argstr='--wmseg=%s', - desc='white matter segmentation of T1 image, has to be named \ - like the t1brain and end on _wmseg') + argstr="--wmseg=%s", + desc="white matter segmentation of T1 image, has to be named \ + like the t1brain and end on _wmseg", + ) echospacing = traits.Float( - argstr='--echospacing=%f', - desc='Effective EPI echo spacing \ - (sometimes called dwell time) - in seconds') + argstr="--echospacing=%f", + desc="Effective EPI echo spacing \ + (sometimes called dwell time) - in seconds", + ) pedir = traits.Enum( - 'x', - 'y', - 'z', - '-x', - '-y', - '-z', - argstr='--pedir=%s', - desc='phase encoding direction, dir = x/y/z/-x/-y/-z') + "x", + "y", + "z", + "-x", + "-y", + "-z", + argstr="--pedir=%s", + desc="phase encoding direction, dir = x/y/z/-x/-y/-z", + ) weight_image = File( - exists=True, - argstr='--weight=%s', - desc='weighting image (in T1 space)') + exists=True, argstr="--weight=%s", desc="weighting image (in T1 space)" + ) no_fmapreg = traits.Bool( False, - argstr='--nofmapreg', - desc='do not perform registration of fmap to T1 \ - (use if fmap already registered)') + argstr="--nofmapreg", + desc="do not perform registration of fmap to T1 \ + (use if fmap already registered)", + ) no_clean = traits.Bool( True, - argstr='--noclean', + argstr="--noclean", usedefault=True, - desc='do not clean up intermediate files') + desc="do not clean up intermediate files", + ) class EpiRegOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='unwarped and coregistered epi input') - out_1vol = File( - exists=True, desc='unwarped and coregistered single volume') - fmap2str_mat = File( - exists=True, desc='rigid fieldmap-to-structural transform') - fmap2epi_mat = File(exists=True, desc='rigid fieldmap-to-epi transform') - fmap_epi = File(exists=True, desc='fieldmap in epi space') - fmap_str = File(exists=True, desc='fieldmap in structural space') - fmapmag_str = File( - exists=True, desc='fieldmap magnitude image in structural space') - epi2str_inv = File(exists=True, desc='rigid structural-to-epi transform') - epi2str_mat = File(exists=True, desc='rigid epi-to-structural transform') - shiftmap = File(exists=True, desc='shiftmap in epi space') + out_file = File(exists=True, desc="unwarped and coregistered epi input") + out_1vol = File(exists=True, desc="unwarped and coregistered single volume") + fmap2str_mat = File(exists=True, desc="rigid fieldmap-to-structural transform") + fmap2epi_mat = File(exists=True, desc="rigid fieldmap-to-epi transform") + fmap_epi = File(exists=True, desc="fieldmap in epi space") + fmap_str = File(exists=True, desc="fieldmap in structural space") + fmapmag_str = File(exists=True, desc="fieldmap magnitude image in structural space") + epi2str_inv = File(exists=True, desc="rigid structural-to-epi transform") + epi2str_mat = File(exists=True, desc="rigid epi-to-structural transform") + shiftmap = File(exists=True, desc="shiftmap in epi space") fullwarp = File( exists=True, - desc='warpfield to unwarp epi and transform into \ - structural space') - wmseg = File( - exists=True, desc='white matter segmentation used in flirt bbr') - seg = File( - exists=True, desc='white matter, gray matter, csf 
segmentation') - wmedge = File(exists=True, desc='white matter edges for visualization') + desc="warpfield to unwarp epi and transform into \ + structural space", + ) + wmseg = File(exists=True, desc="white matter segmentation used in flirt bbr") + seg = File(exists=True, desc="white matter, gray matter, csf segmentation") + wmedge = File(exists=True, desc="white matter edges for visualization") class EpiReg(FSLCommand): @@ -993,44 +1056,59 @@ class EpiReg(FSLCommand): >>> epireg.run() # doctest: +SKIP """ - _cmd = 'epi_reg' + + _cmd = "epi_reg" input_spec = EpiRegInputSpec output_spec = EpiRegOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = os.path.join(os.getcwd(), - self.inputs.out_base + '.nii.gz') - if (not (isdefined(self.inputs.no_fmapreg) and self.inputs.no_fmapreg) - and isdefined(self.inputs.fmap)): - outputs['out_1vol'] = os.path.join( - os.getcwd(), self.inputs.out_base + '_1vol.nii.gz') - outputs['fmap2str_mat'] = os.path.join( - os.getcwd(), self.inputs.out_base + '_fieldmap2str.mat') - outputs['fmap2epi_mat'] = os.path.join( - os.getcwd(), self.inputs.out_base + '_fieldmaprads2epi.mat') - outputs['fmap_epi'] = os.path.join( - os.getcwd(), self.inputs.out_base + '_fieldmaprads2epi.nii.gz') - outputs['fmap_str'] = os.path.join( - os.getcwd(), self.inputs.out_base + '_fieldmaprads2str.nii.gz') - outputs['fmapmag_str'] = os.path.join( - os.getcwd(), self.inputs.out_base + '_fieldmap2str.nii.gz') - outputs['shiftmap'] = os.path.join( - os.getcwd(), - self.inputs.out_base + '_fieldmaprads2epi_shift.nii.gz') - outputs['fullwarp'] = os.path.join( - os.getcwd(), self.inputs.out_base + '_warp.nii.gz') - outputs['epi2str_inv'] = os.path.join( - os.getcwd(), self.inputs.out_base + '_inv.mat') - - outputs['epi2str_mat'] = os.path.join(os.getcwd(), - self.inputs.out_base + '.mat') - outputs['wmedge'] = os.path.join( - os.getcwd(), self.inputs.out_base + '_fast_wmedge.nii.gz') - outputs['wmseg'] = os.path.join( - os.getcwd(), self.inputs.out_base + '_fast_wmseg.nii.gz') - outputs['seg'] = os.path.join( - os.getcwd(), self.inputs.out_base + '_fast_seg.nii.gz') + outputs["out_file"] = os.path.join( + os.getcwd(), self.inputs.out_base + ".nii.gz" + ) + if not ( + isdefined(self.inputs.no_fmapreg) and self.inputs.no_fmapreg + ) and isdefined(self.inputs.fmap): + outputs["out_1vol"] = os.path.join( + os.getcwd(), self.inputs.out_base + "_1vol.nii.gz" + ) + outputs["fmap2str_mat"] = os.path.join( + os.getcwd(), self.inputs.out_base + "_fieldmap2str.mat" + ) + outputs["fmap2epi_mat"] = os.path.join( + os.getcwd(), self.inputs.out_base + "_fieldmaprads2epi.mat" + ) + outputs["fmap_epi"] = os.path.join( + os.getcwd(), self.inputs.out_base + "_fieldmaprads2epi.nii.gz" + ) + outputs["fmap_str"] = os.path.join( + os.getcwd(), self.inputs.out_base + "_fieldmaprads2str.nii.gz" + ) + outputs["fmapmag_str"] = os.path.join( + os.getcwd(), self.inputs.out_base + "_fieldmap2str.nii.gz" + ) + outputs["shiftmap"] = os.path.join( + os.getcwd(), self.inputs.out_base + "_fieldmaprads2epi_shift.nii.gz" + ) + outputs["fullwarp"] = os.path.join( + os.getcwd(), self.inputs.out_base + "_warp.nii.gz" + ) + outputs["epi2str_inv"] = os.path.join( + os.getcwd(), self.inputs.out_base + "_inv.mat" + ) + + outputs["epi2str_mat"] = os.path.join( + os.getcwd(), self.inputs.out_base + ".mat" + ) + outputs["wmedge"] = os.path.join( + os.getcwd(), self.inputs.out_base + "_fast_wmedge.nii.gz" + ) + outputs["wmseg"] = os.path.join( + os.getcwd(), self.inputs.out_base + 
"_fast_wmseg.nii.gz" + ) + outputs["seg"] = os.path.join( + os.getcwd(), self.inputs.out_base + "_fast_seg.nii.gz" + ) return outputs @@ -1043,44 +1121,49 @@ def _list_outputs(self): class EPIDeWarpInputSpec(FSLCommandInputSpec): mag_file = File( exists=True, - desc='Magnitude file', - argstr='--mag %s', + desc="Magnitude file", + argstr="--mag %s", position=0, - mandatory=True) + mandatory=True, + ) dph_file = File( exists=True, - desc='Phase file assumed to be scaled from 0 to 4095', - argstr='--dph %s', - mandatory=True) + desc="Phase file assumed to be scaled from 0 to 4095", + argstr="--dph %s", + mandatory=True, + ) exf_file = File( - exists=True, - desc='example func volume (or use epi)', - argstr='--exf %s') - epi_file = File( - exists=True, desc='EPI volume to unwarp', argstr='--epi %s') + exists=True, desc="example func volume (or use epi)", argstr="--exf %s" + ) + epi_file = File(exists=True, desc="EPI volume to unwarp", argstr="--epi %s") tediff = traits.Float( 2.46, usedefault=True, - desc='difference in B0 field map TEs', - argstr='--tediff %s') + desc="difference in B0 field map TEs", + argstr="--tediff %s", + ) esp = traits.Float( - 0.58, desc='EPI echo spacing', argstr='--esp %s', usedefault=True) + 0.58, desc="EPI echo spacing", argstr="--esp %s", usedefault=True + ) sigma = traits.Int( 2, usedefault=True, - argstr='--sigma %s', + argstr="--sigma %s", desc="2D spatial gaussing smoothing \ - stdev (default = 2mm)") - vsm = traits.String( - genfile=True, desc='voxel shift map', argstr='--vsm %s') + stdev (default = 2mm)", + ) + vsm = traits.String(genfile=True, desc="voxel shift map", argstr="--vsm %s") exfdw = traits.String( - desc='dewarped example func volume', genfile=True, argstr='--exfdw %s') + desc="dewarped example func volume", genfile=True, argstr="--exfdw %s" + ) epidw = traits.String( - desc='dewarped epi volume', genfile=False, argstr='--epidw %s') - tmpdir = traits.String(genfile=True, desc='tmpdir', argstr='--tmpdir %s') + desc="dewarped epi volume", genfile=False, argstr="--epidw %s" + ) + tmpdir = traits.String(genfile=True, desc="tmpdir", argstr="--tmpdir %s") nocleanup = traits.Bool( - True, usedefault=True, desc='no cleanup', argstr='--nocleanup') - cleanup = traits.Bool(desc='cleanup', argstr='--cleanup') + True, usedefault=True, desc="no cleanup", argstr="--nocleanup" + ) + cleanup = traits.Bool(desc="cleanup", argstr="--cleanup") class EPIDeWarpOutputSpec(TraitedSpec): @@ -1115,14 +1198,19 @@ class EPIDeWarp(FSLCommand): """ - _cmd = 'epidewarp.fsl' + + _cmd = "epidewarp.fsl" input_spec = EPIDeWarpInputSpec output_spec = EPIDeWarpOutputSpec def __init__(self, **inputs): - warnings.warn(("Deprecated: Please use " - "nipype.workflows.dmri.preprocess.epi.sdc_fmb instead"), - DeprecationWarning) + warnings.warn( + ( + "Deprecated: Please use " + "nipype.workflows.dmri.preprocess.epi.sdc_fmb instead" + ), + DeprecationWarning, + ) return super(EPIDeWarp, self).__init__(**inputs) def _run_interface(self, runtime): @@ -1132,70 +1220,72 @@ def _run_interface(self, runtime): return runtime def _gen_filename(self, name): - if name == 'exfdw': + if name == "exfdw": if isdefined(self.inputs.exf_file): return self._gen_fname(self.inputs.exf_file, suffix="_exfdw") else: return self._gen_fname("exfdw") - if name == 'epidw': + if name == "epidw": if isdefined(self.inputs.epi_file): return self._gen_fname(self.inputs.epi_file, suffix="_epidw") - if name == 'vsm': - return self._gen_fname('vsm') - if name == 'tmpdir': - return os.path.join(os.getcwd(), 'temp') + if 
name == "vsm": + return self._gen_fname("vsm") + if name == "tmpdir": + return os.path.join(os.getcwd(), "temp") return None def _list_outputs(self): outputs = self.output_spec().get() if not isdefined(self.inputs.exfdw): - outputs['exfdw'] = self._gen_filename('exfdw') + outputs["exfdw"] = self._gen_filename("exfdw") else: - outputs['exfdw'] = self.inputs.exfdw + outputs["exfdw"] = self.inputs.exfdw if isdefined(self.inputs.epi_file): if isdefined(self.inputs.epidw): - outputs['unwarped_file'] = self.inputs.epidw + outputs["unwarped_file"] = self.inputs.epidw else: - outputs['unwarped_file'] = self._gen_filename('epidw') + outputs["unwarped_file"] = self._gen_filename("epidw") if not isdefined(self.inputs.vsm): - outputs['vsm_file'] = self._gen_filename('vsm') + outputs["vsm_file"] = self._gen_filename("vsm") else: - outputs['vsm_file'] = self._gen_fname(self.inputs.vsm) + outputs["vsm_file"] = self._gen_fname(self.inputs.vsm) if not isdefined(self.inputs.tmpdir): - outputs['exf_mask'] = self._gen_fname( - cwd=self._gen_filename('tmpdir'), basename='maskexf') + outputs["exf_mask"] = self._gen_fname( + cwd=self._gen_filename("tmpdir"), basename="maskexf" + ) else: - outputs['exf_mask'] = self._gen_fname( - cwd=self.inputs.tmpdir, basename='maskexf') + outputs["exf_mask"] = self._gen_fname( + cwd=self.inputs.tmpdir, basename="maskexf" + ) return outputs class EddyCorrectInputSpec(FSLCommandInputSpec): in_file = File( - exists=True, - desc='4D input file', - argstr='%s', - position=0, - mandatory=True) + exists=True, desc="4D input file", argstr="%s", position=0, mandatory=True + ) out_file = File( - desc='4D output file', - argstr='%s', + desc="4D output file", + argstr="%s", position=1, - name_source=['in_file'], - name_template='%s_edc', - output_name='eddy_corrected') + name_source=["in_file"], + name_template="%s_edc", + output_name="eddy_corrected", + ) ref_num = traits.Int( 0, - argstr='%d', + argstr="%d", position=2, - desc='reference number', + desc="reference number", mandatory=True, - usedefault=True) + usedefault=True, + ) class EddyCorrectOutputSpec(TraitedSpec): eddy_corrected = File( - exists=True, desc='path/name of 4D eddy corrected output file') + exists=True, desc="path/name of 4D eddy corrected output file" + ) class EddyCorrect(FSLCommand): @@ -1214,13 +1304,16 @@ class EddyCorrect(FSLCommand): 'eddy_correct diffusion.nii diffusion_edc.nii 0' """ - _cmd = 'eddy_correct' + + _cmd = "eddy_correct" input_spec = EddyCorrectInputSpec output_spec = EddyCorrectOutputSpec def __init__(self, **inputs): - warnings.warn(("Deprecated: Please use nipype.interfaces.fsl.epi.Eddy " - "instead"), DeprecationWarning) + warnings.warn( + ("Deprecated: Please use nipype.interfaces.fsl.epi.Eddy " "instead"), + DeprecationWarning, + ) return super(EddyCorrect, self).__init__(**inputs) def _run_interface(self, runtime): @@ -1232,111 +1325,111 @@ def _run_interface(self, runtime): class EddyQuadInputSpec(FSLCommandInputSpec): base_name = traits.Str( - 'eddy_corrected', + "eddy_corrected", usedefault=True, - argstr='%s', - desc=("Basename (including path) for EDDY output files, i.e., " - "corrected images and QC files"), + argstr="%s", + desc=( + "Basename (including path) for EDDY output files, i.e., " + "corrected images and QC files" + ), position=0, ) idx_file = File( exists=True, mandatory=True, argstr="--eddyIdx %s", - desc=("File containing indices for all volumes into acquisition " - "parameters") + desc=("File containing indices for all volumes into acquisition " "parameters"), ) 
param_file = File( exists=True, mandatory=True, argstr="--eddyParams %s", - desc="File containing acquisition parameters" + desc="File containing acquisition parameters", ) mask_file = File( - exists=True, - mandatory=True, - argstr="--mask %s", - desc="Binary mask file" + exists=True, mandatory=True, argstr="--mask %s", desc="Binary mask file" ) bval_file = File( - exists=True, - mandatory=True, - argstr="--bvals %s", - desc="b-values file" + exists=True, mandatory=True, argstr="--bvals %s", desc="b-values file" ) bvec_file = File( exists=True, argstr="--bvecs %s", - desc=("b-vectors file - only used when .eddy_residuals " - "file is present") + desc=( + "b-vectors file - only used when .eddy_residuals " + "file is present" + ), ) output_dir = traits.Str( - name_template='%s.qc', - name_source=['base_name'], - argstr='--output-dir %s', + name_template="%s.qc", + name_source=["base_name"], + argstr="--output-dir %s", desc="Output directory - default = '.qc'", ) field = File( - exists=True, - argstr='--field %s', - desc="TOPUP estimated field (in Hz)", + exists=True, argstr="--field %s", desc="TOPUP estimated field (in Hz)", ) slice_spec = File( exists=True, - argstr='--slspec %s', + argstr="--slspec %s", desc="Text file specifying slice/group acquisition", ) - verbose = traits.Bool( - argstr='--verbose', - desc="Display debug messages", - ) + verbose = traits.Bool(argstr="--verbose", desc="Display debug messages",) class EddyQuadOutputSpec(TraitedSpec): qc_json = File( exists=True, - desc=("Single subject database containing quality metrics and data " - "info.") - ) - qc_pdf = File( - exists=True, - desc="Single subject QC report." + desc=("Single subject database containing quality metrics and data " "info."), ) + qc_pdf = File(exists=True, desc="Single subject QC report.") avg_b_png = traits.List( File(exists=True), - desc=("Image showing mid-sagittal, -coronal and -axial slices of " - "each averaged b-shell volume.") + desc=( + "Image showing mid-sagittal, -coronal and -axial slices of " + "each averaged b-shell volume." + ), ) avg_b0_pe_png = traits.List( File(exists=True), - desc=("Image showing mid-sagittal, -coronal and -axial slices of " - "each averaged pe-direction b0 volume. Generated when using " - "the -f option.") + desc=( + "Image showing mid-sagittal, -coronal and -axial slices of " + "each averaged pe-direction b0 volume. Generated when using " + "the -f option." + ), ) cnr_png = traits.List( File(exists=True), - desc=("Image showing mid-sagittal, -coronal and -axial slices of " - "each b-shell CNR volume. Generated when CNR maps are " - "available.") + desc=( + "Image showing mid-sagittal, -coronal and -axial slices of " + "each b-shell CNR volume. Generated when CNR maps are " + "available." + ), ) vdm_png = File( exists=True, - desc=("Image showing mid-sagittal, -coronal and -axial slices of " - "the voxel displacement map. Generated when using the -f " - "option.") + desc=( + "Image showing mid-sagittal, -coronal and -axial slices of " + "the voxel displacement map. Generated when using the -f " + "option." + ), ) residuals = File( exists=True, - desc=("Text file containing the volume-wise mask-averaged squared " - "residuals. Generated when residual maps are available.") + desc=( + "Text file containing the volume-wise mask-averaged squared " + "residuals. Generated when residual maps are available." + ), ) clean_volumes = File( exists=True, - desc=("Text file containing a list of clean volumes, based on " - "the eddy squared residuals. 
To generate a version of the " - "pre-processed dataset without outlier volumes, use: " - "`fslselectvols -i -o " - "eddy_corrected_data_clean --vols=vols_no_outliers.txt`") + desc=( + "Text file containing a list of clean volumes, based on " + "the eddy squared residuals. To generate a version of the " + "pre-processed dataset without outlier volumes, use: " + "`fslselectvols -i -o " + "eddy_corrected_data_clean --vols=vols_no_outliers.txt`" + ), ) @@ -1368,52 +1461,52 @@ class EddyQuad(FSLCommand): >>> res = quad.run() # doctest: +SKIP """ - _cmd = 'eddy_quad' + + _cmd = "eddy_quad" input_spec = EddyQuadInputSpec output_spec = EddyQuadOutputSpec def _list_outputs(self): from glob import glob + outputs = self.output_spec().get() # If the output directory isn't defined, the interface seems to use # the default but not set its value in `self.inputs.output_dir` if not isdefined(self.inputs.output_dir): - out_dir = os.path.abspath(os.path.basename(self.inputs.base_name) + '.qc') + out_dir = os.path.abspath(os.path.basename(self.inputs.base_name) + ".qc") else: out_dir = os.path.abspath(self.inputs.output_dir) - outputs['qc_json'] = os.path.join(out_dir, 'qc.json') - outputs['qc_pdf'] = os.path.join(out_dir, 'qc.pdf') + outputs["qc_json"] = os.path.join(out_dir, "qc.json") + outputs["qc_pdf"] = os.path.join(out_dir, "qc.pdf") # Grab all b* files here. This will also grab the b0_pe* files # as well, but only if the field input was provided. So we'll remove # them later in the next conditional. - outputs['avg_b_png'] = sorted(glob( - os.path.join(out_dir, 'avg_b*.png') - )) + outputs["avg_b_png"] = sorted(glob(os.path.join(out_dir, "avg_b*.png"))) if isdefined(self.inputs.field): - outputs['avg_b0_pe_png'] = sorted(glob( - os.path.join(out_dir, 'avg_b0_pe*.png') - )) + outputs["avg_b0_pe_png"] = sorted( + glob(os.path.join(out_dir, "avg_b0_pe*.png")) + ) # The previous glob for `avg_b_png` also grabbed the # `avg_b0_pe_png` files so we have to remove them # from `avg_b_png`. 
- for fname in outputs['avg_b0_pe_png']: - outputs['avg_b_png'].remove(fname) + for fname in outputs["avg_b0_pe_png"]: + outputs["avg_b_png"].remove(fname) - outputs['vdm_png'] = os.path.join(out_dir, 'vdm.png') + outputs["vdm_png"] = os.path.join(out_dir, "vdm.png") - outputs['cnr_png'] = sorted(glob(os.path.join(out_dir, 'cnr*.png'))) + outputs["cnr_png"] = sorted(glob(os.path.join(out_dir, "cnr*.png"))) - residuals = os.path.join(out_dir, 'eddy_msr.txt') + residuals = os.path.join(out_dir, "eddy_msr.txt") if os.path.isfile(residuals): - outputs['residuals'] = residuals + outputs["residuals"] = residuals - clean_volumes = os.path.join(out_dir, 'vols_no_outliers.txt') + clean_volumes = os.path.join(out_dir, "vols_no_outliers.txt") if os.path.isfile(clean_volumes): - outputs['clean_volumes'] = clean_volumes + outputs["clean_volumes"] = clean_volumes return outputs diff --git a/nipype/interfaces/fsl/fix.py b/nipype/interfaces/fsl/fix.py index ab30c5da90..769513f8c3 100644 --- a/nipype/interfaces/fsl/fix.py +++ b/nipype/interfaces/fsl/fix.py @@ -55,9 +55,19 @@ """ -from ..base import (TraitedSpec, CommandLineInputSpec, CommandLine, - InputMultiPath, OutputMultiPath, BaseInterface, - BaseInterfaceInputSpec, traits, Directory, File, isdefined) +from ..base import ( + TraitedSpec, + CommandLineInputSpec, + CommandLine, + InputMultiPath, + OutputMultiPath, + BaseInterface, + BaseInterfaceInputSpec, + traits, + Directory, + File, + isdefined, +) import os @@ -65,29 +75,32 @@ class TrainingSetCreatorInputSpec(BaseInterfaceInputSpec): mel_icas_in = InputMultiPath( Directory(exists=True), copyfile=False, - desc='Melodic output directories', - argstr='%s', - position=-1) + desc="Melodic output directories", + argstr="%s", + position=-1, + ) class TrainingSetCreatorOutputSpec(TraitedSpec): mel_icas_out = OutputMultiPath( Directory(exists=True), copyfile=False, - desc='Hand labels for noise vs signal', - argstr='%s', - position=-1) + desc="Hand labels for noise vs signal", + argstr="%s", + position=-1, + ) class TrainingSetCreator(BaseInterface): - '''Goes through set of provided melodic output directories, to find all + """Goes through set of provided melodic output directories, to find all the ones that have a hand_labels_noise.txt file in them. This is outsourced as a separate class, so that the pipeline is rerun everytime a handlabeled file has been changed, or a new one created. 
- ''' + """ + input_spec = TrainingSetCreatorInputSpec output_spec = TrainingSetCreatorOutputSpec _always_run = True @@ -95,23 +108,24 @@ class TrainingSetCreator(BaseInterface): def _run_interface(self, runtime): mel_icas = [] for item in self.inputs.mel_icas_in: - if os.path.exists(os.path.join(item, 'hand_labels_noise.txt')): + if os.path.exists(os.path.join(item, "hand_labels_noise.txt")): mel_icas.append(item) if len(mel_icas) == 0: raise Exception( - '%s did not find any hand_labels_noise.txt files in the following directories: %s' - % (self.__class__.__name__, mel_icas)) + "%s did not find any hand_labels_noise.txt files in the following directories: %s" + % (self.__class__.__name__, mel_icas) + ) return runtime def _list_outputs(self): mel_icas = [] for item in self.inputs.mel_icas_in: - if os.path.exists(os.path.join(item, 'hand_labels_noise.txt')): + if os.path.exists(os.path.join(item, "hand_labels_noise.txt")): mel_icas.append(item) outputs = self._outputs().get() - outputs['mel_icas_out'] = mel_icas + outputs["mel_icas_out"] = mel_icas return outputs @@ -119,31 +133,34 @@ class FeatureExtractorInputSpec(CommandLineInputSpec): mel_ica = Directory( exists=True, copyfile=False, - desc='Melodic output directory or directories', - argstr='%s', - position=-1) + desc="Melodic output directory or directories", + argstr="%s", + position=-1, + ) class FeatureExtractorOutputSpec(TraitedSpec): mel_ica = Directory( exists=True, copyfile=False, - desc='Melodic output directory or directories', - argstr='%s', - position=-1) + desc="Melodic output directory or directories", + argstr="%s", + position=-1, + ) class FeatureExtractor(CommandLine): - ''' + """ Extract features (for later training and/or classifying) - ''' + """ + input_spec = FeatureExtractorInputSpec output_spec = FeatureExtractorOutputSpec - cmd = 'fix -f' + cmd = "fix -f" def _list_outputs(self): outputs = self.output_spec().get() - outputs['mel_ica'] = self.inputs.mel_ica + outputs["mel_ica"] = self.inputs.mel_ica return outputs @@ -151,42 +168,43 @@ class TrainingInputSpec(CommandLineInputSpec): mel_icas = InputMultiPath( Directory(exists=True), copyfile=False, - desc='Melodic output directories', - argstr='%s', - position=-1) + desc="Melodic output directories", + argstr="%s", + position=-1, + ) trained_wts_filestem = traits.Str( - desc= - 'trained-weights filestem, used for trained_wts_file and output directories', - argstr='%s', - position=1) + desc="trained-weights filestem, used for trained_wts_file and output directories", + argstr="%s", + position=1, + ) loo = traits.Bool( - argstr='-l', - desc='full leave-one-out test with classifier training', - position=2) + argstr="-l", desc="full leave-one-out test with classifier training", position=2 + ) class TrainingOutputSpec(TraitedSpec): - trained_wts_file = File(exists=True, desc='Trained-weights file') + trained_wts_file = File(exists=True, desc="Trained-weights file") class Training(CommandLine): - ''' + """ Train the classifier based on your own FEAT/MELODIC output directory. 
- ''' + """ + input_spec = TrainingInputSpec output_spec = TrainingOutputSpec - cmd = 'fix -t' + cmd = "fix -t" def _list_outputs(self): outputs = self.output_spec().get() if isdefined(self.inputs.trained_wts_filestem): - outputs['trained_wts_file'] = os.path.abspath( - self.inputs.trained_wts_filestem + '.RData') + outputs["trained_wts_file"] = os.path.abspath( + self.inputs.trained_wts_filestem + ".RData" + ) else: - outputs['trained_wts_file'] = os.path.abspath( - 'trained_wts_file.RData') + outputs["trained_wts_file"] = os.path.abspath("trained_wts_file.RData") return outputs @@ -194,47 +212,50 @@ class AccuracyTesterInputSpec(CommandLineInputSpec): mel_icas = InputMultiPath( Directory(exists=True), copyfile=False, - desc='Melodic output directories', - argstr='%s', + desc="Melodic output directories", + argstr="%s", position=3, - mandatory=True) + mandatory=True, + ) trained_wts_file = File( - desc='trained-weights file', argstr='%s', position=1, mandatory=True) + desc="trained-weights file", argstr="%s", position=1, mandatory=True + ) output_directory = Directory( - desc= - 'Path to folder in which to store the results of the accuracy test.', - argstr='%s', + desc="Path to folder in which to store the results of the accuracy test.", + argstr="%s", position=2, - mandatory=True) + mandatory=True, + ) class AccuracyTesterOutputSpec(TraitedSpec): output_directory = Directory( - desc= - 'Path to folder in which to store the results of the accuracy test.', - argstr='%s', - position=1) + desc="Path to folder in which to store the results of the accuracy test.", + argstr="%s", + position=1, + ) class AccuracyTester(CommandLine): - ''' + """ Test the accuracy of an existing training dataset on a set of hand-labelled subjects. Note: This may or may not be working. Couldn't presently not confirm because fix fails on this (even outside of nipype) without leaving an error msg. 
- ''' + """ + input_spec = AccuracyTesterInputSpec output_spec = AccuracyTesterOutputSpec - cmd = 'fix -C' + cmd = "fix -C" def _list_outputs(self): outputs = self.output_spec().get() if isdefined(self.inputs.output_directory): - outputs['output_directory'] = Directory( - exists=False, value=self.inputs.output_directory) + outputs["output_directory"] = Directory( + exists=False, value=self.inputs.output_directory + ) else: - outputs['output_directory'] = Directory( - exists=False, value='accuracy_test') + outputs["output_directory"] = Directory(exists=False, value="accuracy_test") return outputs @@ -242,58 +263,58 @@ class ClassifierInputSpec(CommandLineInputSpec): mel_ica = Directory( exists=True, copyfile=False, - desc='Melodic output directory or directories', - argstr='%s', - position=1) + desc="Melodic output directory or directories", + argstr="%s", + position=1, + ) trained_wts_file = File( exists=True, - desc='trained-weights file', - argstr='%s', + desc="trained-weights file", + argstr="%s", position=2, mandatory=True, - copyfile=False) + copyfile=False, + ) thresh = traits.Int( - argstr='%d', - desc='Threshold for cleanup.', - position=-1, - mandatory=True) + argstr="%d", desc="Threshold for cleanup.", position=-1, mandatory=True + ) artifacts_list_file = File( - desc= - 'Text file listing which ICs are artifacts; can be the output from classification or can be created manually' + desc="Text file listing which ICs are artifacts; can be the output from classification or can be created manually" ) class ClassifierOutputSpec(TraitedSpec): artifacts_list_file = File( - desc= - 'Text file listing which ICs are artifacts; can be the output from classification or can be created manually' + desc="Text file listing which ICs are artifacts; can be the output from classification or can be created manually" ) class Classifier(CommandLine): - ''' + """ Classify ICA components using a specific training dataset ( is in the range 0-100, typically 5-20). 
- ''' + """ + input_spec = ClassifierInputSpec output_spec = ClassifierOutputSpec - cmd = 'fix -c' + cmd = "fix -c" def _gen_artifacts_list_file(self, mel_ica, thresh): _, trained_wts_file = os.path.split(self.inputs.trained_wts_file) - trained_wts_filestem = trained_wts_file.split('.')[0] - filestem = 'fix4melview_' + trained_wts_filestem + '_thr' + trained_wts_filestem = trained_wts_file.split(".")[0] + filestem = "fix4melview_" + trained_wts_filestem + "_thr" - fname = os.path.join(mel_ica, filestem + str(thresh) + '.txt') + fname = os.path.join(mel_ica, filestem + str(thresh) + ".txt") return fname def _list_outputs(self): outputs = self.output_spec().get() - outputs['artifacts_list_file'] = self._gen_artifacts_list_file( - self.inputs.mel_ica, self.inputs.thresh) + outputs["artifacts_list_file"] = self._gen_artifacts_list_file( + self.inputs.mel_ica, self.inputs.thresh + ) return outputs @@ -301,68 +322,73 @@ def _list_outputs(self): class CleanerInputSpec(CommandLineInputSpec): artifacts_list_file = File( exists=True, - argstr='%s', + argstr="%s", position=1, mandatory=True, - desc= - 'Text file listing which ICs are artifacts; can be the output from classification or can be created manually' + desc="Text file listing which ICs are artifacts; can be the output from classification or can be created manually", ) cleanup_motion = traits.Bool( - argstr='-m', - desc= - 'cleanup motion confounds, looks for design.fsf for highpass filter cut-off', - position=2) + argstr="-m", + desc="cleanup motion confounds, looks for design.fsf for highpass filter cut-off", + position=2, + ) highpass = traits.Float( 100, - argstr='-m -h %f', + argstr="-m -h %f", usedefault=True, - desc='cleanup motion confounds', - position=2) + desc="cleanup motion confounds", + position=2, + ) aggressive = traits.Bool( - argstr='-A', - desc= - 'Apply aggressive (full variance) cleanup, instead of the default less-aggressive (unique variance) cleanup.', - position=3) + argstr="-A", + desc="Apply aggressive (full variance) cleanup, instead of the default less-aggressive (unique variance) cleanup.", + position=3, + ) confound_file = File( - argstr='-x %s', desc='Include additional confound file.', position=4) + argstr="-x %s", desc="Include additional confound file.", position=4 + ) confound_file_1 = File( - argstr='-x %s', desc='Include additional confound file.', position=5) + argstr="-x %s", desc="Include additional confound file.", position=5 + ) confound_file_2 = File( - argstr='-x %s', desc='Include additional confound file.', position=6) + argstr="-x %s", desc="Include additional confound file.", position=6 + ) class CleanerOutputSpec(TraitedSpec): - cleaned_functional_file = File(exists=True, desc='Cleaned session data') + cleaned_functional_file = File(exists=True, desc="Cleaned session data") class Cleaner(CommandLine): - ''' + """ Extract features (for later training and/or classifying) - ''' + """ + input_spec = CleanerInputSpec output_spec = CleanerOutputSpec - cmd = 'fix -a' + cmd = "fix -a" def _get_cleaned_functional_filename(self, artifacts_list_filename): - ''' extract the proper filename from the first line of the artifacts file ''' - artifacts_list_file = open(artifacts_list_filename, 'r') - functional_filename, extension = artifacts_list_file.readline().split( - '.') + """ extract the proper filename from the first line of the artifacts file """ + artifacts_list_file = open(artifacts_list_filename, "r") + functional_filename, extension = artifacts_list_file.readline().split(".") 
artifacts_list_file_path, artifacts_list_filename = os.path.split( - artifacts_list_filename) + artifacts_list_filename + ) - return (os.path.join(artifacts_list_file_path, - functional_filename + '_clean.nii.gz')) + return os.path.join( + artifacts_list_file_path, functional_filename + "_clean.nii.gz" + ) def _list_outputs(self): outputs = self.output_spec().get() - outputs[ - 'cleaned_functional_file'] = self._get_cleaned_functional_filename( - self.inputs.artifacts_list_file) + outputs["cleaned_functional_file"] = self._get_cleaned_functional_filename( + self.inputs.artifacts_list_file + ) return outputs diff --git a/nipype/interfaces/fsl/maths.py b/nipype/interfaces/fsl/maths.py index afa9328205..1b64511f9e 100644 --- a/nipype/interfaces/fsl/maths.py +++ b/nipype/interfaces/fsl/maths.py @@ -8,42 +8,35 @@ import os import numpy as np -from ..base import (TraitedSpec, File, traits, InputMultiPath, isdefined) +from ..base import TraitedSpec, File, traits, InputMultiPath, isdefined from .base import FSLCommand, FSLCommandInputSpec class MathsInput(FSLCommandInputSpec): in_file = File( - position=2, - argstr="%s", - exists=True, - mandatory=True, - desc="image to operate on") + position=2, argstr="%s", exists=True, mandatory=True, desc="image to operate on" + ) out_file = File( - genfile=True, - position=-2, - argstr="%s", - desc="image to write", - hash_files=False) + genfile=True, position=-2, argstr="%s", desc="image to write", hash_files=False + ) _dtypes = ["float", "char", "int", "short", "double", "input"] internal_datatype = traits.Enum( *_dtypes, position=1, argstr="-dt %s", - desc=("datatype to use for calculations " - "(default is float)")) + desc=("datatype to use for calculations " "(default is float)") + ) output_datatype = traits.Enum( *_dtypes, position=-1, argstr="-odt %s", - desc=("datatype to use for output (default " - "uses input type)")) + desc=("datatype to use for output (default " "uses input type)") + ) nan2zeros = traits.Bool( - position=3, - argstr='-nan', - desc='change NaNs to zeros before doing anything') + position=3, argstr="-nan", desc="change NaNs to zeros before doing anything" + ) class MathsOutput(TraitedSpec): @@ -63,7 +56,8 @@ def _list_outputs(self): outputs["out_file"] = self.inputs.out_file if not isdefined(self.inputs.out_file): outputs["out_file"] = self._gen_fname( - self.inputs.in_file, suffix=self._suffix) + self.inputs.in_file, suffix=self._suffix + ) outputs["out_file"] = os.path.abspath(outputs["out_file"]) return outputs @@ -77,17 +71,15 @@ class ChangeDataTypeInput(MathsInput): _dtypes = ["float", "char", "int", "short", "double", "input"] output_datatype = traits.Enum( - *_dtypes, - position=-1, - argstr="-odt %s", - mandatory=True, - desc="output data type") + *_dtypes, position=-1, argstr="-odt %s", mandatory=True, desc="output data type" + ) class ChangeDataType(MathsCommand): """Use fslmaths to change the datatype of an image. 
""" + input_spec = ChangeDataTypeInput _suffix = "_chdt" @@ -95,23 +87,28 @@ class ChangeDataType(MathsCommand): class ThresholdInputSpec(MathsInput): thresh = traits.Float( - mandatory=True, position=4, argstr="%s", desc="threshold value") + mandatory=True, position=4, argstr="%s", desc="threshold value" + ) direction = traits.Enum( "below", "above", usedefault=True, - desc="zero-out either below or above thresh value") + desc="zero-out either below or above thresh value", + ) use_robust_range = traits.Bool( - desc="interpret thresh as percentage (0-100) of robust range") + desc="interpret thresh as percentage (0-100) of robust range" + ) use_nonzero_voxels = traits.Bool( desc="use nonzero voxels to calculate robust range", - requires=["use_robust_range"]) + requires=["use_robust_range"], + ) class Threshold(MathsCommand): """Use fslmaths to apply a threshold to an image in a variety of ways. """ + input_spec = ThresholdInputSpec _suffix = "_thresh" @@ -123,8 +120,7 @@ def _format_arg(self, name, spec, value): arg += "u" arg += "thr" if isdefined(_si.use_robust_range) and _si.use_robust_range: - if (isdefined(_si.use_nonzero_voxels) - and _si.use_nonzero_voxels): + if isdefined(_si.use_nonzero_voxels) and _si.use_nonzero_voxels: arg += "P" else: arg += "p" @@ -143,13 +139,15 @@ class StdImageInput(MathsInput): usedefault=True, argstr="-%sstd", position=4, - desc="dimension to standard deviate across") + desc="dimension to standard deviate across", + ) class StdImage(MathsCommand): """Use fslmaths to generate a standard deviation in an image across a given dimension. """ + input_spec = StdImageInput _suffix = "_std" @@ -164,13 +162,15 @@ class MeanImageInput(MathsInput): usedefault=True, argstr="-%smean", position=4, - desc="dimension to mean across") + desc="dimension to mean across", + ) class MeanImage(MathsCommand): """Use fslmaths to generate a mean image across a given dimension. """ + input_spec = MeanImageInput _suffix = "_mean" @@ -185,7 +185,8 @@ class MaxImageInput(MathsInput): usedefault=True, argstr="-%smax", position=4, - desc="dimension to max across") + desc="dimension to max across", + ) class MaxImage(MathsCommand): @@ -201,6 +202,7 @@ class MaxImage(MathsCommand): 'fslmaths functional.nii -Tmax functional_max.nii' """ + input_spec = MaxImageInput _suffix = "_max" @@ -215,14 +217,15 @@ class PercentileImageInput(MathsInput): usedefault=True, argstr="-%sperc", position=4, - desc="dimension to percentile across") + desc="dimension to percentile across", + ) perc = traits.Range( low=0, high=100, argstr="%f", position=5, - desc=("nth percentile (0-100) of FULL RANGE " - "across dimension")) + desc=("nth percentile (0-100) of FULL RANGE " "across dimension"), + ) class PercentileImage(MathsCommand): @@ -239,6 +242,7 @@ class PercentileImage(MathsCommand): 'fslmaths functional.nii -Tperc 90 functional_perc.nii' """ + input_spec = PercentileImageInput _suffix = "_perc" @@ -253,7 +257,8 @@ class MaxnImageInput(MathsInput): usedefault=True, argstr="-%smaxn", position=4, - desc="dimension to index max across") + desc="dimension to index max across", + ) class MaxnImage(MathsCommand): @@ -261,6 +266,7 @@ class MaxnImage(MathsCommand): a given dimension. """ + input_spec = MaxnImageInput _suffix = "_maxn" @@ -275,13 +281,15 @@ class MinImageInput(MathsInput): usedefault=True, argstr="-%smin", position=4, - desc="dimension to min across") + desc="dimension to min across", + ) class MinImage(MathsCommand): """Use fslmaths to generate a minimum image across a given dimension. 
""" + input_spec = MinImageInput _suffix = "_min" @@ -296,13 +304,15 @@ class MedianImageInput(MathsInput): usedefault=True, argstr="-%smedian", position=4, - desc="dimension to median across") + desc="dimension to median across", + ) class MedianImage(MathsCommand): """Use fslmaths to generate a median image across a given dimension. """ + input_spec = MedianImageInput _suffix = "_median" @@ -317,8 +327,8 @@ class AR1ImageInput(MathsInput): usedefault=True, argstr="-%sar1", position=4, - desc=("dimension to find AR(1) coefficient" - "across")) + desc=("dimension to find AR(1) coefficient" "across"), + ) class AR1Image(MathsCommand): @@ -326,6 +336,7 @@ class AR1Image(MathsCommand): given dimension. (Should use -odt float and probably demean first) """ + input_spec = AR1ImageInput _suffix = "_ar1" @@ -337,19 +348,22 @@ class IsotropicSmoothInput(MathsInput): xor=["sigma"], position=4, argstr="-s %.5f", - desc="fwhm of smoothing kernel [mm]") + desc="fwhm of smoothing kernel [mm]", + ) sigma = traits.Float( mandatory=True, xor=["fwhm"], position=4, argstr="-s %.5f", - desc="sigma of smoothing kernel [mm]") + desc="sigma of smoothing kernel [mm]", + ) class IsotropicSmooth(MathsCommand): """Use fslmaths to spatially smooth an image with a gaussian kernel. """ + input_spec = IsotropicSmoothInput _suffix = "_smooth" @@ -367,13 +381,15 @@ class ApplyMaskInput(MathsInput): mandatory=True, argstr="-mas %s", position=4, - desc="binary image defining mask space") + desc="binary image defining mask space", + ) class ApplyMask(MathsCommand): """Use fslmaths to apply a binary mask to another image. """ + input_spec = ApplyMaskInput _suffix = "_masked" @@ -390,19 +406,23 @@ class KernelInput(MathsInput): "file", argstr="-kernel %s", position=4, - desc="kernel shape to use") + desc="kernel shape to use", + ) kernel_size = traits.Float( argstr="%.4f", position=5, xor=["kernel_file"], - desc=("kernel size - voxels for box/boxv, mm " - "for sphere, mm sigma for gauss")) + desc=( + "kernel size - voxels for box/boxv, mm " "for sphere, mm sigma for gauss" + ), + ) kernel_file = File( exists=True, argstr="%s", position=5, xor=["kernel_size"], - desc="use external file for kernel") + desc="use external file for kernel", + ) class DilateInput(KernelInput): @@ -414,13 +434,15 @@ class DilateInput(KernelInput): argstr="-dil%s", position=6, mandatory=True, - desc="filtering operation to perfoem in dilation") + desc="filtering operation to perfoem in dilation", + ) class DilateImage(MathsCommand): """Use fslmaths to perform a spatial dilation of an image. """ + input_spec = DilateInput _suffix = "_dil" @@ -437,14 +459,15 @@ class ErodeInput(KernelInput): position=6, usedefault=True, default_value=False, - desc=("if true, minimum filter rather than " - "erosion by zeroing-out")) + desc=("if true, minimum filter rather than " "erosion by zeroing-out"), + ) class ErodeImage(MathsCommand): """Use fslmaths to perform a spatial erosion of an image. """ + input_spec = ErodeInput _suffix = "_ero" @@ -465,13 +488,15 @@ class SpatialFilterInput(KernelInput): argstr="-f%s", position=6, mandatory=True, - desc="operation to filter with") + desc="operation to filter with", + ) class SpatialFilter(MathsCommand): """Use fslmaths to spatially filter an image. 
""" + input_spec = SpatialFilterInput _suffix = "_filt" @@ -505,13 +530,15 @@ class UnaryMathsInput(MathsInput): argstr="-%s", position=4, mandatory=True, - desc="operation to perform") + desc="operation to perform", + ) class UnaryMaths(MathsCommand): """Use fslmaths to perorm a variety of mathematical operations on an image. """ + input_spec = UnaryMathsInput def _list_outputs(self): @@ -532,20 +559,23 @@ class BinaryMathsInput(MathsInput): mandatory=True, argstr="-%s", position=4, - desc="operation to perform") + desc="operation to perform", + ) operand_file = File( exists=True, argstr="%s", mandatory=True, position=5, xor=["operand_value"], - desc="second image to perform operation with") + desc="second image to perform operation with", + ) operand_value = traits.Float( argstr="%.8f", mandatory=True, position=5, xor=["operand_file"], - desc="value to perform operation with") + desc="value to perform operation with", + ) class BinaryMaths(MathsCommand): @@ -553,6 +583,7 @@ class BinaryMaths(MathsCommand): a numeric value. """ + input_spec = BinaryMathsInput @@ -562,13 +593,13 @@ class MultiImageMathsInput(MathsInput): position=4, argstr="%s", mandatory=True, - desc=("python formatted string of operations " - "to perform")) + desc=("python formatted string of operations " "to perform"), + ) operand_files = InputMultiPath( File(exists=True), mandatory=True, - desc=("list of file names to plug into op " - "string")) + desc=("list of file names to plug into op " "string"), + ) class MultiImageMaths(MathsCommand): @@ -586,6 +617,7 @@ class MultiImageMaths(MathsCommand): 'fslmaths functional.nii -add functional2.nii -mul -1 -div functional3.nii functional4.nii' """ + input_spec = MultiImageMathsInput def _format_arg(self, name, spec, value): @@ -601,13 +633,15 @@ class TemporalFilterInput(MathsInput): argstr="%.6f", position=5, usedefault=True, - desc="lowpass filter sigma (in volumes)") + desc="lowpass filter sigma (in volumes)", + ) highpass_sigma = traits.Float( -1, argstr="-bptf %.6f", position=4, usedefault=True, - desc="highpass filter sigma (in volumes)") + desc="highpass filter sigma (in volumes)", + ) class TemporalFilter(MathsCommand): @@ -615,5 +649,6 @@ class TemporalFilter(MathsCommand): timeseries. 
""" + input_spec = TemporalFilterInput _suffix = "_filt" diff --git a/nipype/interfaces/fsl/model.py b/nipype/interfaces/fsl/model.py index 5e176ff414..b4e04c690e 100644 --- a/nipype/interfaces/fsl/model.py +++ b/nipype/interfaces/fsl/model.py @@ -17,76 +17,113 @@ from ...utils.filemanip import simplify_list, ensure_list from ...utils.misc import human_order_sorted from ...external.due import BibTeX -from ..base import (File, traits, isdefined, TraitedSpec, BaseInterface, - Directory, InputMultiPath, OutputMultiPath, - BaseInterfaceInputSpec) +from ..base import ( + File, + traits, + isdefined, + TraitedSpec, + BaseInterface, + Directory, + InputMultiPath, + OutputMultiPath, + BaseInterfaceInputSpec, +) from .base import FSLCommand, FSLCommandInputSpec, Info class Level1DesignInputSpec(BaseInterfaceInputSpec): interscan_interval = traits.Float( - mandatory=True, desc='Interscan interval (in secs)') + mandatory=True, desc="Interscan interval (in secs)" + ) session_info = traits.Any( mandatory=True, - desc=('Session specific information generated ' - 'by ``modelgen.SpecifyModel``')) + desc=("Session specific information generated " "by ``modelgen.SpecifyModel``"), + ) bases = traits.Either( traits.Dict( - traits.Enum('dgamma'), - traits.Dict(traits.Enum('derivs'), traits.Bool)), + traits.Enum("dgamma"), traits.Dict(traits.Enum("derivs"), traits.Bool) + ), traits.Dict( - traits.Enum('gamma'), - traits.Dict(traits.Enum('derivs', 'gammasigma', 'gammadelay'))), + traits.Enum("gamma"), + traits.Dict(traits.Enum("derivs", "gammasigma", "gammadelay")), + ), traits.Dict( - traits.Enum('custom'), - traits.Dict(traits.Enum('bfcustompath'), traits.Str)), - traits.Dict(traits.Enum('none'), traits.Dict()), - traits.Dict(traits.Enum('none'), traits.Enum(None)), + traits.Enum("custom"), traits.Dict(traits.Enum("bfcustompath"), traits.Str) + ), + traits.Dict(traits.Enum("none"), traits.Dict()), + traits.Dict(traits.Enum("none"), traits.Enum(None)), mandatory=True, - desc=("name of basis function and options e.g., " - "{'dgamma': {'derivs': True}}"), + desc=( + "name of basis function and options e.g., " "{'dgamma': {'derivs': True}}" + ), ) orthogonalization = traits.Dict( traits.Int, traits.Dict(traits.Int, traits.Either(traits.Bool, traits.Int)), - desc=("which regressors to make orthogonal e.g., " - "{1: {0:0,1:0,2:0}, 2: {0:1,1:1,2:0}} to make the second " - "regressor in a 2-regressor model orthogonal to the first."), - usedefault=True) + desc=( + "which regressors to make orthogonal e.g., " + "{1: {0:0,1:0,2:0}, 2: {0:1,1:1,2:0}} to make the second " + "regressor in a 2-regressor model orthogonal to the first." + ), + usedefault=True, + ) model_serial_correlations = traits.Bool( desc="Option to model serial correlations using an \ autoregressive estimator (order 1). Setting this option is only \ useful in the context of the fsf file. 
If you set this to False, you need to \ repeat this option for FILMGLS by setting autocorr_noestimate to True", - mandatory=True) + mandatory=True, + ) contrasts = traits.List( traits.Either( - traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), - traits.List(traits.Float)), - traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), - traits.List(traits.Float), traits.List(traits.Float)), - traits.Tuple(traits.Str, traits.Enum('F'), - traits.List( - traits.Either( - traits.Tuple(traits.Str, traits.Enum('T'), - traits.List(traits.Str), - traits.List(traits.Float)), - traits.Tuple(traits.Str, traits.Enum('T'), - traits.List(traits.Str), - traits.List(traits.Float), - traits.List(traits.Float)))))), + traits.Tuple( + traits.Str, + traits.Enum("T"), + traits.List(traits.Str), + traits.List(traits.Float), + ), + traits.Tuple( + traits.Str, + traits.Enum("T"), + traits.List(traits.Str), + traits.List(traits.Float), + traits.List(traits.Float), + ), + traits.Tuple( + traits.Str, + traits.Enum("F"), + traits.List( + traits.Either( + traits.Tuple( + traits.Str, + traits.Enum("T"), + traits.List(traits.Str), + traits.List(traits.Float), + ), + traits.Tuple( + traits.Str, + traits.Enum("T"), + traits.List(traits.Str), + traits.List(traits.Float), + traits.List(traits.Float), + ), + ) + ), + ), + ), desc="List of contrasts with each contrast being a list of the form - \ [('name', 'stat', [condition list], [weight list], [session list])]. if \ session list is None or not provided, all sessions are used. For F \ contrasts, the condition list should contain previously defined \ -T-contrasts.") +T-contrasts.", + ) class Level1DesignOutputSpec(TraitedSpec): - fsf_files = OutputMultiPath( - File(exists=True), desc='FSL feat specification files') + fsf_files = OutputMultiPath(File(exists=True), desc="FSL feat specification files") ev_files = OutputMultiPath( - traits.List(File(exists=True)), desc='condition information files') + traits.List(File(exists=True)), desc="condition information files" + ) class Level1Design(BaseInterface): @@ -107,17 +144,25 @@ class Level1Design(BaseInterface): output_spec = Level1DesignOutputSpec def _create_ev_file(self, evfname, evinfo): - f = open(evfname, 'wt') + f = open(evfname, "wt") for i in evinfo: if len(i) == 3: - f.write('%f %f %f\n' % (i[0], i[1], i[2])) + f.write("%f %f %f\n" % (i[0], i[1], i[2])) else: - f.write('%f\n' % i[0]) + f.write("%f\n" % i[0]) f.close() - def _create_ev_files(self, cwd, runinfo, runidx, ev_parameters, - orthogonalization, contrasts, do_tempfilter, - basis_key): + def _create_ev_files( + self, + cwd, + runinfo, + runidx, + ev_parameters, + orthogonalization, + contrasts, + do_tempfilter, + basis_key, + ): """Creates EV files from condition and regressor information. 
Parameters: @@ -142,72 +187,74 @@ def _create_ev_files(self, cwd, runinfo, runidx, ev_parameters, basis_key = "hrf" elif basis_key == "gamma": try: - _ = ev_parameters['gammasigma'] + _ = ev_parameters["gammasigma"] except KeyError: - ev_parameters['gammasigma'] = 3 + ev_parameters["gammasigma"] = 3 try: - _ = ev_parameters['gammadelay'] + _ = ev_parameters["gammadelay"] except KeyError: - ev_parameters['gammadelay'] = 6 - ev_template = load_template('feat_ev_' + basis_key + '.tcl') - ev_none = load_template('feat_ev_none.tcl') - ev_ortho = load_template('feat_ev_ortho.tcl') - ev_txt = '' + ev_parameters["gammadelay"] = 6 + ev_template = load_template("feat_ev_" + basis_key + ".tcl") + ev_none = load_template("feat_ev_none.tcl") + ev_ortho = load_template("feat_ev_ortho.tcl") + ev_txt = "" # generate sections for conditions and other nuisance # regressors num_evs = [0, 0] - for field in ['cond', 'regress']: + for field in ["cond", "regress"]: for i, cond in enumerate(runinfo[field]): - name = cond['name'] + name = cond["name"] evname.append(name) - evfname = os.path.join(cwd, 'ev_%s_%d_%d.txt' % (name, runidx, - len(evname))) + evfname = os.path.join( + cwd, "ev_%s_%d_%d.txt" % (name, runidx, len(evname)) + ) evinfo = [] num_evs[0] += 1 num_evs[1] += 1 - if field == 'cond': - for j, onset in enumerate(cond['onset']): + if field == "cond": + for j, onset in enumerate(cond["onset"]): try: - amplitudes = cond['amplitudes'] + amplitudes = cond["amplitudes"] if len(amplitudes) > 1: amp = amplitudes[j] else: amp = amplitudes[0] except KeyError: amp = 1 - if len(cond['duration']) > 1: - evinfo.insert(j, [onset, cond['duration'][j], amp]) + if len(cond["duration"]) > 1: + evinfo.insert(j, [onset, cond["duration"][j], amp]) else: - evinfo.insert(j, [onset, cond['duration'][0], amp]) - ev_parameters['cond_file'] = evfname - ev_parameters['ev_num'] = num_evs[0] - ev_parameters['ev_name'] = name - ev_parameters['tempfilt_yn'] = do_tempfilter - if 'basisorth' not in ev_parameters: - ev_parameters['basisorth'] = 1 - if 'basisfnum' not in ev_parameters: - ev_parameters['basisfnum'] = 1 + evinfo.insert(j, [onset, cond["duration"][0], amp]) + ev_parameters["cond_file"] = evfname + ev_parameters["ev_num"] = num_evs[0] + ev_parameters["ev_name"] = name + ev_parameters["tempfilt_yn"] = do_tempfilter + if "basisorth" not in ev_parameters: + ev_parameters["basisorth"] = 1 + if "basisfnum" not in ev_parameters: + ev_parameters["basisfnum"] = 1 try: - ev_parameters['fsldir'] = os.environ['FSLDIR'] + ev_parameters["fsldir"] = os.environ["FSLDIR"] except KeyError: - if basis_key == 'flobs': - raise Exception( - 'FSL environment variables not set') + if basis_key == "flobs": + raise Exception("FSL environment variables not set") else: - ev_parameters['fsldir'] = '/usr/share/fsl' - ev_parameters['temporalderiv'] = int( - bool(ev_parameters.get('derivs', False))) - if ev_parameters['temporalderiv']: - evname.append(name + 'TD') + ev_parameters["fsldir"] = "/usr/share/fsl" + ev_parameters["temporalderiv"] = int( + bool(ev_parameters.get("derivs", False)) + ) + if ev_parameters["temporalderiv"]: + evname.append(name + "TD") num_evs[1] += 1 ev_txt += ev_template.substitute(ev_parameters) - elif field == 'regress': - evinfo = [[j] for j in cond['val']] + elif field == "regress": + evinfo = [[j] for j in cond["val"]] ev_txt += ev_none.substitute( ev_num=num_evs[0], ev_name=name, tempfilt_yn=do_tempfilter, - cond_file=evfname) + cond_file=evfname, + ) ev_txt += "\n" conds[name] = evfname self._create_ev_file(evfname, 
evinfo) @@ -221,20 +268,17 @@ def _create_ev_files(self, cwd, runinfo, runidx, ev_parameters, orthogonal = 0 if orthogonal == 1 and initial not in ev_txt: ev_txt += initial + "\n" - ev_txt += ev_ortho.substitute(c0=i, c1=j, - orthogonal=orthogonal) + ev_txt += ev_ortho.substitute(c0=i, c1=j, orthogonal=orthogonal) ev_txt += "\n" # add contrast info to fsf file if isdefined(contrasts): - contrast_header = load_template('feat_contrast_header.tcl') - contrast_prolog = load_template('feat_contrast_prolog.tcl') - contrast_element = load_template('feat_contrast_element.tcl') - contrast_ftest_element = load_template( - 'feat_contrast_ftest_element.tcl') - contrastmask_header = load_template('feat_contrastmask_header.tcl') - contrastmask_footer = load_template('feat_contrastmask_footer.tcl') - contrastmask_element = load_template( - 'feat_contrastmask_element.tcl') + contrast_header = load_template("feat_contrast_header.tcl") + contrast_prolog = load_template("feat_contrast_prolog.tcl") + contrast_element = load_template("feat_contrast_element.tcl") + contrast_ftest_element = load_template("feat_contrast_ftest_element.tcl") + contrastmask_header = load_template("feat_contrastmask_header.tcl") + contrastmask_footer = load_template("feat_contrastmask_footer.tcl") + contrastmask_element = load_template("feat_contrastmask_element.tcl") # add t/f contrast info ev_txt += contrast_header.substitute() con_names = [] @@ -244,7 +288,7 @@ def _create_ev_files(self, cwd, runinfo, runidx, ev_parameters, ftest_idx = [] ttest_idx = [] for j, con in enumerate(contrasts): - if con[1] == 'F': + if con[1] == "F": ftest_idx.append(j) for c in con[2]: if c[0] not in list(con_map.keys()): @@ -253,16 +297,17 @@ def _create_ev_files(self, cwd, runinfo, runidx, ev_parameters, else: ttest_idx.append(j) - for ctype in ['real', 'orig']: + for ctype in ["real", "orig"]: for j, con in enumerate(contrasts): - if con[1] == 'F': + if con[1] == "F": continue tidx = ttest_idx.index(j) + 1 ev_txt += contrast_prolog.substitute( - cnum=tidx, ctype=ctype, cname=con[0]) + cnum=tidx, ctype=ctype, cname=con[0] + ) count = 0 for c in range(1, len(evname) + 1): - if evname[c - 1].endswith('TD') and ctype == 'orig': + if evname[c - 1].endswith("TD") and ctype == "orig": continue count = count + 1 if evname[c - 1] in con[2]: @@ -270,19 +315,20 @@ def _create_ev_files(self, cwd, runinfo, runidx, ev_parameters, else: val = 0.0 ev_txt += contrast_element.substitute( - cnum=tidx, element=count, ctype=ctype, val=val) + cnum=tidx, element=count, ctype=ctype, val=val + ) ev_txt += "\n" for fconidx in ftest_idx: fval = 0 - if (con[0] in con_map.keys() - and fconidx in con_map[con[0]]): + if con[0] in con_map.keys() and fconidx in con_map[con[0]]: fval = 1 ev_txt += contrast_ftest_element.substitute( cnum=ftest_idx.index(fconidx) + 1, element=tidx, ctype=ctype, - val=fval) + val=fval, + ) ev_txt += "\n" # add contrast mask info @@ -290,8 +336,7 @@ def _create_ev_files(self, cwd, runinfo, runidx, ev_parameters, for j, _ in enumerate(contrasts): for k, _ in enumerate(contrasts): if j != k: - ev_txt += contrastmask_element.substitute( - c1=j + 1, c2=k + 1) + ev_txt += contrastmask_element.substitute(c1=j + 1, c2=k + 1) ev_txt += contrastmask_footer.substitute() return num_evs, ev_txt @@ -305,13 +350,13 @@ def _get_func_files(self, session_info): """ func_files = [] for i, info in enumerate(session_info): - func_files.insert(i, info['scans']) + func_files.insert(i, info["scans"]) return func_files def _run_interface(self, runtime): cwd = os.getcwd() - 
fsf_header = load_template('feat_header_l1.tcl') - fsf_postscript = load_template('feat_nongui.tcl') + fsf_header = load_template("feat_header_l1.tcl") + fsf_postscript = load_template("feat_nongui.tcl") prewhiten = 0 if isdefined(self.inputs.model_serial_correlations): @@ -324,18 +369,25 @@ def _run_interface(self, runtime): n_fcon = 0 if isdefined(self.inputs.contrasts): for i, c in enumerate(self.inputs.contrasts): - if c[1] == 'T': + if c[1] == "T": n_tcon += 1 - elif c[1] == 'F': + elif c[1] == "F": n_fcon += 1 for i, info in enumerate(session_info): do_tempfilter = 1 - if info['hpf'] == np.inf: + if info["hpf"] == np.inf: do_tempfilter = 0 num_evs, cond_txt = self._create_ev_files( - cwd, info, i, ev_parameters, self.inputs.orthogonalization, - self.inputs.contrasts, do_tempfilter, basis_key) + cwd, + info, + i, + ev_parameters, + self.inputs.orthogonalization, + self.inputs.contrasts, + do_tempfilter, + basis_key, + ) nim = load(func_files[i]) (_, _, _, timepoints) = nim.shape fsf_txt = fsf_header.substitute( @@ -347,13 +399,14 @@ def _run_interface(self, runtime): num_evs_real=num_evs[1], num_tcon=n_tcon, num_fcon=n_fcon, - high_pass_filter_cutoff=info['hpf'], + high_pass_filter_cutoff=info["hpf"], temphp_yn=do_tempfilter, - func_file=func_files[i]) + func_file=func_files[i], + ) fsf_txt += cond_txt fsf_txt += fsf_postscript.substitute(overwrite=1) - f = open(os.path.join(cwd, 'run%d.fsf' % i), 'w') + f = open(os.path.join(cwd, "run%d.fsf" % i), "w") f.write(fsf_txt) f.close() @@ -362,29 +415,30 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self.output_spec().get() cwd = os.getcwd() - outputs['fsf_files'] = [] - outputs['ev_files'] = [] + outputs["fsf_files"] = [] + outputs["ev_files"] = [] basis_key = list(self.inputs.bases.keys())[0] ev_parameters = dict(self.inputs.bases[basis_key]) for runno, runinfo in enumerate( - self._format_session_info(self.inputs.session_info)): - outputs['fsf_files'].append(os.path.join(cwd, 'run%d.fsf' % runno)) - outputs['ev_files'].insert(runno, []) + self._format_session_info(self.inputs.session_info) + ): + outputs["fsf_files"].append(os.path.join(cwd, "run%d.fsf" % runno)) + outputs["ev_files"].insert(runno, []) evname = [] - for field in ['cond', 'regress']: + for field in ["cond", "regress"]: for i, cond in enumerate(runinfo[field]): - name = cond['name'] + name = cond["name"] evname.append(name) - evfname = os.path.join(cwd, - 'ev_%s_%d_%d.txt' % (name, runno, - len(evname))) - if field == 'cond': - ev_parameters['temporalderiv'] = int( - bool(ev_parameters.get('derivs', False))) - if ev_parameters['temporalderiv']: - evname.append(name + 'TD') - outputs['ev_files'][runno].append( - os.path.join(cwd, evfname)) + evfname = os.path.join( + cwd, "ev_%s_%d_%d.txt" % (name, runno, len(evname)) + ) + if field == "cond": + ev_parameters["temporalderiv"] = int( + bool(ev_parameters.get("derivs", False)) + ) + if ev_parameters["temporalderiv"]: + evname.append(name + "TD") + outputs["ev_files"][runno].append(os.path.join(cwd, evfname)) return outputs @@ -394,7 +448,8 @@ class FEATInputSpec(FSLCommandInputSpec): mandatory=True, argstr="%s", position=0, - desc="File specifying the feat design spec file") + desc="File specifying the feat design spec file", + ) class FEATOutputSpec(TraitedSpec): @@ -404,35 +459,34 @@ class FEATOutputSpec(TraitedSpec): class FEAT(FSLCommand): """Uses FSL feat to calculate first level stats """ - _cmd = 'feat' + + _cmd = "feat" input_spec = FEATInputSpec output_spec = FEATOutputSpec def 
_list_outputs(self): outputs = self._outputs().get() is_ica = False - outputs['feat_dir'] = None - with open(self.inputs.fsf_file, 'rt') as fp: + outputs["feat_dir"] = None + with open(self.inputs.fsf_file, "rt") as fp: text = fp.read() if "set fmri(inmelodic) 1" in text: is_ica = True - for line in text.split('\n'): + for line in text.split("\n"): if line.find("set fmri(outputdir)") > -1: try: outputdir_spec = line.split('"')[-2] if os.path.exists(outputdir_spec): - outputs['feat_dir'] = outputdir_spec + outputs["feat_dir"] = outputdir_spec except: pass - if not outputs['feat_dir']: + if not outputs["feat_dir"]: if is_ica: - outputs['feat_dir'] = glob(os.path.join(os.getcwd(), - '*ica'))[0] + outputs["feat_dir"] = glob(os.path.join(os.getcwd(), "*ica"))[0] else: - outputs['feat_dir'] = glob(os.path.join(os.getcwd(), - '*feat'))[0] - print('Outputs from FEATmodel:', outputs) + outputs["feat_dir"] = glob(os.path.join(os.getcwd(), "*feat"))[0] + print("Outputs from FEATmodel:", outputs) return outputs @@ -443,136 +497,141 @@ class FEATModelInputSpec(FSLCommandInputSpec): argstr="%s", position=0, desc="File specifying the feat design spec file", - copyfile=False) + copyfile=False, + ) ev_files = traits.List( File(exists=True), mandatory=True, argstr="%s", desc="Event spec files generated by level1design", position=1, - copyfile=False) + copyfile=False, + ) class FEATModelOutpuSpec(TraitedSpec): - design_file = File( - exists=True, desc='Mat file containing ascii matrix for design') - design_image = File( - exists=True, desc='Graphical representation of design matrix') - design_cov = File( - exists=True, desc='Graphical representation of design covariance') - con_file = File( - exists=True, desc='Contrast file containing contrast vectors') - fcon_file = File(desc='Contrast file containing contrast vectors') + design_file = File(exists=True, desc="Mat file containing ascii matrix for design") + design_image = File(exists=True, desc="Graphical representation of design matrix") + design_cov = File(exists=True, desc="Graphical representation of design covariance") + con_file = File(exists=True, desc="Contrast file containing contrast vectors") + fcon_file = File(desc="Contrast file containing contrast vectors") class FEATModel(FSLCommand): """Uses FSL feat_model to generate design.mat files """ - _cmd = 'feat_model' + + _cmd = "feat_model" input_spec = FEATModelInputSpec output_spec = FEATModelOutpuSpec def _format_arg(self, name, trait_spec, value): - if name == 'fsf_file': - return super(FEATModel, - self)._format_arg(name, trait_spec, - self._get_design_root(value)) - elif name == 'ev_files': - return '' + if name == "fsf_file": + return super(FEATModel, self)._format_arg( + name, trait_spec, self._get_design_root(value) + ) + elif name == "ev_files": + return "" else: return super(FEATModel, self)._format_arg(name, trait_spec, value) def _get_design_root(self, infile): _, fname = os.path.split(infile) - return fname.split('.')[0] + return fname.split(".")[0] def _list_outputs(self): # TODO: figure out file names and get rid off the globs outputs = self._outputs().get() root = self._get_design_root(simplify_list(self.inputs.fsf_file)) - design_file = glob(os.path.join(os.getcwd(), '%s*.mat' % root)) - assert len(design_file) == 1, 'No mat file generated by FEAT Model' - outputs['design_file'] = design_file[0] - design_image = glob(os.path.join(os.getcwd(), '%s.png' % root)) - assert len( - design_image) == 1, 'No design image generated by FEAT Model' - outputs['design_image'] = design_image[0] 
- design_cov = glob(os.path.join(os.getcwd(), '%s_cov.png' % root)) - assert len( - design_cov) == 1, 'No covariance image generated by FEAT Model' - outputs['design_cov'] = design_cov[0] - con_file = glob(os.path.join(os.getcwd(), '%s*.con' % root)) - assert len(con_file) == 1, 'No con file generated by FEAT Model' - outputs['con_file'] = con_file[0] - fcon_file = glob(os.path.join(os.getcwd(), '%s*.fts' % root)) + design_file = glob(os.path.join(os.getcwd(), "%s*.mat" % root)) + assert len(design_file) == 1, "No mat file generated by FEAT Model" + outputs["design_file"] = design_file[0] + design_image = glob(os.path.join(os.getcwd(), "%s.png" % root)) + assert len(design_image) == 1, "No design image generated by FEAT Model" + outputs["design_image"] = design_image[0] + design_cov = glob(os.path.join(os.getcwd(), "%s_cov.png" % root)) + assert len(design_cov) == 1, "No covariance image generated by FEAT Model" + outputs["design_cov"] = design_cov[0] + con_file = glob(os.path.join(os.getcwd(), "%s*.con" % root)) + assert len(con_file) == 1, "No con file generated by FEAT Model" + outputs["con_file"] = con_file[0] + fcon_file = glob(os.path.join(os.getcwd(), "%s*.fts" % root)) if fcon_file: - assert len(fcon_file) == 1, 'No fts file generated by FEAT Model' - outputs['fcon_file'] = fcon_file[0] + assert len(fcon_file) == 1, "No fts file generated by FEAT Model" + outputs["fcon_file"] = fcon_file[0] return outputs class FILMGLSInputSpec(FSLCommandInputSpec): in_file = File( - exists=True, - mandatory=True, - position=-3, - argstr='%s', - desc='input data file') - design_file = File( - exists=True, position=-2, argstr='%s', desc='design matrix file') + exists=True, mandatory=True, position=-3, argstr="%s", desc="input data file" + ) + design_file = File(exists=True, position=-2, argstr="%s", desc="design matrix file") threshold = traits.Range( - value=1000., + value=1000.0, low=0.0, - argstr='%f', + argstr="%f", position=-1, usedefault=True, - desc='threshold') - smooth_autocorr = traits.Bool( - argstr='-sa', desc='Smooth auto corr estimates') - mask_size = traits.Int(argstr='-ms %d', desc="susan mask size") + desc="threshold", + ) + smooth_autocorr = traits.Bool(argstr="-sa", desc="Smooth auto corr estimates") + mask_size = traits.Int(argstr="-ms %d", desc="susan mask size") brightness_threshold = traits.Range( low=0, - argstr='-epith %d', - desc=('susan brightness threshold, ' - 'otherwise it is estimated')) - full_data = traits.Bool(argstr='-v', desc='output full data') + argstr="-epith %d", + desc=("susan brightness threshold, " "otherwise it is estimated"), + ) + full_data = traits.Bool(argstr="-v", desc="output full data") _estimate_xor = [ - 'autocorr_estimate_only', 'fit_armodel', 'tukey_window', - 'multitaper_product', 'use_pava', 'autocorr_noestimate' + "autocorr_estimate_only", + "fit_armodel", + "tukey_window", + "multitaper_product", + "use_pava", + "autocorr_noestimate", ] autocorr_estimate_only = traits.Bool( - argstr='-ac', + argstr="-ac", xor=_estimate_xor, - desc=('perform autocorrelation ' - 'estimatation only')) + desc=("perform autocorrelation " "estimatation only"), + ) fit_armodel = traits.Bool( - argstr='-ar', + argstr="-ar", xor=_estimate_xor, - desc=('fits autoregressive model - default is ' - 'to use tukey with M=sqrt(numvols)')) + desc=( + "fits autoregressive model - default is " + "to use tukey with M=sqrt(numvols)" + ), + ) tukey_window = traits.Int( - argstr='-tukey %d', + argstr="-tukey %d", xor=_estimate_xor, - desc='tukey window size to estimate autocorr') 
+ desc="tukey window size to estimate autocorr", + ) multitaper_product = traits.Int( - argstr='-mt %d', + argstr="-mt %d", xor=_estimate_xor, - desc=('multitapering with slepian tapers ' - 'and num is the time-bandwidth ' - 'product')) - use_pava = traits.Bool( - argstr='-pava', desc='estimates autocorr using PAVA') + desc=( + "multitapering with slepian tapers " + "and num is the time-bandwidth " + "product" + ), + ) + use_pava = traits.Bool(argstr="-pava", desc="estimates autocorr using PAVA") autocorr_noestimate = traits.Bool( - argstr='-noest', xor=_estimate_xor, desc='do not estimate autocorrs') + argstr="-noest", xor=_estimate_xor, desc="do not estimate autocorrs" + ) output_pwdata = traits.Bool( - argstr='-output_pwdata', - desc=('output prewhitened data and average ' - 'design matrix')) + argstr="-output_pwdata", + desc=("output prewhitened data and average " "design matrix"), + ) results_dir = Directory( - 'results', - argstr='-rn %s', + "results", + argstr="-rn %s", usedefault=True, - desc='directory to store results in') + desc="directory to store results in", + ) class FILMGLSInputSpec505(FSLCommandInputSpec): @@ -580,142 +639,154 @@ class FILMGLSInputSpec505(FSLCommandInputSpec): exists=True, mandatory=True, position=-3, - argstr='--in=%s', - desc='input data file') + argstr="--in=%s", + desc="input data file", + ) design_file = File( - exists=True, position=-2, argstr='--pd=%s', desc='design matrix file') + exists=True, position=-2, argstr="--pd=%s", desc="design matrix file" + ) threshold = traits.Range( - value=1000., + value=1000.0, low=0.0, - argstr='--thr=%f', + argstr="--thr=%f", position=-1, usedefault=True, - desc='threshold') - smooth_autocorr = traits.Bool( - argstr='--sa', desc='Smooth auto corr estimates') - mask_size = traits.Int(argstr='--ms=%d', desc="susan mask size") + desc="threshold", + ) + smooth_autocorr = traits.Bool(argstr="--sa", desc="Smooth auto corr estimates") + mask_size = traits.Int(argstr="--ms=%d", desc="susan mask size") brightness_threshold = traits.Range( low=0, - argstr='--epith=%d', - desc=('susan brightness threshold, ' - 'otherwise it is estimated')) - full_data = traits.Bool(argstr='-v', desc='output full data') + argstr="--epith=%d", + desc=("susan brightness threshold, " "otherwise it is estimated"), + ) + full_data = traits.Bool(argstr="-v", desc="output full data") _estimate_xor = [ - 'autocorr_estimate_only', 'fit_armodel', 'tukey_window', - 'multitaper_product', 'use_pava', 'autocorr_noestimate' + "autocorr_estimate_only", + "fit_armodel", + "tukey_window", + "multitaper_product", + "use_pava", + "autocorr_noestimate", ] autocorr_estimate_only = traits.Bool( - argstr='--ac', + argstr="--ac", xor=_estimate_xor, - desc=('perform autocorrelation ' - 'estimation only')) + desc=("perform autocorrelation " "estimation only"), + ) fit_armodel = traits.Bool( - argstr='--ar', + argstr="--ar", xor=_estimate_xor, - desc=('fits autoregressive model - default is ' - 'to use tukey with M=sqrt(numvols)')) + desc=( + "fits autoregressive model - default is " + "to use tukey with M=sqrt(numvols)" + ), + ) tukey_window = traits.Int( - argstr='--tukey=%d', + argstr="--tukey=%d", xor=_estimate_xor, - desc='tukey window size to estimate autocorr') + desc="tukey window size to estimate autocorr", + ) multitaper_product = traits.Int( - argstr='--mt=%d', + argstr="--mt=%d", xor=_estimate_xor, - desc=('multitapering with slepian tapers ' - 'and num is the time-bandwidth ' - 'product')) - use_pava = traits.Bool( - argstr='--pava', desc='estimates autocorr 
using PAVA') + desc=( + "multitapering with slepian tapers " + "and num is the time-bandwidth " + "product" + ), + ) + use_pava = traits.Bool(argstr="--pava", desc="estimates autocorr using PAVA") autocorr_noestimate = traits.Bool( - argstr='--noest', xor=_estimate_xor, desc='do not estimate autocorrs') + argstr="--noest", xor=_estimate_xor, desc="do not estimate autocorrs" + ) output_pwdata = traits.Bool( - argstr='--outputPWdata', - desc=('output prewhitened data and average ' - 'design matrix')) + argstr="--outputPWdata", + desc=("output prewhitened data and average " "design matrix"), + ) results_dir = Directory( - 'results', - argstr='--rn=%s', + "results", + argstr="--rn=%s", usedefault=True, - desc='directory to store results in') + desc="directory to store results in", + ) class FILMGLSInputSpec507(FILMGLSInputSpec505): threshold = traits.Float( - default_value=-1000., - argstr='--thr=%f', + default_value=-1000.0, + argstr="--thr=%f", position=-1, usedefault=True, - desc='threshold') + desc="threshold", + ) tcon_file = File( - exists=True, - argstr='--con=%s', - desc='contrast file containing T-contrasts') + exists=True, argstr="--con=%s", desc="contrast file containing T-contrasts" + ) fcon_file = File( - exists=True, - argstr='--fcon=%s', - desc='contrast file containing F-contrasts') + exists=True, argstr="--fcon=%s", desc="contrast file containing F-contrasts" + ) mode = traits.Enum( - 'volumetric', - 'surface', - argstr="--mode=%s", - desc="Type of analysis to be done") + "volumetric", "surface", argstr="--mode=%s", desc="Type of analysis to be done" + ) surface = File( exists=True, argstr="--in2=%s", - desc=("input surface for autocorr smoothing in " - "surface-based analyses")) + desc=("input surface for autocorr smoothing in " "surface-based analyses"), + ) class FILMGLSOutputSpec(TraitedSpec): param_estimates = OutputMultiPath( File(exists=True), - desc=('Parameter estimates for each ' - 'column of the design matrix')) + desc=("Parameter estimates for each " "column of the design matrix"), + ) residual4d = File( exists=True, - desc=('Model fit residual mean-squared error for each ' - 'time point')) - dof_file = File(exists=True, desc='degrees of freedom') + desc=("Model fit residual mean-squared error for each " "time point"), + ) + dof_file = File(exists=True, desc="degrees of freedom") sigmasquareds = File( - exists=True, desc='summary of residuals, See Woolrich, et. al., 2001') + exists=True, desc="summary of residuals, See Woolrich, et. 
al., 2001" + ) results_dir = Directory( - exists=True, desc='directory storing model estimation output') + exists=True, desc="directory storing model estimation output" + ) corrections = File( - exists=True, - desc=('statistical corrections used within FILM ' - 'modeling')) - thresholdac = File(exists=True, desc='The FILM autocorrelation parameters') - logfile = File(exists=True, desc='FILM run logfile') + exists=True, desc=("statistical corrections used within FILM " "modeling") + ) + thresholdac = File(exists=True, desc="The FILM autocorrelation parameters") + logfile = File(exists=True, desc="FILM run logfile") class FILMGLSOutputSpec507(TraitedSpec): param_estimates = OutputMultiPath( File(exists=True), - desc=('Parameter estimates for each ' - 'column of the design matrix')) + desc=("Parameter estimates for each " "column of the design matrix"), + ) residual4d = File( exists=True, - desc=('Model fit residual mean-squared error for each ' - 'time point')) - dof_file = File(exists=True, desc='degrees of freedom') + desc=("Model fit residual mean-squared error for each " "time point"), + ) + dof_file = File(exists=True, desc="degrees of freedom") sigmasquareds = File( - exists=True, desc='summary of residuals, See Woolrich, et. al., 2001') + exists=True, desc="summary of residuals, See Woolrich, et. al., 2001" + ) results_dir = Directory( - exists=True, desc='directory storing model estimation output') - thresholdac = File(exists=True, desc='The FILM autocorrelation parameters') - logfile = File(exists=True, desc='FILM run logfile') + exists=True, desc="directory storing model estimation output" + ) + thresholdac = File(exists=True, desc="The FILM autocorrelation parameters") + logfile = File(exists=True, desc="FILM run logfile") copes = OutputMultiPath( - File(exists=True), desc='Contrast estimates for each contrast') + File(exists=True), desc="Contrast estimates for each contrast" + ) varcopes = OutputMultiPath( - File(exists=True), desc='Variance estimates for each contrast') - zstats = OutputMultiPath( - File(exists=True), desc='z-stat file for each contrast') - tstats = OutputMultiPath( - File(exists=True), desc='t-stat file for each contrast') - fstats = OutputMultiPath( - File(exists=True), desc='f-stat file for each contrast') - zfstats = OutputMultiPath( - File(exists=True), desc='z-stat file for each F contrast') + File(exists=True), desc="Variance estimates for each contrast" + ) + zstats = OutputMultiPath(File(exists=True), desc="z-stat file for each contrast") + tstats = OutputMultiPath(File(exists=True), desc="t-stat file for each contrast") + fstats = OutputMultiPath(File(exists=True), desc="f-stat file for each contrast") + zfstats = OutputMultiPath(File(exists=True), desc="z-stat file for each F contrast") class FILMGLS(FSLCommand): @@ -748,27 +819,25 @@ class FILMGLS(FSLCommand): """ - _cmd = 'film_gls' + _cmd = "film_gls" input_spec = FILMGLSInputSpec output_spec = FILMGLSOutputSpec - if Info.version() and LooseVersion(Info.version()) > LooseVersion('5.0.6'): + if Info.version() and LooseVersion(Info.version()) > LooseVersion("5.0.6"): input_spec = FILMGLSInputSpec507 output_spec = FILMGLSOutputSpec507 - elif (Info.version() - and LooseVersion(Info.version()) > LooseVersion('5.0.4')): + elif Info.version() and LooseVersion(Info.version()) > LooseVersion("5.0.4"): input_spec = FILMGLSInputSpec505 def _get_pe_files(self, cwd): files = None if isdefined(self.inputs.design_file): - fp = open(self.inputs.design_file, 'rt') + fp = open(self.inputs.design_file, "rt") for line 
in fp.readlines(): - if line.startswith('/NumWaves'): + if line.startswith("/NumWaves"): numpes = int(line.split()[-1]) files = [] for i in range(numpes): - files.append( - self._gen_fname('pe%d.nii' % (i + 1), cwd=cwd)) + files.append(self._gen_fname("pe%d.nii" % (i + 1), cwd=cwd)) break fp.close() return files @@ -777,16 +846,16 @@ def _get_numcons(self): numtcons = 0 numfcons = 0 if isdefined(self.inputs.tcon_file): - fp = open(self.inputs.tcon_file, 'rt') + fp = open(self.inputs.tcon_file, "rt") for line in fp.readlines(): - if line.startswith('/NumContrasts'): + if line.startswith("/NumContrasts"): numtcons = int(line.split()[-1]) break fp.close() if isdefined(self.inputs.fcon_file): - fp = open(self.inputs.fcon_file, 'rt') + fp = open(self.inputs.fcon_file, "rt") for line in fp.readlines(): - if line.startswith('/NumContrasts'): + if line.startswith("/NumContrasts"): numfcons = int(line.split()[-1]) break fp.close() @@ -796,25 +865,21 @@ def _list_outputs(self): outputs = self._outputs().get() cwd = os.getcwd() results_dir = os.path.join(cwd, self.inputs.results_dir) - outputs['results_dir'] = results_dir + outputs["results_dir"] = results_dir pe_files = self._get_pe_files(results_dir) if pe_files: - outputs['param_estimates'] = pe_files - outputs['residual4d'] = self._gen_fname('res4d.nii', cwd=results_dir) - outputs['dof_file'] = os.path.join(results_dir, 'dof') - outputs['sigmasquareds'] = self._gen_fname( - 'sigmasquareds.nii', cwd=results_dir) - outputs['thresholdac'] = self._gen_fname( - 'threshac1.nii', cwd=results_dir) - if (Info.version() - and LooseVersion(Info.version()) < LooseVersion('5.0.7')): - outputs['corrections'] = self._gen_fname( - 'corrections.nii', cwd=results_dir) - outputs['logfile'] = self._gen_fname( - 'logfile', change_ext=False, cwd=results_dir) - - if (Info.version() - and LooseVersion(Info.version()) > LooseVersion('5.0.6')): + outputs["param_estimates"] = pe_files + outputs["residual4d"] = self._gen_fname("res4d.nii", cwd=results_dir) + outputs["dof_file"] = os.path.join(results_dir, "dof") + outputs["sigmasquareds"] = self._gen_fname("sigmasquareds.nii", cwd=results_dir) + outputs["thresholdac"] = self._gen_fname("threshac1.nii", cwd=results_dir) + if Info.version() and LooseVersion(Info.version()) < LooseVersion("5.0.7"): + outputs["corrections"] = self._gen_fname("corrections.nii", cwd=results_dir) + outputs["logfile"] = self._gen_fname( + "logfile", change_ext=False, cwd=results_dir + ) + + if Info.version() and LooseVersion(Info.version()) > LooseVersion("5.0.6"): pth = results_dir numtcons, numfcons = self._get_numcons() base_contrast = 1 @@ -824,46 +889,47 @@ def _list_outputs(self): tstats = [] for i in range(numtcons): copes.append( - self._gen_fname( - 'cope%d.nii' % (base_contrast + i), cwd=pth)) + self._gen_fname("cope%d.nii" % (base_contrast + i), cwd=pth) + ) varcopes.append( - self._gen_fname( - 'varcope%d.nii' % (base_contrast + i), cwd=pth)) + self._gen_fname("varcope%d.nii" % (base_contrast + i), cwd=pth) + ) zstats.append( - self._gen_fname( - 'zstat%d.nii' % (base_contrast + i), cwd=pth)) + self._gen_fname("zstat%d.nii" % (base_contrast + i), cwd=pth) + ) tstats.append( - self._gen_fname( - 'tstat%d.nii' % (base_contrast + i), cwd=pth)) + self._gen_fname("tstat%d.nii" % (base_contrast + i), cwd=pth) + ) if copes: - outputs['copes'] = copes - outputs['varcopes'] = varcopes - outputs['zstats'] = zstats - outputs['tstats'] = tstats + outputs["copes"] = copes + outputs["varcopes"] = varcopes + outputs["zstats"] = zstats + 
outputs["tstats"] = tstats fstats = [] zfstats = [] for i in range(numfcons): fstats.append( - self._gen_fname( - 'fstat%d.nii' % (base_contrast + i), cwd=pth)) + self._gen_fname("fstat%d.nii" % (base_contrast + i), cwd=pth) + ) zfstats.append( - self._gen_fname( - 'zfstat%d.nii' % (base_contrast + i), cwd=pth)) + self._gen_fname("zfstat%d.nii" % (base_contrast + i), cwd=pth) + ) if fstats: - outputs['fstats'] = fstats - outputs['zfstats'] = zfstats + outputs["fstats"] = fstats + outputs["zfstats"] = zfstats return outputs class FEATRegisterInputSpec(BaseInterfaceInputSpec): feat_dirs = InputMultiPath( - Directory(exists=True), desc="Lower level feat dirs", mandatory=True) + Directory(exists=True), desc="Lower level feat dirs", mandatory=True + ) reg_image = File( exists=True, desc="image to register to (will be treated as standard)", - mandatory=True) - reg_dof = traits.Int( - 12, desc="registration degrees of freedom", usedefault=True) + mandatory=True, + ) + reg_dof = traits.Int(12, desc="registration degrees of freedom", usedefault=True) class FEATRegisterOutputSpec(TraitedSpec): @@ -873,24 +939,25 @@ class FEATRegisterOutputSpec(TraitedSpec): class FEATRegister(BaseInterface): """Register feat directories to a specific standard """ + input_spec = FEATRegisterInputSpec output_spec = FEATRegisterOutputSpec def _run_interface(self, runtime): - fsf_header = load_template('featreg_header.tcl') - fsf_footer = load_template('feat_nongui.tcl') - fsf_dirs = load_template('feat_fe_featdirs.tcl') + fsf_header = load_template("featreg_header.tcl") + fsf_footer = load_template("feat_nongui.tcl") + fsf_dirs = load_template("feat_fe_featdirs.tcl") num_runs = len(self.inputs.feat_dirs) fsf_txt = fsf_header.substitute( num_runs=num_runs, regimage=self.inputs.reg_image, - regdof=self.inputs.reg_dof) + regdof=self.inputs.reg_dof, + ) for i, rundir in enumerate(ensure_list(self.inputs.feat_dirs)): - fsf_txt += fsf_dirs.substitute( - runno=i + 1, rundir=os.path.abspath(rundir)) + fsf_txt += fsf_dirs.substitute(runno=i + 1, rundir=os.path.abspath(rundir)) fsf_txt += fsf_footer.substitute() - f = open(os.path.join(os.getcwd(), 'register.fsf'), 'wt') + f = open(os.path.join(os.getcwd(), "register.fsf"), "wt") f.write(fsf_txt) f.close() @@ -898,111 +965,117 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self._outputs().get() - outputs['fsf_file'] = os.path.abspath( - os.path.join(os.getcwd(), 'register.fsf')) + outputs["fsf_file"] = os.path.abspath(os.path.join(os.getcwd(), "register.fsf")) return outputs class FLAMEOInputSpec(FSLCommandInputSpec): cope_file = File( exists=True, - argstr='--copefile=%s', + argstr="--copefile=%s", mandatory=True, - desc='cope regressor data file') + desc="cope regressor data file", + ) var_cope_file = File( - exists=True, - argstr='--varcopefile=%s', - desc='varcope weightings data file') + exists=True, argstr="--varcopefile=%s", desc="varcope weightings data file" + ) dof_var_cope_file = File( - exists=True, - argstr='--dofvarcopefile=%s', - desc='dof data file for varcope data') + exists=True, argstr="--dofvarcopefile=%s", desc="dof data file for varcope data" + ) mask_file = File( - exists=True, argstr='--maskfile=%s', mandatory=True, desc='mask file') + exists=True, argstr="--maskfile=%s", mandatory=True, desc="mask file" + ) design_file = File( - exists=True, - argstr='--designfile=%s', - mandatory=True, - desc='design matrix file') + exists=True, argstr="--designfile=%s", mandatory=True, desc="design matrix file" + ) t_con_file = File( 
exists=True, - argstr='--tcontrastsfile=%s', + argstr="--tcontrastsfile=%s", mandatory=True, - desc='ascii matrix specifying t-contrasts') + desc="ascii matrix specifying t-contrasts", + ) f_con_file = File( exists=True, - argstr='--fcontrastsfile=%s', - desc='ascii matrix specifying f-contrasts') + argstr="--fcontrastsfile=%s", + desc="ascii matrix specifying f-contrasts", + ) cov_split_file = File( exists=True, - argstr='--covsplitfile=%s', + argstr="--covsplitfile=%s", mandatory=True, - desc='ascii matrix specifying the groups the covariance is split into') + desc="ascii matrix specifying the groups the covariance is split into", + ) run_mode = traits.Enum( - 'fe', - 'ols', - 'flame1', - 'flame12', - argstr='--runmode=%s', + "fe", + "ols", + "flame1", + "flame12", + argstr="--runmode=%s", mandatory=True, - desc='inference to perform') - n_jumps = traits.Int( - argstr='--njumps=%d', desc='number of jumps made by mcmc') + desc="inference to perform", + ) + n_jumps = traits.Int(argstr="--njumps=%d", desc="number of jumps made by mcmc") burnin = traits.Int( - argstr='--burnin=%d', - desc=('number of jumps at start of mcmc to be ' - 'discarded')) + argstr="--burnin=%d", + desc=("number of jumps at start of mcmc to be " "discarded"), + ) sample_every = traits.Int( - argstr='--sampleevery=%d', desc='number of jumps for each sample') - fix_mean = traits.Bool(argstr='--fixmean', desc='fix mean for tfit') + argstr="--sampleevery=%d", desc="number of jumps for each sample" + ) + fix_mean = traits.Bool(argstr="--fixmean", desc="fix mean for tfit") infer_outliers = traits.Bool( - argstr='--inferoutliers', desc='infer outliers - not for fe') - no_pe_outputs = traits.Bool( - argstr='--nopeoutput', desc='do not output pe files') + argstr="--inferoutliers", desc="infer outliers - not for fe" + ) + no_pe_outputs = traits.Bool(argstr="--nopeoutput", desc="do not output pe files") sigma_dofs = traits.Int( - argstr='--sigma_dofs=%d', - desc=('sigma (in mm) to use for Gaussian ' - 'smoothing the DOFs in FLAME 2. Default is ' - '1mm, -1 indicates no smoothing')) + argstr="--sigma_dofs=%d", + desc=( + "sigma (in mm) to use for Gaussian " + "smoothing the DOFs in FLAME 2. Default is " + "1mm, -1 indicates no smoothing" + ), + ) outlier_iter = traits.Int( - argstr='--ioni=%d', - desc=('Number of max iterations to use when ' - 'inferring outliers. Default is 12.')) - log_dir = Directory("stats", argstr='--ld=%s', usedefault=True) # ohinds + argstr="--ioni=%d", + desc=( + "Number of max iterations to use when " "inferring outliers. Default is 12." 
+ ), + ) + log_dir = Directory("stats", argstr="--ld=%s", usedefault=True) # ohinds # no support for ven, vef class FLAMEOOutputSpec(TraitedSpec): pes = OutputMultiPath( File(exists=True), - desc=("Parameter estimates for each column of the " - "design matrix for each voxel")) + desc=( + "Parameter estimates for each column of the " "design matrix for each voxel" + ), + ) res4d = OutputMultiPath( File(exists=True), - desc=("Model fit residual mean-squared error for " - "each time point")) + desc=("Model fit residual mean-squared error for " "each time point"), + ) copes = OutputMultiPath( - File(exists=True), desc="Contrast estimates for each contrast") + File(exists=True), desc="Contrast estimates for each contrast" + ) var_copes = OutputMultiPath( - File(exists=True), desc="Variance estimates for each contrast") - zstats = OutputMultiPath( - File(exists=True), desc="z-stat file for each contrast") - tstats = OutputMultiPath( - File(exists=True), desc="t-stat file for each contrast") - zfstats = OutputMultiPath( - File(exists=True), desc="z stat file for each f contrast") - fstats = OutputMultiPath( - File(exists=True), desc="f-stat file for each contrast") + File(exists=True), desc="Variance estimates for each contrast" + ) + zstats = OutputMultiPath(File(exists=True), desc="z-stat file for each contrast") + tstats = OutputMultiPath(File(exists=True), desc="t-stat file for each contrast") + zfstats = OutputMultiPath(File(exists=True), desc="z stat file for each f contrast") + fstats = OutputMultiPath(File(exists=True), desc="f-stat file for each contrast") mrefvars = OutputMultiPath( - File(exists=True), - desc=("mean random effect variances for each " - "contrast")) + File(exists=True), desc=("mean random effect variances for each " "contrast") + ) tdof = OutputMultiPath( - File(exists=True), desc="temporal dof file for each contrast") - weights = OutputMultiPath( - File(exists=True), desc="weights file for each contrast") + File(exists=True), desc="temporal dof file for each contrast" + ) + weights = OutputMultiPath(File(exists=True), desc="weights file for each contrast") stats_dir = Directory( - File(exists=True), desc="directory storing model estimation output") + File(exists=True), desc="directory storing model estimation output" + ) class FLAMEO(FSLCommand): @@ -1027,36 +1100,39 @@ class FLAMEO(FSLCommand): """ - _cmd = 'flameo' + _cmd = "flameo" input_spec = FLAMEOInputSpec output_spec = FLAMEOOutputSpec - references_ = [{ - 'entry': - BibTeX( - '@article{BeckmannJenkinsonSmith2003,' - 'author={C.F. Beckmann, M. Jenkinson, and S.M. Smith},' - 'title={General multilevel linear modeling for group analysis in FMRI.},' - 'journal={NeuroImage},' - 'volume={20},' - 'pages={1052-1063},' - 'year={2003},' - '}'), - 'tags': ['method'], - }, { - 'entry': - BibTeX( - '@article{WoolrichBehrensBeckmannJenkinsonSmith2004,' - 'author={M.W. Woolrich, T.E. Behrens, ' - 'C.F. Beckmann, M. Jenkinson, and S.M. Smith},' - 'title={Multilevel linear modelling for FMRI group analysis using Bayesian inference.},' - 'journal={NeuroImage},' - 'volume={21},' - 'pages={1732-1747},' - 'year={2004},' - '}'), - 'tags': ['method'], - }] + references_ = [ + { + "entry": BibTeX( + "@article{BeckmannJenkinsonSmith2003," + "author={C.F. Beckmann, M. Jenkinson, and S.M. 
Smith}," + "title={General multilevel linear modeling for group analysis in FMRI.}," + "journal={NeuroImage}," + "volume={20}," + "pages={1052-1063}," + "year={2003}," + "}" + ), + "tags": ["method"], + }, + { + "entry": BibTeX( + "@article{WoolrichBehrensBeckmannJenkinsonSmith2004," + "author={M.W. Woolrich, T.E. Behrens, " + "C.F. Beckmann, M. Jenkinson, and S.M. Smith}," + "title={Multilevel linear modelling for FMRI group analysis using Bayesian inference.}," + "journal={NeuroImage}," + "volume={21}," + "pages={1732-1747}," + "year={2004}," + "}" + ), + "tags": ["method"], + }, + ] # ohinds: 2010-04-06 def _run_interface(self, runtime): @@ -1073,61 +1149,54 @@ def _list_outputs(self): outputs = self._outputs().get() pth = os.path.join(os.getcwd(), self.inputs.log_dir) - pes = human_order_sorted(glob(os.path.join(pth, 'pe[0-9]*.*'))) - assert len(pes) >= 1, 'No pe volumes generated by FSL Estimate' - outputs['pes'] = pes + pes = human_order_sorted(glob(os.path.join(pth, "pe[0-9]*.*"))) + assert len(pes) >= 1, "No pe volumes generated by FSL Estimate" + outputs["pes"] = pes - res4d = human_order_sorted(glob(os.path.join(pth, 'res4d.*'))) - assert len(res4d) == 1, 'No residual volume generated by FSL Estimate' - outputs['res4d'] = res4d[0] + res4d = human_order_sorted(glob(os.path.join(pth, "res4d.*"))) + assert len(res4d) == 1, "No residual volume generated by FSL Estimate" + outputs["res4d"] = res4d[0] - copes = human_order_sorted(glob(os.path.join(pth, 'cope[0-9]*.*'))) - assert len(copes) >= 1, 'No cope volumes generated by FSL CEstimate' - outputs['copes'] = copes + copes = human_order_sorted(glob(os.path.join(pth, "cope[0-9]*.*"))) + assert len(copes) >= 1, "No cope volumes generated by FSL CEstimate" + outputs["copes"] = copes - var_copes = human_order_sorted( - glob(os.path.join(pth, 'varcope[0-9]*.*'))) - assert len( - var_copes) >= 1, 'No varcope volumes generated by FSL CEstimate' - outputs['var_copes'] = var_copes + var_copes = human_order_sorted(glob(os.path.join(pth, "varcope[0-9]*.*"))) + assert len(var_copes) >= 1, "No varcope volumes generated by FSL CEstimate" + outputs["var_copes"] = var_copes - zstats = human_order_sorted(glob(os.path.join(pth, 'zstat[0-9]*.*'))) - assert len(zstats) >= 1, 'No zstat volumes generated by FSL CEstimate' - outputs['zstats'] = zstats + zstats = human_order_sorted(glob(os.path.join(pth, "zstat[0-9]*.*"))) + assert len(zstats) >= 1, "No zstat volumes generated by FSL CEstimate" + outputs["zstats"] = zstats if isdefined(self.inputs.f_con_file): - zfstats = human_order_sorted( - glob(os.path.join(pth, 'zfstat[0-9]*.*'))) - assert len( - zfstats) >= 1, 'No zfstat volumes generated by FSL CEstimate' - outputs['zfstats'] = zfstats - - fstats = human_order_sorted( - glob(os.path.join(pth, 'fstat[0-9]*.*'))) - assert len( - fstats) >= 1, 'No fstat volumes generated by FSL CEstimate' - outputs['fstats'] = fstats - - tstats = human_order_sorted(glob(os.path.join(pth, 'tstat[0-9]*.*'))) - assert len(tstats) >= 1, 'No tstat volumes generated by FSL CEstimate' - outputs['tstats'] = tstats + zfstats = human_order_sorted(glob(os.path.join(pth, "zfstat[0-9]*.*"))) + assert len(zfstats) >= 1, "No zfstat volumes generated by FSL CEstimate" + outputs["zfstats"] = zfstats + + fstats = human_order_sorted(glob(os.path.join(pth, "fstat[0-9]*.*"))) + assert len(fstats) >= 1, "No fstat volumes generated by FSL CEstimate" + outputs["fstats"] = fstats + + tstats = human_order_sorted(glob(os.path.join(pth, "tstat[0-9]*.*"))) + assert len(tstats) >= 1, "No tstat 
volumes generated by FSL CEstimate" + outputs["tstats"] = tstats mrefs = human_order_sorted( - glob(os.path.join(pth, 'mean_random_effects_var[0-9]*.*'))) - assert len( - mrefs) >= 1, 'No mean random effects volumes generated by FLAMEO' - outputs['mrefvars'] = mrefs + glob(os.path.join(pth, "mean_random_effects_var[0-9]*.*")) + ) + assert len(mrefs) >= 1, "No mean random effects volumes generated by FLAMEO" + outputs["mrefvars"] = mrefs - tdof = human_order_sorted(glob(os.path.join(pth, 'tdof_t[0-9]*.*'))) - assert len(tdof) >= 1, 'No T dof volumes generated by FLAMEO' - outputs['tdof'] = tdof + tdof = human_order_sorted(glob(os.path.join(pth, "tdof_t[0-9]*.*"))) + assert len(tdof) >= 1, "No T dof volumes generated by FLAMEO" + outputs["tdof"] = tdof - weights = human_order_sorted( - glob(os.path.join(pth, 'weights[0-9]*.*'))) - assert len(weights) >= 1, 'No weight volumes generated by FLAMEO' - outputs['weights'] = weights + weights = human_order_sorted(glob(os.path.join(pth, "weights[0-9]*.*"))) + assert len(weights) >= 1, "No weight volumes generated by FLAMEO" + outputs["weights"] = weights - outputs['stats_dir'] = pth + outputs["stats_dir"] = pth return outputs @@ -1136,66 +1205,66 @@ class ContrastMgrInputSpec(FSLCommandInputSpec): tcon_file = File( exists=True, mandatory=True, - argstr='%s', + argstr="%s", position=-1, - desc='contrast file containing T-contrasts') + desc="contrast file containing T-contrasts", + ) fcon_file = File( - exists=True, - argstr='-f %s', - desc='contrast file containing F-contrasts') + exists=True, argstr="-f %s", desc="contrast file containing F-contrasts" + ) param_estimates = InputMultiPath( File(exists=True), - argstr='', + argstr="", copyfile=False, mandatory=True, - desc=('Parameter estimates for each ' - 'column of the design matrix')) + desc=("Parameter estimates for each " "column of the design matrix"), + ) corrections = File( exists=True, copyfile=False, mandatory=True, - desc='statistical corrections used within FILM modelling') + desc="statistical corrections used within FILM modelling", + ) dof_file = File( exists=True, - argstr='', + argstr="", copyfile=False, mandatory=True, - desc='degrees of freedom') + desc="degrees of freedom", + ) sigmasquareds = File( exists=True, - argstr='', + argstr="", position=-2, copyfile=False, mandatory=True, - desc=('summary of residuals, See Woolrich, et. al., ' - '2001')) + desc=("summary of residuals, See Woolrich, et. 
al., " "2001"), + ) contrast_num = traits.Range( - low=1, - argstr='-cope', - desc=('contrast number to start labeling ' - 'copes from')) + low=1, argstr="-cope", desc=("contrast number to start labeling " "copes from") + ) suffix = traits.Str( - argstr='-suffix %s', - desc=('suffix to put on the end of the cope filename ' - 'before the contrast number, default is ' - 'nothing')) + argstr="-suffix %s", + desc=( + "suffix to put on the end of the cope filename " + "before the contrast number, default is " + "nothing" + ), + ) class ContrastMgrOutputSpec(TraitedSpec): copes = OutputMultiPath( - File(exists=True), desc='Contrast estimates for each contrast') + File(exists=True), desc="Contrast estimates for each contrast" + ) varcopes = OutputMultiPath( - File(exists=True), desc='Variance estimates for each contrast') - zstats = OutputMultiPath( - File(exists=True), desc='z-stat file for each contrast') - tstats = OutputMultiPath( - File(exists=True), desc='t-stat file for each contrast') - fstats = OutputMultiPath( - File(exists=True), desc='f-stat file for each contrast') - zfstats = OutputMultiPath( - File(exists=True), desc='z-stat file for each F contrast') - neffs = OutputMultiPath( - File(exists=True), desc='neff file ?? for each contrast') + File(exists=True), desc="Variance estimates for each contrast" + ) + zstats = OutputMultiPath(File(exists=True), desc="z-stat file for each contrast") + tstats = OutputMultiPath(File(exists=True), desc="t-stat file for each contrast") + fstats = OutputMultiPath(File(exists=True), desc="f-stat file for each contrast") + zfstats = OutputMultiPath(File(exists=True), desc="z-stat file for each F contrast") + neffs = OutputMultiPath(File(exists=True), desc="neff file ?? for each contrast") class ContrastMgr(FSLCommand): @@ -1205,10 +1274,10 @@ class ContrastMgr(FSLCommand): same location. This has deprecated for FSL versions 5.0.7+ as the necessary corrections file is no longer generated by FILMGLS. 
""" - if Info.version() and LooseVersion( - Info.version()) >= LooseVersion("5.0.7"): + + if Info.version() and LooseVersion(Info.version()) >= LooseVersion("5.0.7"): DeprecationWarning("ContrastMgr is deprecated in FSL 5.0.7+") - _cmd = 'contrast_mgr' + _cmd = "contrast_mgr" input_spec = ContrastMgrInputSpec output_spec = ContrastMgrOutputSpec @@ -1222,33 +1291,32 @@ def _run_interface(self, runtime): return runtime def _format_arg(self, name, trait_spec, value): - if name in ['param_estimates', 'corrections', 'dof_file']: - return '' - elif name in ['sigmasquareds']: + if name in ["param_estimates", "corrections", "dof_file"]: + return "" + elif name in ["sigmasquareds"]: path, _ = os.path.split(value) return path else: - return super(ContrastMgr, self)._format_arg( - name, trait_spec, value) + return super(ContrastMgr, self)._format_arg(name, trait_spec, value) def _get_design_root(self, infile): _, fname = os.path.split(infile) - return fname.split('.')[0] + return fname.split(".")[0] def _get_numcons(self): numtcons = 0 numfcons = 0 if isdefined(self.inputs.tcon_file): - fp = open(self.inputs.tcon_file, 'rt') + fp = open(self.inputs.tcon_file, "rt") for line in fp.readlines(): - if line.startswith('/NumContrasts'): + if line.startswith("/NumContrasts"): numtcons = int(line.split()[-1]) break fp.close() if isdefined(self.inputs.fcon_file): - fp = open(self.inputs.fcon_file, 'rt') + fp = open(self.inputs.fcon_file, "rt") for line in fp.readlines(): - if line.startswith('/NumContrasts'): + if line.startswith("/NumContrasts"): numfcons = int(line.split()[-1]) break fp.close() @@ -1267,45 +1335,42 @@ def _list_outputs(self): tstats = [] neffs = [] for i in range(numtcons): - copes.append( - self._gen_fname('cope%d.nii' % (base_contrast + i), cwd=pth)) + copes.append(self._gen_fname("cope%d.nii" % (base_contrast + i), cwd=pth)) varcopes.append( - self._gen_fname( - 'varcope%d.nii' % (base_contrast + i), cwd=pth)) - zstats.append( - self._gen_fname('zstat%d.nii' % (base_contrast + i), cwd=pth)) - tstats.append( - self._gen_fname('tstat%d.nii' % (base_contrast + i), cwd=pth)) - neffs.append( - self._gen_fname('neff%d.nii' % (base_contrast + i), cwd=pth)) + self._gen_fname("varcope%d.nii" % (base_contrast + i), cwd=pth) + ) + zstats.append(self._gen_fname("zstat%d.nii" % (base_contrast + i), cwd=pth)) + tstats.append(self._gen_fname("tstat%d.nii" % (base_contrast + i), cwd=pth)) + neffs.append(self._gen_fname("neff%d.nii" % (base_contrast + i), cwd=pth)) if copes: - outputs['copes'] = copes - outputs['varcopes'] = varcopes - outputs['zstats'] = zstats - outputs['tstats'] = tstats - outputs['neffs'] = neffs + outputs["copes"] = copes + outputs["varcopes"] = varcopes + outputs["zstats"] = zstats + outputs["tstats"] = tstats + outputs["neffs"] = neffs fstats = [] zfstats = [] for i in range(numfcons): - fstats.append( - self._gen_fname('fstat%d.nii' % (base_contrast + i), cwd=pth)) + fstats.append(self._gen_fname("fstat%d.nii" % (base_contrast + i), cwd=pth)) zfstats.append( - self._gen_fname('zfstat%d.nii' % (base_contrast + i), cwd=pth)) + self._gen_fname("zfstat%d.nii" % (base_contrast + i), cwd=pth) + ) if fstats: - outputs['fstats'] = fstats - outputs['zfstats'] = zfstats + outputs["fstats"] = fstats + outputs["zfstats"] = zfstats return outputs class L2ModelInputSpec(BaseInterfaceInputSpec): num_copes = traits.Range( - low=1, mandatory=True, desc='number of copes to be combined') + low=1, mandatory=True, desc="number of copes to be combined" + ) class L2ModelOutputSpec(TraitedSpec): - 
design_mat = File(exists=True, desc='design matrix file') - design_con = File(exists=True, desc='design contrast file') - design_grp = File(exists=True, desc='design group file') + design_mat = File(exists=True, desc="design matrix file") + design_con = File(exists=True, desc="design contrast file") + design_grp = File(exists=True, desc="design group file") class L2Model(BaseInterface): @@ -1325,43 +1390,44 @@ class L2Model(BaseInterface): def _run_interface(self, runtime): cwd = os.getcwd() mat_txt = [ - '/NumWaves 1', '/NumPoints {:d}'.format(self.inputs.num_copes), - '/PPheights 1', '', '/Matrix' + "/NumWaves 1", + "/NumPoints {:d}".format(self.inputs.num_copes), + "/PPheights 1", + "", + "/Matrix", ] for i in range(self.inputs.num_copes): - mat_txt += ['1'] - mat_txt = '\n'.join(mat_txt) + mat_txt += ["1"] + mat_txt = "\n".join(mat_txt) con_txt = [ - '/ContrastName1 group mean', - '/NumWaves 1', - '/NumContrasts 1', - '/PPheights 1', - '/RequiredEffect 100', # XX where does this + "/ContrastName1 group mean", + "/NumWaves 1", + "/NumContrasts 1", + "/PPheights 1", + "/RequiredEffect 100", # XX where does this # number come from - '', - '/Matrix', - '1' + "", + "/Matrix", + "1", ] - con_txt = '\n'.join(con_txt) + con_txt = "\n".join(con_txt) grp_txt = [ - '/NumWaves 1', '/NumPoints {:d}'.format(self.inputs.num_copes), - '', '/Matrix' + "/NumWaves 1", + "/NumPoints {:d}".format(self.inputs.num_copes), + "", + "/Matrix", ] for i in range(self.inputs.num_copes): - grp_txt += ['1'] - grp_txt = '\n'.join(grp_txt) + grp_txt += ["1"] + grp_txt = "\n".join(grp_txt) - txt = { - 'design.mat': mat_txt, - 'design.con': con_txt, - 'design.grp': grp_txt - } + txt = {"design.mat": mat_txt, "design.con": con_txt, "design.grp": grp_txt} # write design files - for i, name in enumerate(['design.mat', 'design.con', 'design.grp']): - f = open(os.path.join(cwd, name), 'wt') + for i, name in enumerate(["design.mat", "design.con", "design.grp"]): + f = open(os.path.join(cwd, name), "wt") f.write(txt[name]) f.close() @@ -1370,43 +1436,55 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self._outputs().get() for field in list(outputs.keys()): - outputs[field] = os.path.join(os.getcwd(), field.replace('_', '.')) + outputs[field] = os.path.join(os.getcwd(), field.replace("_", ".")) return outputs class MultipleRegressDesignInputSpec(BaseInterfaceInputSpec): contrasts = traits.List( traits.Either( - traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), - traits.List(traits.Float)), - traits.Tuple(traits.Str, traits.Enum('F'), - traits.List( - traits.Tuple(traits.Str, traits.Enum('T'), - traits.List(traits.Str), - traits.List(traits.Float)), ))), + traits.Tuple( + traits.Str, + traits.Enum("T"), + traits.List(traits.Str), + traits.List(traits.Float), + ), + traits.Tuple( + traits.Str, + traits.Enum("F"), + traits.List( + traits.Tuple( + traits.Str, + traits.Enum("T"), + traits.List(traits.Str), + traits.List(traits.Float), + ), + ), + ), + ), mandatory=True, desc="List of contrasts with each contrast being a list of the form - \ [('name', 'stat', [condition list], [weight list])]. if \ session list is None or not provided, all sessions are used. 
For F \ contrasts, the condition list should contain previously defined \ -T-contrasts without any weight list.") +T-contrasts without any weight list.", + ) regressors = traits.Dict( traits.Str, traits.List(traits.Float), mandatory=True, - desc=('dictionary containing named lists of ' - 'regressors')) + desc=("dictionary containing named lists of " "regressors"), + ) groups = traits.List( - traits.Int, - desc=('list of group identifiers (defaults to single ' - 'group)')) + traits.Int, desc=("list of group identifiers (defaults to single " "group)") + ) class MultipleRegressDesignOutputSpec(TraitedSpec): - design_mat = File(exists=True, desc='design matrix file') - design_con = File(exists=True, desc='design t-contrast file') - design_fts = File(exists=True, desc='design f-contrast file') - design_grp = File(exists=True, desc='design group file') + design_mat = File(exists=True, desc="design matrix file") + design_con = File(exists=True, desc="design t-contrast file") + design_fts = File(exists=True, desc="design f-contrast file") + design_grp = File(exists=True, desc="design group file") class MultipleRegressDesign(BaseInterface): @@ -1438,13 +1516,10 @@ def _run_interface(self, runtime): regs = sorted(self.inputs.regressors.keys()) nwaves = len(regs) npoints = len(self.inputs.regressors[regs[0]]) - ntcons = sum([1 for con in self.inputs.contrasts if con[1] == 'T']) - nfcons = sum([1 for con in self.inputs.contrasts if con[1] == 'F']) + ntcons = sum([1 for con in self.inputs.contrasts if con[1] == "T"]) + nfcons = sum([1 for con in self.inputs.contrasts if con[1] == "F"]) # write mat file - mat_txt = [ - '/NumWaves %d' % nwaves, - '/NumPoints %d' % npoints - ] + mat_txt = ["/NumWaves %d" % nwaves, "/NumPoints %d" % npoints] ppheights = [] for reg in regs: maxreg = np.max(self.inputs.regressors[reg]) @@ -1453,77 +1528,76 @@ def _run_interface(self, runtime): regheight = max([abs(minreg), abs(maxreg)]) else: regheight = abs(maxreg - minreg) - ppheights.append('%e' % regheight) - mat_txt += ['/PPheights ' + ' '.join(ppheights)] - mat_txt += ['', '/Matrix'] + ppheights.append("%e" % regheight) + mat_txt += ["/PPheights " + " ".join(ppheights)] + mat_txt += ["", "/Matrix"] for cidx in range(npoints): - mat_txt.append(' '.join( - ['%e' % self.inputs.regressors[key][cidx] for key in regs])) - mat_txt = '\n'.join(mat_txt) + '\n' + mat_txt.append( + " ".join(["%e" % self.inputs.regressors[key][cidx] for key in regs]) + ) + mat_txt = "\n".join(mat_txt) + "\n" # write t-con file con_txt = [] counter = 0 tconmap = {} for conidx, con in enumerate(self.inputs.contrasts): - if con[1] == 'T': + if con[1] == "T": tconmap[conidx] = counter counter += 1 - con_txt += ['/ContrastName%d %s' % (counter, con[0])] + con_txt += ["/ContrastName%d %s" % (counter, con[0])] con_txt += [ - '/NumWaves %d' % nwaves, - '/NumContrasts %d' % ntcons, - '/PPheights %s' % ' '.join( - ['%e' % 1 for i in range(counter)]), - '/RequiredEffect %s' % ' '.join( - ['%.3f' % 100 for i in range(counter)]), '', '/Matrix' + "/NumWaves %d" % nwaves, + "/NumContrasts %d" % ntcons, + "/PPheights %s" % " ".join(["%e" % 1 for i in range(counter)]), + "/RequiredEffect %s" % " ".join(["%.3f" % 100 for i in range(counter)]), + "", + "/Matrix", ] for idx in sorted(tconmap.keys()): convals = np.zeros((nwaves, 1)) for regidx, reg in enumerate(self.inputs.contrasts[idx][2]): - convals[regs.index(reg)] = self.inputs.contrasts[idx][3][ - regidx] - con_txt.append(' '.join(['%e' % val for val in convals])) - con_txt = '\n'.join(con_txt) + '\n' + 
convals[regs.index(reg)] = self.inputs.contrasts[idx][3][regidx] + con_txt.append(" ".join(["%e" % val for val in convals])) + con_txt = "\n".join(con_txt) + "\n" # write f-con file - fcon_txt = '' + fcon_txt = "" if nfcons: fcon_txt = [ - '/NumWaves %d' % ntcons, - '/NumContrasts %d' % nfcons, '', '/Matrix' + "/NumWaves %d" % ntcons, + "/NumContrasts %d" % nfcons, + "", + "/Matrix", ] for conidx, con in enumerate(self.inputs.contrasts): - if con[1] == 'F': + if con[1] == "F": convals = np.zeros((ntcons, 1)) for tcon in con[2]: convals[tconmap[self.inputs.contrasts.index(tcon)]] = 1 - fcon_txt.append(' '.join(['%d' % val for val in convals])) - fcon_txt = '\n'.join(fcon_txt) - fcon_txt += '\n' + fcon_txt.append(" ".join(["%d" % val for val in convals])) + fcon_txt = "\n".join(fcon_txt) + fcon_txt += "\n" # write group file - grp_txt = [ - '/NumWaves 1', - '/NumPoints %d' % npoints, '', '/Matrix' - ] + grp_txt = ["/NumWaves 1", "/NumPoints %d" % npoints, "", "/Matrix"] for i in range(npoints): if isdefined(self.inputs.groups): - grp_txt += ['%d' % self.inputs.groups[i]] + grp_txt += ["%d" % self.inputs.groups[i]] else: - grp_txt += ['1'] - grp_txt = '\n'.join(grp_txt) + '\n' + grp_txt += ["1"] + grp_txt = "\n".join(grp_txt) + "\n" txt = { - 'design.mat': mat_txt, - 'design.con': con_txt, - 'design.fts': fcon_txt, - 'design.grp': grp_txt + "design.mat": mat_txt, + "design.con": con_txt, + "design.fts": fcon_txt, + "design.grp": grp_txt, } # write design files for key, val in list(txt.items()): - if ('fts' in key) and (nfcons == 0): + if ("fts" in key) and (nfcons == 0): continue - filename = key.replace('_', '.') - f = open(os.path.join(cwd, filename), 'wt') + filename = key.replace("_", ".") + f = open(os.path.join(cwd, filename), "wt") f.write(val) f.close() @@ -1531,11 +1605,11 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self._outputs().get() - nfcons = sum([1 for con in self.inputs.contrasts if con[1] == 'F']) + nfcons = sum([1 for con in self.inputs.contrasts if con[1] == "F"]) for field in list(outputs.keys()): - if ('fts' in field) and (nfcons == 0): + if ("fts" in field) and (nfcons == 0): continue - outputs[field] = os.path.join(os.getcwd(), field.replace('_', '.')) + outputs[field] = os.path.join(os.getcwd(), field.replace("_", ".")) return outputs @@ -1546,18 +1620,19 @@ class SMMInputSpec(FSLCommandInputSpec): argstr='--sdf="%s"', mandatory=True, desc="statistics spatial map", - copyfile=False) + copyfile=False, + ) mask = File( exists=True, position=1, argstr='--mask="%s"', mandatory=True, desc="mask file", - copyfile=False) + copyfile=False, + ) no_deactivation_class = traits.Bool( - position=2, - argstr="--zfstatmode", - desc="enforces no deactivation class") + position=2, argstr="--zfstatmode", desc="enforces no deactivation class" + ) class SMMOutputSpec(TraitedSpec): @@ -1567,27 +1642,29 @@ class SMMOutputSpec(TraitedSpec): class SMM(FSLCommand): - ''' + """ Spatial Mixture Modelling. For more detail on the spatial mixture modelling see Mixture Models with Adaptive Spatial Regularisation for Segmentation with an Application to FMRI Data; Woolrich, M., Behrens, T., Beckmann, C., and Smith, S.; IEEE Trans. Medical Imaging, 24(1):1-11, 2005. 
- ''' - _cmd = 'mm --ld=logdir' + """ + + _cmd = "mm --ld=logdir" input_spec = SMMInputSpec output_spec = SMMOutputSpec def _list_outputs(self): outputs = self._outputs().get() # TODO get the true logdir from the stdout - outputs['null_p_map'] = self._gen_fname( - basename="w1_mean", cwd="logdir") - outputs['activation_p_map'] = self._gen_fname( - basename="w2_mean", cwd="logdir") - if (not isdefined(self.inputs.no_deactivation_class) - or not self.inputs.no_deactivation_class): - outputs['deactivation_p_map'] = self._gen_fname( - basename="w3_mean", cwd="logdir") + outputs["null_p_map"] = self._gen_fname(basename="w1_mean", cwd="logdir") + outputs["activation_p_map"] = self._gen_fname(basename="w2_mean", cwd="logdir") + if ( + not isdefined(self.inputs.no_deactivation_class) + or not self.inputs.no_deactivation_class + ): + outputs["deactivation_p_map"] = self._gen_fname( + basename="w3_mean", cwd="logdir" + ) return outputs @@ -1598,128 +1675,144 @@ class MELODICInputSpec(FSLCommandInputSpec): mandatory=True, position=0, desc="input file names (either single file name or a list)", - sep=",") - out_dir = Directory( - argstr="-o %s", desc="output directory name", genfile=True) - mask = File( - exists=True, argstr="-m %s", desc="file name of mask for thresholding") + sep=",", + ) + out_dir = Directory(argstr="-o %s", desc="output directory name", genfile=True) + mask = File(exists=True, argstr="-m %s", desc="file name of mask for thresholding") no_mask = traits.Bool(argstr="--nomask", desc="switch off masking") - update_mask = traits.Bool( - argstr="--update_mask", desc="switch off mask updating") + update_mask = traits.Bool(argstr="--update_mask", desc="switch off mask updating") no_bet = traits.Bool(argstr="--nobet", desc="switch off BET") bg_threshold = traits.Float( argstr="--bgthreshold=%f", - desc=("brain/non-brain threshold used to mask non-brain voxels, as a " - "percentage (only if --nobet selected)")) + desc=( + "brain/non-brain threshold used to mask non-brain voxels, as a " + "percentage (only if --nobet selected)" + ), + ) dim = traits.Int( argstr="-d %d", - desc=("dimensionality reduction into #num dimensions (default: " - "automatic estimation)")) + desc=( + "dimensionality reduction into #num dimensions (default: " + "automatic estimation)" + ), + ) dim_est = traits.Str( argstr="--dimest=%s", - desc=("use specific dim. estimation technique: lap, " - "bic, mdl, aic, mean (default: lap)")) - sep_whiten = traits.Bool( - argstr="--sep_whiten", desc="switch on separate whitening") + desc=( + "use specific dim. 
estimation technique: lap, " + "bic, mdl, aic, mean (default: lap)" + ), + ) + sep_whiten = traits.Bool(argstr="--sep_whiten", desc="switch on separate whitening") sep_vn = traits.Bool( - argstr="--sep_vn", desc="switch off joined variance normalization") + argstr="--sep_vn", desc="switch off joined variance normalization" + ) migp = traits.Bool(argstr="--migp", desc="switch on MIGP data reduction") - migpN = traits.Int( - argstr="--migpN %d", desc="number of internal Eigenmaps") + migpN = traits.Int(argstr="--migpN %d", desc="number of internal Eigenmaps") migp_shuffle = traits.Bool( - argstr="--migp_shuffle", - desc="randomise MIGP file order (default: TRUE)") + argstr="--migp_shuffle", desc="randomise MIGP file order (default: TRUE)" + ) migp_factor = traits.Int( argstr="--migp_factor %d", - desc= - "Internal Factor of mem-threshold relative to number of Eigenmaps (default: 2)" + desc="Internal Factor of mem-threshold relative to number of Eigenmaps (default: 2)", ) num_ICs = traits.Int( - argstr="-n %d", - desc="number of IC's to extract (for deflation approach)") + argstr="-n %d", desc="number of IC's to extract (for deflation approach)" + ) approach = traits.Str( argstr="-a %s", desc="approach for decomposition, 2D: defl, symm (default), 3D: tica " - "(default), concat") + "(default), concat", + ) non_linearity = traits.Str( - argstr="--nl=%s", desc="nonlinearity: gauss, tanh, pow3, pow4") - var_norm = traits.Bool( - argstr="--vn", desc="switch off variance normalization") + argstr="--nl=%s", desc="nonlinearity: gauss, tanh, pow3, pow4" + ) + var_norm = traits.Bool(argstr="--vn", desc="switch off variance normalization") pbsc = traits.Bool( - argstr="--pbsc", - desc="switch off conversion to percent BOLD signal change") + argstr="--pbsc", desc="switch off conversion to percent BOLD signal change" + ) cov_weight = traits.Float( argstr="--covarweight=%f", - desc=("voxel-wise weights for the covariance matrix (e.g. " - "segmentation information)")) + desc=( + "voxel-wise weights for the covariance matrix (e.g. 
" + "segmentation information)" + ), + ) epsilon = traits.Float(argstr="--eps=%f", desc="minimum error change") epsilonS = traits.Float( - argstr="--epsS=%f", - desc="minimum error change for rank-1 approximation in TICA") + argstr="--epsS=%f", desc="minimum error change for rank-1 approximation in TICA" + ) maxit = traits.Int( - argstr="--maxit=%d", - desc="maximum number of iterations before restart") + argstr="--maxit=%d", desc="maximum number of iterations before restart" + ) max_restart = traits.Int( - argstr="--maxrestart=%d", desc="maximum number of restarts") + argstr="--maxrestart=%d", desc="maximum number of restarts" + ) mm_thresh = traits.Float( - argstr="--mmthresh=%f", - desc="threshold for Mixture Model based inference") + argstr="--mmthresh=%f", desc="threshold for Mixture Model based inference" + ) no_mm = traits.Bool( - argstr="--no_mm", desc="switch off mixture modelling on IC maps") + argstr="--no_mm", desc="switch off mixture modelling on IC maps" + ) ICs = File( exists=True, argstr="--ICs=%s", - desc="filename of the IC components file for mixture modelling") + desc="filename of the IC components file for mixture modelling", + ) mix = File( exists=True, argstr="--mix=%s", - desc="mixing matrix for mixture modelling / filtering") + desc="mixing matrix for mixture modelling / filtering", + ) smode = File( exists=True, argstr="--smode=%s", - desc="matrix of session modes for report generation") + desc="matrix of session modes for report generation", + ) rem_cmp = traits.List( - traits.Int, argstr="-f %d", desc="component numbers to remove") + traits.Int, argstr="-f %d", desc="component numbers to remove" + ) report = traits.Bool(argstr="--report", desc="generate Melodic web report") bg_image = File( exists=True, argstr="--bgimage=%s", - desc="specify background image for report (default: mean image)") + desc="specify background image for report (default: mean image)", + ) tr_sec = traits.Float(argstr="--tr=%f", desc="TR in seconds") log_power = traits.Bool( - argstr="--logPower", - desc="calculate log of power for frequency spectrum") + argstr="--logPower", desc="calculate log of power for frequency spectrum" + ) t_des = File( - exists=True, - argstr="--Tdes=%s", - desc="design matrix across time-domain") + exists=True, argstr="--Tdes=%s", desc="design matrix across time-domain" + ) t_con = File( - exists=True, - argstr="--Tcon=%s", - desc="t-contrast matrix across time-domain") + exists=True, argstr="--Tcon=%s", desc="t-contrast matrix across time-domain" + ) s_des = File( - exists=True, - argstr="--Sdes=%s", - desc="design matrix across subject-domain") + exists=True, argstr="--Sdes=%s", desc="design matrix across subject-domain" + ) s_con = File( - exists=True, - argstr="--Scon=%s", - desc="t-contrast matrix across subject-domain") + exists=True, argstr="--Scon=%s", desc="t-contrast matrix across subject-domain" + ) out_all = traits.Bool(argstr="--Oall", desc="output everything") out_unmix = traits.Bool(argstr="--Ounmix", desc="output unmixing matrix") out_stats = traits.Bool( - argstr="--Ostats", desc="output thresholded maps and probability maps") + argstr="--Ostats", desc="output thresholded maps and probability maps" + ) out_pca = traits.Bool(argstr="--Opca", desc="output PCA results") out_white = traits.Bool( - argstr="--Owhite", desc="output whitening/dewhitening matrices") + argstr="--Owhite", desc="output whitening/dewhitening matrices" + ) out_orig = traits.Bool(argstr="--Oorig", desc="output the original ICs") out_mean = traits.Bool(argstr="--Omean", 
desc="output mean volume") report_maps = traits.Str( argstr="--report_maps=%s", - desc="control string for spatial map images (see slicer)") + desc="control string for spatial map images (see slicer)", + ) remove_deriv = traits.Bool( argstr="--remove_deriv", - desc="removes every second entry in paradigm file (EV derivatives)") + desc="removes every second entry in paradigm file (EV derivatives)", + ) class MELODICOutputSpec(TraitedSpec): @@ -1753,18 +1846,19 @@ class MELODIC(FSLCommand): """ + input_spec = MELODICInputSpec output_spec = MELODICOutputSpec - _cmd = 'melodic' + _cmd = "melodic" def _list_outputs(self): outputs = self.output_spec().get() if isdefined(self.inputs.out_dir): - outputs['out_dir'] = os.path.abspath(self.inputs.out_dir) + outputs["out_dir"] = os.path.abspath(self.inputs.out_dir) else: - outputs['out_dir'] = self._gen_filename("out_dir") + outputs["out_dir"] = self._gen_filename("out_dir") if isdefined(self.inputs.report) and self.inputs.report: - outputs['report_dir'] = os.path.join(outputs['out_dir'], "report") + outputs["report_dir"] = os.path.join(outputs["out_dir"], "report") return outputs def _gen_filename(self, name): @@ -1774,28 +1868,26 @@ def _gen_filename(self, name): class SmoothEstimateInputSpec(FSLCommandInputSpec): dof = traits.Int( - argstr='--dof=%d', + argstr="--dof=%d", mandatory=True, - xor=['zstat_file'], - desc='number of degrees of freedom') + xor=["zstat_file"], + desc="number of degrees of freedom", + ) mask_file = File( - argstr='--mask=%s', - exists=True, - mandatory=True, - desc='brain mask volume') + argstr="--mask=%s", exists=True, mandatory=True, desc="brain mask volume" + ) residual_fit_file = File( - argstr='--res=%s', - exists=True, - requires=['dof'], - desc='residual-fit image file') + argstr="--res=%s", exists=True, requires=["dof"], desc="residual-fit image file" + ) zstat_file = File( - argstr='--zstat=%s', exists=True, xor=['dof'], desc='zstat image file') + argstr="--zstat=%s", exists=True, xor=["dof"], desc="zstat image file" + ) class SmoothEstimateOutputSpec(TraitedSpec): - dlh = traits.Float(desc='smoothness estimate sqrt(det(Lambda))') - volume = traits.Int(desc='number of voxels in mask') - resels = traits.Float(desc='number of resels') + dlh = traits.Float(desc="smoothness estimate sqrt(det(Lambda))") + volume = traits.Int(desc="number of voxels in mask") + resels = traits.Float(desc="number of resels") class SmoothEstimate(FSLCommand): @@ -1814,11 +1906,11 @@ class SmoothEstimate(FSLCommand): input_spec = SmoothEstimateInputSpec output_spec = SmoothEstimateOutputSpec - _cmd = 'smoothest' + _cmd = "smoothest" def aggregate_outputs(self, runtime=None, needed_outputs=None): outputs = self._outputs() - stdout = runtime.stdout.split('\n') + stdout = runtime.stdout.split("\n") outputs.dlh = float(stdout[0].split()[1]) outputs.volume = int(stdout[1].split()[1]) outputs.resels = float(stdout[2].split()[1]) @@ -1826,121 +1918,130 @@ def aggregate_outputs(self, runtime=None, needed_outputs=None): class ClusterInputSpec(FSLCommandInputSpec): - in_file = File( - argstr='--in=%s', mandatory=True, exists=True, desc='input volume') + in_file = File(argstr="--in=%s", mandatory=True, exists=True, desc="input volume") threshold = traits.Float( - argstr='--thresh=%.10f', - mandatory=True, - desc='threshold for input volume') + argstr="--thresh=%.10f", mandatory=True, desc="threshold for input volume" + ) out_index_file = traits.Either( traits.Bool, File, - argstr='--oindex=%s', - desc='output of cluster index (in size order)', - 
hash_files=False) + argstr="--oindex=%s", + desc="output of cluster index (in size order)", + hash_files=False, + ) out_threshold_file = traits.Either( traits.Bool, File, - argstr='--othresh=%s', - desc='thresholded image', - hash_files=False) + argstr="--othresh=%s", + desc="thresholded image", + hash_files=False, + ) out_localmax_txt_file = traits.Either( traits.Bool, File, - argstr='--olmax=%s', - desc='local maxima text file', - hash_files=False) + argstr="--olmax=%s", + desc="local maxima text file", + hash_files=False, + ) out_localmax_vol_file = traits.Either( traits.Bool, File, - argstr='--olmaxim=%s', - desc='output of local maxima volume', - hash_files=False) + argstr="--olmaxim=%s", + desc="output of local maxima volume", + hash_files=False, + ) out_size_file = traits.Either( traits.Bool, File, - argstr='--osize=%s', - desc='filename for output of size image', - hash_files=False) + argstr="--osize=%s", + desc="filename for output of size image", + hash_files=False, + ) out_max_file = traits.Either( traits.Bool, File, - argstr='--omax=%s', - desc='filename for output of max image', - hash_files=False) + argstr="--omax=%s", + desc="filename for output of max image", + hash_files=False, + ) out_mean_file = traits.Either( traits.Bool, File, - argstr='--omean=%s', - desc='filename for output of mean image', - hash_files=False) + argstr="--omean=%s", + desc="filename for output of mean image", + hash_files=False, + ) out_pval_file = traits.Either( traits.Bool, File, - argstr='--opvals=%s', - desc='filename for image output of log pvals', - hash_files=False) + argstr="--opvals=%s", + desc="filename for image output of log pvals", + hash_files=False, + ) pthreshold = traits.Float( - argstr='--pthresh=%.10f', - requires=['dlh', 'volume'], - desc='p-threshold for clusters') + argstr="--pthresh=%.10f", + requires=["dlh", "volume"], + desc="p-threshold for clusters", + ) peak_distance = traits.Float( - argstr='--peakdist=%.10f', - desc='minimum distance between local maxima/minima, in mm (default 0)') - cope_file = File(argstr='--cope=%s', desc='cope volume') - volume = traits.Int( - argstr='--volume=%d', desc='number of voxels in the mask') + argstr="--peakdist=%.10f", + desc="minimum distance between local maxima/minima, in mm (default 0)", + ) + cope_file = File(argstr="--cope=%s", desc="cope volume") + volume = traits.Int(argstr="--volume=%d", desc="number of voxels in the mask") dlh = traits.Float( - argstr='--dlh=%.10f', desc='smoothness estimate = sqrt(det(Lambda))') + argstr="--dlh=%.10f", desc="smoothness estimate = sqrt(det(Lambda))" + ) fractional = traits.Bool( False, usedefault=True, - argstr='--fractional', - desc='interprets the threshold as a fraction of the robust range') + argstr="--fractional", + desc="interprets the threshold as a fraction of the robust range", + ) connectivity = traits.Int( - argstr='--connectivity=%d', - desc='the connectivity of voxels (default 26)') + argstr="--connectivity=%d", desc="the connectivity of voxels (default 26)" + ) use_mm = traits.Bool( - False, - usedefault=True, - argstr='--mm', - desc='use mm, not voxel, coordinates') + False, usedefault=True, argstr="--mm", desc="use mm, not voxel, coordinates" + ) find_min = traits.Bool( - False, - usedefault=True, - argstr='--min', - desc='find minima instead of maxima') + False, usedefault=True, argstr="--min", desc="find minima instead of maxima" + ) no_table = traits.Bool( False, usedefault=True, - argstr='--no_table', - desc='suppresses printing of the table info') + argstr="--no_table", + 
desc="suppresses printing of the table info", + ) minclustersize = traits.Bool( False, usedefault=True, - argstr='--minclustersize', - desc='prints out minimum significant cluster size') + argstr="--minclustersize", + desc="prints out minimum significant cluster size", + ) xfm_file = File( - argstr='--xfm=%s', - desc=('filename for Linear: input->standard-space ' - 'transform. Non-linear: input->highres transform')) + argstr="--xfm=%s", + desc=( + "filename for Linear: input->standard-space " + "transform. Non-linear: input->highres transform" + ), + ) std_space_file = File( - argstr='--stdvol=%s', desc='filename for standard-space volume') - num_maxima = traits.Int( - argstr='--num=%d', desc='no of local maxima to report') - warpfield_file = File( - argstr='--warpvol=%s', desc='file contining warpfield') + argstr="--stdvol=%s", desc="filename for standard-space volume" + ) + num_maxima = traits.Int(argstr="--num=%d", desc="no of local maxima to report") + warpfield_file = File(argstr="--warpvol=%s", desc="file contining warpfield") class ClusterOutputSpec(TraitedSpec): - index_file = File(desc='output of cluster index (in size order)') - threshold_file = File(desc='thresholded image') - localmax_txt_file = File(desc='local maxima text file') - localmax_vol_file = File(desc='output of local maxima volume') - size_file = File(desc='filename for output of size image') - max_file = File(desc='filename for output of max image') - mean_file = File(desc='filename for output of mean image') - pval_file = File(desc='filename for image output of log pvals') + index_file = File(desc="output of cluster index (in size order)") + threshold_file = File(desc="thresholded image") + localmax_txt_file = File(desc="local maxima text file") + localmax_vol_file = File(desc="output of local maxima volume") + size_file = File(desc="filename for output of size image") + max_file = File(desc="filename for output of max image") + mean_file = File(desc="filename for output of mean image") + pval_file = File(desc="filename for image output of log pvals") class Cluster(FSLCommand): @@ -1958,19 +2059,20 @@ class Cluster(FSLCommand): 'cluster --in=zstat1.nii.gz --olmax=stats.txt --thresh=2.3000000000 --mm' """ + input_spec = ClusterInputSpec output_spec = ClusterOutputSpec - _cmd = 'cluster' + _cmd = "cluster" filemap = { - 'out_index_file': 'index', - 'out_threshold_file': 'threshold', - 'out_localmax_txt_file': 'localmax.txt', - 'out_localmax_vol_file': 'localmax', - 'out_size_file': 'size', - 'out_max_file': 'max', - 'out_mean_file': 'mean', - 'out_pval_file': 'pval' + "out_index_file": "index", + "out_threshold_file": "threshold", + "out_localmax_txt_file": "localmax.txt", + "out_localmax_vol_file": "localmax", + "out_size_file": "size", + "out_max_file": "max", + "out_mean_file": "mean", + "out_pval_file": "pval", } def _list_outputs(self): @@ -1982,12 +2084,13 @@ def _list_outputs(self): if isinstance(inval, bool): if inval: change_ext = True - if suffix.endswith('.txt'): + if suffix.endswith(".txt"): change_ext = False outputs[outkey] = self._gen_fname( self.inputs.in_file, - suffix='_' + suffix, - change_ext=change_ext) + suffix="_" + suffix, + change_ext=change_ext, + ) else: outputs[outkey] = os.path.abspath(inval) return outputs @@ -2017,44 +2120,48 @@ class DualRegressionInputSpec(FSLCommandInputSpec): mandatory=True, position=1, desc="4D image containing spatial IC maps (melodic_IC) from the " - "whole-group ICA analysis") + "whole-group ICA analysis", + ) des_norm = traits.Bool( True, argstr="%i", 
position=2, usedefault=True, desc="Whether to variance-normalise the timecourses used as the " - "stage-2 regressors; True is default and recommended") + "stage-2 regressors; True is default and recommended", + ) one_sample_group_mean = traits.Bool( argstr="-1", position=3, - desc="perform 1-sample group-mean test instead of generic " - "permutation test") + desc="perform 1-sample group-mean test instead of generic " "permutation test", + ) design_file = File( exists=True, argstr="%s", position=3, - desc="Design matrix for final cross-subject modelling with " - "randomise") + desc="Design matrix for final cross-subject modelling with " "randomise", + ) con_file = File( exists=True, argstr="%s", position=4, - desc="Design contrasts for final cross-subject modelling with " - "randomise") + desc="Design contrasts for final cross-subject modelling with " "randomise", + ) n_perm = traits.Int( argstr="%i", mandatory=True, position=5, desc="Number of permutations for randomise; set to 1 for just raw " - "tstat output, set to 0 to not run randomise at all.") + "tstat output, set to 0 to not run randomise at all.", + ) out_dir = Directory( "output", argstr="%s", usedefault=True, position=6, desc="This directory will be created to hold all output and logfiles", - genfile=True) + genfile=True, + ) class DualRegressionOutputSpec(TraitedSpec): @@ -2079,16 +2186,17 @@ class DualRegression(FSLCommand): >>> dual_regression.run() # doctest: +SKIP """ + input_spec = DualRegressionInputSpec output_spec = DualRegressionOutputSpec - _cmd = 'dual_regression' + _cmd = "dual_regression" def _list_outputs(self): outputs = self.output_spec().get() if isdefined(self.inputs.out_dir): - outputs['out_dir'] = os.path.abspath(self.inputs.out_dir) + outputs["out_dir"] = os.path.abspath(self.inputs.out_dir) else: - outputs['out_dir'] = self._gen_filename("out_dir") + outputs["out_dir"] = self._gen_filename("out_dir") return outputs def _gen_filename(self, name): @@ -2098,92 +2206,103 @@ def _gen_filename(self, name): class RandomiseInputSpec(FSLCommandInputSpec): in_file = File( - exists=True, - desc='4D input file', - argstr='-i %s', - position=0, - mandatory=True) + exists=True, desc="4D input file", argstr="-i %s", position=0, mandatory=True + ) base_name = traits.Str( - 'randomise', - desc='the rootname that all generated files will have', + "randomise", + desc="the rootname that all generated files will have", argstr='-o "%s"', position=1, - usedefault=True) + usedefault=True, + ) design_mat = File( - exists=True, desc='design matrix file', argstr='-d %s', position=2) - tcon = File( - exists=True, desc='t contrasts file', argstr='-t %s', position=3) - fcon = File(exists=True, desc='f contrasts file', argstr='-f %s') - mask = File(exists=True, desc='mask image', argstr='-m %s') + exists=True, desc="design matrix file", argstr="-d %s", position=2 + ) + tcon = File(exists=True, desc="t contrasts file", argstr="-t %s", position=3) + fcon = File(exists=True, desc="f contrasts file", argstr="-f %s") + mask = File(exists=True, desc="mask image", argstr="-m %s") x_block_labels = File( - exists=True, desc='exchangeability block labels file', argstr='-e %s') + exists=True, desc="exchangeability block labels file", argstr="-e %s" + ) demean = traits.Bool( - desc='demean data temporally before model fitting', argstr='-D') + desc="demean data temporally before model fitting", argstr="-D" + ) one_sample_group_mean = traits.Bool( - desc=('perform 1-sample group-mean test instead of generic ' - 'permutation test'), - argstr='-1') + 
desc=( + "perform 1-sample group-mean test instead of generic " "permutation test" + ), + argstr="-1", + ) show_total_perms = traits.Bool( - desc=('print out how many unique permutations would be generated ' - 'and exit'), - argstr='-q') + desc=("print out how many unique permutations would be generated " "and exit"), + argstr="-q", + ) show_info_parallel_mode = traits.Bool( - desc='print out information required for parallel mode and exit', - argstr='-Q') + desc="print out information required for parallel mode and exit", argstr="-Q" + ) vox_p_values = traits.Bool( - desc='output voxelwise (corrected and uncorrected) p-value images', - argstr='-x') - tfce = traits.Bool( - desc='carry out Threshold-Free Cluster Enhancement', argstr='-T') + desc="output voxelwise (corrected and uncorrected) p-value images", argstr="-x" + ) + tfce = traits.Bool(desc="carry out Threshold-Free Cluster Enhancement", argstr="-T") tfce2D = traits.Bool( - desc=('carry out Threshold-Free Cluster Enhancement with 2D ' - 'optimisation'), - argstr='--T2') - f_only = traits.Bool(desc='calculate f-statistics only', argstr='--f_only') + desc=("carry out Threshold-Free Cluster Enhancement with 2D " "optimisation"), + argstr="--T2", + ) + f_only = traits.Bool(desc="calculate f-statistics only", argstr="--f_only") raw_stats_imgs = traits.Bool( - desc='output raw ( unpermuted ) statistic images', argstr='-R') + desc="output raw ( unpermuted ) statistic images", argstr="-R" + ) p_vec_n_dist_files = traits.Bool( - desc='output permutation vector and null distribution text files', - argstr='-P') + desc="output permutation vector and null distribution text files", argstr="-P" + ) num_perm = traits.Int( - argstr='-n %d', - desc='number of permutations (default 5000, set to 0 for exhaustive)') + argstr="-n %d", + desc="number of permutations (default 5000, set to 0 for exhaustive)", + ) seed = traits.Int( - argstr='--seed=%d', - desc='specific integer seed for random number generator') + argstr="--seed=%d", desc="specific integer seed for random number generator" + ) var_smooth = traits.Int( - argstr='-v %d', desc='use variance smoothing (std is in mm)') + argstr="-v %d", desc="use variance smoothing (std is in mm)" + ) c_thresh = traits.Float( - argstr='-c %.1f', desc='carry out cluster-based thresholding') + argstr="-c %.1f", desc="carry out cluster-based thresholding" + ) cm_thresh = traits.Float( - argstr='-C %.1f', desc='carry out cluster-mass-based thresholding') - f_c_thresh = traits.Float( - argstr='-F %.2f', desc='carry out f cluster thresholding') + argstr="-C %.1f", desc="carry out cluster-mass-based thresholding" + ) + f_c_thresh = traits.Float(argstr="-F %.2f", desc="carry out f cluster thresholding") f_cm_thresh = traits.Float( - argstr='-S %.2f', desc='carry out f cluster-mass thresholding') + argstr="-S %.2f", desc="carry out f cluster-mass thresholding" + ) tfce_H = traits.Float( - argstr='--tfce_H=%.2f', desc='TFCE height parameter (default=2)') + argstr="--tfce_H=%.2f", desc="TFCE height parameter (default=2)" + ) tfce_E = traits.Float( - argstr='--tfce_E=%.2f', desc='TFCE extent parameter (default=0.5)') + argstr="--tfce_E=%.2f", desc="TFCE extent parameter (default=0.5)" + ) tfce_C = traits.Float( - argstr='--tfce_C=%.2f', desc='TFCE connectivity (6 or 26; default=6)') + argstr="--tfce_C=%.2f", desc="TFCE connectivity (6 or 26; default=6)" + ) class RandomiseOutputSpec(TraitedSpec): - tstat_files = traits.List( - File(exists=True), desc='t contrast raw statistic') - fstat_files = traits.List( - 
File(exists=True), desc='f contrast raw statistic') + tstat_files = traits.List(File(exists=True), desc="t contrast raw statistic") + fstat_files = traits.List(File(exists=True), desc="f contrast raw statistic") t_p_files = traits.List( - File(exists=True), desc='f contrast uncorrected p values files') + File(exists=True), desc="f contrast uncorrected p values files" + ) f_p_files = traits.List( - File(exists=True), desc='f contrast uncorrected p values files') + File(exists=True), desc="f contrast uncorrected p values files" + ) t_corrected_p_files = traits.List( File(exists=True), - desc='t contrast FWE (Family-wise error) corrected p values files') + desc="t contrast FWE (Family-wise error) corrected p values files", + ) f_corrected_p_files = traits.List( File(exists=True), - desc='f contrast FWE (Family-wise error) corrected p values files') + desc="f contrast FWE (Family-wise error) corrected p values files", + ) class Randomise(FSLCommand): @@ -2200,153 +2319,180 @@ class Randomise(FSLCommand): """ - _cmd = 'randomise' + _cmd = "randomise" input_spec = RandomiseInputSpec output_spec = RandomiseOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['tstat_files'] = glob( - self._gen_fname('%s_tstat*.nii' % self.inputs.base_name)) - outputs['fstat_files'] = glob( - self._gen_fname('%s_fstat*.nii' % self.inputs.base_name)) + outputs["tstat_files"] = glob( + self._gen_fname("%s_tstat*.nii" % self.inputs.base_name) + ) + outputs["fstat_files"] = glob( + self._gen_fname("%s_fstat*.nii" % self.inputs.base_name) + ) prefix = False if self.inputs.tfce or self.inputs.tfce2D: - prefix = 'tfce' + prefix = "tfce" elif self.inputs.vox_p_values: - prefix = 'vox' + prefix = "vox" elif self.inputs.c_thresh or self.inputs.f_c_thresh: - prefix = 'clustere' + prefix = "clustere" elif self.inputs.cm_thresh or self.inputs.f_cm_thresh: - prefix = 'clusterm' + prefix = "clusterm" if prefix: - outputs['t_p_files'] = glob( - self._gen_fname('%s_%s_p_tstat*' % (self.inputs.base_name, - prefix))) - outputs['t_corrected_p_files'] = glob( - self._gen_fname('%s_%s_corrp_tstat*.nii' % - (self.inputs.base_name, prefix))) - - outputs['f_p_files'] = glob( - self._gen_fname('%s_%s_p_fstat*.nii' % (self.inputs.base_name, - prefix))) - outputs['f_corrected_p_files'] = glob( - self._gen_fname('%s_%s_corrp_fstat*.nii' % - (self.inputs.base_name, prefix))) + outputs["t_p_files"] = glob( + self._gen_fname("%s_%s_p_tstat*" % (self.inputs.base_name, prefix)) + ) + outputs["t_corrected_p_files"] = glob( + self._gen_fname( + "%s_%s_corrp_tstat*.nii" % (self.inputs.base_name, prefix) + ) + ) + + outputs["f_p_files"] = glob( + self._gen_fname("%s_%s_p_fstat*.nii" % (self.inputs.base_name, prefix)) + ) + outputs["f_corrected_p_files"] = glob( + self._gen_fname( + "%s_%s_corrp_fstat*.nii" % (self.inputs.base_name, prefix) + ) + ) return outputs class GLMInputSpec(FSLCommandInputSpec): in_file = File( exists=True, - argstr='-i %s', + argstr="-i %s", mandatory=True, position=1, - desc='input file name (text matrix or 3D/4D image file)') + desc="input file name (text matrix or 3D/4D image file)", + ) out_file = File( name_template="%s_glm", - argstr='-o %s', + argstr="-o %s", position=3, - desc=('filename for GLM parameter estimates' + ' (GLM betas)'), + desc=("filename for GLM parameter estimates" + " (GLM betas)"), name_source="in_file", - keep_extension=True) + keep_extension=True, + ) design = File( exists=True, - argstr='-d %s', + argstr="-d %s", mandatory=True, position=2, - desc=('file name of the GLM 
design matrix (text time' + - ' courses for temporal regression or an image' + - ' file for spatial regression)')) + desc=( + "file name of the GLM design matrix (text time" + + " courses for temporal regression or an image" + + " file for spatial regression)" + ), + ) contrasts = File( - exists=True, argstr='-c %s', desc=('matrix of t-statics contrasts')) + exists=True, argstr="-c %s", desc=("matrix of t-statics contrasts") + ) mask = File( - exists=True, - argstr='-m %s', - desc=('mask image file name if input is image')) - dof = traits.Int( - argstr='--dof=%d', desc=('set degrees of freedom' + ' explicitly')) + exists=True, argstr="-m %s", desc=("mask image file name if input is image") + ) + dof = traits.Int(argstr="--dof=%d", desc=("set degrees of freedom" + " explicitly")) des_norm = traits.Bool( - argstr='--des_norm', - desc=('switch on normalization of the design' + - ' matrix columns to unit std deviation')) + argstr="--des_norm", + desc=( + "switch on normalization of the design" + + " matrix columns to unit std deviation" + ), + ) dat_norm = traits.Bool( - argstr='--dat_norm', - desc=('switch on normalization of the data time series to unit std ' - 'deviation')) + argstr="--dat_norm", + desc=( + "switch on normalization of the data time series to unit std " "deviation" + ), + ) var_norm = traits.Bool( - argstr='--vn', desc=('perform MELODIC variance-normalisation on data')) + argstr="--vn", desc=("perform MELODIC variance-normalisation on data") + ) demean = traits.Bool( - argstr='--demean', desc=('switch on demeaining of design and data')) + argstr="--demean", desc=("switch on demeaining of design and data") + ) out_cope = File( - argstr='--out_cope=%s', - desc='output file name for COPE (either as txt or image') + argstr="--out_cope=%s", desc="output file name for COPE (either as txt or image" + ) out_z_name = File( - argstr='--out_z=%s', - desc='output file name for Z-stats (either as txt or image') + argstr="--out_z=%s", desc="output file name for Z-stats (either as txt or image" + ) out_t_name = File( - argstr='--out_t=%s', - desc='output file name for t-stats (either as txt or image') + argstr="--out_t=%s", desc="output file name for t-stats (either as txt or image" + ) out_p_name = File( - argstr='--out_p=%s', - desc=('output file name for p-values of Z-stats (either as text file ' - 'or image)')) + argstr="--out_p=%s", + desc=( + "output file name for p-values of Z-stats (either as text file " "or image)" + ), + ) out_f_name = File( - argstr='--out_f=%s', - desc='output file name for F-value of full model fit') + argstr="--out_f=%s", desc="output file name for F-value of full model fit" + ) out_pf_name = File( - argstr='--out_pf=%s', - desc='output file name for p-value for full model fit') - out_res_name = File( - argstr='--out_res=%s', desc='output file name for residuals') + argstr="--out_pf=%s", desc="output file name for p-value for full model fit" + ) + out_res_name = File(argstr="--out_res=%s", desc="output file name for residuals") out_varcb_name = File( - argstr='--out_varcb=%s', desc='output file name for variance of COPEs') + argstr="--out_varcb=%s", desc="output file name for variance of COPEs" + ) out_sigsq_name = File( - argstr='--out_sigsq=%s', - desc=('output file name for residual noise variance sigma-square')) + argstr="--out_sigsq=%s", + desc=("output file name for residual noise variance sigma-square"), + ) out_data_name = File( - argstr='--out_data=%s', desc='output file name for pre-processed data') + argstr="--out_data=%s", desc="output file name 
for pre-processed data" + ) out_vnscales_name = File( - argstr='--out_vnscales=%s', - desc=('output file name for scaling factors for variance ' - 'normalisation')) + argstr="--out_vnscales=%s", + desc=("output file name for scaling factors for variance " "normalisation"), + ) class GLMOutputSpec(TraitedSpec): - out_file = File( - exists=True, desc=('file name of GLM parameters (if generated)')) + out_file = File(exists=True, desc=("file name of GLM parameters (if generated)")) out_cope = OutputMultiPath( File(exists=True), - desc=('output file name for COPEs (either as text file or image)')) + desc=("output file name for COPEs (either as text file or image)"), + ) out_z = OutputMultiPath( File(exists=True), - desc=('output file name for COPEs (either as text file or image)')) + desc=("output file name for COPEs (either as text file or image)"), + ) out_t = OutputMultiPath( File(exists=True), - desc=('output file name for t-stats (either as text file or image)')) + desc=("output file name for t-stats (either as text file or image)"), + ) out_p = OutputMultiPath( File(exists=True), - desc=('output file name for p-values of Z-stats (either as text file ' - 'or image)')) + desc=( + "output file name for p-values of Z-stats (either as text file " "or image)" + ), + ) out_f = OutputMultiPath( - File(exists=True), - desc=('output file name for F-value of full model fit')) + File(exists=True), desc=("output file name for F-value of full model fit") + ) out_pf = OutputMultiPath( - File(exists=True), - desc=('output file name for p-value for full model fit')) - out_res = OutputMultiPath( - File(exists=True), desc='output file name for residuals') + File(exists=True), desc=("output file name for p-value for full model fit") + ) + out_res = OutputMultiPath(File(exists=True), desc="output file name for residuals") out_varcb = OutputMultiPath( - File(exists=True), desc='output file name for variance of COPEs') + File(exists=True), desc="output file name for variance of COPEs" + ) out_sigsq = OutputMultiPath( File(exists=True), - desc=('output file name for residual noise variance sigma-square')) + desc=("output file name for residual noise variance sigma-square"), + ) out_data = OutputMultiPath( - File(exists=True), desc='output file for preprocessed data') + File(exists=True), desc="output file for preprocessed data" + ) out_vnscales = OutputMultiPath( File(exists=True), - desc=('output file name for scaling factors for variance ' - 'normalisation')) + desc=("output file name for scaling factors for variance " "normalisation"), + ) class GLM(FSLCommand): @@ -2361,7 +2507,8 @@ class GLM(FSLCommand): 'fsl_glm -i functional.nii -d maps.nii -o functional_glm.nii' """ - _cmd = 'fsl_glm' + + _cmd = "fsl_glm" input_spec = GLMInputSpec output_spec = GLMOutputSpec @@ -2369,38 +2516,37 @@ def _list_outputs(self): outputs = super(GLM, self)._list_outputs() if isdefined(self.inputs.out_cope): - outputs['out_cope'] = os.path.abspath(self.inputs.out_cope) + outputs["out_cope"] = os.path.abspath(self.inputs.out_cope) if isdefined(self.inputs.out_z_name): - outputs['out_z'] = os.path.abspath(self.inputs.out_z_name) + outputs["out_z"] = os.path.abspath(self.inputs.out_z_name) if isdefined(self.inputs.out_t_name): - outputs['out_t'] = os.path.abspath(self.inputs.out_t_name) + outputs["out_t"] = os.path.abspath(self.inputs.out_t_name) if isdefined(self.inputs.out_p_name): - outputs['out_p'] = os.path.abspath(self.inputs.out_p_name) + outputs["out_p"] = os.path.abspath(self.inputs.out_p_name) if 
isdefined(self.inputs.out_f_name): - outputs['out_f'] = os.path.abspath(self.inputs.out_f_name) + outputs["out_f"] = os.path.abspath(self.inputs.out_f_name) if isdefined(self.inputs.out_pf_name): - outputs['out_pf'] = os.path.abspath(self.inputs.out_pf_name) + outputs["out_pf"] = os.path.abspath(self.inputs.out_pf_name) if isdefined(self.inputs.out_res_name): - outputs['out_res'] = os.path.abspath(self.inputs.out_res_name) + outputs["out_res"] = os.path.abspath(self.inputs.out_res_name) if isdefined(self.inputs.out_varcb_name): - outputs['out_varcb'] = os.path.abspath(self.inputs.out_varcb_name) + outputs["out_varcb"] = os.path.abspath(self.inputs.out_varcb_name) if isdefined(self.inputs.out_sigsq_name): - outputs['out_sigsq'] = os.path.abspath(self.inputs.out_sigsq_name) + outputs["out_sigsq"] = os.path.abspath(self.inputs.out_sigsq_name) if isdefined(self.inputs.out_data_name): - outputs['out_data'] = os.path.abspath(self.inputs.out_data_name) + outputs["out_data"] = os.path.abspath(self.inputs.out_data_name) if isdefined(self.inputs.out_vnscales_name): - outputs['out_vnscales'] = os.path.abspath( - self.inputs.out_vnscales_name) + outputs["out_vnscales"] = os.path.abspath(self.inputs.out_vnscales_name) return outputs @@ -2419,9 +2565,10 @@ def load_template(name): """ from pkg_resources import resource_filename as pkgrf - full_fname = pkgrf('nipype', - os.path.join('interfaces', 'fsl', 'model_templates', - name)) + + full_fname = pkgrf( + "nipype", os.path.join("interfaces", "fsl", "model_templates", name) + ) with open(full_fname) as template_file: template = Template(template_file.read()) diff --git a/nipype/interfaces/fsl/possum.py b/nipype/interfaces/fsl/possum.py index 50b88db185..88797aaecd 100644 --- a/nipype/interfaces/fsl/possum.py +++ b/nipype/interfaces/fsl/possum.py @@ -18,76 +18,100 @@ class B0CalcInputSpec(FSLCommandInputSpec): in_file = File( exists=True, mandatory=True, - argstr='-i %s', + argstr="-i %s", position=0, - desc='filename of input image (usually a tissue/air segmentation)') + desc="filename of input image (usually a tissue/air segmentation)", + ) out_file = File( - argstr='-o %s', + argstr="-o %s", position=1, - name_source=['in_file'], - name_template='%s_b0field', - output_name='out_file', - desc='filename of B0 output volume') + name_source=["in_file"], + name_template="%s_b0field", + output_name="out_file", + desc="filename of B0 output volume", + ) x_grad = traits.Float( - 0.0, usedefault=True, - argstr='--gx=%0.4f', - desc='Value for zeroth-order x-gradient field (per mm)') + 0.0, + usedefault=True, + argstr="--gx=%0.4f", + desc="Value for zeroth-order x-gradient field (per mm)", + ) y_grad = traits.Float( - 0.0, usedefault=True, - argstr='--gy=%0.4f', - desc='Value for zeroth-order y-gradient field (per mm)') + 0.0, + usedefault=True, + argstr="--gy=%0.4f", + desc="Value for zeroth-order y-gradient field (per mm)", + ) z_grad = traits.Float( - 0.0, usedefault=True, - argstr='--gz=%0.4f', - desc='Value for zeroth-order z-gradient field (per mm)') + 0.0, + usedefault=True, + argstr="--gz=%0.4f", + desc="Value for zeroth-order z-gradient field (per mm)", + ) x_b0 = traits.Float( - 0.0, usedefault=True, - argstr='--b0x=%0.2f', - xor=['xyz_b0'], - desc='Value for zeroth-order b0 field (x-component), in Tesla') + 0.0, + usedefault=True, + argstr="--b0x=%0.2f", + xor=["xyz_b0"], + desc="Value for zeroth-order b0 field (x-component), in Tesla", + ) y_b0 = traits.Float( - 0.0, usedefault=True, - argstr='--b0y=%0.2f', - xor=['xyz_b0'], - desc='Value for 
zeroth-order b0 field (y-component), in Tesla') + 0.0, + usedefault=True, + argstr="--b0y=%0.2f", + xor=["xyz_b0"], + desc="Value for zeroth-order b0 field (y-component), in Tesla", + ) z_b0 = traits.Float( - 1.0, usedefault=True, - argstr='--b0=%0.2f', - xor=['xyz_b0'], - desc='Value for zeroth-order b0 field (z-component), in Tesla') + 1.0, + usedefault=True, + argstr="--b0=%0.2f", + xor=["xyz_b0"], + desc="Value for zeroth-order b0 field (z-component), in Tesla", + ) xyz_b0 = traits.Tuple( traits.Float, traits.Float, traits.Float, - argstr='--b0x=%0.2f --b0y=%0.2f --b0=%0.2f', - xor=['x_b0', 'y_b0', 'z_b0'], - desc='Zeroth-order B0 field in Tesla') + argstr="--b0x=%0.2f --b0y=%0.2f --b0=%0.2f", + xor=["x_b0", "y_b0", "z_b0"], + desc="Zeroth-order B0 field in Tesla", + ) delta = traits.Float( - -9.45e-6, usedefault=True, - argstr='-d %e', desc='Delta value (chi_tissue - chi_air)') + -9.45e-6, + usedefault=True, + argstr="-d %e", + desc="Delta value (chi_tissue - chi_air)", + ) chi_air = traits.Float( - 4.0e-7, usedefault=True, - argstr='--chi0=%e', desc='susceptibility of air') + 4.0e-7, usedefault=True, argstr="--chi0=%e", desc="susceptibility of air" + ) compute_xyz = traits.Bool( - False, usedefault=True, - argstr='--xyz', - desc='calculate and save all 3 field components (i.e. x,y,z)') + False, + usedefault=True, + argstr="--xyz", + desc="calculate and save all 3 field components (i.e. x,y,z)", + ) extendboundary = traits.Float( - 1.0, usedefault=True, - argstr='--extendboundary=%0.2f', - desc='Relative proportion to extend voxels at boundary') + 1.0, + usedefault=True, + argstr="--extendboundary=%0.2f", + desc="Relative proportion to extend voxels at boundary", + ) directconv = traits.Bool( - False, usedefault=True, - argstr='--directconv', - desc='use direct (image space) convolution, not FFT') + False, + usedefault=True, + argstr="--directconv", + desc="use direct (image space) convolution, not FFT", + ) class B0CalcOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='filename of B0 output volume') + out_file = File(exists=True, desc="filename of B0 output volume") class B0Calc(FSLCommand): @@ -112,6 +136,6 @@ class B0Calc(FSLCommand): """ - _cmd = 'b0calc' + _cmd = "b0calc" input_spec = B0CalcInputSpec output_spec = B0CalcOutputSpec diff --git a/nipype/interfaces/fsl/preprocess.py b/nipype/interfaces/fsl/preprocess.py index 9207fbd497..418737be2c 100644 --- a/nipype/interfaces/fsl/preprocess.py +++ b/nipype/interfaces/fsl/preprocess.py @@ -14,8 +14,15 @@ from ... 
import LooseVersion from ...utils.filemanip import split_filename -from ..base import (TraitedSpec, File, InputMultiPath, OutputMultiPath, - Undefined, traits, isdefined) +from ..base import ( + TraitedSpec, + File, + InputMultiPath, + OutputMultiPath, + Undefined, + traits, + isdefined, +) from .base import FSLCommand, FSLCommandInputSpec, Info @@ -24,72 +31,90 @@ class BETInputSpec(FSLCommandInputSpec): # will put something on the end in_file = File( exists=True, - desc='input file to skull strip', - argstr='%s', + desc="input file to skull strip", + argstr="%s", position=0, - mandatory=True) + mandatory=True, + ) out_file = File( - desc='name of output skull stripped image', - argstr='%s', + desc="name of output skull stripped image", + argstr="%s", position=1, genfile=True, - hash_files=False) - outline = traits.Bool(desc='create surface outline image', argstr='-o') - mask = traits.Bool(desc='create binary mask image', argstr='-m') - skull = traits.Bool(desc='create skull image', argstr='-s') - no_output = traits.Bool( - argstr='-n', desc="Don't generate segmented output") - frac = traits.Float( - desc='fractional intensity threshold', argstr='-f %.2f') + hash_files=False, + ) + outline = traits.Bool(desc="create surface outline image", argstr="-o") + mask = traits.Bool(desc="create binary mask image", argstr="-m") + skull = traits.Bool(desc="create skull image", argstr="-s") + no_output = traits.Bool(argstr="-n", desc="Don't generate segmented output") + frac = traits.Float(desc="fractional intensity threshold", argstr="-f %.2f") vertical_gradient = traits.Float( - argstr='-g %.2f', - desc='vertical gradient in fractional intensity threshold (-1, 1)') - radius = traits.Int(argstr='-r %d', units='mm', desc="head radius") + argstr="-g %.2f", + desc="vertical gradient in fractional intensity threshold (-1, 1)", + ) + radius = traits.Int(argstr="-r %d", units="mm", desc="head radius") center = traits.List( traits.Int, - desc='center of gravity in voxels', - argstr='-c %s', + desc="center of gravity in voxels", + argstr="-c %s", minlen=0, maxlen=3, - units='voxels') + units="voxels", + ) threshold = traits.Bool( - argstr='-t', - desc="apply thresholding to segmented brain image and mask") - mesh = traits.Bool(argstr='-e', desc="generate a vtk mesh brain surface") + argstr="-t", desc="apply thresholding to segmented brain image and mask" + ) + mesh = traits.Bool(argstr="-e", desc="generate a vtk mesh brain surface") # the remaining 'options' are more like modes (mutually exclusive) that # FSL actually implements in a shell script wrapper around the bet binary. 
# for some combinations of them in specific order a call would not fail, # but in general using more than one of the following is clearly not # supported - _xor_inputs = ('functional', 'reduce_bias', 'robust', 'padding', - 'remove_eyes', 'surfaces', 't2_guided') + _xor_inputs = ( + "functional", + "reduce_bias", + "robust", + "padding", + "remove_eyes", + "surfaces", + "t2_guided", + ) robust = traits.Bool( - desc='robust brain centre estimation (iterates BET several times)', - argstr='-R', - xor=_xor_inputs) + desc="robust brain centre estimation (iterates BET several times)", + argstr="-R", + xor=_xor_inputs, + ) padding = traits.Bool( - desc=('improve BET if FOV is very small in Z (by temporarily padding ' - 'end slices)'), - argstr='-Z', - xor=_xor_inputs) + desc=( + "improve BET if FOV is very small in Z (by temporarily padding " + "end slices)" + ), + argstr="-Z", + xor=_xor_inputs, + ) remove_eyes = traits.Bool( - desc='eye & optic nerve cleanup (can be useful in SIENA)', - argstr='-S', - xor=_xor_inputs) + desc="eye & optic nerve cleanup (can be useful in SIENA)", + argstr="-S", + xor=_xor_inputs, + ) surfaces = traits.Bool( - desc=('run bet2 and then betsurf to get additional skull and scalp ' - 'surfaces (includes registrations)'), - argstr='-A', - xor=_xor_inputs) + desc=( + "run bet2 and then betsurf to get additional skull and scalp " + "surfaces (includes registrations)" + ), + argstr="-A", + xor=_xor_inputs, + ) t2_guided = File( - desc='as with creating surfaces, when also feeding in ' - 'non-brain-extracted T2 (includes registrations)', - argstr='-A2 %s', - xor=_xor_inputs) - functional = traits.Bool( - argstr='-F', xor=_xor_inputs, desc="apply to 4D fMRI data") + desc="as with creating surfaces, when also feeding in " + "non-brain-extracted T2 (includes registrations)", + argstr="-A2 %s", + xor=_xor_inputs, + ) + functional = traits.Bool(argstr="-F", xor=_xor_inputs, desc="apply to 4D fMRI data") reduce_bias = traits.Bool( - argstr='-B', xor=_xor_inputs, desc="bias field and neck cleanup") + argstr="-B", xor=_xor_inputs, desc="bias field and neck cleanup" + ) class BETOutputSpec(TraitedSpec): @@ -98,14 +123,11 @@ class BETOutputSpec(TraitedSpec): outline_file = File(desc="path/name of outline file (if generated)") meshfile = File(desc="path/name of vtk mesh file (if generated)") inskull_mask_file = File(desc="path/name of inskull mask (if generated)") - inskull_mesh_file = File( - desc="path/name of inskull mesh outline (if generated)") + inskull_mesh_file = File(desc="path/name of inskull mesh outline (if generated)") outskull_mask_file = File(desc="path/name of outskull mask (if generated)") - outskull_mesh_file = File( - desc="path/name of outskull mesh outline (if generated)") + outskull_mesh_file = File(desc="path/name of outskull mesh outline (if generated)") outskin_mask_file = File(desc="path/name of outskin mask (if generated)") - outskin_mesh_file = File( - desc="path/name of outskin mesh outline (if generated)") + outskin_mesh_file = File(desc="path/name of outskin mesh outline (if generated)") skull_mask_file = File(desc="path/name of skull mask (if generated)") @@ -128,7 +150,7 @@ class BET(FSLCommand): """ - _cmd = 'bet' + _cmd = "bet" input_spec = BETInputSpec output_spec = BETOutputSpec @@ -144,168 +166,188 @@ def _run_interface(self, runtime): def _gen_outfilename(self): out_file = self.inputs.out_file if not isdefined(out_file) and isdefined(self.inputs.in_file): - out_file = self._gen_fname(self.inputs.in_file, suffix='_brain') + out_file = 
self._gen_fname(self.inputs.in_file, suffix="_brain") return os.path.abspath(out_file) def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = self._gen_outfilename() - if ((isdefined(self.inputs.mesh) and self.inputs.mesh) - or (isdefined(self.inputs.surfaces) and self.inputs.surfaces)): - outputs['meshfile'] = self._gen_fname( - outputs['out_file'], suffix='_mesh.vtk', change_ext=False) - if (isdefined(self.inputs.mask) and self.inputs.mask) or \ - (isdefined(self.inputs.reduce_bias) and - self.inputs.reduce_bias): - outputs['mask_file'] = self._gen_fname( - outputs['out_file'], suffix='_mask') + outputs["out_file"] = self._gen_outfilename() + if (isdefined(self.inputs.mesh) and self.inputs.mesh) or ( + isdefined(self.inputs.surfaces) and self.inputs.surfaces + ): + outputs["meshfile"] = self._gen_fname( + outputs["out_file"], suffix="_mesh.vtk", change_ext=False + ) + if (isdefined(self.inputs.mask) and self.inputs.mask) or ( + isdefined(self.inputs.reduce_bias) and self.inputs.reduce_bias + ): + outputs["mask_file"] = self._gen_fname(outputs["out_file"], suffix="_mask") if isdefined(self.inputs.outline) and self.inputs.outline: - outputs['outline_file'] = self._gen_fname( - outputs['out_file'], suffix='_overlay') + outputs["outline_file"] = self._gen_fname( + outputs["out_file"], suffix="_overlay" + ) if isdefined(self.inputs.surfaces) and self.inputs.surfaces: - outputs['inskull_mask_file'] = self._gen_fname( - outputs['out_file'], suffix='_inskull_mask') - outputs['inskull_mesh_file'] = self._gen_fname( - outputs['out_file'], suffix='_inskull_mesh') - outputs['outskull_mask_file'] = self._gen_fname( - outputs['out_file'], suffix='_outskull_mask') - outputs['outskull_mesh_file'] = self._gen_fname( - outputs['out_file'], suffix='_outskull_mesh') - outputs['outskin_mask_file'] = self._gen_fname( - outputs['out_file'], suffix='_outskin_mask') - outputs['outskin_mesh_file'] = self._gen_fname( - outputs['out_file'], suffix='_outskin_mesh') - outputs['skull_mask_file'] = self._gen_fname( - outputs['out_file'], suffix='_skull_mask') + outputs["inskull_mask_file"] = self._gen_fname( + outputs["out_file"], suffix="_inskull_mask" + ) + outputs["inskull_mesh_file"] = self._gen_fname( + outputs["out_file"], suffix="_inskull_mesh" + ) + outputs["outskull_mask_file"] = self._gen_fname( + outputs["out_file"], suffix="_outskull_mask" + ) + outputs["outskull_mesh_file"] = self._gen_fname( + outputs["out_file"], suffix="_outskull_mesh" + ) + outputs["outskin_mask_file"] = self._gen_fname( + outputs["out_file"], suffix="_outskin_mask" + ) + outputs["outskin_mesh_file"] = self._gen_fname( + outputs["out_file"], suffix="_outskin_mesh" + ) + outputs["skull_mask_file"] = self._gen_fname( + outputs["out_file"], suffix="_skull_mask" + ) if isdefined(self.inputs.no_output) and self.inputs.no_output: - outputs['out_file'] = Undefined + outputs["out_file"] = Undefined return outputs def _gen_filename(self, name): - if name == 'out_file': + if name == "out_file": return self._gen_outfilename() return None class FASTInputSpec(FSLCommandInputSpec): """ Defines inputs (trait classes) for FAST """ + in_files = InputMultiPath( File(exists=True), copyfile=False, - desc='image, or multi-channel set of images, ' - 'to be segmented', - argstr='%s', + desc="image, or multi-channel set of images, " "to be segmented", + argstr="%s", position=-1, - mandatory=True) - out_basename = File(desc='base name of output files', argstr='-o %s') + mandatory=True, + ) + out_basename = File(desc="base name of 
output files", argstr="-o %s") # ^^ uses in_file name as basename if none given number_classes = traits.Range( - low=1, high=10, argstr='-n %d', desc='number of tissue-type classes') - output_biasfield = traits.Bool( - desc='output estimated bias field', argstr='-b') + low=1, high=10, argstr="-n %d", desc="number of tissue-type classes" + ) + output_biasfield = traits.Bool(desc="output estimated bias field", argstr="-b") output_biascorrected = traits.Bool( - desc='output restored image (bias-corrected image)', argstr='-B') + desc="output restored image (bias-corrected image)", argstr="-B" + ) img_type = traits.Enum( (1, 2, 3), - desc='int specifying type of image: (1 = T1, 2 = T2, 3 = PD)', - argstr='-t %d') + desc="int specifying type of image: (1 = T1, 2 = T2, 3 = PD)", + argstr="-t %d", + ) bias_iters = traits.Range( low=1, high=10, - argstr='-I %d', - desc='number of main-loop iterations during ' - 'bias-field removal') + argstr="-I %d", + desc="number of main-loop iterations during " "bias-field removal", + ) bias_lowpass = traits.Range( low=4, high=40, - desc='bias field smoothing extent (FWHM) ' - 'in mm', - argstr='-l %d', - units='mm') + desc="bias field smoothing extent (FWHM) " "in mm", + argstr="-l %d", + units="mm", + ) init_seg_smooth = traits.Range( low=0.0001, high=0.1, - desc='initial segmentation spatial ' - 'smoothness (during bias field ' - 'estimation)', - argstr='-f %.3f') + desc="initial segmentation spatial " + "smoothness (during bias field " + "estimation)", + argstr="-f %.3f", + ) segments = traits.Bool( - desc='outputs a separate binary image for each ' - 'tissue type', - argstr='-g') + desc="outputs a separate binary image for each " "tissue type", argstr="-g" + ) init_transform = File( exists=True, - desc=' initialise' - ' using priors', - argstr='-a %s') + desc=" initialise" " using priors", + argstr="-a %s", + ) other_priors = InputMultiPath( File(exist=True), - desc='alternative prior images', - argstr='-A %s', + desc="alternative prior images", + argstr="-A %s", minlen=3, - maxlen=3) + maxlen=3, + ) no_pve = traits.Bool( - desc='turn off PVE (partial volume estimation)', argstr='--nopve') - no_bias = traits.Bool(desc='do not remove bias field', argstr='-N') - use_priors = traits.Bool(desc='use priors throughout', argstr='-P') + desc="turn off PVE (partial volume estimation)", argstr="--nopve" + ) + no_bias = traits.Bool(desc="do not remove bias field", argstr="-N") + use_priors = traits.Bool(desc="use priors throughout", argstr="-P") # ^^ Must also set -a!, mutually inclusive?? No, conditional mandatory... need to figure out how to handle with traits. 
segment_iters = traits.Range( low=1, high=50, - desc='number of segmentation-initialisation' - ' iterations', - argstr='-W %d') + desc="number of segmentation-initialisation" " iterations", + argstr="-W %d", + ) mixel_smooth = traits.Range( - low=0.0, - high=1.0, - desc='spatial smoothness for mixeltype', - argstr='-R %.2f') + low=0.0, high=1.0, desc="spatial smoothness for mixeltype", argstr="-R %.2f" + ) iters_afterbias = traits.Range( low=1, high=20, - desc='number of main-loop iterations ' - 'after bias-field removal', - argstr='-O %d') + desc="number of main-loop iterations " "after bias-field removal", + argstr="-O %d", + ) hyper = traits.Range( - low=0.0, - high=1.0, - desc='segmentation spatial smoothness', - argstr='-H %.2f') - verbose = traits.Bool(desc='switch on diagnostic messages', argstr='-v') + low=0.0, high=1.0, desc="segmentation spatial smoothness", argstr="-H %.2f" + ) + verbose = traits.Bool(desc="switch on diagnostic messages", argstr="-v") manual_seg = File( - exists=True, desc='Filename containing intensities', argstr='-s %s') + exists=True, desc="Filename containing intensities", argstr="-s %s" + ) probability_maps = traits.Bool( - desc='outputs individual probability maps', argstr='-p') + desc="outputs individual probability maps", argstr="-p" + ) class FASTOutputSpec(TraitedSpec): """Specify possible outputs from FAST""" + tissue_class_map = File( exists=True, - desc='path/name of binary segmented volume file' - ' one val for each class _seg') + desc="path/name of binary segmented volume file" + " one val for each class _seg", + ) tissue_class_files = OutputMultiPath( File( desc=( - 'path/name of binary segmented volumes one file for each class ' - '_seg_x'))) + "path/name of binary segmented volumes one file for each class " + "_seg_x" + ) + ) + ) restored_image = OutputMultiPath( File( desc=( - 'restored images (one for each input image) named according to ' - 'the input images _restore'))) + "restored images (one for each input image) named according to " + "the input images _restore" + ) + ) + ) mixeltype = File(desc="path/name of mixeltype volume file _mixeltype") - partial_volume_map = File(desc='path/name of partial volume file _pveseg') + partial_volume_map = File(desc="path/name of partial volume file _pveseg") partial_volume_files = OutputMultiPath( - File( - desc='path/name of partial volumes files one for each class, _pve_x' - )) + File(desc="path/name of partial volumes files one for each class, _pve_x") + ) - bias_field = OutputMultiPath(File(desc='Estimated bias field _bias')) + bias_field = OutputMultiPath(File(desc="Estimated bias field _bias")) probability_maps = OutputMultiPath( - File(desc='filenames, one for each class, for each input, prob_x')) + File(desc="filenames, one for each class, for each input, prob_x") + ) class FAST(FSLCommand): @@ -325,14 +367,15 @@ class FAST(FSLCommand): >>> out = fastr.run() # doctest: +SKIP """ - _cmd = 'fast' + + _cmd = "fast" input_spec = FASTInputSpec output_spec = FASTOutputSpec def _format_arg(self, name, spec, value): # first do what should be done in general formatted = super(FAST, self)._format_arg(name, spec, value) - if name == 'in_files': + if name == "in_files": # FAST needs the -S parameter value to correspond to the number # of input images, otherwise it will ignore all but the first formatted = "-S %d %s" % (len(value), formatted) @@ -348,287 +391,306 @@ def _list_outputs(self): # input filename _gen_fname_opts = {} if isdefined(self.inputs.out_basename): - _gen_fname_opts['basename'] = 
self.inputs.out_basename - _gen_fname_opts['cwd'] = os.getcwd() + _gen_fname_opts["basename"] = self.inputs.out_basename + _gen_fname_opts["cwd"] = os.getcwd() else: - _gen_fname_opts['basename'] = self.inputs.in_files[-1] - _gen_fname_opts['cwd'], _, _ = split_filename( - _gen_fname_opts['basename']) + _gen_fname_opts["basename"] = self.inputs.in_files[-1] + _gen_fname_opts["cwd"], _, _ = split_filename(_gen_fname_opts["basename"]) - outputs['tissue_class_map'] = self._gen_fname( - suffix='_seg', **_gen_fname_opts) + outputs["tissue_class_map"] = self._gen_fname(suffix="_seg", **_gen_fname_opts) if self.inputs.segments: - outputs['tissue_class_files'] = [] + outputs["tissue_class_files"] = [] for i in range(nclasses): - outputs['tissue_class_files'].append( - self._gen_fname(suffix='_seg_%d' % i, **_gen_fname_opts)) + outputs["tissue_class_files"].append( + self._gen_fname(suffix="_seg_%d" % i, **_gen_fname_opts) + ) if isdefined(self.inputs.output_biascorrected): - outputs['restored_image'] = [] + outputs["restored_image"] = [] if len(self.inputs.in_files) > 1: # for multi-image segmentation there is one corrected image # per input for val, f in enumerate(self.inputs.in_files): # image numbering is 1-based - outputs['restored_image'].append( + outputs["restored_image"].append( self._gen_fname( - suffix='_restore_%d' % (val + 1), - **_gen_fname_opts)) + suffix="_restore_%d" % (val + 1), **_gen_fname_opts + ) + ) else: # single image segmentation has unnumbered output image - outputs['restored_image'].append( - self._gen_fname(suffix='_restore', **_gen_fname_opts)) + outputs["restored_image"].append( + self._gen_fname(suffix="_restore", **_gen_fname_opts) + ) - outputs['mixeltype'] = self._gen_fname( - suffix='_mixeltype', **_gen_fname_opts) + outputs["mixeltype"] = self._gen_fname(suffix="_mixeltype", **_gen_fname_opts) if not self.inputs.no_pve: - outputs['partial_volume_map'] = self._gen_fname( - suffix='_pveseg', **_gen_fname_opts) - outputs['partial_volume_files'] = [] + outputs["partial_volume_map"] = self._gen_fname( + suffix="_pveseg", **_gen_fname_opts + ) + outputs["partial_volume_files"] = [] for i in range(nclasses): - outputs['partial_volume_files'].append( - self._gen_fname(suffix='_pve_%d' % i, **_gen_fname_opts)) + outputs["partial_volume_files"].append( + self._gen_fname(suffix="_pve_%d" % i, **_gen_fname_opts) + ) if self.inputs.output_biasfield: - outputs['bias_field'] = [] + outputs["bias_field"] = [] if len(self.inputs.in_files) > 1: # for multi-image segmentation there is one bias field image # per input for val, f in enumerate(self.inputs.in_files): # image numbering is 1-based - outputs['bias_field'].append( + outputs["bias_field"].append( self._gen_fname( - suffix='_bias_%d' % (val + 1), **_gen_fname_opts)) + suffix="_bias_%d" % (val + 1), **_gen_fname_opts + ) + ) else: # single image segmentation has unnumbered output image - outputs['bias_field'].append( - self._gen_fname(suffix='_bias', **_gen_fname_opts)) + outputs["bias_field"].append( + self._gen_fname(suffix="_bias", **_gen_fname_opts) + ) if self.inputs.probability_maps: - outputs['probability_maps'] = [] + outputs["probability_maps"] = [] for i in range(nclasses): - outputs['probability_maps'].append( - self._gen_fname(suffix='_prob_%d' % i, **_gen_fname_opts)) + outputs["probability_maps"].append( + self._gen_fname(suffix="_prob_%d" % i, **_gen_fname_opts) + ) return outputs class FLIRTInputSpec(FSLCommandInputSpec): in_file = File( - exists=True, - argstr='-in %s', - mandatory=True, - position=0, - 
desc='input file') + exists=True, argstr="-in %s", mandatory=True, position=0, desc="input file" + ) reference = File( - exists=True, - argstr='-ref %s', - mandatory=True, - position=1, - desc='reference file') + exists=True, argstr="-ref %s", mandatory=True, position=1, desc="reference file" + ) out_file = File( - argstr='-out %s', - desc='registered output file', - name_source=['in_file'], - name_template='%s_flirt', + argstr="-out %s", + desc="registered output file", + name_source=["in_file"], + name_template="%s_flirt", position=2, - hash_files=False) + hash_files=False, + ) out_matrix_file = File( - argstr='-omat %s', - name_source=['in_file'], + argstr="-omat %s", + name_source=["in_file"], keep_extension=True, - name_template='%s_flirt.mat', - desc='output affine matrix in 4x4 asciii format', + name_template="%s_flirt.mat", + desc="output affine matrix in 4x4 asciii format", position=3, - hash_files=False) + hash_files=False, + ) out_log = File( - name_source=['in_file'], + name_source=["in_file"], keep_extension=True, - requires=['save_log'], - name_template='%s_flirt.log', - desc='output log') - in_matrix_file = File(argstr='-init %s', desc='input 4x4 affine matrix') + requires=["save_log"], + name_template="%s_flirt.log", + desc="output log", + ) + in_matrix_file = File(argstr="-init %s", desc="input 4x4 affine matrix") apply_xfm = traits.Bool( - argstr='-applyxfm', + argstr="-applyxfm", desc=( - 'apply transformation supplied by in_matrix_file or uses_qform to' - ' use the affine matrix stored in the reference header')) + "apply transformation supplied by in_matrix_file or uses_qform to" + " use the affine matrix stored in the reference header" + ), + ) apply_isoxfm = traits.Float( - argstr='-applyisoxfm %f', - xor=['apply_xfm'], - desc='as applyxfm but forces isotropic resampling') + argstr="-applyisoxfm %f", + xor=["apply_xfm"], + desc="as applyxfm but forces isotropic resampling", + ) datatype = traits.Enum( - 'char', - 'short', - 'int', - 'float', - 'double', - argstr='-datatype %s', - desc='force output data type') + "char", + "short", + "int", + "float", + "double", + argstr="-datatype %s", + desc="force output data type", + ) cost = traits.Enum( - 'mutualinfo', - 'corratio', - 'normcorr', - 'normmi', - 'leastsq', - 'labeldiff', - 'bbr', - argstr='-cost %s', - desc='cost function') + "mutualinfo", + "corratio", + "normcorr", + "normmi", + "leastsq", + "labeldiff", + "bbr", + argstr="-cost %s", + desc="cost function", + ) # XXX What is the difference between 'cost' and 'searchcost'? Are # these both necessary or do they map to the same variable. 
cost_func = traits.Enum( - 'mutualinfo', - 'corratio', - 'normcorr', - 'normmi', - 'leastsq', - 'labeldiff', - 'bbr', - argstr='-searchcost %s', - desc='cost function') + "mutualinfo", + "corratio", + "normcorr", + "normmi", + "leastsq", + "labeldiff", + "bbr", + argstr="-searchcost %s", + desc="cost function", + ) uses_qform = traits.Bool( - argstr='-usesqform', desc='initialize using sform or qform') - display_init = traits.Bool( - argstr='-displayinit', desc='display initial matrix') + argstr="-usesqform", desc="initialize using sform or qform" + ) + display_init = traits.Bool(argstr="-displayinit", desc="display initial matrix") angle_rep = traits.Enum( - 'quaternion', - 'euler', - argstr='-anglerep %s', - desc='representation of rotation angles') + "quaternion", + "euler", + argstr="-anglerep %s", + desc="representation of rotation angles", + ) interp = traits.Enum( - 'trilinear', - 'nearestneighbour', - 'sinc', - 'spline', - argstr='-interp %s', - desc='final interpolation method used in reslicing') + "trilinear", + "nearestneighbour", + "sinc", + "spline", + argstr="-interp %s", + desc="final interpolation method used in reslicing", + ) sinc_width = traits.Int( - argstr='-sincwidth %d', units='voxels', desc='full-width in voxels') + argstr="-sincwidth %d", units="voxels", desc="full-width in voxels" + ) sinc_window = traits.Enum( - 'rectangular', - 'hanning', - 'blackman', - argstr='-sincwindow %s', - desc='sinc window') # XXX better doc - bins = traits.Int(argstr='-bins %d', desc='number of histogram bins') - dof = traits.Int( - argstr='-dof %d', desc='number of transform degrees of freedom') - no_resample = traits.Bool( - argstr='-noresample', desc='do not change input sampling') + "rectangular", + "hanning", + "blackman", + argstr="-sincwindow %s", + desc="sinc window", + ) # XXX better doc + bins = traits.Int(argstr="-bins %d", desc="number of histogram bins") + dof = traits.Int(argstr="-dof %d", desc="number of transform degrees of freedom") + no_resample = traits.Bool(argstr="-noresample", desc="do not change input sampling") force_scaling = traits.Bool( - argstr='-forcescaling', desc='force rescaling even for low-res images') + argstr="-forcescaling", desc="force rescaling even for low-res images" + ) min_sampling = traits.Float( - argstr='-minsampling %f', - units='mm', - desc='set minimum voxel dimension for sampling') + argstr="-minsampling %f", + units="mm", + desc="set minimum voxel dimension for sampling", + ) padding_size = traits.Int( - argstr='-paddingsize %d', - units='voxels', - desc='for applyxfm: interpolates outside image ' - 'by size') + argstr="-paddingsize %d", + units="voxels", + desc="for applyxfm: interpolates outside image " "by size", + ) searchr_x = traits.List( traits.Int, minlen=2, maxlen=2, - units='degrees', - argstr='-searchrx %s', - desc='search angles along x-axis, in degrees') + units="degrees", + argstr="-searchrx %s", + desc="search angles along x-axis, in degrees", + ) searchr_y = traits.List( traits.Int, minlen=2, maxlen=2, - units='degrees', - argstr='-searchry %s', - desc='search angles along y-axis, in degrees') + units="degrees", + argstr="-searchry %s", + desc="search angles along y-axis, in degrees", + ) searchr_z = traits.List( traits.Int, minlen=2, maxlen=2, - units='degrees', - argstr='-searchrz %s', - desc='search angles along z-axis, in degrees') + units="degrees", + argstr="-searchrz %s", + desc="search angles along z-axis, in degrees", + ) no_search = traits.Bool( - argstr='-nosearch', desc='set all angular searches to ranges 0 
to 0') + argstr="-nosearch", desc="set all angular searches to ranges 0 to 0" + ) coarse_search = traits.Int( - argstr='-coarsesearch %d', - units='degrees', - desc='coarse search delta angle') + argstr="-coarsesearch %d", units="degrees", desc="coarse search delta angle" + ) fine_search = traits.Int( - argstr='-finesearch %d', - units='degrees', - desc='fine search delta angle') + argstr="-finesearch %d", units="degrees", desc="fine search delta angle" + ) schedule = File( - exists=True, argstr='-schedule %s', desc='replaces default schedule') + exists=True, argstr="-schedule %s", desc="replaces default schedule" + ) ref_weight = File( - exists=True, - argstr='-refweight %s', - desc='File for reference weighting volume') + exists=True, argstr="-refweight %s", desc="File for reference weighting volume" + ) in_weight = File( - exists=True, - argstr='-inweight %s', - desc='File for input weighting volume') - no_clamp = traits.Bool( - argstr='-noclamp', desc='do not use intensity clamping') + exists=True, argstr="-inweight %s", desc="File for input weighting volume" + ) + no_clamp = traits.Bool(argstr="-noclamp", desc="do not use intensity clamping") no_resample_blur = traits.Bool( - argstr='-noresampblur', desc='do not use blurring on downsampling') - rigid2D = traits.Bool( - argstr='-2D', desc='use 2D rigid body mode - ignores dof') - save_log = traits.Bool(desc='save to log file') - verbose = traits.Int(argstr='-verbose %d', desc='verbose mode, 0 is least') + argstr="-noresampblur", desc="do not use blurring on downsampling" + ) + rigid2D = traits.Bool(argstr="-2D", desc="use 2D rigid body mode - ignores dof") + save_log = traits.Bool(desc="save to log file") + verbose = traits.Int(argstr="-verbose %d", desc="verbose mode, 0 is least") bgvalue = traits.Float( 0, - argstr='-setbackground %f', - desc=('use specified background value for points ' - 'outside FOV')) + argstr="-setbackground %f", + desc=("use specified background value for points " "outside FOV"), + ) # BBR options wm_seg = File( - argstr='-wmseg %s', - min_ver='5.0.0', - desc='white matter segmentation volume needed by BBR cost function') + argstr="-wmseg %s", + min_ver="5.0.0", + desc="white matter segmentation volume needed by BBR cost function", + ) wmcoords = File( - argstr='-wmcoords %s', - min_ver='5.0.0', - desc='white matter boundary coordinates for BBR cost function') + argstr="-wmcoords %s", + min_ver="5.0.0", + desc="white matter boundary coordinates for BBR cost function", + ) wmnorms = File( - argstr='-wmnorms %s', - min_ver='5.0.0', - desc='white matter boundary normals for BBR cost function') + argstr="-wmnorms %s", + min_ver="5.0.0", + desc="white matter boundary normals for BBR cost function", + ) fieldmap = File( - argstr='-fieldmap %s', - min_ver='5.0.0', - desc=('fieldmap image in rads/s - must be already registered to the ' - 'reference image')) + argstr="-fieldmap %s", + min_ver="5.0.0", + desc=( + "fieldmap image in rads/s - must be already registered to the " + "reference image" + ), + ) fieldmapmask = File( - argstr='-fieldmapmask %s', - min_ver='5.0.0', - desc='mask for fieldmap image') + argstr="-fieldmapmask %s", min_ver="5.0.0", desc="mask for fieldmap image" + ) pedir = traits.Int( - argstr='-pedir %d', - min_ver='5.0.0', - desc='phase encode direction of EPI - 1/2/3=x/y/z & -1/-2/-3=-x/-y/-z') + argstr="-pedir %d", + min_ver="5.0.0", + desc="phase encode direction of EPI - 1/2/3=x/y/z & -1/-2/-3=-x/-y/-z", + ) echospacing = traits.Float( - argstr='-echospacing %f', - min_ver='5.0.0', - desc='value 
of EPI echo spacing - units of seconds') + argstr="-echospacing %f", + min_ver="5.0.0", + desc="value of EPI echo spacing - units of seconds", + ) bbrtype = traits.Enum( - 'signed', - 'global_abs', - 'local_abs', - argstr='-bbrtype %s', - min_ver='5.0.0', - desc=('type of bbr cost function: signed [default], global_abs, ' - 'local_abs')) + "signed", + "global_abs", + "local_abs", + argstr="-bbrtype %s", + min_ver="5.0.0", + desc=("type of bbr cost function: signed [default], global_abs, " "local_abs"), + ) bbrslope = traits.Float( - argstr='-bbrslope %f', min_ver='5.0.0', desc='value of bbr slope') + argstr="-bbrslope %f", min_ver="5.0.0", desc="value of bbr slope" + ) class FLIRTOutputSpec(TraitedSpec): - out_file = File( - exists=True, desc='path/name of registered file (if generated)') + out_file = File(exists=True, desc="path/name of registered file (if generated)") out_matrix_file = File( - exists=True, - desc='path/name of calculated affine transform ' - '(if generated)') - out_log = File(desc='path/name of output log (if generated)') + exists=True, desc="path/name of calculated affine transform " "(if generated)" + ) + out_log = File(desc="path/name of output log (if generated)") class FLIRT(FSLCommand): @@ -653,17 +715,19 @@ class FLIRT(FSLCommand): >>> res = flt.run() #doctest: +SKIP """ - _cmd = 'flirt' + + _cmd = "flirt" input_spec = FLIRTInputSpec output_spec = FLIRTOutputSpec _log_written = False def aggregate_outputs(self, runtime=None, needed_outputs=None): outputs = super(FLIRT, self).aggregate_outputs( - runtime=runtime, needed_outputs=needed_outputs) + runtime=runtime, needed_outputs=needed_outputs + ) if self.inputs.save_log and not self._log_written: with open(outputs.out_log, "a") as text_file: - text_file.write(runtime.stdout + '\n') + text_file.write(runtime.stdout + "\n") self._log_written = True return outputs @@ -672,22 +736,27 @@ def _parse_inputs(self, skip=None): skip = [] if self.inputs.save_log and not self.inputs.verbose: self.inputs.verbose = 1 - if self.inputs.apply_xfm and not (self.inputs.in_matrix_file - or self.inputs.uses_qform): - raise RuntimeError('Argument apply_xfm requires in_matrix_file or ' - 'uses_qform arguments to run') - skip.append('save_log') + if self.inputs.apply_xfm and not ( + self.inputs.in_matrix_file or self.inputs.uses_qform + ): + raise RuntimeError( + "Argument apply_xfm requires in_matrix_file or " + "uses_qform arguments to run" + ) + skip.append("save_log") return super(FLIRT, self)._parse_inputs(skip=skip) class ApplyXFMInputSpec(FLIRTInputSpec): apply_xfm = traits.Bool( True, - argstr='-applyxfm', + argstr="-applyxfm", desc=( - 'apply transformation supplied by in_matrix_file or uses_qform to' - ' use the affine matrix stored in the reference header'), - usedefault=True) + "apply transformation supplied by in_matrix_file or uses_qform to" + " use the affine matrix stored in the reference header" + ), + usedefault=True, + ) class ApplyXFM(FLIRT): @@ -711,6 +780,7 @@ class ApplyXFM(FLIRT): >>> result = applyxfm.run() # doctest: +SKIP """ + input_spec = ApplyXFMInputSpec @@ -720,70 +790,69 @@ class MCFLIRTInputSpec(FSLCommandInputSpec): position=0, argstr="-in %s", mandatory=True, - desc="timeseries to motion-correct") + desc="timeseries to motion-correct", + ) out_file = File( - argstr='-out %s', genfile=True, desc="file to write", hash_files=False) + argstr="-out %s", genfile=True, desc="file to write", hash_files=False + ) cost = traits.Enum( - 'mutualinfo', - 'woods', - 'corratio', - 'normcorr', - 'normmi', - 
'leastsquares', - argstr='-cost %s', - desc="cost function to optimize") - bins = traits.Int(argstr='-bins %d', desc="number of histogram bins") - dof = traits.Int( - argstr='-dof %d', desc="degrees of freedom for the transformation") - ref_vol = traits.Int(argstr='-refvol %d', desc="volume to align frames to") - scaling = traits.Float( - argstr='-scaling %.2f', desc="scaling factor to use") + "mutualinfo", + "woods", + "corratio", + "normcorr", + "normmi", + "leastsquares", + argstr="-cost %s", + desc="cost function to optimize", + ) + bins = traits.Int(argstr="-bins %d", desc="number of histogram bins") + dof = traits.Int(argstr="-dof %d", desc="degrees of freedom for the transformation") + ref_vol = traits.Int(argstr="-refvol %d", desc="volume to align frames to") + scaling = traits.Float(argstr="-scaling %.2f", desc="scaling factor to use") smooth = traits.Float( - argstr='-smooth %.2f', desc="smoothing factor for the cost function") + argstr="-smooth %.2f", desc="smoothing factor for the cost function" + ) rotation = traits.Int( - argstr='-rotation %d', desc="scaling factor for rotation tolerances") + argstr="-rotation %d", desc="scaling factor for rotation tolerances" + ) stages = traits.Int( - argstr='-stages %d', - desc="stages (if 4, perform final search with sinc interpolation") - init = File( - exists=True, argstr='-init %s', desc="inital transformation matrix") + argstr="-stages %d", + desc="stages (if 4, perform final search with sinc interpolation", + ) + init = File(exists=True, argstr="-init %s", desc="inital transformation matrix") interpolation = traits.Enum( "spline", "nn", "sinc", argstr="-%s_final", - desc="interpolation method for transformation") - use_gradient = traits.Bool( - argstr='-gdt', desc="run search on gradient images") - use_contour = traits.Bool( - argstr='-edge', desc="run search on contour images") - mean_vol = traits.Bool(argstr='-meanvol', desc="register to mean volume") + desc="interpolation method for transformation", + ) + use_gradient = traits.Bool(argstr="-gdt", desc="run search on gradient images") + use_contour = traits.Bool(argstr="-edge", desc="run search on contour images") + mean_vol = traits.Bool(argstr="-meanvol", desc="register to mean volume") stats_imgs = traits.Bool( - argstr='-stats', desc="produce variance and std. dev. images") - save_mats = traits.Bool( - argstr='-mats', desc="save transformation matrices") - save_plots = traits.Bool( - argstr='-plots', desc="save transformation parameters") + argstr="-stats", desc="produce variance and std. dev. 
images" + ) + save_mats = traits.Bool(argstr="-mats", desc="save transformation matrices") + save_plots = traits.Bool(argstr="-plots", desc="save transformation parameters") save_rms = traits.Bool( - argstr='-rmsabs -rmsrel', desc="save rms displacement parameters") + argstr="-rmsabs -rmsrel", desc="save rms displacement parameters" + ) ref_file = File( - exists=True, - argstr='-reffile %s', - desc="target image for motion correction") + exists=True, argstr="-reffile %s", desc="target image for motion correction" + ) class MCFLIRTOutputSpec(TraitedSpec): out_file = File(exists=True, desc="motion-corrected timeseries") variance_img = File(exists=True, desc="variance image") std_img = File(exists=True, desc="standard deviation image") - mean_img = File( - exists=True, desc="mean timeseries image (if mean_vol=True)") + mean_img = File(exists=True, desc="mean timeseries image (if mean_vol=True)") par_file = File(exists=True, desc="text-file with motion parameters") - mat_file = OutputMultiPath( - File(exists=True), desc="transformation matrices") + mat_file = OutputMultiPath(File(exists=True), desc="transformation matrices") rms_files = OutputMultiPath( - File(exists=True), - desc="absolute and relative displacement parameters") + File(exists=True), desc="absolute and relative displacement parameters" + ) class MCFLIRT(FSLCommand): @@ -804,7 +873,8 @@ class MCFLIRT(FSLCommand): >>> res = mcflt.run() # doctest: +SKIP """ - _cmd = 'mcflirt' + + _cmd = "mcflirt" input_spec = MCFLIRTInputSpec output_spec = MCFLIRTOutputSpec @@ -819,21 +889,25 @@ def _format_arg(self, name, spec, value): def _list_outputs(self): outputs = self._outputs().get() - outputs['out_file'] = self._gen_outfilename() - output_dir = os.path.dirname(outputs['out_file']) + outputs["out_file"] = self._gen_outfilename() + output_dir = os.path.dirname(outputs["out_file"]) if isdefined(self.inputs.stats_imgs) and self.inputs.stats_imgs: - if LooseVersion(Info.version()) < LooseVersion('6.0.0'): + if LooseVersion(Info.version()) < LooseVersion("6.0.0"): # FSL <6.0 outputs have .nii.gz_variance.nii.gz as extension - outputs['variance_img'] = self._gen_fname( - outputs['out_file'] + '_variance.ext', cwd=output_dir) - outputs['std_img'] = self._gen_fname( - outputs['out_file'] + '_sigma.ext', cwd=output_dir) + outputs["variance_img"] = self._gen_fname( + outputs["out_file"] + "_variance.ext", cwd=output_dir + ) + outputs["std_img"] = self._gen_fname( + outputs["out_file"] + "_sigma.ext", cwd=output_dir + ) else: - outputs['variance_img'] = self._gen_fname( - outputs['out_file'], suffix='_variance', cwd=output_dir) - outputs['std_img'] = self._gen_fname( - outputs['out_file'], suffix='_sigma', cwd=output_dir) + outputs["variance_img"] = self._gen_fname( + outputs["out_file"], suffix="_variance", cwd=output_dir + ) + outputs["std_img"] = self._gen_fname( + outputs["out_file"], suffix="_sigma", cwd=output_dir + ) # The mean image created if -stats option is specified ('meanvol') # is missing the top and bottom slices. Therefore we only expose the @@ -842,33 +916,34 @@ def _list_outputs(self): # Note that the same problem holds for the std and variance image. 
if isdefined(self.inputs.mean_vol) and self.inputs.mean_vol: - if LooseVersion(Info.version()) < LooseVersion('6.0.0'): + if LooseVersion(Info.version()) < LooseVersion("6.0.0"): # FSL <6.0 outputs have .nii.gz_mean_img.nii.gz as extension - outputs['mean_img'] = self._gen_fname( - outputs['out_file'] + '_mean_reg.ext', cwd=output_dir) + outputs["mean_img"] = self._gen_fname( + outputs["out_file"] + "_mean_reg.ext", cwd=output_dir + ) else: - outputs['mean_img'] = self._gen_fname( - outputs['out_file'], suffix='_mean_reg', cwd=output_dir) + outputs["mean_img"] = self._gen_fname( + outputs["out_file"], suffix="_mean_reg", cwd=output_dir + ) if isdefined(self.inputs.save_mats) and self.inputs.save_mats: - _, filename = os.path.split(outputs['out_file']) - matpathname = os.path.join(output_dir, filename + '.mat') + _, filename = os.path.split(outputs["out_file"]) + matpathname = os.path.join(output_dir, filename + ".mat") _, _, _, timepoints = load(self.inputs.in_file).shape - outputs['mat_file'] = [] + outputs["mat_file"] = [] for t in range(timepoints): - outputs['mat_file'].append( - os.path.join(matpathname, 'MAT_%04d' % t)) + outputs["mat_file"].append(os.path.join(matpathname, "MAT_%04d" % t)) if isdefined(self.inputs.save_plots) and self.inputs.save_plots: # Note - if e.g. out_file has .nii.gz, you get .nii.gz.par, # which is what mcflirt does! - outputs['par_file'] = outputs['out_file'] + '.par' + outputs["par_file"] = outputs["out_file"] + ".par" if isdefined(self.inputs.save_rms) and self.inputs.save_rms: - outfile = outputs['out_file'] - outputs['rms_files'] = [outfile + '_abs.rms', outfile + '_rel.rms'] + outfile = outputs["out_file"] + outputs["rms_files"] = [outfile + "_abs.rms", outfile + "_rel.rms"] return outputs def _gen_filename(self, name): - if name == 'out_file': + if name == "out_file": return self._gen_outfilename() return None @@ -877,241 +952,284 @@ def _gen_outfilename(self): if isdefined(out_file): out_file = os.path.realpath(out_file) if not isdefined(out_file) and isdefined(self.inputs.in_file): - out_file = self._gen_fname(self.inputs.in_file, suffix='_mcf') + out_file = self._gen_fname(self.inputs.in_file, suffix="_mcf") return os.path.abspath(out_file) class FNIRTInputSpec(FSLCommandInputSpec): ref_file = File( - exists=True, - argstr='--ref=%s', - mandatory=True, - desc='name of reference image') + exists=True, argstr="--ref=%s", mandatory=True, desc="name of reference image" + ) in_file = File( - exists=True, - argstr='--in=%s', - mandatory=True, - desc='name of input image') + exists=True, argstr="--in=%s", mandatory=True, desc="name of input image" + ) affine_file = File( - exists=True, - argstr='--aff=%s', - desc='name of file containing affine transform') + exists=True, argstr="--aff=%s", desc="name of file containing affine transform" + ) inwarp_file = File( exists=True, - argstr='--inwarp=%s', - desc='name of file containing initial non-linear warps') + argstr="--inwarp=%s", + desc="name of file containing initial non-linear warps", + ) in_intensitymap_file = traits.List( File(exists=True), - argstr='--intin=%s', + argstr="--intin=%s", copyfile=False, minlen=1, maxlen=2, - desc=('name of file/files containing ' - 'initial intensity mapping ' - 'usually generated by previous ' - 'fnirt run')) + desc=( + "name of file/files containing " + "initial intensity mapping " + "usually generated by previous " + "fnirt run" + ), + ) fieldcoeff_file = traits.Either( traits.Bool, File, - argstr='--cout=%s', - desc='name of output file with field coefficients or 
true') + argstr="--cout=%s", + desc="name of output file with field coefficients or true", + ) warped_file = File( - argstr='--iout=%s', - desc='name of output image', - genfile=True, - hash_files=False) + argstr="--iout=%s", desc="name of output image", genfile=True, hash_files=False + ) field_file = traits.Either( traits.Bool, File, - argstr='--fout=%s', - desc='name of output file with field or true', - hash_files=False) + argstr="--fout=%s", + desc="name of output file with field or true", + hash_files=False, + ) jacobian_file = traits.Either( traits.Bool, File, - argstr='--jout=%s', - desc=('name of file for writing out the ' - 'Jacobian of the field (for ' - 'diagnostic or VBM purposes)'), - hash_files=False) + argstr="--jout=%s", + desc=( + "name of file for writing out the " + "Jacobian of the field (for " + "diagnostic or VBM purposes)" + ), + hash_files=False, + ) modulatedref_file = traits.Either( traits.Bool, File, - argstr='--refout=%s', - desc=('name of file for writing out ' - 'intensity modulated --ref (for ' - 'diagnostic purposes)'), - hash_files=False) + argstr="--refout=%s", + desc=( + "name of file for writing out " + "intensity modulated --ref (for " + "diagnostic purposes)" + ), + hash_files=False, + ) out_intensitymap_file = traits.Either( traits.Bool, File, - argstr='--intout=%s', - desc=('name of files for writing ' - 'information pertaining to ' - 'intensity mapping'), - hash_files=False) + argstr="--intout=%s", + desc=( + "name of files for writing " + "information pertaining to " + "intensity mapping" + ), + hash_files=False, + ) log_file = File( - argstr='--logout=%s', - desc='Name of log-file', - genfile=True, - hash_files=False) + argstr="--logout=%s", desc="Name of log-file", genfile=True, hash_files=False + ) config_file = traits.Either( traits.Enum("T1_2_MNI152_2mm", "FA_2_FMRIB58_1mm"), File(exists=True), - argstr='--config=%s', - desc='Name of config file specifying command line arguments') + argstr="--config=%s", + desc="Name of config file specifying command line arguments", + ) refmask_file = File( exists=True, - argstr='--refmask=%s', - desc='name of file with mask in reference space') + argstr="--refmask=%s", + desc="name of file with mask in reference space", + ) inmask_file = File( exists=True, - argstr='--inmask=%s', - desc='name of file with mask in input image space') + argstr="--inmask=%s", + desc="name of file with mask in input image space", + ) skip_refmask = traits.Bool( - argstr='--applyrefmask=0', - xor=['apply_refmask'], - desc='Skip specified refmask if set, default false') + argstr="--applyrefmask=0", + xor=["apply_refmask"], + desc="Skip specified refmask if set, default false", + ) skip_inmask = traits.Bool( - argstr='--applyinmask=0', - xor=['apply_inmask'], - desc='skip specified inmask if set, default false') + argstr="--applyinmask=0", + xor=["apply_inmask"], + desc="skip specified inmask if set, default false", + ) apply_refmask = traits.List( traits.Enum(0, 1), - argstr='--applyrefmask=%s', - xor=['skip_refmask'], - desc=('list of iterations to use reference mask on (1 to use, 0 to ' - 'skip)'), - sep=",") + argstr="--applyrefmask=%s", + xor=["skip_refmask"], + desc=("list of iterations to use reference mask on (1 to use, 0 to " "skip)"), + sep=",", + ) apply_inmask = traits.List( traits.Enum(0, 1), - argstr='--applyinmask=%s', - xor=['skip_inmask'], - desc='list of iterations to use input mask on (1 to use, 0 to skip)', - sep=",") + argstr="--applyinmask=%s", + xor=["skip_inmask"], + desc="list of iterations to use input 
mask on (1 to use, 0 to skip)", + sep=",", + ) skip_implicit_ref_masking = traits.Bool( - argstr='--imprefm=0', - desc=('skip implicit masking based on value in --ref image. ' - 'Default = 0')) + argstr="--imprefm=0", + desc=("skip implicit masking based on value in --ref image. " "Default = 0"), + ) skip_implicit_in_masking = traits.Bool( - argstr='--impinm=0', - desc=('skip implicit masking based on value in --in image. ' - 'Default = 0')) + argstr="--impinm=0", + desc=("skip implicit masking based on value in --in image. " "Default = 0"), + ) refmask_val = traits.Float( - argstr='--imprefval=%f', - desc='Value to mask out in --ref image. Default =0.0') + argstr="--imprefval=%f", desc="Value to mask out in --ref image. Default =0.0" + ) inmask_val = traits.Float( - argstr='--impinval=%f', - desc='Value to mask out in --in image. Default =0.0') + argstr="--impinval=%f", desc="Value to mask out in --in image. Default =0.0" + ) max_nonlin_iter = traits.List( traits.Int, - argstr='--miter=%s', - desc='Max # of non-linear iterations list, default [5, 5, 5, 5]', - sep=",") + argstr="--miter=%s", + desc="Max # of non-linear iterations list, default [5, 5, 5, 5]", + sep=",", + ) subsampling_scheme = traits.List( traits.Int, - argstr='--subsamp=%s', - desc='sub-sampling scheme, list, default [4, 2, 1, 1]', - sep=",") + argstr="--subsamp=%s", + desc="sub-sampling scheme, list, default [4, 2, 1, 1]", + sep=",", + ) warp_resolution = traits.Tuple( traits.Int, traits.Int, traits.Int, - argstr='--warpres=%d,%d,%d', - desc=('(approximate) resolution (in mm) of warp basis in x-, y- and ' - 'z-direction, default 10, 10, 10')) + argstr="--warpres=%d,%d,%d", + desc=( + "(approximate) resolution (in mm) of warp basis in x-, y- and " + "z-direction, default 10, 10, 10" + ), + ) spline_order = traits.Int( - argstr='--splineorder=%d', - desc='Order of spline, 2->Qadratic spline, 3->Cubic spline. Default=3') + argstr="--splineorder=%d", + desc="Order of spline, 2->Qadratic spline, 3->Cubic spline. Default=3", + ) in_fwhm = traits.List( traits.Int, - argstr='--infwhm=%s', - desc=('FWHM (in mm) of gaussian smoothing kernel for input volume, ' - 'default [6, 4, 2, 2]'), - sep=",") + argstr="--infwhm=%s", + desc=( + "FWHM (in mm) of gaussian smoothing kernel for input volume, " + "default [6, 4, 2, 2]" + ), + sep=",", + ) ref_fwhm = traits.List( traits.Int, - argstr='--reffwhm=%s', - desc=('FWHM (in mm) of gaussian smoothing kernel for ref volume, ' - 'default [4, 2, 0, 0]'), - sep=",") + argstr="--reffwhm=%s", + desc=( + "FWHM (in mm) of gaussian smoothing kernel for ref volume, " + "default [4, 2, 0, 0]" + ), + sep=",", + ) regularization_model = traits.Enum( - 'membrane_energy', - 'bending_energy', - argstr='--regmod=%s', - desc=('Model for regularisation of warp-field [membrane_energy ' - 'bending_energy], default bending_energy')) + "membrane_energy", + "bending_energy", + argstr="--regmod=%s", + desc=( + "Model for regularisation of warp-field [membrane_energy " + "bending_energy], default bending_energy" + ), + ) regularization_lambda = traits.List( traits.Float, - argstr='--lambda=%s', - desc=('Weight of regularisation, default depending on --ssqlambda and ' - '--regmod switches. See user documetation.'), - sep=",") + argstr="--lambda=%s", + desc=( + "Weight of regularisation, default depending on --ssqlambda and " + "--regmod switches. See user documetation." 
+ ), + sep=",", + ) skip_lambda_ssq = traits.Bool( - argstr='--ssqlambda=0', - desc='If true, lambda is not weighted by current ssq, default false') + argstr="--ssqlambda=0", + desc="If true, lambda is not weighted by current ssq, default false", + ) jacobian_range = traits.Tuple( traits.Float, traits.Float, - argstr='--jacrange=%f,%f', - desc='Allowed range of Jacobian determinants, default 0.01, 100.0') + argstr="--jacrange=%f,%f", + desc="Allowed range of Jacobian determinants, default 0.01, 100.0", + ) derive_from_ref = traits.Bool( - argstr='--refderiv', - desc=('If true, ref image is used to calculate derivatives. ' - 'Default false')) + argstr="--refderiv", + desc=("If true, ref image is used to calculate derivatives. " "Default false"), + ) intensity_mapping_model = traits.Enum( - 'none', - 'global_linear', - 'global_non_linear', - 'local_linear', - 'global_non_linear_with_bias', - 'local_non_linear', - argstr='--intmod=%s', - desc='Model for intensity-mapping') + "none", + "global_linear", + "global_non_linear", + "local_linear", + "global_non_linear_with_bias", + "local_non_linear", + argstr="--intmod=%s", + desc="Model for intensity-mapping", + ) intensity_mapping_order = traits.Int( - argstr='--intorder=%d', - desc='Order of poynomial for mapping intensities, default 5') + argstr="--intorder=%d", + desc="Order of poynomial for mapping intensities, default 5", + ) biasfield_resolution = traits.Tuple( traits.Int, traits.Int, traits.Int, - argstr='--biasres=%d,%d,%d', - desc=('Resolution (in mm) of bias-field modelling local intensities, ' - 'default 50, 50, 50')) + argstr="--biasres=%d,%d,%d", + desc=( + "Resolution (in mm) of bias-field modelling local intensities, " + "default 50, 50, 50" + ), + ) bias_regularization_lambda = traits.Float( - argstr='--biaslambda=%f', - desc='Weight of regularisation for bias-field, default 10000') + argstr="--biaslambda=%f", + desc="Weight of regularisation for bias-field, default 10000", + ) skip_intensity_mapping = traits.Bool( - argstr='--estint=0', - xor=['apply_intensity_mapping'], - desc='Skip estimate intensity-mapping default false') + argstr="--estint=0", + xor=["apply_intensity_mapping"], + desc="Skip estimate intensity-mapping default false", + ) apply_intensity_mapping = traits.List( traits.Enum(0, 1), - argstr='--estint=%s', - xor=['skip_intensity_mapping'], - desc=('List of subsampling levels to apply intensity mapping for ' - '(0 to skip, 1 to apply)'), - sep=",") + argstr="--estint=%s", + xor=["skip_intensity_mapping"], + desc=( + "List of subsampling levels to apply intensity mapping for " + "(0 to skip, 1 to apply)" + ), + sep=",", + ) hessian_precision = traits.Enum( - 'double', - 'float', - argstr='--numprec=%s', - desc=('Precision for representing Hessian, double or float. ' - 'Default double')) + "double", + "float", + argstr="--numprec=%s", + desc=("Precision for representing Hessian, double or float. 
" "Default double"), + ) class FNIRTOutputSpec(TraitedSpec): - fieldcoeff_file = File(exists=True, desc='file with field coefficients') - warped_file = File(exists=True, desc='warped image') - field_file = File(desc='file with warp field') - jacobian_file = File(desc='file containing Jacobian of the field') - modulatedref_file = File(desc='file containing intensity modulated --ref') + fieldcoeff_file = File(exists=True, desc="file with field coefficients") + warped_file = File(exists=True, desc="warped image") + field_file = File(desc="file with warp field") + jacobian_file = File(desc="file containing Jacobian of the field") + modulatedref_file = File(desc="file containing intensity modulated --ref") out_intensitymap_file = traits.List( File, minlen=2, maxlen=2, - desc='files containing info pertaining to intensity mapping') - log_file = File(desc='Name of log-file') + desc="files containing info pertaining to intensity mapping", + ) + log_file = File(desc="Name of log-file") class FNIRT(FSLCommand): @@ -1146,18 +1264,18 @@ class FNIRT(FSLCommand): """ - _cmd = 'fnirt' + _cmd = "fnirt" input_spec = FNIRTInputSpec output_spec = FNIRTOutputSpec filemap = { - 'warped_file': 'warped', - 'field_file': 'field', - 'jacobian_file': 'field_jacobian', - 'modulatedref_file': 'modulated', - 'out_intensitymap_file': 'intmap', - 'log_file': 'log.txt', - 'fieldcoeff_file': 'fieldwarp' + "warped_file": "warped", + "field_file": "field", + "jacobian_file": "field_jacobian", + "modulatedref_file": "modulated", + "out_intensitymap_file": "intmap", + "log_file": "log.txt", + "fieldcoeff_file": "fieldwarp", } def _list_outputs(self): @@ -1165,48 +1283,49 @@ def _list_outputs(self): for key, suffix in list(self.filemap.items()): inval = getattr(self.inputs, key) change_ext = True - if key in ['warped_file', 'log_file']: - if suffix.endswith('.txt'): + if key in ["warped_file", "log_file"]: + if suffix.endswith(".txt"): change_ext = False if isdefined(inval): outputs[key] = os.path.abspath(inval) else: outputs[key] = self._gen_fname( - self.inputs.in_file, - suffix='_' + suffix, - change_ext=change_ext) + self.inputs.in_file, suffix="_" + suffix, change_ext=change_ext + ) elif isdefined(inval): if isinstance(inval, bool): if inval: outputs[key] = self._gen_fname( self.inputs.in_file, - suffix='_' + suffix, - change_ext=change_ext) + suffix="_" + suffix, + change_ext=change_ext, + ) else: outputs[key] = os.path.abspath(inval) - if key == 'out_intensitymap_file' and isdefined(outputs[key]): + if key == "out_intensitymap_file" and isdefined(outputs[key]): basename = FNIRT.intensitymap_file_basename(outputs[key]) outputs[key] = [ outputs[key], - '%s.txt' % basename, + "%s.txt" % basename, ] return outputs def _format_arg(self, name, spec, value): - if name in ('in_intensitymap_file', 'out_intensitymap_file'): - if name == 'out_intensitymap_file': + if name in ("in_intensitymap_file", "out_intensitymap_file"): + if name == "out_intensitymap_file": value = self._list_outputs()[name] value = [FNIRT.intensitymap_file_basename(v) for v in value] - assert len(set(value)) == 1, ( - 'Found different basenames for {}: {}'.format(name, value)) + assert len(set(value)) == 1, "Found different basenames for {}: {}".format( + name, value + ) return spec.argstr % value[0] if name in list(self.filemap.keys()): return spec.argstr % self._list_outputs()[name] return super(FNIRT, self)._format_arg(name, spec, value) def _gen_filename(self, name): - if name in ['warped_file', 'log_file']: + if name in ["warped_file", "log_file"]: 
return self._list_outputs()[name] return None @@ -1220,12 +1339,12 @@ def write_config(self, configfile): configfile : /path/to/configfile """ try: - fid = open(configfile, 'w+') + fid = open(configfile, "w+") except IOError: - print('unable to create config_file %s' % (configfile)) + print("unable to create config_file %s" % (configfile)) for item in list(self.inputs.get().items()): - fid.write('%s\n' % (item)) + fid.write("%s\n" % (item)) fid.close() @classmethod @@ -1233,9 +1352,9 @@ def intensitymap_file_basename(cls, f): """Removes valid intensitymap extensions from `f`, returning a basename that can refer to both intensitymap files. """ - for ext in list(Info.ftypes.values()) + ['.txt']: + for ext in list(Info.ftypes.values()) + [".txt"]: if f.endswith(ext): - return f[:-len(ext)] + return f[: -len(ext)] # TODO consider warning for this case return f @@ -1243,74 +1362,86 @@ def intensitymap_file_basename(cls, f): class ApplyWarpInputSpec(FSLCommandInputSpec): in_file = File( exists=True, - argstr='--in=%s', + argstr="--in=%s", mandatory=True, position=0, - desc='image to be warped') + desc="image to be warped", + ) out_file = File( - argstr='--out=%s', + argstr="--out=%s", genfile=True, position=2, - desc='output filename', - hash_files=False) + desc="output filename", + hash_files=False, + ) ref_file = File( exists=True, - argstr='--ref=%s', + argstr="--ref=%s", mandatory=True, position=1, - desc='reference image') + desc="reference image", + ) field_file = File( - exists=True, argstr='--warp=%s', desc='file containing warp field') + exists=True, argstr="--warp=%s", desc="file containing warp field" + ) abswarp = traits.Bool( - argstr='--abs', - xor=['relwarp'], - desc="treat warp field as absolute: x' = w(x)") + argstr="--abs", xor=["relwarp"], desc="treat warp field as absolute: x' = w(x)" + ) relwarp = traits.Bool( - argstr='--rel', - xor=['abswarp'], + argstr="--rel", + xor=["abswarp"], position=-1, - desc="treat warp field as relative: x' = x + w(x)") + desc="treat warp field as relative: x' = x + w(x)", + ) datatype = traits.Enum( - 'char', - 'short', - 'int', - 'float', - 'double', - argstr='--datatype=%s', - desc='Force output data type [char short int float double].') + "char", + "short", + "int", + "float", + "double", + argstr="--datatype=%s", + desc="Force output data type [char short int float double].", + ) supersample = traits.Bool( - argstr='--super', - desc='intermediary supersampling of output, default is off') + argstr="--super", desc="intermediary supersampling of output, default is off" + ) superlevel = traits.Either( - traits.Enum('a'), + traits.Enum("a"), traits.Int, - argstr='--superlevel=%s', - desc=("level of intermediary supersampling, a for 'automatic' or " - "integer level. Default = 2")) + argstr="--superlevel=%s", + desc=( + "level of intermediary supersampling, a for 'automatic' or " + "integer level. 
Default = 2" + ), + ) premat = File( exists=True, - argstr='--premat=%s', - desc='filename for pre-transform (affine matrix)') + argstr="--premat=%s", + desc="filename for pre-transform (affine matrix)", + ) postmat = File( exists=True, - argstr='--postmat=%s', - desc='filename for post-transform (affine matrix)') + argstr="--postmat=%s", + desc="filename for post-transform (affine matrix)", + ) mask_file = File( exists=True, - argstr='--mask=%s', - desc='filename for mask image (in reference space)') + argstr="--mask=%s", + desc="filename for mask image (in reference space)", + ) interp = traits.Enum( - 'nn', - 'trilinear', - 'sinc', - 'spline', - argstr='--interp=%s', + "nn", + "trilinear", + "sinc", + "spline", + argstr="--interp=%s", position=-2, - desc='interpolation method') + desc="interpolation method", + ) class ApplyWarpOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='Warped output file') + out_file = File(exists=True, desc="Warped output file") class ApplyWarp(FSLCommand): @@ -1329,26 +1460,25 @@ class ApplyWarp(FSLCommand): """ - _cmd = 'applywarp' + _cmd = "applywarp" input_spec = ApplyWarpInputSpec output_spec = ApplyWarpOutputSpec def _format_arg(self, name, spec, value): - if name == 'superlevel': + if name == "superlevel": return spec.argstr % str(value) return super(ApplyWarp, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self._outputs().get() if not isdefined(self.inputs.out_file): - outputs['out_file'] = self._gen_fname( - self.inputs.in_file, suffix='_warp') + outputs["out_file"] = self._gen_fname(self.inputs.in_file, suffix="_warp") else: - outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) return outputs def _gen_filename(self, name): - if name == 'out_file': + if name == "out_file": return self._list_outputs()[name] return None @@ -1356,45 +1486,53 @@ def _gen_filename(self, name): class SliceTimerInputSpec(FSLCommandInputSpec): in_file = File( exists=True, - argstr='--in=%s', + argstr="--in=%s", mandatory=True, position=0, - desc='filename of input timeseries') + desc="filename of input timeseries", + ) out_file = File( - argstr='--out=%s', + argstr="--out=%s", genfile=True, - desc='filename of output timeseries', - hash_files=False) - index_dir = traits.Bool( - argstr='--down', desc='slice indexing from top to bottom') + desc="filename of output timeseries", + hash_files=False, + ) + index_dir = traits.Bool(argstr="--down", desc="slice indexing from top to bottom") time_repetition = traits.Float( - argstr='--repeat=%f', desc='Specify TR of data - default is 3s') + argstr="--repeat=%f", desc="Specify TR of data - default is 3s" + ) slice_direction = traits.Enum( 1, 2, 3, - argstr='--direction=%d', - desc='direction of slice acquisition (x=1, y=2, z=3) - default is z') - interleaved = traits.Bool( - argstr='--odd', desc='use interleaved acquisition') + argstr="--direction=%d", + desc="direction of slice acquisition (x=1, y=2, z=3) - default is z", + ) + interleaved = traits.Bool(argstr="--odd", desc="use interleaved acquisition") custom_timings = File( exists=True, - argstr='--tcustom=%s', - desc=('slice timings, in fractions of TR, range 0:1 (default is 0.5 = ' - 'no shift)')) + argstr="--tcustom=%s", + desc=( + "slice timings, in fractions of TR, range 0:1 (default is 0.5 = " + "no shift)" + ), + ) global_shift = traits.Float( - argstr='--tglobal', - desc='shift in fraction of TR, range 0:1 (default is 0.5 = no shift)') + argstr="--tglobal", + desc="shift 
in fraction of TR, range 0:1 (default is 0.5 = no shift)", + ) custom_order = File( exists=True, - argstr='--ocustom=%s', - desc=('filename of single-column custom interleave order file (first ' - 'slice is referred to as 1 not 0)')) + argstr="--ocustom=%s", + desc=( + "filename of single-column custom interleave order file (first " + "slice is referred to as 1 not 0)" + ), + ) class SliceTimerOutputSpec(TraitedSpec): - slice_time_corrected_file = File( - exists=True, desc='slice time corrected file') + slice_time_corrected_file = File(exists=True, desc="slice time corrected file") class SliceTimer(FSLCommand): @@ -1411,7 +1549,7 @@ class SliceTimer(FSLCommand): """ - _cmd = 'slicetimer' + _cmd = "slicetimer" input_spec = SliceTimerInputSpec output_spec = SliceTimerOutputSpec @@ -1419,69 +1557,80 @@ def _list_outputs(self): outputs = self._outputs().get() out_file = self.inputs.out_file if not isdefined(out_file): - out_file = self._gen_fname(self.inputs.in_file, suffix='_st') - outputs['slice_time_corrected_file'] = os.path.abspath(out_file) + out_file = self._gen_fname(self.inputs.in_file, suffix="_st") + outputs["slice_time_corrected_file"] = os.path.abspath(out_file) return outputs def _gen_filename(self, name): - if name == 'out_file': - return self._list_outputs()['slice_time_corrected_file'] + if name == "out_file": + return self._list_outputs()["slice_time_corrected_file"] return None class SUSANInputSpec(FSLCommandInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=1, - desc='filename of input timeseries') + desc="filename of input timeseries", + ) brightness_threshold = traits.Float( - argstr='%.10f', + argstr="%.10f", position=2, mandatory=True, - desc=('brightness threshold and should be greater than noise level ' - 'and less than contrast of edges to be preserved.')) + desc=( + "brightness threshold and should be greater than noise level " + "and less than contrast of edges to be preserved." + ), + ) fwhm = traits.Float( - argstr='%.10f', + argstr="%.10f", position=3, mandatory=True, - desc='fwhm of smoothing, in mm, gets converted using sqrt(8*log(2))') + desc="fwhm of smoothing, in mm, gets converted using sqrt(8*log(2))", + ) dimension = traits.Enum( 3, 2, - argstr='%d', + argstr="%d", position=4, usedefault=True, - desc='within-plane (2) or fully 3D (3)') + desc="within-plane (2) or fully 3D (3)", + ) use_median = traits.Enum( 1, 0, - argstr='%d', + argstr="%d", position=5, usedefault=True, - desc=('whether to use a local median filter in the cases where ' - 'single-point noise is detected')) + desc=( + "whether to use a local median filter in the cases where " + "single-point noise is detected" + ), + ) usans = traits.List( traits.Tuple(File(exists=True), traits.Float), maxlen=2, - argstr='', + argstr="", position=6, usedefault=True, - desc='determines whether the smoothing area (USAN) is to be ' - 'found from secondary images (0, 1 or 2). A negative ' - 'value for any brightness threshold will auto-set the ' - 'threshold at 10% of the robust range') + desc="determines whether the smoothing area (USAN) is to be " + "found from secondary images (0, 1 or 2). 
A negative " + "value for any brightness threshold will auto-set the " + "threshold at 10% of the robust range", + ) out_file = File( - argstr='%s', + argstr="%s", position=-1, genfile=True, - desc='output file name', - hash_files=False) + desc="output file name", + hash_files=False, + ) class SUSANOutputSpec(TraitedSpec): - smoothed_file = File(exists=True, desc='smoothed output file') + smoothed_file = File(exists=True, desc="smoothed output file") class SUSAN(FSLCommand): @@ -1504,154 +1653,170 @@ class SUSAN(FSLCommand): >>> result = sus.run() # doctest: +SKIP """ - _cmd = 'susan' + _cmd = "susan" input_spec = SUSANInputSpec output_spec = SUSANOutputSpec def _format_arg(self, name, spec, value): - if name == 'fwhm': + if name == "fwhm": return spec.argstr % (float(value) / np.sqrt(8 * np.log(2))) - if name == 'usans': + if name == "usans": if not value: - return '0' + return "0" arglist = [str(len(value))] for filename, thresh in value: - arglist.extend([filename, '%.10f' % thresh]) - return ' '.join(arglist) + arglist.extend([filename, "%.10f" % thresh]) + return " ".join(arglist) return super(SUSAN, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self._outputs().get() out_file = self.inputs.out_file if not isdefined(out_file): - out_file = self._gen_fname(self.inputs.in_file, suffix='_smooth') - outputs['smoothed_file'] = os.path.abspath(out_file) + out_file = self._gen_fname(self.inputs.in_file, suffix="_smooth") + outputs["smoothed_file"] = os.path.abspath(out_file) return outputs def _gen_filename(self, name): - if name == 'out_file': - return self._list_outputs()['smoothed_file'] + if name == "out_file": + return self._list_outputs()["smoothed_file"] return None class FUGUEInputSpec(FSLCommandInputSpec): - in_file = File( - exists=True, argstr='--in=%s', desc='filename of input volume') + in_file = File(exists=True, argstr="--in=%s", desc="filename of input volume") shift_in_file = File( exists=True, - argstr='--loadshift=%s', - desc='filename for reading pixel shift volume') + argstr="--loadshift=%s", + desc="filename for reading pixel shift volume", + ) phasemap_in_file = File( - exists=True, - argstr='--phasemap=%s', - desc='filename for input phase image') + exists=True, argstr="--phasemap=%s", desc="filename for input phase image" + ) fmap_in_file = File( exists=True, - argstr='--loadfmap=%s', - desc='filename for loading fieldmap (rad/s)') + argstr="--loadfmap=%s", + desc="filename for loading fieldmap (rad/s)", + ) unwarped_file = File( - argstr='--unwarp=%s', - desc='apply unwarping and save as filename', - xor=['warped_file'], - requires=['in_file']) + argstr="--unwarp=%s", + desc="apply unwarping and save as filename", + xor=["warped_file"], + requires=["in_file"], + ) warped_file = File( - argstr='--warp=%s', - desc='apply forward warping and save as filename', - xor=['unwarped_file'], - requires=['in_file']) + argstr="--warp=%s", + desc="apply forward warping and save as filename", + xor=["unwarped_file"], + requires=["in_file"], + ) forward_warping = traits.Bool( - False, - usedefault=True, - desc='apply forward warping instead of unwarping') + False, usedefault=True, desc="apply forward warping instead of unwarping" + ) dwell_to_asym_ratio = traits.Float( - argstr='--dwelltoasym=%.10f', desc='set the dwell to asym time ratio') + argstr="--dwelltoasym=%.10f", desc="set the dwell to asym time ratio" + ) dwell_time = traits.Float( - argstr='--dwell=%.10f', - desc=('set the EPI dwell time per phase-encode line - same as echo ' - 'spacing - 
(sec)')) + argstr="--dwell=%.10f", + desc=( + "set the EPI dwell time per phase-encode line - same as echo " + "spacing - (sec)" + ), + ) asym_se_time = traits.Float( - argstr='--asym=%.10f', - desc='set the fieldmap asymmetric spin echo time (sec)') - median_2dfilter = traits.Bool( - argstr='--median', desc='apply 2D median filtering') + argstr="--asym=%.10f", desc="set the fieldmap asymmetric spin echo time (sec)" + ) + median_2dfilter = traits.Bool(argstr="--median", desc="apply 2D median filtering") despike_2dfilter = traits.Bool( - argstr='--despike', desc='apply a 2D de-spiking filter') + argstr="--despike", desc="apply a 2D de-spiking filter" + ) no_gap_fill = traits.Bool( - argstr='--nofill', - desc='do not apply gap-filling measure to the fieldmap') + argstr="--nofill", desc="do not apply gap-filling measure to the fieldmap" + ) no_extend = traits.Bool( - argstr='--noextend', - desc='do not apply rigid-body extrapolation to the fieldmap') + argstr="--noextend", + desc="do not apply rigid-body extrapolation to the fieldmap", + ) smooth2d = traits.Float( - argstr='--smooth2=%.2f', - desc='apply 2D Gaussian smoothing of sigma N (in mm)') + argstr="--smooth2=%.2f", desc="apply 2D Gaussian smoothing of sigma N (in mm)" + ) smooth3d = traits.Float( - argstr='--smooth3=%.2f', - desc='apply 3D Gaussian smoothing of sigma N (in mm)') + argstr="--smooth3=%.2f", desc="apply 3D Gaussian smoothing of sigma N (in mm)" + ) poly_order = traits.Int( - argstr='--poly=%d', desc='apply polynomial fitting of order N') + argstr="--poly=%d", desc="apply polynomial fitting of order N" + ) fourier_order = traits.Int( - argstr='--fourier=%d', - desc='apply Fourier (sinusoidal) fitting of order N') - pava = traits.Bool( - argstr='--pava', desc='apply monotonic enforcement via PAVA') + argstr="--fourier=%d", desc="apply Fourier (sinusoidal) fitting of order N" + ) + pava = traits.Bool(argstr="--pava", desc="apply monotonic enforcement via PAVA") despike_threshold = traits.Float( - argstr='--despikethreshold=%s', - desc='specify the threshold for de-spiking (default=3.0)') + argstr="--despikethreshold=%s", + desc="specify the threshold for de-spiking (default=3.0)", + ) unwarp_direction = traits.Enum( - 'x', - 'y', - 'z', - 'x-', - 'y-', - 'z-', - argstr='--unwarpdir=%s', - desc='specifies direction of warping (default y)') + "x", + "y", + "z", + "x-", + "y-", + "z-", + argstr="--unwarpdir=%s", + desc="specifies direction of warping (default y)", + ) phase_conjugate = traits.Bool( - argstr='--phaseconj', desc='apply phase conjugate method of unwarping') + argstr="--phaseconj", desc="apply phase conjugate method of unwarping" + ) icorr = traits.Bool( - argstr='--icorr', - requires=['shift_in_file'], - desc=('apply intensity correction to unwarping (pixel shift method ' - 'only)')) + argstr="--icorr", + requires=["shift_in_file"], + desc=("apply intensity correction to unwarping (pixel shift method " "only)"), + ) icorr_only = traits.Bool( - argstr='--icorronly', - requires=['unwarped_file'], - desc='apply intensity correction only') + argstr="--icorronly", + requires=["unwarped_file"], + desc="apply intensity correction only", + ) mask_file = File( - exists=True, - argstr='--mask=%s', - desc='filename for loading valid mask') + exists=True, argstr="--mask=%s", desc="filename for loading valid mask" + ) nokspace = traits.Bool( - False, argstr='--nokspace', desc='do not use k-space forward warping') + False, argstr="--nokspace", desc="do not use k-space forward warping" + ) # Special outputs: shift (voxel shift 
map, vsm) save_shift = traits.Bool( - False, xor=['save_unmasked_shift'], desc='write pixel shift volume') + False, xor=["save_unmasked_shift"], desc="write pixel shift volume" + ) shift_out_file = File( - argstr='--saveshift=%s', desc='filename for saving pixel shift volume') + argstr="--saveshift=%s", desc="filename for saving pixel shift volume" + ) save_unmasked_shift = traits.Bool( - argstr='--unmaskshift', - xor=['save_shift'], - desc='saves the unmasked shiftmap when using --saveshift') + argstr="--unmaskshift", + xor=["save_shift"], + desc="saves the unmasked shiftmap when using --saveshift", + ) # Special outputs: fieldmap (fmap) save_fmap = traits.Bool( - False, xor=['save_unmasked_fmap'], desc='write field map volume') + False, xor=["save_unmasked_fmap"], desc="write field map volume" + ) fmap_out_file = File( - argstr='--savefmap=%s', desc='filename for saving fieldmap (rad/s)') + argstr="--savefmap=%s", desc="filename for saving fieldmap (rad/s)" + ) save_unmasked_fmap = traits.Bool( False, - argstr='--unmaskfmap', - xor=['save_fmap'], - desc='saves the unmasked fieldmap when using --savefmap') + argstr="--unmaskfmap", + xor=["save_fmap"], + desc="saves the unmasked fieldmap when using --savefmap", + ) class FUGUEOutputSpec(TraitedSpec): - unwarped_file = File(desc='unwarped file') - warped_file = File(desc='forward warped file') - shift_out_file = File(desc='voxel shift map file') - fmap_out_file = File(desc='fieldmap file') + unwarped_file = File(desc="unwarped file") + warped_file = File(desc="forward warped file") + shift_out_file = File(desc="voxel shift map file") + fmap_out_file = File(desc="fieldmap file") class FUGUE(FSLCommand): @@ -1719,7 +1884,7 @@ class FUGUE(FSLCommand): """ - _cmd = 'fugue' + _cmd = "fugue" input_spec = FUGUEInputSpec output_spec = FUGUEOutputSpec @@ -1733,82 +1898,97 @@ def _parse_inputs(self, skip=None): if not input_phase and not input_vsm and not input_fmap: raise RuntimeError( - ('Either phasemap_in_file, shift_in_file or fmap_in_file must ' - 'be set.')) + ( + "Either phasemap_in_file, shift_in_file or fmap_in_file must " + "be set." 
+ ) + ) if not isdefined(self.inputs.in_file): - skip += ['unwarped_file', 'warped_file'] + skip += ["unwarped_file", "warped_file"] else: if self.inputs.forward_warping: - skip += ['unwarped_file'] - trait_spec = self.inputs.trait('warped_file') + skip += ["unwarped_file"] + trait_spec = self.inputs.trait("warped_file") trait_spec.name_template = "%s_warped" - trait_spec.name_source = 'in_file' - trait_spec.output_name = 'warped_file' + trait_spec.name_source = "in_file" + trait_spec.output_name = "warped_file" else: - skip += ['warped_file'] - trait_spec = self.inputs.trait('unwarped_file') + skip += ["warped_file"] + trait_spec = self.inputs.trait("unwarped_file") trait_spec.name_template = "%s_unwarped" - trait_spec.name_source = 'in_file' - trait_spec.output_name = 'unwarped_file' + trait_spec.name_source = "in_file" + trait_spec.output_name = "unwarped_file" # Handle shift output if not isdefined(self.inputs.shift_out_file): - vsm_save_masked = (isdefined(self.inputs.save_shift) - and self.inputs.save_shift) - vsm_save_unmasked = (isdefined(self.inputs.save_unmasked_shift) - and self.inputs.save_unmasked_shift) - - if (vsm_save_masked or vsm_save_unmasked): - trait_spec = self.inputs.trait('shift_out_file') - trait_spec.output_name = 'shift_out_file' + vsm_save_masked = ( + isdefined(self.inputs.save_shift) and self.inputs.save_shift + ) + vsm_save_unmasked = ( + isdefined(self.inputs.save_unmasked_shift) + and self.inputs.save_unmasked_shift + ) + + if vsm_save_masked or vsm_save_unmasked: + trait_spec = self.inputs.trait("shift_out_file") + trait_spec.output_name = "shift_out_file" if input_fmap: - trait_spec.name_source = 'fmap_in_file' + trait_spec.name_source = "fmap_in_file" elif input_phase: - trait_spec.name_source = 'phasemap_in_file' + trait_spec.name_source = "phasemap_in_file" elif input_vsm: - trait_spec.name_source = 'shift_in_file' + trait_spec.name_source = "shift_in_file" else: raise RuntimeError( - ('Either phasemap_in_file, shift_in_file or ' - 'fmap_in_file must be set.')) + ( + "Either phasemap_in_file, shift_in_file or " + "fmap_in_file must be set." 
+ ) + ) if vsm_save_unmasked: - trait_spec.name_template = '%s_vsm_unmasked' + trait_spec.name_template = "%s_vsm_unmasked" else: - trait_spec.name_template = '%s_vsm' + trait_spec.name_template = "%s_vsm" else: - skip += ['save_shift', 'save_unmasked_shift', 'shift_out_file'] + skip += ["save_shift", "save_unmasked_shift", "shift_out_file"] # Handle fieldmap output if not isdefined(self.inputs.fmap_out_file): - fmap_save_masked = (isdefined(self.inputs.save_fmap) - and self.inputs.save_fmap) - fmap_save_unmasked = (isdefined(self.inputs.save_unmasked_fmap) - and self.inputs.save_unmasked_fmap) - - if (fmap_save_masked or fmap_save_unmasked): - trait_spec = self.inputs.trait('fmap_out_file') - trait_spec.output_name = 'fmap_out_file' + fmap_save_masked = ( + isdefined(self.inputs.save_fmap) and self.inputs.save_fmap + ) + fmap_save_unmasked = ( + isdefined(self.inputs.save_unmasked_fmap) + and self.inputs.save_unmasked_fmap + ) + + if fmap_save_masked or fmap_save_unmasked: + trait_spec = self.inputs.trait("fmap_out_file") + trait_spec.output_name = "fmap_out_file" if input_vsm: - trait_spec.name_source = 'shift_in_file' + trait_spec.name_source = "shift_in_file" elif input_phase: - trait_spec.name_source = 'phasemap_in_file' + trait_spec.name_source = "phasemap_in_file" elif input_fmap: - trait_spec.name_source = 'fmap_in_file' + trait_spec.name_source = "fmap_in_file" else: raise RuntimeError( - ('Either phasemap_in_file, shift_in_file or ' - 'fmap_in_file must be set.')) + ( + "Either phasemap_in_file, shift_in_file or " + "fmap_in_file must be set." + ) + ) if fmap_save_unmasked: - trait_spec.name_template = '%s_fieldmap_unmasked' + trait_spec.name_template = "%s_fieldmap_unmasked" else: - trait_spec.name_template = '%s_fieldmap' + trait_spec.name_template = "%s_fieldmap" else: - skip += ['save_fmap', 'save_unmasked_fmap', 'fmap_out_file'] + skip += ["save_fmap", "save_unmasked_fmap", "fmap_out_file"] return super(FUGUE, self)._parse_inputs(skip=skip) @@ -1816,66 +1996,75 @@ def _parse_inputs(self, skip=None): class PRELUDEInputSpec(FSLCommandInputSpec): complex_phase_file = File( exists=True, - argstr='--complex=%s', + argstr="--complex=%s", mandatory=True, - xor=['magnitude_file', 'phase_file'], - desc='complex phase input volume') + xor=["magnitude_file", "phase_file"], + desc="complex phase input volume", + ) magnitude_file = File( exists=True, - argstr='--abs=%s', + argstr="--abs=%s", mandatory=True, - xor=['complex_phase_file'], - desc='file containing magnitude image') + xor=["complex_phase_file"], + desc="file containing magnitude image", + ) phase_file = File( exists=True, - argstr='--phase=%s', + argstr="--phase=%s", mandatory=True, - xor=['complex_phase_file'], - desc='raw phase file') + xor=["complex_phase_file"], + desc="raw phase file", + ) unwrapped_phase_file = File( genfile=True, - argstr='--unwrap=%s', - desc='file containing unwrapepd phase', - hash_files=False) + argstr="--unwrap=%s", + desc="file containing unwrapepd phase", + hash_files=False, + ) num_partitions = traits.Int( - argstr='--numphasesplit=%d', desc='number of phase partitions to use') + argstr="--numphasesplit=%d", desc="number of phase partitions to use" + ) labelprocess2d = traits.Bool( - argstr='--labelslices', - desc='does label processing in 2D (slice at a time)') + argstr="--labelslices", desc="does label processing in 2D (slice at a time)" + ) process2d = traits.Bool( - argstr='--slices', - xor=['labelprocess2d'], - desc='does all processing in 2D (slice at a time)') + argstr="--slices", + 
xor=["labelprocess2d"], + desc="does all processing in 2D (slice at a time)", + ) process3d = traits.Bool( - argstr='--force3D', - xor=['labelprocess2d', 'process2d'], - desc='forces all processing to be full 3D') + argstr="--force3D", + xor=["labelprocess2d", "process2d"], + desc="forces all processing to be full 3D", + ) threshold = traits.Float( - argstr='--thresh=%.10f', desc='intensity threshold for masking') + argstr="--thresh=%.10f", desc="intensity threshold for masking" + ) mask_file = File( - exists=True, argstr='--mask=%s', desc='filename of mask input volume') + exists=True, argstr="--mask=%s", desc="filename of mask input volume" + ) start = traits.Int( - argstr='--start=%d', desc='first image number to process (default 0)') + argstr="--start=%d", desc="first image number to process (default 0)" + ) end = traits.Int( - argstr='--end=%d', desc='final image number to process (default Inf)') + argstr="--end=%d", desc="final image number to process (default Inf)" + ) savemask_file = File( - argstr='--savemask=%s', - desc='saving the mask volume', - hash_files=False) + argstr="--savemask=%s", desc="saving the mask volume", hash_files=False + ) rawphase_file = File( - argstr='--rawphase=%s', - desc='saving the raw phase output', - hash_files=False) + argstr="--rawphase=%s", desc="saving the raw phase output", hash_files=False + ) label_file = File( - argstr='--labels=%s', - desc='saving the area labels output', - hash_files=False) + argstr="--labels=%s", desc="saving the area labels output", hash_files=False + ) removeramps = traits.Bool( - argstr='--removeramps', desc='remove phase ramps during unwrapping') + argstr="--removeramps", desc="remove phase ramps during unwrapping" + ) class PRELUDEOutputSpec(TraitedSpec): - unwrapped_phase_file = File(exists=True, desc='unwrapped phase file') + unwrapped_phase_file = File(exists=True, desc="unwrapped phase file") class PRELUDE(FSLCommand): @@ -1887,30 +2076,31 @@ class PRELUDE(FSLCommand): Please insert examples for use of this command """ + input_spec = PRELUDEInputSpec output_spec = PRELUDEOutputSpec - _cmd = 'prelude' + _cmd = "prelude" def __init__(self, **kwargs): super(PRELUDE, self).__init__(**kwargs) - warn('This has not been fully tested. Please report any failures.') + warn("This has not been fully tested. 
Please report any failures.") def _list_outputs(self): outputs = self._outputs().get() out_file = self.inputs.unwrapped_phase_file if not isdefined(out_file): if isdefined(self.inputs.phase_file): - out_file = self._gen_fname( - self.inputs.phase_file, suffix='_unwrapped') + out_file = self._gen_fname(self.inputs.phase_file, suffix="_unwrapped") elif isdefined(self.inputs.complex_phase_file): out_file = self._gen_fname( - self.inputs.complex_phase_file, suffix='_phase_unwrapped') - outputs['unwrapped_phase_file'] = os.path.abspath(out_file) + self.inputs.complex_phase_file, suffix="_phase_unwrapped" + ) + outputs["unwrapped_phase_file"] = os.path.abspath(out_file) return outputs def _gen_filename(self, name): - if name == 'unwrapped_phase_file': - return self._list_outputs()['unwrapped_phase_file'] + if name == "unwrapped_phase_file": + return self._list_outputs()["unwrapped_phase_file"] return None @@ -1920,72 +2110,87 @@ class FIRSTInputSpec(FSLCommandInputSpec): mandatory=True, position=-2, copyfile=False, - argstr='-i %s', - desc='input data file') + argstr="-i %s", + desc="input data file", + ) out_file = File( - 'segmented', + "segmented", usedefault=True, mandatory=True, position=-1, - argstr='-o %s', - desc='output data file', - hash_files=False) - verbose = traits.Bool(argstr='-v', position=1, desc="Use verbose logging.") + argstr="-o %s", + desc="output data file", + hash_files=False, + ) + verbose = traits.Bool(argstr="-v", position=1, desc="Use verbose logging.") brain_extracted = traits.Bool( - argstr='-b', + argstr="-b", position=2, - desc="Input structural image is already brain-extracted") + desc="Input structural image is already brain-extracted", + ) no_cleanup = traits.Bool( - argstr='-d', + argstr="-d", position=3, - desc="Input structural image is already brain-extracted") + desc="Input structural image is already brain-extracted", + ) method = traits.Enum( - 'auto', - 'fast', - 'none', - xor=['method_as_numerical_threshold'], - argstr='-m %s', + "auto", + "fast", + "none", + xor=["method_as_numerical_threshold"], + argstr="-m %s", position=4, usedefault=True, - desc=("Method must be one of auto, fast, none, or it can be entered " - "using the 'method_as_numerical_threshold' input")) + desc=( + "Method must be one of auto, fast, none, or it can be entered " + "using the 'method_as_numerical_threshold' input" + ), + ) method_as_numerical_threshold = traits.Float( - argstr='-m %.4f', + argstr="-m %.4f", position=4, - desc=("Specify a numerical threshold value or use the 'method' input " - "to choose auto, fast, or none")) + desc=( + "Specify a numerical threshold value or use the 'method' input " + "to choose auto, fast, or none" + ), + ) list_of_specific_structures = traits.List( traits.Str, - argstr='-s %s', - sep=',', + argstr="-s %s", + sep=",", position=5, minlen=1, - desc='Runs only on the specified structures (e.g. L_Hipp, R_Hipp' - 'L_Accu, R_Accu, L_Amyg, R_Amyg' - 'L_Caud, R_Caud, L_Pall, R_Pall' - 'L_Puta, R_Puta, L_Thal, R_Thal, BrStem') + desc="Runs only on the specified structures (e.g. L_Hipp, R_Hipp" + "L_Accu, R_Accu, L_Amyg, R_Amyg" + "L_Caud, R_Caud, L_Pall, R_Pall" + "L_Puta, R_Puta, L_Thal, R_Thal, BrStem", + ) affine_file = File( exists=True, position=6, - argstr='-a %s', - desc=('Affine matrix to use (e.g. img2std.mat) (does not ' - 're-run registration)')) + argstr="-a %s", + desc=( + "Affine matrix to use (e.g. 
img2std.mat) (does not " "re-run registration)" + ), + ) class FIRSTOutputSpec(TraitedSpec): vtk_surfaces = OutputMultiPath( - File(exists=True), - desc='VTK format meshes for each subcortical region') - bvars = OutputMultiPath( - File(exists=True), desc='bvars for each subcortical region') + File(exists=True), desc="VTK format meshes for each subcortical region" + ) + bvars = OutputMultiPath(File(exists=True), desc="bvars for each subcortical region") original_segmentations = File( exists=True, - desc=('3D image file containing the segmented regions ' - 'as integer values. Uses CMA labelling')) + desc=( + "3D image file containing the segmented regions " + "as integer values. Uses CMA labelling" + ), + ) segmentation_file = File( exists=True, - desc=('4D image file containing a single volume per ' - 'segmented region')) + desc=("4D image file containing a single volume per " "segmented region"), + ) class FIRST(FSLCommand): @@ -2004,7 +2209,7 @@ class FIRST(FSLCommand): """ - _cmd = 'run_first_all' + _cmd = "run_first_all" input_spec = FIRSTInputSpec output_spec = FIRSTOutputSpec @@ -2015,51 +2220,60 @@ def _list_outputs(self): structures = self.inputs.list_of_specific_structures else: structures = [ - 'L_Hipp', 'R_Hipp', 'L_Accu', 'R_Accu', 'L_Amyg', 'R_Amyg', - 'L_Caud', 'R_Caud', 'L_Pall', 'R_Pall', 'L_Puta', 'R_Puta', - 'L_Thal', 'R_Thal', 'BrStem' + "L_Hipp", + "R_Hipp", + "L_Accu", + "R_Accu", + "L_Amyg", + "R_Amyg", + "L_Caud", + "R_Caud", + "L_Pall", + "R_Pall", + "L_Puta", + "R_Puta", + "L_Thal", + "R_Thal", + "BrStem", ] - outputs['original_segmentations'] = \ - self._gen_fname('original_segmentations') - outputs['segmentation_file'] = self._gen_fname('segmentation_file') - outputs['vtk_surfaces'] = self._gen_mesh_names('vtk_surfaces', - structures) - outputs['bvars'] = self._gen_mesh_names('bvars', structures) + outputs["original_segmentations"] = self._gen_fname("original_segmentations") + outputs["segmentation_file"] = self._gen_fname("segmentation_file") + outputs["vtk_surfaces"] = self._gen_mesh_names("vtk_surfaces", structures) + outputs["bvars"] = self._gen_mesh_names("bvars", structures) return outputs def _gen_fname(self, basename): path, outname, ext = split_filename(self.inputs.out_file) - method = 'none' - if isdefined(self.inputs.method) and self.inputs.method != 'none': - method = 'fast' - if (self.inputs.list_of_specific_structures - and self.inputs.method == 'auto'): - method = 'none' + method = "none" + if isdefined(self.inputs.method) and self.inputs.method != "none": + method = "fast" + if self.inputs.list_of_specific_structures and self.inputs.method == "auto": + method = "none" if isdefined(self.inputs.method_as_numerical_threshold): - thres = '%.4f' % self.inputs.method_as_numerical_threshold - method = thres.replace('.', '') + thres = "%.4f" % self.inputs.method_as_numerical_threshold + method = thres.replace(".", "") - if basename == 'original_segmentations': - return op.abspath('%s_all_%s_origsegs.nii.gz' % (outname, method)) - if basename == 'segmentation_file': - return op.abspath('%s_all_%s_firstseg.nii.gz' % (outname, method)) + if basename == "original_segmentations": + return op.abspath("%s_all_%s_origsegs.nii.gz" % (outname, method)) + if basename == "segmentation_file": + return op.abspath("%s_all_%s_firstseg.nii.gz" % (outname, method)) return None def _gen_mesh_names(self, name, structures): path, prefix, ext = split_filename(self.inputs.out_file) - if name == 'vtk_surfaces': + if name == "vtk_surfaces": vtks = list() for struct in structures: 
- vtk = prefix + '-' + struct + '_first.vtk' + vtk = prefix + "-" + struct + "_first.vtk" vtks.append(op.abspath(vtk)) return vtks - if name == 'bvars': + if name == "bvars": bvars = list() for struct in structures: - bvar = prefix + '-' + struct + '_first.bvars' + bvar = prefix + "-" + struct + "_first.bvars" bvars.append(op.abspath(bvar)) return bvars return None diff --git a/nipype/interfaces/fsl/tests/test_FILMGLS.py b/nipype/interfaces/fsl/tests/test_FILMGLS.py index a37fc1b116..b4934f4d7a 100644 --- a/nipype/interfaces/fsl/tests/test_FILMGLS.py +++ b/nipype/interfaces/fsl/tests/test_FILMGLS.py @@ -4,119 +4,144 @@ def test_filmgls(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), autocorr_estimate_only=dict( xor=[ - 'autocorr_estimate_only', 'fit_armodel', 'tukey_window', - 'multitaper_product', 'use_pava', 'autocorr_noestimate' + "autocorr_estimate_only", + "fit_armodel", + "tukey_window", + "multitaper_product", + "use_pava", + "autocorr_noestimate", ], - argstr='-ac', + argstr="-ac", ), autocorr_noestimate=dict( xor=[ - 'autocorr_estimate_only', 'fit_armodel', 'tukey_window', - 'multitaper_product', 'use_pava', 'autocorr_noestimate' + "autocorr_estimate_only", + "fit_armodel", + "tukey_window", + "multitaper_product", + "use_pava", + "autocorr_noestimate", ], - argstr='-noest', + argstr="-noest", ), - brightness_threshold=dict(argstr='-epith %d', ), - design_file=dict(argstr='%s', ), - environ=dict(usedefault=True, ), + brightness_threshold=dict(argstr="-epith %d",), + design_file=dict(argstr="%s",), + environ=dict(usedefault=True,), fit_armodel=dict( xor=[ - 'autocorr_estimate_only', 'fit_armodel', 'tukey_window', - 'multitaper_product', 'use_pava', 'autocorr_noestimate' + "autocorr_estimate_only", + "fit_armodel", + "tukey_window", + "multitaper_product", + "use_pava", + "autocorr_noestimate", ], - argstr='-ar', + argstr="-ar", ), - full_data=dict(argstr='-v', ), - in_file=dict( - mandatory=True, - argstr='%s', - ), - mask_size=dict(argstr='-ms %d', ), + full_data=dict(argstr="-v",), + in_file=dict(mandatory=True, argstr="%s",), + mask_size=dict(argstr="-ms %d",), multitaper_product=dict( xor=[ - 'autocorr_estimate_only', 'fit_armodel', 'tukey_window', - 'multitaper_product', 'use_pava', 'autocorr_noestimate' + "autocorr_estimate_only", + "fit_armodel", + "tukey_window", + "multitaper_product", + "use_pava", + "autocorr_noestimate", ], - argstr='-mt %d', + argstr="-mt %d", ), - output_pwdata=dict(argstr='-output_pwdata', ), + output_pwdata=dict(argstr="-output_pwdata",), output_type=dict(), - results_dir=dict( - usedefault=True, - argstr='-rn %s', - ), - smooth_autocorr=dict(argstr='-sa', ), - threshold=dict(argstr='%f', ), + results_dir=dict(usedefault=True, argstr="-rn %s",), + smooth_autocorr=dict(argstr="-sa",), + threshold=dict(argstr="%f",), tukey_window=dict( xor=[ - 'autocorr_estimate_only', 'fit_armodel', 'tukey_window', - 'multitaper_product', 'use_pava', 'autocorr_noestimate' + "autocorr_estimate_only", + "fit_armodel", + "tukey_window", + "multitaper_product", + "use_pava", + "autocorr_noestimate", ], - argstr='-tukey %d', + argstr="-tukey %d", ), - use_pava=dict(argstr='-pava', ), + use_pava=dict(argstr="-pava",), ) input_map2 = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), autocorr_estimate_only=dict( xor=[ - 'autocorr_estimate_only', 'fit_armodel', 'tukey_window', - 'multitaper_product', 'use_pava', 'autocorr_noestimate' + "autocorr_estimate_only", + "fit_armodel", + "tukey_window", + "multitaper_product", + "use_pava", + 
"autocorr_noestimate", ], - argstr='--ac', + argstr="--ac", ), autocorr_noestimate=dict( xor=[ - 'autocorr_estimate_only', 'fit_armodel', 'tukey_window', - 'multitaper_product', 'use_pava', 'autocorr_noestimate' + "autocorr_estimate_only", + "fit_armodel", + "tukey_window", + "multitaper_product", + "use_pava", + "autocorr_noestimate", ], - argstr='--noest', + argstr="--noest", ), - brightness_threshold=dict(argstr='--epith=%d', ), - design_file=dict(argstr='--pd=%s', ), - environ=dict(usedefault=True, ), + brightness_threshold=dict(argstr="--epith=%d",), + design_file=dict(argstr="--pd=%s",), + environ=dict(usedefault=True,), fit_armodel=dict( xor=[ - 'autocorr_estimate_only', 'fit_armodel', 'tukey_window', - 'multitaper_product', 'use_pava', 'autocorr_noestimate' + "autocorr_estimate_only", + "fit_armodel", + "tukey_window", + "multitaper_product", + "use_pava", + "autocorr_noestimate", ], - argstr='--ar', - ), - full_data=dict(argstr='-v', ), - in_file=dict( - mandatory=True, - argstr='--in=%s', + argstr="--ar", ), - mask_size=dict(argstr='--ms=%d', ), + full_data=dict(argstr="-v",), + in_file=dict(mandatory=True, argstr="--in=%s",), + mask_size=dict(argstr="--ms=%d",), multitaper_product=dict( xor=[ - 'autocorr_estimate_only', 'fit_armodel', 'tukey_window', - 'multitaper_product', 'use_pava', 'autocorr_noestimate' + "autocorr_estimate_only", + "fit_armodel", + "tukey_window", + "multitaper_product", + "use_pava", + "autocorr_noestimate", ], - argstr='--mt=%d', + argstr="--mt=%d", ), - output_pwdata=dict(argstr='--outputPWdata', ), + output_pwdata=dict(argstr="--outputPWdata",), output_type=dict(), - results_dir=dict( - argstr='--rn=%s', - usedefault=True, - ), - smooth_autocorr=dict(argstr='--sa', ), - threshold=dict( - usedefault=True, - argstr='--thr=%f', - ), + results_dir=dict(argstr="--rn=%s", usedefault=True,), + smooth_autocorr=dict(argstr="--sa",), + threshold=dict(usedefault=True, argstr="--thr=%f",), tukey_window=dict( xor=[ - 'autocorr_estimate_only', 'fit_armodel', 'tukey_window', - 'multitaper_product', 'use_pava', 'autocorr_noestimate' + "autocorr_estimate_only", + "fit_armodel", + "tukey_window", + "multitaper_product", + "use_pava", + "autocorr_noestimate", ], - argstr='--tukey=%d', + argstr="--tukey=%d", ), - use_pava=dict(argstr='--pava', ), + use_pava=dict(argstr="--pava",), ) instance = FILMGLS() if isinstance(instance.inputs, FILMGLSInputSpec): diff --git a/nipype/interfaces/fsl/tests/test_Level1Design_functions.py b/nipype/interfaces/fsl/tests/test_Level1Design_functions.py index 44c04d41d5..f13ddfaccf 100644 --- a/nipype/interfaces/fsl/tests/test_Level1Design_functions.py +++ b/nipype/interfaces/fsl/tests/test_Level1Design_functions.py @@ -8,25 +8,31 @@ def test_level1design(tmpdir): old = tmpdir.chdir() l = Level1Design() runinfo = dict( - cond=[{ - 'name': 'test_condition', - 'onset': [0, 10], - 'duration': [10, 10] - }], - regress=[]) + cond=[{"name": "test_condition", "onset": [0, 10], "duration": [10, 10]}], + regress=[], + ) runidx = 0 contrasts = Undefined do_tempfilter = False orthogonalization = {} - basic_ev_parameters = {'temporalderiv': False} - convolution_variants = [('custom', 7, { - 'temporalderiv': False, - 'bfcustompath': '/some/path' - }), ('hrf', 3, basic_ev_parameters), ('dgamma', 3, basic_ev_parameters), - ('gamma', 2, basic_ev_parameters), - ('none', 0, basic_ev_parameters)] + basic_ev_parameters = {"temporalderiv": False} + convolution_variants = [ + ("custom", 7, {"temporalderiv": False, "bfcustompath": "/some/path"}), + ("hrf", 3, 
basic_ev_parameters), + ("dgamma", 3, basic_ev_parameters), + ("gamma", 2, basic_ev_parameters), + ("none", 0, basic_ev_parameters), + ] for key, val, ev_parameters in convolution_variants: output_num, output_txt = Level1Design._create_ev_files( - l, os.getcwd(), runinfo, runidx, ev_parameters, orthogonalization, - contrasts, do_tempfilter, key) + l, + os.getcwd(), + runinfo, + runidx, + ev_parameters, + orthogonalization, + contrasts, + do_tempfilter, + key, + ) assert "set fmri(convolve1) {0}".format(val) in output_txt diff --git a/nipype/interfaces/fsl/tests/test_auto_AR1Image.py b/nipype/interfaces/fsl/tests/test_auto_AR1Image.py index e631c31d70..23e6a19b6c 100644 --- a/nipype/interfaces/fsl/tests/test_auto_AR1Image.py +++ b/nipype/interfaces/fsl/tests/test_auto_AR1Image.py @@ -4,41 +4,16 @@ def test_AR1Image_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dimension=dict( - argstr='-%sar1', - position=4, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr='-dt %s', - position=1, - ), - nan2zeros=dict( - argstr='-nan', - position=3, - ), + args=dict(argstr="%s",), + dimension=dict(argstr="-%sar1", position=4, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + internal_datatype=dict(argstr="-dt %s", position=1,), + nan2zeros=dict(argstr="-nan", position=3,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr='-odt %s', - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, ), + output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = AR1Image.input_spec() @@ -46,8 +21,10 @@ def test_AR1Image_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_AR1Image_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = AR1Image.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py b/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py index bc1e962fa1..6faf6d5d27 100644 --- a/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py +++ b/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py @@ -4,27 +4,12 @@ def test_AccuracyTester_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - mel_icas=dict( - argstr='%s', - copyfile=False, - mandatory=True, - position=3, - ), - output_directory=dict( - argstr='%s', - mandatory=True, - position=2, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + mel_icas=dict(argstr="%s", copyfile=False, mandatory=True, position=3,), + output_directory=dict(argstr="%s", mandatory=True, position=2,), trained_wts_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=1, + argstr="%s", extensions=None, mandatory=True, position=1, ), ) inputs = AccuracyTester.input_spec() @@ -32,11 +17,10 @@ def test_AccuracyTester_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_AccuracyTester_outputs(): 
- output_map = dict(output_directory=dict( - argstr='%s', - position=1, - ), ) + output_map = dict(output_directory=dict(argstr="%s", position=1,),) outputs = AccuracyTester.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py b/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py index dfc98c5a02..dcd7fc6081 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py @@ -4,42 +4,16 @@ def test_ApplyMask_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr='-dt %s', - position=1, - ), - mask_file=dict( - argstr='-mas %s', - extensions=None, - mandatory=True, - position=4, - ), - nan2zeros=dict( - argstr='-nan', - position=3, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + internal_datatype=dict(argstr="-dt %s", position=1,), + mask_file=dict(argstr="-mas %s", extensions=None, mandatory=True, position=4,), + nan2zeros=dict(argstr="-nan", position=3,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr='-odt %s', - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, ), + output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = ApplyMask.input_spec() @@ -47,8 +21,10 @@ def test_ApplyMask_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ApplyMask_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = ApplyMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py b/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py index 0af0b43b0a..a600c425e6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py @@ -4,44 +4,28 @@ def test_ApplyTOPUP_inputs(): input_map = dict( - args=dict(argstr='%s', ), - datatype=dict(argstr='-d=%s', ), - encoding_file=dict( - argstr='--datain=%s', - extensions=None, - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr='--imain=%s', - mandatory=True, - sep=',', - ), - in_index=dict( - argstr='--inindex=%s', - sep=',', - ), + args=dict(argstr="%s",), + datatype=dict(argstr="-d=%s",), + encoding_file=dict(argstr="--datain=%s", extensions=None, mandatory=True,), + environ=dict(nohash=True, usedefault=True,), + in_files=dict(argstr="--imain=%s", mandatory=True, sep=",",), + in_index=dict(argstr="--inindex=%s", sep=",",), in_topup_fieldcoef=dict( - argstr='--topup=%s', + argstr="--topup=%s", copyfile=False, extensions=None, - requires=['in_topup_movpar'], + requires=["in_topup_movpar"], ), in_topup_movpar=dict( - copyfile=False, - extensions=None, - requires=['in_topup_fieldcoef'], + copyfile=False, extensions=None, requires=["in_topup_fieldcoef"], ), - interp=dict(argstr='--interp=%s', ), - method=dict(argstr='--method=%s', ), + interp=dict(argstr="--interp=%s",), + method=dict(argstr="--method=%s",), out_corrected=dict( - 
argstr='--out=%s', + argstr="--out=%s", extensions=None, - name_source=['in_files'], - name_template='%s_corrected', + name_source=["in_files"], + name_template="%s_corrected", ), output_type=dict(), ) @@ -50,8 +34,10 @@ def test_ApplyTOPUP_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ApplyTOPUP_outputs(): - output_map = dict(out_corrected=dict(extensions=None, ), ) + output_map = dict(out_corrected=dict(extensions=None,),) outputs = ApplyTOPUP.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py b/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py index a6d924e825..8deaf747c2 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py @@ -4,71 +4,38 @@ def test_ApplyWarp_inputs(): input_map = dict( - abswarp=dict( - argstr='--abs', - xor=['relwarp'], - ), - args=dict(argstr='%s', ), - datatype=dict(argstr='--datatype=%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - field_file=dict( - argstr='--warp=%s', - extensions=None, - ), - in_file=dict( - argstr='--in=%s', - extensions=None, - mandatory=True, - position=0, - ), - interp=dict( - argstr='--interp=%s', - position=-2, - ), - mask_file=dict( - argstr='--mask=%s', - extensions=None, - ), + abswarp=dict(argstr="--abs", xor=["relwarp"],), + args=dict(argstr="%s",), + datatype=dict(argstr="--datatype=%s",), + environ=dict(nohash=True, usedefault=True,), + field_file=dict(argstr="--warp=%s", extensions=None,), + in_file=dict(argstr="--in=%s", extensions=None, mandatory=True, position=0,), + interp=dict(argstr="--interp=%s", position=-2,), + mask_file=dict(argstr="--mask=%s", extensions=None,), out_file=dict( - argstr='--out=%s', + argstr="--out=%s", extensions=None, genfile=True, hash_files=False, position=2, ), output_type=dict(), - postmat=dict( - argstr='--postmat=%s', - extensions=None, - ), - premat=dict( - argstr='--premat=%s', - extensions=None, - ), - ref_file=dict( - argstr='--ref=%s', - extensions=None, - mandatory=True, - position=1, - ), - relwarp=dict( - argstr='--rel', - position=-1, - xor=['abswarp'], - ), - superlevel=dict(argstr='--superlevel=%s', ), - supersample=dict(argstr='--super', ), + postmat=dict(argstr="--postmat=%s", extensions=None,), + premat=dict(argstr="--premat=%s", extensions=None,), + ref_file=dict(argstr="--ref=%s", extensions=None, mandatory=True, position=1,), + relwarp=dict(argstr="--rel", position=-1, xor=["abswarp"],), + superlevel=dict(argstr="--superlevel=%s",), + supersample=dict(argstr="--super",), ) inputs = ApplyWarp.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ApplyWarp_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = ApplyWarp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py b/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py index b7264213b9..116748d4c9 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py @@ -4,175 +4,90 @@ def test_ApplyXFM_inputs(): input_map = dict( - angle_rep=dict(argstr='-anglerep %s', ), - apply_isoxfm=dict( - argstr='-applyisoxfm %f', - xor=['apply_xfm'], - 
), - apply_xfm=dict( - argstr='-applyxfm', - usedefault=True, - ), - args=dict(argstr='%s', ), - bbrslope=dict( - argstr='-bbrslope %f', - min_ver='5.0.0', - ), - bbrtype=dict( - argstr='-bbrtype %s', - min_ver='5.0.0', - ), - bgvalue=dict(argstr='-setbackground %f', ), - bins=dict(argstr='-bins %d', ), - coarse_search=dict( - argstr='-coarsesearch %d', - units='degrees', - ), - cost=dict(argstr='-cost %s', ), - cost_func=dict(argstr='-searchcost %s', ), - datatype=dict(argstr='-datatype %s', ), - display_init=dict(argstr='-displayinit', ), - dof=dict(argstr='-dof %d', ), - echospacing=dict( - argstr='-echospacing %f', - min_ver='5.0.0', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fieldmap=dict( - argstr='-fieldmap %s', - extensions=None, - min_ver='5.0.0', - ), - fieldmapmask=dict( - argstr='-fieldmapmask %s', - extensions=None, - min_ver='5.0.0', - ), - fine_search=dict( - argstr='-finesearch %d', - units='degrees', - ), - force_scaling=dict(argstr='-forcescaling', ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - position=0, - ), - in_matrix_file=dict( - argstr='-init %s', - extensions=None, - ), - in_weight=dict( - argstr='-inweight %s', - extensions=None, - ), - interp=dict(argstr='-interp %s', ), - min_sampling=dict( - argstr='-minsampling %f', - units='mm', - ), - no_clamp=dict(argstr='-noclamp', ), - no_resample=dict(argstr='-noresample', ), - no_resample_blur=dict(argstr='-noresampblur', ), - no_search=dict(argstr='-nosearch', ), + angle_rep=dict(argstr="-anglerep %s",), + apply_isoxfm=dict(argstr="-applyisoxfm %f", xor=["apply_xfm"],), + apply_xfm=dict(argstr="-applyxfm", usedefault=True,), + args=dict(argstr="%s",), + bbrslope=dict(argstr="-bbrslope %f", min_ver="5.0.0",), + bbrtype=dict(argstr="-bbrtype %s", min_ver="5.0.0",), + bgvalue=dict(argstr="-setbackground %f",), + bins=dict(argstr="-bins %d",), + coarse_search=dict(argstr="-coarsesearch %d", units="degrees",), + cost=dict(argstr="-cost %s",), + cost_func=dict(argstr="-searchcost %s",), + datatype=dict(argstr="-datatype %s",), + display_init=dict(argstr="-displayinit",), + dof=dict(argstr="-dof %d",), + echospacing=dict(argstr="-echospacing %f", min_ver="5.0.0",), + environ=dict(nohash=True, usedefault=True,), + fieldmap=dict(argstr="-fieldmap %s", extensions=None, min_ver="5.0.0",), + fieldmapmask=dict(argstr="-fieldmapmask %s", extensions=None, min_ver="5.0.0",), + fine_search=dict(argstr="-finesearch %d", units="degrees",), + force_scaling=dict(argstr="-forcescaling",), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=0,), + in_matrix_file=dict(argstr="-init %s", extensions=None,), + in_weight=dict(argstr="-inweight %s", extensions=None,), + interp=dict(argstr="-interp %s",), + min_sampling=dict(argstr="-minsampling %f", units="mm",), + no_clamp=dict(argstr="-noclamp",), + no_resample=dict(argstr="-noresample",), + no_resample_blur=dict(argstr="-noresampblur",), + no_search=dict(argstr="-nosearch",), out_file=dict( - argstr='-out %s', + argstr="-out %s", extensions=None, hash_files=False, - name_source=['in_file'], - name_template='%s_flirt', + name_source=["in_file"], + name_template="%s_flirt", position=2, ), out_log=dict( extensions=None, keep_extension=True, - name_source=['in_file'], - name_template='%s_flirt.log', - requires=['save_log'], + name_source=["in_file"], + name_template="%s_flirt.log", + requires=["save_log"], ), out_matrix_file=dict( - argstr='-omat %s', + argstr="-omat %s", extensions=None, hash_files=False, keep_extension=True, 
- name_source=['in_file'], - name_template='%s_flirt.mat', + name_source=["in_file"], + name_template="%s_flirt.mat", position=3, ), output_type=dict(), - padding_size=dict( - argstr='-paddingsize %d', - units='voxels', - ), - pedir=dict( - argstr='-pedir %d', - min_ver='5.0.0', - ), - ref_weight=dict( - argstr='-refweight %s', - extensions=None, - ), - reference=dict( - argstr='-ref %s', - extensions=None, - mandatory=True, - position=1, - ), - rigid2D=dict(argstr='-2D', ), + padding_size=dict(argstr="-paddingsize %d", units="voxels",), + pedir=dict(argstr="-pedir %d", min_ver="5.0.0",), + ref_weight=dict(argstr="-refweight %s", extensions=None,), + reference=dict(argstr="-ref %s", extensions=None, mandatory=True, position=1,), + rigid2D=dict(argstr="-2D",), save_log=dict(), - schedule=dict( - argstr='-schedule %s', - extensions=None, - ), - searchr_x=dict( - argstr='-searchrx %s', - units='degrees', - ), - searchr_y=dict( - argstr='-searchry %s', - units='degrees', - ), - searchr_z=dict( - argstr='-searchrz %s', - units='degrees', - ), - sinc_width=dict( - argstr='-sincwidth %d', - units='voxels', - ), - sinc_window=dict(argstr='-sincwindow %s', ), - uses_qform=dict(argstr='-usesqform', ), - verbose=dict(argstr='-verbose %d', ), - wm_seg=dict( - argstr='-wmseg %s', - extensions=None, - min_ver='5.0.0', - ), - wmcoords=dict( - argstr='-wmcoords %s', - extensions=None, - min_ver='5.0.0', - ), - wmnorms=dict( - argstr='-wmnorms %s', - extensions=None, - min_ver='5.0.0', - ), + schedule=dict(argstr="-schedule %s", extensions=None,), + searchr_x=dict(argstr="-searchrx %s", units="degrees",), + searchr_y=dict(argstr="-searchry %s", units="degrees",), + searchr_z=dict(argstr="-searchrz %s", units="degrees",), + sinc_width=dict(argstr="-sincwidth %d", units="voxels",), + sinc_window=dict(argstr="-sincwindow %s",), + uses_qform=dict(argstr="-usesqform",), + verbose=dict(argstr="-verbose %d",), + wm_seg=dict(argstr="-wmseg %s", extensions=None, min_ver="5.0.0",), + wmcoords=dict(argstr="-wmcoords %s", extensions=None, min_ver="5.0.0",), + wmnorms=dict(argstr="-wmnorms %s", extensions=None, min_ver="5.0.0",), ) inputs = ApplyXFM.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ApplyXFM_outputs(): output_map = dict( - out_file=dict(extensions=None, ), - out_log=dict(extensions=None, ), - out_matrix_file=dict(extensions=None, ), + out_file=dict(extensions=None,), + out_log=dict(extensions=None,), + out_matrix_file=dict(extensions=None,), ) outputs = ApplyXFM.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_AvScale.py b/nipype/interfaces/fsl/tests/test_auto_AvScale.py index 0b803fd558..5f636ec453 100644 --- a/nipype/interfaces/fsl/tests/test_auto_AvScale.py +++ b/nipype/interfaces/fsl/tests/test_auto_AvScale.py @@ -4,28 +4,19 @@ def test_AvScale_inputs(): input_map = dict( - all_param=dict(argstr='--allparams', ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - mat_file=dict( - argstr='%s', - extensions=None, - position=-2, - ), - ref_file=dict( - argstr='%s', - extensions=None, - position=-1, - ), + all_param=dict(argstr="--allparams",), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + mat_file=dict(argstr="%s", extensions=None, position=-2,), + ref_file=dict(argstr="%s", extensions=None, position=-1,), ) inputs = AvScale.input_spec() for key, metadata in list(input_map.items()): for metakey, value 
in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_AvScale_outputs(): output_map = dict( average_scaling=dict(), diff --git a/nipype/interfaces/fsl/tests/test_auto_B0Calc.py b/nipype/interfaces/fsl/tests/test_auto_B0Calc.py index 532780c8cb..85175fd428 100644 --- a/nipype/interfaces/fsl/tests/test_auto_B0Calc.py +++ b/nipype/interfaces/fsl/tests/test_auto_B0Calc.py @@ -4,85 +4,42 @@ def test_B0Calc_inputs(): input_map = dict( - args=dict(argstr='%s', ), - chi_air=dict( - argstr='--chi0=%e', - usedefault=True, - ), - compute_xyz=dict( - argstr='--xyz', - usedefault=True, - ), - delta=dict( - argstr='-d %e', - usedefault=True, - ), - directconv=dict( - argstr='--directconv', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - extendboundary=dict( - argstr='--extendboundary=%0.2f', - usedefault=True, - ), - in_file=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - position=0, - ), + args=dict(argstr="%s",), + chi_air=dict(argstr="--chi0=%e", usedefault=True,), + compute_xyz=dict(argstr="--xyz", usedefault=True,), + delta=dict(argstr="-d %e", usedefault=True,), + directconv=dict(argstr="--directconv", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + extendboundary=dict(argstr="--extendboundary=%0.2f", usedefault=True,), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=0,), out_file=dict( - argstr='-o %s', + argstr="-o %s", extensions=None, - name_source=['in_file'], - name_template='%s_b0field', - output_name='out_file', + name_source=["in_file"], + name_template="%s_b0field", + output_name="out_file", position=1, ), output_type=dict(), - x_b0=dict( - argstr='--b0x=%0.2f', - usedefault=True, - xor=['xyz_b0'], - ), - x_grad=dict( - argstr='--gx=%0.4f', - usedefault=True, - ), + x_b0=dict(argstr="--b0x=%0.2f", usedefault=True, xor=["xyz_b0"],), + x_grad=dict(argstr="--gx=%0.4f", usedefault=True,), xyz_b0=dict( - argstr='--b0x=%0.2f --b0y=%0.2f --b0=%0.2f', - xor=['x_b0', 'y_b0', 'z_b0'], - ), - y_b0=dict( - argstr='--b0y=%0.2f', - usedefault=True, - xor=['xyz_b0'], - ), - y_grad=dict( - argstr='--gy=%0.4f', - usedefault=True, - ), - z_b0=dict( - argstr='--b0=%0.2f', - usedefault=True, - xor=['xyz_b0'], - ), - z_grad=dict( - argstr='--gz=%0.4f', - usedefault=True, + argstr="--b0x=%0.2f --b0y=%0.2f --b0=%0.2f", xor=["x_b0", "y_b0", "z_b0"], ), + y_b0=dict(argstr="--b0y=%0.2f", usedefault=True, xor=["xyz_b0"],), + y_grad=dict(argstr="--gy=%0.4f", usedefault=True,), + z_b0=dict(argstr="--b0=%0.2f", usedefault=True, xor=["xyz_b0"],), + z_grad=dict(argstr="--gz=%0.4f", usedefault=True,), ) inputs = B0Calc.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_B0Calc_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = B0Calc.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py b/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py index 54cbcf0410..8941e775c6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py +++ b/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py @@ -4,98 +4,39 @@ def test_BEDPOSTX5_inputs(): input_map = dict( - all_ard=dict( - argstr='--allard', - xor=('no_ard', 'all_ard'), - ), - args=dict(argstr='%s', ), - burn_in=dict( - argstr='-b %d', - usedefault=True, - ), - 
burn_in_no_ard=dict( - argstr='--burnin_noard=%d', - usedefault=True, - ), - bvals=dict( - extensions=None, - mandatory=True, - ), - bvecs=dict( - extensions=None, - mandatory=True, - ), + all_ard=dict(argstr="--allard", xor=("no_ard", "all_ard"),), + args=dict(argstr="%s",), + burn_in=dict(argstr="-b %d", usedefault=True,), + burn_in_no_ard=dict(argstr="--burnin_noard=%d", usedefault=True,), + bvals=dict(extensions=None, mandatory=True,), + bvecs=dict(extensions=None, mandatory=True,), cnlinear=dict( - argstr='--cnonlinear', - xor=('no_spat', 'non_linear', 'cnlinear'), - ), - dwi=dict( - extensions=None, - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - f0_ard=dict( - argstr='--f0 --ardf0', - xor=['f0_noard', 'f0_ard', 'all_ard'], - ), - f0_noard=dict( - argstr='--f0', - xor=['f0_noard', 'f0_ard'], - ), - force_dir=dict( - argstr='--forcedir', - usedefault=True, - ), - fudge=dict(argstr='-w %d', ), - grad_dev=dict(extensions=None, ), - gradnonlin=dict(argstr='-g', ), - logdir=dict(argstr='--logdir=%s', ), - mask=dict( - extensions=None, - mandatory=True, - ), - model=dict(argstr='-model %d', ), - n_fibres=dict( - argstr='-n %d', - mandatory=True, - usedefault=True, - ), - n_jumps=dict( - argstr='-j %d', - usedefault=True, - ), - no_ard=dict( - argstr='--noard', - xor=('no_ard', 'all_ard'), - ), - no_spat=dict( - argstr='--nospat', - xor=('no_spat', 'non_linear', 'cnlinear'), - ), + argstr="--cnonlinear", xor=("no_spat", "non_linear", "cnlinear"), + ), + dwi=dict(extensions=None, mandatory=True,), + environ=dict(nohash=True, usedefault=True,), + f0_ard=dict(argstr="--f0 --ardf0", xor=["f0_noard", "f0_ard", "all_ard"],), + f0_noard=dict(argstr="--f0", xor=["f0_noard", "f0_ard"],), + force_dir=dict(argstr="--forcedir", usedefault=True,), + fudge=dict(argstr="-w %d",), + grad_dev=dict(extensions=None,), + gradnonlin=dict(argstr="-g",), + logdir=dict(argstr="--logdir=%s",), + mask=dict(extensions=None, mandatory=True,), + model=dict(argstr="-model %d",), + n_fibres=dict(argstr="-n %d", mandatory=True, usedefault=True,), + n_jumps=dict(argstr="-j %d", usedefault=True,), + no_ard=dict(argstr="--noard", xor=("no_ard", "all_ard"),), + no_spat=dict(argstr="--nospat", xor=("no_spat", "non_linear", "cnlinear"),), non_linear=dict( - argstr='--nonlinear', - xor=('no_spat', 'non_linear', 'cnlinear'), - ), - out_dir=dict( - argstr='%s', - mandatory=True, - position=1, - usedefault=True, + argstr="--nonlinear", xor=("no_spat", "non_linear", "cnlinear"), ), + out_dir=dict(argstr="%s", mandatory=True, position=1, usedefault=True,), output_type=dict(), - rician=dict(argstr='--rician', ), - sample_every=dict( - argstr='-s %d', - usedefault=True, - ), - seed=dict(argstr='--seed=%d', ), - update_proposal_every=dict( - argstr='--updateproposalevery=%d', - usedefault=True, - ), + rician=dict(argstr="--rician",), + sample_every=dict(argstr="-s %d", usedefault=True,), + seed=dict(argstr="--seed=%d",), + update_proposal_every=dict(argstr="--updateproposalevery=%d", usedefault=True,), use_gpu=dict(), ) inputs = BEDPOSTX5.input_spec() @@ -103,12 +44,14 @@ def test_BEDPOSTX5_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BEDPOSTX5_outputs(): output_map = dict( dyads=dict(), dyads_dispersion=dict(), - mean_S0samples=dict(extensions=None, ), - mean_dsamples=dict(extensions=None, ), + mean_S0samples=dict(extensions=None,), + mean_dsamples=dict(extensions=None,), 
mean_fsamples=dict(), mean_phsamples=dict(), mean_thsamples=dict(), diff --git a/nipype/interfaces/fsl/tests/test_auto_BET.py b/nipype/interfaces/fsl/tests/test_auto_BET.py index 33968d9ac9..538a92b720 100644 --- a/nipype/interfaces/fsl/tests/test_auto_BET.py +++ b/nipype/interfaces/fsl/tests/test_auto_BET.py @@ -4,96 +4,129 @@ def test_BET_inputs(): input_map = dict( - args=dict(argstr='%s', ), - center=dict( - argstr='-c %s', - units='voxels', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - frac=dict(argstr='-f %.2f', ), + args=dict(argstr="%s",), + center=dict(argstr="-c %s", units="voxels",), + environ=dict(nohash=True, usedefault=True,), + frac=dict(argstr="-f %.2f",), functional=dict( - argstr='-F', - xor=('functional', 'reduce_bias', 'robust', 'padding', - 'remove_eyes', 'surfaces', 't2_guided'), - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=0, + argstr="-F", + xor=( + "functional", + "reduce_bias", + "robust", + "padding", + "remove_eyes", + "surfaces", + "t2_guided", + ), ), - mask=dict(argstr='-m', ), - mesh=dict(argstr='-e', ), - no_output=dict(argstr='-n', ), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), + mask=dict(argstr="-m",), + mesh=dict(argstr="-e",), + no_output=dict(argstr="-n",), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=1, ), - outline=dict(argstr='-o', ), + outline=dict(argstr="-o",), output_type=dict(), padding=dict( - argstr='-Z', - xor=('functional', 'reduce_bias', 'robust', 'padding', - 'remove_eyes', 'surfaces', 't2_guided'), - ), - radius=dict( - argstr='-r %d', - units='mm', + argstr="-Z", + xor=( + "functional", + "reduce_bias", + "robust", + "padding", + "remove_eyes", + "surfaces", + "t2_guided", + ), ), + radius=dict(argstr="-r %d", units="mm",), reduce_bias=dict( - argstr='-B', - xor=('functional', 'reduce_bias', 'robust', 'padding', - 'remove_eyes', 'surfaces', 't2_guided'), + argstr="-B", + xor=( + "functional", + "reduce_bias", + "robust", + "padding", + "remove_eyes", + "surfaces", + "t2_guided", + ), ), remove_eyes=dict( - argstr='-S', - xor=('functional', 'reduce_bias', 'robust', 'padding', - 'remove_eyes', 'surfaces', 't2_guided'), + argstr="-S", + xor=( + "functional", + "reduce_bias", + "robust", + "padding", + "remove_eyes", + "surfaces", + "t2_guided", + ), ), robust=dict( - argstr='-R', - xor=('functional', 'reduce_bias', 'robust', 'padding', - 'remove_eyes', 'surfaces', 't2_guided'), + argstr="-R", + xor=( + "functional", + "reduce_bias", + "robust", + "padding", + "remove_eyes", + "surfaces", + "t2_guided", + ), ), - skull=dict(argstr='-s', ), + skull=dict(argstr="-s",), surfaces=dict( - argstr='-A', - xor=('functional', 'reduce_bias', 'robust', 'padding', - 'remove_eyes', 'surfaces', 't2_guided'), + argstr="-A", + xor=( + "functional", + "reduce_bias", + "robust", + "padding", + "remove_eyes", + "surfaces", + "t2_guided", + ), ), t2_guided=dict( - argstr='-A2 %s', + argstr="-A2 %s", extensions=None, - xor=('functional', 'reduce_bias', 'robust', 'padding', - 'remove_eyes', 'surfaces', 't2_guided'), + xor=( + "functional", + "reduce_bias", + "robust", + "padding", + "remove_eyes", + "surfaces", + "t2_guided", + ), ), - threshold=dict(argstr='-t', ), - vertical_gradient=dict(argstr='-g %.2f', ), + threshold=dict(argstr="-t",), + vertical_gradient=dict(argstr="-g %.2f",), ) inputs = BET.input_spec() for key, metadata in 
list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BET_outputs(): output_map = dict( - inskull_mask_file=dict(extensions=None, ), - inskull_mesh_file=dict(extensions=None, ), - mask_file=dict(extensions=None, ), - meshfile=dict(extensions=None, ), - out_file=dict(extensions=None, ), - outline_file=dict(extensions=None, ), - outskin_mask_file=dict(extensions=None, ), - outskin_mesh_file=dict(extensions=None, ), - outskull_mask_file=dict(extensions=None, ), - outskull_mesh_file=dict(extensions=None, ), - skull_mask_file=dict(extensions=None, ), + inskull_mask_file=dict(extensions=None,), + inskull_mesh_file=dict(extensions=None,), + mask_file=dict(extensions=None,), + meshfile=dict(extensions=None,), + out_file=dict(extensions=None,), + outline_file=dict(extensions=None,), + outskin_mask_file=dict(extensions=None,), + outskin_mesh_file=dict(extensions=None,), + outskull_mask_file=dict(extensions=None,), + outskull_mesh_file=dict(extensions=None,), + skull_mask_file=dict(extensions=None,), ) outputs = BET.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py b/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py index 055f37e35f..685058f2d4 100644 --- a/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py @@ -4,54 +4,26 @@ def test_BinaryMaths_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr='-dt %s', - position=1, - ), - nan2zeros=dict( - argstr='-nan', - position=3, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + internal_datatype=dict(argstr="-dt %s", position=1,), + nan2zeros=dict(argstr="-nan", position=3,), operand_file=dict( - argstr='%s', + argstr="%s", extensions=None, mandatory=True, position=5, - xor=['operand_value'], + xor=["operand_value"], ), operand_value=dict( - argstr='%.8f', - mandatory=True, - position=5, - xor=['operand_file'], - ), - operation=dict( - argstr='-%s', - mandatory=True, - position=4, + argstr="%.8f", mandatory=True, position=5, xor=["operand_file"], ), + operation=dict(argstr="-%s", mandatory=True, position=4,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr='-odt %s', - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, ), + output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = BinaryMaths.input_spec() @@ -59,8 +31,10 @@ def test_BinaryMaths_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BinaryMaths_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = BinaryMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py b/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py index a4428446e7..9dfe5ed83f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py +++ b/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py @@ -4,37 +4,15 @@ def 
test_ChangeDataType_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr='-dt %s', - position=1, - ), - nan2zeros=dict( - argstr='-nan', - position=3, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + internal_datatype=dict(argstr="-dt %s", position=1,), + nan2zeros=dict(argstr="-nan", position=3,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr='-odt %s', - mandatory=True, - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, ), + output_datatype=dict(argstr="-odt %s", mandatory=True, position=-1,), output_type=dict(), ) inputs = ChangeDataType.input_spec() @@ -42,8 +20,10 @@ def test_ChangeDataType_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ChangeDataType_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = ChangeDataType.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Classifier.py b/nipype/interfaces/fsl/tests/test_auto_Classifier.py index dbd0f129a2..e81bb72096 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Classifier.py +++ b/nipype/interfaces/fsl/tests/test_auto_Classifier.py @@ -4,28 +4,13 @@ def test_Classifier_inputs(): input_map = dict( - args=dict(argstr='%s', ), - artifacts_list_file=dict(extensions=None, ), - environ=dict( - nohash=True, - usedefault=True, - ), - mel_ica=dict( - argstr='%s', - copyfile=False, - position=1, - ), - thresh=dict( - argstr='%d', - mandatory=True, - position=-1, - ), + args=dict(argstr="%s",), + artifacts_list_file=dict(extensions=None,), + environ=dict(nohash=True, usedefault=True,), + mel_ica=dict(argstr="%s", copyfile=False, position=1,), + thresh=dict(argstr="%d", mandatory=True, position=-1,), trained_wts_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=2, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=2, ), ) inputs = Classifier.input_spec() @@ -33,8 +18,10 @@ def test_Classifier_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Classifier_outputs(): - output_map = dict(artifacts_list_file=dict(extensions=None, ), ) + output_map = dict(artifacts_list_file=dict(extensions=None,),) outputs = Classifier.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Cleaner.py b/nipype/interfaces/fsl/tests/test_auto_Cleaner.py index 8e587fde99..d5462a23d8 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Cleaner.py +++ b/nipype/interfaces/fsl/tests/test_auto_Cleaner.py @@ -4,53 +4,27 @@ def test_Cleaner_inputs(): input_map = dict( - aggressive=dict( - argstr='-A', - position=3, - ), - args=dict(argstr='%s', ), + aggressive=dict(argstr="-A", position=3,), + args=dict(argstr="%s",), artifacts_list_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=1, - ), - cleanup_motion=dict( - argstr='-m', - position=2, - ), - 
confound_file=dict( - argstr='-x %s', - extensions=None, - position=4, - ), - confound_file_1=dict( - argstr='-x %s', - extensions=None, - position=5, - ), - confound_file_2=dict( - argstr='-x %s', - extensions=None, - position=6, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - highpass=dict( - argstr='-m -h %f', - position=2, - usedefault=True, - ), + argstr="%s", extensions=None, mandatory=True, position=1, + ), + cleanup_motion=dict(argstr="-m", position=2,), + confound_file=dict(argstr="-x %s", extensions=None, position=4,), + confound_file_1=dict(argstr="-x %s", extensions=None, position=5,), + confound_file_2=dict(argstr="-x %s", extensions=None, position=6,), + environ=dict(nohash=True, usedefault=True,), + highpass=dict(argstr="-m -h %f", position=2, usedefault=True,), ) inputs = Cleaner.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Cleaner_outputs(): - output_map = dict(cleaned_functional_file=dict(extensions=None, ), ) + output_map = dict(cleaned_functional_file=dict(extensions=None,),) outputs = Cleaner.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Cluster.py b/nipype/interfaces/fsl/tests/test_auto_Cluster.py index 10d8af2706..dee38d4875 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Cluster.py +++ b/nipype/interfaces/fsl/tests/test_auto_Cluster.py @@ -4,114 +4,52 @@ def test_Cluster_inputs(): input_map = dict( - args=dict(argstr='%s', ), - connectivity=dict(argstr='--connectivity=%d', ), - cope_file=dict( - argstr='--cope=%s', - extensions=None, - ), - dlh=dict(argstr='--dlh=%.10f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - find_min=dict( - argstr='--min', - usedefault=True, - ), - fractional=dict( - argstr='--fractional', - usedefault=True, - ), - in_file=dict( - argstr='--in=%s', - extensions=None, - mandatory=True, - ), - minclustersize=dict( - argstr='--minclustersize', - usedefault=True, - ), - no_table=dict( - argstr='--no_table', - usedefault=True, - ), - num_maxima=dict(argstr='--num=%d', ), - out_index_file=dict( - argstr='--oindex=%s', - hash_files=False, - ), - out_localmax_txt_file=dict( - argstr='--olmax=%s', - hash_files=False, - ), - out_localmax_vol_file=dict( - argstr='--olmaxim=%s', - hash_files=False, - ), - out_max_file=dict( - argstr='--omax=%s', - hash_files=False, - ), - out_mean_file=dict( - argstr='--omean=%s', - hash_files=False, - ), - out_pval_file=dict( - argstr='--opvals=%s', - hash_files=False, - ), - out_size_file=dict( - argstr='--osize=%s', - hash_files=False, - ), - out_threshold_file=dict( - argstr='--othresh=%s', - hash_files=False, - ), + args=dict(argstr="%s",), + connectivity=dict(argstr="--connectivity=%d",), + cope_file=dict(argstr="--cope=%s", extensions=None,), + dlh=dict(argstr="--dlh=%.10f",), + environ=dict(nohash=True, usedefault=True,), + find_min=dict(argstr="--min", usedefault=True,), + fractional=dict(argstr="--fractional", usedefault=True,), + in_file=dict(argstr="--in=%s", extensions=None, mandatory=True,), + minclustersize=dict(argstr="--minclustersize", usedefault=True,), + no_table=dict(argstr="--no_table", usedefault=True,), + num_maxima=dict(argstr="--num=%d",), + out_index_file=dict(argstr="--oindex=%s", hash_files=False,), + out_localmax_txt_file=dict(argstr="--olmax=%s", hash_files=False,), + out_localmax_vol_file=dict(argstr="--olmaxim=%s", hash_files=False,), + 
out_max_file=dict(argstr="--omax=%s", hash_files=False,), + out_mean_file=dict(argstr="--omean=%s", hash_files=False,), + out_pval_file=dict(argstr="--opvals=%s", hash_files=False,), + out_size_file=dict(argstr="--osize=%s", hash_files=False,), + out_threshold_file=dict(argstr="--othresh=%s", hash_files=False,), output_type=dict(), - peak_distance=dict(argstr='--peakdist=%.10f', ), - pthreshold=dict( - argstr='--pthresh=%.10f', - requires=['dlh', 'volume'], - ), - std_space_file=dict( - argstr='--stdvol=%s', - extensions=None, - ), - threshold=dict( - argstr='--thresh=%.10f', - mandatory=True, - ), - use_mm=dict( - argstr='--mm', - usedefault=True, - ), - volume=dict(argstr='--volume=%d', ), - warpfield_file=dict( - argstr='--warpvol=%s', - extensions=None, - ), - xfm_file=dict( - argstr='--xfm=%s', - extensions=None, - ), + peak_distance=dict(argstr="--peakdist=%.10f",), + pthreshold=dict(argstr="--pthresh=%.10f", requires=["dlh", "volume"],), + std_space_file=dict(argstr="--stdvol=%s", extensions=None,), + threshold=dict(argstr="--thresh=%.10f", mandatory=True,), + use_mm=dict(argstr="--mm", usedefault=True,), + volume=dict(argstr="--volume=%d",), + warpfield_file=dict(argstr="--warpvol=%s", extensions=None,), + xfm_file=dict(argstr="--xfm=%s", extensions=None,), ) inputs = Cluster.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Cluster_outputs(): output_map = dict( - index_file=dict(extensions=None, ), - localmax_txt_file=dict(extensions=None, ), - localmax_vol_file=dict(extensions=None, ), - max_file=dict(extensions=None, ), - mean_file=dict(extensions=None, ), - pval_file=dict(extensions=None, ), - size_file=dict(extensions=None, ), - threshold_file=dict(extensions=None, ), + index_file=dict(extensions=None,), + localmax_txt_file=dict(extensions=None,), + localmax_vol_file=dict(extensions=None,), + max_file=dict(extensions=None,), + mean_file=dict(extensions=None,), + pval_file=dict(extensions=None,), + size_file=dict(extensions=None,), + threshold_file=dict(extensions=None,), ) outputs = Cluster.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Complex.py b/nipype/interfaces/fsl/tests/test_auto_Complex.py index 2f068e7a1d..ac631cde80 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Complex.py +++ b/nipype/interfaces/fsl/tests/test_auto_Complex.py @@ -4,167 +4,185 @@ def test_Complex_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), complex_cartesian=dict( - argstr='-complex', + argstr="-complex", position=1, xor=[ - 'real_polar', 'real_cartesian', 'complex_cartesian', - 'complex_polar', 'complex_split', 'complex_merge' + "real_polar", + "real_cartesian", + "complex_cartesian", + "complex_polar", + "complex_split", + "complex_merge", ], ), - complex_in_file=dict( - argstr='%s', - extensions=None, - position=2, - ), - complex_in_file2=dict( - argstr='%s', - extensions=None, - position=3, - ), + complex_in_file=dict(argstr="%s", extensions=None, position=2,), + complex_in_file2=dict(argstr="%s", extensions=None, position=3,), complex_merge=dict( - argstr='-complexmerge', + argstr="-complexmerge", position=1, xor=[ - 'real_polar', 'real_cartesian', 'complex_cartesian', - 'complex_polar', 'complex_split', 'complex_merge', 'start_vol', - 'end_vol' + "real_polar", + "real_cartesian", + "complex_cartesian", + "complex_polar", + "complex_split", + "complex_merge", + "start_vol", + "end_vol", ], ), complex_out_file=dict( 
- argstr='%s', + argstr="%s", extensions=None, genfile=True, position=-3, xor=[ - 'complex_out_file', 'magnitude_out_file', 'phase_out_file', - 'real_out_file', 'imaginary_out_file', 'real_polar', - 'real_cartesian' + "complex_out_file", + "magnitude_out_file", + "phase_out_file", + "real_out_file", + "imaginary_out_file", + "real_polar", + "real_cartesian", ], ), complex_polar=dict( - argstr='-complexpolar', + argstr="-complexpolar", position=1, xor=[ - 'real_polar', 'real_cartesian', 'complex_cartesian', - 'complex_polar', 'complex_split', 'complex_merge' + "real_polar", + "real_cartesian", + "complex_cartesian", + "complex_polar", + "complex_split", + "complex_merge", ], ), complex_split=dict( - argstr='-complexsplit', + argstr="-complexsplit", position=1, xor=[ - 'real_polar', 'real_cartesian', 'complex_cartesian', - 'complex_polar', 'complex_split', 'complex_merge' + "real_polar", + "real_cartesian", + "complex_cartesian", + "complex_polar", + "complex_split", + "complex_merge", ], ), - end_vol=dict( - argstr='%d', - position=-1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - imaginary_in_file=dict( - argstr='%s', - extensions=None, - position=3, - ), + end_vol=dict(argstr="%d", position=-1,), + environ=dict(nohash=True, usedefault=True,), + imaginary_in_file=dict(argstr="%s", extensions=None, position=3,), imaginary_out_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, position=-3, xor=[ - 'complex_out_file', 'magnitude_out_file', 'phase_out_file', - 'real_polar', 'complex_cartesian', 'complex_polar', - 'complex_split', 'complex_merge' + "complex_out_file", + "magnitude_out_file", + "phase_out_file", + "real_polar", + "complex_cartesian", + "complex_polar", + "complex_split", + "complex_merge", ], ), - magnitude_in_file=dict( - argstr='%s', - extensions=None, - position=2, - ), + magnitude_in_file=dict(argstr="%s", extensions=None, position=2,), magnitude_out_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, position=-4, xor=[ - 'complex_out_file', 'real_out_file', 'imaginary_out_file', - 'real_cartesian', 'complex_cartesian', 'complex_polar', - 'complex_split', 'complex_merge' + "complex_out_file", + "real_out_file", + "imaginary_out_file", + "real_cartesian", + "complex_cartesian", + "complex_polar", + "complex_split", + "complex_merge", ], ), output_type=dict(), - phase_in_file=dict( - argstr='%s', - extensions=None, - position=3, - ), + phase_in_file=dict(argstr="%s", extensions=None, position=3,), phase_out_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, position=-3, xor=[ - 'complex_out_file', 'real_out_file', 'imaginary_out_file', - 'real_cartesian', 'complex_cartesian', 'complex_polar', - 'complex_split', 'complex_merge' + "complex_out_file", + "real_out_file", + "imaginary_out_file", + "real_cartesian", + "complex_cartesian", + "complex_polar", + "complex_split", + "complex_merge", ], ), real_cartesian=dict( - argstr='-realcartesian', + argstr="-realcartesian", position=1, xor=[ - 'real_polar', 'real_cartesian', 'complex_cartesian', - 'complex_polar', 'complex_split', 'complex_merge' + "real_polar", + "real_cartesian", + "complex_cartesian", + "complex_polar", + "complex_split", + "complex_merge", ], ), - real_in_file=dict( - argstr='%s', - extensions=None, - position=2, - ), + real_in_file=dict(argstr="%s", extensions=None, position=2,), real_out_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, position=-4, xor=[ - 'complex_out_file', 'magnitude_out_file', 
'phase_out_file', - 'real_polar', 'complex_cartesian', 'complex_polar', - 'complex_split', 'complex_merge' + "complex_out_file", + "magnitude_out_file", + "phase_out_file", + "real_polar", + "complex_cartesian", + "complex_polar", + "complex_split", + "complex_merge", ], ), real_polar=dict( - argstr='-realpolar', + argstr="-realpolar", position=1, xor=[ - 'real_polar', 'real_cartesian', 'complex_cartesian', - 'complex_polar', 'complex_split', 'complex_merge' + "real_polar", + "real_cartesian", + "complex_cartesian", + "complex_polar", + "complex_split", + "complex_merge", ], ), - start_vol=dict( - argstr='%d', - position=-2, - ), + start_vol=dict(argstr="%d", position=-2,), ) inputs = Complex.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Complex_outputs(): output_map = dict( - complex_out_file=dict(extensions=None, ), - imaginary_out_file=dict(extensions=None, ), - magnitude_out_file=dict(extensions=None, ), - phase_out_file=dict(extensions=None, ), - real_out_file=dict(extensions=None, ), + complex_out_file=dict(extensions=None,), + imaginary_out_file=dict(extensions=None,), + magnitude_out_file=dict(extensions=None,), + phase_out_file=dict(extensions=None,), + real_out_file=dict(extensions=None,), ) outputs = Complex.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py b/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py index 0a58eac8e3..185c089889 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py +++ b/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py @@ -4,53 +4,27 @@ def test_ContrastMgr_inputs(): input_map = dict( - args=dict(argstr='%s', ), - contrast_num=dict(argstr='-cope', ), - corrections=dict( - copyfile=False, - extensions=None, - mandatory=True, - ), - dof_file=dict( - argstr='', - copyfile=False, - extensions=None, - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fcon_file=dict( - argstr='-f %s', - extensions=None, - ), + args=dict(argstr="%s",), + contrast_num=dict(argstr="-cope",), + corrections=dict(copyfile=False, extensions=None, mandatory=True,), + dof_file=dict(argstr="", copyfile=False, extensions=None, mandatory=True,), + environ=dict(nohash=True, usedefault=True,), + fcon_file=dict(argstr="-f %s", extensions=None,), output_type=dict(), - param_estimates=dict( - argstr='', - copyfile=False, - mandatory=True, - ), + param_estimates=dict(argstr="", copyfile=False, mandatory=True,), sigmasquareds=dict( - argstr='', - copyfile=False, - extensions=None, - mandatory=True, - position=-2, - ), - suffix=dict(argstr='-suffix %s', ), - tcon_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, + argstr="", copyfile=False, extensions=None, mandatory=True, position=-2, ), + suffix=dict(argstr="-suffix %s",), + tcon_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), ) inputs = ContrastMgr.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ContrastMgr_outputs(): output_map = dict( copes=dict(), diff --git a/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py b/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py index 5b321f761f..dcfc562258 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py @@ -4,81 +4,42 @@ def test_ConvertWarp_inputs(): 
input_map = dict( - abswarp=dict( - argstr='--abs', - xor=['relwarp'], - ), - args=dict(argstr='%s', ), - cons_jacobian=dict(argstr='--constrainj', ), - environ=dict( - nohash=True, - usedefault=True, - ), - jacobian_max=dict(argstr='--jmax=%f', ), - jacobian_min=dict(argstr='--jmin=%f', ), - midmat=dict( - argstr='--midmat=%s', - extensions=None, - ), - out_abswarp=dict( - argstr='--absout', - xor=['out_relwarp'], - ), + abswarp=dict(argstr="--abs", xor=["relwarp"],), + args=dict(argstr="%s",), + cons_jacobian=dict(argstr="--constrainj",), + environ=dict(nohash=True, usedefault=True,), + jacobian_max=dict(argstr="--jmax=%f",), + jacobian_min=dict(argstr="--jmin=%f",), + midmat=dict(argstr="--midmat=%s", extensions=None,), + out_abswarp=dict(argstr="--absout", xor=["out_relwarp"],), out_file=dict( - argstr='--out=%s', + argstr="--out=%s", extensions=None, - name_source=['reference'], - name_template='%s_concatwarp', - output_name='out_file', + name_source=["reference"], + name_template="%s_concatwarp", + output_name="out_file", position=-1, ), - out_relwarp=dict( - argstr='--relout', - xor=['out_abswarp'], - ), + out_relwarp=dict(argstr="--relout", xor=["out_abswarp"],), output_type=dict(), - postmat=dict( - argstr='--postmat=%s', - extensions=None, - ), - premat=dict( - argstr='--premat=%s', - extensions=None, - ), - reference=dict( - argstr='--ref=%s', - extensions=None, - mandatory=True, - position=1, - ), - relwarp=dict( - argstr='--rel', - xor=['abswarp'], - ), - shift_direction=dict( - argstr='--shiftdir=%s', - requires=['shift_in_file'], - ), - shift_in_file=dict( - argstr='--shiftmap=%s', - extensions=None, - ), - warp1=dict( - argstr='--warp1=%s', - extensions=None, - ), - warp2=dict( - argstr='--warp2=%s', - extensions=None, - ), + postmat=dict(argstr="--postmat=%s", extensions=None,), + premat=dict(argstr="--premat=%s", extensions=None,), + reference=dict(argstr="--ref=%s", extensions=None, mandatory=True, position=1,), + relwarp=dict(argstr="--rel", xor=["abswarp"],), + shift_direction=dict(argstr="--shiftdir=%s", requires=["shift_in_file"],), + shift_in_file=dict(argstr="--shiftmap=%s", extensions=None,), + warp1=dict(argstr="--warp1=%s", extensions=None,), + warp2=dict(argstr="--warp2=%s", extensions=None,), ) inputs = ConvertWarp.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ConvertWarp_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = ConvertWarp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py b/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py index f4bf1b24f9..b4311d0d37 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py +++ b/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py @@ -4,41 +4,29 @@ def test_ConvertXFM_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), concat_xfm=dict( - argstr='-concat', + argstr="-concat", position=-3, - requires=['in_file2'], - xor=['invert_xfm', 'concat_xfm', 'fix_scale_skew'], - ), - environ=dict( - nohash=True, - usedefault=True, + requires=["in_file2"], + xor=["invert_xfm", "concat_xfm", "fix_scale_skew"], ), + environ=dict(nohash=True, usedefault=True,), fix_scale_skew=dict( - argstr='-fixscaleskew', + argstr="-fixscaleskew", position=-3, - requires=['in_file2'], - xor=['invert_xfm', 'concat_xfm', 
'fix_scale_skew'], - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - ), - in_file2=dict( - argstr='%s', - extensions=None, - position=-2, + requires=["in_file2"], + xor=["invert_xfm", "concat_xfm", "fix_scale_skew"], ), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), + in_file2=dict(argstr="%s", extensions=None, position=-2,), invert_xfm=dict( - argstr='-inverse', + argstr="-inverse", position=-3, - xor=['invert_xfm', 'concat_xfm', 'fix_scale_skew'], + xor=["invert_xfm", "concat_xfm", "fix_scale_skew"], ), out_file=dict( - argstr='-omat %s', + argstr="-omat %s", extensions=None, genfile=True, hash_files=False, @@ -51,8 +39,10 @@ def test_ConvertXFM_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ConvertXFM_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = ConvertXFM.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py b/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py index 9a402237cd..ade301a00e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py +++ b/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py @@ -4,31 +4,20 @@ def test_CopyGeom_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), dest_file=dict( - argstr='%s', + argstr="%s", copyfile=True, extensions=None, mandatory=True, - name_source='dest_file', - name_template='%s', - output_name='out_file', + name_source="dest_file", + name_template="%s", + output_name="out_file", position=1, ), - environ=dict( - nohash=True, - usedefault=True, - ), - ignore_dims=dict( - argstr='-d', - position='-1', - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=0, - ), + environ=dict(nohash=True, usedefault=True,), + ignore_dims=dict(argstr="-d", position="-1",), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), output_type=dict(), ) inputs = CopyGeom.input_spec() @@ -36,8 +25,10 @@ def test_CopyGeom_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CopyGeom_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = CopyGeom.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_DTIFit.py b/nipype/interfaces/fsl/tests/test_auto_DTIFit.py index 83dfc512b6..a0cf704d87 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DTIFit.py +++ b/nipype/interfaces/fsl/tests/test_auto_DTIFit.py @@ -4,78 +4,47 @@ def test_DTIFit_inputs(): input_map = dict( - args=dict(argstr='%s', ), - base_name=dict( - argstr='-o %s', - position=1, - usedefault=True, - ), - bvals=dict( - argstr='-b %s', - extensions=None, - mandatory=True, - position=4, - ), - bvecs=dict( - argstr='-r %s', - extensions=None, - mandatory=True, - position=3, - ), - cni=dict( - argstr='--cni=%s', - extensions=None, - ), - dwi=dict( - argstr='-k %s', - extensions=None, - mandatory=True, - position=0, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gradnonlin=dict( - argstr='--gradnonlin=%s', - extensions=None, - ), - little_bit=dict(argstr='--littlebit', ), - mask=dict( - argstr='-m %s', - extensions=None, - 
mandatory=True, - position=2, - ), - max_x=dict(argstr='-X %d', ), - max_y=dict(argstr='-Y %d', ), - max_z=dict(argstr='-Z %d', ), - min_x=dict(argstr='-x %d', ), - min_y=dict(argstr='-y %d', ), - min_z=dict(argstr='-z %d', ), + args=dict(argstr="%s",), + base_name=dict(argstr="-o %s", position=1, usedefault=True,), + bvals=dict(argstr="-b %s", extensions=None, mandatory=True, position=4,), + bvecs=dict(argstr="-r %s", extensions=None, mandatory=True, position=3,), + cni=dict(argstr="--cni=%s", extensions=None,), + dwi=dict(argstr="-k %s", extensions=None, mandatory=True, position=0,), + environ=dict(nohash=True, usedefault=True,), + gradnonlin=dict(argstr="--gradnonlin=%s", extensions=None,), + little_bit=dict(argstr="--littlebit",), + mask=dict(argstr="-m %s", extensions=None, mandatory=True, position=2,), + max_x=dict(argstr="-X %d",), + max_y=dict(argstr="-Y %d",), + max_z=dict(argstr="-Z %d",), + min_x=dict(argstr="-x %d",), + min_y=dict(argstr="-y %d",), + min_z=dict(argstr="-z %d",), output_type=dict(), - save_tensor=dict(argstr='--save_tensor', ), - sse=dict(argstr='--sse', ), + save_tensor=dict(argstr="--save_tensor",), + sse=dict(argstr="--sse",), ) inputs = DTIFit.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DTIFit_outputs(): output_map = dict( - FA=dict(extensions=None, ), - L1=dict(extensions=None, ), - L2=dict(extensions=None, ), - L3=dict(extensions=None, ), - MD=dict(extensions=None, ), - MO=dict(extensions=None, ), - S0=dict(extensions=None, ), - V1=dict(extensions=None, ), - V2=dict(extensions=None, ), - V3=dict(extensions=None, ), - sse=dict(extensions=None, ), - tensor=dict(extensions=None, ), + FA=dict(extensions=None,), + L1=dict(extensions=None,), + L2=dict(extensions=None,), + L3=dict(extensions=None,), + MD=dict(extensions=None,), + MO=dict(extensions=None,), + S0=dict(extensions=None,), + V1=dict(extensions=None,), + V2=dict(extensions=None,), + V3=dict(extensions=None,), + sse=dict(extensions=None,), + tensor=dict(extensions=None,), ) outputs = DTIFit.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_DilateImage.py b/nipype/interfaces/fsl/tests/test_auto_DilateImage.py index 0bd0fbd869..4a9a49eb45 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DilateImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_DilateImage.py @@ -4,56 +4,21 @@ def test_DilateImage_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr='-dt %s', - position=1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + internal_datatype=dict(argstr="-dt %s", position=1,), kernel_file=dict( - argstr='%s', - extensions=None, - position=5, - xor=['kernel_size'], - ), - kernel_shape=dict( - argstr='-kernel %s', - position=4, - ), - kernel_size=dict( - argstr='%.4f', - position=5, - xor=['kernel_file'], - ), - nan2zeros=dict( - argstr='-nan', - position=3, - ), - operation=dict( - argstr='-dil%s', - mandatory=True, - position=6, + argstr="%s", extensions=None, position=5, xor=["kernel_size"], ), + kernel_shape=dict(argstr="-kernel %s", position=4,), + kernel_size=dict(argstr="%.4f", position=5, xor=["kernel_file"],), + nan2zeros=dict(argstr="-nan", position=3,), + 
operation=dict(argstr="-dil%s", mandatory=True, position=6,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr='-odt %s', - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, ), + output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = DilateImage.input_spec() @@ -61,8 +26,10 @@ def test_DilateImage_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DilateImage_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = DilateImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py b/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py index 6e67f07c4d..bd2134ec5d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py +++ b/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py @@ -4,31 +4,15 @@ def test_DistanceMap_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), distance_map=dict( - argstr='--out=%s', - extensions=None, - genfile=True, - hash_files=False, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='--in=%s', - extensions=None, - mandatory=True, - ), - invert_input=dict(argstr='--invert', ), - local_max_file=dict( - argstr='--localmax=%s', - hash_files=False, - ), - mask_file=dict( - argstr='--mask=%s', - extensions=None, + argstr="--out=%s", extensions=None, genfile=True, hash_files=False, ), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="--in=%s", extensions=None, mandatory=True,), + invert_input=dict(argstr="--invert",), + local_max_file=dict(argstr="--localmax=%s", hash_files=False,), + mask_file=dict(argstr="--mask=%s", extensions=None,), output_type=dict(), ) inputs = DistanceMap.input_spec() @@ -36,10 +20,11 @@ def test_DistanceMap_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DistanceMap_outputs(): output_map = dict( - distance_map=dict(extensions=None, ), - local_max_file=dict(extensions=None, ), + distance_map=dict(extensions=None,), local_max_file=dict(extensions=None,), ) outputs = DistanceMap.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_DualRegression.py b/nipype/interfaces/fsl/tests/test_auto_DualRegression.py index 2a9477da6c..99e6a0f23d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DualRegression.py +++ b/nipype/interfaces/fsl/tests/test_auto_DualRegression.py @@ -4,53 +4,18 @@ def test_DualRegression_inputs(): input_map = dict( - args=dict(argstr='%s', ), - con_file=dict( - argstr='%s', - extensions=None, - position=4, - ), - des_norm=dict( - argstr='%i', - position=2, - usedefault=True, - ), - design_file=dict( - argstr='%s', - extensions=None, - position=3, - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + con_file=dict(argstr="%s", extensions=None, position=4,), + des_norm=dict(argstr="%i", position=2, usedefault=True,), + design_file=dict(argstr="%s", extensions=None, position=3,), + environ=dict(nohash=True, usedefault=True,), group_IC_maps_4D=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=1, - ), - in_files=dict( - argstr='%s', - 
mandatory=True, - position=-1, - sep=' ', - ), - n_perm=dict( - argstr='%i', - mandatory=True, - position=5, - ), - one_sample_group_mean=dict( - argstr='-1', - position=3, - ), - out_dir=dict( - argstr='%s', - genfile=True, - position=6, - usedefault=True, + argstr="%s", extensions=None, mandatory=True, position=1, ), + in_files=dict(argstr="%s", mandatory=True, position=-1, sep=" ",), + n_perm=dict(argstr="%i", mandatory=True, position=5,), + one_sample_group_mean=dict(argstr="-1", position=3,), + out_dir=dict(argstr="%s", genfile=True, position=6, usedefault=True,), output_type=dict(), ) inputs = DualRegression.input_spec() @@ -58,8 +23,10 @@ def test_DualRegression_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DualRegression_outputs(): - output_map = dict(out_dir=dict(), ) + output_map = dict(out_dir=dict(),) outputs = DualRegression.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py b/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py index 3f5396ea8e..39476ab324 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py @@ -4,76 +4,36 @@ def test_EPIDeWarp_inputs(): input_map = dict( - args=dict(argstr='%s', ), - cleanup=dict(argstr='--cleanup', ), - dph_file=dict( - argstr='--dph %s', - extensions=None, - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - epi_file=dict( - argstr='--epi %s', - extensions=None, - ), - epidw=dict( - argstr='--epidw %s', - genfile=False, - ), - esp=dict( - argstr='--esp %s', - usedefault=True, - ), - exf_file=dict( - argstr='--exf %s', - extensions=None, - ), - exfdw=dict( - argstr='--exfdw %s', - genfile=True, - ), - mag_file=dict( - argstr='--mag %s', - extensions=None, - mandatory=True, - position=0, - ), - nocleanup=dict( - argstr='--nocleanup', - usedefault=True, - ), + args=dict(argstr="%s",), + cleanup=dict(argstr="--cleanup",), + dph_file=dict(argstr="--dph %s", extensions=None, mandatory=True,), + environ=dict(nohash=True, usedefault=True,), + epi_file=dict(argstr="--epi %s", extensions=None,), + epidw=dict(argstr="--epidw %s", genfile=False,), + esp=dict(argstr="--esp %s", usedefault=True,), + exf_file=dict(argstr="--exf %s", extensions=None,), + exfdw=dict(argstr="--exfdw %s", genfile=True,), + mag_file=dict(argstr="--mag %s", extensions=None, mandatory=True, position=0,), + nocleanup=dict(argstr="--nocleanup", usedefault=True,), output_type=dict(), - sigma=dict( - argstr='--sigma %s', - usedefault=True, - ), - tediff=dict( - argstr='--tediff %s', - usedefault=True, - ), - tmpdir=dict( - argstr='--tmpdir %s', - genfile=True, - ), - vsm=dict( - argstr='--vsm %s', - genfile=True, - ), + sigma=dict(argstr="--sigma %s", usedefault=True,), + tediff=dict(argstr="--tediff %s", usedefault=True,), + tmpdir=dict(argstr="--tmpdir %s", genfile=True,), + vsm=dict(argstr="--vsm %s", genfile=True,), ) inputs = EPIDeWarp.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_EPIDeWarp_outputs(): output_map = dict( - exf_mask=dict(extensions=None, ), - exfdw=dict(extensions=None, ), - unwarped_file=dict(extensions=None, ), - vsm_file=dict(extensions=None, ), + exf_mask=dict(extensions=None,), + exfdw=dict(extensions=None,), + 
unwarped_file=dict(extensions=None,), + vsm_file=dict(extensions=None,), ) outputs = EPIDeWarp.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Eddy.py b/nipype/interfaces/fsl/tests/test_auto_Eddy.py index 065fbb1889..cc7eff7a27 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Eddy.py +++ b/nipype/interfaces/fsl/tests/test_auto_Eddy.py @@ -4,98 +4,39 @@ def test_Eddy_inputs(): input_map = dict( - args=dict(argstr='%s', ), - cnr_maps=dict( - argstr='--cnr_maps', - min_ver='5.0.10', - ), - dont_peas=dict(argstr='--dont_peas', ), - dont_sep_offs_move=dict(argstr='--dont_sep_offs_move', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fep=dict(argstr='--fep', ), - field=dict(argstr='--field=%s', ), - field_mat=dict( - argstr='--field_mat=%s', - extensions=None, - ), - flm=dict(argstr='--flm=%s', ), - fudge_factor=dict( - argstr='--ff=%s', - usedefault=True, - ), - fwhm=dict(argstr='--fwhm=%s', ), - in_acqp=dict( - argstr='--acqp=%s', - extensions=None, - mandatory=True, - ), - in_bval=dict( - argstr='--bvals=%s', - extensions=None, - mandatory=True, - ), - in_bvec=dict( - argstr='--bvecs=%s', - extensions=None, - mandatory=True, - ), - in_file=dict( - argstr='--imain=%s', - extensions=None, - mandatory=True, - ), - in_index=dict( - argstr='--index=%s', - extensions=None, - mandatory=True, - ), - in_mask=dict( - argstr='--mask=%s', - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s",), + cnr_maps=dict(argstr="--cnr_maps", min_ver="5.0.10",), + dont_peas=dict(argstr="--dont_peas",), + dont_sep_offs_move=dict(argstr="--dont_sep_offs_move",), + environ=dict(nohash=True, usedefault=True,), + fep=dict(argstr="--fep",), + field=dict(argstr="--field=%s",), + field_mat=dict(argstr="--field_mat=%s", extensions=None,), + flm=dict(argstr="--flm=%s",), + fudge_factor=dict(argstr="--ff=%s", usedefault=True,), + fwhm=dict(argstr="--fwhm=%s",), + in_acqp=dict(argstr="--acqp=%s", extensions=None, mandatory=True,), + in_bval=dict(argstr="--bvals=%s", extensions=None, mandatory=True,), + in_bvec=dict(argstr="--bvecs=%s", extensions=None, mandatory=True,), + in_file=dict(argstr="--imain=%s", extensions=None, mandatory=True,), + in_index=dict(argstr="--index=%s", extensions=None, mandatory=True,), + in_mask=dict(argstr="--mask=%s", extensions=None, mandatory=True,), in_topup_fieldcoef=dict( - argstr='--topup=%s', - extensions=None, - requires=['in_topup_movpar'], - ), - in_topup_movpar=dict( - extensions=None, - requires=['in_topup_fieldcoef'], - ), - interp=dict(argstr='--interp=%s', ), - is_shelled=dict(argstr='--data_is_shelled', ), - method=dict(argstr='--resamp=%s', ), - niter=dict( - argstr='--niter=%s', - usedefault=True, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - nvoxhp=dict( - argstr='--nvoxhp=%s', - usedefault=True, - ), - out_base=dict( - argstr='--out=%s', - usedefault=True, - ), + argstr="--topup=%s", extensions=None, requires=["in_topup_movpar"], + ), + in_topup_movpar=dict(extensions=None, requires=["in_topup_fieldcoef"],), + interp=dict(argstr="--interp=%s",), + is_shelled=dict(argstr="--data_is_shelled",), + method=dict(argstr="--resamp=%s",), + niter=dict(argstr="--niter=%s", usedefault=True,), + num_threads=dict(nohash=True, usedefault=True,), + nvoxhp=dict(argstr="--nvoxhp=%s", usedefault=True,), + out_base=dict(argstr="--out=%s", usedefault=True,), output_type=dict(), - repol=dict(argstr='--repol', ), - residuals=dict( - argstr='--residuals', - min_ver='5.0.10', - ), - session=dict( - argstr='--session=%s', - extensions=None, - 
), - slm=dict(argstr='--slm=%s', ), + repol=dict(argstr="--repol",), + residuals=dict(argstr="--residuals", min_ver="5.0.10",), + session=dict(argstr="--session=%s", extensions=None,), + slm=dict(argstr="--slm=%s",), use_cuda=dict(), ) inputs = Eddy.input_spec() @@ -103,17 +44,19 @@ def test_Eddy_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Eddy_outputs(): output_map = dict( - out_cnr_maps=dict(extensions=None, ), - out_corrected=dict(extensions=None, ), - out_movement_rms=dict(extensions=None, ), - out_outlier_report=dict(extensions=None, ), - out_parameter=dict(extensions=None, ), - out_residuals=dict(extensions=None, ), - out_restricted_movement_rms=dict(extensions=None, ), - out_rotated_bvecs=dict(extensions=None, ), - out_shell_alignment_parameters=dict(extensions=None, ), + out_cnr_maps=dict(extensions=None,), + out_corrected=dict(extensions=None,), + out_movement_rms=dict(extensions=None,), + out_outlier_report=dict(extensions=None,), + out_parameter=dict(extensions=None,), + out_residuals=dict(extensions=None,), + out_restricted_movement_rms=dict(extensions=None,), + out_rotated_bvecs=dict(extensions=None,), + out_shell_alignment_parameters=dict(extensions=None,), ) outputs = Eddy.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py b/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py index f1d5113661..fbd2af30cb 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py +++ b/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py @@ -4,40 +4,29 @@ def test_EddyCorrect_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=0, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, - name_source=['in_file'], - name_template='%s_edc', - output_name='eddy_corrected', + name_source=["in_file"], + name_template="%s_edc", + output_name="eddy_corrected", position=1, ), output_type=dict(), - ref_num=dict( - argstr='%d', - mandatory=True, - position=2, - usedefault=True, - ), + ref_num=dict(argstr="%d", mandatory=True, position=2, usedefault=True,), ) inputs = EddyCorrect.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_EddyCorrect_outputs(): - output_map = dict(eddy_corrected=dict(extensions=None, ), ) + output_map = dict(eddy_corrected=dict(extensions=None,),) outputs = EddyCorrect.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py b/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py index edfe407ae4..14b6ca3a28 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py +++ b/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py @@ -4,71 +4,39 @@ def test_EddyQuad_inputs(): input_map = dict( - args=dict(argstr='%s', ), - base_name=dict( - argstr='%s', - position=0, - usedefault=True, - ), - bval_file=dict( - argstr='--bvals %s', - extensions=None, - mandatory=True, - ), - bvec_file=dict( - argstr='--bvecs %s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - field=dict( - argstr='--field %s', - extensions=None, - ), - 
idx_file=dict( - argstr='--eddyIdx %s', - extensions=None, - mandatory=True, - ), - mask_file=dict( - argstr='--mask %s', - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s",), + base_name=dict(argstr="%s", position=0, usedefault=True,), + bval_file=dict(argstr="--bvals %s", extensions=None, mandatory=True,), + bvec_file=dict(argstr="--bvecs %s", extensions=None,), + environ=dict(nohash=True, usedefault=True,), + field=dict(argstr="--field %s", extensions=None,), + idx_file=dict(argstr="--eddyIdx %s", extensions=None, mandatory=True,), + mask_file=dict(argstr="--mask %s", extensions=None, mandatory=True,), output_dir=dict( - argstr='--output-dir %s', - name_source=['base_name'], - name_template='%s.qc', + argstr="--output-dir %s", name_source=["base_name"], name_template="%s.qc", ), output_type=dict(), - param_file=dict( - argstr='--eddyParams %s', - extensions=None, - mandatory=True, - ), - slice_spec=dict( - argstr='--slspec %s', - extensions=None, - ), - verbose=dict(argstr='--verbose', ), + param_file=dict(argstr="--eddyParams %s", extensions=None, mandatory=True,), + slice_spec=dict(argstr="--slspec %s", extensions=None,), + verbose=dict(argstr="--verbose",), ) inputs = EddyQuad.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_EddyQuad_outputs(): output_map = dict( avg_b0_pe_png=dict(), avg_b_png=dict(), - clean_volumes=dict(extensions=None, ), + clean_volumes=dict(extensions=None,), cnr_png=dict(), - qc_json=dict(extensions=None, ), - qc_pdf=dict(extensions=None, ), - residuals=dict(extensions=None, ), - vdm_png=dict(extensions=None, ), + qc_json=dict(extensions=None,), + qc_pdf=dict(extensions=None,), + residuals=dict(extensions=None,), + vdm_png=dict(extensions=None,), ) outputs = EddyQuad.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_EpiReg.py b/nipype/interfaces/fsl/tests/test_auto_EpiReg.py index 8f7cadfbb5..f1f1482260 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EpiReg.py +++ b/nipype/interfaces/fsl/tests/test_auto_EpiReg.py @@ -4,84 +4,48 @@ def test_EpiReg_inputs(): input_map = dict( - args=dict(argstr='%s', ), - echospacing=dict(argstr='--echospacing=%f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - epi=dict( - argstr='--epi=%s', - extensions=None, - mandatory=True, - position=-4, - ), - fmap=dict( - argstr='--fmap=%s', - extensions=None, - ), - fmapmag=dict( - argstr='--fmapmag=%s', - extensions=None, - ), - fmapmagbrain=dict( - argstr='--fmapmagbrain=%s', - extensions=None, - ), - no_clean=dict( - argstr='--noclean', - usedefault=True, - ), - no_fmapreg=dict(argstr='--nofmapreg', ), - out_base=dict( - argstr='--out=%s', - position=-1, - usedefault=True, - ), + args=dict(argstr="%s",), + echospacing=dict(argstr="--echospacing=%f",), + environ=dict(nohash=True, usedefault=True,), + epi=dict(argstr="--epi=%s", extensions=None, mandatory=True, position=-4,), + fmap=dict(argstr="--fmap=%s", extensions=None,), + fmapmag=dict(argstr="--fmapmag=%s", extensions=None,), + fmapmagbrain=dict(argstr="--fmapmagbrain=%s", extensions=None,), + no_clean=dict(argstr="--noclean", usedefault=True,), + no_fmapreg=dict(argstr="--nofmapreg",), + out_base=dict(argstr="--out=%s", position=-1, usedefault=True,), output_type=dict(), - pedir=dict(argstr='--pedir=%s', ), + pedir=dict(argstr="--pedir=%s",), t1_brain=dict( - argstr='--t1brain=%s', - extensions=None, - mandatory=True, - position=-2, - ), - t1_head=dict( - 
argstr='--t1=%s', - extensions=None, - mandatory=True, - position=-3, - ), - weight_image=dict( - argstr='--weight=%s', - extensions=None, - ), - wmseg=dict( - argstr='--wmseg=%s', - extensions=None, + argstr="--t1brain=%s", extensions=None, mandatory=True, position=-2, ), + t1_head=dict(argstr="--t1=%s", extensions=None, mandatory=True, position=-3,), + weight_image=dict(argstr="--weight=%s", extensions=None,), + wmseg=dict(argstr="--wmseg=%s", extensions=None,), ) inputs = EpiReg.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_EpiReg_outputs(): output_map = dict( - epi2str_inv=dict(extensions=None, ), - epi2str_mat=dict(extensions=None, ), - fmap2epi_mat=dict(extensions=None, ), - fmap2str_mat=dict(extensions=None, ), - fmap_epi=dict(extensions=None, ), - fmap_str=dict(extensions=None, ), - fmapmag_str=dict(extensions=None, ), - fullwarp=dict(extensions=None, ), - out_1vol=dict(extensions=None, ), - out_file=dict(extensions=None, ), - seg=dict(extensions=None, ), - shiftmap=dict(extensions=None, ), - wmedge=dict(extensions=None, ), - wmseg=dict(extensions=None, ), + epi2str_inv=dict(extensions=None,), + epi2str_mat=dict(extensions=None,), + fmap2epi_mat=dict(extensions=None,), + fmap2str_mat=dict(extensions=None,), + fmap_epi=dict(extensions=None,), + fmap_str=dict(extensions=None,), + fmapmag_str=dict(extensions=None,), + fullwarp=dict(extensions=None,), + out_1vol=dict(extensions=None,), + out_file=dict(extensions=None,), + seg=dict(extensions=None,), + shiftmap=dict(extensions=None,), + wmedge=dict(extensions=None,), + wmseg=dict(extensions=None,), ) outputs = EpiReg.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py b/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py index 09612d176a..1b813812c3 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py @@ -4,56 +4,21 @@ def test_ErodeImage_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr='-dt %s', - position=1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + internal_datatype=dict(argstr="-dt %s", position=1,), kernel_file=dict( - argstr='%s', - extensions=None, - position=5, - xor=['kernel_size'], - ), - kernel_shape=dict( - argstr='-kernel %s', - position=4, - ), - kernel_size=dict( - argstr='%.4f', - position=5, - xor=['kernel_file'], - ), - minimum_filter=dict( - argstr='%s', - position=6, - usedefault=True, - ), - nan2zeros=dict( - argstr='-nan', - position=3, + argstr="%s", extensions=None, position=5, xor=["kernel_size"], ), + kernel_shape=dict(argstr="-kernel %s", position=4,), + kernel_size=dict(argstr="%.4f", position=5, xor=["kernel_file"],), + minimum_filter=dict(argstr="%s", position=6, usedefault=True,), + nan2zeros=dict(argstr="-nan", position=3,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr='-odt %s', - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, ), + output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = ErodeImage.input_spec() @@ -61,8 +26,10 @@ 
def test_ErodeImage_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ErodeImage_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = ErodeImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py b/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py index 04bcc217bc..4039eb50a2 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py +++ b/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py @@ -4,73 +4,45 @@ def test_ExtractROI_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), crop_list=dict( - argstr='%s', + argstr="%s", position=2, xor=[ - 'x_min', 'x_size', 'y_min', 'y_size', 'z_min', 'z_size', - 't_min', 't_size' + "x_min", + "x_size", + "y_min", + "y_size", + "z_min", + "z_size", + "t_min", + "t_size", ], ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=0, - ), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), output_type=dict(), roi_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=1, - ), - t_min=dict( - argstr='%d', - position=8, - ), - t_size=dict( - argstr='%d', - position=9, - ), - x_min=dict( - argstr='%d', - position=2, - ), - x_size=dict( - argstr='%d', - position=3, - ), - y_min=dict( - argstr='%d', - position=4, - ), - y_size=dict( - argstr='%d', - position=5, - ), - z_min=dict( - argstr='%d', - position=6, - ), - z_size=dict( - argstr='%d', - position=7, - ), + argstr="%s", extensions=None, genfile=True, hash_files=False, position=1, + ), + t_min=dict(argstr="%d", position=8,), + t_size=dict(argstr="%d", position=9,), + x_min=dict(argstr="%d", position=2,), + x_size=dict(argstr="%d", position=3,), + y_min=dict(argstr="%d", position=4,), + y_size=dict(argstr="%d", position=5,), + z_min=dict(argstr="%d", position=6,), + z_size=dict(argstr="%d", position=7,), ) inputs = ExtractROI.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ExtractROI_outputs(): - output_map = dict(roi_file=dict(extensions=None, ), ) + output_map = dict(roi_file=dict(extensions=None,),) outputs = ExtractROI.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FAST.py b/nipype/interfaces/fsl/tests/test_auto_FAST.py index aae90cd3a3..3c245682a6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FAST.py +++ b/nipype/interfaces/fsl/tests/test_auto_FAST.py @@ -4,67 +4,49 @@ def test_FAST_inputs(): input_map = dict( - args=dict(argstr='%s', ), - bias_iters=dict(argstr='-I %d', ), - bias_lowpass=dict( - argstr='-l %d', - units='mm', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - hyper=dict(argstr='-H %.2f', ), - img_type=dict(argstr='-t %d', ), - in_files=dict( - argstr='%s', - copyfile=False, - mandatory=True, - position=-1, - ), - init_seg_smooth=dict(argstr='-f %.3f', ), - init_transform=dict( - argstr='-a %s', - extensions=None, - ), - iters_afterbias=dict(argstr='-O %d', ), - manual_seg=dict( - argstr='-s %s', - extensions=None, - ), - mixel_smooth=dict(argstr='-R %.2f', ), - no_bias=dict(argstr='-N', ), - 
no_pve=dict(argstr='--nopve', ), - number_classes=dict(argstr='-n %d', ), - other_priors=dict(argstr='-A %s', ), - out_basename=dict( - argstr='-o %s', - extensions=None, - ), - output_biascorrected=dict(argstr='-B', ), - output_biasfield=dict(argstr='-b', ), + args=dict(argstr="%s",), + bias_iters=dict(argstr="-I %d",), + bias_lowpass=dict(argstr="-l %d", units="mm",), + environ=dict(nohash=True, usedefault=True,), + hyper=dict(argstr="-H %.2f",), + img_type=dict(argstr="-t %d",), + in_files=dict(argstr="%s", copyfile=False, mandatory=True, position=-1,), + init_seg_smooth=dict(argstr="-f %.3f",), + init_transform=dict(argstr="-a %s", extensions=None,), + iters_afterbias=dict(argstr="-O %d",), + manual_seg=dict(argstr="-s %s", extensions=None,), + mixel_smooth=dict(argstr="-R %.2f",), + no_bias=dict(argstr="-N",), + no_pve=dict(argstr="--nopve",), + number_classes=dict(argstr="-n %d",), + other_priors=dict(argstr="-A %s",), + out_basename=dict(argstr="-o %s", extensions=None,), + output_biascorrected=dict(argstr="-B",), + output_biasfield=dict(argstr="-b",), output_type=dict(), - probability_maps=dict(argstr='-p', ), - segment_iters=dict(argstr='-W %d', ), - segments=dict(argstr='-g', ), - use_priors=dict(argstr='-P', ), - verbose=dict(argstr='-v', ), + probability_maps=dict(argstr="-p",), + segment_iters=dict(argstr="-W %d",), + segments=dict(argstr="-g",), + use_priors=dict(argstr="-P",), + verbose=dict(argstr="-v",), ) inputs = FAST.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FAST_outputs(): output_map = dict( bias_field=dict(), - mixeltype=dict(extensions=None, ), + mixeltype=dict(extensions=None,), partial_volume_files=dict(), - partial_volume_map=dict(extensions=None, ), + partial_volume_map=dict(extensions=None,), probability_maps=dict(), restored_image=dict(), tissue_class_files=dict(), - tissue_class_map=dict(extensions=None, ), + tissue_class_map=dict(extensions=None,), ) outputs = FAST.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FEAT.py b/nipype/interfaces/fsl/tests/test_auto_FEAT.py index 3b86062923..f2d4e1a90d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FEAT.py +++ b/nipype/interfaces/fsl/tests/test_auto_FEAT.py @@ -4,17 +4,9 @@ def test_FEAT_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fsf_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=0, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + fsf_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), output_type=dict(), ) inputs = FEAT.input_spec() @@ -22,8 +14,10 @@ def test_FEAT_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FEAT_outputs(): - output_map = dict(feat_dir=dict(), ) + output_map = dict(feat_dir=dict(),) outputs = FEAT.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FEATModel.py b/nipype/interfaces/fsl/tests/test_auto_FEATModel.py index 7f1b98c8c4..46e959a2a0 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FEATModel.py +++ b/nipype/interfaces/fsl/tests/test_auto_FEATModel.py @@ -4,23 +4,11 @@ def test_FEATModel_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - ev_files=dict( 
- argstr='%s', - copyfile=False, - mandatory=True, - position=1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + ev_files=dict(argstr="%s", copyfile=False, mandatory=True, position=1,), fsf_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=0, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0, ), output_type=dict(), ) @@ -29,13 +17,15 @@ def test_FEATModel_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FEATModel_outputs(): output_map = dict( - con_file=dict(extensions=None, ), - design_cov=dict(extensions=None, ), - design_file=dict(extensions=None, ), - design_image=dict(extensions=None, ), - fcon_file=dict(extensions=None, ), + con_file=dict(extensions=None,), + design_cov=dict(extensions=None,), + design_file=dict(extensions=None,), + design_image=dict(extensions=None,), + fcon_file=dict(extensions=None,), ) outputs = FEATModel.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py b/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py index 56a3e2ba01..bd7ae5f7c3 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py +++ b/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py @@ -4,20 +4,19 @@ def test_FEATRegister_inputs(): input_map = dict( - feat_dirs=dict(mandatory=True, ), - reg_dof=dict(usedefault=True, ), - reg_image=dict( - extensions=None, - mandatory=True, - ), + feat_dirs=dict(mandatory=True,), + reg_dof=dict(usedefault=True,), + reg_image=dict(extensions=None, mandatory=True,), ) inputs = FEATRegister.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FEATRegister_outputs(): - output_map = dict(fsf_file=dict(extensions=None, ), ) + output_map = dict(fsf_file=dict(extensions=None,),) outputs = FEATRegister.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FIRST.py b/nipype/interfaces/fsl/tests/test_auto_FIRST.py index 4b9cb5928b..c34f1737d2 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FIRST.py +++ b/nipype/interfaces/fsl/tests/test_auto_FIRST.py @@ -4,48 +4,28 @@ def test_FIRST_inputs(): input_map = dict( - affine_file=dict( - argstr='-a %s', - extensions=None, - position=6, - ), - args=dict(argstr='%s', ), - brain_extracted=dict( - argstr='-b', - position=2, - ), - environ=dict( - nohash=True, - usedefault=True, - ), + affine_file=dict(argstr="-a %s", extensions=None, position=6,), + args=dict(argstr="%s",), + brain_extracted=dict(argstr="-b", position=2,), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='-i %s', + argstr="-i %s", copyfile=False, extensions=None, mandatory=True, position=-2, ), - list_of_specific_structures=dict( - argstr='-s %s', - position=5, - sep=',', - ), + list_of_specific_structures=dict(argstr="-s %s", position=5, sep=",",), method=dict( - argstr='-m %s', + argstr="-m %s", position=4, usedefault=True, - xor=['method_as_numerical_threshold'], - ), - method_as_numerical_threshold=dict( - argstr='-m %.4f', - position=4, - ), - no_cleanup=dict( - argstr='-d', - position=3, + xor=["method_as_numerical_threshold"], ), + method_as_numerical_threshold=dict(argstr="-m %.4f", position=4,), + no_cleanup=dict(argstr="-d", position=3,), out_file=dict( - argstr='-o %s', + argstr="-o %s", 
extensions=None, hash_files=False, mandatory=True, @@ -53,21 +33,20 @@ def test_FIRST_inputs(): usedefault=True, ), output_type=dict(), - verbose=dict( - argstr='-v', - position=1, - ), + verbose=dict(argstr="-v", position=1,), ) inputs = FIRST.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FIRST_outputs(): output_map = dict( bvars=dict(), - original_segmentations=dict(extensions=None, ), - segmentation_file=dict(extensions=None, ), + original_segmentations=dict(extensions=None,), + segmentation_file=dict(extensions=None,), vtk_surfaces=dict(), ) outputs = FIRST.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py b/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py index e3dfb7f933..13690485a5 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py +++ b/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py @@ -4,71 +4,37 @@ def test_FLAMEO_inputs(): input_map = dict( - args=dict(argstr='%s', ), - burnin=dict(argstr='--burnin=%d', ), - cope_file=dict( - argstr='--copefile=%s', - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s",), + burnin=dict(argstr="--burnin=%d",), + cope_file=dict(argstr="--copefile=%s", extensions=None, mandatory=True,), cov_split_file=dict( - argstr='--covsplitfile=%s', - extensions=None, - mandatory=True, - ), - design_file=dict( - argstr='--designfile=%s', - extensions=None, - mandatory=True, - ), - dof_var_cope_file=dict( - argstr='--dofvarcopefile=%s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - f_con_file=dict( - argstr='--fcontrastsfile=%s', - extensions=None, - ), - fix_mean=dict(argstr='--fixmean', ), - infer_outliers=dict(argstr='--inferoutliers', ), - log_dir=dict( - argstr='--ld=%s', - usedefault=True, - ), - mask_file=dict( - argstr='--maskfile=%s', - extensions=None, - mandatory=True, - ), - n_jumps=dict(argstr='--njumps=%d', ), - no_pe_outputs=dict(argstr='--nopeoutput', ), - outlier_iter=dict(argstr='--ioni=%d', ), + argstr="--covsplitfile=%s", extensions=None, mandatory=True, + ), + design_file=dict(argstr="--designfile=%s", extensions=None, mandatory=True,), + dof_var_cope_file=dict(argstr="--dofvarcopefile=%s", extensions=None,), + environ=dict(nohash=True, usedefault=True,), + f_con_file=dict(argstr="--fcontrastsfile=%s", extensions=None,), + fix_mean=dict(argstr="--fixmean",), + infer_outliers=dict(argstr="--inferoutliers",), + log_dir=dict(argstr="--ld=%s", usedefault=True,), + mask_file=dict(argstr="--maskfile=%s", extensions=None, mandatory=True,), + n_jumps=dict(argstr="--njumps=%d",), + no_pe_outputs=dict(argstr="--nopeoutput",), + outlier_iter=dict(argstr="--ioni=%d",), output_type=dict(), - run_mode=dict( - argstr='--runmode=%s', - mandatory=True, - ), - sample_every=dict(argstr='--sampleevery=%d', ), - sigma_dofs=dict(argstr='--sigma_dofs=%d', ), - t_con_file=dict( - argstr='--tcontrastsfile=%s', - extensions=None, - mandatory=True, - ), - var_cope_file=dict( - argstr='--varcopefile=%s', - extensions=None, - ), + run_mode=dict(argstr="--runmode=%s", mandatory=True,), + sample_every=dict(argstr="--sampleevery=%d",), + sigma_dofs=dict(argstr="--sigma_dofs=%d",), + t_con_file=dict(argstr="--tcontrastsfile=%s", extensions=None, mandatory=True,), + var_cope_file=dict(argstr="--varcopefile=%s", extensions=None,), ) inputs = FLAMEO.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert 
getattr(inputs.traits()[key], metakey) == value + + def test_FLAMEO_outputs(): output_map = dict( copes=dict(), diff --git a/nipype/interfaces/fsl/tests/test_auto_FLIRT.py b/nipype/interfaces/fsl/tests/test_auto_FLIRT.py index 88cb684c01..ce2fca2486 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FLIRT.py +++ b/nipype/interfaces/fsl/tests/test_auto_FLIRT.py @@ -4,172 +4,90 @@ def test_FLIRT_inputs(): input_map = dict( - angle_rep=dict(argstr='-anglerep %s', ), - apply_isoxfm=dict( - argstr='-applyisoxfm %f', - xor=['apply_xfm'], - ), - apply_xfm=dict(argstr='-applyxfm', ), - args=dict(argstr='%s', ), - bbrslope=dict( - argstr='-bbrslope %f', - min_ver='5.0.0', - ), - bbrtype=dict( - argstr='-bbrtype %s', - min_ver='5.0.0', - ), - bgvalue=dict(argstr='-setbackground %f', ), - bins=dict(argstr='-bins %d', ), - coarse_search=dict( - argstr='-coarsesearch %d', - units='degrees', - ), - cost=dict(argstr='-cost %s', ), - cost_func=dict(argstr='-searchcost %s', ), - datatype=dict(argstr='-datatype %s', ), - display_init=dict(argstr='-displayinit', ), - dof=dict(argstr='-dof %d', ), - echospacing=dict( - argstr='-echospacing %f', - min_ver='5.0.0', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fieldmap=dict( - argstr='-fieldmap %s', - extensions=None, - min_ver='5.0.0', - ), - fieldmapmask=dict( - argstr='-fieldmapmask %s', - extensions=None, - min_ver='5.0.0', - ), - fine_search=dict( - argstr='-finesearch %d', - units='degrees', - ), - force_scaling=dict(argstr='-forcescaling', ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - position=0, - ), - in_matrix_file=dict( - argstr='-init %s', - extensions=None, - ), - in_weight=dict( - argstr='-inweight %s', - extensions=None, - ), - interp=dict(argstr='-interp %s', ), - min_sampling=dict( - argstr='-minsampling %f', - units='mm', - ), - no_clamp=dict(argstr='-noclamp', ), - no_resample=dict(argstr='-noresample', ), - no_resample_blur=dict(argstr='-noresampblur', ), - no_search=dict(argstr='-nosearch', ), + angle_rep=dict(argstr="-anglerep %s",), + apply_isoxfm=dict(argstr="-applyisoxfm %f", xor=["apply_xfm"],), + apply_xfm=dict(argstr="-applyxfm",), + args=dict(argstr="%s",), + bbrslope=dict(argstr="-bbrslope %f", min_ver="5.0.0",), + bbrtype=dict(argstr="-bbrtype %s", min_ver="5.0.0",), + bgvalue=dict(argstr="-setbackground %f",), + bins=dict(argstr="-bins %d",), + coarse_search=dict(argstr="-coarsesearch %d", units="degrees",), + cost=dict(argstr="-cost %s",), + cost_func=dict(argstr="-searchcost %s",), + datatype=dict(argstr="-datatype %s",), + display_init=dict(argstr="-displayinit",), + dof=dict(argstr="-dof %d",), + echospacing=dict(argstr="-echospacing %f", min_ver="5.0.0",), + environ=dict(nohash=True, usedefault=True,), + fieldmap=dict(argstr="-fieldmap %s", extensions=None, min_ver="5.0.0",), + fieldmapmask=dict(argstr="-fieldmapmask %s", extensions=None, min_ver="5.0.0",), + fine_search=dict(argstr="-finesearch %d", units="degrees",), + force_scaling=dict(argstr="-forcescaling",), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=0,), + in_matrix_file=dict(argstr="-init %s", extensions=None,), + in_weight=dict(argstr="-inweight %s", extensions=None,), + interp=dict(argstr="-interp %s",), + min_sampling=dict(argstr="-minsampling %f", units="mm",), + no_clamp=dict(argstr="-noclamp",), + no_resample=dict(argstr="-noresample",), + no_resample_blur=dict(argstr="-noresampblur",), + no_search=dict(argstr="-nosearch",), out_file=dict( - argstr='-out %s', + argstr="-out %s", 
extensions=None, hash_files=False, - name_source=['in_file'], - name_template='%s_flirt', + name_source=["in_file"], + name_template="%s_flirt", position=2, ), out_log=dict( extensions=None, keep_extension=True, - name_source=['in_file'], - name_template='%s_flirt.log', - requires=['save_log'], + name_source=["in_file"], + name_template="%s_flirt.log", + requires=["save_log"], ), out_matrix_file=dict( - argstr='-omat %s', + argstr="-omat %s", extensions=None, hash_files=False, keep_extension=True, - name_source=['in_file'], - name_template='%s_flirt.mat', + name_source=["in_file"], + name_template="%s_flirt.mat", position=3, ), output_type=dict(), - padding_size=dict( - argstr='-paddingsize %d', - units='voxels', - ), - pedir=dict( - argstr='-pedir %d', - min_ver='5.0.0', - ), - ref_weight=dict( - argstr='-refweight %s', - extensions=None, - ), - reference=dict( - argstr='-ref %s', - extensions=None, - mandatory=True, - position=1, - ), - rigid2D=dict(argstr='-2D', ), + padding_size=dict(argstr="-paddingsize %d", units="voxels",), + pedir=dict(argstr="-pedir %d", min_ver="5.0.0",), + ref_weight=dict(argstr="-refweight %s", extensions=None,), + reference=dict(argstr="-ref %s", extensions=None, mandatory=True, position=1,), + rigid2D=dict(argstr="-2D",), save_log=dict(), - schedule=dict( - argstr='-schedule %s', - extensions=None, - ), - searchr_x=dict( - argstr='-searchrx %s', - units='degrees', - ), - searchr_y=dict( - argstr='-searchry %s', - units='degrees', - ), - searchr_z=dict( - argstr='-searchrz %s', - units='degrees', - ), - sinc_width=dict( - argstr='-sincwidth %d', - units='voxels', - ), - sinc_window=dict(argstr='-sincwindow %s', ), - uses_qform=dict(argstr='-usesqform', ), - verbose=dict(argstr='-verbose %d', ), - wm_seg=dict( - argstr='-wmseg %s', - extensions=None, - min_ver='5.0.0', - ), - wmcoords=dict( - argstr='-wmcoords %s', - extensions=None, - min_ver='5.0.0', - ), - wmnorms=dict( - argstr='-wmnorms %s', - extensions=None, - min_ver='5.0.0', - ), + schedule=dict(argstr="-schedule %s", extensions=None,), + searchr_x=dict(argstr="-searchrx %s", units="degrees",), + searchr_y=dict(argstr="-searchry %s", units="degrees",), + searchr_z=dict(argstr="-searchrz %s", units="degrees",), + sinc_width=dict(argstr="-sincwidth %d", units="voxels",), + sinc_window=dict(argstr="-sincwindow %s",), + uses_qform=dict(argstr="-usesqform",), + verbose=dict(argstr="-verbose %d",), + wm_seg=dict(argstr="-wmseg %s", extensions=None, min_ver="5.0.0",), + wmcoords=dict(argstr="-wmcoords %s", extensions=None, min_ver="5.0.0",), + wmnorms=dict(argstr="-wmnorms %s", extensions=None, min_ver="5.0.0",), ) inputs = FLIRT.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FLIRT_outputs(): output_map = dict( - out_file=dict(extensions=None, ), - out_log=dict(extensions=None, ), - out_matrix_file=dict(extensions=None, ), + out_file=dict(extensions=None,), + out_log=dict(extensions=None,), + out_matrix_file=dict(extensions=None,), ) outputs = FLIRT.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FNIRT.py b/nipype/interfaces/fsl/tests/test_auto_FNIRT.py index c54a2d2955..794ae7d5f0 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FNIRT.py +++ b/nipype/interfaces/fsl/tests/test_auto_FNIRT.py @@ -4,133 +4,57 @@ def test_FNIRT_inputs(): input_map = dict( - affine_file=dict( - argstr='--aff=%s', - extensions=None, - ), - apply_inmask=dict( - argstr='--applyinmask=%s', - 
sep=',', - xor=['skip_inmask'], - ), + affine_file=dict(argstr="--aff=%s", extensions=None,), + apply_inmask=dict(argstr="--applyinmask=%s", sep=",", xor=["skip_inmask"],), apply_intensity_mapping=dict( - argstr='--estint=%s', - sep=',', - xor=['skip_intensity_mapping'], - ), - apply_refmask=dict( - argstr='--applyrefmask=%s', - sep=',', - xor=['skip_refmask'], - ), - args=dict(argstr='%s', ), - bias_regularization_lambda=dict(argstr='--biaslambda=%f', ), - biasfield_resolution=dict(argstr='--biasres=%d,%d,%d', ), - config_file=dict(argstr='--config=%s', ), - derive_from_ref=dict(argstr='--refderiv', ), - environ=dict( - nohash=True, - usedefault=True, - ), - field_file=dict( - argstr='--fout=%s', - hash_files=False, - ), - fieldcoeff_file=dict(argstr='--cout=%s', ), - hessian_precision=dict(argstr='--numprec=%s', ), - in_file=dict( - argstr='--in=%s', - extensions=None, - mandatory=True, - ), - in_fwhm=dict( - argstr='--infwhm=%s', - sep=',', - ), - in_intensitymap_file=dict( - argstr='--intin=%s', - copyfile=False, - ), - inmask_file=dict( - argstr='--inmask=%s', - extensions=None, - ), - inmask_val=dict(argstr='--impinval=%f', ), - intensity_mapping_model=dict(argstr='--intmod=%s', ), - intensity_mapping_order=dict(argstr='--intorder=%d', ), - inwarp_file=dict( - argstr='--inwarp=%s', - extensions=None, - ), - jacobian_file=dict( - argstr='--jout=%s', - hash_files=False, - ), - jacobian_range=dict(argstr='--jacrange=%f,%f', ), + argstr="--estint=%s", sep=",", xor=["skip_intensity_mapping"], + ), + apply_refmask=dict(argstr="--applyrefmask=%s", sep=",", xor=["skip_refmask"],), + args=dict(argstr="%s",), + bias_regularization_lambda=dict(argstr="--biaslambda=%f",), + biasfield_resolution=dict(argstr="--biasres=%d,%d,%d",), + config_file=dict(argstr="--config=%s",), + derive_from_ref=dict(argstr="--refderiv",), + environ=dict(nohash=True, usedefault=True,), + field_file=dict(argstr="--fout=%s", hash_files=False,), + fieldcoeff_file=dict(argstr="--cout=%s",), + hessian_precision=dict(argstr="--numprec=%s",), + in_file=dict(argstr="--in=%s", extensions=None, mandatory=True,), + in_fwhm=dict(argstr="--infwhm=%s", sep=",",), + in_intensitymap_file=dict(argstr="--intin=%s", copyfile=False,), + inmask_file=dict(argstr="--inmask=%s", extensions=None,), + inmask_val=dict(argstr="--impinval=%f",), + intensity_mapping_model=dict(argstr="--intmod=%s",), + intensity_mapping_order=dict(argstr="--intorder=%d",), + inwarp_file=dict(argstr="--inwarp=%s", extensions=None,), + jacobian_file=dict(argstr="--jout=%s", hash_files=False,), + jacobian_range=dict(argstr="--jacrange=%f,%f",), log_file=dict( - argstr='--logout=%s', - extensions=None, - genfile=True, - hash_files=False, - ), - max_nonlin_iter=dict( - argstr='--miter=%s', - sep=',', - ), - modulatedref_file=dict( - argstr='--refout=%s', - hash_files=False, - ), - out_intensitymap_file=dict( - argstr='--intout=%s', - hash_files=False, + argstr="--logout=%s", extensions=None, genfile=True, hash_files=False, ), + max_nonlin_iter=dict(argstr="--miter=%s", sep=",",), + modulatedref_file=dict(argstr="--refout=%s", hash_files=False,), + out_intensitymap_file=dict(argstr="--intout=%s", hash_files=False,), output_type=dict(), - ref_file=dict( - argstr='--ref=%s', - extensions=None, - mandatory=True, - ), - ref_fwhm=dict( - argstr='--reffwhm=%s', - sep=',', - ), - refmask_file=dict( - argstr='--refmask=%s', - extensions=None, - ), - refmask_val=dict(argstr='--imprefval=%f', ), - regularization_lambda=dict( - argstr='--lambda=%s', - sep=',', - ), - 
regularization_model=dict(argstr='--regmod=%s', ), - skip_implicit_in_masking=dict(argstr='--impinm=0', ), - skip_implicit_ref_masking=dict(argstr='--imprefm=0', ), - skip_inmask=dict( - argstr='--applyinmask=0', - xor=['apply_inmask'], - ), + ref_file=dict(argstr="--ref=%s", extensions=None, mandatory=True,), + ref_fwhm=dict(argstr="--reffwhm=%s", sep=",",), + refmask_file=dict(argstr="--refmask=%s", extensions=None,), + refmask_val=dict(argstr="--imprefval=%f",), + regularization_lambda=dict(argstr="--lambda=%s", sep=",",), + regularization_model=dict(argstr="--regmod=%s",), + skip_implicit_in_masking=dict(argstr="--impinm=0",), + skip_implicit_ref_masking=dict(argstr="--imprefm=0",), + skip_inmask=dict(argstr="--applyinmask=0", xor=["apply_inmask"],), skip_intensity_mapping=dict( - argstr='--estint=0', - xor=['apply_intensity_mapping'], - ), - skip_lambda_ssq=dict(argstr='--ssqlambda=0', ), - skip_refmask=dict( - argstr='--applyrefmask=0', - xor=['apply_refmask'], + argstr="--estint=0", xor=["apply_intensity_mapping"], ), - spline_order=dict(argstr='--splineorder=%d', ), - subsampling_scheme=dict( - argstr='--subsamp=%s', - sep=',', - ), - warp_resolution=dict(argstr='--warpres=%d,%d,%d', ), + skip_lambda_ssq=dict(argstr="--ssqlambda=0",), + skip_refmask=dict(argstr="--applyrefmask=0", xor=["apply_refmask"],), + spline_order=dict(argstr="--splineorder=%d",), + subsampling_scheme=dict(argstr="--subsamp=%s", sep=",",), + warp_resolution=dict(argstr="--warpres=%d,%d,%d",), warped_file=dict( - argstr='--iout=%s', - extensions=None, - genfile=True, - hash_files=False, + argstr="--iout=%s", extensions=None, genfile=True, hash_files=False, ), ) inputs = FNIRT.input_spec() @@ -138,15 +62,17 @@ def test_FNIRT_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FNIRT_outputs(): output_map = dict( - field_file=dict(extensions=None, ), - fieldcoeff_file=dict(extensions=None, ), - jacobian_file=dict(extensions=None, ), - log_file=dict(extensions=None, ), - modulatedref_file=dict(extensions=None, ), + field_file=dict(extensions=None,), + fieldcoeff_file=dict(extensions=None,), + jacobian_file=dict(extensions=None,), + log_file=dict(extensions=None,), + modulatedref_file=dict(extensions=None,), out_intensitymap_file=dict(), - warped_file=dict(extensions=None, ), + warped_file=dict(extensions=None,), ) outputs = FNIRT.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py b/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py index 5398e4532d..bc4c0443ee 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py +++ b/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py @@ -4,11 +4,8 @@ def test_FSLCommand_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), output_type=dict(), ) inputs = FSLCommand.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py b/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py index 4ca61eebf1..d9c30cd262 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py +++ b/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py @@ -4,111 +4,52 @@ def test_FSLXCommand_inputs(): input_map = dict( - all_ard=dict( - argstr='--allard', - xor=('no_ard', 'all_ard'), - ), - args=dict(argstr='%s', ), - burn_in=dict( - argstr='--burnin=%d', - usedefault=True, - ), - 
burn_in_no_ard=dict( - argstr='--burnin_noard=%d', - usedefault=True, - ), - bvals=dict( - argstr='--bvals=%s', - extensions=None, - mandatory=True, - ), - bvecs=dict( - argstr='--bvecs=%s', - extensions=None, - mandatory=True, - ), + all_ard=dict(argstr="--allard", xor=("no_ard", "all_ard"),), + args=dict(argstr="%s",), + burn_in=dict(argstr="--burnin=%d", usedefault=True,), + burn_in_no_ard=dict(argstr="--burnin_noard=%d", usedefault=True,), + bvals=dict(argstr="--bvals=%s", extensions=None, mandatory=True,), + bvecs=dict(argstr="--bvecs=%s", extensions=None, mandatory=True,), cnlinear=dict( - argstr='--cnonlinear', - xor=('no_spat', 'non_linear', 'cnlinear'), - ), - dwi=dict( - argstr='--data=%s', - extensions=None, - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - f0_ard=dict( - argstr='--f0 --ardf0', - xor=['f0_noard', 'f0_ard', 'all_ard'], - ), - f0_noard=dict( - argstr='--f0', - xor=['f0_noard', 'f0_ard'], - ), - force_dir=dict( - argstr='--forcedir', - usedefault=True, - ), - fudge=dict(argstr='--fudge=%d', ), - logdir=dict( - argstr='--logdir=%s', - usedefault=True, - ), - mask=dict( - argstr='--mask=%s', - extensions=None, - mandatory=True, - ), - model=dict(argstr='--model=%d', ), - n_fibres=dict( - argstr='--nfibres=%d', - mandatory=True, - usedefault=True, - ), - n_jumps=dict( - argstr='--njumps=%d', - usedefault=True, - ), - no_ard=dict( - argstr='--noard', - xor=('no_ard', 'all_ard'), - ), - no_spat=dict( - argstr='--nospat', - xor=('no_spat', 'non_linear', 'cnlinear'), - ), + argstr="--cnonlinear", xor=("no_spat", "non_linear", "cnlinear"), + ), + dwi=dict(argstr="--data=%s", extensions=None, mandatory=True,), + environ=dict(nohash=True, usedefault=True,), + f0_ard=dict(argstr="--f0 --ardf0", xor=["f0_noard", "f0_ard", "all_ard"],), + f0_noard=dict(argstr="--f0", xor=["f0_noard", "f0_ard"],), + force_dir=dict(argstr="--forcedir", usedefault=True,), + fudge=dict(argstr="--fudge=%d",), + logdir=dict(argstr="--logdir=%s", usedefault=True,), + mask=dict(argstr="--mask=%s", extensions=None, mandatory=True,), + model=dict(argstr="--model=%d",), + n_fibres=dict(argstr="--nfibres=%d", mandatory=True, usedefault=True,), + n_jumps=dict(argstr="--njumps=%d", usedefault=True,), + no_ard=dict(argstr="--noard", xor=("no_ard", "all_ard"),), + no_spat=dict(argstr="--nospat", xor=("no_spat", "non_linear", "cnlinear"),), non_linear=dict( - argstr='--nonlinear', - xor=('no_spat', 'non_linear', 'cnlinear'), + argstr="--nonlinear", xor=("no_spat", "non_linear", "cnlinear"), ), output_type=dict(), - rician=dict(argstr='--rician', ), - sample_every=dict( - argstr='--sampleevery=%d', - usedefault=True, - ), - seed=dict(argstr='--seed=%d', ), - update_proposal_every=dict( - argstr='--updateproposalevery=%d', - usedefault=True, - ), + rician=dict(argstr="--rician",), + sample_every=dict(argstr="--sampleevery=%d", usedefault=True,), + seed=dict(argstr="--seed=%d",), + update_proposal_every=dict(argstr="--updateproposalevery=%d", usedefault=True,), ) inputs = FSLXCommand.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FSLXCommand_outputs(): output_map = dict( dyads=dict(), fsamples=dict(), - mean_S0samples=dict(extensions=None, ), - mean_dsamples=dict(extensions=None, ), + mean_S0samples=dict(extensions=None,), + mean_dsamples=dict(extensions=None,), mean_fsamples=dict(), - mean_tausamples=dict(extensions=None, ), + 
mean_tausamples=dict(extensions=None,), phsamples=dict(), thsamples=dict(), ) diff --git a/nipype/interfaces/fsl/tests/test_auto_FUGUE.py b/nipype/interfaces/fsl/tests/test_auto_FUGUE.py index d1fcb85640..a1f6873658 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FUGUE.py +++ b/nipype/interfaces/fsl/tests/test_auto_FUGUE.py @@ -4,86 +4,50 @@ def test_FUGUE_inputs(): input_map = dict( - args=dict(argstr='%s', ), - asym_se_time=dict(argstr='--asym=%.10f', ), - despike_2dfilter=dict(argstr='--despike', ), - despike_threshold=dict(argstr='--despikethreshold=%s', ), - dwell_time=dict(argstr='--dwell=%.10f', ), - dwell_to_asym_ratio=dict(argstr='--dwelltoasym=%.10f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fmap_in_file=dict( - argstr='--loadfmap=%s', - extensions=None, - ), - fmap_out_file=dict( - argstr='--savefmap=%s', - extensions=None, - ), - forward_warping=dict(usedefault=True, ), - fourier_order=dict(argstr='--fourier=%d', ), - icorr=dict( - argstr='--icorr', - requires=['shift_in_file'], - ), - icorr_only=dict( - argstr='--icorronly', - requires=['unwarped_file'], - ), - in_file=dict( - argstr='--in=%s', - extensions=None, - ), - mask_file=dict( - argstr='--mask=%s', - extensions=None, - ), - median_2dfilter=dict(argstr='--median', ), - no_extend=dict(argstr='--noextend', ), - no_gap_fill=dict(argstr='--nofill', ), - nokspace=dict(argstr='--nokspace', ), + args=dict(argstr="%s",), + asym_se_time=dict(argstr="--asym=%.10f",), + despike_2dfilter=dict(argstr="--despike",), + despike_threshold=dict(argstr="--despikethreshold=%s",), + dwell_time=dict(argstr="--dwell=%.10f",), + dwell_to_asym_ratio=dict(argstr="--dwelltoasym=%.10f",), + environ=dict(nohash=True, usedefault=True,), + fmap_in_file=dict(argstr="--loadfmap=%s", extensions=None,), + fmap_out_file=dict(argstr="--savefmap=%s", extensions=None,), + forward_warping=dict(usedefault=True,), + fourier_order=dict(argstr="--fourier=%d",), + icorr=dict(argstr="--icorr", requires=["shift_in_file"],), + icorr_only=dict(argstr="--icorronly", requires=["unwarped_file"],), + in_file=dict(argstr="--in=%s", extensions=None,), + mask_file=dict(argstr="--mask=%s", extensions=None,), + median_2dfilter=dict(argstr="--median",), + no_extend=dict(argstr="--noextend",), + no_gap_fill=dict(argstr="--nofill",), + nokspace=dict(argstr="--nokspace",), output_type=dict(), - pava=dict(argstr='--pava', ), - phase_conjugate=dict(argstr='--phaseconj', ), - phasemap_in_file=dict( - argstr='--phasemap=%s', - extensions=None, - ), - poly_order=dict(argstr='--poly=%d', ), - save_fmap=dict(xor=['save_unmasked_fmap'], ), - save_shift=dict(xor=['save_unmasked_shift'], ), - save_unmasked_fmap=dict( - argstr='--unmaskfmap', - xor=['save_fmap'], - ), - save_unmasked_shift=dict( - argstr='--unmaskshift', - xor=['save_shift'], - ), - shift_in_file=dict( - argstr='--loadshift=%s', - extensions=None, - ), - shift_out_file=dict( - argstr='--saveshift=%s', - extensions=None, - ), - smooth2d=dict(argstr='--smooth2=%.2f', ), - smooth3d=dict(argstr='--smooth3=%.2f', ), - unwarp_direction=dict(argstr='--unwarpdir=%s', ), + pava=dict(argstr="--pava",), + phase_conjugate=dict(argstr="--phaseconj",), + phasemap_in_file=dict(argstr="--phasemap=%s", extensions=None,), + poly_order=dict(argstr="--poly=%d",), + save_fmap=dict(xor=["save_unmasked_fmap"],), + save_shift=dict(xor=["save_unmasked_shift"],), + save_unmasked_fmap=dict(argstr="--unmaskfmap", xor=["save_fmap"],), + save_unmasked_shift=dict(argstr="--unmaskshift", xor=["save_shift"],), + 
shift_in_file=dict(argstr="--loadshift=%s", extensions=None,), + shift_out_file=dict(argstr="--saveshift=%s", extensions=None,), + smooth2d=dict(argstr="--smooth2=%.2f",), + smooth3d=dict(argstr="--smooth3=%.2f",), + unwarp_direction=dict(argstr="--unwarpdir=%s",), unwarped_file=dict( - argstr='--unwarp=%s', + argstr="--unwarp=%s", extensions=None, - requires=['in_file'], - xor=['warped_file'], + requires=["in_file"], + xor=["warped_file"], ), warped_file=dict( - argstr='--warp=%s', + argstr="--warp=%s", extensions=None, - requires=['in_file'], - xor=['unwarped_file'], + requires=["in_file"], + xor=["unwarped_file"], ), ) inputs = FUGUE.input_spec() @@ -91,12 +55,14 @@ def test_FUGUE_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FUGUE_outputs(): output_map = dict( - fmap_out_file=dict(extensions=None, ), - shift_out_file=dict(extensions=None, ), - unwarped_file=dict(extensions=None, ), - warped_file=dict(extensions=None, ), + fmap_out_file=dict(extensions=None,), + shift_out_file=dict(extensions=None,), + unwarped_file=dict(extensions=None,), + warped_file=dict(extensions=None,), ) outputs = FUGUE.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py b/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py index 20fc8061f3..03d627a0bf 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py +++ b/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py @@ -4,28 +4,19 @@ def test_FeatureExtractor_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - mel_ica=dict( - argstr='%s', - copyfile=False, - position=-1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + mel_ica=dict(argstr="%s", copyfile=False, position=-1,), ) inputs = FeatureExtractor.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FeatureExtractor_outputs(): - output_map = dict(mel_ica=dict( - argstr='%s', - copyfile=False, - position=-1, - ), ) + output_map = dict(mel_ica=dict(argstr="%s", copyfile=False, position=-1,),) outputs = FeatureExtractor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py b/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py index 47470f5209..d63a61ea1c 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py +++ b/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py @@ -4,57 +4,33 @@ def test_FilterRegressor_inputs(): input_map = dict( - args=dict(argstr='%s', ), - design_file=dict( - argstr='-d %s', - extensions=None, - mandatory=True, - position=3, - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + design_file=dict(argstr="-d %s", extensions=None, mandatory=True, position=3,), + environ=dict(nohash=True, usedefault=True,), filter_all=dict( - argstr="-f '%s'", - mandatory=True, - position=4, - xor=['filter_columns'], + argstr="-f '%s'", mandatory=True, position=4, xor=["filter_columns"], ), filter_columns=dict( - argstr="-f '%s'", - mandatory=True, - position=4, - xor=['filter_all'], - ), - in_file=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - position=1, - ), - mask=dict( - argstr='-m %s', - extensions=None, + argstr="-f '%s'", mandatory=True, position=4, 
xor=["filter_all"], ), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=1,), + mask=dict(argstr="-m %s", extensions=None,), out_file=dict( - argstr='-o %s', - extensions=None, - genfile=True, - hash_files=False, - position=2, + argstr="-o %s", extensions=None, genfile=True, hash_files=False, position=2, ), - out_vnscales=dict(argstr='--out_vnscales', ), + out_vnscales=dict(argstr="--out_vnscales",), output_type=dict(), - var_norm=dict(argstr='--vn', ), + var_norm=dict(argstr="--vn",), ) inputs = FilterRegressor.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FilterRegressor_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = FilterRegressor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py b/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py index a6ed3974e1..0152e34ed0 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py +++ b/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py @@ -4,22 +4,11 @@ def test_FindTheBiggest_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr='%s', - mandatory=True, - position=0, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_files=dict(argstr="%s", mandatory=True, position=0,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=2, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=2, ), output_type=dict(), ) @@ -28,11 +17,10 @@ def test_FindTheBiggest_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FindTheBiggest_outputs(): - output_map = dict(out_file=dict( - argstr='%s', - extensions=None, - ), ) + output_map = dict(out_file=dict(argstr="%s", extensions=None,),) outputs = FindTheBiggest.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_GLM.py b/nipype/interfaces/fsl/tests/test_auto_GLM.py index 887e5fcee5..61a550884d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_GLM.py +++ b/nipype/interfaces/fsl/tests/test_auto_GLM.py @@ -4,101 +4,51 @@ def test_GLM_inputs(): input_map = dict( - args=dict(argstr='%s', ), - contrasts=dict( - argstr='-c %s', - extensions=None, - ), - dat_norm=dict(argstr='--dat_norm', ), - demean=dict(argstr='--demean', ), - des_norm=dict(argstr='--des_norm', ), - design=dict( - argstr='-d %s', - extensions=None, - mandatory=True, - position=2, - ), - dof=dict(argstr='--dof=%d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - position=1, - ), - mask=dict( - argstr='-m %s', - extensions=None, - ), - out_cope=dict( - argstr='--out_cope=%s', - extensions=None, - ), - out_data_name=dict( - argstr='--out_data=%s', - extensions=None, - ), - out_f_name=dict( - argstr='--out_f=%s', - extensions=None, - ), + args=dict(argstr="%s",), + contrasts=dict(argstr="-c %s", extensions=None,), + dat_norm=dict(argstr="--dat_norm",), + demean=dict(argstr="--demean",), + des_norm=dict(argstr="--des_norm",), + design=dict(argstr="-d %s", extensions=None, mandatory=True, position=2,), + 
dof=dict(argstr="--dof=%d",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=1,), + mask=dict(argstr="-m %s", extensions=None,), + out_cope=dict(argstr="--out_cope=%s", extensions=None,), + out_data_name=dict(argstr="--out_data=%s", extensions=None,), + out_f_name=dict(argstr="--out_f=%s", extensions=None,), out_file=dict( - argstr='-o %s', + argstr="-o %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_glm', + name_source="in_file", + name_template="%s_glm", position=3, ), - out_p_name=dict( - argstr='--out_p=%s', - extensions=None, - ), - out_pf_name=dict( - argstr='--out_pf=%s', - extensions=None, - ), - out_res_name=dict( - argstr='--out_res=%s', - extensions=None, - ), - out_sigsq_name=dict( - argstr='--out_sigsq=%s', - extensions=None, - ), - out_t_name=dict( - argstr='--out_t=%s', - extensions=None, - ), - out_varcb_name=dict( - argstr='--out_varcb=%s', - extensions=None, - ), - out_vnscales_name=dict( - argstr='--out_vnscales=%s', - extensions=None, - ), - out_z_name=dict( - argstr='--out_z=%s', - extensions=None, - ), + out_p_name=dict(argstr="--out_p=%s", extensions=None,), + out_pf_name=dict(argstr="--out_pf=%s", extensions=None,), + out_res_name=dict(argstr="--out_res=%s", extensions=None,), + out_sigsq_name=dict(argstr="--out_sigsq=%s", extensions=None,), + out_t_name=dict(argstr="--out_t=%s", extensions=None,), + out_varcb_name=dict(argstr="--out_varcb=%s", extensions=None,), + out_vnscales_name=dict(argstr="--out_vnscales=%s", extensions=None,), + out_z_name=dict(argstr="--out_z=%s", extensions=None,), output_type=dict(), - var_norm=dict(argstr='--vn', ), + var_norm=dict(argstr="--vn",), ) inputs = GLM.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_GLM_outputs(): output_map = dict( out_cope=dict(), out_data=dict(), out_f=dict(), - out_file=dict(extensions=None, ), + out_file=dict(extensions=None,), out_p=dict(), out_pf=dict(), out_res=dict(), diff --git a/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py b/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py index cb55f36e28..51975c5bef 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py +++ b/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py @@ -4,68 +4,39 @@ def test_ICA_AROMA_inputs(): input_map = dict( - TR=dict(argstr='-tr %.3f', ), - args=dict(argstr='%s', ), - denoise_type=dict( - argstr='-den %s', - mandatory=True, - usedefault=True, - ), - dim=dict(argstr='-dim %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), + TR=dict(argstr="-tr %.3f",), + args=dict(argstr="%s",), + denoise_type=dict(argstr="-den %s", mandatory=True, usedefault=True,), + dim=dict(argstr="-dim %d",), + environ=dict(nohash=True, usedefault=True,), feat_dir=dict( - argstr='-feat %s', + argstr="-feat %s", mandatory=True, - xor=[ - 'in_file', 'mat_file', 'fnirt_warp_file', 'motion_parameters' - ], - ), - fnirt_warp_file=dict( - argstr='-warp %s', - extensions=None, - xor=['feat_dir'], + xor=["in_file", "mat_file", "fnirt_warp_file", "motion_parameters"], ), + fnirt_warp_file=dict(argstr="-warp %s", extensions=None, xor=["feat_dir"],), in_file=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - xor=['feat_dir'], - ), - mask=dict( - argstr='-m %s', - extensions=None, - xor=['feat_dir'], + argstr="-i %s", extensions=None, mandatory=True, xor=["feat_dir"], ), - mat_file=dict( - 
argstr='-affmat %s', - extensions=None, - xor=['feat_dir'], - ), - melodic_dir=dict(argstr='-meldir %s', ), + mask=dict(argstr="-m %s", extensions=None, xor=["feat_dir"],), + mat_file=dict(argstr="-affmat %s", extensions=None, xor=["feat_dir"],), + melodic_dir=dict(argstr="-meldir %s",), motion_parameters=dict( - argstr='-mc %s', - extensions=None, - mandatory=True, - xor=['feat_dir'], - ), - out_dir=dict( - argstr='-o %s', - mandatory=True, - usedefault=True, + argstr="-mc %s", extensions=None, mandatory=True, xor=["feat_dir"], ), + out_dir=dict(argstr="-o %s", mandatory=True, usedefault=True,), ) inputs = ICA_AROMA.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ICA_AROMA_outputs(): output_map = dict( - aggr_denoised_file=dict(extensions=None, ), - nonaggr_denoised_file=dict(extensions=None, ), + aggr_denoised_file=dict(extensions=None,), + nonaggr_denoised_file=dict(extensions=None,), out_dir=dict(), ) outputs = ICA_AROMA.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py b/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py index 9c3c254fea..02a73d2662 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py @@ -4,40 +4,15 @@ def test_ImageMaths_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=1, - ), - in_file2=dict( - argstr='%s', - extensions=None, - position=3, - ), - mask_file=dict( - argstr='-mas %s', - extensions=None, - ), - op_string=dict( - argstr='%s', - position=2, - ), - out_data_type=dict( - argstr='-odt %s', - position=-1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1,), + in_file2=dict(argstr="%s", extensions=None, position=3,), + mask_file=dict(argstr="-mas %s", extensions=None,), + op_string=dict(argstr="%s", position=2,), + out_data_type=dict(argstr="-odt %s", position=-1,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=-2, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, ), output_type=dict(), suffix=dict(), @@ -47,8 +22,10 @@ def test_ImageMaths_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ImageMaths_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = ImageMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py b/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py index 2565d8536d..2d53d25c1f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py +++ b/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py @@ -4,46 +4,29 @@ def test_ImageMeants_inputs(): input_map = dict( - args=dict(argstr='%s', ), - eig=dict(argstr='--eig', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - position=0, - ), - mask=dict( - argstr='-m %s', - extensions=None, - ), - nobin=dict(argstr='--no_bin', ), - order=dict( - argstr='--order=%d', - usedefault=True, - ), - out_file=dict( - 
argstr='-o %s', - extensions=None, - genfile=True, - hash_files=False, - ), + args=dict(argstr="%s",), + eig=dict(argstr="--eig",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=0,), + mask=dict(argstr="-m %s", extensions=None,), + nobin=dict(argstr="--no_bin",), + order=dict(argstr="--order=%d", usedefault=True,), + out_file=dict(argstr="-o %s", extensions=None, genfile=True, hash_files=False,), output_type=dict(), - show_all=dict(argstr='--showall', ), - spatial_coord=dict(argstr='-c %s', ), - transpose=dict(argstr='--transpose', ), - use_mm=dict(argstr='--usemm', ), + show_all=dict(argstr="--showall",), + spatial_coord=dict(argstr="-c %s",), + transpose=dict(argstr="--transpose",), + use_mm=dict(argstr="--usemm",), ) inputs = ImageMeants.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ImageMeants_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = ImageMeants.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageStats.py b/nipype/interfaces/fsl/tests/test_auto_ImageStats.py index f1ad146797..0c309880bb 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ImageStats.py +++ b/nipype/interfaces/fsl/tests/test_auto_ImageStats.py @@ -4,44 +4,24 @@ def test_ImageStats_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=3, - ), - index_mask_file=dict( - argstr='-K %s', - extensions=None, - position=2, - ), - mask_file=dict( - argstr='', - extensions=None, - ), - op_string=dict( - argstr='%s', - mandatory=True, - position=4, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=3,), + index_mask_file=dict(argstr="-K %s", extensions=None, position=2,), + mask_file=dict(argstr="", extensions=None,), + op_string=dict(argstr="%s", mandatory=True, position=4,), output_type=dict(), - split_4d=dict( - argstr='-t', - position=1, - ), + split_4d=dict(argstr="-t", position=1,), ) inputs = ImageStats.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ImageStats_outputs(): - output_map = dict(out_stat=dict(), ) + output_map = dict(out_stat=dict(),) outputs = ImageStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_InvWarp.py b/nipype/interfaces/fsl/tests/test_auto_InvWarp.py index b78ede0ec4..b116f19737 100644 --- a/nipype/interfaces/fsl/tests/test_auto_InvWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_InvWarp.py @@ -4,50 +4,35 @@ def test_InvWarp_inputs(): input_map = dict( - absolute=dict( - argstr='--abs', - xor=['relative'], - ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + absolute=dict(argstr="--abs", xor=["relative"],), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), inverse_warp=dict( - argstr='--out=%s', + argstr="--out=%s", extensions=None, hash_files=False, - name_source=['warp'], - name_template='%s_inverse', + name_source=["warp"], + name_template="%s_inverse", ), - 
jacobian_max=dict(argstr='--jmax=%f', ), - jacobian_min=dict(argstr='--jmin=%f', ), - niter=dict(argstr='--niter=%d', ), - noconstraint=dict(argstr='--noconstraint', ), + jacobian_max=dict(argstr="--jmax=%f",), + jacobian_min=dict(argstr="--jmin=%f",), + niter=dict(argstr="--niter=%d",), + noconstraint=dict(argstr="--noconstraint",), output_type=dict(), - reference=dict( - argstr='--ref=%s', - extensions=None, - mandatory=True, - ), - regularise=dict(argstr='--regularise=%f', ), - relative=dict( - argstr='--rel', - xor=['absolute'], - ), - warp=dict( - argstr='--warp=%s', - extensions=None, - mandatory=True, - ), + reference=dict(argstr="--ref=%s", extensions=None, mandatory=True,), + regularise=dict(argstr="--regularise=%f",), + relative=dict(argstr="--rel", xor=["absolute"],), + warp=dict(argstr="--warp=%s", extensions=None, mandatory=True,), ) inputs = InvWarp.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_InvWarp_outputs(): - output_map = dict(inverse_warp=dict(extensions=None, ), ) + output_map = dict(inverse_warp=dict(extensions=None,),) outputs = InvWarp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py b/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py index 5cad38954b..58186672ec 100644 --- a/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py +++ b/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py @@ -4,57 +4,28 @@ def test_IsotropicSmooth_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fwhm=dict( - argstr='-s %.5f', - mandatory=True, - position=4, - xor=['sigma'], - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr='-dt %s', - position=1, - ), - nan2zeros=dict( - argstr='-nan', - position=3, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + fwhm=dict(argstr="-s %.5f", mandatory=True, position=4, xor=["sigma"],), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + internal_datatype=dict(argstr="-dt %s", position=1,), + nan2zeros=dict(argstr="-nan", position=3,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr='-odt %s', - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, ), + output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), - sigma=dict( - argstr='-s %.5f', - mandatory=True, - position=4, - xor=['fwhm'], - ), + sigma=dict(argstr="-s %.5f", mandatory=True, position=4, xor=["fwhm"],), ) inputs = IsotropicSmooth.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_IsotropicSmooth_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = IsotropicSmooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_L2Model.py b/nipype/interfaces/fsl/tests/test_auto_L2Model.py index aad29206e2..6d16cc6038 100644 --- a/nipype/interfaces/fsl/tests/test_auto_L2Model.py +++ b/nipype/interfaces/fsl/tests/test_auto_L2Model.py @@ -3,17 +3,19 @@ def test_L2Model_inputs(): - input_map = 
dict(num_copes=dict(mandatory=True, ), ) + input_map = dict(num_copes=dict(mandatory=True,),) inputs = L2Model.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_L2Model_outputs(): output_map = dict( - design_con=dict(extensions=None, ), - design_grp=dict(extensions=None, ), - design_mat=dict(extensions=None, ), + design_con=dict(extensions=None,), + design_grp=dict(extensions=None,), + design_mat=dict(extensions=None,), ) outputs = L2Model.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Level1Design.py b/nipype/interfaces/fsl/tests/test_auto_Level1Design.py index 8b1c076ac6..f8ed336e43 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Level1Design.py +++ b/nipype/interfaces/fsl/tests/test_auto_Level1Design.py @@ -4,23 +4,22 @@ def test_Level1Design_inputs(): input_map = dict( - bases=dict(mandatory=True, ), + bases=dict(mandatory=True,), contrasts=dict(), - interscan_interval=dict(mandatory=True, ), - model_serial_correlations=dict(mandatory=True, ), - orthogonalization=dict(usedefault=True, ), - session_info=dict(mandatory=True, ), + interscan_interval=dict(mandatory=True,), + model_serial_correlations=dict(mandatory=True,), + orthogonalization=dict(usedefault=True,), + session_info=dict(mandatory=True,), ) inputs = Level1Design.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Level1Design_outputs(): - output_map = dict( - ev_files=dict(), - fsf_files=dict(), - ) + output_map = dict(ev_files=dict(), fsf_files=dict(),) outputs = Level1Design.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py b/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py index 84cf5cdc98..8f52f40eb0 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py +++ b/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py @@ -4,63 +4,48 @@ def test_MCFLIRT_inputs(): input_map = dict( - args=dict(argstr='%s', ), - bins=dict(argstr='-bins %d', ), - cost=dict(argstr='-cost %s', ), - dof=dict(argstr='-dof %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - position=0, - ), - init=dict( - argstr='-init %s', - extensions=None, - ), - interpolation=dict(argstr='-%s_final', ), - mean_vol=dict(argstr='-meanvol', ), + args=dict(argstr="%s",), + bins=dict(argstr="-bins %d",), + cost=dict(argstr="-cost %s",), + dof=dict(argstr="-dof %d",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=0,), + init=dict(argstr="-init %s", extensions=None,), + interpolation=dict(argstr="-%s_final",), + mean_vol=dict(argstr="-meanvol",), out_file=dict( - argstr='-out %s', - extensions=None, - genfile=True, - hash_files=False, + argstr="-out %s", extensions=None, genfile=True, hash_files=False, ), output_type=dict(), - ref_file=dict( - argstr='-reffile %s', - extensions=None, - ), - ref_vol=dict(argstr='-refvol %d', ), - rotation=dict(argstr='-rotation %d', ), - save_mats=dict(argstr='-mats', ), - save_plots=dict(argstr='-plots', ), - save_rms=dict(argstr='-rmsabs -rmsrel', ), - scaling=dict(argstr='-scaling %.2f', ), - smooth=dict(argstr='-smooth %.2f', ), - stages=dict(argstr='-stages %d', ), - stats_imgs=dict(argstr='-stats', ), - use_contour=dict(argstr='-edge', ), - 
use_gradient=dict(argstr='-gdt', ), + ref_file=dict(argstr="-reffile %s", extensions=None,), + ref_vol=dict(argstr="-refvol %d",), + rotation=dict(argstr="-rotation %d",), + save_mats=dict(argstr="-mats",), + save_plots=dict(argstr="-plots",), + save_rms=dict(argstr="-rmsabs -rmsrel",), + scaling=dict(argstr="-scaling %.2f",), + smooth=dict(argstr="-smooth %.2f",), + stages=dict(argstr="-stages %d",), + stats_imgs=dict(argstr="-stats",), + use_contour=dict(argstr="-edge",), + use_gradient=dict(argstr="-gdt",), ) inputs = MCFLIRT.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MCFLIRT_outputs(): output_map = dict( mat_file=dict(), - mean_img=dict(extensions=None, ), - out_file=dict(extensions=None, ), - par_file=dict(extensions=None, ), + mean_img=dict(extensions=None,), + out_file=dict(extensions=None,), + par_file=dict(extensions=None,), rms_files=dict(), - std_img=dict(extensions=None, ), - variance_img=dict(extensions=None, ), + std_img=dict(extensions=None,), + variance_img=dict(extensions=None,), ) outputs = MCFLIRT.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_MELODIC.py b/nipype/interfaces/fsl/tests/test_auto_MELODIC.py index 2c0aeab9f1..86e4e0e2a2 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MELODIC.py +++ b/nipype/interfaces/fsl/tests/test_auto_MELODIC.py @@ -4,106 +4,67 @@ def test_MELODIC_inputs(): input_map = dict( - ICs=dict( - argstr='--ICs=%s', - extensions=None, - ), - approach=dict(argstr='-a %s', ), - args=dict(argstr='%s', ), - bg_image=dict( - argstr='--bgimage=%s', - extensions=None, - ), - bg_threshold=dict(argstr='--bgthreshold=%f', ), - cov_weight=dict(argstr='--covarweight=%f', ), - dim=dict(argstr='-d %d', ), - dim_est=dict(argstr='--dimest=%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - epsilon=dict(argstr='--eps=%f', ), - epsilonS=dict(argstr='--epsS=%f', ), - in_files=dict( - argstr='-i %s', - mandatory=True, - position=0, - sep=',', - ), - log_power=dict(argstr='--logPower', ), - mask=dict( - argstr='-m %s', - extensions=None, - ), - max_restart=dict(argstr='--maxrestart=%d', ), - maxit=dict(argstr='--maxit=%d', ), - migp=dict(argstr='--migp', ), - migpN=dict(argstr='--migpN %d', ), - migp_factor=dict(argstr='--migp_factor %d', ), - migp_shuffle=dict(argstr='--migp_shuffle', ), - mix=dict( - argstr='--mix=%s', - extensions=None, - ), - mm_thresh=dict(argstr='--mmthresh=%f', ), - no_bet=dict(argstr='--nobet', ), - no_mask=dict(argstr='--nomask', ), - no_mm=dict(argstr='--no_mm', ), - non_linearity=dict(argstr='--nl=%s', ), - num_ICs=dict(argstr='-n %d', ), - out_all=dict(argstr='--Oall', ), - out_dir=dict( - argstr='-o %s', - genfile=True, - ), - out_mean=dict(argstr='--Omean', ), - out_orig=dict(argstr='--Oorig', ), - out_pca=dict(argstr='--Opca', ), - out_stats=dict(argstr='--Ostats', ), - out_unmix=dict(argstr='--Ounmix', ), - out_white=dict(argstr='--Owhite', ), + ICs=dict(argstr="--ICs=%s", extensions=None,), + approach=dict(argstr="-a %s",), + args=dict(argstr="%s",), + bg_image=dict(argstr="--bgimage=%s", extensions=None,), + bg_threshold=dict(argstr="--bgthreshold=%f",), + cov_weight=dict(argstr="--covarweight=%f",), + dim=dict(argstr="-d %d",), + dim_est=dict(argstr="--dimest=%s",), + environ=dict(nohash=True, usedefault=True,), + epsilon=dict(argstr="--eps=%f",), + epsilonS=dict(argstr="--epsS=%f",), + in_files=dict(argstr="-i %s", mandatory=True, position=0, sep=",",), + 
log_power=dict(argstr="--logPower",), + mask=dict(argstr="-m %s", extensions=None,), + max_restart=dict(argstr="--maxrestart=%d",), + maxit=dict(argstr="--maxit=%d",), + migp=dict(argstr="--migp",), + migpN=dict(argstr="--migpN %d",), + migp_factor=dict(argstr="--migp_factor %d",), + migp_shuffle=dict(argstr="--migp_shuffle",), + mix=dict(argstr="--mix=%s", extensions=None,), + mm_thresh=dict(argstr="--mmthresh=%f",), + no_bet=dict(argstr="--nobet",), + no_mask=dict(argstr="--nomask",), + no_mm=dict(argstr="--no_mm",), + non_linearity=dict(argstr="--nl=%s",), + num_ICs=dict(argstr="-n %d",), + out_all=dict(argstr="--Oall",), + out_dir=dict(argstr="-o %s", genfile=True,), + out_mean=dict(argstr="--Omean",), + out_orig=dict(argstr="--Oorig",), + out_pca=dict(argstr="--Opca",), + out_stats=dict(argstr="--Ostats",), + out_unmix=dict(argstr="--Ounmix",), + out_white=dict(argstr="--Owhite",), output_type=dict(), - pbsc=dict(argstr='--pbsc', ), - rem_cmp=dict(argstr='-f %d', ), - remove_deriv=dict(argstr='--remove_deriv', ), - report=dict(argstr='--report', ), - report_maps=dict(argstr='--report_maps=%s', ), - s_con=dict( - argstr='--Scon=%s', - extensions=None, - ), - s_des=dict( - argstr='--Sdes=%s', - extensions=None, - ), - sep_vn=dict(argstr='--sep_vn', ), - sep_whiten=dict(argstr='--sep_whiten', ), - smode=dict( - argstr='--smode=%s', - extensions=None, - ), - t_con=dict( - argstr='--Tcon=%s', - extensions=None, - ), - t_des=dict( - argstr='--Tdes=%s', - extensions=None, - ), - tr_sec=dict(argstr='--tr=%f', ), - update_mask=dict(argstr='--update_mask', ), - var_norm=dict(argstr='--vn', ), + pbsc=dict(argstr="--pbsc",), + rem_cmp=dict(argstr="-f %d",), + remove_deriv=dict(argstr="--remove_deriv",), + report=dict(argstr="--report",), + report_maps=dict(argstr="--report_maps=%s",), + s_con=dict(argstr="--Scon=%s", extensions=None,), + s_des=dict(argstr="--Sdes=%s", extensions=None,), + sep_vn=dict(argstr="--sep_vn",), + sep_whiten=dict(argstr="--sep_whiten",), + smode=dict(argstr="--smode=%s", extensions=None,), + t_con=dict(argstr="--Tcon=%s", extensions=None,), + t_des=dict(argstr="--Tdes=%s", extensions=None,), + tr_sec=dict(argstr="--tr=%f",), + update_mask=dict(argstr="--update_mask",), + var_norm=dict(argstr="--vn",), ) inputs = MELODIC.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MELODIC_outputs(): - output_map = dict( - out_dir=dict(), - report_dir=dict(), - ) + output_map = dict(out_dir=dict(), report_dir=dict(),) outputs = MELODIC.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py b/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py index 05611f4b80..760072bab9 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py +++ b/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py @@ -4,51 +4,26 @@ def test_MakeDyadicVectors_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - mask=dict( - argstr='%s', - extensions=None, - position=2, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + mask=dict(argstr="%s", extensions=None, position=2,), output=dict( - argstr='%s', - extensions=None, - hash_files=False, - position=3, - usedefault=True, + argstr="%s", extensions=None, hash_files=False, position=3, usedefault=True, ), output_type=dict(), - perc=dict( - argstr='%f', - 
position=4, - ), - phi_vol=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=1, - ), - theta_vol=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=0, - ), + perc=dict(argstr="%f", position=4,), + phi_vol=dict(argstr="%s", extensions=None, mandatory=True, position=1,), + theta_vol=dict(argstr="%s", extensions=None, mandatory=True, position=0,), ) inputs = MakeDyadicVectors.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MakeDyadicVectors_outputs(): - output_map = dict( - dispersion=dict(extensions=None, ), - dyads=dict(extensions=None, ), - ) + output_map = dict(dispersion=dict(extensions=None,), dyads=dict(extensions=None,),) outputs = MakeDyadicVectors.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py b/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py index ff7ae2090b..ce7f058663 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py +++ b/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py @@ -4,36 +4,15 @@ def test_MathsCommand_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr='-dt %s', - position=1, - ), - nan2zeros=dict( - argstr='-nan', - position=3, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + internal_datatype=dict(argstr="-dt %s", position=1,), + nan2zeros=dict(argstr="-nan", position=3,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr='-odt %s', - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, ), + output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = MathsCommand.input_spec() @@ -41,8 +20,10 @@ def test_MathsCommand_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MathsCommand_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MathsCommand.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MaxImage.py b/nipype/interfaces/fsl/tests/test_auto_MaxImage.py index 03fbb31d6e..1baa75963b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MaxImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MaxImage.py @@ -4,41 +4,16 @@ def test_MaxImage_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dimension=dict( - argstr='-%smax', - position=4, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr='-dt %s', - position=1, - ), - nan2zeros=dict( - argstr='-nan', - position=3, - ), + args=dict(argstr="%s",), + dimension=dict(argstr="-%smax", position=4, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + internal_datatype=dict(argstr="-dt %s", position=1,), + nan2zeros=dict(argstr="-nan", 
position=3,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr='-odt %s', - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, ), + output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = MaxImage.input_spec() @@ -46,8 +21,10 @@ def test_MaxImage_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MaxImage_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MaxImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py b/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py index a53b285396..aa52ba3bb7 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py @@ -4,41 +4,16 @@ def test_MaxnImage_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dimension=dict( - argstr='-%smaxn', - position=4, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr='-dt %s', - position=1, - ), - nan2zeros=dict( - argstr='-nan', - position=3, - ), + args=dict(argstr="%s",), + dimension=dict(argstr="-%smaxn", position=4, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + internal_datatype=dict(argstr="-dt %s", position=1,), + nan2zeros=dict(argstr="-nan", position=3,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr='-odt %s', - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, ), + output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = MaxnImage.input_spec() @@ -46,8 +21,10 @@ def test_MaxnImage_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MaxnImage_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MaxnImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MeanImage.py b/nipype/interfaces/fsl/tests/test_auto_MeanImage.py index ad2c6633ef..076cb08a76 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MeanImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MeanImage.py @@ -4,41 +4,16 @@ def test_MeanImage_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dimension=dict( - argstr='-%smean', - position=4, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr='-dt %s', - position=1, - ), - nan2zeros=dict( - argstr='-nan', - position=3, - ), + args=dict(argstr="%s",), + dimension=dict(argstr="-%smean", position=4, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + internal_datatype=dict(argstr="-dt %s", position=1,), + 
nan2zeros=dict(argstr="-nan", position=3,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr='-odt %s', - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, ), + output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = MeanImage.input_spec() @@ -46,8 +21,10 @@ def test_MeanImage_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MeanImage_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MeanImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MedianImage.py b/nipype/interfaces/fsl/tests/test_auto_MedianImage.py index 38b06c61eb..a70ff14b2f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MedianImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MedianImage.py @@ -4,41 +4,16 @@ def test_MedianImage_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dimension=dict( - argstr='-%smedian', - position=4, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr='-dt %s', - position=1, - ), - nan2zeros=dict( - argstr='-nan', - position=3, - ), + args=dict(argstr="%s",), + dimension=dict(argstr="-%smedian", position=4, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + internal_datatype=dict(argstr="-dt %s", position=1,), + nan2zeros=dict(argstr="-nan", position=3,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr='-odt %s', - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, ), + output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = MedianImage.input_spec() @@ -46,8 +21,10 @@ def test_MedianImage_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MedianImage_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MedianImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Merge.py b/nipype/interfaces/fsl/tests/test_auto_Merge.py index 04cb5eea2c..45db6482a9 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Merge.py +++ b/nipype/interfaces/fsl/tests/test_auto_Merge.py @@ -4,42 +4,30 @@ def test_Merge_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dimension=dict( - argstr='-%s', - mandatory=True, - position=0, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr='%s', - mandatory=True, - position=2, - ), + args=dict(argstr="%s",), + dimension=dict(argstr="-%s", mandatory=True, position=0,), + environ=dict(nohash=True, usedefault=True,), + in_files=dict(argstr="%s", mandatory=True, position=2,), merged_file=dict( - argstr='%s', + argstr="%s", extensions=None, hash_files=False, - name_source='in_files', - name_template='%s_merged', + name_source="in_files", + name_template="%s_merged", 
position=1, ), output_type=dict(), - tr=dict( - argstr='%.2f', - position=-1, - ), + tr=dict(argstr="%.2f", position=-1,), ) inputs = Merge.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Merge_outputs(): - output_map = dict(merged_file=dict(extensions=None, ), ) + output_map = dict(merged_file=dict(extensions=None,),) outputs = Merge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MinImage.py b/nipype/interfaces/fsl/tests/test_auto_MinImage.py index e16d5f2a26..9d5416bd15 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MinImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MinImage.py @@ -4,41 +4,16 @@ def test_MinImage_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dimension=dict( - argstr='-%smin', - position=4, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr='-dt %s', - position=1, - ), - nan2zeros=dict( - argstr='-nan', - position=3, - ), + args=dict(argstr="%s",), + dimension=dict(argstr="-%smin", position=4, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + internal_datatype=dict(argstr="-dt %s", position=1,), + nan2zeros=dict(argstr="-nan", position=3,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr='-odt %s', - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, ), + output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = MinImage.input_spec() @@ -46,8 +21,10 @@ def test_MinImage_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MinImage_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MinImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py b/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py index 8ef7c61cb1..4c8ce55636 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py +++ b/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py @@ -4,60 +4,52 @@ def test_MotionOutliers_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dummy=dict(argstr='--dummy=%d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - ), - mask=dict( - argstr='-m %s', - extensions=None, - ), - metric=dict(argstr='--%s', ), - no_motion_correction=dict(argstr='--nomoco', ), + args=dict(argstr="%s",), + dummy=dict(argstr="--dummy=%d",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True,), + mask=dict(argstr="-m %s", extensions=None,), + metric=dict(argstr="--%s",), + no_motion_correction=dict(argstr="--nomoco",), out_file=dict( - argstr='-o %s', + argstr="-o %s", extensions=None, hash_files=False, keep_extension=True, - name_source='in_file', - name_template='%s_outliers.txt', + name_source="in_file", + name_template="%s_outliers.txt", ), out_metric_plot=dict( - 
argstr='-p %s', + argstr="-p %s", extensions=None, hash_files=False, keep_extension=True, - name_source='in_file', - name_template='%s_metrics.png', + name_source="in_file", + name_template="%s_metrics.png", ), out_metric_values=dict( - argstr='-s %s', + argstr="-s %s", extensions=None, hash_files=False, keep_extension=True, - name_source='in_file', - name_template='%s_metrics.txt', + name_source="in_file", + name_template="%s_metrics.txt", ), output_type=dict(), - threshold=dict(argstr='--thresh=%g', ), + threshold=dict(argstr="--thresh=%g",), ) inputs = MotionOutliers.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MotionOutliers_outputs(): output_map = dict( - out_file=dict(extensions=None, ), - out_metric_plot=dict(extensions=None, ), - out_metric_values=dict(extensions=None, ), + out_file=dict(extensions=None,), + out_metric_plot=dict(extensions=None,), + out_metric_values=dict(extensions=None,), ) outputs = MotionOutliers.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py b/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py index a6963467a6..ba96daf994 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py @@ -4,42 +4,17 @@ def test_MultiImageMaths_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr='-dt %s', - position=1, - ), - nan2zeros=dict( - argstr='-nan', - position=3, - ), - op_string=dict( - argstr='%s', - mandatory=True, - position=4, - ), - operand_files=dict(mandatory=True, ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + internal_datatype=dict(argstr="-dt %s", position=1,), + nan2zeros=dict(argstr="-nan", position=3,), + op_string=dict(argstr="%s", mandatory=True, position=4,), + operand_files=dict(mandatory=True,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr='-odt %s', - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, ), + output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = MultiImageMaths.input_spec() @@ -47,8 +22,10 @@ def test_MultiImageMaths_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MultiImageMaths_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MultiImageMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py b/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py index 17b692f5ac..fe3ce1b0b1 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py +++ b/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py @@ -4,21 +4,23 @@ def test_MultipleRegressDesign_inputs(): input_map = dict( - contrasts=dict(mandatory=True, ), + contrasts=dict(mandatory=True,), groups=dict(), - regressors=dict(mandatory=True, ), + regressors=dict(mandatory=True,), ) inputs = 
MultipleRegressDesign.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MultipleRegressDesign_outputs(): output_map = dict( - design_con=dict(extensions=None, ), - design_fts=dict(extensions=None, ), - design_grp=dict(extensions=None, ), - design_mat=dict(extensions=None, ), + design_con=dict(extensions=None,), + design_fts=dict(extensions=None,), + design_grp=dict(extensions=None,), + design_mat=dict(extensions=None,), ) outputs = MultipleRegressDesign.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Overlay.py b/nipype/interfaces/fsl/tests/test_auto_Overlay.py index 8a2dad8690..e09ef17541 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Overlay.py +++ b/nipype/interfaces/fsl/tests/test_auto_Overlay.py @@ -4,91 +4,53 @@ def test_Overlay_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), auto_thresh_bg=dict( - argstr='-a', + argstr="-a", mandatory=True, position=5, - xor=('auto_thresh_bg', 'full_bg_range', 'bg_thresh'), + xor=("auto_thresh_bg", "full_bg_range", "bg_thresh"), ), background_image=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=4, + argstr="%s", extensions=None, mandatory=True, position=4, ), bg_thresh=dict( - argstr='%.3f %.3f', + argstr="%.3f %.3f", mandatory=True, position=5, - xor=('auto_thresh_bg', 'full_bg_range', 'bg_thresh'), - ), - environ=dict( - nohash=True, - usedefault=True, + xor=("auto_thresh_bg", "full_bg_range", "bg_thresh"), ), + environ=dict(nohash=True, usedefault=True,), full_bg_range=dict( - argstr='-A', + argstr="-A", mandatory=True, position=5, - xor=('auto_thresh_bg', 'full_bg_range', 'bg_thresh'), + xor=("auto_thresh_bg", "full_bg_range", "bg_thresh"), ), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=-1, - ), - out_type=dict( - argstr='%s', - position=2, - usedefault=True, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-1, ), + out_type=dict(argstr="%s", position=2, usedefault=True,), output_type=dict(), - show_negative_stats=dict( - argstr='%s', - position=8, - xor=['stat_image2'], - ), - stat_image=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=6, - ), + show_negative_stats=dict(argstr="%s", position=8, xor=["stat_image2"],), + stat_image=dict(argstr="%s", extensions=None, mandatory=True, position=6,), stat_image2=dict( - argstr='%s', - extensions=None, - position=9, - xor=['show_negative_stats'], - ), - stat_thresh=dict( - argstr='%.2f %.2f', - mandatory=True, - position=7, - ), - stat_thresh2=dict( - argstr='%.2f %.2f', - position=10, - ), - transparency=dict( - argstr='%s', - position=1, - usedefault=True, - ), - use_checkerboard=dict( - argstr='-c', - position=3, + argstr="%s", extensions=None, position=9, xor=["show_negative_stats"], ), + stat_thresh=dict(argstr="%.2f %.2f", mandatory=True, position=7,), + stat_thresh2=dict(argstr="%.2f %.2f", position=10,), + transparency=dict(argstr="%s", position=1, usedefault=True,), + use_checkerboard=dict(argstr="-c", position=3,), ) inputs = Overlay.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Overlay_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Overlay.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py b/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py index cc5cdad018..2c3623a76a 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py +++ b/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py @@ -4,68 +4,41 @@ def test_PRELUDE_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), complex_phase_file=dict( - argstr='--complex=%s', + argstr="--complex=%s", extensions=None, mandatory=True, - xor=['magnitude_file', 'phase_file'], + xor=["magnitude_file", "phase_file"], ), - end=dict(argstr='--end=%d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - label_file=dict( - argstr='--labels=%s', - extensions=None, - hash_files=False, - ), - labelprocess2d=dict(argstr='--labelslices', ), + end=dict(argstr="--end=%d",), + environ=dict(nohash=True, usedefault=True,), + label_file=dict(argstr="--labels=%s", extensions=None, hash_files=False,), + labelprocess2d=dict(argstr="--labelslices",), magnitude_file=dict( - argstr='--abs=%s', + argstr="--abs=%s", extensions=None, mandatory=True, - xor=['complex_phase_file'], - ), - mask_file=dict( - argstr='--mask=%s', - extensions=None, + xor=["complex_phase_file"], ), - num_partitions=dict(argstr='--numphasesplit=%d', ), + mask_file=dict(argstr="--mask=%s", extensions=None,), + num_partitions=dict(argstr="--numphasesplit=%d",), output_type=dict(), phase_file=dict( - argstr='--phase=%s', + argstr="--phase=%s", extensions=None, mandatory=True, - xor=['complex_phase_file'], - ), - process2d=dict( - argstr='--slices', - xor=['labelprocess2d'], - ), - process3d=dict( - argstr='--force3D', - xor=['labelprocess2d', 'process2d'], - ), - rawphase_file=dict( - argstr='--rawphase=%s', - extensions=None, - hash_files=False, - ), - removeramps=dict(argstr='--removeramps', ), - savemask_file=dict( - argstr='--savemask=%s', - extensions=None, - hash_files=False, - ), - start=dict(argstr='--start=%d', ), - threshold=dict(argstr='--thresh=%.10f', ), + xor=["complex_phase_file"], + ), + process2d=dict(argstr="--slices", xor=["labelprocess2d"],), + process3d=dict(argstr="--force3D", xor=["labelprocess2d", "process2d"],), + rawphase_file=dict(argstr="--rawphase=%s", extensions=None, hash_files=False,), + removeramps=dict(argstr="--removeramps",), + savemask_file=dict(argstr="--savemask=%s", extensions=None, hash_files=False,), + start=dict(argstr="--start=%d",), + threshold=dict(argstr="--thresh=%.10f",), unwrapped_phase_file=dict( - argstr='--unwrap=%s', - extensions=None, - genfile=True, - hash_files=False, + argstr="--unwrap=%s", extensions=None, genfile=True, hash_files=False, ), ) inputs = PRELUDE.input_spec() @@ -73,8 +46,10 @@ def test_PRELUDE_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_PRELUDE_outputs(): - output_map = dict(unwrapped_phase_file=dict(extensions=None, ), ) + output_map = dict(unwrapped_phase_file=dict(extensions=None,),) outputs = PRELUDE.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py b/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py index 6e186c9d44..2b272b006c 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py @@ -4,54 +4,28 @@ def test_PercentileImage_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dimension=dict( - 
argstr='-%sperc', - position=4, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr='-dt %s', - position=1, - ), - nan2zeros=dict( - argstr='-nan', - position=3, - ), + args=dict(argstr="%s",), + dimension=dict(argstr="-%sperc", position=4, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + internal_datatype=dict(argstr="-dt %s", position=1,), + nan2zeros=dict(argstr="-nan", position=3,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr='-odt %s', - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, ), + output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), - perc=dict( - argstr='%f', - position=5, - ), + perc=dict(argstr="%f", position=5,), ) inputs = PercentileImage.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_PercentileImage_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = PercentileImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py b/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py index c548766a71..c12494e50b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py +++ b/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py @@ -4,37 +4,24 @@ def test_PlotMotionParams_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - mandatory=True, - position=1, - ), - in_source=dict(mandatory=True, ), - out_file=dict( - argstr='-o %s', - extensions=None, - genfile=True, - hash_files=False, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", mandatory=True, position=1,), + in_source=dict(mandatory=True,), + out_file=dict(argstr="-o %s", extensions=None, genfile=True, hash_files=False,), output_type=dict(), - plot_size=dict(argstr='%s', ), - plot_type=dict( - argstr='%s', - mandatory=True, - ), + plot_size=dict(argstr="%s",), + plot_type=dict(argstr="%s", mandatory=True,), ) inputs = PlotMotionParams.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_PlotMotionParams_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = PlotMotionParams.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py b/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py index 8d51e1dd5a..0f3954fcf2 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py +++ b/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py @@ -4,68 +4,34 @@ def test_PlotTimeSeries_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - mandatory=True, - position=1, - ), - labels=dict(argstr='%s', ), - legend_file=dict( - 
argstr='--legend=%s', - extensions=None, - ), - out_file=dict( - argstr='-o %s', - extensions=None, - genfile=True, - hash_files=False, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", mandatory=True, position=1,), + labels=dict(argstr="%s",), + legend_file=dict(argstr="--legend=%s", extensions=None,), + out_file=dict(argstr="-o %s", extensions=None, genfile=True, hash_files=False,), output_type=dict(), - plot_finish=dict( - argstr='--finish=%d', - xor=('plot_range', ), - ), - plot_range=dict( - argstr='%s', - xor=('plot_start', 'plot_finish'), - ), - plot_size=dict(argstr='%s', ), - plot_start=dict( - argstr='--start=%d', - xor=('plot_range', ), - ), - sci_notation=dict(argstr='--sci', ), - title=dict(argstr='%s', ), - x_precision=dict(argstr='--precision=%d', ), - x_units=dict( - argstr='-u %d', - usedefault=True, - ), - y_max=dict( - argstr='--ymax=%.2f', - xor=('y_range', ), - ), - y_min=dict( - argstr='--ymin=%.2f', - xor=('y_range', ), - ), - y_range=dict( - argstr='%s', - xor=('y_min', 'y_max'), - ), + plot_finish=dict(argstr="--finish=%d", xor=("plot_range",),), + plot_range=dict(argstr="%s", xor=("plot_start", "plot_finish"),), + plot_size=dict(argstr="%s",), + plot_start=dict(argstr="--start=%d", xor=("plot_range",),), + sci_notation=dict(argstr="--sci",), + title=dict(argstr="%s",), + x_precision=dict(argstr="--precision=%d",), + x_units=dict(argstr="-u %d", usedefault=True,), + y_max=dict(argstr="--ymax=%.2f", xor=("y_range",),), + y_min=dict(argstr="--ymin=%.2f", xor=("y_range",),), + y_range=dict(argstr="%s", xor=("y_min", "y_max"),), ) inputs = PlotTimeSeries.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_PlotTimeSeries_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = PlotTimeSeries.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py b/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py index df30704138..5aa19309fc 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py +++ b/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py @@ -4,23 +4,11 @@ def test_PowerSpectrum_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=0, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=1, ), output_type=dict(), ) @@ -29,8 +17,10 @@ def test_PowerSpectrum_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_PowerSpectrum_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = PowerSpectrum.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py b/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py index cf2b9c41b0..d6d39b595c 100644 --- 
a/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py +++ b/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py @@ -4,53 +4,25 @@ def test_PrepareFieldmap_inputs(): input_map = dict( - args=dict(argstr='%s', ), - delta_TE=dict( - argstr='%f', - mandatory=True, - position=-2, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_magnitude=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=3, - ), - in_phase=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - nocheck=dict( - argstr='--nocheck', - position=-1, - usedefault=True, - ), - out_fieldmap=dict( - argstr='%s', - extensions=None, - position=4, - ), + args=dict(argstr="%s",), + delta_TE=dict(argstr="%f", mandatory=True, position=-2, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + in_magnitude=dict(argstr="%s", extensions=None, mandatory=True, position=3,), + in_phase=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + nocheck=dict(argstr="--nocheck", position=-1, usedefault=True,), + out_fieldmap=dict(argstr="%s", extensions=None, position=4,), output_type=dict(), - scanner=dict( - argstr='%s', - position=1, - usedefault=True, - ), + scanner=dict(argstr="%s", position=1, usedefault=True,), ) inputs = PrepareFieldmap.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_PrepareFieldmap_outputs(): - output_map = dict(out_fieldmap=dict(extensions=None, ), ) + output_map = dict(out_fieldmap=dict(extensions=None,),) outputs = PrepareFieldmap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py index b7eacdaa01..1e2d0f5486 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py +++ b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py @@ -4,110 +4,60 @@ def test_ProbTrackX_inputs(): input_map = dict( - args=dict(argstr='%s', ), - avoid_mp=dict( - argstr='--avoid=%s', - extensions=None, - ), - c_thresh=dict(argstr='--cthr=%.3f', ), - correct_path_distribution=dict(argstr='--pd', ), - dist_thresh=dict(argstr='--distthresh=%.3f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fibst=dict(argstr='--fibst=%d', ), - force_dir=dict( - argstr='--forcedir', - usedefault=True, - ), - fsamples=dict(mandatory=True, ), - inv_xfm=dict( - argstr='--invxfm=%s', - extensions=None, - ), - loop_check=dict(argstr='--loopcheck', ), - mask=dict( - argstr='-m %s', - extensions=None, - mandatory=True, - ), - mask2=dict( - argstr='--mask2=%s', - extensions=None, - ), - mesh=dict( - argstr='--mesh=%s', - extensions=None, - ), - mod_euler=dict(argstr='--modeuler', ), - mode=dict( - argstr='--mode=%s', - genfile=True, - ), - n_samples=dict( - argstr='--nsamples=%d', - usedefault=True, - ), - n_steps=dict(argstr='--nsteps=%d', ), - network=dict(argstr='--network', ), - opd=dict( - argstr='--opd', - usedefault=True, - ), - os2t=dict(argstr='--os2t', ), - out_dir=dict( - argstr='--dir=%s', - genfile=True, - ), + args=dict(argstr="%s",), + avoid_mp=dict(argstr="--avoid=%s", extensions=None,), + c_thresh=dict(argstr="--cthr=%.3f",), + correct_path_distribution=dict(argstr="--pd",), + dist_thresh=dict(argstr="--distthresh=%.3f",), + environ=dict(nohash=True, usedefault=True,), + fibst=dict(argstr="--fibst=%d",), + force_dir=dict(argstr="--forcedir", usedefault=True,), + 
fsamples=dict(mandatory=True,), + inv_xfm=dict(argstr="--invxfm=%s", extensions=None,), + loop_check=dict(argstr="--loopcheck",), + mask=dict(argstr="-m %s", extensions=None, mandatory=True,), + mask2=dict(argstr="--mask2=%s", extensions=None,), + mesh=dict(argstr="--mesh=%s", extensions=None,), + mod_euler=dict(argstr="--modeuler",), + mode=dict(argstr="--mode=%s", genfile=True,), + n_samples=dict(argstr="--nsamples=%d", usedefault=True,), + n_steps=dict(argstr="--nsteps=%d",), + network=dict(argstr="--network",), + opd=dict(argstr="--opd", usedefault=True,), + os2t=dict(argstr="--os2t",), + out_dir=dict(argstr="--dir=%s", genfile=True,), output_type=dict(), - phsamples=dict(mandatory=True, ), - rand_fib=dict(argstr='--randfib=%d', ), - random_seed=dict(argstr='--rseed', ), - s2tastext=dict(argstr='--s2tastext', ), - sample_random_points=dict(argstr='--sampvox', ), - samples_base_name=dict( - argstr='--samples=%s', - usedefault=True, - ), - seed=dict( - argstr='--seed=%s', - mandatory=True, - ), - seed_ref=dict( - argstr='--seedref=%s', - extensions=None, - ), - step_length=dict(argstr='--steplength=%.3f', ), - stop_mask=dict( - argstr='--stop=%s', - extensions=None, - ), - target_masks=dict(argstr='--targetmasks=%s', ), - thsamples=dict(mandatory=True, ), - use_anisotropy=dict(argstr='--usef', ), - verbose=dict(argstr='--verbose=%d', ), - waypoints=dict( - argstr='--waypoints=%s', - extensions=None, - ), - xfm=dict( - argstr='--xfm=%s', - extensions=None, - ), + phsamples=dict(mandatory=True,), + rand_fib=dict(argstr="--randfib=%d",), + random_seed=dict(argstr="--rseed",), + s2tastext=dict(argstr="--s2tastext",), + sample_random_points=dict(argstr="--sampvox",), + samples_base_name=dict(argstr="--samples=%s", usedefault=True,), + seed=dict(argstr="--seed=%s", mandatory=True,), + seed_ref=dict(argstr="--seedref=%s", extensions=None,), + step_length=dict(argstr="--steplength=%.3f",), + stop_mask=dict(argstr="--stop=%s", extensions=None,), + target_masks=dict(argstr="--targetmasks=%s",), + thsamples=dict(mandatory=True,), + use_anisotropy=dict(argstr="--usef",), + verbose=dict(argstr="--verbose=%d",), + waypoints=dict(argstr="--waypoints=%s", extensions=None,), + xfm=dict(argstr="--xfm=%s", extensions=None,), ) inputs = ProbTrackX.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ProbTrackX_outputs(): output_map = dict( fdt_paths=dict(), - log=dict(extensions=None, ), + log=dict(extensions=None,), particle_files=dict(), targets=dict(), - way_total=dict(extensions=None, ), + way_total=dict(extensions=None,), ) outputs = ProbTrackX.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py index 3e1a6c12c3..56bff1e5ac 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py +++ b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py @@ -4,144 +4,79 @@ def test_ProbTrackX2_inputs(): input_map = dict( - args=dict(argstr='%s', ), - avoid_mp=dict( - argstr='--avoid=%s', - extensions=None, - ), - c_thresh=dict(argstr='--cthr=%.3f', ), - colmask4=dict( - argstr='--colmask4=%s', - extensions=None, - ), - correct_path_distribution=dict(argstr='--pd', ), - dist_thresh=dict(argstr='--distthresh=%.3f', ), - distthresh1=dict(argstr='--distthresh1=%.3f', ), - distthresh3=dict(argstr='--distthresh3=%.3f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fibst=dict(argstr='--fibst=%d', ), - 
fopd=dict( - argstr='--fopd=%s', - extensions=None, - ), - force_dir=dict( - argstr='--forcedir', - usedefault=True, - ), - fsamples=dict(mandatory=True, ), - inv_xfm=dict( - argstr='--invxfm=%s', - extensions=None, - ), - loop_check=dict(argstr='--loopcheck', ), - lrtarget3=dict( - argstr='--lrtarget3=%s', - extensions=None, - ), - mask=dict( - argstr='-m %s', - extensions=None, - mandatory=True, - ), - meshspace=dict(argstr='--meshspace=%s', ), - mod_euler=dict(argstr='--modeuler', ), - n_samples=dict( - argstr='--nsamples=%d', - usedefault=True, - ), - n_steps=dict(argstr='--nsteps=%d', ), - network=dict(argstr='--network', ), - omatrix1=dict(argstr='--omatrix1', ), - omatrix2=dict( - argstr='--omatrix2', - requires=['target2'], - ), - omatrix3=dict( - argstr='--omatrix3', - requires=['target3', 'lrtarget3'], - ), - omatrix4=dict(argstr='--omatrix4', ), - onewaycondition=dict(argstr='--onewaycondition', ), - opd=dict( - argstr='--opd', - usedefault=True, - ), - os2t=dict(argstr='--os2t', ), - out_dir=dict( - argstr='--dir=%s', - genfile=True, - ), + args=dict(argstr="%s",), + avoid_mp=dict(argstr="--avoid=%s", extensions=None,), + c_thresh=dict(argstr="--cthr=%.3f",), + colmask4=dict(argstr="--colmask4=%s", extensions=None,), + correct_path_distribution=dict(argstr="--pd",), + dist_thresh=dict(argstr="--distthresh=%.3f",), + distthresh1=dict(argstr="--distthresh1=%.3f",), + distthresh3=dict(argstr="--distthresh3=%.3f",), + environ=dict(nohash=True, usedefault=True,), + fibst=dict(argstr="--fibst=%d",), + fopd=dict(argstr="--fopd=%s", extensions=None,), + force_dir=dict(argstr="--forcedir", usedefault=True,), + fsamples=dict(mandatory=True,), + inv_xfm=dict(argstr="--invxfm=%s", extensions=None,), + loop_check=dict(argstr="--loopcheck",), + lrtarget3=dict(argstr="--lrtarget3=%s", extensions=None,), + mask=dict(argstr="-m %s", extensions=None, mandatory=True,), + meshspace=dict(argstr="--meshspace=%s",), + mod_euler=dict(argstr="--modeuler",), + n_samples=dict(argstr="--nsamples=%d", usedefault=True,), + n_steps=dict(argstr="--nsteps=%d",), + network=dict(argstr="--network",), + omatrix1=dict(argstr="--omatrix1",), + omatrix2=dict(argstr="--omatrix2", requires=["target2"],), + omatrix3=dict(argstr="--omatrix3", requires=["target3", "lrtarget3"],), + omatrix4=dict(argstr="--omatrix4",), + onewaycondition=dict(argstr="--onewaycondition",), + opd=dict(argstr="--opd", usedefault=True,), + os2t=dict(argstr="--os2t",), + out_dir=dict(argstr="--dir=%s", genfile=True,), output_type=dict(), - phsamples=dict(mandatory=True, ), - rand_fib=dict(argstr='--randfib=%d', ), - random_seed=dict(argstr='--rseed', ), - s2tastext=dict(argstr='--s2tastext', ), - sample_random_points=dict(argstr='--sampvox', ), - samples_base_name=dict( - argstr='--samples=%s', - usedefault=True, - ), - seed=dict( - argstr='--seed=%s', - mandatory=True, - ), - seed_ref=dict( - argstr='--seedref=%s', - extensions=None, - ), - simple=dict(argstr='--simple', ), - step_length=dict(argstr='--steplength=%.3f', ), - stop_mask=dict( - argstr='--stop=%s', - extensions=None, - ), - target2=dict( - argstr='--target2=%s', - extensions=None, - ), - target3=dict( - argstr='--target3=%s', - extensions=None, - ), - target4=dict( - argstr='--target4=%s', - extensions=None, - ), - target_masks=dict(argstr='--targetmasks=%s', ), - thsamples=dict(mandatory=True, ), - use_anisotropy=dict(argstr='--usef', ), - verbose=dict(argstr='--verbose=%d', ), - waycond=dict(argstr='--waycond=%s', ), - wayorder=dict(argstr='--wayorder', ), - waypoints=dict( - 
argstr='--waypoints=%s', - extensions=None, - ), - xfm=dict( - argstr='--xfm=%s', - extensions=None, - ), + phsamples=dict(mandatory=True,), + rand_fib=dict(argstr="--randfib=%d",), + random_seed=dict(argstr="--rseed",), + s2tastext=dict(argstr="--s2tastext",), + sample_random_points=dict(argstr="--sampvox",), + samples_base_name=dict(argstr="--samples=%s", usedefault=True,), + seed=dict(argstr="--seed=%s", mandatory=True,), + seed_ref=dict(argstr="--seedref=%s", extensions=None,), + simple=dict(argstr="--simple",), + step_length=dict(argstr="--steplength=%.3f",), + stop_mask=dict(argstr="--stop=%s", extensions=None,), + target2=dict(argstr="--target2=%s", extensions=None,), + target3=dict(argstr="--target3=%s", extensions=None,), + target4=dict(argstr="--target4=%s", extensions=None,), + target_masks=dict(argstr="--targetmasks=%s",), + thsamples=dict(mandatory=True,), + use_anisotropy=dict(argstr="--usef",), + verbose=dict(argstr="--verbose=%d",), + waycond=dict(argstr="--waycond=%s",), + wayorder=dict(argstr="--wayorder",), + waypoints=dict(argstr="--waypoints=%s", extensions=None,), + xfm=dict(argstr="--xfm=%s", extensions=None,), ) inputs = ProbTrackX2.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ProbTrackX2_outputs(): output_map = dict( fdt_paths=dict(), - log=dict(extensions=None, ), - lookup_tractspace=dict(extensions=None, ), - matrix1_dot=dict(extensions=None, ), - matrix2_dot=dict(extensions=None, ), - matrix3_dot=dict(extensions=None, ), - network_matrix=dict(extensions=None, ), + log=dict(extensions=None,), + lookup_tractspace=dict(extensions=None,), + matrix1_dot=dict(extensions=None,), + matrix2_dot=dict(extensions=None,), + matrix3_dot=dict(extensions=None,), + network_matrix=dict(extensions=None,), particle_files=dict(), targets=dict(), - way_total=dict(extensions=None, ), + way_total=dict(extensions=None,), ) outputs = ProbTrackX2.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py b/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py index 304beace0d..cc1a6a03ac 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py +++ b/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py @@ -4,30 +4,21 @@ def test_ProjThresh_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr='%s', - mandatory=True, - position=0, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_files=dict(argstr="%s", mandatory=True, position=0,), output_type=dict(), - threshold=dict( - argstr='%d', - mandatory=True, - position=1, - ), + threshold=dict(argstr="%d", mandatory=True, position=1,), ) inputs = ProjThresh.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ProjThresh_outputs(): - output_map = dict(out_files=dict(), ) + output_map = dict(out_files=dict(),) outputs = ProjThresh.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Randomise.py b/nipype/interfaces/fsl/tests/test_auto_Randomise.py index 4b6194a3d8..95c1cf7d59 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Randomise.py +++ b/nipype/interfaces/fsl/tests/test_auto_Randomise.py @@ -4,71 +4,44 @@ def test_Randomise_inputs(): input_map = dict( - args=dict(argstr='%s', ), - base_name=dict( - 
argstr='-o "%s"', - position=1, - usedefault=True, - ), - c_thresh=dict(argstr='-c %.1f', ), - cm_thresh=dict(argstr='-C %.1f', ), - demean=dict(argstr='-D', ), - design_mat=dict( - argstr='-d %s', - extensions=None, - position=2, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - f_c_thresh=dict(argstr='-F %.2f', ), - f_cm_thresh=dict(argstr='-S %.2f', ), - f_only=dict(argstr='--f_only', ), - fcon=dict( - argstr='-f %s', - extensions=None, - ), - in_file=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - position=0, - ), - mask=dict( - argstr='-m %s', - extensions=None, - ), - num_perm=dict(argstr='-n %d', ), - one_sample_group_mean=dict(argstr='-1', ), + args=dict(argstr="%s",), + base_name=dict(argstr='-o "%s"', position=1, usedefault=True,), + c_thresh=dict(argstr="-c %.1f",), + cm_thresh=dict(argstr="-C %.1f",), + demean=dict(argstr="-D",), + design_mat=dict(argstr="-d %s", extensions=None, position=2,), + environ=dict(nohash=True, usedefault=True,), + f_c_thresh=dict(argstr="-F %.2f",), + f_cm_thresh=dict(argstr="-S %.2f",), + f_only=dict(argstr="--f_only",), + fcon=dict(argstr="-f %s", extensions=None,), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=0,), + mask=dict(argstr="-m %s", extensions=None,), + num_perm=dict(argstr="-n %d",), + one_sample_group_mean=dict(argstr="-1",), output_type=dict(), - p_vec_n_dist_files=dict(argstr='-P', ), - raw_stats_imgs=dict(argstr='-R', ), - seed=dict(argstr='--seed=%d', ), - show_info_parallel_mode=dict(argstr='-Q', ), - show_total_perms=dict(argstr='-q', ), - tcon=dict( - argstr='-t %s', - extensions=None, - position=3, - ), - tfce=dict(argstr='-T', ), - tfce2D=dict(argstr='--T2', ), - tfce_C=dict(argstr='--tfce_C=%.2f', ), - tfce_E=dict(argstr='--tfce_E=%.2f', ), - tfce_H=dict(argstr='--tfce_H=%.2f', ), - var_smooth=dict(argstr='-v %d', ), - vox_p_values=dict(argstr='-x', ), - x_block_labels=dict( - argstr='-e %s', - extensions=None, - ), + p_vec_n_dist_files=dict(argstr="-P",), + raw_stats_imgs=dict(argstr="-R",), + seed=dict(argstr="--seed=%d",), + show_info_parallel_mode=dict(argstr="-Q",), + show_total_perms=dict(argstr="-q",), + tcon=dict(argstr="-t %s", extensions=None, position=3,), + tfce=dict(argstr="-T",), + tfce2D=dict(argstr="--T2",), + tfce_C=dict(argstr="--tfce_C=%.2f",), + tfce_E=dict(argstr="--tfce_E=%.2f",), + tfce_H=dict(argstr="--tfce_H=%.2f",), + var_smooth=dict(argstr="-v %d",), + vox_p_values=dict(argstr="-x",), + x_block_labels=dict(argstr="-e %s", extensions=None,), ) inputs = Randomise.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Randomise_outputs(): output_map = dict( f_corrected_p_files=dict(), diff --git a/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py b/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py index e3843be681..d81874e76a 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py +++ b/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py @@ -4,22 +4,10 @@ def test_Reorient2Std_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - ), - out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True,), + out_file=dict(argstr="%s", 
extensions=None, genfile=True, hash_files=False,), output_type=dict(), ) inputs = Reorient2Std.input_spec() @@ -27,8 +15,10 @@ def test_Reorient2Std_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Reorient2Std_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Reorient2Std.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py b/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py index 3e0ce74dd3..fbadb82c99 100644 --- a/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py +++ b/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py @@ -4,31 +4,23 @@ def test_RobustFOV_inputs(): input_map = dict( - args=dict(argstr='%s', ), - brainsize=dict(argstr='-b %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - position=0, - ), + args=dict(argstr="%s",), + brainsize=dict(argstr="-b %d",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=0,), out_roi=dict( - argstr='-r %s', + argstr="-r %s", extensions=None, hash_files=False, - name_source=['in_file'], - name_template='%s_ROI', + name_source=["in_file"], + name_template="%s_ROI", ), out_transform=dict( - argstr='-m %s', + argstr="-m %s", extensions=None, hash_files=False, - name_source=['in_file'], - name_template='%s_to_ROI', + name_source=["in_file"], + name_template="%s_to_ROI", ), output_type=dict(), ) @@ -37,10 +29,11 @@ def test_RobustFOV_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_RobustFOV_outputs(): output_map = dict( - out_roi=dict(extensions=None, ), - out_transform=dict(extensions=None, ), + out_roi=dict(extensions=None,), out_transform=dict(extensions=None,), ) outputs = RobustFOV.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_SMM.py b/nipype/interfaces/fsl/tests/test_auto_SMM.py index 9d3591ae27..2042d0845f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SMM.py +++ b/nipype/interfaces/fsl/tests/test_auto_SMM.py @@ -4,11 +4,8 @@ def test_SMM_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), mask=dict( argstr='--mask="%s"', copyfile=False, @@ -16,10 +13,7 @@ def test_SMM_inputs(): mandatory=True, position=1, ), - no_deactivation_class=dict( - argstr='--zfstatmode', - position=2, - ), + no_deactivation_class=dict(argstr="--zfstatmode", position=2,), output_type=dict(), spatial_data_file=dict( argstr='--sdf="%s"', @@ -34,11 +28,13 @@ def test_SMM_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SMM_outputs(): output_map = dict( - activation_p_map=dict(extensions=None, ), - deactivation_p_map=dict(extensions=None, ), - null_p_map=dict(extensions=None, ), + activation_p_map=dict(extensions=None,), + deactivation_p_map=dict(extensions=None,), + null_p_map=dict(extensions=None,), ) outputs = SMM.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_SUSAN.py b/nipype/interfaces/fsl/tests/test_auto_SUSAN.py index 
44b19350be..7f7f270be1 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SUSAN.py +++ b/nipype/interfaces/fsl/tests/test_auto_SUSAN.py @@ -4,58 +4,28 @@ def test_SUSAN_inputs(): input_map = dict( - args=dict(argstr='%s', ), - brightness_threshold=dict( - argstr='%.10f', - mandatory=True, - position=2, - ), - dimension=dict( - argstr='%d', - position=4, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fwhm=dict( - argstr='%.10f', - mandatory=True, - position=3, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=1, - ), + args=dict(argstr="%s",), + brightness_threshold=dict(argstr="%.10f", mandatory=True, position=2,), + dimension=dict(argstr="%d", position=4, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + fwhm=dict(argstr="%.10f", mandatory=True, position=3,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-1, ), output_type=dict(), - usans=dict( - argstr='', - position=6, - usedefault=True, - ), - use_median=dict( - argstr='%d', - position=5, - usedefault=True, - ), + usans=dict(argstr="", position=6, usedefault=True,), + use_median=dict(argstr="%d", position=5, usedefault=True,), ) inputs = SUSAN.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SUSAN_outputs(): - output_map = dict(smoothed_file=dict(extensions=None, ), ) + output_map = dict(smoothed_file=dict(extensions=None,),) outputs = SUSAN.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_SigLoss.py b/nipype/interfaces/fsl/tests/test_auto_SigLoss.py index 193557d297..fe63fdce23 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SigLoss.py +++ b/nipype/interfaces/fsl/tests/test_auto_SigLoss.py @@ -4,36 +4,24 @@ def test_SigLoss_inputs(): input_map = dict( - args=dict(argstr='%s', ), - echo_time=dict(argstr='--te=%f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - ), - mask_file=dict( - argstr='-m %s', - extensions=None, - ), - out_file=dict( - argstr='-s %s', - extensions=None, - genfile=True, - ), + args=dict(argstr="%s",), + echo_time=dict(argstr="--te=%f",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True,), + mask_file=dict(argstr="-m %s", extensions=None,), + out_file=dict(argstr="-s %s", extensions=None, genfile=True,), output_type=dict(), - slice_direction=dict(argstr='-d %s', ), + slice_direction=dict(argstr="-d %s",), ) inputs = SigLoss.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SigLoss_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = SigLoss.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Slice.py b/nipype/interfaces/fsl/tests/test_auto_Slice.py index a3e604b657..f96ee854b3 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Slice.py +++ b/nipype/interfaces/fsl/tests/test_auto_Slice.py @@ -4,22 +4,12 @@ def test_Slice_inputs(): input_map = dict( - 
args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr='%s', - copyfile=False, - extensions=None, - mandatory=True, - position=0, - ), - out_base_name=dict( - argstr='%s', - position=1, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0, ), + out_base_name=dict(argstr="%s", position=1,), output_type=dict(), ) inputs = Slice.input_spec() @@ -27,8 +17,10 @@ def test_Slice_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Slice_outputs(): - output_map = dict(out_files=dict(), ) + output_map = dict(out_files=dict(),) outputs = Slice.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py b/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py index 9e26d7952b..0d0c0fc0f4 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py +++ b/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py @@ -4,45 +4,30 @@ def test_SliceTimer_inputs(): input_map = dict( - args=dict(argstr='%s', ), - custom_order=dict( - argstr='--ocustom=%s', - extensions=None, - ), - custom_timings=dict( - argstr='--tcustom=%s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - global_shift=dict(argstr='--tglobal', ), - in_file=dict( - argstr='--in=%s', - extensions=None, - mandatory=True, - position=0, - ), - index_dir=dict(argstr='--down', ), - interleaved=dict(argstr='--odd', ), + args=dict(argstr="%s",), + custom_order=dict(argstr="--ocustom=%s", extensions=None,), + custom_timings=dict(argstr="--tcustom=%s", extensions=None,), + environ=dict(nohash=True, usedefault=True,), + global_shift=dict(argstr="--tglobal",), + in_file=dict(argstr="--in=%s", extensions=None, mandatory=True, position=0,), + index_dir=dict(argstr="--down",), + interleaved=dict(argstr="--odd",), out_file=dict( - argstr='--out=%s', - extensions=None, - genfile=True, - hash_files=False, + argstr="--out=%s", extensions=None, genfile=True, hash_files=False, ), output_type=dict(), - slice_direction=dict(argstr='--direction=%d', ), - time_repetition=dict(argstr='--repeat=%f', ), + slice_direction=dict(argstr="--direction=%d",), + time_repetition=dict(argstr="--repeat=%f",), ) inputs = SliceTimer.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SliceTimer_outputs(): - output_map = dict(slice_time_corrected_file=dict(extensions=None, ), ) + output_map = dict(slice_time_corrected_file=dict(extensions=None,),) outputs = SliceTimer.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Slicer.py b/nipype/interfaces/fsl/tests/test_auto_Slicer.py index d843870561..205aab061b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Slicer.py +++ b/nipype/interfaces/fsl/tests/test_auto_Slicer.py @@ -5,103 +5,56 @@ def test_Slicer_inputs(): input_map = dict( all_axial=dict( - argstr='-A', + argstr="-A", position=10, - requires=['image_width'], - xor=('single_slice', 'middle_slices', 'all_axial', 'sample_axial'), - ), - args=dict(argstr='%s', ), - colour_map=dict( - argstr='-l %s', - extensions=None, - position=4, - ), - dither_edges=dict( - argstr='-t', - position=7, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - 
image_edges=dict( - argstr='%s', - extensions=None, - position=2, - ), - image_width=dict( - argstr='%d', - position=-2, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=1, - ), - intensity_range=dict( - argstr='-i %.3f %.3f', - position=5, - ), - label_slices=dict( - argstr='-L', - position=3, - usedefault=True, - ), + requires=["image_width"], + xor=("single_slice", "middle_slices", "all_axial", "sample_axial"), + ), + args=dict(argstr="%s",), + colour_map=dict(argstr="-l %s", extensions=None, position=4,), + dither_edges=dict(argstr="-t", position=7,), + environ=dict(nohash=True, usedefault=True,), + image_edges=dict(argstr="%s", extensions=None, position=2,), + image_width=dict(argstr="%d", position=-2,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1,), + intensity_range=dict(argstr="-i %.3f %.3f", position=5,), + label_slices=dict(argstr="-L", position=3, usedefault=True,), middle_slices=dict( - argstr='-a', + argstr="-a", position=10, - xor=('single_slice', 'middle_slices', 'all_axial', 'sample_axial'), - ), - nearest_neighbour=dict( - argstr='-n', - position=8, + xor=("single_slice", "middle_slices", "all_axial", "sample_axial"), ), + nearest_neighbour=dict(argstr="-n", position=8,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-1, ), output_type=dict(), sample_axial=dict( - argstr='-S %d', + argstr="-S %d", position=10, - requires=['image_width'], - xor=('single_slice', 'middle_slices', 'all_axial', 'sample_axial'), - ), - scaling=dict( - argstr='-s %f', - position=0, - ), - show_orientation=dict( - argstr='%s', - position=9, - usedefault=True, + requires=["image_width"], + xor=("single_slice", "middle_slices", "all_axial", "sample_axial"), ), + scaling=dict(argstr="-s %f", position=0,), + show_orientation=dict(argstr="%s", position=9, usedefault=True,), single_slice=dict( - argstr='-%s', + argstr="-%s", position=10, - requires=['slice_number'], - xor=('single_slice', 'middle_slices', 'all_axial', 'sample_axial'), - ), - slice_number=dict( - argstr='-%d', - position=11, - ), - threshold_edges=dict( - argstr='-e %.3f', - position=6, + requires=["slice_number"], + xor=("single_slice", "middle_slices", "all_axial", "sample_axial"), ), + slice_number=dict(argstr="-%d", position=11,), + threshold_edges=dict(argstr="-e %.3f", position=6,), ) inputs = Slicer.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Slicer_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Slicer.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Smooth.py b/nipype/interfaces/fsl/tests/test_auto_Smooth.py index bc710d76a5..733f0e83f1 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Smooth.py +++ b/nipype/interfaces/fsl/tests/test_auto_Smooth.py @@ -4,36 +4,28 @@ def test_Smooth_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), fwhm=dict( - argstr='-kernel gauss %.03f -fmean', + argstr="-kernel gauss %.03f -fmean", mandatory=True, position=1, - xor=['sigma'], - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - 
position=0, + xor=["sigma"], ), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), output_type=dict(), sigma=dict( - argstr='-kernel gauss %.03f -fmean', + argstr="-kernel gauss %.03f -fmean", mandatory=True, position=1, - xor=['fwhm'], + xor=["fwhm"], ), smoothed_file=dict( - argstr='%s', + argstr="%s", extensions=None, hash_files=False, - name_source=['in_file'], - name_template='%s_smooth', + name_source=["in_file"], + name_template="%s_smooth", position=2, ), ) @@ -42,8 +34,10 @@ def test_Smooth_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Smooth_outputs(): - output_map = dict(smoothed_file=dict(extensions=None, ), ) + output_map = dict(smoothed_file=dict(extensions=None,),) outputs = Smooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py b/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py index 811bc1e4b9..b6fac84352 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py +++ b/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py @@ -4,44 +4,23 @@ def test_SmoothEstimate_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dof=dict( - argstr='--dof=%d', - mandatory=True, - xor=['zstat_file'], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - mask_file=dict( - argstr='--mask=%s', - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s",), + dof=dict(argstr="--dof=%d", mandatory=True, xor=["zstat_file"],), + environ=dict(nohash=True, usedefault=True,), + mask_file=dict(argstr="--mask=%s", extensions=None, mandatory=True,), output_type=dict(), - residual_fit_file=dict( - argstr='--res=%s', - extensions=None, - requires=['dof'], - ), - zstat_file=dict( - argstr='--zstat=%s', - extensions=None, - xor=['dof'], - ), + residual_fit_file=dict(argstr="--res=%s", extensions=None, requires=["dof"],), + zstat_file=dict(argstr="--zstat=%s", extensions=None, xor=["dof"],), ) inputs = SmoothEstimate.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SmoothEstimate_outputs(): - output_map = dict( - dlh=dict(), - resels=dict(), - volume=dict(), - ) + output_map = dict(dlh=dict(), resels=dict(), volume=dict(),) outputs = SmoothEstimate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py b/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py index 2f267eb6bf..ec2b59ba6d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py +++ b/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py @@ -4,56 +4,21 @@ def test_SpatialFilter_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr='-dt %s', - position=1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + internal_datatype=dict(argstr="-dt %s", position=1,), kernel_file=dict( - argstr='%s', - extensions=None, - position=5, - xor=['kernel_size'], - ), - kernel_shape=dict( - argstr='-kernel %s', - position=4, - ), - kernel_size=dict( - argstr='%.4f', - position=5, - xor=['kernel_file'], - ), - 
nan2zeros=dict( - argstr='-nan', - position=3, - ), - operation=dict( - argstr='-f%s', - mandatory=True, - position=6, + argstr="%s", extensions=None, position=5, xor=["kernel_size"], ), + kernel_shape=dict(argstr="-kernel %s", position=4,), + kernel_size=dict(argstr="%.4f", position=5, xor=["kernel_file"],), + nan2zeros=dict(argstr="-nan", position=3,), + operation=dict(argstr="-f%s", mandatory=True, position=6,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr='-odt %s', - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, ), + output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = SpatialFilter.input_spec() @@ -61,8 +26,10 @@ def test_SpatialFilter_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SpatialFilter_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = SpatialFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Split.py b/nipype/interfaces/fsl/tests/test_auto_Split.py index 2c922ad583..26b814b9c0 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Split.py +++ b/nipype/interfaces/fsl/tests/test_auto_Split.py @@ -4,26 +4,11 @@ def test_Split_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dimension=dict( - argstr='-%s', - mandatory=True, - position=2, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=0, - ), - out_base_name=dict( - argstr='%s', - position=1, - ), + args=dict(argstr="%s",), + dimension=dict(argstr="-%s", mandatory=True, position=2,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), + out_base_name=dict(argstr="%s", position=1,), output_type=dict(), ) inputs = Split.input_spec() @@ -31,8 +16,10 @@ def test_Split_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Split_outputs(): - output_map = dict(out_files=dict(), ) + output_map = dict(out_files=dict(),) outputs = Split.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_StdImage.py b/nipype/interfaces/fsl/tests/test_auto_StdImage.py index a8b7e764b5..073ebfa7ee 100644 --- a/nipype/interfaces/fsl/tests/test_auto_StdImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_StdImage.py @@ -4,41 +4,16 @@ def test_StdImage_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dimension=dict( - argstr='-%sstd', - position=4, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr='-dt %s', - position=1, - ), - nan2zeros=dict( - argstr='-nan', - position=3, - ), + args=dict(argstr="%s",), + dimension=dict(argstr="-%sstd", position=4, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + internal_datatype=dict(argstr="-dt %s", position=1,), + nan2zeros=dict(argstr="-nan", position=3,), out_file=dict( - argstr='%s', - 
extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr='-odt %s', - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, ), + output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = StdImage.input_spec() @@ -46,8 +21,10 @@ def test_StdImage_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_StdImage_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = StdImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py b/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py index 241cfbcf27..ac56fad17e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py +++ b/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py @@ -4,27 +4,11 @@ def test_SwapDimensions_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position='1', - ), - new_dims=dict( - argstr='%s %s %s', - mandatory=True, - ), - out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position="1",), + new_dims=dict(argstr="%s %s %s", mandatory=True,), + out_file=dict(argstr="%s", extensions=None, genfile=True, hash_files=False,), output_type=dict(), ) inputs = SwapDimensions.input_spec() @@ -32,8 +16,10 @@ def test_SwapDimensions_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SwapDimensions_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = SwapDimensions.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_TOPUP.py b/nipype/interfaces/fsl/tests/test_auto_TOPUP.py index f98dbef518..f34023f799 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TOPUP.py +++ b/nipype/interfaces/fsl/tests/test_auto_TOPUP.py @@ -4,112 +4,90 @@ def test_TOPUP_inputs(): input_map = dict( - args=dict(argstr='%s', ), - config=dict( - argstr='--config=%s', - usedefault=True, - ), + args=dict(argstr="%s",), + config=dict(argstr="--config=%s", usedefault=True,), encoding_direction=dict( - argstr='--datain=%s', + argstr="--datain=%s", mandatory=True, - requires=['readout_times'], - xor=['encoding_file'], + requires=["readout_times"], + xor=["encoding_file"], ), encoding_file=dict( - argstr='--datain=%s', + argstr="--datain=%s", extensions=None, mandatory=True, - xor=['encoding_direction'], - ), - environ=dict( - nohash=True, - usedefault=True, + xor=["encoding_direction"], ), - estmov=dict(argstr='--estmov=%d', ), - fwhm=dict(argstr='--fwhm=%f', ), - in_file=dict( - argstr='--imain=%s', - extensions=None, - mandatory=True, - ), - interp=dict(argstr='--interp=%s', ), - max_iter=dict(argstr='--miter=%d', ), - minmet=dict(argstr='--minmet=%d', ), - numprec=dict(argstr='--numprec=%s', ), + environ=dict(nohash=True, usedefault=True,), + estmov=dict(argstr="--estmov=%d",), + fwhm=dict(argstr="--fwhm=%f",), + 
in_file=dict(argstr="--imain=%s", extensions=None, mandatory=True,), + interp=dict(argstr="--interp=%s",), + max_iter=dict(argstr="--miter=%d",), + minmet=dict(argstr="--minmet=%d",), + numprec=dict(argstr="--numprec=%s",), out_base=dict( - argstr='--out=%s', + argstr="--out=%s", extensions=None, hash_files=False, - name_source=['in_file'], - name_template='%s_base', + name_source=["in_file"], + name_template="%s_base", ), out_corrected=dict( - argstr='--iout=%s', + argstr="--iout=%s", extensions=None, hash_files=False, - name_source=['in_file'], - name_template='%s_corrected', + name_source=["in_file"], + name_template="%s_corrected", ), out_field=dict( - argstr='--fout=%s', + argstr="--fout=%s", extensions=None, hash_files=False, - name_source=['in_file'], - name_template='%s_field', - ), - out_jac_prefix=dict( - argstr='--jacout=%s', - hash_files=False, - usedefault=True, + name_source=["in_file"], + name_template="%s_field", ), + out_jac_prefix=dict(argstr="--jacout=%s", hash_files=False, usedefault=True,), out_logfile=dict( - argstr='--logout=%s', + argstr="--logout=%s", extensions=None, hash_files=False, keep_extension=True, - name_source=['in_file'], - name_template='%s_topup.log', - ), - out_mat_prefix=dict( - argstr='--rbmout=%s', - hash_files=False, - usedefault=True, - ), - out_warp_prefix=dict( - argstr='--dfout=%s', - hash_files=False, - usedefault=True, + name_source=["in_file"], + name_template="%s_topup.log", ), + out_mat_prefix=dict(argstr="--rbmout=%s", hash_files=False, usedefault=True,), + out_warp_prefix=dict(argstr="--dfout=%s", hash_files=False, usedefault=True,), output_type=dict(), readout_times=dict( - mandatory=True, - requires=['encoding_direction'], - xor=['encoding_file'], + mandatory=True, requires=["encoding_direction"], xor=["encoding_file"], ), - reg_lambda=dict(argstr='--lambda=%0.f', ), - regmod=dict(argstr='--regmod=%s', ), - regrid=dict(argstr='--regrid=%d', ), - scale=dict(argstr='--scale=%d', ), - splineorder=dict(argstr='--splineorder=%d', ), - ssqlambda=dict(argstr='--ssqlambda=%d', ), - subsamp=dict(argstr='--subsamp=%d', ), - warp_res=dict(argstr='--warpres=%f', ), + reg_lambda=dict(argstr="--lambda=%0.f",), + regmod=dict(argstr="--regmod=%s",), + regrid=dict(argstr="--regrid=%d",), + scale=dict(argstr="--scale=%d",), + splineorder=dict(argstr="--splineorder=%d",), + ssqlambda=dict(argstr="--ssqlambda=%d",), + subsamp=dict(argstr="--subsamp=%d",), + warp_res=dict(argstr="--warpres=%f",), ) inputs = TOPUP.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TOPUP_outputs(): output_map = dict( - out_corrected=dict(extensions=None, ), - out_enc_file=dict(extensions=None, ), - out_field=dict(extensions=None, ), - out_fieldcoef=dict(extensions=None, ), + out_corrected=dict(extensions=None,), + out_enc_file=dict(extensions=None,), + out_field=dict(extensions=None,), + out_fieldcoef=dict(extensions=None,), out_jacs=dict(), - out_logfile=dict(extensions=None, ), + out_logfile=dict(extensions=None,), out_mats=dict(), - out_movpar=dict(extensions=None, ), + out_movpar=dict(extensions=None,), out_warps=dict(), ) outputs = TOPUP.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py b/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py index 9f4ecfbd1a..a764c6c1a6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py +++ b/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py @@ -4,46 +4,17 
@@ def test_TemporalFilter_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - highpass_sigma=dict( - argstr='-bptf %.6f', - position=4, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr='-dt %s', - position=1, - ), - lowpass_sigma=dict( - argstr='%.6f', - position=5, - usedefault=True, - ), - nan2zeros=dict( - argstr='-nan', - position=3, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + highpass_sigma=dict(argstr="-bptf %.6f", position=4, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + internal_datatype=dict(argstr="-dt %s", position=1,), + lowpass_sigma=dict(argstr="%.6f", position=5, usedefault=True,), + nan2zeros=dict(argstr="-nan", position=3,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr='-odt %s', - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, ), + output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = TemporalFilter.input_spec() @@ -51,8 +22,10 @@ def test_TemporalFilter_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TemporalFilter_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = TemporalFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Threshold.py b/nipype/interfaces/fsl/tests/test_auto_Threshold.py index 0b1eaeaa17..4156b8f82b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Threshold.py +++ b/nipype/interfaces/fsl/tests/test_auto_Threshold.py @@ -4,44 +4,19 @@ def test_Threshold_inputs(): input_map = dict( - args=dict(argstr='%s', ), - direction=dict(usedefault=True, ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr='-dt %s', - position=1, - ), - nan2zeros=dict( - argstr='-nan', - position=3, - ), + args=dict(argstr="%s",), + direction=dict(usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + internal_datatype=dict(argstr="-dt %s", position=1,), + nan2zeros=dict(argstr="-nan", position=3,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr='-odt %s', - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, ), + output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), - thresh=dict( - argstr='%s', - mandatory=True, - position=4, - ), - use_nonzero_voxels=dict(requires=['use_robust_range'], ), + thresh=dict(argstr="%s", mandatory=True, position=4,), + use_nonzero_voxels=dict(requires=["use_robust_range"],), use_robust_range=dict(), ) inputs = Threshold.input_spec() @@ -49,8 +24,10 @@ def test_Threshold_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Threshold_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map 
= dict(out_file=dict(extensions=None,),) outputs = Threshold.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py b/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py index e630e10a40..f5c6c38f35 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py +++ b/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py @@ -4,52 +4,34 @@ def test_TractSkeleton_inputs(): input_map = dict( - alt_data_file=dict( - argstr='-a %s', - extensions=None, - ), - alt_skeleton=dict( - argstr='-s %s', - extensions=None, - ), - args=dict(argstr='%s', ), - data_file=dict(extensions=None, ), - distance_map=dict(extensions=None, ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - ), + alt_data_file=dict(argstr="-a %s", extensions=None,), + alt_skeleton=dict(argstr="-s %s", extensions=None,), + args=dict(argstr="%s",), + data_file=dict(extensions=None,), + distance_map=dict(extensions=None,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True,), output_type=dict(), project_data=dict( - argstr='-p %.3f %s %s %s %s', - requires=['threshold', 'distance_map', 'data_file'], + argstr="-p %.3f %s %s %s %s", + requires=["threshold", "distance_map", "data_file"], ), - projected_data=dict(extensions=None, ), - search_mask_file=dict( - extensions=None, - xor=['use_cingulum_mask'], - ), - skeleton_file=dict(argstr='-o %s', ), + projected_data=dict(extensions=None,), + search_mask_file=dict(extensions=None, xor=["use_cingulum_mask"],), + skeleton_file=dict(argstr="-o %s",), threshold=dict(), - use_cingulum_mask=dict( - usedefault=True, - xor=['search_mask_file'], - ), + use_cingulum_mask=dict(usedefault=True, xor=["search_mask_file"],), ) inputs = TractSkeleton.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TractSkeleton_outputs(): output_map = dict( - projected_data=dict(extensions=None, ), - skeleton_file=dict(extensions=None, ), + projected_data=dict(extensions=None,), skeleton_file=dict(extensions=None,), ) outputs = TractSkeleton.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Training.py b/nipype/interfaces/fsl/tests/test_auto_Training.py index 51617d97ed..5626f3e483 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Training.py +++ b/nipype/interfaces/fsl/tests/test_auto_Training.py @@ -4,32 +4,21 @@ def test_Training_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - loo=dict( - argstr='-l', - position=2, - ), - mel_icas=dict( - argstr='%s', - copyfile=False, - position=-1, - ), - trained_wts_filestem=dict( - argstr='%s', - position=1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + loo=dict(argstr="-l", position=2,), + mel_icas=dict(argstr="%s", copyfile=False, position=-1,), + trained_wts_filestem=dict(argstr="%s", position=1,), ) inputs = Training.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Training_outputs(): - output_map = dict(trained_wts_file=dict(extensions=None, ), ) + output_map = dict(trained_wts_file=dict(extensions=None,),) outputs = Training.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py b/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py index edde69fc32..df4c1c2257 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py +++ b/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py @@ -3,22 +3,16 @@ def test_TrainingSetCreator_inputs(): - input_map = dict(mel_icas_in=dict( - argstr='%s', - copyfile=False, - position=-1, - ), ) + input_map = dict(mel_icas_in=dict(argstr="%s", copyfile=False, position=-1,),) inputs = TrainingSetCreator.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TrainingSetCreator_outputs(): - output_map = dict(mel_icas_out=dict( - argstr='%s', - copyfile=False, - position=-1, - ), ) + output_map = dict(mel_icas_out=dict(argstr="%s", copyfile=False, position=-1,),) outputs = TrainingSetCreator.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py b/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py index 20fde2afca..67662a8c7f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py @@ -4,41 +4,16 @@ def test_UnaryMaths_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr='-dt %s', - position=1, - ), - nan2zeros=dict( - argstr='-nan', - position=3, - ), - operation=dict( - argstr='-%s', - mandatory=True, - position=4, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + internal_datatype=dict(argstr="-dt %s", position=1,), + nan2zeros=dict(argstr="-nan", position=3,), + operation=dict(argstr="-%s", mandatory=True, position=4,), out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr='-odt %s', - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, ), + output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = UnaryMaths.input_spec() @@ -46,8 +21,10 @@ def test_UnaryMaths_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_UnaryMaths_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = UnaryMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_VecReg.py b/nipype/interfaces/fsl/tests/test_auto_VecReg.py index 41c74d2eb2..4f802628cd 100644 --- a/nipype/interfaces/fsl/tests/test_auto_VecReg.py +++ b/nipype/interfaces/fsl/tests/test_auto_VecReg.py @@ -4,61 +4,29 @@ def test_VecReg_inputs(): input_map = dict( - affine_mat=dict( - argstr='-t %s', - extensions=None, - ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - ), - interpolation=dict(argstr='--interp=%s', ), - mask=dict( - argstr='-m %s', - extensions=None, - ), - out_file=dict( - argstr='-o %s', - extensions=None, - genfile=True, - hash_files=False, - ), + 
affine_mat=dict(argstr="-t %s", extensions=None,), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True,), + interpolation=dict(argstr="--interp=%s",), + mask=dict(argstr="-m %s", extensions=None,), + out_file=dict(argstr="-o %s", extensions=None, genfile=True, hash_files=False,), output_type=dict(), - ref_mask=dict( - argstr='--refmask=%s', - extensions=None, - ), - ref_vol=dict( - argstr='-r %s', - extensions=None, - mandatory=True, - ), - rotation_mat=dict( - argstr='--rotmat=%s', - extensions=None, - ), - rotation_warp=dict( - argstr='--rotwarp=%s', - extensions=None, - ), - warp_field=dict( - argstr='-w %s', - extensions=None, - ), + ref_mask=dict(argstr="--refmask=%s", extensions=None,), + ref_vol=dict(argstr="-r %s", extensions=None, mandatory=True,), + rotation_mat=dict(argstr="--rotmat=%s", extensions=None,), + rotation_warp=dict(argstr="--rotwarp=%s", extensions=None,), + warp_field=dict(argstr="-w %s", extensions=None,), ) inputs = VecReg.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_VecReg_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = VecReg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py b/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py index 541cff38aa..70980aa1be 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py @@ -4,59 +4,31 @@ def test_WarpPoints_inputs(): input_map = dict( - args=dict(argstr='%s', ), - coord_mm=dict( - argstr='-mm', - xor=['coord_vox'], - ), - coord_vox=dict( - argstr='-vox', - xor=['coord_mm'], - ), - dest_file=dict( - argstr='-dest %s', - extensions=None, - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_coords=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - ), + args=dict(argstr="%s",), + coord_mm=dict(argstr="-mm", xor=["coord_vox"],), + coord_vox=dict(argstr="-vox", xor=["coord_mm"],), + dest_file=dict(argstr="-dest %s", extensions=None, mandatory=True,), + environ=dict(nohash=True, usedefault=True,), + in_coords=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), out_file=dict( extensions=None, - name_source='in_coords', - name_template='%s_warped', - output_name='out_file', - ), - src_file=dict( - argstr='-src %s', - extensions=None, - mandatory=True, - ), - warp_file=dict( - argstr='-warp %s', - extensions=None, - xor=['xfm_file'], - ), - xfm_file=dict( - argstr='-xfm %s', - extensions=None, - xor=['warp_file'], + name_source="in_coords", + name_template="%s_warped", + output_name="out_file", ), + src_file=dict(argstr="-src %s", extensions=None, mandatory=True,), + warp_file=dict(argstr="-warp %s", extensions=None, xor=["xfm_file"],), + xfm_file=dict(argstr="-xfm %s", extensions=None, xor=["warp_file"],), ) inputs = WarpPoints.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_WarpPoints_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = WarpPoints.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py b/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py index 369794e3c0..a6fa949890 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py @@ -4,53 +4,25 @@ def test_WarpPointsFromStd_inputs(): input_map = dict( - args=dict(argstr='%s', ), - coord_mm=dict( - argstr='-mm', - xor=['coord_vox'], - ), - coord_vox=dict( - argstr='-vox', - xor=['coord_mm'], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - img_file=dict( - argstr='-img %s', - extensions=None, - mandatory=True, - ), - in_coords=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - std_file=dict( - argstr='-std %s', - extensions=None, - mandatory=True, - ), - warp_file=dict( - argstr='-warp %s', - extensions=None, - xor=['xfm_file'], - ), - xfm_file=dict( - argstr='-xfm %s', - extensions=None, - xor=['warp_file'], - ), + args=dict(argstr="%s",), + coord_mm=dict(argstr="-mm", xor=["coord_vox"],), + coord_vox=dict(argstr="-vox", xor=["coord_mm"],), + environ=dict(nohash=True, usedefault=True,), + img_file=dict(argstr="-img %s", extensions=None, mandatory=True,), + in_coords=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + std_file=dict(argstr="-std %s", extensions=None, mandatory=True,), + warp_file=dict(argstr="-warp %s", extensions=None, xor=["xfm_file"],), + xfm_file=dict(argstr="-xfm %s", extensions=None, xor=["warp_file"],), ) inputs = WarpPointsFromStd.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_WarpPointsFromStd_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = WarpPointsFromStd.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py b/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py index 9c62aa6ec7..9debbe6a74 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py @@ -4,63 +4,32 @@ def test_WarpPointsToStd_inputs(): input_map = dict( - args=dict(argstr='%s', ), - coord_mm=dict( - argstr='-mm', - xor=['coord_vox'], - ), - coord_vox=dict( - argstr='-vox', - xor=['coord_mm'], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - img_file=dict( - argstr='-img %s', - extensions=None, - mandatory=True, - ), - in_coords=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - ), + args=dict(argstr="%s",), + coord_mm=dict(argstr="-mm", xor=["coord_vox"],), + coord_vox=dict(argstr="-vox", xor=["coord_mm"],), + environ=dict(nohash=True, usedefault=True,), + img_file=dict(argstr="-img %s", extensions=None, mandatory=True,), + in_coords=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), out_file=dict( extensions=None, - name_source='in_coords', - name_template='%s_warped', - output_name='out_file', - ), - premat_file=dict( - argstr='-premat %s', - extensions=None, - ), - std_file=dict( - argstr='-std %s', - extensions=None, - mandatory=True, - ), - warp_file=dict( - argstr='-warp %s', - extensions=None, - xor=['xfm_file'], - ), - xfm_file=dict( - argstr='-xfm %s', - extensions=None, - xor=['warp_file'], - ), + name_source="in_coords", + name_template="%s_warped", + output_name="out_file", + ), + premat_file=dict(argstr="-premat 
%s", extensions=None,), + std_file=dict(argstr="-std %s", extensions=None, mandatory=True,), + warp_file=dict(argstr="-warp %s", extensions=None, xor=["xfm_file"],), + xfm_file=dict(argstr="-xfm %s", extensions=None, xor=["warp_file"],), ) inputs = WarpPointsToStd.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_WarpPointsToStd_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = WarpPointsToStd.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py b/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py index ab91cf8d41..cdb0e86e64 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py @@ -4,51 +4,35 @@ def test_WarpUtils_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='--in=%s', - extensions=None, - mandatory=True, - ), - knot_space=dict(argstr='--knotspace=%d,%d,%d', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="--in=%s", extensions=None, mandatory=True,), + knot_space=dict(argstr="--knotspace=%d,%d,%d",), out_file=dict( - argstr='--out=%s', + argstr="--out=%s", extensions=None, - name_source=['in_file'], - output_name='out_file', + name_source=["in_file"], + output_name="out_file", position=-1, ), - out_format=dict(argstr='--outformat=%s', ), - out_jacobian=dict( - argstr='--jac=%s', - extensions=None, - ), + out_format=dict(argstr="--outformat=%s",), + out_jacobian=dict(argstr="--jac=%s", extensions=None,), output_type=dict(), - reference=dict( - argstr='--ref=%s', - extensions=None, - mandatory=True, - ), - warp_resolution=dict(argstr='--warpres=%0.4f,%0.4f,%0.4f', ), - with_affine=dict(argstr='--withaff', ), - write_jacobian=dict( - mandatory=True, - usedefault=True, - ), + reference=dict(argstr="--ref=%s", extensions=None, mandatory=True,), + warp_resolution=dict(argstr="--warpres=%0.4f,%0.4f,%0.4f",), + with_affine=dict(argstr="--withaff",), + write_jacobian=dict(mandatory=True, usedefault=True,), ) inputs = WarpUtils.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_WarpUtils_outputs(): output_map = dict( - out_file=dict(extensions=None, ), - out_jacobian=dict(extensions=None, ), + out_file=dict(extensions=None,), out_jacobian=dict(extensions=None,), ) outputs = WarpUtils.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_XFibres5.py b/nipype/interfaces/fsl/tests/test_auto_XFibres5.py index 1a8bb61389..9a4e973569 100644 --- a/nipype/interfaces/fsl/tests/test_auto_XFibres5.py +++ b/nipype/interfaces/fsl/tests/test_auto_XFibres5.py @@ -4,115 +4,53 @@ def test_XFibres5_inputs(): input_map = dict( - all_ard=dict( - argstr='--allard', - xor=('no_ard', 'all_ard'), - ), - args=dict(argstr='%s', ), - burn_in=dict( - argstr='--burnin=%d', - usedefault=True, - ), - burn_in_no_ard=dict( - argstr='--burnin_noard=%d', - usedefault=True, - ), - bvals=dict( - argstr='--bvals=%s', - extensions=None, - mandatory=True, - ), - bvecs=dict( - argstr='--bvecs=%s', - extensions=None, - mandatory=True, - ), + all_ard=dict(argstr="--allard", xor=("no_ard", "all_ard"),), + args=dict(argstr="%s",), + 
burn_in=dict(argstr="--burnin=%d", usedefault=True,), + burn_in_no_ard=dict(argstr="--burnin_noard=%d", usedefault=True,), + bvals=dict(argstr="--bvals=%s", extensions=None, mandatory=True,), + bvecs=dict(argstr="--bvecs=%s", extensions=None, mandatory=True,), cnlinear=dict( - argstr='--cnonlinear', - xor=('no_spat', 'non_linear', 'cnlinear'), - ), - dwi=dict( - argstr='--data=%s', - extensions=None, - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - f0_ard=dict( - argstr='--f0 --ardf0', - xor=['f0_noard', 'f0_ard', 'all_ard'], - ), - f0_noard=dict( - argstr='--f0', - xor=['f0_noard', 'f0_ard'], - ), - force_dir=dict( - argstr='--forcedir', - usedefault=True, - ), - fudge=dict(argstr='--fudge=%d', ), - gradnonlin=dict( - argstr='--gradnonlin=%s', - extensions=None, - ), - logdir=dict( - argstr='--logdir=%s', - usedefault=True, - ), - mask=dict( - argstr='--mask=%s', - extensions=None, - mandatory=True, - ), - model=dict(argstr='--model=%d', ), - n_fibres=dict( - argstr='--nfibres=%d', - mandatory=True, - usedefault=True, - ), - n_jumps=dict( - argstr='--njumps=%d', - usedefault=True, - ), - no_ard=dict( - argstr='--noard', - xor=('no_ard', 'all_ard'), - ), - no_spat=dict( - argstr='--nospat', - xor=('no_spat', 'non_linear', 'cnlinear'), - ), + argstr="--cnonlinear", xor=("no_spat", "non_linear", "cnlinear"), + ), + dwi=dict(argstr="--data=%s", extensions=None, mandatory=True,), + environ=dict(nohash=True, usedefault=True,), + f0_ard=dict(argstr="--f0 --ardf0", xor=["f0_noard", "f0_ard", "all_ard"],), + f0_noard=dict(argstr="--f0", xor=["f0_noard", "f0_ard"],), + force_dir=dict(argstr="--forcedir", usedefault=True,), + fudge=dict(argstr="--fudge=%d",), + gradnonlin=dict(argstr="--gradnonlin=%s", extensions=None,), + logdir=dict(argstr="--logdir=%s", usedefault=True,), + mask=dict(argstr="--mask=%s", extensions=None, mandatory=True,), + model=dict(argstr="--model=%d",), + n_fibres=dict(argstr="--nfibres=%d", mandatory=True, usedefault=True,), + n_jumps=dict(argstr="--njumps=%d", usedefault=True,), + no_ard=dict(argstr="--noard", xor=("no_ard", "all_ard"),), + no_spat=dict(argstr="--nospat", xor=("no_spat", "non_linear", "cnlinear"),), non_linear=dict( - argstr='--nonlinear', - xor=('no_spat', 'non_linear', 'cnlinear'), + argstr="--nonlinear", xor=("no_spat", "non_linear", "cnlinear"), ), output_type=dict(), - rician=dict(argstr='--rician', ), - sample_every=dict( - argstr='--sampleevery=%d', - usedefault=True, - ), - seed=dict(argstr='--seed=%d', ), - update_proposal_every=dict( - argstr='--updateproposalevery=%d', - usedefault=True, - ), + rician=dict(argstr="--rician",), + sample_every=dict(argstr="--sampleevery=%d", usedefault=True,), + seed=dict(argstr="--seed=%d",), + update_proposal_every=dict(argstr="--updateproposalevery=%d", usedefault=True,), ) inputs = XFibres5.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_XFibres5_outputs(): output_map = dict( dyads=dict(), fsamples=dict(), - mean_S0samples=dict(extensions=None, ), - mean_dsamples=dict(extensions=None, ), + mean_S0samples=dict(extensions=None,), + mean_dsamples=dict(extensions=None,), mean_fsamples=dict(), - mean_tausamples=dict(extensions=None, ), + mean_tausamples=dict(extensions=None,), phsamples=dict(), thsamples=dict(), ) diff --git a/nipype/interfaces/fsl/tests/test_base.py b/nipype/interfaces/fsl/tests/test_base.py index 71022997b6..52f93b545f 100644 --- 
a/nipype/interfaces/fsl/tests/test_base.py +++ b/nipype/interfaces/fsl/tests/test_base.py @@ -13,8 +13,8 @@ @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") def test_fslversion(): ver = fsl.Info.version() - ver = ver.split('.') - assert ver[0] in ['4', '5'] + ver = ver.split(".") + assert ver[0] in ["4", "5"] @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") @@ -30,14 +30,14 @@ def test_outputtype_to_ext(): assert res == ext with pytest.raises(KeyError): - fsl.Info.output_type_to_ext('JUNK') + fsl.Info.output_type_to_ext("JUNK") @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") def test_FSLCommand(): # Most methods in FSLCommand are tested in the subclasses. Only # testing the one item that is not. - cmd = fsl.FSLCommand(command='ls') + cmd = fsl.FSLCommand(command="ls") res = cmd.run() assert type(res) == InterfaceResult @@ -45,13 +45,13 @@ def test_FSLCommand(): @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") def test_FSLCommand2(): # Check default output type and environ - cmd = fsl.FSLCommand(command='junk') + cmd = fsl.FSLCommand(command="junk") assert cmd._output_type == fsl.Info.output_type() - assert cmd.inputs.environ['FSLOUTPUTTYPE'] == cmd._output_type + assert cmd.inputs.environ["FSLOUTPUTTYPE"] == cmd._output_type assert cmd._output_type in fsl.Info.ftypes cmd = fsl.FSLCommand - cmdinst = fsl.FSLCommand(command='junk') + cmdinst = fsl.FSLCommand(command="junk") for out_type in fsl.Info.ftypes: cmd.set_default_output_type(out_type) assert cmd._output_type == out_type @@ -64,38 +64,23 @@ def test_FSLCommand2(): @pytest.mark.parametrize( "args, desired_name", [ - ({}, { - "file": 'foo.nii.gz' - }), # just the filename + ({}, {"file": "foo.nii.gz"}), # just the filename # filename with suffix - ({ - "suffix": '_brain' - }, { - "file": 'foo_brain.nii.gz' - }), + ({"suffix": "_brain"}, {"file": "foo_brain.nii.gz"}), ( - { - "suffix": '_brain', - "cwd": '/data' - }, + {"suffix": "_brain", "cwd": "/data"}, # filename with suffix and working directory - { - "dir": '/data', - "file": 'foo_brain.nii.gz' - }), + {"dir": "/data", "file": "foo_brain.nii.gz"}, + ), # filename with suffix and no file extension change - ({ - "suffix": '_brain.mat', - "change_ext": False - }, { - "file": 'foo_brain.mat' - }) - ]) + ({"suffix": "_brain.mat", "change_ext": False}, {"file": "foo_brain.mat"}), + ], +) def test_gen_fname(args, desired_name): # Test _gen_fname method of FSLCommand - cmd = fsl.FSLCommand(command='junk', output_type='NIFTI_GZ') + cmd = fsl.FSLCommand(command="junk", output_type="NIFTI_GZ") pth = os.getcwd() - fname = cmd._gen_fname('foo.nii.gz', **args) + fname = cmd._gen_fname("foo.nii.gz", **args) if "dir" in desired_name.keys(): desired = os.path.join(desired_name["dir"], desired_name["file"]) else: diff --git a/nipype/interfaces/fsl/tests/test_dti.py b/nipype/interfaces/fsl/tests/test_dti.py index 19d807d217..8f5abfc662 100644 --- a/nipype/interfaces/fsl/tests/test_dti.py +++ b/nipype/interfaces/fsl/tests/test_dti.py @@ -17,7 +17,7 @@ def test_dtifit2(create_files_in_directory): filelist, outdir = create_files_in_directory dti = fsl.DTIFit() # make sure command gets called - assert dti.cmd == 'dtifit' + assert dti.cmd == "dtifit" # test raising error with mandatory args absent with pytest.raises(ValueError): @@ -25,211 +25,203 @@ def test_dtifit2(create_files_in_directory): # .inputs based parameters setting dti.inputs.dwi = filelist[0] - dti.inputs.base_name = 'foo.dti.nii' + dti.inputs.base_name = "foo.dti.nii" dti.inputs.mask = 
filelist[1] dti.inputs.bvecs = filelist[0] dti.inputs.bvals = filelist[1] dti.inputs.min_z = 10 dti.inputs.max_z = 50 - assert dti.cmdline == \ - 'dtifit -k %s -o foo.dti.nii -m %s -r %s -b %s -Z 50 -z 10' % (filelist[0], - filelist[1], - filelist[0], - filelist[1]) + assert ( + dti.cmdline + == "dtifit -k %s -o foo.dti.nii -m %s -r %s -b %s -Z 50 -z 10" + % (filelist[0], filelist[1], filelist[0], filelist[1]) + ) -@pytest.mark.xfail( - reason="These tests are skipped until we clean up some of this code") +@pytest.mark.xfail(reason="These tests are skipped until we clean up some of this code") def test_randomise2(): rand = fsl.Randomise() # make sure command gets called - assert rand.cmd == 'randomise' + assert rand.cmd == "randomise" # test raising error with mandatory args absent with pytest.raises(ValueError): rand.run() # .inputs based parameters setting - rand.inputs.input_4D = 'infile.nii' - rand.inputs.output_rootname = 'outfile' - rand.inputs.design_matrix = 'design.mat' - rand.inputs.t_contrast = 'infile.con' + rand.inputs.input_4D = "infile.nii" + rand.inputs.output_rootname = "outfile" + rand.inputs.design_matrix = "design.mat" + rand.inputs.t_contrast = "infile.con" actualCmdline = sorted(rand.cmdline.split()) - cmd = 'randomise -i infile.nii -o outfile -d design.mat -t infile.con' + cmd = "randomise -i infile.nii -o outfile -d design.mat -t infile.con" desiredCmdline = sorted(cmd.split()) assert actualCmdline == desiredCmdline # .run based parameter setting rand2 = fsl.Randomise( - input_4D='infile2', - output_rootname='outfile2', - f_contrast='infile.f', + input_4D="infile2", + output_rootname="outfile2", + f_contrast="infile.f", one_sample_gmean=True, - int_seed=4) + int_seed=4, + ) actualCmdline = sorted(rand2.cmdline.split()) - cmd = 'randomise -i infile2 -o outfile2 -1 -f infile.f --seed=4' + cmd = "randomise -i infile2 -o outfile2 -1 -f infile.f --seed=4" desiredCmdline = sorted(cmd.split()) assert actualCmdline == desiredCmdline rand3 = fsl.Randomise() - results = rand3.run(input_4D='infile3', output_rootname='outfile3') - assert results.runtime.cmdline == \ - 'randomise -i infile3 -o outfile3' + results = rand3.run(input_4D="infile3", output_rootname="outfile3") + assert results.runtime.cmdline == "randomise -i infile3 -o outfile3" # test arguments for opt_map opt_map = { - 'demean_data': ('-D', True), - 'one_sample_gmean': ('-1', True), - 'mask_image': ('-m inp_mask', 'inp_mask'), - 'design_matrix': ('-d design.mat', 'design.mat'), - 't_contrast': ('-t input.con', 'input.con'), - 'f_contrast': ('-f input.fts', 'input.fts'), - 'xchange_block_labels': ('-e design.grp', 'design.grp'), - 'print_unique_perm': ('-q', True), - 'print_info_parallelMode': ('-Q', True), - 'num_permutations': ('-n 10', 10), - 'vox_pvalus': ('-x', True), - 'fstats_only': ('--fonly', True), - 'thresh_free_cluster': ('-T', True), - 'thresh_free_cluster_2Dopt': ('--T2', True), - 'cluster_thresholding': ('-c 0.20', 0.20), - 'cluster_mass_thresholding': ('-C 0.40', 0.40), - 'fcluster_thresholding': ('-F 0.10', 0.10), - 'fcluster_mass_thresholding': ('-S 0.30', 0.30), - 'variance_smoothing': ('-v 0.20', 0.20), - 'diagnostics_off': ('--quiet', True), - 'output_raw': ('-R', True), - 'output_perm_vect': ('-P', True), - 'int_seed': ('--seed=20', 20), - 'TFCE_height_param': ('--tfce_H=0.11', 0.11), - 'TFCE_extent_param': ('--tfce_E=0.50', 0.50), - 'TFCE_connectivity': ('--tfce_C=0.30', 0.30), - 'list_num_voxel_EVs_pos': ('--vxl=1,2,3,4', '1,2,3,4'), - 'list_img_voxel_EVs': ('--vxf=6,7,8,9,3', '6,7,8,9,3') + 
"demean_data": ("-D", True), + "one_sample_gmean": ("-1", True), + "mask_image": ("-m inp_mask", "inp_mask"), + "design_matrix": ("-d design.mat", "design.mat"), + "t_contrast": ("-t input.con", "input.con"), + "f_contrast": ("-f input.fts", "input.fts"), + "xchange_block_labels": ("-e design.grp", "design.grp"), + "print_unique_perm": ("-q", True), + "print_info_parallelMode": ("-Q", True), + "num_permutations": ("-n 10", 10), + "vox_pvalus": ("-x", True), + "fstats_only": ("--fonly", True), + "thresh_free_cluster": ("-T", True), + "thresh_free_cluster_2Dopt": ("--T2", True), + "cluster_thresholding": ("-c 0.20", 0.20), + "cluster_mass_thresholding": ("-C 0.40", 0.40), + "fcluster_thresholding": ("-F 0.10", 0.10), + "fcluster_mass_thresholding": ("-S 0.30", 0.30), + "variance_smoothing": ("-v 0.20", 0.20), + "diagnostics_off": ("--quiet", True), + "output_raw": ("-R", True), + "output_perm_vect": ("-P", True), + "int_seed": ("--seed=20", 20), + "TFCE_height_param": ("--tfce_H=0.11", 0.11), + "TFCE_extent_param": ("--tfce_E=0.50", 0.50), + "TFCE_connectivity": ("--tfce_C=0.30", 0.30), + "list_num_voxel_EVs_pos": ("--vxl=1,2,3,4", "1,2,3,4"), + "list_img_voxel_EVs": ("--vxf=6,7,8,9,3", "6,7,8,9,3"), } for name, settings in list(opt_map.items()): rand4 = fsl.Randomise( - input_4D='infile', output_rootname='root', **{ - name: settings[1] - }) - assert rand4.cmdline == rand4.cmd + ' -i infile -o root ' + settings[0] + input_4D="infile", output_rootname="root", **{name: settings[1]} + ) + assert rand4.cmdline == rand4.cmd + " -i infile -o root " + settings[0] -@pytest.mark.xfail( - reason="These tests are skipped until we clean up some of this code") +@pytest.mark.xfail(reason="These tests are skipped until we clean up some of this code") def test_Randomise_parallel(): rand = fsl.Randomise_parallel() # make sure command gets called - assert rand.cmd == 'randomise_parallel' + assert rand.cmd == "randomise_parallel" # test raising error with mandatory args absent with pytest.raises(ValueError): rand.run() # .inputs based parameters setting - rand.inputs.input_4D = 'infile.nii' - rand.inputs.output_rootname = 'outfile' - rand.inputs.design_matrix = 'design.mat' - rand.inputs.t_contrast = 'infile.con' + rand.inputs.input_4D = "infile.nii" + rand.inputs.output_rootname = "outfile" + rand.inputs.design_matrix = "design.mat" + rand.inputs.t_contrast = "infile.con" actualCmdline = sorted(rand.cmdline.split()) - cmd = ('randomise_parallel -i infile.nii -o outfile -d design.mat -t ' - 'infile.con') + cmd = "randomise_parallel -i infile.nii -o outfile -d design.mat -t " "infile.con" desiredCmdline = sorted(cmd.split()) assert actualCmdline == desiredCmdline # .run based parameter setting rand2 = fsl.Randomise_parallel( - input_4D='infile2', - output_rootname='outfile2', - f_contrast='infile.f', + input_4D="infile2", + output_rootname="outfile2", + f_contrast="infile.f", one_sample_gmean=True, - int_seed=4) + int_seed=4, + ) actualCmdline = sorted(rand2.cmdline.split()) - cmd = 'randomise_parallel -i infile2 -o outfile2 -1 -f infile.f --seed=4' + cmd = "randomise_parallel -i infile2 -o outfile2 -1 -f infile.f --seed=4" desiredCmdline = sorted(cmd.split()) assert actualCmdline == desiredCmdline rand3 = fsl.Randomise_parallel() - results = rand3.run(input_4D='infile3', output_rootname='outfile3') - assert results.runtime.cmdline == \ - 'randomise_parallel -i infile3 -o outfile3' + results = rand3.run(input_4D="infile3", output_rootname="outfile3") + assert results.runtime.cmdline == "randomise_parallel -i 
infile3 -o outfile3" # test arguments for opt_map opt_map = { - 'demean_data': ('-D', True), - 'one_sample_gmean': ('-1', True), - 'mask_image': ('-m inp_mask', 'inp_mask'), - 'design_matrix': ('-d design.mat', 'design.mat'), - 't_contrast': ('-t input.con', 'input.con'), - 'f_contrast': ('-f input.fts', 'input.fts'), - 'xchange_block_labels': ('-e design.grp', 'design.grp'), - 'print_unique_perm': ('-q', True), - 'print_info_parallelMode': ('-Q', True), - 'num_permutations': ('-n 10', 10), - 'vox_pvalus': ('-x', True), - 'fstats_only': ('--fonly', True), - 'thresh_free_cluster': ('-T', True), - 'thresh_free_cluster_2Dopt': ('--T2', True), - 'cluster_thresholding': ('-c 0.20', 0.20), - 'cluster_mass_thresholding': ('-C 0.40', 0.40), - 'fcluster_thresholding': ('-F 0.10', 0.10), - 'fcluster_mass_thresholding': ('-S 0.30', 0.30), - 'variance_smoothing': ('-v 0.20', 0.20), - 'diagnostics_off': ('--quiet', True), - 'output_raw': ('-R', True), - 'output_perm_vect': ('-P', True), - 'int_seed': ('--seed=20', 20), - 'TFCE_height_param': ('--tfce_H=0.11', 0.11), - 'TFCE_extent_param': ('--tfce_E=0.50', 0.50), - 'TFCE_connectivity': ('--tfce_C=0.30', 0.30), - 'list_num_voxel_EVs_pos': ('--vxl=' + repr([1, 2, 3, 4]), - repr([1, 2, 3, 4])), - 'list_img_voxel_EVs': ('--vxf=' + repr([6, 7, 8, 9, 3]), - repr([6, 7, 8, 9, 3])) + "demean_data": ("-D", True), + "one_sample_gmean": ("-1", True), + "mask_image": ("-m inp_mask", "inp_mask"), + "design_matrix": ("-d design.mat", "design.mat"), + "t_contrast": ("-t input.con", "input.con"), + "f_contrast": ("-f input.fts", "input.fts"), + "xchange_block_labels": ("-e design.grp", "design.grp"), + "print_unique_perm": ("-q", True), + "print_info_parallelMode": ("-Q", True), + "num_permutations": ("-n 10", 10), + "vox_pvalus": ("-x", True), + "fstats_only": ("--fonly", True), + "thresh_free_cluster": ("-T", True), + "thresh_free_cluster_2Dopt": ("--T2", True), + "cluster_thresholding": ("-c 0.20", 0.20), + "cluster_mass_thresholding": ("-C 0.40", 0.40), + "fcluster_thresholding": ("-F 0.10", 0.10), + "fcluster_mass_thresholding": ("-S 0.30", 0.30), + "variance_smoothing": ("-v 0.20", 0.20), + "diagnostics_off": ("--quiet", True), + "output_raw": ("-R", True), + "output_perm_vect": ("-P", True), + "int_seed": ("--seed=20", 20), + "TFCE_height_param": ("--tfce_H=0.11", 0.11), + "TFCE_extent_param": ("--tfce_E=0.50", 0.50), + "TFCE_connectivity": ("--tfce_C=0.30", 0.30), + "list_num_voxel_EVs_pos": ("--vxl=" + repr([1, 2, 3, 4]), repr([1, 2, 3, 4])), + "list_img_voxel_EVs": ("--vxf=" + repr([6, 7, 8, 9, 3]), repr([6, 7, 8, 9, 3])), } for name, settings in list(opt_map.items()): rand4 = fsl.Randomise_parallel( - input_4D='infile', output_rootname='root', **{ - name: settings[1] - }) - assert rand4.cmdline == rand4.cmd + ' -i infile -o root ' + settings[0] + input_4D="infile", output_rootname="root", **{name: settings[1]} + ) + assert rand4.cmdline == rand4.cmd + " -i infile -o root " + settings[0] # test proj_thresh -@pytest.mark.xfail( - reason="These tests are skipped until we clean up some of this code") +@pytest.mark.xfail(reason="These tests are skipped until we clean up some of this code") def test_Proj_thresh(): proj = fsl.ProjThresh() # make sure command gets called - assert proj.cmd == 'proj_thresh' + assert proj.cmd == "proj_thresh" # test raising error with mandatory args absent with pytest.raises(ValueError): proj.run() # .inputs based parameters setting - proj.inputs.volumes = ['vol1', 'vol2', 'vol3'] + proj.inputs.volumes = ["vol1", "vol2", "vol3"] 
proj.inputs.threshold = 3 - assert proj.cmdline == 'proj_thresh vol1 vol2 vol3 3' + assert proj.cmdline == "proj_thresh vol1 vol2 vol3 3" - proj2 = fsl.ProjThresh(threshold=10, volumes=['vola', 'volb']) - assert proj2.cmdline == 'proj_thresh vola volb 10' + proj2 = fsl.ProjThresh(threshold=10, volumes=["vola", "volb"]) + assert proj2.cmdline == "proj_thresh vola volb 10" # .run based parameters setting proj3 = fsl.ProjThresh() - results = proj3.run(volumes=['inp1', 'inp3', 'inp2'], threshold=2) - assert results.runtime.cmdline == 'proj_thresh inp1 inp3 inp2 2' + results = proj3.run(volumes=["inp1", "inp3", "inp2"], threshold=2) + assert results.runtime.cmdline == "proj_thresh inp1 inp3 inp2 2" assert results.runtime.returncode != 0 assert isinstance(results.interface.inputs.volumes, list) assert results.interface.inputs.threshold == 2 @@ -239,103 +231,104 @@ def test_Proj_thresh(): # test vec_reg -@pytest.mark.xfail( - reason="These tests are skipped until we clean up some of this code") +@pytest.mark.xfail(reason="These tests are skipped until we clean up some of this code") def test_Vec_reg(): vrg = fsl.VecReg() # make sure command gets called - assert vrg.cmd == 'vecreg' + assert vrg.cmd == "vecreg" # test raising error with mandatory args absent with pytest.raises(ValueError): vrg.run() # .inputs based parameters setting - vrg.inputs.infile = 'infile' - vrg.inputs.outfile = 'outfile' - vrg.inputs.refVolName = 'MNI152' - vrg.inputs.affineTmat = 'tmat.mat' - assert vrg.cmdline == 'vecreg -i infile -o outfile -r MNI152 -t tmat.mat' + vrg.inputs.infile = "infile" + vrg.inputs.outfile = "outfile" + vrg.inputs.refVolName = "MNI152" + vrg.inputs.affineTmat = "tmat.mat" + assert vrg.cmdline == "vecreg -i infile -o outfile -r MNI152 -t tmat.mat" # .run based parameter setting vrg2 = fsl.VecReg( - infile='infile2', - outfile='outfile2', - refVolName='MNI152', - affineTmat='tmat2.mat', - brainMask='nodif_brain_mask') + infile="infile2", + outfile="outfile2", + refVolName="MNI152", + affineTmat="tmat2.mat", + brainMask="nodif_brain_mask", + ) actualCmdline = sorted(vrg2.cmdline.split()) - cmd = 'vecreg -i infile2 -o outfile2 -r MNI152 -t tmat2.mat -m nodif_brain_mask' + cmd = "vecreg -i infile2 -o outfile2 -r MNI152 -t tmat2.mat -m nodif_brain_mask" desiredCmdline = sorted(cmd.split()) assert actualCmdline == desiredCmdline vrg3 = fsl.VecReg() results = vrg3.run( - infile='infile3', - outfile='outfile3', - refVolName='MNI152', - affineTmat='tmat3.mat', + infile="infile3", + outfile="outfile3", + refVolName="MNI152", + affineTmat="tmat3.mat", ) - assert results.runtime.cmdline == \ - 'vecreg -i infile3 -o outfile3 -r MNI152 -t tmat3.mat' + assert ( + results.runtime.cmdline + == "vecreg -i infile3 -o outfile3 -r MNI152 -t tmat3.mat" + ) assert results.runtime.returncode != 0 - assert results.interface.inputs.infile == 'infile3' - assert results.interface.inputs.outfile == 'outfile3' - assert results.interface.inputs.refVolName == 'MNI152' - assert results.interface.inputs.affineTmat == 'tmat3.mat' + assert results.interface.inputs.infile == "infile3" + assert results.interface.inputs.outfile == "outfile3" + assert results.interface.inputs.refVolName == "MNI152" + assert results.interface.inputs.affineTmat == "tmat3.mat" # test arguments for opt_map opt_map = { - 'verbose': ('-v', True), - 'helpDoc': ('-h', True), - 'tensor': ('--tensor', True), - 'affineTmat': ('-t Tmat', 'Tmat'), - 'warpFile': ('-w wrpFile', 'wrpFile'), - 'interpolation': ('--interp=sinc', 'sinc'), - 'brainMask': ('-m mask', 
'mask') + "verbose": ("-v", True), + "helpDoc": ("-h", True), + "tensor": ("--tensor", True), + "affineTmat": ("-t Tmat", "Tmat"), + "warpFile": ("-w wrpFile", "wrpFile"), + "interpolation": ("--interp=sinc", "sinc"), + "brainMask": ("-m mask", "mask"), } for name, settings in list(opt_map.items()): vrg4 = fsl.VecReg( - infile='infile', - outfile='outfile', - refVolName='MNI152', - **{ - name: settings[1] - }) - assert vrg4.cmdline == vrg4.cmd + \ - ' -i infile -o outfile -r MNI152 ' + settings[0] + infile="infile", + outfile="outfile", + refVolName="MNI152", + **{name: settings[1]} + ) + assert ( + vrg4.cmdline == vrg4.cmd + " -i infile -o outfile -r MNI152 " + settings[0] + ) # test find_the_biggest -@pytest.mark.xfail( - reason="These tests are skipped until we clean up some of this code") +@pytest.mark.xfail(reason="These tests are skipped until we clean up some of this code") def test_Find_the_biggest(): fbg = fsl.FindTheBiggest() # make sure command gets called - assert fbg.cmd == 'find_the_biggest' + assert fbg.cmd == "find_the_biggest" # test raising error with mandatory args absent with pytest.raises(ValueError): fbg.run() # .inputs based parameters setting - fbg.inputs.infiles = 'seed*' - fbg.inputs.outfile = 'fbgfile' - assert fbg.cmdline == 'find_the_biggest seed* fbgfile' + fbg.inputs.infiles = "seed*" + fbg.inputs.outfile = "fbgfile" + assert fbg.cmdline == "find_the_biggest seed* fbgfile" - fbg2 = fsl.FindTheBiggest(infiles='seed2*', outfile='fbgfile2') - assert fbg2.cmdline == 'find_the_biggest seed2* fbgfile2' + fbg2 = fsl.FindTheBiggest(infiles="seed2*", outfile="fbgfile2") + assert fbg2.cmdline == "find_the_biggest seed2* fbgfile2" # .run based parameters setting fbg3 = fsl.FindTheBiggest() - results = fbg3.run(infiles='seed3', outfile='out3') - assert results.runtime.cmdline == 'find_the_biggest seed3 out3' + results = fbg3.run(infiles="seed3", outfile="out3") + assert results.runtime.cmdline == "find_the_biggest seed3 out3" # test arguments for opt_map # Find_the_biggest doesn't have an opt_map{} @@ -359,8 +352,9 @@ def test_tbss_skeleton(create_files_in_directory): # First by implicit argument skeletor.inputs.skeleton_file = True - assert skeletor.cmdline == \ - "tbss_skeleton -i a.nii -o %s" % os.path.join(newdir, "a_skeleton.nii") + assert skeletor.cmdline == "tbss_skeleton -i a.nii -o %s" % os.path.join( + newdir, "a_skeleton.nii" + ) # Now with a specific name skeletor.inputs.skeleton_file = "old_boney.nii" @@ -379,15 +373,19 @@ def test_tbss_skeleton(create_files_in_directory): bones.inputs.data_file = "b.nii" # Even though that's silly # Now we get a command line - assert bones.cmdline == \ - "tbss_skeleton -i a.nii -p 0.200 b.nii %s b.nii %s" % (Info.standard_image("LowerCingulum_1mm.nii.gz"), - os.path.join(newdir, "b_skeletonised.nii")) + assert bones.cmdline == "tbss_skeleton -i a.nii -p 0.200 b.nii %s b.nii %s" % ( + Info.standard_image("LowerCingulum_1mm.nii.gz"), + os.path.join(newdir, "b_skeletonised.nii"), + ) # Can we specify a mask? 
bones.inputs.use_cingulum_mask = Undefined bones.inputs.search_mask_file = "a.nii" - assert bones.cmdline == \ - "tbss_skeleton -i a.nii -p 0.200 b.nii a.nii b.nii %s" % os.path.join(newdir, "b_skeletonised.nii") + assert ( + bones.cmdline + == "tbss_skeleton -i a.nii -p 0.200 b.nii a.nii b.nii %s" + % os.path.join(newdir, "b_skeletonised.nii") + ) @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") @@ -408,15 +406,20 @@ def test_distancemap(create_files_in_directory): # It should assert mapper.cmdline == "distancemap --out=%s --in=a.nii" % os.path.join( - newdir, "a_dstmap.nii") + newdir, "a_dstmap.nii" + ) # And we should be able to write out a maxima map mapper.inputs.local_max_file = True - assert mapper.cmdline == \ - "distancemap --out=%s --in=a.nii --localmax=%s" % (os.path.join(newdir, "a_dstmap.nii"), - os.path.join(newdir, "a_lclmax.nii")) + assert mapper.cmdline == "distancemap --out=%s --in=a.nii --localmax=%s" % ( + os.path.join(newdir, "a_dstmap.nii"), + os.path.join(newdir, "a_lclmax.nii"), + ) # And call it whatever we want mapper.inputs.local_max_file = "max.nii" - assert mapper.cmdline == \ - "distancemap --out=%s --in=a.nii --localmax=max.nii" % os.path.join(newdir, "a_dstmap.nii") + assert ( + mapper.cmdline + == "distancemap --out=%s --in=a.nii --localmax=max.nii" + % os.path.join(newdir, "a_dstmap.nii") + ) diff --git a/nipype/interfaces/fsl/tests/test_epi.py b/nipype/interfaces/fsl/tests/test_epi.py index bf025e991d..e8f408de45 100644 --- a/nipype/interfaces/fsl/tests/test_epi.py +++ b/nipype/interfaces/fsl/tests/test_epi.py @@ -17,7 +17,7 @@ def test_eddy_correct2(create_files_in_directory): eddy = fsl.EddyCorrect() # make sure command gets called - assert eddy.cmd == 'eddy_correct' + assert eddy.cmd == "eddy_correct" # test raising error with mandatory args absent with pytest.raises(ValueError): @@ -25,14 +25,13 @@ def test_eddy_correct2(create_files_in_directory): # .inputs based parameters setting eddy.inputs.in_file = filelist[0] - eddy.inputs.out_file = 'foo_eddc.nii' + eddy.inputs.out_file = "foo_eddc.nii" eddy.inputs.ref_num = 100 - assert eddy.cmdline == 'eddy_correct %s foo_eddc.nii 100' % filelist[0] + assert eddy.cmdline == "eddy_correct %s foo_eddc.nii 100" % filelist[0] # .run based parameter setting - eddy2 = fsl.EddyCorrect( - in_file=filelist[0], out_file='foo_ec.nii', ref_num=20) - assert eddy2.cmdline == 'eddy_correct %s foo_ec.nii 20' % filelist[0] + eddy2 = fsl.EddyCorrect(in_file=filelist[0], out_file="foo_ec.nii", ref_num=20) + assert eddy2.cmdline == "eddy_correct %s foo_ec.nii 20" % filelist[0] # test arguments for opt_map # eddy_correct class doesn't have opt_map{} diff --git a/nipype/interfaces/fsl/tests/test_maths.py b/nipype/interfaces/fsl/tests/test_maths.py index 7760c6dbe4..9b05645997 100644 --- a/nipype/interfaces/fsl/tests/test_maths.py +++ b/nipype/interfaces/fsl/tests/test_maths.py @@ -31,23 +31,23 @@ def test_maths_base(create_files_in_directory_plus_output_type): out_file = "a_maths{}".format(out_ext) # Now test the most basic command line - assert maths.cmdline == "fslmaths a.nii {}".format( - os.path.join(testdir, out_file)) + assert maths.cmdline == "fslmaths a.nii {}".format(os.path.join(testdir, out_file)) # Now test that we can set the various data types dtypes = ["float", "char", "int", "short", "double", "input"] int_cmdline = "fslmaths -dt {} a.nii " + os.path.join(testdir, out_file) - out_cmdline = "fslmaths a.nii " + os.path.join(testdir, - out_file) + " -odt {}" - duo_cmdline = "fslmaths -dt {} a.nii " + 
os.path.join( - testdir, out_file) + " -odt {}" + out_cmdline = "fslmaths a.nii " + os.path.join(testdir, out_file) + " -odt {}" + duo_cmdline = ( + "fslmaths -dt {} a.nii " + os.path.join(testdir, out_file) + " -odt {}" + ) for dtype in dtypes: foo = fsl.MathsCommand(in_file="a.nii", internal_datatype=dtype) assert foo.cmdline == int_cmdline.format(dtype) bar = fsl.MathsCommand(in_file="a.nii", output_datatype=dtype) assert bar.cmdline == out_cmdline.format(dtype) foobar = fsl.MathsCommand( - in_file="a.nii", internal_datatype=dtype, output_datatype=dtype) + in_file="a.nii", internal_datatype=dtype, output_datatype=dtype + ) assert foobar.cmdline == duo_cmdline.format(dtype, dtype) # Test that we can ask for an outfile name @@ -81,8 +81,7 @@ def test_changedt(create_files_in_directory_plus_output_type): dtypes = ["float", "char", "int", "short", "double", "input"] cmdline = "fslmaths a.nii b.nii -odt {}" for dtype in dtypes: - foo = fsl.MathsCommand( - in_file="a.nii", out_file="b.nii", output_datatype=dtype) + foo = fsl.MathsCommand(in_file="a.nii", out_file="b.nii", output_datatype=dtype) assert foo.cmdline == cmdline.format(dtype) @@ -102,18 +101,20 @@ def test_threshold(create_files_in_directory_plus_output_type): # Test the various opstrings cmdline = "fslmaths a.nii {} b.nii" - for val in [0, 0., -1, -1.5, -0.5, 0.5, 3, 400, 400.5]: + for val in [0, 0.0, -1, -1.5, -0.5, 0.5, 3, 400, 400.5]: thresh.inputs.thresh = val assert thresh.cmdline == cmdline.format("-thr {:.10f}".format(val)) val = "{:.10f}".format(42) thresh = fsl.Threshold( - in_file="a.nii", out_file="b.nii", thresh=42, use_robust_range=True) + in_file="a.nii", out_file="b.nii", thresh=42, use_robust_range=True + ) assert thresh.cmdline == cmdline.format("-thrp " + val) thresh.inputs.use_nonzero_voxels = True assert thresh.cmdline == cmdline.format("-thrP " + val) thresh = fsl.Threshold( - in_file="a.nii", out_file="b.nii", thresh=42, direction="above") + in_file="a.nii", out_file="b.nii", thresh=42, direction="above" + ) assert thresh.cmdline == cmdline.format("-uthr " + val) thresh.inputs.use_robust_range = True assert thresh.cmdline == cmdline.format("-uthrp " + val) @@ -143,7 +144,8 @@ def test_meanimage(create_files_in_directory_plus_output_type): # Test the auto naming meaner = fsl.MeanImage(in_file="a.nii") assert meaner.cmdline == "fslmaths a.nii -Tmean {}".format( - os.path.join(testdir, "a_mean{}".format(out_ext))) + os.path.join(testdir, "a_mean{}".format(out_ext)) + ) @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") @@ -166,9 +168,10 @@ def test_stdimage(create_files_in_directory_plus_output_type): assert stder.cmdline == cmdline.format(dim) # Test the auto naming - stder = fsl.StdImage(in_file="a.nii", output_type='NIFTI') + stder = fsl.StdImage(in_file="a.nii", output_type="NIFTI") assert stder.cmdline == "fslmaths a.nii -Tstd {}".format( - os.path.join(testdir, "a_std.nii")) + os.path.join(testdir, "a_std.nii") + ) @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") @@ -193,7 +196,8 @@ def test_maximage(create_files_in_directory_plus_output_type): # Test the auto naming maxer = fsl.MaxImage(in_file="a.nii") assert maxer.cmdline == "fslmaths a.nii -Tmax {}".format( - os.path.join(testdir, "a_max{}".format(out_ext))) + os.path.join(testdir, "a_max{}".format(out_ext)) + ) @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") @@ -212,19 +216,18 @@ def test_smooth(create_files_in_directory_plus_output_type): # Test smoothing kernels cmdline = "fslmaths a.nii -s {:.5f} b.nii" - for val 
in [0, 1., 1, 25, 0.5, 8 / 3.]: - smoother = fsl.IsotropicSmooth( - in_file="a.nii", out_file="b.nii", sigma=val) + for val in [0, 1.0, 1, 25, 0.5, 8 / 3.0]: + smoother = fsl.IsotropicSmooth(in_file="a.nii", out_file="b.nii", sigma=val) assert smoother.cmdline == cmdline.format(val) - smoother = fsl.IsotropicSmooth( - in_file="a.nii", out_file="b.nii", fwhm=val) + smoother = fsl.IsotropicSmooth(in_file="a.nii", out_file="b.nii", fwhm=val) val = float(val) / np.sqrt(8 * np.log(2)) assert smoother.cmdline == cmdline.format(val) # Test automatic naming smoother = fsl.IsotropicSmooth(in_file="a.nii", sigma=5) assert smoother.cmdline == "fslmaths a.nii -s {:.5f} {}".format( - 5, os.path.join(testdir, "a_smooth{}".format(out_ext))) + 5, os.path.join(testdir, "a_smooth{}".format(out_ext)) + ) @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") @@ -248,7 +251,8 @@ def test_mask(create_files_in_directory_plus_output_type): # Test auto name generation masker = fsl.ApplyMask(in_file="a.nii", mask_file="b.nii") assert masker.cmdline == "fslmaths a.nii -mas b.nii " + os.path.join( - testdir, "a_masked{}".format(out_ext)) + testdir, "a_masked{}".format(out_ext) + ) @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") @@ -276,8 +280,10 @@ def test_dilation(create_files_in_directory_plus_output_type): for size in [1, 1.5, 5]: diller.inputs.kernel_shape = k diller.inputs.kernel_size = size - assert diller.cmdline == "fslmaths a.nii -kernel {} {:.4f} -dilF b.nii".format( - k, size) + assert ( + diller.cmdline + == "fslmaths a.nii -kernel {} {:.4f} -dilF b.nii".format(k, size) + ) # Test that we can use a file kernel f = open("kernel.txt", "w").close() @@ -290,7 +296,8 @@ def test_dilation(create_files_in_directory_plus_output_type): # Test that we don't need to request an out name dil = fsl.DilateImage(in_file="a.nii", operation="max") assert dil.cmdline == "fslmaths a.nii -dilF {}".format( - os.path.join(testdir, "a_dil{}".format(out_ext))) + os.path.join(testdir, "a_dil{}".format(out_ext)) + ) @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") @@ -313,7 +320,8 @@ def test_erosion(create_files_in_directory_plus_output_type): # Test that we don't need to request an out name erode = fsl.ErodeImage(in_file="a.nii") assert erode.cmdline == "fslmaths a.nii -ero {}".format( - os.path.join(testdir, "a_ero{}".format(out_ext))) + os.path.join(testdir, "a_ero{}".format(out_ext)) + ) @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") @@ -338,7 +346,8 @@ def test_spatial_filter(create_files_in_directory_plus_output_type): # Test that we don't need to ask for an out name filter = fsl.SpatialFilter(in_file="a.nii", operation="mean") assert filter.cmdline == "fslmaths a.nii -fmean {}".format( - os.path.join(testdir, "a_filt{}".format(out_ext))) + os.path.join(testdir, "a_filt{}".format(out_ext)) + ) @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") @@ -356,10 +365,7 @@ def test_unarymaths(create_files_in_directory_plus_output_type): maths.run() # Test the different operations - ops = [ - "exp", "log", "sin", "cos", "sqr", "sqrt", "recip", "abs", "bin", - "index" - ] + ops = ["exp", "log", "sin", "cos", "sqr", "sqrt", "recip", "abs", "bin", "index"] for op in ops: maths.inputs.operation = op assert maths.cmdline == "fslmaths a.nii -{} b.nii".format(op) @@ -368,7 +374,8 @@ def test_unarymaths(create_files_in_directory_plus_output_type): for op in ops: maths = fsl.UnaryMaths(in_file="a.nii", operation=op) assert maths.cmdline == "fslmaths a.nii -{} {}".format( - op, 
os.path.join(testdir, "a_{}{}".format(op, out_ext))) + op, os.path.join(testdir, "a_{}{}".format(op, out_ext)) + ) @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") @@ -387,26 +394,25 @@ def test_binarymaths(create_files_in_directory_plus_output_type): # Test the different operations ops = ["add", "sub", "mul", "div", "rem", "min", "max"] - operands = ["b.nii", -2, -0.5, 0, .123456, np.pi, 500] + operands = ["b.nii", -2, -0.5, 0, 0.123456, np.pi, 500] for op in ops: for ent in operands: - maths = fsl.BinaryMaths( - in_file="a.nii", out_file="c.nii", operation=op) + maths = fsl.BinaryMaths(in_file="a.nii", out_file="c.nii", operation=op) if ent == "b.nii": maths.inputs.operand_file = ent - assert maths.cmdline == "fslmaths a.nii -{} b.nii c.nii".format( - op) + assert maths.cmdline == "fslmaths a.nii -{} b.nii c.nii".format(op) else: maths.inputs.operand_value = ent assert maths.cmdline == "fslmaths a.nii -{} {:.8f} c.nii".format( - op, ent) + op, ent + ) # Test that we don't need to ask for an out file for op in ops: - maths = fsl.BinaryMaths( - in_file="a.nii", operation=op, operand_file="b.nii") + maths = fsl.BinaryMaths(in_file="a.nii", operation=op, operand_file="b.nii") assert maths.cmdline == "fslmaths a.nii -{} b.nii {}".format( - op, os.path.join(testdir, "a_maths{}".format(out_ext))) + op, os.path.join(testdir, "a_maths{}".format(out_ext)) + ) @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") @@ -425,19 +431,18 @@ def test_multimaths(create_files_in_directory_plus_output_type): # Test a few operations maths.inputs.operand_files = ["a.nii", "b.nii"] - opstrings = [ - "-add %s -div %s", "-max 1 -sub %s -min %s", "-mas %s -add %s" - ] + opstrings = ["-add %s -div %s", "-max 1 -sub %s -min %s", "-mas %s -add %s"] for ostr in opstrings: maths.inputs.op_string = ostr - assert maths.cmdline == "fslmaths a.nii %s c.nii" % ostr % ("a.nii", - "b.nii") + assert maths.cmdline == "fslmaths a.nii %s c.nii" % ostr % ("a.nii", "b.nii") # Test that we don't need to ask for an out file maths = fsl.MultiImageMaths( - in_file="a.nii", op_string="-add %s -mul 5", operand_files=["b.nii"]) - assert maths.cmdline == \ - "fslmaths a.nii -add b.nii -mul 5 %s" % os.path.join(testdir, "a_maths%s" % out_ext) + in_file="a.nii", op_string="-add %s -mul 5", operand_files=["b.nii"] + ) + assert maths.cmdline == "fslmaths a.nii -add b.nii -mul 5 %s" % os.path.join( + testdir, "a_maths%s" % out_ext + ) @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") @@ -459,9 +464,11 @@ def test_tempfilt(create_files_in_directory_plus_output_type): filt.inputs.highpass_sigma = win[0] filt.inputs.lowpass_sigma = win[1] assert filt.cmdline == "fslmaths a.nii -bptf {:.6f} {:.6f} b.nii".format( - win[0], win[1]) + win[0], win[1] + ) # Test that we don't need to ask for an out file filt = fsl.TemporalFilter(in_file="a.nii", highpass_sigma=64) - assert filt.cmdline == \ - "fslmaths a.nii -bptf 64.000000 -1.000000 {}".format(os.path.join(testdir, "a_filt{}".format(out_ext))) + assert filt.cmdline == "fslmaths a.nii -bptf 64.000000 -1.000000 {}".format( + os.path.join(testdir, "a_filt{}".format(out_ext)) + ) diff --git a/nipype/interfaces/fsl/tests/test_model.py b/nipype/interfaces/fsl/tests/test_model.py index c5c2f5bf35..ea86d8f628 100644 --- a/nipype/interfaces/fsl/tests/test_model.py +++ b/nipype/interfaces/fsl/tests/test_model.py @@ -13,15 +13,17 @@ def test_MultipleRegressDesign(tmpdir): tmpdir.chdir() foo = fsl.MultipleRegressDesign() foo.inputs.regressors = dict( - voice_stenght=[1, 1, 1], 
age=[0.2, 0.4, 0.5], BMI=[1, -1, 2]) - con1 = ['voice_and_age', 'T', ['age', 'voice_stenght'], [0.5, 0.5]] - con2 = ['just_BMI', 'T', ['BMI'], [1]] - foo.inputs.contrasts = [con1, con2, ['con3', 'F', [con1, con2]]] + voice_stenght=[1, 1, 1], age=[0.2, 0.4, 0.5], BMI=[1, -1, 2] + ) + con1 = ["voice_and_age", "T", ["age", "voice_stenght"], [0.5, 0.5]] + con2 = ["just_BMI", "T", ["BMI"], [1]] + foo.inputs.contrasts = [con1, con2, ["con3", "F", [con1, con2]]] res = foo.run() for ii in ["mat", "con", "fts", "grp"]: - assert getattr(res.outputs, - "design_" + ii) == tmpdir.join('design.' + ii).strpath + assert ( + getattr(res.outputs, "design_" + ii) == tmpdir.join("design." + ii).strpath + ) design_mat_expected_content = """/NumWaves 3 /NumPoints 3 @@ -61,5 +63,6 @@ def test_MultipleRegressDesign(tmpdir): 1 """ for ii in ["mat", "con", "fts", "grp"]: - assert tmpdir.join('design.' + ii).read() == eval( - "design_" + ii + "_expected_content") + assert tmpdir.join("design." + ii).read() == eval( + "design_" + ii + "_expected_content" + ) diff --git a/nipype/interfaces/fsl/tests/test_preprocess.py b/nipype/interfaces/fsl/tests/test_preprocess.py index 5caffae1b1..438f3f0ec4 100644 --- a/nipype/interfaces/fsl/tests/test_preprocess.py +++ b/nipype/interfaces/fsl/tests/test_preprocess.py @@ -23,7 +23,7 @@ def fsl_name(obj, fname): @pytest.fixture() def setup_infile(tmpdir): ext = Info.output_type_to_ext(Info.output_type()) - tmp_infile = tmpdir.join('foo' + ext) + tmp_infile = tmpdir.join("foo" + ext) tmp_infile.open("w") return (tmp_infile.strpath, tmpdir.strpath) @@ -32,7 +32,7 @@ def setup_infile(tmpdir): def test_bet(setup_infile): tmp_infile, tp_dir = setup_infile better = fsl.BET() - assert better.cmd == 'bet' + assert better.cmd == "bet" # Test raising error with mandatory args absent with pytest.raises(ValueError): @@ -40,19 +40,19 @@ def test_bet(setup_infile): # Test generated outfile name better.inputs.in_file = tmp_infile - outfile = fsl_name(better, 'foo_brain') + outfile = fsl_name(better, "foo_brain") outpath = os.path.join(os.getcwd(), outfile) - realcmd = 'bet %s %s' % (tmp_infile, outpath) + realcmd = "bet %s %s" % (tmp_infile, outpath) assert better.cmdline == realcmd # Test specified outfile name - outfile = fsl_name(better, '/newdata/bar') + outfile = fsl_name(better, "/newdata/bar") better.inputs.out_file = outfile - realcmd = 'bet %s %s' % (tmp_infile, outfile) + realcmd = "bet %s %s" % (tmp_infile, outfile) assert better.cmdline == realcmd # infile foo.nii doesn't exist def func(): - better.run(in_file='foo2.nii', out_file='bar.nii') + better.run(in_file="foo2.nii", out_file="bar.nii") with pytest.raises(TraitError): func() @@ -60,17 +60,17 @@ def func(): # Our options and some test values for them # Should parallel the opt_map structure in the class for clarity opt_map = { - 'outline': ('-o', True), - 'mask': ('-m', True), - 'skull': ('-s', True), - 'no_output': ('-n', True), - 'frac': ('-f 0.40', 0.4), - 'vertical_gradient': ('-g 0.75', 0.75), - 'radius': ('-r 20', 20), - 'center': ('-c 54 75 80', [54, 75, 80]), - 'threshold': ('-t', True), - 'mesh': ('-e', True), - 'surfaces': ('-A', True) + "outline": ("-o", True), + "mask": ("-m", True), + "skull": ("-s", True), + "no_output": ("-n", True), + "frac": ("-f 0.40", 0.4), + "vertical_gradient": ("-g 0.75", 0.75), + "radius": ("-r 20", 20), + "center": ("-c 54 75 80", [54, 75, 80]), + "threshold": ("-t", True), + "mesh": ("-e", True), + "surfaces": ("-A", True) # 'verbose': ('-v', True), # 'flags': ('--i-made-this-up', 
'--i-made-this-up'), } @@ -78,13 +78,13 @@ def func(): # test each of our arguments better = fsl.BET() - outfile = fsl_name(better, 'foo_brain') + outfile = fsl_name(better, "foo_brain") outpath = os.path.join(os.getcwd(), outfile) for name, settings in list(opt_map.items()): better = fsl.BET(**{name: settings[1]}) # Add mandatory input better.inputs.in_file = tmp_infile - realcmd = ' '.join([better.cmd, tmp_infile, outpath, settings[0]]) + realcmd = " ".join([better.cmd, tmp_infile, outpath, settings[0]]) assert better.cmdline == realcmd @@ -99,62 +99,61 @@ def test_fast(setup_infile): fasted = fsl.FAST(in_files=tmp_infile, verbose=True) fasted2 = fsl.FAST(in_files=[tmp_infile, tmp_infile], verbose=True) - assert faster.cmd == 'fast' + assert faster.cmd == "fast" assert faster.inputs.verbose assert faster.inputs.manual_seg == Undefined assert faster.inputs != fasted.inputs - assert fasted.cmdline == 'fast -v -S 1 %s' % (tmp_infile) - assert fasted2.cmdline == 'fast -v -S 2 %s %s' % (tmp_infile, tmp_infile) + assert fasted.cmdline == "fast -v -S 1 %s" % (tmp_infile) + assert fasted2.cmdline == "fast -v -S 2 %s %s" % (tmp_infile, tmp_infile) faster = fsl.FAST() faster.inputs.in_files = tmp_infile - assert faster.cmdline == 'fast -S 1 %s' % (tmp_infile) + assert faster.cmdline == "fast -S 1 %s" % (tmp_infile) faster.inputs.in_files = [tmp_infile, tmp_infile] - assert faster.cmdline == 'fast -S 2 %s %s' % (tmp_infile, tmp_infile) + assert faster.cmdline == "fast -S 2 %s %s" % (tmp_infile, tmp_infile) # Our options and some test values for them # Should parallel the opt_map structure in the class for clarity opt_map = { - 'number_classes': ('-n 4', 4), - 'bias_iters': ('-I 5', 5), - 'bias_lowpass': ('-l 15', 15), - 'img_type': ('-t 2', 2), - 'init_seg_smooth': ('-f 0.035', 0.035), - 'segments': ('-g', True), - 'init_transform': ('-a %s' % (tmp_infile), '%s' % (tmp_infile)), - 'other_priors': - ('-A %s %s %s' % (tmp_infile, tmp_infile, tmp_infile), - (['%s' % (tmp_infile), - '%s' % (tmp_infile), - '%s' % (tmp_infile)])), - 'no_pve': ('--nopve', True), - 'output_biasfield': ('-b', True), - 'output_biascorrected': ('-B', True), - 'no_bias': ('-N', True), - 'out_basename': ('-o fasted', 'fasted'), - 'use_priors': ('-P', True), - 'segment_iters': ('-W 14', 14), - 'mixel_smooth': ('-R 0.25', 0.25), - 'iters_afterbias': ('-O 3', 3), - 'hyper': ('-H 0.15', 0.15), - 'verbose': ('-v', True), - 'manual_seg': ('-s %s' % (tmp_infile), '%s' % (tmp_infile)), - 'probability_maps': ('-p', True), + "number_classes": ("-n 4", 4), + "bias_iters": ("-I 5", 5), + "bias_lowpass": ("-l 15", 15), + "img_type": ("-t 2", 2), + "init_seg_smooth": ("-f 0.035", 0.035), + "segments": ("-g", True), + "init_transform": ("-a %s" % (tmp_infile), "%s" % (tmp_infile)), + "other_priors": ( + "-A %s %s %s" % (tmp_infile, tmp_infile, tmp_infile), + (["%s" % (tmp_infile), "%s" % (tmp_infile), "%s" % (tmp_infile)]), + ), + "no_pve": ("--nopve", True), + "output_biasfield": ("-b", True), + "output_biascorrected": ("-B", True), + "no_bias": ("-N", True), + "out_basename": ("-o fasted", "fasted"), + "use_priors": ("-P", True), + "segment_iters": ("-W 14", 14), + "mixel_smooth": ("-R 0.25", 0.25), + "iters_afterbias": ("-O 3", 3), + "hyper": ("-H 0.15", 0.15), + "verbose": ("-v", True), + "manual_seg": ("-s %s" % (tmp_infile), "%s" % (tmp_infile)), + "probability_maps": ("-p", True), } # test each of our arguments for name, settings in list(opt_map.items()): faster = fsl.FAST(in_files=tmp_infile, **{name: settings[1]}) - assert 
faster.cmdline == ' '.join( - [faster.cmd, settings[0], - "-S 1 %s" % tmp_infile]) + assert faster.cmdline == " ".join( + [faster.cmd, settings[0], "-S 1 %s" % tmp_infile] + ) @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") def test_fast_list_outputs(setup_infile, tmpdir): - ''' By default (no -o), FSL's fast command outputs files into the same + """ By default (no -o), FSL's fast command outputs files into the same directory as the input files. If the flag -o is set, it outputs files into - the cwd ''' + the cwd """ def _run_and_test(opts, output_base): outputs = fsl.FAST(**opts)._list_outputs() @@ -162,21 +161,22 @@ def _run_and_test(opts, output_base): if output: for filename in ensure_list(output): assert os.path.realpath(filename).startswith( - os.path.realpath(output_base)) + os.path.realpath(output_base) + ) # set up tmp_infile, indir = setup_infile cwd = tmpdir.mkdir("new") cwd.chdir() assert indir != cwd.strpath - out_basename = 'a_basename' + out_basename = "a_basename" # run and test - opts = {'in_files': tmp_infile} + opts = {"in_files": tmp_infile} input_path, input_filename, input_ext = split_filename(tmp_infile) _run_and_test(opts, os.path.join(input_path, input_filename)) - opts['out_basename'] = out_basename + opts["out_basename"] = out_basename _run_and_test(opts, os.path.join(cwd.strpath, out_basename)) @@ -196,32 +196,36 @@ def test_flirt(setup_flirt): tmpdir, infile, reffile = setup_flirt flirter = fsl.FLIRT() - assert flirter.cmd == 'flirt' + assert flirter.cmd == "flirt" flirter.inputs.bins = 256 - flirter.inputs.cost = 'mutualinfo' + flirter.inputs.cost = "mutualinfo" flirted = fsl.FLIRT( in_file=infile, reference=reffile, - out_file='outfile', - out_matrix_file='outmat.mat', + out_file="outfile", + out_matrix_file="outmat.mat", bins=256, - cost='mutualinfo') + cost="mutualinfo", + ) flirt_est = fsl.FLIRT( in_file=infile, reference=reffile, - out_matrix_file='outmat.mat', + out_matrix_file="outmat.mat", bins=256, - cost='mutualinfo') + cost="mutualinfo", + ) assert flirter.inputs != flirted.inputs assert flirted.inputs != flirt_est.inputs assert flirter.inputs.bins == flirted.inputs.bins assert flirter.inputs.cost == flirt_est.inputs.cost - realcmd = 'flirt -in %s -ref %s -out outfile -omat outmat.mat ' \ - '-bins 256 -cost mutualinfo' % (infile, reffile) + realcmd = ( + "flirt -in %s -ref %s -out outfile -omat outmat.mat " + "-bins 256 -cost mutualinfo" % (infile, reffile) + ) assert flirted.cmdline == realcmd flirter = fsl.FLIRT() @@ -236,10 +240,14 @@ def test_flirt(setup_flirt): # Generate outfile and outmatrix pth, fname, ext = split_filename(infile) - outfile = fsl_name(flirter, '%s_flirt' % fname) - outmat = '%s_flirt.mat' % fname - realcmd = 'flirt -in %s -ref %s -out %s -omat %s' % (infile, reffile, - outfile, outmat) + outfile = fsl_name(flirter, "%s_flirt" % fname) + outmat = "%s_flirt.mat" % fname + realcmd = "flirt -in %s -ref %s -out %s -omat %s" % ( + infile, + reffile, + outfile, + outmat, + ) assert flirter.cmdline == realcmd # test apply_xfm option @@ -251,10 +259,10 @@ def test_flirt(setup_flirt): axfm2 = deepcopy(axfm) # test uses_qform axfm.inputs.uses_qform = True - assert axfm.cmdline == (realcmd + ' -applyxfm -usesqform') + assert axfm.cmdline == (realcmd + " -applyxfm -usesqform") # test in_matrix_file axfm2.inputs.in_matrix_file = reffile - assert axfm2.cmdline == (realcmd + ' -applyxfm -init %s' % reffile) + assert axfm2.cmdline == (realcmd + " -applyxfm -init %s" % reffile) tmpfile = tmpdir.join("file4test.nii") 
tmpfile.open("w") @@ -262,39 +270,49 @@ def test_flirt(setup_flirt): # cmdline is updated correctly. for key, trait_spec in sorted(fsl.FLIRT.input_spec().traits().items()): # Skip mandatory inputs and the trait methods - if key in ('trait_added', 'trait_modified', 'in_file', 'reference', - 'environ', 'output_type', 'out_file', 'out_matrix_file', - 'in_matrix_file', 'apply_xfm', - 'resource_monitor', 'out_log', - 'save_log'): + if key in ( + "trait_added", + "trait_modified", + "in_file", + "reference", + "environ", + "output_type", + "out_file", + "out_matrix_file", + "in_matrix_file", + "apply_xfm", + "resource_monitor", + "out_log", + "save_log", + ): continue param = None value = None - if key == 'args': - param = '-v' - value = '-v' + if key == "args": + param = "-v" + value = "-v" elif isinstance(trait_spec.trait_type, File): value = tmpfile.strpath param = trait_spec.argstr % value elif trait_spec.default is False: param = trait_spec.argstr value = True - elif key in ('searchr_x', 'searchr_y', 'searchr_z'): + elif key in ("searchr_x", "searchr_y", "searchr_z"): value = [-45, 45] - param = trait_spec.argstr % ' '.join(str(elt) for elt in value) + param = trait_spec.argstr % " ".join(str(elt) for elt in value) else: value = trait_spec.default param = trait_spec.argstr % value - cmdline = 'flirt -in %s -ref %s' % (infile, reffile) + cmdline = "flirt -in %s -ref %s" % (infile, reffile) # Handle autogeneration of outfile pth, fname, ext = split_filename(infile) - outfile = fsl_name(fsl.FLIRT(), '%s_flirt' % fname) - outfile = ' '.join(['-out', outfile]) + outfile = fsl_name(fsl.FLIRT(), "%s_flirt" % fname) + outfile = " ".join(["-out", outfile]) # Handle autogeneration of outmatrix - outmatrix = '%s_flirt.mat' % fname - outmatrix = ' '.join(['-omat', outmatrix]) + outmatrix = "%s_flirt.mat" % fname + outmatrix = " ".join(["-omat", outmatrix]) # Build command line - cmdline = ' '.join([cmdline, outfile, outmatrix, param]) + cmdline = " ".join([cmdline, outfile, outmatrix, param]) flirter = fsl.FLIRT(in_file=infile, reference=reffile) setattr(flirter.inputs, key, value) assert flirter.cmdline == cmdline @@ -302,13 +320,13 @@ def test_flirt(setup_flirt): # Test OutputSpec flirter = fsl.FLIRT(in_file=infile, reference=reffile) pth, fname, ext = split_filename(infile) - flirter.inputs.out_file = ''.join(['foo', ext]) - flirter.inputs.out_matrix_file = ''.join(['bar', ext]) + flirter.inputs.out_file = "".join(["foo", ext]) + flirter.inputs.out_matrix_file = "".join(["bar", ext]) outs = flirter._list_outputs() - assert outs['out_file'] == \ - os.path.join(os.getcwd(), flirter.inputs.out_file) - assert outs['out_matrix_file'] == \ - os.path.join(os.getcwd(), flirter.inputs.out_matrix_file) + assert outs["out_file"] == os.path.join(os.getcwd(), flirter.inputs.out_file) + assert outs["out_matrix_file"] == os.path.join( + os.getcwd(), flirter.inputs.out_matrix_file + ) assert not isdefined(flirter.inputs.out_log) @@ -318,19 +336,19 @@ def test_mcflirt(setup_flirt): tmpdir, infile, reffile = setup_flirt frt = fsl.MCFLIRT() - assert frt.cmd == 'mcflirt' + assert frt.cmd == "mcflirt" # Test generated outfile name frt.inputs.in_file = infile _, nme = os.path.split(infile) outfile = os.path.join(os.getcwd(), nme) - outfile = frt._gen_fname(outfile, suffix='_mcf') - realcmd = 'mcflirt -in ' + infile + ' -out ' + outfile + outfile = frt._gen_fname(outfile, suffix="_mcf") + realcmd = "mcflirt -in " + infile + " -out " + outfile assert frt.cmdline == realcmd # Test specified outfile name - outfile2 = 
'/newdata/bar.nii' + outfile2 = "/newdata/bar.nii" frt.inputs.out_file = outfile2 - realcmd = 'mcflirt -in ' + infile + ' -out ' + outfile2 + realcmd = "mcflirt -in " + infile + " -out " + outfile2 assert frt.cmdline == realcmd @@ -340,36 +358,34 @@ def test_mcflirt_opt(setup_flirt): _, nme = os.path.split(infile) opt_map = { - 'cost': ('-cost mutualinfo', 'mutualinfo'), - 'bins': ('-bins 256', 256), - 'dof': ('-dof 6', 6), - 'ref_vol': ('-refvol 2', 2), - 'scaling': ('-scaling 6.00', 6.00), - 'smooth': ('-smooth 1.00', 1.00), - 'rotation': ('-rotation 2', 2), - 'stages': ('-stages 3', 3), - 'init': ('-init %s' % (infile), infile), - 'use_gradient': ('-gdt', True), - 'use_contour': ('-edge', True), - 'mean_vol': ('-meanvol', True), - 'stats_imgs': ('-stats', True), - 'save_mats': ('-mats', True), - 'save_plots': ('-plots', True), + "cost": ("-cost mutualinfo", "mutualinfo"), + "bins": ("-bins 256", 256), + "dof": ("-dof 6", 6), + "ref_vol": ("-refvol 2", 2), + "scaling": ("-scaling 6.00", 6.00), + "smooth": ("-smooth 1.00", 1.00), + "rotation": ("-rotation 2", 2), + "stages": ("-stages 3", 3), + "init": ("-init %s" % (infile), infile), + "use_gradient": ("-gdt", True), + "use_contour": ("-edge", True), + "mean_vol": ("-meanvol", True), + "stats_imgs": ("-stats", True), + "save_mats": ("-mats", True), + "save_plots": ("-plots", True), } for name, settings in list(opt_map.items()): fnt = fsl.MCFLIRT(in_file=infile, **{name: settings[1]}) outfile = os.path.join(os.getcwd(), nme) - outfile = fnt._gen_fname(outfile, suffix='_mcf') + outfile = fnt._gen_fname(outfile, suffix="_mcf") - instr = '-in %s' % (infile) - outstr = '-out %s' % (outfile) - if name in ('init', 'cost', 'dof', 'mean_vol', 'bins'): - assert fnt.cmdline == ' '.join( - [fnt.cmd, instr, settings[0], outstr]) + instr = "-in %s" % (infile) + outstr = "-out %s" % (outfile) + if name in ("init", "cost", "dof", "mean_vol", "bins"): + assert fnt.cmdline == " ".join([fnt.cmd, instr, settings[0], outstr]) else: - assert fnt.cmdline == ' '.join( - [fnt.cmd, instr, outstr, settings[0]]) + assert fnt.cmdline == " ".join([fnt.cmd, instr, outstr, settings[0]]) @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") @@ -378,8 +394,7 @@ def test_mcflirt_noinput(): fnt = fsl.MCFLIRT() with pytest.raises(ValueError) as excinfo: fnt.run() - assert str(excinfo.value).startswith( - "MCFLIRT requires a value for input 'in_file'") + assert str(excinfo.value).startswith("MCFLIRT requires a value for input 'in_file'") # test fnirt @@ -391,50 +406,58 @@ def test_fnirt(setup_flirt): tmpdir, infile, reffile = setup_flirt tmpdir.chdir() fnirt = fsl.FNIRT() - assert fnirt.cmd == 'fnirt' + assert fnirt.cmd == "fnirt" # Test list parameters - params = [('subsampling_scheme', '--subsamp', [4, 2, 2, 1], - '4,2,2,1'), ('max_nonlin_iter', '--miter', [4, 4, 4, 2], - '4,4,4,2'), ('ref_fwhm', '--reffwhm', [4, 2, 2, 0], - '4,2,2,0'), ('in_fwhm', '--infwhm', - [4, 2, 2, 0], '4,2,2,0'), - ('apply_refmask', '--applyrefmask', [0, 0, 1, 1], - '0,0,1,1'), ('apply_inmask', '--applyinmask', [0, 0, 0, 1], - '0,0,0,1'), ('regularization_lambda', '--lambda', - [0.5, 0.75], '0.5,0.75'), - ('intensity_mapping_model', '--intmod', 'global_non_linear', - 'global_non_linear')] + params = [ + ("subsampling_scheme", "--subsamp", [4, 2, 2, 1], "4,2,2,1"), + ("max_nonlin_iter", "--miter", [4, 4, 4, 2], "4,4,4,2"), + ("ref_fwhm", "--reffwhm", [4, 2, 2, 0], "4,2,2,0"), + ("in_fwhm", "--infwhm", [4, 2, 2, 0], "4,2,2,0"), + ("apply_refmask", "--applyrefmask", [0, 0, 1, 1], "0,0,1,1"), + 
("apply_inmask", "--applyinmask", [0, 0, 0, 1], "0,0,0,1"), + ("regularization_lambda", "--lambda", [0.5, 0.75], "0.5,0.75"), + ( + "intensity_mapping_model", + "--intmod", + "global_non_linear", + "global_non_linear", + ), + ] for item, flag, val, strval in params: fnirt = fsl.FNIRT(in_file=infile, ref_file=reffile, **{item: val}) - log = fnirt._gen_fname(infile, suffix='_log.txt', change_ext=False) - iout = fnirt._gen_fname(infile, suffix='_warped') - if item in ('max_nonlin_iter'): - cmd = 'fnirt --in=%s '\ - '--logout=%s'\ - ' %s=%s --ref=%s'\ - ' --iout=%s' % (infile, log, - flag, strval, reffile, iout) - elif item in ('in_fwhm', 'intensity_mapping_model'): - cmd = 'fnirt --in=%s %s=%s --logout=%s '\ - '--ref=%s --iout=%s' % (infile, flag, - strval, log, reffile, iout) - elif item.startswith('apply'): - cmd = 'fnirt %s=%s '\ - '--in=%s '\ - '--logout=%s '\ - '--ref=%s --iout=%s' % (flag, strval, - infile, log, - reffile, - iout) + log = fnirt._gen_fname(infile, suffix="_log.txt", change_ext=False) + iout = fnirt._gen_fname(infile, suffix="_warped") + if item in ("max_nonlin_iter"): + cmd = ( + "fnirt --in=%s " + "--logout=%s" + " %s=%s --ref=%s" + " --iout=%s" % (infile, log, flag, strval, reffile, iout) + ) + elif item in ("in_fwhm", "intensity_mapping_model"): + cmd = "fnirt --in=%s %s=%s --logout=%s " "--ref=%s --iout=%s" % ( + infile, + flag, + strval, + log, + reffile, + iout, + ) + elif item.startswith("apply"): + cmd = ( + "fnirt %s=%s " + "--in=%s " + "--logout=%s " + "--ref=%s --iout=%s" % (flag, strval, infile, log, reffile, iout) + ) else: - cmd = 'fnirt '\ - '--in=%s --logout=%s '\ - '--ref=%s %s=%s --iout=%s' % (infile, log, - reffile, - flag, strval, - iout) + cmd = ( + "fnirt " + "--in=%s --logout=%s " + "--ref=%s %s=%s --iout=%s" % (infile, log, reffile, flag, strval, iout) + ) assert fnirt.cmdline == cmd # Test ValueError is raised when missing mandatory args @@ -443,83 +466,84 @@ def test_fnirt(setup_flirt): fnirt.run() fnirt.inputs.in_file = infile fnirt.inputs.ref_file = reffile - intmap_basename = '%s_intmap' % fsl.FNIRT.intensitymap_file_basename( - infile) + intmap_basename = "%s_intmap" % fsl.FNIRT.intensitymap_file_basename(infile) intmap_image = fsl_name(fnirt, intmap_basename) - intmap_txt = '%s.txt' % intmap_basename + intmap_txt = "%s.txt" % intmap_basename # doing this to create the file to pass tests for file existence - with open(intmap_image, 'w'): + with open(intmap_image, "w"): pass - with open(intmap_txt, 'w'): + with open(intmap_txt, "w"): pass # test files - opt_map = [('affine_file', '--aff=%s' % infile, - infile), ('inwarp_file', '--inwarp=%s' % infile, infile), - ('in_intensitymap_file', '--intin=%s' % intmap_basename, - [intmap_image]), ('in_intensitymap_file', - '--intin=%s' % intmap_basename, - [intmap_image, intmap_txt]), - ('config_file', '--config=%s' % infile, - infile), ('refmask_file', '--refmask=%s' % infile, - infile), ('inmask_file', '--inmask=%s' % infile, - infile), ('field_file', - '--fout=%s' % infile, infile), - ('jacobian_file', '--jout=%s' % infile, - infile), ('modulatedref_file', '--refout=%s' % infile, - infile), ('out_intensitymap_file', - '--intout=%s' % intmap_basename, True), - ('out_intensitymap_file', '--intout=%s' % intmap_basename, - intmap_image), ('fieldcoeff_file', '--cout=%s' % infile, - infile), ('log_file', '--logout=%s' % infile, - infile)] + opt_map = [ + ("affine_file", "--aff=%s" % infile, infile), + ("inwarp_file", "--inwarp=%s" % infile, infile), + ("in_intensitymap_file", "--intin=%s" % 
intmap_basename, [intmap_image]), + ( + "in_intensitymap_file", + "--intin=%s" % intmap_basename, + [intmap_image, intmap_txt], + ), + ("config_file", "--config=%s" % infile, infile), + ("refmask_file", "--refmask=%s" % infile, infile), + ("inmask_file", "--inmask=%s" % infile, infile), + ("field_file", "--fout=%s" % infile, infile), + ("jacobian_file", "--jout=%s" % infile, infile), + ("modulatedref_file", "--refout=%s" % infile, infile), + ("out_intensitymap_file", "--intout=%s" % intmap_basename, True), + ("out_intensitymap_file", "--intout=%s" % intmap_basename, intmap_image), + ("fieldcoeff_file", "--cout=%s" % infile, infile), + ("log_file", "--logout=%s" % infile, infile), + ] for (name, settings, arg) in opt_map: fnirt = fsl.FNIRT(in_file=infile, ref_file=reffile, **{name: arg}) - if name in ('config_file', 'affine_file', 'field_file', - 'fieldcoeff_file'): - cmd = 'fnirt %s --in=%s '\ - '--logout=%s '\ - '--ref=%s --iout=%s' % (settings, infile, log, - reffile, iout) - elif name in ('refmask_file'): - cmd = 'fnirt --in=%s '\ - '--logout=%s --ref=%s '\ - '%s '\ - '--iout=%s' % (infile, log, - reffile, - settings, - iout) - elif name in ('in_intensitymap_file', 'inwarp_file', 'inmask_file', - 'jacobian_file'): - cmd = 'fnirt --in=%s '\ - '%s '\ - '--logout=%s --ref=%s '\ - '--iout=%s' % (infile, - settings, - log, - reffile, - iout) - elif name in ('log_file'): - cmd = 'fnirt --in=%s '\ - '%s --ref=%s '\ - '--iout=%s' % (infile, - settings, - reffile, - iout) + if name in ("config_file", "affine_file", "field_file", "fieldcoeff_file"): + cmd = ( + "fnirt %s --in=%s " + "--logout=%s " + "--ref=%s --iout=%s" % (settings, infile, log, reffile, iout) + ) + elif name in ("refmask_file"): + cmd = ( + "fnirt --in=%s " + "--logout=%s --ref=%s " + "%s " + "--iout=%s" % (infile, log, reffile, settings, iout) + ) + elif name in ( + "in_intensitymap_file", + "inwarp_file", + "inmask_file", + "jacobian_file", + ): + cmd = ( + "fnirt --in=%s " + "%s " + "--logout=%s --ref=%s " + "--iout=%s" % (infile, settings, log, reffile, iout) + ) + elif name in ("log_file"): + cmd = ( + "fnirt --in=%s " + "%s --ref=%s " + "--iout=%s" % (infile, settings, reffile, iout) + ) else: - cmd = 'fnirt --in=%s '\ - '--logout=%s %s '\ - '--ref=%s --iout=%s' % (infile, log, - settings, - reffile, iout) + cmd = ( + "fnirt --in=%s " + "--logout=%s %s " + "--ref=%s --iout=%s" % (infile, log, settings, reffile, iout) + ) assert fnirt.cmdline == cmd - if name == 'out_intensitymap_file': - assert fnirt._list_outputs()['out_intensitymap_file'] == [ - intmap_image, intmap_txt + if name == "out_intensitymap_file": + assert fnirt._list_outputs()["out_intensitymap_file"] == [ + intmap_image, + intmap_txt, ] @@ -527,32 +551,29 @@ def test_fnirt(setup_flirt): def test_applywarp(setup_flirt): tmpdir, infile, reffile = setup_flirt opt_map = { - 'out_file': ('--out=bar.nii', 'bar.nii'), - 'premat': ('--premat=%s' % (reffile), reffile), - 'postmat': ('--postmat=%s' % (reffile), reffile), + "out_file": ("--out=bar.nii", "bar.nii"), + "premat": ("--premat=%s" % (reffile), reffile), + "postmat": ("--postmat=%s" % (reffile), reffile), } # in_file, ref_file, field_file mandatory for name, settings in list(opt_map.items()): awarp = fsl.ApplyWarp( - in_file=infile, - ref_file=reffile, - field_file=reffile, - **{ - name: settings[1] - }) - if name == 'out_file': - realcmd = 'applywarp --in=%s '\ - '--ref=%s --out=%s '\ - '--warp=%s' % (infile, reffile, - settings[1], reffile) + in_file=infile, ref_file=reffile, field_file=reffile, **{name: 
settings[1]} + ) + if name == "out_file": + realcmd = ( + "applywarp --in=%s " + "--ref=%s --out=%s " + "--warp=%s" % (infile, reffile, settings[1], reffile) + ) else: - outfile = awarp._gen_fname(infile, suffix='_warp') - realcmd = 'applywarp --in=%s '\ - '--ref=%s --out=%s '\ - '--warp=%s %s' % (infile, reffile, - outfile, reffile, - settings[0]) + outfile = awarp._gen_fname(infile, suffix="_warp") + realcmd = ( + "applywarp --in=%s " + "--ref=%s --out=%s " + "--warp=%s %s" % (infile, reffile, outfile, reffile, settings[0]) + ) assert awarp.cmdline == realcmd @@ -563,32 +584,49 @@ def setup_fugue(tmpdir): import os.path as op d = np.ones((80, 80, 80)) - infile = tmpdir.join('dumbfile.nii.gz').strpath + infile = tmpdir.join("dumbfile.nii.gz").strpath nb.Nifti1Image(d, None, None).to_filename(infile) return (tmpdir, infile) @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") -@pytest.mark.parametrize("attr, out_file", [({ - "save_unmasked_fmap": True, - "fmap_in_file": "infile", - "mask_file": "infile", - "output_type": "NIFTI_GZ" -}, 'fmap_out_file'), ({ - "save_unmasked_shift": True, - "fmap_in_file": "infile", - "dwell_time": 1.e-3, - "mask_file": "infile", - "output_type": "NIFTI_GZ" -}, "shift_out_file"), ({ - "in_file": "infile", - "mask_file": "infile", - "shift_in_file": "infile", - "output_type": "NIFTI_GZ" -}, 'unwarped_file')]) +@pytest.mark.parametrize( + "attr, out_file", + [ + ( + { + "save_unmasked_fmap": True, + "fmap_in_file": "infile", + "mask_file": "infile", + "output_type": "NIFTI_GZ", + }, + "fmap_out_file", + ), + ( + { + "save_unmasked_shift": True, + "fmap_in_file": "infile", + "dwell_time": 1.0e-3, + "mask_file": "infile", + "output_type": "NIFTI_GZ", + }, + "shift_out_file", + ), + ( + { + "in_file": "infile", + "mask_file": "infile", + "shift_in_file": "infile", + "output_type": "NIFTI_GZ", + }, + "unwarped_file", + ), + ], +) def test_fugue(setup_fugue, attr, out_file): import os.path as op + tmpdir, infile = setup_fugue fugue = fsl.FUGUE() @@ -601,26 +639,26 @@ def test_fugue(setup_fugue, attr, out_file): assert isdefined(getattr(res.outputs, out_file)) trait_spec = fugue.inputs.trait(out_file) - out_name = trait_spec.name_template % 'dumbfile' - out_name += '.nii.gz' + out_name = trait_spec.name_template % "dumbfile" + out_name += ".nii.gz" assert op.basename(getattr(res.outputs, out_file)) == out_name @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") def test_first_genfname(): first = fsl.FIRST() - first.inputs.out_file = 'segment.nii' + first.inputs.out_file = "segment.nii" first.inputs.output_type = "NIFTI_GZ" - value = first._gen_fname(basename='original_segmentations') - expected_value = os.path.abspath('segment_all_fast_origsegs.nii.gz') + value = first._gen_fname(basename="original_segmentations") + expected_value = os.path.abspath("segment_all_fast_origsegs.nii.gz") assert value == expected_value - first.inputs.method = 'none' - value = first._gen_fname(basename='original_segmentations') - expected_value = os.path.abspath('segment_all_none_origsegs.nii.gz') + first.inputs.method = "none" + value = first._gen_fname(basename="original_segmentations") + expected_value = os.path.abspath("segment_all_none_origsegs.nii.gz") assert value == expected_value - first.inputs.method = 'auto' - first.inputs.list_of_specific_structures = ['L_Hipp', 'R_Hipp'] - value = first._gen_fname(basename='original_segmentations') - expected_value = os.path.abspath('segment_all_none_origsegs.nii.gz') + first.inputs.method = "auto" + 
first.inputs.list_of_specific_structures = ["L_Hipp", "R_Hipp"] + value = first._gen_fname(basename="original_segmentations") + expected_value = os.path.abspath("segment_all_none_origsegs.nii.gz") assert value == expected_value diff --git a/nipype/interfaces/fsl/tests/test_utils.py b/nipype/interfaces/fsl/tests/test_utils.py index 5df6d88a49..ca52354dd4 100644 --- a/nipype/interfaces/fsl/tests/test_utils.py +++ b/nipype/interfaces/fsl/tests/test_utils.py @@ -21,7 +21,7 @@ def test_fslroi(create_files_in_directory_plus_output_type): roi = fsl.ExtractROI() # make sure command gets called - assert roi.cmd == 'fslroi' + assert roi.cmd == "fslroi" # test raising error with mandatory args absent with pytest.raises(ValueError): @@ -29,15 +29,15 @@ def test_fslroi(create_files_in_directory_plus_output_type): # .inputs based parameters setting roi.inputs.in_file = filelist[0] - roi.inputs.roi_file = 'foo_roi.nii' + roi.inputs.roi_file = "foo_roi.nii" roi.inputs.t_min = 10 roi.inputs.t_size = 20 - assert roi.cmdline == 'fslroi %s foo_roi.nii 10 20' % filelist[0] + assert roi.cmdline == "fslroi %s foo_roi.nii 10 20" % filelist[0] # .run based parameter setting roi2 = fsl.ExtractROI( in_file=filelist[0], - roi_file='foo2_roi.nii', + roi_file="foo2_roi.nii", t_min=20, t_size=40, x_min=3, @@ -45,9 +45,9 @@ def test_fslroi(create_files_in_directory_plus_output_type): y_min=40, y_size=10, z_min=5, - z_size=20) - assert roi2.cmdline == \ - 'fslroi %s foo2_roi.nii 3 30 40 10 5 20 20 40' % filelist[0] + z_size=20, + ) + assert roi2.cmdline == "fslroi %s foo2_roi.nii 3 30 40 10 5 20 20 40" % filelist[0] # test arguments for opt_map # Fslroi class doesn't have a filled opt_map{} @@ -60,7 +60,7 @@ def test_fslmerge(create_files_in_directory_plus_output_type): merger = fsl.Merge() # make sure command gets called - assert merger.cmd == 'fslmerge' + assert merger.cmd == "fslmerge" # test raising error with mandatory args absent with pytest.raises(ValueError): @@ -68,27 +68,31 @@ def test_fslmerge(create_files_in_directory_plus_output_type): # .inputs based parameters setting merger.inputs.in_files = filelist - merger.inputs.merged_file = 'foo_merged.nii' - merger.inputs.dimension = 't' - merger.inputs.output_type = 'NIFTI' - assert merger.cmdline == 'fslmerge -t foo_merged.nii %s' % ' '.join( - filelist) + merger.inputs.merged_file = "foo_merged.nii" + merger.inputs.dimension = "t" + merger.inputs.output_type = "NIFTI" + assert merger.cmdline == "fslmerge -t foo_merged.nii %s" % " ".join(filelist) # verify that providing a tr value updates the dimension to tr merger.inputs.tr = 2.25 - assert merger.cmdline == 'fslmerge -tr foo_merged.nii %s %.2f' % ( - ' '.join(filelist), 2.25) + assert merger.cmdline == "fslmerge -tr foo_merged.nii %s %.2f" % ( + " ".join(filelist), + 2.25, + ) # .run based parameter setting merger2 = fsl.Merge( in_files=filelist, - merged_file='foo_merged.nii', - dimension='t', - output_type='NIFTI', - tr=2.25) + merged_file="foo_merged.nii", + dimension="t", + output_type="NIFTI", + tr=2.25, + ) - assert merger2.cmdline == \ - 'fslmerge -tr foo_merged.nii %s %.2f' % (' '.join(filelist), 2.25) + assert merger2.cmdline == "fslmerge -tr foo_merged.nii %s %.2f" % ( + " ".join(filelist), + 2.25, + ) # test arguments for opt_map # Fslmerge class doesn't have a filled opt_map{} @@ -103,7 +107,7 @@ def test_fslmaths(create_files_in_directory_plus_output_type): math = fsl.ImageMaths() # make sure command gets called - assert math.cmd == 'fslmaths' + assert math.cmd == "fslmaths" # test raising error 
with mandatory args absent with pytest.raises(ValueError): @@ -111,15 +115,18 @@ def test_fslmaths(create_files_in_directory_plus_output_type): # .inputs based parameters setting math.inputs.in_file = filelist[0] - math.inputs.op_string = '-add 2.5 -mul input_volume2' - math.inputs.out_file = 'foo_math.nii' - assert math.cmdline == \ - 'fslmaths %s -add 2.5 -mul input_volume2 foo_math.nii' % filelist[0] + math.inputs.op_string = "-add 2.5 -mul input_volume2" + math.inputs.out_file = "foo_math.nii" + assert ( + math.cmdline + == "fslmaths %s -add 2.5 -mul input_volume2 foo_math.nii" % filelist[0] + ) # .run based parameter setting math2 = fsl.ImageMaths( - in_file=filelist[0], op_string='-add 2.5', out_file='foo2_math.nii') - assert math2.cmdline == 'fslmaths %s -add 2.5 foo2_math.nii' % filelist[0] + in_file=filelist[0], op_string="-add 2.5", out_file="foo2_math.nii" + ) + assert math2.cmdline == "fslmaths %s -add 2.5 foo2_math.nii" % filelist[0] # test arguments for opt_map # Fslmath class doesn't have opt_map{} @@ -134,7 +141,7 @@ def test_overlay(create_files_in_directory_plus_output_type): overlay = fsl.Overlay() # make sure command gets called - assert overlay.cmd == 'overlay' + assert overlay.cmd == "overlay" # test raising error with mandatory args absent with pytest.raises(ValueError): @@ -146,10 +153,12 @@ def test_overlay(create_files_in_directory_plus_output_type): overlay.inputs.background_image = filelist[1] overlay.inputs.auto_thresh_bg = True overlay.inputs.show_negative_stats = True - overlay.inputs.out_file = 'foo_overlay.nii' - assert overlay.cmdline == \ - 'overlay 1 0 %s -a %s 2.50 10.00 %s -2.50 -10.00 foo_overlay.nii' % ( - filelist[1], filelist[0], filelist[0]) + overlay.inputs.out_file = "foo_overlay.nii" + assert ( + overlay.cmdline + == "overlay 1 0 %s -a %s 2.50 10.00 %s -2.50 -10.00 foo_overlay.nii" + % (filelist[1], filelist[0], filelist[0]) + ) # .run based parameter setting overlay2 = fsl.Overlay( @@ -157,9 +166,12 @@ def test_overlay(create_files_in_directory_plus_output_type): stat_thresh=(2.5, 10), background_image=filelist[1], auto_thresh_bg=True, - out_file='foo2_overlay.nii') - assert overlay2.cmdline == 'overlay 1 0 %s -a %s 2.50 10.00 foo2_overlay.nii' % ( - filelist[1], filelist[0]) + out_file="foo2_overlay.nii", + ) + assert overlay2.cmdline == "overlay 1 0 %s -a %s 2.50 10.00 foo2_overlay.nii" % ( + filelist[1], + filelist[0], + ) # test slicer @@ -171,7 +183,7 @@ def test_slicer(create_files_in_directory_plus_output_type): slicer = fsl.Slicer() # make sure command gets called - assert slicer.cmd == 'slicer' + assert slicer.cmd == "slicer" # test raising error with mandatory args absent with pytest.raises(ValueError): @@ -180,27 +192,29 @@ def test_slicer(create_files_in_directory_plus_output_type): # .inputs based parameters setting slicer.inputs.in_file = filelist[0] slicer.inputs.image_edges = filelist[1] - slicer.inputs.intensity_range = (10., 20.) 
+ slicer.inputs.intensity_range = (10.0, 20.0) slicer.inputs.all_axial = True slicer.inputs.image_width = 750 - slicer.inputs.out_file = 'foo_bar.png' - assert slicer.cmdline == \ - 'slicer %s %s -L -i 10.000 20.000 -A 750 foo_bar.png' % ( - filelist[0], filelist[1]) + slicer.inputs.out_file = "foo_bar.png" + assert slicer.cmdline == "slicer %s %s -L -i 10.000 20.000 -A 750 foo_bar.png" % ( + filelist[0], + filelist[1], + ) # .run based parameter setting slicer2 = fsl.Slicer( in_file=filelist[0], middle_slices=True, label_slices=False, - out_file='foo_bar2.png') - assert slicer2.cmdline == 'slicer %s -a foo_bar2.png' % (filelist[0]) + out_file="foo_bar2.png", + ) + assert slicer2.cmdline == "slicer %s -a foo_bar2.png" % (filelist[0]) def create_parfiles(): - np.savetxt('a.par', np.random.rand(6, 3)) - np.savetxt('b.par', np.random.rand(6, 3)) - return ['a.par', 'b.par'] + np.savetxt("a.par", np.random.rand(6, 3)) + np.savetxt("b.par", np.random.rand(6, 3)) + return ["a.par", "b.par"] # test fsl_tsplot @@ -213,7 +227,7 @@ def test_plottimeseries(create_files_in_directory_plus_output_type): plotter = fsl.PlotTimeSeries() # make sure command gets called - assert plotter.cmd == 'fsl_tsplot' + assert plotter.cmd == "fsl_tsplot" # test raising error with mandatory args absent with pytest.raises(ValueError): @@ -221,23 +235,24 @@ def test_plottimeseries(create_files_in_directory_plus_output_type): # .inputs based parameters setting plotter.inputs.in_file = parfiles[0] - plotter.inputs.labels = ['x', 'y', 'z'] + plotter.inputs.labels = ["x", "y", "z"] plotter.inputs.y_range = (0, 1) - plotter.inputs.title = 'test plot' - plotter.inputs.out_file = 'foo.png' - assert plotter.cmdline == \ - ('fsl_tsplot -i %s -a x,y,z -o foo.png -t \'test plot\' -u 1 --ymin=0 --ymax=1' - % parfiles[0]) + plotter.inputs.title = "test plot" + plotter.inputs.out_file = "foo.png" + assert plotter.cmdline == ( + "fsl_tsplot -i %s -a x,y,z -o foo.png -t 'test plot' -u 1 --ymin=0 --ymax=1" + % parfiles[0] + ) # .run based parameter setting plotter2 = fsl.PlotTimeSeries( - in_file=parfiles, - title='test2 plot', - plot_range=(2, 5), - out_file='bar.png') - assert plotter2.cmdline == \ - 'fsl_tsplot -i %s,%s -o bar.png --start=2 --finish=5 -t \'test2 plot\' -u 1' % tuple( - parfiles) + in_file=parfiles, title="test2 plot", plot_range=(2, 5), out_file="bar.png" + ) + assert ( + plotter2.cmdline + == "fsl_tsplot -i %s,%s -o bar.png --start=2 --finish=5 -t 'test2 plot' -u 1" + % tuple(parfiles) + ) @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") @@ -247,7 +262,7 @@ def test_plotmotionparams(create_files_in_directory_plus_output_type): plotter = fsl.PlotMotionParams() # make sure command gets called - assert plotter.cmd == 'fsl_tsplot' + assert plotter.cmd == "fsl_tsplot" # test raising error with mandatory args absent with pytest.raises(ValueError): @@ -255,22 +270,25 @@ def test_plotmotionparams(create_files_in_directory_plus_output_type): # .inputs based parameters setting plotter.inputs.in_file = parfiles[0] - plotter.inputs.in_source = 'fsl' - plotter.inputs.plot_type = 'rotations' - plotter.inputs.out_file = 'foo.png' - assert plotter.cmdline == \ - ('fsl_tsplot -i %s -o foo.png -t \'MCFLIRT estimated rotations (radians)\' ' - '--start=1 --finish=3 -a x,y,z' % parfiles[0]) + plotter.inputs.in_source = "fsl" + plotter.inputs.plot_type = "rotations" + plotter.inputs.out_file = "foo.png" + assert plotter.cmdline == ( + "fsl_tsplot -i %s -o foo.png -t 'MCFLIRT estimated rotations (radians)' " + "--start=1 
--finish=3 -a x,y,z" % parfiles[0] + ) # .run based parameter setting plotter2 = fsl.PlotMotionParams( in_file=parfiles[1], - in_source='spm', - plot_type='translations', - out_file='bar.png') - assert plotter2.cmdline == \ - ('fsl_tsplot -i %s -o bar.png -t \'Realign estimated translations (mm)\' ' - '--start=1 --finish=3 -a x,y,z' % parfiles[1]) + in_source="spm", + plot_type="translations", + out_file="bar.png", + ) + assert plotter2.cmdline == ( + "fsl_tsplot -i %s -o bar.png -t 'Realign estimated translations (mm)' " + "--start=1 --finish=3 -a x,y,z" % parfiles[1] + ) @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") @@ -289,16 +307,16 @@ def test_convertxfm(create_files_in_directory_plus_output_type): cvt.inputs.in_file = filelist[0] cvt.inputs.invert_xfm = True cvt.inputs.out_file = "foo.mat" - assert cvt.cmdline == 'convert_xfm -omat foo.mat -inverse %s' % filelist[0] + assert cvt.cmdline == "convert_xfm -omat foo.mat -inverse %s" % filelist[0] # constructor based parameter setting cvt2 = fsl.ConvertXFM( - in_file=filelist[0], - in_file2=filelist[1], - concat_xfm=True, - out_file="bar.mat") - assert cvt2.cmdline == \ - "convert_xfm -omat bar.mat -concat %s %s" % (filelist[1], filelist[0]) + in_file=filelist[0], in_file2=filelist[1], concat_xfm=True, out_file="bar.mat" + ) + assert cvt2.cmdline == "convert_xfm -omat bar.mat -concat %s %s" % ( + filelist[1], + filelist[0], + ) @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") @@ -320,7 +338,8 @@ def test_swapdims(create_files_in_directory_plus_output_type): swap.inputs.in_file = files[0] swap.inputs.new_dims = ("x", "y", "z") assert swap.cmdline == "fslswapdim a.nii x y z %s" % os.path.realpath( - os.path.join(testdir, "a_newdims%s" % out_ext)) + os.path.join(testdir, "a_newdims%s" % out_ext) + ) # Test that we can set an output name swap.inputs.out_file = "b.nii" diff --git a/nipype/interfaces/fsl/utils.py b/nipype/interfaces/fsl/utils.py index dacb8c9228..6ada44e046 100644 --- a/nipype/interfaces/fsl/utils.py +++ b/nipype/interfaces/fsl/utils.py @@ -17,20 +17,23 @@ import numpy as np -from ...utils.filemanip import (load_json, save_json, split_filename, - fname_presuffix) -from ..base import (traits, TraitedSpec, OutputMultiPath, File, CommandLine, - CommandLineInputSpec, isdefined) +from ...utils.filemanip import load_json, save_json, split_filename, fname_presuffix +from ..base import ( + traits, + TraitedSpec, + OutputMultiPath, + File, + CommandLine, + CommandLineInputSpec, + isdefined, +) from .base import FSLCommand, FSLCommandInputSpec, Info class CopyGeomInputSpec(FSLCommandInputSpec): in_file = File( - exists=True, - mandatory=True, - argstr="%s", - position=0, - desc="source image") + exists=True, mandatory=True, argstr="%s", position=0, desc="source image" + ) dest_file = File( exists=True, mandatory=True, @@ -38,11 +41,13 @@ class CopyGeomInputSpec(FSLCommandInputSpec): position=1, desc="destination image", copyfile=True, - output_name='out_file', - name_source='dest_file', - name_template='%s') + output_name="out_file", + name_source="dest_file", + name_template="%s", + ) ignore_dims = traits.Bool( - desc='Do not copy image dimensions', argstr='-d', position="-1") + desc="Do not copy image dimensions", argstr="-d", position="-1" + ) class CopyGeomOutputSpec(TraitedSpec): @@ -58,6 +63,7 @@ class CopyGeom(FSLCommand): different files will result in loss of information or potentially incorrect settings. 
""" + _cmd = "fslcpgeom" input_spec = CopyGeomInputSpec output_spec = CopyGeomOutputSpec @@ -65,36 +71,32 @@ class CopyGeom(FSLCommand): class RobustFOVInputSpec(FSLCommandInputSpec): in_file = File( - exists=True, - desc='input filename', - argstr='-i %s', - position=0, - mandatory=True) + exists=True, desc="input filename", argstr="-i %s", position=0, mandatory=True + ) out_roi = File( desc="ROI volume output name", argstr="-r %s", - name_source=['in_file'], + name_source=["in_file"], hash_files=False, - name_template='%s_ROI') + name_template="%s_ROI", + ) brainsize = traits.Int( - desc=('size of brain in z-dimension (default ' - '170mm/150mm)'), - argstr='-b %d') + desc=("size of brain in z-dimension (default " "170mm/150mm)"), argstr="-b %d" + ) out_transform = File( - desc=("Transformation matrix in_file to out_roi " - "output name"), + desc=("Transformation matrix in_file to out_roi " "output name"), argstr="-m %s", - name_source=['in_file'], + name_source=["in_file"], hash_files=False, - name_template='%s_to_ROI') + name_template="%s_to_ROI", + ) class RobustFOVOutputSpec(TraitedSpec): out_roi = File(exists=True, desc="ROI volume output name") out_transform = File( - exists=True, - desc=("Transformation matrix in_file to out_roi " - "output name")) + exists=True, desc=("Transformation matrix in_file to out_roi " "output name") + ) class RobustFOV(FSLCommand): @@ -104,7 +106,7 @@ class RobustFOV(FSLCommand): 150mm to 170mm. """ - _cmd = 'robustfov' + _cmd = "robustfov" input_spec = RobustFOVInputSpec output_spec = RobustFOVOutputSpec @@ -112,45 +114,46 @@ class RobustFOV(FSLCommand): class ImageMeantsInputSpec(FSLCommandInputSpec): in_file = File( exists=True, - desc='input file for computing the average timeseries', - argstr='-i %s', + desc="input file for computing the average timeseries", + argstr="-i %s", position=0, - mandatory=True) + mandatory=True, + ) out_file = File( - desc='name of output text matrix', - argstr='-o %s', + desc="name of output text matrix", + argstr="-o %s", genfile=True, - hash_files=False) - mask = File(exists=True, desc='input 3D mask', argstr='-m %s') + hash_files=False, + ) + mask = File(exists=True, desc="input 3D mask", argstr="-m %s") spatial_coord = traits.List( traits.Int, - desc=(' requested spatial coordinate ' - '(instead of mask)'), - argstr='-c %s') + desc=(" requested spatial coordinate " "(instead of mask)"), + argstr="-c %s", + ) use_mm = traits.Bool( - desc=('use mm instead of voxel coordinates (for -c ' - 'option)'), - argstr='--usemm') + desc=("use mm instead of voxel coordinates (for -c " "option)"), + argstr="--usemm", + ) show_all = traits.Bool( - desc=('show all voxel time series (within mask) ' - 'instead of averaging'), - argstr='--showall') + desc=("show all voxel time series (within mask) " "instead of averaging"), + argstr="--showall", + ) eig = traits.Bool( - desc=('calculate Eigenvariate(s) instead of mean (output will have 0 ' - 'mean)'), - argstr='--eig') + desc=("calculate Eigenvariate(s) instead of mean (output will have 0 " "mean)"), + argstr="--eig", + ) order = traits.Int( - 1, - desc='select number of Eigenvariates', - argstr='--order=%d', - usedefault=True) + 1, desc="select number of Eigenvariates", argstr="--order=%d", usedefault=True + ) nobin = traits.Bool( - desc=('do not binarise the mask for calculation of ' - 'Eigenvariates'), - argstr='--no_bin') + desc=("do not binarise the mask for calculation of " "Eigenvariates"), + argstr="--no_bin", + ) transpose = traits.Bool( - desc=('output results in transpose 
format (one row per voxel/mean)'), - argstr='--transpose') + desc=("output results in transpose format (one row per voxel/mean)"), + argstr="--transpose", + ) class ImageMeantsOutputSpec(TraitedSpec): @@ -163,21 +166,23 @@ class ImageMeants(FSLCommand): in the mask (or all voxels in the image if no mask is specified) """ - _cmd = 'fslmeants' + + _cmd = "fslmeants" input_spec = ImageMeantsInputSpec output_spec = ImageMeantsOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = self.inputs.out_file - if not isdefined(outputs['out_file']): - outputs['out_file'] = self._gen_fname( - self.inputs.in_file, suffix='_ts', ext='.txt', change_ext=True) - outputs['out_file'] = os.path.abspath(outputs['out_file']) + outputs["out_file"] = self.inputs.out_file + if not isdefined(outputs["out_file"]): + outputs["out_file"] = self._gen_fname( + self.inputs.in_file, suffix="_ts", ext=".txt", change_ext=True + ) + outputs["out_file"] = os.path.abspath(outputs["out_file"]) return outputs def _gen_filename(self, name): - if name == 'out_file': + if name == "out_file": return self._list_outputs()[name] return None @@ -187,22 +192,24 @@ class SmoothInputSpec(FSLCommandInputSpec): sigma = traits.Float( argstr="-kernel gauss %.03f -fmean", position=1, - xor=['fwhm'], + xor=["fwhm"], mandatory=True, - desc='gaussian kernel sigma in mm (not voxels)') + desc="gaussian kernel sigma in mm (not voxels)", + ) fwhm = traits.Float( argstr="-kernel gauss %.03f -fmean", position=1, - xor=['sigma'], + xor=["sigma"], mandatory=True, - desc=('gaussian kernel fwhm, will be converted to sigma in mm ' - '(not voxels)')) + desc=("gaussian kernel fwhm, will be converted to sigma in mm " "(not voxels)"), + ) smoothed_file = File( argstr="%s", position=2, - name_source=['in_file'], - name_template='%s_smooth', - hash_files=False) + name_source=["in_file"], + name_template="%s_smooth", + hash_files=False, + ) class SmoothOutputSpec(TraitedSpec): @@ -249,18 +256,24 @@ class Smooth(FSLCommand): input_spec = SmoothInputSpec output_spec = SmoothOutputSpec - _cmd = 'fslmaths' + _cmd = "fslmaths" def _format_arg(self, name, trait_spec, value): - if name == 'fwhm': + if name == "fwhm": sigma = float(value) / np.sqrt(8 * np.log(2)) return super(Smooth, self)._format_arg(name, trait_spec, sigma) return super(Smooth, self)._format_arg(name, trait_spec, value) class SliceInputSpec(FSLCommandInputSpec): - in_file = File(exists=True, argstr="%s", position=0, mandatory=True, - desc="input filename", copyfile=False) + in_file = File( + exists=True, + argstr="%s", + position=0, + mandatory=True, + desc="input filename", + copyfile=False, + ) out_base_name = traits.Str(argstr="%s", position=1, desc="outputs prefix") @@ -285,7 +298,7 @@ class Slice(FSLCommand): """ - _cmd = 'fslslice' + _cmd = "fslslice" input_spec = SliceInputSpec output_spec = SliceOutputSpec @@ -307,44 +320,50 @@ def _list_outputs(self): """ outputs = self._outputs().get() ext = Info.output_type_to_ext(self.inputs.output_type) - suffix = '_slice_*' + ext + suffix = "_slice_*" + ext if isdefined(self.inputs.out_base_name): - fname_template = os.path.abspath( - self.inputs.out_base_name + suffix) + fname_template = os.path.abspath(self.inputs.out_base_name + suffix) else: - fname_template = fname_presuffix(self.inputs.in_file, - suffix=suffix, use_ext=False) + fname_template = fname_presuffix( + self.inputs.in_file, suffix=suffix, use_ext=False + ) - outputs['out_files'] = sorted(glob(fname_template)) + outputs["out_files"] = 
sorted(glob(fname_template)) return outputs class MergeInputSpec(FSLCommandInputSpec): - in_files = traits.List( - File(exists=True), argstr="%s", position=2, mandatory=True) + in_files = traits.List(File(exists=True), argstr="%s", position=2, mandatory=True) dimension = traits.Enum( - 't', - 'x', - 'y', - 'z', - 'a', + "t", + "x", + "y", + "z", + "a", argstr="-%s", position=0, - desc=("dimension along which to merge, optionally " - "set tr input when dimension is t"), - mandatory=True) + desc=( + "dimension along which to merge, optionally " + "set tr input when dimension is t" + ), + mandatory=True, + ) tr = traits.Float( position=-1, - argstr='%.2f', - desc=('use to specify TR in seconds (default is 1.00 ' - 'sec), overrides dimension and sets it to tr')) + argstr="%.2f", + desc=( + "use to specify TR in seconds (default is 1.00 " + "sec), overrides dimension and sets it to tr" + ), + ) merged_file = File( argstr="%s", position=1, - name_source='in_files', - name_template='%s_merged', - hash_files=False) + name_source="in_files", + name_template="%s_merged", + hash_files=False, + ) class MergeOutputSpec(TraitedSpec): @@ -378,35 +397,29 @@ class Merge(FSLCommand): """ - _cmd = 'fslmerge' + _cmd = "fslmerge" input_spec = MergeInputSpec output_spec = MergeOutputSpec def _format_arg(self, name, spec, value): - if name == 'tr': - if self.inputs.dimension != 't': - raise ValueError('When TR is specified, dimension must be t') + if name == "tr": + if self.inputs.dimension != "t": + raise ValueError("When TR is specified, dimension must be t") return spec.argstr % value - if name == 'dimension': + if name == "dimension": if isdefined(self.inputs.tr): - return '-tr' + return "-tr" return spec.argstr % value return super(Merge, self)._format_arg(name, spec, value) class ExtractROIInputSpec(FSLCommandInputSpec): in_file = File( - exists=True, - argstr="%s", - position=0, - desc="input file", - mandatory=True) + exists=True, argstr="%s", position=0, desc="input file", mandatory=True + ) roi_file = File( - argstr="%s", - position=1, - desc="output file", - genfile=True, - hash_files=False) + argstr="%s", position=1, desc="output file", genfile=True, hash_files=False + ) x_min = traits.Int(argstr="%d", position=2) x_size = traits.Int(argstr="%d", position=3) y_min = traits.Int(argstr="%d", position=4) @@ -416,15 +429,22 @@ class ExtractROIInputSpec(FSLCommandInputSpec): t_min = traits.Int(argstr="%d", position=8) t_size = traits.Int(argstr="%d", position=9) _crop_xor = [ - 'x_min', 'x_size', 'y_min', 'y_size', 'z_min', 'z_size', 't_min', - 't_size' + "x_min", + "x_size", + "y_min", + "y_size", + "z_min", + "z_size", + "t_min", + "t_size", ] crop_list = traits.List( traits.Tuple(traits.Int, traits.Int), argstr="%s", position=2, xor=_crop_xor, - desc="list of two tuples specifying crop options") + desc="list of two tuples specifying crop options", + ) class ExtractROIOutputSpec(TraitedSpec): @@ -457,7 +477,7 @@ class ExtractROI(FSLCommand): """ - _cmd = 'fslroi' + _cmd = "fslroi" input_spec = ExtractROIInputSpec output_spec = ExtractROIOutputSpec @@ -485,36 +505,33 @@ def _list_outputs(self): """ outputs = self._outputs().get() - outputs['roi_file'] = self.inputs.roi_file - if not isdefined(outputs['roi_file']): - outputs['roi_file'] = self._gen_fname( - self.inputs.in_file, suffix='_roi') - outputs['roi_file'] = os.path.abspath(outputs['roi_file']) + outputs["roi_file"] = self.inputs.roi_file + if not isdefined(outputs["roi_file"]): + outputs["roi_file"] = self._gen_fname(self.inputs.in_file, 
suffix="_roi") + outputs["roi_file"] = os.path.abspath(outputs["roi_file"]) return outputs def _gen_filename(self, name): - if name == 'roi_file': + if name == "roi_file": return self._list_outputs()[name] return None class SplitInputSpec(FSLCommandInputSpec): in_file = File( - exists=True, - argstr="%s", - position=0, - mandatory=True, - desc="input filename") + exists=True, argstr="%s", position=0, mandatory=True, desc="input filename" + ) out_base_name = traits.Str(argstr="%s", position=1, desc="outputs prefix") dimension = traits.Enum( - 't', - 'x', - 'y', - 'z', + "t", + "x", + "y", + "z", argstr="-%s", position=2, mandatory=True, - desc="dimension along which the file will be split") + desc="dimension along which the file will be split", + ) class SplitOutputSpec(TraitedSpec): @@ -525,7 +542,8 @@ class Split(FSLCommand): """Uses FSL Fslsplit command to separate a volume into images in time, x, y or z dimension. """ - _cmd = 'fslsplit' + + _cmd = "fslsplit" input_spec = SplitInputSpec output_spec = SplitOutputSpec @@ -547,36 +565,37 @@ def _list_outputs(self): """ outputs = self._outputs().get() ext = Info.output_type_to_ext(self.inputs.output_type) - outbase = 'vol[0-9]*' + outbase = "vol[0-9]*" if isdefined(self.inputs.out_base_name): - outbase = '%s[0-9]*' % self.inputs.out_base_name - outputs['out_files'] = sorted( - glob(os.path.join(os.getcwd(), outbase + ext))) + outbase = "%s[0-9]*" % self.inputs.out_base_name + outputs["out_files"] = sorted(glob(os.path.join(os.getcwd(), outbase + ext))) return outputs class ImageMathsInputSpec(FSLCommandInputSpec): in_file = File(exists=True, argstr="%s", mandatory=True, position=1) in_file2 = File(exists=True, argstr="%s", position=3) - mask_file = File(exists=True, argstr='-mas %s', - desc='use (following image>0) to mask current image') + mask_file = File( + exists=True, + argstr="-mas %s", + desc="use (following image>0) to mask current image", + ) out_file = File(argstr="%s", position=-2, genfile=True, hash_files=False) op_string = traits.Str( - argstr="%s", - position=2, - desc="string defining the operation, i. e. -add") + argstr="%s", position=2, desc="string defining the operation, i. e. 
-add" + ) suffix = traits.Str(desc="out_file suffix") out_data_type = traits.Enum( - 'char', - 'short', - 'int', - 'float', - 'double', - 'input', + "char", + "short", + "int", + "float", + "double", + "input", argstr="-odt %s", position=-1, - desc=("output datatype, one of (char, short, " - "int, float, double, input)")) + desc=("output datatype, one of (char, short, " "int, float, double, input)"), + ) class ImageMathsOutputSpec(TraitedSpec): @@ -600,29 +619,29 @@ class ImageMaths(FSLCommand): """ + input_spec = ImageMathsInputSpec output_spec = ImageMathsOutputSpec - _cmd = 'fslmaths' + _cmd = "fslmaths" def _gen_filename(self, name): - if name == 'out_file': + if name == "out_file": return self._list_outputs()[name] return None def _parse_inputs(self, skip=None): - return super(ImageMaths, self)._parse_inputs(skip=['suffix']) + return super(ImageMaths, self)._parse_inputs(skip=["suffix"]) def _list_outputs(self): - suffix = '_maths' # ohinds: build suffix + suffix = "_maths" # ohinds: build suffix if isdefined(self.inputs.suffix): suffix = self.inputs.suffix outputs = self._outputs().get() - outputs['out_file'] = self.inputs.out_file - if not isdefined(outputs['out_file']): - outputs['out_file'] = self._gen_fname( - self.inputs.in_file, suffix=suffix) - outputs['out_file'] = os.path.abspath(outputs['out_file']) + outputs["out_file"] = self.inputs.out_file + if not isdefined(outputs["out_file"]): + outputs["out_file"] = self._gen_fname(self.inputs.in_file, suffix=suffix) + outputs["out_file"] = os.path.abspath(outputs["out_file"]) return outputs @@ -632,41 +651,46 @@ class FilterRegressorInputSpec(FSLCommandInputSpec): argstr="-i %s", desc="input file name (4D image)", mandatory=True, - position=1) + position=1, + ) out_file = File( argstr="-o %s", desc="output file name for the filtered data", genfile=True, position=2, - hash_files=False) + hash_files=False, + ) design_file = File( exists=True, argstr="-d %s", position=3, mandatory=True, - desc=("name of the matrix with time courses (e.g. GLM " - "design or MELODIC mixing matrix)")) + desc=( + "name of the matrix with time courses (e.g. 
GLM " + "design or MELODIC mixing matrix)" + ), + ) filter_columns = traits.List( traits.Int, argstr="-f '%s'", xor=["filter_all"], mandatory=True, position=4, - desc=("(1-based) column indices to filter out of the data")) + desc=("(1-based) column indices to filter out of the data"), + ) filter_all = traits.Bool( mandatory=True, argstr="-f '%s'", xor=["filter_columns"], position=4, - desc=("use all columns in the design file in " - "denoising")) + desc=("use all columns in the design file in " "denoising"), + ) mask = File(exists=True, argstr="-m %s", desc="mask image file name") - var_norm = traits.Bool( - argstr="--vn", desc="perform variance-normalization on data") + var_norm = traits.Bool(argstr="--vn", desc="perform variance-normalization on data") out_vnscales = traits.Bool( argstr="--out_vnscales", - desc=("output scaling factors for variance " - "normalization")) + desc=("output scaling factors for variance " "normalization"), + ) class FilterRegressorOutputSpec(TraitedSpec): @@ -678,12 +702,13 @@ class FilterRegressor(FSLCommand): Uses simple OLS regression on 4D images """ + input_spec = FilterRegressorInputSpec output_spec = FilterRegressorOutputSpec - _cmd = 'fsl_regfilt' + _cmd = "fsl_regfilt" def _format_arg(self, name, trait_spec, value): - if name == 'filter_columns': + if name == "filter_columns": return trait_spec.argstr % ",".join(map(str, value)) elif name == "filter_all": design = np.loadtxt(self.inputs.design_file) @@ -691,59 +716,62 @@ def _format_arg(self, name, trait_spec, value): n_cols = design.shape[1] except IndexError: n_cols = 1 - return trait_spec.argstr % ",".join( - map(str, list(range(1, n_cols + 1)))) - return super(FilterRegressor, self)._format_arg( - name, trait_spec, value) + return trait_spec.argstr % ",".join(map(str, list(range(1, n_cols + 1)))) + return super(FilterRegressor, self)._format_arg(name, trait_spec, value) def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = self.inputs.out_file - if not isdefined(outputs['out_file']): - outputs['out_file'] = self._gen_fname( - self.inputs.in_file, suffix='_regfilt') - outputs['out_file'] = os.path.abspath(outputs['out_file']) + outputs["out_file"] = self.inputs.out_file + if not isdefined(outputs["out_file"]): + outputs["out_file"] = self._gen_fname( + self.inputs.in_file, suffix="_regfilt" + ) + outputs["out_file"] = os.path.abspath(outputs["out_file"]) return outputs def _gen_filename(self, name): - if name == 'out_file': + if name == "out_file": return self._list_outputs()[name] return None class ImageStatsInputSpec(FSLCommandInputSpec): split_4d = traits.Bool( - argstr='-t', + argstr="-t", position=1, - desc=('give a separate output line for each 3D ' - 'volume of a 4D timeseries')) + desc=("give a separate output line for each 3D " "volume of a 4D timeseries"), + ) in_file = File( exists=True, argstr="%s", mandatory=True, position=3, - desc='input file to generate stats of') + desc="input file to generate stats of", + ) op_string = traits.Str( argstr="%s", mandatory=True, position=4, - desc=("string defining the operation, options are " - "applied in order, e.g. -M -l 10 -M will " - "report the non-zero mean, apply a threshold " - "and then report the new nonzero mean")) - mask_file = File( - exists=True, argstr="", desc='mask file used for option -k %s') + desc=( + "string defining the operation, options are " + "applied in order, e.g. 
-M -l 10 -M will " + "report the non-zero mean, apply a threshold " + "and then report the new nonzero mean" + ), + ) + mask_file = File(exists=True, argstr="", desc="mask file used for option -k %s") index_mask_file = File( exists=True, argstr="-K %s", position=2, desc="generate seperate n submasks from indexMask, " - "for indexvalues 1..n where n is the maximum index " - "value in indexMask, and generate statistics for each submask") + "for indexvalues 1..n where n is the maximum index " + "value in indexMask, and generate statistics for each submask", + ) class ImageStatsOutputSpec(TraitedSpec): - out_stat = traits.Any(desc='stats output') + out_stat = traits.Any(desc="stats output") class ImageStats(FSLCommand): @@ -763,35 +791,35 @@ class ImageStats(FSLCommand): """ + input_spec = ImageStatsInputSpec output_spec = ImageStatsOutputSpec - _cmd = 'fslstats' + _cmd = "fslstats" def _format_arg(self, name, trait_spec, value): - if name == 'mask_file': - return '' - if name == 'op_string': - if '-k %s' in self.inputs.op_string: + if name == "mask_file": + return "" + if name == "op_string": + if "-k %s" in self.inputs.op_string: if isdefined(self.inputs.mask_file): return self.inputs.op_string % self.inputs.mask_file else: - raise ValueError( - '-k %s option in op_string requires mask_file') + raise ValueError("-k %s option in op_string requires mask_file") return super(ImageStats, self)._format_arg(name, trait_spec, value) def aggregate_outputs(self, runtime=None, needed_outputs=None): outputs = self._outputs() # local caching for backward compatibility - outfile = os.path.join(os.getcwd(), 'stat_result.json') + outfile = os.path.join(os.getcwd(), "stat_result.json") if runtime is None: try: - out_stat = load_json(outfile)['stat'] + out_stat = load_json(outfile)["stat"] except IOError: return self.run().outputs else: out_stat = [] - for line in runtime.stdout.split('\n'): + for line in runtime.stdout.split("\n"): if line: values = line.split() if len(values) > 1: @@ -806,31 +834,35 @@ def aggregate_outputs(self, runtime=None, needed_outputs=None): class AvScaleInputSpec(CommandLineInputSpec): - all_param = traits.Bool(False, argstr='--allparams') - mat_file = File( - exists=True, argstr='%s', desc='mat file to read', position=-2) + all_param = traits.Bool(False, argstr="--allparams") + mat_file = File(exists=True, argstr="%s", desc="mat file to read", position=-2) ref_file = File( exists=True, - argstr='%s', + argstr="%s", position=-1, - desc='reference file to get center of rotation') + desc="reference file to get center of rotation", + ) class AvScaleOutputSpec(TraitedSpec): rotation_translation_matrix = traits.List( - traits.List(traits.Float), desc='Rotation and Translation Matrix') - scales = traits.List(traits.Float, desc='Scales (x,y,z)') - skews = traits.List(traits.Float, desc='Skews') - average_scaling = traits.Float(desc='Average Scaling') - determinant = traits.Float(desc='Determinant') + traits.List(traits.Float), desc="Rotation and Translation Matrix" + ) + scales = traits.List(traits.Float, desc="Scales (x,y,z)") + skews = traits.List(traits.Float, desc="Skews") + average_scaling = traits.Float(desc="Average Scaling") + determinant = traits.Float(desc="Determinant") forward_half_transform = traits.List( - traits.List(traits.Float), desc='Forward Half Transform') + traits.List(traits.Float), desc="Forward Half Transform" + ) backward_half_transform = traits.List( - traits.List(traits.Float), desc='Backwards Half Transform') + traits.List(traits.Float), desc="Backwards Half 
 Transform"
+    )
     left_right_orientation_preserved = traits.Bool(
-        desc='True if LR orientation preserved')
-    rot_angles = traits.List(traits.Float, desc='rotation angles')
-    translations = traits.List(traits.Float, desc='translations')
+        desc="True if LR orientation preserved"
+    )
+    rot_angles = traits.List(traits.Float, desc="rotation angles")
+    translations = traits.List(traits.Float, desc="translations")
 
 
 class AvScale(CommandLine):
@@ -845,56 +877,60 @@ class AvScale(CommandLine):
 
     """
+
     input_spec = AvScaleInputSpec
     output_spec = AvScaleOutputSpec
 
-    _cmd = 'avscale'
+    _cmd = "avscale"
 
     def _run_interface(self, runtime):
         runtime = super(AvScale, self)._run_interface(runtime)
 
         expr = re.compile(
-            r'Rotation & Translation Matrix:\n(?P<rot_tran_mat>[0-9\. \n-]+)[\s\n]*'
-            r'(Rotation Angles \(x,y,z\) \[rads\] = (?P<rot_angles>[0-9\. -]+))?[\s\n]*'
-            r'(Translations \(x,y,z\) \[mm\] = (?P<translations>[0-9\. -]+))?[\s\n]*'
-            r'Scales \(x,y,z\) = (?P<scales>[0-9\. -]+)[\s\n]*'
-            r'Skews \(xy,xz,yz\) = (?P<skews>[0-9\. -]+)[\s\n]*'
-            r'Average scaling = (?P<avg_scaling>[0-9\.-]+)[\s\n]*'
-            r'Determinant = (?P<determinant>[0-9\.-]+)[\s\n]*'
-            r'Left-Right orientation: (?P<lr_orientation>[A-Za-z]+)[\s\n]*'
-            r'Forward half transform =[\s]*\n'
-            r'(?P<fwd_half_xfm>[0-9\. \n-]+)[\s\n]*'
-            r'Backward half transform =[\s]*\n'
-            r'(?P<bwd_half_xfm>[0-9\. \n-]+)[\s\n]*')
+            r"Rotation & Translation Matrix:\n(?P<rot_tran_mat>[0-9\. \n-]+)[\s\n]*"
+            r"(Rotation Angles \(x,y,z\) \[rads\] = (?P<rot_angles>[0-9\. -]+))?[\s\n]*"
+            r"(Translations \(x,y,z\) \[mm\] = (?P<translations>[0-9\. -]+))?[\s\n]*"
+            r"Scales \(x,y,z\) = (?P<scales>[0-9\. -]+)[\s\n]*"
+            r"Skews \(xy,xz,yz\) = (?P<skews>[0-9\. -]+)[\s\n]*"
+            r"Average scaling = (?P<avg_scaling>[0-9\.-]+)[\s\n]*"
+            r"Determinant = (?P<determinant>[0-9\.-]+)[\s\n]*"
+            r"Left-Right orientation: (?P<lr_orientation>[A-Za-z]+)[\s\n]*"
+            r"Forward half transform =[\s]*\n"
+            r"(?P<fwd_half_xfm>[0-9\. \n-]+)[\s\n]*"
+            r"Backward half transform =[\s]*\n"
+            r"(?P<bwd_half_xfm>[0-9\. 
\n-]+)[\s\n]*" + ) out = expr.search(runtime.stdout).groupdict() outputs = {} - outputs['rotation_translation_matrix'] = [[ - float(v) for v in r.strip().split(' ') - ] for r in out['rot_tran_mat'].strip().split('\n')] - outputs['scales'] = [ - float(s) for s in out['scales'].strip().split(' ') + outputs["rotation_translation_matrix"] = [ + [float(v) for v in r.strip().split(" ")] + for r in out["rot_tran_mat"].strip().split("\n") + ] + outputs["scales"] = [float(s) for s in out["scales"].strip().split(" ")] + outputs["skews"] = [float(s) for s in out["skews"].strip().split(" ")] + outputs["average_scaling"] = float(out["avg_scaling"].strip()) + outputs["determinant"] = float(out["determinant"].strip()) + outputs["left_right_orientation_preserved"] = ( + out["lr_orientation"].strip() == "preserved" + ) + outputs["forward_half_transform"] = [ + [float(v) for v in r.strip().split(" ")] + for r in out["fwd_half_xfm"].strip().split("\n") + ] + outputs["backward_half_transform"] = [ + [float(v) for v in r.strip().split(" ")] + for r in out["bwd_half_xfm"].strip().split("\n") ] - outputs['skews'] = [float(s) for s in out['skews'].strip().split(' ')] - outputs['average_scaling'] = float(out['avg_scaling'].strip()) - outputs['determinant'] = float(out['determinant'].strip()) - outputs['left_right_orientation_preserved'] = out[ - 'lr_orientation'].strip() == 'preserved' - outputs['forward_half_transform'] = [[ - float(v) for v in r.strip().split(' ') - ] for r in out['fwd_half_xfm'].strip().split('\n')] - outputs['backward_half_transform'] = [[ - float(v) for v in r.strip().split(' ') - ] for r in out['bwd_half_xfm'].strip().split('\n')] if self.inputs.all_param: - outputs['rot_angles'] = [ - float(r) for r in out['rot_angles'].strip().split(' ') + outputs["rot_angles"] = [ + float(r) for r in out["rot_angles"].strip().split(" ") ] - outputs['translations'] = [ - float(r) for r in out['translations'].strip().split(' ') + outputs["translations"] = [ + float(r) for r in out["translations"].strip().split(" ") ] - setattr(self, '_results', outputs) + setattr(self, "_results", outputs) return runtime def _list_outputs(self): @@ -903,90 +939,100 @@ def _list_outputs(self): class OverlayInputSpec(FSLCommandInputSpec): transparency = traits.Bool( - desc='make overlay colors semi-transparent', + desc="make overlay colors semi-transparent", position=1, - argstr='%s', + argstr="%s", usedefault=True, - default_value=True) + default_value=True, + ) out_type = traits.Enum( - 'float', - 'int', + "float", + "int", position=2, usedefault=True, - argstr='%s', - desc='write output with float or int') + argstr="%s", + desc="write output with float or int", + ) use_checkerboard = traits.Bool( - desc='use checkerboard mask for overlay', argstr='-c', position=3) + desc="use checkerboard mask for overlay", argstr="-c", position=3 + ) background_image = File( exists=True, position=4, mandatory=True, - argstr='%s', - desc='image to use as background') - _xor_inputs = ('auto_thresh_bg', 'full_bg_range', 'bg_thresh') + argstr="%s", + desc="image to use as background", + ) + _xor_inputs = ("auto_thresh_bg", "full_bg_range", "bg_thresh") auto_thresh_bg = traits.Bool( - desc=('automatically threshold the background image'), - argstr='-a', + desc=("automatically threshold the background image"), + argstr="-a", position=5, xor=_xor_inputs, - mandatory=True) + mandatory=True, + ) full_bg_range = traits.Bool( - desc='use full range of background image', - argstr='-A', + desc="use full range of background image", + argstr="-A", 
position=5, xor=_xor_inputs, - mandatory=True) + mandatory=True, + ) bg_thresh = traits.Tuple( traits.Float, traits.Float, - argstr='%.3f %.3f', + argstr="%.3f %.3f", position=5, - desc='min and max values for background intensity', + desc="min and max values for background intensity", xor=_xor_inputs, - mandatory=True) + mandatory=True, + ) stat_image = File( exists=True, position=6, mandatory=True, - argstr='%s', - desc='statistical image to overlay in color') + argstr="%s", + desc="statistical image to overlay in color", + ) stat_thresh = traits.Tuple( traits.Float, traits.Float, position=7, mandatory=True, - argstr='%.2f %.2f', - desc=('min and max values for the statistical ' - 'overlay')) + argstr="%.2f %.2f", + desc=("min and max values for the statistical " "overlay"), + ) show_negative_stats = traits.Bool( - desc=('display negative statistics in ' - 'overlay'), - xor=['stat_image2'], - argstr='%s', - position=8) + desc=("display negative statistics in " "overlay"), + xor=["stat_image2"], + argstr="%s", + position=8, + ) stat_image2 = File( exists=True, position=9, - xor=['show_negative_stats'], - argstr='%s', - desc='second statistical image to overlay in color') + xor=["show_negative_stats"], + argstr="%s", + desc="second statistical image to overlay in color", + ) stat_thresh2 = traits.Tuple( traits.Float, traits.Float, position=10, - desc=('min and max values for second ' - 'statistical overlay'), - argstr='%.2f %.2f') + desc=("min and max values for second " "statistical overlay"), + argstr="%.2f %.2f", + ) out_file = File( - desc='combined image volume', + desc="combined image volume", position=-1, - argstr='%s', + argstr="%s", genfile=True, - hash_files=False) + hash_files=False, + ) class OverlayOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='combined image volume') + out_file = File(exists=True, desc="combined image volume") class Overlay(FSLCommand): @@ -1008,25 +1054,28 @@ class Overlay(FSLCommand): """ - _cmd = 'overlay' + + _cmd = "overlay" input_spec = OverlayInputSpec output_spec = OverlayOutputSpec def _format_arg(self, name, spec, value): - if name == 'transparency': + if name == "transparency": if value: - return '1' + return "1" else: - return '0' - if name == 'out_type': - if value == 'float': - return '0' + return "0" + if name == "out_type": + if value == "float": + return "0" else: - return '1' - if name == 'show_negative_stats': - return '%s %.2f %.2f' % (self.inputs.stat_image, - self.inputs.stat_thresh[0] * -1, - self.inputs.stat_thresh[1] * -1) + return "1" + if name == "show_negative_stats": + return "%s %.2f %.2f" % ( + self.inputs.stat_image, + self.inputs.stat_thresh[0] * -1, + self.inputs.stat_thresh[1] * -1, + ) return super(Overlay, self)._format_arg(name, spec, value) def _list_outputs(self): @@ -1034,117 +1083,122 @@ def _list_outputs(self): out_file = self.inputs.out_file if not isdefined(out_file): if isdefined(self.inputs.stat_image2) and ( - not isdefined(self.inputs.show_negative_stats) - or not self.inputs.show_negative_stats): + not isdefined(self.inputs.show_negative_stats) + or not self.inputs.show_negative_stats + ): stem = "%s_and_%s" % ( split_filename(self.inputs.stat_image)[1], - split_filename(self.inputs.stat_image2)[1]) + split_filename(self.inputs.stat_image2)[1], + ) else: stem = split_filename(self.inputs.stat_image)[1] - out_file = self._gen_fname(stem, suffix='_overlay') - outputs['out_file'] = os.path.abspath(out_file) + out_file = self._gen_fname(stem, suffix="_overlay") + outputs["out_file"] = 
os.path.abspath(out_file) return outputs def _gen_filename(self, name): - if name == 'out_file': - return self._list_outputs()['out_file'] + if name == "out_file": + return self._list_outputs()["out_file"] return None class SlicerInputSpec(FSLCommandInputSpec): in_file = File( - exists=True, - position=1, - argstr='%s', - mandatory=True, - desc='input volume') + exists=True, position=1, argstr="%s", mandatory=True, desc="input volume" + ) image_edges = File( exists=True, position=2, - argstr='%s', - desc=('volume to display edge overlay for (useful for ' - 'checking registration')) + argstr="%s", + desc=( + "volume to display edge overlay for (useful for " "checking registration" + ), + ) label_slices = traits.Bool( position=3, - argstr='-L', - desc='display slice number', + argstr="-L", + desc="display slice number", usedefault=True, - default_value=True) + default_value=True, + ) colour_map = File( exists=True, position=4, - argstr='-l %s', - desc=('use different colour map from that stored in ' - 'nifti header')) + argstr="-l %s", + desc=("use different colour map from that stored in " "nifti header"), + ) intensity_range = traits.Tuple( traits.Float, traits.Float, position=5, - argstr='-i %.3f %.3f', - desc='min and max intensities to display') + argstr="-i %.3f %.3f", + desc="min and max intensities to display", + ) threshold_edges = traits.Float( - position=6, argstr='-e %.3f', desc='use threshold for edges') + position=6, argstr="-e %.3f", desc="use threshold for edges" + ) dither_edges = traits.Bool( - position=7, - argstr='-t', - desc=('produce semi-transparent (dithered) ' - 'edges')) + position=7, argstr="-t", desc=("produce semi-transparent (dithered) " "edges") + ) nearest_neighbour = traits.Bool( position=8, - argstr='-n', - desc=('use nearest neighbor interpolation ' - 'for output')) + argstr="-n", + desc=("use nearest neighbor interpolation " "for output"), + ) show_orientation = traits.Bool( position=9, - argstr='%s', + argstr="%s", usedefault=True, default_value=True, - desc='label left-right orientation') - _xor_options = ('single_slice', 'middle_slices', 'all_axial', - 'sample_axial') + desc="label left-right orientation", + ) + _xor_options = ("single_slice", "middle_slices", "all_axial", "sample_axial") single_slice = traits.Enum( - 'x', - 'y', - 'z', + "x", + "y", + "z", position=10, - argstr='-%s', + argstr="-%s", xor=_xor_options, - requires=['slice_number'], - desc=('output picture of single slice in the x, y, or z plane')) + requires=["slice_number"], + desc=("output picture of single slice in the x, y, or z plane"), + ) slice_number = traits.Int( - position=11, argstr='-%d', desc='slice number to save in picture') + position=11, argstr="-%d", desc="slice number to save in picture" + ) middle_slices = traits.Bool( position=10, - argstr='-a', + argstr="-a", xor=_xor_options, - desc=('output picture of mid-sagittal, axial, ' - 'and coronal slices')) + desc=("output picture of mid-sagittal, axial, " "and coronal slices"), + ) all_axial = traits.Bool( position=10, - argstr='-A', + argstr="-A", xor=_xor_options, - requires=['image_width'], - desc='output all axial slices into one picture') + requires=["image_width"], + desc="output all axial slices into one picture", + ) sample_axial = traits.Int( position=10, - argstr='-S %d', + argstr="-S %d", xor=_xor_options, - requires=['image_width'], - desc=('output every n axial slices into one ' - 'picture')) - image_width = traits.Int( - position=-2, argstr='%d', desc='max picture width') + requires=["image_width"], + 
desc=("output every n axial slices into one " "picture"), + ) + image_width = traits.Int(position=-2, argstr="%d", desc="max picture width") out_file = File( position=-1, genfile=True, - argstr='%s', - desc='picture to write', - hash_files=False) - scaling = traits.Float(position=0, argstr='-s %f', desc='image scale') + argstr="%s", + desc="picture to write", + hash_files=False, + ) + scaling = traits.Float(position=0, argstr="-s %f", desc="image scale") class SlicerOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='picture to write') + out_file = File(exists=True, desc="picture to write") class Slicer(FSLCommand): @@ -1164,34 +1218,35 @@ class Slicer(FSLCommand): """ - _cmd = 'slicer' + + _cmd = "slicer" input_spec = SlicerInputSpec output_spec = SlicerOutputSpec def _format_arg(self, name, spec, value): - if name == 'show_orientation': + if name == "show_orientation": if value: - return '' + return "" else: - return '-u' + return "-u" elif name == "label_slices": if value: - return '-L' + return "-L" else: - return '' + return "" return super(Slicer, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self._outputs().get() out_file = self.inputs.out_file if not isdefined(out_file): - out_file = self._gen_fname(self.inputs.in_file, ext='.png') - outputs['out_file'] = os.path.abspath(out_file) + out_file = self._gen_fname(self.inputs.in_file, ext=".png") + outputs["out_file"] = os.path.abspath(out_file) return outputs def _gen_filename(self, name): - if name == 'out_file': - return self._list_outputs()['out_file'] + if name == "out_file": + return self._list_outputs()["out_file"] return None @@ -1203,61 +1258,58 @@ class PlotTimeSeriesInputSpec(FSLCommandInputSpec): mandatory=True, argstr="%s", position=1, - desc=("file or list of files with columns of " - "timecourse information")) + desc=("file or list of files with columns of " "timecourse information"), + ) plot_start = traits.Int( argstr="--start=%d", - xor=("plot_range", ), - desc="first column from in-file to plot") + xor=("plot_range",), + desc="first column from in-file to plot", + ) plot_finish = traits.Int( argstr="--finish=%d", - xor=("plot_range", ), - desc="final column from in-file to plot") + xor=("plot_range",), + desc="final column from in-file to plot", + ) plot_range = traits.Tuple( traits.Int, traits.Int, argstr="%s", xor=("plot_start", "plot_finish"), - desc=("first and last columns from the in-file " - "to plot")) + desc=("first and last columns from the in-file " "to plot"), + ) title = traits.Str(argstr="%s", desc="plot title") legend_file = File(exists=True, argstr="--legend=%s", desc="legend file") labels = traits.Either( - traits.Str, - traits.List(traits.Str), - argstr="%s", - desc="label or list of labels") - y_min = traits.Float( - argstr="--ymin=%.2f", desc="minumum y value", xor=("y_range", )) - y_max = traits.Float( - argstr="--ymax=%.2f", desc="maximum y value", xor=("y_range", )) + traits.Str, traits.List(traits.Str), argstr="%s", desc="label or list of labels" + ) + y_min = traits.Float(argstr="--ymin=%.2f", desc="minumum y value", xor=("y_range",)) + y_max = traits.Float(argstr="--ymax=%.2f", desc="maximum y value", xor=("y_range",)) y_range = traits.Tuple( traits.Float, traits.Float, argstr="%s", xor=("y_min", "y_max"), - desc="min and max y axis values") + desc="min and max y axis values", + ) x_units = traits.Int( argstr="-u %d", usedefault=True, default_value=1, - desc=("scaling units for x-axis (between 1 and length of in file)")) + desc=("scaling units for x-axis 
(between 1 and length of in file)"), + ) plot_size = traits.Tuple( - traits.Int, - traits.Int, - argstr="%s", - desc="plot image height and width") - x_precision = traits.Int( - argstr="--precision=%d", desc="precision of x-axis labels") - sci_notation = traits.Bool( - argstr="--sci", desc="switch on scientific notation") + traits.Int, traits.Int, argstr="%s", desc="plot image height and width" + ) + x_precision = traits.Int(argstr="--precision=%d", desc="precision of x-axis labels") + sci_notation = traits.Bool(argstr="--sci", desc="switch on scientific notation") out_file = File( - argstr="-o %s", genfile=True, desc="image to write", hash_files=False) + argstr="-o %s", genfile=True, desc="image to write", hash_files=False + ) class PlotTimeSeriesOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='image to write') + out_file = File(exists=True, desc="image to write") class PlotTimeSeries(FSLCommand): @@ -1275,6 +1327,7 @@ class PlotTimeSeries(FSLCommand): """ + _cmd = "fsl_tsplot" input_spec = PlotTimeSeriesInputSpec output_spec = PlotTimeSeriesOutputSpec @@ -1293,7 +1346,7 @@ def _format_arg(self, name, spec, value): else: return "-a %s" % value elif name == "title": - return "-t \'%s\'" % value + return "-t '%s'" % value elif name == "plot_range": return "--start=%d --finish=%d" % value elif name == "y_range": @@ -1310,13 +1363,13 @@ def _list_outputs(self): infile = self.inputs.in_file[0] else: infile = self.inputs.in_file - out_file = self._gen_fname(infile, ext='.png') - outputs['out_file'] = os.path.abspath(out_file) + out_file = self._gen_fname(infile, ext=".png") + outputs["out_file"] = os.path.abspath(out_file) return outputs def _gen_filename(self, name): - if name == 'out_file': - return self._list_outputs()['out_file'] + if name == "out_file": + return self._list_outputs()["out_file"] return None @@ -1328,33 +1381,33 @@ class PlotMotionParamsInputSpec(FSLCommandInputSpec): mandatory=True, argstr="%s", position=1, - desc="file with motion parameters") + desc="file with motion parameters", + ) in_source = traits.Enum( "spm", "fsl", mandatory=True, - desc=("which program generated the motion " - "parameter file - fsl, spm")) + desc=("which program generated the motion " "parameter file - fsl, spm"), + ) plot_type = traits.Enum( "rotations", "translations", "displacement", argstr="%s", mandatory=True, - desc=("which motion type to plot - rotations, " - "translations, displacement")) + desc=("which motion type to plot - rotations, " "translations, displacement"), + ) plot_size = traits.Tuple( - traits.Int, - traits.Int, - argstr="%s", - desc="plot image height and width") + traits.Int, traits.Int, argstr="%s", desc="plot image height and width" + ) out_file = File( - argstr="-o %s", genfile=True, desc="image to write", hash_files=False) + argstr="-o %s", genfile=True, desc="image to write", hash_files=False + ) class PlotMotionParamsOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='image to write') + out_file = File(exists=True, desc="image to write") class PlotMotionParams(FSLCommand): @@ -1384,7 +1437,8 @@ class PlotMotionParams(FSLCommand): more flexibilty, see the :class:`fsl.PlotTimeSeries` interface. 
""" - _cmd = 'fsl_tsplot' + + _cmd = "fsl_tsplot" input_spec = PlotMotionParamsInputSpec output_spec = PlotMotionParamsOutputSpec @@ -1393,24 +1447,27 @@ def _format_arg(self, name, spec, value): if name == "plot_type": source = self.inputs.in_source - if self.inputs.plot_type == 'displacement': - title = '-t \'MCFLIRT estimated mean displacement (mm)\'' - labels = '-a abs,rel' - return '%s %s' % (title, labels) + if self.inputs.plot_type == "displacement": + title = "-t 'MCFLIRT estimated mean displacement (mm)'" + labels = "-a abs,rel" + return "%s %s" % (title, labels) # Get the right starting and ending position depending on source # package sfdict = dict( - fsl_rot=(1, 3), fsl_tra=(4, 6), spm_rot=(4, 6), spm_tra=(1, 3)) + fsl_rot=(1, 3), fsl_tra=(4, 6), spm_rot=(4, 6), spm_tra=(1, 3) + ) # Format the title properly - sfstr = "--start=%d --finish=%d" % sfdict["%s_%s" % (source, - value[:3])] + sfstr = "--start=%d --finish=%d" % sfdict["%s_%s" % (source, value[:3])] titledict = dict(fsl="MCFLIRT", spm="Realign") unitdict = dict(rot="radians", tra="mm") - title = "\'%s estimated %s (%s)\'" % (titledict[source], value, - unitdict[value[:3]]) + title = "'%s estimated %s (%s)'" % ( + titledict[source], + value, + unitdict[value[:3]], + ) return "-t %s %s -a x,y,z" % (title, sfstr) elif name == "plot_size": @@ -1432,16 +1489,18 @@ def _list_outputs(self): infile = self.inputs.in_file[0] else: infile = self.inputs.in_file - plttype = dict( - rot="rot", tra="trans", dis="disp")[self.inputs.plot_type[:3]] + plttype = dict(rot="rot", tra="trans", dis="disp")[ + self.inputs.plot_type[:3] + ] out_file = fname_presuffix( - infile, suffix="_%s.png" % plttype, use_ext=False) - outputs['out_file'] = os.path.abspath(out_file) + infile, suffix="_%s.png" % plttype, use_ext=False + ) + outputs["out_file"] = os.path.abspath(out_file) return outputs def _gen_filename(self, name): - if name == 'out_file': - return self._list_outputs()['out_file'] + if name == "out_file": + return self._list_outputs()["out_file"] return None @@ -1451,38 +1510,39 @@ class ConvertXFMInputSpec(FSLCommandInputSpec): mandatory=True, argstr="%s", position=-1, - desc="input transformation matrix") + desc="input transformation matrix", + ) in_file2 = File( exists=True, argstr="%s", position=-2, - desc="second input matrix (for use with fix_scale_skew or concat_xfm)") + desc="second input matrix (for use with fix_scale_skew or concat_xfm)", + ) _options = ["invert_xfm", "concat_xfm", "fix_scale_skew"] invert_xfm = traits.Bool( - argstr="-inverse", - position=-3, - xor=_options, - desc="invert input transformation") + argstr="-inverse", position=-3, xor=_options, desc="invert input transformation" + ) concat_xfm = traits.Bool( argstr="-concat", position=-3, xor=_options, requires=["in_file2"], - desc=("write joint transformation of two input " - "matrices")) + desc=("write joint transformation of two input " "matrices"), + ) fix_scale_skew = traits.Bool( argstr="-fixscaleskew", position=-3, xor=_options, requires=["in_file2"], - desc=("use secondary matrix to fix scale and " - "skew")) + desc=("use secondary matrix to fix scale and " "skew"), + ) out_file = File( genfile=True, argstr="-omat %s", position=1, desc="final transformation matrix", - hash_files=False) + hash_files=False, + ) class ConvertXFMOutputSpec(TraitedSpec): @@ -1517,10 +1577,8 @@ def _list_outputs(self): _, infile1, _ = split_filename(self.inputs.in_file) if self.inputs.invert_xfm: outfile = fname_presuffix( - infile1, - suffix="_inv.mat", - newpath=os.getcwd(), - 
use_ext=False) + infile1, suffix="_inv.mat", newpath=os.getcwd(), use_ext=False + ) else: if self.inputs.concat_xfm: _, infile2, _ = split_filename(self.inputs.in_file2) @@ -1528,13 +1586,12 @@ def _list_outputs(self): "%s_%s" % (infile1, infile2), suffix=".mat", newpath=os.getcwd(), - use_ext=False) + use_ext=False, + ) else: outfile = fname_presuffix( - infile1, - suffix="_fix.mat", - newpath=os.getcwd(), - use_ext=False) + infile1, suffix="_fix.mat", newpath=os.getcwd(), use_ext=False + ) outputs["out_file"] = os.path.abspath(outfile) return outputs @@ -1547,23 +1604,18 @@ def _gen_filename(self, name): class SwapDimensionsInputSpec(FSLCommandInputSpec): in_file = File( - exists=True, - mandatory=True, - argstr="%s", - position="1", - desc="input image") - _dims = [ - "x", "-x", "y", "-y", "z", "-z", "RL", "LR", "AP", "PA", "IS", "SI" - ] + exists=True, mandatory=True, argstr="%s", position="1", desc="input image" + ) + _dims = ["x", "-x", "y", "-y", "z", "-z", "RL", "LR", "AP", "PA", "IS", "SI"] new_dims = traits.Tuple( traits.Enum(_dims), traits.Enum(_dims), traits.Enum(_dims), argstr="%s %s %s", mandatory=True, - desc="3-tuple of new dimension order") - out_file = File( - genfile=True, argstr="%s", desc="image to write", hash_files=False) + desc="3-tuple of new dimension order", + ) + out_file = File(genfile=True, argstr="%s", desc="image to write", hash_files=False) class SwapDimensionsOutputSpec(TraitedSpec): @@ -1580,6 +1632,7 @@ class SwapDimensions(FSLCommand): (RL, LR, AP, PA, IS, SI). """ + _cmd = "fslswapdim" input_spec = SwapDimensionsInputSpec output_spec = SwapDimensionsOutputSpec @@ -1589,7 +1642,8 @@ def _list_outputs(self): outputs["out_file"] = self.inputs.out_file if not isdefined(self.inputs.out_file): outputs["out_file"] = self._gen_fname( - self.inputs.in_file, suffix='_newdims') + self.inputs.in_file, suffix="_newdims" + ) outputs["out_file"] = os.path.abspath(outputs["out_file"]) return outputs @@ -1605,20 +1659,21 @@ class PowerSpectrumInputSpec(FSLCommandInputSpec): in_file = File( exists=True, desc="input 4D file to estimate the power spectrum", - argstr='%s', + argstr="%s", position=0, - mandatory=True) + mandatory=True, + ) out_file = File( - desc='name of output 4D file for power spectrum', - argstr='%s', + desc="name of output 4D file for power spectrum", + argstr="%s", position=1, genfile=True, - hash_files=False) + hash_files=False, + ) class PowerSpectrumOutputSpec(TraitedSpec): - out_file = File( - exists=True, desc="path/name of the output 4D power spectrum file") + out_file = File(exists=True, desc="path/name of the output 4D power spectrum file") class PowerSpectrum(FSLCommand): @@ -1635,41 +1690,42 @@ class PowerSpectrum(FSLCommand): """ - _cmd = 'fslpspec' + _cmd = "fslpspec" input_spec = PowerSpectrumInputSpec output_spec = PowerSpectrumOutputSpec def _gen_outfilename(self): out_file = self.inputs.out_file if not isdefined(out_file) and isdefined(self.inputs.in_file): - out_file = self._gen_fname(self.inputs.in_file, suffix='_ps') + out_file = self._gen_fname(self.inputs.in_file, suffix="_ps") return out_file def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = os.path.abspath(self._gen_outfilename()) + outputs["out_file"] = os.path.abspath(self._gen_outfilename()) return outputs def _gen_filename(self, name): - if name == 'out_file': + if name == "out_file": return self._gen_outfilename() return None class SigLossInputSpec(FSLCommandInputSpec): - in_file = File( - mandatory=True, exists=True, argstr='-i %s', 
desc='b0 fieldmap file') + in_file = File(mandatory=True, exists=True, argstr="-i %s", desc="b0 fieldmap file") out_file = File( - argstr='-s %s', desc='output signal loss estimate file', genfile=True) + argstr="-s %s", desc="output signal loss estimate file", genfile=True + ) - mask_file = File(exists=True, argstr='-m %s', desc='brain mask file') - echo_time = traits.Float(argstr='--te=%f', desc='echo time in seconds') + mask_file = File(exists=True, argstr="-m %s", desc="brain mask file") + echo_time = traits.Float(argstr="--te=%f", desc="echo time in seconds") slice_direction = traits.Enum( - 'x', 'y', 'z', argstr='-d %s', desc='slicing direction') + "x", "y", "z", argstr="-d %s", desc="slicing direction" + ) class SigLossOuputSpec(TraitedSpec): - out_file = File(exists=True, desc='signal loss estimate file') + out_file = File(exists=True, desc="signal loss estimate file") class SigLoss(FSLCommand): @@ -1685,22 +1741,23 @@ class SigLoss(FSLCommand): """ + input_spec = SigLossInputSpec output_spec = SigLossOuputSpec - _cmd = 'sigloss' + _cmd = "sigloss" def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = self.inputs.out_file - if not isdefined(outputs['out_file']) and \ - isdefined(self.inputs.in_file): - outputs['out_file'] = self._gen_fname( - self.inputs.in_file, suffix='_sigloss') + outputs["out_file"] = self.inputs.out_file + if not isdefined(outputs["out_file"]) and isdefined(self.inputs.in_file): + outputs["out_file"] = self._gen_fname( + self.inputs.in_file, suffix="_sigloss" + ) return outputs def _gen_filename(self, name): - if name == 'out_file': - return self._list_outputs()['out_file'] + if name == "out_file": + return self._list_outputs()["out_file"] return None @@ -1727,95 +1784,118 @@ class Reorient2Std(FSLCommand): """ - _cmd = 'fslreorient2std' + + _cmd = "fslreorient2std" input_spec = Reorient2StdInputSpec output_spec = Reorient2StdOutputSpec def _gen_filename(self, name): - if name == 'out_file': + if name == "out_file": return self._gen_fname(self.inputs.in_file, suffix="_reoriented") return None def _list_outputs(self): outputs = self.output_spec().get() if not isdefined(self.inputs.out_file): - outputs['out_file'] = self._gen_filename('out_file') + outputs["out_file"] = self._gen_filename("out_file") else: - outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) return outputs class InvWarpInputSpec(FSLCommandInputSpec): warp = File( exists=True, - argstr='--warp=%s', + argstr="--warp=%s", mandatory=True, - desc=('Name of file containing warp-coefficients/fields. This ' - 'would typically be the output from the --cout switch of' - ' fnirt (but can also use fields, like the output from ' - '--fout).')) + desc=( + "Name of file containing warp-coefficients/fields. This " + "would typically be the output from the --cout switch of" + " fnirt (but can also use fields, like the output from " + "--fout)." + ), + ) reference = File( exists=True, - argstr='--ref=%s', + argstr="--ref=%s", mandatory=True, - desc=('Name of a file in target space. Note that the ' - 'target space is now different from the target ' - 'space that was used to create the --warp file. It ' - 'would typically be the file that was specified ' - 'with the --in argument when running fnirt.')) + desc=( + "Name of a file in target space. Note that the " + "target space is now different from the target " + "space that was used to create the --warp file. 
It " + "would typically be the file that was specified " + "with the --in argument when running fnirt." + ), + ) inverse_warp = File( - argstr='--out=%s', - name_source=['warp'], + argstr="--out=%s", + name_source=["warp"], hash_files=False, - name_template='%s_inverse', - desc=('Name of output file, containing warps that are ' - 'the "reverse" of those in --warp. This will be ' - 'a field-file (rather than a file of spline ' - 'coefficients), and it will have any affine ' - 'component included as part of the ' - 'displacements.')) + name_template="%s_inverse", + desc=( + "Name of output file, containing warps that are " + 'the "reverse" of those in --warp. This will be ' + "a field-file (rather than a file of spline " + "coefficients), and it will have any affine " + "component included as part of the " + "displacements." + ), + ) absolute = traits.Bool( - argstr='--abs', - xor=['relative'], - desc=('If set it indicates that the warps in --warp' - ' should be interpreted as absolute, provided' - ' that it is not created by fnirt (which ' - 'always uses relative warps). If set it also ' - 'indicates that the output --out should be ' - 'absolute.')) + argstr="--abs", + xor=["relative"], + desc=( + "If set it indicates that the warps in --warp" + " should be interpreted as absolute, provided" + " that it is not created by fnirt (which " + "always uses relative warps). If set it also " + "indicates that the output --out should be " + "absolute." + ), + ) relative = traits.Bool( - argstr='--rel', - xor=['absolute'], - desc=('If set it indicates that the warps in --warp' - ' should be interpreted as relative. I.e. the' - ' values in --warp are displacements from the' - ' coordinates in the --ref space. If set it ' - 'also indicates that the output --out should ' - 'be relative.')) + argstr="--rel", + xor=["absolute"], + desc=( + "If set it indicates that the warps in --warp" + " should be interpreted as relative. I.e. the" + " values in --warp are displacements from the" + " coordinates in the --ref space. If set it " + "also indicates that the output --out should " + "be relative." + ), + ) niter = traits.Int( - argstr='--niter=%d', - desc=('Determines how many iterations of the ' - 'gradient-descent search that should be run.')) + argstr="--niter=%d", + desc=( + "Determines how many iterations of the " + "gradient-descent search that should be run." + ), + ) regularise = traits.Float( - argstr='--regularise=%f', - desc='Regularization strength (deafult=1.0).') + argstr="--regularise=%f", desc="Regularization strength (deafult=1.0)." + ) noconstraint = traits.Bool( - argstr='--noconstraint', desc='Do not apply Jacobian constraint') + argstr="--noconstraint", desc="Do not apply Jacobian constraint" + ) jacobian_min = traits.Float( - argstr='--jmin=%f', - desc=('Minimum acceptable Jacobian value for ' - 'constraint (default 0.01)')) + argstr="--jmin=%f", + desc=("Minimum acceptable Jacobian value for " "constraint (default 0.01)"), + ) jacobian_max = traits.Float( - argstr='--jmax=%f', - desc=('Maximum acceptable Jacobian value for ' - 'constraint (default 100.0)')) + argstr="--jmax=%f", + desc=("Maximum acceptable Jacobian value for " "constraint (default 100.0)"), + ) class InvWarpOutputSpec(TraitedSpec): inverse_warp = File( exists=True, - desc=('Name of output file, containing warps that are ' - 'the "reverse" of those in --warp.')) + desc=( + "Name of output file, containing warps that are " + 'the "reverse" of those in --warp.' 
+ ), + ) class InvWarp(FSLCommand): @@ -1841,7 +1921,7 @@ class InvWarp(FSLCommand): input_spec = InvWarpInputSpec output_spec = InvWarpOutputSpec - _cmd = 'invwarp' + _cmd = "invwarp" class ComplexInputSpec(FSLCommandInputSpec): @@ -1851,82 +1931,68 @@ class ComplexInputSpec(FSLCommandInputSpec): real_in_file = File(exists=True, argstr="%s", position=2) imaginary_in_file = File(exists=True, argstr="%s", position=3) magnitude_in_file = File(exists=True, argstr="%s", position=2) - phase_in_file = File(exists=True, argstr='%s', position=3) + phase_in_file = File(exists=True, argstr="%s", position=3) _ofs = [ - 'complex_out_file', 'magnitude_out_file', 'phase_out_file', - 'real_out_file', 'imaginary_out_file' + "complex_out_file", + "magnitude_out_file", + "phase_out_file", + "real_out_file", + "imaginary_out_file", ] _conversion = [ - 'real_polar', - 'real_cartesian', - 'complex_cartesian', - 'complex_polar', - 'complex_split', - 'complex_merge', + "real_polar", + "real_cartesian", + "complex_cartesian", + "complex_polar", + "complex_split", + "complex_merge", ] complex_out_file = File( - genfile=True, argstr="%s", position=-3, xor=_ofs + _conversion[:2]) + genfile=True, argstr="%s", position=-3, xor=_ofs + _conversion[:2] + ) magnitude_out_file = File( genfile=True, argstr="%s", position=-4, - xor=_ofs[:1] + _ofs[3:] + _conversion[1:]) + xor=_ofs[:1] + _ofs[3:] + _conversion[1:], + ) phase_out_file = File( genfile=True, argstr="%s", position=-3, - xor=_ofs[:1] + _ofs[3:] + _conversion[1:]) + xor=_ofs[:1] + _ofs[3:] + _conversion[1:], + ) real_out_file = File( genfile=True, argstr="%s", position=-4, - xor=_ofs[:3] + _conversion[:1] + _conversion[2:]) + xor=_ofs[:3] + _conversion[:1] + _conversion[2:], + ) imaginary_out_file = File( genfile=True, argstr="%s", position=-3, - xor=_ofs[:3] + _conversion[:1] + _conversion[2:]) + xor=_ofs[:3] + _conversion[:1] + _conversion[2:], + ) - start_vol = traits.Int(position=-2, argstr='%d') - end_vol = traits.Int(position=-1, argstr='%d') + start_vol = traits.Int(position=-2, argstr="%d") + end_vol = traits.Int(position=-1, argstr="%d") - real_polar = traits.Bool( - argstr='-realpolar', - xor=_conversion, - position=1, - ) + real_polar = traits.Bool(argstr="-realpolar", xor=_conversion, position=1,) # requires=['complex_in_file','magnitude_out_file','phase_out_file']) - real_cartesian = traits.Bool( - argstr='-realcartesian', - xor=_conversion, - position=1, - ) + real_cartesian = traits.Bool(argstr="-realcartesian", xor=_conversion, position=1,) # requires=['complex_in_file','real_out_file','imaginary_out_file']) - complex_cartesian = traits.Bool( - argstr='-complex', - xor=_conversion, - position=1, - ) + complex_cartesian = traits.Bool(argstr="-complex", xor=_conversion, position=1,) # requires=['real_in_file','imaginary_in_file','complex_out_file']) - complex_polar = traits.Bool( - argstr='-complexpolar', - xor=_conversion, - position=1, - ) + complex_polar = traits.Bool(argstr="-complexpolar", xor=_conversion, position=1,) # requires=['magnitude_in_file','phase_in_file', # 'magnitude_out_file','phase_out_file']) - complex_split = traits.Bool( - argstr='-complexsplit', - xor=_conversion, - position=1, - ) + complex_split = traits.Bool(argstr="-complexsplit", xor=_conversion, position=1,) # requires=['complex_in_file','complex_out_file']) complex_merge = traits.Bool( - argstr='-complexmerge', - xor=_conversion + ['start_vol', 'end_vol'], - position=1, + argstr="-complexmerge", xor=_conversion + ["start_vol", "end_vol"], position=1, ) @@ -1954,7 +2020,8 
@@ class Complex(FSLCommand): """ - _cmd = 'fslcomplex' + + _cmd = "fslcomplex" input_spec = ComplexInputSpec output_spec = ComplexOuputSpec @@ -1970,7 +2037,7 @@ def _parse_inputs(self, skip=None): return super(Complex, self)._parse_inputs(skip) def _gen_filename(self, name): - if name == 'complex_out_file': + if name == "complex_out_file": if self.inputs.complex_cartesian: in_file = self.inputs.real_in_file elif self.inputs.complex_polar: @@ -1980,14 +2047,13 @@ def _gen_filename(self, name): else: return None return self._gen_fname(in_file, suffix="_cplx") - elif name == 'magnitude_out_file': + elif name == "magnitude_out_file": return self._gen_fname(self.inputs.complex_in_file, suffix="_mag") - elif name == 'phase_out_file': - return self._gen_fname( - self.inputs.complex_in_file, suffix="_phase") - elif name == 'real_out_file': + elif name == "phase_out_file": + return self._gen_fname(self.inputs.complex_in_file, suffix="_phase") + elif name == "real_out_file": return self._gen_fname(self.inputs.complex_in_file, suffix="_real") - elif name == 'imaginary_out_file': + elif name == "imaginary_out_file": return self._gen_fname(self.inputs.complex_in_file, suffix="_imag") return None @@ -1999,110 +2065,140 @@ def _get_output(self, name): def _list_outputs(self): outputs = self.output_spec().get() - if self.inputs.complex_cartesian or self.inputs.complex_polar or \ - self.inputs.complex_split or self.inputs.complex_merge: - outputs['complex_out_file'] = self._get_output('complex_out_file') + if ( + self.inputs.complex_cartesian + or self.inputs.complex_polar + or self.inputs.complex_split + or self.inputs.complex_merge + ): + outputs["complex_out_file"] = self._get_output("complex_out_file") elif self.inputs.real_cartesian: - outputs['real_out_file'] = self._get_output('real_out_file') - outputs['imaginary_out_file'] = self._get_output( - 'imaginary_out_file') + outputs["real_out_file"] = self._get_output("real_out_file") + outputs["imaginary_out_file"] = self._get_output("imaginary_out_file") elif self.inputs.real_polar: - outputs['magnitude_out_file'] = self._get_output( - 'magnitude_out_file') - outputs['phase_out_file'] = self._get_output('phase_out_file') + outputs["magnitude_out_file"] = self._get_output("magnitude_out_file") + outputs["phase_out_file"] = self._get_output("phase_out_file") return outputs class WarpUtilsInputSpec(FSLCommandInputSpec): in_file = File( exists=True, - argstr='--in=%s', + argstr="--in=%s", mandatory=True, - desc=('Name of file containing warp-coefficients/fields. This ' - 'would typically be the output from the --cout switch of ' - 'fnirt (but can also use fields, like the output from ' - '--fout).')) + desc=( + "Name of file containing warp-coefficients/fields. This " + "would typically be the output from the --cout switch of " + "fnirt (but can also use fields, like the output from " + "--fout)." + ), + ) reference = File( exists=True, - argstr='--ref=%s', + argstr="--ref=%s", mandatory=True, - desc=('Name of a file in target space. Note that the ' - 'target space is now different from the target ' - 'space that was used to create the --warp file. It ' - 'would typically be the file that was specified ' - 'with the --in argument when running fnirt.')) + desc=( + "Name of a file in target space. Note that the " + "target space is now different from the target " + "space that was used to create the --warp file. It " + "would typically be the file that was specified " + "with the --in argument when running fnirt." 
+ ), + ) out_format = traits.Enum( - 'spline', - 'field', - argstr='--outformat=%s', - desc=('Specifies the output format. If set to field (default) ' - 'the output will be a (4D) field-file. If set to spline ' - 'the format will be a (4D) file of spline coefficients.')) + "spline", + "field", + argstr="--outformat=%s", + desc=( + "Specifies the output format. If set to field (default) " + "the output will be a (4D) field-file. If set to spline " + "the format will be a (4D) file of spline coefficients." + ), + ) warp_resolution = traits.Tuple( traits.Float, traits.Float, traits.Float, - argstr='--warpres=%0.4f,%0.4f,%0.4f', - desc=('Specifies the resolution/knot-spacing of the splines pertaining' - ' to the coefficients in the --out file. This parameter is only ' - 'relevant if --outformat is set to spline. It should be noted ' - 'that if the --in file has a higher resolution, the resulting ' - 'coefficients will pertain to the closest (in a least-squares' - ' sense) file in the space of fields with the --warpres' - ' resolution. It should also be noted that the resolution ' - 'will always be an integer multiple of the voxel ' - 'size.')) + argstr="--warpres=%0.4f,%0.4f,%0.4f", + desc=( + "Specifies the resolution/knot-spacing of the splines pertaining" + " to the coefficients in the --out file. This parameter is only " + "relevant if --outformat is set to spline. It should be noted " + "that if the --in file has a higher resolution, the resulting " + "coefficients will pertain to the closest (in a least-squares" + " sense) file in the space of fields with the --warpres" + " resolution. It should also be noted that the resolution " + "will always be an integer multiple of the voxel " + "size." + ), + ) knot_space = traits.Tuple( traits.Int, traits.Int, traits.Int, - argstr='--knotspace=%d,%d,%d', - desc=('Alternative (to --warpres) specification of the resolution of ' - 'the output spline-field.')) + argstr="--knotspace=%d,%d,%d", + desc=( + "Alternative (to --warpres) specification of the resolution of " + "the output spline-field." + ), + ) out_file = File( - argstr='--out=%s', + argstr="--out=%s", position=-1, - name_source=['in_file'], - output_name='out_file', - desc=('Name of output file. The format of the output depends on what ' - 'other parameters are set. The default format is a (4D) ' - 'field-file. If the --outformat is set to spline the format ' - 'will be a (4D) file of spline coefficients.')) + name_source=["in_file"], + output_name="out_file", + desc=( + "Name of output file. The format of the output depends on what " + "other parameters are set. The default format is a (4D) " + "field-file. If the --outformat is set to spline the format " + "will be a (4D) file of spline coefficients." + ), + ) write_jacobian = traits.Bool( False, mandatory=True, usedefault=True, - desc='Switch on --jac flag with automatically generated filename') + desc="Switch on --jac flag with automatically generated filename", + ) out_jacobian = File( - argstr='--jac=%s', - desc=('Specifies that a (3D) file of Jacobian determinants ' - 'corresponding to --in should be produced and written to ' - 'filename.')) + argstr="--jac=%s", + desc=( + "Specifies that a (3D) file of Jacobian determinants " + "corresponding to --in should be produced and written to " + "filename." + ), + ) with_affine = traits.Bool( False, - argstr='--withaff', - desc=('Specifies that the affine transform (i.e. 
that which was ' - 'specified for the --aff parameter in fnirt) should be ' - 'included as displacements in the --out file. That can be ' - 'useful for interfacing with software that cannot decode ' - 'FSL/fnirt coefficient-files (where the affine transform is ' - 'stored separately from the displacements).')) + argstr="--withaff", + desc=( + "Specifies that the affine transform (i.e. that which was " + "specified for the --aff parameter in fnirt) should be " + "included as displacements in the --out file. That can be " + "useful for interfacing with software that cannot decode " + "FSL/fnirt coefficient-files (where the affine transform is " + "stored separately from the displacements)." + ), + ) class WarpUtilsOutputSpec(TraitedSpec): out_file = File( - desc=('Name of output file, containing the warp as field or ' - 'coefficients.')) + desc=("Name of output file, containing the warp as field or " "coefficients.") + ) out_jacobian = File( - desc=('Name of output file, containing the map of the determinant of ' - 'the Jacobian')) + desc=( + "Name of output file, containing the map of the determinant of " + "the Jacobian" + ) + ) class WarpUtils(FSLCommand): @@ -2130,160 +2226,175 @@ class WarpUtils(FSLCommand): input_spec = WarpUtilsInputSpec output_spec = WarpUtilsOutputSpec - _cmd = 'fnirtfileutils' + _cmd = "fnirtfileutils" def _parse_inputs(self, skip=None): if skip is None: skip = [] - suffix = 'field' - if (isdefined(self.inputs.out_format) - and self.inputs.out_format == 'spline'): - suffix = 'coeffs' + suffix = "field" + if isdefined(self.inputs.out_format) and self.inputs.out_format == "spline": + suffix = "coeffs" - trait_spec = self.inputs.trait('out_file') + trait_spec = self.inputs.trait("out_file") trait_spec.name_template = "%s_" + suffix if self.inputs.write_jacobian: if not isdefined(self.inputs.out_jacobian): - jac_spec = self.inputs.trait('out_jacobian') - jac_spec.name_source = ['in_file'] - jac_spec.name_template = '%s_jac' - jac_spec.output_name = 'out_jacobian' + jac_spec = self.inputs.trait("out_jacobian") + jac_spec.name_source = ["in_file"] + jac_spec.name_template = "%s_jac" + jac_spec.output_name = "out_jacobian" else: - skip += ['out_jacobian'] + skip += ["out_jacobian"] - skip += ['write_jacobian'] + skip += ["write_jacobian"] return super(WarpUtils, self)._parse_inputs(skip=skip) class ConvertWarpInputSpec(FSLCommandInputSpec): reference = File( exists=True, - argstr='--ref=%s', + argstr="--ref=%s", mandatory=True, position=1, - desc='Name of a file in target space of the full transform.') + desc="Name of a file in target space of the full transform.", + ) out_file = File( - argstr='--out=%s', + argstr="--out=%s", position=-1, - name_source=['reference'], - name_template='%s_concatwarp', - output_name='out_file', - desc=('Name of output file, containing warps that are the combination ' - 'of all those given as arguments. The format of this will be a ' - 'field-file (rather than spline coefficients) with any affine ' - 'components included.')) + name_source=["reference"], + name_template="%s_concatwarp", + output_name="out_file", + desc=( + "Name of output file, containing warps that are the combination " + "of all those given as arguments. The format of this will be a " + "field-file (rather than spline coefficients) with any affine " + "components included." 
+ ), + ) premat = File( exists=True, - argstr='--premat=%s', - desc='filename for pre-transform (affine matrix)') + argstr="--premat=%s", + desc="filename for pre-transform (affine matrix)", + ) warp1 = File( exists=True, - argstr='--warp1=%s', - desc='Name of file containing initial ' - 'warp-fields/coefficients (follows premat). This could ' - 'e.g. be a fnirt-transform from a subjects structural ' - 'scan to an average of a group of subjects.') + argstr="--warp1=%s", + desc="Name of file containing initial " + "warp-fields/coefficients (follows premat). This could " + "e.g. be a fnirt-transform from a subjects structural " + "scan to an average of a group of subjects.", + ) midmat = File( exists=True, argstr="--midmat=%s", - desc="Name of file containing mid-warp-affine transform") + desc="Name of file containing mid-warp-affine transform", + ) warp2 = File( exists=True, - argstr='--warp2=%s', - desc='Name of file containing secondary warp-fields/coefficients ' - '(after warp1/midmat but before postmat). This could e.g. be a ' - 'fnirt-transform from the average of a group of subjects to some ' - 'standard space (e.g. MNI152).') + argstr="--warp2=%s", + desc="Name of file containing secondary warp-fields/coefficients " + "(after warp1/midmat but before postmat). This could e.g. be a " + "fnirt-transform from the average of a group of subjects to some " + "standard space (e.g. MNI152).", + ) postmat = File( exists=True, - argstr='--postmat=%s', - desc='Name of file containing an affine transform (applied last). It ' - 'could e.g. be an affine transform that maps the MNI152-space ' - 'into a better approximation to the Talairach-space (if indeed ' - 'there is one).') + argstr="--postmat=%s", + desc="Name of file containing an affine transform (applied last). It " + "could e.g. be an affine transform that maps the MNI152-space " + "into a better approximation to the Talairach-space (if indeed " + "there is one).", + ) shift_in_file = File( exists=True, - argstr='--shiftmap=%s', + argstr="--shiftmap=%s", desc='Name of file containing a "shiftmap", a non-linear transform ' - 'with displacements only in one direction (applied first, before ' - 'premat). This would typically be a fieldmap that has been ' - 'pre-processed using fugue that maps a subjects functional (EPI) ' - 'data onto an undistorted space (i.e. a space that corresponds ' - 'to his/her true anatomy).') + "with displacements only in one direction (applied first, before " + "premat). This would typically be a fieldmap that has been " + "pre-processed using fugue that maps a subjects functional (EPI) " + "data onto an undistorted space (i.e. a space that corresponds " + "to his/her true anatomy).", + ) shift_direction = traits.Enum( - 'y-', - 'y', - 'x', - 'x-', - 'z', - 'z-', + "y-", + "y", + "x", + "x-", + "z", + "z-", argstr="--shiftdir=%s", - requires=['shift_in_file'], - desc='Indicates the direction that the distortions from ' - '--shiftmap goes. It depends on the direction and ' - 'polarity of the phase-encoding in the EPI sequence.') + requires=["shift_in_file"], + desc="Indicates the direction that the distortions from " + "--shiftmap goes. 
It depends on the direction and " + "polarity of the phase-encoding in the EPI sequence.", + ) cons_jacobian = traits.Bool( False, - argstr='--constrainj', - desc='Constrain the Jacobian of the warpfield to lie within specified ' - 'min/max limits.') + argstr="--constrainj", + desc="Constrain the Jacobian of the warpfield to lie within specified " + "min/max limits.", + ) jacobian_min = traits.Float( - argstr='--jmin=%f', - desc='Minimum acceptable Jacobian value for ' - 'constraint (default 0.01)') + argstr="--jmin=%f", + desc="Minimum acceptable Jacobian value for " "constraint (default 0.01)", + ) jacobian_max = traits.Float( - argstr='--jmax=%f', - desc='Maximum acceptable Jacobian value for ' - 'constraint (default 100.0)') + argstr="--jmax=%f", + desc="Maximum acceptable Jacobian value for " "constraint (default 100.0)", + ) abswarp = traits.Bool( - argstr='--abs', - xor=['relwarp'], - desc='If set it indicates that the warps in --warp1 and --warp2 should' - ' be interpreted as absolute. I.e. the values in --warp1/2 are ' - 'the coordinates in the next space, rather than displacements. ' - 'This flag is ignored if --warp1/2 was created by fnirt, which ' - 'always creates relative displacements.') + argstr="--abs", + xor=["relwarp"], + desc="If set it indicates that the warps in --warp1 and --warp2 should" + " be interpreted as absolute. I.e. the values in --warp1/2 are " + "the coordinates in the next space, rather than displacements. " + "This flag is ignored if --warp1/2 was created by fnirt, which " + "always creates relative displacements.", + ) relwarp = traits.Bool( - argstr='--rel', - xor=['abswarp'], - desc='If set it indicates that the warps in --warp1/2 should be ' - 'interpreted as relative. I.e. the values in --warp1/2 are ' - 'displacements from the coordinates in the next space.') + argstr="--rel", + xor=["abswarp"], + desc="If set it indicates that the warps in --warp1/2 should be " + "interpreted as relative. I.e. the values in --warp1/2 are " + "displacements from the coordinates in the next space.", + ) out_abswarp = traits.Bool( - argstr='--absout', - xor=['out_relwarp'], - desc='If set it indicates that the warps in --out should be absolute, ' - 'i.e. the values in --out are displacements from the coordinates ' - 'in --ref.') + argstr="--absout", + xor=["out_relwarp"], + desc="If set it indicates that the warps in --out should be absolute, " + "i.e. the values in --out are displacements from the coordinates " + "in --ref.", + ) out_relwarp = traits.Bool( - argstr='--relout', - xor=['out_abswarp'], - desc='If set it indicates that the warps in --out should be relative, ' - 'i.e. the values in --out are displacements from the coordinates ' - 'in --ref.') + argstr="--relout", + xor=["out_abswarp"], + desc="If set it indicates that the warps in --out should be relative, " + "i.e. 
the values in --out are displacements from the coordinates " + "in --ref.", + ) class ConvertWarpOutputSpec(TraitedSpec): out_file = File( exists=True, - desc='Name of output file, containing the warp as field or ' - 'coefficients.') + desc="Name of output file, containing the warp as field or " "coefficients.", + ) class ConvertWarp(FSLCommand): @@ -2309,59 +2420,63 @@ class ConvertWarp(FSLCommand): input_spec = ConvertWarpInputSpec output_spec = ConvertWarpOutputSpec - _cmd = 'convertwarp' + _cmd = "convertwarp" class WarpPointsBaseInputSpec(CommandLineInputSpec): in_coords = File( exists=True, position=-1, - argstr='%s', + argstr="%s", mandatory=True, - desc='filename of file containing coordinates') + desc="filename of file containing coordinates", + ) xfm_file = File( exists=True, - argstr='-xfm %s', - xor=['warp_file'], - desc='filename of affine transform (e.g. source2dest.mat)') + argstr="-xfm %s", + xor=["warp_file"], + desc="filename of affine transform (e.g. source2dest.mat)", + ) warp_file = File( exists=True, - argstr='-warp %s', - xor=['xfm_file'], - desc='filename of warpfield (e.g. ' - 'intermediate2dest_warp.nii.gz)') + argstr="-warp %s", + xor=["xfm_file"], + desc="filename of warpfield (e.g. " "intermediate2dest_warp.nii.gz)", + ) coord_vox = traits.Bool( True, - argstr='-vox', - xor=['coord_mm'], - desc='all coordinates in voxels - default') + argstr="-vox", + xor=["coord_mm"], + desc="all coordinates in voxels - default", + ) coord_mm = traits.Bool( - False, argstr='-mm', xor=['coord_vox'], desc='all coordinates in mm') + False, argstr="-mm", xor=["coord_vox"], desc="all coordinates in mm" + ) out_file = File( - name_source='in_coords', - name_template='%s_warped', - output_name='out_file', - desc='output file name') + name_source="in_coords", + name_template="%s_warped", + output_name="out_file", + desc="output file name", + ) class WarpPointsInputSpec(WarpPointsBaseInputSpec): src_file = File( - exists=True, - argstr='-src %s', - mandatory=True, - desc='filename of source image') + exists=True, argstr="-src %s", mandatory=True, desc="filename of source image" + ) dest_file = File( exists=True, - argstr='-dest %s', + argstr="-dest %s", mandatory=True, - desc='filename of destination image') + desc="filename of destination image", + ) class WarpPointsOutputSpec(TraitedSpec): out_file = File( exists=True, - desc='Name of output file, containing the warp as field or ' - 'coefficients.') + desc="Name of output file, containing the warp as field or " "coefficients.", + ) class WarpPoints(CommandLine): @@ -2390,8 +2505,8 @@ class WarpPoints(CommandLine): input_spec = WarpPointsInputSpec output_spec = WarpPointsOutputSpec - _cmd = 'img2imgcoord' - _terminal_output = 'stream' + _cmd = "img2imgcoord" + _terminal_output = "stream" def __init__(self, command=None, **inputs): self._tmpfile = None @@ -2401,24 +2516,26 @@ def __init__(self, command=None, **inputs): super(WarpPoints, self).__init__(command=command, **inputs) def _format_arg(self, name, trait_spec, value): - if name == 'out_file': - return '' + if name == "out_file": + return "" return super(WarpPoints, self)._format_arg(name, trait_spec, value) def _parse_inputs(self, skip=None): fname, ext = op.splitext(self.inputs.in_coords) - setattr(self, '_in_file', fname) - setattr(self, '_outformat', ext[1:]) - first_args = super(WarpPoints, - self)._parse_inputs(skip=['in_coords', 'out_file']) + setattr(self, "_in_file", fname) + setattr(self, "_outformat", ext[1:]) + first_args = super(WarpPoints, self)._parse_inputs( + 
skip=["in_coords", "out_file"] + ) - second_args = fname + '.txt' + second_args = fname + ".txt" - if ext in ['.vtk', '.trk']: + if ext in [".vtk", ".trk"]: if self._tmpfile is None: self._tmpfile = tempfile.NamedTemporaryFile( - suffix='.txt', dir=os.getcwd(), delete=False).name + suffix=".txt", dir=os.getcwd(), delete=False + ).name second_args = self._tmpfile return first_args + [second_args] @@ -2428,16 +2545,15 @@ def _vtk_to_coords(self, in_file, out_file=None): from ...interfaces import vtkbase as VTKInfo if VTKInfo.no_tvtk(): - raise ImportError( - 'TVTK is required and tvtk package was not found') + raise ImportError("TVTK is required and tvtk package was not found") - reader = tvtk.PolyDataReader(file_name=in_file + '.vtk') + reader = tvtk.PolyDataReader(file_name=in_file + ".vtk") reader.update() mesh = VTKInfo.vtk_output(reader) points = mesh.points if out_file is None: - out_file, _ = op.splitext(in_file) + '.txt' + out_file, _ = op.splitext(in_file) + ".txt" np.savetxt(out_file, points) return out_file @@ -2447,8 +2563,7 @@ def _coords_to_vtk(self, points, out_file): from ...interfaces import vtkbase as VTKInfo if VTKInfo.no_tvtk(): - raise ImportError( - 'TVTK is required and tvtk package was not found') + raise ImportError("TVTK is required and tvtk package was not found") reader = tvtk.PolyDataReader(file_name=self.inputs.in_file) reader.update() @@ -2462,37 +2577,37 @@ def _coords_to_vtk(self, points, out_file): def _trk_to_coords(self, in_file, out_file=None): from nibabel.trackvis import TrackvisFile + trkfile = TrackvisFile.from_file(in_file) streamlines = trkfile.streamlines if out_file is None: out_file, _ = op.splitext(in_file) - np.savetxt(streamlines, out_file + '.txt') - return out_file + '.txt' + np.savetxt(streamlines, out_file + ".txt") + return out_file + ".txt" def _coords_to_trk(self, points, out_file): - raise NotImplementedError('trk files are not yet supported') + raise NotImplementedError("trk files are not yet supported") def _overload_extension(self, value, name): - if name == 'out_file': - return '%s.%s' % (value, getattr(self, '_outformat')) + if name == "out_file": + return "%s.%s" % (value, getattr(self, "_outformat")) def _run_interface(self, runtime): - fname = getattr(self, '_in_file') - outformat = getattr(self, '_outformat') + fname = getattr(self, "_in_file") + outformat = getattr(self, "_outformat") tmpfile = None - if outformat == 'vtk': + if outformat == "vtk": tmpfile = self._tmpfile self._vtk_to_coords(fname, out_file=tmpfile) - elif outformat == 'trk': + elif outformat == "trk": tmpfile = self._tmpfile self._trk_to_coords(fname, out_file=tmpfile) runtime = super(WarpPoints, self)._run_interface(runtime) - newpoints = np.fromstring( - '\n'.join(runtime.stdout.split('\n')[1:]), sep=' ') + newpoints = np.fromstring("\n".join(runtime.stdout.split("\n")[1:]), sep=" ") if tmpfile is not None: try: @@ -2500,11 +2615,11 @@ def _run_interface(self, runtime): except: pass - out_file = self._filename_from_source('out_file') + out_file = self._filename_from_source("out_file") - if outformat == 'vtk': + if outformat == "vtk": self._coords_to_vtk(newpoints, out_file) - elif outformat == 'trk': + elif outformat == "trk": self._coords_to_trk(newpoints, out_file) else: np.savetxt(out_file, newpoints.reshape(-1, 3)) @@ -2514,20 +2629,21 @@ def _run_interface(self, runtime): class WarpPointsToStdInputSpec(WarpPointsBaseInputSpec): img_file = File( - exists=True, - argstr='-img %s', - mandatory=True, - desc=('filename of input image')) + exists=True, 
argstr="-img %s", mandatory=True, desc=("filename of input image") + ) std_file = File( exists=True, - argstr='-std %s', + argstr="-std %s", mandatory=True, - desc=('filename of destination image')) + desc=("filename of destination image"), + ) premat_file = File( exists=True, - argstr='-premat %s', - desc=('filename of pre-warp affine transform ' - '(e.g. example_func2highres.mat)')) + argstr="-premat %s", + desc=( + "filename of pre-warp affine transform " "(e.g. example_func2highres.mat)" + ), + ) class WarpPointsToStd(WarpPoints): @@ -2558,45 +2674,51 @@ class WarpPointsToStd(WarpPoints): input_spec = WarpPointsToStdInputSpec output_spec = WarpPointsOutputSpec - _cmd = 'img2stdcoord' - _terminal_output = 'file_split' + _cmd = "img2stdcoord" + _terminal_output = "file_split" class WarpPointsFromStdInputSpec(CommandLineInputSpec): img_file = File( exists=True, - argstr='-img %s', + argstr="-img %s", mandatory=True, - desc='filename of a destination image') + desc="filename of a destination image", + ) std_file = File( exists=True, - argstr='-std %s', + argstr="-std %s", mandatory=True, - desc='filename of the image in standard space') + desc="filename of the image in standard space", + ) in_coords = File( exists=True, position=-2, - argstr='%s', + argstr="%s", mandatory=True, - desc='filename of file containing coordinates') + desc="filename of file containing coordinates", + ) xfm_file = File( exists=True, - argstr='-xfm %s', - xor=['warp_file'], - desc='filename of affine transform (e.g. source2dest.mat)') + argstr="-xfm %s", + xor=["warp_file"], + desc="filename of affine transform (e.g. source2dest.mat)", + ) warp_file = File( exists=True, - argstr='-warp %s', - xor=['xfm_file'], - desc='filename of warpfield (e.g. ' - 'intermediate2dest_warp.nii.gz)') + argstr="-warp %s", + xor=["xfm_file"], + desc="filename of warpfield (e.g. 
" "intermediate2dest_warp.nii.gz)", + ) coord_vox = traits.Bool( True, - argstr='-vox', - xor=['coord_mm'], - desc='all coordinates in voxels - default') + argstr="-vox", + xor=["coord_mm"], + desc="all coordinates in voxels - default", + ) coord_mm = traits.Bool( - False, argstr='-mm', xor=['coord_vox'], desc='all coordinates in mm') + False, argstr="-mm", xor=["coord_vox"], desc="all coordinates in mm" + ) class WarpPointsFromStd(CommandLine): @@ -2625,63 +2747,69 @@ class WarpPointsFromStd(CommandLine): input_spec = WarpPointsFromStdInputSpec output_spec = WarpPointsOutputSpec - _cmd = 'std2imgcoord' + _cmd = "std2imgcoord" def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = op.abspath('stdout.nipype') + outputs["out_file"] = op.abspath("stdout.nipype") return outputs class MotionOutliersInputSpec(FSLCommandInputSpec): in_file = File( - exists=True, - mandatory=True, - desc="unfiltered 4D image", - argstr="-i %s") + exists=True, mandatory=True, desc="unfiltered 4D image", argstr="-i %s" + ) out_file = File( argstr="-o %s", - name_source='in_file', - name_template='%s_outliers.txt', + name_source="in_file", + name_template="%s_outliers.txt", keep_extension=True, - desc='output outlier file name', - hash_files=False) - mask = File( - exists=True, argstr="-m %s", desc="mask image for calculating metric") + desc="output outlier file name", + hash_files=False, + ) + mask = File(exists=True, argstr="-m %s", desc="mask image for calculating metric") metric = traits.Enum( - 'refrms', ['refrms', 'dvars', 'refmse', 'fd', 'fdrms'], + "refrms", + ["refrms", "dvars", "refmse", "fd", "fdrms"], argstr="--%s", - desc='metrics: refrms - RMS intensity difference to reference volume ' - 'as metric [default metric], refmse - Mean Square Error version ' - 'of refrms (used in original version of fsl_motion_outliers), ' - 'dvars - DVARS, fd - frame displacement, fdrms - FD with RMS ' - 'matrix calculation') + desc="metrics: refrms - RMS intensity difference to reference volume " + "as metric [default metric], refmse - Mean Square Error version " + "of refrms (used in original version of fsl_motion_outliers), " + "dvars - DVARS, fd - frame displacement, fdrms - FD with RMS " + "matrix calculation", + ) threshold = traits.Float( argstr="--thresh=%g", - desc=("specify absolute threshold value " - "(otherwise use box-plot cutoff = P75 + " - "1.5*IQR)")) + desc=( + "specify absolute threshold value " + "(otherwise use box-plot cutoff = P75 + " + "1.5*IQR)" + ), + ) no_motion_correction = traits.Bool( - argstr="--nomoco", - desc="do not run motion correction (assumed already done)") + argstr="--nomoco", desc="do not run motion correction (assumed already done)" + ) dummy = traits.Int( argstr="--dummy=%d", - desc='number of dummy scans to delete (before running anything and ' - 'creating EVs)') + desc="number of dummy scans to delete (before running anything and " + "creating EVs)", + ) out_metric_values = File( argstr="-s %s", - name_source='in_file', - name_template='%s_metrics.txt', + name_source="in_file", + name_template="%s_metrics.txt", keep_extension=True, - desc='output metric values (DVARS etc.) file name', - hash_files=False) + desc="output metric values (DVARS etc.) file name", + hash_files=False, + ) out_metric_plot = File( argstr="-p %s", - name_source='in_file', - name_template='%s_metrics.png', + name_source="in_file", + name_template="%s_metrics.png", hash_files=False, keep_extension=True, - desc='output metric values plot (DVARS etc.) 
file name') + desc="output metric values plot (DVARS etc.) file name", + ) class MotionOutliersOutputSpec(TraitedSpec): @@ -2705,4 +2833,4 @@ class MotionOutliers(FSLCommand): input_spec = MotionOutliersInputSpec output_spec = MotionOutliersOutputSpec - _cmd = 'fsl_motion_outliers' + _cmd = "fsl_motion_outliers" diff --git a/nipype/interfaces/image.py b/nipype/interfaces/image.py index d72bb47c42..b3f3f433cd 100644 --- a/nipype/interfaces/image.py +++ b/nipype/interfaces/image.py @@ -3,28 +3,30 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: from ..utils.filemanip import fname_presuffix -from .base import (SimpleInterface, TraitedSpec, BaseInterfaceInputSpec, - traits, File) +from .base import SimpleInterface, TraitedSpec, BaseInterfaceInputSpec, traits, File from .. import LooseVersion class RescaleInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, - desc='Skull-stripped image to rescale') - ref_file = File(exists=True, mandatory=True, - desc='Skull-stripped reference image') - invert = traits.Bool(desc='Invert contrast of rescaled image') - percentile = traits.Range(low=0., high=50., value=0., usedefault=True, - desc='Percentile to use for reference to allow ' - 'for outliers - 1 indicates the 1st and ' - '99th percentiles in the input file will ' - 'be mapped to the 99th and 1st percentiles ' - 'in the reference; 0 indicates minima and ' - 'maxima will be mapped') + in_file = File(exists=True, mandatory=True, desc="Skull-stripped image to rescale") + ref_file = File(exists=True, mandatory=True, desc="Skull-stripped reference image") + invert = traits.Bool(desc="Invert contrast of rescaled image") + percentile = traits.Range( + low=0.0, + high=50.0, + value=0.0, + usedefault=True, + desc="Percentile to use for reference to allow " + "for outliers - 1 indicates the 1st and " + "99th percentiles in the input file will " + "be mapped to the 99th and 1st percentiles " + "in the reference; 0 indicates minima and " + "maxima will be mapped", + ) class RescaleOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='Rescaled image') + out_file = File(exists=True, desc="Rescaled image") class Rescale(SimpleInterface): @@ -57,6 +59,7 @@ class Rescale(SimpleInterface): >>> res = invert_t1w.run() # doctest: +SKIP """ + input_spec = RescaleInputSpec output_spec = RescaleOutputSpec @@ -71,7 +74,7 @@ def _run_interface(self, runtime): in_mask = data > 0 ref_mask = ref_data > 0 - q = [self.inputs.percentile, 100. 
- self.inputs.percentile] + q = [self.inputs.percentile, 100.0 - self.inputs.percentile] in_low, in_high = np.percentile(data[in_mask], q) ref_low, ref_high = np.percentile(ref_data[ref_mask], q) scale_factor = (ref_high - ref_low) / (in_high - in_low) @@ -79,33 +82,41 @@ def _run_interface(self, runtime): signal = in_high - data if self.inputs.invert else data - in_low out_data = in_mask * (signal * scale_factor + ref_low) - suffix = '_inv' if self.inputs.invert else '_rescaled' - out_file = fname_presuffix(self.inputs.in_file, suffix=suffix, - newpath=runtime.cwd) + suffix = "_inv" if self.inputs.invert else "_rescaled" + out_file = fname_presuffix( + self.inputs.in_file, suffix=suffix, newpath=runtime.cwd + ) img.__class__(out_data, img.affine, img.header).to_filename(out_file) - self._results['out_file'] = out_file + self._results["out_file"] = out_file return runtime -_axes = ('RL', 'AP', 'SI') +_axes = ("RL", "AP", "SI") _orientations = tuple( - ''.join((x[i], y[j], z[k])) - for x in _axes for y in _axes for z in _axes + "".join((x[i], y[j], z[k])) + for x in _axes + for y in _axes + for z in _axes if x != y != z != x - for i in (0, 1) for j in (0, 1) for k in (0, 1)) + for i in (0, 1) + for j in (0, 1) + for k in (0, 1) +) class ReorientInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc='Input image') - orientation = traits.Enum(_orientations, usedefault=True, - desc='Target axis orientation') + in_file = File(exists=True, mandatory=True, desc="Input image") + orientation = traits.Enum( + _orientations, usedefault=True, desc="Target axis orientation" + ) class ReorientOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='Reoriented image') - transform = File(exists=True, - desc='Affine transform from input orientation to output') + out_file = File(exists=True, desc="Reoriented image") + transform = File( + exists=True, desc="Affine transform from input orientation to output" + ) class Reorient(SimpleInterface): @@ -168,14 +179,14 @@ class Reorient(SimpleInterface): >>> os.unlink(res.outputs.transform) """ + input_spec = ReorientInputSpec output_spec = ReorientOutputSpec def _run_interface(self, runtime): import numpy as np import nibabel as nb - from nibabel.orientations import ( - axcodes2ornt, ornt_transform, inv_ornt_aff) + from nibabel.orientations import axcodes2ornt, ornt_transform, inv_ornt_aff fname = self.inputs.in_file orig_img = nb.load(fname) @@ -188,26 +199,26 @@ def _run_interface(self, runtime): affine_xfm = inv_ornt_aff(transform, orig_img.shape) # Check can be eliminated when minimum nibabel version >= 2.4 - if LooseVersion(nb.__version__) >= LooseVersion('2.4.0'): + if LooseVersion(nb.__version__) >= LooseVersion("2.4.0"): reoriented = orig_img.as_reoriented(transform) else: reoriented = _as_reoriented_backport(orig_img, transform) # Image may be reoriented if reoriented is not orig_img: - suffix = '_' + self.inputs.orientation.lower() - out_name = fname_presuffix(fname, suffix=suffix, - newpath=runtime.cwd) + suffix = "_" + self.inputs.orientation.lower() + out_name = fname_presuffix(fname, suffix=suffix, newpath=runtime.cwd) reoriented.to_filename(out_name) else: out_name = fname - mat_name = fname_presuffix(fname, suffix='.mat', - newpath=runtime.cwd, use_ext=False) - np.savetxt(mat_name, affine_xfm, fmt='%.08f') + mat_name = fname_presuffix( + fname, suffix=".mat", newpath=runtime.cwd, use_ext=False + ) + np.savetxt(mat_name, affine_xfm, fmt="%.08f") - self._results['out_file'] = out_name - self._results['transform'] = 
mat_name + self._results["out_file"] = out_name + self._results["transform"] = mat_name return runtime @@ -217,6 +228,7 @@ def _as_reoriented_backport(img, ornt): import numpy as np import nibabel as nb from nibabel.orientations import inv_ornt_aff + if np.array_equal(ornt, [[0, 1], [1, 1], [2, 1]]): return img @@ -226,8 +238,10 @@ def _as_reoriented_backport(img, ornt): if isinstance(reoriented, nb.Nifti1Pair): # Also apply the transform to the dim_info fields - new_dim = [None if orig_dim is None else int(ornt[orig_dim, 0]) - for orig_dim in img.header.get_dim_info()] + new_dim = [ + None if orig_dim is None else int(ornt[orig_dim, 0]) + for orig_dim in img.header.get_dim_info() + ] reoriented.header.set_dim_info(*new_dim) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index a5498f5c9f..071c834e14 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -27,15 +27,31 @@ from .. import config, logging from ..utils.filemanip import ( - copyfile, simplify_list, ensure_list, - get_related_files, split_filename) + copyfile, + simplify_list, + ensure_list, + get_related_files, + split_filename, +) from ..utils.misc import human_order_sorted, str2bool from .base import ( - TraitedSpec, traits, Str, File, Directory, BaseInterface, InputMultiPath, - isdefined, OutputMultiPath, DynamicTraitedSpec, Undefined, BaseInterfaceInputSpec, - LibraryBaseInterface, SimpleInterface) - -iflogger = logging.getLogger('nipype.interface') + TraitedSpec, + traits, + Str, + File, + Directory, + BaseInterface, + InputMultiPath, + isdefined, + OutputMultiPath, + DynamicTraitedSpec, + Undefined, + BaseInterfaceInputSpec, + LibraryBaseInterface, + SimpleInterface, +) + +iflogger = logging.getLogger("nipype.interface") def copytree(src, dst, use_hardlink=False): @@ -50,7 +66,7 @@ def copytree(src, dst, use_hardlink=False): try: os.makedirs(dst) except OSError as why: - if 'File exists' in why.strerror: + if "File exists" in why.strerror: pass else: raise why @@ -66,8 +82,9 @@ def copytree(src, dst, use_hardlink=False): srcname, dstname, True, - hashmethod='content', - use_hardlink=use_hardlink) + hashmethod="content", + use_hardlink=use_hardlink, + ) except (IOError, os.error) as why: errors.append((srcname, dstname, str(why))) # catch the Error from the recursive copytree so that we can @@ -107,21 +124,26 @@ def _get_head_bucket(s3_resource, bucket_name): try: s3_resource.meta.client.head_bucket(Bucket=bucket_name) except botocore.exceptions.ClientError as exc: - error_code = int(exc.response['Error']['Code']) + error_code = int(exc.response["Error"]["Code"]) if error_code == 403: - err_msg = 'Access to bucket: %s is denied; check credentials'\ - % bucket_name + err_msg = "Access to bucket: %s is denied; check credentials" % bucket_name raise Exception(err_msg) elif error_code == 404: - err_msg = 'Bucket: %s does not exist; check spelling and try '\ - 'again' % bucket_name + err_msg = ( + "Bucket: %s does not exist; check spelling and try " + "again" % bucket_name + ) raise Exception(err_msg) else: - err_msg = 'Unable to connect to bucket: %s. Error message:\n%s'\ - % (bucket_name, exc) + err_msg = "Unable to connect to bucket: %s. Error message:\n%s" % ( + bucket_name, + exc, + ) except Exception as exc: - err_msg = 'Unable to connect to bucket: %s. Error message:\n%s'\ - % (bucket_name, exc) + err_msg = "Unable to connect to bucket: %s. 
Error message:\n%s" % ( + bucket_name, + exc, + ) raise Exception(err_msg) @@ -141,14 +163,14 @@ def _add_output_traits(self, base): # Class to track percentage of S3 file upload class ProgressPercentage(object): - ''' + """ Callable class instsance (via __call__ method) that displays upload percentage of a file to S3 - ''' + """ def __init__(self, filename): - ''' - ''' + """ + """ # Import packages import threading @@ -160,8 +182,8 @@ def __init__(self, filename): self._lock = threading.Lock() def __call__(self, bytes_amount): - ''' - ''' + """ + """ # Import packages import sys @@ -173,8 +195,11 @@ def __call__(self, bytes_amount): percentage = (self._seen_so_far // self._size) * 100 else: percentage = 0 - progress_str = '%d / %d (%.2f%%)\r'\ - % (self._seen_so_far, self._size, percentage) + progress_str = "%d / %d (%.2f%%)\r" % ( + self._seen_so_far, + self._size, + percentage, + ) # Write to stdout sys.stdout.write(progress_str) @@ -183,43 +208,52 @@ def __call__(self, bytes_amount): # DataSink inputs class DataSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): - ''' - ''' + """ + """ # Init inputspec data attributes - base_directory = Directory( - desc='Path to the base directory for storing data.') - container = Str( - desc='Folder within base directory in which to store output') + base_directory = Directory(desc="Path to the base directory for storing data.") + container = Str(desc="Folder within base directory in which to store output") parameterization = traits.Bool( - True, usedefault=True, desc='store output in parametrized structure') - strip_dir = Directory(desc='path to strip out of filename') + True, usedefault=True, desc="store output in parametrized structure" + ) + strip_dir = Directory(desc="path to strip out of filename") substitutions = InputMultiPath( traits.Tuple(Str, Str), - desc=('List of 2-tuples reflecting string ' - 'to substitute and string to replace ' - 'it with')) - regexp_substitutions = \ - InputMultiPath(traits.Tuple(Str, Str), - desc=('List of 2-tuples reflecting a pair of a ' - 'Python regexp pattern and a replacement ' - 'string. Invoked after string `substitutions`')) + desc=( + "List of 2-tuples reflecting string " + "to substitute and string to replace " + "it with" + ), + ) + regexp_substitutions = InputMultiPath( + traits.Tuple(Str, Str), + desc=( + "List of 2-tuples reflecting a pair of a " + "Python regexp pattern and a replacement " + "string. 
Invoked after string `substitutions`" + ), + ) _outputs = traits.Dict(Str, value={}, usedefault=True) remove_dest_dir = traits.Bool( - False, usedefault=True, desc='remove dest directory when copying dirs') + False, usedefault=True, desc="remove dest directory when copying dirs" + ) # AWS S3 data attributes - creds_path = Str(desc='Filepath to AWS credentials file for S3 bucket ' - 'access; if not specified, the credentials will ' - 'be taken from the AWS_ACCESS_KEY_ID and ' - 'AWS_SECRET_ACCESS_KEY environment variables') - encrypt_bucket_keys = traits.Bool(desc='Flag indicating whether to use S3 ' - 'server-side AES-256 encryption') + creds_path = Str( + desc="Filepath to AWS credentials file for S3 bucket " + "access; if not specified, the credentials will " + "be taken from the AWS_ACCESS_KEY_ID and " + "AWS_SECRET_ACCESS_KEY environment variables" + ) + encrypt_bucket_keys = traits.Bool( + desc="Flag indicating whether to use S3 " "server-side AES-256 encryption" + ) # Set this if user wishes to override the bucket with their own - bucket = traits.Any(desc='Boto3 S3 bucket for manual override of bucket') + bucket = traits.Any(desc="Boto3 S3 bucket for manual override of bucket") # Set this if user wishes to have local copy of files as well - local_copy = Str(desc='Copy files locally as well as to S3 bucket') + local_copy = Str(desc="Copy files locally as well as to S3 bucket") # Set call-able inputs attributes def __setattr__(self, key, value): @@ -238,7 +272,7 @@ def __setattr__(self, key, value): class DataSinkOutputSpec(TraitedSpec): # Init out file - out_file = traits.Any(desc='datasink output') + out_file = traits.Any(desc="datasink output") # Custom DataSink class @@ -339,10 +373,9 @@ def _get_dst(self, src): if self.inputs.parameterization: dst = path if isdefined(self.inputs.strip_dir): - dst = dst.replace(self.inputs.strip_dir, '') + dst = dst.replace(self.inputs.strip_dir, "") folders = [ - folder for folder in dst.split(os.path.sep) - if folder.startswith('_') + folder for folder in dst.split(os.path.sep) if folder.startswith("_") ] dst = os.path.sep.join(folders) if fname: @@ -364,22 +397,32 @@ def _substitute(self, pathstr): oldpathstr = pathstr pathstr = pathstr.replace(key, val) if pathstr != oldpathstr: - iflogger.debug('sub.str: %s -> %s using %r -> %r', - oldpathstr, pathstr, key, val) + iflogger.debug( + "sub.str: %s -> %s using %r -> %r", + oldpathstr, + pathstr, + key, + val, + ) if isdefined(self.inputs.regexp_substitutions): for key, val in self.inputs.regexp_substitutions: oldpathstr = pathstr pathstr, _ = re.subn(key, val, pathstr) if pathstr != oldpathstr: - iflogger.debug('sub.regexp: %s -> %s using %r -> %r', - oldpathstr, pathstr, key, val) + iflogger.debug( + "sub.regexp: %s -> %s using %r -> %r", + oldpathstr, + pathstr, + key, + val, + ) if pathstr_ != pathstr: - iflogger.info('sub: %s -> %s', pathstr_, pathstr) + iflogger.info("sub: %s -> %s", pathstr_, pathstr) return pathstr # Check for s3 in base directory def _check_s3_base_dir(self): - ''' + """ Method to see if the datasink's base directory specifies an S3 bucket path; if it does, it parses the path for the bucket name in the form 's3://bucket_name/...' 
and returns it @@ -395,11 +438,11 @@ def _check_s3_base_dir(self): bucket_name : string name of the S3 bucket to connect to; if the base directory is not a valid S3 path, defaults to '' - ''' + """ # Init variables - s3_str = 's3://' - bucket_name = '' + s3_str = "s3://" + bucket_name = "" base_directory = self.inputs.base_directory if not isdefined(base_directory): @@ -408,14 +451,14 @@ def _check_s3_base_dir(self): # Explicitly lower-case the "s3" if base_directory.lower().startswith(s3_str): - base_dir_sp = base_directory.split('/') + base_dir_sp = base_directory.split("/") base_dir_sp[0] = base_dir_sp[0].lower() - base_directory = '/'.join(base_dir_sp) + base_directory = "/".join(base_dir_sp) # Check if 's3://' in base dir if base_directory.startswith(s3_str): # Expects bucket name to be 's3://bucket_name/base_dir/..' - bucket_name = base_directory.split(s3_str)[1].split('/')[0] + bucket_name = base_directory.split(s3_str)[1].split("/")[0] s3_flag = True # Otherwise it's just a normal datasink else: @@ -426,7 +469,7 @@ def _check_s3_base_dir(self): # Function to return AWS secure environment variables def _return_aws_keys(self): - ''' + """ Method to return AWS access key id and secret access key using credentials found in a local file. @@ -441,7 +484,7 @@ def _return_aws_keys(self): string of the AWS access key ID aws_secret_access_key : string string of the AWS secret access key - ''' + """ # Import packages import os @@ -451,40 +494,39 @@ def _return_aws_keys(self): # Check if creds exist if creds_path and os.path.exists(creds_path): - with open(creds_path, 'r') as creds_in: + with open(creds_path, "r") as creds_in: # Grab csv rows row1 = creds_in.readline() row2 = creds_in.readline() # Are they root or user keys - if 'User Name' in row1: + if "User Name" in row1: # And split out for keys - aws_access_key_id = row2.split(',')[1] - aws_secret_access_key = row2.split(',')[2] - elif 'AWSAccessKeyId' in row1: + aws_access_key_id = row2.split(",")[1] + aws_secret_access_key = row2.split(",")[2] + elif "AWSAccessKeyId" in row1: # And split out for keys - aws_access_key_id = row1.split('=')[1] - aws_secret_access_key = row2.split('=')[1] + aws_access_key_id = row1.split("=")[1] + aws_secret_access_key = row2.split("=")[1] else: - err_msg = 'Credentials file not recognized, check file is correct' + err_msg = "Credentials file not recognized, check file is correct" raise Exception(err_msg) # Strip any carriage return/line feeds - aws_access_key_id = aws_access_key_id.replace('\r', '').replace( - '\n', '') - aws_secret_access_key = aws_secret_access_key.replace('\r', - '').replace( - '\n', '') + aws_access_key_id = aws_access_key_id.replace("\r", "").replace("\n", "") + aws_secret_access_key = aws_secret_access_key.replace("\r", "").replace( + "\n", "" + ) else: - aws_access_key_id = os.getenv('AWS_ACCESS_KEY_ID') - aws_secret_access_key = os.getenv('AWS_SECRET_ACCESS_KEY') + aws_access_key_id = os.getenv("AWS_ACCESS_KEY_ID") + aws_secret_access_key = os.getenv("AWS_SECRET_ACCESS_KEY") # Return keys return aws_access_key_id, aws_secret_access_key # Fetch bucket object def _fetch_bucket(self, bucket_name): - ''' + """ Method to return a bucket object which can be used to interact with an AWS S3 bucket using credentials found in a local file. 
@@ -500,15 +542,14 @@ def _fetch_bucket(self, bucket_name): bucket : boto3.resources.factory.s3.Bucket boto3 s3 Bucket object which is used to interact with files in an S3 bucket on AWS - ''' + """ # Import packages try: import boto3 import botocore except ImportError as exc: - err_msg = 'Boto3 package is not installed - install boto3 and '\ - 'try again.' + err_msg = "Boto3 package is not installed - install boto3 and " "try again." raise Exception(err_msg) # Init variables @@ -516,34 +557,35 @@ def _fetch_bucket(self, bucket_name): # Get AWS credentials try: - aws_access_key_id, aws_secret_access_key = \ - self._return_aws_keys() + aws_access_key_id, aws_secret_access_key = self._return_aws_keys() except Exception as exc: - err_msg = 'There was a problem extracting the AWS credentials '\ - 'from the credentials file provided: %s. Error:\n%s'\ - % (creds_path, exc) + err_msg = ( + "There was a problem extracting the AWS credentials " + "from the credentials file provided: %s. Error:\n%s" % (creds_path, exc) + ) raise Exception(err_msg) # Try and get AWS credentials if a creds_path is specified if aws_access_key_id and aws_secret_access_key: # Init connection - iflogger.info('Connecting to S3 bucket: %s with credentials...', - bucket_name) + iflogger.info( + "Connecting to S3 bucket: %s with credentials...", bucket_name + ) # Use individual session for each instance of DataSink # Better when datasinks are being used in multi-threading, see: # http://boto3.readthedocs.org/en/latest/guide/resources.html#multithreading session = boto3.session.Session( aws_access_key_id=aws_access_key_id, - aws_secret_access_key=aws_secret_access_key) + aws_secret_access_key=aws_secret_access_key, + ) else: - iflogger.info('Connecting to S3 bucket: %s with IAM role...', - bucket_name) + iflogger.info("Connecting to S3 bucket: %s with IAM role...", bucket_name) # Lean on AWS environment / IAM role authentication and authorization session = boto3.session.Session() - s3_resource = session.resource('s3', use_ssl=True) + s3_resource = session.resource("s3", use_ssl=True) # And try fetch the bucket with the name argument try: @@ -552,9 +594,10 @@ def _fetch_bucket(self, bucket_name): # Try to connect anonymously s3_resource.meta.client.meta.events.register( - 'choose-signer.s3.*', botocore.handlers.disable_signing) + "choose-signer.s3.*", botocore.handlers.disable_signing + ) - iflogger.info('Connecting to AWS: %s anonymously...', bucket_name) + iflogger.info("Connecting to AWS: %s anonymously...", bucket_name) _get_head_bucket(s3_resource, bucket_name) # Explicitly declare a secure SSL connection for bucket object @@ -563,12 +606,11 @@ def _fetch_bucket(self, bucket_name): # Return the bucket return bucket - # Send up to S3 method def _upload_to_s3(self, bucket, src, dst): - ''' + """ Method to upload outputs to S3 bucket instead of on local disk - ''' + """ # Import packages import hashlib @@ -577,12 +619,12 @@ def _upload_to_s3(self, bucket, src, dst): from botocore.exceptions import ClientError # Init variables - s3_str = 's3://' + s3_str = "s3://" s3_prefix = s3_str + bucket.name # Explicitly lower-case the "s3" - if dst[:len(s3_str)].lower() == s3_str: - dst = s3_str + dst[len(s3_str):] + if dst[: len(s3_str)].lower() == s3_str: + dst = s3_str + dst[len(s3_str) :] # If src is a directory, collect files (this assumes dst is a dir too) if os.path.isdir(src): @@ -590,10 +632,7 @@ def _upload_to_s3(self, bucket, src, dst): for root, dirs, files in os.walk(src): src_files.extend([os.path.join(root, fil) for fil in 
files]) # Make the dst files have the dst folder as base dir - dst_files = [ - os.path.join(dst, - src_f.split(src)[1]) for src_f in src_files - ] + dst_files = [os.path.join(dst, src_f.split(src)[1]) for src_f in src_files] else: src_files = [src] dst_files = [dst] @@ -602,7 +641,7 @@ def _upload_to_s3(self, bucket, src, dst): for src_idx, src_f in enumerate(src_files): # Get destination filename/keyname dst_f = dst_files[src_idx] - dst_k = dst_f.replace(s3_prefix, '').lstrip('/') + dst_k = dst_f.replace(s3_prefix, "").lstrip("/") # See if same file is already up there try: @@ -610,31 +649,29 @@ def _upload_to_s3(self, bucket, src, dst): dst_md5 = dst_obj.e_tag.strip('"') # See if same file is already there - src_read = open(src_f, 'rb').read() + src_read = open(src_f, "rb").read() src_md5 = hashlib.md5(src_read).hexdigest() # Move to next loop iteration if dst_md5 == src_md5: - iflogger.info('File %s already exists on S3, skipping...', - dst_f) + iflogger.info("File %s already exists on S3, skipping...", dst_f) continue else: - iflogger.info('Overwriting previous S3 file...') + iflogger.info("Overwriting previous S3 file...") except ClientError: - iflogger.info('New file to S3') + iflogger.info("New file to S3") # Copy file up to S3 (either encrypted or not) - iflogger.info('Uploading %s to S3 bucket, %s, as %s...', src_f, - bucket.name, dst_f) + iflogger.info( + "Uploading %s to S3 bucket, %s, as %s...", src_f, bucket.name, dst_f + ) if self.inputs.encrypt_bucket_keys: - extra_args = {'ServerSideEncryption': 'AES256'} + extra_args = {"ServerSideEncryption": "AES256"} else: extra_args = {} bucket.upload_file( - src_f, - dst_k, - ExtraArgs=extra_args, - Callback=ProgressPercentage(src_f)) + src_f, dst_k, ExtraArgs=extra_args, Callback=ProgressPercentage(src_f) + ) # List outputs, main run routine def _list_outputs(self): @@ -645,8 +682,7 @@ def _list_outputs(self): outputs = self.output_spec().get() out_files = [] # Use hardlink - use_hardlink = str2bool( - config.get('execution', 'try_hard_link_datasink')) + use_hardlink = str2bool(config.get("execution", "try_hard_link_datasink")) # Set local output directory if specified if isdefined(self.inputs.local_copy): @@ -655,7 +691,7 @@ def _list_outputs(self): outdir = self.inputs.base_directory # If base directory isn't given, assume current directory if not isdefined(outdir): - outdir = '.' + outdir = "." # Check if base directory reflects S3 bucket upload s3_flag, bucket_name = self._check_s3_base_dir() @@ -671,18 +707,21 @@ def _list_outputs(self): # If encountering an exception during bucket access, set output # base directory to a local folder except Exception as exc: - s3dir = '' + s3dir = "" if not isdefined(self.inputs.local_copy): local_out_exception = os.path.join( - os.path.expanduser('~'), - 's3_datasink_' + bucket_name) + os.path.expanduser("~"), "s3_datasink_" + bucket_name + ) outdir = local_out_exception # Log local copying directory iflogger.info( - 'Access to S3 failed! Storing outputs locally at: ' - '%s\nError: %s', outdir, exc) + "Access to S3 failed! 
Storing outputs locally at: " + "%s\nError: %s", + outdir, + exc, + ) else: - s3dir = '' + s3dir = "" # If container input is given, append that to outdir if isdefined(self.inputs.container): @@ -697,7 +736,7 @@ def _list_outputs(self): try: os.makedirs(outdir) except OSError as inst: - if 'File exists' in inst.strerror: + if "File exists" in inst.strerror: pass else: raise (inst) @@ -711,8 +750,8 @@ def _list_outputs(self): tempoutdir = outdir if s3_flag: s3tempoutdir = s3dir - for d in key.split('.'): - if d[0] == '@': + for d in key.split("."): + if d[0] == "@": continue tempoutdir = os.path.join(tempoutdir, d) if s3_flag: @@ -728,7 +767,7 @@ def _list_outputs(self): # Format src and dst files src = os.path.abspath(src) if not os.path.isfile(src): - src = os.path.join(src, '') + src = os.path.join(src, "") dst = self._get_dst(src) if s3_flag: s3dst = os.path.join(s3tempoutdir, dst) @@ -748,31 +787,32 @@ def _list_outputs(self): try: os.makedirs(path) except OSError as inst: - if 'File exists' in inst.strerror: + if "File exists" in inst.strerror: pass else: raise (inst) # If src is a file, copy it to dst if os.path.isfile(src): - iflogger.debug('copyfile: %s %s', src, dst) + iflogger.debug("copyfile: %s %s", src, dst) copyfile( src, dst, copy=True, - hashmethod='content', - use_hardlink=use_hardlink) + hashmethod="content", + use_hardlink=use_hardlink, + ) out_files.append(dst) # If src is a directory, copy entire contents to dst dir elif os.path.isdir(src): if os.path.exists(dst) and self.inputs.remove_dest_dir: - iflogger.debug('removing: %s', dst) + iflogger.debug("removing: %s", dst) shutil.rmtree(dst) - iflogger.debug('copydir: %s %s', src, dst) + iflogger.debug("copydir: %s %s", src, dst) copytree(src, dst) out_files.append(dst) # Return outputs dictionary - outputs['out_file'] = out_files + outputs["out_file"] = out_files return outputs @@ -781,36 +821,37 @@ class S3DataGrabberInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): anon = traits.Bool( False, usedefault=True, - desc= - 'Use anonymous connection to s3. If this is set to True, boto may print' - + - ' a urlopen error, but this does not prevent data from being downloaded.' - ) - region = Str('us-east-1', usedefault=True, desc='Region of s3 bucket') - bucket = Str( - mandatory=True, desc='Amazon S3 bucket where your data is stored') + desc="Use anonymous connection to s3. If this is set to True, boto may print" + + " a urlopen error, but this does not prevent data from being downloaded.", + ) + region = Str("us-east-1", usedefault=True, desc="Region of s3 bucket") + bucket = Str(mandatory=True, desc="Amazon S3 bucket where your data is stored") bucket_path = Str( - '', - usedefault=True, - desc='Location within your bucket for subject data.') + "", usedefault=True, desc="Location within your bucket for subject data." + ) local_directory = Directory( exists=True, - desc='Path to the local directory for subject data to be downloaded ' - 'and accessed. Should be on HDFS for Spark jobs.') + desc="Path to the local directory for subject data to be downloaded " + "and accessed. 
Should be on HDFS for Spark jobs.", + ) raise_on_empty = traits.Bool( True, usedefault=True, - desc='Generate exception if list is empty for a given field') + desc="Generate exception if list is empty for a given field", + ) sort_filelist = traits.Bool( - mandatory=True, desc='Sort the filelist that matches the template') + mandatory=True, desc="Sort the filelist that matches the template" + ) template = Str( mandatory=True, - desc='Layout used to get files. Relative to bucket_path if defined.' - 'Uses regex rather than glob style formatting.') + desc="Layout used to get files. Relative to bucket_path if defined." + "Uses regex rather than glob style formatting.", + ) template_args = traits.Dict( key_trait=Str, value_trait=traits.List(traits.List), - desc='Information to plug into template') + desc="Information to plug into template", + ) class S3DataGrabber(LibraryBaseInterface, IOBase): @@ -846,7 +887,7 @@ class S3DataGrabber(LibraryBaseInterface, IOBase): input_spec = S3DataGrabberInputSpec output_spec = DynamicTraitedSpec _always_run = True - _pkg = 'boto' + _pkg = "boto" def __init__(self, infields=None, outfields=None, **kwargs): """ @@ -862,7 +903,7 @@ def __init__(self, infields=None, outfields=None, **kwargs): """ if not outfields: - outfields = ['outfiles'] + outfields = ["outfiles"] super(S3DataGrabber, self).__init__(**kwargs) undefined_traits = {} # used for mandatory inputs check @@ -873,11 +914,13 @@ def __init__(self, infields=None, outfields=None, **kwargs): self.inputs.add_trait(key, traits.Any) undefined_traits[key] = Undefined # add ability to insert field specific templates - self.inputs.add_trait('field_template', - traits.Dict( - traits.Enum(outfields), - desc="arguments that fit into template")) - undefined_traits['field_template'] = Undefined + self.inputs.add_trait( + "field_template", + traits.Dict( + traits.Enum(outfields), desc="arguments that fit into template" + ), + ) + undefined_traits["field_template"] = Undefined if not isdefined(self.inputs.template_args): self.inputs.template_args = {} for key in outfields: @@ -902,30 +945,35 @@ def _list_outputs(self): # infields are mandatory, however I could not figure out how to set 'mandatory' flag dynamically # hence manual check import boto + if self._infields: for key in self._infields: value = getattr(self.inputs, key) if not isdefined(value): - msg = "%s requires a value for input '%s' because it was listed in 'infields'" % \ - (self.__class__.__name__, key) + msg = ( + "%s requires a value for input '%s' because it was listed in 'infields'" + % (self.__class__.__name__, key) + ) raise ValueError(msg) outputs = {} # get list of all files in s3 bucket conn = boto.connect_s3(anon=self.inputs.anon) bkt = conn.get_bucket(self.inputs.bucket) - bkt_files = list( - k.key for k in bkt.list(prefix=self.inputs.bucket_path)) + bkt_files = list(k.key for k in bkt.list(prefix=self.inputs.bucket_path)) # keys are outfields, args are template args for the outfield for key, args in list(self.inputs.template_args.items()): outputs[key] = [] template = self.inputs.template - if hasattr(self.inputs, 'field_template') and \ - isdefined(self.inputs.field_template) and \ - key in self.inputs.field_template: + if ( + hasattr(self.inputs, "field_template") + and isdefined(self.inputs.field_template) + and key in self.inputs.field_template + ): template = self.inputs.field_template[ - key] # template override for multiple outfields + key + ] # template override for multiple outfields if isdefined(self.inputs.bucket_path): template = 
os.path.join(self.inputs.bucket_path, template) if not args: @@ -934,8 +982,10 @@ def _list_outputs(self): if re.match(template, fname): filelist.append(fname) if len(filelist) == 0: - msg = 'Output key: %s Template: %s returned no files' % ( - key, template) + msg = "Output key: %s Template: %s returned no files" % ( + key, + template, + ) if self.inputs.raise_on_empty: raise IOError(msg) else: @@ -947,22 +997,20 @@ def _list_outputs(self): for argnum, arglist in enumerate(args): maxlen = 1 for arg in arglist: - if isinstance(arg, - (str, bytes)) and hasattr(self.inputs, arg): + if isinstance(arg, (str, bytes)) and hasattr(self.inputs, arg): arg = getattr(self.inputs, arg) if isinstance(arg, list): if (maxlen > 1) and (len(arg) != maxlen): raise ValueError( - 'incompatible number of arguments for %s' % - key) + "incompatible number of arguments for %s" % key + ) if len(arg) > maxlen: maxlen = len(arg) outfiles = [] for i in range(maxlen): argtuple = [] for arg in arglist: - if isinstance(arg, (str, bytes)) and hasattr( - self.inputs, arg): + if isinstance(arg, (str, bytes)) and hasattr(self.inputs, arg): arg = getattr(self.inputs, arg) if isinstance(arg, list): argtuple.append(arg[i]) @@ -974,16 +1022,19 @@ def _list_outputs(self): filledtemplate = template % tuple(argtuple) except TypeError as e: raise TypeError( - e.message + - ": Template %s failed to convert with args %s" - % (template, str(tuple(argtuple)))) + e.message + + ": Template %s failed to convert with args %s" + % (template, str(tuple(argtuple))) + ) outfiles = [] for fname in bkt_files: if re.match(filledtemplate, fname): outfiles.append(fname) if len(outfiles) == 0: - msg = 'Output key: %s Template: %s returned no files' % ( - key, filledtemplate) + msg = "Output key: %s Template: %s returned no files" % ( + key, + filledtemplate, + ) if self.inputs.raise_on_empty: raise IOError(msg) else: @@ -1019,19 +1070,19 @@ def _list_outputs(self): # directory, returning the local path. def s3tolocal(self, s3path, bkt): import boto + # path formatting local_directory = str(self.inputs.local_directory) bucket_path = str(self.inputs.bucket_path) template = str(self.inputs.template) - if not os.path.basename(local_directory) == '': - local_directory += '/' - if not os.path.basename(bucket_path) == '': - bucket_path += '/' - if template[0] == '/': + if not os.path.basename(local_directory) == "": + local_directory += "/" + if not os.path.basename(bucket_path) == "": + bucket_path += "/" + if template[0] == "/": template = template[1:] - localpath = s3path.replace(bucket_path, - local_directory) + localpath = s3path.replace(bucket_path, local_directory) localdir = os.path.split(localpath)[0] if not os.path.exists(localdir): os.makedirs(localdir) @@ -1043,25 +1094,28 @@ def s3tolocal(self, s3path, bkt): class DataGrabberInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): base_directory = Directory( - exists=True, - desc='Path to the base directory consisting of subject data.') + exists=True, desc="Path to the base directory consisting of subject data." 
+ ) raise_on_empty = traits.Bool( True, usedefault=True, - desc='Generate exception if list is empty for a given field') + desc="Generate exception if list is empty for a given field", + ) drop_blank_outputs = traits.Bool( - False, usedefault=True, - desc="Remove ``None`` entries from output lists" - ) + False, usedefault=True, desc="Remove ``None`` entries from output lists" + ) sort_filelist = traits.Bool( - mandatory=True, desc='Sort the filelist that matches the template') + mandatory=True, desc="Sort the filelist that matches the template" + ) template = Str( mandatory=True, - desc='Layout used to get files. relative to base directory if defined') + desc="Layout used to get files. relative to base directory if defined", + ) template_args = traits.Dict( key_trait=Str, value_trait=traits.List(traits.List), - desc='Information to plug into template') + desc="Information to plug into template", + ) class DataGrabber(IOBase): @@ -1115,6 +1169,7 @@ class DataGrabber(IOBase): >>> dg.inputs.template_args['struct'] = [['sid']] """ + input_spec = DataGrabberInputSpec output_spec = DynamicTraitedSpec _always_run = True @@ -1133,7 +1188,7 @@ def __init__(self, infields=None, outfields=None, **kwargs): """ if not outfields: - outfields = ['outfiles'] + outfields = ["outfiles"] super(DataGrabber, self).__init__(**kwargs) undefined_traits = {} # used for mandatory inputs check @@ -1144,11 +1199,13 @@ def __init__(self, infields=None, outfields=None, **kwargs): self.inputs.add_trait(key, traits.Any) undefined_traits[key] = Undefined # add ability to insert field specific templates - self.inputs.add_trait('field_template', - traits.Dict( - traits.Enum(outfields), - desc="arguments that fit into template")) - undefined_traits['field_template'] = Undefined + self.inputs.add_trait( + "field_template", + traits.Dict( + traits.Enum(outfields), desc="arguments that fit into template" + ), + ) + undefined_traits["field_template"] = Undefined if not isdefined(self.inputs.template_args): self.inputs.template_args = {} for key in outfields: @@ -1175,28 +1232,35 @@ def _list_outputs(self): for key in self._infields: value = getattr(self.inputs, key) if not isdefined(value): - msg = "%s requires a value for input '%s' because it was listed in 'infields'" % \ - (self.__class__.__name__, key) + msg = ( + "%s requires a value for input '%s' because it was listed in 'infields'" + % (self.__class__.__name__, key) + ) raise ValueError(msg) outputs = {} for key, args in list(self.inputs.template_args.items()): outputs[key] = [] template = self.inputs.template - if hasattr(self.inputs, 'field_template') and \ - isdefined(self.inputs.field_template) and \ - key in self.inputs.field_template: + if ( + hasattr(self.inputs, "field_template") + and isdefined(self.inputs.field_template) + and key in self.inputs.field_template + ): template = self.inputs.field_template[key] if isdefined(self.inputs.base_directory): template = os.path.join( - os.path.abspath(self.inputs.base_directory), template) + os.path.abspath(self.inputs.base_directory), template + ) else: template = os.path.abspath(template) if not args: filelist = glob.glob(template) if len(filelist) == 0: - msg = 'Output key: %s Template: %s returned no files' % ( - key, template) + msg = "Output key: %s Template: %s returned no files" % ( + key, + template, + ) if self.inputs.raise_on_empty: raise IOError(msg) else: @@ -1208,22 +1272,20 @@ def _list_outputs(self): for argnum, arglist in enumerate(args): maxlen = 1 for arg in arglist: - if isinstance(arg, - (str, bytes)) 
and hasattr(self.inputs, arg): + if isinstance(arg, (str, bytes)) and hasattr(self.inputs, arg): arg = getattr(self.inputs, arg) if isinstance(arg, list): if (maxlen > 1) and (len(arg) != maxlen): raise ValueError( - 'incompatible number of arguments for %s' % - key) + "incompatible number of arguments for %s" % key + ) if len(arg) > maxlen: maxlen = len(arg) outfiles = [] for i in range(maxlen): argtuple = [] for arg in arglist: - if isinstance(arg, (str, bytes)) and hasattr( - self.inputs, arg): + if isinstance(arg, (str, bytes)) and hasattr(self.inputs, arg): arg = getattr(self.inputs, arg) if isinstance(arg, list): argtuple.append(arg[i]) @@ -1235,13 +1297,16 @@ def _list_outputs(self): filledtemplate = template % tuple(argtuple) except TypeError as e: raise TypeError( - e.message + - ": Template %s failed to convert with args %s" - % (template, str(tuple(argtuple)))) + e.message + + ": Template %s failed to convert with args %s" + % (template, str(tuple(argtuple))) + ) outfiles = glob.glob(filledtemplate) if len(outfiles) == 0: - msg = 'Output key: %s Template: %s returned no files' % ( - key, filledtemplate) + msg = "Output key: %s Template: %s returned no files" % ( + key, + filledtemplate, + ) if self.inputs.raise_on_empty: raise IOError(msg) else: @@ -1265,28 +1330,30 @@ def _list_outputs(self): class SelectFilesInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): - base_directory = Directory( - exists=True, desc="Root path common to templates.") + base_directory = Directory(exists=True, desc="Root path common to templates.") sort_filelist = traits.Bool( True, usedefault=True, - desc="When matching mutliple files, return them" - " in sorted order.") + desc="When matching mutliple files, return them" " in sorted order.", + ) raise_on_empty = traits.Bool( True, usedefault=True, - desc="Raise an exception if a template pattern " - "matches no files.") + desc="Raise an exception if a template pattern " "matches no files.", + ) force_lists = traits.Either( traits.Bool(), traits.List(Str()), default=False, usedefault=True, - desc=("Whether to return outputs as a list even" - " when only one file matches the template. " - "Either a boolean that applies to all output " - "fields or a list of output field names to " - "coerce to a list")) + desc=( + "Whether to return outputs as a list even" + " when only one file matches the template. " + "Either a boolean that applies to all output " + "fields or a list of output field names to " + "coerce to a list" + ), + ) class SelectFiles(IOBase): @@ -1320,6 +1387,7 @@ class SelectFiles(IOBase): >>> dg.inputs.run = [2, 4] """ + input_spec = SelectFilesInputSpec output_spec = DynamicTraitedSpec _always_run = True @@ -1369,8 +1437,13 @@ def _add_output_traits(self, base): def _list_outputs(self): """Find the files and expose them as interface outputs.""" outputs = {} - info = dict([(k, v) for k, v in list(self.inputs.__dict__.items()) - if k in self._infields]) + info = dict( + [ + (k, v) + for k, v in list(self.inputs.__dict__.items()) + if k in self._infields + ] + ) force_lists = self.inputs.force_lists if isinstance(force_lists, bool): @@ -1380,8 +1453,9 @@ def _list_outputs(self): bad_fields = ", ".join(list(bad_fields)) plural = "s" if len(bad_fields) > 1 else "" verb = "were" if len(bad_fields) > 1 else "was" - msg = ("The field%s '%s' %s set in 'force_lists' and not in " - "'templates'.") % (plural, bad_fields, verb) + msg = ( + "The field%s '%s' %s set in 'force_lists' and not in " "'templates'." 
+ ) % (plural, bad_fields, verb) raise ValueError(msg) for field, template in list(self._templates.items()): @@ -1390,8 +1464,7 @@ def _list_outputs(self): # Build the full template path if isdefined(self.inputs.base_directory): - template = op.abspath( - op.join(self.inputs.base_directory, template)) + template = op.abspath(op.join(self.inputs.base_directory, template)) else: template = op.abspath(template) @@ -1406,7 +1479,9 @@ def _list_outputs(self): # Handle the case where nothing matched if not filelist: msg = "No files were found matching %s template: %s" % ( - field, filled_template) + field, + filled_template, + ) if self.inputs.raise_on_empty: raise IOError(msg) else: @@ -1426,25 +1501,22 @@ def _list_outputs(self): class DataFinderInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): - root_paths = traits.Either( - traits.List(), - Str(), - mandatory=True, - ) + root_paths = traits.Either(traits.List(), Str(), mandatory=True,) match_regex = Str( - '(.+)', - usedefault=True, - desc=("Regular expression for matching paths.")) + "(.+)", usedefault=True, desc=("Regular expression for matching paths.") + ) ignore_regexes = traits.List( - desc=("List of regular expressions, " - "if any match the path it will be " - "ignored.")) - max_depth = traits.Int(desc="The maximum depth to search beneath " - "the root_paths") - min_depth = traits.Int(desc="The minimum depth to search beneath " - "the root paths") + desc=( + "List of regular expressions, " + "if any match the path it will be " + "ignored." + ) + ) + max_depth = traits.Int(desc="The maximum depth to search beneath " "the root_paths") + min_depth = traits.Int(desc="The minimum depth to search beneath " "the root paths") unpack_single = traits.Bool( - False, usedefault=True, desc="Unpack single results from list") + False, usedefault=True, desc="Unpack single results from list" + ) class DataFinder(IOBase): @@ -1496,10 +1568,10 @@ def _match_path(self, target_path): if match is not None: match_dict = match.groupdict() if self.result is None: - self.result = {'out_paths': []} + self.result = {"out_paths": []} for key in list(match_dict.keys()): self.result[key] = [] - self.result['out_paths'].append(target_path) + self.result["out_paths"].append(target_path) for key, val in list(match_dict.items()): self.result[key].append(val) @@ -1519,14 +1591,15 @@ def _run_interface(self, runtime): if self.inputs.ignore_regexes is Undefined: self.ignore_regexes = [] else: - self.ignore_regexes = \ - [re.compile(regex) - for regex in self.inputs.ignore_regexes] + self.ignore_regexes = [ + re.compile(regex) for regex in self.inputs.ignore_regexes + ] self.result = None for root_path in self.inputs.root_paths: # Handle tilda/env variables and remove extra seperators root_path = os.path.normpath( - os.path.expandvars(os.path.expanduser(root_path))) + os.path.expandvars(os.path.expanduser(root_path)) + ) # Check if the root_path is a file if os.path.isfile(root_path): if min_depth == 0: @@ -1535,7 +1608,7 @@ def _run_interface(self, runtime): # Walk through directory structure checking paths for curr_dir, sub_dirs, files in os.walk(root_path): # Determine the current depth from the root_path - curr_depth = (curr_dir.count(os.sep) - root_path.count(os.sep)) + curr_depth = curr_dir.count(os.sep) - root_path.count(os.sep) # If the max path depth has been reached, clear sub_dirs # and files if max_depth is not None and curr_depth >= max_depth: @@ -1548,7 +1621,7 @@ def _run_interface(self, runtime): for infile in files: full_path = os.path.join(curr_dir, 
infile) self._match_path(full_path) - if (self.inputs.unpack_single and len(self.result['out_paths']) == 1): + if self.inputs.unpack_single and len(self.result["out_paths"]) == 1: for key, vals in list(self.result.items()): self.result[key] = vals[0] else: @@ -1557,10 +1630,10 @@ def _run_interface(self, runtime): if key == "out_paths": continue sort_tuples = human_order_sorted( - list(zip(self.result["out_paths"], self.result[key]))) + list(zip(self.result["out_paths"], self.result[key])) + ) self.result[key] = [x for (_, x) in sort_tuples] - self.result["out_paths"] = human_order_sorted( - self.result["out_paths"]) + self.result["out_paths"] = human_order_sorted(self.result["out_paths"]) if not self.result: raise RuntimeError("Regular expression did not match any files!") @@ -1574,149 +1647,159 @@ def _list_outputs(self): class FSSourceInputSpec(BaseInterfaceInputSpec): - subjects_dir = Directory(exists=True, mandatory=True, - desc='Freesurfer subjects directory.') - subject_id = Str(mandatory=True, - desc='Subject name for whom to retrieve data') - hemi = traits.Enum('both', 'lh', 'rh', usedefault=True, - desc='Selects hemisphere specific outputs') + subjects_dir = Directory( + exists=True, mandatory=True, desc="Freesurfer subjects directory." + ) + subject_id = Str(mandatory=True, desc="Subject name for whom to retrieve data") + hemi = traits.Enum( + "both", "lh", "rh", usedefault=True, desc="Selects hemisphere specific outputs" + ) class FSSourceOutputSpec(TraitedSpec): - T1 = File( - exists=True, desc='Intensity normalized whole-head volume', loc='mri') + T1 = File(exists=True, desc="Intensity normalized whole-head volume", loc="mri") aseg = File( exists=True, - loc='mri', - desc='Volumetric map of regions from automatic segmentation') - brain = File( - exists=True, desc='Intensity normalized brain-only volume', loc='mri') - brainmask = File( - exists=True, desc='Skull-stripped (brain-only) volume', loc='mri') - filled = File(exists=True, desc='Subcortical mass volume', loc='mri') - norm = File( - exists=True, desc='Normalized skull-stripped volume', loc='mri') - nu = File( - exists=True, - desc='Non-uniformity corrected whole-head volume', - loc='mri') - orig = File( - exists=True, - desc='Base image conformed to Freesurfer space', - loc='mri') + loc="mri", + desc="Volumetric map of regions from automatic segmentation", + ) + brain = File(exists=True, desc="Intensity normalized brain-only volume", loc="mri") + brainmask = File(exists=True, desc="Skull-stripped (brain-only) volume", loc="mri") + filled = File(exists=True, desc="Subcortical mass volume", loc="mri") + norm = File(exists=True, desc="Normalized skull-stripped volume", loc="mri") + nu = File(exists=True, desc="Non-uniformity corrected whole-head volume", loc="mri") + orig = File(exists=True, desc="Base image conformed to Freesurfer space", loc="mri") rawavg = File( - exists=True, desc='Volume formed by averaging input images', loc='mri') + exists=True, desc="Volume formed by averaging input images", loc="mri" + ) ribbon = OutputMultiPath( File(exists=True), - desc='Volumetric maps of cortical ribbons', - loc='mri', - altkey='*ribbon') - wm = File(exists=True, desc='Segmented white-matter volume', loc='mri') + desc="Volumetric maps of cortical ribbons", + loc="mri", + altkey="*ribbon", + ) + wm = File(exists=True, desc="Segmented white-matter volume", loc="mri") wmparc = File( exists=True, - loc='mri', - desc='Aparc parcellation projected into subcortical white matter') + loc="mri", + desc="Aparc parcellation projected 
into subcortical white matter", + ) curv = OutputMultiPath( - File(exists=True), desc='Maps of surface curvature', loc='surf') + File(exists=True), desc="Maps of surface curvature", loc="surf" + ) avg_curv = OutputMultiPath( File(exists=True), - desc='Average atlas curvature, sampled to subject', - loc='surf') + desc="Average atlas curvature, sampled to subject", + loc="surf", + ) inflated = OutputMultiPath( - File(exists=True), desc='Inflated surface meshes', loc='surf') + File(exists=True), desc="Inflated surface meshes", loc="surf" + ) pial = OutputMultiPath( - File(exists=True), - desc='Gray matter/pia mater surface meshes', - loc='surf') + File(exists=True), desc="Gray matter/pia mater surface meshes", loc="surf" + ) area_pial = OutputMultiPath( File(exists=True), - desc='Mean area of triangles each vertex on the pial surface is ' - 'associated with', - loc='surf', - altkey='area.pial') + desc="Mean area of triangles each vertex on the pial surface is " + "associated with", + loc="surf", + altkey="area.pial", + ) curv_pial = OutputMultiPath( File(exists=True), - desc='Curvature of pial surface', - loc='surf', - altkey='curv.pial') + desc="Curvature of pial surface", + loc="surf", + altkey="curv.pial", + ) smoothwm = OutputMultiPath( - File(exists=True), loc='surf', desc='Smoothed original surface meshes') + File(exists=True), loc="surf", desc="Smoothed original surface meshes" + ) sphere = OutputMultiPath( - File(exists=True), desc='Spherical surface meshes', loc='surf') + File(exists=True), desc="Spherical surface meshes", loc="surf" + ) sulc = OutputMultiPath( - File(exists=True), desc='Surface maps of sulcal depth', loc='surf') + File(exists=True), desc="Surface maps of sulcal depth", loc="surf" + ) thickness = OutputMultiPath( - File(exists=True), - loc='surf', - desc='Surface maps of cortical thickness') + File(exists=True), loc="surf", desc="Surface maps of cortical thickness" + ) volume = OutputMultiPath( - File(exists=True), desc='Surface maps of cortical volume', loc='surf') + File(exists=True), desc="Surface maps of cortical volume", loc="surf" + ) white = OutputMultiPath( - File(exists=True), desc='White/gray matter surface meshes', loc='surf') + File(exists=True), desc="White/gray matter surface meshes", loc="surf" + ) jacobian_white = OutputMultiPath( File(exists=True), - desc='Distortion required to register to spherical atlas', - loc='surf') + desc="Distortion required to register to spherical atlas", + loc="surf", + ) graymid = OutputMultiPath( File(exists=True), - desc='Graymid/midthickness surface meshes', - loc='surf', - altkey=['graymid', 'midthickness']) + desc="Graymid/midthickness surface meshes", + loc="surf", + altkey=["graymid", "midthickness"], + ) label = OutputMultiPath( File(exists=True), - desc='Volume and surface label files', - loc='label', - altkey='*label') + desc="Volume and surface label files", + loc="label", + altkey="*label", + ) annot = OutputMultiPath( - File(exists=True), - desc='Surface annotation files', - loc='label', - altkey='*annot') + File(exists=True), desc="Surface annotation files", loc="label", altkey="*annot" + ) aparc_aseg = OutputMultiPath( File(exists=True), - loc='mri', - altkey='aparc*aseg', - desc='Aparc parcellation projected into aseg volume') + loc="mri", + altkey="aparc*aseg", + desc="Aparc parcellation projected into aseg volume", + ) sphere_reg = OutputMultiPath( File(exists=True), - loc='surf', - altkey='sphere.reg', - desc='Spherical registration file') + loc="surf", + altkey="sphere.reg", + desc="Spherical 
registration file", + ) aseg_stats = OutputMultiPath( File(exists=True), - loc='stats', - altkey='aseg', - desc='Automated segmentation statistics file') + loc="stats", + altkey="aseg", + desc="Automated segmentation statistics file", + ) wmparc_stats = OutputMultiPath( File(exists=True), - loc='stats', - altkey='wmparc', - desc='White matter parcellation statistics file') + loc="stats", + altkey="wmparc", + desc="White matter parcellation statistics file", + ) aparc_stats = OutputMultiPath( File(exists=True), - loc='stats', - altkey='aparc', - desc='Aparc parcellation statistics files') + loc="stats", + altkey="aparc", + desc="Aparc parcellation statistics files", + ) BA_stats = OutputMultiPath( File(exists=True), - loc='stats', - altkey='BA', - desc='Brodmann Area statistics files') + loc="stats", + altkey="BA", + desc="Brodmann Area statistics files", + ) aparc_a2009s_stats = OutputMultiPath( File(exists=True), - loc='stats', - altkey='aparc.a2009s', - desc='Aparc a2009s parcellation statistics files') + loc="stats", + altkey="aparc.a2009s", + desc="Aparc a2009s parcellation statistics files", + ) curv_stats = OutputMultiPath( - File(exists=True), - loc='stats', - altkey='curv', - desc='Curvature statistics files') + File(exists=True), loc="stats", altkey="curv", desc="Curvature statistics files" + ) entorhinal_exvivo_stats = OutputMultiPath( File(exists=True), - loc='stats', - altkey='entorhinal_exvivo', - desc='Entorhinal exvivo statistics files') + loc="stats", + altkey="entorhinal_exvivo", + desc="Entorhinal exvivo statistics files", + ) class FreeSurferSource(IOBase): @@ -1735,36 +1818,35 @@ class FreeSurferSource(IOBase): >>> res = fs.run() # doctest: +SKIP """ + input_spec = FSSourceInputSpec output_spec = FSSourceOutputSpec _always_run = True - _additional_metadata = ['loc', 'altkey'] + _additional_metadata = ["loc", "altkey"] def _get_files(self, path, key, dirval, altkey=None): - globsuffix = '' - if dirval == 'mri': - globsuffix = '.mgz' - elif dirval == 'stats': - globsuffix = '.stats' - globprefix = '' - if dirval in ('surf', 'label', 'stats'): - if self.inputs.hemi != 'both': - globprefix = self.inputs.hemi + '.' + globsuffix = "" + if dirval == "mri": + globsuffix = ".mgz" + elif dirval == "stats": + globsuffix = ".stats" + globprefix = "" + if dirval in ("surf", "label", "stats"): + if self.inputs.hemi != "both": + globprefix = self.inputs.hemi + "." else: - globprefix = '?h.' - if key in ('aseg_stats', 'wmparc_stats'): - globprefix = '' - elif key == 'ribbon': - if self.inputs.hemi != 'both': - globprefix = self.inputs.hemi + '.' + globprefix = "?h." + if key in ("aseg_stats", "wmparc_stats"): + globprefix = "" + elif key == "ribbon": + if self.inputs.hemi != "both": + globprefix = self.inputs.hemi + "." 
else: - globprefix = '*' + globprefix = "*" keys = ensure_list(altkey) if altkey else [key] - globfmt = os.path.join(path, dirval, ''.join((globprefix, '{}', - globsuffix))) + globfmt = os.path.join(path, dirval, "".join((globprefix, "{}", globsuffix))) return [ - os.path.abspath(f) for key in keys - for f in glob.glob(globfmt.format(key)) + os.path.abspath(f) for key in keys for f in glob.glob(globfmt.format(key)) ] def _list_outputs(self): @@ -1773,9 +1855,12 @@ def _list_outputs(self): output_traits = self._outputs() outputs = output_traits.get() for k in list(outputs.keys()): - val = self._get_files(subject_path, k, - output_traits.traits()[k].loc, - output_traits.traits()[k].altkey) + val = self._get_files( + subject_path, + k, + output_traits.traits()[k].loc, + output_traits.traits()[k].altkey, + ) if val: outputs[k] = simplify_list(val) return outputs @@ -1785,23 +1870,24 @@ class XNATSourceInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): query_template = Str( mandatory=True, - desc=('Layout used to get files. Relative to base ' - 'directory if defined')) + desc=("Layout used to get files. Relative to base " "directory if defined"), + ) query_template_args = traits.Dict( Str, traits.List(traits.List), value=dict(outfiles=[]), usedefault=True, - desc='Information to plug into template') + desc="Information to plug into template", + ) - server = Str(mandatory=True, requires=['user', 'pwd'], xor=['config']) + server = Str(mandatory=True, requires=["user", "pwd"], xor=["config"]) user = Str() pwd = traits.Password() - config = File(mandatory=True, xor=['server']) + config = File(mandatory=True, xor=["server"]) - cache_dir = Directory(desc='Cache directory') + cache_dir = Directory(desc="Cache directory") class XNATSource(LibraryBaseInterface, IOBase): @@ -1837,9 +1923,10 @@ class XNATSource(LibraryBaseInterface, IOBase): """ + input_spec = XNATSourceInputSpec output_spec = DynamicTraitedSpec - _pkg = 'pyxnat' + _pkg = "pyxnat" def __init__(self, infields=None, outfields=None, **kwargs): """ @@ -1862,15 +1949,17 @@ def __init__(self, infields=None, outfields=None, **kwargs): for key in infields: self.inputs.add_trait(key, traits.Any) undefined_traits[key] = Undefined - self.inputs.query_template_args['outfiles'] = [infields] + self.inputs.query_template_args["outfiles"] = [infields] if outfields: # add ability to insert field specific templates self.inputs.add_trait( - 'field_template', + "field_template", traits.Dict( traits.Enum(outfields), - desc="arguments that fit into query_template")) - undefined_traits['field_template'] = Undefined + desc="arguments that fit into query_template", + ), + ) + undefined_traits["field_template"] = Undefined # self.inputs.remove_trait('query_template_args') outdict = {} for key in outfields: @@ -1896,52 +1985,59 @@ def _list_outputs(self): if self.inputs.config: xnat = pyxnat.Interface(config=self.inputs.config) else: - xnat = pyxnat.Interface(self.inputs.server, self.inputs.user, - self.inputs.pwd, cache_dir) + xnat = pyxnat.Interface( + self.inputs.server, self.inputs.user, self.inputs.pwd, cache_dir + ) if self._infields: for key in self._infields: value = getattr(self.inputs, key) if not isdefined(value): - msg = ("%s requires a value for input '%s' " - "because it was listed in 'infields'" % - (self.__class__.__name__, key)) + msg = ( + "%s requires a value for input '%s' " + "because it was listed in 'infields'" + % (self.__class__.__name__, key) + ) raise ValueError(msg) outputs = {} for key, args in 
list(self.inputs.query_template_args.items()): outputs[key] = [] template = self.inputs.query_template - if hasattr(self.inputs, 'field_template') and \ - isdefined(self.inputs.field_template) and \ - key in self.inputs.field_template: + if ( + hasattr(self.inputs, "field_template") + and isdefined(self.inputs.field_template) + and key in self.inputs.field_template + ): template = self.inputs.field_template[key] if not args: - file_objects = xnat.select(template).get('obj') + file_objects = xnat.select(template).get("obj") if file_objects == []: - raise IOError('Template %s returned no files' % template) - outputs[key] = simplify_list([ - str(file_object.get()) for file_object in file_objects - if file_object.exists() - ]) + raise IOError("Template %s returned no files" % template) + outputs[key] = simplify_list( + [ + str(file_object.get()) + for file_object in file_objects + if file_object.exists() + ] + ) for argnum, arglist in enumerate(args): maxlen = 1 for arg in arglist: - if isinstance(arg, - (str, bytes)) and hasattr(self.inputs, arg): + if isinstance(arg, (str, bytes)) and hasattr(self.inputs, arg): arg = getattr(self.inputs, arg) if isinstance(arg, list): if (maxlen > 1) and (len(arg) != maxlen): - raise ValueError('incompatible number ' - 'of arguments for %s' % key) + raise ValueError( + "incompatible number " "of arguments for %s" % key + ) if len(arg) > maxlen: maxlen = len(arg) outfiles = [] for i in range(maxlen): argtuple = [] for arg in arglist: - if isinstance(arg, (str, bytes)) and \ - hasattr(self.inputs, arg): + if isinstance(arg, (str, bytes)) and hasattr(self.inputs, arg): arg = getattr(self.inputs, arg) if isinstance(arg, list): argtuple.append(arg[i]) @@ -1949,29 +2045,31 @@ def _list_outputs(self): argtuple.append(arg) if argtuple: target = template % tuple(argtuple) - file_objects = xnat.select(target).get('obj') + file_objects = xnat.select(target).get("obj") if file_objects == []: - raise IOError('Template %s ' - 'returned no files' % target) - - outfiles = simplify_list([ - str(file_object.get()) - for file_object in file_objects - if file_object.exists() - ]) + raise IOError("Template %s " "returned no files" % target) + + outfiles = simplify_list( + [ + str(file_object.get()) + for file_object in file_objects + if file_object.exists() + ] + ) else: - file_objects = xnat.select(template).get('obj') + file_objects = xnat.select(template).get("obj") if file_objects == []: - raise IOError('Template %s ' - 'returned no files' % template) + raise IOError("Template %s " "returned no files" % template) - outfiles = simplify_list([ - str(file_object.get()) - for file_object in file_objects - if file_object.exists() - ]) + outfiles = simplify_list( + [ + str(file_object.get()) + for file_object in file_objects + if file_object.exists() + ] + ) outputs[key].insert(i, outfiles) if len(outputs[key]) == 0: @@ -1985,36 +2083,44 @@ class XNATSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): _outputs = traits.Dict(Str, value={}, usedefault=True) - server = Str(mandatory=True, requires=['user', 'pwd'], xor=['config']) + server = Str(mandatory=True, requires=["user", "pwd"], xor=["config"]) user = Str() pwd = traits.Password() - config = File(mandatory=True, xor=['server']) - cache_dir = Directory(desc='') + config = File(mandatory=True, xor=["server"]) + cache_dir = Directory(desc="") - project_id = Str( - desc='Project in which to store the outputs', mandatory=True) + project_id = Str(desc="Project in which to store the outputs", mandatory=True) - subject_id = 
Str(desc='Set to subject id', mandatory=True) + subject_id = Str(desc="Set to subject id", mandatory=True) - experiment_id = Str(desc='Set to workflow name', mandatory=True) + experiment_id = Str(desc="Set to workflow name", mandatory=True) assessor_id = Str( - desc=('Option to customize ouputs representation in XNAT - ' - 'assessor level will be used with specified id'), - xor=['reconstruction_id']) + desc=( + "Option to customize ouputs representation in XNAT - " + "assessor level will be used with specified id" + ), + xor=["reconstruction_id"], + ) reconstruction_id = Str( - desc=('Option to customize ouputs representation in XNAT - ' - 'reconstruction level will be used with specified id'), - xor=['assessor_id']) + desc=( + "Option to customize ouputs representation in XNAT - " + "reconstruction level will be used with specified id" + ), + xor=["assessor_id"], + ) share = traits.Bool( False, - desc=('Option to share the subjects from the original project' - 'instead of creating new ones when possible - the created ' - 'experiments are then shared back to the original project'), - usedefault=True) + desc=( + "Option to share the subjects from the original project" + "instead of creating new ones when possible - the created " + "experiments are then shared back to the original project" + ), + usedefault=True, + ) def __setattr__(self, key, value): if key not in self.copyable_trait_names(): @@ -2028,8 +2134,9 @@ class XNATSink(LibraryBaseInterface, IOBase): list of nifti files and provides a set of structured output fields. """ + input_spec = XNATSinkInputSpec - _pkg = 'pyxnat' + _pkg = "pyxnat" def _list_outputs(self): """Execute this module. @@ -2042,34 +2149,36 @@ def _list_outputs(self): if self.inputs.config: xnat = pyxnat.Interface(config=self.inputs.config) else: - xnat = pyxnat.Interface(self.inputs.server, self.inputs.user, - self.inputs.pwd, cache_dir) + xnat = pyxnat.Interface( + self.inputs.server, self.inputs.user, self.inputs.pwd, cache_dir + ) # if possible share the subject from the original project if self.inputs.share: subject_id = self.inputs.subject_id result = xnat.select( - 'xnat:subjectData', - ['xnat:subjectData/PROJECT', 'xnat:subjectData/SUBJECT_ID' - ]).where('xnat:subjectData/SUBJECT_ID = %s AND' % subject_id) + "xnat:subjectData", + ["xnat:subjectData/PROJECT", "xnat:subjectData/SUBJECT_ID"], + ).where("xnat:subjectData/SUBJECT_ID = %s AND" % subject_id) # subject containing raw data exists on the server - if (result.data and isinstance(result.data[0], dict)): + if result.data and isinstance(result.data[0], dict): result = result.data[0] - shared = xnat.select('/project/%s/subject/%s' % - (self.inputs.project_id, - self.inputs.subject_id)) + shared = xnat.select( + "/project/%s/subject/%s" + % (self.inputs.project_id, self.inputs.subject_id) + ) if not shared.exists(): # subject not in share project - share_project = xnat.select( - '/project/%s' % self.inputs.project_id) + share_project = xnat.select("/project/%s" % self.inputs.project_id) if not share_project.exists(): # check project exists share_project.insert() - subject = xnat.select('/project/%(project)s' - '/subject/%(subject_id)s' % result) + subject = xnat.select( + "/project/%(project)s" "/subject/%(subject_id)s" % result + ) subject.share(str(self.inputs.project_id)) @@ -2077,17 +2186,18 @@ def _list_outputs(self): uri_template_args = dict( project_id=quote_id(self.inputs.project_id), subject_id=self.inputs.subject_id, - experiment_id=quote_id(self.inputs.experiment_id)) + 
experiment_id=quote_id(self.inputs.experiment_id), + ) if self.inputs.share: - uri_template_args['original_project'] = result['project'] + uri_template_args["original_project"] = result["project"] if self.inputs.assessor_id: - uri_template_args['assessor_id'] = quote_id( - self.inputs.assessor_id) + uri_template_args["assessor_id"] = quote_id(self.inputs.assessor_id) elif self.inputs.reconstruction_id: - uri_template_args['reconstruction_id'] = quote_id( - self.inputs.reconstruction_id) + uri_template_args["reconstruction_id"] = quote_id( + self.inputs.reconstruction_id + ) # gather outputs and upload them for key, files in list(self.inputs._outputs.items()): @@ -2096,27 +2206,29 @@ def _list_outputs(self): if isinstance(name, list): for i, file_name in enumerate(name): - push_file(self, xnat, file_name, '%s_' % i + key, - uri_template_args) + push_file( + self, xnat, file_name, "%s_" % i + key, uri_template_args + ) else: push_file(self, xnat, name, key, uri_template_args) def quote_id(string): - return str(string).replace('_', '---') + return str(string).replace("_", "---") def unquote_id(string): - return str(string).replace('---', '_') + return str(string).replace("---", "_") def push_file(self, xnat, file_name, out_key, uri_template_args): # grab info from output file names val_list = [ - unquote_id(val) for part in os.path.split(file_name)[0].split(os.sep) - for val in part.split('_')[1:] - if part.startswith('_') and len(part.split('_')) % 2 + unquote_id(val) + for part in os.path.split(file_name)[0].split(os.sep) + for val in part.split("_")[1:] + if part.startswith("_") and len(part.split("_")) % 2 ] keymap = dict(list(zip(val_list[1::2], val_list[2::2]))) @@ -2127,38 +2239,41 @@ def push_file(self, xnat, file_name, out_key, uri_template_args): _label.extend([key, val]) # select and define container level - uri_template_args['container_type'] = None + uri_template_args["container_type"] = None - for container in ['assessor_id', 'reconstruction_id']: + for container in ["assessor_id", "reconstruction_id"]: if getattr(self.inputs, container): - uri_template_args['container_type'] = container.split('_id')[0] - uri_template_args['container_id'] = uri_template_args[container] + uri_template_args["container_type"] = container.split("_id")[0] + uri_template_args["container_id"] = uri_template_args[container] - if uri_template_args['container_type'] is None: - uri_template_args['container_type'] = 'reconstruction' + if uri_template_args["container_type"] is None: + uri_template_args["container_type"] = "reconstruction" - uri_template_args['container_id'] = unquote_id( - uri_template_args['experiment_id']) + uri_template_args["container_id"] = unquote_id( + uri_template_args["experiment_id"] + ) if _label: - uri_template_args['container_id'] += ( - '_results_%s' % '_'.join(_label)) + uri_template_args["container_id"] += "_results_%s" % "_".join(_label) else: - uri_template_args['container_id'] += '_results' + uri_template_args["container_id"] += "_results" # define resource level - uri_template_args['resource_label'] = ('%s_%s' % - (uri_template_args['container_id'], - out_key.split('.')[0])) + uri_template_args["resource_label"] = "%s_%s" % ( + uri_template_args["container_id"], + out_key.split(".")[0], + ) # define file level - uri_template_args['file_name'] = os.path.split( - os.path.abspath(unquote_id(file_name)))[1] + uri_template_args["file_name"] = os.path.split( + os.path.abspath(unquote_id(file_name)) + )[1] uri_template = ( - '/project/%(project_id)s/subject/%(subject_id)s' - 
'/experiment/%(experiment_id)s/%(container_type)s/%(container_id)s' - '/out/resource/%(resource_label)s/file/%(file_name)s') + "/project/%(project_id)s/subject/%(subject_id)s" + "/experiment/%(experiment_id)s/%(container_type)s/%(container_id)s" + "/out/resource/%(resource_label)s/file/%(file_name)s" + ) # unquote values before uploading for key in list(uri_template_args.keys()): @@ -2166,18 +2281,19 @@ def push_file(self, xnat, file_name, out_key, uri_template_args): # upload file remote_file = xnat.select(uri_template % uri_template_args) - remote_file.insert( - file_name, experiments='xnat:imageSessionData', use_label=True) + remote_file.insert(file_name, experiments="xnat:imageSessionData", use_label=True) # shares the experiment back to the original project if relevant - if 'original_project' in uri_template_args: + if "original_project" in uri_template_args: experiment_template = ( - '/project/%(original_project)s' - '/subject/%(subject_id)s/experiment/%(experiment_id)s') + "/project/%(original_project)s" + "/subject/%(subject_id)s/experiment/%(experiment_id)s" + ) xnat.select(experiment_template % uri_template_args).share( - uri_template_args['original_project']) + uri_template_args["original_project"] + ) def capture_provenance(): @@ -2212,8 +2328,9 @@ class SQLiteSink(LibraryBaseInterface, IOBase): >>> sql.run() # doctest: +SKIP """ + input_spec = SQLiteSinkInputSpec - _pkg = 'sqlite3' + _pkg = "sqlite3" def __init__(self, input_names, **inputs): @@ -2226,13 +2343,17 @@ def _list_outputs(self): """Execute this module. """ import sqlite3 - conn = sqlite3.connect( - self.inputs.database_file, check_same_thread=False) + + conn = sqlite3.connect(self.inputs.database_file, check_same_thread=False) c = conn.cursor() - c.execute("INSERT OR REPLACE INTO %s (" % self.inputs.table_name + - ",".join(self._input_names) + ") VALUES (" + - ",".join(["?"] * len(self._input_names)) + ")", - [getattr(self.inputs, name) for name in self._input_names]) + c.execute( + "INSERT OR REPLACE INTO %s (" % self.inputs.table_name + + ",".join(self._input_names) + + ") VALUES (" + + ",".join(["?"] * len(self._input_names)) + + ")", + [getattr(self.inputs, name) for name in self._input_names], + ) conn.commit() c.close() return None @@ -2240,17 +2361,16 @@ def _list_outputs(self): class MySQLSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): host = Str( - 'localhost', + "localhost", mandatory=True, - requires=['username', 'password'], - xor=['config'], - usedefault=True) + requires=["username", "password"], + xor=["config"], + usedefault=True, + ) config = File( - mandatory=True, - xor=['host'], - desc="MySQL Options File (same format as my.cnf)") - database_name = Str( - mandatory=True, desc='Otherwise known as the schema name') + mandatory=True, xor=["host"], desc="MySQL Options File (same format as my.cnf)" + ) + database_name = Str(mandatory=True, desc="Otherwise known as the schema name") table_name = Str(mandatory=True) username = Str() password = Str() @@ -2272,6 +2392,7 @@ class MySQLSink(IOBase): >>> sql.run() # doctest: +SKIP """ + input_spec = MySQLSinkInputSpec def __init__(self, input_names, **inputs): @@ -2285,45 +2406,52 @@ def _list_outputs(self): """Execute this module. 
""" import MySQLdb + if isdefined(self.inputs.config): conn = MySQLdb.connect( - db=self.inputs.database_name, - read_default_file=self.inputs.config) + db=self.inputs.database_name, read_default_file=self.inputs.config + ) else: conn = MySQLdb.connect( host=self.inputs.host, user=self.inputs.username, passwd=self.inputs.password, - db=self.inputs.database_name) + db=self.inputs.database_name, + ) c = conn.cursor() - c.execute("REPLACE INTO %s (" % self.inputs.table_name + - ",".join(self._input_names) + ") VALUES (" + - ",".join(["%s"] * len(self._input_names)) + ")", - [getattr(self.inputs, name) for name in self._input_names]) + c.execute( + "REPLACE INTO %s (" % self.inputs.table_name + + ",".join(self._input_names) + + ") VALUES (" + + ",".join(["%s"] * len(self._input_names)) + + ")", + [getattr(self.inputs, name) for name in self._input_names], + ) conn.commit() c.close() return None class SSHDataGrabberInputSpec(DataGrabberInputSpec): - hostname = Str(mandatory=True, desc='Server hostname.') - username = Str(desc='Server username.') - password = traits.Password(desc='Server password.') + hostname = Str(mandatory=True, desc="Server hostname.") + username = Str(desc="Server username.") + password = traits.Password(desc="Server password.") download_files = traits.Bool( True, usedefault=True, - desc='If false it will return the file names without downloading them') + desc="If false it will return the file names without downloading them", + ) base_directory = Str( - mandatory=True, - desc='Path to the base directory consisting of subject data.') + mandatory=True, desc="Path to the base directory consisting of subject data." + ) template_expression = traits.Enum( - ['fnmatch', 'regexp'], + ["fnmatch", "regexp"], usedefault=True, - desc='Use either fnmatch or regexp to express templates') + desc="Use either fnmatch or regexp to express templates", + ) ssh_log_to_file = Str( - '', - usedefault=True, - desc='If set SSH commands will be logged to the given file') + "", usedefault=True, desc="If set SSH commands will be logged to the given file" + ) class SSHDataGrabber(LibraryBaseInterface, DataGrabber): @@ -2387,10 +2515,11 @@ class SSHDataGrabber(LibraryBaseInterface, DataGrabber): >>> dg.inputs.template_args['struct'] = [['sid']] """ + input_spec = SSHDataGrabberInputSpec output_spec = DynamicTraitedSpec _always_run = False - _pkg = 'paramiko' + _pkg = "paramiko" def __init__(self, infields=None, outfields=None, **kwargs): """ @@ -2406,18 +2535,21 @@ def __init__(self, infields=None, outfields=None, **kwargs): """ if not outfields: - outfields = ['outfiles'] + outfields = ["outfiles"] kwargs = kwargs.copy() - kwargs['infields'] = infields - kwargs['outfields'] = outfields + kwargs["infields"] = infields + kwargs["outfields"] = outfields super(SSHDataGrabber, self).__init__(**kwargs) - if (None in (self.inputs.username, self.inputs.password)): - raise ValueError("either both username and password " - "are provided or none of them") + if None in (self.inputs.username, self.inputs.password): + raise ValueError( + "either both username and password " "are provided or none of them" + ) - if (self.inputs.template_expression == 'regexp' - and self.inputs.template[-1] != '$'): - self.inputs.template += '$' + if ( + self.inputs.template_expression == "regexp" + and self.inputs.template[-1] != "$" + ): + self.inputs.template += "$" def _get_files_over_ssh(self, template): """Get the files matching template over an SSH connection.""" @@ -2430,17 +2562,17 @@ def _get_files_over_ssh(self, template): 
template_dir = os.path.dirname(template) template_base = os.path.basename(template) every_file_in_dir = sftp.listdir(template_dir) - if self.inputs.template_expression == 'fnmatch': + if self.inputs.template_expression == "fnmatch": outfiles = fnmatch.filter(every_file_in_dir, template_base) - elif self.inputs.template_expression == 'regexp': + elif self.inputs.template_expression == "regexp": regexp = re.compile(template_base) outfiles = list(filter(regexp.match, every_file_in_dir)) else: - raise ValueError('template_expression value invalid') + raise ValueError("template_expression value invalid") if len(outfiles) == 0: # no files - msg = 'Output template: %s returned no files' % template + msg = "Output template: %s returned no files" % template if self.inputs.raise_on_empty: raise IOError(msg) else: @@ -2456,22 +2588,25 @@ def _get_files_over_ssh(self, template): # actually download the files, if desired if self.inputs.download_files: - files_to_download = copy.copy(outfiles) # make sure new list! + files_to_download = copy.copy(outfiles) # make sure new list! # check to see if there are any related files to download for file_to_download in files_to_download: related_to_current = get_related_files( - file_to_download, include_this_file=False) + file_to_download, include_this_file=False + ) existing_related_not_downloading = [ - f for f in related_to_current - if f in every_file_in_dir and f not in files_to_download] + f + for f in related_to_current + if f in every_file_in_dir and f not in files_to_download + ] files_to_download.extend(existing_related_not_downloading) for f in files_to_download: try: sftp.get(os.path.join(template_dir, f), f) except IOError: - iflogger.info('remote file %s not found' % f) + iflogger.info("remote file %s not found" % f) # return value outfiles = simplify_list(outfiles) @@ -2489,17 +2624,21 @@ def _list_outputs(self): for key in self._infields: value = getattr(self.inputs, key) if not isdefined(value): - msg = "%s requires a value for input '%s' because it was listed in 'infields'" % \ - (self.__class__.__name__, key) + msg = ( + "%s requires a value for input '%s' because it was listed in 'infields'" + % (self.__class__.__name__, key) + ) raise ValueError(msg) outputs = {} for key, args in list(self.inputs.template_args.items()): outputs[key] = [] template = self.inputs.template - if hasattr(self.inputs, 'field_template') and \ - isdefined(self.inputs.field_template) and \ - key in self.inputs.field_template: + if ( + hasattr(self.inputs, "field_template") + and isdefined(self.inputs.field_template) + and key in self.inputs.field_template + ): template = self.inputs.field_template[key] if not args: @@ -2508,22 +2647,20 @@ def _list_outputs(self): for argnum, arglist in enumerate(args): maxlen = 1 for arg in arglist: - if isinstance(arg, - (str, bytes)) and hasattr(self.inputs, arg): + if isinstance(arg, (str, bytes)) and hasattr(self.inputs, arg): arg = getattr(self.inputs, arg) if isinstance(arg, list): if (maxlen > 1) and (len(arg) != maxlen): raise ValueError( - 'incompatible number of arguments for %s' % - key) + "incompatible number of arguments for %s" % key + ) if len(arg) > maxlen: maxlen = len(arg) outfiles = [] for i in range(maxlen): argtuple = [] for arg in arglist: - if isinstance(arg, (str, bytes)) and hasattr( - self.inputs, arg): + if isinstance(arg, (str, bytes)) and hasattr(self.inputs, arg): arg = getattr(self.inputs, arg) if isinstance(arg, list): argtuple.append(arg[i]) @@ -2535,9 +2672,10 @@ def _list_outputs(self): filledtemplate 
= template % tuple(argtuple) except TypeError as e: raise TypeError( - e.message + - ": Template %s failed to convert with args %s" - % (template, str(tuple(argtuple)))) + e.message + + ": Template %s failed to convert with args %s" + % (template, str(tuple(argtuple))) + ) outputs[key].append(self._get_files_over_ssh(filledtemplate)) @@ -2560,29 +2698,33 @@ def _list_outputs(self): def _get_ssh_client(self): import paramiko + config = paramiko.SSHConfig() - config.parse(open(os.path.expanduser('~/.ssh/config'))) + config.parse(open(os.path.expanduser("~/.ssh/config"))) host = config.lookup(self.inputs.hostname) - if 'proxycommand' in host: + if "proxycommand" in host: proxy = paramiko.ProxyCommand( - subprocess.check_output([ - os.environ['SHELL'], '-c', - 'echo %s' % host['proxycommand'] - ]).strip()) + subprocess.check_output( + [os.environ["SHELL"], "-c", "echo %s" % host["proxycommand"]] + ).strip() + ) else: proxy = None client = paramiko.SSHClient() client.load_system_host_keys() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - client.connect(host['hostname'], username=host['user'], sock=proxy) + client.connect(host["hostname"], username=host["user"], sock=proxy) return client class JSONFileGrabberInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): - in_file = File(exists=True, desc='JSON source file') + in_file = File(exists=True, desc="JSON source file") defaults = traits.Dict( - desc=('JSON dictionary that sets default output' - 'values, overridden by values found in in_file')) + desc=( + "JSON dictionary that sets default output" + "values, overridden by values found in in_file" + ) + ) class JSONFileGrabber(IOBase): @@ -2605,6 +2747,7 @@ class JSONFileGrabber(IOBase): >>> pprint.pprint(res.outputs.get()) # doctest:, +ELLIPSIS {'param1': 'exampleStr', 'param2': 4, 'param3': 1.0} """ + input_spec = JSONFileGrabberInputSpec output_spec = DynamicTraitedSpec _always_run = True @@ -2614,11 +2757,11 @@ def _list_outputs(self): outputs = {} if isdefined(self.inputs.in_file): - with open(self.inputs.in_file, 'r') as f: + with open(self.inputs.in_file, "r") as f: data = simplejson.load(f) if not isinstance(data, dict): - raise RuntimeError('JSON input has no dictionary structure') + raise RuntimeError("JSON input has no dictionary structure") for key, value in list(data.items()): outputs[key] = value @@ -2633,9 +2776,8 @@ def _list_outputs(self): class JSONFileSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): - out_file = File(desc='JSON sink file') - in_dict = traits.Dict( - value={}, usedefault=True, desc='input JSON dictionary') + out_file = File(desc="JSON sink file") + in_dict = traits.Dict(value={}, usedefault=True, desc="input JSON dictionary") _outputs = traits.Dict(value={}, usedefault=True) def __setattr__(self, key, value): @@ -2650,7 +2792,7 @@ def __setattr__(self, key, value): class JSONFileSinkOutputSpec(TraitedSpec): - out_file = File(desc='JSON sink file') + out_file = File(desc="JSON sink file") class JSONFileSink(IOBase): @@ -2681,6 +2823,7 @@ class JSONFileSink(IOBase): >>> dictsink.run() # doctest: +SKIP """ + input_spec = JSONFileSinkInputSpec output_spec = JSONFileSinkOutputSpec @@ -2699,8 +2842,8 @@ def __init__(self, infields=[], force_run=True, **inputs): self._always_run = True def _process_name(self, name, val): - if '.' in name: - newkeys = name.split('.') + if "." 
in name: + newkeys = name.split(".") name = newkeys.pop(0) nested_dict = {newkeys.pop(): val} @@ -2715,7 +2858,7 @@ def _list_outputs(self): import os.path as op if not isdefined(self.inputs.out_file): - out_file = op.abspath('datasink.json') + out_file = op.abspath("datasink.json") else: out_file = op.abspath(self.inputs.out_file) @@ -2723,37 +2866,35 @@ def _list_outputs(self): # Overwrite in_dict entries automatically for key, val in list(self.inputs._outputs.items()): - if not isdefined(val) or key == 'trait_added': + if not isdefined(val) or key == "trait_added": continue key, val = self._process_name(key, val) out_dict[key] = val - with open(out_file, 'w') as f: + with open(out_file, "w") as f: f.write(str(simplejson.dumps(out_dict, ensure_ascii=False))) outputs = self.output_spec().get() - outputs['out_file'] = out_file + outputs["out_file"] = out_file return outputs class BIDSDataGrabberInputSpec(DynamicTraitedSpec): - base_dir = Directory( - exists=True, - desc='Path to BIDS Directory.', - mandatory=True) + base_dir = Directory(exists=True, desc="Path to BIDS Directory.", mandatory=True) output_query = traits.Dict( - key_trait=Str, - value_trait=traits.Dict, - desc='Queries for outfield outputs') + key_trait=Str, value_trait=traits.Dict, desc="Queries for outfield outputs" + ) raise_on_empty = traits.Bool( - True, usedefault=True, - desc='Generate exception if list is empty for a given field') + True, + usedefault=True, + desc="Generate exception if list is empty for a given field", + ) index_derivatives = traits.Bool( - False, mandatory=True, usedefault=True, - desc='Index derivatives/ sub-directory') + False, mandatory=True, usedefault=True, desc="Index derivatives/ sub-directory" + ) extra_derivatives = traits.List( - Directory(exists=True), - desc='Additional derivative directories to index') + Directory(exists=True), desc="Additional derivative directories to index" + ) class BIDSDataGrabber(LibraryBaseInterface, IOBase): @@ -2786,10 +2927,11 @@ class BIDSDataGrabber(LibraryBaseInterface, IOBase): >>> results = bg.run() # doctest: +SKIP """ + input_spec = BIDSDataGrabberInputSpec output_spec = DynamicTraitedSpec _always_run = True - _pkg = 'bids' + _pkg = "bids" def __init__(self, infields=None, **kwargs): """ @@ -2802,19 +2944,25 @@ def __init__(self, infields=None, **kwargs): if not isdefined(self.inputs.output_query): self.inputs.output_query = { - "bold": {"datatype": "func", "suffix": "bold", - "extensions": ["nii", ".nii.gz"]}, - "T1w": {"datatype": "anat", "suffix": "T1w", - "extensions": ["nii", ".nii.gz"]}, - } + "bold": { + "datatype": "func", + "suffix": "bold", + "extensions": ["nii", ".nii.gz"], + }, + "T1w": { + "datatype": "anat", + "suffix": "T1w", + "extensions": ["nii", ".nii.gz"], + }, + } # If infields is empty, use all BIDS entities if infields is None: from bids import layout as bidslayout - bids_config = join( - dirname(bidslayout.__file__), 'config', 'bids.json') - bids_config = json.load(open(bids_config, 'r')) - infields = [i['name'] for i in bids_config['entities']] + + bids_config = join(dirname(bidslayout.__file__), "config", "bids.json") + bids_config = json.load(open(bids_config, "r")) + infields = [i["name"] for i in bids_config["entities"]] self._infields = infields or [] @@ -2828,8 +2976,10 @@ def __init__(self, infields=None, **kwargs): def _list_outputs(self): from bids import BIDSLayout - layout = BIDSLayout(self.inputs.base_dir, - derivatives=self.inputs.index_derivatives) + + layout = BIDSLayout( + self.inputs.base_dir, 
derivatives=self.inputs.index_derivatives + ) if isdefined(self.inputs.extra_derivatives): layout.add_derivatives(self.inputs.extra_derivatives) @@ -2845,9 +2995,9 @@ def _list_outputs(self): for key, query in self.inputs.output_query.items(): args = query.copy() args.update(filters) - filelist = layout.get(return_type='file', **args) + filelist = layout.get(return_type="file", **args) if len(filelist) == 0: - msg = 'Output key: %s returned no files' % key + msg = "Output key: %s returned no files" % key if self.inputs.raise_on_empty: raise IOError(msg) else: @@ -2862,14 +3012,16 @@ def _add_output_traits(self, base): class ExportFileInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc='Input file name') - out_file = File(mandatory=True, desc='Output file name') - check_extension = traits.Bool(True, desc='Ensure that the input and output file extensions match') - clobber = traits.Bool(desc='Permit overwriting existing files') + in_file = File(exists=True, mandatory=True, desc="Input file name") + out_file = File(mandatory=True, desc="Output file name") + check_extension = traits.Bool( + True, desc="Ensure that the input and output file extensions match" + ) + clobber = traits.Bool(desc="Permit overwriting existing files") class ExportFileOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='Output file name') + out_file = File(exists=True, desc="Output file name") class ExportFile(SimpleInterface): @@ -2893,6 +3045,7 @@ class ExportFile(SimpleInterface): True """ + input_spec = ExportFileInputSpec output_spec = ExportFileOutputSpec @@ -2900,10 +3053,16 @@ def _run_interface(self, runtime): if not self.inputs.clobber and op.exists(self.inputs.out_file): raise FileExistsError(self.inputs.out_file) if not op.isabs(self.inputs.out_file): - raise ValueError('Out_file must be an absolute path.') - if (self.inputs.check_extension and - split_filename(self.inputs.in_file)[2] != split_filename(self.inputs.out_file)[2]): - raise RuntimeError('%s and %s have different extensions' % (self.inputs.in_file, self.inputs.out_file)) + raise ValueError("Out_file must be an absolute path.") + if ( + self.inputs.check_extension + and split_filename(self.inputs.in_file)[2] + != split_filename(self.inputs.out_file)[2] + ): + raise RuntimeError( + "%s and %s have different extensions" + % (self.inputs.in_file, self.inputs.out_file) + ) shutil.copy(str(self.inputs.in_file), str(self.inputs.out_file)) - self._results['out_file'] = self.inputs.out_file + self._results["out_file"] = self.inputs.out_file return runtime diff --git a/nipype/interfaces/matlab.py b/nipype/interfaces/matlab.py index 71fc7ab0e5..488635843e 100644 --- a/nipype/interfaces/matlab.py +++ b/nipype/interfaces/matlab.py @@ -5,25 +5,33 @@ import os from .. 
import config -from .base import (CommandLineInputSpec, InputMultiPath, isdefined, - CommandLine, traits, File, Directory) +from .base import ( + CommandLineInputSpec, + InputMultiPath, + isdefined, + CommandLine, + traits, + File, + Directory, +) def get_matlab_command(): - if 'NIPYPE_NO_MATLAB' in os.environ: + if "NIPYPE_NO_MATLAB" in os.environ: return None try: - matlab_cmd = os.environ['MATLABCMD'] + matlab_cmd = os.environ["MATLABCMD"] except: - matlab_cmd = 'matlab' + matlab_cmd = "matlab" try: res = CommandLine( - command='which', + command="which", args=matlab_cmd, resource_monitor=False, - terminal_output='allatonce').run() + terminal_output="allatonce", + ).run() matlab_path = res.runtime.stdout.strip() except Exception: return None @@ -37,49 +45,51 @@ class MatlabInputSpec(CommandLineInputSpec): """ Basic expected inputs to Matlab interface """ script = traits.Str( - argstr='-r \"%s;exit\"', - desc='m-code to run', - mandatory=True, - position=-1) + argstr='-r "%s;exit"', desc="m-code to run", mandatory=True, position=-1 + ) uses_mcr = traits.Bool( - desc='use MCR interface', - xor=['nodesktop', 'nosplash', 'single_comp_thread'], - nohash=True) + desc="use MCR interface", + xor=["nodesktop", "nosplash", "single_comp_thread"], + nohash=True, + ) nodesktop = traits.Bool( True, - argstr='-nodesktop', + argstr="-nodesktop", usedefault=True, - desc='Switch off desktop mode on unix platforms', - nohash=True) + desc="Switch off desktop mode on unix platforms", + nohash=True, + ) nosplash = traits.Bool( True, - argstr='-nosplash', + argstr="-nosplash", usedefault=True, - desc='Switch of splash screen', - nohash=True) - logfile = File(argstr='-logfile %s', desc='Save matlab output to log') + desc="Switch of splash screen", + nohash=True, + ) + logfile = File(argstr="-logfile %s", desc="Save matlab output to log") single_comp_thread = traits.Bool( - argstr="-singleCompThread", - desc="force single threaded operation", - nohash=True) + argstr="-singleCompThread", desc="force single threaded operation", nohash=True + ) # non-commandline options - mfile = traits.Bool(True, desc='Run m-code using m-file', usedefault=True) + mfile = traits.Bool(True, desc="Run m-code using m-file", usedefault=True) script_file = File( - 'pyscript.m', usedefault=True, desc='Name of file to write m-code to') - paths = InputMultiPath(Directory(), desc='Paths to add to matlabpath') + "pyscript.m", usedefault=True, desc="Name of file to write m-code to" + ) + paths = InputMultiPath(Directory(), desc="Paths to add to matlabpath") prescript = traits.List( - ["ver,", "try,"], - usedefault=True, - desc='prescript to be added before code') + ["ver,", "try,"], usedefault=True, desc="prescript to be added before code" + ) postscript = traits.List( [ - "\n,catch ME,", "fprintf(2,'MATLAB code threw an exception:\\n');", + "\n,catch ME,", + "fprintf(2,'MATLAB code threw an exception:\\n');", "fprintf(2,'%s\\n',ME.message);", "if length(ME.stack) ~= 0, fprintf(2,'File:%s\\nName:%s\\nLine:%d\\n',ME.stack.file,ME.stack.name,ME.stack.line);, end;", - "end;" + "end;", ], - desc='script added after code', - usedefault=True) + desc="script added after code", + usedefault=True, + ) class MatlabCommand(CommandLine): @@ -91,7 +101,7 @@ class MatlabCommand(CommandLine): >>> out = mlab.run() # doctest: +SKIP """ - _cmd = 'matlab' + _cmd = "matlab" _default_matlab_cmd = None _default_mfile = None _default_paths = None @@ -113,13 +123,14 @@ def __init__(self, matlab_cmd=None, **inputs): if self._default_paths and not 
isdefined(self.inputs.paths): self.inputs.paths = self._default_paths - if not isdefined(self.inputs.single_comp_thread) and \ - not isdefined(self.inputs.uses_mcr): - if config.getboolean('execution', 'single_thread_matlab'): + if not isdefined(self.inputs.single_comp_thread) and not isdefined( + self.inputs.uses_mcr + ): + if config.getboolean("execution", "single_thread_matlab"): self.inputs.single_comp_thread = True # For matlab commands force all output to be returned since matlab # does not have a clean way of notifying an error - self.terminal_output = 'allatonce' + self.terminal_output = "allatonce" @classmethod def set_default_matlab_cmd(cls, matlab_cmd): @@ -155,23 +166,23 @@ def set_default_paths(cls, paths): cls._default_paths = paths def _run_interface(self, runtime): - self.terminal_output = 'allatonce' + self.terminal_output = "allatonce" runtime = super(MatlabCommand, self)._run_interface(runtime) try: # Matlab can leave the terminal in a barbbled state - os.system('stty sane') + os.system("stty sane") except: # We might be on a system where stty doesn't exist pass - if 'MATLAB code threw an exception' in runtime.stderr: + if "MATLAB code threw an exception" in runtime.stderr: self.raise_exception(runtime) return runtime def _format_arg(self, name, trait_spec, value): - if name in ['script']: + if name in ["script"]: argstr = trait_spec.argstr if self.inputs.uses_mcr: - argstr = '%s' + argstr = "%s" return self._gen_matlab_command(argstr, value) return super(MatlabCommand, self)._format_arg(name, trait_spec, value) @@ -189,33 +200,34 @@ def _gen_matlab_command(self, argstr, script_lines): # prescript takes different default value depending on the mfile argument if mfile: prescript.insert( - 0, - "fprintf(1,'Executing %s at %s:\\n',mfilename(),datestr(now));" + 0, "fprintf(1,'Executing %s at %s:\\n',mfilename(),datestr(now));" ) else: - prescript.insert( - 0, "fprintf(1,'Executing code at %s:\\n',datestr(now));") + prescript.insert(0, "fprintf(1,'Executing code at %s:\\n',datestr(now));") for path in paths: prescript.append("addpath('%s');\n" % path) if not mfile: # clean up the code of comments and replace newlines with commas - script_lines = ','.join([ - line for line in script_lines.split("\n") - if not line.strip().startswith("%") - ]) + script_lines = ",".join( + [ + line + for line in script_lines.split("\n") + if not line.strip().startswith("%") + ] + ) - script_lines = '\n'.join(prescript) + script_lines + '\n'.join( - postscript) + script_lines = "\n".join(prescript) + script_lines + "\n".join(postscript) if mfile: - with open(os.path.join(cwd, self.inputs.script_file), - 'wt') as mfile: + with open(os.path.join(cwd, self.inputs.script_file), "wt") as mfile: mfile.write(script_lines) if self.inputs.uses_mcr: - script = '%s' % (os.path.join(cwd, self.inputs.script_file)) + script = "%s" % (os.path.join(cwd, self.inputs.script_file)) else: script = "addpath('%s');%s" % ( - cwd, self.inputs.script_file.split('.')[0]) + cwd, + self.inputs.script_file.split(".")[0], + ) else: - script = ''.join(script_lines.split('\n')) + script = "".join(script_lines.split("\n")) return argstr % script diff --git a/nipype/interfaces/meshfix.py b/nipype/interfaces/meshfix.py index 53c7f56cfe..d1689ad9b4 100644 --- a/nipype/interfaces/meshfix.py +++ b/nipype/interfaces/meshfix.py @@ -6,145 +6,163 @@ import os.path as op from ..utils.filemanip import split_filename -from .base import (CommandLine, CommandLineInputSpec, traits, TraitedSpec, - isdefined, File) +from .base import ( + 
CommandLine, + CommandLineInputSpec, + traits, + TraitedSpec, + isdefined, + File, +) class MeshFixInputSpec(CommandLineInputSpec): number_of_biggest_shells = traits.Int( - argstr='--shells %d', desc="Only the N biggest shells are kept") + argstr="--shells %d", desc="Only the N biggest shells are kept" + ) epsilon_angle = traits.Range( - argstr='-a %f', + argstr="-a %f", low=0.0, high=2.0, - desc="Epsilon angle in degrees (must be between 0 and 2)") + desc="Epsilon angle in degrees (must be between 0 and 2)", + ) join_overlapping_largest_components = traits.Bool( - argstr='-j', - xor=['join_closest_components'], - desc='Join 2 biggest components if they overlap, remove the rest.') + argstr="-j", + xor=["join_closest_components"], + desc="Join 2 biggest components if they overlap, remove the rest.", + ) join_closest_components = traits.Bool( - argstr='-jc', - xor=['join_closest_components'], - desc='Join the closest pair of components.') + argstr="-jc", + xor=["join_closest_components"], + desc="Join the closest pair of components.", + ) quiet_mode = traits.Bool( - argstr='-q', desc="Quiet mode, don't write much to stdout.") + argstr="-q", desc="Quiet mode, don't write much to stdout." + ) - dont_clean = traits.Bool(argstr='--no-clean', desc="Don't Clean") + dont_clean = traits.Bool(argstr="--no-clean", desc="Don't Clean") save_as_stl = traits.Bool( - xor=['save_as_vrml', 'save_as_freesurfer_mesh'], - argstr='--stl', - desc="Result is saved in stereolithographic format (.stl)") + xor=["save_as_vrml", "save_as_freesurfer_mesh"], + argstr="--stl", + desc="Result is saved in stereolithographic format (.stl)", + ) save_as_vrml = traits.Bool( - argstr='--wrl', - xor=['save_as_stl', 'save_as_freesurfer_mesh'], - desc="Result is saved in VRML1.0 format (.wrl)") + argstr="--wrl", + xor=["save_as_stl", "save_as_freesurfer_mesh"], + desc="Result is saved in VRML1.0 format (.wrl)", + ) save_as_freesurfer_mesh = traits.Bool( - argstr='--fsmesh', - xor=['save_as_vrml', 'save_as_stl'], - desc="Result is saved in freesurfer mesh format") + argstr="--fsmesh", + xor=["save_as_vrml", "save_as_stl"], + desc="Result is saved in freesurfer mesh format", + ) - remove_handles = traits.Bool( - argstr='--remove-handles', desc="Remove handles") + remove_handles = traits.Bool(argstr="--remove-handles", desc="Remove handles") uniform_remeshing_steps = traits.Int( - argstr='-u %d', - requires=['uniform_remeshing_vertices'], - desc="Number of steps for uniform remeshing of the whole mesh") + argstr="-u %d", + requires=["uniform_remeshing_vertices"], + desc="Number of steps for uniform remeshing of the whole mesh", + ) uniform_remeshing_vertices = traits.Int( - argstr='--vertices %d', - requires=['uniform_remeshing_steps'], + argstr="--vertices %d", + requires=["uniform_remeshing_steps"], desc="Constrains the number of vertices." - "Must be used with uniform_remeshing_steps") + "Must be used with uniform_remeshing_steps", + ) laplacian_smoothing_steps = traits.Int( - argstr='--smooth %d', - desc="The number of laplacian smoothing steps to apply") + argstr="--smooth %d", desc="The number of laplacian smoothing steps to apply" + ) x_shift = traits.Int( - argstr='--smooth %d', - desc="Shifts the coordinates of the vertices when saving. Output must be in FreeSurfer format" + argstr="--smooth %d", + desc="Shifts the coordinates of the vertices when saving. 
Output must be in FreeSurfer format", ) # Cutting, decoupling, dilation cut_outer = traits.Int( - argstr='--cut-outer %d', - desc="Remove triangles of 1st that are outside of the 2nd shell.") + argstr="--cut-outer %d", + desc="Remove triangles of 1st that are outside of the 2nd shell.", + ) cut_inner = traits.Int( - argstr='--cut-inner %d', - desc="Remove triangles of 1st that are inside of the 2nd shell. Dilate 2nd by N; Fill holes and keep only 1st afterwards." + argstr="--cut-inner %d", + desc="Remove triangles of 1st that are inside of the 2nd shell. Dilate 2nd by N; Fill holes and keep only 1st afterwards.", ) decouple_inin = traits.Int( - argstr='--decouple-inin %d', + argstr="--decouple-inin %d", desc="Treat 1st file as inner, 2nd file as outer component." - "Resolve overlaps by moving inners triangles inwards. Constrain the min distance between the components > d." + "Resolve overlaps by moving inners triangles inwards. Constrain the min distance between the components > d.", ) decouple_outin = traits.Int( - argstr='--decouple-outin %d', + argstr="--decouple-outin %d", desc="Treat 1st file as outer, 2nd file as inner component." - "Resolve overlaps by moving outers triangles inwards. Constrain the min distance between the components > d." + "Resolve overlaps by moving outers triangles inwards. Constrain the min distance between the components > d.", ) decouple_outout = traits.Int( - argstr='--decouple-outout %d', + argstr="--decouple-outout %d", desc="Treat 1st file as outer, 2nd file as inner component." - "Resolve overlaps by moving outers triangles outwards. Constrain the min distance between the components > d." + "Resolve overlaps by moving outers triangles outwards. Constrain the min distance between the components > d.", ) finetuning_inwards = traits.Bool( - argstr='--fineTuneIn ', - requires=['finetuning_distance', 'finetuning_substeps'], + argstr="--fineTuneIn ", + requires=["finetuning_distance", "finetuning_substeps"], position=-3, - desc="Used to fine-tune the minimal distance between surfaces." + desc="Used to fine-tune the minimal distance between surfaces.", ) finetuning_outwards = traits.Bool( - argstr='--fineTuneOut ', - requires=['finetuning_distance', 'finetuning_substeps'], + argstr="--fineTuneOut ", + requires=["finetuning_distance", "finetuning_substeps"], position=-3, - xor=['finetuning_inwards'], - desc='Similar to finetuning_inwards, but ensures minimal distance in the other direction' + xor=["finetuning_inwards"], + desc="Similar to finetuning_inwards, but ensures minimal distance in the other direction", ) finetuning_distance = traits.Float( - argstr='%f', - requires=['finetuning_substeps'], + argstr="%f", + requires=["finetuning_substeps"], position=-2, desc="Used to fine-tune the minimal distance between surfaces." - "A minimal distance d is ensured, and reached in n substeps. When using the surfaces for subsequent volume meshing by gmsh, this step prevent too flat tetrahedra2)" + "A minimal distance d is ensured, and reached in n substeps. When using the surfaces for subsequent volume meshing by gmsh, this step prevent too flat tetrahedra2)", ) finetuning_substeps = traits.Int( - argstr='%d', - requires=['finetuning_distance'], + argstr="%d", + requires=["finetuning_distance"], position=-1, desc="Used to fine-tune the minimal distance between surfaces." - "A minimal distance d is ensured, and reached in n substeps. 
When using the surfaces for subsequent volume meshing by gmsh, this step prevent too flat tetrahedra2)" + "A minimal distance d is ensured, and reached in n substeps. When using the surfaces for subsequent volume meshing by gmsh, this step prevent too flat tetrahedra2)", ) dilation = traits.Int( - argstr='--dilate %d', - desc="Dilate the surface by d. d < 0 means shrinking.") + argstr="--dilate %d", desc="Dilate the surface by d. d < 0 means shrinking." + ) set_intersections_to_one = traits.Bool( - argstr='--intersect', + argstr="--intersect", desc="If the mesh contains intersections, return value = 1." - "If saved in gmsh format, intersections will be highlighted.") + "If saved in gmsh format, intersections will be highlighted.", + ) in_file1 = File(exists=True, argstr="%s", position=1, mandatory=True) in_file2 = File(exists=True, argstr="%s", position=2) output_type = traits.Enum( - 'off', ['stl', 'msh', 'wrl', 'vrml', 'fs', 'off'], + "off", + ["stl", "msh", "wrl", "vrml", "fs", "off"], usedefault=True, - desc='The output type to save the file as.') + desc="The output type to save the file as.", + ) out_filename = File( - genfile=True, - argstr="-o %s", - desc='The output filename for the fixed mesh file') + genfile=True, argstr="-o %s", desc="The output filename for the fixed mesh file" + ) class MeshFixOutputSpec(TraitedSpec): - mesh_file = File(exists=True, desc='The output mesh file') + mesh_file = File(exists=True, desc="The output mesh file") class MeshFix(CommandLine): @@ -177,7 +195,8 @@ class MeshFix(CommandLine): >>> fix.cmdline 'meshfix lh-pial.stl rh-pial.stl -o lh-pial_fixed.off' """ - _cmd = 'meshfix' + + _cmd = "meshfix" input_spec = MeshFixInputSpec output_spec = MeshFixOutputSpec @@ -185,33 +204,32 @@ def _list_outputs(self): outputs = self.output_spec().get() if isdefined(self.inputs.out_filename): path, name, ext = split_filename(self.inputs.out_filename) - ext = ext.replace('.', '') - out_types = ['stl', 'msh', 'wrl', 'vrml', 'fs', 'off'] + ext = ext.replace(".", "") + out_types = ["stl", "msh", "wrl", "vrml", "fs", "off"] # Make sure that the output filename uses one of the possible file types if any(ext == out_type.lower() for out_type in out_types): - outputs['mesh_file'] = op.abspath(self.inputs.out_filename) + outputs["mesh_file"] = op.abspath(self.inputs.out_filename) else: - outputs['mesh_file'] = op.abspath( - name + '.' + self.inputs.output_type) + outputs["mesh_file"] = op.abspath(name + "." 
+ self.inputs.output_type) else: - outputs['mesh_file'] = op.abspath(self._gen_outfilename()) + outputs["mesh_file"] = op.abspath(self._gen_outfilename()) return outputs def _gen_filename(self, name): - if name == 'out_filename': + if name == "out_filename": return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file1) - if self.inputs.save_as_freesurfer_mesh or self.inputs.output_type == 'fs': - self.inputs.output_type = 'fs' + if self.inputs.save_as_freesurfer_mesh or self.inputs.output_type == "fs": + self.inputs.output_type = "fs" self.inputs.save_as_freesurfer_mesh = True - if self.inputs.save_as_stl or self.inputs.output_type == 'stl': - self.inputs.output_type = 'stl' + if self.inputs.save_as_stl or self.inputs.output_type == "stl": + self.inputs.output_type = "stl" self.inputs.save_as_stl = True - if self.inputs.save_as_vrml or self.inputs.output_type == 'vrml': - self.inputs.output_type = 'vrml' + if self.inputs.save_as_vrml or self.inputs.output_type == "vrml": + self.inputs.output_type = "vrml" self.inputs.save_as_vrml = True - return name + '_fixed.' + self.inputs.output_type + return name + "_fixed." + self.inputs.output_type diff --git a/nipype/interfaces/minc/__init__.py b/nipype/interfaces/minc/__init__.py index 1ebea58b64..c593ea998b 100644 --- a/nipype/interfaces/minc/__init__.py +++ b/nipype/interfaces/minc/__init__.py @@ -9,7 +9,7 @@ http://carlo-hamalainen.net """ -from .base import (Info) +from .base import Info from .minc import ( Average, diff --git a/nipype/interfaces/minc/base.py b/nipype/interfaces/minc/base.py index 9436c2c3d1..5aca3e434e 100644 --- a/nipype/interfaces/minc/base.py +++ b/nipype/interfaces/minc/base.py @@ -14,7 +14,7 @@ from ..base import CommandLine -warnings.filterwarnings('always', category=UserWarning) +warnings.filterwarnings("always", category=UserWarning) def check_minc(): @@ -52,47 +52,46 @@ def version(): """ try: clout = CommandLine( - command='mincinfo', - args='-version', - terminal_output='allatonce').run() + command="mincinfo", args="-version", terminal_output="allatonce" + ).run() except IOError: return None out = clout.runtime.stdout def read_program_version(s): - if 'program' in s: - return s.split(':')[1].strip() + if "program" in s: + return s.split(":")[1].strip() return None def read_libminc_version(s): - if 'libminc' in s: - return s.split(':')[1].strip() + if "libminc" in s: + return s.split(":")[1].strip() return None def read_netcdf_version(s): - if 'netcdf' in s: - return ' '.join(s.split(':')[1:]).strip() + if "netcdf" in s: + return " ".join(s.split(":")[1:]).strip() return None def read_hdf5_version(s): - if 'HDF5' in s: - return s.split(':')[1].strip() + if "HDF5" in s: + return s.split(":")[1].strip() return None versions = { - 'minc': None, - 'libminc': None, - 'netcdf': None, - 'hdf5': None, + "minc": None, + "libminc": None, + "netcdf": None, + "hdf5": None, } - for l in out.split('\n'): + for l in out.split("\n"): for (name, f) in [ - ('minc', read_program_version), - ('libminc', read_libminc_version), - ('netcdf', read_netcdf_version), - ('hdf5', read_hdf5_version), + ("minc", read_program_version), + ("libminc", read_libminc_version), + ("netcdf", read_netcdf_version), + ("hdf5", read_hdf5_version), ]: if f(l) is not None: versions[name] = f(l) @@ -126,11 +125,13 @@ def aggregate_filename(files, new_suffix): path = os.getcwd() - if common_prefix == '': + if common_prefix == "": return os.path.abspath( os.path.join( - path, - 
os.path.splitext(files[0])[0] + '_' + new_suffix + '.mnc')) + path, os.path.splitext(files[0])[0] + "_" + new_suffix + ".mnc" + ) + ) else: return os.path.abspath( - os.path.join(path, common_prefix + '_' + new_suffix + '.mnc')) + os.path.join(path, common_prefix + "_" + new_suffix + ".mnc") + ) diff --git a/nipype/interfaces/minc/minc.py b/nipype/interfaces/minc/minc.py index 791c7bbc64..b4dfa1dac8 100644 --- a/nipype/interfaces/minc/minc.py +++ b/nipype/interfaces/minc/minc.py @@ -14,203 +14,233 @@ import re import warnings -from ..base import (TraitedSpec, CommandLineInputSpec, CommandLine, - StdOutCommandLineInputSpec, StdOutCommandLine, File, - Directory, InputMultiPath, OutputMultiPath, traits, - isdefined) +from ..base import ( + TraitedSpec, + CommandLineInputSpec, + CommandLine, + StdOutCommandLineInputSpec, + StdOutCommandLine, + File, + Directory, + InputMultiPath, + OutputMultiPath, + traits, + isdefined, +) from .base import aggregate_filename -warnings.filterwarnings('always', category=UserWarning) +warnings.filterwarnings("always", category=UserWarning) class ExtractInputSpec(StdOutCommandLineInputSpec): input_file = File( - desc='input file', - exists=True, - mandatory=True, - argstr='%s', - position=-2, + desc="input file", exists=True, mandatory=True, argstr="%s", position=-2, ) output_file = File( - desc='output file', + desc="output file", position=-1, - name_source=['input_file'], + name_source=["input_file"], hash_files=False, - name_template='%s.raw', - keep_extension=False) + name_template="%s.raw", + keep_extension=False, + ) _xor_write = ( - 'write_ascii', - 'write_ascii', - 'write_byte', - 'write_short', - 'write_int', - 'write_long', - 'write_float', - 'write_double', - 'write_signed', - 'write_unsigned', + "write_ascii", + "write_ascii", + "write_byte", + "write_short", + "write_int", + "write_long", + "write_float", + "write_double", + "write_signed", + "write_unsigned", ) write_ascii = traits.Bool( - desc='Write out data as ascii strings (default).', - argstr='-ascii', - xor=_xor_write) + desc="Write out data as ascii strings (default).", + argstr="-ascii", + xor=_xor_write, + ) write_byte = traits.Bool( - desc='Write out data as bytes.', argstr='-byte', xor=_xor_write) + desc="Write out data as bytes.", argstr="-byte", xor=_xor_write + ) write_short = traits.Bool( - desc='Write out data as short integers.', - argstr='-short', - xor=_xor_write) + desc="Write out data as short integers.", argstr="-short", xor=_xor_write + ) write_int = traits.Bool( - desc='Write out data as 32-bit integers.', - argstr='-int', - xor=_xor_write) + desc="Write out data as 32-bit integers.", argstr="-int", xor=_xor_write + ) write_long = traits.Bool( - desc='Superseded by write_int.', argstr='-long', xor=_xor_write) + desc="Superseded by write_int.", argstr="-long", xor=_xor_write + ) write_float = traits.Bool( - desc='Write out data as single precision floating-point values.', - argstr='-float', - xor=_xor_write) + desc="Write out data as single precision floating-point values.", + argstr="-float", + xor=_xor_write, + ) write_double = traits.Bool( - desc='Write out data as double precision floating-point values.', - argstr='-double', - xor=_xor_write) + desc="Write out data as double precision floating-point values.", + argstr="-double", + xor=_xor_write, + ) - _xor_signed = ('write_signed', 'write_unsigned') + _xor_signed = ("write_signed", "write_unsigned") write_signed = traits.Bool( - desc='Write out signed data.', argstr='-signed', xor=_xor_signed) + desc="Write out signed 
data.", argstr="-signed", xor=_xor_signed + ) write_unsigned = traits.Bool( - desc='Write out unsigned data.', argstr='-unsigned', xor=_xor_signed) + desc="Write out unsigned data.", argstr="-unsigned", xor=_xor_signed + ) write_range = traits.Tuple( traits.Float, traits.Float, - argstr='-range %s %s', - desc= - 'Specify the range of output values\nDefault value: 1.79769e+308 1.79769e+308.', + argstr="-range %s %s", + desc="Specify the range of output values\nDefault value: 1.79769e+308 1.79769e+308.", ) _xor_normalize = ( - 'normalize', - 'nonormalize', + "normalize", + "nonormalize", ) normalize = traits.Bool( - desc='Normalize integer pixel values to file max and min.', - argstr='-normalize', - xor=_xor_normalize) + desc="Normalize integer pixel values to file max and min.", + argstr="-normalize", + xor=_xor_normalize, + ) nonormalize = traits.Bool( - desc='Turn off pixel normalization.', - argstr='-nonormalize', - xor=_xor_normalize) + desc="Turn off pixel normalization.", argstr="-nonormalize", xor=_xor_normalize + ) image_range = traits.Tuple( traits.Float, traits.Float, - desc='Specify the range of real image values for normalization.', - argstr='-image_range %s %s') + desc="Specify the range of real image values for normalization.", + argstr="-image_range %s %s", + ) image_minimum = traits.Float( - desc=('Specify the minimum real image value for normalization.' - 'Default value: 1.79769e+308.'), - argstr='-image_minimum %s') + desc=( + "Specify the minimum real image value for normalization." + "Default value: 1.79769e+308." + ), + argstr="-image_minimum %s", + ) image_maximum = traits.Float( - desc=('Specify the maximum real image value for normalization.' - 'Default value: 1.79769e+308.'), - argstr='-image_maximum %s') + desc=( + "Specify the maximum real image value for normalization." + "Default value: 1.79769e+308." + ), + argstr="-image_maximum %s", + ) start = InputMultiPath( traits.Int, - desc='Specifies corner of hyperslab (C conventions for indices).', - sep=',', - argstr='-start %s', + desc="Specifies corner of hyperslab (C conventions for indices).", + sep=",", + argstr="-start %s", ) count = InputMultiPath( traits.Int, - desc='Specifies edge lengths of hyperslab to read.', - sep=',', - argstr='-count %s', + desc="Specifies edge lengths of hyperslab to read.", + sep=",", + argstr="-count %s", ) # FIXME Can we make sure that len(start) == len(count)? 
- _xor_flip = ('flip_positive_direction', 'flip_negative_direction', - 'flip_any_direction') + _xor_flip = ( + "flip_positive_direction", + "flip_negative_direction", + "flip_any_direction", + ) flip_positive_direction = traits.Bool( - desc='Flip images to always have positive direction.', - argstr='-positive_direction', - xor=_xor_flip) + desc="Flip images to always have positive direction.", + argstr="-positive_direction", + xor=_xor_flip, + ) flip_negative_direction = traits.Bool( - desc='Flip images to always have negative direction.', - argstr='-negative_direction', - xor=_xor_flip) + desc="Flip images to always have negative direction.", + argstr="-negative_direction", + xor=_xor_flip, + ) flip_any_direction = traits.Bool( - desc='Do not flip images (Default).', - argstr='-any_direction', - xor=_xor_flip) + desc="Do not flip images (Default).", argstr="-any_direction", xor=_xor_flip + ) - _xor_x_flip = ('flip_x_positive', 'flip_x_negative', 'flip_x_any') + _xor_x_flip = ("flip_x_positive", "flip_x_negative", "flip_x_any") flip_x_positive = traits.Bool( - desc='Flip images to give positive xspace:step value (left-to-right).', - argstr='+xdirection', - xor=_xor_x_flip) + desc="Flip images to give positive xspace:step value (left-to-right).", + argstr="+xdirection", + xor=_xor_x_flip, + ) flip_x_negative = traits.Bool( - desc='Flip images to give negative xspace:step value (right-to-left).', - argstr='-xdirection', - xor=_xor_x_flip) + desc="Flip images to give negative xspace:step value (right-to-left).", + argstr="-xdirection", + xor=_xor_x_flip, + ) flip_x_any = traits.Bool( - desc='Don\'t flip images along x-axis (default).', - argstr='-xanydirection', - xor=_xor_x_flip) + desc="Don't flip images along x-axis (default).", + argstr="-xanydirection", + xor=_xor_x_flip, + ) - _xor_y_flip = ('flip_y_positive', 'flip_y_negative', 'flip_y_any') + _xor_y_flip = ("flip_y_positive", "flip_y_negative", "flip_y_any") flip_y_positive = traits.Bool( - desc='Flip images to give positive yspace:step value (post-to-ant).', - argstr='+ydirection', - xor=_xor_y_flip) + desc="Flip images to give positive yspace:step value (post-to-ant).", + argstr="+ydirection", + xor=_xor_y_flip, + ) flip_y_negative = traits.Bool( - desc='Flip images to give negative yspace:step value (ant-to-post).', - argstr='-ydirection', - xor=_xor_y_flip) + desc="Flip images to give negative yspace:step value (ant-to-post).", + argstr="-ydirection", + xor=_xor_y_flip, + ) flip_y_any = traits.Bool( - desc='Don\'t flip images along y-axis (default).', - argstr='-yanydirection', - xor=_xor_y_flip) + desc="Don't flip images along y-axis (default).", + argstr="-yanydirection", + xor=_xor_y_flip, + ) - _xor_z_flip = ('flip_z_positive', 'flip_z_negative', 'flip_z_any') + _xor_z_flip = ("flip_z_positive", "flip_z_negative", "flip_z_any") flip_z_positive = traits.Bool( - desc='Flip images to give positive zspace:step value (inf-to-sup).', - argstr='+zdirection', - xor=_xor_z_flip) + desc="Flip images to give positive zspace:step value (inf-to-sup).", + argstr="+zdirection", + xor=_xor_z_flip, + ) flip_z_negative = traits.Bool( - desc='Flip images to give negative zspace:step value (sup-to-inf).', - argstr='-zdirection', - xor=_xor_z_flip) + desc="Flip images to give negative zspace:step value (sup-to-inf).", + argstr="-zdirection", + xor=_xor_z_flip, + ) flip_z_any = traits.Bool( - desc='Don\'t flip images along z-axis (default).', - argstr='-zanydirection', - xor=_xor_z_flip) + desc="Don't flip images along z-axis (default).", + 
argstr="-zanydirection", + xor=_xor_z_flip, + ) class ExtractOutputSpec(TraitedSpec): - output_file = File(desc='output file in raw/text format', exists=True) + output_file = File(desc="output file in raw/text format", exists=True) class Extract(StdOutCommandLine): @@ -231,89 +261,98 @@ class Extract(StdOutCommandLine): input_spec = ExtractInputSpec output_spec = ExtractOutputSpec - _cmd = 'mincextract' + _cmd = "mincextract" class ToRawInputSpec(StdOutCommandLineInputSpec): input_file = File( - desc='input file', - exists=True, - mandatory=True, - argstr='%s', - position=-2, + desc="input file", exists=True, mandatory=True, argstr="%s", position=-2, ) output_file = File( - desc='output file', + desc="output file", position=-1, - name_source=['input_file'], + name_source=["input_file"], hash_files=False, - name_template='%s.raw', - keep_extension=False) + name_template="%s.raw", + keep_extension=False, + ) - _xor_write = ('write_byte', 'write_short', 'write_int', 'write_long', - 'write_float', 'write_double') + _xor_write = ( + "write_byte", + "write_short", + "write_int", + "write_long", + "write_float", + "write_double", + ) write_byte = traits.Bool( - desc='Write out data as bytes.', argstr='-byte', xor=_xor_write) + desc="Write out data as bytes.", argstr="-byte", xor=_xor_write + ) write_short = traits.Bool( - desc='Write out data as short integers.', - argstr='-short', - xor=_xor_write) + desc="Write out data as short integers.", argstr="-short", xor=_xor_write + ) write_int = traits.Bool( - desc='Write out data as 32-bit integers.', - argstr='-int', - xor=_xor_write) + desc="Write out data as 32-bit integers.", argstr="-int", xor=_xor_write + ) write_long = traits.Bool( - desc='Superseded by write_int.', argstr='-long', xor=_xor_write) + desc="Superseded by write_int.", argstr="-long", xor=_xor_write + ) write_float = traits.Bool( - desc='Write out data as single precision floating-point values.', - argstr='-float', - xor=_xor_write) + desc="Write out data as single precision floating-point values.", + argstr="-float", + xor=_xor_write, + ) write_double = traits.Bool( - desc='Write out data as double precision floating-point values.', - argstr='-double', - xor=_xor_write) + desc="Write out data as double precision floating-point values.", + argstr="-double", + xor=_xor_write, + ) - _xor_signed = ('write_signed', 'write_unsigned') + _xor_signed = ("write_signed", "write_unsigned") write_signed = traits.Bool( - desc='Write out signed data.', argstr='-signed', xor=_xor_signed) + desc="Write out signed data.", argstr="-signed", xor=_xor_signed + ) write_unsigned = traits.Bool( - desc='Write out unsigned data.', argstr='-unsigned', xor=_xor_signed) + desc="Write out unsigned data.", argstr="-unsigned", xor=_xor_signed + ) write_range = traits.Tuple( traits.Float, traits.Float, - argstr='-range %s %s', - desc=('Specify the range of output values.' - 'Default value: 1.79769e+308 1.79769e+308.'), + argstr="-range %s %s", + desc=( + "Specify the range of output values." + "Default value: 1.79769e+308 1.79769e+308." 
+ ), ) _xor_normalize = ( - 'normalize', - 'nonormalize', + "normalize", + "nonormalize", ) normalize = traits.Bool( - desc='Normalize integer pixel values to file max and min.', - argstr='-normalize', - xor=_xor_normalize) + desc="Normalize integer pixel values to file max and min.", + argstr="-normalize", + xor=_xor_normalize, + ) nonormalize = traits.Bool( - desc='Turn off pixel normalization.', - argstr='-nonormalize', - xor=_xor_normalize) + desc="Turn off pixel normalization.", argstr="-nonormalize", xor=_xor_normalize + ) class ToRawOutputSpec(TraitedSpec): - output_file = File(desc='output file in raw format', exists=True) + output_file = File(desc="output file in raw format", exists=True) class ToRaw(StdOutCommandLine): @@ -335,40 +374,42 @@ class ToRaw(StdOutCommandLine): input_spec = ToRawInputSpec output_spec = ToRawOutputSpec - _cmd = 'minctoraw' + _cmd = "minctoraw" class ConvertInputSpec(CommandLineInputSpec): input_file = File( - desc='input file for converting', + desc="input file for converting", exists=True, mandatory=True, - argstr='%s', + argstr="%s", position=-2, ) output_file = File( - desc='output file', + desc="output file", genfile=True, - argstr='%s', + argstr="%s", position=-1, - name_source=['input_file'], + name_source=["input_file"], hash_files=False, - name_template='%s_convert_output.mnc') + name_template="%s_convert_output.mnc", + ) clobber = traits.Bool( - desc='Overwrite existing file.', - argstr='-clobber', + desc="Overwrite existing file.", + argstr="-clobber", usedefault=True, - default_value=True) - two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2') + default_value=True, + ) + two = traits.Bool(desc="Create a MINC 2 output file.", argstr="-2") template = traits.Bool( - desc= - ('Create a template file. The dimensions, variables, and' - 'attributes of the input file are preserved but all data it set to zero.' - ), - argstr='-template', + desc=( + "Create a template file. The dimensions, variables, and" + "attributes of the input file are preserved but all data it set to zero." 
+ ), + argstr="-template", ) compression = traits.Enum( @@ -382,20 +423,19 @@ class ConvertInputSpec(CommandLineInputSpec): 7, 8, 9, - argstr='-compress %s', - desc='Set the compression level, from 0 (disabled) to 9 (maximum).', + argstr="-compress %s", + desc="Set the compression level, from 0 (disabled) to 9 (maximum).", ) chunk = traits.Range( low=0, - desc= - 'Set the target block size for chunking (0 default, >1 block size).', - argstr='-chunk %d', + desc="Set the target block size for chunking (0 default, >1 block size).", + argstr="-chunk %d", ) class ConvertOutputSpec(TraitedSpec): - output_file = File(desc='output file', exists=True) + output_file = File(desc="output file", exists=True) class Convert(CommandLine): @@ -412,42 +452,43 @@ class Convert(CommandLine): input_spec = ConvertInputSpec output_spec = ConvertOutputSpec - _cmd = 'mincconvert' + _cmd = "mincconvert" class CopyInputSpec(CommandLineInputSpec): input_file = File( - desc='input file to copy', + desc="input file to copy", exists=True, mandatory=True, - argstr='%s', + argstr="%s", position=-2, ) output_file = File( - desc='output file', + desc="output file", genfile=True, - argstr='%s', + argstr="%s", position=-1, - name_source=['input_file'], + name_source=["input_file"], hash_files=False, - name_template='%s_copy.mnc') + name_template="%s_copy.mnc", + ) - _xor_pixel = ('pixel_values', 'real_values') + _xor_pixel = ("pixel_values", "real_values") pixel_values = traits.Bool( - desc='Copy pixel values as is.', - argstr='-pixel_values', - xor=_xor_pixel) + desc="Copy pixel values as is.", argstr="-pixel_values", xor=_xor_pixel + ) real_values = traits.Bool( - desc='Copy real pixel intensities (default).', - argstr='-real_values', - xor=_xor_pixel) + desc="Copy real pixel intensities (default).", + argstr="-real_values", + xor=_xor_pixel, + ) class CopyOutputSpec(TraitedSpec): - output_file = File(desc='output file', exists=True) + output_file = File(desc="output file", exists=True) class Copy(CommandLine): @@ -464,72 +505,75 @@ class Copy(CommandLine): input_spec = CopyInputSpec output_spec = CopyOutputSpec - _cmd = 'minccopy' + _cmd = "minccopy" class ToEcatInputSpec(CommandLineInputSpec): input_file = File( - desc='input file to convert', + desc="input file to convert", exists=True, mandatory=True, - argstr='%s', + argstr="%s", position=-2, ) output_file = File( - desc='output file', + desc="output file", genfile=True, - argstr='%s', + argstr="%s", position=-1, - name_source=['input_file'], + name_source=["input_file"], hash_files=False, - name_template='%s_to_ecat.v', - keep_extension=False) + name_template="%s_to_ecat.v", + keep_extension=False, + ) ignore_patient_variable = traits.Bool( - desc='Ignore informations from the minc patient variable.', - argstr='-ignore_patient_variable', + desc="Ignore informations from the minc patient variable.", + argstr="-ignore_patient_variable", ) ignore_study_variable = traits.Bool( - desc='Ignore informations from the minc study variable.', - argstr='-ignore_study_variable', + desc="Ignore informations from the minc study variable.", + argstr="-ignore_study_variable", ) ignore_acquisition_variable = traits.Bool( - desc='Ignore informations from the minc acquisition variable.', - argstr='-ignore_acquisition_variable', + desc="Ignore informations from the minc acquisition variable.", + argstr="-ignore_acquisition_variable", ) ignore_ecat_acquisition_variable = traits.Bool( - desc='Ignore informations from the minc ecat_acquisition variable.', - 
argstr='-ignore_ecat_acquisition_variable', + desc="Ignore informations from the minc ecat_acquisition variable.", + argstr="-ignore_ecat_acquisition_variable", ) ignore_ecat_main = traits.Bool( - desc='Ignore informations from the minc ecat-main variable.', - argstr='-ignore_ecat_main', + desc="Ignore informations from the minc ecat-main variable.", + argstr="-ignore_ecat_main", ) ignore_ecat_subheader_variable = traits.Bool( - desc='Ignore informations from the minc ecat-subhdr variable.', - argstr='-ignore_ecat_subheader_variable', + desc="Ignore informations from the minc ecat-subhdr variable.", + argstr="-ignore_ecat_subheader_variable", ) no_decay_corr_fctr = traits.Bool( - desc='Do not compute the decay correction factors', - argstr='-no_decay_corr_fctr', + desc="Do not compute the decay correction factors", + argstr="-no_decay_corr_fctr", ) voxels_as_integers = traits.Bool( - desc=('Voxel values are treated as integers, scale and' - 'calibration factors are set to unity'), - argstr='-label', + desc=( + "Voxel values are treated as integers, scale and" + "calibration factors are set to unity" + ), + argstr="-label", ) class ToEcatOutputSpec(TraitedSpec): - output_file = File(desc='output file', exists=True) + output_file = File(desc="output file", exists=True) class ToEcat(CommandLine): @@ -552,85 +596,84 @@ class ToEcat(CommandLine): input_spec = ToEcatInputSpec output_spec = ToEcatOutputSpec - _cmd = 'minctoecat' + _cmd = "minctoecat" class DumpInputSpec(StdOutCommandLineInputSpec): input_file = File( - desc='input file', - exists=True, - mandatory=True, - argstr='%s', - position=-2, + desc="input file", exists=True, mandatory=True, argstr="%s", position=-2, ) output_file = File( - desc='output file', + desc="output file", position=-1, - name_source=['input_file'], + name_source=["input_file"], hash_files=False, - name_template='%s_dump.txt', - keep_extension=False) + name_template="%s_dump.txt", + keep_extension=False, + ) _xor_coords_or_header = ( - 'coordinate_data', - 'header_data', + "coordinate_data", + "header_data", ) coordinate_data = traits.Bool( - desc='Coordinate variable data and header information.', - argstr='-c', - xor=_xor_coords_or_header) + desc="Coordinate variable data and header information.", + argstr="-c", + xor=_xor_coords_or_header, + ) header_data = traits.Bool( - desc='Header information only, no data.', - argstr='-h', - xor=_xor_coords_or_header) + desc="Header information only, no data.", argstr="-h", xor=_xor_coords_or_header + ) _xor_annotations = ( - 'annotations_brief', - 'annotations_full', + "annotations_brief", + "annotations_full", ) annotations_brief = traits.Enum( - 'c', - 'f', - argstr='-b %s', - desc='Brief annotations for C or Fortran indices in data.', - xor=_xor_annotations) + "c", + "f", + argstr="-b %s", + desc="Brief annotations for C or Fortran indices in data.", + xor=_xor_annotations, + ) annotations_full = traits.Enum( - 'c', - 'f', - argstr='-f %s', - desc='Full annotations for C or Fortran indices in data.', - xor=_xor_annotations) + "c", + "f", + argstr="-f %s", + desc="Full annotations for C or Fortran indices in data.", + xor=_xor_annotations, + ) variables = InputMultiPath( traits.Str, - desc='Output data for specified variables only.', - sep=',', - argstr='-v %s') + desc="Output data for specified variables only.", + sep=",", + argstr="-v %s", + ) line_length = traits.Range( - low=0, - desc='Line length maximum in data section (default 80).', - argstr='-l %d') + low=0, desc="Line length maximum in data section (default 
80).", argstr="-l %d" + ) netcdf_name = traits.Str( - desc='Name for netCDF (default derived from file name).', - argstr='-n %s') + desc="Name for netCDF (default derived from file name).", argstr="-n %s" + ) precision = traits.Either( traits.Int(), traits.Tuple(traits.Int, traits.Int), - desc='Display floating-point values with less precision', - argstr='%s', + desc="Display floating-point values with less precision", + argstr="%s", ) # See _format_arg in Dump for actual formatting. class DumpOutputSpec(TraitedSpec): - output_file = File(desc='output file', exists=True) + output_file = File(desc="output file", exists=True) class Dump(StdOutCommandLine): @@ -652,208 +695,227 @@ class Dump(StdOutCommandLine): input_spec = DumpInputSpec output_spec = DumpOutputSpec - _cmd = 'mincdump' + _cmd = "mincdump" def _format_arg(self, name, spec, value): - if name == 'precision': + if name == "precision": if isinstance(value, int): - return '-p %d' % value - elif isinstance(value, tuple) and isinstance( - value[0], int) and isinstance(value[1], int): - return '-p %d,%d' % ( - value[0], - value[1], - ) + return "-p %d" % value + elif ( + isinstance(value, tuple) + and isinstance(value[0], int) + and isinstance(value[1], int) + ): + return "-p %d,%d" % (value[0], value[1],) else: - raise ValueError('Invalid precision argument: ' + str(value)) + raise ValueError("Invalid precision argument: " + str(value)) return super(Dump, self)._format_arg(name, spec, value) class AverageInputSpec(CommandLineInputSpec): - _xor_input_files = ('input_files', 'filelist') + _xor_input_files = ("input_files", "filelist") input_files = InputMultiPath( File(exists=True), - desc='input file(s)', + desc="input file(s)", mandatory=True, - sep=' ', - argstr='%s', + sep=" ", + argstr="%s", position=-2, - xor=_xor_input_files) + xor=_xor_input_files, + ) filelist = File( - desc='Specify the name of a file containing input file names.', - argstr='-filelist %s', + desc="Specify the name of a file containing input file names.", + argstr="-filelist %s", exists=True, mandatory=True, - xor=_xor_input_files) + xor=_xor_input_files, + ) output_file = File( - desc='output file', + desc="output file", genfile=True, - argstr='%s', + argstr="%s", position=-1, - name_source=['input_files'], + name_source=["input_files"], hash_files=False, - name_template='%s_averaged.mnc') + name_template="%s_averaged.mnc", + ) - two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2') + two = traits.Bool(desc="Create a MINC 2 output file.", argstr="-2") clobber = traits.Bool( - desc='Overwrite existing file.', - argstr='-clobber', + desc="Overwrite existing file.", + argstr="-clobber", usedefault=True, - default_value=True) + default_value=True, + ) _xor_verbose = ( - 'verbose', - 'quiet', + "verbose", + "quiet", ) verbose = traits.Bool( - desc='Print out log messages (default).', - argstr='-verbose', - xor=_xor_verbose) + desc="Print out log messages (default).", argstr="-verbose", xor=_xor_verbose + ) quiet = traits.Bool( - desc='Do not print out log messages.', - argstr='-quiet', - xor=_xor_verbose) + desc="Do not print out log messages.", argstr="-quiet", xor=_xor_verbose + ) - debug = traits.Bool(desc='Print out debugging messages.', argstr='-debug') + debug = traits.Bool(desc="Print out debugging messages.", argstr="-debug") _xor_check_dimensions = ( - 'check_dimensions', - 'no_check_dimensions', + "check_dimensions", + "no_check_dimensions", ) check_dimensions = traits.Bool( - desc='Check that dimension info matches across files 
(default).', - argstr='-check_dimensions', - xor=_xor_check_dimensions) + desc="Check that dimension info matches across files (default).", + argstr="-check_dimensions", + xor=_xor_check_dimensions, + ) no_check_dimensions = traits.Bool( - desc='Do not check dimension info.', - argstr='-nocheck_dimensions', - xor=_xor_check_dimensions) + desc="Do not check dimension info.", + argstr="-nocheck_dimensions", + xor=_xor_check_dimensions, + ) _xor_format = ( - 'format_filetype', - 'format_byte', - 'format_short', - 'format_int', - 'format_long', - 'format_float', - 'format_double', - 'format_signed', - 'format_unsigned', + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", ) format_filetype = traits.Bool( - desc='Use data type of first file (default).', - argstr='-filetype', - xor=_xor_format) + desc="Use data type of first file (default).", + argstr="-filetype", + xor=_xor_format, + ) format_byte = traits.Bool( - desc='Write out byte data.', argstr='-byte', xor=_xor_format) + desc="Write out byte data.", argstr="-byte", xor=_xor_format + ) format_short = traits.Bool( - desc='Write out short integer data.', argstr='-short', xor=_xor_format) + desc="Write out short integer data.", argstr="-short", xor=_xor_format + ) format_int = traits.Bool( - desc='Write out 32-bit integer data.', argstr='-int', xor=_xor_format) + desc="Write out 32-bit integer data.", argstr="-int", xor=_xor_format + ) format_long = traits.Bool( - desc='Superseded by -int.', argstr='-long', xor=_xor_format) + desc="Superseded by -int.", argstr="-long", xor=_xor_format + ) format_float = traits.Bool( - desc='Write out single-precision floating-point data.', - argstr='-float', - xor=_xor_format) + desc="Write out single-precision floating-point data.", + argstr="-float", + xor=_xor_format, + ) format_double = traits.Bool( - desc='Write out double-precision floating-point data.', - argstr='-double', - xor=_xor_format) + desc="Write out double-precision floating-point data.", + argstr="-double", + xor=_xor_format, + ) format_signed = traits.Bool( - desc='Write signed integer data.', argstr='-signed', xor=_xor_format) + desc="Write signed integer data.", argstr="-signed", xor=_xor_format + ) format_unsigned = traits.Bool( - desc='Write unsigned integer data (default).', - argstr='-unsigned', - xor=_xor_format) + desc="Write unsigned integer data (default).", + argstr="-unsigned", + xor=_xor_format, + ) max_buffer_size_in_kb = traits.Range( low=0, - desc='Specify the maximum size of the internal buffers (in kbytes).', + desc="Specify the maximum size of the internal buffers (in kbytes).", value=4096, usedefault=True, - argstr='-max_buffer_size_in_kb %d', + argstr="-max_buffer_size_in_kb %d", ) _xor_normalize = ( - 'normalize', - 'nonormalize', + "normalize", + "nonormalize", ) normalize = traits.Bool( - desc='Normalize data sets for mean intensity.', - argstr='-normalize', - xor=_xor_normalize) + desc="Normalize data sets for mean intensity.", + argstr="-normalize", + xor=_xor_normalize, + ) nonormalize = traits.Bool( - desc='Do not normalize data sets (default).', - argstr='-nonormalize', - xor=_xor_normalize) + desc="Do not normalize data sets (default).", + argstr="-nonormalize", + xor=_xor_normalize, + ) voxel_range = traits.Tuple( traits.Int, traits.Int, - argstr='-range %d %d', - desc='Valid range for output data.') + argstr="-range %d %d", + desc="Valid range for output data.", + ) - sdfile = File( - desc='Specify 
an output sd file (default=none).', argstr='-sdfile %s') + sdfile = File(desc="Specify an output sd file (default=none).", argstr="-sdfile %s") - _xor_copy_header = ('copy_header', 'no_copy_header') + _xor_copy_header = ("copy_header", "no_copy_header") copy_header = traits.Bool( - desc= - 'Copy all of the header from the first file (default for one file).', - argstr='-copy_header', - xor=_xor_copy_header) + desc="Copy all of the header from the first file (default for one file).", + argstr="-copy_header", + xor=_xor_copy_header, + ) no_copy_header = traits.Bool( - desc= - 'Do not copy all of the header from the first file (default for many files)).', - argstr='-nocopy_header', - xor=_xor_copy_header) + desc="Do not copy all of the header from the first file (default for many files)).", + argstr="-nocopy_header", + xor=_xor_copy_header, + ) avgdim = traits.Str( - desc='Specify a dimension along which we wish to average.', - argstr='-avgdim %s') + desc="Specify a dimension along which we wish to average.", argstr="-avgdim %s" + ) binarize = traits.Bool( - desc='Binarize the volume by looking for values in a given range.', - argstr='-binarize') + desc="Binarize the volume by looking for values in a given range.", + argstr="-binarize", + ) binrange = traits.Tuple( traits.Float, traits.Float, - argstr='-binrange %s %s', - desc= - 'Specify a range for binarization. Default value: 1.79769e+308 -1.79769e+308.' + argstr="-binrange %s %s", + desc="Specify a range for binarization. Default value: 1.79769e+308 -1.79769e+308.", ) binvalue = traits.Float( - desc=('Specify a target value (+/- 0.5) for' - 'binarization. Default value: -1.79769e+308'), - argstr='-binvalue %s') + desc=( + "Specify a target value (+/- 0.5) for" + "binarization. Default value: -1.79769e+308" + ), + argstr="-binvalue %s", + ) weights = InputMultiPath( traits.Str, desc='Specify weights for averaging (",,...").', - sep=',', - argstr='-weights %s', + sep=",", + argstr="-weights %s", ) width_weighted = traits.Bool( - desc='Weight by dimension widths when -avgdim is used.', - argstr='-width_weighted', - requires=('avgdim', )) + desc="Weight by dimension widths when -avgdim is used.", + argstr="-width_weighted", + requires=("avgdim",), + ) class AverageOutputSpec(TraitedSpec): - output_file = File(desc='output file', exists=True) + output_file = File(desc="output file", exists=True) class Average(CommandLine): @@ -873,43 +935,46 @@ class Average(CommandLine): input_spec = AverageInputSpec output_spec = AverageOutputSpec - _cmd = 'mincaverage' + _cmd = "mincaverage" class BlobInputSpec(CommandLineInputSpec): input_file = File( - desc='input file to blob', + desc="input file to blob", exists=True, mandatory=True, - argstr='%s', + argstr="%s", position=-2, ) output_file = File( - desc='output file', + desc="output file", genfile=True, - argstr='%s', + argstr="%s", position=-1, - name_source=['input_file'], + name_source=["input_file"], hash_files=False, - name_template='%s_blob.mnc') + name_template="%s_blob.mnc", + ) trace = traits.Bool( - desc='compute the trace (approximate growth and shrinkage) -- FAST', - argstr='-trace') + desc="compute the trace (approximate growth and shrinkage) -- FAST", + argstr="-trace", + ) determinant = traits.Bool( - desc='compute the determinant (exact growth and shrinkage) -- SLOW', - argstr='-determinant') + desc="compute the determinant (exact growth and shrinkage) -- SLOW", + argstr="-determinant", + ) translation = traits.Bool( - desc='compute translation (structure displacement)', - 
argstr='-translation') + desc="compute translation (structure displacement)", argstr="-translation" + ) magnitude = traits.Bool( - desc='compute the magnitude of the displacement vector', - argstr='-magnitude') + desc="compute the magnitude of the displacement vector", argstr="-magnitude" + ) class BlobOutputSpec(TraitedSpec): - output_file = File(desc='output file', exists=True) + output_file = File(desc="output file", exists=True) class Blob(CommandLine): @@ -927,175 +992,194 @@ class Blob(CommandLine): input_spec = BlobInputSpec output_spec = BlobOutputSpec - _cmd = 'mincblob' + _cmd = "mincblob" class CalcInputSpec(CommandLineInputSpec): - _xor_input_files = ('input_files', 'filelist') + _xor_input_files = ("input_files", "filelist") input_files = InputMultiPath( File(exists=True), - desc='input file(s) for calculation', + desc="input file(s) for calculation", mandatory=True, - sep=' ', - argstr='%s', + sep=" ", + argstr="%s", position=-2, ) output_file = File( - desc='output file', + desc="output file", genfile=True, - argstr='%s', + argstr="%s", position=-1, - name_source=['input_files'], + name_source=["input_files"], hash_files=False, - name_template='%s_calc.mnc') + name_template="%s_calc.mnc", + ) - two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2') + two = traits.Bool(desc="Create a MINC 2 output file.", argstr="-2") clobber = traits.Bool( - desc='Overwrite existing file.', - argstr='-clobber', + desc="Overwrite existing file.", + argstr="-clobber", usedefault=True, - default_value=True) + default_value=True, + ) _xor_verbose = ( - 'verbose', - 'quiet', + "verbose", + "quiet", ) verbose = traits.Bool( - desc='Print out log messages (default).', - argstr='-verbose', - xor=_xor_verbose) + desc="Print out log messages (default).", argstr="-verbose", xor=_xor_verbose + ) quiet = traits.Bool( - desc='Do not print out log messages.', - argstr='-quiet', - xor=_xor_verbose) + desc="Do not print out log messages.", argstr="-quiet", xor=_xor_verbose + ) - debug = traits.Bool(desc='Print out debugging messages.', argstr='-debug') + debug = traits.Bool(desc="Print out debugging messages.", argstr="-debug") filelist = File( - desc='Specify the name of a file containing input file names.', - argstr='-filelist %s', + desc="Specify the name of a file containing input file names.", + argstr="-filelist %s", mandatory=True, - xor=_xor_input_files) + xor=_xor_input_files, + ) - _xor_copy_header = ('copy_header', 'no_copy_header') + _xor_copy_header = ("copy_header", "no_copy_header") copy_header = traits.Bool( - desc='Copy all of the header from the first file.', - argstr='-copy_header', - xor=_xor_copy_header) + desc="Copy all of the header from the first file.", + argstr="-copy_header", + xor=_xor_copy_header, + ) no_copy_header = traits.Bool( - desc='Do not copy all of the header from the first file.', - argstr='-nocopy_header', - xor=_xor_copy_header) + desc="Do not copy all of the header from the first file.", + argstr="-nocopy_header", + xor=_xor_copy_header, + ) _xor_format = ( - 'format_filetype', - 'format_byte', - 'format_short', - 'format_int', - 'format_long', - 'format_float', - 'format_double', - 'format_signed', - 'format_unsigned', + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", ) format_filetype = traits.Bool( - desc='Use data type of first file (default).', - argstr='-filetype', - xor=_xor_format) + desc="Use data type of first file (default).", + 
argstr="-filetype", + xor=_xor_format, + ) format_byte = traits.Bool( - desc='Write out byte data.', argstr='-byte', xor=_xor_format) + desc="Write out byte data.", argstr="-byte", xor=_xor_format + ) format_short = traits.Bool( - desc='Write out short integer data.', argstr='-short', xor=_xor_format) + desc="Write out short integer data.", argstr="-short", xor=_xor_format + ) format_int = traits.Bool( - desc='Write out 32-bit integer data.', argstr='-int', xor=_xor_format) + desc="Write out 32-bit integer data.", argstr="-int", xor=_xor_format + ) format_long = traits.Bool( - desc='Superseded by -int.', argstr='-long', xor=_xor_format) + desc="Superseded by -int.", argstr="-long", xor=_xor_format + ) format_float = traits.Bool( - desc='Write out single-precision floating-point data.', - argstr='-float', - xor=_xor_format) + desc="Write out single-precision floating-point data.", + argstr="-float", + xor=_xor_format, + ) format_double = traits.Bool( - desc='Write out double-precision floating-point data.', - argstr='-double', - xor=_xor_format) + desc="Write out double-precision floating-point data.", + argstr="-double", + xor=_xor_format, + ) format_signed = traits.Bool( - desc='Write signed integer data.', argstr='-signed', xor=_xor_format) + desc="Write signed integer data.", argstr="-signed", xor=_xor_format + ) format_unsigned = traits.Bool( - desc='Write unsigned integer data (default).', - argstr='-unsigned', - xor=_xor_format) + desc="Write unsigned integer data (default).", + argstr="-unsigned", + xor=_xor_format, + ) voxel_range = traits.Tuple( traits.Int, traits.Int, - argstr='-range %d %d', - desc='Valid range for output data.', + argstr="-range %d %d", + desc="Valid range for output data.", ) max_buffer_size_in_kb = traits.Range( low=0, - desc='Specify the maximum size of the internal buffers (in kbytes).', - argstr='-max_buffer_size_in_kb %d') + desc="Specify the maximum size of the internal buffers (in kbytes).", + argstr="-max_buffer_size_in_kb %d", + ) _xor_check_dimensions = ( - 'check_dimensions', - 'no_check_dimensions', + "check_dimensions", + "no_check_dimensions", ) check_dimensions = traits.Bool( - desc='Check that files have matching dimensions (default).', - argstr='-check_dimensions', - xor=_xor_check_dimensions) + desc="Check that files have matching dimensions (default).", + argstr="-check_dimensions", + xor=_xor_check_dimensions, + ) no_check_dimensions = traits.Bool( - desc='Do not check that files have matching dimensions.', - argstr='-nocheck_dimensions', - xor=_xor_check_dimensions) + desc="Do not check that files have matching dimensions.", + argstr="-nocheck_dimensions", + xor=_xor_check_dimensions, + ) # FIXME Is it sensible to use ignore_nan and propagate_nan at the same # time? Document this. ignore_nan = traits.Bool( - desc='Ignore invalid data (NaN) for accumulations.', - argstr='-ignore_nan') + desc="Ignore invalid data (NaN) for accumulations.", argstr="-ignore_nan" + ) propagate_nan = traits.Bool( - desc='Invalid data in any file at a voxel produces a NaN (default).', - argstr='-propagate_nan') + desc="Invalid data in any file at a voxel produces a NaN (default).", + argstr="-propagate_nan", + ) # FIXME Double-check that these are mutually exclusive? 
- _xor_nan_zero_illegal = ('output_nan', 'output_zero', - 'output_illegal_value') + _xor_nan_zero_illegal = ("output_nan", "output_zero", "output_illegal_value") output_nan = traits.Bool( - desc='Output NaN when an illegal operation is done (default).', - argstr='-nan', - xor=_xor_nan_zero_illegal) + desc="Output NaN when an illegal operation is done (default).", + argstr="-nan", + xor=_xor_nan_zero_illegal, + ) output_zero = traits.Bool( - desc='Output zero when an illegal operation is done.', - argstr='-zero', - xor=_xor_nan_zero_illegal) + desc="Output zero when an illegal operation is done.", + argstr="-zero", + xor=_xor_nan_zero_illegal, + ) output_illegal = traits.Bool( - desc= - 'Value to write out when an illegal operation is done. Default value: 1.79769e+308', - argstr='-illegal_value', - xor=_xor_nan_zero_illegal) + desc="Value to write out when an illegal operation is done. Default value: 1.79769e+308", + argstr="-illegal_value", + xor=_xor_nan_zero_illegal, + ) - _xor_expression = ('expression', 'expfile') + _xor_expression = ("expression", "expfile") expression = traits.Str( - desc='Expression to use in calculations.', - argstr='-expression \'%s\'', + desc="Expression to use in calculations.", + argstr="-expression '%s'", xor=_xor_expression, - mandatory=True) + mandatory=True, + ) expfile = File( - desc='Name of file containing expression.', - argstr='-expfile %s', + desc="Name of file containing expression.", + argstr="-expfile %s", xor=_xor_expression, - mandatory=True) + mandatory=True, + ) # FIXME test this one, the argstr will probably need tweaking, see # _format_arg. @@ -1103,22 +1187,24 @@ class CalcInputSpec(CommandLineInputSpec): traits.Tuple( traits.Str, File, - argstr='-outfile %s %s', - desc= - ('List of (symbol, file) tuples indicating that output should be written' - 'to the specified file, taking values from the symbol which should be' - 'created in the expression (see the EXAMPLES section). If this option' - 'is given, then all non-option arguments are taken as input files.' - 'This option can be used multiple times for multiple output files.' - ))) + argstr="-outfile %s %s", + desc=( + "List of (symbol, file) tuples indicating that output should be written" + "to the specified file, taking values from the symbol which should be" + "created in the expression (see the EXAMPLES section). If this option" + "is given, then all non-option arguments are taken as input files." + "This option can be used multiple times for multiple output files." 
+ ), + ) + ) eval_width = traits.Int( - desc='Number of voxels to evaluate simultaneously.', - argstr='-eval_width %s') + desc="Number of voxels to evaluate simultaneously.", argstr="-eval_width %s" + ) class CalcOutputSpec(TraitedSpec): - output_file = File(desc='output file', exists=True) + output_file = File(desc="output file", exists=True) class Calc(CommandLine): @@ -1138,7 +1224,7 @@ class Calc(CommandLine): input_spec = CalcInputSpec output_spec = CalcOutputSpec - _cmd = 'minccalc' + _cmd = "minccalc" # FIXME mincbbox produces output like @@ -1151,47 +1237,49 @@ class Calc(CommandLine): class BBoxInputSpec(StdOutCommandLineInputSpec): input_file = File( - desc='input file', - exists=True, - mandatory=True, - argstr='%s', - position=-2, + desc="input file", exists=True, mandatory=True, argstr="%s", position=-2, ) output_file = File( - desc='output file containing bounding box corners', + desc="output file containing bounding box corners", position=-1, - name_source=['input_file'], + name_source=["input_file"], hash_files=False, - name_template='%s_bbox.txt', - keep_extension=False) + name_template="%s_bbox.txt", + keep_extension=False, + ) threshold = traits.Int( 0, - desc='VIO_Real value threshold for bounding box. Default value: 0.', - argstr='-threshold') + desc="VIO_Real value threshold for bounding box. Default value: 0.", + argstr="-threshold", + ) - _xor_one_two = ('one_line', 'two_lines') + _xor_one_two = ("one_line", "two_lines") one_line = traits.Bool( - desc='Output on one line (default): start_x y z width_x y z', - argstr='-one_line', - xor=_xor_one_two) + desc="Output on one line (default): start_x y z width_x y z", + argstr="-one_line", + xor=_xor_one_two, + ) two_lines = traits.Bool( - desc='Output on two lines: start_x y z \n width_x y z', - argstr='-two_lines', - xor=_xor_one_two) + desc="Output on two lines: start_x y z \n width_x y z", + argstr="-two_lines", + xor=_xor_one_two, + ) format_mincresample = traits.Bool( - desc= - 'Output format for mincresample: (-step x y z -start x y z -nelements x y z', - argstr='-mincresample') + desc="Output format for mincresample: (-step x y z -start x y z -nelements x y z", + argstr="-mincresample", + ) format_mincreshape = traits.Bool( - desc='Output format for mincreshape: (-start x,y,z -count dx,dy,dz', - argstr='-mincreshape') + desc="Output format for mincreshape: (-start x,y,z -count dx,dy,dz", + argstr="-mincreshape", + ) format_minccrop = traits.Bool( - desc='Output format for minccrop: (-xlim x1 x2 -ylim y1 y2 -zlim z1 z2', - argstr='-minccrop') + desc="Output format for minccrop: (-xlim x1 x2 -ylim y1 y2 -zlim z1 z2", + argstr="-minccrop", + ) # FIXME Not implemented, will clash with our parsing of the output? 
# Command-specific options: @@ -1202,8 +1290,7 @@ class BBoxInputSpec(StdOutCommandLineInputSpec): class BBoxOutputSpec(TraitedSpec): - output_file = File( - desc='output file containing bounding box corners', exists=True) + output_file = File(desc="output file containing bounding box corners", exists=True) class BBox(StdOutCommandLine): @@ -1222,7 +1309,7 @@ class BBox(StdOutCommandLine): input_spec = BBoxInputSpec output_spec = BBoxOutputSpec - _cmd = 'mincbbox' + _cmd = "mincbbox" class BeastInputSpec(CommandLineInputSpec): @@ -1272,94 +1359,117 @@ class BeastInputSpec(CommandLineInputSpec): """ probability_map = traits.Bool( - desc='Output the probability map instead of crisp mask.', - argstr='-probability') + desc="Output the probability map instead of crisp mask.", argstr="-probability" + ) flip_images = traits.Bool( - desc= - 'Flip images around the mid-sagittal plane to increase patch count.', - argstr='-flip') + desc="Flip images around the mid-sagittal plane to increase patch count.", + argstr="-flip", + ) load_moments = traits.Bool( - desc=('Do not calculate moments instead use precalculated' - 'library moments. (for optimization purposes)'), - argstr='-load_moments') - fill_holes = traits.Bool( - desc='Fill holes in the binary output.', argstr='-fill') + desc=( + "Do not calculate moments instead use precalculated" + "library moments. (for optimization purposes)" + ), + argstr="-load_moments", + ) + fill_holes = traits.Bool(desc="Fill holes in the binary output.", argstr="-fill") median_filter = traits.Bool( - desc='Apply a median filter on the probability map.', argstr='-median') + desc="Apply a median filter on the probability map.", argstr="-median" + ) nlm_filter = traits.Bool( - desc='Apply an NLM filter on the probability map (experimental).', - argstr='-nlm_filter') + desc="Apply an NLM filter on the probability map (experimental).", + argstr="-nlm_filter", + ) clobber = traits.Bool( - desc='Overwrite existing file.', - argstr='-clobber', + desc="Overwrite existing file.", + argstr="-clobber", usedefault=True, - default_value=True) + default_value=True, + ) configuration_file = File( - desc='Specify configuration file.', argstr='-configuration %s') + desc="Specify configuration file.", argstr="-configuration %s" + ) voxel_size = traits.Int( - 4, usedefault=True, - desc=('Specify voxel size for calculations (4, 2, or 1).' - 'Default value: 4. Assumes no multiscale. Use configuration' - 'file for multiscale.'), - argstr='-voxel_size %s') + 4, + usedefault=True, + desc=( + "Specify voxel size for calculations (4, 2, or 1)." + "Default value: 4. Assumes no multiscale. Use configuration" + "file for multiscale." + ), + argstr="-voxel_size %s", + ) abspath = traits.Bool( - desc= - 'File paths in the library are absolute (default is relative to library root).', - argstr='-abspath', + desc="File paths in the library are absolute (default is relative to library root).", + argstr="-abspath", usedefault=True, - default_value=True) + default_value=True, + ) patch_size = traits.Int( - 1, usedefault=True, - desc='Specify patch size for single scale approach. Default value: 1.', - argstr='-patch_size %s') + 1, + usedefault=True, + desc="Specify patch size for single scale approach. Default value: 1.", + argstr="-patch_size %s", + ) search_area = traits.Int( - 2, usedefault=True, - desc= - 'Specify size of search area for single scale approach. Default value: 2.', - argstr='-search_area %s') + 2, + usedefault=True, + desc="Specify size of search area for single scale approach. 
Default value: 2.", + argstr="-search_area %s", + ) confidence_level_alpha = traits.Float( - 0.5, usedefault=True, - desc='Specify confidence level Alpha. Default value: 0.5', - argstr='-alpha %s') + 0.5, + usedefault=True, + desc="Specify confidence level Alpha. Default value: 0.5", + argstr="-alpha %s", + ) smoothness_factor_beta = traits.Float( - 0.5, usedefault=True, - desc='Specify smoothness factor Beta. Default value: 0.25', - argstr='-beta %s') + 0.5, + usedefault=True, + desc="Specify smoothness factor Beta. Default value: 0.25", + argstr="-beta %s", + ) threshold_patch_selection = traits.Float( - 0.95, usedefault=True, - desc='Specify threshold for patch selection. Default value: 0.95', - argstr='-threshold %s') + 0.95, + usedefault=True, + desc="Specify threshold for patch selection. Default value: 0.95", + argstr="-threshold %s", + ) number_selected_images = traits.Int( - 20, usedefault=True, - desc='Specify number of selected images. Default value: 20', - argstr='-selection_num %s') + 20, + usedefault=True, + desc="Specify number of selected images. Default value: 20", + argstr="-selection_num %s", + ) same_resolution = traits.Bool( - desc='Output final mask with the same resolution as input file.', - argstr='-same_resolution') + desc="Output final mask with the same resolution as input file.", + argstr="-same_resolution", + ) library_dir = Directory( - desc='library directory', position=-3, argstr='%s', mandatory=True) - input_file = File( - desc='input file', position=-2, argstr='%s', mandatory=True) + desc="library directory", position=-3, argstr="%s", mandatory=True + ) + input_file = File(desc="input file", position=-2, argstr="%s", mandatory=True) output_file = File( - desc='output file', + desc="output file", position=-1, - argstr='%s', - name_source=['input_file'], + argstr="%s", + name_source=["input_file"], hash_files=False, - name_template='%s_beast_mask.mnc') + name_template="%s_beast_mask.mnc", + ) class BeastOutputSpec(TraitedSpec): - output_file = File(desc='output mask file', exists=True) + output_file = File(desc="output mask file", exists=True) class Beast(CommandLine): @@ -1379,150 +1489,158 @@ class Beast(CommandLine): input_spec = BeastInputSpec output_spec = BeastOutputSpec - _cmd = 'mincbeast' + _cmd = "mincbeast" class PikInputSpec(CommandLineInputSpec): input_file = File( - desc='input file', - exists=True, - mandatory=True, - argstr='%s', - position=-2, + desc="input file", exists=True, mandatory=True, argstr="%s", position=-2, ) - _xor_image_type = ('jpg', 'png') + _xor_image_type = ("jpg", "png") - jpg = traits.Bool(desc='Output a jpg file.', xor=_xor_image_type) - png = traits.Bool(desc='Output a png file (default).', xor=_xor_image_type) + jpg = traits.Bool(desc="Output a jpg file.", xor=_xor_image_type) + png = traits.Bool(desc="Output a png file (default).", xor=_xor_image_type) output_file = File( - desc='output file', - argstr='%s', + desc="output file", + argstr="%s", genfile=True, position=-1, - name_source=['input_file'], + name_source=["input_file"], hash_files=False, - name_template='%s.png', - keep_extension=False) + name_template="%s.png", + keep_extension=False, + ) clobber = traits.Bool( - desc='Overwrite existing file.', - argstr='-clobber', + desc="Overwrite existing file.", + argstr="-clobber", usedefault=True, - default_value=True) + default_value=True, + ) # FIXME not implemented: --verbose # --fake # --lookup ==> arguments to pass to minclookup scale = traits.Int( - 2, usedefault=True, - desc=('Scaling factor for resulting 
image. By default images are' - 'output at twice their original resolution.'), - argstr='--scale %s') + 2, + usedefault=True, + desc=( + "Scaling factor for resulting image. By default images are" + "output at twice their original resolution." + ), + argstr="--scale %s", + ) width = traits.Int( - desc= - 'Autoscale the resulting image to have a fixed image width (in pixels).', - argstr='--width %s') + desc="Autoscale the resulting image to have a fixed image width (in pixels).", + argstr="--width %s", + ) depth = traits.Enum( 8, 16, - desc='Bitdepth for resulting image 8 or 16 (MSB machines only!)', - argstr='--depth %s') + desc="Bitdepth for resulting image 8 or 16 (MSB machines only!)", + argstr="--depth %s", + ) - _xor_title = ('title_string', 'title_with_filename') + _xor_title = ("title_string", "title_with_filename") title = traits.Either( - traits.Bool(desc='Use input filename as title in resulting image.'), - traits.Str(desc='Add a title to the resulting image.'), - argstr='%s') # see _format_arg for actual arg string + traits.Bool(desc="Use input filename as title in resulting image."), + traits.Str(desc="Add a title to the resulting image."), + argstr="%s", + ) # see _format_arg for actual arg string title_size = traits.Int( - desc='Font point size for the title.', - argstr='--title_size %s', - requires=['title']) + desc="Font point size for the title.", + argstr="--title_size %s", + requires=["title"], + ) annotated_bar = traits.Bool( - desc= - 'create an annotated bar to match the image (use height of the output image)', - argstr='--anot_bar') + desc="create an annotated bar to match the image (use height of the output image)", + argstr="--anot_bar", + ) # FIXME tuple of floats? Not voxel values? Man page doesn't specify. minc_range = traits.Tuple( traits.Float, traits.Float, - desc='Valid range of values for MINC file.', - argstr='--range %s %s') + desc="Valid range of values for MINC file.", + argstr="--range %s %s", + ) - _xor_image_range = ('image_range', 'auto_range') + _xor_image_range = ("image_range", "auto_range") image_range = traits.Tuple( traits.Float, traits.Float, - desc='Range of image values to use for pixel intensity.', - argstr='--image_range %s %s', - xor=_xor_image_range) + desc="Range of image values to use for pixel intensity.", + argstr="--image_range %s %s", + xor=_xor_image_range, + ) auto_range = traits.Bool( - desc= - 'Automatically determine image range using a 5 and 95% PcT. (histogram)', - argstr='--auto_range', - xor=_xor_image_range) + desc="Automatically determine image range using a 5 and 95% PcT. (histogram)", + argstr="--auto_range", + xor=_xor_image_range, + ) start = traits.Int( - desc='Slice number to get. (note this is in voxel co-ordinates).', - argstr='--slice %s') # FIXME Int is correct? + desc="Slice number to get. (note this is in voxel co-ordinates).", + argstr="--slice %s", + ) # FIXME Int is correct? - _xor_slice = ('slice_z', 'slice_y', 'slice_x') + _xor_slice = ("slice_z", "slice_y", "slice_x") slice_z = traits.Bool( - desc='Get an axial/transverse (z) slice.', argstr='-z', xor=_xor_slice) - slice_y = traits.Bool( - desc='Get a coronal (y) slice.', argstr='-y', xor=_xor_slice) + desc="Get an axial/transverse (z) slice.", argstr="-z", xor=_xor_slice + ) + slice_y = traits.Bool(desc="Get a coronal (y) slice.", argstr="-y", xor=_xor_slice) slice_x = traits.Bool( - desc='Get a sagittal (x) slice.', argstr='-x', - xor=_xor_slice) # FIXME typo in man page? sagital? 
+ desc="Get a sagittal (x) slice.", argstr="-x", xor=_xor_slice + ) # FIXME typo in man page? sagital? triplanar = traits.Bool( - desc='Create a triplanar view of the input file.', - argstr='--triplanar') + desc="Create a triplanar view of the input file.", argstr="--triplanar" + ) tile_size = traits.Int( - desc='Pixel size for each image in a triplanar.', - argstr='--tilesize %s') + desc="Pixel size for each image in a triplanar.", argstr="--tilesize %s" + ) - _xor_sagittal_offset = ('sagittal_offset', 'sagittal_offset_perc') + _xor_sagittal_offset = ("sagittal_offset", "sagittal_offset_perc") sagittal_offset = traits.Int( - desc='Offset the sagittal slice from the centre.', - argstr='--sagittal_offset %s') + desc="Offset the sagittal slice from the centre.", argstr="--sagittal_offset %s" + ) sagittal_offset_perc = traits.Range( low=0, high=100, - desc='Offset the sagittal slice by a percentage from the centre.', - argstr='--sagittal_offset_perc %d', + desc="Offset the sagittal slice by a percentage from the centre.", + argstr="--sagittal_offset_perc %d", ) - _xor_vertical_horizontal = ('vertical_triplanar_view', - 'horizontal_triplanar_view') + _xor_vertical_horizontal = ("vertical_triplanar_view", "horizontal_triplanar_view") vertical_triplanar_view = traits.Bool( - desc='Create a vertical triplanar view (Default).', - argstr='--vertical', - xor=_xor_vertical_horizontal) + desc="Create a vertical triplanar view (Default).", + argstr="--vertical", + xor=_xor_vertical_horizontal, + ) horizontal_triplanar_view = traits.Bool( - desc='Create a horizontal triplanar view.', - argstr='--horizontal', - xor=_xor_vertical_horizontal) + desc="Create a horizontal triplanar view.", + argstr="--horizontal", + xor=_xor_vertical_horizontal, + ) - lookup = traits.Str( - desc='Arguments to pass to minclookup', argstr='--lookup %s') + lookup = traits.Str(desc="Arguments to pass to minclookup", argstr="--lookup %s") class PikOutputSpec(TraitedSpec): - output_file = File(desc='output image', exists=True) + output_file = File(desc="output image", exists=True) class Pik(CommandLine): @@ -1545,102 +1663,104 @@ class Pik(CommandLine): input_spec = PikInputSpec output_spec = PikOutputSpec - _cmd = 'mincpik' + _cmd = "mincpik" def _format_arg(self, name, spec, value): - if name == 'title': + if name == "title": if isinstance(value, bool) and value: - return '--title' + return "--title" elif isinstance(value, str): - return '--title --title_text %s' % (value, ) + return "--title --title_text %s" % (value,) else: - raise ValueError( - 'Unknown value for "title" argument: ' + str(value)) + raise ValueError('Unknown value for "title" argument: ' + str(value)) return super(Pik, self)._format_arg(name, spec, value) class BlurInputSpec(CommandLineInputSpec): input_file = File( - desc='input file', - exists=True, - mandatory=True, - argstr='%s', - position=-2, + desc="input file", exists=True, mandatory=True, argstr="%s", position=-2, ) - output_file_base = File(desc='output file base', argstr='%s', position=-1) + output_file_base = File(desc="output file base", argstr="%s", position=-1) clobber = traits.Bool( - desc='Overwrite existing file.', - argstr='-clobber', + desc="Overwrite existing file.", + argstr="-clobber", usedefault=True, - default_value=True) + default_value=True, + ) - _xor_kernel = ('gaussian', 'rect') + _xor_kernel = ("gaussian", "rect") gaussian = traits.Bool( - desc='Use a gaussian smoothing kernel (default).', - argstr='-gaussian', - xor=_xor_kernel) + desc="Use a gaussian smoothing kernel 
(default).", + argstr="-gaussian", + xor=_xor_kernel, + ) rect = traits.Bool( - desc='Use a rect (box) smoothing kernel.', - argstr='-rect', - xor=_xor_kernel) + desc="Use a rect (box) smoothing kernel.", argstr="-rect", xor=_xor_kernel + ) gradient = traits.Bool( - desc='Create the gradient magnitude volume as well.', - argstr='-gradient') + desc="Create the gradient magnitude volume as well.", argstr="-gradient" + ) partial = traits.Bool( - desc= - 'Create the partial derivative and gradient magnitude volumes as well.', - argstr='-partial') + desc="Create the partial derivative and gradient magnitude volumes as well.", + argstr="-partial", + ) no_apodize = traits.Bool( - desc='Do not apodize the data before blurring.', argstr='-no_apodize') + desc="Do not apodize the data before blurring.", argstr="-no_apodize" + ) - _xor_main_options = ('fwhm', 'fwhm3d', 'standard_dev') + _xor_main_options = ("fwhm", "fwhm3d", "standard_dev") fwhm = traits.Float( 0, - desc='Full-width-half-maximum of gaussian kernel. Default value: 0.', - argstr='-fwhm %s', + desc="Full-width-half-maximum of gaussian kernel. Default value: 0.", + argstr="-fwhm %s", xor=_xor_main_options, - mandatory=True) + mandatory=True, + ) standard_dev = traits.Float( 0, - desc='Standard deviation of gaussian kernel. Default value: 0.', - argstr='-standarddev %s', + desc="Standard deviation of gaussian kernel. Default value: 0.", + argstr="-standarddev %s", xor=_xor_main_options, - mandatory=True) + mandatory=True, + ) fwhm3d = traits.Tuple( traits.Float, traits.Float, traits.Float, - argstr='-3dfwhm %s %s %s', - desc=('Full-width-half-maximum of gaussian kernel.' - 'Default value: -1.79769e+308 -1.79769e+308 -1.79769e+308.'), + argstr="-3dfwhm %s %s %s", + desc=( + "Full-width-half-maximum of gaussian kernel." + "Default value: -1.79769e+308 -1.79769e+308 -1.79769e+308." + ), xor=_xor_main_options, - mandatory=True) + mandatory=True, + ) dimensions = traits.Enum( 3, 1, 2, - desc= - 'Number of dimensions to blur (either 1,2 or 3). Default value: 3.', - argstr='-dimensions %s') + desc="Number of dimensions to blur (either 1,2 or 3). Default value: 3.", + argstr="-dimensions %s", + ) class BlurOutputSpec(TraitedSpec): - output_file = File(desc='Blurred output file.', exists=True) + output_file = File(desc="Blurred output file.", exists=True) - gradient_dxyz = File(desc='Gradient dxyz.') - partial_dx = File(desc='Partial gradient dx.') - partial_dy = File(desc='Partial gradient dy.') - partial_dz = File(desc='Partial gradient dz.') - partial_dxyz = File(desc='Partial gradient dxyz.') + gradient_dxyz = File(desc="Gradient dxyz.") + partial_dx = File(desc="Partial gradient dx.") + partial_dy = File(desc="Partial gradient dy.") + partial_dz = File(desc="Partial gradient dz.") + partial_dxyz = File(desc="Partial gradient dxyz.") class Blur(StdOutCommandLine): @@ -1682,7 +1802,7 @@ class Blur(StdOutCommandLine): input_spec = BlurInputSpec output_spec = BlurOutputSpec - _cmd = 'mincblur' + _cmd = "mincblur" def _gen_output_base(self): output_file_base = self.inputs.output_file_base @@ -1690,13 +1810,11 @@ def _gen_output_base(self): if isdefined(output_file_base): return output_file_base else: - base_file_name = os.path.split( - self.inputs.input_file)[1] # e.g. 'foo.mnc' - base_file_name_no_ext = os.path.splitext(base_file_name)[ - 0] # e.g. 'foo' + base_file_name = os.path.split(self.inputs.input_file)[1] # e.g. 'foo.mnc' + base_file_name_no_ext = os.path.splitext(base_file_name)[0] # e.g. 
'foo' output_base = os.path.join( - os.getcwd(), base_file_name_no_ext + - '_bluroutput') # e.g. '/tmp/blah/foo_bluroutput' + os.getcwd(), base_file_name_no_ext + "_bluroutput" + ) # e.g. '/tmp/blah/foo_bluroutput' # return os.path.splitext(self.inputs.input_file)[0] + # '_bluroutput' return output_base @@ -1706,16 +1824,16 @@ def _list_outputs(self): output_file_base = self._gen_output_base() - outputs['output_file'] = output_file_base + '_blur.mnc' + outputs["output_file"] = output_file_base + "_blur.mnc" if isdefined(self.inputs.gradient): - outputs['gradient_dxyz'] = output_file_base + '_dxyz.mnc' + outputs["gradient_dxyz"] = output_file_base + "_dxyz.mnc" if isdefined(self.inputs.partial): - outputs['partial_dx'] = output_file_base + '_dx.mnc' - outputs['partial_dy'] = output_file_base + '_dy.mnc' - outputs['partial_dz'] = output_file_base + '_dz.mnc' - outputs['partial_dxyz'] = output_file_base + '_dxyz.mnc' + outputs["partial_dx"] = output_file_base + "_dx.mnc" + outputs["partial_dy"] = output_file_base + "_dy.mnc" + outputs["partial_dz"] = output_file_base + "_dz.mnc" + outputs["partial_dxyz"] = output_file_base + "_dxyz.mnc" return outputs @@ -1730,156 +1848,177 @@ def cmdline(self): # FIXME this seems like a bit of a hack. Can we force output_file # to show up in cmdline by default, even if it isn't specified in # the instantiation of Pik? - return '%s %s' % (orig_cmdline, self._gen_output_base()) + return "%s %s" % (orig_cmdline, self._gen_output_base()) class MathInputSpec(CommandLineInputSpec): - _xor_input_files = ('input_files', 'filelist') + _xor_input_files = ("input_files", "filelist") input_files = InputMultiPath( File(exists=True), - desc='input file(s) for calculation', + desc="input file(s) for calculation", mandatory=True, - sep=' ', - argstr='%s', + sep=" ", + argstr="%s", position=-2, - xor=_xor_input_files) + xor=_xor_input_files, + ) output_file = File( - desc='output file', - argstr='%s', + desc="output file", + argstr="%s", genfile=True, position=-1, - name_source=['input_files'], + name_source=["input_files"], hash_files=False, - name_template='%s_mincmath.mnc') + name_template="%s_mincmath.mnc", + ) filelist = File( - desc='Specify the name of a file containing input file names.', - argstr='-filelist %s', + desc="Specify the name of a file containing input file names.", + argstr="-filelist %s", exists=True, mandatory=True, - xor=_xor_input_files) + xor=_xor_input_files, + ) clobber = traits.Bool( - desc='Overwrite existing file.', - argstr='-clobber', + desc="Overwrite existing file.", + argstr="-clobber", usedefault=True, - default_value=True) + default_value=True, + ) - two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2') + two = traits.Bool(desc="Create a MINC 2 output file.", argstr="-2") - _xor_copy_header = ('copy_header', 'no_copy_header') + _xor_copy_header = ("copy_header", "no_copy_header") copy_header = traits.Bool( - desc= - 'Copy all of the header from the first file (default for one file).', - argstr='-copy_header', - xor=_xor_copy_header) + desc="Copy all of the header from the first file (default for one file).", + argstr="-copy_header", + xor=_xor_copy_header, + ) no_copy_header = traits.Bool( - desc= - 'Do not copy all of the header from the first file (default for many files)).', - argstr='-nocopy_header', - xor=_xor_copy_header) + desc="Do not copy all of the header from the first file (default for many files)).", + argstr="-nocopy_header", + xor=_xor_copy_header, + ) _xor_format = ( - 'format_filetype', - 'format_byte', - 
'format_short', - 'format_int', - 'format_long', - 'format_float', - 'format_double', - 'format_signed', - 'format_unsigned', + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", ) format_filetype = traits.Bool( - desc='Use data type of first file (default).', - argstr='-filetype', - xor=_xor_format) + desc="Use data type of first file (default).", + argstr="-filetype", + xor=_xor_format, + ) format_byte = traits.Bool( - desc='Write out byte data.', argstr='-byte', xor=_xor_format) + desc="Write out byte data.", argstr="-byte", xor=_xor_format + ) format_short = traits.Bool( - desc='Write out short integer data.', argstr='-short', xor=_xor_format) + desc="Write out short integer data.", argstr="-short", xor=_xor_format + ) format_int = traits.Bool( - desc='Write out 32-bit integer data.', argstr='-int', xor=_xor_format) + desc="Write out 32-bit integer data.", argstr="-int", xor=_xor_format + ) format_long = traits.Bool( - desc='Superseded by -int.', argstr='-long', xor=_xor_format) + desc="Superseded by -int.", argstr="-long", xor=_xor_format + ) format_float = traits.Bool( - desc='Write out single-precision floating-point data.', - argstr='-float', - xor=_xor_format) + desc="Write out single-precision floating-point data.", + argstr="-float", + xor=_xor_format, + ) format_double = traits.Bool( - desc='Write out double-precision floating-point data.', - argstr='-double', - xor=_xor_format) + desc="Write out double-precision floating-point data.", + argstr="-double", + xor=_xor_format, + ) format_signed = traits.Bool( - desc='Write signed integer data.', argstr='-signed', xor=_xor_format) + desc="Write signed integer data.", argstr="-signed", xor=_xor_format + ) format_unsigned = traits.Bool( - desc='Write unsigned integer data (default).', - argstr='-unsigned', - xor=_xor_format) + desc="Write unsigned integer data (default).", + argstr="-unsigned", + xor=_xor_format, + ) voxel_range = traits.Tuple( traits.Int, traits.Int, - argstr='-range %d %d', - desc='Valid range for output data.') + argstr="-range %d %d", + desc="Valid range for output data.", + ) max_buffer_size_in_kb = traits.Range( low=0, - desc='Specify the maximum size of the internal buffers (in kbytes).', + desc="Specify the maximum size of the internal buffers (in kbytes).", value=4096, usedefault=True, - argstr='-max_buffer_size_in_kb %d', + argstr="-max_buffer_size_in_kb %d", ) _xor_check_dimensions = ( - 'check_dimensions', - 'no_check_dimensions', + "check_dimensions", + "no_check_dimensions", ) check_dimensions = traits.Bool( - desc='Check that dimension info matches across files (default).', - argstr='-check_dimensions', - xor=_xor_check_dimensions) + desc="Check that dimension info matches across files (default).", + argstr="-check_dimensions", + xor=_xor_check_dimensions, + ) no_check_dimensions = traits.Bool( - desc='Do not check dimension info.', - argstr='-nocheck_dimensions', - xor=_xor_check_dimensions) + desc="Do not check dimension info.", + argstr="-nocheck_dimensions", + xor=_xor_check_dimensions, + ) dimension = traits.Str( - desc= - 'Specify a dimension along which we wish to perform a calculation.', - argstr='-dimension %s') + desc="Specify a dimension along which we wish to perform a calculation.", + argstr="-dimension %s", + ) # FIXME Is it sensible to use ignore_nan and propagate_nan at the same # time? Document this. 
ignore_nan = traits.Bool( - desc='Ignore invalid data (NaN) for accumulations.', - argstr='-ignore_nan') + desc="Ignore invalid data (NaN) for accumulations.", argstr="-ignore_nan" + ) propagate_nan = traits.Bool( - desc='Invalid data in any file at a voxel produces a NaN (default).', - argstr='-propagate_nan') + desc="Invalid data in any file at a voxel produces a NaN (default).", + argstr="-propagate_nan", + ) # FIXME Double-check that these are mutually exclusive? - _xor_nan_zero_illegal = ('output_nan', 'output_zero', - 'output_illegal_value') + _xor_nan_zero_illegal = ("output_nan", "output_zero", "output_illegal_value") output_nan = traits.Bool( - desc='Output NaN when an illegal operation is done (default).', - argstr='-nan', - xor=_xor_nan_zero_illegal) + desc="Output NaN when an illegal operation is done (default).", + argstr="-nan", + xor=_xor_nan_zero_illegal, + ) output_zero = traits.Bool( - desc='Output zero when an illegal operation is done.', - argstr='-zero', - xor=_xor_nan_zero_illegal) + desc="Output zero when an illegal operation is done.", + argstr="-zero", + xor=_xor_nan_zero_illegal, + ) output_illegal = traits.Bool( - desc=('Value to write out when an illegal operation' - 'is done. Default value: 1.79769e+308'), - argstr='-illegal_value', - xor=_xor_nan_zero_illegal) + desc=( + "Value to write out when an illegal operation" + "is done. Default value: 1.79769e+308" + ), + argstr="-illegal_value", + xor=_xor_nan_zero_illegal, + ) # FIXME A whole bunch of the parameters will be mutually exclusive, e.g. surely can't do sqrt and abs at the same time? # Or does mincmath do one and then the next? @@ -1889,156 +2028,186 @@ class MathInputSpec(CommandLineInputSpec): ########################################################################## bool_or_const_traits = [ - 'test_gt', 'test_lt', 'test_eq', 'test_ne', 'test_ge', 'test_le', - 'calc_add', 'calc_sub', 'calc_mul', 'calc_div' + "test_gt", + "test_lt", + "test_eq", + "test_ne", + "test_ge", + "test_le", + "calc_add", + "calc_sub", + "calc_mul", + "calc_div", ] test_gt = traits.Either( traits.Bool(), traits.Float(), - desc='Test for vol1 > vol2 or vol1 > constant.', - argstr='-gt') + desc="Test for vol1 > vol2 or vol1 > constant.", + argstr="-gt", + ) test_lt = traits.Either( traits.Bool(), traits.Float(), - desc='Test for vol1 < vol2 or vol1 < constant.', - argstr='-lt') + desc="Test for vol1 < vol2 or vol1 < constant.", + argstr="-lt", + ) test_eq = traits.Either( traits.Bool(), traits.Float(), - desc='Test for integer vol1 == vol2 or vol1 == constant.', - argstr='-eq') + desc="Test for integer vol1 == vol2 or vol1 == constant.", + argstr="-eq", + ) test_ne = traits.Either( traits.Bool(), traits.Float(), - desc='Test for integer vol1 != vol2 or vol1 != const.', - argstr='-ne') + desc="Test for integer vol1 != vol2 or vol1 != const.", + argstr="-ne", + ) test_ge = traits.Either( traits.Bool(), traits.Float(), - desc='Test for vol1 >= vol2 or vol1 >= const.', - argstr='-ge') + desc="Test for vol1 >= vol2 or vol1 >= const.", + argstr="-ge", + ) test_le = traits.Either( traits.Bool(), traits.Float(), - desc='Test for vol1 <= vol2 or vol1 <= const.', - argstr='-le') + desc="Test for vol1 <= vol2 or vol1 <= const.", + argstr="-le", + ) calc_add = traits.Either( traits.Bool(), traits.Float(), - desc='Add N volumes or volume + constant.', - argstr='-add') + desc="Add N volumes or volume + constant.", + argstr="-add", + ) calc_sub = traits.Either( traits.Bool(), traits.Float(), - desc='Subtract 2 volumes or volume - constant.', - 
argstr='-sub') + desc="Subtract 2 volumes or volume - constant.", + argstr="-sub", + ) calc_mul = traits.Either( traits.Bool(), traits.Float(), - desc='Multiply N volumes or volume * constant.', - argstr='-mult') + desc="Multiply N volumes or volume * constant.", + argstr="-mult", + ) calc_div = traits.Either( traits.Bool(), traits.Float(), - desc='Divide 2 volumes or volume / constant.', - argstr='-div') + desc="Divide 2 volumes or volume / constant.", + argstr="-div", + ) ###################################### # Traits that expect a single volume # ###################################### single_volume_traits = [ - 'invert', 'calc_not', 'sqrt', 'square', 'abs', 'exp', 'log', 'scale', - 'clamp', 'segment', 'nsegment', 'isnan', 'isnan' + "invert", + "calc_not", + "sqrt", + "square", + "abs", + "exp", + "log", + "scale", + "clamp", + "segment", + "nsegment", + "isnan", + "isnan", ] # FIXME enforce this in _parse_inputs and check for other members invert = traits.Either( - traits.Float(), desc='Calculate 1/c.', argstr='-invert -const %s') + traits.Float(), desc="Calculate 1/c.", argstr="-invert -const %s" + ) - calc_not = traits.Bool(desc='Calculate !vol1.', argstr='-not') + calc_not = traits.Bool(desc="Calculate !vol1.", argstr="-not") - sqrt = traits.Bool(desc='Take square root of a volume.', argstr='-sqrt') - square = traits.Bool(desc='Take square of a volume.', argstr='-square') - abs = traits.Bool(desc='Take absolute value of a volume.', argstr='-abs') + sqrt = traits.Bool(desc="Take square root of a volume.", argstr="-sqrt") + square = traits.Bool(desc="Take square of a volume.", argstr="-square") + abs = traits.Bool(desc="Take absolute value of a volume.", argstr="-abs") exp = traits.Tuple( traits.Float, traits.Float, - argstr='-exp -const2 %s %s', - desc='Calculate c2*exp(c1*x). Both constants must be specified.') + argstr="-exp -const2 %s %s", + desc="Calculate c2*exp(c1*x). Both constants must be specified.", + ) log = traits.Tuple( traits.Float, traits.Float, - argstr='-log -const2 %s %s', - desc='Calculate log(x/c2)/c1. The constants c1 and c2 default to 1.') + argstr="-log -const2 %s %s", + desc="Calculate log(x/c2)/c1. The constants c1 and c2 default to 1.", + ) scale = traits.Tuple( traits.Float, traits.Float, - argstr='-scale -const2 %s %s', - desc='Scale a volume: volume * c1 + c2.') + argstr="-scale -const2 %s %s", + desc="Scale a volume: volume * c1 + c2.", + ) clamp = traits.Tuple( traits.Float, traits.Float, - argstr='-clamp -const2 %s %s', - desc='Clamp a volume to lie between two values.') + argstr="-clamp -const2 %s %s", + desc="Clamp a volume to lie between two values.", + ) segment = traits.Tuple( traits.Float, traits.Float, - argstr='-segment -const2 %s %s', - desc= - 'Segment a volume using range of -const2: within range = 1, outside range = 0.' 
+ argstr="-segment -const2 %s %s", + desc="Segment a volume using range of -const2: within range = 1, outside range = 0.", ) nsegment = traits.Tuple( traits.Float, traits.Float, - argstr='-nsegment -const2 %s %s', - desc='Opposite of -segment: within range = 0, outside range = 1.') + argstr="-nsegment -const2 %s %s", + desc="Opposite of -segment: within range = 0, outside range = 1.", + ) - isnan = traits.Bool(desc='Test for NaN values in vol1.', argstr='-isnan') + isnan = traits.Bool(desc="Test for NaN values in vol1.", argstr="-isnan") - nisnan = traits.Bool(desc='Negation of -isnan.', argstr='-nisnan') + nisnan = traits.Bool(desc="Negation of -isnan.", argstr="-nisnan") ############################################ # Traits that expect precisely two volumes # ############################################ - two_volume_traits = ['percentdiff'] + two_volume_traits = ["percentdiff"] percentdiff = traits.Float( - desc= - 'Percent difference between 2 volumes, thresholded (const def=0.0).', - argstr='-percentdiff') + desc="Percent difference between 2 volumes, thresholded (const def=0.0).", + argstr="-percentdiff", + ) ##################################### # Traits that expect N >= 1 volumes # ##################################### - n_volume_traits = [ - 'count_valid', 'maximum', 'minimum', 'calc_add', 'calc_or' - ] + n_volume_traits = ["count_valid", "maximum", "minimum", "calc_add", "calc_or"] count_valid = traits.Bool( - desc='Count the number of valid values in N volumes.', - argstr='-count_valid') + desc="Count the number of valid values in N volumes.", argstr="-count_valid" + ) - maximum = traits.Bool(desc='Find maximum of N volumes.', argstr='-maximum') - minimum = traits.Bool(desc='Find minimum of N volumes.', argstr='-minimum') + maximum = traits.Bool(desc="Find maximum of N volumes.", argstr="-maximum") + minimum = traits.Bool(desc="Find minimum of N volumes.", argstr="-minimum") - calc_and = traits.Bool( - desc='Calculate vol1 && vol2 (&& ...).', argstr='-and') - calc_or = traits.Bool( - desc='Calculate vol1 || vol2 (|| ...).', argstr='-or') + calc_and = traits.Bool(desc="Calculate vol1 && vol2 (&& ...).", argstr="-and") + calc_or = traits.Bool(desc="Calculate vol1 || vol2 (|| ...).", argstr="-or") class MathOutputSpec(TraitedSpec): - output_file = File(desc='output file', exists=True) + output_file = File(desc="output file", exists=True) class Math(StdOutCommandLine): @@ -2064,7 +2233,7 @@ class Math(StdOutCommandLine): input_spec = MathInputSpec output_spec = MathOutputSpec - _cmd = 'mincmath' + _cmd = "mincmath" def _format_arg(self, name, spec, value): assert value is not None @@ -2076,18 +2245,11 @@ def _format_arg(self, name, spec, value): if isinstance(value, bool) and value: return spec.argstr elif isinstance(value, bool) and not value: - raise ValueError('Does not make sense to specify %s=False' % - (name, )) + raise ValueError("Does not make sense to specify %s=False" % (name,)) elif isinstance(value, float): - return '%s -const %s' % ( - spec.argstr, - value, - ) + return "%s -const %s" % (spec.argstr, value,) else: - raise ValueError('Invalid %s argument: %s' % ( - name, - value, - )) + raise ValueError("Invalid %s argument: %s" % (name, value,)) return super(Math, self)._format_arg(name, spec, value) @@ -2104,22 +2266,19 @@ def _parse_inputs(self): if isinstance(t, bool): if nr_input_files != 2: raise ValueError( - 'Due to the %s option we expected 2 files but input_files is of length %d' - % ( - n, - nr_input_files, - )) + "Due to the %s option we expected 2 files but 
input_files is of length %d" + % (n, nr_input_files,) + ) elif isinstance(t, float): if nr_input_files != 1: raise ValueError( - 'Due to the %s option we expected 1 file but input_files is of length %d' - % ( - n, - nr_input_files, - )) + "Due to the %s option we expected 1 file but input_files is of length %d" + % (n, nr_input_files,) + ) else: raise ValueError( - 'Argument should be a bool or const, but got: %s' % t) + "Argument should be a bool or const, but got: %s" % t + ) for n in self.input_spec.single_volume_traits: t = self.inputs.__getattribute__(n) @@ -2127,11 +2286,9 @@ def _parse_inputs(self): if isdefined(t): if nr_input_files != 1: raise ValueError( - 'Due to the %s option we expected 1 file but input_files is of length %d' - % ( - n, - nr_input_files, - )) + "Due to the %s option we expected 1 file but input_files is of length %d" + % (n, nr_input_files,) + ) for n in self.input_spec.two_volume_traits: t = self.inputs.__getattribute__(n) @@ -2139,11 +2296,9 @@ def _parse_inputs(self): if isdefined(t): if nr_input_files != 2: raise ValueError( - 'Due to the %s option we expected 2 files but input_files is of length %d' - % ( - n, - nr_input_files, - )) + "Due to the %s option we expected 2 files but input_files is of length %d" + % (n, nr_input_files,) + ) for n in self.input_spec.n_volume_traits: t = self.inputs.__getattribute__(n) @@ -2151,11 +2306,9 @@ def _parse_inputs(self): if isdefined(t): if not nr_input_files >= 1: raise ValueError( - 'Due to the %s option we expected at least one file but input_files is of length %d' - % ( - n, - nr_input_files, - )) + "Due to the %s option we expected at least one file but input_files is of length %d" + % (n, nr_input_files,) + ) return super(Math, self)._parse_inputs() @@ -2171,58 +2324,58 @@ class ResampleInputSpec(CommandLineInputSpec): """ input_file = File( - desc='input file for resampling', + desc="input file for resampling", exists=True, mandatory=True, - argstr='%s', + argstr="%s", position=-2, ) output_file = File( - desc='output file', + desc="output file", genfile=True, - argstr='%s', + argstr="%s", position=-1, - name_source=['input_file'], + name_source=["input_file"], hash_files=False, - name_template='%s_resample.mnc') + name_template="%s_resample.mnc", + ) # This is a dummy input. 
- input_grid_files = InputMultiPath( - File, - desc='input grid file(s)', - ) + input_grid_files = InputMultiPath(File, desc="input grid file(s)",) - two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2') + two = traits.Bool(desc="Create a MINC 2 output file.", argstr="-2") clobber = traits.Bool( - desc='Overwrite existing file.', - argstr='-clobber', + desc="Overwrite existing file.", + argstr="-clobber", usedefault=True, - default_value=True) + default_value=True, + ) - _xor_interpolation = ('trilinear_interpolation', 'tricubic_interpolation', - 'nearest_neighbour_interpolation', - 'sinc_interpolation') + _xor_interpolation = ( + "trilinear_interpolation", + "tricubic_interpolation", + "nearest_neighbour_interpolation", + "sinc_interpolation", + ) trilinear_interpolation = traits.Bool( - desc='Do trilinear interpolation.', - argstr='-trilinear', - xor=_xor_interpolation) + desc="Do trilinear interpolation.", argstr="-trilinear", xor=_xor_interpolation + ) tricubic_interpolation = traits.Bool( - desc='Do tricubic interpolation.', - argstr='-tricubic', - xor=_xor_interpolation) + desc="Do tricubic interpolation.", argstr="-tricubic", xor=_xor_interpolation + ) nearest_neighbour_interpolation = traits.Bool( - desc='Do nearest neighbour interpolation.', - argstr='-nearest_neighbour', - xor=_xor_interpolation) + desc="Do nearest neighbour interpolation.", + argstr="-nearest_neighbour", + xor=_xor_interpolation, + ) sinc_interpolation = traits.Bool( - desc='Do windowed sinc interpolation.', - argstr='-sinc', - xor=_xor_interpolation) + desc="Do windowed sinc interpolation.", argstr="-sinc", xor=_xor_interpolation + ) half_width_sinc_window = traits.Enum( 5, @@ -2235,293 +2388,343 @@ class ResampleInputSpec(CommandLineInputSpec): 8, 9, 10, - desc='Set half-width of sinc window (1-10). Default value: 5.', - argstr='-width %s', - requires=['sinc_interpolation']) + desc="Set half-width of sinc window (1-10). Default value: 5.", + argstr="-width %s", + requires=["sinc_interpolation"], + ) - _xor_sinc_window_type = ('sinc_window_hanning', 'sinc_window_hamming') + _xor_sinc_window_type = ("sinc_window_hanning", "sinc_window_hamming") sinc_window_hanning = traits.Bool( - desc='Set sinc window type to Hanning.', - argstr='-hanning', + desc="Set sinc window type to Hanning.", + argstr="-hanning", xor=_xor_sinc_window_type, - requires=['sinc_interpolation']) + requires=["sinc_interpolation"], + ) sinc_window_hamming = traits.Bool( - desc='Set sinc window type to Hamming.', - argstr='-hamming', + desc="Set sinc window type to Hamming.", + argstr="-hamming", xor=_xor_sinc_window_type, - requires=['sinc_interpolation']) + requires=["sinc_interpolation"], + ) transformation = File( - desc='File giving world transformation. (Default = identity).', + desc="File giving world transformation. 
(Default = identity).", exists=True, - argstr='-transformation %s') + argstr="-transformation %s", + ) invert_transformation = traits.Bool( - desc='Invert the transformation before using it.', - argstr='-invert_transformation') + desc="Invert the transformation before using it.", + argstr="-invert_transformation", + ) - _xor_input_sampling = ('vio_transform', 'no_input_sampling') + _xor_input_sampling = ("vio_transform", "no_input_sampling") vio_transform = traits.Bool( - desc='VIO_Transform the input sampling with the transform (default).', - argstr='-tfm_input_sampling', - xor=_xor_input_sampling) + desc="VIO_Transform the input sampling with the transform (default).", + argstr="-tfm_input_sampling", + xor=_xor_input_sampling, + ) no_input_sampling = traits.Bool( - desc='Use the input sampling without transforming (old behaviour).', - argstr='-use_input_sampling', - xor=_xor_input_sampling) + desc="Use the input sampling without transforming (old behaviour).", + argstr="-use_input_sampling", + xor=_xor_input_sampling, + ) like = File( - desc='Specifies a model file for the resampling.', - argstr='-like %s', - exists=True) + desc="Specifies a model file for the resampling.", + argstr="-like %s", + exists=True, + ) _xor_format = ( - 'format_byte', - 'format_short', - 'format_int', - 'format_long', - 'format_float', - 'format_double', - 'format_signed', - 'format_unsigned', + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", ) format_byte = traits.Bool( - desc='Write out byte data.', argstr='-byte', xor=_xor_format) + desc="Write out byte data.", argstr="-byte", xor=_xor_format + ) format_short = traits.Bool( - desc='Write out short integer data.', argstr='-short', xor=_xor_format) + desc="Write out short integer data.", argstr="-short", xor=_xor_format + ) format_int = traits.Bool( - desc='Write out 32-bit integer data.', argstr='-int', xor=_xor_format) + desc="Write out 32-bit integer data.", argstr="-int", xor=_xor_format + ) format_long = traits.Bool( - desc='Superseded by -int.', argstr='-long', xor=_xor_format) + desc="Superseded by -int.", argstr="-long", xor=_xor_format + ) format_float = traits.Bool( - desc='Write out single-precision floating-point data.', - argstr='-float', - xor=_xor_format) + desc="Write out single-precision floating-point data.", + argstr="-float", + xor=_xor_format, + ) format_double = traits.Bool( - desc='Write out double-precision floating-point data.', - argstr='-double', - xor=_xor_format) + desc="Write out double-precision floating-point data.", + argstr="-double", + xor=_xor_format, + ) format_signed = traits.Bool( - desc='Write signed integer data.', argstr='-signed', xor=_xor_format) + desc="Write signed integer data.", argstr="-signed", xor=_xor_format + ) format_unsigned = traits.Bool( - desc='Write unsigned integer data (default).', - argstr='-unsigned', - xor=_xor_format) + desc="Write unsigned integer data (default).", + argstr="-unsigned", + xor=_xor_format, + ) output_range = traits.Tuple( traits.Float, traits.Float, - argstr='-range %s %s', - desc= - 'Valid range for output data. Default value: -1.79769e+308 -1.79769e+308.' + argstr="-range %s %s", + desc="Valid range for output data. 
Default value: -1.79769e+308 -1.79769e+308.", ) - _xor_slices = ('transverse', 'sagittal', 'coronal') + _xor_slices = ("transverse", "sagittal", "coronal") transverse_slices = traits.Bool( - desc='Write out transverse slices.', - argstr='-transverse', - xor=_xor_slices) + desc="Write out transverse slices.", argstr="-transverse", xor=_xor_slices + ) sagittal_slices = traits.Bool( - desc='Write out sagittal slices', argstr='-sagittal', xor=_xor_slices) + desc="Write out sagittal slices", argstr="-sagittal", xor=_xor_slices + ) coronal_slices = traits.Bool( - desc='Write out coronal slices', argstr='-coronal', xor=_xor_slices) + desc="Write out coronal slices", argstr="-coronal", xor=_xor_slices + ) - _xor_fill = ('nofill', 'fill') + _xor_fill = ("nofill", "fill") no_fill = traits.Bool( - desc='Use value zero for points outside of input volume.', - argstr='-nofill', - xor=_xor_fill) + desc="Use value zero for points outside of input volume.", + argstr="-nofill", + xor=_xor_fill, + ) fill = traits.Bool( - desc='Use a fill value for points outside of input volume.', - argstr='-fill', - xor=_xor_fill) + desc="Use a fill value for points outside of input volume.", + argstr="-fill", + xor=_xor_fill, + ) fill_value = traits.Float( - desc=('Specify a fill value for points outside of input volume.' - 'Default value: 1.79769e+308.'), - argstr='-fillvalue %s', - requires=['fill']) + desc=( + "Specify a fill value for points outside of input volume." + "Default value: 1.79769e+308." + ), + argstr="-fillvalue %s", + requires=["fill"], + ) - _xor_scale = ('keep_real_range', 'nokeep_real_range') + _xor_scale = ("keep_real_range", "nokeep_real_range") keep_real_range = traits.Bool( - desc='Keep the real scale of the input volume.', - argstr='-keep_real_range', - xor=_xor_scale) + desc="Keep the real scale of the input volume.", + argstr="-keep_real_range", + xor=_xor_scale, + ) nokeep_real_range = traits.Bool( - desc='Do not keep the real scale of the data (default).', - argstr='-nokeep_real_range', - xor=_xor_scale) + desc="Do not keep the real scale of the data (default).", + argstr="-nokeep_real_range", + xor=_xor_scale, + ) - _xor_spacetype = ('spacetype', 'talairach') + _xor_spacetype = ("spacetype", "talairach") spacetype = traits.Str( - desc='Set the spacetype attribute to a specified string.', - argstr='-spacetype %s') - talairach = traits.Bool( - desc='Output is in Talairach space.', argstr='-talairach') + desc="Set the spacetype attribute to a specified string.", + argstr="-spacetype %s", + ) + talairach = traits.Bool(desc="Output is in Talairach space.", argstr="-talairach") origin = traits.Tuple( traits.Float, traits.Float, traits.Float, - desc=('Origin of first pixel in 3D space.' - 'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'), - argstr='-origin %s %s %s') + desc=( + "Origin of first pixel in 3D space." + "Default value: 1.79769e+308 1.79769e+308 1.79769e+308." + ), + argstr="-origin %s %s %s", + ) standard_sampling = traits.Bool( - desc='Set the sampling to standard values (step, start and dircos).', - argstr='-standard_sampling') # FIXME Bool? + desc="Set the sampling to standard values (step, start and dircos).", + argstr="-standard_sampling", + ) # FIXME Bool? units = traits.Str( - desc='Specify the units of the output sampling.', - argstr='-units %s') # FIXME String? + desc="Specify the units of the output sampling.", argstr="-units %s" + ) # FIXME String? # Elements along each dimension. # FIXME Ints? Ranges? # FIXME Check that this xor behaves correctly. 
- _xor_nelements = ('nelements', 'nelements_x_y_or_z') + _xor_nelements = ("nelements", "nelements_x_y_or_z") # nr elements along each dimension nelements = traits.Tuple( traits.Int, traits.Int, traits.Int, - desc='Number of elements along each dimension (X, Y, Z).', - argstr='-nelements %s %s %s', - xor=_xor_nelements) + desc="Number of elements along each dimension (X, Y, Z).", + argstr="-nelements %s %s %s", + xor=_xor_nelements, + ) # FIXME Is mincresample happy if we only specify one of these, or do we # need the requires=...? xnelements = traits.Int( - desc='Number of elements along the X dimension.', - argstr='-xnelements %s', - requires=('ynelements', 'znelements'), - xor=_xor_nelements) + desc="Number of elements along the X dimension.", + argstr="-xnelements %s", + requires=("ynelements", "znelements"), + xor=_xor_nelements, + ) ynelements = traits.Int( - desc='Number of elements along the Y dimension.', - argstr='-ynelements %s', - requires=('xnelements', 'znelements'), - xor=_xor_nelements) + desc="Number of elements along the Y dimension.", + argstr="-ynelements %s", + requires=("xnelements", "znelements"), + xor=_xor_nelements, + ) znelements = traits.Int( - desc='Number of elements along the Z dimension.', - argstr='-znelements %s', - requires=('xnelements', 'ynelements'), - xor=_xor_nelements) + desc="Number of elements along the Z dimension.", + argstr="-znelements %s", + requires=("xnelements", "ynelements"), + xor=_xor_nelements, + ) # step size along each dimension - _xor_step = ('step', 'step_x_y_or_z') + _xor_step = ("step", "step_x_y_or_z") step = traits.Tuple( traits.Int, traits.Int, traits.Int, - desc= - 'Step size along each dimension (X, Y, Z). Default value: (0, 0, 0).', - argstr='-step %s %s %s', - xor=_xor_nelements) + desc="Step size along each dimension (X, Y, Z). Default value: (0, 0, 0).", + argstr="-step %s %s %s", + xor=_xor_nelements, + ) # FIXME Use the requires=...? xstep = traits.Int( - desc='Step size along the X dimension. Default value: 0.', - argstr='-xstep %s', - requires=('ystep', 'zstep'), - xor=_xor_step) + desc="Step size along the X dimension. Default value: 0.", + argstr="-xstep %s", + requires=("ystep", "zstep"), + xor=_xor_step, + ) ystep = traits.Int( - desc='Step size along the Y dimension. Default value: 0.', - argstr='-ystep %s', - requires=('xstep', 'zstep'), - xor=_xor_step) + desc="Step size along the Y dimension. Default value: 0.", + argstr="-ystep %s", + requires=("xstep", "zstep"), + xor=_xor_step, + ) zstep = traits.Int( - desc='Step size along the Z dimension. Default value: 0.', - argstr='-zstep %s', - requires=('xstep', 'ystep'), - xor=_xor_step) + desc="Step size along the Z dimension. Default value: 0.", + argstr="-zstep %s", + requires=("xstep", "ystep"), + xor=_xor_step, + ) # start point along each dimension - _xor_start = ('start', 'start_x_y_or_z') + _xor_start = ("start", "start_x_y_or_z") start = traits.Tuple( traits.Float, traits.Float, traits.Float, - desc=('Start point along each dimension (X, Y, Z).' - 'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'), - argstr='-start %s %s %s', - xor=_xor_nelements) + desc=( + "Start point along each dimension (X, Y, Z)." + "Default value: 1.79769e+308 1.79769e+308 1.79769e+308." + ), + argstr="-start %s %s %s", + xor=_xor_nelements, + ) # FIXME Use the requires=...? xstart = traits.Float( - desc='Start point along the X dimension. 
Default value: 1.79769e+308.', - argstr='-xstart %s', - requires=('ystart', 'zstart'), - xor=_xor_start) + desc="Start point along the X dimension. Default value: 1.79769e+308.", + argstr="-xstart %s", + requires=("ystart", "zstart"), + xor=_xor_start, + ) ystart = traits.Float( - desc='Start point along the Y dimension. Default value: 1.79769e+308.', - argstr='-ystart %s', - requires=('xstart', 'zstart'), - xor=_xor_start) + desc="Start point along the Y dimension. Default value: 1.79769e+308.", + argstr="-ystart %s", + requires=("xstart", "zstart"), + xor=_xor_start, + ) zstart = traits.Float( - desc='Start point along the Z dimension. Default value: 1.79769e+308.', - argstr='-zstart %s', - requires=('xstart', 'ystart'), - xor=_xor_start) + desc="Start point along the Z dimension. Default value: 1.79769e+308.", + argstr="-zstart %s", + requires=("xstart", "ystart"), + xor=_xor_start, + ) # dircos along each dimension - _xor_dircos = ('dircos', 'dircos_x_y_or_z') + _xor_dircos = ("dircos", "dircos_x_y_or_z") dircos = traits.Tuple( traits.Float, traits.Float, traits.Float, desc=( - 'Direction cosines along each dimension (X, Y, Z). Default value:' - '1.79769e+308 1.79769e+308 1.79769e+308 1.79769e+308 ...' - ' 1.79769e+308 1.79769e+308 1.79769e+308 1.79769e+308 1.79769e+308.' + "Direction cosines along each dimension (X, Y, Z). Default value:" + "1.79769e+308 1.79769e+308 1.79769e+308 1.79769e+308 ..." + " 1.79769e+308 1.79769e+308 1.79769e+308 1.79769e+308 1.79769e+308." ), - argstr='-dircos %s %s %s', - xor=_xor_nelements) + argstr="-dircos %s %s %s", + xor=_xor_nelements, + ) # FIXME Use the requires=...? xdircos = traits.Float( - desc=('Direction cosines along the X dimension.' - 'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'), - argstr='-xdircos %s', - requires=('ydircos', 'zdircos'), - xor=_xor_dircos) + desc=( + "Direction cosines along the X dimension." + "Default value: 1.79769e+308 1.79769e+308 1.79769e+308." + ), + argstr="-xdircos %s", + requires=("ydircos", "zdircos"), + xor=_xor_dircos, + ) ydircos = traits.Float( - desc=('Direction cosines along the Y dimension.' - 'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'), - argstr='-ydircos %s', - requires=('xdircos', 'zdircos'), - xor=_xor_dircos) + desc=( + "Direction cosines along the Y dimension." + "Default value: 1.79769e+308 1.79769e+308 1.79769e+308." + ), + argstr="-ydircos %s", + requires=("xdircos", "zdircos"), + xor=_xor_dircos, + ) zdircos = traits.Float( - desc=('Direction cosines along the Z dimension.' - 'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'), - argstr='-zdircos %s', - requires=('xdircos', 'ydircos'), - xor=_xor_dircos) + desc=( + "Direction cosines along the Z dimension." + "Default value: 1.79769e+308 1.79769e+308 1.79769e+308." 
+ ), + argstr="-zdircos %s", + requires=("xdircos", "ydircos"), + xor=_xor_dircos, + ) class ResampleOutputSpec(TraitedSpec): - output_file = File(desc='output file', exists=True) + output_file = File(desc="output file", exists=True) class Resample(StdOutCommandLine): @@ -2540,7 +2743,7 @@ class Resample(StdOutCommandLine): input_spec = ResampleInputSpec output_spec = ResampleOutputSpec - _cmd = 'mincresample' + _cmd = "mincresample" class NormInputSpec(CommandLineInputSpec): @@ -2558,89 +2761,95 @@ class NormInputSpec(CommandLineInputSpec): """ input_file = File( - desc='input file to normalise', + desc="input file to normalise", exists=True, mandatory=True, - argstr='%s', + argstr="%s", position=-2, ) output_file = File( - desc='output file', + desc="output file", genfile=True, - argstr='%s', + argstr="%s", position=-1, - name_source=['input_file'], + name_source=["input_file"], hash_files=False, - name_template='%s_norm.mnc') + name_template="%s_norm.mnc", + ) output_threshold_mask = File( - desc='File in which to store the threshold mask.', - argstr='-threshold_mask %s', - name_source=['input_file'], + desc="File in which to store the threshold mask.", + argstr="-threshold_mask %s", + name_source=["input_file"], hash_files=False, - name_template='%s_norm_threshold_mask.mnc') + name_template="%s_norm_threshold_mask.mnc", + ) clobber = traits.Bool( - desc='Overwrite existing file.', - argstr='-clobber', + desc="Overwrite existing file.", + argstr="-clobber", usedefault=True, - default_value=True) + default_value=True, + ) # Normalisation Options mask = File( - desc='Calculate the image normalisation within a mask.', - argstr='-mask %s', - exists=True) + desc="Calculate the image normalisation within a mask.", + argstr="-mask %s", + exists=True, + ) clamp = traits.Bool( - desc='Force the ouput range between limits [default].', - argstr='-clamp', + desc="Force the ouput range between limits [default].", + argstr="-clamp", usedefault=True, - default_value=True) + default_value=True, + ) cutoff = traits.Range( low=0.0, high=100.0, - desc= - 'Cutoff value to use to calculate thresholds by a histogram PcT in %. [default: 0.01]', - argstr='-cutoff %s', + desc="Cutoff value to use to calculate thresholds by a histogram PcT in %. [default: 0.01]", + argstr="-cutoff %s", ) - lower = traits.Float(desc='Lower real value to use.', argstr='-lower %s') - upper = traits.Float(desc='Upper real value to use.', argstr='-upper %s') + lower = traits.Float(desc="Lower real value to use.", argstr="-lower %s") + upper = traits.Float(desc="Upper real value to use.", argstr="-upper %s") out_floor = traits.Float( - desc='Output files maximum [default: 0]', - argstr='-out_floor %s') # FIXME is this a float? + desc="Output files maximum [default: 0]", argstr="-out_floor %s" + ) # FIXME is this a float? out_ceil = traits.Float( - desc='Output files minimum [default: 100]', - argstr='-out_ceil %s') # FIXME is this a float? + desc="Output files minimum [default: 100]", argstr="-out_ceil %s" + ) # FIXME is this a float? 
# Threshold Options threshold = traits.Bool( - desc= - 'Threshold the image (set values below threshold_perc to -out_floor).', - argstr='-threshold') + desc="Threshold the image (set values below threshold_perc to -out_floor).", + argstr="-threshold", + ) threshold_perc = traits.Range( low=0.0, high=100.0, - desc= - 'Threshold percentage (0.1 == lower 10% of intensity range) [default: 0.1].', - argstr='-threshold_perc %s') + desc="Threshold percentage (0.1 == lower 10% of intensity range) [default: 0.1].", + argstr="-threshold_perc %s", + ) threshold_bmt = traits.Bool( - desc='Use the resulting image BiModalT as the threshold.', - argstr='-threshold_bmt') + desc="Use the resulting image BiModalT as the threshold.", + argstr="-threshold_bmt", + ) threshold_blur = traits.Float( - desc='Blur FWHM for intensity edges then thresholding [default: 2].', - argstr='-threshold_blur %s') + desc="Blur FWHM for intensity edges then thresholding [default: 2].", + argstr="-threshold_blur %s", + ) class NormOutputSpec(TraitedSpec): - output_file = File(desc='output file', exists=True) - output_threshold_mask = File(desc='threshold mask file') + output_file = File(desc="output file", exists=True) + output_threshold_mask = File(desc="threshold mask file") class Norm(CommandLine): @@ -2658,7 +2867,7 @@ class Norm(CommandLine): input_spec = NormInputSpec output_spec = NormOutputSpec - _cmd = 'mincnorm' + _cmd = "mincnorm" """ @@ -2699,50 +2908,53 @@ class VolcentreInputSpec(CommandLineInputSpec): """ input_file = File( - desc='input file to centre', + desc="input file to centre", exists=True, mandatory=True, - argstr='%s', + argstr="%s", position=-2, ) output_file = File( - desc='output file', + desc="output file", genfile=True, - argstr='%s', + argstr="%s", position=-1, - name_source=['input_file'], + name_source=["input_file"], hash_files=False, - name_template='%s_volcentre.mnc') + name_template="%s_volcentre.mnc", + ) verbose = traits.Bool( - desc='Print out log messages. Default: False.', argstr='-verbose') + desc="Print out log messages. Default: False.", argstr="-verbose" + ) clobber = traits.Bool( - desc='Overwrite existing file.', - argstr='-clobber', + desc="Overwrite existing file.", + argstr="-clobber", usedefault=True, - default_value=True) + default_value=True, + ) com = traits.Bool( - desc= - 'Use the CoM of the volume for the new centre (via mincstats). Default: False', - argstr='-com') + desc="Use the CoM of the volume for the new centre (via mincstats). 
Default: False", + argstr="-com", + ) centre = traits.Tuple( traits.Float, traits.Float, traits.Float, - argstr='-centre %s %s %s', - desc='Centre to use (x,y,z) [default: 0 0 0].', + argstr="-centre %s %s %s", + desc="Centre to use (x,y,z) [default: 0 0 0].", ) zero_dircos = traits.Bool( - desc='Set the direction cosines to identity [default].', - argstr='-zero_dircos') + desc="Set the direction cosines to identity [default].", argstr="-zero_dircos" + ) class VolcentreOutputSpec(TraitedSpec): - output_file = File(desc='output file', exists=True) + output_file = File(desc="output file", exists=True) class Volcentre(CommandLine): @@ -2759,7 +2971,7 @@ class Volcentre(CommandLine): input_spec = VolcentreInputSpec output_spec = VolcentreOutputSpec - _cmd = 'volcentre' + _cmd = "volcentre" class VolpadInputSpec(CommandLineInputSpec): @@ -2787,55 +2999,59 @@ class VolpadInputSpec(CommandLineInputSpec): """ input_file = File( - desc='input file to centre', + desc="input file to centre", exists=True, mandatory=True, - argstr='%s', + argstr="%s", position=-2, ) output_file = File( - desc='output file', + desc="output file", genfile=True, - argstr='%s', + argstr="%s", position=-1, - name_source=['input_file'], + name_source=["input_file"], hash_files=False, - name_template='%s_volpad.mnc') + name_template="%s_volpad.mnc", + ) verbose = traits.Bool( - desc='Print out log messages. Default: False.', argstr='-verbose') + desc="Print out log messages. Default: False.", argstr="-verbose" + ) clobber = traits.Bool( - desc='Overwrite existing file.', - argstr='-clobber', + desc="Overwrite existing file.", + argstr="-clobber", usedefault=True, - default_value=True) + default_value=True, + ) auto = traits.Bool( - desc= - 'Automatically determine padding distances (uses -distance as max). Default: False.', - argstr='-auto') + desc="Automatically determine padding distances (uses -distance as max). Default: False.", + argstr="-auto", + ) auto_freq = traits.Float( - desc= - 'Frequency of voxels over bimodalt threshold to stop at [default: 500].', - argstr='-auto_freq %s') + desc="Frequency of voxels over bimodalt threshold to stop at [default: 500].", + argstr="-auto_freq %s", + ) distance = traits.Int( - desc='Padding distance (in voxels) [default: 4].', - argstr='-distance %s') + desc="Padding distance (in voxels) [default: 4].", argstr="-distance %s" + ) smooth = traits.Bool( - desc='Smooth (blur) edges before padding. Default: False.', - argstr='-smooth') + desc="Smooth (blur) edges before padding. 
Default: False.", argstr="-smooth" + ) smooth_distance = traits.Int( - desc='Smoothing distance (in voxels) [default: 4].', - argstr='-smooth_distance %s') + desc="Smoothing distance (in voxels) [default: 4].", + argstr="-smooth_distance %s", + ) class VolpadOutputSpec(TraitedSpec): - output_file = File(desc='output file', exists=True) + output_file = File(desc="output file", exists=True) class Volpad(CommandLine): @@ -2852,52 +3068,57 @@ class Volpad(CommandLine): input_spec = VolpadInputSpec output_spec = VolpadOutputSpec - _cmd = 'volpad' + _cmd = "volpad" class VolisoInputSpec(CommandLineInputSpec): input_file = File( - desc='input file to convert to isotropic sampling', + desc="input file to convert to isotropic sampling", exists=True, mandatory=True, - argstr='%s', + argstr="%s", position=-2, ) output_file = File( - desc='output file', + desc="output file", genfile=True, - argstr='%s', + argstr="%s", position=-1, - name_source=['input_file'], + name_source=["input_file"], hash_files=False, - name_template='%s_voliso.mnc') + name_template="%s_voliso.mnc", + ) verbose = traits.Bool( - desc='Print out log messages. Default: False.', argstr='--verbose') + desc="Print out log messages. Default: False.", argstr="--verbose" + ) clobber = traits.Bool( - desc='Overwrite existing file.', - argstr='--clobber', + desc="Overwrite existing file.", + argstr="--clobber", usedefault=True, - default_value=True) + default_value=True, + ) maxstep = traits.Float( - desc='The target maximum step desired in the output volume.', - argstr='--maxstep %s') + desc="The target maximum step desired in the output volume.", + argstr="--maxstep %s", + ) minstep = traits.Float( - desc='The target minimum step desired in the output volume.', - argstr='--minstep %s') + desc="The target minimum step desired in the output volume.", + argstr="--minstep %s", + ) avgstep = traits.Bool( - desc= - 'Calculate the maximum step from the average steps of the input volume.', - argstr='--avgstep') + desc="Calculate the maximum step from the average steps of the input volume.", + argstr="--avgstep", + ) class VolisoOutputSpec(TraitedSpec): - output_file = File(desc='output file', exists=True) + output_file = File(desc="output file", exists=True) class Voliso(CommandLine): @@ -2915,42 +3136,43 @@ class Voliso(CommandLine): input_spec = VolisoInputSpec output_spec = VolisoOutputSpec - _cmd = 'voliso' + _cmd = "voliso" class GennlxfmInputSpec(CommandLineInputSpec): output_file = File( - desc='output file', + desc="output file", genfile=True, - argstr='%s', + argstr="%s", position=-1, - name_source=['like'], + name_source=["like"], hash_files=False, - name_template='%s_gennlxfm.xfm') + name_template="%s_gennlxfm.xfm", + ) verbose = traits.Bool( - desc='Print out log messages. Default: False.', argstr='-verbose') + desc="Print out log messages. Default: False.", argstr="-verbose" + ) clobber = traits.Bool( - desc='Overwrite existing file.', - argstr='-clobber', + desc="Overwrite existing file.", + argstr="-clobber", usedefault=True, - default_value=True) + default_value=True, + ) ident = traits.Bool( - desc='Generate an identity xfm. Default: False.', argstr='-ident') - step = traits.Int( - desc='Output ident xfm step [default: 1].', argstr='-step %s') + desc="Generate an identity xfm. 
Default: False.", argstr="-ident" + ) + step = traits.Int(desc="Output ident xfm step [default: 1].", argstr="-step %s") like = File( - desc='Generate a nlxfm like this file.', - exists=True, - argstr='-like %s', + desc="Generate a nlxfm like this file.", exists=True, argstr="-like %s", ) class GennlxfmOutputSpec(TraitedSpec): - output_file = File(desc='output file', exists=True) - output_grid = File(desc='output grid', exists=True) + output_file = File(desc="output file", exists=True) + output_grid = File(desc="output grid", exists=True) class Gennlxfm(CommandLine): @@ -2973,51 +3195,53 @@ class Gennlxfm(CommandLine): input_spec = GennlxfmInputSpec output_spec = GennlxfmOutputSpec - _cmd = 'gennlxfm' + _cmd = "gennlxfm" def _list_outputs(self): outputs = super(Gennlxfm, self)._list_outputs() - outputs['output_grid'] = re.sub('.(nlxfm|xfm)$', '_grid_0.mnc', - outputs['output_file']) + outputs["output_grid"] = re.sub( + ".(nlxfm|xfm)$", "_grid_0.mnc", outputs["output_file"] + ) return outputs class XfmConcatInputSpec(CommandLineInputSpec): input_files = InputMultiPath( File(exists=True), - desc='input file(s)', + desc="input file(s)", mandatory=True, - sep=' ', - argstr='%s', - position=-2) + sep=" ", + argstr="%s", + position=-2, + ) # This is a dummy input. - input_grid_files = InputMultiPath( - File, - desc='input grid file(s)', - ) + input_grid_files = InputMultiPath(File, desc="input grid file(s)",) output_file = File( - desc='output file', + desc="output file", genfile=True, - argstr='%s', + argstr="%s", position=-1, - name_source=['input_files'], + name_source=["input_files"], hash_files=False, - name_template='%s_xfmconcat.xfm') + name_template="%s_xfmconcat.xfm", + ) verbose = traits.Bool( - desc='Print out log messages. Default: False.', argstr='-verbose') + desc="Print out log messages. 
Default: False.", argstr="-verbose" + ) clobber = traits.Bool( - desc='Overwrite existing file.', - argstr='-clobber', + desc="Overwrite existing file.", + argstr="-clobber", usedefault=True, - default_value=True) + default_value=True, + ) class XfmConcatOutputSpec(TraitedSpec): - output_file = File(desc='output file', exists=True) - output_grids = OutputMultiPath(File(exists=True), desc='output grids') + output_file = File(desc="output file", exists=True) + output_grids = OutputMultiPath(File(exists=True), desc="output grids") class XfmConcat(CommandLine): @@ -3036,64 +3260,60 @@ class XfmConcat(CommandLine): input_spec = XfmConcatInputSpec output_spec = XfmConcatOutputSpec - _cmd = 'xfmconcat' + _cmd = "xfmconcat" def _list_outputs(self): outputs = super(XfmConcat, self)._list_outputs() - if os.path.exists(outputs['output_file']): - if 'grid' in open(outputs['output_file'], 'r').read(): - outputs['output_grids'] = glob.glob( - re.sub('.(nlxfm|xfm)$', '_grid_*.mnc', - outputs['output_file'])) + if os.path.exists(outputs["output_file"]): + if "grid" in open(outputs["output_file"], "r").read(): + outputs["output_grids"] = glob.glob( + re.sub(".(nlxfm|xfm)$", "_grid_*.mnc", outputs["output_file"]) + ) return outputs class BestLinRegInputSpec(CommandLineInputSpec): source = File( - desc='source Minc file', - exists=True, - mandatory=True, - argstr='%s', - position=-4, + desc="source Minc file", exists=True, mandatory=True, argstr="%s", position=-4, ) target = File( - desc='target Minc file', - exists=True, - mandatory=True, - argstr='%s', - position=-3, + desc="target Minc file", exists=True, mandatory=True, argstr="%s", position=-3, ) output_xfm = File( - desc='output xfm file', + desc="output xfm file", genfile=True, - argstr='%s', + argstr="%s", position=-2, - name_source=['source'], + name_source=["source"], hash_files=False, - name_template='%s_bestlinreg.xfm', - keep_extension=False) + name_template="%s_bestlinreg.xfm", + keep_extension=False, + ) output_mnc = File( - desc='output mnc file', + desc="output mnc file", genfile=True, - argstr='%s', + argstr="%s", position=-1, - name_source=['source'], + name_source=["source"], hash_files=False, - name_template='%s_bestlinreg.mnc', - keep_extension=False) + name_template="%s_bestlinreg.mnc", + keep_extension=False, + ) verbose = traits.Bool( - desc='Print out log messages. Default: False.', argstr='-verbose') + desc="Print out log messages. 
Default: False.", argstr="-verbose" + ) clobber = traits.Bool( - desc='Overwrite existing file.', - argstr='-clobber', + desc="Overwrite existing file.", + argstr="-clobber", usedefault=True, - default_value=True) + default_value=True, + ) # FIXME Very bare implementation, none of these are done yet: """ @@ -3107,8 +3327,8 @@ class BestLinRegInputSpec(CommandLineInputSpec): class BestLinRegOutputSpec(TraitedSpec): - output_xfm = File(desc='output xfm file', exists=True) - output_mnc = File(desc='output mnc file', exists=True) + output_xfm = File(desc="output xfm file", exists=True) + output_mnc = File(desc="output mnc file", exists=True) class BestLinReg(CommandLine): @@ -3132,69 +3352,58 @@ class BestLinReg(CommandLine): input_spec = BestLinRegInputSpec output_spec = BestLinRegOutputSpec - _cmd = 'bestlinreg' + _cmd = "bestlinreg" class NlpFitInputSpec(CommandLineInputSpec): source = File( - desc='source Minc file', - exists=True, - mandatory=True, - argstr='%s', - position=-3, + desc="source Minc file", exists=True, mandatory=True, argstr="%s", position=-3, ) target = File( - desc='target Minc file', - exists=True, - mandatory=True, - argstr='%s', - position=-2, + desc="target Minc file", exists=True, mandatory=True, argstr="%s", position=-2, ) - output_xfm = File( - desc='output xfm file', - genfile=True, - argstr='%s', - position=-1, - ) + output_xfm = File(desc="output xfm file", genfile=True, argstr="%s", position=-1,) # This is a dummy input. - input_grid_files = InputMultiPath( - File, - desc='input grid file(s)', - ) + input_grid_files = InputMultiPath(File, desc="input grid file(s)",) config_file = File( - desc='File containing the fitting configuration use.', - argstr='-config_file %s', + desc="File containing the fitting configuration use.", + argstr="-config_file %s", mandatory=True, - exists=True) + exists=True, + ) init_xfm = File( - desc='Initial transformation (default identity).', - argstr='-init_xfm %s', + desc="Initial transformation (default identity).", + argstr="-init_xfm %s", mandatory=True, - exists=True) + exists=True, + ) source_mask = File( - desc='Source mask to use during fitting.', - argstr='-source_mask %s', + desc="Source mask to use during fitting.", + argstr="-source_mask %s", mandatory=True, - exists=True) + exists=True, + ) verbose = traits.Bool( - desc='Print out log messages. Default: False.', argstr='-verbose') + desc="Print out log messages. 
Default: False.", argstr="-verbose" + ) clobber = traits.Bool( - desc='Overwrite existing file.', - argstr='-clobber', + desc="Overwrite existing file.", + argstr="-clobber", usedefault=True, - default_value=True) + default_value=True, + ) class NlpFitOutputSpec(TraitedSpec): - output_xfm = File(desc='output xfm file', exists=True) - output_grid = File(desc='output grid file', exists=True) + output_xfm = File(desc="output xfm file", exists=True) + output_grid = File(desc="output grid file", exists=True) class NlpFit(CommandLine): @@ -3222,30 +3431,33 @@ class NlpFit(CommandLine): input_spec = NlpFitInputSpec output_spec = NlpFitOutputSpec - _cmd = 'nlpfit' + _cmd = "nlpfit" def _gen_filename(self, name): - if name == 'output_xfm': + if name == "output_xfm": output_xfm = self.inputs.output_xfm if isdefined(output_xfm): return os.path.abspath(output_xfm) else: - return aggregate_filename( - [self.inputs.source, self.inputs.target], - 'nlpfit_xfm_output') + '.xfm' + return ( + aggregate_filename( + [self.inputs.source, self.inputs.target], "nlpfit_xfm_output" + ) + + ".xfm" + ) else: raise NotImplemented def _list_outputs(self): outputs = self.output_spec().get() - outputs['output_xfm'] = os.path.abspath( - self._gen_filename('output_xfm')) + outputs["output_xfm"] = os.path.abspath(self._gen_filename("output_xfm")) - assert os.path.exists(outputs['output_xfm']) - if 'grid' in open(outputs['output_xfm'], 'r').read(): - outputs['output_grid'] = re.sub('.(nlxfm|xfm)$', '_grid_0.mnc', - outputs['output_xfm']) + assert os.path.exists(outputs["output_xfm"]) + if "grid" in open(outputs["output_xfm"], "r").read(): + outputs["output_grid"] = re.sub( + ".(nlxfm|xfm)$", "_grid_0.mnc", outputs["output_xfm"] + ) return outputs @@ -3253,49 +3465,48 @@ def _list_outputs(self): class XfmAvgInputSpec(CommandLineInputSpec): input_files = InputMultiPath( File(exists=True), - desc='input file(s)', + desc="input file(s)", mandatory=True, - sep=' ', - argstr='%s', - position=-2) + sep=" ", + argstr="%s", + position=-2, + ) # This is a dummy input. - input_grid_files = InputMultiPath( - File, - desc='input grid file(s)', - ) + input_grid_files = InputMultiPath(File, desc="input grid file(s)",) - output_file = File( - desc='output file', - genfile=True, - argstr='%s', - position=-1, - ) + output_file = File(desc="output file", genfile=True, argstr="%s", position=-1,) verbose = traits.Bool( - desc='Print out log messages. Default: False.', argstr='-verbose') + desc="Print out log messages. 
Default: False.", argstr="-verbose" + ) clobber = traits.Bool( - desc='Overwrite existing file.', - argstr='-clobber', + desc="Overwrite existing file.", + argstr="-clobber", usedefault=True, - default_value=True) + default_value=True, + ) # FIXME xor these: avg_linear = traits.Bool( - desc='average the linear part [default].', argstr='-avg_linear') + desc="average the linear part [default].", argstr="-avg_linear" + ) avg_nonlinear = traits.Bool( - desc='average the non-linear part [default].', argstr='-avg_nonlinear') + desc="average the non-linear part [default].", argstr="-avg_nonlinear" + ) ignore_linear = traits.Bool( - desc='opposite of -avg_linear.', argstr='-ignore_linear') + desc="opposite of -avg_linear.", argstr="-ignore_linear" + ) ignore_nonlinear = traits.Bool( - desc='opposite of -avg_nonlinear.', argstr='-ignore_nonline') + desc="opposite of -avg_nonlinear.", argstr="-ignore_nonline" + ) class XfmAvgOutputSpec(TraitedSpec): - output_file = File(desc='output file', exists=True) - output_grid = File(desc='output grid file', exists=True) + output_file = File(desc="output file", exists=True) + output_grid = File(desc="output grid file", exists=True) class XfmAvg(CommandLine): @@ -3321,62 +3532,59 @@ class XfmAvg(CommandLine): input_spec = XfmAvgInputSpec output_spec = XfmAvgOutputSpec - _cmd = 'xfmavg' + _cmd = "xfmavg" def _gen_filename(self, name): - if name == 'output_file': + if name == "output_file": output_file = self.inputs.output_file if isdefined(output_file): return os.path.abspath(output_file) else: - return aggregate_filename(self.inputs.input_files, - 'xfmavg_output') + '.xfm' + return ( + aggregate_filename(self.inputs.input_files, "xfmavg_output") + + ".xfm" + ) else: raise NotImplemented def _gen_outfilename(self): - return self._gen_filename('output_file') + return self._gen_filename("output_file") def _list_outputs(self): outputs = self.output_spec().get() - outputs['output_file'] = os.path.abspath(self._gen_outfilename()) + outputs["output_file"] = os.path.abspath(self._gen_outfilename()) - assert os.path.exists(outputs['output_file']) - if 'grid' in open(outputs['output_file'], 'r').read(): - outputs['output_grid'] = re.sub('.(nlxfm|xfm)$', '_grid_0.mnc', - outputs['output_file']) + assert os.path.exists(outputs["output_file"]) + if "grid" in open(outputs["output_file"], "r").read(): + outputs["output_grid"] = re.sub( + ".(nlxfm|xfm)$", "_grid_0.mnc", outputs["output_file"] + ) return outputs class XfmInvertInputSpec(CommandLineInputSpec): input_file = File( - desc='input file', - exists=True, - mandatory=True, - argstr='%s', - position=-2) - - output_file = File( - desc='output file', - genfile=True, - argstr='%s', - position=-1, + desc="input file", exists=True, mandatory=True, argstr="%s", position=-2 ) + output_file = File(desc="output file", genfile=True, argstr="%s", position=-1,) + verbose = traits.Bool( - desc='Print out log messages. Default: False.', argstr='-verbose') + desc="Print out log messages. 
Default: False.", argstr="-verbose" + ) clobber = traits.Bool( - desc='Overwrite existing file.', - argstr='-clobber', + desc="Overwrite existing file.", + argstr="-clobber", usedefault=True, - default_value=True) + default_value=True, + ) class XfmInvertOutputSpec(TraitedSpec): - output_file = File(desc='output file', exists=True) - output_grid = File(desc='output grid file', exists=True) + output_file = File(desc="output file", exists=True) + output_grid = File(desc="output grid file", exists=True) class XfmInvert(CommandLine): @@ -3395,31 +3603,34 @@ class XfmInvert(CommandLine): input_spec = XfmInvertInputSpec output_spec = XfmInvertOutputSpec - _cmd = 'xfminvert' + _cmd = "xfminvert" def _gen_filename(self, name): - if name == 'output_file': + if name == "output_file": output_file = self.inputs.output_file if isdefined(output_file): return os.path.abspath(output_file) else: - return aggregate_filename([self.inputs.input_file], - 'xfminvert_output') + '.xfm' + return ( + aggregate_filename([self.inputs.input_file], "xfminvert_output") + + ".xfm" + ) else: raise NotImplemented def _gen_outfilename(self): - return self._gen_filename('output_file') + return self._gen_filename("output_file") def _list_outputs(self): outputs = self.output_spec().get() - outputs['output_file'] = os.path.abspath(self._gen_outfilename()) + outputs["output_file"] = os.path.abspath(self._gen_outfilename()) - assert os.path.exists(outputs['output_file']) - if 'grid' in open(outputs['output_file'], 'r').read(): - outputs['output_grid'] = re.sub('.(nlxfm|xfm)$', '_grid_0.mnc', - outputs['output_file']) + assert os.path.exists(outputs["output_file"]) + if "grid" in open(outputs["output_file"], "r").read(): + outputs["output_grid"] = re.sub( + ".(nlxfm|xfm)$", "_grid_0.mnc", outputs["output_file"] + ) return outputs @@ -3427,54 +3638,63 @@ def _list_outputs(self): class BigAverageInputSpec(CommandLineInputSpec): input_files = InputMultiPath( File(exists=True), - desc='input file(s)', + desc="input file(s)", mandatory=True, - sep=' ', - argstr='%s', - position=-2) + sep=" ", + argstr="%s", + position=-2, + ) output_file = File( - desc='output file', + desc="output file", genfile=True, - argstr='%s', + argstr="%s", position=-1, - name_source=['input_files'], + name_source=["input_files"], hash_files=False, - name_template='%s_bigaverage.mnc') + name_template="%s_bigaverage.mnc", + ) verbose = traits.Bool( - desc='Print out log messages. Default: False.', argstr='--verbose') + desc="Print out log messages. Default: False.", argstr="--verbose" + ) clobber = traits.Bool( - desc='Overwrite existing file.', - argstr='--clobber', + desc="Overwrite existing file.", + argstr="--clobber", usedefault=True, - default_value=True) + default_value=True, + ) # FIXME Redumentary implementation, various parameters not implemented. # TODO! output_float = traits.Bool( - desc='Output files with float precision.', argstr='--float') + desc="Output files with float precision.", argstr="--float" + ) robust = traits.Bool( - desc=('Perform robust averaging, features that are outside 1 standard' - 'deviation from the mean are downweighted. Works well for noisy' - 'data with artifacts. see the --tmpdir option if you have a' - 'large number of input files.'), - argstr='-robust') + desc=( + "Perform robust averaging, features that are outside 1 standard" + "deviation from the mean are downweighted. Works well for noisy" + "data with artifacts. see the --tmpdir option if you have a" + "large number of input files." 
+ ), + argstr="-robust", + ) # Should Nipype deal with where the temp directory is? - tmpdir = Directory(desc='temporary files directory', argstr='-tmpdir %s') + tmpdir = Directory(desc="temporary files directory", argstr="-tmpdir %s") sd_file = File( - desc='Place standard deviation image in specified file.', - argstr='--sdfile %s', - name_source=['input_files'], + desc="Place standard deviation image in specified file.", + argstr="--sdfile %s", + name_source=["input_files"], hash_files=False, - name_template='%s_bigaverage_stdev.mnc') + name_template="%s_bigaverage_stdev.mnc", + ) class BigAverageOutputSpec(TraitedSpec): - output_file = File(desc='output file', exists=True) - sd_file = File(desc='standard deviation image', exists=True) + output_file = File(desc="output file", exists=True) + sd_file = File(desc="standard deviation image", exists=True) class BigAverage(CommandLine): @@ -3512,42 +3732,41 @@ class BigAverage(CommandLine): input_spec = BigAverageInputSpec output_spec = BigAverageOutputSpec - _cmd = 'mincbigaverage' + _cmd = "mincbigaverage" class ReshapeInputSpec(CommandLineInputSpec): input_file = File( - desc='input file', - exists=True, - mandatory=True, - argstr='%s', - position=-2) + desc="input file", exists=True, mandatory=True, argstr="%s", position=-2 + ) output_file = File( - desc='output file', + desc="output file", genfile=True, - argstr='%s', + argstr="%s", position=-1, - name_source=['input_file'], + name_source=["input_file"], hash_files=False, - name_template='%s_reshape.mnc') + name_template="%s_reshape.mnc", + ) verbose = traits.Bool( - desc='Print out log messages. Default: False.', argstr='-verbose') + desc="Print out log messages. Default: False.", argstr="-verbose" + ) clobber = traits.Bool( - desc='Overwrite existing file.', - argstr='-clobber', + desc="Overwrite existing file.", + argstr="-clobber", usedefault=True, - default_value=True) + default_value=True, + ) # FIXME MANY options not implemented! - write_short = traits.Bool( - desc='Convert to short integer data.', argstr='-short') + write_short = traits.Bool(desc="Convert to short integer data.", argstr="-short") class ReshapeOutputSpec(TraitedSpec): - output_file = File(desc='output file', exists=True) + output_file = File(desc="output file", exists=True) class Reshape(CommandLine): @@ -3570,78 +3789,76 @@ class Reshape(CommandLine): input_spec = ReshapeInputSpec output_spec = ReshapeOutputSpec - _cmd = 'mincreshape' + _cmd = "mincreshape" class VolSymmInputSpec(CommandLineInputSpec): input_file = File( - desc='input file', - exists=True, - mandatory=True, - argstr='%s', - position=-3) + desc="input file", exists=True, mandatory=True, argstr="%s", position=-3 + ) trans_file = File( - desc='output xfm trans file', + desc="output xfm trans file", genfile=True, - argstr='%s', + argstr="%s", position=-2, - name_source=['input_file'], + name_source=["input_file"], hash_files=False, - name_template='%s_vol_symm.xfm', - keep_extension=False) + name_template="%s_vol_symm.xfm", + keep_extension=False, + ) output_file = File( - desc='output file', + desc="output file", genfile=True, - argstr='%s', + argstr="%s", position=-1, - name_source=['input_file'], + name_source=["input_file"], hash_files=False, - name_template='%s_vol_symm.mnc') + name_template="%s_vol_symm.mnc", + ) # This is a dummy input. - input_grid_files = InputMultiPath( - File, - desc='input grid file(s)', - ) + input_grid_files = InputMultiPath(File, desc="input grid file(s)",) verbose = traits.Bool( - desc='Print out log messages. 
Default: False.', argstr='-verbose') + desc="Print out log messages. Default: False.", argstr="-verbose" + ) clobber = traits.Bool( - desc='Overwrite existing file.', - argstr='-clobber', + desc="Overwrite existing file.", + argstr="-clobber", usedefault=True, - default_value=True) + default_value=True, + ) # FIXME MANY options not implemented! - fit_linear = traits.Bool(desc='Fit using a linear xfm.', argstr='-linear') - fit_nonlinear = traits.Bool( - desc='Fit using a non-linear xfm.', argstr='-nonlinear') + fit_linear = traits.Bool(desc="Fit using a linear xfm.", argstr="-linear") + fit_nonlinear = traits.Bool(desc="Fit using a non-linear xfm.", argstr="-nonlinear") # FIXME This changes the input/output behaviour of trans_file! Split into # two separate interfaces? nofit = traits.Bool( - desc='Use the input transformation instead of generating one.', - argstr='-nofit') + desc="Use the input transformation instead of generating one.", argstr="-nofit" + ) config_file = File( - desc= - 'File containing the fitting configuration (nlpfit -help for info).', - argstr='-config_file %s', - exists=True) + desc="File containing the fitting configuration (nlpfit -help for info).", + argstr="-config_file %s", + exists=True, + ) - x = traits.Bool(desc='Flip volume in x-plane (default).', argstr='-x') - y = traits.Bool(desc='Flip volume in y-plane.', argstr='-y') - z = traits.Bool(desc='Flip volume in z-plane.', argstr='-z') + x = traits.Bool(desc="Flip volume in x-plane (default).", argstr="-x") + y = traits.Bool(desc="Flip volume in y-plane.", argstr="-y") + z = traits.Bool(desc="Flip volume in z-plane.", argstr="-z") class VolSymmOutputSpec(TraitedSpec): - output_file = File(desc='output file', exists=True) - trans_file = File(desc='xfm trans file', exists=True) + output_file = File(desc="output file", exists=True) + trans_file = File(desc="xfm trans file", exists=True) output_grid = File( - desc='output grid file', exists=True) # FIXME Is exists=True correct? + desc="output grid file", exists=True + ) # FIXME Is exists=True correct? class VolSymm(CommandLine): @@ -3667,15 +3884,16 @@ class VolSymm(CommandLine): input_spec = VolSymmInputSpec output_spec = VolSymmOutputSpec - _cmd = 'volsymm' + _cmd = "volsymm" def _list_outputs(self): outputs = super(VolSymm, self)._list_outputs() # Have to manually check for the grid files. 
- if os.path.exists(outputs['trans_file']): - if 'grid' in open(outputs['trans_file'], 'r').read(): - outputs['output_grid'] = re.sub('.(nlxfm|xfm)$', '_grid_0.mnc', - outputs['trans_file']) + if os.path.exists(outputs["trans_file"]): + if "grid" in open(outputs["trans_file"], "r").read(): + outputs["output_grid"] = re.sub( + ".(nlxfm|xfm)$", "_grid_0.mnc", outputs["trans_file"] + ) return outputs diff --git a/nipype/interfaces/minc/testdata.py b/nipype/interfaces/minc/testdata.py index f027efa35e..f4e2836d65 100644 --- a/nipype/interfaces/minc/testdata.py +++ b/nipype/interfaces/minc/testdata.py @@ -3,14 +3,11 @@ import os from ...testing import example_data -minc2Dfile = example_data('minc_test_2D_00.mnc') -minc3Dfile = example_data('minc_test_3D_00.mnc') +minc2Dfile = example_data("minc_test_2D_00.mnc") +minc3Dfile = example_data("minc_test_3D_00.mnc") -nlp_config = example_data('minc_nlp.conf') +nlp_config = example_data("minc_nlp.conf") -def nonempty_minc_data(i, shape='2D'): - return example_data('minc_test_%s_%.2d.mnc' % ( - shape, - i, - )) +def nonempty_minc_data(i, shape="2D"): + return example_data("minc_test_%s_%.2d.mnc" % (shape, i,)) diff --git a/nipype/interfaces/minc/tests/test_auto_Average.py b/nipype/interfaces/minc/tests/test_auto_Average.py index dd8b49efe3..c9066611dd 100644 --- a/nipype/interfaces/minc/tests/test_auto_Average.py +++ b/nipype/interfaces/minc/tests/test_auto_Average.py @@ -4,154 +4,195 @@ def test_Average_inputs(): input_map = dict( - args=dict(argstr='%s', ), - avgdim=dict(argstr='-avgdim %s', ), - binarize=dict(argstr='-binarize', ), - binrange=dict(argstr='-binrange %s %s', ), - binvalue=dict(argstr='-binvalue %s', ), + args=dict(argstr="%s",), + avgdim=dict(argstr="-avgdim %s",), + binarize=dict(argstr="-binarize",), + binrange=dict(argstr="-binrange %s %s",), + binvalue=dict(argstr="-binvalue %s",), check_dimensions=dict( - argstr='-check_dimensions', - xor=('check_dimensions', 'no_check_dimensions'), - ), - clobber=dict( - argstr='-clobber', - usedefault=True, - ), - copy_header=dict( - argstr='-copy_header', - xor=('copy_header', 'no_copy_header'), - ), - debug=dict(argstr='-debug', ), - environ=dict( - nohash=True, - usedefault=True, + argstr="-check_dimensions", xor=("check_dimensions", "no_check_dimensions"), ), + clobber=dict(argstr="-clobber", usedefault=True,), + copy_header=dict(argstr="-copy_header", xor=("copy_header", "no_copy_header"),), + debug=dict(argstr="-debug",), + environ=dict(nohash=True, usedefault=True,), filelist=dict( - argstr='-filelist %s', + argstr="-filelist %s", extensions=None, mandatory=True, - xor=('input_files', 'filelist'), + xor=("input_files", "filelist"), ), format_byte=dict( - argstr='-byte', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-byte", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_double=dict( - argstr='-double', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-double", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_filetype=dict( - argstr='-filetype', - xor=('format_filetype', 
'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-filetype", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_float=dict( - argstr='-float', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-float", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_int=dict( - argstr='-int', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-int", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_long=dict( - argstr='-long', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-long", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_short=dict( - argstr='-short', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-short", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_signed=dict( - argstr='-signed', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-signed", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_unsigned=dict( - argstr='-unsigned', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-unsigned", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), input_files=dict( - argstr='%s', + argstr="%s", mandatory=True, position=-2, - sep=' ', - xor=('input_files', 'filelist'), + sep=" ", + xor=("input_files", "filelist"), ), max_buffer_size_in_kb=dict( - argstr='-max_buffer_size_in_kb %d', - usedefault=True, + argstr="-max_buffer_size_in_kb %d", usedefault=True, ), no_check_dimensions=dict( - argstr='-nocheck_dimensions', - xor=('check_dimensions', 'no_check_dimensions'), + argstr="-nocheck_dimensions", + xor=("check_dimensions", "no_check_dimensions"), ), no_copy_header=dict( - argstr='-nocopy_header', - xor=('copy_header', 'no_copy_header'), - ), - nonormalize=dict( - argstr='-nonormalize', - xor=('normalize', 'nonormalize'), - ), - normalize=dict( - argstr='-normalize', - xor=('normalize', 'nonormalize'), + argstr="-nocopy_header", xor=("copy_header", 
"no_copy_header"), ), + nonormalize=dict(argstr="-nonormalize", xor=("normalize", "nonormalize"),), + normalize=dict(argstr="-normalize", xor=("normalize", "nonormalize"),), output_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, hash_files=False, - name_source=['input_files'], - name_template='%s_averaged.mnc', + name_source=["input_files"], + name_template="%s_averaged.mnc", position=-1, ), - quiet=dict( - argstr='-quiet', - xor=('verbose', 'quiet'), - ), - sdfile=dict( - argstr='-sdfile %s', - extensions=None, - ), - two=dict(argstr='-2', ), - verbose=dict( - argstr='-verbose', - xor=('verbose', 'quiet'), - ), - voxel_range=dict(argstr='-range %d %d', ), - weights=dict( - argstr='-weights %s', - sep=',', - ), - width_weighted=dict( - argstr='-width_weighted', - requires=('avgdim', ), - ), + quiet=dict(argstr="-quiet", xor=("verbose", "quiet"),), + sdfile=dict(argstr="-sdfile %s", extensions=None,), + two=dict(argstr="-2",), + verbose=dict(argstr="-verbose", xor=("verbose", "quiet"),), + voxel_range=dict(argstr="-range %d %d",), + weights=dict(argstr="-weights %s", sep=",",), + width_weighted=dict(argstr="-width_weighted", requires=("avgdim",),), ) inputs = Average.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Average_outputs(): - output_map = dict(output_file=dict(extensions=None, ), ) + output_map = dict(output_file=dict(extensions=None,),) outputs = Average.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_BBox.py b/nipype/interfaces/minc/tests/test_auto_BBox.py index 9b92660b9a..c1dfbb370a 100644 --- a/nipype/interfaces/minc/tests/test_auto_BBox.py +++ b/nipype/interfaces/minc/tests/test_auto_BBox.py @@ -4,51 +4,34 @@ def test_BBox_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - format_minccrop=dict(argstr='-minccrop', ), - format_mincresample=dict(argstr='-mincresample', ), - format_mincreshape=dict(argstr='-mincreshape', ), - input_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - one_line=dict( - argstr='-one_line', - xor=('one_line', 'two_lines'), - ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + format_minccrop=dict(argstr="-minccrop",), + format_mincresample=dict(argstr="-mincresample",), + format_mincreshape=dict(argstr="-mincreshape",), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + one_line=dict(argstr="-one_line", xor=("one_line", "two_lines"),), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), output_file=dict( extensions=None, hash_files=False, keep_extension=False, - name_source=['input_file'], - name_template='%s_bbox.txt', + name_source=["input_file"], + name_template="%s_bbox.txt", position=-1, ), - threshold=dict(argstr='-threshold', ), - two_lines=dict( - argstr='-two_lines', - xor=('one_line', 'two_lines'), - ), + threshold=dict(argstr="-threshold",), + two_lines=dict(argstr="-two_lines", xor=("one_line", "two_lines"),), ) inputs = BBox.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BBox_outputs(): - output_map = dict(output_file=dict(extensions=None, ), ) 
+ output_map = dict(output_file=dict(extensions=None,),) outputs = BBox.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Beast.py b/nipype/interfaces/minc/tests/test_auto_Beast.py index 62944284a6..5aed48440f 100644 --- a/nipype/interfaces/minc/tests/test_auto_Beast.py +++ b/nipype/interfaces/minc/tests/test_auto_Beast.py @@ -4,85 +4,45 @@ def test_Beast_inputs(): input_map = dict( - abspath=dict( - argstr='-abspath', - usedefault=True, - ), - args=dict(argstr='%s', ), - clobber=dict( - argstr='-clobber', - usedefault=True, - ), - confidence_level_alpha=dict( - argstr='-alpha %s', - usedefault=True, - ), - configuration_file=dict( - argstr='-configuration %s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fill_holes=dict(argstr='-fill', ), - flip_images=dict(argstr='-flip', ), - input_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - library_dir=dict( - argstr='%s', - mandatory=True, - position=-3, - ), - load_moments=dict(argstr='-load_moments', ), - median_filter=dict(argstr='-median', ), - nlm_filter=dict(argstr='-nlm_filter', ), - number_selected_images=dict( - argstr='-selection_num %s', - usedefault=True, - ), + abspath=dict(argstr="-abspath", usedefault=True,), + args=dict(argstr="%s",), + clobber=dict(argstr="-clobber", usedefault=True,), + confidence_level_alpha=dict(argstr="-alpha %s", usedefault=True,), + configuration_file=dict(argstr="-configuration %s", extensions=None,), + environ=dict(nohash=True, usedefault=True,), + fill_holes=dict(argstr="-fill",), + flip_images=dict(argstr="-flip",), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + library_dir=dict(argstr="%s", mandatory=True, position=-3,), + load_moments=dict(argstr="-load_moments",), + median_filter=dict(argstr="-median",), + nlm_filter=dict(argstr="-nlm_filter",), + number_selected_images=dict(argstr="-selection_num %s", usedefault=True,), output_file=dict( - argstr='%s', + argstr="%s", extensions=None, hash_files=False, - name_source=['input_file'], - name_template='%s_beast_mask.mnc', + name_source=["input_file"], + name_template="%s_beast_mask.mnc", position=-1, ), - patch_size=dict( - argstr='-patch_size %s', - usedefault=True, - ), - probability_map=dict(argstr='-probability', ), - same_resolution=dict(argstr='-same_resolution', ), - search_area=dict( - argstr='-search_area %s', - usedefault=True, - ), - smoothness_factor_beta=dict( - argstr='-beta %s', - usedefault=True, - ), - threshold_patch_selection=dict( - argstr='-threshold %s', - usedefault=True, - ), - voxel_size=dict( - argstr='-voxel_size %s', - usedefault=True, - ), + patch_size=dict(argstr="-patch_size %s", usedefault=True,), + probability_map=dict(argstr="-probability",), + same_resolution=dict(argstr="-same_resolution",), + search_area=dict(argstr="-search_area %s", usedefault=True,), + smoothness_factor_beta=dict(argstr="-beta %s", usedefault=True,), + threshold_patch_selection=dict(argstr="-threshold %s", usedefault=True,), + voxel_size=dict(argstr="-voxel_size %s", usedefault=True,), ) inputs = Beast.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Beast_outputs(): - output_map = dict(output_file=dict(extensions=None, ), ) + output_map = dict(output_file=dict(extensions=None,),) outputs = Beast.output_spec() for key, metadata in list(output_map.items()): diff 
--git a/nipype/interfaces/minc/tests/test_auto_BestLinReg.py b/nipype/interfaces/minc/tests/test_auto_BestLinReg.py index 64aceb6182..3e765b0e52 100644 --- a/nipype/interfaces/minc/tests/test_auto_BestLinReg.py +++ b/nipype/interfaces/minc/tests/test_auto_BestLinReg.py @@ -4,58 +4,43 @@ def test_BestLinReg_inputs(): input_map = dict( - args=dict(argstr='%s', ), - clobber=dict( - argstr='-clobber', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + clobber=dict(argstr="-clobber", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), output_mnc=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, hash_files=False, keep_extension=False, - name_source=['source'], - name_template='%s_bestlinreg.mnc', + name_source=["source"], + name_template="%s_bestlinreg.mnc", position=-1, ), output_xfm=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, hash_files=False, keep_extension=False, - name_source=['source'], - name_template='%s_bestlinreg.xfm', + name_source=["source"], + name_template="%s_bestlinreg.xfm", position=-2, ), - source=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-4, - ), - target=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), - verbose=dict(argstr='-verbose', ), + source=dict(argstr="%s", extensions=None, mandatory=True, position=-4,), + target=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + verbose=dict(argstr="-verbose",), ) inputs = BestLinReg.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BestLinReg_outputs(): output_map = dict( - output_mnc=dict(extensions=None, ), - output_xfm=dict(extensions=None, ), + output_mnc=dict(extensions=None,), output_xfm=dict(extensions=None,), ) outputs = BestLinReg.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_BigAverage.py b/nipype/interfaces/minc/tests/test_auto_BigAverage.py index 30c8540598..539ae73488 100644 --- a/nipype/interfaces/minc/tests/test_auto_BigAverage.py +++ b/nipype/interfaces/minc/tests/test_auto_BigAverage.py @@ -4,51 +4,41 @@ def test_BigAverage_inputs(): input_map = dict( - args=dict(argstr='%s', ), - clobber=dict( - argstr='--clobber', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_files=dict( - argstr='%s', - mandatory=True, - position=-2, - sep=' ', - ), + args=dict(argstr="%s",), + clobber=dict(argstr="--clobber", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + input_files=dict(argstr="%s", mandatory=True, position=-2, sep=" ",), output_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, hash_files=False, - name_source=['input_files'], - name_template='%s_bigaverage.mnc', + name_source=["input_files"], + name_template="%s_bigaverage.mnc", position=-1, ), - output_float=dict(argstr='--float', ), - robust=dict(argstr='-robust', ), + output_float=dict(argstr="--float",), + robust=dict(argstr="-robust",), sd_file=dict( - argstr='--sdfile %s', + argstr="--sdfile %s", extensions=None, hash_files=False, - name_source=['input_files'], - name_template='%s_bigaverage_stdev.mnc', + name_source=["input_files"], + name_template="%s_bigaverage_stdev.mnc", ), - tmpdir=dict(argstr='-tmpdir %s', ), - verbose=dict(argstr='--verbose', ), + tmpdir=dict(argstr="-tmpdir %s",), + verbose=dict(argstr="--verbose",), ) inputs = BigAverage.input_spec() for 
key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BigAverage_outputs(): output_map = dict( - output_file=dict(extensions=None, ), - sd_file=dict(extensions=None, ), + output_file=dict(extensions=None,), sd_file=dict(extensions=None,), ) outputs = BigAverage.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Blob.py b/nipype/interfaces/minc/tests/test_auto_Blob.py index 2cce7f294d..f51c3693f6 100644 --- a/nipype/interfaces/minc/tests/test_auto_Blob.py +++ b/nipype/interfaces/minc/tests/test_auto_Blob.py @@ -4,38 +4,32 @@ def test_Blob_inputs(): input_map = dict( - args=dict(argstr='%s', ), - determinant=dict(argstr='-determinant', ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - magnitude=dict(argstr='-magnitude', ), + args=dict(argstr="%s",), + determinant=dict(argstr="-determinant",), + environ=dict(nohash=True, usedefault=True,), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + magnitude=dict(argstr="-magnitude",), output_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, hash_files=False, - name_source=['input_file'], - name_template='%s_blob.mnc', + name_source=["input_file"], + name_template="%s_blob.mnc", position=-1, ), - trace=dict(argstr='-trace', ), - translation=dict(argstr='-translation', ), + trace=dict(argstr="-trace",), + translation=dict(argstr="-translation",), ) inputs = Blob.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Blob_outputs(): - output_map = dict(output_file=dict(extensions=None, ), ) + output_map = dict(output_file=dict(extensions=None,),) outputs = Blob.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Blur.py b/nipype/interfaces/minc/tests/test_auto_Blur.py index 267863d80c..b37942f768 100644 --- a/nipype/interfaces/minc/tests/test_auto_Blur.py +++ b/nipype/interfaces/minc/tests/test_auto_Blur.py @@ -4,52 +4,29 @@ def test_Blur_inputs(): input_map = dict( - args=dict(argstr='%s', ), - clobber=dict( - argstr='-clobber', - usedefault=True, - ), - dimensions=dict(argstr='-dimensions %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + clobber=dict(argstr="-clobber", usedefault=True,), + dimensions=dict(argstr="-dimensions %s",), + environ=dict(nohash=True, usedefault=True,), fwhm=dict( - argstr='-fwhm %s', - mandatory=True, - xor=('fwhm', 'fwhm3d', 'standard_dev'), + argstr="-fwhm %s", mandatory=True, xor=("fwhm", "fwhm3d", "standard_dev"), ), fwhm3d=dict( - argstr='-3dfwhm %s %s %s', - mandatory=True, - xor=('fwhm', 'fwhm3d', 'standard_dev'), - ), - gaussian=dict( - argstr='-gaussian', - xor=('gaussian', 'rect'), - ), - gradient=dict(argstr='-gradient', ), - input_file=dict( - argstr='%s', - extensions=None, + argstr="-3dfwhm %s %s %s", mandatory=True, - position=-2, - ), - no_apodize=dict(argstr='-no_apodize', ), - output_file_base=dict( - argstr='%s', - extensions=None, - position=-1, - ), - partial=dict(argstr='-partial', ), - rect=dict( - argstr='-rect', - xor=('gaussian', 'rect'), - ), + xor=("fwhm", "fwhm3d", "standard_dev"), + ), + gaussian=dict(argstr="-gaussian", xor=("gaussian", "rect"),), + gradient=dict(argstr="-gradient",), + 
input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + no_apodize=dict(argstr="-no_apodize",), + output_file_base=dict(argstr="%s", extensions=None, position=-1,), + partial=dict(argstr="-partial",), + rect=dict(argstr="-rect", xor=("gaussian", "rect"),), standard_dev=dict( - argstr='-standarddev %s', + argstr="-standarddev %s", mandatory=True, - xor=('fwhm', 'fwhm3d', 'standard_dev'), + xor=("fwhm", "fwhm3d", "standard_dev"), ), ) inputs = Blur.input_spec() @@ -57,14 +34,16 @@ def test_Blur_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Blur_outputs(): output_map = dict( - gradient_dxyz=dict(extensions=None, ), - output_file=dict(extensions=None, ), - partial_dx=dict(extensions=None, ), - partial_dxyz=dict(extensions=None, ), - partial_dy=dict(extensions=None, ), - partial_dz=dict(extensions=None, ), + gradient_dxyz=dict(extensions=None,), + output_file=dict(extensions=None,), + partial_dx=dict(extensions=None,), + partial_dxyz=dict(extensions=None,), + partial_dy=dict(extensions=None,), + partial_dz=dict(extensions=None,), ) outputs = Blur.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Calc.py b/nipype/interfaces/minc/tests/test_auto_Calc.py index 6d077c5a52..670278dfa9 100644 --- a/nipype/interfaces/minc/tests/test_auto_Calc.py +++ b/nipype/interfaces/minc/tests/test_auto_Calc.py @@ -4,153 +4,201 @@ def test_Calc_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), check_dimensions=dict( - argstr='-check_dimensions', - xor=('check_dimensions', 'no_check_dimensions'), + argstr="-check_dimensions", xor=("check_dimensions", "no_check_dimensions"), ), - clobber=dict( - argstr='-clobber', - usedefault=True, - ), - copy_header=dict( - argstr='-copy_header', - xor=('copy_header', 'no_copy_header'), - ), - debug=dict(argstr='-debug', ), - environ=dict( - nohash=True, - usedefault=True, - ), - eval_width=dict(argstr='-eval_width %s', ), + clobber=dict(argstr="-clobber", usedefault=True,), + copy_header=dict(argstr="-copy_header", xor=("copy_header", "no_copy_header"),), + debug=dict(argstr="-debug",), + environ=dict(nohash=True, usedefault=True,), + eval_width=dict(argstr="-eval_width %s",), expfile=dict( - argstr='-expfile %s', + argstr="-expfile %s", extensions=None, mandatory=True, - xor=('expression', 'expfile'), + xor=("expression", "expfile"), ), expression=dict( - argstr="-expression '%s'", - mandatory=True, - xor=('expression', 'expfile'), + argstr="-expression '%s'", mandatory=True, xor=("expression", "expfile"), ), filelist=dict( - argstr='-filelist %s', + argstr="-filelist %s", extensions=None, mandatory=True, - xor=('input_files', 'filelist'), + xor=("input_files", "filelist"), ), format_byte=dict( - argstr='-byte', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-byte", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_double=dict( - argstr='-double', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-double", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + 
"format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_filetype=dict( - argstr='-filetype', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-filetype", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_float=dict( - argstr='-float', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-float", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_int=dict( - argstr='-int', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-int", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_long=dict( - argstr='-long', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-long", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_short=dict( - argstr='-short', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-short", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_signed=dict( - argstr='-signed', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-signed", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_unsigned=dict( - argstr='-unsigned', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), - ), - ignore_nan=dict(argstr='-ignore_nan', ), - input_files=dict( - argstr='%s', - mandatory=True, - position=-2, - sep=' ', - ), - max_buffer_size_in_kb=dict(argstr='-max_buffer_size_in_kb %d', ), + argstr="-unsigned", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), + ), + ignore_nan=dict(argstr="-ignore_nan",), + input_files=dict(argstr="%s", mandatory=True, position=-2, sep=" ",), + max_buffer_size_in_kb=dict(argstr="-max_buffer_size_in_kb %d",), no_check_dimensions=dict( - argstr='-nocheck_dimensions', - xor=('check_dimensions', 'no_check_dimensions'), + argstr="-nocheck_dimensions", + xor=("check_dimensions", "no_check_dimensions"), ), no_copy_header=dict( - argstr='-nocopy_header', - xor=('copy_header', 'no_copy_header'), 
+ argstr="-nocopy_header", xor=("copy_header", "no_copy_header"), ), outfiles=dict(), output_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, hash_files=False, - name_source=['input_files'], - name_template='%s_calc.mnc', + name_source=["input_files"], + name_template="%s_calc.mnc", position=-1, ), output_illegal=dict( - argstr='-illegal_value', - xor=('output_nan', 'output_zero', 'output_illegal_value'), + argstr="-illegal_value", + xor=("output_nan", "output_zero", "output_illegal_value"), ), output_nan=dict( - argstr='-nan', - xor=('output_nan', 'output_zero', 'output_illegal_value'), + argstr="-nan", xor=("output_nan", "output_zero", "output_illegal_value"), ), output_zero=dict( - argstr='-zero', - xor=('output_nan', 'output_zero', 'output_illegal_value'), - ), - propagate_nan=dict(argstr='-propagate_nan', ), - quiet=dict( - argstr='-quiet', - xor=('verbose', 'quiet'), - ), - two=dict(argstr='-2', ), - verbose=dict( - argstr='-verbose', - xor=('verbose', 'quiet'), + argstr="-zero", xor=("output_nan", "output_zero", "output_illegal_value"), ), - voxel_range=dict(argstr='-range %d %d', ), + propagate_nan=dict(argstr="-propagate_nan",), + quiet=dict(argstr="-quiet", xor=("verbose", "quiet"),), + two=dict(argstr="-2",), + verbose=dict(argstr="-verbose", xor=("verbose", "quiet"),), + voxel_range=dict(argstr="-range %d %d",), ) inputs = Calc.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Calc_outputs(): - output_map = dict(output_file=dict(extensions=None, ), ) + output_map = dict(output_file=dict(extensions=None,),) outputs = Calc.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Convert.py b/nipype/interfaces/minc/tests/test_auto_Convert.py index ba312ea2d3..695d371b47 100644 --- a/nipype/interfaces/minc/tests/test_auto_Convert.py +++ b/nipype/interfaces/minc/tests/test_auto_Convert.py @@ -4,42 +4,33 @@ def test_Convert_inputs(): input_map = dict( - args=dict(argstr='%s', ), - chunk=dict(argstr='-chunk %d', ), - clobber=dict( - argstr='-clobber', - usedefault=True, - ), - compression=dict(argstr='-compress %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s",), + chunk=dict(argstr="-chunk %d",), + clobber=dict(argstr="-clobber", usedefault=True,), + compression=dict(argstr="-compress %s",), + environ=dict(nohash=True, usedefault=True,), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), output_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, hash_files=False, - name_source=['input_file'], - name_template='%s_convert_output.mnc', + name_source=["input_file"], + name_template="%s_convert_output.mnc", position=-1, ), - template=dict(argstr='-template', ), - two=dict(argstr='-2', ), + template=dict(argstr="-template",), + two=dict(argstr="-2",), ) inputs = Convert.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Convert_outputs(): - output_map = dict(output_file=dict(extensions=None, ), ) + output_map = dict(output_file=dict(extensions=None,),) outputs = Convert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Copy.py 
b/nipype/interfaces/minc/tests/test_auto_Copy.py index 7bb0605c39..91736a67b3 100644 --- a/nipype/interfaces/minc/tests/test_auto_Copy.py +++ b/nipype/interfaces/minc/tests/test_auto_Copy.py @@ -4,42 +4,30 @@ def test_Copy_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), output_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, hash_files=False, - name_source=['input_file'], - name_template='%s_copy.mnc', + name_source=["input_file"], + name_template="%s_copy.mnc", position=-1, ), - pixel_values=dict( - argstr='-pixel_values', - xor=('pixel_values', 'real_values'), - ), - real_values=dict( - argstr='-real_values', - xor=('pixel_values', 'real_values'), - ), + pixel_values=dict(argstr="-pixel_values", xor=("pixel_values", "real_values"),), + real_values=dict(argstr="-real_values", xor=("pixel_values", "real_values"),), ) inputs = Copy.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Copy_outputs(): - output_map = dict(output_file=dict(extensions=None, ), ) + output_map = dict(output_file=dict(extensions=None,),) outputs = Copy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Dump.py b/nipype/interfaces/minc/tests/test_auto_Dump.py index 4713f0aec3..eb1fe2c6a7 100644 --- a/nipype/interfaces/minc/tests/test_auto_Dump.py +++ b/nipype/interfaces/minc/tests/test_auto_Dump.py @@ -5,61 +5,39 @@ def test_Dump_inputs(): input_map = dict( annotations_brief=dict( - argstr='-b %s', - xor=('annotations_brief', 'annotations_full'), + argstr="-b %s", xor=("annotations_brief", "annotations_full"), ), annotations_full=dict( - argstr='-f %s', - xor=('annotations_brief', 'annotations_full'), - ), - args=dict(argstr='%s', ), - coordinate_data=dict( - argstr='-c', - xor=('coordinate_data', 'header_data'), - ), - environ=dict( - nohash=True, - usedefault=True, - ), - header_data=dict( - argstr='-h', - xor=('coordinate_data', 'header_data'), - ), - input_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - line_length=dict(argstr='-l %d', ), - netcdf_name=dict(argstr='-n %s', ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), + argstr="-f %s", xor=("annotations_brief", "annotations_full"), + ), + args=dict(argstr="%s",), + coordinate_data=dict(argstr="-c", xor=("coordinate_data", "header_data"),), + environ=dict(nohash=True, usedefault=True,), + header_data=dict(argstr="-h", xor=("coordinate_data", "header_data"),), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + line_length=dict(argstr="-l %d",), + netcdf_name=dict(argstr="-n %s",), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), output_file=dict( extensions=None, hash_files=False, keep_extension=False, - name_source=['input_file'], - name_template='%s_dump.txt', + name_source=["input_file"], + name_template="%s_dump.txt", position=-1, ), - precision=dict(argstr='%s', ), - variables=dict( - argstr='-v %s', - sep=',', - ), + precision=dict(argstr="%s",), + variables=dict(argstr="-v %s", sep=",",), ) inputs = Dump.input_spec() 
for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Dump_outputs(): - output_map = dict(output_file=dict(extensions=None, ), ) + output_map = dict(output_file=dict(extensions=None,),) outputs = Dump.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Extract.py b/nipype/interfaces/minc/tests/test_auto_Extract.py index a3dfb069e5..0c05d4ab5f 100644 --- a/nipype/interfaces/minc/tests/test_auto_Extract.py +++ b/nipype/interfaces/minc/tests/test_auto_Extract.py @@ -4,151 +4,194 @@ def test_Extract_inputs(): input_map = dict( - args=dict(argstr='%s', ), - count=dict( - argstr='-count %s', - sep=',', - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + count=dict(argstr="-count %s", sep=",",), + environ=dict(nohash=True, usedefault=True,), flip_any_direction=dict( - argstr='-any_direction', - xor=('flip_positive_direction', 'flip_negative_direction', - 'flip_any_direction'), + argstr="-any_direction", + xor=( + "flip_positive_direction", + "flip_negative_direction", + "flip_any_direction", + ), ), flip_negative_direction=dict( - argstr='-negative_direction', - xor=('flip_positive_direction', 'flip_negative_direction', - 'flip_any_direction'), + argstr="-negative_direction", + xor=( + "flip_positive_direction", + "flip_negative_direction", + "flip_any_direction", + ), ), flip_positive_direction=dict( - argstr='-positive_direction', - xor=('flip_positive_direction', 'flip_negative_direction', - 'flip_any_direction'), + argstr="-positive_direction", + xor=( + "flip_positive_direction", + "flip_negative_direction", + "flip_any_direction", + ), ), flip_x_any=dict( - argstr='-xanydirection', - xor=('flip_x_positive', 'flip_x_negative', 'flip_x_any'), + argstr="-xanydirection", + xor=("flip_x_positive", "flip_x_negative", "flip_x_any"), ), flip_x_negative=dict( - argstr='-xdirection', - xor=('flip_x_positive', 'flip_x_negative', 'flip_x_any'), + argstr="-xdirection", + xor=("flip_x_positive", "flip_x_negative", "flip_x_any"), ), flip_x_positive=dict( - argstr='+xdirection', - xor=('flip_x_positive', 'flip_x_negative', 'flip_x_any'), + argstr="+xdirection", + xor=("flip_x_positive", "flip_x_negative", "flip_x_any"), ), flip_y_any=dict( - argstr='-yanydirection', - xor=('flip_y_positive', 'flip_y_negative', 'flip_y_any'), + argstr="-yanydirection", + xor=("flip_y_positive", "flip_y_negative", "flip_y_any"), ), flip_y_negative=dict( - argstr='-ydirection', - xor=('flip_y_positive', 'flip_y_negative', 'flip_y_any'), + argstr="-ydirection", + xor=("flip_y_positive", "flip_y_negative", "flip_y_any"), ), flip_y_positive=dict( - argstr='+ydirection', - xor=('flip_y_positive', 'flip_y_negative', 'flip_y_any'), + argstr="+ydirection", + xor=("flip_y_positive", "flip_y_negative", "flip_y_any"), ), flip_z_any=dict( - argstr='-zanydirection', - xor=('flip_z_positive', 'flip_z_negative', 'flip_z_any'), + argstr="-zanydirection", + xor=("flip_z_positive", "flip_z_negative", "flip_z_any"), ), flip_z_negative=dict( - argstr='-zdirection', - xor=('flip_z_positive', 'flip_z_negative', 'flip_z_any'), + argstr="-zdirection", + xor=("flip_z_positive", "flip_z_negative", "flip_z_any"), ), flip_z_positive=dict( - argstr='+zdirection', - xor=('flip_z_positive', 'flip_z_negative', 'flip_z_any'), - ), - image_maximum=dict(argstr='-image_maximum %s', ), - image_minimum=dict(argstr='-image_minimum %s', ), - 
image_range=dict(argstr='-image_range %s %s', ), - input_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - nonormalize=dict( - argstr='-nonormalize', - xor=('normalize', 'nonormalize'), - ), - normalize=dict( - argstr='-normalize', - xor=('normalize', 'nonormalize'), - ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), + argstr="+zdirection", + xor=("flip_z_positive", "flip_z_negative", "flip_z_any"), + ), + image_maximum=dict(argstr="-image_maximum %s",), + image_minimum=dict(argstr="-image_minimum %s",), + image_range=dict(argstr="-image_range %s %s",), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + nonormalize=dict(argstr="-nonormalize", xor=("normalize", "nonormalize"),), + normalize=dict(argstr="-normalize", xor=("normalize", "nonormalize"),), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), output_file=dict( extensions=None, hash_files=False, keep_extension=False, - name_source=['input_file'], - name_template='%s.raw', + name_source=["input_file"], + name_template="%s.raw", position=-1, ), - start=dict( - argstr='-start %s', - sep=',', - ), + start=dict(argstr="-start %s", sep=",",), write_ascii=dict( - argstr='-ascii', - xor=('write_ascii', 'write_ascii', 'write_byte', 'write_short', - 'write_int', 'write_long', 'write_float', 'write_double', - 'write_signed', 'write_unsigned'), + argstr="-ascii", + xor=( + "write_ascii", + "write_ascii", + "write_byte", + "write_short", + "write_int", + "write_long", + "write_float", + "write_double", + "write_signed", + "write_unsigned", + ), ), write_byte=dict( - argstr='-byte', - xor=('write_ascii', 'write_ascii', 'write_byte', 'write_short', - 'write_int', 'write_long', 'write_float', 'write_double', - 'write_signed', 'write_unsigned'), + argstr="-byte", + xor=( + "write_ascii", + "write_ascii", + "write_byte", + "write_short", + "write_int", + "write_long", + "write_float", + "write_double", + "write_signed", + "write_unsigned", + ), ), write_double=dict( - argstr='-double', - xor=('write_ascii', 'write_ascii', 'write_byte', 'write_short', - 'write_int', 'write_long', 'write_float', 'write_double', - 'write_signed', 'write_unsigned'), + argstr="-double", + xor=( + "write_ascii", + "write_ascii", + "write_byte", + "write_short", + "write_int", + "write_long", + "write_float", + "write_double", + "write_signed", + "write_unsigned", + ), ), write_float=dict( - argstr='-float', - xor=('write_ascii', 'write_ascii', 'write_byte', 'write_short', - 'write_int', 'write_long', 'write_float', 'write_double', - 'write_signed', 'write_unsigned'), + argstr="-float", + xor=( + "write_ascii", + "write_ascii", + "write_byte", + "write_short", + "write_int", + "write_long", + "write_float", + "write_double", + "write_signed", + "write_unsigned", + ), ), write_int=dict( - argstr='-int', - xor=('write_ascii', 'write_ascii', 'write_byte', 'write_short', - 'write_int', 'write_long', 'write_float', 'write_double', - 'write_signed', 'write_unsigned'), + argstr="-int", + xor=( + "write_ascii", + "write_ascii", + "write_byte", + "write_short", + "write_int", + "write_long", + "write_float", + "write_double", + "write_signed", + "write_unsigned", + ), ), write_long=dict( - argstr='-long', - xor=('write_ascii', 'write_ascii', 'write_byte', 'write_short', - 'write_int', 'write_long', 'write_float', 'write_double', - 'write_signed', 'write_unsigned'), - ), - write_range=dict(argstr='-range %s %s', ), + argstr="-long", + xor=( + 
"write_ascii", + "write_ascii", + "write_byte", + "write_short", + "write_int", + "write_long", + "write_float", + "write_double", + "write_signed", + "write_unsigned", + ), + ), + write_range=dict(argstr="-range %s %s",), write_short=dict( - argstr='-short', - xor=('write_ascii', 'write_ascii', 'write_byte', 'write_short', - 'write_int', 'write_long', 'write_float', 'write_double', - 'write_signed', 'write_unsigned'), - ), - write_signed=dict( - argstr='-signed', - xor=('write_signed', 'write_unsigned'), - ), + argstr="-short", + xor=( + "write_ascii", + "write_ascii", + "write_byte", + "write_short", + "write_int", + "write_long", + "write_float", + "write_double", + "write_signed", + "write_unsigned", + ), + ), + write_signed=dict(argstr="-signed", xor=("write_signed", "write_unsigned"),), write_unsigned=dict( - argstr='-unsigned', - xor=('write_signed', 'write_unsigned'), + argstr="-unsigned", xor=("write_signed", "write_unsigned"), ), ) inputs = Extract.input_spec() @@ -156,8 +199,10 @@ def test_Extract_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Extract_outputs(): - output_map = dict(output_file=dict(extensions=None, ), ) + output_map = dict(output_file=dict(extensions=None,),) outputs = Extract.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py b/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py index 58383c799e..deb6449d3d 100644 --- a/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py +++ b/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py @@ -4,41 +4,33 @@ def test_Gennlxfm_inputs(): input_map = dict( - args=dict(argstr='%s', ), - clobber=dict( - argstr='-clobber', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - ident=dict(argstr='-ident', ), - like=dict( - argstr='-like %s', - extensions=None, - ), + args=dict(argstr="%s",), + clobber=dict(argstr="-clobber", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + ident=dict(argstr="-ident",), + like=dict(argstr="-like %s", extensions=None,), output_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, hash_files=False, - name_source=['like'], - name_template='%s_gennlxfm.xfm', + name_source=["like"], + name_template="%s_gennlxfm.xfm", position=-1, ), - step=dict(argstr='-step %s', ), - verbose=dict(argstr='-verbose', ), + step=dict(argstr="-step %s",), + verbose=dict(argstr="-verbose",), ) inputs = Gennlxfm.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Gennlxfm_outputs(): output_map = dict( - output_file=dict(extensions=None, ), - output_grid=dict(extensions=None, ), + output_file=dict(extensions=None,), output_grid=dict(extensions=None,), ) outputs = Gennlxfm.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Math.py b/nipype/interfaces/minc/tests/test_auto_Math.py index 4758c9d897..32a5f68d66 100644 --- a/nipype/interfaces/minc/tests/test_auto_Math.py +++ b/nipype/interfaces/minc/tests/test_auto_Math.py @@ -4,165 +4,225 @@ def test_Math_inputs(): input_map = dict( - abs=dict(argstr='-abs', ), - args=dict(argstr='%s', ), - calc_add=dict(argstr='-add', ), - calc_and=dict(argstr='-and', ), - calc_div=dict(argstr='-div', ), - calc_mul=dict(argstr='-mult', ), - calc_not=dict(argstr='-not', ), - calc_or=dict(argstr='-or', ), - 
calc_sub=dict(argstr='-sub', ), + abs=dict(argstr="-abs",), + args=dict(argstr="%s",), + calc_add=dict(argstr="-add",), + calc_and=dict(argstr="-and",), + calc_div=dict(argstr="-div",), + calc_mul=dict(argstr="-mult",), + calc_not=dict(argstr="-not",), + calc_or=dict(argstr="-or",), + calc_sub=dict(argstr="-sub",), check_dimensions=dict( - argstr='-check_dimensions', - xor=('check_dimensions', 'no_check_dimensions'), - ), - clamp=dict(argstr='-clamp -const2 %s %s', ), - clobber=dict( - argstr='-clobber', - usedefault=True, - ), - copy_header=dict( - argstr='-copy_header', - xor=('copy_header', 'no_copy_header'), - ), - count_valid=dict(argstr='-count_valid', ), - dimension=dict(argstr='-dimension %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - exp=dict(argstr='-exp -const2 %s %s', ), + argstr="-check_dimensions", xor=("check_dimensions", "no_check_dimensions"), + ), + clamp=dict(argstr="-clamp -const2 %s %s",), + clobber=dict(argstr="-clobber", usedefault=True,), + copy_header=dict(argstr="-copy_header", xor=("copy_header", "no_copy_header"),), + count_valid=dict(argstr="-count_valid",), + dimension=dict(argstr="-dimension %s",), + environ=dict(nohash=True, usedefault=True,), + exp=dict(argstr="-exp -const2 %s %s",), filelist=dict( - argstr='-filelist %s', + argstr="-filelist %s", extensions=None, mandatory=True, - xor=('input_files', 'filelist'), + xor=("input_files", "filelist"), ), format_byte=dict( - argstr='-byte', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-byte", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_double=dict( - argstr='-double', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-double", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_filetype=dict( - argstr='-filetype', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-filetype", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_float=dict( - argstr='-float', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-float", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_int=dict( - argstr='-int', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-int", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_long=dict( - argstr='-long', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 
'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-long", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_short=dict( - argstr='-short', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-short", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_signed=dict( - argstr='-signed', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), + argstr="-signed", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_unsigned=dict( - argstr='-unsigned', - xor=('format_filetype', 'format_byte', 'format_short', - 'format_int', 'format_long', 'format_float', 'format_double', - 'format_signed', 'format_unsigned'), - ), - ignore_nan=dict(argstr='-ignore_nan', ), + argstr="-unsigned", + xor=( + "format_filetype", + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), + ), + ignore_nan=dict(argstr="-ignore_nan",), input_files=dict( - argstr='%s', + argstr="%s", mandatory=True, position=-2, - sep=' ', - xor=('input_files', 'filelist'), + sep=" ", + xor=("input_files", "filelist"), ), - invert=dict(argstr='-invert -const %s', ), - isnan=dict(argstr='-isnan', ), - log=dict(argstr='-log -const2 %s %s', ), + invert=dict(argstr="-invert -const %s",), + isnan=dict(argstr="-isnan",), + log=dict(argstr="-log -const2 %s %s",), max_buffer_size_in_kb=dict( - argstr='-max_buffer_size_in_kb %d', - usedefault=True, + argstr="-max_buffer_size_in_kb %d", usedefault=True, ), - maximum=dict(argstr='-maximum', ), - minimum=dict(argstr='-minimum', ), - nisnan=dict(argstr='-nisnan', ), + maximum=dict(argstr="-maximum",), + minimum=dict(argstr="-minimum",), + nisnan=dict(argstr="-nisnan",), no_check_dimensions=dict( - argstr='-nocheck_dimensions', - xor=('check_dimensions', 'no_check_dimensions'), + argstr="-nocheck_dimensions", + xor=("check_dimensions", "no_check_dimensions"), ), no_copy_header=dict( - argstr='-nocopy_header', - xor=('copy_header', 'no_copy_header'), + argstr="-nocopy_header", xor=("copy_header", "no_copy_header"), ), - nsegment=dict(argstr='-nsegment -const2 %s %s', ), + nsegment=dict(argstr="-nsegment -const2 %s %s",), output_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, hash_files=False, - name_source=['input_files'], - name_template='%s_mincmath.mnc', + name_source=["input_files"], + name_template="%s_mincmath.mnc", position=-1, ), output_illegal=dict( - argstr='-illegal_value', - xor=('output_nan', 'output_zero', 'output_illegal_value'), + argstr="-illegal_value", + xor=("output_nan", "output_zero", "output_illegal_value"), ), output_nan=dict( - argstr='-nan', - xor=('output_nan', 'output_zero', 'output_illegal_value'), + argstr="-nan", xor=("output_nan", "output_zero", "output_illegal_value"), ), output_zero=dict( - argstr='-zero', - xor=('output_nan', 'output_zero', 'output_illegal_value'), - ), - 
percentdiff=dict(argstr='-percentdiff', ), - propagate_nan=dict(argstr='-propagate_nan', ), - scale=dict(argstr='-scale -const2 %s %s', ), - segment=dict(argstr='-segment -const2 %s %s', ), - sqrt=dict(argstr='-sqrt', ), - square=dict(argstr='-square', ), - test_eq=dict(argstr='-eq', ), - test_ge=dict(argstr='-ge', ), - test_gt=dict(argstr='-gt', ), - test_le=dict(argstr='-le', ), - test_lt=dict(argstr='-lt', ), - test_ne=dict(argstr='-ne', ), - two=dict(argstr='-2', ), - voxel_range=dict(argstr='-range %d %d', ), + argstr="-zero", xor=("output_nan", "output_zero", "output_illegal_value"), + ), + percentdiff=dict(argstr="-percentdiff",), + propagate_nan=dict(argstr="-propagate_nan",), + scale=dict(argstr="-scale -const2 %s %s",), + segment=dict(argstr="-segment -const2 %s %s",), + sqrt=dict(argstr="-sqrt",), + square=dict(argstr="-square",), + test_eq=dict(argstr="-eq",), + test_ge=dict(argstr="-ge",), + test_gt=dict(argstr="-gt",), + test_le=dict(argstr="-le",), + test_lt=dict(argstr="-lt",), + test_ne=dict(argstr="-ne",), + two=dict(argstr="-2",), + voxel_range=dict(argstr="-range %d %d",), ) inputs = Math.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Math_outputs(): - output_map = dict(output_file=dict(extensions=None, ), ) + output_map = dict(output_file=dict(extensions=None,),) outputs = Math.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_NlpFit.py b/nipype/interfaces/minc/tests/test_auto_NlpFit.py index c0092ab704..58e9e985db 100644 --- a/nipype/interfaces/minc/tests/test_auto_NlpFit.py +++ b/nipype/interfaces/minc/tests/test_auto_NlpFit.py @@ -4,60 +4,28 @@ def test_NlpFit_inputs(): input_map = dict( - args=dict(argstr='%s', ), - clobber=dict( - argstr='-clobber', - usedefault=True, - ), - config_file=dict( - argstr='-config_file %s', - extensions=None, - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - init_xfm=dict( - argstr='-init_xfm %s', - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s",), + clobber=dict(argstr="-clobber", usedefault=True,), + config_file=dict(argstr="-config_file %s", extensions=None, mandatory=True,), + environ=dict(nohash=True, usedefault=True,), + init_xfm=dict(argstr="-init_xfm %s", extensions=None, mandatory=True,), input_grid_files=dict(), - output_xfm=dict( - argstr='%s', - extensions=None, - genfile=True, - position=-1, - ), - source=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), - source_mask=dict( - argstr='-source_mask %s', - extensions=None, - mandatory=True, - ), - target=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - verbose=dict(argstr='-verbose', ), + output_xfm=dict(argstr="%s", extensions=None, genfile=True, position=-1,), + source=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + source_mask=dict(argstr="-source_mask %s", extensions=None, mandatory=True,), + target=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + verbose=dict(argstr="-verbose",), ) inputs = NlpFit.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_NlpFit_outputs(): output_map = dict( - output_grid=dict(extensions=None, ), - output_xfm=dict(extensions=None, ), + output_grid=dict(extensions=None,), 
output_xfm=dict(extensions=None,), ) outputs = NlpFit.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Norm.py b/nipype/interfaces/minc/tests/test_auto_Norm.py index 4bbae0454f..462b61459f 100644 --- a/nipype/interfaces/minc/tests/test_auto_Norm.py +++ b/nipype/interfaces/minc/tests/test_auto_Norm.py @@ -4,64 +4,49 @@ def test_Norm_inputs(): input_map = dict( - args=dict(argstr='%s', ), - clamp=dict( - argstr='-clamp', - usedefault=True, - ), - clobber=dict( - argstr='-clobber', - usedefault=True, - ), - cutoff=dict(argstr='-cutoff %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - lower=dict(argstr='-lower %s', ), - mask=dict( - argstr='-mask %s', - extensions=None, - ), - out_ceil=dict(argstr='-out_ceil %s', ), - out_floor=dict(argstr='-out_floor %s', ), + args=dict(argstr="%s",), + clamp=dict(argstr="-clamp", usedefault=True,), + clobber=dict(argstr="-clobber", usedefault=True,), + cutoff=dict(argstr="-cutoff %s",), + environ=dict(nohash=True, usedefault=True,), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + lower=dict(argstr="-lower %s",), + mask=dict(argstr="-mask %s", extensions=None,), + out_ceil=dict(argstr="-out_ceil %s",), + out_floor=dict(argstr="-out_floor %s",), output_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, hash_files=False, - name_source=['input_file'], - name_template='%s_norm.mnc', + name_source=["input_file"], + name_template="%s_norm.mnc", position=-1, ), output_threshold_mask=dict( - argstr='-threshold_mask %s', + argstr="-threshold_mask %s", extensions=None, hash_files=False, - name_source=['input_file'], - name_template='%s_norm_threshold_mask.mnc', + name_source=["input_file"], + name_template="%s_norm_threshold_mask.mnc", ), - threshold=dict(argstr='-threshold', ), - threshold_blur=dict(argstr='-threshold_blur %s', ), - threshold_bmt=dict(argstr='-threshold_bmt', ), - threshold_perc=dict(argstr='-threshold_perc %s', ), - upper=dict(argstr='-upper %s', ), + threshold=dict(argstr="-threshold",), + threshold_blur=dict(argstr="-threshold_blur %s",), + threshold_bmt=dict(argstr="-threshold_bmt",), + threshold_perc=dict(argstr="-threshold_perc %s",), + upper=dict(argstr="-upper %s",), ) inputs = Norm.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Norm_outputs(): output_map = dict( - output_file=dict(extensions=None, ), - output_threshold_mask=dict(extensions=None, ), + output_file=dict(extensions=None,), + output_threshold_mask=dict(extensions=None,), ) outputs = Norm.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Pik.py b/nipype/interfaces/minc/tests/test_auto_Pik.py index 2a2ff5f851..530ead0317 100644 --- a/nipype/interfaces/minc/tests/test_auto_Pik.py +++ b/nipype/interfaces/minc/tests/test_auto_Pik.py @@ -4,88 +4,60 @@ def test_Pik_inputs(): input_map = dict( - annotated_bar=dict(argstr='--anot_bar', ), - args=dict(argstr='%s', ), - auto_range=dict( - argstr='--auto_range', - xor=('image_range', 'auto_range'), - ), - clobber=dict( - argstr='-clobber', - usedefault=True, - ), - depth=dict(argstr='--depth %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + annotated_bar=dict(argstr="--anot_bar",), + args=dict(argstr="%s",), + auto_range=dict(argstr="--auto_range", xor=("image_range", "auto_range"),), + 
clobber=dict(argstr="-clobber", usedefault=True,), + depth=dict(argstr="--depth %s",), + environ=dict(nohash=True, usedefault=True,), horizontal_triplanar_view=dict( - argstr='--horizontal', - xor=('vertical_triplanar_view', 'horizontal_triplanar_view'), + argstr="--horizontal", + xor=("vertical_triplanar_view", "horizontal_triplanar_view"), ), image_range=dict( - argstr='--image_range %s %s', - xor=('image_range', 'auto_range'), + argstr="--image_range %s %s", xor=("image_range", "auto_range"), ), - input_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - jpg=dict(xor=('jpg', 'png'), ), - lookup=dict(argstr='--lookup %s', ), - minc_range=dict(argstr='--range %s %s', ), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + jpg=dict(xor=("jpg", "png"),), + lookup=dict(argstr="--lookup %s",), + minc_range=dict(argstr="--range %s %s",), output_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, hash_files=False, keep_extension=False, - name_source=['input_file'], - name_template='%s.png', + name_source=["input_file"], + name_template="%s.png", position=-1, ), - png=dict(xor=('jpg', 'png'), ), - sagittal_offset=dict(argstr='--sagittal_offset %s', ), - sagittal_offset_perc=dict(argstr='--sagittal_offset_perc %d', ), - scale=dict( - argstr='--scale %s', - usedefault=True, - ), - slice_x=dict( - argstr='-x', - xor=('slice_z', 'slice_y', 'slice_x'), - ), - slice_y=dict( - argstr='-y', - xor=('slice_z', 'slice_y', 'slice_x'), - ), - slice_z=dict( - argstr='-z', - xor=('slice_z', 'slice_y', 'slice_x'), - ), - start=dict(argstr='--slice %s', ), - tile_size=dict(argstr='--tilesize %s', ), - title=dict(argstr='%s', ), - title_size=dict( - argstr='--title_size %s', - requires=['title'], - ), - triplanar=dict(argstr='--triplanar', ), + png=dict(xor=("jpg", "png"),), + sagittal_offset=dict(argstr="--sagittal_offset %s",), + sagittal_offset_perc=dict(argstr="--sagittal_offset_perc %d",), + scale=dict(argstr="--scale %s", usedefault=True,), + slice_x=dict(argstr="-x", xor=("slice_z", "slice_y", "slice_x"),), + slice_y=dict(argstr="-y", xor=("slice_z", "slice_y", "slice_x"),), + slice_z=dict(argstr="-z", xor=("slice_z", "slice_y", "slice_x"),), + start=dict(argstr="--slice %s",), + tile_size=dict(argstr="--tilesize %s",), + title=dict(argstr="%s",), + title_size=dict(argstr="--title_size %s", requires=["title"],), + triplanar=dict(argstr="--triplanar",), vertical_triplanar_view=dict( - argstr='--vertical', - xor=('vertical_triplanar_view', 'horizontal_triplanar_view'), + argstr="--vertical", + xor=("vertical_triplanar_view", "horizontal_triplanar_view"), ), - width=dict(argstr='--width %s', ), + width=dict(argstr="--width %s",), ) inputs = Pik.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Pik_outputs(): - output_map = dict(output_file=dict(extensions=None, ), ) + output_map = dict(output_file=dict(extensions=None,),) outputs = Pik.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Resample.py b/nipype/interfaces/minc/tests/test_auto_Resample.py index 3fdf821b88..32385be6c9 100644 --- a/nipype/interfaces/minc/tests/test_auto_Resample.py +++ b/nipype/interfaces/minc/tests/test_auto_Resample.py @@ -4,244 +4,274 @@ def test_Resample_inputs(): input_map = dict( - args=dict(argstr='%s', ), - clobber=dict( - argstr='-clobber', - usedefault=True, - ), 
+ args=dict(argstr="%s",), + clobber=dict(argstr="-clobber", usedefault=True,), coronal_slices=dict( - argstr='-coronal', - xor=('transverse', 'sagittal', 'coronal'), + argstr="-coronal", xor=("transverse", "sagittal", "coronal"), ), dircos=dict( - argstr='-dircos %s %s %s', - xor=('nelements', 'nelements_x_y_or_z'), - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fill=dict( - argstr='-fill', - xor=('nofill', 'fill'), - ), - fill_value=dict( - argstr='-fillvalue %s', - requires=['fill'], + argstr="-dircos %s %s %s", xor=("nelements", "nelements_x_y_or_z"), ), + environ=dict(nohash=True, usedefault=True,), + fill=dict(argstr="-fill", xor=("nofill", "fill"),), + fill_value=dict(argstr="-fillvalue %s", requires=["fill"],), format_byte=dict( - argstr='-byte', - xor=('format_byte', 'format_short', 'format_int', 'format_long', - 'format_float', 'format_double', 'format_signed', - 'format_unsigned'), + argstr="-byte", + xor=( + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_double=dict( - argstr='-double', - xor=('format_byte', 'format_short', 'format_int', 'format_long', - 'format_float', 'format_double', 'format_signed', - 'format_unsigned'), + argstr="-double", + xor=( + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_float=dict( - argstr='-float', - xor=('format_byte', 'format_short', 'format_int', 'format_long', - 'format_float', 'format_double', 'format_signed', - 'format_unsigned'), + argstr="-float", + xor=( + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_int=dict( - argstr='-int', - xor=('format_byte', 'format_short', 'format_int', 'format_long', - 'format_float', 'format_double', 'format_signed', - 'format_unsigned'), + argstr="-int", + xor=( + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_long=dict( - argstr='-long', - xor=('format_byte', 'format_short', 'format_int', 'format_long', - 'format_float', 'format_double', 'format_signed', - 'format_unsigned'), + argstr="-long", + xor=( + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_short=dict( - argstr='-short', - xor=('format_byte', 'format_short', 'format_int', 'format_long', - 'format_float', 'format_double', 'format_signed', - 'format_unsigned'), + argstr="-short", + xor=( + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_signed=dict( - argstr='-signed', - xor=('format_byte', 'format_short', 'format_int', 'format_long', - 'format_float', 'format_double', 'format_signed', - 'format_unsigned'), + argstr="-signed", + xor=( + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + "format_double", + "format_signed", + "format_unsigned", + ), ), format_unsigned=dict( - argstr='-unsigned', - xor=('format_byte', 'format_short', 'format_int', 'format_long', - 'format_float', 'format_double', 'format_signed', - 'format_unsigned'), + argstr="-unsigned", + xor=( + "format_byte", + "format_short", + "format_int", + "format_long", + "format_float", + 
"format_double", + "format_signed", + "format_unsigned", + ), ), half_width_sinc_window=dict( - argstr='-width %s', - requires=['sinc_interpolation'], - ), - input_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, + argstr="-width %s", requires=["sinc_interpolation"], ), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), input_grid_files=dict(), - invert_transformation=dict(argstr='-invert_transformation', ), + invert_transformation=dict(argstr="-invert_transformation",), keep_real_range=dict( - argstr='-keep_real_range', - xor=('keep_real_range', 'nokeep_real_range'), - ), - like=dict( - argstr='-like %s', - extensions=None, + argstr="-keep_real_range", xor=("keep_real_range", "nokeep_real_range"), ), + like=dict(argstr="-like %s", extensions=None,), nearest_neighbour_interpolation=dict( - argstr='-nearest_neighbour', - xor=('trilinear_interpolation', 'tricubic_interpolation', - 'nearest_neighbour_interpolation', 'sinc_interpolation'), + argstr="-nearest_neighbour", + xor=( + "trilinear_interpolation", + "tricubic_interpolation", + "nearest_neighbour_interpolation", + "sinc_interpolation", + ), ), nelements=dict( - argstr='-nelements %s %s %s', - xor=('nelements', 'nelements_x_y_or_z'), - ), - no_fill=dict( - argstr='-nofill', - xor=('nofill', 'fill'), + argstr="-nelements %s %s %s", xor=("nelements", "nelements_x_y_or_z"), ), + no_fill=dict(argstr="-nofill", xor=("nofill", "fill"),), no_input_sampling=dict( - argstr='-use_input_sampling', - xor=('vio_transform', 'no_input_sampling'), + argstr="-use_input_sampling", xor=("vio_transform", "no_input_sampling"), ), nokeep_real_range=dict( - argstr='-nokeep_real_range', - xor=('keep_real_range', 'nokeep_real_range'), + argstr="-nokeep_real_range", xor=("keep_real_range", "nokeep_real_range"), ), - origin=dict(argstr='-origin %s %s %s', ), + origin=dict(argstr="-origin %s %s %s",), output_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, hash_files=False, - name_source=['input_file'], - name_template='%s_resample.mnc', + name_source=["input_file"], + name_template="%s_resample.mnc", position=-1, ), - output_range=dict(argstr='-range %s %s', ), + output_range=dict(argstr="-range %s %s",), sagittal_slices=dict( - argstr='-sagittal', - xor=('transverse', 'sagittal', 'coronal'), + argstr="-sagittal", xor=("transverse", "sagittal", "coronal"), ), sinc_interpolation=dict( - argstr='-sinc', - xor=('trilinear_interpolation', 'tricubic_interpolation', - 'nearest_neighbour_interpolation', 'sinc_interpolation'), + argstr="-sinc", + xor=( + "trilinear_interpolation", + "tricubic_interpolation", + "nearest_neighbour_interpolation", + "sinc_interpolation", + ), ), sinc_window_hamming=dict( - argstr='-hamming', - requires=['sinc_interpolation'], - xor=('sinc_window_hanning', 'sinc_window_hamming'), + argstr="-hamming", + requires=["sinc_interpolation"], + xor=("sinc_window_hanning", "sinc_window_hamming"), ), sinc_window_hanning=dict( - argstr='-hanning', - requires=['sinc_interpolation'], - xor=('sinc_window_hanning', 'sinc_window_hamming'), - ), - spacetype=dict(argstr='-spacetype %s', ), - standard_sampling=dict(argstr='-standard_sampling', ), - start=dict( - argstr='-start %s %s %s', - xor=('nelements', 'nelements_x_y_or_z'), - ), - step=dict( - argstr='-step %s %s %s', - xor=('nelements', 'nelements_x_y_or_z'), - ), - talairach=dict(argstr='-talairach', ), - transformation=dict( - argstr='-transformation %s', - extensions=None, - ), + argstr="-hanning", + 
requires=["sinc_interpolation"], + xor=("sinc_window_hanning", "sinc_window_hamming"), + ), + spacetype=dict(argstr="-spacetype %s",), + standard_sampling=dict(argstr="-standard_sampling",), + start=dict(argstr="-start %s %s %s", xor=("nelements", "nelements_x_y_or_z"),), + step=dict(argstr="-step %s %s %s", xor=("nelements", "nelements_x_y_or_z"),), + talairach=dict(argstr="-talairach",), + transformation=dict(argstr="-transformation %s", extensions=None,), transverse_slices=dict( - argstr='-transverse', - xor=('transverse', 'sagittal', 'coronal'), + argstr="-transverse", xor=("transverse", "sagittal", "coronal"), ), tricubic_interpolation=dict( - argstr='-tricubic', - xor=('trilinear_interpolation', 'tricubic_interpolation', - 'nearest_neighbour_interpolation', 'sinc_interpolation'), + argstr="-tricubic", + xor=( + "trilinear_interpolation", + "tricubic_interpolation", + "nearest_neighbour_interpolation", + "sinc_interpolation", + ), ), trilinear_interpolation=dict( - argstr='-trilinear', - xor=('trilinear_interpolation', 'tricubic_interpolation', - 'nearest_neighbour_interpolation', 'sinc_interpolation'), - ), - two=dict(argstr='-2', ), - units=dict(argstr='-units %s', ), + argstr="-trilinear", + xor=( + "trilinear_interpolation", + "tricubic_interpolation", + "nearest_neighbour_interpolation", + "sinc_interpolation", + ), + ), + two=dict(argstr="-2",), + units=dict(argstr="-units %s",), vio_transform=dict( - argstr='-tfm_input_sampling', - xor=('vio_transform', 'no_input_sampling'), + argstr="-tfm_input_sampling", xor=("vio_transform", "no_input_sampling"), ), xdircos=dict( - argstr='-xdircos %s', - requires=('ydircos', 'zdircos'), - xor=('dircos', 'dircos_x_y_or_z'), + argstr="-xdircos %s", + requires=("ydircos", "zdircos"), + xor=("dircos", "dircos_x_y_or_z"), ), xnelements=dict( - argstr='-xnelements %s', - requires=('ynelements', 'znelements'), - xor=('nelements', 'nelements_x_y_or_z'), + argstr="-xnelements %s", + requires=("ynelements", "znelements"), + xor=("nelements", "nelements_x_y_or_z"), ), xstart=dict( - argstr='-xstart %s', - requires=('ystart', 'zstart'), - xor=('start', 'start_x_y_or_z'), + argstr="-xstart %s", + requires=("ystart", "zstart"), + xor=("start", "start_x_y_or_z"), ), xstep=dict( - argstr='-xstep %s', - requires=('ystep', 'zstep'), - xor=('step', 'step_x_y_or_z'), + argstr="-xstep %s", + requires=("ystep", "zstep"), + xor=("step", "step_x_y_or_z"), ), ydircos=dict( - argstr='-ydircos %s', - requires=('xdircos', 'zdircos'), - xor=('dircos', 'dircos_x_y_or_z'), + argstr="-ydircos %s", + requires=("xdircos", "zdircos"), + xor=("dircos", "dircos_x_y_or_z"), ), ynelements=dict( - argstr='-ynelements %s', - requires=('xnelements', 'znelements'), - xor=('nelements', 'nelements_x_y_or_z'), + argstr="-ynelements %s", + requires=("xnelements", "znelements"), + xor=("nelements", "nelements_x_y_or_z"), ), ystart=dict( - argstr='-ystart %s', - requires=('xstart', 'zstart'), - xor=('start', 'start_x_y_or_z'), + argstr="-ystart %s", + requires=("xstart", "zstart"), + xor=("start", "start_x_y_or_z"), ), ystep=dict( - argstr='-ystep %s', - requires=('xstep', 'zstep'), - xor=('step', 'step_x_y_or_z'), + argstr="-ystep %s", + requires=("xstep", "zstep"), + xor=("step", "step_x_y_or_z"), ), zdircos=dict( - argstr='-zdircos %s', - requires=('xdircos', 'ydircos'), - xor=('dircos', 'dircos_x_y_or_z'), + argstr="-zdircos %s", + requires=("xdircos", "ydircos"), + xor=("dircos", "dircos_x_y_or_z"), ), znelements=dict( - argstr='-znelements %s', - requires=('xnelements', 
'ynelements'), - xor=('nelements', 'nelements_x_y_or_z'), + argstr="-znelements %s", + requires=("xnelements", "ynelements"), + xor=("nelements", "nelements_x_y_or_z"), ), zstart=dict( - argstr='-zstart %s', - requires=('xstart', 'ystart'), - xor=('start', 'start_x_y_or_z'), + argstr="-zstart %s", + requires=("xstart", "ystart"), + xor=("start", "start_x_y_or_z"), ), zstep=dict( - argstr='-zstep %s', - requires=('xstep', 'ystep'), - xor=('step', 'step_x_y_or_z'), + argstr="-zstep %s", + requires=("xstep", "ystep"), + xor=("step", "step_x_y_or_z"), ), ) inputs = Resample.input_spec() @@ -249,8 +279,10 @@ def test_Resample_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Resample_outputs(): - output_map = dict(output_file=dict(extensions=None, ), ) + output_map = dict(output_file=dict(extensions=None,),) outputs = Resample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Reshape.py b/nipype/interfaces/minc/tests/test_auto_Reshape.py index 4d51d6800b..92b0e5862e 100644 --- a/nipype/interfaces/minc/tests/test_auto_Reshape.py +++ b/nipype/interfaces/minc/tests/test_auto_Reshape.py @@ -4,40 +4,31 @@ def test_Reshape_inputs(): input_map = dict( - args=dict(argstr='%s', ), - clobber=dict( - argstr='-clobber', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s",), + clobber=dict(argstr="-clobber", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), output_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, hash_files=False, - name_source=['input_file'], - name_template='%s_reshape.mnc', + name_source=["input_file"], + name_template="%s_reshape.mnc", position=-1, ), - verbose=dict(argstr='-verbose', ), - write_short=dict(argstr='-short', ), + verbose=dict(argstr="-verbose",), + write_short=dict(argstr="-short",), ) inputs = Reshape.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Reshape_outputs(): - output_map = dict(output_file=dict(extensions=None, ), ) + output_map = dict(output_file=dict(extensions=None,),) outputs = Reshape.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_ToEcat.py b/nipype/interfaces/minc/tests/test_auto_ToEcat.py index eb64e6fa0b..02936ae4f4 100644 --- a/nipype/interfaces/minc/tests/test_auto_ToEcat.py +++ b/nipype/interfaces/minc/tests/test_auto_ToEcat.py @@ -4,46 +4,39 @@ def test_ToEcat_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - ignore_acquisition_variable=dict( - argstr='-ignore_acquisition_variable', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + ignore_acquisition_variable=dict(argstr="-ignore_acquisition_variable",), ignore_ecat_acquisition_variable=dict( - argstr='-ignore_ecat_acquisition_variable', ), - ignore_ecat_main=dict(argstr='-ignore_ecat_main', ), - ignore_ecat_subheader_variable=dict( - argstr='-ignore_ecat_subheader_variable', ), - ignore_patient_variable=dict(argstr='-ignore_patient_variable', ), - 
ignore_study_variable=dict(argstr='-ignore_study_variable', ), - input_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, + argstr="-ignore_ecat_acquisition_variable", ), - no_decay_corr_fctr=dict(argstr='-no_decay_corr_fctr', ), + ignore_ecat_main=dict(argstr="-ignore_ecat_main",), + ignore_ecat_subheader_variable=dict(argstr="-ignore_ecat_subheader_variable",), + ignore_patient_variable=dict(argstr="-ignore_patient_variable",), + ignore_study_variable=dict(argstr="-ignore_study_variable",), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + no_decay_corr_fctr=dict(argstr="-no_decay_corr_fctr",), output_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, hash_files=False, keep_extension=False, - name_source=['input_file'], - name_template='%s_to_ecat.v', + name_source=["input_file"], + name_template="%s_to_ecat.v", position=-1, ), - voxels_as_integers=dict(argstr='-label', ), + voxels_as_integers=dict(argstr="-label",), ) inputs = ToEcat.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ToEcat_outputs(): - output_map = dict(output_file=dict(extensions=None, ), ) + output_map = dict(output_file=dict(extensions=None,),) outputs = ToEcat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_ToRaw.py b/nipype/interfaces/minc/tests/test_auto_ToRaw.py index f0aa06d3ad..7a15e49f65 100644 --- a/nipype/interfaces/minc/tests/test_auto_ToRaw.py +++ b/nipype/interfaces/minc/tests/test_auto_ToRaw.py @@ -4,77 +4,90 @@ def test_ToRaw_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - nonormalize=dict( - argstr='-nonormalize', - xor=('normalize', 'nonormalize'), - ), - normalize=dict( - argstr='-normalize', - xor=('normalize', 'nonormalize'), - ), - out_file=dict( - argstr='> %s', - extensions=None, - genfile=True, - position=-1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + nonormalize=dict(argstr="-nonormalize", xor=("normalize", "nonormalize"),), + normalize=dict(argstr="-normalize", xor=("normalize", "nonormalize"),), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), output_file=dict( extensions=None, hash_files=False, keep_extension=False, - name_source=['input_file'], - name_template='%s.raw', + name_source=["input_file"], + name_template="%s.raw", position=-1, ), write_byte=dict( - argstr='-byte', - xor=('write_byte', 'write_short', 'write_int', 'write_long', - 'write_float', 'write_double'), + argstr="-byte", + xor=( + "write_byte", + "write_short", + "write_int", + "write_long", + "write_float", + "write_double", + ), ), write_double=dict( - argstr='-double', - xor=('write_byte', 'write_short', 'write_int', 'write_long', - 'write_float', 'write_double'), + argstr="-double", + xor=( + "write_byte", + "write_short", + "write_int", + "write_long", + "write_float", + "write_double", + ), ), write_float=dict( - argstr='-float', - xor=('write_byte', 'write_short', 'write_int', 'write_long', - 'write_float', 'write_double'), + argstr="-float", + xor=( + "write_byte", + "write_short", + "write_int", + "write_long", + "write_float", + "write_double", + ), ), 
write_int=dict( - argstr='-int', - xor=('write_byte', 'write_short', 'write_int', 'write_long', - 'write_float', 'write_double'), + argstr="-int", + xor=( + "write_byte", + "write_short", + "write_int", + "write_long", + "write_float", + "write_double", + ), ), write_long=dict( - argstr='-long', - xor=('write_byte', 'write_short', 'write_int', 'write_long', - 'write_float', 'write_double'), + argstr="-long", + xor=( + "write_byte", + "write_short", + "write_int", + "write_long", + "write_float", + "write_double", + ), ), - write_range=dict(argstr='-range %s %s', ), + write_range=dict(argstr="-range %s %s",), write_short=dict( - argstr='-short', - xor=('write_byte', 'write_short', 'write_int', 'write_long', - 'write_float', 'write_double'), - ), - write_signed=dict( - argstr='-signed', - xor=('write_signed', 'write_unsigned'), + argstr="-short", + xor=( + "write_byte", + "write_short", + "write_int", + "write_long", + "write_float", + "write_double", + ), ), + write_signed=dict(argstr="-signed", xor=("write_signed", "write_unsigned"),), write_unsigned=dict( - argstr='-unsigned', - xor=('write_signed', 'write_unsigned'), + argstr="-unsigned", xor=("write_signed", "write_unsigned"), ), ) inputs = ToRaw.input_spec() @@ -82,8 +95,10 @@ def test_ToRaw_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ToRaw_outputs(): - output_map = dict(output_file=dict(extensions=None, ), ) + output_map = dict(output_file=dict(extensions=None,),) outputs = ToRaw.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_VolSymm.py b/nipype/interfaces/minc/tests/test_auto_VolSymm.py index b71a1105a3..aeb8e6d23a 100644 --- a/nipype/interfaces/minc/tests/test_auto_VolSymm.py +++ b/nipype/interfaces/minc/tests/test_auto_VolSymm.py @@ -4,63 +4,51 @@ def test_VolSymm_inputs(): input_map = dict( - args=dict(argstr='%s', ), - clobber=dict( - argstr='-clobber', - usedefault=True, - ), - config_file=dict( - argstr='-config_file %s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fit_linear=dict(argstr='-linear', ), - fit_nonlinear=dict(argstr='-nonlinear', ), - input_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), + args=dict(argstr="%s",), + clobber=dict(argstr="-clobber", usedefault=True,), + config_file=dict(argstr="-config_file %s", extensions=None,), + environ=dict(nohash=True, usedefault=True,), + fit_linear=dict(argstr="-linear",), + fit_nonlinear=dict(argstr="-nonlinear",), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), input_grid_files=dict(), - nofit=dict(argstr='-nofit', ), + nofit=dict(argstr="-nofit",), output_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, hash_files=False, - name_source=['input_file'], - name_template='%s_vol_symm.mnc', + name_source=["input_file"], + name_template="%s_vol_symm.mnc", position=-1, ), trans_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, hash_files=False, keep_extension=False, - name_source=['input_file'], - name_template='%s_vol_symm.xfm', + name_source=["input_file"], + name_template="%s_vol_symm.xfm", position=-2, ), - verbose=dict(argstr='-verbose', ), - x=dict(argstr='-x', ), - y=dict(argstr='-y', ), - z=dict(argstr='-z', ), + verbose=dict(argstr="-verbose",), + x=dict(argstr="-x",), + y=dict(argstr="-y",), + z=dict(argstr="-z",), ) inputs = 
VolSymm.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_VolSymm_outputs(): output_map = dict( - output_file=dict(extensions=None, ), - output_grid=dict(extensions=None, ), - trans_file=dict(extensions=None, ), + output_file=dict(extensions=None,), + output_grid=dict(extensions=None,), + trans_file=dict(extensions=None,), ) outputs = VolSymm.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Volcentre.py b/nipype/interfaces/minc/tests/test_auto_Volcentre.py index 7b43524fe4..492714adf4 100644 --- a/nipype/interfaces/minc/tests/test_auto_Volcentre.py +++ b/nipype/interfaces/minc/tests/test_auto_Volcentre.py @@ -4,42 +4,33 @@ def test_Volcentre_inputs(): input_map = dict( - args=dict(argstr='%s', ), - centre=dict(argstr='-centre %s %s %s', ), - clobber=dict( - argstr='-clobber', - usedefault=True, - ), - com=dict(argstr='-com', ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s",), + centre=dict(argstr="-centre %s %s %s",), + clobber=dict(argstr="-clobber", usedefault=True,), + com=dict(argstr="-com",), + environ=dict(nohash=True, usedefault=True,), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), output_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, hash_files=False, - name_source=['input_file'], - name_template='%s_volcentre.mnc', + name_source=["input_file"], + name_template="%s_volcentre.mnc", position=-1, ), - verbose=dict(argstr='-verbose', ), - zero_dircos=dict(argstr='-zero_dircos', ), + verbose=dict(argstr="-verbose",), + zero_dircos=dict(argstr="-zero_dircos",), ) inputs = Volcentre.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Volcentre_outputs(): - output_map = dict(output_file=dict(extensions=None, ), ) + output_map = dict(output_file=dict(extensions=None,),) outputs = Volcentre.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Voliso.py b/nipype/interfaces/minc/tests/test_auto_Voliso.py index 873a763468..534315d0cf 100644 --- a/nipype/interfaces/minc/tests/test_auto_Voliso.py +++ b/nipype/interfaces/minc/tests/test_auto_Voliso.py @@ -4,42 +4,33 @@ def test_Voliso_inputs(): input_map = dict( - args=dict(argstr='%s', ), - avgstep=dict(argstr='--avgstep', ), - clobber=dict( - argstr='--clobber', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - maxstep=dict(argstr='--maxstep %s', ), - minstep=dict(argstr='--minstep %s', ), + args=dict(argstr="%s",), + avgstep=dict(argstr="--avgstep",), + clobber=dict(argstr="--clobber", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + maxstep=dict(argstr="--maxstep %s",), + minstep=dict(argstr="--minstep %s",), output_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, hash_files=False, - name_source=['input_file'], - name_template='%s_voliso.mnc', + name_source=["input_file"], + name_template="%s_voliso.mnc", position=-1, ), - verbose=dict(argstr='--verbose', ), + verbose=dict(argstr="--verbose",), ) inputs = 
Voliso.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Voliso_outputs(): - output_map = dict(output_file=dict(extensions=None, ), ) + output_map = dict(output_file=dict(extensions=None,),) outputs = Voliso.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Volpad.py b/nipype/interfaces/minc/tests/test_auto_Volpad.py index 910ca5d827..ce67c4ef73 100644 --- a/nipype/interfaces/minc/tests/test_auto_Volpad.py +++ b/nipype/interfaces/minc/tests/test_auto_Volpad.py @@ -4,44 +4,35 @@ def test_Volpad_inputs(): input_map = dict( - args=dict(argstr='%s', ), - auto=dict(argstr='-auto', ), - auto_freq=dict(argstr='-auto_freq %s', ), - clobber=dict( - argstr='-clobber', - usedefault=True, - ), - distance=dict(argstr='-distance %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s",), + auto=dict(argstr="-auto",), + auto_freq=dict(argstr="-auto_freq %s",), + clobber=dict(argstr="-clobber", usedefault=True,), + distance=dict(argstr="-distance %s",), + environ=dict(nohash=True, usedefault=True,), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), output_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, hash_files=False, - name_source=['input_file'], - name_template='%s_volpad.mnc', + name_source=["input_file"], + name_template="%s_volpad.mnc", position=-1, ), - smooth=dict(argstr='-smooth', ), - smooth_distance=dict(argstr='-smooth_distance %s', ), - verbose=dict(argstr='-verbose', ), + smooth=dict(argstr="-smooth",), + smooth_distance=dict(argstr="-smooth_distance %s",), + verbose=dict(argstr="-verbose",), ) inputs = Volpad.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Volpad_outputs(): - output_map = dict(output_file=dict(extensions=None, ), ) + output_map = dict(output_file=dict(extensions=None,),) outputs = Volpad.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_XfmAvg.py b/nipype/interfaces/minc/tests/test_auto_XfmAvg.py index dc8a7028e8..f688494751 100644 --- a/nipype/interfaces/minc/tests/test_auto_XfmAvg.py +++ b/nipype/interfaces/minc/tests/test_auto_XfmAvg.py @@ -4,43 +4,28 @@ def test_XfmAvg_inputs(): input_map = dict( - args=dict(argstr='%s', ), - avg_linear=dict(argstr='-avg_linear', ), - avg_nonlinear=dict(argstr='-avg_nonlinear', ), - clobber=dict( - argstr='-clobber', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - ignore_linear=dict(argstr='-ignore_linear', ), - ignore_nonlinear=dict(argstr='-ignore_nonline', ), - input_files=dict( - argstr='%s', - mandatory=True, - position=-2, - sep=' ', - ), + args=dict(argstr="%s",), + avg_linear=dict(argstr="-avg_linear",), + avg_nonlinear=dict(argstr="-avg_nonlinear",), + clobber=dict(argstr="-clobber", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + ignore_linear=dict(argstr="-ignore_linear",), + ignore_nonlinear=dict(argstr="-ignore_nonline",), + input_files=dict(argstr="%s", mandatory=True, position=-2, sep=" ",), input_grid_files=dict(), - output_file=dict( - argstr='%s', - extensions=None, - genfile=True, - position=-1, - ), - 
verbose=dict(argstr='-verbose', ), + output_file=dict(argstr="%s", extensions=None, genfile=True, position=-1,), + verbose=dict(argstr="-verbose",), ) inputs = XfmAvg.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_XfmAvg_outputs(): output_map = dict( - output_file=dict(extensions=None, ), - output_grid=dict(extensions=None, ), + output_file=dict(extensions=None,), output_grid=dict(extensions=None,), ) outputs = XfmAvg.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_XfmConcat.py b/nipype/interfaces/minc/tests/test_auto_XfmConcat.py index a9dfa63cad..23642895da 100644 --- a/nipype/interfaces/minc/tests/test_auto_XfmConcat.py +++ b/nipype/interfaces/minc/tests/test_auto_XfmConcat.py @@ -4,43 +4,31 @@ def test_XfmConcat_inputs(): input_map = dict( - args=dict(argstr='%s', ), - clobber=dict( - argstr='-clobber', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_files=dict( - argstr='%s', - mandatory=True, - position=-2, - sep=' ', - ), + args=dict(argstr="%s",), + clobber=dict(argstr="-clobber", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + input_files=dict(argstr="%s", mandatory=True, position=-2, sep=" ",), input_grid_files=dict(), output_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, hash_files=False, - name_source=['input_files'], - name_template='%s_xfmconcat.xfm', + name_source=["input_files"], + name_template="%s_xfmconcat.xfm", position=-1, ), - verbose=dict(argstr='-verbose', ), + verbose=dict(argstr="-verbose",), ) inputs = XfmConcat.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_XfmConcat_outputs(): - output_map = dict( - output_file=dict(extensions=None, ), - output_grids=dict(), - ) + output_map = dict(output_file=dict(extensions=None,), output_grids=dict(),) outputs = XfmConcat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_XfmInvert.py b/nipype/interfaces/minc/tests/test_auto_XfmInvert.py index 4dacaa4876..7f0c42c433 100644 --- a/nipype/interfaces/minc/tests/test_auto_XfmInvert.py +++ b/nipype/interfaces/minc/tests/test_auto_XfmInvert.py @@ -4,38 +4,23 @@ def test_XfmInvert_inputs(): input_map = dict( - args=dict(argstr='%s', ), - clobber=dict( - argstr='-clobber', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - output_file=dict( - argstr='%s', - extensions=None, - genfile=True, - position=-1, - ), - verbose=dict(argstr='-verbose', ), + args=dict(argstr="%s",), + clobber=dict(argstr="-clobber", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + output_file=dict(argstr="%s", extensions=None, genfile=True, position=-1,), + verbose=dict(argstr="-verbose",), ) inputs = XfmInvert.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_XfmInvert_outputs(): output_map = dict( - output_file=dict(extensions=None, ), - output_grid=dict(extensions=None, ), + output_file=dict(extensions=None,), output_grid=dict(extensions=None,), ) outputs = 
XfmInvert.output_spec() diff --git a/nipype/interfaces/mipav/__init__.py b/nipype/interfaces/mipav/__init__.py index 2d5fce8652..2bdbfef78b 100644 --- a/nipype/interfaces/mipav/__init__.py +++ b/nipype/interfaces/mipav/__init__.py @@ -1,10 +1,21 @@ # -*- coding: utf-8 -*- from .developer import ( - JistLaminarVolumetricLayering, JistBrainMgdmSegmentation, - JistLaminarProfileGeometry, JistLaminarProfileCalculator, MedicAlgorithmN3, - JistLaminarROIAveraging, MedicAlgorithmLesionToads, - JistBrainMp2rageSkullStripping, JistCortexSurfaceMeshInflation, RandomVol, - MedicAlgorithmImageCalculator, JistBrainMp2rageDuraEstimation, - JistLaminarProfileSampling, MedicAlgorithmMipavReorient, - MedicAlgorithmSPECTRE2010, JistBrainPartialVolumeFilter, - JistIntensityMp2rageMasking, MedicAlgorithmThresholdToBinaryMask) + JistLaminarVolumetricLayering, + JistBrainMgdmSegmentation, + JistLaminarProfileGeometry, + JistLaminarProfileCalculator, + MedicAlgorithmN3, + JistLaminarROIAveraging, + MedicAlgorithmLesionToads, + JistBrainMp2rageSkullStripping, + JistCortexSurfaceMeshInflation, + RandomVol, + MedicAlgorithmImageCalculator, + JistBrainMp2rageDuraEstimation, + JistLaminarProfileSampling, + MedicAlgorithmMipavReorient, + MedicAlgorithmSPECTRE2010, + JistBrainPartialVolumeFilter, + JistIntensityMp2rageMasking, + MedicAlgorithmThresholdToBinaryMask, +) diff --git a/nipype/interfaces/mipav/developer.py b/nipype/interfaces/mipav/developer.py index ffb9e10cc3..9bc24b1a80 100644 --- a/nipype/interfaces/mipav/developer.py +++ b/nipype/interfaces/mipav/developer.py @@ -5,45 +5,54 @@ import os -from ..base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, - TraitedSpec, File, Directory, traits, isdefined, - InputMultiPath, OutputMultiPath) +from ..base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) class JistLaminarVolumetricLayeringInputSpec(CommandLineInputSpec): inInner = File( - desc="Inner Distance Image (GM/WM boundary)", - exists=True, - argstr="--inInner %s") + desc="Inner Distance Image (GM/WM boundary)", exists=True, argstr="--inInner %s" + ) inOuter = File( desc="Outer Distance Image (CSF/GM boundary)", exists=True, - argstr="--inOuter %s") + argstr="--inOuter %s", + ) inNumber = traits.Int(desc="Number of layers", argstr="--inNumber %d") inMax = traits.Int( - desc="Max iterations for narrow band evolution", argstr="--inMax %d") + desc="Max iterations for narrow band evolution", argstr="--inMax %d" + ) inMin = traits.Float( - desc="Min change ratio for narrow band evolution", argstr="--inMin %f") + desc="Min change ratio for narrow band evolution", argstr="--inMin %f" + ) inLayering = traits.Enum( "distance-preserving", "volume-preserving", desc="Layering method", - argstr="--inLayering %s") + argstr="--inLayering %s", + ) inLayering2 = traits.Enum( - "outward", - "inward", - desc="Layering direction", - argstr="--inLayering2 %s") + "outward", "inward", desc="Layering direction", argstr="--inLayering2 %s" + ) incurvature = traits.Int( - desc="curvature approximation scale (voxels)", - argstr="--incurvature %d") + desc="curvature approximation scale (voxels)", argstr="--incurvature %d" + ) inratio = traits.Float( - desc="ratio smoothing kernel size (voxels)", argstr="--inratio %f") + desc="ratio smoothing kernel size (voxels)", argstr="--inratio %f" + ) inpresmooth = traits.Enum( - "true", - "false", - desc="pre-smooth cortical surfaces", - argstr="--inpresmooth %s") 
+ "true", "false", desc="pre-smooth cortical surfaces", argstr="--inpresmooth %s" + ) inTopology = traits.Enum( "26/6", "6/26", @@ -54,35 +63,40 @@ class JistLaminarVolumetricLayeringInputSpec(CommandLineInputSpec): "wco", "no", desc="Topology", - argstr="--inTopology %s") - xPrefExt = traits.Enum( - "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + argstr="--inTopology %s", + ) + xPrefExt = traits.Enum("nrrd", desc="Output File Type", argstr="--xPrefExt %s") outContinuous = traits.Either( traits.Bool, File(), hash_files=False, desc="Continuous depth measurement", - argstr="--outContinuous %s") + argstr="--outContinuous %s", + ) outDiscrete = traits.Either( traits.Bool, File(), hash_files=False, desc="Discrete sampled layers", - argstr="--outDiscrete %s") + argstr="--outDiscrete %s", + ) outLayer = traits.Either( traits.Bool, File(), hash_files=False, desc="Layer boundary surfaces", - argstr="--outLayer %s") + argstr="--outLayer %s", + ) null = traits.Str(desc="Execution Time", argstr="--null %s") xDefaultMem = traits.Int( - desc="Set default maximum heap size", argstr="-xDefaultMem %d") + desc="Set default maximum heap size", argstr="-xDefaultMem %d" + ) xMaxProcess = traits.Int( 1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", - usedefault=True) + usedefault=True, + ) class JistLaminarVolumetricLayeringOutputSpec(TraitedSpec): @@ -109,30 +123,27 @@ class JistLaminarVolumetricLayering(SEMLikeCommandLine): output_spec = JistLaminarVolumetricLayeringOutputSpec _cmd = "java edu.jhu.ece.iacl.jist.cli.run de.mpg.cbs.jist.laminar.JistLaminarVolumetricLayering " _outputs_filenames = { - 'outContinuous': 'outContinuous.nii', - 'outLayer': 'outLayer.nii', - 'outDiscrete': 'outDiscrete.nii' + "outContinuous": "outContinuous.nii", + "outLayer": "outLayer.nii", + "outDiscrete": "outDiscrete.nii", } _redirect_x = True class JistBrainMgdmSegmentationInputSpec(CommandLineInputSpec): - inMP2RAGE = File( - desc="MP2RAGE T1 Map Image", exists=True, argstr="--inMP2RAGE %s") + inMP2RAGE = File(desc="MP2RAGE T1 Map Image", exists=True, argstr="--inMP2RAGE %s") inMP2RAGE2 = File( - desc="MP2RAGE T1-weighted Image", - exists=True, - argstr="--inMP2RAGE2 %s") + desc="MP2RAGE T1-weighted Image", exists=True, argstr="--inMP2RAGE2 %s" + ) inPV = File(desc="PV / Dura Image", exists=True, argstr="--inPV %s") inMPRAGE = File( - desc="MPRAGE T1-weighted Image", exists=True, argstr="--inMPRAGE %s") + desc="MPRAGE T1-weighted Image", exists=True, argstr="--inMPRAGE %s" + ) inFLAIR = File(desc="FLAIR Image", exists=True, argstr="--inFLAIR %s") inAtlas = File(desc="Atlas file", exists=True, argstr="--inAtlas %s") inData = traits.Float(desc="Data weight", argstr="--inData %f") - inCurvature = traits.Float( - desc="Curvature weight", argstr="--inCurvature %f") - inPosterior = traits.Float( - desc="Posterior scale (mm)", argstr="--inPosterior %f") + inCurvature = traits.Float(desc="Curvature weight", argstr="--inCurvature %f") + inPosterior = traits.Float(desc="Posterior scale (mm)", argstr="--inPosterior %f") inMax = traits.Int(desc="Max iterations", argstr="--inMax %d") inMin = traits.Float(desc="Min change", argstr="--inMin %f") inSteps = traits.Int(desc="Steps", argstr="--inSteps %d") @@ -146,60 +157,62 @@ class JistBrainMgdmSegmentationInputSpec(CommandLineInputSpec): "wco", "no", desc="Topology", - argstr="--inTopology %s") + argstr="--inTopology %s", + ) inCompute = traits.Enum( - "true", "false", desc="Compute posteriors", argstr="--inCompute %s") + "true", "false", desc="Compute 
posteriors", argstr="--inCompute %s" + ) inAdjust = traits.Enum( - "true", - "false", - desc="Adjust intensity priors", - argstr="--inAdjust %s") + "true", "false", desc="Adjust intensity priors", argstr="--inAdjust %s" + ) inOutput = traits.Enum( - "segmentation", - "memberships", - desc="Output images", - argstr="--inOutput %s") - xPrefExt = traits.Enum( - "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + "segmentation", "memberships", desc="Output images", argstr="--inOutput %s" + ) + xPrefExt = traits.Enum("nrrd", desc="Output File Type", argstr="--xPrefExt %s") outSegmented = traits.Either( traits.Bool, File(), hash_files=False, desc="Segmented Brain Image", - argstr="--outSegmented %s") + argstr="--outSegmented %s", + ) outLevelset = traits.Either( traits.Bool, File(), hash_files=False, desc="Levelset Boundary Image", - argstr="--outLevelset %s") + argstr="--outLevelset %s", + ) outPosterior2 = traits.Either( traits.Bool, File(), hash_files=False, desc="Posterior Maximum Memberships (4D)", - argstr="--outPosterior2 %s") + argstr="--outPosterior2 %s", + ) outPosterior3 = traits.Either( traits.Bool, File(), hash_files=False, desc="Posterior Maximum Labels (4D)", - argstr="--outPosterior3 %s") + argstr="--outPosterior3 %s", + ) null = traits.Str(desc="Execution Time", argstr="--null %s") xDefaultMem = traits.Int( - desc="Set default maximum heap size", argstr="-xDefaultMem %d") + desc="Set default maximum heap size", argstr="-xDefaultMem %d" + ) xMaxProcess = traits.Int( 1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", - usedefault=True) + usedefault=True, + ) class JistBrainMgdmSegmentationOutputSpec(TraitedSpec): outSegmented = File(desc="Segmented Brain Image", exists=True) outLevelset = File(desc="Levelset Boundary Image", exists=True) - outPosterior2 = File( - desc="Posterior Maximum Memberships (4D)", exists=True) + outPosterior2 = File(desc="Posterior Maximum Memberships (4D)", exists=True) outPosterior3 = File(desc="Posterior Maximum Labels (4D)", exists=True) @@ -218,17 +231,16 @@ class JistBrainMgdmSegmentation(SEMLikeCommandLine): output_spec = JistBrainMgdmSegmentationOutputSpec _cmd = "java edu.jhu.ece.iacl.jist.cli.run de.mpg.cbs.jist.brain.JistBrainMgdmSegmentation " _outputs_filenames = { - 'outSegmented': 'outSegmented.nii', - 'outPosterior2': 'outPosterior2.nii', - 'outPosterior3': 'outPosterior3.nii', - 'outLevelset': 'outLevelset.nii' + "outSegmented": "outSegmented.nii", + "outPosterior2": "outPosterior2.nii", + "outPosterior3": "outPosterior3.nii", + "outLevelset": "outLevelset.nii", } _redirect_x = True class JistLaminarProfileGeometryInputSpec(CommandLineInputSpec): - inProfile = File( - desc="Profile Surface Image", exists=True, argstr="--inProfile %s") + inProfile = File(desc="Profile Surface Image", exists=True, argstr="--inProfile %s") incomputed = traits.Enum( "thickness", "curvedness", @@ -239,32 +251,27 @@ class JistLaminarProfileGeometryInputSpec(CommandLineInputSpec): "profile_curvature", "profile_torsion", desc="computed measure", - argstr="--incomputed %s") + argstr="--incomputed %s", + ) inregularization = traits.Enum( - "none", - "Gaussian", - desc="regularization", - argstr="--inregularization %s") - insmoothing = traits.Float( - desc="smoothing parameter", argstr="--insmoothing %f") - inoutside = traits.Float( - desc="outside extension (mm)", argstr="--inoutside %f") - xPrefExt = traits.Enum( - "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + "none", "Gaussian", desc="regularization", 
argstr="--inregularization %s" + ) + insmoothing = traits.Float(desc="smoothing parameter", argstr="--insmoothing %f") + inoutside = traits.Float(desc="outside extension (mm)", argstr="--inoutside %f") + xPrefExt = traits.Enum("nrrd", desc="Output File Type", argstr="--xPrefExt %s") outResult = traits.Either( - traits.Bool, - File(), - hash_files=False, - desc="Result", - argstr="--outResult %s") + traits.Bool, File(), hash_files=False, desc="Result", argstr="--outResult %s" + ) null = traits.Str(desc="Execution Time", argstr="--null %s") xDefaultMem = traits.Int( - desc="Set default maximum heap size", argstr="-xDefaultMem %d") + desc="Set default maximum heap size", argstr="-xDefaultMem %d" + ) xMaxProcess = traits.Int( 1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", - usedefault=True) + usedefault=True, + ) class JistLaminarProfileGeometryOutputSpec(TraitedSpec): @@ -285,38 +292,37 @@ class JistLaminarProfileGeometry(SEMLikeCommandLine): input_spec = JistLaminarProfileGeometryInputSpec output_spec = JistLaminarProfileGeometryOutputSpec _cmd = "java edu.jhu.ece.iacl.jist.cli.run de.mpg.cbs.jist.laminar.JistLaminarProfileGeometry " - _outputs_filenames = {'outResult': 'outResult.nii'} + _outputs_filenames = {"outResult": "outResult.nii"} _redirect_x = True class JistLaminarProfileCalculatorInputSpec(CommandLineInputSpec): inIntensity = File( - desc="Intensity Profile Image", exists=True, argstr="--inIntensity %s") - inMask = File( - desc="Mask Image (opt, 3D or 4D)", exists=True, argstr="--inMask %s") + desc="Intensity Profile Image", exists=True, argstr="--inIntensity %s" + ) + inMask = File(desc="Mask Image (opt, 3D or 4D)", exists=True, argstr="--inMask %s") incomputed = traits.Enum( "mean", "stdev", "skewness", "kurtosis", desc="computed statistic", - argstr="--incomputed %s") - xPrefExt = traits.Enum( - "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + argstr="--incomputed %s", + ) + xPrefExt = traits.Enum("nrrd", desc="Output File Type", argstr="--xPrefExt %s") outResult = traits.Either( - traits.Bool, - File(), - hash_files=False, - desc="Result", - argstr="--outResult %s") + traits.Bool, File(), hash_files=False, desc="Result", argstr="--outResult %s" + ) null = traits.Str(desc="Execution Time", argstr="--null %s") xDefaultMem = traits.Int( - desc="Set default maximum heap size", argstr="-xDefaultMem %d") + desc="Set default maximum heap size", argstr="-xDefaultMem %d" + ) xMaxProcess = traits.Int( 1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", - usedefault=True) + usedefault=True, + ) class JistLaminarProfileCalculatorOutputSpec(TraitedSpec): @@ -337,64 +343,65 @@ class JistLaminarProfileCalculator(SEMLikeCommandLine): input_spec = JistLaminarProfileCalculatorInputSpec output_spec = JistLaminarProfileCalculatorOutputSpec _cmd = "java edu.jhu.ece.iacl.jist.cli.run de.mpg.cbs.jist.laminar.JistLaminarProfileCalculator " - _outputs_filenames = {'outResult': 'outResult.nii'} + _outputs_filenames = {"outResult": "outResult.nii"} _redirect_x = True class MedicAlgorithmN3InputSpec(CommandLineInputSpec): inInput = File(desc="Input Volume", exists=True, argstr="--inInput %s") inSignal = traits.Float( - desc= - "Default = min + 1, Values at less than threshold are treated as part of the background", - argstr="--inSignal %f") - inMaximum = traits.Int( - desc="Maximum number of Iterations", argstr="--inMaximum %d") + desc="Default = min + 1, Values at less than threshold are treated as part of the background", + 
argstr="--inSignal %f", + ) + inMaximum = traits.Int(desc="Maximum number of Iterations", argstr="--inMaximum %d") inEnd = traits.Float( - desc= - "Usually 0.01-0.00001, The measure used to terminate the iterations is the coefficient of variation of change in field estimates between successive iterations.", - argstr="--inEnd %f") + desc="Usually 0.01-0.00001, The measure used to terminate the iterations is the coefficient of variation of change in field estimates between successive iterations.", + argstr="--inEnd %f", + ) inField = traits.Float( - desc= - "Characteristic distance over which the field varies. The distance between adjacent knots in bspline fitting with at least 4 knots going in every dimension. The default in the dialog is one third the distance (resolution * extents) of the smallest dimension.", - argstr="--inField %f") + desc="Characteristic distance over which the field varies. The distance between adjacent knots in bspline fitting with at least 4 knots going in every dimension. The default in the dialog is one third the distance (resolution * extents) of the smallest dimension.", + argstr="--inField %f", + ) inSubsample = traits.Float( - desc= - "Usually between 1-32, The factor by which the data is subsampled to a lower resolution in estimating the slowly varying non-uniformity field. Reduce sampling in the finest sampling direction by the shrink factor.", - argstr="--inSubsample %f") + desc="Usually between 1-32, The factor by which the data is subsampled to a lower resolution in estimating the slowly varying non-uniformity field. Reduce sampling in the finest sampling direction by the shrink factor.", + argstr="--inSubsample %f", + ) inKernel = traits.Float( - desc= - "Usually between 0.05-0.50, Width of deconvolution kernel used to sharpen the histogram. Larger values give faster convergence while smaller values give greater accuracy.", - argstr="--inKernel %f") - inWeiner = traits.Float( - desc="Usually between 0.0-1.0", argstr="--inWeiner %f") + desc="Usually between 0.05-0.50, Width of deconvolution kernel used to sharpen the histogram. Larger values give faster convergence while smaller values give greater accuracy.", + argstr="--inKernel %f", + ) + inWeiner = traits.Float(desc="Usually between 0.0-1.0", argstr="--inWeiner %f") inAutomatic = traits.Enum( "true", "false", - desc= - "If true determines the threshold by histogram analysis. If true a VOI cannot be used and the input threshold is ignored.", - argstr="--inAutomatic %s") - xPrefExt = traits.Enum( - "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + desc="If true determines the threshold by histogram analysis. 
If true a VOI cannot be used and the input threshold is ignored.", + argstr="--inAutomatic %s", + ) + xPrefExt = traits.Enum("nrrd", desc="Output File Type", argstr="--xPrefExt %s") outInhomogeneity = traits.Either( traits.Bool, File(), hash_files=False, desc="Inhomogeneity Corrected Volume", - argstr="--outInhomogeneity %s") + argstr="--outInhomogeneity %s", + ) outInhomogeneity2 = traits.Either( traits.Bool, File(), hash_files=False, desc="Inhomogeneity Field", - argstr="--outInhomogeneity2 %s") + argstr="--outInhomogeneity2 %s", + ) null = traits.Str(desc="Execution Time", argstr="--null %s") xDefaultMem = traits.Int( - desc="Set default maximum heap size", argstr="-xDefaultMem %d") + desc="Set default maximum heap size", argstr="-xDefaultMem %d" + ) xMaxProcess = traits.Int( 1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", - usedefault=True) + usedefault=True, + ) class MedicAlgorithmN3OutputSpec(TraitedSpec): @@ -417,35 +424,33 @@ class MedicAlgorithmN3(SEMLikeCommandLine): output_spec = MedicAlgorithmN3OutputSpec _cmd = "java edu.jhu.ece.iacl.jist.cli.run edu.jhu.ece.iacl.plugins.classification.MedicAlgorithmN3 " _outputs_filenames = { - 'outInhomogeneity2': 'outInhomogeneity2.nii', - 'outInhomogeneity': 'outInhomogeneity.nii' + "outInhomogeneity2": "outInhomogeneity2.nii", + "outInhomogeneity": "outInhomogeneity.nii", } _redirect_x = True class JistLaminarROIAveragingInputSpec(CommandLineInputSpec): inIntensity = File( - desc="Intensity Profile Image", exists=True, argstr="--inIntensity %s") + desc="Intensity Profile Image", exists=True, argstr="--inIntensity %s" + ) inROI = File(desc="ROI Mask", exists=True, argstr="--inROI %s") inROI2 = traits.Str(desc="ROI Name", argstr="--inROI2 %s") - inMask = File( - desc="Mask Image (opt, 3D or 4D)", exists=True, argstr="--inMask %s") - xPrefExt = traits.Enum( - "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + inMask = File(desc="Mask Image (opt, 3D or 4D)", exists=True, argstr="--inMask %s") + xPrefExt = traits.Enum("nrrd", desc="Output File Type", argstr="--xPrefExt %s") outROI3 = traits.Either( - traits.Bool, - File(), - hash_files=False, - desc="ROI Average", - argstr="--outROI3 %s") + traits.Bool, File(), hash_files=False, desc="ROI Average", argstr="--outROI3 %s" + ) null = traits.Str(desc="Execution Time", argstr="--null %s") xDefaultMem = traits.Int( - desc="Set default maximum heap size", argstr="-xDefaultMem %d") + desc="Set default maximum heap size", argstr="-xDefaultMem %d" + ) xMaxProcess = traits.Int( 1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", - usedefault=True) + usedefault=True, + ) class JistLaminarROIAveragingOutputSpec(TraitedSpec): @@ -466,152 +471,159 @@ class JistLaminarROIAveraging(SEMLikeCommandLine): input_spec = JistLaminarROIAveragingInputSpec output_spec = JistLaminarROIAveragingOutputSpec _cmd = "java edu.jhu.ece.iacl.jist.cli.run de.mpg.cbs.jist.laminar.JistLaminarROIAveraging " - _outputs_filenames = {'outROI3': 'outROI3'} + _outputs_filenames = {"outROI3": "outROI3"} _redirect_x = True class MedicAlgorithmLesionToadsInputSpec(CommandLineInputSpec): - inT1_MPRAGE = File( - desc="T1_MPRAGE Image", exists=True, argstr="--inT1_MPRAGE %s") - inT1_SPGR = File( - desc="T1_SPGR Image", exists=True, argstr="--inT1_SPGR %s") + inT1_MPRAGE = File(desc="T1_MPRAGE Image", exists=True, argstr="--inT1_MPRAGE %s") + inT1_SPGR = File(desc="T1_SPGR Image", exists=True, argstr="--inT1_SPGR %s") inFLAIR = File(desc="FLAIR Image", exists=True, 
argstr="--inFLAIR %s") inAtlas = traits.Enum( - "With Lesion", "No Lesion", desc="Atlas to Use", argstr="--inAtlas %s") + "With Lesion", "No Lesion", desc="Atlas to Use", argstr="--inAtlas %s" + ) inOutput = traits.Enum( "hard segmentation", "hard segmentation+memberships", "cruise inputs", "dura removal inputs", desc="Output images", - argstr="--inOutput %s") + argstr="--inOutput %s", + ) inOutput2 = traits.Enum( "true", "false", - desc= - "Output the hard classification using maximum membership (not neceesarily topologically correct)", - argstr="--inOutput2 %s") + desc="Output the hard classification using maximum membership (not neceesarily topologically correct)", + argstr="--inOutput2 %s", + ) inCorrect = traits.Enum( - "true", - "false", - desc="Correct MR field inhomogeneity.", - argstr="--inCorrect %s") + "true", "false", desc="Correct MR field inhomogeneity.", argstr="--inCorrect %s" + ) inOutput3 = traits.Enum( "true", "false", desc="Output the estimated inhomogeneity field", - argstr="--inOutput3 %s") + argstr="--inOutput3 %s", + ) inAtlas2 = File( - desc="Atlas File - With Lesions", exists=True, argstr="--inAtlas2 %s") + desc="Atlas File - With Lesions", exists=True, argstr="--inAtlas2 %s" + ) inAtlas3 = File( desc="Atlas File - No Lesion - T1 and FLAIR", exists=True, - argstr="--inAtlas3 %s") + argstr="--inAtlas3 %s", + ) inAtlas4 = File( - desc="Atlas File - No Lesion - T1 Only", - exists=True, - argstr="--inAtlas4 %s") + desc="Atlas File - No Lesion - T1 Only", exists=True, argstr="--inAtlas4 %s" + ) inMaximum = traits.Int( - desc= - "Maximum distance from the interventricular WM boundary to downweight the lesion membership to avoid false postives", - argstr="--inMaximum %d") - inMaximum2 = traits.Int( - desc="Maximum Ventircle Distance", argstr="--inMaximum2 %d") + desc="Maximum distance from the interventricular WM boundary to downweight the lesion membership to avoid false postives", + argstr="--inMaximum %d", + ) + inMaximum2 = traits.Int(desc="Maximum Ventircle Distance", argstr="--inMaximum2 %d") inMaximum3 = traits.Int( - desc="Maximum InterVentricular Distance", argstr="--inMaximum3 %d") + desc="Maximum InterVentricular Distance", argstr="--inMaximum3 %d" + ) inInclude = traits.Enum( "true", "false", desc="Include lesion in WM class in hard classification", - argstr="--inInclude %s") + argstr="--inInclude %s", + ) inAtlas5 = traits.Float( desc="Controls the effect of the statistical atlas on the segmentation", - argstr="--inAtlas5 %f") + argstr="--inAtlas5 %f", + ) inSmooting = traits.Float( desc="Controls the effect of neighberhood voxels on the membership", - argstr="--inSmooting %f") + argstr="--inSmooting %f", + ) inMaximum4 = traits.Float( - desc= - "Maximum amount of relative change in the energy function considered as the convergence criteria", - argstr="--inMaximum4 %f") - inMaximum5 = traits.Int( - desc="Maximum iterations", argstr="--inMaximum5 %d") + desc="Maximum amount of relative change in the energy function considered as the convergence criteria", + argstr="--inMaximum4 %f", + ) + inMaximum5 = traits.Int(desc="Maximum iterations", argstr="--inMaximum5 %d") inAtlas6 = traits.Enum( - "rigid", - "multi_fully_affine", - desc="Atlas alignment", - argstr="--inAtlas6 %s") + "rigid", "multi_fully_affine", desc="Atlas alignment", argstr="--inAtlas6 %s" + ) inConnectivity = traits.Enum( "(26,6)", "(6,26)", "(6,18)", "(18,6)", desc="Connectivity (foreground,background)", - argstr="--inConnectivity %s") - xPrefExt = traits.Enum( - "nrrd", desc="Output File Type", 
argstr="--xPrefExt %s") + argstr="--inConnectivity %s", + ) + xPrefExt = traits.Enum("nrrd", desc="Output File Type", argstr="--xPrefExt %s") outHard = traits.Either( traits.Bool, File(), hash_files=False, desc="Hard segmentation", - argstr="--outHard %s") + argstr="--outHard %s", + ) outHard2 = traits.Either( traits.Bool, File(), hash_files=False, desc="Hard segmentationfrom memberships", - argstr="--outHard2 %s") + argstr="--outHard2 %s", + ) outInhomogeneity = traits.Either( traits.Bool, File(), hash_files=False, desc="Inhomogeneity Field", - argstr="--outInhomogeneity %s") + argstr="--outInhomogeneity %s", + ) outMembership = traits.Either( traits.Bool, File(), hash_files=False, desc="Membership Functions", - argstr="--outMembership %s") + argstr="--outMembership %s", + ) outLesion = traits.Either( traits.Bool, File(), hash_files=False, desc="Lesion Segmentation", - argstr="--outLesion %s") + argstr="--outLesion %s", + ) outSulcal = traits.Either( traits.Bool, File(), hash_files=False, desc="Sulcal CSF Membership", - argstr="--outSulcal %s") + argstr="--outSulcal %s", + ) outCortical = traits.Either( traits.Bool, File(), hash_files=False, desc="Cortical GM Membership", - argstr="--outCortical %s") + argstr="--outCortical %s", + ) outFilled = traits.Either( traits.Bool, File(), hash_files=False, desc="Filled WM Membership", - argstr="--outFilled %s") + argstr="--outFilled %s", + ) outWM = traits.Either( - traits.Bool, - File(), - hash_files=False, - desc="WM Mask", - argstr="--outWM %s") + traits.Bool, File(), hash_files=False, desc="WM Mask", argstr="--outWM %s" + ) null = traits.Str(desc="Execution Time", argstr="--null %s") xDefaultMem = traits.Int( - desc="Set default maximum heap size", argstr="-xDefaultMem %d") + desc="Set default maximum heap size", argstr="-xDefaultMem %d" + ) xMaxProcess = traits.Int( 1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", - usedefault=True) + usedefault=True, + ) class MedicAlgorithmLesionToadsOutputSpec(TraitedSpec): @@ -644,68 +656,68 @@ class MedicAlgorithmLesionToads(SEMLikeCommandLine): output_spec = MedicAlgorithmLesionToadsOutputSpec _cmd = "java edu.jhu.ece.iacl.jist.cli.run edu.jhu.ece.iacl.plugins.classification.MedicAlgorithmLesionToads " _outputs_filenames = { - 'outWM': 'outWM.nii', - 'outHard': 'outHard.nii', - 'outFilled': 'outFilled.nii', - 'outMembership': 'outMembership.nii', - 'outInhomogeneity': 'outInhomogeneity.nii', - 'outCortical': 'outCortical.nii', - 'outHard2': 'outHard2.nii', - 'outLesion': 'outLesion.nii', - 'outSulcal': 'outSulcal.nii' + "outWM": "outWM.nii", + "outHard": "outHard.nii", + "outFilled": "outFilled.nii", + "outMembership": "outMembership.nii", + "outInhomogeneity": "outInhomogeneity.nii", + "outCortical": "outCortical.nii", + "outHard2": "outHard2.nii", + "outLesion": "outLesion.nii", + "outSulcal": "outSulcal.nii", } _redirect_x = True class JistBrainMp2rageSkullStrippingInputSpec(CommandLineInputSpec): inSecond = File( - desc="Second inversion (Inv2) Image", - exists=True, - argstr="--inSecond %s") - inT1 = File( - desc="T1 Map (T1_Images) Image (opt)", exists=True, argstr="--inT1 %s") + desc="Second inversion (Inv2) Image", exists=True, argstr="--inSecond %s" + ) + inT1 = File(desc="T1 Map (T1_Images) Image (opt)", exists=True, argstr="--inT1 %s") inT1weighted = File( - desc="T1-weighted (UNI) Image (opt)", - exists=True, - argstr="--inT1weighted %s") - inFilter = File( - desc="Filter Image (opt)", exists=True, argstr="--inFilter %s") - inSkip = traits.Enum( - "true", 
"false", desc="Skip zero values", argstr="--inSkip %s") - xPrefExt = traits.Enum( - "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + desc="T1-weighted (UNI) Image (opt)", exists=True, argstr="--inT1weighted %s" + ) + inFilter = File(desc="Filter Image (opt)", exists=True, argstr="--inFilter %s") + inSkip = traits.Enum("true", "false", desc="Skip zero values", argstr="--inSkip %s") + xPrefExt = traits.Enum("nrrd", desc="Output File Type", argstr="--xPrefExt %s") outBrain = traits.Either( traits.Bool, File(), hash_files=False, desc="Brain Mask Image", - argstr="--outBrain %s") + argstr="--outBrain %s", + ) outMasked = traits.Either( traits.Bool, File(), hash_files=False, desc="Masked T1 Map Image", - argstr="--outMasked %s") + argstr="--outMasked %s", + ) outMasked2 = traits.Either( traits.Bool, File(), hash_files=False, desc="Masked T1-weighted Image", - argstr="--outMasked2 %s") + argstr="--outMasked2 %s", + ) outMasked3 = traits.Either( traits.Bool, File(), hash_files=False, desc="Masked Filter Image", - argstr="--outMasked3 %s") + argstr="--outMasked3 %s", + ) null = traits.Str(desc="Execution Time", argstr="--null %s") xDefaultMem = traits.Int( - desc="Set default maximum heap size", argstr="-xDefaultMem %d") + desc="Set default maximum heap size", argstr="-xDefaultMem %d" + ) xMaxProcess = traits.Int( 1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", - usedefault=True) + usedefault=True, + ) class JistBrainMp2rageSkullStrippingOutputSpec(TraitedSpec): @@ -730,24 +742,23 @@ class JistBrainMp2rageSkullStripping(SEMLikeCommandLine): output_spec = JistBrainMp2rageSkullStrippingOutputSpec _cmd = "java edu.jhu.ece.iacl.jist.cli.run de.mpg.cbs.jist.brain.JistBrainMp2rageSkullStripping " _outputs_filenames = { - 'outBrain': 'outBrain.nii', - 'outMasked3': 'outMasked3.nii', - 'outMasked2': 'outMasked2.nii', - 'outMasked': 'outMasked.nii' + "outBrain": "outBrain.nii", + "outMasked3": "outMasked3.nii", + "outMasked2": "outMasked2.nii", + "outMasked": "outMasked.nii", } _redirect_x = True class JistCortexSurfaceMeshInflationInputSpec(CommandLineInputSpec): - inLevelset = File( - desc="Levelset Image", exists=True, argstr="--inLevelset %s") + inLevelset = File(desc="Levelset Image", exists=True, argstr="--inLevelset %s") inSOR = traits.Float(desc="SOR Parameter", argstr="--inSOR %f") - inMean = traits.Float( - desc="Mean Curvature Threshold", argstr="--inMean %f") + inMean = traits.Float(desc="Mean Curvature Threshold", argstr="--inMean %f") inStep = traits.Int(desc="Step Size", argstr="--inStep %d") inMax = traits.Int(desc="Max Iterations", argstr="--inMax %d") inLorentzian = traits.Enum( - "true", "false", desc="Lorentzian Norm", argstr="--inLorentzian %s") + "true", "false", desc="Lorentzian Norm", argstr="--inLorentzian %s" + ) inTopology = traits.Enum( "26/6", "6/26", @@ -758,29 +769,33 @@ class JistCortexSurfaceMeshInflationInputSpec(CommandLineInputSpec): "wco", "no", desc="Topology", - argstr="--inTopology %s") - xPrefExt = traits.Enum( - "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + argstr="--inTopology %s", + ) + xPrefExt = traits.Enum("nrrd", desc="Output File Type", argstr="--xPrefExt %s") outOriginal = traits.Either( traits.Bool, File(), hash_files=False, desc="Original Surface", - argstr="--outOriginal %s") + argstr="--outOriginal %s", + ) outInflated = traits.Either( traits.Bool, File(), hash_files=False, desc="Inflated Surface", - argstr="--outInflated %s") + argstr="--outInflated %s", + ) null = traits.Str(desc="Execution Time", 
argstr="--null %s") xDefaultMem = traits.Int( - desc="Set default maximum heap size", argstr="-xDefaultMem %d") + desc="Set default maximum heap size", argstr="-xDefaultMem %d" + ) xMaxProcess = traits.Int( 1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", - usedefault=True) + usedefault=True, + ) class JistCortexSurfaceMeshInflationOutputSpec(TraitedSpec): @@ -805,52 +820,40 @@ class JistCortexSurfaceMeshInflation(SEMLikeCommandLine): input_spec = JistCortexSurfaceMeshInflationInputSpec output_spec = JistCortexSurfaceMeshInflationOutputSpec _cmd = "java edu.jhu.ece.iacl.jist.cli.run de.mpg.cbs.jist.cortex.JistCortexSurfaceMeshInflation " - _outputs_filenames = { - 'outOriginal': 'outOriginal', - 'outInflated': 'outInflated' - } + _outputs_filenames = {"outOriginal": "outOriginal", "outInflated": "outInflated"} _redirect_x = True class RandomVolInputSpec(CommandLineInputSpec): - inSize = traits.Int( - desc="Size of Volume in X direction", argstr="--inSize %d") - inSize2 = traits.Int( - desc="Size of Volume in Y direction", argstr="--inSize2 %d") - inSize3 = traits.Int( - desc="Size of Volume in Z direction", argstr="--inSize3 %d") - inSize4 = traits.Int( - desc="Size of Volume in t direction", argstr="--inSize4 %d") + inSize = traits.Int(desc="Size of Volume in X direction", argstr="--inSize %d") + inSize2 = traits.Int(desc="Size of Volume in Y direction", argstr="--inSize2 %d") + inSize3 = traits.Int(desc="Size of Volume in Z direction", argstr="--inSize3 %d") + inSize4 = traits.Int(desc="Size of Volume in t direction", argstr="--inSize4 %d") inStandard = traits.Int( - desc="Standard Deviation for Normal Distribution", - argstr="--inStandard %d") + desc="Standard Deviation for Normal Distribution", argstr="--inStandard %d" + ) inLambda = traits.Float( - desc="Lambda Value for Exponential Distribution", - argstr="--inLambda %f") + desc="Lambda Value for Exponential Distribution", argstr="--inLambda %f" + ) inMaximum = traits.Int(desc="Maximum Value", argstr="--inMaximum %d") inMinimum = traits.Int(desc="Minimum Value", argstr="--inMinimum %d") inField = traits.Enum( - "Uniform", - "Normal", - "Exponential", - desc="Field", - argstr="--inField %s") - xPrefExt = traits.Enum( - "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + "Uniform", "Normal", "Exponential", desc="Field", argstr="--inField %s" + ) + xPrefExt = traits.Enum("nrrd", desc="Output File Type", argstr="--xPrefExt %s") outRand1 = traits.Either( - traits.Bool, - File(), - hash_files=False, - desc="Rand1", - argstr="--outRand1 %s") + traits.Bool, File(), hash_files=False, desc="Rand1", argstr="--outRand1 %s" + ) null = traits.Str(desc="Execution Time", argstr="--null %s") xDefaultMem = traits.Int( - desc="Set default maximum heap size", argstr="-xDefaultMem %d") + desc="Set default maximum heap size", argstr="-xDefaultMem %d" + ) xMaxProcess = traits.Int( 1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", - usedefault=True) + usedefault=True, + ) class RandomVolOutputSpec(TraitedSpec): @@ -873,7 +876,7 @@ class RandomVol(SEMLikeCommandLine): input_spec = RandomVolInputSpec output_spec = RandomVolOutputSpec _cmd = "java edu.jhu.ece.iacl.jist.cli.run edu.jhu.bme.smile.demo.RandomVol " - _outputs_filenames = {'outRand1': 'outRand1.nii'} + _outputs_filenames = {"outRand1": "outRand1.nii"} _redirect_x = True @@ -888,23 +891,26 @@ class MedicAlgorithmImageCalculatorInputSpec(CommandLineInputSpec): "Min", "Max", desc="Operation", - argstr="--inOperation %s") - xPrefExt = 
traits.Enum( - "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + argstr="--inOperation %s", + ) + xPrefExt = traits.Enum("nrrd", desc="Output File Type", argstr="--xPrefExt %s") outResult = traits.Either( traits.Bool, File(), hash_files=False, desc="Result Volume", - argstr="--outResult %s") + argstr="--outResult %s", + ) null = traits.Str(desc="Execution Time", argstr="--null %s") xDefaultMem = traits.Int( - desc="Set default maximum heap size", argstr="-xDefaultMem %d") + desc="Set default maximum heap size", argstr="-xDefaultMem %d" + ) xMaxProcess = traits.Int( 1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", - usedefault=True) + usedefault=True, + ) class MedicAlgorithmImageCalculatorOutputSpec(TraitedSpec): @@ -927,44 +933,41 @@ class MedicAlgorithmImageCalculator(SEMLikeCommandLine): input_spec = MedicAlgorithmImageCalculatorInputSpec output_spec = MedicAlgorithmImageCalculatorOutputSpec _cmd = "java edu.jhu.ece.iacl.jist.cli.run edu.jhu.ece.iacl.plugins.utilities.math.MedicAlgorithmImageCalculator " - _outputs_filenames = {'outResult': 'outResult.nii'} + _outputs_filenames = {"outResult": "outResult.nii"} _redirect_x = True class JistBrainMp2rageDuraEstimationInputSpec(CommandLineInputSpec): inSecond = File( - desc="Second inversion (Inv2) Image", - exists=True, - argstr="--inSecond %s") - inSkull = File( - desc="Skull Stripping Mask", exists=True, argstr="--inSkull %s") + desc="Second inversion (Inv2) Image", exists=True, argstr="--inSecond %s" + ) + inSkull = File(desc="Skull Stripping Mask", exists=True, argstr="--inSkull %s") inDistance = traits.Float( - desc="Distance to background (mm)", argstr="--inDistance %f") + desc="Distance to background (mm)", argstr="--inDistance %f" + ) inoutput = traits.Enum( "dura_region", "boundary", "dura_prior", "bg_prior", "intens_prior", - desc= - "Outputs an estimate of the dura / CSF boundary or an estimate of the entire dura region.", - argstr="--inoutput %s") - xPrefExt = traits.Enum( - "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + desc="Outputs an estimate of the dura / CSF boundary or an estimate of the entire dura region.", + argstr="--inoutput %s", + ) + xPrefExt = traits.Enum("nrrd", desc="Output File Type", argstr="--xPrefExt %s") outDura = traits.Either( - traits.Bool, - File(), - hash_files=False, - desc="Dura Image", - argstr="--outDura %s") + traits.Bool, File(), hash_files=False, desc="Dura Image", argstr="--outDura %s" + ) null = traits.Str(desc="Execution Time", argstr="--null %s") xDefaultMem = traits.Int( - desc="Set default maximum heap size", argstr="-xDefaultMem %d") + desc="Set default maximum heap size", argstr="-xDefaultMem %d" + ) xMaxProcess = traits.Int( 1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", - usedefault=True) + usedefault=True, + ) class JistBrainMp2rageDuraEstimationOutputSpec(TraitedSpec): @@ -985,39 +988,39 @@ class JistBrainMp2rageDuraEstimation(SEMLikeCommandLine): input_spec = JistBrainMp2rageDuraEstimationInputSpec output_spec = JistBrainMp2rageDuraEstimationOutputSpec _cmd = "java edu.jhu.ece.iacl.jist.cli.run de.mpg.cbs.jist.brain.JistBrainMp2rageDuraEstimation " - _outputs_filenames = {'outDura': 'outDura.nii'} + _outputs_filenames = {"outDura": "outDura.nii"} _redirect_x = True class JistLaminarProfileSamplingInputSpec(CommandLineInputSpec): - inProfile = File( - desc="Profile Surface Image", exists=True, argstr="--inProfile %s") - inIntensity = File( - desc="Intensity Image", exists=True, argstr="--inIntensity 
%s") - inCortex = File( - desc="Cortex Mask (opt)", exists=True, argstr="--inCortex %s") - xPrefExt = traits.Enum( - "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + inProfile = File(desc="Profile Surface Image", exists=True, argstr="--inProfile %s") + inIntensity = File(desc="Intensity Image", exists=True, argstr="--inIntensity %s") + inCortex = File(desc="Cortex Mask (opt)", exists=True, argstr="--inCortex %s") + xPrefExt = traits.Enum("nrrd", desc="Output File Type", argstr="--xPrefExt %s") outProfilemapped = traits.Either( traits.Bool, File(), hash_files=False, desc="Profile-mapped Intensity Image", - argstr="--outProfilemapped %s") + argstr="--outProfilemapped %s", + ) outProfile2 = traits.Either( traits.Bool, File(), hash_files=False, desc="Profile 4D Mask", - argstr="--outProfile2 %s") + argstr="--outProfile2 %s", + ) null = traits.Str(desc="Execution Time", argstr="--null %s") xDefaultMem = traits.Int( - desc="Set default maximum heap size", argstr="-xDefaultMem %d") + desc="Set default maximum heap size", argstr="-xDefaultMem %d" + ) xMaxProcess = traits.Int( 1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", - usedefault=True) + usedefault=True, + ) class JistLaminarProfileSamplingOutputSpec(TraitedSpec): @@ -1040,15 +1043,14 @@ class JistLaminarProfileSampling(SEMLikeCommandLine): output_spec = JistLaminarProfileSamplingOutputSpec _cmd = "java edu.jhu.ece.iacl.jist.cli.run de.mpg.cbs.jist.laminar.JistLaminarProfileSampling " _outputs_filenames = { - 'outProfile2': 'outProfile2.nii', - 'outProfilemapped': 'outProfilemapped.nii' + "outProfile2": "outProfile2.nii", + "outProfilemapped": "outProfilemapped.nii", } _redirect_x = True class MedicAlgorithmMipavReorientInputSpec(CommandLineInputSpec): - inSource = InputMultiPath( - File, desc="Source", sep=";", argstr="--inSource %s") + inSource = InputMultiPath(File, desc="Source", sep=";", argstr="--inSource %s") inTemplate = File(desc="Template", exists=True, argstr="--inTemplate %s") inNew = traits.Enum( "Dicom axial", @@ -1056,7 +1058,8 @@ class MedicAlgorithmMipavReorientInputSpec(CommandLineInputSpec): "Dicom sagittal", "User defined", desc="New image orientation", - argstr="--inNew %s") + argstr="--inNew %s", + ) inUser = traits.Enum( "Unknown", "Patient Right to Left", @@ -1066,7 +1069,8 @@ class MedicAlgorithmMipavReorientInputSpec(CommandLineInputSpec): "Patient Inferior to Superior", "Patient Superior to Inferior", desc="User defined X-axis orientation (image left to right)", - argstr="--inUser %s") + argstr="--inUser %s", + ) inUser2 = traits.Enum( "Unknown", "Patient Right to Left", @@ -1076,7 +1080,8 @@ class MedicAlgorithmMipavReorientInputSpec(CommandLineInputSpec): "Patient Inferior to Superior", "Patient Superior to Inferior", desc="User defined Y-axis orientation (image top to bottom)", - argstr="--inUser2 %s") + argstr="--inUser2 %s", + ) inUser3 = traits.Enum( "Unknown", "Patient Right to Left", @@ -1086,14 +1091,16 @@ class MedicAlgorithmMipavReorientInputSpec(CommandLineInputSpec): "Patient Inferior to Superior", "Patient Superior to Inferior", desc="User defined Z-axis orientation (into the screen)", - argstr="--inUser3 %s") + argstr="--inUser3 %s", + ) inUser4 = traits.Enum( "Axial", "Coronal", "Sagittal", "Unknown", desc="User defined Image Orientation", - argstr="--inUser4 %s") + argstr="--inUser4 %s", + ) inInterpolation = traits.Enum( "Nearest Neighbor", "Trilinear", @@ -1104,26 +1111,30 @@ class MedicAlgorithmMipavReorientInputSpec(CommandLineInputSpec): "Heptic 
Lagrangian", "Windowed Sinc", desc="Interpolation", - argstr="--inInterpolation %s") + argstr="--inInterpolation %s", + ) inResolution = traits.Enum( "Unchanged", "Finest cubic", "Coarsest cubic", "Same as template", desc="Resolution", - argstr="--inResolution %s") - xPrefExt = traits.Enum( - "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + argstr="--inResolution %s", + ) + xPrefExt = traits.Enum("nrrd", desc="Output File Type", argstr="--xPrefExt %s") outReoriented = InputMultiPath( - File, desc="Reoriented Volume", sep=";", argstr="--outReoriented %s") + File, desc="Reoriented Volume", sep=";", argstr="--outReoriented %s" + ) null = traits.Str(desc="Execution Time", argstr="--null %s") xDefaultMem = traits.Int( - desc="Set default maximum heap size", argstr="-xDefaultMem %d") + desc="Set default maximum heap size", argstr="-xDefaultMem %d" + ) xMaxProcess = traits.Int( 1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", - usedefault=True) + usedefault=True, + ) class MedicAlgorithmMipavReorientOutputSpec(TraitedSpec): @@ -1150,86 +1161,91 @@ class MedicAlgorithmMipavReorient(SEMLikeCommandLine): class MedicAlgorithmSPECTRE2010InputSpec(CommandLineInputSpec): inInput = File( - desc="Input volume to be skullstripped.", - exists=True, - argstr="--inInput %s") + desc="Input volume to be skullstripped.", exists=True, argstr="--inInput %s" + ) inAtlas = File( - desc= - "SPECTRE atlas description file. A text file enumerating atlas files and landmarks.", + desc="SPECTRE atlas description file. A text file enumerating atlas files and landmarks.", exists=True, - argstr="--inAtlas %s") + argstr="--inAtlas %s", + ) inInitial = traits.Int( - desc= - "Erosion of the inital mask, which is based on the probability mask and the classification., The initial mask is ouput as the d0 volume at the conclusion of SPECTRE.", - argstr="--inInitial %d") + desc="Erosion of the inital mask, which is based on the probability mask and the classification., The initial mask is ouput as the d0 volume at the conclusion of SPECTRE.", + argstr="--inInitial %d", + ) inImage = traits.Enum( "T1_SPGR", "T1_ALT", "T1_MPRAGE", "T2", "FLAIR", - desc= - "Set the image modality. MP-RAGE is recommended for most T1 sequence images.", - argstr="--inImage %s") + desc="Set the image modality. 
MP-RAGE is recommended for most T1 sequence images.", + argstr="--inImage %s", + ) inOutput = traits.Enum( "true", "false", - desc= - "Determines if the output results are transformed back into the space of the original input image.", - argstr="--inOutput %s") + desc="Determines if the output results are transformed back into the space of the original input image.", + argstr="--inOutput %s", + ) inFind = traits.Enum( - "true", "false", desc="Find Midsaggital Plane", argstr="--inFind %s") + "true", "false", desc="Find Midsaggital Plane", argstr="--inFind %s" + ) inRun = traits.Enum( - "true", "false", desc="Run Smooth Brain Mask", argstr="--inRun %s") + "true", "false", desc="Run Smooth Brain Mask", argstr="--inRun %s" + ) inResample = traits.Enum( "true", "false", - desc= - "Determines if the data is resampled to be isotropic during the processing.", - argstr="--inResample %s") + desc="Determines if the data is resampled to be isotropic during the processing.", + argstr="--inResample %s", + ) inInitial2 = traits.Float( - desc="Initial probability threshold", argstr="--inInitial2 %f") + desc="Initial probability threshold", argstr="--inInitial2 %f" + ) inMinimum = traits.Float( - desc="Minimum probability threshold", argstr="--inMinimum %f") + desc="Minimum probability threshold", argstr="--inMinimum %f" + ) inMMC = traits.Int( - desc= - "The size of the dilation step within the Modified Morphological Closing.", - argstr="--inMMC %d") + desc="The size of the dilation step within the Modified Morphological Closing.", + argstr="--inMMC %d", + ) inMMC2 = traits.Int( - desc= - "The size of the erosion step within the Modified Morphological Closing.", - argstr="--inMMC2 %d") + desc="The size of the erosion step within the Modified Morphological Closing.", + argstr="--inMMC2 %d", + ) inInhomogeneity = traits.Enum( "true", "false", - desc= - "Set to false by default, this parameter will make FANTASM try to do inhomogeneity correction during it's iterative cycle.", - argstr="--inInhomogeneity %s") + desc="Set to false by default, this parameter will make FANTASM try to do inhomogeneity correction during it's iterative cycle.", + argstr="--inInhomogeneity %s", + ) inSmoothing = traits.Float(argstr="--inSmoothing %f") inBackground = traits.Float(argstr="--inBackground %f") inOutput2 = traits.Enum( - "true", "false", desc="Output Plane?", argstr="--inOutput2 %s") + "true", "false", desc="Output Plane?", argstr="--inOutput2 %s" + ) inOutput3 = traits.Enum( - "true", "false", desc="Output Split-Halves?", argstr="--inOutput3 %s") + "true", "false", desc="Output Split-Halves?", argstr="--inOutput3 %s" + ) inOutput4 = traits.Enum( - "true", - "false", - desc="Output Segmentation on Plane?", - argstr="--inOutput4 %s") + "true", "false", desc="Output Segmentation on Plane?", argstr="--inOutput4 %s" + ) inDegrees = traits.Enum( "Rigid - 6", "Global rescale - 7", "Specific rescale - 9", "Affine - 12", desc="Degrees of freedom", - argstr="--inDegrees %s") + argstr="--inDegrees %s", + ) inCost = traits.Enum( "Correlation ratio", "Least squares", "Normalized cross correlation", "Normalized mutual information", desc="Cost function", - argstr="--inCost %s") + argstr="--inCost %s", + ) inRegistration = traits.Enum( "Trilinear", "Bspline 3rd order", @@ -1239,7 +1255,8 @@ class MedicAlgorithmSPECTRE2010InputSpec(CommandLineInputSpec): "Heptic Lagrangian", "Windowed sinc", desc="Registration interpolation", - argstr="--inRegistration %s") + argstr="--inRegistration %s", + ) inOutput5 = traits.Enum( "Trilinear", 
"Bspline 3rd order", @@ -1250,137 +1267,145 @@ class MedicAlgorithmSPECTRE2010InputSpec(CommandLineInputSpec): "Windowed sinc", "Nearest Neighbor", desc="Output interpolation", - argstr="--inOutput5 %s") + argstr="--inOutput5 %s", + ) inApply = traits.Enum( - "All", "X", "Y", "Z", desc="Apply rotation", argstr="--inApply %s") + "All", "X", "Y", "Z", desc="Apply rotation", argstr="--inApply %s" + ) inMinimum2 = traits.Float(desc="Minimum angle", argstr="--inMinimum2 %f") inMaximum = traits.Float(desc="Maximum angle", argstr="--inMaximum %f") - inCoarse = traits.Float( - desc="Coarse angle increment", argstr="--inCoarse %f") + inCoarse = traits.Float(desc="Coarse angle increment", argstr="--inCoarse %f") inFine = traits.Float(desc="Fine angle increment", argstr="--inFine %f") inMultiple = traits.Int( - desc="Multiple of tolerance to bracket the minimum", - argstr="--inMultiple %d") + desc="Multiple of tolerance to bracket the minimum", argstr="--inMultiple %d" + ) inNumber = traits.Int(desc="Number of iterations", argstr="--inNumber %d") inNumber2 = traits.Int( - desc="Number of minima from Level 8 to test at Level 4", - argstr="--inNumber2 %d") + desc="Number of minima from Level 8 to test at Level 4", argstr="--inNumber2 %d" + ) inUse = traits.Enum( "true", "false", - desc= - "Use the max of the min resolutions of the two datasets when resampling", - argstr="--inUse %s") + desc="Use the max of the min resolutions of the two datasets when resampling", + argstr="--inUse %s", + ) inSubsample = traits.Enum( - "true", - "false", - desc="Subsample image for speed", - argstr="--inSubsample %s") + "true", "false", desc="Subsample image for speed", argstr="--inSubsample %s" + ) inSkip = traits.Enum( "true", "false", desc="Skip multilevel search (Assume images are close to alignment)", - argstr="--inSkip %s") + argstr="--inSkip %s", + ) inMultithreading = traits.Enum( "true", "false", - desc= - "Set to false by default, this parameter controls the multithreaded behavior of the linear registration.", - argstr="--inMultithreading %s") - xPrefExt = traits.Enum( - "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + desc="Set to false by default, this parameter controls the multithreaded behavior of the linear registration.", + argstr="--inMultithreading %s", + ) + xPrefExt = traits.Enum("nrrd", desc="Output File Type", argstr="--xPrefExt %s") outOriginal = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "If Output in Original Space Flag is true then outputs the original input volume. Otherwise outputs the axialy reoriented input volume.", - argstr="--outOriginal %s") + desc="If Output in Original Space Flag is true then outputs the original input volume. 
Otherwise outputs the axialy reoriented input volume.", + argstr="--outOriginal %s", + ) outStripped = traits.Either( traits.Bool, File(), hash_files=False, desc="Skullstripped result of the input volume with just the brain.", - argstr="--outStripped %s") + argstr="--outStripped %s", + ) outMask = traits.Either( traits.Bool, File(), hash_files=False, desc="Binary Mask of the skullstripped result with just the brain", - argstr="--outMask %s") + argstr="--outMask %s", + ) outPrior = traits.Either( traits.Bool, File(), hash_files=False, desc="Probability prior from the atlas registrations", - argstr="--outPrior %s") + argstr="--outPrior %s", + ) outFANTASM = traits.Either( traits.Bool, File(), hash_files=False, desc="Tissue classification of of the whole input volume.", - argstr="--outFANTASM %s") + argstr="--outFANTASM %s", + ) outd0 = traits.Either( traits.Bool, File(), hash_files=False, desc="Initial Brainmask", - argstr="--outd0 %s") + argstr="--outd0 %s", + ) outMidsagittal = traits.Either( traits.Bool, File(), hash_files=False, desc="Plane dividing the brain hemispheres", - argstr="--outMidsagittal %s") + argstr="--outMidsagittal %s", + ) outSplitHalves = traits.Either( traits.Bool, File(), hash_files=False, desc="Skullstripped mask of the brain with the hemispheres divided.", - argstr="--outSplitHalves %s") + argstr="--outSplitHalves %s", + ) outSegmentation = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "2D image showing the tissue classification on the midsagittal plane", - argstr="--outSegmentation %s") + desc="2D image showing the tissue classification on the midsagittal plane", + argstr="--outSegmentation %s", + ) null = traits.Str(desc="Execution Time", argstr="--null %s") xDefaultMem = traits.Int( - desc="Set default maximum heap size", argstr="-xDefaultMem %d") + desc="Set default maximum heap size", argstr="-xDefaultMem %d" + ) xMaxProcess = traits.Int( 1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", - usedefault=True) + usedefault=True, + ) class MedicAlgorithmSPECTRE2010OutputSpec(TraitedSpec): outOriginal = File( - desc= - "If Output in Original Space Flag is true then outputs the original input volume. Otherwise outputs the axialy reoriented input volume.", - exists=True) + desc="If Output in Original Space Flag is true then outputs the original input volume. 
Otherwise outputs the axialy reoriented input volume.", + exists=True, + ) outStripped = File( desc="Skullstripped result of the input volume with just the brain.", - exists=True) + exists=True, + ) outMask = File( - desc="Binary Mask of the skullstripped result with just the brain", - exists=True) - outPrior = File( - desc="Probability prior from the atlas registrations", exists=True) + desc="Binary Mask of the skullstripped result with just the brain", exists=True + ) + outPrior = File(desc="Probability prior from the atlas registrations", exists=True) outFANTASM = File( - desc="Tissue classification of of the whole input volume.", - exists=True) + desc="Tissue classification of of the whole input volume.", exists=True + ) outd0 = File(desc="Initial Brainmask", exists=True) - outMidsagittal = File( - desc="Plane dividing the brain hemispheres", exists=True) + outMidsagittal = File(desc="Plane dividing the brain hemispheres", exists=True) outSplitHalves = File( desc="Skullstripped mask of the brain with the hemispheres divided.", - exists=True) + exists=True, + ) outSegmentation = File( - desc= - "2D image showing the tissue classification on the midsagittal plane", - exists=True) + desc="2D image showing the tissue classification on the midsagittal plane", + exists=True, + ) class MedicAlgorithmSPECTRE2010(SEMLikeCommandLine): @@ -1409,15 +1434,15 @@ class MedicAlgorithmSPECTRE2010(SEMLikeCommandLine): output_spec = MedicAlgorithmSPECTRE2010OutputSpec _cmd = "java edu.jhu.ece.iacl.jist.cli.run edu.jhu.ece.iacl.plugins.segmentation.skull_strip.MedicAlgorithmSPECTRE2010 " _outputs_filenames = { - 'outd0': 'outd0.nii', - 'outOriginal': 'outOriginal.nii', - 'outMask': 'outMask.nii', - 'outSplitHalves': 'outSplitHalves.nii', - 'outMidsagittal': 'outMidsagittal.nii', - 'outPrior': 'outPrior.nii', - 'outFANTASM': 'outFANTASM.nii', - 'outSegmentation': 'outSegmentation.nii', - 'outStripped': 'outStripped.nii' + "outd0": "outd0.nii", + "outOriginal": "outOriginal.nii", + "outMask": "outMask.nii", + "outSplitHalves": "outSplitHalves.nii", + "outMidsagittal": "outMidsagittal.nii", + "outPrior": "outPrior.nii", + "outFANTASM": "outFANTASM.nii", + "outSegmentation": "outSegmentation.nii", + "outStripped": "outStripped.nii", } _redirect_x = True @@ -1428,27 +1453,30 @@ class JistBrainPartialVolumeFilterInputSpec(CommandLineInputSpec): "bright", "dark", "both", - desc= - "Outputs the raw intensity values or a probability score for the partial volume regions.", - argstr="--inPV %s") + desc="Outputs the raw intensity values or a probability score for the partial volume regions.", + argstr="--inPV %s", + ) inoutput = traits.Enum( - "probability", "intensity", desc="output", argstr="--inoutput %s") - xPrefExt = traits.Enum( - "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + "probability", "intensity", desc="output", argstr="--inoutput %s" + ) + xPrefExt = traits.Enum("nrrd", desc="Output File Type", argstr="--xPrefExt %s") outPartial = traits.Either( traits.Bool, File(), hash_files=False, desc="Partial Volume Image", - argstr="--outPartial %s") + argstr="--outPartial %s", + ) null = traits.Str(desc="Execution Time", argstr="--null %s") xDefaultMem = traits.Int( - desc="Set default maximum heap size", argstr="-xDefaultMem %d") + desc="Set default maximum heap size", argstr="-xDefaultMem %d" + ) xMaxProcess = traits.Int( 1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", - usedefault=True) + usedefault=True, + ) class JistBrainPartialVolumeFilterOutputSpec(TraitedSpec): 
@@ -1469,71 +1497,74 @@ class JistBrainPartialVolumeFilter(SEMLikeCommandLine): input_spec = JistBrainPartialVolumeFilterInputSpec output_spec = JistBrainPartialVolumeFilterOutputSpec _cmd = "java edu.jhu.ece.iacl.jist.cli.run de.mpg.cbs.jist.brain.JistBrainPartialVolumeFilter " - _outputs_filenames = {'outPartial': 'outPartial.nii'} + _outputs_filenames = {"outPartial": "outPartial.nii"} _redirect_x = True class JistIntensityMp2rageMaskingInputSpec(CommandLineInputSpec): inSecond = File( - desc="Second inversion (Inv2) Image", - exists=True, - argstr="--inSecond %s") + desc="Second inversion (Inv2) Image", exists=True, argstr="--inSecond %s" + ) inQuantitative = File( desc="Quantitative T1 Map (T1_Images) Image", exists=True, - argstr="--inQuantitative %s") + argstr="--inQuantitative %s", + ) inT1weighted = File( - desc="T1-weighted (UNI) Image", - exists=True, - argstr="--inT1weighted %s") + desc="T1-weighted (UNI) Image", exists=True, argstr="--inT1weighted %s" + ) inBackground = traits.Enum( "exponential", "half-normal", - desc= - "Model distribution for background noise (default is half-normal, exponential is more stringent).", - argstr="--inBackground %s") - inSkip = traits.Enum( - "true", "false", desc="Skip zero values", argstr="--inSkip %s") + desc="Model distribution for background noise (default is half-normal, exponential is more stringent).", + argstr="--inBackground %s", + ) + inSkip = traits.Enum("true", "false", desc="Skip zero values", argstr="--inSkip %s") inMasking = traits.Enum( "binary", "proba", - desc= - "Whether to use a binary threshold or a weighted average based on the probability.", - argstr="--inMasking %s") - xPrefExt = traits.Enum( - "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + desc="Whether to use a binary threshold or a weighted average based on the probability.", + argstr="--inMasking %s", + ) + xPrefExt = traits.Enum("nrrd", desc="Output File Type", argstr="--xPrefExt %s") outSignal = traits.Either( traits.Bool, File(), hash_files=False, desc="Signal Proba Image", - argstr="--outSignal_Proba %s") + argstr="--outSignal_Proba %s", + ) outSignal2 = traits.Either( traits.Bool, File(), hash_files=False, desc="Signal Mask Image", - argstr="--outSignal_Mask %s") + argstr="--outSignal_Mask %s", + ) outMasked = traits.Either( traits.Bool, File(), hash_files=False, desc="Masked T1 Map Image", - argstr="--outMasked_T1_Map %s") + argstr="--outMasked_T1_Map %s", + ) outMasked2 = traits.Either( traits.Bool, File(), hash_files=False, desc="Masked Iso Image", - argstr="--outMasked_T1weighted %s") + argstr="--outMasked_T1weighted %s", + ) null = traits.Str(desc="Execution Time", argstr="--null %s") xDefaultMem = traits.Int( - desc="Set default maximum heap size", argstr="-xDefaultMem %d") + desc="Set default maximum heap size", argstr="-xDefaultMem %d" + ) xMaxProcess = traits.Int( 1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", - usedefault=True) + usedefault=True, + ) class JistIntensityMp2rageMaskingOutputSpec(TraitedSpec): @@ -1558,38 +1589,38 @@ class JistIntensityMp2rageMasking(SEMLikeCommandLine): output_spec = JistIntensityMp2rageMaskingOutputSpec _cmd = "java edu.jhu.ece.iacl.jist.cli.run de.mpg.cbs.jist.intensity.JistIntensityMp2rageMasking " _outputs_filenames = { - 'outSignal2': 'outSignal2.nii', - 'outSignal': 'outSignal.nii', - 'outMasked2': 'outMasked2.nii', - 'outMasked': 'outMasked.nii' + "outSignal2": "outSignal2.nii", + "outSignal": "outSignal.nii", + "outMasked2": "outMasked2.nii", + "outMasked": 
"outMasked.nii", } _redirect_x = True class MedicAlgorithmThresholdToBinaryMaskInputSpec(CommandLineInputSpec): - inLabel = InputMultiPath( - File, desc="Input volumes", sep=";", argstr="--inLabel %s") - inMinimum = traits.Float( - desc="Minimum threshold value.", argstr="--inMinimum %f") - inMaximum = traits.Float( - desc="Maximum threshold value.", argstr="--inMaximum %f") + inLabel = InputMultiPath(File, desc="Input volumes", sep=";", argstr="--inLabel %s") + inMinimum = traits.Float(desc="Minimum threshold value.", argstr="--inMinimum %f") + inMaximum = traits.Float(desc="Maximum threshold value.", argstr="--inMaximum %f") inUse = traits.Enum( "true", "false", desc="Use the images max intensity as the max value of the range.", - argstr="--inUse %s") - xPrefExt = traits.Enum( - "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + argstr="--inUse %s", + ) + xPrefExt = traits.Enum("nrrd", desc="Output File Type", argstr="--xPrefExt %s") outBinary = InputMultiPath( - File, desc="Binary Mask", sep=";", argstr="--outBinary %s") + File, desc="Binary Mask", sep=";", argstr="--outBinary %s" + ) null = traits.Str(desc="Execution Time", argstr="--null %s") xDefaultMem = traits.Int( - desc="Set default maximum heap size", argstr="-xDefaultMem %d") + desc="Set default maximum heap size", argstr="-xDefaultMem %d" + ) xMaxProcess = traits.Int( 1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", - usedefault=True) + usedefault=True, + ) class MedicAlgorithmThresholdToBinaryMaskOutputSpec(TraitedSpec): diff --git a/nipype/interfaces/mipav/generate_classes.py b/nipype/interfaces/mipav/generate_classes.py index df48a2d2da..55f0f6a5db 100644 --- a/nipype/interfaces/mipav/generate_classes.py +++ b/nipype/interfaces/mipav/generate_classes.py @@ -7,49 +7,54 @@ # every tool in the modules list must be found on the default path # AND calling the module with --xml must be supported and compliant. 
modules_list = [ - 'edu.jhu.bme.smile.demo.RandomVol', - 'de.mpg.cbs.jist.laminar.JistLaminarProfileCalculator', - 'de.mpg.cbs.jist.laminar.JistLaminarProfileSampling', - 'de.mpg.cbs.jist.laminar.JistLaminarROIAveraging', - 'de.mpg.cbs.jist.laminar.JistLaminarVolumetricLayering', - 'de.mpg.cbs.jist.laminar.JistLaminarProfileGeometry', - 'de.mpg.cbs.jist.brain.JistBrainMgdmSegmentation', - 'de.mpg.cbs.jist.brain.JistBrainMp2rageSkullStripping', - 'de.mpg.cbs.jist.brain.JistBrainPartialVolumeFilter', - 'de.mpg.cbs.jist.brain.JistBrainMp2rageDuraEstimation' + "edu.jhu.bme.smile.demo.RandomVol", + "de.mpg.cbs.jist.laminar.JistLaminarProfileCalculator", + "de.mpg.cbs.jist.laminar.JistLaminarProfileSampling", + "de.mpg.cbs.jist.laminar.JistLaminarROIAveraging", + "de.mpg.cbs.jist.laminar.JistLaminarVolumetricLayering", + "de.mpg.cbs.jist.laminar.JistLaminarProfileGeometry", + "de.mpg.cbs.jist.brain.JistBrainMgdmSegmentation", + "de.mpg.cbs.jist.brain.JistBrainMp2rageSkullStripping", + "de.mpg.cbs.jist.brain.JistBrainPartialVolumeFilter", + "de.mpg.cbs.jist.brain.JistBrainMp2rageDuraEstimation", ] modules_from_chris = [ - 'edu.jhu.ece.iacl.plugins.segmentation.skull_strip.MedicAlgorithmSPECTRE2010', - 'edu.jhu.ece.iacl.plugins.utilities.volume.MedicAlgorithmMipavReorient', - 'edu.jhu.ece.iacl.plugins.utilities.math.MedicAlgorithmImageCalculator', - 'de.mpg.cbs.jist.brain.JistBrainMp2rageDuraEstimation', - 'de.mpg.cbs.jist.brain.JistBrainPartialVolumeFilter', - 'edu.jhu.ece.iacl.plugins.utilities.volume.MedicAlgorithmThresholdToBinaryMask', + "edu.jhu.ece.iacl.plugins.segmentation.skull_strip.MedicAlgorithmSPECTRE2010", + "edu.jhu.ece.iacl.plugins.utilities.volume.MedicAlgorithmMipavReorient", + "edu.jhu.ece.iacl.plugins.utilities.math.MedicAlgorithmImageCalculator", + "de.mpg.cbs.jist.brain.JistBrainMp2rageDuraEstimation", + "de.mpg.cbs.jist.brain.JistBrainPartialVolumeFilter", + "edu.jhu.ece.iacl.plugins.utilities.volume.MedicAlgorithmThresholdToBinaryMask", # 'de.mpg.cbs.jist.cortex.JistCortexFullCRUISE', # waiting for http://www.nitrc.org/tracker/index.php?func=detail&aid=7236&group_id=228&atid=942 to be fixed - 'de.mpg.cbs.jist.cortex.JistCortexSurfaceMeshInflation' + "de.mpg.cbs.jist.cortex.JistCortexSurfaceMeshInflation", ] modules_from_julia = [ - 'de.mpg.cbs.jist.intensity.JistIntensityMp2rageMasking', - 'edu.jhu.ece.iacl.plugins.segmentation.skull_strip.MedicAlgorithmSPECTRE2010' + "de.mpg.cbs.jist.intensity.JistIntensityMp2rageMasking", + "edu.jhu.ece.iacl.plugins.segmentation.skull_strip.MedicAlgorithmSPECTRE2010", ] modules_from_leonie = [ - 'edu.jhu.ece.iacl.plugins.classification.MedicAlgorithmLesionToads' + "edu.jhu.ece.iacl.plugins.classification.MedicAlgorithmLesionToads" ] modules_from_yasinyazici = [ - 'edu.jhu.ece.iacl.plugins.classification.MedicAlgorithmN3' + "edu.jhu.ece.iacl.plugins.classification.MedicAlgorithmN3" ] modules_list = list( - set(modules_list).union(modules_from_chris).union(modules_from_leonie) - .union(modules_from_julia).union(modules_from_yasinyazici).union( - modules_list)) + set(modules_list) + .union(modules_from_chris) + .union(modules_from_leonie) + .union(modules_from_julia) + .union(modules_from_yasinyazici) + .union(modules_list) + ) generate_all_classes( modules_list=modules_list, launcher=["java edu.jhu.ece.iacl.jist.cli.run"], redirect_x=True, - mipav_hacks=True) + mipav_hacks=True, + ) diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py index 
70ff508dfb..d97c5d904b 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py @@ -4,80 +4,46 @@ def test_JistBrainMgdmSegmentation_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inAdjust=dict(argstr='--inAdjust %s', ), - inAtlas=dict( - argstr='--inAtlas %s', - extensions=None, - ), - inCompute=dict(argstr='--inCompute %s', ), - inCurvature=dict(argstr='--inCurvature %f', ), - inData=dict(argstr='--inData %f', ), - inFLAIR=dict( - argstr='--inFLAIR %s', - extensions=None, - ), - inMP2RAGE=dict( - argstr='--inMP2RAGE %s', - extensions=None, - ), - inMP2RAGE2=dict( - argstr='--inMP2RAGE2 %s', - extensions=None, - ), - inMPRAGE=dict( - argstr='--inMPRAGE %s', - extensions=None, - ), - inMax=dict(argstr='--inMax %d', ), - inMin=dict(argstr='--inMin %f', ), - inOutput=dict(argstr='--inOutput %s', ), - inPV=dict( - argstr='--inPV %s', - extensions=None, - ), - inPosterior=dict(argstr='--inPosterior %f', ), - inSteps=dict(argstr='--inSteps %d', ), - inTopology=dict(argstr='--inTopology %s', ), - null=dict(argstr='--null %s', ), - outLevelset=dict( - argstr='--outLevelset %s', - hash_files=False, - ), - outPosterior2=dict( - argstr='--outPosterior2 %s', - hash_files=False, - ), - outPosterior3=dict( - argstr='--outPosterior3 %s', - hash_files=False, - ), - outSegmented=dict( - argstr='--outSegmented %s', - hash_files=False, - ), - xDefaultMem=dict(argstr='-xDefaultMem %d', ), - xMaxProcess=dict( - argstr='-xMaxProcess %d', - usedefault=True, - ), - xPrefExt=dict(argstr='--xPrefExt %s', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inAdjust=dict(argstr="--inAdjust %s",), + inAtlas=dict(argstr="--inAtlas %s", extensions=None,), + inCompute=dict(argstr="--inCompute %s",), + inCurvature=dict(argstr="--inCurvature %f",), + inData=dict(argstr="--inData %f",), + inFLAIR=dict(argstr="--inFLAIR %s", extensions=None,), + inMP2RAGE=dict(argstr="--inMP2RAGE %s", extensions=None,), + inMP2RAGE2=dict(argstr="--inMP2RAGE2 %s", extensions=None,), + inMPRAGE=dict(argstr="--inMPRAGE %s", extensions=None,), + inMax=dict(argstr="--inMax %d",), + inMin=dict(argstr="--inMin %f",), + inOutput=dict(argstr="--inOutput %s",), + inPV=dict(argstr="--inPV %s", extensions=None,), + inPosterior=dict(argstr="--inPosterior %f",), + inSteps=dict(argstr="--inSteps %d",), + inTopology=dict(argstr="--inTopology %s",), + null=dict(argstr="--null %s",), + outLevelset=dict(argstr="--outLevelset %s", hash_files=False,), + outPosterior2=dict(argstr="--outPosterior2 %s", hash_files=False,), + outPosterior3=dict(argstr="--outPosterior3 %s", hash_files=False,), + outSegmented=dict(argstr="--outSegmented %s", hash_files=False,), + xDefaultMem=dict(argstr="-xDefaultMem %d",), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), + xPrefExt=dict(argstr="--xPrefExt %s",), ) inputs = JistBrainMgdmSegmentation.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_JistBrainMgdmSegmentation_outputs(): output_map = dict( - outLevelset=dict(extensions=None, ), - outPosterior2=dict(extensions=None, ), - outPosterior3=dict(extensions=None, ), - outSegmented=dict(extensions=None, ), + outLevelset=dict(extensions=None,), + outPosterior2=dict(extensions=None,), + outPosterior3=dict(extensions=None,), + 
outSegmented=dict(extensions=None,), ) outputs = JistBrainMgdmSegmentation.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py index 3fcbeaf418..fa55aa0d75 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py @@ -4,40 +4,27 @@ def test_JistBrainMp2rageDuraEstimation_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inDistance=dict(argstr='--inDistance %f', ), - inSecond=dict( - argstr='--inSecond %s', - extensions=None, - ), - inSkull=dict( - argstr='--inSkull %s', - extensions=None, - ), - inoutput=dict(argstr='--inoutput %s', ), - null=dict(argstr='--null %s', ), - outDura=dict( - argstr='--outDura %s', - hash_files=False, - ), - xDefaultMem=dict(argstr='-xDefaultMem %d', ), - xMaxProcess=dict( - argstr='-xMaxProcess %d', - usedefault=True, - ), - xPrefExt=dict(argstr='--xPrefExt %s', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inDistance=dict(argstr="--inDistance %f",), + inSecond=dict(argstr="--inSecond %s", extensions=None,), + inSkull=dict(argstr="--inSkull %s", extensions=None,), + inoutput=dict(argstr="--inoutput %s",), + null=dict(argstr="--null %s",), + outDura=dict(argstr="--outDura %s", hash_files=False,), + xDefaultMem=dict(argstr="-xDefaultMem %d",), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), + xPrefExt=dict(argstr="--xPrefExt %s",), ) inputs = JistBrainMp2rageDuraEstimation.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_JistBrainMp2rageDuraEstimation_outputs(): - output_map = dict(outDura=dict(extensions=None, ), ) + output_map = dict(outDura=dict(extensions=None,),) outputs = JistBrainMp2rageDuraEstimation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py index 378a5eb7bb..5f4a6eb616 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py @@ -4,63 +4,35 @@ def test_JistBrainMp2rageSkullStripping_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inFilter=dict( - argstr='--inFilter %s', - extensions=None, - ), - inSecond=dict( - argstr='--inSecond %s', - extensions=None, - ), - inSkip=dict(argstr='--inSkip %s', ), - inT1=dict( - argstr='--inT1 %s', - extensions=None, - ), - inT1weighted=dict( - argstr='--inT1weighted %s', - extensions=None, - ), - null=dict(argstr='--null %s', ), - outBrain=dict( - argstr='--outBrain %s', - hash_files=False, - ), - outMasked=dict( - argstr='--outMasked %s', - hash_files=False, - ), - outMasked2=dict( - argstr='--outMasked2 %s', - hash_files=False, - ), - outMasked3=dict( - argstr='--outMasked3 %s', - hash_files=False, - ), - xDefaultMem=dict(argstr='-xDefaultMem %d', ), - xMaxProcess=dict( - argstr='-xMaxProcess %d', - usedefault=True, - ), - xPrefExt=dict(argstr='--xPrefExt %s', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inFilter=dict(argstr="--inFilter %s", extensions=None,), + 
inSecond=dict(argstr="--inSecond %s", extensions=None,), + inSkip=dict(argstr="--inSkip %s",), + inT1=dict(argstr="--inT1 %s", extensions=None,), + inT1weighted=dict(argstr="--inT1weighted %s", extensions=None,), + null=dict(argstr="--null %s",), + outBrain=dict(argstr="--outBrain %s", hash_files=False,), + outMasked=dict(argstr="--outMasked %s", hash_files=False,), + outMasked2=dict(argstr="--outMasked2 %s", hash_files=False,), + outMasked3=dict(argstr="--outMasked3 %s", hash_files=False,), + xDefaultMem=dict(argstr="-xDefaultMem %d",), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), + xPrefExt=dict(argstr="--xPrefExt %s",), ) inputs = JistBrainMp2rageSkullStripping.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_JistBrainMp2rageSkullStripping_outputs(): output_map = dict( - outBrain=dict(extensions=None, ), - outMasked=dict(extensions=None, ), - outMasked2=dict(extensions=None, ), - outMasked3=dict(extensions=None, ), + outBrain=dict(extensions=None,), + outMasked=dict(extensions=None,), + outMasked2=dict(extensions=None,), + outMasked3=dict(extensions=None,), ) outputs = JistBrainMp2rageSkullStripping.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py index 229d58ec2b..016b18f1e7 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py @@ -4,36 +4,26 @@ def test_JistBrainPartialVolumeFilter_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inInput=dict( - argstr='--inInput %s', - extensions=None, - ), - inPV=dict(argstr='--inPV %s', ), - inoutput=dict(argstr='--inoutput %s', ), - null=dict(argstr='--null %s', ), - outPartial=dict( - argstr='--outPartial %s', - hash_files=False, - ), - xDefaultMem=dict(argstr='-xDefaultMem %d', ), - xMaxProcess=dict( - argstr='-xMaxProcess %d', - usedefault=True, - ), - xPrefExt=dict(argstr='--xPrefExt %s', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inInput=dict(argstr="--inInput %s", extensions=None,), + inPV=dict(argstr="--inPV %s",), + inoutput=dict(argstr="--inoutput %s",), + null=dict(argstr="--null %s",), + outPartial=dict(argstr="--outPartial %s", hash_files=False,), + xDefaultMem=dict(argstr="-xDefaultMem %d",), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), + xPrefExt=dict(argstr="--xPrefExt %s",), ) inputs = JistBrainPartialVolumeFilter.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_JistBrainPartialVolumeFilter_outputs(): - output_map = dict(outPartial=dict(extensions=None, ), ) + output_map = dict(outPartial=dict(extensions=None,),) outputs = JistBrainPartialVolumeFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py b/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py index 2790fa75b4..88ae68161c 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py @@ -4,46 +4,32 @@ def 
test_JistCortexSurfaceMeshInflation_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inLevelset=dict( - argstr='--inLevelset %s', - extensions=None, - ), - inLorentzian=dict(argstr='--inLorentzian %s', ), - inMax=dict(argstr='--inMax %d', ), - inMean=dict(argstr='--inMean %f', ), - inSOR=dict(argstr='--inSOR %f', ), - inStep=dict(argstr='--inStep %d', ), - inTopology=dict(argstr='--inTopology %s', ), - null=dict(argstr='--null %s', ), - outInflated=dict( - argstr='--outInflated %s', - hash_files=False, - ), - outOriginal=dict( - argstr='--outOriginal %s', - hash_files=False, - ), - xDefaultMem=dict(argstr='-xDefaultMem %d', ), - xMaxProcess=dict( - argstr='-xMaxProcess %d', - usedefault=True, - ), - xPrefExt=dict(argstr='--xPrefExt %s', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inLevelset=dict(argstr="--inLevelset %s", extensions=None,), + inLorentzian=dict(argstr="--inLorentzian %s",), + inMax=dict(argstr="--inMax %d",), + inMean=dict(argstr="--inMean %f",), + inSOR=dict(argstr="--inSOR %f",), + inStep=dict(argstr="--inStep %d",), + inTopology=dict(argstr="--inTopology %s",), + null=dict(argstr="--null %s",), + outInflated=dict(argstr="--outInflated %s", hash_files=False,), + outOriginal=dict(argstr="--outOriginal %s", hash_files=False,), + xDefaultMem=dict(argstr="-xDefaultMem %d",), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), + xPrefExt=dict(argstr="--xPrefExt %s",), ) inputs = JistCortexSurfaceMeshInflation.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_JistCortexSurfaceMeshInflation_outputs(): output_map = dict( - outInflated=dict(extensions=None, ), - outOriginal=dict(extensions=None, ), + outInflated=dict(extensions=None,), outOriginal=dict(extensions=None,), ) outputs = JistCortexSurfaceMeshInflation.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py b/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py index ae294ff2d1..aaab939380 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py @@ -4,61 +4,36 @@ def test_JistIntensityMp2rageMasking_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inBackground=dict(argstr='--inBackground %s', ), - inMasking=dict(argstr='--inMasking %s', ), - inQuantitative=dict( - argstr='--inQuantitative %s', - extensions=None, - ), - inSecond=dict( - argstr='--inSecond %s', - extensions=None, - ), - inSkip=dict(argstr='--inSkip %s', ), - inT1weighted=dict( - argstr='--inT1weighted %s', - extensions=None, - ), - null=dict(argstr='--null %s', ), - outMasked=dict( - argstr='--outMasked_T1_Map %s', - hash_files=False, - ), - outMasked2=dict( - argstr='--outMasked_T1weighted %s', - hash_files=False, - ), - outSignal=dict( - argstr='--outSignal_Proba %s', - hash_files=False, - ), - outSignal2=dict( - argstr='--outSignal_Mask %s', - hash_files=False, - ), - xDefaultMem=dict(argstr='-xDefaultMem %d', ), - xMaxProcess=dict( - argstr='-xMaxProcess %d', - usedefault=True, - ), - xPrefExt=dict(argstr='--xPrefExt %s', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inBackground=dict(argstr="--inBackground %s",), + inMasking=dict(argstr="--inMasking %s",), + 
inQuantitative=dict(argstr="--inQuantitative %s", extensions=None,), + inSecond=dict(argstr="--inSecond %s", extensions=None,), + inSkip=dict(argstr="--inSkip %s",), + inT1weighted=dict(argstr="--inT1weighted %s", extensions=None,), + null=dict(argstr="--null %s",), + outMasked=dict(argstr="--outMasked_T1_Map %s", hash_files=False,), + outMasked2=dict(argstr="--outMasked_T1weighted %s", hash_files=False,), + outSignal=dict(argstr="--outSignal_Proba %s", hash_files=False,), + outSignal2=dict(argstr="--outSignal_Mask %s", hash_files=False,), + xDefaultMem=dict(argstr="-xDefaultMem %d",), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), + xPrefExt=dict(argstr="--xPrefExt %s",), ) inputs = JistIntensityMp2rageMasking.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_JistIntensityMp2rageMasking_outputs(): output_map = dict( - outMasked=dict(extensions=None, ), - outMasked2=dict(extensions=None, ), - outSignal=dict(extensions=None, ), - outSignal2=dict(extensions=None, ), + outMasked=dict(extensions=None,), + outMasked2=dict(extensions=None,), + outSignal=dict(extensions=None,), + outSignal2=dict(extensions=None,), ) outputs = JistIntensityMp2rageMasking.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py index a9695e39ce..d8447b9773 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py @@ -4,39 +4,26 @@ def test_JistLaminarProfileCalculator_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inIntensity=dict( - argstr='--inIntensity %s', - extensions=None, - ), - inMask=dict( - argstr='--inMask %s', - extensions=None, - ), - incomputed=dict(argstr='--incomputed %s', ), - null=dict(argstr='--null %s', ), - outResult=dict( - argstr='--outResult %s', - hash_files=False, - ), - xDefaultMem=dict(argstr='-xDefaultMem %d', ), - xMaxProcess=dict( - argstr='-xMaxProcess %d', - usedefault=True, - ), - xPrefExt=dict(argstr='--xPrefExt %s', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inIntensity=dict(argstr="--inIntensity %s", extensions=None,), + inMask=dict(argstr="--inMask %s", extensions=None,), + incomputed=dict(argstr="--incomputed %s",), + null=dict(argstr="--null %s",), + outResult=dict(argstr="--outResult %s", hash_files=False,), + xDefaultMem=dict(argstr="-xDefaultMem %d",), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), + xPrefExt=dict(argstr="--xPrefExt %s",), ) inputs = JistLaminarProfileCalculator.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_JistLaminarProfileCalculator_outputs(): - output_map = dict(outResult=dict(extensions=None, ), ) + output_map = dict(outResult=dict(extensions=None,),) outputs = JistLaminarProfileCalculator.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py index 7b0dc2c788..4a0c4f392a 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py +++ 
b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py @@ -4,38 +4,28 @@ def test_JistLaminarProfileGeometry_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inProfile=dict( - argstr='--inProfile %s', - extensions=None, - ), - incomputed=dict(argstr='--incomputed %s', ), - inoutside=dict(argstr='--inoutside %f', ), - inregularization=dict(argstr='--inregularization %s', ), - insmoothing=dict(argstr='--insmoothing %f', ), - null=dict(argstr='--null %s', ), - outResult=dict( - argstr='--outResult %s', - hash_files=False, - ), - xDefaultMem=dict(argstr='-xDefaultMem %d', ), - xMaxProcess=dict( - argstr='-xMaxProcess %d', - usedefault=True, - ), - xPrefExt=dict(argstr='--xPrefExt %s', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inProfile=dict(argstr="--inProfile %s", extensions=None,), + incomputed=dict(argstr="--incomputed %s",), + inoutside=dict(argstr="--inoutside %f",), + inregularization=dict(argstr="--inregularization %s",), + insmoothing=dict(argstr="--insmoothing %f",), + null=dict(argstr="--null %s",), + outResult=dict(argstr="--outResult %s", hash_files=False,), + xDefaultMem=dict(argstr="-xDefaultMem %d",), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), + xPrefExt=dict(argstr="--xPrefExt %s",), ) inputs = JistLaminarProfileGeometry.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_JistLaminarProfileGeometry_outputs(): - output_map = dict(outResult=dict(extensions=None, ), ) + output_map = dict(outResult=dict(extensions=None,),) outputs = JistLaminarProfileGeometry.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py index 7ef82a5920..3ad28793f0 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py @@ -4,48 +4,28 @@ def test_JistLaminarProfileSampling_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inCortex=dict( - argstr='--inCortex %s', - extensions=None, - ), - inIntensity=dict( - argstr='--inIntensity %s', - extensions=None, - ), - inProfile=dict( - argstr='--inProfile %s', - extensions=None, - ), - null=dict(argstr='--null %s', ), - outProfile2=dict( - argstr='--outProfile2 %s', - hash_files=False, - ), - outProfilemapped=dict( - argstr='--outProfilemapped %s', - hash_files=False, - ), - xDefaultMem=dict(argstr='-xDefaultMem %d', ), - xMaxProcess=dict( - argstr='-xMaxProcess %d', - usedefault=True, - ), - xPrefExt=dict(argstr='--xPrefExt %s', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inCortex=dict(argstr="--inCortex %s", extensions=None,), + inIntensity=dict(argstr="--inIntensity %s", extensions=None,), + inProfile=dict(argstr="--inProfile %s", extensions=None,), + null=dict(argstr="--null %s",), + outProfile2=dict(argstr="--outProfile2 %s", hash_files=False,), + outProfilemapped=dict(argstr="--outProfilemapped %s", hash_files=False,), + xDefaultMem=dict(argstr="-xDefaultMem %d",), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), + xPrefExt=dict(argstr="--xPrefExt %s",), ) inputs = JistLaminarProfileSampling.input_spec() for key, metadata in 
list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_JistLaminarProfileSampling_outputs(): output_map = dict( - outProfile2=dict(extensions=None, ), - outProfilemapped=dict(extensions=None, ), + outProfile2=dict(extensions=None,), outProfilemapped=dict(extensions=None,), ) outputs = JistLaminarProfileSampling.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py index 4536d3a58c..7120db3045 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py @@ -4,43 +4,27 @@ def test_JistLaminarROIAveraging_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inIntensity=dict( - argstr='--inIntensity %s', - extensions=None, - ), - inMask=dict( - argstr='--inMask %s', - extensions=None, - ), - inROI=dict( - argstr='--inROI %s', - extensions=None, - ), - inROI2=dict(argstr='--inROI2 %s', ), - null=dict(argstr='--null %s', ), - outROI3=dict( - argstr='--outROI3 %s', - hash_files=False, - ), - xDefaultMem=dict(argstr='-xDefaultMem %d', ), - xMaxProcess=dict( - argstr='-xMaxProcess %d', - usedefault=True, - ), - xPrefExt=dict(argstr='--xPrefExt %s', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inIntensity=dict(argstr="--inIntensity %s", extensions=None,), + inMask=dict(argstr="--inMask %s", extensions=None,), + inROI=dict(argstr="--inROI %s", extensions=None,), + inROI2=dict(argstr="--inROI2 %s",), + null=dict(argstr="--null %s",), + outROI3=dict(argstr="--outROI3 %s", hash_files=False,), + xDefaultMem=dict(argstr="-xDefaultMem %d",), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), + xPrefExt=dict(argstr="--xPrefExt %s",), ) inputs = JistLaminarROIAveraging.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_JistLaminarROIAveraging_outputs(): - output_map = dict(outROI3=dict(extensions=None, ), ) + output_map = dict(outROI3=dict(extensions=None,),) outputs = JistLaminarROIAveraging.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py index 17d44c7cec..7b57e483ef 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py @@ -4,58 +4,39 @@ def test_JistLaminarVolumetricLayering_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inInner=dict( - argstr='--inInner %s', - extensions=None, - ), - inLayering=dict(argstr='--inLayering %s', ), - inLayering2=dict(argstr='--inLayering2 %s', ), - inMax=dict(argstr='--inMax %d', ), - inMin=dict(argstr='--inMin %f', ), - inNumber=dict(argstr='--inNumber %d', ), - inOuter=dict( - argstr='--inOuter %s', - extensions=None, - ), - inTopology=dict(argstr='--inTopology %s', ), - incurvature=dict(argstr='--incurvature %d', ), - inpresmooth=dict(argstr='--inpresmooth %s', ), - inratio=dict(argstr='--inratio %f', ), - null=dict(argstr='--null %s', ), - outContinuous=dict( - argstr='--outContinuous %s', - hash_files=False, - ), - 
outDiscrete=dict( - argstr='--outDiscrete %s', - hash_files=False, - ), - outLayer=dict( - argstr='--outLayer %s', - hash_files=False, - ), - xDefaultMem=dict(argstr='-xDefaultMem %d', ), - xMaxProcess=dict( - argstr='-xMaxProcess %d', - usedefault=True, - ), - xPrefExt=dict(argstr='--xPrefExt %s', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inInner=dict(argstr="--inInner %s", extensions=None,), + inLayering=dict(argstr="--inLayering %s",), + inLayering2=dict(argstr="--inLayering2 %s",), + inMax=dict(argstr="--inMax %d",), + inMin=dict(argstr="--inMin %f",), + inNumber=dict(argstr="--inNumber %d",), + inOuter=dict(argstr="--inOuter %s", extensions=None,), + inTopology=dict(argstr="--inTopology %s",), + incurvature=dict(argstr="--incurvature %d",), + inpresmooth=dict(argstr="--inpresmooth %s",), + inratio=dict(argstr="--inratio %f",), + null=dict(argstr="--null %s",), + outContinuous=dict(argstr="--outContinuous %s", hash_files=False,), + outDiscrete=dict(argstr="--outDiscrete %s", hash_files=False,), + outLayer=dict(argstr="--outLayer %s", hash_files=False,), + xDefaultMem=dict(argstr="-xDefaultMem %d",), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), + xPrefExt=dict(argstr="--xPrefExt %s",), ) inputs = JistLaminarVolumetricLayering.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_JistLaminarVolumetricLayering_outputs(): output_map = dict( - outContinuous=dict(extensions=None, ), - outDiscrete=dict(extensions=None, ), - outLayer=dict(extensions=None, ), + outContinuous=dict(extensions=None,), + outDiscrete=dict(extensions=None,), + outLayer=dict(extensions=None,), ) outputs = JistLaminarVolumetricLayering.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py index 353a521492..9d7ff807d6 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py @@ -4,39 +4,26 @@ def test_MedicAlgorithmImageCalculator_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inOperation=dict(argstr='--inOperation %s', ), - inVolume=dict( - argstr='--inVolume %s', - extensions=None, - ), - inVolume2=dict( - argstr='--inVolume2 %s', - extensions=None, - ), - null=dict(argstr='--null %s', ), - outResult=dict( - argstr='--outResult %s', - hash_files=False, - ), - xDefaultMem=dict(argstr='-xDefaultMem %d', ), - xMaxProcess=dict( - argstr='-xMaxProcess %d', - usedefault=True, - ), - xPrefExt=dict(argstr='--xPrefExt %s', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inOperation=dict(argstr="--inOperation %s",), + inVolume=dict(argstr="--inVolume %s", extensions=None,), + inVolume2=dict(argstr="--inVolume2 %s", extensions=None,), + null=dict(argstr="--null %s",), + outResult=dict(argstr="--outResult %s", hash_files=False,), + xDefaultMem=dict(argstr="-xDefaultMem %d",), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), + xPrefExt=dict(argstr="--xPrefExt %s",), ) inputs = MedicAlgorithmImageCalculator.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MedicAlgorithmImageCalculator_outputs(): - 
output_map = dict(outResult=dict(extensions=None, ), ) + output_map = dict(outResult=dict(extensions=None,),) outputs = MedicAlgorithmImageCalculator.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py index 8024590f92..f9036d1207 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py @@ -4,110 +4,61 @@ def test_MedicAlgorithmLesionToads_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inAtlas=dict(argstr='--inAtlas %s', ), - inAtlas2=dict( - argstr='--inAtlas2 %s', - extensions=None, - ), - inAtlas3=dict( - argstr='--inAtlas3 %s', - extensions=None, - ), - inAtlas4=dict( - argstr='--inAtlas4 %s', - extensions=None, - ), - inAtlas5=dict(argstr='--inAtlas5 %f', ), - inAtlas6=dict(argstr='--inAtlas6 %s', ), - inConnectivity=dict(argstr='--inConnectivity %s', ), - inCorrect=dict(argstr='--inCorrect %s', ), - inFLAIR=dict( - argstr='--inFLAIR %s', - extensions=None, - ), - inInclude=dict(argstr='--inInclude %s', ), - inMaximum=dict(argstr='--inMaximum %d', ), - inMaximum2=dict(argstr='--inMaximum2 %d', ), - inMaximum3=dict(argstr='--inMaximum3 %d', ), - inMaximum4=dict(argstr='--inMaximum4 %f', ), - inMaximum5=dict(argstr='--inMaximum5 %d', ), - inOutput=dict(argstr='--inOutput %s', ), - inOutput2=dict(argstr='--inOutput2 %s', ), - inOutput3=dict(argstr='--inOutput3 %s', ), - inSmooting=dict(argstr='--inSmooting %f', ), - inT1_MPRAGE=dict( - argstr='--inT1_MPRAGE %s', - extensions=None, - ), - inT1_SPGR=dict( - argstr='--inT1_SPGR %s', - extensions=None, - ), - null=dict(argstr='--null %s', ), - outCortical=dict( - argstr='--outCortical %s', - hash_files=False, - ), - outFilled=dict( - argstr='--outFilled %s', - hash_files=False, - ), - outHard=dict( - argstr='--outHard %s', - hash_files=False, - ), - outHard2=dict( - argstr='--outHard2 %s', - hash_files=False, - ), - outInhomogeneity=dict( - argstr='--outInhomogeneity %s', - hash_files=False, - ), - outLesion=dict( - argstr='--outLesion %s', - hash_files=False, - ), - outMembership=dict( - argstr='--outMembership %s', - hash_files=False, - ), - outSulcal=dict( - argstr='--outSulcal %s', - hash_files=False, - ), - outWM=dict( - argstr='--outWM %s', - hash_files=False, - ), - xDefaultMem=dict(argstr='-xDefaultMem %d', ), - xMaxProcess=dict( - argstr='-xMaxProcess %d', - usedefault=True, - ), - xPrefExt=dict(argstr='--xPrefExt %s', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inAtlas=dict(argstr="--inAtlas %s",), + inAtlas2=dict(argstr="--inAtlas2 %s", extensions=None,), + inAtlas3=dict(argstr="--inAtlas3 %s", extensions=None,), + inAtlas4=dict(argstr="--inAtlas4 %s", extensions=None,), + inAtlas5=dict(argstr="--inAtlas5 %f",), + inAtlas6=dict(argstr="--inAtlas6 %s",), + inConnectivity=dict(argstr="--inConnectivity %s",), + inCorrect=dict(argstr="--inCorrect %s",), + inFLAIR=dict(argstr="--inFLAIR %s", extensions=None,), + inInclude=dict(argstr="--inInclude %s",), + inMaximum=dict(argstr="--inMaximum %d",), + inMaximum2=dict(argstr="--inMaximum2 %d",), + inMaximum3=dict(argstr="--inMaximum3 %d",), + inMaximum4=dict(argstr="--inMaximum4 %f",), + inMaximum5=dict(argstr="--inMaximum5 %d",), + inOutput=dict(argstr="--inOutput %s",), + inOutput2=dict(argstr="--inOutput2 %s",), + 
inOutput3=dict(argstr="--inOutput3 %s",), + inSmooting=dict(argstr="--inSmooting %f",), + inT1_MPRAGE=dict(argstr="--inT1_MPRAGE %s", extensions=None,), + inT1_SPGR=dict(argstr="--inT1_SPGR %s", extensions=None,), + null=dict(argstr="--null %s",), + outCortical=dict(argstr="--outCortical %s", hash_files=False,), + outFilled=dict(argstr="--outFilled %s", hash_files=False,), + outHard=dict(argstr="--outHard %s", hash_files=False,), + outHard2=dict(argstr="--outHard2 %s", hash_files=False,), + outInhomogeneity=dict(argstr="--outInhomogeneity %s", hash_files=False,), + outLesion=dict(argstr="--outLesion %s", hash_files=False,), + outMembership=dict(argstr="--outMembership %s", hash_files=False,), + outSulcal=dict(argstr="--outSulcal %s", hash_files=False,), + outWM=dict(argstr="--outWM %s", hash_files=False,), + xDefaultMem=dict(argstr="-xDefaultMem %d",), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), + xPrefExt=dict(argstr="--xPrefExt %s",), ) inputs = MedicAlgorithmLesionToads.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MedicAlgorithmLesionToads_outputs(): output_map = dict( - outCortical=dict(extensions=None, ), - outFilled=dict(extensions=None, ), - outHard=dict(extensions=None, ), - outHard2=dict(extensions=None, ), - outInhomogeneity=dict(extensions=None, ), - outLesion=dict(extensions=None, ), - outMembership=dict(extensions=None, ), - outSulcal=dict(extensions=None, ), - outWM=dict(extensions=None, ), + outCortical=dict(extensions=None,), + outFilled=dict(extensions=None,), + outHard=dict(extensions=None,), + outHard2=dict(extensions=None,), + outInhomogeneity=dict(extensions=None,), + outLesion=dict(extensions=None,), + outMembership=dict(extensions=None,), + outSulcal=dict(extensions=None,), + outWM=dict(extensions=None,), ) outputs = MedicAlgorithmLesionToads.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py index 14a9829a11..0c941fdbc7 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py @@ -4,43 +4,30 @@ def test_MedicAlgorithmMipavReorient_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inInterpolation=dict(argstr='--inInterpolation %s', ), - inNew=dict(argstr='--inNew %s', ), - inResolution=dict(argstr='--inResolution %s', ), - inSource=dict( - argstr='--inSource %s', - sep=';', - ), - inTemplate=dict( - argstr='--inTemplate %s', - extensions=None, - ), - inUser=dict(argstr='--inUser %s', ), - inUser2=dict(argstr='--inUser2 %s', ), - inUser3=dict(argstr='--inUser3 %s', ), - inUser4=dict(argstr='--inUser4 %s', ), - null=dict(argstr='--null %s', ), - outReoriented=dict( - argstr='--outReoriented %s', - sep=';', - ), - xDefaultMem=dict(argstr='-xDefaultMem %d', ), - xMaxProcess=dict( - argstr='-xMaxProcess %d', - usedefault=True, - ), - xPrefExt=dict(argstr='--xPrefExt %s', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inInterpolation=dict(argstr="--inInterpolation %s",), + inNew=dict(argstr="--inNew %s",), + inResolution=dict(argstr="--inResolution %s",), + inSource=dict(argstr="--inSource %s", sep=";",), + inTemplate=dict(argstr="--inTemplate %s", extensions=None,), + inUser=dict(argstr="--inUser %s",), + 
inUser2=dict(argstr="--inUser2 %s",), + inUser3=dict(argstr="--inUser3 %s",), + inUser4=dict(argstr="--inUser4 %s",), + null=dict(argstr="--null %s",), + outReoriented=dict(argstr="--outReoriented %s", sep=";",), + xDefaultMem=dict(argstr="-xDefaultMem %d",), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), + xPrefExt=dict(argstr="--xPrefExt %s",), ) inputs = MedicAlgorithmMipavReorient.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MedicAlgorithmMipavReorient_outputs(): output_map = dict() outputs = MedicAlgorithmMipavReorient.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py index f1623bb19b..d9a8a25023 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py @@ -4,48 +4,35 @@ def test_MedicAlgorithmN3_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inAutomatic=dict(argstr='--inAutomatic %s', ), - inEnd=dict(argstr='--inEnd %f', ), - inField=dict(argstr='--inField %f', ), - inInput=dict( - argstr='--inInput %s', - extensions=None, - ), - inKernel=dict(argstr='--inKernel %f', ), - inMaximum=dict(argstr='--inMaximum %d', ), - inSignal=dict(argstr='--inSignal %f', ), - inSubsample=dict(argstr='--inSubsample %f', ), - inWeiner=dict(argstr='--inWeiner %f', ), - null=dict(argstr='--null %s', ), - outInhomogeneity=dict( - argstr='--outInhomogeneity %s', - hash_files=False, - ), - outInhomogeneity2=dict( - argstr='--outInhomogeneity2 %s', - hash_files=False, - ), - xDefaultMem=dict(argstr='-xDefaultMem %d', ), - xMaxProcess=dict( - argstr='-xMaxProcess %d', - usedefault=True, - ), - xPrefExt=dict(argstr='--xPrefExt %s', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inAutomatic=dict(argstr="--inAutomatic %s",), + inEnd=dict(argstr="--inEnd %f",), + inField=dict(argstr="--inField %f",), + inInput=dict(argstr="--inInput %s", extensions=None,), + inKernel=dict(argstr="--inKernel %f",), + inMaximum=dict(argstr="--inMaximum %d",), + inSignal=dict(argstr="--inSignal %f",), + inSubsample=dict(argstr="--inSubsample %f",), + inWeiner=dict(argstr="--inWeiner %f",), + null=dict(argstr="--null %s",), + outInhomogeneity=dict(argstr="--outInhomogeneity %s", hash_files=False,), + outInhomogeneity2=dict(argstr="--outInhomogeneity2 %s", hash_files=False,), + xDefaultMem=dict(argstr="-xDefaultMem %d",), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), + xPrefExt=dict(argstr="--xPrefExt %s",), ) inputs = MedicAlgorithmN3.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MedicAlgorithmN3_outputs(): output_map = dict( - outInhomogeneity=dict(extensions=None, ), - outInhomogeneity2=dict(extensions=None, ), + outInhomogeneity=dict(extensions=None,), + outInhomogeneity2=dict(extensions=None,), ) outputs = MedicAlgorithmN3.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py index e2de49a746..8e51a9c7bc 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py @@ 
-4,111 +4,74 @@ def test_MedicAlgorithmSPECTRE2010_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inApply=dict(argstr='--inApply %s', ), - inAtlas=dict( - argstr='--inAtlas %s', - extensions=None, - ), - inBackground=dict(argstr='--inBackground %f', ), - inCoarse=dict(argstr='--inCoarse %f', ), - inCost=dict(argstr='--inCost %s', ), - inDegrees=dict(argstr='--inDegrees %s', ), - inFind=dict(argstr='--inFind %s', ), - inFine=dict(argstr='--inFine %f', ), - inImage=dict(argstr='--inImage %s', ), - inInhomogeneity=dict(argstr='--inInhomogeneity %s', ), - inInitial=dict(argstr='--inInitial %d', ), - inInitial2=dict(argstr='--inInitial2 %f', ), - inInput=dict( - argstr='--inInput %s', - extensions=None, - ), - inMMC=dict(argstr='--inMMC %d', ), - inMMC2=dict(argstr='--inMMC2 %d', ), - inMaximum=dict(argstr='--inMaximum %f', ), - inMinimum=dict(argstr='--inMinimum %f', ), - inMinimum2=dict(argstr='--inMinimum2 %f', ), - inMultiple=dict(argstr='--inMultiple %d', ), - inMultithreading=dict(argstr='--inMultithreading %s', ), - inNumber=dict(argstr='--inNumber %d', ), - inNumber2=dict(argstr='--inNumber2 %d', ), - inOutput=dict(argstr='--inOutput %s', ), - inOutput2=dict(argstr='--inOutput2 %s', ), - inOutput3=dict(argstr='--inOutput3 %s', ), - inOutput4=dict(argstr='--inOutput4 %s', ), - inOutput5=dict(argstr='--inOutput5 %s', ), - inRegistration=dict(argstr='--inRegistration %s', ), - inResample=dict(argstr='--inResample %s', ), - inRun=dict(argstr='--inRun %s', ), - inSkip=dict(argstr='--inSkip %s', ), - inSmoothing=dict(argstr='--inSmoothing %f', ), - inSubsample=dict(argstr='--inSubsample %s', ), - inUse=dict(argstr='--inUse %s', ), - null=dict(argstr='--null %s', ), - outFANTASM=dict( - argstr='--outFANTASM %s', - hash_files=False, - ), - outMask=dict( - argstr='--outMask %s', - hash_files=False, - ), - outMidsagittal=dict( - argstr='--outMidsagittal %s', - hash_files=False, - ), - outOriginal=dict( - argstr='--outOriginal %s', - hash_files=False, - ), - outPrior=dict( - argstr='--outPrior %s', - hash_files=False, - ), - outSegmentation=dict( - argstr='--outSegmentation %s', - hash_files=False, - ), - outSplitHalves=dict( - argstr='--outSplitHalves %s', - hash_files=False, - ), - outStripped=dict( - argstr='--outStripped %s', - hash_files=False, - ), - outd0=dict( - argstr='--outd0 %s', - hash_files=False, - ), - xDefaultMem=dict(argstr='-xDefaultMem %d', ), - xMaxProcess=dict( - argstr='-xMaxProcess %d', - usedefault=True, - ), - xPrefExt=dict(argstr='--xPrefExt %s', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inApply=dict(argstr="--inApply %s",), + inAtlas=dict(argstr="--inAtlas %s", extensions=None,), + inBackground=dict(argstr="--inBackground %f",), + inCoarse=dict(argstr="--inCoarse %f",), + inCost=dict(argstr="--inCost %s",), + inDegrees=dict(argstr="--inDegrees %s",), + inFind=dict(argstr="--inFind %s",), + inFine=dict(argstr="--inFine %f",), + inImage=dict(argstr="--inImage %s",), + inInhomogeneity=dict(argstr="--inInhomogeneity %s",), + inInitial=dict(argstr="--inInitial %d",), + inInitial2=dict(argstr="--inInitial2 %f",), + inInput=dict(argstr="--inInput %s", extensions=None,), + inMMC=dict(argstr="--inMMC %d",), + inMMC2=dict(argstr="--inMMC2 %d",), + inMaximum=dict(argstr="--inMaximum %f",), + inMinimum=dict(argstr="--inMinimum %f",), + inMinimum2=dict(argstr="--inMinimum2 %f",), + inMultiple=dict(argstr="--inMultiple %d",), + inMultithreading=dict(argstr="--inMultithreading %s",), 
+ inNumber=dict(argstr="--inNumber %d",), + inNumber2=dict(argstr="--inNumber2 %d",), + inOutput=dict(argstr="--inOutput %s",), + inOutput2=dict(argstr="--inOutput2 %s",), + inOutput3=dict(argstr="--inOutput3 %s",), + inOutput4=dict(argstr="--inOutput4 %s",), + inOutput5=dict(argstr="--inOutput5 %s",), + inRegistration=dict(argstr="--inRegistration %s",), + inResample=dict(argstr="--inResample %s",), + inRun=dict(argstr="--inRun %s",), + inSkip=dict(argstr="--inSkip %s",), + inSmoothing=dict(argstr="--inSmoothing %f",), + inSubsample=dict(argstr="--inSubsample %s",), + inUse=dict(argstr="--inUse %s",), + null=dict(argstr="--null %s",), + outFANTASM=dict(argstr="--outFANTASM %s", hash_files=False,), + outMask=dict(argstr="--outMask %s", hash_files=False,), + outMidsagittal=dict(argstr="--outMidsagittal %s", hash_files=False,), + outOriginal=dict(argstr="--outOriginal %s", hash_files=False,), + outPrior=dict(argstr="--outPrior %s", hash_files=False,), + outSegmentation=dict(argstr="--outSegmentation %s", hash_files=False,), + outSplitHalves=dict(argstr="--outSplitHalves %s", hash_files=False,), + outStripped=dict(argstr="--outStripped %s", hash_files=False,), + outd0=dict(argstr="--outd0 %s", hash_files=False,), + xDefaultMem=dict(argstr="-xDefaultMem %d",), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), + xPrefExt=dict(argstr="--xPrefExt %s",), ) inputs = MedicAlgorithmSPECTRE2010.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MedicAlgorithmSPECTRE2010_outputs(): output_map = dict( - outFANTASM=dict(extensions=None, ), - outMask=dict(extensions=None, ), - outMidsagittal=dict(extensions=None, ), - outOriginal=dict(extensions=None, ), - outPrior=dict(extensions=None, ), - outSegmentation=dict(extensions=None, ), - outSplitHalves=dict(extensions=None, ), - outStripped=dict(extensions=None, ), - outd0=dict(extensions=None, ), + outFANTASM=dict(extensions=None,), + outMask=dict(extensions=None,), + outMidsagittal=dict(extensions=None,), + outOriginal=dict(extensions=None,), + outPrior=dict(extensions=None,), + outSegmentation=dict(extensions=None,), + outSplitHalves=dict(extensions=None,), + outStripped=dict(extensions=None,), + outd0=dict(extensions=None,), ) outputs = MedicAlgorithmSPECTRE2010.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py index e4bfae7e0f..ca0179d231 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py @@ -4,35 +4,25 @@ def test_MedicAlgorithmThresholdToBinaryMask_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inLabel=dict( - argstr='--inLabel %s', - sep=';', - ), - inMaximum=dict(argstr='--inMaximum %f', ), - inMinimum=dict(argstr='--inMinimum %f', ), - inUse=dict(argstr='--inUse %s', ), - null=dict(argstr='--null %s', ), - outBinary=dict( - argstr='--outBinary %s', - sep=';', - ), - xDefaultMem=dict(argstr='-xDefaultMem %d', ), - xMaxProcess=dict( - argstr='-xMaxProcess %d', - usedefault=True, - ), - xPrefExt=dict(argstr='--xPrefExt %s', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inLabel=dict(argstr="--inLabel %s", sep=";",), + inMaximum=dict(argstr="--inMaximum %f",), + 
inMinimum=dict(argstr="--inMinimum %f",), + inUse=dict(argstr="--inUse %s",), + null=dict(argstr="--null %s",), + outBinary=dict(argstr="--outBinary %s", sep=";",), + xDefaultMem=dict(argstr="-xDefaultMem %d",), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), + xPrefExt=dict(argstr="--xPrefExt %s",), ) inputs = MedicAlgorithmThresholdToBinaryMask.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MedicAlgorithmThresholdToBinaryMask_outputs(): output_map = dict() outputs = MedicAlgorithmThresholdToBinaryMask.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_RandomVol.py b/nipype/interfaces/mipav/tests/test_auto_RandomVol.py index 8c9662c9be..1dd7520626 100644 --- a/nipype/interfaces/mipav/tests/test_auto_RandomVol.py +++ b/nipype/interfaces/mipav/tests/test_auto_RandomVol.py @@ -4,39 +4,32 @@ def test_RandomVol_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inField=dict(argstr='--inField %s', ), - inLambda=dict(argstr='--inLambda %f', ), - inMaximum=dict(argstr='--inMaximum %d', ), - inMinimum=dict(argstr='--inMinimum %d', ), - inSize=dict(argstr='--inSize %d', ), - inSize2=dict(argstr='--inSize2 %d', ), - inSize3=dict(argstr='--inSize3 %d', ), - inSize4=dict(argstr='--inSize4 %d', ), - inStandard=dict(argstr='--inStandard %d', ), - null=dict(argstr='--null %s', ), - outRand1=dict( - argstr='--outRand1 %s', - hash_files=False, - ), - xDefaultMem=dict(argstr='-xDefaultMem %d', ), - xMaxProcess=dict( - argstr='-xMaxProcess %d', - usedefault=True, - ), - xPrefExt=dict(argstr='--xPrefExt %s', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inField=dict(argstr="--inField %s",), + inLambda=dict(argstr="--inLambda %f",), + inMaximum=dict(argstr="--inMaximum %d",), + inMinimum=dict(argstr="--inMinimum %d",), + inSize=dict(argstr="--inSize %d",), + inSize2=dict(argstr="--inSize2 %d",), + inSize3=dict(argstr="--inSize3 %d",), + inSize4=dict(argstr="--inSize4 %d",), + inStandard=dict(argstr="--inStandard %d",), + null=dict(argstr="--null %s",), + outRand1=dict(argstr="--outRand1 %s", hash_files=False,), + xDefaultMem=dict(argstr="-xDefaultMem %d",), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), + xPrefExt=dict(argstr="--xPrefExt %s",), ) inputs = RandomVol.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_RandomVol_outputs(): - output_map = dict(outRand1=dict(extensions=None, ), ) + output_map = dict(outRand1=dict(extensions=None,),) outputs = RandomVol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mixins/__init__.py b/nipype/interfaces/mixins/__init__.py index 587d3a22a8..a64dc34ff2 100644 --- a/nipype/interfaces/mixins/__init__.py +++ b/nipype/interfaces/mixins/__init__.py @@ -1,2 +1,5 @@ from .reporting import ( - ReportCapableInterface, ReportCapableInputSpec, ReportCapableOutputSpec) + ReportCapableInterface, + ReportCapableInputSpec, + ReportCapableOutputSpec, +) diff --git a/nipype/interfaces/mixins/reporting.py b/nipype/interfaces/mixins/reporting.py index fecdb337c5..182738ca64 100644 --- a/nipype/interfaces/mixins/reporting.py +++ b/nipype/interfaces/mixins/reporting.py @@ -7,23 +7,27 @@ from abc import abstractmethod from ... 
import logging -from ..base import ( - File, BaseInterface, BaseInterfaceInputSpec, TraitedSpec) +from ..base import File, BaseInterface, BaseInterfaceInputSpec, TraitedSpec -iflogger = logging.getLogger('nipype.interface') +iflogger = logging.getLogger("nipype.interface") class ReportCapableInputSpec(BaseInterfaceInputSpec): - out_report = File('report', usedefault=True, hash_files=False, - desc='filename for the visual report') + out_report = File( + "report", + usedefault=True, + hash_files=False, + desc="filename for the visual report", + ) class ReportCapableOutputSpec(TraitedSpec): - out_report = File(desc='filename for the visual report') + out_report = File(desc="filename for the visual report") class ReportCapableInterface(BaseInterface): """Mixin to enable reporting for Nipype interfaces""" + _out_report = None def __init__(self, generate_report=False, **kwargs): @@ -39,8 +43,9 @@ def _post_run_hook(self, runtime): self._out_report = self.inputs.out_report if not os.path.isabs(self._out_report): - self._out_report = os.path.abspath(os.path.join(runtime.cwd, - self._out_report)) + self._out_report = os.path.abspath( + os.path.join(runtime.cwd, self._out_report) + ) self._generate_report() @@ -52,7 +57,7 @@ def _list_outputs(self): except NotImplementedError: outputs = {} if self._out_report is not None: - outputs['out_report'] = self._out_report + outputs["out_report"] = self._out_report return outputs @abstractmethod diff --git a/nipype/interfaces/mne/base.py b/nipype/interfaces/mne/base.py index 175a73e126..f410da794a 100644 --- a/nipype/interfaces/mne/base.py +++ b/nipype/interfaces/mne/base.py @@ -4,74 +4,86 @@ from ... import logging from ...utils.filemanip import simplify_list -from ..base import (traits, File, Directory, TraitedSpec, OutputMultiPath) +from ..base import traits, File, Directory, TraitedSpec, OutputMultiPath from ..freesurfer.base import FSCommand, FSTraitedSpec -iflogger = logging.getLogger('nipype.interface') +iflogger = logging.getLogger("nipype.interface") class WatershedBEMInputSpec(FSTraitedSpec): subject_id = traits.Str( - argstr='--subject %s', + argstr="--subject %s", mandatory=True, - desc='Subject ID (must have a complete Freesurfer directory)') + desc="Subject ID (must have a complete Freesurfer directory)", + ) subjects_dir = Directory( exists=True, mandatory=True, usedefault=True, - desc='Path to Freesurfer subjects directory') + desc="Path to Freesurfer subjects directory", + ) volume = traits.Enum( - 'T1', - 'aparc+aseg', - 'aseg', - 'brain', - 'orig', - 'brainmask', - 'ribbon', - argstr='--volume %s', + "T1", + "aparc+aseg", + "aseg", + "brain", + "orig", + "brainmask", + "ribbon", + argstr="--volume %s", usedefault=True, - desc='The volume from the "mri" directory to use (defaults to T1)') + desc='The volume from the "mri" directory to use (defaults to T1)', + ) overwrite = traits.Bool( True, usedefault=True, - argstr='--overwrite', - desc='Overwrites the existing files') + argstr="--overwrite", + desc="Overwrites the existing files", + ) atlas_mode = traits.Bool( - argstr='--atlas', - desc='Use atlas mode for registration (default: no rigid alignment)') + argstr="--atlas", + desc="Use atlas mode for registration (default: no rigid alignment)", + ) class WatershedBEMOutputSpec(TraitedSpec): mesh_files = OutputMultiPath( File(exists=True), - desc=('Paths to the output meshes (brain, inner ' - 'skull, outer skull, outer skin)')) + desc=( + "Paths to the output meshes (brain, inner " + "skull, outer skull, outer skin)" + ), + ) brain_surface = 
File( - exists=True, - loc='bem/watershed', - desc='Brain surface (in Freesurfer format)') + exists=True, loc="bem/watershed", desc="Brain surface (in Freesurfer format)" + ) inner_skull_surface = File( exists=True, - loc='bem/watershed', - desc='Inner skull surface (in Freesurfer format)') + loc="bem/watershed", + desc="Inner skull surface (in Freesurfer format)", + ) outer_skull_surface = File( exists=True, - loc='bem/watershed', - desc='Outer skull surface (in Freesurfer format)') + loc="bem/watershed", + desc="Outer skull surface (in Freesurfer format)", + ) outer_skin_surface = File( exists=True, - loc='bem/watershed', - desc='Outer skin surface (in Freesurfer format)') + loc="bem/watershed", + desc="Outer skin surface (in Freesurfer format)", + ) fif_file = File( exists=True, - loc='bem', - altkey='fif', - desc='"fif" format file for EEG processing in MNE') + loc="bem", + altkey="fif", + desc='"fif" format file for EEG processing in MNE', + ) cor_files = OutputMultiPath( File(exists=True), - loc='bem/watershed/ws', - altkey='COR', - desc='"COR" format files') + loc="bem/watershed/ws", + altkey="COR", + desc='"COR" format files', + ) class WatershedBEM(FSCommand): @@ -90,18 +102,18 @@ class WatershedBEM(FSCommand): """ - _cmd = 'mne watershed_bem' + _cmd = "mne watershed_bem" input_spec = WatershedBEMInputSpec output_spec = WatershedBEMOutputSpec - _additional_metadata = ['loc', 'altkey'] + _additional_metadata = ["loc", "altkey"] def _get_files(self, path, key, dirval, altkey=None): - globsuffix = '*' - globprefix = '*' + globsuffix = "*" + globprefix = "*" keydir = op.join(path, dirval) if altkey: key = altkey - globpattern = op.join(keydir, ''.join((globprefix, key, globsuffix))) + globpattern = op.join(keydir, "".join((globprefix, key, globsuffix))) return glob.glob(globpattern) def _list_outputs(self): @@ -111,10 +123,13 @@ def _list_outputs(self): output_traits = self._outputs() mesh_paths = [] for k in list(outputs.keys()): - if k != 'mesh_files': - val = self._get_files(subject_path, k, - output_traits.traits()[k].loc, - output_traits.traits()[k].altkey) + if k != "mesh_files": + val = self._get_files( + subject_path, + k, + output_traits.traits()[k].loc, + output_traits.traits()[k].altkey, + ) if val: value_list = simplify_list(val) if isinstance(value_list, list): @@ -126,7 +141,7 @@ def _list_outputs(self): else: raise TypeError outputs[k] = out_files - if not k.rfind('surface') == -1: + if not k.rfind("surface") == -1: mesh_paths.append(out_files) - outputs['mesh_files'] = mesh_paths + outputs["mesh_files"] = mesh_paths return outputs diff --git a/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py b/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py index 2664a7d437..30e6a1de7e 100644 --- a/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py +++ b/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py @@ -4,62 +4,30 @@ def test_WatershedBEM_inputs(): input_map = dict( - args=dict(argstr='%s', ), - atlas_mode=dict(argstr='--atlas', ), - environ=dict( - nohash=True, - usedefault=True, - ), - overwrite=dict( - argstr='--overwrite', - usedefault=True, - ), - subject_id=dict( - argstr='--subject %s', - mandatory=True, - ), - subjects_dir=dict( - mandatory=True, - usedefault=True, - ), - volume=dict( - argstr='--volume %s', - usedefault=True, - ), + args=dict(argstr="%s",), + atlas_mode=dict(argstr="--atlas",), + environ=dict(nohash=True, usedefault=True,), + overwrite=dict(argstr="--overwrite", usedefault=True,), + subject_id=dict(argstr="--subject %s", mandatory=True,), 
+ subjects_dir=dict(mandatory=True, usedefault=True,), + volume=dict(argstr="--volume %s", usedefault=True,), ) inputs = WatershedBEM.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_WatershedBEM_outputs(): output_map = dict( - brain_surface=dict( - extensions=None, - loc='bem/watershed', - ), - cor_files=dict( - altkey='COR', - loc='bem/watershed/ws', - ), - fif_file=dict( - altkey='fif', - extensions=None, - loc='bem', - ), - inner_skull_surface=dict( - extensions=None, - loc='bem/watershed', - ), + brain_surface=dict(extensions=None, loc="bem/watershed",), + cor_files=dict(altkey="COR", loc="bem/watershed/ws",), + fif_file=dict(altkey="fif", extensions=None, loc="bem",), + inner_skull_surface=dict(extensions=None, loc="bem/watershed",), mesh_files=dict(), - outer_skin_surface=dict( - extensions=None, - loc='bem/watershed', - ), - outer_skull_surface=dict( - extensions=None, - loc='bem/watershed', - ), + outer_skin_surface=dict(extensions=None, loc="bem/watershed",), + outer_skull_surface=dict(extensions=None, loc="bem/watershed",), ) outputs = WatershedBEM.output_spec() diff --git a/nipype/interfaces/mrtrix/__init__.py b/nipype/interfaces/mrtrix/__init__.py index ea066d4cd8..917d576eda 100644 --- a/nipype/interfaces/mrtrix/__init__.py +++ b/nipype/interfaces/mrtrix/__init__.py @@ -1,15 +1,35 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from .tracking import (Tracks2Prob, FilterTracks, StreamlineTrack, - DiffusionTensorStreamlineTrack, - SphericallyDeconvolutedStreamlineTrack, - ProbabilisticSphericallyDeconvolutedStreamlineTrack) -from .tensors import (FSL2MRTrix, ConstrainedSphericalDeconvolution, - DWI2SphericalHarmonicsImage, EstimateResponseForSH, - GenerateDirections, FindShPeaks, Directions2Amplitude) -from .preprocess import (MRConvert, MRMultiply, MRTrixViewer, MRTrixInfo, - GenerateWhiteMatterMask, DWI2Tensor, - Tensor2ApparentDiffusion, Tensor2FractionalAnisotropy, - Tensor2Vector, MedianFilter3D, Erode, Threshold) +from .tracking import ( + Tracks2Prob, + FilterTracks, + StreamlineTrack, + DiffusionTensorStreamlineTrack, + SphericallyDeconvolutedStreamlineTrack, + ProbabilisticSphericallyDeconvolutedStreamlineTrack, +) +from .tensors import ( + FSL2MRTrix, + ConstrainedSphericalDeconvolution, + DWI2SphericalHarmonicsImage, + EstimateResponseForSH, + GenerateDirections, + FindShPeaks, + Directions2Amplitude, +) +from .preprocess import ( + MRConvert, + MRMultiply, + MRTrixViewer, + MRTrixInfo, + GenerateWhiteMatterMask, + DWI2Tensor, + Tensor2ApparentDiffusion, + Tensor2FractionalAnisotropy, + Tensor2Vector, + MedianFilter3D, + Erode, + Threshold, +) from .convert import MRTrix2TrackVis diff --git a/nipype/interfaces/mrtrix/convert.py b/nipype/interfaces/mrtrix/convert.py index 53dbbffcb1..b2314271c4 100644 --- a/nipype/interfaces/mrtrix/convert.py +++ b/nipype/interfaces/mrtrix/convert.py @@ -14,12 +14,13 @@ from ..base import TraitedSpec, File, isdefined from ..dipy.base import DipyBaseInterface, HAVE_DIPY as have_dipy -iflogger = logging.getLogger('nipype.interface') +iflogger = logging.getLogger("nipype.interface") def get_vox_dims(volume): import nibabel as nb from nipype.utils import NUMPY_MMAP + if isinstance(volume, list): volume = volume[0] nii = nb.load(volume, mmap=NUMPY_MMAP) @@ -31,6 +32,7 @@ def get_vox_dims(volume): def get_data_dims(volume): 
import nibabel as nb from nipype.utils import NUMPY_MMAP + if isinstance(volume, list): volume = volume[0] nii = nb.load(volume, mmap=NUMPY_MMAP) @@ -41,10 +43,10 @@ def get_data_dims(volume): def transform_to_affine(streams, header, affine): from dipy.tracking.utils import move_streamlines + rotation, scale = np.linalg.qr(affine) streams = move_streamlines(streams, rotation) - scale[0:3, 0:3] = np.dot(scale[0:3, 0:3], - np.diag(1. / header['voxel_size'])) + scale[0:3, 0:3] = np.dot(scale[0:3, 0:3], np.diag(1.0 / header["voxel_size"])) scale[0:3, 3] = abs(scale[0:3, 3]) streams = move_streamlines(streams, scale) return streams @@ -57,58 +59,57 @@ def read_mrtrix_tracks(in_file, as_generator=True): def read_mrtrix_header(in_file): - fileobj = open(in_file, 'rb') + fileobj = open(in_file, "rb") header = {} - iflogger.info('Reading header data...') + iflogger.info("Reading header data...") for line in fileobj: line = line.decode() - if line == 'END\n': - iflogger.info('Reached the end of the header!') + if line == "END\n": + iflogger.info("Reached the end of the header!") break - elif ': ' in line: - line = line.replace('\n', '') + elif ": " in line: + line = line.replace("\n", "") line = line.replace("'", "") - key = line.split(': ')[0] - value = line.split(': ')[1] + key = line.split(": ")[0] + value = line.split(": ")[1] header[key] = value iflogger.info('...adding "%s" to header for key "%s"', value, key) fileobj.close() - header['count'] = int(header['count'].replace('\n', '')) - header['offset'] = int(header['file'].replace('.', '')) + header["count"] = int(header["count"].replace("\n", "")) + header["offset"] = int(header["file"].replace(".", "")) return header def read_mrtrix_streamlines(in_file, header, as_generator=True): - offset = header['offset'] - stream_count = header['count'] - fileobj = open(in_file, 'rb') + offset = header["offset"] + stream_count = header["count"] + fileobj = open(in_file, "rb") fileobj.seek(offset) endianness = native_code - f4dt = np.dtype(endianness + 'f4') + f4dt = np.dtype(endianness + "f4") pt_cols = 3 bytesize = pt_cols * 4 def points_per_track(offset): track_points = [] - iflogger.info('Identifying the number of points per tract...') + iflogger.info("Identifying the number of points per tract...") all_str = fileobj.read() num_triplets = int(len(all_str) / bytesize) - pts = np.ndarray( - shape=(num_triplets, pt_cols), dtype='f4', buffer=all_str) + pts = np.ndarray(shape=(num_triplets, pt_cols), dtype="f4", buffer=all_str) nonfinite_list = np.where(np.invert(np.isfinite(pts[:, 2]))) nonfinite_list = list(nonfinite_list[0])[ - 0:-1] # Converts numpy array to list, removes the last value + 0:-1 + ] # Converts numpy array to list, removes the last value for idx, value in enumerate(nonfinite_list): if idx == 0: track_points.append(nonfinite_list[idx]) else: - track_points.append( - nonfinite_list[idx] - nonfinite_list[idx - 1] - 1) + track_points.append(nonfinite_list[idx] - nonfinite_list[idx - 1] - 1) return track_points, nonfinite_list def track_gen(track_points): n_streams = 0 - iflogger.info('Reading tracks...') + iflogger.info("Reading tracks...") while True: try: n_pts = track_points[n_streams] @@ -118,13 +119,14 @@ def track_gen(track_points): nan_str = fileobj.read(bytesize) if len(pts_str) < (n_pts * bytesize): if not n_streams == stream_count: - raise HeaderError('Expecting %s points, found only %s' % - (stream_count, n_streams)) - iflogger.error('Expecting %s points, found only %s', - stream_count, n_streams) + raise HeaderError( + "Expecting 
%s points, found only %s" % (stream_count, n_streams) + ) + iflogger.error( + "Expecting %s points, found only %s", stream_count, n_streams + ) break - pts = np.ndarray( - shape=(n_pts, pt_cols), dtype=f4dt, buffer=pts_str) + pts = np.ndarray(shape=(n_pts, pt_cols), dtype=f4dt, buffer=pts_str) nan_pt = np.ndarray(shape=(1, pt_cols), dtype=f4dt, buffer=nan_str) if np.isfinite(nan_pt[0][0]): raise ValueError @@ -133,15 +135,14 @@ def track_gen(track_points): yield xyz n_streams += 1 if n_streams == stream_count: - iflogger.info('100%% : %i tracks read', n_streams) + iflogger.info("100%% : %i tracks read", n_streams) raise StopIteration try: if n_streams % int(stream_count / 100) == 0: percent = int(float(n_streams) / float(stream_count) * 100) - iflogger.info('%i%% : %i tracks read', percent, n_streams) + iflogger.info("%i%% : %i tracks read", percent, n_streams) except ZeroDivisionError: - iflogger.info('%i stream read out of %i', n_streams, - stream_count) + iflogger.info("%i stream read out of %i", n_streams, stream_count) track_points, nonfinite_list = points_per_track(offset) fileobj.seek(offset) @@ -155,22 +156,22 @@ class MRTrix2TrackVisInputSpec(TraitedSpec): in_file = File( exists=True, mandatory=True, - desc='The input file for the tracks in MRTrix (.tck) format') - image_file = File( - exists=True, desc='The image the tracks were generated from') + desc="The input file for the tracks in MRTrix (.tck) format", + ) + image_file = File(exists=True, desc="The image the tracks were generated from") matrix_file = File( exists=True, - desc= - 'A transformation matrix to apply to the tracts after they have been generated (from FLIRT - affine transformation from image_file to registration_image_file)' + desc="A transformation matrix to apply to the tracts after they have been generated (from FLIRT - affine transformation from image_file to registration_image_file)", ) registration_image_file = File( - exists=True, - desc='The final image the tracks should be registered to.') + exists=True, desc="The final image the tracks should be registered to." 
+ ) out_filename = File( - 'converted.trk', + "converted.trk", genfile=True, usedefault=True, - desc='The output filename for the tracks in TrackVis (.trk) format') + desc="The output filename for the tracks in TrackVis (.trk) format", + ) class MRTrix2TrackVisOutputSpec(TraitedSpec): @@ -189,12 +190,13 @@ class MRTrix2TrackVis(DipyBaseInterface): >>> tck2trk.inputs.image_file = 'diffusion.nii' >>> tck2trk.run() # doctest: +SKIP """ + input_spec = MRTrix2TrackVisInputSpec output_spec = MRTrix2TrackVisOutputSpec def _run_interface(self, runtime): - from dipy.tracking.utils import move_streamlines, \ - affine_from_fsl_mat_file + from dipy.tracking.utils import move_streamlines, affine_from_fsl_mat_file + dx, dy, dz = get_data_dims(self.inputs.image_file) vx, vy, vz = get_vox_dims(self.inputs.image_file) image_file = nb.load(self.inputs.image_file) @@ -202,81 +204,82 @@ def _run_interface(self, runtime): out_filename = op.abspath(self.inputs.out_filename) # Reads MRTrix tracks - header, streamlines = read_mrtrix_tracks( - self.inputs.in_file, as_generator=True) - iflogger.info('MRTrix Header:') + header, streamlines = read_mrtrix_tracks(self.inputs.in_file, as_generator=True) + iflogger.info("MRTrix Header:") iflogger.info(header) # Writes to Trackvis trk_header = nb.trackvis.empty_header() - trk_header['dim'] = [dx, dy, dz] - trk_header['voxel_size'] = [vx, vy, vz] - trk_header['n_count'] = header['count'] + trk_header["dim"] = [dx, dy, dz] + trk_header["voxel_size"] = [vx, vy, vz] + trk_header["n_count"] = header["count"] if isdefined(self.inputs.matrix_file) and isdefined( - self.inputs.registration_image_file): - iflogger.info('Applying transformation from matrix file %s', - self.inputs.matrix_file) + self.inputs.registration_image_file + ): + iflogger.info( + "Applying transformation from matrix file %s", self.inputs.matrix_file + ) xfm = np.genfromtxt(self.inputs.matrix_file) iflogger.info(xfm) - registration_image_file = nb.load( - self.inputs.registration_image_file) + registration_image_file = nb.load(self.inputs.registration_image_file) reg_affine = registration_image_file.affine - r_dx, r_dy, r_dz = get_data_dims( - self.inputs.registration_image_file) - r_vx, r_vy, r_vz = get_vox_dims( - self.inputs.registration_image_file) - iflogger.info('Using affine from registration image file %s', - self.inputs.registration_image_file) + r_dx, r_dy, r_dz = get_data_dims(self.inputs.registration_image_file) + r_vx, r_vy, r_vz = get_vox_dims(self.inputs.registration_image_file) + iflogger.info( + "Using affine from registration image file %s", + self.inputs.registration_image_file, + ) iflogger.info(reg_affine) - trk_header['vox_to_ras'] = reg_affine - trk_header['dim'] = [r_dx, r_dy, r_dz] - trk_header['voxel_size'] = [r_vx, r_vy, r_vz] + trk_header["vox_to_ras"] = reg_affine + trk_header["dim"] = [r_dx, r_dy, r_dz] + trk_header["voxel_size"] = [r_vx, r_vy, r_vz] - affine = np.dot(affine, np.diag(1. 
/ np.array([vx, vy, vz, 1]))) + affine = np.dot(affine, np.diag(1.0 / np.array([vx, vy, vz, 1]))) transformed_streamlines = transform_to_affine( - streamlines, trk_header, affine) + streamlines, trk_header, affine + ) - aff = affine_from_fsl_mat_file(xfm, [vx, vy, vz], - [r_vx, r_vy, r_vz]) + aff = affine_from_fsl_mat_file(xfm, [vx, vy, vz], [r_vx, r_vy, r_vz]) iflogger.info(aff) axcode = aff2axcodes(reg_affine) - trk_header['voxel_order'] = axcode[0] + axcode[1] + axcode[2] + trk_header["voxel_order"] = axcode[0] + axcode[1] + axcode[2] final_streamlines = move_streamlines(transformed_streamlines, aff) trk_tracks = ((ii, None, None) for ii in final_streamlines) trk.write(out_filename, trk_tracks, trk_header) - iflogger.info('Saving transformed Trackvis file as %s', - out_filename) - iflogger.info('New TrackVis Header:') + iflogger.info("Saving transformed Trackvis file as %s", out_filename) + iflogger.info("New TrackVis Header:") iflogger.info(trk_header) else: iflogger.info( - 'Applying transformation from scanner coordinates to %s', - self.inputs.image_file) + "Applying transformation from scanner coordinates to %s", + self.inputs.image_file, + ) axcode = aff2axcodes(affine) - trk_header['voxel_order'] = axcode[0] + axcode[1] + axcode[2] - trk_header['vox_to_ras'] = affine + trk_header["voxel_order"] = axcode[0] + axcode[1] + axcode[2] + trk_header["vox_to_ras"] = affine transformed_streamlines = transform_to_affine( - streamlines, trk_header, affine) + streamlines, trk_header, affine + ) trk_tracks = ((ii, None, None) for ii in transformed_streamlines) trk.write(out_filename, trk_tracks, trk_header) - iflogger.info('Saving Trackvis file as %s', out_filename) - iflogger.info('TrackVis Header:') + iflogger.info("Saving Trackvis file as %s", out_filename) + iflogger.info("TrackVis Header:") iflogger.info(trk_header) return runtime def _list_outputs(self): outputs = self._outputs().get() - outputs['out_file'] = op.abspath(self.inputs.out_filename) + outputs["out_file"] = op.abspath(self.inputs.out_filename) return outputs def _gen_filename(self, name): - if name == 'out_filename': + if name == "out_filename": return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) - return name + '.trk' + return name + ".trk" diff --git a/nipype/interfaces/mrtrix/preprocess.py b/nipype/interfaces/mrtrix/preprocess.py index adc2ef27d7..48f4bde719 100644 --- a/nipype/interfaces/mrtrix/preprocess.py +++ b/nipype/interfaces/mrtrix/preprocess.py @@ -5,47 +5,52 @@ import os.path as op from ...utils.filemanip import split_filename -from ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec, - File, InputMultiPath, isdefined) +from ..base import ( + CommandLineInputSpec, + CommandLine, + traits, + TraitedSpec, + File, + InputMultiPath, + isdefined, +) class MRConvertInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-2, - desc='voxel-order data filename') - out_filename = File( - genfile=True, argstr='%s', position=-1, desc='Output filename') + desc="voxel-order data filename", + ) + out_filename = File(genfile=True, argstr="%s", position=-1, desc="Output filename") extract_at_axis = traits.Enum( 1, 2, 3, - argstr='-coord %s', + argstr="-coord %s", position=1, - desc= - '"Extract data only at the coordinates specified. This option specifies the Axis. Must be used in conjunction with extract_at_coordinate.' + desc='"Extract data only at the coordinates specified. 
This option specifies the Axis. Must be used in conjunction with extract_at_coordinate.', ) extract_at_coordinate = traits.List( traits.Float, - argstr='%s', - sep=',', + argstr="%s", + sep=",", position=2, minlen=1, maxlen=3, - desc= - '"Extract data only at the coordinates specified. This option specifies the coordinates. Must be used in conjunction with extract_at_axis. Three comma-separated numbers giving the size of each voxel in mm.' + desc='"Extract data only at the coordinates specified. This option specifies the coordinates. Must be used in conjunction with extract_at_axis. Three comma-separated numbers giving the size of each voxel in mm.', ) voxel_dims = traits.List( traits.Float, - argstr='-vox %s', - sep=',', + argstr="-vox %s", + sep=",", position=3, minlen=3, maxlen=3, - desc= - 'Three comma-separated numbers giving the size of each voxel in mm.') + desc="Three comma-separated numbers giving the size of each voxel in mm.", + ) output_datatype = traits.Enum( "nii", "float", @@ -54,10 +59,9 @@ class MRConvertInputSpec(CommandLineInputSpec): "int", "long", "double", - argstr='-output %s', + argstr="-output %s", position=2, - desc= - '"i.e. Bfloat". Can be "char", "short", "int", "long", "float" or "double"' + desc='"i.e. Bfloat". Can be "char", "short", "int", "long", "float" or "double"', ) # , usedefault=True) extension = traits.Enum( "mif", @@ -69,9 +73,9 @@ class MRConvertInputSpec(CommandLineInputSpec): "long", "double", position=2, - desc= - '"i.e. Bfloat". Can be "char", "short", "int", "long", "float" or "double"', - usedefault=True) + desc='"i.e. Bfloat". Can be "char", "short", "int", "long", "float" or "double"', + usedefault=True, + ) layout = traits.Enum( "nii", "float", @@ -80,33 +84,34 @@ class MRConvertInputSpec(CommandLineInputSpec): "int", "long", "double", - argstr='-output %s', + argstr="-output %s", position=2, - desc= - 'specify the layout of the data in memory. The actual layout produced will depend on whether the output image format can support it.' + desc="specify the layout of the data in memory. The actual layout produced will depend on whether the output image format can support it.", ) resample = traits.Float( - argstr='-scale %d', + argstr="-scale %d", position=3, - units='mm', - desc='Apply scaling to the intensity values.') + units="mm", + desc="Apply scaling to the intensity values.", + ) offset_bias = traits.Float( - argstr='-scale %d', + argstr="-scale %d", position=3, - units='mm', - desc='Apply offset to the intensity values.') + units="mm", + desc="Apply offset to the intensity values.", + ) replace_NaN_with_zero = traits.Bool( - argstr='-zero', position=3, desc="Replace all NaN values with zero.") + argstr="-zero", position=3, desc="Replace all NaN values with zero." + ) prs = traits.Bool( - argstr='-prs', + argstr="-prs", position=3, - desc= - "Assume that the DW gradients are specified in the PRS frame (Siemens DICOM only)." 
+ desc="Assume that the DW gradients are specified in the PRS frame (Siemens DICOM only).", ) class MRConvertOutputSpec(TraitedSpec): - converted = File(exists=True, desc='path/name of 4D volume in voxel order') + converted = File(exists=True, desc="path/name of 4D volume in voxel order") class MRConvert(CommandLine): @@ -128,21 +133,21 @@ class MRConvert(CommandLine): >>> mrconvert.run() # doctest: +SKIP """ - _cmd = 'mrconvert' + _cmd = "mrconvert" input_spec = MRConvertInputSpec output_spec = MRConvertOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['converted'] = self.inputs.out_filename - if not isdefined(outputs['converted']): - outputs['converted'] = op.abspath(self._gen_outfilename()) + outputs["converted"] = self.inputs.out_filename + if not isdefined(outputs["converted"]): + outputs["converted"] = op.abspath(self._gen_outfilename()) else: - outputs['converted'] = op.abspath(outputs['converted']) + outputs["converted"] = op.abspath(outputs["converted"]) return outputs def _gen_filename(self, name): - if name == 'out_filename': + if name == "out_filename": return self._gen_outfilename() else: return None @@ -152,67 +157,76 @@ def _gen_outfilename(self): if isdefined(self.inputs.out_filename): outname = self.inputs.out_filename else: - outname = name + '_mrconvert.' + self.inputs.extension + outname = name + "_mrconvert." + self.inputs.extension return outname class DWI2TensorInputSpec(CommandLineInputSpec): in_file = InputMultiPath( File(exists=True), - argstr='%s', + argstr="%s", mandatory=True, position=-2, - desc='Diffusion-weighted images') + desc="Diffusion-weighted images", + ) out_filename = File( name_template="%s_tensor.mif", name_source="in_file", output_name="tensor", - argstr='%s', - desc='Output tensor filename', - position=-1) + argstr="%s", + desc="Output tensor filename", + position=-1, + ) encoding_file = File( - argstr='-grad %s', + argstr="-grad %s", position=2, - desc=('Encoding file supplied as a 4xN text file with ' - 'each line is in the format [ X Y Z b ], where ' - '[ X Y Z ] describe the direction of the applied ' - 'gradient, and b gives the b-value in units ' - '(1000 s/mm^2). See FSL2MRTrix()')) + desc=( + "Encoding file supplied as a 4xN text file with " + "each line is in the format [ X Y Z b ], where " + "[ X Y Z ] describe the direction of the applied " + "gradient, and b gives the b-value in units " + "(1000 s/mm^2). See FSL2MRTrix()" + ), + ) ignore_slice_by_volume = traits.List( traits.Int, - argstr='-ignoreslices %s', - sep=' ', + argstr="-ignoreslices %s", + sep=" ", position=2, minlen=2, maxlen=2, - desc=('Requires two values (i.e. [34 ' - '1] for [Slice Volume] Ignores ' - 'the image slices specified ' - 'when computing the tensor. ' - 'Slice here means the z ' - 'coordinate of the slice to be ' - 'ignored.')) + desc=( + "Requires two values (i.e. [34 " + "1] for [Slice Volume] Ignores " + "the image slices specified " + "when computing the tensor. " + "Slice here means the z " + "coordinate of the slice to be " + "ignored." + ), + ) ignore_volumes = traits.List( traits.Int, - argstr='-ignorevolumes %s', - sep=' ', + argstr="-ignorevolumes %s", + sep=" ", position=2, minlen=1, - desc=('Requires two values (i.e. [2 5 6] for ' - '[Volumes] Ignores the image volumes ' - 'specified when computing the tensor.')) + desc=( + "Requires two values (i.e. [2 5 6] for " + "[Volumes] Ignores the image volumes " + "specified when computing the tensor." 
+ ), + ) quiet = traits.Bool( - argstr='-quiet', + argstr="-quiet", position=1, - desc=("Do not display information messages or progress " - "status.")) - debug = traits.Bool( - argstr='-debug', position=1, desc="Display debugging messages.") + desc=("Do not display information messages or progress " "status."), + ) + debug = traits.Bool(argstr="-debug", position=1, desc="Display debugging messages.") class DWI2TensorOutputSpec(TraitedSpec): - tensor = File( - exists=True, desc='path/name of output diffusion tensor image') + tensor = File(exists=True, desc="path/name of output diffusion tensor image") class DWI2Tensor(CommandLine): @@ -231,7 +245,7 @@ class DWI2Tensor(CommandLine): >>> dwi2tensor.run() # doctest: +SKIP """ - _cmd = 'dwi2tensor' + _cmd = "dwi2tensor" input_spec = DWI2TensorInputSpec output_spec = DWI2TensorOutputSpec @@ -239,25 +253,26 @@ class DWI2Tensor(CommandLine): class Tensor2VectorInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-2, - desc='Diffusion tensor image') + desc="Diffusion tensor image", + ) out_filename = File( - genfile=True, argstr='%s', position=-1, desc='Output vector filename') + genfile=True, argstr="%s", position=-1, desc="Output vector filename" + ) quiet = traits.Bool( - argstr='-quiet', + argstr="-quiet", position=1, - desc="Do not display information messages or progress status.") - debug = traits.Bool( - argstr='-debug', position=1, desc="Display debugging messages.") + desc="Do not display information messages or progress status.", + ) + debug = traits.Bool(argstr="-debug", position=1, desc="Display debugging messages.") class Tensor2VectorOutputSpec(TraitedSpec): vector = File( exists=True, - desc= - 'the output image of the major eigenvectors of the diffusion tensor image.' 
+ desc="the output image of the major eigenvectors of the diffusion tensor image.", ) @@ -274,55 +289,56 @@ class Tensor2Vector(CommandLine): >>> tensor2vector.run() # doctest: +SKIP """ - _cmd = 'tensor2vector' + _cmd = "tensor2vector" input_spec = Tensor2VectorInputSpec output_spec = Tensor2VectorOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['vector'] = self.inputs.out_filename - if not isdefined(outputs['vector']): - outputs['vector'] = op.abspath(self._gen_outfilename()) + outputs["vector"] = self.inputs.out_filename + if not isdefined(outputs["vector"]): + outputs["vector"] = op.abspath(self._gen_outfilename()) else: - outputs['vector'] = op.abspath(outputs['vector']) + outputs["vector"] = op.abspath(outputs["vector"]) return outputs def _gen_filename(self, name): - if name == 'out_filename': + if name == "out_filename": return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) - return name + '_vector.mif' + return name + "_vector.mif" class Tensor2FractionalAnisotropyInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-2, - desc='Diffusion tensor image') + desc="Diffusion tensor image", + ) out_filename = File( genfile=True, - argstr='%s', + argstr="%s", position=-1, - desc='Output Fractional Anisotropy filename') + desc="Output Fractional Anisotropy filename", + ) quiet = traits.Bool( - argstr='-quiet', + argstr="-quiet", position=1, - desc="Do not display information messages or progress status.") - debug = traits.Bool( - argstr='-debug', position=1, desc="Display debugging messages.") + desc="Do not display information messages or progress status.", + ) + debug = traits.Bool(argstr="-debug", position=1, desc="Display debugging messages.") class Tensor2FractionalAnisotropyOutputSpec(TraitedSpec): FA = File( exists=True, - desc= - 'the output image of the major eigenvectors of the diffusion tensor image.' 
+ desc="the output image of the major eigenvectors of the diffusion tensor image.", ) @@ -339,55 +355,56 @@ class Tensor2FractionalAnisotropy(CommandLine): >>> tensor2FA.run() # doctest: +SKIP """ - _cmd = 'tensor2FA' + _cmd = "tensor2FA" input_spec = Tensor2FractionalAnisotropyInputSpec output_spec = Tensor2FractionalAnisotropyOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['FA'] = self.inputs.out_filename - if not isdefined(outputs['FA']): - outputs['FA'] = op.abspath(self._gen_outfilename()) + outputs["FA"] = self.inputs.out_filename + if not isdefined(outputs["FA"]): + outputs["FA"] = op.abspath(self._gen_outfilename()) else: - outputs['FA'] = op.abspath(outputs['FA']) + outputs["FA"] = op.abspath(outputs["FA"]) return outputs def _gen_filename(self, name): - if name == 'out_filename': + if name == "out_filename": return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) - return name + '_FA.mif' + return name + "_FA.mif" class Tensor2ApparentDiffusionInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-2, - desc='Diffusion tensor image') + desc="Diffusion tensor image", + ) out_filename = File( genfile=True, - argstr='%s', + argstr="%s", position=-1, - desc='Output Fractional Anisotropy filename') + desc="Output Fractional Anisotropy filename", + ) quiet = traits.Bool( - argstr='-quiet', + argstr="-quiet", position=1, - desc="Do not display information messages or progress status.") - debug = traits.Bool( - argstr='-debug', position=1, desc="Display debugging messages.") + desc="Do not display information messages or progress status.", + ) + debug = traits.Bool(argstr="-debug", position=1, desc="Display debugging messages.") class Tensor2ApparentDiffusionOutputSpec(TraitedSpec): ADC = File( exists=True, - desc= - 'the output image of the major eigenvectors of the diffusion tensor image.' 
+ desc="the output image of the major eigenvectors of the diffusion tensor image.", ) @@ -404,49 +421,51 @@ class Tensor2ApparentDiffusion(CommandLine): >>> tensor2ADC.run() # doctest: +SKIP """ - _cmd = 'tensor2ADC' + _cmd = "tensor2ADC" input_spec = Tensor2ApparentDiffusionInputSpec output_spec = Tensor2ApparentDiffusionOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['ADC'] = self.inputs.out_filename - if not isdefined(outputs['ADC']): - outputs['ADC'] = op.abspath(self._gen_outfilename()) + outputs["ADC"] = self.inputs.out_filename + if not isdefined(outputs["ADC"]): + outputs["ADC"] = op.abspath(self._gen_outfilename()) else: - outputs['ADC'] = op.abspath(outputs['ADC']) + outputs["ADC"] = op.abspath(outputs["ADC"]) return outputs def _gen_filename(self, name): - if name == 'out_filename': + if name == "out_filename": return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) - return name + '_ADC.mif' + return name + "_ADC.mif" class MRMultiplyInputSpec(CommandLineInputSpec): in_files = InputMultiPath( File(exists=True), - argstr='%s', + argstr="%s", mandatory=True, position=-2, - desc='Input images to be multiplied') + desc="Input images to be multiplied", + ) out_filename = File( - genfile=True, argstr='%s', position=-1, desc='Output image filename') + genfile=True, argstr="%s", position=-1, desc="Output image filename" + ) quiet = traits.Bool( - argstr='-quiet', + argstr="-quiet", position=1, - desc="Do not display information messages or progress status.") - debug = traits.Bool( - argstr='-debug', position=1, desc="Display debugging messages.") + desc="Do not display information messages or progress status.", + ) + debug = traits.Bool(argstr="-debug", position=1, desc="Display debugging messages.") class MRMultiplyOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='the output image of the multiplication') + out_file = File(exists=True, desc="the output image of the multiplication") class MRMultiply(CommandLine): @@ -462,43 +481,44 @@ class MRMultiply(CommandLine): >>> MRmult.run() # doctest: +SKIP """ - _cmd = 'mrmult' + _cmd = "mrmult" input_spec = MRMultiplyInputSpec output_spec = MRMultiplyOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = self.inputs.out_filename - if not isdefined(outputs['out_file']): - outputs['out_file'] = op.abspath(self._gen_outfilename()) + outputs["out_file"] = self.inputs.out_filename + if not isdefined(outputs["out_file"]): + outputs["out_file"] = op.abspath(self._gen_outfilename()) else: - outputs['out_file'] = op.abspath(outputs['out_file']) + outputs["out_file"] = op.abspath(outputs["out_file"]) return outputs def _gen_filename(self, name): - if name == 'out_filename': + if name == "out_filename": return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_files[0]) - return name + '_MRMult.mif' + return name + "_MRMult.mif" class MRTrixViewerInputSpec(CommandLineInputSpec): in_files = InputMultiPath( File(exists=True), - argstr='%s', + argstr="%s", mandatory=True, position=-2, - desc='Input images to be viewed') + desc="Input images to be viewed", + ) quiet = traits.Bool( - argstr='-quiet', + argstr="-quiet", position=1, - desc="Do not display information messages or progress status.") - debug = traits.Bool( - argstr='-debug', position=1, desc="Display debugging messages.") + desc="Do not display information messages or progress 
status.", + ) + debug = traits.Bool(argstr="-debug", position=1, desc="Display debugging messages.") class MRTrixViewerOutputSpec(TraitedSpec): @@ -518,7 +538,7 @@ class MRTrixViewer(CommandLine): >>> MRview.run() # doctest: +SKIP """ - _cmd = 'mrview' + _cmd = "mrview" input_spec = MRTrixViewerInputSpec output_spec = MRTrixViewerOutputSpec @@ -529,10 +549,11 @@ def _list_outputs(self): class MRTrixInfoInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-2, - desc='Input images to be read') + desc="Input images to be read", + ) class MRTrixInfoOutputSpec(TraitedSpec): @@ -552,7 +573,7 @@ class MRTrixInfo(CommandLine): >>> MRinfo.run() # doctest: +SKIP """ - _cmd = 'mrinfo' + _cmd = "mrinfo" input_spec = MRTrixInfoInputSpec output_spec = MRTrixInfoOutputSpec @@ -563,38 +584,35 @@ def _list_outputs(self): class GenerateWhiteMatterMaskInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-3, - desc='Diffusion-weighted images') + desc="Diffusion-weighted images", + ) binary_mask = File( - exists=True, - argstr='%s', - mandatory=True, - position=-2, - desc='Binary brain mask') + exists=True, argstr="%s", mandatory=True, position=-2, desc="Binary brain mask" + ) out_WMProb_filename = File( genfile=True, - argstr='%s', + argstr="%s", position=-1, - desc='Output WM probability image filename') + desc="Output WM probability image filename", + ) encoding_file = File( exists=True, - argstr='-grad %s', + argstr="-grad %s", mandatory=True, position=1, - desc= - 'Gradient encoding, supplied as a 4xN text file with each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). See FSL2MRTrix' + desc="Gradient encoding, supplied as a 4xN text file with each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). 
See FSL2MRTrix", ) noise_level_margin = traits.Float( - argstr='-margin %s', - desc= - 'Specify the width of the margin on either side of the image to be used to estimate the noise level (default = 10)' + argstr="-margin %s", + desc="Specify the width of the margin on either side of the image to be used to estimate the noise level (default = 10)", ) class GenerateWhiteMatterMaskOutputSpec(TraitedSpec): - WMprobabilitymap = File(exists=True, desc='WMprobabilitymap') + WMprobabilitymap = File(exists=True, desc="WMprobabilitymap") class GenerateWhiteMatterMask(CommandLine): @@ -611,51 +629,53 @@ class GenerateWhiteMatterMask(CommandLine): >>> genWM.run() # doctest: +SKIP """ - _cmd = 'gen_WM_mask' + _cmd = "gen_WM_mask" input_spec = GenerateWhiteMatterMaskInputSpec output_spec = GenerateWhiteMatterMaskOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['WMprobabilitymap'] = op.abspath(self._gen_outfilename()) + outputs["WMprobabilitymap"] = op.abspath(self._gen_outfilename()) return outputs def _gen_filename(self, name): - if name == 'out_WMProb_filename': + if name == "out_WMProb_filename": return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) - return name + '_WMProb.mif' + return name + "_WMProb.mif" class ErodeInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-2, - desc='Input mask image to be eroded') + desc="Input mask image to be eroded", + ) out_filename = File( - genfile=True, argstr='%s', position=-1, desc='Output image filename') + genfile=True, argstr="%s", position=-1, desc="Output image filename" + ) number_of_passes = traits.Int( - argstr='-npass %s', desc='the number of passes (default: 1)') + argstr="-npass %s", desc="the number of passes (default: 1)" + ) dilate = traits.Bool( - argstr='-dilate', - position=1, - desc="Perform dilation rather than erosion") + argstr="-dilate", position=1, desc="Perform dilation rather than erosion" + ) quiet = traits.Bool( - argstr='-quiet', + argstr="-quiet", position=1, - desc="Do not display information messages or progress status.") - debug = traits.Bool( - argstr='-debug', position=1, desc="Display debugging messages.") + desc="Do not display information messages or progress status.", + ) + debug = traits.Bool(argstr="-debug", position=1, desc="Display debugging messages.") class ErodeOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='the output image') + out_file = File(exists=True, desc="the output image") class Erode(CommandLine): @@ -670,64 +690,63 @@ class Erode(CommandLine): >>> erode.inputs.in_file = 'mask.mif' >>> erode.run() # doctest: +SKIP """ - _cmd = 'erode' + + _cmd = "erode" input_spec = ErodeInputSpec output_spec = ErodeOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = self.inputs.out_filename - if not isdefined(outputs['out_file']): - outputs['out_file'] = op.abspath(self._gen_outfilename()) + outputs["out_file"] = self.inputs.out_filename + if not isdefined(outputs["out_file"]): + outputs["out_file"] = op.abspath(self._gen_outfilename()) else: - outputs['out_file'] = op.abspath(outputs['out_file']) + outputs["out_file"] = op.abspath(outputs["out_file"]) return outputs def _gen_filename(self, name): - if name == 'out_filename': + if name == "out_filename": return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) - return name + 
'_erode.mif' + return name + "_erode.mif" class ThresholdInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-2, - desc='The input image to be thresholded') + desc="The input image to be thresholded", + ) out_filename = File( - genfile=True, - argstr='%s', - position=-1, - desc='The output binary image mask.') + genfile=True, argstr="%s", position=-1, desc="The output binary image mask." + ) absolute_threshold_value = traits.Float( - argstr='-abs %s', - desc='Specify threshold value as absolute intensity.') + argstr="-abs %s", desc="Specify threshold value as absolute intensity." + ) percentage_threshold_value = traits.Float( - argstr='-percent %s', - desc= - 'Specify threshold value as a percentage of the peak intensity in the input image.' + argstr="-percent %s", + desc="Specify threshold value as a percentage of the peak intensity in the input image.", ) - invert = traits.Bool( - argstr='-invert', position=1, desc="Invert output binary mask") + invert = traits.Bool(argstr="-invert", position=1, desc="Invert output binary mask") replace_zeros_with_NaN = traits.Bool( - argstr='-nan', position=1, desc="Replace all zero values with NaN") + argstr="-nan", position=1, desc="Replace all zero values with NaN" + ) quiet = traits.Bool( - argstr='-quiet', + argstr="-quiet", position=1, - desc="Do not display information messages or progress status.") - debug = traits.Bool( - argstr='-debug', position=1, desc="Display debugging messages.") + desc="Do not display information messages or progress status.", + ) + debug = traits.Bool(argstr="-debug", position=1, desc="Display debugging messages.") class ThresholdOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='The output binary image mask.') + out_file = File(exists=True, desc="The output binary image mask.") class Threshold(CommandLine): @@ -748,49 +767,51 @@ class Threshold(CommandLine): >>> thresh.run() # doctest: +SKIP """ - _cmd = 'threshold' + _cmd = "threshold" input_spec = ThresholdInputSpec output_spec = ThresholdOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = self.inputs.out_filename - if not isdefined(outputs['out_file']): - outputs['out_file'] = op.abspath(self._gen_outfilename()) + outputs["out_file"] = self.inputs.out_filename + if not isdefined(outputs["out_file"]): + outputs["out_file"] = op.abspath(self._gen_outfilename()) else: - outputs['out_file'] = op.abspath(outputs['out_file']) + outputs["out_file"] = op.abspath(outputs["out_file"]) return outputs def _gen_filename(self, name): - if name == 'out_filename': + if name == "out_filename": return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) - return name + '_thresh.mif' + return name + "_thresh.mif" class MedianFilter3DInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-2, - desc='Input images to be smoothed') + desc="Input images to be smoothed", + ) out_filename = File( - genfile=True, argstr='%s', position=-1, desc='Output image filename') + genfile=True, argstr="%s", position=-1, desc="Output image filename" + ) quiet = traits.Bool( - argstr='-quiet', + argstr="-quiet", position=1, - desc="Do not display information messages or progress status.") - debug = traits.Bool( - argstr='-debug', position=1, desc="Display debugging messages.") + desc="Do not display information messages or progress status.", + ) + debug = 
traits.Bool(argstr="-debug", position=1, desc="Display debugging messages.") class MedianFilter3DOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='the output image') + out_file = File(exists=True, desc="the output image") class MedianFilter3D(CommandLine): @@ -806,82 +827,82 @@ class MedianFilter3D(CommandLine): >>> median3d.run() # doctest: +SKIP """ - _cmd = 'median3D' + _cmd = "median3D" input_spec = MedianFilter3DInputSpec output_spec = MedianFilter3DOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = self.inputs.out_filename - if not isdefined(outputs['out_file']): - outputs['out_file'] = op.abspath(self._gen_outfilename()) + outputs["out_file"] = self.inputs.out_filename + if not isdefined(outputs["out_file"]): + outputs["out_file"] = op.abspath(self._gen_outfilename()) else: - outputs['out_file'] = op.abspath(outputs['out_file']) + outputs["out_file"] = op.abspath(outputs["out_file"]) return outputs def _gen_filename(self, name): - if name == 'out_filename': + if name == "out_filename": return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) - return name + '_median3D.mif' + return name + "_median3D.mif" class MRTransformInputSpec(CommandLineInputSpec): in_files = InputMultiPath( File(exists=True), - argstr='%s', + argstr="%s", mandatory=True, position=-2, - desc='Input images to be transformed') - out_filename = File( - genfile=True, argstr='%s', position=-1, desc='Output image') + desc="Input images to be transformed", + ) + out_filename = File(genfile=True, argstr="%s", position=-1, desc="Output image") invert = traits.Bool( - argstr='-inverse', + argstr="-inverse", position=1, - desc="Invert the specified transform before using it") + desc="Invert the specified transform before using it", + ) replace_transform = traits.Bool( - argstr='-replace', + argstr="-replace", position=1, - desc= - "replace the current transform by that specified, rather than applying it to the current transform" + desc="replace the current transform by that specified, rather than applying it to the current transform", ) transformation_file = File( exists=True, - argstr='-transform %s', + argstr="-transform %s", position=1, - desc='The transform to apply, in the form of a 4x4 ascii file.') + desc="The transform to apply, in the form of a 4x4 ascii file.", + ) template_image = File( exists=True, - argstr='-template %s', + argstr="-template %s", position=1, - desc='Reslice the input image to match the specified template image.') + desc="Reslice the input image to match the specified template image.", + ) reference_image = File( exists=True, - argstr='-reference %s', + argstr="-reference %s", position=1, - desc= - 'in case the transform supplied maps from the input image onto a reference image, use this option to specify the reference. Note that this implicitly sets the -replace option.' + desc="in case the transform supplied maps from the input image onto a reference image, use this option to specify the reference. Note that this implicitly sets the -replace option.", ) flip_x = traits.Bool( - argstr='-flipx', + argstr="-flipx", position=1, - desc= - "assume the transform is supplied assuming a coordinate system with the x-axis reversed relative to the MRtrix convention (i.e. x increases from right to left). This is required to handle transform matrices produced by FSL's FLIRT command. This is only used in conjunction with the -reference option." 
+ desc="assume the transform is supplied assuming a coordinate system with the x-axis reversed relative to the MRtrix convention (i.e. x increases from right to left). This is required to handle transform matrices produced by FSL's FLIRT command. This is only used in conjunction with the -reference option.", ) quiet = traits.Bool( - argstr='-quiet', + argstr="-quiet", position=1, - desc="Do not display information messages or progress status.") - debug = traits.Bool( - argstr='-debug', position=1, desc="Display debugging messages.") + desc="Do not display information messages or progress status.", + ) + debug = traits.Bool(argstr="-debug", position=1, desc="Display debugging messages.") class MRTransformOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='the output image of the transformation') + out_file = File(exists=True, desc="the output image of the transformation") class MRTransform(CommandLine): @@ -896,25 +917,25 @@ class MRTransform(CommandLine): >>> MRxform.run() # doctest: +SKIP """ - _cmd = 'mrtransform' + _cmd = "mrtransform" input_spec = MRTransformInputSpec output_spec = MRTransformOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = self.inputs.out_filename - if not isdefined(outputs['out_file']): - outputs['out_file'] = op.abspath(self._gen_outfilename()) + outputs["out_file"] = self.inputs.out_filename + if not isdefined(outputs["out_file"]): + outputs["out_file"] = op.abspath(self._gen_outfilename()) else: - outputs['out_file'] = op.abspath(outputs['out_file']) + outputs["out_file"] = op.abspath(outputs["out_file"]) return outputs def _gen_filename(self, name): - if name == 'out_filename': + if name == "out_filename": return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_files[0]) - return name + '_MRTransform.mif' + return name + "_MRTransform.mif" diff --git a/nipype/interfaces/mrtrix/tensors.py b/nipype/interfaces/mrtrix/tensors.py index 6751b6196d..d7cbb5f7a8 100644 --- a/nipype/interfaces/mrtrix/tensors.py +++ b/nipype/interfaces/mrtrix/tensors.py @@ -7,42 +7,46 @@ from ... import logging from ...utils.filemanip import split_filename -from ..base import (CommandLineInputSpec, CommandLine, BaseInterface, traits, - File, TraitedSpec, isdefined) -iflogger = logging.getLogger('nipype.interface') +from ..base import ( + CommandLineInputSpec, + CommandLine, + BaseInterface, + traits, + File, + TraitedSpec, + isdefined, +) + +iflogger = logging.getLogger("nipype.interface") class DWI2SphericalHarmonicsImageInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-2, - desc='Diffusion-weighted images') - out_filename = File( - genfile=True, argstr='%s', position=-1, desc='Output filename') + desc="Diffusion-weighted images", + ) + out_filename = File(genfile=True, argstr="%s", position=-1, desc="Output filename") encoding_file = File( exists=True, - argstr='-grad %s', + argstr="-grad %s", mandatory=True, position=1, - desc= - 'Gradient encoding, supplied as a 4xN text file with each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). See FSL2MRTrix' + desc="Gradient encoding, supplied as a 4xN text file with each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). 
See FSL2MRTrix", ) maximum_harmonic_order = traits.Float( - argstr='-lmax %s', - desc= - 'set the maximum harmonic order for the output series. By default, the program will use the highest possible lmax given the number of diffusion-weighted images.' + argstr="-lmax %s", + desc="set the maximum harmonic order for the output series. By default, the program will use the highest possible lmax given the number of diffusion-weighted images.", ) normalise = traits.Bool( - argstr='-normalise', - position=3, - desc="normalise the DW signal to the b=0 image") + argstr="-normalise", position=3, desc="normalise the DW signal to the b=0 image" + ) class DWI2SphericalHarmonicsImageOutputSpec(TraitedSpec): - spherical_harmonics_image = File( - exists=True, desc='Spherical harmonics image') + spherical_harmonics_image = File(exists=True, desc="Spherical harmonics image") class DWI2SphericalHarmonicsImage(CommandLine): @@ -83,110 +87,100 @@ class DWI2SphericalHarmonicsImage(CommandLine): >>> dwi2SH.inputs.encoding_file = 'encoding.txt' >>> dwi2SH.run() # doctest: +SKIP """ - _cmd = 'dwi2SH' + + _cmd = "dwi2SH" input_spec = DWI2SphericalHarmonicsImageInputSpec output_spec = DWI2SphericalHarmonicsImageOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['spherical_harmonics_image'] = self.inputs.out_filename - if not isdefined(outputs['spherical_harmonics_image']): - outputs['spherical_harmonics_image'] = op.abspath( - self._gen_outfilename()) + outputs["spherical_harmonics_image"] = self.inputs.out_filename + if not isdefined(outputs["spherical_harmonics_image"]): + outputs["spherical_harmonics_image"] = op.abspath(self._gen_outfilename()) else: - outputs['spherical_harmonics_image'] = op.abspath( - outputs['spherical_harmonics_image']) + outputs["spherical_harmonics_image"] = op.abspath( + outputs["spherical_harmonics_image"] + ) return outputs def _gen_filename(self, name): - if name == 'out_filename': + if name == "out_filename": return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) - return name + '_SH.mif' + return name + "_SH.mif" class ConstrainedSphericalDeconvolutionInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-3, - desc='diffusion-weighted image') + desc="diffusion-weighted image", + ) response_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-2, - desc= - 'the diffusion-weighted signal response function for a single fibre population (see EstimateResponse)' + desc="the diffusion-weighted signal response function for a single fibre population (see EstimateResponse)", ) - out_filename = File( - genfile=True, argstr='%s', position=-1, desc='Output filename') + out_filename = File(genfile=True, argstr="%s", position=-1, desc="Output filename") mask_image = File( exists=True, - argstr='-mask %s', + argstr="-mask %s", position=2, - desc= - 'only perform computation within the specified binary brain mask image' + desc="only perform computation within the specified binary brain mask image", ) encoding_file = File( exists=True, - argstr='-grad %s', + argstr="-grad %s", position=1, - desc= - 'Gradient encoding, supplied as a 4xN text file with each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). 
See FSL2MRTrix' + desc="Gradient encoding, supplied as a 4xN text file with each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). See FSL2MRTrix", ) filter_file = File( exists=True, - argstr='-filter %s', + argstr="-filter %s", position=-2, - desc= - 'a text file containing the filtering coefficients for each even harmonic order.' - 'the linear frequency filtering parameters used for the initial linear spherical deconvolution step (default = [ 1 1 1 0 0 ]).' + desc="a text file containing the filtering coefficients for each even harmonic order." + "the linear frequency filtering parameters used for the initial linear spherical deconvolution step (default = [ 1 1 1 0 0 ]).", ) lambda_value = traits.Float( - argstr='-lambda %s', - desc= - 'the regularisation parameter lambda that controls the strength of the constraint (default = 1.0).' + argstr="-lambda %s", + desc="the regularisation parameter lambda that controls the strength of the constraint (default = 1.0).", ) maximum_harmonic_order = traits.Int( - argstr='-lmax %s', - desc= - 'set the maximum harmonic order for the output series. By default, the program will use the highest possible lmax given the number of diffusion-weighted images.' + argstr="-lmax %s", + desc="set the maximum harmonic order for the output series. By default, the program will use the highest possible lmax given the number of diffusion-weighted images.", ) threshold_value = traits.Float( - argstr='-threshold %s', - desc= - 'the threshold below which the amplitude of the FOD is assumed to be zero, expressed as a fraction of the mean value of the initial FOD (default = 0.1)' + argstr="-threshold %s", + desc="the threshold below which the amplitude of the FOD is assumed to be zero, expressed as a fraction of the mean value of the initial FOD (default = 0.1)", ) iterations = traits.Int( - argstr='-niter %s', - desc= - 'the maximum number of iterations to perform for each voxel (default = 50)' + argstr="-niter %s", + desc="the maximum number of iterations to perform for each voxel (default = 50)", ) - debug = traits.Bool(argstr='-debug', desc='Display debugging messages.') + debug = traits.Bool(argstr="-debug", desc="Display debugging messages.") directions_file = File( exists=True, - argstr='-directions %s', + argstr="-directions %s", position=-2, - desc= - 'a text file containing the [ el az ] pairs for the directions: Specify the directions over which to apply the non-negativity constraint (by default, the built-in 300 direction set is used)' + desc="a text file containing the [ el az ] pairs for the directions: Specify the directions over which to apply the non-negativity constraint (by default, the built-in 300 direction set is used)", ) normalise = traits.Bool( - argstr='-normalise', - position=3, - desc="normalise the DW signal to the b=0 image") + argstr="-normalise", position=3, desc="normalise the DW signal to the b=0 image" + ) class ConstrainedSphericalDeconvolutionOutputSpec(TraitedSpec): - spherical_harmonics_image = File( - exists=True, desc='Spherical harmonics image') + spherical_harmonics_image = File(exists=True, desc="Spherical harmonics image") class ConstrainedSphericalDeconvolution(CommandLine): @@ -221,72 +215,70 @@ class ConstrainedSphericalDeconvolution(CommandLine): >>> csdeconv.run() # doctest: +SKIP """ - _cmd = 'csdeconv' + _cmd = "csdeconv" input_spec = ConstrainedSphericalDeconvolutionInputSpec output_spec = 
ConstrainedSphericalDeconvolutionOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['spherical_harmonics_image'] = self.inputs.out_filename - if not isdefined(outputs['spherical_harmonics_image']): - outputs['spherical_harmonics_image'] = op.abspath( - self._gen_outfilename()) + outputs["spherical_harmonics_image"] = self.inputs.out_filename + if not isdefined(outputs["spherical_harmonics_image"]): + outputs["spherical_harmonics_image"] = op.abspath(self._gen_outfilename()) else: - outputs['spherical_harmonics_image'] = op.abspath( - outputs['spherical_harmonics_image']) + outputs["spherical_harmonics_image"] = op.abspath( + outputs["spherical_harmonics_image"] + ) return outputs def _gen_filename(self, name): - if name == 'out_filename': + if name == "out_filename": return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) - return name + '_CSD.mif' + return name + "_CSD.mif" class EstimateResponseForSHInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-3, - desc='Diffusion-weighted images') + desc="Diffusion-weighted images", + ) mask_image = File( exists=True, mandatory=True, - argstr='%s', + argstr="%s", position=-2, - desc= - 'only perform computation within the specified binary brain mask image' + desc="only perform computation within the specified binary brain mask image", ) - out_filename = File( - genfile=True, argstr='%s', position=-1, desc='Output filename') + out_filename = File(genfile=True, argstr="%s", position=-1, desc="Output filename") encoding_file = File( exists=True, - argstr='-grad %s', + argstr="-grad %s", mandatory=True, position=1, - desc= - 'Gradient encoding, supplied as a 4xN text file with each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). See FSL2MRTrix' + desc="Gradient encoding, supplied as a 4xN text file with each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). See FSL2MRTrix", ) maximum_harmonic_order = traits.Int( - argstr='-lmax %s', - desc= - 'set the maximum harmonic order for the output series. By default, the program will use the highest possible lmax given the number of diffusion-weighted images.' + argstr="-lmax %s", + desc="set the maximum harmonic order for the output series. By default, the program will use the highest possible lmax given the number of diffusion-weighted images.", ) normalise = traits.Bool( - argstr='-normalise', desc='normalise the DW signal to the b=0 image') + argstr="-normalise", desc="normalise the DW signal to the b=0 image" + ) quiet = traits.Bool( - argstr='-quiet', - desc='Do not display information messages or progress status.') - debug = traits.Bool(argstr='-debug', desc='Display debugging messages.') + argstr="-quiet", desc="Do not display information messages or progress status." 
+ ) + debug = traits.Bool(argstr="-debug", desc="Display debugging messages.") class EstimateResponseForSHOutputSpec(TraitedSpec): - response = File(exists=True, desc='Spherical harmonics image') + response = File(exists=True, desc="Spherical harmonics image") class EstimateResponseForSH(CommandLine): @@ -303,28 +295,29 @@ class EstimateResponseForSH(CommandLine): >>> estresp.inputs.encoding_file = 'encoding.txt' >>> estresp.run() # doctest: +SKIP """ - _cmd = 'estimate_response' + + _cmd = "estimate_response" input_spec = EstimateResponseForSHInputSpec output_spec = EstimateResponseForSHOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['response'] = self.inputs.out_filename - if not isdefined(outputs['response']): - outputs['response'] = op.abspath(self._gen_outfilename()) + outputs["response"] = self.inputs.out_filename + if not isdefined(outputs["response"]): + outputs["response"] = op.abspath(self._gen_outfilename()) else: - outputs['response'] = op.abspath(outputs['response']) + outputs["response"] = op.abspath(outputs["response"]) return outputs def _gen_filename(self, name): - if name == 'out_filename': + if name == "out_filename": return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) - return name + '_ER.txt' + return name + "_ER.txt" def concat_files(bvec_file, bval_file, invert_x, invert_y, invert_z): @@ -334,42 +327,47 @@ def concat_files(bvec_file, bval_file, invert_x, invert_y, invert_z): bvecs = np.transpose(bvecs) if invert_x: bvecs[0, :] = -bvecs[0, :] - iflogger.info('Inverting b-vectors in the x direction') + iflogger.info("Inverting b-vectors in the x direction") if invert_y: bvecs[1, :] = -bvecs[1, :] - iflogger.info('Inverting b-vectors in the y direction') + iflogger.info("Inverting b-vectors in the y direction") if invert_z: bvecs[2, :] = -bvecs[2, :] - iflogger.info('Inverting b-vectors in the z direction') + iflogger.info("Inverting b-vectors in the z direction") iflogger.info(np.shape(bvecs)) iflogger.info(np.shape(bvals)) encoding = np.transpose(np.vstack((bvecs, bvals))) _, bvec, _ = split_filename(bvec_file) _, bval, _ = split_filename(bval_file) - out_encoding_file = bvec + '_' + bval + '.txt' + out_encoding_file = bvec + "_" + bval + ".txt" np.savetxt(out_encoding_file, encoding) return out_encoding_file class FSL2MRTrixInputSpec(TraitedSpec): bvec_file = File( - exists=True, mandatory=True, desc='FSL b-vectors file (3xN text file)') + exists=True, mandatory=True, desc="FSL b-vectors file (3xN text file)" + ) bval_file = File( - exists=True, mandatory=True, desc='FSL b-values file (1xN text file)') + exists=True, mandatory=True, desc="FSL b-values file (1xN text file)" + ) invert_x = traits.Bool( - False, usedefault=True, desc='Inverts the b-vectors along the x-axis') + False, usedefault=True, desc="Inverts the b-vectors along the x-axis" + ) invert_y = traits.Bool( - False, usedefault=True, desc='Inverts the b-vectors along the y-axis') + False, usedefault=True, desc="Inverts the b-vectors along the y-axis" + ) invert_z = traits.Bool( - False, usedefault=True, desc='Inverts the b-vectors along the z-axis') - out_encoding_file = File(genfile=True, desc='Output encoding filename') + False, usedefault=True, desc="Inverts the b-vectors along the z-axis" + ) + out_encoding_file = File(genfile=True, desc="Output encoding filename") class FSL2MRTrixOutputSpec(TraitedSpec): encoding_file = File( - desc= - 'The gradient encoding, supplied as a 4xN text file with 
each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient' - 'and b gives the b-value in units (1000 s/mm^2).') + desc="The gradient encoding, supplied as a 4xN text file with each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient" + "and b gives the b-value in units (1000 s/mm^2)." + ) class FSL2MRTrix(BaseInterface): @@ -389,23 +387,27 @@ class FSL2MRTrix(BaseInterface): >>> fsl2mrtrix.inputs.invert_y = True >>> fsl2mrtrix.run() # doctest: +SKIP """ + input_spec = FSL2MRTrixInputSpec output_spec = FSL2MRTrixOutputSpec def _run_interface(self, runtime): - encoding = concat_files(self.inputs.bvec_file, self.inputs.bval_file, - self.inputs.invert_x, self.inputs.invert_y, - self.inputs.invert_z) + encoding = concat_files( + self.inputs.bvec_file, + self.inputs.bval_file, + self.inputs.invert_x, + self.inputs.invert_y, + self.inputs.invert_z, + ) return runtime def _list_outputs(self): outputs = self.output_spec().get() - outputs['encoding_file'] = op.abspath( - self._gen_filename('out_encoding_file')) + outputs["encoding_file"] = op.abspath(self._gen_filename("out_encoding_file")) return outputs def _gen_filename(self, name): - if name == 'out_encoding_file': + if name == "out_encoding_file": return self._gen_outfilename() else: return None @@ -413,40 +415,40 @@ def _gen_filename(self, name): def _gen_outfilename(self): _, bvec, _ = split_filename(self.inputs.bvec_file) _, bval, _ = split_filename(self.inputs.bval_file) - return bvec + '_' + bval + '.txt' + return bvec + "_" + bval + ".txt" class GenerateDirectionsInputSpec(CommandLineInputSpec): num_dirs = traits.Int( mandatory=True, - argstr='%s', + argstr="%s", position=-2, - desc='the number of directions to generate.') + desc="the number of directions to generate.", + ) power = traits.Float( - argstr='-power %s', - desc='specify exponent to use for repulsion power law.') + argstr="-power %s", desc="specify exponent to use for repulsion power law." + ) niter = traits.Int( - argstr='-niter %s', - desc='specify the maximum number of iterations to perform.') - display_info = traits.Bool( - argstr='-info', desc='Display information messages.') + argstr="-niter %s", desc="specify the maximum number of iterations to perform." + ) + display_info = traits.Bool(argstr="-info", desc="Display information messages.") quiet_display = traits.Bool( - argstr='-quiet', - desc='do not display information messages or progress status.') - display_debug = traits.Bool( - argstr='-debug', desc='Display debugging messages.') + argstr="-quiet", desc="do not display information messages or progress status." 
+ ) + display_debug = traits.Bool(argstr="-debug", desc="Display debugging messages.") out_file = File( - name_source=['num_dirs'], - name_template='directions_%d.txt', - argstr='%s', + name_source=["num_dirs"], + name_template="directions_%d.txt", + argstr="%s", hash_files=False, position=-1, - desc='the text file to write the directions to, as [ az el ] pairs.') + desc="the text file to write the directions to, as [ az el ] pairs.", + ) class GenerateDirectionsOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='directions file') + out_file = File(exists=True, desc="directions file") class GenerateDirections(CommandLine): @@ -462,7 +464,7 @@ class GenerateDirections(CommandLine): >>> gendir.run() # doctest: +SKIP """ - _cmd = 'gendir' + _cmd = "gendir" input_spec = GenerateDirectionsInputSpec output_spec = GenerateDirectionsOutputSpec @@ -470,58 +472,57 @@ class GenerateDirections(CommandLine): class FindShPeaksInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-3, - desc='the input image of SH coefficients.') + desc="the input image of SH coefficients.", + ) directions_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-2, - desc='the set of directions to use as seeds for the peak finding') + desc="the set of directions to use as seeds for the peak finding", + ) peaks_image = File( exists=True, - argstr='-peaks %s', - desc= - 'the program will try to find the peaks that most closely match those in the image provided' + argstr="-peaks %s", + desc="the program will try to find the peaks that most closely match those in the image provided", ) num_peaks = traits.Int( - argstr='-num %s', desc='the number of peaks to extract (default is 3)') + argstr="-num %s", desc="the number of peaks to extract (default is 3)" + ) peak_directions = traits.List( traits.Float, - argstr='-direction %s', - sep=' ', + argstr="-direction %s", + sep=" ", minlen=2, maxlen=2, - desc= - 'phi theta. the direction of a peak to estimate. The algorithm will attempt to find the same number of peaks as have been specified using this option ' - ' phi: the azimuthal angle of the direction (in degrees). theta: the elevation angle of the direction (in degrees, from the vertical z-axis)' + desc="phi theta. the direction of a peak to estimate. The algorithm will attempt to find the same number of peaks as have been specified using this option " + " phi: the azimuthal angle of the direction (in degrees). theta: the elevation angle of the direction (in degrees, from the vertical z-axis)", ) peak_threshold = traits.Float( - argstr='-threshold %s', - desc= - 'only peak amplitudes greater than the threshold will be considered') - display_info = traits.Bool( - argstr='-info', desc='Display information messages.') + argstr="-threshold %s", + desc="only peak amplitudes greater than the threshold will be considered", + ) + display_info = traits.Bool(argstr="-info", desc="Display information messages.") quiet_display = traits.Bool( - argstr='-quiet', - desc='do not display information messages or progress status.') - display_debug = traits.Bool( - argstr='-debug', desc='Display debugging messages.') + argstr="-quiet", desc="do not display information messages or progress status." + ) + display_debug = traits.Bool(argstr="-debug", desc="Display debugging messages.") out_file = File( name_template="%s_peak_dirs.mif", keep_extension=False, - argstr='%s', + argstr="%s", hash_files=False, position=-1, - desc= - 'the output image. 
Each volume corresponds to the x, y & z component of each peak direction vector in turn', - name_source=["in_file"]) + desc="the output image. Each volume corresponds to the x, y & z component of each peak direction vector in turn", + name_source=["in_file"], + ) class FindShPeaksOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='Peak directions image') + out_file = File(exists=True, desc="Peak directions image") class FindShPeaks(CommandLine): @@ -539,7 +540,7 @@ class FindShPeaks(CommandLine): >>> shpeaks.run() # doctest: +SKIP """ - _cmd = 'find_SH_peaks' + _cmd = "find_SH_peaks" input_spec = FindShPeaksInputSpec output_spec = FindShPeaksOutputSpec @@ -547,49 +548,46 @@ class FindShPeaks(CommandLine): class Directions2AmplitudeInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-2, - desc= - 'the input directions image. Each volume corresponds to the x, y & z component of each direction vector in turn.' + desc="the input directions image. Each volume corresponds to the x, y & z component of each direction vector in turn.", ) peaks_image = File( exists=True, - argstr='-peaks %s', - desc= - 'the program will try to find the peaks that most closely match those in the image provided' + argstr="-peaks %s", + desc="the program will try to find the peaks that most closely match those in the image provided", ) num_peaks = traits.Int( - argstr='-num %s', desc='the number of peaks to extract (default is 3)') + argstr="-num %s", desc="the number of peaks to extract (default is 3)" + ) peak_directions = traits.List( traits.Float, - argstr='-direction %s', - sep=' ', + argstr="-direction %s", + sep=" ", minlen=2, maxlen=2, - desc= - 'phi theta. the direction of a peak to estimate. The algorithm will attempt to find the same number of peaks as have been specified using this option ' - ' phi: the azimuthal angle of the direction (in degrees). theta: the elevation angle of the direction (in degrees, from the vertical z-axis)' + desc="phi theta. the direction of a peak to estimate. The algorithm will attempt to find the same number of peaks as have been specified using this option " + " phi: the azimuthal angle of the direction (in degrees). theta: the elevation angle of the direction (in degrees, from the vertical z-axis)", ) - display_info = traits.Bool( - argstr='-info', desc='Display information messages.') + display_info = traits.Bool(argstr="-info", desc="Display information messages.") quiet_display = traits.Bool( - argstr='-quiet', - desc='do not display information messages or progress status.') - display_debug = traits.Bool( - argstr='-debug', desc='Display debugging messages.') + argstr="-quiet", desc="do not display information messages or progress status." 
+ ) + display_debug = traits.Bool(argstr="-debug", desc="Display debugging messages.") out_file = File( name_template="%s_amplitudes.mif", keep_extension=False, - argstr='%s', + argstr="%s", hash_files=False, position=-1, - desc='the output amplitudes image', - name_source=["in_file"]) + desc="the output amplitudes image", + name_source=["in_file"], + ) class Directions2AmplitudeOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='amplitudes image') + out_file = File(exists=True, desc="amplitudes image") class Directions2Amplitude(CommandLine): @@ -605,6 +603,6 @@ class Directions2Amplitude(CommandLine): >>> amplitudes.run() # doctest: +SKIP """ - _cmd = 'dir2amp' + _cmd = "dir2amp" input_spec = Directions2AmplitudeInputSpec output_spec = Directions2AmplitudeOutputSpec diff --git a/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py b/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py index ba11991004..cee549fae9 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py @@ -4,66 +4,31 @@ def test_ConstrainedSphericalDeconvolution_inputs(): input_map = dict( - args=dict(argstr='%s', ), - debug=dict(argstr='-debug', ), - directions_file=dict( - argstr='-directions %s', - extensions=None, - position=-2, - ), - encoding_file=dict( - argstr='-grad %s', - extensions=None, - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - filter_file=dict( - argstr='-filter %s', - extensions=None, - position=-2, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), - iterations=dict(argstr='-niter %s', ), - lambda_value=dict(argstr='-lambda %s', ), - mask_image=dict( - argstr='-mask %s', - extensions=None, - position=2, - ), - maximum_harmonic_order=dict(argstr='-lmax %s', ), - normalise=dict( - argstr='-normalise', - position=3, - ), - out_filename=dict( - argstr='%s', - extensions=None, - genfile=True, - position=-1, - ), - response_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - threshold_value=dict(argstr='-threshold %s', ), + args=dict(argstr="%s",), + debug=dict(argstr="-debug",), + directions_file=dict(argstr="-directions %s", extensions=None, position=-2,), + encoding_file=dict(argstr="-grad %s", extensions=None, position=1,), + environ=dict(nohash=True, usedefault=True,), + filter_file=dict(argstr="-filter %s", extensions=None, position=-2,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + iterations=dict(argstr="-niter %s",), + lambda_value=dict(argstr="-lambda %s",), + mask_image=dict(argstr="-mask %s", extensions=None, position=2,), + maximum_harmonic_order=dict(argstr="-lmax %s",), + normalise=dict(argstr="-normalise", position=3,), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), + response_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + threshold_value=dict(argstr="-threshold %s",), ) inputs = ConstrainedSphericalDeconvolution.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ConstrainedSphericalDeconvolution_outputs(): - output_map = dict(spherical_harmonics_image=dict(extensions=None, ), ) + output_map = dict(spherical_harmonics_image=dict(extensions=None,),) outputs = ConstrainedSphericalDeconvolution.output_spec() 
for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py b/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py index 12246fa6c8..7bd1399f30 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py @@ -4,42 +4,25 @@ def test_DWI2SphericalHarmonicsImage_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), encoding_file=dict( - argstr='-grad %s', - extensions=None, - mandatory=True, - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - maximum_harmonic_order=dict(argstr='-lmax %s', ), - normalise=dict( - argstr='-normalise', - position=3, - ), - out_filename=dict( - argstr='%s', - extensions=None, - genfile=True, - position=-1, + argstr="-grad %s", extensions=None, mandatory=True, position=1, ), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + maximum_harmonic_order=dict(argstr="-lmax %s",), + normalise=dict(argstr="-normalise", position=3,), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), ) inputs = DWI2SphericalHarmonicsImage.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DWI2SphericalHarmonicsImage_outputs(): - output_map = dict(spherical_harmonics_image=dict(extensions=None, ), ) + output_map = dict(spherical_harmonics_image=dict(extensions=None,),) outputs = DWI2SphericalHarmonicsImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py b/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py index 69f3be425f..476f68e9ef 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py @@ -4,55 +4,32 @@ def test_DWI2Tensor_inputs(): input_map = dict( - args=dict(argstr='%s', ), - debug=dict( - argstr='-debug', - position=1, - ), - encoding_file=dict( - argstr='-grad %s', - extensions=None, - position=2, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - ignore_slice_by_volume=dict( - argstr='-ignoreslices %s', - position=2, - sep=' ', - ), - ignore_volumes=dict( - argstr='-ignorevolumes %s', - position=2, - sep=' ', - ), - in_file=dict( - argstr='%s', - mandatory=True, - position=-2, - ), + args=dict(argstr="%s",), + debug=dict(argstr="-debug", position=1,), + encoding_file=dict(argstr="-grad %s", extensions=None, position=2,), + environ=dict(nohash=True, usedefault=True,), + ignore_slice_by_volume=dict(argstr="-ignoreslices %s", position=2, sep=" ",), + ignore_volumes=dict(argstr="-ignorevolumes %s", position=2, sep=" ",), + in_file=dict(argstr="%s", mandatory=True, position=-2,), out_filename=dict( - argstr='%s', + argstr="%s", extensions=None, - name_source='in_file', - name_template='%s_tensor.mif', - output_name='tensor', + name_source="in_file", + name_template="%s_tensor.mif", + output_name="tensor", position=-1, ), - quiet=dict( - argstr='-quiet', - position=1, - ), + quiet=dict(argstr="-quiet", position=1,), ) inputs = DWI2Tensor.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert 
getattr(inputs.traits()[key], metakey) == value + + def test_DWI2Tensor_outputs(): - output_map = dict(tensor=dict(extensions=None, ), ) + output_map = dict(tensor=dict(extensions=None,),) outputs = DWI2Tensor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py index 35b5ec1063..66122fcfdb 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py @@ -4,126 +4,84 @@ def test_DiffusionTensorStreamlineTrack_inputs(): input_map = dict( - args=dict(argstr='%s', ), - cutoff_value=dict( - argstr='-cutoff %s', - units='NA', - ), - desired_number_of_tracks=dict(argstr='-number %d', ), - do_not_precompute=dict(argstr='-noprecomputed', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + cutoff_value=dict(argstr="-cutoff %s", units="NA",), + desired_number_of_tracks=dict(argstr="-number %d",), + do_not_precompute=dict(argstr="-noprecomputed",), + environ=dict(nohash=True, usedefault=True,), exclude_file=dict( - argstr='-exclude %s', - extensions=None, - xor=['exclude_file', 'exclude_spec'], + argstr="-exclude %s", extensions=None, xor=["exclude_file", "exclude_spec"], ), exclude_spec=dict( - argstr='-exclude %s', + argstr="-exclude %s", position=2, - sep=',', - units='mm', - xor=['exclude_file', 'exclude_spec'], + sep=",", + units="mm", + xor=["exclude_file", "exclude_spec"], ), gradient_encoding_file=dict( - argstr='-grad %s', - extensions=None, - mandatory=True, - position=-2, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, + argstr="-grad %s", extensions=None, mandatory=True, position=-2, ), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), include_file=dict( - argstr='-include %s', - extensions=None, - xor=['include_file', 'include_spec'], + argstr="-include %s", extensions=None, xor=["include_file", "include_spec"], ), include_spec=dict( - argstr='-include %s', + argstr="-include %s", position=2, - sep=',', - units='mm', - xor=['include_file', 'include_spec'], - ), - initial_cutoff_value=dict( - argstr='-initcutoff %s', - units='NA', - ), - initial_direction=dict( - argstr='-initdirection %s', - units='voxels', - ), - inputmodel=dict( - argstr='%s', - position=-3, - usedefault=True, + sep=",", + units="mm", + xor=["include_file", "include_spec"], ), + initial_cutoff_value=dict(argstr="-initcutoff %s", units="NA",), + initial_direction=dict(argstr="-initdirection %s", units="voxels",), + inputmodel=dict(argstr="%s", position=-3, usedefault=True,), mask_file=dict( - argstr='-mask %s', - extensions=None, - xor=['mask_file', 'mask_spec'], + argstr="-mask %s", extensions=None, xor=["mask_file", "mask_spec"], ), mask_spec=dict( - argstr='-mask %s', + argstr="-mask %s", position=2, - sep=',', - units='mm', - xor=['mask_file', 'mask_spec'], - ), - maximum_number_of_tracks=dict(argstr='-maxnum %d', ), - maximum_tract_length=dict( - argstr='-length %s', - units='mm', - ), - minimum_radius_of_curvature=dict( - argstr='-curvature %s', - units='mm', - ), - minimum_tract_length=dict( - argstr='-minlength %s', - units='mm', - ), - no_mask_interpolation=dict(argstr='-nomaskinterp', ), + sep=",", + units="mm", + xor=["mask_file", "mask_spec"], + ), + maximum_number_of_tracks=dict(argstr="-maxnum %d",), + 
maximum_tract_length=dict(argstr="-length %s", units="mm",), + minimum_radius_of_curvature=dict(argstr="-curvature %s", units="mm",), + minimum_tract_length=dict(argstr="-minlength %s", units="mm",), + no_mask_interpolation=dict(argstr="-nomaskinterp",), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, - name_source=['in_file'], - name_template='%s_tracked.tck', - output_name='tracked', + name_source=["in_file"], + name_template="%s_tracked.tck", + output_name="tracked", position=-1, ), seed_file=dict( - argstr='-seed %s', - extensions=None, - xor=['seed_file', 'seed_spec'], + argstr="-seed %s", extensions=None, xor=["seed_file", "seed_spec"], ), seed_spec=dict( - argstr='-seed %s', + argstr="-seed %s", position=2, - sep=',', - units='mm', - xor=['seed_file', 'seed_spec'], - ), - step_size=dict( - argstr='-step %s', - units='mm', + sep=",", + units="mm", + xor=["seed_file", "seed_spec"], ), - stop=dict(argstr='-stop', ), - unidirectional=dict(argstr='-unidirectional', ), + step_size=dict(argstr="-step %s", units="mm",), + stop=dict(argstr="-stop",), + unidirectional=dict(argstr="-unidirectional",), ) inputs = DiffusionTensorStreamlineTrack.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DiffusionTensorStreamlineTrack_outputs(): - output_map = dict(tracked=dict(extensions=None, ), ) + output_map = dict(tracked=dict(extensions=None,),) outputs = DiffusionTensorStreamlineTrack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py b/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py index a68cf10cd3..66c75db7dc 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py @@ -4,46 +4,34 @@ def test_Directions2Amplitude_inputs(): input_map = dict( - args=dict(argstr='%s', ), - display_debug=dict(argstr='-debug', ), - display_info=dict(argstr='-info', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - num_peaks=dict(argstr='-num %s', ), + args=dict(argstr="%s",), + display_debug=dict(argstr="-debug",), + display_info=dict(argstr="-info",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + num_peaks=dict(argstr="-num %s",), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, hash_files=False, keep_extension=False, - name_source=['in_file'], - name_template='%s_amplitudes.mif', + name_source=["in_file"], + name_template="%s_amplitudes.mif", position=-1, ), - peak_directions=dict( - argstr='-direction %s', - sep=' ', - ), - peaks_image=dict( - argstr='-peaks %s', - extensions=None, - ), - quiet_display=dict(argstr='-quiet', ), + peak_directions=dict(argstr="-direction %s", sep=" ",), + peaks_image=dict(argstr="-peaks %s", extensions=None,), + quiet_display=dict(argstr="-quiet",), ) inputs = Directions2Amplitude.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Directions2Amplitude_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Directions2Amplitude.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Erode.py b/nipype/interfaces/mrtrix/tests/test_auto_Erode.py index 10f6d464de..cb038461ad 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Erode.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Erode.py @@ -4,44 +4,24 @@ def test_Erode_inputs(): input_map = dict( - args=dict(argstr='%s', ), - debug=dict( - argstr='-debug', - position=1, - ), - dilate=dict( - argstr='-dilate', - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - number_of_passes=dict(argstr='-npass %s', ), - out_filename=dict( - argstr='%s', - extensions=None, - genfile=True, - position=-1, - ), - quiet=dict( - argstr='-quiet', - position=1, - ), + args=dict(argstr="%s",), + debug=dict(argstr="-debug", position=1,), + dilate=dict(argstr="-dilate", position=1,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + number_of_passes=dict(argstr="-npass %s",), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), + quiet=dict(argstr="-quiet", position=1,), ) inputs = Erode.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Erode_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Erode.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py b/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py index 5eb68f522a..4040fe9479 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py @@ -4,47 +4,28 @@ def test_EstimateResponseForSH_inputs(): input_map = dict( - args=dict(argstr='%s', ), - debug=dict(argstr='-debug', ), + args=dict(argstr="%s",), + debug=dict(argstr="-debug",), encoding_file=dict( - argstr='-grad %s', - extensions=None, - mandatory=True, - position=1, + argstr="-grad %s", extensions=None, mandatory=True, position=1, ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), - mask_image=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - maximum_harmonic_order=dict(argstr='-lmax %s', ), - normalise=dict(argstr='-normalise', ), - out_filename=dict( - argstr='%s', - extensions=None, - genfile=True, - position=-1, - ), - quiet=dict(argstr='-quiet', ), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + mask_image=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + maximum_harmonic_order=dict(argstr="-lmax %s",), + normalise=dict(argstr="-normalise",), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), + quiet=dict(argstr="-quiet",), ) inputs = EstimateResponseForSH.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_EstimateResponseForSH_outputs(): - output_map = dict(response=dict(extensions=None, ), ) + output_map = dict(response=dict(extensions=None,),) outputs = EstimateResponseForSH.output_spec() 
for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py b/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py index d5b8bb8f76..4772abcbc4 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py @@ -4,29 +4,22 @@ def test_FSL2MRTrix_inputs(): input_map = dict( - bval_file=dict( - extensions=None, - mandatory=True, - ), - bvec_file=dict( - extensions=None, - mandatory=True, - ), - invert_x=dict(usedefault=True, ), - invert_y=dict(usedefault=True, ), - invert_z=dict(usedefault=True, ), - out_encoding_file=dict( - extensions=None, - genfile=True, - ), + bval_file=dict(extensions=None, mandatory=True,), + bvec_file=dict(extensions=None, mandatory=True,), + invert_x=dict(usedefault=True,), + invert_y=dict(usedefault=True,), + invert_z=dict(usedefault=True,), + out_encoding_file=dict(extensions=None, genfile=True,), ) inputs = FSL2MRTrix.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FSL2MRTrix_outputs(): - output_map = dict(encoding_file=dict(extensions=None, ), ) + output_map = dict(encoding_file=dict(extensions=None,),) outputs = FSL2MRTrix.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py b/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py index bda0a2059b..0ce949eb41 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py @@ -4,71 +4,52 @@ def test_FilterTracks_inputs(): input_map = dict( - args=dict(argstr='%s', ), - debug=dict( - argstr='-debug', - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + debug=dict(argstr="-debug", position=1,), + environ=dict(nohash=True, usedefault=True,), exclude_file=dict( - argstr='-exclude %s', - extensions=None, - xor=['exclude_file', 'exclude_spec'], + argstr="-exclude %s", extensions=None, xor=["exclude_file", "exclude_spec"], ), exclude_spec=dict( - argstr='-exclude %s', + argstr="-exclude %s", position=2, - sep=',', - units='mm', - xor=['exclude_file', 'exclude_spec'], - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, + sep=",", + units="mm", + xor=["exclude_file", "exclude_spec"], ), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), include_file=dict( - argstr='-include %s', - extensions=None, - xor=['include_file', 'include_spec'], + argstr="-include %s", extensions=None, xor=["include_file", "include_spec"], ), include_spec=dict( - argstr='-include %s', + argstr="-include %s", position=2, - sep=',', - units='mm', - xor=['include_file', 'include_spec'], - ), - invert=dict(argstr='-invert', ), - minimum_tract_length=dict( - argstr='-minlength %s', - units='mm', + sep=",", + units="mm", + xor=["include_file", "include_spec"], ), - no_mask_interpolation=dict(argstr='-nomaskinterp', ), + invert=dict(argstr="-invert",), + minimum_tract_length=dict(argstr="-minlength %s", units="mm",), + no_mask_interpolation=dict(argstr="-nomaskinterp",), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, hash_files=False, - name_source=['in_file'], - name_template='%s_filt', + name_source=["in_file"], + name_template="%s_filt", position=-1, ), - quiet=dict( - argstr='-quiet', - position=1, - ), + quiet=dict(argstr="-quiet", 
position=1,), ) inputs = FilterTracks.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FilterTracks_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = FilterTracks.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py b/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py index 010b912752..c8e6a9cb3d 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py @@ -4,53 +4,38 @@ def test_FindShPeaks_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), directions_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - display_debug=dict(argstr='-debug', ), - display_info=dict(argstr='-info', ), - environ=dict( - nohash=True, - usedefault=True, + argstr="%s", extensions=None, mandatory=True, position=-2, ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), - num_peaks=dict(argstr='-num %s', ), + display_debug=dict(argstr="-debug",), + display_info=dict(argstr="-info",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + num_peaks=dict(argstr="-num %s",), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, hash_files=False, keep_extension=False, - name_source=['in_file'], - name_template='%s_peak_dirs.mif', + name_source=["in_file"], + name_template="%s_peak_dirs.mif", position=-1, ), - peak_directions=dict( - argstr='-direction %s', - sep=' ', - ), - peak_threshold=dict(argstr='-threshold %s', ), - peaks_image=dict( - argstr='-peaks %s', - extensions=None, - ), - quiet_display=dict(argstr='-quiet', ), + peak_directions=dict(argstr="-direction %s", sep=" ",), + peak_threshold=dict(argstr="-threshold %s",), + peaks_image=dict(argstr="-peaks %s", extensions=None,), + quiet_display=dict(argstr="-quiet",), ) inputs = FindShPeaks.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FindShPeaks_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = FindShPeaks.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py b/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py index ae8f2a3cfa..f980f9386b 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py @@ -4,37 +4,32 @@ def test_GenerateDirections_inputs(): input_map = dict( - args=dict(argstr='%s', ), - display_debug=dict(argstr='-debug', ), - display_info=dict(argstr='-info', ), - environ=dict( - nohash=True, - usedefault=True, - ), - niter=dict(argstr='-niter %s', ), - num_dirs=dict( - argstr='%s', - mandatory=True, - position=-2, - ), + args=dict(argstr="%s",), + display_debug=dict(argstr="-debug",), + display_info=dict(argstr="-info",), + environ=dict(nohash=True, usedefault=True,), + niter=dict(argstr="-niter %s",), + num_dirs=dict(argstr="%s", mandatory=True, position=-2,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, 
hash_files=False, - name_source=['num_dirs'], - name_template='directions_%d.txt', + name_source=["num_dirs"], + name_template="directions_%d.txt", position=-1, ), - power=dict(argstr='-power %s', ), - quiet_display=dict(argstr='-quiet', ), + power=dict(argstr="-power %s",), + quiet_display=dict(argstr="-quiet",), ) inputs = GenerateDirections.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_GenerateDirections_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = GenerateDirections.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py b/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py index f558927eab..dc58ac51d2 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py @@ -4,35 +4,16 @@ def test_GenerateWhiteMatterMask_inputs(): input_map = dict( - args=dict(argstr='%s', ), - binary_mask=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s",), + binary_mask=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), encoding_file=dict( - argstr='-grad %s', - extensions=None, - mandatory=True, - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, + argstr="-grad %s", extensions=None, mandatory=True, position=1, ), - noise_level_margin=dict(argstr='-margin %s', ), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + noise_level_margin=dict(argstr="-margin %s",), out_WMProb_filename=dict( - argstr='%s', - extensions=None, - genfile=True, - position=-1, + argstr="%s", extensions=None, genfile=True, position=-1, ), ) inputs = GenerateWhiteMatterMask.input_spec() @@ -40,8 +21,10 @@ def test_GenerateWhiteMatterMask_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_GenerateWhiteMatterMask_outputs(): - output_map = dict(WMprobabilitymap=dict(extensions=None, ), ) + output_map = dict(WMprobabilitymap=dict(extensions=None,),) outputs = GenerateWhiteMatterMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py b/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py index d2b9a02030..792beb6aa0 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py @@ -4,75 +4,30 @@ def test_MRConvert_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - extension=dict( - position=2, - usedefault=True, - ), - extract_at_axis=dict( - argstr='-coord %s', - position=1, - ), - extract_at_coordinate=dict( - argstr='%s', - position=2, - sep=',', - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - layout=dict( - argstr='-output %s', - position=2, - ), - offset_bias=dict( - argstr='-scale %d', - position=3, - units='mm', - ), - out_filename=dict( - argstr='%s', - extensions=None, - genfile=True, - position=-1, - ), - output_datatype=dict( - 
argstr='-output %s', - position=2, - ), - prs=dict( - argstr='-prs', - position=3, - ), - replace_NaN_with_zero=dict( - argstr='-zero', - position=3, - ), - resample=dict( - argstr='-scale %d', - position=3, - units='mm', - ), - voxel_dims=dict( - argstr='-vox %s', - position=3, - sep=',', - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + extension=dict(position=2, usedefault=True,), + extract_at_axis=dict(argstr="-coord %s", position=1,), + extract_at_coordinate=dict(argstr="%s", position=2, sep=",",), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + layout=dict(argstr="-output %s", position=2,), + offset_bias=dict(argstr="-scale %d", position=3, units="mm",), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), + output_datatype=dict(argstr="-output %s", position=2,), + prs=dict(argstr="-prs", position=3,), + replace_NaN_with_zero=dict(argstr="-zero", position=3,), + resample=dict(argstr="-scale %d", position=3, units="mm",), + voxel_dims=dict(argstr="-vox %s", position=3, sep=",",), ) inputs = MRConvert.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MRConvert_outputs(): - output_map = dict(converted=dict(extensions=None, ), ) + output_map = dict(converted=dict(extensions=None,),) outputs = MRConvert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py b/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py index fa9526c037..5525ef1130 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py @@ -4,38 +4,22 @@ def test_MRMultiply_inputs(): input_map = dict( - args=dict(argstr='%s', ), - debug=dict( - argstr='-debug', - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr='%s', - mandatory=True, - position=-2, - ), - out_filename=dict( - argstr='%s', - extensions=None, - genfile=True, - position=-1, - ), - quiet=dict( - argstr='-quiet', - position=1, - ), + args=dict(argstr="%s",), + debug=dict(argstr="-debug", position=1,), + environ=dict(nohash=True, usedefault=True,), + in_files=dict(argstr="%s", mandatory=True, position=-2,), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), + quiet=dict(argstr="-quiet", position=1,), ) inputs = MRMultiply.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MRMultiply_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MRMultiply.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py index d41758e65e..60e0f452ac 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py @@ -4,65 +4,28 @@ def test_MRTransform_inputs(): input_map = dict( - args=dict(argstr='%s', ), - debug=dict( - argstr='-debug', - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - flip_x=dict( - argstr='-flipx', - position=1, - ), - in_files=dict( - argstr='%s', - mandatory=True, - position=-2, - ), - invert=dict( - argstr='-inverse', - 
position=1, - ), - out_filename=dict( - argstr='%s', - extensions=None, - genfile=True, - position=-1, - ), - quiet=dict( - argstr='-quiet', - position=1, - ), - reference_image=dict( - argstr='-reference %s', - extensions=None, - position=1, - ), - replace_transform=dict( - argstr='-replace', - position=1, - ), - template_image=dict( - argstr='-template %s', - extensions=None, - position=1, - ), - transformation_file=dict( - argstr='-transform %s', - extensions=None, - position=1, - ), + args=dict(argstr="%s",), + debug=dict(argstr="-debug", position=1,), + environ=dict(nohash=True, usedefault=True,), + flip_x=dict(argstr="-flipx", position=1,), + in_files=dict(argstr="%s", mandatory=True, position=-2,), + invert=dict(argstr="-inverse", position=1,), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), + quiet=dict(argstr="-quiet", position=1,), + reference_image=dict(argstr="-reference %s", extensions=None, position=1,), + replace_transform=dict(argstr="-replace", position=1,), + template_image=dict(argstr="-template %s", extensions=None, position=1,), + transformation_file=dict(argstr="-transform %s", extensions=None, position=1,), ) inputs = MRTransform.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MRTransform_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MRTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py index 130436fae6..05f80b4646 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py @@ -4,26 +4,21 @@ def test_MRTrix2TrackVis_inputs(): input_map = dict( - image_file=dict(extensions=None, ), - in_file=dict( - extensions=None, - mandatory=True, - ), - matrix_file=dict(extensions=None, ), - out_filename=dict( - extensions=None, - genfile=True, - usedefault=True, - ), - registration_image_file=dict(extensions=None, ), + image_file=dict(extensions=None,), + in_file=dict(extensions=None, mandatory=True,), + matrix_file=dict(extensions=None,), + out_filename=dict(extensions=None, genfile=True, usedefault=True,), + registration_image_file=dict(extensions=None,), ) inputs = MRTrix2TrackVis.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MRTrix2TrackVis_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MRTrix2TrackVis.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py index 1ca965b012..5e3fd2882e 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py @@ -4,23 +4,17 @@ def test_MRTrixInfo_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, 
position=-2,), ) inputs = MRTrixInfo.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MRTrixInfo_outputs(): output_map = dict() outputs = MRTrixInfo.output_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py index dd8b6066c3..711191bd16 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py @@ -4,30 +4,19 @@ def test_MRTrixViewer_inputs(): input_map = dict( - args=dict(argstr='%s', ), - debug=dict( - argstr='-debug', - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr='%s', - mandatory=True, - position=-2, - ), - quiet=dict( - argstr='-quiet', - position=1, - ), + args=dict(argstr="%s",), + debug=dict(argstr="-debug", position=1,), + environ=dict(nohash=True, usedefault=True,), + in_files=dict(argstr="%s", mandatory=True, position=-2,), + quiet=dict(argstr="-quiet", position=1,), ) inputs = MRTrixViewer.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MRTrixViewer_outputs(): output_map = dict() outputs = MRTrixViewer.output_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py b/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py index 68e255e9d3..0b3f38dcbc 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py @@ -4,39 +4,22 @@ def test_MedianFilter3D_inputs(): input_map = dict( - args=dict(argstr='%s', ), - debug=dict( - argstr='-debug', - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - out_filename=dict( - argstr='%s', - extensions=None, - genfile=True, - position=-1, - ), - quiet=dict( - argstr='-quiet', - position=1, - ), + args=dict(argstr="%s",), + debug=dict(argstr="-debug", position=1,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), + quiet=dict(argstr="-quiet", position=1,), ) inputs = MedianFilter3D.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MedianFilter3D_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MedianFilter3D.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py index 0dd42aab78..e640da1306 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py @@ -4,121 +4,82 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): input_map = dict( - args=dict(argstr='%s', ), - cutoff_value=dict( - argstr='-cutoff %s', - units='NA', - ), - 
desired_number_of_tracks=dict(argstr='-number %d', ), - do_not_precompute=dict(argstr='-noprecomputed', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + cutoff_value=dict(argstr="-cutoff %s", units="NA",), + desired_number_of_tracks=dict(argstr="-number %d",), + do_not_precompute=dict(argstr="-noprecomputed",), + environ=dict(nohash=True, usedefault=True,), exclude_file=dict( - argstr='-exclude %s', - extensions=None, - xor=['exclude_file', 'exclude_spec'], + argstr="-exclude %s", extensions=None, xor=["exclude_file", "exclude_spec"], ), exclude_spec=dict( - argstr='-exclude %s', + argstr="-exclude %s", position=2, - sep=',', - units='mm', - xor=['exclude_file', 'exclude_spec'], - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, + sep=",", + units="mm", + xor=["exclude_file", "exclude_spec"], ), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), include_file=dict( - argstr='-include %s', - extensions=None, - xor=['include_file', 'include_spec'], + argstr="-include %s", extensions=None, xor=["include_file", "include_spec"], ), include_spec=dict( - argstr='-include %s', + argstr="-include %s", position=2, - sep=',', - units='mm', - xor=['include_file', 'include_spec'], - ), - initial_cutoff_value=dict( - argstr='-initcutoff %s', - units='NA', - ), - initial_direction=dict( - argstr='-initdirection %s', - units='voxels', - ), - inputmodel=dict( - argstr='%s', - position=-3, - usedefault=True, + sep=",", + units="mm", + xor=["include_file", "include_spec"], ), + initial_cutoff_value=dict(argstr="-initcutoff %s", units="NA",), + initial_direction=dict(argstr="-initdirection %s", units="voxels",), + inputmodel=dict(argstr="%s", position=-3, usedefault=True,), mask_file=dict( - argstr='-mask %s', - extensions=None, - xor=['mask_file', 'mask_spec'], + argstr="-mask %s", extensions=None, xor=["mask_file", "mask_spec"], ), mask_spec=dict( - argstr='-mask %s', + argstr="-mask %s", position=2, - sep=',', - units='mm', - xor=['mask_file', 'mask_spec'], - ), - maximum_number_of_tracks=dict(argstr='-maxnum %d', ), - maximum_number_of_trials=dict(argstr='-trials %s', ), - maximum_tract_length=dict( - argstr='-length %s', - units='mm', - ), - minimum_radius_of_curvature=dict( - argstr='-curvature %s', - units='mm', - ), - minimum_tract_length=dict( - argstr='-minlength %s', - units='mm', - ), - no_mask_interpolation=dict(argstr='-nomaskinterp', ), + sep=",", + units="mm", + xor=["mask_file", "mask_spec"], + ), + maximum_number_of_tracks=dict(argstr="-maxnum %d",), + maximum_number_of_trials=dict(argstr="-trials %s",), + maximum_tract_length=dict(argstr="-length %s", units="mm",), + minimum_radius_of_curvature=dict(argstr="-curvature %s", units="mm",), + minimum_tract_length=dict(argstr="-minlength %s", units="mm",), + no_mask_interpolation=dict(argstr="-nomaskinterp",), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, - name_source=['in_file'], - name_template='%s_tracked.tck', - output_name='tracked', + name_source=["in_file"], + name_template="%s_tracked.tck", + output_name="tracked", position=-1, ), seed_file=dict( - argstr='-seed %s', - extensions=None, - xor=['seed_file', 'seed_spec'], + argstr="-seed %s", extensions=None, xor=["seed_file", "seed_spec"], ), seed_spec=dict( - argstr='-seed %s', + argstr="-seed %s", position=2, - sep=',', - units='mm', - xor=['seed_file', 'seed_spec'], - ), - step_size=dict( - argstr='-step %s', - units='mm', + sep=",", + units="mm", + xor=["seed_file", 
"seed_spec"], ), - stop=dict(argstr='-stop', ), - unidirectional=dict(argstr='-unidirectional', ), + step_size=dict(argstr="-step %s", units="mm",), + stop=dict(argstr="-stop",), + unidirectional=dict(argstr="-unidirectional",), ) inputs = ProbabilisticSphericallyDeconvolutedStreamlineTrack.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_outputs(): - output_map = dict(tracked=dict(extensions=None, ), ) + output_map = dict(tracked=dict(extensions=None,),) outputs = ProbabilisticSphericallyDeconvolutedStreamlineTrack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py index 3a1c806453..bc32741331 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py @@ -4,120 +4,81 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): input_map = dict( - args=dict(argstr='%s', ), - cutoff_value=dict( - argstr='-cutoff %s', - units='NA', - ), - desired_number_of_tracks=dict(argstr='-number %d', ), - do_not_precompute=dict(argstr='-noprecomputed', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + cutoff_value=dict(argstr="-cutoff %s", units="NA",), + desired_number_of_tracks=dict(argstr="-number %d",), + do_not_precompute=dict(argstr="-noprecomputed",), + environ=dict(nohash=True, usedefault=True,), exclude_file=dict( - argstr='-exclude %s', - extensions=None, - xor=['exclude_file', 'exclude_spec'], + argstr="-exclude %s", extensions=None, xor=["exclude_file", "exclude_spec"], ), exclude_spec=dict( - argstr='-exclude %s', + argstr="-exclude %s", position=2, - sep=',', - units='mm', - xor=['exclude_file', 'exclude_spec'], - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, + sep=",", + units="mm", + xor=["exclude_file", "exclude_spec"], ), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), include_file=dict( - argstr='-include %s', - extensions=None, - xor=['include_file', 'include_spec'], + argstr="-include %s", extensions=None, xor=["include_file", "include_spec"], ), include_spec=dict( - argstr='-include %s', + argstr="-include %s", position=2, - sep=',', - units='mm', - xor=['include_file', 'include_spec'], - ), - initial_cutoff_value=dict( - argstr='-initcutoff %s', - units='NA', - ), - initial_direction=dict( - argstr='-initdirection %s', - units='voxels', - ), - inputmodel=dict( - argstr='%s', - position=-3, - usedefault=True, + sep=",", + units="mm", + xor=["include_file", "include_spec"], ), + initial_cutoff_value=dict(argstr="-initcutoff %s", units="NA",), + initial_direction=dict(argstr="-initdirection %s", units="voxels",), + inputmodel=dict(argstr="%s", position=-3, usedefault=True,), mask_file=dict( - argstr='-mask %s', - extensions=None, - xor=['mask_file', 'mask_spec'], + argstr="-mask %s", extensions=None, xor=["mask_file", "mask_spec"], ), mask_spec=dict( - argstr='-mask %s', + argstr="-mask %s", position=2, - sep=',', - units='mm', - xor=['mask_file', 'mask_spec'], - ), - maximum_number_of_tracks=dict(argstr='-maxnum %d', ), - maximum_tract_length=dict( - argstr='-length %s', - units='mm', - ), - 
minimum_radius_of_curvature=dict( - argstr='-curvature %s', - units='mm', - ), - minimum_tract_length=dict( - argstr='-minlength %s', - units='mm', - ), - no_mask_interpolation=dict(argstr='-nomaskinterp', ), + sep=",", + units="mm", + xor=["mask_file", "mask_spec"], + ), + maximum_number_of_tracks=dict(argstr="-maxnum %d",), + maximum_tract_length=dict(argstr="-length %s", units="mm",), + minimum_radius_of_curvature=dict(argstr="-curvature %s", units="mm",), + minimum_tract_length=dict(argstr="-minlength %s", units="mm",), + no_mask_interpolation=dict(argstr="-nomaskinterp",), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, - name_source=['in_file'], - name_template='%s_tracked.tck', - output_name='tracked', + name_source=["in_file"], + name_template="%s_tracked.tck", + output_name="tracked", position=-1, ), seed_file=dict( - argstr='-seed %s', - extensions=None, - xor=['seed_file', 'seed_spec'], + argstr="-seed %s", extensions=None, xor=["seed_file", "seed_spec"], ), seed_spec=dict( - argstr='-seed %s', + argstr="-seed %s", position=2, - sep=',', - units='mm', - xor=['seed_file', 'seed_spec'], - ), - step_size=dict( - argstr='-step %s', - units='mm', + sep=",", + units="mm", + xor=["seed_file", "seed_spec"], ), - stop=dict(argstr='-stop', ), - unidirectional=dict(argstr='-unidirectional', ), + step_size=dict(argstr="-step %s", units="mm",), + stop=dict(argstr="-stop",), + unidirectional=dict(argstr="-unidirectional",), ) inputs = SphericallyDeconvolutedStreamlineTrack.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SphericallyDeconvolutedStreamlineTrack_outputs(): - output_map = dict(tracked=dict(extensions=None, ), ) + output_map = dict(tracked=dict(extensions=None,),) outputs = SphericallyDeconvolutedStreamlineTrack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py index 5515305a1f..bf58f3fcd0 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py @@ -4,120 +4,81 @@ def test_StreamlineTrack_inputs(): input_map = dict( - args=dict(argstr='%s', ), - cutoff_value=dict( - argstr='-cutoff %s', - units='NA', - ), - desired_number_of_tracks=dict(argstr='-number %d', ), - do_not_precompute=dict(argstr='-noprecomputed', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + cutoff_value=dict(argstr="-cutoff %s", units="NA",), + desired_number_of_tracks=dict(argstr="-number %d",), + do_not_precompute=dict(argstr="-noprecomputed",), + environ=dict(nohash=True, usedefault=True,), exclude_file=dict( - argstr='-exclude %s', - extensions=None, - xor=['exclude_file', 'exclude_spec'], + argstr="-exclude %s", extensions=None, xor=["exclude_file", "exclude_spec"], ), exclude_spec=dict( - argstr='-exclude %s', + argstr="-exclude %s", position=2, - sep=',', - units='mm', - xor=['exclude_file', 'exclude_spec'], - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, + sep=",", + units="mm", + xor=["exclude_file", "exclude_spec"], ), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), include_file=dict( - argstr='-include %s', - extensions=None, - xor=['include_file', 'include_spec'], + argstr="-include %s", extensions=None, xor=["include_file", 
"include_spec"], ), include_spec=dict( - argstr='-include %s', + argstr="-include %s", position=2, - sep=',', - units='mm', - xor=['include_file', 'include_spec'], - ), - initial_cutoff_value=dict( - argstr='-initcutoff %s', - units='NA', - ), - initial_direction=dict( - argstr='-initdirection %s', - units='voxels', - ), - inputmodel=dict( - argstr='%s', - position=-3, - usedefault=True, + sep=",", + units="mm", + xor=["include_file", "include_spec"], ), + initial_cutoff_value=dict(argstr="-initcutoff %s", units="NA",), + initial_direction=dict(argstr="-initdirection %s", units="voxels",), + inputmodel=dict(argstr="%s", position=-3, usedefault=True,), mask_file=dict( - argstr='-mask %s', - extensions=None, - xor=['mask_file', 'mask_spec'], + argstr="-mask %s", extensions=None, xor=["mask_file", "mask_spec"], ), mask_spec=dict( - argstr='-mask %s', + argstr="-mask %s", position=2, - sep=',', - units='mm', - xor=['mask_file', 'mask_spec'], - ), - maximum_number_of_tracks=dict(argstr='-maxnum %d', ), - maximum_tract_length=dict( - argstr='-length %s', - units='mm', - ), - minimum_radius_of_curvature=dict( - argstr='-curvature %s', - units='mm', - ), - minimum_tract_length=dict( - argstr='-minlength %s', - units='mm', - ), - no_mask_interpolation=dict(argstr='-nomaskinterp', ), + sep=",", + units="mm", + xor=["mask_file", "mask_spec"], + ), + maximum_number_of_tracks=dict(argstr="-maxnum %d",), + maximum_tract_length=dict(argstr="-length %s", units="mm",), + minimum_radius_of_curvature=dict(argstr="-curvature %s", units="mm",), + minimum_tract_length=dict(argstr="-minlength %s", units="mm",), + no_mask_interpolation=dict(argstr="-nomaskinterp",), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, - name_source=['in_file'], - name_template='%s_tracked.tck', - output_name='tracked', + name_source=["in_file"], + name_template="%s_tracked.tck", + output_name="tracked", position=-1, ), seed_file=dict( - argstr='-seed %s', - extensions=None, - xor=['seed_file', 'seed_spec'], + argstr="-seed %s", extensions=None, xor=["seed_file", "seed_spec"], ), seed_spec=dict( - argstr='-seed %s', + argstr="-seed %s", position=2, - sep=',', - units='mm', - xor=['seed_file', 'seed_spec'], - ), - step_size=dict( - argstr='-step %s', - units='mm', + sep=",", + units="mm", + xor=["seed_file", "seed_spec"], ), - stop=dict(argstr='-stop', ), - unidirectional=dict(argstr='-unidirectional', ), + step_size=dict(argstr="-step %s", units="mm",), + stop=dict(argstr="-stop",), + unidirectional=dict(argstr="-unidirectional",), ) inputs = StreamlineTrack.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_StreamlineTrack_outputs(): - output_map = dict(tracked=dict(extensions=None, ), ) + output_map = dict(tracked=dict(extensions=None,),) outputs = StreamlineTrack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py index 088760ae85..da5225cc42 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py @@ -4,39 +4,22 @@ def test_Tensor2ApparentDiffusion_inputs(): input_map = dict( - args=dict(argstr='%s', ), - debug=dict( - argstr='-debug', - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - 
extensions=None, - mandatory=True, - position=-2, - ), - out_filename=dict( - argstr='%s', - extensions=None, - genfile=True, - position=-1, - ), - quiet=dict( - argstr='-quiet', - position=1, - ), + args=dict(argstr="%s",), + debug=dict(argstr="-debug", position=1,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), + quiet=dict(argstr="-quiet", position=1,), ) inputs = Tensor2ApparentDiffusion.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Tensor2ApparentDiffusion_outputs(): - output_map = dict(ADC=dict(extensions=None, ), ) + output_map = dict(ADC=dict(extensions=None,),) outputs = Tensor2ApparentDiffusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py index 376755ef8f..8f9937b550 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py @@ -4,39 +4,22 @@ def test_Tensor2FractionalAnisotropy_inputs(): input_map = dict( - args=dict(argstr='%s', ), - debug=dict( - argstr='-debug', - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - out_filename=dict( - argstr='%s', - extensions=None, - genfile=True, - position=-1, - ), - quiet=dict( - argstr='-quiet', - position=1, - ), + args=dict(argstr="%s",), + debug=dict(argstr="-debug", position=1,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), + quiet=dict(argstr="-quiet", position=1,), ) inputs = Tensor2FractionalAnisotropy.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Tensor2FractionalAnisotropy_outputs(): - output_map = dict(FA=dict(extensions=None, ), ) + output_map = dict(FA=dict(extensions=None,),) outputs = Tensor2FractionalAnisotropy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py index 93b1a2cf7f..dcc58860a4 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py @@ -4,39 +4,22 @@ def test_Tensor2Vector_inputs(): input_map = dict( - args=dict(argstr='%s', ), - debug=dict( - argstr='-debug', - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - out_filename=dict( - argstr='%s', - extensions=None, - genfile=True, - position=-1, - ), - quiet=dict( - argstr='-quiet', - position=1, - ), + args=dict(argstr="%s",), + debug=dict(argstr="-debug", position=1,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), + quiet=dict(argstr="-quiet", 
position=1,), ) inputs = Tensor2Vector.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Tensor2Vector_outputs(): - output_map = dict(vector=dict(extensions=None, ), ) + output_map = dict(vector=dict(extensions=None,),) outputs = Tensor2Vector.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py b/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py index 1edaf3cc24..b1e9a27016 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py @@ -4,49 +4,26 @@ def test_Threshold_inputs(): input_map = dict( - absolute_threshold_value=dict(argstr='-abs %s', ), - args=dict(argstr='%s', ), - debug=dict( - argstr='-debug', - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - invert=dict( - argstr='-invert', - position=1, - ), - out_filename=dict( - argstr='%s', - extensions=None, - genfile=True, - position=-1, - ), - percentage_threshold_value=dict(argstr='-percent %s', ), - quiet=dict( - argstr='-quiet', - position=1, - ), - replace_zeros_with_NaN=dict( - argstr='-nan', - position=1, - ), + absolute_threshold_value=dict(argstr="-abs %s",), + args=dict(argstr="%s",), + debug=dict(argstr="-debug", position=1,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + invert=dict(argstr="-invert", position=1,), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), + percentage_threshold_value=dict(argstr="-percent %s",), + quiet=dict(argstr="-quiet", position=1,), + replace_zeros_with_NaN=dict(argstr="-nan", position=1,), ) inputs = Threshold.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Threshold_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Threshold.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py b/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py index 55b320809e..b8bc425de7 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py @@ -4,58 +4,26 @@ def test_Tracks2Prob_inputs(): input_map = dict( - args=dict(argstr='%s', ), - colour=dict( - argstr='-colour', - position=3, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fraction=dict( - argstr='-fraction', - position=3, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - out_filename=dict( - argstr='%s', - extensions=None, - genfile=True, - position=-1, - ), - output_datatype=dict( - argstr='-datatype %s', - position=2, - ), - resample=dict( - argstr='-resample %d', - position=3, - units='mm', - ), - template_file=dict( - argstr='-template %s', - extensions=None, - position=1, - ), - voxel_dims=dict( - argstr='-vox %s', - position=2, - sep=',', - ), + args=dict(argstr="%s",), + colour=dict(argstr="-colour", position=3,), + environ=dict(nohash=True, usedefault=True,), + fraction=dict(argstr="-fraction", position=3,), + in_file=dict(argstr="%s", extensions=None, 
mandatory=True, position=-2,), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), + output_datatype=dict(argstr="-datatype %s", position=2,), + resample=dict(argstr="-resample %d", position=3, units="mm",), + template_file=dict(argstr="-template %s", extensions=None, position=1,), + voxel_dims=dict(argstr="-vox %s", position=2, sep=",",), ) inputs = Tracks2Prob.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Tracks2Prob_outputs(): - output_map = dict(tract_image=dict(extensions=None, ), ) + output_map = dict(tract_image=dict(extensions=None,),) outputs = Tracks2Prob.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tracking.py b/nipype/interfaces/mrtrix/tracking.py index f115841482..c922c4fba4 100644 --- a/nipype/interfaces/mrtrix/tracking.py +++ b/nipype/interfaces/mrtrix/tracking.py @@ -6,84 +6,90 @@ import os.path as op from ...utils.filemanip import split_filename -from ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec, - File, isdefined) +from ..base import ( + CommandLineInputSpec, + CommandLine, + traits, + TraitedSpec, + File, + isdefined, +) class FilterTracksInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-2, - desc='input tracks to be filtered') - include_xor = ['include_file', 'include_spec'] + desc="input tracks to be filtered", + ) + include_xor = ["include_file", "include_spec"] include_file = File( - exists=True, - argstr='-include %s', - desc='inclusion file', - xor=include_xor) + exists=True, argstr="-include %s", desc="inclusion file", xor=include_xor + ) include_spec = traits.List( traits.Float, - desc='inclusion specification in mm and radius (x y z r)', + desc="inclusion specification in mm and radius (x y z r)", position=2, - argstr='-include %s', + argstr="-include %s", minlen=4, maxlen=4, - sep=',', - units='mm', - xor=include_xor) + sep=",", + units="mm", + xor=include_xor, + ) - exclude_xor = ['exclude_file', 'exclude_spec'] + exclude_xor = ["exclude_file", "exclude_spec"] exclude_file = File( - exists=True, - argstr='-exclude %s', - desc='exclusion file', - xor=exclude_xor) + exists=True, argstr="-exclude %s", desc="exclusion file", xor=exclude_xor + ) exclude_spec = traits.List( traits.Float, - desc='exclusion specification in mm and radius (x y z r)', + desc="exclusion specification in mm and radius (x y z r)", position=2, - argstr='-exclude %s', + argstr="-exclude %s", minlen=4, maxlen=4, - sep=',', - units='mm', - xor=exclude_xor) + sep=",", + units="mm", + xor=exclude_xor, + ) minimum_tract_length = traits.Float( - argstr='-minlength %s', - units='mm', - desc= - "Sets the minimum length of any track in millimeters (default is 10 mm)." + argstr="-minlength %s", + units="mm", + desc="Sets the minimum length of any track in millimeters (default is 10 mm).", ) out_file = File( - argstr='%s', + argstr="%s", position=-1, - desc='Output filtered track filename', - name_source=['in_file'], + desc="Output filtered track filename", + name_source=["in_file"], hash_files=False, - name_template='%s_filt') + name_template="%s_filt", + ) no_mask_interpolation = traits.Bool( - argstr='-nomaskinterp', - desc="Turns off trilinear interpolation of mask images.") + argstr="-nomaskinterp", desc="Turns off trilinear interpolation of mask images." 
+ ) invert = traits.Bool( - argstr='-invert', + argstr="-invert", desc="invert the matching process, so that tracks that would" - "otherwise have been included are now excluded and vice-versa.") + "otherwise have been included are now excluded and vice-versa.", + ) quiet = traits.Bool( - argstr='-quiet', + argstr="-quiet", position=1, - desc="Do not display information messages or progress status.") - debug = traits.Bool( - argstr='-debug', position=1, desc="Display debugging messages.") + desc="Do not display information messages or progress status.", + ) + debug = traits.Bool(argstr="-debug", position=1, desc="Display debugging messages.") class FilterTracksOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='the output filtered tracks') + out_file = File(exists=True, desc="the output filtered tracks") class FilterTracks(CommandLine): @@ -100,45 +106,39 @@ class FilterTracks(CommandLine): >>> filt.run() # doctest: +SKIP """ - _cmd = 'filter_tracks' + _cmd = "filter_tracks" input_spec = FilterTracksInputSpec output_spec = FilterTracksOutputSpec class Tracks2ProbInputSpec(CommandLineInputSpec): in_file = File( - exists=True, - argstr='%s', - mandatory=True, - position=-2, - desc='tract file') + exists=True, argstr="%s", mandatory=True, position=-2, desc="tract file" + ) template_file = File( exists=True, - argstr='-template %s', + argstr="-template %s", position=1, - desc= - 'an image file to be used as a template for the output (the output image wil have the same transform and field of view)' + desc="an image file to be used as a template for the output (the output image wil have the same transform and field of view)", ) voxel_dims = traits.List( traits.Float, - argstr='-vox %s', - sep=',', + argstr="-vox %s", + sep=",", position=2, minlen=3, maxlen=3, - desc= - 'Three comma-separated numbers giving the size of each voxel in mm.') + desc="Three comma-separated numbers giving the size of each voxel in mm.", + ) colour = traits.Bool( - argstr='-colour', + argstr="-colour", position=3, - desc= - "add colour to the output image according to the direction of the tracks." + desc="add colour to the output image according to the direction of the tracks.", ) fraction = traits.Bool( - argstr='-fraction', + argstr="-fraction", position=3, - desc= - "produce an image of the fraction of fibres through each voxel (as a proportion of the total number in the file), rather than the count." + desc="produce an image of the fraction of fibres through each voxel (as a proportion of the total number in the file), rather than the count.", ) output_datatype = traits.Enum( "Bit", @@ -150,25 +150,21 @@ class Tracks2ProbInputSpec(CommandLineInputSpec): "UInt32", "float32", "float64", - argstr='-datatype %s', + argstr="-datatype %s", position=2, - desc= - '"i.e. Bfloat". Can be "char", "short", "int", "long", "float" or "double"' + desc='"i.e. Bfloat". Can be "char", "short", "int", "long", "float" or "double"', ) # , usedefault=True) resample = traits.Float( - argstr='-resample %d', + argstr="-resample %d", position=3, - units='mm', - desc= - 'resample the tracks at regular intervals using Hermite interpolation. If omitted, the program will select an appropriate interpolation factor automatically.' + units="mm", + desc="resample the tracks at regular intervals using Hermite interpolation. 
If omitted, the program will select an appropriate interpolation factor automatically.", ) - out_filename = File( - genfile=True, argstr='%s', position=-1, desc='output data file') + out_filename = File(genfile=True, argstr="%s", position=-1, desc="output data file") class Tracks2ProbOutputSpec(TraitedSpec): - tract_image = File( - exists=True, desc='Output tract count or track density image') + tract_image = File(exists=True, desc="Output tract count or track density image") class Tracks2Prob(CommandLine): @@ -188,199 +184,197 @@ class Tracks2Prob(CommandLine): >>> tdi.run() # doctest: +SKIP """ - _cmd = 'tracks2prob' + _cmd = "tracks2prob" input_spec = Tracks2ProbInputSpec output_spec = Tracks2ProbOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['tract_image'] = self.inputs.out_filename - if not isdefined(outputs['tract_image']): - outputs['tract_image'] = op.abspath(self._gen_outfilename()) + outputs["tract_image"] = self.inputs.out_filename + if not isdefined(outputs["tract_image"]): + outputs["tract_image"] = op.abspath(self._gen_outfilename()) else: - outputs['tract_image'] = os.path.abspath(outputs['tract_image']) + outputs["tract_image"] = os.path.abspath(outputs["tract_image"]) return outputs def _gen_filename(self, name): - if name == 'out_filename': + if name == "out_filename": return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) - return name + '_TDI.mif' + return name + "_TDI.mif" class StreamlineTrackInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-2, - desc='the image containing the source data.' - 'The type of data required depends on the type of tracking as set in the preceeding argument. For DT methods, ' - 'the base DWI are needed. For SD methods, the SH harmonic coefficients of the FOD are needed.' + desc="the image containing the source data." + "The type of data required depends on the type of tracking as set in the preceeding argument. For DT methods, " + "the base DWI are needed. 
For SD methods, the SH harmonic coefficients of the FOD are needed.", ) - seed_xor = ['seed_file', 'seed_spec'] - seed_file = File( - exists=True, argstr='-seed %s', desc='seed file', xor=seed_xor) + seed_xor = ["seed_file", "seed_spec"] + seed_file = File(exists=True, argstr="-seed %s", desc="seed file", xor=seed_xor) seed_spec = traits.List( traits.Float, - desc='seed specification in mm and radius (x y z r)', + desc="seed specification in mm and radius (x y z r)", position=2, - argstr='-seed %s', + argstr="-seed %s", minlen=4, maxlen=4, - sep=',', - units='mm', - xor=seed_xor) + sep=",", + units="mm", + xor=seed_xor, + ) - include_xor = ['include_file', 'include_spec'] + include_xor = ["include_file", "include_spec"] include_file = File( - exists=True, - argstr='-include %s', - desc='inclusion file', - xor=include_xor) + exists=True, argstr="-include %s", desc="inclusion file", xor=include_xor + ) include_spec = traits.List( traits.Float, - desc='inclusion specification in mm and radius (x y z r)', + desc="inclusion specification in mm and radius (x y z r)", position=2, - argstr='-include %s', + argstr="-include %s", minlen=4, maxlen=4, - sep=',', - units='mm', - xor=include_xor) + sep=",", + units="mm", + xor=include_xor, + ) - exclude_xor = ['exclude_file', 'exclude_spec'] + exclude_xor = ["exclude_file", "exclude_spec"] exclude_file = File( - exists=True, - argstr='-exclude %s', - desc='exclusion file', - xor=exclude_xor) + exists=True, argstr="-exclude %s", desc="exclusion file", xor=exclude_xor + ) exclude_spec = traits.List( traits.Float, - desc='exclusion specification in mm and radius (x y z r)', + desc="exclusion specification in mm and radius (x y z r)", position=2, - argstr='-exclude %s', + argstr="-exclude %s", minlen=4, maxlen=4, - sep=',', - units='mm', - xor=exclude_xor) + sep=",", + units="mm", + xor=exclude_xor, + ) - mask_xor = ['mask_file', 'mask_spec'] + mask_xor = ["mask_file", "mask_spec"] mask_file = File( exists=True, - argstr='-mask %s', - desc='mask file. Only tracks within mask.', - xor=mask_xor) + argstr="-mask %s", + desc="mask file. Only tracks within mask.", + xor=mask_xor, + ) mask_spec = traits.List( traits.Float, - desc= - 'Mask specification in mm and radius (x y z r). Tracks will be terminated when they leave the ROI.', + desc="Mask specification in mm and radius (x y z r). Tracks will be terminated when they leave the ROI.", position=2, - argstr='-mask %s', + argstr="-mask %s", minlen=4, maxlen=4, - sep=',', - units='mm', - xor=mask_xor) + sep=",", + units="mm", + xor=mask_xor, + ) inputmodel = traits.Enum( - 'DT_STREAM', - 'SD_PROB', - 'SD_STREAM', - argstr='%s', - desc='input model type', + "DT_STREAM", + "SD_PROB", + "SD_STREAM", + argstr="%s", + desc="input model type", usedefault=True, - position=-3) + position=-3, + ) stop = traits.Bool( - argstr='-stop', - desc="stop track as soon as it enters any of the include regions.") + argstr="-stop", + desc="stop track as soon as it enters any of the include regions.", + ) do_not_precompute = traits.Bool( - argstr='-noprecomputed', - desc= - "Turns off precomputation of the legendre polynomial values. Warning: this will slow down the algorithm by a factor of approximately 4." + argstr="-noprecomputed", + desc="Turns off precomputation of the legendre polynomial values. Warning: this will slow down the algorithm by a factor of approximately 4.", ) unidirectional = traits.Bool( - argstr='-unidirectional', - desc= - "Track from the seed point in one direction only (default is to track in both directions)." 
+ argstr="-unidirectional", + desc="Track from the seed point in one direction only (default is to track in both directions).", ) no_mask_interpolation = traits.Bool( - argstr='-nomaskinterp', - desc="Turns off trilinear interpolation of mask images.") + argstr="-nomaskinterp", desc="Turns off trilinear interpolation of mask images." + ) step_size = traits.Float( - argstr='-step %s', - units='mm', - desc="Set the step size of the algorithm in mm (default is 0.2).") + argstr="-step %s", + units="mm", + desc="Set the step size of the algorithm in mm (default is 0.2).", + ) minimum_radius_of_curvature = traits.Float( - argstr='-curvature %s', - units='mm', - desc= - "Set the minimum radius of curvature (default is 2 mm for DT_STREAM, 0 for SD_STREAM, 1 mm for SD_PROB and DT_PROB)" + argstr="-curvature %s", + units="mm", + desc="Set the minimum radius of curvature (default is 2 mm for DT_STREAM, 0 for SD_STREAM, 1 mm for SD_PROB and DT_PROB)", ) desired_number_of_tracks = traits.Int( - argstr='-number %d', - desc='Sets the desired number of tracks.' - 'The program will continue to generate tracks until this number of tracks have been selected and written to the output file' - '(default is 100 for *_STREAM methods, 1000 for *_PROB methods).') + argstr="-number %d", + desc="Sets the desired number of tracks." + "The program will continue to generate tracks until this number of tracks have been selected and written to the output file" + "(default is 100 for *_STREAM methods, 1000 for *_PROB methods).", + ) maximum_number_of_tracks = traits.Int( - argstr='-maxnum %d', - desc='Sets the maximum number of tracks to generate.' + argstr="-maxnum %d", + desc="Sets the maximum number of tracks to generate." "The program will not generate more tracks than this number, even if the desired number of tracks hasn't yet been reached" - '(default is 100 x number).') + "(default is 100 x number).", + ) minimum_tract_length = traits.Float( - argstr='-minlength %s', - units='mm', - desc= - "Sets the minimum length of any track in millimeters (default is 10 mm)." + argstr="-minlength %s", + units="mm", + desc="Sets the minimum length of any track in millimeters (default is 10 mm).", ) maximum_tract_length = traits.Float( - argstr='-length %s', - units='mm', - desc= - "Sets the maximum length of any track in millimeters (default is 200 mm)." + argstr="-length %s", + units="mm", + desc="Sets the maximum length of any track in millimeters (default is 200 mm).", ) cutoff_value = traits.Float( - argstr='-cutoff %s', - units='NA', - desc= - "Set the FA or FOD amplitude cutoff for terminating tracks (default is 0.1)." + argstr="-cutoff %s", + units="NA", + desc="Set the FA or FOD amplitude cutoff for terminating tracks (default is 0.1).", ) initial_cutoff_value = traits.Float( - argstr='-initcutoff %s', - units='NA', - desc= - "Sets the minimum FA or FOD amplitude for initiating tracks (default is twice the normal cutoff)." 
+ argstr="-initcutoff %s", + units="NA", + desc="Sets the minimum FA or FOD amplitude for initiating tracks (default is twice the normal cutoff).", ) initial_direction = traits.List( traits.Int, - desc='Specify the initial tracking direction as a vector', - argstr='-initdirection %s', + desc="Specify the initial tracking direction as a vector", + argstr="-initdirection %s", minlen=2, maxlen=2, - units='voxels') + units="voxels", + ) out_file = File( - argstr='%s', + argstr="%s", position=-1, - name_source=['in_file'], - name_template='%s_tracked.tck', - output_name='tracked', - desc='output data file') + name_source=["in_file"], + name_template="%s_tracked.tck", + output_name="tracked", + desc="output data file", + ) class StreamlineTrackOutputSpec(TraitedSpec): - tracked = File( - exists=True, desc='output file containing reconstructed tracts') + tracked = File(exists=True, desc="output file containing reconstructed tracts") class StreamlineTrack(CommandLine): @@ -403,7 +397,8 @@ class StreamlineTrack(CommandLine): 'streamtrack -mask mask.nii -seed seed_mask.nii SD_PROB data.Bfloat data_tracked.tck' >>> strack.run() # doctest: +SKIP """ - _cmd = 'streamtrack' + + _cmd = "streamtrack" input_spec = StreamlineTrackInputSpec output_spec = StreamlineTrackOutputSpec @@ -411,11 +406,10 @@ class StreamlineTrack(CommandLine): class DiffusionTensorStreamlineTrackInputSpec(StreamlineTrackInputSpec): gradient_encoding_file = File( exists=True, - argstr='-grad %s', + argstr="-grad %s", mandatory=True, position=-2, - desc= - 'Gradient encoding, supplied as a 4xN text file with each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). See FSL2MRTrix' + desc="Gradient encoding, supplied as a 4xN text file with each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). See FSL2MRTrix", ) @@ -439,16 +433,15 @@ class DiffusionTensorStreamlineTrack(StreamlineTrack): def __init__(self, command=None, **inputs): inputs["inputmodel"] = "DT_STREAM" - return super(DiffusionTensorStreamlineTrack, self).__init__( - command, **inputs) + return super(DiffusionTensorStreamlineTrack, self).__init__(command, **inputs) class ProbabilisticSphericallyDeconvolutedStreamlineTrackInputSpec( - StreamlineTrackInputSpec): + StreamlineTrackInputSpec +): maximum_number_of_trials = traits.Int( - argstr='-trials %s', - desc= - "Set the maximum number of sampling trials at each point (only used for probabilistic tracking)." 
+ argstr="-trials %s", + desc="Set the maximum number of sampling trials at each point (only used for probabilistic tracking).", ) @@ -469,12 +462,14 @@ class ProbabilisticSphericallyDeconvolutedStreamlineTrack(StreamlineTrack): >>> sdprobtrack.inputs.seed_file = 'seed_mask.nii' >>> sdprobtrack.run() # doctest: +SKIP """ + input_spec = ProbabilisticSphericallyDeconvolutedStreamlineTrackInputSpec def __init__(self, command=None, **inputs): inputs["inputmodel"] = "SD_PROB" - return super(ProbabilisticSphericallyDeconvolutedStreamlineTrack, - self).__init__(command, **inputs) + return super( + ProbabilisticSphericallyDeconvolutedStreamlineTrack, self + ).__init__(command, **inputs) class SphericallyDeconvolutedStreamlineTrack(StreamlineTrack): @@ -494,9 +489,11 @@ class SphericallyDeconvolutedStreamlineTrack(StreamlineTrack): >>> sdtrack.inputs.seed_file = 'seed_mask.nii' >>> sdtrack.run() # doctest: +SKIP """ + input_spec = StreamlineTrackInputSpec def __init__(self, command=None, **inputs): inputs["inputmodel"] = "SD_STREAM" return super(SphericallyDeconvolutedStreamlineTrack, self).__init__( - command, **inputs) + command, **inputs + ) diff --git a/nipype/interfaces/mrtrix3/__init__.py b/nipype/interfaces/mrtrix3/__init__.py index 9fb4311730..0dbe3bb872 100644 --- a/nipype/interfaces/mrtrix3/__init__.py +++ b/nipype/interfaces/mrtrix3/__init__.py @@ -2,11 +2,26 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: # -*- coding: utf-8 -*- -from .utils import (Mesh2PVE, Generate5tt, BrainMask, TensorMetrics, - ComputeTDI, TCK2VTK, MRMath, MRConvert, MRResize, - DWIExtract) -from .preprocess import (ResponseSD, ACTPrepareFSL, ReplaceFSwithFIRST, - DWIDenoise, MRDeGibbs, DWIBiasCorrect) +from .utils import ( + Mesh2PVE, + Generate5tt, + BrainMask, + TensorMetrics, + ComputeTDI, + TCK2VTK, + MRMath, + MRConvert, + MRResize, + DWIExtract, +) +from .preprocess import ( + ResponseSD, + ACTPrepareFSL, + ReplaceFSwithFIRST, + DWIDenoise, + MRDeGibbs, + DWIBiasCorrect, +) from .tracking import Tractography from .reconst import FitTensor, EstimateFOD from .connectivity import LabelConfig, LabelConvert, BuildConnectome diff --git a/nipype/interfaces/mrtrix3/base.py b/nipype/interfaces/mrtrix3/base.py index 53a9a2284e..15b208b4cd 100644 --- a/nipype/interfaces/mrtrix3/base.py +++ b/nipype/interfaces/mrtrix3/base.py @@ -4,27 +4,35 @@ from ... 
import logging, LooseVersion from ...utils.filemanip import which -from ..base import (CommandLineInputSpec, CommandLine, traits, File, isdefined, PackageInfo) -iflogger = logging.getLogger('nipype.interface') +from ..base import ( + CommandLineInputSpec, + CommandLine, + traits, + File, + isdefined, + PackageInfo, +) + +iflogger = logging.getLogger("nipype.interface") class Info(PackageInfo): - version_cmd = 'mrconvert --version' + version_cmd = "mrconvert --version" @staticmethod def parse_version(raw_info): # info is like: "== mrconvert 0.3.15-githash" for line in raw_info.splitlines(): - if line.startswith('== mrconvert '): + if line.startswith("== mrconvert "): v_string = line.split()[2] break else: return None # -githash may or may not be appended - v_string = v_string.split('-')[0] + v_string = v_string.split("-")[0] - return '.'.join(v_string.split('.')[:3]) + return ".".join(v_string.split(".")[:3]) @classmethod def looseversion(cls): @@ -32,56 +40,61 @@ def looseversion(cls): If no version found, use LooseVersion('0.0.0') """ - return LooseVersion(cls.version() or '0.0.0') + return LooseVersion(cls.version() or "0.0.0") class MRTrix3BaseInputSpec(CommandLineInputSpec): nthreads = traits.Int( - argstr='-nthreads %d', - desc='number of threads. if zero, the number' - ' of available cpus will be used', - nohash=True) + argstr="-nthreads %d", + desc="number of threads. if zero, the number" " of available cpus will be used", + nohash=True, + ) # DW gradient table import options grad_file = File( exists=True, - argstr='-grad %s', - desc='dw gradient scheme (MRTrix format)', - xor=['grad_fsl']) + argstr="-grad %s", + desc="dw gradient scheme (MRTrix format)", + xor=["grad_fsl"], + ) grad_fsl = traits.Tuple( File(exists=True), File(exists=True), - argstr='-fslgrad %s %s', - desc='(bvecs, bvals) dw gradient scheme (FSL format)', - xor=['grad_file']) + argstr="-fslgrad %s %s", + desc="(bvecs, bvals) dw gradient scheme (FSL format)", + xor=["grad_file"], + ) bval_scale = traits.Enum( - 'yes', - 'no', - argstr='-bvalue_scaling %s', - desc='specifies whether the b - values should be scaled by the square' - ' of the corresponding DW gradient norm, as often required for ' - 'multishell or DSI DW acquisition schemes. The default action ' - 'can also be set in the MRtrix config file, under the ' - 'BValueScaling entry. Valid choices are yes / no, true / ' - 'false, 0 / 1 (default: true).') + "yes", + "no", + argstr="-bvalue_scaling %s", + desc="specifies whether the b - values should be scaled by the square" + " of the corresponding DW gradient norm, as often required for " + "multishell or DSI DW acquisition schemes. The default action " + "can also be set in the MRtrix config file, under the " + "BValueScaling entry. 
Valid choices are yes / no, true / " + "false, 0 / 1 (default: true).", + ) in_bvec = File( - exists=True, argstr='-fslgrad %s %s', desc='bvecs file in FSL format') - in_bval = File(exists=True, desc='bvals file in FSL format') + exists=True, argstr="-fslgrad %s %s", desc="bvecs file in FSL format" + ) + in_bval = File(exists=True, desc="bvals file in FSL format") class MRTrix3Base(CommandLine): def _format_arg(self, name, trait_spec, value): - if name == 'nthreads' and value == 0: + if name == "nthreads" and value == 0: value = 1 try: from multiprocessing import cpu_count + value = cpu_count() except: - iflogger.warning('Number of threads could not be computed') + iflogger.warning("Number of threads could not be computed") pass return trait_spec.argstr % value - if name == 'in_bvec': + if name == "in_bvec": return trait_spec.argstr % (value, self.inputs.in_bval) return super(MRTrix3Base, self)._format_arg(name, trait_spec, value) @@ -91,17 +104,17 @@ def _parse_inputs(self, skip=None): skip = [] try: - if (isdefined(self.inputs.grad_file) - or isdefined(self.inputs.grad_fsl)): - skip += ['in_bvec', 'in_bval'] + if isdefined(self.inputs.grad_file) or isdefined(self.inputs.grad_fsl): + skip += ["in_bvec", "in_bval"] is_bvec = isdefined(self.inputs.in_bvec) is_bval = isdefined(self.inputs.in_bval) if is_bvec or is_bval: if not is_bvec or not is_bval: - raise RuntimeError('If using bvecs and bvals inputs, both' - 'should be defined') - skip += ['in_bval'] + raise RuntimeError( + "If using bvecs and bvals inputs, both" "should be defined" + ) + skip += ["in_bval"] except AttributeError: pass diff --git a/nipype/interfaces/mrtrix3/connectivity.py b/nipype/interfaces/mrtrix3/connectivity.py index 63277d7a38..308eccd45f 100644 --- a/nipype/interfaces/mrtrix3/connectivity.py +++ b/nipype/interfaces/mrtrix3/connectivity.py @@ -5,92 +5,95 @@ import os import os.path as op -from ..base import (CommandLineInputSpec, traits, TraitedSpec, File, isdefined) +from ..base import CommandLineInputSpec, traits, TraitedSpec, File, isdefined from .base import MRTrix3Base class BuildConnectomeInputSpec(CommandLineInputSpec): in_file = File( - exists=True, - argstr='%s', - mandatory=True, - position=-3, - desc='input tractography') - in_parc = File( - exists=True, argstr='%s', position=-2, desc='parcellation file') + exists=True, argstr="%s", mandatory=True, position=-3, desc="input tractography" + ) + in_parc = File(exists=True, argstr="%s", position=-2, desc="parcellation file") out_file = File( - 'connectome.csv', - argstr='%s', + "connectome.csv", + argstr="%s", mandatory=True, position=-1, usedefault=True, - desc='output file after processing') + desc="output file after processing", + ) nthreads = traits.Int( - argstr='-nthreads %d', - desc='number of threads. if zero, the number' - ' of available cpus will be used', - nohash=True) + argstr="-nthreads %d", + desc="number of threads. if zero, the number" " of available cpus will be used", + nohash=True, + ) vox_lookup = traits.Bool( - argstr='-assignment_voxel_lookup', - desc='use a simple voxel lookup value at each streamline endpoint') + argstr="-assignment_voxel_lookup", + desc="use a simple voxel lookup value at each streamline endpoint", + ) search_radius = traits.Float( - argstr='-assignment_radial_search %f', - desc='perform a radial search from each streamline endpoint to locate ' - 'the nearest node. 
Argument is the maximum radius in mm; if no node is' - ' found within this radius, the streamline endpoint is not assigned to' - ' any node.') + argstr="-assignment_radial_search %f", + desc="perform a radial search from each streamline endpoint to locate " + "the nearest node. Argument is the maximum radius in mm; if no node is" + " found within this radius, the streamline endpoint is not assigned to" + " any node.", + ) search_reverse = traits.Float( - argstr='-assignment_reverse_search %f', - desc='traverse from each streamline endpoint inwards along the ' - 'streamline, in search of the last node traversed by the streamline. ' - 'Argument is the maximum traversal length in mm (set to 0 to allow ' - 'search to continue to the streamline midpoint).') + argstr="-assignment_reverse_search %f", + desc="traverse from each streamline endpoint inwards along the " + "streamline, in search of the last node traversed by the streamline. " + "Argument is the maximum traversal length in mm (set to 0 to allow " + "search to continue to the streamline midpoint).", + ) search_forward = traits.Float( - argstr='-assignment_forward_search %f', - desc='project the streamline forwards from the endpoint in search of a' - 'parcellation node voxel. Argument is the maximum traversal length in ' - 'mm.') + argstr="-assignment_forward_search %f", + desc="project the streamline forwards from the endpoint in search of a" + "parcellation node voxel. Argument is the maximum traversal length in " + "mm.", + ) metric = traits.Enum( - 'count', - 'meanlength', - 'invlength', - 'invnodevolume', - 'mean_scalar', - 'invlength_invnodevolume', - argstr='-metric %s', - desc='specify the edge' - ' weight metric') + "count", + "meanlength", + "invlength", + "invnodevolume", + "mean_scalar", + "invlength_invnodevolume", + argstr="-metric %s", + desc="specify the edge" " weight metric", + ) in_scalar = File( exists=True, - argstr='-image %s', - desc='provide the associated image ' - 'for the mean_scalar metric') + argstr="-image %s", + desc="provide the associated image " "for the mean_scalar metric", + ) in_weights = File( exists=True, - argstr='-tck_weights_in %s', - desc='specify a text scalar ' - 'file containing the streamline weights') + argstr="-tck_weights_in %s", + desc="specify a text scalar " "file containing the streamline weights", + ) keep_unassigned = traits.Bool( - argstr='-keep_unassigned', - desc='By default, the program discards the' - ' information regarding those streamlines that are not successfully ' - 'assigned to a node pair. Set this option to keep these values (will ' - 'be the first row/column in the output matrix)') + argstr="-keep_unassigned", + desc="By default, the program discards the" + " information regarding those streamlines that are not successfully " + "assigned to a node pair. 
Set this option to keep these values (will " + "be the first row/column in the output matrix)", + ) zero_diagonal = traits.Bool( - argstr='-zero_diagonal', - desc='set all diagonal entries in the matrix ' - 'to zero (these represent streamlines that connect to the same node at' - ' both ends)') + argstr="-zero_diagonal", + desc="set all diagonal entries in the matrix " + "to zero (these represent streamlines that connect to the same node at" + " both ends)", + ) class BuildConnectomeOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='the output response file') + out_file = File(exists=True, desc="the output response file") class BuildConnectome(MRTrix3Base): @@ -110,69 +113,74 @@ class BuildConnectome(MRTrix3Base): >>> mat.run() # doctest: +SKIP """ - _cmd = 'tck2connectome' + _cmd = "tck2connectome" input_spec = BuildConnectomeInputSpec output_spec = BuildConnectomeOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) + outputs["out_file"] = op.abspath(self.inputs.out_file) return outputs class LabelConfigInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-3, - desc='input anatomical image') + desc="input anatomical image", + ) in_config = File( - exists=True, - argstr='%s', - position=-2, - desc='connectome configuration file') + exists=True, argstr="%s", position=-2, desc="connectome configuration file" + ) out_file = File( - 'parcellation.mif', - argstr='%s', + "parcellation.mif", + argstr="%s", mandatory=True, position=-1, usedefault=True, - desc='output file after processing') + desc="output file after processing", + ) lut_basic = File( - argstr='-lut_basic %s', - desc='get information from ' - 'a basic lookup table consisting of index / name pairs') + argstr="-lut_basic %s", + desc="get information from " + "a basic lookup table consisting of index / name pairs", + ) lut_fs = File( - argstr='-lut_freesurfer %s', - desc='get information from ' + argstr="-lut_freesurfer %s", + desc="get information from " 'a FreeSurfer lookup table(typically "FreeSurferColorLUT' - '.txt")') + '.txt")', + ) lut_aal = File( - argstr='-lut_aal %s', - desc='get information from the AAL ' - 'lookup table (typically "ROI_MNI_V4.txt")') + argstr="-lut_aal %s", + desc="get information from the AAL " + 'lookup table (typically "ROI_MNI_V4.txt")', + ) lut_itksnap = File( - argstr='-lut_itksnap %s', - desc='get information from an' - ' ITK - SNAP lookup table(this includes the IIT atlas ' - 'file "LUT_GM.txt")') + argstr="-lut_itksnap %s", + desc="get information from an" + " ITK - SNAP lookup table(this includes the IIT atlas " + 'file "LUT_GM.txt")', + ) spine = File( - argstr='-spine %s', - desc='provide a manually-defined ' - 'segmentation of the base of the spine where the streamlines' - ' terminate, so that this can become a node in the connection' - ' matrix.') + argstr="-spine %s", + desc="provide a manually-defined " + "segmentation of the base of the spine where the streamlines" + " terminate, so that this can become a node in the connection" + " matrix.", + ) nthreads = traits.Int( - argstr='-nthreads %d', - desc='number of threads. if zero, the number' - ' of available cpus will be used', - nohash=True) + argstr="-nthreads %d", + desc="number of threads. 
if zero, the number" " of available cpus will be used", + nohash=True, + ) class LabelConfigOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='the output response file') + out_file = File(exists=True, desc="the output response file") class LabelConfig(MRTrix3Base): @@ -191,7 +199,7 @@ class LabelConfig(MRTrix3Base): >>> labels.run() # doctest: +SKIP """ - _cmd = 'labelconfig' + _cmd = "labelconfig" input_spec = LabelConfigInputSpec output_spec = LabelConfigOutputSpec @@ -201,65 +209,69 @@ def _parse_inputs(self, skip=None): if not isdefined(self.inputs.in_config): from distutils.spawn import find_executable + path = find_executable(self._cmd) if path is None: - path = os.getenv(MRTRIX3_HOME, '/opt/mrtrix3') + path = os.getenv(MRTRIX3_HOME, "/opt/mrtrix3") else: path = op.dirname(op.dirname(path)) self.inputs.in_config = op.join( - path, 'src/dwi/tractography/connectomics/' - 'example_configs/fs_default.txt') + path, + "src/dwi/tractography/connectomics/" "example_configs/fs_default.txt", + ) return super(LabelConfig, self)._parse_inputs(skip=skip) def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) + outputs["out_file"] = op.abspath(self.inputs.out_file) return outputs class LabelConvertInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-4, - desc='input anatomical image') + desc="input anatomical image", + ) in_lut = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-3, - desc='get information from ' - 'a basic lookup table consisting of index / name pairs') + desc="get information from " + "a basic lookup table consisting of index / name pairs", + ) in_config = File( - exists=True, - argstr='%s', - position=-2, - desc='connectome configuration file') + exists=True, argstr="%s", position=-2, desc="connectome configuration file" + ) out_file = File( - 'parcellation.mif', - argstr='%s', + "parcellation.mif", + argstr="%s", mandatory=True, position=-1, usedefault=True, - desc='output file after processing') + desc="output file after processing", + ) spine = File( - argstr='-spine %s', - desc='provide a manually-defined ' - 'segmentation of the base of the spine where the streamlines' - ' terminate, so that this can become a node in the connection' - ' matrix.') + argstr="-spine %s", + desc="provide a manually-defined " + "segmentation of the base of the spine where the streamlines" + " terminate, so that this can become a node in the connection" + " matrix.", + ) num_threads = traits.Int( - argstr='-nthreads %d', - desc='number of threads. if zero, the number' - ' of available cpus will be used', - nohash=True) + argstr="-nthreads %d", + desc="number of threads. 
if zero, the number" " of available cpus will be used", + nohash=True, + ) class LabelConvertOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='the output response file') + out_file = File(exists=True, desc="the output response file") class LabelConvert(MRTrix3Base): @@ -279,7 +291,7 @@ class LabelConvert(MRTrix3Base): >>> labels.run() # doctest: +SKIP """ - _cmd = 'labelconvert' + _cmd = "labelconvert" input_spec = LabelConvertInputSpec output_spec = LabelConvertOutputSpec @@ -289,19 +301,21 @@ def _parse_inputs(self, skip=None): if not isdefined(self.inputs.in_config): from nipype.utils.filemanip import which + path = which(self._cmd) if path is None: - path = os.getenv(MRTRIX3_HOME, '/opt/mrtrix3') + path = os.getenv(MRTRIX3_HOME, "/opt/mrtrix3") else: path = op.dirname(op.dirname(path)) self.inputs.in_config = op.join( - path, 'src/dwi/tractography/connectomics/' - 'example_configs/fs_default.txt') + path, + "src/dwi/tractography/connectomics/" "example_configs/fs_default.txt", + ) return super(LabelConvert, self)._parse_inputs(skip=skip) def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) + outputs["out_file"] = op.abspath(self.inputs.out_file) return outputs diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py index eeeb9e820a..9384ef43c7 100644 --- a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ -4,45 +4,50 @@ import os.path as op -from ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec, - File, isdefined, Undefined, InputMultiObject) +from ..base import ( + CommandLineInputSpec, + CommandLine, + traits, + TraitedSpec, + File, + isdefined, + Undefined, + InputMultiObject, +) from .base import MRTrix3BaseInputSpec, MRTrix3Base class DWIDenoiseInputSpec(MRTrix3BaseInputSpec): in_file = File( - exists=True, - argstr='%s', - position=-2, - mandatory=True, - desc='input DWI image') - mask = File( - exists=True, - argstr='-mask %s', - position=1, - desc='mask image') + exists=True, argstr="%s", position=-2, mandatory=True, desc="input DWI image" + ) + mask = File(exists=True, argstr="-mask %s", position=1, desc="mask image") extent = traits.Tuple( (traits.Int, traits.Int, traits.Int), - argstr='-extent %d,%d,%d', - desc='set the window size of the denoising filter. (default = 5,5,5)') + argstr="-extent %d,%d,%d", + desc="set the window size of the denoising filter. 
(default = 5,5,5)", + ) noise = File( - argstr='-noise %s', - name_template='%s_noise', - name_source='in_file', + argstr="-noise %s", + name_template="%s_noise", + name_source="in_file", keep_extension=True, - desc='the output noise map') + desc="the output noise map", + ) out_file = File( - argstr='%s', + argstr="%s", position=-1, - name_template='%s_denoised', - name_source='in_file', + name_template="%s_denoised", + name_source="in_file", keep_extension=True, - desc='the output denoised DWI image') + desc="the output denoised DWI image", + ) class DWIDenoiseOutputSpec(TraitedSpec): - noise = File(desc='the output noise map', exists=True) - out_file = File(desc='the output denoised DWI image', exists=True) + noise = File(desc="the output noise map", exists=True) + out_file = File(desc="the output denoised DWI image", exists=True) + class DWIDenoise(MRTrix3Base): """ @@ -76,54 +81,58 @@ class DWIDenoise(MRTrix3Base): >>> denoise.run() # doctest: +SKIP """ - _cmd = 'dwidenoise' + _cmd = "dwidenoise" input_spec = DWIDenoiseInputSpec output_spec = DWIDenoiseOutputSpec class MRDeGibbsInputSpec(MRTrix3BaseInputSpec): in_file = File( - exists=True, - argstr='%s', - position=-2, - mandatory=True, - desc='input DWI image') + exists=True, argstr="%s", position=-2, mandatory=True, desc="input DWI image" + ) axes = traits.ListInt( default_value=[0, 1], usedefault=True, - sep=',', + sep=",", minlen=2, maxlen=2, - argstr='-axes %s', - desc='indicate the plane in which the data was acquired (axial = 0,1; ' - 'coronal = 0,2; sagittal = 1,2') + argstr="-axes %s", + desc="indicate the plane in which the data was acquired (axial = 0,1; " + "coronal = 0,2; sagittal = 1,2", + ) nshifts = traits.Int( default_value=20, usedefault=True, - argstr='-nshifts %d', - desc='discretization of subpixel spacing (default = 20)') + argstr="-nshifts %d", + desc="discretization of subpixel spacing (default = 20)", + ) minW = traits.Int( default_value=1, usedefault=True, - argstr='-minW %d', - desc='left border of window used for total variation (TV) computation ' - '(default = 1)') + argstr="-minW %d", + desc="left border of window used for total variation (TV) computation " + "(default = 1)", + ) maxW = traits.Int( default_value=3, usedefault=True, - argstr='-maxW %d', - desc='right border of window used for total variation (TV) computation ' - '(default = 3)') + argstr="-maxW %d", + desc="right border of window used for total variation (TV) computation " + "(default = 3)", + ) out_file = File( - name_template='%s_unr', - name_source='in_file', + name_template="%s_unr", + name_source="in_file", keep_extension=True, - argstr='%s', + argstr="%s", position=-1, - desc='the output unringed DWI image') + desc="the output unringed DWI image", + ) + class MRDeGibbsOutputSpec(TraitedSpec): - out_file = File(desc='the output unringed DWI image', exists=True) + out_file = File(desc="the output unringed DWI image", exists=True) + class MRDeGibbs(MRTrix3Base): """ @@ -161,46 +170,44 @@ class MRDeGibbs(MRTrix3Base): >>> unring.run() # doctest: +SKIP """ - _cmd = 'mrdegibbs' + _cmd = "mrdegibbs" input_spec = MRDeGibbsInputSpec output_spec = MRDeGibbsOutputSpec class DWIBiasCorrectInputSpec(MRTrix3BaseInputSpec): in_file = File( - exists=True, - argstr='%s', - position=-2, - mandatory=True, - desc='input DWI image') - in_mask = File( - argstr='-mask %s', - desc='input mask image for bias field estimation') + exists=True, argstr="%s", position=-2, mandatory=True, desc="input DWI image" + ) + in_mask = File(argstr="-mask %s", desc="input 
mask image for bias field estimation") use_ants = traits.Bool( - argstr='-ants', + argstr="-ants", mandatory=True, - desc='use ANTS N4 to estimate the inhomogeneity field', - xor=['use_fsl']) + desc="use ANTS N4 to estimate the inhomogeneity field", + xor=["use_fsl"], + ) use_fsl = traits.Bool( - argstr='-fsl', + argstr="-fsl", mandatory=True, - desc='use FSL FAST to estimate the inhomogeneity field', - xor=['use_ants']) - bias = File( - argstr='-bias %s', - desc='bias field') + desc="use FSL FAST to estimate the inhomogeneity field", + xor=["use_ants"], + ) + bias = File(argstr="-bias %s", desc="bias field") out_file = File( - name_template='%s_biascorr', - name_source='in_file', + name_template="%s_biascorr", + name_source="in_file", keep_extension=True, - argstr='%s', + argstr="%s", position=-1, - desc='the output bias corrected DWI image', - genfile=True) + desc="the output bias corrected DWI image", + genfile=True, + ) + class DWIBiasCorrectOutputSpec(TraitedSpec): - bias = File(desc='the output bias field', exists=True) - out_file = File(desc='the output bias corrected DWI image', exists=True) + bias = File(desc="the output bias field", exists=True) + out_file = File(desc="the output bias corrected DWI image", exists=True) + class DWIBiasCorrect(MRTrix3Base): """ @@ -221,52 +228,51 @@ class DWIBiasCorrect(MRTrix3Base): >>> bias_correct.run() # doctest: +SKIP """ - _cmd = 'dwibiascorrect' + _cmd = "dwibiascorrect" input_spec = DWIBiasCorrectInputSpec output_spec = DWIBiasCorrectOutputSpec class ResponseSDInputSpec(MRTrix3BaseInputSpec): algorithm = traits.Enum( - 'msmt_5tt', - 'dhollander', - 'tournier', - 'tax', - argstr='%s', + "msmt_5tt", + "dhollander", + "tournier", + "tax", + argstr="%s", position=1, mandatory=True, - desc='response estimation algorithm (multi-tissue)') + desc="response estimation algorithm (multi-tissue)", + ) in_file = File( - exists=True, - argstr='%s', - position=-5, - mandatory=True, - desc='input DWI image') - mtt_file = File(argstr='%s', position=-4, desc='input 5tt image') + exists=True, argstr="%s", position=-5, mandatory=True, desc="input DWI image" + ) + mtt_file = File(argstr="%s", position=-4, desc="input 5tt image") wm_file = File( - 'wm.txt', - argstr='%s', + "wm.txt", + argstr="%s", position=-3, usedefault=True, - desc='output WM response text file') - gm_file = File( - argstr='%s', position=-2, desc='output GM response text file') - csf_file = File( - argstr='%s', position=-1, desc='output CSF response text file') - in_mask = File( - exists=True, argstr='-mask %s', desc='provide initial mask image') + desc="output WM response text file", + ) + gm_file = File(argstr="%s", position=-2, desc="output GM response text file") + csf_file = File(argstr="%s", position=-1, desc="output CSF response text file") + in_mask = File(exists=True, argstr="-mask %s", desc="provide initial mask image") max_sh = InputMultiObject( traits.Int, - argstr='-lmax %s', - sep=',', - desc=('maximum harmonic degree of response function - single value for ' - 'single-shell response, list for multi-shell response')) + argstr="-lmax %s", + sep=",", + desc=( + "maximum harmonic degree of response function - single value for " + "single-shell response, list for multi-shell response" + ), + ) class ResponseSDOutputSpec(TraitedSpec): - wm_file = File(argstr='%s', desc='output WM response text file') - gm_file = File(argstr='%s', desc='output GM response text file') - csf_file = File(argstr='%s', desc='output CSF response text file') + wm_file = File(argstr="%s", desc="output WM 
response text file") + gm_file = File(argstr="%s", desc="output GM response text file") + csf_file = File(argstr="%s", desc="output CSF response text file") class ResponseSD(MRTrix3Base): @@ -291,39 +297,41 @@ class ResponseSD(MRTrix3Base): 'dwi2response tournier -fslgrad bvecs bvals -lmax 6,8,10 dwi.mif wm.txt' """ - _cmd = 'dwi2response' + _cmd = "dwi2response" input_spec = ResponseSDInputSpec output_spec = ResponseSDOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['wm_file'] = op.abspath(self.inputs.wm_file) + outputs["wm_file"] = op.abspath(self.inputs.wm_file) if self.inputs.gm_file != Undefined: - outputs['gm_file'] = op.abspath(self.inputs.gm_file) + outputs["gm_file"] = op.abspath(self.inputs.gm_file) if self.inputs.csf_file != Undefined: - outputs['csf_file'] = op.abspath(self.inputs.csf_file) + outputs["csf_file"] = op.abspath(self.inputs.csf_file) return outputs class ACTPrepareFSLInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-2, - desc='input anatomical image') + desc="input anatomical image", + ) out_file = File( - 'act_5tt.mif', - argstr='%s', + "act_5tt.mif", + argstr="%s", mandatory=True, position=-1, usedefault=True, - desc='output file after processing') + desc="output file after processing", + ) class ACTPrepareFSLOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='the output response file') + out_file = File(exists=True, desc="the output response file") class ACTPrepareFSL(CommandLine): @@ -342,46 +350,43 @@ class ACTPrepareFSL(CommandLine): >>> prep.run() # doctest: +SKIP """ - _cmd = 'act_anat_prepare_fsl' + _cmd = "act_anat_prepare_fsl" input_spec = ACTPrepareFSLInputSpec output_spec = ACTPrepareFSLOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) + outputs["out_file"] = op.abspath(self.inputs.out_file) return outputs class ReplaceFSwithFIRSTInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-4, - desc='input anatomical image') + desc="input anatomical image", + ) in_t1w = File( - exists=True, - argstr='%s', - mandatory=True, - position=-3, - desc='input T1 image') + exists=True, argstr="%s", mandatory=True, position=-3, desc="input T1 image" + ) in_config = File( - exists=True, - argstr='%s', - position=-2, - desc='connectome configuration file') + exists=True, argstr="%s", position=-2, desc="connectome configuration file" + ) out_file = File( - 'aparc+first.mif', - argstr='%s', + "aparc+first.mif", + argstr="%s", mandatory=True, position=-1, usedefault=True, - desc='output file after processing') + desc="output file after processing", + ) class ReplaceFSwithFIRSTOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='the output response file') + out_file = File(exists=True, desc="the output response file") class ReplaceFSwithFIRST(CommandLine): @@ -403,11 +408,11 @@ class ReplaceFSwithFIRST(CommandLine): >>> prep.run() # doctest: +SKIP """ - _cmd = 'fs_parc_replace_sgm_first' + _cmd = "fs_parc_replace_sgm_first" input_spec = ReplaceFSwithFIRSTInputSpec output_spec = ReplaceFSwithFIRSTOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) + outputs["out_file"] = op.abspath(self.inputs.out_file) return outputs diff --git a/nipype/interfaces/mrtrix3/reconst.py b/nipype/interfaces/mrtrix3/reconst.py index be89022267..2f2854ed8c 100644 --- 
a/nipype/interfaces/mrtrix3/reconst.py +++ b/nipype/interfaces/mrtrix3/reconst.py @@ -11,41 +11,49 @@ class FitTensorInputSpec(MRTrix3BaseInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-2, - desc='input diffusion weighted images') + desc="input diffusion weighted images", + ) out_file = File( - 'dti.mif', - argstr='%s', + "dti.mif", + argstr="%s", mandatory=True, position=-1, usedefault=True, - desc='the output diffusion tensor image') + desc="the output diffusion tensor image", + ) # General options in_mask = File( exists=True, - argstr='-mask %s', - desc=('only perform computation within the specified ' - 'binary brain mask image')) + argstr="-mask %s", + desc=( + "only perform computation within the specified " "binary brain mask image" + ), + ) method = traits.Enum( - 'nonlinear', - 'loglinear', - 'sech', - 'rician', - argstr='-method %s', - desc=('select method used to perform the fitting')) + "nonlinear", + "loglinear", + "sech", + "rician", + argstr="-method %s", + desc=("select method used to perform the fitting"), + ) reg_term = traits.Float( - argstr='-regularisation %f', - max_ver='0.3.13', - desc=('specify the strength of the regularisation term on the ' - 'magnitude of the tensor elements (default = 5000). This ' - 'only applies to the non-linear methods')) + argstr="-regularisation %f", + max_ver="0.3.13", + desc=( + "specify the strength of the regularisation term on the " + "magnitude of the tensor elements (default = 5000). This " + "only applies to the non-linear methods" + ), + ) class FitTensorOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='the output DTI file') + out_file = File(exists=True, desc="the output DTI file") class FitTensor(MRTrix3Base): @@ -66,73 +74,82 @@ class FitTensor(MRTrix3Base): >>> tsr.run() # doctest: +SKIP """ - _cmd = 'dwi2tensor' + _cmd = "dwi2tensor" input_spec = FitTensorInputSpec output_spec = FitTensorOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) + outputs["out_file"] = op.abspath(self.inputs.out_file) return outputs class EstimateFODInputSpec(MRTrix3BaseInputSpec): algorithm = traits.Enum( - 'csd', - 'msmt_csd', - argstr='%s', + "csd", + "msmt_csd", + argstr="%s", position=-8, mandatory=True, - desc='FOD algorithm') + desc="FOD algorithm", + ) in_file = File( - exists=True, - argstr='%s', - position=-7, - mandatory=True, - desc='input DWI image') + exists=True, argstr="%s", position=-7, mandatory=True, desc="input DWI image" + ) wm_txt = File( - argstr='%s', position=-6, mandatory=True, desc='WM response text file') + argstr="%s", position=-6, mandatory=True, desc="WM response text file" + ) wm_odf = File( - 'wm.mif', - argstr='%s', + "wm.mif", + argstr="%s", position=-5, usedefault=True, mandatory=True, - desc='output WM ODF') - gm_txt = File(argstr='%s', position=-4, desc='GM response text file') - gm_odf = File('gm.mif', usedefault=True, argstr='%s', - position=-3, desc='output GM ODF') - csf_txt = File(argstr='%s', position=-2, desc='CSF response text file') - csf_odf = File('csf.mif', usedefault=True, argstr='%s', - position=-1, desc='output CSF ODF') - mask_file = File(exists=True, argstr='-mask %s', desc='mask image') + desc="output WM ODF", + ) + gm_txt = File(argstr="%s", position=-4, desc="GM response text file") + gm_odf = File( + "gm.mif", usedefault=True, argstr="%s", position=-3, desc="output GM ODF" + ) + csf_txt = File(argstr="%s", position=-2, desc="CSF response text file") + 
csf_odf = File( + "csf.mif", usedefault=True, argstr="%s", position=-1, desc="output CSF ODF" + ) + mask_file = File(exists=True, argstr="-mask %s", desc="mask image") # DW Shell selection options shell = traits.List( traits.Float, - sep=',', - argstr='-shell %s', - desc='specify one or more dw gradient shells') + sep=",", + argstr="-shell %s", + desc="specify one or more dw gradient shells", + ) max_sh = InputMultiObject( traits.Int, value=[8], usedefault=True, - argstr='-lmax %s', - sep=',', - desc=('maximum harmonic degree of response function - single value for single-shell response, list for multi-shell response')) + argstr="-lmax %s", + sep=",", + desc=( + "maximum harmonic degree of response function - single value for single-shell response, list for multi-shell response" + ), + ) in_dirs = File( exists=True, - argstr='-directions %s', - desc=('specify the directions over which to apply the non-negativity ' - 'constraint (by default, the built-in 300 direction set is ' - 'used). These should be supplied as a text file containing the ' - '[ az el ] pairs for the directions.')) + argstr="-directions %s", + desc=( + "specify the directions over which to apply the non-negativity " + "constraint (by default, the built-in 300 direction set is " + "used). These should be supplied as a text file containing the " + "[ az el ] pairs for the directions." + ), + ) class EstimateFODOutputSpec(TraitedSpec): - wm_odf = File(argstr='%s', desc='output WM ODF') - gm_odf = File(argstr='%s', desc='output GM ODF') - csf_odf = File(argstr='%s', desc='output CSF ODF') + wm_odf = File(argstr="%s", desc="output WM ODF") + gm_odf = File(argstr="%s", desc="output GM ODF") + csf_odf = File(argstr="%s", desc="output CSF ODF") class EstimateFOD(MRTrix3Base): @@ -153,15 +170,15 @@ class EstimateFOD(MRTrix3Base): >>> fod.run() # doctest: +SKIP """ - _cmd = 'dwi2fod' + _cmd = "dwi2fod" input_spec = EstimateFODInputSpec output_spec = EstimateFODOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['wm_odf'] = op.abspath(self.inputs.wm_odf) + outputs["wm_odf"] = op.abspath(self.inputs.wm_odf) if self.inputs.gm_odf != Undefined: - outputs['gm_odf'] = op.abspath(self.inputs.gm_odf) + outputs["gm_odf"] = op.abspath(self.inputs.gm_odf) if self.inputs.csf_odf != Undefined: - outputs['csf_odf'] = op.abspath(self.inputs.csf_odf) + outputs["csf_odf"] = op.abspath(self.inputs.csf_odf) return outputs diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py b/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py index 137ddec81f..8064175d65 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py @@ -4,23 +4,11 @@ def test_ACTPrepareFSL_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), out_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - usedefault=True, + argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True, ), ) inputs = ACTPrepareFSL.input_spec() @@ -28,8 +16,10 @@ def test_ACTPrepareFSL_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def 
test_ACTPrepareFSL_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = ACTPrepareFSL.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py b/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py index 9d22520268..c7ce6cc9af 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py @@ -4,42 +4,17 @@ def test_BrainMask_inputs(): input_map = dict( - args=dict(argstr='%s', ), - bval_scale=dict(argstr='-bvalue_scaling %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - grad_file=dict( - argstr='-grad %s', - extensions=None, - xor=['grad_fsl'], - ), - grad_fsl=dict( - argstr='-fslgrad %s %s', - xor=['grad_file'], - ), - in_bval=dict(extensions=None, ), - in_bvec=dict( - argstr='-fslgrad %s %s', - extensions=None, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - nthreads=dict( - argstr='-nthreads %d', - nohash=True, - ), + args=dict(argstr="%s",), + bval_scale=dict(argstr="-bvalue_scaling %s",), + environ=dict(nohash=True, usedefault=True,), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), + in_bval=dict(extensions=None,), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + nthreads=dict(argstr="-nthreads %d", nohash=True,), out_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - usedefault=True, + argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True, ), ) inputs = BrainMask.input_spec() @@ -47,8 +22,10 @@ def test_BrainMask_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BrainMask_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = BrainMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py b/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py index 2e4c70cbe4..95aae6fc03 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py @@ -4,56 +4,33 @@ def test_BuildConnectome_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), - in_parc=dict( - argstr='%s', - extensions=None, - position=-2, - ), - in_scalar=dict( - argstr='-image %s', - extensions=None, - ), - in_weights=dict( - argstr='-tck_weights_in %s', - extensions=None, - ), - keep_unassigned=dict(argstr='-keep_unassigned', ), - metric=dict(argstr='-metric %s', ), - nthreads=dict( - argstr='-nthreads %d', - nohash=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + in_parc=dict(argstr="%s", extensions=None, position=-2,), + in_scalar=dict(argstr="-image %s", extensions=None,), + in_weights=dict(argstr="-tck_weights_in %s", extensions=None,), + keep_unassigned=dict(argstr="-keep_unassigned",), + metric=dict(argstr="-metric 
%s",), + nthreads=dict(argstr="-nthreads %d", nohash=True,), out_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - usedefault=True, + argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True, ), - search_forward=dict(argstr='-assignment_forward_search %f', ), - search_radius=dict(argstr='-assignment_radial_search %f', ), - search_reverse=dict(argstr='-assignment_reverse_search %f', ), - vox_lookup=dict(argstr='-assignment_voxel_lookup', ), - zero_diagonal=dict(argstr='-zero_diagonal', ), + search_forward=dict(argstr="-assignment_forward_search %f",), + search_radius=dict(argstr="-assignment_radial_search %f",), + search_reverse=dict(argstr="-assignment_reverse_search %f",), + vox_lookup=dict(argstr="-assignment_voxel_lookup",), + zero_diagonal=dict(argstr="-zero_diagonal",), ) inputs = BuildConnectome.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BuildConnectome_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = BuildConnectome.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py b/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py index 81fcf0ad7d..f6a8734cef 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py @@ -4,66 +4,37 @@ def test_ComputeTDI_inputs(): input_map = dict( - args=dict(argstr='%s', ), - contrast=dict(argstr='-constrast %s', ), - data_type=dict(argstr='-datatype %s', ), - dixel=dict( - argstr='-dixel %s', - extensions=None, - ), - ends_only=dict(argstr='-ends_only', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fwhm_tck=dict(argstr='-fwhm_tck %f', ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - in_map=dict( - argstr='-image %s', - extensions=None, - ), - map_zero=dict(argstr='-map_zero', ), - max_tod=dict(argstr='-tod %d', ), - nthreads=dict( - argstr='-nthreads %d', - nohash=True, - ), - out_file=dict( - argstr='%s', - extensions=None, - position=-1, - usedefault=True, - ), - precise=dict(argstr='-precise', ), - reference=dict( - argstr='-template %s', - extensions=None, - ), - stat_tck=dict(argstr='-stat_tck %s', ), - stat_vox=dict(argstr='-stat_vox %s', ), - tck_weights=dict( - argstr='-tck_weights_in %s', - extensions=None, - ), - upsample=dict(argstr='-upsample %d', ), - use_dec=dict(argstr='-dec', ), - vox_size=dict( - argstr='-vox %s', - sep=',', - ), + args=dict(argstr="%s",), + contrast=dict(argstr="-constrast %s",), + data_type=dict(argstr="-datatype %s",), + dixel=dict(argstr="-dixel %s", extensions=None,), + ends_only=dict(argstr="-ends_only",), + environ=dict(nohash=True, usedefault=True,), + fwhm_tck=dict(argstr="-fwhm_tck %f",), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + in_map=dict(argstr="-image %s", extensions=None,), + map_zero=dict(argstr="-map_zero",), + max_tod=dict(argstr="-tod %d",), + nthreads=dict(argstr="-nthreads %d", nohash=True,), + out_file=dict(argstr="%s", extensions=None, position=-1, usedefault=True,), + precise=dict(argstr="-precise",), + reference=dict(argstr="-template %s", extensions=None,), + stat_tck=dict(argstr="-stat_tck %s",), + stat_vox=dict(argstr="-stat_vox %s",), + tck_weights=dict(argstr="-tck_weights_in %s", 
extensions=None,), + upsample=dict(argstr="-upsample %d",), + use_dec=dict(argstr="-dec",), + vox_size=dict(argstr="-vox %s", sep=",",), ) inputs = ComputeTDI.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ComputeTDI_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = ComputeTDI.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py index d5accaa3ee..210b39b141 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py @@ -4,74 +4,38 @@ def test_DWIBiasCorrect_inputs(): input_map = dict( - args=dict(argstr='%s', ), - bias=dict( - argstr='-bias %s', - extensions=None, - ), - bval_scale=dict(argstr='-bvalue_scaling %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - grad_file=dict( - argstr='-grad %s', - extensions=None, - xor=['grad_fsl'], - ), - grad_fsl=dict( - argstr='-fslgrad %s %s', - xor=['grad_file'], - ), - in_bval=dict(extensions=None, ), - in_bvec=dict( - argstr='-fslgrad %s %s', - extensions=None, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - in_mask=dict( - argstr='-mask %s', - extensions=None, - ), - nthreads=dict( - argstr='-nthreads %d', - nohash=True, - ), + args=dict(argstr="%s",), + bias=dict(argstr="-bias %s", extensions=None,), + bval_scale=dict(argstr="-bvalue_scaling %s",), + environ=dict(nohash=True, usedefault=True,), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), + in_bval=dict(extensions=None,), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + in_mask=dict(argstr="-mask %s", extensions=None,), + nthreads=dict(argstr="-nthreads %d", nohash=True,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, genfile=True, keep_extension=True, - name_source='in_file', - name_template='%s_biascorr', + name_source="in_file", + name_template="%s_biascorr", position=-1, ), - use_ants=dict( - argstr='-ants', - mandatory=True, - xor=['use_fsl'], - ), - use_fsl=dict( - argstr='-fsl', - mandatory=True, - xor=['use_ants'], - ), + use_ants=dict(argstr="-ants", mandatory=True, xor=["use_fsl"],), + use_fsl=dict(argstr="-fsl", mandatory=True, xor=["use_ants"],), ) inputs = DWIBiasCorrect.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DWIBiasCorrect_outputs(): - output_map = dict( - bias=dict(extensions=None, ), - out_file=dict(extensions=None, ), - ) + output_map = dict(bias=dict(extensions=None,), out_file=dict(extensions=None,),) outputs = DWIBiasCorrect.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py index 248ab20da2..d5050327aa 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py @@ -4,55 +4,30 @@ def test_DWIDenoise_inputs(): input_map = dict( - args=dict(argstr='%s', ), - 
bval_scale=dict(argstr='-bvalue_scaling %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - extent=dict(argstr='-extent %d,%d,%d', ), - grad_file=dict( - argstr='-grad %s', - extensions=None, - xor=['grad_fsl'], - ), - grad_fsl=dict( - argstr='-fslgrad %s %s', - xor=['grad_file'], - ), - in_bval=dict(extensions=None, ), - in_bvec=dict( - argstr='-fslgrad %s %s', - extensions=None, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - mask=dict( - argstr='-mask %s', - extensions=None, - position=1, - ), + args=dict(argstr="%s",), + bval_scale=dict(argstr="-bvalue_scaling %s",), + environ=dict(nohash=True, usedefault=True,), + extent=dict(argstr="-extent %d,%d,%d",), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), + in_bval=dict(extensions=None,), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + mask=dict(argstr="-mask %s", extensions=None, position=1,), noise=dict( - argstr='-noise %s', + argstr="-noise %s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_noise', - ), - nthreads=dict( - argstr='-nthreads %d', - nohash=True, + name_source="in_file", + name_template="%s_noise", ), + nthreads=dict(argstr="-nthreads %d", nohash=True,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_denoised', + name_source="in_file", + name_template="%s_denoised", position=-1, ), ) @@ -61,11 +36,10 @@ def test_DWIDenoise_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DWIDenoise_outputs(): - output_map = dict( - noise=dict(extensions=None, ), - out_file=dict(extensions=None, ), - ) + output_map = dict(noise=dict(extensions=None,), out_file=dict(extensions=None,),) outputs = DWIDenoise.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py index 67f4992768..18fa49c260 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py @@ -4,57 +4,30 @@ def test_DWIExtract_inputs(): input_map = dict( - args=dict(argstr='%s', ), - bval_scale=dict(argstr='-bvalue_scaling %s', ), - bzero=dict(argstr='-bzero', ), - environ=dict( - nohash=True, - usedefault=True, - ), - grad_file=dict( - argstr='-grad %s', - extensions=None, - xor=['grad_fsl'], - ), - grad_fsl=dict( - argstr='-fslgrad %s %s', - xor=['grad_file'], - ), - in_bval=dict(extensions=None, ), - in_bvec=dict( - argstr='-fslgrad %s %s', - extensions=None, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - nobzero=dict(argstr='-no_bzero', ), - nthreads=dict( - argstr='-nthreads %d', - nohash=True, - ), - out_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - ), - shell=dict( - argstr='-shell %s', - sep=',', - ), - singleshell=dict(argstr='-singleshell', ), + args=dict(argstr="%s",), + bval_scale=dict(argstr="-bvalue_scaling %s",), + bzero=dict(argstr="-bzero",), + environ=dict(nohash=True, usedefault=True,), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), + grad_fsl=dict(argstr="-fslgrad %s %s", 
xor=["grad_file"],), + in_bval=dict(extensions=None,), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + nobzero=dict(argstr="-no_bzero",), + nthreads=dict(argstr="-nthreads %d", nohash=True,), + out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), + shell=dict(argstr="-shell %s", sep=",",), + singleshell=dict(argstr="-singleshell",), ) inputs = DWIExtract.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DWIExtract_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = DWIExtract.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py index 495257ca03..bfadae423f 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py @@ -4,113 +4,41 @@ def test_EstimateFOD_inputs(): input_map = dict( - algorithm=dict( - argstr='%s', - mandatory=True, - position=-8, - ), - args=dict(argstr='%s', ), - bval_scale=dict(argstr='-bvalue_scaling %s', ), - csf_odf=dict( - argstr='%s', - extensions=None, - position=-1, - usedefault=True, - ), - csf_txt=dict( - argstr='%s', - extensions=None, - position=-2, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gm_odf=dict( - argstr='%s', - extensions=None, - position=-3, - usedefault=True, - ), - gm_txt=dict( - argstr='%s', - extensions=None, - position=-4, - ), - grad_file=dict( - argstr='-grad %s', - extensions=None, - xor=['grad_fsl'], - ), - grad_fsl=dict( - argstr='-fslgrad %s %s', - xor=['grad_file'], - ), - in_bval=dict(extensions=None, ), - in_bvec=dict( - argstr='-fslgrad %s %s', - extensions=None, - ), - in_dirs=dict( - argstr='-directions %s', - extensions=None, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-7, - ), - mask_file=dict( - argstr='-mask %s', - extensions=None, - ), - max_sh=dict( - argstr='-lmax %s', - sep=',', - usedefault=True, - ), - nthreads=dict( - argstr='-nthreads %d', - nohash=True, - ), - shell=dict( - argstr='-shell %s', - sep=',', - ), + algorithm=dict(argstr="%s", mandatory=True, position=-8,), + args=dict(argstr="%s",), + bval_scale=dict(argstr="-bvalue_scaling %s",), + csf_odf=dict(argstr="%s", extensions=None, position=-1, usedefault=True,), + csf_txt=dict(argstr="%s", extensions=None, position=-2,), + environ=dict(nohash=True, usedefault=True,), + gm_odf=dict(argstr="%s", extensions=None, position=-3, usedefault=True,), + gm_txt=dict(argstr="%s", extensions=None, position=-4,), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), + in_bval=dict(extensions=None,), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), + in_dirs=dict(argstr="-directions %s", extensions=None,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-7,), + mask_file=dict(argstr="-mask %s", extensions=None,), + max_sh=dict(argstr="-lmax %s", sep=",", usedefault=True,), + nthreads=dict(argstr="-nthreads %d", nohash=True,), + shell=dict(argstr="-shell %s", sep=",",), wm_odf=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-5, - usedefault=True, - ), - wm_txt=dict( - 
argstr='%s', - extensions=None, - mandatory=True, - position=-6, + argstr="%s", extensions=None, mandatory=True, position=-5, usedefault=True, ), + wm_txt=dict(argstr="%s", extensions=None, mandatory=True, position=-6,), ) inputs = EstimateFOD.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_EstimateFOD_outputs(): output_map = dict( - csf_odf=dict( - argstr='%s', - extensions=None, - ), - gm_odf=dict( - argstr='%s', - extensions=None, - ), - wm_odf=dict( - argstr='%s', - extensions=None, - ), + csf_odf=dict(argstr="%s", extensions=None,), + gm_odf=dict(argstr="%s", extensions=None,), + wm_odf=dict(argstr="%s", extensions=None,), ) outputs = EstimateFOD.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py b/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py index 9491194e77..d586dbaf59 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py @@ -4,60 +4,31 @@ def test_FitTensor_inputs(): input_map = dict( - args=dict(argstr='%s', ), - bval_scale=dict(argstr='-bvalue_scaling %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - grad_file=dict( - argstr='-grad %s', - extensions=None, - xor=['grad_fsl'], - ), - grad_fsl=dict( - argstr='-fslgrad %s %s', - xor=['grad_file'], - ), - in_bval=dict(extensions=None, ), - in_bvec=dict( - argstr='-fslgrad %s %s', - extensions=None, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - in_mask=dict( - argstr='-mask %s', - extensions=None, - ), - method=dict(argstr='-method %s', ), - nthreads=dict( - argstr='-nthreads %d', - nohash=True, - ), + args=dict(argstr="%s",), + bval_scale=dict(argstr="-bvalue_scaling %s",), + environ=dict(nohash=True, usedefault=True,), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), + in_bval=dict(extensions=None,), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + in_mask=dict(argstr="-mask %s", extensions=None,), + method=dict(argstr="-method %s",), + nthreads=dict(argstr="-nthreads %d", nohash=True,), out_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - usedefault=True, - ), - reg_term=dict( - argstr='-regularisation %f', - max_ver='0.3.13', + argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True, ), + reg_term=dict(argstr="-regularisation %f", max_ver="0.3.13",), ) inputs = FitTensor.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FitTensor_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = FitTensor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py b/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py index d73ea20a1c..d41fd52a11 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py @@ -4,55 +4,27 @@ def test_Generate5tt_inputs(): input_map = dict( - algorithm=dict( - argstr='%s', - mandatory=True, - position=-3, - ), - args=dict(argstr='%s', ), - 
bval_scale=dict(argstr='-bvalue_scaling %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - grad_file=dict( - argstr='-grad %s', - extensions=None, - xor=['grad_fsl'], - ), - grad_fsl=dict( - argstr='-fslgrad %s %s', - xor=['grad_file'], - ), - in_bval=dict(extensions=None, ), - in_bvec=dict( - argstr='-fslgrad %s %s', - extensions=None, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - nthreads=dict( - argstr='-nthreads %d', - nohash=True, - ), - out_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - ), + algorithm=dict(argstr="%s", mandatory=True, position=-3,), + args=dict(argstr="%s",), + bval_scale=dict(argstr="-bvalue_scaling %s",), + environ=dict(nohash=True, usedefault=True,), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), + in_bval=dict(extensions=None,), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + nthreads=dict(argstr="-nthreads %d", nohash=True,), + out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), ) inputs = Generate5tt.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Generate5tt_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Generate5tt.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py index 90a0f25314..2c37a6bc93 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py @@ -4,61 +4,29 @@ def test_LabelConfig_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_config=dict( - argstr='%s', - extensions=None, - position=-2, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), - lut_aal=dict( - argstr='-lut_aal %s', - extensions=None, - ), - lut_basic=dict( - argstr='-lut_basic %s', - extensions=None, - ), - lut_fs=dict( - argstr='-lut_freesurfer %s', - extensions=None, - ), - lut_itksnap=dict( - argstr='-lut_itksnap %s', - extensions=None, - ), - nthreads=dict( - argstr='-nthreads %d', - nohash=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_config=dict(argstr="%s", extensions=None, position=-2,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + lut_aal=dict(argstr="-lut_aal %s", extensions=None,), + lut_basic=dict(argstr="-lut_basic %s", extensions=None,), + lut_fs=dict(argstr="-lut_freesurfer %s", extensions=None,), + lut_itksnap=dict(argstr="-lut_itksnap %s", extensions=None,), + nthreads=dict(argstr="-nthreads %d", nohash=True,), out_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - usedefault=True, - ), - spine=dict( - argstr='-spine %s', - extensions=None, + argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True, ), + spine=dict(argstr="-spine %s", extensions=None,), ) inputs = LabelConfig.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + 
def test_LabelConfig_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = LabelConfig.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py index 6d4a6c8dcb..9db9bb4df5 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py @@ -4,51 +4,26 @@ def test_LabelConvert_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_config=dict( - argstr='%s', - extensions=None, - position=-2, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-4, - ), - in_lut=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), - num_threads=dict( - argstr='-nthreads %d', - nohash=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_config=dict(argstr="%s", extensions=None, position=-2,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-4,), + in_lut=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + num_threads=dict(argstr="-nthreads %d", nohash=True,), out_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - usedefault=True, - ), - spine=dict( - argstr='-spine %s', - extensions=None, + argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True, ), + spine=dict(argstr="-spine %s", extensions=None,), ) inputs = LabelConvert.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_LabelConvert_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = LabelConvert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py index b959c22546..2440113e20 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py @@ -4,67 +4,32 @@ def test_MRConvert_inputs(): input_map = dict( - args=dict(argstr='%s', ), - axes=dict( - argstr='-axes %s', - sep=',', - ), - bval_scale=dict(argstr='-bvalue_scaling %s', ), - coord=dict( - argstr='-coord %s', - sep=' ', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - grad_file=dict( - argstr='-grad %s', - extensions=None, - xor=['grad_fsl'], - ), - grad_fsl=dict( - argstr='-fslgrad %s %s', - xor=['grad_file'], - ), - in_bval=dict(extensions=None, ), - in_bvec=dict( - argstr='-fslgrad %s %s', - extensions=None, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - nthreads=dict( - argstr='-nthreads %d', - nohash=True, - ), + args=dict(argstr="%s",), + axes=dict(argstr="-axes %s", sep=",",), + bval_scale=dict(argstr="-bvalue_scaling %s",), + coord=dict(argstr="-coord %s", sep=" ",), + environ=dict(nohash=True, usedefault=True,), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), + in_bval=dict(extensions=None,), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + 
nthreads=dict(argstr="-nthreads %d", nohash=True,), out_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - usedefault=True, - ), - scaling=dict( - argstr='-scaling %s', - sep=',', - ), - vox=dict( - argstr='-vox %s', - sep=',', + argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True, ), + scaling=dict(argstr="-scaling %s", sep=",",), + vox=dict(argstr="-vox %s", sep=",",), ) inputs = MRConvert.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MRConvert_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MRConvert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py index 010fbb8a25..439e834eb2 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py @@ -4,61 +4,25 @@ def test_MRDeGibbs_inputs(): input_map = dict( - args=dict(argstr='%s', ), - axes=dict( - argstr='-axes %s', - maxlen=2, - minlen=2, - sep=',', - usedefault=True, - ), - bval_scale=dict(argstr='-bvalue_scaling %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - grad_file=dict( - argstr='-grad %s', - extensions=None, - xor=['grad_fsl'], - ), - grad_fsl=dict( - argstr='-fslgrad %s %s', - xor=['grad_file'], - ), - in_bval=dict(extensions=None, ), - in_bvec=dict( - argstr='-fslgrad %s %s', - extensions=None, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - maxW=dict( - argstr='-maxW %d', - usedefault=True, - ), - minW=dict( - argstr='-minW %d', - usedefault=True, - ), - nshifts=dict( - argstr='-nshifts %d', - usedefault=True, - ), - nthreads=dict( - argstr='-nthreads %d', - nohash=True, - ), + args=dict(argstr="%s",), + axes=dict(argstr="-axes %s", maxlen=2, minlen=2, sep=",", usedefault=True,), + bval_scale=dict(argstr="-bvalue_scaling %s",), + environ=dict(nohash=True, usedefault=True,), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), + in_bval=dict(extensions=None,), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + maxW=dict(argstr="-maxW %d", usedefault=True,), + minW=dict(argstr="-minW %d", usedefault=True,), + nshifts=dict(argstr="-nshifts %d", usedefault=True,), + nthreads=dict(argstr="-nthreads %d", nohash=True,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_unr', + name_source="in_file", + name_template="%s_unr", position=-1, ), ) @@ -67,8 +31,10 @@ def test_MRDeGibbs_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MRDeGibbs_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MRDeGibbs.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py index d2a1d057b8..33f9c82d22 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py +++ 
b/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py @@ -4,56 +4,28 @@ def test_MRMath_inputs(): input_map = dict( - args=dict(argstr='%s', ), - axis=dict(argstr='-axis %d', ), - bval_scale=dict(argstr='-bvalue_scaling %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - grad_file=dict( - argstr='-grad %s', - extensions=None, - xor=['grad_fsl'], - ), - grad_fsl=dict( - argstr='-fslgrad %s %s', - xor=['grad_file'], - ), - in_bval=dict(extensions=None, ), - in_bvec=dict( - argstr='-fslgrad %s %s', - extensions=None, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), - nthreads=dict( - argstr='-nthreads %d', - nohash=True, - ), - operation=dict( - argstr='%s', - mandatory=True, - position=-2, - ), - out_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - ), + args=dict(argstr="%s",), + axis=dict(argstr="-axis %d",), + bval_scale=dict(argstr="-bvalue_scaling %s",), + environ=dict(nohash=True, usedefault=True,), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), + in_bval=dict(extensions=None,), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + nthreads=dict(argstr="-nthreads %d", nohash=True,), + operation=dict(argstr="%s", mandatory=True, position=-2,), + out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), ) inputs = MRMath.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MRMath_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MRMath.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py index 3c904259b5..cf41dfe856 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py @@ -4,62 +4,34 @@ def test_MRResize_inputs(): input_map = dict( - args=dict(argstr='%s', ), - bval_scale=dict(argstr='-bvalue_scaling %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - grad_file=dict( - argstr='-grad %s', - extensions=None, - xor=['grad_fsl'], - ), - grad_fsl=dict( - argstr='-fslgrad %s %s', - xor=['grad_file'], - ), + args=dict(argstr="%s",), + bval_scale=dict(argstr="-bvalue_scaling %s",), + environ=dict(nohash=True, usedefault=True,), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), image_size=dict( - argstr='-size %d,%d,%d', - mandatory=True, - xor=['voxel_size', 'scale_factor'], - ), - in_bval=dict(extensions=None, ), - in_bvec=dict( - argstr='-fslgrad %s %s', - extensions=None, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - interpolation=dict( - argstr='-interp %s', - usedefault=True, - ), - nthreads=dict( - argstr='-nthreads %d', - nohash=True, + argstr="-size %d,%d,%d", mandatory=True, xor=["voxel_size", "scale_factor"], ), + in_bval=dict(extensions=None,), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + interpolation=dict(argstr="-interp %s", usedefault=True,), + nthreads=dict(argstr="-nthreads 
%d", nohash=True,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, keep_extension=True, - name_source=['in_file'], - name_template='%s_resized', + name_source=["in_file"], + name_template="%s_resized", position=-1, ), scale_factor=dict( - argstr='-scale %g,%g,%g', - mandatory=True, - xor=['image_size', 'voxel_size'], + argstr="-scale %g,%g,%g", mandatory=True, xor=["image_size", "voxel_size"], ), voxel_size=dict( - argstr='-voxel %g,%g,%g', + argstr="-voxel %g,%g,%g", mandatory=True, - xor=['image_size', 'scale_factor'], + xor=["image_size", "scale_factor"], ), ) inputs = MRResize.input_spec() @@ -67,8 +39,10 @@ def test_MRResize_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MRResize_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MRResize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py index ed91143ab8..a5042e58d9 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py @@ -4,11 +4,7 @@ def test_MRTrix3Base_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), environ=dict(nohash=True, usedefault=True,), ) inputs = MRTrix3Base.input_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py b/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py index 92a5349d04..602d3c0228 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py @@ -4,42 +4,24 @@ def test_Mesh2PVE_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), - in_first=dict( - argstr='-first %s', - extensions=None, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + in_first=dict(argstr="-first %s", extensions=None,), out_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - usedefault=True, - ), - reference=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, + argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True, ), + reference=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), ) inputs = Mesh2PVE.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Mesh2PVE_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Mesh2PVE.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py b/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py index 4a78432f5e..802f2fd64a 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py @@ -4,34 +4,13 @@ def test_ReplaceFSwithFIRST_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - 
nohash=True, - usedefault=True, - ), - in_config=dict( - argstr='%s', - extensions=None, - position=-2, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-4, - ), - in_t1w=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-3, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_config=dict(argstr="%s", extensions=None, position=-2,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-4,), + in_t1w=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), out_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - usedefault=True, + argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True, ), ) inputs = ReplaceFSwithFIRST.input_spec() @@ -39,8 +18,10 @@ def test_ReplaceFSwithFIRST_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ReplaceFSwithFIRST_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = ReplaceFSwithFIRST.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py b/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py index fee3dce67e..b35f6529e7 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py @@ -4,90 +4,35 @@ def test_ResponseSD_inputs(): input_map = dict( - algorithm=dict( - argstr='%s', - mandatory=True, - position=1, - ), - args=dict(argstr='%s', ), - bval_scale=dict(argstr='-bvalue_scaling %s', ), - csf_file=dict( - argstr='%s', - extensions=None, - position=-1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gm_file=dict( - argstr='%s', - extensions=None, - position=-2, - ), - grad_file=dict( - argstr='-grad %s', - extensions=None, - xor=['grad_fsl'], - ), - grad_fsl=dict( - argstr='-fslgrad %s %s', - xor=['grad_file'], - ), - in_bval=dict(extensions=None, ), - in_bvec=dict( - argstr='-fslgrad %s %s', - extensions=None, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-5, - ), - in_mask=dict( - argstr='-mask %s', - extensions=None, - ), - max_sh=dict( - argstr='-lmax %s', - sep=',', - ), - mtt_file=dict( - argstr='%s', - extensions=None, - position=-4, - ), - nthreads=dict( - argstr='-nthreads %d', - nohash=True, - ), - wm_file=dict( - argstr='%s', - extensions=None, - position=-3, - usedefault=True, - ), + algorithm=dict(argstr="%s", mandatory=True, position=1,), + args=dict(argstr="%s",), + bval_scale=dict(argstr="-bvalue_scaling %s",), + csf_file=dict(argstr="%s", extensions=None, position=-1,), + environ=dict(nohash=True, usedefault=True,), + gm_file=dict(argstr="%s", extensions=None, position=-2,), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), + in_bval=dict(extensions=None,), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-5,), + in_mask=dict(argstr="-mask %s", extensions=None,), + max_sh=dict(argstr="-lmax %s", sep=",",), + mtt_file=dict(argstr="%s", extensions=None, position=-4,), + nthreads=dict(argstr="-nthreads %d", nohash=True,), + wm_file=dict(argstr="%s", extensions=None, position=-3, usedefault=True,), ) inputs = ResponseSD.input_spec() for 
key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ResponseSD_outputs(): output_map = dict( - csf_file=dict( - argstr='%s', - extensions=None, - ), - gm_file=dict( - argstr='%s', - extensions=None, - ), - wm_file=dict( - argstr='%s', - extensions=None, - ), + csf_file=dict(argstr="%s", extensions=None,), + gm_file=dict(argstr="%s", extensions=None,), + wm_file=dict(argstr="%s", extensions=None,), ) outputs = ResponseSD.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py b/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py index d841a3fc6f..2c72dee012 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py @@ -4,43 +4,23 @@ def test_TCK2VTK_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - nthreads=dict( - argstr='-nthreads %d', - nohash=True, - ), - out_file=dict( - argstr='%s', - extensions=None, - position=-1, - usedefault=True, - ), - reference=dict( - argstr='-image %s', - extensions=None, - ), - voxel=dict( - argstr='-image %s', - extensions=None, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + nthreads=dict(argstr="-nthreads %d", nohash=True,), + out_file=dict(argstr="%s", extensions=None, position=-1, usedefault=True,), + reference=dict(argstr="-image %s", extensions=None,), + voxel=dict(argstr="-image %s", extensions=None,), ) inputs = TCK2VTK.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TCK2VTK_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = TCK2VTK.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py b/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py index 45da9dbf1c..be6736cecb 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py @@ -4,55 +4,30 @@ def test_TensorMetrics_inputs(): input_map = dict( - args=dict(argstr='%s', ), - component=dict( - argstr='-num %s', - sep=',', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - ), - in_mask=dict( - argstr='-mask %s', - extensions=None, - ), - modulate=dict(argstr='-modulate %s', ), - out_adc=dict( - argstr='-adc %s', - extensions=None, - ), - out_eval=dict( - argstr='-value %s', - extensions=None, - ), - out_evec=dict( - argstr='-vector %s', - extensions=None, - ), - out_fa=dict( - argstr='-fa %s', - extensions=None, - ), + args=dict(argstr="%s",), + component=dict(argstr="-num %s", sep=",", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), + in_mask=dict(argstr="-mask %s", extensions=None,), + modulate=dict(argstr="-modulate %s",), + out_adc=dict(argstr="-adc %s", extensions=None,), + out_eval=dict(argstr="-value %s", extensions=None,), + out_evec=dict(argstr="-vector %s", extensions=None,), + 
out_fa=dict(argstr="-fa %s", extensions=None,), ) inputs = TensorMetrics.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TensorMetrics_outputs(): output_map = dict( - out_adc=dict(extensions=None, ), - out_eval=dict(extensions=None, ), - out_evec=dict(extensions=None, ), - out_fa=dict(extensions=None, ), + out_adc=dict(extensions=None,), + out_eval=dict(extensions=None,), + out_evec=dict(extensions=None,), + out_fa=dict(extensions=None,), ) outputs = TensorMetrics.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py index f8239c5341..9c1b51d363 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py @@ -4,126 +4,71 @@ def test_Tractography_inputs(): input_map = dict( - act_file=dict( - argstr='-act %s', - extensions=None, - ), - algorithm=dict( - argstr='-algorithm %s', - usedefault=True, - ), - angle=dict(argstr='-angle %f', ), - args=dict(argstr='%s', ), - backtrack=dict(argstr='-backtrack', ), - bval_scale=dict(argstr='-bvalue_scaling %s', ), - crop_at_gmwmi=dict(argstr='-crop_at_gmwmi', ), - cutoff=dict(argstr='-cutoff %f', ), - cutoff_init=dict(argstr='-initcutoff %f', ), - downsample=dict(argstr='-downsample %f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - grad_file=dict( - argstr='-grad %s', - extensions=None, - xor=['grad_fsl'], - ), - grad_fsl=dict( - argstr='-fslgrad %s %s', - xor=['grad_file'], - ), - in_bval=dict(extensions=None, ), - in_bvec=dict( - argstr='-fslgrad %s %s', - extensions=None, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-2, - ), - init_dir=dict(argstr='-initdirection %f,%f,%f', ), - max_length=dict(argstr='-maxlength %f', ), - max_seed_attempts=dict(argstr='-max_seed_attempts %d', ), - max_tracks=dict(argstr='-maxnum %d', ), - min_length=dict(argstr='-minlength %f', ), - n_samples=dict( - argstr='-samples %d', - usedefault=True, - ), - n_tracks=dict( - argstr='-number %d', - max_ver='0.4', - ), - n_trials=dict(argstr='-trials %d', ), - noprecompt=dict(argstr='-noprecomputed', ), - nthreads=dict( - argstr='-nthreads %d', - nohash=True, - ), + act_file=dict(argstr="-act %s", extensions=None,), + algorithm=dict(argstr="-algorithm %s", usedefault=True,), + angle=dict(argstr="-angle %f",), + args=dict(argstr="%s",), + backtrack=dict(argstr="-backtrack",), + bval_scale=dict(argstr="-bvalue_scaling %s",), + crop_at_gmwmi=dict(argstr="-crop_at_gmwmi",), + cutoff=dict(argstr="-cutoff %f",), + cutoff_init=dict(argstr="-initcutoff %f",), + downsample=dict(argstr="-downsample %f",), + environ=dict(nohash=True, usedefault=True,), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), + in_bval=dict(extensions=None,), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + init_dir=dict(argstr="-initdirection %f,%f,%f",), + max_length=dict(argstr="-maxlength %f",), + max_seed_attempts=dict(argstr="-max_seed_attempts %d",), + max_tracks=dict(argstr="-maxnum %d",), + min_length=dict(argstr="-minlength %f",), + n_samples=dict(argstr="-samples %d", usedefault=True,), + n_tracks=dict(argstr="-number %d", max_ver="0.4",), + n_trials=dict(argstr="-trials %d",), + 
noprecompt=dict(argstr="-noprecomputed",), + nthreads=dict(argstr="-nthreads %d", nohash=True,), out_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=-1, - usedefault=True, - ), - out_seeds=dict( - argstr='-output_seeds %s', - extensions=None, - usedefault=True, - ), - power=dict(argstr='-power %d', ), - roi_excl=dict(argstr='-exclude %s', ), - roi_incl=dict(argstr='-include %s', ), - roi_mask=dict(argstr='-mask %s', ), - seed_dynamic=dict( - argstr='-seed_dynamic %s', - extensions=None, - ), + argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True, + ), + out_seeds=dict(argstr="-output_seeds %s", extensions=None, usedefault=True,), + power=dict(argstr="-power %d",), + roi_excl=dict(argstr="-exclude %s",), + roi_incl=dict(argstr="-include %s",), + roi_mask=dict(argstr="-mask %s",), + seed_dynamic=dict(argstr="-seed_dynamic %s", extensions=None,), seed_gmwmi=dict( - argstr='-seed_gmwmi %s', - extensions=None, - requires=['act_file'], + argstr="-seed_gmwmi %s", extensions=None, requires=["act_file"], ), seed_grid_voxel=dict( - argstr='-seed_grid_per_voxel %s %d', - xor=['seed_image', 'seed_rnd_voxel'], - ), - seed_image=dict( - argstr='-seed_image %s', - extensions=None, - ), - seed_rejection=dict( - argstr='-seed_rejection %s', - extensions=None, + argstr="-seed_grid_per_voxel %s %d", xor=["seed_image", "seed_rnd_voxel"], ), + seed_image=dict(argstr="-seed_image %s", extensions=None,), + seed_rejection=dict(argstr="-seed_rejection %s", extensions=None,), seed_rnd_voxel=dict( - argstr='-seed_random_per_voxel %s %d', - xor=['seed_image', 'seed_grid_voxel'], - ), - seed_sphere=dict(argstr='-seed_sphere %f,%f,%f,%f', ), - select=dict( - argstr='-select %d', - min_ver='3', - ), - sph_trait=dict(argstr='%f,%f,%f,%f', ), - step_size=dict(argstr='-step %f', ), - stop=dict(argstr='-stop', ), - unidirectional=dict(argstr='-unidirectional', ), - use_rk4=dict(argstr='-rk4', ), + argstr="-seed_random_per_voxel %s %d", + xor=["seed_image", "seed_grid_voxel"], + ), + seed_sphere=dict(argstr="-seed_sphere %f,%f,%f,%f",), + select=dict(argstr="-select %d", min_ver="3",), + sph_trait=dict(argstr="%f,%f,%f,%f",), + step_size=dict(argstr="-step %f",), + stop=dict(argstr="-stop",), + unidirectional=dict(argstr="-unidirectional",), + use_rk4=dict(argstr="-rk4",), ) inputs = Tractography.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Tractography_outputs(): output_map = dict( - out_file=dict(extensions=None, ), - out_seeds=dict(extensions=None, ), + out_file=dict(extensions=None,), out_seeds=dict(extensions=None,), ) outputs = Tractography.output_spec() diff --git a/nipype/interfaces/mrtrix3/tracking.py b/nipype/interfaces/mrtrix3/tracking.py index fb7d8e7375..e71d9cd37a 100644 --- a/nipype/interfaces/mrtrix3/tracking.py +++ b/nipype/interfaces/mrtrix3/tracking.py @@ -10,155 +10,217 @@ class TractographyInputSpec(MRTrix3BaseInputSpec): sph_trait = traits.Tuple( - traits.Float, - traits.Float, - traits.Float, - traits.Float, - argstr='%f,%f,%f,%f') + traits.Float, traits.Float, traits.Float, traits.Float, argstr="%f,%f,%f,%f" + ) in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-2, - desc='input file to be processed') + desc="input file to be processed", + ) out_file = File( - 'tracked.tck', - argstr='%s', + "tracked.tck", + argstr="%s", mandatory=True, position=-1, usedefault=True, - desc='output file 
containing tracks') + desc="output file containing tracks", + ) algorithm = traits.Enum( - 'iFOD2', - 'FACT', - 'iFOD1', - 'Nulldist', - 'SD_Stream', - 'Tensor_Det', - 'Tensor_Prob', + "iFOD2", + "FACT", + "iFOD1", + "Nulldist", + "SD_Stream", + "Tensor_Det", + "Tensor_Prob", usedefault=True, - argstr='-algorithm %s', - desc='tractography algorithm to be used') + argstr="-algorithm %s", + desc="tractography algorithm to be used", + ) # ROIs processing options roi_incl = traits.Either( File(exists=True), sph_trait, - argstr='-include %s', - desc=('specify an inclusion region of interest, streamlines must' - ' traverse ALL inclusion regions to be accepted')) + argstr="-include %s", + desc=( + "specify an inclusion region of interest, streamlines must" + " traverse ALL inclusion regions to be accepted" + ), + ) roi_excl = traits.Either( File(exists=True), sph_trait, - argstr='-exclude %s', - desc=('specify an exclusion region of interest, streamlines that' - ' enter ANY exclude region will be discarded')) + argstr="-exclude %s", + desc=( + "specify an exclusion region of interest, streamlines that" + " enter ANY exclude region will be discarded" + ), + ) roi_mask = traits.Either( File(exists=True), sph_trait, - argstr='-mask %s', - desc=('specify a masking region of interest. If defined,' - 'streamlines exiting the mask will be truncated')) + argstr="-mask %s", + desc=( + "specify a masking region of interest. If defined," + "streamlines exiting the mask will be truncated" + ), + ) # Streamlines tractography options step_size = traits.Float( - argstr='-step %f', - desc=('set the step size of the algorithm in mm (default is 0.1' - ' x voxelsize; for iFOD2: 0.5 x voxelsize)')) + argstr="-step %f", + desc=( + "set the step size of the algorithm in mm (default is 0.1" + " x voxelsize; for iFOD2: 0.5 x voxelsize)" + ), + ) angle = traits.Float( - argstr='-angle %f', - desc=('set the maximum angle between successive steps (default ' - 'is 90deg x stepsize / voxelsize)')) + argstr="-angle %f", + desc=( + "set the maximum angle between successive steps (default " + "is 90deg x stepsize / voxelsize)" + ), + ) n_tracks = traits.Int( - argstr='-number %d', - max_ver='0.4', - desc=('set the desired number of tracks. The program will continue' - ' to generate tracks until this number of tracks have been ' - 'selected and written to the output file')) + argstr="-number %d", + max_ver="0.4", + desc=( + "set the desired number of tracks. The program will continue" + " to generate tracks until this number of tracks have been " + "selected and written to the output file" + ), + ) select = traits.Int( - argstr='-select %d', - min_ver='3', - desc=('set the desired number of tracks. The program will continue' - ' to generate tracks until this number of tracks have been ' - 'selected and written to the output file')) + argstr="-select %d", + min_ver="3", + desc=( + "set the desired number of tracks. The program will continue" + " to generate tracks until this number of tracks have been " + "selected and written to the output file" + ), + ) max_tracks = traits.Int( - argstr='-maxnum %d', - desc=('set the maximum number of tracks to generate. The program ' - 'will not generate more tracks than this number, even if ' - 'the desired number of tracks hasn\'t yet been reached ' - '(default is 100 x number)')) + argstr="-maxnum %d", + desc=( + "set the maximum number of tracks to generate. 
The program " + "will not generate more tracks than this number, even if " + "the desired number of tracks hasn't yet been reached " + "(default is 100 x number)" + ), + ) max_length = traits.Float( - argstr='-maxlength %f', - desc=('set the maximum length of any track in mm (default is ' - '100 x voxelsize)')) + argstr="-maxlength %f", + desc=( + "set the maximum length of any track in mm (default is " "100 x voxelsize)" + ), + ) min_length = traits.Float( - argstr='-minlength %f', - desc=('set the minimum length of any track in mm (default is ' - '5 x voxelsize)')) + argstr="-minlength %f", + desc=( + "set the minimum length of any track in mm (default is " "5 x voxelsize)" + ), + ) cutoff = traits.Float( - argstr='-cutoff %f', - desc=('set the FA or FOD amplitude cutoff for terminating ' - 'tracks (default is 0.1)')) + argstr="-cutoff %f", + desc=( + "set the FA or FOD amplitude cutoff for terminating " + "tracks (default is 0.1)" + ), + ) cutoff_init = traits.Float( - argstr='-initcutoff %f', - desc=('set the minimum FA or FOD amplitude for initiating ' - 'tracks (default is the same as the normal cutoff)')) + argstr="-initcutoff %f", + desc=( + "set the minimum FA or FOD amplitude for initiating " + "tracks (default is the same as the normal cutoff)" + ), + ) n_trials = traits.Int( - argstr='-trials %d', - desc=('set the maximum number of sampling trials at each point' - ' (only used for probabilistic tracking)')) + argstr="-trials %d", + desc=( + "set the maximum number of sampling trials at each point" + " (only used for probabilistic tracking)" + ), + ) unidirectional = traits.Bool( - argstr='-unidirectional', - desc=('track from the seed point in one direction only ' - '(default is to track in both directions)')) + argstr="-unidirectional", + desc=( + "track from the seed point in one direction only " + "(default is to track in both directions)" + ), + ) init_dir = traits.Tuple( traits.Float, traits.Float, traits.Float, - argstr='-initdirection %f,%f,%f', - desc=('specify an initial direction for the tracking (this ' - 'should be supplied as a vector of 3 comma-separated values')) + argstr="-initdirection %f,%f,%f", + desc=( + "specify an initial direction for the tracking (this " + "should be supplied as a vector of 3 comma-separated values" + ), + ) noprecompt = traits.Bool( - argstr='-noprecomputed', - desc=('do NOT pre-compute legendre polynomial values. Warning: this ' - 'will slow down the algorithm by a factor of approximately 4')) + argstr="-noprecomputed", + desc=( + "do NOT pre-compute legendre polynomial values. 
Warning: this " + "will slow down the algorithm by a factor of approximately 4" + ), + ) power = traits.Int( - argstr='-power %d', - desc=('raise the FOD to the power specified (default is 1/nsamples)')) + argstr="-power %d", + desc=("raise the FOD to the power specified (default is 1/nsamples)"), + ) n_samples = traits.Int( - 4, usedefault=True, - argstr='-samples %d', - desc=('set the number of FOD samples to take per step for the 2nd ' - 'order (iFOD2) method')) + 4, + usedefault=True, + argstr="-samples %d", + desc=( + "set the number of FOD samples to take per step for the 2nd " + "order (iFOD2) method" + ), + ) use_rk4 = traits.Bool( - argstr='-rk4', - desc=('use 4th-order Runge-Kutta integration (slower, but eliminates' - ' curvature overshoot in 1st-order deterministic methods)')) + argstr="-rk4", + desc=( + "use 4th-order Runge-Kutta integration (slower, but eliminates" + " curvature overshoot in 1st-order deterministic methods)" + ), + ) stop = traits.Bool( - argstr='-stop', - desc=('stop propagating a streamline once it has traversed all ' - 'include regions')) + argstr="-stop", + desc=( + "stop propagating a streamline once it has traversed all " "include regions" + ), + ) downsample = traits.Float( - argstr='-downsample %f', - desc='downsample the generated streamlines to reduce output file size') + argstr="-downsample %f", + desc="downsample the generated streamlines to reduce output file size", + ) # Anatomically-Constrained Tractography options act_file = File( exists=True, - argstr='-act %s', - desc=('use the Anatomically-Constrained Tractography framework during' - ' tracking; provided image must be in the 5TT ' - '(five - tissue - type) format')) - backtrack = traits.Bool( - argstr='-backtrack', desc='allow tracks to be truncated') + argstr="-act %s", + desc=( + "use the Anatomically-Constrained Tractography framework during" + " tracking; provided image must be in the 5TT " + "(five - tissue - type) format" + ), + ) + backtrack = traits.Bool(argstr="-backtrack", desc="allow tracks to be truncated") crop_at_gmwmi = traits.Bool( - argstr='-crop_at_gmwmi', - desc=('crop streamline endpoints more ' - 'precisely as they cross the GM-WM interface')) + argstr="-crop_at_gmwmi", + desc=( + "crop streamline endpoints more " + "precisely as they cross the GM-WM interface" + ), + ) # Tractography seeding options seed_sphere = traits.Tuple( @@ -166,64 +228,85 @@ class TractographyInputSpec(MRTrix3BaseInputSpec): traits.Float, traits.Float, traits.Float, - argstr='-seed_sphere %f,%f,%f,%f', - desc='spherical seed') + argstr="-seed_sphere %f,%f,%f,%f", + desc="spherical seed", + ) seed_image = File( exists=True, - argstr='-seed_image %s', - desc='seed streamlines entirely at random within mask') + argstr="-seed_image %s", + desc="seed streamlines entirely at random within mask", + ) seed_rnd_voxel = traits.Tuple( File(exists=True), traits.Int(), - argstr='-seed_random_per_voxel %s %d', - xor=['seed_image', 'seed_grid_voxel'], - desc=('seed a fixed number of streamlines per voxel in a mask ' - 'image; random placement of seeds in each voxel')) + argstr="-seed_random_per_voxel %s %d", + xor=["seed_image", "seed_grid_voxel"], + desc=( + "seed a fixed number of streamlines per voxel in a mask " + "image; random placement of seeds in each voxel" + ), + ) seed_grid_voxel = traits.Tuple( File(exists=True), traits.Int(), - argstr='-seed_grid_per_voxel %s %d', - xor=['seed_image', 'seed_rnd_voxel'], - desc=('seed a fixed number of streamlines per voxel in a mask ' - 'image; place seeds on a 3D 
mesh grid (grid_size argument ' - 'is per axis; so a grid_size of 3 results in 27 seeds per' - ' voxel)')) + argstr="-seed_grid_per_voxel %s %d", + xor=["seed_image", "seed_rnd_voxel"], + desc=( + "seed a fixed number of streamlines per voxel in a mask " + "image; place seeds on a 3D mesh grid (grid_size argument " + "is per axis; so a grid_size of 3 results in 27 seeds per" + " voxel)" + ), + ) seed_rejection = File( exists=True, - argstr='-seed_rejection %s', - desc=('seed from an image using rejection sampling (higher ' - 'values = more probable to seed from')) + argstr="-seed_rejection %s", + desc=( + "seed from an image using rejection sampling (higher " + "values = more probable to seed from" + ), + ) seed_gmwmi = File( exists=True, - argstr='-seed_gmwmi %s', - requires=['act_file'], - desc=('seed from the grey matter - white matter interface (only ' - 'valid if using ACT framework)')) + argstr="-seed_gmwmi %s", + requires=["act_file"], + desc=( + "seed from the grey matter - white matter interface (only " + "valid if using ACT framework)" + ), + ) seed_dynamic = File( exists=True, - argstr='-seed_dynamic %s', - desc=('determine seed points dynamically using the SIFT model ' - '(must not provide any other seeding mechanism). Note that' - ' while this seeding mechanism improves the distribution of' - ' reconstructed streamlines density, it should NOT be used ' - 'as a substitute for the SIFT method itself.')) + argstr="-seed_dynamic %s", + desc=( + "determine seed points dynamically using the SIFT model " + "(must not provide any other seeding mechanism). Note that" + " while this seeding mechanism improves the distribution of" + " reconstructed streamlines density, it should NOT be used " + "as a substitute for the SIFT method itself." + ), + ) max_seed_attempts = traits.Int( - argstr='-max_seed_attempts %d', - desc=('set the maximum number of times that the tracking ' - 'algorithm should attempt to find an appropriate tracking' - ' direction from a given seed point')) + argstr="-max_seed_attempts %d", + desc=( + "set the maximum number of times that the tracking " + "algorithm should attempt to find an appropriate tracking" + " direction from a given seed point" + ), + ) out_seeds = File( - 'out_seeds.nii.gz', usedefault=True, - argstr='-output_seeds %s', - desc=('output the seed location of all successful streamlines to' - ' a file')) + "out_seeds.nii.gz", + usedefault=True, + argstr="-output_seeds %s", + desc=("output the seed location of all successful streamlines to" " a file"), + ) class TractographyOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='the output filtered tracks') + out_file = File(exists=True, desc="the output filtered tracks") out_seeds = File( - desc=('output the seed location of all successful' - ' streamlines to a file')) + desc=("output the seed location of all successful" " streamlines to a file") + ) class Tractography(MRTrix3Base): @@ -273,18 +356,18 @@ class Tractography(MRTrix3Base): >>> tk.run() # doctest: +SKIP """ - _cmd = 'tckgen' + _cmd = "tckgen" input_spec = TractographyInputSpec output_spec = TractographyOutputSpec def _format_arg(self, name, trait_spec, value): - if 'roi_' in name and isinstance(value, tuple): - value = ['%f' % v for v in value] - return trait_spec.argstr % ','.join(value) + if "roi_" in name and isinstance(value, tuple): + value = ["%f" % v for v in value] + return trait_spec.argstr % ",".join(value) return super(Tractography, self)._format_arg(name, trait_spec, value) def _list_outputs(self): outputs = 
self.output_spec().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) + outputs["out_file"] = op.abspath(self.inputs.out_file) return outputs diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index 52f5caab64..d13b5d0ce7 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -4,29 +4,38 @@ import os.path as op -from ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec, - File, InputMultiPath, isdefined) +from ..base import ( + CommandLineInputSpec, + CommandLine, + traits, + TraitedSpec, + File, + InputMultiPath, + isdefined, +) from .base import MRTrix3BaseInputSpec, MRTrix3Base class BrainMaskInputSpec(MRTrix3BaseInputSpec): in_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-2, - desc='input diffusion weighted images') + desc="input diffusion weighted images", + ) out_file = File( - 'brainmask.mif', - argstr='%s', + "brainmask.mif", + argstr="%s", mandatory=True, position=-1, usedefault=True, - desc='output brain mask') + desc="output brain mask", + ) class BrainMaskOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='the output response file') + out_file = File(exists=True, desc="the output response file") class BrainMask(CommandLine): @@ -45,45 +54,45 @@ class BrainMask(CommandLine): >>> bmsk.run() # doctest: +SKIP """ - _cmd = 'dwi2mask' + _cmd = "dwi2mask" input_spec = BrainMaskInputSpec output_spec = BrainMaskOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) + outputs["out_file"] = op.abspath(self.inputs.out_file) return outputs class Mesh2PVEInputSpec(CommandLineInputSpec): in_file = File( - exists=True, - argstr='%s', - mandatory=True, - position=-3, - desc='input mesh') + exists=True, argstr="%s", mandatory=True, position=-3, desc="input mesh" + ) reference = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=-2, - desc='input reference image') + desc="input reference image", + ) in_first = File( exists=True, - argstr='-first %s', - desc='indicates that the mesh file is provided by FSL FIRST') + argstr="-first %s", + desc="indicates that the mesh file is provided by FSL FIRST", + ) out_file = File( - 'mesh2volume.nii.gz', - argstr='%s', + "mesh2volume.nii.gz", + argstr="%s", mandatory=True, position=-1, usedefault=True, - desc='output file containing SH coefficients') + desc="output file containing SH coefficients", + ) class Mesh2PVEOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='the output response file') + out_file = File(exists=True, desc="the output response file") class Mesh2PVE(CommandLine): @@ -104,37 +113,34 @@ class Mesh2PVE(CommandLine): >>> m2p.run() # doctest: +SKIP """ - _cmd = 'mesh2pve' + _cmd = "mesh2pve" input_spec = Mesh2PVEInputSpec output_spec = Mesh2PVEOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) + outputs["out_file"] = op.abspath(self.inputs.out_file) return outputs class Generate5ttInputSpec(MRTrix3BaseInputSpec): algorithm = traits.Enum( - 'fsl', - 'gif', - 'freesurfer', - argstr='%s', + "fsl", + "gif", + "freesurfer", + argstr="%s", position=-3, mandatory=True, - desc='tissue segmentation algorithm') + desc="tissue segmentation algorithm", + ) in_file = File( - exists=True, - argstr='%s', - mandatory=True, - position=-2, - desc='input image') - out_file = File( - argstr='%s', mandatory=True, position=-1, desc='output 
image') + exists=True, argstr="%s", mandatory=True, position=-2, desc="input image" + ) + out_file = File(argstr="%s", mandatory=True, position=-1, desc="output image") class Generate5ttOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='output image') + out_file = File(exists=True, desc="output image") class Generate5tt(MRTrix3Base): @@ -155,56 +161,56 @@ class Generate5tt(MRTrix3Base): >>> gen5tt.run() # doctest: +SKIP """ - _cmd = '5ttgen' + _cmd = "5ttgen" input_spec = Generate5ttInputSpec output_spec = Generate5ttOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) + outputs["out_file"] = op.abspath(self.inputs.out_file) return outputs class TensorMetricsInputSpec(CommandLineInputSpec): in_file = File( - exists=True, - argstr='%s', - mandatory=True, - position=-1, - desc='input DTI image') - - out_fa = File(argstr='-fa %s', desc='output FA file') - out_adc = File(argstr='-adc %s', desc='output ADC file') - out_evec = File( - argstr='-vector %s', desc='output selected eigenvector(s) file') - out_eval = File( - argstr='-value %s', desc='output selected eigenvalue(s) file') + exists=True, argstr="%s", mandatory=True, position=-1, desc="input DTI image" + ) + + out_fa = File(argstr="-fa %s", desc="output FA file") + out_adc = File(argstr="-adc %s", desc="output ADC file") + out_evec = File(argstr="-vector %s", desc="output selected eigenvector(s) file") + out_eval = File(argstr="-value %s", desc="output selected eigenvalue(s) file") component = traits.List( [1], usedefault=True, - argstr='-num %s', - sep=',', - desc=('specify the desired eigenvalue/eigenvector(s). Note that ' - 'several eigenvalues can be specified as a number sequence')) + argstr="-num %s", + sep=",", + desc=( + "specify the desired eigenvalue/eigenvector(s). 
Note that " + "several eigenvalues can be specified as a number sequence" + ), + ) in_mask = File( exists=True, - argstr='-mask %s', - desc=('only perform computation within the specified binary' - ' brain mask image')) + argstr="-mask %s", + desc=( + "only perform computation within the specified binary" " brain mask image" + ), + ) modulate = traits.Enum( - 'FA', - 'none', - 'eval', - argstr='-modulate %s', - desc=('how to modulate the magnitude of the' - ' eigenvectors')) + "FA", + "none", + "eval", + argstr="-modulate %s", + desc=("how to modulate the magnitude of the" " eigenvectors"), + ) class TensorMetricsOutputSpec(TraitedSpec): - out_fa = File(desc='output FA file') - out_adc = File(desc='output ADC file') - out_evec = File(desc='output selected eigenvector(s) file') - out_eval = File(desc='output selected eigenvalue(s) file') + out_fa = File(desc="output FA file") + out_adc = File(desc="output ADC file") + out_evec = File(desc="output selected eigenvector(s) file") + out_eval = File(desc="output selected eigenvalue(s) file") class TensorMetrics(CommandLine): @@ -224,7 +230,7 @@ class TensorMetrics(CommandLine): >>> comp.run() # doctest: +SKIP """ - _cmd = 'tensor2metric' + _cmd = "tensor2metric" input_spec = TensorMetricsInputSpec output_spec = TensorMetricsOutputSpec @@ -240,127 +246,130 @@ def _list_outputs(self): class ComputeTDIInputSpec(CommandLineInputSpec): in_file = File( - exists=True, - argstr='%s', - mandatory=True, - position=-2, - desc='input tractography') + exists=True, argstr="%s", mandatory=True, position=-2, desc="input tractography" + ) out_file = File( - 'tdi.mif', - argstr='%s', - usedefault=True, - position=-1, - desc='output TDI file') + "tdi.mif", argstr="%s", usedefault=True, position=-1, desc="output TDI file" + ) reference = File( exists=True, - argstr='-template %s', - desc='a reference' - 'image to be used as template') + argstr="-template %s", + desc="a reference" "image to be used as template", + ) vox_size = traits.List( - traits.Int, argstr='-vox %s', sep=',', desc='voxel dimensions') + traits.Int, argstr="-vox %s", sep=",", desc="voxel dimensions" + ) data_type = traits.Enum( - 'float', - 'unsigned int', - argstr='-datatype %s', - desc='specify output image data type') - use_dec = traits.Bool(argstr='-dec', desc='perform mapping in DEC space') + "float", + "unsigned int", + argstr="-datatype %s", + desc="specify output image data type", + ) + use_dec = traits.Bool(argstr="-dec", desc="perform mapping in DEC space") dixel = File( - argstr='-dixel %s', - desc='map streamlines to' - 'dixels within each voxel. Directions are stored as' - 'azimuth elevation pairs.') + argstr="-dixel %s", + desc="map streamlines to" + "dixels within each voxel. 
Directions are stored as" + "azimuth elevation pairs.", + ) max_tod = traits.Int( - argstr='-tod %d', - desc='generate a Track Orientation ' - 'Distribution (TOD) in each voxel.') + argstr="-tod %d", + desc="generate a Track Orientation " "Distribution (TOD) in each voxel.", + ) contrast = traits.Enum( - 'tdi', - 'length', - 'invlength', - 'scalar_map', - 'scalar_map_conut', - 'fod_amp', - 'curvature', - argstr='-constrast %s', - desc='define the desired ' - 'form of contrast for the output image') + "tdi", + "length", + "invlength", + "scalar_map", + "scalar_map_conut", + "fod_amp", + "curvature", + argstr="-constrast %s", + desc="define the desired " "form of contrast for the output image", + ) in_map = File( exists=True, - argstr='-image %s', - desc='provide the' - 'scalar image map for generating images with ' - '\'scalar_map\' contrasts, or the SHs image for fod_amp') + argstr="-image %s", + desc="provide the" + "scalar image map for generating images with " + "'scalar_map' contrasts, or the SHs image for fod_amp", + ) stat_vox = traits.Enum( - 'sum', - 'min', - 'mean', - 'max', - argstr='-stat_vox %s', - desc='define the statistic for choosing the final' - 'voxel intesities for a given contrast') + "sum", + "min", + "mean", + "max", + argstr="-stat_vox %s", + desc="define the statistic for choosing the final" + "voxel intesities for a given contrast", + ) stat_tck = traits.Enum( - 'mean', - 'sum', - 'min', - 'max', - 'median', - 'mean_nonzero', - 'gaussian', - 'ends_min', - 'ends_mean', - 'ends_max', - 'ends_prod', - argstr='-stat_tck %s', - desc='define the statistic for choosing ' - 'the contribution to be made by each streamline as a function of' - ' the samples taken along their lengths.') + "mean", + "sum", + "min", + "max", + "median", + "mean_nonzero", + "gaussian", + "ends_min", + "ends_mean", + "ends_max", + "ends_prod", + argstr="-stat_tck %s", + desc="define the statistic for choosing " + "the contribution to be made by each streamline as a function of" + " the samples taken along their lengths.", + ) fwhm_tck = traits.Float( - argstr='-fwhm_tck %f', - desc='define the statistic for choosing the' - ' contribution to be made by each streamline as a function of the ' - 'samples taken along their lengths') + argstr="-fwhm_tck %f", + desc="define the statistic for choosing the" + " contribution to be made by each streamline as a function of the " + "samples taken along their lengths", + ) map_zero = traits.Bool( - argstr='-map_zero', - desc='if a streamline has zero contribution based ' - 'on the contrast & statistic, typically it is not mapped; use this ' - 'option to still contribute to the map even if this is the case ' - '(these non-contributing voxels can then influence the mean value in ' - 'each voxel of the map)') + argstr="-map_zero", + desc="if a streamline has zero contribution based " + "on the contrast & statistic, typically it is not mapped; use this " + "option to still contribute to the map even if this is the case " + "(these non-contributing voxels can then influence the mean value in " + "each voxel of the map)", + ) upsample = traits.Int( - argstr='-upsample %d', - desc='upsample the tracks by' - ' some ratio using Hermite interpolation before ' - 'mappping') + argstr="-upsample %d", + desc="upsample the tracks by" + " some ratio using Hermite interpolation before " + "mappping", + ) precise = traits.Bool( - argstr='-precise', - desc='use a more precise streamline mapping ' - 'strategy, that accurately quantifies the length through each voxel ' - '(these 
lengths are then taken into account during TWI calculation)') + argstr="-precise", + desc="use a more precise streamline mapping " + "strategy, that accurately quantifies the length through each voxel " + "(these lengths are then taken into account during TWI calculation)", + ) ends_only = traits.Bool( - argstr='-ends_only', - desc='only map the streamline' - ' endpoints to the image') + argstr="-ends_only", desc="only map the streamline" " endpoints to the image" + ) tck_weights = File( exists=True, - argstr='-tck_weights_in %s', - desc='specify' - ' a text scalar file containing the streamline weights') + argstr="-tck_weights_in %s", + desc="specify" " a text scalar file containing the streamline weights", + ) nthreads = traits.Int( - argstr='-nthreads %d', - desc='number of threads. if zero, the number' - ' of available cpus will be used', - nohash=True) + argstr="-nthreads %d", + desc="number of threads. if zero, the number" " of available cpus will be used", + nohash=True, + ) class ComputeTDIOutputSpec(TraitedSpec): - out_file = File(desc='output TDI file') + out_file = File(desc="output TDI file") class ComputeTDI(MRTrix3Base): @@ -417,51 +426,47 @@ class ComputeTDI(MRTrix3Base): >>> tdi.run() # doctest: +SKIP """ - _cmd = 'tckmap' + _cmd = "tckmap" input_spec = ComputeTDIInputSpec output_spec = ComputeTDIOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) + outputs["out_file"] = op.abspath(self.inputs.out_file) return outputs class TCK2VTKInputSpec(CommandLineInputSpec): in_file = File( - exists=True, - argstr='%s', - mandatory=True, - position=-2, - desc='input tractography') + exists=True, argstr="%s", mandatory=True, position=-2, desc="input tractography" + ) out_file = File( - 'tracks.vtk', - argstr='%s', - usedefault=True, - position=-1, - desc='output VTK file') + "tracks.vtk", argstr="%s", usedefault=True, position=-1, desc="output VTK file" + ) reference = File( exists=True, - argstr='-image %s', - desc='if specified, the properties of' - ' this image will be used to convert track point positions from real ' - '(scanner) coordinates into image coordinates (in mm).') + argstr="-image %s", + desc="if specified, the properties of" + " this image will be used to convert track point positions from real " + "(scanner) coordinates into image coordinates (in mm).", + ) voxel = File( exists=True, - argstr='-image %s', - desc='if specified, the properties of' - ' this image will be used to convert track point positions from real ' - '(scanner) coordinates into image coordinates.') + argstr="-image %s", + desc="if specified, the properties of" + " this image will be used to convert track point positions from real " + "(scanner) coordinates into image coordinates.", + ) nthreads = traits.Int( - argstr='-nthreads %d', - desc='number of threads. if zero, the number' - ' of available cpus will be used', - nohash=True) + argstr="-nthreads %d", + desc="number of threads. 
if zero, the number" " of available cpus will be used", + nohash=True, + ) class TCK2VTKOutputSpec(TraitedSpec): - out_file = File(desc='output VTK file') + out_file = File(desc="output VTK file") class TCK2VTK(MRTrix3Base): @@ -481,38 +486,36 @@ class TCK2VTK(MRTrix3Base): >>> vtk.run() # doctest: +SKIP """ - _cmd = 'tck2vtk' + _cmd = "tck2vtk" input_spec = TCK2VTKInputSpec output_spec = TCK2VTKOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) + outputs["out_file"] = op.abspath(self.inputs.out_file) return outputs class DWIExtractInputSpec(MRTrix3BaseInputSpec): in_file = File( - exists=True, - argstr='%s', - mandatory=True, - position=-2, - desc='input image') - out_file = File( - argstr='%s', mandatory=True, position=-1, desc='output image') - bzero = traits.Bool(argstr='-bzero', desc='extract b=0 volumes') - nobzero = traits.Bool(argstr='-no_bzero', desc='extract non b=0 volumes') + exists=True, argstr="%s", mandatory=True, position=-2, desc="input image" + ) + out_file = File(argstr="%s", mandatory=True, position=-1, desc="output image") + bzero = traits.Bool(argstr="-bzero", desc="extract b=0 volumes") + nobzero = traits.Bool(argstr="-no_bzero", desc="extract non b=0 volumes") singleshell = traits.Bool( - argstr='-singleshell', desc='extract volumes with a specific shell') + argstr="-singleshell", desc="extract volumes with a specific shell" + ) shell = traits.List( traits.Float, - sep=',', - argstr='-shell %s', - desc='specify one or more gradient shells') + sep=",", + argstr="-shell %s", + desc="specify one or more gradient shells", + ) class DWIExtractOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='output image') + out_file = File(exists=True, desc="output image") class DWIExtract(MRTrix3Base): @@ -534,54 +537,53 @@ class DWIExtract(MRTrix3Base): >>> dwiextract.run() # doctest: +SKIP """ - _cmd = 'dwiextract' + _cmd = "dwiextract" input_spec = DWIExtractInputSpec output_spec = DWIExtractOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) + outputs["out_file"] = op.abspath(self.inputs.out_file) return outputs class MRConvertInputSpec(MRTrix3BaseInputSpec): in_file = File( - exists=True, - argstr='%s', - mandatory=True, - position=-2, - desc='input image') + exists=True, argstr="%s", mandatory=True, position=-2, desc="input image" + ) out_file = File( - 'dwi.mif', - argstr='%s', + "dwi.mif", + argstr="%s", mandatory=True, position=-1, usedefault=True, - desc='output image') + desc="output image", + ) coord = traits.List( traits.Float, - sep=' ', - argstr='-coord %s', - desc='extract data at the specified coordinates') + sep=" ", + argstr="-coord %s", + desc="extract data at the specified coordinates", + ) vox = traits.List( - traits.Float, - sep=',', - argstr='-vox %s', - desc='change the voxel dimensions') + traits.Float, sep=",", argstr="-vox %s", desc="change the voxel dimensions" + ) axes = traits.List( traits.Int, - sep=',', - argstr='-axes %s', - desc='specify the axes that will be used') + sep=",", + argstr="-axes %s", + desc="specify the axes that will be used", + ) scaling = traits.List( traits.Float, - sep=',', - argstr='-scaling %s', - desc='specify the data scaling parameter') + sep=",", + argstr="-scaling %s", + desc="specify the data scaling parameter", + ) class MRConvertOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='output image') + out_file = File(exists=True, desc="output image") class 
MRConvert(MRTrix3Base): @@ -601,50 +603,46 @@ class MRConvert(MRTrix3Base): >>> mrconvert.run() # doctest: +SKIP """ - _cmd = 'mrconvert' + _cmd = "mrconvert" input_spec = MRConvertInputSpec output_spec = MRConvertOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) + outputs["out_file"] = op.abspath(self.inputs.out_file) return outputs class MRMathInputSpec(MRTrix3BaseInputSpec): in_file = File( - exists=True, - argstr='%s', - mandatory=True, - position=-3, - desc='input image') - out_file = File( - argstr='%s', mandatory=True, position=-1, desc='output image') + exists=True, argstr="%s", mandatory=True, position=-3, desc="input image" + ) + out_file = File(argstr="%s", mandatory=True, position=-1, desc="output image") operation = traits.Enum( - 'mean', - 'median', - 'sum', - 'product', - 'rms', - 'norm', - 'var', - 'std', - 'min', - 'max', - 'absmax', - 'magmax', - argstr='%s', + "mean", + "median", + "sum", + "product", + "rms", + "norm", + "var", + "std", + "min", + "max", + "absmax", + "magmax", + argstr="%s", position=-2, mandatory=True, - desc='operation to computer along a specified axis') + desc="operation to computer along a specified axis", + ) axis = traits.Int( - 0, - argstr='-axis %d', - desc='specfied axis to perform the operation along') + 0, argstr="-axis %d", desc="specfied axis to perform the operation along" + ) class MRMathOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='output image') + out_file = File(exists=True, desc="output image") class MRMath(MRTrix3Base): @@ -667,67 +665,63 @@ class MRMath(MRTrix3Base): >>> mrmath.run() # doctest: +SKIP """ - _cmd = 'mrmath' + _cmd = "mrmath" input_spec = MRMathInputSpec output_spec = MRMathOutputSpec def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) + outputs["out_file"] = op.abspath(self.inputs.out_file) return outputs class MRResizeInputSpec(MRTrix3BaseInputSpec): in_file = File( - exists=True, - argstr='%s', - position=-2, - mandatory=True, - desc='input DWI image' + exists=True, argstr="%s", position=-2, mandatory=True, desc="input DWI image" ) image_size = traits.Tuple( (traits.Int, traits.Int, traits.Int), - argstr='-size %d,%d,%d', + argstr="-size %d,%d,%d", mandatory=True, - desc='Number of voxels in each dimension of output image', - xor=['voxel_size', 'scale_factor'], + desc="Number of voxels in each dimension of output image", + xor=["voxel_size", "scale_factor"], ) voxel_size = traits.Tuple( (traits.Float, traits.Float, traits.Float), - argstr='-voxel %g,%g,%g', + argstr="-voxel %g,%g,%g", mandatory=True, - desc='Desired voxel size in mm for the output image', - xor=['image_size', 'scale_factor'], + desc="Desired voxel size in mm for the output image", + xor=["image_size", "scale_factor"], ) scale_factor = traits.Tuple( (traits.Float, traits.Float, traits.Float), - argstr='-scale %g,%g,%g', + argstr="-scale %g,%g,%g", mandatory=True, - desc='Scale factors to rescale the image by in each dimension', - xor=['image_size', 'voxel_size'], + desc="Scale factors to rescale the image by in each dimension", + xor=["image_size", "voxel_size"], ) interpolation = traits.Enum( - 'cubic', - 'nearest', - 'linear', - 'sinc', - argstr='-interp %s', + "cubic", + "nearest", + "linear", + "sinc", + argstr="-interp %s", usedefault=True, - desc='set the interpolation method to use when resizing (choices: ' - 'nearest, linear, cubic, sinc. 
Default: cubic).', + desc="set the interpolation method to use when resizing (choices: " + "nearest, linear, cubic, sinc. Default: cubic).", ) out_file = File( - argstr='%s', - name_template='%s_resized', - name_source=['in_file'], + argstr="%s", + name_template="%s_resized", + name_source=["in_file"], keep_extension=True, position=-1, - desc='the output resized DWI image', + desc="the output resized DWI image", ) class MRResizeOutputSpec(TraitedSpec): - out_file = File(desc='the output resized DWI image', exists=True) + out_file = File(desc="the output resized DWI image", exists=True) class MRResize(MRTrix3Base): @@ -768,6 +762,6 @@ class MRResize(MRTrix3Base): >>> scale_resize.run() # doctest: +SKIP """ - _cmd = 'mrresize' + _cmd = "mrresize" input_spec = MRResizeInputSpec output_spec = MRResizeOutputSpec diff --git a/nipype/interfaces/niftyfit/asl.py b/nipype/interfaces/niftyfit/asl.py index 0d13880e1e..5c23769e8f 100644 --- a/nipype/interfaces/niftyfit/asl.py +++ b/nipype/interfaces/niftyfit/asl.py @@ -11,79 +11,78 @@ class FitAslInputSpec(CommandLineInputSpec): """ Input Spec for FitAsl. """ - desc = 'Filename of the 4D ASL (control/label) source image (mandatory).' + + desc = "Filename of the 4D ASL (control/label) source image (mandatory)." source_file = File( - position=1, - exists=True, - argstr='-source %s', - mandatory=True, - desc=desc) - pasl = traits.Bool(desc='Fit PASL ASL data [default]', argstr='-pasl') - pcasl = traits.Bool(desc='Fit PCASL ASL data', argstr='-pcasl') + position=1, exists=True, argstr="-source %s", mandatory=True, desc=desc + ) + pasl = traits.Bool(desc="Fit PASL ASL data [default]", argstr="-pasl") + pcasl = traits.Bool(desc="Fit PCASL ASL data", argstr="-pcasl") # *** Output options: - desc = 'Filename of the Cerebral Blood Flow map (in ml/100g/min).' + desc = "Filename of the Cerebral Blood Flow map (in ml/100g/min)." cbf_file = File( - name_source=['source_file'], - name_template='%s_cbf.nii.gz', - argstr='-cbf %s', - desc=desc) + name_source=["source_file"], + name_template="%s_cbf.nii.gz", + argstr="-cbf %s", + desc=desc, + ) error_file = File( - name_source=['source_file'], - name_template='%s_error.nii.gz', - argstr='-error %s', - desc='Filename of the CBF error map.') + name_source=["source_file"], + name_template="%s_error.nii.gz", + argstr="-error %s", + desc="Filename of the CBF error map.", + ) syn_file = File( - name_source=['source_file'], - name_template='%s_syn.nii.gz', - argstr='-syn %s', - desc='Filename of the synthetic ASL data.') + name_source=["source_file"], + name_template="%s_syn.nii.gz", + argstr="-syn %s", + desc="Filename of the synthetic ASL data.", + ) # *** Input options (see also fit_qt1 for generic T1 fitting): - desc = 'Filename of the estimated input T1 map (in ms).' - t1map = File(exists=True, argstr='-t1map %s', desc=desc) - desc = 'Filename of the estimated input M0 map.' - m0map = File(exists=True, argstr='-m0map %s', desc=desc) - desc = 'Filename of the estimated input M0 map error.' - m0mape = File(exists=True, argstr='-m0mape %s', desc=desc) - desc = 'Filename of a [1,2,5]s Inversion Recovery volume (T1/M0 fitting \ -carried out internally).' - - ir_volume = File(exists=True, argstr='-IRvolume %s', desc=desc) - desc = 'Output of [1,2,5]s Inversion Recovery fitting.' - ir_output = File(exists=True, argstr='-IRoutput %s', desc=desc) + desc = "Filename of the estimated input T1 map (in ms)." + t1map = File(exists=True, argstr="-t1map %s", desc=desc) + desc = "Filename of the estimated input M0 map." 
+ m0map = File(exists=True, argstr="-m0map %s", desc=desc) + desc = "Filename of the estimated input M0 map error." + m0mape = File(exists=True, argstr="-m0mape %s", desc=desc) + desc = "Filename of a [1,2,5]s Inversion Recovery volume (T1/M0 fitting \ +carried out internally)." + + ir_volume = File(exists=True, argstr="-IRvolume %s", desc=desc) + desc = "Output of [1,2,5]s Inversion Recovery fitting." + ir_output = File(exists=True, argstr="-IRoutput %s", desc=desc) # *** Experimental options (Choose those suitable for the model!): mask = File( - position=2, - exists=True, - desc='Filename of image mask.', - argstr='-mask %s') + position=2, exists=True, desc="Filename of image mask.", argstr="-mask %s" + ) t1_art_cmp = traits.Float( - desc='T1 of arterial component [1650ms].', argstr='-T1a %f') - desc = 'Single plasma/tissue partition coefficient [0.9ml/g].' - plasma_coeff = traits.Float(desc=desc, argstr='-L %f') - desc = 'Labelling efficiency [0.99 (pasl), 0.85 (pcasl)], ensure any \ -background suppression pulses are included in -eff' + desc="T1 of arterial component [1650ms].", argstr="-T1a %f" + ) + desc = "Single plasma/tissue partition coefficient [0.9ml/g]." + plasma_coeff = traits.Float(desc=desc, argstr="-L %f") + desc = "Labelling efficiency [0.99 (pasl), 0.85 (pcasl)], ensure any \ +background suppression pulses are included in -eff" - eff = traits.Float(desc=desc, argstr='-eff %f') - desc = 'Outlier rejection for multi CL volumes (enter z-score threshold \ -(e.g. 2.5)) [off].' + eff = traits.Float(desc=desc, argstr="-eff %f") + desc = "Outlier rejection for multi CL volumes (enter z-score threshold \ +(e.g. 2.5)) [off]." - out = traits.Float(desc=desc, argstr='-out %f') + out = traits.Float(desc=desc, argstr="-out %f") # *** PCASL options (Choose those suitable for the model!): - pld = traits.Float(desc='Post Labelling Delay [2000ms].', argstr='-PLD %f') - ldd = traits.Float(desc='Labelling Duration [1800ms].', argstr='-LDD %f') - desc = 'Difference in labelling delay per slice [0.0 ms/slice.' - dpld = traits.Float(desc=desc, argstr='-dPLD %f') + pld = traits.Float(desc="Post Labelling Delay [2000ms].", argstr="-PLD %f") + ldd = traits.Float(desc="Labelling Duration [1800ms].", argstr="-LDD %f") + desc = "Difference in labelling delay per slice [0.0 ms/slice." + dpld = traits.Float(desc=desc, argstr="-dPLD %f") # *** PASL options (Choose those suitable for the model!): - t_inv1 = traits.Float( - desc='Saturation pulse time [800ms].', argstr='-Tinv1 %f') - t_inv2 = traits.Float(desc='Inversion time [2000ms].', argstr='-Tinv2 %f') - desc = 'Difference in inversion time per slice [0ms/slice].' - dt_inv2 = traits.Float(desc=desc, argstr='-dTinv2 %f') + t_inv1 = traits.Float(desc="Saturation pulse time [800ms].", argstr="-Tinv1 %f") + t_inv2 = traits.Float(desc="Inversion time [2000ms].", argstr="-Tinv2 %f") + desc = "Difference in inversion time per slice [0ms/slice]." + dt_inv2 = traits.Float(desc=desc, argstr="-dTinv2 %f") # *** Other experimental assumptions: @@ -91,50 +90,52 @@ class FitAslInputSpec(CommandLineInputSpec): # desc = 'Slope and intercept for Arterial Transit Time.' 
# ATT = traits.Float(desc=desc, argstr='-ATT %f') - gm_t1 = traits.Float(desc='T1 of GM [1150ms].', argstr='-gmT1 %f') + gm_t1 = traits.Float(desc="T1 of GM [1150ms].", argstr="-gmT1 %f") gm_plasma = traits.Float( - desc='Plasma/GM water partition [0.95ml/g].', argstr='-gmL %f') - gm_ttt = traits.Float(desc='Time to GM [ATT+0ms].', argstr='-gmTTT %f') - wm_t1 = traits.Float(desc='T1 of WM [800ms].', argstr='-wmT1 %f') + desc="Plasma/GM water partition [0.95ml/g].", argstr="-gmL %f" + ) + gm_ttt = traits.Float(desc="Time to GM [ATT+0ms].", argstr="-gmTTT %f") + wm_t1 = traits.Float(desc="T1 of WM [800ms].", argstr="-wmT1 %f") wm_plasma = traits.Float( - desc='Plasma/WM water partition [0.82ml/g].', argstr='-wmL %f') - wm_ttt = traits.Float(desc='Time to WM [ATT+0ms].', argstr='-wmTTT %f') + desc="Plasma/WM water partition [0.82ml/g].", argstr="-wmL %f" + ) + wm_ttt = traits.Float(desc="Time to WM [ATT+0ms].", argstr="-wmTTT %f") # *** Segmentation options: - desc = 'Filename of the 4D segmentation (in ASL space) for L/T1 \ -estimation and PV correction {WM,GM,CSF}.' + desc = "Filename of the 4D segmentation (in ASL space) for L/T1 \ +estimation and PV correction {WM,GM,CSF}." - seg = File(exists=True, argstr='-seg %s', desc=desc) - desc = 'Use sigmoid to estimate L from T1: L(T1|gmL,wmL) [Off].' - sig = traits.Bool(desc=desc, argstr='-sig') - desc = 'Simple PV correction (CBF=vg*CBFg + vw*CBFw, with CBFw=f*CBFg) \ -[0.25].' + seg = File(exists=True, argstr="-seg %s", desc=desc) + desc = "Use sigmoid to estimate L from T1: L(T1|gmL,wmL) [Off]." + sig = traits.Bool(desc=desc, argstr="-sig") + desc = "Simple PV correction (CBF=vg*CBFg + vw*CBFw, with CBFw=f*CBFg) \ +[0.25]." - pv0 = traits.Int(desc=desc, argstr='-pv0 %d') - pv2 = traits.Int(desc='In plane PV kernel size [3x3].', argstr='-pv2 %d') + pv0 = traits.Int(desc=desc, argstr="-pv0 %d") + pv2 = traits.Int(desc="In plane PV kernel size [3x3].", argstr="-pv2 %d") pv3 = traits.Tuple( traits.Int, traits.Int, traits.Int, - desc='3D kernel size [3x3x1].', - argstr='-pv3 %d %d %d') - desc = 'Multiply CBF by this value (e.g. if CL are mislabelled use -1.0).' - mul = traits.Float(desc=desc, argstr='-mul %f') - mulgm = traits.Bool( - desc='Multiply CBF by segmentation [Off].', argstr='-sig') - desc = 'Set PV threshold for switching off LSQR [O.05].' - pv_threshold = traits.Bool(desc=desc, argstr='-pvthreshold') - segstyle = traits.Bool( - desc='Set CBF as [gm,wm] not [wm,gm].', argstr='-segstyle') + desc="3D kernel size [3x3x1].", + argstr="-pv3 %d %d %d", + ) + desc = "Multiply CBF by this value (e.g. if CL are mislabelled use -1.0)." + mul = traits.Float(desc=desc, argstr="-mul %f") + mulgm = traits.Bool(desc="Multiply CBF by segmentation [Off].", argstr="-sig") + desc = "Set PV threshold for switching off LSQR [O.05]." + pv_threshold = traits.Bool(desc=desc, argstr="-pvthreshold") + segstyle = traits.Bool(desc="Set CBF as [gm,wm] not [wm,gm].", argstr="-segstyle") class FitAslOutputSpec(TraitedSpec): """ Output Spec for FitAsl. """ - desc = 'Filename of the Cerebral Blood Flow map (in ml/100g/min).' + + desc = "Filename of the Cerebral Blood Flow map (in ml/100g/min)." cbf_file = File(exists=True, desc=desc) - desc = 'Filename of the CBF error map.' + desc = "Filename of the CBF error map." error_file = File(exists=True, desc=desc) - desc = 'Filename of the synthetic ASL data.' + desc = "Filename of the synthetic ASL data." 
syn_file = File(exists=True, desc=desc) @@ -158,7 +159,8 @@ class FitAsl(NiftyFitCommand): -syn asl_syn.nii.gz' """ - _cmd = get_custom_path('fit_asl', env_dir='NIFTYFITDIR') + + _cmd = get_custom_path("fit_asl", env_dir="NIFTYFITDIR") input_spec = FitAslInputSpec output_spec = FitAslOutputSpec - _suffix = '_fit_asl' + _suffix = "_fit_asl" diff --git a/nipype/interfaces/niftyfit/base.py b/nipype/interfaces/niftyfit/base.py index 87c1d63825..7af72cd30d 100644 --- a/nipype/interfaces/niftyfit/base.py +++ b/nipype/interfaces/niftyfit/base.py @@ -27,16 +27,17 @@ class NiftyFitCommand(CommandLine): """ Base support interface for NiftyFit commands. """ - _suffix = '_nf' + + _suffix = "_nf" def __init__(self, **inputs): """ Init method calling super. No version to be checked.""" super(NiftyFitCommand, self).__init__(**inputs) def _gen_fname(self, basename, out_dir=None, suffix=None, ext=None): - if basename == '': - msg = 'Unable to generate filename for command %s. ' % self.cmd - msg += 'basename is not set!' + if basename == "": + msg = "Unable to generate filename for command %s. " % self.cmd + msg += "basename is not set!" raise ValueError(msg) _, final_bn, final_ext = split_filename(basename) if out_dir is None: @@ -44,5 +45,5 @@ def _gen_fname(self, basename, out_dir=None, suffix=None, ext=None): if ext is not None: final_ext = ext if suffix is not None: - final_bn = ''.join((final_bn, suffix)) + final_bn = "".join((final_bn, suffix)) return os.path.abspath(os.path.join(out_dir, final_bn + final_ext)) diff --git a/nipype/interfaces/niftyfit/dwi.py b/nipype/interfaces/niftyfit/dwi.py index 900a558fa1..1cfc8826d8 100644 --- a/nipype/interfaces/niftyfit/dwi.py +++ b/nipype/interfaces/niftyfit/dwi.py @@ -11,247 +11,293 @@ class FitDwiInputSpec(CommandLineInputSpec): """ Input Spec for FitDwi. """ + # Inputs options source_file = File( position=1, exists=True, - argstr='-source %s', + argstr="-source %s", mandatory=True, - desc='The source image containing the dwi data.') - desc = 'The file containing the bvalues of the source DWI.' + desc="The source image containing the dwi data.", + ) + desc = "The file containing the bvalues of the source DWI." bval_file = File( - position=2, exists=True, argstr='-bval %s', mandatory=True, desc=desc) - desc = 'The file containing the bvectors of the source DWI.' + position=2, exists=True, argstr="-bval %s", mandatory=True, desc=desc + ) + desc = "The file containing the bvectors of the source DWI." bvec_file = File( - position=3, exists=True, argstr='-bvec %s', mandatory=True, desc=desc) + position=3, exists=True, argstr="-bvec %s", mandatory=True, desc=desc + ) te_file = File( - exists=True, - argstr='-TE %s', - desc='Filename of TEs (ms).', - xor=['te_file']) + exists=True, argstr="-TE %s", desc="Filename of TEs (ms).", xor=["te_file"] + ) te_value = File( - exists=True, - argstr='-TE %s', - desc='Value of TEs (ms).', - xor=['te_file']) - mask_file = File( - exists=True, desc='The image mask', argstr='-mask %s') - desc = 'Filename of parameter priors for -ball and -nod.' - prior_file = File(exists=True, argstr='-prior %s', desc=desc) - desc = 'Rotate the output tensors according to the q/s form of the image \ -(resulting tensors will be in mm coordinates, default: 0).' - - rot_sform_flag = traits.Int(desc=desc, argstr='-rotsform %d') + exists=True, argstr="-TE %s", desc="Value of TEs (ms).", xor=["te_file"] + ) + mask_file = File(exists=True, desc="The image mask", argstr="-mask %s") + desc = "Filename of parameter priors for -ball and -nod." 
+ prior_file = File(exists=True, argstr="-prior %s", desc=desc) + desc = "Rotate the output tensors according to the q/s form of the image \ +(resulting tensors will be in mm coordinates, default: 0)." + + rot_sform_flag = traits.Int(desc=desc, argstr="-rotsform %d") # generic output options: error_file = File( - name_source=['source_file'], - name_template='%s_error.nii.gz', - desc='Filename of parameter error maps.', - argstr='-error %s') + name_source=["source_file"], + name_template="%s_error.nii.gz", + desc="Filename of parameter error maps.", + argstr="-error %s", + ) res_file = File( - name_source=['source_file'], - name_template='%s_resmap.nii.gz', - desc='Filename of model residual map.', - argstr='-res %s') + name_source=["source_file"], + name_template="%s_resmap.nii.gz", + desc="Filename of model residual map.", + argstr="-res %s", + ) syn_file = File( - name_source=['source_file'], - name_template='%s_syn.nii.gz', - desc='Filename of synthetic image.', - argstr='-syn %s') + name_source=["source_file"], + name_template="%s_syn.nii.gz", + desc="Filename of synthetic image.", + argstr="-syn %s", + ) nodiff_file = File( - name_source=['source_file'], - name_template='%s_no_diff.nii.gz', - desc='Filename of average no diffusion image.', - argstr='-nodiff %s') + name_source=["source_file"], + name_template="%s_no_diff.nii.gz", + desc="Filename of average no diffusion image.", + argstr="-nodiff %s", + ) # Output options, with templated output names based on the source image mcmap_file = File( - name_source=['source_file'], - name_template='%s_mcmap.nii.gz', - desc='Filename of multi-compartment model parameter map ' - '(-ivim,-ball,-nod)', - argstr='-mcmap %s', - requires=['nodv_flag']) + name_source=["source_file"], + name_template="%s_mcmap.nii.gz", + desc="Filename of multi-compartment model parameter map " "(-ivim,-ball,-nod)", + argstr="-mcmap %s", + requires=["nodv_flag"], + ) # Model Specific Output options: mdmap_file = File( - name_source=['source_file'], - name_template='%s_mdmap.nii.gz', - desc='Filename of MD map/ADC', - argstr='-mdmap %s') + name_source=["source_file"], + name_template="%s_mdmap.nii.gz", + desc="Filename of MD map/ADC", + argstr="-mdmap %s", + ) famap_file = File( - name_source=['source_file'], - name_template='%s_famap.nii.gz', - desc='Filename of FA map', - argstr='-famap %s') + name_source=["source_file"], + name_template="%s_famap.nii.gz", + desc="Filename of FA map", + argstr="-famap %s", + ) v1map_file = File( - name_source=['source_file'], - name_template='%s_v1map.nii.gz', - desc='Filename of PDD map [x,y,z]', - argstr='-v1map %s') + name_source=["source_file"], + name_template="%s_v1map.nii.gz", + desc="Filename of PDD map [x,y,z]", + argstr="-v1map %s", + ) rgbmap_file = File( - name_source=['source_file'], - name_template='%s_rgbmap.nii.gz', - desc='Filename of colour-coded FA map', - argstr='-rgbmap %s', - requires=['dti_flag']) + name_source=["source_file"], + name_template="%s_rgbmap.nii.gz", + desc="Filename of colour-coded FA map", + argstr="-rgbmap %s", + requires=["dti_flag"], + ) - desc = 'Use lower triangular (tenmap2) or diagonal, off-diagonal tensor \ -format' + desc = "Use lower triangular (tenmap2) or diagonal, off-diagonal tensor \ +format" - ten_type = traits.Enum( - 'lower-tri', 'diag-off-diag', desc=desc, usedefault=True) + ten_type = traits.Enum("lower-tri", "diag-off-diag", desc=desc, usedefault=True) tenmap_file = File( - name_source=['source_file'], - name_template='%s_tenmap.nii.gz', - desc='Filename of tensor map 
[diag,offdiag].', - argstr='-tenmap %s', - requires=['dti_flag']) + name_source=["source_file"], + name_template="%s_tenmap.nii.gz", + desc="Filename of tensor map [diag,offdiag].", + argstr="-tenmap %s", + requires=["dti_flag"], + ) tenmap2_file = File( - name_source=['source_file'], - name_template='%s_tenmap2.nii.gz', - desc='Filename of tensor map [lower tri]', - argstr='-tenmap2 %s', - requires=['dti_flag']) + name_source=["source_file"], + name_template="%s_tenmap2.nii.gz", + desc="Filename of tensor map [lower tri]", + argstr="-tenmap2 %s", + requires=["dti_flag"], + ) # Methods options - desc = 'Fit single exponential to non-directional data [default with \ -no b-vectors]' + desc = "Fit single exponential to non-directional data [default with \ +no b-vectors]" mono_flag = traits.Bool( desc=desc, - argstr='-mono', + argstr="-mono", position=4, xor=[ - 'ivim_flag', 'dti_flag', 'ball_flag', 'ballv_flag', 'nod_flag', - 'nodv_flag' - ]) + "ivim_flag", + "dti_flag", + "ball_flag", + "ballv_flag", + "nod_flag", + "nodv_flag", + ], + ) ivim_flag = traits.Bool( - desc='Fit IVIM model to non-directional data.', - argstr='-ivim', + desc="Fit IVIM model to non-directional data.", + argstr="-ivim", position=4, xor=[ - 'mono_flag', 'dti_flag', 'ball_flag', 'ballv_flag', 'nod_flag', - 'nodv_flag' - ]) - desc = 'Fit the tensor model [default with b-vectors].' + "mono_flag", + "dti_flag", + "ball_flag", + "ballv_flag", + "nod_flag", + "nodv_flag", + ], + ) + desc = "Fit the tensor model [default with b-vectors]." dti_flag = traits.Bool( desc=desc, - argstr='-dti', + argstr="-dti", position=4, xor=[ - 'mono_flag', 'ivim_flag', 'ball_flag', 'ballv_flag', 'nod_flag', - 'nodv_flag' - ]) + "mono_flag", + "ivim_flag", + "ball_flag", + "ballv_flag", + "nod_flag", + "nodv_flag", + ], + ) ball_flag = traits.Bool( - desc='Fit the ball and stick model.', - argstr='-ball', + desc="Fit the ball and stick model.", + argstr="-ball", position=4, xor=[ - 'mono_flag', 'ivim_flag', 'dti_flag', 'ballv_flag', 'nod_flag', - 'nodv_flag' - ]) - desc = 'Fit the ball and stick model with optimised PDD.' + "mono_flag", + "ivim_flag", + "dti_flag", + "ballv_flag", + "nod_flag", + "nodv_flag", + ], + ) + desc = "Fit the ball and stick model with optimised PDD." ballv_flag = traits.Bool( desc=desc, - argstr='-ballv', + argstr="-ballv", position=4, xor=[ - 'mono_flag', 'ivim_flag', 'dti_flag', 'ball_flag', 'nod_flag', - 'nodv_flag' - ]) + "mono_flag", + "ivim_flag", + "dti_flag", + "ball_flag", + "nod_flag", + "nodv_flag", + ], + ) nod_flag = traits.Bool( - desc='Fit the NODDI model', - argstr='-nod', + desc="Fit the NODDI model", + argstr="-nod", position=4, xor=[ - 'mono_flag', 'ivim_flag', 'dti_flag', 'ball_flag', 'ballv_flag', - 'nodv_flag' - ]) + "mono_flag", + "ivim_flag", + "dti_flag", + "ball_flag", + "ballv_flag", + "nodv_flag", + ], + ) nodv_flag = traits.Bool( - desc='Fit the NODDI model with optimised PDD', - argstr='-nodv', + desc="Fit the NODDI model with optimised PDD", + argstr="-nodv", position=4, xor=[ - 'mono_flag', 'ivim_flag', 'dti_flag', 'ball_flag', 'ballv_flag', - 'nod_flag' - ]) + "mono_flag", + "ivim_flag", + "dti_flag", + "ball_flag", + "ballv_flag", + "nod_flag", + ], + ) # Experimental options - desc = 'Maximum number of non-linear LSQR iterations [100x2 passes])' - maxit_val = traits.Int(desc=desc, argstr='-maxit %d', requires=['gn_flag']) - desc = 'LM parameters (initial value, decrease rate) [100,1.2].' 
+ desc = "Maximum number of non-linear LSQR iterations [100x2 passes])" + maxit_val = traits.Int(desc=desc, argstr="-maxit %d", requires=["gn_flag"]) + desc = "LM parameters (initial value, decrease rate) [100,1.2]." lm_vals = traits.Tuple( - traits.Float, - traits.Float, - argstr='-lm %f %f', - requires=['gn_flag'], - desc=desc) - desc = 'Use Gauss-Newton algorithm [Levenberg-Marquardt].' - gn_flag = traits.Bool(desc=desc, argstr='-gn', xor=['wls_flag']) - desc = 'Use Variational Bayes fitting with known prior (currently \ -identity covariance...).' - - vb_flag = traits.Bool(desc=desc, argstr='-vb') + traits.Float, traits.Float, argstr="-lm %f %f", requires=["gn_flag"], desc=desc + ) + desc = "Use Gauss-Newton algorithm [Levenberg-Marquardt]." + gn_flag = traits.Bool(desc=desc, argstr="-gn", xor=["wls_flag"]) + desc = "Use Variational Bayes fitting with known prior (currently \ +identity covariance...)." + + vb_flag = traits.Bool(desc=desc, argstr="-vb") cov_file = File( exists=True, - desc='Filename of ithe nc*nc covariance matrix [I]', - argstr='-cov %s') - wls_flag = traits.Bool(desc=desc, argstr='-wls', xor=['gn_flag']) - desc = 'Use location-weighted least squares for DTI fitting [3x3 Gaussian]' - swls_val = traits.Float(desc=desc, argstr='-swls %f') - slice_no = traits.Int( - desc='Fit to single slice number.', argstr='-slice %d') + desc="Filename of ithe nc*nc covariance matrix [I]", + argstr="-cov %s", + ) + wls_flag = traits.Bool(desc=desc, argstr="-wls", xor=["gn_flag"]) + desc = "Use location-weighted least squares for DTI fitting [3x3 Gaussian]" + swls_val = traits.Float(desc=desc, argstr="-swls %f") + slice_no = traits.Int(desc="Fit to single slice number.", argstr="-slice %d") voxel = traits.Tuple( traits.Int, traits.Int, traits.Int, - desc='Fit to single voxel only.', - argstr='-voxel %d %d %d') + desc="Fit to single voxel only.", + argstr="-voxel %d %d %d", + ) diso_val = traits.Float( - desc='Isotropic diffusivity for -nod [3e-3]', argstr='-diso %f') + desc="Isotropic diffusivity for -nod [3e-3]", argstr="-diso %f" + ) dpr_val = traits.Float( - desc='Parallel diffusivity for -nod [1.7e-3].', argstr='-dpr %f') - wm_t2_val = traits.Float( - desc='White matter T2 value [80ms].', argstr='-wmT2 %f') - csf_t2_val = traits.Float(desc='CSF T2 value [400ms].', argstr='-csfT2 %f') - desc = 'Threshold for perfusion/diffsuion effects [100].' - perf_thr = traits.Float(desc=desc, argstr='-perfthreshold %f') + desc="Parallel diffusivity for -nod [1.7e-3].", argstr="-dpr %f" + ) + wm_t2_val = traits.Float(desc="White matter T2 value [80ms].", argstr="-wmT2 %f") + csf_t2_val = traits.Float(desc="CSF T2 value [400ms].", argstr="-csfT2 %f") + desc = "Threshold for perfusion/diffsuion effects [100]." 
+ perf_thr = traits.Float(desc=desc, argstr="-perfthreshold %f") # MCMC options: mcout = File( - name_source=['source_file'], - name_template='%s_mcout.txt', - desc='Filename of mc samples (ascii text file)', - argstr='-mcout %s') + name_source=["source_file"], + name_template="%s_mcout.txt", + desc="Filename of mc samples (ascii text file)", + argstr="-mcout %s", + ) mcsamples = traits.Int( - desc='Number of samples to keep [100].', argstr='-mcsamples %d') + desc="Number of samples to keep [100].", argstr="-mcsamples %d" + ) mcmaxit = traits.Int( - desc='Number of iterations to run [10,000].', argstr='-mcmaxit %d') + desc="Number of iterations to run [10,000].", argstr="-mcmaxit %d" + ) acceptance = traits.Float( - desc='Fraction of iterations to accept [0.23].', - argstr='-accpetance %f') + desc="Fraction of iterations to accept [0.23].", argstr="-accpetance %f" + ) class FitDwiOutputSpec(TraitedSpec): """ Output Spec for FitDwi. """ - error_file = File(desc='Filename of parameter error maps') - res_file = File(desc='Filename of model residual map') - syn_file = File(desc='Filename of synthetic image') - nodiff_file = File(desc='Filename of average no diffusion image.') - mdmap_file = File(desc='Filename of MD map/ADC') - famap_file = File(desc='Filename of FA map') - v1map_file = File(desc='Filename of PDD map [x,y,z]') - rgbmap_file = File(desc='Filename of colour FA map') - tenmap_file = File(desc='Filename of tensor map') - tenmap2_file = File(desc='Filename of tensor map [lower tri]') + error_file = File(desc="Filename of parameter error maps") + res_file = File(desc="Filename of model residual map") + syn_file = File(desc="Filename of synthetic image") + nodiff_file = File(desc="Filename of average no diffusion image.") + mdmap_file = File(desc="Filename of MD map/ADC") + famap_file = File(desc="Filename of FA map") + v1map_file = File(desc="Filename of PDD map [x,y,z]") + rgbmap_file = File(desc="Filename of colour FA map") + tenmap_file = File(desc="Filename of tensor map") + tenmap2_file = File(desc="Filename of tensor map [lower tri]") - mcmap_file = File(desc='Filename of multi-compartment model ' - 'parameter map (-ivim,-ball,-nod).') - mcout = File(desc='Filename of mc samples (ascii text file)') + mcmap_file = File( + desc="Filename of multi-compartment model " "parameter map (-ivim,-ball,-nod)." + ) + mcout = File(desc="Filename of mc samples (ascii text file)") class FitDwi(NiftyFitCommand): @@ -281,174 +327,231 @@ class FitDwi(NiftyFitCommand): -v1map dwi_v1map.nii.gz' """ - _cmd = get_custom_path('fit_dwi', env_dir='NIFTYFITDIR') + + _cmd = get_custom_path("fit_dwi", env_dir="NIFTYFITDIR") input_spec = FitDwiInputSpec output_spec = FitDwiOutputSpec - _suffix = '_fit_dwi' + _suffix = "_fit_dwi" def _format_arg(self, name, trait_spec, value): - if name == 'tenmap_file' and self.inputs.ten_type != 'diag-off-diag': - return '' - if name == 'tenmap2_file' and self.inputs.ten_type != 'lower-tri': - return '' + if name == "tenmap_file" and self.inputs.ten_type != "diag-off-diag": + return "" + if name == "tenmap2_file" and self.inputs.ten_type != "lower-tri": + return "" return super(FitDwi, self)._format_arg(name, trait_spec, value) class DwiToolInputSpec(CommandLineInputSpec): """ Input Spec for DwiTool. """ - desc = 'The source image containing the fitted model.' + + desc = "The source image containing the fitted model." 
source_file = File( - position=1, - exists=True, - desc=desc, - argstr='-source %s', - mandatory=True) - desc = 'The file containing the bvalues of the source DWI.' + position=1, exists=True, desc=desc, argstr="-source %s", mandatory=True + ) + desc = "The file containing the bvalues of the source DWI." bval_file = File( - position=2, exists=True, desc=desc, argstr='-bval %s', mandatory=True) - desc = 'The file containing the bvectors of the source DWI.' - bvec_file = File( - position=3, exists=True, desc=desc, argstr='-bvec %s') + position=2, exists=True, desc=desc, argstr="-bval %s", mandatory=True + ) + desc = "The file containing the bvectors of the source DWI." + bvec_file = File(position=3, exists=True, desc=desc, argstr="-bvec %s") b0_file = File( position=4, exists=True, - desc='The B0 image corresponding to the source DWI', - argstr='-b0 %s') - mask_file = File( - position=5, exists=True, desc='The image mask', argstr='-mask %s') + desc="The B0 image corresponding to the source DWI", + argstr="-b0 %s", + ) + mask_file = File(position=5, exists=True, desc="The image mask", argstr="-mask %s") # Output options, with templated output names based on the source image - desc = 'Filename of multi-compartment model parameter map \ -(-ivim,-ball,-nod)' + desc = "Filename of multi-compartment model parameter map \ +(-ivim,-ball,-nod)" mcmap_file = File( - name_source=['source_file'], - name_template='%s_mcmap.nii.gz', + name_source=["source_file"], + name_template="%s_mcmap.nii.gz", desc=desc, - argstr='-mcmap %s') - desc = 'Filename of synthetic image. Requires: bvec_file/b0_file.' + argstr="-mcmap %s", + ) + desc = "Filename of synthetic image. Requires: bvec_file/b0_file." syn_file = File( - name_source=['source_file'], - name_template='%s_syn.nii.gz', + name_source=["source_file"], + name_template="%s_syn.nii.gz", desc=desc, - argstr='-syn %s', - requires=['bvec_file', 'b0_file']) + argstr="-syn %s", + requires=["bvec_file", "b0_file"], + ) mdmap_file = File( - name_source=['source_file'], - name_template='%s_mdmap.nii.gz', - desc='Filename of MD map/ADC', - argstr='-mdmap %s') + name_source=["source_file"], + name_template="%s_mdmap.nii.gz", + desc="Filename of MD map/ADC", + argstr="-mdmap %s", + ) famap_file = File( - name_source=['source_file'], - name_template='%s_famap.nii.gz', - desc='Filename of FA map', - argstr='-famap %s') + name_source=["source_file"], + name_template="%s_famap.nii.gz", + desc="Filename of FA map", + argstr="-famap %s", + ) v1map_file = File( - name_source=['source_file'], - name_template='%s_v1map.nii.gz', - desc='Filename of PDD map [x,y,z]', - argstr='-v1map %s') + name_source=["source_file"], + name_template="%s_v1map.nii.gz", + desc="Filename of PDD map [x,y,z]", + argstr="-v1map %s", + ) rgbmap_file = File( - name_source=['source_file'], - name_template='%s_rgbmap.nii.gz', - desc='Filename of colour FA map.', - argstr='-rgbmap %s') + name_source=["source_file"], + name_template="%s_rgbmap.nii.gz", + desc="Filename of colour FA map.", + argstr="-rgbmap %s", + ) logdti_file = File( - name_source=['source_file'], - name_template='%s_logdti2.nii.gz', - desc='Filename of output logdti map.', - argstr='-logdti2 %s') + name_source=["source_file"], + name_template="%s_logdti2.nii.gz", + desc="Filename of output logdti map.", + argstr="-logdti2 %s", + ) # Methods options - desc = 'Input is a single exponential to non-directional data \ -[default with no b-vectors]' + desc = "Input is a single exponential to non-directional data \ +[default with no b-vectors]" 
mono_flag = traits.Bool( desc=desc, position=6, - argstr='-mono', + argstr="-mono", xor=[ - 'ivim_flag', 'dti_flag', 'dti_flag2', 'ball_flag', 'ballv_flag', - 'nod_flag', 'nodv_flag' - ]) - desc = 'Inputs is an IVIM model to non-directional data.' + "ivim_flag", + "dti_flag", + "dti_flag2", + "ball_flag", + "ballv_flag", + "nod_flag", + "nodv_flag", + ], + ) + desc = "Inputs is an IVIM model to non-directional data." ivim_flag = traits.Bool( desc=desc, position=6, - argstr='-ivim', + argstr="-ivim", xor=[ - 'mono_flag', 'dti_flag', 'dti_flag2', 'ball_flag', 'ballv_flag', - 'nod_flag', 'nodv_flag' - ]) + "mono_flag", + "dti_flag", + "dti_flag2", + "ball_flag", + "ballv_flag", + "nod_flag", + "nodv_flag", + ], + ) dti_flag = traits.Bool( - desc='Input is a tensor model diag/off-diag.', + desc="Input is a tensor model diag/off-diag.", position=6, - argstr='-dti', + argstr="-dti", xor=[ - 'mono_flag', 'ivim_flag', 'dti_flag2', 'ball_flag', 'ballv_flag', - 'nod_flag', 'nodv_flag' - ]) + "mono_flag", + "ivim_flag", + "dti_flag2", + "ball_flag", + "ballv_flag", + "nod_flag", + "nodv_flag", + ], + ) dti_flag2 = traits.Bool( - desc='Input is a tensor model lower triangular', + desc="Input is a tensor model lower triangular", position=6, - argstr='-dti2', + argstr="-dti2", xor=[ - 'mono_flag', 'ivim_flag', 'dti_flag', 'ball_flag', 'ballv_flag', - 'nod_flag', 'nodv_flag' - ]) + "mono_flag", + "ivim_flag", + "dti_flag", + "ball_flag", + "ballv_flag", + "nod_flag", + "nodv_flag", + ], + ) ball_flag = traits.Bool( - desc='Input is a ball and stick model.', + desc="Input is a ball and stick model.", position=6, - argstr='-ball', + argstr="-ball", xor=[ - 'mono_flag', 'ivim_flag', 'dti_flag', 'dti_flag2', 'ballv_flag', - 'nod_flag', 'nodv_flag' - ]) - desc = 'Input is a ball and stick model with optimised PDD.' + "mono_flag", + "ivim_flag", + "dti_flag", + "dti_flag2", + "ballv_flag", + "nod_flag", + "nodv_flag", + ], + ) + desc = "Input is a ball and stick model with optimised PDD." ballv_flag = traits.Bool( desc=desc, position=6, - argstr='-ballv', + argstr="-ballv", xor=[ - 'mono_flag', 'ivim_flag', 'dti_flag', 'dti_flag2', 'ball_flag', - 'nod_flag', 'nodv_flag' - ]) + "mono_flag", + "ivim_flag", + "dti_flag", + "dti_flag2", + "ball_flag", + "nod_flag", + "nodv_flag", + ], + ) nod_flag = traits.Bool( - desc='Input is a NODDI model', + desc="Input is a NODDI model", position=6, - argstr='-nod', + argstr="-nod", xor=[ - 'mono_flag', 'ivim_flag', 'dti_flag', 'dti_flag2', 'ball_flag', - 'ballv_flag', 'nodv_flag' - ]) + "mono_flag", + "ivim_flag", + "dti_flag", + "dti_flag2", + "ball_flag", + "ballv_flag", + "nodv_flag", + ], + ) nodv_flag = traits.Bool( - desc='Input is a NODDI model with optimised PDD', + desc="Input is a NODDI model with optimised PDD", position=6, - argstr='-nodv', + argstr="-nodv", xor=[ - 'mono_flag', 'ivim_flag', 'dti_flag', 'dti_flag2', 'ball_flag', - 'ballv_flag', 'nod_flag' - ]) + "mono_flag", + "ivim_flag", + "dti_flag", + "dti_flag2", + "ball_flag", + "ballv_flag", + "nod_flag", + ], + ) # Experimental options diso_val = traits.Float( - desc='Isotropic diffusivity for -nod [3e-3]', argstr='-diso %f') + desc="Isotropic diffusivity for -nod [3e-3]", argstr="-diso %f" + ) dpr_val = traits.Float( - desc='Parallel diffusivity for -nod [1.7e-3].', argstr='-dpr %f') + desc="Parallel diffusivity for -nod [1.7e-3].", argstr="-dpr %f" + ) class DwiToolOutputSpec(TraitedSpec): """ Output Spec for DwiTool. 
""" - desc = 'Filename of multi-compartment model parameter map \ -(-ivim,-ball,-nod)' + + desc = "Filename of multi-compartment model parameter map \ +(-ivim,-ball,-nod)" mcmap_file = File(desc=desc) - syn_file = File(desc='Filename of synthetic image') - mdmap_file = File(desc='Filename of MD map/ADC') - famap_file = File(desc='Filename of FA map') - v1map_file = File(desc='Filename of PDD map [x,y,z]') - rgbmap_file = File(desc='Filename of colour FA map') - logdti_file = File(desc='Filename of output logdti map') + syn_file = File(desc="Filename of synthetic image") + mdmap_file = File(desc="Filename of MD map/ADC") + famap_file = File(desc="Filename of FA map") + v1map_file = File(desc="Filename of PDD map [x,y,z]") + rgbmap_file = File(desc="Filename of colour FA map") + logdti_file = File(desc="Filename of output logdti map") class DwiTool(NiftyFitCommand): @@ -480,18 +583,21 @@ class DwiTool(NiftyFitCommand): -syn dwi_syn.nii.gz -v1map dwi_v1map.nii.gz' """ - _cmd = get_custom_path('dwi_tool', env_dir='NIFTYFITDIR') + + _cmd = get_custom_path("dwi_tool", env_dir="NIFTYFITDIR") input_spec = DwiToolInputSpec output_spec = DwiToolOutputSpec - _suffix = '_dwi_tool' + _suffix = "_dwi_tool" def _format_arg(self, name, trait_spec, value): - if name == 'syn_file': - if not isdefined(self.inputs.bvec_file) or \ - not isdefined(self.inputs.b0_file): - return '' - if name in ['logdti_file', 'rgbmap_file']: - if not isdefined(self.inputs.dti_flag) and \ - not isdefined(self.inputs.dti_flag2): - return '' + if name == "syn_file": + if not isdefined(self.inputs.bvec_file) or not isdefined( + self.inputs.b0_file + ): + return "" + if name in ["logdti_file", "rgbmap_file"]: + if not isdefined(self.inputs.dti_flag) and not isdefined( + self.inputs.dti_flag2 + ): + return "" return super(DwiTool, self)._format_arg(name, trait_spec, value) diff --git a/nipype/interfaces/niftyfit/qt1.py b/nipype/interfaces/niftyfit/qt1.py index 9df8034526..9337de2306 100644 --- a/nipype/interfaces/niftyfit/qt1.py +++ b/nipype/interfaces/niftyfit/qt1.py @@ -12,150 +12,149 @@ class FitQt1InputSpec(CommandLineInputSpec): """ Input Spec for FitQt1. """ - desc = 'Filename of the 4D Multi-Echo T1 source image.' + + desc = "Filename of the 4D Multi-Echo T1 source image." source_file = File( - position=1, - exists=True, - desc=desc, - argstr='-source %s', - mandatory=True) + position=1, exists=True, desc=desc, argstr="-source %s", mandatory=True + ) # Output options: t1map_file = File( - name_source=['source_file'], - name_template='%s_t1map.nii.gz', - argstr='-t1map %s', - desc='Filename of the estimated output T1 map (in ms).') + name_source=["source_file"], + name_template="%s_t1map.nii.gz", + argstr="-t1map %s", + desc="Filename of the estimated output T1 map (in ms).", + ) m0map_file = File( - name_source=['source_file'], - name_template='%s_m0map.nii.gz', - argstr='-m0map %s', - desc='Filename of the estimated input M0 map.') - desc = 'Filename of the estimated output multi-parameter map.' + name_source=["source_file"], + name_template="%s_m0map.nii.gz", + argstr="-m0map %s", + desc="Filename of the estimated input M0 map.", + ) + desc = "Filename of the estimated output multi-parameter map." 
mcmap_file = File( - name_source=['source_file'], - name_template='%s_mcmap.nii.gz', - argstr='-mcmap %s', - desc=desc) + name_source=["source_file"], + name_template="%s_mcmap.nii.gz", + argstr="-mcmap %s", + desc=desc, + ) comp_file = File( - name_source=['source_file'], - name_template='%s_comp.nii.gz', - argstr='-comp %s', - desc='Filename of the estimated multi-component T1 map.') - desc = 'Filename of the error map (symmetric matrix, [Diag,OffDiag]).' + name_source=["source_file"], + name_template="%s_comp.nii.gz", + argstr="-comp %s", + desc="Filename of the estimated multi-component T1 map.", + ) + desc = "Filename of the error map (symmetric matrix, [Diag,OffDiag])." error_file = File( - name_source=['source_file'], - name_template='%s_error.nii.gz', - argstr='-error %s', - desc=desc) + name_source=["source_file"], + name_template="%s_error.nii.gz", + argstr="-error %s", + desc=desc, + ) syn_file = File( - name_source=['source_file'], - name_template='%s_syn.nii.gz', - argstr='-syn %s', - desc='Filename of the synthetic ASL data.') + name_source=["source_file"], + name_template="%s_syn.nii.gz", + argstr="-syn %s", + desc="Filename of the synthetic ASL data.", + ) res_file = File( - name_source=['source_file'], - name_template='%s_res.nii.gz', - argstr='-res %s', - desc='Filename of the model fit residuals') + name_source=["source_file"], + name_template="%s_res.nii.gz", + argstr="-res %s", + desc="Filename of the model fit residuals", + ) # Other options: mask = File( - position=2, - exists=True, - desc='Filename of image mask.', - argstr='-mask %s') + position=2, exists=True, desc="Filename of image mask.", argstr="-mask %s" + ) prior = File( - position=3, - exists=True, - desc='Filename of parameter prior.', - argstr='-prior %s') - te_value = traits.Float( - desc='TE Echo Time [0ms!].', argstr='-TE %f', position=4) + position=3, exists=True, desc="Filename of parameter prior.", argstr="-prior %s" + ) + te_value = traits.Float(desc="TE Echo Time [0ms!].", argstr="-TE %f", position=4) tr_value = traits.Float( - desc='TR Repetition Time [10s!].', argstr='-TR %f', position=5) - desc = 'Number of components to fit [1] (currently IR/SR only)' + desc="TR Repetition Time [10s!].", argstr="-TR %f", position=5 + ) + desc = "Number of components to fit [1] (currently IR/SR only)" # set position to be ahead of TIs - nb_comp = traits.Int(desc=desc, position=6, argstr='-nc %d') - desc = 'Set LM parameters (initial value, decrease rate) [100,1.2].' + nb_comp = traits.Int(desc=desc, position=6, argstr="-nc %d") + desc = "Set LM parameters (initial value, decrease rate) [100,1.2]." lm_val = traits.Tuple( - traits.Float, traits.Float, desc=desc, argstr='-lm %f %f', position=7) - desc = 'Use Gauss-Newton algorithm [Levenberg-Marquardt].' - gn_flag = traits.Bool(desc=desc, argstr='-gn', position=8) + traits.Float, traits.Float, desc=desc, argstr="-lm %f %f", position=7 + ) + desc = "Use Gauss-Newton algorithm [Levenberg-Marquardt]." 
+ gn_flag = traits.Bool(desc=desc, argstr="-gn", position=8) slice_no = traits.Int( - desc='Fit to single slice number.', argstr='-slice %d', position=9) + desc="Fit to single slice number.", argstr="-slice %d", position=9 + ) voxel = traits.Tuple( traits.Int, traits.Int, traits.Int, - desc='Fit to single voxel only.', - argstr='-voxel %d %d %d', - position=10) - maxit = traits.Int( - desc='NLSQR iterations [100].', argstr='-maxit %d', position=11) + desc="Fit to single voxel only.", + argstr="-voxel %d %d %d", + position=10, + ) + maxit = traits.Int(desc="NLSQR iterations [100].", argstr="-maxit %d", position=11) # IR options: sr_flag = traits.Bool( - desc='Saturation Recovery fitting [default].', - argstr='-SR', - position=12) + desc="Saturation Recovery fitting [default].", argstr="-SR", position=12 + ) ir_flag = traits.Bool( - desc='Inversion Recovery fitting [default].', - argstr='-IR', - position=13) + desc="Inversion Recovery fitting [default].", argstr="-IR", position=13 + ) tis = traits.List( traits.Float, position=14, - desc='Inversion times for T1 data [1s,2s,5s].', - argstr='-TIs %s', - sep=' ') + desc="Inversion times for T1 data [1s,2s,5s].", + argstr="-TIs %s", + sep=" ", + ) tis_list = File( - exists=True, - argstr='-TIlist %s', - desc='Filename of list of pre-defined TIs.') + exists=True, argstr="-TIlist %s", desc="Filename of list of pre-defined TIs." + ) t1_list = File( - exists=True, - argstr='-T1list %s', - desc='Filename of list of pre-defined T1s') - t1min = traits.Float( - desc='Minimum tissue T1 value [400ms].', argstr='-T1min %f') - t1max = traits.Float( - desc='Maximum tissue T1 value [4000ms].', argstr='-T1max %f') + exists=True, argstr="-T1list %s", desc="Filename of list of pre-defined T1s" + ) + t1min = traits.Float(desc="Minimum tissue T1 value [400ms].", argstr="-T1min %f") + t1max = traits.Float(desc="Maximum tissue T1 value [4000ms].", argstr="-T1max %f") # SPGR options - spgr = traits.Bool(desc='Spoiled Gradient Echo fitting', argstr='-SPGR') - flips = traits.List( - traits.Float, desc='Flip angles', argstr='-flips %s', sep=' ') - desc = 'Filename of list of pre-defined flip angles (deg).' - flips_list = File(exists=True, argstr='-fliplist %s', desc=desc) - desc = 'Filename of B1 estimate for fitting (or include in prior).' - b1map = File(exists=True, argstr='-b1map %s', desc=desc) + spgr = traits.Bool(desc="Spoiled Gradient Echo fitting", argstr="-SPGR") + flips = traits.List(traits.Float, desc="Flip angles", argstr="-flips %s", sep=" ") + desc = "Filename of list of pre-defined flip angles (deg)." + flips_list = File(exists=True, argstr="-fliplist %s", desc=desc) + desc = "Filename of B1 estimate for fitting (or include in prior)." 
+ b1map = File(exists=True, argstr="-b1map %s", desc=desc) # MCMC options: mcout = File( - exists=True, - desc='Filename of mc samples (ascii text file)', - argstr='-mcout %s') + exists=True, desc="Filename of mc samples (ascii text file)", argstr="-mcout %s" + ) mcsamples = traits.Int( - desc='Number of samples to keep [100].', argstr='-mcsamples %d') + desc="Number of samples to keep [100].", argstr="-mcsamples %d" + ) mcmaxit = traits.Int( - desc='Number of iterations to run [10,000].', argstr='-mcmaxit %d') + desc="Number of iterations to run [10,000].", argstr="-mcmaxit %d" + ) acceptance = traits.Float( - desc='Fraction of iterations to accept [0.23].', - argstr='-acceptance %f') + desc="Fraction of iterations to accept [0.23].", argstr="-acceptance %f" + ) class FitQt1OutputSpec(TraitedSpec): """ Output Spec for FitQt1. """ - t1map_file = File(desc='Filename of the estimated output T1 map (in ms)') - m0map_file = File(desc='Filename of the m0 map') - desc = 'Filename of the estimated output multi-parameter map' + + t1map_file = File(desc="Filename of the estimated output T1 map (in ms)") + m0map_file = File(desc="Filename of the m0 map") + desc = "Filename of the estimated output multi-parameter map" mcmap_file = File(desc=desc) - comp_file = File(desc='Filename of the estimated multi-component T1 map.') - desc = 'Filename of the error map (symmetric matrix, [Diag,OffDiag])' + comp_file = File(desc="Filename of the estimated multi-component T1 map.") + desc = "Filename of the error map (symmetric matrix, [Diag,OffDiag])" error_file = File(desc=desc) - syn_file = File(desc='Filename of the synthetic ASL data') - res_file = File(desc='Filename of the model fit residuals') + syn_file = File(desc="Filename of the synthetic ASL data") + res_file = File(desc="Filename of the model fit residuals") class FitQt1(NiftyFitCommand): @@ -180,7 +179,8 @@ class FitQt1(NiftyFitCommand): -res TI4D_res.nii.gz -syn TI4D_syn.nii.gz -t1map TI4D_t1map.nii.gz' """ - _cmd = get_custom_path('fit_qt1', env_dir='NIFTYFITDIR') + + _cmd = get_custom_path("fit_qt1", env_dir="NIFTYFITDIR") input_spec = FitQt1InputSpec output_spec = FitQt1OutputSpec - _suffix = '_fit_qt1' + _suffix = "_fit_qt1" diff --git a/nipype/interfaces/niftyfit/tests/test_asl.py b/nipype/interfaces/niftyfit/tests/test_asl.py index f703555c16..bbcfd4f01a 100644 --- a/nipype/interfaces/niftyfit/tests/test_asl.py +++ b/nipype/interfaces/niftyfit/tests/test_asl.py @@ -11,15 +11,14 @@ from ...niftyreg.tests.test_regutils import no_nifty_tool -@pytest.mark.skipif( - no_nifty_tool(cmd='fit_asl'), reason="niftyfit is not installed") +@pytest.mark.skipif(no_nifty_tool(cmd="fit_asl"), reason="niftyfit is not installed") def test_fit_asl(): """ Testing FitAsl interface.""" # Create the test node fit_asl = FitAsl() # Check if the command is properly defined - cmd = get_custom_path('fit_asl', env_dir='NIFTYFIT_DIR') + cmd = get_custom_path("fit_asl", env_dir="NIFTYFIT_DIR") assert fit_asl.cmd == cmd # test raising error with mandatory args absent @@ -28,16 +27,16 @@ def test_fit_asl(): # Tests on the interface: # Runs cbf fitting assuming all tissue is GM! 
- in_file = example_data('asl.nii.gz') + in_file = example_data("asl.nii.gz") fit_asl.inputs.source_file = in_file - cmd_tmp = '{cmd} -source {in_file} -cbf {cbf} -error {error} -syn {syn}' + cmd_tmp = "{cmd} -source {in_file} -cbf {cbf} -error {error} -syn {syn}" expected_cmd = cmd_tmp.format( cmd=cmd, in_file=in_file, - cbf='asl_cbf.nii.gz', - error='asl_error.nii.gz', - syn='asl_syn.nii.gz', + cbf="asl_cbf.nii.gz", + error="asl_error.nii.gz", + syn="asl_syn.nii.gz", ) assert fit_asl.cmdline == expected_cmd @@ -46,24 +45,24 @@ def test_fit_asl(): # the segmentation data to fit tissue specific blood flow parameters # (lambda,transit times,T1) fit_asl2 = FitAsl(sig=True) - in_file = example_data('asl.nii.gz') - t1map = example_data('T1map.nii.gz') - seg = example_data('segmentation0.nii.gz') + in_file = example_data("asl.nii.gz") + t1map = example_data("T1map.nii.gz") + seg = example_data("segmentation0.nii.gz") fit_asl2.inputs.source_file = in_file fit_asl2.inputs.t1map = t1map fit_asl2.inputs.seg = seg - cmd_tmp = '{cmd} -source {in_file} -cbf {cbf} -error {error} \ --seg {seg} -sig -syn {syn} -t1map {t1map}' + cmd_tmp = "{cmd} -source {in_file} -cbf {cbf} -error {error} \ +-seg {seg} -sig -syn {syn} -t1map {t1map}" expected_cmd = cmd_tmp.format( cmd=cmd, in_file=in_file, t1map=t1map, seg=seg, - cbf='asl_cbf.nii.gz', - error='asl_error.nii.gz', - syn='asl_syn.nii.gz', + cbf="asl_cbf.nii.gz", + error="asl_error.nii.gz", + syn="asl_syn.nii.gz", ) assert fit_asl2.cmdline == expected_cmd diff --git a/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py b/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py index 9a59243c9e..cf81acdc55 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py @@ -4,146 +4,163 @@ def test_DwiTool_inputs(): input_map = dict( - args=dict(argstr='%s', ), - b0_file=dict( - argstr='-b0 %s', - extensions=None, - position=4, - ), + args=dict(argstr="%s",), + b0_file=dict(argstr="-b0 %s", extensions=None, position=4,), ball_flag=dict( - argstr='-ball', + argstr="-ball", position=6, xor=[ - 'mono_flag', 'ivim_flag', 'dti_flag', 'dti_flag2', - 'ballv_flag', 'nod_flag', 'nodv_flag' + "mono_flag", + "ivim_flag", + "dti_flag", + "dti_flag2", + "ballv_flag", + "nod_flag", + "nodv_flag", ], ), ballv_flag=dict( - argstr='-ballv', + argstr="-ballv", position=6, xor=[ - 'mono_flag', 'ivim_flag', 'dti_flag', 'dti_flag2', 'ball_flag', - 'nod_flag', 'nodv_flag' + "mono_flag", + "ivim_flag", + "dti_flag", + "dti_flag2", + "ball_flag", + "nod_flag", + "nodv_flag", ], ), - bval_file=dict( - argstr='-bval %s', - extensions=None, - mandatory=True, - position=2, - ), - bvec_file=dict( - argstr='-bvec %s', - extensions=None, - position=3, - ), - diso_val=dict(argstr='-diso %f', ), - dpr_val=dict(argstr='-dpr %f', ), + bval_file=dict(argstr="-bval %s", extensions=None, mandatory=True, position=2,), + bvec_file=dict(argstr="-bvec %s", extensions=None, position=3,), + diso_val=dict(argstr="-diso %f",), + dpr_val=dict(argstr="-dpr %f",), dti_flag=dict( - argstr='-dti', + argstr="-dti", position=6, xor=[ - 'mono_flag', 'ivim_flag', 'dti_flag2', 'ball_flag', - 'ballv_flag', 'nod_flag', 'nodv_flag' + "mono_flag", + "ivim_flag", + "dti_flag2", + "ball_flag", + "ballv_flag", + "nod_flag", + "nodv_flag", ], ), dti_flag2=dict( - argstr='-dti2', + argstr="-dti2", position=6, xor=[ - 'mono_flag', 'ivim_flag', 'dti_flag', 'ball_flag', - 'ballv_flag', 'nod_flag', 'nodv_flag' + "mono_flag", + "ivim_flag", + "dti_flag", + "ball_flag", 
+ "ballv_flag", + "nod_flag", + "nodv_flag", ], ), - environ=dict( - nohash=True, - usedefault=True, - ), + environ=dict(nohash=True, usedefault=True,), famap_file=dict( - argstr='-famap %s', + argstr="-famap %s", extensions=None, - name_source=['source_file'], - name_template='%s_famap.nii.gz', + name_source=["source_file"], + name_template="%s_famap.nii.gz", ), ivim_flag=dict( - argstr='-ivim', + argstr="-ivim", position=6, xor=[ - 'mono_flag', 'dti_flag', 'dti_flag2', 'ball_flag', - 'ballv_flag', 'nod_flag', 'nodv_flag' + "mono_flag", + "dti_flag", + "dti_flag2", + "ball_flag", + "ballv_flag", + "nod_flag", + "nodv_flag", ], ), logdti_file=dict( - argstr='-logdti2 %s', - extensions=None, - name_source=['source_file'], - name_template='%s_logdti2.nii.gz', - ), - mask_file=dict( - argstr='-mask %s', + argstr="-logdti2 %s", extensions=None, - position=5, + name_source=["source_file"], + name_template="%s_logdti2.nii.gz", ), + mask_file=dict(argstr="-mask %s", extensions=None, position=5,), mcmap_file=dict( - argstr='-mcmap %s', + argstr="-mcmap %s", extensions=None, - name_source=['source_file'], - name_template='%s_mcmap.nii.gz', + name_source=["source_file"], + name_template="%s_mcmap.nii.gz", ), mdmap_file=dict( - argstr='-mdmap %s', + argstr="-mdmap %s", extensions=None, - name_source=['source_file'], - name_template='%s_mdmap.nii.gz', + name_source=["source_file"], + name_template="%s_mdmap.nii.gz", ), mono_flag=dict( - argstr='-mono', + argstr="-mono", position=6, xor=[ - 'ivim_flag', 'dti_flag', 'dti_flag2', 'ball_flag', - 'ballv_flag', 'nod_flag', 'nodv_flag' + "ivim_flag", + "dti_flag", + "dti_flag2", + "ball_flag", + "ballv_flag", + "nod_flag", + "nodv_flag", ], ), nod_flag=dict( - argstr='-nod', + argstr="-nod", position=6, xor=[ - 'mono_flag', 'ivim_flag', 'dti_flag', 'dti_flag2', 'ball_flag', - 'ballv_flag', 'nodv_flag' + "mono_flag", + "ivim_flag", + "dti_flag", + "dti_flag2", + "ball_flag", + "ballv_flag", + "nodv_flag", ], ), nodv_flag=dict( - argstr='-nodv', + argstr="-nodv", position=6, xor=[ - 'mono_flag', 'ivim_flag', 'dti_flag', 'dti_flag2', 'ball_flag', - 'ballv_flag', 'nod_flag' + "mono_flag", + "ivim_flag", + "dti_flag", + "dti_flag2", + "ball_flag", + "ballv_flag", + "nod_flag", ], ), rgbmap_file=dict( - argstr='-rgbmap %s', + argstr="-rgbmap %s", extensions=None, - name_source=['source_file'], - name_template='%s_rgbmap.nii.gz', + name_source=["source_file"], + name_template="%s_rgbmap.nii.gz", ), source_file=dict( - argstr='-source %s', - extensions=None, - mandatory=True, - position=1, + argstr="-source %s", extensions=None, mandatory=True, position=1, ), syn_file=dict( - argstr='-syn %s', + argstr="-syn %s", extensions=None, - name_source=['source_file'], - name_template='%s_syn.nii.gz', - requires=['bvec_file', 'b0_file'], + name_source=["source_file"], + name_template="%s_syn.nii.gz", + requires=["bvec_file", "b0_file"], ), v1map_file=dict( - argstr='-v1map %s', + argstr="-v1map %s", extensions=None, - name_source=['source_file'], - name_template='%s_v1map.nii.gz', + name_source=["source_file"], + name_template="%s_v1map.nii.gz", ), ) inputs = DwiTool.input_spec() @@ -151,15 +168,17 @@ def test_DwiTool_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DwiTool_outputs(): output_map = dict( - famap_file=dict(extensions=None, ), - logdti_file=dict(extensions=None, ), - mcmap_file=dict(extensions=None, ), - mdmap_file=dict(extensions=None, ), - 
rgbmap_file=dict(extensions=None, ), - syn_file=dict(extensions=None, ), - v1map_file=dict(extensions=None, ), + famap_file=dict(extensions=None,), + logdti_file=dict(extensions=None,), + mcmap_file=dict(extensions=None,), + mdmap_file=dict(extensions=None,), + rgbmap_file=dict(extensions=None,), + syn_file=dict(extensions=None,), + v1map_file=dict(extensions=None,), ) outputs = DwiTool.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py b/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py index 7e2401ed7f..567831f9f4 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py @@ -4,101 +4,75 @@ def test_FitAsl_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), cbf_file=dict( - argstr='-cbf %s', + argstr="-cbf %s", extensions=None, - name_source=['source_file'], - name_template='%s_cbf.nii.gz', - ), - dpld=dict(argstr='-dPLD %f', ), - dt_inv2=dict(argstr='-dTinv2 %f', ), - eff=dict(argstr='-eff %f', ), - environ=dict( - nohash=True, - usedefault=True, + name_source=["source_file"], + name_template="%s_cbf.nii.gz", ), + dpld=dict(argstr="-dPLD %f",), + dt_inv2=dict(argstr="-dTinv2 %f",), + eff=dict(argstr="-eff %f",), + environ=dict(nohash=True, usedefault=True,), error_file=dict( - argstr='-error %s', - extensions=None, - name_source=['source_file'], - name_template='%s_error.nii.gz', - ), - gm_plasma=dict(argstr='-gmL %f', ), - gm_t1=dict(argstr='-gmT1 %f', ), - gm_ttt=dict(argstr='-gmTTT %f', ), - ir_output=dict( - argstr='-IRoutput %s', - extensions=None, - ), - ir_volume=dict( - argstr='-IRvolume %s', - extensions=None, - ), - ldd=dict(argstr='-LDD %f', ), - m0map=dict( - argstr='-m0map %s', - extensions=None, - ), - m0mape=dict( - argstr='-m0mape %s', + argstr="-error %s", extensions=None, + name_source=["source_file"], + name_template="%s_error.nii.gz", ), - mask=dict( - argstr='-mask %s', - extensions=None, - position=2, - ), - mul=dict(argstr='-mul %f', ), - mulgm=dict(argstr='-sig', ), - out=dict(argstr='-out %f', ), - pasl=dict(argstr='-pasl', ), - pcasl=dict(argstr='-pcasl', ), - plasma_coeff=dict(argstr='-L %f', ), - pld=dict(argstr='-PLD %f', ), - pv0=dict(argstr='-pv0 %d', ), - pv2=dict(argstr='-pv2 %d', ), - pv3=dict(argstr='-pv3 %d %d %d', ), - pv_threshold=dict(argstr='-pvthreshold', ), - seg=dict( - argstr='-seg %s', - extensions=None, - ), - segstyle=dict(argstr='-segstyle', ), - sig=dict(argstr='-sig', ), + gm_plasma=dict(argstr="-gmL %f",), + gm_t1=dict(argstr="-gmT1 %f",), + gm_ttt=dict(argstr="-gmTTT %f",), + ir_output=dict(argstr="-IRoutput %s", extensions=None,), + ir_volume=dict(argstr="-IRvolume %s", extensions=None,), + ldd=dict(argstr="-LDD %f",), + m0map=dict(argstr="-m0map %s", extensions=None,), + m0mape=dict(argstr="-m0mape %s", extensions=None,), + mask=dict(argstr="-mask %s", extensions=None, position=2,), + mul=dict(argstr="-mul %f",), + mulgm=dict(argstr="-sig",), + out=dict(argstr="-out %f",), + pasl=dict(argstr="-pasl",), + pcasl=dict(argstr="-pcasl",), + plasma_coeff=dict(argstr="-L %f",), + pld=dict(argstr="-PLD %f",), + pv0=dict(argstr="-pv0 %d",), + pv2=dict(argstr="-pv2 %d",), + pv3=dict(argstr="-pv3 %d %d %d",), + pv_threshold=dict(argstr="-pvthreshold",), + seg=dict(argstr="-seg %s", extensions=None,), + segstyle=dict(argstr="-segstyle",), + sig=dict(argstr="-sig",), source_file=dict( - argstr='-source %s', - extensions=None, - mandatory=True, - position=1, + argstr="-source %s", extensions=None, mandatory=True, 
position=1, ), syn_file=dict( - argstr='-syn %s', + argstr="-syn %s", extensions=None, - name_source=['source_file'], - name_template='%s_syn.nii.gz', + name_source=["source_file"], + name_template="%s_syn.nii.gz", ), - t1_art_cmp=dict(argstr='-T1a %f', ), - t1map=dict( - argstr='-t1map %s', - extensions=None, - ), - t_inv1=dict(argstr='-Tinv1 %f', ), - t_inv2=dict(argstr='-Tinv2 %f', ), - wm_plasma=dict(argstr='-wmL %f', ), - wm_t1=dict(argstr='-wmT1 %f', ), - wm_ttt=dict(argstr='-wmTTT %f', ), + t1_art_cmp=dict(argstr="-T1a %f",), + t1map=dict(argstr="-t1map %s", extensions=None,), + t_inv1=dict(argstr="-Tinv1 %f",), + t_inv2=dict(argstr="-Tinv2 %f",), + wm_plasma=dict(argstr="-wmL %f",), + wm_t1=dict(argstr="-wmT1 %f",), + wm_ttt=dict(argstr="-wmTTT %f",), ) inputs = FitAsl.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FitAsl_outputs(): output_map = dict( - cbf_file=dict(extensions=None, ), - error_file=dict(extensions=None, ), - syn_file=dict(extensions=None, ), + cbf_file=dict(extensions=None,), + error_file=dict(extensions=None,), + syn_file=dict(extensions=None,), ) outputs = FitAsl.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py b/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py index afb70755fc..57bee972e1 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py @@ -4,233 +4,218 @@ def test_FitDwi_inputs(): input_map = dict( - acceptance=dict(argstr='-accpetance %f', ), - args=dict(argstr='%s', ), + acceptance=dict(argstr="-accpetance %f",), + args=dict(argstr="%s",), ball_flag=dict( - argstr='-ball', + argstr="-ball", position=4, xor=[ - 'mono_flag', 'ivim_flag', 'dti_flag', 'ballv_flag', 'nod_flag', - 'nodv_flag' + "mono_flag", + "ivim_flag", + "dti_flag", + "ballv_flag", + "nod_flag", + "nodv_flag", ], ), ballv_flag=dict( - argstr='-ballv', + argstr="-ballv", position=4, xor=[ - 'mono_flag', 'ivim_flag', 'dti_flag', 'ball_flag', 'nod_flag', - 'nodv_flag' + "mono_flag", + "ivim_flag", + "dti_flag", + "ball_flag", + "nod_flag", + "nodv_flag", ], ), - bval_file=dict( - argstr='-bval %s', - extensions=None, - mandatory=True, - position=2, - ), - bvec_file=dict( - argstr='-bvec %s', - extensions=None, - mandatory=True, - position=3, - ), - cov_file=dict( - argstr='-cov %s', - extensions=None, - ), - csf_t2_val=dict(argstr='-csfT2 %f', ), - diso_val=dict(argstr='-diso %f', ), - dpr_val=dict(argstr='-dpr %f', ), + bval_file=dict(argstr="-bval %s", extensions=None, mandatory=True, position=2,), + bvec_file=dict(argstr="-bvec %s", extensions=None, mandatory=True, position=3,), + cov_file=dict(argstr="-cov %s", extensions=None,), + csf_t2_val=dict(argstr="-csfT2 %f",), + diso_val=dict(argstr="-diso %f",), + dpr_val=dict(argstr="-dpr %f",), dti_flag=dict( - argstr='-dti', + argstr="-dti", position=4, xor=[ - 'mono_flag', 'ivim_flag', 'ball_flag', 'ballv_flag', - 'nod_flag', 'nodv_flag' + "mono_flag", + "ivim_flag", + "ball_flag", + "ballv_flag", + "nod_flag", + "nodv_flag", ], ), - environ=dict( - nohash=True, - usedefault=True, - ), + environ=dict(nohash=True, usedefault=True,), error_file=dict( - argstr='-error %s', + argstr="-error %s", extensions=None, - name_source=['source_file'], - name_template='%s_error.nii.gz', + name_source=["source_file"], + name_template="%s_error.nii.gz", ), famap_file=dict( - argstr='-famap %s', + argstr="-famap %s", 
extensions=None, - name_source=['source_file'], - name_template='%s_famap.nii.gz', - ), - gn_flag=dict( - argstr='-gn', - xor=['wls_flag'], + name_source=["source_file"], + name_template="%s_famap.nii.gz", ), + gn_flag=dict(argstr="-gn", xor=["wls_flag"],), ivim_flag=dict( - argstr='-ivim', + argstr="-ivim", position=4, xor=[ - 'mono_flag', 'dti_flag', 'ball_flag', 'ballv_flag', 'nod_flag', - 'nodv_flag' + "mono_flag", + "dti_flag", + "ball_flag", + "ballv_flag", + "nod_flag", + "nodv_flag", ], ), - lm_vals=dict( - argstr='-lm %f %f', - requires=['gn_flag'], - ), - mask_file=dict( - argstr='-mask %s', - extensions=None, - ), - maxit_val=dict( - argstr='-maxit %d', - requires=['gn_flag'], - ), + lm_vals=dict(argstr="-lm %f %f", requires=["gn_flag"],), + mask_file=dict(argstr="-mask %s", extensions=None,), + maxit_val=dict(argstr="-maxit %d", requires=["gn_flag"],), mcmap_file=dict( - argstr='-mcmap %s', + argstr="-mcmap %s", extensions=None, - name_source=['source_file'], - name_template='%s_mcmap.nii.gz', - requires=['nodv_flag'], + name_source=["source_file"], + name_template="%s_mcmap.nii.gz", + requires=["nodv_flag"], ), - mcmaxit=dict(argstr='-mcmaxit %d', ), + mcmaxit=dict(argstr="-mcmaxit %d",), mcout=dict( - argstr='-mcout %s', + argstr="-mcout %s", extensions=None, - name_source=['source_file'], - name_template='%s_mcout.txt', + name_source=["source_file"], + name_template="%s_mcout.txt", ), - mcsamples=dict(argstr='-mcsamples %d', ), + mcsamples=dict(argstr="-mcsamples %d",), mdmap_file=dict( - argstr='-mdmap %s', + argstr="-mdmap %s", extensions=None, - name_source=['source_file'], - name_template='%s_mdmap.nii.gz', + name_source=["source_file"], + name_template="%s_mdmap.nii.gz", ), mono_flag=dict( - argstr='-mono', + argstr="-mono", position=4, xor=[ - 'ivim_flag', 'dti_flag', 'ball_flag', 'ballv_flag', 'nod_flag', - 'nodv_flag' + "ivim_flag", + "dti_flag", + "ball_flag", + "ballv_flag", + "nod_flag", + "nodv_flag", ], ), nod_flag=dict( - argstr='-nod', + argstr="-nod", position=4, xor=[ - 'mono_flag', 'ivim_flag', 'dti_flag', 'ball_flag', - 'ballv_flag', 'nodv_flag' + "mono_flag", + "ivim_flag", + "dti_flag", + "ball_flag", + "ballv_flag", + "nodv_flag", ], ), nodiff_file=dict( - argstr='-nodiff %s', + argstr="-nodiff %s", extensions=None, - name_source=['source_file'], - name_template='%s_no_diff.nii.gz', + name_source=["source_file"], + name_template="%s_no_diff.nii.gz", ), nodv_flag=dict( - argstr='-nodv', + argstr="-nodv", position=4, xor=[ - 'mono_flag', 'ivim_flag', 'dti_flag', 'ball_flag', - 'ballv_flag', 'nod_flag' + "mono_flag", + "ivim_flag", + "dti_flag", + "ball_flag", + "ballv_flag", + "nod_flag", ], ), - perf_thr=dict(argstr='-perfthreshold %f', ), - prior_file=dict( - argstr='-prior %s', - extensions=None, - ), + perf_thr=dict(argstr="-perfthreshold %f",), + prior_file=dict(argstr="-prior %s", extensions=None,), res_file=dict( - argstr='-res %s', + argstr="-res %s", extensions=None, - name_source=['source_file'], - name_template='%s_resmap.nii.gz', + name_source=["source_file"], + name_template="%s_resmap.nii.gz", ), rgbmap_file=dict( - argstr='-rgbmap %s', + argstr="-rgbmap %s", extensions=None, - name_source=['source_file'], - name_template='%s_rgbmap.nii.gz', - requires=['dti_flag'], + name_source=["source_file"], + name_template="%s_rgbmap.nii.gz", + requires=["dti_flag"], ), - rot_sform_flag=dict(argstr='-rotsform %d', ), - slice_no=dict(argstr='-slice %d', ), + rot_sform_flag=dict(argstr="-rotsform %d",), + slice_no=dict(argstr="-slice %d",), 
source_file=dict( - argstr='-source %s', - extensions=None, - mandatory=True, - position=1, + argstr="-source %s", extensions=None, mandatory=True, position=1, ), - swls_val=dict(argstr='-swls %f', ), + swls_val=dict(argstr="-swls %f",), syn_file=dict( - argstr='-syn %s', - extensions=None, - name_source=['source_file'], - name_template='%s_syn.nii.gz', - ), - te_file=dict( - argstr='-TE %s', + argstr="-syn %s", extensions=None, - xor=['te_file'], + name_source=["source_file"], + name_template="%s_syn.nii.gz", ), - te_value=dict( - argstr='-TE %s', - extensions=None, - xor=['te_file'], - ), - ten_type=dict(usedefault=True, ), + te_file=dict(argstr="-TE %s", extensions=None, xor=["te_file"],), + te_value=dict(argstr="-TE %s", extensions=None, xor=["te_file"],), + ten_type=dict(usedefault=True,), tenmap2_file=dict( - argstr='-tenmap2 %s', + argstr="-tenmap2 %s", extensions=None, - name_source=['source_file'], - name_template='%s_tenmap2.nii.gz', - requires=['dti_flag'], + name_source=["source_file"], + name_template="%s_tenmap2.nii.gz", + requires=["dti_flag"], ), tenmap_file=dict( - argstr='-tenmap %s', + argstr="-tenmap %s", extensions=None, - name_source=['source_file'], - name_template='%s_tenmap.nii.gz', - requires=['dti_flag'], + name_source=["source_file"], + name_template="%s_tenmap.nii.gz", + requires=["dti_flag"], ), v1map_file=dict( - argstr='-v1map %s', + argstr="-v1map %s", extensions=None, - name_source=['source_file'], - name_template='%s_v1map.nii.gz', - ), - vb_flag=dict(argstr='-vb', ), - voxel=dict(argstr='-voxel %d %d %d', ), - wls_flag=dict( - argstr='-wls', - xor=['gn_flag'], + name_source=["source_file"], + name_template="%s_v1map.nii.gz", ), - wm_t2_val=dict(argstr='-wmT2 %f', ), + vb_flag=dict(argstr="-vb",), + voxel=dict(argstr="-voxel %d %d %d",), + wls_flag=dict(argstr="-wls", xor=["gn_flag"],), + wm_t2_val=dict(argstr="-wmT2 %f",), ) inputs = FitDwi.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FitDwi_outputs(): output_map = dict( - error_file=dict(extensions=None, ), - famap_file=dict(extensions=None, ), - mcmap_file=dict(extensions=None, ), - mcout=dict(extensions=None, ), - mdmap_file=dict(extensions=None, ), - nodiff_file=dict(extensions=None, ), - res_file=dict(extensions=None, ), - rgbmap_file=dict(extensions=None, ), - syn_file=dict(extensions=None, ), - tenmap2_file=dict(extensions=None, ), - tenmap_file=dict(extensions=None, ), - v1map_file=dict(extensions=None, ), + error_file=dict(extensions=None,), + famap_file=dict(extensions=None,), + mcmap_file=dict(extensions=None,), + mcout=dict(extensions=None,), + mdmap_file=dict(extensions=None,), + nodiff_file=dict(extensions=None,), + res_file=dict(extensions=None,), + rgbmap_file=dict(extensions=None,), + syn_file=dict(extensions=None,), + tenmap2_file=dict(extensions=None,), + tenmap_file=dict(extensions=None,), + v1map_file=dict(extensions=None,), ) outputs = FitDwi.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py b/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py index e27aee7da2..be09fc90fc 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py @@ -4,159 +4,95 @@ def test_FitQt1_inputs(): input_map = dict( - acceptance=dict(argstr='-acceptance %f', ), - args=dict(argstr='%s', ), - b1map=dict( - argstr='-b1map %s', - extensions=None, - ), + acceptance=dict(argstr="-acceptance 
%f",), + args=dict(argstr="%s",), + b1map=dict(argstr="-b1map %s", extensions=None,), comp_file=dict( - argstr='-comp %s', + argstr="-comp %s", extensions=None, - name_source=['source_file'], - name_template='%s_comp.nii.gz', - ), - environ=dict( - nohash=True, - usedefault=True, + name_source=["source_file"], + name_template="%s_comp.nii.gz", ), + environ=dict(nohash=True, usedefault=True,), error_file=dict( - argstr='-error %s', + argstr="-error %s", extensions=None, - name_source=['source_file'], - name_template='%s_error.nii.gz', - ), - flips=dict( - argstr='-flips %s', - sep=' ', - ), - flips_list=dict( - argstr='-fliplist %s', - extensions=None, - ), - gn_flag=dict( - argstr='-gn', - position=8, - ), - ir_flag=dict( - argstr='-IR', - position=13, - ), - lm_val=dict( - argstr='-lm %f %f', - position=7, - ), + name_source=["source_file"], + name_template="%s_error.nii.gz", + ), + flips=dict(argstr="-flips %s", sep=" ",), + flips_list=dict(argstr="-fliplist %s", extensions=None,), + gn_flag=dict(argstr="-gn", position=8,), + ir_flag=dict(argstr="-IR", position=13,), + lm_val=dict(argstr="-lm %f %f", position=7,), m0map_file=dict( - argstr='-m0map %s', - extensions=None, - name_source=['source_file'], - name_template='%s_m0map.nii.gz', - ), - mask=dict( - argstr='-mask %s', + argstr="-m0map %s", extensions=None, - position=2, - ), - maxit=dict( - argstr='-maxit %d', - position=11, + name_source=["source_file"], + name_template="%s_m0map.nii.gz", ), + mask=dict(argstr="-mask %s", extensions=None, position=2,), + maxit=dict(argstr="-maxit %d", position=11,), mcmap_file=dict( - argstr='-mcmap %s', + argstr="-mcmap %s", extensions=None, - name_source=['source_file'], - name_template='%s_mcmap.nii.gz', - ), - mcmaxit=dict(argstr='-mcmaxit %d', ), - mcout=dict( - argstr='-mcout %s', - extensions=None, - ), - mcsamples=dict(argstr='-mcsamples %d', ), - nb_comp=dict( - argstr='-nc %d', - position=6, - ), - prior=dict( - argstr='-prior %s', - extensions=None, - position=3, - ), + name_source=["source_file"], + name_template="%s_mcmap.nii.gz", + ), + mcmaxit=dict(argstr="-mcmaxit %d",), + mcout=dict(argstr="-mcout %s", extensions=None,), + mcsamples=dict(argstr="-mcsamples %d",), + nb_comp=dict(argstr="-nc %d", position=6,), + prior=dict(argstr="-prior %s", extensions=None, position=3,), res_file=dict( - argstr='-res %s', + argstr="-res %s", extensions=None, - name_source=['source_file'], - name_template='%s_res.nii.gz', - ), - slice_no=dict( - argstr='-slice %d', - position=9, + name_source=["source_file"], + name_template="%s_res.nii.gz", ), + slice_no=dict(argstr="-slice %d", position=9,), source_file=dict( - argstr='-source %s', - extensions=None, - mandatory=True, - position=1, - ), - spgr=dict(argstr='-SPGR', ), - sr_flag=dict( - argstr='-SR', - position=12, + argstr="-source %s", extensions=None, mandatory=True, position=1, ), + spgr=dict(argstr="-SPGR",), + sr_flag=dict(argstr="-SR", position=12,), syn_file=dict( - argstr='-syn %s', - extensions=None, - name_source=['source_file'], - name_template='%s_syn.nii.gz', - ), - t1_list=dict( - argstr='-T1list %s', + argstr="-syn %s", extensions=None, + name_source=["source_file"], + name_template="%s_syn.nii.gz", ), + t1_list=dict(argstr="-T1list %s", extensions=None,), t1map_file=dict( - argstr='-t1map %s', - extensions=None, - name_source=['source_file'], - name_template='%s_t1map.nii.gz', - ), - t1max=dict(argstr='-T1max %f', ), - t1min=dict(argstr='-T1min %f', ), - te_value=dict( - argstr='-TE %f', - position=4, - ), - tis=dict( - 
argstr='-TIs %s', - position=14, - sep=' ', - ), - tis_list=dict( - argstr='-TIlist %s', + argstr="-t1map %s", extensions=None, - ), - tr_value=dict( - argstr='-TR %f', - position=5, - ), - voxel=dict( - argstr='-voxel %d %d %d', - position=10, - ), + name_source=["source_file"], + name_template="%s_t1map.nii.gz", + ), + t1max=dict(argstr="-T1max %f",), + t1min=dict(argstr="-T1min %f",), + te_value=dict(argstr="-TE %f", position=4,), + tis=dict(argstr="-TIs %s", position=14, sep=" ",), + tis_list=dict(argstr="-TIlist %s", extensions=None,), + tr_value=dict(argstr="-TR %f", position=5,), + voxel=dict(argstr="-voxel %d %d %d", position=10,), ) inputs = FitQt1.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FitQt1_outputs(): output_map = dict( - comp_file=dict(extensions=None, ), - error_file=dict(extensions=None, ), - m0map_file=dict(extensions=None, ), - mcmap_file=dict(extensions=None, ), - res_file=dict(extensions=None, ), - syn_file=dict(extensions=None, ), - t1map_file=dict(extensions=None, ), + comp_file=dict(extensions=None,), + error_file=dict(extensions=None,), + m0map_file=dict(extensions=None,), + mcmap_file=dict(extensions=None,), + res_file=dict(extensions=None,), + syn_file=dict(extensions=None,), + t1map_file=dict(extensions=None,), ) outputs = FitQt1.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py b/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py index 83d600d26e..e6fb0b0bbb 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py @@ -4,11 +4,7 @@ def test_NiftyFitCommand_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), environ=dict(nohash=True, usedefault=True,), ) inputs = NiftyFitCommand.input_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_dwi.py b/nipype/interfaces/niftyfit/tests/test_dwi.py index 270d9c666a..a726301203 100644 --- a/nipype/interfaces/niftyfit/tests/test_dwi.py +++ b/nipype/interfaces/niftyfit/tests/test_dwi.py @@ -10,15 +10,14 @@ from ...niftyreg.tests.test_regutils import no_nifty_tool -@pytest.mark.skipif( - no_nifty_tool(cmd='fit_dwi'), reason="niftyfit is not installed") +@pytest.mark.skipif(no_nifty_tool(cmd="fit_dwi"), reason="niftyfit is not installed") def test_fit_dwi(): """ Testing FitDwi interface.""" # Create a node object fit_dwi = FitDwi() # Check if the command is properly defined - cmd = get_custom_path('fit_dwi', env_dir='NIFTYFITDIR') + cmd = get_custom_path("fit_dwi", env_dir="NIFTYFITDIR") assert fit_dwi.cmd == cmd # test raising error with mandatory args absent @@ -26,47 +25,47 @@ def test_fit_dwi(): fit_dwi.run() # Assign some input data - in_file = example_data('dwi.nii.gz') - bval_file = example_data('bvals') - bvec_file = example_data('bvecs') + in_file = example_data("dwi.nii.gz") + bval_file = example_data("bvals") + bvec_file = example_data("bvecs") fit_dwi.inputs.source_file = in_file fit_dwi.inputs.bval_file = bval_file fit_dwi.inputs.bvec_file = bvec_file fit_dwi.inputs.dti_flag = True - cmd_tmp = '{cmd} -source {in_file} -bval {bval} -bvec {bvec} -dti \ + cmd_tmp = "{cmd} -source {in_file} -bval {bval} -bvec {bvec} -dti \ -error {error} -famap {fa} -mcmap {mc} -mcout {mcout} -mdmap {md} -nodiff \ -{nodiff} -res {res} -rgbmap {rgb} -syn {syn} -tenmap2 {ten2} -v1map 
{v1}' +{nodiff} -res {res} -rgbmap {rgb} -syn {syn} -tenmap2 {ten2} -v1map {v1}" expected_cmd = cmd_tmp.format( cmd=cmd, in_file=in_file, bval=bval_file, bvec=bvec_file, - error='dwi_error.nii.gz', - fa='dwi_famap.nii.gz', - mc='dwi_mcmap.nii.gz', - md='dwi_mdmap.nii.gz', - nodiff='dwi_no_diff.nii.gz', - res='dwi_resmap.nii.gz', - rgb='dwi_rgbmap.nii.gz', - syn='dwi_syn.nii.gz', - ten2='dwi_tenmap2.nii.gz', - v1='dwi_v1map.nii.gz', - mcout='dwi_mcout.txt') + error="dwi_error.nii.gz", + fa="dwi_famap.nii.gz", + mc="dwi_mcmap.nii.gz", + md="dwi_mdmap.nii.gz", + nodiff="dwi_no_diff.nii.gz", + res="dwi_resmap.nii.gz", + rgb="dwi_rgbmap.nii.gz", + syn="dwi_syn.nii.gz", + ten2="dwi_tenmap2.nii.gz", + v1="dwi_v1map.nii.gz", + mcout="dwi_mcout.txt", + ) assert fit_dwi.cmdline == expected_cmd -@pytest.mark.skipif( - no_nifty_tool(cmd='dwi_tool'), reason="niftyfit is not installed") +@pytest.mark.skipif(no_nifty_tool(cmd="dwi_tool"), reason="niftyfit is not installed") def test_dwi_tool(): """ Testing DwiTool interface.""" # Create a node object dwi_tool = DwiTool() # Check if the command is properly defined - cmd = get_custom_path('dwi_tool', env_dir='NIFTYFITDIR') + cmd = get_custom_path("dwi_tool", env_dir="NIFTYFITDIR") assert dwi_tool.cmd == cmd # test raising error with mandatory args absent @@ -74,11 +73,11 @@ def test_dwi_tool(): dwi_tool.run() # Assign some input data - in_file = example_data('dwi.nii.gz') - bval_file = example_data('bvals') - bvec_file = example_data('bvecs') - b0_file = example_data('b0.nii') - mask_file = example_data('mask.nii.gz') + in_file = example_data("dwi.nii.gz") + bval_file = example_data("bvals") + bvec_file = example_data("bvecs") + b0_file = example_data("b0.nii") + mask_file = example_data("mask.nii.gz") dwi_tool.inputs.source_file = in_file dwi_tool.inputs.mask_file = mask_file dwi_tool.inputs.bval_file = bval_file @@ -86,9 +85,9 @@ def test_dwi_tool(): dwi_tool.inputs.b0_file = b0_file dwi_tool.inputs.dti_flag = True - cmd_tmp = '{cmd} -source {in_file} -bval {bval} -bvec {bvec} -b0 {b0} \ + cmd_tmp = "{cmd} -source {in_file} -bval {bval} -bvec {bvec} -b0 {b0} \ -mask {mask} -dti -famap {fa} -logdti2 {log} -mcmap {mc} -mdmap {md} \ --rgbmap {rgb} -syn {syn} -v1map {v1}' +-rgbmap {rgb} -syn {syn} -v1map {v1}" expected_cmd = cmd_tmp.format( cmd=cmd, @@ -97,12 +96,13 @@ def test_dwi_tool(): bvec=bvec_file, b0=b0_file, mask=mask_file, - fa='dwi_famap.nii.gz', - log='dwi_logdti2.nii.gz', - mc='dwi_mcmap.nii.gz', - md='dwi_mdmap.nii.gz', - rgb='dwi_rgbmap.nii.gz', - syn='dwi_syn.nii.gz', - v1='dwi_v1map.nii.gz') + fa="dwi_famap.nii.gz", + log="dwi_logdti2.nii.gz", + mc="dwi_mcmap.nii.gz", + md="dwi_mdmap.nii.gz", + rgb="dwi_rgbmap.nii.gz", + syn="dwi_syn.nii.gz", + v1="dwi_v1map.nii.gz", + ) assert dwi_tool.cmdline == expected_cmd diff --git a/nipype/interfaces/niftyfit/tests/test_qt1.py b/nipype/interfaces/niftyfit/tests/test_qt1.py index 894017e654..9146e7e97f 100644 --- a/nipype/interfaces/niftyfit/tests/test_qt1.py +++ b/nipype/interfaces/niftyfit/tests/test_qt1.py @@ -10,15 +10,14 @@ from ..qt1 import FitQt1 -@pytest.mark.skipif( - no_nifty_tool(cmd='fit_qt1'), reason="niftyfit is not installed") +@pytest.mark.skipif(no_nifty_tool(cmd="fit_qt1"), reason="niftyfit is not installed") def test_fit_qt1(): """ Testing FitQt1 interface.""" # Create a node object fit_qt1 = FitQt1() # Check if the command is properly defined - cmd = get_custom_path('fit_qt1', env_dir='NIFTYFITDIR') + cmd = get_custom_path("fit_qt1", env_dir="NIFTYFITDIR") assert fit_qt1.cmd == cmd 
# test raising error with mandatory args absent @@ -26,68 +25,68 @@ def test_fit_qt1(): fit_qt1.run() # Regular test: - in_file = example_data('TI4D.nii.gz') + in_file = example_data("TI4D.nii.gz") fit_qt1.inputs.source_file = in_file - cmd_tmp = '{cmd} -source {in_file} -comp {comp} -error {error} -m0map \ -{map0} -mcmap {cmap} -res {res} -syn {syn} -t1map {t1map}' + cmd_tmp = "{cmd} -source {in_file} -comp {comp} -error {error} -m0map \ +{map0} -mcmap {cmap} -res {res} -syn {syn} -t1map {t1map}" expected_cmd = cmd_tmp.format( cmd=cmd, in_file=in_file, - comp='TI4D_comp.nii.gz', - map0='TI4D_m0map.nii.gz', - error='TI4D_error.nii.gz', - cmap='TI4D_mcmap.nii.gz', - res='TI4D_res.nii.gz', - t1map='TI4D_t1map.nii.gz', - syn='TI4D_syn.nii.gz', + comp="TI4D_comp.nii.gz", + map0="TI4D_m0map.nii.gz", + error="TI4D_error.nii.gz", + cmap="TI4D_mcmap.nii.gz", + res="TI4D_res.nii.gz", + t1map="TI4D_t1map.nii.gz", + syn="TI4D_syn.nii.gz", ) assert fit_qt1.cmdline == expected_cmd # Runs T1 fitting to inversion and saturation recovery data (NLSQR) fit_qt1_2 = FitQt1(tis=[1, 2, 5], ir_flag=True) - in_file = example_data('TI4D.nii.gz') + in_file = example_data("TI4D.nii.gz") fit_qt1_2.inputs.source_file = in_file - cmd_tmp = '{cmd} -source {in_file} -IR -TIs 1.0 2.0 5.0 \ + cmd_tmp = "{cmd} -source {in_file} -IR -TIs 1.0 2.0 5.0 \ -comp {comp} -error {error} -m0map {map0} -mcmap {cmap} -res {res} \ --syn {syn} -t1map {t1map}' +-syn {syn} -t1map {t1map}" expected_cmd = cmd_tmp.format( cmd=cmd, in_file=in_file, - comp='TI4D_comp.nii.gz', - map0='TI4D_m0map.nii.gz', - error='TI4D_error.nii.gz', - cmap='TI4D_mcmap.nii.gz', - res='TI4D_res.nii.gz', - t1map='TI4D_t1map.nii.gz', - syn='TI4D_syn.nii.gz', + comp="TI4D_comp.nii.gz", + map0="TI4D_m0map.nii.gz", + error="TI4D_error.nii.gz", + cmap="TI4D_mcmap.nii.gz", + res="TI4D_res.nii.gz", + t1map="TI4D_t1map.nii.gz", + syn="TI4D_syn.nii.gz", ) assert fit_qt1_2.cmdline == expected_cmd # Runs T1 fitting to spoiled gradient echo (SPGR) data (NLSQR) fit_qt1_3 = FitQt1(flips=[2, 4, 8], spgr=True) - in_file = example_data('TI4D.nii.gz') + in_file = example_data("TI4D.nii.gz") fit_qt1_3.inputs.source_file = in_file - cmd_tmp = '{cmd} -source {in_file} -comp {comp} -error {error} \ + cmd_tmp = "{cmd} -source {in_file} -comp {comp} -error {error} \ -flips 2.0 4.0 8.0 -m0map {map0} -mcmap {cmap} -res {res} -SPGR -syn {syn} \ --t1map {t1map}' +-t1map {t1map}" expected_cmd = cmd_tmp.format( cmd=cmd, in_file=in_file, - comp='TI4D_comp.nii.gz', - map0='TI4D_m0map.nii.gz', - error='TI4D_error.nii.gz', - cmap='TI4D_mcmap.nii.gz', - res='TI4D_res.nii.gz', - t1map='TI4D_t1map.nii.gz', - syn='TI4D_syn.nii.gz', + comp="TI4D_comp.nii.gz", + map0="TI4D_m0map.nii.gz", + error="TI4D_error.nii.gz", + cmap="TI4D_mcmap.nii.gz", + res="TI4D_res.nii.gz", + t1map="TI4D_t1map.nii.gz", + syn="TI4D_syn.nii.gz", ) assert fit_qt1_3.cmdline == expected_cmd diff --git a/nipype/interfaces/niftyreg/__init__.py b/nipype/interfaces/niftyreg/__init__.py index 9854ebaea3..2ea7b95b26 100644 --- a/nipype/interfaces/niftyreg/__init__.py +++ b/nipype/interfaces/niftyreg/__init__.py @@ -10,5 +10,11 @@ from .base import get_custom_path from .reg import RegAladin, RegF3D -from .regutils import (RegResample, RegJacobian, RegAverage, RegTools, - RegTransform, RegMeasure) +from .regutils import ( + RegResample, + RegJacobian, + RegAverage, + RegTools, + RegTransform, + RegMeasure, +) diff --git a/nipype/interfaces/niftyreg/base.py b/nipype/interfaces/niftyreg/base.py index 0b1e0c514a..aa343dcfcb 100644 --- 
a/nipype/interfaces/niftyreg/base.py +++ b/nipype/interfaces/niftyreg/base.py @@ -22,29 +22,32 @@ from ..base import CommandLine, CommandLineInputSpec, traits, Undefined from ...utils.filemanip import split_filename -iflogger = logging.getLogger('nipype.interface') +iflogger = logging.getLogger("nipype.interface") -def get_custom_path(command, env_dir='NIFTYREGDIR'): - return os.path.join(os.getenv(env_dir, ''), command) +def get_custom_path(command, env_dir="NIFTYREGDIR"): + return os.path.join(os.getenv(env_dir, ""), command) class NiftyRegCommandInputSpec(CommandLineInputSpec): """Input Spec for niftyreg interfaces.""" + # Set the number of omp thread to use omp_core_val = traits.Int( - int(os.environ.get('OMP_NUM_THREADS', '1')), - desc='Number of openmp thread to use', - argstr='-omp %i', - usedefault=True) + int(os.environ.get("OMP_NUM_THREADS", "1")), + desc="Number of openmp thread to use", + argstr="-omp %i", + usedefault=True, + ) class NiftyRegCommand(CommandLine): """ Base support interface for NiftyReg commands. """ - _suffix = '_nr' - _min_version = '1.5.30' + + _suffix = "_nr" + _min_version = "1.5.30" input_spec = NiftyRegCommandInputSpec @@ -55,34 +58,33 @@ def __init__(self, required_version=None, **inputs): _version = self.version_from_command() if _version: _version = _version.decode("utf-8") - if self._min_version is not None and \ - StrictVersion(_version) < StrictVersion(self._min_version): - msg = 'A later version of Niftyreg is required (%s < %s)' + if self._min_version is not None and StrictVersion( + _version + ) < StrictVersion(self._min_version): + msg = "A later version of Niftyreg is required (%s < %s)" iflogger.warning(msg, _version, self._min_version) if required_version is not None: if StrictVersion(_version) != StrictVersion(required_version): - msg = 'The version of NiftyReg differs from the required' - msg += '(%s != %s)' + msg = "The version of NiftyReg differs from the required" + msg += "(%s != %s)" iflogger.warning(msg, _version, self.required_version) - self.inputs.on_trait_change(self._omp_update, 'omp_core_val') - self.inputs.on_trait_change(self._environ_update, 'environ') + self.inputs.on_trait_change(self._omp_update, "omp_core_val") + self.inputs.on_trait_change(self._environ_update, "environ") self._omp_update() def _omp_update(self): if self.inputs.omp_core_val: - self.inputs.environ['OMP_NUM_THREADS'] = \ - str(self.inputs.omp_core_val) + self.inputs.environ["OMP_NUM_THREADS"] = str(self.inputs.omp_core_val) self.num_threads = self.inputs.omp_core_val else: - if 'OMP_NUM_THREADS' in self.inputs.environ: - del self.inputs.environ['OMP_NUM_THREADS'] + if "OMP_NUM_THREADS" in self.inputs.environ: + del self.inputs.environ["OMP_NUM_THREADS"] self.num_threads = 1 def _environ_update(self): if self.inputs.environ: - if 'OMP_NUM_THREADS' in self.inputs.environ: - self.inputs.omp_core_val = \ - int(self.inputs.environ['OMP_NUM_THREADS']) + if "OMP_NUM_THREADS" in self.inputs.environ: + self.inputs.omp_core_val = int(self.inputs.environ["OMP_NUM_THREADS"]) else: self.inputs.omp_core_val = Undefined else: @@ -91,16 +93,16 @@ def _environ_update(self): def check_version(self): _version = self.version_from_command() if not _version: - raise Exception('Niftyreg not found') + raise Exception("Niftyreg not found") # Decoding to string: _version = _version.decode("utf-8") if StrictVersion(_version) < StrictVersion(self._min_version): - err = 'A later version of Niftyreg is required (%s < %s)' + err = "A later version of Niftyreg is required (%s < %s)" 
raise ValueError(err % (_version, self._min_version)) if self.required_version: if StrictVersion(_version) != StrictVersion(self.required_version): - err = 'The version of NiftyReg differs from the required' - err += '(%s != %s)' + err = "The version of NiftyReg differs from the required" + err += "(%s != %s)" raise ValueError(err % (_version, self.required_version)) @property @@ -111,14 +113,14 @@ def exists(self): return self.version_from_command() is not None def _format_arg(self, name, spec, value): - if name == 'omp_core_val': + if name == "omp_core_val": self.numthreads = value return super(NiftyRegCommand, self)._format_arg(name, spec, value) def _gen_fname(self, basename, out_dir=None, suffix=None, ext=None): - if basename == '': - msg = 'Unable to generate filename for command %s. ' % self.cmd - msg += 'basename is not set!' + if basename == "": + msg = "Unable to generate filename for command %s. " % self.cmd + msg += "basename is not set!" raise ValueError(msg) _, final_bn, final_ext = split_filename(basename) if out_dir is None: @@ -126,5 +128,5 @@ def _gen_fname(self, basename, out_dir=None, suffix=None, ext=None): if ext is not None: final_ext = ext if suffix is not None: - final_bn = ''.join((final_bn, suffix)) + final_bn = "".join((final_bn, suffix)) return os.path.abspath(os.path.join(out_dir, final_bn + final_ext)) diff --git a/nipype/interfaces/niftyreg/reg.py b/nipype/interfaces/niftyreg/reg.py index 26985a3e58..f149006d49 100644 --- a/nipype/interfaces/niftyreg/reg.py +++ b/nipype/interfaces/niftyreg/reg.py @@ -16,105 +16,111 @@ class RegAladinInputSpec(NiftyRegCommandInputSpec): """ Input Spec for RegAladin. """ + # Input reference file ref_file = File( exists=True, - desc='The input reference/target image', - argstr='-ref %s', - mandatory=True) + desc="The input reference/target image", + argstr="-ref %s", + mandatory=True, + ) # Input floating file flo_file = File( exists=True, - desc='The input floating/source image', - argstr='-flo %s', - mandatory=True) + desc="The input floating/source image", + argstr="-flo %s", + mandatory=True, + ) # No symmetric flag - nosym_flag = traits.Bool( - argstr='-noSym', desc='Turn off symmetric registration') + nosym_flag = traits.Bool(argstr="-noSym", desc="Turn off symmetric registration") # Rigid only registration - rig_only_flag = traits.Bool( - argstr='-rigOnly', desc='Do only a rigid registration') + rig_only_flag = traits.Bool(argstr="-rigOnly", desc="Do only a rigid registration") # Directly optimise affine flag - desc = 'Directly optimise the affine parameters' - aff_direct_flag = traits.Bool(argstr='-affDirect', desc=desc) + desc = "Directly optimise the affine parameters" + aff_direct_flag = traits.Bool(argstr="-affDirect", desc=desc) # Input affine in_aff_file = File( - exists=True, - desc='The input affine transformation', - argstr='-inaff %s') + exists=True, desc="The input affine transformation", argstr="-inaff %s" + ) # Input reference mask - rmask_file = File( - exists=True, desc='The input reference mask', argstr='-rmask %s') + rmask_file = File(exists=True, desc="The input reference mask", argstr="-rmask %s") # Input floating mask - fmask_file = File( - exists=True, desc='The input floating mask', argstr='-fmask %s') + fmask_file = File(exists=True, desc="The input floating mask", argstr="-fmask %s") # Maximum number of iterations maxit_val = traits.Range( - desc='Maximum number of iterations', argstr='-maxit %d', low=0) + desc="Maximum number of iterations", argstr="-maxit %d", low=0 + ) # Multiresolution levels 
ln_val = traits.Range( - desc='Number of resolution levels to create', argstr='-ln %d', low=0) + desc="Number of resolution levels to create", argstr="-ln %d", low=0 + ) # Number of resolution levels to process lp_val = traits.Range( - desc='Number of resolution levels to perform', argstr='-lp %d', low=0) + desc="Number of resolution levels to perform", argstr="-lp %d", low=0 + ) # Smoothing to apply on reference image - desc = 'Amount of smoothing to apply to reference image' - smoo_r_val = traits.Float(desc=desc, argstr='-smooR %f') + desc = "Amount of smoothing to apply to reference image" + smoo_r_val = traits.Float(desc=desc, argstr="-smooR %f") # Smoothing to apply on floating image - desc = 'Amount of smoothing to apply to floating image' - smoo_f_val = traits.Float(desc=desc, argstr='-smooF %f') + desc = "Amount of smoothing to apply to floating image" + smoo_f_val = traits.Float(desc=desc, argstr="-smooF %f") # Use nifti header to initialise transformation - desc = 'Use nifti header to initialise transformation' - nac_flag = traits.Bool(desc=desc, argstr='-nac') + desc = "Use nifti header to initialise transformation" + nac_flag = traits.Bool(desc=desc, argstr="-nac") # Use the input masks centre of mass to initialise the transformation - desc = 'Use the masks centre of mass to initialise the transformation' - cog_flag = traits.Bool(desc=desc, argstr='-cog') + desc = "Use the masks centre of mass to initialise the transformation" + cog_flag = traits.Bool(desc=desc, argstr="-cog") # Percent of blocks that are considered active. v_val = traits.Range( - desc='Percent of blocks that are active', argstr='-pv %d', low=0) + desc="Percent of blocks that are active", argstr="-pv %d", low=0 + ) # Percent of inlier blocks - i_val = traits.Range( - desc='Percent of inlier blocks', argstr='-pi %d', low=0) + i_val = traits.Range(desc="Percent of inlier blocks", argstr="-pi %d", low=0) # Lower threshold on reference image ref_low_val = traits.Float( - desc='Lower threshold value on reference image', - argstr='-refLowThr %f') + desc="Lower threshold value on reference image", argstr="-refLowThr %f" + ) # Upper threshold on reference image ref_up_val = traits.Float( - desc='Upper threshold value on reference image', argstr='-refUpThr %f') + desc="Upper threshold value on reference image", argstr="-refUpThr %f" + ) # Lower threshold on floating image flo_low_val = traits.Float( - desc='Lower threshold value on floating image', argstr='-floLowThr %f') + desc="Lower threshold value on floating image", argstr="-floLowThr %f" + ) # Upper threshold on floating image flo_up_val = traits.Float( - desc='Upper threshold value on floating image', argstr='-floUpThr %f') + desc="Upper threshold value on floating image", argstr="-floUpThr %f" + ) # Platform to use - platform_val = traits.Int(desc='Platform index', argstr='-platf %i') + platform_val = traits.Int(desc="Platform index", argstr="-platf %i") # Platform to use - gpuid_val = traits.Int(desc='Device to use id', argstr='-gpuid %i') + gpuid_val = traits.Int(desc="Device to use id", argstr="-gpuid %i") # Verbosity off - verbosity_off_flag = traits.Bool( - argstr='-voff', desc='Turn off verbose output') + verbosity_off_flag = traits.Bool(argstr="-voff", desc="Turn off verbose output") # Affine output transformation matrix file aff_file = File( - name_source=['flo_file'], - name_template='%s_aff.txt', - desc='The output affine matrix file', - argstr='-aff %s') + name_source=["flo_file"], + name_template="%s_aff.txt", + desc="The output affine matrix file", 
+ argstr="-aff %s", + ) # Result warped image file res_file = File( - name_source=['flo_file'], - name_template='%s_res.nii.gz', - desc='The affine transformed floating image', - argstr='-res %s') + name_source=["flo_file"], + name_template="%s_res.nii.gz", + desc="The affine transformed floating image", + argstr="-res %s", + ) class RegAladinOutputSpec(TraitedSpec): """ Output Spec for RegAladin. """ - aff_file = File(desc='The output affine file') - res_file = File(desc='The output transformed image') - desc = 'Output string in the format for reg_average' + + aff_file = File(desc="The output affine file") + res_file = File(desc="The output transformed image") + desc = "Output string in the format for reg_average" avg_output = traits.String(desc=desc) @@ -141,7 +147,8 @@ class RegAladin(NiftyRegCommand): -res im2_res.nii.gz -rmask mask.nii' """ - _cmd = get_custom_path('reg_aladin') + + _cmd = get_custom_path("reg_aladin") input_spec = RegAladinInputSpec output_spec = RegAladinOutputSpec @@ -149,196 +156,200 @@ def _list_outputs(self): outputs = super(RegAladin, self)._list_outputs() # Make a list of the linear transformation file and the input image - aff = os.path.abspath(outputs['aff_file']) + aff = os.path.abspath(outputs["aff_file"]) flo = os.path.abspath(self.inputs.flo_file) - outputs['avg_output'] = '%s %s' % (aff, flo) + outputs["avg_output"] = "%s %s" % (aff, flo) return outputs class RegF3DInputSpec(NiftyRegCommandInputSpec): """ Input Spec for RegF3D. """ + # Input reference file ref_file = File( exists=True, - desc='The input reference/target image', - argstr='-ref %s', - mandatory=True) + desc="The input reference/target image", + argstr="-ref %s", + mandatory=True, + ) # Input floating file flo_file = File( exists=True, - desc='The input floating/source image', - argstr='-flo %s', - mandatory=True) + desc="The input floating/source image", + argstr="-flo %s", + mandatory=True, + ) # Input Affine file aff_file = File( - exists=True, - desc='The input affine transformation file', - argstr='-aff %s') + exists=True, desc="The input affine transformation file", argstr="-aff %s" + ) # Input cpp file incpp_file = File( - exists=True, - desc='The input cpp transformation file', - argstr='-incpp %s') + exists=True, desc="The input cpp transformation file", argstr="-incpp %s" + ) # Reference mask - rmask_file = File( - exists=True, desc='Reference image mask', argstr='-rmask %s') + rmask_file = File(exists=True, desc="Reference image mask", argstr="-rmask %s") # Smoothing kernel for reference - desc = 'Smoothing kernel width for reference image' - ref_smooth_val = traits.Float(desc=desc, argstr='-smooR %f') + desc = "Smoothing kernel width for reference image" + ref_smooth_val = traits.Float(desc=desc, argstr="-smooR %f") # Smoothing kernel for floating - desc = 'Smoothing kernel width for floating image' - flo_smooth_val = traits.Float(desc=desc, argstr='-smooF %f') + desc = "Smoothing kernel width for floating image" + flo_smooth_val = traits.Float(desc=desc, argstr="-smooF %f") # Lower threshold for reference image rlwth_thr_val = traits.Float( - desc='Lower threshold for reference image', argstr='--rLwTh %f') + desc="Lower threshold for reference image", argstr="--rLwTh %f" + ) # Upper threshold for reference image rupth_thr_val = traits.Float( - desc='Upper threshold for reference image', argstr='--rUpTh %f') + desc="Upper threshold for reference image", argstr="--rUpTh %f" + ) # Lower threshold for reference image flwth_thr_val = traits.Float( - desc='Lower threshold for 
floating image', argstr='--fLwTh %f') + desc="Lower threshold for floating image", argstr="--fLwTh %f" + ) # Upper threshold for reference image fupth_thr_val = traits.Float( - desc='Upper threshold for floating image', argstr='--fUpTh %f') + desc="Upper threshold for floating image", argstr="--fUpTh %f" + ) # Lower threshold for reference image - desc = 'Lower threshold for reference image at the specified time point' + desc = "Lower threshold for reference image at the specified time point" rlwth2_thr_val = traits.Tuple( - traits.Range(low=0), traits.Float, desc=desc, argstr='-rLwTh %d %f') + traits.Range(low=0), traits.Float, desc=desc, argstr="-rLwTh %d %f" + ) # Upper threshold for reference image - desc = 'Upper threshold for reference image at the specified time point' + desc = "Upper threshold for reference image at the specified time point" rupth2_thr_val = traits.Tuple( - traits.Range(low=0), traits.Float, desc=desc, argstr='-rUpTh %d %f') + traits.Range(low=0), traits.Float, desc=desc, argstr="-rUpTh %d %f" + ) # Lower threshold for reference image - desc = 'Lower threshold for floating image at the specified time point' + desc = "Lower threshold for floating image at the specified time point" flwth2_thr_val = traits.Tuple( - traits.Range(low=0), traits.Float, desc=desc, argstr='-fLwTh %d %f') + traits.Range(low=0), traits.Float, desc=desc, argstr="-fLwTh %d %f" + ) # Upper threshold for reference image - desc = 'Upper threshold for floating image at the specified time point' + desc = "Upper threshold for floating image at the specified time point" fupth2_thr_val = traits.Tuple( - traits.Range(low=0), traits.Float, desc=desc, argstr='-fUpTh %d %f') + traits.Range(low=0), traits.Float, desc=desc, argstr="-fUpTh %d %f" + ) # Final grid spacing along the 3 axes - sx_val = traits.Float( - desc='Final grid spacing along the x axes', argstr='-sx %f') - sy_val = traits.Float( - desc='Final grid spacing along the y axes', argstr='-sy %f') - sz_val = traits.Float( - desc='Final grid spacing along the z axes', argstr='-sz %f') + sx_val = traits.Float(desc="Final grid spacing along the x axes", argstr="-sx %f") + sy_val = traits.Float(desc="Final grid spacing along the y axes", argstr="-sy %f") + sz_val = traits.Float(desc="Final grid spacing along the z axes", argstr="-sz %f") # Regularisation options - be_val = traits.Float(desc='Bending energy value', argstr='-be %f') - le_val = traits.Float( - desc='Linear elasticity penalty term', argstr='-le %f') + be_val = traits.Float(desc="Bending energy value", argstr="-be %f") + le_val = traits.Float(desc="Linear elasticity penalty term", argstr="-le %f") jl_val = traits.Float( - desc='Log of jacobian of deformation penalty value', argstr='-jl %f') - desc = 'Do not approximate the log of jacobian penalty at control points \ -only' + desc="Log of jacobian of deformation penalty value", argstr="-jl %f" + ) + desc = "Do not approximate the log of jacobian penalty at control points \ +only" - no_app_jl_flag = traits.Bool(argstr='-noAppJL', desc=desc) + no_app_jl_flag = traits.Bool(argstr="-noAppJL", desc=desc) # Similarity measure options - desc = 'use NMI even when other options are specified' - nmi_flag = traits.Bool(argstr='--nmi', desc=desc) - desc = 'Number of bins in the histogram for reference image' - rbn_val = traits.Range(low=0, desc=desc, argstr='--rbn %d') - desc = 'Number of bins in the histogram for reference image' - fbn_val = traits.Range(low=0, desc=desc, argstr='--fbn %d') - desc = 'Number of bins in the histogram for reference 
image for given \ -time point' + desc = "use NMI even when other options are specified" + nmi_flag = traits.Bool(argstr="--nmi", desc=desc) + desc = "Number of bins in the histogram for reference image" + rbn_val = traits.Range(low=0, desc=desc, argstr="--rbn %d") + desc = "Number of bins in the histogram for reference image" + fbn_val = traits.Range(low=0, desc=desc, argstr="--fbn %d") + desc = "Number of bins in the histogram for reference image for given \ +time point" rbn2_val = traits.Tuple( - traits.Range(low=0), - traits.Range(low=0), - desc=desc, - argstr='-rbn %d %d') + traits.Range(low=0), traits.Range(low=0), desc=desc, argstr="-rbn %d %d" + ) - desc = 'Number of bins in the histogram for reference image for given \ -time point' + desc = "Number of bins in the histogram for reference image for given \ +time point" fbn2_val = traits.Tuple( - traits.Range(low=0), - traits.Range(low=0), - desc=desc, - argstr='-fbn %d %d') + traits.Range(low=0), traits.Range(low=0), desc=desc, argstr="-fbn %d %d" + ) lncc_val = traits.Float( - desc='SD of the Gaussian for computing LNCC', argstr='--lncc %f') - desc = 'SD of the Gaussian for computing LNCC for a given time point' + desc="SD of the Gaussian for computing LNCC", argstr="--lncc %f" + ) + desc = "SD of the Gaussian for computing LNCC for a given time point" lncc2_val = traits.Tuple( - traits.Range(low=0), traits.Float, desc=desc, argstr='-lncc %d %f') + traits.Range(low=0), traits.Float, desc=desc, argstr="-lncc %d %f" + ) - ssd_flag = traits.Bool( - desc='Use SSD as the similarity measure', argstr='--ssd') - desc = 'Use SSD as the similarity measure for a given time point' - ssd2_flag = traits.Range(low=0, desc=desc, argstr='-ssd %d') + ssd_flag = traits.Bool(desc="Use SSD as the similarity measure", argstr="--ssd") + desc = "Use SSD as the similarity measure for a given time point" + ssd2_flag = traits.Range(low=0, desc=desc, argstr="-ssd %d") kld_flag = traits.Bool( - desc='Use KL divergence as the similarity measure', argstr='--kld') - desc = 'Use KL divergence as the similarity measure for a given time point' - kld2_flag = traits.Range(low=0, desc=desc, argstr='-kld %d') - amc_flag = traits.Bool(desc='Use additive NMI', argstr='-amc') + desc="Use KL divergence as the similarity measure", argstr="--kld" + ) + desc = "Use KL divergence as the similarity measure for a given time point" + kld2_flag = traits.Range(low=0, desc=desc, argstr="-kld %d") + amc_flag = traits.Bool(desc="Use additive NMI", argstr="-amc") - nox_flag = traits.Bool(desc="Don't optimise in x direction", argstr='-nox') - noy_flag = traits.Bool(desc="Don't optimise in y direction", argstr='-noy') - noz_flag = traits.Bool(desc="Don't optimise in z direction", argstr='-noz') + nox_flag = traits.Bool(desc="Don't optimise in x direction", argstr="-nox") + noy_flag = traits.Bool(desc="Don't optimise in y direction", argstr="-noy") + noz_flag = traits.Bool(desc="Don't optimise in z direction", argstr="-noz") # Optimization options maxit_val = traits.Range( - low=0, - argstr='-maxit %d', - desc='Maximum number of iterations per level') + low=0, argstr="-maxit %d", desc="Maximum number of iterations per level" + ) ln_val = traits.Range( - low=0, argstr='-ln %d', desc='Number of resolution levels to create') + low=0, argstr="-ln %d", desc="Number of resolution levels to create" + ) lp_val = traits.Range( - low=0, argstr='-lp %d', desc='Number of resolution levels to perform') + low=0, argstr="-lp %d", desc="Number of resolution levels to perform" + ) nopy_flag = traits.Bool( - 
desc='Do not use the multiresolution approach', argstr='-nopy') - noconj_flag = traits.Bool( - desc='Use simple GD optimization', argstr='-noConj') - desc = 'Add perturbation steps after each optimization step' - pert_val = traits.Range(low=0, desc=desc, argstr='-pert %d') + desc="Do not use the multiresolution approach", argstr="-nopy" + ) + noconj_flag = traits.Bool(desc="Use simple GD optimization", argstr="-noConj") + desc = "Add perturbation steps after each optimization step" + pert_val = traits.Range(low=0, desc=desc, argstr="-pert %d") # F3d2 options - vel_flag = traits.Bool( - desc='Use velocity field integration', argstr='-vel') - fmask_file = File( - exists=True, desc='Floating image mask', argstr='-fmask %s') + vel_flag = traits.Bool(desc="Use velocity field integration", argstr="-vel") + fmask_file = File(exists=True, desc="Floating image mask", argstr="-fmask %s") # Other options - desc = 'Kernel width for smoothing the metric gradient' - smooth_grad_val = traits.Float(desc=desc, argstr='-smoothGrad %f') + desc = "Kernel width for smoothing the metric gradient" + smooth_grad_val = traits.Float(desc=desc, argstr="-smoothGrad %f") # Padding value - pad_val = traits.Float(desc='Padding value', argstr='-pad %f') + pad_val = traits.Float(desc="Padding value", argstr="-pad %f") # verbosity off - verbosity_off_flag = traits.Bool( - argstr='-voff', desc='Turn off verbose output') + verbosity_off_flag = traits.Bool(argstr="-voff", desc="Turn off verbose output") # Output CPP image file cpp_file = File( - name_source=['flo_file'], - name_template='%s_cpp.nii.gz', - desc='The output CPP file', - argstr='-cpp %s') + name_source=["flo_file"], + name_template="%s_cpp.nii.gz", + desc="The output CPP file", + argstr="-cpp %s", + ) # Output warped image file res_file = File( - name_source=['flo_file'], - name_template='%s_res.nii.gz', - desc='The output resampled image', - argstr='-res %s') + name_source=["flo_file"], + name_template="%s_res.nii.gz", + desc="The output resampled image", + argstr="-res %s", + ) class RegF3DOutputSpec(TraitedSpec): """ Output Spec for RegF3D. 
""" - cpp_file = File(desc='The output CPP file') - res_file = File(desc='The output resampled image') - invcpp_file = File(desc='The output inverse CPP file') - invres_file = File(desc='The output inverse res file') - desc = 'Output string in the format for reg_average' + + cpp_file = File(desc="The output CPP file") + res_file = File(desc="The output resampled image") + invcpp_file = File(desc="The output inverse CPP file") + invres_file = File(desc="The output inverse res file") + desc = "Output string in the format for reg_average" avg_output = traits.String(desc=desc) @@ -364,7 +375,8 @@ class RegF3D(NiftyRegCommand): -res im2_res.nii.gz -rmask mask.nii' """ - _cmd = get_custom_path('reg_f3d') + + _cmd = get_custom_path("reg_f3d") input_spec = RegF3DInputSpec output_spec = RegF3DOutputSpec @@ -377,20 +389,23 @@ def _list_outputs(self): outputs = super(RegF3D, self)._list_outputs() if self.inputs.vel_flag is True: - res_name = self._remove_extension(outputs['res_file']) - cpp_name = self._remove_extension(outputs['cpp_file']) - outputs['invres_file'] = '%s_backward.nii.gz' % res_name - outputs['invcpp_file'] = '%s_backward.nii.gz' % cpp_name + res_name = self._remove_extension(outputs["res_file"]) + cpp_name = self._remove_extension(outputs["cpp_file"]) + outputs["invres_file"] = "%s_backward.nii.gz" % res_name + outputs["invcpp_file"] = "%s_backward.nii.gz" % cpp_name # Make a list of the linear transformation file and the input image if self.inputs.vel_flag is True and isdefined(self.inputs.aff_file): - cpp_file = os.path.abspath(outputs['cpp_file']) + cpp_file = os.path.abspath(outputs["cpp_file"]) flo_file = os.path.abspath(self.inputs.flo_file) - outputs['avg_output'] = '%s %s %s' % (self.inputs.aff_file, - cpp_file, flo_file) + outputs["avg_output"] = "%s %s %s" % ( + self.inputs.aff_file, + cpp_file, + flo_file, + ) else: - cpp_file = os.path.abspath(outputs['cpp_file']) + cpp_file = os.path.abspath(outputs["cpp_file"]) flo_file = os.path.abspath(self.inputs.flo_file) - outputs['avg_output'] = '%s %s' % (cpp_file, flo_file) + outputs["avg_output"] = "%s %s" % (cpp_file, flo_file) return outputs diff --git a/nipype/interfaces/niftyreg/regutils.py b/nipype/interfaces/niftyreg/regutils.py index 2d08a7119d..032f106933 100644 --- a/nipype/interfaces/niftyreg/regutils.py +++ b/nipype/interfaces/niftyreg/regutils.py @@ -15,70 +15,72 @@ class RegResampleInputSpec(NiftyRegCommandInputSpec): """ Input Spec for RegResample. 
""" + # Input reference file ref_file = File( exists=True, - desc='The input reference/target image', - argstr='-ref %s', - mandatory=True) + desc="The input reference/target image", + argstr="-ref %s", + mandatory=True, + ) # Input floating file flo_file = File( exists=True, - desc='The input floating/source image', - argstr='-flo %s', - mandatory=True) + desc="The input floating/source image", + argstr="-flo %s", + mandatory=True, + ) # Input deformation field trans_file = File( - exists=True, desc='The input transformation file', argstr='-trans %s') + exists=True, desc="The input transformation file", argstr="-trans %s" + ) type = traits.Enum( - 'res', - 'blank', - argstr='-%s', + "res", + "blank", + argstr="-%s", position=-2, usedefault=True, - desc='Type of output') + desc="Type of output", + ) # Output file name out_file = File( - name_source=['flo_file'], - name_template='%s', - argstr='%s', + name_source=["flo_file"], + name_template="%s", + argstr="%s", position=-1, - desc='The output filename of the transformed image') + desc="The output filename of the transformed image", + ) # Interpolation type inter_val = traits.Enum( - 'NN', - 'LIN', - 'CUB', - 'SINC', - desc='Interpolation type', - argstr='-inter %d') + "NN", "LIN", "CUB", "SINC", desc="Interpolation type", argstr="-inter %d" + ) # Padding value - pad_val = traits.Float(desc='Padding value', argstr='-pad %f') + pad_val = traits.Float(desc="Padding value", argstr="-pad %f") # Tensor flag - tensor_flag = traits.Bool(desc='Resample Tensor Map', argstr='-tensor ') + tensor_flag = traits.Bool(desc="Resample Tensor Map", argstr="-tensor ") # Verbosity off - verbosity_off_flag = traits.Bool( - argstr='-voff', desc='Turn off verbose output') + verbosity_off_flag = traits.Bool(argstr="-voff", desc="Turn off verbose output") # PSF flag - desc = 'Perform the resampling in two steps to resample an image to a \ -lower resolution' + desc = "Perform the resampling in two steps to resample an image to a \ +lower resolution" - psf_flag = traits.Bool(argstr='-psf', desc=desc) - desc = 'Minimise the matrix metric (0) or the determinant (1) when \ -estimating the PSF [0]' + psf_flag = traits.Bool(argstr="-psf", desc=desc) + desc = "Minimise the matrix metric (0) or the determinant (1) when \ +estimating the PSF [0]" - psf_alg = traits.Enum(0, 1, argstr='-psf_alg %d', desc=desc) + psf_alg = traits.Enum(0, 1, argstr="-psf_alg %d", desc=desc) class RegResampleOutputSpec(TraitedSpec): """ Output Spec for RegResample. 
""" - out_file = File(desc='The output filename of the transformed image') + + out_file = File(desc="The output filename of the transformed image") class RegResample(NiftyRegCommand): @@ -104,14 +106,15 @@ class RegResample(NiftyRegCommand): warpfield.nii -res im2_res.nii.gz' """ - _cmd = get_custom_path('reg_resample') + + _cmd = get_custom_path("reg_resample") input_spec = RegResampleInputSpec output_spec = RegResampleOutputSpec # Need this overload to properly constraint the interpolation type input def _format_arg(self, name, spec, value): - if name == 'inter_val': - inter_val = {'NN': 0, 'LIN': 1, 'CUB': 3, 'SINC': 4} + if name == "inter_val": + inter_val = {"NN": 0, "LIN": 1, "CUB": 3, "SINC": 4} return spec.argstr % inter_val[value] else: return super(RegResample, self)._format_arg(name, spec, value) @@ -119,39 +122,44 @@ def _format_arg(self, name, spec, value): def _overload_extension(self, value, name=None): path, base, _ = split_filename(value) suffix = self.inputs.type - return os.path.join(path, '{0}_{1}.nii.gz'.format(base, suffix)) + return os.path.join(path, "{0}_{1}.nii.gz".format(base, suffix)) class RegJacobianInputSpec(NiftyRegCommandInputSpec): """ Input Spec for RegJacobian. """ + # Reference file name - desc = 'Reference/target file (required if specifying CPP transformations.' - ref_file = File(exists=True, desc=desc, argstr='-ref %s') + desc = "Reference/target file (required if specifying CPP transformations." + ref_file = File(exists=True, desc=desc, argstr="-ref %s") # Input transformation file trans_file = File( exists=True, - desc='The input non-rigid transformation', - argstr='-trans %s', - mandatory=True) + desc="The input non-rigid transformation", + argstr="-trans %s", + mandatory=True, + ) type = traits.Enum( - 'jac', - 'jacL', - 'jacM', + "jac", + "jacL", + "jacM", usedefault=True, - argstr='-%s', + argstr="-%s", position=-2, - desc='Type of jacobian outcome') + desc="Type of jacobian outcome", + ) out_file = File( - name_source=['trans_file'], - name_template='%s', - desc='The output jacobian determinant file name', - argstr='%s', - position=-1) + name_source=["trans_file"], + name_template="%s", + desc="The output jacobian determinant file name", + argstr="%s", + position=-1, + ) class RegJacobianOutputSpec(TraitedSpec): """ Output Spec for RegJacobian. """ - out_file = File(desc='The output file') + + out_file = File(desc="The output file") class RegJacobian(NiftyRegCommand): @@ -174,129 +182,129 @@ class RegJacobian(NiftyRegCommand): warpfield_jac.nii.gz' """ - _cmd = get_custom_path('reg_jacobian') + + _cmd = get_custom_path("reg_jacobian") input_spec = RegJacobianInputSpec output_spec = RegJacobianOutputSpec def _overload_extension(self, value, name=None): path, base, _ = split_filename(value) suffix = self.inputs.type - return os.path.join(path, '{0}_{1}.nii.gz'.format(base, suffix)) + return os.path.join(path, "{0}_{1}.nii.gz".format(base, suffix)) class RegToolsInputSpec(NiftyRegCommandInputSpec): """ Input Spec for RegTools. 
""" + # Input image file in_file = File( - exists=True, - desc='The input image file path', - argstr='-in %s', - mandatory=True) + exists=True, desc="The input image file path", argstr="-in %s", mandatory=True + ) # Output file path out_file = File( - name_source=['in_file'], - name_template='%s_tools.nii.gz', - desc='The output file name', - argstr='-out %s') + name_source=["in_file"], + name_template="%s_tools.nii.gz", + desc="The output file name", + argstr="-out %s", + ) # Make the output image isotropic - iso_flag = traits.Bool(argstr='-iso', desc='Make output image isotropic') + iso_flag = traits.Bool(argstr="-iso", desc="Make output image isotropic") # Set scale, slope to 0 and 1. - noscl_flag = traits.Bool( - argstr='-noscl', desc='Set scale, slope to 0 and 1') + noscl_flag = traits.Bool(argstr="-noscl", desc="Set scale, slope to 0 and 1") # Values outside the mask are set to NaN mask_file = File( - exists=True, - desc='Values outside the mask are set to NaN', - argstr='-nan %s') + exists=True, desc="Values outside the mask are set to NaN", argstr="-nan %s" + ) # Threshold the input image - desc = 'Binarise the input image with the given threshold' - thr_val = traits.Float(desc=desc, argstr='-thr %f') + desc = "Binarise the input image with the given threshold" + thr_val = traits.Float(desc=desc, argstr="-thr %f") # Binarise the input image - bin_flag = traits.Bool(argstr='-bin', desc='Binarise the input image') + bin_flag = traits.Bool(argstr="-bin", desc="Binarise the input image") # Compute the mean RMS between the two images rms_val = File( - exists=True, - desc='Compute the mean RMS between the images', - argstr='-rms %s') + exists=True, desc="Compute the mean RMS between the images", argstr="-rms %s" + ) # Perform division by image or value div_val = traits.Either( traits.Float, File(exists=True), - desc='Divide the input by image or value', - argstr='-div %s') + desc="Divide the input by image or value", + argstr="-div %s", + ) # Perform multiplication by image or value mul_val = traits.Either( traits.Float, File(exists=True), - desc='Multiply the input by image or value', - argstr='-mul %s') + desc="Multiply the input by image or value", + argstr="-mul %s", + ) # Perform addition by image or value add_val = traits.Either( traits.Float, File(exists=True), - desc='Add to the input image or value', - argstr='-add %s') + desc="Add to the input image or value", + argstr="-add %s", + ) # Perform subtraction by image or value sub_val = traits.Either( traits.Float, File(exists=True), - desc='Add to the input image or value', - argstr='-sub %s') + desc="Add to the input image or value", + argstr="-sub %s", + ) # Downsample the image by a factor of 2. 
down_flag = traits.Bool( - desc='Downsample the image by a factor of 2', argstr='-down') + desc="Downsample the image by a factor of 2", argstr="-down" + ) # Smoothing using spline kernel - desc = 'Smooth the input image using a cubic spline kernel' + desc = "Smooth the input image using a cubic spline kernel" smo_s_val = traits.Tuple( - traits.Float, - traits.Float, - traits.Float, - desc=desc, - argstr='-smoS %f %f %f') + traits.Float, traits.Float, traits.Float, desc=desc, argstr="-smoS %f %f %f" + ) # Change the resolution of the input image chg_res_val = traits.Tuple( traits.Float, traits.Float, traits.Float, - desc='Change the resolution of the input image', - argstr='-chgres %f %f %f') + desc="Change the resolution of the input image", + argstr="-chgres %f %f %f", + ) # Smoothing using Gaussian kernel - desc = 'Smooth the input image using a Gaussian kernel' + desc = "Smooth the input image using a Gaussian kernel" smo_g_val = traits.Tuple( - traits.Float, - traits.Float, - traits.Float, - desc=desc, - argstr='-smoG %f %f %f') + traits.Float, traits.Float, traits.Float, desc=desc, argstr="-smoG %f %f %f" + ) # Interpolation type inter_val = traits.Enum( - 'NN', - 'LIN', - 'CUB', - 'SINC', - desc='Interpolation order to use to warp the floating image', - argstr='-interp %d') + "NN", + "LIN", + "CUB", + "SINC", + desc="Interpolation order to use to warp the floating image", + argstr="-interp %d", + ) class RegToolsOutputSpec(TraitedSpec): """ Output Spec for RegTools. """ - out_file = File(desc='The output file', exists=True) + + out_file = File(desc="The output file", exists=True) class RegTools(NiftyRegCommand): @@ -319,15 +327,16 @@ class RegTools(NiftyRegCommand): 'reg_tools -in im1.nii -mul 4.0 -omp 4 -out im1_tools.nii.gz' """ - _cmd = get_custom_path('reg_tools') + + _cmd = get_custom_path("reg_tools") input_spec = RegToolsInputSpec output_spec = RegToolsOutputSpec - _suffix = '_tools' + _suffix = "_tools" # Need this overload to properly constraint the interpolation type input def _format_arg(self, name, spec, value): - if name == 'inter_val': - inter_val = {'NN': 0, 'LIN': 1, 'CUB': 3, 'SINC': 4} + if name == "inter_val": + inter_val = {"NN": 0, "LIN": 1, "CUB": 3, "SINC": 4} return spec.argstr % inter_val[value] else: return super(RegTools, self)._format_arg(name, spec, value) @@ -335,99 +344,127 @@ def _format_arg(self, name, spec, value): class RegAverageInputSpec(NiftyRegCommandInputSpec): """ Input Spec for RegAverage. """ + avg_files = traits.List( File(exist=True), position=1, - argstr='-avg %s', - sep=' ', + argstr="-avg %s", + sep=" ", xor=[ - 'avg_lts_files', 'avg_ref_file', 'demean1_ref_file', - 'demean2_ref_file', 'demean3_ref_file', 'warp_files' + "avg_lts_files", + "avg_ref_file", + "demean1_ref_file", + "demean2_ref_file", + "demean3_ref_file", + "warp_files", ], - desc='Averaging of images/affine transformations') + desc="Averaging of images/affine transformations", + ) - desc = 'Robust average of affine transformations' + desc = "Robust average of affine transformations" avg_lts_files = traits.List( File(exist=True), position=1, - argstr='-avg_lts %s', - sep=' ', + argstr="-avg_lts %s", + sep=" ", xor=[ - 'avg_files', 'avg_ref_file', 'demean1_ref_file', - 'demean2_ref_file', 'demean3_ref_file', 'warp_files' + "avg_files", + "avg_ref_file", + "demean1_ref_file", + "demean2_ref_file", + "demean3_ref_file", + "warp_files", ], - desc=desc) + desc=desc, + ) - desc = 'All input images are resampled into the space of \ - and averaged. 
A cubic spline interpolation scheme is used for resampling' + desc = "All input images are resampled into the space of \ + and averaged. A cubic spline interpolation scheme is used for resampling" avg_ref_file = File( position=1, - argstr='-avg_tran %s', + argstr="-avg_tran %s", xor=[ - 'avg_files', 'avg_lts_files', 'demean1_ref_file', - 'demean2_ref_file', 'demean3_ref_file' + "avg_files", + "avg_lts_files", + "demean1_ref_file", + "demean2_ref_file", + "demean3_ref_file", ], - requires=['warp_files'], - desc=desc) + requires=["warp_files"], + desc=desc, + ) - desc = 'Average images and demean average image that have affine \ -transformations to a common space' + desc = "Average images and demean average image that have affine \ +transformations to a common space" demean1_ref_file = File( position=1, - argstr='-demean1 %s', + argstr="-demean1 %s", xor=[ - 'avg_files', 'avg_lts_files', 'avg_ref_file', 'demean2_ref_file', - 'demean3_ref_file' + "avg_files", + "avg_lts_files", + "avg_ref_file", + "demean2_ref_file", + "demean3_ref_file", ], - requires=['warp_files'], - desc=desc) + requires=["warp_files"], + desc=desc, + ) - desc = 'Average images and demean average image that have non-rigid \ -transformations to a common space' + desc = "Average images and demean average image that have non-rigid \ +transformations to a common space" demean2_ref_file = File( position=1, - argstr='-demean2 %s', + argstr="-demean2 %s", xor=[ - 'avg_files', 'avg_lts_files', 'avg_ref_file', 'demean1_ref_file', - 'demean3_ref_file' + "avg_files", + "avg_lts_files", + "avg_ref_file", + "demean1_ref_file", + "demean3_ref_file", ], - requires=['warp_files'], - desc=desc) + requires=["warp_files"], + desc=desc, + ) - desc = 'Average images and demean average image that have linear and \ -non-rigid transformations to a common space' + desc = "Average images and demean average image that have linear and \ +non-rigid transformations to a common space" demean3_ref_file = File( position=1, - argstr='-demean3 %s', + argstr="-demean3 %s", xor=[ - 'avg_files', 'avg_lts_files', 'avg_ref_file', 'demean1_ref_file', - 'demean2_ref_file' + "avg_files", + "avg_lts_files", + "avg_ref_file", + "demean1_ref_file", + "demean2_ref_file", ], - requires=['warp_files'], - desc=desc) + requires=["warp_files"], + desc=desc, + ) - desc = 'transformation files and floating image pairs/triplets to the \ -reference space' + desc = "transformation files and floating image pairs/triplets to the \ +reference space" warp_files = traits.List( File(exist=True), position=-1, - argstr='%s', - sep=' ', - xor=['avg_files', 'avg_lts_files'], - desc=desc) + argstr="%s", + sep=" ", + xor=["avg_files", "avg_lts_files"], + desc=desc, + ) - out_file = File( - genfile=True, position=0, desc='Output file name', argstr='%s') + out_file = File(genfile=True, position=0, desc="Output file name", argstr="%s") class RegAverageOutputSpec(TraitedSpec): """ Output Spec for RegAverage. 
""" - out_file = File(desc='Output file name') + + out_file = File(desc="Output file name") class RegAverage(NiftyRegCommand): @@ -455,20 +492,21 @@ class RegAverage(NiftyRegCommand): >>> node.cmdline # doctest: +ELLIPSIS 'reg_average --cmd_file .../reg_average_cmd' """ - _cmd = get_custom_path('reg_average') + + _cmd = get_custom_path("reg_average") input_spec = RegAverageInputSpec output_spec = RegAverageOutputSpec - _suffix = 'avg_out' + _suffix = "avg_out" def _gen_filename(self, name): - if name == 'out_file': + if name == "out_file": if isdefined(self.inputs.avg_lts_files): - return self._gen_fname(self._suffix, ext='.txt') + return self._gen_fname(self._suffix, ext=".txt") elif isdefined(self.inputs.avg_files): _, _, _ext = split_filename(self.inputs.avg_files[0]) - if _ext not in ['.nii', '.nii.gz', '.hdr', '.img', '.img.gz']: + if _ext not in [".nii", ".nii.gz", ".hdr", ".img", ".img.gz"]: return self._gen_fname(self._suffix, ext=_ext) - return self._gen_fname(self._suffix, ext='.nii.gz') + return self._gen_fname(self._suffix, ext=".nii.gz") return None @@ -476,9 +514,9 @@ def _list_outputs(self): outputs = self.output_spec().get() if isdefined(self.inputs.out_file): - outputs['out_file'] = self.inputs.out_file + outputs["out_file"] = self.inputs.out_file else: - outputs['out_file'] = self._gen_filename('out_file') + outputs["out_file"] = self._gen_filename("out_file") return outputs @@ -486,134 +524,196 @@ def _list_outputs(self): def cmdline(self): """ Rewrite the cmdline to write options in text_file.""" argv = super(RegAverage, self).cmdline - reg_average_cmd = os.path.join(os.getcwd(), 'reg_average_cmd') - with open(reg_average_cmd, 'w') as f: + reg_average_cmd = os.path.join(os.getcwd(), "reg_average_cmd") + with open(reg_average_cmd, "w") as f: f.write(argv) - return '%s --cmd_file %s' % (self.cmd, reg_average_cmd) + return "%s --cmd_file %s" % (self.cmd, reg_average_cmd) class RegTransformInputSpec(NiftyRegCommandInputSpec): """ Input Spec for RegTransform. 
""" + ref1_file = File( exists=True, - desc='The input reference/target image', - argstr='-ref %s', - position=0) + desc="The input reference/target image", + argstr="-ref %s", + position=0, + ) ref2_file = File( exists=True, - desc='The input second reference/target image', - argstr='-ref2 %s', + desc="The input second reference/target image", + argstr="-ref2 %s", position=1, - requires=['ref1_file']) + requires=["ref1_file"], + ) def_input = File( exists=True, - argstr='-def %s', + argstr="-def %s", position=-2, - desc='Compute deformation field from transformation', + desc="Compute deformation field from transformation", xor=[ - 'disp_input', 'flow_input', 'comp_input', 'upd_s_form_input', - 'inv_aff_input', 'inv_nrr_input', 'half_input', 'make_aff_input', - 'aff_2_rig_input', 'flirt_2_nr_input' - ]) + "disp_input", + "flow_input", + "comp_input", + "upd_s_form_input", + "inv_aff_input", + "inv_nrr_input", + "half_input", + "make_aff_input", + "aff_2_rig_input", + "flirt_2_nr_input", + ], + ) disp_input = File( exists=True, - argstr='-disp %s', + argstr="-disp %s", position=-2, - desc='Compute displacement field from transformation', + desc="Compute displacement field from transformation", xor=[ - 'def_input', 'flow_input', 'comp_input', 'upd_s_form_input', - 'inv_aff_input', 'inv_nrr_input', 'half_input', 'make_aff_input', - 'aff_2_rig_input', 'flirt_2_nr_input' - ]) + "def_input", + "flow_input", + "comp_input", + "upd_s_form_input", + "inv_aff_input", + "inv_nrr_input", + "half_input", + "make_aff_input", + "aff_2_rig_input", + "flirt_2_nr_input", + ], + ) flow_input = File( exists=True, - argstr='-flow %s', + argstr="-flow %s", position=-2, - desc='Compute flow field from spline SVF', + desc="Compute flow field from spline SVF", xor=[ - 'def_input', 'disp_input', 'comp_input', 'upd_s_form_input', - 'inv_aff_input', 'inv_nrr_input', 'half_input', 'make_aff_input', - 'aff_2_rig_input', 'flirt_2_nr_input' - ]) + "def_input", + "disp_input", + "comp_input", + "upd_s_form_input", + "inv_aff_input", + "inv_nrr_input", + "half_input", + "make_aff_input", + "aff_2_rig_input", + "flirt_2_nr_input", + ], + ) comp_input = File( exists=True, - argstr='-comp %s', + argstr="-comp %s", position=-3, - desc='compose two transformations', + desc="compose two transformations", xor=[ - 'def_input', 'disp_input', 'flow_input', 'upd_s_form_input', - 'inv_aff_input', 'inv_nrr_input', 'half_input', 'make_aff_input', - 'aff_2_rig_input', 'flirt_2_nr_input' + "def_input", + "disp_input", + "flow_input", + "upd_s_form_input", + "inv_aff_input", + "inv_nrr_input", + "half_input", + "make_aff_input", + "aff_2_rig_input", + "flirt_2_nr_input", ], - requires=['comp_input2']) + requires=["comp_input2"], + ) comp_input2 = File( - exists=True, - argstr='%s', - position=-2, - desc='compose two transformations') + exists=True, argstr="%s", position=-2, desc="compose two transformations" + ) - desc = 'Update s-form using the affine transformation' + desc = "Update s-form using the affine transformation" upd_s_form_input = File( exists=True, - argstr='-updSform %s', + argstr="-updSform %s", position=-3, desc=desc, xor=[ - 'def_input', 'disp_input', 'flow_input', 'comp_input', - 'inv_aff_input', 'inv_nrr_input', 'half_input', 'make_aff_input', - 'aff_2_rig_input', 'flirt_2_nr_input' + "def_input", + "disp_input", + "flow_input", + "comp_input", + "inv_aff_input", + "inv_nrr_input", + "half_input", + "make_aff_input", + "aff_2_rig_input", + "flirt_2_nr_input", ], - requires=['upd_s_form_input2']) + 
requires=["upd_s_form_input2"], + ) - desc = 'Update s-form using the affine transformation' + desc = "Update s-form using the affine transformation" upd_s_form_input2 = File( - exists=True, - argstr='%s', - position=-2, - desc=desc, - requires=['upd_s_form_input']) + exists=True, argstr="%s", position=-2, desc=desc, requires=["upd_s_form_input"] + ) inv_aff_input = File( exists=True, - argstr='-invAff %s', + argstr="-invAff %s", position=-2, - desc='Invert an affine transformation', + desc="Invert an affine transformation", xor=[ - 'def_input', 'disp_input', 'flow_input', 'comp_input', - 'upd_s_form_input', 'inv_nrr_input', 'half_input', - 'make_aff_input', 'aff_2_rig_input', 'flirt_2_nr_input' - ]) + "def_input", + "disp_input", + "flow_input", + "comp_input", + "upd_s_form_input", + "inv_nrr_input", + "half_input", + "make_aff_input", + "aff_2_rig_input", + "flirt_2_nr_input", + ], + ) inv_nrr_input = traits.Tuple( File(exists=True), File(exists=True), - desc='Invert a non-linear transformation', - argstr='-invNrr %s %s', + desc="Invert a non-linear transformation", + argstr="-invNrr %s %s", position=-2, xor=[ - 'def_input', 'disp_input', 'flow_input', 'comp_input', - 'upd_s_form_input', 'inv_aff_input', 'half_input', - 'make_aff_input', 'aff_2_rig_input', 'flirt_2_nr_input' - ]) + "def_input", + "disp_input", + "flow_input", + "comp_input", + "upd_s_form_input", + "inv_aff_input", + "half_input", + "make_aff_input", + "aff_2_rig_input", + "flirt_2_nr_input", + ], + ) half_input = File( exists=True, - argstr='-half %s', + argstr="-half %s", position=-2, - desc='Half way to the input transformation', + desc="Half way to the input transformation", xor=[ - 'def_input', 'disp_input', 'flow_input', 'comp_input', - 'upd_s_form_input', 'inv_aff_input', 'inv_nrr_input', - 'make_aff_input', 'aff_2_rig_input', 'flirt_2_nr_input' - ]) + "def_input", + "disp_input", + "flow_input", + "comp_input", + "upd_s_form_input", + "inv_aff_input", + "inv_nrr_input", + "make_aff_input", + "aff_2_rig_input", + "flirt_2_nr_input", + ], + ) - argstr_tmp = '-makeAff %f %f %f %f %f %f %f %f %f %f %f %f' + argstr_tmp = "-makeAff %f %f %f %f %f %f %f %f %f %f %f %f" make_aff_input = traits.Tuple( traits.Float, traits.Float, @@ -629,51 +729,74 @@ class RegTransformInputSpec(NiftyRegCommandInputSpec): traits.Float, argstr=argstr_tmp, position=-2, - desc='Make an affine transformation matrix', + desc="Make an affine transformation matrix", xor=[ - 'def_input', 'disp_input', 'flow_input', 'comp_input', - 'upd_s_form_input', 'inv_aff_input', 'inv_nrr_input', 'half_input', - 'aff_2_rig_input', 'flirt_2_nr_input' - ]) + "def_input", + "disp_input", + "flow_input", + "comp_input", + "upd_s_form_input", + "inv_aff_input", + "inv_nrr_input", + "half_input", + "aff_2_rig_input", + "flirt_2_nr_input", + ], + ) - desc = 'Extract the rigid component from affine transformation' + desc = "Extract the rigid component from affine transformation" aff_2_rig_input = File( exists=True, - argstr='-aff2rig %s', + argstr="-aff2rig %s", position=-2, desc=desc, xor=[ - 'def_input', 'disp_input', 'flow_input', 'comp_input', - 'upd_s_form_input', 'inv_aff_input', 'inv_nrr_input', 'half_input', - 'make_aff_input', 'flirt_2_nr_input' - ]) + "def_input", + "disp_input", + "flow_input", + "comp_input", + "upd_s_form_input", + "inv_aff_input", + "inv_nrr_input", + "half_input", + "make_aff_input", + "flirt_2_nr_input", + ], + ) - desc = 'Convert a FLIRT affine transformation to niftyreg affine \ -transformation' + desc = "Convert a FLIRT affine 
transformation to niftyreg affine \ +transformation" flirt_2_nr_input = traits.Tuple( File(exists=True), File(exists=True), File(exists=True), - argstr='-flirtAff2NR %s %s %s', + argstr="-flirtAff2NR %s %s %s", position=-2, desc=desc, xor=[ - 'def_input', 'disp_input', 'flow_input', 'comp_input', - 'upd_s_form_input', 'inv_aff_input', 'inv_nrr_input', 'half_input', - 'make_aff_input', 'aff_2_rig_input' - ]) + "def_input", + "disp_input", + "flow_input", + "comp_input", + "upd_s_form_input", + "inv_aff_input", + "inv_nrr_input", + "half_input", + "make_aff_input", + "aff_2_rig_input", + ], + ) out_file = File( - genfile=True, - position=-1, - argstr='%s', - desc='transformation file to write') + genfile=True, position=-1, argstr="%s", desc="transformation file to write" + ) class RegTransformOutputSpec(TraitedSpec): """ Output Spec for RegTransform. """ - out_file = File(desc='Output File (transformation in any format)') + + out_file = File(desc="Output File (transformation in any format)") class RegTransform(NiftyRegCommand): @@ -694,62 +817,68 @@ class RegTransform(NiftyRegCommand): 'reg_transform -omp 4 -def warpfield.nii .../warpfield_trans.nii.gz' """ - _cmd = get_custom_path('reg_transform') + + _cmd = get_custom_path("reg_transform") input_spec = RegTransformInputSpec output_spec = RegTransformOutputSpec - _suffix = '_trans' + _suffix = "_trans" def _find_input(self): inputs = [ - self.inputs.def_input, self.inputs.disp_input, - self.inputs.flow_input, self.inputs.comp_input, - self.inputs.comp_input2, self.inputs.upd_s_form_input, - self.inputs.inv_aff_input, self.inputs.inv_nrr_input, - self.inputs.half_input, self.inputs.make_aff_input, - self.inputs.aff_2_rig_input, self.inputs.flirt_2_nr_input + self.inputs.def_input, + self.inputs.disp_input, + self.inputs.flow_input, + self.inputs.comp_input, + self.inputs.comp_input2, + self.inputs.upd_s_form_input, + self.inputs.inv_aff_input, + self.inputs.inv_nrr_input, + self.inputs.half_input, + self.inputs.make_aff_input, + self.inputs.aff_2_rig_input, + self.inputs.flirt_2_nr_input, ] entries = [] for entry in inputs: if isdefined(entry): entries.append(entry) _, _, ext = split_filename(entry) - if ext == '.nii' or ext == '.nii.gz' or ext == '.hdr': + if ext == ".nii" or ext == ".nii.gz" or ext == ".hdr": return entry if len(entries): return entries[0] return None def _gen_filename(self, name): - if name == 'out_file': + if name == "out_file": if isdefined(self.inputs.make_aff_input): - return self._gen_fname( - 'matrix', suffix=self._suffix, ext='.txt') + return self._gen_fname("matrix", suffix=self._suffix, ext=".txt") - if isdefined(self.inputs.comp_input) and \ - isdefined(self.inputs.comp_input2): + if isdefined(self.inputs.comp_input) and isdefined(self.inputs.comp_input2): _, bn1, ext1 = split_filename(self.inputs.comp_input) _, _, ext2 = split_filename(self.inputs.comp_input2) - if ext1 in ['.nii', '.nii.gz', '.hdr', '.img', '.img.gz'] or \ - ext2 in ['.nii', '.nii.gz', '.hdr', '.img', '.img.gz']: - return self._gen_fname( - bn1, suffix=self._suffix, ext='.nii.gz') + if ext1 in [".nii", ".nii.gz", ".hdr", ".img", ".img.gz"] or ext2 in [ + ".nii", + ".nii.gz", + ".hdr", + ".img", + ".img.gz", + ]: + return self._gen_fname(bn1, suffix=self._suffix, ext=".nii.gz") else: return self._gen_fname(bn1, suffix=self._suffix, ext=ext1) if isdefined(self.inputs.flirt_2_nr_input): return self._gen_fname( - self.inputs.flirt_2_nr_input[0], - suffix=self._suffix, - ext='.txt') + self.inputs.flirt_2_nr_input[0], suffix=self._suffix, ext=".txt" 
+                )

             input_to_use = self._find_input()
             _, _, ext = split_filename(input_to_use)
-            if ext not in ['.nii', '.nii.gz', '.hdr', '.img', '.img.gz']:
-                return self._gen_fname(
-                    input_to_use, suffix=self._suffix, ext=ext)
+            if ext not in [".nii", ".nii.gz", ".hdr", ".img", ".img.gz"]:
+                return self._gen_fname(input_to_use, suffix=self._suffix, ext=ext)
             else:
-                return self._gen_fname(
-                    input_to_use, suffix=self._suffix, ext='.nii.gz')
+                return self._gen_fname(input_to_use, suffix=self._suffix, ext=".nii.gz")

         return None

@@ -757,45 +886,51 @@ def _list_outputs(self):
         outputs = self.output_spec().get()

         if isdefined(self.inputs.out_file):
-            outputs['out_file'] = self.inputs.out_file
+            outputs["out_file"] = self.inputs.out_file
         else:
-            outputs['out_file'] = self._gen_filename('out_file')
+            outputs["out_file"] = self._gen_filename("out_file")

         return outputs


 class RegMeasureInputSpec(NiftyRegCommandInputSpec):
     """ Input Spec for RegMeasure. """
+
     # Input reference file
     ref_file = File(
         exists=True,
-        desc='The input reference/target image',
-        argstr='-ref %s',
-        mandatory=True)
+        desc="The input reference/target image",
+        argstr="-ref %s",
+        mandatory=True,
+    )
     # Input floating file
     flo_file = File(
         exists=True,
-        desc='The input floating/source image',
-        argstr='-flo %s',
-        mandatory=True)
+        desc="The input floating/source image",
+        argstr="-flo %s",
+        mandatory=True,
+    )
     measure_type = traits.Enum(
-        'ncc',
-        'lncc',
-        'nmi',
-        'ssd',
+        "ncc",
+        "lncc",
+        "nmi",
+        "ssd",
         mandatory=True,
-        argstr='-%s',
-        desc='Measure of similarity to compute')
+        argstr="-%s",
+        desc="Measure of similarity to compute",
+    )
     out_file = File(
-        name_source=['flo_file'],
-        name_template='%s',
-        argstr='-out %s',
-        desc='The output text file containing the measure')
+        name_source=["flo_file"],
+        name_template="%s",
+        argstr="-out %s",
+        desc="The output text file containing the measure",
+    )


 class RegMeasureOutputSpec(TraitedSpec):
     """ Output Spec for RegMeasure.
""" - out_file = File(desc='The output text file containing the measure') + + out_file = File(desc="The output text file containing the measure") class RegMeasure(NiftyRegCommand): @@ -817,11 +952,12 @@ class RegMeasure(NiftyRegCommand): 'reg_measure -flo im2.nii -lncc -omp 4 -out im2_lncc.txt -ref im1.nii' """ - _cmd = get_custom_path('reg_measure') + + _cmd = get_custom_path("reg_measure") input_spec = RegMeasureInputSpec output_spec = RegMeasureOutputSpec def _overload_extension(self, value, name=None): path, base, _ = split_filename(value) suffix = self.inputs.measure_type - return os.path.join(path, '{0}_{1}.txt'.format(base, suffix)) + return os.path.join(path, "{0}_{1}.txt".format(base, suffix)) diff --git a/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py b/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py index 016ca5654b..75e103edbe 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py @@ -4,15 +4,9 @@ def test_NiftyRegCommand_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - omp_core_val=dict( - argstr='-omp %i', - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + omp_core_val=dict(argstr="-omp %i", usedefault=True,), ) inputs = NiftyRegCommand.input_spec() diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py b/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py index ed11753b5d..16ca83bdba 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py @@ -4,79 +4,58 @@ def test_RegAladin_inputs(): input_map = dict( - aff_direct_flag=dict(argstr='-affDirect', ), + aff_direct_flag=dict(argstr="-affDirect",), aff_file=dict( - argstr='-aff %s', + argstr="-aff %s", extensions=None, - name_source=['flo_file'], - name_template='%s_aff.txt', + name_source=["flo_file"], + name_template="%s_aff.txt", ), - args=dict(argstr='%s', ), - cog_flag=dict(argstr='-cog', ), - environ=dict( - nohash=True, - usedefault=True, - ), - flo_file=dict( - argstr='-flo %s', - extensions=None, - mandatory=True, - ), - flo_low_val=dict(argstr='-floLowThr %f', ), - flo_up_val=dict(argstr='-floUpThr %f', ), - fmask_file=dict( - argstr='-fmask %s', - extensions=None, - ), - gpuid_val=dict(argstr='-gpuid %i', ), - i_val=dict(argstr='-pi %d', ), - in_aff_file=dict( - argstr='-inaff %s', - extensions=None, - ), - ln_val=dict(argstr='-ln %d', ), - lp_val=dict(argstr='-lp %d', ), - maxit_val=dict(argstr='-maxit %d', ), - nac_flag=dict(argstr='-nac', ), - nosym_flag=dict(argstr='-noSym', ), - omp_core_val=dict( - argstr='-omp %i', - usedefault=True, - ), - platform_val=dict(argstr='-platf %i', ), - ref_file=dict( - argstr='-ref %s', - extensions=None, - mandatory=True, - ), - ref_low_val=dict(argstr='-refLowThr %f', ), - ref_up_val=dict(argstr='-refUpThr %f', ), + args=dict(argstr="%s",), + cog_flag=dict(argstr="-cog",), + environ=dict(nohash=True, usedefault=True,), + flo_file=dict(argstr="-flo %s", extensions=None, mandatory=True,), + flo_low_val=dict(argstr="-floLowThr %f",), + flo_up_val=dict(argstr="-floUpThr %f",), + fmask_file=dict(argstr="-fmask %s", extensions=None,), + gpuid_val=dict(argstr="-gpuid %i",), + i_val=dict(argstr="-pi %d",), + in_aff_file=dict(argstr="-inaff %s", extensions=None,), + ln_val=dict(argstr="-ln %d",), + lp_val=dict(argstr="-lp %d",), + maxit_val=dict(argstr="-maxit %d",), + 
nac_flag=dict(argstr="-nac",), + nosym_flag=dict(argstr="-noSym",), + omp_core_val=dict(argstr="-omp %i", usedefault=True,), + platform_val=dict(argstr="-platf %i",), + ref_file=dict(argstr="-ref %s", extensions=None, mandatory=True,), + ref_low_val=dict(argstr="-refLowThr %f",), + ref_up_val=dict(argstr="-refUpThr %f",), res_file=dict( - argstr='-res %s', + argstr="-res %s", extensions=None, - name_source=['flo_file'], - name_template='%s_res.nii.gz', + name_source=["flo_file"], + name_template="%s_res.nii.gz", ), - rig_only_flag=dict(argstr='-rigOnly', ), - rmask_file=dict( - argstr='-rmask %s', - extensions=None, - ), - smoo_f_val=dict(argstr='-smooF %f', ), - smoo_r_val=dict(argstr='-smooR %f', ), - v_val=dict(argstr='-pv %d', ), - verbosity_off_flag=dict(argstr='-voff', ), + rig_only_flag=dict(argstr="-rigOnly",), + rmask_file=dict(argstr="-rmask %s", extensions=None,), + smoo_f_val=dict(argstr="-smooF %f",), + smoo_r_val=dict(argstr="-smooR %f",), + v_val=dict(argstr="-pv %d",), + verbosity_off_flag=dict(argstr="-voff",), ) inputs = RegAladin.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_RegAladin_outputs(): output_map = dict( - aff_file=dict(extensions=None, ), + aff_file=dict(extensions=None,), avg_output=dict(), - res_file=dict(extensions=None, ), + res_file=dict(extensions=None,), ) outputs = RegAladin.output_spec() diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py b/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py index 8eb40cd38e..3ee172453f 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py @@ -4,84 +4,90 @@ def test_RegAverage_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), avg_files=dict( - argstr='-avg %s', + argstr="-avg %s", position=1, - sep=' ', + sep=" ", xor=[ - 'avg_lts_files', 'avg_ref_file', 'demean1_ref_file', - 'demean2_ref_file', 'demean3_ref_file', 'warp_files' + "avg_lts_files", + "avg_ref_file", + "demean1_ref_file", + "demean2_ref_file", + "demean3_ref_file", + "warp_files", ], ), avg_lts_files=dict( - argstr='-avg_lts %s', + argstr="-avg_lts %s", position=1, - sep=' ', + sep=" ", xor=[ - 'avg_files', 'avg_ref_file', 'demean1_ref_file', - 'demean2_ref_file', 'demean3_ref_file', 'warp_files' + "avg_files", + "avg_ref_file", + "demean1_ref_file", + "demean2_ref_file", + "demean3_ref_file", + "warp_files", ], ), avg_ref_file=dict( - argstr='-avg_tran %s', + argstr="-avg_tran %s", extensions=None, position=1, - requires=['warp_files'], + requires=["warp_files"], xor=[ - 'avg_files', 'avg_lts_files', 'demean1_ref_file', - 'demean2_ref_file', 'demean3_ref_file' + "avg_files", + "avg_lts_files", + "demean1_ref_file", + "demean2_ref_file", + "demean3_ref_file", ], ), demean1_ref_file=dict( - argstr='-demean1 %s', + argstr="-demean1 %s", extensions=None, position=1, - requires=['warp_files'], + requires=["warp_files"], xor=[ - 'avg_files', 'avg_lts_files', 'avg_ref_file', - 'demean2_ref_file', 'demean3_ref_file' + "avg_files", + "avg_lts_files", + "avg_ref_file", + "demean2_ref_file", + "demean3_ref_file", ], ), demean2_ref_file=dict( - argstr='-demean2 %s', + argstr="-demean2 %s", extensions=None, position=1, - requires=['warp_files'], + requires=["warp_files"], xor=[ - 'avg_files', 'avg_lts_files', 'avg_ref_file', - 'demean1_ref_file', 'demean3_ref_file' + "avg_files", + "avg_lts_files", + 
"avg_ref_file", + "demean1_ref_file", + "demean3_ref_file", ], ), demean3_ref_file=dict( - argstr='-demean3 %s', + argstr="-demean3 %s", extensions=None, position=1, - requires=['warp_files'], + requires=["warp_files"], xor=[ - 'avg_files', 'avg_lts_files', 'avg_ref_file', - 'demean1_ref_file', 'demean2_ref_file' + "avg_files", + "avg_lts_files", + "avg_ref_file", + "demean1_ref_file", + "demean2_ref_file", ], ), - environ=dict( - nohash=True, - usedefault=True, - ), - omp_core_val=dict( - argstr='-omp %i', - usedefault=True, - ), - out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - position=0, - ), + environ=dict(nohash=True, usedefault=True,), + omp_core_val=dict(argstr="-omp %i", usedefault=True,), + out_file=dict(argstr="%s", extensions=None, genfile=True, position=0,), warp_files=dict( - argstr='%s', - position=-1, - sep=' ', - xor=['avg_files', 'avg_lts_files'], + argstr="%s", position=-1, sep=" ", xor=["avg_files", "avg_lts_files"], ), ) inputs = RegAverage.input_spec() @@ -89,8 +95,10 @@ def test_RegAverage_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_RegAverage_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = RegAverage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py b/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py index 20fdee1f08..a70318cd43 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py @@ -4,108 +4,84 @@ def test_RegF3D_inputs(): input_map = dict( - aff_file=dict( - argstr='-aff %s', - extensions=None, - ), - amc_flag=dict(argstr='-amc', ), - args=dict(argstr='%s', ), - be_val=dict(argstr='-be %f', ), + aff_file=dict(argstr="-aff %s", extensions=None,), + amc_flag=dict(argstr="-amc",), + args=dict(argstr="%s",), + be_val=dict(argstr="-be %f",), cpp_file=dict( - argstr='-cpp %s', - extensions=None, - name_source=['flo_file'], - name_template='%s_cpp.nii.gz', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fbn2_val=dict(argstr='-fbn %d %d', ), - fbn_val=dict(argstr='--fbn %d', ), - flo_file=dict( - argstr='-flo %s', - extensions=None, - mandatory=True, - ), - flo_smooth_val=dict(argstr='-smooF %f', ), - flwth2_thr_val=dict(argstr='-fLwTh %d %f', ), - flwth_thr_val=dict(argstr='--fLwTh %f', ), - fmask_file=dict( - argstr='-fmask %s', - extensions=None, - ), - fupth2_thr_val=dict(argstr='-fUpTh %d %f', ), - fupth_thr_val=dict(argstr='--fUpTh %f', ), - incpp_file=dict( - argstr='-incpp %s', - extensions=None, - ), - jl_val=dict(argstr='-jl %f', ), - kld2_flag=dict(argstr='-kld %d', ), - kld_flag=dict(argstr='--kld', ), - le_val=dict(argstr='-le %f', ), - ln_val=dict(argstr='-ln %d', ), - lncc2_val=dict(argstr='-lncc %d %f', ), - lncc_val=dict(argstr='--lncc %f', ), - lp_val=dict(argstr='-lp %d', ), - maxit_val=dict(argstr='-maxit %d', ), - nmi_flag=dict(argstr='--nmi', ), - no_app_jl_flag=dict(argstr='-noAppJL', ), - noconj_flag=dict(argstr='-noConj', ), - nopy_flag=dict(argstr='-nopy', ), - nox_flag=dict(argstr='-nox', ), - noy_flag=dict(argstr='-noy', ), - noz_flag=dict(argstr='-noz', ), - omp_core_val=dict( - argstr='-omp %i', - usedefault=True, - ), - pad_val=dict(argstr='-pad %f', ), - pert_val=dict(argstr='-pert %d', ), - rbn2_val=dict(argstr='-rbn %d %d', ), - 
rbn_val=dict(argstr='--rbn %d', ), - ref_file=dict( - argstr='-ref %s', + argstr="-cpp %s", extensions=None, - mandatory=True, + name_source=["flo_file"], + name_template="%s_cpp.nii.gz", ), - ref_smooth_val=dict(argstr='-smooR %f', ), + environ=dict(nohash=True, usedefault=True,), + fbn2_val=dict(argstr="-fbn %d %d",), + fbn_val=dict(argstr="--fbn %d",), + flo_file=dict(argstr="-flo %s", extensions=None, mandatory=True,), + flo_smooth_val=dict(argstr="-smooF %f",), + flwth2_thr_val=dict(argstr="-fLwTh %d %f",), + flwth_thr_val=dict(argstr="--fLwTh %f",), + fmask_file=dict(argstr="-fmask %s", extensions=None,), + fupth2_thr_val=dict(argstr="-fUpTh %d %f",), + fupth_thr_val=dict(argstr="--fUpTh %f",), + incpp_file=dict(argstr="-incpp %s", extensions=None,), + jl_val=dict(argstr="-jl %f",), + kld2_flag=dict(argstr="-kld %d",), + kld_flag=dict(argstr="--kld",), + le_val=dict(argstr="-le %f",), + ln_val=dict(argstr="-ln %d",), + lncc2_val=dict(argstr="-lncc %d %f",), + lncc_val=dict(argstr="--lncc %f",), + lp_val=dict(argstr="-lp %d",), + maxit_val=dict(argstr="-maxit %d",), + nmi_flag=dict(argstr="--nmi",), + no_app_jl_flag=dict(argstr="-noAppJL",), + noconj_flag=dict(argstr="-noConj",), + nopy_flag=dict(argstr="-nopy",), + nox_flag=dict(argstr="-nox",), + noy_flag=dict(argstr="-noy",), + noz_flag=dict(argstr="-noz",), + omp_core_val=dict(argstr="-omp %i", usedefault=True,), + pad_val=dict(argstr="-pad %f",), + pert_val=dict(argstr="-pert %d",), + rbn2_val=dict(argstr="-rbn %d %d",), + rbn_val=dict(argstr="--rbn %d",), + ref_file=dict(argstr="-ref %s", extensions=None, mandatory=True,), + ref_smooth_val=dict(argstr="-smooR %f",), res_file=dict( - argstr='-res %s', + argstr="-res %s", extensions=None, - name_source=['flo_file'], - name_template='%s_res.nii.gz', + name_source=["flo_file"], + name_template="%s_res.nii.gz", ), - rlwth2_thr_val=dict(argstr='-rLwTh %d %f', ), - rlwth_thr_val=dict(argstr='--rLwTh %f', ), - rmask_file=dict( - argstr='-rmask %s', - extensions=None, - ), - rupth2_thr_val=dict(argstr='-rUpTh %d %f', ), - rupth_thr_val=dict(argstr='--rUpTh %f', ), - smooth_grad_val=dict(argstr='-smoothGrad %f', ), - ssd2_flag=dict(argstr='-ssd %d', ), - ssd_flag=dict(argstr='--ssd', ), - sx_val=dict(argstr='-sx %f', ), - sy_val=dict(argstr='-sy %f', ), - sz_val=dict(argstr='-sz %f', ), - vel_flag=dict(argstr='-vel', ), - verbosity_off_flag=dict(argstr='-voff', ), + rlwth2_thr_val=dict(argstr="-rLwTh %d %f",), + rlwth_thr_val=dict(argstr="--rLwTh %f",), + rmask_file=dict(argstr="-rmask %s", extensions=None,), + rupth2_thr_val=dict(argstr="-rUpTh %d %f",), + rupth_thr_val=dict(argstr="--rUpTh %f",), + smooth_grad_val=dict(argstr="-smoothGrad %f",), + ssd2_flag=dict(argstr="-ssd %d",), + ssd_flag=dict(argstr="--ssd",), + sx_val=dict(argstr="-sx %f",), + sy_val=dict(argstr="-sy %f",), + sz_val=dict(argstr="-sz %f",), + vel_flag=dict(argstr="-vel",), + verbosity_off_flag=dict(argstr="-voff",), ) inputs = RegF3D.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_RegF3D_outputs(): output_map = dict( avg_output=dict(), - cpp_file=dict(extensions=None, ), - invcpp_file=dict(extensions=None, ), - invres_file=dict(extensions=None, ), - res_file=dict(extensions=None, ), + cpp_file=dict(extensions=None,), + invcpp_file=dict(extensions=None,), + invres_file=dict(extensions=None,), + res_file=dict(extensions=None,), ) outputs = RegF3D.output_spec() diff --git 
a/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py b/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py index 9c08f4ef42..5a0291e1af 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py @@ -4,44 +4,29 @@ def test_RegJacobian_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - omp_core_val=dict( - argstr='-omp %i', - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + omp_core_val=dict(argstr="-omp %i", usedefault=True,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, - name_source=['trans_file'], - name_template='%s', + name_source=["trans_file"], + name_template="%s", position=-1, ), - ref_file=dict( - argstr='-ref %s', - extensions=None, - ), - trans_file=dict( - argstr='-trans %s', - extensions=None, - mandatory=True, - ), - type=dict( - argstr='-%s', - position=-2, - usedefault=True, - ), + ref_file=dict(argstr="-ref %s", extensions=None,), + trans_file=dict(argstr="-trans %s", extensions=None, mandatory=True,), + type=dict(argstr="-%s", position=-2, usedefault=True,), ) inputs = RegJacobian.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_RegJacobian_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = RegJacobian.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py b/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py index 93224b8a90..8ae16aa9c8 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py @@ -4,43 +4,28 @@ def test_RegMeasure_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - flo_file=dict( - argstr='-flo %s', - extensions=None, - mandatory=True, - ), - measure_type=dict( - argstr='-%s', - mandatory=True, - ), - omp_core_val=dict( - argstr='-omp %i', - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + flo_file=dict(argstr="-flo %s", extensions=None, mandatory=True,), + measure_type=dict(argstr="-%s", mandatory=True,), + omp_core_val=dict(argstr="-omp %i", usedefault=True,), out_file=dict( - argstr='-out %s', + argstr="-out %s", extensions=None, - name_source=['flo_file'], - name_template='%s', - ), - ref_file=dict( - argstr='-ref %s', - extensions=None, - mandatory=True, + name_source=["flo_file"], + name_template="%s", ), + ref_file=dict(argstr="-ref %s", extensions=None, mandatory=True,), ) inputs = RegMeasure.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_RegMeasure_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = RegMeasure.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py b/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py index 5a1852a97d..2836efb4f8 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py @@ -4,55 
+4,36 @@ def test_RegResample_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - flo_file=dict( - argstr='-flo %s', - extensions=None, - mandatory=True, - ), - inter_val=dict(argstr='-inter %d', ), - omp_core_val=dict( - argstr='-omp %i', - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + flo_file=dict(argstr="-flo %s", extensions=None, mandatory=True,), + inter_val=dict(argstr="-inter %d",), + omp_core_val=dict(argstr="-omp %i", usedefault=True,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, - name_source=['flo_file'], - name_template='%s', + name_source=["flo_file"], + name_template="%s", position=-1, ), - pad_val=dict(argstr='-pad %f', ), - psf_alg=dict(argstr='-psf_alg %d', ), - psf_flag=dict(argstr='-psf', ), - ref_file=dict( - argstr='-ref %s', - extensions=None, - mandatory=True, - ), - tensor_flag=dict(argstr='-tensor ', ), - trans_file=dict( - argstr='-trans %s', - extensions=None, - ), - type=dict( - argstr='-%s', - position=-2, - usedefault=True, - ), - verbosity_off_flag=dict(argstr='-voff', ), + pad_val=dict(argstr="-pad %f",), + psf_alg=dict(argstr="-psf_alg %d",), + psf_flag=dict(argstr="-psf",), + ref_file=dict(argstr="-ref %s", extensions=None, mandatory=True,), + tensor_flag=dict(argstr="-tensor ",), + trans_file=dict(argstr="-trans %s", extensions=None,), + type=dict(argstr="-%s", position=-2, usedefault=True,), + verbosity_off_flag=dict(argstr="-voff",), ) inputs = RegResample.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_RegResample_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = RegResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py b/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py index 9b81307511..0b0513ef4d 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py @@ -4,55 +4,41 @@ def test_RegTools_inputs(): input_map = dict( - add_val=dict(argstr='-add %s', ), - args=dict(argstr='%s', ), - bin_flag=dict(argstr='-bin', ), - chg_res_val=dict(argstr='-chgres %f %f %f', ), - div_val=dict(argstr='-div %s', ), - down_flag=dict(argstr='-down', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - ), - inter_val=dict(argstr='-interp %d', ), - iso_flag=dict(argstr='-iso', ), - mask_file=dict( - argstr='-nan %s', - extensions=None, - ), - mul_val=dict(argstr='-mul %s', ), - noscl_flag=dict(argstr='-noscl', ), - omp_core_val=dict( - argstr='-omp %i', - usedefault=True, - ), + add_val=dict(argstr="-add %s",), + args=dict(argstr="%s",), + bin_flag=dict(argstr="-bin",), + chg_res_val=dict(argstr="-chgres %f %f %f",), + div_val=dict(argstr="-div %s",), + down_flag=dict(argstr="-down",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), + inter_val=dict(argstr="-interp %d",), + iso_flag=dict(argstr="-iso",), + mask_file=dict(argstr="-nan %s", extensions=None,), + mul_val=dict(argstr="-mul %s",), + noscl_flag=dict(argstr="-noscl",), + omp_core_val=dict(argstr="-omp %i", usedefault=True,), out_file=dict( - argstr='-out %s', - extensions=None, - 
name_source=['in_file'], - name_template='%s_tools.nii.gz', - ), - rms_val=dict( - argstr='-rms %s', + argstr="-out %s", extensions=None, + name_source=["in_file"], + name_template="%s_tools.nii.gz", ), - smo_g_val=dict(argstr='-smoG %f %f %f', ), - smo_s_val=dict(argstr='-smoS %f %f %f', ), - sub_val=dict(argstr='-sub %s', ), - thr_val=dict(argstr='-thr %f', ), + rms_val=dict(argstr="-rms %s", extensions=None,), + smo_g_val=dict(argstr="-smoG %f %f %f",), + smo_s_val=dict(argstr="-smoS %f %f %f",), + sub_val=dict(argstr="-sub %s",), + thr_val=dict(argstr="-thr %f",), ) inputs = RegTools.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_RegTools_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = RegTools.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py b/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py index 07df2bb65b..7a16c6e452 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py @@ -5,150 +5,202 @@ def test_RegTransform_inputs(): input_map = dict( aff_2_rig_input=dict( - argstr='-aff2rig %s', + argstr="-aff2rig %s", extensions=None, position=-2, xor=[ - 'def_input', 'disp_input', 'flow_input', 'comp_input', - 'upd_s_form_input', 'inv_aff_input', 'inv_nrr_input', - 'half_input', 'make_aff_input', 'flirt_2_nr_input' + "def_input", + "disp_input", + "flow_input", + "comp_input", + "upd_s_form_input", + "inv_aff_input", + "inv_nrr_input", + "half_input", + "make_aff_input", + "flirt_2_nr_input", ], ), - args=dict(argstr='%s', ), + args=dict(argstr="%s",), comp_input=dict( - argstr='-comp %s', + argstr="-comp %s", extensions=None, position=-3, - requires=['comp_input2'], + requires=["comp_input2"], xor=[ - 'def_input', 'disp_input', 'flow_input', 'upd_s_form_input', - 'inv_aff_input', 'inv_nrr_input', 'half_input', - 'make_aff_input', 'aff_2_rig_input', 'flirt_2_nr_input' + "def_input", + "disp_input", + "flow_input", + "upd_s_form_input", + "inv_aff_input", + "inv_nrr_input", + "half_input", + "make_aff_input", + "aff_2_rig_input", + "flirt_2_nr_input", ], ), - comp_input2=dict( - argstr='%s', - extensions=None, - position=-2, - ), + comp_input2=dict(argstr="%s", extensions=None, position=-2,), def_input=dict( - argstr='-def %s', + argstr="-def %s", extensions=None, position=-2, xor=[ - 'disp_input', 'flow_input', 'comp_input', 'upd_s_form_input', - 'inv_aff_input', 'inv_nrr_input', 'half_input', - 'make_aff_input', 'aff_2_rig_input', 'flirt_2_nr_input' + "disp_input", + "flow_input", + "comp_input", + "upd_s_form_input", + "inv_aff_input", + "inv_nrr_input", + "half_input", + "make_aff_input", + "aff_2_rig_input", + "flirt_2_nr_input", ], ), disp_input=dict( - argstr='-disp %s', + argstr="-disp %s", extensions=None, position=-2, xor=[ - 'def_input', 'flow_input', 'comp_input', 'upd_s_form_input', - 'inv_aff_input', 'inv_nrr_input', 'half_input', - 'make_aff_input', 'aff_2_rig_input', 'flirt_2_nr_input' + "def_input", + "flow_input", + "comp_input", + "upd_s_form_input", + "inv_aff_input", + "inv_nrr_input", + "half_input", + "make_aff_input", + "aff_2_rig_input", + "flirt_2_nr_input", ], ), - environ=dict( - nohash=True, - usedefault=True, - ), + environ=dict(nohash=True, usedefault=True,), flirt_2_nr_input=dict( - 
argstr='-flirtAff2NR %s %s %s', + argstr="-flirtAff2NR %s %s %s", position=-2, xor=[ - 'def_input', 'disp_input', 'flow_input', 'comp_input', - 'upd_s_form_input', 'inv_aff_input', 'inv_nrr_input', - 'half_input', 'make_aff_input', 'aff_2_rig_input' + "def_input", + "disp_input", + "flow_input", + "comp_input", + "upd_s_form_input", + "inv_aff_input", + "inv_nrr_input", + "half_input", + "make_aff_input", + "aff_2_rig_input", ], ), flow_input=dict( - argstr='-flow %s', + argstr="-flow %s", extensions=None, position=-2, xor=[ - 'def_input', 'disp_input', 'comp_input', 'upd_s_form_input', - 'inv_aff_input', 'inv_nrr_input', 'half_input', - 'make_aff_input', 'aff_2_rig_input', 'flirt_2_nr_input' + "def_input", + "disp_input", + "comp_input", + "upd_s_form_input", + "inv_aff_input", + "inv_nrr_input", + "half_input", + "make_aff_input", + "aff_2_rig_input", + "flirt_2_nr_input", ], ), half_input=dict( - argstr='-half %s', + argstr="-half %s", extensions=None, position=-2, xor=[ - 'def_input', 'disp_input', 'flow_input', 'comp_input', - 'upd_s_form_input', 'inv_aff_input', 'inv_nrr_input', - 'make_aff_input', 'aff_2_rig_input', 'flirt_2_nr_input' + "def_input", + "disp_input", + "flow_input", + "comp_input", + "upd_s_form_input", + "inv_aff_input", + "inv_nrr_input", + "make_aff_input", + "aff_2_rig_input", + "flirt_2_nr_input", ], ), inv_aff_input=dict( - argstr='-invAff %s', + argstr="-invAff %s", extensions=None, position=-2, xor=[ - 'def_input', 'disp_input', 'flow_input', 'comp_input', - 'upd_s_form_input', 'inv_nrr_input', 'half_input', - 'make_aff_input', 'aff_2_rig_input', 'flirt_2_nr_input' + "def_input", + "disp_input", + "flow_input", + "comp_input", + "upd_s_form_input", + "inv_nrr_input", + "half_input", + "make_aff_input", + "aff_2_rig_input", + "flirt_2_nr_input", ], ), inv_nrr_input=dict( - argstr='-invNrr %s %s', + argstr="-invNrr %s %s", position=-2, xor=[ - 'def_input', 'disp_input', 'flow_input', 'comp_input', - 'upd_s_form_input', 'inv_aff_input', 'half_input', - 'make_aff_input', 'aff_2_rig_input', 'flirt_2_nr_input' + "def_input", + "disp_input", + "flow_input", + "comp_input", + "upd_s_form_input", + "inv_aff_input", + "half_input", + "make_aff_input", + "aff_2_rig_input", + "flirt_2_nr_input", ], ), make_aff_input=dict( - argstr='-makeAff %f %f %f %f %f %f %f %f %f %f %f %f', + argstr="-makeAff %f %f %f %f %f %f %f %f %f %f %f %f", position=-2, xor=[ - 'def_input', 'disp_input', 'flow_input', 'comp_input', - 'upd_s_form_input', 'inv_aff_input', 'inv_nrr_input', - 'half_input', 'aff_2_rig_input', 'flirt_2_nr_input' + "def_input", + "disp_input", + "flow_input", + "comp_input", + "upd_s_form_input", + "inv_aff_input", + "inv_nrr_input", + "half_input", + "aff_2_rig_input", + "flirt_2_nr_input", ], ), - omp_core_val=dict( - argstr='-omp %i', - usedefault=True, - ), - out_file=dict( - argstr='%s', - extensions=None, - genfile=True, - position=-1, - ), - ref1_file=dict( - argstr='-ref %s', - extensions=None, - position=0, - ), + omp_core_val=dict(argstr="-omp %i", usedefault=True,), + out_file=dict(argstr="%s", extensions=None, genfile=True, position=-1,), + ref1_file=dict(argstr="-ref %s", extensions=None, position=0,), ref2_file=dict( - argstr='-ref2 %s', - extensions=None, - position=1, - requires=['ref1_file'], + argstr="-ref2 %s", extensions=None, position=1, requires=["ref1_file"], ), upd_s_form_input=dict( - argstr='-updSform %s', + argstr="-updSform %s", extensions=None, position=-3, - requires=['upd_s_form_input2'], + requires=["upd_s_form_input2"], xor=[ - 
'def_input', 'disp_input', 'flow_input', 'comp_input', - 'inv_aff_input', 'inv_nrr_input', 'half_input', - 'make_aff_input', 'aff_2_rig_input', 'flirt_2_nr_input' + "def_input", + "disp_input", + "flow_input", + "comp_input", + "inv_aff_input", + "inv_nrr_input", + "half_input", + "make_aff_input", + "aff_2_rig_input", + "flirt_2_nr_input", ], ), upd_s_form_input2=dict( - argstr='%s', - extensions=None, - position=-2, - requires=['upd_s_form_input'], + argstr="%s", extensions=None, position=-2, requires=["upd_s_form_input"], ), ) inputs = RegTransform.input_spec() @@ -156,8 +208,10 @@ def test_RegTransform_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_RegTransform_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = RegTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_reg.py b/nipype/interfaces/niftyreg/tests/test_reg.py index 862760139e..77b56e21da 100644 --- a/nipype/interfaces/niftyreg/tests/test_reg.py +++ b/nipype/interfaces/niftyreg/tests/test_reg.py @@ -5,43 +5,44 @@ import pytest from ....testing import example_data -from .. import (get_custom_path, RegAladin, RegF3D) +from .. import get_custom_path, RegAladin, RegF3D from .test_regutils import no_nifty_tool @pytest.mark.skipif( - no_nifty_tool(cmd='reg_aladin'), - reason="niftyreg is not installed. reg_aladin not found.") + no_nifty_tool(cmd="reg_aladin"), + reason="niftyreg is not installed. reg_aladin not found.", +) def test_reg_aladin(): """ tests for reg_aladin interface""" # Create a reg_aladin object nr_aladin = RegAladin() # Check if the command is properly defined - assert nr_aladin.cmd == get_custom_path('reg_aladin') + assert nr_aladin.cmd == get_custom_path("reg_aladin") # test raising error with mandatory args absent with pytest.raises(ValueError): nr_aladin.run() # Assign some input data - ref_file = example_data('im1.nii') - flo_file = example_data('im2.nii') - rmask_file = example_data('mask.nii') + ref_file = example_data("im1.nii") + flo_file = example_data("im2.nii") + rmask_file = example_data("mask.nii") nr_aladin.inputs.ref_file = ref_file nr_aladin.inputs.flo_file = flo_file nr_aladin.inputs.rmask_file = rmask_file nr_aladin.inputs.omp_core_val = 4 - cmd_tmp = '{cmd} -aff {aff} -flo {flo} -omp 4 -ref {ref} -res {res} \ --rmask {rmask}' + cmd_tmp = "{cmd} -aff {aff} -flo {flo} -omp 4 -ref {ref} -res {res} \ +-rmask {rmask}" expected_cmd = cmd_tmp.format( - cmd=get_custom_path('reg_aladin'), - aff='im2_aff.txt', + cmd=get_custom_path("reg_aladin"), + aff="im2_aff.txt", flo=flo_file, ref=ref_file, - res='im2_res.nii.gz', + res="im2_res.nii.gz", rmask=rmask_file, ) @@ -49,24 +50,24 @@ def test_reg_aladin(): @pytest.mark.skipif( - no_nifty_tool(cmd='reg_f3d'), - reason="niftyreg is not installed. reg_f3d not found.") + no_nifty_tool(cmd="reg_f3d"), reason="niftyreg is not installed. reg_f3d not found." 
+) def test_reg_f3d(): """ tests for reg_f3d interface""" # Create a reg_f3d object nr_f3d = RegF3D() # Check if the command is properly defined - assert nr_f3d.cmd == get_custom_path('reg_f3d') + assert nr_f3d.cmd == get_custom_path("reg_f3d") # test raising error with mandatory args absent with pytest.raises(ValueError): nr_f3d.run() # Assign some input data - ref_file = example_data('im1.nii') - flo_file = example_data('im2.nii') - rmask_file = example_data('mask.nii') + ref_file = example_data("im1.nii") + flo_file = example_data("im2.nii") + rmask_file = example_data("mask.nii") nr_f3d.inputs.ref_file = ref_file nr_f3d.inputs.flo_file = flo_file nr_f3d.inputs.rmask_file = rmask_file @@ -75,15 +76,15 @@ def test_reg_f3d(): nr_f3d.inputs.be_val = 0.1 nr_f3d.inputs.le_val = 0.1 - cmd_tmp = '{cmd} -be 0.100000 -cpp {cpp} -flo {flo} -le 0.100000 -omp 4 \ --ref {ref} -res {res} -rmask {rmask} -vel' + cmd_tmp = "{cmd} -be 0.100000 -cpp {cpp} -flo {flo} -le 0.100000 -omp 4 \ +-ref {ref} -res {res} -rmask {rmask} -vel" expected_cmd = cmd_tmp.format( - cmd=get_custom_path('reg_f3d'), - cpp='im2_cpp.nii.gz', + cmd=get_custom_path("reg_f3d"), + cpp="im2_cpp.nii.gz", flo=flo_file, ref=ref_file, - res='im2_res.nii.gz', + res="im2_res.nii.gz", rmask=rmask_file, ) diff --git a/nipype/interfaces/niftyreg/tests/test_regutils.py b/nipype/interfaces/niftyreg/tests/test_regutils.py index 918d556ab2..26431ddc44 100644 --- a/nipype/interfaces/niftyreg/tests/test_regutils.py +++ b/nipype/interfaces/niftyreg/tests/test_regutils.py @@ -6,8 +6,15 @@ from ....utils.filemanip import which from ....testing import example_data -from .. import (get_custom_path, RegAverage, RegResample, RegJacobian, - RegTools, RegMeasure, RegTransform) +from .. import ( + get_custom_path, + RegAverage, + RegResample, + RegJacobian, + RegTools, + RegMeasure, + RegTransform, +) def no_nifty_tool(cmd=None): @@ -15,455 +22,509 @@ def no_nifty_tool(cmd=None): @pytest.mark.skipif( - no_nifty_tool(cmd='reg_resample'), - reason="niftyreg is not installed. reg_resample not found.") + no_nifty_tool(cmd="reg_resample"), + reason="niftyreg is not installed. 
reg_resample not found.", +) def test_reg_resample_res(): """ tests for reg_resample interface """ # Create a reg_resample object nr_resample = RegResample() # Check if the command is properly defined - assert nr_resample.cmd == get_custom_path('reg_resample') + assert nr_resample.cmd == get_custom_path("reg_resample") # test raising error with mandatory args absent with pytest.raises(ValueError): nr_resample.run() # Resample res - ref_file = example_data('im1.nii') - flo_file = example_data('im2.nii') - trans_file = example_data('warpfield.nii') + ref_file = example_data("im1.nii") + flo_file = example_data("im2.nii") + trans_file = example_data("warpfield.nii") nr_resample.inputs.ref_file = ref_file nr_resample.inputs.flo_file = flo_file nr_resample.inputs.trans_file = trans_file - nr_resample.inputs.inter_val = 'LIN' + nr_resample.inputs.inter_val = "LIN" nr_resample.inputs.omp_core_val = 4 - cmd_tmp = '{cmd} -flo {flo} -inter 1 -omp 4 -ref {ref} -trans {trans} \ --res {res}' + cmd_tmp = "{cmd} -flo {flo} -inter 1 -omp 4 -ref {ref} -trans {trans} \ +-res {res}" expected_cmd = cmd_tmp.format( - cmd=get_custom_path('reg_resample'), + cmd=get_custom_path("reg_resample"), flo=flo_file, ref=ref_file, trans=trans_file, - res='im2_res.nii.gz') + res="im2_res.nii.gz", + ) assert nr_resample.cmdline == expected_cmd # test_reg_resample_blank() - nr_resample_2 = RegResample(type='blank', inter_val='LIN', omp_core_val=4) - ref_file = example_data('im1.nii') - flo_file = example_data('im2.nii') - trans_file = example_data('warpfield.nii') + nr_resample_2 = RegResample(type="blank", inter_val="LIN", omp_core_val=4) + ref_file = example_data("im1.nii") + flo_file = example_data("im2.nii") + trans_file = example_data("warpfield.nii") nr_resample_2.inputs.ref_file = ref_file nr_resample_2.inputs.flo_file = flo_file nr_resample_2.inputs.trans_file = trans_file - cmd_tmp = '{cmd} -flo {flo} -inter 1 -omp 4 -ref {ref} -trans {trans} \ --blank {blank}' + cmd_tmp = "{cmd} -flo {flo} -inter 1 -omp 4 -ref {ref} -trans {trans} \ +-blank {blank}" expected_cmd = cmd_tmp.format( - cmd=get_custom_path('reg_resample'), + cmd=get_custom_path("reg_resample"), flo=flo_file, ref=ref_file, trans=trans_file, - blank='im2_blank.nii.gz') + blank="im2_blank.nii.gz", + ) assert nr_resample_2.cmdline == expected_cmd @pytest.mark.skipif( - no_nifty_tool(cmd='reg_jacobian'), - reason="niftyreg is not installed. reg_jacobian not found.") + no_nifty_tool(cmd="reg_jacobian"), + reason="niftyreg is not installed. 
reg_jacobian not found.", +) def test_reg_jacobian_jac(): """ Test interface for RegJacobian """ # Create a reg_jacobian object nr_jacobian = RegJacobian() # Check if the command is properly defined - assert nr_jacobian.cmd == get_custom_path('reg_jacobian') + assert nr_jacobian.cmd == get_custom_path("reg_jacobian") # test raising error with mandatory args absent with pytest.raises(ValueError): nr_jacobian.run() # Test Reg Jacobian: jac - ref_file = example_data('im1.nii') - trans_file = example_data('warpfield.nii') + ref_file = example_data("im1.nii") + trans_file = example_data("warpfield.nii") nr_jacobian.inputs.ref_file = ref_file nr_jacobian.inputs.trans_file = trans_file nr_jacobian.inputs.omp_core_val = 4 - cmd_tmp = '{cmd} -omp 4 -ref {ref} -trans {trans} -jac {jac}' + cmd_tmp = "{cmd} -omp 4 -ref {ref} -trans {trans} -jac {jac}" expected_cmd = cmd_tmp.format( - cmd=get_custom_path('reg_jacobian'), + cmd=get_custom_path("reg_jacobian"), ref=ref_file, trans=trans_file, - jac='warpfield_jac.nii.gz') + jac="warpfield_jac.nii.gz", + ) assert nr_jacobian.cmdline == expected_cmd # Test Reg Jacobian: jac m - nr_jacobian_2 = RegJacobian(type='jacM', omp_core_val=4) - ref_file = example_data('im1.nii') - trans_file = example_data('warpfield.nii') + nr_jacobian_2 = RegJacobian(type="jacM", omp_core_val=4) + ref_file = example_data("im1.nii") + trans_file = example_data("warpfield.nii") nr_jacobian_2.inputs.ref_file = ref_file nr_jacobian_2.inputs.trans_file = trans_file - cmd_tmp = '{cmd} -omp 4 -ref {ref} -trans {trans} -jacM {jac}' + cmd_tmp = "{cmd} -omp 4 -ref {ref} -trans {trans} -jacM {jac}" expected_cmd = cmd_tmp.format( - cmd=get_custom_path('reg_jacobian'), + cmd=get_custom_path("reg_jacobian"), ref=ref_file, trans=trans_file, - jac='warpfield_jacM.nii.gz') + jac="warpfield_jacM.nii.gz", + ) assert nr_jacobian_2.cmdline == expected_cmd # Test Reg Jacobian: jac l - nr_jacobian_3 = RegJacobian(type='jacL', omp_core_val=4) - ref_file = example_data('im1.nii') - trans_file = example_data('warpfield.nii') + nr_jacobian_3 = RegJacobian(type="jacL", omp_core_val=4) + ref_file = example_data("im1.nii") + trans_file = example_data("warpfield.nii") nr_jacobian_3.inputs.ref_file = ref_file nr_jacobian_3.inputs.trans_file = trans_file - cmd_tmp = '{cmd} -omp 4 -ref {ref} -trans {trans} -jacL {jac}' + cmd_tmp = "{cmd} -omp 4 -ref {ref} -trans {trans} -jacL {jac}" expected_cmd = cmd_tmp.format( - cmd=get_custom_path('reg_jacobian'), + cmd=get_custom_path("reg_jacobian"), ref=ref_file, trans=trans_file, - jac='warpfield_jacL.nii.gz') + jac="warpfield_jacL.nii.gz", + ) assert nr_jacobian_3.cmdline == expected_cmd @pytest.mark.skipif( - no_nifty_tool(cmd='reg_tools'), - reason="niftyreg is not installed. reg_tools not found.") + no_nifty_tool(cmd="reg_tools"), + reason="niftyreg is not installed. 
reg_tools not found.", +) def test_reg_tools_mul(): """ tests for reg_tools interface """ # Create a reg_tools object nr_tools = RegTools() # Check if the command is properly defined - assert nr_tools.cmd == get_custom_path('reg_tools') + assert nr_tools.cmd == get_custom_path("reg_tools") # test raising error with mandatory args absent with pytest.raises(ValueError): nr_tools.run() # Test reg_tools: mul - in_file = example_data('im1.nii') + in_file = example_data("im1.nii") nr_tools.inputs.in_file = in_file nr_tools.inputs.mul_val = 4 nr_tools.inputs.omp_core_val = 4 - cmd_tmp = '{cmd} -in {in_file} -mul 4.0 -omp 4 -out {out_file}' + cmd_tmp = "{cmd} -in {in_file} -mul 4.0 -omp 4 -out {out_file}" expected_cmd = cmd_tmp.format( - cmd=get_custom_path('reg_tools'), - in_file=in_file, - out_file='im1_tools.nii.gz') + cmd=get_custom_path("reg_tools"), in_file=in_file, out_file="im1_tools.nii.gz" + ) assert nr_tools.cmdline == expected_cmd # Test reg_tools: iso nr_tools_2 = RegTools(iso_flag=True, omp_core_val=4) - in_file = example_data('im1.nii') + in_file = example_data("im1.nii") nr_tools_2.inputs.in_file = in_file - cmd_tmp = '{cmd} -in {in_file} -iso -omp 4 -out {out_file}' + cmd_tmp = "{cmd} -in {in_file} -iso -omp 4 -out {out_file}" expected_cmd = cmd_tmp.format( - cmd=get_custom_path('reg_tools'), - in_file=in_file, - out_file='im1_tools.nii.gz') + cmd=get_custom_path("reg_tools"), in_file=in_file, out_file="im1_tools.nii.gz" + ) assert nr_tools_2.cmdline == expected_cmd @pytest.mark.skipif( - no_nifty_tool(cmd='reg_average'), - reason="niftyreg is not installed. reg_average not found.") + no_nifty_tool(cmd="reg_average"), + reason="niftyreg is not installed. reg_average not found.", +) def test_reg_average(): """ tests for reg_average interface """ # Create a reg_average object nr_average = RegAverage() # Check if the command is properly defined - assert nr_average.cmd == get_custom_path('reg_average') + assert nr_average.cmd == get_custom_path("reg_average") # Average niis - one_file = example_data('im1.nii') - two_file = example_data('im2.nii') - three_file = example_data('im3.nii') + one_file = example_data("im1.nii") + two_file = example_data("im2.nii") + three_file = example_data("im3.nii") nr_average.inputs.avg_files = [one_file, two_file, three_file] nr_average.inputs.omp_core_val = 1 generated_cmd = nr_average.cmdline # Read the reg_average_cmd - reg_average_cmd = os.path.join(os.getcwd(), 'reg_average_cmd') - with open(reg_average_cmd, 'rb') as f_obj: + reg_average_cmd = os.path.join(os.getcwd(), "reg_average_cmd") + with open(reg_average_cmd, "rb") as f_obj: argv = f_obj.read() os.remove(reg_average_cmd) - expected_argv = '%s %s -avg %s %s %s -omp 1' % ( - get_custom_path('reg_average'), - os.path.join(os.getcwd(), 'avg_out.nii.gz'), one_file, two_file, - three_file) + expected_argv = "%s %s -avg %s %s %s -omp 1" % ( + get_custom_path("reg_average"), + os.path.join(os.getcwd(), "avg_out.nii.gz"), + one_file, + two_file, + three_file, + ) - assert argv.decode('utf-8') == expected_argv + assert argv.decode("utf-8") == expected_argv # Test command line with text file - expected_cmd = ('%s --cmd_file %s' % (get_custom_path('reg_average'), - reg_average_cmd)) + expected_cmd = "%s --cmd_file %s" % ( + get_custom_path("reg_average"), + reg_average_cmd, + ) assert generated_cmd == expected_cmd # Test Reg Average: average txt nr_average_2 = RegAverage() - one_file = example_data('TransformParameters.0.txt') - two_file = example_data('ants_Affine.txt') - three_file = 
example_data('elastix.txt') + one_file = example_data("TransformParameters.0.txt") + two_file = example_data("ants_Affine.txt") + three_file = example_data("elastix.txt") nr_average_2.inputs.avg_files = [one_file, two_file, three_file] nr_average_2.inputs.omp_core_val = 1 generated_cmd = nr_average_2.cmdline # Read the reg_average_cmd - reg_average_cmd = os.path.join(os.getcwd(), 'reg_average_cmd') - with open(reg_average_cmd, 'rb') as f_obj: + reg_average_cmd = os.path.join(os.getcwd(), "reg_average_cmd") + with open(reg_average_cmd, "rb") as f_obj: argv = f_obj.read() os.remove(reg_average_cmd) - expected_argv = '%s %s -avg %s %s %s -omp 1' % ( - get_custom_path('reg_average'), - os.path.join(os.getcwd(), 'avg_out.txt'), one_file, two_file, - three_file) + expected_argv = "%s %s -avg %s %s %s -omp 1" % ( + get_custom_path("reg_average"), + os.path.join(os.getcwd(), "avg_out.txt"), + one_file, + two_file, + three_file, + ) - assert argv.decode('utf-8') == expected_argv + assert argv.decode("utf-8") == expected_argv # Test Reg Average: average list nr_average_3 = RegAverage() - one_file = example_data('TransformParameters.0.txt') - two_file = example_data('ants_Affine.txt') - three_file = example_data('elastix.txt') + one_file = example_data("TransformParameters.0.txt") + two_file = example_data("ants_Affine.txt") + three_file = example_data("elastix.txt") nr_average_3.inputs.avg_lts_files = [one_file, two_file, three_file] nr_average_3.inputs.omp_core_val = 1 generated_cmd = nr_average_3.cmdline # Read the reg_average_cmd - reg_average_cmd = os.path.join(os.getcwd(), 'reg_average_cmd') - with open(reg_average_cmd, 'rb') as f_obj: + reg_average_cmd = os.path.join(os.getcwd(), "reg_average_cmd") + with open(reg_average_cmd, "rb") as f_obj: argv = f_obj.read() os.remove(reg_average_cmd) - expected_argv = ('%s %s -avg_lts %s %s %s -omp 1' % - (get_custom_path('reg_average'), - os.path.join(os.getcwd(), 'avg_out.txt'), one_file, - two_file, three_file)) + expected_argv = "%s %s -avg_lts %s %s %s -omp 1" % ( + get_custom_path("reg_average"), + os.path.join(os.getcwd(), "avg_out.txt"), + one_file, + two_file, + three_file, + ) - assert argv.decode('utf-8') == expected_argv + assert argv.decode("utf-8") == expected_argv # Test Reg Average: average ref nr_average_4 = RegAverage() - ref_file = example_data('anatomical.nii') - one_file = example_data('im1.nii') - two_file = example_data('im2.nii') - three_file = example_data('im3.nii') - trans1_file = example_data('roi01.nii') - trans2_file = example_data('roi02.nii') - trans3_file = example_data('roi03.nii') + ref_file = example_data("anatomical.nii") + one_file = example_data("im1.nii") + two_file = example_data("im2.nii") + three_file = example_data("im3.nii") + trans1_file = example_data("roi01.nii") + trans2_file = example_data("roi02.nii") + trans3_file = example_data("roi03.nii") nr_average_4.inputs.warp_files = [ - trans1_file, one_file, trans2_file, two_file, trans3_file, three_file + trans1_file, + one_file, + trans2_file, + two_file, + trans3_file, + three_file, ] nr_average_4.inputs.avg_ref_file = ref_file nr_average_4.inputs.omp_core_val = 1 generated_cmd = nr_average_4.cmdline # Read the reg_average_cmd - reg_average_cmd = os.path.join(os.getcwd(), 'reg_average_cmd') - with open(reg_average_cmd, 'rb') as f_obj: + reg_average_cmd = os.path.join(os.getcwd(), "reg_average_cmd") + with open(reg_average_cmd, "rb") as f_obj: argv = f_obj.read() os.remove(reg_average_cmd) - expected_argv = ('%s %s -avg_tran %s -omp 1 %s %s %s %s %s %s' % - 
(get_custom_path('reg_average'), - os.path.join(os.getcwd(), 'avg_out.nii.gz'), ref_file, - trans1_file, one_file, trans2_file, two_file, - trans3_file, three_file)) + expected_argv = "%s %s -avg_tran %s -omp 1 %s %s %s %s %s %s" % ( + get_custom_path("reg_average"), + os.path.join(os.getcwd(), "avg_out.nii.gz"), + ref_file, + trans1_file, + one_file, + trans2_file, + two_file, + trans3_file, + three_file, + ) - assert argv.decode('utf-8') == expected_argv + assert argv.decode("utf-8") == expected_argv # Test Reg Average: demean3 nr_average_5 = RegAverage() - ref_file = example_data('anatomical.nii') - one_file = example_data('im1.nii') - two_file = example_data('im2.nii') - three_file = example_data('im3.nii') - aff1_file = example_data('TransformParameters.0.txt') - aff2_file = example_data('ants_Affine.txt') - aff3_file = example_data('elastix.txt') - trans1_file = example_data('roi01.nii') - trans2_file = example_data('roi02.nii') - trans3_file = example_data('roi03.nii') + ref_file = example_data("anatomical.nii") + one_file = example_data("im1.nii") + two_file = example_data("im2.nii") + three_file = example_data("im3.nii") + aff1_file = example_data("TransformParameters.0.txt") + aff2_file = example_data("ants_Affine.txt") + aff3_file = example_data("elastix.txt") + trans1_file = example_data("roi01.nii") + trans2_file = example_data("roi02.nii") + trans3_file = example_data("roi03.nii") nr_average_5.inputs.warp_files = [ - aff1_file, trans1_file, one_file, aff2_file, trans2_file, two_file, - aff3_file, trans3_file, three_file + aff1_file, + trans1_file, + one_file, + aff2_file, + trans2_file, + two_file, + aff3_file, + trans3_file, + three_file, ] nr_average_5.inputs.demean3_ref_file = ref_file nr_average_5.inputs.omp_core_val = 1 generated_cmd = nr_average_5.cmdline # Read the reg_average_cmd - reg_average_cmd = os.path.join(os.getcwd(), 'reg_average_cmd') - with open(reg_average_cmd, 'rb') as f_obj: + reg_average_cmd = os.path.join(os.getcwd(), "reg_average_cmd") + with open(reg_average_cmd, "rb") as f_obj: argv = f_obj.read() os.remove(reg_average_cmd) - expected_argv = ('%s %s -demean3 %s -omp 1 %s %s %s %s %s %s %s %s %s' % - (get_custom_path('reg_average'), - os.path.join(os.getcwd(), 'avg_out.nii.gz'), ref_file, - aff1_file, trans1_file, one_file, aff2_file, trans2_file, - two_file, aff3_file, trans3_file, three_file)) + expected_argv = "%s %s -demean3 %s -omp 1 %s %s %s %s %s %s %s %s %s" % ( + get_custom_path("reg_average"), + os.path.join(os.getcwd(), "avg_out.nii.gz"), + ref_file, + aff1_file, + trans1_file, + one_file, + aff2_file, + trans2_file, + two_file, + aff3_file, + trans3_file, + three_file, + ) - assert argv.decode('utf-8') == expected_argv + assert argv.decode("utf-8") == expected_argv @pytest.mark.skipif( - no_nifty_tool(cmd='reg_transform'), - reason="niftyreg is not installed. reg_transform not found.") + no_nifty_tool(cmd="reg_transform"), + reason="niftyreg is not installed. 
reg_transform not found.", +) def test_reg_transform_def(): """ tests for reg_transform interface """ # Create a reg_transform object nr_transform = RegTransform() # Check if the command is properly defined - assert nr_transform.cmd == get_custom_path('reg_transform') + assert nr_transform.cmd == get_custom_path("reg_transform") # Assign some input data - trans_file = example_data('warpfield.nii') + trans_file = example_data("warpfield.nii") nr_transform.inputs.def_input = trans_file nr_transform.inputs.omp_core_val = 4 - cmd_tmp = '{cmd} -omp 4 -def {trans_file} {out_file}' + cmd_tmp = "{cmd} -omp 4 -def {trans_file} {out_file}" expected_cmd = cmd_tmp.format( - cmd=get_custom_path('reg_transform'), + cmd=get_custom_path("reg_transform"), trans_file=trans_file, - out_file=os.path.join(os.getcwd(), 'warpfield_trans.nii.gz')) + out_file=os.path.join(os.getcwd(), "warpfield_trans.nii.gz"), + ) assert nr_transform.cmdline == expected_cmd # Test reg_transform: def ref nr_transform_2 = RegTransform(omp_core_val=4) - ref_file = example_data('im1.nii') - trans_file = example_data('warpfield.nii') + ref_file = example_data("im1.nii") + trans_file = example_data("warpfield.nii") nr_transform_2.inputs.ref1_file = ref_file nr_transform_2.inputs.def_input = trans_file - cmd_tmp = '{cmd} -ref {ref_file} -omp 4 -def {trans_file} {out_file}' + cmd_tmp = "{cmd} -ref {ref_file} -omp 4 -def {trans_file} {out_file}" expected_cmd = cmd_tmp.format( - cmd=get_custom_path('reg_transform'), + cmd=get_custom_path("reg_transform"), ref_file=ref_file, trans_file=trans_file, - out_file=os.path.join(os.getcwd(), 'warpfield_trans.nii.gz')) + out_file=os.path.join(os.getcwd(), "warpfield_trans.nii.gz"), + ) assert nr_transform_2.cmdline == expected_cmd # Test reg_transform: comp nii nr_transform_3 = RegTransform(omp_core_val=4) - ref_file = example_data('im1.nii') - trans_file = example_data('warpfield.nii') - trans2_file = example_data('anatomical.nii') + ref_file = example_data("im1.nii") + trans_file = example_data("warpfield.nii") + trans2_file = example_data("anatomical.nii") nr_transform_3.inputs.ref1_file = ref_file nr_transform_3.inputs.comp_input2 = trans2_file nr_transform_3.inputs.comp_input = trans_file - cmd_tmp = '{cmd} -ref {ref_file} -omp 4 -comp {trans1} {trans2} {out_file}' + cmd_tmp = "{cmd} -ref {ref_file} -omp 4 -comp {trans1} {trans2} {out_file}" expected_cmd = cmd_tmp.format( - cmd=get_custom_path('reg_transform'), + cmd=get_custom_path("reg_transform"), ref_file=ref_file, trans1=trans_file, trans2=trans2_file, - out_file=os.path.join(os.getcwd(), 'warpfield_trans.nii.gz')) + out_file=os.path.join(os.getcwd(), "warpfield_trans.nii.gz"), + ) assert nr_transform_3.cmdline == expected_cmd # Test reg_transform: comp txt nr_transform_4 = RegTransform(omp_core_val=4) - aff1_file = example_data('ants_Affine.txt') - aff2_file = example_data('elastix.txt') + aff1_file = example_data("ants_Affine.txt") + aff2_file = example_data("elastix.txt") nr_transform_4.inputs.comp_input2 = aff2_file nr_transform_4.inputs.comp_input = aff1_file - cmd_tmp = '{cmd} -omp 4 -comp {aff1} {aff2} {out_file}' + cmd_tmp = "{cmd} -omp 4 -comp {aff1} {aff2} {out_file}" expected_cmd = cmd_tmp.format( - cmd=get_custom_path('reg_transform'), + cmd=get_custom_path("reg_transform"), aff1=aff1_file, aff2=aff2_file, - out_file=os.path.join(os.getcwd(), 'ants_Affine_trans.txt')) + out_file=os.path.join(os.getcwd(), "ants_Affine_trans.txt"), + ) assert nr_transform_4.cmdline == expected_cmd # Test reg_transform: comp nr_transform_5 = 
RegTransform(omp_core_val=4) - trans_file = example_data('warpfield.nii') - aff_file = example_data('elastix.txt') + trans_file = example_data("warpfield.nii") + aff_file = example_data("elastix.txt") nr_transform_5.inputs.comp_input2 = trans_file nr_transform_5.inputs.comp_input = aff_file - cmd_tmp = '{cmd} -omp 4 -comp {aff} {trans} {out_file}' + cmd_tmp = "{cmd} -omp 4 -comp {aff} {trans} {out_file}" expected_cmd = cmd_tmp.format( - cmd=get_custom_path('reg_transform'), + cmd=get_custom_path("reg_transform"), aff=aff_file, trans=trans_file, - out_file=os.path.join(os.getcwd(), 'elastix_trans.nii.gz')) + out_file=os.path.join(os.getcwd(), "elastix_trans.nii.gz"), + ) assert nr_transform_5.cmdline == expected_cmd # Test reg_transform: flirt nr_transform_6 = RegTransform(omp_core_val=4) - aff_file = example_data('elastix.txt') - ref_file = example_data('im1.nii') - in_file = example_data('im2.nii') + aff_file = example_data("elastix.txt") + ref_file = example_data("im1.nii") + in_file = example_data("im2.nii") nr_transform_6.inputs.flirt_2_nr_input = (aff_file, ref_file, in_file) - cmd_tmp = '{cmd} -omp 4 -flirtAff2NR {aff} {ref} {in_file} {out_file}' + cmd_tmp = "{cmd} -omp 4 -flirtAff2NR {aff} {ref} {in_file} {out_file}" expected_cmd = cmd_tmp.format( - cmd=get_custom_path('reg_transform'), + cmd=get_custom_path("reg_transform"), aff=aff_file, ref=ref_file, in_file=in_file, - out_file=os.path.join(os.getcwd(), 'elastix_trans.txt')) + out_file=os.path.join(os.getcwd(), "elastix_trans.txt"), + ) assert nr_transform_6.cmdline == expected_cmd @pytest.mark.skipif( - no_nifty_tool(cmd='reg_measure'), - reason="niftyreg is not installed. reg_measure not found.") + no_nifty_tool(cmd="reg_measure"), + reason="niftyreg is not installed. reg_measure not found.", +) def test_reg_measure(): """ tests for reg_measure interface """ # Create a reg_measure object nr_measure = RegMeasure() # Check if the command is properly defined - assert nr_measure.cmd == get_custom_path('reg_measure') + assert nr_measure.cmd == get_custom_path("reg_measure") # test raising error with mandatory args absent with pytest.raises(ValueError): nr_measure.run() # Assign some input data - ref_file = example_data('im1.nii') - flo_file = example_data('im2.nii') + ref_file = example_data("im1.nii") + flo_file = example_data("im2.nii") nr_measure.inputs.ref_file = ref_file nr_measure.inputs.flo_file = flo_file - nr_measure.inputs.measure_type = 'lncc' + nr_measure.inputs.measure_type = "lncc" nr_measure.inputs.omp_core_val = 4 - cmd_tmp = '{cmd} -flo {flo} -lncc -omp 4 -out {out} -ref {ref}' + cmd_tmp = "{cmd} -flo {flo} -lncc -omp 4 -out {out} -ref {ref}" expected_cmd = cmd_tmp.format( - cmd=get_custom_path('reg_measure'), + cmd=get_custom_path("reg_measure"), flo=flo_file, - out='im2_lncc.txt', - ref=ref_file) + out="im2_lncc.txt", + ref=ref_file, + ) assert nr_measure.cmdline == expected_cmd diff --git a/nipype/interfaces/niftyseg/__init__.py b/nipype/interfaces/niftyseg/__init__.py index 14b391edd5..f5d908a8ff 100644 --- a/nipype/interfaces/niftyseg/__init__.py +++ b/nipype/interfaces/niftyseg/__init__.py @@ -10,7 +10,6 @@ from .em import EM from .label_fusion import LabelFusion, CalcTopNCC from .lesions import FillLesions -from .maths import (UnaryMaths, BinaryMaths, BinaryMathsInteger, TupleMaths, - Merge) +from .maths import UnaryMaths, BinaryMaths, BinaryMathsInteger, TupleMaths, Merge from .patchmatch import PatchMatch from .stats import UnaryStats, BinaryStats diff --git a/nipype/interfaces/niftyseg/base.py 
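[Editor's note: illustrative aside, not part of the patch.] The regutils tests above are skipped whenever the corresponding NiftyReg binary is missing; the body of no_nifty_tool lies outside this hunk, so the following is only a guess at its shape, inferred from the `which` import visible above.

    from nipype.utils.filemanip import which

    def no_nifty_tool(cmd=None):
        # Return True (i.e. skip the test) when the requested binary is not on PATH.
        # The real helper may also honour environment variables such as
        # NIFTYREGDIR via get_custom_path; that detail is assumed, not shown here.
        return which(cmd) is None
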
b/nipype/interfaces/niftyseg/base.py index cc831aa9b5..65f1f9ff14 100644 --- a/nipype/interfaces/niftyseg/base.py +++ b/nipype/interfaces/niftyseg/base.py @@ -23,7 +23,8 @@ class NiftySegCommand(NiftyFitCommand): """ Base support interface for NiftySeg commands. """ - _suffix = '_ns' + + _suffix = "_ns" _min_version = None def __init__(self, **inputs): @@ -31,4 +32,5 @@ def __init__(self, **inputs): def get_version(self): return super(NiftySegCommand, self).version_from_command( - cmd='seg_EM', flag='--version') + cmd="seg_EM", flag="--version" + ) diff --git a/nipype/interfaces/niftyseg/em.py b/nipype/interfaces/niftyseg/em.py index e9c749c282..d6fb4d5180 100644 --- a/nipype/interfaces/niftyseg/em.py +++ b/nipype/interfaces/niftyseg/em.py @@ -11,119 +11,130 @@ See the docstrings of the individual classes for examples. """ -from ..base import (TraitedSpec, File, traits, CommandLineInputSpec, - InputMultiPath) +from ..base import TraitedSpec, File, traits, CommandLineInputSpec, InputMultiPath from .base import NiftySegCommand from ..niftyreg.base import get_custom_path class EMInputSpec(CommandLineInputSpec): """Input Spec for EM.""" + in_file = File( - argstr='-in %s', + argstr="-in %s", exists=True, mandatory=True, - desc='Input image to segment', - position=4) + desc="Input image to segment", + position=4, + ) mask_file = File( - argstr='-mask %s', - exists=True, - desc='Filename of the ROI for label fusion') + argstr="-mask %s", exists=True, desc="Filename of the ROI for label fusion" + ) # Priors no_prior = traits.Int( - argstr='-nopriors %s', + argstr="-nopriors %s", mandatory=True, - desc='Number of classes to use without prior', - xor=['prior_4D', 'priors']) + desc="Number of classes to use without prior", + xor=["prior_4D", "priors"], + ) prior_4D = File( - argstr='-prior4D %s', + argstr="-prior4D %s", exists=True, mandatory=True, - desc='4D file containing the priors', - xor=['no_prior', 'priors']) + desc="4D file containing the priors", + xor=["no_prior", "priors"], + ) priors = InputMultiPath( - argstr='%s', + argstr="%s", mandatory=True, - desc='List of priors filepaths.', - xor=['no_prior', 'prior_4D']) + desc="List of priors filepaths.", + xor=["no_prior", "prior_4D"], + ) # iterations max_iter = traits.Int( - argstr='-max_iter %s', + argstr="-max_iter %s", default_value=100, usedefault=True, - desc='Maximum number of iterations') + desc="Maximum number of iterations", + ) min_iter = traits.Int( - argstr='-min_iter %s', + argstr="-min_iter %s", default_value=0, usedefault=True, - desc='Minimum number of iterations') + desc="Minimum number of iterations", + ) # other options bc_order_val = traits.Int( - argstr='-bc_order %s', + argstr="-bc_order %s", default_value=3, usedefault=True, - desc='Polynomial order for the bias field') + desc="Polynomial order for the bias field", + ) mrf_beta_val = traits.Float( - argstr='-mrf_beta %s', desc='Weight of the Markov Random Field') + argstr="-mrf_beta %s", desc="Weight of the Markov Random Field" + ) - desc = 'Bias field correction will run only if the ratio of improvement \ -is below bc_thresh. (default=0 [OFF])' + desc = "Bias field correction will run only if the ratio of improvement \ +is below bc_thresh. 
(default=0 [OFF])" bc_thresh_val = traits.Float( - argstr='-bc_thresh %s', - default_value=0, - usedefault=True, - desc=desc) + argstr="-bc_thresh %s", default_value=0, usedefault=True, desc=desc + ) - desc = 'Amount of regularization over the diagonal of the covariance \ -matrix [above 1]' + desc = "Amount of regularization over the diagonal of the covariance \ +matrix [above 1]" - reg_val = traits.Float(argstr='-reg %s', desc=desc) + reg_val = traits.Float(argstr="-reg %s", desc=desc) - desc = 'Outlier detection as in (Van Leemput TMI 2003). is the \ + desc = "Outlier detection as in (Van Leemput TMI 2003). is the \ Mahalanobis threshold [recommended between 3 and 7] is a convergence \ -ratio below which the outlier detection is going to be done [recommended 0.01]' +ratio below which the outlier detection is going to be done [recommended 0.01]" outlier_val = traits.Tuple( - traits.Float(), traits.Float(), argstr='-outlier %s %s', desc=desc) + traits.Float(), traits.Float(), argstr="-outlier %s %s", desc=desc + ) - desc = 'Relax Priors [relaxation factor: 00 (recommended=2.0)] /only 3D/' + desc = "Relax Priors [relaxation factor: 00 (recommended=2.0)] /only 3D/" relax_priors = traits.Tuple( - traits.Float(), traits.Float(), argstr='-rf %s %s', desc=desc) + traits.Float(), traits.Float(), argstr="-rf %s %s", desc=desc + ) # outputs out_file = File( - name_source=['in_file'], - name_template='%s_em.nii.gz', - argstr='-out %s', - desc='Output segmentation') + name_source=["in_file"], + name_template="%s_em.nii.gz", + argstr="-out %s", + desc="Output segmentation", + ) out_bc_file = File( - name_source=['in_file'], - name_template='%s_bc_em.nii.gz', - argstr='-bc_out %s', - desc='Output bias corrected image') + name_source=["in_file"], + name_template="%s_bc_em.nii.gz", + argstr="-bc_out %s", + desc="Output bias corrected image", + ) out_outlier_file = File( - name_source=['in_file'], - name_template='%s_outlier_em.nii.gz', - argstr='-out_outlier %s', - desc='Output outlierness image') + name_source=["in_file"], + name_template="%s_outlier_em.nii.gz", + argstr="-out_outlier %s", + desc="Output outlierness image", + ) class EMOutputSpec(TraitedSpec): """Output Spec for EM.""" + out_file = File(desc="Output segmentation") out_bc_file = File(desc="Output bias corrected image") - out_outlier_file = File(desc='Output outlierness image') + out_outlier_file = File(desc="Output outlierness image") class EM(NiftySegCommand): @@ -147,15 +158,16 @@ class EM(NiftySegCommand): -bc_out im1_bc_em.nii.gz -out im1_em.nii.gz -out_outlier im1_outlier_em.nii.gz' """ - _cmd = get_custom_path('seg_EM', env_dir='NIFTYSEGDIR') - _suffix = '_em' + + _cmd = get_custom_path("seg_EM", env_dir="NIFTYSEGDIR") + _suffix = "_em" input_spec = EMInputSpec output_spec = EMOutputSpec def _format_arg(self, opt, spec, val): """Convert input to appropriate format for seg_EM.""" - if opt == 'priors': + if opt == "priors": _nb_priors = len(self.inputs.priors) - return '-priors %d %s' % (_nb_priors, ' '.join(self.inputs.priors)) + return "-priors %d %s" % (_nb_priors, " ".join(self.inputs.priors)) else: return super(EM, self)._format_arg(opt, spec, val) diff --git a/nipype/interfaces/niftyseg/label_fusion.py b/nipype/interfaces/niftyseg/label_fusion.py index 5185b64f68..aa255247d2 100644 --- a/nipype/interfaces/niftyseg/label_fusion.py +++ b/nipype/interfaces/niftyseg/label_fusion.py @@ -7,107 +7,118 @@ import os import warnings -from ..base import (TraitedSpec, File, traits, isdefined, CommandLineInputSpec, - NipypeInterfaceError) 
+from ..base import ( + TraitedSpec, + File, + traits, + isdefined, + CommandLineInputSpec, + NipypeInterfaceError, +) from .base import NiftySegCommand from ..niftyreg.base import get_custom_path from ...utils.filemanip import load_json, save_json, split_filename warn = warnings.warn -warnings.filterwarnings('always', category=UserWarning) +warnings.filterwarnings("always", category=UserWarning) class LabelFusionInput(CommandLineInputSpec): """Input Spec for LabelFusion.""" + in_file = File( - argstr='-in %s', + argstr="-in %s", exists=True, mandatory=True, position=1, - desc='Filename of the 4D integer label image.') + desc="Filename of the 4D integer label image.", + ) - template_file = File(exists=True, desc='Registered templates (4D Image)') + template_file = File(exists=True, desc="Registered templates (4D Image)") file_to_seg = File( - exists=True, - mandatory=True, - desc='Original image to segment (3D Image)') + exists=True, mandatory=True, desc="Original image to segment (3D Image)" + ) mask_file = File( - argstr='-mask %s', - exists=True, - desc='Filename of the ROI for label fusion') + argstr="-mask %s", exists=True, desc="Filename of the ROI for label fusion" + ) out_file = File( - argstr='-out %s', - name_source=['in_file'], - name_template='%s', - desc='Output consensus segmentation') + argstr="-out %s", + name_source=["in_file"], + name_template="%s", + desc="Output consensus segmentation", + ) prob_flag = traits.Bool( - desc='Probabilistic/Fuzzy segmented image', argstr='-outProb') + desc="Probabilistic/Fuzzy segmented image", argstr="-outProb" + ) - desc = 'Verbose level [0 = off, 1 = on, 2 = debug] (default = 0)' - verbose = traits.Enum('0', '1', '2', desc=desc, argstr='-v %s') + desc = "Verbose level [0 = off, 1 = on, 2 = debug] (default = 0)" + verbose = traits.Enum("0", "1", "2", desc=desc, argstr="-v %s") - desc = 'Only consider non-consensus voxels to calculate statistics' - unc = traits.Bool(desc=desc, argstr='-unc') + desc = "Only consider non-consensus voxels to calculate statistics" + unc = traits.Bool(desc=desc, argstr="-unc") classifier_type = traits.Enum( - 'STEPS', - 'STAPLE', - 'MV', - 'SBA', - argstr='-%s', + "STEPS", + "STAPLE", + "MV", + "SBA", + argstr="-%s", mandatory=True, position=2, - desc='Type of Classifier Fusion.') + desc="Type of Classifier Fusion.", + ) desc = "Gaussian kernel size in mm to compute the local similarity" kernel_size = traits.Float(desc=desc) - template_num = traits.Int(desc='Number of labels to use') + template_num = traits.Int(desc="Number of labels to use") # STAPLE and MV options sm_ranking = traits.Enum( - 'ALL', - 'GNCC', - 'ROINCC', - 'LNCC', - argstr='-%s', + "ALL", + "GNCC", + "ROINCC", + "LNCC", + argstr="-%s", usedefault=True, position=3, - desc='Ranking for STAPLE and MV') + desc="Ranking for STAPLE and MV", + ) - dilation_roi = traits.Int(desc='Dilation of the ROI ( d>=1 )') + dilation_roi = traits.Int(desc="Dilation of the ROI ( d>=1 )") # STAPLE and STEPS options - desc = 'Proportion of the label (only for single labels).' - proportion = traits.Float(argstr='-prop %s', desc=desc) + desc = "Proportion of the label (only for single labels)." 
+ proportion = traits.Float(argstr="-prop %s", desc=desc) - desc = 'Update label proportions at each iteration' - prob_update_flag = traits.Bool(desc=desc, argstr='-prop_update') + desc = "Update label proportions at each iteration" + prob_update_flag = traits.Bool(desc=desc, argstr="-prop_update") - desc = 'Value of P and Q [ 0 < (P,Q) < 1 ] (default = 0.99 0.99)' - set_pq = traits.Tuple( - traits.Float, traits.Float, argstr='-setPQ %f %f', desc=desc) + desc = "Value of P and Q [ 0 < (P,Q) < 1 ] (default = 0.99 0.99)" + set_pq = traits.Tuple(traits.Float, traits.Float, argstr="-setPQ %f %f", desc=desc) mrf_value = traits.Float( - argstr='-MRF_beta %f', desc='MRF prior strength (between 0 and 5)') + argstr="-MRF_beta %f", desc="MRF prior strength (between 0 and 5)" + ) - desc = 'Maximum number of iterations (default = 15).' - max_iter = traits.Int(argstr='-max_iter %d', desc=desc) + desc = "Maximum number of iterations (default = 15)." + max_iter = traits.Int(argstr="-max_iter %d", desc=desc) - desc = 'If percent of labels agree, then area is not uncertain.' - unc_thresh = traits.Float(argstr='-uncthres %f', desc=desc) + desc = "If percent of labels agree, then area is not uncertain." + unc_thresh = traits.Float(argstr="-uncthres %f", desc=desc) - desc = 'Ratio for convergence (default epsilon = 10^-5).' - conv = traits.Float(argstr='-conv %f', desc=desc) + desc = "Ratio for convergence (default epsilon = 10^-5)." + conv = traits.Float(argstr="-conv %f", desc=desc) class LabelFusionOutput(TraitedSpec): """Output Spec for LabelFusion.""" - out_file = File(exists=True, desc='image written after calculations') + + out_file = File(exists=True, desc="image written after calculations") class LabelFusion(NiftySegCommand): @@ -148,24 +159,31 @@ class LabelFusion(NiftySegCommand): 'seg_LabFusion -in im1.nii -STEPS 2.000000 2 im2.nii im3.nii -out im1_steps.nii' """ - _cmd = get_custom_path('seg_LabFusion', env_dir='NIFTYSEGDIR') + + _cmd = get_custom_path("seg_LabFusion", env_dir="NIFTYSEGDIR") input_spec = LabelFusionInput output_spec = LabelFusionOutput - _suffix = '_label_fused' + _suffix = "_label_fused" def _format_arg(self, opt, spec, val): """Convert input to appropriate format for seg_maths.""" # Remove options if not STAPLE or STEPS as fusion type: - if opt in ['proportion', 'prob_update_flag', 'set_pq', 'mrf_value', - 'max_iter', 'unc_thresh', 'conv'] and\ - self.inputs.classifier_type not in ['STAPLE', 'STEPS']: - return '' - - if opt == 'sm_ranking': + if opt in [ + "proportion", + "prob_update_flag", + "set_pq", + "mrf_value", + "max_iter", + "unc_thresh", + "conv", + ] and self.inputs.classifier_type not in ["STAPLE", "STEPS"]: + return "" + + if opt == "sm_ranking": return self.get_staple_args(val) # Return options string if STEPS: - if opt == 'classifier_type' and val == 'STEPS': + if opt == "classifier_type" and val == "STEPS": return self.get_steps_args() return super(LabelFusion, self)._format_arg(opt, spec, val) @@ -186,18 +204,20 @@ def get_steps_args(self): 'classifier_type' is set to 'STEPS'." 
raise NipypeInterfaceError(err) - return "-STEPS %f %d %s %s" % (self.inputs.kernel_size, - self.inputs.template_num, - self.inputs.file_to_seg, - self.inputs.template_file) + return "-STEPS %f %d %s %s" % ( + self.inputs.kernel_size, + self.inputs.template_num, + self.inputs.file_to_seg, + self.inputs.template_file, + ) def get_staple_args(self, ranking): classtype = self.inputs.classifier_type - if classtype not in ['STAPLE', 'MV']: + if classtype not in ["STAPLE", "MV"]: return None - if ranking == 'ALL': - return '-ALL' + if ranking == "ALL": + return "-ALL" if not isdefined(self.inputs.template_file): err = "LabelFusion requires a value for input 'tramplate_file' \ @@ -210,18 +230,21 @@ def get_staple_args(self, ranking): raise NipypeInterfaceError(err % (classtype, ranking)) - if ranking == 'GNCC': + if ranking == "GNCC": if not isdefined(self.inputs.template_num): err = "LabelFusion requires a value for input 'template_num' \ when 'classifier_type' is set to '%s' and 'sm_ranking' is set to '%s'." raise NipypeInterfaceError(err % (classtype, ranking)) - return "-%s %d %s %s" % (ranking, self.inputs.template_num, - self.inputs.file_to_seg, - self.inputs.template_file) + return "-%s %d %s %s" % ( + ranking, + self.inputs.template_num, + self.inputs.file_to_seg, + self.inputs.template_file, + ) - elif ranking == 'ROINCC': + elif ranking == "ROINCC": if not isdefined(self.inputs.dilation_roi): err = "LabelFusion requires a value for input 'dilation_roi' \ when 'classifier_type' is set to '%s' and 'sm_ranking' is set to '%s'." @@ -234,61 +257,62 @@ def get_staple_args(self, ranking): raise NipypeInterfaceError(err % self.inputs.dilation_roi) - return "-%s %d %d %s %s" % (ranking, self.inputs.dilation_roi, - self.inputs.template_num, - self.inputs.file_to_seg, - self.inputs.template_file) - elif ranking == 'LNCC': + return "-%s %d %d %s %s" % ( + ranking, + self.inputs.dilation_roi, + self.inputs.template_num, + self.inputs.file_to_seg, + self.inputs.template_file, + ) + elif ranking == "LNCC": if not isdefined(self.inputs.kernel_size): err = "LabelFusion requires a value for input 'kernel_size' \ when 'classifier_type' is set to '%s' and 'sm_ranking' is set to '%s'." 
raise NipypeInterfaceError(err % (classtype, ranking)) - return "-%s %f %d %s %s" % (ranking, self.inputs.kernel_size, - self.inputs.template_num, - self.inputs.file_to_seg, - self.inputs.template_file) + return "-%s %f %d %s %s" % ( + ranking, + self.inputs.kernel_size, + self.inputs.template_num, + self.inputs.file_to_seg, + self.inputs.template_file, + ) def _overload_extension(self, value, name=None): path, base, _ = split_filename(value) _, _, ext = split_filename(self.inputs.in_file) suffix = self.inputs.classifier_type.lower() - return os.path.join(path, '{0}_{1}{2}'.format(base, suffix, ext)) + return os.path.join(path, "{0}_{1}{2}".format(base, suffix, ext)) class CalcTopNCCInputSpec(CommandLineInputSpec): """Input Spec for CalcTopNCC.""" + in_file = File( - argstr='-target %s', - exists=True, - mandatory=True, - desc='Target file', - position=1) + argstr="-target %s", exists=True, mandatory=True, desc="Target file", position=1 + ) num_templates = traits.Int( - argstr='-templates %s', - mandatory=True, - position=2, - desc='Number of Templates') + argstr="-templates %s", mandatory=True, position=2, desc="Number of Templates" + ) in_templates = traits.List( - File(exists=True), argstr="%s", position=3, mandatory=True) + File(exists=True), argstr="%s", position=3, mandatory=True + ) top_templates = traits.Int( - argstr='-n %s', - mandatory=True, - position=4, - desc='Number of Top Templates') + argstr="-n %s", mandatory=True, position=4, desc="Number of Top Templates" + ) mask_file = File( - argstr='-mask %s', - exists=True, - desc='Filename of the ROI for label fusion') + argstr="-mask %s", exists=True, desc="Filename of the ROI for label fusion" + ) class CalcTopNCCOutputSpec(TraitedSpec): """Output Spec for CalcTopNCC.""" + out_files = traits.Any(File(exists=True)) @@ -307,23 +331,24 @@ class CalcTopNCC(NiftySegCommand): 'seg_CalcTopNCC -target im1.nii -templates 2 im2.nii im3.nii -n 1' """ - _cmd = get_custom_path('seg_CalcTopNCC', env_dir='NIFTYSEGDIR') - _suffix = '_topNCC' + + _cmd = get_custom_path("seg_CalcTopNCC", env_dir="NIFTYSEGDIR") + _suffix = "_topNCC" input_spec = CalcTopNCCInputSpec output_spec = CalcTopNCCOutputSpec def aggregate_outputs(self, runtime=None, needed_outputs=None): outputs = self._outputs() # local caching for backward compatibility - outfile = os.path.join(os.getcwd(), 'CalcTopNCC.json') + outfile = os.path.join(os.getcwd(), "CalcTopNCC.json") if runtime is None or not runtime.stdout: try: - out_files = load_json(outfile)['files'] + out_files = load_json(outfile)["files"] except IOError: return self.run().outputs else: out_files = [] - for line in runtime.stdout.split('\n'): + for line in runtime.stdout.split("\n"): if line: values = line.split() if len(values) > 1: diff --git a/nipype/interfaces/niftyseg/lesions.py b/nipype/interfaces/niftyseg/lesions.py index 14d7f23c6b..0d055a55f2 100644 --- a/nipype/interfaces/niftyseg/lesions.py +++ b/nipype/interfaces/niftyseg/lesions.py @@ -18,85 +18,88 @@ from ..niftyreg.base import get_custom_path warn = warnings.warn -warnings.filterwarnings('always', category=UserWarning) +warnings.filterwarnings("always", category=UserWarning) class FillLesionsInputSpec(CommandLineInputSpec): """Input Spec for FillLesions.""" + # Mandatory input arguments in_file = File( - argstr='-i %s', + argstr="-i %s", exists=True, mandatory=True, - desc='Input image to fill lesions', - position=1) + desc="Input image to fill lesions", + position=1, + ) lesion_mask = File( - argstr='-l %s', - exists=True, - mandatory=True, - 
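[Editor's note: illustrative aside, not part of the patch.] get_staple_args above assembles the ranking-specific arguments from the interface inputs; a worked example of the LNCC branch with invented values, mirroring the "-%s %f %d %s %s" template in the code:

    ranking, kernel_size, template_num = "LNCC", 2.0, 2
    file_to_seg, template_file = "target.nii", "templates_4d.nii"
    arg = "-%s %f %d %s %s" % (
        ranking, kernel_size, template_num, file_to_seg, template_file
    )
    assert arg == "-LNCC 2.000000 2 target.nii templates_4d.nii"
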
desc='Lesion mask', - position=2) + argstr="-l %s", exists=True, mandatory=True, desc="Lesion mask", position=2 + ) # Output file name out_file = File( - name_source=['in_file'], - name_template='%s_lesions_filled.nii.gz', - desc='The output filename of the fill lesions results', - argstr='-o %s', - position=3) + name_source=["in_file"], + name_template="%s_lesions_filled.nii.gz", + desc="The output filename of the fill lesions results", + argstr="-o %s", + position=3, + ) # Optional arguments desc = "Dilate the mask times (in voxels, by default 0)" - in_dilation = traits.Int(desc=desc, argstr='-dil %d') + in_dilation = traits.Int(desc=desc, argstr="-dil %d") - desc = 'Percentage of minimum number of voxels between patches \ -(by default 0.5).' + desc = "Percentage of minimum number of voxels between patches \ +(by default 0.5)." - match = traits.Float(desc=desc, argstr='-match %f') + match = traits.Float(desc=desc, argstr="-match %f") - desc = 'Minimum percentage of valid voxels in target patch \ -(by default 0).' + desc = "Minimum percentage of valid voxels in target patch \ +(by default 0)." - search = traits.Float(desc=desc, argstr='-search %f') + search = traits.Float(desc=desc, argstr="-search %f") - desc = 'Smoothing by (in minimal 6-neighbourhood voxels \ -(by default 0.1)).' + desc = "Smoothing by (in minimal 6-neighbourhood voxels \ +(by default 0.1))." - smooth = traits.Float(desc=desc, argstr='-smo %f') + smooth = traits.Float(desc=desc, argstr="-smo %f") - desc = 'Search regions size respect biggest patch size (by default 4).' - size = traits.Int(desc=desc, argstr='-size %d') + desc = "Search regions size respect biggest patch size (by default 4)." + size = traits.Int(desc=desc, argstr="-size %d") - desc = 'Patch cardinality weighting factor (by default 2).' - cwf = traits.Float(desc=desc, argstr='-cwf %f') + desc = "Patch cardinality weighting factor (by default 2)." + cwf = traits.Float(desc=desc, argstr="-cwf %f") - desc = 'Give a binary mask with the valid search areas.' - bin_mask = File(desc=desc, argstr='-mask %s') + desc = "Give a binary mask with the valid search areas." + bin_mask = File(desc=desc, argstr="-mask %s") desc = "Guizard et al. (FIN 2015) method, it doesn't include the \ multiresolution/hierarchical inpainting part, this part needs to be done \ with some external software such as reg_tools and reg_resample from NiftyReg. \ By default it uses the method presented in Prados et al. (Neuroimage 2016)." - other = traits.Bool(desc=desc, argstr='-other') + other = traits.Bool(desc=desc, argstr="-other") use_2d = traits.Bool( - desc='Uses 2D patches in the Z axis, by default 3D.', argstr='-2D') + desc="Uses 2D patches in the Z axis, by default 3D.", argstr="-2D" + ) debug = traits.Bool( - desc='Save all intermidium files (by default OFF).', argstr='-debug') + desc="Save all intermidium files (by default OFF).", argstr="-debug" + ) - desc = 'Set output (char, short, int, uchar, ushort, uint, \ -float, double).' + desc = "Set output (char, short, int, uchar, ushort, uint, \ +float, double)." 
- out_datatype = traits.String(desc=desc, argstr='-odt %s') + out_datatype = traits.String(desc=desc, argstr="-odt %s") - verbose = traits.Bool(desc='Verbose (by default OFF).', argstr='-v') + verbose = traits.Bool(desc="Verbose (by default OFF).", argstr="-v") class FillLesionsOutputSpec(TraitedSpec): """Output Spec for FillLesions.""" + out_file = File(desc="Output segmentation") @@ -118,6 +121,7 @@ class FillLesions(NiftySegCommand): 'seg_FillLesions -i im1.nii -l im2.nii -o im1_lesions_filled.nii.gz' """ - _cmd = get_custom_path('seg_FillLesions', env_dir='NIFTYSEGDIR') + + _cmd = get_custom_path("seg_FillLesions", env_dir="NIFTYSEGDIR") input_spec = FillLesionsInputSpec output_spec = FillLesionsOutputSpec diff --git a/nipype/interfaces/niftyseg/maths.py b/nipype/interfaces/niftyseg/maths.py index d4773f86e8..0afea087a0 100644 --- a/nipype/interfaces/niftyseg/maths.py +++ b/nipype/interfaces/niftyseg/maths.py @@ -13,8 +13,14 @@ import os -from ..base import (TraitedSpec, File, traits, isdefined, CommandLineInputSpec, - NipypeInterfaceError) +from ..base import ( + TraitedSpec, + File, + traits, + isdefined, + CommandLineInputSpec, + NipypeInterfaceError, +) from .base import NiftySegCommand from ..niftyreg.base import get_custom_path from ...utils.filemanip import split_filename @@ -22,36 +28,37 @@ class MathsInput(CommandLineInputSpec): """Input Spec for seg_maths interfaces.""" + in_file = File( - position=2, - argstr='%s', - exists=True, - mandatory=True, - desc='image to operate on') + position=2, argstr="%s", exists=True, mandatory=True, desc="image to operate on" + ) out_file = File( - name_source=['in_file'], - name_template='%s', + name_source=["in_file"], + name_template="%s", position=-2, - argstr='%s', - desc='image to write') + argstr="%s", + desc="image to write", + ) - desc = 'datatype to use for output (default uses input type)' + desc = "datatype to use for output (default uses input type)" output_datatype = traits.Enum( - 'float', - 'char', - 'int', - 'short', - 'double', - 'input', + "float", + "char", + "int", + "short", + "double", + "input", position=-3, - argstr='-odt %s', - desc=desc) + argstr="-odt %s", + desc=desc, + ) class MathsOutput(TraitedSpec): """Output Spec for seg_maths interfaces.""" - out_file = File(desc='image written after calculations') + + out_file = File(desc="image written after calculations") class MathsCommand(NiftySegCommand): @@ -71,52 +78,55 @@ class MathsCommand(NiftySegCommand): into several 3D images, to estimating the maximum, minimum and average over all time-points, etc. 
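A minimal usage sketch of this pattern (illustrative only: it assumes a local NiftySeg install so that seg_maths resolves on the path, and reuses the im1.nii example image referenced throughout these docstrings and tests; it mirrors the -otsu case exercised in test_maths.py further down):

>>> from nipype.interfaces import niftyseg
>>> node = niftyseg.UnaryMaths()
>>> node.inputs.in_file = 'im1.nii'
>>> node.inputs.operation = 'otsu'
>>> node.inputs.output_datatype = 'float'
>>> node.cmdline
'seg_maths im1.nii -otsu -odt float im1_otsu.nii'
>>> node.run()  # doctest: +SKIP

The output filename is derived from in_file plus an operation suffix by _overload_extension (defined just below), which is why im1.nii becomes im1_otsu.nii here.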
""" - _cmd = get_custom_path('seg_maths', env_dir='NIFTYSEGDIR') + + _cmd = get_custom_path("seg_maths", env_dir="NIFTYSEGDIR") input_spec = MathsInput output_spec = MathsOutput - _suffix = '_maths' + _suffix = "_maths" def _overload_extension(self, value, name=None): path, base, _ = split_filename(value) _, _, ext = split_filename(self.inputs.in_file) suffix = self._suffix - if suffix != '_merged' and isdefined(self.inputs.operation): - suffix = '_' + self.inputs.operation + if suffix != "_merged" and isdefined(self.inputs.operation): + suffix = "_" + self.inputs.operation - return os.path.join(path, '{0}{1}{2}'.format(base, suffix, ext)) + return os.path.join(path, "{0}{1}{2}".format(base, suffix, ext)) class UnaryMathsInput(MathsInput): """Input Spec for seg_maths Unary operations.""" + operation = traits.Enum( - 'sqrt', - 'exp', - 'log', - 'recip', - 'abs', - 'bin', - 'otsu', - 'lconcomp', - 'concomp6', - 'concomp26', - 'fill', - 'euc', - 'tpmax', - 'tmean', - 'tmax', - 'tmin', - 'splitlab', - 'removenan', - 'isnan', - 'subsamp2', - 'scl', - '4to5', - 'range', - argstr='-%s', + "sqrt", + "exp", + "log", + "recip", + "abs", + "bin", + "otsu", + "lconcomp", + "concomp6", + "concomp26", + "fill", + "euc", + "tpmax", + "tmean", + "tmax", + "tmin", + "splitlab", + "removenan", + "isnan", + "subsamp2", + "scl", + "4to5", + "range", + argstr="-%s", position=4, mandatory=True, - desc='operation to perform') + desc="operation to perform", + ) class UnaryMaths(MathsCommand): @@ -218,60 +228,66 @@ class UnaryMaths(MathsCommand): >>> unary_isnan.run() # doctest: +SKIP """ + input_spec = UnaryMathsInput class BinaryMathsInput(MathsInput): """Input Spec for seg_maths Binary operations.""" + operation = traits.Enum( - 'mul', - 'div', - 'add', - 'sub', - 'pow', - 'thr', - 'uthr', - 'smo', - 'edge', - 'sobel3', - 'sobel5', - 'min', - 'smol', - 'geo', - 'llsnorm', - 'masknan', - 'hdr_copy', - 'splitinter', + "mul", + "div", + "add", + "sub", + "pow", + "thr", + "uthr", + "smo", + "edge", + "sobel3", + "sobel5", + "min", + "smol", + "geo", + "llsnorm", + "masknan", + "hdr_copy", + "splitinter", mandatory=True, - argstr='-%s', + argstr="-%s", position=4, - desc='operation to perform') + desc="operation to perform", + ) operand_file = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=5, - xor=['operand_value', 'operand_str'], - desc='second image to perform operation with') + xor=["operand_value", "operand_str"], + desc="second image to perform operation with", + ) operand_value = traits.Float( - argstr='%.8f', + argstr="%.8f", mandatory=True, position=5, - xor=['operand_file', 'operand_str'], - desc='float value to perform operation with') + xor=["operand_file", "operand_str"], + desc="float value to perform operation with", + ) - desc = 'string value to perform operation splitinter' + desc = "string value to perform operation splitinter" operand_str = traits.Enum( - 'x', - 'y', - 'z', - argstr='%s', + "x", + "y", + "z", + argstr="%s", mandatory=True, position=5, - xor=['operand_value', 'operand_file'], - desc=desc) + xor=["operand_value", "operand_file"], + desc=desc, + ) class BinaryMaths(MathsCommand): @@ -368,70 +384,71 @@ class BinaryMaths(MathsCommand): >>> binary_splitinter.run() # doctest: +SKIP """ + input_spec = BinaryMathsInput def _format_arg(self, opt, spec, val): """Convert input to appropriate format for seg_maths.""" - if opt == 'operand_str' and self.inputs.operation != 'splitinter': + if opt == "operand_str" and self.inputs.operation != "splitinter": err = 
'operand_str set but with an operation different than \ "splitinter"' raise NipypeInterfaceError(err) - if opt == 'operation': + if opt == "operation": # Only float - if val in [ - 'pow', 'thr', 'uthr', 'smo', 'edge', 'sobel3', 'sobel5', - 'smol' - ]: + if val in ["pow", "thr", "uthr", "smo", "edge", "sobel3", "sobel5", "smol"]: if not isdefined(self.inputs.operand_value): - err = 'operand_value not set for {0}.'.format(val) + err = "operand_value not set for {0}.".format(val) raise NipypeInterfaceError(err) # only files - elif val in ['min', 'llsnorm', 'masknan', 'hdr_copy']: + elif val in ["min", "llsnorm", "masknan", "hdr_copy"]: if not isdefined(self.inputs.operand_file): - err = 'operand_file not set for {0}.'.format(val) + err = "operand_file not set for {0}.".format(val) raise NipypeInterfaceError(err) # splitinter: - elif val == 'splitinter': + elif val == "splitinter": if not isdefined(self.inputs.operand_str): - err = 'operand_str not set for splitinter.' + err = "operand_str not set for splitinter." raise NipypeInterfaceError(err) - if opt == 'operand_value' and float(val) == 0.0: - return '0' + if opt == "operand_value" and float(val) == 0.0: + return "0" return super(BinaryMaths, self)._format_arg(opt, spec, val) def _overload_extension(self, value, name=None): - if self.inputs.operation == 'hdr_copy': + if self.inputs.operation == "hdr_copy": path, base, _ = split_filename(value) _, base, ext = split_filename(self.inputs.operand_file) suffix = self.inputs.operation - return os.path.join(path, '{0}{1}{2}'.format(base, suffix, ext)) + return os.path.join(path, "{0}{1}{2}".format(base, suffix, ext)) else: return super(BinaryMaths, self)._overload_extension(value, name) class BinaryMathsInputInteger(MathsInput): """Input Spec for seg_maths Binary operations that require integer.""" + operation = traits.Enum( - 'dil', - 'ero', - 'tp', - 'equal', - 'pad', - 'crop', + "dil", + "ero", + "tp", + "equal", + "pad", + "crop", mandatory=True, - argstr='-%s', + argstr="-%s", position=4, - desc='operation to perform') + desc="operation to perform", + ) operand_value = traits.Int( - argstr='%d', + argstr="%d", mandatory=True, position=5, - desc='int value to perform operation with') + desc="int value to perform operation with", + ) class BinaryMathsInteger(MathsCommand): @@ -488,51 +505,50 @@ class BinaryMathsInteger(MathsCommand): >>> binaryi_pad.run() # doctest: +SKIP """ + input_spec = BinaryMathsInputInteger class TupleMathsInput(MathsInput): """Input Spec for seg_maths Tuple operations.""" + operation = traits.Enum( - 'lncc', - 'lssd', - 'lltsnorm', + "lncc", + "lssd", + "lltsnorm", mandatory=True, - argstr='-%s', + argstr="-%s", position=4, - desc='operation to perform') + desc="operation to perform", + ) operand_file1 = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=5, - xor=['operand_value1'], - desc='image to perform operation 1 with') + xor=["operand_value1"], + desc="image to perform operation 1 with", + ) - desc = 'float value to perform operation 1 with' + desc = "float value to perform operation 1 with" operand_value1 = traits.Float( - argstr='%.8f', - mandatory=True, - position=5, - xor=['operand_file1'], - desc=desc) + argstr="%.8f", mandatory=True, position=5, xor=["operand_file1"], desc=desc + ) operand_file2 = File( exists=True, - argstr='%s', + argstr="%s", mandatory=True, position=6, - xor=['operand_value2'], - desc='image to perform operation 2 with') + xor=["operand_value2"], + desc="image to perform operation 2 with", + ) - desc = 'float 
value to perform operation 2 with' + desc = "float value to perform operation 2 with" operand_value2 = traits.Float( - argstr='%.8f', - mandatory=True, - position=6, - xor=['operand_file2'], - desc=desc) + argstr="%.8f", mandatory=True, position=6, xor=["operand_file2"], desc=desc + ) class TupleMaths(MathsCommand): @@ -592,17 +608,19 @@ class TupleMaths(MathsCommand): im1_lltsnorm.nii' >>> tuple_lltsnorm.run() # doctest: +SKIP """ + input_spec = TupleMathsInput class MergeInput(MathsInput): """Input Spec for seg_maths merge operation.""" - dimension = traits.Int( - mandatory=True, desc='Dimension to merge the images.') - desc = 'List of images to merge to the working image .' + dimension = traits.Int(mandatory=True, desc="Dimension to merge the images.") + + desc = "List of images to merge to the working image ." merge_files = traits.List( - File(exists=True), argstr='%s', mandatory=True, position=4, desc=desc) + File(exists=True), argstr="%s", mandatory=True, position=4, desc=desc + ) class Merge(MathsCommand): @@ -633,13 +651,13 @@ class Merge(MathsCommand): 'seg_maths im1.nii -merge 2 2 im2.nii im3.nii -odt float im1_merged.nii' """ + input_spec = MergeInput - _suffix = '_merged' + _suffix = "_merged" def _format_arg(self, opt, spec, val): """Convert input to appropriate format for seg_maths.""" - if opt == 'merge_files': - return "-merge %d %d %s" % (len(val), self.inputs.dimension, - ' '.join(val)) + if opt == "merge_files": + return "-merge %d %d %s" % (len(val), self.inputs.dimension, " ".join(val)) return super(Merge, self)._format_arg(opt, spec, val) diff --git a/nipype/interfaces/niftyseg/patchmatch.py b/nipype/interfaces/niftyseg/patchmatch.py index 5732b1ba17..9dd7ddff5e 100644 --- a/nipype/interfaces/niftyseg/patchmatch.py +++ b/nipype/interfaces/niftyseg/patchmatch.py @@ -12,61 +12,65 @@ from ..niftyreg.base import get_custom_path warn = warnings.warn -warnings.filterwarnings('always', category=UserWarning) +warnings.filterwarnings("always", category=UserWarning) class PatchMatchInputSpec(CommandLineInputSpec): """Input Spec for PatchMatch.""" + # Mandatory input arguments in_file = File( - argstr='-i %s', + argstr="-i %s", exists=True, mandatory=True, - desc='Input image to segment', - position=1) + desc="Input image to segment", + position=1, + ) mask_file = File( - argstr='-m %s', + argstr="-m %s", exists=True, mandatory=True, - desc='Input mask for the area where applies PatchMatch', - position=2) + desc="Input mask for the area where applies PatchMatch", + position=2, + ) database_file = File( - argstr='-db %s', + argstr="-db %s", exists=True, mandatory=True, - desc='Database with the segmentations', - position=3) + desc="Database with the segmentations", + position=3, + ) # Output file name out_file = File( - name_source=['in_file'], - name_template='%s_pm.nii.gz', - desc='The output filename of the patchmatch results', - argstr='-o %s', - position=4) + name_source=["in_file"], + name_template="%s_pm.nii.gz", + desc="The output filename of the patchmatch results", + argstr="-o %s", + position=4, + ) # Optional arguments - patch_size = traits.Int(desc="Patch size, #voxels", argstr='-size %i') + patch_size = traits.Int(desc="Patch size, #voxels", argstr="-size %i") desc = "Constrained search area size, number of times bigger than the \ patchsize" - cs_size = traits.Int(desc=desc, argstr='-cs %i') + cs_size = traits.Int(desc=desc, argstr="-cs %i") - match_num = traits.Int( - desc="Number of better matching", argstr='-match %i') + match_num = traits.Int(desc="Number of 
better matching", argstr="-match %i") - pm_num = traits.Int( - desc="Number of patchmatch executions", argstr='-pm %i') + pm_num = traits.Int(desc="Number of patchmatch executions", argstr="-pm %i") desc = "Number of iterations for the patchmatch algorithm" - it_num = traits.Int(desc=desc, argstr='-it %i') + it_num = traits.Int(desc=desc, argstr="-it %i") class PatchMatchOutputSpec(TraitedSpec): """OutputSpec for PatchMatch.""" + out_file = File(desc="Output segmentation") @@ -99,7 +103,8 @@ class PatchMatch(NiftySegCommand): 'seg_PatchMatch -i im1.nii -m im2.nii -db db.xml -o im1_pm.nii.gz' """ - _cmd = get_custom_path('seg_PatchMatch', env_dir='NIFTYSEGDIR') + + _cmd = get_custom_path("seg_PatchMatch", env_dir="NIFTYSEGDIR") input_spec = PatchMatchInputSpec output_spec = PatchMatchOutputSpec - _suffix = '_pm' + _suffix = "_pm" diff --git a/nipype/interfaces/niftyseg/stats.py b/nipype/interfaces/niftyseg/stats.py index 94c7abd49e..611f293b42 100644 --- a/nipype/interfaces/niftyseg/stats.py +++ b/nipype/interfaces/niftyseg/stats.py @@ -13,27 +13,27 @@ class StatsInput(CommandLineInputSpec): """Input Spec for seg_stats interfaces.""" + in_file = File( - position=2, - argstr='%s', - exists=True, - mandatory=True, - desc='image to operate on') + position=2, argstr="%s", exists=True, mandatory=True, desc="image to operate on" + ) # Constrains mask_file = File( exists=True, position=-2, - argstr='-m %s', - desc='statistics within the masked area') + argstr="-m %s", + desc="statistics within the masked area", + ) - desc = 'Only estimate statistics if voxel is larger than ' - larger_voxel = traits.Float(argstr='-t %f', position=-3, desc=desc) + desc = "Only estimate statistics if voxel is larger than " + larger_voxel = traits.Float(argstr="-t %f", position=-3, desc=desc) class StatsOutput(TraitedSpec): """Output Spec for seg_stats interfaces.""" - output = traits.Array(desc='Output array from seg_stats') + + output = traits.Array(desc="Output array from seg_stats") class StatsCommand(NiftySegCommand): @@ -50,14 +50,15 @@ class StatsCommand(NiftySegCommand): robust to the presence of NaNs, and can be constrained by a mask and/or thresholded at a certain level. 
""" - _cmd = get_custom_path('seg_stats', env_dir='NIFTYSEGDIR') + + _cmd = get_custom_path("seg_stats", env_dir="NIFTYSEGDIR") input_spec = StatsInput output_spec = StatsOutput def _parse_stdout(self, stdout): out = [] for string_line in stdout.split("\n"): - if string_line.startswith('#'): + if string_line.startswith("#"): continue if len(string_line) <= 1: continue @@ -72,34 +73,36 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self.output_spec().get() - outputs['output'] = self.output + outputs["output"] = self.output return outputs class UnaryStatsInput(StatsInput): """Input Spec for seg_stats unary operations.""" + operation = traits.Enum( - 'r', - 'R', - 'a', - 's', - 'v', - 'vl', - 'vp', - 'n', - 'np', - 'e', - 'ne', - 'x', - 'X', - 'c', - 'B', - 'xvox', - 'xdim', - argstr='-%s', + "r", + "R", + "a", + "s", + "v", + "vl", + "vp", + "n", + "np", + "e", + "ne", + "x", + "X", + "c", + "B", + "xvox", + "xdim", + argstr="-%s", position=4, mandatory=True, - desc='operation to perform') + desc="operation to perform", + ) class UnaryStats(StatsCommand): @@ -178,26 +181,29 @@ class UnaryStats(StatsCommand): >>> unary_x.run() # doctest: +SKIP """ + input_spec = UnaryStatsInput class BinaryStatsInput(StatsInput): """Input Spec for seg_stats Binary operations.""" + operation = traits.Enum( - 'p', - 'sa', - 'ss', - 'svp', - 'al', - 'd', - 'ncc', - 'nmi', - 'Vl', - 'Nl', + "p", + "sa", + "ss", + "svp", + "al", + "d", + "ncc", + "nmi", + "Vl", + "Nl", mandatory=True, - argstr='-%s', + argstr="-%s", position=4, - desc='operation to perform') + desc="operation to perform", + ) operand_file = File( exists=True, @@ -205,14 +211,16 @@ class BinaryStatsInput(StatsInput): mandatory=True, position=5, xor=["operand_value"], - desc="second image to perform operation with") + desc="second image to perform operation with", + ) operand_value = traits.Float( - argstr='%.8f', + argstr="%.8f", mandatory=True, position=5, xor=["operand_file"], - desc='value to perform operation with') + desc="value to perform operation with", + ) class BinaryStats(StatsCommand): @@ -280,4 +288,5 @@ class BinaryStats(StatsCommand): >>> binary_nl.run() # doctest: +SKIP """ + input_spec = BinaryStatsInput diff --git a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py index d094c52724..ae7bb8a8ef 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py @@ -4,60 +4,47 @@ def test_BinaryMaths_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), operand_file=dict( - argstr='%s', + argstr="%s", extensions=None, mandatory=True, position=5, - xor=['operand_value', 'operand_str'], + xor=["operand_value", "operand_str"], ), operand_str=dict( - argstr='%s', + argstr="%s", mandatory=True, position=5, - xor=['operand_value', 'operand_file'], + xor=["operand_value", "operand_file"], ), operand_value=dict( - argstr='%.8f', + argstr="%.8f", mandatory=True, position=5, - xor=['operand_file', 'operand_str'], - ), - operation=dict( - argstr='-%s', - mandatory=True, - position=4, + xor=["operand_file", "operand_str"], ), + operation=dict(argstr="-%s", mandatory=True, position=4,), 
out_file=dict( - argstr='%s', + argstr="%s", extensions=None, - name_source=['in_file'], - name_template='%s', + name_source=["in_file"], + name_template="%s", position=-2, ), - output_datatype=dict( - argstr='-odt %s', - position=-3, - ), + output_datatype=dict(argstr="-odt %s", position=-3,), ) inputs = BinaryMaths.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BinaryMaths_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = BinaryMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py index 7c80638583..195a361f58 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py @@ -4,46 +4,29 @@ def test_BinaryMathsInteger_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - operand_value=dict( - argstr='%d', - mandatory=True, - position=5, - ), - operation=dict( - argstr='-%s', - mandatory=True, - position=4, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + operand_value=dict(argstr="%d", mandatory=True, position=5,), + operation=dict(argstr="-%s", mandatory=True, position=4,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, - name_source=['in_file'], - name_template='%s', + name_source=["in_file"], + name_template="%s", position=-2, ), - output_datatype=dict( - argstr='-odt %s', - position=-3, - ), + output_datatype=dict(argstr="-odt %s", position=-3,), ) inputs = BinaryMathsInteger.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BinaryMathsInteger_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = BinaryMathsInteger.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py b/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py index 184e089335..61ef530418 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py @@ -4,52 +4,32 @@ def test_BinaryStats_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - larger_voxel=dict( - argstr='-t %f', - position=-3, - ), - mask_file=dict( - argstr='-m %s', - extensions=None, - position=-2, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + larger_voxel=dict(argstr="-t %f", position=-3,), + mask_file=dict(argstr="-m %s", extensions=None, position=-2,), operand_file=dict( - argstr='%s', + argstr="%s", extensions=None, mandatory=True, position=5, - xor=['operand_value'], + xor=["operand_value"], ), operand_value=dict( - argstr='%.8f', - 
mandatory=True, - position=5, - xor=['operand_file'], - ), - operation=dict( - argstr='-%s', - mandatory=True, - position=4, + argstr="%.8f", mandatory=True, position=5, xor=["operand_file"], ), + operation=dict(argstr="-%s", mandatory=True, position=4,), ) inputs = BinaryStats.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BinaryStats_outputs(): - output_map = dict(output=dict(), ) + output_map = dict(output=dict(),) outputs = BinaryStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py b/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py index e75455c96f..f1c16859eb 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py @@ -4,44 +4,23 @@ def test_CalcTopNCC_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-target %s', - extensions=None, - mandatory=True, - position=1, - ), - in_templates=dict( - argstr='%s', - mandatory=True, - position=3, - ), - mask_file=dict( - argstr='-mask %s', - extensions=None, - ), - num_templates=dict( - argstr='-templates %s', - mandatory=True, - position=2, - ), - top_templates=dict( - argstr='-n %s', - mandatory=True, - position=4, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-target %s", extensions=None, mandatory=True, position=1,), + in_templates=dict(argstr="%s", mandatory=True, position=3,), + mask_file=dict(argstr="-mask %s", extensions=None,), + num_templates=dict(argstr="-templates %s", mandatory=True, position=2,), + top_templates=dict(argstr="-n %s", mandatory=True, position=4,), ) inputs = CalcTopNCC.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CalcTopNCC_outputs(): - output_map = dict(out_files=dict(), ) + output_map = dict(out_files=dict(),) outputs = CalcTopNCC.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_EM.py b/nipype/interfaces/niftyseg/tests/test_auto_EM.py index eac0fd86a0..ac340e89ff 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_EM.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_EM.py @@ -4,86 +4,59 @@ def test_EM_inputs(): input_map = dict( - args=dict(argstr='%s', ), - bc_order_val=dict( - argstr='-bc_order %s', - usedefault=True, - ), - bc_thresh_val=dict( - argstr='-bc_thresh %s', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - position=4, - ), - mask_file=dict( - argstr='-mask %s', - extensions=None, - ), - max_iter=dict( - argstr='-max_iter %s', - usedefault=True, - ), - min_iter=dict( - argstr='-min_iter %s', - usedefault=True, - ), - mrf_beta_val=dict(argstr='-mrf_beta %s', ), + args=dict(argstr="%s",), + bc_order_val=dict(argstr="-bc_order %s", usedefault=True,), + bc_thresh_val=dict(argstr="-bc_thresh %s", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=4,), + mask_file=dict(argstr="-mask %s", extensions=None,), + max_iter=dict(argstr="-max_iter %s", usedefault=True,), + 
min_iter=dict(argstr="-min_iter %s", usedefault=True,), + mrf_beta_val=dict(argstr="-mrf_beta %s",), no_prior=dict( - argstr='-nopriors %s', - mandatory=True, - xor=['prior_4D', 'priors'], + argstr="-nopriors %s", mandatory=True, xor=["prior_4D", "priors"], ), out_bc_file=dict( - argstr='-bc_out %s', + argstr="-bc_out %s", extensions=None, - name_source=['in_file'], - name_template='%s_bc_em.nii.gz', + name_source=["in_file"], + name_template="%s_bc_em.nii.gz", ), out_file=dict( - argstr='-out %s', + argstr="-out %s", extensions=None, - name_source=['in_file'], - name_template='%s_em.nii.gz', + name_source=["in_file"], + name_template="%s_em.nii.gz", ), out_outlier_file=dict( - argstr='-out_outlier %s', + argstr="-out_outlier %s", extensions=None, - name_source=['in_file'], - name_template='%s_outlier_em.nii.gz', + name_source=["in_file"], + name_template="%s_outlier_em.nii.gz", ), - outlier_val=dict(argstr='-outlier %s %s', ), + outlier_val=dict(argstr="-outlier %s %s",), prior_4D=dict( - argstr='-prior4D %s', + argstr="-prior4D %s", extensions=None, mandatory=True, - xor=['no_prior', 'priors'], + xor=["no_prior", "priors"], ), - priors=dict( - argstr='%s', - mandatory=True, - xor=['no_prior', 'prior_4D'], - ), - reg_val=dict(argstr='-reg %s', ), - relax_priors=dict(argstr='-rf %s %s', ), + priors=dict(argstr="%s", mandatory=True, xor=["no_prior", "prior_4D"],), + reg_val=dict(argstr="-reg %s",), + relax_priors=dict(argstr="-rf %s %s",), ) inputs = EM.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_EM_outputs(): output_map = dict( - out_bc_file=dict(extensions=None, ), - out_file=dict(extensions=None, ), - out_outlier_file=dict(extensions=None, ), + out_bc_file=dict(extensions=None,), + out_file=dict(extensions=None,), + out_outlier_file=dict(extensions=None,), ) outputs = EM.output_spec() diff --git a/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py b/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py index a6d11d735e..0e4c3d65bf 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py @@ -4,53 +4,39 @@ def test_FillLesions_inputs(): input_map = dict( - args=dict(argstr='%s', ), - bin_mask=dict( - argstr='-mask %s', - extensions=None, - ), - cwf=dict(argstr='-cwf %f', ), - debug=dict(argstr='-debug', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_dilation=dict(argstr='-dil %d', ), - in_file=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - position=1, - ), - lesion_mask=dict( - argstr='-l %s', - extensions=None, - mandatory=True, - position=2, - ), - match=dict(argstr='-match %f', ), - other=dict(argstr='-other', ), - out_datatype=dict(argstr='-odt %s', ), + args=dict(argstr="%s",), + bin_mask=dict(argstr="-mask %s", extensions=None,), + cwf=dict(argstr="-cwf %f",), + debug=dict(argstr="-debug",), + environ=dict(nohash=True, usedefault=True,), + in_dilation=dict(argstr="-dil %d",), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=1,), + lesion_mask=dict(argstr="-l %s", extensions=None, mandatory=True, position=2,), + match=dict(argstr="-match %f",), + other=dict(argstr="-other",), + out_datatype=dict(argstr="-odt %s",), out_file=dict( - argstr='-o %s', + argstr="-o %s", extensions=None, - name_source=['in_file'], - name_template='%s_lesions_filled.nii.gz', + name_source=["in_file"], + 
name_template="%s_lesions_filled.nii.gz", position=3, ), - search=dict(argstr='-search %f', ), - size=dict(argstr='-size %d', ), - smooth=dict(argstr='-smo %f', ), - use_2d=dict(argstr='-2D', ), - verbose=dict(argstr='-v', ), + search=dict(argstr="-search %f",), + size=dict(argstr="-size %d",), + smooth=dict(argstr="-smo %f",), + use_2d=dict(argstr="-2D",), + verbose=dict(argstr="-v",), ) inputs = FillLesions.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FillLesions_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = FillLesions.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py b/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py index 5d8f170899..ba319d3475 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py @@ -4,63 +4,43 @@ def test_LabelFusion_inputs(): input_map = dict( - args=dict(argstr='%s', ), - classifier_type=dict( - argstr='-%s', - mandatory=True, - position=2, - ), - conv=dict(argstr='-conv %f', ), + args=dict(argstr="%s",), + classifier_type=dict(argstr="-%s", mandatory=True, position=2,), + conv=dict(argstr="-conv %f",), dilation_roi=dict(), - environ=dict( - nohash=True, - usedefault=True, - ), - file_to_seg=dict( - extensions=None, - mandatory=True, - ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - position=1, - ), + environ=dict(nohash=True, usedefault=True,), + file_to_seg=dict(extensions=None, mandatory=True,), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=1,), kernel_size=dict(), - mask_file=dict( - argstr='-mask %s', - extensions=None, - ), - max_iter=dict(argstr='-max_iter %d', ), - mrf_value=dict(argstr='-MRF_beta %f', ), + mask_file=dict(argstr="-mask %s", extensions=None,), + max_iter=dict(argstr="-max_iter %d",), + mrf_value=dict(argstr="-MRF_beta %f",), out_file=dict( - argstr='-out %s', + argstr="-out %s", extensions=None, - name_source=['in_file'], - name_template='%s', - ), - prob_flag=dict(argstr='-outProb', ), - prob_update_flag=dict(argstr='-prop_update', ), - proportion=dict(argstr='-prop %s', ), - set_pq=dict(argstr='-setPQ %f %f', ), - sm_ranking=dict( - argstr='-%s', - position=3, - usedefault=True, + name_source=["in_file"], + name_template="%s", ), - template_file=dict(extensions=None, ), + prob_flag=dict(argstr="-outProb",), + prob_update_flag=dict(argstr="-prop_update",), + proportion=dict(argstr="-prop %s",), + set_pq=dict(argstr="-setPQ %f %f",), + sm_ranking=dict(argstr="-%s", position=3, usedefault=True,), + template_file=dict(extensions=None,), template_num=dict(), - unc=dict(argstr='-unc', ), - unc_thresh=dict(argstr='-uncthres %f', ), - verbose=dict(argstr='-v %s', ), + unc=dict(argstr="-unc",), + unc_thresh=dict(argstr="-uncthres %f",), + verbose=dict(argstr="-v %s",), ) inputs = LabelFusion.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_LabelFusion_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = LabelFusion.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py b/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py index 742e12447c..f8f7bcf95d 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py @@ -4,36 +4,27 @@ def test_MathsCommand_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, - name_source=['in_file'], - name_template='%s', + name_source=["in_file"], + name_template="%s", position=-2, ), - output_datatype=dict( - argstr='-odt %s', - position=-3, - ), + output_datatype=dict(argstr="-odt %s", position=-3,), ) inputs = MathsCommand.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MathsCommand_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MathsCommand.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_Merge.py b/nipype/interfaces/niftyseg/tests/test_auto_Merge.py index 02052f2ada..cfeb8a01d0 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_Merge.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_Merge.py @@ -4,42 +4,29 @@ def test_Merge_inputs(): input_map = dict( - args=dict(argstr='%s', ), - dimension=dict(mandatory=True, ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - merge_files=dict( - argstr='%s', - mandatory=True, - position=4, - ), + args=dict(argstr="%s",), + dimension=dict(mandatory=True,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + merge_files=dict(argstr="%s", mandatory=True, position=4,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, - name_source=['in_file'], - name_template='%s', + name_source=["in_file"], + name_template="%s", position=-2, ), - output_datatype=dict( - argstr='-odt %s', - position=-3, - ), + output_datatype=dict(argstr="-odt %s", position=-3,), ) inputs = Merge.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Merge_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Merge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py b/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py index c0d36cdfe6..e78d913a4c 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py @@ -4,11 +4,7 @@ def test_NiftySegCommand_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), environ=dict(nohash=True, usedefault=True,), ) inputs = NiftySegCommand.input_spec() diff --git 
a/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py b/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py index 9206be5647..3832a197f6 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py @@ -4,49 +4,35 @@ def test_PatchMatch_inputs(): input_map = dict( - args=dict(argstr='%s', ), - cs_size=dict(argstr='-cs %i', ), + args=dict(argstr="%s",), + cs_size=dict(argstr="-cs %i",), database_file=dict( - argstr='-db %s', - extensions=None, - mandatory=True, - position=3, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - position=1, + argstr="-db %s", extensions=None, mandatory=True, position=3, ), - it_num=dict(argstr='-it %i', ), - mask_file=dict( - argstr='-m %s', - extensions=None, - mandatory=True, - position=2, - ), - match_num=dict(argstr='-match %i', ), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=1,), + it_num=dict(argstr="-it %i",), + mask_file=dict(argstr="-m %s", extensions=None, mandatory=True, position=2,), + match_num=dict(argstr="-match %i",), out_file=dict( - argstr='-o %s', + argstr="-o %s", extensions=None, - name_source=['in_file'], - name_template='%s_pm.nii.gz', + name_source=["in_file"], + name_template="%s_pm.nii.gz", position=4, ), - patch_size=dict(argstr='-size %i', ), - pm_num=dict(argstr='-pm %i', ), + patch_size=dict(argstr="-size %i",), + pm_num=dict(argstr="-pm %i",), ) inputs = PatchMatch.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_PatchMatch_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = PatchMatch.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py b/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py index b4a3e25cdb..b0332f1a46 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py @@ -4,34 +4,21 @@ def test_StatsCommand_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - larger_voxel=dict( - argstr='-t %f', - position=-3, - ), - mask_file=dict( - argstr='-m %s', - extensions=None, - position=-2, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + larger_voxel=dict(argstr="-t %f", position=-3,), + mask_file=dict(argstr="-m %s", extensions=None, position=-2,), ) inputs = StatsCommand.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_StatsCommand_outputs(): - output_map = dict(output=dict(), ) + output_map = dict(output=dict(),) outputs = StatsCommand.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py b/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py index 30f03e31a1..9fc193b442 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py +++ 
b/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py @@ -4,67 +4,48 @@ def test_TupleMaths_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), operand_file1=dict( - argstr='%s', + argstr="%s", extensions=None, mandatory=True, position=5, - xor=['operand_value1'], + xor=["operand_value1"], ), operand_file2=dict( - argstr='%s', + argstr="%s", extensions=None, mandatory=True, position=6, - xor=['operand_value2'], + xor=["operand_value2"], ), operand_value1=dict( - argstr='%.8f', - mandatory=True, - position=5, - xor=['operand_file1'], + argstr="%.8f", mandatory=True, position=5, xor=["operand_file1"], ), operand_value2=dict( - argstr='%.8f', - mandatory=True, - position=6, - xor=['operand_file2'], - ), - operation=dict( - argstr='-%s', - mandatory=True, - position=4, + argstr="%.8f", mandatory=True, position=6, xor=["operand_file2"], ), + operation=dict(argstr="-%s", mandatory=True, position=4,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, - name_source=['in_file'], - name_template='%s', + name_source=["in_file"], + name_template="%s", position=-2, ), - output_datatype=dict( - argstr='-odt %s', - position=-3, - ), + output_datatype=dict(argstr="-odt %s", position=-3,), ) inputs = TupleMaths.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TupleMaths_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = TupleMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py b/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py index 5acd5cb1e3..0409efb5c7 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py @@ -4,41 +4,28 @@ def test_UnaryMaths_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - operation=dict( - argstr='-%s', - mandatory=True, - position=4, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + operation=dict(argstr="-%s", mandatory=True, position=4,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, - name_source=['in_file'], - name_template='%s', + name_source=["in_file"], + name_template="%s", position=-2, ), - output_datatype=dict( - argstr='-odt %s', - position=-3, - ), + output_datatype=dict(argstr="-odt %s", position=-3,), ) inputs = UnaryMaths.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_UnaryMaths_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = UnaryMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py b/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py 
index 9e3d7f81fc..177f044fd1 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py @@ -4,39 +4,22 @@ def test_UnaryStats_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), - larger_voxel=dict( - argstr='-t %f', - position=-3, - ), - mask_file=dict( - argstr='-m %s', - extensions=None, - position=-2, - ), - operation=dict( - argstr='-%s', - mandatory=True, - position=4, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + larger_voxel=dict(argstr="-t %f", position=-3,), + mask_file=dict(argstr="-m %s", extensions=None, position=-2,), + operation=dict(argstr="-%s", mandatory=True, position=4,), ) inputs = UnaryStats.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_UnaryStats_outputs(): - output_map = dict(output=dict(), ) + output_map = dict(output=dict(),) outputs = UnaryStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_em_interfaces.py b/nipype/interfaces/niftyseg/tests/test_em_interfaces.py index f4c56da2fe..5615f3e61c 100644 --- a/nipype/interfaces/niftyseg/tests/test_em_interfaces.py +++ b/nipype/interfaces/niftyseg/tests/test_em_interfaces.py @@ -9,15 +9,14 @@ from .. import EM -@pytest.mark.skipif( - no_nifty_tool(cmd='seg_EM'), reason="niftyseg is not installed") +@pytest.mark.skipif(no_nifty_tool(cmd="seg_EM"), reason="niftyseg is not installed") def test_seg_em(): # Create a node object seg_em = EM() # Check if the command is properly defined - cmd = get_custom_path('seg_EM', env_dir='NIFTYSEGDIR') + cmd = get_custom_path("seg_EM", env_dir="NIFTYSEGDIR") assert seg_em.cmd == cmd # test raising error with mandatory args absent @@ -25,19 +24,19 @@ def test_seg_em(): seg_em.run() # Assign some input data - in_file = example_data('im1.nii') + in_file = example_data("im1.nii") seg_em.inputs.in_file = in_file seg_em.inputs.no_prior = 4 - cmd_tmp = '{cmd} -in {in_file} -nopriors 4 -bc_out {bc_out} -out \ -{out_file} -out_outlier {out_outlier}' + cmd_tmp = "{cmd} -in {in_file} -nopriors 4 -bc_out {bc_out} -out \ +{out_file} -out_outlier {out_outlier}" expected_cmd = cmd_tmp.format( cmd=cmd, in_file=in_file, - out_file='im1_em.nii.gz', - bc_out='im1_bc_em.nii.gz', - out_outlier='im1_outlier_em.nii.gz', + out_file="im1_em.nii.gz", + bc_out="im1_bc_em.nii.gz", + out_outlier="im1_outlier_em.nii.gz", ) assert seg_em.cmdline == expected_cmd diff --git a/nipype/interfaces/niftyseg/tests/test_extra_PatchMatch.py b/nipype/interfaces/niftyseg/tests/test_extra_PatchMatch.py index 9fe82ac544..18156e37f1 100644 --- a/nipype/interfaces/niftyseg/tests/test_extra_PatchMatch.py +++ b/nipype/interfaces/niftyseg/tests/test_extra_PatchMatch.py @@ -10,14 +10,15 @@ @pytest.mark.skipif( - no_nifty_tool(cmd='seg_PatchMatch'), reason="niftyseg is not installed") + no_nifty_tool(cmd="seg_PatchMatch"), reason="niftyseg is not installed" +) def test_seg_patchmatch(): # Create a node object seg_patchmatch = PatchMatch() # Check if the command is properly defined - cmd = get_custom_path('seg_PatchMatch', env_dir='NIFTYSEGDIR') + cmd = get_custom_path("seg_PatchMatch", env_dir="NIFTYSEGDIR") assert 
seg_patchmatch.cmd == cmd # test raising error with mandatory args absent @@ -25,20 +26,20 @@ def test_seg_patchmatch(): seg_patchmatch.run() # Assign some input data - in_file = example_data('im1.nii') - mask_file = example_data('im2.nii') - db_file = example_data('db.xml') + in_file = example_data("im1.nii") + mask_file = example_data("im2.nii") + db_file = example_data("db.xml") seg_patchmatch.inputs.in_file = in_file seg_patchmatch.inputs.mask_file = mask_file seg_patchmatch.inputs.database_file = db_file - cmd_tmp = '{cmd} -i {in_file} -m {mask_file} -db {db} -o {out_file}' + cmd_tmp = "{cmd} -i {in_file} -m {mask_file} -db {db} -o {out_file}" expected_cmd = cmd_tmp.format( cmd=cmd, in_file=in_file, mask_file=mask_file, db=db_file, - out_file='im1_pm.nii.gz', + out_file="im1_pm.nii.gz", ) assert seg_patchmatch.cmdline == expected_cmd diff --git a/nipype/interfaces/niftyseg/tests/test_label_fusion.py b/nipype/interfaces/niftyseg/tests/test_label_fusion.py index fdc186d6c2..0a469a774e 100644 --- a/nipype/interfaces/niftyseg/tests/test_label_fusion.py +++ b/nipype/interfaces/niftyseg/tests/test_label_fusion.py @@ -10,14 +10,15 @@ @pytest.mark.skipif( - no_nifty_tool(cmd='seg_LabFusion'), reason="niftyseg is not installed") + no_nifty_tool(cmd="seg_LabFusion"), reason="niftyseg is not installed" +) def test_seg_lab_fusion(): """ Test interfaces for seg_labfusion""" # Create a node object steps = LabelFusion() # Check if the command is properly defined - cmd = get_custom_path('seg_LabFusion', env_dir='NIFTYSEGDIR') + cmd = get_custom_path("seg_LabFusion", env_dir="NIFTYSEGDIR") assert steps.cmd == cmd # test raising error with mandatory args absent @@ -25,86 +26,84 @@ def test_seg_lab_fusion(): steps.run() # Assign some input data - in_file = example_data('im1.nii') - file_to_seg = example_data('im2.nii') - template_file = example_data('im3.nii') + in_file = example_data("im1.nii") + file_to_seg = example_data("im2.nii") + template_file = example_data("im3.nii") steps.inputs.in_file = in_file steps.inputs.kernel_size = 2.0 steps.inputs.file_to_seg = file_to_seg steps.inputs.template_file = template_file steps.inputs.template_num = 2 - steps.inputs.classifier_type = 'STEPS' + steps.inputs.classifier_type = "STEPS" - cmd_tmp = '{cmd} -in {in_file} -STEPS 2.000000 2 {file_to_seg} \ -{template_file} -out {out_file}' + cmd_tmp = "{cmd} -in {in_file} -STEPS 2.000000 2 {file_to_seg} \ +{template_file} -out {out_file}" expected_cmd = cmd_tmp.format( cmd=cmd, in_file=in_file, file_to_seg=file_to_seg, template_file=template_file, - out_file='im1_steps.nii', + out_file="im1_steps.nii", ) assert steps.cmdline == expected_cmd # Staple - staple = LabelFusion( - kernel_size=2.0, template_num=2, classifier_type='STAPLE') - in_file = example_data('im1.nii') - file_to_seg = example_data('im2.nii') - template_file = example_data('im3.nii') + staple = LabelFusion(kernel_size=2.0, template_num=2, classifier_type="STAPLE") + in_file = example_data("im1.nii") + file_to_seg = example_data("im2.nii") + template_file = example_data("im3.nii") staple.inputs.in_file = in_file staple.inputs.file_to_seg = file_to_seg staple.inputs.template_file = template_file - cmd_tmp = '{cmd} -in {in_file} -STAPLE -ALL -out {out_file}' + cmd_tmp = "{cmd} -in {in_file} -STAPLE -ALL -out {out_file}" expected_cmd = cmd_tmp.format( cmd=cmd, in_file=in_file, file_to_seg=file_to_seg, template_file=template_file, - out_file='im1_staple.nii', + out_file="im1_staple.nii", ) assert staple.cmdline == expected_cmd # Assign some input data 
mv_node = LabelFusion( - template_num=2, - classifier_type='MV', - sm_ranking='ROINCC', - dilation_roi=2) - in_file = example_data('im1.nii') - file_to_seg = example_data('im2.nii') - template_file = example_data('im3.nii') + template_num=2, classifier_type="MV", sm_ranking="ROINCC", dilation_roi=2 + ) + in_file = example_data("im1.nii") + file_to_seg = example_data("im2.nii") + template_file = example_data("im3.nii") mv_node.inputs.in_file = in_file mv_node.inputs.file_to_seg = file_to_seg mv_node.inputs.template_file = template_file - cmd_tmp = '{cmd} -in {in_file} -MV -ROINCC 2 2 {file_to_seg} \ -{template_file} -out {out_file}' + cmd_tmp = "{cmd} -in {in_file} -MV -ROINCC 2 2 {file_to_seg} \ +{template_file} -out {out_file}" expected_cmd = cmd_tmp.format( cmd=cmd, in_file=in_file, file_to_seg=file_to_seg, template_file=template_file, - out_file='im1_mv.nii', + out_file="im1_mv.nii", ) assert mv_node.cmdline == expected_cmd @pytest.mark.skipif( - no_nifty_tool(cmd='seg_CalcTopNCC'), reason="niftyseg is not installed") + no_nifty_tool(cmd="seg_CalcTopNCC"), reason="niftyseg is not installed" +) def test_seg_calctopncc(): """ Test interfaces for seg_CalctoNCC""" # Create a node object calctopncc = CalcTopNCC() # Check if the command is properly defined - cmd = get_custom_path('seg_CalcTopNCC', env_dir='NIFTYSEGDIR') + cmd = get_custom_path("seg_CalcTopNCC", env_dir="NIFTYSEGDIR") assert calctopncc.cmd == cmd # test raising error with mandatory args absent @@ -112,20 +111,15 @@ def test_seg_calctopncc(): calctopncc.run() # Assign some input data - in_file = example_data('im1.nii') - file1 = example_data('im2.nii') - file2 = example_data('im3.nii') + in_file = example_data("im1.nii") + file1 = example_data("im2.nii") + file2 = example_data("im3.nii") calctopncc.inputs.in_file = in_file calctopncc.inputs.num_templates = 2 calctopncc.inputs.in_templates = [file1, file2] calctopncc.inputs.top_templates = 1 - cmd_tmp = '{cmd} -target {in_file} -templates 2 {file1} {file2} -n 1' - expected_cmd = cmd_tmp.format( - cmd=cmd, - in_file=in_file, - file1=file1, - file2=file2, - ) + cmd_tmp = "{cmd} -target {in_file} -templates 2 {file1} {file2} -n 1" + expected_cmd = cmd_tmp.format(cmd=cmd, in_file=in_file, file1=file1, file2=file2,) assert calctopncc.cmdline == expected_cmd diff --git a/nipype/interfaces/niftyseg/tests/test_lesions.py b/nipype/interfaces/niftyseg/tests/test_lesions.py index 24b7e82cd7..d46b380cc2 100644 --- a/nipype/interfaces/niftyseg/tests/test_lesions.py +++ b/nipype/interfaces/niftyseg/tests/test_lesions.py @@ -10,14 +10,15 @@ @pytest.mark.skipif( - no_nifty_tool(cmd='seg_FillLesions'), reason="niftyseg is not installed") + no_nifty_tool(cmd="seg_FillLesions"), reason="niftyseg is not installed" +) def test_seg_filllesions(): # Create a node object seg_fill = FillLesions() # Check if the command is properly defined - cmd = get_custom_path('seg_FillLesions', env_dir='NIFTYSEGDIR') + cmd = get_custom_path("seg_FillLesions", env_dir="NIFTYSEGDIR") assert seg_fill.cmd == cmd # test raising error with mandatory args absent @@ -25,16 +26,16 @@ def test_seg_filllesions(): seg_fill.run() # Assign some input data - in_file = example_data('im1.nii') - lesion_mask = example_data('im2.nii') + in_file = example_data("im1.nii") + lesion_mask = example_data("im2.nii") seg_fill.inputs.in_file = in_file seg_fill.inputs.lesion_mask = lesion_mask - expected_cmd = '{cmd} -i {in_file} -l {lesion_mask} -o {out_file}'.format( + expected_cmd = "{cmd} -i {in_file} -l {lesion_mask} -o {out_file}".format( 
cmd=cmd, in_file=in_file, lesion_mask=lesion_mask, - out_file='im1_lesions_filled.nii.gz', + out_file="im1_lesions_filled.nii.gz", ) assert seg_fill.cmdline == expected_cmd diff --git a/nipype/interfaces/niftyseg/tests/test_maths.py b/nipype/interfaces/niftyseg/tests/test_maths.py index 0680a8a481..84740b7447 100644 --- a/nipype/interfaces/niftyseg/tests/test_maths.py +++ b/nipype/interfaces/niftyseg/tests/test_maths.py @@ -6,18 +6,17 @@ from ....testing import example_data from ...niftyreg import get_custom_path from ...niftyreg.tests.test_regutils import no_nifty_tool -from .. import (UnaryMaths, BinaryMaths, BinaryMathsInteger, TupleMaths, Merge) +from .. import UnaryMaths, BinaryMaths, BinaryMathsInteger, TupleMaths, Merge -@pytest.mark.skipif( - no_nifty_tool(cmd='seg_maths'), reason="niftyseg is not installed") +@pytest.mark.skipif(no_nifty_tool(cmd="seg_maths"), reason="niftyseg is not installed") def test_unary_maths(): # Create a node object unarym = UnaryMaths() # Check if the command is properly defined - cmd = get_custom_path('seg_maths', env_dir='NIFTYSEGDIR') + cmd = get_custom_path("seg_maths", env_dir="NIFTYSEGDIR") assert unarym.cmd == cmd # test raising error with mandatory args absent @@ -25,26 +24,26 @@ def test_unary_maths(): unarym.run() # Assign some input data - in_file = example_data('im1.nii') + in_file = example_data("im1.nii") unarym.inputs.in_file = in_file - unarym.inputs.operation = 'otsu' - unarym.inputs.output_datatype = 'float' + unarym.inputs.operation = "otsu" + unarym.inputs.output_datatype = "float" - expected_cmd = '{cmd} {in_file} -otsu -odt float {out_file}'.format( - cmd=cmd, in_file=in_file, out_file='im1_otsu.nii') + expected_cmd = "{cmd} {in_file} -otsu -odt float {out_file}".format( + cmd=cmd, in_file=in_file, out_file="im1_otsu.nii" + ) assert unarym.cmdline == expected_cmd -@pytest.mark.skipif( - no_nifty_tool(cmd='seg_maths'), reason="niftyseg is not installed") +@pytest.mark.skipif(no_nifty_tool(cmd="seg_maths"), reason="niftyseg is not installed") def test_binary_maths(): # Create a node object binarym = BinaryMaths() # Check if the command is properly defined - cmd = get_custom_path('seg_maths', env_dir='NIFTYSEGDIR') + cmd = get_custom_path("seg_maths", env_dir="NIFTYSEGDIR") assert binarym.cmd == cmd # test raising error with mandatory args absent @@ -52,28 +51,26 @@ def test_binary_maths(): binarym.run() # Assign some input data - in_file = example_data('im1.nii') + in_file = example_data("im1.nii") binarym.inputs.in_file = in_file binarym.inputs.operand_value = 2.0 - binarym.inputs.operation = 'sub' - binarym.inputs.output_datatype = 'float' + binarym.inputs.operation = "sub" + binarym.inputs.output_datatype = "float" - cmd_tmp = '{cmd} {in_file} -sub 2.00000000 -odt float {out_file}' - expected_cmd = cmd_tmp.format( - cmd=cmd, in_file=in_file, out_file='im1_sub.nii') + cmd_tmp = "{cmd} {in_file} -sub 2.00000000 -odt float {out_file}" + expected_cmd = cmd_tmp.format(cmd=cmd, in_file=in_file, out_file="im1_sub.nii") assert binarym.cmdline == expected_cmd -@pytest.mark.skipif( - no_nifty_tool(cmd='seg_maths'), reason="niftyseg is not installed") +@pytest.mark.skipif(no_nifty_tool(cmd="seg_maths"), reason="niftyseg is not installed") def test_int_binary_maths(): # Create a node object ibinarym = BinaryMathsInteger() # Check if the command is properly defined - cmd = get_custom_path('seg_maths', env_dir='NIFTYSEGDIR') + cmd = get_custom_path("seg_maths", env_dir="NIFTYSEGDIR") assert ibinarym.cmd == cmd # test raising error with mandatory 
args absent @@ -81,27 +78,27 @@ def test_int_binary_maths(): ibinarym.run() # Assign some input data - in_file = example_data('im1.nii') + in_file = example_data("im1.nii") ibinarym.inputs.in_file = in_file ibinarym.inputs.operand_value = 2 - ibinarym.inputs.operation = 'dil' - ibinarym.inputs.output_datatype = 'float' + ibinarym.inputs.operation = "dil" + ibinarym.inputs.output_datatype = "float" - expected_cmd = '{cmd} {in_file} -dil 2 -odt float {out_file}'.format( - cmd=cmd, in_file=in_file, out_file='im1_dil.nii') + expected_cmd = "{cmd} {in_file} -dil 2 -odt float {out_file}".format( + cmd=cmd, in_file=in_file, out_file="im1_dil.nii" + ) assert ibinarym.cmdline == expected_cmd -@pytest.mark.skipif( - no_nifty_tool(cmd='seg_maths'), reason="niftyseg is not installed") +@pytest.mark.skipif(no_nifty_tool(cmd="seg_maths"), reason="niftyseg is not installed") def test_tuple_maths(): # Create a node object tuplem = TupleMaths() # Check if the command is properly defined - cmd = get_custom_path('seg_maths', env_dir='NIFTYSEGDIR') + cmd = get_custom_path("seg_maths", env_dir="NIFTYSEGDIR") assert tuplem.cmd == cmd # test raising error with mandatory args absent @@ -109,30 +106,30 @@ def test_tuple_maths(): tuplem.run() # Assign some input data - in_file = example_data('im1.nii') - op_file = example_data('im2.nii') + in_file = example_data("im1.nii") + op_file = example_data("im2.nii") tuplem.inputs.in_file = in_file - tuplem.inputs.operation = 'lncc' + tuplem.inputs.operation = "lncc" tuplem.inputs.operand_file1 = op_file tuplem.inputs.operand_value2 = 2.0 - tuplem.inputs.output_datatype = 'float' + tuplem.inputs.output_datatype = "float" - cmd_tmp = '{cmd} {in_file} -lncc {op} 2.00000000 -odt float {out_file}' + cmd_tmp = "{cmd} {in_file} -lncc {op} 2.00000000 -odt float {out_file}" expected_cmd = cmd_tmp.format( - cmd=cmd, in_file=in_file, op=op_file, out_file='im1_lncc.nii') + cmd=cmd, in_file=in_file, op=op_file, out_file="im1_lncc.nii" + ) assert tuplem.cmdline == expected_cmd -@pytest.mark.skipif( - no_nifty_tool(cmd='seg_maths'), reason="niftyseg is not installed") +@pytest.mark.skipif(no_nifty_tool(cmd="seg_maths"), reason="niftyseg is not installed") def test_merge(): # Create a node object merge = Merge() # Check if the command is properly defined - cmd = get_custom_path('seg_maths', env_dir='NIFTYSEGDIR') + cmd = get_custom_path("seg_maths", env_dir="NIFTYSEGDIR") assert merge.cmd == cmd # test raising error with mandatory args absent @@ -140,20 +137,17 @@ def test_merge(): merge.run() # Assign some input data - in_file = example_data('im1.nii') - file1 = example_data('im2.nii') - file2 = example_data('im3.nii') + in_file = example_data("im1.nii") + file1 = example_data("im2.nii") + file2 = example_data("im3.nii") merge.inputs.in_file = in_file merge.inputs.merge_files = [file1, file2] merge.inputs.dimension = 2 - merge.inputs.output_datatype = 'float' + merge.inputs.output_datatype = "float" - cmd_tmp = '{cmd} {in_file} -merge 2 2 {f1} {f2} -odt float {out_file}' + cmd_tmp = "{cmd} {in_file} -merge 2 2 {f1} {f2} -odt float {out_file}" expected_cmd = cmd_tmp.format( - cmd=cmd, - in_file=in_file, - f1=file1, - f2=file2, - out_file='im1_merged.nii') + cmd=cmd, in_file=in_file, f1=file1, f2=file2, out_file="im1_merged.nii" + ) assert merge.cmdline == expected_cmd diff --git a/nipype/interfaces/niftyseg/tests/test_stats.py b/nipype/interfaces/niftyseg/tests/test_stats.py index 91c234e98d..cfeefe404a 100644 --- a/nipype/interfaces/niftyseg/tests/test_stats.py +++ 
b/nipype/interfaces/niftyseg/tests/test_stats.py @@ -9,15 +9,14 @@ from .. import UnaryStats, BinaryStats -@pytest.mark.skipif( - no_nifty_tool(cmd='seg_stats'), reason="niftyseg is not installed") +@pytest.mark.skipif(no_nifty_tool(cmd="seg_stats"), reason="niftyseg is not installed") def test_unary_stats(): """ Test for the seg_stats interfaces """ # Create a node object unarys = UnaryStats() # Check if the command is properly defined - cmd = get_custom_path('seg_stats', env_dir='NIFTYSEGDIR') + cmd = get_custom_path("seg_stats", env_dir="NIFTYSEGDIR") assert unarys.cmd == cmd # test raising error with mandatory args absent @@ -25,24 +24,23 @@ def test_unary_stats(): unarys.run() # Assign some input data - in_file = example_data('im1.nii') + in_file = example_data("im1.nii") unarys.inputs.in_file = in_file - unarys.inputs.operation = 'a' + unarys.inputs.operation = "a" - expected_cmd = '{cmd} {in_file} -a'.format(cmd=cmd, in_file=in_file) + expected_cmd = "{cmd} {in_file} -a".format(cmd=cmd, in_file=in_file) assert unarys.cmdline == expected_cmd -@pytest.mark.skipif( - no_nifty_tool(cmd='seg_stats'), reason="niftyseg is not installed") +@pytest.mark.skipif(no_nifty_tool(cmd="seg_stats"), reason="niftyseg is not installed") def test_binary_stats(): """ Test for the seg_stats interfaces """ # Create a node object binarys = BinaryStats() # Check if the command is properly defined - cmd = get_custom_path('seg_stats', env_dir='NIFTYSEGDIR') + cmd = get_custom_path("seg_stats", env_dir="NIFTYSEGDIR") assert binarys.cmd == cmd # test raising error with mandatory args absent @@ -50,12 +48,11 @@ def test_binary_stats(): binarys.run() # Assign some input data - in_file = example_data('im1.nii') + in_file = example_data("im1.nii") binarys.inputs.in_file = in_file binarys.inputs.operand_value = 2 - binarys.inputs.operation = 'sa' + binarys.inputs.operation = "sa" - expected_cmd = '{cmd} {in_file} -sa 2.00000000'.format( - cmd=cmd, in_file=in_file) + expected_cmd = "{cmd} {in_file} -sa 2.00000000".format(cmd=cmd, in_file=in_file) assert binarys.cmdline == expected_cmd diff --git a/nipype/interfaces/nilearn.py b/nipype/interfaces/nilearn.py index 06bfdf899f..38ecb84a3a 100644 --- a/nipype/interfaces/nilearn.py +++ b/nipype/interfaces/nilearn.py @@ -1,78 +1,88 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -''' +""" Algorithms to compute statistics on :abbr:`fMRI (functional MRI)` -''' +""" import os import numpy as np import nibabel as nb -from ..interfaces.base import (traits, TraitedSpec, LibraryBaseInterface, - SimpleInterface, BaseInterfaceInputSpec, File, - InputMultiPath) +from ..interfaces.base import ( + traits, + TraitedSpec, + LibraryBaseInterface, + SimpleInterface, + BaseInterfaceInputSpec, + File, + InputMultiPath, +) class NilearnBaseInterface(LibraryBaseInterface): - _pkg = 'nilearn' + _pkg = "nilearn" class SignalExtractionInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc='4-D fMRI nii file') + in_file = File(exists=True, mandatory=True, desc="4-D fMRI nii file") label_files = InputMultiPath( File(exists=True), mandatory=True, - desc='a 3-D label image, with 0 denoting ' - 'background, or a list of 3-D probability ' - 'maps (one per label) or the equivalent 4D ' - 'file.') + desc="a 3-D label image, with 0 denoting " + "background, or a list of 3-D probability " + "maps (one per label) or the equivalent 4D " + "file.", + ) class_labels = traits.List( 
mandatory=True, - desc='Human-readable labels for each segment ' - 'in the label file, in order. The length of ' - 'class_labels must be equal to the number of ' - 'segments (background excluded). This list ' - 'corresponds to the class labels in label_file ' - 'in ascending order') + desc="Human-readable labels for each segment " + "in the label file, in order. The length of " + "class_labels must be equal to the number of " + "segments (background excluded). This list " + "corresponds to the class labels in label_file " + "in ascending order", + ) out_file = File( - 'signals.tsv', + "signals.tsv", usedefault=True, exists=False, - desc='The name of the file to output to. ' - 'signals.tsv by default') + desc="The name of the file to output to. " "signals.tsv by default", + ) incl_shared_variance = traits.Bool( True, usedefault=True, - desc='By default ' - '(True), returns simple time series calculated from each ' - 'region independently (e.g., for noise regression). If ' - 'False, returns unique signals for each region, discarding ' - 'shared variance (e.g., for connectivity. Only has effect ' - 'with 4D probability maps.') + desc="By default " + "(True), returns simple time series calculated from each " + "region independently (e.g., for noise regression). If " + "False, returns unique signals for each region, discarding " + "shared variance (e.g., for connectivity. Only has effect " + "with 4D probability maps.", + ) include_global = traits.Bool( False, usedefault=True, - desc='If True, include an extra column ' + desc="If True, include an extra column " 'labeled "GlobalSignal", with values calculated from the entire brain ' - '(instead of just regions).') + "(instead of just regions).", + ) detrend = traits.Bool( - False, - usedefault=True, - desc='If True, perform detrending using nilearn.') + False, usedefault=True, desc="If True, perform detrending using nilearn." 
+ ) class SignalExtractionOutputSpec(TraitedSpec): out_file = File( exists=True, - desc='tsv file containing the computed ' - 'signals, with as many columns as there are labels and as ' - 'many rows as there are timepoints in in_file, plus a ' - 'header row with values from class_labels') + desc="tsv file containing the computed " + "signals, with as many columns as there are labels and as " + "many rows as there are timepoints in in_file, plus a " + "header row with values from class_labels", + ) class SignalExtraction(NilearnBaseInterface, SimpleInterface): - ''' + """ Extracts signals over tissue classes or brain regions >>> seinterface = SignalExtraction() @@ -83,7 +93,8 @@ class SignalExtraction(NilearnBaseInterface, SimpleInterface): >>> seinterface.inputs.class_labels = segments >>> seinterface.inputs.detrend = True >>> seinterface.inputs.include_global = True - ''' + """ + input_spec = SignalExtractionInputSpec output_spec = SignalExtractionOutputSpec @@ -95,20 +106,17 @@ def _run_interface(self, runtime): signals.append(masker.fit_transform(self.inputs.in_file)) region_signals = np.hstack(signals) - output = np.vstack((self.inputs.class_labels, - region_signals.astype(str))) + output = np.vstack((self.inputs.class_labels, region_signals.astype(str))) # save output - self._results['out_file'] = os.path.join(runtime.cwd, - self.inputs.out_file) - np.savetxt( - self._results['out_file'], output, fmt=b'%s', delimiter='\t') + self._results["out_file"] = os.path.join(runtime.cwd, self.inputs.out_file) + np.savetxt(self._results["out_file"], output, fmt=b"%s", delimiter="\t") return runtime def _process_inputs(self): - ''' validate and process inputs into useful form. + """ validate and process inputs into useful form. Returns a list of nilearn maskers and the list of corresponding label - names.''' + names.""" import nilearn.input_data as nl import nilearn.image as nli @@ -124,34 +132,41 @@ def _process_inputs(self): if self.inputs.incl_shared_variance: # independent computation for img in nli.iter_img(label_data): maskers.append( - nl.NiftiMapsMasker( - self._4d(img.get_data(), img.affine))) + nl.NiftiMapsMasker(self._4d(img.get_data(), img.affine)) + ) else: # one computation fitting all maskers.append(nl.NiftiMapsMasker(label_data)) # check label list size if not np.isclose(int(n_labels), n_labels): raise ValueError( - 'The label files {} contain invalid value {}. Check input.' - .format(self.inputs.label_files, n_labels)) + "The label files {} contain invalid value {}. 
Check input.".format( + self.inputs.label_files, n_labels + ) + ) if len(self.inputs.class_labels) != n_labels: - raise ValueError('The length of class_labels {} does not ' - 'match the number of regions {} found in ' - 'label_files {}'.format(self.inputs.class_labels, - n_labels, - self.inputs.label_files)) + raise ValueError( + "The length of class_labels {} does not " + "match the number of regions {} found in " + "label_files {}".format( + self.inputs.class_labels, n_labels, self.inputs.label_files + ) + ) if self.inputs.include_global: global_label_data = label_data.get_data().sum( - axis=3) # sum across all regions - global_label_data = np.rint(global_label_data).astype(int).clip( - 0, 1) # binarize + axis=3 + ) # sum across all regions + global_label_data = ( + np.rint(global_label_data).astype(int).clip(0, 1) + ) # binarize global_label_data = self._4d(global_label_data, label_data.affine) global_masker = nl.NiftiLabelsMasker( - global_label_data, detrend=self.inputs.detrend) + global_label_data, detrend=self.inputs.detrend + ) maskers.insert(0, global_masker) - self.inputs.class_labels.insert(0, 'GlobalSignal') + self.inputs.class_labels.insert(0, "GlobalSignal") for masker in maskers: masker.set_params(detrend=self.inputs.detrend) @@ -159,6 +174,6 @@ def _process_inputs(self): return maskers def _4d(self, array, affine): - ''' takes a 3-dimensional numpy array and an affine, - returns the equivalent 4th dimensional nifti file ''' + """ takes a 3-dimensional numpy array and an affine, + returns the equivalent 4th dimensional nifti file """ return nb.Nifti1Image(array[:, :, :, np.newaxis], affine) diff --git a/nipype/interfaces/nipy/base.py b/nipype/interfaces/nipy/base.py index 077499c52a..0991730e81 100644 --- a/nipype/interfaces/nipy/base.py +++ b/nipype/interfaces/nipy/base.py @@ -11,10 +11,10 @@ # Remove in 2.0 have_nipy = True try: - package_check('nipy') + package_check("nipy") except ImportError: have_nipy = False class NipyBaseInterface(LibraryBaseInterface): - _pkg = 'nipy' + _pkg = "nipy" diff --git a/nipype/interfaces/nipy/model.py b/nipype/interfaces/nipy/model.py index 66d1bbb6a9..c6287dac28 100644 --- a/nipype/interfaces/nipy/model.py +++ b/nipype/interfaces/nipy/model.py @@ -4,8 +4,14 @@ from ...utils import NUMPY_MMAP from .base import NipyBaseInterface -from ..base import (TraitedSpec, traits, File, OutputMultiPath, - BaseInterfaceInputSpec, isdefined) +from ..base import ( + TraitedSpec, + traits, + File, + OutputMultiPath, + BaseInterfaceInputSpec, + isdefined, +) class FitGLMInputSpec(BaseInterfaceInputSpec): @@ -13,49 +19,61 @@ class FitGLMInputSpec(BaseInterfaceInputSpec): minlen=1, maxlen=1, mandatory=True, - desc=('Session specific information generated by' - ' ``modelgen.SpecifyModel``, FitGLM does ' - 'not support multiple runs uless they are ' - 'concatenated (see SpecifyModel options)')) + desc=( + "Session specific information generated by" + " ``modelgen.SpecifyModel``, FitGLM does " + "not support multiple runs uless they are " + "concatenated (see SpecifyModel options)" + ), + ) hrf_model = traits.Enum( - 'Canonical', - 'Canonical With Derivative', - 'FIR', - desc=("that specifies the hemodynamic reponse " - "function it can be 'Canonical', 'Canonical " - "With Derivative' or 'FIR'"), - usedefault=True) + "Canonical", + "Canonical With Derivative", + "FIR", + desc=( + "that specifies the hemodynamic reponse " + "function it can be 'Canonical', 'Canonical " + "With Derivative' or 'FIR'" + ), + usedefault=True, + ) drift_model = traits.Enum( "Cosine", 
"Polynomial", "Blank", - desc=("string that specifies the desired drift " - "model, to be chosen among 'Polynomial', " - "'Cosine', 'Blank'"), - usedefault=True) + desc=( + "string that specifies the desired drift " + "model, to be chosen among 'Polynomial', " + "'Cosine', 'Blank'" + ), + usedefault=True, + ) TR = traits.Float(mandatory=True) model = traits.Enum( "ar1", "spherical", - desc=("autoregressive mode is available only for the " - "kalman method"), - usedefault=True) + desc=("autoregressive mode is available only for the " "kalman method"), + usedefault=True, + ) method = traits.Enum( "kalman", "ols", - desc=("method to fit the model, ols or kalma; kalman " - "is more time consuming but it supports " - "autoregressive model"), - usedefault=True) + desc=( + "method to fit the model, ols or kalma; kalman " + "is more time consuming but it supports " + "autoregressive model" + ), + usedefault=True, + ) mask = File( exists=True, - desc=("restrict the fitting only to the region defined " - "by this mask")) + desc=("restrict the fitting only to the region defined " "by this mask"), + ) normalize_design_matrix = traits.Bool( False, - desc=("normalize (zscore) the " - "regressors before fitting"), - usedefault=True) + desc=("normalize (zscore) the " "regressors before fitting"), + usedefault=True, + ) save_residuals = traits.Bool(False, usedefault=True) plot_design_matrix = traits.Bool(False, usedefault=True) @@ -73,9 +91,10 @@ class FitGLMOutputSpec(TraitedSpec): class FitGLM(NipyBaseInterface): - ''' + """ Fit GLM model based on the specified design. Supports only single or concatenated runs. - ''' + """ + input_spec = FitGLMInputSpec output_spec = FitGLMOutputSpec @@ -84,6 +103,7 @@ def _run_interface(self, runtime): import numpy as np import nipy.modalities.fmri.glm as GLM import nipy.modalities.fmri.design_matrix as dm + try: BlockParadigm = dm.BlockParadigm except AttributeError: @@ -91,7 +111,7 @@ def _run_interface(self, runtime): session_info = self.inputs.session_info - functional_runs = self.inputs.session_info[0]['scans'] + functional_runs = self.inputs.session_info[0]["scans"] if isinstance(functional_runs, (str, bytes)): functional_runs = [functional_runs] nii = nb.load(functional_runs[0]) @@ -115,21 +135,22 @@ def _run_interface(self, runtime): nscans = timeseries.shape[1] - if 'hpf' in list(session_info[0].keys()): - hpf = session_info[0]['hpf'] + if "hpf" in list(session_info[0].keys()): + hpf = session_info[0]["hpf"] drift_model = self.inputs.drift_model else: hpf = 0 drift_model = "Blank" reg_names = [] - for reg in session_info[0]['regress']: - reg_names.append(reg['name']) + for reg in session_info[0]["regress"]: + reg_names.append(reg["name"]) reg_vals = np.zeros((nscans, len(reg_names))) for i in range(len(reg_names)): - reg_vals[:, i] = np.array( - session_info[0]['regress'][i]['val']).reshape(1, -1) + reg_vals[:, i] = np.array(session_info[0]["regress"][i]["val"]).reshape( + 1, -1 + ) frametimes = np.linspace(0, (nscans - 1) * self.inputs.TR, nscans) @@ -137,17 +158,16 @@ def _run_interface(self, runtime): onsets = [] duration = [] - for i, cond in enumerate(session_info[0]['cond']): - onsets += cond['onset'] - conditions += [cond['name']] * len(cond['onset']) - if len(cond['duration']) == 1: - duration += cond['duration'] * len(cond['onset']) + for i, cond in enumerate(session_info[0]["cond"]): + onsets += cond["onset"] + conditions += [cond["name"]] * len(cond["onset"]) + if len(cond["duration"]) == 1: + duration += cond["duration"] * len(cond["onset"]) else: - 
duration += cond['duration'] + duration += cond["duration"] if conditions: - paradigm = BlockParadigm( - con_id=conditions, onset=onsets, duration=duration) + paradigm = BlockParadigm(con_id=conditions, onset=onsets, duration=duration) else: paradigm = None design_matrix, self._reg_names = dm.dmtx_light( @@ -157,15 +177,17 @@ def _run_interface(self, runtime): hfcut=hpf, hrf_model=self.inputs.hrf_model, add_regs=reg_vals, - add_reg_names=reg_names) + add_reg_names=reg_names, + ) if self.inputs.normalize_design_matrix: for i in range(len(self._reg_names) - 1): - design_matrix[:, i] = (( - design_matrix[:, i] - design_matrix[:, i].mean()) / - design_matrix[:, i].std()) + design_matrix[:, i] = ( + design_matrix[:, i] - design_matrix[:, i].mean() + ) / design_matrix[:, i].std() if self.inputs.plot_design_matrix: import pylab + pylab.pcolor(design_matrix) pylab.savefig("design_matrix.pdf") pylab.close() @@ -176,10 +198,11 @@ def _run_interface(self, runtime): timeseries.T, design_matrix, method=self.inputs.method, - model=self.inputs.model) + model=self.inputs.model, + ) self._beta_file = os.path.abspath("beta.nii") - beta = np.zeros(mask.shape + (glm.beta.shape[0], )) + beta = np.zeros(mask.shape + (glm.beta.shape[0],)) beta[mask, :] = glm.beta.T nb.save(nb.Nifti1Image(beta, nii.affine), self._beta_file) @@ -190,11 +213,10 @@ def _run_interface(self, runtime): if self.inputs.save_residuals: explained = np.dot(design_matrix, glm.beta) - residuals = np.zeros(mask.shape + (nscans, )) + residuals = np.zeros(mask.shape + (nscans,)) residuals[mask, :] = timeseries - explained.T self._residuals_file = os.path.abspath("residuals.nii") - nb.save( - nb.Nifti1Image(residuals, nii.affine), self._residuals_file) + nb.save(nb.Nifti1Image(residuals, nii.affine), self._residuals_file) self._nvbeta = glm.nvbeta self._dof = glm.dof @@ -229,33 +251,53 @@ def _list_outputs(self): class EstimateContrastInputSpec(BaseInterfaceInputSpec): contrasts = traits.List( traits.Either( - traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), - traits.List(traits.Float)), - traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), - traits.List(traits.Float), traits.List(traits.Float)), - traits.Tuple(traits.Str, traits.Enum('F'), - traits.List( - traits.Either( - traits.Tuple(traits.Str, traits.Enum('T'), - traits.List(traits.Str), - traits.List(traits.Float)), - traits.Tuple(traits.Str, traits.Enum('T'), - traits.List(traits.Str), - traits.List(traits.Float), - traits.List(traits.Float)))))), + traits.Tuple( + traits.Str, + traits.Enum("T"), + traits.List(traits.Str), + traits.List(traits.Float), + ), + traits.Tuple( + traits.Str, + traits.Enum("T"), + traits.List(traits.Str), + traits.List(traits.Float), + traits.List(traits.Float), + ), + traits.Tuple( + traits.Str, + traits.Enum("F"), + traits.List( + traits.Either( + traits.Tuple( + traits.Str, + traits.Enum("T"), + traits.List(traits.Str), + traits.List(traits.Float), + ), + traits.Tuple( + traits.Str, + traits.Enum("T"), + traits.List(traits.Str), + traits.List(traits.Float), + traits.List(traits.Float), + ), + ) + ), + ), + ), desc="""List of contrasts with each contrast being a list of the form: [('name', 'stat', [condition list], [weight list], [session list])]. if session list is None or not provided, all sessions are used. 
For F contrasts, the condition list should contain previously defined T-contrasts.""", - mandatory=True) + mandatory=True, + ) beta = File( - exists=True, - desc="beta coefficients of the fitted model", - mandatory=True) + exists=True, desc="beta coefficients of the fitted model", mandatory=True + ) nvbeta = traits.Any(mandatory=True) - s2 = File( - exists=True, desc="squared variance of the residuals", mandatory=True) + s2 = File(exists=True, desc="squared variance of the residuals", mandatory=True) dof = traits.Any(desc="degrees of freedom", mandatory=True) constants = traits.Any(mandatory=True) axis = traits.Any(mandatory=True) @@ -270,9 +312,10 @@ class EstimateContrastOutputSpec(TraitedSpec): class EstimateContrast(NipyBaseInterface): - ''' + """ Estimate contrast of a fitted model. - ''' + """ + input_spec = EstimateContrastInputSpec output_spec = EstimateContrastOutputSpec diff --git a/nipype/interfaces/nipy/preprocess.py b/nipype/interfaces/nipy/preprocess.py index 0f6ee5031d..3da52fbd04 100644 --- a/nipype/interfaces/nipy/preprocess.py +++ b/nipype/interfaces/nipy/preprocess.py @@ -8,19 +8,30 @@ from ...utils.filemanip import split_filename, fname_presuffix from .base import NipyBaseInterface, have_nipy -from ..base import (TraitedSpec, traits, BaseInterfaceInputSpec, - isdefined, File, InputMultiPath, OutputMultiPath) +from ..base import ( + TraitedSpec, + traits, + BaseInterfaceInputSpec, + isdefined, + File, + InputMultiPath, + OutputMultiPath, +) class ComputeMaskInputSpec(BaseInterfaceInputSpec): mean_volume = File( exists=True, mandatory=True, - desc="mean EPI image, used to compute the threshold for the mask") + desc="mean EPI image, used to compute the threshold for the mask", + ) reference_volume = File( exists=True, - desc=("reference volume used to compute the mask. " - "If none is give, the mean volume is used.")) + desc=( + "reference volume used to compute the mask. " + "If none is give, the mean volume is used." 
+ ), + ) m = traits.Float(desc="lower fraction of the histogram to be discarded") M = traits.Float(desc="upper fraction of the histogram to be discarded") cc = traits.Bool(desc="Keep only the largest connected component") @@ -36,14 +47,16 @@ class ComputeMask(NipyBaseInterface): def _run_interface(self, runtime): from nipy.labs.mask import compute_mask + args = {} for key in [ - k for k, _ in list(self.inputs.items()) - if k not in BaseInterfaceInputSpec().trait_names() + k + for k, _ in list(self.inputs.items()) + if k not in BaseInterfaceInputSpec().trait_names() ]: value = getattr(self.inputs, key) if isdefined(value): - if key in ['mean_volume', 'reference_volume']: + if key in ["mean_volume", "reference_volume"]: nii = nb.load(value, mmap=NUMPY_MMAP) value = nii.get_data() args[key] = value @@ -53,7 +66,8 @@ def _run_interface(self, runtime): self._brain_mask_path = os.path.abspath("%s_mask.%s" % (name, ext)) nb.save( nb.Nifti1Image(brain_mask.astype(np.uint8), nii.affine), - self._brain_mask_path) + self._brain_mask_path, + ) return runtime @@ -65,34 +79,42 @@ def _list_outputs(self): class SpaceTimeRealignerInputSpec(BaseInterfaceInputSpec): in_file = InputMultiPath( - File(exists=True), - mandatory=True, - min_ver='0.4.0.dev', - desc="File to realign") - tr = traits.Float(desc="TR in seconds", requires=['slice_times']) + File(exists=True), mandatory=True, min_ver="0.4.0.dev", desc="File to realign" + ) + tr = traits.Float(desc="TR in seconds", requires=["slice_times"]) slice_times = traits.Either( traits.List(traits.Float()), - traits.Enum('asc_alt_2', 'asc_alt_2_1', 'asc_alt_half', - 'asc_alt_siemens', 'ascending', 'desc_alt_2', - 'desc_alt_half', 'descending'), - desc=('Actual slice acquisition times.')) + traits.Enum( + "asc_alt_2", + "asc_alt_2_1", + "asc_alt_half", + "asc_alt_siemens", + "ascending", + "desc_alt_2", + "desc_alt_half", + "descending", + ), + desc=("Actual slice acquisition times."), + ) slice_info = traits.Either( traits.Int, traits.List(min_len=2, max_len=2), - desc=('Single integer or length 2 sequence ' - 'If int, the axis in `images` that is the ' - 'slice axis. In a 4D image, this will ' - 'often be axis = 2. If a 2 sequence, then' - ' elements are ``(slice_axis, ' - 'slice_direction)``, where ``slice_axis`` ' - 'is the slice axis in the image as above, ' - 'and ``slice_direction`` is 1 if the ' - 'slices were acquired slice 0 first, slice' - ' -1 last, or -1 if acquired slice -1 ' - 'first, slice 0 last. If `slice_info` is ' - 'an int, assume ' - '``slice_direction`` == 1.'), - requires=['slice_times'], + desc=( + "Single integer or length 2 sequence " + "If int, the axis in `images` that is the " + "slice axis. In a 4D image, this will " + "often be axis = 2. If a 2 sequence, then" + " elements are ``(slice_axis, " + "slice_direction)``, where ``slice_axis`` " + "is the slice axis in the image as above, " + "and ``slice_direction`` is 1 if the " + "slices were acquired slice 0 first, slice" + " -1 last, or -1 if acquired slice -1 " + "first, slice 0 last. If `slice_info` is " + "an int, assume " + "``slice_direction`` == 1." + ), + requires=["slice_times"], ) @@ -100,8 +122,8 @@ class SpaceTimeRealignerOutputSpec(TraitedSpec): out_file = OutputMultiPath(File(exists=True), desc="Realigned files") par_file = OutputMultiPath( File(exists=True), - desc=("Motion parameter files. Angles are not " - "euler angles")) + desc=("Motion parameter files. 
Angles are not " "euler angles"), + ) class SpaceTimeRealigner(NipyBaseInterface): @@ -141,18 +163,20 @@ class SpaceTimeRealigner(NipyBaseInterface): input_spec = SpaceTimeRealignerInputSpec output_spec = SpaceTimeRealignerOutputSpec - keywords = ['slice timing', 'motion correction'] + keywords = ["slice timing", "motion correction"] def _run_interface(self, runtime): from nipy import save_image, load_image + all_ims = [load_image(fname) for fname in self.inputs.in_file] if not isdefined(self.inputs.slice_times): - from nipy.algorithms.registration.groupwise_registration import \ - SpaceRealign + from nipy.algorithms.registration.groupwise_registration import SpaceRealign + R = SpaceRealign(all_ims) else: from nipy.algorithms.registration import SpaceTimeRealign + R = SpaceTimeRealign( all_ims, tr=self.inputs.tr, @@ -168,24 +192,25 @@ def _run_interface(self, runtime): for j, corr in enumerate(corr_run): self._out_file_path.append( - os.path.abspath('corr_%s.nii.gz' % - (split_filename(self.inputs.in_file[j])[1]))) + os.path.abspath( + "corr_%s.nii.gz" % (split_filename(self.inputs.in_file[j])[1]) + ) + ) save_image(corr, self._out_file_path[j]) self._par_file_path.append( - os.path.abspath('%s.par' % - (os.path.split(self.inputs.in_file[j])[1]))) - mfile = open(self._par_file_path[j], 'w') + os.path.abspath("%s.par" % (os.path.split(self.inputs.in_file[j])[1])) + ) + mfile = open(self._par_file_path[j], "w") motion = R._transforms[j] # nipy does not encode euler angles. return in original form of # translation followed by rotation vector see: # http://en.wikipedia.org/wiki/Rodrigues'_rotation_formula for i, mo in enumerate(motion): params = [ - '%.10f' % item - for item in np.hstack((mo.translation, mo.rotation)) + "%.10f" % item for item in np.hstack((mo.translation, mo.rotation)) ] - string = ' '.join(params) + '\n' + string = " ".join(params) + "\n" mfile.write(string) mfile.close() @@ -193,23 +218,23 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self._outputs().get() - outputs['out_file'] = self._out_file_path - outputs['par_file'] = self._par_file_path + outputs["out_file"] = self._out_file_path + outputs["par_file"] = self._par_file_path return outputs class TrimInputSpec(BaseInterfaceInputSpec): in_file = File(exists=True, mandatory=True, desc="EPI image to trim") - begin_index = traits.Int(0, usedefault=True, desc='first volume') + begin_index = traits.Int(0, usedefault=True, desc="first volume") end_index = traits.Int( - 0, - usedefault=True, - desc='last volume indexed as in python (and 0 for last)') - out_file = File(desc='output filename') + 0, usedefault=True, desc="last volume indexed as in python (and 0 for last)" + ) + out_file = File(desc="output filename") suffix = traits.Str( - '_trim', + "_trim", usedefault=True, - desc='suffix for out_file to use if no out_file provided') + desc="suffix for out_file to use if no out_file provided", + ) class TrimOutputSpec(TraitedSpec): @@ -233,7 +258,7 @@ class Trim(NipyBaseInterface): output_spec = TrimOutputSpec def _run_interface(self, runtime): - out_file = self._list_outputs()['out_file'] + out_file = self._list_outputs()["out_file"] nii = nb.load(self.inputs.in_file) if self.inputs.end_index == 0: s = slice(self.inputs.begin_index, nii.shape[3]) @@ -245,11 +270,10 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = self.inputs.out_file - if not isdefined(outputs['out_file']): - outputs['out_file'] = fname_presuffix( - 
self.inputs.in_file, - newpath=os.getcwd(), - suffix=self.inputs.suffix) - outputs['out_file'] = os.path.abspath(outputs['out_file']) + outputs["out_file"] = self.inputs.out_file + if not isdefined(outputs["out_file"]): + outputs["out_file"] = fname_presuffix( + self.inputs.in_file, newpath=os.getcwd(), suffix=self.inputs.suffix + ) + outputs["out_file"] = os.path.abspath(outputs["out_file"]) return outputs diff --git a/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py b/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py index 943b3f43d4..aa34d55caf 100644 --- a/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py +++ b/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py @@ -7,19 +7,18 @@ def test_ComputeMask_inputs(): M=dict(), cc=dict(), m=dict(), - mean_volume=dict( - extensions=None, - mandatory=True, - ), - reference_volume=dict(extensions=None, ), + mean_volume=dict(extensions=None, mandatory=True,), + reference_volume=dict(extensions=None,), ) inputs = ComputeMask.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ComputeMask_outputs(): - output_map = dict(brain_mask=dict(extensions=None, ), ) + output_map = dict(brain_mask=dict(extensions=None,),) outputs = ComputeMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py b/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py index 3f9d725fac..c89423bc74 100644 --- a/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py +++ b/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py @@ -4,33 +4,25 @@ def test_EstimateContrast_inputs(): input_map = dict( - axis=dict(mandatory=True, ), - beta=dict( - extensions=None, - mandatory=True, - ), - constants=dict(mandatory=True, ), - contrasts=dict(mandatory=True, ), - dof=dict(mandatory=True, ), - mask=dict(extensions=None, ), - nvbeta=dict(mandatory=True, ), - reg_names=dict(mandatory=True, ), - s2=dict( - extensions=None, - mandatory=True, - ), + axis=dict(mandatory=True,), + beta=dict(extensions=None, mandatory=True,), + constants=dict(mandatory=True,), + contrasts=dict(mandatory=True,), + dof=dict(mandatory=True,), + mask=dict(extensions=None,), + nvbeta=dict(mandatory=True,), + reg_names=dict(mandatory=True,), + s2=dict(extensions=None, mandatory=True,), ) inputs = EstimateContrast.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_EstimateContrast_outputs(): - output_map = dict( - p_maps=dict(), - stat_maps=dict(), - z_maps=dict(), - ) + output_map = dict(p_maps=dict(), stat_maps=dict(), z_maps=dict(),) outputs = EstimateContrast.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nipy/tests/test_auto_FitGLM.py b/nipype/interfaces/nipy/tests/test_auto_FitGLM.py index d453b98a25..7aa96870c7 100644 --- a/nipype/interfaces/nipy/tests/test_auto_FitGLM.py +++ b/nipype/interfaces/nipy/tests/test_auto_FitGLM.py @@ -4,33 +4,35 @@ def test_FitGLM_inputs(): input_map = dict( - TR=dict(mandatory=True, ), - drift_model=dict(usedefault=True, ), - hrf_model=dict(usedefault=True, ), - mask=dict(extensions=None, ), - method=dict(usedefault=True, ), - model=dict(usedefault=True, ), - normalize_design_matrix=dict(usedefault=True, ), - plot_design_matrix=dict(usedefault=True, ), - save_residuals=dict(usedefault=True, ), - 
session_info=dict(mandatory=True, ), + TR=dict(mandatory=True,), + drift_model=dict(usedefault=True,), + hrf_model=dict(usedefault=True,), + mask=dict(extensions=None,), + method=dict(usedefault=True,), + model=dict(usedefault=True,), + normalize_design_matrix=dict(usedefault=True,), + plot_design_matrix=dict(usedefault=True,), + save_residuals=dict(usedefault=True,), + session_info=dict(mandatory=True,), ) inputs = FitGLM.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FitGLM_outputs(): output_map = dict( - a=dict(extensions=None, ), + a=dict(extensions=None,), axis=dict(), - beta=dict(extensions=None, ), + beta=dict(extensions=None,), constants=dict(), dof=dict(), nvbeta=dict(), reg_names=dict(), - residuals=dict(extensions=None, ), - s2=dict(extensions=None, ), + residuals=dict(extensions=None,), + s2=dict(extensions=None,), ) outputs = FitGLM.output_spec() diff --git a/nipype/interfaces/nipy/tests/test_auto_Similarity.py b/nipype/interfaces/nipy/tests/test_auto_Similarity.py index e09754d382..ac0b3c853a 100644 --- a/nipype/interfaces/nipy/tests/test_auto_Similarity.py +++ b/nipype/interfaces/nipy/tests/test_auto_Similarity.py @@ -4,25 +4,21 @@ def test_Similarity_inputs(): input_map = dict( - mask1=dict(extensions=None, ), - mask2=dict(extensions=None, ), - metric=dict(usedefault=True, ), - volume1=dict( - extensions=None, - mandatory=True, - ), - volume2=dict( - extensions=None, - mandatory=True, - ), + mask1=dict(extensions=None,), + mask2=dict(extensions=None,), + metric=dict(usedefault=True,), + volume1=dict(extensions=None, mandatory=True,), + volume2=dict(extensions=None, mandatory=True,), ) inputs = Similarity.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Similarity_outputs(): - output_map = dict(similarity=dict(), ) + output_map = dict(similarity=dict(),) outputs = Similarity.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py b/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py index c511737c70..2025d62498 100644 --- a/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py +++ b/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py @@ -4,24 +4,20 @@ def test_SpaceTimeRealigner_inputs(): input_map = dict( - in_file=dict( - mandatory=True, - min_ver='0.4.0.dev', - ), - slice_info=dict(requires=['slice_times'], ), + in_file=dict(mandatory=True, min_ver="0.4.0.dev",), + slice_info=dict(requires=["slice_times"],), slice_times=dict(), - tr=dict(requires=['slice_times'], ), + tr=dict(requires=["slice_times"],), ) inputs = SpaceTimeRealigner.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SpaceTimeRealigner_outputs(): - output_map = dict( - out_file=dict(), - par_file=dict(), - ) + output_map = dict(out_file=dict(), par_file=dict(),) outputs = SpaceTimeRealigner.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nipy/tests/test_auto_Trim.py b/nipype/interfaces/nipy/tests/test_auto_Trim.py index ff7e66ab97..4b33c8b4b2 100644 --- a/nipype/interfaces/nipy/tests/test_auto_Trim.py +++ b/nipype/interfaces/nipy/tests/test_auto_Trim.py @@ -4,22 +4,21 @@ def test_Trim_inputs(): 
input_map = dict( - begin_index=dict(usedefault=True, ), - end_index=dict(usedefault=True, ), - in_file=dict( - extensions=None, - mandatory=True, - ), - out_file=dict(extensions=None, ), - suffix=dict(usedefault=True, ), + begin_index=dict(usedefault=True,), + end_index=dict(usedefault=True,), + in_file=dict(extensions=None, mandatory=True,), + out_file=dict(extensions=None,), + suffix=dict(usedefault=True,), ) inputs = Trim.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Trim_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Trim.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nipy/utils.py b/nipype/interfaces/nipy/utils.py index 22856c01d6..23be89b4f5 100644 --- a/nipype/interfaces/nipy/utils.py +++ b/nipype/interfaces/nipy/utils.py @@ -4,8 +4,7 @@ import nibabel as nb from .base import NipyBaseInterface, have_nipy -from ..base import (TraitedSpec, traits, BaseInterfaceInputSpec, - File, isdefined) +from ..base import TraitedSpec, traits, BaseInterfaceInputSpec, File, isdefined class SimilarityInputSpec(BaseInterfaceInputSpec): @@ -14,7 +13,7 @@ class SimilarityInputSpec(BaseInterfaceInputSpec): mask1 = File(exists=True, desc="3D volume") mask2 = File(exists=True, desc="3D volume") metric = traits.Either( - traits.Enum('cc', 'cr', 'crl1', 'mi', 'nmi', 'slr'), + traits.Enum("cc", "cr", "crl1", "mi", "nmi", "slr"), traits.Callable(), desc="""str or callable Cost-function for assessing image similarity. If a string, @@ -24,7 +23,8 @@ class SimilarityInputSpec(BaseInterfaceInputSpec): supervised log-likelihood ratio. If a callable, it should take a two-dimensional array representing the image joint histogram as an input and return a float.""", - usedefault=True) + usedefault=True, + ) class SimilarityOutputSpec(TraitedSpec): @@ -55,13 +55,19 @@ class Similarity(NipyBaseInterface): output_spec = SimilarityOutputSpec def __init__(self, **inputs): - warnings.warn(("This interface is deprecated since 0.10.0." - " Please use nipype.algorithms.metrics.Similarity"), - DeprecationWarning) + warnings.warn( + ( + "This interface is deprecated since 0.10.0." 
+ " Please use nipype.algorithms.metrics.Similarity" + ), + DeprecationWarning, + ) super(Similarity, self).__init__(**inputs) def _run_interface(self, runtime): - from nipy.algorithms.registration.histogram_registration import HistogramRegistration + from nipy.algorithms.registration.histogram_registration import ( + HistogramRegistration, + ) from nipy.algorithms.registration.affine import Affine vol1_nii = nb.load(self.inputs.volume1) @@ -82,12 +88,13 @@ def _run_interface(self, runtime): to_img=vol2_nii, similarity=self.inputs.metric, from_mask=mask1, - to_mask=mask2) + to_mask=mask2, + ) self._similarity = histreg.eval(Affine()) return runtime def _list_outputs(self): outputs = self._outputs().get() - outputs['similarity'] = self._similarity + outputs["similarity"] = self._similarity return outputs diff --git a/nipype/interfaces/nitime/__init__.py b/nipype/interfaces/nitime/__init__.py index 656d601b3e..f237859eb6 100644 --- a/nipype/interfaces/nitime/__init__.py +++ b/nipype/interfaces/nitime/__init__.py @@ -2,5 +2,8 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from .analysis import (CoherenceAnalyzerInputSpec, CoherenceAnalyzerOutputSpec, - CoherenceAnalyzer) +from .analysis import ( + CoherenceAnalyzerInputSpec, + CoherenceAnalyzerOutputSpec, + CoherenceAnalyzer, +) diff --git a/nipype/interfaces/nitime/analysis.py b/nipype/interfaces/nitime/analysis.py index 7f393efadf..93787c1964 100644 --- a/nipype/interfaces/nitime/analysis.py +++ b/nipype/interfaces/nitime/analysis.py @@ -16,12 +16,18 @@ from ...utils.misc import package_check from ...utils.filemanip import fname_presuffix from .base import NitimeBaseInterface -from ..base import (TraitedSpec, File, Undefined, traits, - isdefined, BaseInterfaceInputSpec) +from ..base import ( + TraitedSpec, + File, + Undefined, + traits, + isdefined, + BaseInterfaceInputSpec, +) have_nitime = True try: - package_check('nitime') + package_check("nitime") except ImportError: have_nitime = False @@ -30,83 +36,96 @@ class CoherenceAnalyzerInputSpec(BaseInterfaceInputSpec): # Input either csv file, or time-series object and use _xor_inputs to # discriminate - _xor_inputs = ('in_file', 'in_TS') + _xor_inputs = ("in_file", "in_TS") in_file = File( - desc=('csv file with ROIs on the columns and ' - 'time-points on the rows. ROI names at the top row'), + desc=( + "csv file with ROIs on the columns and " + "time-points on the rows. ROI names at the top row" + ), exists=True, - requires=('TR', )) + requires=("TR",), + ) # If you gave just a file name, you need to specify the sampling_rate: TR = traits.Float( - desc=('The TR used to collect the data' - 'in your csv file ')) + desc=("The TR used to collect the data" "in your csv file ") + ) - in_TS = traits.Any(desc='a nitime TimeSeries object') + in_TS = traits.Any(desc="a nitime TimeSeries object") NFFT = traits.Range( low=32, value=64, usedefault=True, - desc=('This is the size of the window used for ' - 'the spectral estimation. Use values between ' - '32 and the number of samples in your time-series.' - '(Defaults to 64.)')) + desc=( + "This is the size of the window used for " + "the spectral estimation. Use values between " + "32 and the number of samples in your time-series." 
+ "(Defaults to 64.)" + ), + ) n_overlap = traits.Range( low=0, value=0, usedefault=True, - desc=('The number of samples which overlap' - 'between subsequent windows.(Defaults to 0)')) + desc=( + "The number of samples which overlap" + "between subsequent windows.(Defaults to 0)" + ), + ) frequency_range = traits.List( value=[0.02, 0.15], usedefault=True, minlen=2, maxlen=2, - desc=('The range of frequencies over' - 'which the analysis will average.' - '[low,high] (Default [0.02,0.15]')) + desc=( + "The range of frequencies over" + "which the analysis will average." + "[low,high] (Default [0.02,0.15]" + ), + ) output_csv_file = File( - desc= - 'File to write outputs (coherence,time-delay) with file-names: file_name_ {coherence,timedelay}' + desc="File to write outputs (coherence,time-delay) with file-names: file_name_ {coherence,timedelay}" ) output_figure_file = File( - desc= - 'File to write output figures (coherence,time-delay) with file-names: file_name_{coherence,timedelay}. Possible formats: .png,.svg,.pdf,.jpg,...' + desc="File to write output figures (coherence,time-delay) with file-names: file_name_{coherence,timedelay}. Possible formats: .png,.svg,.pdf,.jpg,..." ) figure_type = traits.Enum( - 'matrix', - 'network', + "matrix", + "network", usedefault=True, - desc=("The type of plot to generate, where " - "'matrix' denotes a matrix image and" - "'network' denotes a graph representation." - " Default: 'matrix'")) + desc=( + "The type of plot to generate, where " + "'matrix' denotes a matrix image and" + "'network' denotes a graph representation." + " Default: 'matrix'" + ), + ) class CoherenceAnalyzerOutputSpec(TraitedSpec): coherence_array = traits.Array( - desc=('The pairwise coherence values' - 'between the ROIs')) + desc=("The pairwise coherence values" "between the ROIs") + ) timedelay_array = traits.Array( - desc=('The pairwise time delays between the' - 'ROIs (in seconds)')) + desc=("The pairwise time delays between the" "ROIs (in seconds)") + ) coherence_csv = File( - desc=('A csv file containing the pairwise ' - 'coherence values')) + desc=("A csv file containing the pairwise " "coherence values") + ) timedelay_csv = File( - desc=('A csv file containing the pairwise ' - 'time delay values')) + desc=("A csv file containing the pairwise " "time delay values") + ) - coherence_fig = File(desc=('Figure representing coherence values')) - timedelay_fig = File(desc=('Figure representing coherence values')) + coherence_fig = File(desc=("Figure representing coherence values")) + timedelay_fig = File(desc=("Figure representing coherence values")) class CoherenceAnalyzer(NitimeBaseInterface): @@ -132,20 +151,21 @@ def _read_csv(self): "First row of in_file should contain ROI names as strings of characters" ) - roi_names = open(self.inputs.in_file).readline().replace( - '\"', '').strip('\n').split(',') + roi_names = ( + open(self.inputs.in_file).readline().replace('"', "").strip("\n").split(",") + ) # Transpose, so that the time is the last dimension: - data = np.loadtxt(self.inputs.in_file, skiprows=1, delimiter=',').T + data = np.loadtxt(self.inputs.in_file, skiprows=1, delimiter=",").T return data, roi_names def _csv2ts(self): """ Read data from the in_file and generate a nitime TimeSeries object""" from nitime.timeseries import TimeSeries + data, roi_names = self._read_csv() - TS = TimeSeries( - data=data, sampling_interval=self.inputs.TR, time_unit='s') + TS = TimeSeries(data=data, sampling_interval=self.inputs.TR, time_unit="s") TS.metadata = dict(ROIs=roi_names) @@ -154,6 +174,7 
@@ def _csv2ts(self): # Rewrite _run_interface, but not run def _run_interface(self, runtime): import nitime.analysis as nta + lb, ub = self.inputs.frequency_range if self.inputs.in_TS is Undefined: @@ -165,21 +186,24 @@ def _run_interface(self, runtime): TS = self.inputs.in_TS # deal with creating or storing ROI names: - if 'ROIs' not in TS.metadata: - self.ROIs = ['roi_%d' % x for x, _ in enumerate(TS.data)] + if "ROIs" not in TS.metadata: + self.ROIs = ["roi_%d" % x for x, _ in enumerate(TS.data)] else: - self.ROIs = TS.metadata['ROIs'] + self.ROIs = TS.metadata["ROIs"] A = nta.CoherenceAnalyzer( TS, method=dict( - this_method='welch', + this_method="welch", NFFT=self.inputs.NFFT, - n_overlap=self.inputs.n_overlap)) + n_overlap=self.inputs.n_overlap, + ), + ) freq_idx = np.where( - (A.frequencies > self.inputs.frequency_range[0]) * - (A.frequencies < self.inputs.frequency_range[1]))[0] + (A.frequencies > self.inputs.frequency_range[0]) + * (A.frequencies < self.inputs.frequency_range[1]) + )[0] # Get the coherence matrix from the analyzer, averaging on the last # (frequency) dimension: (roi X roi array) @@ -198,29 +222,31 @@ def _list_outputs(self): # file name + path) # Always defined (the arrays): - outputs['coherence_array'] = self.coherence - outputs['timedelay_array'] = self.delay + outputs["coherence_array"] = self.coherence + outputs["timedelay_array"] = self.delay # Conditional - if isdefined(self.inputs.output_csv_file) and hasattr( - self, 'coherence'): + if isdefined(self.inputs.output_csv_file) and hasattr(self, "coherence"): # we need to make a function that we call here that writes the # coherence values to this file "coherence_csv" and makes the # time_delay csv file?? self._make_output_files() - outputs['coherence_csv'] = fname_presuffix( - self.inputs.output_csv_file, suffix='_coherence') + outputs["coherence_csv"] = fname_presuffix( + self.inputs.output_csv_file, suffix="_coherence" + ) - outputs['timedelay_csv'] = fname_presuffix( - self.inputs.output_csv_file, suffix='_delay') + outputs["timedelay_csv"] = fname_presuffix( + self.inputs.output_csv_file, suffix="_delay" + ) - if isdefined(self.inputs.output_figure_file) and hasattr( - self, 'coherence'): + if isdefined(self.inputs.output_figure_file) and hasattr(self, "coherence"): self._make_output_figures() - outputs['coherence_fig'] = fname_presuffix( - self.inputs.output_figure_file, suffix='_coherence') - outputs['timedelay_fig'] = fname_presuffix( - self.inputs.output_figure_file, suffix='_delay') + outputs["coherence_fig"] = fname_presuffix( + self.inputs.output_figure_file, suffix="_coherence" + ) + outputs["timedelay_fig"] = fname_presuffix( + self.inputs.output_figure_file, suffix="_delay" + ) return outputs @@ -228,18 +254,19 @@ def _make_output_files(self): """ Generate the output csv files. 
""" - for this in zip([self.coherence, self.delay], ['coherence', 'delay']): + for this in zip([self.coherence, self.delay], ["coherence", "delay"]): tmp_f = tempfile.mkstemp()[1] - np.savetxt(tmp_f, this[0], delimiter=',') + np.savetxt(tmp_f, this[0], delimiter=",") fid = open( - fname_presuffix( - self.inputs.output_csv_file, suffix='_%s' % this[1]), 'w+') + fname_presuffix(self.inputs.output_csv_file, suffix="_%s" % this[1]), + "w+", + ) # this writes ROIs as header line - fid.write(',' + ','.join(self.ROIs) + '\n') + fid.write("," + ",".join(self.ROIs) + "\n") # this writes ROI and data to a line for r, line in zip(self.ROIs, open(tmp_f)): - fid.write('%s,%s' % (r, line)) + fid.write("%s,%s" % (r, line)) fid.close() def _make_output_figures(self): @@ -249,31 +276,32 @@ def _make_output_figures(self): """ import nitime.viz as viz - if self.inputs.figure_type == 'matrix': + + if self.inputs.figure_type == "matrix": fig_coh = viz.drawmatrix_channels( - self.coherence, channel_names=self.ROIs, color_anchor=0) + self.coherence, channel_names=self.ROIs, color_anchor=0 + ) fig_coh.savefig( - fname_presuffix( - self.inputs.output_figure_file, suffix='_coherence')) + fname_presuffix(self.inputs.output_figure_file, suffix="_coherence") + ) fig_dt = viz.drawmatrix_channels( - self.delay, channel_names=self.ROIs, color_anchor=0) + self.delay, channel_names=self.ROIs, color_anchor=0 + ) fig_dt.savefig( - fname_presuffix( - self.inputs.output_figure_file, suffix='_delay')) + fname_presuffix(self.inputs.output_figure_file, suffix="_delay") + ) else: - fig_coh = viz.drawgraph_channels( - self.coherence, channel_names=self.ROIs) + fig_coh = viz.drawgraph_channels(self.coherence, channel_names=self.ROIs) fig_coh.savefig( - fname_presuffix( - self.inputs.output_figure_file, suffix='_coherence')) + fname_presuffix(self.inputs.output_figure_file, suffix="_coherence") + ) - fig_dt = viz.drawgraph_channels( - self.delay, channel_names=self.ROIs) + fig_dt = viz.drawgraph_channels(self.delay, channel_names=self.ROIs) fig_dt.savefig( - fname_presuffix( - self.inputs.output_figure_file, suffix='_delay')) + fname_presuffix(self.inputs.output_figure_file, suffix="_delay") + ) diff --git a/nipype/interfaces/nitime/base.py b/nipype/interfaces/nitime/base.py index d9139f2c71..fb31cafc75 100644 --- a/nipype/interfaces/nitime/base.py +++ b/nipype/interfaces/nitime/base.py @@ -7,4 +7,4 @@ class NitimeBaseInterface(LibraryBaseInterface): - _pkg = 'nitime' + _pkg = "nitime" diff --git a/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py b/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py index 0af36046ac..d188c27800 100644 --- a/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py +++ b/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py @@ -4,32 +4,31 @@ def test_CoherenceAnalyzer_inputs(): input_map = dict( - NFFT=dict(usedefault=True, ), + NFFT=dict(usedefault=True,), TR=dict(), - figure_type=dict(usedefault=True, ), - frequency_range=dict(usedefault=True, ), + figure_type=dict(usedefault=True,), + frequency_range=dict(usedefault=True,), in_TS=dict(), - in_file=dict( - extensions=None, - requires=('TR', ), - ), - n_overlap=dict(usedefault=True, ), - output_csv_file=dict(extensions=None, ), - output_figure_file=dict(extensions=None, ), + in_file=dict(extensions=None, requires=("TR",),), + n_overlap=dict(usedefault=True,), + output_csv_file=dict(extensions=None,), + output_figure_file=dict(extensions=None,), ) inputs = CoherenceAnalyzer.input_spec() for key, metadata in 
list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CoherenceAnalyzer_outputs(): output_map = dict( coherence_array=dict(), - coherence_csv=dict(extensions=None, ), - coherence_fig=dict(extensions=None, ), + coherence_csv=dict(extensions=None,), + coherence_fig=dict(extensions=None,), timedelay_array=dict(), - timedelay_csv=dict(extensions=None, ), - timedelay_fig=dict(extensions=None, ), + timedelay_csv=dict(extensions=None,), + timedelay_fig=dict(extensions=None,), ) outputs = CoherenceAnalyzer.output_spec() diff --git a/nipype/interfaces/nitime/tests/test_nitime.py b/nipype/interfaces/nitime/tests/test_nitime.py index daea1a177b..507e1856ae 100644 --- a/nipype/interfaces/nitime/tests/test_nitime.py +++ b/nipype/interfaces/nitime/tests/test_nitime.py @@ -11,7 +11,7 @@ import nipype.interfaces.nitime as nitime no_nitime = not nitime.analysis.have_nitime -display_available = 'DISPLAY' in os.environ and os.environ['DISPLAY'] +display_available = "DISPLAY" in os.environ and os.environ["DISPLAY"] @pytest.mark.skipif(no_nitime, reason="nitime is not installed") @@ -20,14 +20,14 @@ def test_read_csv(): time-series object """ CA = nitime.CoherenceAnalyzer() CA.inputs.TR = 1.89 # bogus value just to pass traits test - CA.inputs.in_file = example_data('fmri_timeseries_nolabels.csv') + CA.inputs.in_file = example_data("fmri_timeseries_nolabels.csv") with pytest.raises(ValueError): CA._read_csv() - CA.inputs.in_file = example_data('fmri_timeseries.csv') + CA.inputs.in_file = example_data("fmri_timeseries.csv") data, roi_names = CA._read_csv() assert data[0][0] == 10125.9 - assert roi_names[0] == 'WM' + assert roi_names[0] == "WM" @pytest.mark.skipif(no_nitime, reason="nitime is not installed") @@ -40,11 +40,11 @@ def test_coherence_analysis(tmpdir): # This is the nipype interface analysis: CA = nitime.CoherenceAnalyzer() CA.inputs.TR = 1.89 - CA.inputs.in_file = example_data('fmri_timeseries.csv') + CA.inputs.in_file = example_data("fmri_timeseries.csv") if display_available: - tmp_png = tempfile.mkstemp(suffix='.png')[1] + tmp_png = tempfile.mkstemp(suffix=".png")[1] CA.inputs.output_figure_file = tmp_png - tmp_csv = tempfile.mkstemp(suffix='.csv')[1] + tmp_csv = tempfile.mkstemp(suffix=".csv")[1] CA.inputs.output_csv_file = tmp_csv o = CA.run() @@ -52,7 +52,7 @@ def test_coherence_analysis(tmpdir): # This is the nitime analysis: TR = 1.89 - data_rec = np.recfromcsv(example_data('fmri_timeseries.csv')) + data_rec = np.recfromcsv(example_data("fmri_timeseries.csv")) roi_names = np.array(data_rec.dtype.names) n_samples = data_rec.shape[0] data = np.zeros((len(roi_names), n_samples)) @@ -64,16 +64,18 @@ def test_coherence_analysis(tmpdir): assert (CA._csv2ts().data == T.data).all() - T.metadata['roi'] = roi_names + T.metadata["roi"] = roi_names C = nta.CoherenceAnalyzer( T, method=dict( - this_method='welch', - NFFT=CA.inputs.NFFT, - n_overlap=CA.inputs.n_overlap)) - - freq_idx = np.where((C.frequencies > CA.inputs.frequency_range[0]) * - (C.frequencies < CA.inputs.frequency_range[1]))[0] + this_method="welch", NFFT=CA.inputs.NFFT, n_overlap=CA.inputs.n_overlap + ), + ) + + freq_idx = np.where( + (C.frequencies > CA.inputs.frequency_range[0]) + * (C.frequencies < CA.inputs.frequency_range[1]) + )[0] # Extract the coherence and average across these frequency bands: # Averaging is done on the last dimension diff --git a/nipype/interfaces/petpvc.py b/nipype/interfaces/petpvc.py index ffa892e7e0..c59b7f2777 100644 --- 
a/nipype/interfaces/petpvc.py +++ b/nipype/interfaces/petpvc.py @@ -4,57 +4,88 @@ import os -from .base import TraitedSpec, CommandLineInputSpec, CommandLine, File, isdefined, traits +from .base import ( + TraitedSpec, + CommandLineInputSpec, + CommandLine, + File, + isdefined, + traits, +) from ..utils.filemanip import fname_presuffix from ..external.due import BibTeX pvc_methods = [ - 'GTM', 'IY', 'IY+RL', 'IY+VC', 'LABBE', 'LABBE+MTC', 'LABBE+MTC+RL', - 'LABBE+MTC+VC', 'LABBE+RBV', 'LABBE+RBV+RL', 'LABBE+RBV+VC', 'MG', 'MG+RL', - 'MG+VC', 'MTC', 'MTC+RL', 'MTC+VC', 'RBV', 'RBV+RL', 'RBV+VC', 'RL', 'VC' + "GTM", + "IY", + "IY+RL", + "IY+VC", + "LABBE", + "LABBE+MTC", + "LABBE+MTC+RL", + "LABBE+MTC+VC", + "LABBE+RBV", + "LABBE+RBV+RL", + "LABBE+RBV+VC", + "MG", + "MG+RL", + "MG+VC", + "MTC", + "MTC+RL", + "MTC+VC", + "RBV", + "RBV+RL", + "RBV+VC", + "RL", + "VC", ] class PETPVCInputSpec(CommandLineInputSpec): - in_file = File( - desc="PET image file", exists=True, mandatory=True, argstr="-i %s") - out_file = File( - desc="Output file", genfile=True, hash_files=False, argstr="-o %s") + in_file = File(desc="PET image file", exists=True, mandatory=True, argstr="-i %s") + out_file = File(desc="Output file", genfile=True, hash_files=False, argstr="-o %s") mask_file = File( - desc="Mask image file", exists=True, mandatory=True, argstr="-m %s") + desc="Mask image file", exists=True, mandatory=True, argstr="-m %s" + ) pvc = traits.Enum( - pvc_methods, desc="Desired PVC method", mandatory=True, argstr="-p %s") + pvc_methods, desc="Desired PVC method", mandatory=True, argstr="-p %s" + ) fwhm_x = traits.Float( desc="The full-width at half maximum in mm along x-axis", mandatory=True, - argstr="-x %.4f") + argstr="-x %.4f", + ) fwhm_y = traits.Float( desc="The full-width at half maximum in mm along y-axis", mandatory=True, - argstr="-y %.4f") + argstr="-y %.4f", + ) fwhm_z = traits.Float( desc="The full-width at half maximum in mm along z-axis", mandatory=True, - argstr="-z %.4f") + argstr="-z %.4f", + ) debug = traits.Bool( desc="Prints debug information", usedefault=True, default_value=False, - argstr="-d") + argstr="-d", + ) n_iter = traits.Int( - desc="Number of iterations", default_value=10, usedefault=True, - argstr="-n %d") + desc="Number of iterations", default_value=10, usedefault=True, argstr="-n %d" + ) n_deconv = traits.Int( desc="Number of deconvolution iterations", default_value=10, usedefault=True, - argstr="-k %d") + argstr="-k %d", + ) alpha = traits.Float( - desc="Alpha value", default_value=1.5, usedefault=True, - argstr="-a %.4f") + desc="Alpha value", default_value=1.5, usedefault=True, argstr="-a %.4f" + ) stop_crit = traits.Float( - desc="Stopping criterion", default_value=0.01, usedefault=True, - argstr="-s %.4f") + desc="Stopping criterion", default_value=0.01, usedefault=True, argstr="-s %.4f" + ) class PETPVCOutputSpec(TraitedSpec): @@ -145,48 +176,48 @@ class PETPVC(CommandLine): >>> pvc.inputs.fwhm_z = 2.0 >>> outs = pvc.run() #doctest: +SKIP """ + input_spec = PETPVCInputSpec output_spec = PETPVCOutputSpec - _cmd = 'petpvc' - - references_ = [{ - 'entry': - BibTeX( - "@article{0031-9155-61-22-7975," - "author={Benjamin A Thomas and Vesna Cuplov and Alexandre Bousse and " - "Adriana Mendes and Kris Thielemans and Brian F Hutton and Kjell Erlandsson}," - "title={PETPVC: a toolbox for performing partial volume correction " - "techniques in positron emission tomography}," - "journal={Physics in Medicine and Biology}," - "volume={61}," - "number={22}," - "pages={7975}," - 
"url={http://stacks.iop.org/0031-9155/61/i=22/a=7975}," - "doi={https://doi.org/10.1088/0031-9155/61/22/7975}," - "year={2016}," - "}"), - 'description': - 'PETPVC software implementation publication', - 'tags': ['implementation'], - }] + _cmd = "petpvc" + + references_ = [ + { + "entry": BibTeX( + "@article{0031-9155-61-22-7975," + "author={Benjamin A Thomas and Vesna Cuplov and Alexandre Bousse and " + "Adriana Mendes and Kris Thielemans and Brian F Hutton and Kjell Erlandsson}," + "title={PETPVC: a toolbox for performing partial volume correction " + "techniques in positron emission tomography}," + "journal={Physics in Medicine and Biology}," + "volume={61}," + "number={22}," + "pages={7975}," + "url={http://stacks.iop.org/0031-9155/61/i=22/a=7975}," + "doi={https://doi.org/10.1088/0031-9155/61/22/7975}," + "year={2016}," + "}" + ), + "description": "PETPVC software implementation publication", + "tags": ["implementation"], + } + ] def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = self.inputs.out_file - if not isdefined(outputs['out_file']): + outputs["out_file"] = self.inputs.out_file + if not isdefined(outputs["out_file"]): method_name = self.inputs.pvc.lower() - outputs['out_file'] = self._gen_fname( - self.inputs.in_file, suffix='_{}_pvc'.format(method_name)) + outputs["out_file"] = self._gen_fname( + self.inputs.in_file, suffix="_{}_pvc".format(method_name) + ) - outputs['out_file'] = os.path.abspath(outputs['out_file']) + outputs["out_file"] = os.path.abspath(outputs["out_file"]) return outputs - def _gen_fname(self, - basename, - cwd=None, - suffix=None, - change_ext=True, - ext='.nii.gz'): + def _gen_fname( + self, basename, cwd=None, suffix=None, change_ext=True, ext=".nii.gz" + ): """Generate a filename based on the given parameters. The filename will take the form: cwd/basename. @@ -211,24 +242,23 @@ def _gen_fname(self, New filename based on given parameters. """ - if basename == '': - msg = 'Unable to generate filename for command %s. ' % self.cmd - msg += 'basename is not set!' + if basename == "": + msg = "Unable to generate filename for command %s. " % self.cmd + msg += "basename is not set!" 
raise ValueError(msg) if cwd is None: cwd = os.getcwd() if change_ext: if suffix: - suffix = ''.join((suffix, ext)) + suffix = "".join((suffix, ext)) else: suffix = ext if suffix is None: - suffix = '' - fname = fname_presuffix( - basename, suffix=suffix, use_ext=False, newpath=cwd) + suffix = "" + fname = fname_presuffix(basename, suffix=suffix, use_ext=False, newpath=cwd) return fname def _gen_filename(self, name): - if name == 'out_file': - return self._list_outputs()['out_file'] + if name == "out_file": + return self._list_outputs()["out_file"] return None diff --git a/nipype/interfaces/quickshear.py b/nipype/interfaces/quickshear.py index 7508c0a356..7725abfeb5 100644 --- a/nipype/interfaces/quickshear.py +++ b/nipype/interfaces/quickshear.py @@ -10,27 +10,26 @@ class QuickshearInputSpec(CommandLineInputSpec): in_file = File( exists=True, position=1, - argstr='%s', + argstr="%s", mandatory=True, - desc="neuroimage to deface") + desc="neuroimage to deface", + ) mask_file = File( - exists=True, - position=2, - argstr='%s', - desc="brain mask", - mandatory=True) + exists=True, position=2, argstr="%s", desc="brain mask", mandatory=True + ) out_file = File( name_template="%s_defaced", - name_source='in_file', + name_source="in_file", position=3, - argstr='%s', + argstr="%s", desc="defaced output image", - keep_extension=True) + keep_extension=True, + ) buff = traits.Int( position=4, - argstr='%d', - desc='buffer size (in voxels) between shearing ' - 'plane and the brain') + argstr="%d", + desc="buffer size (in voxels) between shearing " "plane and the brain", + ) class QuickshearOutputSpec(TraitedSpec): @@ -72,19 +71,23 @@ class Quickshear(CommandLine): >>> inputnode.inputs.in_file = 'T1.nii' >>> res = deface_wf.run() # doctest: +SKIP """ - _cmd = 'quickshear' + + _cmd = "quickshear" input_spec = QuickshearInputSpec output_spec = QuickshearOutputSpec - references_ = [{ - 'entry': - BibTeX('@inproceedings{Schimke2011,' - 'address = {San Francisco},' - 'author = {Schimke, Nakeisha and Hale, John},' - 'booktitle = {Proceedings of the 2nd USENIX Conference on ' - 'Health Security and Privacy},' - 'title = {{Quickshear Defacing for Neuroimages}},' - 'year = {2011},' - 'month = sep}'), - 'tags': ['implementation'], - }] + references_ = [ + { + "entry": BibTeX( + "@inproceedings{Schimke2011," + "address = {San Francisco}," + "author = {Schimke, Nakeisha and Hale, John}," + "booktitle = {Proceedings of the 2nd USENIX Conference on " + "Health Security and Privacy}," + "title = {{Quickshear Defacing for Neuroimages}}," + "year = {2011}," + "month = sep}" + ), + "tags": ["implementation"], + } + ] diff --git a/nipype/interfaces/semtools/brains/__init__.py b/nipype/interfaces/semtools/brains/__init__.py index a8bd05a1be..dd369fb168 100644 --- a/nipype/interfaces/semtools/brains/__init__.py +++ b/nipype/interfaces/semtools/brains/__init__.py @@ -1,5 +1,8 @@ # -*- coding: utf-8 -*- from .segmentation import SimilarityIndex, BRAINSTalairach, BRAINSTalairachMask -from .utilities import (HistogramMatchingFilter, GenerateEdgeMapImage, - GeneratePurePlugMask) +from .utilities import ( + HistogramMatchingFilter, + GenerateEdgeMapImage, + GeneratePurePlugMask, +) from .classify import BRAINSPosteriorToContinuousClass diff --git a/nipype/interfaces/semtools/brains/classify.py b/nipype/interfaces/semtools/brains/classify.py index 89bb74f039..f59b53183e 100644 --- a/nipype/interfaces/semtools/brains/classify.py +++ b/nipype/interfaces/semtools/brains/classify.py @@ -5,49 +5,63 @@ import os -from ...base import 
(CommandLine, CommandLineInputSpec, SEMLikeCommandLine, - TraitedSpec, File, Directory, traits, isdefined, - InputMultiPath, OutputMultiPath) +from ...base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) class BRAINSPosteriorToContinuousClassInputSpec(CommandLineInputSpec): inputWhiteVolume = File( desc="White Matter Posterior Volume", exists=True, - argstr="--inputWhiteVolume %s") + argstr="--inputWhiteVolume %s", + ) inputBasalGmVolume = File( desc="Basal Grey Matter Posterior Volume", exists=True, - argstr="--inputBasalGmVolume %s") + argstr="--inputBasalGmVolume %s", + ) inputSurfaceGmVolume = File( desc="Surface Grey Matter Posterior Volume", exists=True, - argstr="--inputSurfaceGmVolume %s") + argstr="--inputSurfaceGmVolume %s", + ) inputCsfVolume = File( - desc="CSF Posterior Volume", exists=True, argstr="--inputCsfVolume %s") + desc="CSF Posterior Volume", exists=True, argstr="--inputCsfVolume %s" + ) inputVbVolume = File( - desc="Venous Blood Posterior Volume", - exists=True, - argstr="--inputVbVolume %s") + desc="Venous Blood Posterior Volume", exists=True, argstr="--inputVbVolume %s" + ) inputCrblGmVolume = File( desc="Cerebellum Grey Matter Posterior Volume", exists=True, - argstr="--inputCrblGmVolume %s") + argstr="--inputCrblGmVolume %s", + ) inputCrblWmVolume = File( desc="Cerebellum White Matter Posterior Volume", exists=True, - argstr="--inputCrblWmVolume %s") + argstr="--inputCrblWmVolume %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Output Continuous Tissue Classified Image", - argstr="--outputVolume %s") + argstr="--outputVolume %s", + ) class BRAINSPosteriorToContinuousClassOutputSpec(TraitedSpec): - outputVolume = File( - desc="Output Continuous Tissue Classified Image", exists=True) + outputVolume = File(desc="Output Continuous Tissue Classified Image", exists=True) class BRAINSPosteriorToContinuousClass(SEMLikeCommandLine): @@ -72,5 +86,5 @@ class BRAINSPosteriorToContinuousClass(SEMLikeCommandLine): input_spec = BRAINSPosteriorToContinuousClassInputSpec output_spec = BRAINSPosteriorToContinuousClassOutputSpec _cmd = " BRAINSPosteriorToContinuousClass " - _outputs_filenames = {'outputVolume': 'outputVolume'} + _outputs_filenames = {"outputVolume": "outputVolume"} _redirect_x = False diff --git a/nipype/interfaces/semtools/brains/segmentation.py b/nipype/interfaces/semtools/brains/segmentation.py index fae5e4f1a2..5b0a901277 100644 --- a/nipype/interfaces/semtools/brains/segmentation.py +++ b/nipype/interfaces/semtools/brains/segmentation.py @@ -5,28 +5,38 @@ import os -from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, - TraitedSpec, File, Directory, traits, isdefined, - InputMultiPath, OutputMultiPath) +from ...base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) class SimilarityIndexInputSpec(CommandLineInputSpec): outputCSVFilename = File( - desc="output CSV Filename", - exists=True, - argstr="--outputCSVFilename %s") + desc="output CSV Filename", exists=True, argstr="--outputCSVFilename %s" + ) ANNContinuousVolume = File( desc="ANN Continuous volume to be compared to the manual volume", exists=True, - argstr="--ANNContinuousVolume %s") + argstr="--ANNContinuousVolume %s", + ) inputManualVolume = File( desc="input manual(reference) volume", exists=True, - 
argstr="--inputManualVolume %s") + argstr="--inputManualVolume %s", + ) thresholdInterval = traits.Float( - desc= - "Threshold interval to compute similarity index between zero and one", - argstr="--thresholdInterval %f") + desc="Threshold interval to compute similarity index between zero and one", + argstr="--thresholdInterval %f", + ) class SimilarityIndexOutputSpec(TraitedSpec): @@ -57,46 +67,47 @@ class SimilarityIndex(SEMLikeCommandLine): class BRAINSTalairachInputSpec(CommandLineInputSpec): AC = InputMultiPath( - traits.Float, desc="Location of AC Point ", sep=",", argstr="--AC %s") + traits.Float, desc="Location of AC Point ", sep=",", argstr="--AC %s" + ) ACisIndex = traits.Bool(desc="AC Point is Index", argstr="--ACisIndex ") PC = InputMultiPath( - traits.Float, desc="Location of PC Point ", sep=",", argstr="--PC %s") + traits.Float, desc="Location of PC Point ", sep=",", argstr="--PC %s" + ) PCisIndex = traits.Bool(desc="PC Point is Index", argstr="--PCisIndex ") SLA = InputMultiPath( - traits.Float, - desc="Location of SLA Point ", - sep=",", - argstr="--SLA %s") + traits.Float, desc="Location of SLA Point ", sep=",", argstr="--SLA %s" + ) SLAisIndex = traits.Bool(desc="SLA Point is Index", argstr="--SLAisIndex ") IRP = InputMultiPath( - traits.Float, - desc="Location of IRP Point ", - sep=",", - argstr="--IRP %s") + traits.Float, desc="Location of IRP Point ", sep=",", argstr="--IRP %s" + ) IRPisIndex = traits.Bool(desc="IRP Point is Index", argstr="--IRPisIndex ") inputVolume = File( desc="Input image used to define physical space of images", exists=True, - argstr="--inputVolume %s") + argstr="--inputVolume %s", + ) outputBox = traits.Either( traits.Bool, File(), hash_files=False, desc="Name of the resulting Talairach Bounding Box file", - argstr="--outputBox %s") + argstr="--outputBox %s", + ) outputGrid = traits.Either( traits.Bool, File(), hash_files=False, desc="Name of the resulting Talairach Grid file", - argstr="--outputGrid %s") + argstr="--outputGrid %s", + ) class BRAINSTalairachOutputSpec(TraitedSpec): outputBox = File( - desc="Name of the resulting Talairach Bounding Box file", exists=True) - outputGrid = File( - desc="Name of the resulting Talairach Grid file", exists=True) + desc="Name of the resulting Talairach Bounding Box file", exists=True + ) + outputGrid = File(desc="Name of the resulting Talairach Grid file", exists=True) class BRAINSTalairach(SEMLikeCommandLine): @@ -121,7 +132,7 @@ class BRAINSTalairach(SEMLikeCommandLine): input_spec = BRAINSTalairachInputSpec output_spec = BRAINSTalairachOutputSpec _cmd = " BRAINSTalairach " - _outputs_filenames = {'outputGrid': 'outputGrid', 'outputBox': 'outputBox'} + _outputs_filenames = {"outputGrid": "outputGrid", "outputBox": "outputBox"} _redirect_x = False @@ -129,34 +140,39 @@ class BRAINSTalairachMaskInputSpec(CommandLineInputSpec): inputVolume = File( desc="Input image used to define physical space of resulting mask", exists=True, - argstr="--inputVolume %s") + argstr="--inputVolume %s", + ) talairachParameters = File( desc="Name of the Talairach parameter file.", exists=True, - argstr="--talairachParameters %s") + argstr="--talairachParameters %s", + ) talairachBox = File( - desc="Name of the Talairach box file.", - exists=True, - argstr="--talairachBox %s") + desc="Name of the Talairach box file.", exists=True, argstr="--talairachBox %s" + ) hemisphereMode = traits.Enum( "left", "right", "both", desc="Mode for box creation: left, right, both", - argstr="--hemisphereMode %s") + argstr="--hemisphereMode %s", 
+ ) expand = traits.Bool( - desc="Expand exterior box to include surface CSF", argstr="--expand ") + desc="Expand exterior box to include surface CSF", argstr="--expand " + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Output filename for the resulting binary image", - argstr="--outputVolume %s") + argstr="--outputVolume %s", + ) class BRAINSTalairachMaskOutputSpec(TraitedSpec): outputVolume = File( - desc="Output filename for the resulting binary image", exists=True) + desc="Output filename for the resulting binary image", exists=True + ) class BRAINSTalairachMask(SEMLikeCommandLine): @@ -181,5 +197,5 @@ class BRAINSTalairachMask(SEMLikeCommandLine): input_spec = BRAINSTalairachMaskInputSpec output_spec = BRAINSTalairachMaskOutputSpec _cmd = " BRAINSTalairachMask " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} _redirect_x = False diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py index a3478e0af0..fd22f39bba 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py @@ -4,51 +4,26 @@ def test_BRAINSPosteriorToContinuousClass_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputBasalGmVolume=dict( - argstr='--inputBasalGmVolume %s', - extensions=None, - ), - inputCrblGmVolume=dict( - argstr='--inputCrblGmVolume %s', - extensions=None, - ), - inputCrblWmVolume=dict( - argstr='--inputCrblWmVolume %s', - extensions=None, - ), - inputCsfVolume=dict( - argstr='--inputCsfVolume %s', - extensions=None, - ), - inputSurfaceGmVolume=dict( - argstr='--inputSurfaceGmVolume %s', - extensions=None, - ), - inputVbVolume=dict( - argstr='--inputVbVolume %s', - extensions=None, - ), - inputWhiteVolume=dict( - argstr='--inputWhiteVolume %s', - extensions=None, - ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputBasalGmVolume=dict(argstr="--inputBasalGmVolume %s", extensions=None,), + inputCrblGmVolume=dict(argstr="--inputCrblGmVolume %s", extensions=None,), + inputCrblWmVolume=dict(argstr="--inputCrblWmVolume %s", extensions=None,), + inputCsfVolume=dict(argstr="--inputCsfVolume %s", extensions=None,), + inputSurfaceGmVolume=dict(argstr="--inputSurfaceGmVolume %s", extensions=None,), + inputVbVolume=dict(argstr="--inputVbVolume %s", extensions=None,), + inputWhiteVolume=dict(argstr="--inputWhiteVolume %s", extensions=None,), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), ) inputs = BRAINSPosteriorToContinuousClass.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSPosteriorToContinuousClass_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = BRAINSPosteriorToContinuousClass.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py index 398330e5f9..0999af73d0 100644 --- 
a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py @@ -4,53 +4,30 @@ def test_BRAINSTalairach_inputs(): input_map = dict( - AC=dict( - argstr='--AC %s', - sep=',', - ), - ACisIndex=dict(argstr='--ACisIndex ', ), - IRP=dict( - argstr='--IRP %s', - sep=',', - ), - IRPisIndex=dict(argstr='--IRPisIndex ', ), - PC=dict( - argstr='--PC %s', - sep=',', - ), - PCisIndex=dict(argstr='--PCisIndex ', ), - SLA=dict( - argstr='--SLA %s', - sep=',', - ), - SLAisIndex=dict(argstr='--SLAisIndex ', ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - outputBox=dict( - argstr='--outputBox %s', - hash_files=False, - ), - outputGrid=dict( - argstr='--outputGrid %s', - hash_files=False, - ), + AC=dict(argstr="--AC %s", sep=",",), + ACisIndex=dict(argstr="--ACisIndex ",), + IRP=dict(argstr="--IRP %s", sep=",",), + IRPisIndex=dict(argstr="--IRPisIndex ",), + PC=dict(argstr="--PC %s", sep=",",), + PCisIndex=dict(argstr="--PCisIndex ",), + SLA=dict(argstr="--SLA %s", sep=",",), + SLAisIndex=dict(argstr="--SLAisIndex ",), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + outputBox=dict(argstr="--outputBox %s", hash_files=False,), + outputGrid=dict(argstr="--outputGrid %s", hash_files=False,), ) inputs = BRAINSTalairach.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSTalairach_outputs(): output_map = dict( - outputBox=dict(extensions=None, ), - outputGrid=dict(extensions=None, ), + outputBox=dict(extensions=None,), outputGrid=dict(extensions=None,), ) outputs = BRAINSTalairach.output_spec() diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py index 97f353ad96..959733ce42 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py @@ -4,37 +4,24 @@ def test_BRAINSTalairachMask_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - expand=dict(argstr='--expand ', ), - hemisphereMode=dict(argstr='--hemisphereMode %s', ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), - talairachBox=dict( - argstr='--talairachBox %s', - extensions=None, - ), - talairachParameters=dict( - argstr='--talairachParameters %s', - extensions=None, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + expand=dict(argstr="--expand ",), + hemisphereMode=dict(argstr="--hemisphereMode %s",), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + talairachBox=dict(argstr="--talairachBox %s", extensions=None,), + talairachParameters=dict(argstr="--talairachParameters %s", extensions=None,), ) inputs = BRAINSTalairachMask.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSTalairachMask_outputs(): - output_map = 
dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = BRAINSTalairachMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py b/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py index 7c5256cea4..766c9c7a2b 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py @@ -4,39 +4,31 @@ def test_GenerateEdgeMapImage_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputMRVolumes=dict(argstr='--inputMRVolumes %s...', ), - inputMask=dict( - argstr='--inputMask %s', - extensions=None, - ), - lowerPercentileMatching=dict(argstr='--lowerPercentileMatching %f', ), - maximumOutputRange=dict(argstr='--maximumOutputRange %d', ), - minimumOutputRange=dict(argstr='--minimumOutputRange %d', ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputEdgeMap=dict( - argstr='--outputEdgeMap %s', - hash_files=False, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputMRVolumes=dict(argstr="--inputMRVolumes %s...",), + inputMask=dict(argstr="--inputMask %s", extensions=None,), + lowerPercentileMatching=dict(argstr="--lowerPercentileMatching %f",), + maximumOutputRange=dict(argstr="--maximumOutputRange %d",), + minimumOutputRange=dict(argstr="--minimumOutputRange %d",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputEdgeMap=dict(argstr="--outputEdgeMap %s", hash_files=False,), outputMaximumGradientImage=dict( - argstr='--outputMaximumGradientImage %s', - hash_files=False, + argstr="--outputMaximumGradientImage %s", hash_files=False, ), - upperPercentileMatching=dict(argstr='--upperPercentileMatching %f', ), + upperPercentileMatching=dict(argstr="--upperPercentileMatching %f",), ) inputs = GenerateEdgeMapImage.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_GenerateEdgeMapImage_outputs(): output_map = dict( - outputEdgeMap=dict(extensions=None, ), - outputMaximumGradientImage=dict(extensions=None, ), + outputEdgeMap=dict(extensions=None,), + outputMaximumGradientImage=dict(extensions=None,), ) outputs = GenerateEdgeMapImage.output_spec() diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py b/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py index 2cc68391bd..5cba5f42d9 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py @@ -4,29 +4,22 @@ def test_GeneratePurePlugMask_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputImageModalities=dict(argstr='--inputImageModalities %s...', ), - numberOfSubSamples=dict( - argstr='--numberOfSubSamples %s', - sep=',', - ), - outputMaskFile=dict( - argstr='--outputMaskFile %s', - hash_files=False, - ), - threshold=dict(argstr='--threshold %f', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputImageModalities=dict(argstr="--inputImageModalities %s...",), + numberOfSubSamples=dict(argstr="--numberOfSubSamples %s", sep=",",), + outputMaskFile=dict(argstr="--outputMaskFile %s", 
hash_files=False,), + threshold=dict(argstr="--threshold %f",), ) inputs = GeneratePurePlugMask.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_GeneratePurePlugMask_outputs(): - output_map = dict(outputMaskFile=dict(extensions=None, ), ) + output_map = dict(outputMaskFile=dict(extensions=None,),) outputs = GeneratePurePlugMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py b/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py index a257b52384..746857c627 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py @@ -4,44 +4,30 @@ def test_HistogramMatchingFilter_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - histogramAlgorithm=dict(argstr='--histogramAlgorithm %s', ), - inputBinaryVolume=dict( - argstr='--inputBinaryVolume %s', - extensions=None, - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), - numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + histogramAlgorithm=dict(argstr="--histogramAlgorithm %s",), + inputBinaryVolume=dict(argstr="--inputBinaryVolume %s", extensions=None,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d",), + numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), referenceBinaryVolume=dict( - argstr='--referenceBinaryVolume %s', - extensions=None, + argstr="--referenceBinaryVolume %s", extensions=None, ), - referenceVolume=dict( - argstr='--referenceVolume %s', - extensions=None, - ), - verbose=dict(argstr='--verbose ', ), - writeHistogram=dict(argstr='--writeHistogram %s', ), + referenceVolume=dict(argstr="--referenceVolume %s", extensions=None,), + verbose=dict(argstr="--verbose ",), + writeHistogram=dict(argstr="--writeHistogram %s",), ) inputs = HistogramMatchingFilter.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_HistogramMatchingFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = HistogramMatchingFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py b/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py index 8a09941f16..348ecf67d0 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py @@ -4,30 +4,20 @@ def test_SimilarityIndex_inputs(): input_map = dict( - ANNContinuousVolume=dict( - argstr='--ANNContinuousVolume %s', - extensions=None, - ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputManualVolume=dict( - argstr='--inputManualVolume %s', - 
extensions=None, - ), - outputCSVFilename=dict( - argstr='--outputCSVFilename %s', - extensions=None, - ), - thresholdInterval=dict(argstr='--thresholdInterval %f', ), + ANNContinuousVolume=dict(argstr="--ANNContinuousVolume %s", extensions=None,), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputManualVolume=dict(argstr="--inputManualVolume %s", extensions=None,), + outputCSVFilename=dict(argstr="--outputCSVFilename %s", extensions=None,), + thresholdInterval=dict(argstr="--thresholdInterval %f",), ) inputs = SimilarityIndex.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SimilarityIndex_outputs(): output_map = dict() outputs = SimilarityIndex.output_spec() diff --git a/nipype/interfaces/semtools/brains/utilities.py b/nipype/interfaces/semtools/brains/utilities.py index d794c9c587..7c6351eaab 100644 --- a/nipype/interfaces/semtools/brains/utilities.py +++ b/nipype/interfaces/semtools/brains/utilities.py @@ -5,47 +5,62 @@ import os -from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, - TraitedSpec, File, Directory, traits, isdefined, - InputMultiPath, OutputMultiPath) +from ...base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) class HistogramMatchingFilterInputSpec(CommandLineInputSpec): inputVolume = File( desc="The Input image to be computed for statistics", exists=True, - argstr="--inputVolume %s") + argstr="--inputVolume %s", + ) referenceVolume = File( desc="The Input image to be computed for statistics", exists=True, - argstr="--referenceVolume %s") + argstr="--referenceVolume %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Output Image File Name", - argstr="--outputVolume %s") + argstr="--outputVolume %s", + ) referenceBinaryVolume = File( - desc="referenceBinaryVolume", - exists=True, - argstr="--referenceBinaryVolume %s") + desc="referenceBinaryVolume", exists=True, argstr="--referenceBinaryVolume %s" + ) inputBinaryVolume = File( - desc="inputBinaryVolume", exists=True, argstr="--inputBinaryVolume %s") + desc="inputBinaryVolume", exists=True, argstr="--inputBinaryVolume %s" + ) numberOfMatchPoints = traits.Int( - desc=" number of histogram matching points", - argstr="--numberOfMatchPoints %d") + desc=" number of histogram matching points", argstr="--numberOfMatchPoints %d" + ) numberOfHistogramBins = traits.Int( - desc=" number of histogram bin", argstr="--numberOfHistogramBins %d") + desc=" number of histogram bin", argstr="--numberOfHistogramBins %d" + ) writeHistogram = traits.Str( - desc= - " decide if histogram data would be written with prefixe of the file name", - argstr="--writeHistogram %s") + desc=" decide if histogram data would be written with prefixe of the file name", + argstr="--writeHistogram %s", + ) histogramAlgorithm = traits.Enum( "OtsuHistogramMatching", desc=" histogram algrithm selection", - argstr="--histogramAlgorithm %s") + argstr="--histogramAlgorithm %s", + ) verbose = traits.Bool( - desc=" verbose mode running for debbuging", argstr="--verbose ") + desc=" verbose mode running for debbuging", argstr="--verbose " + ) class HistogramMatchingFilterOutputSpec(TraitedSpec): @@ -68,58 +83,62 @@ class HistogramMatchingFilter(SEMLikeCommandLine): input_spec = HistogramMatchingFilterInputSpec output_spec = 
HistogramMatchingFilterOutputSpec _cmd = " HistogramMatchingFilter " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} _redirect_x = False class GenerateEdgeMapImageInputSpec(CommandLineInputSpec): inputMRVolumes = InputMultiPath( File(exists=True), - desc= - "List of input structural MR volumes to create the maximum edgemap", - argstr="--inputMRVolumes %s...") + desc="List of input structural MR volumes to create the maximum edgemap", + argstr="--inputMRVolumes %s...", + ) inputMask = File( - desc= - "Input mask file name. If set, image histogram percentiles will be calculated within the mask", + desc="Input mask file name. If set, image histogram percentiles will be calculated within the mask", exists=True, - argstr="--inputMask %s") + argstr="--inputMask %s", + ) minimumOutputRange = traits.Int( - desc= - "Map lower quantile and below to minimum output range. It should be a small number greater than zero. Default is 1", - argstr="--minimumOutputRange %d") + desc="Map lower quantile and below to minimum output range. It should be a small number greater than zero. Default is 1", + argstr="--minimumOutputRange %d", + ) maximumOutputRange = traits.Int( - desc= - "Map upper quantile and above to maximum output range. Default is 255 that is the maximum range of unsigned char", - argstr="--maximumOutputRange %d") + desc="Map upper quantile and above to maximum output range. Default is 255 that is the maximum range of unsigned char", + argstr="--maximumOutputRange %d", + ) lowerPercentileMatching = traits.Float( - desc= - "Map lower quantile and below to minOutputRange. It should be a value between zero and one", - argstr="--lowerPercentileMatching %f") + desc="Map lower quantile and below to minOutputRange. It should be a value between zero and one", + argstr="--lowerPercentileMatching %f", + ) upperPercentileMatching = traits.Float( - desc= - "Map upper quantile and above to maxOutputRange. It should be a value between zero and one", - argstr="--upperPercentileMatching %f") + desc="Map upper quantile and above to maxOutputRange. 
It should be a value between zero and one", + argstr="--upperPercentileMatching %f", + ) outputEdgeMap = traits.Either( traits.Bool, File(), hash_files=False, desc="output edgemap file name", - argstr="--outputEdgeMap %s") + argstr="--outputEdgeMap %s", + ) outputMaximumGradientImage = traits.Either( traits.Bool, File(), hash_files=False, desc="output gradient image file name", - argstr="--outputMaximumGradientImage %s") + argstr="--outputMaximumGradientImage %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class GenerateEdgeMapImageOutputSpec(TraitedSpec): outputEdgeMap = File(desc="(required) output file name", exists=True) outputMaximumGradientImage = File( - desc="output gradient image file name", exists=True) + desc="output gradient image file name", exists=True + ) class GenerateEdgeMapImage(SEMLikeCommandLine): @@ -139,8 +158,8 @@ class GenerateEdgeMapImage(SEMLikeCommandLine): output_spec = GenerateEdgeMapImageOutputSpec _cmd = " GenerateEdgeMapImage " _outputs_filenames = { - 'outputEdgeMap': 'outputEdgeMap', - 'outputMaximumGradientImage': 'outputMaximumGradientImage' + "outputEdgeMap": "outputEdgeMap", + "outputMaximumGradientImage": "outputMaximumGradientImage", } _redirect_x = False @@ -149,27 +168,28 @@ class GeneratePurePlugMaskInputSpec(CommandLineInputSpec): inputImageModalities = InputMultiPath( File(exists=True), desc="List of input image file names to create pure plugs mask", - argstr="--inputImageModalities %s...") + argstr="--inputImageModalities %s...", + ) threshold = traits.Float( - desc="threshold value to define class membership", - argstr="--threshold %f") + desc="threshold value to define class membership", argstr="--threshold %f" + ) numberOfSubSamples = InputMultiPath( traits.Int, - desc= - "Number of continous index samples taken at each direction of lattice space for each plug volume", + desc="Number of continous index samples taken at each direction of lattice space for each plug volume", sep=",", - argstr="--numberOfSubSamples %s") + argstr="--numberOfSubSamples %s", + ) outputMaskFile = traits.Either( traits.Bool, File(), hash_files=False, desc="Output binary mask file name", - argstr="--outputMaskFile %s") + argstr="--outputMaskFile %s", + ) class GeneratePurePlugMaskOutputSpec(TraitedSpec): - outputMaskFile = File( - desc="(required) Output binary mask file name", exists=True) + outputMaskFile = File(desc="(required) Output binary mask file name", exists=True) class GeneratePurePlugMask(SEMLikeCommandLine): @@ -188,5 +208,5 @@ class GeneratePurePlugMask(SEMLikeCommandLine): input_spec = GeneratePurePlugMaskInputSpec output_spec = GeneratePurePlugMaskOutputSpec _cmd = " GeneratePurePlugMask " - _outputs_filenames = {'outputMaskFile': 'outputMaskFile'} + _outputs_filenames = {"outputMaskFile": "outputMaskFile"} _redirect_x = False diff --git a/nipype/interfaces/semtools/converters.py b/nipype/interfaces/semtools/converters.py index de638935e5..f3c1d432f5 100644 --- a/nipype/interfaces/semtools/converters.py +++ b/nipype/interfaces/semtools/converters.py @@ -5,23 +5,35 @@ import os -from ..base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, - TraitedSpec, File, Directory, traits, isdefined, - InputMultiPath, OutputMultiPath) +from ..base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) class 
DWISimpleCompareInputSpec(CommandLineInputSpec): inputVolume1 = File( desc="First input volume (.nhdr or .nrrd)", exists=True, - argstr="--inputVolume1 %s") + argstr="--inputVolume1 %s", + ) inputVolume2 = File( desc="Second input volume (.nhdr or .nrrd)", exists=True, - argstr="--inputVolume2 %s") + argstr="--inputVolume2 %s", + ) checkDWIData = traits.Bool( desc="check for existence of DWI data, and if present, compare it", - argstr="--checkDWIData ") + argstr="--checkDWIData ", + ) class DWISimpleCompareOutputSpec(TraitedSpec): @@ -58,11 +70,13 @@ class DWICompareInputSpec(CommandLineInputSpec): inputVolume1 = File( desc="First input volume (.nhdr or .nrrd)", exists=True, - argstr="--inputVolume1 %s") + argstr="--inputVolume1 %s", + ) inputVolume2 = File( desc="Second input volume (.nhdr or .nrrd)", exists=True, - argstr="--inputVolume2 %s") + argstr="--inputVolume2 %s", + ) class DWICompareOutputSpec(TraitedSpec): diff --git a/nipype/interfaces/semtools/diffusion/__init__.py b/nipype/interfaces/semtools/diffusion/__init__.py index f9f414d087..28044fe337 100644 --- a/nipype/interfaces/semtools/diffusion/__init__.py +++ b/nipype/interfaces/semtools/diffusion/__init__.py @@ -2,13 +2,29 @@ from .diffusion import dtiaverage, dtiestim, dtiprocess, DWIConvert from .tractography import * from .gtract import ( - gtractTransformToDisplacementField, gtractInvertBSplineTransform, - gtractConcatDwi, gtractAverageBvalues, gtractCoregBvalues, - gtractResampleAnisotropy, gtractResampleCodeImage, - gtractCopyImageOrientation, gtractCreateGuideFiber, gtractAnisotropyMap, - gtractClipAnisotropy, gtractResampleB0, gtractInvertRigidTransform, - gtractImageConformity, compareTractInclusion, gtractFastMarchingTracking, - gtractInvertDisplacementField, gtractCoRegAnatomy, - gtractResampleDWIInPlace, gtractCostFastMarching, gtractFiberTracking, - extractNrrdVectorIndex, gtractResampleFibers, gtractTensor) + gtractTransformToDisplacementField, + gtractInvertBSplineTransform, + gtractConcatDwi, + gtractAverageBvalues, + gtractCoregBvalues, + gtractResampleAnisotropy, + gtractResampleCodeImage, + gtractCopyImageOrientation, + gtractCreateGuideFiber, + gtractAnisotropyMap, + gtractClipAnisotropy, + gtractResampleB0, + gtractInvertRigidTransform, + gtractImageConformity, + compareTractInclusion, + gtractFastMarchingTracking, + gtractInvertDisplacementField, + gtractCoRegAnatomy, + gtractResampleDWIInPlace, + gtractCostFastMarching, + gtractFiberTracking, + extractNrrdVectorIndex, + gtractResampleFibers, + gtractTensor, +) from .maxcurvature import maxcurvature diff --git a/nipype/interfaces/semtools/diffusion/diffusion.py b/nipype/interfaces/semtools/diffusion/diffusion.py index af943a04fb..90377b8ee0 100644 --- a/nipype/interfaces/semtools/diffusion/diffusion.py +++ b/nipype/interfaces/semtools/diffusion/diffusion.py @@ -5,26 +5,37 @@ import os -from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, - TraitedSpec, File, Directory, traits, isdefined, - InputMultiPath, OutputMultiPath) +from ...base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) class dtiaverageInputSpec(CommandLineInputSpec): inputs = InputMultiPath( File(exists=True), desc="List of all the tensor fields to be averaged", - argstr="--inputs %s...") + argstr="--inputs %s...", + ) tensor_output = traits.Either( traits.Bool, File(), hash_files=False, desc="Averaged tensor volume", - argstr="--tensor_output %s") 
+ argstr="--tensor_output %s", + ) DTI_double = traits.Bool( - desc= - "Tensor components are saved as doubles (cannot be visualized in Slicer)", - argstr="--DTI_double ") + desc="Tensor components are saved as doubles (cannot be visualized in Slicer)", + argstr="--DTI_double ", + ) verbose = traits.Bool(desc="produce verbose output", argstr="--verbose ") @@ -57,112 +68,112 @@ class dtiaverage(SEMLikeCommandLine): input_spec = dtiaverageInputSpec output_spec = dtiaverageOutputSpec _cmd = " dtiaverage " - _outputs_filenames = {'tensor_output': 'tensor_output.nii'} + _outputs_filenames = {"tensor_output": "tensor_output.nii"} _redirect_x = False class dtiestimInputSpec(CommandLineInputSpec): dwi_image = File( - desc="DWI image volume (required)", - exists=True, - argstr="--dwi_image %s") + desc="DWI image volume (required)", exists=True, argstr="--dwi_image %s" + ) tensor_output = traits.Either( traits.Bool, File(), hash_files=False, desc="Tensor OutputImage", - argstr="--tensor_output %s") + argstr="--tensor_output %s", + ) B0 = traits.Either( traits.Bool, File(), hash_files=False, desc="Baseline image, average of all baseline images", - argstr="--B0 %s") + argstr="--B0 %s", + ) idwi = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "idwi output image. Image with isotropic diffusion-weighted information = geometric mean of diffusion images", - argstr="--idwi %s") + desc="idwi output image. Image with isotropic diffusion-weighted information = geometric mean of diffusion images", + argstr="--idwi %s", + ) B0_mask_output = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "B0 mask used for the estimation. B0 thresholded either with the -t option value or the automatic OTSU value", - argstr="--B0_mask_output %s") + desc="B0 mask used for the estimation. B0 thresholded either with the -t option value or the automatic OTSU value", + argstr="--B0_mask_output %s", + ) brain_mask = File( - desc= - "Brain mask. Image where for every voxel == 0 the tensors are not estimated. Be aware that in addition a threshold based masking will be performed by default. If such an additional threshold masking is NOT desired, then use option -t 0.", + desc="Brain mask. Image where for every voxel == 0 the tensors are not estimated. Be aware that in addition a threshold based masking will be performed by default. If such an additional threshold masking is NOT desired, then use option -t 0.", exists=True, - argstr="--brain_mask %s") + argstr="--brain_mask %s", + ) bad_region_mask = File( - desc= - "Bad region mask. Image where for every voxel > 0 the tensors are not estimated", + desc="Bad region mask. Image where for every voxel > 0 the tensors are not estimated", exists=True, - argstr="--bad_region_mask %s") + argstr="--bad_region_mask %s", + ) method = traits.Enum( "lls", "wls", "nls", "ml", - desc= - "Esitmation method (lls:linear least squares, wls:weighted least squares, nls:non-linear least squares, ml:maximum likelihood)", - argstr="--method %s") + desc="Esitmation method (lls:linear least squares, wls:weighted least squares, nls:non-linear least squares, ml:maximum likelihood)", + argstr="--method %s", + ) correction = traits.Enum( "none", "zero", "abs", "nearest", - desc= - "Correct the tensors if computed tensor is not semi-definite positive", - argstr="--correction %s") + desc="Correct the tensors if computed tensor is not semi-definite positive", + argstr="--correction %s", + ) threshold = traits.Int( - desc= - "Baseline threshold for estimation. 
If not specified calculated using an OTSU threshold on the baseline image.", - argstr="--threshold %d") + desc="Baseline threshold for estimation. If not specified calculated using an OTSU threshold on the baseline image.", + argstr="--threshold %d", + ) weight_iterations = traits.Int( - desc= - "Number of iterations to recaluate weightings from tensor estimate", - argstr="--weight_iterations %d") + desc="Number of iterations to recaluate weightings from tensor estimate", + argstr="--weight_iterations %d", + ) step = traits.Float( - desc="Gradient descent step size (for nls and ml methods)", - argstr="--step %f") + desc="Gradient descent step size (for nls and ml methods)", argstr="--step %f" + ) sigma = traits.Float(argstr="--sigma %f") DTI_double = traits.Bool( - desc= - "Tensor components are saved as doubles (cannot be visualized in Slicer)", - argstr="--DTI_double ") + desc="Tensor components are saved as doubles (cannot be visualized in Slicer)", + argstr="--DTI_double ", + ) verbose = traits.Bool(desc="produce verbose output", argstr="--verbose ") defaultTensor = InputMultiPath( traits.Float, - desc= - "Default tensor used if estimated tensor is below a given threshold", + desc="Default tensor used if estimated tensor is below a given threshold", sep=",", - argstr="--defaultTensor %s") + argstr="--defaultTensor %s", + ) shiftNeg = traits.Bool( - desc= - "Shift eigenvalues so all are positive (accounts for bad tensors related to noise or acquisition error). This is the same option as the one available in DWIToDTIEstimation in Slicer (but instead of just adding the minimum eigenvalue to all the eigenvalues if it is smaller than 0, we use a coefficient to have stictly positive eigenvalues", - argstr="--shiftNeg ") + desc="Shift eigenvalues so all are positive (accounts for bad tensors related to noise or acquisition error). This is the same option as the one available in DWIToDTIEstimation in Slicer (but instead of just adding the minimum eigenvalue to all the eigenvalues if it is smaller than 0, we use a coefficient to have stictly positive eigenvalues", + argstr="--shiftNeg ", + ) shiftNegCoeff = traits.Float( - desc= - "Shift eigenvalues so all are positive (accounts for bad tensors related to noise or acquisition error). Instead of just adding the minimum eigenvalue to all the eigenvalues if it is smaller than 0, we use a coefficient to have stictly positive eigenvalues. Coefficient must be between 1.0 and 1.001 (included).", - argstr="--shiftNegCoeff %f") + desc="Shift eigenvalues so all are positive (accounts for bad tensors related to noise or acquisition error). Instead of just adding the minimum eigenvalue to all the eigenvalues if it is smaller than 0, we use a coefficient to have stictly positive eigenvalues. Coefficient must be between 1.0 and 1.001 (included).", + argstr="--shiftNegCoeff %f", + ) class dtiestimOutputSpec(TraitedSpec): tensor_output = File(desc="Tensor OutputImage", exists=True) - B0 = File( - desc="Baseline image, average of all baseline images", exists=True) + B0 = File(desc="Baseline image, average of all baseline images", exists=True) idwi = File( - desc= - "idwi output image. Image with isotropic diffusion-weighted information = geometric mean of diffusion images", - exists=True) + desc="idwi output image. Image with isotropic diffusion-weighted information = geometric mean of diffusion images", + exists=True, + ) B0_mask_output = File( - desc= - "B0 mask used for the estimation. 
B0 thresholded either with the -t option value or the automatic OTSU value", - exists=True) + desc="B0 mask used for the estimation. B0 thresholded either with the -t option value or the automatic OTSU value", + exists=True, + ) class dtiestim(SEMLikeCommandLine): @@ -212,165 +223,177 @@ class dtiestim(SEMLikeCommandLine): output_spec = dtiestimOutputSpec _cmd = " dtiestim " _outputs_filenames = { - 'B0': 'B0.nii', - 'idwi': 'idwi.nii', - 'tensor_output': 'tensor_output.nii', - 'B0_mask_output': 'B0_mask_output.nii' + "B0": "B0.nii", + "idwi": "idwi.nii", + "tensor_output": "tensor_output.nii", + "B0_mask_output": "B0_mask_output.nii", } _redirect_x = False class dtiprocessInputSpec(CommandLineInputSpec): - dti_image = File( - desc="DTI tensor volume", exists=True, argstr="--dti_image %s") + dti_image = File(desc="DTI tensor volume", exists=True, argstr="--dti_image %s") fa_output = traits.Either( traits.Bool, File(), hash_files=False, desc="Fractional Anisotropy output file", - argstr="--fa_output %s") + argstr="--fa_output %s", + ) md_output = traits.Either( traits.Bool, File(), hash_files=False, desc="Mean Diffusivity output file", - argstr="--md_output %s") + argstr="--md_output %s", + ) sigma = traits.Float(desc="Scale of gradients", argstr="--sigma %f") fa_gradient_output = traits.Either( traits.Bool, File(), hash_files=False, desc="Fractional Anisotropy Gradient output file", - argstr="--fa_gradient_output %s") + argstr="--fa_gradient_output %s", + ) fa_gradmag_output = traits.Either( traits.Bool, File(), hash_files=False, desc="Fractional Anisotropy Gradient Magnitude output file", - argstr="--fa_gradmag_output %s") + argstr="--fa_gradmag_output %s", + ) color_fa_output = traits.Either( traits.Bool, File(), hash_files=False, desc="Color Fractional Anisotropy output file", - argstr="--color_fa_output %s") + argstr="--color_fa_output %s", + ) principal_eigenvector_output = traits.Either( traits.Bool, File(), hash_files=False, desc="Principal Eigenvectors Output", - argstr="--principal_eigenvector_output %s") + argstr="--principal_eigenvector_output %s", + ) negative_eigenvector_output = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Negative Eigenvectors Output: create a binary image where if any of the eigen value is below zero, the voxel is set to 1, otherwise 0.", - argstr="--negative_eigenvector_output %s") + desc="Negative Eigenvectors Output: create a binary image where if any of the eigen value is below zero, the voxel is set to 1, otherwise 0.", + argstr="--negative_eigenvector_output %s", + ) frobenius_norm_output = traits.Either( traits.Bool, File(), hash_files=False, desc="Frobenius Norm Output", - argstr="--frobenius_norm_output %s") + argstr="--frobenius_norm_output %s", + ) lambda1_output = traits.Either( traits.Bool, File(), hash_files=False, desc="Axial Diffusivity - Lambda 1 (largest eigenvalue) output", - argstr="--lambda1_output %s") + argstr="--lambda1_output %s", + ) lambda2_output = traits.Either( traits.Bool, File(), hash_files=False, desc="Lambda 2 (middle eigenvalue) output", - argstr="--lambda2_output %s") + argstr="--lambda2_output %s", + ) lambda3_output = traits.Either( traits.Bool, File(), hash_files=False, desc="Lambda 3 (smallest eigenvalue) output", - argstr="--lambda3_output %s") + argstr="--lambda3_output %s", + ) RD_output = traits.Either( traits.Bool, File(), hash_files=False, desc="RD (Radial Diffusivity 1/2*(lambda2+lambda3)) output", - argstr="--RD_output %s") + argstr="--RD_output %s", + ) rot_output = traits.Either( 
traits.Bool, File(), hash_files=False, desc="Rotated tensor output file. Must also specify the dof file.", - argstr="--rot_output %s") + argstr="--rot_output %s", + ) affineitk_file = File( desc="Transformation file for affine transformation. ITK format.", exists=True, - argstr="--affineitk_file %s") + argstr="--affineitk_file %s", + ) dof_file = File( - desc= - "Transformation file for affine transformation. This can be ITK format (or the outdated RView).", + desc="Transformation file for affine transformation. This can be ITK format (or the outdated RView).", exists=True, - argstr="--dof_file %s") + argstr="--dof_file %s", + ) newdof_file = File( - desc= - "Transformation file for affine transformation. RView NEW format. (txt file output of dof2mat)", + desc="Transformation file for affine transformation. RView NEW format. (txt file output of dof2mat)", exists=True, - argstr="--newdof_file %s") + argstr="--newdof_file %s", + ) mask = File( - desc= - "Mask tensors. Specify --outmask if you want to save the masked tensor field, otherwise the mask is applied just for the current processing ", + desc="Mask tensors. Specify --outmask if you want to save the masked tensor field, otherwise the mask is applied just for the current processing ", exists=True, - argstr="--mask %s") + argstr="--mask %s", + ) outmask = traits.Either( traits.Bool, File(), hash_files=False, desc="Name of the masked tensor field.", - argstr="--outmask %s") + argstr="--outmask %s", + ) hField = traits.Bool( - desc= - "forward and inverse transformations are h-fields instead of displacement fields", - argstr="--hField ") + desc="forward and inverse transformations are h-fields instead of displacement fields", + argstr="--hField ", + ) forward = File( - desc= - "Forward transformation. Assumed to be a deformation field in world coordinates, unless the --h-field option is specified.", + desc="Forward transformation. Assumed to be a deformation field in world coordinates, unless the --h-field option is specified.", exists=True, - argstr="--forward %s") + argstr="--forward %s", + ) deformation_output = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Warped tensor field based on a deformation field. This option requires the --forward,-F transformation to be specified.", - argstr="--deformation_output %s") + desc="Warped tensor field based on a deformation field. This option requires the --forward,-F transformation to be specified.", + argstr="--deformation_output %s", + ) interpolation = traits.Enum( "nearestneighbor", "linear", "cubic", desc="Interpolation type (nearestneighbor, linear, cubic)", - argstr="--interpolation %s") + argstr="--interpolation %s", + ) reorientation = traits.Enum( - "fs", - "ppd", - desc="Reorientation type (fs, ppd)", - argstr="--reorientation %s") + "fs", "ppd", desc="Reorientation type (fs, ppd)", argstr="--reorientation %s" + ) correction = traits.Enum( "none", "zero", "abs", "nearest", - desc= - "Correct the tensors if computed tensor is not semi-definite positive", - argstr="--correction %s") + desc="Correct the tensors if computed tensor is not semi-definite positive", + argstr="--correction %s", + ) scalar_float = traits.Bool( - desc= - "Write scalar [FA,MD] as unscaled float (with their actual values, otherwise scaled by 10 000). Also causes FA to be unscaled [0..1].", - argstr="--scalar_float ") + desc="Write scalar [FA,MD] as unscaled float (with their actual values, otherwise scaled by 10 000). 
Also causes FA to be unscaled [0..1].", + argstr="--scalar_float ", + ) DTI_double = traits.Bool( - desc= - "Tensor components are saved as doubles (cannot be visualized in Slicer)", - argstr="--DTI_double ") + desc="Tensor components are saved as doubles (cannot be visualized in Slicer)", + argstr="--DTI_double ", + ) verbose = traits.Bool(desc="produce verbose output", argstr="--verbose ") @@ -378,37 +401,36 @@ class dtiprocessOutputSpec(TraitedSpec): fa_output = File(desc="Fractional Anisotropy output file", exists=True) md_output = File(desc="Mean Diffusivity output file", exists=True) fa_gradient_output = File( - desc="Fractional Anisotropy Gradient output file", exists=True) + desc="Fractional Anisotropy Gradient output file", exists=True + ) fa_gradmag_output = File( - desc="Fractional Anisotropy Gradient Magnitude output file", - exists=True) - color_fa_output = File( - desc="Color Fractional Anisotropy output file", exists=True) + desc="Fractional Anisotropy Gradient Magnitude output file", exists=True + ) + color_fa_output = File(desc="Color Fractional Anisotropy output file", exists=True) principal_eigenvector_output = File( - desc="Principal Eigenvectors Output", exists=True) + desc="Principal Eigenvectors Output", exists=True + ) negative_eigenvector_output = File( - desc= - "Negative Eigenvectors Output: create a binary image where if any of the eigen value is below zero, the voxel is set to 1, otherwise 0.", - exists=True) + desc="Negative Eigenvectors Output: create a binary image where if any of the eigen value is below zero, the voxel is set to 1, otherwise 0.", + exists=True, + ) frobenius_norm_output = File(desc="Frobenius Norm Output", exists=True) lambda1_output = File( - desc="Axial Diffusivity - Lambda 1 (largest eigenvalue) output", - exists=True) - lambda2_output = File( - desc="Lambda 2 (middle eigenvalue) output", exists=True) - lambda3_output = File( - desc="Lambda 3 (smallest eigenvalue) output", exists=True) + desc="Axial Diffusivity - Lambda 1 (largest eigenvalue) output", exists=True + ) + lambda2_output = File(desc="Lambda 2 (middle eigenvalue) output", exists=True) + lambda3_output = File(desc="Lambda 3 (smallest eigenvalue) output", exists=True) RD_output = File( - desc="RD (Radial Diffusivity 1/2*(lambda2+lambda3)) output", - exists=True) + desc="RD (Radial Diffusivity 1/2*(lambda2+lambda3)) output", exists=True + ) rot_output = File( - desc="Rotated tensor output file. Must also specify the dof file.", - exists=True) + desc="Rotated tensor output file. Must also specify the dof file.", exists=True + ) outmask = File(desc="Name of the masked tensor field.", exists=True) deformation_output = File( - desc= - "Warped tensor field based on a deformation field. This option requires the --forward,-F transformation to be specified.", - exists=True) + desc="Warped tensor field based on a deformation field. 
This option requires the --forward,-F transformation to be specified.", + exists=True, + ) class dtiprocess(SEMLikeCommandLine): @@ -448,21 +470,21 @@ class dtiprocess(SEMLikeCommandLine): output_spec = dtiprocessOutputSpec _cmd = " dtiprocess " _outputs_filenames = { - 'fa_gradmag_output': 'fa_gradmag_output.nii', - 'fa_gradient_output': 'fa_gradient_output.nii', - 'lambda1_output': 'lambda1_output.nii', - 'lambda2_output': 'lambda2_output.nii', - 'color_fa_output': 'color_fa_output.nii', - 'fa_output': 'fa_output.nii', - 'frobenius_norm_output': 'frobenius_norm_output.nii', - 'principal_eigenvector_output': 'principal_eigenvector_output.nii', - 'outmask': 'outmask.nii', - 'lambda3_output': 'lambda3_output.nii', - 'negative_eigenvector_output': 'negative_eigenvector_output.nii', - 'md_output': 'md_output.nii', - 'RD_output': 'RD_output.nii', - 'deformation_output': 'deformation_output.nii', - 'rot_output': 'rot_output.nii' + "fa_gradmag_output": "fa_gradmag_output.nii", + "fa_gradient_output": "fa_gradient_output.nii", + "lambda1_output": "lambda1_output.nii", + "lambda2_output": "lambda2_output.nii", + "color_fa_output": "color_fa_output.nii", + "fa_output": "fa_output.nii", + "frobenius_norm_output": "frobenius_norm_output.nii", + "principal_eigenvector_output": "principal_eigenvector_output.nii", + "outmask": "outmask.nii", + "lambda3_output": "lambda3_output.nii", + "negative_eigenvector_output": "negative_eigenvector_output.nii", + "md_output": "md_output.nii", + "RD_output": "RD_output.nii", + "deformation_output": "deformation_output.nii", + "rot_output": "rot_output.nii", } _redirect_x = False @@ -473,103 +495,112 @@ class DWIConvertInputSpec(CommandLineInputSpec): "DicomToFSL", "NrrdToFSL", "FSLToNrrd", - desc= - "Determine which conversion to perform. DicomToNrrd (default): Convert DICOM series to NRRD DicomToFSL: Convert DICOM series to NIfTI File + gradient/bvalue text files NrrdToFSL: Convert DWI NRRD file to NIfTI File + gradient/bvalue text files FSLToNrrd: Convert NIfTI File + gradient/bvalue text files to NRRD file.", - argstr="--conversionMode %s") + desc="Determine which conversion to perform. 
DicomToNrrd (default): Convert DICOM series to NRRD DicomToFSL: Convert DICOM series to NIfTI File + gradient/bvalue text files NrrdToFSL: Convert DWI NRRD file to NIfTI File + gradient/bvalue text files FSLToNrrd: Convert NIfTI File + gradient/bvalue text files to NRRD file.", + argstr="--conversionMode %s", + ) inputVolume = File( desc="Input DWI volume -- not used for DicomToNrrd mode.", exists=True, - argstr="--inputVolume %s") + argstr="--inputVolume %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Output filename (.nhdr or .nrrd)", - argstr="--outputVolume %s") + argstr="--outputVolume %s", + ) inputDicomDirectory = Directory( desc="Directory holding Dicom series", exists=True, - argstr="--inputDicomDirectory %s") + argstr="--inputDicomDirectory %s", + ) fslNIFTIFile = File( desc="4D NIfTI file containing gradient volumes", exists=True, - argstr="--fslNIFTIFile %s") + argstr="--fslNIFTIFile %s", + ) inputBValues = File( desc="The B Values are stored in FSL .bval text file format", exists=True, - argstr="--inputBValues %s") + argstr="--inputBValues %s", + ) inputBVectors = File( desc="The Gradient Vectors are stored in FSL .bvec text file format", exists=True, - argstr="--inputBVectors %s") + argstr="--inputBVectors %s", + ) outputBValues = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "The B Values are stored in FSL .bval text file format (defaults to .bval)", - argstr="--outputBValues %s") + desc="The B Values are stored in FSL .bval text file format (defaults to .bval)", + argstr="--outputBValues %s", + ) outputBVectors = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "The Gradient Vectors are stored in FSL .bvec text file format (defaults to .bvec)", - argstr="--outputBVectors %s") + desc="The Gradient Vectors are stored in FSL .bvec text file format (defaults to .bvec)", + argstr="--outputBVectors %s", + ) fMRI = traits.Bool( - desc="Output a NRRD file, but without gradients", argstr="--fMRI ") + desc="Output a NRRD file, but without gradients", argstr="--fMRI " + ) writeProtocolGradientsFile = traits.Bool( - desc= - "Write the protocol gradients to a file suffixed by \'.txt\' as they were specified in the procol by multiplying each diffusion gradient direction by the measurement frame. This file is for debugging purposes only, the format is not fixed, and will likely change as debugging of new dicom formats is necessary.", - argstr="--writeProtocolGradientsFile ") + desc="Write the protocol gradients to a file suffixed by '.txt' as they were specified in the procol by multiplying each diffusion gradient direction by the measurement frame. This file is for debugging purposes only, the format is not fixed, and will likely change as debugging of new dicom formats is necessary.", + argstr="--writeProtocolGradientsFile ", + ) useIdentityMeaseurementFrame = traits.Bool( - desc= - "Adjust all the gradients so that the measurement frame is an identity matrix.", - argstr="--useIdentityMeaseurementFrame ") + desc="Adjust all the gradients so that the measurement frame is an identity matrix.", + argstr="--useIdentityMeaseurementFrame ", + ) useBMatrixGradientDirections = traits.Bool( - desc= - "Fill the nhdr header with the gradient directions and bvalues computed out of the BMatrix. Only changes behavior for Siemens data. In some cases the standard public gradients are not properly computed. The gradients can emperically computed from the private BMatrix fields. 
In some cases the private BMatrix is consistent with the public grandients, but not in all cases, when it exists BMatrix is usually most robust.", - argstr="--useBMatrixGradientDirections ") + desc="Fill the nhdr header with the gradient directions and bvalues computed out of the BMatrix. Only changes behavior for Siemens data. In some cases the standard public gradients are not properly computed. The gradients can emperically computed from the private BMatrix fields. In some cases the private BMatrix is consistent with the public grandients, but not in all cases, when it exists BMatrix is usually most robust.", + argstr="--useBMatrixGradientDirections ", + ) outputDirectory = traits.Either( traits.Bool, Directory(), hash_files=False, desc="Directory holding the output NRRD file", - argstr="--outputDirectory %s") + argstr="--outputDirectory %s", + ) gradientVectorFile = traits.Either( traits.Bool, File(), hash_files=False, desc="Text file giving gradient vectors", - argstr="--gradientVectorFile %s") + argstr="--gradientVectorFile %s", + ) smallGradientThreshold = traits.Float( - desc= - "If a gradient magnitude is greater than 0 and less than smallGradientThreshold, then DWIConvert will display an error message and quit, unless the useBMatrixGradientDirections option is set.", - argstr="--smallGradientThreshold %f") + desc="If a gradient magnitude is greater than 0 and less than smallGradientThreshold, then DWIConvert will display an error message and quit, unless the useBMatrixGradientDirections option is set.", + argstr="--smallGradientThreshold %f", + ) allowLossyConversion = traits.Bool( - desc= - "The only supported output type is \'short\'. Conversion from images of a different type may cause data loss due to rounding or truncation. Use with caution!", - argstr="--allowLossyConversion ") + desc="The only supported output type is 'short'. Conversion from images of a different type may cause data loss due to rounding or truncation. Use with caution!", + argstr="--allowLossyConversion ", + ) transposeInputBVectors = traits.Bool( - desc= - "FSL input BVectors are expected to be encoded in the input file as one vector per line. If it is not the case, use this option to transpose the file as it is read.", - argstr="--transposeInputBVectors ") + desc="FSL input BVectors are expected to be encoded in the input file as one vector per line. 
If it is not the case, use this option to transpose the file as it is read.", + argstr="--transposeInputBVectors ", + ) class DWIConvertOutputSpec(TraitedSpec): outputVolume = File(desc="Output filename (.nhdr or .nrrd)", exists=True) outputBValues = File( - desc= - "The B Values are stored in FSL .bval text file format (defaults to .bval)", - exists=True) + desc="The B Values are stored in FSL .bval text file format (defaults to .bval)", + exists=True, + ) outputBVectors = File( - desc= - "The Gradient Vectors are stored in FSL .bvec text file format (defaults to .bvec)", - exists=True) + desc="The Gradient Vectors are stored in FSL .bvec text file format (defaults to .bvec)", + exists=True, + ) outputDirectory = Directory( - desc="Directory holding the output NRRD file", exists=True) - gradientVectorFile = File( - desc="Text file giving gradient vectors", exists=True) + desc="Directory holding the output NRRD file", exists=True + ) + gradientVectorFile = File(desc="Text file giving gradient vectors", exists=True) class DWIConvert(SEMLikeCommandLine): @@ -595,10 +626,10 @@ class DWIConvert(SEMLikeCommandLine): output_spec = DWIConvertOutputSpec _cmd = " DWIConvert " _outputs_filenames = { - 'outputVolume': 'outputVolume.nii', - 'outputDirectory': 'outputDirectory', - 'outputBValues': 'outputBValues.bval', - 'gradientVectorFile': 'gradientVectorFile', - 'outputBVectors': 'outputBVectors.bvec' + "outputVolume": "outputVolume.nii", + "outputDirectory": "outputDirectory", + "outputBValues": "outputBValues.bval", + "gradientVectorFile": "gradientVectorFile", + "outputBVectors": "outputBVectors.bvec", } _redirect_x = False diff --git a/nipype/interfaces/semtools/diffusion/gtract.py b/nipype/interfaces/semtools/diffusion/gtract.py index 999c898599..0ad255b536 100644 --- a/nipype/interfaces/semtools/diffusion/gtract.py +++ b/nipype/interfaces/semtools/diffusion/gtract.py @@ -5,35 +5,44 @@ import os -from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, - TraitedSpec, File, Directory, traits, isdefined, - InputMultiPath, OutputMultiPath) +from ...base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) class gtractTransformToDisplacementFieldInputSpec(CommandLineInputSpec): inputTransform = File( - desc="Input Transform File Name", - exists=True, - argstr="--inputTransform %s") + desc="Input Transform File Name", exists=True, argstr="--inputTransform %s" + ) inputReferenceVolume = File( - desc= - "Required: input image file name to exemplify the anatomical space over which to vcl_express the transform as a displacement field.", + desc="Required: input image file name to exemplify the anatomical space over which to vcl_express the transform as a displacement field.", exists=True, - argstr="--inputReferenceVolume %s") + argstr="--inputReferenceVolume %s", + ) outputDeformationFieldVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Output deformation field", - argstr="--outputDeformationFieldVolume %s") + argstr="--outputDeformationFieldVolume %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class gtractTransformToDisplacementFieldOutputSpec(TraitedSpec): - outputDeformationFieldVolume = File( - desc="Output deformation field", exists=True) + outputDeformationFieldVolume = File(desc="Output deformation field", exists=True) 
class gtractTransformToDisplacementField(SEMLikeCommandLine): @@ -59,40 +68,43 @@ class gtractTransformToDisplacementField(SEMLikeCommandLine): output_spec = gtractTransformToDisplacementFieldOutputSpec _cmd = " gtractTransformToDisplacementField " _outputs_filenames = { - 'outputDeformationFieldVolume': 'outputDeformationFieldVolume.nii' + "outputDeformationFieldVolume": "outputDeformationFieldVolume.nii" } _redirect_x = False class gtractInvertBSplineTransformInputSpec(CommandLineInputSpec): inputReferenceVolume = File( - desc= - "Required: input image file name to exemplify the anatomical space to interpolate over.", + desc="Required: input image file name to exemplify the anatomical space to interpolate over.", exists=True, - argstr="--inputReferenceVolume %s") + argstr="--inputReferenceVolume %s", + ) inputTransform = File( desc="Required: input B-Spline transform file name", exists=True, - argstr="--inputTransform %s") + argstr="--inputTransform %s", + ) outputTransform = traits.Either( traits.Bool, File(), hash_files=False, desc="Required: output transform file name", - argstr="--outputTransform %s") + argstr="--outputTransform %s", + ) landmarkDensity = InputMultiPath( traits.Int, desc="Number of landmark subdivisions in all 3 directions", sep=",", - argstr="--landmarkDensity %s") + argstr="--landmarkDensity %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class gtractInvertBSplineTransformOutputSpec(TraitedSpec): - outputTransform = File( - desc="Required: output transform file name", exists=True) + outputTransform = File(desc="Required: output transform file name", exists=True) class gtractInvertBSplineTransform(SEMLikeCommandLine): @@ -117,37 +129,38 @@ class gtractInvertBSplineTransform(SEMLikeCommandLine): input_spec = gtractInvertBSplineTransformInputSpec output_spec = gtractInvertBSplineTransformOutputSpec _cmd = " gtractInvertBSplineTransform " - _outputs_filenames = {'outputTransform': 'outputTransform.h5'} + _outputs_filenames = {"outputTransform": "outputTransform.h5"} _redirect_x = False class gtractConcatDwiInputSpec(CommandLineInputSpec): inputVolume = InputMultiPath( File(exists=True), - desc= - "Required: input file containing the first diffusion weighted image", - argstr="--inputVolume %s...") + desc="Required: input file containing the first diffusion weighted image", + argstr="--inputVolume %s...", + ) ignoreOrigins = traits.Bool( - desc= - "If image origins are different force all images to origin of first image", - argstr="--ignoreOrigins ") + desc="If image origins are different force all images to origin of first image", + argstr="--ignoreOrigins ", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Required: name of output NRRD file containing the combined diffusion weighted images.", - argstr="--outputVolume %s") + desc="Required: name of output NRRD file containing the combined diffusion weighted images.", + argstr="--outputVolume %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class gtractConcatDwiOutputSpec(TraitedSpec): outputVolume = File( - desc= - "Required: name of output NRRD file containing the combined diffusion weighted images.", - exists=True) + desc="Required: name of output NRRD file containing the combined diffusion weighted images.", + exists=True, + ) class 
gtractConcatDwi(SEMLikeCommandLine): @@ -172,40 +185,42 @@ class gtractConcatDwi(SEMLikeCommandLine): input_spec = gtractConcatDwiInputSpec output_spec = gtractConcatDwiOutputSpec _cmd = " gtractConcatDwi " - _outputs_filenames = {'outputVolume': 'outputVolume.nrrd'} + _outputs_filenames = {"outputVolume": "outputVolume.nrrd"} _redirect_x = False class gtractAverageBvaluesInputSpec(CommandLineInputSpec): inputVolume = File( - desc= - "Required: input image file name containing multiple baseline gradients to average", + desc="Required: input image file name containing multiple baseline gradients to average", exists=True, - argstr="--inputVolume %s") + argstr="--inputVolume %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Required: name of output NRRD file containing directly averaged baseline images", - argstr="--outputVolume %s") + desc="Required: name of output NRRD file containing directly averaged baseline images", + argstr="--outputVolume %s", + ) directionsTolerance = traits.Float( desc="Tolerance for matching identical gradient direction pairs", - argstr="--directionsTolerance %f") + argstr="--directionsTolerance %f", + ) averageB0only = traits.Bool( - desc= - "Average only baseline gradients. All other gradient directions are not averaged, but retained in the outputVolume", - argstr="--averageB0only ") + desc="Average only baseline gradients. All other gradient directions are not averaged, but retained in the outputVolume", + argstr="--averageB0only ", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class gtractAverageBvaluesOutputSpec(TraitedSpec): outputVolume = File( - desc= - "Required: name of output NRRD file containing directly averaged baseline images", - exists=True) + desc="Required: name of output NRRD file containing directly averaged baseline images", + exists=True, + ) class gtractAverageBvalues(SEMLikeCommandLine): @@ -230,90 +245,92 @@ class gtractAverageBvalues(SEMLikeCommandLine): input_spec = gtractAverageBvaluesInputSpec output_spec = gtractAverageBvaluesOutputSpec _cmd = " gtractAverageBvalues " - _outputs_filenames = {'outputVolume': 'outputVolume.nrrd'} + _outputs_filenames = {"outputVolume": "outputVolume.nrrd"} _redirect_x = False class gtractCoregBvaluesInputSpec(CommandLineInputSpec): movingVolume = File( - desc= - "Required: input moving image file name. In order to register gradients within a scan to its first gradient, set the movingVolume and fixedVolume as the same image.", + desc="Required: input moving image file name. In order to register gradients within a scan to its first gradient, set the movingVolume and fixedVolume as the same image.", exists=True, - argstr="--movingVolume %s") + argstr="--movingVolume %s", + ) fixedVolume = File( - desc= - "Required: input fixed image file name. It is recommended that this image should either contain or be a b0 image.", + desc="Required: input fixed image file name. It is recommended that this image should either contain or be a b0 image.", exists=True, - argstr="--fixedVolume %s") + argstr="--fixedVolume %s", + ) fixedVolumeIndex = traits.Int( - desc= - "Index in the fixed image for registration. It is recommended that this image should be a b0 image.", - argstr="--fixedVolumeIndex %d") + desc="Index in the fixed image for registration. 
It is recommended that this image should be a b0 image.", + argstr="--fixedVolumeIndex %d", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Required: name of output NRRD file containing moving images individually resampled and fit to the specified fixed image index.", - argstr="--outputVolume %s") + desc="Required: name of output NRRD file containing moving images individually resampled and fit to the specified fixed image index.", + argstr="--outputVolume %s", + ) outputTransform = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Registration 3D transforms concatenated in a single output file. There are no tools that can use this, but can be used for debugging purposes.", - argstr="--outputTransform %s") + desc="Registration 3D transforms concatenated in a single output file. There are no tools that can use this, but can be used for debugging purposes.", + argstr="--outputTransform %s", + ) eddyCurrentCorrection = traits.Bool( - desc= - "Flag to perform eddy current corection in addition to motion correction (recommended)", - argstr="--eddyCurrentCorrection ") + desc="Flag to perform eddy current corection in addition to motion correction (recommended)", + argstr="--eddyCurrentCorrection ", + ) numberOfIterations = traits.Int( - desc="Number of iterations in each 3D fit", - argstr="--numberOfIterations %d") + desc="Number of iterations in each 3D fit", argstr="--numberOfIterations %d" + ) numberOfSpatialSamples = traits.Int( - desc= - "The number of voxels sampled for mutual information computation. Increase this for a slower, more careful fit. NOTE that it is suggested to use samplingPercentage instead of this option. However, if set, it overwrites the samplingPercentage option. ", - argstr="--numberOfSpatialSamples %d") + desc="The number of voxels sampled for mutual information computation. Increase this for a slower, more careful fit. NOTE that it is suggested to use samplingPercentage instead of this option. However, if set, it overwrites the samplingPercentage option. ", + argstr="--numberOfSpatialSamples %d", + ) samplingPercentage = traits.Float( - desc= - "This is a number in (0.0,1.0] interval that shows the percentage of the input fixed image voxels that are sampled for mutual information computation. Increase this for a slower, more careful fit. You can also limit the sampling focus with ROI masks and ROIAUTO mask generation. The default is to use approximately 5% of voxels (for backwards compatibility 5% ~= 500000/(256*256*256)). Typical values range from 1% for low detail images to 20% for high detail images.", - argstr="--samplingPercentage %f") + desc="This is a number in (0.0,1.0] interval that shows the percentage of the input fixed image voxels that are sampled for mutual information computation. Increase this for a slower, more careful fit. You can also limit the sampling focus with ROI masks and ROIAUTO mask generation. The default is to use approximately 5% of voxels (for backwards compatibility 5% ~= 500000/(256*256*256)). 
Typical values range from 1% for low detail images to 20% for high detail images.", + argstr="--samplingPercentage %f", + ) relaxationFactor = traits.Float( - desc= - "Fraction of gradient from Jacobian to attempt to move in each 3D fit step (adjust when eddyCurrentCorrection is enabled; suggested value = 0.25)", - argstr="--relaxationFactor %f") + desc="Fraction of gradient from Jacobian to attempt to move in each 3D fit step (adjust when eddyCurrentCorrection is enabled; suggested value = 0.25)", + argstr="--relaxationFactor %f", + ) maximumStepSize = traits.Float( - desc= - "Maximum permitted step size to move in each 3D fit step (adjust when eddyCurrentCorrection is enabled; suggested value = 0.1)", - argstr="--maximumStepSize %f") + desc="Maximum permitted step size to move in each 3D fit step (adjust when eddyCurrentCorrection is enabled; suggested value = 0.1)", + argstr="--maximumStepSize %f", + ) minimumStepSize = traits.Float( - desc= - "Minimum required step size to move in each 3D fit step without converging -- decrease this to make the fit more exacting", - argstr="--minimumStepSize %f") + desc="Minimum required step size to move in each 3D fit step without converging -- decrease this to make the fit more exacting", + argstr="--minimumStepSize %f", + ) spatialScale = traits.Float( - desc= - "How much to scale up changes in position compared to unit rotational changes in radians -- decrease this to put more rotation in the fit", - argstr="--spatialScale %f") + desc="How much to scale up changes in position compared to unit rotational changes in radians -- decrease this to put more rotation in the fit", + argstr="--spatialScale %f", + ) registerB0Only = traits.Bool( - desc="Register the B0 images only", argstr="--registerB0Only ") + desc="Register the B0 images only", argstr="--registerB0Only " + ) debugLevel = traits.Int( - desc= - "Display debug messages, and produce debug intermediate results. 0=OFF, 1=Minimal, 10=Maximum debugging.", - argstr="--debugLevel %d") + desc="Display debug messages, and produce debug intermediate results. 0=OFF, 1=Minimal, 10=Maximum debugging.", + argstr="--debugLevel %d", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class gtractCoregBvaluesOutputSpec(TraitedSpec): outputVolume = File( - desc= - "Required: name of output NRRD file containing moving images individually resampled and fit to the specified fixed image index.", - exists=True) + desc="Required: name of output NRRD file containing moving images individually resampled and fit to the specified fixed image index.", + exists=True, + ) outputTransform = File( - desc= - "Registration 3D transforms concatenated in a single output file. There are no tools that can use this, but can be used for debugging purposes.", - exists=True) + desc="Registration 3D transforms concatenated in a single output file. 
There are no tools that can use this, but can be used for debugging purposes.", + exists=True, + ) class gtractCoregBvalues(SEMLikeCommandLine): @@ -339,8 +356,8 @@ class gtractCoregBvalues(SEMLikeCommandLine): output_spec = gtractCoregBvaluesOutputSpec _cmd = " gtractCoregBvalues " _outputs_filenames = { - 'outputVolume': 'outputVolume.nrrd', - 'outputTransform': 'outputTransform.h5' + "outputVolume": "outputVolume.nrrd", + "outputTransform": "outputTransform.h5", } _redirect_x = False @@ -349,38 +366,42 @@ class gtractResampleAnisotropyInputSpec(CommandLineInputSpec): inputAnisotropyVolume = File( desc="Required: input file containing the anisotropy image", exists=True, - argstr="--inputAnisotropyVolume %s") + argstr="--inputAnisotropyVolume %s", + ) inputAnatomicalVolume = File( - desc= - "Required: input file containing the anatomical image whose characteristics will be cloned.", + desc="Required: input file containing the anatomical image whose characteristics will be cloned.", exists=True, - argstr="--inputAnatomicalVolume %s") + argstr="--inputAnatomicalVolume %s", + ) inputTransform = File( desc="Required: input Rigid OR Bspline transform file name", exists=True, - argstr="--inputTransform %s") + argstr="--inputTransform %s", + ) transformType = traits.Enum( "Rigid", "B-Spline", desc="Transform type: Rigid, B-Spline", - argstr="--transformType %s") + argstr="--transformType %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Required: name of output NRRD file containing the resampled transformed anisotropy image.", - argstr="--outputVolume %s") + desc="Required: name of output NRRD file containing the resampled transformed anisotropy image.", + argstr="--outputVolume %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class gtractResampleAnisotropyOutputSpec(TraitedSpec): outputVolume = File( - desc= - "Required: name of output NRRD file containing the resampled transformed anisotropy image.", - exists=True) + desc="Required: name of output NRRD file containing the resampled transformed anisotropy image.", + exists=True, + ) class gtractResampleAnisotropy(SEMLikeCommandLine): @@ -405,7 +426,7 @@ class gtractResampleAnisotropy(SEMLikeCommandLine): input_spec = gtractResampleAnisotropyInputSpec output_spec = gtractResampleAnisotropyOutputSpec _cmd = " gtractResampleAnisotropy " - _outputs_filenames = {'outputVolume': 'outputVolume.nrrd'} + _outputs_filenames = {"outputVolume": "outputVolume.nrrd"} _redirect_x = False @@ -413,16 +434,18 @@ class gtractResampleCodeImageInputSpec(CommandLineInputSpec): inputCodeVolume = File( desc="Required: input file containing the code image", exists=True, - argstr="--inputCodeVolume %s") + argstr="--inputCodeVolume %s", + ) inputReferenceVolume = File( - desc= - "Required: input file containing the standard image to clone the characteristics of.", + desc="Required: input file containing the standard image to clone the characteristics of.", exists=True, - argstr="--inputReferenceVolume %s") + argstr="--inputReferenceVolume %s", + ) inputTransform = File( desc="Required: input Rigid or Inverse-B-Spline transform file name", exists=True, - argstr="--inputTransform %s") + argstr="--inputTransform %s", + ) transformType = traits.Enum( "Rigid", "Affine", @@ -430,24 +453,26 @@ class gtractResampleCodeImageInputSpec(CommandLineInputSpec): "Inverse-B-Spline", "None", desc="Transform type: Rigid or 
Inverse-B-Spline", - argstr="--transformType %s") + argstr="--transformType %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Required: name of output NRRD file containing the resampled code image in acquisition space.", - argstr="--outputVolume %s") + desc="Required: name of output NRRD file containing the resampled code image in acquisition space.", + argstr="--outputVolume %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class gtractResampleCodeImageOutputSpec(TraitedSpec): outputVolume = File( - desc= - "Required: name of output NRRD file containing the resampled code image in acquisition space.", - exists=True) + desc="Required: name of output NRRD file containing the resampled code image in acquisition space.", + exists=True, + ) class gtractResampleCodeImage(SEMLikeCommandLine): @@ -472,37 +497,39 @@ class gtractResampleCodeImage(SEMLikeCommandLine): input_spec = gtractResampleCodeImageInputSpec output_spec = gtractResampleCodeImageOutputSpec _cmd = " gtractResampleCodeImage " - _outputs_filenames = {'outputVolume': 'outputVolume.nrrd'} + _outputs_filenames = {"outputVolume": "outputVolume.nrrd"} _redirect_x = False class gtractCopyImageOrientationInputSpec(CommandLineInputSpec): inputVolume = File( - desc= - "Required: input file containing the signed short image to reorient without resampling.", + desc="Required: input file containing the signed short image to reorient without resampling.", exists=True, - argstr="--inputVolume %s") + argstr="--inputVolume %s", + ) inputReferenceVolume = File( desc="Required: input file containing orietation that will be cloned.", exists=True, - argstr="--inputReferenceVolume %s") + argstr="--inputReferenceVolume %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Required: name of output NRRD or Nifti file containing the reoriented image in reference image space.", - argstr="--outputVolume %s") + desc="Required: name of output NRRD or Nifti file containing the reoriented image in reference image space.", + argstr="--outputVolume %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class gtractCopyImageOrientationOutputSpec(TraitedSpec): outputVolume = File( - desc= - "Required: name of output NRRD or Nifti file containing the reoriented image in reference image space.", - exists=True) + desc="Required: name of output NRRD or Nifti file containing the reoriented image in reference image space.", + exists=True, + ) class gtractCopyImageOrientation(SEMLikeCommandLine): @@ -527,7 +554,7 @@ class gtractCopyImageOrientation(SEMLikeCommandLine): input_spec = gtractCopyImageOrientationInputSpec output_spec = gtractCopyImageOrientationOutputSpec _cmd = " gtractCopyImageOrientation " - _outputs_filenames = {'outputVolume': 'outputVolume.nrrd'} + _outputs_filenames = {"outputVolume": "outputVolume.nrrd"} _redirect_x = False @@ -535,28 +562,30 @@ class gtractCreateGuideFiberInputSpec(CommandLineInputSpec): inputFiber = File( desc="Required: input fiber tract file name", exists=True, - argstr="--inputFiber %s") + argstr="--inputFiber %s", + ) numberOfPoints = traits.Int( - desc="Number of points in output guide fiber", - argstr="--numberOfPoints %d") + desc="Number of points in output guide fiber", argstr="--numberOfPoints %d" + ) outputFiber = 
traits.Either( traits.Bool, File(), hash_files=False, desc="Required: output guide fiber file name", - argstr="--outputFiber %s") + argstr="--outputFiber %s", + ) writeXMLPolyDataFile = traits.Bool( - desc= - "Flag to make use of XML files when reading and writing vtkPolyData.", - argstr="--writeXMLPolyDataFile ") + desc="Flag to make use of XML files when reading and writing vtkPolyData.", + argstr="--writeXMLPolyDataFile ", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class gtractCreateGuideFiberOutputSpec(TraitedSpec): - outputFiber = File( - desc="Required: output guide fiber file name", exists=True) + outputFiber = File(desc="Required: output guide fiber file name", exists=True) class gtractCreateGuideFiber(SEMLikeCommandLine): @@ -581,7 +610,7 @@ class gtractCreateGuideFiber(SEMLikeCommandLine): input_spec = gtractCreateGuideFiberInputSpec output_spec = gtractCreateGuideFiberOutputSpec _cmd = " gtractCreateGuideFiber " - _outputs_filenames = {'outputFiber': 'outputFiber.vtk'} + _outputs_filenames = {"outputFiber": "outputFiber.vtk"} _redirect_x = False @@ -589,7 +618,8 @@ class gtractAnisotropyMapInputSpec(CommandLineInputSpec): inputTensorVolume = File( desc="Required: input file containing the diffusion tensor image", exists=True, - argstr="--inputTensorVolume %s") + argstr="--inputTensorVolume %s", + ) anisotropyType = traits.Enum( "ADC", "FA", @@ -599,24 +629,26 @@ class gtractAnisotropyMapInputSpec(CommandLineInputSpec): "RD", "LI", desc="Anisotropy Mapping Type: ADC, FA, RA, VR, AD, RD, LI", - argstr="--anisotropyType %s") + argstr="--anisotropyType %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Required: name of output NRRD file containing the selected kind of anisotropy scalar.", - argstr="--outputVolume %s") + desc="Required: name of output NRRD file containing the selected kind of anisotropy scalar.", + argstr="--outputVolume %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class gtractAnisotropyMapOutputSpec(TraitedSpec): outputVolume = File( - desc= - "Required: name of output NRRD file containing the selected kind of anisotropy scalar.", - exists=True) + desc="Required: name of output NRRD file containing the selected kind of anisotropy scalar.", + exists=True, + ) class gtractAnisotropyMap(SEMLikeCommandLine): @@ -641,38 +673,38 @@ class gtractAnisotropyMap(SEMLikeCommandLine): input_spec = gtractAnisotropyMapInputSpec output_spec = gtractAnisotropyMapOutputSpec _cmd = " gtractAnisotropyMap " - _outputs_filenames = {'outputVolume': 'outputVolume.nrrd'} + _outputs_filenames = {"outputVolume": "outputVolume.nrrd"} _redirect_x = False class gtractClipAnisotropyInputSpec(CommandLineInputSpec): inputVolume = File( - desc="Required: input image file name", - exists=True, - argstr="--inputVolume %s") + desc="Required: input image file name", exists=True, argstr="--inputVolume %s" + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Required: name of output NRRD file containing the clipped anisotropy image", - argstr="--outputVolume %s") + desc="Required: name of output NRRD file containing the clipped anisotropy image", + argstr="--outputVolume %s", + ) clipFirstSlice = traits.Bool( - desc="Clip the first slice of the anisotropy image", - 
argstr="--clipFirstSlice ") + desc="Clip the first slice of the anisotropy image", argstr="--clipFirstSlice " + ) clipLastSlice = traits.Bool( - desc="Clip the last slice of the anisotropy image", - argstr="--clipLastSlice ") + desc="Clip the last slice of the anisotropy image", argstr="--clipLastSlice " + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class gtractClipAnisotropyOutputSpec(TraitedSpec): outputVolume = File( - desc= - "Required: name of output NRRD file containing the clipped anisotropy image", - exists=True) + desc="Required: name of output NRRD file containing the clipped anisotropy image", + exists=True, + ) class gtractClipAnisotropy(SEMLikeCommandLine): @@ -697,7 +729,7 @@ class gtractClipAnisotropy(SEMLikeCommandLine): input_spec = gtractClipAnisotropyInputSpec output_spec = gtractClipAnisotropyOutputSpec _cmd = " gtractClipAnisotropy " - _outputs_filenames = {'outputVolume': 'outputVolume.nrrd'} + _outputs_filenames = {"outputVolume": "outputVolume.nrrd"} _redirect_x = False @@ -705,41 +737,46 @@ class gtractResampleB0InputSpec(CommandLineInputSpec): inputVolume = File( desc="Required: input file containing the 4D image", exists=True, - argstr="--inputVolume %s") + argstr="--inputVolume %s", + ) inputAnatomicalVolume = File( - desc= - "Required: input file containing the anatomical image defining the origin, spacing and size of the resampled image (template)", + desc="Required: input file containing the anatomical image defining the origin, spacing and size of the resampled image (template)", exists=True, - argstr="--inputAnatomicalVolume %s") + argstr="--inputAnatomicalVolume %s", + ) inputTransform = File( desc="Required: input Rigid OR Bspline transform file name", exists=True, - argstr="--inputTransform %s") + argstr="--inputTransform %s", + ) vectorIndex = traits.Int( desc="Index in the diffusion weighted image set for the B0 image", - argstr="--vectorIndex %d") + argstr="--vectorIndex %d", + ) transformType = traits.Enum( "Rigid", "B-Spline", desc="Transform type: Rigid, B-Spline", - argstr="--transformType %s") + argstr="--transformType %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Required: name of output NRRD file containing the resampled input image.", - argstr="--outputVolume %s") + desc="Required: name of output NRRD file containing the resampled input image.", + argstr="--outputVolume %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class gtractResampleB0OutputSpec(TraitedSpec): outputVolume = File( - desc= - "Required: name of output NRRD file containing the resampled input image.", - exists=True) + desc="Required: name of output NRRD file containing the resampled input image.", + exists=True, + ) class gtractResampleB0(SEMLikeCommandLine): @@ -764,7 +801,7 @@ class gtractResampleB0(SEMLikeCommandLine): input_spec = gtractResampleB0InputSpec output_spec = gtractResampleB0OutputSpec _cmd = " gtractResampleB0 " - _outputs_filenames = {'outputVolume': 'outputVolume.nrrd'} + _outputs_filenames = {"outputVolume": "outputVolume.nrrd"} _redirect_x = False @@ -772,21 +809,23 @@ class gtractInvertRigidTransformInputSpec(CommandLineInputSpec): inputTransform = File( desc="Required: input rigid transform file name", exists=True, - argstr="--inputTransform %s") + 
argstr="--inputTransform %s", + ) outputTransform = traits.Either( traits.Bool, File(), hash_files=False, desc="Required: output transform file name", - argstr="--outputTransform %s") + argstr="--outputTransform %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class gtractInvertRigidTransformOutputSpec(TraitedSpec): - outputTransform = File( - desc="Required: output transform file name", exists=True) + outputTransform = File(desc="Required: output transform file name", exists=True) class gtractInvertRigidTransform(SEMLikeCommandLine): @@ -811,38 +850,39 @@ class gtractInvertRigidTransform(SEMLikeCommandLine): input_spec = gtractInvertRigidTransformInputSpec output_spec = gtractInvertRigidTransformOutputSpec _cmd = " gtractInvertRigidTransform " - _outputs_filenames = {'outputTransform': 'outputTransform.h5'} + _outputs_filenames = {"outputTransform": "outputTransform.h5"} _redirect_x = False class gtractImageConformityInputSpec(CommandLineInputSpec): inputVolume = File( - desc= - "Required: input file containing the signed short image to reorient without resampling.", + desc="Required: input file containing the signed short image to reorient without resampling.", exists=True, - argstr="--inputVolume %s") + argstr="--inputVolume %s", + ) inputReferenceVolume = File( - desc= - "Required: input file containing the standard image to clone the characteristics of.", + desc="Required: input file containing the standard image to clone the characteristics of.", exists=True, - argstr="--inputReferenceVolume %s") + argstr="--inputReferenceVolume %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Required: name of output Nrrd or Nifti file containing the reoriented image in reference image space.", - argstr="--outputVolume %s") + desc="Required: name of output Nrrd or Nifti file containing the reoriented image in reference image space.", + argstr="--outputVolume %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class gtractImageConformityOutputSpec(TraitedSpec): outputVolume = File( - desc= - "Required: name of output Nrrd or Nifti file containing the reoriented image in reference image space.", - exists=True) + desc="Required: name of output Nrrd or Nifti file containing the reoriented image in reference image space.", + exists=True, + ) class gtractImageConformity(SEMLikeCommandLine): @@ -867,7 +907,7 @@ class gtractImageConformity(SEMLikeCommandLine): input_spec = gtractImageConformityInputSpec output_spec = gtractImageConformityOutputSpec _cmd = " gtractImageConformity " - _outputs_filenames = {'outputVolume': 'outputVolume.nrrd'} + _outputs_filenames = {"outputVolume": "outputVolume.nrrd"} _redirect_x = False @@ -875,31 +915,36 @@ class compareTractInclusionInputSpec(CommandLineInputSpec): testFiber = File( desc="Required: test fiber tract file name", exists=True, - argstr="--testFiber %s") + argstr="--testFiber %s", + ) standardFiber = File( desc="Required: standard fiber tract file name", exists=True, - argstr="--standardFiber %s") + argstr="--standardFiber %s", + ) closeness = traits.Float( - desc= - "Closeness of every test fiber to some fiber in the standard tract, computed as a sum of squares of spatial differences of standard points", - argstr="--closeness %f") + desc="Closeness of every test fiber to some 
fiber in the standard tract, computed as a sum of squares of spatial differences of standard points", + argstr="--closeness %f", + ) numberOfPoints = traits.Int( - desc="Number of points in comparison fiber pairs", - argstr="--numberOfPoints %d") + desc="Number of points in comparison fiber pairs", argstr="--numberOfPoints %d" + ) testForBijection = traits.Bool( desc="Flag to apply the closeness criterion both ways", - argstr="--testForBijection ") + argstr="--testForBijection ", + ) testForFiberCardinality = traits.Bool( desc="Flag to require the same number of fibers in both tracts", - argstr="--testForFiberCardinality ") + argstr="--testForFiberCardinality ", + ) writeXMLPolyDataFile = traits.Bool( - desc= - "Flag to make use of XML files when reading and writing vtkPolyData.", - argstr="--writeXMLPolyDataFile ") + desc="Flag to make use of XML files when reading and writing vtkPolyData.", + argstr="--writeXMLPolyDataFile ", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class compareTractInclusionOutputSpec(TraitedSpec): @@ -936,59 +981,68 @@ class gtractFastMarchingTrackingInputSpec(CommandLineInputSpec): inputTensorVolume = File( desc="Required: input tensor image file name", exists=True, - argstr="--inputTensorVolume %s") + argstr="--inputTensorVolume %s", + ) inputAnisotropyVolume = File( desc="Required: input anisotropy image file name", exists=True, - argstr="--inputAnisotropyVolume %s") + argstr="--inputAnisotropyVolume %s", + ) inputCostVolume = File( desc="Required: input vcl_cost image file name", exists=True, - argstr="--inputCostVolume %s") + argstr="--inputCostVolume %s", + ) inputStartingSeedsLabelMapVolume = File( desc="Required: input starting seeds LabelMap image file name", exists=True, - argstr="--inputStartingSeedsLabelMapVolume %s") + argstr="--inputStartingSeedsLabelMapVolume %s", + ) startingSeedsLabel = traits.Int( - desc="Label value for Starting Seeds", - argstr="--startingSeedsLabel %d") + desc="Label value for Starting Seeds", argstr="--startingSeedsLabel %d" + ) outputTract = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Required: name of output vtkPolydata file containing tract lines and the point data collected along them.", - argstr="--outputTract %s") + desc="Required: name of output vtkPolydata file containing tract lines and the point data collected along them.", + argstr="--outputTract %s", + ) writeXMLPolyDataFile = traits.Bool( desc="Flag to make use of the XML format for vtkPolyData fiber tracts.", - argstr="--writeXMLPolyDataFile ") + argstr="--writeXMLPolyDataFile ", + ) numberOfIterations = traits.Int( desc="Number of iterations used for the optimization", - argstr="--numberOfIterations %d") + argstr="--numberOfIterations %d", + ) seedThreshold = traits.Float( - desc="Anisotropy threshold used for seed selection", - argstr="--seedThreshold %f") + desc="Anisotropy threshold used for seed selection", argstr="--seedThreshold %f" + ) trackingThreshold = traits.Float( desc="Anisotropy threshold used for fiber tracking", - argstr="--trackingThreshold %f") + argstr="--trackingThreshold %f", + ) costStepSize = traits.Float( - desc="Cost image sub-voxel sampling", argstr="--costStepSize %f") + desc="Cost image sub-voxel sampling", argstr="--costStepSize %f" + ) maximumStepSize = traits.Float( - desc="Maximum step size to move when tracking", - argstr="--maximumStepSize %f") + desc="Maximum step size to move when 
tracking", argstr="--maximumStepSize %f" + ) minimumStepSize = traits.Float( - desc="Minimum step size to move when tracking", - argstr="--minimumStepSize %f") + desc="Minimum step size to move when tracking", argstr="--minimumStepSize %f" + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class gtractFastMarchingTrackingOutputSpec(TraitedSpec): outputTract = File( - desc= - "Required: name of output vtkPolydata file containing tract lines and the point data collected along them.", - exists=True) + desc="Required: name of output vtkPolydata file containing tract lines and the point data collected along them.", + exists=True, + ) class gtractFastMarchingTracking(SEMLikeCommandLine): @@ -1013,32 +1067,36 @@ class gtractFastMarchingTracking(SEMLikeCommandLine): input_spec = gtractFastMarchingTrackingInputSpec output_spec = gtractFastMarchingTrackingOutputSpec _cmd = " gtractFastMarchingTracking " - _outputs_filenames = {'outputTract': 'outputTract.vtk'} + _outputs_filenames = {"outputTract": "outputTract.vtk"} _redirect_x = False class gtractInvertDisplacementFieldInputSpec(CommandLineInputSpec): baseImage = File( - desc= - "Required: base image used to define the size of the inverse field", + desc="Required: base image used to define the size of the inverse field", exists=True, - argstr="--baseImage %s") + argstr="--baseImage %s", + ) deformationImage = File( desc="Required: Displacement field image", exists=True, - argstr="--deformationImage %s") + argstr="--deformationImage %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Required: Output deformation field", - argstr="--outputVolume %s") + argstr="--outputVolume %s", + ) subsamplingFactor = traits.Int( desc="Subsampling factor for the deformation field", - argstr="--subsamplingFactor %d") + argstr="--subsamplingFactor %d", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class gtractInvertDisplacementFieldOutputSpec(TraitedSpec): @@ -1067,107 +1125,115 @@ class gtractInvertDisplacementField(SEMLikeCommandLine): input_spec = gtractInvertDisplacementFieldInputSpec output_spec = gtractInvertDisplacementFieldOutputSpec _cmd = " gtractInvertDisplacementField " - _outputs_filenames = {'outputVolume': 'outputVolume.nrrd'} + _outputs_filenames = {"outputVolume": "outputVolume.nrrd"} _redirect_x = False class gtractCoRegAnatomyInputSpec(CommandLineInputSpec): inputVolume = File( - desc= - "Required: input vector image file name. It is recommended that the input volume is the skull stripped baseline image of the DWI scan.", + desc="Required: input vector image file name. It is recommended that the input volume is the skull stripped baseline image of the DWI scan.", exists=True, - argstr="--inputVolume %s") + argstr="--inputVolume %s", + ) inputAnatomicalVolume = File( - desc= - "Required: input anatomical image file name. It is recommended that that the input anatomical image has been skull stripped and has the same orientation as the DWI scan.", + desc="Required: input anatomical image file name. 
It is recommended that that the input anatomical image has been skull stripped and has the same orientation as the DWI scan.", exists=True, - argstr="--inputAnatomicalVolume %s") + argstr="--inputAnatomicalVolume %s", + ) vectorIndex = traits.Int( - desc= - "Vector image index in the moving image (within the DWI) to be used for registration.", - argstr="--vectorIndex %d") + desc="Vector image index in the moving image (within the DWI) to be used for registration.", + argstr="--vectorIndex %d", + ) inputRigidTransform = File( - desc= - "Required (for B-Spline type co-registration): input rigid transform file name. Used as a starting point for the anatomical B-Spline registration.", + desc="Required (for B-Spline type co-registration): input rigid transform file name. Used as a starting point for the anatomical B-Spline registration.", exists=True, - argstr="--inputRigidTransform %s") + argstr="--inputRigidTransform %s", + ) outputTransformName = traits.Either( traits.Bool, File(), hash_files=False, desc="Required: filename for the fit transform.", - argstr="--outputTransformName %s") + argstr="--outputTransformName %s", + ) transformType = traits.Enum( "Rigid", "Bspline", desc="Transform Type: Rigid|Bspline", - argstr="--transformType %s") + argstr="--transformType %s", + ) numberOfIterations = traits.Int( desc="Number of iterations in the selected 3D fit", - argstr="--numberOfIterations %d") + argstr="--numberOfIterations %d", + ) gridSize = InputMultiPath( traits.Int, desc="Number of grid subdivisions in all 3 directions", sep=",", - argstr="--gridSize %s") + argstr="--gridSize %s", + ) borderSize = traits.Int(desc="Size of border", argstr="--borderSize %d") numberOfHistogramBins = traits.Int( - desc="Number of histogram bins", argstr="--numberOfHistogramBins %d") + desc="Number of histogram bins", argstr="--numberOfHistogramBins %d" + ) spatialScale = traits.Int( - desc= - "Scales the number of voxels in the image by this value to specify the number of voxels used in the registration", - argstr="--spatialScale %d") - convergence = traits.Float( - desc="Convergence Factor", argstr="--convergence %f") + desc="Scales the number of voxels in the image by this value to specify the number of voxels used in the registration", + argstr="--spatialScale %d", + ) + convergence = traits.Float(desc="Convergence Factor", argstr="--convergence %f") gradientTolerance = traits.Float( - desc="Gradient Tolerance", argstr="--gradientTolerance %f") + desc="Gradient Tolerance", argstr="--gradientTolerance %f" + ) maxBSplineDisplacement = traits.Float( - desc= - " Sets the maximum allowed displacements in image physical coordinates for BSpline control grid along each axis. A value of 0.0 indicates that the problem should be unbounded. NOTE: This only constrains the BSpline portion, and does not limit the displacement from the associated bulk transform. This can lead to a substantial reduction in computation time in the BSpline optimizer., ", - argstr="--maxBSplineDisplacement %f") + desc=" Sets the maximum allowed displacements in image physical coordinates for BSpline control grid along each axis. A value of 0.0 indicates that the problem should be unbounded. NOTE: This only constrains the BSpline portion, and does not limit the displacement from the associated bulk transform. 
This can lead to a substantial reduction in computation time in the BSpline optimizer., ", + argstr="--maxBSplineDisplacement %f", + ) maximumStepSize = traits.Float( desc="Maximum permitted step size to move in the selected 3D fit", - argstr="--maximumStepSize %f") + argstr="--maximumStepSize %f", + ) minimumStepSize = traits.Float( - desc= - "Minimum required step size to move in the selected 3D fit without converging -- decrease this to make the fit more exacting", - argstr="--minimumStepSize %f") + desc="Minimum required step size to move in the selected 3D fit without converging -- decrease this to make the fit more exacting", + argstr="--minimumStepSize %f", + ) translationScale = traits.Float( - desc= - "How much to scale up changes in position compared to unit rotational changes in radians -- decrease this to put more translation in the fit", - argstr="--translationScale %f") + desc="How much to scale up changes in position compared to unit rotational changes in radians -- decrease this to put more translation in the fit", + argstr="--translationScale %f", + ) relaxationFactor = traits.Float( - desc= - "Fraction of gradient from Jacobian to attempt to move in the selected 3D fit", - argstr="--relaxationFactor %f") + desc="Fraction of gradient from Jacobian to attempt to move in the selected 3D fit", + argstr="--relaxationFactor %f", + ) numberOfSamples = traits.Int( - desc= - "The number of voxels sampled for mutual information computation. Increase this for a slower, more careful fit. NOTE that it is suggested to use samplingPercentage instead of this option. However, if set, it overwrites the samplingPercentage option. ", - argstr="--numberOfSamples %d") + desc="The number of voxels sampled for mutual information computation. Increase this for a slower, more careful fit. NOTE that it is suggested to use samplingPercentage instead of this option. However, if set, it overwrites the samplingPercentage option. ", + argstr="--numberOfSamples %d", + ) samplingPercentage = traits.Float( - desc= - "This is a number in (0.0,1.0] interval that shows the percentage of the input fixed image voxels that are sampled for mutual information computation. Increase this for a slower, more careful fit. You can also limit the sampling focus with ROI masks and ROIAUTO mask generation. The default is to use approximately 5% of voxels (for backwards compatibility 5% ~= 500000/(256*256*256)). Typical values range from 1% for low detail images to 20% for high detail images.", - argstr="--samplingPercentage %f") + desc="This is a number in (0.0,1.0] interval that shows the percentage of the input fixed image voxels that are sampled for mutual information computation. Increase this for a slower, more careful fit. You can also limit the sampling focus with ROI masks and ROIAUTO mask generation. The default is to use approximately 5% of voxels (for backwards compatibility 5% ~= 500000/(256*256*256)). Typical values range from 1% for low detail images to 20% for high detail images.", + argstr="--samplingPercentage %f", + ) useMomentsAlign = traits.Bool( - desc= - "MomentsAlign assumes that the center of mass of the images represent similar structures. Perform a MomentsAlign registration as part of the sequential registration steps. This option MUST come first, and CAN NOT be used with either CenterOfHeadLAlign, GeometryAlign, or initialTransform file. 
This family of options superceeds the use of transformType if any of them are set.", - argstr="--useMomentsAlign ") + desc="MomentsAlign assumes that the center of mass of the images represent similar structures. Perform a MomentsAlign registration as part of the sequential registration steps. This option MUST come first, and CAN NOT be used with either CenterOfHeadLAlign, GeometryAlign, or initialTransform file. This family of options superceeds the use of transformType if any of them are set.", + argstr="--useMomentsAlign ", + ) useGeometryAlign = traits.Bool( - desc= - "GeometryAlign on assumes that the center of the voxel lattice of the images represent similar structures. Perform a GeometryCenterAlign registration as part of the sequential registration steps. This option MUST come first, and CAN NOT be used with either MomentsAlign, CenterOfHeadAlign, or initialTransform file. This family of options superceeds the use of transformType if any of them are set.", - argstr="--useGeometryAlign ") + desc="GeometryAlign on assumes that the center of the voxel lattice of the images represent similar structures. Perform a GeometryCenterAlign registration as part of the sequential registration steps. This option MUST come first, and CAN NOT be used with either MomentsAlign, CenterOfHeadAlign, or initialTransform file. This family of options superceeds the use of transformType if any of them are set.", + argstr="--useGeometryAlign ", + ) useCenterOfHeadAlign = traits.Bool( - desc= - "CenterOfHeadAlign attempts to find a hemisphere full of foreground voxels from the superior direction as an estimate of where the center of a head shape would be to drive a center of mass estimate. Perform a CenterOfHeadAlign registration as part of the sequential registration steps. This option MUST come first, and CAN NOT be used with either MomentsAlign, GeometryAlign, or initialTransform file. This family of options superceeds the use of transformType if any of them are set.", - argstr="--useCenterOfHeadAlign ") + desc="CenterOfHeadAlign attempts to find a hemisphere full of foreground voxels from the superior direction as an estimate of where the center of a head shape would be to drive a center of mass estimate. Perform a CenterOfHeadAlign registration as part of the sequential registration steps. This option MUST come first, and CAN NOT be used with either MomentsAlign, GeometryAlign, or initialTransform file. 
This family of options superceeds the use of transformType if any of them are set.", + argstr="--useCenterOfHeadAlign ", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class gtractCoRegAnatomyOutputSpec(TraitedSpec): outputTransformName = File( - desc="Required: filename for the fit transform.", exists=True) + desc="Required: filename for the fit transform.", exists=True + ) class gtractCoRegAnatomy(SEMLikeCommandLine): @@ -1192,7 +1258,7 @@ class gtractCoRegAnatomy(SEMLikeCommandLine): input_spec = gtractCoRegAnatomyInputSpec output_spec = gtractCoRegAnatomyOutputSpec _cmd = " gtractCoRegAnatomy " - _outputs_filenames = {'outputTransformName': 'outputTransformName.h5'} + _outputs_filenames = {"outputTransformName": "outputTransformName.h5"} _redirect_x = False @@ -1200,59 +1266,62 @@ class gtractResampleDWIInPlaceInputSpec(CommandLineInputSpec): inputVolume = File( desc="Required: input image is a 4D NRRD image.", exists=True, - argstr="--inputVolume %s") + argstr="--inputVolume %s", + ) referenceVolume = File( - desc= - "If provided, resample to the final space of the referenceVolume 3D data set.", + desc="If provided, resample to the final space of the referenceVolume 3D data set.", exists=True, - argstr="--referenceVolume %s") + argstr="--referenceVolume %s", + ) outputResampledB0 = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Convenience function for extracting the first index location (assumed to be the B0)", - argstr="--outputResampledB0 %s") + desc="Convenience function for extracting the first index location (assumed to be the B0)", + argstr="--outputResampledB0 %s", + ) inputTransform = File( - desc= - "Required: transform file derived from rigid registration of b0 image to reference structural image.", + desc="Required: transform file derived from rigid registration of b0 image to reference structural image.", exists=True, - argstr="--inputTransform %s") + argstr="--inputTransform %s", + ) warpDWITransform = File( desc="Optional: transform file to warp gradient volumes.", exists=True, - argstr="--warpDWITransform %s") + argstr="--warpDWITransform %s", + ) debugLevel = traits.Int( - desc= - "Display debug messages, and produce debug intermediate results. 0=OFF, 1=Minimal, 10=Maximum debugging.", - argstr="--debugLevel %d") + desc="Display debug messages, and produce debug intermediate results. 0=OFF, 1=Minimal, 10=Maximum debugging.", + argstr="--debugLevel %d", + ) imageOutputSize = InputMultiPath( traits.Int, - desc= - "The voxel lattice for the output image, padding is added if necessary. NOTE: if 0,0,0, then the inputVolume size is used.", + desc="The voxel lattice for the output image, padding is added if necessary. 
NOTE: if 0,0,0, then the inputVolume size is used.", sep=",", - argstr="--imageOutputSize %s") + argstr="--imageOutputSize %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Required: output image (NRRD file) that has been rigidly transformed into the space of the structural image and padded if image padding was changed from 0,0,0 default.", - argstr="--outputVolume %s") + desc="Required: output image (NRRD file) that has been rigidly transformed into the space of the structural image and padded if image padding was changed from 0,0,0 default.", + argstr="--outputVolume %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class gtractResampleDWIInPlaceOutputSpec(TraitedSpec): outputResampledB0 = File( - desc= - "Convenience function for extracting the first index location (assumed to be the B0)", - exists=True) + desc="Convenience function for extracting the first index location (assumed to be the B0)", + exists=True, + ) outputVolume = File( - desc= - "Required: output image (NRRD file) that has been rigidly transformed into the space of the structural image and padded if image padding was changed from 0,0,0 default.", - exists=True) + desc="Required: output image (NRRD file) that has been rigidly transformed into the space of the structural image and padded if image padding was changed from 0,0,0 default.", + exists=True, + ) class gtractResampleDWIInPlace(SEMLikeCommandLine): @@ -1278,8 +1347,8 @@ class gtractResampleDWIInPlace(SEMLikeCommandLine): output_spec = gtractResampleDWIInPlaceOutputSpec _cmd = " gtractResampleDWIInPlace " _outputs_filenames = { - 'outputResampledB0': 'outputResampledB0.nii', - 'outputVolume': 'outputVolume.nii' + "outputResampledB0": "outputResampledB0.nii", + "outputVolume": "outputVolume.nii", } _redirect_x = False @@ -1288,42 +1357,50 @@ class gtractCostFastMarchingInputSpec(CommandLineInputSpec): inputTensorVolume = File( desc="Required: input tensor image file name", exists=True, - argstr="--inputTensorVolume %s") + argstr="--inputTensorVolume %s", + ) inputAnisotropyVolume = File( desc="Required: input anisotropy image file name", exists=True, - argstr="--inputAnisotropyVolume %s") + argstr="--inputAnisotropyVolume %s", + ) inputStartingSeedsLabelMapVolume = File( desc="Required: input starting seeds LabelMap image file name", exists=True, - argstr="--inputStartingSeedsLabelMapVolume %s") + argstr="--inputStartingSeedsLabelMapVolume %s", + ) startingSeedsLabel = traits.Int( - desc="Label value for Starting Seeds", - argstr="--startingSeedsLabel %d") + desc="Label value for Starting Seeds", argstr="--startingSeedsLabel %d" + ) outputCostVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Output vcl_cost image", - argstr="--outputCostVolume %s") + argstr="--outputCostVolume %s", + ) outputSpeedVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Output speed image", - argstr="--outputSpeedVolume %s") + argstr="--outputSpeedVolume %s", + ) anisotropyWeight = traits.Float( desc="Anisotropy weight used for vcl_cost function calculations", - argstr="--anisotropyWeight %f") + argstr="--anisotropyWeight %f", + ) stoppingValue = traits.Float( desc="Terminiating value for vcl_cost function estimation", - argstr="--stoppingValue %f") + argstr="--stoppingValue %f", + ) seedThreshold = traits.Float( - desc="Anisotropy threshold used for seed selection", - argstr="--seedThreshold 
%f") + desc="Anisotropy threshold used for seed selection", argstr="--seedThreshold %f" + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class gtractCostFastMarchingOutputSpec(TraitedSpec): @@ -1354,122 +1431,130 @@ class gtractCostFastMarching(SEMLikeCommandLine): output_spec = gtractCostFastMarchingOutputSpec _cmd = " gtractCostFastMarching " _outputs_filenames = { - 'outputCostVolume': 'outputCostVolume.nrrd', - 'outputSpeedVolume': 'outputSpeedVolume.nrrd' + "outputCostVolume": "outputCostVolume.nrrd", + "outputSpeedVolume": "outputSpeedVolume.nrrd", } _redirect_x = False class gtractFiberTrackingInputSpec(CommandLineInputSpec): inputTensorVolume = File( - desc= - "Required (for Free, Streamline, GraphSearch, and Guided fiber tracking methods): input tensor image file name", + desc="Required (for Free, Streamline, GraphSearch, and Guided fiber tracking methods): input tensor image file name", exists=True, - argstr="--inputTensorVolume %s") + argstr="--inputTensorVolume %s", + ) inputAnisotropyVolume = File( - desc= - "Required (for Free, Streamline, GraphSearch, and Guided fiber tracking methods): input anisotropy image file name", + desc="Required (for Free, Streamline, GraphSearch, and Guided fiber tracking methods): input anisotropy image file name", exists=True, - argstr="--inputAnisotropyVolume %s") + argstr="--inputAnisotropyVolume %s", + ) inputStartingSeedsLabelMapVolume = File( - desc= - "Required (for Free, Streamline, GraphSearch, and Guided fiber tracking methods): input starting seeds LabelMap image file name", + desc="Required (for Free, Streamline, GraphSearch, and Guided fiber tracking methods): input starting seeds LabelMap image file name", exists=True, - argstr="--inputStartingSeedsLabelMapVolume %s") + argstr="--inputStartingSeedsLabelMapVolume %s", + ) startingSeedsLabel = traits.Int( - desc= - "Label value for Starting Seeds (required if Label number used to create seed point in Slicer was not 1)", - argstr="--startingSeedsLabel %d") + desc="Label value for Starting Seeds (required if Label number used to create seed point in Slicer was not 1)", + argstr="--startingSeedsLabel %d", + ) inputEndingSeedsLabelMapVolume = File( - desc= - "Required (for Streamline, GraphSearch, and Guided fiber tracking methods): input ending seeds LabelMap image file name", + desc="Required (for Streamline, GraphSearch, and Guided fiber tracking methods): input ending seeds LabelMap image file name", exists=True, - argstr="--inputEndingSeedsLabelMapVolume %s") + argstr="--inputEndingSeedsLabelMapVolume %s", + ) endingSeedsLabel = traits.Int( - desc= - "Label value for Ending Seeds (required if Label number used to create seed point in Slicer was not 1)", - argstr="--endingSeedsLabel %d") + desc="Label value for Ending Seeds (required if Label number used to create seed point in Slicer was not 1)", + argstr="--endingSeedsLabel %d", + ) inputTract = File( - desc= - "Required (for Guided fiber tracking method): guide fiber in vtkPolydata file containing one tract line.", + desc="Required (for Guided fiber tracking method): guide fiber in vtkPolydata file containing one tract line.", exists=True, - argstr="--inputTract %s") + argstr="--inputTract %s", + ) outputTract = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Required (for Free, Streamline, GraphSearch, and Guided fiber tracking methods): name of output vtkPolydata file containing tract lines and 
the point data collected along them.", - argstr="--outputTract %s") + desc="Required (for Free, Streamline, GraphSearch, and Guided fiber tracking methods): name of output vtkPolydata file containing tract lines and the point data collected along them.", + argstr="--outputTract %s", + ) writeXMLPolyDataFile = traits.Bool( desc="Flag to make use of the XML format for vtkPolyData fiber tracts.", - argstr="--writeXMLPolyDataFile ") + argstr="--writeXMLPolyDataFile ", + ) trackingMethod = traits.Enum( "Guided", "Free", "Streamline", "GraphSearch", desc="Fiber tracking Filter Type: Guided|Free|Streamline|GraphSearch", - argstr="--trackingMethod %s") + argstr="--trackingMethod %s", + ) guidedCurvatureThreshold = traits.Float( desc="Guided Curvature Threshold (Degrees)", - argstr="--guidedCurvatureThreshold %f") + argstr="--guidedCurvatureThreshold %f", + ) maximumGuideDistance = traits.Float( desc="Maximum distance for using the guide fiber direction", - argstr="--maximumGuideDistance %f") + argstr="--maximumGuideDistance %f", + ) seedThreshold = traits.Float( - desc= - "Anisotropy threshold for seed selection (recommended for Free fiber tracking)", - argstr="--seedThreshold %f") + desc="Anisotropy threshold for seed selection (recommended for Free fiber tracking)", + argstr="--seedThreshold %f", + ) trackingThreshold = traits.Float( - desc= - "Anisotropy threshold for fiber tracking (anisotropy values of the next point along the path)", - argstr="--trackingThreshold %f") + desc="Anisotropy threshold for fiber tracking (anisotropy values of the next point along the path)", + argstr="--trackingThreshold %f", + ) curvatureThreshold = traits.Float( - desc= - "Curvature threshold in degrees (recommended for Free fiber tracking)", - argstr="--curvatureThreshold %f") + desc="Curvature threshold in degrees (recommended for Free fiber tracking)", + argstr="--curvatureThreshold %f", + ) branchingThreshold = traits.Float( - desc= - "Anisotropy Branching threshold (recommended for GraphSearch fiber tracking method)", - argstr="--branchingThreshold %f") + desc="Anisotropy Branching threshold (recommended for GraphSearch fiber tracking method)", + argstr="--branchingThreshold %f", + ) maximumBranchPoints = traits.Int( - desc= - "Maximum branch points (recommended for GraphSearch fiber tracking method)", - argstr="--maximumBranchPoints %d") + desc="Maximum branch points (recommended for GraphSearch fiber tracking method)", + argstr="--maximumBranchPoints %d", + ) useRandomWalk = traits.Bool( - desc="Flag to use random walk.", argstr="--useRandomWalk ") + desc="Flag to use random walk.", argstr="--useRandomWalk " + ) randomSeed = traits.Int( - desc="Random number generator seed", argstr="--randomSeed %d") + desc="Random number generator seed", argstr="--randomSeed %d" + ) branchingAngle = traits.Float( - desc= - "Branching angle in degrees (recommended for GraphSearch fiber tracking method)", - argstr="--branchingAngle %f") + desc="Branching angle in degrees (recommended for GraphSearch fiber tracking method)", + argstr="--branchingAngle %f", + ) minimumLength = traits.Float( desc="Minimum fiber length. 
Helpful for filtering invalid tracts.", - argstr="--minimumLength %f") + argstr="--minimumLength %f", + ) maximumLength = traits.Float( - desc="Maximum fiber length (voxels)", argstr="--maximumLength %f") - stepSize = traits.Float( - desc="Fiber tracking step size", argstr="--stepSize %f") + desc="Maximum fiber length (voxels)", argstr="--maximumLength %f" + ) + stepSize = traits.Float(desc="Fiber tracking step size", argstr="--stepSize %f") useLoopDetection = traits.Bool( - desc="Flag to make use of loop detection.", - argstr="--useLoopDetection ") + desc="Flag to make use of loop detection.", argstr="--useLoopDetection " + ) useTend = traits.Bool( - desc="Flag to make use of Tend F and Tend G parameters.", - argstr="--useTend ") + desc="Flag to make use of Tend F and Tend G parameters.", argstr="--useTend " + ) tendF = traits.Float(desc="Tend F parameter", argstr="--tendF %f") tendG = traits.Float(desc="Tend G parameter", argstr="--tendG %f") numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class gtractFiberTrackingOutputSpec(TraitedSpec): outputTract = File( - desc= - "Required (for Free, Streamline, GraphSearch, and Guided fiber tracking methods): name of output vtkPolydata file containing tract lines and the point data collected along them.", - exists=True) + desc="Required (for Free, Streamline, GraphSearch, and Guided fiber tracking methods): name of output vtkPolydata file containing tract lines and the point data collected along them.", + exists=True, + ) class gtractFiberTracking(SEMLikeCommandLine): @@ -1494,43 +1579,45 @@ class gtractFiberTracking(SEMLikeCommandLine): input_spec = gtractFiberTrackingInputSpec output_spec = gtractFiberTrackingOutputSpec _cmd = " gtractFiberTracking " - _outputs_filenames = {'outputTract': 'outputTract.vtk'} + _outputs_filenames = {"outputTract": "outputTract.vtk"} _redirect_x = False class extractNrrdVectorIndexInputSpec(CommandLineInputSpec): inputVolume = File( - desc= - "Required: input file containing the vector that will be extracted", + desc="Required: input file containing the vector that will be extracted", exists=True, - argstr="--inputVolume %s") + argstr="--inputVolume %s", + ) vectorIndex = traits.Int( - desc="Index in the vector image to extract", argstr="--vectorIndex %d") + desc="Index in the vector image to extract", argstr="--vectorIndex %d" + ) setImageOrientation = traits.Enum( "AsAcquired", "Axial", "Coronal", "Sagittal", - desc= - "Sets the image orientation of the extracted vector (Axial, Coronal, Sagittal)", - argstr="--setImageOrientation %s") + desc="Sets the image orientation of the extracted vector (Axial, Coronal, Sagittal)", + argstr="--setImageOrientation %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Required: name of output NRRD file containing the vector image at the given index", - argstr="--outputVolume %s") + desc="Required: name of output NRRD file containing the vector image at the given index", + argstr="--outputVolume %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class extractNrrdVectorIndexOutputSpec(TraitedSpec): outputVolume = File( - desc= - "Required: name of output NRRD file containing the vector image at the given index", - exists=True) + desc="Required: name of output NRRD file containing the vector image at the given 
index", + exists=True, + ) class extractNrrdVectorIndex(SEMLikeCommandLine): @@ -1555,7 +1642,7 @@ class extractNrrdVectorIndex(SEMLikeCommandLine): input_spec = extractNrrdVectorIndexInputSpec output_spec = extractNrrdVectorIndexOutputSpec _cmd = " extractNrrdVectorIndex " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} _redirect_x = False @@ -1563,35 +1650,40 @@ class gtractResampleFibersInputSpec(CommandLineInputSpec): inputForwardDeformationFieldVolume = File( desc="Required: input forward deformation field image file name", exists=True, - argstr="--inputForwardDeformationFieldVolume %s") + argstr="--inputForwardDeformationFieldVolume %s", + ) inputReverseDeformationFieldVolume = File( desc="Required: input reverse deformation field image file name", exists=True, - argstr="--inputReverseDeformationFieldVolume %s") + argstr="--inputReverseDeformationFieldVolume %s", + ) inputTract = File( desc="Required: name of input vtkPolydata file containing tract lines.", exists=True, - argstr="--inputTract %s") + argstr="--inputTract %s", + ) outputTract = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Required: name of output vtkPolydata file containing tract lines and the point data collected along them.", - argstr="--outputTract %s") + desc="Required: name of output vtkPolydata file containing tract lines and the point data collected along them.", + argstr="--outputTract %s", + ) writeXMLPolyDataFile = traits.Bool( desc="Flag to make use of the XML format for vtkPolyData fiber tracts.", - argstr="--writeXMLPolyDataFile ") + argstr="--writeXMLPolyDataFile ", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class gtractResampleFibersOutputSpec(TraitedSpec): outputTract = File( - desc= - "Required: name of output vtkPolydata file containing tract lines and the point data collected along them.", - exists=True) + desc="Required: name of output vtkPolydata file containing tract lines and the point data collected along them.", + exists=True, + ) class gtractResampleFibers(SEMLikeCommandLine): @@ -1616,70 +1708,74 @@ class gtractResampleFibers(SEMLikeCommandLine): input_spec = gtractResampleFibersInputSpec output_spec = gtractResampleFibersOutputSpec _cmd = " gtractResampleFibers " - _outputs_filenames = {'outputTract': 'outputTract.vtk'} + _outputs_filenames = {"outputTract": "outputTract.vtk"} _redirect_x = False class gtractTensorInputSpec(CommandLineInputSpec): inputVolume = File( - desc= - "Required: input image 4D NRRD image. Must contain data based on at least 6 distinct diffusion directions. The inputVolume is allowed to have multiple b0 and gradient direction images. Averaging of the b0 image is done internally in this step. Prior averaging of the DWIs is not required.", + desc="Required: input image 4D NRRD image. Must contain data based on at least 6 distinct diffusion directions. The inputVolume is allowed to have multiple b0 and gradient direction images. Averaging of the b0 image is done internally in this step. 
Prior averaging of the DWIs is not required.", exists=True, - argstr="--inputVolume %s") + argstr="--inputVolume %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Required: name of output NRRD file containing the Tensor vector image", - argstr="--outputVolume %s") + desc="Required: name of output NRRD file containing the Tensor vector image", + argstr="--outputVolume %s", + ) medianFilterSize = InputMultiPath( traits.Int, desc="Median filter radius in all 3 directions", sep=",", - argstr="--medianFilterSize %s") + argstr="--medianFilterSize %s", + ) maskProcessingMode = traits.Enum( "NOMASK", "ROIAUTO", "ROI", - desc= - "ROIAUTO: mask is implicitly defined using a otsu forground and hole filling algorithm. ROI: Uses the masks to define what parts of the image should be used for computing the transform. NOMASK: no mask used", - argstr="--maskProcessingMode %s") + desc="ROIAUTO: mask is implicitly defined using a otsu forground and hole filling algorithm. ROI: Uses the masks to define what parts of the image should be used for computing the transform. NOMASK: no mask used", + argstr="--maskProcessingMode %s", + ) maskVolume = File( desc="Mask Image, if maskProcessingMode is ROI", exists=True, - argstr="--maskVolume %s") + argstr="--maskVolume %s", + ) backgroundSuppressingThreshold = traits.Int( - desc= - "Image threshold to suppress background. This sets a threshold used on the b0 image to remove background voxels from processing. Typically, values of 100 and 500 work well for Siemens and GE DTI data, respectively. Check your data particularly in the globus pallidus to make sure the brain tissue is not being eliminated with this threshold.", - argstr="--backgroundSuppressingThreshold %d") + desc="Image threshold to suppress background. This sets a threshold used on the b0 image to remove background voxels from processing. Typically, values of 100 and 500 work well for Siemens and GE DTI data, respectively. Check your data particularly in the globus pallidus to make sure the brain tissue is not being eliminated with this threshold.", + argstr="--backgroundSuppressingThreshold %d", + ) resampleIsotropic = traits.Bool( - desc= - "Flag to resample to isotropic voxels. Enabling this feature is recommended if fiber tracking will be performed.", - argstr="--resampleIsotropic ") - size = traits.Float( - desc="Isotropic voxel size to resample to", argstr="--size %f") + desc="Flag to resample to isotropic voxels. Enabling this feature is recommended if fiber tracking will be performed.", + argstr="--resampleIsotropic ", + ) + size = traits.Float(desc="Isotropic voxel size to resample to", argstr="--size %f") b0Index = traits.Int( - desc="Index in input vector index to extract", argstr="--b0Index %d") + desc="Index in input vector index to extract", argstr="--b0Index %d" + ) applyMeasurementFrame = traits.Bool( desc="Flag to apply the measurement frame to the gradient directions", - argstr="--applyMeasurementFrame ") + argstr="--applyMeasurementFrame ", + ) ignoreIndex = InputMultiPath( traits.Int, - desc= - "Ignore diffusion gradient index. Used to remove specific gradient directions with artifacts.", + desc="Ignore diffusion gradient index. 
Used to remove specific gradient directions with artifacts.", sep=",", - argstr="--ignoreIndex %s") + argstr="--ignoreIndex %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class gtractTensorOutputSpec(TraitedSpec): outputVolume = File( - desc= - "Required: name of output NRRD file containing the Tensor vector image", - exists=True) + desc="Required: name of output NRRD file containing the Tensor vector image", + exists=True, + ) class gtractTensor(SEMLikeCommandLine): @@ -1704,5 +1800,5 @@ class gtractTensor(SEMLikeCommandLine): input_spec = gtractTensorInputSpec output_spec = gtractTensorOutputSpec _cmd = " gtractTensor " - _outputs_filenames = {'outputVolume': 'outputVolume.nrrd'} + _outputs_filenames = {"outputVolume": "outputVolume.nrrd"} _redirect_x = False diff --git a/nipype/interfaces/semtools/diffusion/maxcurvature.py b/nipype/interfaces/semtools/diffusion/maxcurvature.py index 570109eb1b..fdd5057097 100644 --- a/nipype/interfaces/semtools/diffusion/maxcurvature.py +++ b/nipype/interfaces/semtools/diffusion/maxcurvature.py @@ -5,19 +5,25 @@ import os -from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, - TraitedSpec, File, Directory, traits, isdefined, - InputMultiPath, OutputMultiPath) +from ...base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) class maxcurvatureInputSpec(CommandLineInputSpec): image = File(desc="FA Image", exists=True, argstr="--image %s") output = traits.Either( - traits.Bool, - File(), - hash_files=False, - desc="Output File", - argstr="--output %s") + traits.Bool, File(), hash_files=False, desc="Output File", argstr="--output %s" + ) sigma = traits.Float(desc="Scale of Gradients", argstr="--sigma %f") verbose = traits.Bool(desc="produce verbose output", argstr="--verbose ") @@ -52,5 +58,5 @@ class maxcurvature(SEMLikeCommandLine): input_spec = maxcurvatureInputSpec output_spec = maxcurvatureOutputSpec _cmd = " maxcurvature " - _outputs_filenames = {'output': 'output.nii'} + _outputs_filenames = {"output": "output.nii"} _redirect_x = False diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py index 3b1578cc42..d1f8c33324 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py @@ -4,72 +4,41 @@ def test_DWIConvert_inputs(): input_map = dict( - allowLossyConversion=dict(argstr='--allowLossyConversion ', ), - args=dict(argstr='%s', ), - conversionMode=dict(argstr='--conversionMode %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fMRI=dict(argstr='--fMRI ', ), - fslNIFTIFile=dict( - argstr='--fslNIFTIFile %s', - extensions=None, - ), - gradientVectorFile=dict( - argstr='--gradientVectorFile %s', - hash_files=False, - ), - inputBValues=dict( - argstr='--inputBValues %s', - extensions=None, - ), - inputBVectors=dict( - argstr='--inputBVectors %s', - extensions=None, - ), - inputDicomDirectory=dict(argstr='--inputDicomDirectory %s', ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - outputBValues=dict( - argstr='--outputBValues %s', - hash_files=False, - ), - outputBVectors=dict( - argstr='--outputBVectors %s', - hash_files=False, - ), - outputDirectory=dict( - 
argstr='--outputDirectory %s', - hash_files=False, - ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), - smallGradientThreshold=dict(argstr='--smallGradientThreshold %f', ), - transposeInputBVectors=dict(argstr='--transposeInputBVectors ', ), - useBMatrixGradientDirections=dict( - argstr='--useBMatrixGradientDirections ', ), - useIdentityMeaseurementFrame=dict( - argstr='--useIdentityMeaseurementFrame ', ), - writeProtocolGradientsFile=dict( - argstr='--writeProtocolGradientsFile ', ), + allowLossyConversion=dict(argstr="--allowLossyConversion ",), + args=dict(argstr="%s",), + conversionMode=dict(argstr="--conversionMode %s",), + environ=dict(nohash=True, usedefault=True,), + fMRI=dict(argstr="--fMRI ",), + fslNIFTIFile=dict(argstr="--fslNIFTIFile %s", extensions=None,), + gradientVectorFile=dict(argstr="--gradientVectorFile %s", hash_files=False,), + inputBValues=dict(argstr="--inputBValues %s", extensions=None,), + inputBVectors=dict(argstr="--inputBVectors %s", extensions=None,), + inputDicomDirectory=dict(argstr="--inputDicomDirectory %s",), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + outputBValues=dict(argstr="--outputBValues %s", hash_files=False,), + outputBVectors=dict(argstr="--outputBVectors %s", hash_files=False,), + outputDirectory=dict(argstr="--outputDirectory %s", hash_files=False,), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + smallGradientThreshold=dict(argstr="--smallGradientThreshold %f",), + transposeInputBVectors=dict(argstr="--transposeInputBVectors ",), + useBMatrixGradientDirections=dict(argstr="--useBMatrixGradientDirections ",), + useIdentityMeaseurementFrame=dict(argstr="--useIdentityMeaseurementFrame ",), + writeProtocolGradientsFile=dict(argstr="--writeProtocolGradientsFile ",), ) inputs = DWIConvert.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DWIConvert_outputs(): output_map = dict( - gradientVectorFile=dict(extensions=None, ), - outputBValues=dict(extensions=None, ), - outputBVectors=dict(extensions=None, ), + gradientVectorFile=dict(extensions=None,), + outputBValues=dict(extensions=None,), + outputBVectors=dict(extensions=None,), outputDirectory=dict(), - outputVolume=dict(extensions=None, ), + outputVolume=dict(extensions=None,), ) outputs = DWIConvert.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py index 601c33628b..ed184ae4f2 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py @@ -4,31 +4,24 @@ def test_compareTractInclusion_inputs(): input_map = dict( - args=dict(argstr='%s', ), - closeness=dict(argstr='--closeness %f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - numberOfPoints=dict(argstr='--numberOfPoints %d', ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - standardFiber=dict( - argstr='--standardFiber %s', - extensions=None, - ), - testFiber=dict( - argstr='--testFiber %s', - extensions=None, - ), - testForBijection=dict(argstr='--testForBijection ', ), - testForFiberCardinality=dict(argstr='--testForFiberCardinality ', ), - writeXMLPolyDataFile=dict(argstr='--writeXMLPolyDataFile ', ), + args=dict(argstr="%s",), + closeness=dict(argstr="--closeness %f",), + 
environ=dict(nohash=True, usedefault=True,), + numberOfPoints=dict(argstr="--numberOfPoints %d",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + standardFiber=dict(argstr="--standardFiber %s", extensions=None,), + testFiber=dict(argstr="--testFiber %s", extensions=None,), + testForBijection=dict(argstr="--testForBijection ",), + testForFiberCardinality=dict(argstr="--testForFiberCardinality ",), + writeXMLPolyDataFile=dict(argstr="--writeXMLPolyDataFile ",), ) inputs = compareTractInclusion.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_compareTractInclusion_outputs(): output_map = dict() outputs = compareTractInclusion.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py index 6af27942ea..fe4e00032b 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py @@ -4,26 +4,22 @@ def test_dtiaverage_inputs(): input_map = dict( - DTI_double=dict(argstr='--DTI_double ', ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputs=dict(argstr='--inputs %s...', ), - tensor_output=dict( - argstr='--tensor_output %s', - hash_files=False, - ), - verbose=dict(argstr='--verbose ', ), + DTI_double=dict(argstr="--DTI_double ",), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputs=dict(argstr="--inputs %s...",), + tensor_output=dict(argstr="--tensor_output %s", hash_files=False,), + verbose=dict(argstr="--verbose ",), ) inputs = dtiaverage.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_dtiaverage_outputs(): - output_map = dict(tensor_output=dict(extensions=None, ), ) + output_map = dict(tensor_output=dict(extensions=None,),) outputs = dtiaverage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py index afa519667b..c7586fc34d 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py @@ -4,65 +4,40 @@ def test_dtiestim_inputs(): input_map = dict( - B0=dict( - argstr='--B0 %s', - hash_files=False, - ), - B0_mask_output=dict( - argstr='--B0_mask_output %s', - hash_files=False, - ), - DTI_double=dict(argstr='--DTI_double ', ), - args=dict(argstr='%s', ), - bad_region_mask=dict( - argstr='--bad_region_mask %s', - extensions=None, - ), - brain_mask=dict( - argstr='--brain_mask %s', - extensions=None, - ), - correction=dict(argstr='--correction %s', ), - defaultTensor=dict( - argstr='--defaultTensor %s', - sep=',', - ), - dwi_image=dict( - argstr='--dwi_image %s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - idwi=dict( - argstr='--idwi %s', - hash_files=False, - ), - method=dict(argstr='--method %s', ), - shiftNeg=dict(argstr='--shiftNeg ', ), - shiftNegCoeff=dict(argstr='--shiftNegCoeff %f', ), - sigma=dict(argstr='--sigma %f', ), - step=dict(argstr='--step %f', ), - tensor_output=dict( - argstr='--tensor_output %s', - hash_files=False, - ), - threshold=dict(argstr='--threshold %d', ), - 
verbose=dict(argstr='--verbose ', ), - weight_iterations=dict(argstr='--weight_iterations %d', ), + B0=dict(argstr="--B0 %s", hash_files=False,), + B0_mask_output=dict(argstr="--B0_mask_output %s", hash_files=False,), + DTI_double=dict(argstr="--DTI_double ",), + args=dict(argstr="%s",), + bad_region_mask=dict(argstr="--bad_region_mask %s", extensions=None,), + brain_mask=dict(argstr="--brain_mask %s", extensions=None,), + correction=dict(argstr="--correction %s",), + defaultTensor=dict(argstr="--defaultTensor %s", sep=",",), + dwi_image=dict(argstr="--dwi_image %s", extensions=None,), + environ=dict(nohash=True, usedefault=True,), + idwi=dict(argstr="--idwi %s", hash_files=False,), + method=dict(argstr="--method %s",), + shiftNeg=dict(argstr="--shiftNeg ",), + shiftNegCoeff=dict(argstr="--shiftNegCoeff %f",), + sigma=dict(argstr="--sigma %f",), + step=dict(argstr="--step %f",), + tensor_output=dict(argstr="--tensor_output %s", hash_files=False,), + threshold=dict(argstr="--threshold %d",), + verbose=dict(argstr="--verbose ",), + weight_iterations=dict(argstr="--weight_iterations %d",), ) inputs = dtiestim.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_dtiestim_outputs(): output_map = dict( - B0=dict(extensions=None, ), - B0_mask_output=dict(extensions=None, ), - idwi=dict(extensions=None, ), - tensor_output=dict(extensions=None, ), + B0=dict(extensions=None,), + B0_mask_output=dict(extensions=None,), + idwi=dict(extensions=None,), + tensor_output=dict(extensions=None,), ) outputs = dtiestim.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py index 5095e7d469..01a53b18cc 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py @@ -4,126 +4,68 @@ def test_dtiprocess_inputs(): input_map = dict( - DTI_double=dict(argstr='--DTI_double ', ), - RD_output=dict( - argstr='--RD_output %s', - hash_files=False, - ), - affineitk_file=dict( - argstr='--affineitk_file %s', - extensions=None, - ), - args=dict(argstr='%s', ), - color_fa_output=dict( - argstr='--color_fa_output %s', - hash_files=False, - ), - correction=dict(argstr='--correction %s', ), - deformation_output=dict( - argstr='--deformation_output %s', - hash_files=False, - ), - dof_file=dict( - argstr='--dof_file %s', - extensions=None, - ), - dti_image=dict( - argstr='--dti_image %s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fa_gradient_output=dict( - argstr='--fa_gradient_output %s', - hash_files=False, - ), - fa_gradmag_output=dict( - argstr='--fa_gradmag_output %s', - hash_files=False, - ), - fa_output=dict( - argstr='--fa_output %s', - hash_files=False, - ), - forward=dict( - argstr='--forward %s', - extensions=None, - ), + DTI_double=dict(argstr="--DTI_double ",), + RD_output=dict(argstr="--RD_output %s", hash_files=False,), + affineitk_file=dict(argstr="--affineitk_file %s", extensions=None,), + args=dict(argstr="%s",), + color_fa_output=dict(argstr="--color_fa_output %s", hash_files=False,), + correction=dict(argstr="--correction %s",), + deformation_output=dict(argstr="--deformation_output %s", hash_files=False,), + dof_file=dict(argstr="--dof_file %s", extensions=None,), + dti_image=dict(argstr="--dti_image %s", extensions=None,), + environ=dict(nohash=True, 
usedefault=True,), + fa_gradient_output=dict(argstr="--fa_gradient_output %s", hash_files=False,), + fa_gradmag_output=dict(argstr="--fa_gradmag_output %s", hash_files=False,), + fa_output=dict(argstr="--fa_output %s", hash_files=False,), + forward=dict(argstr="--forward %s", extensions=None,), frobenius_norm_output=dict( - argstr='--frobenius_norm_output %s', - hash_files=False, - ), - hField=dict(argstr='--hField ', ), - interpolation=dict(argstr='--interpolation %s', ), - lambda1_output=dict( - argstr='--lambda1_output %s', - hash_files=False, - ), - lambda2_output=dict( - argstr='--lambda2_output %s', - hash_files=False, - ), - lambda3_output=dict( - argstr='--lambda3_output %s', - hash_files=False, - ), - mask=dict( - argstr='--mask %s', - extensions=None, - ), - md_output=dict( - argstr='--md_output %s', - hash_files=False, - ), + argstr="--frobenius_norm_output %s", hash_files=False, + ), + hField=dict(argstr="--hField ",), + interpolation=dict(argstr="--interpolation %s",), + lambda1_output=dict(argstr="--lambda1_output %s", hash_files=False,), + lambda2_output=dict(argstr="--lambda2_output %s", hash_files=False,), + lambda3_output=dict(argstr="--lambda3_output %s", hash_files=False,), + mask=dict(argstr="--mask %s", extensions=None,), + md_output=dict(argstr="--md_output %s", hash_files=False,), negative_eigenvector_output=dict( - argstr='--negative_eigenvector_output %s', - hash_files=False, - ), - newdof_file=dict( - argstr='--newdof_file %s', - extensions=None, - ), - outmask=dict( - argstr='--outmask %s', - hash_files=False, + argstr="--negative_eigenvector_output %s", hash_files=False, ), + newdof_file=dict(argstr="--newdof_file %s", extensions=None,), + outmask=dict(argstr="--outmask %s", hash_files=False,), principal_eigenvector_output=dict( - argstr='--principal_eigenvector_output %s', - hash_files=False, - ), - reorientation=dict(argstr='--reorientation %s', ), - rot_output=dict( - argstr='--rot_output %s', - hash_files=False, + argstr="--principal_eigenvector_output %s", hash_files=False, ), - scalar_float=dict(argstr='--scalar_float ', ), - sigma=dict(argstr='--sigma %f', ), - verbose=dict(argstr='--verbose ', ), + reorientation=dict(argstr="--reorientation %s",), + rot_output=dict(argstr="--rot_output %s", hash_files=False,), + scalar_float=dict(argstr="--scalar_float ",), + sigma=dict(argstr="--sigma %f",), + verbose=dict(argstr="--verbose ",), ) inputs = dtiprocess.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_dtiprocess_outputs(): output_map = dict( - RD_output=dict(extensions=None, ), - color_fa_output=dict(extensions=None, ), - deformation_output=dict(extensions=None, ), - fa_gradient_output=dict(extensions=None, ), - fa_gradmag_output=dict(extensions=None, ), - fa_output=dict(extensions=None, ), - frobenius_norm_output=dict(extensions=None, ), - lambda1_output=dict(extensions=None, ), - lambda2_output=dict(extensions=None, ), - lambda3_output=dict(extensions=None, ), - md_output=dict(extensions=None, ), - negative_eigenvector_output=dict(extensions=None, ), - outmask=dict(extensions=None, ), - principal_eigenvector_output=dict(extensions=None, ), - rot_output=dict(extensions=None, ), + RD_output=dict(extensions=None,), + color_fa_output=dict(extensions=None,), + deformation_output=dict(extensions=None,), + fa_gradient_output=dict(extensions=None,), + fa_gradmag_output=dict(extensions=None,), + fa_output=dict(extensions=None,), + 
frobenius_norm_output=dict(extensions=None,), + lambda1_output=dict(extensions=None,), + lambda2_output=dict(extensions=None,), + lambda3_output=dict(extensions=None,), + md_output=dict(extensions=None,), + negative_eigenvector_output=dict(extensions=None,), + outmask=dict(extensions=None,), + principal_eigenvector_output=dict(extensions=None,), + rot_output=dict(extensions=None,), ) outputs = dtiprocess.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py index f0ef5a1bbd..51ec99b1b8 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py @@ -4,30 +4,23 @@ def test_extractNrrdVectorIndex_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), - setImageOrientation=dict(argstr='--setImageOrientation %s', ), - vectorIndex=dict(argstr='--vectorIndex %d', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + setImageOrientation=dict(argstr="--setImageOrientation %s",), + vectorIndex=dict(argstr="--vectorIndex %d",), ) inputs = extractNrrdVectorIndex.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_extractNrrdVectorIndex_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = extractNrrdVectorIndex.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py index 02e8e364ee..3af3c53648 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py @@ -4,29 +4,22 @@ def test_gtractAnisotropyMap_inputs(): input_map = dict( - anisotropyType=dict(argstr='--anisotropyType %s', ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputTensorVolume=dict( - argstr='--inputTensorVolume %s', - extensions=None, - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), + anisotropyType=dict(argstr="--anisotropyType %s",), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputTensorVolume=dict(argstr="--inputTensorVolume %s", extensions=None,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), ) inputs = gtractAnisotropyMap.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_gtractAnisotropyMap_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = 
dict(outputVolume=dict(extensions=None,),) outputs = gtractAnisotropyMap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py index 59f8fef00c..1155f11628 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py @@ -4,30 +4,23 @@ def test_gtractAverageBvalues_inputs(): input_map = dict( - args=dict(argstr='%s', ), - averageB0only=dict(argstr='--averageB0only ', ), - directionsTolerance=dict(argstr='--directionsTolerance %f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), + args=dict(argstr="%s",), + averageB0only=dict(argstr="--averageB0only ",), + directionsTolerance=dict(argstr="--directionsTolerance %f",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), ) inputs = gtractAverageBvalues.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_gtractAverageBvalues_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = gtractAverageBvalues.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py index cf44a5361e..00fc963f69 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py @@ -4,30 +4,23 @@ def test_gtractClipAnisotropy_inputs(): input_map = dict( - args=dict(argstr='%s', ), - clipFirstSlice=dict(argstr='--clipFirstSlice ', ), - clipLastSlice=dict(argstr='--clipLastSlice ', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), + args=dict(argstr="%s",), + clipFirstSlice=dict(argstr="--clipFirstSlice ",), + clipLastSlice=dict(argstr="--clipLastSlice ",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), ) inputs = gtractClipAnisotropy.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_gtractClipAnisotropy_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = gtractClipAnisotropy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py 
b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py index ae2924540d..a5d2337c44 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py @@ -4,58 +4,44 @@ def test_gtractCoRegAnatomy_inputs(): input_map = dict( - args=dict(argstr='%s', ), - borderSize=dict(argstr='--borderSize %d', ), - convergence=dict(argstr='--convergence %f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - gradientTolerance=dict(argstr='--gradientTolerance %f', ), - gridSize=dict( - argstr='--gridSize %s', - sep=',', - ), + args=dict(argstr="%s",), + borderSize=dict(argstr="--borderSize %d",), + convergence=dict(argstr="--convergence %f",), + environ=dict(nohash=True, usedefault=True,), + gradientTolerance=dict(argstr="--gradientTolerance %f",), + gridSize=dict(argstr="--gridSize %s", sep=",",), inputAnatomicalVolume=dict( - argstr='--inputAnatomicalVolume %s', - extensions=None, - ), - inputRigidTransform=dict( - argstr='--inputRigidTransform %s', - extensions=None, - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, + argstr="--inputAnatomicalVolume %s", extensions=None, ), - maxBSplineDisplacement=dict(argstr='--maxBSplineDisplacement %f', ), - maximumStepSize=dict(argstr='--maximumStepSize %f', ), - minimumStepSize=dict(argstr='--minimumStepSize %f', ), - numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), - numberOfIterations=dict(argstr='--numberOfIterations %d', ), - numberOfSamples=dict(argstr='--numberOfSamples %d', ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputTransformName=dict( - argstr='--outputTransformName %s', - hash_files=False, - ), - relaxationFactor=dict(argstr='--relaxationFactor %f', ), - samplingPercentage=dict(argstr='--samplingPercentage %f', ), - spatialScale=dict(argstr='--spatialScale %d', ), - transformType=dict(argstr='--transformType %s', ), - translationScale=dict(argstr='--translationScale %f', ), - useCenterOfHeadAlign=dict(argstr='--useCenterOfHeadAlign ', ), - useGeometryAlign=dict(argstr='--useGeometryAlign ', ), - useMomentsAlign=dict(argstr='--useMomentsAlign ', ), - vectorIndex=dict(argstr='--vectorIndex %d', ), + inputRigidTransform=dict(argstr="--inputRigidTransform %s", extensions=None,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + maxBSplineDisplacement=dict(argstr="--maxBSplineDisplacement %f",), + maximumStepSize=dict(argstr="--maximumStepSize %f",), + minimumStepSize=dict(argstr="--minimumStepSize %f",), + numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d",), + numberOfIterations=dict(argstr="--numberOfIterations %d",), + numberOfSamples=dict(argstr="--numberOfSamples %d",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputTransformName=dict(argstr="--outputTransformName %s", hash_files=False,), + relaxationFactor=dict(argstr="--relaxationFactor %f",), + samplingPercentage=dict(argstr="--samplingPercentage %f",), + spatialScale=dict(argstr="--spatialScale %d",), + transformType=dict(argstr="--transformType %s",), + translationScale=dict(argstr="--translationScale %f",), + useCenterOfHeadAlign=dict(argstr="--useCenterOfHeadAlign ",), + useGeometryAlign=dict(argstr="--useGeometryAlign ",), + useMomentsAlign=dict(argstr="--useMomentsAlign ",), + vectorIndex=dict(argstr="--vectorIndex %d",), ) inputs = gtractCoRegAnatomy.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): 
assert getattr(inputs.traits()[key], metakey) == value + + def test_gtractCoRegAnatomy_outputs(): - output_map = dict(outputTransformName=dict(extensions=None, ), ) + output_map = dict(outputTransformName=dict(extensions=None,),) outputs = gtractCoRegAnatomy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py index 3320da9206..8fd46f9ab6 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py @@ -4,26 +4,22 @@ def test_gtractConcatDwi_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - ignoreOrigins=dict(argstr='--ignoreOrigins ', ), - inputVolume=dict(argstr='--inputVolume %s...', ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + ignoreOrigins=dict(argstr="--ignoreOrigins ",), + inputVolume=dict(argstr="--inputVolume %s...",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), ) inputs = gtractConcatDwi.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_gtractConcatDwi_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = gtractConcatDwi.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py index e9ac5031cb..4ce50c9faa 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py @@ -4,32 +4,22 @@ def test_gtractCopyImageOrientation_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputReferenceVolume=dict( - argstr='--inputReferenceVolume %s', - extensions=None, - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputReferenceVolume=dict(argstr="--inputReferenceVolume %s", extensions=None,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), ) inputs = gtractCopyImageOrientation.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_gtractCopyImageOrientation_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = gtractCopyImageOrientation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py 
b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py index 316c2ab507..639dc8cd69 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py @@ -4,49 +4,35 @@ def test_gtractCoregBvalues_inputs(): input_map = dict( - args=dict(argstr='%s', ), - debugLevel=dict(argstr='--debugLevel %d', ), - eddyCurrentCorrection=dict(argstr='--eddyCurrentCorrection ', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixedVolume=dict( - argstr='--fixedVolume %s', - extensions=None, - ), - fixedVolumeIndex=dict(argstr='--fixedVolumeIndex %d', ), - maximumStepSize=dict(argstr='--maximumStepSize %f', ), - minimumStepSize=dict(argstr='--minimumStepSize %f', ), - movingVolume=dict( - argstr='--movingVolume %s', - extensions=None, - ), - numberOfIterations=dict(argstr='--numberOfIterations %d', ), - numberOfSpatialSamples=dict(argstr='--numberOfSpatialSamples %d', ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputTransform=dict( - argstr='--outputTransform %s', - hash_files=False, - ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), - registerB0Only=dict(argstr='--registerB0Only ', ), - relaxationFactor=dict(argstr='--relaxationFactor %f', ), - samplingPercentage=dict(argstr='--samplingPercentage %f', ), - spatialScale=dict(argstr='--spatialScale %f', ), + args=dict(argstr="%s",), + debugLevel=dict(argstr="--debugLevel %d",), + eddyCurrentCorrection=dict(argstr="--eddyCurrentCorrection ",), + environ=dict(nohash=True, usedefault=True,), + fixedVolume=dict(argstr="--fixedVolume %s", extensions=None,), + fixedVolumeIndex=dict(argstr="--fixedVolumeIndex %d",), + maximumStepSize=dict(argstr="--maximumStepSize %f",), + minimumStepSize=dict(argstr="--minimumStepSize %f",), + movingVolume=dict(argstr="--movingVolume %s", extensions=None,), + numberOfIterations=dict(argstr="--numberOfIterations %d",), + numberOfSpatialSamples=dict(argstr="--numberOfSpatialSamples %d",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputTransform=dict(argstr="--outputTransform %s", hash_files=False,), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + registerB0Only=dict(argstr="--registerB0Only ",), + relaxationFactor=dict(argstr="--relaxationFactor %f",), + samplingPercentage=dict(argstr="--samplingPercentage %f",), + spatialScale=dict(argstr="--spatialScale %f",), ) inputs = gtractCoregBvalues.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_gtractCoregBvalues_outputs(): output_map = dict( - outputTransform=dict(extensions=None, ), - outputVolume=dict(extensions=None, ), + outputTransform=dict(extensions=None,), outputVolume=dict(extensions=None,), ) outputs = gtractCoregBvalues.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py index 08e262b306..cd5d34952d 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py @@ -4,46 +4,34 @@ def test_gtractCostFastMarching_inputs(): input_map = dict( - anisotropyWeight=dict(argstr='--anisotropyWeight %f', ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + 
anisotropyWeight=dict(argstr="--anisotropyWeight %f",), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), inputAnisotropyVolume=dict( - argstr='--inputAnisotropyVolume %s', - extensions=None, + argstr="--inputAnisotropyVolume %s", extensions=None, ), inputStartingSeedsLabelMapVolume=dict( - argstr='--inputStartingSeedsLabelMapVolume %s', - extensions=None, - ), - inputTensorVolume=dict( - argstr='--inputTensorVolume %s', - extensions=None, - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputCostVolume=dict( - argstr='--outputCostVolume %s', - hash_files=False, + argstr="--inputStartingSeedsLabelMapVolume %s", extensions=None, ), - outputSpeedVolume=dict( - argstr='--outputSpeedVolume %s', - hash_files=False, - ), - seedThreshold=dict(argstr='--seedThreshold %f', ), - startingSeedsLabel=dict(argstr='--startingSeedsLabel %d', ), - stoppingValue=dict(argstr='--stoppingValue %f', ), + inputTensorVolume=dict(argstr="--inputTensorVolume %s", extensions=None,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputCostVolume=dict(argstr="--outputCostVolume %s", hash_files=False,), + outputSpeedVolume=dict(argstr="--outputSpeedVolume %s", hash_files=False,), + seedThreshold=dict(argstr="--seedThreshold %f",), + startingSeedsLabel=dict(argstr="--startingSeedsLabel %d",), + stoppingValue=dict(argstr="--stoppingValue %f",), ) inputs = gtractCostFastMarching.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_gtractCostFastMarching_outputs(): output_map = dict( - outputCostVolume=dict(extensions=None, ), - outputSpeedVolume=dict(extensions=None, ), + outputCostVolume=dict(extensions=None,), + outputSpeedVolume=dict(extensions=None,), ) outputs = gtractCostFastMarching.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py index 4129974e29..6b2b0a31e5 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py @@ -4,30 +4,23 @@ def test_gtractCreateGuideFiber_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputFiber=dict( - argstr='--inputFiber %s', - extensions=None, - ), - numberOfPoints=dict(argstr='--numberOfPoints %d', ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputFiber=dict( - argstr='--outputFiber %s', - hash_files=False, - ), - writeXMLPolyDataFile=dict(argstr='--writeXMLPolyDataFile ', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputFiber=dict(argstr="--inputFiber %s", extensions=None,), + numberOfPoints=dict(argstr="--numberOfPoints %d",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputFiber=dict(argstr="--outputFiber %s", hash_files=False,), + writeXMLPolyDataFile=dict(argstr="--writeXMLPolyDataFile ",), ) inputs = gtractCreateGuideFiber.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_gtractCreateGuideFiber_outputs(): - output_map = dict(outputFiber=dict(extensions=None, ), ) + output_map = dict(outputFiber=dict(extensions=None,),) outputs = gtractCreateGuideFiber.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py index 1b71e0c953..15ee3053f0 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py @@ -4,48 +4,36 @@ def test_gtractFastMarchingTracking_inputs(): input_map = dict( - args=dict(argstr='%s', ), - costStepSize=dict(argstr='--costStepSize %f', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + costStepSize=dict(argstr="--costStepSize %f",), + environ=dict(nohash=True, usedefault=True,), inputAnisotropyVolume=dict( - argstr='--inputAnisotropyVolume %s', - extensions=None, - ), - inputCostVolume=dict( - argstr='--inputCostVolume %s', - extensions=None, + argstr="--inputAnisotropyVolume %s", extensions=None, ), + inputCostVolume=dict(argstr="--inputCostVolume %s", extensions=None,), inputStartingSeedsLabelMapVolume=dict( - argstr='--inputStartingSeedsLabelMapVolume %s', - extensions=None, - ), - inputTensorVolume=dict( - argstr='--inputTensorVolume %s', - extensions=None, + argstr="--inputStartingSeedsLabelMapVolume %s", extensions=None, ), - maximumStepSize=dict(argstr='--maximumStepSize %f', ), - minimumStepSize=dict(argstr='--minimumStepSize %f', ), - numberOfIterations=dict(argstr='--numberOfIterations %d', ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputTract=dict( - argstr='--outputTract %s', - hash_files=False, - ), - seedThreshold=dict(argstr='--seedThreshold %f', ), - startingSeedsLabel=dict(argstr='--startingSeedsLabel %d', ), - trackingThreshold=dict(argstr='--trackingThreshold %f', ), - writeXMLPolyDataFile=dict(argstr='--writeXMLPolyDataFile ', ), + inputTensorVolume=dict(argstr="--inputTensorVolume %s", extensions=None,), + maximumStepSize=dict(argstr="--maximumStepSize %f",), + minimumStepSize=dict(argstr="--minimumStepSize %f",), + numberOfIterations=dict(argstr="--numberOfIterations %d",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputTract=dict(argstr="--outputTract %s", hash_files=False,), + seedThreshold=dict(argstr="--seedThreshold %f",), + startingSeedsLabel=dict(argstr="--startingSeedsLabel %d",), + trackingThreshold=dict(argstr="--trackingThreshold %f",), + writeXMLPolyDataFile=dict(argstr="--writeXMLPolyDataFile ",), ) inputs = gtractFastMarchingTracking.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_gtractFastMarchingTracking_outputs(): - output_map = dict(outputTract=dict(extensions=None, ), ) + output_map = dict(outputTract=dict(extensions=None,),) outputs = gtractFastMarchingTracking.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py index 2f68a54610..510c00013a 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py @@ -4,66 +4,52 @@ def test_gtractFiberTracking_inputs(): input_map = dict( - args=dict(argstr='%s', ), - branchingAngle=dict(argstr='--branchingAngle %f', ), - branchingThreshold=dict(argstr='--branchingThreshold %f', ), - 
curvatureThreshold=dict(argstr='--curvatureThreshold %f', ), - endingSeedsLabel=dict(argstr='--endingSeedsLabel %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - guidedCurvatureThreshold=dict( - argstr='--guidedCurvatureThreshold %f', ), + args=dict(argstr="%s",), + branchingAngle=dict(argstr="--branchingAngle %f",), + branchingThreshold=dict(argstr="--branchingThreshold %f",), + curvatureThreshold=dict(argstr="--curvatureThreshold %f",), + endingSeedsLabel=dict(argstr="--endingSeedsLabel %d",), + environ=dict(nohash=True, usedefault=True,), + guidedCurvatureThreshold=dict(argstr="--guidedCurvatureThreshold %f",), inputAnisotropyVolume=dict( - argstr='--inputAnisotropyVolume %s', - extensions=None, + argstr="--inputAnisotropyVolume %s", extensions=None, ), inputEndingSeedsLabelMapVolume=dict( - argstr='--inputEndingSeedsLabelMapVolume %s', - extensions=None, + argstr="--inputEndingSeedsLabelMapVolume %s", extensions=None, ), inputStartingSeedsLabelMapVolume=dict( - argstr='--inputStartingSeedsLabelMapVolume %s', - extensions=None, - ), - inputTensorVolume=dict( - argstr='--inputTensorVolume %s', - extensions=None, - ), - inputTract=dict( - argstr='--inputTract %s', - extensions=None, + argstr="--inputStartingSeedsLabelMapVolume %s", extensions=None, ), - maximumBranchPoints=dict(argstr='--maximumBranchPoints %d', ), - maximumGuideDistance=dict(argstr='--maximumGuideDistance %f', ), - maximumLength=dict(argstr='--maximumLength %f', ), - minimumLength=dict(argstr='--minimumLength %f', ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputTract=dict( - argstr='--outputTract %s', - hash_files=False, - ), - randomSeed=dict(argstr='--randomSeed %d', ), - seedThreshold=dict(argstr='--seedThreshold %f', ), - startingSeedsLabel=dict(argstr='--startingSeedsLabel %d', ), - stepSize=dict(argstr='--stepSize %f', ), - tendF=dict(argstr='--tendF %f', ), - tendG=dict(argstr='--tendG %f', ), - trackingMethod=dict(argstr='--trackingMethod %s', ), - trackingThreshold=dict(argstr='--trackingThreshold %f', ), - useLoopDetection=dict(argstr='--useLoopDetection ', ), - useRandomWalk=dict(argstr='--useRandomWalk ', ), - useTend=dict(argstr='--useTend ', ), - writeXMLPolyDataFile=dict(argstr='--writeXMLPolyDataFile ', ), + inputTensorVolume=dict(argstr="--inputTensorVolume %s", extensions=None,), + inputTract=dict(argstr="--inputTract %s", extensions=None,), + maximumBranchPoints=dict(argstr="--maximumBranchPoints %d",), + maximumGuideDistance=dict(argstr="--maximumGuideDistance %f",), + maximumLength=dict(argstr="--maximumLength %f",), + minimumLength=dict(argstr="--minimumLength %f",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputTract=dict(argstr="--outputTract %s", hash_files=False,), + randomSeed=dict(argstr="--randomSeed %d",), + seedThreshold=dict(argstr="--seedThreshold %f",), + startingSeedsLabel=dict(argstr="--startingSeedsLabel %d",), + stepSize=dict(argstr="--stepSize %f",), + tendF=dict(argstr="--tendF %f",), + tendG=dict(argstr="--tendG %f",), + trackingMethod=dict(argstr="--trackingMethod %s",), + trackingThreshold=dict(argstr="--trackingThreshold %f",), + useLoopDetection=dict(argstr="--useLoopDetection ",), + useRandomWalk=dict(argstr="--useRandomWalk ",), + useTend=dict(argstr="--useTend ",), + writeXMLPolyDataFile=dict(argstr="--writeXMLPolyDataFile ",), ) inputs = gtractFiberTracking.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + 
def test_gtractFiberTracking_outputs(): - output_map = dict(outputTract=dict(extensions=None, ), ) + output_map = dict(outputTract=dict(extensions=None,),) outputs = gtractFiberTracking.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py index 37157c27b6..b382e97133 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py @@ -4,32 +4,22 @@ def test_gtractImageConformity_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputReferenceVolume=dict( - argstr='--inputReferenceVolume %s', - extensions=None, - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputReferenceVolume=dict(argstr="--inputReferenceVolume %s", extensions=None,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), ) inputs = gtractImageConformity.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_gtractImageConformity_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = gtractImageConformity.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py index e15985bbf6..9affd39654 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py @@ -4,36 +4,23 @@ def test_gtractInvertBSplineTransform_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputReferenceVolume=dict( - argstr='--inputReferenceVolume %s', - extensions=None, - ), - inputTransform=dict( - argstr='--inputTransform %s', - extensions=None, - ), - landmarkDensity=dict( - argstr='--landmarkDensity %s', - sep=',', - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputTransform=dict( - argstr='--outputTransform %s', - hash_files=False, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputReferenceVolume=dict(argstr="--inputReferenceVolume %s", extensions=None,), + inputTransform=dict(argstr="--inputTransform %s", extensions=None,), + landmarkDensity=dict(argstr="--landmarkDensity %s", sep=",",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputTransform=dict(argstr="--outputTransform %s", hash_files=False,), ) inputs = gtractInvertBSplineTransform.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_gtractInvertBSplineTransform_outputs(): - output_map = 
dict(outputTransform=dict(extensions=None, ), ) + output_map = dict(outputTransform=dict(extensions=None,),) outputs = gtractInvertBSplineTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py index 97b2038c06..2ec1e53e42 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py @@ -4,33 +4,23 @@ def test_gtractInvertDisplacementField_inputs(): input_map = dict( - args=dict(argstr='%s', ), - baseImage=dict( - argstr='--baseImage %s', - extensions=None, - ), - deformationImage=dict( - argstr='--deformationImage %s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), - subsamplingFactor=dict(argstr='--subsamplingFactor %d', ), + args=dict(argstr="%s",), + baseImage=dict(argstr="--baseImage %s", extensions=None,), + deformationImage=dict(argstr="--deformationImage %s", extensions=None,), + environ=dict(nohash=True, usedefault=True,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + subsamplingFactor=dict(argstr="--subsamplingFactor %d",), ) inputs = gtractInvertDisplacementField.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_gtractInvertDisplacementField_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = gtractInvertDisplacementField.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py index 60d92c44ac..bbf3b5b260 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py @@ -4,28 +4,21 @@ def test_gtractInvertRigidTransform_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputTransform=dict( - argstr='--inputTransform %s', - extensions=None, - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputTransform=dict( - argstr='--outputTransform %s', - hash_files=False, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputTransform=dict(argstr="--inputTransform %s", extensions=None,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputTransform=dict(argstr="--outputTransform %s", hash_files=False,), ) inputs = gtractInvertRigidTransform.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_gtractInvertRigidTransform_outputs(): - output_map = dict(outputTransform=dict(extensions=None, ), ) + output_map = dict(outputTransform=dict(extensions=None,),) outputs = gtractInvertRigidTransform.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py index dc2916a5fe..dd30cda525 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py @@ -4,37 +4,28 @@ def test_gtractResampleAnisotropy_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), inputAnatomicalVolume=dict( - argstr='--inputAnatomicalVolume %s', - extensions=None, + argstr="--inputAnatomicalVolume %s", extensions=None, ), inputAnisotropyVolume=dict( - argstr='--inputAnisotropyVolume %s', - extensions=None, - ), - inputTransform=dict( - argstr='--inputTransform %s', - extensions=None, - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, + argstr="--inputAnisotropyVolume %s", extensions=None, ), - transformType=dict(argstr='--transformType %s', ), + inputTransform=dict(argstr="--inputTransform %s", extensions=None,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + transformType=dict(argstr="--transformType %s",), ) inputs = gtractResampleAnisotropy.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_gtractResampleAnisotropy_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = gtractResampleAnisotropy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py index ec80dd6268..e512fed7b5 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py @@ -4,38 +4,27 @@ def test_gtractResampleB0_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), inputAnatomicalVolume=dict( - argstr='--inputAnatomicalVolume %s', - extensions=None, - ), - inputTransform=dict( - argstr='--inputTransform %s', - extensions=None, - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, + argstr="--inputAnatomicalVolume %s", extensions=None, ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), - transformType=dict(argstr='--transformType %s', ), - vectorIndex=dict(argstr='--vectorIndex %d', ), + inputTransform=dict(argstr="--inputTransform %s", extensions=None,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + transformType=dict(argstr="--transformType %s",), + vectorIndex=dict(argstr="--vectorIndex %d",), ) inputs = gtractResampleB0.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def 
test_gtractResampleB0_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = gtractResampleB0.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py index 01c8e024db..4cc5c30e4f 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py @@ -4,37 +4,24 @@ def test_gtractResampleCodeImage_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputCodeVolume=dict( - argstr='--inputCodeVolume %s', - extensions=None, - ), - inputReferenceVolume=dict( - argstr='--inputReferenceVolume %s', - extensions=None, - ), - inputTransform=dict( - argstr='--inputTransform %s', - extensions=None, - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), - transformType=dict(argstr='--transformType %s', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputCodeVolume=dict(argstr="--inputCodeVolume %s", extensions=None,), + inputReferenceVolume=dict(argstr="--inputReferenceVolume %s", extensions=None,), + inputTransform=dict(argstr="--inputTransform %s", extensions=None,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + transformType=dict(argstr="--transformType %s",), ) inputs = gtractResampleCodeImage.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_gtractResampleCodeImage_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = gtractResampleCodeImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py index e1ce459566..f87aa364cc 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py @@ -4,51 +4,28 @@ def test_gtractResampleDWIInPlace_inputs(): input_map = dict( - args=dict(argstr='%s', ), - debugLevel=dict(argstr='--debugLevel %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - imageOutputSize=dict( - argstr='--imageOutputSize %s', - sep=',', - ), - inputTransform=dict( - argstr='--inputTransform %s', - extensions=None, - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputResampledB0=dict( - argstr='--outputResampledB0 %s', - hash_files=False, - ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), - referenceVolume=dict( - argstr='--referenceVolume %s', - extensions=None, - ), - warpDWITransform=dict( - argstr='--warpDWITransform %s', - extensions=None, - ), + args=dict(argstr="%s",), + debugLevel=dict(argstr="--debugLevel %d",), + environ=dict(nohash=True, usedefault=True,), + imageOutputSize=dict(argstr="--imageOutputSize %s", sep=",",), + 
inputTransform=dict(argstr="--inputTransform %s", extensions=None,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputResampledB0=dict(argstr="--outputResampledB0 %s", hash_files=False,), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + referenceVolume=dict(argstr="--referenceVolume %s", extensions=None,), + warpDWITransform=dict(argstr="--warpDWITransform %s", extensions=None,), ) inputs = gtractResampleDWIInPlace.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_gtractResampleDWIInPlace_outputs(): output_map = dict( - outputResampledB0=dict(extensions=None, ), - outputVolume=dict(extensions=None, ), + outputResampledB0=dict(extensions=None,), outputVolume=dict(extensions=None,), ) outputs = gtractResampleDWIInPlace.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py index c957857860..3c7a6b33b8 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py @@ -4,37 +4,28 @@ def test_gtractResampleFibers_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), inputForwardDeformationFieldVolume=dict( - argstr='--inputForwardDeformationFieldVolume %s', - extensions=None, + argstr="--inputForwardDeformationFieldVolume %s", extensions=None, ), inputReverseDeformationFieldVolume=dict( - argstr='--inputReverseDeformationFieldVolume %s', - extensions=None, - ), - inputTract=dict( - argstr='--inputTract %s', - extensions=None, - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputTract=dict( - argstr='--outputTract %s', - hash_files=False, + argstr="--inputReverseDeformationFieldVolume %s", extensions=None, ), - writeXMLPolyDataFile=dict(argstr='--writeXMLPolyDataFile ', ), + inputTract=dict(argstr="--inputTract %s", extensions=None,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputTract=dict(argstr="--outputTract %s", hash_files=False,), + writeXMLPolyDataFile=dict(argstr="--writeXMLPolyDataFile ",), ) inputs = gtractResampleFibers.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_gtractResampleFibers_outputs(): - output_map = dict(outputTract=dict(extensions=None, ), ) + output_map = dict(outputTract=dict(extensions=None,),) outputs = gtractResampleFibers.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py index be941353cd..2372b9599f 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py @@ -4,47 +4,32 @@ def test_gtractTensor_inputs(): input_map = dict( - applyMeasurementFrame=dict(argstr='--applyMeasurementFrame ', ), - args=dict(argstr='%s', ), - b0Index=dict(argstr='--b0Index %d', ), + applyMeasurementFrame=dict(argstr="--applyMeasurementFrame ",), + args=dict(argstr="%s",), + 
b0Index=dict(argstr="--b0Index %d",), backgroundSuppressingThreshold=dict( - argstr='--backgroundSuppressingThreshold %d', ), - environ=dict( - nohash=True, - usedefault=True, + argstr="--backgroundSuppressingThreshold %d", ), - ignoreIndex=dict( - argstr='--ignoreIndex %s', - sep=',', - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - maskProcessingMode=dict(argstr='--maskProcessingMode %s', ), - maskVolume=dict( - argstr='--maskVolume %s', - extensions=None, - ), - medianFilterSize=dict( - argstr='--medianFilterSize %s', - sep=',', - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), - resampleIsotropic=dict(argstr='--resampleIsotropic ', ), - size=dict(argstr='--size %f', ), + environ=dict(nohash=True, usedefault=True,), + ignoreIndex=dict(argstr="--ignoreIndex %s", sep=",",), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + maskProcessingMode=dict(argstr="--maskProcessingMode %s",), + maskVolume=dict(argstr="--maskVolume %s", extensions=None,), + medianFilterSize=dict(argstr="--medianFilterSize %s", sep=",",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + resampleIsotropic=dict(argstr="--resampleIsotropic ",), + size=dict(argstr="--size %f",), ) inputs = gtractTensor.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_gtractTensor_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = gtractTensor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py index 9b965676db..3999871191 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py @@ -4,23 +4,13 @@ def test_gtractTransformToDisplacementField_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputReferenceVolume=dict( - argstr='--inputReferenceVolume %s', - extensions=None, - ), - inputTransform=dict( - argstr='--inputTransform %s', - extensions=None, - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputReferenceVolume=dict(argstr="--inputReferenceVolume %s", extensions=None,), + inputTransform=dict(argstr="--inputTransform %s", extensions=None,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), outputDeformationFieldVolume=dict( - argstr='--outputDeformationFieldVolume %s', - hash_files=False, + argstr="--outputDeformationFieldVolume %s", hash_files=False, ), ) inputs = gtractTransformToDisplacementField.input_spec() @@ -28,8 +18,10 @@ def test_gtractTransformToDisplacementField_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_gtractTransformToDisplacementField_outputs(): - output_map = dict(outputDeformationFieldVolume=dict(extensions=None, ), ) + output_map = 
dict(outputDeformationFieldVolume=dict(extensions=None,),) outputs = gtractTransformToDisplacementField.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py index 1cef41a6c7..9ec247675e 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py @@ -4,29 +4,22 @@ def test_maxcurvature_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - image=dict( - argstr='--image %s', - extensions=None, - ), - output=dict( - argstr='--output %s', - hash_files=False, - ), - sigma=dict(argstr='--sigma %f', ), - verbose=dict(argstr='--verbose ', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + image=dict(argstr="--image %s", extensions=None,), + output=dict(argstr="--output %s", hash_files=False,), + sigma=dict(argstr="--sigma %f",), + verbose=dict(argstr="--verbose ",), ) inputs = maxcurvature.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_maxcurvature_outputs(): - output_map = dict(output=dict(extensions=None, ), ) + output_map = dict(output=dict(extensions=None,),) outputs = maxcurvature.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tractography/commandlineonly.py b/nipype/interfaces/semtools/diffusion/tractography/commandlineonly.py index 19adc2a817..becf1466e9 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/commandlineonly.py +++ b/nipype/interfaces/semtools/diffusion/tractography/commandlineonly.py @@ -5,14 +5,22 @@ import os -from ....base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, - TraitedSpec, File, Directory, traits, isdefined, - InputMultiPath, OutputMultiPath) +from ....base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) class fiberstatsInputSpec(CommandLineInputSpec): - fiber_file = File( - desc="DTI Fiber File", exists=True, argstr="--fiber_file %s") + fiber_file = File(desc="DTI Fiber File", exists=True, argstr="--fiber_file %s") verbose = traits.Bool(desc="produce verbose output", argstr="--verbose ") diff --git a/nipype/interfaces/semtools/diffusion/tractography/fiberprocess.py b/nipype/interfaces/semtools/diffusion/tractography/fiberprocess.py index c0e9dcbbaf..1798ead449 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/fiberprocess.py +++ b/nipype/interfaces/semtools/diffusion/tractography/fiberprocess.py @@ -5,76 +5,86 @@ import os -from ....base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, - TraitedSpec, File, Directory, traits, isdefined, - InputMultiPath, OutputMultiPath) +from ....base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) class fiberprocessInputSpec(CommandLineInputSpec): - fiber_file = File( - desc="DTI fiber file", exists=True, argstr="--fiber_file %s") + fiber_file = File(desc="DTI fiber file", exists=True, argstr="--fiber_file %s") fiber_output = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Output fiber file. 
May be warped or updated with new data depending on other options used.", - argstr="--fiber_output %s") + desc="Output fiber file. May be warped or updated with new data depending on other options used.", + argstr="--fiber_output %s", + ) tensor_volume = File( desc="Interpolate tensor values from the given field", exists=True, - argstr="--tensor_volume %s") + argstr="--tensor_volume %s", + ) h_field = File( - desc= - "HField for warp and statistics lookup. If this option is used tensor-volume must also be specified.", + desc="HField for warp and statistics lookup. If this option is used tensor-volume must also be specified.", exists=True, - argstr="--h_field %s") + argstr="--h_field %s", + ) displacement_field = File( - desc= - "Displacement Field for warp and statistics lookup. If this option is used tensor-volume must also be specified.", + desc="Displacement Field for warp and statistics lookup. If this option is used tensor-volume must also be specified.", exists=True, - argstr="--displacement_field %s") + argstr="--displacement_field %s", + ) saveProperties = traits.Bool( - desc= - "save the tensor property as scalar data into the vtk (only works for vtk fiber files). ", - argstr="--saveProperties ") + desc="save the tensor property as scalar data into the vtk (only works for vtk fiber files). ", + argstr="--saveProperties ", + ) no_warp = traits.Bool( - desc= - "Do not warp the geometry of the tensors only obtain the new statistics.", - argstr="--no_warp ") + desc="Do not warp the geometry of the tensors only obtain the new statistics.", + argstr="--no_warp ", + ) fiber_radius = traits.Float( - desc="set radius of all fibers to this value", - argstr="--fiber_radius %f") + desc="set radius of all fibers to this value", argstr="--fiber_radius %f" + ) index_space = traits.Bool( - desc= - "Use index-space for fiber output coordinates, otherwise us world space for fiber output coordinates (from tensor file).", - argstr="--index_space ") + desc="Use index-space for fiber output coordinates, otherwise us world space for fiber output coordinates (from tensor file).", + argstr="--index_space ", + ) voxelize = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Voxelize fiber into a label map (the labelmap filename is the argument of -V). The tensor file must be specified using -T for information about the size, origin, spacing of the image. The deformation is applied before the voxelization ", - argstr="--voxelize %s") + desc="Voxelize fiber into a label map (the labelmap filename is the argument of -V). The tensor file must be specified using -T for information about the size, origin, spacing of the image. The deformation is applied before the voxelization ", + argstr="--voxelize %s", + ) voxelize_count_fibers = traits.Bool( desc="Count number of fibers per-voxel instead of just setting to 1", - argstr="--voxelize_count_fibers ") + argstr="--voxelize_count_fibers ", + ) voxel_label = traits.Int( - desc="Label for voxelized fiber", argstr="--voxel_label %d") + desc="Label for voxelized fiber", argstr="--voxel_label %d" + ) verbose = traits.Bool(desc="produce verbose output", argstr="--verbose ") - noDataChange = traits.Bool( - desc="Do not change data ??? ", argstr="--noDataChange ") + noDataChange = traits.Bool(desc="Do not change data ??? ", argstr="--noDataChange ") class fiberprocessOutputSpec(TraitedSpec): fiber_output = File( - desc= - "Output fiber file. May be warped or updated with new data depending on other options used.", - exists=True) + desc="Output fiber file. 
May be warped or updated with new data depending on other options used.", + exists=True, + ) voxelize = File( - desc= - "Voxelize fiber into a label map (the labelmap filename is the argument of -V). The tensor file must be specified using -T for information about the size, origin, spacing of the image. The deformation is applied before the voxelization ", - exists=True) + desc="Voxelize fiber into a label map (the labelmap filename is the argument of -V). The tensor file must be specified using -T for information about the size, origin, spacing of the image. The deformation is applied before the voxelization ", + exists=True, + ) class fiberprocess(SEMLikeCommandLine): @@ -104,7 +114,7 @@ class fiberprocess(SEMLikeCommandLine): output_spec = fiberprocessOutputSpec _cmd = " fiberprocess " _outputs_filenames = { - 'fiber_output': 'fiber_output.vtk', - 'voxelize': 'voxelize.nii' + "fiber_output": "fiber_output.vtk", + "voxelize": "voxelize.nii", } _redirect_x = False diff --git a/nipype/interfaces/semtools/diffusion/tractography/fibertrack.py b/nipype/interfaces/semtools/diffusion/tractography/fibertrack.py index 498cb2579d..c6eb7f13e0 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/fibertrack.py +++ b/nipype/interfaces/semtools/diffusion/tractography/fibertrack.py @@ -5,60 +5,70 @@ import os -from ....base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, - TraitedSpec, File, Directory, traits, isdefined, - InputMultiPath, OutputMultiPath) +from ....base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) class fibertrackInputSpec(CommandLineInputSpec): input_tensor_file = File( - desc="Tensor Image", exists=True, argstr="--input_tensor_file %s") + desc="Tensor Image", exists=True, argstr="--input_tensor_file %s" + ) input_roi_file = File( - desc= - "The filename of the image which contains the labels used for seeding and constraining the algorithm.", + desc="The filename of the image which contains the labels used for seeding and constraining the algorithm.", exists=True, - argstr="--input_roi_file %s") + argstr="--input_roi_file %s", + ) output_fiber_file = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "The filename for the fiber file produced by the algorithm. This file must end in a .fib or .vtk extension for ITK spatial object and vtkPolyData formats respectively.", - argstr="--output_fiber_file %s") + desc="The filename for the fiber file produced by the algorithm. This file must end in a .fib or .vtk extension for ITK spatial object and vtkPolyData formats respectively.", + argstr="--output_fiber_file %s", + ) source_label = traits.Int( - desc= - "The label of voxels in the labelfile to use for seeding tractography. One tract is seeded from the center of each voxel with this label", - argstr="--source_label %d") + desc="The label of voxels in the labelfile to use for seeding tractography. One tract is seeded from the center of each voxel with this label", + argstr="--source_label %d", + ) target_label = traits.Int( - desc= - "The label of voxels in the labelfile used to constrain tractography. Tracts that do not pass through a voxel with this label are rejected. Set this keep all tracts.", - argstr="--target_label %d") - forbidden_label = traits.Int( - desc="Forbidden label", argstr="--forbidden_label %d") + desc="The label of voxels in the labelfile used to constrain tractography. 
Tracts that do not pass through a voxel with this label are rejected. Set this keep all tracts.", + argstr="--target_label %d", + ) + forbidden_label = traits.Int(desc="Forbidden label", argstr="--forbidden_label %d") whole_brain = traits.Bool( - desc= - "If this option is enabled all voxels in the image are used to seed tractography. When this option is enabled both source and target labels function as target labels", - argstr="--whole_brain ") + desc="If this option is enabled all voxels in the image are used to seed tractography. When this option is enabled both source and target labels function as target labels", + argstr="--whole_brain ", + ) max_angle = traits.Float( - desc="Maximum angle of change in radians", argstr="--max_angle %f") + desc="Maximum angle of change in radians", argstr="--max_angle %f" + ) step_size = traits.Float( - desc="Step size in mm for the tracking algorithm", - argstr="--step_size %f") + desc="Step size in mm for the tracking algorithm", argstr="--step_size %f" + ) min_fa = traits.Float( - desc="The minimum FA threshold to continue tractography", - argstr="--min_fa %f") + desc="The minimum FA threshold to continue tractography", argstr="--min_fa %f" + ) force = traits.Bool(desc="Ignore sanity checks.", argstr="--force ") verbose = traits.Bool(desc="produce verbose output", argstr="--verbose ") really_verbose = traits.Bool( - desc="Follow detail of fiber tracking algorithm", - argstr="--really_verbose ") + desc="Follow detail of fiber tracking algorithm", argstr="--really_verbose " + ) class fibertrackOutputSpec(TraitedSpec): output_fiber_file = File( - desc= - "The filename for the fiber file produced by the algorithm. This file must end in a .fib or .vtk extension for ITK spatial object and vtkPolyData formats respectively.", - exists=True) + desc="The filename for the fiber file produced by the algorithm. 
This file must end in a .fib or .vtk extension for ITK spatial object and vtkPolyData formats respectively.", + exists=True, + ) class fibertrack(SEMLikeCommandLine): @@ -90,5 +100,5 @@ class fibertrack(SEMLikeCommandLine): input_spec = fibertrackInputSpec output_spec = fibertrackOutputSpec _cmd = " fibertrack " - _outputs_filenames = {'output_fiber_file': 'output_fiber_file.vtk'} + _outputs_filenames = {"output_fiber_file": "output_fiber_file.vtk"} _redirect_x = False diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py index 1a40f44cc3..a7e86c79b8 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py @@ -4,72 +4,54 @@ def test_UKFTractography_inputs(): input_map = dict( - Ql=dict(argstr='--Ql %f', ), - Qm=dict(argstr='--Qm %f', ), - Qw=dict(argstr='--Qw %f', ), - Rs=dict(argstr='--Rs %f', ), - args=dict(argstr='%s', ), - dwiFile=dict( - argstr='--dwiFile %s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - freeWater=dict(argstr='--freeWater ', ), - fullTensorModel=dict(argstr='--fullTensorModel ', ), - labels=dict( - argstr='--labels %s', - sep=',', - ), - maskFile=dict( - argstr='--maskFile %s', - extensions=None, - ), - maxBranchingAngle=dict(argstr='--maxBranchingAngle %f', ), - maxHalfFiberLength=dict(argstr='--maxHalfFiberLength %f', ), - minBranchingAngle=dict(argstr='--minBranchingAngle %f', ), - minFA=dict(argstr='--minFA %f', ), - minGA=dict(argstr='--minGA %f', ), - numTensor=dict(argstr='--numTensor %s', ), - numThreads=dict(argstr='--numThreads %d', ), - recordCovariance=dict(argstr='--recordCovariance ', ), - recordFA=dict(argstr='--recordFA ', ), - recordFreeWater=dict(argstr='--recordFreeWater ', ), - recordLength=dict(argstr='--recordLength %f', ), - recordNMSE=dict(argstr='--recordNMSE ', ), - recordState=dict(argstr='--recordState ', ), - recordTensors=dict(argstr='--recordTensors ', ), - recordTrace=dict(argstr='--recordTrace ', ), - seedFALimit=dict(argstr='--seedFALimit %f', ), - seedsFile=dict( - argstr='--seedsFile %s', - extensions=None, - ), - seedsPerVoxel=dict(argstr='--seedsPerVoxel %d', ), - stepLength=dict(argstr='--stepLength %f', ), - storeGlyphs=dict(argstr='--storeGlyphs ', ), - tracts=dict( - argstr='--tracts %s', - hash_files=False, - ), + Ql=dict(argstr="--Ql %f",), + Qm=dict(argstr="--Qm %f",), + Qw=dict(argstr="--Qw %f",), + Rs=dict(argstr="--Rs %f",), + args=dict(argstr="%s",), + dwiFile=dict(argstr="--dwiFile %s", extensions=None,), + environ=dict(nohash=True, usedefault=True,), + freeWater=dict(argstr="--freeWater ",), + fullTensorModel=dict(argstr="--fullTensorModel ",), + labels=dict(argstr="--labels %s", sep=",",), + maskFile=dict(argstr="--maskFile %s", extensions=None,), + maxBranchingAngle=dict(argstr="--maxBranchingAngle %f",), + maxHalfFiberLength=dict(argstr="--maxHalfFiberLength %f",), + minBranchingAngle=dict(argstr="--minBranchingAngle %f",), + minFA=dict(argstr="--minFA %f",), + minGA=dict(argstr="--minGA %f",), + numTensor=dict(argstr="--numTensor %s",), + numThreads=dict(argstr="--numThreads %d",), + recordCovariance=dict(argstr="--recordCovariance ",), + recordFA=dict(argstr="--recordFA ",), + recordFreeWater=dict(argstr="--recordFreeWater ",), + recordLength=dict(argstr="--recordLength %f",), + recordNMSE=dict(argstr="--recordNMSE ",), 
+ recordState=dict(argstr="--recordState ",), + recordTensors=dict(argstr="--recordTensors ",), + recordTrace=dict(argstr="--recordTrace ",), + seedFALimit=dict(argstr="--seedFALimit %f",), + seedsFile=dict(argstr="--seedsFile %s", extensions=None,), + seedsPerVoxel=dict(argstr="--seedsPerVoxel %d",), + stepLength=dict(argstr="--stepLength %f",), + storeGlyphs=dict(argstr="--storeGlyphs ",), + tracts=dict(argstr="--tracts %s", hash_files=False,), tractsWithSecondTensor=dict( - argstr='--tractsWithSecondTensor %s', - hash_files=False, + argstr="--tractsWithSecondTensor %s", hash_files=False, ), - writeAsciiTracts=dict(argstr='--writeAsciiTracts ', ), - writeUncompressedTracts=dict(argstr='--writeUncompressedTracts ', ), + writeAsciiTracts=dict(argstr="--writeAsciiTracts ",), + writeUncompressedTracts=dict(argstr="--writeUncompressedTracts ",), ) inputs = UKFTractography.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_UKFTractography_outputs(): output_map = dict( - tracts=dict(extensions=None, ), - tractsWithSecondTensor=dict(extensions=None, ), + tracts=dict(extensions=None,), tractsWithSecondTensor=dict(extensions=None,), ) outputs = UKFTractography.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py index e34b12cffc..d25c1a10ca 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py @@ -4,53 +4,33 @@ def test_fiberprocess_inputs(): input_map = dict( - args=dict(argstr='%s', ), - displacement_field=dict( - argstr='--displacement_field %s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fiber_file=dict( - argstr='--fiber_file %s', - extensions=None, - ), - fiber_output=dict( - argstr='--fiber_output %s', - hash_files=False, - ), - fiber_radius=dict(argstr='--fiber_radius %f', ), - h_field=dict( - argstr='--h_field %s', - extensions=None, - ), - index_space=dict(argstr='--index_space ', ), - noDataChange=dict(argstr='--noDataChange ', ), - no_warp=dict(argstr='--no_warp ', ), - saveProperties=dict(argstr='--saveProperties ', ), - tensor_volume=dict( - argstr='--tensor_volume %s', - extensions=None, - ), - verbose=dict(argstr='--verbose ', ), - voxel_label=dict(argstr='--voxel_label %d', ), - voxelize=dict( - argstr='--voxelize %s', - hash_files=False, - ), - voxelize_count_fibers=dict(argstr='--voxelize_count_fibers ', ), + args=dict(argstr="%s",), + displacement_field=dict(argstr="--displacement_field %s", extensions=None,), + environ=dict(nohash=True, usedefault=True,), + fiber_file=dict(argstr="--fiber_file %s", extensions=None,), + fiber_output=dict(argstr="--fiber_output %s", hash_files=False,), + fiber_radius=dict(argstr="--fiber_radius %f",), + h_field=dict(argstr="--h_field %s", extensions=None,), + index_space=dict(argstr="--index_space ",), + noDataChange=dict(argstr="--noDataChange ",), + no_warp=dict(argstr="--no_warp ",), + saveProperties=dict(argstr="--saveProperties ",), + tensor_volume=dict(argstr="--tensor_volume %s", extensions=None,), + verbose=dict(argstr="--verbose ",), + voxel_label=dict(argstr="--voxel_label %d",), + voxelize=dict(argstr="--voxelize %s", hash_files=False,), + voxelize_count_fibers=dict(argstr="--voxelize_count_fibers ",), ) inputs = 
fiberprocess.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_fiberprocess_outputs(): output_map = dict( - fiber_output=dict(extensions=None, ), - voxelize=dict(extensions=None, ), + fiber_output=dict(extensions=None,), voxelize=dict(extensions=None,), ) outputs = fiberprocess.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py index c3f750e4cb..570ea316c7 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py @@ -4,22 +4,18 @@ def test_fiberstats_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fiber_file=dict( - argstr='--fiber_file %s', - extensions=None, - ), - verbose=dict(argstr='--verbose ', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + fiber_file=dict(argstr="--fiber_file %s", extensions=None,), + verbose=dict(argstr="--verbose ",), ) inputs = fiberstats.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_fiberstats_outputs(): output_map = dict() outputs = fiberstats.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py index 1d241b8388..d3994690d1 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py @@ -4,41 +4,31 @@ def test_fibertrack_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - forbidden_label=dict(argstr='--forbidden_label %d', ), - force=dict(argstr='--force ', ), - input_roi_file=dict( - argstr='--input_roi_file %s', - extensions=None, - ), - input_tensor_file=dict( - argstr='--input_tensor_file %s', - extensions=None, - ), - max_angle=dict(argstr='--max_angle %f', ), - min_fa=dict(argstr='--min_fa %f', ), - output_fiber_file=dict( - argstr='--output_fiber_file %s', - hash_files=False, - ), - really_verbose=dict(argstr='--really_verbose ', ), - source_label=dict(argstr='--source_label %d', ), - step_size=dict(argstr='--step_size %f', ), - target_label=dict(argstr='--target_label %d', ), - verbose=dict(argstr='--verbose ', ), - whole_brain=dict(argstr='--whole_brain ', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + forbidden_label=dict(argstr="--forbidden_label %d",), + force=dict(argstr="--force ",), + input_roi_file=dict(argstr="--input_roi_file %s", extensions=None,), + input_tensor_file=dict(argstr="--input_tensor_file %s", extensions=None,), + max_angle=dict(argstr="--max_angle %f",), + min_fa=dict(argstr="--min_fa %f",), + output_fiber_file=dict(argstr="--output_fiber_file %s", hash_files=False,), + really_verbose=dict(argstr="--really_verbose ",), + source_label=dict(argstr="--source_label %d",), + step_size=dict(argstr="--step_size %f",), + target_label=dict(argstr="--target_label %d",), + verbose=dict(argstr="--verbose ",), + whole_brain=dict(argstr="--whole_brain ",), ) inputs = fibertrack.input_spec() for key, metadata 
in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_fibertrack_outputs(): - output_map = dict(output_fiber_file=dict(extensions=None, ), ) + output_map = dict(output_fiber_file=dict(extensions=None,),) outputs = fibertrack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tractography/ukftractography.py b/nipype/interfaces/semtools/diffusion/tractography/ukftractography.py index 11971dbb6d..228d162560 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/ukftractography.py +++ b/nipype/interfaces/semtools/diffusion/tractography/ukftractography.py @@ -5,139 +5,153 @@ import os -from ....base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, - TraitedSpec, File, Directory, traits, isdefined, - InputMultiPath, OutputMultiPath) +from ....base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) class UKFTractographyInputSpec(CommandLineInputSpec): dwiFile = File(desc="Input DWI volume", exists=True, argstr="--dwiFile %s") seedsFile = File( - desc= - "Seeds for diffusion. If not specified, full brain tractography will be performed, and the algorithm will start from every voxel in the brain mask where the Generalized Anisotropy is bigger than 0.18", + desc="Seeds for diffusion. If not specified, full brain tractography will be performed, and the algorithm will start from every voxel in the brain mask where the Generalized Anisotropy is bigger than 0.18", exists=True, - argstr="--seedsFile %s") + argstr="--seedsFile %s", + ) labels = InputMultiPath( traits.Int, desc="A vector of the ROI labels to be used", sep=",", - argstr="--labels %s") + argstr="--labels %s", + ) maskFile = File( - desc="Mask for diffusion tractography", - exists=True, - argstr="--maskFile %s") + desc="Mask for diffusion tractography", exists=True, argstr="--maskFile %s" + ) tracts = traits.Either( traits.Bool, File(), hash_files=False, desc="Tracts generated, with first tensor output", - argstr="--tracts %s") + argstr="--tracts %s", + ) writeAsciiTracts = traits.Bool( - desc="Write tract file as a VTK binary data file", - argstr="--writeAsciiTracts ") + desc="Write tract file as a VTK binary data file", argstr="--writeAsciiTracts " + ) writeUncompressedTracts = traits.Bool( desc="Write tract file as a VTK uncompressed data file", - argstr="--writeUncompressedTracts ") + argstr="--writeUncompressedTracts ", + ) seedsPerVoxel = traits.Int( - desc= - " Each seed generates a fiber, thus using more seeds generates more fibers. In general use 1 or 2 seeds, and for a more thorough result use 5 or 10 (depending on your machine this may take up to 2 days to run)., ", - argstr="--seedsPerVoxel %d") + desc=" Each seed generates a fiber, thus using more seeds generates more fibers. In general use 1 or 2 seeds, and for a more thorough result use 5 or 10 (depending on your machine this may take up to 2 days to run)., ", + argstr="--seedsPerVoxel %d", + ) numTensor = traits.Enum( - "1", "2", desc="Number of tensors used", argstr="--numTensor %s") + "1", "2", desc="Number of tensors used", argstr="--numTensor %s" + ) freeWater = traits.Bool( - desc= - "Adds a term for free water difusion to the model. (Note for experts: if checked, the 1T simple model is forced) ", - argstr="--freeWater ") + desc="Adds a term for free water difusion to the model. 
(Note for experts: if checked, the 1T simple model is forced) ", + argstr="--freeWater ", + ) recordFA = traits.Bool( - desc= - "Whether to store FA. Attaches field 'FA', and 'FA2' for 2-tensor case to fiber. ", - argstr="--recordFA ") + desc="Whether to store FA. Attaches field 'FA', and 'FA2' for 2-tensor case to fiber. ", + argstr="--recordFA ", + ) recordFreeWater = traits.Bool( - desc= - "Whether to store the fraction of free water. Attaches field 'FreeWater' to fiber.", - argstr="--recordFreeWater ") + desc="Whether to store the fraction of free water. Attaches field 'FreeWater' to fiber.", + argstr="--recordFreeWater ", + ) recordTrace = traits.Bool( - desc= - "Whether to store Trace. Attaches field 'Trace', and 'Trace2' for 2-tensor case to fiber.", - argstr="--recordTrace ") + desc="Whether to store Trace. Attaches field 'Trace', and 'Trace2' for 2-tensor case to fiber.", + argstr="--recordTrace ", + ) recordTensors = traits.Bool( - desc= - "Recording the tensors enables Slicer to color the fiber bundles by FA, orientation, and so on. The fields will be called 'TensorN', where N is the tensor number. ", - argstr="--recordTensors ") + desc="Recording the tensors enables Slicer to color the fiber bundles by FA, orientation, and so on. The fields will be called 'TensorN', where N is the tensor number. ", + argstr="--recordTensors ", + ) recordNMSE = traits.Bool( desc="Whether to store NMSE. Attaches field 'NMSE' to fiber. ", - argstr="--recordNMSE ") + argstr="--recordNMSE ", + ) recordState = traits.Bool( - desc= - "Whether to attach the states to the fiber. Will generate field 'state'.", - argstr="--recordState ") + desc="Whether to attach the states to the fiber. Will generate field 'state'.", + argstr="--recordState ", + ) recordCovariance = traits.Bool( - desc= - "Whether to store the covariance. Will generate field 'covariance' in fiber.", - argstr="--recordCovariance ") + desc="Whether to store the covariance. Will generate field 'covariance' in fiber.", + argstr="--recordCovariance ", + ) recordLength = traits.Float( - desc="Record length of tractography, in millimeters", - argstr="--recordLength %f") + desc="Record length of tractography, in millimeters", argstr="--recordLength %f" + ) minFA = traits.Float( - desc= - "Abort the tractography when the Fractional Anisotropy is less than this value", - argstr="--minFA %f") + desc="Abort the tractography when the Fractional Anisotropy is less than this value", + argstr="--minFA %f", + ) minGA = traits.Float( - desc= - "Abort the tractography when the Generalized Anisotropy is less than this value", - argstr="--minGA %f") + desc="Abort the tractography when the Generalized Anisotropy is less than this value", + argstr="--minGA %f", + ) fullTensorModel = traits.Bool( - desc= - "Whether to use the full tensor model. If unchecked, use the default simple tensor model", - argstr="--fullTensorModel ") + desc="Whether to use the full tensor model. If unchecked, use the default simple tensor model", + argstr="--fullTensorModel ", + ) numThreads = traits.Int( - desc= - "Number of threads used during computation. Set to the number of cores on your workstation for optimal speed. If left undefined the number of cores detected will be used. ", - argstr="--numThreads %d") + desc="Number of threads used during computation. Set to the number of cores on your workstation for optimal speed. If left undefined the number of cores detected will be used. 
", + argstr="--numThreads %d", + ) stepLength = traits.Float( - desc="Step length of tractography, in millimeters", - argstr="--stepLength %f") + desc="Step length of tractography, in millimeters", argstr="--stepLength %f" + ) maxHalfFiberLength = traits.Float( - desc= - "The max length limit of the half fibers generated during tractography. Here the fiber is \'half\' because the tractography goes in only one direction from one seed point at a time", - argstr="--maxHalfFiberLength %f") + desc="The max length limit of the half fibers generated during tractography. Here the fiber is 'half' because the tractography goes in only one direction from one seed point at a time", + argstr="--maxHalfFiberLength %f", + ) seedFALimit = traits.Float( desc="Seed points whose FA are below this value are excluded", - argstr="--seedFALimit %f") - Qm = traits.Float( - desc="Process noise for angles/direction", argstr="--Qm %f") + argstr="--seedFALimit %f", + ) + Qm = traits.Float(desc="Process noise for angles/direction", argstr="--Qm %f") Ql = traits.Float(desc="Process noise for eigenvalues", argstr="--Ql %f") Qw = traits.Float( - desc= - "Process noise for free water weights, ignored if no free water estimation", - argstr="--Qw %f") + desc="Process noise for free water weights, ignored if no free water estimation", + argstr="--Qw %f", + ) Rs = traits.Float(desc="Measurement noise", argstr="--Rs %f") maxBranchingAngle = traits.Float( - desc= - "Maximum branching angle, in degrees. When using multiple tensors, a new branch will be created when the tensors' major directions form an angle between (minBranchingAngle, maxBranchingAngle). Branching is supressed when this maxBranchingAngle is set to 0.0", - argstr="--maxBranchingAngle %f") + desc="Maximum branching angle, in degrees. When using multiple tensors, a new branch will be created when the tensors' major directions form an angle between (minBranchingAngle, maxBranchingAngle). Branching is supressed when this maxBranchingAngle is set to 0.0", + argstr="--maxBranchingAngle %f", + ) minBranchingAngle = traits.Float( - desc= - "Minimum branching angle, in degrees. When using multiple tensors, a new branch will be created when the tensors' major directions form an angle between (minBranchingAngle, maxBranchingAngle)", - argstr="--minBranchingAngle %f") + desc="Minimum branching angle, in degrees. When using multiple tensors, a new branch will be created when the tensors' major directions form an angle between (minBranchingAngle, maxBranchingAngle)", + argstr="--minBranchingAngle %f", + ) tractsWithSecondTensor = traits.Either( traits.Bool, File(), hash_files=False, desc="Tracts generated, with second tensor output (if there is one)", - argstr="--tractsWithSecondTensor %s") + argstr="--tractsWithSecondTensor %s", + ) storeGlyphs = traits.Bool( - desc= - "Store tensors' main directions as two-point lines in a separate file named glyphs_{tracts}. When using multiple tensors, only the major tensors' main directions are stored", - argstr="--storeGlyphs ") + desc="Store tensors' main directions as two-point lines in a separate file named glyphs_{tracts}. 
When using multiple tensors, only the major tensors' main directions are stored", + argstr="--storeGlyphs ", + ) class UKFTractographyOutputSpec(TraitedSpec): - tracts = File( - desc="Tracts generated, with first tensor output", exists=True) + tracts = File(desc="Tracts generated, with first tensor output", exists=True) tractsWithSecondTensor = File( desc="Tracts generated, with second tensor output (if there is one)", - exists=True) + exists=True, + ) class UKFTractography(SEMLikeCommandLine): @@ -161,7 +175,7 @@ class UKFTractography(SEMLikeCommandLine): output_spec = UKFTractographyOutputSpec _cmd = " UKFTractography " _outputs_filenames = { - 'tracts': 'tracts.vtp', - 'tractsWithSecondTensor': 'tractsWithSecondTensor.vtp' + "tracts": "tracts.vtp", + "tractsWithSecondTensor": "tractsWithSecondTensor.vtp", } _redirect_x = False diff --git a/nipype/interfaces/semtools/featurecreator.py b/nipype/interfaces/semtools/featurecreator.py index 69ff2d675c..f02d19fda8 100644 --- a/nipype/interfaces/semtools/featurecreator.py +++ b/nipype/interfaces/semtools/featurecreator.py @@ -5,22 +5,33 @@ import os -from ..base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, - TraitedSpec, File, Directory, traits, isdefined, - InputMultiPath, OutputMultiPath) +from ..base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) class GenerateCsfClippedFromClassifiedImageInputSpec(CommandLineInputSpec): inputCassifiedVolume = File( desc="Required: input tissue label image", exists=True, - argstr="--inputCassifiedVolume %s") + argstr="--inputCassifiedVolume %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Required: output image", - argstr="--outputVolume %s") + argstr="--outputVolume %s", + ) class GenerateCsfClippedFromClassifiedImageOutputSpec(TraitedSpec): @@ -47,5 +58,5 @@ class GenerateCsfClippedFromClassifiedImage(SEMLikeCommandLine): input_spec = GenerateCsfClippedFromClassifiedImageInputSpec output_spec = GenerateCsfClippedFromClassifiedImageOutputSpec _cmd = " GenerateCsfClippedFromClassifiedImage " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} _redirect_x = False diff --git a/nipype/interfaces/semtools/filtering/__init__.py b/nipype/interfaces/semtools/filtering/__init__.py index 82ad67a9c1..b5b7eccb20 100644 --- a/nipype/interfaces/semtools/filtering/__init__.py +++ b/nipype/interfaces/semtools/filtering/__init__.py @@ -1,9 +1,22 @@ # -*- coding: utf-8 -*- from .denoising import UnbiasedNonLocalMeans from .featuredetection import ( - GenerateSummedGradientImage, CannySegmentationLevelSetImageFilter, - DilateImage, TextureFromNoiseImageFilter, FlippedDifference, ErodeImage, - GenerateBrainClippedImage, NeighborhoodMedian, GenerateTestImage, - NeighborhoodMean, HammerAttributeCreator, TextureMeasureFilter, DilateMask, - DumpBinaryTrainingVectors, DistanceMaps, STAPLEAnalysis, - GradientAnisotropicDiffusionImageFilter, CannyEdge) + GenerateSummedGradientImage, + CannySegmentationLevelSetImageFilter, + DilateImage, + TextureFromNoiseImageFilter, + FlippedDifference, + ErodeImage, + GenerateBrainClippedImage, + NeighborhoodMedian, + GenerateTestImage, + NeighborhoodMean, + HammerAttributeCreator, + TextureMeasureFilter, + DilateMask, + DumpBinaryTrainingVectors, + DistanceMaps, + STAPLEAnalysis, + GradientAnisotropicDiffusionImageFilter, + CannyEdge, +) diff --git 
a/nipype/interfaces/semtools/filtering/denoising.py b/nipype/interfaces/semtools/filtering/denoising.py index 97d687c512..2ca6840128 100644 --- a/nipype/interfaces/semtools/filtering/denoising.py +++ b/nipype/interfaces/semtools/filtering/denoising.py @@ -5,50 +5,58 @@ import os -from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, - TraitedSpec, File, Directory, traits, isdefined, - InputMultiPath, OutputMultiPath) +from ...base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) class UnbiasedNonLocalMeansInputSpec(CommandLineInputSpec): sigma = traits.Float( - desc= - "The root power of noise (sigma) in the complex Gaussian process the Rician comes from. If it is underestimated, the algorithm fails to remove the noise. If it is overestimated, over-blurring is likely to occur.", - argstr="--sigma %f") + desc="The root power of noise (sigma) in the complex Gaussian process the Rician comes from. If it is underestimated, the algorithm fails to remove the noise. If it is overestimated, over-blurring is likely to occur.", + argstr="--sigma %f", + ) rs = InputMultiPath( traits.Int, - desc= - "The algorithm search for similar voxels in a neighborhood of this radius (radii larger than 5,5,5 are very slow, and the results can be only marginally better. Small radii may fail to effectively remove the noise).", + desc="The algorithm search for similar voxels in a neighborhood of this radius (radii larger than 5,5,5 are very slow, and the results can be only marginally better. Small radii may fail to effectively remove the noise).", sep=",", - argstr="--rs %s") + argstr="--rs %s", + ) rc = InputMultiPath( traits.Int, - desc= - "Similarity between blocks is computed as the difference between mean values and gradients. These parameters are computed fitting a hyperplane with LS inside a neighborhood of this size", + desc="Similarity between blocks is computed as the difference between mean values and gradients. These parameters are computed fitting a hyperplane with LS inside a neighborhood of this size", sep=",", - argstr="--rc %s") + argstr="--rc %s", + ) hp = traits.Float( - desc= - "This parameter is related to noise; the larger the parameter, the more aggressive the filtering. Should be near 1, and only values between 0.8 and 1.2 are allowed", - argstr="--hp %f") + desc="This parameter is related to noise; the larger the parameter, the more aggressive the filtering. 
Should be near 1, and only values between 0.8 and 1.2 are allowed", + argstr="--hp %f", + ) ps = traits.Float( - desc= - "To accelerate computations, preselection is used: if the normalized difference is above this threshold, the voxel will be discarded (non used for average)", - argstr="--ps %f") - inputVolume = File( - position=-2, desc="Input MRI volume.", exists=True, argstr="%s") + desc="To accelerate computations, preselection is used: if the normalized difference is above this threshold, the voxel will be discarded (non used for average)", + argstr="--ps %f", + ) + inputVolume = File(position=-2, desc="Input MRI volume.", exists=True, argstr="%s") outputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Output (filtered) MRI volume.", - argstr="%s") + argstr="%s", + ) class UnbiasedNonLocalMeansOutputSpec(TraitedSpec): - outputVolume = File( - position=-1, desc="Output (filtered) MRI volume.", exists=True) + outputVolume = File(position=-1, desc="Output (filtered) MRI volume.", exists=True) class UnbiasedNonLocalMeans(SEMLikeCommandLine): @@ -79,5 +87,5 @@ class UnbiasedNonLocalMeans(SEMLikeCommandLine): input_spec = UnbiasedNonLocalMeansInputSpec output_spec = UnbiasedNonLocalMeansOutputSpec _cmd = " UnbiasedNonLocalMeans " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} _redirect_x = False diff --git a/nipype/interfaces/semtools/filtering/featuredetection.py b/nipype/interfaces/semtools/filtering/featuredetection.py index ca4973ab43..e15e1de6b0 100644 --- a/nipype/interfaces/semtools/filtering/featuredetection.py +++ b/nipype/interfaces/semtools/filtering/featuredetection.py @@ -5,33 +5,42 @@ import os -from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, - TraitedSpec, File, Directory, traits, isdefined, - InputMultiPath, OutputMultiPath) +from ...base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) class GenerateSummedGradientImageInputSpec(CommandLineInputSpec): inputVolume1 = File( - desc="input volume 1, usally t1 image", - exists=True, - argstr="--inputVolume1 %s") + desc="input volume 1, usally t1 image", exists=True, argstr="--inputVolume1 %s" + ) inputVolume2 = File( - desc="input volume 2, usally t2 image", - exists=True, - argstr="--inputVolume2 %s") + desc="input volume 2, usally t2 image", exists=True, argstr="--inputVolume2 %s" + ) outputFileName = traits.Either( traits.Bool, File(), hash_files=False, desc="(required) output file name", - argstr="--outputFileName %s") + argstr="--outputFileName %s", + ) MaximumGradient = traits.Bool( - desc= - "If set this flag, it will compute maximum gradient between two input volumes instead of sum of it.", - argstr="--MaximumGradient ") + desc="If set this flag, it will compute maximum gradient between two input volumes instead of sum of it.", + argstr="--MaximumGradient ", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class GenerateSummedGradientImageOutputSpec(TraitedSpec): @@ -56,7 +65,7 @@ class GenerateSummedGradientImage(SEMLikeCommandLine): input_spec = GenerateSummedGradientImageInputSpec output_spec = GenerateSummedGradientImageOutputSpec _cmd = " GenerateSummedGradientImage " - _outputs_filenames = {'outputFileName': 'outputFileName'} + _outputs_filenames = 
{"outputFileName": "outputFileName"} _redirect_x = False @@ -64,21 +73,23 @@ class CannySegmentationLevelSetImageFilterInputSpec(CommandLineInputSpec): inputVolume = File(exists=True, argstr="--inputVolume %s") initialModel = File(exists=True, argstr="--initialModel %s") outputVolume = traits.Either( - traits.Bool, File(), hash_files=False, argstr="--outputVolume %s") + traits.Bool, File(), hash_files=False, argstr="--outputVolume %s" + ) outputSpeedVolume = traits.Either( - traits.Bool, File(), hash_files=False, argstr="--outputSpeedVolume %s") + traits.Bool, File(), hash_files=False, argstr="--outputSpeedVolume %s" + ) cannyThreshold = traits.Float( - desc="Canny Threshold Value", argstr="--cannyThreshold %f") - cannyVariance = traits.Float( - desc="Canny variance", argstr="--cannyVariance %f") + desc="Canny Threshold Value", argstr="--cannyThreshold %f" + ) + cannyVariance = traits.Float(desc="Canny variance", argstr="--cannyVariance %f") advectionWeight = traits.Float( - desc= - "Controls the smoothness of the resulting mask, small number are more smooth, large numbers allow more sharp corners. ", - argstr="--advectionWeight %f") + desc="Controls the smoothness of the resulting mask, small number are more smooth, large numbers allow more sharp corners. ", + argstr="--advectionWeight %f", + ) initialModelIsovalue = traits.Float( - desc= - "The identification of the input model iso-surface. (for a binary image with 0s and 1s use 0.5) (for a binary image with 0s and 255's use 127.5).", - argstr="--initialModelIsovalue %f") + desc="The identification of the input model iso-surface. (for a binary image with 0s and 1s use 0.5) (for a binary image with 0s and 255's use 127.5).", + argstr="--initialModelIsovalue %f", + ) maxIterations = traits.Int(desc="The", argstr="--maxIterations %d") @@ -108,27 +119,31 @@ class CannySegmentationLevelSetImageFilter(SEMLikeCommandLine): output_spec = CannySegmentationLevelSetImageFilterOutputSpec _cmd = " CannySegmentationLevelSetImageFilter " _outputs_filenames = { - 'outputVolume': 'outputVolume.nii', - 'outputSpeedVolume': 'outputSpeedVolume.nii' + "outputVolume": "outputVolume.nii", + "outputSpeedVolume": "outputSpeedVolume.nii", } _redirect_x = False class DilateImageInputSpec(CommandLineInputSpec): inputVolume = File( - desc="Required: input image", exists=True, argstr="--inputVolume %s") + desc="Required: input image", exists=True, argstr="--inputVolume %s" + ) inputMaskVolume = File( desc="Required: input brain mask image", exists=True, - argstr="--inputMaskVolume %s") + argstr="--inputMaskVolume %s", + ) inputRadius = traits.Int( - desc="Required: input neighborhood radius", argstr="--inputRadius %d") + desc="Required: input neighborhood radius", argstr="--inputRadius %d" + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Required: output image", - argstr="--outputVolume %s") + argstr="--outputVolume %s", + ) class DilateImageOutputSpec(TraitedSpec): @@ -155,21 +170,24 @@ class DilateImage(SEMLikeCommandLine): input_spec = DilateImageInputSpec output_spec = DilateImageOutputSpec _cmd = " DilateImage " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} _redirect_x = False class TextureFromNoiseImageFilterInputSpec(CommandLineInputSpec): inputVolume = File( - desc="Required: input image", exists=True, argstr="--inputVolume %s") + desc="Required: input image", exists=True, argstr="--inputVolume %s" + ) inputRadius = traits.Int( - desc="Required: input 
neighborhood radius", argstr="--inputRadius %d") + desc="Required: input neighborhood radius", argstr="--inputRadius %d" + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Required: output image", - argstr="--outputVolume %s") + argstr="--outputVolume %s", + ) class TextureFromNoiseImageFilterOutputSpec(TraitedSpec): @@ -196,23 +214,26 @@ class TextureFromNoiseImageFilter(SEMLikeCommandLine): input_spec = TextureFromNoiseImageFilterInputSpec output_spec = TextureFromNoiseImageFilterOutputSpec _cmd = " TextureFromNoiseImageFilter " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} _redirect_x = False class FlippedDifferenceInputSpec(CommandLineInputSpec): inputVolume = File( - desc="Required: input image", exists=True, argstr="--inputVolume %s") + desc="Required: input image", exists=True, argstr="--inputVolume %s" + ) inputMaskVolume = File( desc="Required: input brain mask image", exists=True, - argstr="--inputMaskVolume %s") + argstr="--inputMaskVolume %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Required: output image", - argstr="--outputVolume %s") + argstr="--outputVolume %s", + ) class FlippedDifferenceOutputSpec(TraitedSpec): @@ -239,25 +260,29 @@ class FlippedDifference(SEMLikeCommandLine): input_spec = FlippedDifferenceInputSpec output_spec = FlippedDifferenceOutputSpec _cmd = " FlippedDifference " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} _redirect_x = False class ErodeImageInputSpec(CommandLineInputSpec): inputVolume = File( - desc="Required: input image", exists=True, argstr="--inputVolume %s") + desc="Required: input image", exists=True, argstr="--inputVolume %s" + ) inputMaskVolume = File( desc="Required: input brain mask image", exists=True, - argstr="--inputMaskVolume %s") + argstr="--inputMaskVolume %s", + ) inputRadius = traits.Int( - desc="Required: input neighborhood radius", argstr="--inputRadius %d") + desc="Required: input neighborhood radius", argstr="--inputRadius %d" + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Required: output image", - argstr="--outputVolume %s") + argstr="--outputVolume %s", + ) class ErodeImageOutputSpec(TraitedSpec): @@ -284,28 +309,28 @@ class ErodeImage(SEMLikeCommandLine): input_spec = ErodeImageInputSpec output_spec = ErodeImageOutputSpec _cmd = " ErodeImage " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} _redirect_x = False class GenerateBrainClippedImageInputSpec(CommandLineInputSpec): inputImg = File( - desc="input volume 1, usally t1 image", - exists=True, - argstr="--inputImg %s") + desc="input volume 1, usally t1 image", exists=True, argstr="--inputImg %s" + ) inputMsk = File( - desc="input volume 2, usally t2 image", - exists=True, - argstr="--inputMsk %s") + desc="input volume 2, usally t2 image", exists=True, argstr="--inputMsk %s" + ) outputFileName = traits.Either( traits.Bool, File(), hash_files=False, desc="(required) output file name", - argstr="--outputFileName %s") + argstr="--outputFileName %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class GenerateBrainClippedImageOutputSpec(TraitedSpec): @@ -330,25 +355,29 @@ class GenerateBrainClippedImage(SEMLikeCommandLine): input_spec = 
GenerateBrainClippedImageInputSpec output_spec = GenerateBrainClippedImageOutputSpec _cmd = " GenerateBrainClippedImage " - _outputs_filenames = {'outputFileName': 'outputFileName'} + _outputs_filenames = {"outputFileName": "outputFileName"} _redirect_x = False class NeighborhoodMedianInputSpec(CommandLineInputSpec): inputVolume = File( - desc="Required: input image", exists=True, argstr="--inputVolume %s") + desc="Required: input image", exists=True, argstr="--inputVolume %s" + ) inputMaskVolume = File( desc="Required: input brain mask image", exists=True, - argstr="--inputMaskVolume %s") + argstr="--inputMaskVolume %s", + ) inputRadius = traits.Int( - desc="Required: input neighborhood radius", argstr="--inputRadius %d") + desc="Required: input neighborhood radius", argstr="--inputRadius %d" + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Required: output image", - argstr="--outputVolume %s") + argstr="--outputVolume %s", + ) class NeighborhoodMedianOutputSpec(TraitedSpec): @@ -375,27 +404,26 @@ class NeighborhoodMedian(SEMLikeCommandLine): input_spec = NeighborhoodMedianInputSpec output_spec = NeighborhoodMedianOutputSpec _cmd = " NeighborhoodMedian " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} _redirect_x = False class GenerateTestImageInputSpec(CommandLineInputSpec): inputVolume = File( - desc="input volume 1, usally t1 image", - exists=True, - argstr="--inputVolume %s") + desc="input volume 1, usally t1 image", exists=True, argstr="--inputVolume %s" + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="(required) output file name", - argstr="--outputVolume %s") - lowerBoundOfOutputVolume = traits.Float( - argstr="--lowerBoundOfOutputVolume %f") - upperBoundOfOutputVolume = traits.Float( - argstr="--upperBoundOfOutputVolume %f") + argstr="--outputVolume %s", + ) + lowerBoundOfOutputVolume = traits.Float(argstr="--lowerBoundOfOutputVolume %f") + upperBoundOfOutputVolume = traits.Float(argstr="--upperBoundOfOutputVolume %f") outputVolumeSize = traits.Float( - desc="output Volume Size", argstr="--outputVolumeSize %f") + desc="output Volume Size", argstr="--outputVolumeSize %f" + ) class GenerateTestImageOutputSpec(TraitedSpec): @@ -420,25 +448,29 @@ class GenerateTestImage(SEMLikeCommandLine): input_spec = GenerateTestImageInputSpec output_spec = GenerateTestImageOutputSpec _cmd = " GenerateTestImage " - _outputs_filenames = {'outputVolume': 'outputVolume'} + _outputs_filenames = {"outputVolume": "outputVolume"} _redirect_x = False class NeighborhoodMeanInputSpec(CommandLineInputSpec): inputVolume = File( - desc="Required: input image", exists=True, argstr="--inputVolume %s") + desc="Required: input image", exists=True, argstr="--inputVolume %s" + ) inputMaskVolume = File( desc="Required: input brain mask image", exists=True, - argstr="--inputMaskVolume %s") + argstr="--inputMaskVolume %s", + ) inputRadius = traits.Int( - desc="Required: input neighborhood radius", argstr="--inputRadius %d") + desc="Required: input neighborhood radius", argstr="--inputRadius %d" + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Required: output image", - argstr="--outputVolume %s") + argstr="--outputVolume %s", + ) class NeighborhoodMeanOutputSpec(TraitedSpec): @@ -465,30 +497,32 @@ class NeighborhoodMean(SEMLikeCommandLine): input_spec = NeighborhoodMeanInputSpec output_spec = NeighborhoodMeanOutputSpec _cmd = " NeighborhoodMean " - 
_outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} _redirect_x = False class HammerAttributeCreatorInputSpec(CommandLineInputSpec): Scale = traits.Int(desc="Determine Scale of Ball", argstr="--Scale %d") - Strength = traits.Float( - desc="Determine Strength of Edges", argstr="--Strength %f") + Strength = traits.Float(desc="Determine Strength of Edges", argstr="--Strength %f") inputGMVolume = File( desc="Required: input grey matter posterior image", exists=True, - argstr="--inputGMVolume %s") + argstr="--inputGMVolume %s", + ) inputWMVolume = File( desc="Required: input white matter posterior image", exists=True, - argstr="--inputWMVolume %s") + argstr="--inputWMVolume %s", + ) inputCSFVolume = File( desc="Required: input CSF posterior image", exists=True, - argstr="--inputCSFVolume %s") + argstr="--inputCSFVolume %s", + ) outputVolumeBase = traits.Str( - desc= - "Required: output image base name to be appended for each feature vector.", - argstr="--outputVolumeBase %s") + desc="Required: output image base name to be appended for each feature vector.", + argstr="--outputVolumeBase %s", + ) class HammerAttributeCreatorOutputSpec(TraitedSpec): @@ -525,7 +559,8 @@ class TextureMeasureFilterInputSpec(CommandLineInputSpec): distance = traits.Int(argstr="--distance %d") insideROIValue = traits.Float(argstr="--insideROIValue %f") outputFilename = traits.Either( - traits.Bool, File(), hash_files=False, argstr="--outputFilename %s") + traits.Bool, File(), hash_files=False, argstr="--outputFilename %s" + ) class TextureMeasureFilterOutputSpec(TraitedSpec): @@ -552,29 +587,33 @@ class TextureMeasureFilter(SEMLikeCommandLine): input_spec = TextureMeasureFilterInputSpec output_spec = TextureMeasureFilterOutputSpec _cmd = " TextureMeasureFilter " - _outputs_filenames = {'outputFilename': 'outputFilename'} + _outputs_filenames = {"outputFilename": "outputFilename"} _redirect_x = False class DilateMaskInputSpec(CommandLineInputSpec): inputVolume = File( - desc="Required: input image", exists=True, argstr="--inputVolume %s") + desc="Required: input image", exists=True, argstr="--inputVolume %s" + ) inputBinaryVolume = File( desc="Required: input brain mask image", exists=True, - argstr="--inputBinaryVolume %s") + argstr="--inputBinaryVolume %s", + ) sizeStructuralElement = traits.Int( - desc= - "size of structural element. sizeStructuralElement=1 means that 3x3x3 structuring element for 3D", - argstr="--sizeStructuralElement %d") + desc="size of structural element. 
sizeStructuralElement=1 means that 3x3x3 structuring element for 3D", + argstr="--sizeStructuralElement %d", + ) lowerThreshold = traits.Float( - desc="Required: lowerThreshold value", argstr="--lowerThreshold %f") + desc="Required: lowerThreshold value", argstr="--lowerThreshold %f" + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Required: output image", - argstr="--outputVolume %s") + argstr="--outputVolume %s", + ) class DilateMaskOutputSpec(TraitedSpec): @@ -601,7 +640,7 @@ class DilateMask(SEMLikeCommandLine): input_spec = DilateMaskInputSpec output_spec = DilateMaskOutputSpec _cmd = " DilateMask " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} _redirect_x = False @@ -609,11 +648,13 @@ class DumpBinaryTrainingVectorsInputSpec(CommandLineInputSpec): inputHeaderFilename = File( desc="Required: input header file name", exists=True, - argstr="--inputHeaderFilename %s") + argstr="--inputHeaderFilename %s", + ) inputVectorFilename = File( desc="Required: input vector filename", exists=True, - argstr="--inputVectorFilename %s") + argstr="--inputVectorFilename %s", + ) class DumpBinaryTrainingVectorsOutputSpec(TraitedSpec): @@ -648,21 +689,24 @@ class DistanceMapsInputSpec(CommandLineInputSpec): inputLabelVolume = File( desc="Required: input tissue label image", exists=True, - argstr="--inputLabelVolume %s") + argstr="--inputLabelVolume %s", + ) inputMaskVolume = File( desc="Required: input brain mask image", exists=True, - argstr="--inputMaskVolume %s") + argstr="--inputMaskVolume %s", + ) inputTissueLabel = traits.Int( - desc= - "Required: input integer value of tissue type used to calculate distance", - argstr="--inputTissueLabel %d") + desc="Required: input integer value of tissue type used to calculate distance", + argstr="--inputTissueLabel %d", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Required: output image", - argstr="--outputVolume %s") + argstr="--outputVolume %s", + ) class DistanceMapsOutputSpec(TraitedSpec): @@ -689,24 +733,26 @@ class DistanceMaps(SEMLikeCommandLine): input_spec = DistanceMapsInputSpec output_spec = DistanceMapsOutputSpec _cmd = " DistanceMaps " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} _redirect_x = False class STAPLEAnalysisInputSpec(CommandLineInputSpec): inputDimension = traits.Int( - desc="Required: input image Dimension 2 or 3", - argstr="--inputDimension %d") + desc="Required: input image Dimension 2 or 3", argstr="--inputDimension %d" + ) inputLabelVolume = InputMultiPath( File(exists=True), desc="Required: input label volume", - argstr="--inputLabelVolume %s...") + argstr="--inputLabelVolume %s...", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Required: output image", - argstr="--outputVolume %s") + argstr="--outputVolume %s", + ) class STAPLEAnalysisOutputSpec(TraitedSpec): @@ -733,26 +779,30 @@ class STAPLEAnalysis(SEMLikeCommandLine): input_spec = STAPLEAnalysisInputSpec output_spec = STAPLEAnalysisOutputSpec _cmd = " STAPLEAnalysis " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} _redirect_x = False class GradientAnisotropicDiffusionImageFilterInputSpec(CommandLineInputSpec): inputVolume = File( - desc="Required: input image", exists=True, argstr="--inputVolume %s") + desc="Required: input image", exists=True, 
argstr="--inputVolume %s" + ) numberOfIterations = traits.Int( - desc="Optional value for number of Iterations", - argstr="--numberOfIterations %d") + desc="Optional value for number of Iterations", argstr="--numberOfIterations %d" + ) timeStep = traits.Float( - desc="Time step for diffusion process", argstr="--timeStep %f") + desc="Time step for diffusion process", argstr="--timeStep %f" + ) conductance = traits.Float( - desc="Conductance for diffusion process", argstr="--conductance %f") + desc="Conductance for diffusion process", argstr="--conductance %f" + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Required: output image", - argstr="--outputVolume %s") + argstr="--outputVolume %s", + ) class GradientAnisotropicDiffusionImageFilterOutputSpec(TraitedSpec): @@ -773,7 +823,7 @@ class GradientAnisotropicDiffusionImageFilter(SEMLikeCommandLine): input_spec = GradientAnisotropicDiffusionImageFilterInputSpec output_spec = GradientAnisotropicDiffusionImageFilterOutputSpec _cmd = " GradientAnisotropicDiffusionImageFilter " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} _redirect_x = False @@ -781,25 +831,27 @@ class CannyEdgeInputSpec(CommandLineInputSpec): inputVolume = File( desc="Required: input tissue label image", exists=True, - argstr="--inputVolume %s") + argstr="--inputVolume %s", + ) variance = traits.Float( - desc= - "Variance and Maximum error are used in the Gaussian smoothing of the input image. See itkDiscreteGaussianImageFilter for information on these parameters.", - argstr="--variance %f") + desc="Variance and Maximum error are used in the Gaussian smoothing of the input image. See itkDiscreteGaussianImageFilter for information on these parameters.", + argstr="--variance %f", + ) upperThreshold = traits.Float( - desc= - "Threshold is the lowest allowed value in the output image. Its data type is the same as the data type of the output image. Any values below the Threshold level will be replaced with the OutsideValue parameter value, whose default is zero. ", - argstr="--upperThreshold %f") + desc="Threshold is the lowest allowed value in the output image. Its data type is the same as the data type of the output image. Any values below the Threshold level will be replaced with the OutsideValue parameter value, whose default is zero. ", + argstr="--upperThreshold %f", + ) lowerThreshold = traits.Float( - desc= - "Threshold is the lowest allowed value in the output image. Its data type is the same as the data type of the output image. Any values below the Threshold level will be replaced with the OutsideValue parameter value, whose default is zero. ", - argstr="--lowerThreshold %f") + desc="Threshold is the lowest allowed value in the output image. Its data type is the same as the data type of the output image. Any values below the Threshold level will be replaced with the OutsideValue parameter value, whose default is zero. 
", + argstr="--lowerThreshold %f", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Required: output image", - argstr="--outputVolume %s") + argstr="--outputVolume %s", + ) class CannyEdgeOutputSpec(TraitedSpec): @@ -826,5 +878,5 @@ class CannyEdge(SEMLikeCommandLine): input_spec = CannyEdgeInputSpec output_spec = CannyEdgeOutputSpec _cmd = " CannyEdge " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} _redirect_x = False diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py b/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py index da580ab5de..66d4da8a6c 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py @@ -4,30 +4,23 @@ def test_CannyEdge_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - lowerThreshold=dict(argstr='--lowerThreshold %f', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), - upperThreshold=dict(argstr='--upperThreshold %f', ), - variance=dict(argstr='--variance %f', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + lowerThreshold=dict(argstr="--lowerThreshold %f",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + upperThreshold=dict(argstr="--upperThreshold %f",), + variance=dict(argstr="--variance %f",), ) inputs = CannyEdge.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CannyEdge_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = CannyEdge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py index 30f9fe6f15..f77517fbb5 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py @@ -4,42 +4,28 @@ def test_CannySegmentationLevelSetImageFilter_inputs(): input_map = dict( - advectionWeight=dict(argstr='--advectionWeight %f', ), - args=dict(argstr='%s', ), - cannyThreshold=dict(argstr='--cannyThreshold %f', ), - cannyVariance=dict(argstr='--cannyVariance %f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - initialModel=dict( - argstr='--initialModel %s', - extensions=None, - ), - initialModelIsovalue=dict(argstr='--initialModelIsovalue %f', ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - maxIterations=dict(argstr='--maxIterations %d', ), - outputSpeedVolume=dict( - argstr='--outputSpeedVolume %s', - hash_files=False, - ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), + advectionWeight=dict(argstr="--advectionWeight %f",), + args=dict(argstr="%s",), + cannyThreshold=dict(argstr="--cannyThreshold %f",), + cannyVariance=dict(argstr="--cannyVariance %f",), + environ=dict(nohash=True, usedefault=True,), + 
initialModel=dict(argstr="--initialModel %s", extensions=None,), + initialModelIsovalue=dict(argstr="--initialModelIsovalue %f",), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + maxIterations=dict(argstr="--maxIterations %d",), + outputSpeedVolume=dict(argstr="--outputSpeedVolume %s", hash_files=False,), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), ) inputs = CannySegmentationLevelSetImageFilter.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CannySegmentationLevelSetImageFilter_outputs(): output_map = dict( - outputSpeedVolume=dict(extensions=None, ), - outputVolume=dict(extensions=None, ), + outputSpeedVolume=dict(extensions=None,), outputVolume=dict(extensions=None,), ) outputs = CannySegmentationLevelSetImageFilter.output_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py index cf30cce648..901aef8d82 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py @@ -4,32 +4,22 @@ def test_DilateImage_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputMaskVolume=dict( - argstr='--inputMaskVolume %s', - extensions=None, - ), - inputRadius=dict(argstr='--inputRadius %d', ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None,), + inputRadius=dict(argstr="--inputRadius %d",), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), ) inputs = DilateImage.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DilateImage_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = DilateImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py index 1da3cc6ebe..0311b115fb 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py @@ -4,33 +4,23 @@ def test_DilateMask_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputBinaryVolume=dict( - argstr='--inputBinaryVolume %s', - extensions=None, - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - lowerThreshold=dict(argstr='--lowerThreshold %f', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), - sizeStructuralElement=dict(argstr='--sizeStructuralElement %d', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputBinaryVolume=dict(argstr="--inputBinaryVolume %s", extensions=None,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + lowerThreshold=dict(argstr="--lowerThreshold %f",), + 
outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + sizeStructuralElement=dict(argstr="--sizeStructuralElement %d",), ) inputs = DilateMask.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DilateMask_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = DilateMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py index d7de88405a..730644e779 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py @@ -4,32 +4,22 @@ def test_DistanceMaps_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputLabelVolume=dict( - argstr='--inputLabelVolume %s', - extensions=None, - ), - inputMaskVolume=dict( - argstr='--inputMaskVolume %s', - extensions=None, - ), - inputTissueLabel=dict(argstr='--inputTissueLabel %d', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputLabelVolume=dict(argstr="--inputLabelVolume %s", extensions=None,), + inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None,), + inputTissueLabel=dict(argstr="--inputTissueLabel %d",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), ) inputs = DistanceMaps.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DistanceMaps_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = DistanceMaps.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py index 956f0ef34d..0d7df6bf80 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py @@ -4,25 +4,18 @@ def test_DumpBinaryTrainingVectors_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputHeaderFilename=dict( - argstr='--inputHeaderFilename %s', - extensions=None, - ), - inputVectorFilename=dict( - argstr='--inputVectorFilename %s', - extensions=None, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputHeaderFilename=dict(argstr="--inputHeaderFilename %s", extensions=None,), + inputVectorFilename=dict(argstr="--inputVectorFilename %s", extensions=None,), ) inputs = DumpBinaryTrainingVectors.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DumpBinaryTrainingVectors_outputs(): output_map = dict() outputs = DumpBinaryTrainingVectors.output_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py index 
50552d0484..568cafee66 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py @@ -4,32 +4,22 @@ def test_ErodeImage_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputMaskVolume=dict( - argstr='--inputMaskVolume %s', - extensions=None, - ), - inputRadius=dict(argstr='--inputRadius %d', ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None,), + inputRadius=dict(argstr="--inputRadius %d",), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), ) inputs = ErodeImage.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ErodeImage_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = ErodeImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py b/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py index 2f10684f7f..5498ea004d 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py @@ -4,31 +4,21 @@ def test_FlippedDifference_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputMaskVolume=dict( - argstr='--inputMaskVolume %s', - extensions=None, - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), ) inputs = FlippedDifference.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FlippedDifference_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = FlippedDifference.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py index 20d1c1718a..661bd6c1af 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py @@ -4,32 +4,22 @@ def test_GenerateBrainClippedImage_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputImg=dict( - argstr='--inputImg %s', - extensions=None, - ), - inputMsk=dict( - argstr='--inputMsk %s', - extensions=None, - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', 
), - outputFileName=dict( - argstr='--outputFileName %s', - hash_files=False, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputImg=dict(argstr="--inputImg %s", extensions=None,), + inputMsk=dict(argstr="--inputMsk %s", extensions=None,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputFileName=dict(argstr="--outputFileName %s", hash_files=False,), ) inputs = GenerateBrainClippedImage.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_GenerateBrainClippedImage_outputs(): - output_map = dict(outputFileName=dict(extensions=None, ), ) + output_map = dict(outputFileName=dict(extensions=None,),) outputs = GenerateBrainClippedImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py index 5f5ba62aaa..2eedb1831c 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py @@ -4,33 +4,23 @@ def test_GenerateSummedGradientImage_inputs(): input_map = dict( - MaximumGradient=dict(argstr='--MaximumGradient ', ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume1=dict( - argstr='--inputVolume1 %s', - extensions=None, - ), - inputVolume2=dict( - argstr='--inputVolume2 %s', - extensions=None, - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputFileName=dict( - argstr='--outputFileName %s', - hash_files=False, - ), + MaximumGradient=dict(argstr="--MaximumGradient ",), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputVolume1=dict(argstr="--inputVolume1 %s", extensions=None,), + inputVolume2=dict(argstr="--inputVolume2 %s", extensions=None,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputFileName=dict(argstr="--outputFileName %s", hash_files=False,), ) inputs = GenerateSummedGradientImage.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_GenerateSummedGradientImage_outputs(): - output_map = dict(outputFileName=dict(extensions=None, ), ) + output_map = dict(outputFileName=dict(extensions=None,),) outputs = GenerateSummedGradientImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py index bd05325a42..364ef50be4 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py @@ -4,32 +4,23 @@ def test_GenerateTestImage_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - lowerBoundOfOutputVolume=dict( - argstr='--lowerBoundOfOutputVolume %f', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), - outputVolumeSize=dict(argstr='--outputVolumeSize %f', ), - upperBoundOfOutputVolume=dict( - argstr='--upperBoundOfOutputVolume %f', ), + args=dict(argstr="%s",), + 
environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + lowerBoundOfOutputVolume=dict(argstr="--lowerBoundOfOutputVolume %f",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + outputVolumeSize=dict(argstr="--outputVolumeSize %f",), + upperBoundOfOutputVolume=dict(argstr="--upperBoundOfOutputVolume %f",), ) inputs = GenerateTestImage.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_GenerateTestImage_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = GenerateTestImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py index e07ac2157e..383a836a20 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py @@ -4,30 +4,23 @@ def test_GradientAnisotropicDiffusionImageFilter_inputs(): input_map = dict( - args=dict(argstr='%s', ), - conductance=dict(argstr='--conductance %f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - numberOfIterations=dict(argstr='--numberOfIterations %d', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), - timeStep=dict(argstr='--timeStep %f', ), + args=dict(argstr="%s",), + conductance=dict(argstr="--conductance %f",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + numberOfIterations=dict(argstr="--numberOfIterations %d",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + timeStep=dict(argstr="--timeStep %f",), ) inputs = GradientAnisotropicDiffusionImageFilter.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_GradientAnisotropicDiffusionImageFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = GradientAnisotropicDiffusionImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py b/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py index 45e3e253ba..9e77140c41 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py @@ -4,32 +4,22 @@ def test_HammerAttributeCreator_inputs(): input_map = dict( - Scale=dict(argstr='--Scale %d', ), - Strength=dict(argstr='--Strength %f', ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputCSFVolume=dict( - argstr='--inputCSFVolume %s', - extensions=None, - ), - inputGMVolume=dict( - argstr='--inputGMVolume %s', - extensions=None, - ), - inputWMVolume=dict( - argstr='--inputWMVolume %s', - extensions=None, - ), - outputVolumeBase=dict(argstr='--outputVolumeBase %s', ), + Scale=dict(argstr="--Scale 
%d",), + Strength=dict(argstr="--Strength %f",), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputCSFVolume=dict(argstr="--inputCSFVolume %s", extensions=None,), + inputGMVolume=dict(argstr="--inputGMVolume %s", extensions=None,), + inputWMVolume=dict(argstr="--inputWMVolume %s", extensions=None,), + outputVolumeBase=dict(argstr="--outputVolumeBase %s",), ) inputs = HammerAttributeCreator.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_HammerAttributeCreator_outputs(): output_map = dict() outputs = HammerAttributeCreator.output_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py index db06fd3746..9b4652a60c 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py @@ -4,32 +4,22 @@ def test_NeighborhoodMean_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputMaskVolume=dict( - argstr='--inputMaskVolume %s', - extensions=None, - ), - inputRadius=dict(argstr='--inputRadius %d', ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None,), + inputRadius=dict(argstr="--inputRadius %d",), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), ) inputs = NeighborhoodMean.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_NeighborhoodMean_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = NeighborhoodMean.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py index 0ddeb1eb16..ae1fc8266d 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py @@ -4,32 +4,22 @@ def test_NeighborhoodMedian_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputMaskVolume=dict( - argstr='--inputMaskVolume %s', - extensions=None, - ), - inputRadius=dict(argstr='--inputRadius %d', ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None,), + inputRadius=dict(argstr="--inputRadius %d",), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), ) inputs = NeighborhoodMedian.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], 
metakey) == value + + def test_NeighborhoodMedian_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = NeighborhoodMedian.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py b/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py index 095f678e51..9d841a6052 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py @@ -4,25 +4,21 @@ def test_STAPLEAnalysis_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputDimension=dict(argstr='--inputDimension %d', ), - inputLabelVolume=dict(argstr='--inputLabelVolume %s...', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputDimension=dict(argstr="--inputDimension %d",), + inputLabelVolume=dict(argstr="--inputLabelVolume %s...",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), ) inputs = STAPLEAnalysis.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_STAPLEAnalysis_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = STAPLEAnalysis.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py index 53d3031995..9d820afb58 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py @@ -4,28 +4,21 @@ def test_TextureFromNoiseImageFilter_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputRadius=dict(argstr='--inputRadius %d', ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputRadius=dict(argstr="--inputRadius %d",), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), ) inputs = TextureFromNoiseImageFilter.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TextureFromNoiseImageFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = TextureFromNoiseImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py index 2306af1639..ea9e8843d5 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py @@ -4,33 +4,23 @@ def 
test_TextureMeasureFilter_inputs(): input_map = dict( - args=dict(argstr='%s', ), - distance=dict(argstr='--distance %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputMaskVolume=dict( - argstr='--inputMaskVolume %s', - extensions=None, - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - insideROIValue=dict(argstr='--insideROIValue %f', ), - outputFilename=dict( - argstr='--outputFilename %s', - hash_files=False, - ), + args=dict(argstr="%s",), + distance=dict(argstr="--distance %d",), + environ=dict(nohash=True, usedefault=True,), + inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + insideROIValue=dict(argstr="--insideROIValue %f",), + outputFilename=dict(argstr="--outputFilename %s", hash_files=False,), ) inputs = TextureMeasureFilter.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TextureMeasureFilter_outputs(): - output_map = dict(outputFilename=dict(extensions=None, ), ) + output_map = dict(outputFilename=dict(extensions=None,),) outputs = TextureMeasureFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py b/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py index edf1a78553..113d9607c2 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py @@ -4,43 +4,25 @@ def test_UnbiasedNonLocalMeans_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - hp=dict(argstr='--hp %f', ), - inputVolume=dict( - argstr='%s', - extensions=None, - position=-2, - ), - outputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - ps=dict(argstr='--ps %f', ), - rc=dict( - argstr='--rc %s', - sep=',', - ), - rs=dict( - argstr='--rs %s', - sep=',', - ), - sigma=dict(argstr='--sigma %f', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + hp=dict(argstr="--hp %f",), + inputVolume=dict(argstr="%s", extensions=None, position=-2,), + outputVolume=dict(argstr="%s", hash_files=False, position=-1,), + ps=dict(argstr="--ps %f",), + rc=dict(argstr="--rc %s", sep=",",), + rs=dict(argstr="--rs %s", sep=",",), + sigma=dict(argstr="--sigma %f",), ) inputs = UnbiasedNonLocalMeans.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_UnbiasedNonLocalMeans_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict(extensions=None, position=-1,),) outputs = UnbiasedNonLocalMeans.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/legacy/registration.py b/nipype/interfaces/semtools/legacy/registration.py index 04bb425e3d..9835889040 100644 --- a/nipype/interfaces/semtools/legacy/registration.py +++ b/nipype/interfaces/semtools/legacy/registration.py @@ -5,44 +5,53 @@ import os -from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, - TraitedSpec, File, Directory, traits, isdefined, - InputMultiPath, OutputMultiPath) +from ...base import ( + CommandLine, + CommandLineInputSpec, 
+ SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) class scalartransformInputSpec(CommandLineInputSpec): - input_image = File( - desc="Image to tranform", exists=True, argstr="--input_image %s") + input_image = File(desc="Image to tranform", exists=True, argstr="--input_image %s") output_image = traits.Either( traits.Bool, File(), hash_files=False, desc="The transformed image", - argstr="--output_image %s") + argstr="--output_image %s", + ) transformation = traits.Either( traits.Bool, File(), hash_files=False, desc="Output file for transformation parameters", - argstr="--transformation %s") - invert = traits.Bool( - desc="Invert tranform before applying.", argstr="--invert ") + argstr="--transformation %s", + ) + invert = traits.Bool(desc="Invert tranform before applying.", argstr="--invert ") deformation = File( - desc="Deformation field.", exists=True, argstr="--deformation %s") - h_field = traits.Bool( - desc="The deformation is an h-field.", argstr="--h_field ") + desc="Deformation field.", exists=True, argstr="--deformation %s" + ) + h_field = traits.Bool(desc="The deformation is an h-field.", argstr="--h_field ") interpolation = traits.Enum( "nearestneighbor", "linear", "cubic", desc="Interpolation type (nearestneighbor, linear, cubic)", - argstr="--interpolation %s") + argstr="--interpolation %s", + ) class scalartransformOutputSpec(TraitedSpec): output_image = File(desc="The transformed image", exists=True) - transformation = File( - desc="Output file for transformation parameters", exists=True) + transformation = File(desc="Output file for transformation parameters", exists=True) class scalartransform(SEMLikeCommandLine): @@ -68,7 +77,7 @@ class scalartransform(SEMLikeCommandLine): output_spec = scalartransformOutputSpec _cmd = " scalartransform " _outputs_filenames = { - 'output_image': 'output_image.nii', - 'transformation': 'transformation' + "output_image": "output_image.nii", + "transformation": "transformation", } _redirect_x = False diff --git a/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py b/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py index d28abe6cc6..248c98df3d 100644 --- a/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py +++ b/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py @@ -4,40 +4,26 @@ def test_scalartransform_inputs(): input_map = dict( - args=dict(argstr='%s', ), - deformation=dict( - argstr='--deformation %s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - h_field=dict(argstr='--h_field ', ), - input_image=dict( - argstr='--input_image %s', - extensions=None, - ), - interpolation=dict(argstr='--interpolation %s', ), - invert=dict(argstr='--invert ', ), - output_image=dict( - argstr='--output_image %s', - hash_files=False, - ), - transformation=dict( - argstr='--transformation %s', - hash_files=False, - ), + args=dict(argstr="%s",), + deformation=dict(argstr="--deformation %s", extensions=None,), + environ=dict(nohash=True, usedefault=True,), + h_field=dict(argstr="--h_field ",), + input_image=dict(argstr="--input_image %s", extensions=None,), + interpolation=dict(argstr="--interpolation %s",), + invert=dict(argstr="--invert ",), + output_image=dict(argstr="--output_image %s", hash_files=False,), + transformation=dict(argstr="--transformation %s", hash_files=False,), ) inputs = scalartransform.input_spec() for key, metadata in list(input_map.items()): for metakey, value in 
list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_scalartransform_outputs(): output_map = dict( - output_image=dict(extensions=None, ), - transformation=dict(extensions=None, ), + output_image=dict(extensions=None,), transformation=dict(extensions=None,), ) outputs = scalartransform.output_spec() diff --git a/nipype/interfaces/semtools/registration/__init__.py b/nipype/interfaces/semtools/registration/__init__.py index 3487d13134..6d52169ab3 100644 --- a/nipype/interfaces/semtools/registration/__init__.py +++ b/nipype/interfaces/semtools/registration/__init__.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- -from .specialized import (VBRAINSDemonWarp, BRAINSDemonWarp, - BRAINSTransformFromFiducials) +from .specialized import VBRAINSDemonWarp, BRAINSDemonWarp, BRAINSTransformFromFiducials from .brainsresample import BRAINSResample from .brainsfit import BRAINSFit from .brainsresize import BRAINSResize diff --git a/nipype/interfaces/semtools/registration/brainsfit.py b/nipype/interfaces/semtools/registration/brainsfit.py index 6142aac418..343edd2155 100644 --- a/nipype/interfaces/semtools/registration/brainsfit.py +++ b/nipype/interfaces/semtools/registration/brainsfit.py @@ -5,154 +5,163 @@ import os -from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, - TraitedSpec, File, Directory, traits, isdefined, - InputMultiPath, OutputMultiPath) +from ...base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) class BRAINSFitInputSpec(CommandLineInputSpec): fixedVolume = File( - desc= - "Input fixed image (the moving image will be transformed into this image space).", + desc="Input fixed image (the moving image will be transformed into this image space).", exists=True, - argstr="--fixedVolume %s") + argstr="--fixedVolume %s", + ) movingVolume = File( - desc= - "Input moving image (this image will be transformed into the fixed image space).", + desc="Input moving image (this image will be transformed into the fixed image space).", exists=True, - argstr="--movingVolume %s") + argstr="--movingVolume %s", + ) samplingPercentage = traits.Float( - desc= - "Fraction of voxels of the fixed image that will be used for registration. The number has to be larger than zero and less or equal to one. Higher values increase the computation time but may give more accurate results. You can also limit the sampling focus with ROI masks and ROIAUTO mask generation. The default is 0.002 (use approximately 0.2% of voxels, resulting in 100000 samples in a 512x512x192 volume) to provide a very fast registration in most cases. Typical values range from 0.01 (1%) for low detail images to 0.2 (20%) for high detail images.", - argstr="--samplingPercentage %f") + desc="Fraction of voxels of the fixed image that will be used for registration. The number has to be larger than zero and less or equal to one. Higher values increase the computation time but may give more accurate results. You can also limit the sampling focus with ROI masks and ROIAUTO mask generation. The default is 0.002 (use approximately 0.2% of voxels, resulting in 100000 samples in a 512x512x192 volume) to provide a very fast registration in most cases. 
Typical values range from 0.01 (1%) for low detail images to 0.2 (20%) for high detail images.", + argstr="--samplingPercentage %f", + ) splineGridSize = InputMultiPath( traits.Int, - desc= - "Number of BSpline grid subdivisions along each axis of the fixed image, centered on the image space. Values must be 3 or higher for the BSpline to be correctly computed.", + desc="Number of BSpline grid subdivisions along each axis of the fixed image, centered on the image space. Values must be 3 or higher for the BSpline to be correctly computed.", sep=",", - argstr="--splineGridSize %s") + argstr="--splineGridSize %s", + ) linearTransform = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "(optional) Output estimated transform - in case the computed transform is not BSpline. NOTE: You must set at least one output object (transform and/or output volume).", - argstr="--linearTransform %s") + desc="(optional) Output estimated transform - in case the computed transform is not BSpline. NOTE: You must set at least one output object (transform and/or output volume).", + argstr="--linearTransform %s", + ) bsplineTransform = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "(optional) Output estimated transform - in case the computed transform is BSpline. NOTE: You must set at least one output object (transform and/or output volume).", - argstr="--bsplineTransform %s") + desc="(optional) Output estimated transform - in case the computed transform is BSpline. NOTE: You must set at least one output object (transform and/or output volume).", + argstr="--bsplineTransform %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "(optional) Output image: the moving image warped to the fixed image space. NOTE: You must set at least one output object (transform and/or output volume).", - argstr="--outputVolume %s") + desc="(optional) Output image: the moving image warped to the fixed image space. NOTE: You must set at least one output object (transform and/or output volume).", + argstr="--outputVolume %s", + ) initialTransform = File( - desc= - "Transform to be applied to the moving image to initialize the registration. This can only be used if Initialize Transform Mode is Off.", + desc="Transform to be applied to the moving image to initialize the registration. This can only be used if Initialize Transform Mode is Off.", exists=True, - argstr="--initialTransform %s") + argstr="--initialTransform %s", + ) initializeTransformMode = traits.Enum( "Off", "useMomentsAlign", "useCenterOfHeadAlign", "useGeometryAlign", "useCenterOfROIAlign", - desc= - "Determine how to initialize the transform center. useMomentsAlign assumes that the center of mass of the images represent similar structures. useCenterOfHeadAlign attempts to use the top of head and shape of neck to drive a center of mass estimate. useGeometryAlign on assumes that the center of the voxel lattice of the images represent similar structures. Off assumes that the physical space of the images are close. This flag is mutually exclusive with the Initialization transform.", - argstr="--initializeTransformMode %s") + desc="Determine how to initialize the transform center. useMomentsAlign assumes that the center of mass of the images represent similar structures. useCenterOfHeadAlign attempts to use the top of head and shape of neck to drive a center of mass estimate. useGeometryAlign on assumes that the center of the voxel lattice of the images represent similar structures. 
Off assumes that the physical space of the images are close. This flag is mutually exclusive with the Initialization transform.", + argstr="--initializeTransformMode %s", + ) useRigid = traits.Bool( - desc= - "Perform a rigid registration as part of the sequential registration steps. This family of options overrides the use of transformType if any of them are set.", - argstr="--useRigid ") + desc="Perform a rigid registration as part of the sequential registration steps. This family of options overrides the use of transformType if any of them are set.", + argstr="--useRigid ", + ) useScaleVersor3D = traits.Bool( - desc= - "Perform a ScaleVersor3D registration as part of the sequential registration steps. This family of options overrides the use of transformType if any of them are set.", - argstr="--useScaleVersor3D ") + desc="Perform a ScaleVersor3D registration as part of the sequential registration steps. This family of options overrides the use of transformType if any of them are set.", + argstr="--useScaleVersor3D ", + ) useScaleSkewVersor3D = traits.Bool( - desc= - "Perform a ScaleSkewVersor3D registration as part of the sequential registration steps. This family of options overrides the use of transformType if any of them are set.", - argstr="--useScaleSkewVersor3D ") + desc="Perform a ScaleSkewVersor3D registration as part of the sequential registration steps. This family of options overrides the use of transformType if any of them are set.", + argstr="--useScaleSkewVersor3D ", + ) useAffine = traits.Bool( - desc= - "Perform an Affine registration as part of the sequential registration steps. This family of options overrides the use of transformType if any of them are set.", - argstr="--useAffine ") + desc="Perform an Affine registration as part of the sequential registration steps. This family of options overrides the use of transformType if any of them are set.", + argstr="--useAffine ", + ) useBSpline = traits.Bool( - desc= - "Perform a BSpline registration as part of the sequential registration steps. This family of options overrides the use of transformType if any of them are set.", - argstr="--useBSpline ") + desc="Perform a BSpline registration as part of the sequential registration steps. This family of options overrides the use of transformType if any of them are set.", + argstr="--useBSpline ", + ) useSyN = traits.Bool( - desc= - "Perform a SyN registration as part of the sequential registration steps. This family of options overrides the use of transformType if any of them are set.", - argstr="--useSyN ") + desc="Perform a SyN registration as part of the sequential registration steps. This family of options overrides the use of transformType if any of them are set.", + argstr="--useSyN ", + ) useComposite = traits.Bool( - desc= - "Perform a Composite registration as part of the sequential registration steps. This family of options overrides the use of transformType if any of them are set.", - argstr="--useComposite ") + desc="Perform a Composite registration as part of the sequential registration steps. This family of options overrides the use of transformType if any of them are set.", + argstr="--useComposite ", + ) maskProcessingMode = traits.Enum( "NOMASK", "ROIAUTO", "ROI", - desc= - "Specifies a mask to only consider a certain image region for the registration. If ROIAUTO is chosen, then the mask is computed using Otsu thresholding and hole filling. 
If ROI is chosen then the mask has to be specified as in input.", - argstr="--maskProcessingMode %s") + desc="Specifies a mask to only consider a certain image region for the registration. If ROIAUTO is chosen, then the mask is computed using Otsu thresholding and hole filling. If ROI is chosen then the mask has to be specified as in input.", + argstr="--maskProcessingMode %s", + ) fixedBinaryVolume = File( - desc= - "Fixed Image binary mask volume, required if Masking Option is ROI. Image areas where the mask volume has zero value are ignored during the registration.", + desc="Fixed Image binary mask volume, required if Masking Option is ROI. Image areas where the mask volume has zero value are ignored during the registration.", exists=True, - argstr="--fixedBinaryVolume %s") + argstr="--fixedBinaryVolume %s", + ) movingBinaryVolume = File( - desc= - "Moving Image binary mask volume, required if Masking Option is ROI. Image areas where the mask volume has zero value are ignored during the registration.", + desc="Moving Image binary mask volume, required if Masking Option is ROI. Image areas where the mask volume has zero value are ignored during the registration.", exists=True, - argstr="--movingBinaryVolume %s") + argstr="--movingBinaryVolume %s", + ) outputFixedVolumeROI = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "ROI that is automatically computed from the fixed image. Only available if Masking Option is ROIAUTO. Image areas where the mask volume has zero value are ignored during the registration.", - argstr="--outputFixedVolumeROI %s") + desc="ROI that is automatically computed from the fixed image. Only available if Masking Option is ROIAUTO. Image areas where the mask volume has zero value are ignored during the registration.", + argstr="--outputFixedVolumeROI %s", + ) outputMovingVolumeROI = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "ROI that is automatically computed from the moving image. Only available if Masking Option is ROIAUTO. Image areas where the mask volume has zero value are ignored during the registration.", - argstr="--outputMovingVolumeROI %s") + desc="ROI that is automatically computed from the moving image. Only available if Masking Option is ROIAUTO. Image areas where the mask volume has zero value are ignored during the registration.", + argstr="--outputMovingVolumeROI %s", + ) useROIBSpline = traits.Bool( - desc= - "If enabled then the bounding box of the input ROIs defines the BSpline grid support region. Otherwise the BSpline grid support region is the whole fixed image.", - argstr="--useROIBSpline ") + desc="If enabled then the bounding box of the input ROIs defines the BSpline grid support region. Otherwise the BSpline grid support region is the whole fixed image.", + argstr="--useROIBSpline ", + ) histogramMatch = traits.Bool( - desc= - "Apply histogram matching operation for the input images to make them more similar. This is suitable for images of the same modality that may have different brightness or contrast, but the same overall intensity profile. Do NOT use if registering images from different modalities.", - argstr="--histogramMatch ") + desc="Apply histogram matching operation for the input images to make them more similar. This is suitable for images of the same modality that may have different brightness or contrast, but the same overall intensity profile. 
Do NOT use if registering images from different modalities.", + argstr="--histogramMatch ", + ) medianFilterSize = InputMultiPath( traits.Int, - desc= - "Apply median filtering to reduce noise in the input volumes. The 3 values specify the radius for the optional MedianImageFilter preprocessing in all 3 directions (in voxels).", + desc="Apply median filtering to reduce noise in the input volumes. The 3 values specify the radius for the optional MedianImageFilter preprocessing in all 3 directions (in voxels).", sep=",", - argstr="--medianFilterSize %s") + argstr="--medianFilterSize %s", + ) removeIntensityOutliers = traits.Float( - desc= - "Remove very high and very low intensity voxels from the input volumes. The parameter specifies the half percentage to decide outliers of image intensities. The default value is zero, which means no outlier removal. If the value of 0.005 is given, the 0.005% of both tails will be thrown away, so 0.01% of intensities in total would be ignored in the statistic calculation.", - argstr="--removeIntensityOutliers %f") + desc="Remove very high and very low intensity voxels from the input volumes. The parameter specifies the half percentage to decide outliers of image intensities. The default value is zero, which means no outlier removal. If the value of 0.005 is given, the 0.005% of both tails will be thrown away, so 0.01% of intensities in total would be ignored in the statistic calculation.", + argstr="--removeIntensityOutliers %f", + ) fixedVolume2 = File( - desc= - "Input fixed image that will be used for multimodal registration. (the moving image will be transformed into this image space).", + desc="Input fixed image that will be used for multimodal registration. (the moving image will be transformed into this image space).", exists=True, - argstr="--fixedVolume2 %s") + argstr="--fixedVolume2 %s", + ) movingVolume2 = File( - desc= - "Input moving image that will be used for multimodal registration(this image will be transformed into the fixed image space).", + desc="Input moving image that will be used for multimodal registration(this image will be transformed into the fixed image space).", exists=True, - argstr="--movingVolume2 %s") + argstr="--movingVolume2 %s", + ) outputVolumePixelType = traits.Enum( "float", "short", @@ -161,15 +170,16 @@ class BRAINSFitInputSpec(CommandLineInputSpec): "uint", "uchar", desc="Data type for representing a voxel of the Output Volume.", - argstr="--outputVolumePixelType %s") + argstr="--outputVolumePixelType %s", + ) backgroundFillValue = traits.Float( - desc= - "This value will be used for filling those areas of the output image that have no corresponding voxels in the input moving image.", - argstr="--backgroundFillValue %f") + desc="This value will be used for filling those areas of the output image that have no corresponding voxels in the input moving image.", + argstr="--backgroundFillValue %f", + ) scaleOutputValues = traits.Bool( - desc= - "If true, and the voxel values do not fit within the minimum and maximum values of the desired outputVolumePixelType, then linearly scale the min/max output image voxel values to fit within the min/max range of the outputVolumePixelType.", - argstr="--scaleOutputValues ") + desc="If true, and the voxel values do not fit within the minimum and maximum values of the desired outputVolumePixelType, then linearly scale the min/max output image voxel values to fit within the min/max range of the outputVolumePixelType.", + argstr="--scaleOutputValues ", + ) interpolationMode = 
traits.Enum( "NearestNeighbor", "Linear", @@ -181,199 +191,200 @@ class BRAINSFitInputSpec(CommandLineInputSpec): "Welch", "Lanczos", "Blackman", - desc= - "Type of interpolation to be used when applying transform to moving volume. Options are Linear, NearestNeighbor, BSpline, WindowedSinc, Hamming, Cosine, Welch, Lanczos, or ResampleInPlace. The ResampleInPlace option will create an image with the same discrete voxel values and will adjust the origin and direction of the physical space interpretation.", - argstr="--interpolationMode %s") + desc="Type of interpolation to be used when applying transform to moving volume. Options are Linear, NearestNeighbor, BSpline, WindowedSinc, Hamming, Cosine, Welch, Lanczos, or ResampleInPlace. The ResampleInPlace option will create an image with the same discrete voxel values and will adjust the origin and direction of the physical space interpretation.", + argstr="--interpolationMode %s", + ) numberOfIterations = InputMultiPath( traits.Int, - desc= - "The maximum number of iterations to try before stopping the optimization. When using a lower value (500-1000) then the registration is forced to terminate earlier but there is a higher risk of stopping before an optimal solution is reached.", + desc="The maximum number of iterations to try before stopping the optimization. When using a lower value (500-1000) then the registration is forced to terminate earlier but there is a higher risk of stopping before an optimal solution is reached.", sep=",", - argstr="--numberOfIterations %s") + argstr="--numberOfIterations %s", + ) maximumStepLength = traits.Float( - desc= - "Starting step length of the optimizer. In general, higher values allow for recovering larger initial misalignments but there is an increased chance that the registration will not converge.", - argstr="--maximumStepLength %f") + desc="Starting step length of the optimizer. In general, higher values allow for recovering larger initial misalignments but there is an increased chance that the registration will not converge.", + argstr="--maximumStepLength %f", + ) minimumStepLength = InputMultiPath( traits.Float, - desc= - "Each step in the optimization takes steps at least this big. When none are possible, registration is complete. Smaller values allows the optimizer to make smaller adjustments, but the registration time may increase.", + desc="Each step in the optimization takes steps at least this big. When none are possible, registration is complete. Smaller values allows the optimizer to make smaller adjustments, but the registration time may increase.", sep=",", - argstr="--minimumStepLength %s") + argstr="--minimumStepLength %s", + ) relaxationFactor = traits.Float( - desc= - "Specifies how quickly the optimization step length is decreased during registration. The value must be larger than 0 and smaller than 1. Larger values result in slower step size decrease, which allow for recovering larger initial misalignments but it increases the registration time and the chance that the registration will not converge.", - argstr="--relaxationFactor %f") + desc="Specifies how quickly the optimization step length is decreased during registration. The value must be larger than 0 and smaller than 1. 
Larger values result in slower step size decrease, which allow for recovering larger initial misalignments but it increases the registration time and the chance that the registration will not converge.", + argstr="--relaxationFactor %f", + ) translationScale = traits.Float( - desc= - "How much to scale up changes in position (in mm) compared to unit rotational changes (in radians) -- decrease this to allow for more rotation in the search pattern.", - argstr="--translationScale %f") + desc="How much to scale up changes in position (in mm) compared to unit rotational changes (in radians) -- decrease this to allow for more rotation in the search pattern.", + argstr="--translationScale %f", + ) reproportionScale = traits.Float( - desc= - "ScaleVersor3D 'Scale' compensation factor. Increase this to allow for more rescaling in a ScaleVersor3D or ScaleSkewVersor3D search pattern. 1.0 works well with a translationScale of 1000.0", - argstr="--reproportionScale %f") + desc="ScaleVersor3D 'Scale' compensation factor. Increase this to allow for more rescaling in a ScaleVersor3D or ScaleSkewVersor3D search pattern. 1.0 works well with a translationScale of 1000.0", + argstr="--reproportionScale %f", + ) skewScale = traits.Float( - desc= - "ScaleSkewVersor3D Skew compensation factor. Increase this to allow for more skew in a ScaleSkewVersor3D search pattern. 1.0 works well with a translationScale of 1000.0", - argstr="--skewScale %f") + desc="ScaleSkewVersor3D Skew compensation factor. Increase this to allow for more skew in a ScaleSkewVersor3D search pattern. 1.0 works well with a translationScale of 1000.0", + argstr="--skewScale %f", + ) maxBSplineDisplacement = traits.Float( - desc= - "Maximum allowed displacements in image physical coordinates (mm) for BSpline control grid along each axis. A value of 0.0 indicates that the problem should be unbounded. NOTE: This only constrains the BSpline portion, and does not limit the displacement from the associated bulk transform. This can lead to a substantial reduction in computation time in the BSpline optimizer., ", - argstr="--maxBSplineDisplacement %f") + desc="Maximum allowed displacements in image physical coordinates (mm) for BSpline control grid along each axis. A value of 0.0 indicates that the problem should be unbounded. NOTE: This only constrains the BSpline portion, and does not limit the displacement from the associated bulk transform. This can lead to a substantial reduction in computation time in the BSpline optimizer., ", + argstr="--maxBSplineDisplacement %f", + ) fixedVolumeTimeIndex = traits.Int( - desc= - "The index in the time series for the 3D fixed image to fit. Only allowed if the fixed input volume is 4-dimensional.", - argstr="--fixedVolumeTimeIndex %d") + desc="The index in the time series for the 3D fixed image to fit. Only allowed if the fixed input volume is 4-dimensional.", + argstr="--fixedVolumeTimeIndex %d", + ) movingVolumeTimeIndex = traits.Int( - desc= - "The index in the time series for the 3D moving image to fit. Only allowed if the moving input volume is 4-dimensional", - argstr="--movingVolumeTimeIndex %d") + desc="The index in the time series for the 3D moving image to fit. 
Only allowed if the moving input volume is 4-dimensional", + argstr="--movingVolumeTimeIndex %d", + ) numberOfHistogramBins = traits.Int( - desc= - "The number of histogram levels used for mutual information metric estimation.", - argstr="--numberOfHistogramBins %d") + desc="The number of histogram levels used for mutual information metric estimation.", + argstr="--numberOfHistogramBins %d", + ) numberOfMatchPoints = traits.Int( - desc= - "Number of histogram match points used for mutual information metric estimation.", - argstr="--numberOfMatchPoints %d") + desc="Number of histogram match points used for mutual information metric estimation.", + argstr="--numberOfMatchPoints %d", + ) costMetric = traits.Enum( "MMI", "MSE", "NC", "MIH", - desc= - "The cost metric to be used during fitting. Defaults to MMI. Options are MMI (Mattes Mutual Information), MSE (Mean Square Error), NC (Normalized Correlation), MC (Match Cardinality for binary images)", - argstr="--costMetric %s") + desc="The cost metric to be used during fitting. Defaults to MMI. Options are MMI (Mattes Mutual Information), MSE (Mean Square Error), NC (Normalized Correlation), MC (Match Cardinality for binary images)", + argstr="--costMetric %s", + ) maskInferiorCutOffFromCenter = traits.Float( - desc= - "If Initialize Transform Mode is set to useCenterOfHeadAlign or Masking Option is ROIAUTO then this value defines the how much is cut of from the inferior part of the image. The cut-off distance is specified in millimeters, relative to the image center. If the value is 1000 or larger then no cut-off performed.", - argstr="--maskInferiorCutOffFromCenter %f") + desc="If Initialize Transform Mode is set to useCenterOfHeadAlign or Masking Option is ROIAUTO then this value defines the how much is cut of from the inferior part of the image. The cut-off distance is specified in millimeters, relative to the image center. If the value is 1000 or larger then no cut-off performed.", + argstr="--maskInferiorCutOffFromCenter %f", + ) ROIAutoDilateSize = traits.Float( - desc= - "This flag is only relevant when using ROIAUTO mode for initializing masks. It defines the final dilation size to capture a bit of background outside the tissue region. A setting of 10mm has been shown to help regularize a BSpline registration type so that there is some background constraints to match the edges of the head better.", - argstr="--ROIAutoDilateSize %f") + desc="This flag is only relevant when using ROIAUTO mode for initializing masks. It defines the final dilation size to capture a bit of background outside the tissue region. A setting of 10mm has been shown to help regularize a BSpline registration type so that there is some background constraints to match the edges of the head better.", + argstr="--ROIAutoDilateSize %f", + ) ROIAutoClosingSize = traits.Float( - desc= - "This flag is only relevant when using ROIAUTO mode for initializing masks. It defines the hole closing size in mm. It is rounded up to the nearest whole pixel size in each direction. The default is to use a closing size of 9mm. For mouse data this value may need to be reset to 0.9 or smaller.", - argstr="--ROIAutoClosingSize %f") + desc="This flag is only relevant when using ROIAUTO mode for initializing masks. It defines the hole closing size in mm. It is rounded up to the nearest whole pixel size in each direction. The default is to use a closing size of 9mm. 
For mouse data this value may need to be reset to 0.9 or smaller.", + argstr="--ROIAutoClosingSize %f", + ) numberOfSamples = traits.Int( - desc= - "The number of voxels sampled for mutual information computation. Increase this for higher accuracy, at the cost of longer computation time., NOTE that it is suggested to use samplingPercentage instead of this option. However, if set to non-zero, numberOfSamples overwrites the samplingPercentage option. ", - argstr="--numberOfSamples %d") + desc="The number of voxels sampled for mutual information computation. Increase this for higher accuracy, at the cost of longer computation time., NOTE that it is suggested to use samplingPercentage instead of this option. However, if set to non-zero, numberOfSamples overwrites the samplingPercentage option. ", + argstr="--numberOfSamples %d", + ) strippedOutputTransform = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Rigid component of the estimated affine transform. Can be used to rigidly register the moving image to the fixed image. NOTE: This value is overridden if either bsplineTransform or linearTransform is set.", - argstr="--strippedOutputTransform %s") + desc="Rigid component of the estimated affine transform. Can be used to rigidly register the moving image to the fixed image. NOTE: This value is overridden if either bsplineTransform or linearTransform is set.", + argstr="--strippedOutputTransform %s", + ) transformType = InputMultiPath( traits.Str, - desc= - "Specifies a list of registration types to be used. The valid types are, Rigid, ScaleVersor3D, ScaleSkewVersor3D, Affine, BSpline and SyN. Specifying more than one in a comma separated list will initialize the next stage with the previous results. If registrationClass flag is used, it overrides this parameter setting.", + desc="Specifies a list of registration types to be used. The valid types are, Rigid, ScaleVersor3D, ScaleSkewVersor3D, Affine, BSpline and SyN. Specifying more than one in a comma separated list will initialize the next stage with the previous results. If registrationClass flag is used, it overrides this parameter setting.", sep=",", - argstr="--transformType %s") + argstr="--transformType %s", + ) outputTransform = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "(optional) Filename to which save the (optional) estimated transform. NOTE: You must select either the outputTransform or the outputVolume option.", - argstr="--outputTransform %s") + desc="(optional) Filename to which save the (optional) estimated transform. NOTE: You must select either the outputTransform or the outputVolume option.", + argstr="--outputTransform %s", + ) initializeRegistrationByCurrentGenericTransform = traits.Bool( - desc= - "If this flag is ON, the current generic composite transform, resulted from the linear registration stages, is set to initialize the follow nonlinear registration process. However, by the default behaviour, the moving image is first warped based on the existant transform before it is passed to the BSpline registration filter. It is done to speed up the BSpline registration by reducing the computations of composite transform Jacobian.", - argstr="--initializeRegistrationByCurrentGenericTransform ") + desc="If this flag is ON, the current generic composite transform, resulted from the linear registration stages, is set to initialize the follow nonlinear registration process. 
However, by the default behaviour, the moving image is first warped based on the existant transform before it is passed to the BSpline registration filter. It is done to speed up the BSpline registration by reducing the computations of composite transform Jacobian.", + argstr="--initializeRegistrationByCurrentGenericTransform ", + ) failureExitCode = traits.Int( - desc= - "If the fit fails, exit with this status code. (It can be used to force a successfult exit status of (0) if the registration fails due to reaching the maximum number of iterations.", - argstr="--failureExitCode %d") + desc="If the fit fails, exit with this status code. (It can be used to force a successfult exit status of (0) if the registration fails due to reaching the maximum number of iterations.", + argstr="--failureExitCode %d", + ) writeTransformOnFailure = traits.Bool( - desc= - "Flag to save the final transform even if the numberOfIterations are reached without convergence. (Intended for use when --failureExitCode 0 )", - argstr="--writeTransformOnFailure ") + desc="Flag to save the final transform even if the numberOfIterations are reached without convergence. (Intended for use when --failureExitCode 0 )", + argstr="--writeTransformOnFailure ", + ) numberOfThreads = traits.Int( - desc= - "Explicitly specify the maximum number of threads to use. (default is auto-detected)", - argstr="--numberOfThreads %d") + desc="Explicitly specify the maximum number of threads to use. (default is auto-detected)", + argstr="--numberOfThreads %d", + ) debugLevel = traits.Int( - desc= - "Display debug messages, and produce debug intermediate results. 0=OFF, 1=Minimal, 10=Maximum debugging.", - argstr="--debugLevel %d") + desc="Display debug messages, and produce debug intermediate results. 0=OFF, 1=Minimal, 10=Maximum debugging.", + argstr="--debugLevel %d", + ) costFunctionConvergenceFactor = traits.Float( - desc= - "From itkLBFGSBOptimizer.h: Set/Get the CostFunctionConvergenceFactor. Algorithm terminates when the reduction in cost function is less than (factor * epsmcj) where epsmch is the machine precision. Typical values for factor: 1e+12 for low accuracy; 1e+7 for moderate accuracy and 1e+1 for extremely high accuracy. 1e+9 seems to work well., ", - argstr="--costFunctionConvergenceFactor %f") + desc="From itkLBFGSBOptimizer.h: Set/Get the CostFunctionConvergenceFactor. Algorithm terminates when the reduction in cost function is less than (factor * epsmcj) where epsmch is the machine precision. Typical values for factor: 1e+12 for low accuracy; 1e+7 for moderate accuracy and 1e+1 for extremely high accuracy. 1e+9 seems to work well., ", + argstr="--costFunctionConvergenceFactor %f", + ) projectedGradientTolerance = traits.Float( - desc= - "From itkLBFGSBOptimizer.h: Set/Get the ProjectedGradientTolerance. Algorithm terminates when the project gradient is below the tolerance. Default lbfgsb value is 1e-5, but 1e-4 seems to work well., ", - argstr="--projectedGradientTolerance %f") + desc="From itkLBFGSBOptimizer.h: Set/Get the ProjectedGradientTolerance. Algorithm terminates when the project gradient is below the tolerance. 
Default lbfgsb value is 1e-5, but 1e-4 seems to work well., ", + argstr="--projectedGradientTolerance %f", + ) maximumNumberOfEvaluations = traits.Int( - desc= - "Maximum number of evaluations for line search in lbfgsb optimizer.", - argstr="--maximumNumberOfEvaluations %d") + desc="Maximum number of evaluations for line search in lbfgsb optimizer.", + argstr="--maximumNumberOfEvaluations %d", + ) maximumNumberOfCorrections = traits.Int( desc="Maximum number of corrections in lbfgsb optimizer.", - argstr="--maximumNumberOfCorrections %d") + argstr="--maximumNumberOfCorrections %d", + ) gui = traits.Bool( - desc= - "Display intermediate image volumes for debugging. NOTE: This is not part of the standard build sytem, and probably does nothing on your installation.", - argstr="--gui ") + desc="Display intermediate image volumes for debugging. NOTE: This is not part of the standard build sytem, and probably does nothing on your installation.", + argstr="--gui ", + ) promptUser = traits.Bool( - desc= - "Prompt the user to hit enter each time an image is sent to the DebugImageViewer", - argstr="--promptUser ") + desc="Prompt the user to hit enter each time an image is sent to the DebugImageViewer", + argstr="--promptUser ", + ) metricSamplingStrategy = traits.Enum( "Random", - desc= - "It defines the method that registration filter uses to sample the input fixed image. Only Random is supported for now.", - argstr="--metricSamplingStrategy %s") + desc="It defines the method that registration filter uses to sample the input fixed image. Only Random is supported for now.", + argstr="--metricSamplingStrategy %s", + ) logFileReport = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "A file to write out final information report in CSV file: MetricName,MetricValue,FixedImageName,FixedMaskName,MovingImageName,MovingMaskName", - argstr="--logFileReport %s") + desc="A file to write out final information report in CSV file: MetricName,MetricValue,FixedImageName,FixedMaskName,MovingImageName,MovingMaskName", + argstr="--logFileReport %s", + ) writeOutputTransformInFloat = traits.Bool( - desc= - "By default, the output registration transforms (either the output composite transform or each transform component) are written to the disk in double precision. If this flag is ON, the output transforms will be written in single (float) precision. It is especially important if the output transform is a displacement field transform, or it is a composite transform that includes several displacement fields.", - argstr="--writeOutputTransformInFloat ") + desc="By default, the output registration transforms (either the output composite transform or each transform component) are written to the disk in double precision. If this flag is ON, the output transforms will be written in single (float) precision. It is especially important if the output transform is a displacement field transform, or it is a composite transform that includes several displacement fields.", + argstr="--writeOutputTransformInFloat ", + ) class BRAINSFitOutputSpec(TraitedSpec): linearTransform = File( - desc= - "(optional) Output estimated transform - in case the computed transform is not BSpline. NOTE: You must set at least one output object (transform and/or output volume).", - exists=True) + desc="(optional) Output estimated transform - in case the computed transform is not BSpline. 
NOTE: You must set at least one output object (transform and/or output volume).", + exists=True, + ) bsplineTransform = File( - desc= - "(optional) Output estimated transform - in case the computed transform is BSpline. NOTE: You must set at least one output object (transform and/or output volume).", - exists=True) + desc="(optional) Output estimated transform - in case the computed transform is BSpline. NOTE: You must set at least one output object (transform and/or output volume).", + exists=True, + ) outputVolume = File( - desc= - "(optional) Output image: the moving image warped to the fixed image space. NOTE: You must set at least one output object (transform and/or output volume).", - exists=True) + desc="(optional) Output image: the moving image warped to the fixed image space. NOTE: You must set at least one output object (transform and/or output volume).", + exists=True, + ) outputFixedVolumeROI = File( - desc= - "ROI that is automatically computed from the fixed image. Only available if Masking Option is ROIAUTO. Image areas where the mask volume has zero value are ignored during the registration.", - exists=True) + desc="ROI that is automatically computed from the fixed image. Only available if Masking Option is ROIAUTO. Image areas where the mask volume has zero value are ignored during the registration.", + exists=True, + ) outputMovingVolumeROI = File( - desc= - "ROI that is automatically computed from the moving image. Only available if Masking Option is ROIAUTO. Image areas where the mask volume has zero value are ignored during the registration.", - exists=True) + desc="ROI that is automatically computed from the moving image. Only available if Masking Option is ROIAUTO. Image areas where the mask volume has zero value are ignored during the registration.", + exists=True, + ) strippedOutputTransform = File( - desc= - "Rigid component of the estimated affine transform. Can be used to rigidly register the moving image to the fixed image. NOTE: This value is overridden if either bsplineTransform or linearTransform is set.", - exists=True) + desc="Rigid component of the estimated affine transform. Can be used to rigidly register the moving image to the fixed image. NOTE: This value is overridden if either bsplineTransform or linearTransform is set.", + exists=True, + ) outputTransform = File( - desc= - "(optional) Filename to which save the (optional) estimated transform. NOTE: You must select either the outputTransform or the outputVolume option.", - exists=True) + desc="(optional) Filename to which save the (optional) estimated transform. 
NOTE: You must select either the outputTransform or the outputVolume option.", + exists=True, + ) logFileReport = File( - desc= - "A file to write out final information report in CSV file: MetricName,MetricValue,FixedImageName,FixedMaskName,MovingImageName,MovingMaskName", - exists=True) + desc="A file to write out final information report in CSV file: MetricName,MetricValue,FixedImageName,FixedMaskName,MovingImageName,MovingMaskName", + exists=True, + ) class BRAINSFit(SEMLikeCommandLine): @@ -399,13 +410,13 @@ class BRAINSFit(SEMLikeCommandLine): output_spec = BRAINSFitOutputSpec _cmd = " BRAINSFit " _outputs_filenames = { - 'outputVolume': 'outputVolume.nii', - 'bsplineTransform': 'bsplineTransform.h5', - 'outputTransform': 'outputTransform.h5', - 'outputFixedVolumeROI': 'outputFixedVolumeROI.nii', - 'strippedOutputTransform': 'strippedOutputTransform.h5', - 'outputMovingVolumeROI': 'outputMovingVolumeROI.nii', - 'linearTransform': 'linearTransform.h5', - 'logFileReport': 'logFileReport' + "outputVolume": "outputVolume.nii", + "bsplineTransform": "bsplineTransform.h5", + "outputTransform": "outputTransform.h5", + "outputFixedVolumeROI": "outputFixedVolumeROI.nii", + "strippedOutputTransform": "strippedOutputTransform.h5", + "outputMovingVolumeROI": "outputMovingVolumeROI.nii", + "linearTransform": "linearTransform.h5", + "logFileReport": "logFileReport", } _redirect_x = False diff --git a/nipype/interfaces/semtools/registration/brainsresample.py b/nipype/interfaces/semtools/registration/brainsresample.py index f9ea80acbd..0eb6b5f29c 100644 --- a/nipype/interfaces/semtools/registration/brainsresample.py +++ b/nipype/interfaces/semtools/registration/brainsresample.py @@ -5,25 +5,34 @@ import os -from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, - TraitedSpec, File, Directory, traits, isdefined, - InputMultiPath, OutputMultiPath) +from ...base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) class BRAINSResampleInputSpec(CommandLineInputSpec): - inputVolume = File( - desc="Image To Warp", exists=True, argstr="--inputVolume %s") + inputVolume = File(desc="Image To Warp", exists=True, argstr="--inputVolume %s") referenceVolume = File( - desc= - "Reference image used only to define the output space. If not specified, the warping is done in the same space as the image to warp.", + desc="Reference image used only to define the output space. If not specified, the warping is done in the same space as the image to warp.", exists=True, - argstr="--referenceVolume %s") + argstr="--referenceVolume %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Resulting deformed image", - argstr="--outputVolume %s") + argstr="--outputVolume %s", + ) pixelType = traits.Enum( "float", "short", @@ -32,19 +41,19 @@ class BRAINSResampleInputSpec(CommandLineInputSpec): "uint", "uchar", "binary", - desc= - "Specifies the pixel type for the input/output images. The \'binary\' pixel type uses a modified algorithm whereby the image is read in as unsigned char, a signed distance map is created, signed distance map is resampled, and then a thresholded image of type unsigned char is written to disk.", - argstr="--pixelType %s") + desc="Specifies the pixel type for the input/output images. 
The 'binary' pixel type uses a modified algorithm whereby the image is read in as unsigned char, a signed distance map is created, signed distance map is resampled, and then a thresholded image of type unsigned char is written to disk.", + argstr="--pixelType %s", + ) deformationVolume = File( - desc= - "Displacement Field to be used to warp the image (ITKv3 or earlier)", + desc="Displacement Field to be used to warp the image (ITKv3 or earlier)", exists=True, - argstr="--deformationVolume %s") + argstr="--deformationVolume %s", + ) warpTransform = File( - desc= - "Filename for the BRAINSFit transform (ITKv3 or earlier) or composite transform file (ITKv4)", + desc="Filename for the BRAINSFit transform (ITKv3 or earlier) or composite transform file (ITKv4)", exists=True, - argstr="--warpTransform %s") + argstr="--warpTransform %s", + ) interpolationMode = traits.Enum( "NearestNeighbor", "Linear", @@ -56,24 +65,24 @@ class BRAINSResampleInputSpec(CommandLineInputSpec): "Welch", "Lanczos", "Blackman", - desc= - "Type of interpolation to be used when applying transform to moving volume. Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", - argstr="--interpolationMode %s") + desc="Type of interpolation to be used when applying transform to moving volume. Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", + argstr="--interpolationMode %s", + ) inverseTransform = traits.Bool( - desc= - "True/False is to compute inverse of given transformation. Default is false", - argstr="--inverseTransform ") - defaultValue = traits.Float( - desc="Default voxel value", argstr="--defaultValue %f") + desc="True/False is to compute inverse of given transformation. Default is false", + argstr="--inverseTransform ", + ) + defaultValue = traits.Float(desc="Default voxel value", argstr="--defaultValue %f") gridSpacing = InputMultiPath( traits.Int, - desc= - "Add warped grid to output image to help show the deformation that occured with specified spacing. A spacing of 0 in a dimension indicates that grid lines should be rendered to fall exactly (i.e. do not allow displacements off that plane). This is useful for makeing a 2D image of grid lines from the 3D space", + desc="Add warped grid to output image to help show the deformation that occured with specified spacing. A spacing of 0 in a dimension indicates that grid lines should be rendered to fall exactly (i.e. do not allow displacements off that plane). 
This is useful for makeing a 2D image of grid lines from the 3D space", sep=",", - argstr="--gridSpacing %s") + argstr="--gridSpacing %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class BRAINSResampleOutputSpec(TraitedSpec): @@ -102,5 +111,5 @@ class BRAINSResample(SEMLikeCommandLine): input_spec = BRAINSResampleInputSpec output_spec = BRAINSResampleOutputSpec _cmd = " BRAINSResample " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} _redirect_x = False diff --git a/nipype/interfaces/semtools/registration/brainsresize.py b/nipype/interfaces/semtools/registration/brainsresize.py index 11238dd914..a81ee204b5 100644 --- a/nipype/interfaces/semtools/registration/brainsresize.py +++ b/nipype/interfaces/semtools/registration/brainsresize.py @@ -5,20 +5,29 @@ import os -from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, - TraitedSpec, File, Directory, traits, isdefined, - InputMultiPath, OutputMultiPath) +from ...base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) class BRAINSResizeInputSpec(CommandLineInputSpec): - inputVolume = File( - desc="Image To Scale", exists=True, argstr="--inputVolume %s") + inputVolume = File(desc="Image To Scale", exists=True, argstr="--inputVolume %s") outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Resulting scaled image", - argstr="--outputVolume %s") + argstr="--outputVolume %s", + ) pixelType = traits.Enum( "float", "short", @@ -27,12 +36,12 @@ class BRAINSResizeInputSpec(CommandLineInputSpec): "uint", "uchar", "binary", - desc= - "Specifies the pixel type for the input/output images. The \'binary\' pixel type uses a modified algorithm whereby the image is read in as unsigned char, a signed distance map is created, signed distance map is resampled, and then a thresholded image of type unsigned char is written to disk.", - argstr="--pixelType %s") + desc="Specifies the pixel type for the input/output images. 
The 'binary' pixel type uses a modified algorithm whereby the image is read in as unsigned char, a signed distance map is created, signed distance map is resampled, and then a thresholded image of type unsigned char is written to disk.", + argstr="--pixelType %s", + ) scaleFactor = traits.Float( - desc="The scale factor for the image spacing.", - argstr="--scaleFactor %f") + desc="The scale factor for the image spacing.", argstr="--scaleFactor %f" + ) class BRAINSResizeOutputSpec(TraitedSpec): @@ -59,5 +68,5 @@ class BRAINSResize(SEMLikeCommandLine): input_spec = BRAINSResizeInputSpec output_spec = BRAINSResizeOutputSpec _cmd = " BRAINSResize " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} _redirect_x = False diff --git a/nipype/interfaces/semtools/registration/specialized.py b/nipype/interfaces/semtools/registration/specialized.py index 2cc08e3ec7..5a6daf5250 100644 --- a/nipype/interfaces/semtools/registration/specialized.py +++ b/nipype/interfaces/semtools/registration/specialized.py @@ -5,52 +5,63 @@ import os -from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, - TraitedSpec, File, Directory, traits, isdefined, - InputMultiPath, OutputMultiPath) +from ...base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) class VBRAINSDemonWarpInputSpec(CommandLineInputSpec): movingVolume = InputMultiPath( File(exists=True), desc="Required: input moving image", - argstr="--movingVolume %s...") + argstr="--movingVolume %s...", + ) fixedVolume = InputMultiPath( File(exists=True), desc="Required: input fixed (target) image", - argstr="--fixedVolume %s...") + argstr="--fixedVolume %s...", + ) inputPixelType = traits.Enum( "float", "short", "ushort", "int", "uchar", - desc= - "Input volumes will be typecast to this format: float|short|ushort|int|uchar", - argstr="--inputPixelType %s") + desc="Input volumes will be typecast to this format: float|short|ushort|int|uchar", + argstr="--inputPixelType %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Required: output resampled moving image (will have the same physical space as the fixedVolume).", - argstr="--outputVolume %s") + desc="Required: output resampled moving image (will have the same physical space as the fixedVolume).", + argstr="--outputVolume %s", + ) outputDisplacementFieldVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Output deformation field vector image (will have the same physical space as the fixedVolume).", - argstr="--outputDisplacementFieldVolume %s") + desc="Output deformation field vector image (will have the same physical space as the fixedVolume).", + argstr="--outputDisplacementFieldVolume %s", + ) outputPixelType = traits.Enum( "float", "short", "ushort", "int", "uchar", - desc= - "outputVolume will be typecast to this format: float|short|ushort|int|uchar", - argstr="--outputPixelType %s") + desc="outputVolume will be typecast to this format: float|short|ushort|int|uchar", + argstr="--outputPixelType %s", + ) interpolationMode = traits.Enum( "NearestNeighbor", "Linear", @@ -62,171 +73,185 @@ class VBRAINSDemonWarpInputSpec(CommandLineInputSpec): "Welch", "Lanczos", "Blackman", - desc= - "Type of interpolation to be used when applying transform to moving volume. 
Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", - argstr="--interpolationMode %s") + desc="Type of interpolation to be used when applying transform to moving volume. Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", + argstr="--interpolationMode %s", + ) registrationFilterType = traits.Enum( "Demons", "FastSymmetricForces", "Diffeomorphic", "LogDemons", "SymmetricLogDemons", - desc= - "Registration Filter Type: Demons|FastSymmetricForces|Diffeomorphic|LogDemons|SymmetricLogDemons", - argstr="--registrationFilterType %s") + desc="Registration Filter Type: Demons|FastSymmetricForces|Diffeomorphic|LogDemons|SymmetricLogDemons", + argstr="--registrationFilterType %s", + ) smoothDisplacementFieldSigma = traits.Float( - desc= - "A gaussian smoothing value to be applied to the deformation feild at each iteration.", - argstr="--smoothDisplacementFieldSigma %f") + desc="A gaussian smoothing value to be applied to the deformation feild at each iteration.", + argstr="--smoothDisplacementFieldSigma %f", + ) numberOfPyramidLevels = traits.Int( - desc= - "Number of image pyramid levels to use in the multi-resolution registration.", - argstr="--numberOfPyramidLevels %d") + desc="Number of image pyramid levels to use in the multi-resolution registration.", + argstr="--numberOfPyramidLevels %d", + ) minimumFixedPyramid = InputMultiPath( traits.Int, - desc= - "The shrink factor for the first level of the fixed image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", + desc="The shrink factor for the first level of the fixed image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", sep=",", - argstr="--minimumFixedPyramid %s") + argstr="--minimumFixedPyramid %s", + ) minimumMovingPyramid = InputMultiPath( traits.Int, - desc= - "The shrink factor for the first level of the moving image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", + desc="The shrink factor for the first level of the moving image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", sep=",", - argstr="--minimumMovingPyramid %s") + argstr="--minimumMovingPyramid %s", + ) arrayOfPyramidLevelIterations = InputMultiPath( traits.Int, desc="The number of iterations for each pyramid level", sep=",", - argstr="--arrayOfPyramidLevelIterations %s") + argstr="--arrayOfPyramidLevelIterations %s", + ) histogramMatch = traits.Bool( - desc= - "Histogram Match the input images. This is suitable for images of the same modality that may have different absolute scales, but the same overall intensity profile.", - argstr="--histogramMatch ") + desc="Histogram Match the input images. This is suitable for images of the same modality that may have different absolute scales, but the same overall intensity profile.", + argstr="--histogramMatch ", + ) numberOfHistogramBins = traits.Int( - desc="The number of histogram levels", - argstr="--numberOfHistogramBins %d") + desc="The number of histogram levels", argstr="--numberOfHistogramBins %d" + ) numberOfMatchPoints = traits.Int( desc="The number of match points for histrogramMatch", - argstr="--numberOfMatchPoints %d") + argstr="--numberOfMatchPoints %d", + ) medianFilterSize = InputMultiPath( traits.Int, - desc= - "Median filter radius in all 3 directions. When images have a lot of salt and pepper noise, this step can improve the registration.", + desc="Median filter radius in all 3 directions. 
When images have a lot of salt and pepper noise, this step can improve the registration.", sep=",", - argstr="--medianFilterSize %s") + argstr="--medianFilterSize %s", + ) initializeWithDisplacementField = File( desc="Initial deformation field vector image file name", exists=True, - argstr="--initializeWithDisplacementField %s") + argstr="--initializeWithDisplacementField %s", + ) initializeWithTransform = File( desc="Initial Transform filename", exists=True, - argstr="--initializeWithTransform %s") + argstr="--initializeWithTransform %s", + ) makeBOBF = traits.Bool( - desc= - "Flag to make Brain-Only Background-Filled versions of the input and target volumes.", - argstr="--makeBOBF ") + desc="Flag to make Brain-Only Background-Filled versions of the input and target volumes.", + argstr="--makeBOBF ", + ) fixedBinaryVolume = File( desc="Mask filename for desired region of interest in the Fixed image.", exists=True, - argstr="--fixedBinaryVolume %s") + argstr="--fixedBinaryVolume %s", + ) movingBinaryVolume = File( - desc= - "Mask filename for desired region of interest in the Moving image.", + desc="Mask filename for desired region of interest in the Moving image.", exists=True, - argstr="--movingBinaryVolume %s") + argstr="--movingBinaryVolume %s", + ) lowerThresholdForBOBF = traits.Int( - desc="Lower threshold for performing BOBF", - argstr="--lowerThresholdForBOBF %d") + desc="Lower threshold for performing BOBF", argstr="--lowerThresholdForBOBF %d" + ) upperThresholdForBOBF = traits.Int( - desc="Upper threshold for performing BOBF", - argstr="--upperThresholdForBOBF %d") + desc="Upper threshold for performing BOBF", argstr="--upperThresholdForBOBF %d" + ) backgroundFillValue = traits.Int( desc="Replacement value to overwrite background when performing BOBF", - argstr="--backgroundFillValue %d") + argstr="--backgroundFillValue %d", + ) seedForBOBF = InputMultiPath( traits.Int, desc="coordinates in all 3 directions for Seed when performing BOBF", sep=",", - argstr="--seedForBOBF %s") + argstr="--seedForBOBF %s", + ) neighborhoodForBOBF = InputMultiPath( traits.Int, - desc= - "neighborhood in all 3 directions to be included when performing BOBF", + desc="neighborhood in all 3 directions to be included when performing BOBF", sep=",", - argstr="--neighborhoodForBOBF %s") + argstr="--neighborhoodForBOBF %s", + ) outputDisplacementFieldPrefix = traits.Str( - desc= - "Displacement field filename prefix for writing separate x, y, and z component images", - argstr="--outputDisplacementFieldPrefix %s") + desc="Displacement field filename prefix for writing separate x, y, and z component images", + argstr="--outputDisplacementFieldPrefix %s", + ) outputCheckerboardVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.", - argstr="--outputCheckerboardVolume %s") + desc="Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.", + argstr="--outputCheckerboardVolume %s", + ) checkerboardPatternSubdivisions = InputMultiPath( traits.Int, desc="Number of Checkerboard subdivisions in all 3 directions", sep=",", - argstr="--checkerboardPatternSubdivisions %s") + argstr="--checkerboardPatternSubdivisions %s", + ) outputNormalized = traits.Bool( - desc= - "Flag to warp and write the normalized images to output. 
In normalized images the image values are fit-scaled to be between 0 and the maximum storage type value.", - argstr="--outputNormalized ") + desc="Flag to warp and write the normalized images to output. In normalized images the image values are fit-scaled to be between 0 and the maximum storage type value.", + argstr="--outputNormalized ", + ) outputDebug = traits.Bool( - desc="Flag to write debugging images after each step.", - argstr="--outputDebug ") + desc="Flag to write debugging images after each step.", argstr="--outputDebug " + ) weightFactors = InputMultiPath( traits.Float, desc="Weight fatctors for each input images", sep=",", - argstr="--weightFactors %s") + argstr="--weightFactors %s", + ) gradient_type = traits.Enum( "0", "1", "2", - desc= - "Type of gradient used for computing the demons force (0 is symmetrized, 1 is fixed image, 2 is moving image)", - argstr="--gradient_type %s") + desc="Type of gradient used for computing the demons force (0 is symmetrized, 1 is fixed image, 2 is moving image)", + argstr="--gradient_type %s", + ) upFieldSmoothing = traits.Float( desc="Smoothing sigma for the update field at each iteration", - argstr="--upFieldSmoothing %f") + argstr="--upFieldSmoothing %f", + ) max_step_length = traits.Float( desc="Maximum length of an update vector (0: no restriction)", - argstr="--max_step_length %f") + argstr="--max_step_length %f", + ) use_vanilla_dem = traits.Bool( - desc="Run vanilla demons algorithm", argstr="--use_vanilla_dem ") + desc="Run vanilla demons algorithm", argstr="--use_vanilla_dem " + ) gui = traits.Bool( - desc="Display intermediate image volumes for debugging", - argstr="--gui ") + desc="Display intermediate image volumes for debugging", argstr="--gui " + ) promptUser = traits.Bool( - desc= - "Prompt the user to hit enter each time an image is sent to the DebugImageViewer", - argstr="--promptUser ") + desc="Prompt the user to hit enter each time an image is sent to the DebugImageViewer", + argstr="--promptUser ", + ) numberOfBCHApproximationTerms = traits.Int( desc="Number of terms in the BCH expansion", - argstr="--numberOfBCHApproximationTerms %d") + argstr="--numberOfBCHApproximationTerms %d", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class VBRAINSDemonWarpOutputSpec(TraitedSpec): outputVolume = File( - desc= - "Required: output resampled moving image (will have the same physical space as the fixedVolume).", - exists=True) + desc="Required: output resampled moving image (will have the same physical space as the fixedVolume).", + exists=True, + ) outputDisplacementFieldVolume = File( - desc= - "Output deformation field vector image (will have the same physical space as the fixedVolume).", - exists=True) + desc="Output deformation field vector image (will have the same physical space as the fixedVolume).", + exists=True, + ) outputCheckerboardVolume = File( - desc= - "Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.", - exists=True) + desc="Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.", + exists=True, + ) class VBRAINSDemonWarp(SEMLikeCommandLine): @@ -252,54 +277,54 @@ class VBRAINSDemonWarp(SEMLikeCommandLine): output_spec = VBRAINSDemonWarpOutputSpec _cmd = " VBRAINSDemonWarp " _outputs_filenames = { - 'outputVolume': 'outputVolume.nii', - 'outputCheckerboardVolume': 'outputCheckerboardVolume.nii', - 
'outputDisplacementFieldVolume': 'outputDisplacementFieldVolume.nrrd' + "outputVolume": "outputVolume.nii", + "outputCheckerboardVolume": "outputCheckerboardVolume.nii", + "outputDisplacementFieldVolume": "outputDisplacementFieldVolume.nrrd", } _redirect_x = False class BRAINSDemonWarpInputSpec(CommandLineInputSpec): movingVolume = File( - desc="Required: input moving image", - exists=True, - argstr="--movingVolume %s") + desc="Required: input moving image", exists=True, argstr="--movingVolume %s" + ) fixedVolume = File( desc="Required: input fixed (target) image", exists=True, - argstr="--fixedVolume %s") + argstr="--fixedVolume %s", + ) inputPixelType = traits.Enum( "float", "short", "ushort", "int", "uchar", - desc= - "Input volumes will be typecast to this format: float|short|ushort|int|uchar", - argstr="--inputPixelType %s") + desc="Input volumes will be typecast to this format: float|short|ushort|int|uchar", + argstr="--inputPixelType %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Required: output resampled moving image (will have the same physical space as the fixedVolume).", - argstr="--outputVolume %s") + desc="Required: output resampled moving image (will have the same physical space as the fixedVolume).", + argstr="--outputVolume %s", + ) outputDisplacementFieldVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Output deformation field vector image (will have the same physical space as the fixedVolume).", - argstr="--outputDisplacementFieldVolume %s") + desc="Output deformation field vector image (will have the same physical space as the fixedVolume).", + argstr="--outputDisplacementFieldVolume %s", + ) outputPixelType = traits.Enum( "float", "short", "ushort", "int", "uchar", - desc= - "outputVolume will be typecast to this format: float|short|ushort|int|uchar", - argstr="--outputPixelType %s") + desc="outputVolume will be typecast to this format: float|short|ushort|int|uchar", + argstr="--outputPixelType %s", + ) interpolationMode = traits.Enum( "NearestNeighbor", "Linear", @@ -311,168 +336,181 @@ class BRAINSDemonWarpInputSpec(CommandLineInputSpec): "Welch", "Lanczos", "Blackman", - desc= - "Type of interpolation to be used when applying transform to moving volume. Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", - argstr="--interpolationMode %s") + desc="Type of interpolation to be used when applying transform to moving volume. 
Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", + argstr="--interpolationMode %s", + ) registrationFilterType = traits.Enum( "Demons", "FastSymmetricForces", "Diffeomorphic", - desc= - "Registration Filter Type: Demons|FastSymmetricForces|Diffeomorphic", - argstr="--registrationFilterType %s") + desc="Registration Filter Type: Demons|FastSymmetricForces|Diffeomorphic", + argstr="--registrationFilterType %s", + ) smoothDisplacementFieldSigma = traits.Float( - desc= - "A gaussian smoothing value to be applied to the deformation feild at each iteration.", - argstr="--smoothDisplacementFieldSigma %f") + desc="A gaussian smoothing value to be applied to the deformation feild at each iteration.", + argstr="--smoothDisplacementFieldSigma %f", + ) numberOfPyramidLevels = traits.Int( - desc= - "Number of image pyramid levels to use in the multi-resolution registration.", - argstr="--numberOfPyramidLevels %d") + desc="Number of image pyramid levels to use in the multi-resolution registration.", + argstr="--numberOfPyramidLevels %d", + ) minimumFixedPyramid = InputMultiPath( traits.Int, - desc= - "The shrink factor for the first level of the fixed image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", + desc="The shrink factor for the first level of the fixed image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", sep=",", - argstr="--minimumFixedPyramid %s") + argstr="--minimumFixedPyramid %s", + ) minimumMovingPyramid = InputMultiPath( traits.Int, - desc= - "The shrink factor for the first level of the moving image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", + desc="The shrink factor for the first level of the moving image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", sep=",", - argstr="--minimumMovingPyramid %s") + argstr="--minimumMovingPyramid %s", + ) arrayOfPyramidLevelIterations = InputMultiPath( traits.Int, desc="The number of iterations for each pyramid level", sep=",", - argstr="--arrayOfPyramidLevelIterations %s") + argstr="--arrayOfPyramidLevelIterations %s", + ) histogramMatch = traits.Bool( - desc= - "Histogram Match the input images. This is suitable for images of the same modality that may have different absolute scales, but the same overall intensity profile.", - argstr="--histogramMatch ") + desc="Histogram Match the input images. This is suitable for images of the same modality that may have different absolute scales, but the same overall intensity profile.", + argstr="--histogramMatch ", + ) numberOfHistogramBins = traits.Int( - desc="The number of histogram levels", - argstr="--numberOfHistogramBins %d") + desc="The number of histogram levels", argstr="--numberOfHistogramBins %d" + ) numberOfMatchPoints = traits.Int( desc="The number of match points for histrogramMatch", - argstr="--numberOfMatchPoints %d") + argstr="--numberOfMatchPoints %d", + ) medianFilterSize = InputMultiPath( traits.Int, - desc= - "Median filter radius in all 3 directions. When images have a lot of salt and pepper noise, this step can improve the registration.", + desc="Median filter radius in all 3 directions. 
When images have a lot of salt and pepper noise, this step can improve the registration.", sep=",", - argstr="--medianFilterSize %s") + argstr="--medianFilterSize %s", + ) initializeWithDisplacementField = File( desc="Initial deformation field vector image file name", exists=True, - argstr="--initializeWithDisplacementField %s") + argstr="--initializeWithDisplacementField %s", + ) initializeWithTransform = File( desc="Initial Transform filename", exists=True, - argstr="--initializeWithTransform %s") + argstr="--initializeWithTransform %s", + ) maskProcessingMode = traits.Enum( "NOMASK", "ROIAUTO", "ROI", "BOBF", - desc= - "What mode to use for using the masks: NOMASK|ROIAUTO|ROI|BOBF. If ROIAUTO is choosen, then the mask is implicitly defined using a otsu forground and hole filling algorithm. Where the Region Of Interest mode uses the masks to define what parts of the image should be used for computing the deformation field. Brain Only Background Fill uses the masks to pre-process the input images by clipping and filling in the background with a predefined value.", - argstr="--maskProcessingMode %s") + desc="What mode to use for using the masks: NOMASK|ROIAUTO|ROI|BOBF. If ROIAUTO is choosen, then the mask is implicitly defined using a otsu forground and hole filling algorithm. Where the Region Of Interest mode uses the masks to define what parts of the image should be used for computing the deformation field. Brain Only Background Fill uses the masks to pre-process the input images by clipping and filling in the background with a predefined value.", + argstr="--maskProcessingMode %s", + ) fixedBinaryVolume = File( desc="Mask filename for desired region of interest in the Fixed image.", exists=True, - argstr="--fixedBinaryVolume %s") + argstr="--fixedBinaryVolume %s", + ) movingBinaryVolume = File( - desc= - "Mask filename for desired region of interest in the Moving image.", + desc="Mask filename for desired region of interest in the Moving image.", exists=True, - argstr="--movingBinaryVolume %s") + argstr="--movingBinaryVolume %s", + ) lowerThresholdForBOBF = traits.Int( - desc="Lower threshold for performing BOBF", - argstr="--lowerThresholdForBOBF %d") + desc="Lower threshold for performing BOBF", argstr="--lowerThresholdForBOBF %d" + ) upperThresholdForBOBF = traits.Int( - desc="Upper threshold for performing BOBF", - argstr="--upperThresholdForBOBF %d") + desc="Upper threshold for performing BOBF", argstr="--upperThresholdForBOBF %d" + ) backgroundFillValue = traits.Int( desc="Replacement value to overwrite background when performing BOBF", - argstr="--backgroundFillValue %d") + argstr="--backgroundFillValue %d", + ) seedForBOBF = InputMultiPath( traits.Int, desc="coordinates in all 3 directions for Seed when performing BOBF", sep=",", - argstr="--seedForBOBF %s") + argstr="--seedForBOBF %s", + ) neighborhoodForBOBF = InputMultiPath( traits.Int, - desc= - "neighborhood in all 3 directions to be included when performing BOBF", + desc="neighborhood in all 3 directions to be included when performing BOBF", sep=",", - argstr="--neighborhoodForBOBF %s") + argstr="--neighborhoodForBOBF %s", + ) outputDisplacementFieldPrefix = traits.Str( - desc= - "Displacement field filename prefix for writing separate x, y, and z component images", - argstr="--outputDisplacementFieldPrefix %s") + desc="Displacement field filename prefix for writing separate x, y, and z component images", + argstr="--outputDisplacementFieldPrefix %s", + ) outputCheckerboardVolume = traits.Either( traits.Bool, File(), 
hash_files=False, - desc= - "Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.", - argstr="--outputCheckerboardVolume %s") + desc="Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.", + argstr="--outputCheckerboardVolume %s", + ) checkerboardPatternSubdivisions = InputMultiPath( traits.Int, desc="Number of Checkerboard subdivisions in all 3 directions", sep=",", - argstr="--checkerboardPatternSubdivisions %s") + argstr="--checkerboardPatternSubdivisions %s", + ) outputNormalized = traits.Bool( - desc= - "Flag to warp and write the normalized images to output. In normalized images the image values are fit-scaled to be between 0 and the maximum storage type value.", - argstr="--outputNormalized ") + desc="Flag to warp and write the normalized images to output. In normalized images the image values are fit-scaled to be between 0 and the maximum storage type value.", + argstr="--outputNormalized ", + ) outputDebug = traits.Bool( - desc="Flag to write debugging images after each step.", - argstr="--outputDebug ") + desc="Flag to write debugging images after each step.", argstr="--outputDebug " + ) gradient_type = traits.Enum( "0", "1", "2", - desc= - "Type of gradient used for computing the demons force (0 is symmetrized, 1 is fixed image, 2 is moving image)", - argstr="--gradient_type %s") + desc="Type of gradient used for computing the demons force (0 is symmetrized, 1 is fixed image, 2 is moving image)", + argstr="--gradient_type %s", + ) upFieldSmoothing = traits.Float( desc="Smoothing sigma for the update field at each iteration", - argstr="--upFieldSmoothing %f") + argstr="--upFieldSmoothing %f", + ) max_step_length = traits.Float( desc="Maximum length of an update vector (0: no restriction)", - argstr="--max_step_length %f") + argstr="--max_step_length %f", + ) use_vanilla_dem = traits.Bool( - desc="Run vanilla demons algorithm", argstr="--use_vanilla_dem ") + desc="Run vanilla demons algorithm", argstr="--use_vanilla_dem " + ) gui = traits.Bool( - desc="Display intermediate image volumes for debugging", - argstr="--gui ") + desc="Display intermediate image volumes for debugging", argstr="--gui " + ) promptUser = traits.Bool( - desc= - "Prompt the user to hit enter each time an image is sent to the DebugImageViewer", - argstr="--promptUser ") + desc="Prompt the user to hit enter each time an image is sent to the DebugImageViewer", + argstr="--promptUser ", + ) numberOfBCHApproximationTerms = traits.Int( desc="Number of terms in the BCH expansion", - argstr="--numberOfBCHApproximationTerms %d") + argstr="--numberOfBCHApproximationTerms %d", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class BRAINSDemonWarpOutputSpec(TraitedSpec): outputVolume = File( - desc= - "Required: output resampled moving image (will have the same physical space as the fixedVolume).", - exists=True) + desc="Required: output resampled moving image (will have the same physical space as the fixedVolume).", + exists=True, + ) outputDisplacementFieldVolume = File( - desc= - "Output deformation field vector image (will have the same physical space as the fixedVolume).", - exists=True) + desc="Output deformation field vector image (will have the same physical space as the fixedVolume).", + exists=True, + ) outputCheckerboardVolume = File( - desc= - "Genete a checkerboard image volume between the fixedVolume and the deformed 
movingVolume.", - exists=True) + desc="Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.", + exists=True, + ) class BRAINSDemonWarp(SEMLikeCommandLine): @@ -498,9 +536,9 @@ class BRAINSDemonWarp(SEMLikeCommandLine): output_spec = BRAINSDemonWarpOutputSpec _cmd = " BRAINSDemonWarp " _outputs_filenames = { - 'outputVolume': 'outputVolume.nii', - 'outputCheckerboardVolume': 'outputCheckerboardVolume.nii', - 'outputDisplacementFieldVolume': 'outputDisplacementFieldVolume.nrrd' + "outputVolume": "outputVolume.nii", + "outputCheckerboardVolume": "outputCheckerboardVolume.nii", + "outputDisplacementFieldVolume": "outputDisplacementFieldVolume.nrrd", } _redirect_x = False @@ -509,39 +547,47 @@ class BRAINSTransformFromFiducialsInputSpec(CommandLineInputSpec): fixedLandmarks = InputMultiPath( traits.List(traits.Float(), minlen=3, maxlen=3), desc="Ordered list of landmarks in the fixed image", - argstr="--fixedLandmarks %s...") + argstr="--fixedLandmarks %s...", + ) movingLandmarks = InputMultiPath( traits.List(traits.Float(), minlen=3, maxlen=3), desc="Ordered list of landmarks in the moving image", - argstr="--movingLandmarks %s...") + argstr="--movingLandmarks %s...", + ) saveTransform = traits.Either( traits.Bool, File(), hash_files=False, desc="Save the transform that results from registration", - argstr="--saveTransform %s") + argstr="--saveTransform %s", + ) transformType = traits.Enum( "Translation", "Rigid", "Similarity", desc="Type of transform to produce", - argstr="--transformType %s") + argstr="--transformType %s", + ) fixedLandmarksFile = File( desc="An fcsv formatted file with a list of landmark points.", exists=True, - argstr="--fixedLandmarksFile %s") + argstr="--fixedLandmarksFile %s", + ) movingLandmarksFile = File( desc="An fcsv formatted file with a list of landmark points.", exists=True, - argstr="--movingLandmarksFile %s") + argstr="--movingLandmarksFile %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class BRAINSTransformFromFiducialsOutputSpec(TraitedSpec): saveTransform = File( - desc="Save the transform that results from registration", exists=True) + desc="Save the transform that results from registration", exists=True + ) class BRAINSTransformFromFiducials(SEMLikeCommandLine): @@ -564,5 +610,5 @@ class BRAINSTransformFromFiducials(SEMLikeCommandLine): input_spec = BRAINSTransformFromFiducialsInputSpec output_spec = BRAINSTransformFromFiducialsOutputSpec _cmd = " BRAINSTransformFromFiducials " - _outputs_filenames = {'saveTransform': 'saveTransform.h5'} + _outputs_filenames = {"saveTransform": "saveTransform.h5"} _redirect_x = False diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py index efe6d50fce..4136c8105d 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py @@ -4,113 +4,77 @@ def test_BRAINSDemonWarp_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), arrayOfPyramidLevelIterations=dict( - argstr='--arrayOfPyramidLevelIterations %s', - sep=',', + argstr="--arrayOfPyramidLevelIterations %s", sep=",", ), - backgroundFillValue=dict(argstr='--backgroundFillValue %d', ), + backgroundFillValue=dict(argstr="--backgroundFillValue %d",), 
checkerboardPatternSubdivisions=dict( - argstr='--checkerboardPatternSubdivisions %s', - sep=',', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixedBinaryVolume=dict( - argstr='--fixedBinaryVolume %s', - extensions=None, - ), - fixedVolume=dict( - argstr='--fixedVolume %s', - extensions=None, - ), - gradient_type=dict(argstr='--gradient_type %s', ), - gui=dict(argstr='--gui ', ), - histogramMatch=dict(argstr='--histogramMatch ', ), + argstr="--checkerboardPatternSubdivisions %s", sep=",", + ), + environ=dict(nohash=True, usedefault=True,), + fixedBinaryVolume=dict(argstr="--fixedBinaryVolume %s", extensions=None,), + fixedVolume=dict(argstr="--fixedVolume %s", extensions=None,), + gradient_type=dict(argstr="--gradient_type %s",), + gui=dict(argstr="--gui ",), + histogramMatch=dict(argstr="--histogramMatch ",), initializeWithDisplacementField=dict( - argstr='--initializeWithDisplacementField %s', - extensions=None, + argstr="--initializeWithDisplacementField %s", extensions=None, ), initializeWithTransform=dict( - argstr='--initializeWithTransform %s', - extensions=None, - ), - inputPixelType=dict(argstr='--inputPixelType %s', ), - interpolationMode=dict(argstr='--interpolationMode %s', ), - lowerThresholdForBOBF=dict(argstr='--lowerThresholdForBOBF %d', ), - maskProcessingMode=dict(argstr='--maskProcessingMode %s', ), - max_step_length=dict(argstr='--max_step_length %f', ), - medianFilterSize=dict( - argstr='--medianFilterSize %s', - sep=',', - ), - minimumFixedPyramid=dict( - argstr='--minimumFixedPyramid %s', - sep=',', - ), - minimumMovingPyramid=dict( - argstr='--minimumMovingPyramid %s', - sep=',', - ), - movingBinaryVolume=dict( - argstr='--movingBinaryVolume %s', - extensions=None, - ), - movingVolume=dict( - argstr='--movingVolume %s', - extensions=None, - ), - neighborhoodForBOBF=dict( - argstr='--neighborhoodForBOBF %s', - sep=',', - ), + argstr="--initializeWithTransform %s", extensions=None, + ), + inputPixelType=dict(argstr="--inputPixelType %s",), + interpolationMode=dict(argstr="--interpolationMode %s",), + lowerThresholdForBOBF=dict(argstr="--lowerThresholdForBOBF %d",), + maskProcessingMode=dict(argstr="--maskProcessingMode %s",), + max_step_length=dict(argstr="--max_step_length %f",), + medianFilterSize=dict(argstr="--medianFilterSize %s", sep=",",), + minimumFixedPyramid=dict(argstr="--minimumFixedPyramid %s", sep=",",), + minimumMovingPyramid=dict(argstr="--minimumMovingPyramid %s", sep=",",), + movingBinaryVolume=dict(argstr="--movingBinaryVolume %s", extensions=None,), + movingVolume=dict(argstr="--movingVolume %s", extensions=None,), + neighborhoodForBOBF=dict(argstr="--neighborhoodForBOBF %s", sep=",",), numberOfBCHApproximationTerms=dict( - argstr='--numberOfBCHApproximationTerms %d', ), - numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), - numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ), - numberOfPyramidLevels=dict(argstr='--numberOfPyramidLevels %d', ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), + argstr="--numberOfBCHApproximationTerms %d", + ), + numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d",), + numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d",), + numberOfPyramidLevels=dict(argstr="--numberOfPyramidLevels %d",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), outputCheckerboardVolume=dict( - argstr='--outputCheckerboardVolume %s', - hash_files=False, + argstr="--outputCheckerboardVolume %s", hash_files=False, ), - outputDebug=dict(argstr='--outputDebug ', ), + 
outputDebug=dict(argstr="--outputDebug ",), outputDisplacementFieldPrefix=dict( - argstr='--outputDisplacementFieldPrefix %s', ), - outputDisplacementFieldVolume=dict( - argstr='--outputDisplacementFieldVolume %s', - hash_files=False, - ), - outputNormalized=dict(argstr='--outputNormalized ', ), - outputPixelType=dict(argstr='--outputPixelType %s', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, + argstr="--outputDisplacementFieldPrefix %s", ), - promptUser=dict(argstr='--promptUser ', ), - registrationFilterType=dict(argstr='--registrationFilterType %s', ), - seedForBOBF=dict( - argstr='--seedForBOBF %s', - sep=',', - ), - smoothDisplacementFieldSigma=dict( - argstr='--smoothDisplacementFieldSigma %f', ), - upFieldSmoothing=dict(argstr='--upFieldSmoothing %f', ), - upperThresholdForBOBF=dict(argstr='--upperThresholdForBOBF %d', ), - use_vanilla_dem=dict(argstr='--use_vanilla_dem ', ), + outputDisplacementFieldVolume=dict( + argstr="--outputDisplacementFieldVolume %s", hash_files=False, + ), + outputNormalized=dict(argstr="--outputNormalized ",), + outputPixelType=dict(argstr="--outputPixelType %s",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + promptUser=dict(argstr="--promptUser ",), + registrationFilterType=dict(argstr="--registrationFilterType %s",), + seedForBOBF=dict(argstr="--seedForBOBF %s", sep=",",), + smoothDisplacementFieldSigma=dict(argstr="--smoothDisplacementFieldSigma %f",), + upFieldSmoothing=dict(argstr="--upFieldSmoothing %f",), + upperThresholdForBOBF=dict(argstr="--upperThresholdForBOBF %d",), + use_vanilla_dem=dict(argstr="--use_vanilla_dem ",), ) inputs = BRAINSDemonWarp.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSDemonWarp_outputs(): output_map = dict( - outputCheckerboardVolume=dict(extensions=None, ), - outputDisplacementFieldVolume=dict(extensions=None, ), - outputVolume=dict(extensions=None, ), + outputCheckerboardVolume=dict(extensions=None,), + outputDisplacementFieldVolume=dict(extensions=None,), + outputVolume=dict(extensions=None,), ) outputs = BRAINSDemonWarp.output_spec() diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py index 81422f2296..683d34694b 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py @@ -4,159 +4,101 @@ def test_BRAINSFit_inputs(): input_map = dict( - ROIAutoClosingSize=dict(argstr='--ROIAutoClosingSize %f', ), - ROIAutoDilateSize=dict(argstr='--ROIAutoDilateSize %f', ), - args=dict(argstr='%s', ), - backgroundFillValue=dict(argstr='--backgroundFillValue %f', ), - bsplineTransform=dict( - argstr='--bsplineTransform %s', - hash_files=False, - ), + ROIAutoClosingSize=dict(argstr="--ROIAutoClosingSize %f",), + ROIAutoDilateSize=dict(argstr="--ROIAutoDilateSize %f",), + args=dict(argstr="%s",), + backgroundFillValue=dict(argstr="--backgroundFillValue %f",), + bsplineTransform=dict(argstr="--bsplineTransform %s", hash_files=False,), costFunctionConvergenceFactor=dict( - argstr='--costFunctionConvergenceFactor %f', ), - costMetric=dict(argstr='--costMetric %s', ), - debugLevel=dict(argstr='--debugLevel %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - failureExitCode=dict(argstr='--failureExitCode %d', ), - fixedBinaryVolume=dict( - 
argstr='--fixedBinaryVolume %s', - extensions=None, - ), - fixedVolume=dict( - argstr='--fixedVolume %s', - extensions=None, - ), - fixedVolume2=dict( - argstr='--fixedVolume2 %s', - extensions=None, - ), - fixedVolumeTimeIndex=dict(argstr='--fixedVolumeTimeIndex %d', ), - gui=dict(argstr='--gui ', ), - histogramMatch=dict(argstr='--histogramMatch ', ), - initialTransform=dict( - argstr='--initialTransform %s', - extensions=None, - ), + argstr="--costFunctionConvergenceFactor %f", + ), + costMetric=dict(argstr="--costMetric %s",), + debugLevel=dict(argstr="--debugLevel %d",), + environ=dict(nohash=True, usedefault=True,), + failureExitCode=dict(argstr="--failureExitCode %d",), + fixedBinaryVolume=dict(argstr="--fixedBinaryVolume %s", extensions=None,), + fixedVolume=dict(argstr="--fixedVolume %s", extensions=None,), + fixedVolume2=dict(argstr="--fixedVolume2 %s", extensions=None,), + fixedVolumeTimeIndex=dict(argstr="--fixedVolumeTimeIndex %d",), + gui=dict(argstr="--gui ",), + histogramMatch=dict(argstr="--histogramMatch ",), + initialTransform=dict(argstr="--initialTransform %s", extensions=None,), initializeRegistrationByCurrentGenericTransform=dict( - argstr='--initializeRegistrationByCurrentGenericTransform ', ), - initializeTransformMode=dict(argstr='--initializeTransformMode %s', ), - interpolationMode=dict(argstr='--interpolationMode %s', ), - linearTransform=dict( - argstr='--linearTransform %s', - hash_files=False, - ), - logFileReport=dict( - argstr='--logFileReport %s', - hash_files=False, - ), - maskInferiorCutOffFromCenter=dict( - argstr='--maskInferiorCutOffFromCenter %f', ), - maskProcessingMode=dict(argstr='--maskProcessingMode %s', ), - maxBSplineDisplacement=dict(argstr='--maxBSplineDisplacement %f', ), - maximumNumberOfCorrections=dict( - argstr='--maximumNumberOfCorrections %d', ), - maximumNumberOfEvaluations=dict( - argstr='--maximumNumberOfEvaluations %d', ), - maximumStepLength=dict(argstr='--maximumStepLength %f', ), - medianFilterSize=dict( - argstr='--medianFilterSize %s', - sep=',', - ), - metricSamplingStrategy=dict(argstr='--metricSamplingStrategy %s', ), - minimumStepLength=dict( - argstr='--minimumStepLength %s', - sep=',', - ), - movingBinaryVolume=dict( - argstr='--movingBinaryVolume %s', - extensions=None, - ), - movingVolume=dict( - argstr='--movingVolume %s', - extensions=None, - ), - movingVolume2=dict( - argstr='--movingVolume2 %s', - extensions=None, - ), - movingVolumeTimeIndex=dict(argstr='--movingVolumeTimeIndex %d', ), - numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), - numberOfIterations=dict( - argstr='--numberOfIterations %s', - sep=',', - ), - numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ), - numberOfSamples=dict(argstr='--numberOfSamples %d', ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), + argstr="--initializeRegistrationByCurrentGenericTransform ", + ), + initializeTransformMode=dict(argstr="--initializeTransformMode %s",), + interpolationMode=dict(argstr="--interpolationMode %s",), + linearTransform=dict(argstr="--linearTransform %s", hash_files=False,), + logFileReport=dict(argstr="--logFileReport %s", hash_files=False,), + maskInferiorCutOffFromCenter=dict(argstr="--maskInferiorCutOffFromCenter %f",), + maskProcessingMode=dict(argstr="--maskProcessingMode %s",), + maxBSplineDisplacement=dict(argstr="--maxBSplineDisplacement %f",), + maximumNumberOfCorrections=dict(argstr="--maximumNumberOfCorrections %d",), + maximumNumberOfEvaluations=dict(argstr="--maximumNumberOfEvaluations %d",), + 
maximumStepLength=dict(argstr="--maximumStepLength %f",), + medianFilterSize=dict(argstr="--medianFilterSize %s", sep=",",), + metricSamplingStrategy=dict(argstr="--metricSamplingStrategy %s",), + minimumStepLength=dict(argstr="--minimumStepLength %s", sep=",",), + movingBinaryVolume=dict(argstr="--movingBinaryVolume %s", extensions=None,), + movingVolume=dict(argstr="--movingVolume %s", extensions=None,), + movingVolume2=dict(argstr="--movingVolume2 %s", extensions=None,), + movingVolumeTimeIndex=dict(argstr="--movingVolumeTimeIndex %d",), + numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d",), + numberOfIterations=dict(argstr="--numberOfIterations %s", sep=",",), + numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d",), + numberOfSamples=dict(argstr="--numberOfSamples %d",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), outputFixedVolumeROI=dict( - argstr='--outputFixedVolumeROI %s', - hash_files=False, + argstr="--outputFixedVolumeROI %s", hash_files=False, ), outputMovingVolumeROI=dict( - argstr='--outputMovingVolumeROI %s', - hash_files=False, - ), - outputTransform=dict( - argstr='--outputTransform %s', - hash_files=False, - ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), - outputVolumePixelType=dict(argstr='--outputVolumePixelType %s', ), - projectedGradientTolerance=dict( - argstr='--projectedGradientTolerance %f', ), - promptUser=dict(argstr='--promptUser ', ), - relaxationFactor=dict(argstr='--relaxationFactor %f', ), - removeIntensityOutliers=dict(argstr='--removeIntensityOutliers %f', ), - reproportionScale=dict(argstr='--reproportionScale %f', ), - samplingPercentage=dict(argstr='--samplingPercentage %f', ), - scaleOutputValues=dict(argstr='--scaleOutputValues ', ), - skewScale=dict(argstr='--skewScale %f', ), - splineGridSize=dict( - argstr='--splineGridSize %s', - sep=',', - ), + argstr="--outputMovingVolumeROI %s", hash_files=False, + ), + outputTransform=dict(argstr="--outputTransform %s", hash_files=False,), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + outputVolumePixelType=dict(argstr="--outputVolumePixelType %s",), + projectedGradientTolerance=dict(argstr="--projectedGradientTolerance %f",), + promptUser=dict(argstr="--promptUser ",), + relaxationFactor=dict(argstr="--relaxationFactor %f",), + removeIntensityOutliers=dict(argstr="--removeIntensityOutliers %f",), + reproportionScale=dict(argstr="--reproportionScale %f",), + samplingPercentage=dict(argstr="--samplingPercentage %f",), + scaleOutputValues=dict(argstr="--scaleOutputValues ",), + skewScale=dict(argstr="--skewScale %f",), + splineGridSize=dict(argstr="--splineGridSize %s", sep=",",), strippedOutputTransform=dict( - argstr='--strippedOutputTransform %s', - hash_files=False, - ), - transformType=dict( - argstr='--transformType %s', - sep=',', - ), - translationScale=dict(argstr='--translationScale %f', ), - useAffine=dict(argstr='--useAffine ', ), - useBSpline=dict(argstr='--useBSpline ', ), - useComposite=dict(argstr='--useComposite ', ), - useROIBSpline=dict(argstr='--useROIBSpline ', ), - useRigid=dict(argstr='--useRigid ', ), - useScaleSkewVersor3D=dict(argstr='--useScaleSkewVersor3D ', ), - useScaleVersor3D=dict(argstr='--useScaleVersor3D ', ), - useSyN=dict(argstr='--useSyN ', ), - writeOutputTransformInFloat=dict( - argstr='--writeOutputTransformInFloat ', ), - writeTransformOnFailure=dict(argstr='--writeTransformOnFailure ', ), + argstr="--strippedOutputTransform %s", hash_files=False, + ), + 
transformType=dict(argstr="--transformType %s", sep=",",), + translationScale=dict(argstr="--translationScale %f",), + useAffine=dict(argstr="--useAffine ",), + useBSpline=dict(argstr="--useBSpline ",), + useComposite=dict(argstr="--useComposite ",), + useROIBSpline=dict(argstr="--useROIBSpline ",), + useRigid=dict(argstr="--useRigid ",), + useScaleSkewVersor3D=dict(argstr="--useScaleSkewVersor3D ",), + useScaleVersor3D=dict(argstr="--useScaleVersor3D ",), + useSyN=dict(argstr="--useSyN ",), + writeOutputTransformInFloat=dict(argstr="--writeOutputTransformInFloat ",), + writeTransformOnFailure=dict(argstr="--writeTransformOnFailure ",), ) inputs = BRAINSFit.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSFit_outputs(): output_map = dict( - bsplineTransform=dict(extensions=None, ), - linearTransform=dict(extensions=None, ), - logFileReport=dict(extensions=None, ), - outputFixedVolumeROI=dict(extensions=None, ), - outputMovingVolumeROI=dict(extensions=None, ), - outputTransform=dict(extensions=None, ), - outputVolume=dict(extensions=None, ), - strippedOutputTransform=dict(extensions=None, ), + bsplineTransform=dict(extensions=None,), + linearTransform=dict(extensions=None,), + logFileReport=dict(extensions=None,), + outputFixedVolumeROI=dict(extensions=None,), + outputMovingVolumeROI=dict(extensions=None,), + outputTransform=dict(extensions=None,), + outputVolume=dict(extensions=None,), + strippedOutputTransform=dict(extensions=None,), ) outputs = BRAINSFit.output_spec() diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py index 092689442d..2d94f19a5f 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py @@ -4,48 +4,29 @@ def test_BRAINSResample_inputs(): input_map = dict( - args=dict(argstr='%s', ), - defaultValue=dict(argstr='--defaultValue %f', ), - deformationVolume=dict( - argstr='--deformationVolume %s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gridSpacing=dict( - argstr='--gridSpacing %s', - sep=',', - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - interpolationMode=dict(argstr='--interpolationMode %s', ), - inverseTransform=dict(argstr='--inverseTransform ', ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), - pixelType=dict(argstr='--pixelType %s', ), - referenceVolume=dict( - argstr='--referenceVolume %s', - extensions=None, - ), - warpTransform=dict( - argstr='--warpTransform %s', - extensions=None, - ), + args=dict(argstr="%s",), + defaultValue=dict(argstr="--defaultValue %f",), + deformationVolume=dict(argstr="--deformationVolume %s", extensions=None,), + environ=dict(nohash=True, usedefault=True,), + gridSpacing=dict(argstr="--gridSpacing %s", sep=",",), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + interpolationMode=dict(argstr="--interpolationMode %s",), + inverseTransform=dict(argstr="--inverseTransform ",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + pixelType=dict(argstr="--pixelType %s",), + referenceVolume=dict(argstr="--referenceVolume %s", extensions=None,), + 
warpTransform=dict(argstr="--warpTransform %s", extensions=None,), ) inputs = BRAINSResample.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSResample_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = BRAINSResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py index 98280ec104..db477b2593 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py @@ -4,29 +4,22 @@ def test_BRAINSResize_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), - pixelType=dict(argstr='--pixelType %s', ), - scaleFactor=dict(argstr='--scaleFactor %f', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + pixelType=dict(argstr="--pixelType %s",), + scaleFactor=dict(argstr="--scaleFactor %f",), ) inputs = BRAINSResize.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSResize_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = BRAINSResize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py index 5fea3e44e3..4e5c6ae239 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py @@ -4,35 +4,25 @@ def test_BRAINSTransformFromFiducials_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixedLandmarks=dict(argstr='--fixedLandmarks %s...', ), - fixedLandmarksFile=dict( - argstr='--fixedLandmarksFile %s', - extensions=None, - ), - movingLandmarks=dict(argstr='--movingLandmarks %s...', ), - movingLandmarksFile=dict( - argstr='--movingLandmarksFile %s', - extensions=None, - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - saveTransform=dict( - argstr='--saveTransform %s', - hash_files=False, - ), - transformType=dict(argstr='--transformType %s', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + fixedLandmarks=dict(argstr="--fixedLandmarks %s...",), + fixedLandmarksFile=dict(argstr="--fixedLandmarksFile %s", extensions=None,), + movingLandmarks=dict(argstr="--movingLandmarks %s...",), + movingLandmarksFile=dict(argstr="--movingLandmarksFile %s", extensions=None,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + saveTransform=dict(argstr="--saveTransform %s", hash_files=False,), + transformType=dict(argstr="--transformType %s",), 
) inputs = BRAINSTransformFromFiducials.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSTransformFromFiducials_outputs(): - output_map = dict(saveTransform=dict(extensions=None, ), ) + output_map = dict(saveTransform=dict(extensions=None,),) outputs = BRAINSTransformFromFiducials.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py b/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py index fb0a3dd88e..098360c1e3 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py @@ -4,111 +4,78 @@ def test_VBRAINSDemonWarp_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), arrayOfPyramidLevelIterations=dict( - argstr='--arrayOfPyramidLevelIterations %s', - sep=',', + argstr="--arrayOfPyramidLevelIterations %s", sep=",", ), - backgroundFillValue=dict(argstr='--backgroundFillValue %d', ), + backgroundFillValue=dict(argstr="--backgroundFillValue %d",), checkerboardPatternSubdivisions=dict( - argstr='--checkerboardPatternSubdivisions %s', - sep=',', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixedBinaryVolume=dict( - argstr='--fixedBinaryVolume %s', - extensions=None, - ), - fixedVolume=dict(argstr='--fixedVolume %s...', ), - gradient_type=dict(argstr='--gradient_type %s', ), - gui=dict(argstr='--gui ', ), - histogramMatch=dict(argstr='--histogramMatch ', ), + argstr="--checkerboardPatternSubdivisions %s", sep=",", + ), + environ=dict(nohash=True, usedefault=True,), + fixedBinaryVolume=dict(argstr="--fixedBinaryVolume %s", extensions=None,), + fixedVolume=dict(argstr="--fixedVolume %s...",), + gradient_type=dict(argstr="--gradient_type %s",), + gui=dict(argstr="--gui ",), + histogramMatch=dict(argstr="--histogramMatch ",), initializeWithDisplacementField=dict( - argstr='--initializeWithDisplacementField %s', - extensions=None, + argstr="--initializeWithDisplacementField %s", extensions=None, ), initializeWithTransform=dict( - argstr='--initializeWithTransform %s', - extensions=None, - ), - inputPixelType=dict(argstr='--inputPixelType %s', ), - interpolationMode=dict(argstr='--interpolationMode %s', ), - lowerThresholdForBOBF=dict(argstr='--lowerThresholdForBOBF %d', ), - makeBOBF=dict(argstr='--makeBOBF ', ), - max_step_length=dict(argstr='--max_step_length %f', ), - medianFilterSize=dict( - argstr='--medianFilterSize %s', - sep=',', - ), - minimumFixedPyramid=dict( - argstr='--minimumFixedPyramid %s', - sep=',', - ), - minimumMovingPyramid=dict( - argstr='--minimumMovingPyramid %s', - sep=',', - ), - movingBinaryVolume=dict( - argstr='--movingBinaryVolume %s', - extensions=None, - ), - movingVolume=dict(argstr='--movingVolume %s...', ), - neighborhoodForBOBF=dict( - argstr='--neighborhoodForBOBF %s', - sep=',', - ), + argstr="--initializeWithTransform %s", extensions=None, + ), + inputPixelType=dict(argstr="--inputPixelType %s",), + interpolationMode=dict(argstr="--interpolationMode %s",), + lowerThresholdForBOBF=dict(argstr="--lowerThresholdForBOBF %d",), + makeBOBF=dict(argstr="--makeBOBF ",), + max_step_length=dict(argstr="--max_step_length %f",), + medianFilterSize=dict(argstr="--medianFilterSize %s", sep=",",), + minimumFixedPyramid=dict(argstr="--minimumFixedPyramid %s", sep=",",), + 
minimumMovingPyramid=dict(argstr="--minimumMovingPyramid %s", sep=",",), + movingBinaryVolume=dict(argstr="--movingBinaryVolume %s", extensions=None,), + movingVolume=dict(argstr="--movingVolume %s...",), + neighborhoodForBOBF=dict(argstr="--neighborhoodForBOBF %s", sep=",",), numberOfBCHApproximationTerms=dict( - argstr='--numberOfBCHApproximationTerms %d', ), - numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), - numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ), - numberOfPyramidLevels=dict(argstr='--numberOfPyramidLevels %d', ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), + argstr="--numberOfBCHApproximationTerms %d", + ), + numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d",), + numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d",), + numberOfPyramidLevels=dict(argstr="--numberOfPyramidLevels %d",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), outputCheckerboardVolume=dict( - argstr='--outputCheckerboardVolume %s', - hash_files=False, + argstr="--outputCheckerboardVolume %s", hash_files=False, ), - outputDebug=dict(argstr='--outputDebug ', ), + outputDebug=dict(argstr="--outputDebug ",), outputDisplacementFieldPrefix=dict( - argstr='--outputDisplacementFieldPrefix %s', ), - outputDisplacementFieldVolume=dict( - argstr='--outputDisplacementFieldVolume %s', - hash_files=False, - ), - outputNormalized=dict(argstr='--outputNormalized ', ), - outputPixelType=dict(argstr='--outputPixelType %s', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), - promptUser=dict(argstr='--promptUser ', ), - registrationFilterType=dict(argstr='--registrationFilterType %s', ), - seedForBOBF=dict( - argstr='--seedForBOBF %s', - sep=',', - ), - smoothDisplacementFieldSigma=dict( - argstr='--smoothDisplacementFieldSigma %f', ), - upFieldSmoothing=dict(argstr='--upFieldSmoothing %f', ), - upperThresholdForBOBF=dict(argstr='--upperThresholdForBOBF %d', ), - use_vanilla_dem=dict(argstr='--use_vanilla_dem ', ), - weightFactors=dict( - argstr='--weightFactors %s', - sep=',', + argstr="--outputDisplacementFieldPrefix %s", ), + outputDisplacementFieldVolume=dict( + argstr="--outputDisplacementFieldVolume %s", hash_files=False, + ), + outputNormalized=dict(argstr="--outputNormalized ",), + outputPixelType=dict(argstr="--outputPixelType %s",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + promptUser=dict(argstr="--promptUser ",), + registrationFilterType=dict(argstr="--registrationFilterType %s",), + seedForBOBF=dict(argstr="--seedForBOBF %s", sep=",",), + smoothDisplacementFieldSigma=dict(argstr="--smoothDisplacementFieldSigma %f",), + upFieldSmoothing=dict(argstr="--upFieldSmoothing %f",), + upperThresholdForBOBF=dict(argstr="--upperThresholdForBOBF %d",), + use_vanilla_dem=dict(argstr="--use_vanilla_dem ",), + weightFactors=dict(argstr="--weightFactors %s", sep=",",), ) inputs = VBRAINSDemonWarp.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_VBRAINSDemonWarp_outputs(): output_map = dict( - outputCheckerboardVolume=dict(extensions=None, ), - outputDisplacementFieldVolume=dict(extensions=None, ), - outputVolume=dict(extensions=None, ), + outputCheckerboardVolume=dict(extensions=None,), + outputDisplacementFieldVolume=dict(extensions=None,), + outputVolume=dict(extensions=None,), ) outputs = VBRAINSDemonWarp.output_spec() diff --git 
a/nipype/interfaces/semtools/segmentation/__init__.py b/nipype/interfaces/semtools/segmentation/__init__.py index e5ea4e2776..0cc6090203 100644 --- a/nipype/interfaces/semtools/segmentation/__init__.py +++ b/nipype/interfaces/semtools/segmentation/__init__.py @@ -1,5 +1,11 @@ # -*- coding: utf-8 -*- from .specialized import ( - BRAINSCut, BRAINSROIAuto, BRAINSConstellationDetector, - BRAINSCreateLabelMapFromProbabilityMaps, BinaryMaskEditorBasedOnLandmarks, - BRAINSMultiSTAPLE, BRAINSABC, ESLR) + BRAINSCut, + BRAINSROIAuto, + BRAINSConstellationDetector, + BRAINSCreateLabelMapFromProbabilityMaps, + BinaryMaskEditorBasedOnLandmarks, + BRAINSMultiSTAPLE, + BRAINSABC, + ESLR, +) diff --git a/nipype/interfaces/semtools/segmentation/specialized.py b/nipype/interfaces/semtools/segmentation/specialized.py index fa08b8e260..483b5470fe 100644 --- a/nipype/interfaces/semtools/segmentation/specialized.py +++ b/nipype/interfaces/semtools/segmentation/specialized.py @@ -5,66 +5,78 @@ import os -from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, - TraitedSpec, File, Directory, traits, isdefined, - InputMultiPath, OutputMultiPath) +from ...base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) class BRAINSCutInputSpec(CommandLineInputSpec): netConfiguration = File( - desc= - "XML File defining BRAINSCut parameters. OLD NAME. PLEASE USE modelConfigurationFilename instead.", + desc="XML File defining BRAINSCut parameters. OLD NAME. PLEASE USE modelConfigurationFilename instead.", exists=True, - argstr="--netConfiguration %s") + argstr="--netConfiguration %s", + ) modelConfigurationFilename = File( desc="XML File defining BRAINSCut parameters", exists=True, - argstr="--modelConfigurationFilename %s") + argstr="--modelConfigurationFilename %s", + ) trainModelStartIndex = traits.Int( - desc="Starting iteration for training", - argstr="--trainModelStartIndex %d") + desc="Starting iteration for training", argstr="--trainModelStartIndex %d" + ) verbose = traits.Int( - desc="print out some debugging information", argstr="--verbose %d") + desc="print out some debugging information", argstr="--verbose %d" + ) multiStructureThreshold = traits.Bool( desc="multiStructureThreshold module to deal with overlaping area", - argstr="--multiStructureThreshold ") + argstr="--multiStructureThreshold ", + ) histogramEqualization = traits.Bool( - desc= - "A Histogram Equalization process could be added to the creating/applying process from Subject To Atlas. Default is false, which genreate input vectors without Histogram Equalization. ", - argstr="--histogramEqualization ") + desc="A Histogram Equalization process could be added to the creating/applying process from Subject To Atlas. Default is false, which genreate input vectors without Histogram Equalization. 
", + argstr="--histogramEqualization ", + ) computeSSEOn = traits.Bool( - desc= - "compute Sum of Square Error (SSE) along the trained model until the number of iteration given in the modelConfigurationFilename file", - argstr="--computeSSEOn ") + desc="compute Sum of Square Error (SSE) along the trained model until the number of iteration given in the modelConfigurationFilename file", + argstr="--computeSSEOn ", + ) generateProbability = traits.Bool( - desc="Generate probability map", argstr="--generateProbability ") + desc="Generate probability map", argstr="--generateProbability " + ) createVectors = traits.Bool( - desc="create vectors for training neural net", - argstr="--createVectors ") - trainModel = traits.Bool( - desc="train the neural net", argstr="--trainModel ") + desc="create vectors for training neural net", argstr="--createVectors " + ) + trainModel = traits.Bool(desc="train the neural net", argstr="--trainModel ") NoTrainingVectorShuffling = traits.Bool( desc="If this flag is on, there will be no shuffling.", - argstr="--NoTrainingVectorShuffling ") - applyModel = traits.Bool( - desc="apply the neural net", argstr="--applyModel ") + argstr="--NoTrainingVectorShuffling ", + ) + applyModel = traits.Bool(desc="apply the neural net", argstr="--applyModel ") validate = traits.Bool( - desc= - "validate data set.Just need for the first time run ( This is for validation of xml file and not working yet )", - argstr="--validate ") + desc="validate data set.Just need for the first time run ( This is for validation of xml file and not working yet )", + argstr="--validate ", + ) method = traits.Enum("RandomForest", "ANN", argstr="--method %s") numberOfTrees = traits.Int( - desc= - " Random tree: number of trees. This is to be used when only one model with specified depth wish to be created. ", - argstr="--numberOfTrees %d") + desc=" Random tree: number of trees. This is to be used when only one model with specified depth wish to be created. ", + argstr="--numberOfTrees %d", + ) randomTreeDepth = traits.Int( - desc= - " Random tree depth. This is to be used when only one model with specified depth wish to be created. ", - argstr="--randomTreeDepth %d") + desc=" Random tree depth. This is to be used when only one model with specified depth wish to be created. 
", + argstr="--randomTreeDepth %d", + ) modelFilename = traits.Str( - desc= - " model file name given from user (not by xml configuration file) ", - argstr="--modelFilename %s") + desc=" model file name given from user (not by xml configuration file) ", + argstr="--modelFilename %s", + ) class BRAINSCutOutputSpec(TraitedSpec): @@ -97,41 +109,45 @@ class BRAINSROIAutoInputSpec(CommandLineInputSpec): inputVolume = File( desc="The input image for finding the largest region filled mask.", exists=True, - argstr="--inputVolume %s") + argstr="--inputVolume %s", + ) outputROIMaskVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="The ROI automatically found from the input image.", - argstr="--outputROIMaskVolume %s") + argstr="--outputROIMaskVolume %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "The inputVolume with optional [maskOutput|cropOutput] to the region of the brain mask.", - argstr="--outputVolume %s") + desc="The inputVolume with optional [maskOutput|cropOutput] to the region of the brain mask.", + argstr="--outputVolume %s", + ) maskOutput = traits.Bool( - desc="The inputVolume multiplied by the ROI mask.", - argstr="--maskOutput ") + desc="The inputVolume multiplied by the ROI mask.", argstr="--maskOutput " + ) cropOutput = traits.Bool( desc="The inputVolume cropped to the region of the ROI mask.", - argstr="--cropOutput ") + argstr="--cropOutput ", + ) otsuPercentileThreshold = traits.Float( desc="Parameter to the Otsu threshold algorithm.", - argstr="--otsuPercentileThreshold %f") + argstr="--otsuPercentileThreshold %f", + ) thresholdCorrectionFactor = traits.Float( - desc= - "A factor to scale the Otsu algorithm's result threshold, in case clipping mangles the image.", - argstr="--thresholdCorrectionFactor %f") + desc="A factor to scale the Otsu algorithm's result threshold, in case clipping mangles the image.", + argstr="--thresholdCorrectionFactor %f", + ) closingSize = traits.Float( - desc= - "The Closing Size (in millimeters) for largest connected filled mask. This value is divided by image spacing and rounded to the next largest voxel number.", - argstr="--closingSize %f") + desc="The Closing Size (in millimeters) for largest connected filled mask. This value is divided by image spacing and rounded to the next largest voxel number.", + argstr="--closingSize %f", + ) ROIAutoDilateSize = traits.Float( - desc= - "This flag is only relavent when using ROIAUTO mode for initializing masks. It defines the final dilation size to capture a bit of background outside the tissue region. At setting of 10mm has been shown to help regularize a BSpline registration type so that there is some background constraints to match the edges of the head better.", - argstr="--ROIAutoDilateSize %f") + desc="This flag is only relavent when using ROIAUTO mode for initializing masks. It defines the final dilation size to capture a bit of background outside the tissue region. 
At setting of 10mm has been shown to help regularize a BSpline registration type so that there is some background constraints to match the edges of the head better.", + argstr="--ROIAutoDilateSize %f", + ) outputVolumePixelType = traits.Enum( "float", "short", @@ -139,21 +155,23 @@ class BRAINSROIAutoInputSpec(CommandLineInputSpec): "int", "uint", "uchar", - desc= - "The output image Pixel Type is the scalar datatype for representation of the Output Volume.", - argstr="--outputVolumePixelType %s") + desc="The output image Pixel Type is the scalar datatype for representation of the Output Volume.", + argstr="--outputVolumePixelType %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class BRAINSROIAutoOutputSpec(TraitedSpec): outputROIMaskVolume = File( - desc="The ROI automatically found from the input image.", exists=True) + desc="The ROI automatically found from the input image.", exists=True + ) outputVolume = File( - desc= - "The inputVolume with optional [maskOutput|cropOutput] to the region of the brain mask.", - exists=True) + desc="The inputVolume with optional [maskOutput|cropOutput] to the region of the brain mask.", + exists=True, + ) class BRAINSROIAuto(SEMLikeCommandLine): @@ -177,119 +195,122 @@ class BRAINSROIAuto(SEMLikeCommandLine): output_spec = BRAINSROIAutoOutputSpec _cmd = " BRAINSROIAuto " _outputs_filenames = { - 'outputVolume': 'outputVolume.nii', - 'outputROIMaskVolume': 'outputROIMaskVolume.nii' + "outputVolume": "outputVolume.nii", + "outputROIMaskVolume": "outputROIMaskVolume.nii", } _redirect_x = False class BRAINSConstellationDetectorInputSpec(CommandLineInputSpec): houghEyeDetectorMode = traits.Int( - desc= - ", This flag controls the mode of Hough eye detector. By default, value of 1 is for T1W images, while the value of 0 is for T2W and PD images., ", - argstr="--houghEyeDetectorMode %d") + desc=", This flag controls the mode of Hough eye detector. By default, value of 1 is for T1W images, while the value of 0 is for T2W and PD images., ", + argstr="--houghEyeDetectorMode %d", + ) inputTemplateModel = File( desc="User-specified template model., ", exists=True, - argstr="--inputTemplateModel %s") + argstr="--inputTemplateModel %s", + ) LLSModel = File( desc="Linear least squares model filename in HD5 format", exists=True, - argstr="--LLSModel %s") + argstr="--LLSModel %s", + ) inputVolume = File( desc="Input image in which to find ACPC points", exists=True, - argstr="--inputVolume %s") + argstr="--inputVolume %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "ACPC-aligned output image with the same voxels, but updated origin, and direction cosign so that the AC point would fall at the physical location (0.0,0.0,0.0), and the mid-sagital plane is the plane where physical L/R coordinate is 0.0.", - argstr="--outputVolume %s") + desc="ACPC-aligned output image with the same voxels, but updated origin, and direction cosign so that the AC point would fall at the physical location (0.0,0.0,0.0), and the mid-sagital plane is the plane where physical L/R coordinate is 0.0.", + argstr="--outputVolume %s", + ) outputResampledVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "ACPC-aligned output image in a resampled unifor space. Currently this is a 1mm, 256^3, Identity direction image.", - argstr="--outputResampledVolume %s") + desc="ACPC-aligned output image in a resampled unifor space. 
Currently this is a 1mm, 256^3, Identity direction image.", + argstr="--outputResampledVolume %s", + ) outputTransform = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "The filename for the original space to ACPC alignment to be written (in .h5 format)., ", - argstr="--outputTransform %s") + desc="The filename for the original space to ACPC alignment to be written (in .h5 format)., ", + argstr="--outputTransform %s", + ) outputLandmarksInInputSpace = traits.Either( traits.Bool, File(), hash_files=False, - desc= - ", The filename for the new subject-specific landmark definition file in the same format produced by Slicer3 (.fcsv) with the landmarks in the original image space (the detected RP, AC, PC, and VN4) in it to be written., ", - argstr="--outputLandmarksInInputSpace %s") + desc=", The filename for the new subject-specific landmark definition file in the same format produced by Slicer3 (.fcsv) with the landmarks in the original image space (the detected RP, AC, PC, and VN4) in it to be written., ", + argstr="--outputLandmarksInInputSpace %s", + ) outputLandmarksInACPCAlignedSpace = traits.Either( traits.Bool, File(), hash_files=False, - desc= - ", The filename for the new subject-specific landmark definition file in the same format produced by Slicer3 (.fcsv) with the landmarks in the output image space (the detected RP, AC, PC, and VN4) in it to be written., ", - argstr="--outputLandmarksInACPCAlignedSpace %s") + desc=", The filename for the new subject-specific landmark definition file in the same format produced by Slicer3 (.fcsv) with the landmarks in the output image space (the detected RP, AC, PC, and VN4) in it to be written., ", + argstr="--outputLandmarksInACPCAlignedSpace %s", + ) outputMRML = traits.Either( traits.Bool, File(), hash_files=False, - desc= - ", The filename for the new subject-specific scene definition file in the same format produced by Slicer3 (in .mrml format). Only the components that were specified by the user on command line would be generated. Compatible components include inputVolume, outputVolume, outputLandmarksInInputSpace, outputLandmarksInACPCAlignedSpace, and outputTransform., ", - argstr="--outputMRML %s") + desc=", The filename for the new subject-specific scene definition file in the same format produced by Slicer3 (in .mrml format). Only the components that were specified by the user on command line would be generated. Compatible components include inputVolume, outputVolume, outputLandmarksInInputSpace, outputLandmarksInACPCAlignedSpace, and outputTransform., ", + argstr="--outputMRML %s", + ) outputVerificationScript = traits.Either( traits.Bool, File(), hash_files=False, - desc= - ", The filename for the Slicer3 script that verifies the aligned landmarks against the aligned image file. This will happen only in conjunction with saveOutputLandmarks and an outputVolume., ", - argstr="--outputVerificationScript %s") + desc=", The filename for the Slicer3 script that verifies the aligned landmarks against the aligned image file. This will happen only in conjunction with saveOutputLandmarks and an outputVolume., ", + argstr="--outputVerificationScript %s", + ) mspQualityLevel = traits.Int( - desc= - ", Flag cotrols how agressive the MSP is estimated. 0=quick estimate (9 seconds), 1=normal estimate (11 seconds), 2=great estimate (22 seconds), 3=best estimate (58 seconds), NOTE: -1= Prealigned so no estimate!., ", - argstr="--mspQualityLevel %d") + desc=", Flag cotrols how agressive the MSP is estimated. 
0=quick estimate (9 seconds), 1=normal estimate (11 seconds), 2=great estimate (22 seconds), 3=best estimate (58 seconds), NOTE: -1= Prealigned so no estimate!., ", + argstr="--mspQualityLevel %d", + ) otsuPercentileThreshold = traits.Float( - desc= - ", This is a parameter to FindLargestForegroundFilledMask, which is employed when acLowerBound is set and an outputUntransformedClippedVolume is requested., ", - argstr="--otsuPercentileThreshold %f") + desc=", This is a parameter to FindLargestForegroundFilledMask, which is employed when acLowerBound is set and an outputUntransformedClippedVolume is requested., ", + argstr="--otsuPercentileThreshold %f", + ) acLowerBound = traits.Float( - desc= - ", When generating a resampled output image, replace the image with the BackgroundFillValue everywhere below the plane This Far in physical units (millimeters) below (inferior to) the AC point (as found by the model.) The oversize default was chosen to have no effect. Based on visualizing a thousand masks in the IPIG study, we recommend a limit no smaller than 80.0 mm., ", - argstr="--acLowerBound %f") + desc=", When generating a resampled output image, replace the image with the BackgroundFillValue everywhere below the plane This Far in physical units (millimeters) below (inferior to) the AC point (as found by the model.) The oversize default was chosen to have no effect. Based on visualizing a thousand masks in the IPIG study, we recommend a limit no smaller than 80.0 mm., ", + argstr="--acLowerBound %f", + ) cutOutHeadInOutputVolume = traits.Bool( - desc= - ", Flag to cut out just the head tissue when producing an (un)transformed clipped volume., ", - argstr="--cutOutHeadInOutputVolume ") + desc=", Flag to cut out just the head tissue when producing an (un)transformed clipped volume., ", + argstr="--cutOutHeadInOutputVolume ", + ) outputUntransformedClippedVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Output image in which to store neck-clipped input image, with the use of --acLowerBound and maybe --cutOutHeadInUntransformedVolume.", - argstr="--outputUntransformedClippedVolume %s") + desc="Output image in which to store neck-clipped input image, with the use of --acLowerBound and maybe --cutOutHeadInUntransformedVolume.", + argstr="--outputUntransformedClippedVolume %s", + ) rescaleIntensities = traits.Bool( - desc= - ", Flag to turn on rescaling image intensities on input., ", - argstr="--rescaleIntensities ") + desc=", Flag to turn on rescaling image intensities on input., ", + argstr="--rescaleIntensities ", + ) trimRescaledIntensities = traits.Float( - desc= - ", Turn on clipping the rescaled image one-tailed on input. Units of standard deviations above the mean. Very large values are very permissive. Non-positive value turns clipping off. Defaults to removing 0.00001 of a normal tail above the mean., ", - argstr="--trimRescaledIntensities %f") + desc=", Turn on clipping the rescaled image one-tailed on input. Units of standard deviations above the mean. Very large values are very permissive. Non-positive value turns clipping off. Defaults to removing 0.00001 of a normal tail above the mean., ", + argstr="--trimRescaledIntensities %f", + ) rescaleIntensitiesOutputRange = InputMultiPath( traits.Int, - desc= - ", This pair of integers gives the lower and upper bounds on the signal portion of the output image. 
Out-of-field voxels are taken from BackgroundFillValue., ", + desc=", This pair of integers gives the lower and upper bounds on the signal portion of the output image. Out-of-field voxels are taken from BackgroundFillValue., ", sep=",", - argstr="--rescaleIntensitiesOutputRange %s") + argstr="--rescaleIntensitiesOutputRange %s", + ) BackgroundFillValue = traits.Str( - desc= - "Fill the background of image with specified short int value. Enter number or use BIGNEG for a large negative number.", - argstr="--BackgroundFillValue %s") + desc="Fill the background of image with specified short int value. Enter number or use BIGNEG for a large negative number.", + argstr="--BackgroundFillValue %s", + ) interpolationMode = traits.Enum( "NearestNeighbor", "Linear", @@ -301,141 +322,146 @@ class BRAINSConstellationDetectorInputSpec(CommandLineInputSpec): "Welch", "Lanczos", "Blackman", - desc= - "Type of interpolation to be used when applying transform to moving volume. Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", - argstr="--interpolationMode %s") + desc="Type of interpolation to be used when applying transform to moving volume. Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", + argstr="--interpolationMode %s", + ) forceACPoint = InputMultiPath( traits.Float, - desc= - ", Use this flag to manually specify the AC point from the original image on the command line., ", + desc=", Use this flag to manually specify the AC point from the original image on the command line., ", sep=",", - argstr="--forceACPoint %s") + argstr="--forceACPoint %s", + ) forcePCPoint = InputMultiPath( traits.Float, - desc= - ", Use this flag to manually specify the PC point from the original image on the command line., ", + desc=", Use this flag to manually specify the PC point from the original image on the command line., ", sep=",", - argstr="--forcePCPoint %s") + argstr="--forcePCPoint %s", + ) forceVN4Point = InputMultiPath( traits.Float, - desc= - ", Use this flag to manually specify the VN4 point from the original image on the command line., ", + desc=", Use this flag to manually specify the VN4 point from the original image on the command line., ", sep=",", - argstr="--forceVN4Point %s") + argstr="--forceVN4Point %s", + ) forceRPPoint = InputMultiPath( traits.Float, - desc= - ", Use this flag to manually specify the RP point from the original image on the command line., ", + desc=", Use this flag to manually specify the RP point from the original image on the command line., ", sep=",", - argstr="--forceRPPoint %s") + argstr="--forceRPPoint %s", + ) inputLandmarksEMSP = File( - desc= - ", The filename for the new subject-specific landmark definition file in the same format produced by Slicer3 (in .fcsv) with the landmarks in the estimated MSP aligned space to be loaded. The detector will only process landmarks not enlisted on the file., ", + desc=", The filename for the new subject-specific landmark definition file in the same format produced by Slicer3 (in .fcsv) with the landmarks in the estimated MSP aligned space to be loaded. 
The detector will only process landmarks not enlisted on the file., ", exists=True, - argstr="--inputLandmarksEMSP %s") + argstr="--inputLandmarksEMSP %s", + ) forceHoughEyeDetectorReportFailure = traits.Bool( - desc= - ", Flag indicates whether the Hough eye detector should report failure, ", - argstr="--forceHoughEyeDetectorReportFailure ") + desc=", Flag indicates whether the Hough eye detector should report failure, ", + argstr="--forceHoughEyeDetectorReportFailure ", + ) rmpj = traits.Float( - desc= - ", Search radius for MPJ in unit of mm, ", - argstr="--rmpj %f") + desc=", Search radius for MPJ in unit of mm, ", + argstr="--rmpj %f", + ) rac = traits.Float( desc=", Search radius for AC in unit of mm, ", - argstr="--rac %f") + argstr="--rac %f", + ) rpc = traits.Float( desc=", Search radius for PC in unit of mm, ", - argstr="--rpc %f") + argstr="--rpc %f", + ) rVN4 = traits.Float( - desc= - ", Search radius for VN4 in unit of mm, ", - argstr="--rVN4 %f") + desc=", Search radius for VN4 in unit of mm, ", + argstr="--rVN4 %f", + ) debug = traits.Bool( - desc= - ", Show internal debugging information., ", - argstr="--debug ") + desc=", Show internal debugging information., ", + argstr="--debug ", + ) verbose = traits.Bool( desc=", Show more verbose output, ", - argstr="--verbose ") + argstr="--verbose ", + ) writeBranded2DImage = traits.Either( traits.Bool, File(), hash_files=False, - desc= - ", The filename for the 2D .png branded midline debugging image. This will happen only in conjunction with requesting an outputVolume., ", - argstr="--writeBranded2DImage %s") + desc=", The filename for the 2D .png branded midline debugging image. This will happen only in conjunction with requesting an outputVolume., ", + argstr="--writeBranded2DImage %s", + ) resultsDir = traits.Either( traits.Bool, Directory(), hash_files=False, - desc= - ", The directory for the debuging images to be written., ", - argstr="--resultsDir %s") + desc=", The directory for the debuging images to be written., ", + argstr="--resultsDir %s", + ) writedebuggingImagesLevel = traits.Int( - desc= - ", This flag controls if debugging images are produced. By default value of 0 is no images. Anything greater than zero will be increasing level of debugging images., ", - argstr="--writedebuggingImagesLevel %d") + desc=", This flag controls if debugging images are produced. By default value of 0 is no images. 
Anything greater than zero will be increasing level of debugging images., ", + argstr="--writedebuggingImagesLevel %d", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) atlasVolume = File( desc="Atlas volume image to be used for BRAINSFit registration", exists=True, - argstr="--atlasVolume %s") + argstr="--atlasVolume %s", + ) atlasLandmarks = File( - desc= - "Atlas landmarks to be used for BRAINSFit registration initialization, ", + desc="Atlas landmarks to be used for BRAINSFit registration initialization, ", exists=True, - argstr="--atlasLandmarks %s") + argstr="--atlasLandmarks %s", + ) atlasLandmarkWeights = File( - desc= - "Weights associated with atlas landmarks to be used for BRAINSFit registration initialization, ", + desc="Weights associated with atlas landmarks to be used for BRAINSFit registration initialization, ", exists=True, - argstr="--atlasLandmarkWeights %s") + argstr="--atlasLandmarkWeights %s", + ) class BRAINSConstellationDetectorOutputSpec(TraitedSpec): outputVolume = File( - desc= - "ACPC-aligned output image with the same voxels, but updated origin, and direction cosign so that the AC point would fall at the physical location (0.0,0.0,0.0), and the mid-sagital plane is the plane where physical L/R coordinate is 0.0.", - exists=True) + desc="ACPC-aligned output image with the same voxels, but updated origin, and direction cosign so that the AC point would fall at the physical location (0.0,0.0,0.0), and the mid-sagital plane is the plane where physical L/R coordinate is 0.0.", + exists=True, + ) outputResampledVolume = File( - desc= - "ACPC-aligned output image in a resampled unifor space. Currently this is a 1mm, 256^3, Identity direction image.", - exists=True) + desc="ACPC-aligned output image in a resampled unifor space. Currently this is a 1mm, 256^3, Identity direction image.", + exists=True, + ) outputTransform = File( - desc= - "The filename for the original space to ACPC alignment to be written (in .h5 format)., ", - exists=True) + desc="The filename for the original space to ACPC alignment to be written (in .h5 format)., ", + exists=True, + ) outputLandmarksInInputSpace = File( - desc= - ", The filename for the new subject-specific landmark definition file in the same format produced by Slicer3 (.fcsv) with the landmarks in the original image space (the detected RP, AC, PC, and VN4) in it to be written., ", - exists=True) + desc=", The filename for the new subject-specific landmark definition file in the same format produced by Slicer3 (.fcsv) with the landmarks in the original image space (the detected RP, AC, PC, and VN4) in it to be written., ", + exists=True, + ) outputLandmarksInACPCAlignedSpace = File( - desc= - ", The filename for the new subject-specific landmark definition file in the same format produced by Slicer3 (.fcsv) with the landmarks in the output image space (the detected RP, AC, PC, and VN4) in it to be written., ", - exists=True) + desc=", The filename for the new subject-specific landmark definition file in the same format produced by Slicer3 (.fcsv) with the landmarks in the output image space (the detected RP, AC, PC, and VN4) in it to be written., ", + exists=True, + ) outputMRML = File( - desc= - ", The filename for the new subject-specific scene definition file in the same format produced by Slicer3 (in .mrml format). Only the components that were specified by the user on command line would be generated. 
Compatible components include inputVolume, outputVolume, outputLandmarksInInputSpace, outputLandmarksInACPCAlignedSpace, and outputTransform., ", - exists=True) + desc=", The filename for the new subject-specific scene definition file in the same format produced by Slicer3 (in .mrml format). Only the components that were specified by the user on command line would be generated. Compatible components include inputVolume, outputVolume, outputLandmarksInInputSpace, outputLandmarksInACPCAlignedSpace, and outputTransform., ", + exists=True, + ) outputVerificationScript = File( - desc= - ", The filename for the Slicer3 script that verifies the aligned landmarks against the aligned image file. This will happen only in conjunction with saveOutputLandmarks and an outputVolume., ", - exists=True) + desc=", The filename for the Slicer3 script that verifies the aligned landmarks against the aligned image file. This will happen only in conjunction with saveOutputLandmarks and an outputVolume., ", + exists=True, + ) outputUntransformedClippedVolume = File( - desc= - "Output image in which to store neck-clipped input image, with the use of --acLowerBound and maybe --cutOutHeadInUntransformedVolume.", - exists=True) + desc="Output image in which to store neck-clipped input image, with the use of --acLowerBound and maybe --cutOutHeadInUntransformedVolume.", + exists=True, + ) writeBranded2DImage = File( - desc= - ", The filename for the 2D .png branded midline debugging image. This will happen only in conjunction with requesting an outputVolume., ", - exists=True) + desc=", The filename for the 2D .png branded midline debugging image. This will happen only in conjunction with requesting an outputVolume., ", + exists=True, + ) resultsDir = Directory( - desc= - ", The directory for the debuging images to be written., ", - exists=True) + desc=", The directory for the debuging images to be written., ", + exists=True, + ) class BRAINSConstellationDetector(SEMLikeCommandLine): @@ -455,26 +481,16 @@ class BRAINSConstellationDetector(SEMLikeCommandLine): output_spec = BRAINSConstellationDetectorOutputSpec _cmd = " BRAINSConstellationDetector " _outputs_filenames = { - 'outputVolume': - 'outputVolume.nii.gz', - 'outputMRML': - 'outputMRML.mrml', - 'resultsDir': - 'resultsDir', - 'outputResampledVolume': - 'outputResampledVolume.nii.gz', - 'outputTransform': - 'outputTransform.h5', - 'writeBranded2DImage': - 'writeBranded2DImage.png', - 'outputLandmarksInACPCAlignedSpace': - 'outputLandmarksInACPCAlignedSpace.fcsv', - 'outputLandmarksInInputSpace': - 'outputLandmarksInInputSpace.fcsv', - 'outputUntransformedClippedVolume': - 'outputUntransformedClippedVolume.nii.gz', - 'outputVerificationScript': - 'outputVerificationScript.sh' + "outputVolume": "outputVolume.nii.gz", + "outputMRML": "outputMRML.mrml", + "resultsDir": "resultsDir", + "outputResampledVolume": "outputResampledVolume.nii.gz", + "outputTransform": "outputTransform.h5", + "writeBranded2DImage": "writeBranded2DImage.png", + "outputLandmarksInACPCAlignedSpace": "outputLandmarksInACPCAlignedSpace.fcsv", + "outputLandmarksInInputSpace": "outputLandmarksInInputSpace.fcsv", + "outputUntransformedClippedVolume": "outputUntransformedClippedVolume.nii.gz", + "outputVerificationScript": "outputVerificationScript.sh", } _redirect_x = False @@ -483,37 +499,42 @@ class BRAINSCreateLabelMapFromProbabilityMapsInputSpec(CommandLineInputSpec): inputProbabilityVolume = InputMultiPath( File(exists=True), desc="The list of proobabilityimages.", - 
argstr="--inputProbabilityVolume %s...") + argstr="--inputProbabilityVolume %s...", + ) priorLabelCodes = InputMultiPath( traits.Int, - desc= - "A list of PriorLabelCode values used for coding the output label images", + desc="A list of PriorLabelCode values used for coding the output label images", sep=",", - argstr="--priorLabelCodes %s") + argstr="--priorLabelCodes %s", + ) foregroundPriors = InputMultiPath( traits.Int, desc="A list: For each Prior Label, 1 if foreground, 0 if background", sep=",", - argstr="--foregroundPriors %s") + argstr="--foregroundPriors %s", + ) nonAirRegionMask = File( - desc= - "a mask representing the \'NonAirRegion\' -- Just force pixels in this region to zero", + desc="a mask representing the 'NonAirRegion' -- Just force pixels in this region to zero", exists=True, - argstr="--nonAirRegionMask %s") + argstr="--nonAirRegionMask %s", + ) inclusionThreshold = traits.Float( - desc="tolerance for inclusion", argstr="--inclusionThreshold %f") + desc="tolerance for inclusion", argstr="--inclusionThreshold %f" + ) dirtyLabelVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="the labels prior to cleaning", - argstr="--dirtyLabelVolume %s") + argstr="--dirtyLabelVolume %s", + ) cleanLabelVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="the foreground labels volume", - argstr="--cleanLabelVolume %s") + argstr="--cleanLabelVolume %s", + ) class BRAINSCreateLabelMapFromProbabilityMapsOutputSpec(TraitedSpec): @@ -534,8 +555,8 @@ class BRAINSCreateLabelMapFromProbabilityMaps(SEMLikeCommandLine): output_spec = BRAINSCreateLabelMapFromProbabilityMapsOutputSpec _cmd = " BRAINSCreateLabelMapFromProbabilityMaps " _outputs_filenames = { - 'dirtyLabelVolume': 'dirtyLabelVolume.nii', - 'cleanLabelVolume': 'cleanLabelVolume.nii' + "dirtyLabelVolume": "dirtyLabelVolume.nii", + "cleanLabelVolume": "cleanLabelVolume.nii", } _redirect_x = False @@ -544,47 +565,50 @@ class BinaryMaskEditorBasedOnLandmarksInputSpec(CommandLineInputSpec): inputBinaryVolume = File( desc="Input binary image in which to be edited", exists=True, - argstr="--inputBinaryVolume %s") + argstr="--inputBinaryVolume %s", + ) outputBinaryVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Output binary image in which to be edited", - argstr="--outputBinaryVolume %s") + argstr="--outputBinaryVolume %s", + ) inputLandmarksFilename = File( - desc= - " The filename for the landmark definition file in the same format produced by Slicer3 (.fcsv). ", + desc=" The filename for the landmark definition file in the same format produced by Slicer3 (.fcsv). ", exists=True, - argstr="--inputLandmarksFilename %s") + argstr="--inputLandmarksFilename %s", + ) inputLandmarkNames = InputMultiPath( traits.Str, - desc= - " A target input landmark name to be edited. This should be listed in the inputLandmakrFilename Given. ", + desc=" A target input landmark name to be edited. This should be listed in the inputLandmakrFilename Given. ", sep=",", - argstr="--inputLandmarkNames %s") + argstr="--inputLandmarkNames %s", + ) setCutDirectionForLandmark = InputMultiPath( traits.Str, - desc= - "Setting the cutting out direction of the input binary image to the one of anterior, posterior, left, right, superior or posterior. (ENUMERATION: ANTERIOR, POSTERIOR, LEFT, RIGHT, SUPERIOR, POSTERIOR) ", + desc="Setting the cutting out direction of the input binary image to the one of anterior, posterior, left, right, superior or posterior. 
(ENUMERATION: ANTERIOR, POSTERIOR, LEFT, RIGHT, SUPERIOR, POSTERIOR) ", sep=",", - argstr="--setCutDirectionForLandmark %s") + argstr="--setCutDirectionForLandmark %s", + ) setCutDirectionForObliquePlane = InputMultiPath( traits.Str, - desc= - "If this is true, the mask will be thresholded out to the direction of inferior, posterior, and/or left. Default behavrior is that cutting out to the direction of superior, anterior and/or right. ", + desc="If this is true, the mask will be thresholded out to the direction of inferior, posterior, and/or left. Default behavrior is that cutting out to the direction of superior, anterior and/or right. ", sep=",", - argstr="--setCutDirectionForObliquePlane %s") + argstr="--setCutDirectionForObliquePlane %s", + ) inputLandmarkNamesForObliquePlane = InputMultiPath( traits.Str, - desc= - " Three subset landmark names of inputLandmarksFilename for a oblique plane computation. The plane computed for binary volume editing. ", + desc=" Three subset landmark names of inputLandmarksFilename for a oblique plane computation. The plane computed for binary volume editing. ", sep=",", - argstr="--inputLandmarkNamesForObliquePlane %s") + argstr="--inputLandmarkNamesForObliquePlane %s", + ) class BinaryMaskEditorBasedOnLandmarksOutputSpec(TraitedSpec): outputBinaryVolume = File( - desc="Output binary image in which to be edited", exists=True) + desc="Output binary image in which to be edited", exists=True + ) class BinaryMaskEditorBasedOnLandmarks(SEMLikeCommandLine): @@ -601,50 +625,56 @@ class BinaryMaskEditorBasedOnLandmarks(SEMLikeCommandLine): input_spec = BinaryMaskEditorBasedOnLandmarksInputSpec output_spec = BinaryMaskEditorBasedOnLandmarksOutputSpec _cmd = " BinaryMaskEditorBasedOnLandmarks " - _outputs_filenames = {'outputBinaryVolume': 'outputBinaryVolume.nii'} + _outputs_filenames = {"outputBinaryVolume": "outputBinaryVolume.nii"} _redirect_x = False class BRAINSMultiSTAPLEInputSpec(CommandLineInputSpec): inputCompositeT1Volume = File( - desc= - "Composite T1, all label maps transofrmed into the space for this image.", + desc="Composite T1, all label maps transofrmed into the space for this image.", exists=True, - argstr="--inputCompositeT1Volume %s") + argstr="--inputCompositeT1Volume %s", + ) inputLabelVolume = InputMultiPath( File(exists=True), desc="The list of proobabilityimages.", - argstr="--inputLabelVolume %s...") + argstr="--inputLabelVolume %s...", + ) inputTransform = InputMultiPath( File(exists=True), desc="transforms to apply to label volumes", - argstr="--inputTransform %s...") + argstr="--inputTransform %s...", + ) labelForUndecidedPixels = traits.Int( - desc="Label for undecided pixels", - argstr="--labelForUndecidedPixels %d") + desc="Label for undecided pixels", argstr="--labelForUndecidedPixels %d" + ) resampledVolumePrefix = traits.Str( desc="if given, write out resampled volumes with this prefix", - argstr="--resampledVolumePrefix %s") + argstr="--resampledVolumePrefix %s", + ) skipResampling = traits.Bool( - desc="Omit resampling images into reference space", - argstr="--skipResampling ") + desc="Omit resampling images into reference space", argstr="--skipResampling " + ) outputMultiSTAPLE = traits.Either( traits.Bool, File(), hash_files=False, desc="the MultiSTAPLE average of input label volumes", - argstr="--outputMultiSTAPLE %s") + argstr="--outputMultiSTAPLE %s", + ) outputConfusionMatrix = traits.Either( traits.Bool, File(), hash_files=False, desc="Confusion Matrix", - argstr="--outputConfusionMatrix %s") + 
argstr="--outputConfusionMatrix %s", + ) class BRAINSMultiSTAPLEOutputSpec(TraitedSpec): outputMultiSTAPLE = File( - desc="the MultiSTAPLE average of input label volumes", exists=True) + desc="the MultiSTAPLE average of input label volumes", exists=True + ) outputConfusionMatrix = File(desc="Confusion Matrix", exists=True) @@ -661,8 +691,8 @@ class BRAINSMultiSTAPLE(SEMLikeCommandLine): output_spec = BRAINSMultiSTAPLEOutputSpec _cmd = " BRAINSMultiSTAPLE " _outputs_filenames = { - 'outputMultiSTAPLE': 'outputMultiSTAPLE.nii', - 'outputConfusionMatrix': 'outputConfusionMatrixh5|mat|txt' + "outputMultiSTAPLE": "outputMultiSTAPLE.nii", + "outputConfusionMatrix": "outputConfusionMatrixh5|mat|txt", } _redirect_x = False @@ -671,90 +701,97 @@ class BRAINSABCInputSpec(CommandLineInputSpec): inputVolumes = InputMultiPath( File(exists=True), desc="The list of input image files to be segmented.", - argstr="--inputVolumes %s...") + argstr="--inputVolumes %s...", + ) atlasDefinition = File( desc="Contains all parameters for Atlas", exists=True, - argstr="--atlasDefinition %s") + argstr="--atlasDefinition %s", + ) restoreState = File( desc="The initial state for the registration process", exists=True, - argstr="--restoreState %s") + argstr="--restoreState %s", + ) saveState = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "(optional) Filename to which save the final state of the registration", - argstr="--saveState %s") + desc="(optional) Filename to which save the final state of the registration", + argstr="--saveState %s", + ) inputVolumeTypes = InputMultiPath( traits.Str, desc="The list of input image types corresponding to the inputVolumes.", sep=",", - argstr="--inputVolumeTypes %s") + argstr="--inputVolumeTypes %s", + ) outputDir = traits.Either( traits.Bool, Directory(), hash_files=False, desc="Ouput directory", - argstr="--outputDir %s") + argstr="--outputDir %s", + ) atlasToSubjectTransformType = traits.Enum( "Identity", "Rigid", "Affine", "BSpline", "SyN", - desc= - " What type of linear transform type do you want to use to register the atlas to the reference subject image.", - argstr="--atlasToSubjectTransformType %s") + desc=" What type of linear transform type do you want to use to register the atlas to the reference subject image.", + argstr="--atlasToSubjectTransformType %s", + ) atlasToSubjectTransform = traits.Either( traits.Bool, File(), hash_files=False, desc="The transform from atlas to the subject", - argstr="--atlasToSubjectTransform %s") + argstr="--atlasToSubjectTransform %s", + ) atlasToSubjectInitialTransform = traits.Either( traits.Bool, File(), hash_files=False, desc="The initial transform from atlas to the subject", - argstr="--atlasToSubjectInitialTransform %s") + argstr="--atlasToSubjectInitialTransform %s", + ) subjectIntermodeTransformType = traits.Enum( "Identity", "Rigid", "Affine", "BSpline", - desc= - " What type of linear transform type do you want to use to register the atlas to the reference subject image.", - argstr="--subjectIntermodeTransformType %s") + desc=" What type of linear transform type do you want to use to register the atlas to the reference subject image.", + argstr="--subjectIntermodeTransformType %s", + ) outputVolumes = traits.Either( traits.Bool, - InputMultiPath(File(), ), + InputMultiPath(File(),), hash_files=False, - desc= - "Corrected Output Images: should specify the same number of images as inputVolume, if only one element is given, then it is used as a file pattern where %s is replaced by the imageVolumeType, and %d by 
the index list location.", - argstr="--outputVolumes %s...") + desc="Corrected Output Images: should specify the same number of images as inputVolume, if only one element is given, then it is used as a file pattern where %s is replaced by the imageVolumeType, and %d by the index list location.", + argstr="--outputVolumes %s...", + ) outputLabels = traits.Either( traits.Bool, File(), hash_files=False, desc="Output Label Image", - argstr="--outputLabels %s") + argstr="--outputLabels %s", + ) outputDirtyLabels = traits.Either( traits.Bool, File(), hash_files=False, desc="Output Dirty Label Image", - argstr="--outputDirtyLabels %s") + argstr="--outputDirtyLabels %s", + ) posteriorTemplate = traits.Str( desc="filename template for Posterior output files", - argstr="--posteriorTemplate %s") + argstr="--posteriorTemplate %s", + ) outputFormat = traits.Enum( - "NIFTI", - "Meta", - "Nrrd", - desc="Output format", - argstr="--outputFormat %s") + "NIFTI", "Meta", "Nrrd", desc="Output format", argstr="--outputFormat %s" + ) interpolationMode = traits.Enum( "BSpline", "NearestNeighbor", @@ -766,93 +803,97 @@ class BRAINSABCInputSpec(CommandLineInputSpec): "Welch", "Lanczos", "Blackman", - desc= - "Type of interpolation to be used when applying transform to moving volume. Options are Linear, NearestNeighbor, BSpline, WindowedSinc, or ResampleInPlace. The ResampleInPlace option will create an image with the same discrete voxel values and will adjust the origin and direction of the physical space interpretation.", - argstr="--interpolationMode %s") - maxIterations = traits.Int( - desc="Filter iterations", argstr="--maxIterations %d") + desc="Type of interpolation to be used when applying transform to moving volume. Options are Linear, NearestNeighbor, BSpline, WindowedSinc, or ResampleInPlace. The ResampleInPlace option will create an image with the same discrete voxel values and will adjust the origin and direction of the physical space interpretation.", + argstr="--interpolationMode %s", + ) + maxIterations = traits.Int(desc="Filter iterations", argstr="--maxIterations %d") medianFilterSize = InputMultiPath( traits.Int, - desc= - "The radius for the optional MedianImageFilter preprocessing in all 3 directions.", + desc="The radius for the optional MedianImageFilter preprocessing in all 3 directions.", sep=",", - argstr="--medianFilterSize %s") + argstr="--medianFilterSize %s", + ) filterIteration = traits.Int( - desc="Filter iterations", argstr="--filterIteration %d") + desc="Filter iterations", argstr="--filterIteration %d" + ) filterTimeStep = traits.Float( - desc= - "Filter time step should be less than (PixelSpacing/(1^(DIM+1)), value is set to negative, then allow automatic setting of this value. ", - argstr="--filterTimeStep %f") + desc="Filter time step should be less than (PixelSpacing/(1^(DIM+1)), value is set to negative, then allow automatic setting of this value. 
", + argstr="--filterTimeStep %f", + ) filterMethod = traits.Enum( "None", "CurvatureFlow", "GradientAnisotropicDiffusion", "Median", desc="Filter method for preprocessing of registration", - argstr="--filterMethod %s") - maxBiasDegree = traits.Int( - desc="Maximum bias degree", argstr="--maxBiasDegree %d") + argstr="--filterMethod %s", + ) + maxBiasDegree = traits.Int(desc="Maximum bias degree", argstr="--maxBiasDegree %d") useKNN = traits.Bool( - desc="Use the KNN stage of estimating posteriors.", argstr="--useKNN ") + desc="Use the KNN stage of estimating posteriors.", argstr="--useKNN " + ) purePlugsThreshold = traits.Float( - desc= - "If this threshold value is greater than zero, only pure samples are used to compute the distributions in EM classification, and only pure samples are used for KNN training. The default value is set to 0, that means not using pure plugs. However, a value of 0.2 is suggested if you want to activate using pure plugs option.", - argstr="--purePlugsThreshold %f") + desc="If this threshold value is greater than zero, only pure samples are used to compute the distributions in EM classification, and only pure samples are used for KNN training. The default value is set to 0, that means not using pure plugs. However, a value of 0.2 is suggested if you want to activate using pure plugs option.", + argstr="--purePlugsThreshold %f", + ) numberOfSubSamplesInEachPlugArea = InputMultiPath( traits.Int, - desc= - "Number of continous index samples taken at each direction of lattice space for each plug volume.", + desc="Number of continous index samples taken at each direction of lattice space for each plug volume.", sep=",", - argstr="--numberOfSubSamplesInEachPlugArea %s") + argstr="--numberOfSubSamplesInEachPlugArea %s", + ) atlasWarpingOff = traits.Bool( - desc="Deformable registration of atlas to subject", - argstr="--atlasWarpingOff ") + desc="Deformable registration of atlas to subject", argstr="--atlasWarpingOff " + ) gridSize = InputMultiPath( traits.Int, desc="Grid size for atlas warping with BSplines", sep=",", - argstr="--gridSize %s") + argstr="--gridSize %s", + ) defaultSuffix = traits.Str(argstr="--defaultSuffix %s") implicitOutputs = traits.Either( traits.Bool, - InputMultiPath(File(), ), + InputMultiPath(File(),), hash_files=False, - desc= - "Outputs to be made available to NiPype. Needed because not all BRAINSABC outputs have command line arguments.", - argstr="--implicitOutputs %s...") + desc="Outputs to be made available to NiPype. Needed because not all BRAINSABC outputs have command line arguments.", + argstr="--implicitOutputs %s...", + ) debuglevel = traits.Int( - desc= - "Display debug messages, and produce debug intermediate results. 0=OFF, 1=Minimal, 10=Maximum debugging.", - argstr="--debuglevel %d") + desc="Display debug messages, and produce debug intermediate results. 
0=OFF, 1=Minimal, 10=Maximum debugging.", + argstr="--debuglevel %d", + ) writeLess = traits.Bool( desc="Does not write posteriors and filtered, bias corrected images", - argstr="--writeLess ") + argstr="--writeLess ", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class BRAINSABCOutputSpec(TraitedSpec): saveState = File( - desc= - "(optional) Filename to which save the final state of the registration", - exists=True) + desc="(optional) Filename to which save the final state of the registration", + exists=True, + ) outputDir = Directory(desc="Ouput directory", exists=True) atlasToSubjectTransform = File( - desc="The transform from atlas to the subject", exists=True) + desc="The transform from atlas to the subject", exists=True + ) atlasToSubjectInitialTransform = File( - desc="The initial transform from atlas to the subject", exists=True) + desc="The initial transform from atlas to the subject", exists=True + ) outputVolumes = OutputMultiPath( File(exists=True), - desc= - "Corrected Output Images: should specify the same number of images as inputVolume, if only one element is given, then it is used as a file pattern where %s is replaced by the imageVolumeType, and %d by the index list location." + desc="Corrected Output Images: should specify the same number of images as inputVolume, if only one element is given, then it is used as a file pattern where %s is replaced by the imageVolumeType, and %d by the index list location.", ) outputLabels = File(desc="Output Label Image", exists=True) outputDirtyLabels = File(desc="Output Dirty Label Image", exists=True) implicitOutputs = OutputMultiPath( File(exists=True), - desc= - "Outputs to be made available to NiPype. Needed because not all BRAINSABC outputs have command line arguments." + desc="Outputs to be made available to NiPype. 
Needed because not all BRAINSABC outputs have command line arguments.", ) @@ -869,44 +910,52 @@ class BRAINSABC(SEMLikeCommandLine): output_spec = BRAINSABCOutputSpec _cmd = " BRAINSABC " _outputs_filenames = { - 'saveState': 'saveState.h5', - 'outputLabels': 'outputLabels.nii.gz', - 'atlasToSubjectTransform': 'atlasToSubjectTransform.h5', - 'atlasToSubjectInitialTransform': 'atlasToSubjectInitialTransform.h5', - 'outputDirtyLabels': 'outputDirtyLabels.nii.gz', - 'outputVolumes': 'outputVolumes.nii.gz', - 'outputDir': 'outputDir', - 'implicitOutputs': 'implicitOutputs.nii.gz' + "saveState": "saveState.h5", + "outputLabels": "outputLabels.nii.gz", + "atlasToSubjectTransform": "atlasToSubjectTransform.h5", + "atlasToSubjectInitialTransform": "atlasToSubjectInitialTransform.h5", + "outputDirtyLabels": "outputDirtyLabels.nii.gz", + "outputVolumes": "outputVolumes.nii.gz", + "outputDir": "outputDir", + "implicitOutputs": "implicitOutputs.nii.gz", } _redirect_x = False class ESLRInputSpec(CommandLineInputSpec): inputVolume = File( - desc="Input Label Volume", exists=True, argstr="--inputVolume %s") + desc="Input Label Volume", exists=True, argstr="--inputVolume %s" + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Output Label Volume", - argstr="--outputVolume %s") + argstr="--outputVolume %s", + ) low = traits.Int( - desc="The lower bound of the labels to be used.", argstr="--low %d") + desc="The lower bound of the labels to be used.", argstr="--low %d" + ) high = traits.Int( - desc="The higher bound of the labels to be used.", argstr="--high %d") + desc="The higher bound of the labels to be used.", argstr="--high %d" + ) closingSize = traits.Int( - desc="The closing size for hole filling.", argstr="--closingSize %d") + desc="The closing size for hole filling.", argstr="--closingSize %d" + ) openingSize = traits.Int( - desc="The opening size for hole filling.", argstr="--openingSize %d") + desc="The opening size for hole filling.", argstr="--openingSize %d" + ) safetySize = traits.Int( - desc="The safetySize size for the clipping region.", - argstr="--safetySize %d") + desc="The safetySize size for the clipping region.", argstr="--safetySize %d" + ) preserveOutside = traits.Bool( desc="For values outside the specified range, preserve those values.", - argstr="--preserveOutside ") + argstr="--preserveOutside ", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class ESLROutputSpec(TraitedSpec): @@ -925,5 +974,5 @@ class ESLR(SEMLikeCommandLine): input_spec = ESLRInputSpec output_spec = ESLROutputSpec _cmd = " ESLR " - _outputs_filenames = {'outputVolume': 'outputVolume.nii.gz'} + _outputs_filenames = {"outputVolume": "outputVolume.nii.gz"} _redirect_x = False diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py index a6be21c287..e43cdf412c 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py @@ -4,103 +4,66 @@ def test_BRAINSABC_inputs(): input_map = dict( - args=dict(argstr='%s', ), - atlasDefinition=dict( - argstr='--atlasDefinition %s', - extensions=None, - ), + args=dict(argstr="%s",), + atlasDefinition=dict(argstr="--atlasDefinition %s", extensions=None,), atlasToSubjectInitialTransform=dict( - argstr='--atlasToSubjectInitialTransform %s', - 
hash_files=False, + argstr="--atlasToSubjectInitialTransform %s", hash_files=False, ), atlasToSubjectTransform=dict( - argstr='--atlasToSubjectTransform %s', - hash_files=False, - ), - atlasToSubjectTransformType=dict( - argstr='--atlasToSubjectTransformType %s', ), - atlasWarpingOff=dict(argstr='--atlasWarpingOff ', ), - debuglevel=dict(argstr='--debuglevel %d', ), - defaultSuffix=dict(argstr='--defaultSuffix %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - filterIteration=dict(argstr='--filterIteration %d', ), - filterMethod=dict(argstr='--filterMethod %s', ), - filterTimeStep=dict(argstr='--filterTimeStep %f', ), - gridSize=dict( - argstr='--gridSize %s', - sep=',', - ), - implicitOutputs=dict( - argstr='--implicitOutputs %s...', - hash_files=False, - ), - inputVolumeTypes=dict( - argstr='--inputVolumeTypes %s', - sep=',', - ), - inputVolumes=dict(argstr='--inputVolumes %s...', ), - interpolationMode=dict(argstr='--interpolationMode %s', ), - maxBiasDegree=dict(argstr='--maxBiasDegree %d', ), - maxIterations=dict(argstr='--maxIterations %d', ), - medianFilterSize=dict( - argstr='--medianFilterSize %s', - sep=',', + argstr="--atlasToSubjectTransform %s", hash_files=False, ), + atlasToSubjectTransformType=dict(argstr="--atlasToSubjectTransformType %s",), + atlasWarpingOff=dict(argstr="--atlasWarpingOff ",), + debuglevel=dict(argstr="--debuglevel %d",), + defaultSuffix=dict(argstr="--defaultSuffix %s",), + environ=dict(nohash=True, usedefault=True,), + filterIteration=dict(argstr="--filterIteration %d",), + filterMethod=dict(argstr="--filterMethod %s",), + filterTimeStep=dict(argstr="--filterTimeStep %f",), + gridSize=dict(argstr="--gridSize %s", sep=",",), + implicitOutputs=dict(argstr="--implicitOutputs %s...", hash_files=False,), + inputVolumeTypes=dict(argstr="--inputVolumeTypes %s", sep=",",), + inputVolumes=dict(argstr="--inputVolumes %s...",), + interpolationMode=dict(argstr="--interpolationMode %s",), + maxBiasDegree=dict(argstr="--maxBiasDegree %d",), + maxIterations=dict(argstr="--maxIterations %d",), + medianFilterSize=dict(argstr="--medianFilterSize %s", sep=",",), numberOfSubSamplesInEachPlugArea=dict( - argstr='--numberOfSubSamplesInEachPlugArea %s', - sep=',', - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputDir=dict( - argstr='--outputDir %s', - hash_files=False, - ), - outputDirtyLabels=dict( - argstr='--outputDirtyLabels %s', - hash_files=False, - ), - outputFormat=dict(argstr='--outputFormat %s', ), - outputLabels=dict( - argstr='--outputLabels %s', - hash_files=False, - ), - outputVolumes=dict( - argstr='--outputVolumes %s...', - hash_files=False, - ), - posteriorTemplate=dict(argstr='--posteriorTemplate %s', ), - purePlugsThreshold=dict(argstr='--purePlugsThreshold %f', ), - restoreState=dict( - argstr='--restoreState %s', - extensions=None, - ), - saveState=dict( - argstr='--saveState %s', - hash_files=False, + argstr="--numberOfSubSamplesInEachPlugArea %s", sep=",", ), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputDir=dict(argstr="--outputDir %s", hash_files=False,), + outputDirtyLabels=dict(argstr="--outputDirtyLabels %s", hash_files=False,), + outputFormat=dict(argstr="--outputFormat %s",), + outputLabels=dict(argstr="--outputLabels %s", hash_files=False,), + outputVolumes=dict(argstr="--outputVolumes %s...", hash_files=False,), + posteriorTemplate=dict(argstr="--posteriorTemplate %s",), + purePlugsThreshold=dict(argstr="--purePlugsThreshold %f",), + restoreState=dict(argstr="--restoreState %s", 
extensions=None,), + saveState=dict(argstr="--saveState %s", hash_files=False,), subjectIntermodeTransformType=dict( - argstr='--subjectIntermodeTransformType %s', ), - useKNN=dict(argstr='--useKNN ', ), - writeLess=dict(argstr='--writeLess ', ), + argstr="--subjectIntermodeTransformType %s", + ), + useKNN=dict(argstr="--useKNN ",), + writeLess=dict(argstr="--writeLess ",), ) inputs = BRAINSABC.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSABC_outputs(): output_map = dict( - atlasToSubjectInitialTransform=dict(extensions=None, ), - atlasToSubjectTransform=dict(extensions=None, ), + atlasToSubjectInitialTransform=dict(extensions=None,), + atlasToSubjectTransform=dict(extensions=None,), implicitOutputs=dict(), outputDir=dict(), - outputDirtyLabels=dict(extensions=None, ), - outputLabels=dict(extensions=None, ), + outputDirtyLabels=dict(extensions=None,), + outputLabels=dict(extensions=None,), outputVolumes=dict(), - saveState=dict(extensions=None, ), + saveState=dict(extensions=None,), ) outputs = BRAINSABC.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py index 7f1f2b747b..a2eb766db3 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py @@ -4,137 +4,82 @@ def test_BRAINSConstellationDetector_inputs(): input_map = dict( - BackgroundFillValue=dict(argstr='--BackgroundFillValue %s', ), - LLSModel=dict( - argstr='--LLSModel %s', - extensions=None, - ), - acLowerBound=dict(argstr='--acLowerBound %f', ), - args=dict(argstr='%s', ), - atlasLandmarkWeights=dict( - argstr='--atlasLandmarkWeights %s', - extensions=None, - ), - atlasLandmarks=dict( - argstr='--atlasLandmarks %s', - extensions=None, - ), - atlasVolume=dict( - argstr='--atlasVolume %s', - extensions=None, - ), - cutOutHeadInOutputVolume=dict(argstr='--cutOutHeadInOutputVolume ', ), - debug=dict(argstr='--debug ', ), - environ=dict( - nohash=True, - usedefault=True, - ), - forceACPoint=dict( - argstr='--forceACPoint %s', - sep=',', - ), + BackgroundFillValue=dict(argstr="--BackgroundFillValue %s",), + LLSModel=dict(argstr="--LLSModel %s", extensions=None,), + acLowerBound=dict(argstr="--acLowerBound %f",), + args=dict(argstr="%s",), + atlasLandmarkWeights=dict(argstr="--atlasLandmarkWeights %s", extensions=None,), + atlasLandmarks=dict(argstr="--atlasLandmarks %s", extensions=None,), + atlasVolume=dict(argstr="--atlasVolume %s", extensions=None,), + cutOutHeadInOutputVolume=dict(argstr="--cutOutHeadInOutputVolume ",), + debug=dict(argstr="--debug ",), + environ=dict(nohash=True, usedefault=True,), + forceACPoint=dict(argstr="--forceACPoint %s", sep=",",), forceHoughEyeDetectorReportFailure=dict( - argstr='--forceHoughEyeDetectorReportFailure ', ), - forcePCPoint=dict( - argstr='--forcePCPoint %s', - sep=',', - ), - forceRPPoint=dict( - argstr='--forceRPPoint %s', - sep=',', - ), - forceVN4Point=dict( - argstr='--forceVN4Point %s', - sep=',', - ), - houghEyeDetectorMode=dict(argstr='--houghEyeDetectorMode %d', ), - inputLandmarksEMSP=dict( - argstr='--inputLandmarksEMSP %s', - extensions=None, - ), - inputTemplateModel=dict( - argstr='--inputTemplateModel %s', - extensions=None, - ), - inputVolume=dict( - 
argstr='--inputVolume %s', - extensions=None, - ), - interpolationMode=dict(argstr='--interpolationMode %s', ), - mspQualityLevel=dict(argstr='--mspQualityLevel %d', ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - otsuPercentileThreshold=dict(argstr='--otsuPercentileThreshold %f', ), + argstr="--forceHoughEyeDetectorReportFailure ", + ), + forcePCPoint=dict(argstr="--forcePCPoint %s", sep=",",), + forceRPPoint=dict(argstr="--forceRPPoint %s", sep=",",), + forceVN4Point=dict(argstr="--forceVN4Point %s", sep=",",), + houghEyeDetectorMode=dict(argstr="--houghEyeDetectorMode %d",), + inputLandmarksEMSP=dict(argstr="--inputLandmarksEMSP %s", extensions=None,), + inputTemplateModel=dict(argstr="--inputTemplateModel %s", extensions=None,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + interpolationMode=dict(argstr="--interpolationMode %s",), + mspQualityLevel=dict(argstr="--mspQualityLevel %d",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + otsuPercentileThreshold=dict(argstr="--otsuPercentileThreshold %f",), outputLandmarksInACPCAlignedSpace=dict( - argstr='--outputLandmarksInACPCAlignedSpace %s', - hash_files=False, + argstr="--outputLandmarksInACPCAlignedSpace %s", hash_files=False, ), outputLandmarksInInputSpace=dict( - argstr='--outputLandmarksInInputSpace %s', - hash_files=False, - ), - outputMRML=dict( - argstr='--outputMRML %s', - hash_files=False, + argstr="--outputLandmarksInInputSpace %s", hash_files=False, ), + outputMRML=dict(argstr="--outputMRML %s", hash_files=False,), outputResampledVolume=dict( - argstr='--outputResampledVolume %s', - hash_files=False, - ), - outputTransform=dict( - argstr='--outputTransform %s', - hash_files=False, + argstr="--outputResampledVolume %s", hash_files=False, ), + outputTransform=dict(argstr="--outputTransform %s", hash_files=False,), outputUntransformedClippedVolume=dict( - argstr='--outputUntransformedClippedVolume %s', - hash_files=False, + argstr="--outputUntransformedClippedVolume %s", hash_files=False, ), outputVerificationScript=dict( - argstr='--outputVerificationScript %s', - hash_files=False, - ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, + argstr="--outputVerificationScript %s", hash_files=False, ), - rVN4=dict(argstr='--rVN4 %f', ), - rac=dict(argstr='--rac %f', ), - rescaleIntensities=dict(argstr='--rescaleIntensities ', ), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + rVN4=dict(argstr="--rVN4 %f",), + rac=dict(argstr="--rac %f",), + rescaleIntensities=dict(argstr="--rescaleIntensities ",), rescaleIntensitiesOutputRange=dict( - argstr='--rescaleIntensitiesOutputRange %s', - sep=',', - ), - resultsDir=dict( - argstr='--resultsDir %s', - hash_files=False, - ), - rmpj=dict(argstr='--rmpj %f', ), - rpc=dict(argstr='--rpc %f', ), - trimRescaledIntensities=dict(argstr='--trimRescaledIntensities %f', ), - verbose=dict(argstr='--verbose ', ), - writeBranded2DImage=dict( - argstr='--writeBranded2DImage %s', - hash_files=False, - ), - writedebuggingImagesLevel=dict( - argstr='--writedebuggingImagesLevel %d', ), + argstr="--rescaleIntensitiesOutputRange %s", sep=",", + ), + resultsDir=dict(argstr="--resultsDir %s", hash_files=False,), + rmpj=dict(argstr="--rmpj %f",), + rpc=dict(argstr="--rpc %f",), + trimRescaledIntensities=dict(argstr="--trimRescaledIntensities %f",), + verbose=dict(argstr="--verbose ",), + writeBranded2DImage=dict(argstr="--writeBranded2DImage %s", hash_files=False,), + 
writedebuggingImagesLevel=dict(argstr="--writedebuggingImagesLevel %d",), ) inputs = BRAINSConstellationDetector.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSConstellationDetector_outputs(): output_map = dict( - outputLandmarksInACPCAlignedSpace=dict(extensions=None, ), - outputLandmarksInInputSpace=dict(extensions=None, ), - outputMRML=dict(extensions=None, ), - outputResampledVolume=dict(extensions=None, ), - outputTransform=dict(extensions=None, ), - outputUntransformedClippedVolume=dict(extensions=None, ), - outputVerificationScript=dict(extensions=None, ), - outputVolume=dict(extensions=None, ), + outputLandmarksInACPCAlignedSpace=dict(extensions=None,), + outputLandmarksInInputSpace=dict(extensions=None,), + outputMRML=dict(extensions=None,), + outputResampledVolume=dict(extensions=None,), + outputTransform=dict(extensions=None,), + outputUntransformedClippedVolume=dict(extensions=None,), + outputVerificationScript=dict(extensions=None,), + outputVolume=dict(extensions=None,), resultsDir=dict(), - writeBranded2DImage=dict(extensions=None, ), + writeBranded2DImage=dict(extensions=None,), ) outputs = BRAINSConstellationDetector.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py index 5d74d787b5..06bf0165ca 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py @@ -4,43 +4,27 @@ def test_BRAINSCreateLabelMapFromProbabilityMaps_inputs(): input_map = dict( - args=dict(argstr='%s', ), - cleanLabelVolume=dict( - argstr='--cleanLabelVolume %s', - hash_files=False, - ), - dirtyLabelVolume=dict( - argstr='--dirtyLabelVolume %s', - hash_files=False, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - foregroundPriors=dict( - argstr='--foregroundPriors %s', - sep=',', - ), - inclusionThreshold=dict(argstr='--inclusionThreshold %f', ), - inputProbabilityVolume=dict(argstr='--inputProbabilityVolume %s...', ), - nonAirRegionMask=dict( - argstr='--nonAirRegionMask %s', - extensions=None, - ), - priorLabelCodes=dict( - argstr='--priorLabelCodes %s', - sep=',', - ), + args=dict(argstr="%s",), + cleanLabelVolume=dict(argstr="--cleanLabelVolume %s", hash_files=False,), + dirtyLabelVolume=dict(argstr="--dirtyLabelVolume %s", hash_files=False,), + environ=dict(nohash=True, usedefault=True,), + foregroundPriors=dict(argstr="--foregroundPriors %s", sep=",",), + inclusionThreshold=dict(argstr="--inclusionThreshold %f",), + inputProbabilityVolume=dict(argstr="--inputProbabilityVolume %s...",), + nonAirRegionMask=dict(argstr="--nonAirRegionMask %s", extensions=None,), + priorLabelCodes=dict(argstr="--priorLabelCodes %s", sep=",",), ) inputs = BRAINSCreateLabelMapFromProbabilityMaps.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSCreateLabelMapFromProbabilityMaps_outputs(): output_map = dict( - cleanLabelVolume=dict(extensions=None, ), - dirtyLabelVolume=dict(extensions=None, ), + cleanLabelVolume=dict(extensions=None,), + dirtyLabelVolume=dict(extensions=None,), ) outputs = 
BRAINSCreateLabelMapFromProbabilityMaps.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py index 4731029fb1..c72579c470 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py @@ -4,41 +4,35 @@ def test_BRAINSCut_inputs(): input_map = dict( - NoTrainingVectorShuffling=dict( - argstr='--NoTrainingVectorShuffling ', ), - applyModel=dict(argstr='--applyModel ', ), - args=dict(argstr='%s', ), - computeSSEOn=dict(argstr='--computeSSEOn ', ), - createVectors=dict(argstr='--createVectors ', ), - environ=dict( - nohash=True, - usedefault=True, - ), - generateProbability=dict(argstr='--generateProbability ', ), - histogramEqualization=dict(argstr='--histogramEqualization ', ), - method=dict(argstr='--method %s', ), + NoTrainingVectorShuffling=dict(argstr="--NoTrainingVectorShuffling ",), + applyModel=dict(argstr="--applyModel ",), + args=dict(argstr="%s",), + computeSSEOn=dict(argstr="--computeSSEOn ",), + createVectors=dict(argstr="--createVectors ",), + environ=dict(nohash=True, usedefault=True,), + generateProbability=dict(argstr="--generateProbability ",), + histogramEqualization=dict(argstr="--histogramEqualization ",), + method=dict(argstr="--method %s",), modelConfigurationFilename=dict( - argstr='--modelConfigurationFilename %s', - extensions=None, - ), - modelFilename=dict(argstr='--modelFilename %s', ), - multiStructureThreshold=dict(argstr='--multiStructureThreshold ', ), - netConfiguration=dict( - argstr='--netConfiguration %s', - extensions=None, + argstr="--modelConfigurationFilename %s", extensions=None, ), - numberOfTrees=dict(argstr='--numberOfTrees %d', ), - randomTreeDepth=dict(argstr='--randomTreeDepth %d', ), - trainModel=dict(argstr='--trainModel ', ), - trainModelStartIndex=dict(argstr='--trainModelStartIndex %d', ), - validate=dict(argstr='--validate ', ), - verbose=dict(argstr='--verbose %d', ), + modelFilename=dict(argstr="--modelFilename %s",), + multiStructureThreshold=dict(argstr="--multiStructureThreshold ",), + netConfiguration=dict(argstr="--netConfiguration %s", extensions=None,), + numberOfTrees=dict(argstr="--numberOfTrees %d",), + randomTreeDepth=dict(argstr="--randomTreeDepth %d",), + trainModel=dict(argstr="--trainModel ",), + trainModelStartIndex=dict(argstr="--trainModelStartIndex %d",), + validate=dict(argstr="--validate ",), + verbose=dict(argstr="--verbose %d",), ) inputs = BRAINSCut.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSCut_outputs(): output_map = dict() outputs = BRAINSCut.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py index 85364618b8..7f91e4e803 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py @@ -4,38 +4,32 @@ def test_BRAINSMultiSTAPLE_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), inputCompositeT1Volume=dict( - argstr='--inputCompositeT1Volume %s', - extensions=None, + argstr="--inputCompositeT1Volume %s", 
extensions=None, ), - inputLabelVolume=dict(argstr='--inputLabelVolume %s...', ), - inputTransform=dict(argstr='--inputTransform %s...', ), - labelForUndecidedPixels=dict(argstr='--labelForUndecidedPixels %d', ), + inputLabelVolume=dict(argstr="--inputLabelVolume %s...",), + inputTransform=dict(argstr="--inputTransform %s...",), + labelForUndecidedPixels=dict(argstr="--labelForUndecidedPixels %d",), outputConfusionMatrix=dict( - argstr='--outputConfusionMatrix %s', - hash_files=False, - ), - outputMultiSTAPLE=dict( - argstr='--outputMultiSTAPLE %s', - hash_files=False, + argstr="--outputConfusionMatrix %s", hash_files=False, ), - resampledVolumePrefix=dict(argstr='--resampledVolumePrefix %s', ), - skipResampling=dict(argstr='--skipResampling ', ), + outputMultiSTAPLE=dict(argstr="--outputMultiSTAPLE %s", hash_files=False,), + resampledVolumePrefix=dict(argstr="--resampledVolumePrefix %s",), + skipResampling=dict(argstr="--skipResampling ",), ) inputs = BRAINSMultiSTAPLE.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSMultiSTAPLE_outputs(): output_map = dict( - outputConfusionMatrix=dict(extensions=None, ), - outputMultiSTAPLE=dict(extensions=None, ), + outputConfusionMatrix=dict(extensions=None,), + outputMultiSTAPLE=dict(extensions=None,), ) outputs = BRAINSMultiSTAPLE.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py index c4b68f598b..69562cc9ce 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py @@ -4,42 +4,30 @@ def test_BRAINSROIAuto_inputs(): input_map = dict( - ROIAutoDilateSize=dict(argstr='--ROIAutoDilateSize %f', ), - args=dict(argstr='%s', ), - closingSize=dict(argstr='--closingSize %f', ), - cropOutput=dict(argstr='--cropOutput ', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - maskOutput=dict(argstr='--maskOutput ', ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - otsuPercentileThreshold=dict(argstr='--otsuPercentileThreshold %f', ), - outputROIMaskVolume=dict( - argstr='--outputROIMaskVolume %s', - hash_files=False, - ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), - outputVolumePixelType=dict(argstr='--outputVolumePixelType %s', ), - thresholdCorrectionFactor=dict( - argstr='--thresholdCorrectionFactor %f', ), + ROIAutoDilateSize=dict(argstr="--ROIAutoDilateSize %f",), + args=dict(argstr="%s",), + closingSize=dict(argstr="--closingSize %f",), + cropOutput=dict(argstr="--cropOutput ",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + maskOutput=dict(argstr="--maskOutput ",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + otsuPercentileThreshold=dict(argstr="--otsuPercentileThreshold %f",), + outputROIMaskVolume=dict(argstr="--outputROIMaskVolume %s", hash_files=False,), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + outputVolumePixelType=dict(argstr="--outputVolumePixelType %s",), + thresholdCorrectionFactor=dict(argstr="--thresholdCorrectionFactor %f",), ) inputs = BRAINSROIAuto.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert 
getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSROIAuto_outputs(): output_map = dict( - outputROIMaskVolume=dict(extensions=None, ), - outputVolume=dict(extensions=None, ), + outputROIMaskVolume=dict(extensions=None,), outputVolume=dict(extensions=None,), ) outputs = BRAINSROIAuto.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py index 859af9132b..fd28644cdc 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py @@ -4,38 +4,22 @@ def test_BinaryMaskEditorBasedOnLandmarks_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputBinaryVolume=dict( - argstr='--inputBinaryVolume %s', - extensions=None, - ), - inputLandmarkNames=dict( - argstr='--inputLandmarkNames %s', - sep=',', - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputBinaryVolume=dict(argstr="--inputBinaryVolume %s", extensions=None,), + inputLandmarkNames=dict(argstr="--inputLandmarkNames %s", sep=",",), inputLandmarkNamesForObliquePlane=dict( - argstr='--inputLandmarkNamesForObliquePlane %s', - sep=',', + argstr="--inputLandmarkNamesForObliquePlane %s", sep=",", ), inputLandmarksFilename=dict( - argstr='--inputLandmarksFilename %s', - extensions=None, - ), - outputBinaryVolume=dict( - argstr='--outputBinaryVolume %s', - hash_files=False, + argstr="--inputLandmarksFilename %s", extensions=None, ), + outputBinaryVolume=dict(argstr="--outputBinaryVolume %s", hash_files=False,), setCutDirectionForLandmark=dict( - argstr='--setCutDirectionForLandmark %s', - sep=',', + argstr="--setCutDirectionForLandmark %s", sep=",", ), setCutDirectionForObliquePlane=dict( - argstr='--setCutDirectionForObliquePlane %s', - sep=',', + argstr="--setCutDirectionForObliquePlane %s", sep=",", ), ) inputs = BinaryMaskEditorBasedOnLandmarks.input_spec() @@ -43,8 +27,10 @@ def test_BinaryMaskEditorBasedOnLandmarks_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BinaryMaskEditorBasedOnLandmarks_outputs(): - output_map = dict(outputBinaryVolume=dict(extensions=None, ), ) + output_map = dict(outputBinaryVolume=dict(extensions=None,),) outputs = BinaryMaskEditorBasedOnLandmarks.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py index 8c006051a7..2fe0fc16ce 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py @@ -4,34 +4,27 @@ def test_ESLR_inputs(): input_map = dict( - args=dict(argstr='%s', ), - closingSize=dict(argstr='--closingSize %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - high=dict(argstr='--high %d', ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - low=dict(argstr='--low %d', ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - openingSize=dict(argstr='--openingSize %d', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), - preserveOutside=dict(argstr='--preserveOutside ', ), - 
safetySize=dict(argstr='--safetySize %d', ), + args=dict(argstr="%s",), + closingSize=dict(argstr="--closingSize %d",), + environ=dict(nohash=True, usedefault=True,), + high=dict(argstr="--high %d",), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + low=dict(argstr="--low %d",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + openingSize=dict(argstr="--openingSize %d",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + preserveOutside=dict(argstr="--preserveOutside ",), + safetySize=dict(argstr="--safetySize %d",), ) inputs = ESLR.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ESLR_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = ESLR.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/testing/featuredetection.py b/nipype/interfaces/semtools/testing/featuredetection.py index e8f332c0a6..19e5076b0a 100644 --- a/nipype/interfaces/semtools/testing/featuredetection.py +++ b/nipype/interfaces/semtools/testing/featuredetection.py @@ -3,16 +3,28 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os class SphericalCoordinateGenerationInputSpec(CommandLineInputSpec): inputAtlasImage = File( - desc="Input atlas image", exists=True, argstr="--inputAtlasImage %s") + desc="Input atlas image", exists=True, argstr="--inputAtlasImage %s" + ) outputPath = traits.Str( - desc="Output path for rho, phi and theta images", - argstr="--outputPath %s") + desc="Output path for rho, phi and theta images", argstr="--outputPath %s" + ) class SphericalCoordinateGenerationOutputSpec(TraitedSpec): diff --git a/nipype/interfaces/semtools/testing/generateaveragelmkfile.py b/nipype/interfaces/semtools/testing/generateaveragelmkfile.py index bbb414c366..3995a9b73d 100644 --- a/nipype/interfaces/semtools/testing/generateaveragelmkfile.py +++ b/nipype/interfaces/semtools/testing/generateaveragelmkfile.py @@ -3,7 +3,18 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os @@ -12,21 +23,22 @@ class GenerateAverageLmkFileInputSpec(CommandLineInputSpec): traits.Str, desc="Input landmark files names (.fcsv or .wts)", sep=",", - argstr="--inputLandmarkFiles %s") + argstr="--inputLandmarkFiles %s", + ) outputLandmarkFile = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Ouput landmark file name that includes average values for landmarks (.fcsv or .wts)", - argstr="--outputLandmarkFile %s") + desc="Ouput landmark file 
name that includes average values for landmarks (.fcsv or .wts)", + argstr="--outputLandmarkFile %s", + ) class GenerateAverageLmkFileOutputSpec(TraitedSpec): outputLandmarkFile = File( - desc= - "Ouput landmark file name that includes average values for landmarks (.fcsv or .wts)", - exists=True) + desc="Ouput landmark file name that includes average values for landmarks (.fcsv or .wts)", + exists=True, + ) class GenerateAverageLmkFile(SEMLikeCommandLine): @@ -43,5 +55,5 @@ class GenerateAverageLmkFile(SEMLikeCommandLine): input_spec = GenerateAverageLmkFileInputSpec output_spec = GenerateAverageLmkFileOutputSpec _cmd = " GenerateAverageLmkFile " - _outputs_filenames = {'outputLandmarkFile': 'outputLandmarkFile'} + _outputs_filenames = {"outputLandmarkFile": "outputLandmarkFile"} _redirect_x = False diff --git a/nipype/interfaces/semtools/testing/landmarkscompare.py b/nipype/interfaces/semtools/testing/landmarkscompare.py index 872d6d0df0..066a92f24b 100644 --- a/nipype/interfaces/semtools/testing/landmarkscompare.py +++ b/nipype/interfaces/semtools/testing/landmarkscompare.py @@ -3,7 +3,18 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os @@ -11,15 +22,17 @@ class LandmarksCompareInputSpec(CommandLineInputSpec): inputLandmarkFile1 = File( desc="First input landmark file (.fcsv or .wts)", exists=True, - argstr="--inputLandmarkFile1 %s") + argstr="--inputLandmarkFile1 %s", + ) inputLandmarkFile2 = File( desc="Second input landmark file (.fcsv or .wts)", exists=True, - argstr="--inputLandmarkFile2 %s") + argstr="--inputLandmarkFile2 %s", + ) tolerance = traits.Float( - desc= - "The maximum error (in mm) allowed in each direction of a landmark", - argstr="--tolerance %f") + desc="The maximum error (in mm) allowed in each direction of a landmark", + argstr="--tolerance %f", + ) class LandmarksCompareOutputSpec(TraitedSpec): diff --git a/nipype/interfaces/semtools/tests/test_auto_DWICompare.py b/nipype/interfaces/semtools/tests/test_auto_DWICompare.py index 305d60b6aa..a9a270d6a5 100644 --- a/nipype/interfaces/semtools/tests/test_auto_DWICompare.py +++ b/nipype/interfaces/semtools/tests/test_auto_DWICompare.py @@ -4,25 +4,18 @@ def test_DWICompare_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume1=dict( - argstr='--inputVolume1 %s', - extensions=None, - ), - inputVolume2=dict( - argstr='--inputVolume2 %s', - extensions=None, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputVolume1=dict(argstr="--inputVolume1 %s", extensions=None,), + inputVolume2=dict(argstr="--inputVolume2 %s", extensions=None,), ) inputs = DWICompare.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DWICompare_outputs(): output_map = dict() outputs = DWICompare.output_spec() diff --git a/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py b/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py index 9140141254..f8f099104d 
100644 --- a/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py +++ b/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py @@ -4,26 +4,19 @@ def test_DWISimpleCompare_inputs(): input_map = dict( - args=dict(argstr='%s', ), - checkDWIData=dict(argstr='--checkDWIData ', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume1=dict( - argstr='--inputVolume1 %s', - extensions=None, - ), - inputVolume2=dict( - argstr='--inputVolume2 %s', - extensions=None, - ), + args=dict(argstr="%s",), + checkDWIData=dict(argstr="--checkDWIData ",), + environ=dict(nohash=True, usedefault=True,), + inputVolume1=dict(argstr="--inputVolume1 %s", extensions=None,), + inputVolume2=dict(argstr="--inputVolume2 %s", extensions=None,), ) inputs = DWISimpleCompare.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DWISimpleCompare_outputs(): output_map = dict() outputs = DWISimpleCompare.output_spec() diff --git a/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py b/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py index 1cdeab73f7..99d8d5d226 100644 --- a/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py +++ b/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py @@ -4,27 +4,20 @@ def test_GenerateCsfClippedFromClassifiedImage_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputCassifiedVolume=dict( - argstr='--inputCassifiedVolume %s', - extensions=None, - ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputCassifiedVolume=dict(argstr="--inputCassifiedVolume %s", extensions=None,), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), ) inputs = GenerateCsfClippedFromClassifiedImage.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_GenerateCsfClippedFromClassifiedImage_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = GenerateCsfClippedFromClassifiedImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/__init__.py b/nipype/interfaces/semtools/utilities/__init__.py index b59f373cf9..2209064909 100644 --- a/nipype/interfaces/semtools/utilities/__init__.py +++ b/nipype/interfaces/semtools/utilities/__init__.py @@ -1,10 +1,23 @@ # -*- coding: utf-8 -*- from .brains import ( - BRAINSConstellationModeler, landmarksConstellationWeights, - BRAINSTrimForegroundInDirection, BRAINSLmkTransform, BRAINSMush, - BRAINSTransformConvert, landmarksConstellationAligner, BRAINSEyeDetector, - BRAINSLinearModelerEPCA, BRAINSInitializedControlPoints, - CleanUpOverlapLabels, BRAINSClipInferior, - GenerateLabelMapFromProbabilityMap, BRAINSAlignMSP, - BRAINSLandmarkInitializer, insertMidACPCpoint, BRAINSSnapShotWriter, - JointHistogram, ShuffleVectorsModule, ImageRegionPlotter) + BRAINSConstellationModeler, + landmarksConstellationWeights, + BRAINSTrimForegroundInDirection, + BRAINSLmkTransform, + BRAINSMush, + BRAINSTransformConvert, + landmarksConstellationAligner, + BRAINSEyeDetector, + 
BRAINSLinearModelerEPCA, + BRAINSInitializedControlPoints, + CleanUpOverlapLabels, + BRAINSClipInferior, + GenerateLabelMapFromProbabilityMap, + BRAINSAlignMSP, + BRAINSLandmarkInitializer, + insertMidACPCpoint, + BRAINSSnapShotWriter, + JointHistogram, + ShuffleVectorsModule, + ImageRegionPlotter, +) diff --git a/nipype/interfaces/semtools/utilities/brains.py b/nipype/interfaces/semtools/utilities/brains.py index abc696b5d9..59a61a1137 100644 --- a/nipype/interfaces/semtools/utilities/brains.py +++ b/nipype/interfaces/semtools/utilities/brains.py @@ -5,82 +5,93 @@ import os -from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, - TraitedSpec, File, Directory, traits, isdefined, - InputMultiPath, OutputMultiPath) +from ...base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) class BRAINSConstellationModelerInputSpec(CommandLineInputSpec): verbose = traits.Bool( desc=", Show more verbose output, ", - argstr="--verbose ") + argstr="--verbose ", + ) inputTrainingList = File( - desc= - ", Setup file, giving all parameters for training up a template model for each landmark., ", + desc=", Setup file, giving all parameters for training up a template model for each landmark., ", exists=True, - argstr="--inputTrainingList %s") + argstr="--inputTrainingList %s", + ) outputModel = traits.Either( traits.Bool, File(), hash_files=False, - desc= - ", The full filename of the output model file., ", - argstr="--outputModel %s") + desc=", The full filename of the output model file., ", + argstr="--outputModel %s", + ) saveOptimizedLandmarks = traits.Bool( - desc= - ", Flag to make a new subject-specific landmark definition file in the same format produced by Slicer3 with the optimized landmark (the detected RP, AC, and PC) in it. Useful to tighten the variances in the ConstellationModeler., ", - argstr="--saveOptimizedLandmarks ") + desc=", Flag to make a new subject-specific landmark definition file in the same format produced by Slicer3 with the optimized landmark (the detected RP, AC, and PC) in it. Useful to tighten the variances in the ConstellationModeler., ", + argstr="--saveOptimizedLandmarks ", + ) optimizedLandmarksFilenameExtender = traits.Str( - desc= - ", If the trainingList is (indexFullPathName) and contains landmark data filenames [path]/[filename].fcsv , make the optimized landmarks filenames out of [path]/[filename](thisExtender) and the optimized version of the input trainingList out of (indexFullPathName)(thisExtender) , when you rewrite all the landmarks according to the saveOptimizedLandmarks flag., ", - argstr="--optimizedLandmarksFilenameExtender %s") + desc=", If the trainingList is (indexFullPathName) and contains landmark data filenames [path]/[filename].fcsv , make the optimized landmarks filenames out of [path]/[filename](thisExtender) and the optimized version of the input trainingList out of (indexFullPathName)(thisExtender) , when you rewrite all the landmarks according to the saveOptimizedLandmarks flag., ", + argstr="--optimizedLandmarksFilenameExtender %s", + ) resultsDir = traits.Either( traits.Bool, Directory(), hash_files=False, - desc= - ", The directory for the results to be written., ", - argstr="--resultsDir %s") + desc=", The directory for the results to be written., ", + argstr="--resultsDir %s", + ) mspQualityLevel = traits.Int( - desc= - ", Flag cotrols how agressive the MSP is estimated. 
0=quick estimate (9 seconds), 1=normal estimate (11 seconds), 2=great estimate (22 seconds), 3=best estimate (58 seconds)., ", - argstr="--mspQualityLevel %d") + desc=", Flag cotrols how agressive the MSP is estimated. 0=quick estimate (9 seconds), 1=normal estimate (11 seconds), 2=great estimate (22 seconds), 3=best estimate (58 seconds)., ", + argstr="--mspQualityLevel %d", + ) rescaleIntensities = traits.Bool( - desc= - ", Flag to turn on rescaling image intensities on input., ", - argstr="--rescaleIntensities ") + desc=", Flag to turn on rescaling image intensities on input., ", + argstr="--rescaleIntensities ", + ) trimRescaledIntensities = traits.Float( - desc= - ", Turn on clipping the rescaled image one-tailed on input. Units of standard deviations above the mean. Very large values are very permissive. Non-positive value turns clipping off. Defaults to removing 0.00001 of a normal tail above the mean., ", - argstr="--trimRescaledIntensities %f") + desc=", Turn on clipping the rescaled image one-tailed on input. Units of standard deviations above the mean. Very large values are very permissive. Non-positive value turns clipping off. Defaults to removing 0.00001 of a normal tail above the mean., ", + argstr="--trimRescaledIntensities %f", + ) rescaleIntensitiesOutputRange = InputMultiPath( traits.Int, - desc= - ", This pair of integers gives the lower and upper bounds on the signal portion of the output image. Out-of-field voxels are taken from BackgroundFillValue., ", + desc=", This pair of integers gives the lower and upper bounds on the signal portion of the output image. Out-of-field voxels are taken from BackgroundFillValue., ", sep=",", - argstr="--rescaleIntensitiesOutputRange %s") + argstr="--rescaleIntensitiesOutputRange %s", + ) BackgroundFillValue = traits.Str( - desc= - "Fill the background of image with specified short int value. Enter number or use BIGNEG for a large negative number.", - argstr="--BackgroundFillValue %s") + desc="Fill the background of image with specified short int value. Enter number or use BIGNEG for a large negative number.", + argstr="--BackgroundFillValue %s", + ) writedebuggingImagesLevel = traits.Int( - desc= - ", This flag controls if debugging images are produced. By default value of 0 is no images. Anything greater than zero will be increasing level of debugging images., ", - argstr="--writedebuggingImagesLevel %d") + desc=", This flag controls if debugging images are produced. By default value of 0 is no images. 
Anything greater than zero will be increasing level of debugging images., ", + argstr="--writedebuggingImagesLevel %d", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class BRAINSConstellationModelerOutputSpec(TraitedSpec): outputModel = File( - desc= - ", The full filename of the output model file., ", - exists=True) + desc=", The full filename of the output model file., ", + exists=True, + ) resultsDir = Directory( - desc= - ", The directory for the results to be written., ", - exists=True) + desc=", The directory for the results to be written., ", + exists=True, + ) class BRAINSConstellationModeler(SEMLikeCommandLine): @@ -95,41 +106,40 @@ class BRAINSConstellationModeler(SEMLikeCommandLine): input_spec = BRAINSConstellationModelerInputSpec output_spec = BRAINSConstellationModelerOutputSpec _cmd = " BRAINSConstellationModeler " - _outputs_filenames = { - 'outputModel': 'outputModel.mdl', - 'resultsDir': 'resultsDir' - } + _outputs_filenames = {"outputModel": "outputModel.mdl", "resultsDir": "resultsDir"} _redirect_x = False class landmarksConstellationWeightsInputSpec(CommandLineInputSpec): inputTrainingList = File( - desc= - ", Setup file, giving all parameters for training up a Weight list for landmark., ", + desc=", Setup file, giving all parameters for training up a Weight list for landmark., ", exists=True, - argstr="--inputTrainingList %s") + argstr="--inputTrainingList %s", + ) inputTemplateModel = File( desc="User-specified template model., ", exists=True, - argstr="--inputTemplateModel %s") + argstr="--inputTemplateModel %s", + ) LLSModel = File( desc="Linear least squares model filename in HD5 format", exists=True, - argstr="--LLSModel %s") + argstr="--LLSModel %s", + ) outputWeightsList = traits.Either( traits.Bool, File(), hash_files=False, - desc= - ", The filename of a csv file which is a list of landmarks and their corresponding weights., ", - argstr="--outputWeightsList %s") + desc=", The filename of a csv file which is a list of landmarks and their corresponding weights., ", + argstr="--outputWeightsList %s", + ) class landmarksConstellationWeightsOutputSpec(TraitedSpec): outputWeightsList = File( - desc= - ", The filename of a csv file which is a list of landmarks and their corresponding weights., ", - exists=True) + desc=", The filename of a csv file which is a list of landmarks and their corresponding weights., ", + exists=True, + ) class landmarksConstellationWeights(SEMLikeCommandLine): @@ -144,7 +154,7 @@ class landmarksConstellationWeights(SEMLikeCommandLine): input_spec = landmarksConstellationWeightsInputSpec output_spec = landmarksConstellationWeightsOutputSpec _cmd = " landmarksConstellationWeights " - _outputs_filenames = {'outputWeightsList': 'outputWeightsList.wts'} + _outputs_filenames = {"outputWeightsList": "outputWeightsList.wts"} _redirect_x = False @@ -152,44 +162,46 @@ class BRAINSTrimForegroundInDirectionInputSpec(CommandLineInputSpec): inputVolume = File( desc="Input image to trim off the neck (and also air-filling noise.)", exists=True, - argstr="--inputVolume %s") + argstr="--inputVolume %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Output image with neck and air-filling noise trimmed isotropic image with AC at center of image.", - argstr="--outputVolume %s") + desc="Output image with neck and air-filling noise trimmed isotropic image with AC at center of image.", + 
argstr="--outputVolume %s", + ) directionCode = traits.Int( - desc= - ", This flag chooses which dimension to compare. The sign lets you flip direction., ", - argstr="--directionCode %d") + desc=", This flag chooses which dimension to compare. The sign lets you flip direction., ", + argstr="--directionCode %d", + ) otsuPercentileThreshold = traits.Float( - desc= - ", This is a parameter to FindLargestForegroundFilledMask, which is employed to trim off air-filling noise., ", - argstr="--otsuPercentileThreshold %f") + desc=", This is a parameter to FindLargestForegroundFilledMask, which is employed to trim off air-filling noise., ", + argstr="--otsuPercentileThreshold %f", + ) closingSize = traits.Int( - desc= - ", This is a parameter to FindLargestForegroundFilledMask, ", - argstr="--closingSize %d") + desc=", This is a parameter to FindLargestForegroundFilledMask, ", + argstr="--closingSize %d", + ) headSizeLimit = traits.Float( - desc= - ", Use this to vary from the command line our search for how much upper tissue is head for the center-of-mass calculation. Units are CCs, not cubic millimeters., ", - argstr="--headSizeLimit %f") + desc=", Use this to vary from the command line our search for how much upper tissue is head for the center-of-mass calculation. Units are CCs, not cubic millimeters., ", + argstr="--headSizeLimit %f", + ) BackgroundFillValue = traits.Str( - desc= - "Fill the background of image with specified short int value. Enter number or use BIGNEG for a large negative number.", - argstr="--BackgroundFillValue %s") + desc="Fill the background of image with specified short int value. Enter number or use BIGNEG for a large negative number.", + argstr="--BackgroundFillValue %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class BRAINSTrimForegroundInDirectionOutputSpec(TraitedSpec): outputVolume = File( - desc= - "Output image with neck and air-filling noise trimmed isotropic image with AC at center of image.", - exists=True) + desc="Output image with neck and air-filling noise trimmed isotropic image with AC at center of image.", + exists=True, + ) class BRAINSTrimForegroundInDirection(SEMLikeCommandLine): @@ -208,7 +220,7 @@ class BRAINSTrimForegroundInDirection(SEMLikeCommandLine): input_spec = BRAINSTrimForegroundInDirectionInputSpec output_spec = BRAINSTrimForegroundInDirectionOutputSpec _cmd = " BRAINSTrimForegroundInDirection " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} _redirect_x = False @@ -216,42 +228,51 @@ class BRAINSLmkTransformInputSpec(CommandLineInputSpec): inputMovingLandmarks = File( desc="Input Moving Landmark list file in fcsv, ", exists=True, - argstr="--inputMovingLandmarks %s") + argstr="--inputMovingLandmarks %s", + ) inputFixedLandmarks = File( desc="Input Fixed Landmark list file in fcsv, ", exists=True, - argstr="--inputFixedLandmarks %s") + argstr="--inputFixedLandmarks %s", + ) outputAffineTransform = traits.Either( traits.Bool, File(), hash_files=False, desc="The filename for the estimated affine transform, ", - argstr="--outputAffineTransform %s") + argstr="--outputAffineTransform %s", + ) inputMovingVolume = File( desc="The filename of input moving volume", exists=True, - argstr="--inputMovingVolume %s") + argstr="--inputMovingVolume %s", + ) inputReferenceVolume = File( desc="The filename of the reference volume", exists=True, - 
argstr="--inputReferenceVolume %s") + argstr="--inputReferenceVolume %s", + ) outputResampledVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="The filename of the output resampled volume", - argstr="--outputResampledVolume %s") + argstr="--outputResampledVolume %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class BRAINSLmkTransformOutputSpec(TraitedSpec): outputAffineTransform = File( desc="The filename for the estimated affine transform, ", - exists=True) + exists=True, + ) outputResampledVolume = File( - desc="The filename of the output resampled volume", exists=True) + desc="The filename of the output resampled volume", exists=True + ) class BRAINSLmkTransform(SEMLikeCommandLine): @@ -271,8 +292,8 @@ class BRAINSLmkTransform(SEMLikeCommandLine): output_spec = BRAINSLmkTransformOutputSpec _cmd = " BRAINSLmkTransform " _outputs_filenames = { - 'outputResampledVolume': 'outputResampledVolume.nii', - 'outputAffineTransform': 'outputAffineTransform.h5' + "outputResampledVolume": "outputResampledVolume.nii", + "outputAffineTransform": "outputAffineTransform.h5", } _redirect_x = False @@ -281,82 +302,95 @@ class BRAINSMushInputSpec(CommandLineInputSpec): inputFirstVolume = File( desc="Input image (1) for mixture optimization", exists=True, - argstr="--inputFirstVolume %s") + argstr="--inputFirstVolume %s", + ) inputSecondVolume = File( desc="Input image (2) for mixture optimization", exists=True, - argstr="--inputSecondVolume %s") + argstr="--inputSecondVolume %s", + ) inputMaskVolume = File( desc="Input label image for mixture optimization", exists=True, - argstr="--inputMaskVolume %s") + argstr="--inputMaskVolume %s", + ) outputWeightsFile = traits.Either( traits.Bool, File(), hash_files=False, desc="Output Weights File", - argstr="--outputWeightsFile %s") + argstr="--outputWeightsFile %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="The MUSH image produced from the T1 and T2 weighted images", - argstr="--outputVolume %s") + argstr="--outputVolume %s", + ) outputMask = traits.Either( traits.Bool, File(), hash_files=False, desc="The brain volume mask generated from the MUSH image", - argstr="--outputMask %s") + argstr="--outputMask %s", + ) seed = InputMultiPath( traits.Int, desc="Seed Point for Brain Region Filling", sep=",", - argstr="--seed %s") + argstr="--seed %s", + ) desiredMean = traits.Float( desc="Desired mean within the mask for weighted sum of both images.", - argstr="--desiredMean %f") + argstr="--desiredMean %f", + ) desiredVariance = traits.Float( - desc= - "Desired variance within the mask for weighted sum of both images.", - argstr="--desiredVariance %f") + desc="Desired variance within the mask for weighted sum of both images.", + argstr="--desiredVariance %f", + ) lowerThresholdFactorPre = traits.Float( desc="Lower threshold factor for finding an initial brain mask", - argstr="--lowerThresholdFactorPre %f") + argstr="--lowerThresholdFactorPre %f", + ) upperThresholdFactorPre = traits.Float( desc="Upper threshold factor for finding an initial brain mask", - argstr="--upperThresholdFactorPre %f") + argstr="--upperThresholdFactorPre %f", + ) lowerThresholdFactor = traits.Float( desc="Lower threshold factor for defining the brain mask", - argstr="--lowerThresholdFactor %f") + argstr="--lowerThresholdFactor %f", + ) upperThresholdFactor = traits.Float( desc="Upper threshold factor for defining the 
brain mask", - argstr="--upperThresholdFactor %f") + argstr="--upperThresholdFactor %f", + ) boundingBoxSize = InputMultiPath( traits.Int, - desc= - "Size of the cubic bounding box mask used when no brain mask is present", + desc="Size of the cubic bounding box mask used when no brain mask is present", sep=",", - argstr="--boundingBoxSize %s") + argstr="--boundingBoxSize %s", + ) boundingBoxStart = InputMultiPath( traits.Int, - desc= - "XYZ point-coordinate for the start of the cubic bounding box mask used when no brain mask is present", + desc="XYZ point-coordinate for the start of the cubic bounding box mask used when no brain mask is present", sep=",", - argstr="--boundingBoxStart %s") + argstr="--boundingBoxStart %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class BRAINSMushOutputSpec(TraitedSpec): outputWeightsFile = File(desc="Output Weights File", exists=True) outputVolume = File( - desc="The MUSH image produced from the T1 and T2 weighted images", - exists=True) + desc="The MUSH image produced from the T1 and T2 weighted images", exists=True + ) outputMask = File( - desc="The brain volume mask generated from the MUSH image", - exists=True) + desc="The brain volume mask generated from the MUSH image", exists=True + ) class BRAINSMush(SEMLikeCommandLine): @@ -382,9 +416,9 @@ class BRAINSMush(SEMLikeCommandLine): output_spec = BRAINSMushOutputSpec _cmd = " BRAINSMush " _outputs_filenames = { - 'outputMask': 'outputMask.nii.gz', - 'outputWeightsFile': 'outputWeightsFile.txt', - 'outputVolume': 'outputVolume.nii.gz' + "outputMask": "outputMask.nii.gz", + "outputWeightsFile": "outputWeightsFile.txt", + "outputVolume": "outputVolume.nii.gz", } _redirect_x = False @@ -399,22 +433,21 @@ class BRAINSTransformConvertInputSpec(CommandLineInputSpec): "ScaleSkewVersor", "DisplacementField", "Same", - desc= - "The target transformation type. Must be conversion-compatible with the input transform type", - argstr="--outputTransformType %s") + desc="The target transformation type. Must be conversion-compatible with the input transform type", + argstr="--outputTransformType %s", + ) outputPrecisionType = traits.Enum( "double", "float", - desc= - "Precision type of the output transform. It can be either single precision or double precision", - argstr="--outputPrecisionType %s") + desc="Precision type of the output transform. 
It can be either single precision or double precision", + argstr="--outputPrecisionType %s", + ) displacementVolume = traits.Either( - traits.Bool, - File(), - hash_files=False, - argstr="--displacementVolume %s") + traits.Bool, File(), hash_files=False, argstr="--displacementVolume %s" + ) outputTransform = traits.Either( - traits.Bool, File(), hash_files=False, argstr="--outputTransform %s") + traits.Bool, File(), hash_files=False, argstr="--outputTransform %s" + ) class BRAINSTransformConvertOutputSpec(TraitedSpec): @@ -443,8 +476,8 @@ class BRAINSTransformConvert(SEMLikeCommandLine): output_spec = BRAINSTransformConvertOutputSpec _cmd = " BRAINSTransformConvert " _outputs_filenames = { - 'displacementVolume': 'displacementVolume.nii', - 'outputTransform': 'outputTransform.mat' + "displacementVolume": "displacementVolume.nii", + "outputTransform": "outputTransform.mat", } _redirect_x = False @@ -453,18 +486,19 @@ class landmarksConstellationAlignerInputSpec(CommandLineInputSpec): inputLandmarksPaired = File( desc="Input landmark file (.fcsv)", exists=True, - argstr="--inputLandmarksPaired %s") + argstr="--inputLandmarksPaired %s", + ) outputLandmarksPaired = traits.Either( traits.Bool, File(), hash_files=False, desc="Output landmark file (.fcsv)", - argstr="--outputLandmarksPaired %s") + argstr="--outputLandmarksPaired %s", + ) class landmarksConstellationAlignerOutputSpec(TraitedSpec): - outputLandmarksPaired = File( - desc="Output landmark file (.fcsv)", exists=True) + outputLandmarksPaired = File(desc="Output landmark file (.fcsv)", exists=True) class landmarksConstellationAligner(SEMLikeCommandLine): @@ -481,24 +515,24 @@ class landmarksConstellationAligner(SEMLikeCommandLine): input_spec = landmarksConstellationAlignerInputSpec output_spec = landmarksConstellationAlignerOutputSpec _cmd = " landmarksConstellationAligner " - _outputs_filenames = {'outputLandmarksPaired': 'outputLandmarksPaired'} + _outputs_filenames = {"outputLandmarksPaired": "outputLandmarksPaired"} _redirect_x = False class BRAINSEyeDetectorInputSpec(CommandLineInputSpec): numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") - inputVolume = File( - desc="The input volume", exists=True, argstr="--inputVolume %s") + argstr="--numberOfThreads %d", + ) + inputVolume = File(desc="The input volume", exists=True, argstr="--inputVolume %s") outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="The output volume", - argstr="--outputVolume %s") - debugDir = traits.Str( - desc="A place for debug information", argstr="--debugDir %s") + argstr="--outputVolume %s", + ) + debugDir = traits.Str(desc="A place for debug information", argstr="--debugDir %s") class BRAINSEyeDetectorOutputSpec(TraitedSpec): @@ -519,7 +553,7 @@ class BRAINSEyeDetector(SEMLikeCommandLine): input_spec = BRAINSEyeDetectorInputSpec output_spec = BRAINSEyeDetectorOutputSpec _cmd = " BRAINSEyeDetector " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} _redirect_x = False @@ -527,10 +561,12 @@ class BRAINSLinearModelerEPCAInputSpec(CommandLineInputSpec): inputTrainingList = File( desc="Input Training Landmark List Filename, ", exists=True, - argstr="--inputTrainingList %s") + argstr="--inputTrainingList %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class 
BRAINSLinearModelerEPCAOutputSpec(TraitedSpec): @@ -558,31 +594,33 @@ class BRAINSLinearModelerEPCA(SEMLikeCommandLine): class BRAINSInitializedControlPointsInputSpec(CommandLineInputSpec): - inputVolume = File( - desc="Input Volume", exists=True, argstr="--inputVolume %s") + inputVolume = File(desc="Input Volume", exists=True, argstr="--inputVolume %s") outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Output Volume", - argstr="--outputVolume %s") + argstr="--outputVolume %s", + ) splineGridSize = InputMultiPath( traits.Int, - desc= - "The number of subdivisions of the BSpline Grid to be centered on the image space. Each dimension must have at least 3 subdivisions for the BSpline to be correctly computed. ", + desc="The number of subdivisions of the BSpline Grid to be centered on the image space. Each dimension must have at least 3 subdivisions for the BSpline to be correctly computed. ", sep=",", - argstr="--splineGridSize %s") + argstr="--splineGridSize %s", + ) permuteOrder = InputMultiPath( traits.Int, - desc= - "The permutation order for the images. The default is 0,1,2 (i.e. no permutation)", + desc="The permutation order for the images. The default is 0,1,2 (i.e. no permutation)", sep=",", - argstr="--permuteOrder %s") + argstr="--permuteOrder %s", + ) outputLandmarksFile = traits.Str( - desc="Output filename", argstr="--outputLandmarksFile %s") + desc="Output filename", argstr="--outputLandmarksFile %s" + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class BRAINSInitializedControlPointsOutputSpec(TraitedSpec): @@ -609,30 +647,29 @@ class BRAINSInitializedControlPoints(SEMLikeCommandLine): input_spec = BRAINSInitializedControlPointsInputSpec output_spec = BRAINSInitializedControlPointsOutputSpec _cmd = " BRAINSInitializedControlPoints " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} _redirect_x = False class CleanUpOverlapLabelsInputSpec(CommandLineInputSpec): inputBinaryVolumes = InputMultiPath( File(exists=True), - desc= - "The list of binary images to be checked and cleaned up. Order is important. Binary volume given first always wins out. ", - argstr="--inputBinaryVolumes %s...") + desc="The list of binary images to be checked and cleaned up. Order is important. Binary volume given first always wins out. ", + argstr="--inputBinaryVolumes %s...", + ) outputBinaryVolumes = traits.Either( traits.Bool, - InputMultiPath(File(), ), + InputMultiPath(File(),), hash_files=False, - desc= - "The output label map images, with integer values in it. Each label value specified in the inputLabels is combined into this output label map volume", - argstr="--outputBinaryVolumes %s...") + desc="The output label map images, with integer values in it. Each label value specified in the inputLabels is combined into this output label map volume", + argstr="--outputBinaryVolumes %s...", + ) class CleanUpOverlapLabelsOutputSpec(TraitedSpec): outputBinaryVolumes = OutputMultiPath( File(exists=True), - desc= - "The output label map images, with integer values in it. Each label value specified in the inputLabels is combined into this output label map volume" + desc="The output label map images, with integer values in it. 
Each label value specified in the inputLabels is combined into this output label map volume", ) @@ -652,7 +689,7 @@ class CleanUpOverlapLabels(SEMLikeCommandLine): input_spec = CleanUpOverlapLabelsInputSpec output_spec = CleanUpOverlapLabelsOutputSpec _cmd = " CleanUpOverlapLabels " - _outputs_filenames = {'outputBinaryVolumes': 'outputBinaryVolumes.nii'} + _outputs_filenames = {"outputBinaryVolumes": "outputBinaryVolumes.nii"} _redirect_x = False @@ -660,32 +697,34 @@ class BRAINSClipInferiorInputSpec(CommandLineInputSpec): inputVolume = File( desc="Input image to make a clipped short int copy from.", exists=True, - argstr="--inputVolume %s") + argstr="--inputVolume %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Output image, a short int copy of the upper portion of the input image, filled with BackgroundFillValue.", - argstr="--outputVolume %s") + desc="Output image, a short int copy of the upper portion of the input image, filled with BackgroundFillValue.", + argstr="--outputVolume %s", + ) acLowerBound = traits.Float( - desc= - ", When the input image to the output image, replace the image with the BackgroundFillValue everywhere below the plane This Far in physical units (millimeters) below (inferior to) the AC point (assumed to be the voxel field middle.) The oversize default was chosen to have no effect. Based on visualizing a thousand masks in the IPIG study, we recommend a limit no smaller than 80.0 mm., ", - argstr="--acLowerBound %f") + desc=", When the input image to the output image, replace the image with the BackgroundFillValue everywhere below the plane This Far in physical units (millimeters) below (inferior to) the AC point (assumed to be the voxel field middle.) The oversize default was chosen to have no effect. Based on visualizing a thousand masks in the IPIG study, we recommend a limit no smaller than 80.0 mm., ", + argstr="--acLowerBound %f", + ) BackgroundFillValue = traits.Str( - desc= - "Fill the background of image with specified short int value. Enter number or use BIGNEG for a large negative number.", - argstr="--BackgroundFillValue %s") + desc="Fill the background of image with specified short int value. 
Enter number or use BIGNEG for a large negative number.", + argstr="--BackgroundFillValue %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class BRAINSClipInferiorOutputSpec(TraitedSpec): outputVolume = File( - desc= - "Output image, a short int copy of the upper portion of the input image, filled with BackgroundFillValue.", - exists=True) + desc="Output image, a short int copy of the upper portion of the input image, filled with BackgroundFillValue.", + exists=True, + ) class BRAINSClipInferior(SEMLikeCommandLine): @@ -702,7 +741,7 @@ class BRAINSClipInferior(SEMLikeCommandLine): input_spec = BRAINSClipInferiorInputSpec output_spec = BRAINSClipInferiorOutputSpec _cmd = " BRAINSClipInferior " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} _redirect_x = False @@ -710,21 +749,25 @@ class GenerateLabelMapFromProbabilityMapInputSpec(CommandLineInputSpec): inputVolumes = InputMultiPath( File(exists=True), desc="The Input probaiblity images to be computed for lable maps", - argstr="--inputVolumes %s...") + argstr="--inputVolumes %s...", + ) outputLabelVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="The Input binary image for region of interest", - argstr="--outputLabelVolume %s") + argstr="--outputLabelVolume %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class GenerateLabelMapFromProbabilityMapOutputSpec(TraitedSpec): outputLabelVolume = File( - desc="The Input binary image for region of interest", exists=True) + desc="The Input binary image for region of interest", exists=True + ) class GenerateLabelMapFromProbabilityMap(SEMLikeCommandLine): @@ -743,7 +786,7 @@ class GenerateLabelMapFromProbabilityMap(SEMLikeCommandLine): input_spec = GenerateLabelMapFromProbabilityMapInputSpec output_spec = GenerateLabelMapFromProbabilityMapOutputSpec _cmd = " GenerateLabelMapFromProbabilityMap " - _outputs_filenames = {'outputLabelVolume': 'outputLabelVolume.nii.gz'} + _outputs_filenames = {"outputLabelVolume": "outputLabelVolume.nii.gz"} _redirect_x = False @@ -751,47 +794,51 @@ class BRAINSAlignMSPInputSpec(CommandLineInputSpec): inputVolume = File( desc=", The Image to be resampled, ", exists=True, - argstr="--inputVolume %s") + argstr="--inputVolume %s", + ) OutputresampleMSP = traits.Either( traits.Bool, File(), hash_files=False, desc=", The image to be output., ", - argstr="--OutputresampleMSP %s") + argstr="--OutputresampleMSP %s", + ) verbose = traits.Bool( - desc=", Show more verbose output, ", argstr="--verbose ") + desc=", Show more verbose output, ", argstr="--verbose " + ) resultsDir = traits.Either( traits.Bool, Directory(), hash_files=False, desc=", The directory for the results to be written., ", - argstr="--resultsDir %s") + argstr="--resultsDir %s", + ) writedebuggingImagesLevel = traits.Int( - desc= - ", This flag controls if debugging images are produced. By default value of 0 is no images. Anything greater than zero will be increasing level of debugging images., ", - argstr="--writedebuggingImagesLevel %d") + desc=", This flag controls if debugging images are produced. By default value of 0 is no images. 
Anything greater than zero will be increasing level of debugging images., ", + argstr="--writedebuggingImagesLevel %d", + ) mspQualityLevel = traits.Int( - desc= - ", Flag cotrols how agressive the MSP is estimated. 0=quick estimate (9 seconds), 1=normal estimate (11 seconds), 2=great estimate (22 seconds), 3=best estimate (58 seconds)., ", - argstr="--mspQualityLevel %d") + desc=", Flag cotrols how agressive the MSP is estimated. 0=quick estimate (9 seconds), 1=normal estimate (11 seconds), 2=great estimate (22 seconds), 3=best estimate (58 seconds)., ", + argstr="--mspQualityLevel %d", + ) rescaleIntensities = traits.Bool( - desc= - ", Flag to turn on rescaling image intensities on input., ", - argstr="--rescaleIntensities ") + desc=", Flag to turn on rescaling image intensities on input., ", + argstr="--rescaleIntensities ", + ) trimRescaledIntensities = traits.Float( - desc= - ", Turn on clipping the rescaled image one-tailed on input. Units of standard deviations above the mean. Very large values are very permissive. Non-positive value turns clipping off. Defaults to removing 0.00001 of a normal tail above the mean., ", - argstr="--trimRescaledIntensities %f") + desc=", Turn on clipping the rescaled image one-tailed on input. Units of standard deviations above the mean. Very large values are very permissive. Non-positive value turns clipping off. Defaults to removing 0.00001 of a normal tail above the mean., ", + argstr="--trimRescaledIntensities %f", + ) rescaleIntensitiesOutputRange = InputMultiPath( traits.Int, - desc= - ", This pair of integers gives the lower and upper bounds on the signal portion of the output image. Out-of-field voxels are taken from BackgroundFillValue., ", + desc=", This pair of integers gives the lower and upper bounds on the signal portion of the output image. Out-of-field voxels are taken from BackgroundFillValue., ", sep=",", - argstr="--rescaleIntensitiesOutputRange %s") + argstr="--rescaleIntensitiesOutputRange %s", + ) BackgroundFillValue = traits.Str( - desc= - "Fill the background of image with specified short int value. Enter number or use BIGNEG for a large negative number.", - argstr="--BackgroundFillValue %s") + desc="Fill the background of image with specified short int value. Enter number or use BIGNEG for a large negative number.", + argstr="--BackgroundFillValue %s", + ) interpolationMode = traits.Enum( "NearestNeighbor", "Linear", @@ -803,20 +850,23 @@ class BRAINSAlignMSPInputSpec(CommandLineInputSpec): "Welch", "Lanczos", "Blackman", - desc= - "Type of interpolation to be used when applying transform to moving volume. Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", - argstr="--interpolationMode %s") + desc="Type of interpolation to be used when applying transform to moving volume. 
Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", + argstr="--interpolationMode %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class BRAINSAlignMSPOutputSpec(TraitedSpec): OutputresampleMSP = File( - desc=", The image to be output., ", exists=True) + desc=", The image to be output., ", exists=True + ) resultsDir = Directory( desc=", The directory for the results to be written., ", - exists=True) + exists=True, + ) class BRAINSAlignMSP(SEMLikeCommandLine): @@ -832,8 +882,8 @@ class BRAINSAlignMSP(SEMLikeCommandLine): output_spec = BRAINSAlignMSPOutputSpec _cmd = " BRAINSAlignMSP " _outputs_filenames = { - 'OutputresampleMSP': 'OutputresampleMSP.nii', - 'resultsDir': 'resultsDir' + "OutputresampleMSP": "OutputresampleMSP.nii", + "resultsDir": "resultsDir", } _redirect_x = False @@ -842,28 +892,31 @@ class BRAINSLandmarkInitializerInputSpec(CommandLineInputSpec): inputFixedLandmarkFilename = File( desc="input fixed landmark. *.fcsv", exists=True, - argstr="--inputFixedLandmarkFilename %s") + argstr="--inputFixedLandmarkFilename %s", + ) inputMovingLandmarkFilename = File( desc="input moving landmark. *.fcsv", exists=True, - argstr="--inputMovingLandmarkFilename %s") + argstr="--inputMovingLandmarkFilename %s", + ) inputWeightFilename = File( - desc= - "Input weight file name for landmarks. Higher weighted landmark will be considered more heavily. Weights are propotional, that is the magnitude of weights will be normalized by its minimum and maximum value. ", + desc="Input weight file name for landmarks. Higher weighted landmark will be considered more heavily. Weights are propotional, that is the magnitude of weights will be normalized by its minimum and maximum value. 
", exists=True, - argstr="--inputWeightFilename %s") + argstr="--inputWeightFilename %s", + ) outputTransformFilename = traits.Either( traits.Bool, File(), hash_files=False, desc="output transform file name (ex: ./outputTransform.mat) ", - argstr="--outputTransformFilename %s") + argstr="--outputTransformFilename %s", + ) class BRAINSLandmarkInitializerOutputSpec(TraitedSpec): outputTransformFilename = File( - desc="output transform file name (ex: ./outputTransform.mat) ", - exists=True) + desc="output transform file name (ex: ./outputTransform.mat) ", exists=True + ) class BRAINSLandmarkInitializer(SEMLikeCommandLine): @@ -884,21 +937,21 @@ class BRAINSLandmarkInitializer(SEMLikeCommandLine): input_spec = BRAINSLandmarkInitializerInputSpec output_spec = BRAINSLandmarkInitializerOutputSpec _cmd = " BRAINSLandmarkInitializer " - _outputs_filenames = {'outputTransformFilename': 'outputTransformFilename'} + _outputs_filenames = {"outputTransformFilename": "outputTransformFilename"} _redirect_x = False class insertMidACPCpointInputSpec(CommandLineInputSpec): inputLandmarkFile = File( - desc="Input landmark file (.fcsv)", - exists=True, - argstr="--inputLandmarkFile %s") + desc="Input landmark file (.fcsv)", exists=True, argstr="--inputLandmarkFile %s" + ) outputLandmarkFile = traits.Either( traits.Bool, File(), hash_files=False, desc="Output landmark file (.fcsv)", - argstr="--outputLandmarkFile %s") + argstr="--outputLandmarkFile %s", + ) class insertMidACPCpointOutputSpec(TraitedSpec): @@ -919,56 +972,56 @@ class insertMidACPCpoint(SEMLikeCommandLine): input_spec = insertMidACPCpointInputSpec output_spec = insertMidACPCpointOutputSpec _cmd = " insertMidACPCpoint " - _outputs_filenames = {'outputLandmarkFile': 'outputLandmarkFile'} + _outputs_filenames = {"outputLandmarkFile": "outputLandmarkFile"} _redirect_x = False class BRAINSSnapShotWriterInputSpec(CommandLineInputSpec): inputVolumes = InputMultiPath( File(exists=True), - desc= - "Input image volume list to be extracted as 2D image. Multiple input is possible. At least one input is required.", - argstr="--inputVolumes %s...") + desc="Input image volume list to be extracted as 2D image. Multiple input is possible. At least one input is required.", + argstr="--inputVolumes %s...", + ) inputBinaryVolumes = InputMultiPath( File(exists=True), - desc= - "Input mask (binary) volume list to be extracted as 2D image. Multiple input is possible.", - argstr="--inputBinaryVolumes %s...") + desc="Input mask (binary) volume list to be extracted as 2D image. Multiple input is possible.", + argstr="--inputBinaryVolumes %s...", + ) inputSliceToExtractInPhysicalPoint = InputMultiPath( traits.Float, - desc= - "2D slice number of input images. For autoWorkUp output, which AC-PC aligned, 0,0,0 will be the center.", + desc="2D slice number of input images. For autoWorkUp output, which AC-PC aligned, 0,0,0 will be the center.", sep=",", - argstr="--inputSliceToExtractInPhysicalPoint %s") + argstr="--inputSliceToExtractInPhysicalPoint %s", + ) inputSliceToExtractInIndex = InputMultiPath( traits.Int, - desc= - "2D slice number of input images. For size of 256*256*256 image, 128 is usually used.", + desc="2D slice number of input images. For size of 256*256*256 image, 128 is usually used.", sep=",", - argstr="--inputSliceToExtractInIndex %s") + argstr="--inputSliceToExtractInIndex %s", + ) inputSliceToExtractInPercent = InputMultiPath( traits.Int, - desc= - "2D slice number of input images. Percentage input from 0%-100%. (ex. 
--inputSliceToExtractInPercent 50,50,50", + desc="2D slice number of input images. Percentage input from 0%-100%. (ex. --inputSliceToExtractInPercent 50,50,50", sep=",", - argstr="--inputSliceToExtractInPercent %s") + argstr="--inputSliceToExtractInPercent %s", + ) inputPlaneDirection = InputMultiPath( traits.Int, - desc= - "Plane to display. In general, 0=saggital, 1=coronal, and 2=axial plane.", + desc="Plane to display. In general, 0=saggital, 1=coronal, and 2=axial plane.", sep=",", - argstr="--inputPlaneDirection %s") + argstr="--inputPlaneDirection %s", + ) outputFilename = traits.Either( traits.Bool, File(), hash_files=False, desc="2D file name of input images. Required.", - argstr="--outputFilename %s") + argstr="--outputFilename %s", + ) class BRAINSSnapShotWriterOutputSpec(TraitedSpec): - outputFilename = File( - desc="2D file name of input images. Required.", exists=True) + outputFilename = File(desc="2D file name of input images. Required.", exists=True) class BRAINSSnapShotWriter(SEMLikeCommandLine): @@ -989,7 +1042,7 @@ class BRAINSSnapShotWriter(SEMLikeCommandLine): input_spec = BRAINSSnapShotWriterInputSpec output_spec = BRAINSSnapShotWriterOutputSpec _cmd = " BRAINSSnapShotWriter " - _outputs_filenames = {'outputFilename': 'outputFilename'} + _outputs_filenames = {"outputFilename": "outputFilename"} _redirect_x = False @@ -997,27 +1050,30 @@ class JointHistogramInputSpec(CommandLineInputSpec): inputVolumeInXAxis = File( desc="The Input image to be computed for statistics", exists=True, - argstr="--inputVolumeInXAxis %s") + argstr="--inputVolumeInXAxis %s", + ) inputVolumeInYAxis = File( desc="The Input image to be computed for statistics", exists=True, - argstr="--inputVolumeInYAxis %s") + argstr="--inputVolumeInYAxis %s", + ) inputMaskVolumeInXAxis = File( - desc= - "Input mask volume for inputVolumeInXAxis. Histogram will be computed just for the masked region", + desc="Input mask volume for inputVolumeInXAxis. Histogram will be computed just for the masked region", exists=True, - argstr="--inputMaskVolumeInXAxis %s") + argstr="--inputMaskVolumeInXAxis %s", + ) inputMaskVolumeInYAxis = File( - desc= - "Input mask volume for inputVolumeInYAxis. Histogram will be computed just for the masked region", + desc="Input mask volume for inputVolumeInYAxis. Histogram will be computed just for the masked region", exists=True, - argstr="--inputMaskVolumeInYAxis %s") + argstr="--inputMaskVolumeInYAxis %s", + ) outputJointHistogramImage = traits.Str( - desc= - " output joint histogram image file name. Histogram is usually 2D image. ", - argstr="--outputJointHistogramImage %s") + desc=" output joint histogram image file name. Histogram is usually 2D image. ", + argstr="--outputJointHistogramImage %s", + ) verbose = traits.Bool( - desc=" print debugging information, ", argstr="--verbose ") + desc=" print debugging information, ", argstr="--verbose " + ) class JointHistogramOutputSpec(TraitedSpec): @@ -1046,28 +1102,28 @@ class JointHistogram(SEMLikeCommandLine): class ShuffleVectorsModuleInputSpec(CommandLineInputSpec): inputVectorFileBaseName = File( - desc= - "input vector file name prefix. Usually end with .txt and header file has prost fix of .txt.hdr", + desc="input vector file name prefix. Usually end with .txt and header file has prost fix of .txt.hdr", exists=True, - argstr="--inputVectorFileBaseName %s") + argstr="--inputVectorFileBaseName %s", + ) outputVectorFileBaseName = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "output vector file name prefix. 
Usually end with .txt and header file has prost fix of .txt.hdr", - argstr="--outputVectorFileBaseName %s") + desc="output vector file name prefix. Usually end with .txt and header file has prost fix of .txt.hdr", + argstr="--outputVectorFileBaseName %s", + ) resampleProportion = traits.Float( - desc= - "downsample size of 1 will be the same size as the input images, downsample size of 3 will throw 2/3 the vectors away.", - argstr="--resampleProportion %f") + desc="downsample size of 1 will be the same size as the input images, downsample size of 3 will throw 2/3 the vectors away.", + argstr="--resampleProportion %f", + ) class ShuffleVectorsModuleOutputSpec(TraitedSpec): outputVectorFileBaseName = File( - desc= - "output vector file name prefix. Usually end with .txt and header file has prost fix of .txt.hdr", - exists=True) + desc="output vector file name prefix. Usually end with .txt and header file has prost fix of .txt.hdr", + exists=True, + ) class ShuffleVectorsModule(SEMLikeCommandLine): @@ -1088,9 +1144,7 @@ class ShuffleVectorsModule(SEMLikeCommandLine): input_spec = ShuffleVectorsModuleInputSpec output_spec = ShuffleVectorsModuleOutputSpec _cmd = " ShuffleVectorsModule " - _outputs_filenames = { - 'outputVectorFileBaseName': 'outputVectorFileBaseName' - } + _outputs_filenames = {"outputVectorFileBaseName": "outputVectorFileBaseName"} _redirect_x = False @@ -1098,32 +1152,38 @@ class ImageRegionPlotterInputSpec(CommandLineInputSpec): inputVolume1 = File( desc="The Input image to be computed for statistics", exists=True, - argstr="--inputVolume1 %s") + argstr="--inputVolume1 %s", + ) inputVolume2 = File( desc="The Input image to be computed for statistics", exists=True, - argstr="--inputVolume2 %s") + argstr="--inputVolume2 %s", + ) inputBinaryROIVolume = File( desc="The Input binary image for region of interest", exists=True, - argstr="--inputBinaryROIVolume %s") + argstr="--inputBinaryROIVolume %s", + ) inputLabelVolume = File( - desc="The Label Image", exists=True, argstr="--inputLabelVolume %s") + desc="The Label Image", exists=True, argstr="--inputLabelVolume %s" + ) numberOfHistogramBins = traits.Int( - desc=" the number of histogram levels", - argstr="--numberOfHistogramBins %d") + desc=" the number of histogram levels", argstr="--numberOfHistogramBins %d" + ) outputJointHistogramData = traits.Str( - desc=" output data file name", argstr="--outputJointHistogramData %s") + desc=" output data file name", argstr="--outputJointHistogramData %s" + ) useROIAUTO = traits.Bool( - desc= - " Use ROIAUTO to compute region of interest. This cannot be used with inputLabelVolume", - argstr="--useROIAUTO ") + desc=" Use ROIAUTO to compute region of interest. This cannot be used with inputLabelVolume", + argstr="--useROIAUTO ", + ) useIntensityForHistogram = traits.Bool( - desc= - " Create Intensity Joint Histogram instead of Quantile Joint Histogram", - argstr="--useIntensityForHistogram ") + desc=" Create Intensity Joint Histogram instead of Quantile Joint Histogram", + argstr="--useIntensityForHistogram ", + ) verbose = traits.Bool( - desc=" print debugging information, ", argstr="--verbose ") + desc=" print debugging information, ", argstr="--verbose " + ) class ImageRegionPlotterOutputSpec(TraitedSpec): @@ -1152,42 +1212,45 @@ class ImageRegionPlotter(SEMLikeCommandLine): class fcsv_to_hdf5InputSpec(CommandLineInputSpec): versionID = traits.Str( - desc= - ", Current version ID. 
It should be match with the version of BCD that will be using the output model file, ", - argstr="--versionID %s") + desc=", Current version ID. It should be match with the version of BCD that will be using the output model file, ", + argstr="--versionID %s", + ) landmarksInformationFile = traits.Either( traits.Bool, File(), hash_files=False, desc=", name of HDF5 file to write matrices into, ", - argstr="--landmarksInformationFile %s") + argstr="--landmarksInformationFile %s", + ) landmarkTypesList = File( desc=", file containing list of landmark types, ", exists=True, - argstr="--landmarkTypesList %s") + argstr="--landmarkTypesList %s", + ) modelFile = traits.Either( traits.Bool, File(), hash_files=False, - desc= - ", name of HDF5 file containing BRAINSConstellationDetector Model file (LLSMatrices, LLSMeans and LLSSearchRadii), ", - argstr="--modelFile %s") + desc=", name of HDF5 file containing BRAINSConstellationDetector Model file (LLSMatrices, LLSMeans and LLSSearchRadii), ", + argstr="--modelFile %s", + ) landmarkGlobPattern = traits.Str( - desc="Glob pattern to select fcsv files", - argstr="--landmarkGlobPattern %s") + desc="Glob pattern to select fcsv files", argstr="--landmarkGlobPattern %s" + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class fcsv_to_hdf5OutputSpec(TraitedSpec): landmarksInformationFile = File( - desc=", name of HDF5 file to write matrices into, ", - exists=True) + desc=", name of HDF5 file to write matrices into, ", exists=True + ) modelFile = File( - desc= - ", name of HDF5 file containing BRAINSConstellationDetector Model file (LLSMatrices, LLSMeans and LLSSearchRadii), ", - exists=True) + desc=", name of HDF5 file containing BRAINSConstellationDetector Model file (LLSMatrices, LLSMeans and LLSSearchRadii), ", + exists=True, + ) class fcsv_to_hdf5(SEMLikeCommandLine): @@ -1203,8 +1266,8 @@ class fcsv_to_hdf5(SEMLikeCommandLine): output_spec = fcsv_to_hdf5OutputSpec _cmd = " fcsv_to_hdf5 " _outputs_filenames = { - 'modelFile': 'modelFile', - 'landmarksInformationFile': 'landmarksInformationFile.h5' + "modelFile": "modelFile", + "landmarksInformationFile": "landmarksInformationFile.h5", } _redirect_x = False @@ -1213,38 +1276,38 @@ class FindCenterOfBrainInputSpec(CommandLineInputSpec): inputVolume = File( desc="The image in which to find the center.", exists=True, - argstr="--inputVolume %s") + argstr="--inputVolume %s", + ) imageMask = File(exists=True, argstr="--imageMask %s") clippedImageMask = traits.Either( - traits.Bool, File(), hash_files=False, argstr="--clippedImageMask %s") + traits.Bool, File(), hash_files=False, argstr="--clippedImageMask %s" + ) maximize = traits.Bool(argstr="--maximize ") axis = traits.Int(argstr="--axis %d") - otsuPercentileThreshold = traits.Float( - argstr="--otsuPercentileThreshold %f") + otsuPercentileThreshold = traits.Float(argstr="--otsuPercentileThreshold %f") closingSize = traits.Int(argstr="--closingSize %d") headSizeLimit = traits.Float(argstr="--headSizeLimit %f") headSizeEstimate = traits.Float(argstr="--headSizeEstimate %f") backgroundValue = traits.Int(argstr="--backgroundValue %d") generateDebugImages = traits.Bool(argstr="--generateDebugImages ") debugDistanceImage = traits.Either( - traits.Bool, - File(), - hash_files=False, - argstr="--debugDistanceImage %s") + traits.Bool, File(), hash_files=False, argstr="--debugDistanceImage %s" + ) debugGridImage = traits.Either( - traits.Bool, File(), 
hash_files=False, argstr="--debugGridImage %s") + traits.Bool, File(), hash_files=False, argstr="--debugGridImage %s" + ) debugAfterGridComputationsForegroundImage = traits.Either( traits.Bool, File(), hash_files=False, - argstr="--debugAfterGridComputationsForegroundImage %s") + argstr="--debugAfterGridComputationsForegroundImage %s", + ) debugClippedImageMask = traits.Either( - traits.Bool, - File(), - hash_files=False, - argstr="--debugClippedImageMask %s") + traits.Bool, File(), hash_files=False, argstr="--debugClippedImageMask %s" + ) debugTrimmedImage = traits.Either( - traits.Bool, File(), hash_files=False, argstr="--debugTrimmedImage %s") + traits.Bool, File(), hash_files=False, argstr="--debugTrimmedImage %s" + ) class FindCenterOfBrainOutputSpec(TraitedSpec): @@ -1277,17 +1340,11 @@ class FindCenterOfBrain(SEMLikeCommandLine): output_spec = FindCenterOfBrainOutputSpec _cmd = " FindCenterOfBrain " _outputs_filenames = { - 'debugClippedImageMask': - 'debugClippedImageMask.nii', - 'debugTrimmedImage': - 'debugTrimmedImage.nii', - 'debugDistanceImage': - 'debugDistanceImage.nii', - 'debugGridImage': - 'debugGridImage.nii', - 'clippedImageMask': - 'clippedImageMask.nii', - 'debugAfterGridComputationsForegroundImage': - 'debugAfterGridComputationsForegroundImage.nii' + "debugClippedImageMask": "debugClippedImageMask.nii", + "debugTrimmedImage": "debugTrimmedImage.nii", + "debugDistanceImage": "debugDistanceImage.nii", + "debugGridImage": "debugGridImage.nii", + "clippedImageMask": "clippedImageMask.nii", + "debugAfterGridComputationsForegroundImage": "debugAfterGridComputationsForegroundImage.nii", } _redirect_x = False diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py index b5913daba3..2dabdd4b6d 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py @@ -4,47 +4,32 @@ def test_BRAINSAlignMSP_inputs(): input_map = dict( - BackgroundFillValue=dict(argstr='--BackgroundFillValue %s', ), - OutputresampleMSP=dict( - argstr='--OutputresampleMSP %s', - hash_files=False, - ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - interpolationMode=dict(argstr='--interpolationMode %s', ), - mspQualityLevel=dict(argstr='--mspQualityLevel %d', ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - rescaleIntensities=dict(argstr='--rescaleIntensities ', ), + BackgroundFillValue=dict(argstr="--BackgroundFillValue %s",), + OutputresampleMSP=dict(argstr="--OutputresampleMSP %s", hash_files=False,), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + interpolationMode=dict(argstr="--interpolationMode %s",), + mspQualityLevel=dict(argstr="--mspQualityLevel %d",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + rescaleIntensities=dict(argstr="--rescaleIntensities ",), rescaleIntensitiesOutputRange=dict( - argstr='--rescaleIntensitiesOutputRange %s', - sep=',', - ), - resultsDir=dict( - argstr='--resultsDir %s', - hash_files=False, + argstr="--rescaleIntensitiesOutputRange %s", sep=",", ), - trimRescaledIntensities=dict(argstr='--trimRescaledIntensities %f', ), - verbose=dict(argstr='--verbose ', ), - writedebuggingImagesLevel=dict( - argstr='--writedebuggingImagesLevel %d', ), + 
resultsDir=dict(argstr="--resultsDir %s", hash_files=False,), + trimRescaledIntensities=dict(argstr="--trimRescaledIntensities %f",), + verbose=dict(argstr="--verbose ",), + writedebuggingImagesLevel=dict(argstr="--writedebuggingImagesLevel %d",), ) inputs = BRAINSAlignMSP.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSAlignMSP_outputs(): - output_map = dict( - OutputresampleMSP=dict(extensions=None, ), - resultsDir=dict(), - ) + output_map = dict(OutputresampleMSP=dict(extensions=None,), resultsDir=dict(),) outputs = BRAINSAlignMSP.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py index 39fb8fe023..76958e0b2f 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py @@ -4,30 +4,23 @@ def test_BRAINSClipInferior_inputs(): input_map = dict( - BackgroundFillValue=dict(argstr='--BackgroundFillValue %s', ), - acLowerBound=dict(argstr='--acLowerBound %f', ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), + BackgroundFillValue=dict(argstr="--BackgroundFillValue %s",), + acLowerBound=dict(argstr="--acLowerBound %f",), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), ) inputs = BRAINSClipInferior.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSClipInferior_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = BRAINSClipInferior.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py index 0edcebc668..61129f62cc 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py @@ -4,49 +4,35 @@ def test_BRAINSConstellationModeler_inputs(): input_map = dict( - BackgroundFillValue=dict(argstr='--BackgroundFillValue %s', ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputTrainingList=dict( - argstr='--inputTrainingList %s', - extensions=None, - ), - mspQualityLevel=dict(argstr='--mspQualityLevel %d', ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), + BackgroundFillValue=dict(argstr="--BackgroundFillValue %s",), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputTrainingList=dict(argstr="--inputTrainingList %s", extensions=None,), + mspQualityLevel=dict(argstr="--mspQualityLevel %d",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), 
optimizedLandmarksFilenameExtender=dict( - argstr='--optimizedLandmarksFilenameExtender %s', ), - outputModel=dict( - argstr='--outputModel %s', - hash_files=False, + argstr="--optimizedLandmarksFilenameExtender %s", ), - rescaleIntensities=dict(argstr='--rescaleIntensities ', ), + outputModel=dict(argstr="--outputModel %s", hash_files=False,), + rescaleIntensities=dict(argstr="--rescaleIntensities ",), rescaleIntensitiesOutputRange=dict( - argstr='--rescaleIntensitiesOutputRange %s', - sep=',', - ), - resultsDir=dict( - argstr='--resultsDir %s', - hash_files=False, + argstr="--rescaleIntensitiesOutputRange %s", sep=",", ), - saveOptimizedLandmarks=dict(argstr='--saveOptimizedLandmarks ', ), - trimRescaledIntensities=dict(argstr='--trimRescaledIntensities %f', ), - verbose=dict(argstr='--verbose ', ), - writedebuggingImagesLevel=dict( - argstr='--writedebuggingImagesLevel %d', ), + resultsDir=dict(argstr="--resultsDir %s", hash_files=False,), + saveOptimizedLandmarks=dict(argstr="--saveOptimizedLandmarks ",), + trimRescaledIntensities=dict(argstr="--trimRescaledIntensities %f",), + verbose=dict(argstr="--verbose ",), + writedebuggingImagesLevel=dict(argstr="--writedebuggingImagesLevel %d",), ) inputs = BRAINSConstellationModeler.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSConstellationModeler_outputs(): - output_map = dict( - outputModel=dict(extensions=None, ), - resultsDir=dict(), - ) + output_map = dict(outputModel=dict(extensions=None,), resultsDir=dict(),) outputs = BRAINSConstellationModeler.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py index 9c1fe4a993..8ac3c8a21a 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py @@ -4,29 +4,22 @@ def test_BRAINSEyeDetector_inputs(): input_map = dict( - args=dict(argstr='%s', ), - debugDir=dict(argstr='--debugDir %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), + args=dict(argstr="%s",), + debugDir=dict(argstr="--debugDir %s",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), ) inputs = BRAINSEyeDetector.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSEyeDetector_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = BRAINSEyeDetector.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py index 66accefbee..cf20b5a37b 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py +++ 
b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py @@ -4,37 +4,24 @@ def test_BRAINSInitializedControlPoints_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputLandmarksFile=dict(argstr='--outputLandmarksFile %s', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), - permuteOrder=dict( - argstr='--permuteOrder %s', - sep=',', - ), - splineGridSize=dict( - argstr='--splineGridSize %s', - sep=',', - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputLandmarksFile=dict(argstr="--outputLandmarksFile %s",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + permuteOrder=dict(argstr="--permuteOrder %s", sep=",",), + splineGridSize=dict(argstr="--splineGridSize %s", sep=",",), ) inputs = BRAINSInitializedControlPoints.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSInitializedControlPoints_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = BRAINSInitializedControlPoints.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py index dd39f41295..cdb17c6232 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py @@ -4,26 +4,17 @@ def test_BRAINSLandmarkInitializer_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), inputFixedLandmarkFilename=dict( - argstr='--inputFixedLandmarkFilename %s', - extensions=None, + argstr="--inputFixedLandmarkFilename %s", extensions=None, ), inputMovingLandmarkFilename=dict( - argstr='--inputMovingLandmarkFilename %s', - extensions=None, - ), - inputWeightFilename=dict( - argstr='--inputWeightFilename %s', - extensions=None, + argstr="--inputMovingLandmarkFilename %s", extensions=None, ), + inputWeightFilename=dict(argstr="--inputWeightFilename %s", extensions=None,), outputTransformFilename=dict( - argstr='--outputTransformFilename %s', - hash_files=False, + argstr="--outputTransformFilename %s", hash_files=False, ), ) inputs = BRAINSLandmarkInitializer.input_spec() @@ -31,8 +22,10 @@ def test_BRAINSLandmarkInitializer_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSLandmarkInitializer_outputs(): - output_map = dict(outputTransformFilename=dict(extensions=None, ), ) + output_map = dict(outputTransformFilename=dict(extensions=None,),) outputs = BRAINSLandmarkInitializer.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py 
b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py index 6692e342a0..a9fdee554b 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py @@ -4,22 +4,18 @@ def test_BRAINSLinearModelerEPCA_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputTrainingList=dict( - argstr='--inputTrainingList %s', - extensions=None, - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputTrainingList=dict(argstr="--inputTrainingList %s", extensions=None,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), ) inputs = BRAINSLinearModelerEPCA.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSLinearModelerEPCA_outputs(): output_map = dict() outputs = BRAINSLinearModelerEPCA.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py index 88b0846768..f459589580 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py @@ -4,35 +4,18 @@ def test_BRAINSLmkTransform_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputFixedLandmarks=dict( - argstr='--inputFixedLandmarks %s', - extensions=None, - ), - inputMovingLandmarks=dict( - argstr='--inputMovingLandmarks %s', - extensions=None, - ), - inputMovingVolume=dict( - argstr='--inputMovingVolume %s', - extensions=None, - ), - inputReferenceVolume=dict( - argstr='--inputReferenceVolume %s', - extensions=None, - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputFixedLandmarks=dict(argstr="--inputFixedLandmarks %s", extensions=None,), + inputMovingLandmarks=dict(argstr="--inputMovingLandmarks %s", extensions=None,), + inputMovingVolume=dict(argstr="--inputMovingVolume %s", extensions=None,), + inputReferenceVolume=dict(argstr="--inputReferenceVolume %s", extensions=None,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), outputAffineTransform=dict( - argstr='--outputAffineTransform %s', - hash_files=False, + argstr="--outputAffineTransform %s", hash_files=False, ), outputResampledVolume=dict( - argstr='--outputResampledVolume %s', - hash_files=False, + argstr="--outputResampledVolume %s", hash_files=False, ), ) inputs = BRAINSLmkTransform.input_spec() @@ -40,10 +23,12 @@ def test_BRAINSLmkTransform_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSLmkTransform_outputs(): output_map = dict( - outputAffineTransform=dict(extensions=None, ), - outputResampledVolume=dict(extensions=None, ), + outputAffineTransform=dict(extensions=None,), + outputResampledVolume=dict(extensions=None,), ) outputs = BRAINSLmkTransform.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py index a1bde9d454..b16829ef8f 100644 --- 
a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py @@ -4,65 +4,37 @@ def test_BRAINSMush_inputs(): input_map = dict( - args=dict(argstr='%s', ), - boundingBoxSize=dict( - argstr='--boundingBoxSize %s', - sep=',', - ), - boundingBoxStart=dict( - argstr='--boundingBoxStart %s', - sep=',', - ), - desiredMean=dict(argstr='--desiredMean %f', ), - desiredVariance=dict(argstr='--desiredVariance %f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputFirstVolume=dict( - argstr='--inputFirstVolume %s', - extensions=None, - ), - inputMaskVolume=dict( - argstr='--inputMaskVolume %s', - extensions=None, - ), - inputSecondVolume=dict( - argstr='--inputSecondVolume %s', - extensions=None, - ), - lowerThresholdFactor=dict(argstr='--lowerThresholdFactor %f', ), - lowerThresholdFactorPre=dict(argstr='--lowerThresholdFactorPre %f', ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputMask=dict( - argstr='--outputMask %s', - hash_files=False, - ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), - outputWeightsFile=dict( - argstr='--outputWeightsFile %s', - hash_files=False, - ), - seed=dict( - argstr='--seed %s', - sep=',', - ), - upperThresholdFactor=dict(argstr='--upperThresholdFactor %f', ), - upperThresholdFactorPre=dict(argstr='--upperThresholdFactorPre %f', ), + args=dict(argstr="%s",), + boundingBoxSize=dict(argstr="--boundingBoxSize %s", sep=",",), + boundingBoxStart=dict(argstr="--boundingBoxStart %s", sep=",",), + desiredMean=dict(argstr="--desiredMean %f",), + desiredVariance=dict(argstr="--desiredVariance %f",), + environ=dict(nohash=True, usedefault=True,), + inputFirstVolume=dict(argstr="--inputFirstVolume %s", extensions=None,), + inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None,), + inputSecondVolume=dict(argstr="--inputSecondVolume %s", extensions=None,), + lowerThresholdFactor=dict(argstr="--lowerThresholdFactor %f",), + lowerThresholdFactorPre=dict(argstr="--lowerThresholdFactorPre %f",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputMask=dict(argstr="--outputMask %s", hash_files=False,), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + outputWeightsFile=dict(argstr="--outputWeightsFile %s", hash_files=False,), + seed=dict(argstr="--seed %s", sep=",",), + upperThresholdFactor=dict(argstr="--upperThresholdFactor %f",), + upperThresholdFactorPre=dict(argstr="--upperThresholdFactorPre %f",), ) inputs = BRAINSMush.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSMush_outputs(): output_map = dict( - outputMask=dict(extensions=None, ), - outputVolume=dict(extensions=None, ), - outputWeightsFile=dict(extensions=None, ), + outputMask=dict(extensions=None,), + outputVolume=dict(extensions=None,), + outputWeightsFile=dict(extensions=None,), ) outputs = BRAINSMush.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py index 2118b58ed9..30bf49a6ad 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py @@ -4,41 +4,31 @@ def test_BRAINSSnapShotWriter_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - 
nohash=True, - usedefault=True, - ), - inputBinaryVolumes=dict(argstr='--inputBinaryVolumes %s...', ), - inputPlaneDirection=dict( - argstr='--inputPlaneDirection %s', - sep=',', - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputBinaryVolumes=dict(argstr="--inputBinaryVolumes %s...",), + inputPlaneDirection=dict(argstr="--inputPlaneDirection %s", sep=",",), inputSliceToExtractInIndex=dict( - argstr='--inputSliceToExtractInIndex %s', - sep=',', + argstr="--inputSliceToExtractInIndex %s", sep=",", ), inputSliceToExtractInPercent=dict( - argstr='--inputSliceToExtractInPercent %s', - sep=',', + argstr="--inputSliceToExtractInPercent %s", sep=",", ), inputSliceToExtractInPhysicalPoint=dict( - argstr='--inputSliceToExtractInPhysicalPoint %s', - sep=',', - ), - inputVolumes=dict(argstr='--inputVolumes %s...', ), - outputFilename=dict( - argstr='--outputFilename %s', - hash_files=False, + argstr="--inputSliceToExtractInPhysicalPoint %s", sep=",", ), + inputVolumes=dict(argstr="--inputVolumes %s...",), + outputFilename=dict(argstr="--outputFilename %s", hash_files=False,), ) inputs = BRAINSSnapShotWriter.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSSnapShotWriter_outputs(): - output_map = dict(outputFilename=dict(extensions=None, ), ) + output_map = dict(outputFilename=dict(extensions=None,),) outputs = BRAINSSnapShotWriter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py index 6abf0a9f3f..4316f4561b 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py @@ -4,39 +4,26 @@ def test_BRAINSTransformConvert_inputs(): input_map = dict( - args=dict(argstr='%s', ), - displacementVolume=dict( - argstr='--displacementVolume %s', - hash_files=False, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputTransform=dict( - argstr='--inputTransform %s', - extensions=None, - ), - outputPrecisionType=dict(argstr='--outputPrecisionType %s', ), - outputTransform=dict( - argstr='--outputTransform %s', - hash_files=False, - ), - outputTransformType=dict(argstr='--outputTransformType %s', ), - referenceVolume=dict( - argstr='--referenceVolume %s', - extensions=None, - ), + args=dict(argstr="%s",), + displacementVolume=dict(argstr="--displacementVolume %s", hash_files=False,), + environ=dict(nohash=True, usedefault=True,), + inputTransform=dict(argstr="--inputTransform %s", extensions=None,), + outputPrecisionType=dict(argstr="--outputPrecisionType %s",), + outputTransform=dict(argstr="--outputTransform %s", hash_files=False,), + outputTransformType=dict(argstr="--outputTransformType %s",), + referenceVolume=dict(argstr="--referenceVolume %s", extensions=None,), ) inputs = BRAINSTransformConvert.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSTransformConvert_outputs(): output_map = dict( - displacementVolume=dict(extensions=None, ), - outputTransform=dict(extensions=None, ), + displacementVolume=dict(extensions=None,), + outputTransform=dict(extensions=None,), ) outputs = BRAINSTransformConvert.output_spec() 
diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py index 2bf13debc4..ed3180746c 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py @@ -4,33 +4,26 @@ def test_BRAINSTrimForegroundInDirection_inputs(): input_map = dict( - BackgroundFillValue=dict(argstr='--BackgroundFillValue %s', ), - args=dict(argstr='%s', ), - closingSize=dict(argstr='--closingSize %d', ), - directionCode=dict(argstr='--directionCode %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - headSizeLimit=dict(argstr='--headSizeLimit %f', ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - otsuPercentileThreshold=dict(argstr='--otsuPercentileThreshold %f', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), + BackgroundFillValue=dict(argstr="--BackgroundFillValue %s",), + args=dict(argstr="%s",), + closingSize=dict(argstr="--closingSize %d",), + directionCode=dict(argstr="--directionCode %d",), + environ=dict(nohash=True, usedefault=True,), + headSizeLimit=dict(argstr="--headSizeLimit %f",), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + otsuPercentileThreshold=dict(argstr="--otsuPercentileThreshold %f",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), ) inputs = BRAINSTrimForegroundInDirection.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSTrimForegroundInDirection_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = BRAINSTrimForegroundInDirection.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py b/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py index fe56cb0e38..b7a9167092 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py @@ -4,15 +4,11 @@ def test_CleanUpOverlapLabels_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputBinaryVolumes=dict(argstr='--inputBinaryVolumes %s...', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputBinaryVolumes=dict(argstr="--inputBinaryVolumes %s...",), outputBinaryVolumes=dict( - argstr='--outputBinaryVolumes %s...', - hash_files=False, + argstr="--outputBinaryVolumes %s...", hash_files=False, ), ) inputs = CleanUpOverlapLabels.input_spec() @@ -20,8 +16,10 @@ def test_CleanUpOverlapLabels_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CleanUpOverlapLabels_outputs(): - output_map = dict(outputBinaryVolumes=dict(), ) + output_map = dict(outputBinaryVolumes=dict(),) outputs = CleanUpOverlapLabels.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py b/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py index 701eb2438e..bf2d9ab03d 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py @@ -4,65 +4,44 @@ def test_FindCenterOfBrain_inputs(): input_map = dict( - args=dict(argstr='%s', ), - axis=dict(argstr='--axis %d', ), - backgroundValue=dict(argstr='--backgroundValue %d', ), - clippedImageMask=dict( - argstr='--clippedImageMask %s', - hash_files=False, - ), - closingSize=dict(argstr='--closingSize %d', ), + args=dict(argstr="%s",), + axis=dict(argstr="--axis %d",), + backgroundValue=dict(argstr="--backgroundValue %d",), + clippedImageMask=dict(argstr="--clippedImageMask %s", hash_files=False,), + closingSize=dict(argstr="--closingSize %d",), debugAfterGridComputationsForegroundImage=dict( - argstr='--debugAfterGridComputationsForegroundImage %s', - hash_files=False, + argstr="--debugAfterGridComputationsForegroundImage %s", hash_files=False, ), debugClippedImageMask=dict( - argstr='--debugClippedImageMask %s', - hash_files=False, - ), - debugDistanceImage=dict( - argstr='--debugDistanceImage %s', - hash_files=False, - ), - debugGridImage=dict( - argstr='--debugGridImage %s', - hash_files=False, - ), - debugTrimmedImage=dict( - argstr='--debugTrimmedImage %s', - hash_files=False, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - generateDebugImages=dict(argstr='--generateDebugImages ', ), - headSizeEstimate=dict(argstr='--headSizeEstimate %f', ), - headSizeLimit=dict(argstr='--headSizeLimit %f', ), - imageMask=dict( - argstr='--imageMask %s', - extensions=None, - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - maximize=dict(argstr='--maximize ', ), - otsuPercentileThreshold=dict(argstr='--otsuPercentileThreshold %f', ), + argstr="--debugClippedImageMask %s", hash_files=False, + ), + debugDistanceImage=dict(argstr="--debugDistanceImage %s", hash_files=False,), + debugGridImage=dict(argstr="--debugGridImage %s", hash_files=False,), + debugTrimmedImage=dict(argstr="--debugTrimmedImage %s", hash_files=False,), + environ=dict(nohash=True, usedefault=True,), + generateDebugImages=dict(argstr="--generateDebugImages ",), + headSizeEstimate=dict(argstr="--headSizeEstimate %f",), + headSizeLimit=dict(argstr="--headSizeLimit %f",), + imageMask=dict(argstr="--imageMask %s", extensions=None,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + maximize=dict(argstr="--maximize ",), + otsuPercentileThreshold=dict(argstr="--otsuPercentileThreshold %f",), ) inputs = FindCenterOfBrain.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FindCenterOfBrain_outputs(): output_map = dict( - clippedImageMask=dict(extensions=None, ), - debugAfterGridComputationsForegroundImage=dict(extensions=None, ), - debugClippedImageMask=dict(extensions=None, ), - debugDistanceImage=dict(extensions=None, ), - debugGridImage=dict(extensions=None, ), - debugTrimmedImage=dict(extensions=None, ), + clippedImageMask=dict(extensions=None,), + debugAfterGridComputationsForegroundImage=dict(extensions=None,), + debugClippedImageMask=dict(extensions=None,), + debugDistanceImage=dict(extensions=None,), + debugGridImage=dict(extensions=None,), + debugTrimmedImage=dict(extensions=None,), ) outputs = 
FindCenterOfBrain.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py b/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py index f2a89bbe6a..289473e902 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py @@ -4,25 +4,21 @@ def test_GenerateLabelMapFromProbabilityMap_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolumes=dict(argstr='--inputVolumes %s...', ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputLabelVolume=dict( - argstr='--outputLabelVolume %s', - hash_files=False, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputVolumes=dict(argstr="--inputVolumes %s...",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputLabelVolume=dict(argstr="--outputLabelVolume %s", hash_files=False,), ) inputs = GenerateLabelMapFromProbabilityMap.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_GenerateLabelMapFromProbabilityMap_outputs(): - output_map = dict(outputLabelVolume=dict(extensions=None, ), ) + output_map = dict(outputLabelVolume=dict(extensions=None,),) outputs = GenerateLabelMapFromProbabilityMap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py b/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py index e51aa52502..7d9f72aedb 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py @@ -4,39 +4,25 @@ def test_ImageRegionPlotter_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputBinaryROIVolume=dict( - argstr='--inputBinaryROIVolume %s', - extensions=None, - ), - inputLabelVolume=dict( - argstr='--inputLabelVolume %s', - extensions=None, - ), - inputVolume1=dict( - argstr='--inputVolume1 %s', - extensions=None, - ), - inputVolume2=dict( - argstr='--inputVolume2 %s', - extensions=None, - ), - numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), - outputJointHistogramData=dict( - argstr='--outputJointHistogramData %s', ), - useIntensityForHistogram=dict(argstr='--useIntensityForHistogram ', ), - useROIAUTO=dict(argstr='--useROIAUTO ', ), - verbose=dict(argstr='--verbose ', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputBinaryROIVolume=dict(argstr="--inputBinaryROIVolume %s", extensions=None,), + inputLabelVolume=dict(argstr="--inputLabelVolume %s", extensions=None,), + inputVolume1=dict(argstr="--inputVolume1 %s", extensions=None,), + inputVolume2=dict(argstr="--inputVolume2 %s", extensions=None,), + numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d",), + outputJointHistogramData=dict(argstr="--outputJointHistogramData %s",), + useIntensityForHistogram=dict(argstr="--useIntensityForHistogram ",), + useROIAUTO=dict(argstr="--useROIAUTO ",), + verbose=dict(argstr="--verbose ",), ) inputs = ImageRegionPlotter.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert 
getattr(inputs.traits()[key], metakey) == value + + def test_ImageRegionPlotter_outputs(): output_map = dict() outputs = ImageRegionPlotter.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py b/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py index ec583132b6..6ac46c3695 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py @@ -4,36 +4,26 @@ def test_JointHistogram_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), inputMaskVolumeInXAxis=dict( - argstr='--inputMaskVolumeInXAxis %s', - extensions=None, + argstr="--inputMaskVolumeInXAxis %s", extensions=None, ), inputMaskVolumeInYAxis=dict( - argstr='--inputMaskVolumeInYAxis %s', - extensions=None, - ), - inputVolumeInXAxis=dict( - argstr='--inputVolumeInXAxis %s', - extensions=None, - ), - inputVolumeInYAxis=dict( - argstr='--inputVolumeInYAxis %s', - extensions=None, + argstr="--inputMaskVolumeInYAxis %s", extensions=None, ), - outputJointHistogramImage=dict( - argstr='--outputJointHistogramImage %s', ), - verbose=dict(argstr='--verbose ', ), + inputVolumeInXAxis=dict(argstr="--inputVolumeInXAxis %s", extensions=None,), + inputVolumeInYAxis=dict(argstr="--inputVolumeInYAxis %s", extensions=None,), + outputJointHistogramImage=dict(argstr="--outputJointHistogramImage %s",), + verbose=dict(argstr="--verbose ",), ) inputs = JointHistogram.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_JointHistogram_outputs(): output_map = dict() outputs = JointHistogram.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py b/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py index 3343270e9d..f3541344a0 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py @@ -4,28 +4,25 @@ def test_ShuffleVectorsModule_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), inputVectorFileBaseName=dict( - argstr='--inputVectorFileBaseName %s', - extensions=None, + argstr="--inputVectorFileBaseName %s", extensions=None, ), outputVectorFileBaseName=dict( - argstr='--outputVectorFileBaseName %s', - hash_files=False, + argstr="--outputVectorFileBaseName %s", hash_files=False, ), - resampleProportion=dict(argstr='--resampleProportion %f', ), + resampleProportion=dict(argstr="--resampleProportion %f",), ) inputs = ShuffleVectorsModule.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ShuffleVectorsModule_outputs(): - output_map = dict(outputVectorFileBaseName=dict(extensions=None, ), ) + output_map = dict(outputVectorFileBaseName=dict(extensions=None,),) outputs = ShuffleVectorsModule.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py b/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py index 
afd2d091ce..f64c302328 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py @@ -4,36 +4,28 @@ def test_fcsv_to_hdf5_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - landmarkGlobPattern=dict(argstr='--landmarkGlobPattern %s', ), - landmarkTypesList=dict( - argstr='--landmarkTypesList %s', - extensions=None, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + landmarkGlobPattern=dict(argstr="--landmarkGlobPattern %s",), + landmarkTypesList=dict(argstr="--landmarkTypesList %s", extensions=None,), landmarksInformationFile=dict( - argstr='--landmarksInformationFile %s', - hash_files=False, - ), - modelFile=dict( - argstr='--modelFile %s', - hash_files=False, + argstr="--landmarksInformationFile %s", hash_files=False, ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - versionID=dict(argstr='--versionID %s', ), + modelFile=dict(argstr="--modelFile %s", hash_files=False,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + versionID=dict(argstr="--versionID %s",), ) inputs = fcsv_to_hdf5.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_fcsv_to_hdf5_outputs(): output_map = dict( - landmarksInformationFile=dict(extensions=None, ), - modelFile=dict(extensions=None, ), + landmarksInformationFile=dict(extensions=None,), + modelFile=dict(extensions=None,), ) outputs = fcsv_to_hdf5.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py b/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py index fa2335fed6..7e02bfc9e1 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py @@ -4,27 +4,20 @@ def test_insertMidACPCpoint_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputLandmarkFile=dict( - argstr='--inputLandmarkFile %s', - extensions=None, - ), - outputLandmarkFile=dict( - argstr='--outputLandmarkFile %s', - hash_files=False, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputLandmarkFile=dict(argstr="--inputLandmarkFile %s", extensions=None,), + outputLandmarkFile=dict(argstr="--outputLandmarkFile %s", hash_files=False,), ) inputs = insertMidACPCpoint.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_insertMidACPCpoint_outputs(): - output_map = dict(outputLandmarkFile=dict(extensions=None, ), ) + output_map = dict(outputLandmarkFile=dict(extensions=None,),) outputs = insertMidACPCpoint.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py index 098cdfa613..79fedd5b68 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py @@ -4,18 +4,11 @@ def test_landmarksConstellationAligner_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( 
- nohash=True, - usedefault=True, - ), - inputLandmarksPaired=dict( - argstr='--inputLandmarksPaired %s', - extensions=None, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputLandmarksPaired=dict(argstr="--inputLandmarksPaired %s", extensions=None,), outputLandmarksPaired=dict( - argstr='--outputLandmarksPaired %s', - hash_files=False, + argstr="--outputLandmarksPaired %s", hash_files=False, ), ) inputs = landmarksConstellationAligner.input_spec() @@ -23,8 +16,10 @@ def test_landmarksConstellationAligner_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_landmarksConstellationAligner_outputs(): - output_map = dict(outputLandmarksPaired=dict(extensions=None, ), ) + output_map = dict(outputLandmarksPaired=dict(extensions=None,),) outputs = landmarksConstellationAligner.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py index 702f8b6266..5bd27aa957 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py @@ -4,35 +4,22 @@ def test_landmarksConstellationWeights_inputs(): input_map = dict( - LLSModel=dict( - argstr='--LLSModel %s', - extensions=None, - ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputTemplateModel=dict( - argstr='--inputTemplateModel %s', - extensions=None, - ), - inputTrainingList=dict( - argstr='--inputTrainingList %s', - extensions=None, - ), - outputWeightsList=dict( - argstr='--outputWeightsList %s', - hash_files=False, - ), + LLSModel=dict(argstr="--LLSModel %s", extensions=None,), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputTemplateModel=dict(argstr="--inputTemplateModel %s", extensions=None,), + inputTrainingList=dict(argstr="--inputTrainingList %s", extensions=None,), + outputWeightsList=dict(argstr="--outputWeightsList %s", hash_files=False,), ) inputs = landmarksConstellationWeights.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_landmarksConstellationWeights_outputs(): - output_map = dict(outputWeightsList=dict(extensions=None, ), ) + output_map = dict(outputWeightsList=dict(extensions=None,),) outputs = landmarksConstellationWeights.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/__init__.py b/nipype/interfaces/slicer/__init__.py index 5198d81be3..bef4698d03 100644 --- a/nipype/interfaces/slicer/__init__.py +++ b/nipype/interfaces/slicer/__init__.py @@ -3,8 +3,14 @@ from .segmentation import * from .filtering import * from .utilities import EMSegmentTransformToNewFormat -from .surface import (MergeModels, ModelToLabelMap, GrayscaleModelMaker, - ProbeVolumeWithModel, LabelMapSmoothing, ModelMaker) +from .surface import ( + MergeModels, + ModelToLabelMap, + GrayscaleModelMaker, + ProbeVolumeWithModel, + LabelMapSmoothing, + ModelMaker, +) from .quantification import * from .legacy import * from .registration import * diff --git a/nipype/interfaces/slicer/converters.py b/nipype/interfaces/slicer/converters.py index e93b994110..cc477e99d0 
100644 --- a/nipype/interfaces/slicer/converters.py +++ b/nipype/interfaces/slicer/converters.py @@ -3,7 +3,18 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os @@ -11,36 +22,40 @@ class DicomToNrrdConverterInputSpec(CommandLineInputSpec): inputDicomDirectory = Directory( desc="Directory holding Dicom series", exists=True, - argstr="--inputDicomDirectory %s") + argstr="--inputDicomDirectory %s", + ) outputDirectory = traits.Either( traits.Bool, Directory(), hash_files=False, desc="Directory holding the output NRRD format", - argstr="--outputDirectory %s") + argstr="--outputDirectory %s", + ) outputVolume = traits.Str( - desc="Output filename (.nhdr or .nrrd)", argstr="--outputVolume %s") + desc="Output filename (.nhdr or .nrrd)", argstr="--outputVolume %s" + ) smallGradientThreshold = traits.Float( - desc= - "If a gradient magnitude is greater than 0 and less than smallGradientThreshold, then DicomToNrrdConverter will display an error message and quit, unless the useBMatrixGradientDirections option is set.", - argstr="--smallGradientThreshold %f") + desc="If a gradient magnitude is greater than 0 and less than smallGradientThreshold, then DicomToNrrdConverter will display an error message and quit, unless the useBMatrixGradientDirections option is set.", + argstr="--smallGradientThreshold %f", + ) writeProtocolGradientsFile = traits.Bool( - desc= - "Write the protocol gradients to a file suffixed by \'.txt\' as they were specified in the procol by multiplying each diffusion gradient direction by the measurement frame. This file is for debugging purposes only, the format is not fixed, and will likely change as debugging of new dicom formats is necessary.", - argstr="--writeProtocolGradientsFile ") + desc="Write the protocol gradients to a file suffixed by '.txt' as they were specified in the procol by multiplying each diffusion gradient direction by the measurement frame. This file is for debugging purposes only, the format is not fixed, and will likely change as debugging of new dicom formats is necessary.", + argstr="--writeProtocolGradientsFile ", + ) useIdentityMeaseurementFrame = traits.Bool( - desc= - "Adjust all the gradients so that the measurement frame is an identity matrix.", - argstr="--useIdentityMeaseurementFrame ") + desc="Adjust all the gradients so that the measurement frame is an identity matrix.", + argstr="--useIdentityMeaseurementFrame ", + ) useBMatrixGradientDirections = traits.Bool( - desc= - "Fill the nhdr header with the gradient directions and bvalues computed out of the BMatrix. Only changes behavior for Siemens data.", - argstr="--useBMatrixGradientDirections ") + desc="Fill the nhdr header with the gradient directions and bvalues computed out of the BMatrix. 
Only changes behavior for Siemens data.", + argstr="--useBMatrixGradientDirections ", + ) class DicomToNrrdConverterOutputSpec(TraitedSpec): outputDirectory = Directory( - desc="Directory holding the output NRRD format", exists=True) + desc="Directory holding the output NRRD format", exists=True + ) class DicomToNrrdConverter(SEMLikeCommandLine): @@ -65,19 +80,19 @@ class DicomToNrrdConverter(SEMLikeCommandLine): input_spec = DicomToNrrdConverterInputSpec output_spec = DicomToNrrdConverterOutputSpec _cmd = "DicomToNrrdConverter " - _outputs_filenames = {'outputDirectory': 'outputDirectory'} + _outputs_filenames = {"outputDirectory": "outputDirectory"} class OrientScalarVolumeInputSpec(CommandLineInputSpec): - inputVolume1 = File( - position=-2, desc="Input volume 1", exists=True, argstr="%s") + inputVolume1 = File(position=-2, desc="Input volume 1", exists=True, argstr="%s") outputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="The oriented volume", - argstr="%s") + argstr="%s", + ) orientation = traits.Enum( "Axial", "Coronal", @@ -131,7 +146,8 @@ class OrientScalarVolumeInputSpec(CommandLineInputSpec): "AIL", "ASL", desc="Orientation choices", - argstr="--orientation %s") + argstr="--orientation %s", + ) class OrientScalarVolumeOutputSpec(TraitedSpec): @@ -158,4 +174,4 @@ class OrientScalarVolume(SEMLikeCommandLine): input_spec = OrientScalarVolumeInputSpec output_spec = OrientScalarVolumeOutputSpec _cmd = "OrientScalarVolume " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} diff --git a/nipype/interfaces/slicer/diffusion/__init__.py b/nipype/interfaces/slicer/diffusion/__init__.py index f6081f6c0c..d7f3089de5 100644 --- a/nipype/interfaces/slicer/diffusion/__init__.py +++ b/nipype/interfaces/slicer/diffusion/__init__.py @@ -1,5 +1,12 @@ # -*- coding: utf-8 -*- from .diffusion import ( - ResampleDTIVolume, DWIRicianLMMSEFilter, TractographyLabelMapSeeding, - DWIJointRicianLMMSEFilter, DiffusionWeightedVolumeMasking, DTIimport, - DWIToDTIEstimation, DiffusionTensorScalarMeasurements, DTIexport) + ResampleDTIVolume, + DWIRicianLMMSEFilter, + TractographyLabelMapSeeding, + DWIJointRicianLMMSEFilter, + DiffusionWeightedVolumeMasking, + DTIimport, + DWIToDTIEstimation, + DiffusionTensorScalarMeasurements, + DTIexport, +) diff --git a/nipype/interfaces/slicer/diffusion/diffusion.py b/nipype/interfaces/slicer/diffusion/diffusion.py index a088d25f8a..8fc0f0c6c9 100644 --- a/nipype/interfaces/slicer/diffusion/diffusion.py +++ b/nipype/interfaces/slicer/diffusion/diffusion.py @@ -3,138 +3,156 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os class ResampleDTIVolumeInputSpec(CommandLineInputSpec): inputVolume = File( - position=-2, - desc="Input volume to be resampled", - exists=True, - argstr="%s") + position=-2, desc="Input volume to be resampled", exists=True, argstr="%s" + ) outputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Resampled Volume", - argstr="%s") + argstr="%s", + ) Reference = File( 
desc="Reference Volume (spacing,size,orientation,origin)", exists=True, - argstr="--Reference %s") + argstr="--Reference %s", + ) transformationFile = File(exists=True, argstr="--transformationFile %s") defField = File( - desc= - "File containing the deformation field (3D vector image containing vectors with 3 components)", + desc="File containing the deformation field (3D vector image containing vectors with 3 components)", exists=True, - argstr="--defField %s") + argstr="--defField %s", + ) hfieldtype = traits.Enum( "displacement", "h-Field", desc="Set if the deformation field is an -Field", - argstr="--hfieldtype %s") + argstr="--hfieldtype %s", + ) interpolation = traits.Enum( "linear", "nn", "ws", "bs", - desc= - "Sampling algorithm (linear , nn (nearest neighborhoor), ws (WindowedSinc), bs (BSpline) )", - argstr="--interpolation %s") + desc="Sampling algorithm (linear , nn (nearest neighborhoor), ws (WindowedSinc), bs (BSpline) )", + argstr="--interpolation %s", + ) correction = traits.Enum( "zero", "none", "abs", "nearest", - desc= - "Correct the tensors if computed tensor is not semi-definite positive", - argstr="--correction %s") + desc="Correct the tensors if computed tensor is not semi-definite positive", + argstr="--correction %s", + ) transform_tensor_method = traits.Enum( "PPD", "FS", - desc= - "Chooses between 2 methods to transform the tensors: Finite Strain (FS), faster but less accurate, or Preservation of the Principal Direction (PPD)", - argstr="--transform_tensor_method %s") + desc="Chooses between 2 methods to transform the tensors: Finite Strain (FS), faster but less accurate, or Preservation of the Principal Direction (PPD)", + argstr="--transform_tensor_method %s", + ) transform_order = traits.Enum( "input-to-output", "output-to-input", desc="Select in what order the transforms are read", - argstr="--transform_order %s") + argstr="--transform_order %s", + ) notbulk = traits.Bool( - desc= - "The transform following the BSpline transform is not set as a bulk transform for the BSpline transform", - argstr="--notbulk ") + desc="The transform following the BSpline transform is not set as a bulk transform for the BSpline transform", + argstr="--notbulk ", + ) spaceChange = traits.Bool( - desc= - "Space Orientation between transform and image is different (RAS/LPS) (warning: if the transform is a Transform Node in Slicer3, do not select)", - argstr="--spaceChange ") + desc="Space Orientation between transform and image is different (RAS/LPS) (warning: if the transform is a Transform Node in Slicer3, do not select)", + argstr="--spaceChange ", + ) rotation_point = traits.List( desc="Center of rotation (only for rigid and affine transforms)", - argstr="--rotation_point %s") + argstr="--rotation_point %s", + ) centered_transform = traits.Bool( - desc= - "Set the center of the transformation to the center of the input image (only for rigid and affine transforms)", - argstr="--centered_transform ") + desc="Set the center of the transformation to the center of the input image (only for rigid and affine transforms)", + argstr="--centered_transform ", + ) image_center = traits.Enum( "input", "output", - desc= - "Image to use to center the transform (used only if \'Centered Transform\' is selected)", - argstr="--image_center %s") + desc="Image to use to center the transform (used only if 'Centered Transform' is selected)", + argstr="--image_center %s", + ) Inverse_ITK_Transformation = traits.Bool( - desc= - "Inverse the transformation before applying it from output image to input 
image (only for rigid and affine transforms)", - argstr="--Inverse_ITK_Transformation ") + desc="Inverse the transformation before applying it from output image to input image (only for rigid and affine transforms)", + argstr="--Inverse_ITK_Transformation ", + ) spacing = InputMultiPath( traits.Float, desc="Spacing along each dimension (0 means use input spacing)", sep=",", - argstr="--spacing %s") + argstr="--spacing %s", + ) size = InputMultiPath( traits.Float, desc="Size along each dimension (0 means use input size)", sep=",", - argstr="--size %s") - origin = traits.List( - desc="Origin of the output Image", argstr="--origin %s") + argstr="--size %s", + ) + origin = traits.List(desc="Origin of the output Image", argstr="--origin %s") direction_matrix = InputMultiPath( traits.Float, - desc= - "9 parameters of the direction matrix by rows (ijk to LPS if LPS transform, ijk to RAS if RAS transform)", + desc="9 parameters of the direction matrix by rows (ijk to LPS if LPS transform, ijk to RAS if RAS transform)", sep=",", - argstr="--direction_matrix %s") + argstr="--direction_matrix %s", + ) number_of_thread = traits.Int( desc="Number of thread used to compute the output image", - argstr="--number_of_thread %d") + argstr="--number_of_thread %d", + ) default_pixel_value = traits.Float( - desc= - "Default pixel value for samples falling outside of the input region", - argstr="--default_pixel_value %f") + desc="Default pixel value for samples falling outside of the input region", + argstr="--default_pixel_value %f", + ) window_function = traits.Enum( "h", "c", "w", "l", "b", - desc= - "Window Function , h = Hamming , c = Cosine , w = Welch , l = Lanczos , b = Blackman", - argstr="--window_function %s") + desc="Window Function , h = Hamming , c = Cosine , w = Welch , l = Lanczos , b = Blackman", + argstr="--window_function %s", + ) spline_order = traits.Int( desc="Spline Order (Spline order may be from 0 to 5)", - argstr="--spline_order %d") + argstr="--spline_order %d", + ) transform_matrix = InputMultiPath( traits.Float, - desc= - "12 parameters of the transform matrix by rows ( --last 3 being translation-- )", + desc="12 parameters of the transform matrix by rows ( --last 3 being translation-- )", sep=",", - argstr="--transform_matrix %s") + argstr="--transform_matrix %s", + ) transform = traits.Enum( "rt", "a", desc="Transform algorithm, rt = Rigid Transform, a = Affine Transform", - argstr="--transform %s") + argstr="--transform %s", + ) class ResampleDTIVolumeOutputSpec(TraitedSpec): @@ -161,45 +179,50 @@ class ResampleDTIVolume(SEMLikeCommandLine): input_spec = ResampleDTIVolumeInputSpec output_spec = ResampleDTIVolumeOutputSpec _cmd = "ResampleDTIVolume " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} class DWIRicianLMMSEFilterInputSpec(CommandLineInputSpec): iter = traits.Int( - desc="Number of iterations for the noise removal filter.", - argstr="--iter %d") + desc="Number of iterations for the noise removal filter.", argstr="--iter %d" + ) re = InputMultiPath( - traits.Int, desc="Estimation radius.", sep=",", argstr="--re %s") - rf = InputMultiPath( - traits.Int, desc="Filtering radius.", sep=",", argstr="--rf %s") + traits.Int, desc="Estimation radius.", sep=",", argstr="--re %s" + ) + rf = InputMultiPath(traits.Int, desc="Filtering radius.", sep=",", argstr="--rf %s") mnvf = traits.Int( desc="Minimum number of voxels in kernel used for filtering.", - argstr="--mnvf %d") + argstr="--mnvf %d", + ) mnve = 
traits.Int( desc="Minimum number of voxels in kernel used for estimation.", - argstr="--mnve %d") + argstr="--mnve %d", + ) minnstd = traits.Int( - desc="Minimum allowed noise standard deviation.", - argstr="--minnstd %d") + desc="Minimum allowed noise standard deviation.", argstr="--minnstd %d" + ) maxnstd = traits.Int( - desc="Maximum allowed noise standard deviation.", - argstr="--maxnstd %d") + desc="Maximum allowed noise standard deviation.", argstr="--maxnstd %d" + ) hrf = traits.Float( - desc="How many histogram bins per unit interval.", argstr="--hrf %f") + desc="How many histogram bins per unit interval.", argstr="--hrf %f" + ) uav = traits.Bool( - desc="Use absolute value in case of negative square.", argstr="--uav ") - inputVolume = File( - position=-2, desc="Input DWI volume.", exists=True, argstr="%s") + desc="Use absolute value in case of negative square.", argstr="--uav " + ) + inputVolume = File(position=-2, desc="Input DWI volume.", exists=True, argstr="%s") outputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Output DWI volume.", - argstr="%s") + argstr="%s", + ) compressOutput = traits.Bool( desc="Compress the data of the compressed file using gzip", - argstr="--compressOutput ") + argstr="--compressOutput ", + ) class DWIRicianLMMSEFilterOutputSpec(TraitedSpec): @@ -229,73 +252,79 @@ class DWIRicianLMMSEFilter(SEMLikeCommandLine): input_spec = DWIRicianLMMSEFilterInputSpec output_spec = DWIRicianLMMSEFilterOutputSpec _cmd = "DWIRicianLMMSEFilter " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} class TractographyLabelMapSeedingInputSpec(CommandLineInputSpec): - InputVolume = File( - position=-2, desc="Input DTI volume", exists=True, argstr="%s") + InputVolume = File(position=-2, desc="Input DTI volume", exists=True, argstr="%s") inputroi = File( - desc="Label map with seeding ROIs", - exists=True, - argstr="--inputroi %s") + desc="Label map with seeding ROIs", exists=True, argstr="--inputroi %s" + ) OutputFibers = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Tractography result", - argstr="%s") + argstr="%s", + ) useindexspace = traits.Bool( - desc="Seed at IJK voxel grid", argstr="--useindexspace ") + desc="Seed at IJK voxel grid", argstr="--useindexspace " + ) seedspacing = traits.Float( - desc= - "Spacing (in mm) between seed points, only matters if use Use Index Space is off", - argstr="--seedspacing %f") + desc="Spacing (in mm) between seed points, only matters if use Use Index Space is off", + argstr="--seedspacing %f", + ) randomgrid = traits.Bool( - desc="Enable random placing of seeds", argstr="--randomgrid ") + desc="Enable random placing of seeds", argstr="--randomgrid " + ) clthreshold = traits.Float( desc="Minimum Linear Measure for the seeding to start.", - argstr="--clthreshold %f") + argstr="--clthreshold %f", + ) minimumlength = traits.Float( - desc="Minimum length of the fibers (in mm)", - argstr="--minimumlength %f") + desc="Minimum length of the fibers (in mm)", argstr="--minimumlength %f" + ) maximumlength = traits.Float( - desc="Maximum length of fibers (in mm)", argstr="--maximumlength %f") + desc="Maximum length of fibers (in mm)", argstr="--maximumlength %f" + ) stoppingmode = traits.Enum( "LinearMeasure", "FractionalAnisotropy", desc="Tensor measurement used to stop the tractography", - argstr="--stoppingmode %s") + argstr="--stoppingmode %s", + ) stoppingvalue = traits.Float( - desc= - "Tractography will stop 
when the stopping measurement drops below this value", - argstr="--stoppingvalue %f") + desc="Tractography will stop when the stopping measurement drops below this value", + argstr="--stoppingvalue %f", + ) stoppingcurvature = traits.Float( - desc= - "Tractography will stop if radius of curvature becomes smaller than this number units are degrees per mm", - argstr="--stoppingcurvature %f") + desc="Tractography will stop if radius of curvature becomes smaller than this number units are degrees per mm", + argstr="--stoppingcurvature %f", + ) integrationsteplength = traits.Float( desc="Distance between points on the same fiber in mm", - argstr="--integrationsteplength %f") + argstr="--integrationsteplength %f", + ) label = traits.Int( - desc="Label value that defines seeding region.", argstr="--label %d") + desc="Label value that defines seeding region.", argstr="--label %d" + ) writetofile = traits.Bool( - desc="Write fibers to disk or create in the scene?", - argstr="--writetofile ") + desc="Write fibers to disk or create in the scene?", argstr="--writetofile " + ) outputdirectory = traits.Either( traits.Bool, Directory(), hash_files=False, desc="Directory in which to save fiber(s)", - argstr="--outputdirectory %s") + argstr="--outputdirectory %s", + ) name = traits.Str(desc="Name to use for fiber files", argstr="--name %s") class TractographyLabelMapSeedingOutputSpec(TraitedSpec): OutputFibers = File(position=-1, desc="Tractography result", exists=True) - outputdirectory = Directory( - desc="Directory in which to save fiber(s)", exists=True) + outputdirectory = Directory(desc="Directory in which to save fiber(s)", exists=True) class TractographyLabelMapSeeding(SEMLikeCommandLine): @@ -321,32 +350,33 @@ class TractographyLabelMapSeeding(SEMLikeCommandLine): output_spec = TractographyLabelMapSeedingOutputSpec _cmd = "TractographyLabelMapSeeding " _outputs_filenames = { - 'OutputFibers': 'OutputFibers.vtk', - 'outputdirectory': 'outputdirectory' + "OutputFibers": "OutputFibers.vtk", + "outputdirectory": "outputdirectory", } class DWIJointRicianLMMSEFilterInputSpec(CommandLineInputSpec): re = InputMultiPath( - traits.Int, desc="Estimation radius.", sep=",", argstr="--re %s") - rf = InputMultiPath( - traits.Int, desc="Filtering radius.", sep=",", argstr="--rf %s") + traits.Int, desc="Estimation radius.", sep=",", argstr="--re %s" + ) + rf = InputMultiPath(traits.Int, desc="Filtering radius.", sep=",", argstr="--rf %s") ng = traits.Int( - desc= - "The number of the closest gradients that are used to jointly filter a given gradient direction (0 to use all).", - argstr="--ng %d") - inputVolume = File( - position=-2, desc="Input DWI volume.", exists=True, argstr="%s") + desc="The number of the closest gradients that are used to jointly filter a given gradient direction (0 to use all).", + argstr="--ng %d", + ) + inputVolume = File(position=-2, desc="Input DWI volume.", exists=True, argstr="%s") outputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Output DWI volume.", - argstr="%s") + argstr="%s", + ) compressOutput = traits.Bool( desc="Compress the data of the compressed file using gzip", - argstr="--compressOutput ") + argstr="--compressOutput ", + ) class DWIJointRicianLMMSEFilterOutputSpec(TraitedSpec): @@ -376,37 +406,38 @@ class DWIJointRicianLMMSEFilter(SEMLikeCommandLine): input_spec = DWIJointRicianLMMSEFilterInputSpec output_spec = DWIJointRicianLMMSEFilterOutputSpec _cmd = "DWIJointRicianLMMSEFilter " - _outputs_filenames = {'outputVolume': 
'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} class DiffusionWeightedVolumeMaskingInputSpec(CommandLineInputSpec): - inputVolume = File( - position=-4, desc="Input DWI volume", exists=True, argstr="%s") + inputVolume = File(position=-4, desc="Input DWI volume", exists=True, argstr="%s") outputBaseline = traits.Either( traits.Bool, File(), position=-2, hash_files=False, desc="Estimated baseline volume", - argstr="%s") + argstr="%s", + ) thresholdMask = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Otsu Threshold Mask", - argstr="%s") + argstr="%s", + ) otsuomegathreshold = traits.Float( - desc= - "Control the sharpness of the threshold in the Otsu computation. 0: lower threshold, 1: higher threhold", - argstr="--otsuomegathreshold %f") + desc="Control the sharpness of the threshold in the Otsu computation. 0: lower threshold, 1: higher threhold", + argstr="--otsuomegathreshold %f", + ) removeislands = traits.Bool( - desc="Remove Islands in Threshold Mask?", argstr="--removeislands ") + desc="Remove Islands in Threshold Mask?", argstr="--removeislands " + ) class DiffusionWeightedVolumeMaskingOutputSpec(TraitedSpec): - outputBaseline = File( - position=-2, desc="Estimated baseline volume", exists=True) + outputBaseline = File(position=-2, desc="Estimated baseline volume", exists=True) thresholdMask = File(position=-1, desc="Otsu Threshold Mask", exists=True) @@ -431,25 +462,25 @@ class DiffusionWeightedVolumeMasking(SEMLikeCommandLine): output_spec = DiffusionWeightedVolumeMaskingOutputSpec _cmd = "DiffusionWeightedVolumeMasking " _outputs_filenames = { - 'outputBaseline': 'outputBaseline.nii', - 'thresholdMask': 'thresholdMask.nii' + "outputBaseline": "outputBaseline.nii", + "thresholdMask": "thresholdMask.nii", } class DTIimportInputSpec(CommandLineInputSpec): - inputFile = File( - position=-2, desc="Input DTI file", exists=True, argstr="%s") + inputFile = File(position=-2, desc="Input DTI file", exists=True, argstr="%s") outputTensor = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Output DTI volume", - argstr="%s") + argstr="%s", + ) testingmode = traits.Bool( - desc= - "Enable testing mode. Sample helix file (helix-DTI.nhdr) will be loaded into Slicer and converted in Nifti.", - argstr="--testingmode ") + desc="Enable testing mode. 
Sample helix file (helix-DTI.nhdr) will be loaded into Slicer and converted in Nifti.", + argstr="--testingmode ", + ) class DTIimportOutputSpec(TraitedSpec): @@ -476,45 +507,45 @@ class DTIimport(SEMLikeCommandLine): input_spec = DTIimportInputSpec output_spec = DTIimportOutputSpec _cmd = "DTIimport " - _outputs_filenames = {'outputTensor': 'outputTensor.nii'} + _outputs_filenames = {"outputTensor": "outputTensor.nii"} class DWIToDTIEstimationInputSpec(CommandLineInputSpec): - inputVolume = File( - position=-3, desc="Input DWI volume", exists=True, argstr="%s") + inputVolume = File(position=-3, desc="Input DWI volume", exists=True, argstr="%s") mask = File( - desc="Mask where the tensors will be computed", - exists=True, - argstr="--mask %s") + desc="Mask where the tensors will be computed", exists=True, argstr="--mask %s" + ) outputTensor = traits.Either( traits.Bool, File(), position=-2, hash_files=False, desc="Estimated DTI volume", - argstr="%s") + argstr="%s", + ) outputBaseline = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Estimated baseline volume", - argstr="%s") + argstr="%s", + ) enumeration = traits.Enum( "LS", "WLS", desc="LS: Least Squares, WLS: Weighted Least Squares", - argstr="--enumeration %s") + argstr="--enumeration %s", + ) shiftNeg = traits.Bool( - desc= - "Shift eigenvalues so all are positive (accounts for bad tensors related to noise or acquisition error)", - argstr="--shiftNeg ") + desc="Shift eigenvalues so all are positive (accounts for bad tensors related to noise or acquisition error)", + argstr="--shiftNeg ", + ) class DWIToDTIEstimationOutputSpec(TraitedSpec): outputTensor = File(position=-2, desc="Estimated DTI volume", exists=True) - outputBaseline = File( - position=-1, desc="Estimated baseline volume", exists=True) + outputBaseline = File(position=-1, desc="Estimated baseline volume", exists=True) class DWIToDTIEstimation(SEMLikeCommandLine): @@ -542,21 +573,21 @@ class DWIToDTIEstimation(SEMLikeCommandLine): output_spec = DWIToDTIEstimationOutputSpec _cmd = "DWIToDTIEstimation " _outputs_filenames = { - 'outputTensor': 'outputTensor.nii', - 'outputBaseline': 'outputBaseline.nii' + "outputTensor": "outputTensor.nii", + "outputBaseline": "outputBaseline.nii", } class DiffusionTensorScalarMeasurementsInputSpec(CommandLineInputSpec): - inputVolume = File( - position=-3, desc="Input DTI volume", exists=True, argstr="%s") + inputVolume = File(position=-3, desc="Input DTI volume", exists=True, argstr="%s") outputScalar = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Scalar volume derived from tensor", - argstr="%s") + argstr="%s", + ) enumeration = traits.Enum( "Trace", "Determinant", @@ -584,12 +615,14 @@ class DiffusionTensorScalarMeasurementsInputSpec(CommandLineInputSpec): "ParallelDiffusivity", "PerpendicularDffusivity", desc="An enumeration of strings", - argstr="--enumeration %s") + argstr="--enumeration %s", + ) class DiffusionTensorScalarMeasurementsOutputSpec(TraitedSpec): outputScalar = File( - position=-1, desc="Scalar volume derived from tensor", exists=True) + position=-1, desc="Scalar volume derived from tensor", exists=True + ) class DiffusionTensorScalarMeasurements(SEMLikeCommandLine): @@ -612,19 +645,19 @@ class DiffusionTensorScalarMeasurements(SEMLikeCommandLine): input_spec = DiffusionTensorScalarMeasurementsInputSpec output_spec = DiffusionTensorScalarMeasurementsOutputSpec _cmd = "DiffusionTensorScalarMeasurements " - _outputs_filenames = {'outputScalar': 'outputScalar.nii'} + 
_outputs_filenames = {"outputScalar": "outputScalar.nii"} class DTIexportInputSpec(CommandLineInputSpec): - inputTensor = File( - position=-2, desc="Input DTI volume", exists=True, argstr="%s") + inputTensor = File(position=-2, desc="Input DTI volume", exists=True, argstr="%s") outputFile = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Output DTI file", - argstr="%s") + argstr="%s", + ) class DTIexportOutputSpec(TraitedSpec): @@ -651,4 +684,4 @@ class DTIexport(SEMLikeCommandLine): input_spec = DTIexportInputSpec output_spec = DTIexportOutputSpec _cmd = "DTIexport " - _outputs_filenames = {'outputFile': 'outputFile'} + _outputs_filenames = {"outputFile": "outputFile"} diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py index 5b72027e1a..7c7f02cacc 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py @@ -4,32 +4,20 @@ def test_DTIexport_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputTensor=dict( - argstr='%s', - extensions=None, - position=-2, - ), - outputFile=dict( - argstr='%s', - hash_files=False, - position=-1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputTensor=dict(argstr="%s", extensions=None, position=-2,), + outputFile=dict(argstr="%s", hash_files=False, position=-1,), ) inputs = DTIexport.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DTIexport_outputs(): - output_map = dict(outputFile=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputFile=dict(extensions=None, position=-1,),) outputs = DTIexport.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py index 9b9c5f1929..65ed035819 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py @@ -4,33 +4,21 @@ def test_DTIimport_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputFile=dict( - argstr='%s', - extensions=None, - position=-2, - ), - outputTensor=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - testingmode=dict(argstr='--testingmode ', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputFile=dict(argstr="%s", extensions=None, position=-2,), + outputTensor=dict(argstr="%s", hash_files=False, position=-1,), + testingmode=dict(argstr="--testingmode ",), ) inputs = DTIimport.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DTIimport_outputs(): - output_map = dict(outputTensor=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputTensor=dict(extensions=None, position=-1,),) outputs = DTIimport.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py index 186014c407..3c0d7c8861 100644 --- 
a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py @@ -4,42 +4,24 @@ def test_DWIJointRicianLMMSEFilter_inputs(): input_map = dict( - args=dict(argstr='%s', ), - compressOutput=dict(argstr='--compressOutput ', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='%s', - extensions=None, - position=-2, - ), - ng=dict(argstr='--ng %d', ), - outputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - re=dict( - argstr='--re %s', - sep=',', - ), - rf=dict( - argstr='--rf %s', - sep=',', - ), + args=dict(argstr="%s",), + compressOutput=dict(argstr="--compressOutput ",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="%s", extensions=None, position=-2,), + ng=dict(argstr="--ng %d",), + outputVolume=dict(argstr="%s", hash_files=False, position=-1,), + re=dict(argstr="--re %s", sep=",",), + rf=dict(argstr="--rf %s", sep=",",), ) inputs = DWIJointRicianLMMSEFilter.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DWIJointRicianLMMSEFilter_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict(extensions=None, position=-1,),) outputs = DWIJointRicianLMMSEFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py index 1c488f912f..8dda7d3105 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py @@ -4,48 +4,30 @@ def test_DWIRicianLMMSEFilter_inputs(): input_map = dict( - args=dict(argstr='%s', ), - compressOutput=dict(argstr='--compressOutput ', ), - environ=dict( - nohash=True, - usedefault=True, - ), - hrf=dict(argstr='--hrf %f', ), - inputVolume=dict( - argstr='%s', - extensions=None, - position=-2, - ), - iter=dict(argstr='--iter %d', ), - maxnstd=dict(argstr='--maxnstd %d', ), - minnstd=dict(argstr='--minnstd %d', ), - mnve=dict(argstr='--mnve %d', ), - mnvf=dict(argstr='--mnvf %d', ), - outputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - re=dict( - argstr='--re %s', - sep=',', - ), - rf=dict( - argstr='--rf %s', - sep=',', - ), - uav=dict(argstr='--uav ', ), + args=dict(argstr="%s",), + compressOutput=dict(argstr="--compressOutput ",), + environ=dict(nohash=True, usedefault=True,), + hrf=dict(argstr="--hrf %f",), + inputVolume=dict(argstr="%s", extensions=None, position=-2,), + iter=dict(argstr="--iter %d",), + maxnstd=dict(argstr="--maxnstd %d",), + minnstd=dict(argstr="--minnstd %d",), + mnve=dict(argstr="--mnve %d",), + mnvf=dict(argstr="--mnvf %d",), + outputVolume=dict(argstr="%s", hash_files=False, position=-1,), + re=dict(argstr="--re %s", sep=",",), + rf=dict(argstr="--rf %s", sep=",",), + uav=dict(argstr="--uav ",), ) inputs = DWIRicianLMMSEFilter.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DWIRicianLMMSEFilter_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict(extensions=None, position=-1,),) 
outputs = DWIRicianLMMSEFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py index 87a1300476..ecb618c2ff 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py @@ -4,48 +4,26 @@ def test_DWIToDTIEstimation_inputs(): input_map = dict( - args=dict(argstr='%s', ), - enumeration=dict(argstr='--enumeration %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='%s', - extensions=None, - position=-3, - ), - mask=dict( - argstr='--mask %s', - extensions=None, - ), - outputBaseline=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - outputTensor=dict( - argstr='%s', - hash_files=False, - position=-2, - ), - shiftNeg=dict(argstr='--shiftNeg ', ), + args=dict(argstr="%s",), + enumeration=dict(argstr="--enumeration %s",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="%s", extensions=None, position=-3,), + mask=dict(argstr="--mask %s", extensions=None,), + outputBaseline=dict(argstr="%s", hash_files=False, position=-1,), + outputTensor=dict(argstr="%s", hash_files=False, position=-2,), + shiftNeg=dict(argstr="--shiftNeg ",), ) inputs = DWIToDTIEstimation.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DWIToDTIEstimation_outputs(): output_map = dict( - outputBaseline=dict( - extensions=None, - position=-1, - ), - outputTensor=dict( - extensions=None, - position=-2, - ), + outputBaseline=dict(extensions=None, position=-1,), + outputTensor=dict(extensions=None, position=-2,), ) outputs = DWIToDTIEstimation.output_spec() diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py index 44b5d9228b..70df302ae6 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py @@ -4,33 +4,21 @@ def test_DiffusionTensorScalarMeasurements_inputs(): input_map = dict( - args=dict(argstr='%s', ), - enumeration=dict(argstr='--enumeration %s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='%s', - extensions=None, - position=-3, - ), - outputScalar=dict( - argstr='%s', - hash_files=False, - position=-1, - ), + args=dict(argstr="%s",), + enumeration=dict(argstr="--enumeration %s",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="%s", extensions=None, position=-3,), + outputScalar=dict(argstr="%s", hash_files=False, position=-1,), ) inputs = DiffusionTensorScalarMeasurements.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DiffusionTensorScalarMeasurements_outputs(): - output_map = dict(outputScalar=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputScalar=dict(extensions=None, position=-1,),) outputs = DiffusionTensorScalarMeasurements.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py index fdd979678e..659f86a642 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py @@ -4,44 +4,25 @@ def test_DiffusionWeightedVolumeMasking_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='%s', - extensions=None, - position=-4, - ), - otsuomegathreshold=dict(argstr='--otsuomegathreshold %f', ), - outputBaseline=dict( - argstr='%s', - hash_files=False, - position=-2, - ), - removeislands=dict(argstr='--removeislands ', ), - thresholdMask=dict( - argstr='%s', - hash_files=False, - position=-1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="%s", extensions=None, position=-4,), + otsuomegathreshold=dict(argstr="--otsuomegathreshold %f",), + outputBaseline=dict(argstr="%s", hash_files=False, position=-2,), + removeislands=dict(argstr="--removeislands ",), + thresholdMask=dict(argstr="%s", hash_files=False, position=-1,), ) inputs = DiffusionWeightedVolumeMasking.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DiffusionWeightedVolumeMasking_outputs(): output_map = dict( - outputBaseline=dict( - extensions=None, - position=-2, - ), - thresholdMask=dict( - extensions=None, - position=-1, - ), + outputBaseline=dict(extensions=None, position=-2,), + thresholdMask=dict(extensions=None, position=-1,), ) outputs = DiffusionWeightedVolumeMasking.output_spec() diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py index 1933c1d674..8db387f985 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py @@ -4,78 +4,44 @@ def test_ResampleDTIVolume_inputs(): input_map = dict( - Inverse_ITK_Transformation=dict( - argstr='--Inverse_ITK_Transformation ', ), - Reference=dict( - argstr='--Reference %s', - extensions=None, - ), - args=dict(argstr='%s', ), - centered_transform=dict(argstr='--centered_transform ', ), - correction=dict(argstr='--correction %s', ), - defField=dict( - argstr='--defField %s', - extensions=None, - ), - default_pixel_value=dict(argstr='--default_pixel_value %f', ), - direction_matrix=dict( - argstr='--direction_matrix %s', - sep=',', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - hfieldtype=dict(argstr='--hfieldtype %s', ), - image_center=dict(argstr='--image_center %s', ), - inputVolume=dict( - argstr='%s', - extensions=None, - position=-2, - ), - interpolation=dict(argstr='--interpolation %s', ), - notbulk=dict(argstr='--notbulk ', ), - number_of_thread=dict(argstr='--number_of_thread %d', ), - origin=dict(argstr='--origin %s', ), - outputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - rotation_point=dict(argstr='--rotation_point %s', ), - size=dict( - argstr='--size %s', - sep=',', - ), - spaceChange=dict(argstr='--spaceChange ', ), - spacing=dict( - argstr='--spacing %s', - sep=',', - ), - spline_order=dict(argstr='--spline_order %d', ), - 
transform=dict(argstr='--transform %s', ), - transform_matrix=dict( - argstr='--transform_matrix %s', - sep=',', - ), - transform_order=dict(argstr='--transform_order %s', ), - transform_tensor_method=dict(argstr='--transform_tensor_method %s', ), - transformationFile=dict( - argstr='--transformationFile %s', - extensions=None, - ), - window_function=dict(argstr='--window_function %s', ), + Inverse_ITK_Transformation=dict(argstr="--Inverse_ITK_Transformation ",), + Reference=dict(argstr="--Reference %s", extensions=None,), + args=dict(argstr="%s",), + centered_transform=dict(argstr="--centered_transform ",), + correction=dict(argstr="--correction %s",), + defField=dict(argstr="--defField %s", extensions=None,), + default_pixel_value=dict(argstr="--default_pixel_value %f",), + direction_matrix=dict(argstr="--direction_matrix %s", sep=",",), + environ=dict(nohash=True, usedefault=True,), + hfieldtype=dict(argstr="--hfieldtype %s",), + image_center=dict(argstr="--image_center %s",), + inputVolume=dict(argstr="%s", extensions=None, position=-2,), + interpolation=dict(argstr="--interpolation %s",), + notbulk=dict(argstr="--notbulk ",), + number_of_thread=dict(argstr="--number_of_thread %d",), + origin=dict(argstr="--origin %s",), + outputVolume=dict(argstr="%s", hash_files=False, position=-1,), + rotation_point=dict(argstr="--rotation_point %s",), + size=dict(argstr="--size %s", sep=",",), + spaceChange=dict(argstr="--spaceChange ",), + spacing=dict(argstr="--spacing %s", sep=",",), + spline_order=dict(argstr="--spline_order %d",), + transform=dict(argstr="--transform %s",), + transform_matrix=dict(argstr="--transform_matrix %s", sep=",",), + transform_order=dict(argstr="--transform_order %s",), + transform_tensor_method=dict(argstr="--transform_tensor_method %s",), + transformationFile=dict(argstr="--transformationFile %s", extensions=None,), + window_function=dict(argstr="--window_function %s",), ) inputs = ResampleDTIVolume.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ResampleDTIVolume_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict(extensions=None, position=-1,),) outputs = ResampleDTIVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py index e14bfeece8..6f36ac2a63 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py @@ -4,55 +4,36 @@ def test_TractographyLabelMapSeeding_inputs(): input_map = dict( - InputVolume=dict( - argstr='%s', - extensions=None, - position=-2, - ), - OutputFibers=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - args=dict(argstr='%s', ), - clthreshold=dict(argstr='--clthreshold %f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputroi=dict( - argstr='--inputroi %s', - extensions=None, - ), - integrationsteplength=dict(argstr='--integrationsteplength %f', ), - label=dict(argstr='--label %d', ), - maximumlength=dict(argstr='--maximumlength %f', ), - minimumlength=dict(argstr='--minimumlength %f', ), - name=dict(argstr='--name %s', ), - outputdirectory=dict( - argstr='--outputdirectory %s', - hash_files=False, - ), - 
randomgrid=dict(argstr='--randomgrid ', ), - seedspacing=dict(argstr='--seedspacing %f', ), - stoppingcurvature=dict(argstr='--stoppingcurvature %f', ), - stoppingmode=dict(argstr='--stoppingmode %s', ), - stoppingvalue=dict(argstr='--stoppingvalue %f', ), - useindexspace=dict(argstr='--useindexspace ', ), - writetofile=dict(argstr='--writetofile ', ), + InputVolume=dict(argstr="%s", extensions=None, position=-2,), + OutputFibers=dict(argstr="%s", hash_files=False, position=-1,), + args=dict(argstr="%s",), + clthreshold=dict(argstr="--clthreshold %f",), + environ=dict(nohash=True, usedefault=True,), + inputroi=dict(argstr="--inputroi %s", extensions=None,), + integrationsteplength=dict(argstr="--integrationsteplength %f",), + label=dict(argstr="--label %d",), + maximumlength=dict(argstr="--maximumlength %f",), + minimumlength=dict(argstr="--minimumlength %f",), + name=dict(argstr="--name %s",), + outputdirectory=dict(argstr="--outputdirectory %s", hash_files=False,), + randomgrid=dict(argstr="--randomgrid ",), + seedspacing=dict(argstr="--seedspacing %f",), + stoppingcurvature=dict(argstr="--stoppingcurvature %f",), + stoppingmode=dict(argstr="--stoppingmode %s",), + stoppingvalue=dict(argstr="--stoppingvalue %f",), + useindexspace=dict(argstr="--useindexspace ",), + writetofile=dict(argstr="--writetofile ",), ) inputs = TractographyLabelMapSeeding.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_TractographyLabelMapSeeding_outputs(): output_map = dict( - OutputFibers=dict( - extensions=None, - position=-1, - ), - outputdirectory=dict(), + OutputFibers=dict(extensions=None, position=-1,), outputdirectory=dict(), ) outputs = TractographyLabelMapSeeding.output_spec() diff --git a/nipype/interfaces/slicer/filtering/__init__.py b/nipype/interfaces/slicer/filtering/__init__.py index 1270e20d26..13b79f8705 100644 --- a/nipype/interfaces/slicer/filtering/__init__.py +++ b/nipype/interfaces/slicer/filtering/__init__.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- -from .morphology import (GrayscaleGrindPeakImageFilter, - GrayscaleFillHoleImageFilter) -from .denoising import (GradientAnisotropicDiffusion, - CurvatureAnisotropicDiffusion, GaussianBlurImageFilter, - MedianImageFilter) -from .arithmetic import (MultiplyScalarVolumes, MaskScalarVolume, - SubtractScalarVolumes, AddScalarVolumes, - CastScalarVolume) +from .morphology import GrayscaleGrindPeakImageFilter, GrayscaleFillHoleImageFilter +from .denoising import ( + GradientAnisotropicDiffusion, + CurvatureAnisotropicDiffusion, + GaussianBlurImageFilter, + MedianImageFilter, +) +from .arithmetic import ( + MultiplyScalarVolumes, + MaskScalarVolume, + SubtractScalarVolumes, + AddScalarVolumes, + CastScalarVolume, +) from .extractskeleton import ExtractSkeleton from .histogrammatching import HistogramMatching from .thresholdscalarvolume import ThresholdScalarVolume diff --git a/nipype/interfaces/slicer/filtering/arithmetic.py b/nipype/interfaces/slicer/filtering/arithmetic.py index 22785e32e1..64d60feef3 100644 --- a/nipype/interfaces/slicer/filtering/arithmetic.py +++ b/nipype/interfaces/slicer/filtering/arithmetic.py @@ -3,30 +3,40 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath 
+from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os class MultiplyScalarVolumesInputSpec(CommandLineInputSpec): - inputVolume1 = File( - position=-3, desc="Input volume 1", exists=True, argstr="%s") - inputVolume2 = File( - position=-2, desc="Input volume 2", exists=True, argstr="%s") + inputVolume1 = File(position=-3, desc="Input volume 1", exists=True, argstr="%s") + inputVolume2 = File(position=-2, desc="Input volume 2", exists=True, argstr="%s") outputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Volume1 * Volume2", - argstr="%s") + argstr="%s", + ) order = traits.Enum( "0", "1", "2", "3", - desc= - "Interpolation order if two images are in different coordinate frames or have different sampling.", - argstr="--order %s") + desc="Interpolation order if two images are in different coordinate frames or have different sampling.", + argstr="--order %s", + ) class MultiplyScalarVolumesOutputSpec(TraitedSpec): @@ -53,42 +63,39 @@ class MultiplyScalarVolumes(SEMLikeCommandLine): input_spec = MultiplyScalarVolumesInputSpec output_spec = MultiplyScalarVolumesOutputSpec _cmd = "MultiplyScalarVolumes " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} class MaskScalarVolumeInputSpec(CommandLineInputSpec): InputVolume = File( - position=-3, - desc="Input volume to be masked", - exists=True, - argstr="%s") + position=-3, desc="Input volume to be masked", exists=True, argstr="%s" + ) MaskVolume = File( - position=-2, - desc="Label volume containing the mask", - exists=True, - argstr="%s") + position=-2, desc="Label volume containing the mask", exists=True, argstr="%s" + ) OutputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, - desc= - "Output volume: Input Volume masked by label value from Mask Volume", - argstr="%s") + desc="Output volume: Input Volume masked by label value from Mask Volume", + argstr="%s", + ) label = traits.Int( - desc="Label value in the Mask Volume to use as the mask", - argstr="--label %d") + desc="Label value in the Mask Volume to use as the mask", argstr="--label %d" + ) replace = traits.Int( desc="Value to use for the output volume outside of the mask", - argstr="--replace %d") + argstr="--replace %d", + ) class MaskScalarVolumeOutputSpec(TraitedSpec): OutputVolume = File( position=-1, - desc= - "Output volume: Input Volume masked by label value from Mask Volume", - exists=True) + desc="Output volume: Input Volume masked by label value from Mask Volume", + exists=True, + ) class MaskScalarVolume(SEMLikeCommandLine): @@ -111,29 +118,28 @@ class MaskScalarVolume(SEMLikeCommandLine): input_spec = MaskScalarVolumeInputSpec output_spec = MaskScalarVolumeOutputSpec _cmd = "MaskScalarVolume " - _outputs_filenames = {'OutputVolume': 'OutputVolume.nii'} + _outputs_filenames = {"OutputVolume": "OutputVolume.nii"} class SubtractScalarVolumesInputSpec(CommandLineInputSpec): - inputVolume1 = File( - position=-3, desc="Input volume 1", exists=True, argstr="%s") - inputVolume2 = File( - position=-2, desc="Input volume 2", exists=True, argstr="%s") + inputVolume1 = File(position=-3, desc="Input volume 1", exists=True, argstr="%s") + inputVolume2 = File(position=-2, desc="Input volume 2", exists=True, argstr="%s") outputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Volume1 
- Volume2", - argstr="%s") + argstr="%s", + ) order = traits.Enum( "0", "1", "2", "3", - desc= - "Interpolation order if two images are in different coordinate frames or have different sampling.", - argstr="--order %s") + desc="Interpolation order if two images are in different coordinate frames or have different sampling.", + argstr="--order %s", + ) class SubtractScalarVolumesOutputSpec(TraitedSpec): @@ -160,29 +166,28 @@ class SubtractScalarVolumes(SEMLikeCommandLine): input_spec = SubtractScalarVolumesInputSpec output_spec = SubtractScalarVolumesOutputSpec _cmd = "SubtractScalarVolumes " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} class AddScalarVolumesInputSpec(CommandLineInputSpec): - inputVolume1 = File( - position=-3, desc="Input volume 1", exists=True, argstr="%s") - inputVolume2 = File( - position=-2, desc="Input volume 2", exists=True, argstr="%s") + inputVolume1 = File(position=-3, desc="Input volume 1", exists=True, argstr="%s") + inputVolume2 = File(position=-2, desc="Input volume 2", exists=True, argstr="%s") outputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Volume1 + Volume2", - argstr="%s") + argstr="%s", + ) order = traits.Enum( "0", "1", "2", "3", - desc= - "Interpolation order if two images are in different coordinate frames or have different sampling.", - argstr="--order %s") + desc="Interpolation order if two images are in different coordinate frames or have different sampling.", + argstr="--order %s", + ) class AddScalarVolumesOutputSpec(TraitedSpec): @@ -209,22 +214,21 @@ class AddScalarVolumes(SEMLikeCommandLine): input_spec = AddScalarVolumesInputSpec output_spec = AddScalarVolumesOutputSpec _cmd = "AddScalarVolumes " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} class CastScalarVolumeInputSpec(CommandLineInputSpec): InputVolume = File( - position=-2, - desc="Input volume, the volume to cast.", - exists=True, - argstr="%s") + position=-2, desc="Input volume, the volume to cast.", exists=True, argstr="%s" + ) OutputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Output volume, cast to the new type.", - argstr="%s") + argstr="%s", + ) type = traits.Enum( "Char", "UnsignedChar", @@ -235,12 +239,14 @@ class CastScalarVolumeInputSpec(CommandLineInputSpec): "Float", "Double", desc="Type for the new output volume.", - argstr="--type %s") + argstr="--type %s", + ) class CastScalarVolumeOutputSpec(TraitedSpec): OutputVolume = File( - position=-1, desc="Output volume, cast to the new type.", exists=True) + position=-1, desc="Output volume, cast to the new type.", exists=True + ) class CastScalarVolume(SEMLikeCommandLine): @@ -265,4 +271,4 @@ class CastScalarVolume(SEMLikeCommandLine): input_spec = CastScalarVolumeInputSpec output_spec = CastScalarVolumeOutputSpec _cmd = "CastScalarVolume " - _outputs_filenames = {'OutputVolume': 'OutputVolume.nii'} + _outputs_filenames = {"OutputVolume": "OutputVolume.nii"} diff --git a/nipype/interfaces/slicer/filtering/checkerboardfilter.py b/nipype/interfaces/slicer/filtering/checkerboardfilter.py index e4ad85dc5e..1789359e7d 100644 --- a/nipype/interfaces/slicer/filtering/checkerboardfilter.py +++ b/nipype/interfaces/slicer/filtering/checkerboardfilter.py @@ -3,28 +3,42 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from 
nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os class CheckerBoardFilterInputSpec(CommandLineInputSpec): checkerPattern = InputMultiPath( traits.Int, - desc= - "The pattern of input 1 and input 2 in the output image. The user can specify the number of checkers in each dimension. A checkerPattern of 2,2,1 means that images will alternate in every other checker in the first two dimensions. The same pattern will be used in the 3rd dimension.", + desc="The pattern of input 1 and input 2 in the output image. The user can specify the number of checkers in each dimension. A checkerPattern of 2,2,1 means that images will alternate in every other checker in the first two dimensions. The same pattern will be used in the 3rd dimension.", sep=",", - argstr="--checkerPattern %s") + argstr="--checkerPattern %s", + ) inputVolume1 = File( - position=-3, desc="First Input volume", exists=True, argstr="%s") + position=-3, desc="First Input volume", exists=True, argstr="%s" + ) inputVolume2 = File( - position=-2, desc="Second Input volume", exists=True, argstr="%s") + position=-2, desc="Second Input volume", exists=True, argstr="%s" + ) outputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", - argstr="%s") + argstr="%s", + ) class CheckerBoardFilterOutputSpec(TraitedSpec): @@ -51,4 +65,4 @@ class CheckerBoardFilter(SEMLikeCommandLine): input_spec = CheckerBoardFilterInputSpec output_spec = CheckerBoardFilterOutputSpec _cmd = "CheckerBoardFilter " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} diff --git a/nipype/interfaces/slicer/filtering/denoising.py b/nipype/interfaces/slicer/filtering/denoising.py index 0dbaaebf74..6c26b74618 100644 --- a/nipype/interfaces/slicer/filtering/denoising.py +++ b/nipype/interfaces/slicer/filtering/denoising.py @@ -3,35 +3,45 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os class GradientAnisotropicDiffusionInputSpec(CommandLineInputSpec): conductance = traits.Float( - desc= - "Conductance controls the sensitivity of the conductance term. As a general rule, the lower the value, the more strongly the filter preserves edges. A high value will cause diffusion (smoothing) across edges. Note that the number of iterations controls how much smoothing is done within regions bounded by edges.", - argstr="--conductance %f") + desc="Conductance controls the sensitivity of the conductance term. As a general rule, the lower the value, the more strongly the filter preserves edges. A high value will cause diffusion (smoothing) across edges. 
Note that the number of iterations controls how much smoothing is done within regions bounded by edges.", + argstr="--conductance %f", + ) iterations = traits.Int( - desc= - "The more iterations, the more smoothing. Each iteration takes the same amount of time. If it takes 10 seconds for one iteration, then it will take 100 seconds for 10 iterations. Note that the conductance controls how much each iteration smooths across edges.", - argstr="--iterations %d") + desc="The more iterations, the more smoothing. Each iteration takes the same amount of time. If it takes 10 seconds for one iteration, then it will take 100 seconds for 10 iterations. Note that the conductance controls how much each iteration smooths across edges.", + argstr="--iterations %d", + ) timeStep = traits.Float( - desc= - "The time step depends on the dimensionality of the image. In Slicer the images are 3D and the default (.0625) time step will provide a stable solution.", - argstr="--timeStep %f") + desc="The time step depends on the dimensionality of the image. In Slicer the images are 3D and the default (.0625) time step will provide a stable solution.", + argstr="--timeStep %f", + ) inputVolume = File( - position=-2, - desc="Input volume to be filtered", - exists=True, - argstr="%s") + position=-2, desc="Input volume to be filtered", exists=True, argstr="%s" + ) outputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", - argstr="%s") + argstr="%s", + ) class GradientAnisotropicDiffusionOutputSpec(TraitedSpec): @@ -60,34 +70,33 @@ class GradientAnisotropicDiffusion(SEMLikeCommandLine): input_spec = GradientAnisotropicDiffusionInputSpec output_spec = GradientAnisotropicDiffusionOutputSpec _cmd = "GradientAnisotropicDiffusion " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} class CurvatureAnisotropicDiffusionInputSpec(CommandLineInputSpec): conductance = traits.Float( - desc= - "Conductance controls the sensitivity of the conductance term. As a general rule, the lower the value, the more strongly the filter preserves edges. A high value will cause diffusion (smoothing) across edges. Note that the number of iterations controls how much smoothing is done within regions bounded by edges.", - argstr="--conductance %f") + desc="Conductance controls the sensitivity of the conductance term. As a general rule, the lower the value, the more strongly the filter preserves edges. A high value will cause diffusion (smoothing) across edges. Note that the number of iterations controls how much smoothing is done within regions bounded by edges.", + argstr="--conductance %f", + ) iterations = traits.Int( - desc= - "The more iterations, the more smoothing. Each iteration takes the same amount of time. If it takes 10 seconds for one iteration, then it will take 100 seconds for 10 iterations. Note that the conductance controls how much each iteration smooths across edges.", - argstr="--iterations %d") + desc="The more iterations, the more smoothing. Each iteration takes the same amount of time. If it takes 10 seconds for one iteration, then it will take 100 seconds for 10 iterations. Note that the conductance controls how much each iteration smooths across edges.", + argstr="--iterations %d", + ) timeStep = traits.Float( - desc= - "The time step depends on the dimensionality of the image. 
In Slicer the images are 3D and the default (.0625) time step will provide a stable solution.", - argstr="--timeStep %f") + desc="The time step depends on the dimensionality of the image. In Slicer the images are 3D and the default (.0625) time step will provide a stable solution.", + argstr="--timeStep %f", + ) inputVolume = File( - position=-2, - desc="Input volume to be filtered", - exists=True, - argstr="%s") + position=-2, desc="Input volume to be filtered", exists=True, argstr="%s" + ) outputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", - argstr="%s") + argstr="%s", + ) class CurvatureAnisotropicDiffusionOutputSpec(TraitedSpec): @@ -118,22 +127,23 @@ class CurvatureAnisotropicDiffusion(SEMLikeCommandLine): input_spec = CurvatureAnisotropicDiffusionInputSpec output_spec = CurvatureAnisotropicDiffusionOutputSpec _cmd = "CurvatureAnisotropicDiffusion " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} class GaussianBlurImageFilterInputSpec(CommandLineInputSpec): sigma = traits.Float( desc="Sigma value in physical units (e.g., mm) of the Gaussian kernel", - argstr="--sigma %f") - inputVolume = File( - position=-2, desc="Input volume", exists=True, argstr="%s") + argstr="--sigma %f", + ) + inputVolume = File(position=-2, desc="Input volume", exists=True, argstr="%s") outputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Blurred Volume", - argstr="%s") + argstr="%s", + ) class GaussianBlurImageFilterOutputSpec(TraitedSpec): @@ -160,7 +170,7 @@ class GaussianBlurImageFilter(SEMLikeCommandLine): input_spec = GaussianBlurImageFilterInputSpec output_spec = GaussianBlurImageFilterOutputSpec _cmd = "GaussianBlurImageFilter " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} class MedianImageFilterInputSpec(CommandLineInputSpec): @@ -168,19 +178,19 @@ class MedianImageFilterInputSpec(CommandLineInputSpec): traits.Int, desc="The size of the neighborhood in each dimension", sep=",", - argstr="--neighborhood %s") + argstr="--neighborhood %s", + ) inputVolume = File( - position=-2, - desc="Input volume to be filtered", - exists=True, - argstr="%s") + position=-2, desc="Input volume to be filtered", exists=True, argstr="%s" + ) outputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", - argstr="%s") + argstr="%s", + ) class MedianImageFilterOutputSpec(TraitedSpec): @@ -207,4 +217,4 @@ class MedianImageFilter(SEMLikeCommandLine): input_spec = MedianImageFilterInputSpec output_spec = MedianImageFilterOutputSpec _cmd = "MedianImageFilter " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} diff --git a/nipype/interfaces/slicer/filtering/extractskeleton.py b/nipype/interfaces/slicer/filtering/extractskeleton.py index d7770c8f2e..7900be472c 100644 --- a/nipype/interfaces/slicer/filtering/extractskeleton.py +++ b/nipype/interfaces/slicer/filtering/extractskeleton.py @@ -3,37 +3,51 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + 
SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os class ExtractSkeletonInputSpec(CommandLineInputSpec): - InputImageFileName = File( - position=-2, desc="Input image", exists=True, argstr="%s") + InputImageFileName = File(position=-2, desc="Input image", exists=True, argstr="%s") OutputImageFileName = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Skeleton of the input image", - argstr="%s") + argstr="%s", + ) type = traits.Enum( - "1D", "2D", desc="Type of skeleton to create", argstr="--type %s") + "1D", "2D", desc="Type of skeleton to create", argstr="--type %s" + ) dontPrune = traits.Bool( desc="Return the full skeleton, not just the maximal skeleton", - argstr="--dontPrune ") + argstr="--dontPrune ", + ) numPoints = traits.Int( - desc="Number of points used to represent the skeleton", - argstr="--numPoints %d") + desc="Number of points used to represent the skeleton", argstr="--numPoints %d" + ) pointsFile = traits.Str( - desc= - "Name of the file to store the coordinates of the central (1D) skeleton points", - argstr="--pointsFile %s") + desc="Name of the file to store the coordinates of the central (1D) skeleton points", + argstr="--pointsFile %s", + ) class ExtractSkeletonOutputSpec(TraitedSpec): OutputImageFileName = File( - position=-1, desc="Skeleton of the input image", exists=True) + position=-1, desc="Skeleton of the input image", exists=True + ) class ExtractSkeleton(SEMLikeCommandLine): @@ -56,4 +70,4 @@ class ExtractSkeleton(SEMLikeCommandLine): input_spec = ExtractSkeletonInputSpec output_spec = ExtractSkeletonOutputSpec _cmd = "ExtractSkeleton " - _outputs_filenames = {'OutputImageFileName': 'OutputImageFileName.nii'} + _outputs_filenames = {"OutputImageFileName": "OutputImageFileName.nii"} diff --git a/nipype/interfaces/slicer/filtering/histogrammatching.py b/nipype/interfaces/slicer/filtering/histogrammatching.py index 1b3b26b061..9b6cb17813 100644 --- a/nipype/interfaces/slicer/filtering/histogrammatching.py +++ b/nipype/interfaces/slicer/filtering/histogrammatching.py @@ -3,47 +3,58 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os class HistogramMatchingInputSpec(CommandLineInputSpec): numberOfHistogramLevels = traits.Int( desc="The number of hisogram levels to use", - argstr="--numberOfHistogramLevels %d") + argstr="--numberOfHistogramLevels %d", + ) numberOfMatchPoints = traits.Int( - desc="The number of match points to use", - argstr="--numberOfMatchPoints %d") + desc="The number of match points to use", argstr="--numberOfMatchPoints %d" + ) threshold = traits.Bool( - desc= - "If on, only pixels above the mean in each volume are thresholded.", - argstr="--threshold ") + desc="If on, only pixels above the mean in each volume are thresholded.", + argstr="--threshold ", + ) inputVolume = File( - position=-3, - desc="Input volume to be filtered", - exists=True, - argstr="%s") + position=-3, desc="Input volume to be filtered", exists=True, argstr="%s" + ) referenceVolume = File( position=-2, desc="Input volume whose 
histogram will be matched", exists=True, - argstr="%s") + argstr="%s", + ) outputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, - desc= - "Output volume. This is the input volume with intensities matched to the reference volume.", - argstr="%s") + desc="Output volume. This is the input volume with intensities matched to the reference volume.", + argstr="%s", + ) class HistogramMatchingOutputSpec(TraitedSpec): outputVolume = File( position=-1, - desc= - "Output volume. This is the input volume with intensities matched to the reference volume.", - exists=True) + desc="Output volume. This is the input volume with intensities matched to the reference volume.", + exists=True, + ) class HistogramMatching(SEMLikeCommandLine): @@ -72,4 +83,4 @@ class HistogramMatching(SEMLikeCommandLine): input_spec = HistogramMatchingInputSpec output_spec = HistogramMatchingOutputSpec _cmd = "HistogramMatching " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} diff --git a/nipype/interfaces/slicer/filtering/imagelabelcombine.py b/nipype/interfaces/slicer/filtering/imagelabelcombine.py index 067a575045..666385845d 100644 --- a/nipype/interfaces/slicer/filtering/imagelabelcombine.py +++ b/nipype/interfaces/slicer/filtering/imagelabelcombine.py @@ -3,30 +3,44 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os class ImageLabelCombineInputSpec(CommandLineInputSpec): InputLabelMap_A = File( - position=-3, desc="Label map image", exists=True, argstr="%s") + position=-3, desc="Label map image", exists=True, argstr="%s" + ) InputLabelMap_B = File( - position=-2, desc="Label map image", exists=True, argstr="%s") + position=-2, desc="Label map image", exists=True, argstr="%s" + ) OutputLabelMap = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Resulting Label map image", - argstr="%s") + argstr="%s", + ) first_overwrites = traits.Bool( desc="Use first or second label when both are present", - argstr="--first_overwrites ") + argstr="--first_overwrites ", + ) class ImageLabelCombineOutputSpec(TraitedSpec): - OutputLabelMap = File( - position=-1, desc="Resulting Label map image", exists=True) + OutputLabelMap = File(position=-1, desc="Resulting Label map image", exists=True) class ImageLabelCombine(SEMLikeCommandLine): @@ -47,4 +61,4 @@ class ImageLabelCombine(SEMLikeCommandLine): input_spec = ImageLabelCombineInputSpec output_spec = ImageLabelCombineOutputSpec _cmd = "ImageLabelCombine " - _outputs_filenames = {'OutputLabelMap': 'OutputLabelMap.nii'} + _outputs_filenames = {"OutputLabelMap": "OutputLabelMap.nii"} diff --git a/nipype/interfaces/slicer/filtering/morphology.py b/nipype/interfaces/slicer/filtering/morphology.py index 913c63d5ab..b33d3e9c6d 100644 --- a/nipype/interfaces/slicer/filtering/morphology.py +++ b/nipype/interfaces/slicer/filtering/morphology.py @@ -3,23 +3,33 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, 
CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os class GrayscaleGrindPeakImageFilterInputSpec(CommandLineInputSpec): inputVolume = File( - position=-2, - desc="Input volume to be filtered", - exists=True, - argstr="%s") + position=-2, desc="Input volume to be filtered", exists=True, argstr="%s" + ) outputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", - argstr="%s") + argstr="%s", + ) class GrayscaleGrindPeakImageFilterOutputSpec(TraitedSpec): @@ -56,22 +66,21 @@ class GrayscaleGrindPeakImageFilter(SEMLikeCommandLine): input_spec = GrayscaleGrindPeakImageFilterInputSpec output_spec = GrayscaleGrindPeakImageFilterOutputSpec _cmd = "GrayscaleGrindPeakImageFilter " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} class GrayscaleFillHoleImageFilterInputSpec(CommandLineInputSpec): inputVolume = File( - position=-2, - desc="Input volume to be filtered", - exists=True, - argstr="%s") + position=-2, desc="Input volume to be filtered", exists=True, argstr="%s" + ) outputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", - argstr="%s") + argstr="%s", + ) class GrayscaleFillHoleImageFilterOutputSpec(TraitedSpec): @@ -106,4 +115,4 @@ class GrayscaleFillHoleImageFilter(SEMLikeCommandLine): input_spec = GrayscaleFillHoleImageFilterInputSpec output_spec = GrayscaleFillHoleImageFilterOutputSpec _cmd = "GrayscaleFillHoleImageFilter " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} diff --git a/nipype/interfaces/slicer/filtering/n4itkbiasfieldcorrection.py b/nipype/interfaces/slicer/filtering/n4itkbiasfieldcorrection.py index 28f694f77e..b1243160b5 100644 --- a/nipype/interfaces/slicer/filtering/n4itkbiasfieldcorrection.py +++ b/nipype/interfaces/slicer/filtering/n4itkbiasfieldcorrection.py @@ -3,7 +3,18 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os @@ -11,60 +22,62 @@ class N4ITKBiasFieldCorrectionInputSpec(CommandLineInputSpec): inputimage = File( desc="Input image where you observe signal inhomegeneity", exists=True, - argstr="--inputimage %s") + argstr="--inputimage %s", + ) maskimage = File( - desc= - "Binary mask that defines the structure of your interest. NOTE: This parameter is OPTIONAL. If the mask is not specified, the module will use internally Otsu thresholding to define this mask. Better processing results can often be obtained when a meaningful mask is defined.", + desc="Binary mask that defines the structure of your interest. NOTE: This parameter is OPTIONAL. If the mask is not specified, the module will use internally Otsu thresholding to define this mask. 
Better processing results can often be obtained when a meaningful mask is defined.", exists=True, - argstr="--maskimage %s") + argstr="--maskimage %s", + ) outputimage = traits.Either( traits.Bool, File(), hash_files=False, desc="Result of processing", - argstr="--outputimage %s") + argstr="--outputimage %s", + ) outputbiasfield = traits.Either( traits.Bool, File(), hash_files=False, desc="Recovered bias field (OPTIONAL)", - argstr="--outputbiasfield %s") + argstr="--outputbiasfield %s", + ) iterations = InputMultiPath( traits.Int, - desc= - "Maximum number of iterations at each level of resolution. Larger values will increase execution time, but may lead to better results.", + desc="Maximum number of iterations at each level of resolution. Larger values will increase execution time, but may lead to better results.", sep=",", - argstr="--iterations %s") + argstr="--iterations %s", + ) convergencethreshold = traits.Float( - desc= - "Stopping criterion for the iterative bias estimation. Larger values will lead to smaller execution time.", - argstr="--convergencethreshold %f") + desc="Stopping criterion for the iterative bias estimation. Larger values will lead to smaller execution time.", + argstr="--convergencethreshold %f", + ) meshresolution = InputMultiPath( traits.Float, - desc= - "Resolution of the initial bspline grid defined as a sequence of three numbers. The actual resolution will be defined by adding the bspline order (default is 3) to the resolution in each dimension specified here. For example, 1,1,1 will result in a 4x4x4 grid of control points. This parameter may need to be adjusted based on your input image. In the multi-resolution N4 framework, the resolution of the bspline grid at subsequent iterations will be doubled. The number of resolutions is implicitly defined by Number of iterations parameter (the size of this list is the number of resolutions)", + desc="Resolution of the initial bspline grid defined as a sequence of three numbers. The actual resolution will be defined by adding the bspline order (default is 3) to the resolution in each dimension specified here. For example, 1,1,1 will result in a 4x4x4 grid of control points. This parameter may need to be adjusted based on your input image. In the multi-resolution N4 framework, the resolution of the bspline grid at subsequent iterations will be doubled. The number of resolutions is implicitly defined by Number of iterations parameter (the size of this list is the number of resolutions)", sep=",", - argstr="--meshresolution %s") + argstr="--meshresolution %s", + ) splinedistance = traits.Float( - desc= - "An alternative means to define the spline grid, by setting the distance between the control points. This parameter is used only if the grid resolution is not specified.", - argstr="--splinedistance %f") + desc="An alternative means to define the spline grid, by setting the distance between the control points. This parameter is used only if the grid resolution is not specified.", + argstr="--splinedistance %f", + ) shrinkfactor = traits.Int( - desc= - "Defines how much the image should be upsampled before estimating the inhomogeneity field. Increase if you want to reduce the execution time. 1 corresponds to the original resolution. Larger values will significantly reduce the computation time.", - argstr="--shrinkfactor %d") + desc="Defines how much the image should be upsampled before estimating the inhomogeneity field. Increase if you want to reduce the execution time. 1 corresponds to the original resolution. 
Larger values will significantly reduce the computation time.", + argstr="--shrinkfactor %d", + ) bsplineorder = traits.Int( - desc= - "Order of B-spline used in the approximation. Larger values will lead to longer execution times, may result in overfitting and poor result.", - argstr="--bsplineorder %d") - weightimage = File( - desc="Weight Image", exists=True, argstr="--weightimage %s") + desc="Order of B-spline used in the approximation. Larger values will lead to longer execution times, may result in overfitting and poor result.", + argstr="--bsplineorder %d", + ) + weightimage = File(desc="Weight Image", exists=True, argstr="--weightimage %s") histogramsharpening = InputMultiPath( traits.Float, - desc= - "A vector of up to three values. Non-zero values correspond to Bias Field Full Width at Half Maximum, Wiener filter noise, and Number of histogram bins.", + desc="A vector of up to three values. Non-zero values correspond to Bias Field Full Width at Half Maximum, Wiener filter noise, and Number of histogram bins.", sep=",", - argstr="--histogramsharpening %s") + argstr="--histogramsharpening %s", + ) class N4ITKBiasFieldCorrectionOutputSpec(TraitedSpec): @@ -93,6 +106,6 @@ class N4ITKBiasFieldCorrection(SEMLikeCommandLine): output_spec = N4ITKBiasFieldCorrectionOutputSpec _cmd = "N4ITKBiasFieldCorrection " _outputs_filenames = { - 'outputimage': 'outputimage.nii', - 'outputbiasfield': 'outputbiasfield.nii' + "outputimage": "outputimage.nii", + "outputbiasfield": "outputbiasfield.nii", } diff --git a/nipype/interfaces/slicer/filtering/resamplescalarvectordwivolume.py b/nipype/interfaces/slicer/filtering/resamplescalarvectordwivolume.py index 6205b76b54..c41827cb39 100644 --- a/nipype/interfaces/slicer/filtering/resamplescalarvectordwivolume.py +++ b/nipype/interfaces/slicer/filtering/resamplescalarvectordwivolume.py @@ -3,123 +3,139 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os class ResampleScalarVectorDWIVolumeInputSpec(CommandLineInputSpec): inputVolume = File( - position=-2, - desc="Input Volume to be resampled", - exists=True, - argstr="%s") + position=-2, desc="Input Volume to be resampled", exists=True, argstr="%s" + ) outputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Resampled Volume", - argstr="%s") + argstr="%s", + ) Reference = File( desc="Reference Volume (spacing,size,orientation,origin)", exists=True, - argstr="--Reference %s") + argstr="--Reference %s", + ) transformationFile = File(exists=True, argstr="--transformationFile %s") defField = File( - desc= - "File containing the deformation field (3D vector image containing vectors with 3 components)", + desc="File containing the deformation field (3D vector image containing vectors with 3 components)", exists=True, - argstr="--defField %s") + argstr="--defField %s", + ) hfieldtype = traits.Enum( "displacement", "h-Field", desc="Set if the deformation field is an h-Field", - argstr="--hfieldtype %s") + argstr="--hfieldtype %s", + ) interpolation = traits.Enum( "linear", "nn", "ws", "bs", - desc= - "Sampling algorithm (linear or nn 
(nearest neighborhoor), ws (WindowedSinc), bs (BSpline) )", - argstr="--interpolation %s") + desc="Sampling algorithm (linear or nn (nearest neighborhoor), ws (WindowedSinc), bs (BSpline) )", + argstr="--interpolation %s", + ) transform_order = traits.Enum( "input-to-output", "output-to-input", desc="Select in what order the transforms are read", - argstr="--transform_order %s") + argstr="--transform_order %s", + ) notbulk = traits.Bool( - desc= - "The transform following the BSpline transform is not set as a bulk transform for the BSpline transform", - argstr="--notbulk ") + desc="The transform following the BSpline transform is not set as a bulk transform for the BSpline transform", + argstr="--notbulk ", + ) spaceChange = traits.Bool( - desc= - "Space Orientation between transform and image is different (RAS/LPS) (warning: if the transform is a Transform Node in Slicer3, do not select)", - argstr="--spaceChange ") + desc="Space Orientation between transform and image is different (RAS/LPS) (warning: if the transform is a Transform Node in Slicer3, do not select)", + argstr="--spaceChange ", + ) rotation_point = traits.List( - desc= - "Rotation Point in case of rotation around a point (otherwise useless)", - argstr="--rotation_point %s") + desc="Rotation Point in case of rotation around a point (otherwise useless)", + argstr="--rotation_point %s", + ) centered_transform = traits.Bool( - desc= - "Set the center of the transformation to the center of the input image", - argstr="--centered_transform ") + desc="Set the center of the transformation to the center of the input image", + argstr="--centered_transform ", + ) image_center = traits.Enum( "input", "output", - desc= - "Image to use to center the transform (used only if \'Centered Transform\' is selected)", - argstr="--image_center %s") + desc="Image to use to center the transform (used only if 'Centered Transform' is selected)", + argstr="--image_center %s", + ) Inverse_ITK_Transformation = traits.Bool( - desc= - "Inverse the transformation before applying it from output image to input image", - argstr="--Inverse_ITK_Transformation ") + desc="Inverse the transformation before applying it from output image to input image", + argstr="--Inverse_ITK_Transformation ", + ) spacing = InputMultiPath( traits.Float, desc="Spacing along each dimension (0 means use input spacing)", sep=",", - argstr="--spacing %s") + argstr="--spacing %s", + ) size = InputMultiPath( traits.Float, desc="Size along each dimension (0 means use input size)", sep=",", - argstr="--size %s") - origin = traits.List( - desc="Origin of the output Image", argstr="--origin %s") + argstr="--size %s", + ) + origin = traits.List(desc="Origin of the output Image", argstr="--origin %s") direction_matrix = InputMultiPath( traits.Float, - desc= - "9 parameters of the direction matrix by rows (ijk to LPS if LPS transform, ijk to RAS if RAS transform)", + desc="9 parameters of the direction matrix by rows (ijk to LPS if LPS transform, ijk to RAS if RAS transform)", sep=",", - argstr="--direction_matrix %s") + argstr="--direction_matrix %s", + ) number_of_thread = traits.Int( desc="Number of thread used to compute the output image", - argstr="--number_of_thread %d") + argstr="--number_of_thread %d", + ) default_pixel_value = traits.Float( - desc= - "Default pixel value for samples falling outside of the input region", - argstr="--default_pixel_value %f") + desc="Default pixel value for samples falling outside of the input region", + argstr="--default_pixel_value %f", + ) 
window_function = traits.Enum( "h", "c", "w", "l", "b", - desc= - "Window Function , h = Hamming , c = Cosine , w = Welch , l = Lanczos , b = Blackman", - argstr="--window_function %s") + desc="Window Function , h = Hamming , c = Cosine , w = Welch , l = Lanczos , b = Blackman", + argstr="--window_function %s", + ) spline_order = traits.Int(desc="Spline Order", argstr="--spline_order %d") transform_matrix = InputMultiPath( traits.Float, - desc= - "12 parameters of the transform matrix by rows ( --last 3 being translation-- )", + desc="12 parameters of the transform matrix by rows ( --last 3 being translation-- )", sep=",", - argstr="--transform_matrix %s") + argstr="--transform_matrix %s", + ) transform = traits.Enum( "rt", "a", desc="Transform algorithm, rt = Rigid Transform, a = Affine Transform", - argstr="--transform %s") + argstr="--transform %s", + ) class ResampleScalarVectorDWIVolumeOutputSpec(TraitedSpec): @@ -150,4 +166,4 @@ class ResampleScalarVectorDWIVolume(SEMLikeCommandLine): input_spec = ResampleScalarVectorDWIVolumeInputSpec output_spec = ResampleScalarVectorDWIVolumeOutputSpec _cmd = "ResampleScalarVectorDWIVolume " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py index cd05a91d05..3ea978e771 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py @@ -4,38 +4,22 @@ def test_AddScalarVolumes_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume1=dict( - argstr='%s', - extensions=None, - position=-3, - ), - inputVolume2=dict( - argstr='%s', - extensions=None, - position=-2, - ), - order=dict(argstr='--order %s', ), - outputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputVolume1=dict(argstr="%s", extensions=None, position=-3,), + inputVolume2=dict(argstr="%s", extensions=None, position=-2,), + order=dict(argstr="--order %s",), + outputVolume=dict(argstr="%s", hash_files=False, position=-1,), ) inputs = AddScalarVolumes.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_AddScalarVolumes_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict(extensions=None, position=-1,),) outputs = AddScalarVolumes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py index 6ed35e0df7..e57c2a691b 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py @@ -4,33 +4,21 @@ def test_CastScalarVolume_inputs(): input_map = dict( - InputVolume=dict( - argstr='%s', - extensions=None, - position=-2, - ), - OutputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - type=dict(argstr='--type %s', ), + InputVolume=dict(argstr="%s", extensions=None, 
position=-2,), + OutputVolume=dict(argstr="%s", hash_files=False, position=-1,), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + type=dict(argstr="--type %s",), ) inputs = CastScalarVolume.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CastScalarVolume_outputs(): - output_map = dict(OutputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputVolume=dict(extensions=None, position=-1,),) outputs = CastScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py index 9ad8894a35..564e2e14f8 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py @@ -4,41 +4,22 @@ def test_CheckerBoardFilter_inputs(): input_map = dict( - args=dict(argstr='%s', ), - checkerPattern=dict( - argstr='--checkerPattern %s', - sep=',', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume1=dict( - argstr='%s', - extensions=None, - position=-3, - ), - inputVolume2=dict( - argstr='%s', - extensions=None, - position=-2, - ), - outputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), + args=dict(argstr="%s",), + checkerPattern=dict(argstr="--checkerPattern %s", sep=",",), + environ=dict(nohash=True, usedefault=True,), + inputVolume1=dict(argstr="%s", extensions=None, position=-3,), + inputVolume2=dict(argstr="%s", extensions=None, position=-2,), + outputVolume=dict(argstr="%s", hash_files=False, position=-1,), ) inputs = CheckerBoardFilter.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CheckerBoardFilter_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict(extensions=None, position=-1,),) outputs = CheckerBoardFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py index 0dd2a4f946..189bd459a0 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py @@ -4,35 +4,23 @@ def test_CurvatureAnisotropicDiffusion_inputs(): input_map = dict( - args=dict(argstr='%s', ), - conductance=dict(argstr='--conductance %f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='%s', - extensions=None, - position=-2, - ), - iterations=dict(argstr='--iterations %d', ), - outputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - timeStep=dict(argstr='--timeStep %f', ), + args=dict(argstr="%s",), + conductance=dict(argstr="--conductance %f",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="%s", extensions=None, position=-2,), + iterations=dict(argstr="--iterations %d",), + outputVolume=dict(argstr="%s", hash_files=False, position=-1,), + timeStep=dict(argstr="--timeStep %f",), ) inputs = CurvatureAnisotropicDiffusion.input_spec() for key, metadata in 
list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CurvatureAnisotropicDiffusion_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict(extensions=None, position=-1,),) outputs = CurvatureAnisotropicDiffusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py index f3b027c12e..93861cefa5 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py @@ -4,36 +4,24 @@ def test_ExtractSkeleton_inputs(): input_map = dict( - InputImageFileName=dict( - argstr='%s', - extensions=None, - position=-2, - ), - OutputImageFileName=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - args=dict(argstr='%s', ), - dontPrune=dict(argstr='--dontPrune ', ), - environ=dict( - nohash=True, - usedefault=True, - ), - numPoints=dict(argstr='--numPoints %d', ), - pointsFile=dict(argstr='--pointsFile %s', ), - type=dict(argstr='--type %s', ), + InputImageFileName=dict(argstr="%s", extensions=None, position=-2,), + OutputImageFileName=dict(argstr="%s", hash_files=False, position=-1,), + args=dict(argstr="%s",), + dontPrune=dict(argstr="--dontPrune ",), + environ=dict(nohash=True, usedefault=True,), + numPoints=dict(argstr="--numPoints %d",), + pointsFile=dict(argstr="--pointsFile %s",), + type=dict(argstr="--type %s",), ) inputs = ExtractSkeleton.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ExtractSkeleton_outputs(): - output_map = dict(OutputImageFileName=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputImageFileName=dict(extensions=None, position=-1,),) outputs = ExtractSkeleton.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py index 5e01ce71cc..a0655cfb6e 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py @@ -4,33 +4,21 @@ def test_GaussianBlurImageFilter_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='%s', - extensions=None, - position=-2, - ), - outputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - sigma=dict(argstr='--sigma %f', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="%s", extensions=None, position=-2,), + outputVolume=dict(argstr="%s", hash_files=False, position=-1,), + sigma=dict(argstr="--sigma %f",), ) inputs = GaussianBlurImageFilter.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_GaussianBlurImageFilter_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict(extensions=None, position=-1,),) outputs = GaussianBlurImageFilter.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py index 3d803b9222..2b6e77d1d8 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py @@ -4,35 +4,23 @@ def test_GradientAnisotropicDiffusion_inputs(): input_map = dict( - args=dict(argstr='%s', ), - conductance=dict(argstr='--conductance %f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='%s', - extensions=None, - position=-2, - ), - iterations=dict(argstr='--iterations %d', ), - outputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - timeStep=dict(argstr='--timeStep %f', ), + args=dict(argstr="%s",), + conductance=dict(argstr="--conductance %f",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="%s", extensions=None, position=-2,), + iterations=dict(argstr="--iterations %d",), + outputVolume=dict(argstr="%s", hash_files=False, position=-1,), + timeStep=dict(argstr="--timeStep %f",), ) inputs = GradientAnisotropicDiffusion.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_GradientAnisotropicDiffusion_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict(extensions=None, position=-1,),) outputs = GradientAnisotropicDiffusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py index 2d9bf34805..a12177d820 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py @@ -4,32 +4,20 @@ def test_GrayscaleFillHoleImageFilter_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='%s', - extensions=None, - position=-2, - ), - outputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="%s", extensions=None, position=-2,), + outputVolume=dict(argstr="%s", hash_files=False, position=-1,), ) inputs = GrayscaleFillHoleImageFilter.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_GrayscaleFillHoleImageFilter_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict(extensions=None, position=-1,),) outputs = GrayscaleFillHoleImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py index ad1a04aff7..ab1c23f716 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py @@ -4,32 +4,20 
@@ def test_GrayscaleGrindPeakImageFilter_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='%s', - extensions=None, - position=-2, - ), - outputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="%s", extensions=None, position=-2,), + outputVolume=dict(argstr="%s", hash_files=False, position=-1,), ) inputs = GrayscaleGrindPeakImageFilter.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_GrayscaleGrindPeakImageFilter_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict(extensions=None, position=-1,),) outputs = GrayscaleGrindPeakImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py b/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py index 3ace435981..003ec4c8d8 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py @@ -4,40 +4,24 @@ def test_HistogramMatching_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='%s', - extensions=None, - position=-3, - ), - numberOfHistogramLevels=dict(argstr='--numberOfHistogramLevels %d', ), - numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ), - outputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - referenceVolume=dict( - argstr='%s', - extensions=None, - position=-2, - ), - threshold=dict(argstr='--threshold ', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="%s", extensions=None, position=-3,), + numberOfHistogramLevels=dict(argstr="--numberOfHistogramLevels %d",), + numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d",), + outputVolume=dict(argstr="%s", hash_files=False, position=-1,), + referenceVolume=dict(argstr="%s", extensions=None, position=-2,), + threshold=dict(argstr="--threshold ",), ) inputs = HistogramMatching.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_HistogramMatching_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict(extensions=None, position=-1,),) outputs = HistogramMatching.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py index ec9a2bd0b2..8fb1596420 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py @@ -4,38 +4,22 @@ def test_ImageLabelCombine_inputs(): input_map = dict( - InputLabelMap_A=dict( - argstr='%s', - extensions=None, - position=-3, - ), - InputLabelMap_B=dict( - argstr='%s', - extensions=None, - position=-2, - ), - OutputLabelMap=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - args=dict(argstr='%s', ), - environ=dict( - 
nohash=True, - usedefault=True, - ), - first_overwrites=dict(argstr='--first_overwrites ', ), + InputLabelMap_A=dict(argstr="%s", extensions=None, position=-3,), + InputLabelMap_B=dict(argstr="%s", extensions=None, position=-2,), + OutputLabelMap=dict(argstr="%s", hash_files=False, position=-1,), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + first_overwrites=dict(argstr="--first_overwrites ",), ) inputs = ImageLabelCombine.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ImageLabelCombine_outputs(): - output_map = dict(OutputLabelMap=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputLabelMap=dict(extensions=None, position=-1,),) outputs = ImageLabelCombine.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py index 17c20d18dc..fd8bf6aaf7 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py @@ -4,39 +4,23 @@ def test_MaskScalarVolume_inputs(): input_map = dict( - InputVolume=dict( - argstr='%s', - extensions=None, - position=-3, - ), - MaskVolume=dict( - argstr='%s', - extensions=None, - position=-2, - ), - OutputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - label=dict(argstr='--label %d', ), - replace=dict(argstr='--replace %d', ), + InputVolume=dict(argstr="%s", extensions=None, position=-3,), + MaskVolume=dict(argstr="%s", extensions=None, position=-2,), + OutputVolume=dict(argstr="%s", hash_files=False, position=-1,), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + label=dict(argstr="--label %d",), + replace=dict(argstr="--replace %d",), ) inputs = MaskScalarVolume.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MaskScalarVolume_outputs(): - output_map = dict(OutputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputVolume=dict(extensions=None, position=-1,),) outputs = MaskScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py index d02373d1c9..969a0dead6 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py @@ -4,36 +4,21 @@ def test_MedianImageFilter_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='%s', - extensions=None, - position=-2, - ), - neighborhood=dict( - argstr='--neighborhood %s', - sep=',', - ), - outputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="%s", extensions=None, position=-2,), + neighborhood=dict(argstr="--neighborhood %s", sep=",",), + outputVolume=dict(argstr="%s", hash_files=False, position=-1,), ) inputs = MedianImageFilter.input_spec() for key, 
metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MedianImageFilter_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict(extensions=None, position=-1,),) outputs = MedianImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py index 95ac2f98a4..f6e521fe8e 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py @@ -4,38 +4,22 @@ def test_MultiplyScalarVolumes_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume1=dict( - argstr='%s', - extensions=None, - position=-3, - ), - inputVolume2=dict( - argstr='%s', - extensions=None, - position=-2, - ), - order=dict(argstr='--order %s', ), - outputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputVolume1=dict(argstr="%s", extensions=None, position=-3,), + inputVolume2=dict(argstr="%s", extensions=None, position=-2,), + order=dict(argstr="--order %s",), + outputVolume=dict(argstr="%s", hash_files=False, position=-1,), ) inputs = MultiplyScalarVolumes.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MultiplyScalarVolumes_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict(extensions=None, position=-1,),) outputs = MultiplyScalarVolumes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py b/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py index 0938eab33c..435b4d0f6a 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py @@ -4,57 +4,31 @@ def test_N4ITKBiasFieldCorrection_inputs(): input_map = dict( - args=dict(argstr='%s', ), - bsplineorder=dict(argstr='--bsplineorder %d', ), - convergencethreshold=dict(argstr='--convergencethreshold %f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - histogramsharpening=dict( - argstr='--histogramsharpening %s', - sep=',', - ), - inputimage=dict( - argstr='--inputimage %s', - extensions=None, - ), - iterations=dict( - argstr='--iterations %s', - sep=',', - ), - maskimage=dict( - argstr='--maskimage %s', - extensions=None, - ), - meshresolution=dict( - argstr='--meshresolution %s', - sep=',', - ), - outputbiasfield=dict( - argstr='--outputbiasfield %s', - hash_files=False, - ), - outputimage=dict( - argstr='--outputimage %s', - hash_files=False, - ), - shrinkfactor=dict(argstr='--shrinkfactor %d', ), - splinedistance=dict(argstr='--splinedistance %f', ), - weightimage=dict( - argstr='--weightimage %s', - extensions=None, - ), + args=dict(argstr="%s",), + bsplineorder=dict(argstr="--bsplineorder %d",), + convergencethreshold=dict(argstr="--convergencethreshold %f",), + environ=dict(nohash=True, usedefault=True,), + 
histogramsharpening=dict(argstr="--histogramsharpening %s", sep=",",), + inputimage=dict(argstr="--inputimage %s", extensions=None,), + iterations=dict(argstr="--iterations %s", sep=",",), + maskimage=dict(argstr="--maskimage %s", extensions=None,), + meshresolution=dict(argstr="--meshresolution %s", sep=",",), + outputbiasfield=dict(argstr="--outputbiasfield %s", hash_files=False,), + outputimage=dict(argstr="--outputimage %s", hash_files=False,), + shrinkfactor=dict(argstr="--shrinkfactor %d",), + splinedistance=dict(argstr="--splinedistance %f",), + weightimage=dict(argstr="--weightimage %s", extensions=None,), ) inputs = N4ITKBiasFieldCorrection.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_N4ITKBiasFieldCorrection_outputs(): output_map = dict( - outputbiasfield=dict(extensions=None, ), - outputimage=dict(extensions=None, ), + outputbiasfield=dict(extensions=None,), outputimage=dict(extensions=None,), ) outputs = N4ITKBiasFieldCorrection.output_spec() diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py index 2e95aaf01e..d1a28f3374 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py @@ -4,76 +4,42 @@ def test_ResampleScalarVectorDWIVolume_inputs(): input_map = dict( - Inverse_ITK_Transformation=dict( - argstr='--Inverse_ITK_Transformation ', ), - Reference=dict( - argstr='--Reference %s', - extensions=None, - ), - args=dict(argstr='%s', ), - centered_transform=dict(argstr='--centered_transform ', ), - defField=dict( - argstr='--defField %s', - extensions=None, - ), - default_pixel_value=dict(argstr='--default_pixel_value %f', ), - direction_matrix=dict( - argstr='--direction_matrix %s', - sep=',', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - hfieldtype=dict(argstr='--hfieldtype %s', ), - image_center=dict(argstr='--image_center %s', ), - inputVolume=dict( - argstr='%s', - extensions=None, - position=-2, - ), - interpolation=dict(argstr='--interpolation %s', ), - notbulk=dict(argstr='--notbulk ', ), - number_of_thread=dict(argstr='--number_of_thread %d', ), - origin=dict(argstr='--origin %s', ), - outputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - rotation_point=dict(argstr='--rotation_point %s', ), - size=dict( - argstr='--size %s', - sep=',', - ), - spaceChange=dict(argstr='--spaceChange ', ), - spacing=dict( - argstr='--spacing %s', - sep=',', - ), - spline_order=dict(argstr='--spline_order %d', ), - transform=dict(argstr='--transform %s', ), - transform_matrix=dict( - argstr='--transform_matrix %s', - sep=',', - ), - transform_order=dict(argstr='--transform_order %s', ), - transformationFile=dict( - argstr='--transformationFile %s', - extensions=None, - ), - window_function=dict(argstr='--window_function %s', ), + Inverse_ITK_Transformation=dict(argstr="--Inverse_ITK_Transformation ",), + Reference=dict(argstr="--Reference %s", extensions=None,), + args=dict(argstr="%s",), + centered_transform=dict(argstr="--centered_transform ",), + defField=dict(argstr="--defField %s", extensions=None,), + default_pixel_value=dict(argstr="--default_pixel_value %f",), + direction_matrix=dict(argstr="--direction_matrix %s", sep=",",), + environ=dict(nohash=True, 
usedefault=True,), + hfieldtype=dict(argstr="--hfieldtype %s",), + image_center=dict(argstr="--image_center %s",), + inputVolume=dict(argstr="%s", extensions=None, position=-2,), + interpolation=dict(argstr="--interpolation %s",), + notbulk=dict(argstr="--notbulk ",), + number_of_thread=dict(argstr="--number_of_thread %d",), + origin=dict(argstr="--origin %s",), + outputVolume=dict(argstr="%s", hash_files=False, position=-1,), + rotation_point=dict(argstr="--rotation_point %s",), + size=dict(argstr="--size %s", sep=",",), + spaceChange=dict(argstr="--spaceChange ",), + spacing=dict(argstr="--spacing %s", sep=",",), + spline_order=dict(argstr="--spline_order %d",), + transform=dict(argstr="--transform %s",), + transform_matrix=dict(argstr="--transform_matrix %s", sep=",",), + transform_order=dict(argstr="--transform_order %s",), + transformationFile=dict(argstr="--transformationFile %s", extensions=None,), + window_function=dict(argstr="--window_function %s",), ) inputs = ResampleScalarVectorDWIVolume.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ResampleScalarVectorDWIVolume_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict(extensions=None, position=-1,),) outputs = ResampleScalarVectorDWIVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py index 70d8908ce0..6d8ae8ad73 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py @@ -4,38 +4,22 @@ def test_SubtractScalarVolumes_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume1=dict( - argstr='%s', - extensions=None, - position=-3, - ), - inputVolume2=dict( - argstr='%s', - extensions=None, - position=-2, - ), - order=dict(argstr='--order %s', ), - outputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputVolume1=dict(argstr="%s", extensions=None, position=-3,), + inputVolume2=dict(argstr="%s", extensions=None, position=-2,), + order=dict(argstr="--order %s",), + outputVolume=dict(argstr="%s", hash_files=False, position=-1,), ) inputs = SubtractScalarVolumes.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SubtractScalarVolumes_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict(extensions=None, position=-1,),) outputs = SubtractScalarVolumes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py index 51e7d1726d..eec13b435e 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py @@ -4,37 +4,25 @@ def test_ThresholdScalarVolume_inputs(): input_map = dict( - InputVolume=dict( - argstr='%s', - 
extensions=None, - position=-2, - ), - OutputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - lower=dict(argstr='--lower %d', ), - outsidevalue=dict(argstr='--outsidevalue %d', ), - threshold=dict(argstr='--threshold %d', ), - thresholdtype=dict(argstr='--thresholdtype %s', ), - upper=dict(argstr='--upper %d', ), + InputVolume=dict(argstr="%s", extensions=None, position=-2,), + OutputVolume=dict(argstr="%s", hash_files=False, position=-1,), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + lower=dict(argstr="--lower %d",), + outsidevalue=dict(argstr="--outsidevalue %d",), + threshold=dict(argstr="--threshold %d",), + thresholdtype=dict(argstr="--thresholdtype %s",), + upper=dict(argstr="--upper %d",), ) inputs = ThresholdScalarVolume.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ThresholdScalarVolume_outputs(): - output_map = dict(OutputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputVolume=dict(extensions=None, position=-1,),) outputs = ThresholdScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py index 3ab237831e..d45159cc1b 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py @@ -4,39 +4,24 @@ def test_VotingBinaryHoleFillingImageFilter_inputs(): input_map = dict( - args=dict(argstr='%s', ), - background=dict(argstr='--background %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - foreground=dict(argstr='--foreground %d', ), - inputVolume=dict( - argstr='%s', - extensions=None, - position=-2, - ), - majorityThreshold=dict(argstr='--majorityThreshold %d', ), - outputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - radius=dict( - argstr='--radius %s', - sep=',', - ), + args=dict(argstr="%s",), + background=dict(argstr="--background %d",), + environ=dict(nohash=True, usedefault=True,), + foreground=dict(argstr="--foreground %d",), + inputVolume=dict(argstr="%s", extensions=None, position=-2,), + majorityThreshold=dict(argstr="--majorityThreshold %d",), + outputVolume=dict(argstr="%s", hash_files=False, position=-1,), + radius=dict(argstr="--radius %s", sep=",",), ) inputs = VotingBinaryHoleFillingImageFilter.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_VotingBinaryHoleFillingImageFilter_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict(extensions=None, position=-1,),) outputs = VotingBinaryHoleFillingImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/thresholdscalarvolume.py b/nipype/interfaces/slicer/filtering/thresholdscalarvolume.py index 041ce10990..e72284456b 100644 --- a/nipype/interfaces/slicer/filtering/thresholdscalarvolume.py +++ b/nipype/interfaces/slicer/filtering/thresholdscalarvolume.py @@ -3,39 +3,49 @@ """Autogenerated file - DO NOT 
EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os class ThresholdScalarVolumeInputSpec(CommandLineInputSpec): - InputVolume = File( - position=-2, desc="Input volume", exists=True, argstr="%s") + InputVolume = File(position=-2, desc="Input volume", exists=True, argstr="%s") OutputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Thresholded input volume", - argstr="%s") + argstr="%s", + ) threshold = traits.Int(desc="Threshold value", argstr="--threshold %d") lower = traits.Int(desc="Lower threshold value", argstr="--lower %d") upper = traits.Int(desc="Upper threshold value", argstr="--upper %d") outsidevalue = traits.Int( - desc= - "Set the voxels to this value if they fall outside the threshold range", - argstr="--outsidevalue %d") + desc="Set the voxels to this value if they fall outside the threshold range", + argstr="--outsidevalue %d", + ) thresholdtype = traits.Enum( "Below", "Above", "Outside", - desc= - "What kind of threshold to perform. If Outside is selected, uses Upper and Lower values. If Below is selected, uses the ThresholdValue, if Above is selected, uses the ThresholdValue.", - argstr="--thresholdtype %s") + desc="What kind of threshold to perform. If Outside is selected, uses Upper and Lower values. If Below is selected, uses the ThresholdValue, if Above is selected, uses the ThresholdValue.", + argstr="--thresholdtype %s", + ) class ThresholdScalarVolumeOutputSpec(TraitedSpec): - OutputVolume = File( - position=-1, desc="Thresholded input volume", exists=True) + OutputVolume = File(position=-1, desc="Thresholded input volume", exists=True) class ThresholdScalarVolume(SEMLikeCommandLine): @@ -58,4 +68,4 @@ class ThresholdScalarVolume(SEMLikeCommandLine): input_spec = ThresholdScalarVolumeInputSpec output_spec = ThresholdScalarVolumeOutputSpec _cmd = "ThresholdScalarVolume " - _outputs_filenames = {'OutputVolume': 'OutputVolume.nii'} + _outputs_filenames = {"OutputVolume": "OutputVolume.nii"} diff --git a/nipype/interfaces/slicer/filtering/votingbinaryholefillingimagefilter.py b/nipype/interfaces/slicer/filtering/votingbinaryholefillingimagefilter.py index 9c19799d04..2ed3736d1b 100644 --- a/nipype/interfaces/slicer/filtering/votingbinaryholefillingimagefilter.py +++ b/nipype/interfaces/slicer/filtering/votingbinaryholefillingimagefilter.py @@ -3,7 +3,18 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os @@ -12,29 +23,31 @@ class VotingBinaryHoleFillingImageFilterInputSpec(CommandLineInputSpec): traits.Int, desc="The radius of a hole to be filled", sep=",", - argstr="--radius %s") + argstr="--radius %s", + ) majorityThreshold = traits.Int( - desc= - "The number of pixels over 
50% that will decide whether an OFF pixel will become ON or not. For example, if the neighborhood of a pixel has 124 pixels (excluding itself), the 50% will be 62, and if you set a Majority threshold of 5, that means that the filter will require 67 or more neighbor pixels to be ON in order to switch the current OFF pixel to ON.", - argstr="--majorityThreshold %d") + desc="The number of pixels over 50% that will decide whether an OFF pixel will become ON or not. For example, if the neighborhood of a pixel has 124 pixels (excluding itself), the 50% will be 62, and if you set a Majority threshold of 5, that means that the filter will require 67 or more neighbor pixels to be ON in order to switch the current OFF pixel to ON.", + argstr="--majorityThreshold %d", + ) background = traits.Int( desc="The value associated with the background (not object)", - argstr="--background %d") + argstr="--background %d", + ) foreground = traits.Int( desc="The value associated with the foreground (object)", - argstr="--foreground %d") + argstr="--foreground %d", + ) inputVolume = File( - position=-2, - desc="Input volume to be filtered", - exists=True, - argstr="%s") + position=-2, desc="Input volume to be filtered", exists=True, argstr="%s" + ) outputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", - argstr="%s") + argstr="%s", + ) class VotingBinaryHoleFillingImageFilterOutputSpec(TraitedSpec): @@ -61,4 +74,4 @@ class VotingBinaryHoleFillingImageFilter(SEMLikeCommandLine): input_spec = VotingBinaryHoleFillingImageFilterInputSpec output_spec = VotingBinaryHoleFillingImageFilterOutputSpec _cmd = "VotingBinaryHoleFillingImageFilter " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} diff --git a/nipype/interfaces/slicer/generate_classes.py b/nipype/interfaces/slicer/generate_classes.py index 371c957acd..54eeb0b089 100644 --- a/nipype/interfaces/slicer/generate_classes.py +++ b/nipype/interfaces/slicer/generate_classes.py @@ -9,7 +9,10 @@ from shutil import rmtree import keyword -python_keywords = keyword.kwlist # If c++ SEM module uses one of these key words as a command line parameter, we need to modify variable + +python_keywords = ( + keyword.kwlist +) # If c++ SEM module uses one of these key words as a command line parameter, we need to modify variable def force_to_valid_python_variable_name(old_name): @@ -24,18 +27,19 @@ def force_to_valid_python_variable_name(old_name): new_name = old_name new_name = new_name.lstrip().rstrip() if old_name in python_keywords: - new_name = 'opt_' + old_name + new_name = "opt_" + old_name return new_name def add_class_to_package(class_codes, class_names, module_name, package_dir): module_python_filename = os.path.join(package_dir, "%s.py" % module_name) - f_m = open(module_python_filename, 'w') - f_i = open(os.path.join(package_dir, "__init__.py"), 'a+') - f_m.write("""# -*- coding: utf-8 -*- + f_m = open(module_python_filename, "w") + f_i = open(os.path.join(package_dir, "__init__.py"), "a+") + f_m.write( + """# -*- coding: utf-8 -*- \"\"\"Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.\"\"\"\n\n""" - ) + ) imports = """\ from ..base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath) @@ -54,21 +58,19 @@ def crawl_code_struct(code_struct, package_dir): module_name = k.lower() class_name = k 
class_code = v - add_class_to_package([class_code], [class_name], module_name, - package_dir) + add_class_to_package([class_code], [class_name], module_name, package_dir) else: l1 = {} l2 = {} for key in list(v.keys()): - if (isinstance(v[key], str) - or isinstance(v[key], (str, bytes))): + if isinstance(v[key], str) or isinstance(v[key], (str, bytes)): l1[key] = v[key] else: l2[key] = v[key] if l2: v = l2 subpackages.append(k.lower()) - f_i = open(os.path.join(package_dir, "__init__.py"), 'a+') + f_i = open(os.path.join(package_dir, "__init__.py"), "a+") f_i.write("from %s import *\n" % k.lower()) f_i.close() new_pkg_dir = os.path.join(package_dir, k.lower()) @@ -83,9 +85,10 @@ def crawl_code_struct(code_struct, package_dir): v = l1 module_name = k.lower() add_class_to_package( - list(v.values()), list(v.keys()), module_name, package_dir) + list(v.values()), list(v.keys()), module_name, package_dir + ) if subpackages: - f = open(os.path.join(package_dir, "setup.py"), 'w') + f = open(os.path.join(package_dir, "setup.py"), "w") f.write( """# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: @@ -101,17 +104,22 @@ def configuration(parent_package='',top_path=None): if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) -""".format(pkg_name=package_dir.split("/")[-1], - sub_pks="\n ".join([ - "config.add_data_dir('%s')" % sub_pkg for sub_pkg in subpackages - ]))) +""".format( + pkg_name=package_dir.split("/")[-1], + sub_pks="\n ".join( + [ + "config.add_data_dir('%s')" % sub_pkg + for sub_pkg in subpackages + ] + ), + ) + ) f.close() -def generate_all_classes(modules_list=[], - launcher=[], - redirect_x=False, - mipav_hacks=False): +def generate_all_classes( + modules_list=[], launcher=[], redirect_x=False, mipav_hacks=False +): """ modules_list contains all the SEM compliant tools that should have wrappers created for them. launcher containtains the command line prefix wrapper arugments needed to prepare a proper environment for each of the modules. 
@@ -122,7 +130,8 @@ def generate_all_classes(modules_list=[], print("Generating Definition for module {0}".format(module)) print("^" * 80) package, code, module = generate_class( - module, launcher, redirect_x=redirect_x, mipav_hacks=mipav_hacks) + module, launcher, redirect_x=redirect_x, mipav_hacks=mipav_hacks + ) cur_package = all_code module_name = package.strip().split(" ")[0].split(".")[-1] for package in package.strip().split(" ")[0].split(".")[:-1]: @@ -137,11 +146,9 @@ def generate_all_classes(modules_list=[], crawl_code_struct(all_code, os.getcwd()) -def generate_class(module, - launcher, - strip_module_name_prefix=True, - redirect_x=False, - mipav_hacks=False): +def generate_class( + module, launcher, strip_module_name_prefix=True, redirect_x=False, mipav_hacks=False +): dom = grab_xml(module, launcher, mipav_hacks=mipav_hacks) if strip_module_name_prefix: module_name = module.split(".")[-1] @@ -153,32 +160,39 @@ def generate_class(module, # self._outputs_nodes = [] - class_string = "\"\"\"" + class_string = '"""' for desc_str in [ - 'title', 'category', 'description', 'version', 'documentation-url', - 'license', 'contributor', 'acknowledgements' + "title", + "category", + "description", + "version", + "documentation-url", + "license", + "contributor", + "acknowledgements", ]: el = dom.getElementsByTagName(desc_str) if el and el[0].firstChild and el[0].firstChild.nodeValue.strip(): - class_string += desc_str + ": " + el[0].firstChild.nodeValue.strip( - ) + "\n\n" - if desc_str == 'category': + class_string += ( + desc_str + ": " + el[0].firstChild.nodeValue.strip() + "\n\n" + ) + if desc_str == "category": category = el[0].firstChild.nodeValue.strip() - class_string += "\"\"\"" + class_string += '"""' for paramGroup in dom.getElementsByTagName("parameters"): - indices = paramGroup.getElementsByTagName('index') + indices = paramGroup.getElementsByTagName("index") max_index = 0 for index in indices: if int(index.firstChild.nodeValue) > max_index: max_index = int(index.firstChild.nodeValue) for param in paramGroup.childNodes: - if param.nodeName in ['label', 'description', '#text', '#comment']: + if param.nodeName in ["label", "description", "#text", "#comment"]: continue traitsParams = {} - longFlagNode = param.getElementsByTagName('longflag') + longFlagNode = param.getElementsByTagName("longflag") if longFlagNode: # Prefer to use longFlag as name if it is given, rather than the parameter name longFlagName = longFlagNode[0].firstChild.nodeValue @@ -192,102 +206,113 @@ def generate_class(module, name = force_to_valid_python_variable_name(name) traitsParams["argstr"] = "--" + longFlagName + " " else: - name = param.getElementsByTagName('name')[ - 0].firstChild.nodeValue + name = param.getElementsByTagName("name")[0].firstChild.nodeValue name = force_to_valid_python_variable_name(name) - if param.getElementsByTagName('index'): + if param.getElementsByTagName("index"): traitsParams["argstr"] = "" else: traitsParams["argstr"] = "--" + name + " " - if param.getElementsByTagName( - 'description') and param.getElementsByTagName( - 'description')[0].firstChild: - traitsParams["desc"] = param.getElementsByTagName( - 'description')[0].firstChild.nodeValue.replace( - '"', "\\\"").replace("\n", ", ") + if ( + param.getElementsByTagName("description") + and param.getElementsByTagName("description")[0].firstChild + ): + traitsParams["desc"] = ( + param.getElementsByTagName("description")[0] + .firstChild.nodeValue.replace('"', '\\"') + .replace("\n", ", ") + ) argsDict = { - 'directory': '%s', - 
'file': '%s', - 'integer': "%d", - 'double': "%f", - 'float': "%f", - 'image': "%s", - 'transform': "%s", - 'boolean': '', - 'string-enumeration': '%s', - 'string': "%s", - 'integer-enumeration': '%s', - 'table': '%s', - 'point': '%s', - 'region': '%s', - 'geometry': '%s' + "directory": "%s", + "file": "%s", + "integer": "%d", + "double": "%f", + "float": "%f", + "image": "%s", + "transform": "%s", + "boolean": "", + "string-enumeration": "%s", + "string": "%s", + "integer-enumeration": "%s", + "table": "%s", + "point": "%s", + "region": "%s", + "geometry": "%s", } - if param.nodeName.endswith('-vector'): + if param.nodeName.endswith("-vector"): traitsParams["argstr"] += "%s" else: traitsParams["argstr"] += argsDict[param.nodeName] - index = param.getElementsByTagName('index') + index = param.getElementsByTagName("index") if index: - traitsParams["position"] = int( - index[0].firstChild.nodeValue) - (max_index + 1) + traitsParams["position"] = int(index[0].firstChild.nodeValue) - ( + max_index + 1 + ) - desc = param.getElementsByTagName('description') + desc = param.getElementsByTagName("description") if index: traitsParams["desc"] = desc[0].firstChild.nodeValue typesDict = { - 'integer': "traits.Int", - 'double': "traits.Float", - 'float': "traits.Float", - 'image': "File", - 'transform': "File", - 'boolean': "traits.Bool", - 'string': "traits.Str", - 'file': "File", - 'geometry': "File", - 'directory': "Directory", - 'table': "File", - 'point': "traits.List", - 'region': "traits.List" + "integer": "traits.Int", + "double": "traits.Float", + "float": "traits.Float", + "image": "File", + "transform": "File", + "boolean": "traits.Bool", + "string": "traits.Str", + "file": "File", + "geometry": "File", + "directory": "Directory", + "table": "File", + "point": "traits.List", + "region": "traits.List", } - if param.nodeName.endswith('-enumeration'): + if param.nodeName.endswith("-enumeration"): type = "traits.Enum" values = [ - '"%s"' % str(el.firstChild.nodeValue).replace('"', '') - for el in param.getElementsByTagName('element') + '"%s"' % str(el.firstChild.nodeValue).replace('"', "") + for el in param.getElementsByTagName("element") ] - elif param.nodeName.endswith('-vector'): + elif param.nodeName.endswith("-vector"): type = "InputMultiPath" if param.nodeName in [ - 'file', 'directory', 'image', 'geometry', 'transform', - 'table' + "file", + "directory", + "image", + "geometry", + "transform", + "table", ]: values = [ - "%s(exists=True)" % typesDict[param.nodeName.replace( - '-vector', '')] + "%s(exists=True)" + % typesDict[param.nodeName.replace("-vector", "")] ] else: - values = [typesDict[param.nodeName.replace('-vector', '')]] + values = [typesDict[param.nodeName.replace("-vector", "")]] if mipav_hacks is True: traitsParams["sep"] = ";" else: - traitsParams["sep"] = ',' - elif param.getAttribute('multiple') == "true": + traitsParams["sep"] = "," + elif param.getAttribute("multiple") == "true": type = "InputMultiPath" if param.nodeName in [ - 'file', 'directory', 'image', 'geometry', 'transform', - 'table' + "file", + "directory", + "image", + "geometry", + "transform", + "table", ]: values = ["%s(exists=True)" % typesDict[param.nodeName]] - elif param.nodeName in ['point', 'region']: + elif param.nodeName in ["point", "region"]: values = [ - "%s(traits.Float(), minlen=3, maxlen=3)" % - typesDict[param.nodeName] + "%s(traits.Float(), minlen=3, maxlen=3)" + % typesDict[param.nodeName] ] else: values = [typesDict[param.nodeName]] @@ -297,60 +322,85 @@ def generate_class(module, type = 
typesDict[param.nodeName] if param.nodeName in [ - 'file', 'directory', 'image', 'geometry', 'transform', - 'table' + "file", + "directory", + "image", + "geometry", + "transform", + "table", ]: - if not param.getElementsByTagName('channel'): + if not param.getElementsByTagName("channel"): raise RuntimeError( - "Insufficient XML specification: each element of type 'file', 'directory', 'image', 'geometry', 'transform', or 'table' requires 'channel' field.\n{0}". - format(traitsParams)) - elif param.getElementsByTagName('channel')[ - 0].firstChild.nodeValue == 'output': + "Insufficient XML specification: each element of type 'file', 'directory', 'image', 'geometry', 'transform', or 'table' requires 'channel' field.\n{0}".format( + traitsParams + ) + ) + elif ( + param.getElementsByTagName("channel")[0].firstChild.nodeValue + == "output" + ): traitsParams["hash_files"] = False inputTraits.append( - "%s = traits.Either(traits.Bool, %s(%s), %s)" % - (name, type, - parse_values(values).replace("exists=True", ""), - parse_params(traitsParams))) + "%s = traits.Either(traits.Bool, %s(%s), %s)" + % ( + name, + type, + parse_values(values).replace("exists=True", ""), + parse_params(traitsParams), + ) + ) traitsParams["exists"] = True traitsParams.pop("argstr") traitsParams.pop("hash_files") - outputTraits.append("%s = %s(%s%s)" % - (name, type.replace("Input", "Output"), - parse_values(values), - parse_params(traitsParams))) - - outputs_filenames[name] = gen_filename_from_param( - param, name) - elif param.getElementsByTagName('channel')[ - 0].firstChild.nodeValue == 'input': + outputTraits.append( + "%s = %s(%s%s)" + % ( + name, + type.replace("Input", "Output"), + parse_values(values), + parse_params(traitsParams), + ) + ) + + outputs_filenames[name] = gen_filename_from_param(param, name) + elif ( + param.getElementsByTagName("channel")[0].firstChild.nodeValue + == "input" + ): if param.nodeName in [ - 'file', 'directory', 'image', 'geometry', - 'transform', 'table' + "file", + "directory", + "image", + "geometry", + "transform", + "table", ] and type not in ["InputMultiPath", "traits.List"]: traitsParams["exists"] = True - inputTraits.append("%s = %s(%s%s)" % - (name, type, parse_values(values), - parse_params(traitsParams))) + inputTraits.append( + "%s = %s(%s%s)" + % (name, type, parse_values(values), parse_params(traitsParams)) + ) else: raise RuntimeError( - "Insufficient XML specification: each element of type 'file', 'directory', 'image', 'geometry', 'transform', or 'table' requires 'channel' field to be in ['input','output'].\n{0}". 
- format(traitsParams)) + "Insufficient XML specification: each element of type 'file', 'directory', 'image', 'geometry', 'transform', or 'table' requires 'channel' field to be in ['input','output'].\n{0}".format( + traitsParams + ) + ) else: # For all other parameter types, they are implicitly only input types - inputTraits.append("%s = %s(%s%s)" % - (name, type, parse_values(values), - parse_params(traitsParams))) + inputTraits.append( + "%s = %s(%s%s)" + % (name, type, parse_values(values), parse_params(traitsParams)) + ) if mipav_hacks: blacklisted_inputs = ["maxMemoryUsage"] inputTraits = [ - trait for trait in inputTraits - if trait.split()[0] not in blacklisted_inputs + trait for trait in inputTraits if trait.split()[0] not in blacklisted_inputs ] compulsory_inputs = [ 'xDefaultMem = traits.Int(desc="Set default maximum heap size", argstr="-xDefaultMem %d")', - 'xMaxProcess = traits.Int(1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", usedefault=True)' + 'xMaxProcess = traits.Int(1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", usedefault=True)', ] inputTraits += compulsory_inputs @@ -366,9 +416,9 @@ def generate_class(module, output_spec_code += " " + trait + "\n" output_filenames_code = "_outputs_filenames = {" - output_filenames_code += ",".join([ - "'%s':'%s'" % (key, value) for key, value in outputs_filenames.items() - ]) + output_filenames_code += ",".join( + ["'%s':'%s'" % (key, value) for key, value in outputs_filenames.items()] + ) output_filenames_code += "}" input_spec_code += "\n\n" @@ -383,10 +433,13 @@ def generate_class(module, %output_filenames_code%\n""" template += " _redirect_x = {0}\n".format(str(redirect_x)) - main_class = template.replace('%class_str%', class_string).replace( - "%module_name%", module_name).replace("%name%", module).replace( - "%output_filenames_code%", output_filenames_code).replace( - "%launcher%", " ".join(launcher)) + main_class = ( + template.replace("%class_str%", class_string) + .replace("%module_name%", module_name) + .replace("%name%", module) + .replace("%output_filenames_code%", output_filenames_code) + .replace("%launcher%", " ".join(launcher)) + ) return category, input_spec_code + output_spec_code + main_class, module_name @@ -398,7 +451,8 @@ def grab_xml(module, launcher, mipav_hacks=False): command_list.extend([module, "--xml"]) final_command = " ".join(command_list) xmlReturnValue = subprocess.Popen( - final_command, stdout=subprocess.PIPE, shell=True).communicate()[0] + final_command, stdout=subprocess.PIPE, shell=True + ).communicate()[0] if mipav_hacks: # workaround for a jist bug https://www.nitrc.org/tracker/index.php?func=detail&aid=7234&group_id=228&atid=942 new_xml = "" @@ -418,10 +472,10 @@ def grab_xml(module, launcher, mipav_hacks=False): # workaround for a JIST bug https://www.nitrc.org/tracker/index.php?func=detail&aid=7233&group_id=228&atid=942 if xmlReturnValue.strip().endswith("XML"): xmlReturnValue = xmlReturnValue.strip()[:-3] - if xmlReturnValue.strip().startswith( - "Error: Unable to set default atlas"): - xmlReturnValue = xmlReturnValue.strip()[len( - "Error: Unable to set default atlas"):] + if xmlReturnValue.strip().startswith("Error: Unable to set default atlas"): + xmlReturnValue = xmlReturnValue.strip()[ + len("Error: Unable to set default atlas") : + ] try: dom = xml.dom.minidom.parseString(xmlReturnValue.strip()) except Exception as e: @@ -442,13 +496,13 @@ def parse_params(params): if isinstance(value, (str, bytes)): list.append('%s="%s"' % 
(key, value.replace('"', "'"))) else: - list.append('%s=%s' % (key, value)) + list.append("%s=%s" % (key, value)) return ", ".join(list) def parse_values(values): - values = ['%s' % value for value in values] + values = ["%s" % value for value in values] if len(values) > 0: retstr = ", ".join(values) + ", " else: @@ -461,15 +515,15 @@ def gen_filename_from_param(param, base): if fileExtensions: # It is possible that multiple file extensions can be specified in a # comma separated list, This will extract just the first extension - firstFileExtension = fileExtensions.split(',')[0] + firstFileExtension = fileExtensions.split(",")[0] ext = firstFileExtension else: ext = { - 'image': '.nii', - 'transform': '.mat', - 'file': '', - 'directory': '', - 'geometry': '.vtk' + "image": ".nii", + "transform": ".mat", + "file": "", + "directory": "", + "geometry": ".vtk", }[param.nodeName] return base + ext @@ -479,68 +533,68 @@ def gen_filename_from_param(param, base): # every tool in the modules list must be found on the default path # AND calling the module with --xml must be supported and compliant. modules_list = [ - 'MedianImageFilter', - 'CheckerBoardFilter', - 'EMSegmentCommandLine', - 'GrayscaleFillHoleImageFilter', + "MedianImageFilter", + "CheckerBoardFilter", + "EMSegmentCommandLine", + "GrayscaleFillHoleImageFilter", # 'CreateDICOMSeries', #missing channel - 'TractographyLabelMapSeeding', - 'IntensityDifferenceMetric', - 'DWIToDTIEstimation', - 'MaskScalarVolume', - 'ImageLabelCombine', - 'DTIimport', - 'OtsuThresholdImageFilter', - 'ExpertAutomatedRegistration', - 'ThresholdScalarVolume', - 'DWIUnbiasedNonLocalMeansFilter', - 'BRAINSFit', - 'MergeModels', - 'ResampleDTIVolume', - 'MultiplyScalarVolumes', - 'LabelMapSmoothing', - 'RigidRegistration', - 'VotingBinaryHoleFillingImageFilter', - 'BRAINSROIAuto', - 'RobustStatisticsSegmenter', - 'GradientAnisotropicDiffusion', - 'ProbeVolumeWithModel', - 'ModelMaker', - 'ExtractSkeleton', - 'GrayscaleGrindPeakImageFilter', - 'N4ITKBiasFieldCorrection', - 'BRAINSResample', - 'DTIexport', - 'VBRAINSDemonWarp', - 'ResampleScalarVectorDWIVolume', - 'ResampleScalarVolume', - 'OtsuThresholdSegmentation', + "TractographyLabelMapSeeding", + "IntensityDifferenceMetric", + "DWIToDTIEstimation", + "MaskScalarVolume", + "ImageLabelCombine", + "DTIimport", + "OtsuThresholdImageFilter", + "ExpertAutomatedRegistration", + "ThresholdScalarVolume", + "DWIUnbiasedNonLocalMeansFilter", + "BRAINSFit", + "MergeModels", + "ResampleDTIVolume", + "MultiplyScalarVolumes", + "LabelMapSmoothing", + "RigidRegistration", + "VotingBinaryHoleFillingImageFilter", + "BRAINSROIAuto", + "RobustStatisticsSegmenter", + "GradientAnisotropicDiffusion", + "ProbeVolumeWithModel", + "ModelMaker", + "ExtractSkeleton", + "GrayscaleGrindPeakImageFilter", + "N4ITKBiasFieldCorrection", + "BRAINSResample", + "DTIexport", + "VBRAINSDemonWarp", + "ResampleScalarVectorDWIVolume", + "ResampleScalarVolume", + "OtsuThresholdSegmentation", # 'ExecutionModelTour', - 'HistogramMatching', - 'BRAINSDemonWarp', - 'ModelToLabelMap', - 'GaussianBlurImageFilter', - 'DiffusionWeightedVolumeMasking', - 'GrayscaleModelMaker', - 'CastScalarVolume', - 'DicomToNrrdConverter', - 'AffineRegistration', - 'AddScalarVolumes', - 'LinearRegistration', - 'SimpleRegionGrowingSegmentation', - 'DWIJointRicianLMMSEFilter', - 'MultiResolutionAffineRegistration', - 'SubtractScalarVolumes', - 'DWIRicianLMMSEFilter', - 'OrientScalarVolume', - 'FiducialRegistration', - 'BSplineDeformableRegistration', - 
'CurvatureAnisotropicDiffusion', - 'PETStandardUptakeValueComputation', - 'DiffusionTensorScalarMeasurements', - 'ACPCTransform', - 'EMSegmentTransformToNewFormat', - 'BSplineToDeformationField' + "HistogramMatching", + "BRAINSDemonWarp", + "ModelToLabelMap", + "GaussianBlurImageFilter", + "DiffusionWeightedVolumeMasking", + "GrayscaleModelMaker", + "CastScalarVolume", + "DicomToNrrdConverter", + "AffineRegistration", + "AddScalarVolumes", + "LinearRegistration", + "SimpleRegionGrowingSegmentation", + "DWIJointRicianLMMSEFilter", + "MultiResolutionAffineRegistration", + "SubtractScalarVolumes", + "DWIRicianLMMSEFilter", + "OrientScalarVolume", + "FiducialRegistration", + "BSplineDeformableRegistration", + "CurvatureAnisotropicDiffusion", + "PETStandardUptakeValueComputation", + "DiffusionTensorScalarMeasurements", + "ACPCTransform", + "EMSegmentTransformToNewFormat", + "BSplineToDeformationField", ] # SlicerExecutionModel compliant tools that are usually statically built, and don't need the Slicer3 --launcher diff --git a/nipype/interfaces/slicer/legacy/__init__.py b/nipype/interfaces/slicer/legacy/__init__.py index 92cbc1ff73..f65d44f058 100644 --- a/nipype/interfaces/slicer/legacy/__init__.py +++ b/nipype/interfaces/slicer/legacy/__init__.py @@ -3,7 +3,11 @@ from .segmentation import OtsuThresholdSegmentation from .filtering import OtsuThresholdImageFilter, ResampleScalarVolume from .converters import BSplineToDeformationField -from .registration import (BSplineDeformableRegistration, AffineRegistration, - MultiResolutionAffineRegistration, - RigidRegistration, LinearRegistration, - ExpertAutomatedRegistration) +from .registration import ( + BSplineDeformableRegistration, + AffineRegistration, + MultiResolutionAffineRegistration, + RigidRegistration, + LinearRegistration, + ExpertAutomatedRegistration, +) diff --git a/nipype/interfaces/slicer/legacy/converters.py b/nipype/interfaces/slicer/legacy/converters.py index f5af1ad29b..aadd840d71 100644 --- a/nipype/interfaces/slicer/legacy/converters.py +++ b/nipype/interfaces/slicer/legacy/converters.py @@ -3,7 +3,18 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os @@ -11,7 +22,8 @@ class BSplineToDeformationFieldInputSpec(CommandLineInputSpec): tfm = File(exists=True, argstr="--tfm %s") refImage = File(exists=True, argstr="--refImage %s") defImage = traits.Either( - traits.Bool, File(), hash_files=False, argstr="--defImage %s") + traits.Bool, File(), hash_files=False, argstr="--defImage %s" + ) class BSplineToDeformationFieldOutputSpec(TraitedSpec): @@ -38,4 +50,4 @@ class BSplineToDeformationField(SEMLikeCommandLine): input_spec = BSplineToDeformationFieldInputSpec output_spec = BSplineToDeformationFieldOutputSpec _cmd = "BSplineToDeformationField " - _outputs_filenames = {'defImage': 'defImage.nii'} + _outputs_filenames = {"defImage": "defImage.nii"} diff --git a/nipype/interfaces/slicer/legacy/diffusion/denoising.py b/nipype/interfaces/slicer/legacy/diffusion/denoising.py index 0cc8cce0f6..0cde8fe64e 100644 --- a/nipype/interfaces/slicer/legacy/diffusion/denoising.py +++ 
b/nipype/interfaces/slicer/legacy/diffusion/denoising.py @@ -3,46 +3,57 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os class DWIUnbiasedNonLocalMeansFilterInputSpec(CommandLineInputSpec): rs = InputMultiPath( traits.Int, - desc= - "The algorithm search for similar voxels in a neighborhood of this size (larger sizes than the default one are extremely slow).", + desc="The algorithm search for similar voxels in a neighborhood of this size (larger sizes than the default one are extremely slow).", sep=",", - argstr="--rs %s") + argstr="--rs %s", + ) rc = InputMultiPath( traits.Int, - desc= - "Similarity between blocks is measured using windows of this size.", + desc="Similarity between blocks is measured using windows of this size.", sep=",", - argstr="--rc %s") + argstr="--rc %s", + ) hp = traits.Float( - desc= - "This parameter is related to noise; the larger the parameter, the more agressive the filtering. Should be near 1, and only values between 0.8 and 1.2 are allowed", - argstr="--hp %f") + desc="This parameter is related to noise; the larger the parameter, the more agressive the filtering. Should be near 1, and only values between 0.8 and 1.2 are allowed", + argstr="--hp %f", + ) ng = traits.Int( - desc= - "The number of the closest gradients that are used to jointly filter a given gradient direction (a maximum of 5 is allowed).", - argstr="--ng %d") + desc="The number of the closest gradients that are used to jointly filter a given gradient direction (a maximum of 5 is allowed).", + argstr="--ng %d", + ) re = InputMultiPath( traits.Int, - desc= - "A neighborhood of this size is used to compute the statistics for noise estimation.", + desc="A neighborhood of this size is used to compute the statistics for noise estimation.", sep=",", - argstr="--re %s") - inputVolume = File( - position=-2, desc="Input DWI volume.", exists=True, argstr="%s") + argstr="--re %s", + ) + inputVolume = File(position=-2, desc="Input DWI volume.", exists=True, argstr="%s") outputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Output DWI volume.", - argstr="%s") + argstr="%s", + ) class DWIUnbiasedNonLocalMeansFilterOutputSpec(TraitedSpec): @@ -73,4 +84,4 @@ class DWIUnbiasedNonLocalMeansFilter(SEMLikeCommandLine): input_spec = DWIUnbiasedNonLocalMeansFilterInputSpec output_spec = DWIUnbiasedNonLocalMeansFilterOutputSpec _cmd = "DWIUnbiasedNonLocalMeansFilter " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} diff --git a/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py b/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py index 0be53e7afc..81f61c1bbc 100644 --- a/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py +++ b/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py @@ -4,46 +4,25 @@ def test_DWIUnbiasedNonLocalMeansFilter_inputs(): input_map = dict( - args=dict(argstr='%s', ), - 
environ=dict( - nohash=True, - usedefault=True, - ), - hp=dict(argstr='--hp %f', ), - inputVolume=dict( - argstr='%s', - extensions=None, - position=-2, - ), - ng=dict(argstr='--ng %d', ), - outputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - rc=dict( - argstr='--rc %s', - sep=',', - ), - re=dict( - argstr='--re %s', - sep=',', - ), - rs=dict( - argstr='--rs %s', - sep=',', - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + hp=dict(argstr="--hp %f",), + inputVolume=dict(argstr="%s", extensions=None, position=-2,), + ng=dict(argstr="--ng %d",), + outputVolume=dict(argstr="%s", hash_files=False, position=-1,), + rc=dict(argstr="--rc %s", sep=",",), + re=dict(argstr="--re %s", sep=",",), + rs=dict(argstr="--rs %s", sep=",",), ) inputs = DWIUnbiasedNonLocalMeansFilter.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DWIUnbiasedNonLocalMeansFilter_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict(extensions=None, position=-1,),) outputs = DWIUnbiasedNonLocalMeansFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/filtering.py b/nipype/interfaces/slicer/legacy/filtering.py index aaed2350e0..ee041bbc50 100644 --- a/nipype/interfaces/slicer/legacy/filtering.py +++ b/nipype/interfaces/slicer/legacy/filtering.py @@ -2,35 +2,45 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os class OtsuThresholdImageFilterInputSpec(CommandLineInputSpec): insideValue = traits.Int( - desc= - "The value assigned to pixels that are inside the computed threshold", - argstr="--insideValue %d") + desc="The value assigned to pixels that are inside the computed threshold", + argstr="--insideValue %d", + ) outsideValue = traits.Int( - desc= - "The value assigned to pixels that are outside the computed threshold", - argstr="--outsideValue %d") + desc="The value assigned to pixels that are outside the computed threshold", + argstr="--outsideValue %d", + ) numberOfBins = traits.Int( - desc= - "This is an advanced parameter. The number of bins in the histogram used to model the probability mass function of the two intensity distributions. Small numbers of bins may result in a more conservative threshold. The default should suffice for most applications. Experimentation is the only way to see the effect of varying this parameter.", - argstr="--numberOfBins %d") + desc="This is an advanced parameter. The number of bins in the histogram used to model the probability mass function of the two intensity distributions. Small numbers of bins may result in a more conservative threshold. The default should suffice for most applications. 
Experimentation is the only way to see the effect of varying this parameter.", + argstr="--numberOfBins %d", + ) inputVolume = File( - position=-2, - desc="Input volume to be filtered", - exists=True, - argstr="%s") + position=-2, desc="Input volume to be filtered", exists=True, argstr="%s" + ) outputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", - argstr="%s") + argstr="%s", + ) class OtsuThresholdImageFilterOutputSpec(TraitedSpec): @@ -61,7 +71,7 @@ class OtsuThresholdImageFilter(SEMLikeCommandLine): input_spec = OtsuThresholdImageFilterInputSpec output_spec = OtsuThresholdImageFilterOutputSpec _cmd = "OtsuThresholdImageFilter " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} class ResampleScalarVolumeInputSpec(CommandLineInputSpec): @@ -69,7 +79,8 @@ class ResampleScalarVolumeInputSpec(CommandLineInputSpec): traits.Float, desc="Spacing along each dimension (0 means use input spacing)", sep=",", - argstr="--spacing %s") + argstr="--spacing %s", + ) interpolation = traits.Enum( "linear", "nearestNeighbor", @@ -79,21 +90,20 @@ class ResampleScalarVolumeInputSpec(CommandLineInputSpec): "welch", "lanczos", "blackman", - desc= - "Sampling algorithm (linear, nearest neighbor, bspline(cubic) or windowed sinc). There are several sinc algorithms available as described in the following publication: Erik H. W. Meijering, Wiro J. Niessen, Josien P. W. Pluim, Max A. Viergever: Quantitative Comparison of Sinc-Approximating Kernels for Medical Image Interpolation. MICCAI 1999, pp. 210-217. Each window has a radius of 3;", - argstr="--interpolation %s") + desc="Sampling algorithm (linear, nearest neighbor, bspline(cubic) or windowed sinc). There are several sinc algorithms available as described in the following publication: Erik H. W. Meijering, Wiro J. Niessen, Josien P. W. Pluim, Max A. Viergever: Quantitative Comparison of Sinc-Approximating Kernels for Medical Image Interpolation. MICCAI 1999, pp. 210-217. 
Each window has a radius of 3;", + argstr="--interpolation %s", + ) InputVolume = File( - position=-2, - desc="Input volume to be resampled", - exists=True, - argstr="%s") + position=-2, desc="Input volume to be resampled", exists=True, argstr="%s" + ) OutputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Resampled Volume", - argstr="%s") + argstr="%s", + ) class ResampleScalarVolumeOutputSpec(TraitedSpec): @@ -120,4 +130,4 @@ class ResampleScalarVolume(SEMLikeCommandLine): input_spec = ResampleScalarVolumeInputSpec output_spec = ResampleScalarVolumeOutputSpec _cmd = "ResampleScalarVolume " - _outputs_filenames = {'OutputVolume': 'OutputVolume.nii'} + _outputs_filenames = {"OutputVolume": "OutputVolume.nii"} diff --git a/nipype/interfaces/slicer/legacy/registration.py b/nipype/interfaces/slicer/legacy/registration.py index 7f73d85d82..3bfd2377d4 100644 --- a/nipype/interfaces/slicer/legacy/registration.py +++ b/nipype/interfaces/slicer/legacy/registration.py @@ -3,85 +3,94 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os class BSplineDeformableRegistrationInputSpec(CommandLineInputSpec): - iterations = traits.Int( - desc="Number of iterations", argstr="--iterations %d") + iterations = traits.Int(desc="Number of iterations", argstr="--iterations %d") gridSize = traits.Int( - desc= - "Number of grid points on interior of the fixed image. Larger grid sizes allow for finer registrations.", - argstr="--gridSize %d") + desc="Number of grid points on interior of the fixed image. Larger grid sizes allow for finer registrations.", + argstr="--gridSize %d", + ) histogrambins = traits.Int( - desc= - "Number of histogram bins to use for Mattes Mutual Information. Reduce the number of bins if a deformable registration fails. If the number of bins is too large, the estimated PDFs will be a field of impulses and will inhibit reliable registration estimation.", - argstr="--histogrambins %d") + desc="Number of histogram bins to use for Mattes Mutual Information. Reduce the number of bins if a deformable registration fails. If the number of bins is too large, the estimated PDFs will be a field of impulses and will inhibit reliable registration estimation.", + argstr="--histogrambins %d", + ) spatialsamples = traits.Int( - desc= - "Number of spatial samples to use in estimating Mattes Mutual Information. Larger values yield more accurate PDFs and improved registration quality.", - argstr="--spatialsamples %d") + desc="Number of spatial samples to use in estimating Mattes Mutual Information. 
Larger values yield more accurate PDFs and improved registration quality.", + argstr="--spatialsamples %d", + ) constrain = traits.Bool( - desc= - "Constrain the deformation to the amount specified in Maximum Deformation", - argstr="--constrain ") + desc="Constrain the deformation to the amount specified in Maximum Deformation", + argstr="--constrain ", + ) maximumDeformation = traits.Float( - desc= - "If Constrain Deformation is checked, limit the deformation to this amount.", - argstr="--maximumDeformation %f") + desc="If Constrain Deformation is checked, limit the deformation to this amount.", + argstr="--maximumDeformation %f", + ) default = traits.Int( - desc= - "Default pixel value used if resampling a pixel outside of the volume.", - argstr="--default %d") + desc="Default pixel value used if resampling a pixel outside of the volume.", + argstr="--default %d", + ) initialtransform = File( - desc= - "Initial transform for aligning the fixed and moving image. Maps positions in the fixed coordinate frame to positions in the moving coordinate frame. This transform should be an affine or rigid transform. It is used an a bulk transform for the BSpline. Optional.", + desc="Initial transform for aligning the fixed and moving image. Maps positions in the fixed coordinate frame to positions in the moving coordinate frame. This transform should be an affine or rigid transform. It is used an a bulk transform for the BSpline. Optional.", exists=True, - argstr="--initialtransform %s") + argstr="--initialtransform %s", + ) FixedImageFileName = File( - position=-2, - desc="Fixed image to which to register", - exists=True, - argstr="%s") + position=-2, desc="Fixed image to which to register", exists=True, argstr="%s" + ) MovingImageFileName = File( - position=-1, desc="Moving image", exists=True, argstr="%s") + position=-1, desc="Moving image", exists=True, argstr="%s" + ) outputtransform = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Transform calculated that aligns the fixed and moving image. Maps positions from the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", - argstr="--outputtransform %s") + desc="Transform calculated that aligns the fixed and moving image. Maps positions from the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", + argstr="--outputtransform %s", + ) outputwarp = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Vector field that applies an equivalent warp as the BSpline. Maps positions from the fixed coordinate frame to the moving coordinate frame. Optional.", - argstr="--outputwarp %s") + desc="Vector field that applies an equivalent warp as the BSpline. Maps positions from the fixed coordinate frame to the moving coordinate frame. Optional.", + argstr="--outputwarp %s", + ) resampledmovingfilename = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Resampled moving image to fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", - argstr="--resampledmovingfilename %s") + desc="Resampled moving image to fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", + argstr="--resampledmovingfilename %s", + ) class BSplineDeformableRegistrationOutputSpec(TraitedSpec): outputtransform = File( - desc= - "Transform calculated that aligns the fixed and moving image. 
Maps positions from the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", - exists=True) + desc="Transform calculated that aligns the fixed and moving image. Maps positions from the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", + exists=True, + ) outputwarp = File( - desc= - "Vector field that applies an equivalent warp as the BSpline. Maps positions from the fixed coordinate frame to the moving coordinate frame. Optional.", - exists=True) + desc="Vector field that applies an equivalent warp as the BSpline. Maps positions from the fixed coordinate frame to the moving coordinate frame. Optional.", + exists=True, + ) resampledmovingfilename = File( - desc= - "Resampled moving image to fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", - exists=True) + desc="Resampled moving image to fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", + exists=True, + ) class BSplineDeformableRegistration(SEMLikeCommandLine): @@ -105,72 +114,70 @@ class BSplineDeformableRegistration(SEMLikeCommandLine): output_spec = BSplineDeformableRegistrationOutputSpec _cmd = "BSplineDeformableRegistration " _outputs_filenames = { - 'resampledmovingfilename': 'resampledmovingfilename.nii', - 'outputtransform': 'outputtransform.txt', - 'outputwarp': 'outputwarp.nrrd' + "resampledmovingfilename": "resampledmovingfilename.nii", + "outputtransform": "outputtransform.txt", + "outputwarp": "outputwarp.nrrd", } class AffineRegistrationInputSpec(CommandLineInputSpec): fixedsmoothingfactor = traits.Int( - desc= - "Amount of smoothing applied to fixed image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", - argstr="--fixedsmoothingfactor %d") + desc="Amount of smoothing applied to fixed image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", + argstr="--fixedsmoothingfactor %d", + ) movingsmoothingfactor = traits.Int( - desc= - "Amount of smoothing applied to moving image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", - argstr="--movingsmoothingfactor %d") + desc="Amount of smoothing applied to moving image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", + argstr="--movingsmoothingfactor %d", + ) histogrambins = traits.Int( - desc= - "Number of histogram bins to use for Mattes Mutual Information. Reduce the number of bins if a registration fails. If the number of bins is too large, the estimated PDFs will be a field of impulses and will inhibit reliable registration estimation.", - argstr="--histogrambins %d") + desc="Number of histogram bins to use for Mattes Mutual Information. Reduce the number of bins if a registration fails. 
If the number of bins is too large, the estimated PDFs will be a field of impulses and will inhibit reliable registration estimation.", + argstr="--histogrambins %d", + ) spatialsamples = traits.Int( - desc= - "Number of spatial samples to use in estimating Mattes Mutual Information. Larger values yield more accurate PDFs and improved registration quality.", - argstr="--spatialsamples %d") - iterations = traits.Int( - desc="Number of iterations", argstr="--iterations %d") + desc="Number of spatial samples to use in estimating Mattes Mutual Information. Larger values yield more accurate PDFs and improved registration quality.", + argstr="--spatialsamples %d", + ) + iterations = traits.Int(desc="Number of iterations", argstr="--iterations %d") translationscale = traits.Float( - desc= - "Relative scale of translations to rotations, i.e. a value of 100 means 10mm = 1 degree. (Actual scale used is 1/(TranslationScale^2)). This parameter is used to \'weight\' or \'standardized\' the transform parameters and their effect on the registration objective function.", - argstr="--translationscale %f") + desc="Relative scale of translations to rotations, i.e. a value of 100 means 10mm = 1 degree. (Actual scale used is 1/(TranslationScale^2)). This parameter is used to 'weight' or 'standardized' the transform parameters and their effect on the registration objective function.", + argstr="--translationscale %f", + ) initialtransform = File( - desc= - "Initial transform for aligning the fixed and moving image. Maps positions in the fixed coordinate frame to positions in the moving coordinate frame. Optional.", + desc="Initial transform for aligning the fixed and moving image. Maps positions in the fixed coordinate frame to positions in the moving coordinate frame. Optional.", exists=True, - argstr="--initialtransform %s") + argstr="--initialtransform %s", + ) FixedImageFileName = File( - position=-2, - desc="Fixed image to which to register", - exists=True, - argstr="%s") + position=-2, desc="Fixed image to which to register", exists=True, argstr="%s" + ) MovingImageFileName = File( - position=-1, desc="Moving image", exists=True, argstr="%s") + position=-1, desc="Moving image", exists=True, argstr="%s" + ) outputtransform = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", - argstr="--outputtransform %s") + desc="Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", + argstr="--outputtransform %s", + ) resampledmovingfilename = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Resampled moving image to the fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", - argstr="--resampledmovingfilename %s") + desc="Resampled moving image to the fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", + argstr="--resampledmovingfilename %s", + ) class AffineRegistrationOutputSpec(TraitedSpec): outputtransform = File( - desc= - "Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. 
Optional (specify an output transform or an output volume or both).", - exists=True) + desc="Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", + exists=True, + ) resampledmovingfilename = File( - desc= - "Resampled moving image to the fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", - exists=True) + desc="Resampled moving image to the fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", + exists=True, + ) class AffineRegistration(SEMLikeCommandLine): @@ -200,62 +207,70 @@ class AffineRegistration(SEMLikeCommandLine): output_spec = AffineRegistrationOutputSpec _cmd = "AffineRegistration " _outputs_filenames = { - 'resampledmovingfilename': 'resampledmovingfilename.nii', - 'outputtransform': 'outputtransform.txt' + "resampledmovingfilename": "resampledmovingfilename.nii", + "outputtransform": "outputtransform.txt", } class MultiResolutionAffineRegistrationInputSpec(CommandLineInputSpec): fixedImage = File( position=-2, - desc= - "Image which defines the space into which the moving image is registered", + desc="Image which defines the space into which the moving image is registered", exists=True, - argstr="%s") + argstr="%s", + ) movingImage = File( position=-1, - desc= - "The transform goes from the fixed image's space into the moving image's space", + desc="The transform goes from the fixed image's space into the moving image's space", exists=True, - argstr="%s") + argstr="%s", + ) resampledImage = traits.Either( traits.Bool, File(), hash_files=False, desc="Registration results", - argstr="--resampledImage %s") + argstr="--resampledImage %s", + ) saveTransform = traits.Either( traits.Bool, File(), hash_files=False, desc="Save the output transform from the registration", - argstr="--saveTransform %s") + argstr="--saveTransform %s", + ) fixedImageMask = File( desc="Label image which defines a mask of interest for the fixed image", exists=True, - argstr="--fixedImageMask %s") + argstr="--fixedImageMask %s", + ) fixedImageROI = traits.List( desc="Label image which defines a ROI of interest for the fixed image", - argstr="--fixedImageROI %s") + argstr="--fixedImageROI %s", + ) numIterations = traits.Int( desc="Number of iterations to run at each resolution level.", - argstr="--numIterations %d") + argstr="--numIterations %d", + ) numLineIterations = traits.Int( desc="Number of iterations to run at each resolution level.", - argstr="--numLineIterations %d") + argstr="--numLineIterations %d", + ) stepSize = traits.Float( - desc="The maximum step size of the optimizer in voxels", - argstr="--stepSize %f") + desc="The maximum step size of the optimizer in voxels", argstr="--stepSize %f" + ) stepTolerance = traits.Float( desc="The maximum step size of the optimizer in voxels", - argstr="--stepTolerance %f") + argstr="--stepTolerance %f", + ) metricTolerance = traits.Float(argstr="--metricTolerance %f") class MultiResolutionAffineRegistrationOutputSpec(TraitedSpec): resampledImage = File(desc="Registration results", exists=True) saveTransform = File( - desc="Save the output transform from the registration", exists=True) + desc="Save the output transform from the registration", exists=True + ) class MultiResolutionAffineRegistration(SEMLikeCommandLine): @@ -279,85 +294,84 @@ class MultiResolutionAffineRegistration(SEMLikeCommandLine): output_spec = 
MultiResolutionAffineRegistrationOutputSpec _cmd = "MultiResolutionAffineRegistration " _outputs_filenames = { - 'resampledImage': 'resampledImage.nii', - 'saveTransform': 'saveTransform.txt' + "resampledImage": "resampledImage.nii", + "saveTransform": "saveTransform.txt", } class RigidRegistrationInputSpec(CommandLineInputSpec): fixedsmoothingfactor = traits.Int( - desc= - "Amount of smoothing applied to fixed image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", - argstr="--fixedsmoothingfactor %d") + desc="Amount of smoothing applied to fixed image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", + argstr="--fixedsmoothingfactor %d", + ) movingsmoothingfactor = traits.Int( - desc= - "Amount of smoothing applied to moving image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", - argstr="--movingsmoothingfactor %d") + desc="Amount of smoothing applied to moving image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", + argstr="--movingsmoothingfactor %d", + ) testingmode = traits.Bool( - desc= - "Enable testing mode. Input transform will be used to construct floating image. The floating image will be ignored if passed.", - argstr="--testingmode ") + desc="Enable testing mode. Input transform will be used to construct floating image. The floating image will be ignored if passed.", + argstr="--testingmode ", + ) histogrambins = traits.Int( - desc= - "Number of histogram bins to use for Mattes Mutual Information. Reduce the number of bins if a registration fails. If the number of bins is too large, the estimated PDFs will be a field of impulses and will inhibit reliable registration estimation.", - argstr="--histogrambins %d") + desc="Number of histogram bins to use for Mattes Mutual Information. Reduce the number of bins if a registration fails. If the number of bins is too large, the estimated PDFs will be a field of impulses and will inhibit reliable registration estimation.", + argstr="--histogrambins %d", + ) spatialsamples = traits.Int( - desc= - "Number of spatial samples to use in estimating Mattes Mutual Information. Larger values yield more accurate PDFs and improved registration quality.", - argstr="--spatialsamples %d") + desc="Number of spatial samples to use in estimating Mattes Mutual Information. Larger values yield more accurate PDFs and improved registration quality.", + argstr="--spatialsamples %d", + ) iterations = InputMultiPath( traits.Int, - desc= - "Comma separated list of iterations. Must have the same number of elements as the learning rate.", + desc="Comma separated list of iterations. Must have the same number of elements as the learning rate.", sep=",", - argstr="--iterations %s") + argstr="--iterations %s", + ) learningrate = InputMultiPath( traits.Float, - desc= - "Comma separated list of learning rates. 
Learning rate is a scale factor on the gradient of the registration objective function (gradient with respect to the parameters of the transformation) used to update the parameters of the transformation during optimization. Smaller values cause the optimizer to take smaller steps through the parameter space. Larger values are typically used early in the registration process to take large jumps in parameter space followed by smaller values to home in on the optimum value of the registration objective function. Default is: 0.01, 0.005, 0.0005, 0.0002. Must have the same number of elements as iterations.", + desc="Comma separated list of learning rates. Learning rate is a scale factor on the gradient of the registration objective function (gradient with respect to the parameters of the transformation) used to update the parameters of the transformation during optimization. Smaller values cause the optimizer to take smaller steps through the parameter space. Larger values are typically used early in the registration process to take large jumps in parameter space followed by smaller values to home in on the optimum value of the registration objective function. Default is: 0.01, 0.005, 0.0005, 0.0002. Must have the same number of elements as iterations.", sep=",", - argstr="--learningrate %s") + argstr="--learningrate %s", + ) translationscale = traits.Float( - desc= - "Relative scale of translations to rotations, i.e. a value of 100 means 10mm = 1 degree. (Actual scale used 1/(TranslationScale^2)). This parameter is used to \'weight\' or \'standardized\' the transform parameters and their effect on the registration objective function.", - argstr="--translationscale %f") + desc="Relative scale of translations to rotations, i.e. a value of 100 means 10mm = 1 degree. (Actual scale used 1/(TranslationScale^2)). This parameter is used to 'weight' or 'standardized' the transform parameters and their effect on the registration objective function.", + argstr="--translationscale %f", + ) initialtransform = File( - desc= - "Initial transform for aligning the fixed and moving image. Maps positions in the fixed coordinate frame to positions in the moving coordinate frame. Optional.", + desc="Initial transform for aligning the fixed and moving image. Maps positions in the fixed coordinate frame to positions in the moving coordinate frame. Optional.", exists=True, - argstr="--initialtransform %s") + argstr="--initialtransform %s", + ) FixedImageFileName = File( - position=-2, - desc="Fixed image to which to register", - exists=True, - argstr="%s") + position=-2, desc="Fixed image to which to register", exists=True, argstr="%s" + ) MovingImageFileName = File( - position=-1, desc="Moving image", exists=True, argstr="%s") + position=-1, desc="Moving image", exists=True, argstr="%s" + ) outputtransform = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", - argstr="--outputtransform %s") + desc="Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", + argstr="--outputtransform %s", + ) resampledmovingfilename = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Resampled moving image to the fixed image coordinate frame. 
Optional (specify an output transform or an output volume or both).", - argstr="--resampledmovingfilename %s") + desc="Resampled moving image to the fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", + argstr="--resampledmovingfilename %s", + ) class RigidRegistrationOutputSpec(TraitedSpec): outputtransform = File( - desc= - "Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", - exists=True) + desc="Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", + exists=True, + ) resampledmovingfilename = File( - desc= - "Resampled moving image to the fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", - exists=True) + desc="Resampled moving image to the fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", + exists=True, + ) class RigidRegistration(SEMLikeCommandLine): @@ -391,81 +405,80 @@ class RigidRegistration(SEMLikeCommandLine): output_spec = RigidRegistrationOutputSpec _cmd = "RigidRegistration " _outputs_filenames = { - 'resampledmovingfilename': 'resampledmovingfilename.nii', - 'outputtransform': 'outputtransform.txt' + "resampledmovingfilename": "resampledmovingfilename.nii", + "outputtransform": "outputtransform.txt", } class LinearRegistrationInputSpec(CommandLineInputSpec): fixedsmoothingfactor = traits.Int( - desc= - "Amount of smoothing applied to fixed image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", - argstr="--fixedsmoothingfactor %d") + desc="Amount of smoothing applied to fixed image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", + argstr="--fixedsmoothingfactor %d", + ) movingsmoothingfactor = traits.Int( - desc= - "Amount of smoothing applied to moving image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", - argstr="--movingsmoothingfactor %d") + desc="Amount of smoothing applied to moving image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", + argstr="--movingsmoothingfactor %d", + ) histogrambins = traits.Int( - desc= - "Number of histogram bins to use for Mattes Mutual Information. Reduce the number of bins if a registration fails. If the number of bins is too large, the estimated PDFs will be a field of impulses and will inhibit reliable registration estimation.", - argstr="--histogrambins %d") + desc="Number of histogram bins to use for Mattes Mutual Information. Reduce the number of bins if a registration fails. 
If the number of bins is too large, the estimated PDFs will be a field of impulses and will inhibit reliable registration estimation.", + argstr="--histogrambins %d", + ) spatialsamples = traits.Int( - desc= - "Number of spatial samples to use in estimating Mattes Mutual Information. Larger values yield more accurate PDFs and improved registration quality.", - argstr="--spatialsamples %d") + desc="Number of spatial samples to use in estimating Mattes Mutual Information. Larger values yield more accurate PDFs and improved registration quality.", + argstr="--spatialsamples %d", + ) iterations = InputMultiPath( traits.Int, - desc= - "Comma separated list of iterations. Must have the same number of elements as the learning rate.", + desc="Comma separated list of iterations. Must have the same number of elements as the learning rate.", sep=",", - argstr="--iterations %s") + argstr="--iterations %s", + ) learningrate = InputMultiPath( traits.Float, - desc= - "Comma separated list of learning rates. Learning rate is a scale factor on the gradient of the registration objective function (gradient with respect to the parameters of the transformation) used to update the parameters of the transformation during optimization. Smaller values cause the optimizer to take smaller steps through the parameter space. Larger values are typically used early in the registration process to take large jumps in parameter space followed by smaller values to home in on the optimum value of the registration objective function. Default is: 0.01, 0.005, 0.0005, 0.0002. Must have the same number of elements as iterations.", + desc="Comma separated list of learning rates. Learning rate is a scale factor on the gradient of the registration objective function (gradient with respect to the parameters of the transformation) used to update the parameters of the transformation during optimization. Smaller values cause the optimizer to take smaller steps through the parameter space. Larger values are typically used early in the registration process to take large jumps in parameter space followed by smaller values to home in on the optimum value of the registration objective function. Default is: 0.01, 0.005, 0.0005, 0.0002. Must have the same number of elements as iterations.", sep=",", - argstr="--learningrate %s") + argstr="--learningrate %s", + ) translationscale = traits.Float( - desc= - "Relative scale of translations to rotations, i.e. a value of 100 means 10mm = 1 degree. (Actual scale used 1/(TranslationScale^2)). This parameter is used to \'weight\' or \'standardized\' the transform parameters and their effect on the registration objective function.", - argstr="--translationscale %f") + desc="Relative scale of translations to rotations, i.e. a value of 100 means 10mm = 1 degree. (Actual scale used 1/(TranslationScale^2)). This parameter is used to 'weight' or 'standardized' the transform parameters and their effect on the registration objective function.", + argstr="--translationscale %f", + ) initialtransform = File( - desc= - "Initial transform for aligning the fixed and moving image. Maps positions in the fixed coordinate frame to positions in the moving coordinate frame. Optional.", + desc="Initial transform for aligning the fixed and moving image. Maps positions in the fixed coordinate frame to positions in the moving coordinate frame. 
Optional.", exists=True, - argstr="--initialtransform %s") + argstr="--initialtransform %s", + ) FixedImageFileName = File( - position=-2, - desc="Fixed image to which to register", - exists=True, - argstr="%s") + position=-2, desc="Fixed image to which to register", exists=True, argstr="%s" + ) MovingImageFileName = File( - position=-1, desc="Moving image", exists=True, argstr="%s") + position=-1, desc="Moving image", exists=True, argstr="%s" + ) outputtransform = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", - argstr="--outputtransform %s") + desc="Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", + argstr="--outputtransform %s", + ) resampledmovingfilename = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Resampled moving image to the fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", - argstr="--resampledmovingfilename %s") + desc="Resampled moving image to the fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", + argstr="--resampledmovingfilename %s", + ) class LinearRegistrationOutputSpec(TraitedSpec): outputtransform = File( - desc= - "Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", - exists=True) + desc="Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", + exists=True, + ) resampledmovingfilename = File( - desc= - "Resampled moving image to the fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", - exists=True) + desc="Resampled moving image to the fixed image coordinate frame. 
Optional (specify an output transform or an output volume or both).", + exists=True, + ) class LinearRegistration(SEMLikeCommandLine): @@ -489,40 +502,43 @@ class LinearRegistration(SEMLikeCommandLine): output_spec = LinearRegistrationOutputSpec _cmd = "LinearRegistration " _outputs_filenames = { - 'resampledmovingfilename': 'resampledmovingfilename.nii', - 'outputtransform': 'outputtransform.txt' + "resampledmovingfilename": "resampledmovingfilename.nii", + "outputtransform": "outputtransform.txt", } class ExpertAutomatedRegistrationInputSpec(CommandLineInputSpec): fixedImage = File( position=-2, - desc= - "Image which defines the space into which the moving image is registered", + desc="Image which defines the space into which the moving image is registered", exists=True, - argstr="%s") + argstr="%s", + ) movingImage = File( position=-1, - desc= - "The transform goes from the fixed image's space into the moving image's space", + desc="The transform goes from the fixed image's space into the moving image's space", exists=True, - argstr="%s") + argstr="%s", + ) resampledImage = traits.Either( traits.Bool, File(), hash_files=False, desc="Registration results", - argstr="--resampledImage %s") + argstr="--resampledImage %s", + ) loadTransform = File( desc="Load a transform that is immediately applied to the moving image", exists=True, - argstr="--loadTransform %s") + argstr="--loadTransform %s", + ) saveTransform = traits.Either( traits.Bool, File(), hash_files=False, desc="Save the transform that results from registration", - argstr="--saveTransform %s") + argstr="--saveTransform %s", + ) initialization = traits.Enum( "None", "Landmarks", @@ -530,7 +546,8 @@ class ExpertAutomatedRegistrationInputSpec(CommandLineInputSpec): "CentersOfMass", "SecondMoments", desc="Method to prime the registration process", - argstr="--initialization %s") + argstr="--initialization %s", + ) registration = traits.Enum( "None", "Initial", @@ -541,92 +558,107 @@ class ExpertAutomatedRegistrationInputSpec(CommandLineInputSpec): "PipelineAffine", "PipelineBSpline", desc="Method for the registration process", - argstr="--registration %s") + argstr="--registration %s", + ) metric = traits.Enum( "MattesMI", "NormCorr", "MeanSqrd", desc="Method to quantify image match", - argstr="--metric %s") + argstr="--metric %s", + ) expectedOffset = traits.Float( - desc="Expected misalignment after initialization", - argstr="--expectedOffset %f") + desc="Expected misalignment after initialization", argstr="--expectedOffset %f" + ) expectedRotation = traits.Float( desc="Expected misalignment after initialization", - argstr="--expectedRotation %f") + argstr="--expectedRotation %f", + ) expectedScale = traits.Float( - desc="Expected misalignment after initialization", - argstr="--expectedScale %f") + desc="Expected misalignment after initialization", argstr="--expectedScale %f" + ) expectedSkew = traits.Float( - desc="Expected misalignment after initialization", - argstr="--expectedSkew %f") + desc="Expected misalignment after initialization", argstr="--expectedSkew %f" + ) verbosityLevel = traits.Enum( "Silent", "Standard", "Verbose", desc="Level of detail of reporting progress", - argstr="--verbosityLevel %s") + argstr="--verbosityLevel %s", + ) sampleFromOverlap = traits.Bool( - desc= - "Limit metric evaluation to the fixed image region overlapped by the moving image", - argstr="--sampleFromOverlap ") + desc="Limit metric evaluation to the fixed image region overlapped by the moving image", + argstr="--sampleFromOverlap ", + ) 
fixedImageMask = File( desc="Image which defines a mask for the fixed image", exists=True, - argstr="--fixedImageMask %s") + argstr="--fixedImageMask %s", + ) randomNumberSeed = traits.Int( desc="Seed to generate a consistent random number sequence", - argstr="--randomNumberSeed %d") + argstr="--randomNumberSeed %d", + ) numberOfThreads = traits.Int( - desc="Number of CPU threads to use", argstr="--numberOfThreads %d") + desc="Number of CPU threads to use", argstr="--numberOfThreads %d" + ) minimizeMemory = traits.Bool( - desc= - "Reduce the amount of memory required at the cost of increased computation time", - argstr="--minimizeMemory ") + desc="Reduce the amount of memory required at the cost of increased computation time", + argstr="--minimizeMemory ", + ) interpolation = traits.Enum( "NearestNeighbor", "Linear", "BSpline", desc="Method for interpolation within the optimization process", - argstr="--interpolation %s") + argstr="--interpolation %s", + ) fixedLandmarks = InputMultiPath( traits.List(traits.Float(), minlen=3, maxlen=3), desc="Ordered list of landmarks in the fixed image", - argstr="--fixedLandmarks %s...") + argstr="--fixedLandmarks %s...", + ) movingLandmarks = InputMultiPath( traits.List(traits.Float(), minlen=3, maxlen=3), desc="Ordered list of landmarks in the moving image", - argstr="--movingLandmarks %s...") + argstr="--movingLandmarks %s...", + ) rigidMaxIterations = traits.Int( desc="Maximum number of rigid optimization iterations", - argstr="--rigidMaxIterations %d") + argstr="--rigidMaxIterations %d", + ) rigidSamplingRatio = traits.Float( - desc= - "Portion of the image to use in computing the metric during rigid registration", - argstr="--rigidSamplingRatio %f") + desc="Portion of the image to use in computing the metric during rigid registration", + argstr="--rigidSamplingRatio %f", + ) affineMaxIterations = traits.Int( desc="Maximum number of affine optimization iterations", - argstr="--affineMaxIterations %d") + argstr="--affineMaxIterations %d", + ) affineSamplingRatio = traits.Float( - desc= - "Portion of the image to use in computing the metric during affine registration", - argstr="--affineSamplingRatio %f") + desc="Portion of the image to use in computing the metric during affine registration", + argstr="--affineSamplingRatio %f", + ) bsplineMaxIterations = traits.Int( desc="Maximum number of bspline optimization iterations", - argstr="--bsplineMaxIterations %d") + argstr="--bsplineMaxIterations %d", + ) bsplineSamplingRatio = traits.Float( - desc= - "Portion of the image to use in computing the metric during BSpline registration", - argstr="--bsplineSamplingRatio %f") + desc="Portion of the image to use in computing the metric during BSpline registration", + argstr="--bsplineSamplingRatio %f", + ) controlPointSpacing = traits.Int( desc="Number of pixels between control points", - argstr="--controlPointSpacing %d") + argstr="--controlPointSpacing %d", + ) class ExpertAutomatedRegistrationOutputSpec(TraitedSpec): resampledImage = File(desc="Registration results", exists=True) saveTransform = File( - desc="Save the transform that results from registration", exists=True) + desc="Save the transform that results from registration", exists=True + ) class ExpertAutomatedRegistration(SEMLikeCommandLine): @@ -650,6 +682,6 @@ class ExpertAutomatedRegistration(SEMLikeCommandLine): output_spec = ExpertAutomatedRegistrationOutputSpec _cmd = "ExpertAutomatedRegistration " _outputs_filenames = { - 'resampledImage': 'resampledImage.nii', - 'saveTransform': 
'saveTransform.txt' + "resampledImage": "resampledImage.nii", + "saveTransform": "saveTransform.txt", } diff --git a/nipype/interfaces/slicer/legacy/segmentation.py b/nipype/interfaces/slicer/legacy/segmentation.py index 3500d50d50..118ffbcb77 100644 --- a/nipype/interfaces/slicer/legacy/segmentation.py +++ b/nipype/interfaces/slicer/legacy/segmentation.py @@ -3,39 +3,49 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os class OtsuThresholdSegmentationInputSpec(CommandLineInputSpec): brightObjects = traits.Bool( - desc= - "Segmenting bright objects on a dark background or dark objects on a bright background.", - argstr="--brightObjects ") + desc="Segmenting bright objects on a dark background or dark objects on a bright background.", + argstr="--brightObjects ", + ) numberOfBins = traits.Int( - desc= - "This is an advanced parameter. The number of bins in the histogram used to model the probability mass function of the two intensity distributions. Small numbers of bins may result in a more conservative threshold. The default should suffice for most applications. Experimentation is the only way to see the effect of varying this parameter.", - argstr="--numberOfBins %d") + desc="This is an advanced parameter. The number of bins in the histogram used to model the probability mass function of the two intensity distributions. Small numbers of bins may result in a more conservative threshold. The default should suffice for most applications. Experimentation is the only way to see the effect of varying this parameter.", + argstr="--numberOfBins %d", + ) faceConnected = traits.Bool( - desc= - "This is an advanced parameter. Adjacent voxels are face connected. This affects the connected component algorithm. If this parameter is false, more regions are likely to be identified.", - argstr="--faceConnected ") + desc="This is an advanced parameter. Adjacent voxels are face connected. This affects the connected component algorithm. If this parameter is false, more regions are likely to be identified.", + argstr="--faceConnected ", + ) minimumObjectSize = traits.Int( - desc= - "Minimum size of object to retain. This parameter can be used to get rid of small regions in noisy images.", - argstr="--minimumObjectSize %d") + desc="Minimum size of object to retain. 
This parameter can be used to get rid of small regions in noisy images.", + argstr="--minimumObjectSize %d", + ) inputVolume = File( - position=-2, - desc="Input volume to be segmented", - exists=True, - argstr="%s") + position=-2, desc="Input volume to be segmented", exists=True, argstr="%s" + ) outputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", - argstr="%s") + argstr="%s", + ) class OtsuThresholdSegmentationOutputSpec(TraitedSpec): @@ -62,4 +72,4 @@ class OtsuThresholdSegmentation(SEMLikeCommandLine): input_spec = OtsuThresholdSegmentationInputSpec output_spec = OtsuThresholdSegmentationOutputSpec _cmd = "OtsuThresholdSegmentation " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py index 2ca1242922..44857bf3a8 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py @@ -4,49 +4,33 @@ def test_AffineRegistration_inputs(): input_map = dict( - FixedImageFileName=dict( - argstr='%s', - extensions=None, - position=-2, - ), - MovingImageFileName=dict( - argstr='%s', - extensions=None, - position=-1, - ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixedsmoothingfactor=dict(argstr='--fixedsmoothingfactor %d', ), - histogrambins=dict(argstr='--histogrambins %d', ), - initialtransform=dict( - argstr='--initialtransform %s', - extensions=None, - ), - iterations=dict(argstr='--iterations %d', ), - movingsmoothingfactor=dict(argstr='--movingsmoothingfactor %d', ), - outputtransform=dict( - argstr='--outputtransform %s', - hash_files=False, - ), + FixedImageFileName=dict(argstr="%s", extensions=None, position=-2,), + MovingImageFileName=dict(argstr="%s", extensions=None, position=-1,), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + fixedsmoothingfactor=dict(argstr="--fixedsmoothingfactor %d",), + histogrambins=dict(argstr="--histogrambins %d",), + initialtransform=dict(argstr="--initialtransform %s", extensions=None,), + iterations=dict(argstr="--iterations %d",), + movingsmoothingfactor=dict(argstr="--movingsmoothingfactor %d",), + outputtransform=dict(argstr="--outputtransform %s", hash_files=False,), resampledmovingfilename=dict( - argstr='--resampledmovingfilename %s', - hash_files=False, + argstr="--resampledmovingfilename %s", hash_files=False, ), - spatialsamples=dict(argstr='--spatialsamples %d', ), - translationscale=dict(argstr='--translationscale %f', ), + spatialsamples=dict(argstr="--spatialsamples %d",), + translationscale=dict(argstr="--translationscale %f",), ) inputs = AffineRegistration.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_AffineRegistration_outputs(): output_map = dict( - outputtransform=dict(extensions=None, ), - resampledmovingfilename=dict(extensions=None, ), + outputtransform=dict(extensions=None,), + resampledmovingfilename=dict(extensions=None,), ) outputs = AffineRegistration.output_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py index 889992c9ec..7c777ebca3 
100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py @@ -4,55 +4,36 @@ def test_BSplineDeformableRegistration_inputs(): input_map = dict( - FixedImageFileName=dict( - argstr='%s', - extensions=None, - position=-2, - ), - MovingImageFileName=dict( - argstr='%s', - extensions=None, - position=-1, - ), - args=dict(argstr='%s', ), - constrain=dict(argstr='--constrain ', ), - default=dict(argstr='--default %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - gridSize=dict(argstr='--gridSize %d', ), - histogrambins=dict(argstr='--histogrambins %d', ), - initialtransform=dict( - argstr='--initialtransform %s', - extensions=None, - ), - iterations=dict(argstr='--iterations %d', ), - maximumDeformation=dict(argstr='--maximumDeformation %f', ), - outputtransform=dict( - argstr='--outputtransform %s', - hash_files=False, - ), - outputwarp=dict( - argstr='--outputwarp %s', - hash_files=False, - ), + FixedImageFileName=dict(argstr="%s", extensions=None, position=-2,), + MovingImageFileName=dict(argstr="%s", extensions=None, position=-1,), + args=dict(argstr="%s",), + constrain=dict(argstr="--constrain ",), + default=dict(argstr="--default %d",), + environ=dict(nohash=True, usedefault=True,), + gridSize=dict(argstr="--gridSize %d",), + histogrambins=dict(argstr="--histogrambins %d",), + initialtransform=dict(argstr="--initialtransform %s", extensions=None,), + iterations=dict(argstr="--iterations %d",), + maximumDeformation=dict(argstr="--maximumDeformation %f",), + outputtransform=dict(argstr="--outputtransform %s", hash_files=False,), + outputwarp=dict(argstr="--outputwarp %s", hash_files=False,), resampledmovingfilename=dict( - argstr='--resampledmovingfilename %s', - hash_files=False, + argstr="--resampledmovingfilename %s", hash_files=False, ), - spatialsamples=dict(argstr='--spatialsamples %d', ), + spatialsamples=dict(argstr="--spatialsamples %d",), ) inputs = BSplineDeformableRegistration.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BSplineDeformableRegistration_outputs(): output_map = dict( - outputtransform=dict(extensions=None, ), - outputwarp=dict(extensions=None, ), - resampledmovingfilename=dict(extensions=None, ), + outputtransform=dict(extensions=None,), + outputwarp=dict(extensions=None,), + resampledmovingfilename=dict(extensions=None,), ) outputs = BSplineDeformableRegistration.output_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py index 54dd8157f1..c4dc0f8969 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py @@ -4,31 +4,21 @@ def test_BSplineToDeformationField_inputs(): input_map = dict( - args=dict(argstr='%s', ), - defImage=dict( - argstr='--defImage %s', - hash_files=False, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - refImage=dict( - argstr='--refImage %s', - extensions=None, - ), - tfm=dict( - argstr='--tfm %s', - extensions=None, - ), + args=dict(argstr="%s",), + defImage=dict(argstr="--defImage %s", hash_files=False,), + environ=dict(nohash=True, usedefault=True,), + refImage=dict(argstr="--refImage %s", extensions=None,), + tfm=dict(argstr="--tfm 
%s", extensions=None,), ) inputs = BSplineToDeformationField.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BSplineToDeformationField_outputs(): - output_map = dict(defImage=dict(extensions=None, ), ) + output_map = dict(defImage=dict(extensions=None,),) outputs = BSplineToDeformationField.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py index 8289fc924b..6536d699b0 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py @@ -4,69 +4,47 @@ def test_ExpertAutomatedRegistration_inputs(): input_map = dict( - affineMaxIterations=dict(argstr='--affineMaxIterations %d', ), - affineSamplingRatio=dict(argstr='--affineSamplingRatio %f', ), - args=dict(argstr='%s', ), - bsplineMaxIterations=dict(argstr='--bsplineMaxIterations %d', ), - bsplineSamplingRatio=dict(argstr='--bsplineSamplingRatio %f', ), - controlPointSpacing=dict(argstr='--controlPointSpacing %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - expectedOffset=dict(argstr='--expectedOffset %f', ), - expectedRotation=dict(argstr='--expectedRotation %f', ), - expectedScale=dict(argstr='--expectedScale %f', ), - expectedSkew=dict(argstr='--expectedSkew %f', ), - fixedImage=dict( - argstr='%s', - extensions=None, - position=-2, - ), - fixedImageMask=dict( - argstr='--fixedImageMask %s', - extensions=None, - ), - fixedLandmarks=dict(argstr='--fixedLandmarks %s...', ), - initialization=dict(argstr='--initialization %s', ), - interpolation=dict(argstr='--interpolation %s', ), - loadTransform=dict( - argstr='--loadTransform %s', - extensions=None, - ), - metric=dict(argstr='--metric %s', ), - minimizeMemory=dict(argstr='--minimizeMemory ', ), - movingImage=dict( - argstr='%s', - extensions=None, - position=-1, - ), - movingLandmarks=dict(argstr='--movingLandmarks %s...', ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - randomNumberSeed=dict(argstr='--randomNumberSeed %d', ), - registration=dict(argstr='--registration %s', ), - resampledImage=dict( - argstr='--resampledImage %s', - hash_files=False, - ), - rigidMaxIterations=dict(argstr='--rigidMaxIterations %d', ), - rigidSamplingRatio=dict(argstr='--rigidSamplingRatio %f', ), - sampleFromOverlap=dict(argstr='--sampleFromOverlap ', ), - saveTransform=dict( - argstr='--saveTransform %s', - hash_files=False, - ), - verbosityLevel=dict(argstr='--verbosityLevel %s', ), + affineMaxIterations=dict(argstr="--affineMaxIterations %d",), + affineSamplingRatio=dict(argstr="--affineSamplingRatio %f",), + args=dict(argstr="%s",), + bsplineMaxIterations=dict(argstr="--bsplineMaxIterations %d",), + bsplineSamplingRatio=dict(argstr="--bsplineSamplingRatio %f",), + controlPointSpacing=dict(argstr="--controlPointSpacing %d",), + environ=dict(nohash=True, usedefault=True,), + expectedOffset=dict(argstr="--expectedOffset %f",), + expectedRotation=dict(argstr="--expectedRotation %f",), + expectedScale=dict(argstr="--expectedScale %f",), + expectedSkew=dict(argstr="--expectedSkew %f",), + fixedImage=dict(argstr="%s", extensions=None, position=-2,), + fixedImageMask=dict(argstr="--fixedImageMask %s", extensions=None,), + fixedLandmarks=dict(argstr="--fixedLandmarks 
%s...",), + initialization=dict(argstr="--initialization %s",), + interpolation=dict(argstr="--interpolation %s",), + loadTransform=dict(argstr="--loadTransform %s", extensions=None,), + metric=dict(argstr="--metric %s",), + minimizeMemory=dict(argstr="--minimizeMemory ",), + movingImage=dict(argstr="%s", extensions=None, position=-1,), + movingLandmarks=dict(argstr="--movingLandmarks %s...",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + randomNumberSeed=dict(argstr="--randomNumberSeed %d",), + registration=dict(argstr="--registration %s",), + resampledImage=dict(argstr="--resampledImage %s", hash_files=False,), + rigidMaxIterations=dict(argstr="--rigidMaxIterations %d",), + rigidSamplingRatio=dict(argstr="--rigidSamplingRatio %f",), + sampleFromOverlap=dict(argstr="--sampleFromOverlap ",), + saveTransform=dict(argstr="--saveTransform %s", hash_files=False,), + verbosityLevel=dict(argstr="--verbosityLevel %s",), ) inputs = ExpertAutomatedRegistration.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ExpertAutomatedRegistration_outputs(): output_map = dict( - resampledImage=dict(extensions=None, ), - saveTransform=dict(extensions=None, ), + resampledImage=dict(extensions=None,), saveTransform=dict(extensions=None,), ) outputs = ExpertAutomatedRegistration.output_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py index 454f645088..b19d3be344 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py @@ -4,56 +4,34 @@ def test_LinearRegistration_inputs(): input_map = dict( - FixedImageFileName=dict( - argstr='%s', - extensions=None, - position=-2, - ), - MovingImageFileName=dict( - argstr='%s', - extensions=None, - position=-1, - ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixedsmoothingfactor=dict(argstr='--fixedsmoothingfactor %d', ), - histogrambins=dict(argstr='--histogrambins %d', ), - initialtransform=dict( - argstr='--initialtransform %s', - extensions=None, - ), - iterations=dict( - argstr='--iterations %s', - sep=',', - ), - learningrate=dict( - argstr='--learningrate %s', - sep=',', - ), - movingsmoothingfactor=dict(argstr='--movingsmoothingfactor %d', ), - outputtransform=dict( - argstr='--outputtransform %s', - hash_files=False, - ), + FixedImageFileName=dict(argstr="%s", extensions=None, position=-2,), + MovingImageFileName=dict(argstr="%s", extensions=None, position=-1,), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + fixedsmoothingfactor=dict(argstr="--fixedsmoothingfactor %d",), + histogrambins=dict(argstr="--histogrambins %d",), + initialtransform=dict(argstr="--initialtransform %s", extensions=None,), + iterations=dict(argstr="--iterations %s", sep=",",), + learningrate=dict(argstr="--learningrate %s", sep=",",), + movingsmoothingfactor=dict(argstr="--movingsmoothingfactor %d",), + outputtransform=dict(argstr="--outputtransform %s", hash_files=False,), resampledmovingfilename=dict( - argstr='--resampledmovingfilename %s', - hash_files=False, + argstr="--resampledmovingfilename %s", hash_files=False, ), - spatialsamples=dict(argstr='--spatialsamples %d', ), - translationscale=dict(argstr='--translationscale %f', ), + spatialsamples=dict(argstr="--spatialsamples 
%d",), + translationscale=dict(argstr="--translationscale %f",), ) inputs = LinearRegistration.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_LinearRegistration_outputs(): output_map = dict( - outputtransform=dict(extensions=None, ), - resampledmovingfilename=dict(extensions=None, ), + outputtransform=dict(extensions=None,), + resampledmovingfilename=dict(extensions=None,), ) outputs = LinearRegistration.output_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py index 605db755f6..ad7f89e35d 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py @@ -4,49 +4,30 @@ def test_MultiResolutionAffineRegistration_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixedImage=dict( - argstr='%s', - extensions=None, - position=-2, - ), - fixedImageMask=dict( - argstr='--fixedImageMask %s', - extensions=None, - ), - fixedImageROI=dict(argstr='--fixedImageROI %s', ), - metricTolerance=dict(argstr='--metricTolerance %f', ), - movingImage=dict( - argstr='%s', - extensions=None, - position=-1, - ), - numIterations=dict(argstr='--numIterations %d', ), - numLineIterations=dict(argstr='--numLineIterations %d', ), - resampledImage=dict( - argstr='--resampledImage %s', - hash_files=False, - ), - saveTransform=dict( - argstr='--saveTransform %s', - hash_files=False, - ), - stepSize=dict(argstr='--stepSize %f', ), - stepTolerance=dict(argstr='--stepTolerance %f', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + fixedImage=dict(argstr="%s", extensions=None, position=-2,), + fixedImageMask=dict(argstr="--fixedImageMask %s", extensions=None,), + fixedImageROI=dict(argstr="--fixedImageROI %s",), + metricTolerance=dict(argstr="--metricTolerance %f",), + movingImage=dict(argstr="%s", extensions=None, position=-1,), + numIterations=dict(argstr="--numIterations %d",), + numLineIterations=dict(argstr="--numLineIterations %d",), + resampledImage=dict(argstr="--resampledImage %s", hash_files=False,), + saveTransform=dict(argstr="--saveTransform %s", hash_files=False,), + stepSize=dict(argstr="--stepSize %f",), + stepTolerance=dict(argstr="--stepTolerance %f",), ) inputs = MultiResolutionAffineRegistration.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MultiResolutionAffineRegistration_outputs(): output_map = dict( - resampledImage=dict(extensions=None, ), - saveTransform=dict(extensions=None, ), + resampledImage=dict(extensions=None,), saveTransform=dict(extensions=None,), ) outputs = MultiResolutionAffineRegistration.output_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py index 50782aec03..853fbb5caa 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py @@ -4,35 +4,23 @@ def test_OtsuThresholdImageFilter_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - 
nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='%s', - extensions=None, - position=-2, - ), - insideValue=dict(argstr='--insideValue %d', ), - numberOfBins=dict(argstr='--numberOfBins %d', ), - outputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - outsideValue=dict(argstr='--outsideValue %d', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="%s", extensions=None, position=-2,), + insideValue=dict(argstr="--insideValue %d",), + numberOfBins=dict(argstr="--numberOfBins %d",), + outputVolume=dict(argstr="%s", hash_files=False, position=-1,), + outsideValue=dict(argstr="--outsideValue %d",), ) inputs = OtsuThresholdImageFilter.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_OtsuThresholdImageFilter_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict(extensions=None, position=-1,),) outputs = OtsuThresholdImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py index 5340c37639..561bb246f2 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py @@ -4,36 +4,24 @@ def test_OtsuThresholdSegmentation_inputs(): input_map = dict( - args=dict(argstr='%s', ), - brightObjects=dict(argstr='--brightObjects ', ), - environ=dict( - nohash=True, - usedefault=True, - ), - faceConnected=dict(argstr='--faceConnected ', ), - inputVolume=dict( - argstr='%s', - extensions=None, - position=-2, - ), - minimumObjectSize=dict(argstr='--minimumObjectSize %d', ), - numberOfBins=dict(argstr='--numberOfBins %d', ), - outputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), + args=dict(argstr="%s",), + brightObjects=dict(argstr="--brightObjects ",), + environ=dict(nohash=True, usedefault=True,), + faceConnected=dict(argstr="--faceConnected ",), + inputVolume=dict(argstr="%s", extensions=None, position=-2,), + minimumObjectSize=dict(argstr="--minimumObjectSize %d",), + numberOfBins=dict(argstr="--numberOfBins %d",), + outputVolume=dict(argstr="%s", hash_files=False, position=-1,), ) inputs = OtsuThresholdSegmentation.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_OtsuThresholdSegmentation_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict(extensions=None, position=-1,),) outputs = OtsuThresholdSegmentation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py b/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py index 49c2ac2ffb..a95967feca 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py @@ -4,37 +4,22 @@ def test_ResampleScalarVolume_inputs(): input_map = dict( - InputVolume=dict( - argstr='%s', - extensions=None, - position=-2, - ), - OutputVolume=dict( - argstr='%s', - hash_files=False, - 
position=-1, - ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - interpolation=dict(argstr='--interpolation %s', ), - spacing=dict( - argstr='--spacing %s', - sep=',', - ), + InputVolume=dict(argstr="%s", extensions=None, position=-2,), + OutputVolume=dict(argstr="%s", hash_files=False, position=-1,), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + interpolation=dict(argstr="--interpolation %s",), + spacing=dict(argstr="--spacing %s", sep=",",), ) inputs = ResampleScalarVolume.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ResampleScalarVolume_outputs(): - output_map = dict(OutputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputVolume=dict(extensions=None, position=-1,),) outputs = ResampleScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py index 92a8af7dfb..ea4b5eda53 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py @@ -4,57 +4,35 @@ def test_RigidRegistration_inputs(): input_map = dict( - FixedImageFileName=dict( - argstr='%s', - extensions=None, - position=-2, - ), - MovingImageFileName=dict( - argstr='%s', - extensions=None, - position=-1, - ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixedsmoothingfactor=dict(argstr='--fixedsmoothingfactor %d', ), - histogrambins=dict(argstr='--histogrambins %d', ), - initialtransform=dict( - argstr='--initialtransform %s', - extensions=None, - ), - iterations=dict( - argstr='--iterations %s', - sep=',', - ), - learningrate=dict( - argstr='--learningrate %s', - sep=',', - ), - movingsmoothingfactor=dict(argstr='--movingsmoothingfactor %d', ), - outputtransform=dict( - argstr='--outputtransform %s', - hash_files=False, - ), + FixedImageFileName=dict(argstr="%s", extensions=None, position=-2,), + MovingImageFileName=dict(argstr="%s", extensions=None, position=-1,), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + fixedsmoothingfactor=dict(argstr="--fixedsmoothingfactor %d",), + histogrambins=dict(argstr="--histogrambins %d",), + initialtransform=dict(argstr="--initialtransform %s", extensions=None,), + iterations=dict(argstr="--iterations %s", sep=",",), + learningrate=dict(argstr="--learningrate %s", sep=",",), + movingsmoothingfactor=dict(argstr="--movingsmoothingfactor %d",), + outputtransform=dict(argstr="--outputtransform %s", hash_files=False,), resampledmovingfilename=dict( - argstr='--resampledmovingfilename %s', - hash_files=False, + argstr="--resampledmovingfilename %s", hash_files=False, ), - spatialsamples=dict(argstr='--spatialsamples %d', ), - testingmode=dict(argstr='--testingmode ', ), - translationscale=dict(argstr='--translationscale %f', ), + spatialsamples=dict(argstr="--spatialsamples %d",), + testingmode=dict(argstr="--testingmode ",), + translationscale=dict(argstr="--translationscale %f",), ) inputs = RigidRegistration.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_RigidRegistration_outputs(): output_map = dict( - 
outputtransform=dict(extensions=None, ), - resampledmovingfilename=dict(extensions=None, ), + outputtransform=dict(extensions=None,), + resampledmovingfilename=dict(extensions=None,), ) outputs = RigidRegistration.output_spec() diff --git a/nipype/interfaces/slicer/quantification/changequantification.py b/nipype/interfaces/slicer/quantification/changequantification.py index 5abf1b1287..c0e36b9bf9 100644 --- a/nipype/interfaces/slicer/quantification/changequantification.py +++ b/nipype/interfaces/slicer/quantification/changequantification.py @@ -3,55 +3,68 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os class IntensityDifferenceMetricInputSpec(CommandLineInputSpec): sensitivityThreshold = traits.Float( - desc= - "This parameter should be between 0 and 1, and defines how sensitive the metric should be to the intensity changes.", - argstr="--sensitivityThreshold %f") + desc="This parameter should be between 0 and 1, and defines how sensitive the metric should be to the intensity changes.", + argstr="--sensitivityThreshold %f", + ) changingBandSize = traits.Int( - desc= - "How far (in mm) from the boundary of the segmentation should the intensity changes be considered.", - argstr="--changingBandSize %d") + desc="How far (in mm) from the boundary of the segmentation should the intensity changes be considered.", + argstr="--changingBandSize %d", + ) baselineVolume = File( - position=-4, - desc="Baseline volume to be compared to", - exists=True, - argstr="%s") + position=-4, desc="Baseline volume to be compared to", exists=True, argstr="%s" + ) baselineSegmentationVolume = File( position=-3, - desc= - "Label volume that contains segmentation of the structure of interest in the baseline volume.", + desc="Label volume that contains segmentation of the structure of interest in the baseline volume.", exists=True, - argstr="%s") + argstr="%s", + ) followupVolume = File( position=-2, desc="Followup volume to be compare to the baseline", exists=True, - argstr="%s") + argstr="%s", + ) outputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Output volume to keep the results of change quantification.", - argstr="%s") + argstr="%s", + ) reportFileName = traits.Either( traits.Bool, File(), hash_files=False, desc="Report file name", - argstr="--reportFileName %s") + argstr="--reportFileName %s", + ) class IntensityDifferenceMetricOutputSpec(TraitedSpec): outputVolume = File( position=-1, desc="Output volume to keep the results of change quantification.", - exists=True) + exists=True, + ) reportFileName = File(desc="Report file name", exists=True) @@ -81,6 +94,6 @@ class IntensityDifferenceMetric(SEMLikeCommandLine): output_spec = IntensityDifferenceMetricOutputSpec _cmd = "IntensityDifferenceMetric " _outputs_filenames = { - 'outputVolume': 'outputVolume.nii', - 'reportFileName': 'reportFileName' + "outputVolume": "outputVolume.nii", + "reportFileName": "reportFileName", } diff --git a/nipype/interfaces/slicer/quantification/petstandarduptakevaluecomputation.py 
b/nipype/interfaces/slicer/quantification/petstandarduptakevaluecomputation.py index 0edfca3fbb..9cf01c5359 100644 --- a/nipype/interfaces/slicer/quantification/petstandarduptakevaluecomputation.py +++ b/nipype/interfaces/slicer/quantification/petstandarduptakevaluecomputation.py @@ -3,53 +3,67 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os class PETStandardUptakeValueComputationInputSpec(CommandLineInputSpec): petDICOMPath = Directory( - desc= - "Input path to a directory containing a PET volume containing DICOM header information for SUV computation", + desc="Input path to a directory containing a PET volume containing DICOM header information for SUV computation", exists=True, - argstr="--petDICOMPath %s") + argstr="--petDICOMPath %s", + ) petVolume = File( - desc= - "Input PET volume for SUVbw computation (must be the same volume as pointed to by the DICOM path!).", + desc="Input PET volume for SUVbw computation (must be the same volume as pointed to by the DICOM path!).", exists=True, - argstr="--petVolume %s") + argstr="--petVolume %s", + ) labelMap = File( desc="Input label volume containing the volumes of interest", exists=True, - argstr="--labelMap %s") + argstr="--labelMap %s", + ) color = File( desc="Color table to to map labels to colors and names", exists=True, - argstr="--color %s") + argstr="--color %s", + ) csvFile = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "A file holding the output SUV values in comma separated lines, one per label. Optional.", - argstr="--csvFile %s") + desc="A file holding the output SUV values in comma separated lines, one per label. Optional.", + argstr="--csvFile %s", + ) OutputLabel = traits.Str( desc="List of labels for which SUV values were computed", - argstr="--OutputLabel %s") + argstr="--OutputLabel %s", + ) OutputLabelValue = traits.Str( desc="List of label values for which SUV values were computed", - argstr="--OutputLabelValue %s") + argstr="--OutputLabelValue %s", + ) SUVMax = traits.Str(desc="SUV max for each label", argstr="--SUVMax %s") SUVMean = traits.Str(desc="SUV mean for each label", argstr="--SUVMean %s") - SUVMin = traits.Str( - desc="SUV minimum for each label", argstr="--SUVMin %s") + SUVMin = traits.Str(desc="SUV minimum for each label", argstr="--SUVMin %s") class PETStandardUptakeValueComputationOutputSpec(TraitedSpec): csvFile = File( - desc= - "A file holding the output SUV values in comma separated lines, one per label. Optional.", - exists=True) + desc="A file holding the output SUV values in comma separated lines, one per label. 
Optional.", + exists=True, + ) class PETStandardUptakeValueComputation(SEMLikeCommandLine): @@ -72,4 +86,4 @@ class PETStandardUptakeValueComputation(SEMLikeCommandLine): input_spec = PETStandardUptakeValueComputationInputSpec output_spec = PETStandardUptakeValueComputationOutputSpec _cmd = "PETStandardUptakeValueComputation " - _outputs_filenames = {'csvFile': 'csvFile.csv'} + _outputs_filenames = {"csvFile": "csvFile.csv"} diff --git a/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py b/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py index 6462d2ff48..512991571e 100644 --- a/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py +++ b/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py @@ -4,50 +4,27 @@ def test_IntensityDifferenceMetric_inputs(): input_map = dict( - args=dict(argstr='%s', ), - baselineSegmentationVolume=dict( - argstr='%s', - extensions=None, - position=-3, - ), - baselineVolume=dict( - argstr='%s', - extensions=None, - position=-4, - ), - changingBandSize=dict(argstr='--changingBandSize %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - followupVolume=dict( - argstr='%s', - extensions=None, - position=-2, - ), - outputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - reportFileName=dict( - argstr='--reportFileName %s', - hash_files=False, - ), - sensitivityThreshold=dict(argstr='--sensitivityThreshold %f', ), + args=dict(argstr="%s",), + baselineSegmentationVolume=dict(argstr="%s", extensions=None, position=-3,), + baselineVolume=dict(argstr="%s", extensions=None, position=-4,), + changingBandSize=dict(argstr="--changingBandSize %d",), + environ=dict(nohash=True, usedefault=True,), + followupVolume=dict(argstr="%s", extensions=None, position=-2,), + outputVolume=dict(argstr="%s", hash_files=False, position=-1,), + reportFileName=dict(argstr="--reportFileName %s", hash_files=False,), + sensitivityThreshold=dict(argstr="--sensitivityThreshold %f",), ) inputs = IntensityDifferenceMetric.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_IntensityDifferenceMetric_outputs(): output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), - reportFileName=dict(extensions=None, ), + outputVolume=dict(extensions=None, position=-1,), + reportFileName=dict(extensions=None,), ) outputs = IntensityDifferenceMetric.output_spec() diff --git a/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py b/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py index c30fc0c0cf..61ee94ec6e 100644 --- a/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py +++ b/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py @@ -4,41 +4,28 @@ def test_PETStandardUptakeValueComputation_inputs(): input_map = dict( - OutputLabel=dict(argstr='--OutputLabel %s', ), - OutputLabelValue=dict(argstr='--OutputLabelValue %s', ), - SUVMax=dict(argstr='--SUVMax %s', ), - SUVMean=dict(argstr='--SUVMean %s', ), - SUVMin=dict(argstr='--SUVMin %s', ), - args=dict(argstr='%s', ), - color=dict( - argstr='--color %s', - extensions=None, - ), - csvFile=dict( - argstr='--csvFile %s', - hash_files=False, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - 
labelMap=dict( - argstr='--labelMap %s', - extensions=None, - ), - petDICOMPath=dict(argstr='--petDICOMPath %s', ), - petVolume=dict( - argstr='--petVolume %s', - extensions=None, - ), + OutputLabel=dict(argstr="--OutputLabel %s",), + OutputLabelValue=dict(argstr="--OutputLabelValue %s",), + SUVMax=dict(argstr="--SUVMax %s",), + SUVMean=dict(argstr="--SUVMean %s",), + SUVMin=dict(argstr="--SUVMin %s",), + args=dict(argstr="%s",), + color=dict(argstr="--color %s", extensions=None,), + csvFile=dict(argstr="--csvFile %s", hash_files=False,), + environ=dict(nohash=True, usedefault=True,), + labelMap=dict(argstr="--labelMap %s", extensions=None,), + petDICOMPath=dict(argstr="--petDICOMPath %s",), + petVolume=dict(argstr="--petVolume %s", extensions=None,), ) inputs = PETStandardUptakeValueComputation.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_PETStandardUptakeValueComputation_outputs(): - output_map = dict(csvFile=dict(extensions=None, ), ) + output_map = dict(csvFile=dict(extensions=None,),) outputs = PETStandardUptakeValueComputation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/registration/__init__.py b/nipype/interfaces/slicer/registration/__init__.py index 2d03aabe03..faa3c92b2f 100644 --- a/nipype/interfaces/slicer/registration/__init__.py +++ b/nipype/interfaces/slicer/registration/__init__.py @@ -1,5 +1,9 @@ # -*- coding: utf-8 -*- -from .specialized import (ACPCTransform, FiducialRegistration, - VBRAINSDemonWarp, BRAINSDemonWarp) +from .specialized import ( + ACPCTransform, + FiducialRegistration, + VBRAINSDemonWarp, + BRAINSDemonWarp, +) from .brainsresample import BRAINSResample from .brainsfit import BRAINSFit diff --git a/nipype/interfaces/slicer/registration/brainsfit.py b/nipype/interfaces/slicer/registration/brainsfit.py index adbd733976..2ca7f09d3c 100644 --- a/nipype/interfaces/slicer/registration/brainsfit.py +++ b/nipype/interfaces/slicer/registration/brainsfit.py @@ -3,121 +3,134 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os class BRAINSFitInputSpec(CommandLineInputSpec): fixedVolume = File( - desc= - "The fixed image for registration by mutual information optimization.", + desc="The fixed image for registration by mutual information optimization.", exists=True, - argstr="--fixedVolume %s") + argstr="--fixedVolume %s", + ) movingVolume = File( - desc= - "The moving image for registration by mutual information optimization.", + desc="The moving image for registration by mutual information optimization.", exists=True, - argstr="--movingVolume %s") + argstr="--movingVolume %s", + ) bsplineTransform = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "(optional) Filename to which save the estimated transform. NOTE: You must set at least one output object (either a deformed image or a transform. 
NOTE: USE THIS ONLY IF THE FINAL TRANSFORM IS BSpline", - argstr="--bsplineTransform %s") + desc="(optional) Filename to which save the estimated transform. NOTE: You must set at least one output object (either a deformed image or a transform. NOTE: USE THIS ONLY IF THE FINAL TRANSFORM IS BSpline", + argstr="--bsplineTransform %s", + ) linearTransform = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "(optional) Filename to which save the estimated transform. NOTE: You must set at least one output object (either a deformed image or a transform. NOTE: USE THIS ONLY IF THE FINAL TRANSFORM IS ---NOT--- BSpline", - argstr="--linearTransform %s") + desc="(optional) Filename to which save the estimated transform. NOTE: You must set at least one output object (either a deformed image or a transform. NOTE: USE THIS ONLY IF THE FINAL TRANSFORM IS ---NOT--- BSpline", + argstr="--linearTransform %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "(optional) Output image for registration. NOTE: You must select either the outputTransform or the outputVolume option.", - argstr="--outputVolume %s") + desc="(optional) Output image for registration. NOTE: You must select either the outputTransform or the outputVolume option.", + argstr="--outputVolume %s", + ) initialTransform = File( - desc= - "Filename of transform used to initialize the registration. This CAN NOT be used with either CenterOfHeadLAlign, MomentsAlign, GeometryAlign, or initialTransform file.", + desc="Filename of transform used to initialize the registration. This CAN NOT be used with either CenterOfHeadLAlign, MomentsAlign, GeometryAlign, or initialTransform file.", exists=True, - argstr="--initialTransform %s") + argstr="--initialTransform %s", + ) initializeTransformMode = traits.Enum( "Off", "useMomentsAlign", "useCenterOfHeadAlign", "useGeometryAlign", "useCenterOfROIAlign", - desc= - "Determine how to initialize the transform center. GeometryAlign on assumes that the center of the voxel lattice of the images represent similar structures. MomentsAlign assumes that the center of mass of the images represent similar structures. useCenterOfHeadAlign attempts to use the top of head and shape of neck to drive a center of mass estimate. Off assumes that the physical space of the images are close, and that centering in terms of the image Origins is a good starting point. This flag is mutually exclusive with the initialTransform flag.", - argstr="--initializeTransformMode %s") + desc="Determine how to initialize the transform center. GeometryAlign on assumes that the center of the voxel lattice of the images represent similar structures. MomentsAlign assumes that the center of mass of the images represent similar structures. useCenterOfHeadAlign attempts to use the top of head and shape of neck to drive a center of mass estimate. Off assumes that the physical space of the images are close, and that centering in terms of the image Origins is a good starting point. This flag is mutually exclusive with the initialTransform flag.", + argstr="--initializeTransformMode %s", + ) useRigid = traits.Bool( - desc= - "Perform a rigid registration as part of the sequential registration steps. This family of options superceeds the use of transformType if any of them are set.", - argstr="--useRigid ") + desc="Perform a rigid registration as part of the sequential registration steps. 
This family of options superceeds the use of transformType if any of them are set.", + argstr="--useRigid ", + ) useScaleVersor3D = traits.Bool( - desc= - "Perform a ScaleVersor3D registration as part of the sequential registration steps. This family of options superceeds the use of transformType if any of them are set.", - argstr="--useScaleVersor3D ") + desc="Perform a ScaleVersor3D registration as part of the sequential registration steps. This family of options superceeds the use of transformType if any of them are set.", + argstr="--useScaleVersor3D ", + ) useScaleSkewVersor3D = traits.Bool( - desc= - "Perform a ScaleSkewVersor3D registration as part of the sequential registration steps. This family of options superceeds the use of transformType if any of them are set.", - argstr="--useScaleSkewVersor3D ") + desc="Perform a ScaleSkewVersor3D registration as part of the sequential registration steps. This family of options superceeds the use of transformType if any of them are set.", + argstr="--useScaleSkewVersor3D ", + ) useAffine = traits.Bool( - desc= - "Perform an Affine registration as part of the sequential registration steps. This family of options superceeds the use of transformType if any of them are set.", - argstr="--useAffine ") + desc="Perform an Affine registration as part of the sequential registration steps. This family of options superceeds the use of transformType if any of them are set.", + argstr="--useAffine ", + ) useBSpline = traits.Bool( - desc= - "Perform a BSpline registration as part of the sequential registration steps. This family of options superceeds the use of transformType if any of them are set.", - argstr="--useBSpline ") + desc="Perform a BSpline registration as part of the sequential registration steps. This family of options superceeds the use of transformType if any of them are set.", + argstr="--useBSpline ", + ) numberOfSamples = traits.Int( - desc= - "The number of voxels sampled for mutual information computation. Increase this for a slower, more careful fit. You can also limit the sampling focus with ROI masks and ROIAUTO mask generation.", - argstr="--numberOfSamples %d") + desc="The number of voxels sampled for mutual information computation. Increase this for a slower, more careful fit. You can also limit the sampling focus with ROI masks and ROIAUTO mask generation.", + argstr="--numberOfSamples %d", + ) splineGridSize = InputMultiPath( traits.Int, - desc= - "The number of subdivisions of the BSpline Grid to be centered on the image space. Each dimension must have at least 3 subdivisions for the BSpline to be correctly computed. ", + desc="The number of subdivisions of the BSpline Grid to be centered on the image space. Each dimension must have at least 3 subdivisions for the BSpline to be correctly computed. ", sep=",", - argstr="--splineGridSize %s") + argstr="--splineGridSize %s", + ) numberOfIterations = InputMultiPath( traits.Int, - desc= - "The maximum number of iterations to try before failing to converge. Use an explicit limit like 500 or 1000 to manage risk of divergence", + desc="The maximum number of iterations to try before failing to converge. Use an explicit limit like 500 or 1000 to manage risk of divergence", sep=",", - argstr="--numberOfIterations %s") + argstr="--numberOfIterations %s", + ) maskProcessingMode = traits.Enum( "NOMASK", "ROIAUTO", "ROI", - desc= - "What mode to use for using the masks. If ROIAUTO is choosen, then the mask is implicitly defined using a otsu forground and hole filling algorithm. 
The Region Of Interest mode (choose ROI) uses the masks to define what parts of the image should be used for computing the transform.", - argstr="--maskProcessingMode %s") + desc="What mode to use for using the masks. If ROIAUTO is choosen, then the mask is implicitly defined using a otsu forground and hole filling algorithm. The Region Of Interest mode (choose ROI) uses the masks to define what parts of the image should be used for computing the transform.", + argstr="--maskProcessingMode %s", + ) fixedBinaryVolume = File( desc="Fixed Image binary mask volume, ONLY FOR MANUAL ROI mode.", exists=True, - argstr="--fixedBinaryVolume %s") + argstr="--fixedBinaryVolume %s", + ) movingBinaryVolume = File( desc="Moving Image binary mask volume, ONLY FOR MANUAL ROI mode.", exists=True, - argstr="--movingBinaryVolume %s") + argstr="--movingBinaryVolume %s", + ) outputFixedVolumeROI = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "The ROI automatically found in fixed image, ONLY FOR ROIAUTO mode.", - argstr="--outputFixedVolumeROI %s") + desc="The ROI automatically found in fixed image, ONLY FOR ROIAUTO mode.", + argstr="--outputFixedVolumeROI %s", + ) outputMovingVolumeROI = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "The ROI automatically found in moving image, ONLY FOR ROIAUTO mode.", - argstr="--outputMovingVolumeROI %s") + desc="The ROI automatically found in moving image, ONLY FOR ROIAUTO mode.", + argstr="--outputMovingVolumeROI %s", + ) outputVolumePixelType = traits.Enum( "float", "short", @@ -125,20 +138,21 @@ class BRAINSFitInputSpec(CommandLineInputSpec): "int", "uint", "uchar", - desc= - "The output image Pixel Type is the scalar datatype for representation of the Output Volume.", - argstr="--outputVolumePixelType %s") + desc="The output image Pixel Type is the scalar datatype for representation of the Output Volume.", + argstr="--outputVolumePixelType %s", + ) backgroundFillValue = traits.Float( desc="Background fill value for output image.", - argstr="--backgroundFillValue %f") + argstr="--backgroundFillValue %f", + ) maskInferiorCutOffFromCenter = traits.Float( - desc= - "For use with --useCenterOfHeadAlign (and --maskProcessingMode ROIAUTO): the cut-off below the image centers, in millimeters, ", - argstr="--maskInferiorCutOffFromCenter %f") + desc="For use with --useCenterOfHeadAlign (and --maskProcessingMode ROIAUTO): the cut-off below the image centers, in millimeters, ", + argstr="--maskInferiorCutOffFromCenter %f", + ) scaleOutputValues = traits.Bool( - desc= - "If true, and the voxel values do not fit within the minimum and maximum values of the desired outputVolumePixelType, then linearly scale the min/max output image voxel values to fit within the min/max range of the outputVolumePixelType.", - argstr="--scaleOutputValues ") + desc="If true, and the voxel values do not fit within the minimum and maximum values of the desired outputVolumePixelType, then linearly scale the min/max output image voxel values to fit within the min/max range of the outputVolumePixelType.", + argstr="--scaleOutputValues ", + ) interpolationMode = traits.Enum( "NearestNeighbor", "Linear", @@ -150,201 +164,202 @@ class BRAINSFitInputSpec(CommandLineInputSpec): "Welch", "Lanczos", "Blackman", - desc= - "Type of interpolation to be used when applying transform to moving volume. Options are Linear, NearestNeighbor, BSpline, WindowedSinc, or ResampleInPlace. 
The ResampleInPlace option will create an image with the same discrete voxel values and will adjust the origin and direction of the physical space interpretation.", - argstr="--interpolationMode %s") + desc="Type of interpolation to be used when applying transform to moving volume. Options are Linear, NearestNeighbor, BSpline, WindowedSinc, or ResampleInPlace. The ResampleInPlace option will create an image with the same discrete voxel values and will adjust the origin and direction of the physical space interpretation.", + argstr="--interpolationMode %s", + ) minimumStepLength = InputMultiPath( traits.Float, - desc= - "Each step in the optimization takes steps at least this big. When none are possible, registration is complete.", + desc="Each step in the optimization takes steps at least this big. When none are possible, registration is complete.", sep=",", - argstr="--minimumStepLength %s") + argstr="--minimumStepLength %s", + ) translationScale = traits.Float( - desc= - "How much to scale up changes in position compared to unit rotational changes in radians -- decrease this to put more rotation in the search pattern.", - argstr="--translationScale %f") + desc="How much to scale up changes in position compared to unit rotational changes in radians -- decrease this to put more rotation in the search pattern.", + argstr="--translationScale %f", + ) reproportionScale = traits.Float( - desc= - "ScaleVersor3D 'Scale' compensation factor. Increase this to put more rescaling in a ScaleVersor3D or ScaleSkewVersor3D search pattern. 1.0 works well with a translationScale of 1000.0", - argstr="--reproportionScale %f") + desc="ScaleVersor3D 'Scale' compensation factor. Increase this to put more rescaling in a ScaleVersor3D or ScaleSkewVersor3D search pattern. 1.0 works well with a translationScale of 1000.0", + argstr="--reproportionScale %f", + ) skewScale = traits.Float( - desc= - "ScaleSkewVersor3D Skew compensation factor. Increase this to put more skew in a ScaleSkewVersor3D search pattern. 1.0 works well with a translationScale of 1000.0", - argstr="--skewScale %f") + desc="ScaleSkewVersor3D Skew compensation factor. Increase this to put more skew in a ScaleSkewVersor3D search pattern. 1.0 works well with a translationScale of 1000.0", + argstr="--skewScale %f", + ) maxBSplineDisplacement = traits.Float( - desc= - " Sets the maximum allowed displacements in image physical coordinates for BSpline control grid along each axis. A value of 0.0 indicates that the problem should be unbounded. NOTE: This only constrains the BSpline portion, and does not limit the displacement from the associated bulk transform. This can lead to a substantial reduction in computation time in the BSpline optimizer., ", - argstr="--maxBSplineDisplacement %f") + desc=" Sets the maximum allowed displacements in image physical coordinates for BSpline control grid along each axis. A value of 0.0 indicates that the problem should be unbounded. NOTE: This only constrains the BSpline portion, and does not limit the displacement from the associated bulk transform. This can lead to a substantial reduction in computation time in the BSpline optimizer., ", + argstr="--maxBSplineDisplacement %f", + ) histogramMatch = traits.Bool( - desc= - "Histogram Match the input images. This is suitable for images of the same modality that may have different absolute scales, but the same overall intensity profile. 
Do NOT use if registering images from different modailties.", - argstr="--histogramMatch ") + desc="Histogram Match the input images. This is suitable for images of the same modality that may have different absolute scales, but the same overall intensity profile. Do NOT use if registering images from different modailties.", + argstr="--histogramMatch ", + ) numberOfHistogramBins = traits.Int( - desc="The number of histogram levels", - argstr="--numberOfHistogramBins %d") + desc="The number of histogram levels", argstr="--numberOfHistogramBins %d" + ) numberOfMatchPoints = traits.Int( - desc="the number of match points", argstr="--numberOfMatchPoints %d") + desc="the number of match points", argstr="--numberOfMatchPoints %d" + ) strippedOutputTransform = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "File name for the rigid component of the estimated affine transform. Can be used to rigidly register the moving image to the fixed image. NOTE: This value is overwritten if either bsplineTransform or linearTransform is set.", - argstr="--strippedOutputTransform %s") + desc="File name for the rigid component of the estimated affine transform. Can be used to rigidly register the moving image to the fixed image. NOTE: This value is overwritten if either bsplineTransform or linearTransform is set.", + argstr="--strippedOutputTransform %s", + ) transformType = InputMultiPath( traits.Str, - desc= - "Specifies a list of registration types to be used. The valid types are, Rigid, ScaleVersor3D, ScaleSkewVersor3D, Affine, and BSpline. Specifiying more than one in a comma separated list will initialize the next stage with the previous results. If registrationClass flag is used, it overrides this parameter setting.", + desc="Specifies a list of registration types to be used. The valid types are, Rigid, ScaleVersor3D, ScaleSkewVersor3D, Affine, and BSpline. Specifiying more than one in a comma separated list will initialize the next stage with the previous results. If registrationClass flag is used, it overrides this parameter setting.", sep=",", - argstr="--transformType %s") + argstr="--transformType %s", + ) outputTransform = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "(optional) Filename to which save the (optional) estimated transform. NOTE: You must select either the outputTransform or the outputVolume option.", - argstr="--outputTransform %s") + desc="(optional) Filename to which save the (optional) estimated transform. 
NOTE: You must select either the outputTransform or the outputVolume option.", + argstr="--outputTransform %s", + ) fixedVolumeTimeIndex = traits.Int( - desc= - "The index in the time series for the 3D fixed image to fit, if 4-dimensional.", - argstr="--fixedVolumeTimeIndex %d") + desc="The index in the time series for the 3D fixed image to fit, if 4-dimensional.", + argstr="--fixedVolumeTimeIndex %d", + ) movingVolumeTimeIndex = traits.Int( - desc= - "The index in the time series for the 3D moving image to fit, if 4-dimensional.", - argstr="--movingVolumeTimeIndex %d") + desc="The index in the time series for the 3D moving image to fit, if 4-dimensional.", + argstr="--movingVolumeTimeIndex %d", + ) medianFilterSize = InputMultiPath( traits.Int, - desc= - "The radius for the optional MedianImageFilter preprocessing in all 3 directions.", + desc="The radius for the optional MedianImageFilter preprocessing in all 3 directions.", sep=",", - argstr="--medianFilterSize %s") + argstr="--medianFilterSize %s", + ) removeIntensityOutliers = traits.Float( - desc= - "The half percentage to decide outliers of image intensities. The default value is zero, which means no outlier removal. If the value of 0.005 is given, the moduel will throw away 0.005 % of both tails, so 0.01% of intensities in total would be ignored in its statistic calculation. ", - argstr="--removeIntensityOutliers %f") + desc="The half percentage to decide outliers of image intensities. The default value is zero, which means no outlier removal. If the value of 0.005 is given, the moduel will throw away 0.005 % of both tails, so 0.01% of intensities in total would be ignored in its statistic calculation. ", + argstr="--removeIntensityOutliers %f", + ) useCachingOfBSplineWeightsMode = traits.Enum( "ON", "OFF", - desc= - "This is a 5x speed advantage at the expense of requiring much more memory. Only relevant when transformType is BSpline.", - argstr="--useCachingOfBSplineWeightsMode %s") + desc="This is a 5x speed advantage at the expense of requiring much more memory. Only relevant when transformType is BSpline.", + argstr="--useCachingOfBSplineWeightsMode %s", + ) useExplicitPDFDerivativesMode = traits.Enum( "AUTO", "ON", "OFF", - desc= - "Using mode AUTO means OFF for BSplineDeformableTransforms and ON for the linear transforms. The ON alternative uses more memory to sometimes do a better job.", - argstr="--useExplicitPDFDerivativesMode %s") + desc="Using mode AUTO means OFF for BSplineDeformableTransforms and ON for the linear transforms. The ON alternative uses more memory to sometimes do a better job.", + argstr="--useExplicitPDFDerivativesMode %s", + ) ROIAutoDilateSize = traits.Float( - desc= - "This flag is only relavent when using ROIAUTO mode for initializing masks. It defines the final dilation size to capture a bit of background outside the tissue region. At setting of 10mm has been shown to help regularize a BSpline registration type so that there is some background constraints to match the edges of the head better.", - argstr="--ROIAutoDilateSize %f") + desc="This flag is only relavent when using ROIAUTO mode for initializing masks. It defines the final dilation size to capture a bit of background outside the tissue region. 
At setting of 10mm has been shown to help regularize a BSpline registration type so that there is some background constraints to match the edges of the head better.", + argstr="--ROIAutoDilateSize %f", + ) ROIAutoClosingSize = traits.Float( - desc= - "This flag is only relavent when using ROIAUTO mode for initializing masks. It defines the hole closing size in mm. It is rounded up to the nearest whole pixel size in each direction. The default is to use a closing size of 9mm. For mouse data this value may need to be reset to 0.9 or smaller.", - argstr="--ROIAutoClosingSize %f") + desc="This flag is only relavent when using ROIAUTO mode for initializing masks. It defines the hole closing size in mm. It is rounded up to the nearest whole pixel size in each direction. The default is to use a closing size of 9mm. For mouse data this value may need to be reset to 0.9 or smaller.", + argstr="--ROIAutoClosingSize %f", + ) relaxationFactor = traits.Float( - desc= - "Internal debugging parameter, and should probably never be used from the command line. This will be removed in the future.", - argstr="--relaxationFactor %f") + desc="Internal debugging parameter, and should probably never be used from the command line. This will be removed in the future.", + argstr="--relaxationFactor %f", + ) maximumStepLength = traits.Float( - desc= - "Internal debugging parameter, and should probably never be used from the command line. This will be removed in the future.", - argstr="--maximumStepLength %f") + desc="Internal debugging parameter, and should probably never be used from the command line. This will be removed in the future.", + argstr="--maximumStepLength %f", + ) failureExitCode = traits.Int( - desc= - "If the fit fails, exit with this status code. (It can be used to force a successfult exit status of (0) if the registration fails due to reaching the maximum number of iterations.", - argstr="--failureExitCode %d") + desc="If the fit fails, exit with this status code. (It can be used to force a successfult exit status of (0) if the registration fails due to reaching the maximum number of iterations.", + argstr="--failureExitCode %d", + ) writeTransformOnFailure = traits.Bool( - desc= - "Flag to save the final transform even if the numberOfIterations are reached without convergence. (Intended for use when --failureExitCode 0 )", - argstr="--writeTransformOnFailure ") + desc="Flag to save the final transform even if the numberOfIterations are reached without convergence. (Intended for use when --failureExitCode 0 )", + argstr="--writeTransformOnFailure ", + ) numberOfThreads = traits.Int( - desc= - "Explicitly specify the maximum number of threads to use. (default is auto-detected)", - argstr="--numberOfThreads %d") + desc="Explicitly specify the maximum number of threads to use. (default is auto-detected)", + argstr="--numberOfThreads %d", + ) forceMINumberOfThreads = traits.Int( - desc= - "Force the the maximum number of threads to use for non thread safe MI metric. CAUTION: Inconsistent results my arise!", - argstr="--forceMINumberOfThreads %d") + desc="Force the the maximum number of threads to use for non thread safe MI metric. CAUTION: Inconsistent results my arise!", + argstr="--forceMINumberOfThreads %d", + ) debugLevel = traits.Int( - desc= - "Display debug messages, and produce debug intermediate results. 0=OFF, 1=Minimal, 10=Maximum debugging.", - argstr="--debugLevel %d") + desc="Display debug messages, and produce debug intermediate results. 
0=OFF, 1=Minimal, 10=Maximum debugging.", + argstr="--debugLevel %d", + ) costFunctionConvergenceFactor = traits.Float( - desc= - " From itkLBFGSBOptimizer.h: Set/Get the CostFunctionConvergenceFactor. Algorithm terminates when the reduction in cost function is less than (factor * epsmcj) where epsmch is the machine precision. Typical values for factor: 1e+12 for low accuracy; 1e+7 for moderate accuracy and 1e+1 for extremely high accuracy. 1e+9 seems to work well., ", - argstr="--costFunctionConvergenceFactor %f") + desc=" From itkLBFGSBOptimizer.h: Set/Get the CostFunctionConvergenceFactor. Algorithm terminates when the reduction in cost function is less than (factor * epsmcj) where epsmch is the machine precision. Typical values for factor: 1e+12 for low accuracy; 1e+7 for moderate accuracy and 1e+1 for extremely high accuracy. 1e+9 seems to work well., ", + argstr="--costFunctionConvergenceFactor %f", + ) projectedGradientTolerance = traits.Float( - desc= - " From itkLBFGSBOptimizer.h: Set/Get the ProjectedGradientTolerance. Algorithm terminates when the project gradient is below the tolerance. Default lbfgsb value is 1e-5, but 1e-4 seems to work well., ", - argstr="--projectedGradientTolerance %f") + desc=" From itkLBFGSBOptimizer.h: Set/Get the ProjectedGradientTolerance. Algorithm terminates when the project gradient is below the tolerance. Default lbfgsb value is 1e-5, but 1e-4 seems to work well., ", + argstr="--projectedGradientTolerance %f", + ) gui = traits.Bool( - desc= - "Display intermediate image volumes for debugging. NOTE: This is not part of the standard build sytem, and probably does nothing on your installation.", - argstr="--gui ") + desc="Display intermediate image volumes for debugging. NOTE: This is not part of the standard build sytem, and probably does nothing on your installation.", + argstr="--gui ", + ) promptUser = traits.Bool( - desc= - "Prompt the user to hit enter each time an image is sent to the DebugImageViewer", - argstr="--promptUser ") + desc="Prompt the user to hit enter each time an image is sent to the DebugImageViewer", + argstr="--promptUser ", + ) NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_00 = traits.Bool( - desc="DO NOT USE THIS FLAG", - argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_00 ") + desc="DO NOT USE THIS FLAG", argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_00 " + ) NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_01 = traits.Bool( - desc="DO NOT USE THIS FLAG", - argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_01 ") + desc="DO NOT USE THIS FLAG", argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_01 " + ) NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_02 = traits.Bool( - desc="DO NOT USE THIS FLAG", - argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_02 ") + desc="DO NOT USE THIS FLAG", argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_02 " + ) permitParameterVariation = InputMultiPath( traits.Int, - desc= - "A bit vector to permit linear transform parameters to vary under optimization. The vector order corresponds with transform parameters, and beyond the end ones fill in as a default. For instance, you can choose to rotate only in x (pitch) with 1,0,0; this is mostly for expert use in turning on and off individual degrees of freedom in rotation, translation or scaling without multiplying the number of transform representations; this trick is probably meaningless when tried with the general affine transform.", + desc="A bit vector to permit linear transform parameters to vary under optimization. 
The vector order corresponds with transform parameters, and beyond the end ones fill in as a default. For instance, you can choose to rotate only in x (pitch) with 1,0,0; this is mostly for expert use in turning on and off individual degrees of freedom in rotation, translation or scaling without multiplying the number of transform representations; this trick is probably meaningless when tried with the general affine transform.", sep=",", - argstr="--permitParameterVariation %s") + argstr="--permitParameterVariation %s", + ) costMetric = traits.Enum( "MMI", "MSE", "NC", "MC", - desc= - "The cost metric to be used during fitting. Defaults to MMI. Options are MMI (Mattes Mutual Information), MSE (Mean Square Error), NC (Normalized Correlation), MC (Match Cardinality for binary images)", - argstr="--costMetric %s") + desc="The cost metric to be used during fitting. Defaults to MMI. Options are MMI (Mattes Mutual Information), MSE (Mean Square Error), NC (Normalized Correlation), MC (Match Cardinality for binary images)", + argstr="--costMetric %s", + ) writeOutputTransformInFloat = traits.Bool( - desc= - "By default, the output registration transforms (either the output composite transform or each transform component) are written to the disk in double precision. If this flag is ON, the output transforms will be written in single (float) precision. It is especially important if the output transform is a displacement field transform, or it is a composite transform that includes several displacement fields.", - argstr="--writeOutputTransformInFloat ") + desc="By default, the output registration transforms (either the output composite transform or each transform component) are written to the disk in double precision. If this flag is ON, the output transforms will be written in single (float) precision. It is especially important if the output transform is a displacement field transform, or it is a composite transform that includes several displacement fields.", + argstr="--writeOutputTransformInFloat ", + ) class BRAINSFitOutputSpec(TraitedSpec): bsplineTransform = File( - desc= - "(optional) Filename to which save the estimated transform. NOTE: You must set at least one output object (either a deformed image or a transform. NOTE: USE THIS ONLY IF THE FINAL TRANSFORM IS BSpline", - exists=True) + desc="(optional) Filename to which save the estimated transform. NOTE: You must set at least one output object (either a deformed image or a transform. NOTE: USE THIS ONLY IF THE FINAL TRANSFORM IS BSpline", + exists=True, + ) linearTransform = File( - desc= - "(optional) Filename to which save the estimated transform. NOTE: You must set at least one output object (either a deformed image or a transform. NOTE: USE THIS ONLY IF THE FINAL TRANSFORM IS ---NOT--- BSpline", - exists=True) + desc="(optional) Filename to which save the estimated transform. NOTE: You must set at least one output object (either a deformed image or a transform. NOTE: USE THIS ONLY IF THE FINAL TRANSFORM IS ---NOT--- BSpline", + exists=True, + ) outputVolume = File( - desc= - "(optional) Output image for registration. NOTE: You must select either the outputTransform or the outputVolume option.", - exists=True) + desc="(optional) Output image for registration. 
NOTE: You must select either the outputTransform or the outputVolume option.", + exists=True, + ) outputFixedVolumeROI = File( - desc= - "The ROI automatically found in fixed image, ONLY FOR ROIAUTO mode.", - exists=True) + desc="The ROI automatically found in fixed image, ONLY FOR ROIAUTO mode.", + exists=True, + ) outputMovingVolumeROI = File( - desc= - "The ROI automatically found in moving image, ONLY FOR ROIAUTO mode.", - exists=True) + desc="The ROI automatically found in moving image, ONLY FOR ROIAUTO mode.", + exists=True, + ) strippedOutputTransform = File( - desc= - "File name for the rigid component of the estimated affine transform. Can be used to rigidly register the moving image to the fixed image. NOTE: This value is overwritten if either bsplineTransform or linearTransform is set.", - exists=True) + desc="File name for the rigid component of the estimated affine transform. Can be used to rigidly register the moving image to the fixed image. NOTE: This value is overwritten if either bsplineTransform or linearTransform is set.", + exists=True, + ) outputTransform = File( - desc= - "(optional) Filename to which save the (optional) estimated transform. NOTE: You must select either the outputTransform or the outputVolume option.", - exists=True) + desc="(optional) Filename to which save the (optional) estimated transform. NOTE: You must select either the outputTransform or the outputVolume option.", + exists=True, + ) class BRAINSFit(SEMLikeCommandLine): @@ -370,11 +385,11 @@ class BRAINSFit(SEMLikeCommandLine): output_spec = BRAINSFitOutputSpec _cmd = "BRAINSFit " _outputs_filenames = { - 'outputVolume': 'outputVolume.nii', - 'bsplineTransform': 'bsplineTransform.mat', - 'outputTransform': 'outputTransform.mat', - 'outputFixedVolumeROI': 'outputFixedVolumeROI.nii', - 'strippedOutputTransform': 'strippedOutputTransform.mat', - 'outputMovingVolumeROI': 'outputMovingVolumeROI.nii', - 'linearTransform': 'linearTransform.mat' + "outputVolume": "outputVolume.nii", + "bsplineTransform": "bsplineTransform.mat", + "outputTransform": "outputTransform.mat", + "outputFixedVolumeROI": "outputFixedVolumeROI.nii", + "strippedOutputTransform": "strippedOutputTransform.mat", + "outputMovingVolumeROI": "outputMovingVolumeROI.nii", + "linearTransform": "linearTransform.mat", } diff --git a/nipype/interfaces/slicer/registration/brainsresample.py b/nipype/interfaces/slicer/registration/brainsresample.py index a3b79681fd..9031b86d8c 100644 --- a/nipype/interfaces/slicer/registration/brainsresample.py +++ b/nipype/interfaces/slicer/registration/brainsresample.py @@ -3,24 +3,35 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os class BRAINSResampleInputSpec(CommandLineInputSpec): - inputVolume = File( - desc="Image To Warp", exists=True, argstr="--inputVolume %s") + inputVolume = File(desc="Image To Warp", exists=True, argstr="--inputVolume %s") referenceVolume = File( - desc= - "Reference image used only to define the output space. 
If not specified, the warping is done in the same space as the image to warp.", + desc="Reference image used only to define the output space. If not specified, the warping is done in the same space as the image to warp.", exists=True, - argstr="--referenceVolume %s") + argstr="--referenceVolume %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="Resulting deformed image", - argstr="--outputVolume %s") + argstr="--outputVolume %s", + ) pixelType = traits.Enum( "float", "short", @@ -29,18 +40,19 @@ class BRAINSResampleInputSpec(CommandLineInputSpec): "uint", "uchar", "binary", - desc= - "Specifies the pixel type for the input/output images. The \'binary\' pixel type uses a modified algorithm whereby the image is read in as unsigned char, a signed distance map is created, signed distance map is resampled, and then a thresholded image of type unsigned char is written to disk.", - argstr="--pixelType %s") + desc="Specifies the pixel type for the input/output images. The 'binary' pixel type uses a modified algorithm whereby the image is read in as unsigned char, a signed distance map is created, signed distance map is resampled, and then a thresholded image of type unsigned char is written to disk.", + argstr="--pixelType %s", + ) deformationVolume = File( desc="Displacement Field to be used to warp the image", exists=True, - argstr="--deformationVolume %s") + argstr="--deformationVolume %s", + ) warpTransform = File( - desc= - "Filename for the BRAINSFit transform used in place of the deformation field", + desc="Filename for the BRAINSFit transform used in place of the deformation field", exists=True, - argstr="--warpTransform %s") + argstr="--warpTransform %s", + ) interpolationMode = traits.Enum( "NearestNeighbor", "Linear", @@ -52,24 +64,24 @@ class BRAINSResampleInputSpec(CommandLineInputSpec): "Welch", "Lanczos", "Blackman", - desc= - "Type of interpolation to be used when applying transform to moving volume. Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", - argstr="--interpolationMode %s") + desc="Type of interpolation to be used when applying transform to moving volume. Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", + argstr="--interpolationMode %s", + ) inverseTransform = traits.Bool( - desc= - "True/False is to compute inverse of given transformation. Default is false", - argstr="--inverseTransform ") - defaultValue = traits.Float( - desc="Default voxel value", argstr="--defaultValue %f") + desc="True/False is to compute inverse of given transformation. Default is false", + argstr="--inverseTransform ", + ) + defaultValue = traits.Float(desc="Default voxel value", argstr="--defaultValue %f") gridSpacing = InputMultiPath( traits.Int, - desc= - "Add warped grid to output image to help show the deformation that occured with specified spacing. A spacing of 0 in a dimension indicates that grid lines should be rendered to fall exactly (i.e. do not allow displacements off that plane). This is useful for makeing a 2D image of grid lines from the 3D space ", + desc="Add warped grid to output image to help show the deformation that occured with specified spacing. A spacing of 0 in a dimension indicates that grid lines should be rendered to fall exactly (i.e. do not allow displacements off that plane). 
This is useful for makeing a 2D image of grid lines from the 3D space ", sep=",", - argstr="--gridSpacing %s") + argstr="--gridSpacing %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class BRAINSResampleOutputSpec(TraitedSpec): @@ -100,4 +112,4 @@ class BRAINSResample(SEMLikeCommandLine): input_spec = BRAINSResampleInputSpec output_spec = BRAINSResampleOutputSpec _cmd = "BRAINSResample " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} diff --git a/nipype/interfaces/slicer/registration/specialized.py b/nipype/interfaces/slicer/registration/specialized.py index 9c6c3f5f20..12835b0cef 100644 --- a/nipype/interfaces/slicer/registration/specialized.py +++ b/nipype/interfaces/slicer/registration/specialized.py @@ -3,37 +3,49 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os class ACPCTransformInputSpec(CommandLineInputSpec): acpc = InputMultiPath( traits.List(traits.Float(), minlen=3, maxlen=3), - desc= - "ACPC line, two fiducial points, one at the anterior commissure and one at the posterior commissure.", - argstr="--acpc %s...") + desc="ACPC line, two fiducial points, one at the anterior commissure and one at the posterior commissure.", + argstr="--acpc %s...", + ) midline = InputMultiPath( traits.List(traits.Float(), minlen=3, maxlen=3), - desc= - "The midline is a series of points defining the division between the hemispheres of the brain (the mid sagittal plane).", - argstr="--midline %s...") + desc="The midline is a series of points defining the division between the hemispheres of the brain (the mid sagittal plane).", + argstr="--midline %s...", + ) outputTransform = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "A transform filled in from the ACPC and Midline registration calculation", - argstr="--outputTransform %s") + desc="A transform filled in from the ACPC and Midline registration calculation", + argstr="--outputTransform %s", + ) debugSwitch = traits.Bool( - desc="Click if wish to see debugging output", argstr="--debugSwitch ") + desc="Click if wish to see debugging output", argstr="--debugSwitch " + ) class ACPCTransformOutputSpec(TraitedSpec): outputTransform = File( - desc= - "A transform filled in from the ACPC and Midline registration calculation", - exists=True) + desc="A transform filled in from the ACPC and Midline registration calculation", + exists=True, + ) class ACPCTransform(SEMLikeCommandLine): @@ -58,39 +70,44 @@ class ACPCTransform(SEMLikeCommandLine): input_spec = ACPCTransformInputSpec output_spec = ACPCTransformOutputSpec _cmd = "ACPCTransform " - _outputs_filenames = {'outputTransform': 'outputTransform.mat'} + _outputs_filenames = {"outputTransform": "outputTransform.mat"} class FiducialRegistrationInputSpec(CommandLineInputSpec): fixedLandmarks = InputMultiPath( traits.List(traits.Float(), minlen=3, maxlen=3), desc="Ordered list of landmarks in the fixed image", - argstr="--fixedLandmarks %s...") + 
argstr="--fixedLandmarks %s...", + ) movingLandmarks = InputMultiPath( traits.List(traits.Float(), minlen=3, maxlen=3), desc="Ordered list of landmarks in the moving image", - argstr="--movingLandmarks %s...") + argstr="--movingLandmarks %s...", + ) saveTransform = traits.Either( traits.Bool, File(), hash_files=False, desc="Save the transform that results from registration", - argstr="--saveTransform %s") + argstr="--saveTransform %s", + ) transformType = traits.Enum( "Translation", "Rigid", "Similarity", desc="Type of transform to produce", - argstr="--transformType %s") + argstr="--transformType %s", + ) rms = traits.Float(desc="Display RMS Error.", argstr="--rms %f") outputMessage = traits.Str( - desc="Provides more information on the output", - argstr="--outputMessage %s") + desc="Provides more information on the output", argstr="--outputMessage %s" + ) class FiducialRegistrationOutputSpec(TraitedSpec): saveTransform = File( - desc="Save the transform that results from registration", exists=True) + desc="Save the transform that results from registration", exists=True + ) class FiducialRegistration(SEMLikeCommandLine): @@ -113,50 +130,52 @@ class FiducialRegistration(SEMLikeCommandLine): input_spec = FiducialRegistrationInputSpec output_spec = FiducialRegistrationOutputSpec _cmd = "FiducialRegistration " - _outputs_filenames = {'saveTransform': 'saveTransform.txt'} + _outputs_filenames = {"saveTransform": "saveTransform.txt"} class VBRAINSDemonWarpInputSpec(CommandLineInputSpec): movingVolume = InputMultiPath( File(exists=True), desc="Required: input moving image", - argstr="--movingVolume %s...") + argstr="--movingVolume %s...", + ) fixedVolume = InputMultiPath( File(exists=True), desc="Required: input fixed (target) image", - argstr="--fixedVolume %s...") + argstr="--fixedVolume %s...", + ) inputPixelType = traits.Enum( "float", "short", "ushort", "int", "uchar", - desc= - "Input volumes will be typecast to this format: float|short|ushort|int|uchar", - argstr="--inputPixelType %s") + desc="Input volumes will be typecast to this format: float|short|ushort|int|uchar", + argstr="--inputPixelType %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Required: output resampled moving image (will have the same physical space as the fixedVolume).", - argstr="--outputVolume %s") + desc="Required: output resampled moving image (will have the same physical space as the fixedVolume).", + argstr="--outputVolume %s", + ) outputDisplacementFieldVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Output deformation field vector image (will have the same physical space as the fixedVolume).", - argstr="--outputDisplacementFieldVolume %s") + desc="Output deformation field vector image (will have the same physical space as the fixedVolume).", + argstr="--outputDisplacementFieldVolume %s", + ) outputPixelType = traits.Enum( "float", "short", "ushort", "int", "uchar", - desc= - "outputVolume will be typecast to this format: float|short|ushort|int|uchar", - argstr="--outputPixelType %s") + desc="outputVolume will be typecast to this format: float|short|ushort|int|uchar", + argstr="--outputPixelType %s", + ) interpolationMode = traits.Enum( "NearestNeighbor", "Linear", @@ -168,171 +187,185 @@ class VBRAINSDemonWarpInputSpec(CommandLineInputSpec): "Welch", "Lanczos", "Blackman", - desc= - "Type of interpolation to be used when applying transform to moving volume. 
Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", - argstr="--interpolationMode %s") + desc="Type of interpolation to be used when applying transform to moving volume. Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", + argstr="--interpolationMode %s", + ) registrationFilterType = traits.Enum( "Demons", "FastSymmetricForces", "Diffeomorphic", "LogDemons", "SymmetricLogDemons", - desc= - "Registration Filter Type: Demons|FastSymmetricForces|Diffeomorphic|LogDemons|SymmetricLogDemons", - argstr="--registrationFilterType %s") + desc="Registration Filter Type: Demons|FastSymmetricForces|Diffeomorphic|LogDemons|SymmetricLogDemons", + argstr="--registrationFilterType %s", + ) smoothDisplacementFieldSigma = traits.Float( - desc= - "A gaussian smoothing value to be applied to the deformation feild at each iteration.", - argstr="--smoothDisplacementFieldSigma %f") + desc="A gaussian smoothing value to be applied to the deformation feild at each iteration.", + argstr="--smoothDisplacementFieldSigma %f", + ) numberOfPyramidLevels = traits.Int( - desc= - "Number of image pyramid levels to use in the multi-resolution registration.", - argstr="--numberOfPyramidLevels %d") + desc="Number of image pyramid levels to use in the multi-resolution registration.", + argstr="--numberOfPyramidLevels %d", + ) minimumFixedPyramid = InputMultiPath( traits.Int, - desc= - "The shrink factor for the first level of the fixed image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", + desc="The shrink factor for the first level of the fixed image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", sep=",", - argstr="--minimumFixedPyramid %s") + argstr="--minimumFixedPyramid %s", + ) minimumMovingPyramid = InputMultiPath( traits.Int, - desc= - "The shrink factor for the first level of the moving image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", + desc="The shrink factor for the first level of the moving image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", sep=",", - argstr="--minimumMovingPyramid %s") + argstr="--minimumMovingPyramid %s", + ) arrayOfPyramidLevelIterations = InputMultiPath( traits.Int, desc="The number of iterations for each pyramid level", sep=",", - argstr="--arrayOfPyramidLevelIterations %s") + argstr="--arrayOfPyramidLevelIterations %s", + ) histogramMatch = traits.Bool( - desc= - "Histogram Match the input images. This is suitable for images of the same modality that may have different absolute scales, but the same overall intensity profile.", - argstr="--histogramMatch ") + desc="Histogram Match the input images. This is suitable for images of the same modality that may have different absolute scales, but the same overall intensity profile.", + argstr="--histogramMatch ", + ) numberOfHistogramBins = traits.Int( - desc="The number of histogram levels", - argstr="--numberOfHistogramBins %d") + desc="The number of histogram levels", argstr="--numberOfHistogramBins %d" + ) numberOfMatchPoints = traits.Int( desc="The number of match points for histrogramMatch", - argstr="--numberOfMatchPoints %d") + argstr="--numberOfMatchPoints %d", + ) medianFilterSize = InputMultiPath( traits.Int, - desc= - "Median filter radius in all 3 directions. When images have a lot of salt and pepper noise, this step can improve the registration.", + desc="Median filter radius in all 3 directions. 
When images have a lot of salt and pepper noise, this step can improve the registration.", sep=",", - argstr="--medianFilterSize %s") + argstr="--medianFilterSize %s", + ) initializeWithDisplacementField = File( desc="Initial deformation field vector image file name", exists=True, - argstr="--initializeWithDisplacementField %s") + argstr="--initializeWithDisplacementField %s", + ) initializeWithTransform = File( desc="Initial Transform filename", exists=True, - argstr="--initializeWithTransform %s") + argstr="--initializeWithTransform %s", + ) makeBOBF = traits.Bool( - desc= - "Flag to make Brain-Only Background-Filled versions of the input and target volumes.", - argstr="--makeBOBF ") + desc="Flag to make Brain-Only Background-Filled versions of the input and target volumes.", + argstr="--makeBOBF ", + ) fixedBinaryVolume = File( desc="Mask filename for desired region of interest in the Fixed image.", exists=True, - argstr="--fixedBinaryVolume %s") + argstr="--fixedBinaryVolume %s", + ) movingBinaryVolume = File( - desc= - "Mask filename for desired region of interest in the Moving image.", + desc="Mask filename for desired region of interest in the Moving image.", exists=True, - argstr="--movingBinaryVolume %s") + argstr="--movingBinaryVolume %s", + ) lowerThresholdForBOBF = traits.Int( - desc="Lower threshold for performing BOBF", - argstr="--lowerThresholdForBOBF %d") + desc="Lower threshold for performing BOBF", argstr="--lowerThresholdForBOBF %d" + ) upperThresholdForBOBF = traits.Int( - desc="Upper threshold for performing BOBF", - argstr="--upperThresholdForBOBF %d") + desc="Upper threshold for performing BOBF", argstr="--upperThresholdForBOBF %d" + ) backgroundFillValue = traits.Int( desc="Replacement value to overwrite background when performing BOBF", - argstr="--backgroundFillValue %d") + argstr="--backgroundFillValue %d", + ) seedForBOBF = InputMultiPath( traits.Int, desc="coordinates in all 3 directions for Seed when performing BOBF", sep=",", - argstr="--seedForBOBF %s") + argstr="--seedForBOBF %s", + ) neighborhoodForBOBF = InputMultiPath( traits.Int, - desc= - "neighborhood in all 3 directions to be included when performing BOBF", + desc="neighborhood in all 3 directions to be included when performing BOBF", sep=",", - argstr="--neighborhoodForBOBF %s") + argstr="--neighborhoodForBOBF %s", + ) outputDisplacementFieldPrefix = traits.Str( - desc= - "Displacement field filename prefix for writing separate x, y, and z component images", - argstr="--outputDisplacementFieldPrefix %s") + desc="Displacement field filename prefix for writing separate x, y, and z component images", + argstr="--outputDisplacementFieldPrefix %s", + ) outputCheckerboardVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.", - argstr="--outputCheckerboardVolume %s") + desc="Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.", + argstr="--outputCheckerboardVolume %s", + ) checkerboardPatternSubdivisions = InputMultiPath( traits.Int, desc="Number of Checkerboard subdivisions in all 3 directions", sep=",", - argstr="--checkerboardPatternSubdivisions %s") + argstr="--checkerboardPatternSubdivisions %s", + ) outputNormalized = traits.Bool( - desc= - "Flag to warp and write the normalized images to output. 
In normalized images the image values are fit-scaled to be between 0 and the maximum storage type value.", - argstr="--outputNormalized ") + desc="Flag to warp and write the normalized images to output. In normalized images the image values are fit-scaled to be between 0 and the maximum storage type value.", + argstr="--outputNormalized ", + ) outputDebug = traits.Bool( - desc="Flag to write debugging images after each step.", - argstr="--outputDebug ") + desc="Flag to write debugging images after each step.", argstr="--outputDebug " + ) weightFactors = InputMultiPath( traits.Float, desc="Weight fatctors for each input images", sep=",", - argstr="--weightFactors %s") + argstr="--weightFactors %s", + ) gradient_type = traits.Enum( "0", "1", "2", - desc= - "Type of gradient used for computing the demons force (0 is symmetrized, 1 is fixed image, 2 is moving image)", - argstr="--gradient_type %s") + desc="Type of gradient used for computing the demons force (0 is symmetrized, 1 is fixed image, 2 is moving image)", + argstr="--gradient_type %s", + ) upFieldSmoothing = traits.Float( desc="Smoothing sigma for the update field at each iteration", - argstr="--upFieldSmoothing %f") + argstr="--upFieldSmoothing %f", + ) max_step_length = traits.Float( desc="Maximum length of an update vector (0: no restriction)", - argstr="--max_step_length %f") + argstr="--max_step_length %f", + ) use_vanilla_dem = traits.Bool( - desc="Run vanilla demons algorithm", argstr="--use_vanilla_dem ") + desc="Run vanilla demons algorithm", argstr="--use_vanilla_dem " + ) gui = traits.Bool( - desc="Display intermediate image volumes for debugging", - argstr="--gui ") + desc="Display intermediate image volumes for debugging", argstr="--gui " + ) promptUser = traits.Bool( - desc= - "Prompt the user to hit enter each time an image is sent to the DebugImageViewer", - argstr="--promptUser ") + desc="Prompt the user to hit enter each time an image is sent to the DebugImageViewer", + argstr="--promptUser ", + ) numberOfBCHApproximationTerms = traits.Int( desc="Number of terms in the BCH expansion", - argstr="--numberOfBCHApproximationTerms %d") + argstr="--numberOfBCHApproximationTerms %d", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class VBRAINSDemonWarpOutputSpec(TraitedSpec): outputVolume = File( - desc= - "Required: output resampled moving image (will have the same physical space as the fixedVolume).", - exists=True) + desc="Required: output resampled moving image (will have the same physical space as the fixedVolume).", + exists=True, + ) outputDisplacementFieldVolume = File( - desc= - "Output deformation field vector image (will have the same physical space as the fixedVolume).", - exists=True) + desc="Output deformation field vector image (will have the same physical space as the fixedVolume).", + exists=True, + ) outputCheckerboardVolume = File( - desc= - "Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.", - exists=True) + desc="Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.", + exists=True, + ) class VBRAINSDemonWarp(SEMLikeCommandLine): @@ -361,53 +394,53 @@ class VBRAINSDemonWarp(SEMLikeCommandLine): output_spec = VBRAINSDemonWarpOutputSpec _cmd = "VBRAINSDemonWarp " _outputs_filenames = { - 'outputVolume': 'outputVolume.nii', - 'outputCheckerboardVolume': 'outputCheckerboardVolume.nii', - 
'outputDisplacementFieldVolume': 'outputDisplacementFieldVolume.nrrd' + "outputVolume": "outputVolume.nii", + "outputCheckerboardVolume": "outputCheckerboardVolume.nii", + "outputDisplacementFieldVolume": "outputDisplacementFieldVolume.nrrd", } class BRAINSDemonWarpInputSpec(CommandLineInputSpec): movingVolume = File( - desc="Required: input moving image", - exists=True, - argstr="--movingVolume %s") + desc="Required: input moving image", exists=True, argstr="--movingVolume %s" + ) fixedVolume = File( desc="Required: input fixed (target) image", exists=True, - argstr="--fixedVolume %s") + argstr="--fixedVolume %s", + ) inputPixelType = traits.Enum( "float", "short", "ushort", "int", "uchar", - desc= - "Input volumes will be typecast to this format: float|short|ushort|int|uchar", - argstr="--inputPixelType %s") + desc="Input volumes will be typecast to this format: float|short|ushort|int|uchar", + argstr="--inputPixelType %s", + ) outputVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Required: output resampled moving image (will have the same physical space as the fixedVolume).", - argstr="--outputVolume %s") + desc="Required: output resampled moving image (will have the same physical space as the fixedVolume).", + argstr="--outputVolume %s", + ) outputDisplacementFieldVolume = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Output deformation field vector image (will have the same physical space as the fixedVolume).", - argstr="--outputDisplacementFieldVolume %s") + desc="Output deformation field vector image (will have the same physical space as the fixedVolume).", + argstr="--outputDisplacementFieldVolume %s", + ) outputPixelType = traits.Enum( "float", "short", "ushort", "int", "uchar", - desc= - "outputVolume will be typecast to this format: float|short|ushort|int|uchar", - argstr="--outputPixelType %s") + desc="outputVolume will be typecast to this format: float|short|ushort|int|uchar", + argstr="--outputPixelType %s", + ) interpolationMode = traits.Enum( "NearestNeighbor", "Linear", @@ -419,168 +452,181 @@ class BRAINSDemonWarpInputSpec(CommandLineInputSpec): "Welch", "Lanczos", "Blackman", - desc= - "Type of interpolation to be used when applying transform to moving volume. Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", - argstr="--interpolationMode %s") + desc="Type of interpolation to be used when applying transform to moving volume. 
Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", + argstr="--interpolationMode %s", + ) registrationFilterType = traits.Enum( "Demons", "FastSymmetricForces", "Diffeomorphic", - desc= - "Registration Filter Type: Demons|FastSymmetricForces|Diffeomorphic", - argstr="--registrationFilterType %s") + desc="Registration Filter Type: Demons|FastSymmetricForces|Diffeomorphic", + argstr="--registrationFilterType %s", + ) smoothDisplacementFieldSigma = traits.Float( - desc= - "A gaussian smoothing value to be applied to the deformation feild at each iteration.", - argstr="--smoothDisplacementFieldSigma %f") + desc="A gaussian smoothing value to be applied to the deformation feild at each iteration.", + argstr="--smoothDisplacementFieldSigma %f", + ) numberOfPyramidLevels = traits.Int( - desc= - "Number of image pyramid levels to use in the multi-resolution registration.", - argstr="--numberOfPyramidLevels %d") + desc="Number of image pyramid levels to use in the multi-resolution registration.", + argstr="--numberOfPyramidLevels %d", + ) minimumFixedPyramid = InputMultiPath( traits.Int, - desc= - "The shrink factor for the first level of the fixed image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", + desc="The shrink factor for the first level of the fixed image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", sep=",", - argstr="--minimumFixedPyramid %s") + argstr="--minimumFixedPyramid %s", + ) minimumMovingPyramid = InputMultiPath( traits.Int, - desc= - "The shrink factor for the first level of the moving image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", + desc="The shrink factor for the first level of the moving image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", sep=",", - argstr="--minimumMovingPyramid %s") + argstr="--minimumMovingPyramid %s", + ) arrayOfPyramidLevelIterations = InputMultiPath( traits.Int, desc="The number of iterations for each pyramid level", sep=",", - argstr="--arrayOfPyramidLevelIterations %s") + argstr="--arrayOfPyramidLevelIterations %s", + ) histogramMatch = traits.Bool( - desc= - "Histogram Match the input images. This is suitable for images of the same modality that may have different absolute scales, but the same overall intensity profile.", - argstr="--histogramMatch ") + desc="Histogram Match the input images. This is suitable for images of the same modality that may have different absolute scales, but the same overall intensity profile.", + argstr="--histogramMatch ", + ) numberOfHistogramBins = traits.Int( - desc="The number of histogram levels", - argstr="--numberOfHistogramBins %d") + desc="The number of histogram levels", argstr="--numberOfHistogramBins %d" + ) numberOfMatchPoints = traits.Int( desc="The number of match points for histrogramMatch", - argstr="--numberOfMatchPoints %d") + argstr="--numberOfMatchPoints %d", + ) medianFilterSize = InputMultiPath( traits.Int, - desc= - "Median filter radius in all 3 directions. When images have a lot of salt and pepper noise, this step can improve the registration.", + desc="Median filter radius in all 3 directions. 
When images have a lot of salt and pepper noise, this step can improve the registration.", sep=",", - argstr="--medianFilterSize %s") + argstr="--medianFilterSize %s", + ) initializeWithDisplacementField = File( desc="Initial deformation field vector image file name", exists=True, - argstr="--initializeWithDisplacementField %s") + argstr="--initializeWithDisplacementField %s", + ) initializeWithTransform = File( desc="Initial Transform filename", exists=True, - argstr="--initializeWithTransform %s") + argstr="--initializeWithTransform %s", + ) maskProcessingMode = traits.Enum( "NOMASK", "ROIAUTO", "ROI", "BOBF", - desc= - "What mode to use for using the masks: NOMASK|ROIAUTO|ROI|BOBF. If ROIAUTO is choosen, then the mask is implicitly defined using a otsu forground and hole filling algorithm. Where the Region Of Interest mode uses the masks to define what parts of the image should be used for computing the deformation field. Brain Only Background Fill uses the masks to pre-process the input images by clipping and filling in the background with a predefined value.", - argstr="--maskProcessingMode %s") + desc="What mode to use for using the masks: NOMASK|ROIAUTO|ROI|BOBF. If ROIAUTO is choosen, then the mask is implicitly defined using a otsu forground and hole filling algorithm. Where the Region Of Interest mode uses the masks to define what parts of the image should be used for computing the deformation field. Brain Only Background Fill uses the masks to pre-process the input images by clipping and filling in the background with a predefined value.", + argstr="--maskProcessingMode %s", + ) fixedBinaryVolume = File( desc="Mask filename for desired region of interest in the Fixed image.", exists=True, - argstr="--fixedBinaryVolume %s") + argstr="--fixedBinaryVolume %s", + ) movingBinaryVolume = File( - desc= - "Mask filename for desired region of interest in the Moving image.", + desc="Mask filename for desired region of interest in the Moving image.", exists=True, - argstr="--movingBinaryVolume %s") + argstr="--movingBinaryVolume %s", + ) lowerThresholdForBOBF = traits.Int( - desc="Lower threshold for performing BOBF", - argstr="--lowerThresholdForBOBF %d") + desc="Lower threshold for performing BOBF", argstr="--lowerThresholdForBOBF %d" + ) upperThresholdForBOBF = traits.Int( - desc="Upper threshold for performing BOBF", - argstr="--upperThresholdForBOBF %d") + desc="Upper threshold for performing BOBF", argstr="--upperThresholdForBOBF %d" + ) backgroundFillValue = traits.Int( desc="Replacement value to overwrite background when performing BOBF", - argstr="--backgroundFillValue %d") + argstr="--backgroundFillValue %d", + ) seedForBOBF = InputMultiPath( traits.Int, desc="coordinates in all 3 directions for Seed when performing BOBF", sep=",", - argstr="--seedForBOBF %s") + argstr="--seedForBOBF %s", + ) neighborhoodForBOBF = InputMultiPath( traits.Int, - desc= - "neighborhood in all 3 directions to be included when performing BOBF", + desc="neighborhood in all 3 directions to be included when performing BOBF", sep=",", - argstr="--neighborhoodForBOBF %s") + argstr="--neighborhoodForBOBF %s", + ) outputDisplacementFieldPrefix = traits.Str( - desc= - "Displacement field filename prefix for writing separate x, y, and z component images", - argstr="--outputDisplacementFieldPrefix %s") + desc="Displacement field filename prefix for writing separate x, y, and z component images", + argstr="--outputDisplacementFieldPrefix %s", + ) outputCheckerboardVolume = traits.Either( traits.Bool, File(), 
hash_files=False, - desc= - "Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.", - argstr="--outputCheckerboardVolume %s") + desc="Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.", + argstr="--outputCheckerboardVolume %s", + ) checkerboardPatternSubdivisions = InputMultiPath( traits.Int, desc="Number of Checkerboard subdivisions in all 3 directions", sep=",", - argstr="--checkerboardPatternSubdivisions %s") + argstr="--checkerboardPatternSubdivisions %s", + ) outputNormalized = traits.Bool( - desc= - "Flag to warp and write the normalized images to output. In normalized images the image values are fit-scaled to be between 0 and the maximum storage type value.", - argstr="--outputNormalized ") + desc="Flag to warp and write the normalized images to output. In normalized images the image values are fit-scaled to be between 0 and the maximum storage type value.", + argstr="--outputNormalized ", + ) outputDebug = traits.Bool( - desc="Flag to write debugging images after each step.", - argstr="--outputDebug ") + desc="Flag to write debugging images after each step.", argstr="--outputDebug " + ) gradient_type = traits.Enum( "0", "1", "2", - desc= - "Type of gradient used for computing the demons force (0 is symmetrized, 1 is fixed image, 2 is moving image)", - argstr="--gradient_type %s") + desc="Type of gradient used for computing the demons force (0 is symmetrized, 1 is fixed image, 2 is moving image)", + argstr="--gradient_type %s", + ) upFieldSmoothing = traits.Float( desc="Smoothing sigma for the update field at each iteration", - argstr="--upFieldSmoothing %f") + argstr="--upFieldSmoothing %f", + ) max_step_length = traits.Float( desc="Maximum length of an update vector (0: no restriction)", - argstr="--max_step_length %f") + argstr="--max_step_length %f", + ) use_vanilla_dem = traits.Bool( - desc="Run vanilla demons algorithm", argstr="--use_vanilla_dem ") + desc="Run vanilla demons algorithm", argstr="--use_vanilla_dem " + ) gui = traits.Bool( - desc="Display intermediate image volumes for debugging", - argstr="--gui ") + desc="Display intermediate image volumes for debugging", argstr="--gui " + ) promptUser = traits.Bool( - desc= - "Prompt the user to hit enter each time an image is sent to the DebugImageViewer", - argstr="--promptUser ") + desc="Prompt the user to hit enter each time an image is sent to the DebugImageViewer", + argstr="--promptUser ", + ) numberOfBCHApproximationTerms = traits.Int( desc="Number of terms in the BCH expansion", - argstr="--numberOfBCHApproximationTerms %d") + argstr="--numberOfBCHApproximationTerms %d", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class BRAINSDemonWarpOutputSpec(TraitedSpec): outputVolume = File( - desc= - "Required: output resampled moving image (will have the same physical space as the fixedVolume).", - exists=True) + desc="Required: output resampled moving image (will have the same physical space as the fixedVolume).", + exists=True, + ) outputDisplacementFieldVolume = File( - desc= - "Output deformation field vector image (will have the same physical space as the fixedVolume).", - exists=True) + desc="Output deformation field vector image (will have the same physical space as the fixedVolume).", + exists=True, + ) outputCheckerboardVolume = File( - desc= - "Genete a checkerboard image volume between the fixedVolume and the deformed 
movingVolume.", - exists=True) + desc="Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.", + exists=True, + ) class BRAINSDemonWarp(SEMLikeCommandLine): @@ -609,7 +655,7 @@ class BRAINSDemonWarp(SEMLikeCommandLine): output_spec = BRAINSDemonWarpOutputSpec _cmd = "BRAINSDemonWarp " _outputs_filenames = { - 'outputVolume': 'outputVolume.nii', - 'outputCheckerboardVolume': 'outputCheckerboardVolume.nii', - 'outputDisplacementFieldVolume': 'outputDisplacementFieldVolume.nrrd' + "outputVolume": "outputVolume.nii", + "outputCheckerboardVolume": "outputCheckerboardVolume.nii", + "outputDisplacementFieldVolume": "outputDisplacementFieldVolume.nrrd", } diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py b/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py index f60ab0866b..e36498110b 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py @@ -4,26 +4,22 @@ def test_ACPCTransform_inputs(): input_map = dict( - acpc=dict(argstr='--acpc %s...', ), - args=dict(argstr='%s', ), - debugSwitch=dict(argstr='--debugSwitch ', ), - environ=dict( - nohash=True, - usedefault=True, - ), - midline=dict(argstr='--midline %s...', ), - outputTransform=dict( - argstr='--outputTransform %s', - hash_files=False, - ), + acpc=dict(argstr="--acpc %s...",), + args=dict(argstr="%s",), + debugSwitch=dict(argstr="--debugSwitch ",), + environ=dict(nohash=True, usedefault=True,), + midline=dict(argstr="--midline %s...",), + outputTransform=dict(argstr="--outputTransform %s", hash_files=False,), ) inputs = ACPCTransform.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ACPCTransform_outputs(): - output_map = dict(outputTransform=dict(extensions=None, ), ) + output_map = dict(outputTransform=dict(extensions=None,),) outputs = ACPCTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py index efe6d50fce..4136c8105d 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py @@ -4,113 +4,77 @@ def test_BRAINSDemonWarp_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), arrayOfPyramidLevelIterations=dict( - argstr='--arrayOfPyramidLevelIterations %s', - sep=',', + argstr="--arrayOfPyramidLevelIterations %s", sep=",", ), - backgroundFillValue=dict(argstr='--backgroundFillValue %d', ), + backgroundFillValue=dict(argstr="--backgroundFillValue %d",), checkerboardPatternSubdivisions=dict( - argstr='--checkerboardPatternSubdivisions %s', - sep=',', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixedBinaryVolume=dict( - argstr='--fixedBinaryVolume %s', - extensions=None, - ), - fixedVolume=dict( - argstr='--fixedVolume %s', - extensions=None, - ), - gradient_type=dict(argstr='--gradient_type %s', ), - gui=dict(argstr='--gui ', ), - histogramMatch=dict(argstr='--histogramMatch ', ), + argstr="--checkerboardPatternSubdivisions %s", sep=",", + ), + environ=dict(nohash=True, usedefault=True,), + fixedBinaryVolume=dict(argstr="--fixedBinaryVolume %s", extensions=None,), + fixedVolume=dict(argstr="--fixedVolume 
%s", extensions=None,), + gradient_type=dict(argstr="--gradient_type %s",), + gui=dict(argstr="--gui ",), + histogramMatch=dict(argstr="--histogramMatch ",), initializeWithDisplacementField=dict( - argstr='--initializeWithDisplacementField %s', - extensions=None, + argstr="--initializeWithDisplacementField %s", extensions=None, ), initializeWithTransform=dict( - argstr='--initializeWithTransform %s', - extensions=None, - ), - inputPixelType=dict(argstr='--inputPixelType %s', ), - interpolationMode=dict(argstr='--interpolationMode %s', ), - lowerThresholdForBOBF=dict(argstr='--lowerThresholdForBOBF %d', ), - maskProcessingMode=dict(argstr='--maskProcessingMode %s', ), - max_step_length=dict(argstr='--max_step_length %f', ), - medianFilterSize=dict( - argstr='--medianFilterSize %s', - sep=',', - ), - minimumFixedPyramid=dict( - argstr='--minimumFixedPyramid %s', - sep=',', - ), - minimumMovingPyramid=dict( - argstr='--minimumMovingPyramid %s', - sep=',', - ), - movingBinaryVolume=dict( - argstr='--movingBinaryVolume %s', - extensions=None, - ), - movingVolume=dict( - argstr='--movingVolume %s', - extensions=None, - ), - neighborhoodForBOBF=dict( - argstr='--neighborhoodForBOBF %s', - sep=',', - ), + argstr="--initializeWithTransform %s", extensions=None, + ), + inputPixelType=dict(argstr="--inputPixelType %s",), + interpolationMode=dict(argstr="--interpolationMode %s",), + lowerThresholdForBOBF=dict(argstr="--lowerThresholdForBOBF %d",), + maskProcessingMode=dict(argstr="--maskProcessingMode %s",), + max_step_length=dict(argstr="--max_step_length %f",), + medianFilterSize=dict(argstr="--medianFilterSize %s", sep=",",), + minimumFixedPyramid=dict(argstr="--minimumFixedPyramid %s", sep=",",), + minimumMovingPyramid=dict(argstr="--minimumMovingPyramid %s", sep=",",), + movingBinaryVolume=dict(argstr="--movingBinaryVolume %s", extensions=None,), + movingVolume=dict(argstr="--movingVolume %s", extensions=None,), + neighborhoodForBOBF=dict(argstr="--neighborhoodForBOBF %s", sep=",",), numberOfBCHApproximationTerms=dict( - argstr='--numberOfBCHApproximationTerms %d', ), - numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), - numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ), - numberOfPyramidLevels=dict(argstr='--numberOfPyramidLevels %d', ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), + argstr="--numberOfBCHApproximationTerms %d", + ), + numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d",), + numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d",), + numberOfPyramidLevels=dict(argstr="--numberOfPyramidLevels %d",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), outputCheckerboardVolume=dict( - argstr='--outputCheckerboardVolume %s', - hash_files=False, + argstr="--outputCheckerboardVolume %s", hash_files=False, ), - outputDebug=dict(argstr='--outputDebug ', ), + outputDebug=dict(argstr="--outputDebug ",), outputDisplacementFieldPrefix=dict( - argstr='--outputDisplacementFieldPrefix %s', ), - outputDisplacementFieldVolume=dict( - argstr='--outputDisplacementFieldVolume %s', - hash_files=False, - ), - outputNormalized=dict(argstr='--outputNormalized ', ), - outputPixelType=dict(argstr='--outputPixelType %s', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, + argstr="--outputDisplacementFieldPrefix %s", ), - promptUser=dict(argstr='--promptUser ', ), - registrationFilterType=dict(argstr='--registrationFilterType %s', ), - seedForBOBF=dict( - argstr='--seedForBOBF %s', - sep=',', - ), - 
smoothDisplacementFieldSigma=dict( - argstr='--smoothDisplacementFieldSigma %f', ), - upFieldSmoothing=dict(argstr='--upFieldSmoothing %f', ), - upperThresholdForBOBF=dict(argstr='--upperThresholdForBOBF %d', ), - use_vanilla_dem=dict(argstr='--use_vanilla_dem ', ), + outputDisplacementFieldVolume=dict( + argstr="--outputDisplacementFieldVolume %s", hash_files=False, + ), + outputNormalized=dict(argstr="--outputNormalized ",), + outputPixelType=dict(argstr="--outputPixelType %s",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + promptUser=dict(argstr="--promptUser ",), + registrationFilterType=dict(argstr="--registrationFilterType %s",), + seedForBOBF=dict(argstr="--seedForBOBF %s", sep=",",), + smoothDisplacementFieldSigma=dict(argstr="--smoothDisplacementFieldSigma %f",), + upFieldSmoothing=dict(argstr="--upFieldSmoothing %f",), + upperThresholdForBOBF=dict(argstr="--upperThresholdForBOBF %d",), + use_vanilla_dem=dict(argstr="--use_vanilla_dem ",), ) inputs = BRAINSDemonWarp.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSDemonWarp_outputs(): output_map = dict( - outputCheckerboardVolume=dict(extensions=None, ), - outputDisplacementFieldVolume=dict(extensions=None, ), - outputVolume=dict(extensions=None, ), + outputCheckerboardVolume=dict(extensions=None,), + outputDisplacementFieldVolume=dict(extensions=None,), + outputVolume=dict(extensions=None,), ) outputs = BRAINSDemonWarp.output_spec() diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py index 58c903d144..9d558f1e68 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py @@ -5,149 +5,103 @@ def test_BRAINSFit_inputs(): input_map = dict( NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_00=dict( - argstr='--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_00 ', ), + argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_00 ", + ), NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_01=dict( - argstr='--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_01 ', ), + argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_01 ", + ), NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_02=dict( - argstr='--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_02 ', ), - ROIAutoClosingSize=dict(argstr='--ROIAutoClosingSize %f', ), - ROIAutoDilateSize=dict(argstr='--ROIAutoDilateSize %f', ), - args=dict(argstr='%s', ), - backgroundFillValue=dict(argstr='--backgroundFillValue %f', ), - bsplineTransform=dict( - argstr='--bsplineTransform %s', - hash_files=False, + argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_02 ", ), + ROIAutoClosingSize=dict(argstr="--ROIAutoClosingSize %f",), + ROIAutoDilateSize=dict(argstr="--ROIAutoDilateSize %f",), + args=dict(argstr="%s",), + backgroundFillValue=dict(argstr="--backgroundFillValue %f",), + bsplineTransform=dict(argstr="--bsplineTransform %s", hash_files=False,), costFunctionConvergenceFactor=dict( - argstr='--costFunctionConvergenceFactor %f', ), - costMetric=dict(argstr='--costMetric %s', ), - debugLevel=dict(argstr='--debugLevel %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - failureExitCode=dict(argstr='--failureExitCode %d', ), - fixedBinaryVolume=dict( - argstr='--fixedBinaryVolume %s', - extensions=None, - ), - fixedVolume=dict( - argstr='--fixedVolume %s', - extensions=None, - ), - fixedVolumeTimeIndex=dict(argstr='--fixedVolumeTimeIndex %d', ), - 
forceMINumberOfThreads=dict(argstr='--forceMINumberOfThreads %d', ), - gui=dict(argstr='--gui ', ), - histogramMatch=dict(argstr='--histogramMatch ', ), - initialTransform=dict( - argstr='--initialTransform %s', - extensions=None, - ), - initializeTransformMode=dict(argstr='--initializeTransformMode %s', ), - interpolationMode=dict(argstr='--interpolationMode %s', ), - linearTransform=dict( - argstr='--linearTransform %s', - hash_files=False, - ), - maskInferiorCutOffFromCenter=dict( - argstr='--maskInferiorCutOffFromCenter %f', ), - maskProcessingMode=dict(argstr='--maskProcessingMode %s', ), - maxBSplineDisplacement=dict(argstr='--maxBSplineDisplacement %f', ), - maximumStepLength=dict(argstr='--maximumStepLength %f', ), - medianFilterSize=dict( - argstr='--medianFilterSize %s', - sep=',', - ), - minimumStepLength=dict( - argstr='--minimumStepLength %s', - sep=',', - ), - movingBinaryVolume=dict( - argstr='--movingBinaryVolume %s', - extensions=None, - ), - movingVolume=dict( - argstr='--movingVolume %s', - extensions=None, - ), - movingVolumeTimeIndex=dict(argstr='--movingVolumeTimeIndex %d', ), - numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), - numberOfIterations=dict( - argstr='--numberOfIterations %s', - sep=',', - ), - numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ), - numberOfSamples=dict(argstr='--numberOfSamples %d', ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), + argstr="--costFunctionConvergenceFactor %f", + ), + costMetric=dict(argstr="--costMetric %s",), + debugLevel=dict(argstr="--debugLevel %d",), + environ=dict(nohash=True, usedefault=True,), + failureExitCode=dict(argstr="--failureExitCode %d",), + fixedBinaryVolume=dict(argstr="--fixedBinaryVolume %s", extensions=None,), + fixedVolume=dict(argstr="--fixedVolume %s", extensions=None,), + fixedVolumeTimeIndex=dict(argstr="--fixedVolumeTimeIndex %d",), + forceMINumberOfThreads=dict(argstr="--forceMINumberOfThreads %d",), + gui=dict(argstr="--gui ",), + histogramMatch=dict(argstr="--histogramMatch ",), + initialTransform=dict(argstr="--initialTransform %s", extensions=None,), + initializeTransformMode=dict(argstr="--initializeTransformMode %s",), + interpolationMode=dict(argstr="--interpolationMode %s",), + linearTransform=dict(argstr="--linearTransform %s", hash_files=False,), + maskInferiorCutOffFromCenter=dict(argstr="--maskInferiorCutOffFromCenter %f",), + maskProcessingMode=dict(argstr="--maskProcessingMode %s",), + maxBSplineDisplacement=dict(argstr="--maxBSplineDisplacement %f",), + maximumStepLength=dict(argstr="--maximumStepLength %f",), + medianFilterSize=dict(argstr="--medianFilterSize %s", sep=",",), + minimumStepLength=dict(argstr="--minimumStepLength %s", sep=",",), + movingBinaryVolume=dict(argstr="--movingBinaryVolume %s", extensions=None,), + movingVolume=dict(argstr="--movingVolume %s", extensions=None,), + movingVolumeTimeIndex=dict(argstr="--movingVolumeTimeIndex %d",), + numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d",), + numberOfIterations=dict(argstr="--numberOfIterations %s", sep=",",), + numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d",), + numberOfSamples=dict(argstr="--numberOfSamples %d",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), outputFixedVolumeROI=dict( - argstr='--outputFixedVolumeROI %s', - hash_files=False, + argstr="--outputFixedVolumeROI %s", hash_files=False, ), outputMovingVolumeROI=dict( - argstr='--outputMovingVolumeROI %s', - hash_files=False, - ), - outputTransform=dict( - 
argstr='--outputTransform %s', - hash_files=False, - ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), - outputVolumePixelType=dict(argstr='--outputVolumePixelType %s', ), - permitParameterVariation=dict( - argstr='--permitParameterVariation %s', - sep=',', - ), - projectedGradientTolerance=dict( - argstr='--projectedGradientTolerance %f', ), - promptUser=dict(argstr='--promptUser ', ), - relaxationFactor=dict(argstr='--relaxationFactor %f', ), - removeIntensityOutliers=dict(argstr='--removeIntensityOutliers %f', ), - reproportionScale=dict(argstr='--reproportionScale %f', ), - scaleOutputValues=dict(argstr='--scaleOutputValues ', ), - skewScale=dict(argstr='--skewScale %f', ), - splineGridSize=dict( - argstr='--splineGridSize %s', - sep=',', - ), + argstr="--outputMovingVolumeROI %s", hash_files=False, + ), + outputTransform=dict(argstr="--outputTransform %s", hash_files=False,), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + outputVolumePixelType=dict(argstr="--outputVolumePixelType %s",), + permitParameterVariation=dict(argstr="--permitParameterVariation %s", sep=",",), + projectedGradientTolerance=dict(argstr="--projectedGradientTolerance %f",), + promptUser=dict(argstr="--promptUser ",), + relaxationFactor=dict(argstr="--relaxationFactor %f",), + removeIntensityOutliers=dict(argstr="--removeIntensityOutliers %f",), + reproportionScale=dict(argstr="--reproportionScale %f",), + scaleOutputValues=dict(argstr="--scaleOutputValues ",), + skewScale=dict(argstr="--skewScale %f",), + splineGridSize=dict(argstr="--splineGridSize %s", sep=",",), strippedOutputTransform=dict( - argstr='--strippedOutputTransform %s', - hash_files=False, + argstr="--strippedOutputTransform %s", hash_files=False, ), - transformType=dict( - argstr='--transformType %s', - sep=',', - ), - translationScale=dict(argstr='--translationScale %f', ), - useAffine=dict(argstr='--useAffine ', ), - useBSpline=dict(argstr='--useBSpline ', ), + transformType=dict(argstr="--transformType %s", sep=",",), + translationScale=dict(argstr="--translationScale %f",), + useAffine=dict(argstr="--useAffine ",), + useBSpline=dict(argstr="--useBSpline ",), useCachingOfBSplineWeightsMode=dict( - argstr='--useCachingOfBSplineWeightsMode %s', ), + argstr="--useCachingOfBSplineWeightsMode %s", + ), useExplicitPDFDerivativesMode=dict( - argstr='--useExplicitPDFDerivativesMode %s', ), - useRigid=dict(argstr='--useRigid ', ), - useScaleSkewVersor3D=dict(argstr='--useScaleSkewVersor3D ', ), - useScaleVersor3D=dict(argstr='--useScaleVersor3D ', ), - writeOutputTransformInFloat=dict( - argstr='--writeOutputTransformInFloat ', ), - writeTransformOnFailure=dict(argstr='--writeTransformOnFailure ', ), + argstr="--useExplicitPDFDerivativesMode %s", + ), + useRigid=dict(argstr="--useRigid ",), + useScaleSkewVersor3D=dict(argstr="--useScaleSkewVersor3D ",), + useScaleVersor3D=dict(argstr="--useScaleVersor3D ",), + writeOutputTransformInFloat=dict(argstr="--writeOutputTransformInFloat ",), + writeTransformOnFailure=dict(argstr="--writeTransformOnFailure ",), ) inputs = BRAINSFit.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSFit_outputs(): output_map = dict( - bsplineTransform=dict(extensions=None, ), - linearTransform=dict(extensions=None, ), - outputFixedVolumeROI=dict(extensions=None, ), - outputMovingVolumeROI=dict(extensions=None, ), - outputTransform=dict(extensions=None, 
), - outputVolume=dict(extensions=None, ), - strippedOutputTransform=dict(extensions=None, ), + bsplineTransform=dict(extensions=None,), + linearTransform=dict(extensions=None,), + outputFixedVolumeROI=dict(extensions=None,), + outputMovingVolumeROI=dict(extensions=None,), + outputTransform=dict(extensions=None,), + outputVolume=dict(extensions=None,), + strippedOutputTransform=dict(extensions=None,), ) outputs = BRAINSFit.output_spec() diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py index 092689442d..2d94f19a5f 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py @@ -4,48 +4,29 @@ def test_BRAINSResample_inputs(): input_map = dict( - args=dict(argstr='%s', ), - defaultValue=dict(argstr='--defaultValue %f', ), - deformationVolume=dict( - argstr='--deformationVolume %s', - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gridSpacing=dict( - argstr='--gridSpacing %s', - sep=',', - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - interpolationMode=dict(argstr='--interpolationMode %s', ), - inverseTransform=dict(argstr='--inverseTransform ', ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), - pixelType=dict(argstr='--pixelType %s', ), - referenceVolume=dict( - argstr='--referenceVolume %s', - extensions=None, - ), - warpTransform=dict( - argstr='--warpTransform %s', - extensions=None, - ), + args=dict(argstr="%s",), + defaultValue=dict(argstr="--defaultValue %f",), + deformationVolume=dict(argstr="--deformationVolume %s", extensions=None,), + environ=dict(nohash=True, usedefault=True,), + gridSpacing=dict(argstr="--gridSpacing %s", sep=",",), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + interpolationMode=dict(argstr="--interpolationMode %s",), + inverseTransform=dict(argstr="--inverseTransform ",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + pixelType=dict(argstr="--pixelType %s",), + referenceVolume=dict(argstr="--referenceVolume %s", extensions=None,), + warpTransform=dict(argstr="--warpTransform %s", extensions=None,), ) inputs = BRAINSResample.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSResample_outputs(): - output_map = dict(outputVolume=dict(extensions=None, ), ) + output_map = dict(outputVolume=dict(extensions=None,),) outputs = BRAINSResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py b/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py index e3e7124535..65c6016db6 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py @@ -4,28 +4,24 @@ def test_FiducialRegistration_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixedLandmarks=dict(argstr='--fixedLandmarks %s...', ), - movingLandmarks=dict(argstr='--movingLandmarks %s...', ), - outputMessage=dict(argstr='--outputMessage %s', 
), - rms=dict(argstr='--rms %f', ), - saveTransform=dict( - argstr='--saveTransform %s', - hash_files=False, - ), - transformType=dict(argstr='--transformType %s', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + fixedLandmarks=dict(argstr="--fixedLandmarks %s...",), + movingLandmarks=dict(argstr="--movingLandmarks %s...",), + outputMessage=dict(argstr="--outputMessage %s",), + rms=dict(argstr="--rms %f",), + saveTransform=dict(argstr="--saveTransform %s", hash_files=False,), + transformType=dict(argstr="--transformType %s",), ) inputs = FiducialRegistration.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FiducialRegistration_outputs(): - output_map = dict(saveTransform=dict(extensions=None, ), ) + output_map = dict(saveTransform=dict(extensions=None,),) outputs = FiducialRegistration.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py b/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py index fb0a3dd88e..098360c1e3 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py @@ -4,111 +4,78 @@ def test_VBRAINSDemonWarp_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), arrayOfPyramidLevelIterations=dict( - argstr='--arrayOfPyramidLevelIterations %s', - sep=',', + argstr="--arrayOfPyramidLevelIterations %s", sep=",", ), - backgroundFillValue=dict(argstr='--backgroundFillValue %d', ), + backgroundFillValue=dict(argstr="--backgroundFillValue %d",), checkerboardPatternSubdivisions=dict( - argstr='--checkerboardPatternSubdivisions %s', - sep=',', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixedBinaryVolume=dict( - argstr='--fixedBinaryVolume %s', - extensions=None, - ), - fixedVolume=dict(argstr='--fixedVolume %s...', ), - gradient_type=dict(argstr='--gradient_type %s', ), - gui=dict(argstr='--gui ', ), - histogramMatch=dict(argstr='--histogramMatch ', ), + argstr="--checkerboardPatternSubdivisions %s", sep=",", + ), + environ=dict(nohash=True, usedefault=True,), + fixedBinaryVolume=dict(argstr="--fixedBinaryVolume %s", extensions=None,), + fixedVolume=dict(argstr="--fixedVolume %s...",), + gradient_type=dict(argstr="--gradient_type %s",), + gui=dict(argstr="--gui ",), + histogramMatch=dict(argstr="--histogramMatch ",), initializeWithDisplacementField=dict( - argstr='--initializeWithDisplacementField %s', - extensions=None, + argstr="--initializeWithDisplacementField %s", extensions=None, ), initializeWithTransform=dict( - argstr='--initializeWithTransform %s', - extensions=None, - ), - inputPixelType=dict(argstr='--inputPixelType %s', ), - interpolationMode=dict(argstr='--interpolationMode %s', ), - lowerThresholdForBOBF=dict(argstr='--lowerThresholdForBOBF %d', ), - makeBOBF=dict(argstr='--makeBOBF ', ), - max_step_length=dict(argstr='--max_step_length %f', ), - medianFilterSize=dict( - argstr='--medianFilterSize %s', - sep=',', - ), - minimumFixedPyramid=dict( - argstr='--minimumFixedPyramid %s', - sep=',', - ), - minimumMovingPyramid=dict( - argstr='--minimumMovingPyramid %s', - sep=',', - ), - movingBinaryVolume=dict( - argstr='--movingBinaryVolume %s', - extensions=None, - ), - movingVolume=dict(argstr='--movingVolume %s...', ), - neighborhoodForBOBF=dict( - 
argstr='--neighborhoodForBOBF %s', - sep=',', - ), + argstr="--initializeWithTransform %s", extensions=None, + ), + inputPixelType=dict(argstr="--inputPixelType %s",), + interpolationMode=dict(argstr="--interpolationMode %s",), + lowerThresholdForBOBF=dict(argstr="--lowerThresholdForBOBF %d",), + makeBOBF=dict(argstr="--makeBOBF ",), + max_step_length=dict(argstr="--max_step_length %f",), + medianFilterSize=dict(argstr="--medianFilterSize %s", sep=",",), + minimumFixedPyramid=dict(argstr="--minimumFixedPyramid %s", sep=",",), + minimumMovingPyramid=dict(argstr="--minimumMovingPyramid %s", sep=",",), + movingBinaryVolume=dict(argstr="--movingBinaryVolume %s", extensions=None,), + movingVolume=dict(argstr="--movingVolume %s...",), + neighborhoodForBOBF=dict(argstr="--neighborhoodForBOBF %s", sep=",",), numberOfBCHApproximationTerms=dict( - argstr='--numberOfBCHApproximationTerms %d', ), - numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), - numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ), - numberOfPyramidLevels=dict(argstr='--numberOfPyramidLevels %d', ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), + argstr="--numberOfBCHApproximationTerms %d", + ), + numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d",), + numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d",), + numberOfPyramidLevels=dict(argstr="--numberOfPyramidLevels %d",), + numberOfThreads=dict(argstr="--numberOfThreads %d",), outputCheckerboardVolume=dict( - argstr='--outputCheckerboardVolume %s', - hash_files=False, + argstr="--outputCheckerboardVolume %s", hash_files=False, ), - outputDebug=dict(argstr='--outputDebug ', ), + outputDebug=dict(argstr="--outputDebug ",), outputDisplacementFieldPrefix=dict( - argstr='--outputDisplacementFieldPrefix %s', ), - outputDisplacementFieldVolume=dict( - argstr='--outputDisplacementFieldVolume %s', - hash_files=False, - ), - outputNormalized=dict(argstr='--outputNormalized ', ), - outputPixelType=dict(argstr='--outputPixelType %s', ), - outputVolume=dict( - argstr='--outputVolume %s', - hash_files=False, - ), - promptUser=dict(argstr='--promptUser ', ), - registrationFilterType=dict(argstr='--registrationFilterType %s', ), - seedForBOBF=dict( - argstr='--seedForBOBF %s', - sep=',', - ), - smoothDisplacementFieldSigma=dict( - argstr='--smoothDisplacementFieldSigma %f', ), - upFieldSmoothing=dict(argstr='--upFieldSmoothing %f', ), - upperThresholdForBOBF=dict(argstr='--upperThresholdForBOBF %d', ), - use_vanilla_dem=dict(argstr='--use_vanilla_dem ', ), - weightFactors=dict( - argstr='--weightFactors %s', - sep=',', + argstr="--outputDisplacementFieldPrefix %s", ), + outputDisplacementFieldVolume=dict( + argstr="--outputDisplacementFieldVolume %s", hash_files=False, + ), + outputNormalized=dict(argstr="--outputNormalized ",), + outputPixelType=dict(argstr="--outputPixelType %s",), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + promptUser=dict(argstr="--promptUser ",), + registrationFilterType=dict(argstr="--registrationFilterType %s",), + seedForBOBF=dict(argstr="--seedForBOBF %s", sep=",",), + smoothDisplacementFieldSigma=dict(argstr="--smoothDisplacementFieldSigma %f",), + upFieldSmoothing=dict(argstr="--upFieldSmoothing %f",), + upperThresholdForBOBF=dict(argstr="--upperThresholdForBOBF %d",), + use_vanilla_dem=dict(argstr="--use_vanilla_dem ",), + weightFactors=dict(argstr="--weightFactors %s", sep=",",), ) inputs = VBRAINSDemonWarp.input_spec() for key, metadata in list(input_map.items()): for metakey, value 
in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_VBRAINSDemonWarp_outputs(): output_map = dict( - outputCheckerboardVolume=dict(extensions=None, ), - outputDisplacementFieldVolume=dict(extensions=None, ), - outputVolume=dict(extensions=None, ), + outputCheckerboardVolume=dict(extensions=None,), + outputDisplacementFieldVolume=dict(extensions=None,), + outputVolume=dict(extensions=None,), ) outputs = VBRAINSDemonWarp.output_spec() diff --git a/nipype/interfaces/slicer/segmentation/__init__.py b/nipype/interfaces/slicer/segmentation/__init__.py index 5b3cf6d468..48fdc62f8c 100644 --- a/nipype/interfaces/slicer/segmentation/__init__.py +++ b/nipype/interfaces/slicer/segmentation/__init__.py @@ -1,4 +1,3 @@ # -*- coding: utf-8 -*- -from .specialized import (RobustStatisticsSegmenter, EMSegmentCommandLine, - BRAINSROIAuto) +from .specialized import RobustStatisticsSegmenter, EMSegmentCommandLine, BRAINSROIAuto from .simpleregiongrowingsegmentation import SimpleRegionGrowingSegmentation diff --git a/nipype/interfaces/slicer/segmentation/simpleregiongrowingsegmentation.py b/nipype/interfaces/slicer/segmentation/simpleregiongrowingsegmentation.py index d466ccc1ac..a32a8dde5c 100644 --- a/nipype/interfaces/slicer/segmentation/simpleregiongrowingsegmentation.py +++ b/nipype/interfaces/slicer/segmentation/simpleregiongrowingsegmentation.py @@ -3,46 +3,57 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os class SimpleRegionGrowingSegmentationInputSpec(CommandLineInputSpec): smoothingIterations = traits.Int( - desc="Number of smoothing iterations", - argstr="--smoothingIterations %d") - timestep = traits.Float( - desc="Timestep for curvature flow", argstr="--timestep %f") + desc="Number of smoothing iterations", argstr="--smoothingIterations %d" + ) + timestep = traits.Float(desc="Timestep for curvature flow", argstr="--timestep %f") iterations = traits.Int( - desc="Number of iterations of region growing", - argstr="--iterations %d") + desc="Number of iterations of region growing", argstr="--iterations %d" + ) multiplier = traits.Float( desc="Number of standard deviations to include in intensity model", - argstr="--multiplier %f") + argstr="--multiplier %f", + ) neighborhood = traits.Int( - desc= - "The radius of the neighborhood over which to calculate intensity model", - argstr="--neighborhood %d") + desc="The radius of the neighborhood over which to calculate intensity model", + argstr="--neighborhood %d", + ) labelvalue = traits.Int( - desc= - "The integer value (0-255) to use for the segmentation results. This will determine the color of the segmentation that will be generated by the Region growing algorithm", - argstr="--labelvalue %d") + desc="The integer value (0-255) to use for the segmentation results. 
This will determine the color of the segmentation that will be generated by the Region growing algorithm", + argstr="--labelvalue %d", + ) seed = InputMultiPath( traits.List(traits.Float(), minlen=3, maxlen=3), desc="Seed point(s) for region growing", - argstr="--seed %s...") + argstr="--seed %s...", + ) inputVolume = File( - position=-2, - desc="Input volume to be filtered", - exists=True, - argstr="%s") + position=-2, desc="Input volume to be filtered", exists=True, argstr="%s" + ) outputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", - argstr="%s") + argstr="%s", + ) class SimpleRegionGrowingSegmentationOutputSpec(TraitedSpec): @@ -69,4 +80,4 @@ class SimpleRegionGrowingSegmentation(SEMLikeCommandLine): input_spec = SimpleRegionGrowingSegmentationInputSpec output_spec = SimpleRegionGrowingSegmentationOutputSpec _cmd = "SimpleRegionGrowingSegmentation " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} diff --git a/nipype/interfaces/slicer/segmentation/specialized.py b/nipype/interfaces/slicer/segmentation/specialized.py index fdfeb74e37..0ae90d0334 100644 --- a/nipype/interfaces/slicer/segmentation/specialized.py +++ b/nipype/interfaces/slicer/segmentation/specialized.py @@ -3,49 +3,59 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os class RobustStatisticsSegmenterInputSpec(CommandLineInputSpec): expectedVolume = traits.Float( desc="The approximate volume of the object, in mL.", - argstr="--expectedVolume %f") + argstr="--expectedVolume %f", + ) intensityHomogeneity = traits.Float( - desc= - "What is the homogeneity of intensity within the object? Given constant intensity at 1.0 score and extreme fluctuating intensity at 0.", - argstr="--intensityHomogeneity %f") + desc="What is the homogeneity of intensity within the object? 
Given constant intensity at 1.0 score and extreme fluctuating intensity at 0.", + argstr="--intensityHomogeneity %f", + ) curvatureWeight = traits.Float( - desc= - "Given sphere 1.0 score and extreme rough bounday/surface 0 score, what is the expected smoothness of the object?", - argstr="--curvatureWeight %f") + desc="Given sphere 1.0 score and extreme rough bounday/surface 0 score, what is the expected smoothness of the object?", + argstr="--curvatureWeight %f", + ) labelValue = traits.Int( - desc="Label value of the output image", argstr="--labelValue %d") + desc="Label value of the output image", argstr="--labelValue %d" + ) maxRunningTime = traits.Float( desc="The program will stop if this time is reached.", - argstr="--maxRunningTime %f") + argstr="--maxRunningTime %f", + ) originalImageFileName = File( - position=-3, - desc="Original image to be segmented", - exists=True, - argstr="%s") + position=-3, desc="Original image to be segmented", exists=True, argstr="%s" + ) labelImageFileName = File( - position=-2, - desc="Label image for initialization", - exists=True, - argstr="%s") + position=-2, desc="Label image for initialization", exists=True, argstr="%s" + ) segmentedImageFileName = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Segmented image", - argstr="%s") + argstr="%s", + ) class RobustStatisticsSegmenterOutputSpec(TraitedSpec): - segmentedImageFileName = File( - position=-1, desc="Segmented image", exists=True) + segmentedImageFileName = File(position=-1, desc="Segmented image", exists=True) class RobustStatisticsSegmenter(SEMLikeCommandLine): @@ -68,116 +78,118 @@ class RobustStatisticsSegmenter(SEMLikeCommandLine): input_spec = RobustStatisticsSegmenterInputSpec output_spec = RobustStatisticsSegmenterOutputSpec _cmd = "RobustStatisticsSegmenter " - _outputs_filenames = { - 'segmentedImageFileName': 'segmentedImageFileName.nii' - } + _outputs_filenames = {"segmentedImageFileName": "segmentedImageFileName.nii"} class EMSegmentCommandLineInputSpec(CommandLineInputSpec): mrmlSceneFileName = File( desc="Active MRML scene that contains EMSegment algorithm parameters.", exists=True, - argstr="--mrmlSceneFileName %s") + argstr="--mrmlSceneFileName %s", + ) resultVolumeFileName = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "The file name that the segmentation result volume will be written to.", - argstr="--resultVolumeFileName %s") + desc="The file name that the segmentation result volume will be written to.", + argstr="--resultVolumeFileName %s", + ) targetVolumeFileNames = InputMultiPath( File(exists=True), - desc= - "File names of target volumes (to be segmented). The number of target images must be equal to the number of target images specified in the parameter set, and these images must be spatially aligned.", - argstr="--targetVolumeFileNames %s...") + desc="File names of target volumes (to be segmented). 
The number of target images must be equal to the number of target images specified in the parameter set, and these images must be spatially aligned.", + argstr="--targetVolumeFileNames %s...", + ) intermediateResultsDirectory = Directory( - desc= - "Directory where EMSegmenter will write intermediate data (e.g., aligned atlas data).", + desc="Directory where EMSegmenter will write intermediate data (e.g., aligned atlas data).", exists=True, - argstr="--intermediateResultsDirectory %s") + argstr="--intermediateResultsDirectory %s", + ) parametersMRMLNodeName = traits.Str( - desc= - "The name of the EMSegment parameters node within the active MRML scene. Leave blank for default.", - argstr="--parametersMRMLNodeName %s") + desc="The name of the EMSegment parameters node within the active MRML scene. Leave blank for default.", + argstr="--parametersMRMLNodeName %s", + ) disableMultithreading = traits.Int( - desc= - "Disable multithreading for the EMSegmenter algorithm only! Preprocessing might still run in multi-threaded mode. -1: Do not overwrite default value. 0: Disable. 1: Enable.", - argstr="--disableMultithreading %d") + desc="Disable multithreading for the EMSegmenter algorithm only! Preprocessing might still run in multi-threaded mode. -1: Do not overwrite default value. 0: Disable. 1: Enable.", + argstr="--disableMultithreading %d", + ) dontUpdateIntermediateData = traits.Int( - desc= - "Disable update of intermediate results. -1: Do not overwrite default value. 0: Disable. 1: Enable.", - argstr="--dontUpdateIntermediateData %d") + desc="Disable update of intermediate results. -1: Do not overwrite default value. 0: Disable. 1: Enable.", + argstr="--dontUpdateIntermediateData %d", + ) verbose = traits.Bool(desc="Enable verbose output.", argstr="--verbose ") loadTargetCentered = traits.Bool( - desc="Read target files centered.", argstr="--loadTargetCentered ") + desc="Read target files centered.", argstr="--loadTargetCentered " + ) loadAtlasNonCentered = traits.Bool( - desc="Read atlas files non-centered.", - argstr="--loadAtlasNonCentered ") + desc="Read atlas files non-centered.", argstr="--loadAtlasNonCentered " + ) taskPreProcessingSetting = traits.Str( desc="Specifies the different task parameter. Leave blank for default.", - argstr="--taskPreProcessingSetting %s") + argstr="--taskPreProcessingSetting %s", + ) keepTempFiles = traits.Bool( - desc= - "If flag is set then at the end of command the temporary files are not removed", - argstr="--keepTempFiles ") + desc="If flag is set then at the end of command the temporary files are not removed", + argstr="--keepTempFiles ", + ) resultStandardVolumeFileName = File( - desc= - "Used for testing. Compare segmentation results to this image and return EXIT_FAILURE if they do not match.", + desc="Used for testing. Compare segmentation results to this image and return EXIT_FAILURE if they do not match.", exists=True, - argstr="--resultStandardVolumeFileName %s") + argstr="--resultStandardVolumeFileName %s", + ) dontWriteResults = traits.Bool( - desc= - "Used for testing. Don't actually write the resulting labelmap to disk.", - argstr="--dontWriteResults ") + desc="Used for testing. Don't actually write the resulting labelmap to disk.", + argstr="--dontWriteResults ", + ) generateEmptyMRMLSceneAndQuit = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Used for testing. Only write a scene with default mrml parameters.", - argstr="--generateEmptyMRMLSceneAndQuit %s") + desc="Used for testing. 
Only write a scene with default mrml parameters.", + argstr="--generateEmptyMRMLSceneAndQuit %s", + ) resultMRMLSceneFileName = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Write out the MRML scene after command line substitutions have been made.", - argstr="--resultMRMLSceneFileName %s") + desc="Write out the MRML scene after command line substitutions have been made.", + argstr="--resultMRMLSceneFileName %s", + ) disableCompression = traits.Bool( desc="Don't use compression when writing result image to disk.", - argstr="--disableCompression ") + argstr="--disableCompression ", + ) atlasVolumeFileNames = InputMultiPath( File(exists=True), - desc= - "Use an alternative atlas to the one that is specified by the mrml file - note the order matters ! ", - argstr="--atlasVolumeFileNames %s...") + desc="Use an alternative atlas to the one that is specified by the mrml file - note the order matters ! ", + argstr="--atlasVolumeFileNames %s...", + ) registrationPackage = traits.Str( - desc= - "specify the registration package for preprocessing (CMTK or BRAINS or PLASTIMATCH or DEMONS)", - argstr="--registrationPackage %s") + desc="specify the registration package for preprocessing (CMTK or BRAINS or PLASTIMATCH or DEMONS)", + argstr="--registrationPackage %s", + ) registrationAffineType = traits.Int( - desc= - "specify the accuracy of the affine registration. -2: Do not overwrite default, -1: Test, 0: Disable, 1: Fast, 2: Accurate", - argstr="--registrationAffineType %d") + desc="specify the accuracy of the affine registration. -2: Do not overwrite default, -1: Test, 0: Disable, 1: Fast, 2: Accurate", + argstr="--registrationAffineType %d", + ) registrationDeformableType = traits.Int( - desc= - "specify the accuracy of the deformable registration. -2: Do not overwrite default, -1: Test, 0: Disable, 1: Fast, 2: Accurate", - argstr="--registrationDeformableType %d") + desc="specify the accuracy of the deformable registration. -2: Do not overwrite default, -1: Test, 0: Disable, 1: Fast, 2: Accurate", + argstr="--registrationDeformableType %d", + ) class EMSegmentCommandLineOutputSpec(TraitedSpec): resultVolumeFileName = File( - desc= - "The file name that the segmentation result volume will be written to.", - exists=True) + desc="The file name that the segmentation result volume will be written to.", + exists=True, + ) generateEmptyMRMLSceneAndQuit = File( - desc= - "Used for testing. Only write a scene with default mrml parameters.", - exists=True) + desc="Used for testing. 
Only write a scene with default mrml parameters.", + exists=True, + ) resultMRMLSceneFileName = File( - desc= - "Write out the MRML scene after command line substitutions have been made.", - exists=True) + desc="Write out the MRML scene after command line substitutions have been made.", + exists=True, + ) class EMSegmentCommandLine(SEMLikeCommandLine): @@ -206,9 +218,9 @@ class EMSegmentCommandLine(SEMLikeCommandLine): output_spec = EMSegmentCommandLineOutputSpec _cmd = "EMSegmentCommandLine " _outputs_filenames = { - 'generateEmptyMRMLSceneAndQuit': 'generateEmptyMRMLSceneAndQuit', - 'resultMRMLSceneFileName': 'resultMRMLSceneFileName', - 'resultVolumeFileName': 'resultVolumeFileName.mhd' + "generateEmptyMRMLSceneAndQuit": "generateEmptyMRMLSceneAndQuit", + "resultMRMLSceneFileName": "resultMRMLSceneFileName", + "resultVolumeFileName": "resultVolumeFileName.mhd", } @@ -216,34 +228,38 @@ class BRAINSROIAutoInputSpec(CommandLineInputSpec): inputVolume = File( desc="The input image for finding the largest region filled mask.", exists=True, - argstr="--inputVolume %s") + argstr="--inputVolume %s", + ) outputROIMaskVolume = traits.Either( traits.Bool, File(), hash_files=False, desc="The ROI automatically found from the input image.", - argstr="--outputROIMaskVolume %s") + argstr="--outputROIMaskVolume %s", + ) outputClippedVolumeROI = traits.Either( traits.Bool, File(), hash_files=False, desc="The inputVolume clipped to the region of the brain mask.", - argstr="--outputClippedVolumeROI %s") + argstr="--outputClippedVolumeROI %s", + ) otsuPercentileThreshold = traits.Float( desc="Parameter to the Otsu threshold algorithm.", - argstr="--otsuPercentileThreshold %f") + argstr="--otsuPercentileThreshold %f", + ) thresholdCorrectionFactor = traits.Float( - desc= - "A factor to scale the Otsu algorithm's result threshold, in case clipping mangles the image.", - argstr="--thresholdCorrectionFactor %f") + desc="A factor to scale the Otsu algorithm's result threshold, in case clipping mangles the image.", + argstr="--thresholdCorrectionFactor %f", + ) closingSize = traits.Float( - desc= - "The Closing Size (in millimeters) for largest connected filled mask. This value is divided by image spacing and rounded to the next largest voxel number.", - argstr="--closingSize %f") + desc="The Closing Size (in millimeters) for largest connected filled mask. This value is divided by image spacing and rounded to the next largest voxel number.", + argstr="--closingSize %f", + ) ROIAutoDilateSize = traits.Float( - desc= - "This flag is only relavent when using ROIAUTO mode for initializing masks. It defines the final dilation size to capture a bit of background outside the tissue region. At setting of 10mm has been shown to help regularize a BSpline registration type so that there is some background constraints to match the edges of the head better.", - argstr="--ROIAutoDilateSize %f") + desc="This flag is only relavent when using ROIAUTO mode for initializing masks. It defines the final dilation size to capture a bit of background outside the tissue region. 
At setting of 10mm has been shown to help regularize a BSpline registration type so that there is some background constraints to match the edges of the head better.", + argstr="--ROIAutoDilateSize %f", + ) outputVolumePixelType = traits.Enum( "float", "short", @@ -251,20 +267,22 @@ class BRAINSROIAutoInputSpec(CommandLineInputSpec): "int", "uint", "uchar", - desc= - "The output image Pixel Type is the scalar datatype for representation of the Output Volume.", - argstr="--outputVolumePixelType %s") + desc="The output image Pixel Type is the scalar datatype for representation of the Output Volume.", + argstr="--outputVolumePixelType %s", + ) numberOfThreads = traits.Int( desc="Explicitly specify the maximum number of threads to use.", - argstr="--numberOfThreads %d") + argstr="--numberOfThreads %d", + ) class BRAINSROIAutoOutputSpec(TraitedSpec): outputROIMaskVolume = File( - desc="The ROI automatically found from the input image.", exists=True) + desc="The ROI automatically found from the input image.", exists=True + ) outputClippedVolumeROI = File( - desc="The inputVolume clipped to the region of the brain mask.", - exists=True) + desc="The inputVolume clipped to the region of the brain mask.", exists=True + ) class BRAINSROIAuto(SEMLikeCommandLine): @@ -289,6 +307,6 @@ class BRAINSROIAuto(SEMLikeCommandLine): output_spec = BRAINSROIAutoOutputSpec _cmd = "BRAINSROIAuto " _outputs_filenames = { - 'outputROIMaskVolume': 'outputROIMaskVolume.nii', - 'outputClippedVolumeROI': 'outputClippedVolumeROI.nii' + "outputROIMaskVolume": "outputROIMaskVolume.nii", + "outputClippedVolumeROI": "outputClippedVolumeROI.nii", } diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py index 57959c7102..c88798ec2d 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py @@ -4,40 +4,31 @@ def test_BRAINSROIAuto_inputs(): input_map = dict( - ROIAutoDilateSize=dict(argstr='--ROIAutoDilateSize %f', ), - args=dict(argstr='%s', ), - closingSize=dict(argstr='--closingSize %f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='--inputVolume %s', - extensions=None, - ), - numberOfThreads=dict(argstr='--numberOfThreads %d', ), - otsuPercentileThreshold=dict(argstr='--otsuPercentileThreshold %f', ), + ROIAutoDilateSize=dict(argstr="--ROIAutoDilateSize %f",), + args=dict(argstr="%s",), + closingSize=dict(argstr="--closingSize %f",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="--inputVolume %s", extensions=None,), + numberOfThreads=dict(argstr="--numberOfThreads %d",), + otsuPercentileThreshold=dict(argstr="--otsuPercentileThreshold %f",), outputClippedVolumeROI=dict( - argstr='--outputClippedVolumeROI %s', - hash_files=False, - ), - outputROIMaskVolume=dict( - argstr='--outputROIMaskVolume %s', - hash_files=False, + argstr="--outputClippedVolumeROI %s", hash_files=False, ), - outputVolumePixelType=dict(argstr='--outputVolumePixelType %s', ), - thresholdCorrectionFactor=dict( - argstr='--thresholdCorrectionFactor %f', ), + outputROIMaskVolume=dict(argstr="--outputROIMaskVolume %s", hash_files=False,), + outputVolumePixelType=dict(argstr="--outputVolumePixelType %s",), + thresholdCorrectionFactor=dict(argstr="--thresholdCorrectionFactor %f",), ) inputs = BRAINSROIAuto.input_spec() for key, metadata in list(input_map.items()): for metakey, value in 
list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BRAINSROIAuto_outputs(): output_map = dict( - outputClippedVolumeROI=dict(extensions=None, ), - outputROIMaskVolume=dict(extensions=None, ), + outputClippedVolumeROI=dict(extensions=None,), + outputROIMaskVolume=dict(extensions=None,), ) outputs = BRAINSROIAuto.output_spec() diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py index 30a0ab2ade..241f58c6c0 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py @@ -4,62 +4,50 @@ def test_EMSegmentCommandLine_inputs(): input_map = dict( - args=dict(argstr='%s', ), - atlasVolumeFileNames=dict(argstr='--atlasVolumeFileNames %s...', ), - disableCompression=dict(argstr='--disableCompression ', ), - disableMultithreading=dict(argstr='--disableMultithreading %d', ), - dontUpdateIntermediateData=dict( - argstr='--dontUpdateIntermediateData %d', ), - dontWriteResults=dict(argstr='--dontWriteResults ', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + atlasVolumeFileNames=dict(argstr="--atlasVolumeFileNames %s...",), + disableCompression=dict(argstr="--disableCompression ",), + disableMultithreading=dict(argstr="--disableMultithreading %d",), + dontUpdateIntermediateData=dict(argstr="--dontUpdateIntermediateData %d",), + dontWriteResults=dict(argstr="--dontWriteResults ",), + environ=dict(nohash=True, usedefault=True,), generateEmptyMRMLSceneAndQuit=dict( - argstr='--generateEmptyMRMLSceneAndQuit %s', - hash_files=False, - ), - intermediateResultsDirectory=dict( - argstr='--intermediateResultsDirectory %s', ), - keepTempFiles=dict(argstr='--keepTempFiles ', ), - loadAtlasNonCentered=dict(argstr='--loadAtlasNonCentered ', ), - loadTargetCentered=dict(argstr='--loadTargetCentered ', ), - mrmlSceneFileName=dict( - argstr='--mrmlSceneFileName %s', - extensions=None, + argstr="--generateEmptyMRMLSceneAndQuit %s", hash_files=False, ), - parametersMRMLNodeName=dict(argstr='--parametersMRMLNodeName %s', ), - registrationAffineType=dict(argstr='--registrationAffineType %d', ), - registrationDeformableType=dict( - argstr='--registrationDeformableType %d', ), - registrationPackage=dict(argstr='--registrationPackage %s', ), + intermediateResultsDirectory=dict(argstr="--intermediateResultsDirectory %s",), + keepTempFiles=dict(argstr="--keepTempFiles ",), + loadAtlasNonCentered=dict(argstr="--loadAtlasNonCentered ",), + loadTargetCentered=dict(argstr="--loadTargetCentered ",), + mrmlSceneFileName=dict(argstr="--mrmlSceneFileName %s", extensions=None,), + parametersMRMLNodeName=dict(argstr="--parametersMRMLNodeName %s",), + registrationAffineType=dict(argstr="--registrationAffineType %d",), + registrationDeformableType=dict(argstr="--registrationDeformableType %d",), + registrationPackage=dict(argstr="--registrationPackage %s",), resultMRMLSceneFileName=dict( - argstr='--resultMRMLSceneFileName %s', - hash_files=False, + argstr="--resultMRMLSceneFileName %s", hash_files=False, ), resultStandardVolumeFileName=dict( - argstr='--resultStandardVolumeFileName %s', - extensions=None, + argstr="--resultStandardVolumeFileName %s", extensions=None, ), resultVolumeFileName=dict( - argstr='--resultVolumeFileName %s', - hash_files=False, + argstr="--resultVolumeFileName %s", hash_files=False, ), - 
targetVolumeFileNames=dict(argstr='--targetVolumeFileNames %s...', ), - taskPreProcessingSetting=dict( - argstr='--taskPreProcessingSetting %s', ), - verbose=dict(argstr='--verbose ', ), + targetVolumeFileNames=dict(argstr="--targetVolumeFileNames %s...",), + taskPreProcessingSetting=dict(argstr="--taskPreProcessingSetting %s",), + verbose=dict(argstr="--verbose ",), ) inputs = EMSegmentCommandLine.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_EMSegmentCommandLine_outputs(): output_map = dict( - generateEmptyMRMLSceneAndQuit=dict(extensions=None, ), - resultMRMLSceneFileName=dict(extensions=None, ), - resultVolumeFileName=dict(extensions=None, ), + generateEmptyMRMLSceneAndQuit=dict(extensions=None,), + resultMRMLSceneFileName=dict(extensions=None,), + resultVolumeFileName=dict(extensions=None,), ) outputs = EMSegmentCommandLine.output_spec() diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py index 4d9bbee882..deed609acb 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py @@ -4,42 +4,26 @@ def test_RobustStatisticsSegmenter_inputs(): input_map = dict( - args=dict(argstr='%s', ), - curvatureWeight=dict(argstr='--curvatureWeight %f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - expectedVolume=dict(argstr='--expectedVolume %f', ), - intensityHomogeneity=dict(argstr='--intensityHomogeneity %f', ), - labelImageFileName=dict( - argstr='%s', - extensions=None, - position=-2, - ), - labelValue=dict(argstr='--labelValue %d', ), - maxRunningTime=dict(argstr='--maxRunningTime %f', ), - originalImageFileName=dict( - argstr='%s', - extensions=None, - position=-3, - ), - segmentedImageFileName=dict( - argstr='%s', - hash_files=False, - position=-1, - ), + args=dict(argstr="%s",), + curvatureWeight=dict(argstr="--curvatureWeight %f",), + environ=dict(nohash=True, usedefault=True,), + expectedVolume=dict(argstr="--expectedVolume %f",), + intensityHomogeneity=dict(argstr="--intensityHomogeneity %f",), + labelImageFileName=dict(argstr="%s", extensions=None, position=-2,), + labelValue=dict(argstr="--labelValue %d",), + maxRunningTime=dict(argstr="--maxRunningTime %f",), + originalImageFileName=dict(argstr="%s", extensions=None, position=-3,), + segmentedImageFileName=dict(argstr="%s", hash_files=False, position=-1,), ) inputs = RobustStatisticsSegmenter.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_RobustStatisticsSegmenter_outputs(): - output_map = dict(segmentedImageFileName=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(segmentedImageFileName=dict(extensions=None, position=-1,),) outputs = RobustStatisticsSegmenter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py index 2d3fe30de5..9da82507cc 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py +++ 
b/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py @@ -4,39 +4,27 @@ def test_SimpleRegionGrowingSegmentation_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr='%s', - extensions=None, - position=-2, - ), - iterations=dict(argstr='--iterations %d', ), - labelvalue=dict(argstr='--labelvalue %d', ), - multiplier=dict(argstr='--multiplier %f', ), - neighborhood=dict(argstr='--neighborhood %d', ), - outputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - seed=dict(argstr='--seed %s...', ), - smoothingIterations=dict(argstr='--smoothingIterations %d', ), - timestep=dict(argstr='--timestep %f', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputVolume=dict(argstr="%s", extensions=None, position=-2,), + iterations=dict(argstr="--iterations %d",), + labelvalue=dict(argstr="--labelvalue %d",), + multiplier=dict(argstr="--multiplier %f",), + neighborhood=dict(argstr="--neighborhood %d",), + outputVolume=dict(argstr="%s", hash_files=False, position=-1,), + seed=dict(argstr="--seed %s...",), + smoothingIterations=dict(argstr="--smoothingIterations %d",), + timestep=dict(argstr="--timestep %f",), ) inputs = SimpleRegionGrowingSegmentation.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SimpleRegionGrowingSegmentation_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict(extensions=None, position=-1,),) outputs = SimpleRegionGrowingSegmentation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/surface.py b/nipype/interfaces/slicer/surface.py index 6a1dfe2cc0..970d067157 100644 --- a/nipype/interfaces/slicer/surface.py +++ b/nipype/interfaces/slicer/surface.py @@ -3,7 +3,18 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os @@ -11,12 +22,8 @@ class MergeModelsInputSpec(CommandLineInputSpec): Model1 = File(position=-3, desc="Model", exists=True, argstr="%s") Model2 = File(position=-2, desc="Model", exists=True, argstr="%s") ModelOutput = traits.Either( - traits.Bool, - File(), - position=-1, - hash_files=False, - desc="Model", - argstr="%s") + traits.Bool, File(), position=-1, hash_files=False, desc="Model", argstr="%s" + ) class MergeModelsOutputSpec(TraitedSpec): @@ -43,13 +50,12 @@ class MergeModels(SEMLikeCommandLine): input_spec = MergeModelsInputSpec output_spec = MergeModelsOutputSpec _cmd = "MergeModels " - _outputs_filenames = {'ModelOutput': 'ModelOutput.vtk'} + _outputs_filenames = {"ModelOutput": "ModelOutput.vtk"} class ModelToLabelMapInputSpec(CommandLineInputSpec): distance = traits.Float(desc="Sample distance", argstr="--distance %f") - InputVolume = File( - position=-3, desc="Input volume", exists=True, argstr="%s") + InputVolume = File(position=-3, desc="Input volume", exists=True, argstr="%s") surface = 
File(position=-2, desc="Model", exists=True, argstr="%s") OutputVolume = traits.Either( traits.Bool, @@ -57,7 +63,8 @@ class ModelToLabelMapInputSpec(CommandLineInputSpec): position=-1, hash_files=False, desc="The label volume", - argstr="%s") + argstr="%s", + ) class ModelToLabelMapOutputSpec(TraitedSpec): @@ -84,7 +91,7 @@ class ModelToLabelMap(SEMLikeCommandLine): input_spec = ModelToLabelMapInputSpec output_spec = ModelToLabelMapOutputSpec _cmd = "ModelToLabelMap " - _outputs_filenames = {'OutputVolume': 'OutputVolume.nii'} + _outputs_filenames = {"OutputVolume": "OutputVolume.nii"} class GrayscaleModelMakerInputSpec(CommandLineInputSpec): @@ -92,39 +99,43 @@ class GrayscaleModelMakerInputSpec(CommandLineInputSpec): position=-2, desc="Volume containing the input grayscale data.", exists=True, - argstr="%s") + argstr="%s", + ) OutputGeometry = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Output that contains geometry model.", - argstr="%s") + argstr="%s", + ) threshold = traits.Float( - desc= - "Grayscale threshold of isosurface. The resulting surface of triangles separates the volume into voxels that lie above (inside) and below (outside) the threshold.", - argstr="--threshold %f") + desc="Grayscale threshold of isosurface. The resulting surface of triangles separates the volume into voxels that lie above (inside) and below (outside) the threshold.", + argstr="--threshold %f", + ) name = traits.Str(desc="Name to use for this model.", argstr="--name %s") smooth = traits.Int( desc="Number of smoothing iterations. If 0, no smoothing will be done.", - argstr="--smooth %d") + argstr="--smooth %d", + ) decimate = traits.Float( - desc= - "Target reduction during decimation, as a decimal percentage reduction in the number of polygons. If 0, no decimation will be done.", - argstr="--decimate %f") + desc="Target reduction during decimation, as a decimal percentage reduction in the number of polygons. If 0, no decimation will be done.", + argstr="--decimate %f", + ) splitnormals = traits.Bool( - desc= - "Splitting normals is useful for visualizing sharp features. However it creates holes in surfaces which affect measurements", - argstr="--splitnormals ") + desc="Splitting normals is useful for visualizing sharp features. However it creates holes in surfaces which affect measurements", + argstr="--splitnormals ", + ) pointnormals = traits.Bool( - desc= - "Calculate the point normals? Calculated point normals make the surface appear smooth. Without point normals, the surface will appear faceted.", - argstr="--pointnormals ") + desc="Calculate the point normals? Calculated point normals make the surface appear smooth. 
Without point normals, the surface will appear faceted.", + argstr="--pointnormals ", + ) class GrayscaleModelMakerOutputSpec(TraitedSpec): OutputGeometry = File( - position=-1, desc="Output that contains geometry model.", exists=True) + position=-1, desc="Output that contains geometry model.", exists=True + ) class GrayscaleModelMaker(SEMLikeCommandLine): @@ -149,24 +160,22 @@ class GrayscaleModelMaker(SEMLikeCommandLine): input_spec = GrayscaleModelMakerInputSpec output_spec = GrayscaleModelMakerOutputSpec _cmd = "GrayscaleModelMaker " - _outputs_filenames = {'OutputGeometry': 'OutputGeometry.vtk'} + _outputs_filenames = {"OutputGeometry": "OutputGeometry.vtk"} class ProbeVolumeWithModelInputSpec(CommandLineInputSpec): InputVolume = File( - position=-3, - desc="Volume to use to 'paint' the model", - exists=True, - argstr="%s") - InputModel = File( - position=-2, desc="Input model", exists=True, argstr="%s") + position=-3, desc="Volume to use to 'paint' the model", exists=True, argstr="%s" + ) + InputModel = File(position=-2, desc="Input model", exists=True, argstr="%s") OutputModel = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Output 'painted' model", - argstr="%s") + argstr="%s", + ) class ProbeVolumeWithModelOutputSpec(TraitedSpec): @@ -193,34 +202,34 @@ class ProbeVolumeWithModel(SEMLikeCommandLine): input_spec = ProbeVolumeWithModelInputSpec output_spec = ProbeVolumeWithModelOutputSpec _cmd = "ProbeVolumeWithModel " - _outputs_filenames = {'OutputModel': 'OutputModel.vtk'} + _outputs_filenames = {"OutputModel": "OutputModel.vtk"} class LabelMapSmoothingInputSpec(CommandLineInputSpec): labelToSmooth = traits.Int( - desc= - "The label to smooth. All others will be ignored. If no label is selected by the user, the maximum label in the image is chosen by default.", - argstr="--labelToSmooth %d") + desc="The label to smooth. All others will be ignored. If no label is selected by the user, the maximum label in the image is chosen by default.", + argstr="--labelToSmooth %d", + ) numberOfIterations = traits.Int( desc="The number of iterations of the level set AntiAliasing algorithm", - argstr="--numberOfIterations %d") - maxRMSError = traits.Float( - desc="The maximum RMS error.", argstr="--maxRMSError %f") + argstr="--numberOfIterations %d", + ) + maxRMSError = traits.Float(desc="The maximum RMS error.", argstr="--maxRMSError %f") gaussianSigma = traits.Float( desc="The standard deviation of the Gaussian kernel", - argstr="--gaussianSigma %f") + argstr="--gaussianSigma %f", + ) inputVolume = File( - position=-2, - desc="Input label map to smooth", - exists=True, - argstr="%s") + position=-2, desc="Input label map to smooth", exists=True, argstr="%s" + ) outputVolume = traits.Either( traits.Bool, File(), position=-1, hash_files=False, desc="Smoothed label map", - argstr="%s") + argstr="%s", + ) class LabelMapSmoothingOutputSpec(TraitedSpec): @@ -247,98 +256,98 @@ class LabelMapSmoothing(SEMLikeCommandLine): input_spec = LabelMapSmoothingInputSpec output_spec = LabelMapSmoothingOutputSpec _cmd = "LabelMapSmoothing " - _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _outputs_filenames = {"outputVolume": "outputVolume.nii"} class ModelMakerInputSpec(CommandLineInputSpec): InputVolume = File( position=-1, - desc= - "Input label map. The Input Volume drop down menu is populated with the label map volumes that are present in the scene, select one from which to generate models.", + desc="Input label map. 
The Input Volume drop down menu is populated with the label map volumes that are present in the scene, select one from which to generate models.", exists=True, - argstr="%s") + argstr="%s", + ) color = File( desc="Color table to make labels to colors and objects", exists=True, - argstr="--color %s") + argstr="--color %s", + ) modelSceneFile = traits.Either( traits.Bool, - InputMultiPath(File(), ), + InputMultiPath(File(),), hash_files=False, - desc= - "Generated models, under a model hierarchy node. Models are imported into Slicer under a model hierarchy node, and their colors are set by the color table associated with the input label map volume. The model hierarchy node must be created before running the model maker, by selecting Create New ModelHierarchy from the Models drop down menu. If you're running from the command line, a model hierarchy node in a new mrml scene will be created for you.", - argstr="--modelSceneFile %s...") + desc="Generated models, under a model hierarchy node. Models are imported into Slicer under a model hierarchy node, and their colors are set by the color table associated with the input label map volume. The model hierarchy node must be created before running the model maker, by selecting Create New ModelHierarchy from the Models drop down menu. If you're running from the command line, a model hierarchy node in a new mrml scene will be created for you.", + argstr="--modelSceneFile %s...", + ) name = traits.Str( - desc= - "Name to use for this model. Any text entered in the entry box will be the starting string for the created model file names. The label number and the color name will also be part of the file name. If making multiple models, use this as a prefix to the label and color name.", - argstr="--name %s") + desc="Name to use for this model. Any text entered in the entry box will be the starting string for the created model file names. The label number and the color name will also be part of the file name. If making multiple models, use this as a prefix to the label and color name.", + argstr="--name %s", + ) generateAll = traits.Bool( - desc= - "Generate models for all labels in the input volume. select this option if you want to create all models that correspond to all values in a labelmap volume (using the Joint Smoothing option below is useful with this option). Ignores Labels, Start Label, End Label settings. Skips label 0.", - argstr="--generateAll ") + desc="Generate models for all labels in the input volume. select this option if you want to create all models that correspond to all values in a labelmap volume (using the Joint Smoothing option below is useful with this option). Ignores Labels, Start Label, End Label settings. Skips label 0.", + argstr="--generateAll ", + ) labels = InputMultiPath( traits.Int, - desc= - "A comma separated list of label values from which to make models. f you specify a list of Labels, it will override any start/end label settings. If you click Generate All Models it will override the list of labels and any start/end label settings.", + desc="A comma separated list of label values from which to make models. f you specify a list of Labels, it will override any start/end label settings. If you click Generate All Models it will override the list of labels and any start/end label settings.", sep=",", - argstr="--labels %s") + argstr="--labels %s", + ) start = traits.Int( - desc= - "If you want to specify a continuous range of labels from which to generate models, enter the lower label here. 
Voxel value from which to start making models. Used instead of the label list to specify a range (make sure the label list is empty or it will over ride this).", - argstr="--start %d") + desc="If you want to specify a continuous range of labels from which to generate models, enter the lower label here. Voxel value from which to start making models. Used instead of the label list to specify a range (make sure the label list is empty or it will over ride this).", + argstr="--start %d", + ) end = traits.Int( - desc= - "If you want to specify a continuous range of labels from which to generate models, enter the higher label here. Voxel value up to which to continue making models. Skip any values with zero voxels.", - argstr="--end %d") + desc="If you want to specify a continuous range of labels from which to generate models, enter the higher label here. Voxel value up to which to continue making models. Skip any values with zero voxels.", + argstr="--end %d", + ) skipUnNamed = traits.Bool( - desc= - "Select this to not generate models from labels that do not have names defined in the color look up table associated with the input label map. If true, only models which have an entry in the color table will be generated. If false, generate all models that exist within the label range.", - argstr="--skipUnNamed ") + desc="Select this to not generate models from labels that do not have names defined in the color look up table associated with the input label map. If true, only models which have an entry in the color table will be generated. If false, generate all models that exist within the label range.", + argstr="--skipUnNamed ", + ) jointsmooth = traits.Bool( - desc= - "This will ensure that all resulting models fit together smoothly, like jigsaw puzzle pieces. Otherwise the models will be smoothed independently and may overlap.", - argstr="--jointsmooth ") + desc="This will ensure that all resulting models fit together smoothly, like jigsaw puzzle pieces. Otherwise the models will be smoothed independently and may overlap.", + argstr="--jointsmooth ", + ) smooth = traits.Int( - desc= - "Here you can set the number of smoothing iterations for Laplacian smoothing, or the degree of the polynomial approximating the windowed Sinc function. Use 0 if you wish no smoothing. ", - argstr="--smooth %d") + desc="Here you can set the number of smoothing iterations for Laplacian smoothing, or the degree of the polynomial approximating the windowed Sinc function. Use 0 if you wish no smoothing. ", + argstr="--smooth %d", + ) filtertype = traits.Enum( "Sinc", "Laplacian", - desc= - "You can control the type of smoothing done on the models by selecting a filter type of either Sinc or Laplacian.", - argstr="--filtertype %s") + desc="You can control the type of smoothing done on the models by selecting a filter type of either Sinc or Laplacian.", + argstr="--filtertype %s", + ) decimate = traits.Float( - desc= - "Chose the target reduction in number of polygons as a decimal percentage (between 0 and 1) of the number of polygons. Specifies the percentage of triangles to be removed. For example, 0.1 means 10% reduction and 0.9 means 90% reduction.", - argstr="--decimate %f") + desc="Chose the target reduction in number of polygons as a decimal percentage (between 0 and 1) of the number of polygons. Specifies the percentage of triangles to be removed. 
For example, 0.1 means 10% reduction and 0.9 means 90% reduction.", + argstr="--decimate %f", + ) splitnormals = traits.Bool( - desc= - "Splitting normals is useful for visualizing sharp features. However it creates holes in surfaces which affects measurements.", - argstr="--splitnormals ") + desc="Splitting normals is useful for visualizing sharp features. However it creates holes in surfaces which affects measurements.", + argstr="--splitnormals ", + ) pointnormals = traits.Bool( - desc= - "Turn this flag on if you wish to calculate the normal vectors for the points.", - argstr="--pointnormals ") + desc="Turn this flag on if you wish to calculate the normal vectors for the points.", + argstr="--pointnormals ", + ) pad = traits.Bool( - desc= - "Pad the input volume with zero value voxels on all 6 faces in order to ensure the production of closed surfaces. Sets the origin translation and extent translation so that the models still line up with the unpadded input volume.", - argstr="--pad ") + desc="Pad the input volume with zero value voxels on all 6 faces in order to ensure the production of closed surfaces. Sets the origin translation and extent translation so that the models still line up with the unpadded input volume.", + argstr="--pad ", + ) saveIntermediateModels = traits.Bool( - desc= - "You can save a copy of the models after each of the intermediate steps (marching cubes, smoothing, and decimation if not joint smoothing, otherwise just after decimation). These intermediate models are not saved in the mrml file, you have to load them manually after turning off deleting temporary files in they python console (View ->Python Interactor) using the following command slicer.modules.modelmaker.cliModuleLogic().DeleteTemporaryFilesOff().", - argstr="--saveIntermediateModels ") + desc="You can save a copy of the models after each of the intermediate steps (marching cubes, smoothing, and decimation if not joint smoothing, otherwise just after decimation). These intermediate models are not saved in the mrml file, you have to load them manually after turning off deleting temporary files in they python console (View ->Python Interactor) using the following command slicer.modules.modelmaker.cliModuleLogic().DeleteTemporaryFilesOff().", + argstr="--saveIntermediateModels ", + ) debug = traits.Bool( - desc= - "turn this flag on in order to see debugging output (look in the Error Log window that is accessed via the View menu)", - argstr="--debug ") + desc="turn this flag on in order to see debugging output (look in the Error Log window that is accessed via the View menu)", + argstr="--debug ", + ) class ModelMakerOutputSpec(TraitedSpec): modelSceneFile = OutputMultiPath( File(exists=True), - desc= - "Generated models, under a model hierarchy node. Models are imported into Slicer under a model hierarchy node, and their colors are set by the color table associated with the input label map volume. The model hierarchy node must be created before running the model maker, by selecting Create New ModelHierarchy from the Models drop down menu. If you're running from the command line, a model hierarchy node in a new mrml scene will be created for you." + desc="Generated models, under a model hierarchy node. Models are imported into Slicer under a model hierarchy node, and their colors are set by the color table associated with the input label map volume. The model hierarchy node must be created before running the model maker, by selecting Create New ModelHierarchy from the Models drop down menu. 
If you're running from the command line, a model hierarchy node in a new mrml scene will be created for you.", ) @@ -364,4 +373,4 @@ class ModelMaker(SEMLikeCommandLine): input_spec = ModelMakerInputSpec output_spec = ModelMakerOutputSpec _cmd = "ModelMaker " - _outputs_filenames = {'modelSceneFile': 'modelSceneFile.mrml'} + _outputs_filenames = {"modelSceneFile": "modelSceneFile.mrml"} diff --git a/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py b/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py index 3e6b0d875d..6411e0ee54 100644 --- a/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py +++ b/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py @@ -4,32 +4,25 @@ def test_DicomToNrrdConverter_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputDicomDirectory=dict(argstr='--inputDicomDirectory %s', ), - outputDirectory=dict( - argstr='--outputDirectory %s', - hash_files=False, - ), - outputVolume=dict(argstr='--outputVolume %s', ), - smallGradientThreshold=dict(argstr='--smallGradientThreshold %f', ), - useBMatrixGradientDirections=dict( - argstr='--useBMatrixGradientDirections ', ), - useIdentityMeaseurementFrame=dict( - argstr='--useIdentityMeaseurementFrame ', ), - writeProtocolGradientsFile=dict( - argstr='--writeProtocolGradientsFile ', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputDicomDirectory=dict(argstr="--inputDicomDirectory %s",), + outputDirectory=dict(argstr="--outputDirectory %s", hash_files=False,), + outputVolume=dict(argstr="--outputVolume %s",), + smallGradientThreshold=dict(argstr="--smallGradientThreshold %f",), + useBMatrixGradientDirections=dict(argstr="--useBMatrixGradientDirections ",), + useIdentityMeaseurementFrame=dict(argstr="--useIdentityMeaseurementFrame ",), + writeProtocolGradientsFile=dict(argstr="--writeProtocolGradientsFile ",), ) inputs = DicomToNrrdConverter.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DicomToNrrdConverter_outputs(): - output_map = dict(outputDirectory=dict(), ) + output_map = dict(outputDirectory=dict(),) outputs = DicomToNrrdConverter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py b/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py index 371d7cb1b3..d65723f4af 100644 --- a/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py +++ b/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py @@ -4,28 +4,21 @@ def test_EMSegmentTransformToNewFormat_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputMRMLFileName=dict( - argstr='--inputMRMLFileName %s', - extensions=None, - ), - outputMRMLFileName=dict( - argstr='--outputMRMLFileName %s', - hash_files=False, - ), - templateFlag=dict(argstr='--templateFlag ', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputMRMLFileName=dict(argstr="--inputMRMLFileName %s", extensions=None,), + outputMRMLFileName=dict(argstr="--outputMRMLFileName %s", hash_files=False,), + templateFlag=dict(argstr="--templateFlag ",), ) inputs = EMSegmentTransformToNewFormat.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): 
assert getattr(inputs.traits()[key], metakey) == value + + def test_EMSegmentTransformToNewFormat_outputs(): - output_map = dict(outputMRMLFileName=dict(extensions=None, ), ) + output_map = dict(outputMRMLFileName=dict(extensions=None,),) outputs = EMSegmentTransformToNewFormat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py b/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py index 9b9cb3b367..eaaa00d788 100644 --- a/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py +++ b/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py @@ -4,38 +4,26 @@ def test_GrayscaleModelMaker_inputs(): input_map = dict( - InputVolume=dict( - argstr='%s', - extensions=None, - position=-2, - ), - OutputGeometry=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - args=dict(argstr='%s', ), - decimate=dict(argstr='--decimate %f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - name=dict(argstr='--name %s', ), - pointnormals=dict(argstr='--pointnormals ', ), - smooth=dict(argstr='--smooth %d', ), - splitnormals=dict(argstr='--splitnormals ', ), - threshold=dict(argstr='--threshold %f', ), + InputVolume=dict(argstr="%s", extensions=None, position=-2,), + OutputGeometry=dict(argstr="%s", hash_files=False, position=-1,), + args=dict(argstr="%s",), + decimate=dict(argstr="--decimate %f",), + environ=dict(nohash=True, usedefault=True,), + name=dict(argstr="--name %s",), + pointnormals=dict(argstr="--pointnormals ",), + smooth=dict(argstr="--smooth %d",), + splitnormals=dict(argstr="--splitnormals ",), + threshold=dict(argstr="--threshold %f",), ) inputs = GrayscaleModelMaker.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_GrayscaleModelMaker_outputs(): - output_map = dict(OutputGeometry=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputGeometry=dict(extensions=None, position=-1,),) outputs = GrayscaleModelMaker.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py b/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py index c33abad34c..5077a0f23c 100644 --- a/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py +++ b/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py @@ -4,36 +4,24 @@ def test_LabelMapSmoothing_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - gaussianSigma=dict(argstr='--gaussianSigma %f', ), - inputVolume=dict( - argstr='%s', - extensions=None, - position=-2, - ), - labelToSmooth=dict(argstr='--labelToSmooth %d', ), - maxRMSError=dict(argstr='--maxRMSError %f', ), - numberOfIterations=dict(argstr='--numberOfIterations %d', ), - outputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + gaussianSigma=dict(argstr="--gaussianSigma %f",), + inputVolume=dict(argstr="%s", extensions=None, position=-2,), + labelToSmooth=dict(argstr="--labelToSmooth %d",), + maxRMSError=dict(argstr="--maxRMSError %f",), + numberOfIterations=dict(argstr="--numberOfIterations %d",), + outputVolume=dict(argstr="%s", hash_files=False, position=-1,), ) inputs = LabelMapSmoothing.input_spec() for key, metadata in list(input_map.items()): for metakey, value in 
list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_LabelMapSmoothing_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(outputVolume=dict(extensions=None, position=-1,),) outputs = LabelMapSmoothing.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_MergeModels.py b/nipype/interfaces/slicer/tests/test_auto_MergeModels.py index 86fdd83399..43398aa45f 100644 --- a/nipype/interfaces/slicer/tests/test_auto_MergeModels.py +++ b/nipype/interfaces/slicer/tests/test_auto_MergeModels.py @@ -4,37 +4,21 @@ def test_MergeModels_inputs(): input_map = dict( - Model1=dict( - argstr='%s', - extensions=None, - position=-3, - ), - Model2=dict( - argstr='%s', - extensions=None, - position=-2, - ), - ModelOutput=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + Model1=dict(argstr="%s", extensions=None, position=-3,), + Model2=dict(argstr="%s", extensions=None, position=-2,), + ModelOutput=dict(argstr="%s", hash_files=False, position=-1,), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), ) inputs = MergeModels.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MergeModels_outputs(): - output_map = dict(ModelOutput=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(ModelOutput=dict(extensions=None, position=-1,),) outputs = MergeModels.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py b/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py index ef6a3f3c0d..888b7e0477 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py +++ b/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py @@ -4,50 +4,36 @@ def test_ModelMaker_inputs(): input_map = dict( - InputVolume=dict( - argstr='%s', - extensions=None, - position=-1, - ), - args=dict(argstr='%s', ), - color=dict( - argstr='--color %s', - extensions=None, - ), - debug=dict(argstr='--debug ', ), - decimate=dict(argstr='--decimate %f', ), - end=dict(argstr='--end %d', ), - environ=dict( - nohash=True, - usedefault=True, - ), - filtertype=dict(argstr='--filtertype %s', ), - generateAll=dict(argstr='--generateAll ', ), - jointsmooth=dict(argstr='--jointsmooth ', ), - labels=dict( - argstr='--labels %s', - sep=',', - ), - modelSceneFile=dict( - argstr='--modelSceneFile %s...', - hash_files=False, - ), - name=dict(argstr='--name %s', ), - pad=dict(argstr='--pad ', ), - pointnormals=dict(argstr='--pointnormals ', ), - saveIntermediateModels=dict(argstr='--saveIntermediateModels ', ), - skipUnNamed=dict(argstr='--skipUnNamed ', ), - smooth=dict(argstr='--smooth %d', ), - splitnormals=dict(argstr='--splitnormals ', ), - start=dict(argstr='--start %d', ), + InputVolume=dict(argstr="%s", extensions=None, position=-1,), + args=dict(argstr="%s",), + color=dict(argstr="--color %s", extensions=None,), + debug=dict(argstr="--debug ",), + decimate=dict(argstr="--decimate %f",), + end=dict(argstr="--end %d",), + environ=dict(nohash=True, usedefault=True,), + filtertype=dict(argstr="--filtertype %s",), + generateAll=dict(argstr="--generateAll ",), + jointsmooth=dict(argstr="--jointsmooth ",), + labels=dict(argstr="--labels %s", sep=",",), + 
modelSceneFile=dict(argstr="--modelSceneFile %s...", hash_files=False,), + name=dict(argstr="--name %s",), + pad=dict(argstr="--pad ",), + pointnormals=dict(argstr="--pointnormals ",), + saveIntermediateModels=dict(argstr="--saveIntermediateModels ",), + skipUnNamed=dict(argstr="--skipUnNamed ",), + smooth=dict(argstr="--smooth %d",), + splitnormals=dict(argstr="--splitnormals ",), + start=dict(argstr="--start %d",), ) inputs = ModelMaker.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ModelMaker_outputs(): - output_map = dict(modelSceneFile=dict(), ) + output_map = dict(modelSceneFile=dict(),) outputs = ModelMaker.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py b/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py index 8c53d9dabe..ad7c305824 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py +++ b/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py @@ -4,38 +4,22 @@ def test_ModelToLabelMap_inputs(): input_map = dict( - InputVolume=dict( - argstr='%s', - extensions=None, - position=-3, - ), - OutputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - args=dict(argstr='%s', ), - distance=dict(argstr='--distance %f', ), - environ=dict( - nohash=True, - usedefault=True, - ), - surface=dict( - argstr='%s', - extensions=None, - position=-2, - ), + InputVolume=dict(argstr="%s", extensions=None, position=-3,), + OutputVolume=dict(argstr="%s", hash_files=False, position=-1,), + args=dict(argstr="%s",), + distance=dict(argstr="--distance %f",), + environ=dict(nohash=True, usedefault=True,), + surface=dict(argstr="%s", extensions=None, position=-2,), ) inputs = ModelToLabelMap.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ModelToLabelMap_outputs(): - output_map = dict(OutputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputVolume=dict(extensions=None, position=-1,),) outputs = ModelToLabelMap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py b/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py index 4e61a5c819..97e7d4ae38 100644 --- a/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py +++ b/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py @@ -4,33 +4,21 @@ def test_OrientScalarVolume_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume1=dict( - argstr='%s', - extensions=None, - position=-2, - ), - orientation=dict(argstr='--orientation %s', ), - outputVolume=dict( - argstr='%s', - hash_files=False, - position=-1, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + inputVolume1=dict(argstr="%s", extensions=None, position=-2,), + orientation=dict(argstr="--orientation %s",), + outputVolume=dict(argstr="%s", hash_files=False, position=-1,), ) inputs = OrientScalarVolume.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_OrientScalarVolume_outputs(): - output_map = dict(outputVolume=dict( - extensions=None, - position=-1, - ), ) + output_map = 
dict(outputVolume=dict(extensions=None, position=-1,),) outputs = OrientScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py b/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py index 98b572e4c8..c4b12dc7a0 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py +++ b/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py @@ -4,37 +4,21 @@ def test_ProbeVolumeWithModel_inputs(): input_map = dict( - InputModel=dict( - argstr='%s', - extensions=None, - position=-2, - ), - InputVolume=dict( - argstr='%s', - extensions=None, - position=-3, - ), - OutputModel=dict( - argstr='%s', - hash_files=False, - position=-1, - ), - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + InputModel=dict(argstr="%s", extensions=None, position=-2,), + InputVolume=dict(argstr="%s", extensions=None, position=-3,), + OutputModel=dict(argstr="%s", hash_files=False, position=-1,), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), ) inputs = ProbeVolumeWithModel.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ProbeVolumeWithModel_outputs(): - output_map = dict(OutputModel=dict( - extensions=None, - position=-1, - ), ) + output_map = dict(OutputModel=dict(extensions=None, position=-1,),) outputs = ProbeVolumeWithModel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py b/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py index b8eeb5bd56..dda2e3d8f8 100644 --- a/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py +++ b/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py @@ -4,11 +4,7 @@ def test_SlicerCommandLine_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), environ=dict(nohash=True, usedefault=True,), ) inputs = SlicerCommandLine.input_spec() diff --git a/nipype/interfaces/slicer/utilities.py b/nipype/interfaces/slicer/utilities.py index 5faf640570..ad998e58fe 100644 --- a/nipype/interfaces/slicer/utilities.py +++ b/nipype/interfaces/slicer/utilities.py @@ -3,34 +3,45 @@ """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" -from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +from nipype.interfaces.base import ( + CommandLine, + CommandLineInputSpec, + SEMLikeCommandLine, + TraitedSpec, + File, + Directory, + traits, + isdefined, + InputMultiPath, + OutputMultiPath, +) import os class EMSegmentTransformToNewFormatInputSpec(CommandLineInputSpec): inputMRMLFileName = File( - desc= - "Active MRML scene that contains EMSegment algorithm parameters in the format before 3.6.3 - please include absolute file name in path.", + desc="Active MRML scene that contains EMSegment algorithm parameters in the format before 3.6.3 - please include absolute file name in path.", exists=True, - argstr="--inputMRMLFileName %s") + argstr="--inputMRMLFileName %s", + ) outputMRMLFileName = traits.Either( traits.Bool, File(), hash_files=False, - desc= - "Write out the MRML scene after transformation to format 3.6.3 has been made. 
- has to be in the same directory as the input MRML file due to Slicer Core bug - please include absolute file name in path ", - argstr="--outputMRMLFileName %s") + desc="Write out the MRML scene after transformation to format 3.6.3 has been made. - has to be in the same directory as the input MRML file due to Slicer Core bug - please include absolute file name in path ", + argstr="--outputMRMLFileName %s", + ) templateFlag = traits.Bool( - desc= - "Set to true if the transformed mrml file should be used as template file ", - argstr="--templateFlag ") + desc="Set to true if the transformed mrml file should be used as template file ", + argstr="--templateFlag ", + ) class EMSegmentTransformToNewFormatOutputSpec(TraitedSpec): outputMRMLFileName = File( - desc= - "Write out the MRML scene after transformation to format 3.6.3 has been made. - has to be in the same directory as the input MRML file due to Slicer Core bug - please include absolute file name in path ", - exists=True) + desc="Write out the MRML scene after transformation to format 3.6.3 has been made. - has to be in the same directory as the input MRML file due to Slicer Core bug - please include absolute file name in path ", + exists=True, + ) class EMSegmentTransformToNewFormat(SEMLikeCommandLine): @@ -51,4 +62,4 @@ class EMSegmentTransformToNewFormat(SEMLikeCommandLine): input_spec = EMSegmentTransformToNewFormatInputSpec output_spec = EMSegmentTransformToNewFormatOutputSpec _cmd = "EMSegmentTransformToNewFormat " - _outputs_filenames = {'outputMRMLFileName': 'outputMRMLFileName.mrml'} + _outputs_filenames = {"outputMRMLFileName": "outputMRMLFileName.mrml"} diff --git a/nipype/interfaces/spm/__init__.py b/nipype/interfaces/spm/__init__.py index b97c828450..0d5c91abfb 100644 --- a/nipype/interfaces/spm/__init__.py +++ b/nipype/interfaces/spm/__init__.py @@ -3,14 +3,39 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Top-level namespace for spm.""" -from .base import (Info, SPMCommand, logger, no_spm, scans_for_fname, - scans_for_fnames) -from .preprocess import (FieldMap, SliceTiming, Realign, RealignUnwarp, - Coregister, Normalize, Normalize12, Segment, - Smooth, NewSegment, DARTEL, DARTELNorm2MNI, - CreateWarped, VBMSegment) -from .model import (Level1Design, EstimateModel, EstimateContrast, Threshold, - OneSampleTTestDesign, TwoSampleTTestDesign, - PairedTTestDesign, MultipleRegressionDesign) -from .utils import (Analyze2nii, CalcCoregAffine, ApplyTransform, Reslice, - ApplyInverseDeformation, ResliceToReference, DicomImport) +from .base import Info, SPMCommand, logger, no_spm, scans_for_fname, scans_for_fnames +from .preprocess import ( + FieldMap, + SliceTiming, + Realign, + RealignUnwarp, + Coregister, + Normalize, + Normalize12, + Segment, + Smooth, + NewSegment, + DARTEL, + DARTELNorm2MNI, + CreateWarped, + VBMSegment, +) +from .model import ( + Level1Design, + EstimateModel, + EstimateContrast, + Threshold, + OneSampleTTestDesign, + TwoSampleTTestDesign, + PairedTTestDesign, + MultipleRegressionDesign, +) +from .utils import ( + Analyze2nii, + CalcCoregAffine, + ApplyTransform, + Reslice, + ApplyInverseDeformation, + ResliceToReference, + DicomImport, +) diff --git a/nipype/interfaces/spm/base.py b/nipype/interfaces/spm/base.py index 785e069c04..a70e0ab166 100644 --- a/nipype/interfaces/spm/base.py +++ b/nipype/interfaces/spm/base.py @@ -25,15 +25,23 @@ # Local imports from ... 
import logging from ...utils import spm_docs as sd, NUMPY_MMAP -from ..base import (BaseInterface, traits, isdefined, InputMultiPath, - BaseInterfaceInputSpec, Directory, Undefined, ImageFile, - PackageInfo) +from ..base import ( + BaseInterface, + traits, + isdefined, + InputMultiPath, + BaseInterfaceInputSpec, + Directory, + Undefined, + ImageFile, + PackageInfo, +) from ..base.traits_extension import NoDefaultSpecified from ..matlab import MatlabCommand from ...external.due import due, Doi, BibTeX -__docformat__ = 'restructuredtext' -logger = logging.getLogger('nipype.interface') +__docformat__ = "restructuredtext" +logger = logging.getLogger("nipype.interface") def func_is_3d(in_file): @@ -66,18 +74,18 @@ def scans_for_fname(fname): """ if isinstance(fname, list): - scans = np.zeros((len(fname), ), dtype=object) + scans = np.zeros((len(fname),), dtype=object) for sno, f in enumerate(fname): - scans[sno] = '%s,1' % f + scans[sno] = "%s,1" % f return scans img = load(fname, mmap=NUMPY_MMAP) if len(img.shape) == 3: - return np.array(('%s,1' % fname, ), dtype=object) + return np.array(("%s,1" % fname,), dtype=object) else: n_scans = img.shape[3] - scans = np.zeros((n_scans, ), dtype=object) + scans = np.zeros((n_scans,), dtype=object) for sno in range(n_scans): - scans[sno] = '%s,%d' % (fname, sno + 1) + scans[sno] = "%s,%d" % (fname, sno + 1) return scans @@ -98,7 +106,7 @@ def scans_for_fnames(fnames, keep4d=False, separate_sessions=False): if func_is_3d(fnames[0]): fnames = [fnames] if separate_sessions or keep4d: - flist = np.zeros((len(fnames), ), dtype=object) + flist = np.zeros((len(fnames),), dtype=object) for i, f in enumerate(fnames): if separate_sessions: if keep4d: @@ -128,6 +136,7 @@ class Info(PackageInfo): to any call in the Info class to maintain memoization. Otherwise, it will default to the parameters in the `getinfo` function below. """ + _path = None _name = None _command = None @@ -179,19 +188,22 @@ def getinfo(klass, matlab_cmd=None, paths=None, use_mcr=None): returns None of path not found """ - use_mcr = use_mcr or 'FORCE_SPMMCR' in os.environ - matlab_cmd = matlab_cmd or ((use_mcr and os.getenv('SPMMCRCMD')) - or os.getenv('MATLABCMD', 'matlab -nodesktop -nosplash')) - - if klass._name and klass._path and klass._version and \ - klass._command == matlab_cmd and klass._paths == paths: - - return { - 'name': klass._name, - 'path': klass._path, - 'release': klass._version - } - logger.debug('matlab command or path has changed. recomputing version.') + use_mcr = use_mcr or "FORCE_SPMMCR" in os.environ + matlab_cmd = matlab_cmd or ( + (use_mcr and os.getenv("SPMMCRCMD")) + or os.getenv("MATLABCMD", "matlab -nodesktop -nosplash") + ) + + if ( + klass._name + and klass._path + and klass._version + and klass._command == matlab_cmd + and klass._paths == paths + ): + + return {"name": klass._name, "path": klass._path, "release": klass._version} + logger.debug("matlab command or path has changed. 
recomputing version.") mlab = MatlabCommand(matlab_cmd=matlab_cmd, resource_monitor=False) mlab.inputs.mfile = False if paths: @@ -216,7 +228,7 @@ def getinfo(klass, matlab_cmd=None, paths=None, use_mcr=None): except (IOError, RuntimeError) as e: # if no Matlab at all -- exception could be raised # No Matlab -- no spm - logger.debug('%s', e) + logger.debug("%s", e) klass._version = None klass._path = None klass._name = None @@ -226,13 +238,13 @@ def getinfo(klass, matlab_cmd=None, paths=None, use_mcr=None): out = sd._strip_header(out.runtime.stdout) out_dict = {} - for part in out.split('|'): - key, val = part.split(':') + for part in out.split("|"): + key, val = part.split(":") out_dict[key] = val - klass._version = out_dict['release'] - klass._path = out_dict['path'] - klass._name = out_dict['name'] + klass._version = out_dict["release"] + klass._path = out_dict["path"] + klass._name = out_dict["name"] klass._command = matlab_cmd klass._paths = paths return out_dict @@ -243,23 +255,23 @@ def no_spm(): used with pytest.mark.skipif decorator to skip tests that will fail if spm is not installed""" - if 'NIPYPE_NO_MATLAB' in os.environ or Info.version() is None: + if "NIPYPE_NO_MATLAB" in os.environ or Info.version() is None: return True else: return False class SPMCommandInputSpec(BaseInterfaceInputSpec): - matlab_cmd = traits.Str(desc='matlab command to use') - paths = InputMultiPath(Directory(), desc='Paths to add to matlabpath') - mfile = traits.Bool(True, desc='Run m-code using m-file', usedefault=True) - use_mcr = traits.Bool(desc='Run m-code using SPM MCR') + matlab_cmd = traits.Str(desc="matlab command to use") + paths = InputMultiPath(Directory(), desc="Paths to add to matlabpath") + mfile = traits.Bool(True, desc="Run m-code using m-file", usedefault=True) + use_mcr = traits.Bool(desc="Run m-code using SPM MCR") use_v8struct = traits.Bool( True, - min_ver='8', + min_ver="8", usedefault=True, - desc=('Generate SPM8 and higher ' - 'compatible jobs')) + desc=("Generate SPM8 and higher " "compatible jobs"), + ) class SPMCommand(BaseInterface): @@ -267,36 +279,38 @@ class SPMCommand(BaseInterface): WARNING: Pseudo prototype class, meant to be subclassed """ + input_spec = SPMCommandInputSpec - _additional_metadata = ['field'] + _additional_metadata = ["field"] - _jobtype = 'basetype' - _jobname = 'basename' + _jobtype = "basetype" + _jobname = "basename" _matlab_cmd = None _paths = None _use_mcr = None - references_ = [{ - 'entry': - BibTeX( - "@book{FrackowiakFristonFrithDolanMazziotta1997," - "author={R.S.J. Frackowiak, K.J. Friston, C.D. Frith, R.J. Dolan, and J.C. Mazziotta}," - "title={Human Brain Function}," - "publisher={Academic Press USA}," - "year={1997}," - "}"), - 'description': - 'The fundamental text on Statistical Parametric Mapping (SPM)', - # 'path': "nipype.interfaces.spm", - 'tags': ['implementation'], - }] + references_ = [ + { + "entry": BibTeX( + "@book{FrackowiakFristonFrithDolanMazziotta1997," + "author={R.S.J. Frackowiak, K.J. Friston, C.D. Frith, R.J. Dolan, and J.C. 
Mazziotta}," + "title={Human Brain Function}," + "publisher={Academic Press USA}," + "year={1997}," + "}" + ), + "description": "The fundamental text on Statistical Parametric Mapping (SPM)", + # 'path': "nipype.interfaces.spm", + "tags": ["implementation"], + } + ] def __init__(self, **inputs): super(SPMCommand, self).__init__(**inputs) self.inputs.on_trait_change( - self._matlab_cmd_update, - ['matlab_cmd', 'mfile', 'paths', 'use_mcr']) + self._matlab_cmd_update, ["matlab_cmd", "mfile", "paths", "use_mcr"] + ) self._find_mlab_cmd_defaults() self._check_mlab_inputs() self._matlab_cmd_update() @@ -306,19 +320,16 @@ def set_mlab_paths(cls, matlab_cmd=None, paths=None, use_mcr=None): cls._matlab_cmd = matlab_cmd cls._paths = paths cls._use_mcr = use_mcr - info_dict = Info.getinfo( - matlab_cmd=matlab_cmd, - paths=paths, - use_mcr=use_mcr) + info_dict = Info.getinfo(matlab_cmd=matlab_cmd, paths=paths, use_mcr=use_mcr) def _find_mlab_cmd_defaults(self): # check if the user has set environment variables to enforce # the standalone (MCR) version of SPM - if self._use_mcr or 'FORCE_SPMMCR' in os.environ: + if self._use_mcr or "FORCE_SPMMCR" in os.environ: self._use_mcr = True if self._matlab_cmd is None: try: - self._matlab_cmd = os.environ['SPMMCRCMD'] + self._matlab_cmd = os.environ["SPMMCRCMD"] except KeyError: pass @@ -330,9 +341,11 @@ def _matlab_cmd_update(self): matlab_cmd=self.inputs.matlab_cmd, mfile=self.inputs.mfile, paths=self.inputs.paths, - resource_monitor=False) - self.mlab.inputs.script_file = 'pyscript_%s.m' % \ - self.__class__.__name__.split('.')[-1].lower() + resource_monitor=False, + ) + self.mlab.inputs.script_file = ( + "pyscript_%s.m" % self.__class__.__name__.split(".")[-1].lower() + ) if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr: self.mlab.inputs.nodesktop = Undefined self.mlab.inputs.nosplash = Undefined @@ -345,10 +358,10 @@ def version(self): info_dict = Info.getinfo( matlab_cmd=self.inputs.matlab_cmd, paths=self.inputs.paths, - use_mcr=self.inputs.use_mcr) + use_mcr=self.inputs.use_mcr, + ) if info_dict: - return '%s.%s' % (info_dict['name'].split('SPM')[-1], - info_dict['release']) + return "%s.%s" % (info_dict["name"].split("SPM")[-1], info_dict["release"]) @property def jobtype(self): @@ -369,11 +382,12 @@ def _check_mlab_inputs(self): def _run_interface(self, runtime): """Executes the SPM function using MATLAB.""" self.mlab.inputs.script = self._make_matlab_command( - deepcopy(self._parse_inputs())) + deepcopy(self._parse_inputs()) + ) results = self.mlab.run() runtime.returncode = results.runtime.returncode if self.mlab.inputs.uses_mcr: - if 'Skipped' in results.runtime.stdout: + if "Skipped" in results.runtime.stdout: self.raise_exception(runtime) runtime.stdout = results.runtime.stdout runtime.stderr = results.runtime.stderr @@ -404,8 +418,8 @@ def _parse_inputs(self, skip=()): if not isdefined(value): continue field = spec.field - if '.' in field: - fields = field.split('.') + if "." in field: + fields = field.split(".") dictref = spmdict for f in fields[:-1]: if f not in list(dictref.keys()): @@ -443,9 +457,9 @@ def _reformat_dict_for_savemat(self, contents): return [newdict] except TypeError: - print('Requires dict input') + print("Requires dict input") - def _generate_job(self, prefix='', contents=None): + def _generate_job(self, prefix="", contents=None): """Recursive function to generate spm job specification as a string Parameters @@ -458,7 +472,7 @@ def _generate_job(self, prefix='', contents=None): matlab commands. 
""" - jobstring = '' + jobstring = "" if contents is None: return jobstring if isinstance(contents, list): @@ -482,22 +496,20 @@ def _generate_job(self, prefix='', contents=None): jobstring += "{...\n" for i, val in enumerate(contents): if isinstance(val, np.ndarray): - jobstring += self._generate_job( - prefix=None, contents=val) + jobstring += self._generate_job(prefix=None, contents=val) elif isinstance(val, list): items_format = [] for el in val: items_format += [ - '{}' if not isinstance(el, (str, bytes)) else - '\'{}\'' + "{}" if not isinstance(el, (str, bytes)) else "'{}'" ] - val_format = ', '.join(items_format).format - jobstring += '[{}];...\n'.format(val_format(*val)) + val_format = ", ".join(items_format).format + jobstring += "[{}];...\n".format(val_format(*val)) elif isinstance(val, (str, bytes)): - jobstring += '\'{}\';...\n'.format(val) + jobstring += "'{}';...\n".format(val) else: - jobstring += '%s;...\n' % str(val) - jobstring += '};\n' + jobstring += "%s;...\n" % str(val) + jobstring += "};\n" else: for i, val in enumerate(contents): for field in val.dtype.fields: @@ -548,36 +560,45 @@ def _make_matlab_command(self, contents, postscript=None): end\n """ if self.mlab.inputs.mfile: - if (isdefined(self.inputs.use_v8struct) - and self.inputs.use_v8struct): - mscript += self._generate_job('jobs{1}.spm.%s.%s' % - (self.jobtype, - self.jobname), contents[0]) + if isdefined(self.inputs.use_v8struct) and self.inputs.use_v8struct: + mscript += self._generate_job( + "jobs{1}.spm.%s.%s" % (self.jobtype, self.jobname), contents[0] + ) else: if self.jobname in [ - 'st', 'smooth', 'preproc', 'preproc8', 'fmri_spec', - 'fmri_est', 'factorial_design', 'defs' + "st", + "smooth", + "preproc", + "preproc8", + "fmri_spec", + "fmri_est", + "factorial_design", + "defs", ]: # parentheses - mscript += self._generate_job('jobs{1}.%s{1}.%s(1)' % - (self.jobtype, - self.jobname), contents[0]) + mscript += self._generate_job( + "jobs{1}.%s{1}.%s(1)" % (self.jobtype, self.jobname), + contents[0], + ) else: # curly brackets - mscript += self._generate_job('jobs{1}.%s{1}.%s{1}' % - (self.jobtype, - self.jobname), contents[0]) + mscript += self._generate_job( + "jobs{1}.%s{1}.%s{1}" % (self.jobtype, self.jobname), + contents[0], + ) else: from scipy.io import savemat + jobdef = { - 'jobs': [{ - self.jobtype: [{ - self.jobname: - self.reformat_dict_for_savemat(contents[0]) - }] - }] + "jobs": [ + { + self.jobtype: [ + {self.jobname: self.reformat_dict_for_savemat(contents[0])} + ] + } + ] } - savemat(os.path.join(cwd, 'pyjobs_%s.mat' % self.jobname), jobdef) + savemat(os.path.join(cwd, "pyjobs_%s.mat" % self.jobname), jobdef) mscript += "load pyjobs_%s;\n\n" % self.jobname mscript += """ spm_jobman(\'run\', jobs);\n @@ -596,9 +617,15 @@ def _make_matlab_command(self, contents, postscript=None): class ImageFileSPM(ImageFile): """Defines a trait whose value must be a NIfTI file.""" - def __init__(self, value=NoDefaultSpecified, exists=False, resolve=False, **metadata): + def __init__( + self, value=NoDefaultSpecified, exists=False, resolve=False, **metadata + ): """Create an ImageFileSPM trait.""" super(ImageFileSPM, self).__init__( - value=value, exists=exists, types=['nifti1', 'nifti2'], - allow_compressed=False, resolve=resolve, **metadata) - + value=value, + exists=exists, + types=["nifti1", "nifti2"], + allow_compressed=False, + resolve=resolve, + **metadata + ) diff --git a/nipype/interfaces/spm/model.py b/nipype/interfaces/spm/model.py index 4a1d4c80a2..20b16e0870 100644 --- 
a/nipype/interfaces/spm/model.py +++ b/nipype/interfaces/spm/model.py @@ -13,49 +13,57 @@ # Local imports from ... import logging -from ...utils.filemanip import (ensure_list, simplify_list, - split_filename) -from ..base import (Bunch, traits, TraitedSpec, File, Directory, - OutputMultiPath, InputMultiPath, isdefined) -from .base import (SPMCommand, SPMCommandInputSpec, scans_for_fnames, - ImageFileSPM) - -__docformat__ = 'restructuredtext' -iflogger = logging.getLogger('nipype.interface') +from ...utils.filemanip import ensure_list, simplify_list, split_filename +from ..base import ( + Bunch, + traits, + TraitedSpec, + File, + Directory, + OutputMultiPath, + InputMultiPath, + isdefined, +) +from .base import SPMCommand, SPMCommandInputSpec, scans_for_fnames, ImageFileSPM + +__docformat__ = "restructuredtext" +iflogger = logging.getLogger("nipype.interface") class Level1DesignInputSpec(SPMCommandInputSpec): spm_mat_dir = Directory( - exists=True, field='dir', desc='directory to store SPM.mat file (opt)') + exists=True, field="dir", desc="directory to store SPM.mat file (opt)" + ) timing_units = traits.Enum( - 'secs', - 'scans', - field='timing.units', - desc='units for specification of onsets', - mandatory=True) + "secs", + "scans", + field="timing.units", + desc="units for specification of onsets", + mandatory=True, + ) interscan_interval = traits.Float( - field='timing.RT', desc='Interscan interval in secs', mandatory=True) + field="timing.RT", desc="Interscan interval in secs", mandatory=True + ) microtime_resolution = traits.Int( - field='timing.fmri_t', - desc=('Number of time-bins per scan ' - 'in secs (opt)')) + field="timing.fmri_t", desc=("Number of time-bins per scan " "in secs (opt)") + ) microtime_onset = traits.Float( - field='timing.fmri_t0', - desc=('The onset/time-bin in seconds for ' - 'alignment (opt)')) + field="timing.fmri_t0", + desc=("The onset/time-bin in seconds for " "alignment (opt)"), + ) session_info = traits.Any( - field='sess', - desc=('Session specific information generated ' - 'by ``modelgen.SpecifyModel``'), - mandatory=True) + field="sess", + desc=("Session specific information generated " "by ``modelgen.SpecifyModel``"), + mandatory=True, + ) factor_info = traits.List( - traits.Dict(traits.Enum('name', 'levels')), - field='fact', - desc=('Factor specific information ' - 'file (opt)')) + traits.Dict(traits.Enum("name", "levels")), + field="fact", + desc=("Factor specific information " "file (opt)"), + ) bases = traits.Dict( - traits.Enum('hrf', 'fourier', 'fourier_han', 'gamma', 'fir'), - field='bases', + traits.Enum("hrf", "fourier", "fourier_han", "gamma", "fir"), + field="bases", desc=""" dict {'name':{'basesparam1':val,...}} name : string @@ -73,42 +81,46 @@ class Level1DesignInputSpec(SPMCommandInputSpec): order : int Number of basis functions """, - mandatory=True) + mandatory=True, + ) volterra_expansion_order = traits.Enum( - 1, 2, field='volt', desc=('Model interactions - ' - 'yes:1, no:2')) + 1, 2, field="volt", desc=("Model interactions - " "yes:1, no:2") + ) global_intensity_normalization = traits.Enum( - 'none', - 'scaling', - field='global', - desc=('Global intensity ' - 'normalization - ' - 'scaling or none')) + "none", + "scaling", + field="global", + desc=("Global intensity " "normalization - " "scaling or none"), + ) mask_image = File( - exists=True, - field='mask', - desc='Image for explicitly masking the analysis') + exists=True, field="mask", desc="Image for explicitly masking the analysis" + ) mask_threshold = traits.Either( - 
traits.Enum('-Inf'), + traits.Enum("-Inf"), traits.Float(), desc="Thresholding for the mask", - default='-Inf', - usedefault=True) + default="-Inf", + usedefault=True, + ) model_serial_correlations = traits.Enum( - 'AR(1)', - 'FAST', - 'none', - field='cvi', - desc=('Model serial correlations ' - 'AR(1), FAST or none. FAST ' - 'is available in SPM12')) + "AR(1)", + "FAST", + "none", + field="cvi", + desc=( + "Model serial correlations " + "AR(1), FAST or none. FAST " + "is available in SPM12" + ), + ) flags = traits.Dict( - desc='Additional arguments to the job, e.g., a common SPM operation is to ' - 'modify the default masking threshold (mthresh)') + desc="Additional arguments to the job, e.g., a common SPM operation is to " + "modify the default masking threshold (mthresh)" + ) class Level1DesignOutputSpec(TraitedSpec): - spm_mat_file = File(exists=True, desc='SPM mat file') + spm_mat_file = File(exists=True, desc="SPM mat file") class Level1Design(SPMCommand): @@ -132,15 +144,15 @@ class Level1Design(SPMCommand): input_spec = Level1DesignInputSpec output_spec = Level1DesignOutputSpec - _jobtype = 'stats' - _jobname = 'fmri_spec' + _jobtype = "stats" + _jobname = "fmri_spec" def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ - if opt in ['spm_mat_dir', 'mask_image']: + if opt in ["spm_mat_dir", "mask_image"]: return np.array([str(val)], dtype=object) - if opt in ['session_info']: # , 'factor_info']: + if opt in ["session_info"]: # , 'factor_info']: if isinstance(val, dict): return [val] else: @@ -150,17 +162,17 @@ def _format_arg(self, opt, spec, val): def _parse_inputs(self): """validate spm realign options if set to None ignore """ - einputs = super(Level1Design, - self)._parse_inputs(skip=('mask_threshold', 'flags')) + einputs = super(Level1Design, self)._parse_inputs( + skip=("mask_threshold", "flags") + ) if isdefined(self.inputs.flags): - einputs[0].update( - {flag: val - for (flag, val) in self.inputs.flags.items()}) - for sessinfo in einputs[0]['sess']: - sessinfo['scans'] = scans_for_fnames( - ensure_list(sessinfo['scans']), keep4d=False) + einputs[0].update({flag: val for (flag, val) in self.inputs.flags.items()}) + for sessinfo in einputs[0]["sess"]: + sessinfo["scans"] = scans_for_fnames( + ensure_list(sessinfo["scans"]), keep4d=False + ) if not isdefined(self.inputs.spm_mat_dir): - einputs[0]['dir'] = np.array([str(os.getcwd())], dtype=object) + einputs[0]["dir"] = np.array([str(os.getcwd())], dtype=object) return einputs def _make_matlab_command(self, content): @@ -172,68 +184,79 @@ def _make_matlab_command(self, content): # SPM doesn't handle explicit masking properly, especially # when you want to use the entire mask image postscript = "load SPM;\n" - postscript += ("SPM.xM.VM = spm_vol('%s');\n" % simplify_list( - self.inputs.mask_image)) + postscript += "SPM.xM.VM = spm_vol('%s');\n" % simplify_list( + self.inputs.mask_image + ) postscript += "SPM.xM.I = 0;\n" postscript += "SPM.xM.T = [];\n" - postscript += ("SPM.xM.TH = ones(size(SPM.xM.TH))*(%s);\n" % - self.inputs.mask_threshold) - postscript += ("SPM.xM.xs = struct('Masking', " - "'explicit masking only');\n") + postscript += ( + "SPM.xM.TH = ones(size(SPM.xM.TH))*(%s);\n" % self.inputs.mask_threshold + ) + postscript += "SPM.xM.xs = struct('Masking', " "'explicit masking only');\n" postscript += "save SPM SPM;\n" else: postscript = None return super(Level1Design, self)._make_matlab_command( - content, postscript=postscript) + content, postscript=postscript + ) def 
_list_outputs(self): outputs = self._outputs().get() - spm = os.path.join(os.getcwd(), 'SPM.mat') - outputs['spm_mat_file'] = spm + spm = os.path.join(os.getcwd(), "SPM.mat") + outputs["spm_mat_file"] = spm return outputs class EstimateModelInputSpec(SPMCommandInputSpec): spm_mat_file = File( exists=True, - field='spmmat', + field="spmmat", copyfile=True, mandatory=True, - desc='Absolute path to SPM.mat') + desc="Absolute path to SPM.mat", + ) estimation_method = traits.Dict( - traits.Enum('Classical', 'Bayesian2', 'Bayesian'), - field='method', + traits.Enum("Classical", "Bayesian2", "Bayesian"), + field="method", mandatory=True, - desc=('Dictionary of either Classical: 1, Bayesian: 1, ' - 'or Bayesian2: 1 (dict)')) + desc=( + "Dictionary of either Classical: 1, Bayesian: 1, " "or Bayesian2: 1 (dict)" + ), + ) write_residuals = traits.Bool( - field='write_residuals', desc="Write individual residual images") - flags = traits.Dict(desc='Additional arguments') + field="write_residuals", desc="Write individual residual images" + ) + flags = traits.Dict(desc="Additional arguments") class EstimateModelOutputSpec(TraitedSpec): - mask_image = ImageFileSPM( - exists=True, desc='binary mask to constrain estimation') + mask_image = ImageFileSPM(exists=True, desc="binary mask to constrain estimation") beta_images = OutputMultiPath( - ImageFileSPM(exists=True), desc='design parameter estimates') + ImageFileSPM(exists=True), desc="design parameter estimates" + ) residual_image = ImageFileSPM( - exists=True, desc='Mean-squared image of the residuals') + exists=True, desc="Mean-squared image of the residuals" + ) residual_images = OutputMultiPath( ImageFileSPM(exists=True), - desc="individual residual images (requires `write_residuals`") - RPVimage = ImageFileSPM(exists=True, desc='Resels per voxel image') - spm_mat_file = File(exists=True, desc='Updated SPM mat file') + desc="individual residual images (requires `write_residuals`", + ) + RPVimage = ImageFileSPM(exists=True, desc="Resels per voxel image") + spm_mat_file = File(exists=True, desc="Updated SPM mat file") labels = ImageFileSPM(exists=True, desc="label file") SDerror = OutputMultiPath( - ImageFileSPM(exists=True), - desc="Images of the standard deviation of the error") + ImageFileSPM(exists=True), desc="Images of the standard deviation of the error" + ) ARcoef = OutputMultiPath( - ImageFileSPM(exists=True), desc="Images of the AR coefficient") + ImageFileSPM(exists=True), desc="Images of the AR coefficient" + ) Cbetas = OutputMultiPath( - ImageFileSPM(exists=True), desc="Images of the parameter posteriors") + ImageFileSPM(exists=True), desc="Images of the parameter posteriors" + ) SDbetas = OutputMultiPath( ImageFileSPM(exists=True), - desc="Images of the standard deviation of parameter posteriors") + desc="Images of the standard deviation of parameter posteriors", + ) class EstimateModel(SPMCommand): @@ -248,19 +271,20 @@ class EstimateModel(SPMCommand): >>> est.inputs.estimation_method = {'Classical': 1} >>> est.run() # doctest: +SKIP """ + input_spec = EstimateModelInputSpec output_spec = EstimateModelOutputSpec - _jobtype = 'stats' - _jobname = 'fmri_est' + _jobtype = "stats" + _jobname = "fmri_est" def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ - if opt == 'spm_mat_file': + if opt == "spm_mat_file": return np.array([str(val)], dtype=object) - if opt == 'estimation_method': + if opt == "estimation_method": if isinstance(val, (str, bytes)): - return {'{}'.format(val): 1} + return 
{"{}".format(val): 1} else: return val return super(EstimateModel, self)._format_arg(opt, spec, val) @@ -268,106 +292,132 @@ def _format_arg(self, opt, spec, val): def _parse_inputs(self): """validate spm realign options if set to None ignore """ - einputs = super(EstimateModel, self)._parse_inputs(skip=('flags')) + einputs = super(EstimateModel, self)._parse_inputs(skip=("flags")) if isdefined(self.inputs.flags): - einputs[0].update( - {flag: val - for (flag, val) in self.inputs.flags.items()}) + einputs[0].update({flag: val for (flag, val) in self.inputs.flags.items()}) return einputs def _list_outputs(self): import scipy.io as sio + outputs = self._outputs().get() pth = os.path.dirname(self.inputs.spm_mat_file) - outtype = 'nii' if '12' in self.version.split('.')[0] else 'img' + outtype = "nii" if "12" in self.version.split(".")[0] else "img" spm = sio.loadmat(self.inputs.spm_mat_file, struct_as_record=False) - betas = [vbeta.fname[0] for vbeta in spm['SPM'][0, 0].Vbeta[0]] - if ('Bayesian' in self.inputs.estimation_method.keys() - or 'Bayesian2' in self.inputs.estimation_method.keys()): - outputs['labels'] = os.path.join(pth, 'labels.{}'.format(outtype)) - outputs['SDerror'] = glob(os.path.join(pth, 'Sess*_SDerror*')) - outputs['ARcoef'] = glob(os.path.join(pth, 'Sess*_AR_*')) + betas = [vbeta.fname[0] for vbeta in spm["SPM"][0, 0].Vbeta[0]] + if ( + "Bayesian" in self.inputs.estimation_method.keys() + or "Bayesian2" in self.inputs.estimation_method.keys() + ): + outputs["labels"] = os.path.join(pth, "labels.{}".format(outtype)) + outputs["SDerror"] = glob(os.path.join(pth, "Sess*_SDerror*")) + outputs["ARcoef"] = glob(os.path.join(pth, "Sess*_AR_*")) if betas: - outputs['Cbetas'] = [ - os.path.join(pth, 'C{}'.format(beta)) for beta in betas + outputs["Cbetas"] = [ + os.path.join(pth, "C{}".format(beta)) for beta in betas ] - outputs['SDbetas'] = [ - os.path.join(pth, 'SD{}'.format(beta)) for beta in betas + outputs["SDbetas"] = [ + os.path.join(pth, "SD{}".format(beta)) for beta in betas ] - if 'Classical' in self.inputs.estimation_method.keys(): - outputs['residual_image'] = os.path.join( - pth, 'ResMS.{}'.format(outtype)) - outputs['RPVimage'] = os.path.join(pth, 'RPV.{}'.format(outtype)) + if "Classical" in self.inputs.estimation_method.keys(): + outputs["residual_image"] = os.path.join(pth, "ResMS.{}".format(outtype)) + outputs["RPVimage"] = os.path.join(pth, "RPV.{}".format(outtype)) if self.inputs.write_residuals: - outputs['residual_images'] = glob(os.path.join(pth, 'Res_*')) + outputs["residual_images"] = glob(os.path.join(pth, "Res_*")) if betas: - outputs['beta_images'] = [ - os.path.join(pth, beta) for beta in betas - ] + outputs["beta_images"] = [os.path.join(pth, beta) for beta in betas] - outputs['mask_image'] = os.path.join(pth, 'mask.{}'.format(outtype)) - outputs['spm_mat_file'] = os.path.join(pth, 'SPM.mat') + outputs["mask_image"] = os.path.join(pth, "mask.{}".format(outtype)) + outputs["spm_mat_file"] = os.path.join(pth, "SPM.mat") return outputs class EstimateContrastInputSpec(SPMCommandInputSpec): spm_mat_file = File( exists=True, - field='spmmat', - desc='Absolute path to SPM.mat', + field="spmmat", + desc="Absolute path to SPM.mat", copyfile=True, - mandatory=True) + mandatory=True, + ) contrasts = traits.List( traits.Either( - traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), - traits.List(traits.Float)), - traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), - traits.List(traits.Float), traits.List(traits.Float)), - 
traits.Tuple(traits.Str, traits.Enum('F'), - traits.List( - traits.Either( - traits.Tuple(traits.Str, traits.Enum('T'), - traits.List(traits.Str), - traits.List(traits.Float)), - traits.Tuple(traits.Str, traits.Enum('T'), - traits.List(traits.Str), - traits.List(traits.Float), - traits.List(traits.Float)))))), + traits.Tuple( + traits.Str, + traits.Enum("T"), + traits.List(traits.Str), + traits.List(traits.Float), + ), + traits.Tuple( + traits.Str, + traits.Enum("T"), + traits.List(traits.Str), + traits.List(traits.Float), + traits.List(traits.Float), + ), + traits.Tuple( + traits.Str, + traits.Enum("F"), + traits.List( + traits.Either( + traits.Tuple( + traits.Str, + traits.Enum("T"), + traits.List(traits.Str), + traits.List(traits.Float), + ), + traits.Tuple( + traits.Str, + traits.Enum("T"), + traits.List(traits.Str), + traits.List(traits.Float), + traits.List(traits.Float), + ), + ) + ), + ), + ), desc="""List of contrasts with each contrast being a list of the form: [('name', 'stat', [condition list], [weight list], [session list])] If session list is None or not provided, all sessions are used. For F contrasts, the condition list should contain previously defined T-contrasts.""", - mandatory=True) + mandatory=True, + ) beta_images = InputMultiPath( File(exists=True), - desc=('Parameter estimates of the ' - 'design matrix'), + desc=("Parameter estimates of the " "design matrix"), copyfile=False, - mandatory=True) + mandatory=True, + ) residual_image = File( exists=True, - desc='Mean-squared image of the residuals', + desc="Mean-squared image of the residuals", copyfile=False, - mandatory=True) + mandatory=True, + ) use_derivs = traits.Bool( - desc='use derivatives for estimation', xor=['group_contrast']) - group_contrast = traits.Bool( - desc='higher level contrast', xor=['use_derivs']) + desc="use derivatives for estimation", xor=["group_contrast"] + ) + group_contrast = traits.Bool(desc="higher level contrast", xor=["use_derivs"]) class EstimateContrastOutputSpec(TraitedSpec): con_images = OutputMultiPath( - File(exists=True), desc='contrast images from a t-contrast') + File(exists=True), desc="contrast images from a t-contrast" + ) spmT_images = OutputMultiPath( - File(exists=True), desc='stat images from a t-contrast') + File(exists=True), desc="stat images from a t-contrast" + ) ess_images = OutputMultiPath( - File(exists=True), desc='contrast images from an F-contrast') + File(exists=True), desc="contrast images from an F-contrast" + ) spmF_images = OutputMultiPath( - File(exists=True), desc='stat images from an F-contrast') - spm_mat_file = File(exists=True, desc='Updated SPM mat file') + File(exists=True), desc="stat images from an F-contrast" + ) + spm_mat_file = File(exists=True, desc="Updated SPM mat file") class EstimateContrast(SPMCommand): @@ -388,8 +438,8 @@ class EstimateContrast(SPMCommand): input_spec = EstimateContrastInputSpec output_spec = EstimateContrastOutputSpec - _jobtype = 'stats' - _jobname = 'con' + _jobtype = "stats" + _jobname = "con" def _make_matlab_command(self, _): """validates spm options and generates job structure @@ -398,164 +448,176 @@ def _make_matlab_command(self, _): cname = [] for i, cont in enumerate(self.inputs.contrasts): cname.insert(i, cont[0]) - contrasts.insert(i, - Bunch( - name=cont[0], - stat=cont[1], - conditions=cont[2], - weights=None, - sessions=None)) + contrasts.insert( + i, + Bunch( + name=cont[0], + stat=cont[1], + conditions=cont[2], + weights=None, + sessions=None, + ), + ) if len(cont) >= 4: contrasts[i].weights = 
cont[3] if len(cont) >= 5: contrasts[i].sessions = cont[4] script = "% generated by nipype.interfaces.spm\n" script += "spm_defaults;\n" - script += ("jobs{1}.stats{1}.con.spmmat = {'%s'};\n" % - self.inputs.spm_mat_file) + script += "jobs{1}.stats{1}.con.spmmat = {'%s'};\n" % self.inputs.spm_mat_file script += "load(jobs{1}.stats{1}.con.spmmat{:});\n" script += "SPM.swd = '%s';\n" % os.getcwd() script += "save(jobs{1}.stats{1}.con.spmmat{:},'SPM');\n" script += "names = SPM.xX.name;\n" # get names for columns - if (isdefined(self.inputs.group_contrast) - and self.inputs.group_contrast): + if isdefined(self.inputs.group_contrast) and self.inputs.group_contrast: script += "condnames=names;\n" else: if self.inputs.use_derivs: script += "pat = 'Sn\([0-9]*\) (.*)';\n" else: - script += ("pat = 'Sn\([0-9]*\) (.*)\*bf\(1\)|Sn\([0-9]*\) " - ".*\*bf\([2-9]\)|Sn\([0-9]*\) (.*)';\n") + script += ( + "pat = 'Sn\([0-9]*\) (.*)\*bf\(1\)|Sn\([0-9]*\) " + ".*\*bf\([2-9]\)|Sn\([0-9]*\) (.*)';\n" + ) script += "t = regexp(names,pat,'tokens');\n" # get sessidx for columns script += "pat1 = 'Sn\(([0-9].*)\)\s.*';\n" script += "t1 = regexp(names,pat1,'tokens');\n" - script += ("for i0=1:numel(t),condnames{i0}='';condsess(i0)=0;if " - "~isempty(t{i0}{1}),condnames{i0} = t{i0}{1}{1};" - "condsess(i0)=str2num(t1{i0}{1}{1});end;end;\n") + script += ( + "for i0=1:numel(t),condnames{i0}='';condsess(i0)=0;if " + "~isempty(t{i0}{1}),condnames{i0} = t{i0}{1}{1};" + "condsess(i0)=str2num(t1{i0}{1}{1});end;end;\n" + ) # BUILD CONTRAST SESSION STRUCTURE for i, contrast in enumerate(contrasts): - if contrast.stat == 'T': - script += ("consess{%d}.tcon.name = '%s';\n" % - (i + 1, contrast.name)) - script += ( - "consess{%d}.tcon.convec = zeros(1,numel(names));\n" % - (i + 1)) + if contrast.stat == "T": + script += "consess{%d}.tcon.name = '%s';\n" % (i + 1, contrast.name) + script += "consess{%d}.tcon.convec = zeros(1,numel(names));\n" % (i + 1) for c0, cond in enumerate(contrast.conditions): - script += ("idx = strmatch('%s',condnames,'exact');\n" % - (cond)) - script += (("if isempty(idx), throw(MException(" - "'CondName:Chk', sprintf('Condition %%s not " - "found in design','%s'))); end;\n") % cond) + script += "idx = strmatch('%s',condnames,'exact');\n" % (cond) + script += ( + "if isempty(idx), throw(MException(" + "'CondName:Chk', sprintf('Condition %%s not " + "found in design','%s'))); end;\n" + ) % cond if contrast.sessions: for sno, sw in enumerate(contrast.sessions): - script += ("sidx = find(condsess(idx)==%d);\n" % - (sno + 1)) - script += (("consess{%d}.tcon.convec(idx(sidx)) " - "= %f;\n") % - (i + 1, sw * contrast.weights[c0])) + script += "sidx = find(condsess(idx)==%d);\n" % (sno + 1) + script += ( + "consess{%d}.tcon.convec(idx(sidx)) " "= %f;\n" + ) % (i + 1, sw * contrast.weights[c0]) else: - script += ("consess{%d}.tcon.convec(idx) = %f;\n" % - (i + 1, contrast.weights[c0])) + script += "consess{%d}.tcon.convec(idx) = %f;\n" % ( + i + 1, + contrast.weights[c0], + ) for i, contrast in enumerate(contrasts): - if contrast.stat == 'F': - script += ("consess{%d}.fcon.name = '%s';\n" % - (i + 1, contrast.name)) + if contrast.stat == "F": + script += "consess{%d}.fcon.name = '%s';\n" % (i + 1, contrast.name) for cl0, fcont in enumerate(contrast.conditions): try: tidx = cname.index(fcont[0]) except: - Exception("Contrast Estimate: could not get index of" - " T contrast. 
probably not defined prior " - "to the F contrasts") - script += (("consess{%d}.fcon.convec{%d} = " - "consess{%d}.tcon.convec;\n") % - (i + 1, cl0 + 1, tidx + 1)) + Exception( + "Contrast Estimate: could not get index of" + " T contrast. probably not defined prior " + "to the F contrasts" + ) + script += ( + "consess{%d}.fcon.convec{%d} = " "consess{%d}.tcon.convec;\n" + ) % (i + 1, cl0 + 1, tidx + 1) script += "jobs{1}.stats{1}.con.consess = consess;\n" - script += ("if strcmp(spm('ver'),'SPM8'), spm_jobman('initcfg');" - "jobs=spm_jobman('spm5tospm8',{jobs});end\n") + script += ( + "if strcmp(spm('ver'),'SPM8'), spm_jobman('initcfg');" + "jobs=spm_jobman('spm5tospm8',{jobs});end\n" + ) script += "spm_jobman('run',jobs);" return script def _list_outputs(self): import scipy.io as sio + outputs = self._outputs().get() pth, _ = os.path.split(self.inputs.spm_mat_file) spm = sio.loadmat(self.inputs.spm_mat_file, struct_as_record=False) con_images = [] spmT_images = [] - for con in spm['SPM'][0, 0].xCon[0]: + for con in spm["SPM"][0, 0].xCon[0]: con_images.append(str(os.path.join(pth, con.Vcon[0, 0].fname[0]))) spmT_images.append(str(os.path.join(pth, con.Vspm[0, 0].fname[0]))) if con_images: - outputs['con_images'] = con_images - outputs['spmT_images'] = spmT_images - spm12 = '12' in self.version.split('.')[0] + outputs["con_images"] = con_images + outputs["spmT_images"] = spmT_images + spm12 = "12" in self.version.split(".")[0] if spm12: - ess = glob(os.path.join(pth, 'ess*.nii')) + ess = glob(os.path.join(pth, "ess*.nii")) else: - ess = glob(os.path.join(pth, 'ess*.img')) + ess = glob(os.path.join(pth, "ess*.img")) if len(ess) > 0: - outputs['ess_images'] = sorted(ess) + outputs["ess_images"] = sorted(ess) if spm12: - spmf = glob(os.path.join(pth, 'spmF*.nii')) + spmf = glob(os.path.join(pth, "spmF*.nii")) else: - spmf = glob(os.path.join(pth, 'spmF*.img')) + spmf = glob(os.path.join(pth, "spmF*.img")) if len(spmf) > 0: - outputs['spmF_images'] = sorted(spmf) - outputs['spm_mat_file'] = self.inputs.spm_mat_file + outputs["spmF_images"] = sorted(spmf) + outputs["spm_mat_file"] = self.inputs.spm_mat_file return outputs class ThresholdInputSpec(SPMCommandInputSpec): spm_mat_file = File( - exists=True, - desc='absolute path to SPM.mat', - copyfile=True, - mandatory=True) - stat_image = File( - exists=True, desc='stat image', copyfile=False, mandatory=True) + exists=True, desc="absolute path to SPM.mat", copyfile=True, mandatory=True + ) + stat_image = File(exists=True, desc="stat image", copyfile=False, mandatory=True) contrast_index = traits.Int( - mandatory=True, desc='which contrast in the SPM.mat to use') + mandatory=True, desc="which contrast in the SPM.mat to use" + ) use_fwe_correction = traits.Bool( True, usedefault=True, - desc=('whether to use FWE (Bonferroni) ' - 'correction for initial threshold ' - '(height_threshold_type has to be ' - 'set to p-value)')) + desc=( + "whether to use FWE (Bonferroni) " + "correction for initial threshold " + "(height_threshold_type has to be " + "set to p-value)" + ), + ) use_topo_fdr = traits.Bool( True, usedefault=True, - desc=('whether to use FDR over cluster extent ' - 'probabilities')) + desc=("whether to use FDR over cluster extent " "probabilities"), + ) height_threshold = traits.Float( 0.05, usedefault=True, - desc=('value for initial thresholding ' - '(defining clusters)')) + desc=("value for initial thresholding " "(defining clusters)"), + ) height_threshold_type = traits.Enum( - 'p-value', - 'stat', + "p-value", + "stat", usedefault=True, 
- desc=('Is the cluster forming ' - 'threshold a stat value or ' - 'p-value?')) + desc=("Is the cluster forming " "threshold a stat value or " "p-value?"), + ) extent_fdr_p_threshold = traits.Float( 0.05, usedefault=True, - desc=('p threshold on FDR corrected ' - 'cluster size probabilities')) + desc=("p threshold on FDR corrected " "cluster size probabilities"), + ) extent_threshold = traits.Int( - 0, usedefault=True, desc='Minimum cluster size in voxels') + 0, usedefault=True, desc="Minimum cluster size in voxels" + ) force_activation = traits.Bool( False, usedefault=True, - desc=('In case no clusters survive the ' - 'topological inference step this ' - 'will pick a culster with the highes ' - 'sum of t-values. Use with care.')) + desc=( + "In case no clusters survive the " + "topological inference step this " + "will pick a culster with the highes " + "sum of t-values. Use with care." + ), + ) class ThresholdOutputSpec(TraitedSpec): @@ -582,6 +644,7 @@ class Threshold(SPMCommand): >>> thresh.inputs.extent_fdr_p_threshold = 0.05 >>> thresh.run() # doctest: +SKIP """ + input_spec = ThresholdInputSpec output_spec = ThresholdOutputSpec @@ -610,11 +673,11 @@ def _make_matlab_command(self, _): script += "force_activation = 1;\n" else: script += "force_activation = 0;\n" - script += ("cluster_extent_p_fdr_thr = %f;\n" % - self.inputs.extent_fdr_p_threshold) + script += ( + "cluster_extent_p_fdr_thr = %f;\n" % self.inputs.extent_fdr_p_threshold + ) script += "stat_filename = '%s';\n" % self.inputs.stat_image - script += ("height_threshold_type = '%s';\n" % - self.inputs.height_threshold_type) + script += "height_threshold_type = '%s';\n" % self.inputs.height_threshold_type script += "extent_threshold = %d;\n" % self.inputs.extent_threshold script += "load %s;\n" % self.inputs.spm_mat_file @@ -647,9 +710,10 @@ def _make_matlab_command(self, _): Zth = Z(Z >= cluster_forming_thr); """ - script += (("spm_write_filtered(Zth,XYZth,stat_map_vol.dim'," - "stat_map_vol.mat,'thresholded map', '%s');\n") % - self._gen_pre_topo_map_filename()) + script += ( + "spm_write_filtered(Zth,XYZth,stat_map_vol.dim'," + "stat_map_vol.mat,'thresholded map', '%s');\n" + ) % self._gen_pre_topo_map_filename() script += """ max_size = 0; max_size_index = 0; @@ -707,55 +771,65 @@ def _make_matlab_command(self, _): fprintf('cluster_forming_thr = %f\\n',cluster_forming_thr); """ - script += (("spm_write_filtered(thresholded_Z,thresholded_XYZ," - "stat_map_vol.dim',stat_map_vol.mat,'thresholded map'," - " '%s');\n") % self._gen_thresholded_map_filename()) + script += ( + "spm_write_filtered(thresholded_Z,thresholded_XYZ," + "stat_map_vol.dim',stat_map_vol.mat,'thresholded map'," + " '%s');\n" + ) % self._gen_thresholded_map_filename() return script def aggregate_outputs(self, runtime=None): outputs = self._outputs() - setattr(outputs, 'thresholded_map', - self._gen_thresholded_map_filename()) - setattr(outputs, 'pre_topo_fdr_map', self._gen_pre_topo_map_filename()) - for line in runtime.stdout.split('\n'): + setattr(outputs, "thresholded_map", self._gen_thresholded_map_filename()) + setattr(outputs, "pre_topo_fdr_map", self._gen_pre_topo_map_filename()) + for line in runtime.stdout.split("\n"): if line.startswith("activation_forced = "): - setattr(outputs, 'activation_forced', - line[len("activation_forced = "):].strip() == "1") + setattr( + outputs, + "activation_forced", + line[len("activation_forced = ") :].strip() == "1", + ) elif line.startswith("n_clusters = "): - setattr(outputs, 'n_clusters', - 
int(line[len("n_clusters = "):].strip())) + setattr( + outputs, "n_clusters", int(line[len("n_clusters = ") :].strip()) + ) elif line.startswith("pre_topo_n_clusters = "): - setattr(outputs, 'pre_topo_n_clusters', - int(line[len("pre_topo_n_clusters = "):].strip())) + setattr( + outputs, + "pre_topo_n_clusters", + int(line[len("pre_topo_n_clusters = ") :].strip()), + ) elif line.startswith("cluster_forming_thr = "): - setattr(outputs, 'cluster_forming_thr', - float(line[len("cluster_forming_thr = "):].strip())) + setattr( + outputs, + "cluster_forming_thr", + float(line[len("cluster_forming_thr = ") :].strip()), + ) return outputs def _list_outputs(self): outputs = self._outputs().get() - outputs['thresholded_map'] = self._gen_thresholded_map_filename() - outputs['pre_topo_fdr_map'] = self._gen_pre_topo_map_filename() + outputs["thresholded_map"] = self._gen_thresholded_map_filename() + outputs["pre_topo_fdr_map"] = self._gen_pre_topo_map_filename() return outputs class ThresholdStatisticsInputSpec(SPMCommandInputSpec): spm_mat_file = File( - exists=True, - desc='absolute path to SPM.mat', - copyfile=True, - mandatory=True) - stat_image = File( - exists=True, desc='stat image', copyfile=False, mandatory=True) + exists=True, desc="absolute path to SPM.mat", copyfile=True, mandatory=True + ) + stat_image = File(exists=True, desc="stat image", copyfile=False, mandatory=True) contrast_index = traits.Int( - mandatory=True, desc='which contrast in the SPM.mat to use') + mandatory=True, desc="which contrast in the SPM.mat to use" + ) height_threshold = traits.Float( - desc=('stat value for initial ' - 'thresholding (defining clusters)'), - mandatory=True) + desc=("stat value for initial " "thresholding (defining clusters)"), + mandatory=True, + ) extent_threshold = traits.Int( - 0, usedefault=True, desc="Minimum cluster size in voxels") + 0, usedefault=True, desc="Minimum cluster size in voxels" + ) class ThresholdStatisticsOutputSpec(TraitedSpec): @@ -781,6 +855,7 @@ class ThresholdStatistics(SPMCommand): >>> thresh.inputs.height_threshold = 4.56 >>> thresh.run() # doctest: +SKIP """ + input_spec = ThresholdStatisticsInputSpec output_spec = ThresholdStatisticsOutputSpec @@ -841,16 +916,19 @@ def _make_matlab_command(self, _): def aggregate_outputs(self, runtime=None, needed_outputs=None): outputs = self._outputs() cur_output = "" - for line in runtime.stdout.split('\n'): + for line in runtime.stdout.split("\n"): if cur_output != "" and len(line.split()) != 0: setattr(outputs, cur_output, float(line)) cur_output = "" continue - if (len(line.split()) != 0 and line.split()[0] in [ - "clusterwise_P_FDR", "clusterwise_P_RF", - "voxelwise_P_Bonf", "voxelwise_P_FDR", "voxelwise_P_RF", - "voxelwise_P_uncor" - ]): + if len(line.split()) != 0 and line.split()[0] in [ + "clusterwise_P_FDR", + "clusterwise_P_RF", + "voxelwise_P_Bonf", + "voxelwise_P_FDR", + "voxelwise_P_RF", + "voxelwise_P_uncor", + ]: cur_output = line.split()[0] continue @@ -859,64 +937,68 @@ def aggregate_outputs(self, runtime=None, needed_outputs=None): class FactorialDesignInputSpec(SPMCommandInputSpec): spm_mat_dir = Directory( - exists=True, field='dir', desc='directory to store SPM.mat file (opt)') + exists=True, field="dir", desc="directory to store SPM.mat file (opt)" + ) # Need to make an alias of InputMultiPath; the inputs below are not Path covariates = InputMultiPath( traits.Dict( - key_trait=traits.Enum('vector', 'name', 'interaction', - 'centering')), - field='cov', - desc=('covariate dictionary {vector, name, ' - 
'interaction, centering}')) + key_trait=traits.Enum("vector", "name", "interaction", "centering") + ), + field="cov", + desc=("covariate dictionary {vector, name, " "interaction, centering}"), + ) threshold_mask_none = traits.Bool( - field='masking.tm.tm_none', - xor=['threshold_mask_absolute', 'threshold_mask_relative'], - desc='do not use threshold masking') + field="masking.tm.tm_none", + xor=["threshold_mask_absolute", "threshold_mask_relative"], + desc="do not use threshold masking", + ) threshold_mask_absolute = traits.Float( - field='masking.tm.tma.athresh', - xor=['threshold_mask_none', 'threshold_mask_relative'], - desc='use an absolute threshold') + field="masking.tm.tma.athresh", + xor=["threshold_mask_none", "threshold_mask_relative"], + desc="use an absolute threshold", + ) threshold_mask_relative = traits.Float( - field='masking.tm.tmr.rthresh', - xor=['threshold_mask_absolute', 'threshold_mask_none'], - desc=('threshold using a ' - 'proportion of the global ' - 'value')) + field="masking.tm.tmr.rthresh", + xor=["threshold_mask_absolute", "threshold_mask_none"], + desc=("threshold using a " "proportion of the global " "value"), + ) use_implicit_threshold = traits.Bool( - field='masking.im', - desc=('use implicit mask NaNs or ' - 'zeros to threshold')) + field="masking.im", desc=("use implicit mask NaNs or " "zeros to threshold") + ) explicit_mask_file = File( - field='masking.em', # requires cell - desc='use an implicit mask file to threshold') + field="masking.em", # requires cell + desc="use an implicit mask file to threshold", + ) global_calc_omit = traits.Bool( - field='globalc.g_omit', - xor=['global_calc_mean', 'global_calc_values'], - desc='omit global calculation') + field="globalc.g_omit", + xor=["global_calc_mean", "global_calc_values"], + desc="omit global calculation", + ) global_calc_mean = traits.Bool( - field='globalc.g_mean', - xor=['global_calc_omit', 'global_calc_values'], - desc='use mean for global calculation') + field="globalc.g_mean", + xor=["global_calc_omit", "global_calc_values"], + desc="use mean for global calculation", + ) global_calc_values = traits.List( traits.Float, - field='globalc.g_user.global_uval', - xor=['global_calc_mean', 'global_calc_omit'], - desc='omit global calculation') + field="globalc.g_user.global_uval", + xor=["global_calc_mean", "global_calc_omit"], + desc="omit global calculation", + ) no_grand_mean_scaling = traits.Bool( - field='globalm.gmsca.gmsca_no', - desc=('do not perform grand mean ' - 'scaling')) + field="globalm.gmsca.gmsca_no", desc=("do not perform grand mean " "scaling") + ) global_normalization = traits.Enum( 1, 2, 3, - field='globalm.glonorm', - desc=('global normalization None-1, ' - 'Proportional-2, ANCOVA-3')) + field="globalm.glonorm", + desc=("global normalization None-1, " "Proportional-2, ANCOVA-3"), + ) class FactorialDesignOutputSpec(TraitedSpec): - spm_mat_file = File(exists=True, desc='SPM mat file') + spm_mat_file = File(exists=True, desc="SPM mat file") class FactorialDesign(SPMCommand): @@ -928,21 +1010,21 @@ class FactorialDesign(SPMCommand): input_spec = FactorialDesignInputSpec output_spec = FactorialDesignOutputSpec - _jobtype = 'stats' - _jobname = 'factorial_design' + _jobtype = "stats" + _jobname = "factorial_design" def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ - if opt in ['spm_mat_dir', 'explicit_mask_file']: + if opt in ["spm_mat_dir", "explicit_mask_file"]: return np.array([str(val)], dtype=object) - if opt in ['covariates']: + if opt in 
["covariates"]: outlist = [] mapping = { - 'name': 'cname', - 'vector': 'c', - 'interaction': 'iCFI', - 'centering': 'iCC' + "name": "cname", + "vector": "c", + "interaction": "iCFI", + "centering": "iCC", } for dictitem in val: outdict = {} @@ -957,23 +1039,24 @@ def _parse_inputs(self): """ einputs = super(FactorialDesign, self)._parse_inputs() if not isdefined(self.inputs.spm_mat_dir): - einputs[0]['dir'] = np.array([str(os.getcwd())], dtype=object) + einputs[0]["dir"] = np.array([str(os.getcwd())], dtype=object) return einputs def _list_outputs(self): outputs = self._outputs().get() - spm = os.path.join(os.getcwd(), 'SPM.mat') - outputs['spm_mat_file'] = spm + spm = os.path.join(os.getcwd(), "SPM.mat") + outputs["spm_mat_file"] = spm return outputs class OneSampleTTestDesignInputSpec(FactorialDesignInputSpec): in_files = traits.List( File(exists=True), - field='des.t1.scans', + field="des.t1.scans", mandatory=True, minlen=2, - desc='input files') + desc="input files", + ) class OneSampleTTestDesign(FactorialDesign): @@ -992,7 +1075,7 @@ class OneSampleTTestDesign(FactorialDesign): def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ - if opt in ['in_files']: + if opt in ["in_files"]: return np.array(val, dtype=object) return super(OneSampleTTestDesign, self)._format_arg(opt, spec, val) @@ -1002,24 +1085,25 @@ class TwoSampleTTestDesignInputSpec(FactorialDesignInputSpec): # parameters to require at least two files in each group [SG] group1_files = traits.List( File(exists=True), - field='des.t2.scans1', + field="des.t2.scans1", mandatory=True, minlen=2, - desc='Group 1 input files') + desc="Group 1 input files", + ) group2_files = traits.List( File(exists=True), - field='des.t2.scans2', + field="des.t2.scans2", mandatory=True, minlen=2, - desc='Group 2 input files') + desc="Group 2 input files", + ) dependent = traits.Bool( - field='des.t2.dept', - desc=('Are the measurements dependent between ' - 'levels')) + field="des.t2.dept", desc=("Are the measurements dependent between " "levels") + ) unequal_variance = traits.Bool( - field='des.t2.variance', - desc=('Are the variances equal or unequal ' - 'between groups')) + field="des.t2.variance", + desc=("Are the variances equal or unequal " "between groups"), + ) class TwoSampleTTestDesign(FactorialDesign): @@ -1039,7 +1123,7 @@ class TwoSampleTTestDesign(FactorialDesign): def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ - if opt in ['group1_files', 'group2_files']: + if opt in ["group1_files", "group2_files"]: return np.array(val, dtype=object) return super(TwoSampleTTestDesign, self)._format_arg(opt, spec, val) @@ -1047,14 +1131,17 @@ def _format_arg(self, opt, spec, val): class PairedTTestDesignInputSpec(FactorialDesignInputSpec): paired_files = traits.List( traits.List(File(exists=True), minlen=2, maxlen=2), - field='des.pt.pair', + field="des.pt.pair", mandatory=True, minlen=2, - desc='List of paired files') + desc="List of paired files", + ) grand_mean_scaling = traits.Bool( - field='des.pt.gmsca', desc='Perform grand mean scaling') + field="des.pt.gmsca", desc="Perform grand mean scaling" + ) ancova = traits.Bool( - field='des.pt.ancova', desc='Specify ancova-by-factor regressors') + field="des.pt.ancova", desc="Specify ancova-by-factor regressors" + ) class PairedTTestDesign(FactorialDesign): @@ -1073,7 +1160,7 @@ class PairedTTestDesign(FactorialDesign): def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ - if opt in 
['paired_files']: + if opt in ["paired_files"]: return [dict(scans=np.array(files, dtype=object)) for files in val] return super(PairedTTestDesign, self)._format_arg(opt, spec, val) @@ -1081,20 +1168,22 @@ def _format_arg(self, opt, spec, val): class MultipleRegressionDesignInputSpec(FactorialDesignInputSpec): in_files = traits.List( File(exists=True), - field='des.mreg.scans', + field="des.mreg.scans", mandatory=True, minlen=2, - desc='List of files') + desc="List of files", + ) include_intercept = traits.Bool( True, - field='des.mreg.incint', + field="des.mreg.incint", usedefault=True, - desc='Include intercept in design') + desc="Include intercept in design", + ) user_covariates = InputMultiPath( - traits.Dict(key_trait=traits.Enum('vector', 'name', 'centering')), - field='des.mreg.mcov', - desc=('covariate dictionary {vector, ' - 'name, centering}')) + traits.Dict(key_trait=traits.Enum("vector", "name", "centering")), + field="des.mreg.mcov", + desc=("covariate dictionary {vector, " "name, centering}"), + ) class MultipleRegressionDesign(FactorialDesign): @@ -1113,16 +1202,15 @@ class MultipleRegressionDesign(FactorialDesign): def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ - if opt in ['in_files']: + if opt in ["in_files"]: return np.array(val, dtype=object) - if opt in ['user_covariates']: + if opt in ["user_covariates"]: outlist = [] - mapping = {'name': 'cname', 'vector': 'c', 'centering': 'iCC'} + mapping = {"name": "cname", "vector": "c", "centering": "iCC"} for dictitem in val: outdict = {} for key, keyval in list(dictitem.items()): outdict[mapping[key]] = keyval outlist.append(outdict) return outlist - return (super(MultipleRegressionDesign, self)._format_arg( - opt, spec, val)) + return super(MultipleRegressionDesign, self)._format_arg(opt, spec, val) diff --git a/nipype/interfaces/spm/preprocess.py b/nipype/interfaces/spm/preprocess.py index 5c63936b09..b6a705fa8e 100644 --- a/nipype/interfaces/spm/preprocess.py +++ b/nipype/interfaces/spm/preprocess.py @@ -10,98 +10,194 @@ import numpy as np # Local imports -from ...utils.filemanip import (fname_presuffix, ensure_list, - simplify_list, split_filename) -from ..base import (OutputMultiPath, TraitedSpec, isdefined, - traits, InputMultiPath, InputMultiObject, File, Str) -from .base import (SPMCommand, scans_for_fname, func_is_3d, - scans_for_fnames, SPMCommandInputSpec, ImageFileSPM) - -__docformat__ = 'restructuredtext' +from ...utils.filemanip import ( + fname_presuffix, + ensure_list, + simplify_list, + split_filename, +) +from ..base import ( + OutputMultiPath, + TraitedSpec, + isdefined, + traits, + InputMultiPath, + InputMultiObject, + File, + Str, +) +from .base import ( + SPMCommand, + scans_for_fname, + func_is_3d, + scans_for_fnames, + SPMCommandInputSpec, + ImageFileSPM, +) + +__docformat__ = "restructuredtext" class FieldMapInputSpec(SPMCommandInputSpec): - jobtype = traits.Enum('calculatevdm', 'applyvdm', usedefault=True, - desc='one of: calculatevdm, applyvdm') - phase_file = File(mandatory=True, exists=True, copyfile=False, - field='subj.data.presubphasemag.phase', - desc='presubstracted phase file') - magnitude_file = File(mandatory=True, exists=True, copyfile=False, - field='subj.data.presubphasemag.magnitude', - desc='presubstracted magnitude file') - echo_times = traits.Tuple(traits.Float, traits.Float, mandatory=True, - field='subj.defaults.defaultsval.et', - desc='short and long echo times') - maskbrain = traits.Bool(True, usedefault=True, - 
field='subj.defaults.defaultsval.maskbrain', - desc='masking or no masking of the brain') - blip_direction = traits.Enum(1, -1, mandatory=True, - field='subj.defaults.defaultsval.blipdir', - desc='polarity of the phase-encode blips') - total_readout_time = traits.Float(mandatory=True, - field='subj.defaults.defaultsval.tert', - desc='total EPI readout time') - epifm = traits.Bool(False, usedefault=True, - field='subj.defaults.defaultsval.epifm', - desc='epi-based field map'); - jacobian_modulation = traits.Bool(False, usedefault=True, - field='subj.defaults.defaultsval.ajm', - desc='jacobian modulation'); + jobtype = traits.Enum( + "calculatevdm", + "applyvdm", + usedefault=True, + desc="one of: calculatevdm, applyvdm", + ) + phase_file = File( + mandatory=True, + exists=True, + copyfile=False, + field="subj.data.presubphasemag.phase", + desc="presubstracted phase file", + ) + magnitude_file = File( + mandatory=True, + exists=True, + copyfile=False, + field="subj.data.presubphasemag.magnitude", + desc="presubstracted magnitude file", + ) + echo_times = traits.Tuple( + traits.Float, + traits.Float, + mandatory=True, + field="subj.defaults.defaultsval.et", + desc="short and long echo times", + ) + maskbrain = traits.Bool( + True, + usedefault=True, + field="subj.defaults.defaultsval.maskbrain", + desc="masking or no masking of the brain", + ) + blip_direction = traits.Enum( + 1, + -1, + mandatory=True, + field="subj.defaults.defaultsval.blipdir", + desc="polarity of the phase-encode blips", + ) + total_readout_time = traits.Float( + mandatory=True, + field="subj.defaults.defaultsval.tert", + desc="total EPI readout time", + ) + epifm = traits.Bool( + False, + usedefault=True, + field="subj.defaults.defaultsval.epifm", + desc="epi-based field map", + ) + jacobian_modulation = traits.Bool( + False, + usedefault=True, + field="subj.defaults.defaultsval.ajm", + desc="jacobian modulation", + ) # Unwarping defaults parameters - method = traits.Enum('Mark3D', 'Mark2D', 'Huttonish', usedefault=True, - desc='One of: Mark3D, Mark2D, Huttonish', - field='subj.defaults.defaultsval.uflags.method'); - unwarp_fwhm = traits.Range(low=0, value=10, usedefault=True, - field='subj.defaults.defaultsval.uflags.fwhm', - desc='gaussian smoothing kernel width'); - pad = traits.Range(low=0, value=0, usedefault=True, - field='subj.defaults.defaultsval.uflags.pad', - desc='padding kernel width'); - ws = traits.Bool(True, usedefault=True, - field='subj.defaults.defaultsval.uflags.ws', - desc='weighted smoothing'); + method = traits.Enum( + "Mark3D", + "Mark2D", + "Huttonish", + usedefault=True, + desc="One of: Mark3D, Mark2D, Huttonish", + field="subj.defaults.defaultsval.uflags.method", + ) + unwarp_fwhm = traits.Range( + low=0, + value=10, + usedefault=True, + field="subj.defaults.defaultsval.uflags.fwhm", + desc="gaussian smoothing kernel width", + ) + pad = traits.Range( + low=0, + value=0, + usedefault=True, + field="subj.defaults.defaultsval.uflags.pad", + desc="padding kernel width", + ) + ws = traits.Bool( + True, + usedefault=True, + field="subj.defaults.defaultsval.uflags.ws", + desc="weighted smoothing", + ) # Brain mask defaults parameters - template = File(copyfile=False, exists=True, - field='subj.defaults.defaultsval.mflags.template', - desc='template image for brain masking'); - mask_fwhm = traits.Range(low=0, value=5, usedefault=True, - field='subj.defaults.defaultsval.mflags.fwhm', - desc='gaussian smoothing kernel width'); - nerode = traits.Range(low=0, value=2, usedefault=True, - 
field='subj.defaults.defaultsval.mflags.nerode', - desc='number of erosions'); - ndilate = traits.Range(low=0, value=4, usedefault=True, - field='subj.defaults.defaultsval.mflags.ndilate', - desc='number of erosions'); - thresh = traits.Float(0.5, usedefault=True, - field='subj.defaults.defaultsval.mflags.thresh', - desc='threshold used to create brain mask from segmented data'); - reg = traits.Float(0.02, usedefault=True, - field='subj.defaults.defaultsval.mflags.reg', - desc='regularization value used in the segmentation'); + template = File( + copyfile=False, + exists=True, + field="subj.defaults.defaultsval.mflags.template", + desc="template image for brain masking", + ) + mask_fwhm = traits.Range( + low=0, + value=5, + usedefault=True, + field="subj.defaults.defaultsval.mflags.fwhm", + desc="gaussian smoothing kernel width", + ) + nerode = traits.Range( + low=0, + value=2, + usedefault=True, + field="subj.defaults.defaultsval.mflags.nerode", + desc="number of erosions", + ) + ndilate = traits.Range( + low=0, + value=4, + usedefault=True, + field="subj.defaults.defaultsval.mflags.ndilate", + desc="number of erosions", + ) + thresh = traits.Float( + 0.5, + usedefault=True, + field="subj.defaults.defaultsval.mflags.thresh", + desc="threshold used to create brain mask from segmented data", + ) + reg = traits.Float( + 0.02, + usedefault=True, + field="subj.defaults.defaultsval.mflags.reg", + desc="regularization value used in the segmentation", + ) # EPI unwarping for quality check - epi_file = File(copyfile=False, exists=True, mandatory=True, - field='subj.session.epi', - desc='EPI to unwarp'); - matchvdm = traits.Bool(True, usedefault=True, - field='subj.matchvdm', - desc='match VDM to EPI'); - sessname = Str('_run-', usedefault=True, - field='subj.sessname', - desc='VDM filename extension'); - writeunwarped = traits.Bool(False, usedefault=True, - field='subj.writeunwarped', - desc='write unwarped EPI'); - anat_file = File(copyfile=False, exists=True, - field='subj.anat', - desc='anatomical image for comparison'); - matchanat = traits.Bool(True, usedefault=True, - field='subj.matchanat', - desc='match anatomical image to EPI'); + epi_file = File( + copyfile=False, + exists=True, + mandatory=True, + field="subj.session.epi", + desc="EPI to unwarp", + ) + matchvdm = traits.Bool( + True, usedefault=True, field="subj.matchvdm", desc="match VDM to EPI" + ) + sessname = Str( + "_run-", usedefault=True, field="subj.sessname", desc="VDM filename extension" + ) + writeunwarped = traits.Bool( + False, usedefault=True, field="subj.writeunwarped", desc="write unwarped EPI" + ) + anat_file = File( + copyfile=False, + exists=True, + field="subj.anat", + desc="anatomical image for comparison", + ) + matchanat = traits.Bool( + True, + usedefault=True, + field="subj.matchanat", + desc="match anatomical image to EPI", + ) class FieldMapOutputSpec(TraitedSpec): - vdm = File(exists=True, desc='voxel difference map') + vdm = File(exists=True, desc="voxel difference map") class FieldMap(SPMCommand): @@ -129,13 +225,13 @@ class FieldMap(SPMCommand): input_spec = FieldMapInputSpec output_spec = FieldMapOutputSpec - _jobtype = 'tools' - _jobname = 'fieldmap' + _jobtype = "tools" + _jobname = "fieldmap" def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ - if opt in ['phase_file', 'magnitude_file', 'anat_file', 'epi_file']: + if opt in ["phase_file", "magnitude_file", "anat_file", "epi_file"]: return scans_for_fname(ensure_list(val)) return super(FieldMap, 
self)._format_arg(opt, spec, val) @@ -150,7 +246,7 @@ def _list_outputs(self): outputs = self._outputs().get() jobtype = self.inputs.jobtype if jobtype == "calculatevdm": - outputs['vdm'] = fname_presuffix(self.inputs.phase_file, prefix='vdm5_sc') + outputs["vdm"] = fname_presuffix(self.inputs.phase_file, prefix="vdm5_sc") return outputs @@ -158,43 +254,49 @@ def _list_outputs(self): class SliceTimingInputSpec(SPMCommandInputSpec): in_files = InputMultiPath( traits.Either( - traits.List(ImageFileSPM(exists=True)), ImageFileSPM(exists=True)), - field='scans', - desc='list of filenames to apply slice timing', + traits.List(ImageFileSPM(exists=True)), ImageFileSPM(exists=True) + ), + field="scans", + desc="list of filenames to apply slice timing", mandatory=True, - copyfile=False) + copyfile=False, + ) num_slices = traits.Int( - field='nslices', desc='number of slices in a volume', mandatory=True) + field="nslices", desc="number of slices in a volume", mandatory=True + ) time_repetition = traits.Float( - field='tr', - desc=('time between volume acquisitions' - '(start to start time)'), - mandatory=True) + field="tr", + desc=("time between volume acquisitions" "(start to start time)"), + mandatory=True, + ) time_acquisition = traits.Float( - field='ta', - desc=('time of volume acquisition. usually' - 'calculated as TR-(TR/num_slices)'), - mandatory=True) + field="ta", + desc=("time of volume acquisition. usually" "calculated as TR-(TR/num_slices)"), + mandatory=True, + ) slice_order = traits.List( traits.Float(), - field='so', - desc=('1-based order or onset (in ms) in which ' - 'slices are acquired'), - mandatory=True) + field="so", + desc=("1-based order or onset (in ms) in which " "slices are acquired"), + mandatory=True, + ) ref_slice = traits.Int( - field='refslice', - desc='1-based Number of the reference slice or ' - 'reference time point if slice_order is in ' - 'onsets (ms)', - mandatory=True) + field="refslice", + desc="1-based Number of the reference slice or " + "reference time point if slice_order is in " + "onsets (ms)", + mandatory=True, + ) out_prefix = traits.String( - 'a', field='prefix', usedefault=True, desc='slicetimed output prefix') + "a", field="prefix", usedefault=True, desc="slicetimed output prefix" + ) class SliceTimingOutputSpec(TraitedSpec): timecorrected_files = OutputMultiPath( traits.Either(traits.List(File(exists=True)), File(exists=True)), - desc='slice time corrected files') + desc="slice time corrected files", + ) class SliceTiming(SPMCommand): @@ -220,127 +322,134 @@ class SliceTiming(SPMCommand): input_spec = SliceTimingInputSpec output_spec = SliceTimingOutputSpec - _jobtype = 'temporal' - _jobname = 'st' + _jobtype = "temporal" + _jobname = "st" def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ - if opt == 'in_files': + if opt == "in_files": return scans_for_fnames( - ensure_list(val), keep4d=False, separate_sessions=True) + ensure_list(val), keep4d=False, separate_sessions=True + ) return super(SliceTiming, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() - outputs['timecorrected_files'] = [] + outputs["timecorrected_files"] = [] filelist = ensure_list(self.inputs.in_files) for f in filelist: if isinstance(f, list): run = [ - fname_presuffix(in_f, prefix=self.inputs.out_prefix) - for in_f in f + fname_presuffix(in_f, prefix=self.inputs.out_prefix) for in_f in f ] else: run = fname_presuffix(f, prefix=self.inputs.out_prefix) - outputs['timecorrected_files'].append(run) + 
outputs["timecorrected_files"].append(run) return outputs class RealignInputSpec(SPMCommandInputSpec): in_files = InputMultiPath( - traits.Either(ImageFileSPM(exists=True), - traits.List(ImageFileSPM(exists=True))), - field='data', + traits.Either( + ImageFileSPM(exists=True), traits.List(ImageFileSPM(exists=True)) + ), + field="data", mandatory=True, copyfile=True, - desc='list of filenames to realign') + desc="list of filenames to realign", + ) jobtype = traits.Enum( - 'estwrite', - 'estimate', - 'write', - desc='one of: estimate, write, estwrite', - usedefault=True) + "estwrite", + "estimate", + "write", + desc="one of: estimate, write, estwrite", + usedefault=True, + ) quality = traits.Range( - low=0.0, - high=1.0, - field='eoptions.quality', - desc='0.1 = fast, 1.0 = precise') + low=0.0, high=1.0, field="eoptions.quality", desc="0.1 = fast, 1.0 = precise" + ) fwhm = traits.Range( - low=0.0, field='eoptions.fwhm', desc='gaussian smoothing kernel width') + low=0.0, field="eoptions.fwhm", desc="gaussian smoothing kernel width" + ) separation = traits.Range( - low=0.0, field='eoptions.sep', desc='sampling separation in mm') + low=0.0, field="eoptions.sep", desc="sampling separation in mm" + ) register_to_mean = traits.Bool( - field='eoptions.rtm', - desc=('Indicate whether realignment is ' - 'done to the mean image')) + field="eoptions.rtm", + desc=("Indicate whether realignment is " "done to the mean image"), + ) weight_img = File( - exists=True, - field='eoptions.weight', - desc='filename of weighting image') + exists=True, field="eoptions.weight", desc="filename of weighting image" + ) interp = traits.Range( low=0, high=7, - field='eoptions.interp', - desc='degree of b-spline used for interpolation') + field="eoptions.interp", + desc="degree of b-spline used for interpolation", + ) wrap = traits.List( traits.Int(), minlen=3, maxlen=3, - field='eoptions.wrap', - desc='Check if interpolation should wrap in [x,y,z]') + field="eoptions.wrap", + desc="Check if interpolation should wrap in [x,y,z]", + ) write_which = traits.ListInt( [2, 1], - field='roptions.which', + field="roptions.which", minlen=2, maxlen=2, usedefault=True, - desc='determines which images to reslice') + desc="determines which images to reslice", + ) write_interp = traits.Range( low=0, high=7, - field='roptions.interp', - desc=('degree of b-spline used for ' - 'interpolation')) + field="roptions.interp", + desc=("degree of b-spline used for " "interpolation"), + ) write_wrap = traits.List( traits.Int(), minlen=3, maxlen=3, - field='roptions.wrap', - desc=('Check if interpolation should wrap in ' - '[x,y,z]')) - write_mask = traits.Bool( - field='roptions.mask', desc='True/False mask output image') + field="roptions.wrap", + desc=("Check if interpolation should wrap in " "[x,y,z]"), + ) + write_mask = traits.Bool(field="roptions.mask", desc="True/False mask output image") out_prefix = traits.String( - 'r', - field='roptions.prefix', - usedefault=True, - desc='realigned output prefix') + "r", field="roptions.prefix", usedefault=True, desc="realigned output prefix" + ) class RealignOutputSpec(TraitedSpec): - mean_image = File(exists=True, desc='Mean image file from the realignment') + mean_image = File(exists=True, desc="Mean image file from the realignment") modified_in_files = OutputMultiPath( traits.Either(traits.List(File(exists=True)), File(exists=True)), - desc=('Copies of all files passed to ' - 'in_files. 
Headers will have ' - 'been modified to align all ' - 'images with the first, or ' - 'optionally to first do that, ' - 'extract a mean image, and ' - 're-align to that mean image.')) + desc=( + "Copies of all files passed to " + "in_files. Headers will have " + "been modified to align all " + "images with the first, or " + "optionally to first do that, " + "extract a mean image, and " + "re-align to that mean image." + ), + ) realigned_files = OutputMultiPath( traits.Either(traits.List(File(exists=True)), File(exists=True)), - desc=('If jobtype is write or estwrite, ' - 'these will be the resliced files.' - ' Otherwise, they will be copies ' - 'of in_files that have had their ' - 'headers rewritten.')) + desc=( + "If jobtype is write or estwrite, " + "these will be the resliced files." + " Otherwise, they will be copies " + "of in_files that have had their " + "headers rewritten." + ), + ) realignment_parameters = OutputMultiPath( - File(exists=True), - desc=('Estimated translation and ' - 'rotation parameters')) + File(exists=True), desc=("Estimated translation and " "rotation parameters") + ) class Realign(SPMCommand): @@ -362,26 +471,27 @@ class Realign(SPMCommand): input_spec = RealignInputSpec output_spec = RealignOutputSpec - _jobtype = 'spatial' - _jobname = 'realign' + _jobtype = "spatial" + _jobname = "realign" def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ - if opt == 'in_files': + if opt == "in_files": if self.inputs.jobtype == "write": separate_sessions = False else: separate_sessions = True return scans_for_fnames( - val, keep4d=False, separate_sessions=separate_sessions) + val, keep4d=False, separate_sessions=separate_sessions + ) return super(Realign, self)._format_arg(opt, spec, val) def _parse_inputs(self): """validate spm realign options if set to None ignore """ einputs = super(Realign, self)._parse_inputs() - return [{'%s' % (self.inputs.jobtype): einputs[0]}] + return [{"%s" % (self.inputs.jobtype): einputs[0]}] def _list_outputs(self): outputs = self._outputs().get() @@ -390,22 +500,23 @@ def _list_outputs(self): if self.inputs.jobtype != "write": if isdefined(self.inputs.in_files): - outputs['realignment_parameters'] = [] + outputs["realignment_parameters"] = [] for imgf in self.inputs.in_files: if isinstance(imgf, list): tmp_imgf = imgf[0] else: tmp_imgf = imgf - outputs['realignment_parameters'].append( + outputs["realignment_parameters"].append( fname_presuffix( - tmp_imgf, prefix='rp_', suffix='.txt', use_ext=False)) + tmp_imgf, prefix="rp_", suffix=".txt", use_ext=False + ) + ) if not isinstance(imgf, list) and func_is_3d(imgf): break if self.inputs.jobtype == "estimate": - outputs['realigned_files'] = self.inputs.in_files - if (self.inputs.jobtype == "estimate" - or self.inputs.jobtype == "estwrite"): - outputs['modified_in_files'] = self.inputs.in_files + outputs["realigned_files"] = self.inputs.in_files + if self.inputs.jobtype == "estimate" or self.inputs.jobtype == "estwrite": + outputs["modified_in_files"] = self.inputs.in_files if self.inputs.jobtype == "write" or self.inputs.jobtype == "estwrite": if isinstance(self.inputs.in_files[0], list): first_image = self.inputs.in_files[0][0] @@ -413,170 +524,195 @@ def _list_outputs(self): first_image = self.inputs.in_files[0] if resliced_mean: - outputs['mean_image'] = fname_presuffix( - first_image, prefix='mean') + outputs["mean_image"] = fname_presuffix(first_image, prefix="mean") if resliced_all: - outputs['realigned_files'] = [] - for idx, imgf in enumerate( - 
ensure_list(self.inputs.in_files)): + outputs["realigned_files"] = [] + for idx, imgf in enumerate(ensure_list(self.inputs.in_files)): realigned_run = [] if isinstance(imgf, list): for i, inner_imgf in enumerate(ensure_list(imgf)): newfile = fname_presuffix( - inner_imgf, prefix=self.inputs.out_prefix) + inner_imgf, prefix=self.inputs.out_prefix + ) realigned_run.append(newfile) else: realigned_run = fname_presuffix( - imgf, prefix=self.inputs.out_prefix) - outputs['realigned_files'].append(realigned_run) + imgf, prefix=self.inputs.out_prefix + ) + outputs["realigned_files"].append(realigned_run) return outputs class RealignUnwarpInputSpec(SPMCommandInputSpec): in_files = InputMultiObject( - traits.Either(ImageFileSPM(exists=True), - traits.List(ImageFileSPM(exists=True))), - field='data.scans', + traits.Either( + ImageFileSPM(exists=True), traits.List(ImageFileSPM(exists=True)) + ), + field="data.scans", mandatory=True, copyfile=True, - desc='list of filenames to realign and unwarp') + desc="list of filenames to realign and unwarp", + ) phase_map = File( - field='data.pmscan', - desc='Voxel displacement map to use in unwarping. Unlike SPM standard ' - 'behaviour, the same map will be used for all sessions', - copyfile=False) + field="data.pmscan", + desc="Voxel displacement map to use in unwarping. Unlike SPM standard " + "behaviour, the same map will be used for all sessions", + copyfile=False, + ) quality = traits.Range( - low=0.0, - high=1.0, - field='eoptions.quality', - desc='0.1 = fast, 1.0 = precise') + low=0.0, high=1.0, field="eoptions.quality", desc="0.1 = fast, 1.0 = precise" + ) fwhm = traits.Range( - low=0.0, - field='eoptions.fwhm', - desc='gaussian smoothing kernel width') + low=0.0, field="eoptions.fwhm", desc="gaussian smoothing kernel width" + ) separation = traits.Range( - low=0.0, - field='eoptions.sep', - desc='sampling separation in mm') + low=0.0, field="eoptions.sep", desc="sampling separation in mm" + ) register_to_mean = traits.Bool( - field='eoptions.rtm', - desc='Indicate whether realignment is done to the mean image') + field="eoptions.rtm", + desc="Indicate whether realignment is done to the mean image", + ) weight_img = File( - exists=True, - field='eoptions.weight', - desc='filename of weighting image') + exists=True, field="eoptions.weight", desc="filename of weighting image" + ) interp = traits.Range( low=0, high=7, - field='eoptions.einterp', - desc='degree of b-spline used for interpolation') + field="eoptions.einterp", + desc="degree of b-spline used for interpolation", + ) wrap = traits.List( traits.Int(), minlen=3, maxlen=3, - field='eoptions.ewrap', - desc='Check if interpolation should wrap in [x,y,z]') + field="eoptions.ewrap", + desc="Check if interpolation should wrap in [x,y,z]", + ) est_basis_func = traits.List( traits.Int(), minlen=2, maxlen=2, - field='uweoptions.basfcn', - desc='Number of basis functions to use for each dimension') + field="uweoptions.basfcn", + desc="Number of basis functions to use for each dimension", + ) est_reg_order = traits.Range( low=0, high=3, - field='uweoptions.regorder', - desc=('This parameter determines how to balance the compromise between likelihood ' - 'maximization and smoothness maximization of the estimated field.')) + field="uweoptions.regorder", + desc=( + "This parameter determines how to balance the compromise between likelihood " + "maximization and smoothness maximization of the estimated field." 
+ ), + ) est_reg_factor = traits.ListInt( [100000], - field='uweoptions.lambda', + field="uweoptions.lambda", minlen=1, maxlen=1, usedefault=True, - desc='Regularisation factor. Default: 100000 (medium).') + desc="Regularisation factor. Default: 100000 (medium).", + ) est_jacobian_deformations = traits.Bool( - field='uweoptions.jm', - desc=('Jacobian deformations. In theory a good idea to include them, ' - ' in practice a bad idea. Default: No.')) + field="uweoptions.jm", + desc=( + "Jacobian deformations. In theory a good idea to include them, " + " in practice a bad idea. Default: No." + ), + ) est_first_order_effects = traits.List( traits.Int(), minlen=1, maxlen=6, - field='uweoptions.fot', - desc='First order effects should only depend on pitch and roll, i.e. [4 5]') + field="uweoptions.fot", + desc="First order effects should only depend on pitch and roll, i.e. [4 5]", + ) est_second_order_effects = traits.List( traits.Int(), minlen=1, maxlen=6, - field='uweoptions.sot', - desc='List of second order terms to model second derivatives of.') + field="uweoptions.sot", + desc="List of second order terms to model second derivatives of.", + ) est_unwarp_fwhm = traits.Range( low=0.0, - field='uweoptions.uwfwhm', - desc='gaussian smoothing kernel width for unwarp') + field="uweoptions.uwfwhm", + desc="gaussian smoothing kernel width for unwarp", + ) est_re_est_mov_par = traits.Bool( - field='uweoptions.rem', - desc='Re-estimate movement parameters at each unwarping iteration.') + field="uweoptions.rem", + desc="Re-estimate movement parameters at each unwarping iteration.", + ) est_num_of_iterations = traits.ListInt( [5], - field='uweoptions.noi', + field="uweoptions.noi", minlen=1, maxlen=1, usedefault=True, - desc='Number of iterations.') + desc="Number of iterations.", + ) est_taylor_expansion_point = traits.String( - 'Average', - field='uweoptions.expround', + "Average", + field="uweoptions.expround", usedefault=True, - desc='Point in position space to perform Taylor-expansion around.') + desc="Point in position space to perform Taylor-expansion around.", + ) reslice_which = traits.ListInt( [2, 1], - field='uwroptions.uwwhich', + field="uwroptions.uwwhich", minlen=2, maxlen=2, usedefault=True, - desc='determines which images to reslice') + desc="determines which images to reslice", + ) reslice_interp = traits.Range( low=0, high=7, - field='uwroptions.rinterp', - desc='degree of b-spline used for interpolation') + field="uwroptions.rinterp", + desc="degree of b-spline used for interpolation", + ) reslice_wrap = traits.List( traits.Int(), minlen=3, maxlen=3, - field='uwroptions.wrap', - desc='Check if interpolation should wrap in [x,y,z]') + field="uwroptions.wrap", + desc="Check if interpolation should wrap in [x,y,z]", + ) reslice_mask = traits.Bool( - field='uwroptions.mask', - desc='True/False mask output image') + field="uwroptions.mask", desc="True/False mask output image" + ) out_prefix = traits.String( - 'u', - field='uwroptions.prefix', + "u", + field="uwroptions.prefix", usedefault=True, - desc='realigned and unwarped output prefix') + desc="realigned and unwarped output prefix", + ) class RealignUnwarpOutputSpec(TraitedSpec): - mean_image = File(exists=True, desc='Mean image file from the realignment & unwarping') + mean_image = File( + exists=True, desc="Mean image file from the realignment & unwarping" + ) modified_in_files = OutputMultiPath( traits.Either(traits.List(File(exists=True)), File(exists=True)), - desc=('Copies of all files passed to ' - 'in_files. 
Headers will have ' - 'been modified to align all ' - 'images with the first, or ' - 'optionally to first do that, ' - 'extract a mean image, and ' - 're-align to that mean image.')) + desc=( + "Copies of all files passed to " + "in_files. Headers will have " + "been modified to align all " + "images with the first, or " + "optionally to first do that, " + "extract a mean image, and " + "re-align to that mean image." + ), + ) realigned_unwarped_files = OutputMultiPath( traits.Either(traits.List(File(exists=True)), File(exists=True)), - desc='Realigned and unwarped files written to disc.') + desc="Realigned and unwarped files written to disc.", + ) realignment_parameters = OutputMultiPath( - File(exists=True), - desc='Estimated translation and rotation parameters') + File(exists=True), desc="Estimated translation and rotation parameters" + ) class RealignUnwarp(SPMCommand): @@ -602,56 +738,54 @@ class RealignUnwarp(SPMCommand): input_spec = RealignUnwarpInputSpec output_spec = RealignUnwarpOutputSpec - _jobtype = 'spatial' - _jobname = 'realignunwarp' + _jobtype = "spatial" + _jobname = "realignunwarp" def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ - if opt == 'in_files': - return scans_for_fnames(ensure_list(val), - keep4d=False, - separate_sessions=True) + if opt == "in_files": + return scans_for_fnames( + ensure_list(val), keep4d=False, separate_sessions=True + ) return super(RealignUnwarp, self)._format_arg(opt, spec, val) - def _parse_inputs(self, skip=()): spmdict = super(RealignUnwarp, self)._parse_inputs(skip=())[0] if isdefined(self.inputs.phase_map): - pmscan = spmdict['data']['pmscan'] + pmscan = spmdict["data"]["pmscan"] else: - pmscan = '' + pmscan = "" if isdefined(self.inputs.in_files): if isinstance(self.inputs.in_files, list): - data = [dict(scans = sess, pmscan = pmscan) - for sess in spmdict['data']['scans']] + data = [ + dict(scans=sess, pmscan=pmscan) for sess in spmdict["data"]["scans"] + ] else: - data = [dict(scans = spmdict['data']['scans'], pmscan = pmscan)] + data = [dict(scans=spmdict["data"]["scans"], pmscan=pmscan)] - spmdict['data'] = data + spmdict["data"] = data return [spmdict] - def _list_outputs(self): outputs = self._outputs().get() resliced_all = self.inputs.reslice_which[0] > 0 resliced_mean = self.inputs.reslice_which[1] > 0 if isdefined(self.inputs.in_files): - outputs['realignment_parameters'] = [] + outputs["realignment_parameters"] = [] for imgf in self.inputs.in_files: if isinstance(imgf, list): tmp_imgf = imgf[0] else: tmp_imgf = imgf - outputs['realignment_parameters'].append(fname_presuffix(tmp_imgf, - prefix='rp_', - suffix='.txt', - use_ext=False)) + outputs["realignment_parameters"].append( + fname_presuffix(tmp_imgf, prefix="rp_", suffix=".txt", use_ext=False) + ) if not isinstance(imgf, list) and func_is_3d(imgf): break @@ -661,21 +795,21 @@ def _list_outputs(self): first_image = self.inputs.in_files[0] if resliced_mean: - outputs['mean_image'] = fname_presuffix(first_image, prefix='meanu') + outputs["mean_image"] = fname_presuffix(first_image, prefix="meanu") if resliced_all: - outputs['realigned_unwarped_files'] = [] + outputs["realigned_unwarped_files"] = [] for idx, imgf in enumerate(ensure_list(self.inputs.in_files)): realigned_run = [] if isinstance(imgf, list): for i, inner_imgf in enumerate(ensure_list(imgf)): - newfile = fname_presuffix(inner_imgf, - prefix=self.inputs.out_prefix) + newfile = fname_presuffix( + inner_imgf, prefix=self.inputs.out_prefix + ) realigned_run.append(newfile) 
else: - realigned_run = fname_presuffix(imgf, - prefix=self.inputs.out_prefix) - outputs['realigned_unwarped_files'].append(realigned_run) + realigned_run = fname_presuffix(imgf, prefix=self.inputs.out_prefix) + outputs["realigned_unwarped_files"].append(realigned_run) return outputs @@ -683,76 +817,83 @@ class CoregisterInputSpec(SPMCommandInputSpec): target = ImageFileSPM( exists=True, mandatory=True, - field='ref', - desc='reference file to register to', - copyfile=False) + field="ref", + desc="reference file to register to", + copyfile=False, + ) source = InputMultiPath( ImageFileSPM(exists=True), - field='source', - desc='file to register to target', + field="source", + desc="file to register to target", copyfile=True, - mandatory=True) + mandatory=True, + ) jobtype = traits.Enum( - 'estwrite', - 'estimate', - 'write', - desc='one of: estimate, write, estwrite', - usedefault=True) + "estwrite", + "estimate", + "write", + desc="one of: estimate, write, estwrite", + usedefault=True, + ) apply_to_files = InputMultiPath( File(exists=True), - field='other', - desc='files to apply transformation to', - copyfile=True) + field="other", + desc="files to apply transformation to", + copyfile=True, + ) cost_function = traits.Enum( - 'mi', - 'nmi', - 'ecc', - 'ncc', - field='eoptions.cost_fun', + "mi", + "nmi", + "ecc", + "ncc", + field="eoptions.cost_fun", desc="""cost function, one of: 'mi' - Mutual Information, 'nmi' - Normalised Mutual Information, 'ecc' - Entropy Correlation Coefficient, - 'ncc' - Normalised Cross Correlation""") + 'ncc' - Normalised Cross Correlation""", + ) fwhm = traits.List( traits.Float(), minlen=2, maxlen=2, - field='eoptions.fwhm', - desc='gaussian smoothing kernel width (mm)') + field="eoptions.fwhm", + desc="gaussian smoothing kernel width (mm)", + ) separation = traits.List( - traits.Float(), field='eoptions.sep', desc='sampling separation in mm') + traits.Float(), field="eoptions.sep", desc="sampling separation in mm" + ) tolerance = traits.List( traits.Float(), - field='eoptions.tol', - desc='acceptable tolerance for each of 12 params') + field="eoptions.tol", + desc="acceptable tolerance for each of 12 params", + ) write_interp = traits.Range( low=0, high=7, - field='roptions.interp', - desc=('degree of b-spline used for ' - 'interpolation')) + field="roptions.interp", + desc=("degree of b-spline used for " "interpolation"), + ) write_wrap = traits.List( traits.Int(), minlen=3, maxlen=3, - field='roptions.wrap', - desc=('Check if interpolation should wrap in ' - '[x,y,z]')) - write_mask = traits.Bool( - field='roptions.mask', desc='True/False mask output image') + field="roptions.wrap", + desc=("Check if interpolation should wrap in " "[x,y,z]"), + ) + write_mask = traits.Bool(field="roptions.mask", desc="True/False mask output image") out_prefix = traits.String( - 'r', - field='roptions.prefix', - usedefault=True, - desc='coregistered output prefix') + "r", field="roptions.prefix", usedefault=True, desc="coregistered output prefix" + ) class CoregisterOutputSpec(TraitedSpec): coregistered_source = OutputMultiPath( - File(exists=True), desc='Coregistered source files') + File(exists=True), desc="Coregistered source files" + ) coregistered_files = OutputMultiPath( - File(exists=True), desc='Coregistered other files') + File(exists=True), desc="Coregistered other files" + ) class Coregister(SPMCommand): @@ -773,18 +914,17 @@ class Coregister(SPMCommand): input_spec = CoregisterInputSpec output_spec = CoregisterOutputSpec - _jobtype = 'spatial' - _jobname = 'coreg' 
+ _jobtype = "spatial" + _jobname = "coreg" def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ - if (opt == 'target' - or (opt == 'source' and self.inputs.jobtype != "write")): + if opt == "target" or (opt == "source" and self.inputs.jobtype != "write"): return scans_for_fnames(ensure_list(val), keep4d=True) - if opt == 'apply_to_files': + if opt == "apply_to_files": return np.array(ensure_list(val), dtype=object) - if opt == 'source' and self.inputs.jobtype == "write": + if opt == "source" and self.inputs.jobtype == "write": if isdefined(self.inputs.apply_to_files): return scans_for_fnames(val + self.inputs.apply_to_files) else: @@ -795,32 +935,38 @@ def _parse_inputs(self): """validate spm coregister options if set to None ignore """ if self.inputs.jobtype == "write": - einputs = (super(Coregister, self) - ._parse_inputs(skip=('jobtype', 'apply_to_files'))) + einputs = super(Coregister, self)._parse_inputs( + skip=("jobtype", "apply_to_files") + ) else: - einputs = super(Coregister, self)._parse_inputs(skip=('jobtype')) + einputs = super(Coregister, self)._parse_inputs(skip=("jobtype")) jobtype = self.inputs.jobtype - return [{'%s' % (jobtype): einputs[0]}] + return [{"%s" % (jobtype): einputs[0]}] def _list_outputs(self): outputs = self._outputs().get() if self.inputs.jobtype == "estimate": if isdefined(self.inputs.apply_to_files): - outputs['coregistered_files'] = self.inputs.apply_to_files - outputs['coregistered_source'] = self.inputs.source - elif (self.inputs.jobtype == "write" - or self.inputs.jobtype == "estwrite"): + outputs["coregistered_files"] = self.inputs.apply_to_files + outputs["coregistered_source"] = self.inputs.source + elif self.inputs.jobtype == "write" or self.inputs.jobtype == "estwrite": if isdefined(self.inputs.apply_to_files): - outputs['coregistered_files'] = [] + outputs["coregistered_files"] = [] for imgf in ensure_list(self.inputs.apply_to_files): - (outputs['coregistered_files'].append( - fname_presuffix(imgf, prefix=self.inputs.out_prefix))) + ( + outputs["coregistered_files"].append( + fname_presuffix(imgf, prefix=self.inputs.out_prefix) + ) + ) - outputs['coregistered_source'] = [] + outputs["coregistered_source"] = [] for imgf in ensure_list(self.inputs.source): - (outputs['coregistered_source'].append( - fname_presuffix(imgf, prefix=self.inputs.out_prefix))) + ( + outputs["coregistered_source"].append( + fname_presuffix(imgf, prefix=self.inputs.out_prefix) + ) + ) return outputs @@ -828,108 +974,106 @@ def _list_outputs(self): class NormalizeInputSpec(SPMCommandInputSpec): template = File( exists=True, - field='eoptions.template', - desc='template file to normalize to', + field="eoptions.template", + desc="template file to normalize to", mandatory=True, - xor=['parameter_file'], - copyfile=False) + xor=["parameter_file"], + copyfile=False, + ) source = InputMultiPath( ImageFileSPM(exists=True), - field='subj.source', - xor=['parameter_file'], - desc='file to normalize to template', + field="subj.source", + xor=["parameter_file"], + desc="file to normalize to template", mandatory=True, - copyfile=True) + copyfile=True, + ) jobtype = traits.Enum( - 'estwrite', - 'est', - 'write', - usedefault=True, - desc='Estimate, Write or do both') + "estwrite", "est", "write", usedefault=True, desc="Estimate, Write or do both" + ) apply_to_files = InputMultiPath( traits.Either(File(exists=True), traits.List(File(exists=True))), - field='subj.resample', - desc='files to apply transformation to', - copyfile=True) + 
field="subj.resample", + desc="files to apply transformation to", + copyfile=True, + ) parameter_file = File( - field='subj.matname', + field="subj.matname", mandatory=True, - xor=['source', 'template'], - desc='normalization parameter file*_sn.mat', - copyfile=False) + xor=["source", "template"], + desc="normalization parameter file*_sn.mat", + copyfile=False, + ) source_weight = File( - field='subj.wtsrc', - desc='name of weighting image for source', - copyfile=False) + field="subj.wtsrc", desc="name of weighting image for source", copyfile=False + ) template_weight = File( - field='eoptions.weight', - desc='name of weighting image for template', - copyfile=False) + field="eoptions.weight", + desc="name of weighting image for template", + copyfile=False, + ) source_image_smoothing = traits.Float( - field='eoptions.smosrc', desc='source smoothing') + field="eoptions.smosrc", desc="source smoothing" + ) template_image_smoothing = traits.Float( - field='eoptions.smoref', desc='template smoothing') + field="eoptions.smoref", desc="template smoothing" + ) affine_regularization_type = traits.Enum( - 'mni', - 'size', - 'none', - field='eoptions.regtype', - desc='mni, size, none') + "mni", "size", "none", field="eoptions.regtype", desc="mni, size, none" + ) DCT_period_cutoff = traits.Float( - field='eoptions.cutoff', desc='Cutoff of for DCT bases') + field="eoptions.cutoff", desc="Cutoff of for DCT bases" + ) nonlinear_iterations = traits.Int( - field='eoptions.nits', - desc=('Number of iterations of ' - 'nonlinear warping')) + field="eoptions.nits", desc=("Number of iterations of " "nonlinear warping") + ) nonlinear_regularization = traits.Float( - field='eoptions.reg', - desc=('the amount of the ' - 'regularization for the ' - 'nonlinear part of the ' - 'normalization')) + field="eoptions.reg", + desc=( + "the amount of the " + "regularization for the " + "nonlinear part of the " + "normalization" + ), + ) write_preserve = traits.Bool( - field='roptions.preserve', - desc='True/False warped images are modulated') + field="roptions.preserve", desc="True/False warped images are modulated" + ) write_bounding_box = traits.List( traits.List(traits.Float(), minlen=3, maxlen=3), - field='roptions.bb', + field="roptions.bb", minlen=2, maxlen=2, - desc='3x2-element list of lists') + desc="3x2-element list of lists", + ) write_voxel_sizes = traits.List( - traits.Float(), - field='roptions.vox', - minlen=3, - maxlen=3, - desc='3-element list') + traits.Float(), field="roptions.vox", minlen=3, maxlen=3, desc="3-element list" + ) write_interp = traits.Range( low=0, high=7, - field='roptions.interp', - desc=('degree of b-spline used for ' - 'interpolation')) + field="roptions.interp", + desc=("degree of b-spline used for " "interpolation"), + ) write_wrap = traits.List( traits.Int(), - field='roptions.wrap', - desc=('Check if interpolation should wrap in ' - '[x,y,z] - list of bools')) + field="roptions.wrap", + desc=("Check if interpolation should wrap in " "[x,y,z] - list of bools"), + ) out_prefix = traits.String( - 'w', - field='roptions.prefix', - usedefault=True, - desc='normalized output prefix') + "w", field="roptions.prefix", usedefault=True, desc="normalized output prefix" + ) class NormalizeOutputSpec(TraitedSpec): normalization_parameters = OutputMultiPath( File(exists=True), - desc=('MAT files containing ' - 'the normalization ' - 'parameters')) + desc=("MAT files containing " "the normalization " "parameters"), + ) normalized_source = OutputMultiPath( - File(exists=True), desc='Normalized 
source files') - normalized_files = OutputMultiPath( - File(exists=True), desc='Normalized other files') + File(exists=True), desc="Normalized source files" + ) + normalized_files = OutputMultiPath(File(exists=True), desc="Normalized other files") class Normalize(SPMCommand): @@ -948,82 +1092,81 @@ class Normalize(SPMCommand): input_spec = NormalizeInputSpec output_spec = NormalizeOutputSpec - _jobtype = 'spatial' - _jobname = 'normalise' + _jobtype = "spatial" + _jobname = "normalise" def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ - if opt == 'template': + if opt == "template": return scans_for_fname(ensure_list(val)) - if opt == 'source': + if opt == "source": return scans_for_fname(ensure_list(val)) - if opt == 'apply_to_files': + if opt == "apply_to_files": return scans_for_fnames(ensure_list(val)) - if opt == 'parameter_file': + if opt == "parameter_file": return np.array([simplify_list(val)], dtype=object) - if opt in ['write_wrap']: + if opt in ["write_wrap"]: if len(val) != 3: - raise ValueError('%s must have 3 elements' % opt) + raise ValueError("%s must have 3 elements" % opt) return super(Normalize, self)._format_arg(opt, spec, val) def _parse_inputs(self): """Validate spm normalize options if set to None ignore """ - einputs = super( - Normalize, self)._parse_inputs(skip=('jobtype', 'apply_to_files')) + einputs = super(Normalize, self)._parse_inputs( + skip=("jobtype", "apply_to_files") + ) if isdefined(self.inputs.apply_to_files): inputfiles = deepcopy(self.inputs.apply_to_files) if isdefined(self.inputs.source): inputfiles.extend(self.inputs.source) - einputs[0]['subj']['resample'] = scans_for_fnames(inputfiles) + einputs[0]["subj"]["resample"] = scans_for_fnames(inputfiles) jobtype = self.inputs.jobtype - if jobtype in ['estwrite', 'write']: + if jobtype in ["estwrite", "write"]: if not isdefined(self.inputs.apply_to_files): if isdefined(self.inputs.source): - einputs[0]['subj']['resample'] = scans_for_fname( - self.inputs.source) - return [{'%s' % (jobtype): einputs[0]}] + einputs[0]["subj"]["resample"] = scans_for_fname(self.inputs.source) + return [{"%s" % (jobtype): einputs[0]}] def _list_outputs(self): outputs = self._outputs().get() jobtype = self.inputs.jobtype - if jobtype.startswith('est'): - outputs['normalization_parameters'] = [] + if jobtype.startswith("est"): + outputs["normalization_parameters"] = [] for imgf in ensure_list(self.inputs.source): - outputs['normalization_parameters'].append( - fname_presuffix(imgf, suffix='_sn.mat', use_ext=False)) - outputs['normalization_parameters'] = simplify_list( - outputs['normalization_parameters']) + outputs["normalization_parameters"].append( + fname_presuffix(imgf, suffix="_sn.mat", use_ext=False) + ) + outputs["normalization_parameters"] = simplify_list( + outputs["normalization_parameters"] + ) if self.inputs.jobtype == "estimate": if isdefined(self.inputs.apply_to_files): - outputs['normalized_files'] = self.inputs.apply_to_files - outputs['normalized_source'] = self.inputs.source - elif 'write' in self.inputs.jobtype: - if (isdefined(self.inputs.write_preserve) - and self.inputs.write_preserve): - prefixNorm = ''.join(['m', self.inputs.out_prefix]) + outputs["normalized_files"] = self.inputs.apply_to_files + outputs["normalized_source"] = self.inputs.source + elif "write" in self.inputs.jobtype: + if isdefined(self.inputs.write_preserve) and self.inputs.write_preserve: + prefixNorm = "".join(["m", self.inputs.out_prefix]) else: prefixNorm = self.inputs.out_prefix - 
outputs['normalized_files'] = [] + outputs["normalized_files"] = [] if isdefined(self.inputs.apply_to_files): filelist = ensure_list(self.inputs.apply_to_files) for f in filelist: if isinstance(f, list): - run = [ - fname_presuffix(in_f, prefix=prefixNorm) - for in_f in f - ] + run = [fname_presuffix(in_f, prefix=prefixNorm) for in_f in f] else: run = [fname_presuffix(f, prefix=prefixNorm)] - outputs['normalized_files'].extend(run) + outputs["normalized_files"].extend(run) if isdefined(self.inputs.source): - outputs['normalized_source'] = [] + outputs["normalized_source"] = [] for imgf in ensure_list(self.inputs.source): - outputs['normalized_source'].append( - fname_presuffix(imgf, prefix=prefixNorm)) + outputs["normalized_source"].append( + fname_presuffix(imgf, prefix=prefixNorm) + ) return outputs @@ -1031,32 +1174,34 @@ def _list_outputs(self): class Normalize12InputSpec(SPMCommandInputSpec): image_to_align = ImageFileSPM( exists=True, - field='subj.vol', - desc=('file to estimate normalization parameters ' - 'with'), - xor=['deformation_file'], + field="subj.vol", + desc=("file to estimate normalization parameters " "with"), + xor=["deformation_file"], mandatory=True, - copyfile=True) + copyfile=True, + ) apply_to_files = InputMultiPath( traits.Either( - ImageFileSPM(exists=True), traits.List(ImageFileSPM(exists=True))), - field='subj.resample', - desc='files to apply transformation to', - copyfile=True) + ImageFileSPM(exists=True), traits.List(ImageFileSPM(exists=True)) + ), + field="subj.resample", + desc="files to apply transformation to", + copyfile=True, + ) deformation_file = ImageFileSPM( - field='subj.def', + field="subj.def", mandatory=True, - xor=['image_to_align', 'tpm'], + xor=["image_to_align", "tpm"], copyfile=False, - desc=('file y_*.nii containing 3 deformation ' - 'fields for the deformation in x, y and z ' - 'dimension')) + desc=( + "file y_*.nii containing 3 deformation " + "fields for the deformation in x, y and z " + "dimension" + ), + ) jobtype = traits.Enum( - 'estwrite', - 'est', - 'write', - usedefault=True, - desc='Estimate, Write or do Both') + "estwrite", "est", "write", usedefault=True, desc="Estimate, Write or do Both" + ) bias_regularization = traits.Enum( 0, 0.00001, @@ -1066,8 +1211,9 @@ class Normalize12InputSpec(SPMCommandInputSpec): 0.1, 1, 10, - field='eoptions.biasreg', - desc='no(0) - extremely heavy (10)') + field="eoptions.biasreg", + desc="no(0) - extremely heavy (10)", + ) bias_fwhm = traits.Enum( 30, 40, @@ -1082,75 +1228,82 @@ class Normalize12InputSpec(SPMCommandInputSpec): 130, 140, 150, - 'Inf', - field='eoptions.biasfwhm', - desc='FWHM of Gaussian smoothness of bias') + "Inf", + field="eoptions.biasfwhm", + desc="FWHM of Gaussian smoothness of bias", + ) tpm = File( exists=True, - field='eoptions.tpm', - desc=('template in form of tissue probablitiy maps to ' - 'normalize to'), - xor=['deformation_file'], - copyfile=False) + field="eoptions.tpm", + desc=("template in form of tissue probablitiy maps to " "normalize to"), + xor=["deformation_file"], + copyfile=False, + ) affine_regularization_type = traits.Enum( - 'mni', 'size', 'none', field='eoptions.affreg', desc='mni, size, none') + "mni", "size", "none", field="eoptions.affreg", desc="mni, size, none" + ) warping_regularization = traits.List( traits.Float(), - field='eoptions.reg', + field="eoptions.reg", minlen=5, maxlen=5, - desc=('controls balance between ' - 'parameters and data')) + desc=("controls balance between " "parameters and data"), + ) smoothness = traits.Float( - 
field='eoptions.fwhm', - desc=('value (in mm) to smooth the data before ' - 'normalization')) + field="eoptions.fwhm", + desc=("value (in mm) to smooth the data before " "normalization"), + ) sampling_distance = traits.Float( - field='eoptions.samp', - desc=('Sampling distance on data for ' - 'parameter estimation')) + field="eoptions.samp", + desc=("Sampling distance on data for " "parameter estimation"), + ) write_bounding_box = traits.List( traits.List(traits.Float(), minlen=3, maxlen=3), - field='woptions.bb', + field="woptions.bb", minlen=2, maxlen=2, - desc=('3x2-element list of lists ' - 'representing the bounding box ' - '(in mm) to be written')) + desc=( + "3x2-element list of lists " + "representing the bounding box " + "(in mm) to be written" + ), + ) write_voxel_sizes = traits.List( traits.Float(), - field='woptions.vox', + field="woptions.vox", minlen=3, maxlen=3, - desc=('3-element list representing the ' - 'voxel sizes (in mm) of the written ' - 'normalised images')) + desc=( + "3-element list representing the " + "voxel sizes (in mm) of the written " + "normalised images" + ), + ) write_interp = traits.Range( low=0, high=7, - field='woptions.interp', - desc=('degree of b-spline used for ' - 'interpolation')) + field="woptions.interp", + desc=("degree of b-spline used for " "interpolation"), + ) out_prefix = traits.String( - 'w', - field='woptions.prefix', - usedefault=True, - desc='Normalized output prefix') + "w", field="woptions.prefix", usedefault=True, desc="Normalized output prefix" + ) class Normalize12OutputSpec(TraitedSpec): deformation_field = OutputMultiPath( File(exists=True), - desc=('NIfTI file containing 3 ' - 'deformation fields for the ' - 'deformation in x, y and z ' - 'dimension')) + desc=( + "NIfTI file containing 3 " + "deformation fields for the " + "deformation in x, y and z " + "dimension" + ), + ) normalized_image = OutputMultiPath( - File(exists=True), - desc=('Normalized file that needed to ' - 'be aligned')) - normalized_files = OutputMultiPath( - File(exists=True), desc='Normalized other files') + File(exists=True), desc=("Normalized file that needed to " "be aligned") + ) + normalized_files = OutputMultiPath(File(exists=True), desc="Normalized other files") class Normalize12(SPMCommand): @@ -1174,74 +1327,75 @@ class Normalize12(SPMCommand): input_spec = Normalize12InputSpec output_spec = Normalize12OutputSpec - _jobtype = 'spatial' - _jobname = 'normalise' + _jobtype = "spatial" + _jobname = "normalise" def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ - if opt == 'tpm': + if opt == "tpm": return scans_for_fname(ensure_list(val)) - if opt == 'image_to_align': + if opt == "image_to_align": return scans_for_fname(ensure_list(val)) - if opt == 'apply_to_files': + if opt == "apply_to_files": return scans_for_fnames(ensure_list(val)) - if opt == 'deformation_file': + if opt == "deformation_file": return np.array([simplify_list(val)], dtype=object) - if opt in ['nonlinear_regularization']: + if opt in ["nonlinear_regularization"]: if len(val) != 5: - raise ValueError('%s must have 5 elements' % opt) + raise ValueError("%s must have 5 elements" % opt) return super(Normalize12, self)._format_arg(opt, spec, val) def _parse_inputs(self, skip=()): """validate spm normalize options if set to None ignore """ - einputs = super( - Normalize12, - self)._parse_inputs(skip=('jobtype', 'apply_to_files')) + einputs = super(Normalize12, self)._parse_inputs( + skip=("jobtype", "apply_to_files") + ) if 
isdefined(self.inputs.apply_to_files): inputfiles = deepcopy(self.inputs.apply_to_files) if isdefined(self.inputs.image_to_align): inputfiles.extend([self.inputs.image_to_align]) - einputs[0]['subj']['resample'] = scans_for_fnames(inputfiles) + einputs[0]["subj"]["resample"] = scans_for_fnames(inputfiles) jobtype = self.inputs.jobtype - if jobtype in ['estwrite', 'write']: + if jobtype in ["estwrite", "write"]: if not isdefined(self.inputs.apply_to_files): if isdefined(self.inputs.image_to_align): - einputs[0]['subj']['resample'] = scans_for_fname( - self.inputs.image_to_align) - return [{'%s' % (jobtype): einputs[0]}] + einputs[0]["subj"]["resample"] = scans_for_fname( + self.inputs.image_to_align + ) + return [{"%s" % (jobtype): einputs[0]}] def _list_outputs(self): outputs = self._outputs().get() jobtype = self.inputs.jobtype - if jobtype.startswith('est'): - outputs['deformation_field'] = [] + if jobtype.startswith("est"): + outputs["deformation_field"] = [] for imgf in ensure_list(self.inputs.image_to_align): - outputs['deformation_field'].append( - fname_presuffix(imgf, prefix='y_')) - outputs['deformation_field'] = simplify_list( - outputs['deformation_field']) + outputs["deformation_field"].append(fname_presuffix(imgf, prefix="y_")) + outputs["deformation_field"] = simplify_list(outputs["deformation_field"]) if self.inputs.jobtype == "estimate": if isdefined(self.inputs.apply_to_files): - outputs['normalized_files'] = self.inputs.apply_to_files - outputs['normalized_image'] = fname_presuffix( - self.inputs.image_to_align, prefix='w') - elif 'write' in self.inputs.jobtype: - outputs['normalized_files'] = [] + outputs["normalized_files"] = self.inputs.apply_to_files + outputs["normalized_image"] = fname_presuffix( + self.inputs.image_to_align, prefix="w" + ) + elif "write" in self.inputs.jobtype: + outputs["normalized_files"] = [] if isdefined(self.inputs.apply_to_files): filelist = ensure_list(self.inputs.apply_to_files) for f in filelist: if isinstance(f, list): - run = [fname_presuffix(in_f, prefix='w') for in_f in f] + run = [fname_presuffix(in_f, prefix="w") for in_f in f] else: - run = [fname_presuffix(f, prefix='w')] - outputs['normalized_files'].extend(run) + run = [fname_presuffix(f, prefix="w")] + outputs["normalized_files"].extend(run) if isdefined(self.inputs.image_to_align): - outputs['normalized_image'] = fname_presuffix( - self.inputs.image_to_align, prefix='w') + outputs["normalized_image"] = fname_presuffix( + self.inputs.image_to_align, prefix="w" + ) return outputs @@ -1249,17 +1403,17 @@ def _list_outputs(self): class SegmentInputSpec(SPMCommandInputSpec): data = InputMultiPath( ImageFileSPM(exists=True), - field='data', - desc='one scan per subject', + field="data", + desc="one scan per subject", copyfile=False, - mandatory=True) + mandatory=True, + ) gm_output_type = traits.List( traits.Bool(), minlen=3, maxlen=3, - field='output.GM', - desc= - """Options to produce grey matter images: c1*.img, wc1*.img and mwc1*.img. + field="output.GM", + desc="""Options to produce grey matter images: c1*.img, wc1*.img and mwc1*.img. 
None: [False,False,False], Native Space: [False,False,True], Unmodulated Normalised: [False,True,False], @@ -1267,12 +1421,13 @@ class SegmentInputSpec(SPMCommandInputSpec): Native + Unmodulated Normalised: [False,True,True], Native + Modulated Normalised: [True,False,True], Native + Modulated + Unmodulated: [True,True,True], - Modulated + Unmodulated Normalised: [True,True,False]""") + Modulated + Unmodulated Normalised: [True,True,False]""", + ) wm_output_type = traits.List( traits.Bool(), minlen=3, maxlen=3, - field='output.WM', + field="output.WM", desc=""" Options to produce white matter images: c2*.img, wc2*.img and mwc2*.img. None: [False,False,False], @@ -1282,12 +1437,13 @@ class SegmentInputSpec(SPMCommandInputSpec): Native + Unmodulated Normalised: [False,True,True], Native + Modulated Normalised: [True,False,True], Native + Modulated + Unmodulated: [True,True,True], - Modulated + Unmodulated Normalised: [True,True,False]""") + Modulated + Unmodulated Normalised: [True,True,False]""", + ) csf_output_type = traits.List( traits.Bool(), minlen=3, maxlen=3, - field='output.CSF', + field="output.CSF", desc=""" Options to produce CSF images: c3*.img, wc3*.img and mwc3*.img. None: [False,False,False], @@ -1297,45 +1453,48 @@ class SegmentInputSpec(SPMCommandInputSpec): Native + Unmodulated Normalised: [False,True,True], Native + Modulated Normalised: [True,False,True], Native + Modulated + Unmodulated: [True,True,True], - Modulated + Unmodulated Normalised: [True,True,False]""") + Modulated + Unmodulated Normalised: [True,True,False]""", + ) save_bias_corrected = traits.Bool( - field='output.biascor', - desc=('True/False produce a bias ' - 'corrected image')) + field="output.biascor", desc=("True/False produce a bias " "corrected image") + ) clean_masks = traits.Enum( - 'no', - 'light', - 'thorough', - field='output.cleanup', - desc=("clean using estimated brain mask " - "('no','light','thorough')")) + "no", + "light", + "thorough", + field="output.cleanup", + desc=("clean using estimated brain mask " "('no','light','thorough')"), + ) tissue_prob_maps = traits.List( File(exists=True), - field='opts.tpm', - desc=('list of gray, white & csf prob. ' - '(opt,)')) + field="opts.tpm", + desc=("list of gray, white & csf prob. 
" "(opt,)"), + ) gaussians_per_class = traits.List( traits.Int(), - field='opts.ngaus', - desc=('num Gaussians capture intensity ' - 'distribution')) + field="opts.ngaus", + desc=("num Gaussians capture intensity " "distribution"), + ) affine_regularization = traits.Enum( - 'mni', - 'eastern', - 'subj', - 'none', - '', - field='opts.regtype', - desc=('Possible options: "mni", ' - '"eastern", "subj", "none" ' - '(no reguralisation), "" ' - '(no affine registration)')) + "mni", + "eastern", + "subj", + "none", + "", + field="opts.regtype", + desc=( + 'Possible options: "mni", ' + '"eastern", "subj", "none" ' + '(no reguralisation), "" ' + "(no affine registration)" + ), + ) warping_regularization = traits.Float( - field='opts.warpreg', - desc=('Controls balance between ' - 'parameters and data')) + field="opts.warpreg", desc=("Controls balance between " "parameters and data") + ) warp_frequency_cutoff = traits.Float( - field='opts.warpco', desc='Cutoff of DCT bases') + field="opts.warpco", desc="Cutoff of DCT bases" + ) bias_regularization = traits.Enum( 0, 0.00001, @@ -1345,8 +1504,9 @@ class SegmentInputSpec(SPMCommandInputSpec): 0.1, 1, 10, - field='opts.biasreg', - desc='no(0) - extremely heavy (10)') + field="opts.biasreg", + desc="no(0) - extremely heavy (10)", + ) bias_fwhm = traits.Enum( 30, 40, @@ -1359,43 +1519,39 @@ class SegmentInputSpec(SPMCommandInputSpec): 110, 120, 130, - 'Inf', - field='opts.biasfwhm', - desc='FWHM of Gaussian smoothness of bias') + "Inf", + field="opts.biasfwhm", + desc="FWHM of Gaussian smoothness of bias", + ) sampling_distance = traits.Float( - field='opts.samp', - desc=('Sampling distance on data for ' - 'parameter estimation')) + field="opts.samp", + desc=("Sampling distance on data for " "parameter estimation"), + ) mask_image = File( exists=True, - field='opts.msk', - desc='Binary image to restrict parameter estimation ') + field="opts.msk", + desc="Binary image to restrict parameter estimation ", + ) class SegmentOutputSpec(TraitedSpec): - native_gm_image = File(desc='native space grey probability map') - normalized_gm_image = File(desc='normalized grey probability map', ) - modulated_gm_image = File( - desc=('modulated, normalized grey ' - 'probability map')) - native_wm_image = File(desc='native space white probability map') - normalized_wm_image = File(desc='normalized white probability map') - modulated_wm_image = File( - desc=('modulated, normalized white ' - 'probability map')) - native_csf_image = File(desc='native space csf probability map') - normalized_csf_image = File(desc='normalized csf probability map') - modulated_csf_image = File( - desc=('modulated, normalized csf ' - 'probability map')) + native_gm_image = File(desc="native space grey probability map") + normalized_gm_image = File(desc="normalized grey probability map",) + modulated_gm_image = File(desc=("modulated, normalized grey " "probability map")) + native_wm_image = File(desc="native space white probability map") + normalized_wm_image = File(desc="normalized white probability map") + modulated_wm_image = File(desc=("modulated, normalized white " "probability map")) + native_csf_image = File(desc="native space csf probability map") + normalized_csf_image = File(desc="normalized csf probability map") + modulated_csf_image = File(desc=("modulated, normalized csf " "probability map")) modulated_input_image = File( - deprecated='0.10', - new_name='bias_corrected_image', - desc='bias-corrected version of input image') - bias_corrected_image = File(desc='bias-corrected version of 
input image') - transformation_mat = File(exists=True, desc='Normalization transformation') - inverse_transformation_mat = File( - exists=True, desc='Inverse normalization info') + deprecated="0.10", + new_name="bias_corrected_image", + desc="bias-corrected version of input image", + ) + bias_corrected_image = File(desc="bias-corrected version of input image") + transformation_mat = File(exists=True, desc="Normalization transformation") + inverse_transformation_mat = File(exists=True, desc="Inverse normalization info") class Segment(SPMCommand): @@ -1418,30 +1574,30 @@ class Segment(SPMCommand): def __init__(self, **inputs): _local_version = SPMCommand().version - if _local_version and '12.' in _local_version: - self._jobtype = 'tools' - self._jobname = 'oldseg' + if _local_version and "12." in _local_version: + self._jobtype = "tools" + self._jobname = "oldseg" else: - self._jobtype = 'spatial' - self._jobname = 'preproc' + self._jobtype = "spatial" + self._jobname = "preproc" SPMCommand.__init__(self, **inputs) def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ - clean_masks_dict = {'no': 0, 'light': 1, 'thorough': 2} + clean_masks_dict = {"no": 0, "light": 1, "thorough": 2} - if opt in ['data', 'tissue_prob_maps']: + if opt in ["data", "tissue_prob_maps"]: if isinstance(val, list): return scans_for_fnames(val) else: return scans_for_fname(val) - if 'output_type' in opt: + if "output_type" in opt: return [int(v) for v in val] - if opt == 'mask_image': + if opt == "mask_image": return scans_for_fname(val) - if opt == 'clean_masks': + if opt == "clean_masks": return clean_masks_dict[val] return super(Segment, self)._format_arg(opt, spec, val) @@ -1449,23 +1605,26 @@ def _list_outputs(self): outputs = self._outputs().get() f = self.inputs.data[0] - for tidx, tissue in enumerate(['gm', 'wm', 'csf']): - outtype = '%s_output_type' % tissue + for tidx, tissue in enumerate(["gm", "wm", "csf"]): + outtype = "%s_output_type" % tissue if isdefined(getattr(self.inputs, outtype)): - for idx, (image, prefix) in enumerate([('modulated', 'mw'), - ('normalized', - 'w'), ('native', '')]): + for idx, (image, prefix) in enumerate( + [("modulated", "mw"), ("normalized", "w"), ("native", "")] + ): if getattr(self.inputs, outtype)[idx]: - outfield = '%s_%s_image' % (image, tissue) + outfield = "%s_%s_image" % (image, tissue) outputs[outfield] = fname_presuffix( - f, prefix='%sc%d' % (prefix, tidx + 1)) - if (isdefined(self.inputs.save_bias_corrected) - and self.inputs.save_bias_corrected): - outputs['bias_corrected_image'] = fname_presuffix(f, prefix='m') - t_mat = fname_presuffix(f, suffix='_seg_sn.mat', use_ext=False) - outputs['transformation_mat'] = t_mat - invt_mat = fname_presuffix(f, suffix='_seg_inv_sn.mat', use_ext=False) - outputs['inverse_transformation_mat'] = invt_mat + f, prefix="%sc%d" % (prefix, tidx + 1) + ) + if ( + isdefined(self.inputs.save_bias_corrected) + and self.inputs.save_bias_corrected + ): + outputs["bias_corrected_image"] = fname_presuffix(f, prefix="m") + t_mat = fname_presuffix(f, suffix="_seg_sn.mat", use_ext=False) + outputs["transformation_mat"] = t_mat + invt_mat = fname_presuffix(f, suffix="_seg_inv_sn.mat", use_ext=False) + outputs["inverse_transformation_mat"] = invt_mat return outputs @@ -1474,8 +1633,9 @@ class NewSegmentInputSpec(SPMCommandInputSpec): ImageFileSPM(exists=True), mandatory=True, desc="A list of files to be segmented", - field='channel', - copyfile=False) + field="channel", + copyfile=False, + ) channel_info = 
traits.Tuple( traits.Float(), traits.Float(), @@ -1484,64 +1644,75 @@ class NewSegmentInputSpec(SPMCommandInputSpec): - bias reguralisation (0-10) - FWHM of Gaussian smoothness of bias - which maps to save (Field, Corrected) - a tuple of two boolean values""", - field='channel') + field="channel", + ) tissues = traits.List( traits.Tuple( traits.Tuple(ImageFileSPM(exists=True), traits.Int()), - traits.Int(), traits.Tuple(traits.Bool, traits.Bool), - traits.Tuple(traits.Bool, traits.Bool)), + traits.Int(), + traits.Tuple(traits.Bool, traits.Bool), + traits.Tuple(traits.Bool, traits.Bool), + ), desc="""A list of tuples (one per tissue) with the following fields: - tissue probability map (4D), 1-based index to frame - number of gaussians - which maps to save [Native, DARTEL] - a tuple of two boolean values - which maps to save [Unmodulated, Modulated] - a tuple of two boolean values""", - field='tissue') + field="tissue", + ) affine_regularization = traits.Enum( - 'mni', - 'eastern', - 'subj', - 'none', - field='warp.affreg', - desc='mni, eastern, subj, none ') + "mni", + "eastern", + "subj", + "none", + field="warp.affreg", + desc="mni, eastern, subj, none ", + ) warping_regularization = traits.Either( traits.List(traits.Float(), minlen=5, maxlen=5), traits.Float(), - field='warp.reg', - desc=('Warping regularization ' - 'parameter(s). Accepts float ' - 'or list of floats (the ' - 'latter is required by ' - 'SPM12)')) + field="warp.reg", + desc=( + "Warping regularization " + "parameter(s). Accepts float " + "or list of floats (the " + "latter is required by " + "SPM12)" + ), + ) sampling_distance = traits.Float( - field='warp.samp', - desc=('Sampling distance on data for ' - 'parameter estimation')) + field="warp.samp", + desc=("Sampling distance on data for " "parameter estimation"), + ) write_deformation_fields = traits.List( traits.Bool(), minlen=2, maxlen=2, - field='warp.write', - desc=("Which deformation fields to " - "write:[Inverse, Forward]")) + field="warp.write", + desc=("Which deformation fields to " "write:[Inverse, Forward]"), + ) class NewSegmentOutputSpec(TraitedSpec): native_class_images = traits.List( - traits.List(File(exists=True)), desc='native space probability maps') + traits.List(File(exists=True)), desc="native space probability maps" + ) dartel_input_images = traits.List( - traits.List(File(exists=True)), desc='dartel imported class images') + traits.List(File(exists=True)), desc="dartel imported class images" + ) normalized_class_images = traits.List( - traits.List(File(exists=True)), desc='normalized class images') + traits.List(File(exists=True)), desc="normalized class images" + ) modulated_class_images = traits.List( - traits.List(File(exists=True)), - desc=('modulated+normalized class ' - 'images')) + traits.List(File(exists=True)), desc=("modulated+normalized class " "images") + ) transformation_mat = OutputMultiPath( - File(exists=True), desc='Normalization transformation') + File(exists=True), desc="Normalization transformation" + ) bias_corrected_images = OutputMultiPath( - File(exists=True), desc='bias corrected images') - bias_field_images = OutputMultiPath( - File(exists=True), desc='bias field images') + File(exists=True), desc="bias corrected images" + ) + bias_field_images = OutputMultiPath(File(exists=True), desc="bias field images") forward_deformation_field = OutputMultiPath(File(exists=True)) inverse_deformation_field = OutputMultiPath(File(exists=True)) @@ -1582,12 +1753,12 @@ class NewSegment(SPMCommand): def __init__(self, **inputs): 
_local_version = SPMCommand().version - if _local_version and '12.' in _local_version: - self._jobtype = 'spatial' - self._jobname = 'preproc' + if _local_version and "12." in _local_version: + self._jobtype = "spatial" + self._jobname = "preproc" else: - self._jobtype = 'tools' - self._jobname = 'preproc8' + self._jobtype = "tools" + self._jobname = "preproc8" SPMCommand.__init__(self, **inputs) @@ -1595,119 +1766,132 @@ def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ - if opt in ['channel_files', 'channel_info']: + if opt in ["channel_files", "channel_info"]: # structure have to be recreated because of some weird traits error new_channel = {} - new_channel['vols'] = scans_for_fnames(self.inputs.channel_files) + new_channel["vols"] = scans_for_fnames(self.inputs.channel_files) if isdefined(self.inputs.channel_info): info = self.inputs.channel_info - new_channel['biasreg'] = info[0] - new_channel['biasfwhm'] = info[1] - new_channel['write'] = [int(info[2][0]), int(info[2][1])] + new_channel["biasreg"] = info[0] + new_channel["biasfwhm"] = info[1] + new_channel["write"] = [int(info[2][0]), int(info[2][1])] return [new_channel] - elif opt == 'tissues': + elif opt == "tissues": new_tissues = [] for tissue in val: new_tissue = {} - new_tissue['tpm'] = np.array( - [','.join([tissue[0][0], str(tissue[0][1])])], - dtype=object) - new_tissue['ngaus'] = tissue[1] - new_tissue['native'] = [int(tissue[2][0]), int(tissue[2][1])] - new_tissue['warped'] = [int(tissue[3][0]), int(tissue[3][1])] + new_tissue["tpm"] = np.array( + [",".join([tissue[0][0], str(tissue[0][1])])], dtype=object + ) + new_tissue["ngaus"] = tissue[1] + new_tissue["native"] = [int(tissue[2][0]), int(tissue[2][1])] + new_tissue["warped"] = [int(tissue[3][0]), int(tissue[3][1])] new_tissues.append(new_tissue) return new_tissues - elif opt == 'write_deformation_fields': + elif opt == "write_deformation_fields": return super(NewSegment, self)._format_arg( - opt, spec, [int(val[0]), int(val[1])]) + opt, spec, [int(val[0]), int(val[1])] + ) else: return super(NewSegment, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() - outputs['native_class_images'] = [] - outputs['dartel_input_images'] = [] - outputs['normalized_class_images'] = [] - outputs['modulated_class_images'] = [] - outputs['transformation_mat'] = [] - outputs['bias_corrected_images'] = [] - outputs['bias_field_images'] = [] - outputs['inverse_deformation_field'] = [] - outputs['forward_deformation_field'] = [] + outputs["native_class_images"] = [] + outputs["dartel_input_images"] = [] + outputs["normalized_class_images"] = [] + outputs["modulated_class_images"] = [] + outputs["transformation_mat"] = [] + outputs["bias_corrected_images"] = [] + outputs["bias_field_images"] = [] + outputs["inverse_deformation_field"] = [] + outputs["forward_deformation_field"] = [] n_classes = 5 if isdefined(self.inputs.tissues): n_classes = len(self.inputs.tissues) for i in range(n_classes): - outputs['native_class_images'].append([]) - outputs['dartel_input_images'].append([]) - outputs['normalized_class_images'].append([]) - outputs['modulated_class_images'].append([]) + outputs["native_class_images"].append([]) + outputs["dartel_input_images"].append([]) + outputs["normalized_class_images"].append([]) + outputs["modulated_class_images"].append([]) for filename in self.inputs.channel_files: pth, base, ext = split_filename(filename) if isdefined(self.inputs.tissues): for i, tissue in 
enumerate(self.inputs.tissues): if tissue[2][0]: - outputs['native_class_images'][i].append( - os.path.join(pth, "c%d%s.nii" % (i + 1, base))) + outputs["native_class_images"][i].append( + os.path.join(pth, "c%d%s.nii" % (i + 1, base)) + ) if tissue[2][1]: - outputs['dartel_input_images'][i].append( - os.path.join(pth, "rc%d%s.nii" % (i + 1, base))) + outputs["dartel_input_images"][i].append( + os.path.join(pth, "rc%d%s.nii" % (i + 1, base)) + ) if tissue[3][0]: - outputs['normalized_class_images'][i].append( - os.path.join(pth, "wc%d%s.nii" % (i + 1, base))) + outputs["normalized_class_images"][i].append( + os.path.join(pth, "wc%d%s.nii" % (i + 1, base)) + ) if tissue[3][1]: - outputs['modulated_class_images'][i].append( - os.path.join(pth, "mwc%d%s.nii" % (i + 1, base))) + outputs["modulated_class_images"][i].append( + os.path.join(pth, "mwc%d%s.nii" % (i + 1, base)) + ) else: for i in range(n_classes): - outputs['native_class_images'][i].append( - os.path.join(pth, "c%d%s.nii" % (i + 1, base))) - outputs['transformation_mat'].append( - os.path.join(pth, "%s_seg8.mat" % base)) + outputs["native_class_images"][i].append( + os.path.join(pth, "c%d%s.nii" % (i + 1, base)) + ) + outputs["transformation_mat"].append( + os.path.join(pth, "%s_seg8.mat" % base) + ) if isdefined(self.inputs.write_deformation_fields): if self.inputs.write_deformation_fields[0]: - outputs['inverse_deformation_field'].append( - os.path.join(pth, "iy_%s.nii" % base)) + outputs["inverse_deformation_field"].append( + os.path.join(pth, "iy_%s.nii" % base) + ) if self.inputs.write_deformation_fields[1]: - outputs['forward_deformation_field'].append( - os.path.join(pth, "y_%s.nii" % base)) + outputs["forward_deformation_field"].append( + os.path.join(pth, "y_%s.nii" % base) + ) if isdefined(self.inputs.channel_info): if self.inputs.channel_info[2][0]: - outputs['bias_field_images'].append( - os.path.join(pth, "BiasField_%s.nii" % (base))) + outputs["bias_field_images"].append( + os.path.join(pth, "BiasField_%s.nii" % (base)) + ) if self.inputs.channel_info[2][1]: - outputs['bias_corrected_images'].append( - os.path.join(pth, "m%s.nii" % (base))) + outputs["bias_corrected_images"].append( + os.path.join(pth, "m%s.nii" % (base)) + ) return outputs class SmoothInputSpec(SPMCommandInputSpec): in_files = InputMultiPath( ImageFileSPM(exists=True), - field='data', - desc='list of files to smooth', + field="data", + desc="list of files to smooth", mandatory=True, - copyfile=False) + copyfile=False, + ) fwhm = traits.Either( traits.List(traits.Float(), minlen=3, maxlen=3), traits.Float(), - field='fwhm', - desc='3-list of fwhm for each dimension') - data_type = traits.Int( - field='dtype', desc='Data type of the output images') + field="fwhm", + desc="3-list of fwhm for each dimension", + ) + data_type = traits.Int(field="dtype", desc="Data type of the output images") implicit_masking = traits.Bool( - field='im', desc=('A mask implied by a particular' - 'voxel value')) + field="im", desc=("A mask implied by a particular" "voxel value") + ) out_prefix = traits.String( - 's', field='prefix', usedefault=True, desc='smoothed output prefix') + "s", field="prefix", usedefault=True, desc="smoothed output prefix" + ) class SmoothOutputSpec(TraitedSpec): - smoothed_files = OutputMultiPath(File(exists=True), desc='smoothed files') + smoothed_files = OutputMultiPath(File(exists=True), desc="smoothed files") class Smooth(SPMCommand): @@ -1726,13 +1910,13 @@ class Smooth(SPMCommand): input_spec = SmoothInputSpec output_spec = SmoothOutputSpec - 
_jobtype = 'spatial' - _jobname = 'smooth' + _jobtype = "spatial" + _jobname = "smooth" def _format_arg(self, opt, spec, val): - if opt in ['in_files']: + if opt in ["in_files"]: return scans_for_fnames(ensure_list(val)) - if opt == 'fwhm': + if opt == "fwhm": if not isinstance(val, list): return [val, val, val] if isinstance(val, list): @@ -1745,11 +1929,12 @@ def _format_arg(self, opt, spec, val): def _list_outputs(self): outputs = self._outputs().get() - outputs['smoothed_files'] = [] + outputs["smoothed_files"] = [] for imgf in ensure_list(self.inputs.in_files): - outputs['smoothed_files'].append( - fname_presuffix(imgf, prefix=self.inputs.out_prefix)) + outputs["smoothed_files"].append( + fname_presuffix(imgf, prefix=self.inputs.out_prefix) + ) return outputs @@ -1757,57 +1942,60 @@ class DARTELInputSpec(SPMCommandInputSpec): image_files = traits.List( traits.List(ImageFileSPM(exists=True)), desc="A list of files to be segmented", - field='warp.images', + field="warp.images", copyfile=False, - mandatory=True) + mandatory=True, + ) template_prefix = traits.Str( - 'Template', + "Template", usedefault=True, - field='warp.settings.template', - desc='Prefix for template') + field="warp.settings.template", + desc="Prefix for template", + ) regularization_form = traits.Enum( - 'Linear', - 'Membrane', - 'Bending', - field='warp.settings.rform', - desc=('Form of regularization energy ' - 'term')) + "Linear", + "Membrane", + "Bending", + field="warp.settings.rform", + desc=("Form of regularization energy " "term"), + ) iteration_parameters = traits.List( traits.Tuple( traits.Range(1, 10), traits.Tuple(traits.Float, traits.Float, traits.Float), traits.Enum(1, 2, 4, 8, 16, 32, 64, 128, 256, 512), - traits.Enum(0, 0.5, 1, 2, 4, 8, 16, 32)), + traits.Enum(0, 0.5, 1, 2, 4, 8, 16, 32), + ), minlen=3, maxlen=12, - field='warp.settings.param', + field="warp.settings.param", desc="""List of tuples for each iteration - Inner iterations - Regularization parameters - Time points for deformation model - smoothing parameter - """) + """, + ) optimization_parameters = traits.Tuple( traits.Float, traits.Range(1, 8), traits.Range(1, 8), - field='warp.settings.optim', + field="warp.settings.optim", desc=""" Optimization settings a tuple - LM regularization - cycles of multigrid solver - relaxation iterations - """) + """, + ) class DARTELOutputSpec(TraitedSpec): - final_template_file = File(exists=True, desc='final DARTEL template') + final_template_file = File(exists=True, desc="final DARTEL template") template_files = traits.List( - File(exists=True), - desc=('Templates from different stages of ' - 'iteration')) - dartel_flow_fields = traits.List( - File(exists=True), desc='DARTEL flow fields') + File(exists=True), desc=("Templates from different stages of " "iteration") + ) + dartel_flow_fields = traits.List(File(exists=True), desc="DARTEL flow fields") class DARTEL(SPMCommand): @@ -1826,52 +2014,53 @@ class DARTEL(SPMCommand): input_spec = DARTELInputSpec output_spec = DARTELOutputSpec - _jobtype = 'tools' - _jobname = 'dartel' + _jobtype = "tools" + _jobname = "dartel" def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ - if opt in ['image_files']: + if opt in ["image_files"]: return scans_for_fnames(val, keep4d=True, separate_sessions=True) - elif opt == 'regularization_form': - mapper = {'Linear': 0, 'Membrane': 1, 'Bending': 2} + elif opt == "regularization_form": + mapper = {"Linear": 0, "Membrane": 1, "Bending": 2} return mapper[val] - elif opt == 
'iteration_parameters': + elif opt == "iteration_parameters": params = [] for param in val: new_param = {} - new_param['its'] = param[0] - new_param['rparam'] = list(param[1]) - new_param['K'] = param[2] - new_param['slam'] = param[3] + new_param["its"] = param[0] + new_param["rparam"] = list(param[1]) + new_param["K"] = param[2] + new_param["slam"] = param[3] params.append(new_param) return params - elif opt == 'optimization_parameters': + elif opt == "optimization_parameters": new_param = {} - new_param['lmreg'] = val[0] - new_param['cyc'] = val[1] - new_param['its'] = val[2] + new_param["lmreg"] = val[0] + new_param["cyc"] = val[1] + new_param["its"] = val[2] return [new_param] else: return super(DARTEL, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() - outputs['template_files'] = [] + outputs["template_files"] = [] for i in range(6): - outputs['template_files'].append( - os.path.realpath('%s_%d.nii' % (self.inputs.template_prefix, - i + 1))) - outputs['final_template_file'] = os.path.realpath( - '%s_6.nii' % self.inputs.template_prefix) - outputs['dartel_flow_fields'] = [] + outputs["template_files"].append( + os.path.realpath("%s_%d.nii" % (self.inputs.template_prefix, i + 1)) + ) + outputs["final_template_file"] = os.path.realpath( + "%s_6.nii" % self.inputs.template_prefix + ) + outputs["dartel_flow_fields"] = [] for filename in self.inputs.image_files[0]: pth, base, ext = split_filename(filename) - outputs['dartel_flow_fields'].append( - os.path.realpath('u_%s_%s%s' % - (base, self.inputs.template_prefix, ext))) + outputs["dartel_flow_fields"].append( + os.path.realpath("u_%s_%s%s" % (base, self.inputs.template_prefix, ext)) + ) return outputs @@ -1881,24 +2070,28 @@ class DARTELNorm2MNIInputSpec(SPMCommandInputSpec): copyfile=False, mandatory=True, desc="DARTEL template", - field='mni_norm.template') + field="mni_norm.template", + ) flowfield_files = InputMultiPath( ImageFileSPM(exists=True), mandatory=True, desc="DARTEL flow fields u_rc1*", - field='mni_norm.data.subjs.flowfields') + field="mni_norm.data.subjs.flowfields", + ) apply_to_files = InputMultiPath( ImageFileSPM(exists=True), desc="Files to apply the transform to", - field='mni_norm.data.subjs.images', + field="mni_norm.data.subjs.images", mandatory=True, - copyfile=False) + copyfile=False, + ) voxel_size = traits.Tuple( traits.Float, traits.Float, traits.Float, desc="Voxel sizes for output file", - field='mni_norm.vox') + field="mni_norm.vox", + ) bounding_box = traits.Tuple( traits.Float, traits.Float, @@ -1907,24 +2100,27 @@ class DARTELNorm2MNIInputSpec(SPMCommandInputSpec): traits.Float, traits.Float, desc="Voxel sizes for output file", - field='mni_norm.bb') + field="mni_norm.bb", + ) modulate = traits.Bool( - field='mni_norm.preserve', - desc=("Modulate out images - no modulation " - "preserves concentrations")) + field="mni_norm.preserve", + desc=("Modulate out images - no modulation " "preserves concentrations"), + ) fwhm = traits.Either( traits.List(traits.Float(), minlen=3, maxlen=3), traits.Float(), - field='mni_norm.fwhm', - desc='3-list of fwhm for each dimension') + field="mni_norm.fwhm", + desc="3-list of fwhm for each dimension", + ) class DARTELNorm2MNIOutputSpec(TraitedSpec): normalized_files = OutputMultiPath( - File(exists=True), desc='Normalized files in MNI space') + File(exists=True), desc="Normalized files in MNI space" + ) normalization_parameter_file = File( - exists=True, desc=('Transform parameters to MNI ' - 'space')) + exists=True, 
desc=("Transform parameters to MNI " "space") + ) class DARTELNorm2MNI(SPMCommand): @@ -1946,23 +2142,23 @@ class DARTELNorm2MNI(SPMCommand): input_spec = DARTELNorm2MNIInputSpec output_spec = DARTELNorm2MNIOutputSpec - _jobtype = 'tools' - _jobname = 'dartel' + _jobtype = "tools" + _jobname = "dartel" def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ - if opt in ['template_file']: + if opt in ["template_file"]: return np.array([val], dtype=object) - elif opt in ['flowfield_files']: + elif opt in ["flowfield_files"]: return scans_for_fnames(val, keep4d=True) - elif opt in ['apply_to_files']: + elif opt in ["apply_to_files"]: return scans_for_fnames(val, keep4d=True, separate_sessions=True) - elif opt == 'voxel_size': + elif opt == "voxel_size": return list(val) - elif opt == 'bounding_box': + elif opt == "bounding_box": return list(val) - elif opt == 'fwhm': + elif opt == "fwhm": if isinstance(val, list): return val else: @@ -1973,18 +2169,18 @@ def _format_arg(self, opt, spec, val): def _list_outputs(self): outputs = self._outputs().get() pth, base, ext = split_filename(self.inputs.template_file) - outputs['normalization_parameter_file'] = os.path.realpath( - base + '_2mni.mat') - outputs['normalized_files'] = [] + outputs["normalization_parameter_file"] = os.path.realpath(base + "_2mni.mat") + outputs["normalized_files"] = [] prefix = "w" if isdefined(self.inputs.modulate) and self.inputs.modulate: - prefix = 'm' + prefix + prefix = "m" + prefix if not isdefined(self.inputs.fwhm) or self.inputs.fwhm > 0: - prefix = 's' + prefix + prefix = "s" + prefix for filename in self.inputs.apply_to_files: pth, base, ext = split_filename(filename) - outputs['normalized_files'].append( - os.path.realpath('%s%s%s' % (prefix, base, ext))) + outputs["normalized_files"].append( + os.path.realpath("%s%s%s" % (prefix, base, ext)) + ) return outputs @@ -1994,31 +2190,33 @@ class CreateWarpedInputSpec(SPMCommandInputSpec): ImageFileSPM(exists=True), mandatory=True, desc="A list of files to be warped", - field='crt_warped.images', - copyfile=False) + field="crt_warped.images", + copyfile=False, + ) flowfield_files = InputMultiPath( ImageFileSPM(exists=True), copyfile=False, desc="DARTEL flow fields u_rc1*", - field='crt_warped.flowfields', - mandatory=True) + field="crt_warped.flowfields", + mandatory=True, + ) iterations = traits.Range( low=0, high=9, - desc=("The number of iterations: log2(number of " - "time steps)"), - field='crt_warped.K') + desc=("The number of iterations: log2(number of " "time steps)"), + field="crt_warped.K", + ) interp = traits.Range( low=0, high=7, - field='crt_warped.interp', - desc='degree of b-spline used for interpolation') - modulate = traits.Bool( - field='crt_warped.jactransf', desc="Modulate images") + field="crt_warped.interp", + desc="degree of b-spline used for interpolation", + ) + modulate = traits.Bool(field="crt_warped.jactransf", desc="Modulate images") class CreateWarpedOutputSpec(TraitedSpec): - warped_files = traits.List(File(exists=True, desc='final warped files')) + warped_files = traits.List(File(exists=True, desc="final warped files")) class CreateWarped(SPMCommand): @@ -2038,45 +2236,41 @@ class CreateWarped(SPMCommand): input_spec = CreateWarpedInputSpec output_spec = CreateWarpedOutputSpec - _jobtype = 'tools' - _jobname = 'dartel' + _jobtype = "tools" + _jobname = "dartel" def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ - if opt in ['image_files']: + if opt in 
["image_files"]: return scans_for_fnames(val, keep4d=True, separate_sessions=True) - if opt in ['flowfield_files']: + if opt in ["flowfield_files"]: return scans_for_fnames(val, keep4d=True) else: return super(CreateWarped, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() - outputs['warped_files'] = [] + outputs["warped_files"] = [] for filename in self.inputs.image_files: pth, base, ext = split_filename(filename) if isdefined(self.inputs.modulate) and self.inputs.modulate: - outputs['warped_files'].append( - os.path.realpath('mw%s%s' % (base, ext))) + outputs["warped_files"].append(os.path.realpath("mw%s%s" % (base, ext))) else: - outputs['warped_files'].append( - os.path.realpath('w%s%s' % (base, ext))) + outputs["warped_files"].append(os.path.realpath("w%s%s" % (base, ext))) return outputs class ApplyDeformationFieldInputSpec(SPMCommandInputSpec): - in_files = InputMultiPath( - ImageFileSPM(exists=True), mandatory=True, field='fnames') - deformation_field = File(exists=True, mandatory=True, field='comp{1}.def') + in_files = InputMultiPath(ImageFileSPM(exists=True), mandatory=True, field="fnames") + deformation_field = File(exists=True, mandatory=True, field="comp{1}.def") reference_volume = ImageFileSPM( - exists=True, mandatory=True, field='comp{2}.id.space') + exists=True, mandatory=True, field="comp{2}.id.space" + ) interp = traits.Range( - low=0, - high=7, - field='interp', - desc='degree of b-spline used for interpolation') + low=0, high=7, field="interp", desc="degree of b-spline used for interpolation" + ) class ApplyDeformationFieldOutputSpec(TraitedSpec): @@ -2087,18 +2281,18 @@ class ApplyDeformations(SPMCommand): input_spec = ApplyDeformationFieldInputSpec output_spec = ApplyDeformationFieldOutputSpec - _jobtype = 'util' - _jobname = 'defs' + _jobtype = "util" + _jobname = "defs" def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ - if opt in ['deformation_field', 'reference_volume']: + if opt in ["deformation_field", "reference_volume"]: val = [val] - if opt in ['deformation_field']: + if opt in ["deformation_field"]: return scans_for_fnames(val, keep4d=True, separate_sessions=False) - if opt in ['in_files', 'reference_volume']: + if opt in ["in_files", "reference_volume"]: return scans_for_fnames(val, keep4d=False, separate_sessions=False) else: @@ -2106,10 +2300,10 @@ def _format_arg(self, opt, spec, val): def _list_outputs(self): outputs = self._outputs().get() - outputs['out_files'] = [] + outputs["out_files"] = [] for filename in self.inputs.in_files: _, fname = os.path.split(filename) - outputs['out_files'].append(os.path.realpath('w%s' % fname)) + outputs["out_files"].append(os.path.realpath("w%s" % fname)) return outputs @@ -2118,197 +2312,197 @@ class VBMSegmentInputSpec(SPMCommandInputSpec): in_files = InputMultiPath( ImageFileSPM(exists=True), desc="A list of files to be segmented", - field='estwrite.data', + field="estwrite.data", copyfile=False, - mandatory=True) + mandatory=True, + ) tissues = ImageFileSPM( - exists=True, field='estwrite.tpm', desc='tissue probability map') + exists=True, field="estwrite.tpm", desc="tissue probability map" + ) gaussians_per_class = traits.Tuple( (2, 2, 2, 3, 4, 2), *([traits.Int()] * 6), usedefault=True, - desc='number of gaussians for each tissue class') + desc="number of gaussians for each tissue class" + ) bias_regularization = traits.Enum( - 0.0001, (0, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10), - field='estwrite.opts.biasreg', + 0.0001, + (0, 
0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10), + field="estwrite.opts.biasreg", usedefault=True, - desc='no(0) - extremely heavy (10)') + desc="no(0) - extremely heavy (10)", + ) bias_fwhm = traits.Enum( - 60, (30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 'Inf'), - field='estwrite.opts.biasfwhm', + 60, + (30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, "Inf"), + field="estwrite.opts.biasfwhm", usedefault=True, - desc='FWHM of Gaussian smoothness of bias') + desc="FWHM of Gaussian smoothness of bias", + ) sampling_distance = traits.Float( 3, usedefault=True, - field='estwrite.opts.samp', - desc='Sampling distance on data for parameter estimation') + field="estwrite.opts.samp", + desc="Sampling distance on data for parameter estimation", + ) warping_regularization = traits.Float( 4, usedefault=True, - field='estwrite.opts.warpreg', - desc='Controls balance between parameters and data') - - spatial_normalization = traits.Enum( - 'high', - 'low', - usedefault=True, + field="estwrite.opts.warpreg", + desc="Controls balance between parameters and data", ) + + spatial_normalization = traits.Enum("high", "low", usedefault=True,) dartel_template = ImageFileSPM( - exists=True, field='estwrite.extopts.dartelwarp.normhigh.darteltpm') + exists=True, field="estwrite.extopts.dartelwarp.normhigh.darteltpm" + ) use_sanlm_denoising_filter = traits.Range( 0, 2, 2, usedefault=True, - field='estwrite.extopts.sanlm', - desc="0=No denoising, 1=denoising,2=denoising multi-threaded") - mrf_weighting = traits.Float( - 0.15, usedefault=True, field='estwrite.extopts.mrf') + field="estwrite.extopts.sanlm", + desc="0=No denoising, 1=denoising,2=denoising multi-threaded", + ) + mrf_weighting = traits.Float(0.15, usedefault=True, field="estwrite.extopts.mrf") cleanup_partitions = traits.Int( 1, usedefault=True, - field='estwrite.extopts.cleanup', - desc="0=None,1=light,2=thorough") - display_results = traits.Bool( - True, usedefault=True, field='estwrite.extopts.print') - - gm_native = traits.Bool( - False, - usedefault=True, - field='estwrite.output.GM.native', + field="estwrite.extopts.cleanup", + desc="0=None,1=light,2=thorough", ) + display_results = traits.Bool(True, usedefault=True, field="estwrite.extopts.print") + + gm_native = traits.Bool(False, usedefault=True, field="estwrite.output.GM.native",) gm_normalized = traits.Bool( - False, - usedefault=True, - field='estwrite.output.GM.warped', + False, usedefault=True, field="estwrite.output.GM.warped", ) gm_modulated_normalized = traits.Range( 0, 2, 2, usedefault=True, - field='estwrite.output.GM.modulated', - desc='0=none,1=affine+non-linear(SPM8 default),2=non-linear only') + field="estwrite.output.GM.modulated", + desc="0=none,1=affine+non-linear(SPM8 default),2=non-linear only", + ) gm_dartel = traits.Range( 0, 2, 0, usedefault=True, - field='estwrite.output.GM.dartel', - desc="0=None,1=rigid(SPM8 default),2=affine") - - wm_native = traits.Bool( - False, - usedefault=True, - field='estwrite.output.WM.native', + field="estwrite.output.GM.dartel", + desc="0=None,1=rigid(SPM8 default),2=affine", ) + + wm_native = traits.Bool(False, usedefault=True, field="estwrite.output.WM.native",) wm_normalized = traits.Bool( - False, - usedefault=True, - field='estwrite.output.WM.warped', + False, usedefault=True, field="estwrite.output.WM.warped", ) wm_modulated_normalized = traits.Range( 0, 2, 2, usedefault=True, - field='estwrite.output.WM.modulated', - desc='0=none,1=affine+non-linear(SPM8 default),2=non-linear only') + field="estwrite.output.WM.modulated", + 
desc="0=none,1=affine+non-linear(SPM8 default),2=non-linear only", + ) wm_dartel = traits.Range( 0, 2, 0, usedefault=True, - field='estwrite.output.WM.dartel', - desc="0=None,1=rigid(SPM8 default),2=affine") + field="estwrite.output.WM.dartel", + desc="0=None,1=rigid(SPM8 default),2=affine", + ) csf_native = traits.Bool( - False, - usedefault=True, - field='estwrite.output.CSF.native', + False, usedefault=True, field="estwrite.output.CSF.native", ) csf_normalized = traits.Bool( - False, - usedefault=True, - field='estwrite.output.CSF.warped', + False, usedefault=True, field="estwrite.output.CSF.warped", ) csf_modulated_normalized = traits.Range( 0, 2, 2, usedefault=True, - field='estwrite.output.CSF.modulated', - desc='0=none,1=affine+non-linear(SPM8 default),2=non-linear only') + field="estwrite.output.CSF.modulated", + desc="0=none,1=affine+non-linear(SPM8 default),2=non-linear only", + ) csf_dartel = traits.Range( 0, 2, 0, usedefault=True, - field='estwrite.output.CSF.dartel', - desc="0=None,1=rigid(SPM8 default),2=affine") + field="estwrite.output.CSF.dartel", + desc="0=None,1=rigid(SPM8 default),2=affine", + ) bias_corrected_native = traits.Bool( - False, - usedefault=True, - field='estwrite.output.bias.native', + False, usedefault=True, field="estwrite.output.bias.native", ) bias_corrected_normalized = traits.Bool( - True, - usedefault=True, - field='estwrite.output.bias.warped', + True, usedefault=True, field="estwrite.output.bias.warped", ) bias_corrected_affine = traits.Bool( - False, - usedefault=True, - field='estwrite.output.bias.affine', + False, usedefault=True, field="estwrite.output.bias.affine", ) pve_label_native = traits.Bool( - False, usedefault=True, field='estwrite.output.label.native') + False, usedefault=True, field="estwrite.output.label.native" + ) pve_label_normalized = traits.Bool( - False, usedefault=True, field='estwrite.output.label.warped') + False, usedefault=True, field="estwrite.output.label.warped" + ) pve_label_dartel = traits.Range( 0, 2, 0, usedefault=True, - field='estwrite.output.label.dartel', - desc="0=None,1=rigid(SPM8 default),2=affine") + field="estwrite.output.label.dartel", + desc="0=None,1=rigid(SPM8 default),2=affine", + ) jacobian_determinant = traits.Bool( - False, usedefault=True, field='estwrite.jacobian.warped') + False, usedefault=True, field="estwrite.jacobian.warped" + ) deformation_field = traits.Tuple( (0, 0), traits.Bool, traits.Bool, usedefault=True, - field='estwrite.output.warps', - desc='forward and inverse field') + field="estwrite.output.warps", + desc="forward and inverse field", + ) class VBMSegmentOuputSpec(TraitedSpec): native_class_images = traits.List( - traits.List(File(exists=True)), desc='native space probability maps') + traits.List(File(exists=True)), desc="native space probability maps" + ) dartel_input_images = traits.List( - traits.List(File(exists=True)), desc='dartel imported class images') + traits.List(File(exists=True)), desc="dartel imported class images" + ) normalized_class_images = traits.List( - traits.List(File(exists=True)), desc='normalized class images') + traits.List(File(exists=True)), desc="normalized class images" + ) modulated_class_images = traits.List( - traits.List(File(exists=True)), - desc=('modulated+normalized class ' - 'images')) + traits.List(File(exists=True)), desc=("modulated+normalized class " "images") + ) transformation_mat = OutputMultiPath( - File(exists=True), desc='Normalization transformation') + File(exists=True), desc="Normalization transformation" + ) 
bias_corrected_images = OutputMultiPath( - File(exists=True), desc='bias corrected images') + File(exists=True), desc="bias corrected images" + ) normalized_bias_corrected_images = OutputMultiPath( - File(exists=True), desc='bias corrected images') + File(exists=True), desc="bias corrected images" + ) pve_label_native_images = OutputMultiPath(File(exists=True)) pve_label_normalized_images = OutputMultiPath(File(exists=True)) @@ -2342,124 +2536,137 @@ class VBMSegment(SPMCommand): input_spec = VBMSegmentInputSpec output_spec = VBMSegmentOuputSpec - _jobtype = 'tools' - _jobname = 'vbm8' + _jobtype = "tools" + _jobname = "vbm8" def _list_outputs(self): outputs = self._outputs().get() do_dartel = self.inputs.spatial_normalization - dartel_px = '' + dartel_px = "" if do_dartel: - dartel_px = 'r' + dartel_px = "r" - outputs['native_class_images'] = [[], [], []] - outputs['dartel_input_images'] = [[], [], []] - outputs['normalized_class_images'] = [[], [], []] - outputs['modulated_class_images'] = [[], [], []] + outputs["native_class_images"] = [[], [], []] + outputs["dartel_input_images"] = [[], [], []] + outputs["normalized_class_images"] = [[], [], []] + outputs["modulated_class_images"] = [[], [], []] - outputs['transformation_mat'] = [] + outputs["transformation_mat"] = [] - outputs['bias_corrected_images'] = [] - outputs['normalized_bias_corrected_images'] = [] + outputs["bias_corrected_images"] = [] + outputs["normalized_bias_corrected_images"] = [] - outputs['inverse_deformation_field'] = [] - outputs['forward_deformation_field'] = [] - outputs['jacobian_determinant_images'] = [] + outputs["inverse_deformation_field"] = [] + outputs["forward_deformation_field"] = [] + outputs["jacobian_determinant_images"] = [] - outputs['pve_label_native_images'] = [] - outputs['pve_label_normalized_images'] = [] - outputs['pve_label_registered_images'] = [] + outputs["pve_label_native_images"] = [] + outputs["pve_label_normalized_images"] = [] + outputs["pve_label_registered_images"] = [] for filename in self.inputs.in_files: pth, base, ext = split_filename(filename) - outputs['transformation_mat'].append( - os.path.join(pth, "%s_seg8.mat" % base)) + outputs["transformation_mat"].append( + os.path.join(pth, "%s_seg8.mat" % base) + ) - for i, tis in enumerate(['gm', 'wm', 'csf']): + for i, tis in enumerate(["gm", "wm", "csf"]): # native space - if getattr(self.inputs, '%s_native' % tis): - outputs['native_class_images'][i].append( - os.path.join(pth, "p%d%s.nii" % (i + 1, base))) - if getattr(self.inputs, '%s_dartel' % tis) == 1: - outputs['dartel_input_images'][i].append( - os.path.join(pth, "rp%d%s.nii" % (i + 1, base))) - elif getattr(self.inputs, '%s_dartel' % tis) == 2: - outputs['dartel_input_images'][i].append( - os.path.join(pth, "rp%d%s_affine.nii" % (i + 1, base))) - - # normalized space - if getattr(self.inputs, '%s_normalized' % tis): - outputs['normalized_class_images'][i].append( - os.path.join(pth, "w%sp%d%s.nii" % (dartel_px, i + 1, - base))) - - if getattr(self.inputs, '%s_modulated_normalized' % tis) == 1: - outputs['modulated_class_images'][i].append( - os.path.join(pth, "mw%sp%d%s.nii" % (dartel_px, i + 1, - base))) - elif getattr(self.inputs, - '%s_modulated_normalized' % tis) == 2: - outputs['normalized_class_images'][i].append( - os.path.join(pth, "m0w%sp%d%s.nii" % (dartel_px, i + 1, - base))) + if getattr(self.inputs, "%s_native" % tis): + outputs["native_class_images"][i].append( + os.path.join(pth, "p%d%s.nii" % (i + 1, base)) + ) + if getattr(self.inputs, "%s_dartel" % tis) 
== 1: + outputs["dartel_input_images"][i].append( + os.path.join(pth, "rp%d%s.nii" % (i + 1, base)) + ) + elif getattr(self.inputs, "%s_dartel" % tis) == 2: + outputs["dartel_input_images"][i].append( + os.path.join(pth, "rp%d%s_affine.nii" % (i + 1, base)) + ) + + # normalized space + if getattr(self.inputs, "%s_normalized" % tis): + outputs["normalized_class_images"][i].append( + os.path.join(pth, "w%sp%d%s.nii" % (dartel_px, i + 1, base)) + ) + + if getattr(self.inputs, "%s_modulated_normalized" % tis) == 1: + outputs["modulated_class_images"][i].append( + os.path.join(pth, "mw%sp%d%s.nii" % (dartel_px, i + 1, base)) + ) + elif getattr(self.inputs, "%s_modulated_normalized" % tis) == 2: + outputs["normalized_class_images"][i].append( + os.path.join(pth, "m0w%sp%d%s.nii" % (dartel_px, i + 1, base)) + ) if self.inputs.pve_label_native: - outputs['pve_label_native_images'].append( - os.path.join(pth, "p0%s.nii" % (base))) + outputs["pve_label_native_images"].append( + os.path.join(pth, "p0%s.nii" % (base)) + ) if self.inputs.pve_label_normalized: - outputs['pve_label_normalized_images'].append( - os.path.join(pth, "w%sp0%s.nii" % (dartel_px, base))) + outputs["pve_label_normalized_images"].append( + os.path.join(pth, "w%sp0%s.nii" % (dartel_px, base)) + ) if self.inputs.pve_label_dartel == 1: - outputs['pve_label_registered_images'].append( - os.path.join(pth, "rp0%s.nii" % (base))) + outputs["pve_label_registered_images"].append( + os.path.join(pth, "rp0%s.nii" % (base)) + ) elif self.inputs.pve_label_dartel == 2: - outputs['pve_label_registered_images'].append( - os.path.join(pth, "rp0%s_affine.nii" % (base))) + outputs["pve_label_registered_images"].append( + os.path.join(pth, "rp0%s_affine.nii" % (base)) + ) if self.inputs.bias_corrected_native: - outputs['bias_corrected_images'].append( - os.path.join(pth, "m%s.nii" % (base))) + outputs["bias_corrected_images"].append( + os.path.join(pth, "m%s.nii" % (base)) + ) if self.inputs.bias_corrected_normalized: - outputs['normalized_bias_corrected_images'].append( - os.path.join(pth, "wm%s%s.nii" % (dartel_px, base))) + outputs["normalized_bias_corrected_images"].append( + os.path.join(pth, "wm%s%s.nii" % (dartel_px, base)) + ) if self.inputs.deformation_field[0]: - outputs['forward_deformation_field'].append( - os.path.join(pth, "y_%s%s.nii" % (dartel_px, base))) + outputs["forward_deformation_field"].append( + os.path.join(pth, "y_%s%s.nii" % (dartel_px, base)) + ) if self.inputs.deformation_field[1]: - outputs['inverse_deformation_field'].append( - os.path.join(pth, "iy_%s%s.nii" % (dartel_px, base))) + outputs["inverse_deformation_field"].append( + os.path.join(pth, "iy_%s%s.nii" % (dartel_px, base)) + ) if self.inputs.jacobian_determinant and do_dartel: - outputs['jacobian_determinant_images'].append( - os.path.join(pth, "jac_wrp1%s.nii" % (base))) + outputs["jacobian_determinant_images"].append( + os.path.join(pth, "jac_wrp1%s.nii" % (base)) + ) return outputs def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ - if opt in ['in_files']: + if opt in ["in_files"]: return scans_for_fnames(val, keep4d=True) - elif opt in ['spatial_normalization']: - if val == 'low': - return {'normlow': []} - elif opt in ['dartel_template']: + elif opt in ["spatial_normalization"]: + if val == "low": + return {"normlow": []} + elif opt in ["dartel_template"]: return np.array([val], dtype=object) - elif opt in ['deformation_field']: + elif opt in ["deformation_field"]: return super(VBMSegment, self)._format_arg( - opt, 
spec, [int(val[0]), int(val[1])]) + opt, spec, [int(val[0]), int(val[1])] + ) else: return super(VBMSegment, self)._format_arg(opt, spec, val) def _parse_inputs(self): - if self.inputs.spatial_normalization == 'low': + if self.inputs.spatial_normalization == "low": einputs = super(VBMSegment, self)._parse_inputs( - skip=('spatial_normalization', 'dartel_template')) - einputs[0]['estwrite']['extopts']['dartelwarp'] = {'normlow': 1} + skip=("spatial_normalization", "dartel_template") + ) + einputs[0]["estwrite"]["extopts"]["dartelwarp"] = {"normlow": 1} return einputs else: - return super(VBMSegment, - self)._parse_inputs(skip=('spatial_normalization')) + return super(VBMSegment, self)._parse_inputs(skip=("spatial_normalization")) diff --git a/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py b/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py index 92ca0baf27..771cb640b1 100644 --- a/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py +++ b/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py @@ -4,35 +4,28 @@ def test_Analyze2nii_inputs(): input_map = dict( - analyze_file=dict( - extensions=None, - mandatory=True, - ), + analyze_file=dict(extensions=None, mandatory=True,), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), + mfile=dict(usedefault=True,), paths=dict(), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True,), ) inputs = Analyze2nii.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Analyze2nii_outputs(): output_map = dict( matlab_cmd=dict(), - mfile=dict(usedefault=True, ), - nifti_file=dict(extensions=None, ), + mfile=dict(usedefault=True,), + nifti_file=dict(extensions=None,), paths=dict(), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True,), ) outputs = Analyze2nii.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py b/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py index 8d7c371f40..6e741ae607 100644 --- a/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py +++ b/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py @@ -4,37 +4,29 @@ def test_ApplyDeformations_inputs(): input_map = dict( - deformation_field=dict( - extensions=None, - field='comp{1}.def', - mandatory=True, - ), - in_files=dict( - field='fnames', - mandatory=True, - ), - interp=dict(field='interp', ), + deformation_field=dict(extensions=None, field="comp{1}.def", mandatory=True,), + in_files=dict(field="fnames", mandatory=True,), + interp=dict(field="interp",), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), + mfile=dict(usedefault=True,), paths=dict(), reference_volume=dict( - extensions=['.hdr', '.img', '.img.gz', '.nii'], - field='comp{2}.id.space', + extensions=[".hdr", ".img", ".img.gz", ".nii"], + field="comp{2}.id.space", mandatory=True, ), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True,), ) inputs = ApplyDeformations.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ApplyDeformations_outputs(): - output_map = dict(out_files=dict(), ) + output_map = dict(out_files=dict(),) outputs = ApplyDeformations.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py b/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py index 270fc8cb75..b9389091b3 100644 --- a/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py +++ b/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py @@ -4,43 +4,34 @@ def test_ApplyInverseDeformation_inputs(): input_map = dict( - bounding_box=dict(field='comp{1}.inv.comp{1}.sn2def.bb', ), + bounding_box=dict(field="comp{1}.inv.comp{1}.sn2def.bb",), deformation=dict( extensions=None, - field='comp{1}.inv.comp{1}.sn2def.matname', - xor=['deformation_field'], + field="comp{1}.inv.comp{1}.sn2def.matname", + xor=["deformation_field"], ), deformation_field=dict( - extensions=None, - field='comp{1}.inv.comp{1}.def', - xor=['deformation'], - ), - in_files=dict( - field='fnames', - mandatory=True, + extensions=None, field="comp{1}.inv.comp{1}.def", xor=["deformation"], ), - interpolation=dict(field='interp', ), + in_files=dict(field="fnames", mandatory=True,), + interpolation=dict(field="interp",), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), + mfile=dict(usedefault=True,), paths=dict(), - target=dict( - extensions=None, - field='comp{1}.inv.space', - ), + target=dict(extensions=None, field="comp{1}.inv.space",), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), - voxel_sizes=dict(field='comp{1}.inv.comp{1}.sn2def.vox', ), + use_v8struct=dict(min_ver="8", usedefault=True,), + voxel_sizes=dict(field="comp{1}.inv.comp{1}.sn2def.vox",), ) inputs = ApplyInverseDeformation.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ApplyInverseDeformation_outputs(): - output_map = dict(out_files=dict(), ) + output_map = dict(out_files=dict(),) outputs = ApplyInverseDeformation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py b/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py index be07ee26ce..99f140ecac 100644 --- a/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py +++ b/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py @@ -4,35 +4,24 @@ def test_ApplyTransform_inputs(): input_map = dict( - in_file=dict( - copyfile=True, - extensions=None, - mandatory=True, - ), - mat=dict( - extensions=None, - mandatory=True, - ), + in_file=dict(copyfile=True, extensions=None, mandatory=True,), + mat=dict(extensions=None, mandatory=True,), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), - out_file=dict( - extensions=None, - genfile=True, - ), + mfile=dict(usedefault=True,), + out_file=dict(extensions=None, genfile=True,), paths=dict(), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True,), ) inputs = ApplyTransform.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ApplyTransform_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = ApplyTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py b/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py index a4f7b5b516..3c67fe75c6 100644 --- a/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py +++ 
b/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py @@ -4,36 +4,25 @@ def test_CalcCoregAffine_inputs(): input_map = dict( - invmat=dict(extensions=None, ), - mat=dict(extensions=None, ), + invmat=dict(extensions=None,), + mat=dict(extensions=None,), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), - moving=dict( - copyfile=False, - extensions=None, - mandatory=True, - ), + mfile=dict(usedefault=True,), + moving=dict(copyfile=False, extensions=None, mandatory=True,), paths=dict(), - target=dict( - extensions=None, - mandatory=True, - ), + target=dict(extensions=None, mandatory=True,), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True,), ) inputs = CalcCoregAffine.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CalcCoregAffine_outputs(): - output_map = dict( - invmat=dict(extensions=None, ), - mat=dict(extensions=None, ), - ) + output_map = dict(invmat=dict(extensions=None,), mat=dict(extensions=None,),) outputs = CalcCoregAffine.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_Coregister.py b/nipype/interfaces/spm/tests/test_auto_Coregister.py index 276f02c47a..04e39ce23b 100644 --- a/nipype/interfaces/spm/tests/test_auto_Coregister.py +++ b/nipype/interfaces/spm/tests/test_auto_Coregister.py @@ -4,52 +4,38 @@ def test_Coregister_inputs(): input_map = dict( - apply_to_files=dict( - copyfile=True, - field='other', - ), - cost_function=dict(field='eoptions.cost_fun', ), - fwhm=dict(field='eoptions.fwhm', ), - jobtype=dict(usedefault=True, ), + apply_to_files=dict(copyfile=True, field="other",), + cost_function=dict(field="eoptions.cost_fun",), + fwhm=dict(field="eoptions.fwhm",), + jobtype=dict(usedefault=True,), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), - out_prefix=dict( - field='roptions.prefix', - usedefault=True, - ), + mfile=dict(usedefault=True,), + out_prefix=dict(field="roptions.prefix", usedefault=True,), paths=dict(), - separation=dict(field='eoptions.sep', ), - source=dict( - copyfile=True, - field='source', - mandatory=True, - ), + separation=dict(field="eoptions.sep",), + source=dict(copyfile=True, field="source", mandatory=True,), target=dict( copyfile=False, - extensions=['.hdr', '.img', '.img.gz', '.nii'], - field='ref', + extensions=[".hdr", ".img", ".img.gz", ".nii"], + field="ref", mandatory=True, ), - tolerance=dict(field='eoptions.tol', ), + tolerance=dict(field="eoptions.tol",), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), - write_interp=dict(field='roptions.interp', ), - write_mask=dict(field='roptions.mask', ), - write_wrap=dict(field='roptions.wrap', ), + use_v8struct=dict(min_ver="8", usedefault=True,), + write_interp=dict(field="roptions.interp",), + write_mask=dict(field="roptions.mask",), + write_wrap=dict(field="roptions.wrap",), ) inputs = Coregister.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Coregister_outputs(): - output_map = dict( - coregistered_files=dict(), - coregistered_source=dict(), - ) + output_map = dict(coregistered_files=dict(), coregistered_source=dict(),) outputs = Coregister.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_CreateWarped.py 
b/nipype/interfaces/spm/tests/test_auto_CreateWarped.py index b25e377ad3..b172972fe7 100644 --- a/nipype/interfaces/spm/tests/test_auto_CreateWarped.py +++ b/nipype/interfaces/spm/tests/test_auto_CreateWarped.py @@ -5,34 +5,27 @@ def test_CreateWarped_inputs(): input_map = dict( flowfield_files=dict( - copyfile=False, - field='crt_warped.flowfields', - mandatory=True, + copyfile=False, field="crt_warped.flowfields", mandatory=True, ), - image_files=dict( - copyfile=False, - field='crt_warped.images', - mandatory=True, - ), - interp=dict(field='crt_warped.interp', ), - iterations=dict(field='crt_warped.K', ), + image_files=dict(copyfile=False, field="crt_warped.images", mandatory=True,), + interp=dict(field="crt_warped.interp",), + iterations=dict(field="crt_warped.K",), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), - modulate=dict(field='crt_warped.jactransf', ), + mfile=dict(usedefault=True,), + modulate=dict(field="crt_warped.jactransf",), paths=dict(), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True,), ) inputs = CreateWarped.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CreateWarped_outputs(): - output_map = dict(warped_files=dict(), ) + output_map = dict(warped_files=dict(),) outputs = CreateWarped.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_DARTEL.py b/nipype/interfaces/spm/tests/test_auto_DARTEL.py index de7eb7e158..ca031dfd1e 100644 --- a/nipype/interfaces/spm/tests/test_auto_DARTEL.py +++ b/nipype/interfaces/spm/tests/test_auto_DARTEL.py @@ -4,36 +4,28 @@ def test_DARTEL_inputs(): input_map = dict( - image_files=dict( - copyfile=False, - field='warp.images', - mandatory=True, - ), - iteration_parameters=dict(field='warp.settings.param', ), + image_files=dict(copyfile=False, field="warp.images", mandatory=True,), + iteration_parameters=dict(field="warp.settings.param",), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), - optimization_parameters=dict(field='warp.settings.optim', ), + mfile=dict(usedefault=True,), + optimization_parameters=dict(field="warp.settings.optim",), paths=dict(), - regularization_form=dict(field='warp.settings.rform', ), - template_prefix=dict( - field='warp.settings.template', - usedefault=True, - ), + regularization_form=dict(field="warp.settings.rform",), + template_prefix=dict(field="warp.settings.template", usedefault=True,), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True,), ) inputs = DARTEL.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DARTEL_outputs(): output_map = dict( dartel_flow_fields=dict(), - final_template_file=dict(extensions=None, ), + final_template_file=dict(extensions=None,), template_files=dict(), ) outputs = DARTEL.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py b/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py index 9870b2b55b..3b406b3c27 100644 --- a/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py +++ b/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py @@ -5,42 +5,35 @@ def test_DARTELNorm2MNI_inputs(): input_map = dict( apply_to_files=dict( - copyfile=False, - field='mni_norm.data.subjs.images', - 
mandatory=True, - ), - bounding_box=dict(field='mni_norm.bb', ), - flowfield_files=dict( - field='mni_norm.data.subjs.flowfields', - mandatory=True, + copyfile=False, field="mni_norm.data.subjs.images", mandatory=True, ), - fwhm=dict(field='mni_norm.fwhm', ), + bounding_box=dict(field="mni_norm.bb",), + flowfield_files=dict(field="mni_norm.data.subjs.flowfields", mandatory=True,), + fwhm=dict(field="mni_norm.fwhm",), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), - modulate=dict(field='mni_norm.preserve', ), + mfile=dict(usedefault=True,), + modulate=dict(field="mni_norm.preserve",), paths=dict(), template_file=dict( copyfile=False, - extensions=['.hdr', '.img', '.img.gz', '.nii'], - field='mni_norm.template', + extensions=[".hdr", ".img", ".img.gz", ".nii"], + field="mni_norm.template", mandatory=True, ), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), - voxel_size=dict(field='mni_norm.vox', ), + use_v8struct=dict(min_ver="8", usedefault=True,), + voxel_size=dict(field="mni_norm.vox",), ) inputs = DARTELNorm2MNI.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DARTELNorm2MNI_outputs(): output_map = dict( - normalization_parameter_file=dict(extensions=None, ), - normalized_files=dict(), + normalization_parameter_file=dict(extensions=None,), normalized_files=dict(), ) outputs = DARTELNorm2MNI.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_DicomImport.py b/nipype/interfaces/spm/tests/test_auto_DicomImport.py index 833794b628..ddb3f81c78 100644 --- a/nipype/interfaces/spm/tests/test_auto_DicomImport.py +++ b/nipype/interfaces/spm/tests/test_auto_DicomImport.py @@ -4,42 +4,26 @@ def test_DicomImport_inputs(): input_map = dict( - format=dict( - field='convopts.format', - usedefault=True, - ), - icedims=dict( - field='convopts.icedims', - usedefault=True, - ), - in_files=dict( - field='data', - mandatory=True, - ), + format=dict(field="convopts.format", usedefault=True,), + icedims=dict(field="convopts.icedims", usedefault=True,), + in_files=dict(field="data", mandatory=True,), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), - output_dir=dict( - field='outdir', - usedefault=True, - ), - output_dir_struct=dict( - field='root', - usedefault=True, - ), + mfile=dict(usedefault=True,), + output_dir=dict(field="outdir", usedefault=True,), + output_dir_struct=dict(field="root", usedefault=True,), paths=dict(), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True,), ) inputs = DicomImport.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DicomImport_outputs(): - output_map = dict(out_files=dict(), ) + output_map = dict(out_files=dict(),) outputs = DicomImport.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py b/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py index cc2d50f5a7..323cb3707c 100644 --- a/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py +++ b/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py @@ -4,45 +4,34 @@ def test_EstimateContrast_inputs(): input_map = dict( - beta_images=dict( - copyfile=False, - mandatory=True, - ), - contrasts=dict(mandatory=True, ), - group_contrast=dict(xor=['use_derivs'], ), + 
beta_images=dict(copyfile=False, mandatory=True,), + contrasts=dict(mandatory=True,), + group_contrast=dict(xor=["use_derivs"],), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), + mfile=dict(usedefault=True,), paths=dict(), - residual_image=dict( - copyfile=False, - extensions=None, - mandatory=True, - ), + residual_image=dict(copyfile=False, extensions=None, mandatory=True,), spm_mat_file=dict( - copyfile=True, - extensions=None, - field='spmmat', - mandatory=True, + copyfile=True, extensions=None, field="spmmat", mandatory=True, ), - use_derivs=dict(xor=['group_contrast'], ), + use_derivs=dict(xor=["group_contrast"],), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True,), ) inputs = EstimateContrast.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_EstimateContrast_outputs(): output_map = dict( con_images=dict(), ess_images=dict(), spmF_images=dict(), spmT_images=dict(), - spm_mat_file=dict(extensions=None, ), + spm_mat_file=dict(extensions=None,), ) outputs = EstimateContrast.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_EstimateModel.py b/nipype/interfaces/spm/tests/test_auto_EstimateModel.py index 9893f2bfcb..5e2b25e0c7 100644 --- a/nipype/interfaces/spm/tests/test_auto_EstimateModel.py +++ b/nipype/interfaces/spm/tests/test_auto_EstimateModel.py @@ -4,45 +4,38 @@ def test_EstimateModel_inputs(): input_map = dict( - estimation_method=dict( - field='method', - mandatory=True, - ), + estimation_method=dict(field="method", mandatory=True,), flags=dict(), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), + mfile=dict(usedefault=True,), paths=dict(), spm_mat_file=dict( - copyfile=True, - extensions=None, - field='spmmat', - mandatory=True, + copyfile=True, extensions=None, field="spmmat", mandatory=True, ), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), - write_residuals=dict(field='write_residuals', ), + use_v8struct=dict(min_ver="8", usedefault=True,), + write_residuals=dict(field="write_residuals",), ) inputs = EstimateModel.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_EstimateModel_outputs(): output_map = dict( ARcoef=dict(), Cbetas=dict(), - RPVimage=dict(extensions=['.hdr', '.img', '.img.gz', '.nii'], ), + RPVimage=dict(extensions=[".hdr", ".img", ".img.gz", ".nii"],), SDbetas=dict(), SDerror=dict(), beta_images=dict(), - labels=dict(extensions=['.hdr', '.img', '.img.gz', '.nii'], ), - mask_image=dict(extensions=['.hdr', '.img', '.img.gz', '.nii'], ), - residual_image=dict(extensions=['.hdr', '.img', '.img.gz', '.nii'], ), + labels=dict(extensions=[".hdr", ".img", ".img.gz", ".nii"],), + mask_image=dict(extensions=[".hdr", ".img", ".img.gz", ".nii"],), + residual_image=dict(extensions=[".hdr", ".img", ".img.gz", ".nii"],), residual_images=dict(), - spm_mat_file=dict(extensions=None, ), + spm_mat_file=dict(extensions=None,), ) outputs = EstimateModel.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py b/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py index 814cb173fb..0fb35c010f 100644 --- a/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py @@ -4,55 +4,49 @@ def test_FactorialDesign_inputs(): input_map = 
dict( - covariates=dict(field='cov', ), - explicit_mask_file=dict( - extensions=None, - field='masking.em', - ), + covariates=dict(field="cov",), + explicit_mask_file=dict(extensions=None, field="masking.em",), global_calc_mean=dict( - field='globalc.g_mean', - xor=['global_calc_omit', 'global_calc_values'], + field="globalc.g_mean", xor=["global_calc_omit", "global_calc_values"], ), global_calc_omit=dict( - field='globalc.g_omit', - xor=['global_calc_mean', 'global_calc_values'], + field="globalc.g_omit", xor=["global_calc_mean", "global_calc_values"], ), global_calc_values=dict( - field='globalc.g_user.global_uval', - xor=['global_calc_mean', 'global_calc_omit'], + field="globalc.g_user.global_uval", + xor=["global_calc_mean", "global_calc_omit"], ), - global_normalization=dict(field='globalm.glonorm', ), + global_normalization=dict(field="globalm.glonorm",), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), - no_grand_mean_scaling=dict(field='globalm.gmsca.gmsca_no', ), + mfile=dict(usedefault=True,), + no_grand_mean_scaling=dict(field="globalm.gmsca.gmsca_no",), paths=dict(), - spm_mat_dir=dict(field='dir', ), + spm_mat_dir=dict(field="dir",), threshold_mask_absolute=dict( - field='masking.tm.tma.athresh', - xor=['threshold_mask_none', 'threshold_mask_relative'], + field="masking.tm.tma.athresh", + xor=["threshold_mask_none", "threshold_mask_relative"], ), threshold_mask_none=dict( - field='masking.tm.tm_none', - xor=['threshold_mask_absolute', 'threshold_mask_relative'], + field="masking.tm.tm_none", + xor=["threshold_mask_absolute", "threshold_mask_relative"], ), threshold_mask_relative=dict( - field='masking.tm.tmr.rthresh', - xor=['threshold_mask_absolute', 'threshold_mask_none'], + field="masking.tm.tmr.rthresh", + xor=["threshold_mask_absolute", "threshold_mask_none"], ), - use_implicit_threshold=dict(field='masking.im', ), + use_implicit_threshold=dict(field="masking.im",), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True,), ) inputs = FactorialDesign.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FactorialDesign_outputs(): - output_map = dict(spm_mat_file=dict(extensions=None, ), ) + output_map = dict(spm_mat_file=dict(extensions=None,),) outputs = FactorialDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_FieldMap.py b/nipype/interfaces/spm/tests/test_auto_FieldMap.py index ad0375e7c8..19d0bc3e19 100644 --- a/nipype/interfaces/spm/tests/test_auto_FieldMap.py +++ b/nipype/interfaces/spm/tests/test_auto_FieldMap.py @@ -4,127 +4,70 @@ def test_FieldMap_inputs(): input_map = dict( - anat_file=dict( - copyfile=False, - extensions=None, - field='subj.anat', - ), - blip_direction=dict( - field='subj.defaults.defaultsval.blipdir', - mandatory=True, - ), - echo_times=dict( - field='subj.defaults.defaultsval.et', - mandatory=True, - ), + anat_file=dict(copyfile=False, extensions=None, field="subj.anat",), + blip_direction=dict(field="subj.defaults.defaultsval.blipdir", mandatory=True,), + echo_times=dict(field="subj.defaults.defaultsval.et", mandatory=True,), epi_file=dict( - copyfile=False, - extensions=None, - field='subj.session.epi', - mandatory=True, - ), - epifm=dict( - field='subj.defaults.defaultsval.epifm', - usedefault=True, + copyfile=False, extensions=None, field="subj.session.epi", mandatory=True, ), + 
epifm=dict(field="subj.defaults.defaultsval.epifm", usedefault=True,), jacobian_modulation=dict( - field='subj.defaults.defaultsval.ajm', - usedefault=True, + field="subj.defaults.defaultsval.ajm", usedefault=True, ), - jobtype=dict(usedefault=True, ), + jobtype=dict(usedefault=True,), magnitude_file=dict( copyfile=False, extensions=None, - field='subj.data.presubphasemag.magnitude', + field="subj.data.presubphasemag.magnitude", mandatory=True, ), - mask_fwhm=dict( - field='subj.defaults.defaultsval.mflags.fwhm', - usedefault=True, - ), - maskbrain=dict( - field='subj.defaults.defaultsval.maskbrain', - usedefault=True, - ), - matchanat=dict( - field='subj.matchanat', - usedefault=True, - ), - matchvdm=dict( - field='subj.matchvdm', - usedefault=True, - ), + mask_fwhm=dict(field="subj.defaults.defaultsval.mflags.fwhm", usedefault=True,), + maskbrain=dict(field="subj.defaults.defaultsval.maskbrain", usedefault=True,), + matchanat=dict(field="subj.matchanat", usedefault=True,), + matchvdm=dict(field="subj.matchvdm", usedefault=True,), matlab_cmd=dict(), - method=dict( - field='subj.defaults.defaultsval.uflags.method', - usedefault=True, - ), - mfile=dict(usedefault=True, ), + method=dict(field="subj.defaults.defaultsval.uflags.method", usedefault=True,), + mfile=dict(usedefault=True,), ndilate=dict( - field='subj.defaults.defaultsval.mflags.ndilate', - usedefault=True, - ), - nerode=dict( - field='subj.defaults.defaultsval.mflags.nerode', - usedefault=True, - ), - pad=dict( - field='subj.defaults.defaultsval.uflags.pad', - usedefault=True, + field="subj.defaults.defaultsval.mflags.ndilate", usedefault=True, ), + nerode=dict(field="subj.defaults.defaultsval.mflags.nerode", usedefault=True,), + pad=dict(field="subj.defaults.defaultsval.uflags.pad", usedefault=True,), paths=dict(), phase_file=dict( copyfile=False, extensions=None, - field='subj.data.presubphasemag.phase', + field="subj.data.presubphasemag.phase", mandatory=True, ), - reg=dict( - field='subj.defaults.defaultsval.mflags.reg', - usedefault=True, - ), - sessname=dict( - field='subj.sessname', - usedefault=True, - ), + reg=dict(field="subj.defaults.defaultsval.mflags.reg", usedefault=True,), + sessname=dict(field="subj.sessname", usedefault=True,), template=dict( copyfile=False, extensions=None, - field='subj.defaults.defaultsval.mflags.template', - ), - thresh=dict( - field='subj.defaults.defaultsval.mflags.thresh', - usedefault=True, + field="subj.defaults.defaultsval.mflags.template", ), + thresh=dict(field="subj.defaults.defaultsval.mflags.thresh", usedefault=True,), total_readout_time=dict( - field='subj.defaults.defaultsval.tert', - mandatory=True, + field="subj.defaults.defaultsval.tert", mandatory=True, ), unwarp_fwhm=dict( - field='subj.defaults.defaultsval.uflags.fwhm', - usedefault=True, + field="subj.defaults.defaultsval.uflags.fwhm", usedefault=True, ), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), - writeunwarped=dict( - field='subj.writeunwarped', - usedefault=True, - ), - ws=dict( - field='subj.defaults.defaultsval.uflags.ws', - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True,), + writeunwarped=dict(field="subj.writeunwarped", usedefault=True,), + ws=dict(field="subj.defaults.defaultsval.uflags.ws", usedefault=True,), ) inputs = FieldMap.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FieldMap_outputs(): - output_map = 
dict(vdm=dict(extensions=None, ), ) + output_map = dict(vdm=dict(extensions=None,),) outputs = FieldMap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_Level1Design.py b/nipype/interfaces/spm/tests/test_auto_Level1Design.py index 6d941dbec9..8a57a7b86e 100644 --- a/nipype/interfaces/spm/tests/test_auto_Level1Design.py +++ b/nipype/interfaces/spm/tests/test_auto_Level1Design.py @@ -4,51 +4,35 @@ def test_Level1Design_inputs(): input_map = dict( - bases=dict( - field='bases', - mandatory=True, - ), - factor_info=dict(field='fact', ), + bases=dict(field="bases", mandatory=True,), + factor_info=dict(field="fact",), flags=dict(), - global_intensity_normalization=dict(field='global', ), - interscan_interval=dict( - field='timing.RT', - mandatory=True, - ), - mask_image=dict( - extensions=None, - field='mask', - ), - mask_threshold=dict(usedefault=True, ), + global_intensity_normalization=dict(field="global",), + interscan_interval=dict(field="timing.RT", mandatory=True,), + mask_image=dict(extensions=None, field="mask",), + mask_threshold=dict(usedefault=True,), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), - microtime_onset=dict(field='timing.fmri_t0', ), - microtime_resolution=dict(field='timing.fmri_t', ), - model_serial_correlations=dict(field='cvi', ), + mfile=dict(usedefault=True,), + microtime_onset=dict(field="timing.fmri_t0",), + microtime_resolution=dict(field="timing.fmri_t",), + model_serial_correlations=dict(field="cvi",), paths=dict(), - session_info=dict( - field='sess', - mandatory=True, - ), - spm_mat_dir=dict(field='dir', ), - timing_units=dict( - field='timing.units', - mandatory=True, - ), + session_info=dict(field="sess", mandatory=True,), + spm_mat_dir=dict(field="dir",), + timing_units=dict(field="timing.units", mandatory=True,), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), - volterra_expansion_order=dict(field='volt', ), + use_v8struct=dict(min_ver="8", usedefault=True,), + volterra_expansion_order=dict(field="volt",), ) inputs = Level1Design.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Level1Design_outputs(): - output_map = dict(spm_mat_file=dict(extensions=None, ), ) + output_map = dict(spm_mat_file=dict(extensions=None,),) outputs = Level1Design.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py b/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py index 1fdc1d1ff1..478c869474 100644 --- a/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py @@ -4,64 +4,52 @@ def test_MultipleRegressionDesign_inputs(): input_map = dict( - covariates=dict(field='cov', ), - explicit_mask_file=dict( - extensions=None, - field='masking.em', - ), + covariates=dict(field="cov",), + explicit_mask_file=dict(extensions=None, field="masking.em",), global_calc_mean=dict( - field='globalc.g_mean', - xor=['global_calc_omit', 'global_calc_values'], + field="globalc.g_mean", xor=["global_calc_omit", "global_calc_values"], ), global_calc_omit=dict( - field='globalc.g_omit', - xor=['global_calc_mean', 'global_calc_values'], + field="globalc.g_omit", xor=["global_calc_mean", "global_calc_values"], ), global_calc_values=dict( - field='globalc.g_user.global_uval', - xor=['global_calc_mean', 
'global_calc_omit'], - ), - global_normalization=dict(field='globalm.glonorm', ), - in_files=dict( - field='des.mreg.scans', - mandatory=True, - ), - include_intercept=dict( - field='des.mreg.incint', - usedefault=True, + field="globalc.g_user.global_uval", + xor=["global_calc_mean", "global_calc_omit"], ), + global_normalization=dict(field="globalm.glonorm",), + in_files=dict(field="des.mreg.scans", mandatory=True,), + include_intercept=dict(field="des.mreg.incint", usedefault=True,), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), - no_grand_mean_scaling=dict(field='globalm.gmsca.gmsca_no', ), + mfile=dict(usedefault=True,), + no_grand_mean_scaling=dict(field="globalm.gmsca.gmsca_no",), paths=dict(), - spm_mat_dir=dict(field='dir', ), + spm_mat_dir=dict(field="dir",), threshold_mask_absolute=dict( - field='masking.tm.tma.athresh', - xor=['threshold_mask_none', 'threshold_mask_relative'], + field="masking.tm.tma.athresh", + xor=["threshold_mask_none", "threshold_mask_relative"], ), threshold_mask_none=dict( - field='masking.tm.tm_none', - xor=['threshold_mask_absolute', 'threshold_mask_relative'], + field="masking.tm.tm_none", + xor=["threshold_mask_absolute", "threshold_mask_relative"], ), threshold_mask_relative=dict( - field='masking.tm.tmr.rthresh', - xor=['threshold_mask_absolute', 'threshold_mask_none'], + field="masking.tm.tmr.rthresh", + xor=["threshold_mask_absolute", "threshold_mask_none"], ), - use_implicit_threshold=dict(field='masking.im', ), + use_implicit_threshold=dict(field="masking.im",), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), - user_covariates=dict(field='des.mreg.mcov', ), + use_v8struct=dict(min_ver="8", usedefault=True,), + user_covariates=dict(field="des.mreg.mcov",), ) inputs = MultipleRegressionDesign.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MultipleRegressionDesign_outputs(): - output_map = dict(spm_mat_file=dict(extensions=None, ), ) + output_map = dict(spm_mat_file=dict(extensions=None,),) outputs = MultipleRegressionDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_NewSegment.py b/nipype/interfaces/spm/tests/test_auto_NewSegment.py index 505296bbf4..e05643b92e 100644 --- a/nipype/interfaces/spm/tests/test_auto_NewSegment.py +++ b/nipype/interfaces/spm/tests/test_auto_NewSegment.py @@ -4,31 +4,26 @@ def test_NewSegment_inputs(): input_map = dict( - affine_regularization=dict(field='warp.affreg', ), - channel_files=dict( - copyfile=False, - field='channel', - mandatory=True, - ), - channel_info=dict(field='channel', ), + affine_regularization=dict(field="warp.affreg",), + channel_files=dict(copyfile=False, field="channel", mandatory=True,), + channel_info=dict(field="channel",), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), + mfile=dict(usedefault=True,), paths=dict(), - sampling_distance=dict(field='warp.samp', ), - tissues=dict(field='tissue', ), + sampling_distance=dict(field="warp.samp",), + tissues=dict(field="tissue",), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), - warping_regularization=dict(field='warp.reg', ), - write_deformation_fields=dict(field='warp.write', ), + use_v8struct=dict(min_ver="8", usedefault=True,), + warping_regularization=dict(field="warp.reg",), + write_deformation_fields=dict(field="warp.write",), ) inputs = NewSegment.input_spec() for key, metadata in 
list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_NewSegment_outputs(): output_map = dict( bias_corrected_images=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_Normalize.py b/nipype/interfaces/spm/tests/test_auto_Normalize.py index dfa33b5106..e028c609c9 100644 --- a/nipype/interfaces/spm/tests/test_auto_Normalize.py +++ b/nipype/interfaces/spm/tests/test_auto_Normalize.py @@ -4,70 +4,52 @@ def test_Normalize_inputs(): input_map = dict( - DCT_period_cutoff=dict(field='eoptions.cutoff', ), - affine_regularization_type=dict(field='eoptions.regtype', ), - apply_to_files=dict( - copyfile=True, - field='subj.resample', - ), - jobtype=dict(usedefault=True, ), + DCT_period_cutoff=dict(field="eoptions.cutoff",), + affine_regularization_type=dict(field="eoptions.regtype",), + apply_to_files=dict(copyfile=True, field="subj.resample",), + jobtype=dict(usedefault=True,), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), - nonlinear_iterations=dict(field='eoptions.nits', ), - nonlinear_regularization=dict(field='eoptions.reg', ), - out_prefix=dict( - field='roptions.prefix', - usedefault=True, - ), + mfile=dict(usedefault=True,), + nonlinear_iterations=dict(field="eoptions.nits",), + nonlinear_regularization=dict(field="eoptions.reg",), + out_prefix=dict(field="roptions.prefix", usedefault=True,), parameter_file=dict( copyfile=False, extensions=None, - field='subj.matname', + field="subj.matname", mandatory=True, - xor=['source', 'template'], + xor=["source", "template"], ), paths=dict(), source=dict( - copyfile=True, - field='subj.source', - mandatory=True, - xor=['parameter_file'], - ), - source_image_smoothing=dict(field='eoptions.smosrc', ), - source_weight=dict( - copyfile=False, - extensions=None, - field='subj.wtsrc', + copyfile=True, field="subj.source", mandatory=True, xor=["parameter_file"], ), + source_image_smoothing=dict(field="eoptions.smosrc",), + source_weight=dict(copyfile=False, extensions=None, field="subj.wtsrc",), template=dict( copyfile=False, extensions=None, - field='eoptions.template', + field="eoptions.template", mandatory=True, - xor=['parameter_file'], - ), - template_image_smoothing=dict(field='eoptions.smoref', ), - template_weight=dict( - copyfile=False, - extensions=None, - field='eoptions.weight', + xor=["parameter_file"], ), + template_image_smoothing=dict(field="eoptions.smoref",), + template_weight=dict(copyfile=False, extensions=None, field="eoptions.weight",), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), - write_bounding_box=dict(field='roptions.bb', ), - write_interp=dict(field='roptions.interp', ), - write_preserve=dict(field='roptions.preserve', ), - write_voxel_sizes=dict(field='roptions.vox', ), - write_wrap=dict(field='roptions.wrap', ), + use_v8struct=dict(min_ver="8", usedefault=True,), + write_bounding_box=dict(field="roptions.bb",), + write_interp=dict(field="roptions.interp",), + write_preserve=dict(field="roptions.preserve",), + write_voxel_sizes=dict(field="roptions.vox",), + write_wrap=dict(field="roptions.wrap",), ) inputs = Normalize.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Normalize_outputs(): output_map = dict( normalization_parameters=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_Normalize12.py b/nipype/interfaces/spm/tests/test_auto_Normalize12.py index 
74abdb1d60..b64475eab3 100644 --- a/nipype/interfaces/spm/tests/test_auto_Normalize12.py +++ b/nipype/interfaces/spm/tests/test_auto_Normalize12.py @@ -4,63 +4,54 @@ def test_Normalize12_inputs(): input_map = dict( - affine_regularization_type=dict(field='eoptions.affreg', ), - apply_to_files=dict( - copyfile=True, - field='subj.resample', - ), - bias_fwhm=dict(field='eoptions.biasfwhm', ), - bias_regularization=dict(field='eoptions.biasreg', ), + affine_regularization_type=dict(field="eoptions.affreg",), + apply_to_files=dict(copyfile=True, field="subj.resample",), + bias_fwhm=dict(field="eoptions.biasfwhm",), + bias_regularization=dict(field="eoptions.biasreg",), deformation_file=dict( copyfile=False, - extensions=['.hdr', '.img', '.img.gz', '.nii'], - field='subj.def', + extensions=[".hdr", ".img", ".img.gz", ".nii"], + field="subj.def", mandatory=True, - xor=['image_to_align', 'tpm'], + xor=["image_to_align", "tpm"], ), image_to_align=dict( copyfile=True, - extensions=['.hdr', '.img', '.img.gz', '.nii'], - field='subj.vol', + extensions=[".hdr", ".img", ".img.gz", ".nii"], + field="subj.vol", mandatory=True, - xor=['deformation_file'], + xor=["deformation_file"], ), - jobtype=dict(usedefault=True, ), + jobtype=dict(usedefault=True,), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), - out_prefix=dict( - field='woptions.prefix', - usedefault=True, - ), + mfile=dict(usedefault=True,), + out_prefix=dict(field="woptions.prefix", usedefault=True,), paths=dict(), - sampling_distance=dict(field='eoptions.samp', ), - smoothness=dict(field='eoptions.fwhm', ), + sampling_distance=dict(field="eoptions.samp",), + smoothness=dict(field="eoptions.fwhm",), tpm=dict( copyfile=False, extensions=None, - field='eoptions.tpm', - xor=['deformation_file'], + field="eoptions.tpm", + xor=["deformation_file"], ), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), - warping_regularization=dict(field='eoptions.reg', ), - write_bounding_box=dict(field='woptions.bb', ), - write_interp=dict(field='woptions.interp', ), - write_voxel_sizes=dict(field='woptions.vox', ), + use_v8struct=dict(min_ver="8", usedefault=True,), + warping_regularization=dict(field="eoptions.reg",), + write_bounding_box=dict(field="woptions.bb",), + write_interp=dict(field="woptions.interp",), + write_voxel_sizes=dict(field="woptions.vox",), ) inputs = Normalize12.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Normalize12_outputs(): output_map = dict( - deformation_field=dict(), - normalized_files=dict(), - normalized_image=dict(), + deformation_field=dict(), normalized_files=dict(), normalized_image=dict(), ) outputs = Normalize12.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py b/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py index 552ca0e701..cd5197602c 100644 --- a/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py @@ -4,59 +4,50 @@ def test_OneSampleTTestDesign_inputs(): input_map = dict( - covariates=dict(field='cov', ), - explicit_mask_file=dict( - extensions=None, - field='masking.em', - ), + covariates=dict(field="cov",), + explicit_mask_file=dict(extensions=None, field="masking.em",), global_calc_mean=dict( - field='globalc.g_mean', - xor=['global_calc_omit', 'global_calc_values'], + field="globalc.g_mean", xor=["global_calc_omit", 
"global_calc_values"], ), global_calc_omit=dict( - field='globalc.g_omit', - xor=['global_calc_mean', 'global_calc_values'], + field="globalc.g_omit", xor=["global_calc_mean", "global_calc_values"], ), global_calc_values=dict( - field='globalc.g_user.global_uval', - xor=['global_calc_mean', 'global_calc_omit'], - ), - global_normalization=dict(field='globalm.glonorm', ), - in_files=dict( - field='des.t1.scans', - mandatory=True, + field="globalc.g_user.global_uval", + xor=["global_calc_mean", "global_calc_omit"], ), + global_normalization=dict(field="globalm.glonorm",), + in_files=dict(field="des.t1.scans", mandatory=True,), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), - no_grand_mean_scaling=dict(field='globalm.gmsca.gmsca_no', ), + mfile=dict(usedefault=True,), + no_grand_mean_scaling=dict(field="globalm.gmsca.gmsca_no",), paths=dict(), - spm_mat_dir=dict(field='dir', ), + spm_mat_dir=dict(field="dir",), threshold_mask_absolute=dict( - field='masking.tm.tma.athresh', - xor=['threshold_mask_none', 'threshold_mask_relative'], + field="masking.tm.tma.athresh", + xor=["threshold_mask_none", "threshold_mask_relative"], ), threshold_mask_none=dict( - field='masking.tm.tm_none', - xor=['threshold_mask_absolute', 'threshold_mask_relative'], + field="masking.tm.tm_none", + xor=["threshold_mask_absolute", "threshold_mask_relative"], ), threshold_mask_relative=dict( - field='masking.tm.tmr.rthresh', - xor=['threshold_mask_absolute', 'threshold_mask_none'], + field="masking.tm.tmr.rthresh", + xor=["threshold_mask_absolute", "threshold_mask_none"], ), - use_implicit_threshold=dict(field='masking.im', ), + use_implicit_threshold=dict(field="masking.im",), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True,), ) inputs = OneSampleTTestDesign.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_OneSampleTTestDesign_outputs(): - output_map = dict(spm_mat_file=dict(extensions=None, ), ) + output_map = dict(spm_mat_file=dict(extensions=None,),) outputs = OneSampleTTestDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py b/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py index d8e9ef1615..bb516488ee 100644 --- a/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py @@ -4,61 +4,52 @@ def test_PairedTTestDesign_inputs(): input_map = dict( - ancova=dict(field='des.pt.ancova', ), - covariates=dict(field='cov', ), - explicit_mask_file=dict( - extensions=None, - field='masking.em', - ), + ancova=dict(field="des.pt.ancova",), + covariates=dict(field="cov",), + explicit_mask_file=dict(extensions=None, field="masking.em",), global_calc_mean=dict( - field='globalc.g_mean', - xor=['global_calc_omit', 'global_calc_values'], + field="globalc.g_mean", xor=["global_calc_omit", "global_calc_values"], ), global_calc_omit=dict( - field='globalc.g_omit', - xor=['global_calc_mean', 'global_calc_values'], + field="globalc.g_omit", xor=["global_calc_mean", "global_calc_values"], ), global_calc_values=dict( - field='globalc.g_user.global_uval', - xor=['global_calc_mean', 'global_calc_omit'], + field="globalc.g_user.global_uval", + xor=["global_calc_mean", "global_calc_omit"], ), - global_normalization=dict(field='globalm.glonorm', ), - 
grand_mean_scaling=dict(field='des.pt.gmsca', ), + global_normalization=dict(field="globalm.glonorm",), + grand_mean_scaling=dict(field="des.pt.gmsca",), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), - no_grand_mean_scaling=dict(field='globalm.gmsca.gmsca_no', ), - paired_files=dict( - field='des.pt.pair', - mandatory=True, - ), + mfile=dict(usedefault=True,), + no_grand_mean_scaling=dict(field="globalm.gmsca.gmsca_no",), + paired_files=dict(field="des.pt.pair", mandatory=True,), paths=dict(), - spm_mat_dir=dict(field='dir', ), + spm_mat_dir=dict(field="dir",), threshold_mask_absolute=dict( - field='masking.tm.tma.athresh', - xor=['threshold_mask_none', 'threshold_mask_relative'], + field="masking.tm.tma.athresh", + xor=["threshold_mask_none", "threshold_mask_relative"], ), threshold_mask_none=dict( - field='masking.tm.tm_none', - xor=['threshold_mask_absolute', 'threshold_mask_relative'], + field="masking.tm.tm_none", + xor=["threshold_mask_absolute", "threshold_mask_relative"], ), threshold_mask_relative=dict( - field='masking.tm.tmr.rthresh', - xor=['threshold_mask_absolute', 'threshold_mask_none'], + field="masking.tm.tmr.rthresh", + xor=["threshold_mask_absolute", "threshold_mask_none"], ), - use_implicit_threshold=dict(field='masking.im', ), + use_implicit_threshold=dict(field="masking.im",), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True,), ) inputs = PairedTTestDesign.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_PairedTTestDesign_outputs(): - output_map = dict(spm_mat_file=dict(extensions=None, ), ) + output_map = dict(spm_mat_file=dict(extensions=None,),) outputs = PairedTTestDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_Realign.py b/nipype/interfaces/spm/tests/test_auto_Realign.py index c65181e277..ab76f6a82d 100644 --- a/nipype/interfaces/spm/tests/test_auto_Realign.py +++ b/nipype/interfaces/spm/tests/test_auto_Realign.py @@ -4,52 +4,36 @@ def test_Realign_inputs(): input_map = dict( - fwhm=dict(field='eoptions.fwhm', ), - in_files=dict( - copyfile=True, - field='data', - mandatory=True, - ), - interp=dict(field='eoptions.interp', ), - jobtype=dict(usedefault=True, ), + fwhm=dict(field="eoptions.fwhm",), + in_files=dict(copyfile=True, field="data", mandatory=True,), + interp=dict(field="eoptions.interp",), + jobtype=dict(usedefault=True,), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), - out_prefix=dict( - field='roptions.prefix', - usedefault=True, - ), + mfile=dict(usedefault=True,), + out_prefix=dict(field="roptions.prefix", usedefault=True,), paths=dict(), - quality=dict(field='eoptions.quality', ), - register_to_mean=dict(field='eoptions.rtm', ), - separation=dict(field='eoptions.sep', ), + quality=dict(field="eoptions.quality",), + register_to_mean=dict(field="eoptions.rtm",), + separation=dict(field="eoptions.sep",), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), - weight_img=dict( - extensions=None, - field='eoptions.weight', - ), - wrap=dict(field='eoptions.wrap', ), - write_interp=dict(field='roptions.interp', ), - write_mask=dict(field='roptions.mask', ), - write_which=dict( - field='roptions.which', - maxlen=2, - minlen=2, - usedefault=True, - ), - write_wrap=dict(field='roptions.wrap', ), + use_v8struct=dict(min_ver="8", usedefault=True,), + 
weight_img=dict(extensions=None, field="eoptions.weight",), + wrap=dict(field="eoptions.wrap",), + write_interp=dict(field="roptions.interp",), + write_mask=dict(field="roptions.mask",), + write_which=dict(field="roptions.which", maxlen=2, minlen=2, usedefault=True,), + write_wrap=dict(field="roptions.wrap",), ) inputs = Realign.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Realign_outputs(): output_map = dict( - mean_image=dict(extensions=None, ), + mean_image=dict(extensions=None,), modified_in_files=dict(), realigned_files=dict(), realignment_parameters=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_RealignUnwarp.py b/nipype/interfaces/spm/tests/test_auto_RealignUnwarp.py index 85d0ba3d94..fd4e420423 100644 --- a/nipype/interfaces/spm/tests/test_auto_RealignUnwarp.py +++ b/nipype/interfaces/spm/tests/test_auto_RealignUnwarp.py @@ -4,79 +4,52 @@ def test_RealignUnwarp_inputs(): input_map = dict( - est_basis_func=dict(field='uweoptions.basfcn', ), - est_first_order_effects=dict(field='uweoptions.fot', ), - est_jacobian_deformations=dict(field='uweoptions.jm', ), + est_basis_func=dict(field="uweoptions.basfcn",), + est_first_order_effects=dict(field="uweoptions.fot",), + est_jacobian_deformations=dict(field="uweoptions.jm",), est_num_of_iterations=dict( - field='uweoptions.noi', - maxlen=1, - minlen=1, - usedefault=True, + field="uweoptions.noi", maxlen=1, minlen=1, usedefault=True, ), - est_re_est_mov_par=dict(field='uweoptions.rem', ), + est_re_est_mov_par=dict(field="uweoptions.rem",), est_reg_factor=dict( - field='uweoptions.lambda', - maxlen=1, - minlen=1, - usedefault=True, - ), - est_reg_order=dict(field='uweoptions.regorder', ), - est_second_order_effects=dict(field='uweoptions.sot', ), - est_taylor_expansion_point=dict( - field='uweoptions.expround', - usedefault=True, - ), - est_unwarp_fwhm=dict(field='uweoptions.uwfwhm', ), - fwhm=dict(field='eoptions.fwhm', ), - in_files=dict( - copyfile=True, - field='data.scans', - mandatory=True, - ), - interp=dict(field='eoptions.einterp', ), + field="uweoptions.lambda", maxlen=1, minlen=1, usedefault=True, + ), + est_reg_order=dict(field="uweoptions.regorder",), + est_second_order_effects=dict(field="uweoptions.sot",), + est_taylor_expansion_point=dict(field="uweoptions.expround", usedefault=True,), + est_unwarp_fwhm=dict(field="uweoptions.uwfwhm",), + fwhm=dict(field="eoptions.fwhm",), + in_files=dict(copyfile=True, field="data.scans", mandatory=True,), + interp=dict(field="eoptions.einterp",), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), - out_prefix=dict( - field='uwroptions.prefix', - usedefault=True, - ), + mfile=dict(usedefault=True,), + out_prefix=dict(field="uwroptions.prefix", usedefault=True,), paths=dict(), - phase_map=dict( - copyfile=False, - extensions=None, - field='data.pmscan', - ), - quality=dict(field='eoptions.quality', ), - register_to_mean=dict(field='eoptions.rtm', ), - reslice_interp=dict(field='uwroptions.rinterp', ), - reslice_mask=dict(field='uwroptions.mask', ), + phase_map=dict(copyfile=False, extensions=None, field="data.pmscan",), + quality=dict(field="eoptions.quality",), + register_to_mean=dict(field="eoptions.rtm",), + reslice_interp=dict(field="uwroptions.rinterp",), + reslice_mask=dict(field="uwroptions.mask",), reslice_which=dict( - field='uwroptions.uwwhich', - maxlen=2, - minlen=2, - usedefault=True, + field="uwroptions.uwwhich", maxlen=2, minlen=2, 
usedefault=True, ), - reslice_wrap=dict(field='uwroptions.wrap', ), - separation=dict(field='eoptions.sep', ), + reslice_wrap=dict(field="uwroptions.wrap",), + separation=dict(field="eoptions.sep",), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), - weight_img=dict( - extensions=None, - field='eoptions.weight', - ), - wrap=dict(field='eoptions.ewrap', ), + use_v8struct=dict(min_ver="8", usedefault=True,), + weight_img=dict(extensions=None, field="eoptions.weight",), + wrap=dict(field="eoptions.ewrap",), ) inputs = RealignUnwarp.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_RealignUnwarp_outputs(): output_map = dict( - mean_image=dict(extensions=None, ), + mean_image=dict(extensions=None,), modified_in_files=dict(), realigned_unwarped_files=dict(), realignment_parameters=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_Reslice.py b/nipype/interfaces/spm/tests/test_auto_Reslice.py index ca6a89ff67..46083f8192 100644 --- a/nipype/interfaces/spm/tests/test_auto_Reslice.py +++ b/nipype/interfaces/spm/tests/test_auto_Reslice.py @@ -4,32 +4,25 @@ def test_Reslice_inputs(): input_map = dict( - in_file=dict( - extensions=None, - mandatory=True, - ), - interp=dict(usedefault=True, ), + in_file=dict(extensions=None, mandatory=True,), + interp=dict(usedefault=True,), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), - out_file=dict(extensions=None, ), + mfile=dict(usedefault=True,), + out_file=dict(extensions=None,), paths=dict(), - space_defining=dict( - extensions=None, - mandatory=True, - ), + space_defining=dict(extensions=None, mandatory=True,), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True,), ) inputs = Reslice.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Reslice_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Reslice.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py b/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py index f1ec8393ff..ebea9a0bf4 100644 --- a/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py +++ b/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py @@ -4,33 +4,26 @@ def test_ResliceToReference_inputs(): input_map = dict( - bounding_box=dict(field='comp{2}.idbbvox.bb', ), - in_files=dict( - field='fnames', - mandatory=True, - ), - interpolation=dict(field='interp', ), + bounding_box=dict(field="comp{2}.idbbvox.bb",), + in_files=dict(field="fnames", mandatory=True,), + interpolation=dict(field="interp",), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), + mfile=dict(usedefault=True,), paths=dict(), - target=dict( - extensions=None, - field='comp{1}.id.space', - ), + target=dict(extensions=None, field="comp{1}.id.space",), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), - voxel_sizes=dict(field='comp{2}.idbbvox.vox', ), + use_v8struct=dict(min_ver="8", usedefault=True,), + voxel_sizes=dict(field="comp{2}.idbbvox.vox",), ) inputs = ResliceToReference.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], 
metakey) == value + + def test_ResliceToReference_outputs(): - output_map = dict(out_files=dict(), ) + output_map = dict(out_files=dict(),) outputs = ResliceToReference.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_SPMCommand.py b/nipype/interfaces/spm/tests/test_auto_SPMCommand.py index 32a6b28b5c..bde05ad1be 100644 --- a/nipype/interfaces/spm/tests/test_auto_SPMCommand.py +++ b/nipype/interfaces/spm/tests/test_auto_SPMCommand.py @@ -5,13 +5,10 @@ def test_SPMCommand_inputs(): input_map = dict( matlab_cmd=dict(), - mfile=dict(usedefault=True, ), + mfile=dict(usedefault=True,), paths=dict(), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True,), ) inputs = SPMCommand.input_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_Segment.py b/nipype/interfaces/spm/tests/test_auto_Segment.py index c746cd258e..ea07881981 100644 --- a/nipype/interfaces/spm/tests/test_auto_Segment.py +++ b/nipype/interfaces/spm/tests/test_auto_Segment.py @@ -4,61 +4,51 @@ def test_Segment_inputs(): input_map = dict( - affine_regularization=dict(field='opts.regtype', ), - bias_fwhm=dict(field='opts.biasfwhm', ), - bias_regularization=dict(field='opts.biasreg', ), - clean_masks=dict(field='output.cleanup', ), - csf_output_type=dict(field='output.CSF', ), - data=dict( - copyfile=False, - field='data', - mandatory=True, - ), - gaussians_per_class=dict(field='opts.ngaus', ), - gm_output_type=dict(field='output.GM', ), - mask_image=dict( - extensions=None, - field='opts.msk', - ), + affine_regularization=dict(field="opts.regtype",), + bias_fwhm=dict(field="opts.biasfwhm",), + bias_regularization=dict(field="opts.biasreg",), + clean_masks=dict(field="output.cleanup",), + csf_output_type=dict(field="output.CSF",), + data=dict(copyfile=False, field="data", mandatory=True,), + gaussians_per_class=dict(field="opts.ngaus",), + gm_output_type=dict(field="output.GM",), + mask_image=dict(extensions=None, field="opts.msk",), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), + mfile=dict(usedefault=True,), paths=dict(), - sampling_distance=dict(field='opts.samp', ), - save_bias_corrected=dict(field='output.biascor', ), - tissue_prob_maps=dict(field='opts.tpm', ), + sampling_distance=dict(field="opts.samp",), + save_bias_corrected=dict(field="output.biascor",), + tissue_prob_maps=dict(field="opts.tpm",), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), - warp_frequency_cutoff=dict(field='opts.warpco', ), - warping_regularization=dict(field='opts.warpreg', ), - wm_output_type=dict(field='output.WM', ), + use_v8struct=dict(min_ver="8", usedefault=True,), + warp_frequency_cutoff=dict(field="opts.warpco",), + warping_regularization=dict(field="opts.warpreg",), + wm_output_type=dict(field="output.WM",), ) inputs = Segment.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Segment_outputs(): output_map = dict( - bias_corrected_image=dict(extensions=None, ), - inverse_transformation_mat=dict(extensions=None, ), - modulated_csf_image=dict(extensions=None, ), - modulated_gm_image=dict(extensions=None, ), + bias_corrected_image=dict(extensions=None,), + inverse_transformation_mat=dict(extensions=None,), + modulated_csf_image=dict(extensions=None,), + modulated_gm_image=dict(extensions=None,), modulated_input_image=dict( - deprecated='0.10', - 
extensions=None, - new_name='bias_corrected_image', + deprecated="0.10", extensions=None, new_name="bias_corrected_image", ), - modulated_wm_image=dict(extensions=None, ), - native_csf_image=dict(extensions=None, ), - native_gm_image=dict(extensions=None, ), - native_wm_image=dict(extensions=None, ), - normalized_csf_image=dict(extensions=None, ), - normalized_gm_image=dict(extensions=None, ), - normalized_wm_image=dict(extensions=None, ), - transformation_mat=dict(extensions=None, ), + modulated_wm_image=dict(extensions=None,), + native_csf_image=dict(extensions=None,), + native_gm_image=dict(extensions=None,), + native_wm_image=dict(extensions=None,), + normalized_csf_image=dict(extensions=None,), + normalized_gm_image=dict(extensions=None,), + normalized_wm_image=dict(extensions=None,), + transformation_mat=dict(extensions=None,), ) outputs = Segment.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_SliceTiming.py b/nipype/interfaces/spm/tests/test_auto_SliceTiming.py index 369392b5e9..8c99e4c4e3 100644 --- a/nipype/interfaces/spm/tests/test_auto_SliceTiming.py +++ b/nipype/interfaces/spm/tests/test_auto_SliceTiming.py @@ -4,51 +4,28 @@ def test_SliceTiming_inputs(): input_map = dict( - in_files=dict( - copyfile=False, - field='scans', - mandatory=True, - ), + in_files=dict(copyfile=False, field="scans", mandatory=True,), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), - num_slices=dict( - field='nslices', - mandatory=True, - ), - out_prefix=dict( - field='prefix', - usedefault=True, - ), + mfile=dict(usedefault=True,), + num_slices=dict(field="nslices", mandatory=True,), + out_prefix=dict(field="prefix", usedefault=True,), paths=dict(), - ref_slice=dict( - field='refslice', - mandatory=True, - ), - slice_order=dict( - field='so', - mandatory=True, - ), - time_acquisition=dict( - field='ta', - mandatory=True, - ), - time_repetition=dict( - field='tr', - mandatory=True, - ), + ref_slice=dict(field="refslice", mandatory=True,), + slice_order=dict(field="so", mandatory=True,), + time_acquisition=dict(field="ta", mandatory=True,), + time_repetition=dict(field="tr", mandatory=True,), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True,), ) inputs = SliceTiming.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SliceTiming_outputs(): - output_map = dict(timecorrected_files=dict(), ) + output_map = dict(timecorrected_files=dict(),) outputs = SliceTiming.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_Smooth.py b/nipype/interfaces/spm/tests/test_auto_Smooth.py index b4e2d42b0f..4e0025a292 100644 --- a/nipype/interfaces/spm/tests/test_auto_Smooth.py +++ b/nipype/interfaces/spm/tests/test_auto_Smooth.py @@ -4,34 +4,26 @@ def test_Smooth_inputs(): input_map = dict( - data_type=dict(field='dtype', ), - fwhm=dict(field='fwhm', ), - implicit_masking=dict(field='im', ), - in_files=dict( - copyfile=False, - field='data', - mandatory=True, - ), + data_type=dict(field="dtype",), + fwhm=dict(field="fwhm",), + implicit_masking=dict(field="im",), + in_files=dict(copyfile=False, field="data", mandatory=True,), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), - out_prefix=dict( - field='prefix', - usedefault=True, - ), + mfile=dict(usedefault=True,), + out_prefix=dict(field="prefix", usedefault=True,), paths=dict(), use_mcr=dict(), - 
use_v8struct=dict( - min_ver='8', - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True,), ) inputs = Smooth.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Smooth_outputs(): - output_map = dict(smoothed_files=dict(), ) + output_map = dict(smoothed_files=dict(),) outputs = Smooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_Threshold.py b/nipype/interfaces/spm/tests/test_auto_Threshold.py index 3f73cf91e0..75fcbf06e2 100644 --- a/nipype/interfaces/spm/tests/test_auto_Threshold.py +++ b/nipype/interfaces/spm/tests/test_auto_Threshold.py @@ -4,46 +4,37 @@ def test_Threshold_inputs(): input_map = dict( - contrast_index=dict(mandatory=True, ), - extent_fdr_p_threshold=dict(usedefault=True, ), - extent_threshold=dict(usedefault=True, ), - force_activation=dict(usedefault=True, ), - height_threshold=dict(usedefault=True, ), - height_threshold_type=dict(usedefault=True, ), + contrast_index=dict(mandatory=True,), + extent_fdr_p_threshold=dict(usedefault=True,), + extent_threshold=dict(usedefault=True,), + force_activation=dict(usedefault=True,), + height_threshold=dict(usedefault=True,), + height_threshold_type=dict(usedefault=True,), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), + mfile=dict(usedefault=True,), paths=dict(), - spm_mat_file=dict( - copyfile=True, - extensions=None, - mandatory=True, - ), - stat_image=dict( - copyfile=False, - extensions=None, - mandatory=True, - ), - use_fwe_correction=dict(usedefault=True, ), + spm_mat_file=dict(copyfile=True, extensions=None, mandatory=True,), + stat_image=dict(copyfile=False, extensions=None, mandatory=True,), + use_fwe_correction=dict(usedefault=True,), use_mcr=dict(), - use_topo_fdr=dict(usedefault=True, ), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), + use_topo_fdr=dict(usedefault=True,), + use_v8struct=dict(min_ver="8", usedefault=True,), ) inputs = Threshold.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Threshold_outputs(): output_map = dict( activation_forced=dict(), cluster_forming_thr=dict(), n_clusters=dict(), - pre_topo_fdr_map=dict(extensions=None, ), + pre_topo_fdr_map=dict(extensions=None,), pre_topo_n_clusters=dict(), - thresholded_map=dict(extensions=None, ), + thresholded_map=dict(extensions=None,), ) outputs = Threshold.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py b/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py index 33c40f26ce..c654be7b3d 100644 --- a/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py +++ b/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py @@ -4,33 +4,24 @@ def test_ThresholdStatistics_inputs(): input_map = dict( - contrast_index=dict(mandatory=True, ), - extent_threshold=dict(usedefault=True, ), - height_threshold=dict(mandatory=True, ), + contrast_index=dict(mandatory=True,), + extent_threshold=dict(usedefault=True,), + height_threshold=dict(mandatory=True,), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), + mfile=dict(usedefault=True,), paths=dict(), - spm_mat_file=dict( - copyfile=True, - extensions=None, - mandatory=True, - ), - stat_image=dict( - copyfile=False, - extensions=None, - mandatory=True, - ), + spm_mat_file=dict(copyfile=True, extensions=None, mandatory=True,), 
+ stat_image=dict(copyfile=False, extensions=None, mandatory=True,), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True,), ) inputs = ThresholdStatistics.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ThresholdStatistics_outputs(): output_map = dict( clusterwise_P_FDR=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py b/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py index 30b99be080..fa0cc9e331 100644 --- a/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py @@ -4,65 +4,53 @@ def test_TwoSampleTTestDesign_inputs(): input_map = dict( - covariates=dict(field='cov', ), - dependent=dict(field='des.t2.dept', ), - explicit_mask_file=dict( - extensions=None, - field='masking.em', - ), + covariates=dict(field="cov",), + dependent=dict(field="des.t2.dept",), + explicit_mask_file=dict(extensions=None, field="masking.em",), global_calc_mean=dict( - field='globalc.g_mean', - xor=['global_calc_omit', 'global_calc_values'], + field="globalc.g_mean", xor=["global_calc_omit", "global_calc_values"], ), global_calc_omit=dict( - field='globalc.g_omit', - xor=['global_calc_mean', 'global_calc_values'], + field="globalc.g_omit", xor=["global_calc_mean", "global_calc_values"], ), global_calc_values=dict( - field='globalc.g_user.global_uval', - xor=['global_calc_mean', 'global_calc_omit'], - ), - global_normalization=dict(field='globalm.glonorm', ), - group1_files=dict( - field='des.t2.scans1', - mandatory=True, - ), - group2_files=dict( - field='des.t2.scans2', - mandatory=True, + field="globalc.g_user.global_uval", + xor=["global_calc_mean", "global_calc_omit"], ), + global_normalization=dict(field="globalm.glonorm",), + group1_files=dict(field="des.t2.scans1", mandatory=True,), + group2_files=dict(field="des.t2.scans2", mandatory=True,), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), - no_grand_mean_scaling=dict(field='globalm.gmsca.gmsca_no', ), + mfile=dict(usedefault=True,), + no_grand_mean_scaling=dict(field="globalm.gmsca.gmsca_no",), paths=dict(), - spm_mat_dir=dict(field='dir', ), + spm_mat_dir=dict(field="dir",), threshold_mask_absolute=dict( - field='masking.tm.tma.athresh', - xor=['threshold_mask_none', 'threshold_mask_relative'], + field="masking.tm.tma.athresh", + xor=["threshold_mask_none", "threshold_mask_relative"], ), threshold_mask_none=dict( - field='masking.tm.tm_none', - xor=['threshold_mask_absolute', 'threshold_mask_relative'], + field="masking.tm.tm_none", + xor=["threshold_mask_absolute", "threshold_mask_relative"], ), threshold_mask_relative=dict( - field='masking.tm.tmr.rthresh', - xor=['threshold_mask_absolute', 'threshold_mask_none'], + field="masking.tm.tmr.rthresh", + xor=["threshold_mask_absolute", "threshold_mask_none"], ), - unequal_variance=dict(field='des.t2.variance', ), - use_implicit_threshold=dict(field='masking.im', ), + unequal_variance=dict(field="des.t2.variance",), + use_implicit_threshold=dict(field="masking.im",), use_mcr=dict(), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True,), ) inputs = TwoSampleTTestDesign.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def 
test_TwoSampleTTestDesign_outputs(): - output_map = dict(spm_mat_file=dict(extensions=None, ), ) + output_map = dict(spm_mat_file=dict(extensions=None,),) outputs = TwoSampleTTestDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_VBMSegment.py b/nipype/interfaces/spm/tests/test_auto_VBMSegment.py index 586b7b53e1..626ce9893f 100644 --- a/nipype/interfaces/spm/tests/test_auto_VBMSegment.py +++ b/nipype/interfaces/spm/tests/test_auto_VBMSegment.py @@ -5,146 +5,72 @@ def test_VBMSegment_inputs(): input_map = dict( bias_corrected_affine=dict( - field='estwrite.output.bias.affine', - usedefault=True, + field="estwrite.output.bias.affine", usedefault=True, ), bias_corrected_native=dict( - field='estwrite.output.bias.native', - usedefault=True, + field="estwrite.output.bias.native", usedefault=True, ), bias_corrected_normalized=dict( - field='estwrite.output.bias.warped', - usedefault=True, - ), - bias_fwhm=dict( - field='estwrite.opts.biasfwhm', - usedefault=True, - ), - bias_regularization=dict( - field='estwrite.opts.biasreg', - usedefault=True, - ), - cleanup_partitions=dict( - field='estwrite.extopts.cleanup', - usedefault=True, - ), - csf_dartel=dict( - field='estwrite.output.CSF.dartel', - usedefault=True, + field="estwrite.output.bias.warped", usedefault=True, ), + bias_fwhm=dict(field="estwrite.opts.biasfwhm", usedefault=True,), + bias_regularization=dict(field="estwrite.opts.biasreg", usedefault=True,), + cleanup_partitions=dict(field="estwrite.extopts.cleanup", usedefault=True,), + csf_dartel=dict(field="estwrite.output.CSF.dartel", usedefault=True,), csf_modulated_normalized=dict( - field='estwrite.output.CSF.modulated', - usedefault=True, - ), - csf_native=dict( - field='estwrite.output.CSF.native', - usedefault=True, - ), - csf_normalized=dict( - field='estwrite.output.CSF.warped', - usedefault=True, + field="estwrite.output.CSF.modulated", usedefault=True, ), + csf_native=dict(field="estwrite.output.CSF.native", usedefault=True,), + csf_normalized=dict(field="estwrite.output.CSF.warped", usedefault=True,), dartel_template=dict( - extensions=['.hdr', '.img', '.img.gz', '.nii'], - field='estwrite.extopts.dartelwarp.normhigh.darteltpm', - ), - deformation_field=dict( - field='estwrite.output.warps', - usedefault=True, - ), - display_results=dict( - field='estwrite.extopts.print', - usedefault=True, - ), - gaussians_per_class=dict(usedefault=True, ), - gm_dartel=dict( - field='estwrite.output.GM.dartel', - usedefault=True, + extensions=[".hdr", ".img", ".img.gz", ".nii"], + field="estwrite.extopts.dartelwarp.normhigh.darteltpm", ), + deformation_field=dict(field="estwrite.output.warps", usedefault=True,), + display_results=dict(field="estwrite.extopts.print", usedefault=True,), + gaussians_per_class=dict(usedefault=True,), + gm_dartel=dict(field="estwrite.output.GM.dartel", usedefault=True,), gm_modulated_normalized=dict( - field='estwrite.output.GM.modulated', - usedefault=True, - ), - gm_native=dict( - field='estwrite.output.GM.native', - usedefault=True, - ), - gm_normalized=dict( - field='estwrite.output.GM.warped', - usedefault=True, - ), - in_files=dict( - copyfile=False, - field='estwrite.data', - mandatory=True, - ), - jacobian_determinant=dict( - field='estwrite.jacobian.warped', - usedefault=True, + field="estwrite.output.GM.modulated", usedefault=True, ), + gm_native=dict(field="estwrite.output.GM.native", usedefault=True,), + gm_normalized=dict(field="estwrite.output.GM.warped", usedefault=True,), + 
in_files=dict(copyfile=False, field="estwrite.data", mandatory=True,), + jacobian_determinant=dict(field="estwrite.jacobian.warped", usedefault=True,), matlab_cmd=dict(), - mfile=dict(usedefault=True, ), - mrf_weighting=dict( - field='estwrite.extopts.mrf', - usedefault=True, - ), + mfile=dict(usedefault=True,), + mrf_weighting=dict(field="estwrite.extopts.mrf", usedefault=True,), paths=dict(), - pve_label_dartel=dict( - field='estwrite.output.label.dartel', - usedefault=True, - ), - pve_label_native=dict( - field='estwrite.output.label.native', - usedefault=True, - ), + pve_label_dartel=dict(field="estwrite.output.label.dartel", usedefault=True,), + pve_label_native=dict(field="estwrite.output.label.native", usedefault=True,), pve_label_normalized=dict( - field='estwrite.output.label.warped', - usedefault=True, - ), - sampling_distance=dict( - field='estwrite.opts.samp', - usedefault=True, + field="estwrite.output.label.warped", usedefault=True, ), - spatial_normalization=dict(usedefault=True, ), + sampling_distance=dict(field="estwrite.opts.samp", usedefault=True,), + spatial_normalization=dict(usedefault=True,), tissues=dict( - extensions=['.hdr', '.img', '.img.gz', '.nii'], - field='estwrite.tpm', + extensions=[".hdr", ".img", ".img.gz", ".nii"], field="estwrite.tpm", ), use_mcr=dict(), use_sanlm_denoising_filter=dict( - field='estwrite.extopts.sanlm', - usedefault=True, - ), - use_v8struct=dict( - min_ver='8', - usedefault=True, - ), - warping_regularization=dict( - field='estwrite.opts.warpreg', - usedefault=True, - ), - wm_dartel=dict( - field='estwrite.output.WM.dartel', - usedefault=True, + field="estwrite.extopts.sanlm", usedefault=True, ), + use_v8struct=dict(min_ver="8", usedefault=True,), + warping_regularization=dict(field="estwrite.opts.warpreg", usedefault=True,), + wm_dartel=dict(field="estwrite.output.WM.dartel", usedefault=True,), wm_modulated_normalized=dict( - field='estwrite.output.WM.modulated', - usedefault=True, - ), - wm_native=dict( - field='estwrite.output.WM.native', - usedefault=True, - ), - wm_normalized=dict( - field='estwrite.output.WM.warped', - usedefault=True, + field="estwrite.output.WM.modulated", usedefault=True, ), + wm_native=dict(field="estwrite.output.WM.native", usedefault=True,), + wm_normalized=dict(field="estwrite.output.WM.warped", usedefault=True,), ) inputs = VBMSegment.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_VBMSegment_outputs(): output_map = dict( bias_corrected_images=dict(), diff --git a/nipype/interfaces/spm/tests/test_base.py b/nipype/interfaces/spm/tests/test_base.py index c59cd6b234..c2c991d742 100644 --- a/nipype/interfaces/spm/tests/test_base.py +++ b/nipype/interfaces/spm/tests/test_base.py @@ -13,7 +13,7 @@ from nipype.interfaces.spm.base import SPMCommandInputSpec from nipype.interfaces.base import traits -mlab.MatlabCommand.set_default_matlab_cmd(os.getenv('MATLABCMD', 'matlab')) +mlab.MatlabCommand.set_default_matlab_cmd(os.getenv("MATLABCMD", "matlab")) def test_scan_for_fnames(create_files_in_directory): @@ -31,7 +31,7 @@ def test_spm_path(): spm_path = spm.Info.path() if spm_path is not None: assert isinstance(spm_path, (str, bytes)) - assert 'spm' in spm_path.lower() + assert "spm" in spm_path.lower() def test_use_mfile(): @@ -49,7 +49,7 @@ class TestClass(spm.SPMCommand): pass # test without FORCE_SPMMCR, SPMMCRCMD set - for varname in ['FORCE_SPMMCR', 'SPMMCRCMD']: + for varname in 
["FORCE_SPMMCR", "SPMMCRCMD"]: try: del os.environ[varname] except KeyError: @@ -58,15 +58,15 @@ class TestClass(spm.SPMCommand): assert dc._use_mcr is None assert dc._matlab_cmd is None # test with only FORCE_SPMMCR set - os.environ['FORCE_SPMMCR'] = '1' + os.environ["FORCE_SPMMCR"] = "1" dc = TestClass() assert dc._use_mcr assert dc._matlab_cmd is None # test with both, FORCE_SPMMCR and SPMMCRCMD set - os.environ['SPMMCRCMD'] = 'spmcmd' + os.environ["SPMMCRCMD"] = "spmcmd" dc = TestClass() assert dc._use_mcr - assert dc._matlab_cmd == 'spmcmd' + assert dc._matlab_cmd == "spmcmd" # restore environment os.environ.clear() os.environ.update(saved_env) @@ -78,19 +78,19 @@ class TestClass(spm.SPMCommand): input_spec = spm.SPMCommandInputSpec dc = TestClass() # dc = derived_class - dc.inputs.matlab_cmd = 'foo' - assert dc.mlab._cmd == 'foo' + dc.inputs.matlab_cmd = "foo" + assert dc.mlab._cmd == "foo" def test_cmd_update2(): class TestClass(spm.SPMCommand): - _jobtype = 'jobtype' - _jobname = 'jobname' + _jobtype = "jobtype" + _jobname = "jobname" input_spec = spm.SPMCommandInputSpec dc = TestClass() # dc = derived_class - assert dc.jobtype == 'jobtype' - assert dc.jobname == 'jobname' + assert dc.jobtype == "jobtype" + assert dc.jobname == "jobname" def test_reformat_dict_for_savemat(): @@ -98,8 +98,8 @@ class TestClass(spm.SPMCommand): input_spec = spm.SPMCommandInputSpec dc = TestClass() # dc = derived_class - out = dc._reformat_dict_for_savemat({'a': {'b': {'c': []}}}) - assert out == [{'a': [{'b': [{'c': []}]}]}] + out = dc._reformat_dict_for_savemat({"a": {"b": {"c": []}}}) + assert out == [{"a": [{"b": [{"c": []}]}]}] def test_generate_job(create_files_in_directory): @@ -108,58 +108,60 @@ class TestClass(spm.SPMCommand): dc = TestClass() # dc = derived_class out = dc._generate_job() - assert out == '' + assert out == "" # struct array - contents = {'contents': [1, 2, 3, 4]} + contents = {"contents": [1, 2, 3, 4]} out = dc._generate_job(contents=contents) - assert out == ('.contents(1) = 1;\n.contents(2) = 2;' - '\n.contents(3) = 3;\n.contents(4) = 4;\n') + assert out == ( + ".contents(1) = 1;\n.contents(2) = 2;" + "\n.contents(3) = 3;\n.contents(4) = 4;\n" + ) # cell array of strings filelist, outdir = create_files_in_directory names = spm.scans_for_fnames(filelist, keep4d=True) - contents = {'files': names} - out = dc._generate_job(prefix='test', contents=contents) + contents = {"files": names} + out = dc._generate_job(prefix="test", contents=contents) assert out == "test.files = {...\n'a.nii';...\n'b.nii';...\n};\n" # string assignment - contents = 'foo' - out = dc._generate_job(prefix='test', contents=contents) + contents = "foo" + out = dc._generate_job(prefix="test", contents=contents) assert out == "test = 'foo';\n" # cell array of vectors - contents = {'onsets': np.array((1, ), dtype=object)} - contents['onsets'][0] = [1, 2, 3, 4] - out = dc._generate_job(prefix='test', contents=contents) - assert out == 'test.onsets = {...\n[1, 2, 3, 4];...\n};\n' + contents = {"onsets": np.array((1,), dtype=object)} + contents["onsets"][0] = [1, 2, 3, 4] + out = dc._generate_job(prefix="test", contents=contents) + assert out == "test.onsets = {...\n[1, 2, 3, 4];...\n};\n" def test_bool(): class TestClassInputSpec(SPMCommandInputSpec): - test_in = include_intercept = traits.Bool(field='testfield') + test_in = include_intercept = traits.Bool(field="testfield") class TestClass(spm.SPMCommand): input_spec = TestClassInputSpec - _jobtype = 'jobtype' - _jobname = 'jobname' + _jobtype = "jobtype" + 
_jobname = "jobname" dc = TestClass() # dc = derived_class dc.inputs.test_in = True out = dc._make_matlab_command(dc._parse_inputs()) - assert out.find('jobs{1}.spm.jobtype.jobname.testfield = 1;') > 0, 1 + assert out.find("jobs{1}.spm.jobtype.jobname.testfield = 1;") > 0, 1 dc.inputs.use_v8struct = False out = dc._make_matlab_command(dc._parse_inputs()) - assert out.find('jobs{1}.jobtype{1}.jobname{1}.testfield = 1;') > 0, 1 + assert out.find("jobs{1}.jobtype{1}.jobname{1}.testfield = 1;") > 0, 1 def test_make_matlab_command(create_files_in_directory): class TestClass(spm.SPMCommand): - _jobtype = 'jobtype' - _jobname = 'jobname' + _jobtype = "jobtype" + _jobname = "jobname" input_spec = spm.SPMCommandInputSpec dc = TestClass() # dc = derived_class filelist, outdir = create_files_in_directory - contents = {'contents': [1, 2, 3, 4]} + contents = {"contents": [1, 2, 3, 4]} script = dc._make_matlab_command([contents]) - assert 'jobs{1}.spm.jobtype.jobname.contents(3) = 3;' in script + assert "jobs{1}.spm.jobtype.jobname.contents(3) = 3;" in script dc.inputs.use_v8struct = False script = dc._make_matlab_command([contents]) - assert 'jobs{1}.jobtype{1}.jobname{1}.contents(3) = 3;' in script + assert "jobs{1}.jobtype{1}.jobname{1}.contents(3) = 3;" in script diff --git a/nipype/interfaces/spm/tests/test_model.py b/nipype/interfaces/spm/tests/test_model.py index a9cb957944..a960d06fb8 100644 --- a/nipype/interfaces/spm/tests/test_model.py +++ b/nipype/interfaces/spm/tests/test_model.py @@ -6,39 +6,39 @@ import nipype.interfaces.spm.model as spm import nipype.interfaces.matlab as mlab -mlab.MatlabCommand.set_default_matlab_cmd(os.getenv('MATLABCMD', 'matlab')) +mlab.MatlabCommand.set_default_matlab_cmd(os.getenv("MATLABCMD", "matlab")) def test_level1design(): - assert spm.Level1Design._jobtype == 'stats' - assert spm.Level1Design._jobname == 'fmri_spec' + assert spm.Level1Design._jobtype == "stats" + assert spm.Level1Design._jobname == "fmri_spec" def test_estimatemodel(): - assert spm.EstimateModel._jobtype == 'stats' - assert spm.EstimateModel._jobname == 'fmri_est' + assert spm.EstimateModel._jobtype == "stats" + assert spm.EstimateModel._jobname == "fmri_est" def test_estimatecontrast(): - assert spm.EstimateContrast._jobtype == 'stats' - assert spm.EstimateContrast._jobname == 'con' + assert spm.EstimateContrast._jobtype == "stats" + assert spm.EstimateContrast._jobname == "con" def test_threshold(): - assert spm.Threshold._jobtype == 'basetype' - assert spm.Threshold._jobname == 'basename' + assert spm.Threshold._jobtype == "basetype" + assert spm.Threshold._jobname == "basename" def test_factorialdesign(): - assert spm.FactorialDesign._jobtype == 'stats' - assert spm.FactorialDesign._jobname == 'factorial_design' + assert spm.FactorialDesign._jobtype == "stats" + assert spm.FactorialDesign._jobname == "factorial_design" def test_onesamplettestdesign(): - assert spm.OneSampleTTestDesign._jobtype == 'stats' - assert spm.OneSampleTTestDesign._jobname == 'factorial_design' + assert spm.OneSampleTTestDesign._jobtype == "stats" + assert spm.OneSampleTTestDesign._jobname == "factorial_design" def test_twosamplettestdesign(): - assert spm.TwoSampleTTestDesign._jobtype == 'stats' - assert spm.TwoSampleTTestDesign._jobname == 'factorial_design' + assert spm.TwoSampleTTestDesign._jobtype == "stats" + assert spm.TwoSampleTTestDesign._jobname == "factorial_design" diff --git a/nipype/interfaces/spm/tests/test_preprocess.py b/nipype/interfaces/spm/tests/test_preprocess.py index 2b70b7bb54..de5c79caba 
100644 --- a/nipype/interfaces/spm/tests/test_preprocess.py +++ b/nipype/interfaces/spm/tests/test_preprocess.py @@ -10,107 +10,106 @@ from nipype.interfaces.spm import no_spm import nipype.interfaces.matlab as mlab -mlab.MatlabCommand.set_default_matlab_cmd(os.getenv('MATLABCMD', 'matlab')) +mlab.MatlabCommand.set_default_matlab_cmd(os.getenv("MATLABCMD", "matlab")) def test_slicetiming(): - assert spm.SliceTiming._jobtype == 'temporal' - assert spm.SliceTiming._jobname == 'st' + assert spm.SliceTiming._jobtype == "temporal" + assert spm.SliceTiming._jobname == "st" def test_slicetiming_list_outputs(create_files_in_directory): filelist, outdir = create_files_in_directory st = spm.SliceTiming(in_files=filelist[0]) - assert st._list_outputs()['timecorrected_files'][0][0] == 'a' + assert st._list_outputs()["timecorrected_files"][0][0] == "a" def test_realign(): - assert spm.Realign._jobtype == 'spatial' - assert spm.Realign._jobname == 'realign' - assert spm.Realign().inputs.jobtype == 'estwrite' + assert spm.Realign._jobtype == "spatial" + assert spm.Realign._jobname == "realign" + assert spm.Realign().inputs.jobtype == "estwrite" def test_realign_list_outputs(create_files_in_directory): filelist, outdir = create_files_in_directory rlgn = spm.Realign(in_files=filelist[0]) - assert rlgn._list_outputs()['realignment_parameters'][0].startswith('rp_') - assert rlgn._list_outputs()['realigned_files'][0].startswith('r') - assert rlgn._list_outputs()['mean_image'].startswith('mean') + assert rlgn._list_outputs()["realignment_parameters"][0].startswith("rp_") + assert rlgn._list_outputs()["realigned_files"][0].startswith("r") + assert rlgn._list_outputs()["mean_image"].startswith("mean") def test_coregister(): - assert spm.Coregister._jobtype == 'spatial' - assert spm.Coregister._jobname == 'coreg' - assert spm.Coregister().inputs.jobtype == 'estwrite' + assert spm.Coregister._jobtype == "spatial" + assert spm.Coregister._jobname == "coreg" + assert spm.Coregister().inputs.jobtype == "estwrite" def test_coregister_list_outputs(create_files_in_directory): filelist, outdir = create_files_in_directory coreg = spm.Coregister(source=filelist[0]) - assert coreg._list_outputs()['coregistered_source'][0].startswith('r') + assert coreg._list_outputs()["coregistered_source"][0].startswith("r") coreg = spm.Coregister(source=filelist[0], apply_to_files=filelist[1]) - assert coreg._list_outputs()['coregistered_files'][0].startswith('r') + assert coreg._list_outputs()["coregistered_files"][0].startswith("r") def test_normalize(): - assert spm.Normalize._jobtype == 'spatial' - assert spm.Normalize._jobname == 'normalise' - assert spm.Normalize().inputs.jobtype == 'estwrite' + assert spm.Normalize._jobtype == "spatial" + assert spm.Normalize._jobname == "normalise" + assert spm.Normalize().inputs.jobtype == "estwrite" def test_normalize_list_outputs(create_files_in_directory): filelist, outdir = create_files_in_directory norm = spm.Normalize(source=filelist[0]) - assert norm._list_outputs()['normalized_source'][0].startswith('w') + assert norm._list_outputs()["normalized_source"][0].startswith("w") norm = spm.Normalize(source=filelist[0], apply_to_files=filelist[1]) - assert norm._list_outputs()['normalized_files'][0].startswith('w') + assert norm._list_outputs()["normalized_files"][0].startswith("w") def test_normalize12(): - assert spm.Normalize12._jobtype == 'spatial' - assert spm.Normalize12._jobname == 'normalise' - assert spm.Normalize12().inputs.jobtype == 'estwrite' + assert spm.Normalize12._jobtype == 
"spatial" + assert spm.Normalize12._jobname == "normalise" + assert spm.Normalize12().inputs.jobtype == "estwrite" def test_normalize12_list_outputs(create_files_in_directory): filelist, outdir = create_files_in_directory norm12 = spm.Normalize12(image_to_align=filelist[0]) - assert norm12._list_outputs()['normalized_image'][0].startswith('w') - norm12 = spm.Normalize12( - image_to_align=filelist[0], apply_to_files=filelist[1]) - assert norm12._list_outputs()['normalized_files'][0].startswith('w') + assert norm12._list_outputs()["normalized_image"][0].startswith("w") + norm12 = spm.Normalize12(image_to_align=filelist[0], apply_to_files=filelist[1]) + assert norm12._list_outputs()["normalized_files"][0].startswith("w") @pytest.mark.skipif(no_spm(), reason="spm is not installed") def test_segment(): if spm.Info.name() == "SPM12": - assert spm.Segment()._jobtype == 'tools' - assert spm.Segment()._jobname == 'oldseg' + assert spm.Segment()._jobtype == "tools" + assert spm.Segment()._jobname == "oldseg" else: - assert spm.Segment()._jobtype == 'spatial' - assert spm.Segment()._jobname == 'preproc' + assert spm.Segment()._jobtype == "spatial" + assert spm.Segment()._jobname == "preproc" @pytest.mark.skipif(no_spm(), reason="spm is not installed") def test_newsegment(): if spm.Info.name() == "SPM12": - assert spm.NewSegment()._jobtype == 'spatial' - assert spm.NewSegment()._jobname == 'preproc' + assert spm.NewSegment()._jobtype == "spatial" + assert spm.NewSegment()._jobname == "preproc" else: - assert spm.NewSegment()._jobtype == 'tools' - assert spm.NewSegment()._jobname == 'preproc8' + assert spm.NewSegment()._jobtype == "tools" + assert spm.NewSegment()._jobname == "preproc8" def test_smooth(): - assert spm.Smooth._jobtype == 'spatial' - assert spm.Smooth._jobname == 'smooth' + assert spm.Smooth._jobtype == "spatial" + assert spm.Smooth._jobname == "smooth" def test_dartel(): - assert spm.DARTEL._jobtype == 'tools' - assert spm.DARTEL._jobname == 'dartel' + assert spm.DARTEL._jobtype == "tools" + assert spm.DARTEL._jobname == "dartel" def test_dartelnorm2mni(): - assert spm.DARTELNorm2MNI._jobtype == 'tools' - assert spm.DARTELNorm2MNI._jobname == 'dartel' + assert spm.DARTELNorm2MNI._jobtype == "tools" + assert spm.DARTELNorm2MNI._jobname == "dartel" diff --git a/nipype/interfaces/spm/tests/test_utils.py b/nipype/interfaces/spm/tests/test_utils.py index a574fb90a7..1afc887b06 100644 --- a/nipype/interfaces/spm/tests/test_utils.py +++ b/nipype/interfaces/spm/tests/test_utils.py @@ -11,73 +11,73 @@ def test_coreg(): - moving = example_data(infile='functional.nii') - target = example_data(infile='T1.nii') - mat = example_data(infile='trans.mat') - coreg = spmu.CalcCoregAffine(matlab_cmd='mymatlab') + moving = example_data(infile="functional.nii") + target = example_data(infile="T1.nii") + mat = example_data(infile="trans.mat") + coreg = spmu.CalcCoregAffine(matlab_cmd="mymatlab") coreg.inputs.target = target - assert coreg.inputs.matlab_cmd == 'mymatlab' + assert coreg.inputs.matlab_cmd == "mymatlab" coreg.inputs.moving = moving assert not isdefined(coreg.inputs.mat) pth, mov, _ = split_filename(moving) _, tgt, _ = split_filename(target) - mat = os.path.join(pth, '%s_to_%s.mat' % (mov, tgt)) - invmat = fname_presuffix(mat, prefix='inverse_') + mat = os.path.join(pth, "%s_to_%s.mat" % (mov, tgt)) + invmat = fname_presuffix(mat, prefix="inverse_") scrpt = coreg._make_matlab_command(None) assert coreg.inputs.mat == mat assert coreg.inputs.invmat == invmat def test_apply_transform(): - moving = 
example_data(infile='functional.nii') - mat = example_data(infile='trans.mat') - applymat = spmu.ApplyTransform(matlab_cmd='mymatlab') - assert applymat.inputs.matlab_cmd == 'mymatlab' + moving = example_data(infile="functional.nii") + mat = example_data(infile="trans.mat") + applymat = spmu.ApplyTransform(matlab_cmd="mymatlab") + assert applymat.inputs.matlab_cmd == "mymatlab" applymat.inputs.in_file = moving applymat.inputs.mat = mat scrpt = applymat._make_matlab_command(None) - expected = '[p n e v] = spm_fileparts(V.fname);' + expected = "[p n e v] = spm_fileparts(V.fname);" assert expected in scrpt - expected = 'V.mat = transform.M * V.mat;' + expected = "V.mat = transform.M * V.mat;" assert expected in scrpt def test_reslice(): - moving = example_data(infile='functional.nii') - space_defining = example_data(infile='T1.nii') - reslice = spmu.Reslice(matlab_cmd='mymatlab_version') - assert reslice.inputs.matlab_cmd == 'mymatlab_version' + moving = example_data(infile="functional.nii") + space_defining = example_data(infile="T1.nii") + reslice = spmu.Reslice(matlab_cmd="mymatlab_version") + assert reslice.inputs.matlab_cmd == "mymatlab_version" reslice.inputs.in_file = moving reslice.inputs.space_defining = space_defining assert reslice.inputs.interp == 0 with pytest.raises(TraitError): - reslice.inputs.trait_set(interp='nearest') + reslice.inputs.trait_set(interp="nearest") with pytest.raises(TraitError): reslice.inputs.trait_set(interp=10) reslice.inputs.interp = 1 script = reslice._make_matlab_command(None) - outfile = fname_presuffix(moving, prefix='r') + outfile = fname_presuffix(moving, prefix="r") assert reslice.inputs.out_file == outfile - expected = '\nflags.mean=0;\nflags.which=1;\nflags.mask=0;' - assert expected in script.replace(' ', '') - expected_interp = 'flags.interp = 1;\n' + expected = "\nflags.mean=0;\nflags.which=1;\nflags.mask=0;" + assert expected in script.replace(" ", "") + expected_interp = "flags.interp = 1;\n" assert expected_interp in script - assert 'spm_reslice(invols, flags);' in script + assert "spm_reslice(invols, flags);" in script def test_dicom_import(): - dicom = example_data(infile='dicomdir/123456-1-1.dcm') - di = spmu.DicomImport(matlab_cmd='mymatlab') - assert di.inputs.matlab_cmd == 'mymatlab' - assert di.inputs.output_dir_struct == 'flat' - assert di.inputs.output_dir == './converted_dicom' - assert di.inputs.format == 'nii' + dicom = example_data(infile="dicomdir/123456-1-1.dcm") + di = spmu.DicomImport(matlab_cmd="mymatlab") + assert di.inputs.matlab_cmd == "mymatlab" + assert di.inputs.output_dir_struct == "flat" + assert di.inputs.output_dir == "./converted_dicom" + assert di.inputs.format == "nii" assert not di.inputs.icedims with pytest.raises(TraitError): - di.inputs.trait_set(output_dir_struct='wrong') + di.inputs.trait_set(output_dir_struct="wrong") with pytest.raises(TraitError): - di.inputs.trait_set(format='FAT') + di.inputs.trait_set(format="FAT") with pytest.raises(TraitError): - di.inputs.trait_set(in_files=['does_sfd_not_32fn_exist.dcm']) + di.inputs.trait_set(in_files=["does_sfd_not_32fn_exist.dcm"]) di.inputs.in_files = [dicom] assert di.inputs.in_files == [dicom] diff --git a/nipype/interfaces/spm/utils.py b/nipype/interfaces/spm/utils.py index 919b5853ab..99e3d57d3b 100644 --- a/nipype/interfaces/spm/utils.py +++ b/nipype/interfaces/spm/utils.py @@ -5,12 +5,14 @@ import os import numpy as np -from ...utils.filemanip import (split_filename, fname_presuffix, - ensure_list, simplify_list) -from ..base import (TraitedSpec, 
isdefined, File, traits, OutputMultiPath, - InputMultiPath) -from .base import (SPMCommandInputSpec, SPMCommand, scans_for_fnames, - scans_for_fname) +from ...utils.filemanip import ( + split_filename, + fname_presuffix, + ensure_list, + simplify_list, +) +from ..base import TraitedSpec, isdefined, File, traits, OutputMultiPath, InputMultiPath +from .base import SPMCommandInputSpec, SPMCommand, scans_for_fnames, scans_for_fname class Analyze2niiInputSpec(SPMCommandInputSpec): @@ -38,28 +40,27 @@ def _make_matlab_command(self, _): def _list_outputs(self): outputs = self._outputs().get() - outputs['nifti_file'] = self.output_name + outputs["nifti_file"] = self.output_name return outputs class CalcCoregAffineInputSpec(SPMCommandInputSpec): target = File( - exists=True, - mandatory=True, - desc='target for generating affine transform') + exists=True, mandatory=True, desc="target for generating affine transform" + ) moving = File( exists=True, mandatory=True, copyfile=False, - desc=('volume transform can be applied to register with ' - 'target')) - mat = File(desc='Filename used to store affine matrix') - invmat = File(desc='Filename used to store inverse affine matrix') + desc=("volume transform can be applied to register with " "target"), + ) + mat = File(desc="Filename used to store affine matrix") + invmat = File(desc="Filename used to store inverse affine matrix") class CalcCoregAffineOutputSpec(TraitedSpec): - mat = File(exists=True, desc='Matlab file holding transform') - invmat = File(desc='Matlab file holding inverse transform') + mat = File(exists=True, desc="Matlab file holding transform") + invmat = File(desc="Matlab file holding inverse transform") class CalcCoregAffine(SPMCommand): @@ -90,14 +91,14 @@ class CalcCoregAffine(SPMCommand): def _make_inv_file(self): """ makes filename to hold inverse transform if not specified""" - invmat = fname_presuffix(self.inputs.mat, prefix='inverse_') + invmat = fname_presuffix(self.inputs.mat, prefix="inverse_") return invmat def _make_mat_file(self): """ makes name for matfile if doesn exist""" pth, mv, _ = split_filename(self.inputs.moving) _, tgt, _ = split_filename(self.inputs.target) - mat = os.path.join(pth, '%s_to_%s.mat' % (mv, tgt)) + mat = os.path.join(pth, "%s_to_%s.mat" % (mv, tgt)) return mat def _make_matlab_command(self, _): @@ -116,14 +117,18 @@ def _make_matlab_command(self, _): save('%s' , 'M' ); M = inv(M); save('%s','M') - """ % (self.inputs.target, self.inputs.moving, self.inputs.mat, - self.inputs.invmat) + """ % ( + self.inputs.target, + self.inputs.moving, + self.inputs.mat, + self.inputs.invmat, + ) return script def _list_outputs(self): outputs = self._outputs().get() - outputs['mat'] = os.path.abspath(self.inputs.mat) - outputs['invmat'] = os.path.abspath(self.inputs.invmat) + outputs["mat"] = os.path.abspath(self.inputs.mat) + outputs["invmat"] = os.path.abspath(self.inputs.invmat) return outputs @@ -132,14 +137,14 @@ class ApplyTransformInputSpec(SPMCommandInputSpec): exists=True, mandatory=True, copyfile=True, - desc='file to apply transform to, (only updates header)') - mat = File( - exists=True, mandatory=True, desc='file holding transform to apply') + desc="file to apply transform to, (only updates header)", + ) + mat = File(exists=True, mandatory=True, desc="file holding transform to apply") out_file = File(desc="output file name for transformed data", genfile=True) class ApplyTransformOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='Transformed image file') + out_file = File(exists=True, 
desc="Transformed image file") class ApplyTransform(SPMCommand): @@ -155,13 +160,14 @@ class ApplyTransform(SPMCommand): >>> applymat.run() # doctest: +SKIP """ + input_spec = ApplyTransformInputSpec output_spec = ApplyTransformOutputSpec def _make_matlab_command(self, _): """checks for SPM, generates script""" outputs = self._list_outputs() - self.inputs.out_file = outputs['out_file'] + self.inputs.out_file = outputs["out_file"] script = """ infile = '%s'; outfile = '%s' @@ -174,7 +180,11 @@ def _make_matlab_command(self, _): V.fname = fullfile(outfile); spm_write_vol(V,X); - """ % (self.inputs.in_file, self.inputs.out_file, self.inputs.mat) + """ % ( + self.inputs.in_file, + self.inputs.out_file, + self.inputs.mat, + ) # img_space = spm_get_space(infile); # spm_get_space(infile, transform.M * img_space); return script @@ -182,38 +192,39 @@ def _make_matlab_command(self, _): def _list_outputs(self): outputs = self.output_spec().get() if not isdefined(self.inputs.out_file): - outputs['out_file'] = os.path.abspath(self._gen_outfilename()) + outputs["out_file"] = os.path.abspath(self._gen_outfilename()) else: - outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) return outputs def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) - return name + '_trans.nii' + return name + "_trans.nii" class ResliceInputSpec(SPMCommandInputSpec): in_file = File( exists=True, mandatory=True, - desc='file to apply transform to, (only updates header)') + desc="file to apply transform to, (only updates header)", + ) space_defining = File( - exists=True, - mandatory=True, - desc='Volume defining space to slice in_file into') + exists=True, mandatory=True, desc="Volume defining space to slice in_file into" + ) interp = traits.Range( low=0, high=7, usedefault=True, - desc='degree of b-spline used for interpolation' - '0 is nearest neighbor (default)') + desc="degree of b-spline used for interpolation" + "0 is nearest neighbor (default)", + ) - out_file = File(desc='Optional file to save resliced volume') + out_file = File(desc="Optional file to save resliced volume") class ResliceOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='resliced volume') + out_file = File(exists=True, desc="resliced volume") class Reslice(SPMCommand): @@ -225,8 +236,7 @@ class Reslice(SPMCommand): def _make_matlab_command(self, _): """ generates script""" if not isdefined(self.inputs.out_file): - self.inputs.out_file = fname_presuffix( - self.inputs.in_file, prefix='r') + self.inputs.out_file = fname_presuffix(self.inputs.in_file, prefix="r") script = """ flags.mean = 0; flags.which = 1; @@ -235,13 +245,16 @@ def _make_matlab_command(self, _): infiles = strvcat(\'%s\', \'%s\'); invols = spm_vol(infiles); spm_reslice(invols, flags); - """ % (self.inputs.interp, self.inputs.space_defining, - self.inputs.in_file) + """ % ( + self.inputs.interp, + self.inputs.space_defining, + self.inputs.in_file, + ) return script def _list_outputs(self): outputs = self._outputs().get() - outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs["out_file"] = os.path.abspath(self.inputs.out_file) return outputs @@ -249,44 +262,46 @@ class ApplyInverseDeformationInput(SPMCommandInputSpec): in_files = InputMultiPath( File(exists=True), mandatory=True, - field='fnames', - desc='Files on which deformation is applied') + field="fnames", + desc="Files on which deformation is applied", + ) target = File( - exists=True, - field='comp{1}.inv.space', - 
desc='File defining target space') + exists=True, field="comp{1}.inv.space", desc="File defining target space" + ) deformation = File( exists=True, - field='comp{1}.inv.comp{1}.sn2def.matname', - desc='SN SPM deformation file', - xor=['deformation_field']) + field="comp{1}.inv.comp{1}.sn2def.matname", + desc="SN SPM deformation file", + xor=["deformation_field"], + ) deformation_field = File( exists=True, - field='comp{1}.inv.comp{1}.def', - desc='SN SPM deformation file', - xor=['deformation']) + field="comp{1}.inv.comp{1}.def", + desc="SN SPM deformation file", + xor=["deformation"], + ) interpolation = traits.Range( - low=0, - high=7, - field='interp', - desc='degree of b-spline used for interpolation') + low=0, high=7, field="interp", desc="degree of b-spline used for interpolation" + ) bounding_box = traits.List( traits.Float(), - field='comp{1}.inv.comp{1}.sn2def.bb', + field="comp{1}.inv.comp{1}.sn2def.bb", minlen=6, maxlen=6, - desc='6-element list (opt)') + desc="6-element list (opt)", + ) voxel_sizes = traits.List( traits.Float(), - field='comp{1}.inv.comp{1}.sn2def.vox', + field="comp{1}.inv.comp{1}.sn2def.vox", minlen=3, maxlen=3, - desc='3-element list (opt)') + desc="3-element list (opt)", + ) class ApplyInverseDeformationOutput(TraitedSpec): - out_files = OutputMultiPath(File(exists=True), desc='Transformed files') + out_files = OutputMultiPath(File(exists=True), desc="Transformed files") class ApplyInverseDeformation(SPMCommand): @@ -307,28 +322,28 @@ class ApplyInverseDeformation(SPMCommand): input_spec = ApplyInverseDeformationInput output_spec = ApplyInverseDeformationOutput - _jobtype = 'util' - _jobname = 'defs' + _jobtype = "util" + _jobname = "defs" def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ - if opt == 'in_files': + if opt == "in_files": return scans_for_fnames(ensure_list(val)) - if opt == 'target': + if opt == "target": return scans_for_fname(ensure_list(val)) - if opt == 'deformation': + if opt == "deformation": return np.array([simplify_list(val)], dtype=object) - if opt == 'deformation_field': + if opt == "deformation_field": return np.array([simplify_list(val)], dtype=object) return val def _list_outputs(self): outputs = self._outputs().get() - outputs['out_files'] = [] + outputs["out_files"] = [] for filename in self.inputs.in_files: _, fname = os.path.split(filename) - outputs['out_files'].append(os.path.realpath('w%s' % fname)) + outputs["out_files"].append(os.path.realpath("w%s" % fname)) return outputs @@ -336,34 +351,34 @@ class ResliceToReferenceInput(SPMCommandInputSpec): in_files = InputMultiPath( File(exists=True), mandatory=True, - field='fnames', - desc='Files on which deformation is applied') + field="fnames", + desc="Files on which deformation is applied", + ) target = File( - exists=True, - field='comp{1}.id.space', - desc='File defining target space') + exists=True, field="comp{1}.id.space", desc="File defining target space" + ) interpolation = traits.Range( - low=0, - high=7, - field='interp', - desc='degree of b-spline used for interpolation') + low=0, high=7, field="interp", desc="degree of b-spline used for interpolation" + ) bounding_box = traits.List( traits.Float(), - field='comp{2}.idbbvox.bb', + field="comp{2}.idbbvox.bb", minlen=6, maxlen=6, - desc='6-element list (opt)') + desc="6-element list (opt)", + ) voxel_sizes = traits.List( traits.Float(), - field='comp{2}.idbbvox.vox', + field="comp{2}.idbbvox.vox", minlen=3, maxlen=3, - desc='3-element list (opt)') + desc="3-element list 
(opt)", + ) class ResliceToReferenceOutput(TraitedSpec): - out_files = OutputMultiPath(File(exists=True), desc='Transformed files') + out_files = OutputMultiPath(File(exists=True), desc="Transformed files") class ResliceToReference(SPMCommand): @@ -383,28 +398,28 @@ class ResliceToReference(SPMCommand): input_spec = ResliceToReferenceInput output_spec = ResliceToReferenceOutput - _jobtype = 'util' - _jobname = 'defs' + _jobtype = "util" + _jobname = "defs" def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ - if opt == 'in_files': + if opt == "in_files": return scans_for_fnames(ensure_list(val)) - if opt == 'target': + if opt == "target": return scans_for_fname(ensure_list(val)) - if opt == 'deformation': + if opt == "deformation": return np.array([simplify_list(val)], dtype=object) - if opt == 'deformation_field': + if opt == "deformation_field": return np.array([simplify_list(val)], dtype=object) return val def _list_outputs(self): outputs = self._outputs().get() - outputs['out_files'] = [] + outputs["out_files"] = [] for filename in self.inputs.in_files: _, fname = os.path.split(filename) - outputs['out_files'].append(os.path.realpath('w%s' % fname)) + outputs["out_files"].append(os.path.realpath("w%s" % fname)) return outputs @@ -412,42 +427,42 @@ class DicomImportInputSpec(SPMCommandInputSpec): in_files = InputMultiPath( File(exists=True), mandatory=True, - field='data', - desc='dicom files to be converted') + field="data", + desc="dicom files to be converted", + ) output_dir_struct = traits.Enum( - 'flat', - 'series', - 'patname', - 'patid_date', - 'patid', - 'date_time', - field='root', + "flat", + "series", + "patname", + "patid_date", + "patid", + "date_time", + field="root", usedefault=True, - desc='directory structure for the output.') + desc="directory structure for the output.", + ) output_dir = traits.Str( - './converted_dicom', - field='outdir', - usedefault=True, - desc='output directory.') + "./converted_dicom", field="outdir", usedefault=True, desc="output directory." + ) format = traits.Enum( - 'nii', - 'img', - field='convopts.format', - usedefault=True, - desc='output format.') + "nii", "img", field="convopts.format", usedefault=True, desc="output format." + ) icedims = traits.Bool( False, - field='convopts.icedims', + field="convopts.icedims", usedefault=True, - desc=('If image sorting fails, one can try using ' - 'the additional SIEMENS ICEDims information ' - 'to create unique filenames. Use this only if ' - 'there would be multiple volumes with exactly ' - 'the same file names.')) + desc=( + "If image sorting fails, one can try using " + "the additional SIEMENS ICEDims information " + "to create unique filenames. Use this only if " + "there would be multiple volumes with exactly " + "the same file names." 
+ ), + ) class DicomImportOutputSpec(TraitedSpec): - out_files = OutputMultiPath(File(exists=True), desc='converted files') + out_files = OutputMultiPath(File(exists=True), desc="converted files") class DicomImport(SPMCommand): @@ -465,19 +480,19 @@ class DicomImport(SPMCommand): input_spec = DicomImportInputSpec output_spec = DicomImportOutputSpec - _jobtype = 'util' - _jobname = 'dicom' + _jobtype = "util" + _jobname = "dicom" def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ - if opt == 'in_files': + if opt == "in_files": return np.array(val, dtype=object) - if opt == 'output_dir': + if opt == "output_dir": return np.array([val], dtype=object) - if opt == 'output_dir': + if opt == "output_dir": return os.path.abspath(val) - if opt == 'icedims': + if opt == "icedims": if val: return 1 return 0 @@ -491,21 +506,23 @@ def _run_interface(self, runtime): def _list_outputs(self): from glob import glob + outputs = self._outputs().get() od = os.path.abspath(self.inputs.output_dir) ext = self.inputs.format if self.inputs.output_dir_struct == "flat": - outputs['out_files'] = glob(os.path.join(od, '*.%s' % ext)) - elif self.inputs.output_dir_struct == 'series': - outputs['out_files'] = glob( - os.path.join(od, os.path.join('*', '*.%s' % ext))) - elif (self.inputs.output_dir_struct in [ - 'patid', 'date_time', 'patname' - ]): - outputs['out_files'] = glob( - os.path.join(od, os.path.join('*', '*', '*.%s' % ext))) - elif self.inputs.output_dir_struct == 'patid_date': - outputs['out_files'] = glob( - os.path.join(od, os.path.join('*', '*', '*', '*.%s' % ext))) + outputs["out_files"] = glob(os.path.join(od, "*.%s" % ext)) + elif self.inputs.output_dir_struct == "series": + outputs["out_files"] = glob( + os.path.join(od, os.path.join("*", "*.%s" % ext)) + ) + elif self.inputs.output_dir_struct in ["patid", "date_time", "patname"]: + outputs["out_files"] = glob( + os.path.join(od, os.path.join("*", "*", "*.%s" % ext)) + ) + elif self.inputs.output_dir_struct == "patid_date": + outputs["out_files"] = glob( + os.path.join(od, os.path.join("*", "*", "*", "*.%s" % ext)) + ) return outputs diff --git a/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py b/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py index 65cd8ea0c4..0263177c5e 100644 --- a/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py +++ b/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py @@ -4,20 +4,19 @@ def test_BIDSDataGrabber_inputs(): input_map = dict( - base_dir=dict(mandatory=True, ), + base_dir=dict(mandatory=True,), extra_derivatives=dict(), - index_derivatives=dict( - mandatory=True, - usedefault=True, - ), + index_derivatives=dict(mandatory=True, usedefault=True,), output_query=dict(), - raise_on_empty=dict(usedefault=True, ), + raise_on_empty=dict(usedefault=True,), ) inputs = BIDSDataGrabber.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_BIDSDataGrabber_outputs(): output_map = dict() outputs = BIDSDataGrabber.output_spec() diff --git a/nipype/interfaces/tests/test_auto_Bru2.py b/nipype/interfaces/tests/test_auto_Bru2.py index ae9aa850a8..5256b2732f 100644 --- a/nipype/interfaces/tests/test_auto_Bru2.py +++ b/nipype/interfaces/tests/test_auto_Bru2.py @@ -4,32 +4,24 @@ def test_Bru2_inputs(): input_map = dict( - actual_size=dict(argstr='-a', ), - append_protocol_name=dict(argstr='-p', ), - args=dict(argstr='%s', ), - compress=dict(argstr='-z', ), - environ=dict( - 
nohash=True, - usedefault=True, - ), - force_conversion=dict(argstr='-f', ), - input_dir=dict( - argstr='%s', - mandatory=True, - position=-1, - ), - output_filename=dict( - argstr='-o %s', - genfile=True, - ), + actual_size=dict(argstr="-a",), + append_protocol_name=dict(argstr="-p",), + args=dict(argstr="%s",), + compress=dict(argstr="-z",), + environ=dict(nohash=True, usedefault=True,), + force_conversion=dict(argstr="-f",), + input_dir=dict(argstr="%s", mandatory=True, position=-1,), + output_filename=dict(argstr="-o %s", genfile=True,), ) inputs = Bru2.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Bru2_outputs(): - output_map = dict(nii_file=dict(extensions=None, ), ) + output_map = dict(nii_file=dict(extensions=None,),) outputs = Bru2.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_C3d.py b/nipype/interfaces/tests/test_auto_C3d.py index daf1077cf4..d55216b454 100644 --- a/nipype/interfaces/tests/test_auto_C3d.py +++ b/nipype/interfaces/tests/test_auto_C3d.py @@ -4,47 +4,29 @@ def test_C3d_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - mandatory=True, - position=1, - ), - interp=dict(argstr='-interpolation %s', ), - is_4d=dict(usedefault=True, ), - multicomp_split=dict( - argstr='-mcr', - position=0, - usedefault=True, - ), - out_file=dict( - argstr='-o %s', - extensions=None, - position=-1, - xor=['out_files'], - ), - out_files=dict( - argstr='-oo %s', - position=-1, - xor=['out_file'], - ), - pix_type=dict(argstr='-type %s', ), - resample=dict(argstr='-resample %s', ), - scale=dict(argstr='-scale %s', ), - shift=dict(argstr='-shift %s', ), - smooth=dict(argstr='-smooth %s', ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", mandatory=True, position=1,), + interp=dict(argstr="-interpolation %s",), + is_4d=dict(usedefault=True,), + multicomp_split=dict(argstr="-mcr", position=0, usedefault=True,), + out_file=dict(argstr="-o %s", extensions=None, position=-1, xor=["out_files"],), + out_files=dict(argstr="-oo %s", position=-1, xor=["out_file"],), + pix_type=dict(argstr="-type %s",), + resample=dict(argstr="-resample %s",), + scale=dict(argstr="-scale %s",), + shift=dict(argstr="-shift %s",), + smooth=dict(argstr="-smooth %s",), ) inputs = C3d.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_C3d_outputs(): - output_map = dict(out_files=dict(), ) + output_map = dict(out_files=dict(),) outputs = C3d.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_C3dAffineTool.py b/nipype/interfaces/tests/test_auto_C3dAffineTool.py index dd4884557a..963d2f7931 100644 --- a/nipype/interfaces/tests/test_auto_C3dAffineTool.py +++ b/nipype/interfaces/tests/test_auto_C3dAffineTool.py @@ -4,43 +4,23 @@ def test_C3dAffineTool_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - fsl2ras=dict( - argstr='-fsl2ras', - position=4, - ), - itk_transform=dict( - argstr='-oitk %s', - hash_files=False, - position=5, - ), - reference_file=dict( - argstr='-ref %s', - extensions=None, - position=1, - ), - source_file=dict( - argstr='-src %s', - 
extensions=None, - position=2, - ), - transform_file=dict( - argstr='%s', - extensions=None, - position=3, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + fsl2ras=dict(argstr="-fsl2ras", position=4,), + itk_transform=dict(argstr="-oitk %s", hash_files=False, position=5,), + reference_file=dict(argstr="-ref %s", extensions=None, position=1,), + source_file=dict(argstr="-src %s", extensions=None, position=2,), + transform_file=dict(argstr="%s", extensions=None, position=3,), ) inputs = C3dAffineTool.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_C3dAffineTool_outputs(): - output_map = dict(itk_transform=dict(extensions=None, ), ) + output_map = dict(itk_transform=dict(extensions=None,),) outputs = C3dAffineTool.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_CopyMeta.py b/nipype/interfaces/tests/test_auto_CopyMeta.py index 7f13dfc927..6415514c3d 100644 --- a/nipype/interfaces/tests/test_auto_CopyMeta.py +++ b/nipype/interfaces/tests/test_auto_CopyMeta.py @@ -4,24 +4,20 @@ def test_CopyMeta_inputs(): input_map = dict( - dest_file=dict( - extensions=None, - mandatory=True, - ), + dest_file=dict(extensions=None, mandatory=True,), exclude_classes=dict(), include_classes=dict(), - src_file=dict( - extensions=None, - mandatory=True, - ), + src_file=dict(extensions=None, mandatory=True,), ) inputs = CopyMeta.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CopyMeta_outputs(): - output_map = dict(dest_file=dict(extensions=None, ), ) + output_map = dict(dest_file=dict(extensions=None,),) outputs = CopyMeta.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_DataFinder.py b/nipype/interfaces/tests/test_auto_DataFinder.py index 6ee0258218..81cdc92ac8 100644 --- a/nipype/interfaces/tests/test_auto_DataFinder.py +++ b/nipype/interfaces/tests/test_auto_DataFinder.py @@ -5,17 +5,19 @@ def test_DataFinder_inputs(): input_map = dict( ignore_regexes=dict(), - match_regex=dict(usedefault=True, ), + match_regex=dict(usedefault=True,), max_depth=dict(), min_depth=dict(), - root_paths=dict(mandatory=True, ), - unpack_single=dict(usedefault=True, ), + root_paths=dict(mandatory=True,), + unpack_single=dict(usedefault=True,), ) inputs = DataFinder.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DataFinder_outputs(): output_map = dict() outputs = DataFinder.output_spec() diff --git a/nipype/interfaces/tests/test_auto_DataGrabber.py b/nipype/interfaces/tests/test_auto_DataGrabber.py index c395eadd1a..c3817e43e7 100644 --- a/nipype/interfaces/tests/test_auto_DataGrabber.py +++ b/nipype/interfaces/tests/test_auto_DataGrabber.py @@ -5,10 +5,10 @@ def test_DataGrabber_inputs(): input_map = dict( base_directory=dict(), - drop_blank_outputs=dict(usedefault=True, ), - raise_on_empty=dict(usedefault=True, ), - sort_filelist=dict(mandatory=True, ), - template=dict(mandatory=True, ), + drop_blank_outputs=dict(usedefault=True,), + raise_on_empty=dict(usedefault=True,), + sort_filelist=dict(mandatory=True,), + template=dict(mandatory=True,), template_args=dict(), ) inputs = DataGrabber.input_spec() @@ -16,6 +16,8 @@ 
def test_DataGrabber_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DataGrabber_outputs(): output_map = dict() outputs = DataGrabber.output_spec() diff --git a/nipype/interfaces/tests/test_auto_DataSink.py b/nipype/interfaces/tests/test_auto_DataSink.py index 15af11755c..870dbc3c85 100644 --- a/nipype/interfaces/tests/test_auto_DataSink.py +++ b/nipype/interfaces/tests/test_auto_DataSink.py @@ -4,16 +4,16 @@ def test_DataSink_inputs(): input_map = dict( - _outputs=dict(usedefault=True, ), + _outputs=dict(usedefault=True,), base_directory=dict(), bucket=dict(), container=dict(), creds_path=dict(), encrypt_bucket_keys=dict(), local_copy=dict(), - parameterization=dict(usedefault=True, ), + parameterization=dict(usedefault=True,), regexp_substitutions=dict(), - remove_dest_dir=dict(usedefault=True, ), + remove_dest_dir=dict(usedefault=True,), strip_dir=dict(), substitutions=dict(), ) @@ -22,8 +22,10 @@ def test_DataSink_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DataSink_outputs(): - output_map = dict(out_file=dict(), ) + output_map = dict(out_file=dict(),) outputs = DataSink.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_Dcm2nii.py b/nipype/interfaces/tests/test_auto_Dcm2nii.py index 92ca835242..9aca885a64 100644 --- a/nipype/interfaces/tests/test_auto_Dcm2nii.py +++ b/nipype/interfaces/tests/test_auto_Dcm2nii.py @@ -4,88 +4,41 @@ def test_Dcm2nii_inputs(): input_map = dict( - anonymize=dict( - argstr='-a', - usedefault=True, - ), - args=dict(argstr='%s', ), - collapse_folders=dict( - argstr='-c', - usedefault=True, - ), - config_file=dict( - argstr='-b %s', - extensions=None, - genfile=True, - ), - convert_all_pars=dict( - argstr='-v', - usedefault=True, - ), - date_in_filename=dict( - argstr='-d', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - events_in_filename=dict( - argstr='-e', - usedefault=True, - ), - gzip_output=dict( - argstr='-g', - usedefault=True, - ), - id_in_filename=dict( - argstr='-i', - usedefault=True, - ), - nii_output=dict( - argstr='-n', - usedefault=True, - ), - output_dir=dict( - argstr='-o %s', - genfile=True, - ), - protocol_in_filename=dict( - argstr='-p', - usedefault=True, - ), - reorient=dict(argstr='-r', ), - reorient_and_crop=dict( - argstr='-x', - usedefault=True, - ), + anonymize=dict(argstr="-a", usedefault=True,), + args=dict(argstr="%s",), + collapse_folders=dict(argstr="-c", usedefault=True,), + config_file=dict(argstr="-b %s", extensions=None, genfile=True,), + convert_all_pars=dict(argstr="-v", usedefault=True,), + date_in_filename=dict(argstr="-d", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + events_in_filename=dict(argstr="-e", usedefault=True,), + gzip_output=dict(argstr="-g", usedefault=True,), + id_in_filename=dict(argstr="-i", usedefault=True,), + nii_output=dict(argstr="-n", usedefault=True,), + output_dir=dict(argstr="-o %s", genfile=True,), + protocol_in_filename=dict(argstr="-p", usedefault=True,), + reorient=dict(argstr="-r",), + reorient_and_crop=dict(argstr="-x", usedefault=True,), source_dir=dict( - argstr='%s', - mandatory=True, - position=-1, - xor=['source_names'], - ), - source_in_filename=dict( - argstr='-f', - usedefault=True, + argstr="%s", mandatory=True, position=-1, 
xor=["source_names"], ), + source_in_filename=dict(argstr="-f", usedefault=True,), source_names=dict( - argstr='%s', + argstr="%s", copyfile=False, mandatory=True, position=-1, - xor=['source_dir'], - ), - spm_analyze=dict( - argstr='-s', - xor=['nii_output'], + xor=["source_dir"], ), + spm_analyze=dict(argstr="-s", xor=["nii_output"],), ) inputs = Dcm2nii.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Dcm2nii_outputs(): output_map = dict( bvals=dict(), diff --git a/nipype/interfaces/tests/test_auto_Dcm2niix.py b/nipype/interfaces/tests/test_auto_Dcm2niix.py index 04ca6015ea..dfaa46d36a 100644 --- a/nipype/interfaces/tests/test_auto_Dcm2niix.py +++ b/nipype/interfaces/tests/test_auto_Dcm2niix.py @@ -4,80 +4,44 @@ def test_Dcm2niix_inputs(): input_map = dict( - anon_bids=dict( - argstr='-ba', - requires=['bids_format'], - ), - args=dict(argstr='%s', ), - bids_format=dict( - argstr='-b', - usedefault=True, - ), - comment=dict(argstr='-c %s', ), - compress=dict( - argstr='-z %s', - usedefault=True, - ), - compression=dict(argstr='-%d', ), - crop=dict( - argstr='-x', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - has_private=dict( - argstr='-t', - usedefault=True, - ), - ignore_deriv=dict(argstr='-i', ), - merge_imgs=dict( - argstr='-m', - usedefault=True, - ), - out_filename=dict(argstr='-f %s', ), - output_dir=dict( - argstr='-o %s', - usedefault=True, - ), - philips_float=dict(argstr='-p', ), - series_numbers=dict(argstr='-n %s...', ), - single_file=dict( - argstr='-s', - usedefault=True, - ), + anon_bids=dict(argstr="-ba", requires=["bids_format"],), + args=dict(argstr="%s",), + bids_format=dict(argstr="-b", usedefault=True,), + comment=dict(argstr="-c %s",), + compress=dict(argstr="-z %s", usedefault=True,), + compression=dict(argstr="-%d",), + crop=dict(argstr="-x", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + has_private=dict(argstr="-t", usedefault=True,), + ignore_deriv=dict(argstr="-i",), + merge_imgs=dict(argstr="-m", usedefault=True,), + out_filename=dict(argstr="-f %s",), + output_dir=dict(argstr="-o %s", usedefault=True,), + philips_float=dict(argstr="-p",), + series_numbers=dict(argstr="-n %s...",), + single_file=dict(argstr="-s", usedefault=True,), source_dir=dict( - argstr='%s', - mandatory=True, - position=-1, - xor=['source_names'], + argstr="%s", mandatory=True, position=-1, xor=["source_names"], ), source_names=dict( - argstr='%s', + argstr="%s", copyfile=False, mandatory=True, position=-1, - xor=['source_dir'], - ), - to_nrrd=dict(argstr='-e', ), - verbose=dict( - argstr='-v', - usedefault=True, + xor=["source_dir"], ), + to_nrrd=dict(argstr="-e",), + verbose=dict(argstr="-v", usedefault=True,), ) inputs = Dcm2niix.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Dcm2niix_outputs(): - output_map = dict( - bids=dict(), - bvals=dict(), - bvecs=dict(), - converted_files=dict(), - ) + output_map = dict(bids=dict(), bvals=dict(), bvecs=dict(), converted_files=dict(),) outputs = Dcm2niix.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_DcmStack.py b/nipype/interfaces/tests/test_auto_DcmStack.py index bffc1706ad..af0ff91495 100644 --- a/nipype/interfaces/tests/test_auto_DcmStack.py +++ 
b/nipype/interfaces/tests/test_auto_DcmStack.py @@ -4,12 +4,12 @@ def test_DcmStack_inputs(): input_map = dict( - dicom_files=dict(mandatory=True, ), + dicom_files=dict(mandatory=True,), embed_meta=dict(), exclude_regexes=dict(), - force_read=dict(usedefault=True, ), + force_read=dict(usedefault=True,), include_regexes=dict(), - out_ext=dict(usedefault=True, ), + out_ext=dict(usedefault=True,), out_format=dict(), out_path=dict(), ) @@ -18,8 +18,10 @@ def test_DcmStack_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_DcmStack_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = DcmStack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_ExportFile.py b/nipype/interfaces/tests/test_auto_ExportFile.py index d7aa25ddd9..8dd84b29b9 100644 --- a/nipype/interfaces/tests/test_auto_ExportFile.py +++ b/nipype/interfaces/tests/test_auto_ExportFile.py @@ -6,22 +6,18 @@ def test_ExportFile_inputs(): input_map = dict( check_extension=dict(), clobber=dict(), - in_file=dict( - extensions=None, - mandatory=True, - ), - out_file=dict( - extensions=None, - mandatory=True, - ), + in_file=dict(extensions=None, mandatory=True,), + out_file=dict(extensions=None, mandatory=True,), ) inputs = ExportFile.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_ExportFile_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = ExportFile.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_FreeSurferSource.py b/nipype/interfaces/tests/test_auto_FreeSurferSource.py index 26511575ae..c7102512ae 100644 --- a/nipype/interfaces/tests/test_auto_FreeSurferSource.py +++ b/nipype/interfaces/tests/test_auto_FreeSurferSource.py @@ -4,132 +4,56 @@ def test_FreeSurferSource_inputs(): input_map = dict( - hemi=dict(usedefault=True, ), - subject_id=dict(mandatory=True, ), - subjects_dir=dict(mandatory=True, ), + hemi=dict(usedefault=True,), + subject_id=dict(mandatory=True,), + subjects_dir=dict(mandatory=True,), ) inputs = FreeSurferSource.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_FreeSurferSource_outputs(): output_map = dict( - BA_stats=dict( - altkey='BA', - loc='stats', - ), - T1=dict( - extensions=None, - loc='mri', - ), - annot=dict( - altkey='*annot', - loc='label', - ), - aparc_a2009s_stats=dict( - altkey='aparc.a2009s', - loc='stats', - ), - aparc_aseg=dict( - altkey='aparc*aseg', - loc='mri', - ), - aparc_stats=dict( - altkey='aparc', - loc='stats', - ), - area_pial=dict( - altkey='area.pial', - loc='surf', - ), - aseg=dict( - extensions=None, - loc='mri', - ), - aseg_stats=dict( - altkey='aseg', - loc='stats', - ), - avg_curv=dict(loc='surf', ), - brain=dict( - extensions=None, - loc='mri', - ), - brainmask=dict( - extensions=None, - loc='mri', - ), - curv=dict(loc='surf', ), - curv_pial=dict( - altkey='curv.pial', - loc='surf', - ), - curv_stats=dict( - altkey='curv', - loc='stats', - ), - entorhinal_exvivo_stats=dict( - altkey='entorhinal_exvivo', - loc='stats', - ), - filled=dict( - 
extensions=None, - loc='mri', - ), - graymid=dict( - altkey=['graymid', 'midthickness'], - loc='surf', - ), - inflated=dict(loc='surf', ), - jacobian_white=dict(loc='surf', ), - label=dict( - altkey='*label', - loc='label', - ), - norm=dict( - extensions=None, - loc='mri', - ), - nu=dict( - extensions=None, - loc='mri', - ), - orig=dict( - extensions=None, - loc='mri', - ), - pial=dict(loc='surf', ), - rawavg=dict( - extensions=None, - loc='mri', - ), - ribbon=dict( - altkey='*ribbon', - loc='mri', - ), - smoothwm=dict(loc='surf', ), - sphere=dict(loc='surf', ), - sphere_reg=dict( - altkey='sphere.reg', - loc='surf', - ), - sulc=dict(loc='surf', ), - thickness=dict(loc='surf', ), - volume=dict(loc='surf', ), - white=dict(loc='surf', ), - wm=dict( - extensions=None, - loc='mri', - ), - wmparc=dict( - extensions=None, - loc='mri', - ), - wmparc_stats=dict( - altkey='wmparc', - loc='stats', - ), + BA_stats=dict(altkey="BA", loc="stats",), + T1=dict(extensions=None, loc="mri",), + annot=dict(altkey="*annot", loc="label",), + aparc_a2009s_stats=dict(altkey="aparc.a2009s", loc="stats",), + aparc_aseg=dict(altkey="aparc*aseg", loc="mri",), + aparc_stats=dict(altkey="aparc", loc="stats",), + area_pial=dict(altkey="area.pial", loc="surf",), + aseg=dict(extensions=None, loc="mri",), + aseg_stats=dict(altkey="aseg", loc="stats",), + avg_curv=dict(loc="surf",), + brain=dict(extensions=None, loc="mri",), + brainmask=dict(extensions=None, loc="mri",), + curv=dict(loc="surf",), + curv_pial=dict(altkey="curv.pial", loc="surf",), + curv_stats=dict(altkey="curv", loc="stats",), + entorhinal_exvivo_stats=dict(altkey="entorhinal_exvivo", loc="stats",), + filled=dict(extensions=None, loc="mri",), + graymid=dict(altkey=["graymid", "midthickness"], loc="surf",), + inflated=dict(loc="surf",), + jacobian_white=dict(loc="surf",), + label=dict(altkey="*label", loc="label",), + norm=dict(extensions=None, loc="mri",), + nu=dict(extensions=None, loc="mri",), + orig=dict(extensions=None, loc="mri",), + pial=dict(loc="surf",), + rawavg=dict(extensions=None, loc="mri",), + ribbon=dict(altkey="*ribbon", loc="mri",), + smoothwm=dict(loc="surf",), + sphere=dict(loc="surf",), + sphere_reg=dict(altkey="sphere.reg", loc="surf",), + sulc=dict(loc="surf",), + thickness=dict(loc="surf",), + volume=dict(loc="surf",), + white=dict(loc="surf",), + wm=dict(extensions=None, loc="mri",), + wmparc=dict(extensions=None, loc="mri",), + wmparc_stats=dict(altkey="wmparc", loc="stats",), ) outputs = FreeSurferSource.output_spec() diff --git a/nipype/interfaces/tests/test_auto_GroupAndStack.py b/nipype/interfaces/tests/test_auto_GroupAndStack.py index a566467007..f330efde20 100644 --- a/nipype/interfaces/tests/test_auto_GroupAndStack.py +++ b/nipype/interfaces/tests/test_auto_GroupAndStack.py @@ -4,12 +4,12 @@ def test_GroupAndStack_inputs(): input_map = dict( - dicom_files=dict(mandatory=True, ), + dicom_files=dict(mandatory=True,), embed_meta=dict(), exclude_regexes=dict(), - force_read=dict(usedefault=True, ), + force_read=dict(usedefault=True,), include_regexes=dict(), - out_ext=dict(usedefault=True, ), + out_ext=dict(usedefault=True,), out_format=dict(), out_path=dict(), ) @@ -18,8 +18,10 @@ def test_GroupAndStack_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_GroupAndStack_outputs(): - output_map = dict(out_list=dict(), ) + output_map = dict(out_list=dict(),) outputs = GroupAndStack.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_JSONFileGrabber.py b/nipype/interfaces/tests/test_auto_JSONFileGrabber.py index 61aea81044..c01a584949 100644 --- a/nipype/interfaces/tests/test_auto_JSONFileGrabber.py +++ b/nipype/interfaces/tests/test_auto_JSONFileGrabber.py @@ -3,15 +3,14 @@ def test_JSONFileGrabber_inputs(): - input_map = dict( - defaults=dict(), - in_file=dict(extensions=None, ), - ) + input_map = dict(defaults=dict(), in_file=dict(extensions=None,),) inputs = JSONFileGrabber.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_JSONFileGrabber_outputs(): output_map = dict() outputs = JSONFileGrabber.output_spec() diff --git a/nipype/interfaces/tests/test_auto_JSONFileSink.py b/nipype/interfaces/tests/test_auto_JSONFileSink.py index d28df1cb66..c88faba852 100644 --- a/nipype/interfaces/tests/test_auto_JSONFileSink.py +++ b/nipype/interfaces/tests/test_auto_JSONFileSink.py @@ -4,17 +4,19 @@ def test_JSONFileSink_inputs(): input_map = dict( - _outputs=dict(usedefault=True, ), - in_dict=dict(usedefault=True, ), - out_file=dict(extensions=None, ), + _outputs=dict(usedefault=True,), + in_dict=dict(usedefault=True,), + out_file=dict(extensions=None,), ) inputs = JSONFileSink.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_JSONFileSink_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = JSONFileSink.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_LookupMeta.py b/nipype/interfaces/tests/test_auto_LookupMeta.py index 8b5424ab6a..fa0129798e 100644 --- a/nipype/interfaces/tests/test_auto_LookupMeta.py +++ b/nipype/interfaces/tests/test_auto_LookupMeta.py @@ -4,17 +4,15 @@ def test_LookupMeta_inputs(): input_map = dict( - in_file=dict( - extensions=None, - mandatory=True, - ), - meta_keys=dict(mandatory=True, ), + in_file=dict(extensions=None, mandatory=True,), meta_keys=dict(mandatory=True,), ) inputs = LookupMeta.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_LookupMeta_outputs(): output_map = dict() outputs = LookupMeta.output_spec() diff --git a/nipype/interfaces/tests/test_auto_MatlabCommand.py b/nipype/interfaces/tests/test_auto_MatlabCommand.py index 7e04090c78..0dea244ec8 100644 --- a/nipype/interfaces/tests/test_auto_MatlabCommand.py +++ b/nipype/interfaces/tests/test_auto_MatlabCommand.py @@ -4,45 +4,20 @@ def test_MatlabCommand_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - logfile=dict( - argstr='-logfile %s', - extensions=None, - ), - mfile=dict(usedefault=True, ), - nodesktop=dict( - argstr='-nodesktop', - nohash=True, - usedefault=True, - ), - nosplash=dict( - argstr='-nosplash', - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + logfile=dict(argstr="-logfile %s", extensions=None,), + mfile=dict(usedefault=True,), + nodesktop=dict(argstr="-nodesktop", nohash=True, usedefault=True,), + nosplash=dict(argstr="-nosplash", nohash=True, usedefault=True,), paths=dict(), - postscript=dict(usedefault=True, ), - 
prescript=dict(usedefault=True, ), - script=dict( - argstr='-r "%s;exit"', - mandatory=True, - position=-1, - ), - script_file=dict( - extensions=None, - usedefault=True, - ), - single_comp_thread=dict( - argstr='-singleCompThread', - nohash=True, - ), + postscript=dict(usedefault=True,), + prescript=dict(usedefault=True,), + script=dict(argstr='-r "%s;exit"', mandatory=True, position=-1,), + script_file=dict(extensions=None, usedefault=True,), + single_comp_thread=dict(argstr="-singleCompThread", nohash=True,), uses_mcr=dict( - nohash=True, - xor=['nodesktop', 'nosplash', 'single_comp_thread'], + nohash=True, xor=["nodesktop", "nosplash", "single_comp_thread"], ), ) inputs = MatlabCommand.input_spec() diff --git a/nipype/interfaces/tests/test_auto_MergeNifti.py b/nipype/interfaces/tests/test_auto_MergeNifti.py index 4898fc7fe7..38c0f39f72 100644 --- a/nipype/interfaces/tests/test_auto_MergeNifti.py +++ b/nipype/interfaces/tests/test_auto_MergeNifti.py @@ -4,9 +4,9 @@ def test_MergeNifti_inputs(): input_map = dict( - in_files=dict(mandatory=True, ), + in_files=dict(mandatory=True,), merge_dim=dict(), - out_ext=dict(usedefault=True, ), + out_ext=dict(usedefault=True,), out_format=dict(), out_path=dict(), sort_order=dict(), @@ -16,8 +16,10 @@ def test_MergeNifti_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MergeNifti_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = MergeNifti.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_MeshFix.py b/nipype/interfaces/tests/test_auto_MeshFix.py index 557396f2fd..9a69fc0859 100644 --- a/nipype/interfaces/tests/test_auto_MeshFix.py +++ b/nipype/interfaces/tests/test_auto_MeshFix.py @@ -4,99 +4,72 @@ def test_MeshFix_inputs(): input_map = dict( - args=dict(argstr='%s', ), - cut_inner=dict(argstr='--cut-inner %d', ), - cut_outer=dict(argstr='--cut-outer %d', ), - decouple_inin=dict(argstr='--decouple-inin %d', ), - decouple_outin=dict(argstr='--decouple-outin %d', ), - decouple_outout=dict(argstr='--decouple-outout %d', ), - dilation=dict(argstr='--dilate %d', ), - dont_clean=dict(argstr='--no-clean', ), - environ=dict( - nohash=True, - usedefault=True, - ), - epsilon_angle=dict(argstr='-a %f', ), + args=dict(argstr="%s",), + cut_inner=dict(argstr="--cut-inner %d",), + cut_outer=dict(argstr="--cut-outer %d",), + decouple_inin=dict(argstr="--decouple-inin %d",), + decouple_outin=dict(argstr="--decouple-outin %d",), + decouple_outout=dict(argstr="--decouple-outout %d",), + dilation=dict(argstr="--dilate %d",), + dont_clean=dict(argstr="--no-clean",), + environ=dict(nohash=True, usedefault=True,), + epsilon_angle=dict(argstr="-a %f",), finetuning_distance=dict( - argstr='%f', - position=-2, - requires=['finetuning_substeps'], + argstr="%f", position=-2, requires=["finetuning_substeps"], ), finetuning_inwards=dict( - argstr='--fineTuneIn ', + argstr="--fineTuneIn ", position=-3, - requires=['finetuning_distance', 'finetuning_substeps'], + requires=["finetuning_distance", "finetuning_substeps"], ), finetuning_outwards=dict( - argstr='--fineTuneOut ', + argstr="--fineTuneOut ", position=-3, - requires=['finetuning_distance', 'finetuning_substeps'], - xor=['finetuning_inwards'], + requires=["finetuning_distance", "finetuning_substeps"], + xor=["finetuning_inwards"], ), finetuning_substeps=dict( - 
argstr='%d', - position=-1, - requires=['finetuning_distance'], - ), - in_file1=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=1, - ), - in_file2=dict( - argstr='%s', - extensions=None, - position=2, - ), - join_closest_components=dict( - argstr='-jc', - xor=['join_closest_components'], + argstr="%d", position=-1, requires=["finetuning_distance"], ), + in_file1=dict(argstr="%s", extensions=None, mandatory=True, position=1,), + in_file2=dict(argstr="%s", extensions=None, position=2,), + join_closest_components=dict(argstr="-jc", xor=["join_closest_components"],), join_overlapping_largest_components=dict( - argstr='-j', - xor=['join_closest_components'], + argstr="-j", xor=["join_closest_components"], ), - laplacian_smoothing_steps=dict(argstr='--smooth %d', ), - number_of_biggest_shells=dict(argstr='--shells %d', ), - out_filename=dict( - argstr='-o %s', - extensions=None, - genfile=True, - ), - output_type=dict(usedefault=True, ), - quiet_mode=dict(argstr='-q', ), - remove_handles=dict(argstr='--remove-handles', ), + laplacian_smoothing_steps=dict(argstr="--smooth %d",), + number_of_biggest_shells=dict(argstr="--shells %d",), + out_filename=dict(argstr="-o %s", extensions=None, genfile=True,), + output_type=dict(usedefault=True,), + quiet_mode=dict(argstr="-q",), + remove_handles=dict(argstr="--remove-handles",), save_as_freesurfer_mesh=dict( - argstr='--fsmesh', - xor=['save_as_vrml', 'save_as_stl'], + argstr="--fsmesh", xor=["save_as_vrml", "save_as_stl"], ), save_as_stl=dict( - argstr='--stl', - xor=['save_as_vrml', 'save_as_freesurfer_mesh'], + argstr="--stl", xor=["save_as_vrml", "save_as_freesurfer_mesh"], ), save_as_vrml=dict( - argstr='--wrl', - xor=['save_as_stl', 'save_as_freesurfer_mesh'], + argstr="--wrl", xor=["save_as_stl", "save_as_freesurfer_mesh"], ), - set_intersections_to_one=dict(argstr='--intersect', ), + set_intersections_to_one=dict(argstr="--intersect",), uniform_remeshing_steps=dict( - argstr='-u %d', - requires=['uniform_remeshing_vertices'], + argstr="-u %d", requires=["uniform_remeshing_vertices"], ), uniform_remeshing_vertices=dict( - argstr='--vertices %d', - requires=['uniform_remeshing_steps'], + argstr="--vertices %d", requires=["uniform_remeshing_steps"], ), - x_shift=dict(argstr='--smooth %d', ), + x_shift=dict(argstr="--smooth %d",), ) inputs = MeshFix.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MeshFix_outputs(): - output_map = dict(mesh_file=dict(extensions=None, ), ) + output_map = dict(mesh_file=dict(extensions=None,),) outputs = MeshFix.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_MySQLSink.py b/nipype/interfaces/tests/test_auto_MySQLSink.py index 30899607a8..30bf18ef26 100644 --- a/nipype/interfaces/tests/test_auto_MySQLSink.py +++ b/nipype/interfaces/tests/test_auto_MySQLSink.py @@ -4,20 +4,16 @@ def test_MySQLSink_inputs(): input_map = dict( - config=dict( - extensions=None, - mandatory=True, - xor=['host'], - ), - database_name=dict(mandatory=True, ), + config=dict(extensions=None, mandatory=True, xor=["host"],), + database_name=dict(mandatory=True,), host=dict( mandatory=True, - requires=['username', 'password'], + requires=["username", "password"], usedefault=True, - xor=['config'], + xor=["config"], ), password=dict(), - table_name=dict(mandatory=True, ), + table_name=dict(mandatory=True,), username=dict(), ) inputs = 
MySQLSink.input_spec() diff --git a/nipype/interfaces/tests/test_auto_PETPVC.py b/nipype/interfaces/tests/test_auto_PETPVC.py index e5b116d932..49ba858e4a 100644 --- a/nipype/interfaces/tests/test_auto_PETPVC.py +++ b/nipype/interfaces/tests/test_auto_PETPVC.py @@ -4,71 +4,30 @@ def test_PETPVC_inputs(): input_map = dict( - alpha=dict( - argstr='-a %.4f', - usedefault=True, - ), - args=dict(argstr='%s', ), - debug=dict( - argstr='-d', - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fwhm_x=dict( - argstr='-x %.4f', - mandatory=True, - ), - fwhm_y=dict( - argstr='-y %.4f', - mandatory=True, - ), - fwhm_z=dict( - argstr='-z %.4f', - mandatory=True, - ), - in_file=dict( - argstr='-i %s', - extensions=None, - mandatory=True, - ), - mask_file=dict( - argstr='-m %s', - extensions=None, - mandatory=True, - ), - n_deconv=dict( - argstr='-k %d', - usedefault=True, - ), - n_iter=dict( - argstr='-n %d', - usedefault=True, - ), - out_file=dict( - argstr='-o %s', - extensions=None, - genfile=True, - hash_files=False, - ), - pvc=dict( - argstr='-p %s', - mandatory=True, - ), - stop_crit=dict( - argstr='-s %.4f', - usedefault=True, - ), + alpha=dict(argstr="-a %.4f", usedefault=True,), + args=dict(argstr="%s",), + debug=dict(argstr="-d", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + fwhm_x=dict(argstr="-x %.4f", mandatory=True,), + fwhm_y=dict(argstr="-y %.4f", mandatory=True,), + fwhm_z=dict(argstr="-z %.4f", mandatory=True,), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True,), + mask_file=dict(argstr="-m %s", extensions=None, mandatory=True,), + n_deconv=dict(argstr="-k %d", usedefault=True,), + n_iter=dict(argstr="-n %d", usedefault=True,), + out_file=dict(argstr="-o %s", extensions=None, genfile=True, hash_files=False,), + pvc=dict(argstr="-p %s", mandatory=True,), + stop_crit=dict(argstr="-s %.4f", usedefault=True,), ) inputs = PETPVC.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_PETPVC_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = PETPVC.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_Quickshear.py b/nipype/interfaces/tests/test_auto_Quickshear.py index 43ccd757c1..06b829af9a 100644 --- a/nipype/interfaces/tests/test_auto_Quickshear.py +++ b/nipype/interfaces/tests/test_auto_Quickshear.py @@ -4,33 +4,17 @@ def test_Quickshear_inputs(): input_map = dict( - args=dict(argstr='%s', ), - buff=dict( - argstr='%d', - position=4, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=1, - ), - mask_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), + args=dict(argstr="%s",), + buff=dict(argstr="%d", position=4,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1,), + mask_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, keep_extension=True, - name_source='in_file', - name_template='%s_defaced', + name_source="in_file", + name_template="%s_defaced", position=3, ), ) @@ -39,8 +23,10 @@ def test_Quickshear_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert 
getattr(inputs.traits()[key], metakey) == value + + def test_Quickshear_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Quickshear.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_Reorient.py b/nipype/interfaces/tests/test_auto_Reorient.py index d6774a483c..bb4660a05c 100644 --- a/nipype/interfaces/tests/test_auto_Reorient.py +++ b/nipype/interfaces/tests/test_auto_Reorient.py @@ -4,21 +4,19 @@ def test_Reorient_inputs(): input_map = dict( - in_file=dict( - extensions=None, - mandatory=True, - ), - orientation=dict(usedefault=True, ), + in_file=dict(extensions=None, mandatory=True,), + orientation=dict(usedefault=True,), ) inputs = Reorient.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Reorient_outputs(): output_map = dict( - out_file=dict(extensions=None, ), - transform=dict(extensions=None, ), + out_file=dict(extensions=None,), transform=dict(extensions=None,), ) outputs = Reorient.output_spec() diff --git a/nipype/interfaces/tests/test_auto_Rescale.py b/nipype/interfaces/tests/test_auto_Rescale.py index e3e2bf151b..d8c83c24ca 100644 --- a/nipype/interfaces/tests/test_auto_Rescale.py +++ b/nipype/interfaces/tests/test_auto_Rescale.py @@ -4,24 +4,20 @@ def test_Rescale_inputs(): input_map = dict( - in_file=dict( - extensions=None, - mandatory=True, - ), + in_file=dict(extensions=None, mandatory=True,), invert=dict(), - percentile=dict(usedefault=True, ), - ref_file=dict( - extensions=None, - mandatory=True, - ), + percentile=dict(usedefault=True,), + ref_file=dict(extensions=None, mandatory=True,), ) inputs = Rescale.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Rescale_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Rescale.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_S3DataGrabber.py b/nipype/interfaces/tests/test_auto_S3DataGrabber.py index ff13619248..96b32701e2 100644 --- a/nipype/interfaces/tests/test_auto_S3DataGrabber.py +++ b/nipype/interfaces/tests/test_auto_S3DataGrabber.py @@ -4,14 +4,14 @@ def test_S3DataGrabber_inputs(): input_map = dict( - anon=dict(usedefault=True, ), - bucket=dict(mandatory=True, ), - bucket_path=dict(usedefault=True, ), + anon=dict(usedefault=True,), + bucket=dict(mandatory=True,), + bucket_path=dict(usedefault=True,), local_directory=dict(), - raise_on_empty=dict(usedefault=True, ), - region=dict(usedefault=True, ), - sort_filelist=dict(mandatory=True, ), - template=dict(mandatory=True, ), + raise_on_empty=dict(usedefault=True,), + region=dict(usedefault=True,), + sort_filelist=dict(mandatory=True,), + template=dict(mandatory=True,), template_args=dict(), ) inputs = S3DataGrabber.input_spec() @@ -19,6 +19,8 @@ def test_S3DataGrabber_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_S3DataGrabber_outputs(): output_map = dict() outputs = S3DataGrabber.output_spec() diff --git a/nipype/interfaces/tests/test_auto_SQLiteSink.py b/nipype/interfaces/tests/test_auto_SQLiteSink.py index 353e01e985..f1a62ead15 
100644 --- a/nipype/interfaces/tests/test_auto_SQLiteSink.py +++ b/nipype/interfaces/tests/test_auto_SQLiteSink.py @@ -4,11 +4,8 @@ def test_SQLiteSink_inputs(): input_map = dict( - database_file=dict( - extensions=None, - mandatory=True, - ), - table_name=dict(mandatory=True, ), + database_file=dict(extensions=None, mandatory=True,), + table_name=dict(mandatory=True,), ) inputs = SQLiteSink.input_spec() diff --git a/nipype/interfaces/tests/test_auto_SSHDataGrabber.py b/nipype/interfaces/tests/test_auto_SSHDataGrabber.py index e3755c8a75..7a759cb07f 100644 --- a/nipype/interfaces/tests/test_auto_SSHDataGrabber.py +++ b/nipype/interfaces/tests/test_auto_SSHDataGrabber.py @@ -4,17 +4,17 @@ def test_SSHDataGrabber_inputs(): input_map = dict( - base_directory=dict(mandatory=True, ), - download_files=dict(usedefault=True, ), - drop_blank_outputs=dict(usedefault=True, ), - hostname=dict(mandatory=True, ), + base_directory=dict(mandatory=True,), + download_files=dict(usedefault=True,), + drop_blank_outputs=dict(usedefault=True,), + hostname=dict(mandatory=True,), password=dict(), - raise_on_empty=dict(usedefault=True, ), - sort_filelist=dict(mandatory=True, ), - ssh_log_to_file=dict(usedefault=True, ), - template=dict(mandatory=True, ), + raise_on_empty=dict(usedefault=True,), + sort_filelist=dict(mandatory=True,), + ssh_log_to_file=dict(usedefault=True,), + template=dict(mandatory=True,), template_args=dict(), - template_expression=dict(usedefault=True, ), + template_expression=dict(usedefault=True,), username=dict(), ) inputs = SSHDataGrabber.input_spec() @@ -22,6 +22,8 @@ def test_SSHDataGrabber_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SSHDataGrabber_outputs(): output_map = dict() outputs = SSHDataGrabber.output_spec() diff --git a/nipype/interfaces/tests/test_auto_SelectFiles.py b/nipype/interfaces/tests/test_auto_SelectFiles.py index ed6c276ddf..722c431b8f 100644 --- a/nipype/interfaces/tests/test_auto_SelectFiles.py +++ b/nipype/interfaces/tests/test_auto_SelectFiles.py @@ -5,15 +5,17 @@ def test_SelectFiles_inputs(): input_map = dict( base_directory=dict(), - force_lists=dict(usedefault=True, ), - raise_on_empty=dict(usedefault=True, ), - sort_filelist=dict(usedefault=True, ), + force_lists=dict(usedefault=True,), + raise_on_empty=dict(usedefault=True,), + sort_filelist=dict(usedefault=True,), ) inputs = SelectFiles.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SelectFiles_outputs(): output_map = dict() outputs = SelectFiles.output_spec() diff --git a/nipype/interfaces/tests/test_auto_SignalExtraction.py b/nipype/interfaces/tests/test_auto_SignalExtraction.py index 3ca02656b1..2af759b50a 100644 --- a/nipype/interfaces/tests/test_auto_SignalExtraction.py +++ b/nipype/interfaces/tests/test_auto_SignalExtraction.py @@ -4,27 +4,23 @@ def test_SignalExtraction_inputs(): input_map = dict( - class_labels=dict(mandatory=True, ), - detrend=dict(usedefault=True, ), - in_file=dict( - extensions=None, - mandatory=True, - ), - incl_shared_variance=dict(usedefault=True, ), - include_global=dict(usedefault=True, ), - label_files=dict(mandatory=True, ), - out_file=dict( - extensions=None, - usedefault=True, - ), + class_labels=dict(mandatory=True,), + detrend=dict(usedefault=True,), + in_file=dict(extensions=None, mandatory=True,), + 
incl_shared_variance=dict(usedefault=True,), + include_global=dict(usedefault=True,), + label_files=dict(mandatory=True,), + out_file=dict(extensions=None, usedefault=True,), ) inputs = SignalExtraction.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SignalExtraction_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = SignalExtraction.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_SlicerCommandLine.py b/nipype/interfaces/tests/test_auto_SlicerCommandLine.py index 43ae697eec..99bd82a8f3 100644 --- a/nipype/interfaces/tests/test_auto_SlicerCommandLine.py +++ b/nipype/interfaces/tests/test_auto_SlicerCommandLine.py @@ -4,11 +4,8 @@ def test_SlicerCommandLine_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), module=dict(), ) inputs = SlicerCommandLine.input_spec() @@ -16,6 +13,8 @@ def test_SlicerCommandLine_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SlicerCommandLine_outputs(): output_map = dict() outputs = SlicerCommandLine.output_spec() diff --git a/nipype/interfaces/tests/test_auto_SplitNifti.py b/nipype/interfaces/tests/test_auto_SplitNifti.py index 06dd71feba..219f378a82 100644 --- a/nipype/interfaces/tests/test_auto_SplitNifti.py +++ b/nipype/interfaces/tests/test_auto_SplitNifti.py @@ -4,11 +4,8 @@ def test_SplitNifti_inputs(): input_map = dict( - in_file=dict( - extensions=None, - mandatory=True, - ), - out_ext=dict(usedefault=True, ), + in_file=dict(extensions=None, mandatory=True,), + out_ext=dict(usedefault=True,), out_format=dict(), out_path=dict(), split_dim=dict(), @@ -18,8 +15,10 @@ def test_SplitNifti_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_SplitNifti_outputs(): - output_map = dict(out_list=dict(), ) + output_map = dict(out_list=dict(),) outputs = SplitNifti.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_XNATSink.py b/nipype/interfaces/tests/test_auto_XNATSink.py index 0206281382..d22e2a1a63 100644 --- a/nipype/interfaces/tests/test_auto_XNATSink.py +++ b/nipype/interfaces/tests/test_auto_XNATSink.py @@ -4,25 +4,17 @@ def test_XNATSink_inputs(): input_map = dict( - _outputs=dict(usedefault=True, ), - assessor_id=dict(xor=['reconstruction_id'], ), + _outputs=dict(usedefault=True,), + assessor_id=dict(xor=["reconstruction_id"],), cache_dir=dict(), - config=dict( - extensions=None, - mandatory=True, - xor=['server'], - ), - experiment_id=dict(mandatory=True, ), - project_id=dict(mandatory=True, ), + config=dict(extensions=None, mandatory=True, xor=["server"],), + experiment_id=dict(mandatory=True,), + project_id=dict(mandatory=True,), pwd=dict(), - reconstruction_id=dict(xor=['assessor_id'], ), - server=dict( - mandatory=True, - requires=['user', 'pwd'], - xor=['config'], - ), - share=dict(usedefault=True, ), - subject_id=dict(mandatory=True, ), + reconstruction_id=dict(xor=["assessor_id"],), + server=dict(mandatory=True, requires=["user", "pwd"], xor=["config"],), + 
share=dict(usedefault=True,), + subject_id=dict(mandatory=True,), user=dict(), ) inputs = XNATSink.input_spec() diff --git a/nipype/interfaces/tests/test_auto_XNATSource.py b/nipype/interfaces/tests/test_auto_XNATSource.py index c26553e390..a60af06b6a 100644 --- a/nipype/interfaces/tests/test_auto_XNATSource.py +++ b/nipype/interfaces/tests/test_auto_XNATSource.py @@ -5,19 +5,11 @@ def test_XNATSource_inputs(): input_map = dict( cache_dir=dict(), - config=dict( - extensions=None, - mandatory=True, - xor=['server'], - ), + config=dict(extensions=None, mandatory=True, xor=["server"],), pwd=dict(), - query_template=dict(mandatory=True, ), - query_template_args=dict(usedefault=True, ), - server=dict( - mandatory=True, - requires=['user', 'pwd'], - xor=['config'], - ), + query_template=dict(mandatory=True,), + query_template_args=dict(usedefault=True,), + server=dict(mandatory=True, requires=["user", "pwd"], xor=["config"],), user=dict(), ) inputs = XNATSource.input_spec() @@ -25,6 +17,8 @@ def test_XNATSource_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_XNATSource_outputs(): output_map = dict() outputs = XNATSource.output_spec() diff --git a/nipype/interfaces/tests/test_extra_dcm2nii.py b/nipype/interfaces/tests/test_extra_dcm2nii.py index 68fb626f75..e76f300ec6 100644 --- a/nipype/interfaces/tests/test_extra_dcm2nii.py +++ b/nipype/interfaces/tests/test_extra_dcm2nii.py @@ -3,15 +3,16 @@ import shutil from nipype.interfaces.dcm2nii import Dcm2niix + no_dcm2niix = not bool(Dcm2niix().version) no_datalad = False try: - from datalad import api # to pull and grab data + from datalad import api # to pull and grab data from datalad.support.exceptions import IncompleteResultsError except ImportError: no_datalad = True -DICOM_DIR = 'http://datasets-tests.datalad.org/dicoms/dcm2niix-tests' +DICOM_DIR = "http://datasets-tests.datalad.org/dicoms/dcm2niix-tests" @pytest.fixture @@ -25,23 +26,25 @@ def _fetch_data(datadir, dicoms): except IncompleteResultsError as exc: pytest.skip("Failed to fetch test data: %s" % str(exc)) return data + return _fetch_data + @pytest.mark.skipif(no_datalad, reason="Datalad required") @pytest.mark.skipif(no_dcm2niix, reason="Dcm2niix required") @pytest.mark.xfail(reason="Intermittent failures. 
Let's come back to this later.") def test_dcm2niix_dti(fetch_data, tmpdir): tmpdir.chdir() - datadir = tmpdir.mkdir('data').strpath - dicoms = fetch_data(datadir, 'Siemens_Sag_DTI_20160825_145811') + datadir = tmpdir.mkdir("data").strpath + dicoms = fetch_data(datadir, "Siemens_Sag_DTI_20160825_145811") def assert_dti(res): "Some assertions we will make" assert res.outputs.converted_files assert res.outputs.bvals assert res.outputs.bvecs - outputs = [y for x,y in res.outputs.get().items()] - if res.inputs.get('bids_format'): + outputs = [y for x, y in res.outputs.get().items()] + if res.inputs.get("bids_format"): # ensure all outputs are of equal lengths assert len(set(map(len, outputs))) == 1 else: @@ -49,11 +52,11 @@ def assert_dti(res): dcm = Dcm2niix() dcm.inputs.source_dir = dicoms - dcm.inputs.out_filename = '%u%z' + dcm.inputs.out_filename = "%u%z" assert_dti(dcm.run()) # now run specifying output directory and removing BIDS option - outdir = tmpdir.mkdir('conversion').strpath + outdir = tmpdir.mkdir("conversion").strpath dcm.inputs.output_dir = outdir dcm.inputs.bids_format = False assert_dti(dcm.run()) diff --git a/nipype/interfaces/tests/test_image.py b/nipype/interfaces/tests/test_image.py index 43030ff1b4..b05d55b547 100644 --- a/nipype/interfaces/tests/test_image.py +++ b/nipype/interfaces/tests/test_image.py @@ -9,11 +9,10 @@ from ..image import _as_reoriented_backport, _orientations from ... import LooseVersion -nibabel24 = LooseVersion(nb.__version__) >= LooseVersion('2.4.0') +nibabel24 = LooseVersion(nb.__version__) >= LooseVersion("2.4.0") -@pytest.mark.skipif(not nibabel24, - reason="Old nibabel - can't directly compare") +@pytest.mark.skipif(not nibabel24, reason="Old nibabel - can't directly compare") def test_reorientation_backport(): pixdims = ((1, 1, 1), (2, 2, 3)) data = np.random.normal(size=(17, 18, 19, 2)) @@ -28,7 +27,7 @@ def test_reorientation_backport(): # Create image img = nb.Nifti1Image(data, affine) - dim_info = {'freq': 0, 'phase': 1, 'slice': 2} + dim_info = {"freq": 0, "phase": 1, "slice": 2} img.header.set_dim_info(**dim_info) # Find a random, non-identity transform @@ -51,14 +50,15 @@ def test_reorientation_backport(): # Reorientation changes affine and data array assert not np.allclose(img.affine, reoriented_a.affine) - assert not (flips_only and - np.allclose(img.get_data(), reoriented_a.get_data())) + assert not (flips_only and np.allclose(img.get_data(), reoriented_a.get_data())) # Dimension info changes iff axes are reordered - assert flips_only == np.array_equal(img.header.get_dim_info(), - reoriented_a.header.get_dim_info()) + assert flips_only == np.array_equal( + img.header.get_dim_info(), reoriented_a.header.get_dim_info() + ) # Both approaches produce equivalent images assert np.allclose(reoriented_a.affine, reoriented_b.affine) assert np.array_equal(reoriented_a.get_data(), reoriented_b.get_data()) - assert np.array_equal(reoriented_a.header.get_dim_info(), - reoriented_b.header.get_dim_info()) + assert np.array_equal( + reoriented_a.header.get_dim_info(), reoriented_b.header.get_dim_info() + ) diff --git a/nipype/interfaces/tests/test_io.py b/nipype/interfaces/tests/test_io.py index 1b718f0533..ef210de030 100644 --- a/nipype/interfaces/tests/test_io.py +++ b/nipype/interfaces/tests/test_io.py @@ -36,6 +36,7 @@ # Check for paramiko try: import paramiko + no_paramiko = False # Check for localhost SSH Server @@ -45,14 +46,15 @@ client = paramiko.SSHClient() client.load_system_host_keys() 
client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - client.connect('127.0.0.1', username=os.getenv('USER'), sock=proxy, - timeout=10) + client.connect("127.0.0.1", username=os.getenv("USER"), sock=proxy, timeout=10) no_local_ssh = False - except (paramiko.SSHException, - paramiko.ssh_exception.NoValidConnectionsError, - OSError): + except ( + paramiko.SSHException, + paramiko.ssh_exception.NoValidConnectionsError, + OSError, + ): no_local_ssh = True except ImportError: @@ -61,9 +63,10 @@ # Check for fakes3 from subprocess import check_call, CalledProcessError + try: - ret_code = check_call(['which', 'fakes3'], stdout=open(os.devnull, 'wb')) - fakes3 = (ret_code == 0) + ret_code = check_call(["which", "fakes3"], stdout=open(os.devnull, "wb")) + fakes3 = ret_code == 0 except CalledProcessError: fakes3 = False @@ -71,8 +74,9 @@ have_pybids = True try: import bids + filepath = os.path.realpath(os.path.dirname(bids.__file__)) - datadir = os.path.realpath(os.path.join(filepath, 'tests/data/')) + datadir = os.path.realpath(os.path.join(filepath, "tests/data/")) except ImportError: have_pybids = False @@ -81,7 +85,7 @@ def test_datagrabber(): dg = nio.DataGrabber() assert dg.inputs.template == Undefined assert dg.inputs.base_directory == Undefined - assert dg.inputs.template_args == {'outfiles': []} + assert dg.inputs.template_args == {"outfiles": []} @pytest.mark.skipif(noboto, reason="boto library is not available") @@ -89,97 +93,103 @@ def test_s3datagrabber(): dg = nio.S3DataGrabber() assert dg.inputs.template == Undefined assert dg.inputs.local_directory == Undefined - assert dg.inputs.template_args == {'outfiles': []} + assert dg.inputs.template_args == {"outfiles": []} templates1 = { "model": "interfaces/{package}/model.py", - "preprocess": "interfaces/{package}/pre*.py" + "preprocess": "interfaces/{package}/pre*.py", } templates2 = {"converter": "interfaces/dcm{to!s}nii.py"} templates3 = {"model": "interfaces/{package.name}/model.py"} -@pytest.mark.parametrize("SF_args, inputs_att, expected", [ - ({ - "templates": templates1 - }, { - "package": "fsl" - }, { - "infields": ["package"], - "outfields": ["model", "preprocess"], - "run_output": { - "model": - op.join(op.dirname(nipype.__file__), "interfaces/fsl/model.py"), - "preprocess": - op.join( - op.dirname(nipype.__file__), "interfaces/fsl/preprocess.py") - }, - "node_output": ["model", "preprocess"] - }), - ({ - "templates": templates1, - "force_lists": True - }, { - "package": "spm" - }, { - "infields": ["package"], - "outfields": ["model", "preprocess"], - "run_output": { - "model": - [op.join(op.dirname(nipype.__file__), "interfaces/spm/model.py")], - "preprocess": [ - op.join( - op.dirname(nipype.__file__), - "interfaces/spm/preprocess.py") - ] - }, - "node_output": ["model", "preprocess"] - }), - ({ - "templates": templates1 - }, { - "package": "fsl", - "force_lists": ["model"] - }, { - "infields": ["package"], - "outfields": ["model", "preprocess"], - "run_output": { - "model": - [op.join(op.dirname(nipype.__file__), "interfaces/fsl/model.py")], - "preprocess": - op.join( - op.dirname(nipype.__file__), "interfaces/fsl/preprocess.py") - }, - "node_output": ["model", "preprocess"] - }), - ({ - "templates": templates2 - }, { - "to": 2 - }, { - "infields": ["to"], - "outfields": ["converter"], - "run_output": { - "converter": - op.join(op.dirname(nipype.__file__), "interfaces/dcm2nii.py") - }, - "node_output": ["converter"] - }), - ({ - "templates": templates3 - }, { - "package": namedtuple("package", ["name"])("fsl") - }, { 
- "infields": ["package"], - "outfields": ["model"], - "run_output": { - "model": - op.join(op.dirname(nipype.__file__), "interfaces/fsl/model.py") - }, - "node_output": ["model"] - }), -]) +@pytest.mark.parametrize( + "SF_args, inputs_att, expected", + [ + ( + {"templates": templates1}, + {"package": "fsl"}, + { + "infields": ["package"], + "outfields": ["model", "preprocess"], + "run_output": { + "model": op.join( + op.dirname(nipype.__file__), "interfaces/fsl/model.py" + ), + "preprocess": op.join( + op.dirname(nipype.__file__), "interfaces/fsl/preprocess.py" + ), + }, + "node_output": ["model", "preprocess"], + }, + ), + ( + {"templates": templates1, "force_lists": True}, + {"package": "spm"}, + { + "infields": ["package"], + "outfields": ["model", "preprocess"], + "run_output": { + "model": [ + op.join(op.dirname(nipype.__file__), "interfaces/spm/model.py") + ], + "preprocess": [ + op.join( + op.dirname(nipype.__file__), "interfaces/spm/preprocess.py" + ) + ], + }, + "node_output": ["model", "preprocess"], + }, + ), + ( + {"templates": templates1}, + {"package": "fsl", "force_lists": ["model"]}, + { + "infields": ["package"], + "outfields": ["model", "preprocess"], + "run_output": { + "model": [ + op.join(op.dirname(nipype.__file__), "interfaces/fsl/model.py") + ], + "preprocess": op.join( + op.dirname(nipype.__file__), "interfaces/fsl/preprocess.py" + ), + }, + "node_output": ["model", "preprocess"], + }, + ), + ( + {"templates": templates2}, + {"to": 2}, + { + "infields": ["to"], + "outfields": ["converter"], + "run_output": { + "converter": op.join( + op.dirname(nipype.__file__), "interfaces/dcm2nii.py" + ) + }, + "node_output": ["converter"], + }, + ), + ( + {"templates": templates3}, + {"package": namedtuple("package", ["name"])("fsl")}, + { + "infields": ["package"], + "outfields": ["model"], + "run_output": { + "model": op.join( + op.dirname(nipype.__file__), "interfaces/fsl/model.py" + ) + }, + "node_output": ["model"], + }, + ), + ], +) def test_selectfiles(tmpdir, SF_args, inputs_att, expected): tmpdir.chdir() base_dir = op.dirname(nipype.__file__) @@ -201,11 +211,10 @@ def test_selectfiles_valueerror(): base_dir = op.dirname(nipype.__file__) templates = { "model": "interfaces/{package}/model.py", - "preprocess": "interfaces/{package}/pre*.py" + "preprocess": "interfaces/{package}/pre*.py", } force_lists = ["model", "preprocess", "registration"] - sf = nio.SelectFiles( - templates, base_directory=base_dir, force_lists=force_lists) + sf = nio.SelectFiles(templates, base_directory=base_dir, force_lists=force_lists) with pytest.raises(ValueError): sf.run() @@ -213,67 +222,82 @@ def test_selectfiles_valueerror(): @pytest.mark.skipif(noboto, reason="boto library is not available") def test_s3datagrabber_communication(tmpdir): dg = nio.S3DataGrabber( - infields=['subj_id', 'run_num'], outfields=['func', 'struct']) + infields=["subj_id", "run_num"], outfields=["func", "struct"] + ) dg.inputs.anon = True - dg.inputs.bucket = 'openfmri' - dg.inputs.bucket_path = 'ds001/' + dg.inputs.bucket = "openfmri" + dg.inputs.bucket_path = "ds001/" dg.inputs.local_directory = tmpdir.strpath dg.inputs.sort_filelist = True - dg.inputs.template = '*' + dg.inputs.template = "*" dg.inputs.field_template = dict( - func='%s/BOLD/task001_%s/bold.nii.gz', - struct='%s/anatomy/highres001_brain.nii.gz') - dg.inputs.subj_id = ['sub001', 'sub002'] - dg.inputs.run_num = ['run001', 'run003'] - dg.inputs.template_args = dict( - func=[['subj_id', 'run_num']], struct=[['subj_id']]) + 
func="%s/BOLD/task001_%s/bold.nii.gz", + struct="%s/anatomy/highres001_brain.nii.gz", + ) + dg.inputs.subj_id = ["sub001", "sub002"] + dg.inputs.run_num = ["run001", "run003"] + dg.inputs.template_args = dict(func=[["subj_id", "run_num"]], struct=[["subj_id"]]) res = dg.run() func_outfiles = res.outputs.func struct_outfiles = res.outputs.struct # check for all files - assert os.path.join( - dg.inputs.local_directory, - '/sub001/BOLD/task001_run001/bold.nii.gz') in func_outfiles[0] + assert ( + os.path.join( + dg.inputs.local_directory, "/sub001/BOLD/task001_run001/bold.nii.gz" + ) + in func_outfiles[0] + ) assert os.path.exists(func_outfiles[0]) - assert os.path.join( - dg.inputs.local_directory, - '/sub001/anatomy/highres001_brain.nii.gz') in struct_outfiles[0] + assert ( + os.path.join( + dg.inputs.local_directory, "/sub001/anatomy/highres001_brain.nii.gz" + ) + in struct_outfiles[0] + ) assert os.path.exists(struct_outfiles[0]) - assert os.path.join( - dg.inputs.local_directory, - '/sub002/BOLD/task001_run003/bold.nii.gz') in func_outfiles[1] + assert ( + os.path.join( + dg.inputs.local_directory, "/sub002/BOLD/task001_run003/bold.nii.gz" + ) + in func_outfiles[1] + ) assert os.path.exists(func_outfiles[1]) - assert os.path.join( - dg.inputs.local_directory, - '/sub002/anatomy/highres001_brain.nii.gz') in struct_outfiles[1] + assert ( + os.path.join( + dg.inputs.local_directory, "/sub002/anatomy/highres001_brain.nii.gz" + ) + in struct_outfiles[1] + ) assert os.path.exists(struct_outfiles[1]) def test_datagrabber_order(tmpdir): for file_name in [ - 'sub002_L1_R1.q', 'sub002_L1_R2.q', 'sub002_L2_R1.q', - 'sub002_L2_R2.qd', 'sub002_L3_R10.q', 'sub002_L3_R2.q' + "sub002_L1_R1.q", + "sub002_L1_R2.q", + "sub002_L2_R1.q", + "sub002_L2_R2.qd", + "sub002_L3_R10.q", + "sub002_L3_R2.q", ]: - tmpdir.join(file_name).open('a').close() + tmpdir.join(file_name).open("a").close() - dg = nio.DataGrabber(infields=['sid']) + dg = nio.DataGrabber(infields=["sid"]) dg.inputs.base_directory = tmpdir.strpath - dg.inputs.template = '%s_L%d_R*.q*' - dg.inputs.template_args = { - 'outfiles': [['sid', 1], ['sid', 2], ['sid', 3]] - } - dg.inputs.sid = 'sub002' + dg.inputs.template = "%s_L%d_R*.q*" + dg.inputs.template_args = {"outfiles": [["sid", 1], ["sid", 2], ["sid", 3]]} + dg.inputs.sid = "sub002" dg.inputs.sort_filelist = True res = dg.run() outfiles = res.outputs.outfiles - assert 'sub002_L1_R1' in outfiles[0][0] - assert 'sub002_L1_R2' in outfiles[0][1] - assert 'sub002_L2_R1' in outfiles[1][0] - assert 'sub002_L2_R2' in outfiles[1][1] - assert 'sub002_L3_R2' in outfiles[2][0] - assert 'sub002_L3_R10' in outfiles[2][1] + assert "sub002_L1_R1" in outfiles[0][0] + assert "sub002_L1_R2" in outfiles[0][1] + assert "sub002_L2_R1" in outfiles[1][0] + assert "sub002_L2_R2" in outfiles[1][1] + assert "sub002_L3_R2" in outfiles[2][0] + assert "sub002_L3_R10" in outfiles[2][1] def test_datasink(): @@ -283,26 +307,25 @@ def test_datasink(): assert ds.inputs.strip_dir == Undefined assert ds.inputs._outputs == {} - ds = nio.DataSink(base_directory='foo') - assert ds.inputs.base_directory == 'foo' + ds = nio.DataSink(base_directory="foo") + assert ds.inputs.base_directory == "foo" - ds = nio.DataSink(infields=['test']) - assert 'test' in ds.inputs.copyable_trait_names() + ds = nio.DataSink(infields=["test"]) + assert "test" in ds.inputs.copyable_trait_names() # Make dummy input file @pytest.fixture(scope="module") def dummy_input(request, tmpdir_factory): - ''' + """ Function to create a dummy file - ''' + """ # Init 
variables - input_path = tmpdir_factory.mktemp('input_data').join( - 'datasink_test_s3.txt') + input_path = tmpdir_factory.mktemp("input_data").join("datasink_test_s3.txt") # Create input file - input_path.write_binary(b'ABCD1234') + input_path.write_binary(b"ABCD1234") # Return path return str(input_path) @@ -310,35 +333,37 @@ def dummy_input(request, tmpdir_factory): # Test datasink writes to s3 properly @pytest.mark.skipif( - noboto3 or not fakes3, reason="boto3 or fakes3 library is not available") + noboto3 or not fakes3, reason="boto3 or fakes3 library is not available" +) def test_datasink_to_s3(dummy_input, tmpdir): - ''' + """ This function tests to see if the S3 functionality of a DataSink works properly - ''' + """ # Init variables ds = nio.DataSink() - bucket_name = 'test' - container = 'outputs' - attr_folder = 'text_file' - output_dir = 's3://' + bucket_name + bucket_name = "test" + container = "outputs" + attr_folder = "text_file" + output_dir = "s3://" + bucket_name # Local temporary filepaths for testing fakes3_dir = tmpdir.strpath input_path = dummy_input # Start up fake-S3 server proc = Popen( - ['fakes3', '-r', fakes3_dir, '-p', '4567'], - stdout=open(os.devnull, 'wb')) + ["fakes3", "-r", fakes3_dir, "-p", "4567"], stdout=open(os.devnull, "wb") + ) # Init boto3 s3 resource to talk with fakes3 resource = boto3.resource( - aws_access_key_id='mykey', - aws_secret_access_key='mysecret', - service_name='s3', - endpoint_url='http://127.0.0.1:4567', - use_ssl=False) - resource.meta.client.meta.events.unregister('before-sign.s3', fix_s3_host) + aws_access_key_id="mykey", + aws_secret_access_key="mysecret", + service_name="s3", + endpoint_url="http://127.0.0.1:4567", + use_ssl=False, + ) + resource.meta.client.meta.events.unregister("before-sign.s3", fix_s3_host) # Create bucket bucket = resource.create_bucket(Bucket=bucket_name) @@ -353,10 +378,10 @@ def test_datasink_to_s3(dummy_input, tmpdir): ds.run() # Get MD5sums and compare - key = '/'.join([container, attr_folder, os.path.basename(input_path)]) + key = "/".join([container, attr_folder, os.path.basename(input_path)]) obj = bucket.Object(key=key) - dst_md5 = obj.e_tag.replace('"', '') - src_md5 = hashlib.md5(open(input_path, 'rb').read()).hexdigest() + dst_md5 = obj.e_tag.replace('"', "") + src_md5 = hashlib.md5(open(input_path, "rb").read()).hexdigest() # Kill fakes3 proc.kill() @@ -367,21 +392,22 @@ def test_datasink_to_s3(dummy_input, tmpdir): # Test AWS creds read from env vars @pytest.mark.skipif( - noboto3 or not fakes3, reason="boto3 or fakes3 library is not available") + noboto3 or not fakes3, reason="boto3 or fakes3 library is not available" +) def test_aws_keys_from_env(): - ''' + """ Function to ensure the DataSink can successfully read in AWS credentials from the environment variables - ''' + """ # Init variables ds = nio.DataSink() - aws_access_key_id = 'ABCDACCESS' - aws_secret_access_key = 'DEFGSECRET' + aws_access_key_id = "ABCDACCESS" + aws_secret_access_key = "DEFGSECRET" # Set env vars - os.environ['AWS_ACCESS_KEY_ID'] = aws_access_key_id - os.environ['AWS_SECRET_ACCESS_KEY'] = aws_secret_access_key + os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id + os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key # Call function to return creds access_key_test, secret_key_test = ds._return_aws_keys() @@ -393,15 +419,15 @@ def test_aws_keys_from_env(): # Test the local copy attribute def test_datasink_localcopy(dummy_input, tmpdir): - ''' + """ Function to validate DataSink will make local copy via 
local_copy attribute - ''' + """ # Init variables local_dir = tmpdir.strpath - container = 'outputs' - attr_folder = 'text_file' + container = "outputs" + attr_folder = "text_file" # Make dummy input file and datasink input_path = dummy_input @@ -415,44 +441,50 @@ def test_datasink_localcopy(dummy_input, tmpdir): setattr(ds.inputs, attr_folder, input_path) # Expected local copy path - local_copy = os.path.join(local_dir, container, attr_folder, - os.path.basename(input_path)) + local_copy = os.path.join( + local_dir, container, attr_folder, os.path.basename(input_path) + ) # Run the datasink ds.run() # Check md5sums of both - src_md5 = hashlib.md5(open(input_path, 'rb').read()).hexdigest() - dst_md5 = hashlib.md5(open(local_copy, 'rb').read()).hexdigest() + src_md5 = hashlib.md5(open(input_path, "rb").read()).hexdigest() + dst_md5 = hashlib.md5(open(local_copy, "rb").read()).hexdigest() # Perform test assert src_md5 == dst_md5 def test_datasink_substitutions(tmpdir): - indir = tmpdir.mkdir('-Tmp-nipype_ds_subs_in') - outdir = tmpdir.mkdir('-Tmp-nipype_ds_subs_out') + indir = tmpdir.mkdir("-Tmp-nipype_ds_subs_in") + outdir = tmpdir.mkdir("-Tmp-nipype_ds_subs_out") files = [] - for n in ['ababab.n', 'xabababyz.n']: + for n in ["ababab.n", "xabababyz.n"]: f = str(indir.join(n)) files.append(f) - open(f, 'w') + open(f, "w") ds = nio.DataSink( parametrization=False, base_directory=str(outdir), - substitutions=[('ababab', 'ABABAB')], + substitutions=[("ababab", "ABABAB")], # end archoring ($) is used to assure operation on the filename # instead of possible temporary directories names matches # Patterns should be more comprehendable in the real-world usage # cases since paths would be quite more sensible - regexp_substitutions=[(r'xABABAB(\w*)\.n$', r'a-\1-b.n'), - ('(.*%s)[-a]([^%s]*)$' % ((os.path.sep, ) * 2), - r'\1!\2')]) - setattr(ds.inputs, '@outdir', files) + regexp_substitutions=[ + (r"xABABAB(\w*)\.n$", r"a-\1-b.n"), + ("(.*%s)[-a]([^%s]*)$" % ((os.path.sep,) * 2), r"\1!\2"), + ], + ) + setattr(ds.inputs, "@outdir", files) ds.run() - assert sorted([os.path.basename(x) for - x in glob.glob(os.path.join(str(outdir), '*'))]) \ - == ['!-yz-b.n', 'ABABAB.n'] # so we got re used 2nd and both patterns + assert sorted( + [os.path.basename(x) for x in glob.glob(os.path.join(str(outdir), "*"))] + ) == [ + "!-yz-b.n", + "ABABAB.n", + ] # so we got re used 2nd and both patterns @pytest.fixture() @@ -461,8 +493,8 @@ def _temp_analyze_files(tmpdir): img_dir = tmpdir.mkdir("img") orig_img = img_dir.join("orig.img") orig_hdr = img_dir.join("orig.hdr") - orig_img.open('w') - orig_hdr.open('w') + orig_img.open("w") + orig_hdr.open("w") return orig_img.strpath, orig_hdr.strpath @@ -471,40 +503,40 @@ def test_datasink_copydir_1(_temp_analyze_files, tmpdir): outdir = tmpdir pth, fname = os.path.split(orig_img) ds = nio.DataSink( - base_directory=outdir.mkdir("basedir").strpath, parameterization=False) - setattr(ds.inputs, '@outdir', pth) + base_directory=outdir.mkdir("basedir").strpath, parameterization=False + ) + setattr(ds.inputs, "@outdir", pth) ds.run() sep = os.path.sep - assert tmpdir.join('basedir', pth.split(sep)[-1], fname).check() + assert tmpdir.join("basedir", pth.split(sep)[-1], fname).check() def test_datasink_copydir_2(_temp_analyze_files, tmpdir): orig_img, orig_hdr = _temp_analyze_files pth, fname = os.path.split(orig_img) ds = nio.DataSink( - base_directory=tmpdir.mkdir("basedir").strpath, parameterization=False) + base_directory=tmpdir.mkdir("basedir").strpath, parameterization=False + ) 
ds.inputs.remove_dest_dir = True - setattr(ds.inputs, 'outdir', pth) + setattr(ds.inputs, "outdir", pth) ds.run() sep = os.path.sep - assert not tmpdir.join('basedir', pth.split(sep)[-1], fname).check() - assert tmpdir.join('basedir', 'outdir', pth.split(sep)[-1], fname).check() + assert not tmpdir.join("basedir", pth.split(sep)[-1], fname).check() + assert tmpdir.join("basedir", "outdir", pth.split(sep)[-1], fname).check() def test_datafinder_depth(tmpdir): outdir = tmpdir.strpath - os.makedirs(os.path.join(outdir, '0', '1', '2', '3')) + os.makedirs(os.path.join(outdir, "0", "1", "2", "3")) df = nio.DataFinder() - df.inputs.root_paths = os.path.join(outdir, '0') + df.inputs.root_paths = os.path.join(outdir, "0") for min_depth in range(4): for max_depth in range(min_depth, 4): df.inputs.min_depth = min_depth df.inputs.max_depth = max_depth result = df.run() - expected = [ - '{}'.format(x) for x in range(min_depth, max_depth + 1) - ] + expected = ["{}".format(x) for x in range(min_depth, max_depth + 1)] for path, exp_fname in zip(result.outputs.out_paths, expected): _, fname = os.path.split(path) assert fname == exp_fname @@ -513,12 +545,12 @@ def test_datafinder_depth(tmpdir): def test_datafinder_unpack(tmpdir): outdir = tmpdir.strpath single_res = os.path.join(outdir, "findme.txt") - open(single_res, 'a').close() - open(os.path.join(outdir, "dontfindme"), 'a').close() + open(single_res, "a").close() + open(os.path.join(outdir, "dontfindme"), "a").close() df = nio.DataFinder() df.inputs.root_paths = outdir - df.inputs.match_regex = r'.+/(?P.+)\.txt' + df.inputs.match_regex = r".+/(?P.+)\.txt" df.inputs.unpack_single = True result = df.run() print(result.outputs.out_paths) @@ -527,7 +559,7 @@ def test_datafinder_unpack(tmpdir): def test_freesurfersource(): fss = nio.FreeSurferSource() - assert fss.inputs.hemi == 'both' + assert fss.inputs.hemi == "both" assert fss.inputs.subject_id == Undefined assert fss.inputs.subjects_dir == Undefined @@ -535,7 +567,7 @@ def test_freesurfersource(): def test_freesurfersource_incorrectdir(): fss = nio.FreeSurferSource() with pytest.raises(TraitError) as err: - fss.inputs.subjects_dir = 'path/to/no/existing/directory' + fss.inputs.subjects_dir = "path/to/no/existing/directory" def test_jsonsink_input(): @@ -543,87 +575,86 @@ def test_jsonsink_input(): ds = nio.JSONFileSink() assert ds.inputs._outputs == {} - ds = nio.JSONFileSink(in_dict={'foo': 'var'}) - assert ds.inputs.in_dict == {'foo': 'var'} + ds = nio.JSONFileSink(in_dict={"foo": "var"}) + assert ds.inputs.in_dict == {"foo": "var"} - ds = nio.JSONFileSink(infields=['test']) - assert 'test' in ds.inputs.copyable_trait_names() + ds = nio.JSONFileSink(infields=["test"]) + assert "test" in ds.inputs.copyable_trait_names() -@pytest.mark.parametrize("inputs_attributes", [{ - 'new_entry': 'someValue' -}, { - 'new_entry': 'someValue', - 'test': 'testInfields' -}]) +@pytest.mark.parametrize( + "inputs_attributes", + [{"new_entry": "someValue"}, {"new_entry": "someValue", "test": "testInfields"}], +) def test_jsonsink(tmpdir, inputs_attributes): tmpdir.chdir() - js = nio.JSONFileSink(infields=['test'], in_dict={'foo': 'var'}) - setattr(js.inputs, 'contrasts.alt', 'someNestedValue') + js = nio.JSONFileSink(infields=["test"], in_dict={"foo": "var"}) + setattr(js.inputs, "contrasts.alt", "someNestedValue") expected_data = {"contrasts": {"alt": "someNestedValue"}, "foo": "var"} for key, val in inputs_attributes.items(): setattr(js.inputs, key, val) expected_data[key] = val res = js.run() - with 
open(res.outputs.out_file, 'r') as f: + with open(res.outputs.out_file, "r") as f: data = simplejson.load(f) assert data == expected_data # There are three reasons these tests will be skipped: -@pytest.mark.skipif(not have_pybids, - reason="Pybids is not installed") -@pytest.mark.skipif(not dist_is_editable('pybids'), - reason="Pybids is not installed in editable mode") +@pytest.mark.skipif(not have_pybids, reason="Pybids is not installed") +@pytest.mark.skipif( + not dist_is_editable("pybids"), reason="Pybids is not installed in editable mode" +) def test_bids_grabber(tmpdir): tmpdir.chdir() bg = nio.BIDSDataGrabber() - bg.inputs.base_dir = os.path.join(datadir, 'ds005') - bg.inputs.subject = '01' + bg.inputs.base_dir = os.path.join(datadir, "ds005") + bg.inputs.subject = "01" results = bg.run() - assert 'sub-01_T1w.nii.gz' in map(os.path.basename, results.outputs.T1w) - assert 'sub-01_task-mixedgamblestask_run-01_bold.nii.gz' in \ - map(os.path.basename, results.outputs.bold) + assert "sub-01_T1w.nii.gz" in map(os.path.basename, results.outputs.T1w) + assert "sub-01_task-mixedgamblestask_run-01_bold.nii.gz" in map( + os.path.basename, results.outputs.bold + ) -@pytest.mark.skipif(not have_pybids, - reason="Pybids is not installed") -@pytest.mark.skipif(not dist_is_editable('pybids'), - reason="Pybids is not installed in editable mode") +@pytest.mark.skipif(not have_pybids, reason="Pybids is not installed") +@pytest.mark.skipif( + not dist_is_editable("pybids"), reason="Pybids is not installed in editable mode" +) def test_bids_fields(tmpdir): tmpdir.chdir() - bg = nio.BIDSDataGrabber(infields = ['subject'], outfields = ['dwi']) - bg.inputs.base_dir = os.path.join(datadir, 'ds005') - bg.inputs.subject = '01' - bg.inputs.output_query['dwi'] = dict(datatype='dwi') + bg = nio.BIDSDataGrabber(infields=["subject"], outfields=["dwi"]) + bg.inputs.base_dir = os.path.join(datadir, "ds005") + bg.inputs.subject = "01" + bg.inputs.output_query["dwi"] = dict(datatype="dwi") results = bg.run() - assert 'sub-01_dwi.nii.gz' in map(os.path.basename, results.outputs.dwi) + assert "sub-01_dwi.nii.gz" in map(os.path.basename, results.outputs.dwi) -@pytest.mark.skipif(not have_pybids, - reason="Pybids is not installed") -@pytest.mark.skipif(not dist_is_editable('pybids'), - reason="Pybids is not installed in editable mode") +@pytest.mark.skipif(not have_pybids, reason="Pybids is not installed") +@pytest.mark.skipif( + not dist_is_editable("pybids"), reason="Pybids is not installed in editable mode" +) def test_bids_infields_outfields(tmpdir): tmpdir.chdir() - infields = ['infield1', 'infield2'] - outfields = ['outfield1', 'outfield2'] + infields = ["infield1", "infield2"] + outfields = ["outfield1", "outfield2"] bg = nio.BIDSDataGrabber(infields=infields) for outfield in outfields: - bg.inputs.output_query[outfield] = {'key': 'value'} + bg.inputs.output_query[outfield] = {"key": "value"} for infield in infields: - assert(infield in bg.inputs.traits()) - assert(not(isdefined(bg.inputs.get()[infield]))) + assert infield in bg.inputs.traits() + assert not (isdefined(bg.inputs.get()[infield])) for outfield in outfields: - assert(outfield in bg._outputs().traits()) + assert outfield in bg._outputs().traits() # now try without defining outfields bg = nio.BIDSDataGrabber() - for outfield in ['T1w', 'bold']: + for outfield in ["T1w", "bold"]: assert outfield in bg._outputs().traits() @@ -634,11 +665,11 @@ def test_SSHDataGrabber(tmpdir): """ old_cwd = tmpdir.chdir() - source_dir = tmpdir.mkdir('source') - source_hdr 
= source_dir.join('somedata.hdr') - source_dat = source_dir.join('somedata.img') - source_hdr.ensure() # create - source_dat.ensure() # create + source_dir = tmpdir.mkdir("source") + source_hdr = source_dir.join("somedata.hdr") + source_dat = source_dir.join("somedata.img") + source_hdr.ensure() # create + source_dat.ensure() # create # ssh client that connects to localhost, current user, regardless of # ~/.ssh/config @@ -647,21 +678,20 @@ def _mock_get_ssh_client(self): client = paramiko.SSHClient() client.load_system_host_keys() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - client.connect('127.0.0.1', username=os.getenv('USER'), sock=proxy, - timeout=10) + client.connect("127.0.0.1", username=os.getenv("USER"), sock=proxy, timeout=10) return client + MockSSHDataGrabber = copy.copy(nio.SSHDataGrabber) MockSSHDataGrabber._get_ssh_client = _mock_get_ssh_client # grabber to get files from source_dir matching test.hdr - ssh_grabber = MockSSHDataGrabber(infields=['test'], - outfields=['test_file']) + ssh_grabber = MockSSHDataGrabber(infields=["test"], outfields=["test_file"]) ssh_grabber.inputs.base_directory = str(source_dir) - ssh_grabber.inputs.hostname = '127.0.0.1' - ssh_grabber.inputs.field_template = dict(test_file='%s.hdr') - ssh_grabber.inputs.template = '' - ssh_grabber.inputs.template_args = dict(test_file=[['test']]) - ssh_grabber.inputs.test = 'somedata' + ssh_grabber.inputs.hostname = "127.0.0.1" + ssh_grabber.inputs.field_template = dict(test_file="%s.hdr") + ssh_grabber.inputs.template = "" + ssh_grabber.inputs.template_args = dict(test_file=[["test"]]) + ssh_grabber.inputs.test = "somedata" ssh_grabber.inputs.sort_filelist = True runtime = ssh_grabber.run() @@ -669,31 +699,33 @@ def _mock_get_ssh_client(self): # did we successfully get the header? assert runtime.outputs.test_file == str(tmpdir.join(source_hdr.basename)) # did we successfully get the data? - assert (tmpdir.join(source_hdr.basename) # header file - .new(ext='.img') # data file - .check(file=True, exists=True)) # exists? + assert ( + tmpdir.join(source_hdr.basename) # header file + .new(ext=".img") # data file + .check(file=True, exists=True) + ) # exists? 
old_cwd.chdir() def test_ExportFile(tmp_path): - testin = tmp_path / 'in.txt' - testin.write_text('test string') + testin = tmp_path / "in.txt" + testin.write_text("test string") i = nio.ExportFile() i.inputs.in_file = str(testin) - i.inputs.out_file = str(tmp_path / 'out.tsv') + i.inputs.out_file = str(tmp_path / "out.tsv") i.inputs.check_extension = True with pytest.raises(RuntimeError): i.run() i.inputs.check_extension = False i.run() - assert (tmp_path / 'out.tsv').read_text() == 'test string' - i.inputs.out_file = str(tmp_path / 'out.txt') + assert (tmp_path / "out.tsv").read_text() == "test string" + i.inputs.out_file = str(tmp_path / "out.txt") i.inputs.check_extension = True i.run() - assert (tmp_path / 'out.txt').read_text() == 'test string' + assert (tmp_path / "out.txt").read_text() == "test string" with pytest.raises(FileExistsError): i.run() i.inputs.clobber = True i.run() - assert (tmp_path / 'out.txt').read_text() == 'test string' + assert (tmp_path / "out.txt").read_text() == "test string" diff --git a/nipype/interfaces/tests/test_matlab.py b/nipype/interfaces/tests/test_matlab.py index 2576a379e7..64f1de846f 100644 --- a/nipype/interfaces/tests/test_matlab.py +++ b/nipype/interfaces/tests/test_matlab.py @@ -26,24 +26,24 @@ def clean_workspace_and_get_default_script_file(): def test_cmdline(): default_script_file = clean_workspace_and_get_default_script_file() - mi = mlab.MatlabCommand( - script='whos', script_file='testscript', mfile=False) - - assert mi.cmdline == \ - matlab_cmd + (' -nodesktop -nosplash -singleCompThread -r "fprintf(1,' - '\'Executing code at %s:\\n\',datestr(now));ver,try,' - 'whos,catch ME,fprintf(2,\'MATLAB code threw an ' - 'exception:\\n\');fprintf(2,\'%s\\n\',ME.message);if ' - 'length(ME.stack) ~= 0, fprintf(2,\'File:%s\\nName:%s\\n' - 'Line:%d\\n\',ME.stack.file,ME.stack.name,' - 'ME.stack.line);, end;end;;exit"') - - assert mi.inputs.script == 'whos' - assert mi.inputs.script_file == 'testscript' + mi = mlab.MatlabCommand(script="whos", script_file="testscript", mfile=False) + + assert mi.cmdline == matlab_cmd + ( + ' -nodesktop -nosplash -singleCompThread -r "fprintf(1,' + "'Executing code at %s:\\n',datestr(now));ver,try," + "whos,catch ME,fprintf(2,'MATLAB code threw an " + "exception:\\n');fprintf(2,'%s\\n',ME.message);if " + "length(ME.stack) ~= 0, fprintf(2,'File:%s\\nName:%s\\n" + "Line:%d\\n',ME.stack.file,ME.stack.name," + 'ME.stack.line);, end;end;;exit"' + ) + + assert mi.inputs.script == "whos" + assert mi.inputs.script_file == "testscript" + assert not os.path.exists(mi.inputs.script_file), "scriptfile should not exist" assert not os.path.exists( - mi.inputs.script_file), 'scriptfile should not exist' - assert not os.path.exists( - default_script_file), 'default scriptfile should not exist.' + default_script_file + ), "default scriptfile should not exist." 
@pytest.mark.skipif(no_matlab, reason="matlab is not available") @@ -51,8 +51,13 @@ def test_mlab_inputspec(): default_script_file = clean_workspace_and_get_default_script_file() spec = mlab.MatlabInputSpec() for k in [ - 'paths', 'script', 'nosplash', 'mfile', 'logfile', 'script_file', - 'nodesktop' + "paths", + "script", + "nosplash", + "mfile", + "logfile", + "script_file", + "nodesktop", ]: assert k in spec.copyable_trait_names() assert spec.nodesktop @@ -65,54 +70,49 @@ def test_mlab_inputspec(): def test_mlab_init(): default_script_file = clean_workspace_and_get_default_script_file() - assert mlab.MatlabCommand._cmd == 'matlab' + assert mlab.MatlabCommand._cmd == "matlab" assert mlab.MatlabCommand.input_spec == mlab.MatlabInputSpec assert mlab.MatlabCommand().cmd == matlab_cmd - mc = mlab.MatlabCommand(matlab_cmd='foo_m') - assert mc.cmd == 'foo_m' + mc = mlab.MatlabCommand(matlab_cmd="foo_m") + assert mc.cmd == "foo_m" @pytest.mark.skipif(no_matlab, reason="matlab is not available") def test_run_interface(tmpdir): default_script_file = clean_workspace_and_get_default_script_file() - mc = mlab.MatlabCommand(matlab_cmd='foo_m') - assert not os.path.exists( - default_script_file), 'scriptfile should not exist 1.' + mc = mlab.MatlabCommand(matlab_cmd="foo_m") + assert not os.path.exists(default_script_file), "scriptfile should not exist 1." with pytest.raises(ValueError): mc.run() # script is mandatory - assert not os.path.exists( - default_script_file), 'scriptfile should not exist 2.' + assert not os.path.exists(default_script_file), "scriptfile should not exist 2." if os.path.exists(default_script_file): # cleanup os.remove(default_script_file) - mc.inputs.script = 'a=1;' - assert not os.path.exists( - default_script_file), 'scriptfile should not exist 3.' + mc.inputs.script = "a=1;" + assert not os.path.exists(default_script_file), "scriptfile should not exist 3." with pytest.raises(IOError): mc.run() # foo_m is not an executable - assert os.path.exists(default_script_file), 'scriptfile should exist 3.' + assert os.path.exists(default_script_file), "scriptfile should exist 3." if os.path.exists(default_script_file): # cleanup os.remove(default_script_file) cwd = tmpdir.chdir() # bypasses ubuntu dash issue - mc = mlab.MatlabCommand(script='foo;', paths=[tmpdir.strpath], mfile=True) - assert not os.path.exists( - default_script_file), 'scriptfile should not exist 4.' + mc = mlab.MatlabCommand(script="foo;", paths=[tmpdir.strpath], mfile=True) + assert not os.path.exists(default_script_file), "scriptfile should not exist 4." with pytest.raises(RuntimeError): mc.run() - assert os.path.exists(default_script_file), 'scriptfile should exist 4.' + assert os.path.exists(default_script_file), "scriptfile should exist 4." if os.path.exists(default_script_file): # cleanup os.remove(default_script_file) # bypasses ubuntu dash issue - res = mlab.MatlabCommand( - script='a=1;', paths=[tmpdir.strpath], mfile=True).run() + res = mlab.MatlabCommand(script="a=1;", paths=[tmpdir.strpath], mfile=True).run() assert res.runtime.returncode == 0 - assert os.path.exists(default_script_file), 'scriptfile should exist 5.' + assert os.path.exists(default_script_file), "scriptfile should exist 5." cwd.chdir() @@ -121,8 +121,7 @@ def test_set_matlabcmd(): default_script_file = clean_workspace_and_get_default_script_file() mi = mlab.MatlabCommand() - mi.set_default_matlab_cmd('foo') - assert not os.path.exists( - default_script_file), 'scriptfile should not exist.' 
- assert mi._default_matlab_cmd == 'foo' + mi.set_default_matlab_cmd("foo") + assert not os.path.exists(default_script_file), "scriptfile should not exist." + assert mi._default_matlab_cmd == "foo" mi.set_default_matlab_cmd(matlab_cmd) diff --git a/nipype/interfaces/tests/test_nilearn.py b/nipype/interfaces/tests/test_nilearn.py index 79432bc180..6c3a52a670 100644 --- a/nipype/interfaces/tests/test_nilearn.py +++ b/nipype/interfaces/tests/test_nilearn.py @@ -14,38 +14,39 @@ no_nilearn = True try: - __import__('nilearn') + __import__("nilearn") no_nilearn = False except ImportError: pass @pytest.mark.skipif(no_nilearn, reason="the nilearn library is not available") -class TestSignalExtraction(): +class TestSignalExtraction: filenames = { - 'in_file': 'fmri.nii', - 'label_files': 'labels.nii', - '4d_label_file': '4dlabels.nii', - 'out_file': 'signals.tsv' + "in_file": "fmri.nii", + "label_files": "labels.nii", + "4d_label_file": "4dlabels.nii", + "out_file": "signals.tsv", } - labels = ['CSF', 'GrayMatter', 'WhiteMatter'] - global_labels = ['GlobalSignal'] + labels + labels = ["CSF", "GrayMatter", "WhiteMatter"] + global_labels = ["GlobalSignal"] + labels - @pytest.fixture(autouse=True, scope='class') + @pytest.fixture(autouse=True, scope="class") def setup_class(self, tmpdir_factory): tempdir = tmpdir_factory.mktemp("test") self.orig_dir = tempdir.chdir() - utils.save_toy_nii(self.fake_fmri_data, self.filenames['in_file']) - utils.save_toy_nii(self.fake_label_data, self.filenames['label_files']) + utils.save_toy_nii(self.fake_fmri_data, self.filenames["in_file"]) + utils.save_toy_nii(self.fake_label_data, self.filenames["label_files"]) def test_signal_extract_no_shared(self): # run iface.SignalExtraction( - in_file=self.filenames['in_file'], - label_files=self.filenames['label_files'], + in_file=self.filenames["in_file"], + label_files=self.filenames["label_files"], class_labels=self.labels, - incl_shared_variance=False).run() + incl_shared_variance=False, + ).run() # assert self.assert_expected_output(self.labels, self.base_wanted) @@ -53,44 +54,44 @@ def test_signal_extr_bad_label_list(self): # run with pytest.raises(ValueError): iface.SignalExtraction( - in_file=self.filenames['in_file'], - label_files=self.filenames['label_files'], - class_labels=['bad'], - incl_shared_variance=False).run() + in_file=self.filenames["in_file"], + label_files=self.filenames["label_files"], + class_labels=["bad"], + incl_shared_variance=False, + ).run() def test_signal_extr_equiv_4d_no_shared(self): self._test_4d_label( - self.base_wanted, - self.fake_equiv_4d_label_data, - incl_shared_variance=False) + self.base_wanted, self.fake_equiv_4d_label_data, incl_shared_variance=False + ) def test_signal_extr_4d_no_shared(self): # set up & run & assert self._test_4d_label( - self.fourd_wanted, - self.fake_4d_label_data, - incl_shared_variance=False) + self.fourd_wanted, self.fake_4d_label_data, incl_shared_variance=False + ) def test_signal_extr_global_no_shared(self): # set up - wanted_global = [[-4. / 6], [-1. / 6], [3. / 6], [-1. / 6], [-7. 
/ 6]] + wanted_global = [[-4.0 / 6], [-1.0 / 6], [3.0 / 6], [-1.0 / 6], [-7.0 / 6]] for i, vals in enumerate(self.base_wanted): wanted_global[i].extend(vals) # run iface.SignalExtraction( - in_file=self.filenames['in_file'], - label_files=self.filenames['label_files'], + in_file=self.filenames["in_file"], + label_files=self.filenames["label_files"], class_labels=self.labels, include_global=True, - incl_shared_variance=False).run() + incl_shared_variance=False, + ).run() # assert self.assert_expected_output(self.global_labels, wanted_global) def test_signal_extr_4d_global_no_shared(self): # set up - wanted_global = [[3. / 8], [-3. / 8], [1. / 8], [-7. / 8], [-9. / 8]] + wanted_global = [[3.0 / 8], [-3.0 / 8], [1.0 / 8], [-7.0 / 8], [-9.0 / 8]] for i, vals in enumerate(self.fourd_wanted): wanted_global[i].extend(vals) @@ -99,7 +100,8 @@ def test_signal_extr_4d_global_no_shared(self): wanted_global, self.fake_4d_label_data, include_global=True, - incl_shared_variance=False) + incl_shared_variance=False, + ) def test_signal_extr_shared(self): # set up @@ -109,45 +111,45 @@ def test_signal_extr_shared(self): wanted_row = [] for reg in range(self.fake_4d_label_data.shape[3]): region = self.fake_4d_label_data[:, :, :, reg].flatten() - wanted_row.append( - (volume * region).sum() / (region * region).sum()) + wanted_row.append((volume * region).sum() / (region * region).sum()) wanted.append(wanted_row) # run & assert self._test_4d_label(wanted, self.fake_4d_label_data) def test_signal_extr_traits_valid(self): - ''' Test a node using the SignalExtraction interface. + """ Test a node using the SignalExtraction interface. Unlike interface.run(), node.run() checks the traits - ''' + """ # run node = pe.Node( iface.SignalExtraction( - in_file=os.path.abspath(self.filenames['in_file']), - label_files=os.path.abspath(self.filenames['label_files']), + in_file=os.path.abspath(self.filenames["in_file"]), + label_files=os.path.abspath(self.filenames["label_files"]), class_labels=self.labels, - incl_shared_variance=False), - name='SignalExtraction') + incl_shared_variance=False, + ), + name="SignalExtraction", + ) node.run() # assert # just checking that it passes trait validations - def _test_4d_label(self, - wanted, - fake_labels, - include_global=False, - incl_shared_variance=True): + def _test_4d_label( + self, wanted, fake_labels, include_global=False, incl_shared_variance=True + ): # set up - utils.save_toy_nii(fake_labels, self.filenames['4d_label_file']) + utils.save_toy_nii(fake_labels, self.filenames["4d_label_file"]) # run iface.SignalExtraction( - in_file=self.filenames['in_file'], - label_files=self.filenames['4d_label_file'], + in_file=self.filenames["in_file"], + label_files=self.filenames["4d_label_file"], class_labels=self.labels, incl_shared_variance=incl_shared_variance, - include_global=include_global).run() + include_global=include_global, + ).run() wanted_labels = self.global_labels if include_global else self.labels @@ -155,12 +157,11 @@ def _test_4d_label(self, self.assert_expected_output(wanted_labels, wanted) def assert_expected_output(self, labels, wanted): - with open(self.filenames['out_file'], 'r') as output: + with open(self.filenames["out_file"], "r") as output: got = [line.split() for line in output] labels_got = got.pop(0) # remove header assert labels_got == labels - assert len(got) == self.fake_fmri_data.shape[ - 3], 'num rows and num volumes' + assert len(got) == self.fake_fmri_data.shape[3], "num rows and num volumes" # convert from string to float got = [[float(num) for 
num in row] for row in got] for i, time in enumerate(got): @@ -168,33 +169,52 @@ def assert_expected_output(self, labels, wanted): for j, segment in enumerate(time): npt.assert_almost_equal(segment, wanted[i][j], decimal=1) - -# dj: self doesnt have orig_dir at this point, not sure how to change it. -# should work without it -# def teardown_class(self): -# self.orig_dir.chdir() - - fake_fmri_data = np.array([[[[2, -1, 4, -2, 3], [4, -2, -5, -1, 0]], - [[-2, 0, 1, 4, 4], [-5, 3, -3, 1, -5]]], - [[[2, -2, -1, -2, -5], [3, 0, 3, -5, -2]], - [[-4, -2, -2, 1, -2], [3, 1, 4, -3, -2]]]]) + # dj: self doesnt have orig_dir at this point, not sure how to change it. + # should work without it + # def teardown_class(self): + # self.orig_dir.chdir() + + fake_fmri_data = np.array( + [ + [ + [[2, -1, 4, -2, 3], [4, -2, -5, -1, 0]], + [[-2, 0, 1, 4, 4], [-5, 3, -3, 1, -5]], + ], + [ + [[2, -2, -1, -2, -5], [3, 0, 3, -5, -2]], + [[-4, -2, -2, 1, -2], [3, 1, 4, -3, -2]], + ], + ] + ) fake_label_data = np.array([[[1, 0], [3, 1]], [[2, 0], [1, 3]]]) fake_equiv_4d_label_data = np.array( - [[[[1., 0., 0.], [0., 0., 0.]], [[0., 0., 1.], [1., 0., 0.]]], - [[[0., 1., 0.], [0., 0., 0.]], [[1., 0., 0.], [0., 0., 1.]]]]) - - base_wanted = [[-2.33333, 2, .5], [0, -2, .5], [-.3333333, -1, 2.5], - [0, -2, .5], [-1.3333333, -5, 1]] - - fake_4d_label_data = np.array([[[[0.2, 0.3, 0.5], [0.1, 0.1, 0.8]], - [[0.1, 0.3, 0.6], [0.3, 0.4, 0.3]]], - [[[0.2, 0.2, 0.6], [0., 0.3, 0.7]], - [[0.3, 0.3, 0.4], [0.3, 0.4, 0.3]]]]) - - fourd_wanted = [[-5.0652173913, -5.44565217391, 5.50543478261], [ - -7.02173913043, 11.1847826087, -4.33152173913 - ], [-19.0869565217, 21.2391304348, - -4.57608695652], [5.19565217391, -3.66304347826, -1.51630434783], - [-12.0, 3., 0.5]] + [ + [[[1.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]]], + [[[0.0, 1.0, 0.0], [0.0, 0.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]], + ] + ) + + base_wanted = [ + [-2.33333, 2, 0.5], + [0, -2, 0.5], + [-0.3333333, -1, 2.5], + [0, -2, 0.5], + [-1.3333333, -5, 1], + ] + + fake_4d_label_data = np.array( + [ + [[[0.2, 0.3, 0.5], [0.1, 0.1, 0.8]], [[0.1, 0.3, 0.6], [0.3, 0.4, 0.3]]], + [[[0.2, 0.2, 0.6], [0.0, 0.3, 0.7]], [[0.3, 0.3, 0.4], [0.3, 0.4, 0.3]]], + ] + ) + + fourd_wanted = [ + [-5.0652173913, -5.44565217391, 5.50543478261], + [-7.02173913043, 11.1847826087, -4.33152173913], + [-19.0869565217, 21.2391304348, -4.57608695652], + [5.19565217391, -3.66304347826, -1.51630434783], + [-12.0, 3.0, 0.5], + ] diff --git a/nipype/interfaces/utility/__init__.py b/nipype/interfaces/utility/__init__.py index 084acb569c..f5556e7263 100644 --- a/nipype/interfaces/utility/__init__.py +++ b/nipype/interfaces/utility/__init__.py @@ -7,7 +7,6 @@ Requires Packages to be installed """ -from .base import (IdentityInterface, Rename, Select, Split, Merge, - AssertEqual) +from .base import IdentityInterface, Rename, Select, Split, Merge, AssertEqual from .csv import CSVReader from .wrappers import Function diff --git a/nipype/interfaces/utility/base.py b/nipype/interfaces/utility/base.py index 3261be0c53..9ff13011b7 100644 --- a/nipype/interfaces/utility/base.py +++ b/nipype/interfaces/utility/base.py @@ -10,9 +10,20 @@ import re import numpy as np -from ..base import (traits, TraitedSpec, DynamicTraitedSpec, File, Undefined, - isdefined, OutputMultiPath, InputMultiPath, BaseInterface, - BaseInterfaceInputSpec, Str, SimpleInterface) +from ..base import ( + traits, + TraitedSpec, + DynamicTraitedSpec, + File, + Undefined, + isdefined, + OutputMultiPath, + 
InputMultiPath, + BaseInterface, + BaseInterfaceInputSpec, + Str, + SimpleInterface, +) from ..io import IOBase, add_traits from ...utils.filemanip import ensure_list, copyfile, split_filename @@ -42,20 +53,20 @@ class IdentityInterface(IOBase): >>> out = ii2.run() # doctest: +SKIP ValueError: IdentityInterface requires a value for input 'b' because it was listed in 'fields' Interface IdentityInterface failed to run. """ + input_spec = DynamicTraitedSpec output_spec = DynamicTraitedSpec def __init__(self, fields=None, mandatory_inputs=True, **inputs): super(IdentityInterface, self).__init__(**inputs) if fields is None or not fields: - raise ValueError( - 'Identity Interface fields must be a non-empty list') + raise ValueError("Identity Interface fields must be a non-empty list") # Each input must be in the fields. for in_field in inputs: if in_field not in fields: raise ValueError( - 'Identity Interface input is not in the fields: %s' % - in_field) + "Identity Interface input is not in the fields: %s" % in_field + ) self._fields = fields self._mandatory_inputs = mandatory_inputs add_traits(self.inputs, fields) @@ -73,9 +84,11 @@ def _list_outputs(self): for key in self._fields: value = getattr(self.inputs, key) if not isdefined(value): - msg = "%s requires a value for input '%s' because it was listed in 'fields'. \ - You can turn off mandatory inputs checking by passing mandatory_inputs = False to the constructor." % \ - (self.__class__.__name__, key) + msg = ( + "%s requires a value for input '%s' because it was listed in 'fields'. \ + You can turn off mandatory inputs checking by passing mandatory_inputs = False to the constructor." + % (self.__class__.__name__, key) + ) raise ValueError(msg) outputs = self._outputs().get() @@ -88,22 +101,23 @@ def _list_outputs(self): class MergeInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): axis = traits.Enum( - 'vstack', - 'hstack', + "vstack", + "hstack", usedefault=True, - desc= - 'direction in which to merge, hstack requires same number of elements in each input' + desc="direction in which to merge, hstack requires same number of elements in each input", ) no_flatten = traits.Bool( False, usedefault=True, - desc='append to outlist instead of extending in vstack mode') + desc="append to outlist instead of extending in vstack mode", + ) ravel_inputs = traits.Bool( - False, usedefault=True, desc='ravel inputs when no_flatten is False') + False, usedefault=True, desc="ravel inputs when no_flatten is False" + ) class MergeOutputSpec(TraitedSpec): - out = traits.List(desc='Merged output') + out = traits.List(desc="Merged output") def _ravel(in_val): @@ -156,6 +170,7 @@ class Merge(IOBase): >>> out.outputs.out [[1, [2, 5], 3]] """ + input_spec = MergeInputSpec output_spec = MergeOutputSpec @@ -163,7 +178,7 @@ def __init__(self, numinputs=0, **inputs): super(Merge, self).__init__(**inputs) self._numinputs = numinputs if numinputs >= 1: - input_names = ['in%d' % (i + 1) for i in range(numinputs)] + input_names = ["in%d" % (i + 1) for i in range(numinputs)] else: input_names = [] add_traits(self.inputs, input_names) @@ -175,42 +190,42 @@ def _list_outputs(self): if self._numinputs < 1: return outputs else: - getval = lambda idx: getattr(self.inputs, 'in%d' % (idx + 1)) + getval = lambda idx: getattr(self.inputs, "in%d" % (idx + 1)) values = [ - getval(idx) for idx in range(self._numinputs) - if isdefined(getval(idx)) + getval(idx) for idx in range(self._numinputs) if isdefined(getval(idx)) ] - if self.inputs.axis == 'vstack': + if self.inputs.axis == 
"vstack": for value in values: if isinstance(value, list) and not self.inputs.no_flatten: - out.extend( - _ravel(value) if self.inputs.ravel_inputs else value) + out.extend(_ravel(value) if self.inputs.ravel_inputs else value) else: out.append(value) else: lists = [ensure_list(val) for val in values] out = [[val[i] for val in lists] for i in range(len(lists[0]))] - outputs['out'] = out + outputs["out"] = out return outputs class RenameInputSpec(DynamicTraitedSpec): in_file = File(exists=True, mandatory=True, desc="file to rename") keep_ext = traits.Bool( - desc=("Keep in_file extension, replace " - "non-extension component of name")) + desc=("Keep in_file extension, replace " "non-extension component of name") + ) format_string = Str( - mandatory=True, desc="Python formatting string for output template") - parse_string = Str(desc="Python regexp parse string to define " - "replacement inputs") + mandatory=True, desc="Python formatting string for output template" + ) + parse_string = Str( + desc="Python regexp parse string to define " "replacement inputs" + ) use_fullpath = traits.Bool( - False, usedefault=True, desc="Use full path as input to regex parser") + False, usedefault=True, desc="Use full path as input to regex parser" + ) class RenameOutputSpec(TraitedSpec): - out_file = File( - exists=True, desc="softlink to original file with new name") + out_file = File(exists=True, desc="softlink to original file with new name") class Rename(SimpleInterface, IOBase): @@ -255,6 +270,7 @@ class Rename(SimpleInterface, IOBase): 'subj_201_epi_run02.nii' # doctest: +SKIP """ + input_spec = RenameInputSpec output_spec = RenameOutputSpec @@ -270,12 +286,12 @@ def __init__(self, format_string=None, **inputs): def _rename(self): fmt_dict = dict() if isdefined(self.inputs.parse_string): - if isdefined( - self.inputs.use_fullpath) and self.inputs.use_fullpath: + if isdefined(self.inputs.use_fullpath) and self.inputs.use_fullpath: m = re.search(self.inputs.parse_string, self.inputs.in_file) else: - m = re.search(self.inputs.parse_string, - os.path.split(self.inputs.in_file)[1]) + m = re.search( + self.inputs.parse_string, os.path.split(self.inputs.in_file)[1] + ) if m: fmt_dict.update(m.groupdict()) for field in self.fmt_fields: @@ -283,10 +299,9 @@ def _rename(self): if isdefined(val): fmt_dict[field] = getattr(self.inputs, field) if self.inputs.keep_ext: - fmt_string = "".join([ - self.inputs.format_string, - split_filename(self.inputs.in_file)[2] - ]) + fmt_string = "".join( + [self.inputs.format_string, split_filename(self.inputs.in_file)[2]] + ) else: fmt_string = self.inputs.format_string return fmt_string % fmt_dict @@ -295,22 +310,20 @@ def _run_interface(self, runtime): runtime.returncode = 0 out_file = os.path.join(runtime.cwd, self._rename()) _ = copyfile(self.inputs.in_file, out_file) - self._results['out_file'] = out_file + self._results["out_file"] = out_file return runtime class SplitInputSpec(BaseInterfaceInputSpec): - inlist = traits.List( - traits.Any, mandatory=True, desc='list of values to split') + inlist = traits.List(traits.Any, mandatory=True, desc="list of values to split") splits = traits.List( traits.Int, mandatory=True, - desc='Number of outputs in each split - should add to number of inputs' + desc="Number of outputs in each split - should add to number of inputs", ) squeeze = traits.Bool( - False, - usedefault=True, - desc='unfold one-element splits removing the list') + False, usedefault=True, desc="unfold one-element splits removing the list" + ) class Split(IOBase): @@ 
-334,7 +347,7 @@ class Split(IOBase): def _add_output_traits(self, base): undefined_traits = {} for i in range(len(self.inputs.splits)): - key = 'out%d' % (i + 1) + key = "out%d" % (i + 1) base.add_trait(key, traits.Any) undefined_traits[key] = Undefined base.trait_set(trait_change_notify=False, **undefined_traits) @@ -344,28 +357,29 @@ def _list_outputs(self): outputs = self._outputs().get() if isdefined(self.inputs.splits): if sum(self.inputs.splits) != len(self.inputs.inlist): - raise RuntimeError('sum of splits != num of list elements') + raise RuntimeError("sum of splits != num of list elements") splits = [0] splits.extend(self.inputs.splits) splits = np.cumsum(splits) for i in range(len(splits) - 1): - val = np.array( - self.inputs.inlist)[splits[i]:splits[i + 1]].tolist() + val = np.array(self.inputs.inlist)[splits[i] : splits[i + 1]].tolist() if self.inputs.squeeze and len(val) == 1: val = val[0] - outputs['out%d' % (i + 1)] = val + outputs["out%d" % (i + 1)] = val return outputs class SelectInputSpec(BaseInterfaceInputSpec): inlist = InputMultiPath( - traits.Any, mandatory=True, desc='list of values to choose from') + traits.Any, mandatory=True, desc="list of values to choose from" + ) index = InputMultiPath( - traits.Int, mandatory=True, desc='0-based indices of values to choose') + traits.Int, mandatory=True, desc="0-based indices of values to choose" + ) class SelectOutputSpec(TraitedSpec): - out = OutputMultiPath(traits.Any, desc='list of selected values') + out = OutputMultiPath(traits.Any, desc="list of selected values") class Select(IOBase): @@ -393,9 +407,8 @@ class Select(IOBase): def _list_outputs(self): outputs = self._outputs().get() - out = np.array(self.inputs.inlist)[np.array( - self.inputs.index)].tolist() - outputs['out'] = out + out = np.array(self.inputs.inlist)[np.array(self.inputs.index)].tolist() + outputs["out"] = out return outputs @@ -409,9 +422,10 @@ class AssertEqual(BaseInterface): def _run_interface(self, runtime): import nibabel as nb + data1 = nb.load(self.inputs.volume1).get_data() data2 = nb.load(self.inputs.volume2).get_data() if not np.all(data1 == data2): - raise RuntimeError('Input images are not exactly equal') + raise RuntimeError("Input images are not exactly equal") return runtime diff --git a/nipype/interfaces/utility/csv.py b/nipype/interfaces/utility/csv.py index d22b146d74..04cb28438e 100644 --- a/nipype/interfaces/utility/csv.py +++ b/nipype/interfaces/utility/csv.py @@ -3,20 +3,17 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """CSV Handling utilities """ -from ..base import (traits, TraitedSpec, DynamicTraitedSpec, File, - BaseInterface) +from ..base import traits, TraitedSpec, DynamicTraitedSpec, File, BaseInterface from ..io import add_traits class CSVReaderInputSpec(DynamicTraitedSpec, TraitedSpec): in_file = File( - exists=True, - mandatory=True, - desc='Input comma-seperated value (CSV) file') + exists=True, mandatory=True, desc="Input comma-seperated value (CSV) file" + ) header = traits.Bool( - False, - usedefault=True, - desc='True if the first line is a column header') + False, usedefault=True, desc="True if the first line is a column header" + ) class CSVReader(BaseInterface): @@ -46,6 +43,7 @@ class CSVReader(BaseInterface): True """ + input_spec = CSVReaderInputSpec output_spec = DynamicTraitedSpec _always_run = True @@ -56,18 +54,17 @@ def _append_entry(self, outputs, entry): return outputs def _parse_line(self, line): - line = line.replace('\n', '') - entry = [x.strip() for x in line.split(',')] + line = line.replace("\n", 
"") + entry = [x.strip() for x in line.split(",")] return entry def _get_outfields(self): - with open(self.inputs.in_file, 'r') as fid: + with open(self.inputs.in_file, "r") as fid: entry = self._parse_line(fid.readline()) if self.inputs.header: self._outfields = tuple(entry) else: - self._outfields = tuple( - ['column_' + str(x) for x in range(len(entry))]) + self._outfields = tuple(["column_" + str(x) for x in range(len(entry))]) return self._outfields def _run_interface(self, runtime): @@ -85,7 +82,7 @@ def _list_outputs(self): isHeader = True for key in self._outfields: outputs[key] = [] # initialize outfields - with open(self.inputs.in_file, 'r') as fid: + with open(self.inputs.in_file, "r") as fid: for line in fid.readlines(): if self.inputs.header and isHeader: # skip header line isHeader = False diff --git a/nipype/interfaces/utility/tests/test_auto_AssertEqual.py b/nipype/interfaces/utility/tests/test_auto_AssertEqual.py index c550a5efba..2045d9149a 100644 --- a/nipype/interfaces/utility/tests/test_auto_AssertEqual.py +++ b/nipype/interfaces/utility/tests/test_auto_AssertEqual.py @@ -4,14 +4,8 @@ def test_AssertEqual_inputs(): input_map = dict( - volume1=dict( - extensions=None, - mandatory=True, - ), - volume2=dict( - extensions=None, - mandatory=True, - ), + volume1=dict(extensions=None, mandatory=True,), + volume2=dict(extensions=None, mandatory=True,), ) inputs = AssertEqual.input_spec() diff --git a/nipype/interfaces/utility/tests/test_auto_CSVReader.py b/nipype/interfaces/utility/tests/test_auto_CSVReader.py index 8bd60f55ad..98adf59f6d 100644 --- a/nipype/interfaces/utility/tests/test_auto_CSVReader.py +++ b/nipype/interfaces/utility/tests/test_auto_CSVReader.py @@ -4,17 +4,15 @@ def test_CSVReader_inputs(): input_map = dict( - header=dict(usedefault=True, ), - in_file=dict( - extensions=None, - mandatory=True, - ), + header=dict(usedefault=True,), in_file=dict(extensions=None, mandatory=True,), ) inputs = CSVReader.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CSVReader_outputs(): output_map = dict() outputs = CSVReader.output_spec() diff --git a/nipype/interfaces/utility/tests/test_auto_Function.py b/nipype/interfaces/utility/tests/test_auto_Function.py index f4e353bb27..f2713a4407 100644 --- a/nipype/interfaces/utility/tests/test_auto_Function.py +++ b/nipype/interfaces/utility/tests/test_auto_Function.py @@ -3,12 +3,14 @@ def test_Function_inputs(): - input_map = dict(function_str=dict(mandatory=True, ), ) + input_map = dict(function_str=dict(mandatory=True,),) inputs = Function.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Function_outputs(): output_map = dict() outputs = Function.output_spec() diff --git a/nipype/interfaces/utility/tests/test_auto_IdentityInterface.py b/nipype/interfaces/utility/tests/test_auto_IdentityInterface.py index be12e0bad7..7adb95ee88 100644 --- a/nipype/interfaces/utility/tests/test_auto_IdentityInterface.py +++ b/nipype/interfaces/utility/tests/test_auto_IdentityInterface.py @@ -9,6 +9,8 @@ def test_IdentityInterface_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_IdentityInterface_outputs(): output_map = dict() outputs = IdentityInterface.output_spec() diff --git 
a/nipype/interfaces/utility/tests/test_auto_Merge.py b/nipype/interfaces/utility/tests/test_auto_Merge.py index a7ed5c3a31..7658529a9d 100644 --- a/nipype/interfaces/utility/tests/test_auto_Merge.py +++ b/nipype/interfaces/utility/tests/test_auto_Merge.py @@ -4,17 +4,19 @@ def test_Merge_inputs(): input_map = dict( - axis=dict(usedefault=True, ), - no_flatten=dict(usedefault=True, ), - ravel_inputs=dict(usedefault=True, ), + axis=dict(usedefault=True,), + no_flatten=dict(usedefault=True,), + ravel_inputs=dict(usedefault=True,), ) inputs = Merge.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Merge_outputs(): - output_map = dict(out=dict(), ) + output_map = dict(out=dict(),) outputs = Merge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/utility/tests/test_auto_Rename.py b/nipype/interfaces/utility/tests/test_auto_Rename.py index 177f205ef0..a722afac0e 100644 --- a/nipype/interfaces/utility/tests/test_auto_Rename.py +++ b/nipype/interfaces/utility/tests/test_auto_Rename.py @@ -4,22 +4,21 @@ def test_Rename_inputs(): input_map = dict( - format_string=dict(mandatory=True, ), - in_file=dict( - extensions=None, - mandatory=True, - ), + format_string=dict(mandatory=True,), + in_file=dict(extensions=None, mandatory=True,), keep_ext=dict(), parse_string=dict(), - use_fullpath=dict(usedefault=True, ), + use_fullpath=dict(usedefault=True,), ) inputs = Rename.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Rename_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Rename.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/utility/tests/test_auto_Select.py b/nipype/interfaces/utility/tests/test_auto_Select.py index abc81b45da..76f9061446 100644 --- a/nipype/interfaces/utility/tests/test_auto_Select.py +++ b/nipype/interfaces/utility/tests/test_auto_Select.py @@ -3,17 +3,16 @@ def test_Select_inputs(): - input_map = dict( - index=dict(mandatory=True, ), - inlist=dict(mandatory=True, ), - ) + input_map = dict(index=dict(mandatory=True,), inlist=dict(mandatory=True,),) inputs = Select.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Select_outputs(): - output_map = dict(out=dict(), ) + output_map = dict(out=dict(),) outputs = Select.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/utility/tests/test_auto_Split.py b/nipype/interfaces/utility/tests/test_auto_Split.py index 20dbd948dc..901162ecab 100644 --- a/nipype/interfaces/utility/tests/test_auto_Split.py +++ b/nipype/interfaces/utility/tests/test_auto_Split.py @@ -4,15 +4,17 @@ def test_Split_inputs(): input_map = dict( - inlist=dict(mandatory=True, ), - splits=dict(mandatory=True, ), - squeeze=dict(usedefault=True, ), + inlist=dict(mandatory=True,), + splits=dict(mandatory=True,), + squeeze=dict(usedefault=True,), ) inputs = Split.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Split_outputs(): output_map = dict() outputs = Split.output_spec() diff --git 
a/nipype/interfaces/utility/tests/test_base.py b/nipype/interfaces/utility/tests/test_base.py index 0356452638..a19cff16b4 100644 --- a/nipype/interfaces/utility/tests/test_base.py +++ b/nipype/interfaces/utility/tests/test_base.py @@ -22,9 +22,8 @@ def test_rename(tmpdir): # Now a string-formatting version rn = utility.Rename( - in_file="file.txt", - format_string="%(field1)s_file%(field2)d", - keep_ext=True) + in_file="file.txt", format_string="%(field1)s_file%(field2)d", keep_ext=True + ) # Test .input field creation assert hasattr(rn.inputs, "field1") assert hasattr(rn.inputs, "field2") @@ -38,45 +37,42 @@ def test_rename(tmpdir): assert os.path.exists(outfile) -@pytest.mark.parametrize("args, expected", [({}, ([0], [1, 2, 3])), - ({ - "squeeze": True - }, (0, [1, 2, 3]))]) +@pytest.mark.parametrize( + "args, expected", [({}, ([0], [1, 2, 3])), ({"squeeze": True}, (0, [1, 2, 3]))] +) def test_split(tmpdir, args, expected): tmpdir.chdir() node = pe.Node( utility.Split(inlist=list(range(4)), splits=[1, 3], **args), - name='split_squeeze') + name="split_squeeze", + ) res = node.run() assert res.outputs.out1 == expected[0] assert res.outputs.out2 == expected[1] -@pytest.mark.parametrize("args, kwargs, in_lists, expected", [ - ([3], {}, [0, [1, 2], [3, 4, 5]], [0, 1, 2, 3, 4, 5]), - ([0], {}, None, None), - ([], {}, [], []), - ([], {}, [0, [1, 2], [3, 4, 5]], [0, [1, 2], [3, 4, 5]]), - ([3], { - 'axis': 'hstack' - }, [[0], [1, 2], [3, 4, 5]], [[0, 1, 3]]), - ([3], { - 'axis': 'hstack' - }, [[0, 1], [2, 3], [4, 5]], [[0, 2, 4], [1, 3, 5]]), - ([3], { - 'axis': 'hstack' - }, [[0, 1], [2, 3], [4, 5]], [[0, 2, 4], [1, 3, 5]]), -]) +@pytest.mark.parametrize( + "args, kwargs, in_lists, expected", + [ + ([3], {}, [0, [1, 2], [3, 4, 5]], [0, 1, 2, 3, 4, 5]), + ([0], {}, None, None), + ([], {}, [], []), + ([], {}, [0, [1, 2], [3, 4, 5]], [0, [1, 2], [3, 4, 5]]), + ([3], {"axis": "hstack"}, [[0], [1, 2], [3, 4, 5]], [[0, 1, 3]]), + ([3], {"axis": "hstack"}, [[0, 1], [2, 3], [4, 5]], [[0, 2, 4], [1, 3, 5]]), + ([3], {"axis": "hstack"}, [[0, 1], [2, 3], [4, 5]], [[0, 2, 4], [1, 3, 5]]), + ], +) def test_merge(tmpdir, args, kwargs, in_lists, expected): tmpdir.chdir() - node = pe.Node(utility.Merge(*args, **kwargs), name='merge') + node = pe.Node(utility.Merge(*args, **kwargs), name="merge") numinputs = args[0] if args else 0 if numinputs >= 1: for i in range(1, numinputs + 1): - setattr(node.inputs, 'in{:d}'.format(i), in_lists[i - 1]) + setattr(node.inputs, "in{:d}".format(i), in_lists[i - 1]) res = node.run() if numinputs < 1: diff --git a/nipype/interfaces/utility/tests/test_csv.py b/nipype/interfaces/utility/tests/test_csv.py index 3c15c81239..ffd69f000f 100644 --- a/nipype/interfaces/utility/tests/test_csv.py +++ b/nipype/interfaces/utility/tests/test_csv.py @@ -10,7 +10,7 @@ def test_csvReader(tmpdir): lines = ["foo,hello,300.1\n", "bar,world,5\n", "baz,goodbye,0.3\n"] for x in range(2): name = tmpdir.join("testfile.csv").strpath - with open(name, 'w') as fid: + with open(name, "w") as fid: reader = utility.CSVReader() if x % 2 == 0: fid.write(header) @@ -20,10 +20,10 @@ def test_csvReader(tmpdir): reader.inputs.in_file = name out = reader.run() if x % 2 == 0: - assert out.outputs.files == ['foo', 'bar', 'baz'] - assert out.outputs.labels == ['hello', 'world', 'goodbye'] - assert out.outputs.erosion == ['300.1', '5', '0.3'] + assert out.outputs.files == ["foo", "bar", "baz"] + assert out.outputs.labels == ["hello", "world", "goodbye"] + assert out.outputs.erosion == ["300.1", "5", "0.3"] else: - 
assert out.outputs.column_0 == ['foo', 'bar', 'baz'] - assert out.outputs.column_1 == ['hello', 'world', 'goodbye'] - assert out.outputs.column_2 == ['300.1', '5', '0.3'] + assert out.outputs.column_0 == ["foo", "bar", "baz"] + assert out.outputs.column_1 == ["hello", "world", "goodbye"] + assert out.outputs.column_2 == ["300.1", "5", "0.3"] diff --git a/nipype/interfaces/utility/tests/test_wrappers.py b/nipype/interfaces/utility/tests/test_wrappers.py index eb145f51ef..98ee7c7959 100644 --- a/nipype/interfaces/utility/tests/test_wrappers.py +++ b/nipype/interfaces/utility/tests/test_wrappers.py @@ -20,15 +20,18 @@ def test_function(tmpdir): def gen_random_array(size): import numpy as np + return np.random.rand(size, size) f1 = pe.MapNode( utility.Function( - input_names=['size'], - output_names=['random_array'], - function=gen_random_array), - name='random_array', - iterfield=['size']) + input_names=["size"], + output_names=["random_array"], + function=gen_random_array, + ), + name="random_array", + iterfield=["size"], + ) f1.inputs.size = [2, 3, 5] wf = pe.Workflow(name="test_workflow") @@ -38,14 +41,15 @@ def increment_array(in_array): f2 = pe.MapNode( utility.Function(function=increment_array), - name='increment_array', - iterfield=['in_array']) + name="increment_array", + iterfield=["in_array"], + ) - wf.connect(f1, 'random_array', f2, 'in_array') + wf.connect(f1, "random_array", f2, "in_array") f3 = pe.Node(utility.Function(function=concat_sort), name="concat_sort") - wf.connect(f2, 'out', f3, 'in_arrays') + wf.connect(f2, "out", f3, "in_arrays") wf.run() @@ -60,8 +64,10 @@ def should_fail(tmp): utility.Function( input_names=["size"], output_names=["random_array"], - function=make_random_array), - name="should_fail") + function=make_random_array, + ), + name="should_fail", + ) node.inputs.size = 10 node.run() @@ -79,8 +85,10 @@ def test_function_with_imports(tmpdir): input_names=["size"], output_names=["random_array"], function=make_random_array, - imports=["import numpy as np"]), - name="should_not_fail") + imports=["import numpy as np"], + ), + name="should_not_fail", + ) print(node.inputs.function_str) node.inputs.size = 10 node.run() @@ -95,9 +103,7 @@ def test_aux_connect_function(tmpdir): wf = pe.Workflow(name="test_workflow") def _gen_tuple(size): - return [ - 1, - ] * size + return [1,] * size def _sum_and_sub_mul(a, b, c): return (a + b) * c, (a - b) * c @@ -105,33 +111,35 @@ def _sum_and_sub_mul(a, b, c): def _inc(x): return x + 1 - params = pe.Node( - utility.IdentityInterface(fields=['size', 'num']), name='params') + params = pe.Node(utility.IdentityInterface(fields=["size", "num"]), name="params") params.inputs.num = 42 params.inputs.size = 1 gen_tuple = pe.Node( utility.Function( - input_names=['size'], output_names=['tuple'], function=_gen_tuple), - name='gen_tuple') + input_names=["size"], output_names=["tuple"], function=_gen_tuple + ), + name="gen_tuple", + ) ssm = pe.Node( utility.Function( - input_names=['a', 'b', 'c'], - output_names=['sum', 'sub'], - function=_sum_and_sub_mul), - name='sum_and_sub_mul') - - split = pe.Node(utility.Split(splits=[1, 1], squeeze=True), name='split') - - wf.connect([ - (params, gen_tuple, [(("size", _inc), "size")]), - (params, ssm, [(("num", _inc), "c")]), - (gen_tuple, split, [("tuple", "inlist")]), - (split, ssm, [ - (("out1", _inc), "a"), - ("out2", "b"), - ]), - ]) + input_names=["a", "b", "c"], + output_names=["sum", "sub"], + function=_sum_and_sub_mul, + ), + name="sum_and_sub_mul", + ) + + split = 
pe.Node(utility.Split(splits=[1, 1], squeeze=True), name="split") + + wf.connect( + [ + (params, gen_tuple, [(("size", _inc), "size")]), + (params, ssm, [(("num", _inc), "c")]), + (gen_tuple, split, [("tuple", "inlist")]), + (split, ssm, [(("out1", _inc), "a"), ("out2", "b"),]), + ] + ) wf.run() diff --git a/nipype/interfaces/utility/wrappers.py b/nipype/interfaces/utility/wrappers.py index e775c9a540..f638816166 100644 --- a/nipype/interfaces/utility/wrappers.py +++ b/nipype/interfaces/utility/wrappers.py @@ -7,17 +7,22 @@ >>> old = tmp.chdir() """ from ... import logging -from ..base import (traits, DynamicTraitedSpec, Undefined, isdefined, - BaseInterfaceInputSpec) +from ..base import ( + traits, + DynamicTraitedSpec, + Undefined, + isdefined, + BaseInterfaceInputSpec, +) from ..io import IOBase, add_traits from ...utils.filemanip import ensure_list from ...utils.functions import getsource, create_function_from_source -iflogger = logging.getLogger('nipype.interface') +iflogger = logging.getLogger("nipype.interface") class FunctionInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): - function_str = traits.Str(mandatory=True, desc='code for function') + function_str = traits.Str(mandatory=True, desc="code for function") class Function(IOBase): @@ -38,12 +43,14 @@ class Function(IOBase): input_spec = FunctionInputSpec output_spec = DynamicTraitedSpec - def __init__(self, - input_names=None, - output_names='out', - function=None, - imports=None, - **inputs): + def __init__( + self, + input_names=None, + output_names="out", + function=None, + imports=None, + **inputs + ): """ Parameters @@ -66,26 +73,27 @@ def __init__(self, super(Function, self).__init__(**inputs) if function: - if hasattr(function, '__call__'): + if hasattr(function, "__call__"): try: self.inputs.function_str = getsource(function) except IOError: - raise Exception('Interface Function does not accept ' - 'function objects defined interactively ' - 'in a python session') + raise Exception( + "Interface Function does not accept " + "function objects defined interactively " + "in a python session" + ) else: if input_names is None: fninfo = function.__code__ elif isinstance(function, (str, bytes)): self.inputs.function_str = function if input_names is None: - fninfo = create_function_from_source(function, - imports).__code__ + fninfo = create_function_from_source(function, imports).__code__ else: - raise Exception('Unknown type of function') + raise Exception("Unknown type of function") if input_names is None: - input_names = fninfo.co_varnames[:fninfo.co_argcount] - self.inputs.on_trait_change(self._set_function_string, 'function_str') + input_names = fninfo.co_varnames[: fninfo.co_argcount] + self.inputs.on_trait_change(self._set_function_string, "function_str") self._input_names = ensure_list(input_names) self._output_names = ensure_list(output_names) add_traits(self.inputs, [name for name in self._input_names]) @@ -95,20 +103,18 @@ def __init__(self, self._out[name] = None def _set_function_string(self, obj, name, old, new): - if name == 'function_str': - if hasattr(new, '__call__'): + if name == "function_str": + if hasattr(new, "__call__"): function_source = getsource(new) fninfo = new.__code__ elif isinstance(new, (str, bytes)): function_source = new - fninfo = create_function_from_source(new, - self.imports).__code__ + fninfo = create_function_from_source(new, self.imports).__code__ self.inputs.trait_set( - trait_change_notify=False, **{ - '%s' % name: function_source - }) + trait_change_notify=False, **{"%s" % name: 
function_source} + ) # Update input traits - input_names = fninfo.co_varnames[:fninfo.co_argcount] + input_names = fninfo.co_varnames[: fninfo.co_argcount] new_names = set(input_names) - set(self._input_names) add_traits(self.inputs, list(new_names)) self._input_names.extend(new_names) @@ -123,8 +129,9 @@ def _add_output_traits(self, base): def _run_interface(self, runtime): # Create function handle - function_handle = create_function_from_source(self.inputs.function_str, - self.imports) + function_handle = create_function_from_source( + self.inputs.function_str, self.imports + ) # Get function args args = {} for name in self._input_names: @@ -136,9 +143,8 @@ def _run_interface(self, runtime): if len(self._output_names) == 1: self._out[self._output_names[0]] = out else: - if isinstance(out, tuple) and \ - (len(out) != len(self._output_names)): - raise RuntimeError('Mismatch in number of expected outputs') + if isinstance(out, tuple) and (len(out) != len(self._output_names)): + raise RuntimeError("Mismatch in number of expected outputs") else: for idx, name in enumerate(self._output_names): diff --git a/nipype/interfaces/vista/__init__.py b/nipype/interfaces/vista/__init__.py index d0372042aa..c44c4678d3 100644 --- a/nipype/interfaces/vista/__init__.py +++ b/nipype/interfaces/vista/__init__.py @@ -1,4 +1,4 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from .vista import (Vnifti2Image, VtoMat) +from .vista import Vnifti2Image, VtoMat diff --git a/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py b/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py index 5168f61ea7..56bf94b7cc 100644 --- a/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py +++ b/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py @@ -4,29 +4,17 @@ def test_Vnifti2Image_inputs(): input_map = dict( - args=dict(argstr='%s', ), - attributes=dict( - argstr='-attr %s', - extensions=None, - position=2, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - position=1, - ), + args=dict(argstr="%s",), + attributes=dict(argstr="-attr %s", extensions=None, position=2,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=1,), out_file=dict( - argstr='-out %s', + argstr="-out %s", extensions=None, hash_files=False, keep_extension=False, - name_source=['in_file'], - name_template='%s.v', + name_source=["in_file"], + name_template="%s.v", position=-1, ), ) @@ -35,8 +23,10 @@ def test_Vnifti2Image_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_Vnifti2Image_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = Vnifti2Image.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/vista/tests/test_auto_VtoMat.py b/nipype/interfaces/vista/tests/test_auto_VtoMat.py index 788f1e5d9e..46cac1b5b6 100644 --- a/nipype/interfaces/vista/tests/test_auto_VtoMat.py +++ b/nipype/interfaces/vista/tests/test_auto_VtoMat.py @@ -4,24 +4,16 @@ def test_VtoMat_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='-in %s', - extensions=None, - mandatory=True, - position=1, - ), + 
args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=1,), out_file=dict( - argstr='-out %s', + argstr="-out %s", extensions=None, hash_files=False, keep_extension=False, - name_source=['in_file'], - name_template='%s.mat', + name_source=["in_file"], + name_template="%s.mat", position=-1, ), ) @@ -30,8 +22,10 @@ def test_VtoMat_inputs(): for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_VtoMat_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = VtoMat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/vista/vista.py b/nipype/interfaces/vista/vista.py index ada6f430f1..4bb941c7f9 100644 --- a/nipype/interfaces/vista/vista.py +++ b/nipype/interfaces/vista/vista.py @@ -7,25 +7,22 @@ class Vnifti2ImageInputSpec(CommandLineInputSpec): in_file = File( - exists=True, - argstr='-in %s', - mandatory=True, - position=1, - desc='in file') - attributes = File( - exists=True, argstr='-attr %s', position=2, desc='attribute file') + exists=True, argstr="-in %s", mandatory=True, position=1, desc="in file" + ) + attributes = File(exists=True, argstr="-attr %s", position=2, desc="attribute file") out_file = File( name_template="%s.v", keep_extension=False, - argstr='-out %s', + argstr="-out %s", hash_files=False, position=-1, - desc='output data file', - name_source=["in_file"]) + desc="output data file", + name_source=["in_file"], + ) class Vnifti2ImageOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='Output vista file') + out_file = File(exists=True, desc="Output vista file") class Vnifti2Image(CommandLine): @@ -42,30 +39,28 @@ class Vnifti2Image(CommandLine): >>> vimage.run() # doctest: +SKIP """ - _cmd = 'vnifti2image' + _cmd = "vnifti2image" input_spec = Vnifti2ImageInputSpec output_spec = Vnifti2ImageOutputSpec class VtoMatInputSpec(CommandLineInputSpec): in_file = File( - exists=True, - argstr='-in %s', - mandatory=True, - position=1, - desc='in file') + exists=True, argstr="-in %s", mandatory=True, position=1, desc="in file" + ) out_file = File( name_template="%s.mat", keep_extension=False, - argstr='-out %s', + argstr="-out %s", hash_files=False, position=-1, - desc='output mat file', - name_source=["in_file"]) + desc="output mat file", + name_source=["in_file"], + ) class VtoMatOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='Output mat file') + out_file = File(exists=True, desc="Output mat file") class VtoMat(CommandLine): @@ -82,6 +77,6 @@ class VtoMat(CommandLine): >>> vimage.run() # doctest: +SKIP """ - _cmd = 'vtomat' + _cmd = "vtomat" input_spec = VtoMatInputSpec output_spec = VtoMatOutputSpec diff --git a/nipype/interfaces/vtkbase.py b/nipype/interfaces/vtkbase.py index 67edef41c3..875ccb61d5 100644 --- a/nipype/interfaces/vtkbase.py +++ b/nipype/interfaces/vtkbase.py @@ -10,32 +10,36 @@ import os from .. 
import logging -iflogger = logging.getLogger('nipype.interface') +iflogger = logging.getLogger("nipype.interface") # Check that VTK can be imported and get version _vtk_version = None try: import vtk - _vtk_version = (vtk.vtkVersion.GetVTKMajorVersion(), - vtk.vtkVersion.GetVTKMinorVersion()) + + _vtk_version = ( + vtk.vtkVersion.GetVTKMajorVersion(), + vtk.vtkVersion.GetVTKMinorVersion(), + ) except ImportError: - iflogger.warning('VTK was not found') + iflogger.warning("VTK was not found") # Ensure that tvtk is loaded with the appropriate ETS_TOOLKIT env var -old_ets = os.getenv('ETS_TOOLKIT') -os.environ['ETS_TOOLKIT'] = 'null' +old_ets = os.getenv("ETS_TOOLKIT") +os.environ["ETS_TOOLKIT"] = "null" _have_tvtk = False try: from tvtk.api import tvtk + _have_tvtk = True except ImportError: - iflogger.warning('tvtk wasn\'t found') + iflogger.warning("tvtk wasn't found") tvtk = None finally: if old_ets is not None: - os.environ['ETS_TOOLKIT'] = old_ets + os.environ["ETS_TOOLKIT"] = old_ets else: - del os.environ['ETS_TOOLKIT'] + del os.environ["ETS_TOOLKIT"] def vtk_version(): @@ -60,7 +64,7 @@ def vtk_old(): """ Checks if VTK uses the old-style pipeline (VTK<6.0) """ global _vtk_version if _vtk_version is None: - raise RuntimeException('VTK is not correctly installed.') + raise RuntimeException("VTK is not correctly installed.") return _vtk_version[0] < 6 diff --git a/nipype/interfaces/workbench/base.py b/nipype/interfaces/workbench/base.py index 2c2620dbb6..82c12420b9 100644 --- a/nipype/interfaces/workbench/base.py +++ b/nipype/interfaces/workbench/base.py @@ -17,7 +17,7 @@ from ...utils.filemanip import split_filename from ..base import CommandLine, PackageInfo -iflogger = logging.getLogger('nipype.interface') +iflogger = logging.getLogger("nipype.interface") class Info(PackageInfo): @@ -25,11 +25,11 @@ class Info(PackageInfo): Handle `wb_command` version information. """ - version_cmd = 'wb_command -version' + version_cmd = "wb_command -version" @staticmethod def parse_version(raw_info): - m = re.search(r'\nVersion (\S+)', raw_info) + m = re.search(r"\nVersion (\S+)", raw_info) return m.groups()[0] if m else None @@ -40,7 +40,7 @@ class WBCommand(CommandLine): def version(self): return Info.version() - def _gen_filename(self, name, outdir=None, suffix='', ext=None): + def _gen_filename(self, name, outdir=None, suffix="", ext=None): """Generate a filename based on the given parameters. The filename will take the form: . Parameters @@ -63,5 +63,5 @@ def _gen_filename(self, name, outdir=None, suffix='', ext=None): if ext is None: ext = fext if outdir is None: - outdir = '.' + outdir = "." return os.path.join(outdir, fname + suffix + ext) diff --git a/nipype/interfaces/workbench/cifti.py b/nipype/interfaces/workbench/cifti.py index fa288aeead..272aec1a3e 100644 --- a/nipype/interfaces/workbench/cifti.py +++ b/nipype/interfaces/workbench/cifti.py @@ -2,11 +2,11 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """This module provides interfaces for workbench CIFTI commands""" -from ..base import (TraitedSpec, File, traits, CommandLineInputSpec) +from ..base import TraitedSpec, File, traits, CommandLineInputSpec from .base import WBCommand from ... 
import logging -iflogger = logging.getLogger('nipype.interface') +iflogger = logging.getLogger("nipype.interface") class CiftiSmoothInputSpec(CommandLineInputSpec): @@ -15,84 +15,99 @@ class CiftiSmoothInputSpec(CommandLineInputSpec): mandatory=True, argstr="%s", position=0, - desc="The input CIFTI file") + desc="The input CIFTI file", + ) sigma_surf = traits.Float( mandatory=True, argstr="%s", position=1, - desc="the sigma for the gaussian surface smoothing kernel, in mm") + desc="the sigma for the gaussian surface smoothing kernel, in mm", + ) sigma_vol = traits.Float( mandatory=True, argstr="%s", position=2, - desc="the sigma for the gaussian volume smoothing kernel, in mm") + desc="the sigma for the gaussian volume smoothing kernel, in mm", + ) direction = traits.Enum( "ROW", "COLUMN", mandatory=True, argstr="%s", position=3, - desc="which dimension to smooth along, ROW or COLUMN") + desc="which dimension to smooth along, ROW or COLUMN", + ) out_file = File( name_source=["in_file"], name_template="smoothed_%s.nii", keep_extension=True, argstr="%s", position=4, - desc="The output CIFTI") + desc="The output CIFTI", + ) left_surf = File( exists=True, mandatory=True, position=5, argstr="-left-surface %s", - desc="Specify the left surface to use") + desc="Specify the left surface to use", + ) left_corrected_areas = File( exists=True, position=6, argstr="-left-corrected-areas %s", desc="vertex areas (as a metric) to use instead of computing them from " - "the left surface.") + "the left surface.", + ) right_surf = File( exists=True, mandatory=True, position=7, argstr="-right-surface %s", - desc="Specify the right surface to use") + desc="Specify the right surface to use", + ) right_corrected_areas = File( exists=True, position=8, argstr="-right-corrected-areas %s", desc="vertex areas (as a metric) to use instead of computing them from " - "the right surface") + "the right surface", + ) cerebellum_surf = File( exists=True, position=9, argstr="-cerebellum-surface %s", - desc="specify the cerebellum surface to use") + desc="specify the cerebellum surface to use", + ) cerebellum_corrected_areas = File( exists=True, position=10, requires=["cerebellum_surf"], argstr="cerebellum-corrected-areas %s", desc="vertex areas (as a metric) to use instead of computing them from " - "the cerebellum surface") + "the cerebellum surface", + ) cifti_roi = File( exists=True, position=11, argstr="-cifti-roi %s", - desc="CIFTI file for ROI smoothing") + desc="CIFTI file for ROI smoothing", + ) fix_zeros_vol = traits.Bool( position=12, argstr="-fix-zeros-volume", - desc="treat values of zero in the volume as missing data") + desc="treat values of zero in the volume as missing data", + ) fix_zeros_surf = traits.Bool( position=13, argstr="-fix-zeros-surface", - desc="treat values of zero on the surface as missing data") + desc="treat values of zero on the surface as missing data", + ) merged_volume = traits.Bool( position=14, argstr="-merged-volume", - desc="smooth across subcortical structure boundaries") + desc="smooth across subcortical structure boundaries", + ) class CiftiSmoothOutputSpec(TraitedSpec): @@ -135,6 +150,7 @@ class CiftiSmooth(WBCommand): -left-surface sub-01.L.midthickness.32k_fs_LR.surf.gii \ -right-surface sub-01.R.midthickness.32k_fs_LR.surf.gii' """ + input_spec = CiftiSmoothInputSpec output_spec = CiftiSmoothOutputSpec - _cmd = 'wb_command -cifti-smoothing' + _cmd = "wb_command -cifti-smoothing" diff --git a/nipype/interfaces/workbench/metric.py b/nipype/interfaces/workbench/metric.py index 
b3653576a4..6bbe7f98cf 100644 --- a/nipype/interfaces/workbench/metric.py +++ b/nipype/interfaces/workbench/metric.py @@ -4,11 +4,11 @@ """This module provides interfaces for workbench surface commands""" import os -from ..base import (TraitedSpec, File, traits, CommandLineInputSpec) +from ..base import TraitedSpec, File, traits, CommandLineInputSpec from .base import WBCommand from ... import logging -iflogger = logging.getLogger('nipype.interface') +iflogger = logging.getLogger("nipype.interface") class MetricResampleInputSpec(CommandLineInputSpec): @@ -17,20 +17,23 @@ class MetricResampleInputSpec(CommandLineInputSpec): mandatory=True, argstr="%s", position=0, - desc="The metric file to resample") + desc="The metric file to resample", + ) current_sphere = File( exists=True, mandatory=True, argstr="%s", position=1, - desc="A sphere surface with the mesh that the metric is currently on") + desc="A sphere surface with the mesh that the metric is currently on", + ) new_sphere = File( exists=True, mandatory=True, argstr="%s", position=2, desc="A sphere surface that is in register with and" - " has the desired output mesh") + " has the desired output mesh", + ) method = traits.Enum( "ADAP_BARY_AREA", "BARYCENTRIC", @@ -38,51 +41,60 @@ class MetricResampleInputSpec(CommandLineInputSpec): mandatory=True, position=3, desc="The method name - ADAP_BARY_AREA method is recommended for" - " ordinary metric data, because it should use all data while" - " downsampling, unlike BARYCENTRIC. If ADAP_BARY_AREA is used," - " exactly one of area_surfs or area_metrics must be specified") + " ordinary metric data, because it should use all data while" + " downsampling, unlike BARYCENTRIC. If ADAP_BARY_AREA is used," + " exactly one of area_surfs or area_metrics must be specified", + ) out_file = File( name_source=["new_sphere"], name_template="%s.out", keep_extension=True, argstr="%s", position=4, - desc="The output metric") + desc="The output metric", + ) area_surfs = traits.Bool( position=5, argstr="-area-surfs", xor=["area_metrics"], - desc="Specify surfaces to do vertex area correction based on") + desc="Specify surfaces to do vertex area correction based on", + ) area_metrics = traits.Bool( position=5, argstr="-area-metrics", xor=["area_surfs"], - desc="Specify vertex area metrics to do area correction based on") + desc="Specify vertex area metrics to do area correction based on", + ) current_area = File( exists=True, position=6, argstr="%s", desc="A relevant anatomical surface with mesh OR" - " a metric file with vertex areas for mesh") + " a metric file with vertex areas for mesh", + ) new_area = File( exists=True, position=7, argstr="%s", desc="A relevant anatomical surface with mesh OR" - " a metric file with vertex areas for mesh") + " a metric file with vertex areas for mesh", + ) roi_metric = File( exists=True, position=8, argstr="-current-roi %s", - desc="Input roi on the current mesh used to exclude non-data vertices") + desc="Input roi on the current mesh used to exclude non-data vertices", + ) valid_roi_out = traits.Bool( position=9, argstr="-valid-roi-out", - desc="Output the ROI of vertices that got data from valid source vertices") + desc="Output the ROI of vertices that got data from valid source vertices", + ) largest = traits.Bool( position=10, argstr="-largest", - desc="Use only the value of the vertex with the largest weight") + desc="Use only the value of the vertex with the largest weight", + ) class MetricResampleOutputSpec(TraitedSpec): @@ -129,24 +141,30 @@ class 
MetricResample(WBCommand): -area-metrics fsaverage5.L.midthickness_va_avg.10k_fsavg_L.shape.gii \ fs_LR.L.midthickness_va_avg.32k_fs_LR.shape.gii' """ + input_spec = MetricResampleInputSpec output_spec = MetricResampleOutputSpec - _cmd = 'wb_command -metric-resample' + _cmd = "wb_command -metric-resample" def _format_arg(self, opt, spec, val): - if opt in ['current_area', 'new_area']: + if opt in ["current_area", "new_area"]: if not self.inputs.area_surfs and not self.inputs.area_metrics: - raise ValueError("{} was set but neither area_surfs or" - " area_metrics were set".format(opt)) + raise ValueError( + "{} was set but neither area_surfs or" + " area_metrics were set".format(opt) + ) if opt == "method": - if (val == "ADAP_BARY_AREA" and - not self.inputs.area_surfs and - not self.inputs.area_metrics): - raise ValueError("Exactly one of area_surfs or area_metrics" - " must be specified") + if ( + val == "ADAP_BARY_AREA" + and not self.inputs.area_surfs + and not self.inputs.area_metrics + ): + raise ValueError( + "Exactly one of area_surfs or area_metrics" " must be specified" + ) if opt == "valid_roi_out" and val: # generate a filename and add it to argstr - roi_out = self._gen_filename(self.inputs.in_file, suffix='_roi') + roi_out = self._gen_filename(self.inputs.in_file, suffix="_roi") iflogger.info("Setting roi output file as", roi_out) spec.argstr += " " + roi_out return super(MetricResample, self)._format_arg(opt, spec, val) @@ -154,6 +172,6 @@ def _format_arg(self, opt, spec, val): def _list_outputs(self): outputs = super(MetricResample, self)._list_outputs() if self.inputs.valid_roi_out: - roi_file = self._gen_filename(self.inputs.in_file, suffix='_roi') - outputs['roi_file'] = os.path.abspath(roi_file) + roi_file = self._gen_filename(self.inputs.in_file, suffix="_roi") + outputs["roi_file"] = os.path.abspath(roi_file) return outputs diff --git a/nipype/interfaces/workbench/tests/test_auto_CiftiSmooth.py b/nipype/interfaces/workbench/tests/test_auto_CiftiSmooth.py index a045eb29f5..4be8b4aba7 100644 --- a/nipype/interfaces/workbench/tests/test_auto_CiftiSmooth.py +++ b/nipype/interfaces/workbench/tests/test_auto_CiftiSmooth.py @@ -4,98 +4,55 @@ def test_CiftiSmooth_inputs(): input_map = dict( - args=dict(argstr='%s', ), + args=dict(argstr="%s",), cerebellum_corrected_areas=dict( - argstr='cerebellum-corrected-areas %s', + argstr="cerebellum-corrected-areas %s", extensions=None, position=10, - requires=['cerebellum_surf'], + requires=["cerebellum_surf"], ), cerebellum_surf=dict( - argstr='-cerebellum-surface %s', - extensions=None, - position=9, - ), - cifti_roi=dict( - argstr='-cifti-roi %s', - extensions=None, - position=11, - ), - direction=dict( - argstr='%s', - mandatory=True, - position=3, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fix_zeros_surf=dict( - argstr='-fix-zeros-surface', - position=13, - ), - fix_zeros_vol=dict( - argstr='-fix-zeros-volume', - position=12, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=0, - ), + argstr="-cerebellum-surface %s", extensions=None, position=9, + ), + cifti_roi=dict(argstr="-cifti-roi %s", extensions=None, position=11,), + direction=dict(argstr="%s", mandatory=True, position=3,), + environ=dict(nohash=True, usedefault=True,), + fix_zeros_surf=dict(argstr="-fix-zeros-surface", position=13,), + fix_zeros_vol=dict(argstr="-fix-zeros-volume", position=12,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), left_corrected_areas=dict( - 
argstr='-left-corrected-areas %s', - extensions=None, - position=6, + argstr="-left-corrected-areas %s", extensions=None, position=6, ), left_surf=dict( - argstr='-left-surface %s', - extensions=None, - mandatory=True, - position=5, - ), - merged_volume=dict( - argstr='-merged-volume', - position=14, + argstr="-left-surface %s", extensions=None, mandatory=True, position=5, ), + merged_volume=dict(argstr="-merged-volume", position=14,), out_file=dict( - argstr='%s', + argstr="%s", extensions=None, keep_extension=True, - name_source=['in_file'], - name_template='smoothed_%s.nii', + name_source=["in_file"], + name_template="smoothed_%s.nii", position=4, ), right_corrected_areas=dict( - argstr='-right-corrected-areas %s', - extensions=None, - position=8, + argstr="-right-corrected-areas %s", extensions=None, position=8, ), right_surf=dict( - argstr='-right-surface %s', - extensions=None, - mandatory=True, - position=7, - ), - sigma_surf=dict( - argstr='%s', - mandatory=True, - position=1, - ), - sigma_vol=dict( - argstr='%s', - mandatory=True, - position=2, + argstr="-right-surface %s", extensions=None, mandatory=True, position=7, ), + sigma_surf=dict(argstr="%s", mandatory=True, position=1,), + sigma_vol=dict(argstr="%s", mandatory=True, position=2,), ) inputs = CiftiSmooth.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_CiftiSmooth_outputs(): - output_map = dict(out_file=dict(extensions=None, ), ) + output_map = dict(out_file=dict(extensions=None,),) outputs = CiftiSmooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/workbench/tests/test_auto_MetricResample.py b/nipype/interfaces/workbench/tests/test_auto_MetricResample.py index f5ed55874b..eb9201b7f1 100644 --- a/nipype/interfaces/workbench/tests/test_auto_MetricResample.py +++ b/nipype/interfaces/workbench/tests/test_auto_MetricResample.py @@ -4,86 +4,37 @@ def test_MetricResample_inputs(): input_map = dict( - area_metrics=dict( - argstr='-area-metrics', - position=5, - xor=['area_surfs'], - ), - area_surfs=dict( - argstr='-area-surfs', - position=5, - xor=['area_metrics'], - ), - args=dict(argstr='%s', ), - current_area=dict( - argstr='%s', - extensions=None, - position=6, - ), - current_sphere=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=0, - ), - largest=dict( - argstr='-largest', - position=10, - ), - method=dict( - argstr='%s', - mandatory=True, - position=3, - ), - new_area=dict( - argstr='%s', - extensions=None, - position=7, - ), - new_sphere=dict( - argstr='%s', - extensions=None, - mandatory=True, - position=2, - ), + area_metrics=dict(argstr="-area-metrics", position=5, xor=["area_surfs"],), + area_surfs=dict(argstr="-area-surfs", position=5, xor=["area_metrics"],), + args=dict(argstr="%s",), + current_area=dict(argstr="%s", extensions=None, position=6,), + current_sphere=dict(argstr="%s", extensions=None, mandatory=True, position=1,), + environ=dict(nohash=True, usedefault=True,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), + largest=dict(argstr="-largest", position=10,), + method=dict(argstr="%s", mandatory=True, position=3,), + new_area=dict(argstr="%s", extensions=None, position=7,), + new_sphere=dict(argstr="%s", extensions=None, mandatory=True, position=2,), 
out_file=dict( - argstr='%s', + argstr="%s", extensions=None, keep_extension=True, - name_source=['new_sphere'], - name_template='%s.out', + name_source=["new_sphere"], + name_template="%s.out", position=4, ), - roi_metric=dict( - argstr='-current-roi %s', - extensions=None, - position=8, - ), - valid_roi_out=dict( - argstr='-valid-roi-out', - position=9, - ), + roi_metric=dict(argstr="-current-roi %s", extensions=None, position=8,), + valid_roi_out=dict(argstr="-valid-roi-out", position=9,), ) inputs = MetricResample.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value + + def test_MetricResample_outputs(): - output_map = dict( - out_file=dict(extensions=None, ), - roi_file=dict(extensions=None, ), - ) + output_map = dict(out_file=dict(extensions=None,), roi_file=dict(extensions=None,),) outputs = MetricResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/workbench/tests/test_auto_WBCommand.py b/nipype/interfaces/workbench/tests/test_auto_WBCommand.py index cccba55c95..0a32276e5f 100644 --- a/nipype/interfaces/workbench/tests/test_auto_WBCommand.py +++ b/nipype/interfaces/workbench/tests/test_auto_WBCommand.py @@ -4,11 +4,7 @@ def test_WBCommand_inputs(): input_map = dict( - args=dict(argstr='%s', ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s",), environ=dict(nohash=True, usedefault=True,), ) inputs = WBCommand.input_spec() diff --git a/nipype/pipeline/__init__.py b/nipype/pipeline/__init__.py index b410fc8ea0..75b3b17c3a 100644 --- a/nipype/pipeline/__init__.py +++ b/nipype/pipeline/__init__.py @@ -5,5 +5,5 @@ Package contains modules for generating pipelines using interfaces """ -__docformat__ = 'restructuredtext' +__docformat__ = "restructuredtext" from .engine import Node, MapNode, JoinNode, Workflow diff --git a/nipype/pipeline/engine/__init__.py b/nipype/pipeline/engine/__init__.py index 4dc6784276..b13ba968ac 100644 --- a/nipype/pipeline/engine/__init__.py +++ b/nipype/pipeline/engine/__init__.py @@ -7,7 +7,7 @@ """ -__docformat__ = 'restructuredtext' +__docformat__ = "restructuredtext" from .workflows import Workflow from .nodes import Node, MapNode, JoinNode from .utils import generate_expanded_graph diff --git a/nipype/pipeline/engine/base.py b/nipype/pipeline/engine/base.py index fef31d00b9..6735c19d49 100644 --- a/nipype/pipeline/engine/base.py +++ b/nipype/pipeline/engine/base.py @@ -33,7 +33,7 @@ def __init__(self, name=None, base_dir=None): """ self._hierarchy = None self.name = name - self._id = self.name # for compatibility with node expansion using iterables + self._id = self.name # for compatibility with node expansion using iterables self.base_dir = base_dir self.config = deepcopy(config._sections) @@ -44,14 +44,14 @@ def name(self): @name.setter def name(self, name): - if not name or not re.match(r'^[\w-]+$', name): + if not name or not re.match(r"^[\w-]+$", name): raise ValueError('[Workflow|Node] name "%s" is not valid.' 
% name) self._name = name @property def fullname(self): if self._hierarchy: - return '%s.%s' % (self._hierarchy, self.name) + return "%s.%s" % (self._hierarchy, self.name) return self.name @property @@ -67,7 +67,7 @@ def itername(self): """Name for expanded iterable""" itername = self._id if self._hierarchy: - itername = '%s.%s' % (self._hierarchy, self._id) + itername = "%s.%s" % (self._hierarchy, self._id) return itername def clone(self, name): @@ -80,11 +80,10 @@ def clone(self, name): A clone of node or workflow must have a new name """ if name == self.name: - raise ValueError('Cloning requires a new name, "%s" is ' - 'in use.' % name) + raise ValueError('Cloning requires a new name, "%s" is ' "in use." % name) clone = deepcopy(self) clone.name = name - if hasattr(clone, '_id'): + if hasattr(clone, "_id"): clone._id = name return clone @@ -104,7 +103,7 @@ def __repr__(self): def save(self, filename=None): if filename is None: - filename = 'temp.pklz' + filename = "temp.pklz" savepkl(filename, self) def load(self, filename): diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index c57eb4e2c8..09822cc7ff 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -19,23 +19,49 @@ from ... import config, logging from ...utils.misc import flatten, unflatten, str2bool, dict_diff -from ...utils.filemanip import (md5, ensure_list, simplify_list, copyfiles, fnames_presuffix, - loadpkl, split_filename, load_json, - emptydirs, savepkl, indirectory, silentrm) - -from ...interfaces.base import (traits, InputMultiPath, CommandLine, Undefined, - DynamicTraitedSpec, Bunch, InterfaceResult, - Interface, isdefined) +from ...utils.filemanip import ( + md5, + ensure_list, + simplify_list, + copyfiles, + fnames_presuffix, + loadpkl, + split_filename, + load_json, + emptydirs, + savepkl, + indirectory, + silentrm, +) + +from ...interfaces.base import ( + traits, + InputMultiPath, + CommandLine, + Undefined, + DynamicTraitedSpec, + Bunch, + InterfaceResult, + Interface, + isdefined, +) from ...interfaces.base.specs import get_filecopy_info from .utils import ( - _parameterization_dir, save_hashfile as _save_hashfile, load_resultfile as - _load_resultfile, save_resultfile as _save_resultfile, nodelist_runner as - _node_runner, strip_temp as _strip_temp, write_node_report, - clean_working_directory, merge_dict, evaluate_connect_function) + _parameterization_dir, + save_hashfile as _save_hashfile, + load_resultfile as _load_resultfile, + save_resultfile as _save_resultfile, + nodelist_runner as _node_runner, + strip_temp as _strip_temp, + write_node_report, + clean_working_directory, + merge_dict, + evaluate_connect_function, +) from .base import EngineBase -logger = logging.getLogger('nipype.workflow') +logger = logging.getLogger("nipype.workflow") class Node(EngineBase): @@ -60,18 +86,20 @@ class Node(EngineBase): """ - def __init__(self, - interface, - name, - iterables=None, - itersource=None, - synchronize=False, - overwrite=None, - needed_outputs=None, - run_without_submitting=False, - n_procs=None, - mem_gb=0.20, - **kwargs): + def __init__( + self, + interface, + name, + iterables=None, + itersource=None, + synchronize=False, + overwrite=None, + needed_outputs=None, + run_without_submitting=False, + n_procs=None, + mem_gb=0.20, + **kwargs + ): """ Parameters ---------- @@ -142,11 +170,11 @@ def __init__(self, """ # Make sure an interface is set, and that it is an Interface if interface is None: - raise IOError('Interface must be provided') + raise 
IOError("Interface must be provided") if not isinstance(interface, Interface): - raise IOError('interface must be an instance of an Interface') + raise IOError("interface must be an instance of an Interface") - super(Node, self).__init__(name, kwargs.get('base_dir')) + super(Node, self).__init__(name, kwargs.get("base_dir")) self._interface = interface self._hierarchy = None @@ -167,8 +195,7 @@ def __init__(self, self._n_procs = n_procs # Downstream n_procs - if hasattr(self._interface.inputs, - 'num_threads') and self._n_procs is not None: + if hasattr(self._interface.inputs, "num_threads") and self._n_procs is not None: self._interface.inputs.num_threads = self._n_procs # Initialize needed_outputs and hashes @@ -187,7 +214,8 @@ def interface(self): def result(self): """Get result from result file (do not hold it in memory)""" return _load_resultfile( - op.join(self.output_dir(), 'result_%s.pklz' % self.name)) + op.join(self.output_dir(), "result_%s.pklz" % self.name) + ) @property def inputs(self): @@ -216,11 +244,12 @@ def needed_outputs(self, new_outputs): @property def mem_gb(self): """Get estimated memory (GB)""" - if hasattr(self._interface, 'estimated_memory_gb'): + if hasattr(self._interface, "estimated_memory_gb"): self._mem_gb = self._interface.estimated_memory_gb logger.warning( 'Setting "estimated_memory_gb" on Interfaces has been ' - 'deprecated as of nipype 1.0, please use Node.mem_gb.') + "deprecated as of nipype 1.0, please use Node.mem_gb." + ) return self._mem_gb @@ -229,8 +258,9 @@ def n_procs(self): """Get the estimated number of processes/threads""" if self._n_procs is not None: return self._n_procs - if hasattr(self._interface.inputs, 'num_threads') and isdefined( - self._interface.inputs.num_threads): + if hasattr(self._interface.inputs, "num_threads") and isdefined( + self._interface.inputs.num_threads + ): return self._interface.inputs.num_threads return 1 @@ -240,7 +270,7 @@ def n_procs(self, value): self._n_procs = value # Overwrite interface's dynamic input of num_threads - if hasattr(self._interface.inputs, 'num_threads'): + if hasattr(self._interface.inputs, "num_threads"): self._interface.inputs.num_threads = self._n_procs def output_dir(self): @@ -254,10 +284,10 @@ def output_dir(self): self.base_dir = mkdtemp() outputdir = self.base_dir if self._hierarchy: - outputdir = op.join(outputdir, *self._hierarchy.split('.')) + outputdir = op.join(outputdir, *self._hierarchy.split(".")) if self.parameterization: - params_str = ['{}'.format(p) for p in self.parameterization] - if not str2bool(self.config['execution']['parameterize_dirs']): + params_str = ["{}".format(p) for p in self.parameterization] + if not str2bool(self.config["execution"]["parameterize_dirs"]): params_str = [_parameterization_dir(p) for p in params_str] outputdir = op.join(outputdir, *params_str) @@ -266,8 +296,9 @@ def output_dir(self): def set_input(self, parameter, val): """Set interface input value""" - logger.debug('[Node] %s - setting input %s = %s', self.name, parameter, - str(val)) + logger.debug( + "[Node] %s - setting input %s = %s", self.name, parameter, str(val) + ) setattr(self.inputs, parameter, deepcopy(val)) def get_output(self, parameter): @@ -286,40 +317,46 @@ def is_cached(self, rm_outdated=False): outdir = self.output_dir() # The output folder does not exist: not cached - if not op.exists(outdir) or \ - not op.exists(op.join(outdir, 'result_%s.pklz' % self.name)): + if not op.exists(outdir) or not op.exists( + op.join(outdir, "result_%s.pklz" % self.name) + ): 
logger.debug('[Node] Not cached "%s".', outdir) return False, False # Check if there are hashfiles - globhashes = glob(op.join(outdir, '_0x*.json')) - unfinished = [ - path for path in globhashes - if path.endswith('_unfinished.json') - ] + globhashes = glob(op.join(outdir, "_0x*.json")) + unfinished = [path for path in globhashes if path.endswith("_unfinished.json")] hashfiles = list(set(globhashes) - set(unfinished)) # Update hash hashed_inputs, hashvalue = self._get_hashval() - hashfile = op.join(outdir, '_0x%s.json' % hashvalue) - logger.debug('[Node] Hashes: %s, %s, %s, %s', - hashed_inputs, hashvalue, hashfile, hashfiles) + hashfile = op.join(outdir, "_0x%s.json" % hashvalue) + logger.debug( + "[Node] Hashes: %s, %s, %s, %s", + hashed_inputs, + hashvalue, + hashfile, + hashfiles, + ) cached = hashfile in hashfiles # No previous hashfiles found, we're all set. if cached and len(hashfiles) == 1: - assert(hashfile == hashfiles[0]) + assert hashfile == hashfiles[0] logger.debug('[Node] Up-to-date cache found for "%s".', self.fullname) return True, True # Cached and updated if len(hashfiles) > 1: if cached: hashfiles.remove(hashfile) # Do not clean up the node, if cached - logger.warning('[Node] Found %d previous hashfiles indicating that the working ' - 'directory of node "%s" is stale, deleting old hashfiles.', - len(hashfiles), self.fullname) + logger.warning( + "[Node] Found %d previous hashfiles indicating that the working " + 'directory of node "%s" is stale, deleting old hashfiles.', + len(hashfiles), + self.fullname, + ) for rmfile in hashfiles: os.remove(rmfile) @@ -327,7 +364,7 @@ def is_cached(self, rm_outdated=False): if not hashfiles: logger.debug('[Node] No hashfiles found in "%s".', outdir) - assert(not cached) + assert not cached return False, False # At this point only one hashfile is in the folder @@ -340,21 +377,21 @@ def is_cached(self, rm_outdated=False): loglevel = logger.getEffectiveLevel() if loglevel < INFO: # Lazy logging: only < INFO exp_hash_file_base = split_filename(hashfiles[0])[1] - exp_hash = exp_hash_file_base[len('_0x'):] - logger.log(loglevel, "[Node] Old/new hashes = %s/%s", - exp_hash, hashvalue) + exp_hash = exp_hash_file_base[len("_0x") :] + logger.log( + loglevel, "[Node] Old/new hashes = %s/%s", exp_hash, hashvalue + ) try: prev_inputs = load_json(hashfiles[0]) except Exception: pass else: - logger.log(loglevel, - dict_diff(prev_inputs, hashed_inputs, 10)) + logger.log(loglevel, dict_diff(prev_inputs, hashed_inputs, 10)) if rm_outdated: os.remove(hashfiles[0]) - assert(cached) # At this point, node is cached (may not be up-to-date) + assert cached # At this point, node is cached (may not be up-to-date) return cached, updated def hash_exists(self, updatehash=False): @@ -368,7 +405,7 @@ def hash_exists(self, updatehash=False): cached, updated = self.is_cached(rm_outdated=True) outdir = self.output_dir() - hashfile = op.join(outdir, '_0x%s.json' % self._hashvalue) + hashfile = op.join(outdir, "_0x%s.json" % self._hashvalue) if updated: return True, self._hashvalue, hashfile, self._hashed_inputs @@ -397,8 +434,9 @@ def run(self, updatehash=False): self.config = merge_dict(deepcopy(config._sections), self.config) outdir = self.output_dir() - force_run = self.overwrite or (self.overwrite is None and - self._interface.always_run) + force_run = self.overwrite or ( + self.overwrite is None and self._interface.always_run + ) # Check hash, check whether run should be enforced logger.info('[Node] Setting-up "%s" in "%s".', self.fullname, outdir) @@ -407,32 
+445,38 @@ def run(self, updatehash=False): # If the node is cached, check on pklz files and finish if not force_run and (updated or (not updated and updatehash)): logger.debug("Only updating node hashes or skipping execution") - inputs_file = op.join(outdir, '_inputs.pklz') + inputs_file = op.join(outdir, "_inputs.pklz") if not op.exists(inputs_file): - logger.debug('Creating inputs file %s', inputs_file) + logger.debug("Creating inputs file %s", inputs_file) savepkl(inputs_file, self.inputs.get_traitsfree()) - node_file = op.join(outdir, '_node.pklz') + node_file = op.join(outdir, "_node.pklz") if not op.exists(node_file): - logger.debug('Creating node file %s', node_file) + logger.debug("Creating node file %s", node_file) savepkl(node_file, self) - result = self._run_interface(execute=False, - updatehash=updatehash and not updated) - logger.info('[Node] "%s" found cached%s.', self.fullname, - ' (and hash updated)' * (updatehash and not updated)) + result = self._run_interface( + execute=False, updatehash=updatehash and not updated + ) + logger.info( + '[Node] "%s" found cached%s.', + self.fullname, + " (and hash updated)" * (updatehash and not updated), + ) return result if cached and updated and not isinstance(self, MapNode): logger.debug('[Node] Rerunning cached, up-to-date node "%s"', self.fullname) if not force_run and str2bool( - self.config['execution']['stop_on_first_rerun']): + self.config["execution"]["stop_on_first_rerun"] + ): raise Exception( - 'Cannot rerun when "stop_on_first_rerun" is set to True') + 'Cannot rerun when "stop_on_first_rerun" is set to True' + ) # Remove any hashfile that exists at this point (re)running. if cached: - for outdatedhash in glob(op.join(self.output_dir(), '_0x*.json')): + for outdatedhash in glob(op.join(self.output_dir(), "_0x*.json")): os.remove(outdatedhash) # _get_hashval needs to be called before running. When there is a valid (or seemingly @@ -441,21 +485,22 @@ def run(self, updatehash=False): # the hashval needs to be generated here. See #3026 for a larger context. self._get_hashval() # Hashfile while running - hashfile_unfinished = op.join( - outdir, '_0x%s_unfinished.json' % self._hashvalue) + hashfile_unfinished = op.join(outdir, "_0x%s_unfinished.json" % self._hashvalue) # Delete directory contents if this is not a MapNode or can't resume can_resume = not (self._interface.can_resume and op.isfile(hashfile_unfinished)) if can_resume and not isinstance(self, MapNode): emptydirs(outdir, noexist_ok=True) else: - logger.debug('[%sNode] Resume - hashfile=%s', - 'Map' * int(isinstance(self, MapNode)), - hashfile_unfinished) + logger.debug( + "[%sNode] Resume - hashfile=%s", + "Map" * int(isinstance(self, MapNode)), + hashfile_unfinished, + ) if isinstance(self, MapNode): # remove old json files - for filename in glob(op.join(outdir, '_0x*.json')): + for filename in glob(op.join(outdir, "_0x*.json")): os.remove(filename) # Make sure outdir is created @@ -464,8 +509,8 @@ def run(self, updatehash=False): # Store runtime-hashfile, pre-execution report, the node and the inputs set. 
_save_hashfile(hashfile_unfinished, self._hashed_inputs) write_node_report(self, is_mapnode=isinstance(self, MapNode)) - savepkl(op.join(outdir, '_node.pklz'), self) - savepkl(op.join(outdir, '_inputs.pklz'), self.inputs.get_traitsfree()) + savepkl(op.join(outdir, "_node.pklz"), self) + savepkl(op.join(outdir, "_inputs.pklz"), self.inputs.get_traitsfree()) try: result = self._run_interface(execute=True) @@ -473,15 +518,17 @@ def run(self, updatehash=False): logger.warning('[Node] Error on "%s" (%s)', self.fullname, outdir) # Tear-up after error if not silentrm(hashfile_unfinished): - logger.warning("""\ + logger.warning( + """\ Interface finished unexpectedly and the corresponding unfinished hashfile %s \ does not exist. Another nipype instance may be running against the same work \ -directory. Please ensure no other concurrent workflows are racing""", hashfile_unfinished) +directory. Please ensure no other concurrent workflows are racing""", + hashfile_unfinished, + ) raise # Tear-up after success - shutil.move(hashfile_unfinished, - hashfile_unfinished.replace('_unfinished', '')) + shutil.move(hashfile_unfinished, hashfile_unfinished.replace("_unfinished", "")) write_node_report(self, result=result, is_mapnode=isinstance(self, MapNode)) logger.info('[Node] Finished "%s".', self.fullname) return result @@ -491,14 +538,15 @@ def _get_hashval(self): self._get_inputs() if self._hashvalue is None and self._hashed_inputs is None: self._hashed_inputs, self._hashvalue = self.inputs.get_hashval( - hash_method=self.config['execution']['hash_method']) - rm_extra = self.config['execution']['remove_unnecessary_outputs'] + hash_method=self.config["execution"]["hash_method"] + ) + rm_extra = self.config["execution"]["remove_unnecessary_outputs"] if str2bool(rm_extra) and self.needed_outputs: hashobject = md5() hashobject.update(self._hashvalue.encode()) hashobject.update(str(self.needed_outputs).encode()) self._hashvalue = hashobject.hexdigest() - self._hashed_inputs.append(('needed_outputs', self.needed_outputs)) + self._hashed_inputs.append(("needed_outputs", self.needed_outputs)) return self._hashed_inputs, self._hashvalue def _get_inputs(self): @@ -521,19 +569,25 @@ def _get_inputs(self): logger.debug( '[Node] Setting %d connected inputs of node "%s" from %d previous nodes.', - len(self.input_source), self.name, len(prev_results)) + len(self.input_source), + self.name, + len(prev_results), + ) for results_fname, connections in list(prev_results.items()): outputs = None try: outputs = _load_resultfile(results_fname).outputs except AttributeError as e: - logger.critical('%s', e) + logger.critical("%s", e) if outputs is None: - raise RuntimeError("""\ + raise RuntimeError( + """\ Error populating the inputs of node "%s": the results file of the source node \ -(%s) does not contain any outputs.""" % (self.name, results_fname)) +(%s) does not contain any outputs.""" + % (self.name, results_fname) + ) for key, conn in connections: output_value = Undefined @@ -541,7 +595,8 @@ def _get_inputs(self): value = getattr(outputs, conn[0]) if isdefined(value): output_value = evaluate_connect_function( - conn[1], conn[2], value) + conn[1], conn[2], value + ) else: output_name = conn try: @@ -554,19 +609,22 @@ def _get_inputs(self): self.set_input(key, deepcopy(output_value)) except traits.TraitError as e: msg = ( - e.args[0], '', 'Error setting node input:', - 'Node: %s' % self.name, 'input: %s' % key, - 'results_file: %s' % results_fname, - 'value: %s' % str(output_value), + e.args[0], + "", + "Error setting node 
input:", + "Node: %s" % self.name, + "input: %s" % key, + "results_file: %s" % results_fname, + "value: %s" % str(output_value), ) - e.args = ('\n'.join(msg), ) + e.args = ("\n".join(msg),) raise # Successfully set inputs self._got_inputs = True def _update_hash(self): - for outdatedhash in glob(op.join(self.output_dir(), '_0x*.json')): + for outdatedhash in glob(op.join(self.output_dir(), "_0x*.json")): os.remove(outdatedhash) _save_hashfile(self._hashvalue, self._hashed_inputs) @@ -580,15 +638,15 @@ def _load_results(self): cwd = self.output_dir() try: - result = _load_resultfile( - op.join(cwd, 'result_%s.pklz' % self.name)) + result = _load_resultfile(op.join(cwd, "result_%s.pklz" % self.name)) except (traits.TraitError, EOFError): - logger.debug( - 'Error populating inputs/outputs, (re)aggregating results...') + logger.debug("Error populating inputs/outputs, (re)aggregating results...") except (AttributeError, ImportError) as err: - logger.debug('attribute error: %s probably using ' - 'different trait pickled file', str(err)) - old_inputs = loadpkl(op.join(cwd, '_inputs.pklz')) + logger.debug( + "attribute error: %s probably using " "different trait pickled file", + str(err), + ) + old_inputs = loadpkl(op.join(cwd, "_inputs.pklz")) self.inputs.trait_set(**old_inputs) else: return result @@ -597,22 +655,28 @@ def _load_results(self): if not isinstance(self, MapNode): self._copyfiles_to_wd(linksonly=True) aggouts = self._interface.aggregate_outputs( - needed_outputs=self.needed_outputs) + needed_outputs=self.needed_outputs + ) runtime = Bunch( cwd=cwd, returncode=0, environ=dict(os.environ), - hostname=socket.gethostname()) + hostname=socket.gethostname(), + ) result = InterfaceResult( interface=self._interface.__class__, runtime=runtime, inputs=self._interface.inputs.get_traitsfree(), - outputs=aggouts) + outputs=aggouts, + ) _save_resultfile( - result, cwd, self.name, - rebase=str2bool(self.config['execution']['use_relative_paths'])) + result, + cwd, + self.name, + rebase=str2bool(self.config["execution"]["use_relative_paths"]), + ) else: - logger.debug('aggregating mapnode results') + logger.debug("aggregating mapnode results") result = self._run_interface() return result @@ -622,13 +686,15 @@ def _run_command(self, execute, copyfiles=True): result = self._load_results() except (FileNotFoundError, AttributeError): # if aggregation does not work, rerun the node - logger.info("[Node] Some of the outputs were not found: " - "rerunning node.") + logger.info( + "[Node] Some of the outputs were not found: " "rerunning node." 
+ ) copyfiles = False # OE: this was like this before, execute = True # I'll keep them for safety else: - logger.info('[Node] Cached "%s" - collecting precomputed outputs', - self.fullname) + logger.info( + '[Node] Cached "%s" - collecting precomputed outputs', self.fullname + ) return result outdir = self.output_dir() @@ -639,46 +705,55 @@ def _run_command(self, execute, copyfiles=True): cwd=outdir, returncode=1, environ=dict(os.environ), - hostname=socket.gethostname() + hostname=socket.gethostname(), ), - inputs=self._interface.inputs.get_traitsfree()) + inputs=self._interface.inputs.get_traitsfree(), + ) if copyfiles: self._originputs = deepcopy(self._interface.inputs) self._copyfiles_to_wd(execute=execute) message = '[Node] Running "{}" ("{}.{}")'.format( - self.name, self._interface.__module__, - self._interface.__class__.__name__) + self.name, self._interface.__module__, self._interface.__class__.__name__ + ) if issubclass(self._interface.__class__, CommandLine): try: with indirectory(outdir): cmd = self._interface.cmdline except Exception as msg: - result.runtime.stderr = '{}\n\n{}'.format( - getattr(result.runtime, 'stderr', ''), msg) + result.runtime.stderr = "{}\n\n{}".format( + getattr(result.runtime, "stderr", ""), msg + ) _save_resultfile( - result, outdir, self.name, - rebase=str2bool(self.config['execution']['use_relative_paths'])) + result, + outdir, + self.name, + rebase=str2bool(self.config["execution"]["use_relative_paths"]), + ) raise - cmdfile = op.join(outdir, 'command.txt') - with open(cmdfile, 'wt') as fd: + cmdfile = op.join(outdir, "command.txt") + with open(cmdfile, "wt") as fd: print(cmd + "\n", file=fd) - message += ', a CommandLine Interface with command:\n{}'.format(cmd) + message += ", a CommandLine Interface with command:\n{}".format(cmd) logger.info(message) try: result = self._interface.run(cwd=outdir) except Exception as msg: - result.runtime.stderr = '%s\n\n%s'.format( - getattr(result.runtime, 'stderr', ''), msg) + result.runtime.stderr = "%s\n\n%s".format( + getattr(result.runtime, "stderr", ""), msg + ) _save_resultfile( - result, outdir, self.name, - rebase=str2bool(self.config['execution']['use_relative_paths'])) + result, + outdir, + self.name, + rebase=str2bool(self.config["execution"]["use_relative_paths"]), + ) raise dirs2keep = None if isinstance(self, MapNode): - dirs2keep = [op.join(outdir, 'mapflow')] + dirs2keep = [op.join(outdir, "mapflow")] result.outputs = clean_working_directory( result.outputs, @@ -686,10 +761,14 @@ def _run_command(self, execute, copyfiles=True): self._interface.inputs, self.needed_outputs, self.config, - dirs2keep=dirs2keep) + dirs2keep=dirs2keep, + ) _save_resultfile( - result, outdir, self.name, - rebase=str2bool(self.config['execution']['use_relative_paths'])) + result, + outdir, + self.name, + rebase=str2bool(self.config["execution"]["use_relative_paths"]), + ) return result @@ -700,41 +779,42 @@ def _copyfiles_to_wd(self, execute=True, linksonly=False): # Nothing to be done return - logger.debug('copying files to wd [execute=%s, linksonly=%s]', execute, - linksonly) + logger.debug( + "copying files to wd [execute=%s, linksonly=%s]", execute, linksonly + ) outdir = self.output_dir() if execute and linksonly: olddir = outdir - outdir = op.join(outdir, '_tempinput') + outdir = op.join(outdir, "_tempinput") os.makedirs(outdir, exist_ok=True) for info in filecopy_info: - files = self.inputs.trait_get().get(info['key']) + files = self.inputs.trait_get().get(info["key"]) if not isdefined(files) or not files: continue 
infiles = ensure_list(files) if execute: if linksonly: - if not info['copy']: + if not info["copy"]: newfiles = copyfiles( - infiles, [outdir], - copy=info['copy'], - create_new=True) + infiles, [outdir], copy=info["copy"], create_new=True + ) else: newfiles = fnames_presuffix(infiles, newpath=outdir) - newfiles = _strip_temp(newfiles, - op.abspath(olddir).split( - op.sep)[-1]) + newfiles = _strip_temp( + newfiles, op.abspath(olddir).split(op.sep)[-1] + ) else: newfiles = copyfiles( - infiles, [outdir], copy=info['copy'], create_new=True) + infiles, [outdir], copy=info["copy"], create_new=True + ) else: newfiles = fnames_presuffix(infiles, newpath=outdir) if not isinstance(files, list): newfiles = simplify_list(newfiles) - setattr(self.inputs, info['key'], newfiles) + setattr(self.inputs, info["key"], newfiles) if execute and linksonly: emptydirs(outdir, noexist_ok=True) @@ -772,13 +852,9 @@ class JoinNode(Node): """ - def __init__(self, - interface, - name, - joinsource, - joinfield=None, - unique=False, - **kwargs): + def __init__( + self, interface, name, joinsource, joinfield=None, unique=False, **kwargs + ): """ Parameters @@ -810,8 +886,9 @@ def __init__(self, self.joinfield = joinfield """the fields to join""" - self._inputs = self._override_join_traits(self._interface.inputs, - self.joinfield) + self._inputs = self._override_join_traits( + self._interface.inputs, self.joinfield + ) """the override inputs""" self._unique = unique @@ -861,8 +938,9 @@ def _add_join_item_fields(self): """ # create the new join item fields idx = self._next_slot_index - newfields = dict([(field, self._add_join_item_field(field, idx)) - for field in self.joinfield]) + newfields = dict( + [(field, self._add_join_item_field(field, idx)) for field in self.joinfield] + ) # increment the join slot index logger.debug("Added the %s join item fields %s.", self, newfields) self._next_slot_index += 1 @@ -896,8 +974,10 @@ def _override_join_traits(self, basetraits, fields): # validate the fields for field in fields: if not basetraits.trait(field): - raise ValueError("The JoinNode %s does not have a field" - " named %s" % (self.name, field)) + raise ValueError( + "The JoinNode %s does not have a field" + " named %s" % (self.name, field) + ) for name, trait in list(basetraits.items()): # if a join field has a single inner trait, then the item # trait is that inner trait. 
Otherwise, the item trait is @@ -908,7 +988,11 @@ def _override_join_traits(self, basetraits, fields): setattr(dyntraits, name, Undefined) logger.debug( "Converted the join node %s field %s trait type from %s to %s", - self, name, trait.trait_type.info(), item_trait.info()) + self, + name, + trait.trait_type.info(), + item_trait.info(), + ) else: dyntraits.add_trait(name, traits.Any) setattr(dyntraits, name, Undefined) @@ -930,17 +1014,27 @@ def _collate_join_field_inputs(self): try: setattr(self._interface.inputs, field, val) except Exception as e: - raise ValueError(">>JN %s %s %s %s %s: %s" % - (self, field, val, - self.inputs.copyable_trait_names(), - self.joinfield, e)) + raise ValueError( + ">>JN %s %s %s %s %s: %s" + % ( + self, + field, + val, + self.inputs.copyable_trait_names(), + self.joinfield, + e, + ) + ) elif hasattr(self._interface.inputs, field): # copy the non-join field val = getattr(self._inputs, field) if isdefined(val): setattr(self._interface.inputs, field, val) - logger.debug("Collated %d inputs into the %s node join fields", - self._next_slot_index, self) + logger.debug( + "Collated %d inputs into the %s node join fields", + self._next_slot_index, + self, + ) def _collate_input_value(self, field): """ @@ -954,10 +1048,7 @@ def _collate_input_value(self, field): the iterables order. If the ``unique`` flag is set, then duplicate values are removed but the iterables order is preserved. """ - val = [ - self._slot_value(field, idx) - for idx in range(self._next_slot_index) - ] + val = [self._slot_value(field, idx) for idx in range(self._next_slot_index)] basetrait = self._interface.inputs.trait(field) if isinstance(basetrait.trait_type, traits.Set): return set(val) @@ -974,8 +1065,9 @@ def _slot_value(self, field, index): except AttributeError as e: raise AttributeError( "The join node %s does not have a slot field %s" - " to hold the %s value at index %d: %s" % (self, slot_field, - field, index, e)) + " to hold the %s value at index %d: %s" + % (self, slot_field, field, index, e) + ) class MapNode(Node): @@ -994,13 +1086,9 @@ class MapNode(Node): """ - def __init__(self, - interface, - iterfield, - name, - serial=False, - nested=False, - **kwargs): + def __init__( + self, interface, iterfield, name, serial=False, nested=False, **kwargs + ): """ Parameters @@ -1031,7 +1119,8 @@ def __init__(self, self.iterfield = iterfield self.nested = nested self._inputs = self._create_dynamic_traits( - self._interface.inputs, fields=self.iterfield) + self._interface.inputs, fields=self.iterfield + ) self._inputs.on_trait_change(self._set_mapnode_input) self._got_inputs = False self._serial = serial @@ -1044,7 +1133,7 @@ def _create_dynamic_traits(self, basetraits, fields=None, nitems=None): fields = basetraits.copyable_trait_names() for name, spec in list(basetraits.items()): if name in fields and ((nitems is None) or (nitems > 1)): - logger.debug('adding multipath trait: %s', name) + logger.debug("adding multipath trait: %s", name) if self.nested: output.add_trait(name, InputMultiPath(traits.Any())) else: @@ -1063,13 +1152,15 @@ def set_input(self, parameter, val): Set interface input value or nodewrapper attribute Priority goes to interface. 
""" - logger.debug('setting nodelevel(%s) input %s = %s', str(self), - parameter, str(val)) + logger.debug( + "setting nodelevel(%s) input %s = %s", str(self), parameter, str(val) + ) self._set_mapnode_input(parameter, deepcopy(val)) def _set_mapnode_input(self, name, newvalue): - logger.debug('setting mapnode(%s) input: %s -> %s', str(self), name, - str(newvalue)) + logger.debug( + "setting mapnode(%s) input: %s -> %s", str(self), name, str(newvalue) + ) if name in self.iterfield: setattr(self._inputs, name, newvalue) else: @@ -1087,25 +1178,24 @@ def _get_hashval(self): for name in self.iterfield: hashinputs.remove_trait(name) hashinputs.add_trait( - name, - InputMultiPath( - self._interface.inputs.traits()[name].trait_type)) - logger.debug('setting hashinput %s-> %s', name, - getattr(self._inputs, name)) + name, InputMultiPath(self._interface.inputs.traits()[name].trait_type) + ) + logger.debug("setting hashinput %s-> %s", name, getattr(self._inputs, name)) if self.nested: setattr(hashinputs, name, flatten(getattr(self._inputs, name))) else: setattr(hashinputs, name, getattr(self._inputs, name)) hashed_inputs, hashvalue = hashinputs.get_hashval( - hash_method=self.config['execution']['hash_method']) - rm_extra = self.config['execution']['remove_unnecessary_outputs'] + hash_method=self.config["execution"]["hash_method"] + ) + rm_extra = self.config["execution"]["remove_unnecessary_outputs"] if str2bool(rm_extra) and self.needed_outputs: hashobject = md5() hashobject.update(hashvalue.encode()) sorted_outputs = sorted(self.needed_outputs) hashobject.update(str(sorted_outputs).encode()) hashvalue = hashobject.hexdigest() - hashed_inputs.append(('needed_outputs', sorted_outputs)) + hashed_inputs.append(("needed_outputs", sorted_outputs)) self._hashed_inputs, self._hashvalue = hashed_inputs, hashvalue return self._hashed_inputs, self._hashvalue @@ -1122,14 +1212,11 @@ def _make_nodes(self, cwd=None): if cwd is None: cwd = self.output_dir() if self.nested: - nitems = len( - flatten( - ensure_list(getattr(self.inputs, self.iterfield[0])))) + nitems = len(flatten(ensure_list(getattr(self.inputs, self.iterfield[0])))) else: - nitems = len( - ensure_list(getattr(self.inputs, self.iterfield[0]))) + nitems = len(ensure_list(getattr(self.inputs, self.iterfield[0]))) for i in range(nitems): - nodename = '_%s%d' % (self.name, i) + nodename = "_%s%d" % (self.name, i) node = Node( deepcopy(self._interface), n_procs=self._n_procs, @@ -1137,47 +1224,44 @@ def _make_nodes(self, cwd=None): overwrite=self.overwrite, needed_outputs=self.needed_outputs, run_without_submitting=self.run_without_submitting, - base_dir=op.join(cwd, 'mapflow'), - name=nodename) + base_dir=op.join(cwd, "mapflow"), + name=nodename, + ) node.plugin_args = self.plugin_args node.interface.inputs.trait_set( - **deepcopy(self._interface.inputs.trait_get())) + **deepcopy(self._interface.inputs.trait_get()) + ) node.interface.resource_monitor = self._interface.resource_monitor for field in self.iterfield: if self.nested: - fieldvals = flatten( - ensure_list(getattr(self.inputs, field))) + fieldvals = flatten(ensure_list(getattr(self.inputs, field))) else: fieldvals = ensure_list(getattr(self.inputs, field)) - logger.debug('setting input %d %s %s', i, field, fieldvals[i]) + logger.debug("setting input %d %s %s", i, field, fieldvals[i]) setattr(node.inputs, field, fieldvals[i]) node.config = self.config yield i, node def _collate_results(self, nodes): finalresult = InterfaceResult( - interface=[], - runtime=[], - provenance=[], - inputs=[], - 
outputs=self.outputs) + interface=[], runtime=[], provenance=[], inputs=[], outputs=self.outputs + ) returncode = [] for i, nresult, err in nodes: finalresult.runtime.insert(i, None) returncode.insert(i, err) if nresult: - if hasattr(nresult, 'runtime'): + if hasattr(nresult, "runtime"): finalresult.interface.insert(i, nresult.interface) finalresult.inputs.insert(i, nresult.inputs) finalresult.runtime[i] = nresult.runtime - if hasattr(nresult, 'provenance'): + if hasattr(nresult, "provenance"): finalresult.provenance.insert(i, nresult.provenance) if self.outputs: for key, _ in list(self.outputs.items()): - rm_extra = ( - self.config['execution']['remove_unnecessary_outputs']) + rm_extra = self.config["execution"]["remove_unnecessary_outputs"] if str2bool(rm_extra) and self.needed_outputs: if key not in self.needed_outputs: continue @@ -1196,20 +1280,20 @@ def _collate_results(self, nodes): for key, _ in list(self.outputs.items()): values = getattr(finalresult.outputs, key) if isdefined(values): - values = unflatten(values, - ensure_list( - getattr(self.inputs, - self.iterfield[0]))) + values = unflatten( + values, ensure_list(getattr(self.inputs, self.iterfield[0])) + ) setattr(finalresult.outputs, key, values) if returncode and any([code is not None for code in returncode]): msg = [] for i, code in enumerate(returncode): if code is not None: - msg += ['Subnode %d failed' % i] - msg += ['Error: %s' % str(code)] - raise Exception('Subnodes of node: %s failed:\n%s' % - (self.name, '\n'.join(msg))) + msg += ["Subnode %d failed" % i] + msg += ["Error: %s" % str(code)] + raise Exception( + "Subnodes of node: %s failed:\n%s" % (self.name, "\n".join(msg)) + ) return finalresult @@ -1227,15 +1311,14 @@ def num_subnodes(self): if self._serial: return 1 if self.nested: - return len( - ensure_list( - flatten(getattr(self.inputs, self.iterfield[0])))) + return len(ensure_list(flatten(getattr(self.inputs, self.iterfield[0])))) return len(ensure_list(getattr(self.inputs, self.iterfield[0]))) def _get_inputs(self): old_inputs = self._inputs.trait_get() self._inputs = self._create_dynamic_traits( - self._interface.inputs, fields=self.iterfield) + self._interface.inputs, fields=self.iterfield + ) self._inputs.trait_set(**old_inputs) super(MapNode, self)._get_inputs() @@ -1247,17 +1330,21 @@ def _check_iterfield(self): """ for iterfield in self.iterfield: if not isdefined(getattr(self.inputs, iterfield)): - raise ValueError(("Input %s was not set but it is listed " - "in iterfields.") % iterfield) + raise ValueError( + ("Input %s was not set but it is listed " "in iterfields.") + % iterfield + ) if len(self.iterfield) > 1: - first_len = len( - ensure_list(getattr(self.inputs, self.iterfield[0]))) + first_len = len(ensure_list(getattr(self.inputs, self.iterfield[0]))) for iterfield in self.iterfield[1:]: - if first_len != len( - ensure_list(getattr(self.inputs, iterfield))): + if first_len != len(ensure_list(getattr(self.inputs, iterfield))): raise ValueError( - ("All iterfields of a MapNode have to " - "have the same length. %s") % str(self.inputs)) + ( + "All iterfields of a MapNode have to " + "have the same length. 
%s" + ) + % str(self.inputs) + ) def _run_interface(self, execute=True, updatehash=False): """Run the mapnode interface @@ -1272,13 +1359,10 @@ def _run_interface(self, execute=True, updatehash=False): # Set up mapnode folder names if self.nested: - nitems = len( - ensure_list( - flatten(getattr(self.inputs, self.iterfield[0])))) + nitems = len(ensure_list(flatten(getattr(self.inputs, self.iterfield[0])))) else: - nitems = len( - ensure_list(getattr(self.inputs, self.iterfield[0]))) - nnametpl = '_%s{}' % self.name + nitems = len(ensure_list(getattr(self.inputs, self.iterfield[0]))) + nnametpl = "_%s{}" % self.name nodenames = [nnametpl.format(i) for i in range(nitems)] # Run mapnode @@ -1286,13 +1370,14 @@ def _run_interface(self, execute=True, updatehash=False): _node_runner( self._make_nodes(cwd), updatehash=updatehash, - stop_first=str2bool( - self.config['execution']['stop_on_first_crash']))) + stop_first=str2bool(self.config["execution"]["stop_on_first_crash"]), + ) + ) # And store results _save_resultfile(result, cwd, self.name, rebase=False) # remove any node directories no longer required dirs2remove = [] - for path in glob(op.join(cwd, 'mapflow', '*')): + for path in glob(op.join(cwd, "mapflow", "*")): if op.isdir(path): if path.split(op.sep)[-1] not in nodenames: dirs2remove.append(path) diff --git a/nipype/pipeline/engine/tests/test_base.py b/nipype/pipeline/engine/tests/test_base.py index c6b9135f54..ab80c2f158 100644 --- a/nipype/pipeline/engine/tests/test_base.py +++ b/nipype/pipeline/engine/tests/test_base.py @@ -10,13 +10,13 @@ class InputSpec(nib.TraitedSpec): - input1 = nib.traits.Int(desc='a random int') - input2 = nib.traits.Int(desc='a random int') - input_file = nib.File(desc='Random File') + input1 = nib.traits.Int(desc="a random int") + input2 = nib.traits.Int(desc="a random int") + input_file = nib.File(desc="Random File") class OutputSpec(nib.TraitedSpec): - output1 = nib.traits.List(nib.traits.Int, desc='outputs') + output1 = nib.traits.List(nib.traits.Int, desc="outputs") class EngineTestInterface(nib.SimpleInterface): @@ -25,60 +25,67 @@ class EngineTestInterface(nib.SimpleInterface): def _run_interface(self, runtime): runtime.returncode = 0 - self._results['output1'] = [1, self.inputs.input1] + self._results["output1"] = [1, self.inputs.input1] return runtime -@pytest.mark.parametrize( - 'name', ['valid1', 'valid_node', 'valid-node', 'ValidNode0']) +@pytest.mark.parametrize("name", ["valid1", "valid_node", "valid-node", "ValidNode0"]) def test_create(name): base = EngineBase(name=name) assert base.name == name @pytest.mark.parametrize( - 'name', ['invalid*1', 'invalid.1', 'invalid@', 'in/valid', None]) + "name", ["invalid*1", "invalid.1", "invalid@", "in/valid", None] +) def test_create_invalid(name): with pytest.raises(ValueError): EngineBase(name=name) def test_hierarchy(): - base = EngineBase(name='nodename') - base._hierarchy = 'some.history.behind' + base = EngineBase(name="nodename") + base._hierarchy = "some.history.behind" - assert base.name == 'nodename' - assert base.fullname == 'some.history.behind.nodename' + assert base.name == "nodename" + assert base.fullname == "some.history.behind.nodename" def test_clone(): - base = EngineBase(name='nodename') - base2 = base.clone('newnodename') + base = EngineBase(name="nodename") + base2 = base.clone("newnodename") - assert (base.base_dir == base2.base_dir and - base.config == base2.config and - base2.name == 'newnodename') + assert ( + base.base_dir == base2.base_dir + and base.config == base2.config + 
and base2.name == "newnodename" + ) with pytest.raises(ValueError): - base.clone('nodename') + base.clone("nodename") + def test_clone_node_iterables(tmpdir): tmpdir.chdir() def addstr(string): - return ('%s + 2' % string) - - subject_list = ['sub-001', 'sub-002'] - inputnode = pe.Node(niu.IdentityInterface(fields=['subject']), - name='inputnode') - inputnode.iterables = [('subject', subject_list)] - - node_1 = pe.Node(niu.Function(input_names='string', - output_names='string', - function=addstr), name='node_1') - node_2 = node_1.clone('node_2') - - workflow = pe.Workflow(name='iter_clone_wf') - workflow.connect([(inputnode, node_1, [('subject', 'string')]), - (node_1, node_2, [('string', 'string')])]) + return "%s + 2" % string + + subject_list = ["sub-001", "sub-002"] + inputnode = pe.Node(niu.IdentityInterface(fields=["subject"]), name="inputnode") + inputnode.iterables = [("subject", subject_list)] + + node_1 = pe.Node( + niu.Function(input_names="string", output_names="string", function=addstr), + name="node_1", + ) + node_2 = node_1.clone("node_2") + + workflow = pe.Workflow(name="iter_clone_wf") + workflow.connect( + [ + (inputnode, node_1, [("subject", "string")]), + (node_1, node_2, [("string", "string")]), + ] + ) workflow.run() diff --git a/nipype/pipeline/engine/tests/test_engine.py b/nipype/pipeline/engine/tests/test_engine.py index 083aa8b691..f28b0f3bf3 100644 --- a/nipype/pipeline/engine/tests/test_engine.py +++ b/nipype/pipeline/engine/tests/test_engine.py @@ -19,16 +19,13 @@ @pytest.mark.parametrize( "iterables, expected", [ - ({ - "1": None - }, (1, 0)), # test1 - ({ - "1": dict(input1=lambda: [1, 2], input2=lambda: [1, 2]) - }, (4, 0)) # test2 - ]) + ({"1": None}, (1, 0)), # test1 + ({"1": dict(input1=lambda: [1, 2], input2=lambda: [1, 2])}, (4, 0)), # test2 + ], +) def test_1mod(iterables, expected): - pipe = pe.Workflow(name='pipe') - mod1 = pe.Node(interface=EngineTestInterface(), name='mod1') + pipe = pe.Workflow(name="pipe") + mod1 = pe.Node(interface=EngineTestInterface(), name="mod1") setattr(mod1, "iterables", iterables["1"]) pipe.add_nodes([mod1]) pipe._flatgraph = pipe._create_flat_graph() @@ -40,26 +37,21 @@ def test_1mod(iterables, expected): @pytest.mark.parametrize( "iterables, expected", [ - ({ - "1": {}, - "2": dict(input1=lambda: [1, 2]) - }, (3, 2)), # test3 - ({ - "1": dict(input1=lambda: [1, 2]), - "2": {} - }, (4, 2)), # test4 - ({ - "1": dict(input1=lambda: [1, 2]), - "2": dict(input1=lambda: [1, 2]) - }, (6, 4)) # test5 - ]) + ({"1": {}, "2": dict(input1=lambda: [1, 2])}, (3, 2)), # test3 + ({"1": dict(input1=lambda: [1, 2]), "2": {}}, (4, 2)), # test4 + ( + {"1": dict(input1=lambda: [1, 2]), "2": dict(input1=lambda: [1, 2])}, + (6, 4), + ), # test5 + ], +) def test_2mods(iterables, expected): - pipe = pe.Workflow(name='pipe') - mod1 = pe.Node(interface=EngineTestInterface(), name='mod1') - mod2 = pe.Node(interface=EngineTestInterface(), name='mod2') + pipe = pe.Workflow(name="pipe") + mod1 = pe.Node(interface=EngineTestInterface(), name="mod1") + mod2 = pe.Node(interface=EngineTestInterface(), name="mod2") for nr in ["1", "2"]: setattr(eval("mod" + nr), "iterables", iterables[nr]) - pipe.connect([(mod1, mod2, [('output1', 'input2')])]) + pipe.connect([(mod1, mod2, [("output1", "input2")])]) pipe._flatgraph = pipe._create_flat_graph() pipe._execgraph = pe.generate_expanded_graph(deepcopy(pipe._flatgraph)) assert len(pipe._execgraph.nodes()) == expected[0] @@ -69,95 +61,111 @@ def test_2mods(iterables, expected): @pytest.mark.parametrize( 
"iterables, expected, connect", [ - ({ - "1": {}, - "2": dict(input1=lambda: [1, 2]), - "3": {} - }, (5, 4), ("1-2", "2-3")), # test6 - ({ - "1": dict(input1=lambda: [1, 2]), - "2": {}, - "3": {} - }, (5, 4), ("1-3", "2-3")), # test7 - ({ - "1": dict(input1=lambda: [1, 2]), - "2": dict(input1=lambda: [1, 2]), - "3": {} - }, (8, 8), ("1-3", "2-3")), # test8 - ]) + ( + {"1": {}, "2": dict(input1=lambda: [1, 2]), "3": {}}, + (5, 4), + ("1-2", "2-3"), + ), # test6 + ( + {"1": dict(input1=lambda: [1, 2]), "2": {}, "3": {}}, + (5, 4), + ("1-3", "2-3"), + ), # test7 + ( + { + "1": dict(input1=lambda: [1, 2]), + "2": dict(input1=lambda: [1, 2]), + "3": {}, + }, + (8, 8), + ("1-3", "2-3"), + ), # test8 + ], +) def test_3mods(iterables, expected, connect): - pipe = pe.Workflow(name='pipe') - mod1 = pe.Node(interface=EngineTestInterface(), name='mod1') - mod2 = pe.Node(interface=EngineTestInterface(), name='mod2') - mod3 = pe.Node(interface=EngineTestInterface(), name='mod3') + pipe = pe.Workflow(name="pipe") + mod1 = pe.Node(interface=EngineTestInterface(), name="mod1") + mod2 = pe.Node(interface=EngineTestInterface(), name="mod2") + mod3 = pe.Node(interface=EngineTestInterface(), name="mod3") for nr in ["1", "2", "3"]: setattr(eval("mod" + nr), "iterables", iterables[nr]) if connect == ("1-2", "2-3"): - pipe.connect([(mod1, mod2, [('output1', 'input2')]), - (mod2, mod3, [('output1', 'input2')])]) + pipe.connect( + [ + (mod1, mod2, [("output1", "input2")]), + (mod2, mod3, [("output1", "input2")]), + ] + ) elif connect == ("1-3", "2-3"): - pipe.connect([(mod1, mod3, [('output1', 'input1')]), - (mod2, mod3, [('output1', 'input2')])]) + pipe.connect( + [ + (mod1, mod3, [("output1", "input1")]), + (mod2, mod3, [("output1", "input2")]), + ] + ) else: raise Exception( - "connect pattern is not implemented yet within the test function") + "connect pattern is not implemented yet within the test function" + ) pipe._flatgraph = pipe._create_flat_graph() pipe._execgraph = pe.generate_expanded_graph(deepcopy(pipe._flatgraph)) assert len(pipe._execgraph.nodes()) == expected[0] assert len(pipe._execgraph.edges()) == expected[1] - edgenum = sorted([(len(pipe._execgraph.in_edges(node)) + - len(pipe._execgraph.out_edges(node))) - for node in pipe._execgraph.nodes()]) + edgenum = sorted( + [ + (len(pipe._execgraph.in_edges(node)) + len(pipe._execgraph.out_edges(node))) + for node in pipe._execgraph.nodes() + ] + ) assert edgenum[0] > 0 def test_expansion(): - pipe1 = pe.Workflow(name='pipe1') - mod1 = pe.Node(interface=EngineTestInterface(), name='mod1') - mod2 = pe.Node(interface=EngineTestInterface(), name='mod2') - pipe1.connect([(mod1, mod2, [('output1', 'input2')])]) - pipe2 = pe.Workflow(name='pipe2') - mod3 = pe.Node(interface=EngineTestInterface(), name='mod3') - mod4 = pe.Node(interface=EngineTestInterface(), name='mod4') - pipe2.connect([(mod3, mod4, [('output1', 'input2')])]) + pipe1 = pe.Workflow(name="pipe1") + mod1 = pe.Node(interface=EngineTestInterface(), name="mod1") + mod2 = pe.Node(interface=EngineTestInterface(), name="mod2") + pipe1.connect([(mod1, mod2, [("output1", "input2")])]) + pipe2 = pe.Workflow(name="pipe2") + mod3 = pe.Node(interface=EngineTestInterface(), name="mod3") + mod4 = pe.Node(interface=EngineTestInterface(), name="mod4") + pipe2.connect([(mod3, mod4, [("output1", "input2")])]) pipe3 = pe.Workflow(name="pipe3") - pipe3.connect([(pipe1, pipe2, [('mod2.output1', 'mod4.input1')])]) + pipe3.connect([(pipe1, pipe2, [("mod2.output1", "mod4.input1")])]) pipe4 = 
pe.Workflow(name="pipe4") - mod5 = pe.Node(interface=EngineTestInterface(), name='mod5') + mod5 = pe.Node(interface=EngineTestInterface(), name="mod5") pipe4.add_nodes([mod5]) pipe5 = pe.Workflow(name="pipe5") pipe5.add_nodes([pipe4]) pipe6 = pe.Workflow(name="pipe6") - pipe6.connect([(pipe5, pipe3, [('pipe4.mod5.output1', - 'pipe2.mod3.input1')])]) + pipe6.connect([(pipe5, pipe3, [("pipe4.mod5.output1", "pipe2.mod3.input1")])]) pipe6._flatgraph = pipe6._create_flat_graph() def test_iterable_expansion(): - wf1 = pe.Workflow(name='test') - node1 = pe.Node(EngineTestInterface(), name='node1') - node2 = pe.Node(EngineTestInterface(), name='node2') - node1.iterables = ('input1', [1, 2]) - wf1.connect(node1, 'output1', node2, 'input2') - wf3 = pe.Workflow(name='group') + wf1 = pe.Workflow(name="test") + node1 = pe.Node(EngineTestInterface(), name="node1") + node2 = pe.Node(EngineTestInterface(), name="node2") + node1.iterables = ("input1", [1, 2]) + wf1.connect(node1, "output1", node2, "input2") + wf3 = pe.Workflow(name="group") for i in [0, 1, 2]: - wf3.add_nodes([wf1.clone(name='test%d' % i)]) + wf3.add_nodes([wf1.clone(name="test%d" % i)]) wf3._flatgraph = wf3._create_flat_graph() assert len(pe.generate_expanded_graph(wf3._flatgraph).nodes()) == 12 def test_synchronize_expansion(): - wf1 = pe.Workflow(name='test') - node1 = pe.Node(EngineTestInterface(), name='node1') - node1.iterables = [('input1', [1, 2]), ('input2', [3, 4, 5])] + wf1 = pe.Workflow(name="test") + node1 = pe.Node(EngineTestInterface(), name="node1") + node1.iterables = [("input1", [1, 2]), ("input2", [3, 4, 5])] node1.synchronize = True - node2 = pe.Node(EngineTestInterface(), name='node2') - wf1.connect(node1, 'output1', node2, 'input2') - wf3 = pe.Workflow(name='group') + node2 = pe.Node(EngineTestInterface(), name="node2") + wf1.connect(node1, "output1", node2, "input2") + wf3 = pe.Workflow(name="group") for i in [0, 1, 2]: - wf3.add_nodes([wf1.clone(name='test%d' % i)]) + wf3.add_nodes([wf1.clone(name="test%d" % i)]) wf3._flatgraph = wf3._create_flat_graph() # Each expanded graph clone has: # 3 node1 expansion nodes and @@ -168,19 +176,19 @@ def test_synchronize_expansion(): def test_synchronize_tuples_expansion(): - wf1 = pe.Workflow(name='test') + wf1 = pe.Workflow(name="test") - node1 = pe.Node(EngineTestInterface(), name='node1') - node2 = pe.Node(EngineTestInterface(), name='node2') - node1.iterables = [('input1', 'input2'), [(1, 3), (2, 4), (None, 5)]] + node1 = pe.Node(EngineTestInterface(), name="node1") + node2 = pe.Node(EngineTestInterface(), name="node2") + node1.iterables = [("input1", "input2"), [(1, 3), (2, 4), (None, 5)]] node1.synchronize = True - wf1.connect(node1, 'output1', node2, 'input2') + wf1.connect(node1, "output1", node2, "input2") - wf3 = pe.Workflow(name='group') + wf3 = pe.Workflow(name="group") for i in [0, 1, 2]: - wf3.add_nodes([wf1.clone(name='test%d' % i)]) + wf3.add_nodes([wf1.clone(name="test%d" % i)]) wf3._flatgraph = wf3._create_flat_graph() # Identical to test_synchronize_expansion @@ -189,25 +197,25 @@ def test_synchronize_tuples_expansion(): def test_itersource_expansion(): - wf1 = pe.Workflow(name='test') - node1 = pe.Node(EngineTestInterface(), name='node1') - node1.iterables = ('input1', [1, 2]) + wf1 = pe.Workflow(name="test") + node1 = pe.Node(EngineTestInterface(), name="node1") + node1.iterables = ("input1", [1, 2]) - node2 = pe.Node(EngineTestInterface(), name='node2') - wf1.connect(node1, 'output1', node2, 'input1') + node2 = pe.Node(EngineTestInterface(), name="node2") 
+ wf1.connect(node1, "output1", node2, "input1") - node3 = pe.Node(EngineTestInterface(), name='node3') - node3.itersource = ('node1', 'input1') - node3.iterables = [('input1', {1: [3, 4], 2: [5, 6, 7]})] + node3 = pe.Node(EngineTestInterface(), name="node3") + node3.itersource = ("node1", "input1") + node3.iterables = [("input1", {1: [3, 4], 2: [5, 6, 7]})] - wf1.connect(node2, 'output1', node3, 'input1') - node4 = pe.Node(EngineTestInterface(), name='node4') + wf1.connect(node2, "output1", node3, "input1") + node4 = pe.Node(EngineTestInterface(), name="node4") - wf1.connect(node3, 'output1', node4, 'input1') + wf1.connect(node3, "output1", node4, "input1") - wf3 = pe.Workflow(name='group') + wf3 = pe.Workflow(name="group") for i in [0, 1, 2]: - wf3.add_nodes([wf1.clone(name='test%d' % i)]) + wf3.add_nodes([wf1.clone(name="test%d" % i)]) wf3._flatgraph = wf3._create_flat_graph() @@ -223,26 +231,24 @@ def test_itersource_expansion(): def test_itersource_synchronize1_expansion(): - wf1 = pe.Workflow(name='test') - node1 = pe.Node(EngineTestInterface(), name='node1') - node1.iterables = [('input1', [1, 2]), ('input2', [3, 4])] + wf1 = pe.Workflow(name="test") + node1 = pe.Node(EngineTestInterface(), name="node1") + node1.iterables = [("input1", [1, 2]), ("input2", [3, 4])] node1.synchronize = True - node2 = pe.Node(EngineTestInterface(), name='node2') - wf1.connect(node1, 'output1', node2, 'input1') - node3 = pe.Node(EngineTestInterface(), name='node3') - node3.itersource = ('node1', ['input1', 'input2']) - node3.iterables = [('input1', { - (1, 3): [5, 6] - }), ('input2', { - (1, 3): [7, 8], - (2, 4): [9] - })] - wf1.connect(node2, 'output1', node3, 'input1') - node4 = pe.Node(EngineTestInterface(), name='node4') - wf1.connect(node3, 'output1', node4, 'input1') - wf3 = pe.Workflow(name='group') + node2 = pe.Node(EngineTestInterface(), name="node2") + wf1.connect(node1, "output1", node2, "input1") + node3 = pe.Node(EngineTestInterface(), name="node3") + node3.itersource = ("node1", ["input1", "input2"]) + node3.iterables = [ + ("input1", {(1, 3): [5, 6]}), + ("input2", {(1, 3): [7, 8], (2, 4): [9]}), + ] + wf1.connect(node2, "output1", node3, "input1") + node4 = pe.Node(EngineTestInterface(), name="node4") + wf1.connect(node3, "output1", node4, "input1") + wf3 = pe.Workflow(name="group") for i in [0, 1, 2]: - wf3.add_nodes([wf1.clone(name='test%d' % i)]) + wf3.add_nodes([wf1.clone(name="test%d" % i)]) wf3._flatgraph = wf3._create_flat_graph() # each expanded graph clone has: @@ -257,26 +263,26 @@ def test_itersource_synchronize1_expansion(): def test_itersource_synchronize2_expansion(): - wf1 = pe.Workflow(name='test') + wf1 = pe.Workflow(name="test") - node1 = pe.Node(EngineTestInterface(), name='node1') - node1.iterables = [('input1', [1, 2]), ('input2', [3, 4])] + node1 = pe.Node(EngineTestInterface(), name="node1") + node1.iterables = [("input1", [1, 2]), ("input2", [3, 4])] node1.synchronize = True - node2 = pe.Node(EngineTestInterface(), name='node2') - wf1.connect(node1, 'output1', node2, 'input1') - node3 = pe.Node(EngineTestInterface(), name='node3') - node3.itersource = ('node1', ['input1', 'input2']) + node2 = pe.Node(EngineTestInterface(), name="node2") + wf1.connect(node1, "output1", node2, "input1") + node3 = pe.Node(EngineTestInterface(), name="node3") + node3.itersource = ("node1", ["input1", "input2"]) node3.synchronize = True - node3.iterables = [('input1', 'input2'), { - (1, 3): [(5, 7), (6, 8)], - (2, 4): [(None, 9)] - }] - wf1.connect(node2, 'output1', node3, 'input1') - 
node4 = pe.Node(EngineTestInterface(), name='node4') - wf1.connect(node3, 'output1', node4, 'input1') - wf3 = pe.Workflow(name='group') + node3.iterables = [ + ("input1", "input2"), + {(1, 3): [(5, 7), (6, 8)], (2, 4): [(None, 9)]}, + ] + wf1.connect(node2, "output1", node3, "input1") + node4 = pe.Node(EngineTestInterface(), name="node4") + wf1.connect(node3, "output1", node4, "input1") + wf3 = pe.Workflow(name="group") for i in [0, 1, 2]: - wf3.add_nodes([wf1.clone(name='test%d' % i)]) + wf3.add_nodes([wf1.clone(name="test%d" % i)]) wf3._flatgraph = wf3._create_flat_graph() # each expanded graph clone has: @@ -290,7 +296,6 @@ def test_itersource_synchronize2_expansion(): assert len(pe.generate_expanded_graph(wf3._flatgraph).nodes()) == 30 - def test_old_config(tmpdir): tmpdir.chdir() wd = os.getcwd() @@ -303,21 +308,21 @@ def func2(a): return a + 1 n1 = pe.Node( - Function(input_names=[], output_names=['a'], function=func1), - name='n1') + Function(input_names=[], output_names=["a"], function=func1), name="n1" + ) n2 = pe.Node( - Function(input_names=['a'], output_names=['b'], function=func2), - name='n2') - w1 = pe.Workflow(name='test') + Function(input_names=["a"], output_names=["b"], function=func2), name="n2" + ) + w1 = pe.Workflow(name="test") modify = lambda x: x + 1 n1.inputs.a = 1 - w1.connect(n1, ('a', modify), n2, 'a') + w1.connect(n1, ("a", modify), n2, "a") w1.base_dir = wd - w1.config['execution']['crashdump_dir'] = wd + w1.config["execution"]["crashdump_dir"] = wd # generate outputs - w1.run(plugin='Linear') + w1.run(plugin="Linear") def test_mapnode_json(tmpdir): @@ -331,13 +336,14 @@ def func1(in1): return in1 + 1 n1 = MapNode( - Function(input_names=['in1'], output_names=['out'], function=func1), - iterfield=['in1'], - name='n1') + Function(input_names=["in1"], output_names=["out"], function=func1), + iterfield=["in1"], + name="n1", + ) n1.inputs.in1 = [1] - w1 = Workflow(name='test') + w1 = Workflow(name="test") w1.base_dir = wd - w1.config['execution']['crashdump_dir'] = wd + w1.config["execution"]["crashdump_dir"] = wd w1.add_nodes([n1]) w1.run() n1.inputs.in1 = [2] @@ -347,13 +353,13 @@ def func1(in1): eg = w1.run() node = list(eg.nodes())[0] - outjson = glob(os.path.join(node.output_dir(), '_0x*.json')) + outjson = glob(os.path.join(node.output_dir(), "_0x*.json")) assert len(outjson) == 1 # check that multiple json's don't trigger rerun - with open(os.path.join(node.output_dir(), 'test.json'), 'wt') as fp: - fp.write('dummy file') - w1.config['execution'].update(**{'stop_on_first_rerun': True}) + with open(os.path.join(node.output_dir(), "test.json"), "wt") as fp: + fp.write("dummy file") + w1.config["execution"].update(**{"stop_on_first_rerun": True}) w1.run() @@ -362,18 +368,18 @@ def test_parameterize_dirs_false(tmpdir): from ....interfaces.utility import IdentityInterface from ....testing import example_data - input_file = example_data('fsl_motion_outliers_fd.txt') + input_file = example_data("fsl_motion_outliers_fd.txt") - n1 = pe.Node(EngineTestInterface(), name='Node1') - n1.iterables = ('input_file', (input_file, input_file)) + n1 = pe.Node(EngineTestInterface(), name="Node1") + n1.iterables = ("input_file", (input_file, input_file)) n1.interface.inputs.input1 = 1 - n2 = pe.Node(IdentityInterface(fields='in1'), name='Node2') + n2 = pe.Node(IdentityInterface(fields="in1"), name="Node2") - wf = pe.Workflow(name='Test') + wf = pe.Workflow(name="Test") wf.base_dir = tmpdir.strpath - wf.config['execution']['parameterize_dirs'] = False - wf.connect([(n1, n2, 
[('output1', 'in1')])]) + wf.config["execution"]["parameterize_dirs"] = False + wf.connect([(n1, n2, [("output1", "in1")])]) wf.run() @@ -387,62 +393,61 @@ def func1(in1): return in1 n1 = MapNode( - Function(input_names=['in1'], output_names=['out'], function=func1), - iterfield=['in1'], - name='n1') + Function(input_names=["in1"], output_names=["out"], function=func1), + iterfield=["in1"], + name="n1", + ) n1.inputs.in1 = [1, 2, 3] - w1 = Workflow(name='test') + w1 = Workflow(name="test") w1.base_dir = wd w1.add_nodes([n1]) # set local check - w1.config['execution'] = { - 'stop_on_first_crash': 'true', - 'local_hash_check': 'true', - 'crashdump_dir': wd, - 'poll_sleep_duration': 2 + w1.config["execution"] = { + "stop_on_first_crash": "true", + "local_hash_check": "true", + "crashdump_dir": wd, + "poll_sleep_duration": 2, } # test output of num_subnodes method when serial is default (False) assert n1.num_subnodes() == len(n1.inputs.in1) # test running the workflow on default conditions - w1.run(plugin='MultiProc') + w1.run(plugin="MultiProc") # test output of num_subnodes method when serial is True n1._serial = True assert n1.num_subnodes() == 1 # test running the workflow on serial conditions - w1.run(plugin='MultiProc') + w1.run(plugin="MultiProc") def test_write_graph_runs(tmpdir): tmpdir.chdir() - for graph in ('orig', 'flat', 'exec', 'hierarchical', 'colored'): + for graph in ("orig", "flat", "exec", "hierarchical", "colored"): for simple in (True, False): - pipe = pe.Workflow(name='pipe') - mod1 = pe.Node(interface=EngineTestInterface(), name='mod1') - mod2 = pe.Node(interface=EngineTestInterface(), name='mod2') - pipe.connect([(mod1, mod2, [('output1', 'input1')])]) + pipe = pe.Workflow(name="pipe") + mod1 = pe.Node(interface=EngineTestInterface(), name="mod1") + mod2 = pe.Node(interface=EngineTestInterface(), name="mod2") + pipe.connect([(mod1, mod2, [("output1", "input1")])]) try: - pipe.write_graph( - graph2use=graph, simple_form=simple, format='dot') + pipe.write_graph(graph2use=graph, simple_form=simple, format="dot") except Exception: - assert False, \ - 'Failed to plot {} {} graph'.format( - 'simple' if simple else 'detailed', graph) + assert False, "Failed to plot {} {} graph".format( + "simple" if simple else "detailed", graph + ) - assert os.path.exists('graph.dot') or os.path.exists( - 'graph_detailed.dot') + assert os.path.exists("graph.dot") or os.path.exists("graph_detailed.dot") try: - os.remove('graph.dot') + os.remove("graph.dot") except OSError: pass try: - os.remove('graph_detailed.dot') + os.remove("graph_detailed.dot") except OSError: pass @@ -450,88 +455,98 @@ def test_write_graph_runs(tmpdir): def test_deep_nested_write_graph_runs(tmpdir): tmpdir.chdir() - for graph in ('orig', 'flat', 'exec', 'hierarchical', 'colored'): + for graph in ("orig", "flat", "exec", "hierarchical", "colored"): for simple in (True, False): - pipe = pe.Workflow(name='pipe') + pipe = pe.Workflow(name="pipe") parent = pipe for depth in range(10): - sub = pe.Workflow(name='pipe_nest_{}'.format(depth)) + sub = pe.Workflow(name="pipe_nest_{}".format(depth)) parent.add_nodes([sub]) parent = sub - mod1 = pe.Node(interface=EngineTestInterface(), name='mod1') + mod1 = pe.Node(interface=EngineTestInterface(), name="mod1") parent.add_nodes([mod1]) try: - pipe.write_graph( - graph2use=graph, simple_form=simple, format='dot') + pipe.write_graph(graph2use=graph, simple_form=simple, format="dot") except Exception as e: - assert False, \ - 'Failed to plot {} {} deep graph: {!s}'.format( - 'simple' if 
simple else 'detailed', graph, e) + assert False, "Failed to plot {} {} deep graph: {!s}".format( + "simple" if simple else "detailed", graph, e + ) - assert os.path.exists('graph.dot') or os.path.exists( - 'graph_detailed.dot') + assert os.path.exists("graph.dot") or os.path.exists("graph_detailed.dot") try: - os.remove('graph.dot') + os.remove("graph.dot") except OSError: pass try: - os.remove('graph_detailed.dot') + os.remove("graph_detailed.dot") except OSError: pass + import networkx + # Format of the graph has slightly changed -graph_str = '""' if int(networkx.__version__.split('.')[0]) == 1 else '' +graph_str = '""' if int(networkx.__version__.split(".")[0]) == 1 else "" # examples of dot files used in the following test -dotfile_orig = ['strict digraph ' + graph_str + ' {\n', - '"mod1 (engine)";\n', - '"mod2 (engine)";\n', - '"mod1 (engine)" -> "mod2 (engine)";\n', - '}\n'] - -dotfile_detailed_orig = ['digraph structs {\n', - 'node [shape=record];\n', - 'pipemod1 [label="{IN}|{ mod1 | engine | }|{OUT| output1}"];\n', - 'pipemod2 [label="{IN| input1}|{ mod2 | engine | }|{OUT}"];\n', - 'pipemod1:outoutput1:e -> pipemod2:ininput1:w;\n', - '}'] - - -dotfile_hierarchical = ['digraph pipe{\n', - ' label="pipe";\n', - ' pipe_mod1[label="mod1 (engine)"];\n', - ' pipe_mod2[label="mod2 (engine)"];\n', - ' pipe_mod1 -> pipe_mod2;\n', - '}'] - -dotfile_colored = ['digraph pipe{\n', - ' label="pipe";\n', - ' pipe_mod1[label="mod1 (engine)", style=filled, fillcolor="#FFFFC8"];\n', - ' pipe_mod2[label="mod2 (engine)", style=filled, fillcolor="#FFFFC8"];\n', - ' pipe_mod1 -> pipe_mod2;\n', - '}'] +dotfile_orig = [ + "strict digraph " + graph_str + " {\n", + '"mod1 (engine)";\n', + '"mod2 (engine)";\n', + '"mod1 (engine)" -> "mod2 (engine)";\n', + "}\n", +] + +dotfile_detailed_orig = [ + "digraph structs {\n", + "node [shape=record];\n", + 'pipemod1 [label="{IN}|{ mod1 | engine | }|{OUT| output1}"];\n', + 'pipemod2 [label="{IN| input1}|{ mod2 | engine | }|{OUT}"];\n', + "pipemod1:outoutput1:e -> pipemod2:ininput1:w;\n", + "}", +] + + +dotfile_hierarchical = [ + "digraph pipe{\n", + ' label="pipe";\n', + ' pipe_mod1[label="mod1 (engine)"];\n', + ' pipe_mod2[label="mod2 (engine)"];\n', + " pipe_mod1 -> pipe_mod2;\n", + "}", +] + +dotfile_colored = [ + "digraph pipe{\n", + ' label="pipe";\n', + ' pipe_mod1[label="mod1 (engine)", style=filled, fillcolor="#FFFFC8"];\n', + ' pipe_mod2[label="mod2 (engine)", style=filled, fillcolor="#FFFFC8"];\n', + " pipe_mod1 -> pipe_mod2;\n", + "}", +] dotfiles = { "orig": dotfile_orig, "flat": dotfile_orig, "exec": dotfile_orig, "hierarchical": dotfile_hierarchical, - "colored": dotfile_colored - } + "colored": dotfile_colored, +} + @pytest.mark.parametrize("simple", [True, False]) -@pytest.mark.parametrize("graph_type", ['orig', 'flat', 'exec', 'hierarchical', 'colored']) +@pytest.mark.parametrize( + "graph_type", ["orig", "flat", "exec", "hierarchical", "colored"] +) def test_write_graph_dotfile(tmpdir, graph_type, simple): """ checking dot files for a workflow without iterables""" tmpdir.chdir() - pipe = pe.Workflow(name='pipe') - mod1 = pe.Node(interface=EngineTestInterface(), name='mod1') - mod2 = pe.Node(interface=EngineTestInterface(), name='mod2') - pipe.connect([(mod1, mod2, [('output1', 'input1')])]) - pipe.write_graph( - graph2use=graph_type, simple_form=simple, format='dot') + pipe = pe.Workflow(name="pipe") + mod1 = pe.Node(interface=EngineTestInterface(), name="mod1") + mod2 = pe.Node(interface=EngineTestInterface(), name="mod2") + pipe.connect([(mod1, 
mod2, [("output1", "input1")])]) + pipe.write_graph(graph2use=graph_type, simple_form=simple, format="dot") with open("graph.dot") as f: graph_str = f.read() @@ -543,12 +558,19 @@ def test_write_graph_dotfile(tmpdir, graph_type, simple): # if simple=False graph.dot uses longer names for line in dotfiles[graph_type]: if graph_type in ["hierarchical", "colored"]: - assert line.replace("mod1 (engine)", "mod1.EngineTestInterface.engine").replace( - "mod2 (engine)", "mod2.EngineTestInterface.engine") in graph_str + assert ( + line.replace( + "mod1 (engine)", "mod1.EngineTestInterface.engine" + ).replace("mod2 (engine)", "mod2.EngineTestInterface.engine") + in graph_str + ) else: - assert line.replace( - "mod1 (engine)", "pipe.mod1.EngineTestInterface.engine").replace( - "mod2 (engine)", "pipe.mod2.EngineTestInterface.engine") in graph_str + assert ( + line.replace( + "mod1 (engine)", "pipe.mod1.EngineTestInterface.engine" + ).replace("mod2 (engine)", "pipe.mod2.EngineTestInterface.engine") + in graph_str + ) # graph_detailed is the same for orig, flat, exec (if no iterables) # graph_detailed is not created for hierachical or colored @@ -561,59 +583,64 @@ def test_write_graph_dotfile(tmpdir, graph_type, simple): # examples of dot files used in the following test dotfile_detailed_iter_exec = [ - 'digraph structs {\n', - 'node [shape=record];\n', + "digraph structs {\n", + "node [shape=record];\n", 'pipemod1aIa1 [label="{IN}|{ a1 | engine | mod1.aI }|{OUT| output1}"];\n', 'pipemod2a1 [label="{IN| input1}|{ a1 | engine | mod2 }|{OUT}"];\n', 'pipemod1aIa0 [label="{IN}|{ a0 | engine | mod1.aI }|{OUT| output1}"];\n', 'pipemod2a0 [label="{IN| input1}|{ a0 | engine | mod2 }|{OUT}"];\n', - 'pipemod1aIa0:outoutput1:e -> pipemod2a0:ininput1:w;\n', - 'pipemod1aIa1:outoutput1:e -> pipemod2a1:ininput1:w;\n', - '}'] + "pipemod1aIa0:outoutput1:e -> pipemod2a0:ininput1:w;\n", + "pipemod1aIa1:outoutput1:e -> pipemod2a1:ininput1:w;\n", + "}", +] dotfile_iter_hierarchical = [ - 'digraph pipe{\n', + "digraph pipe{\n", ' label="pipe";\n', ' pipe_mod1[label="mod1 (engine)", shape=box3d,style=filled, color=black, colorscheme=greys7 fillcolor=2];\n', ' pipe_mod2[label="mod2 (engine)"];\n', - ' pipe_mod1 -> pipe_mod2;\n', - '}'] + " pipe_mod1 -> pipe_mod2;\n", + "}", +] dotfile_iter_colored = [ - 'digraph pipe{\n', + "digraph pipe{\n", ' label="pipe";\n', ' pipe_mod1[label="mod1 (engine)", shape=box3d,style=filled, color=black, colorscheme=greys7 fillcolor=2];\n', ' pipe_mod2[label="mod2 (engine)", style=filled, fillcolor="#FFFFC8"];\n', - ' pipe_mod1 -> pipe_mod2;\n', - '}'] + " pipe_mod1 -> pipe_mod2;\n", + "}", +] dotfiles_iter = { "orig": dotfile_orig, "flat": dotfile_orig, "exec": dotfile_orig, "hierarchical": dotfile_iter_hierarchical, - "colored": dotfile_iter_colored - } + "colored": dotfile_iter_colored, +} dotfiles_detailed_iter = { "orig": dotfile_detailed_orig, "flat": dotfile_detailed_orig, - "exec": dotfile_detailed_iter_exec - } + "exec": dotfile_detailed_iter_exec, +} + @pytest.mark.parametrize("simple", [True, False]) -@pytest.mark.parametrize("graph_type", ['orig', 'flat', 'exec', 'hierarchical', 'colored']) +@pytest.mark.parametrize( + "graph_type", ["orig", "flat", "exec", "hierarchical", "colored"] +) def test_write_graph_dotfile_iterables(tmpdir, graph_type, simple): """ checking dot files for a workflow with iterables""" tmpdir.chdir() - pipe = pe.Workflow(name='pipe') - mod1 = pe.Node(interface=EngineTestInterface(), name='mod1') - mod1.iterables = ('input1', [1, 2]) - mod2 = 
pe.Node(interface=EngineTestInterface(), name='mod2') - pipe.connect([(mod1, mod2, [('output1', 'input1')])]) - pipe.write_graph( - graph2use=graph_type, simple_form=simple, format='dot') + pipe = pe.Workflow(name="pipe") + mod1 = pe.Node(interface=EngineTestInterface(), name="mod1") + mod1.iterables = ("input1", [1, 2]) + mod2 = pe.Node(interface=EngineTestInterface(), name="mod2") + pipe.connect([(mod1, mod2, [("output1", "input1")])]) + pipe.write_graph(graph2use=graph_type, simple_form=simple, format="dot") with open("graph.dot") as f: graph_str = f.read() @@ -625,12 +652,19 @@ def test_write_graph_dotfile_iterables(tmpdir, graph_type, simple): # if simple=False graph.dot uses longer names for line in dotfiles_iter[graph_type]: if graph_type in ["hierarchical", "colored"]: - assert line.replace("mod1 (engine)", "mod1.EngineTestInterface.engine").replace( - "mod2 (engine)", "mod2.EngineTestInterface.engine") in graph_str + assert ( + line.replace( + "mod1 (engine)", "mod1.EngineTestInterface.engine" + ).replace("mod2 (engine)", "mod2.EngineTestInterface.engine") + in graph_str + ) else: - assert line.replace( - "mod1 (engine)", "pipe.mod1.EngineTestInterface.engine").replace( - "mod2 (engine)", "pipe.mod2.EngineTestInterface.engine") in graph_str + assert ( + line.replace( + "mod1 (engine)", "pipe.mod1.EngineTestInterface.engine" + ).replace("mod2 (engine)", "pipe.mod2.EngineTestInterface.engine") + in graph_str + ) # graph_detailed is not created for hierachical or colored if graph_type not in ["hierarchical", "colored"]: @@ -640,7 +674,6 @@ def test_write_graph_dotfile_iterables(tmpdir, graph_type, simple): assert line in graph_str - def test_io_subclass(): """Ensure any io subclass allows dynamic traits""" from nipype.interfaces.io import IOBase @@ -652,25 +685,26 @@ class TestKV(IOBase): def _list_outputs(self): outputs = {} - outputs['test'] = 1 - outputs['foo'] = 'bar' + outputs["test"] = 1 + outputs["foo"] = "bar" return outputs - wf = pe.Workflow('testkv') + wf = pe.Workflow("testkv") def testx2(test): return test * 2 - kvnode = pe.Node(TestKV(), name='testkv') + kvnode = pe.Node(TestKV(), name="testkv") from nipype.interfaces.utility import Function + func = pe.Node( - Function( - input_names=['test'], output_names=['test2'], function=testx2), - name='func') + Function(input_names=["test"], output_names=["test2"], function=testx2), + name="func", + ) exception_not_raised = True try: - wf.connect(kvnode, 'test', func, 'test') + wf.connect(kvnode, "test", func, "test") except Exception as e: - if 'Module testkv has no output called test' in e: + if "Module testkv has no output called test" in e: exception_not_raised = False assert exception_not_raised diff --git a/nipype/pipeline/engine/tests/test_join.py b/nipype/pipeline/engine/tests/test_join.py index f59d9d4bc1..b14d79a366 100644 --- a/nipype/pipeline/engine/tests/test_join.py +++ b/nipype/pipeline/engine/tests/test_join.py @@ -13,8 +13,7 @@ class PickFirstSpec(nib.TraitedSpec): - in_files = traits.List( - File(exists=True), argstr="%s", position=2, mandatory=True) + in_files = traits.List(File(exists=True), argstr="%s", position=2, mandatory=True) class PickFirstOutSpec(nib.TraitedSpec): @@ -31,17 +30,17 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self._outputs().get() - outputs['output1'] = self.inputs.in_files[0] + outputs["output1"] = self.inputs.in_files[0] return outputs class IncrementInputSpec(nib.TraitedSpec): - input1 = nib.traits.Int(mandatory=True, desc='input') - inc = 
nib.traits.Int(usedefault=True, default_value=1, desc='increment') + input1 = nib.traits.Int(mandatory=True, desc="input") + inc = nib.traits.Int(usedefault=True, default_value=1, desc="increment") class IncrementOutputSpec(nib.TraitedSpec): - output1 = nib.traits.Int(desc='ouput') + output1 = nib.traits.Int(desc="ouput") class IncrementInterface(nib.SimpleInterface): @@ -50,7 +49,7 @@ class IncrementInterface(nib.SimpleInterface): def _run_interface(self, runtime): runtime.returncode = 0 - self._results['output1'] = self.inputs.input1 + self.inputs.inc + self._results["output1"] = self.inputs.input1 + self.inputs.inc return runtime @@ -60,12 +59,12 @@ def _run_interface(self, runtime): class SumInputSpec(nib.TraitedSpec): - input1 = nib.traits.List(nib.traits.Int, mandatory=True, desc='input') + input1 = nib.traits.List(nib.traits.Int, mandatory=True, desc="input") class SumOutputSpec(nib.TraitedSpec): - output1 = nib.traits.Int(desc='ouput') - operands = nib.traits.List(nib.traits.Int, desc='operands') + output1 = nib.traits.Int(desc="ouput") + operands = nib.traits.List(nib.traits.Int, desc="operands") class SumInterface(nib.SimpleInterface): @@ -76,8 +75,8 @@ def _run_interface(self, runtime): global _sum global _sum_operands runtime.returncode = 0 - self._results['operands'] = self.inputs.input1 - self._results['output1'] = sum(self.inputs.input1) + self._results["operands"] = self.inputs.input1 + self._results["output1"] = sum(self.inputs.input1) _sum_operands.append(self.inputs.input1) _sums.append(sum(self.inputs.input1)) return runtime @@ -88,11 +87,11 @@ def _run_interface(self, runtime): class SetInputSpec(nib.TraitedSpec): - input1 = nib.traits.Set(nib.traits.Int, mandatory=True, desc='input') + input1 = nib.traits.Set(nib.traits.Int, mandatory=True, desc="input") class SetOutputSpec(nib.TraitedSpec): - output1 = nib.traits.Int(desc='ouput') + output1 = nib.traits.Int(desc="ouput") class SetInterface(nib.BaseInterface): @@ -106,7 +105,7 @@ def _run_interface(self, runtime): def _list_outputs(self): global _set_len outputs = self._outputs().get() - _set_len = outputs['output1'] = len(self.inputs.input1) + _set_len = outputs["output1"] = len(self.inputs.input1) return outputs @@ -115,12 +114,12 @@ def _list_outputs(self): class ProductInputSpec(nib.TraitedSpec): - input1 = nib.traits.Int(mandatory=True, desc='input1') - input2 = nib.traits.Int(mandatory=True, desc='input2') + input1 = nib.traits.Int(mandatory=True, desc="input1") + input2 = nib.traits.Int(mandatory=True, desc="input2") class ProductOutputSpec(nib.TraitedSpec): - output1 = nib.traits.Int(mandatory=True, desc='output') + output1 = nib.traits.Int(mandatory=True, desc="output") class ProductInterface(nib.BaseInterface): @@ -134,12 +133,12 @@ def _run_interface(self, runtime): def _list_outputs(self): global _products outputs = self._outputs().get() - outputs['output1'] = self.inputs.input1 * self.inputs.input2 - _products.append(outputs['output1']) + outputs["output1"] = self.inputs.input1 * self.inputs.input2 + _products.append(outputs["output1"]) return outputs -@pytest.mark.parametrize('needed_outputs', ['true', 'false']) +@pytest.mark.parametrize("needed_outputs", ["true", "false"]) def test_join_expansion(tmpdir, needed_outputs): global _sums global _sum_operands @@ -151,40 +150,40 @@ def test_join_expansion(tmpdir, needed_outputs): _sum_operands = [] _sums = [] - prev_state = config.get('execution', 'remove_unnecessary_outputs') - config.set('execution', 'remove_unnecessary_outputs', needed_outputs) + 
prev_state = config.get("execution", "remove_unnecessary_outputs") + config.set("execution", "remove_unnecessary_outputs", needed_outputs) # Make the workflow. - wf = pe.Workflow(name='test') + wf = pe.Workflow(name="test") # the iterated input node - inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') - inputspec.iterables = [('n', [1, 2])] + inputspec = pe.Node(IdentityInterface(fields=["n"]), name="inputspec") + inputspec.iterables = [("n", [1, 2])] # a pre-join node in the iterated path - pre_join1 = pe.Node(IncrementInterface(), name='pre_join1') + pre_join1 = pe.Node(IncrementInterface(), name="pre_join1") # another pre-join node in the iterated path - pre_join2 = pe.Node(IncrementInterface(), name='pre_join2') + pre_join2 = pe.Node(IncrementInterface(), name="pre_join2") # the join node join = pe.JoinNode( - SumInterface(), - joinsource='inputspec', - joinfield='input1', - name='join') + SumInterface(), joinsource="inputspec", joinfield="input1", name="join" + ) # an uniterated post-join node - post_join1 = pe.Node(IncrementInterface(), name='post_join1') + post_join1 = pe.Node(IncrementInterface(), name="post_join1") # a post-join node in the iterated path - post_join2 = pe.Node(ProductInterface(), name='post_join2') - - wf.connect([ - (inputspec, pre_join1, [('n', 'input1')]), - (pre_join1, pre_join2, [('output1', 'input1')]), - (pre_join1, post_join2, [('output1', 'input2')]), - (pre_join2, join, [('output1', 'input1')]), - (join, post_join1, [('output1', 'input1')]), - (join, post_join2, [('output1', 'input1')]), - ]) + post_join2 = pe.Node(ProductInterface(), name="post_join2") + + wf.connect( + [ + (inputspec, pre_join1, [("n", "input1")]), + (pre_join1, pre_join2, [("output1", "input1")]), + (pre_join1, post_join2, [("output1", "input2")]), + (pre_join2, join, [("output1", "input1")]), + (join, post_join1, [("output1", "input1")]), + (join, post_join2, [("output1", "input1")]), + ] + ) result = wf.run() # the two expanded pre-join predecessor nodes feed into one join node - joins = [node for node in result.nodes() if node.name == 'join'] + joins = [node for node in result.nodes() if node.name == "join"] assert len(joins) == 1, "The number of join result nodes is incorrect." # the expanded graph contains 2 * 2 = 4 iteration pre-join nodes, 1 join # node, 1 non-iterated post-join node and 2 * 1 iteration post-join nodes. @@ -193,15 +192,14 @@ def test_join_expansion(tmpdir, needed_outputs): # the join Sum result is (1 + 1 + 1) + (2 + 1 + 1) assert len(_sums) == 1, "The number of join outputs is incorrect" - assert _sums[ - 0] == 7, "The join Sum output value is incorrect: %s." % _sums[0] + assert _sums[0] == 7, "The join Sum output value is incorrect: %s." % _sums[0] # the join input preserves the iterables input order - assert _sum_operands[0] == [3, 4], \ + assert _sum_operands[0] == [3, 4], ( "The join Sum input is incorrect: %s." 
% _sum_operands[0] + ) # there are two iterations of the post-join node in the iterable path - assert len(_products) == 2,\ - "The number of iterated post-join outputs is incorrect" - config.set('execution', 'remove_unnecessary_outputs', prev_state) + assert len(_products) == 2, "The number of iterated post-join outputs is incorrect" + config.set("execution", "remove_unnecessary_outputs", prev_state) def test_node_joinsource(tmpdir): @@ -209,15 +207,17 @@ def test_node_joinsource(tmpdir): tmpdir.chdir() # the iterated input node - inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') - inputspec.iterables = [('n', [1, 2])] + inputspec = pe.Node(IdentityInterface(fields=["n"]), name="inputspec") + inputspec.iterables = [("n", [1, 2])] # the join node join = pe.JoinNode( - SetInterface(), joinsource=inputspec, joinfield='input1', name='join') + SetInterface(), joinsource=inputspec, joinfield="input1", name="join" + ) # the joinsource is the inputspec name - assert join.joinsource == inputspec.name, \ - "The joinsource is not set to the node name." + assert ( + join.joinsource == inputspec.name + ), "The joinsource is not set to the node name." def test_set_join_node(tmpdir): @@ -225,26 +225,23 @@ def test_set_join_node(tmpdir): tmpdir.chdir() # Make the workflow. - wf = pe.Workflow(name='test') + wf = pe.Workflow(name="test") # the iterated input node - inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') - inputspec.iterables = [('n', [1, 2, 1, 3, 2])] + inputspec = pe.Node(IdentityInterface(fields=["n"]), name="inputspec") + inputspec.iterables = [("n", [1, 2, 1, 3, 2])] # a pre-join node in the iterated path - pre_join1 = pe.Node(IncrementInterface(), name='pre_join1') - wf.connect(inputspec, 'n', pre_join1, 'input1') + pre_join1 = pe.Node(IncrementInterface(), name="pre_join1") + wf.connect(inputspec, "n", pre_join1, "input1") # the set join node join = pe.JoinNode( - SetInterface(), - joinsource='inputspec', - joinfield='input1', - name='join') - wf.connect(pre_join1, 'output1', join, 'input1') + SetInterface(), joinsource="inputspec", joinfield="input1", name="join" + ) + wf.connect(pre_join1, "output1", join, "input1") wf.run() # the join length is the number of unique inputs - assert _set_len == 3, \ - "The join Set output value is incorrect: %s." % _set_len + assert _set_len == 3, "The join Set output value is incorrect: %s." % _set_len def test_unique_join_node(tmpdir): @@ -254,26 +251,28 @@ def test_unique_join_node(tmpdir): tmpdir.chdir() # Make the workflow. 
- wf = pe.Workflow(name='test') + wf = pe.Workflow(name="test") # the iterated input node - inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') - inputspec.iterables = [('n', [3, 1, 2, 1, 3])] + inputspec = pe.Node(IdentityInterface(fields=["n"]), name="inputspec") + inputspec.iterables = [("n", [3, 1, 2, 1, 3])] # a pre-join node in the iterated path - pre_join1 = pe.Node(IncrementInterface(), name='pre_join1') - wf.connect(inputspec, 'n', pre_join1, 'input1') + pre_join1 = pe.Node(IncrementInterface(), name="pre_join1") + wf.connect(inputspec, "n", pre_join1, "input1") # the set join node join = pe.JoinNode( SumInterface(), - joinsource='inputspec', - joinfield='input1', + joinsource="inputspec", + joinfield="input1", unique=True, - name='join') - wf.connect(pre_join1, 'output1', join, 'input1') + name="join", + ) + wf.connect(pre_join1, "output1", join, "input1") wf.run() - assert _sum_operands[0] == [4, 2, 3], \ + assert _sum_operands[0] == [4, 2, 3], ( "The unique join output value is incorrect: %s." % _sum_operands[0] + ) def test_multiple_join_nodes(tmpdir): @@ -283,47 +282,48 @@ def test_multiple_join_nodes(tmpdir): tmpdir.chdir() # Make the workflow. - wf = pe.Workflow(name='test') + wf = pe.Workflow(name="test") # the iterated input node - inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') - inputspec.iterables = [('n', [1, 2, 3])] + inputspec = pe.Node(IdentityInterface(fields=["n"]), name="inputspec") + inputspec.iterables = [("n", [1, 2, 3])] # a pre-join node in the iterated path - pre_join1 = pe.Node(IncrementInterface(), name='pre_join1') - wf.connect(inputspec, 'n', pre_join1, 'input1') + pre_join1 = pe.Node(IncrementInterface(), name="pre_join1") + wf.connect(inputspec, "n", pre_join1, "input1") # the first join node join1 = pe.JoinNode( - IdentityInterface(fields=['vector']), - joinsource='inputspec', - joinfield='vector', - name='join1') - wf.connect(pre_join1, 'output1', join1, 'vector') + IdentityInterface(fields=["vector"]), + joinsource="inputspec", + joinfield="vector", + name="join1", + ) + wf.connect(pre_join1, "output1", join1, "vector") # an uniterated post-join node - post_join1 = pe.Node(SumInterface(), name='post_join1') - wf.connect(join1, 'vector', post_join1, 'input1') + post_join1 = pe.Node(SumInterface(), name="post_join1") + wf.connect(join1, "vector", post_join1, "input1") # the downstream join node connected to both an upstream join # path output and a separate input in the iterated path join2 = pe.JoinNode( - IdentityInterface(fields=['vector', 'scalar']), - joinsource='inputspec', - joinfield='vector', - name='join2') - wf.connect(pre_join1, 'output1', join2, 'vector') - wf.connect(post_join1, 'output1', join2, 'scalar') + IdentityInterface(fields=["vector", "scalar"]), + joinsource="inputspec", + joinfield="vector", + name="join2", + ) + wf.connect(pre_join1, "output1", join2, "vector") + wf.connect(post_join1, "output1", join2, "scalar") # a second post-join node - post_join2 = pe.Node(SumInterface(), name='post_join2') - wf.connect(join2, 'vector', post_join2, 'input1') + post_join2 = pe.Node(SumInterface(), name="post_join2") + wf.connect(join2, "vector", post_join2, "input1") # a third post-join node - post_join3 = pe.Node(ProductInterface(), name='post_join3') - wf.connect(post_join2, 'output1', post_join3, 'input1') - wf.connect(join2, 'scalar', post_join3, 'input2') + post_join3 = pe.Node(ProductInterface(), name="post_join3") + wf.connect(post_join2, "output1", post_join3, "input1") + 
wf.connect(join2, "scalar", post_join3, "input2") result = wf.run() # The expanded graph contains one pre_join1 replicate per inputspec # replicate and one of each remaining node = 3 + 5 = 8 nodes. # The replicated inputspec nodes are factored out of the expansion. - assert len(result.nodes()) == 8, \ - "The number of expanded nodes is incorrect." + assert len(result.nodes()) == 8, "The number of expanded nodes is incorrect." # The outputs are: # pre_join1: [2, 3, 4] # post_join1: 9 @@ -340,33 +340,34 @@ def test_identity_join_node(tmpdir): tmpdir.chdir() # Make the workflow. - wf = pe.Workflow(name='test') + wf = pe.Workflow(name="test") # the iterated input node - inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') - inputspec.iterables = [('n', [1, 2, 3])] + inputspec = pe.Node(IdentityInterface(fields=["n"]), name="inputspec") + inputspec.iterables = [("n", [1, 2, 3])] # a pre-join node in the iterated path - pre_join1 = pe.Node(IncrementInterface(), name='pre_join1') - wf.connect(inputspec, 'n', pre_join1, 'input1') + pre_join1 = pe.Node(IncrementInterface(), name="pre_join1") + wf.connect(inputspec, "n", pre_join1, "input1") # the IdentityInterface join node join = pe.JoinNode( - IdentityInterface(fields=['vector']), - joinsource='inputspec', - joinfield='vector', - name='join') - wf.connect(pre_join1, 'output1', join, 'vector') + IdentityInterface(fields=["vector"]), + joinsource="inputspec", + joinfield="vector", + name="join", + ) + wf.connect(pre_join1, "output1", join, "vector") # an uniterated post-join node - post_join1 = pe.Node(SumInterface(), name='post_join1') - wf.connect(join, 'vector', post_join1, 'input1') + post_join1 = pe.Node(SumInterface(), name="post_join1") + wf.connect(join, "vector", post_join1, "input1") result = wf.run() # the expanded graph contains 1 * 3 iteration pre-join nodes, 1 join # node and 1 post-join node. Nipype factors away the iterable input # IdentityInterface but keeps the join IdentityInterface. - assert len(result.nodes()) == 5, \ - "The number of expanded nodes is incorrect." - assert _sum_operands[0] == [2, 3, 4], \ + assert len(result.nodes()) == 5, "The number of expanded nodes is incorrect." + assert _sum_operands[0] == [2, 3, 4], ( "The join Sum input is incorrect: %s." % _sum_operands[0] + ) def test_multifield_join_node(tmpdir): @@ -376,38 +377,38 @@ def test_multifield_join_node(tmpdir): tmpdir.chdir() # Make the workflow. 
- wf = pe.Workflow(name='test') + wf = pe.Workflow(name="test") # the iterated input node - inputspec = pe.Node(IdentityInterface(fields=['m', 'n']), name='inputspec') - inputspec.iterables = [('m', [1, 2]), ('n', [3, 4])] + inputspec = pe.Node(IdentityInterface(fields=["m", "n"]), name="inputspec") + inputspec.iterables = [("m", [1, 2]), ("n", [3, 4])] # two pre-join nodes in a parallel iterated path - inc1 = pe.Node(IncrementInterface(), name='inc1') - wf.connect(inputspec, 'm', inc1, 'input1') - inc2 = pe.Node(IncrementInterface(), name='inc2') - wf.connect(inputspec, 'n', inc2, 'input1') + inc1 = pe.Node(IncrementInterface(), name="inc1") + wf.connect(inputspec, "m", inc1, "input1") + inc2 = pe.Node(IncrementInterface(), name="inc2") + wf.connect(inputspec, "n", inc2, "input1") # the join node join = pe.JoinNode( - IdentityInterface(fields=['vector1', 'vector2']), - joinsource='inputspec', - name='join') - wf.connect(inc1, 'output1', join, 'vector1') - wf.connect(inc2, 'output1', join, 'vector2') + IdentityInterface(fields=["vector1", "vector2"]), + joinsource="inputspec", + name="join", + ) + wf.connect(inc1, "output1", join, "vector1") + wf.connect(inc2, "output1", join, "vector2") # a post-join node - prod = pe.MapNode( - ProductInterface(), name='prod', iterfield=['input1', 'input2']) - wf.connect(join, 'vector1', prod, 'input1') - wf.connect(join, 'vector2', prod, 'input2') + prod = pe.MapNode(ProductInterface(), name="prod", iterfield=["input1", "input2"]) + wf.connect(join, "vector1", prod, "input1") + wf.connect(join, "vector2", prod, "input2") result = wf.run() # the iterables are expanded as the cartesian product of the iterables values. # thus, the expanded graph contains 2 * (2 * 2) iteration pre-join nodes, 1 join # node and 1 post-join node. - assert len(result.nodes()) == 10, \ - "The number of expanded nodes is incorrect." + assert len(result.nodes()) == 10, "The number of expanded nodes is incorrect." # the product inputs are [2, 4], [2, 5], [3, 4], [3, 5] - assert set(_products) == set([8, 10, 12, 15]), \ + assert set(_products) == set([8, 10, 12, 15]), ( "The post-join products is incorrect: %s." % _products + ) def test_synchronize_join_node(tmpdir): @@ -417,39 +418,37 @@ def test_synchronize_join_node(tmpdir): tmpdir.chdir() # Make the workflow. 
- wf = pe.Workflow(name='test') + wf = pe.Workflow(name="test") # the iterated input node - inputspec = pe.Node(IdentityInterface(fields=['m', 'n']), name='inputspec') - inputspec.iterables = [('m', [1, 2]), ('n', [3, 4])] + inputspec = pe.Node(IdentityInterface(fields=["m", "n"]), name="inputspec") + inputspec.iterables = [("m", [1, 2]), ("n", [3, 4])] inputspec.synchronize = True # two pre-join nodes in a parallel iterated path - inc1 = pe.Node(IncrementInterface(), name='inc1') - wf.connect(inputspec, 'm', inc1, 'input1') - inc2 = pe.Node(IncrementInterface(), name='inc2') - wf.connect(inputspec, 'n', inc2, 'input1') + inc1 = pe.Node(IncrementInterface(), name="inc1") + wf.connect(inputspec, "m", inc1, "input1") + inc2 = pe.Node(IncrementInterface(), name="inc2") + wf.connect(inputspec, "n", inc2, "input1") # the join node join = pe.JoinNode( - IdentityInterface(fields=['vector1', 'vector2']), - joinsource='inputspec', - name='join') - wf.connect(inc1, 'output1', join, 'vector1') - wf.connect(inc2, 'output1', join, 'vector2') + IdentityInterface(fields=["vector1", "vector2"]), + joinsource="inputspec", + name="join", + ) + wf.connect(inc1, "output1", join, "vector1") + wf.connect(inc2, "output1", join, "vector2") # a post-join node - prod = pe.MapNode( - ProductInterface(), name='prod', iterfield=['input1', 'input2']) - wf.connect(join, 'vector1', prod, 'input1') - wf.connect(join, 'vector2', prod, 'input2') + prod = pe.MapNode(ProductInterface(), name="prod", iterfield=["input1", "input2"]) + wf.connect(join, "vector1", prod, "input1") + wf.connect(join, "vector2", prod, "input2") result = wf.run() # there are 3 iterables expansions. # thus, the expanded graph contains 2 * 2 iteration pre-join nodes, 1 join # node and 1 post-join node. - assert len(result.nodes()) == 6, \ - "The number of expanded nodes is incorrect." + assert len(result.nodes()) == 6, "The number of expanded nodes is incorrect." # the product inputs are [2, 3] and [4, 5] - assert _products == [8, 15], \ - "The post-join products is incorrect: %s." % _products + assert _products == [8, 15], "The post-join products is incorrect: %s." % _products def test_itersource_join_source_node(tmpdir): @@ -457,31 +456,32 @@ def test_itersource_join_source_node(tmpdir): tmpdir.chdir() # Make the workflow. 
- wf = pe.Workflow(name='test') + wf = pe.Workflow(name="test") # the iterated input node - inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') - inputspec.iterables = [('n', [1, 2])] + inputspec = pe.Node(IdentityInterface(fields=["n"]), name="inputspec") + inputspec.iterables = [("n", [1, 2])] # an intermediate node in the first iteration path - pre_join1 = pe.Node(IncrementInterface(), name='pre_join1') - wf.connect(inputspec, 'n', pre_join1, 'input1') + pre_join1 = pe.Node(IncrementInterface(), name="pre_join1") + wf.connect(inputspec, "n", pre_join1, "input1") # an iterable pre-join node with an itersource - pre_join2 = pe.Node(ProductInterface(), name='pre_join2') - pre_join2.itersource = ('inputspec', 'n') - pre_join2.iterables = ('input1', {1: [3, 4], 2: [5, 6]}) - wf.connect(pre_join1, 'output1', pre_join2, 'input2') + pre_join2 = pe.Node(ProductInterface(), name="pre_join2") + pre_join2.itersource = ("inputspec", "n") + pre_join2.iterables = ("input1", {1: [3, 4], 2: [5, 6]}) + wf.connect(pre_join1, "output1", pre_join2, "input2") # an intermediate node in the second iteration path - pre_join3 = pe.Node(IncrementInterface(), name='pre_join3') - wf.connect(pre_join2, 'output1', pre_join3, 'input1') + pre_join3 = pe.Node(IncrementInterface(), name="pre_join3") + wf.connect(pre_join2, "output1", pre_join3, "input1") # the join node join = pe.JoinNode( - IdentityInterface(fields=['vector']), - joinsource='pre_join2', - joinfield='vector', - name='join') - wf.connect(pre_join3, 'output1', join, 'vector') + IdentityInterface(fields=["vector"]), + joinsource="pre_join2", + joinfield="vector", + name="join", + ) + wf.connect(pre_join3, "output1", join, "vector") # a join successor node - post_join1 = pe.Node(SumInterface(), name='post_join1') - wf.connect(join, 'vector', post_join1, 'input1') + post_join1 = pe.Node(SumInterface(), name="post_join1") + wf.connect(join, "vector", post_join1, "input1") result = wf.run() @@ -494,18 +494,19 @@ def test_itersource_join_source_node(tmpdir): # 2 + (2 * 2) + 4 + 2 + 2 = 14 expansion graph nodes. # Nipype factors away the iterable input # IdentityInterface but keeps the join IdentityInterface. - assert len(result.nodes()) == 14, \ - "The number of expanded nodes is incorrect." + assert len(result.nodes()) == 14, "The number of expanded nodes is incorrect." # The first join inputs are: # 1 + (3 * 2) and 1 + (4 * 2) # The second join inputs are: # 1 + (5 * 3) and 1 + (6 * 3) # the post-join nodes execution order is indeterminate; # therefore, compare the lists item-wise. - assert [16, 19] in _sum_operands, \ + assert [16, 19] in _sum_operands, ( "The join Sum input is incorrect: %s." % _sum_operands - assert [7, 9] in _sum_operands, \ + ) + assert [7, 9] in _sum_operands, ( "The join Sum input is incorrect: %s." % _sum_operands + ) def test_itersource_two_join_nodes(tmpdir): @@ -514,68 +515,69 @@ def test_itersource_two_join_nodes(tmpdir): tmpdir.chdir() # Make the workflow. 
- wf = pe.Workflow(name='test') + wf = pe.Workflow(name="test") # the iterated input node - inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') - inputspec.iterables = [('n', [1, 2])] + inputspec = pe.Node(IdentityInterface(fields=["n"]), name="inputspec") + inputspec.iterables = [("n", [1, 2])] # an intermediate node in the first iteration path - pre_join1 = pe.Node(IncrementInterface(), name='pre_join1') - wf.connect(inputspec, 'n', pre_join1, 'input1') + pre_join1 = pe.Node(IncrementInterface(), name="pre_join1") + wf.connect(inputspec, "n", pre_join1, "input1") # an iterable pre-join node with an itersource - pre_join2 = pe.Node(ProductInterface(), name='pre_join2') - pre_join2.itersource = ('inputspec', 'n') - pre_join2.iterables = ('input1', {1: [3, 4], 2: [5, 6]}) - wf.connect(pre_join1, 'output1', pre_join2, 'input2') + pre_join2 = pe.Node(ProductInterface(), name="pre_join2") + pre_join2.itersource = ("inputspec", "n") + pre_join2.iterables = ("input1", {1: [3, 4], 2: [5, 6]}) + wf.connect(pre_join1, "output1", pre_join2, "input2") # an intermediate node in the second iteration path - pre_join3 = pe.Node(IncrementInterface(), name='pre_join3') - wf.connect(pre_join2, 'output1', pre_join3, 'input1') + pre_join3 = pe.Node(IncrementInterface(), name="pre_join3") + wf.connect(pre_join2, "output1", pre_join3, "input1") # the first join node join1 = pe.JoinNode( - IdentityInterface(fields=['vector']), - joinsource='pre_join2', - joinfield='vector', - name='join1') - wf.connect(pre_join3, 'output1', join1, 'vector') + IdentityInterface(fields=["vector"]), + joinsource="pre_join2", + joinfield="vector", + name="join1", + ) + wf.connect(pre_join3, "output1", join1, "vector") # a join successor node - post_join1 = pe.Node(SumInterface(), name='post_join1') - wf.connect(join1, 'vector', post_join1, 'input1') + post_join1 = pe.Node(SumInterface(), name="post_join1") + wf.connect(join1, "vector", post_join1, "input1") # a summary join node join2 = pe.JoinNode( - IdentityInterface(fields=['vector']), - joinsource='inputspec', - joinfield='vector', - name='join2') - wf.connect(post_join1, 'output1', join2, 'vector') + IdentityInterface(fields=["vector"]), + joinsource="inputspec", + joinfield="vector", + name="join2", + ) + wf.connect(post_join1, "output1", join2, "vector") result = wf.run() # the expanded graph contains the 14 test_itersource_join_source_node # nodes plus the summary join node. - assert len(result.nodes()) == 15, \ - "The number of expanded nodes is incorrect." + assert len(result.nodes()) == 15, "The number of expanded nodes is incorrect." def test_set_join_node_file_input(tmpdir): """Test collecting join inputs to a set.""" tmpdir.chdir() - open('test.nii', 'w+').close() - open('test2.nii', 'w+').close() + open("test.nii", "w+").close() + open("test2.nii", "w+").close() # Make the workflow. 
- wf = pe.Workflow(name='test') + wf = pe.Workflow(name="test") # the iterated input node - inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') - inputspec.iterables = [('n', [ - tmpdir.join('test.nii').strpath, - tmpdir.join('test2.nii').strpath - ])] + inputspec = pe.Node(IdentityInterface(fields=["n"]), name="inputspec") + inputspec.iterables = [ + ("n", [tmpdir.join("test.nii").strpath, tmpdir.join("test2.nii").strpath]) + ] # a pre-join node in the iterated path - pre_join1 = pe.Node(IdentityInterface(fields=['n']), name='pre_join1') - wf.connect(inputspec, 'n', pre_join1, 'n') + pre_join1 = pe.Node(IdentityInterface(fields=["n"]), name="pre_join1") + wf.connect(inputspec, "n", pre_join1, "n") # the set join node join = pe.JoinNode( - PickFirst(), joinsource='inputspec', joinfield='in_files', name='join') - wf.connect(pre_join1, 'n', join, 'in_files') + PickFirst(), joinsource="inputspec", joinfield="in_files", name="join" + ) + wf.connect(pre_join1, "n", join, "in_files") wf.run() @@ -585,26 +587,27 @@ def test_nested_workflow_join(tmpdir): tmpdir.chdir() # Make the nested workflow - def nested_wf(i, name='smallwf'): + def nested_wf(i, name="smallwf"): # iterables with list of nums - inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') - inputspec.iterables = [('n', i)] + inputspec = pe.Node(IdentityInterface(fields=["n"]), name="inputspec") + inputspec.iterables = [("n", i)] # increment each iterable before joining - pre_join = pe.Node(IncrementInterface(), name='pre_join') + pre_join = pe.Node(IncrementInterface(), name="pre_join") # rejoin nums into list join = pe.JoinNode( - IdentityInterface(fields=['n']), - joinsource='inputspec', - joinfield='n', - name='join') + IdentityInterface(fields=["n"]), + joinsource="inputspec", + joinfield="n", + name="join", + ) # define and connect nested workflow - wf = pe.Workflow(name='wf_%d' % i[0]) - wf.connect(inputspec, 'n', pre_join, 'input1') - wf.connect(pre_join, 'output1', join, 'n') + wf = pe.Workflow(name="wf_%d" % i[0]) + wf.connect(inputspec, "n", pre_join, "input1") + wf.connect(pre_join, "output1", join, "n") return wf # master wf - meta_wf = pe.Workflow(name='meta', base_dir='.') + meta_wf = pe.Workflow(name="meta", base_dir=".") # add each mini-workflow to master for i in [[1, 3], [2, 4]]: mini_wf = nested_wf(i) @@ -613,8 +616,7 @@ def nested_wf(i, name='smallwf'): result = meta_wf.run() # there should be six nodes in total - assert len(result.nodes()) == 6, \ - "The number of expanded nodes is incorrect." + assert len(result.nodes()) == 6, "The number of expanded nodes is incorrect." 
def test_name_prefix_join(tmpdir): @@ -623,14 +625,16 @@ def test_name_prefix_join(tmpdir): def sq(x): return x ** 2 - wf = pe.Workflow('wf', base_dir=tmpdir.strpath) - square = pe.Node(Function(function=sq), name='square') - square.iterables = [('x', [1, 2])] - square_join = pe.JoinNode(Merge(1, ravel_inputs=True), - name='square_join', - joinsource='square', - joinfield=['in1']) - wf.connect(square, 'out', square_join, "in1") + wf = pe.Workflow("wf", base_dir=tmpdir.strpath) + square = pe.Node(Function(function=sq), name="square") + square.iterables = [("x", [1, 2])] + square_join = pe.JoinNode( + Merge(1, ravel_inputs=True), + name="square_join", + joinsource="square", + joinfield=["in1"], + ) + wf.connect(square, "out", square_join, "in1") wf.run() @@ -640,27 +644,30 @@ def test_join_nestediters(tmpdir): def exponent(x, p): return x ** p - wf = pe.Workflow('wf', base_dir=tmpdir.strpath) - - xs = pe.Node(IdentityInterface(['x']), - iterables=[('x', [1, 2])], - name='xs') - ps = pe.Node(IdentityInterface(['p']), - iterables=[('p', [3, 4])], - name='ps') - exp = pe.Node(Function(function=exponent), name='exp') - exp_joinx = pe.JoinNode(Merge(1, ravel_inputs=True), - name='exp_joinx', - joinsource='xs', - joinfield=['in1']) - exp_joinp = pe.JoinNode(Merge(1, ravel_inputs=True), - name='exp_joinp', - joinsource='ps', - joinfield=['in1']) - wf.connect([ - (xs, exp, [('x', 'x')]), - (ps, exp, [('p', 'p')]), - (exp, exp_joinx, [('out', 'in1')]), - (exp_joinx, exp_joinp, [('out', 'in1')])]) + wf = pe.Workflow("wf", base_dir=tmpdir.strpath) + + xs = pe.Node(IdentityInterface(["x"]), iterables=[("x", [1, 2])], name="xs") + ps = pe.Node(IdentityInterface(["p"]), iterables=[("p", [3, 4])], name="ps") + exp = pe.Node(Function(function=exponent), name="exp") + exp_joinx = pe.JoinNode( + Merge(1, ravel_inputs=True), + name="exp_joinx", + joinsource="xs", + joinfield=["in1"], + ) + exp_joinp = pe.JoinNode( + Merge(1, ravel_inputs=True), + name="exp_joinp", + joinsource="ps", + joinfield=["in1"], + ) + wf.connect( + [ + (xs, exp, [("x", "x")]), + (ps, exp, [("p", "p")]), + (exp, exp_joinx, [("out", "in1")]), + (exp_joinx, exp_joinp, [("out", "in1")]), + ] + ) wf.run() diff --git a/nipype/pipeline/engine/tests/test_nodes.py b/nipype/pipeline/engine/tests/test_nodes.py index 395ede54bb..6fd88011ee 100644 --- a/nipype/pipeline/engine/tests/test_nodes.py +++ b/nipype/pipeline/engine/tests/test_nodes.py @@ -12,7 +12,7 @@ from .test_base import EngineTestInterface from .test_utils import UtilsTestInterface -''' +""" Test for order of iterables import nipype.pipeline.engine as pe @@ -42,7 +42,7 @@ wf1.run(inseries=True, createdirsonly=True) wf1.write_graph(graph2use='exec') -''' +""" ''' import nipype.pipeline.engine as pe import nipype.interfaces.spm as spm @@ -88,34 +88,42 @@ def test_node_init(): with pytest.raises(TypeError): pe.Node() with pytest.raises(IOError): - pe.Node(EngineTestInterface, name='test') + pe.Node(EngineTestInterface, name="test") def test_node_get_output(): - mod1 = pe.Node(interface=EngineTestInterface(), name='mod1') + mod1 = pe.Node(interface=EngineTestInterface(), name="mod1") mod1.inputs.input1 = 1 mod1.run() - assert mod1.get_output('output1') == [1, 1] + assert mod1.get_output("output1") == [1, 1] mod1._result = None - assert mod1.get_output('output1') == [1, 1] + assert mod1.get_output("output1") == [1, 1] def test_mapnode_iterfield_check(): - mod1 = pe.MapNode(EngineTestInterface(), iterfield=['input1'], name='mod1') + mod1 = pe.MapNode(EngineTestInterface(), 
iterfield=["input1"], name="mod1") with pytest.raises(ValueError): mod1._check_iterfield() mod1 = pe.MapNode( - EngineTestInterface(), iterfield=['input1', 'input2'], name='mod1') + EngineTestInterface(), iterfield=["input1", "input2"], name="mod1" + ) mod1.inputs.input1 = [1, 2] mod1.inputs.input2 = 3 with pytest.raises(ValueError): mod1._check_iterfield() -@pytest.mark.parametrize("x_inp, f_exp", - [(3, [6]), ([2, 3], [4, 6]), ((2, 3), [4, 6]), - (range(3), [0, 2, 4]), ("Str", ["StrStr"]), - (["Str1", "Str2"], ["Str1Str1", "Str2Str2"])]) +@pytest.mark.parametrize( + "x_inp, f_exp", + [ + (3, [6]), + ([2, 3], [4, 6]), + ((2, 3), [4, 6]), + (range(3), [0, 2, 4]), + ("Str", ["StrStr"]), + (["Str1", "Str2"], ["Str1Str1", "Str2Str2"]), + ], +) def test_mapnode_iterfield_type(x_inp, f_exp): from nipype import MapNode, Function @@ -139,19 +147,21 @@ def func1(in1): return in1 + 1 n1 = MapNode( - Function(input_names=['in1'], output_names=['out'], function=func1), - iterfield=['in1'], + Function(input_names=["in1"], output_names=["out"], function=func1), + iterfield=["in1"], nested=True, - name='n1') + name="n1", + ) n1.inputs.in1 = [[1, [2]], 3, [4, 5]] n1.run() - assert n1.get_output('out') == [[2, [3]], 4, [5, 6]] + assert n1.get_output("out") == [[2, [3]], 4, [5, 6]] n2 = MapNode( - Function(input_names=['in1'], output_names=['out'], function=func1), - iterfield=['in1'], + Function(input_names=["in1"], output_names=["out"], function=func1), + iterfield=["in1"], nested=False, - name='n1') + name="n1", + ) n2.inputs.in1 = [[1, [2]], 3, [4, 5]] with pytest.raises(Exception) as excinfo: @@ -167,27 +177,25 @@ def func1(in1): return in1 + 1 mapnode = MapNode( - Function(function=func1), - iterfield='in1', - name='mapnode', - n_procs=2, - mem_gb=2) + Function(function=func1), iterfield="in1", name="mapnode", n_procs=2, mem_gb=2 + ) mapnode.inputs.in1 = [1, 2] for idx, node in mapnode._make_nodes(): - for attr in ('overwrite', 'run_without_submitting', 'plugin_args'): + for attr in ("overwrite", "run_without_submitting", "plugin_args"): + assert getattr(node, attr) == getattr(mapnode, attr) + for attr in ("_n_procs", "_mem_gb"): assert getattr(node, attr) == getattr(mapnode, attr) - for attr in ('_n_procs', '_mem_gb'): - assert (getattr(node, attr) == getattr(mapnode, attr)) def test_node_hash(tmpdir): from nipype.interfaces.utility import Function + tmpdir.chdir() config.set_default_config() - config.set('execution', 'stop_on_first_crash', True) - config.set('execution', 'crashdump_dir', os.getcwd()) + config.set("execution", "stop_on_first_crash", True) + config.set("execution", "crashdump_dir", os.getcwd()) def func1(): return 1 @@ -196,17 +204,18 @@ def func2(a): return a + 1 n1 = pe.Node( - Function(input_names=[], output_names=['a'], function=func1), - name='n1') + Function(input_names=[], output_names=["a"], function=func1), name="n1" + ) n2 = pe.Node( - Function(input_names=['a'], output_names=['b'], function=func2), - name='n2') - w1 = pe.Workflow(name='test') + Function(input_names=["a"], output_names=["b"], function=func2), name="n2" + ) + w1 = pe.Workflow(name="test") def modify(x): return x + 1 + n1.inputs.a = 1 - w1.connect(n1, ('a', modify), n2, 'a') + w1.connect(n1, ("a", modify), n2, "a") w1.base_dir = os.getcwd() # create dummy distributed plugin class @@ -219,25 +228,26 @@ class EngineTestException(Exception): class RaiseError(DistributedPluginBase): def _submit_job(self, node, updatehash=False): raise EngineTestException( - 'Submit called - cached=%s, updated=%s' % 
node.is_cached()) + "Submit called - cached=%s, updated=%s" % node.is_cached() + ) # check if a proper exception is raised with pytest.raises(EngineTestException) as excinfo: w1.run(plugin=RaiseError()) - assert str(excinfo.value).startswith('Submit called') + assert str(excinfo.value).startswith("Submit called") # generate outputs - w1.run(plugin='Linear') + w1.run(plugin="Linear") # ensure plugin is being called - config.set('execution', 'local_hash_check', False) + config.set("execution", "local_hash_check", False) # rerun to ensure we have outputs - w1.run(plugin='Linear') + w1.run(plugin="Linear") # set local check - config.set('execution', 'local_hash_check', True) - w1 = pe.Workflow(name='test') - w1.connect(n1, ('a', modify), n2, 'a') + config.set("execution", "local_hash_check", True) + w1 = pe.Workflow(name="test") + w1.connect(n1, ("a", modify), n2, "a") w1.base_dir = os.getcwd() w1.run(plugin=RaiseError()) @@ -245,57 +255,58 @@ def _submit_job(self, node, updatehash=False): def test_outputs_removal(tmpdir): def test_function(arg1): import os - file1 = os.path.join(os.getcwd(), 'file1.txt') - file2 = os.path.join(os.getcwd(), 'file2.txt') - with open(file1, 'wt') as fp: - fp.write('%d' % arg1) - with open(file2, 'wt') as fp: - fp.write('%d' % arg1) + + file1 = os.path.join(os.getcwd(), "file1.txt") + file2 = os.path.join(os.getcwd(), "file2.txt") + with open(file1, "wt") as fp: + fp.write("%d" % arg1) + with open(file2, "wt") as fp: + fp.write("%d" % arg1) return file1, file2 n1 = pe.Node( niu.Function( - input_names=['arg1'], - output_names=['file1', 'file2'], - function=test_function), + input_names=["arg1"], + output_names=["file1", "file2"], + function=test_function, + ), base_dir=tmpdir.strpath, - name='testoutputs') + name="testoutputs", + ) n1.inputs.arg1 = 1 - n1.config = {'execution': {'remove_unnecessary_outputs': True}} + n1.config = {"execution": {"remove_unnecessary_outputs": True}} n1.config = merge_dict(deepcopy(config._sections), n1.config) n1.run() - assert tmpdir.join(n1.name, 'file1.txt').check() - assert tmpdir.join(n1.name, 'file1.txt').check() - n1.needed_outputs = ['file2'] + assert tmpdir.join(n1.name, "file1.txt").check() + assert tmpdir.join(n1.name, "file1.txt").check() + n1.needed_outputs = ["file2"] n1.run() - assert not tmpdir.join(n1.name, 'file1.txt').check() - assert tmpdir.join(n1.name, 'file2.txt').check() + assert not tmpdir.join(n1.name, "file1.txt").check() + assert tmpdir.join(n1.name, "file2.txt").check() def test_inputs_removal(tmpdir): - file1 = tmpdir.join('file1.txt') - file1.write('dummy_file') - n1 = pe.Node( - UtilsTestInterface(), base_dir=tmpdir.strpath, name='testinputs') + file1 = tmpdir.join("file1.txt") + file1.write("dummy_file") + n1 = pe.Node(UtilsTestInterface(), base_dir=tmpdir.strpath, name="testinputs") n1.inputs.in_file = file1.strpath - n1.config = {'execution': {'keep_inputs': True}} + n1.config = {"execution": {"keep_inputs": True}} n1.config = merge_dict(deepcopy(config._sections), n1.config) n1.run() - assert tmpdir.join(n1.name, 'file1.txt').check() + assert tmpdir.join(n1.name, "file1.txt").check() n1.inputs.in_file = file1.strpath - n1.config = {'execution': {'keep_inputs': False}} + n1.config = {"execution": {"keep_inputs": False}} n1.config = merge_dict(deepcopy(config._sections), n1.config) n1.overwrite = True n1.run() - assert not tmpdir.join(n1.name, 'file1.txt').check() + assert not tmpdir.join(n1.name, "file1.txt").check() def test_outputmultipath_collapse(tmpdir): """Test an OutputMultiPath whose 
initial value is ``[[x]]`` to ensure that it is returned as ``[x]``, regardless of how accessed.""" select_if = niu.Select(inlist=[[1, 2, 3], [4]], index=1) - select_nd = pe.Node(niu.Select(inlist=[[1, 2, 3], [4]], index=1), - name='select_nd') + select_nd = pe.Node(niu.Select(inlist=[[1, 2, 3], [4]], index=1), name="select_nd") ifres = select_if.run() ndres = select_nd.run() diff --git a/nipype/pipeline/engine/tests/test_utils.py b/nipype/pipeline/engine/tests/test_utils.py index 83f5aef282..1e86e5a071 100644 --- a/nipype/pipeline/engine/tests/test_utils.py +++ b/nipype/pipeline/engine/tests/test_utils.py @@ -11,8 +11,7 @@ from ....interfaces import base as nib from ....interfaces import utility as niu from .... import config -from ..utils import (clean_working_directory, write_workflow_prov, - load_resultfile) +from ..utils import clean_working_directory, write_workflow_prov, load_resultfile class InputSpec(nib.TraitedSpec): @@ -20,7 +19,7 @@ class InputSpec(nib.TraitedSpec): class OutputSpec(nib.TraitedSpec): - output1 = nib.traits.List(nib.traits.Int, desc='outputs') + output1 = nib.traits.List(nib.traits.Int, desc="outputs") class UtilsTestInterface(nib.BaseInterface): @@ -33,38 +32,37 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self._outputs().get() - outputs['output1'] = [1] + outputs["output1"] = [1] return outputs def test_identitynode_removal(tmpdir): def test_function(arg1, arg2, arg3): import numpy as np + return (np.array(arg1) + arg2 + arg3).tolist() wf = pe.Workflow(name="testidentity", base_dir=tmpdir.strpath) n1 = pe.Node( - niu.IdentityInterface(fields=['a', 'b']), - name='src', - base_dir=tmpdir.strpath) - n1.iterables = ('b', [0, 1, 2, 3]) + niu.IdentityInterface(fields=["a", "b"]), name="src", base_dir=tmpdir.strpath + ) + n1.iterables = ("b", [0, 1, 2, 3]) n1.inputs.a = [0, 1, 2, 3] - n2 = pe.Node(niu.Select(), name='selector', base_dir=tmpdir.strpath) - wf.connect(n1, ('a', test_function, 1, -1), n2, 'inlist') - wf.connect(n1, 'b', n2, 'index') + n2 = pe.Node(niu.Select(), name="selector", base_dir=tmpdir.strpath) + wf.connect(n1, ("a", test_function, 1, -1), n2, "inlist") + wf.connect(n1, "b", n2, "index") n3 = pe.Node( - niu.IdentityInterface(fields=['c', 'd']), - name='passer', - base_dir=tmpdir.strpath) + niu.IdentityInterface(fields=["c", "d"]), name="passer", base_dir=tmpdir.strpath + ) n3.inputs.c = [1, 2, 3, 4] - wf.connect(n2, 'out', n3, 'd') + wf.connect(n2, "out", n3, "d") - n4 = pe.Node(niu.Select(), name='selector2', base_dir=tmpdir.strpath) - wf.connect(n3, ('c', test_function, 1, -1), n4, 'inlist') - wf.connect(n3, 'd', n4, 'index') + n4 = pe.Node(niu.Select(), name="selector2", base_dir=tmpdir.strpath) + wf.connect(n3, ("c", test_function, 1, -1), n4, "inlist") + wf.connect(n3, "d", n4, "index") fg = wf._create_flat_graph() wf._set_needed_outputs(fg) @@ -84,29 +82,35 @@ class InputSpec(nib.TraitedSpec): inputs = InputSpec() filenames = [ - 'file.hdr', 'file.img', 'file.BRIK', 'file.HEAD', '_0x1234.json', - 'foo.txt' + "file.hdr", + "file.img", + "file.BRIK", + "file.HEAD", + "_0x1234.json", + "foo.txt", ] outfiles = [] for filename in filenames: outfile = tmpdir.join(filename) - outfile.write('dummy') + outfile.write("dummy") outfiles.append(outfile.strpath) outputs.files = outfiles[:4:2] outputs.others = outfiles[5] inputs.infile = outfiles[-1] - needed_outputs = ['files'] + needed_outputs = ["files"] config.set_default_config() assert os.path.exists(outfiles[5]) config.set_default_config() - config.set('execution', 
'remove_unnecessary_outputs', False) - out = clean_working_directory(outputs, tmpdir.strpath, inputs, - needed_outputs, deepcopy(config._sections)) + config.set("execution", "remove_unnecessary_outputs", False) + out = clean_working_directory( + outputs, tmpdir.strpath, inputs, needed_outputs, deepcopy(config._sections) + ) assert os.path.exists(outfiles[5]) assert out.others == outfiles[5] - config.set('execution', 'remove_unnecessary_outputs', True) - out = clean_working_directory(outputs, tmpdir.strpath, inputs, - needed_outputs, deepcopy(config._sections)) + config.set("execution", "remove_unnecessary_outputs", True) + out = clean_working_directory( + outputs, tmpdir.strpath, inputs, needed_outputs, deepcopy(config._sections) + ) assert os.path.exists(outfiles[1]) assert os.path.exists(outfiles[3]) assert os.path.exists(outfiles[4]) @@ -118,39 +122,40 @@ class InputSpec(nib.TraitedSpec): def create_wf(name): """Creates a workflow for the following tests""" + def fwhm(fwhm): return fwhm pipe = pe.Workflow(name=name) process = pe.Node( - niu.Function( - input_names=['fwhm'], output_names=['fwhm'], function=fwhm), - name='proc') - process.iterables = ('fwhm', [0]) + niu.Function(input_names=["fwhm"], output_names=["fwhm"], function=fwhm), + name="proc", + ) + process.iterables = ("fwhm", [0]) process2 = pe.Node( - niu.Function( - input_names=['fwhm'], output_names=['fwhm'], function=fwhm), - name='proc2') - process2.iterables = ('fwhm', [0]) - pipe.connect(process, 'fwhm', process2, 'fwhm') + niu.Function(input_names=["fwhm"], output_names=["fwhm"], function=fwhm), + name="proc2", + ) + process2.iterables = ("fwhm", [0]) + pipe.connect(process, "fwhm", process2, "fwhm") return pipe def test_multi_disconnected_iterable(tmpdir): - metawf = pe.Workflow(name='meta') + metawf = pe.Workflow(name="meta") metawf.base_dir = tmpdir.strpath - metawf.add_nodes([create_wf('wf%d' % i) for i in range(30)]) - eg = metawf.run(plugin='Linear') + metawf.add_nodes([create_wf("wf%d" % i) for i in range(30)]) + eg = metawf.run(plugin="Linear") assert len(eg.nodes()) == 60 def test_provenance(tmpdir): - metawf = pe.Workflow(name='meta') + metawf = pe.Workflow(name="meta") metawf.base_dir = tmpdir.strpath - metawf.add_nodes([create_wf('wf%d' % i) for i in range(1)]) - eg = metawf.run(plugin='Linear') - prov_base = tmpdir.join('workflow_provenance_test').strpath - psg = write_workflow_prov(eg, prov_base, format='all') + metawf.add_nodes([create_wf("wf%d" % i) for i in range(1)]) + eg = metawf.run(plugin="Linear") + prov_base = tmpdir.join("workflow_provenance_test").strpath + psg = write_workflow_prov(eg, prov_base, format="all") assert len(psg.bundles) == 2 assert len(psg.get_records()) == 7 @@ -164,14 +169,14 @@ def test_mapnode_crash(tmpdir): cwd = os.getcwd() node = pe.MapNode( niu.Function( - input_names=['WRONG'], - output_names=['newstring'], - function=dummy_func), - iterfield=['WRONG'], - name='myfunc') - node.inputs.WRONG = ['string{}'.format(i) for i in range(3)] + input_names=["WRONG"], output_names=["newstring"], function=dummy_func + ), + iterfield=["WRONG"], + name="myfunc", + ) + node.inputs.WRONG = ["string{}".format(i) for i in range(3)] node.config = deepcopy(config._sections) - node.config['execution']['stop_on_first_crash'] = True + node.config["execution"]["stop_on_first_crash"] = True node.base_dir = tmpdir.strpath with pytest.raises(TypeError): node.run() @@ -183,12 +188,12 @@ def test_mapnode_crash2(tmpdir): cwd = os.getcwd() node = pe.MapNode( niu.Function( - input_names=['WRONG'], - 
output_names=['newstring'], - function=dummy_func), - iterfield=['WRONG'], - name='myfunc') - node.inputs.WRONG = ['string{}'.format(i) for i in range(3)] + input_names=["WRONG"], output_names=["newstring"], function=dummy_func + ), + iterfield=["WRONG"], + name="myfunc", + ) + node.inputs.WRONG = ["string{}".format(i) for i in range(3)] node.base_dir = tmpdir.strpath with pytest.raises(Exception): @@ -201,19 +206,20 @@ def test_mapnode_crash3(tmpdir): tmpdir.chdir() node = pe.MapNode( niu.Function( - input_names=['WRONG'], - output_names=['newstring'], - function=dummy_func), - iterfield=['WRONG'], - name='myfunc') - node.inputs.WRONG = ['string{}'.format(i) for i in range(3)] - wf = pe.Workflow('testmapnodecrash') + input_names=["WRONG"], output_names=["newstring"], function=dummy_func + ), + iterfield=["WRONG"], + name="myfunc", + ) + node.inputs.WRONG = ["string{}".format(i) for i in range(3)] + wf = pe.Workflow("testmapnodecrash") wf.add_nodes([node]) wf.base_dir = tmpdir.strpath # changing crashdump dir to current working directory (to avoid problems with read-only systems) wf.config["execution"]["crashdump_dir"] = os.getcwd() with pytest.raises(RuntimeError): - wf.run(plugin='Linear') + wf.run(plugin="Linear") + class StrPathConfuserInputSpec(nib.TraitedSpec): in_str = nib.traits.String() @@ -233,14 +239,14 @@ class StrPathConfuser(nib.SimpleInterface): output_spec = StrPathConfuserOutputSpec def _run_interface(self, runtime): - out_path = os.path.abspath(os.path.basename(self.inputs.in_str) + '_path') - open(out_path, 'w').close() - self._results['out_str'] = self.inputs.in_str - self._results['out_path'] = out_path - self._results['out_tuple'] = (out_path, self.inputs.in_str) - self._results['out_dict_path'] = {self.inputs.in_str: out_path} - self._results['out_dict_str'] = {self.inputs.in_str: self.inputs.in_str} - self._results['out_list'] = [self.inputs.in_str] * 2 + out_path = os.path.abspath(os.path.basename(self.inputs.in_str) + "_path") + open(out_path, "w").close() + self._results["out_str"] = self.inputs.in_str + self._results["out_path"] = out_path + self._results["out_tuple"] = (out_path, self.inputs.in_str) + self._results["out_dict_path"] = {self.inputs.in_str: out_path} + self._results["out_dict_str"] = {self.inputs.in_str: self.inputs.in_str} + self._results["out_list"] = [self.inputs.in_str] * 2 return runtime @@ -254,15 +260,15 @@ def test_modify_paths_bug(tmpdir): """ tmpdir.chdir() - spc = pe.Node(StrPathConfuser(in_str='2'), name='spc') + spc = pe.Node(StrPathConfuser(in_str="2"), name="spc") - open('2', 'w').close() + open("2", "w").close() outputs = spc.run().outputs # Basic check that string was not manipulated out_str = outputs.out_str - assert out_str == '2' + assert out_str == "2" # Check path exists and is absolute out_path = outputs.out_path @@ -279,41 +285,45 @@ def test_modify_paths_bug(tmpdir): def test_save_load_resultfile(tmpdir, use_relative): """Test minimally the save/load functions for result files.""" from shutil import copytree, rmtree + tmpdir.chdir() - old_use_relative = config.getboolean('execution', 'use_relative_paths') - config.set('execution', 'use_relative_paths', use_relative) + old_use_relative = config.getboolean("execution", "use_relative_paths") + config.set("execution", "use_relative_paths", use_relative) - spc = pe.Node(StrPathConfuser(in_str='2'), name='spc') - spc.base_dir = tmpdir.mkdir('node').strpath + spc = pe.Node(StrPathConfuser(in_str="2"), name="spc") + spc.base_dir = tmpdir.mkdir("node").strpath result = spc.run() 
loaded_result = load_resultfile( - tmpdir.join('node').join('spc').join('result_spc.pklz').strpath) + tmpdir.join("node").join("spc").join("result_spc.pklz").strpath + ) assert result.runtime.dictcopy() == loaded_result.runtime.dictcopy() assert result.inputs == loaded_result.inputs assert result.outputs.get() == loaded_result.outputs.get() # Test the mobility of the result file. - copytree(tmpdir.join('node').strpath, tmpdir.join('node2').strpath) - rmtree(tmpdir.join('node').strpath) + copytree(tmpdir.join("node").strpath, tmpdir.join("node2").strpath) + rmtree(tmpdir.join("node").strpath) if use_relative: loaded_result2 = load_resultfile( - tmpdir.join('node2').join('spc').join('result_spc.pklz').strpath) + tmpdir.join("node2").join("spc").join("result_spc.pklz").strpath + ) assert result.runtime.dictcopy() == loaded_result2.runtime.dictcopy() assert result.inputs == loaded_result2.inputs assert loaded_result2.outputs.get() != result.outputs.get() - newpath = result.outputs.out_path.replace('/node/', '/node2/') + newpath = result.outputs.out_path.replace("/node/", "/node2/") assert loaded_result2.outputs.out_path == newpath assert loaded_result2.outputs.out_tuple[0] == newpath - assert loaded_result2.outputs.out_dict_path['2'] == newpath + assert loaded_result2.outputs.out_dict_path["2"] == newpath else: with pytest.raises(nib.TraitError): load_resultfile( - tmpdir.join('node2').join('spc').join('result_spc.pklz').strpath) + tmpdir.join("node2").join("spc").join("result_spc.pklz").strpath + ) - config.set('execution', 'use_relative_paths', old_use_relative) + config.set("execution", "use_relative_paths", old_use_relative) diff --git a/nipype/pipeline/engine/tests/test_workflows.py b/nipype/pipeline/engine/tests/test_workflows.py index 0cc7f2142f..75f77525f8 100644 --- a/nipype/pipeline/engine/tests/test_workflows.py +++ b/nipype/pipeline/engine/tests/test_workflows.py @@ -20,27 +20,25 @@ def test_init(): with pytest.raises(TypeError): pe.Workflow() - pipe = pe.Workflow(name='pipe') + pipe = pe.Workflow(name="pipe") assert type(pipe._graph) == nx.DiGraph def test_connect(): - pipe = pe.Workflow(name='pipe') - mod2 = pe.Node(EngineTestInterface(), name='mod2') - mod1 = pe.Node(EngineTestInterface(), name='mod1') - pipe.connect([(mod1, mod2, [('output1', 'input1')])]) + pipe = pe.Workflow(name="pipe") + mod2 = pe.Node(EngineTestInterface(), name="mod2") + mod1 = pe.Node(EngineTestInterface(), name="mod1") + pipe.connect([(mod1, mod2, [("output1", "input1")])]) assert mod1 in pipe._graph.nodes() assert mod2 in pipe._graph.nodes() - assert pipe._graph.get_edge_data(mod1, mod2) == { - 'connect': [('output1', 'input1')] - } + assert pipe._graph.get_edge_data(mod1, mod2) == {"connect": [("output1", "input1")]} def test_add_nodes(): - pipe = pe.Workflow(name='pipe') - mod1 = pe.Node(EngineTestInterface(), name='mod1') - mod2 = pe.Node(EngineTestInterface(), name='mod2') + pipe = pe.Workflow(name="pipe") + mod1 = pe.Node(EngineTestInterface(), name="mod1") + mod2 = pe.Node(EngineTestInterface(), name="mod2") pipe.add_nodes([mod1, mod2]) assert mod1 in pipe._graph.nodes() @@ -48,40 +46,40 @@ def test_add_nodes(): def test_disconnect(): - a = pe.Node(niu.IdentityInterface(fields=['a', 'b']), name='a') - b = pe.Node(niu.IdentityInterface(fields=['a', 'b']), name='b') - flow1 = pe.Workflow(name='test') - flow1.connect(a, 'a', b, 'a') - flow1.disconnect(a, 'a', b, 'a') + a = pe.Node(niu.IdentityInterface(fields=["a", "b"]), name="a") + b = pe.Node(niu.IdentityInterface(fields=["a", "b"]), name="b") + 
flow1 = pe.Workflow(name="test") + flow1.connect(a, "a", b, "a") + flow1.disconnect(a, "a", b, "a") assert list(flow1._graph.edges()) == [] def test_workflow_add(): - n1 = pe.Node(niu.IdentityInterface(fields=['a', 'b']), name='n1') - n2 = pe.Node(niu.IdentityInterface(fields=['c', 'd']), name='n2') - n3 = pe.Node(niu.IdentityInterface(fields=['c', 'd']), name='n1') - w1 = pe.Workflow(name='test') - w1.connect(n1, 'a', n2, 'c') + n1 = pe.Node(niu.IdentityInterface(fields=["a", "b"]), name="n1") + n2 = pe.Node(niu.IdentityInterface(fields=["c", "d"]), name="n2") + n3 = pe.Node(niu.IdentityInterface(fields=["c", "d"]), name="n1") + w1 = pe.Workflow(name="test") + w1.connect(n1, "a", n2, "c") for node in [n1, n2, n3]: with pytest.raises(IOError): w1.add_nodes([node]) with pytest.raises(IOError): - w1.connect([(w1, n2, [('n1.a', 'd')])]) + w1.connect([(w1, n2, [("n1.a", "d")])]) def test_doubleconnect(): - a = pe.Node(niu.IdentityInterface(fields=['a', 'b']), name='a') - b = pe.Node(niu.IdentityInterface(fields=['a', 'b']), name='b') - flow1 = pe.Workflow(name='test') - flow1.connect(a, 'a', b, 'a') + a = pe.Node(niu.IdentityInterface(fields=["a", "b"]), name="a") + b = pe.Node(niu.IdentityInterface(fields=["a", "b"]), name="b") + flow1 = pe.Workflow(name="test") + flow1.connect(a, "a", b, "a") with pytest.raises(Exception) as excinfo: - flow1.connect(a, 'b', b, 'a') + flow1.connect(a, "b", b, "a") assert "Trying to connect" in str(excinfo.value) - c = pe.Node(niu.IdentityInterface(fields=['a', 'b']), name='c') - flow1 = pe.Workflow(name='test2') + c = pe.Node(niu.IdentityInterface(fields=["a", "b"]), name="c") + flow1 = pe.Workflow(name="test2") with pytest.raises(Exception) as excinfo: - flow1.connect([(a, c, [('b', 'b')]), (b, c, [('a', 'b')])]) + flow1.connect([(a, c, [("b", "b")]), (b, c, [("a", "b")])]) assert "Trying to connect" in str(excinfo.value) @@ -114,29 +112,31 @@ def test_duplicate_node_check(): def _test_function(arg1): import os - file1 = os.path.join(os.getcwd(), 'file1.txt') - file2 = os.path.join(os.getcwd(), 'file2.txt') - file3 = os.path.join(os.getcwd(), 'file3.txt') - file4 = os.path.join(os.getcwd(), 'subdir', 'file4.txt') + + file1 = os.path.join(os.getcwd(), "file1.txt") + file2 = os.path.join(os.getcwd(), "file2.txt") + file3 = os.path.join(os.getcwd(), "file3.txt") + file4 = os.path.join(os.getcwd(), "subdir", "file4.txt") os.mkdir("subdir") for filename in [file1, file2, file3, file4]: - with open(filename, 'wt') as fp: - fp.write('%d' % arg1) + with open(filename, "wt") as fp: + fp.write("%d" % arg1) return file1, file2, os.path.join(os.getcwd(), "subdir") def _test_function2(in_file, arg): import os - with open(in_file, 'rt') as fp: + + with open(in_file, "rt") as fp: in_arg = fp.read() - file1 = os.path.join(os.getcwd(), 'file1.txt') - file2 = os.path.join(os.getcwd(), 'file2.txt') - file3 = os.path.join(os.getcwd(), 'file3.txt') + file1 = os.path.join(os.getcwd(), "file1.txt") + file2 = os.path.join(os.getcwd(), "file2.txt") + file3 = os.path.join(os.getcwd(), "file3.txt") files = [file1, file2, file3] for filename in files: - with open(filename, 'wt') as fp: - fp.write('%d' % arg + in_arg) + with open(filename, "wt") as fp: + fp.write("%d" % arg + in_arg) return file1, file2, 1 @@ -145,67 +145,69 @@ def _test_function3(arg): @pytest.mark.parametrize( - 'plugin, remove_unnecessary_outputs, keep_inputs', - list(product(['Linear', 'MultiProc'], [False, True], [True, False]))) -def test_outputs_removal_wf(tmpdir, plugin, remove_unnecessary_outputs, - 
keep_inputs): + "plugin, remove_unnecessary_outputs, keep_inputs", + list(product(["Linear", "MultiProc"], [False, True], [True, False])), +) +def test_outputs_removal_wf(tmpdir, plugin, remove_unnecessary_outputs, keep_inputs): config.set_default_config() - config.set('execution', 'remove_unnecessary_outputs', - remove_unnecessary_outputs) - config.set('execution', 'keep_inputs', keep_inputs) + config.set("execution", "remove_unnecessary_outputs", remove_unnecessary_outputs) + config.set("execution", "keep_inputs", keep_inputs) n1 = pe.Node( niu.Function( - output_names=['out_file1', 'out_file2', 'dir'], - function=_test_function), - name='n1', - base_dir=tmpdir.strpath) + output_names=["out_file1", "out_file2", "dir"], function=_test_function + ), + name="n1", + base_dir=tmpdir.strpath, + ) n1.inputs.arg1 = 1 n2 = pe.Node( niu.Function( - output_names=['out_file1', 'out_file2', 'n'], - function=_test_function2), - name='n2', - base_dir=tmpdir.strpath) + output_names=["out_file1", "out_file2", "n"], function=_test_function2 + ), + name="n2", + base_dir=tmpdir.strpath, + ) n2.inputs.arg = 2 n3 = pe.Node( - niu.Function( - output_names=['n'], - function=_test_function3), - name='n3', - base_dir=tmpdir.strpath) + niu.Function(output_names=["n"], function=_test_function3), + name="n3", + base_dir=tmpdir.strpath, + ) - wf = pe.Workflow( - name="node_rem_test" + plugin, base_dir=tmpdir.strpath) + wf = pe.Workflow(name="node_rem_test" + plugin, base_dir=tmpdir.strpath) wf.connect(n1, "out_file1", n2, "in_file") wf.run(plugin=plugin) # Necessary outputs HAVE to exist - assert os.path.exists( - os.path.join(wf.base_dir, wf.name, n1.name, 'file1.txt')) - assert os.path.exists( - os.path.join(wf.base_dir, wf.name, n2.name, 'file1.txt')) - assert os.path.exists( - os.path.join(wf.base_dir, wf.name, n2.name, 'file2.txt')) + assert os.path.exists(os.path.join(wf.base_dir, wf.name, n1.name, "file1.txt")) + assert os.path.exists(os.path.join(wf.base_dir, wf.name, n2.name, "file1.txt")) + assert os.path.exists(os.path.join(wf.base_dir, wf.name, n2.name, "file2.txt")) # Unnecessary outputs exist only iff remove_unnecessary_outputs is True - assert os.path.exists( - os.path.join(wf.base_dir, wf.name, n1.name, - 'file2.txt')) is not remove_unnecessary_outputs - assert os.path.exists( - os.path.join(wf.base_dir, wf.name, n1.name, "subdir", - 'file4.txt')) is not remove_unnecessary_outputs - assert os.path.exists( - os.path.join(wf.base_dir, wf.name, n1.name, - 'file3.txt')) is not remove_unnecessary_outputs - assert os.path.exists( - os.path.join(wf.base_dir, wf.name, n2.name, - 'file3.txt')) is not remove_unnecessary_outputs - - n4 = pe.Node(UtilsTestInterface(), name='n4', base_dir=tmpdir.strpath) + assert ( + os.path.exists(os.path.join(wf.base_dir, wf.name, n1.name, "file2.txt")) + is not remove_unnecessary_outputs + ) + assert ( + os.path.exists( + os.path.join(wf.base_dir, wf.name, n1.name, "subdir", "file4.txt") + ) + is not remove_unnecessary_outputs + ) + assert ( + os.path.exists(os.path.join(wf.base_dir, wf.name, n1.name, "file3.txt")) + is not remove_unnecessary_outputs + ) + assert ( + os.path.exists(os.path.join(wf.base_dir, wf.name, n2.name, "file3.txt")) + is not remove_unnecessary_outputs + ) + + n4 = pe.Node(UtilsTestInterface(), name="n4", base_dir=tmpdir.strpath) wf.connect(n2, "out_file1", n4, "in_file") def pick_first(l): @@ -216,50 +218,49 @@ def pick_first(l): wf.run(plugin=plugin) # Test necessary outputs - assert os.path.exists( - os.path.join(wf.base_dir, wf.name, n2.name, 
'file1.txt')) - assert os.path.exists( - os.path.join(wf.base_dir, wf.name, n2.name, 'file1.txt')) + assert os.path.exists(os.path.join(wf.base_dir, wf.name, n2.name, "file1.txt")) + assert os.path.exists(os.path.join(wf.base_dir, wf.name, n2.name, "file1.txt")) # Test unnecessary outputs - assert os.path.exists( - os.path.join(wf.base_dir, wf.name, n2.name, - 'file2.txt')) is not remove_unnecessary_outputs + assert ( + os.path.exists(os.path.join(wf.base_dir, wf.name, n2.name, "file2.txt")) + is not remove_unnecessary_outputs + ) # Test keep_inputs - assert os.path.exists( - os.path.join(wf.base_dir, wf.name, n4.name, - 'file1.txt')) is keep_inputs + assert ( + os.path.exists(os.path.join(wf.base_dir, wf.name, n4.name, "file1.txt")) + is keep_inputs + ) def _test_function4(): - raise FileNotFoundError('Generic error') + raise FileNotFoundError("Generic error") def test_config_setting(tmpdir): tmpdir.chdir() - wf = pe.Workflow('config') + wf = pe.Workflow("config") wf.base_dir = os.getcwd() - crashdir = os.path.join(os.getcwd(), 'crashdir') + crashdir = os.path.join(os.getcwd(), "crashdir") os.mkdir(crashdir) wf.config = {"execution": {"crashdump_dir": crashdir}} - n1 = pe.Node(niu.Function(function=_test_function4), - name='errorfunc') + n1 = pe.Node(niu.Function(function=_test_function4), name="errorfunc") wf.add_nodes([n1]) try: wf.run() except RuntimeError: pass - fl = glob(os.path.join(crashdir, 'crash*')) + fl = glob(os.path.join(crashdir, "crash*")) assert len(fl) == 1 # Now test node overwrite - crashdir2 = os.path.join(os.getcwd(), 'crashdir2') + crashdir2 = os.path.join(os.getcwd(), "crashdir2") os.mkdir(crashdir2) - crashdir3 = os.path.join(os.getcwd(), 'crashdir3') + crashdir3 = os.path.join(os.getcwd(), "crashdir3") os.mkdir(crashdir3) wf.config = {"execution": {"crashdump_dir": crashdir3}} n1.config = {"execution": {"crashdump_dir": crashdir2}} @@ -269,7 +270,7 @@ def test_config_setting(tmpdir): except RuntimeError: pass - fl = glob(os.path.join(crashdir2, 'crash*')) + fl = glob(os.path.join(crashdir2, "crash*")) assert len(fl) == 1 - fl = glob(os.path.join(crashdir3, 'crash*')) + fl = glob(os.path.join(crashdir3, "crash*")) assert len(fl) == 0 diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py index 195ebc6f69..d1fde0ba32 100644 --- a/nipype/pipeline/engine/workflows.py +++ b/nipype/pipeline/engine/workflows.py @@ -18,19 +18,26 @@ from ... 
import config, logging from ...utils.misc import str2bool -from ...utils.functions import (getsource, create_function_from_source) +from ...utils.functions import getsource, create_function_from_source -from ...interfaces.base import (traits, TraitedSpec, TraitDictObject, - TraitListObject) +from ...interfaces.base import traits, TraitedSpec, TraitDictObject, TraitListObject from ...utils.filemanip import save_json -from .utils import (generate_expanded_graph, export_graph, write_workflow_prov, - write_workflow_resources, format_dot, topological_sort, - get_print_name, merge_dict, format_node) +from .utils import ( + generate_expanded_graph, + export_graph, + write_workflow_prov, + write_workflow_resources, + format_dot, + topological_sort, + get_print_name, + merge_dict, + format_node, +) from .base import EngineBase from .nodes import MapNode -logger = logging.getLogger('nipype.workflow') +logger = logging.getLogger("nipype.workflow") class Workflow(EngineBase): @@ -48,6 +55,7 @@ def __init__(self, name, base_dir=None): """ import networkx as nx + super(Workflow, self).__init__(name, base_dir) self._graph = nx.DiGraph() @@ -120,12 +128,14 @@ def connect(self, *args, **kwargs): elif len(args) == 4: connection_list = [(args[0], args[2], [(args[1], args[3])])] else: - raise TypeError('connect() takes either 4 arguments, or 1 list of' - ' connection tuples (%d args given)' % len(args)) + raise TypeError( + "connect() takes either 4 arguments, or 1 list of" + " connection tuples (%d args given)" % len(args) + ) disconnect = False if kwargs: - disconnect = kwargs.get('disconnect', False) + disconnect = kwargs.get("disconnect", False) if disconnect: self.disconnect(connection_list) @@ -134,9 +144,10 @@ def connect(self, *args, **kwargs): newnodes = [] for srcnode, destnode, _ in connection_list: if self in [srcnode, destnode]: - msg = ('Workflow connect cannot contain itself as node:' - ' src[%s] dest[%s] workflow[%s]') % (srcnode, destnode, - self.name) + msg = ( + "Workflow connect cannot contain itself as node:" + " src[%s] dest[%s] workflow[%s]" + ) % (srcnode, destnode, self.name) raise IOError(msg) if (srcnode not in newnodes) and not self._has_node(srcnode): @@ -158,7 +169,7 @@ def connect(self, *args, **kwargs): if not disconnect and (destnode in self._graph.nodes()): for edge in self._graph.in_edges(destnode): data = self._graph.get_edge_data(*edge) - for sourceinfo, destname in data['connect']: + for sourceinfo, destname in data["connect"]: if destname not in connected_ports[destnode]: connected_ports[destnode] += [destname] for source, dest in connects: @@ -166,22 +177,39 @@ def connect(self, *args, **kwargs): # determine their inputs/outputs depending on # connection settings. Skip these modules in the check if dest in connected_ports[destnode]: - raise Exception("""\ + raise Exception( + """\ Trying to connect %s:%s to %s:%s but input '%s' of node '%s' is already connected. 
-""" % (srcnode, source, destnode, dest, dest, destnode)) - if not (hasattr(destnode, '_interface') and - ('.io' in str(destnode._interface.__class__) or any([ - '.io' in str(val) - for val in destnode._interface.__class__.__bases__ - ]))): +""" + % (srcnode, source, destnode, dest, dest, destnode) + ) + if not ( + hasattr(destnode, "_interface") + and ( + ".io" in str(destnode._interface.__class__) + or any( + [ + ".io" in str(val) + for val in destnode._interface.__class__.__bases__ + ] + ) + ) + ): if not destnode._check_inputs(dest): - not_found.append(['in', destnode.name, dest]) - if not (hasattr(srcnode, '_interface') and - ('.io' in str(srcnode._interface.__class__) or any([ - '.io' in str(val) - for val in srcnode._interface.__class__.__bases__ - ]))): + not_found.append(["in", destnode.name, dest]) + if not ( + hasattr(srcnode, "_interface") + and ( + ".io" in str(srcnode._interface.__class__) + or any( + [ + ".io" in str(val) + for val in srcnode._interface.__class__.__bases__ + ] + ) + ) + ): if isinstance(source, tuple): # handles the case that source is specified # with a function @@ -190,26 +218,27 @@ def connect(self, *args, **kwargs): sourcename = source else: raise Exception( - ('Unknown source specification in ' - 'connection from output of %s') % srcnode.name) + ( + "Unknown source specification in " + "connection from output of %s" + ) + % srcnode.name + ) if sourcename and not srcnode._check_outputs(sourcename): - not_found.append(['out', srcnode.name, sourcename]) + not_found.append(["out", srcnode.name, sourcename]) connected_ports[destnode] += [dest] infostr = [] for info in not_found: infostr += [ - "Module %s has no %sput called %s\n" % (info[1], info[0], - info[2]) + "Module %s has no %sput called %s\n" % (info[1], info[0], info[2]) ] if not_found: - raise Exception( - '\n'.join(['Some connections were not found'] + infostr)) + raise Exception("\n".join(["Some connections were not found"] + infostr)) # turn functions into strings for srcnode, destnode, connects in connection_list: for idx, (src, dest) in enumerate(connects): - if isinstance(src, - tuple) and not isinstance(src[1], (str, bytes)): + if isinstance(src, tuple) and not isinstance(src[1], (str, bytes)): function_source = getsource(src[1]) connects[idx] = ((src[0], function_source, src[2:]), dest) @@ -217,30 +246,28 @@ def connect(self, *args, **kwargs): for srcnode, destnode, connects in connection_list: edge_data = self._graph.get_edge_data(srcnode, destnode, None) if edge_data: - logger.debug('(%s, %s): Edge data exists: %s', srcnode, - destnode, str(edge_data)) + logger.debug( + "(%s, %s): Edge data exists: %s", srcnode, destnode, str(edge_data) + ) for data in connects: - if data not in edge_data['connect']: - edge_data['connect'].append(data) + if data not in edge_data["connect"]: + edge_data["connect"].append(data) if disconnect: - logger.debug('Removing connection: %s', str(data)) - edge_data['connect'].remove(data) - if edge_data['connect']: - self._graph.add_edges_from([(srcnode, destnode, - edge_data)]) + logger.debug("Removing connection: %s", str(data)) + edge_data["connect"].remove(data) + if edge_data["connect"]: + self._graph.add_edges_from([(srcnode, destnode, edge_data)]) else: # pass - logger.debug('Removing connection: %s->%s', srcnode, - destnode) + logger.debug("Removing connection: %s->%s", srcnode, destnode) self._graph.remove_edges_from([(srcnode, destnode)]) elif not disconnect: - logger.debug('(%s, %s): No edge data', srcnode, destnode) - self._graph.add_edges_from([(srcnode, 
destnode, { - 'connect': connects - })]) + logger.debug("(%s, %s): No edge data", srcnode, destnode) + self._graph.add_edges_from([(srcnode, destnode, {"connect": connects})]) edge_data = self._graph.get_edge_data(srcnode, destnode) - logger.debug('(%s, %s): new edge data: %s', srcnode, destnode, - str(edge_data)) + logger.debug( + "(%s, %s): new edge data: %s", srcnode, destnode, str(edge_data) + ) def disconnect(self, *args): """Disconnect nodes @@ -251,25 +278,25 @@ def disconnect(self, *args): elif len(args) == 4: connection_list = [(args[0], args[2], [(args[1], args[3])])] else: - raise TypeError('disconnect() takes either 4 arguments, or 1 list ' - 'of connection tuples (%d args given)' % len(args)) + raise TypeError( + "disconnect() takes either 4 arguments, or 1 list " + "of connection tuples (%d args given)" % len(args) + ) for srcnode, dstnode, conn in connection_list: - logger.debug('disconnect(): %s->%s %s', srcnode, dstnode, - str(conn)) + logger.debug("disconnect(): %s->%s %s", srcnode, dstnode, str(conn)) if self in [srcnode, dstnode]: raise IOError( - 'Workflow connect cannot contain itself as node: src[%s] ' - 'dest[%s] workflow[%s]') % (srcnode, dstnode, self.name) + "Workflow connect cannot contain itself as node: src[%s] " + "dest[%s] workflow[%s]" + ) % (srcnode, dstnode, self.name) # If node is not in the graph, not connected if not self._has_node(srcnode) or not self._has_node(dstnode): continue - edge_data = self._graph.get_edge_data(srcnode, dstnode, { - 'connect': [] - }) - ed_conns = [(c[0], c[1]) for c in edge_data['connect']] + edge_data = self._graph.get_edge_data(srcnode, dstnode, {"connect": []}) + ed_conns = [(c[0], c[1]) for c in edge_data["connect"]] remove = [] for edge in conn: @@ -277,12 +304,12 @@ def disconnect(self, *args): # idx = ed_conns.index(edge) remove.append((edge[0], edge[1])) - logger.debug('disconnect(): remove list %s', str(remove)) + logger.debug("disconnect(): remove list %s", str(remove)) for el in remove: - edge_data['connect'].remove(el) - logger.debug('disconnect(): removed connection %s', str(el)) + edge_data["connect"].remove(el) + logger.debug("disconnect(): removed connection %s", str(el)) - if not edge_data['connect']: + if not edge_data["connect"]: self._graph.remove_edge(srcnode, dstnode) else: self._graph.add_edges_from([(srcnode, dstnode, edge_data)]) @@ -299,20 +326,21 @@ def add_nodes(self, nodes): all_nodes = self._get_all_nodes() for node in nodes: if self._has_node(node): - raise IOError('Node %s already exists in the workflow' % node) + raise IOError("Node %s already exists in the workflow" % node) if isinstance(node, Workflow): for subnode in node._get_all_nodes(): if subnode in all_nodes: - raise IOError(('Subnode %s of node %s already exists ' - 'in the workflow') % (subnode, node)) + raise IOError( + ("Subnode %s of node %s already exists " "in the workflow") + % (subnode, node) + ) newnodes.append(node) if not newnodes: - logger.debug('no new nodes to add') + logger.debug("no new nodes to add") return for node in newnodes: if not issubclass(node.__class__, EngineBase): - raise Exception('Node %s must be a subclass of EngineBase', - node) + raise Exception("Node %s must be a subclass of EngineBase", node) self._check_nodes(newnodes) for node in newnodes: if node._hierarchy is None: @@ -341,16 +369,15 @@ def outputs(self): def get_node(self, name): """Return an internal node by name """ - nodenames = name.split('.') + nodenames = name.split(".") nodename = nodenames[0] outnode = [ - node for node in 
self._graph.nodes() - if str(node).endswith('.' + nodename) + node for node in self._graph.nodes() if str(node).endswith("." + nodename) ] if outnode: outnode = outnode[0] if nodenames[1:] and issubclass(outnode.__class__, Workflow): - outnode = outnode.get_node('.'.join(nodenames[1:])) + outnode = outnode.get_node(".".join(nodenames[1:])) else: outnode = None return outnode @@ -359,22 +386,27 @@ def list_node_names(self): """List names of all nodes in a workflow """ import networkx as nx + outlist = [] for node in nx.topological_sort(self._graph): if isinstance(node, Workflow): - outlist.extend([ - '.'.join((node.name, nodename)) - for nodename in node.list_node_names() - ]) + outlist.extend( + [ + ".".join((node.name, nodename)) + for nodename in node.list_node_names() + ] + ) else: outlist.append(node.name) return sorted(outlist) - def write_graph(self, - dotfilename='graph.dot', - graph2use='hierarchical', - format="png", - simple_form=True): + def write_graph( + self, + dotfilename="graph.dot", + graph2use="hierarchical", + format="png", + simple_form=True, + ): """Generates a graphviz dot file and a png file Parameters @@ -398,12 +430,13 @@ def write_graph(self, False. """ - graphtypes = ['orig', 'flat', 'hierarchical', 'exec', 'colored'] + graphtypes = ["orig", "flat", "hierarchical", "exec", "colored"] if graph2use not in graphtypes: - raise ValueError('Unknown graph2use keyword. Must be one of: ' + - str(graphtypes)) + raise ValueError( + "Unknown graph2use keyword. Must be one of: " + str(graphtypes) + ) base_dir, dotfilename = op.split(dotfilename) - if base_dir == '': + if base_dir == "": if self.base_dir: base_dir = self.base_dir if self.name: @@ -411,56 +444,58 @@ def write_graph(self, else: base_dir = os.getcwd() os.makedirs(base_dir, exist_ok=True) - if graph2use in ['hierarchical', 'colored']: + if graph2use in ["hierarchical", "colored"]: if self.name[:1].isdigit(): # these graphs break if int - raise ValueError('{} graph failed, workflow name cannot begin ' - 'with a number'.format(graph2use)) + raise ValueError( + "{} graph failed, workflow name cannot begin " + "with a number".format(graph2use) + ) dotfilename = op.join(base_dir, dotfilename) self.write_hierarchical_dotfile( dotfilename=dotfilename, colored=graph2use == "colored", - simple_form=simple_form) + simple_form=simple_form, + ) outfname = format_dot(dotfilename, format=format) else: graph = self._graph - if graph2use in ['flat', 'exec']: + if graph2use in ["flat", "exec"]: graph = self._create_flat_graph() - if graph2use == 'exec': + if graph2use == "exec": graph = generate_expanded_graph(deepcopy(graph)) outfname = export_graph( graph, base_dir, dotfilename=dotfilename, format=format, - simple_form=simple_form) + simple_form=simple_form, + ) logger.info( - 'Generated workflow graph: %s (graph2use=%s, simple_form=%s).' % - (outfname, graph2use, simple_form)) + "Generated workflow graph: %s (graph2use=%s, simple_form=%s)." 
+ % (outfname, graph2use, simple_form) + ) return outfname - def write_hierarchical_dotfile(self, - dotfilename=None, - colored=False, - simple_form=True): - dotlist = ['digraph %s{' % self.name] + def write_hierarchical_dotfile( + self, dotfilename=None, colored=False, simple_form=True + ): + dotlist = ["digraph %s{" % self.name] dotlist.append( - self._get_dot( - prefix=' ', colored=colored, simple_form=simple_form)) - dotlist.append('}') - dotstr = '\n'.join(dotlist) + self._get_dot(prefix=" ", colored=colored, simple_form=simple_form) + ) + dotlist.append("}") + dotstr = "\n".join(dotlist) if dotfilename: - fp = open(dotfilename, 'wt') + fp = open(dotfilename, "wt") fp.writelines(dotstr) fp.close() else: logger.info(dotstr) - def export(self, - filename=None, - prefix="output", - format="python", - include_config=False): + def export( + self, filename=None, prefix="output", format="python", include_config=False + ): """Export object into a different format Parameters @@ -476,41 +511,39 @@ def export(self, """ import networkx as nx + formats = ["python"] if format not in formats: - raise ValueError('format must be one of: %s' % '|'.join(formats)) + raise ValueError("format must be one of: %s" % "|".join(formats)) flatgraph = self._create_flat_graph() nodes = nx.topological_sort(flatgraph) all_lines = None - lines = ['# Workflow'] - importlines = [ - 'from nipype.pipeline.engine import Workflow, ' - 'Node, MapNode' - ] + lines = ["# Workflow"] + importlines = ["from nipype.pipeline.engine import Workflow, " "Node, MapNode"] functions = {} if format == "python": connect_template = '%s.connect(%%s, %%s, %%s, "%%s")' % self.name - connect_template2 = '%s.connect(%%s, "%%s", %%s, "%%s")' \ - % self.name + connect_template2 = '%s.connect(%%s, "%%s", %%s, "%%s")' % self.name wfdef = '%s = Workflow("%s")' % (self.name, self.name) lines.append(wfdef) if include_config: - lines.append('%s.config = %s' % (self.name, self.config)) + lines.append("%s.config = %s" % (self.name, self.config)) for idx, node in enumerate(nodes): - nodename = node.fullname.replace('.', '_') + nodename = node.fullname.replace(".", "_") # write nodes nodelines = format_node( - node, format='python', include_config=include_config) + node, format="python", include_config=include_config + ) for line in nodelines: - if line.startswith('from'): + if line.startswith("from"): if line not in importlines: importlines.append(line) else: lines.append(line) # write connections for u, _, d in flatgraph.in_edges(nbunch=node, data=True): - for cd in d['connect']: + for cd in d["connect"]: if isinstance(cd[0], tuple): args = list(cd[0]) if args[1] in functions: @@ -518,30 +551,39 @@ def export(self, else: func = create_function_from_source(args[1]) funcname = [ - name for name in func.__globals__ - if name != '__builtins__' + name + for name in func.__globals__ + if name != "__builtins__" ][0] functions[args[1]] = funcname args[1] = funcname args = tuple([arg for arg in args if arg]) - line_args = (u.fullname.replace('.', '_'), args, - nodename, cd[1]) + line_args = ( + u.fullname.replace(".", "_"), + args, + nodename, + cd[1], + ) line = connect_template % line_args line = line.replace("'%s'" % funcname, funcname) lines.append(line) else: - line_args = (u.fullname.replace('.', '_'), cd[0], - nodename, cd[1]) + line_args = ( + u.fullname.replace(".", "_"), + cd[0], + nodename, + cd[1], + ) lines.append(connect_template2 % line_args) - functionlines = ['# Functions'] + functionlines = ["# Functions"] for function in functions: 
functionlines.append(pickle.loads(function).rstrip()) all_lines = importlines + functionlines + lines if not filename: - filename = '%s%s.py' % (prefix, self.name) - with open(filename, 'wt') as fp: - fp.writelines('\n'.join(all_lines)) + filename = "%s%s.py" % (prefix, self.name) + with open(filename, "wt") as fp: + fp.writelines("\n".join(all_lines)) return all_lines def run(self, plugin=None, plugin_args=None, updatehash=False): @@ -557,26 +599,25 @@ def run(self, plugin=None, plugin_args=None, updatehash=False): constructor. see individual plugin doc strings for details. """ if plugin is None: - plugin = config.get('execution', 'plugin') + plugin = config.get("execution", "plugin") if not isinstance(plugin, (str, bytes)): runner = plugin - plugin = runner.__class__.__name__[:-len('Plugin')] + plugin = runner.__class__.__name__[: -len("Plugin")] plugin_args = runner.plugin_args else: - name = '.'.join(__name__.split('.')[:-2] + ['plugins']) + name = ".".join(__name__.split(".")[:-2] + ["plugins"]) try: __import__(name) except ImportError: - msg = 'Could not import plugin module: %s' % name + msg = "Could not import plugin module: %s" % name logger.error(msg) raise ImportError(msg) else: - plugin_mod = getattr(sys.modules[name], '%sPlugin' % plugin) + plugin_mod = getattr(sys.modules[name], "%sPlugin" % plugin) runner = plugin_mod(plugin_args=plugin_args) flatgraph = self._create_flat_graph() self.config = merge_dict(deepcopy(config._sections), self.config) - logger.info('Workflow %s settings: %s', self.name, - str(sorted(self.config))) + logger.info("Workflow %s settings: %s", self.name, str(sorted(self.config))) self._set_needed_outputs(flatgraph) execgraph = generate_expanded_graph(deepcopy(flatgraph)) for index, node in enumerate(execgraph.nodes()): @@ -586,21 +627,21 @@ def run(self, plugin=None, plugin_args=None, updatehash=False): if isinstance(node, MapNode): node.use_plugin = (plugin, plugin_args) self._configure_exec_nodes(execgraph) - if str2bool(self.config['execution']['create_report']): + if str2bool(self.config["execution"]["create_report"]): self._write_report_info(self.base_dir, self.name, execgraph) runner.run(execgraph, updatehash=updatehash, config=self.config) - datestr = datetime.utcnow().strftime('%Y%m%dT%H%M%S') - if str2bool(self.config['execution']['write_provenance']): - prov_base = op.join(self.base_dir, - 'workflow_provenance_%s' % datestr) - logger.info('Provenance file prefix: %s' % prov_base) - write_workflow_prov(execgraph, prov_base, format='all') + datestr = datetime.utcnow().strftime("%Y%m%dT%H%M%S") + if str2bool(self.config["execution"]["write_provenance"]): + prov_base = op.join(self.base_dir, "workflow_provenance_%s" % datestr) + logger.info("Provenance file prefix: %s" % prov_base) + write_workflow_prov(execgraph, prov_base, format="all") if config.resource_monitor: base_dir = self.base_dir or os.getcwd() write_workflow_resources( execgraph, - filename=op.join(base_dir, self.name, 'resource_monitor.json')) + filename=op.join(base_dir, self.name, "resource_monitor.json"), + ) return execgraph # PRIVATE API AND FUNCTIONS @@ -611,50 +652,58 @@ def _write_report_info(self, workingdir, name, graph): report_dir = op.join(workingdir, name) os.makedirs(report_dir, exist_ok=True) shutil.copyfile( - op.join(op.dirname(__file__), 'report_template.html'), - op.join(report_dir, 'index.html')) + op.join(op.dirname(__file__), "report_template.html"), + op.join(report_dir, "index.html"), + ) shutil.copyfile( - op.join(op.dirname(__file__), '..', '..', 'external', 
'd3.js'), - op.join(report_dir, 'd3.js')) + op.join(op.dirname(__file__), "..", "..", "external", "d3.js"), + op.join(report_dir, "d3.js"), + ) nodes, groups = topological_sort(graph, depth_first=True) - graph_file = op.join(report_dir, 'graph1.json') - json_dict = {'nodes': [], 'links': [], 'groups': [], 'maxN': 0} + graph_file = op.join(report_dir, "graph1.json") + json_dict = {"nodes": [], "links": [], "groups": [], "maxN": 0} for i, node in enumerate(nodes): - report_file = "%s/_report/report.rst" % \ - node.output_dir().replace(report_dir, '') - result_file = "%s/result_%s.pklz" % \ - (node.output_dir().replace(report_dir, ''), - node.name) - json_dict['nodes'].append( + report_file = "%s/_report/report.rst" % node.output_dir().replace( + report_dir, "" + ) + result_file = "%s/result_%s.pklz" % ( + node.output_dir().replace(report_dir, ""), + node.name, + ) + json_dict["nodes"].append( dict( - name='%d_%s' % (i, node.name), + name="%d_%s" % (i, node.name), report=report_file, result=result_file, - group=groups[i])) + group=groups[i], + ) + ) maxN = 0 for gid in np.unique(groups): procs = [i for i, val in enumerate(groups) if val == gid] N = len(procs) if N > maxN: maxN = N - json_dict['groups'].append( - dict(procs=procs, total=N, name='Group_%05d' % gid)) - json_dict['maxN'] = maxN + json_dict["groups"].append( + dict(procs=procs, total=N, name="Group_%05d" % gid) + ) + json_dict["maxN"] = maxN for u, v in graph.in_edges(): - json_dict['links'].append( - dict(source=nodes.index(u), target=nodes.index(v), value=1)) + json_dict["links"].append( + dict(source=nodes.index(u), target=nodes.index(v), value=1) + ) save_json(graph_file, json_dict) - graph_file = op.join(report_dir, 'graph.json') + graph_file = op.join(report_dir, "graph.json") # Avoid RuntimeWarning: divide by zero encountered in log10 num_nodes = len(nodes) if num_nodes > 0: index_name = np.ceil(np.log10(num_nodes)).astype(int) else: index_name = 0 - template = '%%0%dd_' % index_name + template = "%%0%dd_" % index_name def getname(u, i): - name_parts = u.fullname.split('.') + name_parts = u.fullname.split(".") # return '.'.join(name_parts[:-1] + [template % i + name_parts[-1]]) return template % i + name_parts[-1] @@ -664,16 +713,13 @@ def getname(u, i): for u, v in graph.in_edges(nbunch=node): imports.append(getname(u, nodes.index(u))) json_dict.append( - dict( - name=getname(node, i), - size=1, - group=groups[i], - imports=imports)) + dict(name=getname(node, i), size=1, group=groups[i], imports=imports) + ) save_json(graph_file, json_dict) def _set_needed_outputs(self, graph): """Initialize node with list of which outputs are needed.""" - rm_outputs = self.config['execution']['remove_unnecessary_outputs'] + rm_outputs = self.config["execution"]["remove_unnecessary_outputs"] if not str2bool(rm_outputs): return for node in graph.nodes(): @@ -681,8 +727,7 @@ def _set_needed_outputs(self, graph): for edge in graph.out_edges(node): data = graph.get_edge_data(*edge) sourceinfo = [ - v1[0] if isinstance(v1, tuple) else v1 - for v1, v2 in data['connect'] + v1[0] if isinstance(v1, tuple) else v1 for v1, v2 in data["connect"] ] node.needed_outputs += [ v for v in sourceinfo if v not in node.needed_outputs @@ -697,11 +742,11 @@ def _configure_exec_nodes(self, graph): node.input_source = {} for edge in graph.in_edges(node): data = graph.get_edge_data(*edge) - for sourceinfo, field in data['connect']: - node.input_source[field] = \ - (op.join(edge[0].output_dir(), - 'result_%s.pklz' % edge[0].name), - sourceinfo) + for sourceinfo, 
field in data["connect"]: + node.input_source[field] = ( + op.join(edge[0].output_dir(), "result_%s.pklz" % edge[0].name), + sourceinfo, + ) def _check_nodes(self, nodes): """Checks if any of the nodes are already in the graph @@ -715,23 +760,21 @@ def _check_nodes(self, nodes): try: this_node_lineage = node_lineage[idx] except IndexError: - raise IOError( - 'Duplicate node name "%s" found.' % node.name) + raise IOError('Duplicate node name "%s" found.' % node.name) else: if this_node_lineage in [node._hierarchy, self.name]: - raise IOError( - 'Duplicate node name "%s" found.' % node.name) + raise IOError('Duplicate node name "%s" found.' % node.name) else: node_names.append(node.name) - def _has_attr(self, parameter, subtype='in'): + def _has_attr(self, parameter, subtype="in"): """Checks if a parameter is available as an input or output """ - if subtype == 'in': + if subtype == "in": subobject = self.inputs else: subobject = self.outputs - attrlist = parameter.split('.') + attrlist = parameter.split(".") cur_out = subobject for attr in attrlist: if not hasattr(cur_out, attr): @@ -739,25 +782,25 @@ def _has_attr(self, parameter, subtype='in'): cur_out = getattr(cur_out, attr) return True - def _get_parameter_node(self, parameter, subtype='in'): + def _get_parameter_node(self, parameter, subtype="in"): """Returns the underlying node corresponding to an input or output parameter """ - if subtype == 'in': + if subtype == "in": subobject = self.inputs else: subobject = self.outputs - attrlist = parameter.split('.') + attrlist = parameter.split(".") cur_out = subobject for attr in attrlist[:-1]: cur_out = getattr(cur_out, attr) return cur_out.traits()[attrlist[-1]].node def _check_outputs(self, parameter): - return self._has_attr(parameter, subtype='out') + return self._has_attr(parameter, subtype="out") def _check_inputs(self, parameter): - return self._has_attr(parameter, subtype='in') + return self._has_attr(parameter, subtype="in") def _get_inputs(self): """Returns the inputs of a workflow @@ -773,14 +816,12 @@ def _get_inputs(self): else: taken_inputs = [] for _, _, d in self._graph.in_edges(nbunch=node, data=True): - for cd in d['connect']: + for cd in d["connect"]: taken_inputs.append(cd[1]) unconnectedinputs = TraitedSpec() for key, trait in list(node.inputs.items()): if key not in taken_inputs: - unconnectedinputs.add_trait(key, - traits.Trait( - trait, node=node)) + unconnectedinputs.add_trait(key, traits.Trait(trait, node=node)) value = getattr(node.inputs, key) setattr(unconnectedinputs, key, value) setattr(inputdict, node.name, unconnectedinputs) @@ -814,14 +855,13 @@ def _set_node_input(self, node, param, source, sourceinfo): val = source.get_output(sourceinfo) elif isinstance(sourceinfo, tuple): if callable(sourceinfo[1]): - val = sourceinfo[1](source.get_output(sourceinfo[0]), - *sourceinfo[2:]) + val = sourceinfo[1](source.get_output(sourceinfo[0]), *sourceinfo[2:]) newval = val if isinstance(val, TraitDictObject): newval = dict(val) if isinstance(val, TraitListObject): newval = val[:] - logger.debug('setting node input: %s->%s', param, str(newval)) + logger.debug("setting node input: %s->%s", param, str(newval)) node.set_input(param, deepcopy(newval)) def _get_all_nodes(self): @@ -844,7 +884,7 @@ def _has_node(self, wanted_node): def _create_flat_graph(self): """Make a simple DAG where no node is a workflow.""" - logger.debug('Creating flat graph for workflow: %s', self.name) + logger.debug("Creating flat graph for workflow: %s", self.name) workflowcopy = deepcopy(self) 
workflowcopy._generate_flatgraph() return workflowcopy._graph @@ -856,8 +896,7 @@ def _reset_hierarchy(self): if isinstance(node, Workflow): node._reset_hierarchy() for innernode in node._graph.nodes(): - innernode._hierarchy = '.'.join((self.name, - innernode._hierarchy)) + innernode._hierarchy = ".".join((self.name, innernode._hierarchy)) else: node._hierarchy = self.name @@ -865,179 +904,184 @@ def _generate_flatgraph(self): """Generate a graph containing only Nodes or MapNodes """ import networkx as nx - logger.debug('expanding workflow: %s', self) + + logger.debug("expanding workflow: %s", self) nodes2remove = [] if not nx.is_directed_acyclic_graph(self._graph): - raise Exception(('Workflow: %s is not a directed acyclic graph ' - '(DAG)') % self.name) + raise Exception( + ("Workflow: %s is not a directed acyclic graph " "(DAG)") % self.name + ) nodes = list(nx.topological_sort(self._graph)) for node in nodes: - logger.debug('processing node: %s', node) + logger.debug("processing node: %s", node) if isinstance(node, Workflow): nodes2remove.append(node) # use in_edges instead of in_edges_iter to allow # disconnections to take place properly. otherwise, the # edge dict is modified. # dj: added list() for networkx ver.2 - for u, _, d in list( - self._graph.in_edges(nbunch=node, data=True)): - logger.debug('in: connections-> %s', str(d['connect'])) - for cd in deepcopy(d['connect']): + for u, _, d in list(self._graph.in_edges(nbunch=node, data=True)): + logger.debug("in: connections-> %s", str(d["connect"])) + for cd in deepcopy(d["connect"]): logger.debug("in: %s", str(cd)) - dstnode = node._get_parameter_node(cd[1], subtype='in') + dstnode = node._get_parameter_node(cd[1], subtype="in") srcnode = u srcout = cd[0] - dstin = cd[1].split('.')[-1] - logger.debug('in edges: %s %s %s %s', srcnode, srcout, - dstnode, dstin) + dstin = cd[1].split(".")[-1] + logger.debug( + "in edges: %s %s %s %s", srcnode, srcout, dstnode, dstin + ) self.disconnect(u, cd[0], node, cd[1]) self.connect(srcnode, srcout, dstnode, dstin) # do not use out_edges_iter for reasons stated in in_edges # dj: for ver 2 use list(out_edges) - for _, v, d in list( - self._graph.out_edges(nbunch=node, data=True)): - logger.debug('out: connections-> %s', str(d['connect'])) - for cd in deepcopy(d['connect']): + for _, v, d in list(self._graph.out_edges(nbunch=node, data=True)): + logger.debug("out: connections-> %s", str(d["connect"])) + for cd in deepcopy(d["connect"]): logger.debug("out: %s", str(cd)) dstnode = v if isinstance(cd[0], tuple): parameter = cd[0][0] else: parameter = cd[0] - srcnode = node._get_parameter_node( - parameter, subtype='out') + srcnode = node._get_parameter_node(parameter, subtype="out") if isinstance(cd[0], tuple): srcout = list(cd[0]) - srcout[0] = parameter.split('.')[-1] + srcout[0] = parameter.split(".")[-1] srcout = tuple(srcout) else: - srcout = parameter.split('.')[-1] + srcout = parameter.split(".")[-1] dstin = cd[1] - logger.debug('out edges: %s %s %s %s', srcnode, srcout, - dstnode, dstin) + logger.debug( + "out edges: %s %s %s %s", srcnode, srcout, dstnode, dstin + ) self.disconnect(node, cd[0], v, cd[1]) self.connect(srcnode, srcout, dstnode, dstin) # expand the workflow node # logger.debug('expanding workflow: %s', node) node._generate_flatgraph() for innernode in node._graph.nodes(): - innernode._hierarchy = '.'.join((self.name, - innernode._hierarchy)) + innernode._hierarchy = ".".join((self.name, innernode._hierarchy)) self._graph.add_nodes_from(node._graph.nodes()) 
self._graph.add_edges_from(node._graph.edges(data=True)) if nodes2remove: self._graph.remove_nodes_from(nodes2remove) - logger.debug('finished expanding workflow: %s', self) - - def _get_dot(self, - prefix=None, - hierarchy=None, - colored=False, - simple_form=True, - level=0): + logger.debug("finished expanding workflow: %s", self) + + def _get_dot( + self, prefix=None, hierarchy=None, colored=False, simple_form=True, level=0 + ): """Create a dot file with connection info """ import networkx as nx + if prefix is None: - prefix = ' ' + prefix = " " if hierarchy is None: hierarchy = [] colorset = [ - '#FFFFC8', # Y - '#0000FF', - '#B4B4FF', - '#E6E6FF', # B - '#FF0000', - '#FFB4B4', - '#FFE6E6', # R - '#00A300', - '#B4FFB4', - '#E6FFE6', # G - '#0000FF', - '#B4B4FF' + "#FFFFC8", # Y + "#0000FF", + "#B4B4FF", + "#E6E6FF", # B + "#FF0000", + "#FFB4B4", + "#FFE6E6", # R + "#00A300", + "#B4FFB4", + "#E6FFE6", # G + "#0000FF", + "#B4B4FF", ] # loop B if level > len(colorset) - 2: level = 3 # Loop back to blue dotlist = ['%slabel="%s";' % (prefix, self.name)] for node in nx.topological_sort(self._graph): - fullname = '.'.join(hierarchy + [node.fullname]) - nodename = fullname.replace('.', '_') + fullname = ".".join(hierarchy + [node.fullname]) + nodename = fullname.replace(".", "_") if not isinstance(node, Workflow): node_class_name = get_print_name(node, simple_form=simple_form) if not simple_form: - node_class_name = '.'.join(node_class_name.split('.')[1:]) - if hasattr(node, 'iterables') and node.iterables: - dotlist.append(('%s[label="%s", shape=box3d,' - 'style=filled, color=black, colorscheme' - '=greys7 fillcolor=2];') % - (nodename, node_class_name)) + node_class_name = ".".join(node_class_name.split(".")[1:]) + if hasattr(node, "iterables") and node.iterables: + dotlist.append( + ( + '%s[label="%s", shape=box3d,' + "style=filled, color=black, colorscheme" + "=greys7 fillcolor=2];" + ) + % (nodename, node_class_name) + ) else: if colored: dotlist.append( - ('%s[label="%s", style=filled,' - ' fillcolor="%s"];') % (nodename, node_class_name, - colorset[level])) + ('%s[label="%s", style=filled,' ' fillcolor="%s"];') + % (nodename, node_class_name, colorset[level]) + ) else: - dotlist.append(('%s[label="%s"];') % (nodename, - node_class_name)) + dotlist.append( + ('%s[label="%s"];') % (nodename, node_class_name) + ) for node in nx.topological_sort(self._graph): if isinstance(node, Workflow): - fullname = '.'.join(hierarchy + [node.fullname]) - nodename = fullname.replace('.', '_') - dotlist.append('subgraph cluster_%s {' % nodename) + fullname = ".".join(hierarchy + [node.fullname]) + nodename = fullname.replace(".", "_") + dotlist.append("subgraph cluster_%s {" % nodename) if colored: - dotlist.append(prefix + prefix + 'edge [color="%s"];' % - (colorset[level + 1])) - dotlist.append(prefix + prefix + 'style=filled;') - dotlist.append(prefix + prefix + 'fillcolor="%s";' % - (colorset[level + 2])) + dotlist.append( + prefix + prefix + 'edge [color="%s"];' % (colorset[level + 1]) + ) + dotlist.append(prefix + prefix + "style=filled;") + dotlist.append( + prefix + prefix + 'fillcolor="%s";' % (colorset[level + 2]) + ) dotlist.append( node._get_dot( prefix=prefix + prefix, hierarchy=hierarchy + [self.name], colored=colored, simple_form=simple_form, - level=level + 3)) - dotlist.append('}') + level=level + 3, + ) + ) + dotlist.append("}") else: for subnode in self._graph.successors(node): if node._hierarchy != subnode._hierarchy: continue if not isinstance(subnode, Workflow): - nodefullname = 
'.'.join(hierarchy + [node.fullname]) - subnodefullname = '.'.join( - hierarchy + [subnode.fullname]) - nodename = nodefullname.replace('.', '_') - subnodename = subnodefullname.replace('.', '_') - for _ in self._graph.get_edge_data(node, - subnode)['connect']: - dotlist.append('%s -> %s;' % (nodename, - subnodename)) - logger.debug('connection: %s', dotlist[-1]) + nodefullname = ".".join(hierarchy + [node.fullname]) + subnodefullname = ".".join(hierarchy + [subnode.fullname]) + nodename = nodefullname.replace(".", "_") + subnodename = subnodefullname.replace(".", "_") + for _ in self._graph.get_edge_data(node, subnode)["connect"]: + dotlist.append("%s -> %s;" % (nodename, subnodename)) + logger.debug("connection: %s", dotlist[-1]) # add between workflow connections for u, v, d in self._graph.edges(data=True): - uname = '.'.join(hierarchy + [u.fullname]) - vname = '.'.join(hierarchy + [v.fullname]) - for src, dest in d['connect']: + uname = ".".join(hierarchy + [u.fullname]) + vname = ".".join(hierarchy + [v.fullname]) + for src, dest in d["connect"]: uname1 = uname vname1 = vname if isinstance(src, tuple): srcname = src[0] else: srcname = src - if '.' in srcname: - uname1 += '.' + '.'.join(srcname.split('.')[:-1]) - if '.' in dest and '@' not in dest: + if "." in srcname: + uname1 += "." + ".".join(srcname.split(".")[:-1]) + if "." in dest and "@" not in dest: if not isinstance(v, Workflow): - if 'datasink' not in \ - str(v._interface.__class__).lower(): - vname1 += '.' + '.'.join(dest.split('.')[:-1]) + if "datasink" not in str(v._interface.__class__).lower(): + vname1 += "." + ".".join(dest.split(".")[:-1]) else: - vname1 += '.' + '.'.join(dest.split('.')[:-1]) - if uname1.split('.')[:-1] != vname1.split('.')[:-1]: - dotlist.append('%s -> %s;' % (uname1.replace('.', '_'), - vname1.replace('.', '_'))) - logger.debug('cross connection: %s', dotlist[-1]) - return ('\n' + prefix).join(dotlist) + vname1 += "." 
+ ".".join(dest.split(".")[:-1]) + if uname1.split(".")[:-1] != vname1.split(".")[:-1]: + dotlist.append( + "%s -> %s;" + % (uname1.replace(".", "_"), vname1.replace(".", "_")) + ) + logger.debug("cross connection: %s", dotlist[-1]) + return ("\n" + prefix).join(dotlist) diff --git a/nipype/pipeline/plugins/base.py b/nipype/pipeline/plugins/base.py index 23c8faa5b9..f7fcb6dab1 100644 --- a/nipype/pipeline/plugins/base.py +++ b/nipype/pipeline/plugins/base.py @@ -19,7 +19,7 @@ from ..engine import MapNode from .tools import report_crash, report_nodes_not_run, create_pyscript -logger = logging.getLogger('nipype.workflow') +logger = logging.getLogger("nipype.workflow") class PluginBase(object): @@ -33,7 +33,7 @@ def __init__(self, plugin_args=None): plugin_args = {} self.plugin_args = plugin_args self._config = None - self._status_callback = plugin_args.get('status_callback') + self._status_callback = plugin_args.get("status_callback") def run(self, graph, config, updatehash=False): """ @@ -96,7 +96,7 @@ def __init__(self, plugin_args=None): self.proc_done = None self.proc_pending = None self.pending_tasks = [] - self.max_jobs = self.plugin_args.get('max_jobs', np.inf) + self.max_jobs = self.plugin_args.get("max_jobs", np.inf) def _prerun_check(self, graph): """Stub method to validate/massage graph and nodes before running""" @@ -110,7 +110,7 @@ def run(self, graph, config, updatehash=False): """ logger.info("Running in parallel.") self._config = config - poll_sleep_secs = float(config['execution']['poll_sleep_duration']) + poll_sleep_secs = float(config["execution"]["poll_sleep_duration"]) self._prerun_check(graph) # Generate appropriate structures for worker-manager model @@ -126,19 +126,24 @@ def run(self, graph, config, updatehash=False): loop_start = time() # Check if a job is available (jobs with all dependencies run) # https://github.com/nipy/nipype/pull/2200#discussion_r141605722 - jobs_ready = np.nonzero(~self.proc_done & - (self.depidx.sum(0) == 0))[1] - - progress_stats = (len(self.proc_done), - np.sum(self.proc_done ^ self.proc_pending), - np.sum(self.proc_done & self.proc_pending), - len(jobs_ready), len(self.pending_tasks), - np.sum(~self.proc_done & ~self.proc_pending)) + jobs_ready = np.nonzero(~self.proc_done & (self.depidx.sum(0) == 0))[1] + + progress_stats = ( + len(self.proc_done), + np.sum(self.proc_done ^ self.proc_pending), + np.sum(self.proc_done & self.proc_pending), + len(jobs_ready), + len(self.pending_tasks), + np.sum(~self.proc_done & ~self.proc_pending), + ) display_stats = progress_stats != old_progress_stats if display_stats: - logger.debug('Progress: %d jobs, %d/%d/%d ' - '(done/running/ready), %d/%d ' - '(pending_tasks/waiting).', *progress_stats) + logger.debug( + "Progress: %d jobs, %d/%d/%d " + "(done/running/ready), %d/%d " + "(pending_tasks/waiting).", + *progress_stats + ) old_progress_stats = progress_stats toappend = [] # trigger callbacks for any pending results @@ -147,37 +152,34 @@ def run(self, graph, config, updatehash=False): try: result = self._get_result(taskid) except Exception: - notrun.append( - self._clean_queue(jobid, graph)) + notrun.append(self._clean_queue(jobid, graph)) else: if result: - if result['traceback']: + if result["traceback"]: notrun.append( - self._clean_queue(jobid, graph, result=result)) + self._clean_queue(jobid, graph, result=result) + ) else: self._task_finished_cb(jobid) self._remove_node_dirs() self._clear_task(taskid) else: - assert self.proc_done[jobid] and \ - self.proc_pending[jobid] + assert 
self.proc_done[jobid] and self.proc_pending[jobid] toappend.insert(0, (taskid, jobid)) if toappend: self.pending_tasks.extend(toappend) num_jobs = len(self.pending_tasks) - presub_stats = (num_jobs, - np.sum(self.proc_done & self.proc_pending)) + presub_stats = (num_jobs, np.sum(self.proc_done & self.proc_pending)) display_stats = display_stats or presub_stats != old_presub_stats if display_stats: - logger.debug('Tasks currently running: %d. Pending: %d.', - *presub_stats) + logger.debug("Tasks currently running: %d. Pending: %d.", *presub_stats) old_presub_stats = presub_stats if num_jobs < self.max_jobs: self._send_procs_to_workers(updatehash=updatehash, graph=graph) elif display_stats: - logger.debug('Not submitting (max jobs reached)') + logger.debug("Not submitting (max jobs reached)") sleep_til = loop_start + poll_sleep_secs sleep(max(0, sleep_til - time())) @@ -197,8 +199,8 @@ def _submit_job(self, node, updatehash=False): def _report_crash(self, node, result=None): tb = None if result is not None: - node._result = result['result'] - tb = result['traceback'] + node._result = result["result"] + tb = result["traceback"] node._traceback = tb return report_crash(node, traceback=tb) @@ -206,17 +208,19 @@ def _clear_task(self, taskid): raise NotImplementedError def _clean_queue(self, jobid, graph, result=None): - logger.debug('Clearing %d from queue', jobid) + logger.debug("Clearing %d from queue", jobid) if self._status_callback: - self._status_callback(self.procs[jobid], 'exception') + self._status_callback(self.procs[jobid], "exception") if result is None: - result = {'result': None, - 'traceback': '\n'.join(format_exception(*sys.exc_info()))} + result = { + "result": None, + "traceback": "\n".join(format_exception(*sys.exc_info())), + } crashfile = self._report_crash(self.procs[jobid], result=result) - if str2bool(self._config['execution']['stop_on_first_crash']): - raise RuntimeError("".join(result['traceback'])) + if str2bool(self._config["execution"]["stop_on_first_crash"]): + raise RuntimeError("".join(result["traceback"])) if jobid in self.mapnodesubids: # remove current jobid self.proc_pending[jobid] = False @@ -230,29 +234,31 @@ def _clean_queue(self, jobid, graph, result=None): def _submit_mapnode(self, jobid): import scipy.sparse as ssp + if jobid in self.mapnodes: return True self.mapnodes.append(jobid) mapnodesubids = self.procs[jobid].get_subnodes() numnodes = len(mapnodesubids) - logger.debug('Adding %d jobs for mapnode %s', numnodes, - self.procs[jobid]) + logger.debug("Adding %d jobs for mapnode %s", numnodes, self.procs[jobid]) for i in range(numnodes): self.mapnodesubids[self.depidx.shape[0] + i] = jobid self.procs.extend(mapnodesubids) self.depidx = ssp.vstack( - (self.depidx, - ssp.lil_matrix(np.zeros( - (numnodes, self.depidx.shape[1])))), 'lil') + (self.depidx, ssp.lil_matrix(np.zeros((numnodes, self.depidx.shape[1])))), + "lil", + ) self.depidx = ssp.hstack( - (self.depidx, - ssp.lil_matrix(np.zeros( - (self.depidx.shape[0], numnodes)))), 'lil') + (self.depidx, ssp.lil_matrix(np.zeros((self.depidx.shape[0], numnodes)))), + "lil", + ) self.depidx[-numnodes:, jobid] = 1 - self.proc_done = np.concatenate((self.proc_done, - np.zeros(numnodes, dtype=bool))) - self.proc_pending = np.concatenate((self.proc_pending, - np.zeros(numnodes, dtype=bool))) + self.proc_done = np.concatenate( + (self.proc_done, np.zeros(numnodes, dtype=bool)) + ) + self.proc_pending = np.concatenate( + (self.proc_pending, np.zeros(numnodes, dtype=bool)) + ) return False def 
_send_procs_to_workers(self, updatehash=False, graph=None): @@ -266,7 +272,7 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): slots = None else: slots = max(0, self.max_jobs - num_jobs) - logger.debug('Slots available: %s', slots) + logger.debug("Slots available: %s", slots) if (num_jobs >= self.max_jobs) or (slots == 0): break @@ -276,8 +282,12 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): if len(jobids) > 0: # send all available jobs - logger.info('Pending[%d] Submitting[%d] jobs Slots[%s]', - num_jobs, len(jobids[:slots]), slots or 'inf') + logger.info( + "Pending[%d] Submitting[%d] jobs Slots[%s]", + num_jobs, + len(jobids[:slots]), + slots or "inf", + ) for jobid in jobids[:slots]: if isinstance(self.procs[jobid], MapNode): @@ -295,15 +305,15 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): self.proc_done[jobid] = True self.proc_pending[jobid] = True # Send job to task manager and add to pending tasks - logger.info('Submitting: %s ID: %d', - self.procs[jobid], jobid) + logger.info("Submitting: %s ID: %d", self.procs[jobid], jobid) if self._status_callback: - self._status_callback(self.procs[jobid], 'start') + self._status_callback(self.procs[jobid], "start") if not self._local_hash_check(jobid, graph): if self.procs[jobid].run_without_submitting: - logger.debug('Running node %s on master thread', - self.procs[jobid]) + logger.debug( + "Running node %s on master thread", self.procs[jobid] + ) try: self.procs[jobid].run() except Exception: @@ -312,55 +322,66 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): self._remove_node_dirs() else: tid = self._submit_job( - deepcopy(self.procs[jobid]), - updatehash=updatehash) + deepcopy(self.procs[jobid]), updatehash=updatehash + ) if tid is None: self.proc_done[jobid] = False self.proc_pending[jobid] = False else: self.pending_tasks.insert(0, (tid, jobid)) - logger.info('Finished submitting: %s ID: %d', - self.procs[jobid], jobid) + logger.info( + "Finished submitting: %s ID: %d", self.procs[jobid], jobid + ) else: break def _local_hash_check(self, jobid, graph): - if not str2bool( - self.procs[jobid].config['execution']['local_hash_check']): + if not str2bool(self.procs[jobid].config["execution"]["local_hash_check"]): return False try: cached, updated = self.procs[jobid].is_cached() except Exception: logger.warning( - 'Error while checking node hash, forcing re-run. ' - 'Although this error may not prevent the workflow from running, ' - 'it could indicate a major problem. Please report a new issue ' - 'at https://github.com/nipy/nipype/issues adding the following ' - 'information:\n\n\tNode: %s\n\tInterface: %s.%s\n\tTraceback:\n%s', + "Error while checking node hash, forcing re-run. " + "Although this error may not prevent the workflow from running, " + "it could indicate a major problem. 
Please report a new issue " + "at https://github.com/nipy/nipype/issues adding the following " + "information:\n\n\tNode: %s\n\tInterface: %s.%s\n\tTraceback:\n%s", self.procs[jobid], self.procs[jobid].interface.__module__, self.procs[jobid].interface.__class__.__name__, - '\n'.join(format_exception(*sys.exc_info())) + "\n".join(format_exception(*sys.exc_info())), ) return False - logger.debug('Checking hash "%s" locally: cached=%s, updated=%s.', - self.procs[jobid], cached, updated) + logger.debug( + 'Checking hash "%s" locally: cached=%s, updated=%s.', + self.procs[jobid], + cached, + updated, + ) overwrite = self.procs[jobid].overwrite always_run = self.procs[jobid].interface.always_run - if cached and updated and (overwrite is False or - overwrite is None and not always_run): - logger.debug('Skipping cached node %s with ID %s.', - self.procs[jobid], jobid) + if ( + cached + and updated + and (overwrite is False or overwrite is None and not always_run) + ): + logger.debug( + "Skipping cached node %s with ID %s.", self.procs[jobid], jobid + ) try: self._task_finished_cb(jobid, cached=True) self._remove_node_dirs() except Exception: - logger.debug('Error skipping cached node %s (%s).\n\n%s', - self.procs[jobid], jobid, - '\n'.join(format_exception(*sys.exc_info()))) + logger.debug( + "Error skipping cached node %s (%s).\n\n%s", + self.procs[jobid], + jobid, + "\n".join(format_exception(*sys.exc_info())), + ) self._clean_queue(jobid, graph) self.proc_pending[jobid] = False return True @@ -371,10 +392,14 @@ def _task_finished_cb(self, jobid, cached=False): This is called when a job is completed. """ - logger.info('[Job %d] %s (%s).', jobid, 'Cached' - if cached else 'Completed', self.procs[jobid]) + logger.info( + "[Job %d] %s (%s).", + jobid, + "Cached" if cached else "Completed", + self.procs[jobid], + ) if self._status_callback: - self._status_callback(self.procs[jobid], 'end') + self._status_callback(self.procs[jobid], "end") # Update job and worker queues self.proc_pending[jobid] = False # update the job dependency structure @@ -391,7 +416,8 @@ def _generate_dependency_list(self, graph): self.procs, _ = topological_sort(graph) try: self.depidx = nx.to_scipy_sparse_matrix( - graph, nodelist=self.procs, format='lil') + graph, nodelist=self.procs, format="lil" + ) except: self.depidx = nx.to_scipy_sparse_matrix(graph, nodelist=self.procs) self.refidx = deepcopy(self.depidx) @@ -401,6 +427,7 @@ def _generate_dependency_list(self, graph): def _remove_node_deps(self, jobid, crashfile, graph): import networkx as nx + try: dfs_preorder = nx.dfs_preorder except AttributeError: @@ -410,13 +437,12 @@ def _remove_node_deps(self, jobid, crashfile, graph): idx = self.procs.index(node) self.proc_done[idx] = True self.proc_pending[idx] = False - return dict( - node=self.procs[jobid], dependents=subnodes, crashfile=crashfile) + return dict(node=self.procs[jobid], dependents=subnodes, crashfile=crashfile) def _remove_node_dirs(self): """Removes directories whose outputs have already been used up """ - if str2bool(self._config['execution']['remove_node_directories']): + if str2bool(self._config["execution"]["remove_node_directories"]): indices = np.nonzero((self.refidx.sum(axis=1) == 0).__array__())[0] for idx in indices: if idx in self.mapnodesubids: @@ -424,9 +450,13 @@ def _remove_node_dirs(self): if self.proc_done[idx] and (not self.proc_pending[idx]): self.refidx[idx, idx] = -1 outdir = self.procs[idx].output_dir() - logger.info(('[node dependencies finished] ' - 'removing node: %s from directory 
%s') % - (self.procs[idx]._id, outdir)) + logger.info( + ( + "[node dependencies finished] " + "removing node: %s from directory %s" + ) + % (self.procs[idx]._id, outdir) + ) shutil.rmtree(outdir) @@ -439,13 +469,13 @@ def __init__(self, template, plugin_args=None): self._template = template self._qsub_args = None if plugin_args: - if 'template' in plugin_args: - self._template = plugin_args['template'] + if "template" in plugin_args: + self._template = plugin_args["template"] if os.path.isfile(self._template): with open(self._template) as tpl_file: self._template = tpl_file.read() - if 'qsub_args' in plugin_args: - self._qsub_args = plugin_args['qsub_args'] + if "qsub_args" in plugin_args: + self._qsub_args = plugin_args["qsub_args"] self._pending = {} def _is_pending(self, taskid): @@ -460,7 +490,7 @@ def _submit_batchtask(self, scriptfile, node): def _get_result(self, taskid): if taskid not in self._pending: - raise Exception('Task %d not found' % taskid) + raise Exception("Task %d not found" % taskid) if self._is_pending(taskid): return None node_dir = self._pending[taskid] @@ -470,46 +500,43 @@ def _get_result(self, taskid): # is a disconnect when the queueing engine knows a job is # finished to when the directories become statable. t = time() - timeout = float(self._config['execution']['job_finished_timeout']) + timeout = float(self._config["execution"]["job_finished_timeout"]) timed_out = True while (time() - t) < timeout: try: - glob(os.path.join(node_dir, 'result_*.pklz')).pop() + glob(os.path.join(node_dir, "result_*.pklz")).pop() timed_out = False break except Exception as e: logger.debug(e) sleep(2) if timed_out: - result_data = { - 'hostname': 'unknown', - 'result': None, - 'traceback': None - } + result_data = {"hostname": "unknown", "result": None, "traceback": None} results_file = None try: - error_message = ('Job id ({0}) finished or terminated, but ' - 'results file does not exist after ({1}) ' - 'seconds. Batch dir contains crashdump file ' - 'if node raised an exception.\n' - 'Node working directory: ({2}) '.format( - taskid, timeout, node_dir)) + error_message = ( + "Job id ({0}) finished or terminated, but " + "results file does not exist after ({1}) " + "seconds. 
Batch dir contains crashdump file " + "if node raised an exception.\n" + "Node working directory: ({2}) ".format(taskid, timeout, node_dir) + ) raise IOError(error_message) except IOError as e: - result_data['traceback'] = '\n'.join(format_exception(*sys.exc_info())) + result_data["traceback"] = "\n".join(format_exception(*sys.exc_info())) else: - results_file = glob(os.path.join(node_dir, 'result_*.pklz'))[0] + results_file = glob(os.path.join(node_dir, "result_*.pklz"))[0] result_data = load_resultfile(results_file) result_out = dict(result=None, traceback=None) if isinstance(result_data, dict): - result_out['result'] = result_data['result'] - result_out['traceback'] = result_data['traceback'] - result_out['hostname'] = result_data['hostname'] + result_out["result"] = result_data["result"] + result_out["traceback"] = result_data["traceback"] + result_out["hostname"] = result_data["hostname"] if results_file: - crash_file = os.path.join(node_dir, 'crashstore.pklz') + crash_file = os.path.join(node_dir, "crashstore.pklz") os.rename(results_file, crash_file) else: - result_out['result'] = result_data + result_out["result"] = result_data return result_out def _submit_job(self, node, updatehash=False): @@ -517,11 +544,10 @@ def _submit_job(self, node, updatehash=False): """ pyscript = create_pyscript(node, updatehash=updatehash) batch_dir, name = os.path.split(pyscript) - name = '.'.join(name.split('.')[:-1]) - batchscript = '\n'.join((self._template, '%s %s' % (sys.executable, - pyscript))) - batchscriptfile = os.path.join(batch_dir, 'batchscript_%s.sh' % name) - with open(batchscriptfile, 'wt') as fp: + name = ".".join(name.split(".")[:-1]) + batchscript = "\n".join((self._template, "%s %s" % (sys.executable, pyscript))) + batchscriptfile = os.path.join(batch_dir, "batchscript_%s.sh" % name) + with open(batchscriptfile, "wt") as fp: fp.writelines(batchscript) return self._submit_batchtask(batchscriptfile, node) @@ -534,25 +560,27 @@ class GraphPluginBase(PluginBase): """ def __init__(self, plugin_args=None): - if plugin_args and plugin_args.get('status_callback'): - logger.warning('status_callback not supported for Graph submission' - ' plugins') + if plugin_args and plugin_args.get("status_callback"): + logger.warning( + "status_callback not supported for Graph submission" " plugins" + ) super(GraphPluginBase, self).__init__(plugin_args=plugin_args) def run(self, graph, config, updatehash=False): import networkx as nx + pyfiles = [] dependencies = {} self._config = config nodes = list(nx.topological_sort(graph)) - logger.debug('Creating executable python files for each node') + logger.debug("Creating executable python files for each node") for idx, node in enumerate(nodes): pyfiles.append( - create_pyscript( - node, updatehash=updatehash, store_exception=False)) + create_pyscript(node, updatehash=updatehash, store_exception=False) + ) dependencies[idx] = [ - nodes.index(prevnode) - for prevnode in list(graph.predecessors(node))] + nodes.index(prevnode) for prevnode in list(graph.predecessors(node)) + ] self._submit_graph(pyfiles, dependencies, nodes) def _get_args(self, node, keywords): @@ -562,22 +590,22 @@ def _get_args(self, node, keywords): if keyword == "template" and os.path.isfile(value): with open(value) as f: value = f.read() - if (hasattr(node, "plugin_args") - and isinstance(node.plugin_args, dict) - and keyword in node.plugin_args): - if (keyword == "template" - and os.path.isfile(node.plugin_args[keyword])): + if ( + hasattr(node, "plugin_args") + and 
isinstance(node.plugin_args, dict) + and keyword in node.plugin_args + ): + if keyword == "template" and os.path.isfile(node.plugin_args[keyword]): with open(node.plugin_args[keyword]) as f: tmp_value = f.read() else: tmp_value = node.plugin_args[keyword] - if ('overwrite' in node.plugin_args - and node.plugin_args['overwrite']): + if "overwrite" in node.plugin_args and node.plugin_args["overwrite"]: value = tmp_value else: value += tmp_value - values += (value, ) + values += (value,) return values def _submit_graph(self, pyfiles, dependencies, nodes): @@ -589,25 +617,25 @@ def _submit_graph(self, pyfiles, dependencies, nodes): def _get_result(self, taskid): if taskid not in self._pending: - raise Exception('Task %d not found' % taskid) + raise Exception("Task %d not found" % taskid) if self._is_pending(taskid): return None node_dir = self._pending[taskid] - glob(os.path.join(node_dir, 'result_*.pklz')).pop() + glob(os.path.join(node_dir, "result_*.pklz")).pop() - results_file = glob(os.path.join(node_dir, 'result_*.pklz'))[0] + results_file = glob(os.path.join(node_dir, "result_*.pklz"))[0] result_data = load_resultfile(results_file) result_out = dict(result=None, traceback=None) if isinstance(result_data, dict): - result_out['result'] = result_data['result'] - result_out['traceback'] = result_data['traceback'] - result_out['hostname'] = result_data['hostname'] + result_out["result"] = result_data["result"] + result_out["traceback"] = result_data["traceback"] + result_out["hostname"] = result_data["hostname"] if results_file: - crash_file = os.path.join(node_dir, 'crashstore.pklz') + crash_file = os.path.join(node_dir, "crashstore.pklz") os.rename(results_file, crash_file) else: - result_out['result'] = result_data + result_out["result"] = result_data return result_out diff --git a/nipype/pipeline/plugins/condor.py b/nipype/pipeline/plugins/condor.py index bdf598c5f6..cd0ad985e2 100644 --- a/nipype/pipeline/plugins/condor.py +++ b/nipype/pipeline/plugins/condor.py @@ -8,7 +8,8 @@ from ...interfaces.base import CommandLine from ... 
import logging from .base import SGELikeBatchManagerBase, logger -iflogger = logging.getLogger('nipype.interface') + +iflogger = logging.getLogger("nipype.interface") class CondorPlugin(SGELikeBatchManagerBase): @@ -38,59 +39,59 @@ def __init__(self, **kwargs): """ self._retry_timeout = 2 self._max_tries = 2 - if 'plugin_args' in kwargs and kwargs['plugin_args']: - if 'retry_timeout' in kwargs['plugin_args']: - self._retry_timeout = kwargs['plugin_args']['retry_timeout'] - if 'max_tries' in kwargs['plugin_args']: - self._max_tries = kwargs['plugin_args']['max_tries'] + if "plugin_args" in kwargs and kwargs["plugin_args"]: + if "retry_timeout" in kwargs["plugin_args"]: + self._retry_timeout = kwargs["plugin_args"]["retry_timeout"] + if "max_tries" in kwargs["plugin_args"]: + self._max_tries = kwargs["plugin_args"]["max_tries"] super(CondorPlugin, self).__init__(template, **kwargs) def _is_pending(self, taskid): cmd = CommandLine( - 'condor_q', resource_monitor=False, terminal_output='allatonce') - cmd.inputs.args = '%d' % taskid + "condor_q", resource_monitor=False, terminal_output="allatonce" + ) + cmd.inputs.args = "%d" % taskid # check condor cluster oldlevel = iflogger.level - iflogger.setLevel(logging.getLevelName('CRITICAL')) + iflogger.setLevel(logging.getLevelName("CRITICAL")) result = cmd.run(ignore_exception=True) iflogger.setLevel(oldlevel) - if result.runtime.stdout.count('\n%d' % taskid): + if result.runtime.stdout.count("\n%d" % taskid): return True return False def _submit_batchtask(self, scriptfile, node): cmd = CommandLine( - 'condor_qsub', + "condor_qsub", environ=dict(os.environ), resource_monitor=False, - terminal_output='allatonce') + terminal_output="allatonce", + ) path = os.path.dirname(scriptfile) - qsubargs = '' + qsubargs = "" if self._qsub_args: qsubargs = self._qsub_args - if 'qsub_args' in node.plugin_args: - if 'overwrite' in node.plugin_args and\ - node.plugin_args['overwrite']: - qsubargs = node.plugin_args['qsub_args'] + if "qsub_args" in node.plugin_args: + if "overwrite" in node.plugin_args and node.plugin_args["overwrite"]: + qsubargs = node.plugin_args["qsub_args"] else: - qsubargs += (" " + node.plugin_args['qsub_args']) + qsubargs += " " + node.plugin_args["qsub_args"] if self._qsub_args: qsubargs = self._qsub_args - if '-o' not in qsubargs: - qsubargs = '%s -o %s' % (qsubargs, path) - if '-e' not in qsubargs: - qsubargs = '%s -e %s' % (qsubargs, path) + if "-o" not in qsubargs: + qsubargs = "%s -o %s" % (qsubargs, path) + if "-e" not in qsubargs: + qsubargs = "%s -e %s" % (qsubargs, path) if node._hierarchy: - jobname = '.'.join((dict(os.environ)['LOGNAME'], node._hierarchy, - node._id)) + jobname = ".".join((dict(os.environ)["LOGNAME"], node._hierarchy, node._id)) else: - jobname = '.'.join((dict(os.environ)['LOGNAME'], node._id)) - jobnameitems = jobname.split('.') + jobname = ".".join((dict(os.environ)["LOGNAME"], node._id)) + jobnameitems = jobname.split(".") jobnameitems.reverse() - jobname = '.'.join(jobnameitems) - cmd.inputs.args = '%s -N %s %s' % (qsubargs, jobname, scriptfile) + jobname = ".".join(jobnameitems) + cmd.inputs.args = "%s -N %s %s" % (qsubargs, jobname, scriptfile) oldlevel = iflogger.level - iflogger.setLevel(logging.getLevelName('CRITICAL')) + iflogger.setLevel(logging.getLevelName("CRITICAL")) tries = 0 while True: try: @@ -101,16 +102,20 @@ def _submit_batchtask(self, scriptfile, node): sleep(self._retry_timeout) # sleep 2 seconds and try again else: iflogger.setLevel(oldlevel) - raise RuntimeError('\n'.join((('Could not 
submit condor ' - 'cluster' - ' for node %s') % node._id, - str(e)))) + raise RuntimeError( + "\n".join( + ( + ("Could not submit condor " "cluster" " for node %s") + % node._id, + str(e), + ) + ) + ) else: break iflogger.setLevel(oldlevel) # retrieve condor clusterid - taskid = int(result.runtime.stdout.split(' ')[2]) + taskid = int(result.runtime.stdout.split(" ")[2]) self._pending[taskid] = node.output_dir() - logger.debug('submitted condor cluster: %d for node %s' % (taskid, - node._id)) + logger.debug("submitted condor cluster: %d for node %s" % (taskid, node._id)) return taskid diff --git a/nipype/pipeline/plugins/dagman.py b/nipype/pipeline/plugins/dagman.py index 9db1f70fe5..98b07eeb10 100644 --- a/nipype/pipeline/plugins/dagman.py +++ b/nipype/pipeline/plugins/dagman.py @@ -65,35 +65,37 @@ def _get_str_or_file(self, arg): # actually have to run. would be good to be able to decide whether they # actually have to be scheduled (i.e. output already exist). def __init__(self, **kwargs): - for var, id_, val in \ - (('_template', 'submit_template', self.default_submit_template), - ('_initial_specs', 'template', ''), - ('_initial_specs', 'initial_specs', ''), - ('_override_specs', 'submit_specs', ''), - ('_override_specs', 'override_specs', ''), - ('_wrapper_cmd', 'wrapper_cmd', None), - ('_wrapper_args', 'wrapper_args', ''), - ('_block', 'block', False), - ('_dagman_args', 'dagman_args', '')): - if 'plugin_args' in kwargs \ - and not kwargs['plugin_args'] is None \ - and id_ in kwargs['plugin_args']: - if id_ == 'wrapper_cmd': - val = os.path.abspath(kwargs['plugin_args'][id_]) - elif id_ == 'block': - val = kwargs['plugin_args'][id_] + for var, id_, val in ( + ("_template", "submit_template", self.default_submit_template), + ("_initial_specs", "template", ""), + ("_initial_specs", "initial_specs", ""), + ("_override_specs", "submit_specs", ""), + ("_override_specs", "override_specs", ""), + ("_wrapper_cmd", "wrapper_cmd", None), + ("_wrapper_args", "wrapper_args", ""), + ("_block", "block", False), + ("_dagman_args", "dagman_args", ""), + ): + if ( + "plugin_args" in kwargs + and not kwargs["plugin_args"] is None + and id_ in kwargs["plugin_args"] + ): + if id_ == "wrapper_cmd": + val = os.path.abspath(kwargs["plugin_args"][id_]) + elif id_ == "block": + val = kwargs["plugin_args"][id_] else: - val = self._get_str_or_file(kwargs['plugin_args'][id_]) + val = self._get_str_or_file(kwargs["plugin_args"][id_]) setattr(self, var, val) # TODO remove after some time - if 'plugin_args' in kwargs \ - and not kwargs['plugin_args'] is None: - plugin_args = kwargs['plugin_args'] - if 'template' in plugin_args: + if "plugin_args" in kwargs and not kwargs["plugin_args"] is None: + plugin_args = kwargs["plugin_args"] + if "template" in plugin_args: warn( "the 'template' argument is deprecated, use 'initial_specs' instead" ) - if 'submit_specs' in plugin_args: + if "submit_specs" in plugin_args: warn( "the 'submit_specs' argument is deprecated, use 'override_specs' instead" ) @@ -103,73 +105,89 @@ def _submit_graph(self, pyfiles, dependencies, nodes): # location of all scripts, place dagman output in here too batch_dir, _ = os.path.split(pyfiles[0]) # DAG description filename - dagfilename = os.path.join(batch_dir, 'workflow-%s.dag' % uuid.uuid4()) - with open(dagfilename, 'wt') as dagfileptr: + dagfilename = os.path.join(batch_dir, "workflow-%s.dag" % uuid.uuid4()) + with open(dagfilename, "wt") as dagfileptr: # loop over all scripts, create submit files, and define them # as jobs in the DAG for idx, 
pyscript in enumerate(pyfiles): node = nodes[idx] # XXX redundant with previous value? or could it change between # scripts? - template, initial_specs, override_specs, wrapper_cmd, wrapper_args = \ - self._get_args(node, - ["template", "initial_specs", - "override_specs", "wrapper_cmd", - "wrapper_args"]) + ( + template, + initial_specs, + override_specs, + wrapper_cmd, + wrapper_args, + ) = self._get_args( + node, + [ + "template", + "initial_specs", + "override_specs", + "wrapper_cmd", + "wrapper_args", + ], + ) # add required slots to the template - template = '%s\n%s\n%s\nqueue\n' % ('%(initial_specs)s', - template, - '%(override_specs)s') + template = "%s\n%s\n%s\nqueue\n" % ( + "%(initial_specs)s", + template, + "%(override_specs)s", + ) batch_dir, name = os.path.split(pyscript) - name = '.'.join(name.split('.')[:-1]) + name = ".".join(name.split(".")[:-1]) specs = dict( # TODO make parameter for this, initial_specs=initial_specs, executable=sys.executable, nodescript=pyscript, basename=os.path.join(batch_dir, name), - override_specs=override_specs) + override_specs=override_specs, + ) if wrapper_cmd is not None: - specs['executable'] = wrapper_cmd - specs['nodescript'] = \ - '%s %s %s' % (wrapper_args % specs, # give access to variables - sys.executable, - pyscript) + specs["executable"] = wrapper_cmd + specs["nodescript"] = "%s %s %s" % ( + wrapper_args % specs, # give access to variables + sys.executable, + pyscript, + ) submitspec = template % specs # write submit spec for this job - submitfile = os.path.join(batch_dir, '%s.submit' % name) - with open(submitfile, 'wt') as submitfileprt: + submitfile = os.path.join(batch_dir, "%s.submit" % name) + with open(submitfile, "wt") as submitfileprt: submitfileprt.writelines(submitspec) submitfileprt.close() # define job in DAG - dagfileptr.write('JOB %i %s\n' % (idx, submitfile)) + dagfileptr.write("JOB %i %s\n" % (idx, submitfile)) # define dependencies in DAG for child in dependencies: parents = dependencies[child] if len(parents): - dagfileptr.write('PARENT %s CHILD %i\n' % - (' '.join([str(i) for i in parents]), - child)) + dagfileptr.write( + "PARENT %s CHILD %i\n" + % (" ".join([str(i) for i in parents]), child) + ) # hand over DAG to condor_dagman cmd = CommandLine( - 'condor_submit_dag', + "condor_submit_dag", environ=dict(os.environ), resource_monitor=False, - terminal_output='allatonce') + terminal_output="allatonce", + ) # needs -update_submit or re-running a workflow will fail - cmd.inputs.args = '%s -update_submit %s' % (self._dagman_args, - dagfilename) + cmd.inputs.args = "%s -update_submit %s" % (self._dagman_args, dagfilename) cmd.run() - logger.info('submitted all jobs to Condor DAGMan') + logger.info("submitted all jobs to Condor DAGMan") if self._block: # wait for DAGMan to settle down, no time wasted it is already running time.sleep(10) - if not os.path.exists('%s.condor.sub' % dagfilename): + if not os.path.exists("%s.condor.sub" % dagfilename): raise EnvironmentError( "DAGMan did not create its submit file, please check the logs" ) # wait for completion - logger.info('waiting for DAGMan to finish') - lockfilename = '%s.lock' % dagfilename + logger.info("waiting for DAGMan to finish") + lockfilename = "%s.lock" % dagfilename while os.path.exists(lockfilename): time.sleep(5) diff --git a/nipype/pipeline/plugins/debug.py b/nipype/pipeline/plugins/debug.py index 0b9b009c2f..16ea8f44ee 100644 --- a/nipype/pipeline/plugins/debug.py +++ b/nipype/pipeline/plugins/debug.py @@ -14,11 +14,14 @@ class DebugPlugin(PluginBase): 
def __init__(self, plugin_args=None): super(DebugPlugin, self).__init__(plugin_args=plugin_args) - if plugin_args and "callable" in plugin_args and \ - hasattr(plugin_args['callable'], '__call__'): - self._callable = plugin_args['callable'] + if ( + plugin_args + and "callable" in plugin_args + and hasattr(plugin_args["callable"], "__call__") + ): + self._callable = plugin_args["callable"] else: - raise ValueError('plugin_args must contain a callable function') + raise ValueError("plugin_args must contain a callable function") def run(self, graph, config, updatehash=False): """Executes a pre-defined pipeline in a serial order. @@ -31,7 +34,7 @@ def run(self, graph, config, updatehash=False): """ if not isinstance(graph, nx.DiGraph): - raise ValueError('Input must be a networkx digraph object') + raise ValueError("Input must be a networkx digraph object") logger.info("Executing debug plugin") for node in nx.topological_sort(graph): self._callable(node, graph) diff --git a/nipype/pipeline/plugins/ipython.py b/nipype/pipeline/plugins/ipython.py index eafeb904e3..8a786a16f1 100644 --- a/nipype/pipeline/plugins/ipython.py +++ b/nipype/pipeline/plugins/ipython.py @@ -6,7 +6,7 @@ from pickle import dumps import sys -from .base import (DistributedPluginBase, logger, report_crash) +from .base import DistributedPluginBase, logger, report_crash IPython_not_loaded = False try: @@ -20,19 +20,23 @@ def execute_task(pckld_task, node_config, updatehash): from socket import gethostname from traceback import format_exc from nipype import config, logging + traceback = None result = None import os + cwd = os.getcwd() try: config.update_config(node_config) logging.update_logging(config) from pickle import loads + task = loads(pckld_task) result = task.run(updatehash=updatehash) except: traceback = format_exc() from pickle import loads + task = loads(pckld_task) result = task.result os.chdir(cwd) @@ -45,14 +49,24 @@ class IPythonPlugin(DistributedPluginBase): def __init__(self, plugin_args=None): if IPython_not_loaded: - raise ImportError('Please install ipyparallel to use this plugin.') + raise ImportError("Please install ipyparallel to use this plugin.") super(IPythonPlugin, self).__init__(plugin_args=plugin_args) - valid_args = ('url_file', 'profile', 'cluster_id', 'context', 'debug', - 'timeout', 'config', 'username', 'sshserver', 'sshkey', - 'password', 'paramiko') + valid_args = ( + "url_file", + "profile", + "cluster_id", + "context", + "debug", + "timeout", + "config", + "username", + "sshserver", + "sshkey", + "password", + "paramiko", + ) self.client_args = { - arg: plugin_args[arg] - for arg in valid_args if arg in plugin_args + arg: plugin_args[arg] for arg in valid_args if arg in plugin_args } self.iparallel = None self.taskclient = None @@ -65,36 +79,35 @@ def run(self, graph, config, updatehash=False): """ # retrieve clients again try: - name = 'ipyparallel' + name = "ipyparallel" __import__(name) self.iparallel = sys.modules[name] except ImportError as e: - raise ImportError("ipyparallel not found. Parallel execution " - "will be unavailable") from e + raise ImportError( + "ipyparallel not found. 
Parallel execution " "will be unavailable" + ) from e try: self.taskclient = self.iparallel.Client(**self.client_args) except Exception as e: if isinstance(e, TimeoutError): raise Exception("No IPython clients found.") from e if isinstance(e, IOError): - raise Exception("ipcluster/ipcontroller has not been started") \ - from e + raise Exception("ipcluster/ipcontroller has not been started") from e if isinstance(e, ValueError): raise Exception("Ipython kernel not installed") from e else: raise e - return super(IPythonPlugin, self).run( - graph, config, updatehash=updatehash) + return super(IPythonPlugin, self).run(graph, config, updatehash=updatehash) def _get_result(self, taskid): if taskid not in self.taskmap: - raise ValueError('Task %d not in pending list' % taskid) + raise ValueError("Task %d not in pending list" % taskid) if self.taskmap[taskid].ready(): result, traceback, hostname = self.taskmap[taskid].get() result_out = dict(result=None, traceback=None) - result_out['result'] = result - result_out['traceback'] = traceback - result_out['hostname'] = hostname + result_out["result"] = result + result_out["traceback"] = traceback + result_out["hostname"] = hostname return result_out else: return None @@ -102,21 +115,22 @@ def _get_result(self, taskid): def _submit_job(self, node, updatehash=False): pckld_node = dumps(node, 2) result_object = self.taskclient.load_balanced_view().apply( - execute_task, pckld_node, node.config, updatehash) + execute_task, pckld_node, node.config, updatehash + ) self._taskid += 1 self.taskmap[self._taskid] = result_object return self._taskid def _report_crash(self, node, result=None): - if result and result['traceback']: - node._result = result['result'] - node._traceback = result['traceback'] - return report_crash(node, traceback=result['traceback']) + if result and result["traceback"]: + node._result = result["result"] + node._traceback = result["traceback"] + return report_crash(node, traceback=result["traceback"]) else: return report_crash(node) def _clear_task(self, taskid): - if IPyversion >= '0.11': + if IPyversion >= "0.11": logger.debug("Clearing id: %d" % taskid) self.taskclient.purge_results(self.taskmap[taskid]) del self.taskmap[taskid] diff --git a/nipype/pipeline/plugins/legacymultiproc.py b/nipype/pipeline/plugins/legacymultiproc.py index 451770e2bd..620aadb422 100644 --- a/nipype/pipeline/plugins/legacymultiproc.py +++ b/nipype/pipeline/plugins/legacymultiproc.py @@ -36,7 +36,7 @@ def indent(text, prefix): # Init logger -logger = logging.getLogger('nipype.workflow') +logger = logging.getLogger("nipype.workflow") # Run node @@ -64,14 +64,15 @@ def run_node(node, updatehash, taskid): # Try and execute the node via node.run() try: - result['result'] = node.run(updatehash=updatehash) + result["result"] = node.run(updatehash=updatehash) except: # noqa: E722, intendedly catch all here - result['traceback'] = format_exception(*sys.exc_info()) - result['result'] = node.result + result["traceback"] = format_exception(*sys.exc_info()) + result["result"] = node.result # Return the result dictionary return result + # Pythons 2.7, 3.4-3.7.0, and 3.7.1 have three different implementations of # pool.Pool().Process(), and the type of the result varies based on the default # multiprocessing context, so we need to dynamically patch the daemon property @@ -84,51 +85,70 @@ def daemon(self): def daemon(self, val): pass + try: from multiprocessing import context + # Exists on all platforms class NonDaemonSpawnProcess(NonDaemonMixin, context.SpawnProcess): pass + 
class NonDaemonSpawnContext(context.SpawnContext): Process = NonDaemonSpawnProcess - _nondaemon_context_mapper = { - 'spawn': NonDaemonSpawnContext() - } + + _nondaemon_context_mapper = {"spawn": NonDaemonSpawnContext()} # POSIX only try: + class NonDaemonForkProcess(NonDaemonMixin, context.ForkProcess): pass + class NonDaemonForkContext(context.ForkContext): Process = NonDaemonForkProcess - _nondaemon_context_mapper['fork'] = NonDaemonForkContext() + + _nondaemon_context_mapper["fork"] = NonDaemonForkContext() except AttributeError: pass # POSIX only try: + class NonDaemonForkServerProcess(NonDaemonMixin, context.ForkServerProcess): pass + class NonDaemonForkServerContext(context.ForkServerContext): Process = NonDaemonForkServerProcess - _nondaemon_context_mapper['forkserver'] = NonDaemonForkServerContext() + + _nondaemon_context_mapper["forkserver"] = NonDaemonForkServerContext() except AttributeError: pass class NonDaemonPool(pool.Pool): - def __init__(self, processes=None, initializer=None, initargs=(), - maxtasksperchild=None, context=None): + def __init__( + self, + processes=None, + initializer=None, + initargs=(), + maxtasksperchild=None, + context=None, + ): if context is None: context = mp.get_context() context = _nondaemon_context_mapper[context._name] - super(NonDaemonPool, self).__init__(processes=processes, - initializer=initializer, - initargs=initargs, - maxtasksperchild=maxtasksperchild, - context=context) + super(NonDaemonPool, self).__init__( + processes=processes, + initializer=initializer, + initargs=initargs, + maxtasksperchild=maxtasksperchild, + context=context, + ) + except ImportError: + class NonDaemonProcess(NonDaemonMixin, mp.Process): pass + class NonDaemonPool(pool.Pool): Process = NonDaemonProcess @@ -179,19 +199,24 @@ def __init__(self, plugin_args=None): self._cwd = os.getcwd() # Read in options or set defaults. 
- non_daemon = self.plugin_args.get('non_daemon', True) - maxtasks = self.plugin_args.get('maxtasksperchild', 10) - self.processors = self.plugin_args.get('n_procs', cpu_count()) + non_daemon = self.plugin_args.get("non_daemon", True) + maxtasks = self.plugin_args.get("maxtasksperchild", 10) + self.processors = self.plugin_args.get("n_procs", cpu_count()) self.memory_gb = self.plugin_args.get( - 'memory_gb', # Allocate 90% of system memory - get_system_total_memory_gb() * 0.9) - self.raise_insufficient = self.plugin_args.get('raise_insufficient', - True) + "memory_gb", # Allocate 90% of system memory + get_system_total_memory_gb() * 0.9, + ) + self.raise_insufficient = self.plugin_args.get("raise_insufficient", True) # Instantiate different thread pools for non-daemon processes - logger.debug('[LegacyMultiProc] Starting in "%sdaemon" mode (n_procs=%d, ' - 'mem_gb=%0.2f, cwd=%s)', 'non' * int(non_daemon), - self.processors, self.memory_gb, self._cwd) + logger.debug( + '[LegacyMultiProc] Starting in "%sdaemon" mode (n_procs=%d, ' + "mem_gb=%0.2f, cwd=%s)", + "non" * int(non_daemon), + self.processors, + self.memory_gb, + self._cwd, + ) NipypePool = NonDaemonPool if non_daemon else Pool try: @@ -199,7 +224,7 @@ def __init__(self, plugin_args=None): processes=self.processors, maxtasksperchild=maxtasks, initializer=os.chdir, - initargs=(self._cwd,) + initargs=(self._cwd,), ) except TypeError: # Python < 3.2 does not have maxtasksperchild @@ -212,7 +237,7 @@ def __init__(self, plugin_args=None): def _async_callback(self, args): # Make sure runtime is not left at a dubious working directory os.chdir(self._cwd) - self._taskresult[args['taskid']] = args + self._taskresult[args["taskid"]] = args def _get_result(self, taskid): return self._taskresult.get(taskid) @@ -224,15 +249,18 @@ def _submit_job(self, node, updatehash=False): self._taskid += 1 # Don't allow streaming outputs - if getattr(node.interface, 'terminal_output', '') == 'stream': - node.interface.terminal_output = 'allatonce' + if getattr(node.interface, "terminal_output", "") == "stream": + node.interface.terminal_output = "allatonce" self._task_obj[self._taskid] = self.pool.apply_async( - run_node, (node, updatehash, self._taskid), - callback=self._async_callback) - - logger.debug('[LegacyMultiProc] Submitted task %s (taskid=%d).', - node.fullname, self._taskid) + run_node, (node, updatehash, self._taskid), callback=self._async_callback + ) + + logger.debug( + "[LegacyMultiProc] Submitted task %s (taskid=%d).", + node.fullname, + self._taskid, + ) return self._taskid def _prerun_check(self, graph): @@ -245,17 +273,19 @@ def _prerun_check(self, graph): if np.any(np.array(tasks_mem_gb) > self.memory_gb): logger.warning( - 'Some nodes exceed the total amount of memory available ' - '(%0.2fGB).', self.memory_gb) + "Some nodes exceed the total amount of memory available " "(%0.2fGB).", + self.memory_gb, + ) if self.raise_insufficient: - raise RuntimeError('Insufficient resources available for job') + raise RuntimeError("Insufficient resources available for job") if np.any(np.array(tasks_num_th) > self.processors): logger.warning( - 'Some nodes demand for more threads than available (%d).', - self.processors) + "Some nodes demand for more threads than available (%d).", + self.processors, + ) if self.raise_insufficient: - raise RuntimeError('Insufficient resources available for job') + raise RuntimeError("Insufficient resources available for job") def _postrun_check(self): self.pool.close() @@ -280,46 +310,58 @@ def 
_send_procs_to_workers(self, updatehash=False, graph=None): # Check to see if a job is available (jobs with all dependencies run) # See https://github.com/nipy/nipype/pull/2200#discussion_r141605722 # See also https://github.com/nipy/nipype/issues/2372 - jobids = np.flatnonzero(~self.proc_done & - (self.depidx.sum(axis=0) == 0).__array__()) + jobids = np.flatnonzero( + ~self.proc_done & (self.depidx.sum(axis=0) == 0).__array__() + ) # Check available resources by summing all threads and memory used - free_memory_gb, free_processors = self._check_resources( - self.pending_tasks) - - stats = (len(self.pending_tasks), len(jobids), free_memory_gb, - self.memory_gb, free_processors, self.processors) + free_memory_gb, free_processors = self._check_resources(self.pending_tasks) + + stats = ( + len(self.pending_tasks), + len(jobids), + free_memory_gb, + self.memory_gb, + free_processors, + self.processors, + ) if self._stats != stats: - tasks_list_msg = '' + tasks_list_msg = "" if logger.level <= INFO: running_tasks = [ - ' * %s' % self.procs[jobid].fullname + " * %s" % self.procs[jobid].fullname for _, jobid in self.pending_tasks ] if running_tasks: - tasks_list_msg = '\nCurrently running:\n' - tasks_list_msg += '\n'.join(running_tasks) - tasks_list_msg = indent(tasks_list_msg, ' ' * 21) + tasks_list_msg = "\nCurrently running:\n" + tasks_list_msg += "\n".join(running_tasks) + tasks_list_msg = indent(tasks_list_msg, " " * 21) logger.info( - '[LegacyMultiProc] Running %d tasks, and %d jobs ready. Free ' - 'memory (GB): %0.2f/%0.2f, Free processors: %d/%d.%s', - len(self.pending_tasks), len(jobids), free_memory_gb, - self.memory_gb, free_processors, self.processors, - tasks_list_msg) + "[LegacyMultiProc] Running %d tasks, and %d jobs ready. Free " + "memory (GB): %0.2f/%0.2f, Free processors: %d/%d.%s", + len(self.pending_tasks), + len(jobids), + free_memory_gb, + self.memory_gb, + free_processors, + self.processors, + tasks_list_msg, + ) self._stats = stats if free_memory_gb < 0.01 or free_processors == 0: - logger.debug('No resources available') + logger.debug("No resources available") return if len(jobids) + len(self.pending_tasks) == 0: - logger.debug('No tasks are being run, and no jobs can ' - 'be submitted to the queue. Potential deadlock') + logger.debug( + "No tasks are being run, and no jobs can " + "be submitted to the queue. Potential deadlock" + ) return - jobids = self._sort_jobs( - jobids, scheduler=self.plugin_args.get('scheduler')) + jobids = self._sort_jobs(jobids, scheduler=self.plugin_args.get("scheduler")) # Run garbage collector before potentially submitting jobs gc.collect() @@ -333,12 +375,8 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): except Exception: traceback = format_exception(*sys.exc_info()) self._clean_queue( - jobid, - graph, - result={ - 'result': None, - 'traceback': traceback - }) + jobid, graph, result={"result": None, "traceback": traceback} + ) self.proc_pending[jobid] = False continue if num_subnodes > 1: @@ -352,16 +390,26 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): # If node does not fit, skip at this moment if next_job_th > free_processors or next_job_gb > free_memory_gb: - logger.debug('Cannot allocate job %d (%0.2fGB, %d threads).', - jobid, next_job_gb, next_job_th) + logger.debug( + "Cannot allocate job %d (%0.2fGB, %d threads).", + jobid, + next_job_gb, + next_job_th, + ) continue free_memory_gb -= next_job_gb free_processors -= next_job_th - logger.debug('Allocating %s ID=%d (%0.2fGB, %d threads). 
Free: ' - '%0.2fGB, %d threads.', self.procs[jobid].fullname, - jobid, next_job_gb, next_job_th, free_memory_gb, - free_processors) + logger.debug( + "Allocating %s ID=%d (%0.2fGB, %d threads). Free: " + "%0.2fGB, %d threads.", + self.procs[jobid].fullname, + jobid, + next_job_gb, + next_job_th, + free_memory_gb, + free_processors, + ) # change job status in appropriate queues self.proc_done[jobid] = True @@ -373,19 +421,14 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): # updatehash and run_without_submitting are also run locally if updatehash or self.procs[jobid].run_without_submitting: - logger.debug('Running node %s on master thread', - self.procs[jobid]) + logger.debug("Running node %s on master thread", self.procs[jobid]) try: self.procs[jobid].run(updatehash=updatehash) except Exception: traceback = format_exception(*sys.exc_info()) self._clean_queue( - jobid, - graph, - result={ - 'result': None, - 'traceback': traceback - }) + jobid, graph, result={"result": None, "traceback": traceback} + ) # Release resources self._task_finished_cb(jobid) @@ -402,9 +445,8 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): # Task should be submitted to workers # Send job to task manager and add to pending tasks if self._status_callback: - self._status_callback(self.procs[jobid], 'start') - tid = self._submit_job( - deepcopy(self.procs[jobid]), updatehash=updatehash) + self._status_callback(self.procs[jobid], "start") + tid = self._submit_job(deepcopy(self.procs[jobid]), updatehash=updatehash) if tid is None: self.proc_done[jobid] = False self.proc_pending[jobid] = False @@ -413,10 +455,10 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): # Display stats next loop self._stats = None - def _sort_jobs(self, jobids, scheduler='tsort'): - if scheduler == 'mem_thread': + def _sort_jobs(self, jobids, scheduler="tsort"): + if scheduler == "mem_thread": return sorted( jobids, - key=lambda item: (self.procs[item].mem_gb, self.procs[item].n_procs) + key=lambda item: (self.procs[item].mem_gb, self.procs[item].n_procs), ) return jobids diff --git a/nipype/pipeline/plugins/linear.py b/nipype/pipeline/plugins/linear.py index 732636ca12..650bff280f 100644 --- a/nipype/pipeline/plugins/linear.py +++ b/nipype/pipeline/plugins/linear.py @@ -5,8 +5,7 @@ """ import os -from .base import (PluginBase, logger, report_crash, report_nodes_not_run, - str2bool) +from .base import PluginBase, logger, report_crash, report_nodes_not_run, str2bool from ..engine.utils import topological_sort @@ -24,40 +23,42 @@ def run(self, graph, config, updatehash=False): defines order of execution """ import networkx as nx + try: dfs_preorder = nx.dfs_preorder except AttributeError: dfs_preorder = nx.dfs_preorder_nodes if not isinstance(graph, nx.DiGraph): - raise ValueError('Input must be a networkx digraph object') + raise ValueError("Input must be a networkx digraph object") logger.info("Running serially.") old_wd = os.getcwd() notrun = [] donotrun = [] nodes, _ = topological_sort(graph) for node in nodes: - endstatus = 'end' + endstatus = "end" try: if node in donotrun: continue if self._status_callback: - self._status_callback(node, 'start') + self._status_callback(node, "start") node.run(updatehash=updatehash) except: - endstatus = 'exception' + endstatus = "exception" # bare except, but i really don't know where a # node might fail crashfile = report_crash(node) - if str2bool(config['execution']['stop_on_first_crash']): + if str2bool(config["execution"]["stop_on_first_crash"]): raise 
# remove dependencies from queue subnodes = [s for s in dfs_preorder(graph, node)] - notrun.append({'node': node, 'dependents': subnodes, - 'crashfile': crashfile}) + notrun.append( + {"node": node, "dependents": subnodes, "crashfile": crashfile} + ) donotrun.extend(subnodes) # Delay raising the crash until we cleaned the house - if str2bool(config['execution']['stop_on_first_crash']): + if str2bool(config["execution"]["stop_on_first_crash"]): os.chdir(old_wd) # Return wherever we were before report_nodes_not_run(notrun) # report before raising raise diff --git a/nipype/pipeline/plugins/lsf.py b/nipype/pipeline/plugins/lsf.py index 866529d8bb..a88fbb6675 100644 --- a/nipype/pipeline/plugins/lsf.py +++ b/nipype/pipeline/plugins/lsf.py @@ -9,7 +9,8 @@ from ... import logging from ...interfaces.base import CommandLine from .base import SGELikeBatchManagerBase, logger -iflogger = logging.getLogger('nipype.interface') + +iflogger = logging.getLogger("nipype.interface") class LSFPlugin(SGELikeBatchManagerBase): @@ -30,14 +31,14 @@ def __init__(self, **kwargs): """ self._retry_timeout = 2 self._max_tries = 2 - self._bsub_args = '' - if 'plugin_args' in kwargs and kwargs['plugin_args']: - if 'retry_timeout' in kwargs['plugin_args']: - self._retry_timeout = kwargs['plugin_args']['retry_timeout'] - if 'max_tries' in kwargs['plugin_args']: - self._max_tries = kwargs['plugin_args']['max_tries'] - if 'bsub_args' in kwargs['plugin_args']: - self._bsub_args = kwargs['plugin_args']['bsub_args'] + self._bsub_args = "" + if "plugin_args" in kwargs and kwargs["plugin_args"]: + if "retry_timeout" in kwargs["plugin_args"]: + self._retry_timeout = kwargs["plugin_args"]["retry_timeout"] + if "max_tries" in kwargs["plugin_args"]: + self._max_tries = kwargs["plugin_args"]["max_tries"] + if "bsub_args" in kwargs["plugin_args"]: + self._bsub_args = kwargs["plugin_args"]["bsub_args"] super(LSFPlugin, self).__init__(template, **kwargs) def _is_pending(self, taskid): @@ -46,53 +47,54 @@ def _is_pending(self, taskid): But _is_pending should return True until a job has finished and is ready to be checked for completeness. 
So return True if status is either 'PEND' or 'RUN'""" - cmd = CommandLine( - 'bjobs', resource_monitor=False, terminal_output='allatonce') - cmd.inputs.args = '%d' % taskid + cmd = CommandLine("bjobs", resource_monitor=False, terminal_output="allatonce") + cmd.inputs.args = "%d" % taskid # check lsf task oldlevel = iflogger.level - iflogger.setLevel(logging.getLevelName('CRITICAL')) + iflogger.setLevel(logging.getLevelName("CRITICAL")) result = cmd.run(ignore_exception=True) iflogger.setLevel(oldlevel) # logger.debug(result.runtime.stdout) - if 'DONE' in result.runtime.stdout or 'EXIT' in result.runtime.stdout: + if "DONE" in result.runtime.stdout or "EXIT" in result.runtime.stdout: return False else: return True def _submit_batchtask(self, scriptfile, node): cmd = CommandLine( - 'bsub', + "bsub", environ=dict(os.environ), resource_monitor=False, - terminal_output='allatonce') - bsubargs = '' + terminal_output="allatonce", + ) + bsubargs = "" if self._bsub_args: bsubargs = self._bsub_args - if 'bsub_args' in node.plugin_args: - if 'overwrite' in node.plugin_args and\ - node.plugin_args['overwrite']: - bsubargs = node.plugin_args['bsub_args'] + if "bsub_args" in node.plugin_args: + if "overwrite" in node.plugin_args and node.plugin_args["overwrite"]: + bsubargs = node.plugin_args["bsub_args"] else: - bsubargs += (" " + node.plugin_args['bsub_args']) - if '-o' not in bsubargs: # -o outfile - bsubargs = '%s -o %s' % (bsubargs, scriptfile + ".log") - if '-e' not in bsubargs: + bsubargs += " " + node.plugin_args["bsub_args"] + if "-o" not in bsubargs: # -o outfile + bsubargs = "%s -o %s" % (bsubargs, scriptfile + ".log") + if "-e" not in bsubargs: # -e error file - bsubargs = '%s -e %s' % (bsubargs, scriptfile + ".log") + bsubargs = "%s -e %s" % (bsubargs, scriptfile + ".log") if node._hierarchy: - jobname = '.'.join((dict(os.environ)['LOGNAME'], node._hierarchy, - node._id)) + jobname = ".".join((dict(os.environ)["LOGNAME"], node._hierarchy, node._id)) else: - jobname = '.'.join((dict(os.environ)['LOGNAME'], node._id)) - jobnameitems = jobname.split('.') + jobname = ".".join((dict(os.environ)["LOGNAME"], node._id)) + jobnameitems = jobname.split(".") jobnameitems.reverse() - jobname = '.'.join(jobnameitems) - cmd.inputs.args = '%s -J %s sh %s' % (bsubargs, jobname, - scriptfile) # -J job_name_spec - logger.debug('bsub ' + cmd.inputs.args) + jobname = ".".join(jobnameitems) + cmd.inputs.args = "%s -J %s sh %s" % ( + bsubargs, + jobname, + scriptfile, + ) # -J job_name_spec + logger.debug("bsub " + cmd.inputs.args) oldlevel = iflogger.level - iflogger.setLevel(logging.getLevelName('CRITICAL')) + iflogger.setLevel(logging.getLevelName("CRITICAL")) tries = 0 while True: try: @@ -100,23 +102,28 @@ def _submit_batchtask(self, scriptfile, node): except Exception as e: if tries < self._max_tries: tries += 1 - sleep( - self._retry_timeout) # sleep 2 seconds and try again. + sleep(self._retry_timeout) # sleep 2 seconds and try again. 
else: iflogger.setLevel(oldlevel) - raise RuntimeError('\n'.join((('Could not submit lsf task' - ' for node %s') % node._id, - str(e)))) + raise RuntimeError( + "\n".join( + ( + ("Could not submit lsf task" " for node %s") % node._id, + str(e), + ) + ) + ) else: break iflogger.setLevel(oldlevel) # retrieve lsf taskid - match = re.search(r'<(\d*)>', result.runtime.stdout) + match = re.search(r"<(\d*)>", result.runtime.stdout) if match: taskid = int(match.groups()[0]) else: - raise IOError("Can't parse submission job output id: %s" % - result.runtime.stdout) + raise IOError( + "Can't parse submission job output id: %s" % result.runtime.stdout + ) self._pending[taskid] = node.output_dir() - logger.debug('submitted lsf task: %d for node %s' % (taskid, node._id)) + logger.debug("submitted lsf task: %d for node %s" % (taskid, node._id)) return taskid diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index f310d97912..dc950385b1 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -36,7 +36,7 @@ def indent(text, prefix): # Init logger -logger = logging.getLogger('nipype.workflow') +logger = logging.getLogger("nipype.workflow") # Run node @@ -64,10 +64,10 @@ def run_node(node, updatehash, taskid): # Try and execute the node via node.run() try: - result['result'] = node.run(updatehash=updatehash) + result["result"] = node.run(updatehash=updatehash) except: # noqa: E722, intendedly catch all here - result['traceback'] = format_exception(*sys.exc_info()) - result['result'] = node.result + result["traceback"] = format_exception(*sys.exc_info()) + result["result"] = node.result # Return the result dictionary return result @@ -118,25 +118,29 @@ def __init__(self, plugin_args=None): self._cwd = os.getcwd() # Read in options or set defaults. 
- self.processors = self.plugin_args.get('n_procs', mp.cpu_count()) + self.processors = self.plugin_args.get("n_procs", mp.cpu_count()) self.memory_gb = self.plugin_args.get( - 'memory_gb', # Allocate 90% of system memory - get_system_total_memory_gb() * 0.9) - self.raise_insufficient = self.plugin_args.get('raise_insufficient', - True) + "memory_gb", # Allocate 90% of system memory + get_system_total_memory_gb() * 0.9, + ) + self.raise_insufficient = self.plugin_args.get("raise_insufficient", True) # Instantiate different thread pools for non-daemon processes - logger.debug('[MultiProc] Starting (n_procs=%d, ' - 'mem_gb=%0.2f, cwd=%s)', - self.processors, self.memory_gb, self._cwd) + logger.debug( + "[MultiProc] Starting (n_procs=%d, " "mem_gb=%0.2f, cwd=%s)", + self.processors, + self.memory_gb, + self._cwd, + ) try: - mp_context = mp.context.get_context( - self.plugin_args.get('mp_context')) - self.pool = ProcessPoolExecutor(max_workers=self.processors, - initializer=os.chdir, - initargs=(self._cwd,), - mp_context=mp_context) + mp_context = mp.context.get_context(self.plugin_args.get("mp_context")) + self.pool = ProcessPoolExecutor( + max_workers=self.processors, + initializer=os.chdir, + initargs=(self._cwd,), + mp_context=mp_context, + ) except (AttributeError, TypeError): # Python < 3.7 does not support initialization or contexts self.pool = ProcessPoolExecutor(max_workers=self.processors) @@ -145,7 +149,7 @@ def __init__(self, plugin_args=None): def _async_callback(self, args): result = args.result() - self._taskresult[result['taskid']] = result + self._taskresult[result["taskid"]] = result def _get_result(self, taskid): return self._taskresult.get(taskid) @@ -157,15 +161,16 @@ def _submit_job(self, node, updatehash=False): self._taskid += 1 # Don't allow streaming outputs - if getattr(node.interface, 'terminal_output', '') == 'stream': - node.interface.terminal_output = 'allatonce' + if getattr(node.interface, "terminal_output", "") == "stream": + node.interface.terminal_output = "allatonce" result_future = self.pool.submit(run_node, node, updatehash, self._taskid) result_future.add_done_callback(self._async_callback) self._task_obj[self._taskid] = result_future - logger.debug('[MultiProc] Submitted task %s (taskid=%d).', - node.fullname, self._taskid) + logger.debug( + "[MultiProc] Submitted task %s (taskid=%d).", node.fullname, self._taskid + ) return self._taskid def _prerun_check(self, graph): @@ -178,17 +183,19 @@ def _prerun_check(self, graph): if np.any(np.array(tasks_mem_gb) > self.memory_gb): logger.warning( - 'Some nodes exceed the total amount of memory available ' - '(%0.2fGB).', self.memory_gb) + "Some nodes exceed the total amount of memory available " "(%0.2fGB).", + self.memory_gb, + ) if self.raise_insufficient: - raise RuntimeError('Insufficient resources available for job') + raise RuntimeError("Insufficient resources available for job") if np.any(np.array(tasks_num_th) > self.processors): logger.warning( - 'Some nodes demand for more threads than available (%d).', - self.processors) + "Some nodes demand for more threads than available (%d).", + self.processors, + ) if self.raise_insufficient: - raise RuntimeError('Insufficient resources available for job') + raise RuntimeError("Insufficient resources available for job") def _postrun_check(self): self.pool.shutdown() @@ -213,46 +220,58 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): # Check to see if a job is available (jobs with all dependencies run) # See 
https://github.com/nipy/nipype/pull/2200#discussion_r141605722 # See also https://github.com/nipy/nipype/issues/2372 - jobids = np.flatnonzero(~self.proc_done & - (self.depidx.sum(axis=0) == 0).__array__()) + jobids = np.flatnonzero( + ~self.proc_done & (self.depidx.sum(axis=0) == 0).__array__() + ) # Check available resources by summing all threads and memory used - free_memory_gb, free_processors = self._check_resources( - self.pending_tasks) - - stats = (len(self.pending_tasks), len(jobids), free_memory_gb, - self.memory_gb, free_processors, self.processors) + free_memory_gb, free_processors = self._check_resources(self.pending_tasks) + + stats = ( + len(self.pending_tasks), + len(jobids), + free_memory_gb, + self.memory_gb, + free_processors, + self.processors, + ) if self._stats != stats: - tasks_list_msg = '' + tasks_list_msg = "" if logger.level <= INFO: running_tasks = [ - ' * %s' % self.procs[jobid].fullname + " * %s" % self.procs[jobid].fullname for _, jobid in self.pending_tasks ] if running_tasks: - tasks_list_msg = '\nCurrently running:\n' - tasks_list_msg += '\n'.join(running_tasks) - tasks_list_msg = indent(tasks_list_msg, ' ' * 21) + tasks_list_msg = "\nCurrently running:\n" + tasks_list_msg += "\n".join(running_tasks) + tasks_list_msg = indent(tasks_list_msg, " " * 21) logger.info( - '[MultiProc] Running %d tasks, and %d jobs ready. Free ' - 'memory (GB): %0.2f/%0.2f, Free processors: %d/%d.%s', - len(self.pending_tasks), len(jobids), free_memory_gb, - self.memory_gb, free_processors, self.processors, - tasks_list_msg) + "[MultiProc] Running %d tasks, and %d jobs ready. Free " + "memory (GB): %0.2f/%0.2f, Free processors: %d/%d.%s", + len(self.pending_tasks), + len(jobids), + free_memory_gb, + self.memory_gb, + free_processors, + self.processors, + tasks_list_msg, + ) self._stats = stats if free_memory_gb < 0.01 or free_processors == 0: - logger.debug('No resources available') + logger.debug("No resources available") return if len(jobids) + len(self.pending_tasks) == 0: - logger.debug('No tasks are being run, and no jobs can ' - 'be submitted to the queue. Potential deadlock') + logger.debug( + "No tasks are being run, and no jobs can " + "be submitted to the queue. Potential deadlock" + ) return - jobids = self._sort_jobs( - jobids, scheduler=self.plugin_args.get('scheduler')) + jobids = self._sort_jobs(jobids, scheduler=self.plugin_args.get("scheduler")) # Run garbage collector before potentially submitting jobs gc.collect() @@ -266,12 +285,8 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): except Exception: traceback = format_exception(*sys.exc_info()) self._clean_queue( - jobid, - graph, - result={ - 'result': None, - 'traceback': traceback - }) + jobid, graph, result={"result": None, "traceback": traceback} + ) self.proc_pending[jobid] = False continue if num_subnodes > 1: @@ -285,16 +300,26 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): # If node does not fit, skip at this moment if next_job_th > free_processors or next_job_gb > free_memory_gb: - logger.debug('Cannot allocate job %d (%0.2fGB, %d threads).', - jobid, next_job_gb, next_job_th) + logger.debug( + "Cannot allocate job %d (%0.2fGB, %d threads).", + jobid, + next_job_gb, + next_job_th, + ) continue free_memory_gb -= next_job_gb free_processors -= next_job_th - logger.debug('Allocating %s ID=%d (%0.2fGB, %d threads). 
Free: ' - '%0.2fGB, %d threads.', self.procs[jobid].fullname, - jobid, next_job_gb, next_job_th, free_memory_gb, - free_processors) + logger.debug( + "Allocating %s ID=%d (%0.2fGB, %d threads). Free: " + "%0.2fGB, %d threads.", + self.procs[jobid].fullname, + jobid, + next_job_gb, + next_job_th, + free_memory_gb, + free_processors, + ) # change job status in appropriate queues self.proc_done[jobid] = True @@ -306,19 +331,14 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): # updatehash and run_without_submitting are also run locally if updatehash or self.procs[jobid].run_without_submitting: - logger.debug('Running node %s on master thread', - self.procs[jobid]) + logger.debug("Running node %s on master thread", self.procs[jobid]) try: self.procs[jobid].run(updatehash=updatehash) except Exception: traceback = format_exception(*sys.exc_info()) self._clean_queue( - jobid, - graph, - result={ - 'result': None, - 'traceback': traceback - }) + jobid, graph, result={"result": None, "traceback": traceback} + ) # Release resources self._task_finished_cb(jobid) @@ -335,9 +355,8 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): # Task should be submitted to workers # Send job to task manager and add to pending tasks if self._status_callback: - self._status_callback(self.procs[jobid], 'start') - tid = self._submit_job( - deepcopy(self.procs[jobid]), updatehash=updatehash) + self._status_callback(self.procs[jobid], "start") + tid = self._submit_job(deepcopy(self.procs[jobid]), updatehash=updatehash) if tid is None: self.proc_done[jobid] = False self.proc_pending[jobid] = False @@ -346,10 +365,10 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): # Display stats next loop self._stats = None - def _sort_jobs(self, jobids, scheduler='tsort'): - if scheduler == 'mem_thread': + def _sort_jobs(self, jobids, scheduler="tsort"): + if scheduler == "mem_thread": return sorted( jobids, - key=lambda item: (self.procs[item].mem_gb, self.procs[item].n_procs) + key=lambda item: (self.procs[item].mem_gb, self.procs[item].n_procs), ) return jobids diff --git a/nipype/pipeline/plugins/oar.py b/nipype/pipeline/plugins/oar.py index 5ce718c250..4ce64305eb 100644 --- a/nipype/pipeline/plugins/oar.py +++ b/nipype/pipeline/plugins/oar.py @@ -10,7 +10,8 @@ from ... 
import logging from ...interfaces.base import CommandLine from .base import SGELikeBatchManagerBase, logger -iflogger = logging.getLogger('nipype.interface') + +iflogger = logging.getLogger("nipype.interface") class OARPlugin(SGELikeBatchManagerBase): @@ -28,7 +29,7 @@ class OARPlugin(SGELikeBatchManagerBase): # Addtional class variables _max_jobname_len = 15 - _oarsub_args = '' + _oarsub_args = "" def __init__(self, **kwargs): template = """ @@ -37,71 +38,75 @@ def __init__(self, **kwargs): self._retry_timeout = 2 self._max_tries = 2 self._max_jobname_length = 15 - if 'plugin_args' in kwargs and kwargs['plugin_args']: - if 'oarsub_args' in kwargs['plugin_args']: - self._oarsub_args = kwargs['plugin_args']['oarsub_args'] - if 'retry_timeout' in kwargs['plugin_args']: - self._retry_timeout = kwargs['plugin_args']['retry_timeout'] - if 'max_tries' in kwargs['plugin_args']: - self._max_tries = kwargs['plugin_args']['max_tries'] - if 'max_jobname_len' in kwargs['plugin_args']: - self._max_jobname_len = \ - kwargs['plugin_args']['max_jobname_len'] + if "plugin_args" in kwargs and kwargs["plugin_args"]: + if "oarsub_args" in kwargs["plugin_args"]: + self._oarsub_args = kwargs["plugin_args"]["oarsub_args"] + if "retry_timeout" in kwargs["plugin_args"]: + self._retry_timeout = kwargs["plugin_args"]["retry_timeout"] + if "max_tries" in kwargs["plugin_args"]: + self._max_tries = kwargs["plugin_args"]["max_tries"] + if "max_jobname_len" in kwargs["plugin_args"]: + self._max_jobname_len = kwargs["plugin_args"]["max_jobname_len"] super(OARPlugin, self).__init__(template, **kwargs) def _is_pending(self, taskid): # subprocess.Popen requires taskid to be a string proc = subprocess.Popen( - ['oarstat', '-J', '-s', '-j', taskid], + ["oarstat", "-J", "-s", "-j", taskid], stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + stderr=subprocess.PIPE, + ) o, e = proc.communicate() parsed_result = json.loads(o)[taskid].lower() - is_pending = (('error' not in parsed_result) - and ('terminated' not in parsed_result)) + is_pending = ("error" not in parsed_result) and ( + "terminated" not in parsed_result + ) return is_pending def _submit_batchtask(self, scriptfile, node): cmd = CommandLine( - 'oarsub', + "oarsub", environ=dict(os.environ), resource_monitor=False, - terminal_output='allatonce') + terminal_output="allatonce", + ) path = os.path.dirname(scriptfile) - oarsubargs = '' + oarsubargs = "" if self._oarsub_args: oarsubargs = self._oarsub_args - if 'oarsub_args' in node.plugin_args: - if ('overwrite' in node.plugin_args - and node.plugin_args['overwrite']): - oarsubargs = node.plugin_args['oarsub_args'] + if "oarsub_args" in node.plugin_args: + if "overwrite" in node.plugin_args and node.plugin_args["overwrite"]: + oarsubargs = node.plugin_args["oarsub_args"] else: - oarsubargs += (" " + node.plugin_args['oarsub_args']) + oarsubargs += " " + node.plugin_args["oarsub_args"] if node._hierarchy: - jobname = '.'.join((dict(os.environ)['LOGNAME'], node._hierarchy, - node._id)) + jobname = ".".join((dict(os.environ)["LOGNAME"], node._hierarchy, node._id)) else: - jobname = '.'.join((dict(os.environ)['LOGNAME'], node._id)) - jobnameitems = jobname.split('.') + jobname = ".".join((dict(os.environ)["LOGNAME"], node._id)) + jobnameitems = jobname.split(".") jobnameitems.reverse() - jobname = '.'.join(jobnameitems) - jobname = jobname[0:self._max_jobname_len] - - if '-O' not in oarsubargs: - oarsubargs = '%s -O %s' % (oarsubargs, - os.path.join(path, jobname + '.stdout')) - if '-E' not in oarsubargs: - oarsubargs = '%s -E 
%s' % (oarsubargs, - os.path.join(path, jobname + '.stderr')) - if '-J' not in oarsubargs: - oarsubargs = '%s -J' % (oarsubargs) + jobname = ".".join(jobnameitems) + jobname = jobname[0 : self._max_jobname_len] + + if "-O" not in oarsubargs: + oarsubargs = "%s -O %s" % ( + oarsubargs, + os.path.join(path, jobname + ".stdout"), + ) + if "-E" not in oarsubargs: + oarsubargs = "%s -E %s" % ( + oarsubargs, + os.path.join(path, jobname + ".stderr"), + ) + if "-J" not in oarsubargs: + oarsubargs = "%s -J" % (oarsubargs) os.chmod(scriptfile, stat.S_IEXEC | stat.S_IREAD | stat.S_IWRITE) - cmd.inputs.args = '%s -n %s -S %s' % (oarsubargs, jobname, scriptfile) + cmd.inputs.args = "%s -n %s -S %s" % (oarsubargs, jobname, scriptfile) oldlevel = iflogger.level - iflogger.setLevel(logging.getLevelName('CRITICAL')) + iflogger.setLevel(logging.getLevelName("CRITICAL")) tries = 0 while True: try: @@ -113,24 +118,29 @@ def _submit_batchtask(self, scriptfile, node): # sleep 2 seconds and try again. else: iflogger.setLevel(oldlevel) - raise RuntimeError('\n'.join((('Could not submit OAR task' - ' for node %s') % node._id, - str(e)))) + raise RuntimeError( + "\n".join( + ( + ("Could not submit OAR task" " for node %s") % node._id, + str(e), + ) + ) + ) else: break iflogger.setLevel(oldlevel) # retrieve OAR taskid - o = '' + o = "" add = False for line in result.runtime.stdout.splitlines(): - if line.strip().startswith('{'): + if line.strip().startswith("{"): add = True if add: - o += line + '\n' - if line.strip().startswith('}'): + o += line + "\n" + if line.strip().startswith("}"): break - taskid = json.loads(o)['job_id'] + taskid = json.loads(o)["job_id"] self._pending[taskid] = node.output_dir() - logger.debug('submitted OAR task: %s for node %s' % (taskid, node._id)) + logger.debug("submitted OAR task: %s for node %s" % (taskid, node._id)) return taskid diff --git a/nipype/pipeline/plugins/pbs.py b/nipype/pipeline/plugins/pbs.py index cc5bbed93b..b322d88743 100644 --- a/nipype/pipeline/plugins/pbs.py +++ b/nipype/pipeline/plugins/pbs.py @@ -8,7 +8,7 @@ from ...interfaces.base import CommandLine from .base import SGELikeBatchManagerBase, logger -iflogger = logging.getLogger('nipype.interface') +iflogger = logging.getLogger("nipype.interface") class PBSPlugin(SGELikeBatchManagerBase): @@ -34,65 +34,65 @@ def __init__(self, **kwargs): self._retry_timeout = 2 self._max_tries = 2 self._max_jobname_length = 15 - if 'plugin_args' in kwargs and kwargs['plugin_args']: - if 'retry_timeout' in kwargs['plugin_args']: - self._retry_timeout = kwargs['plugin_args']['retry_timeout'] - if 'max_tries' in kwargs['plugin_args']: - self._max_tries = kwargs['plugin_args']['max_tries'] - if 'max_jobname_len' in kwargs['plugin_args']: - self._max_jobname_len = kwargs['plugin_args'][ - 'max_jobname_len'] + if "plugin_args" in kwargs and kwargs["plugin_args"]: + if "retry_timeout" in kwargs["plugin_args"]: + self._retry_timeout = kwargs["plugin_args"]["retry_timeout"] + if "max_tries" in kwargs["plugin_args"]: + self._max_tries = kwargs["plugin_args"]["max_tries"] + if "max_jobname_len" in kwargs["plugin_args"]: + self._max_jobname_len = kwargs["plugin_args"]["max_jobname_len"] super(PBSPlugin, self).__init__(template, **kwargs) def _is_pending(self, taskid): - result = CommandLine('qstat -f {}'.format(taskid), - environ=dict(os.environ), - terminal_output='file_split', - resource_monitor=False, - ignore_exception=True).run() + result = CommandLine( + "qstat -f {}".format(taskid), + environ=dict(os.environ), + 
terminal_output="file_split", + resource_monitor=False, + ignore_exception=True, + ).run() stdout = result.runtime.stdout stderr = result.runtime.stderr - errmsg = 'Unknown Job Id' - success = 'Job has finished' - if (success in stderr) or ('job_state = C' in stdout): + errmsg = "Unknown Job Id" + success = "Job has finished" + if (success in stderr) or ("job_state = C" in stdout): return False else: return errmsg not in stderr def _submit_batchtask(self, scriptfile, node): cmd = CommandLine( - 'qsub', + "qsub", environ=dict(os.environ), resource_monitor=False, - terminal_output='allatonce') + terminal_output="allatonce", + ) path = os.path.dirname(scriptfile) - qsubargs = '' + qsubargs = "" if self._qsub_args: qsubargs = self._qsub_args - if 'qsub_args' in node.plugin_args: - if 'overwrite' in node.plugin_args and \ - node.plugin_args['overwrite']: - qsubargs = node.plugin_args['qsub_args'] + if "qsub_args" in node.plugin_args: + if "overwrite" in node.plugin_args and node.plugin_args["overwrite"]: + qsubargs = node.plugin_args["qsub_args"] else: - qsubargs += (" " + node.plugin_args['qsub_args']) - if '-o' not in qsubargs: - qsubargs = '%s -o %s' % (qsubargs, path) - if '-e' not in qsubargs: - qsubargs = '%s -e %s' % (qsubargs, path) + qsubargs += " " + node.plugin_args["qsub_args"] + if "-o" not in qsubargs: + qsubargs = "%s -o %s" % (qsubargs, path) + if "-e" not in qsubargs: + qsubargs = "%s -e %s" % (qsubargs, path) if node._hierarchy: - jobname = '.'.join((dict(os.environ)['LOGNAME'], node._hierarchy, - node._id)) + jobname = ".".join((dict(os.environ)["LOGNAME"], node._hierarchy, node._id)) else: - jobname = '.'.join((dict(os.environ)['LOGNAME'], node._id)) - jobnameitems = jobname.split('.') + jobname = ".".join((dict(os.environ)["LOGNAME"], node._id)) + jobnameitems = jobname.split(".") jobnameitems.reverse() - jobname = '.'.join(jobnameitems) - jobname = jobname[0:self._max_jobname_len] - cmd.inputs.args = '%s -N %s %s' % (qsubargs, jobname, scriptfile) + jobname = ".".join(jobnameitems) + jobname = jobname[0 : self._max_jobname_len] + cmd.inputs.args = "%s -N %s %s" % (qsubargs, jobname, scriptfile) oldlevel = iflogger.level - iflogger.setLevel(logging.getLevelName('CRITICAL')) + iflogger.setLevel(logging.getLevelName("CRITICAL")) tries = 0 while True: try: @@ -105,15 +105,14 @@ def _submit_batchtask(self, scriptfile, node): else: iflogger.setLevel(oldlevel) raise RuntimeError( - 'Could not submit pbs task for node {}\n{}'.format( - node._id, e)) + "Could not submit pbs task for node {}\n{}".format(node._id, e) + ) else: break iflogger.setLevel(oldlevel) # retrieve pbs taskid - taskid = result.runtime.stdout.split('.')[0] + taskid = result.runtime.stdout.split(".")[0] self._pending[taskid] = node.output_dir() - logger.debug('submitted pbs task: {} for node {}'.format( - taskid, node._id)) + logger.debug("submitted pbs task: {} for node {}".format(taskid, node._id)) return taskid diff --git a/nipype/pipeline/plugins/pbsgraph.py b/nipype/pipeline/plugins/pbsgraph.py index 9b6e9da755..6304e715b7 100644 --- a/nipype/pipeline/plugins/pbsgraph.py +++ b/nipype/pipeline/plugins/pbsgraph.py @@ -19,43 +19,44 @@ class PBSGraphPlugin(SGEGraphPlugin): qsub call """ + _template = """ #PBS -V """ def _submit_graph(self, pyfiles, dependencies, nodes): batch_dir, _ = os.path.split(pyfiles[0]) - submitjobsfile = os.path.join(batch_dir, 'submit_jobs.sh') - with open(submitjobsfile, 'wt') as fp: - fp.writelines('#!/usr/bin/env sh\n') + submitjobsfile = os.path.join(batch_dir, "submit_jobs.sh") + 
with open(submitjobsfile, "wt") as fp: + fp.writelines("#!/usr/bin/env sh\n") for idx, pyscript in enumerate(pyfiles): node = nodes[idx] - template, qsub_args = self._get_args(node, - ["template", "qsub_args"]) + template, qsub_args = self._get_args(node, ["template", "qsub_args"]) batch_dir, name = os.path.split(pyscript) - name = '.'.join(name.split('.')[:-1]) - batchscript = '\n'.join((template, '%s %s' % (sys.executable, - pyscript))) - batchscriptfile = os.path.join(batch_dir, - 'batchscript_%s.sh' % name) - with open(batchscriptfile, 'wt') as batchfp: + name = ".".join(name.split(".")[:-1]) + batchscript = "\n".join( + (template, "%s %s" % (sys.executable, pyscript)) + ) + batchscriptfile = os.path.join(batch_dir, "batchscript_%s.sh" % name) + with open(batchscriptfile, "wt") as batchfp: batchfp.writelines(batchscript) batchfp.close() - deps = '' + deps = "" if idx in dependencies: - values = [ - '$job%05d' % jobid for jobid in dependencies[idx] - ] + values = ["$job%05d" % jobid for jobid in dependencies[idx]] if len(values): - deps = '-W depend=afterok:%s' % ':'.join(values) - fp.writelines('job%05d=`qsub %s %s %s`\n' % - (idx, deps, qsub_args, batchscriptfile)) + deps = "-W depend=afterok:%s" % ":".join(values) + fp.writelines( + "job%05d=`qsub %s %s %s`\n" + % (idx, deps, qsub_args, batchscriptfile) + ) cmd = CommandLine( - 'sh', + "sh", environ=dict(os.environ), resource_monitor=False, - terminal_output='allatonce') - cmd.inputs.args = '%s' % submitjobsfile + terminal_output="allatonce", + ) + cmd.inputs.args = "%s" % submitjobsfile cmd.run() - logger.info('submitted all jobs to queue') + logger.info("submitted all jobs to queue") diff --git a/nipype/pipeline/plugins/semaphore_singleton.py b/nipype/pipeline/plugins/semaphore_singleton.py index fe25cf2c61..12fa7c7777 100644 --- a/nipype/pipeline/plugins/semaphore_singleton.py +++ b/nipype/pipeline/plugins/semaphore_singleton.py @@ -1,3 +1,4 @@ # -*- coding: utf-8 -*- import threading + semaphore = threading.Semaphore(0) diff --git a/nipype/pipeline/plugins/sge.py b/nipype/pipeline/plugins/sge.py index 61b127e188..17a5093ae2 100644 --- a/nipype/pipeline/plugins/sge.py +++ b/nipype/pipeline/plugins/sge.py @@ -14,7 +14,8 @@ from ... import logging from ...interfaces.base import CommandLine from .base import SGELikeBatchManagerBase, logger -iflogger = logging.getLogger('nipype.interface') + +iflogger = logging.getLogger("nipype.interface") DEBUGGING_PREFIX = str(int(random.uniform(100, 999))) @@ -31,28 +32,39 @@ class QJobInfo(object): :author Hans J. Johnson """ - def __init__(self, job_num, job_queue_state, job_time, job_queue_name, - job_slots, qsub_command_line): + def __init__( + self, + job_num, + job_queue_state, + job_time, + job_queue_name, + job_slots, + qsub_command_line, + ): # self._jobName = None # Ascii text name of job not unique self._job_num = int( job_num ) # The primary unique identifier for this job, must be an integer! # self._jobOwn = None # Who owns this job - self._job_queue_state = str( - job_queue_state) # ["running","zombie",...??] + self._job_queue_state = str(job_queue_state) # ["running","zombie",...??] # self._jobActionState = str(jobActionState) # ['r','qw','S',...??] 
self._job_time = job_time # The job start time - self._job_info_creation_time = time.time( + self._job_info_creation_time = ( + time.time() ) # When this job was created (for comparing against initalization) self._job_queue_name = job_queue_name # Where the job is running self._job_slots = int(job_slots) # How many slots are being used self._qsub_command_line = qsub_command_line def __repr__(self): - return '{:<8d}{:12}{:<3d}{:20}{:8}{}'.format( - self._job_num, self._job_queue_state, self._job_slots, + return "{:<8d}{:12}{:<3d}{:20}{:8}{}".format( + self._job_num, + self._job_queue_state, + self._job_slots, time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime(self._job_time)), - self._job_queue_name, self._qsub_command_line) + self._job_queue_name, + self._qsub_command_line, + ) def is_initializing(self): return self._job_queue_state == "initializing" @@ -69,26 +81,29 @@ def is_pending(self): def is_job_state_pending(self): """ Return True, unless job is in the "zombie" status """ - time_diff = (time.time() - self._job_info_creation_time) + time_diff = time.time() - self._job_info_creation_time if self.is_zombie(): sge_debug_print( - "DONE! QJobInfo.IsPending found in 'zombie' list, returning False so claiming done!\n{0}". - format(self)) + "DONE! QJobInfo.IsPending found in 'zombie' list, returning False so claiming done!\n{0}".format( + self + ) + ) is_pending_status = False # Job explicitly found as being completed! elif self.is_initializing() and (time_diff > 600): # if initializing for more than 5 minute, failure due to # initialization and completion before registration sge_debug_print( "FAILURE! QJobInfo.IsPending found long running at {1} seconds" - "'initializing' returning False for to break loop!\n{0}". - format(self, time_diff)) + "'initializing' returning False for to break loop!\n{0}".format( + self, time_diff + ) + ) is_pending_status = True # Job initialization took too long, so report! else: # self.is_running() || self.is_pending(): is_pending_status = True # Job cache last listed as running return is_pending_status # The job is in one of the hold states - def update_info(self, job_queue_state, job_time, job_queue_name, - job_slots): + def update_info(self, job_queue_state, job_time, job_queue_name, job_slots): self._job_queue_state = job_queue_state self._job_time = job_time self._job_queue_name = job_queue_name @@ -102,9 +117,9 @@ class QstatSubstitute(object): """A wrapper for Qstat to avoid overloading the SGE/OGS server with rapid continuous qstat requests""" - def __init__(self, - qstat_instant_executable='qstat', - qstat_cached_executable='qstat'): + def __init__( + self, qstat_instant_executable="qstat", qstat_cached_executable="qstat" + ): """ :param qstat_instant_executable: :param qstat_cached_executable: @@ -112,7 +127,8 @@ def __init__(self, self._qstat_instant_executable = qstat_instant_executable self._qstat_cached_executable = qstat_cached_executable self._out_of_scope_jobs = list() # Initialize first - self._task_dictionary = dict( + self._task_dictionary = ( + dict() ) # {'taskid': QJobInfo(), .... 
} The dictionaryObject self._remove_old_jobs() @@ -132,20 +148,22 @@ def add_startup_job(self, taskid, qsub_command_line): :return: NONE """ taskid = int(taskid) # Ensure that it is an integer - self._task_dictionary[taskid] = QJobInfo(taskid, "initializing", - time.time(), "noQueue", 1, - qsub_command_line) + self._task_dictionary[taskid] = QJobInfo( + taskid, "initializing", time.time(), "noQueue", 1, qsub_command_line + ) @staticmethod def _qacct_verified_complete(taskid): """ request definitive job completion information for the current job from the qacct report """ - sge_debug_print("WARNING: " - "CONTACTING qacct for finished jobs, " - "{0}: {1}".format(time.time(), "Verifying Completion")) + sge_debug_print( + "WARNING: " + "CONTACTING qacct for finished jobs, " + "{0}: {1}".format(time.time(), "Verifying Completion") + ) - this_command = 'qacct' + this_command = "qacct" qacct_retries = 10 is_complete = False while qacct_retries > 0: @@ -153,17 +171,19 @@ def _qacct_verified_complete(taskid): try: proc = subprocess.Popen( [ - this_command, '-o', - pwd.getpwuid(os.getuid())[0], '-j', - str(taskid) + this_command, + "-o", + pwd.getpwuid(os.getuid())[0], + "-j", + str(taskid), ], stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + stderr=subprocess.PIPE, + ) qacct_result, _ = proc.communicate() if qacct_result.find(str(taskid)): is_complete = True - sge_debug_print( - "NOTE: qacct for jobs\n{0}".format(qacct_result)) + sge_debug_print("NOTE: qacct for jobs\n{0}".format(qacct_result)) break except: sge_debug_print("NOTE: qacct call failed") @@ -178,26 +198,36 @@ def _parse_qstat_job_list(self, xml_job_list): # jobown = # current_job_element.getElementsByTagName('JB_owner')[0].childNodes[0].data try: - job_queue_name = current_job_element.getElementsByTagName( - 'queue_name')[0].childNodes[0].data + job_queue_name = ( + current_job_element.getElementsByTagName("queue_name")[0] + .childNodes[0] + .data + ) except: job_queue_name = "unknown" try: job_slots = int( - current_job_element.getElementsByTagName('slots')[0] - .childNodes[0].data) + current_job_element.getElementsByTagName("slots")[0] + .childNodes[0] + .data + ) except: job_slots = -1 - job_queue_state = current_job_element.getAttribute('state') + job_queue_state = current_job_element.getAttribute("state") job_num = int( - current_job_element.getElementsByTagName('JB_job_number')[0] - .childNodes[0].data) + current_job_element.getElementsByTagName("JB_job_number")[0] + .childNodes[0] + .data + ) try: - job_time_text = current_job_element.getElementsByTagName( - 'JAT_start_time')[0].childNodes[0].data + job_time_text = ( + current_job_element.getElementsByTagName("JAT_start_time")[0] + .childNodes[0] + .data + ) job_time = float( - time.mktime( - time.strptime(job_time_text, "%Y-%m-%dT%H:%M:%S"))) + time.mktime(time.strptime(job_time_text, "%Y-%m-%dT%H:%M:%S")) + ) except: job_time = float(0.0) # Make job entry @@ -205,9 +235,11 @@ def _parse_qstat_job_list(self, xml_job_list): task_id = int(job_num) if task_id in self._task_dictionary: self._task_dictionary[task_id].update_info( - job_queue_state, job_time, job_queue_name, job_slots) - sge_debug_print("Updating job: {0}".format( - self._task_dictionary[task_id])) + job_queue_state, job_time, job_queue_name, job_slots + ) + sge_debug_print( + "Updating job: {0}".format(self._task_dictionary[task_id]) + ) current_jobs_parsed.append(task_id) # Changed from job_num as "in" is used to check which does not cast else: @@ -227,10 +259,12 @@ def _parse_qstat_job_list(self, xml_job_list): 
if is_completed: self._task_dictionary[dictionary_job].set_state("zombie") else: - sge_debug_print("ERROR: Job not in current parselist, " - "and not in done list {0}: {1}".format( - dictionary_job, - self._task_dictionary[dictionary_job])) + sge_debug_print( + "ERROR: Job not in current parselist, " + "and not in done list {0}: {1}".format( + dictionary_job, self._task_dictionary[dictionary_job] + ) + ) pass if self._task_dictionary[dictionary_job].is_initializing(): is_completed = self._qacct_verified_complete(dictionary_job) @@ -240,8 +274,9 @@ def _parse_qstat_job_list(self, xml_job_list): sge_debug_print( "ERROR: Job not in still in intializing mode, " "and not in done list {0}: {1}".format( - dictionary_job, - self._task_dictionary[dictionary_job])) + dictionary_job, self._task_dictionary[dictionary_job] + ) + ) pass def _run_qstat(self, reason_for_qstat, force_instant=True): @@ -252,8 +287,10 @@ def _run_qstat(self, reason_for_qstat, force_instant=True): -s z gives recently completed jobs (**recently** is very ambiguous) -s s suspended jobs """ - sge_debug_print("WARNING: CONTACTING qmaster for jobs, " - "{0}: {1}".format(time.time(), reason_for_qstat)) + sge_debug_print( + "WARNING: CONTACTING qmaster for jobs, " + "{0}: {1}".format(time.time(), reason_for_qstat) + ) if force_instant: this_command = self._qstat_instant_executable else: @@ -265,22 +302,27 @@ def _run_qstat(self, reason_for_qstat, force_instant=True): try: proc = subprocess.Popen( [ - this_command, '-u', - pwd.getpwuid(os.getuid())[0], '-xml', '-s', 'psrz' + this_command, + "-u", + pwd.getpwuid(os.getuid())[0], + "-xml", + "-s", + "psrz", ], stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + stderr=subprocess.PIPE, + ) qstat_xml_result, _ = proc.communicate() dom = xml.dom.minidom.parseString(qstat_xml_result) - jobs = dom.getElementsByTagName('job_info') + jobs = dom.getElementsByTagName("job_info") run = jobs[0] - runjobs = run.getElementsByTagName('job_list') + runjobs = run.getElementsByTagName("job_list") self._parse_qstat_job_list(runjobs) break except Exception as inst: exception_message = "QstatParsingError:\n\t{0}\n\t{1}\n".format( type(inst), # the exception instance - inst # __str__ allows args to printed directly + inst, # __str__ allows args to printed directly ) sge_debug_print(exception_message) time.sleep(5) @@ -296,36 +338,37 @@ def is_job_pending(self, task_id): # Check if the task is in the dictionary first (before running qstat) if task_id in self._task_dictionary: # Trust the cache, only False if state='zombie' - job_is_pending = self._task_dictionary[ - task_id].is_job_state_pending() + job_is_pending = self._task_dictionary[task_id].is_job_state_pending() # Double check pending jobs in case of change (since we don't check at the beginning) if job_is_pending: self._run_qstat( - "checking job pending status {0}".format(task_id), False) - job_is_pending = self._task_dictionary[ - task_id].is_job_state_pending() + "checking job pending status {0}".format(task_id), False + ) + job_is_pending = self._task_dictionary[task_id].is_job_state_pending() else: - self._run_qstat("checking job pending status {0}".format(task_id), - True) + self._run_qstat("checking job pending status {0}".format(task_id), True) if task_id in self._task_dictionary: # Trust the cache, only False if state='zombie' - job_is_pending = self._task_dictionary[ - task_id].is_job_state_pending() + job_is_pending = self._task_dictionary[task_id].is_job_state_pending() else: - sge_debug_print("ERROR: Job {0} not in task list, " - "even 
after forced qstat!".format(task_id)) + sge_debug_print( + "ERROR: Job {0} not in task list, " + "even after forced qstat!".format(task_id) + ) job_is_pending = False if not job_is_pending: - sge_debug_print( - "DONE! Returning for {0} claiming done!".format(task_id)) + sge_debug_print("DONE! Returning for {0} claiming done!".format(task_id)) if task_id in self._task_dictionary: sge_debug_print( - "NOTE: Adding {0} to OutOfScopeJobs list!".format(task_id)) + "NOTE: Adding {0} to OutOfScopeJobs list!".format(task_id) + ) self._out_of_scope_jobs.append(int(task_id)) self._task_dictionary.pop(task_id) else: - sge_debug_print("ERROR: Job {0} not in task list, " - "but attempted to be removed!".format(task_id)) + sge_debug_print( + "ERROR: Job {0} not in task list, " + "but attempted to be removed!".format(task_id) + ) return job_is_pending @@ -342,7 +385,7 @@ def qsub_sanitize_job_name(testjobname): if testjobname[0].isalpha(): return testjobname else: - return 'J' + testjobname + return "J" + testjobname class SGEPlugin(SGELikeBatchManagerBase): @@ -364,18 +407,18 @@ def __init__(self, **kwargs): """ self._retry_timeout = 2 self._max_tries = 2 - instant_qstat = 'qstat' - cached_qstat = 'qstat' - - if 'plugin_args' in kwargs and kwargs['plugin_args']: - if 'retry_timeout' in kwargs['plugin_args']: - self._retry_timeout = kwargs['plugin_args']['retry_timeout'] - if 'max_tries' in kwargs['plugin_args']: - self._max_tries = kwargs['plugin_args']['max_tries'] - if 'qstatProgramPath' in kwargs['plugin_args']: - instant_qstat = kwargs['plugin_args']['qstatProgramPath'] - if 'qstatCachedProgramPath' in kwargs['plugin_args']: - cached_qstat = kwargs['plugin_args']['qstatCachedProgramPath'] + instant_qstat = "qstat" + cached_qstat = "qstat" + + if "plugin_args" in kwargs and kwargs["plugin_args"]: + if "retry_timeout" in kwargs["plugin_args"]: + self._retry_timeout = kwargs["plugin_args"]["retry_timeout"] + if "max_tries" in kwargs["plugin_args"]: + self._max_tries = kwargs["plugin_args"]["max_tries"] + if "qstatProgramPath" in kwargs["plugin_args"]: + instant_qstat = kwargs["plugin_args"]["qstatProgramPath"] + if "qstatCachedProgramPath" in kwargs["plugin_args"]: + cached_qstat = kwargs["plugin_args"]["qstatCachedProgramPath"] self._refQstatSubstitute = QstatSubstitute(instant_qstat, cached_qstat) super(SGEPlugin, self).__init__(template, **kwargs) @@ -385,36 +428,35 @@ def _is_pending(self, taskid): def _submit_batchtask(self, scriptfile, node): cmd = CommandLine( - 'qsub', + "qsub", environ=dict(os.environ), resource_monitor=False, - terminal_output='allatonce') + terminal_output="allatonce", + ) path = os.path.dirname(scriptfile) - qsubargs = '' + qsubargs = "" if self._qsub_args: qsubargs = self._qsub_args - if 'qsub_args' in node.plugin_args: - if 'overwrite' in node.plugin_args and \ - node.plugin_args['overwrite']: - qsubargs = node.plugin_args['qsub_args'] + if "qsub_args" in node.plugin_args: + if "overwrite" in node.plugin_args and node.plugin_args["overwrite"]: + qsubargs = node.plugin_args["qsub_args"] else: - qsubargs += (" " + node.plugin_args['qsub_args']) - if '-o' not in qsubargs: - qsubargs = '%s -o %s' % (qsubargs, path) - if '-e' not in qsubargs: - qsubargs = '%s -e %s' % (qsubargs, path) + qsubargs += " " + node.plugin_args["qsub_args"] + if "-o" not in qsubargs: + qsubargs = "%s -o %s" % (qsubargs, path) + if "-e" not in qsubargs: + qsubargs = "%s -e %s" % (qsubargs, path) if node._hierarchy: - jobname = '.'.join((dict(os.environ)['LOGNAME'], node._hierarchy, - node._id)) + 
jobname = ".".join((dict(os.environ)["LOGNAME"], node._hierarchy, node._id)) else: - jobname = '.'.join((dict(os.environ)['LOGNAME'], node._id)) - jobnameitems = jobname.split('.') + jobname = ".".join((dict(os.environ)["LOGNAME"], node._id)) + jobnameitems = jobname.split(".") jobnameitems.reverse() - jobname = '.'.join(jobnameitems) + jobname = ".".join(jobnameitems) jobname = qsub_sanitize_job_name(jobname) - cmd.inputs.args = '%s -N %s %s' % (qsubargs, jobname, scriptfile) + cmd.inputs.args = "%s -N %s %s" % (qsubargs, jobname, scriptfile) oldlevel = iflogger.level - iflogger.setLevel(logging.getLevelName('CRITICAL')) + iflogger.setLevel(logging.getLevelName("CRITICAL")) tries = 0 result = list() while True: @@ -423,23 +465,29 @@ def _submit_batchtask(self, scriptfile, node): except Exception as e: if tries < self._max_tries: tries += 1 - time.sleep( - self._retry_timeout) # sleep 2 seconds and try again. + time.sleep(self._retry_timeout) # sleep 2 seconds and try again. else: iflogger.setLevel(oldlevel) - raise RuntimeError('\n'.join((('Could not submit sge task' - ' for node %s') % node._id, - str(e)))) + raise RuntimeError( + "\n".join( + ( + ("Could not submit sge task" " for node %s") % node._id, + str(e), + ) + ) + ) else: break iflogger.setLevel(oldlevel) # retrieve sge taskid - lines = [line for line in result.runtime.stdout.split('\n') if line] + lines = [line for line in result.runtime.stdout.split("\n") if line] taskid = int( - re.match("Your job ([0-9]*) .* has been submitted", - lines[-1]).groups()[0]) + re.match("Your job ([0-9]*) .* has been submitted", lines[-1]).groups()[0] + ) self._pending[taskid] = node.output_dir() self._refQstatSubstitute.add_startup_job(taskid, cmd.cmdline) - logger.debug('submitted sge task: %d for node %s with %s' % - (taskid, node._id, cmd.cmdline)) + logger.debug( + "submitted sge task: %d for node %s with %s" + % (taskid, node._id, cmd.cmdline) + ) return taskid diff --git a/nipype/pipeline/plugins/sgegraph.py b/nipype/pipeline/plugins/sgegraph.py index 06e5719654..82d1cc0e58 100644 --- a/nipype/pipeline/plugins/sgegraph.py +++ b/nipype/pipeline/plugins/sgegraph.py @@ -5,7 +5,7 @@ import sys from ...interfaces.base import CommandLine -from .base import (GraphPluginBase, logger) +from .base import GraphPluginBase, logger def node_completed_status(checknode): @@ -15,15 +15,15 @@ def node_completed_status(checknode): :return: boolean value True indicates that the node does not need to be run. 
""" """ TODO: place this in the base.py file and refactor """ - node_state_does_not_require_overwrite = ( - checknode.overwrite is False or - (checknode.overwrite is None and not checknode._interface.always_run)) + node_state_does_not_require_overwrite = checknode.overwrite is False or ( + checknode.overwrite is None and not checknode._interface.always_run + ) hash_exists = False try: hash_exists, _, _, _ = checknode.hash_exists() except Exception: hash_exists = False - return (hash_exists and node_state_does_not_require_overwrite) + return hash_exists and node_state_does_not_require_overwrite class SGEGraphPlugin(GraphPluginBase): @@ -37,6 +37,7 @@ class SGEGraphPlugin(GraphPluginBase): qsub call """ + _template = """ #!/bin/bash #$ -V @@ -44,19 +45,20 @@ class SGEGraphPlugin(GraphPluginBase): """ def __init__(self, **kwargs): - self._qsub_args = '' + self._qsub_args = "" self._dont_resubmit_completed_jobs = False - if 'plugin_args' in kwargs and kwargs['plugin_args']: - plugin_args = kwargs['plugin_args'] - if 'template' in plugin_args: - self._template = plugin_args['template'] + if "plugin_args" in kwargs and kwargs["plugin_args"]: + plugin_args = kwargs["plugin_args"] + if "template" in plugin_args: + self._template = plugin_args["template"] if os.path.isfile(self._template): self._template = open(self._template).read() - if 'qsub_args' in plugin_args: - self._qsub_args = plugin_args['qsub_args'] - if 'dont_resubmit_completed_jobs' in plugin_args: + if "qsub_args" in plugin_args: + self._qsub_args = plugin_args["qsub_args"] + if "dont_resubmit_completed_jobs" in plugin_args: self._dont_resubmit_completed_jobs = plugin_args[ - 'dont_resubmit_completed_jobs'] + "dont_resubmit_completed_jobs" + ] super(SGEGraphPlugin, self).__init__(**kwargs) def _submit_graph(self, pyfiles, dependencies, nodes): @@ -66,17 +68,18 @@ def make_job_name(jobnumber, nodeslist): - nodeslist: The name of the node being processed - return: A string representing this job to be displayed by SGE """ - job_name = 'j{0}_{1}'.format(jobnumber, nodeslist[jobnumber]._id) + job_name = "j{0}_{1}".format(jobnumber, nodeslist[jobnumber]._id) # Condition job_name to be a valid bash identifier (i.e. 
- is invalid) - job_name = job_name.replace('-', '_').replace('.', '_').replace( - ':', '_') + job_name = job_name.replace("-", "_").replace(".", "_").replace(":", "_") return job_name batch_dir, _ = os.path.split(pyfiles[0]) - submitjobsfile = os.path.join(batch_dir, 'submit_jobs.sh') + submitjobsfile = os.path.join(batch_dir, "submit_jobs.sh") cache_doneness_per_node = dict() - if self._dont_resubmit_completed_jobs: # A future parameter for controlling this behavior could be added here + if ( + self._dont_resubmit_completed_jobs + ): # A future parameter for controlling this behavior could be added here for idx, pyscript in enumerate(pyfiles): node = nodes[idx] node_status_done = node_completed_status(node) @@ -86,73 +89,80 @@ def make_job_name(jobnumber, nodeslist): if node_status_done and idx in dependencies: for child_idx in dependencies[idx]: if child_idx in cache_doneness_per_node: - child_status_done = cache_doneness_per_node[ - child_idx] + child_status_done = cache_doneness_per_node[child_idx] else: - child_status_done = node_completed_status( - nodes[child_idx]) + child_status_done = node_completed_status(nodes[child_idx]) node_status_done = node_status_done and child_status_done cache_doneness_per_node[idx] = node_status_done - with open(submitjobsfile, 'wt') as fp: - fp.writelines('#!/usr/bin/env bash\n') - fp.writelines('# Condense format attempted\n') + with open(submitjobsfile, "wt") as fp: + fp.writelines("#!/usr/bin/env bash\n") + fp.writelines("# Condense format attempted\n") for idx, pyscript in enumerate(pyfiles): node = nodes[idx] if cache_doneness_per_node.get(idx, False): continue else: template, qsub_args = self._get_args( - node, ["template", "qsub_args"]) + node, ["template", "qsub_args"] + ) batch_dir, name = os.path.split(pyscript) - name = '.'.join(name.split('.')[:-1]) - batchscript = '\n'.join( - (template, '%s %s' % (sys.executable, pyscript))) - batchscriptfile = os.path.join(batch_dir, - 'batchscript_%s.sh' % name) - - batchscriptoutfile = batchscriptfile + '.o' - batchscripterrfile = batchscriptfile + '.e' - - with open(batchscriptfile, 'wt') as batchfp: + name = ".".join(name.split(".")[:-1]) + batchscript = "\n".join( + (template, "%s %s" % (sys.executable, pyscript)) + ) + batchscriptfile = os.path.join( + batch_dir, "batchscript_%s.sh" % name + ) + + batchscriptoutfile = batchscriptfile + ".o" + batchscripterrfile = batchscriptfile + ".e" + + with open(batchscriptfile, "wt") as batchfp: batchfp.writelines(batchscript) batchfp.close() - deps = '' + deps = "" if idx in dependencies: - values = ' ' + values = " " for jobid in dependencies[idx]: # Avoid dependancies of done jobs - if not self._dont_resubmit_completed_jobs or not cache_doneness_per_node[jobid]: + if ( + not self._dont_resubmit_completed_jobs + or not cache_doneness_per_node[jobid] + ): values += "${{{0}}},".format( - make_job_name(jobid, nodes)) - if values != ' ': # i.e. if some jobs were added to dependency list - values = values.rstrip(',') - deps = '-hold_jid%s' % values + make_job_name(jobid, nodes) + ) + if ( + values != " " + ): # i.e. 
if some jobs were added to dependency list + values = values.rstrip(",") + deps = "-hold_jid%s" % values jobname = make_job_name(idx, nodes) # Do not use default output locations if they are set in self._qsub_args - stderrFile = '' - if self._qsub_args.count('-e ') == 0: - stderrFile = '-e {errFile}'.format( - errFile=batchscripterrfile) - stdoutFile = '' - if self._qsub_args.count('-o ') == 0: - stdoutFile = '-o {outFile}'.format( - outFile=batchscriptoutfile) - full_line = '{jobNm}=$(qsub {outFileOption} {errFileOption} {extraQSubArgs} {dependantIndex} -N {jobNm} {batchscript} | awk \'{{print $3}}\')\n'.format( + stderrFile = "" + if self._qsub_args.count("-e ") == 0: + stderrFile = "-e {errFile}".format(errFile=batchscripterrfile) + stdoutFile = "" + if self._qsub_args.count("-o ") == 0: + stdoutFile = "-o {outFile}".format(outFile=batchscriptoutfile) + full_line = "{jobNm}=$(qsub {outFileOption} {errFileOption} {extraQSubArgs} {dependantIndex} -N {jobNm} {batchscript} | awk '{{print $3}}')\n".format( jobNm=jobname, outFileOption=stdoutFile, errFileOption=stderrFile, extraQSubArgs=qsub_args, dependantIndex=deps, - batchscript=batchscriptfile) + batchscript=batchscriptfile, + ) fp.writelines(full_line) cmd = CommandLine( - 'bash', + "bash", environ=dict(os.environ), resource_monitor=False, - terminal_output='allatonce') - cmd.inputs.args = '%s' % submitjobsfile + terminal_output="allatonce", + ) + cmd.inputs.args = "%s" % submitjobsfile cmd.run() - logger.info('submitted all jobs to queue') + logger.info("submitted all jobs to queue") diff --git a/nipype/pipeline/plugins/slurm.py b/nipype/pipeline/plugins/slurm.py index ffcbc42b63..44cdac70d5 100644 --- a/nipype/pipeline/plugins/slurm.py +++ b/nipype/pipeline/plugins/slurm.py @@ -1,10 +1,10 @@ -''' +""" Created on Aug 2, 2013 @author: chadcumba Parallel workflow execution with SLURM -''' +""" import os import re from time import sleep @@ -13,11 +13,11 @@ from ...interfaces.base import CommandLine from .base import SGELikeBatchManagerBase, logger -iflogger = logging.getLogger('nipype.interface') +iflogger = logging.getLogger("nipype.interface") class SLURMPlugin(SGELikeBatchManagerBase): - ''' + """ Execute using SLURM The plugin_args input to run can be used to control the SLURM execution. 
@@ -28,7 +28,7 @@ class SLURMPlugin(SGELikeBatchManagerBase): - sbatch_args: arguments to pass prepend to the sbatch call - ''' + """ def __init__(self, **kwargs): @@ -40,42 +40,46 @@ def __init__(self, **kwargs): self._sbatch_args = None self._jobid_re = "Submitted batch job ([0-9]*)" - if 'plugin_args' in kwargs and kwargs['plugin_args']: - if 'retry_timeout' in kwargs['plugin_args']: - self._retry_timeout = kwargs['plugin_args']['retry_timeout'] - if 'max_tries' in kwargs['plugin_args']: - self._max_tries = kwargs['plugin_args']['max_tries'] - if 'jobid_re' in kwargs['plugin_args']: - self._jobid_re = kwargs['plugin_args']['jobid_re'] - if 'template' in kwargs['plugin_args']: - self._template = kwargs['plugin_args']['template'] + if "plugin_args" in kwargs and kwargs["plugin_args"]: + if "retry_timeout" in kwargs["plugin_args"]: + self._retry_timeout = kwargs["plugin_args"]["retry_timeout"] + if "max_tries" in kwargs["plugin_args"]: + self._max_tries = kwargs["plugin_args"]["max_tries"] + if "jobid_re" in kwargs["plugin_args"]: + self._jobid_re = kwargs["plugin_args"]["jobid_re"] + if "template" in kwargs["plugin_args"]: + self._template = kwargs["plugin_args"]["template"] if os.path.isfile(self._template): with open(self._template) as f: self._template = f.read() - if 'sbatch_args' in kwargs['plugin_args']: - self._sbatch_args = kwargs['plugin_args']['sbatch_args'] + if "sbatch_args" in kwargs["plugin_args"]: + self._sbatch_args = kwargs["plugin_args"]["sbatch_args"] self._pending = {} super(SLURMPlugin, self).__init__(self._template, **kwargs) def _is_pending(self, taskid): try: res = CommandLine( - 'squeue', - args=' '.join(['-j', '%s' % taskid]), + "squeue", + args=" ".join(["-j", "%s" % taskid]), resource_monitor=False, - terminal_output='allatonce').run() + terminal_output="allatonce", + ).run() return res.runtime.stdout.find(str(taskid)) > -1 except RuntimeError as e: - if any(ss in str(e) for ss - in ['Socket timed out', 'not available at the moment']): + if any( + ss in str(e) + for ss in ["Socket timed out", "not available at the moment"] + ): # do not raise error and allow recheck logger.warning( "SLURM timeout encountered while checking job status," - " treating job %d as pending", taskid + " treating job %d as pending", + taskid, ) return True - if 'Invalid job id' not in str(e): - raise(e) + if "Invalid job id" not in str(e): + raise (e) return False def _submit_batchtask(self, scriptfile, node): @@ -85,38 +89,35 @@ def _submit_batchtask(self, scriptfile, node): formatting/processing """ cmd = CommandLine( - 'sbatch', + "sbatch", environ=dict(os.environ), resource_monitor=False, - terminal_output='allatonce') + terminal_output="allatonce", + ) path = os.path.dirname(scriptfile) - sbatch_args = '' + sbatch_args = "" if self._sbatch_args: sbatch_args = self._sbatch_args - if 'sbatch_args' in node.plugin_args: - if 'overwrite' in node.plugin_args and\ - node.plugin_args['overwrite']: - sbatch_args = node.plugin_args['sbatch_args'] + if "sbatch_args" in node.plugin_args: + if "overwrite" in node.plugin_args and node.plugin_args["overwrite"]: + sbatch_args = node.plugin_args["sbatch_args"] else: - sbatch_args += (" " + node.plugin_args['sbatch_args']) - if '-o' not in sbatch_args: - sbatch_args = '%s -o %s' % (sbatch_args, - os.path.join(path, 'slurm-%j.out')) - if '-e' not in sbatch_args: - sbatch_args = '%s -e %s' % (sbatch_args, - os.path.join(path, 'slurm-%j.out')) + sbatch_args += " " + node.plugin_args["sbatch_args"] + if "-o" not in sbatch_args: + sbatch_args = "%s -o 
%s" % (sbatch_args, os.path.join(path, "slurm-%j.out")) + if "-e" not in sbatch_args: + sbatch_args = "%s -e %s" % (sbatch_args, os.path.join(path, "slurm-%j.out")) if node._hierarchy: - jobname = '.'.join((dict(os.environ)['LOGNAME'], node._hierarchy, - node._id)) + jobname = ".".join((dict(os.environ)["LOGNAME"], node._hierarchy, node._id)) else: - jobname = '.'.join((dict(os.environ)['LOGNAME'], node._id)) - jobnameitems = jobname.split('.') + jobname = ".".join((dict(os.environ)["LOGNAME"], node._id)) + jobnameitems = jobname.split(".") jobnameitems.reverse() - jobname = '.'.join(jobnameitems) - cmd.inputs.args = '%s -J %s %s' % (sbatch_args, jobname, scriptfile) + jobname = ".".join(jobnameitems) + cmd.inputs.args = "%s -J %s %s" % (sbatch_args, jobname, scriptfile) oldlevel = iflogger.level - iflogger.setLevel(logging.getLevelName('CRITICAL')) + iflogger.setLevel(logging.getLevelName("CRITICAL")) tries = 0 while True: try: @@ -128,17 +129,22 @@ def _submit_batchtask(self, scriptfile, node): sleep(self._retry_timeout) else: iflogger.setLevel(oldlevel) - raise RuntimeError('\n'.join( - (('Could not submit sbatch task' - ' for node %s') % node._id, str(e)))) + raise RuntimeError( + "\n".join( + ( + ("Could not submit sbatch task" " for node %s") + % node._id, + str(e), + ) + ) + ) else: break - logger.debug('Ran command ({0})'.format(cmd.cmdline)) + logger.debug("Ran command ({0})".format(cmd.cmdline)) iflogger.setLevel(oldlevel) # retrieve taskid - lines = [line for line in result.runtime.stdout.split('\n') if line] + lines = [line for line in result.runtime.stdout.split("\n") if line] taskid = int(re.match(self._jobid_re, lines[-1]).groups()[0]) self._pending[taskid] = node.output_dir() - logger.debug('submitted sbatch task: %d for node %s' % (taskid, - node._id)) + logger.debug("submitted sbatch task: %d for node %s" % (taskid, node._id)) return taskid diff --git a/nipype/pipeline/plugins/slurmgraph.py b/nipype/pipeline/plugins/slurmgraph.py index 1b62177457..9468c76ba1 100644 --- a/nipype/pipeline/plugins/slurmgraph.py +++ b/nipype/pipeline/plugins/slurmgraph.py @@ -5,7 +5,7 @@ import sys from ...interfaces.base import CommandLine -from .base import (GraphPluginBase, logger) +from .base import GraphPluginBase, logger def node_completed_status(checknode): @@ -15,15 +15,15 @@ def node_completed_status(checknode): :return: boolean value True indicates that the node does not need to be run. 
""" """ TODO: place this in the base.py file and refactor """ - node_state_does_not_require_overwrite = ( - checknode.overwrite is False or - (checknode.overwrite is None and not checknode._interface.always_run)) + node_state_does_not_require_overwrite = checknode.overwrite is False or ( + checknode.overwrite is None and not checknode._interface.always_run + ) hash_exists = False try: hash_exists, _, _, _ = checknode.hash_exists() except Exception: hash_exists = False - return (hash_exists and node_state_does_not_require_overwrite) + return hash_exists and node_state_does_not_require_overwrite class SLURMGraphPlugin(GraphPluginBase): @@ -37,24 +37,26 @@ class SLURMGraphPlugin(GraphPluginBase): qsub call """ + _template = "#!/bin/bash" def __init__(self, **kwargs): - self._sbatch_args = '' - if 'plugin_args' in kwargs and kwargs['plugin_args']: - if 'retry_timeout' in kwargs['plugin_args']: - self._retry_timeout = kwargs['plugin_args']['retry_timeout'] - if 'max_tries' in kwargs['plugin_args']: - self._max_tries = kwargs['plugin_args']['max_tries'] - if 'template' in kwargs['plugin_args']: - self._template = kwargs['plugin_args']['template'] + self._sbatch_args = "" + if "plugin_args" in kwargs and kwargs["plugin_args"]: + if "retry_timeout" in kwargs["plugin_args"]: + self._retry_timeout = kwargs["plugin_args"]["retry_timeout"] + if "max_tries" in kwargs["plugin_args"]: + self._max_tries = kwargs["plugin_args"]["max_tries"] + if "template" in kwargs["plugin_args"]: + self._template = kwargs["plugin_args"]["template"] if os.path.isfile(self._template): self._template = open(self._template).read() - if 'sbatch_args' in kwargs['plugin_args']: - self._sbatch_args = kwargs['plugin_args']['sbatch_args'] - if 'dont_resubmit_completed_jobs' in kwargs['plugin_args']: - self._dont_resubmit_completed_jobs = kwargs['plugin_args'][ - 'dont_resubmit_completed_jobs'] + if "sbatch_args" in kwargs["plugin_args"]: + self._sbatch_args = kwargs["plugin_args"]["sbatch_args"] + if "dont_resubmit_completed_jobs" in kwargs["plugin_args"]: + self._dont_resubmit_completed_jobs = kwargs["plugin_args"][ + "dont_resubmit_completed_jobs" + ] else: self._dont_resubmit_completed_jobs = False super(SLURMGraphPlugin, self).__init__(**kwargs) @@ -66,17 +68,18 @@ def make_job_name(jobnumber, nodeslist): - nodeslist: The name of the node being processed - return: A string representing this job to be displayed by SLURM """ - job_name = 'j{0}_{1}'.format(jobnumber, nodeslist[jobnumber]._id) + job_name = "j{0}_{1}".format(jobnumber, nodeslist[jobnumber]._id) # Condition job_name to be a valid bash identifier (i.e. 
- is invalid) - job_name = job_name.replace('-', '_').replace('.', '_').replace( - ':', '_') + job_name = job_name.replace("-", "_").replace(".", "_").replace(":", "_") return job_name batch_dir, _ = os.path.split(pyfiles[0]) - submitjobsfile = os.path.join(batch_dir, 'submit_jobs.sh') + submitjobsfile = os.path.join(batch_dir, "submit_jobs.sh") cache_doneness_per_node = dict() - if self._dont_resubmit_completed_jobs: # A future parameter for controlling this behavior could be added here + if ( + self._dont_resubmit_completed_jobs + ): # A future parameter for controlling this behavior could be added here for idx, pyscript in enumerate(pyfiles): node = nodes[idx] node_status_done = node_completed_status(node) @@ -86,73 +89,80 @@ def make_job_name(jobnumber, nodeslist): if node_status_done and idx in dependencies: for child_idx in dependencies[idx]: if child_idx in cache_doneness_per_node: - child_status_done = cache_doneness_per_node[ - child_idx] + child_status_done = cache_doneness_per_node[child_idx] else: - child_status_done = node_completed_status( - nodes[child_idx]) + child_status_done = node_completed_status(nodes[child_idx]) node_status_done = node_status_done and child_status_done cache_doneness_per_node[idx] = node_status_done - with open(submitjobsfile, 'wt') as fp: - fp.writelines('#!/usr/bin/env bash\n') - fp.writelines('# Condense format attempted\n') + with open(submitjobsfile, "wt") as fp: + fp.writelines("#!/usr/bin/env bash\n") + fp.writelines("# Condense format attempted\n") for idx, pyscript in enumerate(pyfiles): node = nodes[idx] if cache_doneness_per_node.get(idx, False): continue else: template, sbatch_args = self._get_args( - node, ["template", "sbatch_args"]) + node, ["template", "sbatch_args"] + ) batch_dir, name = os.path.split(pyscript) - name = '.'.join(name.split('.')[:-1]) - batchscript = '\n'.join( - (template, '%s %s' % (sys.executable, pyscript))) - batchscriptfile = os.path.join(batch_dir, - 'batchscript_%s.sh' % name) - - batchscriptoutfile = batchscriptfile + '.o' - batchscripterrfile = batchscriptfile + '.e' - - with open(batchscriptfile, 'wt') as batchfp: + name = ".".join(name.split(".")[:-1]) + batchscript = "\n".join( + (template, "%s %s" % (sys.executable, pyscript)) + ) + batchscriptfile = os.path.join( + batch_dir, "batchscript_%s.sh" % name + ) + + batchscriptoutfile = batchscriptfile + ".o" + batchscripterrfile = batchscriptfile + ".e" + + with open(batchscriptfile, "wt") as batchfp: batchfp.writelines(batchscript) batchfp.close() - deps = '' + deps = "" if idx in dependencies: - values = '' + values = "" for jobid in dependencies[idx]: # Avoid dependancies of done jobs - if not self._dont_resubmit_completed_jobs or not cache_doneness_per_node[jobid]: + if ( + not self._dont_resubmit_completed_jobs + or not cache_doneness_per_node[jobid] + ): values += "${{{0}}}:".format( - make_job_name(jobid, nodes)) - if values != '': # i.e. if some jobs were added to dependency list - values = values.rstrip(':') - deps = '--dependency=afterok:%s' % values + make_job_name(jobid, nodes) + ) + if ( + values != "" + ): # i.e. 
if some jobs were added to dependency list + values = values.rstrip(":") + deps = "--dependency=afterok:%s" % values jobname = make_job_name(idx, nodes) # Do not use default output locations if they are set in self._sbatch_args - stderrFile = '' - if self._sbatch_args.count('-e ') == 0: - stderrFile = '-e {errFile}'.format( - errFile=batchscripterrfile) - stdoutFile = '' - if self._sbatch_args.count('-o ') == 0: - stdoutFile = '-o {outFile}'.format( - outFile=batchscriptoutfile) - full_line = '{jobNm}=$(sbatch {outFileOption} {errFileOption} {extraSBatchArgs} {dependantIndex} -J {jobNm} {batchscript} | awk \'/^Submitted/ {{print $4}}\')\n'.format( + stderrFile = "" + if self._sbatch_args.count("-e ") == 0: + stderrFile = "-e {errFile}".format(errFile=batchscripterrfile) + stdoutFile = "" + if self._sbatch_args.count("-o ") == 0: + stdoutFile = "-o {outFile}".format(outFile=batchscriptoutfile) + full_line = "{jobNm}=$(sbatch {outFileOption} {errFileOption} {extraSBatchArgs} {dependantIndex} -J {jobNm} {batchscript} | awk '/^Submitted/ {{print $4}}')\n".format( jobNm=jobname, outFileOption=stdoutFile, errFileOption=stderrFile, extraSBatchArgs=sbatch_args, dependantIndex=deps, - batchscript=batchscriptfile) + batchscript=batchscriptfile, + ) fp.writelines(full_line) cmd = CommandLine( - 'bash', + "bash", environ=dict(os.environ), resource_monitor=False, - terminal_output='allatonce') - cmd.inputs.args = '%s' % submitjobsfile + terminal_output="allatonce", + ) + cmd.inputs.args = "%s" % submitjobsfile cmd.run() - logger.info('submitted all jobs to queue') + logger.info("submitted all jobs to queue") diff --git a/nipype/pipeline/plugins/somaflow.py b/nipype/pipeline/plugins/somaflow.py index e31a901169..d621c7967a 100644 --- a/nipype/pipeline/plugins/somaflow.py +++ b/nipype/pipeline/plugins/somaflow.py @@ -5,12 +5,11 @@ import os import sys -from .base import (GraphPluginBase, logger) +from .base import GraphPluginBase, logger soma_not_loaded = False try: - from soma.workflow.client import (Job, Workflow, WorkflowController, - Helper) + from soma.workflow.client import Job, Workflow, WorkflowController, Helper except: soma_not_loaded = True @@ -21,7 +20,7 @@ class SomaFlowPlugin(GraphPluginBase): def __init__(self, plugin_args=None): if soma_not_loaded: - raise ImportError('SomaFlow could not be imported') + raise ImportError("SomaFlow could not be imported") super(SomaFlowPlugin, self).__init__(plugin_args=plugin_args) def _submit_graph(self, pyfiles, dependencies, nodes): @@ -35,9 +34,9 @@ def _submit_graph(self, pyfiles, dependencies, nodes): soma_deps.append((jobs[val], jobs[key])) wf = Workflow(jobs, soma_deps) - logger.info('serializing workflow') - Helper.serialize('workflow', wf) + logger.info("serializing workflow") + Helper.serialize("workflow", wf) controller = WorkflowController() - logger.info('submitting workflow') + logger.info("submitting workflow") wf_id = controller.submit_workflow(wf) Helper.wait_workflow(wf_id, controller) diff --git a/nipype/pipeline/plugins/tests/test_base.py b/nipype/pipeline/plugins/tests/test_base.py index cdc55b668b..fddcfa2368 100644 --- a/nipype/pipeline/plugins/tests/test_base.py +++ b/nipype/pipeline/plugins/tests/test_base.py @@ -14,7 +14,7 @@ def test_scipy_sparse(): assert foo[0, 1] == 0 -''' +""" Can use the following code to test that a mapnode crash continues successfully Need to put this into a unit-test with a timeout @@ -38,4 +38,4 @@ def func(arg1): wf.base_dir = '/tmp' wf.run(plugin='MultiProc') -''' +""" diff --git 
a/nipype/pipeline/plugins/tests/test_callback.py b/nipype/pipeline/plugins/tests/test_callback.py index b7eb827b74..8baa356fdd 100644 --- a/nipype/pipeline/plugins/tests/test_callback.py +++ b/nipype/pipeline/plugins/tests/test_callback.py @@ -25,42 +25,39 @@ def callback(self, node, status, result=None): self.statuses.append((node.name, status)) -@pytest.mark.parametrize("plugin", ['Linear', 'MultiProc', 'LegacyMultiProc']) +@pytest.mark.parametrize("plugin", ["Linear", "MultiProc", "LegacyMultiProc"]) def test_callback_normal(tmpdir, plugin): tmpdir.chdir() so = Status() - wf = pe.Workflow(name='test', base_dir=tmpdir.strpath) + wf = pe.Workflow(name="test", base_dir=tmpdir.strpath) f_node = pe.Node( - niu.Function(function=func, input_names=[], output_names=[]), - name='f_node') + niu.Function(function=func, input_names=[], output_names=[]), name="f_node" + ) wf.add_nodes([f_node]) - wf.config['execution'] = { - 'crashdump_dir': wf.base_dir, - 'poll_sleep_duration': 2 - } - wf.run(plugin=plugin, plugin_args={'status_callback': so.callback}) - assert so.statuses == [('f_node', 'start'), ('f_node', 'end')] + wf.config["execution"] = {"crashdump_dir": wf.base_dir, "poll_sleep_duration": 2} + wf.run(plugin=plugin, plugin_args={"status_callback": so.callback}) + assert so.statuses == [("f_node", "start"), ("f_node", "end")] -@pytest.mark.parametrize("plugin", ['Linear', 'MultiProc', 'LegacyMultiProc']) +@pytest.mark.parametrize("plugin", ["Linear", "MultiProc", "LegacyMultiProc"]) @pytest.mark.parametrize("stop_on_first_crash", [False, True]) def test_callback_exception(tmpdir, plugin, stop_on_first_crash): tmpdir.chdir() so = Status() - wf = pe.Workflow(name='test', base_dir=tmpdir.strpath) + wf = pe.Workflow(name="test", base_dir=tmpdir.strpath) f_node = pe.Node( - niu.Function(function=bad_func, input_names=[], output_names=[]), - name='f_node') + niu.Function(function=bad_func, input_names=[], output_names=[]), name="f_node" + ) wf.add_nodes([f_node]) - wf.config['execution'] = { - 'crashdump_dir': wf.base_dir, - 'stop_on_first_crash': stop_on_first_crash, - 'poll_sleep_duration': 2 + wf.config["execution"] = { + "crashdump_dir": wf.base_dir, + "stop_on_first_crash": stop_on_first_crash, + "poll_sleep_duration": 2, } with pytest.raises(Exception): - wf.run(plugin=plugin, plugin_args={'status_callback': so.callback}) + wf.run(plugin=plugin, plugin_args={"status_callback": so.callback}) sleep(0.5) # Wait for callback to be called (python 2.7) - assert so.statuses == [('f_node', 'start'), ('f_node', 'exception')] + assert so.statuses == [("f_node", "start"), ("f_node", "exception")] diff --git a/nipype/pipeline/plugins/tests/test_debug.py b/nipype/pipeline/plugins/tests/test_debug.py index bd06ecb775..82361a0228 100644 --- a/nipype/pipeline/plugins/tests/test_debug.py +++ b/nipype/pipeline/plugins/tests/test_debug.py @@ -7,12 +7,12 @@ class InputSpec(nib.TraitedSpec): - input1 = nib.traits.Int(desc='a random int') - input2 = nib.traits.Int(desc='a random int') + input1 = nib.traits.Int(desc="a random int") + input2 = nib.traits.Int(desc="a random int") class OutputSpec(nib.TraitedSpec): - output1 = nib.traits.List(nib.traits.Int, desc='outputs') + output1 = nib.traits.List(nib.traits.Int, desc="outputs") class DebugTestInterface(nib.BaseInterface): @@ -25,7 +25,7 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self._outputs().get() - outputs['output1'] = [1, self.inputs.input1] + outputs["output1"] = [1, self.inputs.input1] return outputs @@ -36,11 +36,11 @@ def 
callme(node, graph): def test_debug(tmpdir): tmpdir.chdir() - pipe = pe.Workflow(name='pipe') - mod1 = pe.Node(DebugTestInterface(), name='mod1') - mod2 = pe.MapNode(DebugTestInterface(), iterfield=['input1'], name='mod2') + pipe = pe.Workflow(name="pipe") + mod1 = pe.Node(DebugTestInterface(), name="mod1") + mod2 = pe.MapNode(DebugTestInterface(), iterfield=["input1"], name="mod2") - pipe.connect([(mod1, mod2, [('output1', 'input1')])]) + pipe.connect([(mod1, mod2, [("output1", "input1")])]) pipe.base_dir = os.getcwd() mod1.inputs.input1 = 1 @@ -50,8 +50,8 @@ def test_debug(tmpdir): exc = None try: - pipe.run(plugin="Debug", plugin_args={'callable': callme}) + pipe.run(plugin="Debug", plugin_args={"callable": callme}) except Exception as e: exc = e - assert exc is None, 'unexpected exception caught' + assert exc is None, "unexpected exception caught" diff --git a/nipype/pipeline/plugins/tests/test_legacymultiproc_nondaemon.py b/nipype/pipeline/plugins/tests/test_legacymultiproc_nondaemon.py index 3c4e673f55..f490729485 100644 --- a/nipype/pipeline/plugins/tests/test_legacymultiproc_nondaemon.py +++ b/nipype/pipeline/plugins/tests/test_legacymultiproc_nondaemon.py @@ -15,9 +15,9 @@ def mytestFunction(insum=0): - ''' + """ Run a multiprocessing job and spawn child processes. - ''' + """ # need to import here since this is executed as an external process import multiprocessing @@ -37,16 +37,16 @@ def mytestFunction(insum=0): f = [None] * numberOfThreads def dummyFunction(filename): - ''' + """ This function writes the value 45 to the given filename. - ''' + """ j = 0 for i in range(0, 10): j += i # j is now 45 (0+1+2+3+4+5+6+7+8+9) - with open(filename, 'w') as f: + with open(filename, "w") as f: f.write(str(j)) for n in range(numberOfThreads): @@ -55,9 +55,9 @@ def dummyFunction(filename): a[n] = True # create a temp file to use as the data exchange container - tmpFile = tempfile.mkstemp('.txt', 'test_engine_')[1] + tmpFile = tempfile.mkstemp(".txt", "test_engine_")[1] f[n] = tmpFile # keep track of the temp file - t[n] = multiprocessing.Process(target=dummyFunction, args=(tmpFile, )) + t[n] = multiprocessing.Process(target=dummyFunction, args=(tmpFile,)) # fire up the job t[n].start() @@ -88,65 +88,61 @@ def dummyFunction(filename): def run_multiproc_nondaemon_with_flag(nondaemon_flag): - ''' + """ Start a pipe with two nodes using the resource multiproc plugin and passing the nondaemon_flag. 
- ''' + """ cur_dir = os.getcwd() - temp_dir = mkdtemp(prefix='test_engine_') + temp_dir = mkdtemp(prefix="test_engine_") os.chdir(temp_dir) - pipe = pe.Workflow(name='pipe') + pipe = pe.Workflow(name="pipe") f1 = pe.Node( interface=Function( - function=mytestFunction, - input_names=['insum'], - output_names=['sum_out']), - name='f1') + function=mytestFunction, input_names=["insum"], output_names=["sum_out"] + ), + name="f1", + ) f2 = pe.Node( interface=Function( - function=mytestFunction, - input_names=['insum'], - output_names=['sum_out']), - name='f2') + function=mytestFunction, input_names=["insum"], output_names=["sum_out"] + ), + name="f2", + ) - pipe.connect([(f1, f2, [('sum_out', 'insum')])]) + pipe.connect([(f1, f2, [("sum_out", "insum")])]) pipe.base_dir = os.getcwd() f1.inputs.insum = 0 - pipe.config['execution']['stop_on_first_crash'] = True + pipe.config["execution"]["stop_on_first_crash"] = True # execute the pipe using the LegacyMultiProc plugin with 2 processes and the # non_daemon flag to enable child processes which start other # multiprocessing jobs execgraph = pipe.run( plugin="LegacyMultiProc", - plugin_args={ - 'n_procs': 2, - 'non_daemon': nondaemon_flag - }) - - names = [ - '.'.join((node._hierarchy, node.name)) for node in execgraph.nodes() - ] - node = list(execgraph.nodes())[names.index('pipe.f2')] - result = node.get_output('sum_out') + plugin_args={"n_procs": 2, "non_daemon": nondaemon_flag}, + ) + + names = [".".join((node._hierarchy, node.name)) for node in execgraph.nodes()] + node = list(execgraph.nodes())[names.index("pipe.f2")] + result = node.get_output("sum_out") os.chdir(cur_dir) rmtree(temp_dir) return result def test_run_multiproc_nondaemon_false(): - ''' + """ This is the entry point for the test. Two times a pipe of several multiprocessing jobs gets executed. First, without the nondaemon flag. Second, with the nondaemon flag. Since the processes of the pipe start child processes, the execution only succeeds when the non_daemon flag is on. 
- ''' + """ shouldHaveFailed = False try: # with nondaemon_flag = False, the execution should fail diff --git a/nipype/pipeline/plugins/tests/test_linear.py b/nipype/pipeline/plugins/tests/test_linear.py index 6484432baa..9ccb5157fc 100644 --- a/nipype/pipeline/plugins/tests/test_linear.py +++ b/nipype/pipeline/plugins/tests/test_linear.py @@ -6,12 +6,12 @@ class InputSpec(nib.TraitedSpec): - input1 = nib.traits.Int(desc='a random int') - input2 = nib.traits.Int(desc='a random int') + input1 = nib.traits.Int(desc="a random int") + input2 = nib.traits.Int(desc="a random int") class OutputSpec(nib.TraitedSpec): - output1 = nib.traits.List(nib.traits.Int, desc='outputs') + output1 = nib.traits.List(nib.traits.Int, desc="outputs") class LinearTestInterface(nib.BaseInterface): @@ -24,24 +24,23 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self._outputs().get() - outputs['output1'] = [1, self.inputs.input1] + outputs["output1"] = [1, self.inputs.input1] return outputs def test_run_in_series(tmpdir): tmpdir.chdir() - pipe = pe.Workflow(name='pipe') - mod1 = pe.Node(interface=LinearTestInterface(), name='mod1') + pipe = pe.Workflow(name="pipe") + mod1 = pe.Node(interface=LinearTestInterface(), name="mod1") mod2 = pe.MapNode( - interface=LinearTestInterface(), iterfield=['input1'], name='mod2') - pipe.connect([(mod1, mod2, [('output1', 'input1')])]) + interface=LinearTestInterface(), iterfield=["input1"], name="mod2" + ) + pipe.connect([(mod1, mod2, [("output1", "input1")])]) pipe.base_dir = os.getcwd() mod1.inputs.input1 = 1 execgraph = pipe.run(plugin="Linear") - names = [ - '.'.join((node._hierarchy, node.name)) for node in execgraph.nodes() - ] - node = list(execgraph.nodes())[names.index('pipe.mod1')] - result = node.get_output('output1') + names = [".".join((node._hierarchy, node.name)) for node in execgraph.nodes()] + node = list(execgraph.nodes())[names.index("pipe.mod1")] + result = node.get_output("output1") assert result == [1, 1] diff --git a/nipype/pipeline/plugins/tests/test_multiproc.py b/nipype/pipeline/plugins/tests/test_multiproc.py index 7ba9001c39..2e8967cfbe 100644 --- a/nipype/pipeline/plugins/tests/test_multiproc.py +++ b/nipype/pipeline/plugins/tests/test_multiproc.py @@ -11,12 +11,12 @@ class InputSpec(nib.TraitedSpec): - input1 = nib.traits.Int(desc='a random int') - input2 = nib.traits.Int(desc='a random int') + input1 = nib.traits.Int(desc="a random int") + input2 = nib.traits.Int(desc="a random int") class OutputSpec(nib.TraitedSpec): - output1 = nib.traits.List(nib.traits.Int, desc='outputs') + output1 = nib.traits.List(nib.traits.Int, desc="outputs") class MultiprocTestInterface(nib.BaseInterface): @@ -29,35 +29,34 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self._outputs().get() - outputs['output1'] = [1, self.inputs.input1] + outputs["output1"] = [1, self.inputs.input1] return outputs def test_run_multiproc(tmpdir): tmpdir.chdir() - pipe = pe.Workflow(name='pipe') - mod1 = pe.Node(MultiprocTestInterface(), name='mod1') - mod2 = pe.MapNode( - MultiprocTestInterface(), iterfield=['input1'], name='mod2') - pipe.connect([(mod1, mod2, [('output1', 'input1')])]) + pipe = pe.Workflow(name="pipe") + mod1 = pe.Node(MultiprocTestInterface(), name="mod1") + mod2 = pe.MapNode(MultiprocTestInterface(), iterfield=["input1"], name="mod2") + pipe.connect([(mod1, mod2, [("output1", "input1")])]) pipe.base_dir = os.getcwd() mod1.inputs.input1 = 1 - pipe.config['execution']['poll_sleep_duration'] = 2 + 
pipe.config["execution"]["poll_sleep_duration"] = 2 execgraph = pipe.run(plugin="MultiProc") names = [node.fullname for node in execgraph.nodes()] - node = list(execgraph.nodes())[names.index('pipe.mod1')] - result = node.get_output('output1') + node = list(execgraph.nodes())[names.index("pipe.mod1")] + result = node.get_output("output1") assert result == [1, 1] class InputSpecSingleNode(nib.TraitedSpec): - input1 = nib.traits.Int(desc='a random int') - input2 = nib.traits.Int(desc='a random int') + input1 = nib.traits.Int(desc="a random int") + input2 = nib.traits.Int(desc="a random int") class OutputSpecSingleNode(nib.TraitedSpec): - output1 = nib.traits.Int(desc='a random int') + output1 = nib.traits.Int(desc="a random int") class SingleNodeTestInterface(nib.BaseInterface): @@ -70,68 +69,65 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self._outputs().get() - outputs['output1'] = self.inputs.input1 + outputs["output1"] = self.inputs.input1 return outputs def test_no_more_memory_than_specified(tmpdir): tmpdir.chdir() - pipe = pe.Workflow(name='pipe') - n1 = pe.Node(SingleNodeTestInterface(), name='n1', mem_gb=1) - n2 = pe.Node(SingleNodeTestInterface(), name='n2', mem_gb=1) - n3 = pe.Node(SingleNodeTestInterface(), name='n3', mem_gb=1) - n4 = pe.Node(SingleNodeTestInterface(), name='n4', mem_gb=1) - - pipe.connect(n1, 'output1', n2, 'input1') - pipe.connect(n1, 'output1', n3, 'input1') - pipe.connect(n2, 'output1', n4, 'input1') - pipe.connect(n3, 'output1', n4, 'input2') + pipe = pe.Workflow(name="pipe") + n1 = pe.Node(SingleNodeTestInterface(), name="n1", mem_gb=1) + n2 = pe.Node(SingleNodeTestInterface(), name="n2", mem_gb=1) + n3 = pe.Node(SingleNodeTestInterface(), name="n3", mem_gb=1) + n4 = pe.Node(SingleNodeTestInterface(), name="n4", mem_gb=1) + + pipe.connect(n1, "output1", n2, "input1") + pipe.connect(n1, "output1", n3, "input1") + pipe.connect(n2, "output1", n4, "input1") + pipe.connect(n3, "output1", n4, "input2") n1.inputs.input1 = 1 max_memory = 0.5 with pytest.raises(RuntimeError): pipe.run( - plugin='MultiProc', - plugin_args={ - 'memory_gb': max_memory, - 'n_procs': 2 - }) + plugin="MultiProc", plugin_args={"memory_gb": max_memory, "n_procs": 2} + ) def test_no_more_threads_than_specified(tmpdir): tmpdir.chdir() - pipe = pe.Workflow(name='pipe') - n1 = pe.Node(SingleNodeTestInterface(), name='n1', n_procs=2) - n2 = pe.Node(SingleNodeTestInterface(), name='n2', n_procs=2) - n3 = pe.Node(SingleNodeTestInterface(), name='n3', n_procs=4) - n4 = pe.Node(SingleNodeTestInterface(), name='n4', n_procs=2) + pipe = pe.Workflow(name="pipe") + n1 = pe.Node(SingleNodeTestInterface(), name="n1", n_procs=2) + n2 = pe.Node(SingleNodeTestInterface(), name="n2", n_procs=2) + n3 = pe.Node(SingleNodeTestInterface(), name="n3", n_procs=4) + n4 = pe.Node(SingleNodeTestInterface(), name="n4", n_procs=2) - pipe.connect(n1, 'output1', n2, 'input1') - pipe.connect(n1, 'output1', n3, 'input1') - pipe.connect(n2, 'output1', n4, 'input1') - pipe.connect(n3, 'output1', n4, 'input2') + pipe.connect(n1, "output1", n2, "input1") + pipe.connect(n1, "output1", n3, "input1") + pipe.connect(n2, "output1", n4, "input1") + pipe.connect(n3, "output1", n4, "input2") n1.inputs.input1 = 4 max_threads = 2 with pytest.raises(RuntimeError): - pipe.run(plugin='MultiProc', plugin_args={'n_procs': max_threads}) + pipe.run(plugin="MultiProc", plugin_args={"n_procs": max_threads}) def test_hold_job_until_procs_available(tmpdir): tmpdir.chdir() - pipe = pe.Workflow(name='pipe') - n1 = 
pe.Node(SingleNodeTestInterface(), name='n1', n_procs=2) - n2 = pe.Node(SingleNodeTestInterface(), name='n2', n_procs=2) - n3 = pe.Node(SingleNodeTestInterface(), name='n3', n_procs=2) - n4 = pe.Node(SingleNodeTestInterface(), name='n4', n_procs=2) + pipe = pe.Workflow(name="pipe") + n1 = pe.Node(SingleNodeTestInterface(), name="n1", n_procs=2) + n2 = pe.Node(SingleNodeTestInterface(), name="n2", n_procs=2) + n3 = pe.Node(SingleNodeTestInterface(), name="n3", n_procs=2) + n4 = pe.Node(SingleNodeTestInterface(), name="n4", n_procs=2) - pipe.connect(n1, 'output1', n2, 'input1') - pipe.connect(n1, 'output1', n3, 'input1') - pipe.connect(n2, 'output1', n4, 'input1') - pipe.connect(n3, 'output1', n4, 'input2') + pipe.connect(n1, "output1", n2, "input1") + pipe.connect(n1, "output1", n3, "input1") + pipe.connect(n2, "output1", n4, "input1") + pipe.connect(n3, "output1", n4, "input2") n1.inputs.input1 = 4 max_threads = 2 - pipe.run(plugin='MultiProc', plugin_args={'n_procs': max_threads}) + pipe.run(plugin="MultiProc", plugin_args={"n_procs": max_threads}) diff --git a/nipype/pipeline/plugins/tests/test_oar.py b/nipype/pipeline/plugins/tests/test_oar.py index fd4f0b950c..1024daaef9 100644 --- a/nipype/pipeline/plugins/tests/test_oar.py +++ b/nipype/pipeline/plugins/tests/test_oar.py @@ -9,12 +9,12 @@ class InputSpec(nib.TraitedSpec): - input1 = nib.traits.Int(desc='a random int') - input2 = nib.traits.Int(desc='a random int') + input1 = nib.traits.Int(desc="a random int") + input2 = nib.traits.Int(desc="a random int") class OutputSpec(nib.TraitedSpec): - output1 = nib.traits.List(nib.traits.Int, desc='outputs') + output1 = nib.traits.List(nib.traits.Int, desc="outputs") class OarTestInterface(nib.BaseInterface): @@ -27,29 +27,26 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self._outputs().get() - outputs['output1'] = [1, self.inputs.input1] + outputs["output1"] = [1, self.inputs.input1] return outputs @pytest.mark.xfail(reason="not known") def test_run_oar(): cur_dir = os.getcwd() - temp_dir = mkdtemp(prefix='test_engine_', dir=os.getcwd()) + temp_dir = mkdtemp(prefix="test_engine_", dir=os.getcwd()) os.chdir(temp_dir) - pipe = pe.Workflow(name='pipe') - mod1 = pe.Node(interface=OarTestInterface(), name='mod1') - mod2 = pe.MapNode( - interface=OarTestInterface(), iterfield=['input1'], name='mod2') - pipe.connect([(mod1, mod2, [('output1', 'input1')])]) + pipe = pe.Workflow(name="pipe") + mod1 = pe.Node(interface=OarTestInterface(), name="mod1") + mod2 = pe.MapNode(interface=OarTestInterface(), iterfield=["input1"], name="mod2") + pipe.connect([(mod1, mod2, [("output1", "input1")])]) pipe.base_dir = os.getcwd() mod1.inputs.input1 = 1 execgraph = pipe.run(plugin="OAR") - names = [ - '.'.join((node._hierarchy, node.name)) for node in execgraph.nodes() - ] - node = list(execgraph.nodes())[names.index('pipe.mod1')] - result = node.get_output('output1') + names = [".".join((node._hierarchy, node.name)) for node in execgraph.nodes()] + node = list(execgraph.nodes())[names.index("pipe.mod1")] + result = node.get_output("output1") assert result == [1, 1] os.chdir(cur_dir) rmtree(temp_dir) diff --git a/nipype/pipeline/plugins/tests/test_pbs.py b/nipype/pipeline/plugins/tests/test_pbs.py index f6aa6c88e0..bb85443940 100644 --- a/nipype/pipeline/plugins/tests/test_pbs.py +++ b/nipype/pipeline/plugins/tests/test_pbs.py @@ -10,12 +10,12 @@ class InputSpec(nib.TraitedSpec): - input1 = nib.traits.Int(desc='a random int') - input2 = nib.traits.Int(desc='a random int') + input1 = 
nib.traits.Int(desc="a random int") + input2 = nib.traits.Int(desc="a random int") class OutputSpec(nib.TraitedSpec): - output1 = nib.traits.List(nib.traits.Int, desc='outputs') + output1 = nib.traits.List(nib.traits.Int, desc="outputs") class PbsTestInterface(nib.BaseInterface): @@ -28,29 +28,26 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self._outputs().get() - outputs['output1'] = [1, self.inputs.input1] + outputs["output1"] = [1, self.inputs.input1] return outputs @pytest.mark.xfail(reason="not known") def test_run_pbsgraph(): cur_dir = os.getcwd() - temp_dir = mkdtemp(prefix='test_engine_') + temp_dir = mkdtemp(prefix="test_engine_") os.chdir(temp_dir) - pipe = pe.Workflow(name='pipe') - mod1 = pe.Node(interface=PbsTestInterface(), name='mod1') - mod2 = pe.MapNode( - interface=PbsTestInterface(), iterfield=['input1'], name='mod2') - pipe.connect([(mod1, mod2, [('output1', 'input1')])]) + pipe = pe.Workflow(name="pipe") + mod1 = pe.Node(interface=PbsTestInterface(), name="mod1") + mod2 = pe.MapNode(interface=PbsTestInterface(), iterfield=["input1"], name="mod2") + pipe.connect([(mod1, mod2, [("output1", "input1")])]) pipe.base_dir = os.getcwd() mod1.inputs.input1 = 1 execgraph = pipe.run(plugin="PBSGraph") - names = [ - '.'.join((node._hierarchy, node.name)) for node in execgraph.nodes() - ] - node = list(execgraph.nodes())[names.index('pipe.mod1')] - result = node.get_output('output1') + names = [".".join((node._hierarchy, node.name)) for node in execgraph.nodes()] + node = list(execgraph.nodes())[names.index("pipe.mod1")] + result = node.get_output("output1") assert result == [1, 1] os.chdir(cur_dir) rmtree(temp_dir) diff --git a/nipype/pipeline/plugins/tests/test_somaflow.py b/nipype/pipeline/plugins/tests/test_somaflow.py index 68cefcdc17..5fe5935e1d 100644 --- a/nipype/pipeline/plugins/tests/test_somaflow.py +++ b/nipype/pipeline/plugins/tests/test_somaflow.py @@ -10,12 +10,12 @@ class InputSpec(nib.TraitedSpec): - input1 = nib.traits.Int(desc='a random int') - input2 = nib.traits.Int(desc='a random int') + input1 = nib.traits.Int(desc="a random int") + input2 = nib.traits.Int(desc="a random int") class OutputSpec(nib.TraitedSpec): - output1 = nib.traits.List(nib.traits.Int, desc='outputs') + output1 = nib.traits.List(nib.traits.Int, desc="outputs") class SomaTestInterface(nib.BaseInterface): @@ -28,7 +28,7 @@ def _run_interface(self, runtime): def _list_outputs(self): outputs = self._outputs().get() - outputs['output1'] = [1, self.inputs.input1] + outputs["output1"] = [1, self.inputs.input1] return outputs @@ -36,17 +36,14 @@ def _list_outputs(self): def test_run_somaflow(tmpdir): tmpdir.chdir() - pipe = pe.Workflow(name='pipe') - mod1 = pe.Node(interface=SomaTestInterface(), name='mod1') - mod2 = pe.MapNode( - interface=SomaTestInterface(), iterfield=['input1'], name='mod2') - pipe.connect([(mod1, mod2, [('output1', 'input1')])]) + pipe = pe.Workflow(name="pipe") + mod1 = pe.Node(interface=SomaTestInterface(), name="mod1") + mod2 = pe.MapNode(interface=SomaTestInterface(), iterfield=["input1"], name="mod2") + pipe.connect([(mod1, mod2, [("output1", "input1")])]) pipe.base_dir = os.getcwd() mod1.inputs.input1 = 1 execgraph = pipe.run(plugin="SomaFlow") - names = [ - '.'.join((node._hierarchy, node.name)) for node in execgraph.nodes() - ] - node = list(execgraph.nodes())[names.index('pipe.mod1')] - result = node.get_output('output1') + names = [".".join((node._hierarchy, node.name)) for node in execgraph.nodes()] + node = 
list(execgraph.nodes())[names.index("pipe.mod1")] + result = node.get_output("output1") assert result == [1, 1] diff --git a/nipype/pipeline/plugins/tests/test_tools.py b/nipype/pipeline/plugins/tests/test_tools.py index ee9b2e4de7..e34c937fc1 100644 --- a/nipype/pipeline/plugins/tests/test_tools.py +++ b/nipype/pipeline/plugins/tests/test_tools.py @@ -13,30 +13,28 @@ def test_report_crash(): - with mock.patch('pickle.dump', mock.MagicMock()) as mock_pickle_dump: - with mock.patch('nipype.pipeline.plugins.tools.format_exception', - mock.MagicMock()): # see iss 1517 + with mock.patch("pickle.dump", mock.MagicMock()) as mock_pickle_dump: + with mock.patch( + "nipype.pipeline.plugins.tools.format_exception", mock.MagicMock() + ): # see iss 1517 mock_pickle_dump.return_value = True - mock_node = mock.MagicMock(name='mock_node') - mock_node._id = 'an_id' + mock_node = mock.MagicMock(name="mock_node") + mock_node._id = "an_id" mock_node.config = { - 'execution': { - 'crashdump_dir': '.', - 'crashfile_format': 'pklz', - } + "execution": {"crashdump_dir": ".", "crashfile_format": "pklz",} } actual_crashfile = report_crash(mock_node) - expected_crashfile = re.compile( - '.*/crash-.*-an_id-[0-9a-f\-]*.pklz') + expected_crashfile = re.compile(".*/crash-.*-an_id-[0-9a-f\-]*.pklz") - assert expected_crashfile.match( - actual_crashfile).group() == actual_crashfile + assert ( + expected_crashfile.match(actual_crashfile).group() == actual_crashfile + ) assert mock_pickle_dump.call_count == 1 -''' +""" Can use the following code to test that a mapnode crash continues successfully Need to put this into a unit-test with a timeout @@ -60,4 +58,4 @@ def func(arg1): wf.base_dir = '/tmp' wf.run(plugin='MultiProc') -''' +""" diff --git a/nipype/pipeline/plugins/tools.py b/nipype/pipeline/plugins/tools.py index c06d5eea63..2bb31de564 100644 --- a/nipype/pipeline/plugins/tools.py +++ b/nipype/pipeline/plugins/tools.py @@ -14,7 +14,7 @@ from ... 
import logging from ...utils.filemanip import savepkl, crash2txt -logger = logging.getLogger('nipype.workflow') +logger = logging.getLogger("nipype.workflow") def report_crash(node, traceback=None, hostname=None): @@ -30,14 +30,20 @@ def report_crash(node, traceback=None, hostname=None): traceback += """ When creating this crashfile, the results file corresponding -to the node could not be found.""".splitlines(keepends=True) +to the node could not be found.""".splitlines( + keepends=True + ) except Exception as exc: traceback += """ During the creation of this crashfile triggered by the above exception, -another exception occurred:\n\n{}.""".format(exc).splitlines(keepends=True) +another exception occurred:\n\n{}.""".format( + exc + ).splitlines( + keepends=True + ) else: - if getattr(result, 'runtime', None): + if getattr(result, "runtime", None): if isinstance(result.runtime, list): host = result.runtime[0].hostname else: @@ -45,30 +51,28 @@ def report_crash(node, traceback=None, hostname=None): # Try everything to fill in the host host = host or hostname or gethostname() - logger.error('Node %s failed to run on host %s.', name, host) - timeofcrash = strftime('%Y%m%d-%H%M%S') + logger.error("Node %s failed to run on host %s.", name, host) + timeofcrash = strftime("%Y%m%d-%H%M%S") try: login_name = getpass.getuser() except KeyError: - login_name = 'UID{:d}'.format(os.getuid()) - crashfile = 'crash-%s-%s-%s-%s' % (timeofcrash, login_name, name, - str(uuid.uuid4())) - crashdir = node.config['execution'].get('crashdump_dir', os.getcwd()) + login_name = "UID{:d}".format(os.getuid()) + crashfile = "crash-%s-%s-%s-%s" % (timeofcrash, login_name, name, str(uuid.uuid4())) + crashdir = node.config["execution"].get("crashdump_dir", os.getcwd()) os.makedirs(crashdir, exist_ok=True) crashfile = os.path.join(crashdir, crashfile) - if node.config['execution']['crashfile_format'].lower() in ('text', 'txt', '.txt'): - crashfile += '.txt' + if node.config["execution"]["crashfile_format"].lower() in ("text", "txt", ".txt"): + crashfile += ".txt" else: - crashfile += '.pklz' + crashfile += ".pklz" - logger.error('Saving crash info to %s\n%s', crashfile, ''.join(traceback)) - if crashfile.endswith('.txt'): + logger.error("Saving crash info to %s\n%s", crashfile, "".join(traceback)) + if crashfile.endswith(".txt"): crash2txt(crashfile, dict(node=node, traceback=traceback)) else: - savepkl(crashfile, dict(node=node, traceback=traceback), - versioning=True) + savepkl(crashfile, dict(node=node, traceback=traceback), versioning=True) return crashfile @@ -81,30 +85,32 @@ def report_nodes_not_run(notrun): if notrun: logger.info("***********************************") for info in notrun: - logger.error("could not run node: %s" % '.'.join( - (info['node']._hierarchy, info['node']._id))) - logger.info("crashfile: %s" % info['crashfile']) + logger.error( + "could not run node: %s" + % ".".join((info["node"]._hierarchy, info["node"]._id)) + ) + logger.info("crashfile: %s" % info["crashfile"]) logger.debug("The following dependent nodes were not run") - for subnode in info['dependents']: + for subnode in info["dependents"]: logger.debug(subnode._id) logger.info("***********************************") - raise RuntimeError(('Workflow did not execute cleanly. ' - 'Check log for details')) + raise RuntimeError( + ("Workflow did not execute cleanly. 
" "Check log for details") + ) def create_pyscript(node, updatehash=False, store_exception=True): # pickle node - timestamp = strftime('%Y%m%d_%H%M%S') + timestamp = strftime("%Y%m%d_%H%M%S") if node._hierarchy: - suffix = '%s_%s_%s' % (timestamp, node._hierarchy, node._id) - batch_dir = os.path.join(node.base_dir, - node._hierarchy.split('.')[0], 'batch') + suffix = "%s_%s_%s" % (timestamp, node._hierarchy, node._id) + batch_dir = os.path.join(node.base_dir, node._hierarchy.split(".")[0], "batch") else: - suffix = '%s_%s' % (timestamp, node._id) - batch_dir = os.path.join(node.base_dir, 'batch') + suffix = "%s_%s" % (timestamp, node._id) + batch_dir = os.path.join(node.base_dir, "batch") if not os.path.exists(batch_dir): os.makedirs(batch_dir) - pkl_file = os.path.join(batch_dir, 'node_%s.pklz' % suffix) + pkl_file = os.path.join(batch_dir, "node_%s.pklz" % suffix) savepkl(pkl_file, dict(node=node, updatehash=updatehash)) mpl_backend = node.config["execution"]["matplotlib_backend"] # create python script to load and trap exception @@ -167,7 +173,7 @@ def create_pyscript(node, updatehash=False, store_exception=True): raise Exception(e) """ cmdstr = cmdstr % (mpl_backend, pkl_file, batch_dir, node.config, suffix) - pyscript = os.path.join(batch_dir, 'pyscript_%s.py' % suffix) - with open(pyscript, 'wt') as fp: + pyscript = os.path.join(batch_dir, "pyscript_%s.py" % suffix) + with open(pyscript, "wt") as fp: fp.writelines(cmdstr) return pyscript diff --git a/nipype/pkg_info.py b/nipype/pkg_info.py index 7367e4d8e5..fbba2f138c 100644 --- a/nipype/pkg_info.py +++ b/nipype/pkg_info.py @@ -5,11 +5,11 @@ import sys import subprocess -COMMIT_INFO_FNAME = 'COMMIT_INFO.txt' +COMMIT_INFO_FNAME = "COMMIT_INFO.txt" def pkg_commit_hash(pkg_path): - ''' Get short form of commit hash given directory `pkg_path` + """ Get short form of commit hash given directory `pkg_path` There should be a file called 'COMMIT_INFO.txt' in `pkg_path`. 
This is a file in INI file format, with at least one section: ``commit hash`` and two @@ -37,35 +37,36 @@ def pkg_commit_hash(pkg_path): Where we got the hash from - description hash_str : str short form of hash - ''' + """ # Try and get commit from written commit text file pth = os.path.join(pkg_path, COMMIT_INFO_FNAME) if not os.path.isfile(pth): - raise IOError('Missing commit info file %s' % pth) + raise IOError("Missing commit info file %s" % pth) cfg_parser = configparser.RawConfigParser() - with open(pth, encoding='utf-8') as fp: + with open(pth, encoding="utf-8") as fp: cfg_parser.read_file(fp) - archive_subst = cfg_parser.get('commit hash', 'archive_subst_hash') - if not archive_subst.startswith('$Format'): # it has been substituted - return 'archive substitution', archive_subst - install_subst = cfg_parser.get('commit hash', 'install_hash') - if install_subst != '': - return 'installation', install_subst + archive_subst = cfg_parser.get("commit hash", "archive_subst_hash") + if not archive_subst.startswith("$Format"): # it has been substituted + return "archive substitution", archive_subst + install_subst = cfg_parser.get("commit hash", "install_hash") + if install_subst != "": + return "installation", install_subst # maybe we are in a repository proc = subprocess.Popen( - 'git rev-parse --short HEAD', + "git rev-parse --short HEAD", stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=pkg_path, - shell=True) + shell=True, + ) repo_commit, _ = proc.communicate() if repo_commit: - return 'repository', repo_commit.decode().strip() - return '(none found)', '' + return "repository", repo_commit.decode().strip() + return "(none found)", "" def get_pkg_info(pkg_path): - ''' Return dict describing the context of this package + """ Return dict describing the context of this package Parameters ---------- @@ -76,7 +77,7 @@ def get_pkg_info(pkg_path): ------- context : dict with named parameters of interest - ''' + """ src, hsh = pkg_commit_hash(pkg_path) from .info import VERSION import networkx @@ -84,6 +85,7 @@ def get_pkg_info(pkg_path): import numpy import scipy import traits + return dict( pkg_path=pkg_path, commit_source=src, @@ -96,4 +98,5 @@ def get_pkg_info(pkg_path): scipy_version=scipy.__version__, networkx_version=networkx.__version__, nibabel_version=nibabel.__version__, - traits_version=traits.__version__) + traits_version=traits.__version__, + ) diff --git a/nipype/scripts/cli.py b/nipype/scripts/cli.py index ae21a789cb..73b599a978 100644 --- a/nipype/scripts/cli.py +++ b/nipype/scripts/cli.py @@ -25,13 +25,14 @@ def cli(): @cli.command(context_settings=CONTEXT_SETTINGS) -@click.argument('logdir', type=ExistingDirPath, callback=check_not_none) +@click.argument("logdir", type=ExistingDirPath, callback=check_not_none) @click.option( - '-r', - '--regex', + "-r", + "--regex", type=RegularExpression(), callback=check_not_none, - help='Regular expression to be searched in each traceback.') + help="Regular expression to be searched in each traceback.", +) def search(logdir, regex): """Search for tracebacks content. @@ -52,26 +53,27 @@ def search(logdir, regex): @cli.command(context_settings=CONTEXT_SETTINGS) -@click.argument('crashfile', type=ExistingFilePath, callback=check_not_none) +@click.argument("crashfile", type=ExistingFilePath, callback=check_not_none) @click.option( - '-r', '--rerun', is_flag=True, flag_value=True, help='Rerun crashed node.') + "-r", "--rerun", is_flag=True, flag_value=True, help="Rerun crashed node." 
+) @click.option( - '-d', - '--debug', + "-d", + "--debug", is_flag=True, flag_value=True, - help='Enable Python debugger when re-executing.') + help="Enable Python debugger when re-executing.", +) @click.option( - '-i', - '--ipydebug', + "-i", + "--ipydebug", is_flag=True, flag_value=True, - help='Enable IPython debugger when re-executing.') + help="Enable IPython debugger when re-executing.", +) @click.option( - '-w', - '--dir', - type=ExistingDirPath, - help='Directory where to run the node in.') + "-w", "--dir", type=ExistingDirPath, help="Directory where to run the node in." +) def crash(crashfile, rerun, debug, ipydebug, dir): """Display Nipype crash files. @@ -83,17 +85,19 @@ def crash(crashfile, rerun, debug, ipydebug, dir): """ from .crash_files import display_crash_file - debug = 'ipython' if ipydebug else debug - if debug == 'ipython': + debug = "ipython" if ipydebug else debug + if debug == "ipython": import sys from IPython.core import ultratb + sys.excepthook = ultratb.FormattedTB( - mode='Verbose', color_scheme='Linux', call_pdb=1) + mode="Verbose", color_scheme="Linux", call_pdb=1 + ) display_crash_file(crashfile, rerun, debug, dir) @cli.command(context_settings=CONTEXT_SETTINGS) -@click.argument('pklz_file', type=ExistingFilePath, callback=check_not_none) +@click.argument("pklz_file", type=ExistingFilePath, callback=check_not_none) def show(pklz_file): """Print the content of Nipype node .pklz file. @@ -108,20 +112,17 @@ def show(pklz_file): @cli.command(context_settings=UNKNOWN_OPTIONS) -@click.argument( - 'module', type=PythonModule(), required=False, callback=check_not_none) -@click.argument('interface', type=str, required=False) +@click.argument("module", type=PythonModule(), required=False, callback=check_not_none) +@click.argument("interface", type=str, required=False) @click.option( - '--list', + "--list", is_flag=True, flag_value=True, - help='List the available Interfaces inside the given module.') + help="List the available Interfaces inside the given module.", +) @click.option( - '-h', - '--help', - is_flag=True, - flag_value=True, - help='Show help message and exit.') + "-h", "--help", is_flag=True, flag_value=True, help="Show help message and exit." +) @click.pass_context def run(ctx, module, interface, list, help): """Run a Nipype Interface. @@ -142,18 +143,16 @@ def run(ctx, module, interface, list, help): # print the list of available interfaces for the given module elif (module_given and list) or (module_given and not interface): iface_names = list_interfaces(module) - click.echo('Available Interfaces:') + click.echo("Available Interfaces:") for if_name in iface_names: - click.echo(' {}'.format(if_name)) + click.echo(" {}".format(if_name)) # check the interface - elif (module_given and interface): + elif module_given and interface: # create the argument parser description = "Run {}".format(interface) - prog = " ".join( - [ctx.command_path, module.__name__, interface] + ctx.args) - iface_parser = argparse.ArgumentParser( - description=description, prog=prog) + prog = " ".join([ctx.command_path, module.__name__, interface] + ctx.args) + iface_parser = argparse.ArgumentParser(description=description, prog=prog) # instantiate the interface node = getattr(module, interface)() @@ -164,8 +163,10 @@ def run(ctx, module, interface, list, help): try: iface_parser.print_help() except: - print('An error ocurred when trying to print the full' - 'command help, printing usage.') + print( + "An error ocurred when trying to print the full" + "command help, printing usage." 
+ ) finally: iface_parser.print_usage() else: @@ -192,65 +193,77 @@ def convert(): "--interface", type=str, required=True, - help="Name of the Nipype interface to export.") + help="Name of the Nipype interface to export.", +) @click.option( "-m", "--module", type=PythonModule(), required=True, callback=check_not_none, - help="Module where the interface is defined.") + help="Module where the interface is defined.", +) @click.option( "-o", "--output", type=UnexistingFilePath, required=True, callback=check_not_none, - help="JSON file name where the Boutiques descriptor will be " - "written.") + help="JSON file name where the Boutiques descriptor will be " "written.", +) @click.option( "-c", "--container-image", required=True, type=str, - help="Name of the container image where the tool is installed.") + help="Name of the container image where the tool is installed.", +) @click.option( "-p", "--container-type", required=True, type=str, - help="Type of container image (Docker or Singularity).") + help="Type of container image (Docker or Singularity).", +) @click.option( "-x", "--container-index", type=str, help="Optional index where the image is available (e.g. " - "http://index.docker.io).") + "http://index.docker.io).", +) @click.option( "-g", "--ignore-inputs", type=str, multiple=True, - help="List of interface inputs to not include in the descriptor.") + help="List of interface inputs to not include in the descriptor.", +) @click.option( - "-v", - "--verbose", - is_flag=True, - flag_value=True, - help="Print information messages.") + "-v", "--verbose", is_flag=True, flag_value=True, help="Print information messages." +) @click.option( - "-a", - "--author", - type=str, - help="Author of the tool (required for publishing).") + "-a", "--author", type=str, help="Author of the tool (required for publishing)." +) @click.option( "-t", "--tags", type=str, help="JSON string containing tags to include in the descriptor," - "e.g. \"{\"key1\": \"value1\"}\"") -def boutiques(module, interface, container_image, container_type, output, - container_index, verbose, author, ignore_inputs, tags): + 'e.g. "{"key1": "value1"}"', +) +def boutiques( + module, + interface, + container_image, + container_type, + output, + container_index, + verbose, + author, + ignore_inputs, + tags, +): """Nipype to Boutiques exporter. See Boutiques specification at https://github.com/boutiques/schema. 
@@ -259,5 +272,15 @@ def boutiques(module, interface, container_image, container_type, output, # Generates JSON string and saves it to file generate_boutiques_descriptor( - module, interface, container_image, container_type, container_index, - verbose, True, output, author, ignore_inputs, tags) + module, + interface, + container_image, + container_type, + container_index, + verbose, + True, + output, + author, + ignore_inputs, + tags, + ) diff --git a/nipype/scripts/crash_files.py b/nipype/scripts/crash_files.py index b7b83dff5c..84464ccddb 100644 --- a/nipype/scripts/crash_files.py +++ b/nipype/scripts/crash_files.py @@ -18,7 +18,7 @@ def load_pklz_traceback(crash_filepath): except: raise else: - return '\n'.join(data['traceback']) + return "\n".join(data["traceback"]) def iter_tracebacks(logdir): @@ -38,7 +38,7 @@ def iter_tracebacks(logdir): traceback: str """ - crash_files = sorted(glob(op.join(logdir, '*.pkl*'))) + crash_files = sorted(glob(op.join(logdir, "*.pkl*"))) for cf in crash_files: yield cf, load_pklz_traceback(cf) @@ -50,9 +50,9 @@ def display_crash_file(crashfile, rerun, debug, directory): crash_data = loadcrash(crashfile) node = None - if 'node' in crash_data: - node = crash_data['node'] - tb = crash_data['traceback'] + if "node" in crash_data: + node = crash_data["node"] + tb = crash_data["traceback"] print("\n") print("File: %s" % crashfile) @@ -67,7 +67,7 @@ def display_crash_file(crashfile, rerun, debug, directory): print(node.inputs) print("\n") print("Traceback: ") - print(''.join(tb)) + print("".join(tb)) print("\n") if rerun: @@ -76,12 +76,13 @@ def display_crash_file(crashfile, rerun, debug, directory): return print("Rerunning node") node.base_dir = directory - node.config = {'execution': {'crashdump_dir': '/tmp'}} + node.config = {"execution": {"crashdump_dir": "/tmp"}} try: node.run() except: - if debug and debug != 'ipython': + if debug and debug != "ipython": import pdb + pdb.post_mortem() else: raise diff --git a/nipype/scripts/instance.py b/nipype/scripts/instance.py index 1f44a43bda..0d736de796 100644 --- a/nipype/scripts/instance.py +++ b/nipype/scripts/instance.py @@ -29,8 +29,7 @@ def import_module(module_path): try: mod = importlib.import_module(module_path) except: - raise ImportError( - 'Error when importing object {}.'.format(module_path)) + raise ImportError("Error when importing object {}.".format(module_path)) else: return mod diff --git a/nipype/scripts/utils.py b/nipype/scripts/utils.py index 0315bfd64e..28e11cd8f6 100644 --- a/nipype/scripts/utils.py +++ b/nipype/scripts/utils.py @@ -4,7 +4,6 @@ """ - import re import click import json @@ -14,7 +13,7 @@ from ..interfaces.base.support import get_trait_desc # different context options -CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) +CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"]) UNKNOWN_OPTIONS = dict(allow_extra_args=True, ignore_unknown_options=True) # specification of existing ParamTypes @@ -26,32 +25,31 @@ # validators def check_not_none(ctx, param, value): if value is None: - raise click.BadParameter('got {}.'.format(value)) + raise click.BadParameter("got {}.".format(value)) return value # declare custom click.ParamType class RegularExpression(click.ParamType): - name = 'regex' + name = "regex" def convert(self, value, param, ctx): try: rex = re.compile(value, re.IGNORECASE) except ValueError: - self.fail('%s is not a valid regular expression.' % value, param, - ctx) + self.fail("%s is not a valid regular expression." 
% value, param, ctx) else: return rex class PythonModule(click.ParamType): - name = 'Python module path' + name = "Python module path" def convert(self, value, param, ctx): try: module = import_module(value) except ValueError: - self.fail('%s is not a valid Python module.' % value, param, ctx) + self.fail("%s is not a valid Python module." % value, param, ctx) else: return module @@ -60,15 +58,15 @@ def add_args_options(arg_parser, interface): """Add arguments to `arg_parser` to create a CLI for `interface`.""" inputs = interface.input_spec() for name, spec in sorted(interface.inputs.traits(transient=None).items()): - desc = "\n".join(get_trait_desc(inputs, name, spec))[len(name) + 2:] + desc = "\n".join(get_trait_desc(inputs, name, spec))[len(name) + 2 :] # Escape any % signs with a % - desc = desc.replace('%', '%%') + desc = desc.replace("%", "%%") args = {} has_multiple_inner_traits = False if spec.is_trait_type(traits.Bool): args["default"] = getattr(inputs, name) - args["action"] = 'store_true' + args["action"] = "store_true" # current support is for simple trait types if not spec.inner_traits: @@ -93,8 +91,9 @@ def add_args_options(arg_parser, interface): if spec.is_trait_type(InputMultiPath): args["nargs"] = "+" elif spec.is_trait_type(traits.List): - if (spec.trait_type.minlen == spec.trait_type.maxlen) and \ - spec.trait_type.maxlen: + if ( + spec.trait_type.minlen == spec.trait_type.maxlen + ) and spec.trait_type.maxlen: args["nargs"] = spec.trait_type.maxlen else: args["nargs"] = "+" @@ -103,22 +102,25 @@ def add_args_options(arg_parser, interface): if has_multiple_inner_traits: raise NotImplementedError( - ('This interface cannot be used. via the' - ' command line as multiple inner traits' - ' are currently not supported for mandatory' - ' argument: {}.'.format(name))) + ( + "This interface cannot be used. 
via the" + " command line as multiple inner traits" + " are currently not supported for mandatory" + " argument: {}.".format(name) + ) + ) arg_parser.add_argument(name, help=desc, **args) else: if spec.is_trait_type(InputMultiPath): args["nargs"] = "*" elif spec.is_trait_type(traits.List): - if (spec.trait_type.minlen == spec.trait_type.maxlen) and \ - spec.trait_type.maxlen: + if ( + spec.trait_type.minlen == spec.trait_type.maxlen + ) and spec.trait_type.maxlen: args["nargs"] = spec.trait_type.maxlen else: args["nargs"] = "*" if not has_multiple_inner_traits: - arg_parser.add_argument( - "--%s" % name, dest=name, help=desc, **args) + arg_parser.add_argument("--%s" % name, dest=name, help=desc, **args) return arg_parser diff --git a/nipype/sphinxext/plot_workflow.py b/nipype/sphinxext/plot_workflow.py index 740c121926..832e163b2f 100644 --- a/nipype/sphinxext/plot_workflow.py +++ b/nipype/sphinxext/plot_workflow.py @@ -120,6 +120,7 @@ try: from docutils.parsers.rst import directives from docutils.parsers.rst.directives.images import Image + align = Image.align except ImportError as e: missing_imports = [str(e)] @@ -130,6 +131,8 @@ def format_template(template, **kw): return jinja2.Template(template).render(**kw) + + except ImportError as e: missing_imports.append(str(e)) try: @@ -143,13 +146,21 @@ def format_template(template, **kw): missing_imports.append(str(e)) - -def wf_directive(name, arguments, options, content, lineno, content_offset, - block_text, state, state_machine): +def wf_directive( + name, + arguments, + options, + content, + lineno, + content_offset, + block_text, + state, + state_machine, +): if len(missing_imports) == 0: return run(arguments, content, options, state_machine, state, lineno) else: - raise ImportError('\n'.join(missing_imports)) + raise ImportError("\n".join(missing_imports)) wf_directive.__doc__ = __doc__ @@ -159,32 +170,32 @@ def _option_boolean(arg): if not arg or not arg.strip(): # no argument given, assume used as a flag return True - elif arg.strip().lower() in ('no', '0', 'false'): + elif arg.strip().lower() in ("no", "0", "false"): return False - elif arg.strip().lower() in ('yes', '1', 'true'): + elif arg.strip().lower() in ("yes", "1", "true"): return True else: raise ValueError('"%s" unknown boolean' % arg) def _option_graph2use(arg): - return directives.choice( - arg, ('hierarchical', 'colored', 'flat', 'orig', 'exec')) + return directives.choice(arg, ("hierarchical", "colored", "flat", "orig", "exec")) def _option_context(arg): - if arg in [None, 'reset', 'close-figs']: + if arg in [None, "reset", "close-figs"]: return arg raise ValueError("argument should be None or 'reset' or 'close-figs'") def _option_format(arg): - return directives.choice(arg, ('python', 'doctest')) + return directives.choice(arg, ("python", "doctest")) def _option_align(arg): return directives.choice( - arg, ("top", "middle", "bottom", "left", "center", "right")) + arg, ("top", "middle", "bottom", "left", "center", "right") + ) def mark_wf_labels(app, document): @@ -200,21 +211,24 @@ def mark_wf_labels(app, document): if labelid is None: continue node = document.ids[labelid] - if node.tagname in ('html_only', 'latex_only'): + if node.tagname in ("html_only", "latex_only"): for n in node: - if n.tagname == 'figure': + if n.tagname == "figure": sectname = name for c in n: - if c.tagname == 'caption': + if c.tagname == "caption": sectname = c.astext() break - node['ids'].remove(labelid) - node['names'].remove(name) - n['ids'].append(labelid) - n['names'].append(name) - 
document.settings.env.labels[name] = \ - document.settings.env.docname, labelid, sectname + node["ids"].remove(labelid) + node["names"].remove(name) + n["ids"].append(labelid) + n["names"].append(name) + document.settings.env.labels[name] = ( + document.settings.env.docname, + labelid, + sectname, + ) break @@ -224,38 +238,38 @@ def setup(app): setup.confdir = app.confdir options = { - 'alt': directives.unchanged, - 'height': directives.length_or_unitless, - 'width': directives.length_or_percentage_or_unitless, - 'scale': directives.nonnegative_int, - 'align': _option_align, - 'class': directives.class_option, - 'include-source': _option_boolean, - 'format': _option_format, - 'context': _option_context, - 'nofigs': directives.flag, - 'encoding': directives.encoding, - 'graph2use': _option_graph2use, - 'simple_form': _option_boolean + "alt": directives.unchanged, + "height": directives.length_or_unitless, + "width": directives.length_or_percentage_or_unitless, + "scale": directives.nonnegative_int, + "align": _option_align, + "class": directives.class_option, + "include-source": _option_boolean, + "format": _option_format, + "context": _option_context, + "nofigs": directives.flag, + "encoding": directives.encoding, + "graph2use": _option_graph2use, + "simple_form": _option_boolean, } - app.add_directive('workflow', wf_directive, True, (0, 2, False), **options) - app.add_config_value('graph2use', 'hierarchical', 'html') - app.add_config_value('simple_form', True, 'html') - app.add_config_value('wf_pre_code', None, True) - app.add_config_value('wf_include_source', False, True) - app.add_config_value('wf_html_show_source_link', True, True) - app.add_config_value('wf_formats', ['png', 'svg', 'pdf'], True) - app.add_config_value('wf_basedir', None, True) - app.add_config_value('wf_html_show_formats', True, True) - app.add_config_value('wf_rcparams', {}, True) - app.add_config_value('wf_apply_rcparams', False, True) - app.add_config_value('wf_working_directory', None, True) - app.add_config_value('wf_template', None, True) - - app.connect('doctree-read', mark_wf_labels) - - metadata = {'parallel_read_safe': True, 'parallel_write_safe': True} + app.add_directive("workflow", wf_directive, True, (0, 2, False), **options) + app.add_config_value("graph2use", "hierarchical", "html") + app.add_config_value("simple_form", True, "html") + app.add_config_value("wf_pre_code", None, True) + app.add_config_value("wf_include_source", False, True) + app.add_config_value("wf_html_show_source_link", True, True) + app.add_config_value("wf_formats", ["png", "svg", "pdf"], True) + app.add_config_value("wf_basedir", None, True) + app.add_config_value("wf_html_show_formats", True, True) + app.add_config_value("wf_rcparams", {}, True) + app.add_config_value("wf_apply_rcparams", False, True) + app.add_config_value("wf_working_directory", None, True) + app.add_config_value("wf_template", None, True) + + app.connect("doctree-read", mark_wf_labels) + + metadata = {"parallel_read_safe": True, "parallel_write_safe": True} return metadata @@ -267,11 +281,11 @@ def setup(app): def contains_doctest(text): try: # check if it's valid Python as-is - compile(text, '', 'exec') + compile(text, "", "exec") return False except SyntaxError: pass - r = re.compile(r'^\s*>>>', re.M) + r = re.compile(r"^\s*>>>", re.M) m = r.search(text) return bool(m) @@ -286,7 +300,7 @@ def unescape_doctest(text): code = "" for line in text.split("\n"): - m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line) + m = re.match(r"^\s*(>>>|\.\.\.) 
(.*)$", line) if m: code += m.group(2) + "\n" elif line.strip(): @@ -389,9 +403,10 @@ def out_of_date(original, derived): Returns True if derivative is out-of-date wrt original, both of which are full file paths. """ - return (not os.path.exists(derived) - or (os.path.exists(original) - and os.stat(derived).st_mtime < os.stat(original).st_mtime)) + return not os.path.exists(derived) or ( + os.path.exists(original) + and os.stat(derived).st_mtime < os.stat(original).st_mtime + ) class GraphError(RuntimeError): @@ -414,14 +429,16 @@ def run_code(code, code_path, ns=None, function_name=None): os.chdir(setup.config.wf_working_directory) except OSError as err: raise OSError( - str(err) + '\n`wf_working_directory` option in' - 'Sphinx configuration file must be a valid ' - 'directory path') + str(err) + "\n`wf_working_directory` option in" + "Sphinx configuration file must be a valid " + "directory path" + ) except TypeError as err: raise TypeError( - str(err) + '\n`wf_working_directory` option in ' - 'Sphinx configuration file must be a string or ' - 'None') + str(err) + "\n`wf_working_directory` option in " + "Sphinx configuration file must be a string or " + "None" + ) sys.path.insert(0, setup.config.wf_working_directory) elif code_path is not None: dirname = os.path.abspath(os.path.dirname(code_path)) @@ -450,7 +467,7 @@ def _dummy_print(*arg, **kwarg): if not ns: if setup.config.wf_pre_code is not None: exec(str(setup.config.wf_pre_code), ns) - ns['print'] = _dummy_print + ns["print"] = _dummy_print if "__main__" in code: exec("__name__ = '__main__'", ns) code = remove_coding(code) @@ -468,18 +485,18 @@ def _dummy_print(*arg, **kwarg): def get_wf_formats(config): - default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 200} + default_dpi = {"png": 80, "hires.png": 200, "pdf": 200} formats = [] wf_formats = config.wf_formats if isinstance(wf_formats, (str, bytes)): # String Sphinx < 1.3, Split on , to mimic # Sphinx 1.3 and later. Sphinx 1.3 always # returns a list. - wf_formats = wf_formats.split(',') + wf_formats = wf_formats.split(",") for fmt in wf_formats: if isinstance(fmt, (str, bytes)): - if ':' in fmt: - suffix, dpi = fmt.split(':') + if ":" in fmt: + suffix, dpi = fmt.split(":") formats.append((str(suffix), int(dpi))) else: formats.append((fmt, default_dpi.get(fmt, 80))) @@ -490,17 +507,19 @@ def get_wf_formats(config): return formats -def render_figures(code, - code_path, - output_dir, - output_base, - context, - function_name, - config, - graph2use, - simple_form, - context_reset=False, - close_figs=False): +def render_figures( + code, + code_path, + output_dir, + output_base, + context, + function_name, + config, + graph2use, + simple_form, + context_reset=False, + close_figs=False, +): """ Run a nipype workflow creation script and save the graph in *output_dir*. 
Save the images under *output_dir* with file names derived from @@ -518,12 +537,10 @@ def render_figures(code, try: img_path = img.filename(fmt) imgname, ext = os.path.splitext(os.path.basename(img_path)) - ns['wf'].base_dir = output_dir - src = ns['wf'].write_graph( - imgname, - format=ext[1:], - graph2use=graph2use, - simple_form=simple_form) + ns["wf"].base_dir = output_dir + src = ns["wf"].write_graph( + imgname, format=ext[1:], graph2use=graph2use, simple_form=simple_form + ) shutil.move(src, img_path) except Exception: raise GraphError(traceback.format_exc()) @@ -536,31 +553,33 @@ def render_figures(code, def run(arguments, content, options, state_machine, state, lineno): document = state_machine.document config = document.settings.env.config - nofigs = 'nofigs' in options + nofigs = "nofigs" in options formats = get_wf_formats(config) default_fmt = formats[0][0] - graph2use = options.get('graph2use', 'hierarchical') - simple_form = options.get('simple_form', True) + graph2use = options.get("graph2use", "hierarchical") + simple_form = options.get("simple_form", True) - options.setdefault('include-source', config.wf_include_source) - keep_context = 'context' in options - context_opt = None if not keep_context else options['context'] + options.setdefault("include-source", config.wf_include_source) + keep_context = "context" in options + context_opt = None if not keep_context else options["context"] - rst_file = document.attributes['source'] + rst_file = document.attributes["source"] rst_dir = os.path.dirname(rst_file) if len(arguments): if not config.wf_basedir: - source_file_name = os.path.join(setup.app.builder.srcdir, - directives.uri(arguments[0])) + source_file_name = os.path.join( + setup.app.builder.srcdir, directives.uri(arguments[0]) + ) else: - source_file_name = os.path.join(setup.confdir, config.wf_basedir, - directives.uri(arguments[0])) + source_file_name = os.path.join( + setup.confdir, config.wf_basedir, directives.uri(arguments[0]) + ) # If there is content, it will be passed as a caption. - caption = '\n'.join(content) + caption = "\n".join(content) # If the optional function name is provided, use it if len(arguments) == 2: @@ -568,32 +587,32 @@ def run(arguments, content, options, state_machine, state, lineno): else: function_name = None - with io.open(source_file_name, 'r', encoding='utf-8') as fd: + with io.open(source_file_name, "r", encoding="utf-8") as fd: code = fd.read() output_base = os.path.basename(source_file_name) else: source_file_name = rst_file code = textwrap.dedent("\n".join([str(c) for c in content])) - counter = document.attributes.get('_wf_counter', 0) + 1 - document.attributes['_wf_counter'] = counter + counter = document.attributes.get("_wf_counter", 0) + 1 + document.attributes["_wf_counter"] = counter base, _ = os.path.splitext(os.path.basename(source_file_name)) - output_base = '%s-%d.py' % (base, counter) + output_base = "%s-%d.py" % (base, counter) function_name = None - caption = '' + caption = "" base, source_ext = os.path.splitext(output_base) - if source_ext in ('.py', '.rst', '.txt'): + if source_ext in (".py", ".rst", ".txt"): output_base = base else: - source_ext = '' + source_ext = "" # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames - output_base = output_base.replace('.', '-') + output_base = output_base.replace(".", "-") # is it in doctest format? 
is_doctest = contains_doctest(code) - if 'format' in options: - if options['format'] == 'python': + if "format" in options: + if options["format"] == "python": is_doctest = False else: is_doctest = True @@ -606,7 +625,8 @@ def run(arguments, content, options, state_machine, state, lineno): # build_dir: where to place output files (temporarily) build_dir = os.path.join( - os.path.dirname(setup.app.doctreedir), 'wf_directive', source_rel_dir) + os.path.dirname(setup.app.doctreedir), "wf_directive", source_rel_dir + ) # get rid of .. in paths, also changes pathsep # see note in Python docs for warning about symbolic links on Windows. # need to compare source and dest paths at end @@ -616,22 +636,21 @@ def run(arguments, content, options, state_machine, state, lineno): os.makedirs(build_dir) # output_dir: final location in the builder's directory - dest_dir = os.path.abspath( - os.path.join(setup.app.builder.outdir, source_rel_dir)) + dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir, source_rel_dir)) if not os.path.exists(dest_dir): os.makedirs(dest_dir) # no problem here for me, but just use built-ins # how to link to files from the RST file dest_dir_link = os.path.join( - relpath(setup.confdir, rst_dir), source_rel_dir).replace( - os.path.sep, '/') + relpath(setup.confdir, rst_dir), source_rel_dir + ).replace(os.path.sep, "/") try: - build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/') + build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, "/") except ValueError: # on Windows, relpath raises ValueError when path and start are on # different mounts/drives build_dir_link = build_dir - source_link = dest_dir_link + '/' + output_base + source_ext + source_link = dest_dir_link + "/" + output_base + source_ext # make figures try: @@ -645,35 +664,34 @@ def run(arguments, content, options, state_machine, state, lineno): config, graph2use, simple_form, - context_reset=context_opt == 'reset', - close_figs=context_opt == 'close-figs') + context_reset=context_opt == "reset", + close_figs=context_opt == "close-figs", + ) errors = [] except GraphError as err: reporter = state.memo.reporter sm = reporter.system_message( 2, - "Exception occurred in plotting %s\n from %s:\n%s" % - (output_base, source_file_name, err), - line=lineno) + "Exception occurred in plotting %s\n from %s:\n%s" + % (output_base, source_file_name, err), + line=lineno, + ) results = [(code, [])] errors = [sm] # Properly indent the caption - caption = '\n'.join( - ' ' + line.strip() for line in caption.split('\n')) + caption = "\n".join(" " + line.strip() for line in caption.split("\n")) # generate output restructuredtext total_lines = [] for j, (code_piece, images) in enumerate(results): - if options['include-source']: + if options["include-source"]: if is_doctest: - lines = [''] - lines += [row.rstrip() for row in code_piece.split('\n')] + lines = [""] + lines += [row.rstrip() for row in code_piece.split("\n")] else: - lines = ['.. code-block:: python', ''] - lines += [ - ' %s' % row.rstrip() for row in code_piece.split('\n') - ] + lines = [".. 
code-block:: python", ""] + lines += [" %s" % row.rstrip() for row in code_piece.split("\n")] source_code = "\n".join(lines) else: source_code = "" @@ -682,8 +700,9 @@ def run(arguments, content, options, state_machine, state, lineno): images = [] opts = [ - ':%s: %s' % (key, val) for key, val in list(options.items()) - if key in ('alt', 'height', 'width', 'scale', 'align', 'class') + ":%s: %s" % (key, val) + for key, val in list(options.items()) + if key in ("alt", "height", "width", "scale", "align", "class") ] only_html = ".. only:: html" @@ -711,7 +730,8 @@ def run(arguments, content, options, state_machine, state, lineno): images=images, source_code=source_code, html_show_formats=config.wf_html_show_formats and len(images), - caption=caption) + caption=caption, + ) total_lines.extend(result.split("\n")) total_lines.extend("\n") @@ -730,7 +750,7 @@ def run(arguments, content, options, state_machine, state, lineno): # copy script (if necessary) target_name = os.path.join(dest_dir, output_base + source_ext) - with io.open(target_name, 'w', encoding="utf-8") as f: + with io.open(target_name, "w", encoding="utf-8") as f: if source_file_name == rst_file: code_escaped = unescape_doctest(code) else: diff --git a/nipype/testing/__init__.py b/nipype/testing/__init__.py index 2167e7e54a..c22de2cc7a 100644 --- a/nipype/testing/__init__.py +++ b/nipype/testing/__init__.py @@ -11,8 +11,8 @@ filepath = os.path.abspath(__file__) basedir = os.path.dirname(filepath) -funcfile = os.path.join(basedir, 'data', 'functional.nii') -anatfile = os.path.join(basedir, 'data', 'structural.nii') +funcfile = os.path.join(basedir, "data", "functional.nii") +anatfile = os.path.join(basedir, "data", "structural.nii") template = funcfile transfm = funcfile @@ -22,14 +22,14 @@ skipif = decorators.dec.skipif -def example_data(infile='functional.nii'): +def example_data(infile="functional.nii"): """returns path to empty example data files for doc tests it will raise an exception if filename is not in the directory""" filepath = os.path.abspath(__file__) basedir = os.path.dirname(filepath) - outfile = os.path.join(basedir, 'data', infile) + outfile = os.path.join(basedir, "data", infile) if not os.path.exists(outfile): - raise IOError('%s empty data file does NOT exist' % outfile) + raise IOError("%s empty data file does NOT exist" % outfile) return outfile diff --git a/nipype/testing/fixtures.py b/nipype/testing/fixtures.py index dabf3b3307..6f5b12495c 100644 --- a/nipype/testing/fixtures.py +++ b/nipype/testing/fixtures.py @@ -26,14 +26,13 @@ def analyze_pair_image_files(outdir, filelist, shape): def nifti_image_files(outdir, filelist, shape): for f in ensure_list(filelist): img = np.random.random(shape) - nb.Nifti1Image(img, np.eye(4), None).to_filename( - os.path.join(outdir, f)) + nb.Nifti1Image(img, np.eye(4), None).to_filename(os.path.join(outdir, f)) @pytest.fixture() def create_files_in_directory(request, tmpdir): cwd = tmpdir.chdir() - filelist = ['a.nii', 'b.nii'] + filelist = ["a.nii", "b.nii"] nifti_image_files(tmpdir.strpath, filelist, shape=(3, 3, 3, 4)) def change_directory(): @@ -46,7 +45,7 @@ def change_directory(): @pytest.fixture() def create_analyze_pair_file_in_directory(request, tmpdir): cwd = tmpdir.chdir() - filelist = ['a.hdr'] + filelist = ["a.hdr"] analyze_pair_image_files(tmpdir.strpath, filelist, shape=(3, 3, 3, 4)) def change_directory(): @@ -59,11 +58,11 @@ def change_directory(): @pytest.fixture() def create_files_in_directory_plus_dummy_file(request, tmpdir): cwd = tmpdir.chdir() - 
filelist = ['a.nii', 'b.nii'] + filelist = ["a.nii", "b.nii"] nifti_image_files(tmpdir.strpath, filelist, shape=(3, 3, 3, 4)) - tmpdir.join('reg.dat').write('dummy file') - filelist.append('reg.dat') + tmpdir.join("reg.dat").write("dummy file") + filelist.append("reg.dat") def change_directory(): cwd.chdir() @@ -75,7 +74,7 @@ def change_directory(): @pytest.fixture() def create_surf_file_in_directory(request, tmpdir): cwd = tmpdir.chdir() - surf = 'lh.a.nii' + surf = "lh.a.nii" nifti_image_files(tmpdir.strpath, filelist=surf, shape=(1, 100, 1)) def change_directory(): @@ -86,12 +85,12 @@ def change_directory(): def set_output_type(fsl_output_type): - prev_output_type = os.environ.get('FSLOUTPUTTYPE', None) + prev_output_type = os.environ.get("FSLOUTPUTTYPE", None) if fsl_output_type is not None: - os.environ['FSLOUTPUTTYPE'] = fsl_output_type - elif 'FSLOUTPUTTYPE' in os.environ: - del os.environ['FSLOUTPUTTYPE'] + os.environ["FSLOUTPUTTYPE"] = fsl_output_type + elif "FSLOUTPUTTYPE" in os.environ: + del os.environ["FSLOUTPUTTYPE"] FSLCommand.set_default_output_type(Info.output_type()) return prev_output_type @@ -101,7 +100,7 @@ def set_output_type(fsl_output_type): def create_files_in_directory_plus_output_type(request, tmpdir): func_prev_type = set_output_type(request.param) origdir = tmpdir.chdir() - filelist = ['a.nii', 'b.nii'] + filelist = ["a.nii", "b.nii"] nifti_image_files(tmpdir.strpath, filelist, shape=(3, 3, 3, 4)) out_ext = Info.output_type_to_ext(Info.output_type()) diff --git a/nipype/testing/tests/test_utils.py b/nipype/testing/tests/test_utils.py index 798f640805..fb2992b7e6 100644 --- a/nipype/testing/tests/test_utils.py +++ b/nipype/testing/tests/test_utils.py @@ -22,8 +22,9 @@ def test_tempfatfs(): @patch( - 'subprocess.check_call', - MagicMock(side_effect=subprocess.CalledProcessError('', ''))) + "subprocess.check_call", + MagicMock(side_effect=subprocess.CalledProcessError("", "")), +) def test_tempfatfs_calledprocesserror(): try: TempFATFS() @@ -34,8 +35,8 @@ def test_tempfatfs_calledprocesserror(): assert False -@patch('subprocess.check_call', MagicMock()) -@patch('subprocess.Popen', MagicMock(side_effect=OSError())) +@patch("subprocess.check_call", MagicMock()) +@patch("subprocess.Popen", MagicMock(side_effect=OSError())) def test_tempfatfs_oserror(): try: TempFATFS() diff --git a/nipype/testing/utils.py b/nipype/testing/utils.py index 6c36ce514e..e666a7586f 100644 --- a/nipype/testing/utils.py +++ b/nipype/testing/utils.py @@ -12,7 +12,7 @@ from tempfile import mkdtemp from ..utils.misc import package_check -__docformat__ = 'restructuredtext' +__docformat__ = "restructuredtext" import numpy as np import nibabel as nb @@ -37,28 +37,30 @@ def __init__(self, size_in_mbytes=8, delay=0.5): """ self.delay = delay self.tmpdir = mkdtemp() - self.dev_null = open(os.devnull, 'wb') + self.dev_null = open(os.devnull, "wb") - vfatfile = os.path.join(self.tmpdir, 'vfatblock') - self.vfatmount = os.path.join(self.tmpdir, 'vfatmount') - self.canary = os.path.join(self.vfatmount, '.canary') + vfatfile = os.path.join(self.tmpdir, "vfatblock") + self.vfatmount = os.path.join(self.tmpdir, "vfatmount") + self.canary = os.path.join(self.vfatmount, ".canary") - with open(vfatfile, 'wb') as fobj: - fobj.write(b'\x00' * (int(size_in_mbytes) << 20)) + with open(vfatfile, "wb") as fobj: + fobj.write(b"\x00" * (int(size_in_mbytes) << 20)) os.mkdir(self.vfatmount) - mkfs_args = ['mkfs.vfat', vfatfile] - mount_args = ['fusefat', '-o', 'rw+', '-f', vfatfile, self.vfatmount] + mkfs_args = 
["mkfs.vfat", vfatfile] + mount_args = ["fusefat", "-o", "rw+", "-f", vfatfile, self.vfatmount] try: subprocess.check_call( - args=mkfs_args, stdout=self.dev_null, stderr=self.dev_null) + args=mkfs_args, stdout=self.dev_null, stderr=self.dev_null + ) except CalledProcessError as e: raise IOError("mkfs.vfat failed") from e try: self.fusefat = subprocess.Popen( - args=mount_args, stdout=self.dev_null, stderr=self.dev_null) + args=mount_args, stdout=self.dev_null, stderr=self.dev_null + ) except OSError as e: raise IOError("fusefat is not installed") from e @@ -67,7 +69,7 @@ def __init__(self, size_in_mbytes=8, delay=0.5): if self.fusefat.poll() is not None: raise IOError("fusefat terminated too soon") - open(self.canary, 'wb').close() + open(self.canary, "wb").close() def __enter__(self): return self.vfatmount diff --git a/nipype/tests/test_nipype.py b/nipype/tests/test_nipype.py index 01fd081bc9..ab3499c8db 100644 --- a/nipype/tests/test_nipype.py +++ b/nipype/tests/test_nipype.py @@ -12,8 +12,10 @@ def test_nipype_info(): assert exception_not_raised -@pytest.mark.skipif(not get_nipype_gitversion(), - reason="not able to get version from get_nipype_gitversion") +@pytest.mark.skipif( + not get_nipype_gitversion(), + reason="not able to get version from get_nipype_gitversion", +) def test_git_hash(): # removing the first "g" from gitversion - get_nipype_gitversion()[1:] == get_info()['commit_hash'] + get_nipype_gitversion()[1:] == get_info()["commit_hash"] diff --git a/nipype/utils/config.py b/nipype/utils/config.py index 79515a5b7f..4537fdadc8 100644 --- a/nipype/utils/config.py +++ b/nipype/utils/config.py @@ -1,14 +1,14 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -''' +""" Created on 20 Apr 2010 logging options : INFO, DEBUG hash_method : content, timestamp @author: Chris Filo Gorgolewski -''' +""" import os import sys import errno @@ -24,11 +24,11 @@ from filelock import SoftFileLock CONFIG_DEPRECATIONS = { - 'profile_runtime': ('monitoring.enabled', '1.0'), - 'filemanip_level': ('logging.utils_level', '1.0'), + "profile_runtime": ("monitoring.enabled", "1.0"), + "filemanip_level": ("logging.utils_level", "1.0"), } -NUMPY_MMAP = LooseVersion(np.__version__) >= LooseVersion('1.12.0') +NUMPY_MMAP = LooseVersion(np.__version__) >= LooseVersion("1.12.0") DEFAULT_CONFIG_TPL = """\ [logging] @@ -91,25 +91,22 @@ def __init__(self, *args, **kwargs): self._config = configparser.ConfigParser() self._cwd = None - config_dir = os.path.expanduser('~/.nipype') - self.data_file = os.path.join(config_dir, 'nipype.json') + config_dir = os.path.expanduser("~/.nipype") + self.data_file = os.path.join(config_dir, "nipype.json") self.set_default_config() self._display = None self._resource_monitor = None - self._config.read( - [os.path.join(config_dir, 'nipype.cfg'), 'nipype.cfg']) + self._config.read([os.path.join(config_dir, "nipype.cfg"), "nipype.cfg"]) for option in CONFIG_DEPRECATIONS: - for section in ['execution', 'logging', 'monitoring']: + for section in ["execution", "logging", "monitoring"]: if self.has_option(section, option): - new_section, new_option = CONFIG_DEPRECATIONS[option][ - 0].split('.') + new_section, new_option = CONFIG_DEPRECATIONS[option][0].split(".") if not self.has_option(new_section, new_option): # Warn implicit in get - self.set(new_section, new_option, - self.get(section, option)) + self.set(new_section, new_option, self.get(section, option)) @property def cwd(self): @@ -120,34 
+117,39 @@ def cwd(self): try: self._cwd = os.getcwd() except OSError: - warn('Trying to run Nipype from a nonexistent directory "{}".'. - format(os.getenv('PWD', 'unknown')), RuntimeWarning) + warn( + 'Trying to run Nipype from a nonexistent directory "{}".'.format( + os.getenv("PWD", "unknown") + ), + RuntimeWarning, + ) raise return self._cwd def set_default_config(self): """Read default settings template and set into config object""" default_cfg = DEFAULT_CONFIG_TPL( - log_dir=os.path.expanduser( - '~'), # Get $HOME in a platform-agnostic way - crashdump_dir=self.cwd # Read cached cwd + log_dir=os.path.expanduser("~"), # Get $HOME in a platform-agnostic way + crashdump_dir=self.cwd, # Read cached cwd ) try: self._config.read_string(default_cfg) # Python >= 3.2 except AttributeError: from io import StringIO + self._config.readfp(StringIO(default_cfg)) def enable_debug_mode(self): """Enables debug configuration""" from .. import logging - self._config.set('execution', 'stop_on_first_crash', 'true') - self._config.set('execution', 'remove_unnecessary_outputs', 'false') - self._config.set('execution', 'keep_inputs', 'true') - self._config.set('logging', 'workflow_level', 'DEBUG') - self._config.set('logging', 'interface_level', 'DEBUG') - self._config.set('logging', 'utils_level', 'DEBUG') + + self._config.set("execution", "stop_on_first_crash", "true") + self._config.set("execution", "remove_unnecessary_outputs", "false") + self._config.set("execution", "keep_inputs", "true") + self._config.set("logging", "workflow_level", "DEBUG") + self._config.set("logging", "interface_level", "DEBUG") + self._config.set("logging", "utils_level", "DEBUG") logging.update_logging(self._config) def set_log_dir(self, log_dir): @@ -156,17 +158,17 @@ def set_log_dir(self, log_dir): This should be the first thing that is done before any nipype class with logging is imported. """ - self._config.set('logging', 'log_directory', log_dir) + self._config.set("logging", "log_directory", log_dir) def get(self, section, option, default=None): """Get an option""" if option in CONFIG_DEPRECATIONS: - msg = ('Config option "%s" has been deprecated as of nipype %s. ' - 'Please use "%s" instead.') % ( - option, CONFIG_DEPRECATIONS[option][1], - CONFIG_DEPRECATIONS[option][0]) + msg = ( + 'Config option "%s" has been deprecated as of nipype %s. ' + 'Please use "%s" instead.' + ) % (option, CONFIG_DEPRECATIONS[option][1], CONFIG_DEPRECATIONS[option][0]) warn(msg) - section, option = CONFIG_DEPRECATIONS[option][0].split('.') + section, option = CONFIG_DEPRECATIONS[option][0].split(".") if self._config.has_option(section, option): return self._config.get(section, option) @@ -178,12 +180,12 @@ def set(self, section, option, value): value = str(value) if option in CONFIG_DEPRECATIONS: - msg = ('Config option "%s" has been deprecated as of nipype %s. ' - 'Please use "%s" instead.') % ( - option, CONFIG_DEPRECATIONS[option][1], - CONFIG_DEPRECATIONS[option][0]) + msg = ( + 'Config option "%s" has been deprecated as of nipype %s. ' + 'Please use "%s" instead.' 
+ ) % (option, CONFIG_DEPRECATIONS[option][1], CONFIG_DEPRECATIONS[option][0]) warn(msg) - section, option = CONFIG_DEPRECATIONS[option][0].split('.') + section, option = CONFIG_DEPRECATIONS[option][0].split(".") return self._config.set(section, option, value) @@ -203,8 +205,8 @@ def get_data(self, key): """Read options file""" if not os.path.exists(self.data_file): return None - with SoftFileLock('%s.lock' % self.data_file): - with open(self.data_file, 'rt') as file: + with SoftFileLock("%s.lock" % self.data_file): + with open(self.data_file, "rt") as file: datadict = load(file) if key in datadict: return datadict[key] @@ -214,35 +216,36 @@ def save_data(self, key, value): """Store config flie""" datadict = {} if os.path.exists(self.data_file): - with SoftFileLock('%s.lock' % self.data_file): - with open(self.data_file, 'rt') as file: + with SoftFileLock("%s.lock" % self.data_file): + with open(self.data_file, "rt") as file: datadict = load(file) else: dirname = os.path.dirname(self.data_file) if not os.path.exists(dirname): mkdir_p(dirname) - with SoftFileLock('%s.lock' % self.data_file): - with open(self.data_file, 'wt') as file: + with SoftFileLock("%s.lock" % self.data_file): + with open(self.data_file, "wt") as file: datadict[key] = value dump(datadict, file) def update_config(self, config_dict): """Extend internal dictionary with config_dict""" - for section in ['execution', 'logging', 'check']: + for section in ["execution", "logging", "check"]: if section in config_dict: for key, val in list(config_dict[section].items()): - if not key.startswith('__'): + if not key.startswith("__"): self._config.set(section, key, str(val)) def update_matplotlib(self): """Set backend on matplotlib from options""" import matplotlib - matplotlib.use(self.get('execution', 'matplotlib_backend')) + + matplotlib.use(self.get("execution", "matplotlib_backend")) def enable_provenance(self): """Sets provenance storing on""" - self._config.set('execution', 'write_provenance', 'true') - self._config.set('execution', 'hash_method', 'content') + self._config.set("execution", "write_provenance", "true") + self._config.set("execution", "hash_method", "content") @property def resource_monitor(self): @@ -251,8 +254,9 @@ def resource_monitor(self): return self._resource_monitor # Cache config from nipype config - self.resource_monitor = str2bool( - self._config.get('monitoring', 'enabled')) or False + self.resource_monitor = ( + str2bool(self._config.get("monitoring", "enabled")) or False + ) return self._resource_monitor @resource_monitor.setter @@ -270,16 +274,21 @@ def resource_monitor(self, value): self._resource_monitor = False try: import psutil + self._resource_monitor = LooseVersion( - psutil.__version__) >= LooseVersion('5.0') + psutil.__version__ + ) >= LooseVersion("5.0") except ImportError: pass finally: if not self._resource_monitor: - warn('Could not enable the resource monitor: ' - 'psutil>=5.0 could not be imported.') - self._config.set('monitoring', 'enabled', - ('%s' % self._resource_monitor).lower()) + warn( + "Could not enable the resource monitor: " + "psutil>=5.0 could not be imported." 
+ ) + self._config.set( + "monitoring", "enabled", ("%s" % self._resource_monitor).lower() + ) def enable_resource_monitor(self): """Sets the resource monitor on""" @@ -300,13 +309,13 @@ def get_display(self): # shell=True, stdout=sp.DEVNULL)) if self._display is not None: - return ':%d' % self._display.new_display + return ":%d" % self._display.new_display sysdisplay = None - if self._config.has_option('execution', 'display_variable'): - sysdisplay = self._config.get('execution', 'display_variable') + if self._config.has_option("execution", "display_variable"): + sysdisplay = self._config.get("execution", "display_variable") - sysdisplay = sysdisplay or os.getenv('DISPLAY') + sysdisplay = sysdisplay or os.getenv("DISPLAY") if sysdisplay: from collections import namedtuple @@ -314,49 +323,51 @@ def _mock(): pass # Store a fake Xvfb object. Format - :[.] - ndisp = sysdisplay.split(':')[-1].split('.')[0] - Xvfb = namedtuple('Xvfb', ['new_display', 'stop']) + ndisp = sysdisplay.split(":")[-1].split(".")[0] + Xvfb = namedtuple("Xvfb", ["new_display", "stop"]) self._display = Xvfb(int(ndisp), _mock) return self.get_display() else: - if 'darwin' in sys.platform: + if "darwin" in sys.platform: raise RuntimeError( - 'Xvfb requires root permissions to run in OSX. Please ' - 'make sure that an X server is listening and set the ' - 'appropriate config on either $DISPLAY or nipype\'s ' + "Xvfb requires root permissions to run in OSX. Please " + "make sure that an X server is listening and set the " + "appropriate config on either $DISPLAY or nipype's " '"display_variable" config. Valid X servers include ' - 'VNC, XQuartz, or manually started Xvfb.') + "VNC, XQuartz, or manually started Xvfb." + ) # If $DISPLAY is empty, it confuses Xvfb so unset - if sysdisplay == '': - del os.environ['DISPLAY'] + if sysdisplay == "": + del os.environ["DISPLAY"] try: from xvfbwrapper import Xvfb except ImportError: raise RuntimeError( - 'A display server was required, but $DISPLAY is not ' - 'defined and Xvfb could not be imported.') + "A display server was required, but $DISPLAY is not " + "defined and Xvfb could not be imported." + ) - self._display = Xvfb(nolisten='tcp') + self._display = Xvfb(nolisten="tcp") self._display.start() # Older versions of xvfbwrapper used vdisplay_num - if not hasattr(self._display, 'new_display'): - setattr(self._display, 'new_display', - self._display.vdisplay_num) + if not hasattr(self._display, "new_display"): + setattr(self._display, "new_display", self._display.vdisplay_num) return self.get_display() def stop_display(self): """Closes the display if started""" if self._display is not None: from .. import logging + self._display.stop() - logging.getLogger('nipype.interface').debug( - 'Closing display (if virtual)') + logging.getLogger("nipype.interface").debug("Closing display (if virtual)") @atexit.register def free_display(): """Stop virtual display (if it is up)""" from .. import config + config.stop_display() diff --git a/nipype/utils/docparse.py b/nipype/utils/docparse.py index 041e3ad5cf..576a235892 100644 --- a/nipype/utils/docparse.py +++ b/nipype/utils/docparse.py @@ -35,11 +35,12 @@ def grab_doc(cmd, trap_error=True): """ proc = subprocess.Popen( - cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True + ) stdout, stderr = proc.communicate() if trap_error and proc.returncode: - msg = 'Attempting to run %s. Returned Error: %s' % (cmd, stderr) + msg = "Attempting to run %s. 
Returned Error: %s" % (cmd, stderr) raise IOError(msg) if stderr: @@ -76,7 +77,7 @@ def reverse_opt_map(opt_map): # The value is a tuple where the first element is the # format string and the second element is a docstring. value = value[0] - if (key != 'flags' and value is not None): + if key != "flags" and value is not None: revdict[value.split()[0]] = key return revdict @@ -104,21 +105,21 @@ def format_params(paramlist, otherlist=None): The formatted docstring. """ - hdr = 'Parameters' - delim = '----------' + hdr = "Parameters" + delim = "----------" paramlist.insert(0, delim) paramlist.insert(0, hdr) - params = '\n'.join(paramlist) + params = "\n".join(paramlist) otherparams = [] - doc = ''.join(params) + doc = "".join(params) if otherlist: - hdr = 'Others Parameters' - delim = '-----------------' + hdr = "Others Parameters" + delim = "-----------------" otherlist.insert(0, delim) otherlist.insert(0, hdr) - otherlist.insert(0, '\n') - otherparams = '\n'.join(otherlist) - doc = ''.join([doc, otherparams]) + otherlist.insert(0, "\n") + otherparams = "\n".join(otherlist) + doc = "".join([doc, otherparams]) return doc @@ -159,7 +160,7 @@ def insert_doc(doc, new_items): """ # Insert new_items after the Parameters header - doclist = doc.split('\n') + doclist = doc.split("\n") tmpdoc = doclist[:2] # Add new_items tmpdoc.extend(new_items) @@ -169,10 +170,10 @@ def insert_doc(doc, new_items): newdoc = [] for line in tmpdoc: newdoc.append(line) - newdoc.append('\n') + newdoc.append("\n") # We add one too many newlines, remove it. newdoc.pop(-1) - return ''.join(newdoc) + return "".join(newdoc) def build_doc(doc, opts): @@ -196,7 +197,7 @@ def build_doc(doc, opts): # Split doc into line elements. Generally, each line is an # individual flag/option. - doclist = doc.split('\n') + doclist = doc.split("\n") newdoc = [] flags_doc = [] for line in doclist: @@ -205,17 +206,17 @@ def build_doc(doc, opts): # Probably an empty line continue # For lines we care about, the first item is the flag - if ',' in linelist[0]: # sometimes flags are only seperated by comma - flag = linelist[0].split(',')[0] + if "," in linelist[0]: # sometimes flags are only seperated by comma + flag = linelist[0].split(",")[0] else: flag = linelist[0] attr = opts.get(flag) if attr is not None: # newline = line.replace(flag, attr) # Replace the flag with our attribute name - linelist[0] = '%s :\n ' % str(attr) + linelist[0] = "%s :\n " % str(attr) # Add some line formatting - newline = ' '.join(linelist) + newline = " ".join(linelist) newdoc.append(newline) else: if line[0].isspace(): @@ -249,20 +250,21 @@ def get_doc(cmd, opt_map, help_flag=None, trap_error=True): """ res = CommandLine( - 'which %s' % cmd.split(' ')[0], + "which %s" % cmd.split(" ")[0], resource_monitor=False, - terminal_output='allatonce').run() + terminal_output="allatonce", + ).run() cmd_path = res.runtime.stdout.strip() - if cmd_path == '': - raise Exception('Command %s not found' % cmd.split(' ')[0]) + if cmd_path == "": + raise Exception("Command %s not found" % cmd.split(" ")[0]) if help_flag: - cmd = ' '.join((cmd, help_flag)) + cmd = " ".join((cmd, help_flag)) doc = grab_doc(cmd, trap_error) opts = reverse_opt_map(opt_map) return build_doc(doc, opts) -def _parse_doc(doc, style=['--']): +def _parse_doc(doc, style=["--"]): """Parses a help doc for inputs Parameters @@ -279,16 +281,16 @@ def _parse_doc(doc, style=['--']): # Split doc into line elements. Generally, each line is an # individual flag/option. 
- doclist = doc.split('\n') + doclist = doc.split("\n") optmap = {} if isinstance(style, (str, bytes)): style = [style] for line in doclist: linelist = line.split() flag = [ - item for i, item in enumerate(linelist) - if i < 2 and any([item.startswith(s) - for s in style]) and len(item) > 1 + item + for i, item in enumerate(linelist) + if i < 2 and any([item.startswith(s) for s in style]) and len(item) > 1 ] if flag: if len(flag) == 1: @@ -303,11 +305,11 @@ def _parse_doc(doc, style=['--']): break flag = flag[style_idx.index(min(style_idx))] style_idx = min(style_idx) - optmap[flag.split(style[style_idx])[1]] = '%s %%s' % flag + optmap[flag.split(style[style_idx])[1]] = "%s %%s" % flag return optmap -def get_params_from_doc(cmd, style='--', help_flag=None, trap_error=True): +def get_params_from_doc(cmd, style="--", help_flag=None, trap_error=True): """Auto-generate option map from command line help Parameters @@ -329,14 +331,15 @@ def get_params_from_doc(cmd, style='--', help_flag=None, trap_error=True): """ res = CommandLine( - 'which %s' % cmd.split(' ')[0], + "which %s" % cmd.split(" ")[0], resource_monitor=False, - terminal_output='allatonce').run() + terminal_output="allatonce", + ).run() cmd_path = res.runtime.stdout.strip() - if cmd_path == '': - raise Exception('Command %s not found' % cmd.split(' ')[0]) + if cmd_path == "": + raise Exception("Command %s not found" % cmd.split(" ")[0]) if help_flag: - cmd = ' '.join((cmd, help_flag)) + cmd = " ".join((cmd, help_flag)) doc = grab_doc(cmd, trap_error) return _parse_doc(doc, style) diff --git a/nipype/utils/draw_gantt_chart.py b/nipype/utils/draw_gantt_chart.py index 6a037d34e0..0da078af84 100644 --- a/nipype/utils/draw_gantt_chart.py +++ b/nipype/utils/draw_gantt_chart.py @@ -17,13 +17,15 @@ try: import pandas as pd except ImportError: - print('Pandas not found; in order for full functionality of this module ' - 'install the pandas package') + print( + "Pandas not found; in order for full functionality of this module " + "install the pandas package" + ) pass def create_event_dict(start_time, nodes_list): - ''' + """ Function to generate a dictionary of event (start/finish) nodes from the nodes list @@ -39,7 +41,7 @@ def create_event_dict(start_time, nodes_list): events : dictionary a dictionary where the key is the timedelta from the start of the pipeline execution to the value node it accompanies - ''' + """ # Import packages import copy @@ -47,28 +49,28 @@ def create_event_dict(start_time, nodes_list): events = {} for node in nodes_list: # Format node fields - estimated_threads = node.get('num_threads', 1) - estimated_memory_gb = node.get('estimated_memory_gb', 1.0) - runtime_threads = node.get('runtime_threads', 0) - runtime_memory_gb = node.get('runtime_memory_gb', 0.0) + estimated_threads = node.get("num_threads", 1) + estimated_memory_gb = node.get("estimated_memory_gb", 1.0) + runtime_threads = node.get("runtime_threads", 0) + runtime_memory_gb = node.get("runtime_memory_gb", 0.0) # Init and format event-based nodes - node['estimated_threads'] = estimated_threads - node['estimated_memory_gb'] = estimated_memory_gb - node['runtime_threads'] = runtime_threads - node['runtime_memory_gb'] = runtime_memory_gb + node["estimated_threads"] = estimated_threads + node["estimated_memory_gb"] = estimated_memory_gb + node["runtime_threads"] = runtime_threads + node["runtime_memory_gb"] = runtime_memory_gb start_node = node finish_node = copy.deepcopy(node) - start_node['event'] = 'start' - finish_node['event'] = 'finish' + 
start_node["event"] = "start" + finish_node["event"] = "finish" # Get dictionary key - start_delta = (node['start'] - start_time).total_seconds() - finish_delta = (node['finish'] - start_time).total_seconds() + start_delta = (node["start"] - start_time).total_seconds() + finish_delta = (node["finish"] - start_time).total_seconds() # Populate dictionary if events.get(start_delta) or events.get(finish_delta): - err_msg = 'Event logged twice or events started at exact same time!' + err_msg = "Event logged twice or events started at exact same time!" raise KeyError(err_msg) events[start_delta] = start_node events[finish_delta] = finish_node @@ -78,7 +80,7 @@ def create_event_dict(start_time, nodes_list): def log_to_dict(logfile): - ''' + """ Function to extract log node dictionaries into a list of python dictionaries and return the list as well as the final node @@ -93,10 +95,10 @@ def log_to_dict(logfile): nodes_list : list a list of python dictionaries containing the runtime info for each nipype node - ''' + """ # Init variables - with open(logfile, 'r') as content: + with open(logfile, "r") as content: # read file separating each line lines = content.readlines() @@ -107,7 +109,7 @@ def log_to_dict(logfile): def calculate_resource_timeseries(events, resource): - ''' + """ Given as event dictionary, calculate the resources used as a timeseries @@ -125,7 +127,7 @@ def calculate_resource_timeseries(events, resource): time_series : pandas Series a pandas Series object that contains timestamps as the indices and the resource amount as values - ''' + """ # Import packages import pandas as pd @@ -136,14 +138,14 @@ def calculate_resource_timeseries(events, resource): # Iterate through the events for _, event in sorted(events.items()): - if event['event'] == "start": - if resource in event and event[resource] != 'Unknown': + if event["event"] == "start": + if resource in event and event[resource] != "Unknown": all_res += float(event[resource]) - current_time = event['start'] - elif event['event'] == "finish": - if resource in event and event[resource] != 'Unknown': + current_time = event["start"] + elif event["event"] == "finish": + if resource in event and event[resource] != "Unknown": all_res -= float(event[resource]) - current_time = event['finish'] + current_time = event["finish"] res[current_time] = all_res # Formulate the pandas timeseries @@ -157,7 +159,7 @@ def calculate_resource_timeseries(events, resource): def draw_lines(start, total_duration, minute_scale, scale): - ''' + """ Function to draw the minute line markers and timestamps Parameters @@ -178,10 +180,10 @@ def draw_lines(start, total_duration, minute_scale, scale): result : string the html-formatted string for producing the minutes-based time line markers - ''' + """ # Init variables - result = '' + result = "" next_line = 220 next_time = start num_lines = int(((total_duration // 60) // minute_scale) + 2) @@ -192,8 +194,11 @@ def draw_lines(start, total_duration, minute_scale, scale): new_line = "
" % next_line result += new_line # Time digits - time = "

%02d:%02d

" % \ - (next_line-20, next_time.hour, next_time.minute) + time = "

%02d:%02d

" % ( + next_line - 20, + next_time.hour, + next_time.minute, + ) result += time # Increment line spacing and digits next_line += minute_scale * scale @@ -203,9 +208,8 @@ def draw_lines(start, total_duration, minute_scale, scale): return result -def draw_nodes(start, nodes_list, cores, minute_scale, space_between_minutes, - colors): - ''' +def draw_nodes(start, nodes_list, cores, minute_scale, space_between_minutes, colors): + """ Function to return the html-string of the node drawings for the gantt chart @@ -235,28 +239,30 @@ def draw_nodes(start, nodes_list, cores, minute_scale, space_between_minutes, result : string the html-formatted string for producing the minutes-based time line markers - ''' + """ # Init variables - result = '' + result = "" scale = space_between_minutes / minute_scale space_between_minutes = space_between_minutes / scale end_times = [ - datetime.datetime(start.year, start.month, start.day, start.hour, - start.minute, start.second) for core in range(cores) + datetime.datetime( + start.year, start.month, start.day, start.hour, start.minute, start.second + ) + for core in range(cores) ] # For each node in the pipeline for node in nodes_list: # Get start and finish times - node_start = node['start'] - node_finish = node['finish'] + node_start = node["start"] + node_finish = node["finish"] # Calculate an offset and scale duration - offset = ((node_start - start).total_seconds() / 60) * scale * \ - space_between_minutes + 220 + offset = ( + (node_start - start).total_seconds() / 60 + ) * scale * space_between_minutes + 220 # Scale duration - scale_duration = ( - node['duration'] / 60) * scale * space_between_minutes + scale_duration = (node["duration"] / 60) * scale * space_between_minutes if scale_duration < 5: scale_duration = 5 scale_duration -= 2 @@ -266,32 +272,38 @@ def draw_nodes(start, nodes_list, cores, minute_scale, space_between_minutes, if end_times[core] < node_start: left += core * 30 end_times[core] = datetime.datetime( - node_finish.year, node_finish.month, node_finish.day, - node_finish.hour, node_finish.minute, node_finish.second) + node_finish.year, + node_finish.month, + node_finish.day, + node_finish.hour, + node_finish.minute, + node_finish.second, + ) break # Get color for node object color = random.choice(colors) - if 'error' in node: - color = 'red' + if "error" in node: + color = "red" # Setup dictionary for node html string insertion node_dict = { - 'left': left, - 'offset': offset, - 'scale_duration': scale_duration, - 'color': color, - 'node_name': node['name'], - 'node_dur': node['duration'] / 60.0, - 'node_start': node_start.strftime("%Y-%m-%d %H:%M:%S"), - 'node_finish': node_finish.strftime("%Y-%m-%d %H:%M:%S") + "left": left, + "offset": offset, + "scale_duration": scale_duration, + "color": color, + "node_name": node["name"], + "node_dur": node["duration"] / 60.0, + "node_start": node_start.strftime("%Y-%m-%d %H:%M:%S"), + "node_finish": node_finish.strftime("%Y-%m-%d %H:%M:%S"), } # Create new node string - new_node = "
" % \ - node_dict + new_node = ( + "
" % node_dict + ) # Append to output result result += new_node @@ -300,15 +312,21 @@ def draw_nodes(start, nodes_list, cores, minute_scale, space_between_minutes, return result -def draw_resource_bar(start_time, finish_time, time_series, - space_between_minutes, minute_scale, color, left, - resource): - ''' - ''' +def draw_resource_bar( + start_time, + finish_time, + time_series, + space_between_minutes, + minute_scale, + color, + left, + resource, +): + """ + """ # Memory header - result = "

%s

" \ - % (left, resource) + result = "

%s

" % (left, resource) # Image scaling factors scale = space_between_minutes / minute_scale space_between_minutes = space_between_minutes / scale @@ -323,8 +341,9 @@ def draw_resource_bar(start_time, finish_time, time_series, else: ts_end = finish_time # Calculate offset from start at top - offset = ((ts_start-start_time).total_seconds() / 60.0) * scale * \ - space_between_minutes + 220 + offset = ( + (ts_start - start_time).total_seconds() / 60.0 + ) * scale * space_between_minutes + 220 # Scale duration duration_mins = (ts_end - ts_start).total_seconds() / 60.0 height = duration_mins * scale * space_between_minutes @@ -335,29 +354,31 @@ def draw_resource_bar(start_time, finish_time, time_series, # Bar width is proportional to resource amount width = amount * 20 - if resource.lower() == 'memory': - label = '%.3f GB' % amount + if resource.lower() == "memory": + label = "%.3f GB" % amount else: - label = '%d threads' % amount + label = "%d threads" % amount # Setup dictionary for bar html string insertion bar_dict = { - 'color': color, - 'height': height, - 'width': width, - 'offset': offset, - 'left': left, - 'label': label, - 'duration': duration_mins, - 'start': ts_start.strftime('%Y-%m-%d %H:%M:%S'), - 'finish': ts_end.strftime('%Y-%m-%d %H:%M:%S') + "color": color, + "height": height, + "width": width, + "offset": offset, + "left": left, + "label": label, + "duration": duration_mins, + "start": ts_start.strftime("%Y-%m-%d %H:%M:%S"), + "finish": ts_end.strftime("%Y-%m-%d %H:%M:%S"), } - bar_html = "
" + bar_html = ( + "
" + ) # Add another bar to html line result += bar_html % bar_dict @@ -365,12 +386,14 @@ def draw_resource_bar(start_time, finish_time, time_series, return result -def generate_gantt_chart(logfile, - cores, - minute_scale=10, - space_between_minutes=50, - colors=["#7070FF", "#4E4EB2", "#2D2D66", "#9B9BFF"]): - ''' +def generate_gantt_chart( + logfile, + cores, + minute_scale=10, + space_between_minutes=50, + colors=["#7070FF", "#4E4EB2", "#2D2D66", "#9B9BFF"], +): + """ Generates a gantt chart in html showing the workflow execution based on a callback log file. This script was intended to be used with the MultiprocPlugin. The following code shows how to set up the workflow in order to generate the log file: @@ -418,10 +441,10 @@ def generate_gantt_chart(logfile, # plugin_args={'n_procs':8, 'memory':12, 'status_callback': log_nodes_cb}) # generate_gantt_chart('callback.log', 8) - ''' + """ # add the html header - html_string = ''' + html_string = """ \n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "Most used/contributed policy!\n", - "\n", - "Not all components of these packages are available." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "skip" - } - }, - "source": [ - "# Workflows\n", - "\n", - "- Properties:\n", - "\n", - " - processing pipeline is a directed acyclic graph (DAG)\n", - " - nodes are processes\n", - " - edges represent data flow\n", - " - compact represenation for any process\n", - " - code and data separation" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "#Execution Plugins\n", - "\n", - "Allows seamless execution across many architectures\n", - "\n", - " - Local\n", - "\n", - " - Serial\n", - " - Multicore\n", - "\n", - " - Clusters\n", - "\n", - " - HTCondor\n", - " - PBS/Torque/SGE/LSF (native and via IPython)\n", - " - SSH (via IPython)\n", - " - Soma Workflow" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "# Learn Nipype concepts in 10 easy steps\n", - "\n", - "\n", - "1. Installing and testing the installation \n", - "2. Working with interfaces\n", - "3. Using Nipype caching\n", - "4. Creating Nodes, MapNodes and Workflows\n", - "5. Getting and saving data\n", - "6. Using Iterables\n", - "7. Function nodes\n", - "8. Distributed computation\n", - "9. Connecting to databases\n", - "10. Execution configuration options" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "# Step 1. Installing Nipype\n", - "\n", - "## Scientific Python:\n", - "\n", - "* Debian/Ubuntu/Scientific Fedora\n", - "* [Canopy from Enthought](https://www.enthought.com/products/canopy/)\n", - "* [Anaconda from Contnuum Analytics](https://store.continuum.io/cshop/anaconda/)\n", - "\n", - "## Installing Nipype:\n", - "\n", - "* Available from [@NeuroDebian](http://neuro.debian.net/pkgs/python-nipype.html),\n", - " [@PyPI](http://pypi.python.org/pypi/nipype/), and\n", - " [@GitHub](http://github.com/nipy/nipype)\n", - " \n", - " - pip install nipype\n", - " - easy_install nipype\n", - " - sudo apt-get install python-nipype\n", - "\n", - "* Dependencies: networkx, nibabel, numpy, scipy, traits\n", - "\n", - "## Running Nipype ([Quickstart](http://nipy.org/nipype/quickstart.html)):\n", - "\n", - "* Ensure underlying tools are installed and accessible\n", - "* Nipype **is a wrapper, not a substitute** for AFNI, ANTS, FreeSurfer, FSL, SPM,\n", - " NiPy, etc.,." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "# Step 1. 
Testing nipype" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```\n", - "$ ipython notebook\n", - "```" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "import nipype\n", - "\n", - "# Comment the following section to increase verbosity of output\n", - "nipype.config.set('logging', 'workflow_level', 'CRITICAL')\n", - "nipype.config.set('logging', 'interface_level', 'CRITICAL')\n", - "nipype.logging.update_logging(nipype.config)\n", - "\n", - "nipype.test(verbose=0) # Increase verbosity parameter for more info" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "If all goes well you will see an OK:\n", - "\n", - " ----------------------------------------------------------------------\n", - " Ran 2497 tests in 68.486s\n", - "\n", - " OK (SKIP=13)\n", - "\n", - "The number of tests and time will vary depending on which interfaces you have installed on your system." - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "nipype.get_info()" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "# Environment and data setup\n", - "\n", - "Setting up your Ipython notebook environment and download some data to play with" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "%pylab inline" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "# Some preliminaries\n", - "import os\n", - "cwd = os.getcwd()\n", - "tutorial_dir = '/software/temp/nipype-tutorial/ohbm/'\n", - "if not os.path.exists(tutorial_dir):\n", - " os.mkdir(tutorial_dir)\n", - "os.chdir(tutorial_dir)" - ], - "language": "python", - "metadata": { - "slideshow": { - "slide_type": "-" - } - }, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "import urllib\n", - "required_files = ['ds107/sub001/BOLD/task001_run001/bold.nii.gz',\n", - " 'ds107/sub001/BOLD/task001_run002/bold.nii.gz',\n", - " 'ds107/sub001/anatomy/highres001.nii.gz',\n", - " 'ds107/sub044/BOLD/task001_run001/bold.nii.gz',\n", - " 'ds107/sub044/BOLD/task001_run002/bold.nii.gz',\n", - " 'ds107/sub044/anatomy/highres001.nii.gz'\n", - " ]\n", - "base_url = 'http://openfmri.aws.amazon.com.s3.amazonaws.com/'\n", - "for filepath in required_files:\n", - " file_location = os.path.join(tutorial_dir, filepath)\n", - " if not os.path.exists(file_location):\n", - " print('Retrieving: ' + file_location)\n", - " os.makedirs(os.path.dirname(file_location))\n", - " urllib.urlretrieve(base_url + filepath, file_location)" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "# Step 2. 
Working with interfaces" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "import nipype.algorithms" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from nipype.interfaces.fsl import DTIFit\n", - "from nipype.interfaces.spm import Realign" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "### Finding interface inputs and outputs and examples" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "DTIFit.help()" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "Realign.help()" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "### Creating a directory for running interfaces" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "import os\n", - "from shutil import copyfile\n", - "library_dir = os.path.join(tutorial_dir, 'as_a_library')\n", - "if not os.path.exists(library_dir):\n", - " os.mkdir(library_dir)\n", - "os.chdir(library_dir)" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "## Executing interfaces" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from nipype.interfaces.freesurfer import MRIConvert\n", - "convert = MRIConvert(in_file='../ds107/sub001/BOLD/task001_run001/bold.nii.gz',\n", - " out_file='ds107.nii')\n", - "print(convert.cmdline)\n", - "results = convert.run(terminal_output='none') # allatonce, stream (default), file" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "results.outputs" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "## Other ways" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "convert = MRIConvert()\n", - "convert.inputs.in_file='../ds107/sub001/BOLD/task001_run001/bold.nii.gz'\n", - "convert.inputs.out_file='ds107.nii'\n", - "convert.run()" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "convert = MRIConvert()\n", - "convert.run(in_file='../ds107/sub001/BOLD/task001_run001/bold.nii.gz',\n", - " out_file='ds107.nii')" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": true, - "input": [ - "convert.inputs" - ], - "language": "python", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "#### Look at only the defined inputs" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "results.inputs" - ], - "language": "python", - "metadata": { - "slideshow": { - "slide_type": "-" - } - }, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "### Experiment with other 
interfaces\n", - "\n", - "For example, run realignment with SPM" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from nipype.interfaces.spm import Realign\n", - "results1 = Realign(in_files='ds107.nii',\n", - " register_to_mean=False).run()" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "And now use FSL" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from nipype.interfaces.fsl import MCFLIRT\n", - "results2 = MCFLIRT(in_file='ds107.nii', ref_vol=0,\n", - " save_plots=True).run()" - ], - "language": "python", - "metadata": { - "slideshow": { - "slide_type": "-" - } - }, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "### Now we can look at some results" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "print results1.runtime.duration, results2.runtime.duration\n", - "subplot(211);plot(genfromtxt('ds107_mcf.nii.gz.par')[:, 3:]);title('FSL')\n", - "subplot(212);plot(genfromtxt('rp_ds107.txt')[:,:3]);title('SPM')" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### if i execute the MCFLIRT line again, well, it runs again!" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "# Step 3. Nipype caching" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from nipype.caching import Memory\n", - "mem = Memory('.')" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Create `cacheable` objects" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "spm_realign = mem.cache(Realign)\n", - "fsl_realign = mem.cache(MCFLIRT)" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "### Execute interfaces" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "spm_results = spm_realign(in_files='ds107.nii', register_to_mean=False)\n", - "fsl_results = fsl_realign(in_file='ds107.nii', ref_vol=0, save_plots=True)" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "subplot(211);plot(genfromtxt(fsl_results.outputs.par_file)[:, 3:])\n", - "subplot(212);plot(genfromtxt(spm_results.outputs.realignment_parameters)[:,:3])" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "spm_results = spm_realign(in_files='ds107.nii', register_to_mean=False)\n", - "fsl_results = fsl_realign(in_file='ds107.nii', ref_vol=0, save_plots=True)" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "# More caching" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from os.path import abspath as opap\n", - "files = [opap('../ds107/sub001/BOLD/task001_run001/bold.nii.gz'),\n", - " opap('../ds107/sub001/BOLD/task001_run002/bold.nii.gz')]\n", - "converter = mem.cache(MRIConvert)\n", - "newfiles = []\n", - 
"for idx, fname in enumerate(files):\n", - " newfiles.append(converter(in_file=fname,\n", - " out_type='nii').outputs.out_file)" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "os.chdir(tutorial_dir)" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "# Step 4: Nodes, Mapnodes and workflows\n", - "\n", - "**Where:**" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from nipype.pipeline.engine import Node, MapNode, Workflow" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Node**:" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "realign_spm = Node(Realign(), name='motion_correct')" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Mapnode**:\n", - "\n", - "" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "convert2nii = MapNode(MRIConvert(out_type='nii'),\n", - " iterfield=['in_file'],\n", - " name='convert2nii')" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "# \"Hello World\" of Nipype workflows" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "### Connect them up:" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "realignflow = Workflow(name='realign_with_spm')\n", - "realignflow.connect(convert2nii, 'out_file',\n", - " realign_spm, 'in_files')" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "convert2nii.inputs.in_file = files\n", - "realign_spm.inputs.register_to_mean = False\n", - "\n", - "realignflow.base_dir = opap('.')\n", - "realignflow.run()" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "#Visualize the workflow" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "realignflow.write_graph()" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from IPython.core.display import Image\n", - "Image('realign_with_spm/graph.png')" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "realignflow.write_graph(graph2use='orig')\n", - "Image('realign_with_spm/graph_detailed.png')" - ], - "language": "python", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "# Step 5. 
Getting and saving data\n", - "\n", - "### Instead of assigning data ourselves, let's *glob* it" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "os.chdir(tutorial_dir)" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from nipype.interfaces.io import DataGrabber, DataFinder\n", - "ds = Node(DataGrabber(infields=['subject_id'], outfields=['func']),\n", - " name='datasource')\n", - "ds.inputs.base_directory = opap('ds107')\n", - "ds.inputs.template = '%s/BOLD/task001*/bold.nii.gz'\n", - "ds.inputs.sort_filelist = True\n", - "\n", - "ds.inputs.subject_id = 'sub001'\n", - "print ds.run().outputs" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "ds.inputs.subject_id = 'sub044'\n", - "print ds.run().outputs" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "#Multiple files\n", - "\n", - "### A little more practical usage" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "ds = Node(DataGrabber(infields=['subject_id', 'task_id'],\n", - " outfields=['func', 'anat']),\n", - " name='datasource')\n", - "ds.inputs.base_directory = opap('ds107')\n", - "ds.inputs.template = '*'\n", - "ds.inputs.template_args = {'func': [['subject_id', 'task_id']],\n", - " 'anat': [['subject_id']]}\n", - "ds.inputs.field_template = {'func': '%s/BOLD/task%03d*/bold.nii.gz',\n", - " 'anat': '%s/anatomy/highres001.nii.gz'}\n", - "ds.inputs.sort_filelist = True\n", - "ds.inputs.subject_id = 'sub001'\n", - "ds.inputs.task_id = 1\n", - "print ds.run().outputs" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "# Connecting to computation" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "convert2nii = MapNode(MRIConvert(out_type='nii'),\n", - " iterfield=['in_file'],\n", - " name='convert2nii')\n", - "\n", - "realign_spm = Node(Realign(), name='motion_correct')\n", - "realign_spm.inputs.register_to_mean = False\n", - "\n", - "connectedworkflow = Workflow(name='connectedtogether')\n", - "connectedworkflow.base_dir = opap('working_dir')\n", - "connectedworkflow.connect(ds, 'func', convert2nii, 'in_file')\n", - "connectedworkflow.connect(convert2nii, 'out_file', realign_spm, 'in_files')" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "#Data sinking\n", - "\n", - "###Take output computed in a workflow out of it." 
- ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from nipype.interfaces.io import DataSink\n", - "sinker = Node(DataSink(), name='sinker')\n", - "sinker.inputs.base_directory = opap('output')\n", - "connectedworkflow.connect(realign_spm, 'realigned_files',\n", - " sinker, 'realigned')\n", - "connectedworkflow.connect(realign_spm, 'realignment_parameters',\n", - " sinker, 'realigned.@parameters')\n", - "connectedworkflow.run()" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### How to determine output location\n", - "\n", - " 'base_directory/container/parameterization/destloc/filename'\n", - " \n", - " destloc = [@]string[[.[@]]string[[.[@]]string]...] and\n", - " destloc = realigned.@parameters --> 'realigned'\n", - " destloc = realigned.parameters.@1 --> 'realigned/parameters'\n", - " destloc = realigned.parameters.@2 --> 'realigned/parameters'\n", - " filename comes from the input to the connect statement." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "#Step 6: *iterables* - parametric execution\n", - "\n", - "**Workflow + iterables**: runs subgraph several times, attribute not input" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "ds.iterables = ('subject_id', ['sub001', 'sub044'])\n", - "connectedworkflow.run()" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "#Putting it all together\n", - "\n", - "### iterables + MapNode + Node + Workflow + DataGrabber + DataSink" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "connectedworkflow.write_graph()\n", - "Image('working_dir/connectedtogether/graph.png')" - ], - "language": "python", - "metadata": { - "slideshow": { - "slide_type": "-" - } - }, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "# Step 7: The Function interface\n", - "\n", - "### The do anything you want card" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from nipype.interfaces.utility import Function\n", - "\n", - "def myfunc(input1, input2):\n", - " \"\"\"Add and subtract two inputs\n", - " \"\"\"\n", - " return input1 + input2, input1 - input2\n", - "\n", - "calcfunc = Node(Function(input_names=['input1', 'input2'],\n", - " output_names = ['sum', 'difference'],\n", - " function=myfunc),\n", - " name='mycalc')\n", - "calcfunc.inputs.input1 = 1\n", - "calcfunc.inputs.input2 = 2\n", - "res = calcfunc.run()\n", - "print res.outputs" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "#Step 8: Distributed computing\n", - "\n", - "### Normally calling run executes the workflow in series" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "connectedworkflow.run()" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### but you can scale very easily\n", - "\n", - "For example, to use multiple cores on your local machine" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - 
"connectedworkflow.run('MultiProc', plugin_args={'n_procs': 4})" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "### Or to other job managers\n", - "\n", - "- connectedworkflow.run('PBS', plugin_args={'qsub_args': '-q many'})\n", - "- connectedworkflow.run('SGE', plugin_args={'qsub_args': '-q many'})\n", - "- connectedworkflow.run('LSF', plugin_args={'qsub_args': '-q many'})\n", - "- connectedworkflow.run('Condor')\n", - "- connectedworkflow.run('IPython')\n", - "\n", - "### or submit graphs as a whole\n", - "\n", - "\n", - "- connectedworkflow.run('PBSGraph', plugin_args={'qsub_args': '-q many'})\n", - "- connectedworkflow.run('SGEGraph', plugin_args={'qsub_args': '-q many'})\n", - "- connectedworkflow.run('CondorDAGMan')\n", - "\n", - "### Current Requirement: **SHARED FILESYSTEM**" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "### You can also set node specific plugin arguments" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "- node.plugin_args = {'qsub_args': '-l nodes=1:ppn=3', 'overwrite': True}\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "#Step 9: Connecting to Databases" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from os.path import abspath as opap\n", - "\n", - "from nipype.interfaces.io import XNATSource\n", - "from nipype.pipeline.engine import Node, Workflow\n", - "from nipype.interfaces.fsl import BET\n", - "\n", - "subject_id = 'xnat_S00001'\n", - "\n", - "dg = Node(XNATSource(infields=['subject_id'],\n", - " outfields=['struct'],\n", - " config='/Users/satra/xnat_configs/nitrc_ir_config'),\n", - " name='xnatsource')\n", - "dg.inputs.query_template = ('/projects/fcon_1000/subjects/%s/experiments/xnat_E00001'\n", - " '/scans/%s/resources/NIfTI/files')\n", - "dg.inputs.query_template_args['struct'] = [['subject_id', 'anat_mprage_anonymized']]\n", - "dg.inputs.subject_id = subject_id\n", - "\n", - "bet = Node(BET(), name='skull_stripper')\n", - "\n", - "wf = Workflow(name='testxnat')\n", - "wf.base_dir = opap('xnattest')\n", - "wf.connect(dg, 'struct', bet, 'in_file')" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from nipype.interfaces.io import XNATSink\n", - "\n", - "ds = Node(XNATSink(config='/Users/satra/xnat_configs/central_config'),\n", - " name='xnatsink')\n", - "ds.inputs.project_id = 'NPTEST'\n", - "ds.inputs.subject_id = 'NPTEST_xnat_S00001'\n", - "ds.inputs.experiment_id = 'test_xnat'\n", - "ds.inputs.reconstruction_id = 'bet'\n", - "ds.inputs.share = True\n", - "wf.connect(bet, 'out_file', ds, 'brain')" - ], - "language": "python", - "metadata": { - "slideshow": { - "slide_type": "skip" - } - }, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "wf.run()" - ], - "language": "python", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "#Step 10: Configuration options\n", - "\n", - "[Configurable options](http://nipy.org/nipype/users/config_file.html) control workflow and node execution options\n", - "\n", - "At the global level:" - ] - }, - 
{ - "cell_type": "code", - "collapsed": false, - "input": [ - "from nipype import config, logging\n", - "\n", - "config.enable_debug_mode()\n", - "logging.update_logging(config)\n", - "\n", - "config.set('execution', 'stop_on_first_crash', 'true')" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "At the workflow level:" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "wf.config['execution']['hash_method'] = 'content'" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "Configurations can also be set at the node level." - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "bet.config = {'execution': {'keep_unnecessary_outputs': 'true'}}" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "wf.run()" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "# Reusable workflows" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "config.set_default_config()\n", - "logging.update_logging(config)" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from niflow.nipype1.workflows.fmri.fsl.preprocess import create_susan_smooth\n", - "\n", - "smooth = create_susan_smooth()\n", - "smooth.inputs.inputnode.in_files = opap('output/realigned/_subject_id_sub044/rbold_out.nii')\n", - "smooth.inputs.inputnode.fwhm = 5\n", - "smooth.inputs.inputnode.mask_file = 'mask.nii'\n", - "\n", - "smooth.run() # Will error because mask.nii does not exist" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from nipype.interfaces.fsl import BET, MeanImage, ImageMaths\n", - "from nipype.pipeline.engine import Node\n", - "\n", - "\n", - "remove_nan = Node(ImageMaths(op_string= '-nan'), name='nanremove')\n", - "remove_nan.inputs.in_file = opap('output/realigned/_subject_id_sub044/rbold_out.nii')\n", - "\n", - "mi = Node(MeanImage(), name='mean')\n", - "\n", - "mask = Node(BET(mask=True), name='mask')\n", - "\n", - "wf = Workflow('reuse')\n", - "wf.base_dir = opap('.')\n", - "wf.connect(remove_nan, 'out_file', mi, 'in_file')\n", - "wf.connect(mi, 'out_file', mask, 'in_file')\n", - "wf.connect(mask, 'out_file', smooth, 'inputnode.mask_file')\n", - "wf.connect(remove_nan, 'out_file', smooth, 'inputnode.in_files')\n", - "\n", - "wf.run()" - ], - "language": "python", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "## Setting internal parameters of workflows" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "print(smooth.list_node_names())\n", - "\n", - "median = smooth.get_node('median')\n", - "median.inputs.op_string = '-k %s -p 60'" - ], - "language": "python", - "metadata": { - "slideshow": { - "slide_type": "-" - } - }, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "wf.run()" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": 
"markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "# Summary\n", - "\n", - "\n", - "- This tutorial covers the concepts of Nipype\n", - "\n", - " 1. Installing and testing the installation \n", - " 2. Working with interfaces\n", - " 3. Using Nipype caching\n", - " 4. Creating Nodes, MapNodes and Workflows\n", - " 5. Getting and saving data\n", - " 6. Using Iterables\n", - " 7. Function nodes\n", - " 8. Distributed computation\n", - " 9. Connecting to databases\n", - " 10. Execution configuration options\n", - "\n", - "- It will allow you to reuse and debug the various workflows available in Nipype, BIPS and CPAC\n", - "- Please contribute new interfaces and workflows!" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "import os\n", - "basedir = '/Users/satra/Dropbox/WORK/notebooks/'\n", - "if os.path.exists(basedir):\n", - " os.chdir(basedir)" - ], - "language": "python", - "metadata": { - "slideshow": { - "slide_type": "skip" - } - }, - "outputs": [] - } - ], - "metadata": {} - } - ] -} diff --git a/examples/rsfmri_vol_surface_preprocessing.py b/examples/rsfmri_vol_surface_preprocessing.py deleted file mode 100644 index 43e9d3d089..0000000000 --- a/examples/rsfmri_vol_surface_preprocessing.py +++ /dev/null @@ -1,1082 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -==================================== -rsfMRI: ANTS, FS, FSL, SPM, aCompCor -==================================== - - -A preprocessing workflow for Siemens resting state data. - -This workflow makes use of: - -- ANTS -- FreeSurfer -- FSL -- SPM -- CompCor - -For example:: - - python rsfmri_preprocessing.py -d /data/12345-34-1.dcm -f /data/Resting.nii - -s subj001 -o output -p PBS --plugin_args "dict(qsub_args='-q many')" - - or - - python rsfmri_vol_surface_preprocessing.py -f SUB_1024011/E?/func/rest.nii - -t OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz --TR 2 -s SUB_1024011 - --subjects_dir fsdata --slice_times 0 17 1 18 2 19 3 20 4 21 5 22 6 23 - 7 24 8 25 9 26 10 27 11 28 12 29 13 30 14 31 15 32 16 -o . - -This workflow takes resting timeseries and a Siemens dicom file corresponding -to it and preprocesses it to produce timeseries coordinates or grayordinates. 
- -This workflow also requires 2mm subcortical atlas and templates that are -available from: - -http://mindboggle.info/data.html - -specifically the 2mm versions of: - -- `Joint Fusion Atlas `_ -- `MNI template `_ -""" - -from __future__ import division, unicode_literals -from builtins import open, range, str - -import os - -from nipype.interfaces.base import CommandLine -CommandLine.set_default_terminal_output('allatonce') - -from dicom import read_file - -from nipype.interfaces import (spm, fsl, Function, ants, freesurfer) -from nipype.interfaces.c3 import C3dAffineTool - -fsl.FSLCommand.set_default_output_type('NIFTI') - -from nipype import Workflow, Node, MapNode -from nipype.interfaces import matlab as mlab - -mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodisplay") -# If SPM is not in your MATLAB path you should add it here -# mlab.MatlabCommand.set_default_paths('/software/matlab/spm12') - -from nipype.algorithms.rapidart import ArtifactDetect -from nipype.algorithms.misc import TSNR, CalculateMedian -from nipype.interfaces.utility import Rename, Merge, IdentityInterface -from nipype.utils.filemanip import filename_to_list -from nipype.interfaces.io import DataSink, FreeSurferSource - -import numpy as np -import scipy as sp -import nibabel as nb - -imports = [ - 'import os', 'import nibabel as nb', 'import numpy as np', - 'import scipy as sp', - 'from nipype.utils.filemanip import filename_to_list, list_to_filename, split_filename', - 'from scipy.special import legendre' -] - - -def get_info(dicom_files): - from dcmstack.extract import default_extractor - """Given a Siemens dicom file return metadata - - Returns - ------- - RepetitionTime - Slice Acquisition Times - Spacing between slices - """ - meta = default_extractor( - read_file( - filename_to_list(dicom_files)[0], - stop_before_pixels=True, - force=True)) - return (meta['RepetitionTime'] / 1000., meta['CsaImage.MosaicRefAcqTimes'], - meta['SpacingBetweenSlices']) - - -def median(in_files): - """Computes an average of the median of each realigned timeseries - - Parameters - ---------- - - in_files: one or more realigned Nifti 4D time series - - Returns - ------- - - out_file: a 3D Nifti file - """ - import numpy as np - import nibabel as nb - average = None - for idx, filename in enumerate(filename_to_list(in_files)): - img = nb.load(filename) - data = np.median(img.get_data(), axis=3) - if average is None: - average = data - else: - average = average + data - median_img = nb.Nifti1Image(average / float(idx + 1), img.affine, - img.header) - filename = os.path.join(os.getcwd(), 'median.nii.gz') - median_img.to_filename(filename) - return filename - - -def bandpass_filter(files, lowpass_freq, highpass_freq, fs): - """Bandpass filter the input files - - Parameters - ---------- - files: list of 4d nifti files - lowpass_freq: cutoff frequency for the low pass filter (in Hz) - highpass_freq: cutoff frequency for the high pass filter (in Hz) - fs: sampling rate (in Hz) - """ - from nipype.utils.filemanip import split_filename, list_to_filename - import numpy as np - import nibabel as nb - out_files = [] - for filename in filename_to_list(files): - path, name, ext = split_filename(filename) - out_file = os.path.join(os.getcwd(), name + '_bp' + ext) - img = nb.load(filename) - timepoints = img.shape[-1] - F = np.zeros((timepoints)) - lowidx = int(timepoints / 2) + 1 - if lowpass_freq > 0: - lowidx = np.round(lowpass_freq / fs * timepoints) - highidx = 0 - if highpass_freq > 0: - highidx = np.round(highpass_freq / fs * timepoints) 
- F[highidx:lowidx] = 1 - F = ((F + F[::-1]) > 0).astype(int) - data = img.get_data() - if np.all(F == 1): - filtered_data = data - else: - filtered_data = np.real(np.fft.ifftn(np.fft.fftn(data) * F)) - img_out = nb.Nifti1Image(filtered_data, img.affine, img.header) - img_out.to_filename(out_file) - out_files.append(out_file) - return list_to_filename(out_files) - - -def motion_regressors(motion_params, order=0, derivatives=1): - """Compute motion regressors upto given order and derivative - - motion + d(motion)/dt + d2(motion)/dt2 (linear + quadratic) - """ - import numpy as np - out_files = [] - for idx, filename in enumerate(filename_to_list(motion_params)): - params = np.genfromtxt(filename) - out_params = params - for d in range(1, derivatives + 1): - cparams = np.vstack((np.repeat(params[0, :][None, :], d, axis=0), - params)) - out_params = np.hstack((out_params, np.diff(cparams, d, axis=0))) - out_params2 = out_params - for i in range(2, order + 1): - out_params2 = np.hstack((out_params2, np.power(out_params, i))) - filename = os.path.join(os.getcwd(), "motion_regressor%02d.txt" % idx) - np.savetxt(filename, out_params2, fmt=b"%.10f") - out_files.append(filename) - return out_files - - -def build_filter1(motion_params, comp_norm, outliers, detrend_poly=None): - """Builds a regressor set comprisong motion parameters, composite norm and - outliers - - The outliers are added as a single time point column for each outlier - - - Parameters - ---------- - - motion_params: a text file containing motion parameters and its derivatives - comp_norm: a text file containing the composite norm - outliers: a text file containing 0-based outlier indices - detrend_poly: number of polynomials to add to detrend - - Returns - ------- - components_file: a text file containing all the regressors - """ - import numpy as np - import nibabel as nb - from scipy.special import legendre - out_files = [] - for idx, filename in enumerate(filename_to_list(motion_params)): - params = np.genfromtxt(filename) - norm_val = np.genfromtxt(filename_to_list(comp_norm)[idx]) - out_params = np.hstack((params, norm_val[:, None])) - try: - outlier_val = np.genfromtxt(filename_to_list(outliers)[idx]) - except IOError: - outlier_val = np.empty((0)) - for index in np.atleast_1d(outlier_val): - outlier_vector = np.zeros((out_params.shape[0], 1)) - outlier_vector[index] = 1 - out_params = np.hstack((out_params, outlier_vector)) - if detrend_poly: - timepoints = out_params.shape[0] - X = np.empty((timepoints, 0)) - for i in range(detrend_poly): - X = np.hstack((X, legendre(i + 1)(np.linspace( - -1, 1, timepoints))[:, None])) - out_params = np.hstack((out_params, X)) - filename = os.path.join(os.getcwd(), "filter_regressor%02d.txt" % idx) - np.savetxt(filename, out_params, fmt=b"%.10f") - out_files.append(filename) - return out_files - - -def extract_noise_components(realigned_file, - mask_file, - num_components=5, - extra_regressors=None): - """Derive components most reflective of physiological noise - - Parameters - ---------- - realigned_file: a 4D Nifti file containing realigned volumes - mask_file: a 3D Nifti file containing white matter + ventricular masks - num_components: number of components to use for noise decomposition - extra_regressors: additional regressors to add - - Returns - ------- - components_file: a text file containing the noise components - """ - from scipy.linalg.decomp_svd import svd - import numpy as np - import nibabel as nb - import os - imgseries = nb.load(realigned_file) - components = None - for 
filename in filename_to_list(mask_file): - mask = nb.load(filename).get_data() - if len(np.nonzero(mask > 0)[0]) == 0: - continue - voxel_timecourses = imgseries.get_data()[mask > 0] - voxel_timecourses[np.isnan(np.sum(voxel_timecourses, axis=1)), :] = 0 - # remove mean and normalize by variance - # voxel_timecourses.shape == [nvoxels, time] - X = voxel_timecourses.T - stdX = np.std(X, axis=0) - stdX[stdX == 0] = 1. - stdX[np.isnan(stdX)] = 1. - stdX[np.isinf(stdX)] = 1. - X = (X - np.mean(X, axis=0)) / stdX - u, _, _ = svd(X, full_matrices=False) - if components is None: - components = u[:, :num_components] - else: - components = np.hstack((components, u[:, :num_components])) - if extra_regressors: - regressors = np.genfromtxt(extra_regressors) - components = np.hstack((components, regressors)) - components_file = os.path.join(os.getcwd(), 'noise_components.txt') - np.savetxt(components_file, components, fmt=b"%.10f") - return components_file - - -def rename(in_files, suffix=None): - from nipype.utils.filemanip import (filename_to_list, split_filename, - list_to_filename) - out_files = [] - for idx, filename in enumerate(filename_to_list(in_files)): - _, name, ext = split_filename(filename) - if suffix is None: - out_files.append(name + ('_%03d' % idx) + ext) - else: - out_files.append(name + suffix + ext) - return list_to_filename(out_files) - - -def get_aparc_aseg(files): - """Return the aparc+aseg.mgz file""" - for name in files: - if 'aparc+aseg.mgz' in name: - return name - raise ValueError('aparc+aseg.mgz not found') - - -def extract_subrois(timeseries_file, label_file, indices): - """Extract voxel time courses for each subcortical roi index - - Parameters - ---------- - - timeseries_file: a 4D Nifti file - label_file: a 3D file containing rois in the same space/size of the 4D file - indices: a list of indices for ROIs to extract. 
- - Returns - ------- - out_file: a text file containing time courses for each voxel of each roi - The first four columns are: freesurfer index, i, j, k positions in the - label file - """ - from nipype.utils.filemanip import split_filename - import nibabel as nb - import os - img = nb.load(timeseries_file) - data = img.get_data() - roiimg = nb.load(label_file) - rois = roiimg.get_data() - prefix = split_filename(timeseries_file)[1] - out_ts_file = os.path.join(os.getcwd(), '%s_subcortical_ts.txt' % prefix) - with open(out_ts_file, 'wt') as fp: - for fsindex in indices: - ijk = np.nonzero(rois == fsindex) - ts = data[ijk] - for i0, row in enumerate(ts): - fp.write('%d,%d,%d,%d,' % ( - fsindex, ijk[0][i0], ijk[1][i0], - ijk[2][i0]) + ','.join(['%.10f' % val - for val in row]) + '\n') - return out_ts_file - - -def combine_hemi(left, right): - """Combine left and right hemisphere time series into a single text file - """ - import os - import numpy as np - lh_data = nb.load(left).get_data() - rh_data = nb.load(right).get_data() - - indices = np.vstack((1000000 + np.arange(0, lh_data.shape[0])[:, None], - 2000000 + np.arange(0, rh_data.shape[0])[:, None])) - all_data = np.hstack((indices, - np.vstack((lh_data.squeeze(), rh_data.squeeze())))) - filename = left.split('.')[1] + '_combined.txt' - np.savetxt( - filename, - all_data, - fmt=','.join(['%d'] + ['%.10f'] * (all_data.shape[1] - 1))) - return os.path.abspath(filename) - - -def create_reg_workflow(name='registration'): - """Create a FEAT preprocessing workflow together with freesurfer - - Parameters - ---------- - - name : name of workflow (default: 'registration') - - Inputs:: - - inputspec.source_files : files (filename or list of filenames to register) - inputspec.mean_image : reference image to use - inputspec.anatomical_image : anatomical image to coregister to - inputspec.target_image : registration target - - Outputs:: - - outputspec.func2anat_transform : FLIRT transform - outputspec.anat2target_transform : FLIRT+FNIRT transform - outputspec.transformed_files : transformed files in target space - outputspec.transformed_mean : mean image in target space - """ - - register = Workflow(name=name) - - inputnode = Node( - interface=IdentityInterface(fields=[ - 'source_files', 'mean_image', 'subject_id', 'subjects_dir', - 'target_image' - ]), - name='inputspec') - - outputnode = Node( - interface=IdentityInterface(fields=[ - 'func2anat_transform', 'out_reg_file', 'anat2target_transform', - 'transforms', 'transformed_mean', 'segmentation_files', - 'anat2target', 'aparc' - ]), - name='outputspec') - - # Get the subject's freesurfer source directory - fssource = Node(FreeSurferSource(), name='fssource') - fssource.run_without_submitting = True - register.connect(inputnode, 'subject_id', fssource, 'subject_id') - register.connect(inputnode, 'subjects_dir', fssource, 'subjects_dir') - - convert = Node(freesurfer.MRIConvert(out_type='nii'), name="convert") - register.connect(fssource, 'T1', convert, 'in_file') - - # Coregister the median to the surface - bbregister = Node(freesurfer.BBRegister(), name='bbregister') - bbregister.inputs.init = 'fsl' - bbregister.inputs.contrast_type = 't2' - bbregister.inputs.out_fsl_file = True - bbregister.inputs.epi_mask = True - register.connect(inputnode, 'subject_id', bbregister, 'subject_id') - register.connect(inputnode, 'mean_image', bbregister, 'source_file') - register.connect(inputnode, 'subjects_dir', bbregister, 'subjects_dir') - """ - Estimate the tissue classes from the anatomical image. 
But use spm's segment - as FSL appears to be breaking. - """ - - stripper = Node(fsl.BET(), name='stripper') - register.connect(convert, 'out_file', stripper, 'in_file') - fast = Node(fsl.FAST(), name='fast') - register.connect(stripper, 'out_file', fast, 'in_files') - """ - Binarize the segmentation - """ - - binarize = MapNode( - fsl.ImageMaths(op_string='-nan -thr 0.9 -ero -bin'), - iterfield=['in_file'], - name='binarize') - register.connect(fast, 'partial_volume_files', binarize, 'in_file') - """ - Apply inverse transform to take segmentations to functional space - """ - - applyxfm = MapNode( - freesurfer.ApplyVolTransform(inverse=True, interp='nearest'), - iterfield=['target_file'], - name='inverse_transform') - register.connect(inputnode, 'subjects_dir', applyxfm, 'subjects_dir') - register.connect(bbregister, 'out_reg_file', applyxfm, 'reg_file') - register.connect(binarize, 'out_file', applyxfm, 'target_file') - register.connect(inputnode, 'mean_image', applyxfm, 'source_file') - """ - Apply inverse transform to aparc file - """ - - aparcxfm = Node( - freesurfer.ApplyVolTransform(inverse=True, interp='nearest'), - name='aparc_inverse_transform') - register.connect(inputnode, 'subjects_dir', aparcxfm, 'subjects_dir') - register.connect(bbregister, 'out_reg_file', aparcxfm, 'reg_file') - register.connect(fssource, ('aparc_aseg', get_aparc_aseg), aparcxfm, - 'target_file') - register.connect(inputnode, 'mean_image', aparcxfm, 'source_file') - """ - Convert the BBRegister transformation to ANTS ITK format - """ - - convert2itk = Node(C3dAffineTool(), name='convert2itk') - convert2itk.inputs.fsl2ras = True - convert2itk.inputs.itk_transform = True - register.connect(bbregister, 'out_fsl_file', convert2itk, 'transform_file') - register.connect(inputnode, 'mean_image', convert2itk, 'source_file') - register.connect(stripper, 'out_file', convert2itk, 'reference_file') - """ - Compute registration between the subject's structural and MNI template - This is currently set to perform a very quick registration. 
However, the - registration can be made significantly more accurate for cortical - structures by increasing the number of iterations - All parameters are set using the example from: - #https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh - """ - - reg = Node(ants.Registration(), name='antsRegister') - reg.inputs.output_transform_prefix = "output_" - reg.inputs.transforms = ['Rigid', 'Affine', 'SyN'] - reg.inputs.transform_parameters = [(0.1, ), (0.1, ), (0.2, 3.0, 0.0)] - reg.inputs.number_of_iterations = [[10000, 11110, 11110]] * 2 + [[ - 100, 30, 20 - ]] - reg.inputs.dimension = 3 - reg.inputs.write_composite_transform = True - reg.inputs.collapse_output_transforms = True - reg.inputs.initial_moving_transform_com = True - reg.inputs.metric = ['Mattes'] * 2 + [['Mattes', 'CC']] - reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]] - reg.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]] - reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]] - reg.inputs.sampling_percentage = [0.3] * 2 + [[None, None]] - reg.inputs.convergence_threshold = [1.e-8] * 2 + [-0.01] - reg.inputs.convergence_window_size = [20] * 2 + [5] - reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[1, 0.5, 0]] - reg.inputs.sigma_units = ['vox'] * 3 - reg.inputs.shrink_factors = [[3, 2, 1]] * 2 + [[4, 2, 1]] - reg.inputs.use_estimate_learning_rate_once = [True] * 3 - reg.inputs.use_histogram_matching = [False] * 2 + [True] - reg.inputs.winsorize_lower_quantile = 0.005 - reg.inputs.winsorize_upper_quantile = 0.995 - reg.inputs.float = True - reg.inputs.output_warped_image = 'output_warped_image.nii.gz' - reg.inputs.num_threads = 4 - reg.plugin_args = {'qsub_args': '-l nodes=1:ppn=4'} - register.connect(stripper, 'out_file', reg, 'moving_image') - register.connect(inputnode, 'target_image', reg, 'fixed_image') - """ - Concatenate the affine and ants transforms into a list - """ - - merge = Node(Merge(2), iterfield=['in2'], name='mergexfm') - register.connect(convert2itk, 'itk_transform', merge, 'in2') - register.connect(reg, 'composite_transform', merge, 'in1') - """ - Transform the mean image. 
First to anatomical and then to target - """ - - warpmean = Node(ants.ApplyTransforms(), name='warpmean') - warpmean.inputs.input_image_type = 3 - warpmean.inputs.interpolation = 'Linear' - warpmean.inputs.invert_transform_flags = [False, False] - warpmean.terminal_output = 'file' - warpmean.inputs.args = '--float' - warpmean.inputs.num_threads = 4 - - register.connect(inputnode, 'target_image', warpmean, 'reference_image') - register.connect(inputnode, 'mean_image', warpmean, 'input_image') - register.connect(merge, 'out', warpmean, 'transforms') - """ - Assign all the output files - """ - - register.connect(reg, 'warped_image', outputnode, 'anat2target') - register.connect(warpmean, 'output_image', outputnode, 'transformed_mean') - register.connect(applyxfm, 'transformed_file', outputnode, - 'segmentation_files') - register.connect(aparcxfm, 'transformed_file', outputnode, 'aparc') - register.connect(bbregister, 'out_fsl_file', outputnode, - 'func2anat_transform') - register.connect(bbregister, 'out_reg_file', outputnode, 'out_reg_file') - register.connect(reg, 'composite_transform', outputnode, - 'anat2target_transform') - register.connect(merge, 'out', outputnode, 'transforms') - - return register - - -""" -Creates the main preprocessing workflow -""" - - -def create_workflow(files, - target_file, - subject_id, - TR, - slice_times, - norm_threshold=1, - num_components=5, - vol_fwhm=None, - surf_fwhm=None, - lowpass_freq=-1, - highpass_freq=-1, - subjects_dir=None, - sink_directory=os.getcwd(), - target_subject=['fsaverage3', 'fsaverage4'], - name='resting'): - - wf = Workflow(name=name) - - # Rename files in case they are named identically - name_unique = MapNode( - Rename(format_string='rest_%(run)02d'), - iterfield=['in_file', 'run'], - name='rename') - name_unique.inputs.keep_ext = True - name_unique.inputs.run = list(range(1, len(files) + 1)) - name_unique.inputs.in_file = files - - realign = Node(interface=spm.Realign(), name="realign") - realign.inputs.jobtype = 'estwrite' - - num_slices = len(slice_times) - slice_timing = Node(interface=spm.SliceTiming(), name="slice_timing") - slice_timing.inputs.num_slices = num_slices - slice_timing.inputs.time_repetition = TR - slice_timing.inputs.time_acquisition = TR - TR / float(num_slices) - slice_timing.inputs.slice_order = (np.argsort(slice_times) + 1).tolist() - slice_timing.inputs.ref_slice = int(num_slices / 2) - - # Comute TSNR on realigned data regressing polynomials upto order 2 - tsnr = MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr') - wf.connect(slice_timing, 'timecorrected_files', tsnr, 'in_file') - - # Compute the median image across runs - calc_median = Node(CalculateMedian(), name='median') - wf.connect(tsnr, 'detrended_file', calc_median, 'in_files') - """Segment and Register - """ - - registration = create_reg_workflow(name='registration') - wf.connect(calc_median, 'median_file', registration, - 'inputspec.mean_image') - registration.inputs.inputspec.subject_id = subject_id - registration.inputs.inputspec.subjects_dir = subjects_dir - registration.inputs.inputspec.target_image = target_file - """Use :class:`nipype.algorithms.rapidart` to determine which of the - images in the functional series are outliers based on deviations in - intensity or movement. 
- """ - - art = Node(interface=ArtifactDetect(), name="art") - art.inputs.use_differences = [True, True] - art.inputs.use_norm = True - art.inputs.norm_threshold = norm_threshold - art.inputs.zintensity_threshold = 9 - art.inputs.mask_type = 'spm_global' - art.inputs.parameter_source = 'SPM' - """Here we are connecting all the nodes together. Notice that we add the merge node only if you choose - to use 4D. Also `get_vox_dims` function is passed along the input volume of normalise to set the optimal - voxel sizes. - """ - - wf.connect([ - (name_unique, realign, [('out_file', 'in_files')]), - (realign, slice_timing, [('realigned_files', 'in_files')]), - (slice_timing, art, [('timecorrected_files', 'realigned_files')]), - (realign, art, [('realignment_parameters', 'realignment_parameters')]), - ]) - - def selectindex(files, idx): - import numpy as np - from nipype.utils.filemanip import filename_to_list, list_to_filename - return list_to_filename( - np.array(filename_to_list(files))[idx].tolist()) - - mask = Node(fsl.BET(), name='getmask') - mask.inputs.mask = True - wf.connect(calc_median, 'median_file', mask, 'in_file') - - # get segmentation in normalized functional space - - def merge_files(in1, in2): - out_files = filename_to_list(in1) - out_files.extend(filename_to_list(in2)) - return out_files - - # filter some noise - - # Compute motion regressors - motreg = Node( - Function( - input_names=['motion_params', 'order', 'derivatives'], - output_names=['out_files'], - function=motion_regressors, - imports=imports), - name='getmotionregress') - wf.connect(realign, 'realignment_parameters', motreg, 'motion_params') - - # Create a filter to remove motion and art confounds - createfilter1 = Node( - Function( - input_names=[ - 'motion_params', 'comp_norm', 'outliers', 'detrend_poly' - ], - output_names=['out_files'], - function=build_filter1, - imports=imports), - name='makemotionbasedfilter') - createfilter1.inputs.detrend_poly = 2 - wf.connect(motreg, 'out_files', createfilter1, 'motion_params') - wf.connect(art, 'norm_files', createfilter1, 'comp_norm') - wf.connect(art, 'outlier_files', createfilter1, 'outliers') - - filter1 = MapNode( - fsl.GLM( - out_f_name='F_mcart.nii', out_pf_name='pF_mcart.nii', demean=True), - iterfield=['in_file', 'design', 'out_res_name'], - name='filtermotion') - - wf.connect(slice_timing, 'timecorrected_files', filter1, 'in_file') - wf.connect(slice_timing, ('timecorrected_files', rename, '_filtermotart'), - filter1, 'out_res_name') - wf.connect(createfilter1, 'out_files', filter1, 'design') - - createfilter2 = MapNode( - Function( - input_names=[ - 'realigned_file', 'mask_file', 'num_components', - 'extra_regressors' - ], - output_names=['out_files'], - function=extract_noise_components, - imports=imports), - iterfield=['realigned_file', 'extra_regressors'], - name='makecompcorrfilter') - createfilter2.inputs.num_components = num_components - - wf.connect(createfilter1, 'out_files', createfilter2, 'extra_regressors') - wf.connect(filter1, 'out_res', createfilter2, 'realigned_file') - wf.connect(registration, - ('outputspec.segmentation_files', selectindex, [0, 2]), - createfilter2, 'mask_file') - - filter2 = MapNode( - fsl.GLM(out_f_name='F.nii', out_pf_name='pF.nii', demean=True), - iterfield=['in_file', 'design', 'out_res_name'], - name='filter_noise_nosmooth') - wf.connect(filter1, 'out_res', filter2, 'in_file') - wf.connect(filter1, ('out_res', rename, '_cleaned'), filter2, - 'out_res_name') - wf.connect(createfilter2, 'out_files', filter2, 'design') - 
wf.connect(mask, 'mask_file', filter2, 'mask') - - bandpass = Node( - Function( - input_names=['files', 'lowpass_freq', 'highpass_freq', 'fs'], - output_names=['out_files'], - function=bandpass_filter, - imports=imports), - name='bandpass_unsmooth') - bandpass.inputs.fs = 1. / TR - bandpass.inputs.highpass_freq = highpass_freq - bandpass.inputs.lowpass_freq = lowpass_freq - wf.connect(filter2, 'out_res', bandpass, 'files') - """Smooth the functional data using - :class:`nipype.interfaces.spm.Smooth`. - """ - - smooth = Node(interface=spm.Smooth(), name="smooth") - smooth.inputs.fwhm = vol_fwhm - - wf.connect(bandpass, 'out_files', smooth, 'in_files') - - collector = Node(Merge(2), name='collect_streams') - wf.connect(smooth, 'smoothed_files', collector, 'in1') - wf.connect(bandpass, 'out_files', collector, 'in2') - """ - Transform the remaining images. First to anatomical and then to target - """ - - warpall = MapNode( - ants.ApplyTransforms(), iterfield=['input_image'], name='warpall') - warpall.inputs.input_image_type = 3 - warpall.inputs.interpolation = 'Linear' - warpall.inputs.invert_transform_flags = [False, False] - warpall.terminal_output = 'file' - warpall.inputs.reference_image = target_file - warpall.inputs.args = '--float' - warpall.inputs.num_threads = 1 - - # transform to target - wf.connect(collector, 'out', warpall, 'input_image') - wf.connect(registration, 'outputspec.transforms', warpall, 'transforms') - - mask_target = Node(fsl.ImageMaths(op_string='-bin'), name='target_mask') - - wf.connect(registration, 'outputspec.anat2target', mask_target, 'in_file') - - maskts = MapNode(fsl.ApplyMask(), iterfield=['in_file'], name='ts_masker') - wf.connect(warpall, 'output_image', maskts, 'in_file') - wf.connect(mask_target, 'out_file', maskts, 'mask_file') - - # map to surface - # extract aparc+aseg ROIs - # extract subcortical ROIs - # extract target space ROIs - # combine subcortical and cortical rois into a single cifti file - - ####### - # Convert aparc to subject functional space - - # Sample the average time series in aparc ROIs - sampleaparc = MapNode( - freesurfer.SegStats(default_color_table=True), - iterfield=['in_file', 'summary_file', 'avgwf_txt_file'], - name='aparc_ts') - sampleaparc.inputs.segment_id = ( - [8] + list(range(10, 14)) + [17, 18, 26, 47] + list(range(49, 55)) + - [58] + list(range(1001, 1036)) + list(range(2001, 2036))) - - wf.connect(registration, 'outputspec.aparc', sampleaparc, - 'segmentation_file') - wf.connect(collector, 'out', sampleaparc, 'in_file') - - def get_names(files, suffix): - """Generate appropriate names for output files - """ - from nipype.utils.filemanip import (split_filename, filename_to_list, - list_to_filename) - out_names = [] - for filename in files: - _, name, _ = split_filename(filename) - out_names.append(name + suffix) - return list_to_filename(out_names) - - wf.connect(collector, ('out', get_names, '_avgwf.txt'), sampleaparc, - 'avgwf_txt_file') - wf.connect(collector, ('out', get_names, '_summary.stats'), sampleaparc, - 'summary_file') - - # Sample the time series onto the surface of the target surface. 
Performs - # sampling into left and right hemisphere - target = Node(IdentityInterface(fields=['target_subject']), name='target') - target.iterables = ('target_subject', filename_to_list(target_subject)) - - samplerlh = MapNode( - freesurfer.SampleToSurface(), - iterfield=['source_file'], - name='sampler_lh') - samplerlh.inputs.sampling_method = "average" - samplerlh.inputs.sampling_range = (0.1, 0.9, 0.1) - samplerlh.inputs.sampling_units = "frac" - samplerlh.inputs.interp_method = "trilinear" - samplerlh.inputs.smooth_surf = surf_fwhm - # samplerlh.inputs.cortex_mask = True - samplerlh.inputs.out_type = 'niigz' - samplerlh.inputs.subjects_dir = subjects_dir - - samplerrh = samplerlh.clone('sampler_rh') - - samplerlh.inputs.hemi = 'lh' - wf.connect(collector, 'out', samplerlh, 'source_file') - wf.connect(registration, 'outputspec.out_reg_file', samplerlh, 'reg_file') - wf.connect(target, 'target_subject', samplerlh, 'target_subject') - - samplerrh.set_input('hemi', 'rh') - wf.connect(collector, 'out', samplerrh, 'source_file') - wf.connect(registration, 'outputspec.out_reg_file', samplerrh, 'reg_file') - wf.connect(target, 'target_subject', samplerrh, 'target_subject') - - # Combine left and right hemisphere to text file - combiner = MapNode( - Function( - input_names=['left', 'right'], - output_names=['out_file'], - function=combine_hemi, - imports=imports), - iterfield=['left', 'right'], - name="combiner") - wf.connect(samplerlh, 'out_file', combiner, 'left') - wf.connect(samplerrh, 'out_file', combiner, 'right') - - # Sample the time series file for each subcortical roi - ts2txt = MapNode( - Function( - input_names=['timeseries_file', 'label_file', 'indices'], - output_names=['out_file'], - function=extract_subrois, - imports=imports), - iterfield=['timeseries_file'], - name='getsubcortts') - ts2txt.inputs.indices = [8] + list(range(10, 14)) + [17, 18, 26, 47] +\ - list(range(49, 55)) + [58] - ts2txt.inputs.label_file = \ - os.path.abspath(('OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_' - '2mm_v2.nii.gz')) - wf.connect(maskts, 'out_file', ts2txt, 'timeseries_file') - - ###### - - substitutions = [('_target_subject_', - ''), ('_filtermotart_cleaned_bp_trans_masked', ''), - ('_filtermotart_cleaned_bp', '')] - regex_subs = [ - ('_ts_masker.*/sar', '/smooth/'), - ('_ts_masker.*/ar', '/unsmooth/'), - ('_combiner.*/sar', '/smooth/'), - ('_combiner.*/ar', '/unsmooth/'), - ('_aparc_ts.*/sar', '/smooth/'), - ('_aparc_ts.*/ar', '/unsmooth/'), - ('_getsubcortts.*/sar', '/smooth/'), - ('_getsubcortts.*/ar', '/unsmooth/'), - ('series/sar', 'series/smooth/'), - ('series/ar', 'series/unsmooth/'), - ('_inverse_transform./', ''), - ] - # Save the relevant data into an output directory - datasink = Node(interface=DataSink(), name="datasink") - datasink.inputs.base_directory = sink_directory - datasink.inputs.container = subject_id - datasink.inputs.substitutions = substitutions - datasink.inputs.regexp_substitutions = regex_subs # (r'(/_.*(\d+/))', r'/run\2') - wf.connect(realign, 'realignment_parameters', datasink, - 'resting.qa.motion') - wf.connect(art, 'norm_files', datasink, 'resting.qa.art.@norm') - wf.connect(art, 'intensity_files', datasink, 'resting.qa.art.@intensity') - wf.connect(art, 'outlier_files', datasink, 'resting.qa.art.@outlier_files') - wf.connect(registration, 'outputspec.segmentation_files', datasink, - 'resting.mask_files') - wf.connect(registration, 'outputspec.anat2target', datasink, - 'resting.qa.ants') - wf.connect(mask, 'mask_file', datasink, 
'resting.mask_files.@brainmask') - wf.connect(mask_target, 'out_file', datasink, 'resting.mask_files.target') - wf.connect(filter1, 'out_f', datasink, 'resting.qa.compmaps.@mc_F') - wf.connect(filter1, 'out_pf', datasink, 'resting.qa.compmaps.@mc_pF') - wf.connect(filter2, 'out_f', datasink, 'resting.qa.compmaps') - wf.connect(filter2, 'out_pf', datasink, 'resting.qa.compmaps.@p') - wf.connect(bandpass, 'out_files', datasink, - 'resting.timeseries.@bandpassed') - wf.connect(smooth, 'smoothed_files', datasink, - 'resting.timeseries.@smoothed') - wf.connect(createfilter1, 'out_files', datasink, - 'resting.regress.@regressors') - wf.connect(createfilter2, 'out_files', datasink, - 'resting.regress.@compcorr') - wf.connect(maskts, 'out_file', datasink, 'resting.timeseries.target') - wf.connect(sampleaparc, 'summary_file', datasink, - 'resting.parcellations.aparc') - wf.connect(sampleaparc, 'avgwf_txt_file', datasink, - 'resting.parcellations.aparc.@avgwf') - wf.connect(ts2txt, 'out_file', datasink, - 'resting.parcellations.grayo.@subcortical') - - datasink2 = Node(interface=DataSink(), name="datasink2") - datasink2.inputs.base_directory = sink_directory - datasink2.inputs.container = subject_id - datasink2.inputs.substitutions = substitutions - datasink2.inputs.regexp_substitutions = regex_subs # (r'(/_.*(\d+/))', r'/run\2') - wf.connect(combiner, 'out_file', datasink2, - 'resting.parcellations.grayo.@surface') - return wf - - -""" -Creates the full workflow including getting information from dicom files -""" - - -def create_resting_workflow(args, name=None): - TR = args.TR - slice_times = args.slice_times - if args.dicom_file: - TR, slice_times, slice_thickness = get_info(args.dicom_file) - slice_times = (np.array(slice_times) / 1000.).tolist() - if name is None: - name = 'resting_' + args.subject_id - kwargs = dict( - files=[os.path.abspath(filename) for filename in args.files], - target_file=os.path.abspath(args.target_file), - subject_id=args.subject_id, - TR=TR, - slice_times=slice_times, - vol_fwhm=args.vol_fwhm, - surf_fwhm=args.surf_fwhm, - norm_threshold=2., - subjects_dir=os.path.abspath(args.fsdir), - target_subject=args.target_surfs, - lowpass_freq=args.lowpass_freq, - highpass_freq=args.highpass_freq, - sink_directory=os.path.abspath(args.sink), - name=name) - wf = create_workflow(**kwargs) - return wf - - -if __name__ == "__main__": - from argparse import ArgumentParser, RawTextHelpFormatter - defstr = ' (default %(default)s)' - parser = ArgumentParser( - description=__doc__, formatter_class=RawTextHelpFormatter) - parser.add_argument( - "-d", - "--dicom_file", - dest="dicom_file", - help="an example dicom file from the resting series") - parser.add_argument( - "-f", - "--files", - dest="files", - nargs="+", - help="4d nifti files for resting state", - required=True) - parser.add_argument( - "-t", - "--target", - dest="target_file", - help=("Target in MNI space. 
Best to use the MindBoggle " - "template - " - "OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz"), - required=True) - parser.add_argument( - "-s", - "--subject_id", - dest="subject_id", - help="FreeSurfer subject id", - required=True) - parser.add_argument( - "--subjects_dir", - dest="fsdir", - help="FreeSurfer subject directory", - required=True) - parser.add_argument( - "--target_surfaces", - dest="target_surfs", - nargs="+", - default=['fsaverage5'], - help="FreeSurfer target surfaces" + defstr) - parser.add_argument( - "--TR", - dest="TR", - default=None, - type=float, - help="TR if dicom not provided in seconds") - parser.add_argument( - "--slice_times", - dest="slice_times", - nargs="+", - type=float, - help="Slice onset times in seconds") - parser.add_argument( - '--vol_fwhm', - default=6., - dest='vol_fwhm', - type=float, - help="Spatial FWHM" + defstr) - parser.add_argument( - '--surf_fwhm', - default=15., - dest='surf_fwhm', - type=float, - help="Spatial FWHM" + defstr) - parser.add_argument( - "-l", - "--lowpass_freq", - dest="lowpass_freq", - default=0.1, - type=float, - help="Low pass frequency (Hz)" + defstr) - parser.add_argument( - "-u", - "--highpass_freq", - dest="highpass_freq", - default=0.01, - type=float, - help="High pass frequency (Hz)" + defstr) - parser.add_argument( - "-o", - "--output_dir", - dest="sink", - help="Output directory base", - required=True) - parser.add_argument( - "-w", "--work_dir", dest="work_dir", help="Output directory base") - parser.add_argument( - "-p", - "--plugin", - dest="plugin", - default='Linear', - help="Plugin to use") - parser.add_argument( - "--plugin_args", dest="plugin_args", help="Plugin arguments") - args = parser.parse_args() - - wf = create_resting_workflow(args) - - if args.work_dir: - work_dir = os.path.abspath(args.work_dir) - else: - work_dir = os.getcwd() - - wf.base_dir = work_dir - if args.plugin_args: - wf.run(args.plugin, plugin_args=eval(args.plugin_args)) - else: - wf.run(args.plugin) diff --git a/examples/rsfmri_vol_surface_preprocessing_nipy.py b/examples/rsfmri_vol_surface_preprocessing_nipy.py deleted file mode 100644 index 2397a136e0..0000000000 --- a/examples/rsfmri_vol_surface_preprocessing_nipy.py +++ /dev/null @@ -1,1083 +0,0 @@ -#!/usr/bin/env python -""" -===================================== -rsfMRI: ANTS, FS, FSL, NiPy, aCompCor -===================================== - - -A preprocessing workflow for Siemens resting state data. - -This workflow makes use of: - -- ANTS -- FreeSurfer -- FSL -- NiPy -- CompCor - -For example:: - - python rsfmri_preprocessing.py -d /data/12345-34-1.dcm -f /data/Resting.nii - -s subj001 -o output -p PBS --plugin_args "dict(qsub_args='-q many')" - -or:: - - python rsfmri_vol_surface_preprocessing.py -f SUB_1024011/E?/func/rest.nii - -t OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz --TR 2 -s SUB_1024011 - --subjects_dir fsdata --slice_times 0 17 1 18 2 19 3 20 4 21 5 22 6 23 - 7 24 8 25 9 26 10 27 11 28 12 29 13 30 14 31 15 32 16 -o . - -This workflow takes resting timeseries and a Siemens dicom file corresponding -to it and preprocesses it to produce timeseries coordinates or grayordinates. - -For non-Siemens dicoms, provide slice times instead, since the dicom extractor is not guaranteed to work. - -This workflow also requires 2mm subcortical atlas and templates that are -available from: - -http://mindboggle.info/data.html - -specifically the 2mm versions of: - - * `Joint Fusion Atlas `_ - * `MNI template `_ - -Import necessary modules from nipype. 
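The note above about non-Siemens data can be made concrete. The helper below is not part of the original example; it assumes an ascending, odd-slices-first interleaved axial acquisition (verify this against your own protocol) and builds the onset list that would be passed via ``--slice_times``::

    import numpy as np

    def interleaved_slice_times(n_slices, tr):
        # Assumed acquisition order: slices 0, 2, 4, ... then 1, 3, 5, ...
        order = list(range(0, n_slices, 2)) + list(range(1, n_slices, 2))
        onsets = np.empty(n_slices)
        # the k-th acquired slice starts at k * (TR / n_slices) seconds
        onsets[order] = np.arange(n_slices) * (tr / float(n_slices))
        return onsets.tolist()

    print(interleaved_slice_times(32, 2.0))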
-""" - -from __future__ import division, unicode_literals -from builtins import open, range, str - -import os - -from nipype.interfaces.base import CommandLine -CommandLine.set_default_terminal_output('allatonce') - -# https://github.com/moloney/dcmstack -from dcmstack.extract import default_extractor -# pip install pydicom -from dicom import read_file - -from nipype.interfaces import (fsl, Function, ants, freesurfer, nipy) -from nipype.interfaces.c3 import C3dAffineTool - -fsl.FSLCommand.set_default_output_type('NIFTI_GZ') - -from nipype import Workflow, Node, MapNode - -from nipype.algorithms.rapidart import ArtifactDetect -from nipype.algorithms.misc import TSNR, CalculateMedian -from nipype.algorithms.confounds import ACompCor -from nipype.interfaces.utility import Rename, Merge, IdentityInterface -from nipype.utils.filemanip import filename_to_list -from nipype.interfaces.io import DataSink, FreeSurferSource -import nipype.interfaces.freesurfer as fs - -import numpy as np -import scipy as sp -import nibabel as nb - -""" -A list of modules and functions to import inside of nodes -""" - -imports = [ - 'import os', - 'import nibabel as nb', - 'import numpy as np', - 'import scipy as sp', - 'from nipype.utils.filemanip import filename_to_list, list_to_filename, split_filename', - 'from scipy.special import legendre' - ] - -""" -Define utility functions for use in workflow nodes -""" - - -def get_info(dicom_files): - """Given a Siemens dicom file return metadata - - Returns - ------- - RepetitionTime - Slice Acquisition Times - Spacing between slices - """ - meta = default_extractor( - read_file( - filename_to_list(dicom_files)[0], - stop_before_pixels=True, - force=True)) - return (meta['RepetitionTime'] / 1000., meta['CsaImage.MosaicRefAcqTimes'], - meta['SpacingBetweenSlices']) - - -def median(in_files): - """Computes an average of the median of each realigned timeseries - - Parameters - ---------- - - in_files: one or more realigned Nifti 4D time series - - Returns - ------- - - out_file: a 3D Nifti file - """ - average = None - for idx, filename in enumerate(filename_to_list(in_files)): - img = nb.load(filename) - data = np.median(img.get_data(), axis=3) - if average is None: - average = data - else: - average = average + data - median_img = nb.Nifti1Image(average / float(idx + 1), img.affine, - img.header) - filename = os.path.join(os.getcwd(), 'median.nii.gz') - median_img.to_filename(filename) - return filename - - -def bandpass_filter(files, lowpass_freq, highpass_freq, fs): - """Bandpass filter the input files - - Parameters - ---------- - files: list of 4d nifti files - lowpass_freq: cutoff frequency for the low pass filter (in Hz) - highpass_freq: cutoff frequency for the high pass filter (in Hz) - fs: sampling rate (in Hz) - """ - out_files = [] - for filename in filename_to_list(files): - path, name, ext = split_filename(filename) - out_file = os.path.join(os.getcwd(), name + '_bp' + ext) - img = nb.load(filename) - timepoints = img.shape[-1] - F = np.zeros((timepoints)) - lowidx = int(timepoints / 2) + 1 - if lowpass_freq > 0: - lowidx = np.round(float(lowpass_freq) / fs * timepoints) - highidx = 0 - if highpass_freq > 0: - highidx = np.round(float(highpass_freq) / fs * timepoints) - F[highidx:lowidx] = 1 - F = ((F + F[::-1]) > 0).astype(int) - data = img.get_data() - if np.all(F == 1): - filtered_data = data - else: - filtered_data = np.real(np.fft.ifftn(np.fft.fftn(data) * F)) - img_out = nb.Nifti1Image(filtered_data, img.affine, img.header) - img_out.to_filename(out_file) 
- out_files.append(out_file) - return list_to_filename(out_files) - - -def motion_regressors(motion_params, order=0, derivatives=1): - """Compute motion regressors upto given order and derivative - - motion + d(motion)/dt + d2(motion)/dt2 (linear + quadratic) - """ - out_files = [] - for idx, filename in enumerate(filename_to_list(motion_params)): - params = np.genfromtxt(filename) - out_params = params - for d in range(1, derivatives + 1): - cparams = np.vstack((np.repeat(params[0, :][None, :], d, axis=0), - params)) - out_params = np.hstack((out_params, np.diff(cparams, d, axis=0))) - out_params2 = out_params - for i in range(2, order + 1): - out_params2 = np.hstack((out_params2, np.power(out_params, i))) - filename = os.path.join(os.getcwd(), "motion_regressor%02d.txt" % idx) - np.savetxt(filename, out_params2, fmt=b"%.10f") - out_files.append(filename) - return out_files - - -def build_filter1(motion_params, comp_norm, outliers, detrend_poly=None): - """Builds a regressor set comprisong motion parameters, composite norm and - outliers - - The outliers are added as a single time point column for each outlier - - - Parameters - ---------- - - motion_params: a text file containing motion parameters and its derivatives - comp_norm: a text file containing the composite norm - outliers: a text file containing 0-based outlier indices - detrend_poly: number of polynomials to add to detrend - - Returns - ------- - components_file: a text file containing all the regressors - """ - out_files = [] - for idx, filename in enumerate(filename_to_list(motion_params)): - params = np.genfromtxt(filename) - norm_val = np.genfromtxt(filename_to_list(comp_norm)[idx]) - out_params = np.hstack((params, norm_val[:, None])) - try: - outlier_val = np.genfromtxt(filename_to_list(outliers)[idx]) - except IOError: - outlier_val = np.empty((0)) - for index in np.atleast_1d(outlier_val): - outlier_vector = np.zeros((out_params.shape[0], 1)) - outlier_vector[index] = 1 - out_params = np.hstack((out_params, outlier_vector)) - if detrend_poly: - timepoints = out_params.shape[0] - X = np.empty((timepoints, 0)) - for i in range(detrend_poly): - X = np.hstack((X, legendre(i + 1)(np.linspace( - -1, 1, timepoints))[:, None])) - out_params = np.hstack((out_params, X)) - filename = os.path.join(os.getcwd(), "filter_regressor%02d.txt" % idx) - np.savetxt(filename, out_params, fmt=b"%.10f") - out_files.append(filename) - return out_files - - -def rename(in_files, suffix=None): - from nipype.utils.filemanip import (filename_to_list, split_filename, - list_to_filename) - out_files = [] - for idx, filename in enumerate(filename_to_list(in_files)): - _, name, ext = split_filename(filename) - if suffix is None: - out_files.append(name + ('_%03d' % idx) + ext) - else: - out_files.append(name + suffix + ext) - return list_to_filename(out_files) - - -def get_aparc_aseg(files): - """Return the aparc+aseg.mgz file""" - for name in files: - if 'aparc+aseg.mgz' in name: - return name - raise ValueError('aparc+aseg.mgz not found') - - -def extract_subrois(timeseries_file, label_file, indices): - """Extract voxel time courses for each subcortical roi index - - Parameters - ---------- - - timeseries_file: a 4D Nifti file - label_file: a 3D file containing rois in the same space/size of the 4D file - indices: a list of indices for ROIs to extract. 
- - Returns - ------- - out_file: a text file containing time courses for each voxel of each roi - The first four columns are: freesurfer index, i, j, k positions in the - label file - """ - img = nb.load(timeseries_file) - data = img.get_data() - roiimg = nb.load(label_file) - rois = roiimg.get_data() - prefix = split_filename(timeseries_file)[1] - out_ts_file = os.path.join(os.getcwd(), '%s_subcortical_ts.txt' % prefix) - with open(out_ts_file, 'wt') as fp: - for fsindex in indices: - ijk = np.nonzero(rois == fsindex) - ts = data[ijk] - for i0, row in enumerate(ts): - fp.write('%d,%d,%d,%d,' % ( - fsindex, ijk[0][i0], ijk[1][i0], - ijk[2][i0]) + ','.join(['%.10f' % val - for val in row]) + '\n') - return out_ts_file - - -def combine_hemi(left, right): - """Combine left and right hemisphere time series into a single text file - """ - lh_data = nb.load(left).get_data() - rh_data = nb.load(right).get_data() - - indices = np.vstack((1000000 + np.arange(0, lh_data.shape[0])[:, None], - 2000000 + np.arange(0, rh_data.shape[0])[:, None])) - all_data = np.hstack((indices, - np.vstack((lh_data.squeeze(), rh_data.squeeze())))) - filename = left.split('.')[1] + '_combined.txt' - np.savetxt( - filename, - all_data, - fmt=','.join(['%d'] + ['%.10f'] * (all_data.shape[1] - 1))) - return os.path.abspath(filename) - -""" -Create a Registration Workflow -""" - - -def create_reg_workflow(name='registration'): - """Create a FEAT preprocessing workflow together with freesurfer - - Parameters - ---------- - name : name of workflow (default: 'registration') - - Inputs: - - inputspec.source_files : files (filename or list of filenames to register) - inputspec.mean_image : reference image to use - inputspec.anatomical_image : anatomical image to coregister to - inputspec.target_image : registration target - - Outputs: - - outputspec.func2anat_transform : FLIRT transform - outputspec.anat2target_transform : FLIRT+FNIRT transform - outputspec.transformed_files : transformed files in target space - outputspec.transformed_mean : mean image in target space - - Example - ------- - See code below - """ - - register = Workflow(name=name) - - inputnode = Node( - interface=IdentityInterface(fields=[ - 'source_files', 'mean_image', 'subject_id', 'subjects_dir', - 'target_image' - ]), - name='inputspec') - - outputnode = Node( - interface=IdentityInterface(fields=[ - 'func2anat_transform', 'out_reg_file', 'anat2target_transform', - 'transforms', 'transformed_mean', 'segmentation_files', - 'anat2target', 'aparc', 'min_cost_file' - ]), - name='outputspec') - - # Get the subject's freesurfer source directory - fssource = Node(FreeSurferSource(), name='fssource') - fssource.run_without_submitting = True - register.connect(inputnode, 'subject_id', fssource, 'subject_id') - register.connect(inputnode, 'subjects_dir', fssource, 'subjects_dir') - - convert = Node(freesurfer.MRIConvert(out_type='nii'), name="convert") - register.connect(fssource, 'T1', convert, 'in_file') - - # Coregister the median to the surface - bbregister = Node(freesurfer.BBRegister(), name='bbregister') - bbregister.inputs.init = 'fsl' - bbregister.inputs.contrast_type = 't2' - bbregister.inputs.out_fsl_file = True - bbregister.inputs.epi_mask = True - register.connect(inputnode, 'subject_id', bbregister, 'subject_id') - register.connect(inputnode, 'mean_image', bbregister, 'source_file') - register.connect(inputnode, 'subjects_dir', bbregister, 'subjects_dir') - """ - Estimate the tissue classes from the anatomical image. 
But use aparc+aseg's brain mask - """ - - binarize = Node( - fs.Binarize(min=0.5, out_type="nii.gz", dilate=1), - name="binarize_aparc") - register.connect(fssource, ("aparc_aseg", get_aparc_aseg), binarize, - "in_file") - stripper = Node(fsl.ApplyMask(), name='stripper') - register.connect(binarize, "binary_file", stripper, "mask_file") - register.connect(convert, 'out_file', stripper, 'in_file') - - fast = Node(fsl.FAST(), name='fast') - register.connect(stripper, 'out_file', fast, 'in_files') - """ - Binarize the segmentation - """ - - binarize = MapNode( - fsl.ImageMaths(op_string='-nan -thr 0.9 -ero -bin'), - iterfield=['in_file'], - name='binarize') - register.connect(fast, 'partial_volume_files', binarize, 'in_file') - """ - Apply inverse transform to take segmentations to functional space - """ - - applyxfm = MapNode( - freesurfer.ApplyVolTransform(inverse=True, interp='nearest'), - iterfield=['target_file'], - name='inverse_transform') - register.connect(inputnode, 'subjects_dir', applyxfm, 'subjects_dir') - register.connect(bbregister, 'out_reg_file', applyxfm, 'reg_file') - register.connect(binarize, 'out_file', applyxfm, 'target_file') - register.connect(inputnode, 'mean_image', applyxfm, 'source_file') - """ - Apply inverse transform to aparc file - """ - - aparcxfm = Node( - freesurfer.ApplyVolTransform(inverse=True, interp='nearest'), - name='aparc_inverse_transform') - register.connect(inputnode, 'subjects_dir', aparcxfm, 'subjects_dir') - register.connect(bbregister, 'out_reg_file', aparcxfm, 'reg_file') - register.connect(fssource, ('aparc_aseg', get_aparc_aseg), aparcxfm, - 'target_file') - register.connect(inputnode, 'mean_image', aparcxfm, 'source_file') - """ - Convert the BBRegister transformation to ANTS ITK format - """ - - convert2itk = Node(C3dAffineTool(), name='convert2itk') - convert2itk.inputs.fsl2ras = True - convert2itk.inputs.itk_transform = True - register.connect(bbregister, 'out_fsl_file', convert2itk, 'transform_file') - register.connect(inputnode, 'mean_image', convert2itk, 'source_file') - register.connect(stripper, 'out_file', convert2itk, 'reference_file') - """ - Compute registration between the subject's structural and MNI template - - * All parameters are set using the example from: - #https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh - * This is currently set to perform a very quick registration. However, - the registration can be made significantly more accurate for cortical - structures by increasing the number of iterations. 
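Should you want the slower, more accurate variant alluded to here, only the iteration schedule needs to change; the node configured below can otherwise stay as is. A sketch with purely illustrative counts (``accurate_reg`` and the numbers are hypothetical, not validated settings)::

    from nipype.interfaces import ants

    accurate_reg = ants.Registration()
    accurate_reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    # quick schedule used by this example:
    #   [[10000, 11110, 11110]] * 2 + [[100, 30, 20]]
    # heavier schedule trading runtime for cortical accuracy (illustrative only):
    accurate_reg.inputs.number_of_iterations = \
        [[10000, 11110, 11110]] * 2 + [[400, 200, 100]]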
- """ - - reg = Node(ants.Registration(), name='antsRegister') - reg.inputs.output_transform_prefix = "output_" - reg.inputs.transforms = ['Rigid', 'Affine', 'SyN'] - reg.inputs.transform_parameters = [(0.1, ), (0.1, ), (0.2, 3.0, 0.0)] - reg.inputs.number_of_iterations = [[10000, 11110, 11110]] * 2 + [[ - 100, 30, 20 - ]] - reg.inputs.dimension = 3 - reg.inputs.write_composite_transform = True - reg.inputs.collapse_output_transforms = True - reg.inputs.initial_moving_transform_com = True - reg.inputs.metric = ['Mattes'] * 2 + [['Mattes', 'CC']] - reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]] - reg.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]] - reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]] - reg.inputs.sampling_percentage = [0.3] * 2 + [[None, None]] - reg.inputs.convergence_threshold = [1.e-8] * 2 + [-0.01] - reg.inputs.convergence_window_size = [20] * 2 + [5] - reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[1, 0.5, 0]] - reg.inputs.sigma_units = ['vox'] * 3 - reg.inputs.shrink_factors = [[3, 2, 1]] * 2 + [[4, 2, 1]] - reg.inputs.use_estimate_learning_rate_once = [True] * 3 - reg.inputs.use_histogram_matching = [False] * 2 + [True] - reg.inputs.winsorize_lower_quantile = 0.005 - reg.inputs.winsorize_upper_quantile = 0.995 - reg.inputs.float = True - reg.inputs.output_warped_image = 'output_warped_image.nii.gz' - reg.inputs.num_threads = 4 - reg.plugin_args = {'sbatch_args': '-c%d' % 4} - register.connect(stripper, 'out_file', reg, 'moving_image') - register.connect(inputnode, 'target_image', reg, 'fixed_image') - - """ - Concatenate the affine and ants transforms into a list - """ - - merge = Node(Merge(2), iterfield=['in2'], name='mergexfm') - register.connect(convert2itk, 'itk_transform', merge, 'in2') - register.connect(reg, ('composite_transform', pickfirst), merge, 'in1') - - """ - Transform the mean image. 
First to anatomical and then to target - """ - - warpmean = Node(ants.ApplyTransforms(), name='warpmean') - warpmean.inputs.input_image_type = 3 - warpmean.inputs.interpolation = 'Linear' - warpmean.inputs.invert_transform_flags = [False, False] - warpmean.terminal_output = 'file' - warpmean.inputs.args = '--float' - warpmean.inputs.num_threads = 4 - warpmean.plugin_args = {'sbatch_args': '-c%d' % 4} - - register.connect(inputnode, 'target_image', warpmean, 'reference_image') - register.connect(inputnode, 'mean_image', warpmean, 'input_image') - register.connect(merge, 'out', warpmean, 'transforms') - - """ - Assign all the output files - """ - - register.connect(reg, 'warped_image', outputnode, 'anat2target') - register.connect(warpmean, 'output_image', outputnode, 'transformed_mean') - register.connect(applyxfm, 'transformed_file', outputnode, - 'segmentation_files') - register.connect(aparcxfm, 'transformed_file', outputnode, 'aparc') - register.connect(bbregister, 'out_fsl_file', outputnode, - 'func2anat_transform') - register.connect(bbregister, 'out_reg_file', outputnode, 'out_reg_file') - register.connect(reg, 'composite_transform', outputnode, - 'anat2target_transform') - register.connect(merge, 'out', outputnode, 'transforms') - register.connect(bbregister, 'min_cost_file', outputnode, 'min_cost_file') - - return register - -""" -Creates the main preprocessing workflow -""" - - -def create_workflow(files, - target_file, - subject_id, - TR, - slice_times, - norm_threshold=1, - num_components=5, - vol_fwhm=None, - surf_fwhm=None, - lowpass_freq=-1, - highpass_freq=-1, - subjects_dir=None, - sink_directory=os.getcwd(), - target_subject=['fsaverage3', 'fsaverage4'], - name='resting'): - - wf = Workflow(name=name) - - # Rename files in case they are named identically - name_unique = MapNode( - Rename(format_string='rest_%(run)02d'), - iterfield=['in_file', 'run'], - name='rename') - name_unique.inputs.keep_ext = True - name_unique.inputs.run = list(range(1, len(files) + 1)) - name_unique.inputs.in_file = files - - realign = Node(nipy.SpaceTimeRealigner(), name="spacetime_realign") - realign.inputs.slice_times = slice_times - realign.inputs.tr = TR - realign.inputs.slice_info = 2 - realign.plugin_args = {'sbatch_args': '-c%d' % 4} - - # Compute TSNR on realigned data regressing polynomials up to order 2 - tsnr = MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr') - wf.connect(realign, "out_file", tsnr, "in_file") - - # Compute the median image across runs - calc_median = Node(CalculateMedian(), name='median') - wf.connect(tsnr, 'detrended_file', calc_median, 'in_files') - - """ - Segment and Register - """ - - registration = create_reg_workflow(name='registration') - wf.connect(calc_median, 'median_file', registration, - 'inputspec.mean_image') - registration.inputs.inputspec.subject_id = subject_id - registration.inputs.inputspec.subjects_dir = subjects_dir - registration.inputs.inputspec.target_image = target_file - - """Quantify TSNR in each freesurfer ROI - """ - - get_roi_tsnr = MapNode( - fs.SegStats(default_color_table=True), - iterfield=['in_file'], - name='get_aparc_tsnr') - get_roi_tsnr.inputs.avgwf_txt_file = True - wf.connect(tsnr, 'tsnr_file', get_roi_tsnr, 'in_file') - wf.connect(registration, 'outputspec.aparc', get_roi_tsnr, - 'segmentation_file') - - """Use :class:`nipype.algorithms.rapidart` to determine which of the - images in the functional series are outliers based on deviations in - intensity or movement. 
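For intuition about the thresholds set on the node below, here is a rough, self-contained sketch of composite-norm outlier detection (an approximation of the idea only, not the actual rapidart implementation; the 50 mm head radius used to convert rotations is an assumption)::

    import numpy as np

    def simple_motion_outliers(motion_params, threshold=1.0):
        # motion_params: (T, 6) array of 3 translations (mm) + 3 rotations (radians)
        composite = np.hstack([motion_params[:, :3], 50.0 * motion_params[:, 3:]])
        frame_diff = np.vstack([np.zeros((1, 6)), np.diff(composite, axis=0)])
        displacement = np.linalg.norm(frame_diff, axis=1)
        return np.where(displacement > threshold)[0]

    rng = np.random.RandomState(0)
    trans = np.cumsum(rng.normal(scale=0.05, size=(100, 3)), axis=0)   # mm
    rot = np.cumsum(rng.normal(scale=0.001, size=(100, 3)), axis=0)    # radians
    params = np.hstack([trans, rot])
    params[40, :3] += 3.0                  # inject an abrupt 3 mm jump
    print(simple_motion_outliers(params))  # volumes 40 and 41 exceed the threshold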
- """ - - art = Node(interface=ArtifactDetect(), name="art") - art.inputs.use_differences = [True, True] - art.inputs.use_norm = True - art.inputs.norm_threshold = norm_threshold - art.inputs.zintensity_threshold = 9 - art.inputs.mask_type = 'spm_global' - art.inputs.parameter_source = 'NiPy' - - """Here we are connecting all the nodes together. Notice that we add the merge node only if you choose - to use 4D. Also `get_vox_dims` function is passed along the input volume of normalise to set the optimal - voxel sizes. - """ - - wf.connect([ - (name_unique, realign, [('out_file', 'in_file')]), - (realign, art, [('out_file', 'realigned_files')]), - (realign, art, [('par_file', 'realignment_parameters')]), - ]) - - def selectindex(files, idx): - import numpy as np - from nipype.utils.filemanip import filename_to_list, list_to_filename - return list_to_filename( - np.array(filename_to_list(files))[idx].tolist()) - - mask = Node(fsl.BET(), name='getmask') - mask.inputs.mask = True - wf.connect(calc_median, 'median_file', mask, 'in_file') - - # get segmentation in normalized functional space - - def merge_files(in1, in2): - out_files = filename_to_list(in1) - out_files.extend(filename_to_list(in2)) - return out_files - - # filter some noise - - # Compute motion regressors - motreg = Node( - Function( - input_names=['motion_params', 'order', 'derivatives'], - output_names=['out_files'], - function=motion_regressors, - imports=imports), - name='getmotionregress') - wf.connect(realign, 'par_file', motreg, 'motion_params') - - # Create a filter to remove motion and art confounds - createfilter1 = Node( - Function( - input_names=[ - 'motion_params', 'comp_norm', 'outliers', 'detrend_poly' - ], - output_names=['out_files'], - function=build_filter1, - imports=imports), - name='makemotionbasedfilter') - createfilter1.inputs.detrend_poly = 2 - wf.connect(motreg, 'out_files', createfilter1, 'motion_params') - wf.connect(art, 'norm_files', createfilter1, 'comp_norm') - wf.connect(art, 'outlier_files', createfilter1, 'outliers') - - filter1 = MapNode( - fsl.GLM( - out_f_name='F_mcart.nii.gz', - out_pf_name='pF_mcart.nii.gz', - demean=True), - iterfield=['in_file', 'design', 'out_res_name'], - name='filtermotion') - - wf.connect(realign, 'out_file', filter1, 'in_file') - wf.connect(realign, ('out_file', rename, '_filtermotart'), filter1, - 'out_res_name') - wf.connect(createfilter1, 'out_files', filter1, 'design') - - createfilter2 = MapNode( - ACompCor(), - iterfield=['realigned_file', 'extra_regressors'], - name='makecompcorrfilter') - createfilter2.inputs.components_file = 'noise_components.txt' - createfilter2.inputs.num_components = num_components - - wf.connect(createfilter1, 'out_files', createfilter2, 'extra_regressors') - wf.connect(filter1, 'out_res', createfilter2, 'realigned_file') - wf.connect(registration, - ('outputspec.segmentation_files', selectindex, [0, 2]), - createfilter2, 'mask_file') - - filter2 = MapNode( - fsl.GLM(out_f_name='F.nii.gz', out_pf_name='pF.nii.gz', demean=True), - iterfield=['in_file', 'design', 'out_res_name'], - name='filter_noise_nosmooth') - wf.connect(filter1, 'out_res', filter2, 'in_file') - wf.connect(filter1, ('out_res', rename, '_cleaned'), filter2, - 'out_res_name') - wf.connect(createfilter2, 'components_file', filter2, 'design') - wf.connect(mask, 'mask_file', filter2, 'mask') - - bandpass = Node( - Function( - input_names=['files', 'lowpass_freq', 'highpass_freq', 'fs'], - output_names=['out_files'], - function=bandpass_filter, - imports=imports), - 
name='bandpass_unsmooth') - bandpass.inputs.fs = 1. / TR - bandpass.inputs.highpass_freq = highpass_freq - bandpass.inputs.lowpass_freq = lowpass_freq - wf.connect(filter2, 'out_res', bandpass, 'files') - """Smooth the functional data using - :class:`nipype.interfaces.fsl.IsotropicSmooth`. - """ - - smooth = MapNode( - interface=fsl.IsotropicSmooth(), name="smooth", iterfield=["in_file"]) - smooth.inputs.fwhm = vol_fwhm - - wf.connect(bandpass, 'out_files', smooth, 'in_file') - - collector = Node(Merge(2), name='collect_streams') - wf.connect(smooth, 'out_file', collector, 'in1') - wf.connect(bandpass, 'out_files', collector, 'in2') - """ - Transform the remaining images. First to anatomical and then to target - """ - - warpall = MapNode( - ants.ApplyTransforms(), iterfield=['input_image'], name='warpall') - warpall.inputs.input_image_type = 3 - warpall.inputs.interpolation = 'Linear' - warpall.inputs.invert_transform_flags = [False, False] - warpall.terminal_output = 'file' - warpall.inputs.reference_image = target_file - warpall.inputs.args = '--float' - warpall.inputs.num_threads = 2 - warpall.plugin_args = {'sbatch_args': '-c%d' % 2} - - # transform to target - wf.connect(collector, 'out', warpall, 'input_image') - wf.connect(registration, 'outputspec.transforms', warpall, 'transforms') - - mask_target = Node(fsl.ImageMaths(op_string='-bin'), name='target_mask') - - wf.connect(registration, 'outputspec.anat2target', mask_target, 'in_file') - - maskts = MapNode(fsl.ApplyMask(), iterfield=['in_file'], name='ts_masker') - wf.connect(warpall, 'output_image', maskts, 'in_file') - wf.connect(mask_target, 'out_file', maskts, 'mask_file') - - # map to surface - # extract aparc+aseg ROIs - # extract subcortical ROIs - # extract target space ROIs - # combine subcortical and cortical rois into a single cifti file - - ####### - # Convert aparc to subject functional space - - # Sample the average time series in aparc ROIs - sampleaparc = MapNode( - freesurfer.SegStats(default_color_table=True), - iterfield=['in_file', 'summary_file', 'avgwf_txt_file'], - name='aparc_ts') - sampleaparc.inputs.segment_id = ( - [8] + list(range(10, 14)) + [17, 18, 26, 47] + list(range(49, 55)) + - [58] + list(range(1001, 1036)) + list(range(2001, 2036))) - - wf.connect(registration, 'outputspec.aparc', sampleaparc, - 'segmentation_file') - wf.connect(collector, 'out', sampleaparc, 'in_file') - - def get_names(files, suffix): - """Generate appropriate names for output files - """ - from nipype.utils.filemanip import (split_filename, filename_to_list, - list_to_filename) - import os - out_names = [] - for filename in files: - path, name, _ = split_filename(filename) - out_names.append(os.path.join(path, name + suffix)) - return list_to_filename(out_names) - - wf.connect(collector, ('out', get_names, '_avgwf.txt'), sampleaparc, - 'avgwf_txt_file') - wf.connect(collector, ('out', get_names, '_summary.stats'), sampleaparc, - 'summary_file') - - # Sample the time series onto the surface of the target surface. 
Performs - # sampling into left and right hemisphere - target = Node(IdentityInterface(fields=['target_subject']), name='target') - target.iterables = ('target_subject', filename_to_list(target_subject)) - - samplerlh = MapNode( - freesurfer.SampleToSurface(), - iterfield=['source_file'], - name='sampler_lh') - samplerlh.inputs.sampling_method = "average" - samplerlh.inputs.sampling_range = (0.1, 0.9, 0.1) - samplerlh.inputs.sampling_units = "frac" - samplerlh.inputs.interp_method = "trilinear" - samplerlh.inputs.smooth_surf = surf_fwhm - # samplerlh.inputs.cortex_mask = True - samplerlh.inputs.out_type = 'niigz' - samplerlh.inputs.subjects_dir = subjects_dir - - samplerrh = samplerlh.clone('sampler_rh') - - samplerlh.inputs.hemi = 'lh' - wf.connect(collector, 'out', samplerlh, 'source_file') - wf.connect(registration, 'outputspec.out_reg_file', samplerlh, 'reg_file') - wf.connect(target, 'target_subject', samplerlh, 'target_subject') - - samplerrh.set_input('hemi', 'rh') - wf.connect(collector, 'out', samplerrh, 'source_file') - wf.connect(registration, 'outputspec.out_reg_file', samplerrh, 'reg_file') - wf.connect(target, 'target_subject', samplerrh, 'target_subject') - - # Combine left and right hemisphere to text file - combiner = MapNode( - Function( - input_names=['left', 'right'], - output_names=['out_file'], - function=combine_hemi, - imports=imports), - iterfield=['left', 'right'], - name="combiner") - wf.connect(samplerlh, 'out_file', combiner, 'left') - wf.connect(samplerrh, 'out_file', combiner, 'right') - - # Sample the time series file for each subcortical roi - ts2txt = MapNode( - Function( - input_names=['timeseries_file', 'label_file', 'indices'], - output_names=['out_file'], - function=extract_subrois, - imports=imports), - iterfield=['timeseries_file'], - name='getsubcortts') - ts2txt.inputs.indices = [8] + list(range(10, 14)) + [17, 18, 26, 47] +\ - list(range(49, 55)) + [58] - ts2txt.inputs.label_file = \ - os.path.abspath(('OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_' - '2mm_v2.nii.gz')) - wf.connect(maskts, 'out_file', ts2txt, 'timeseries_file') - - ###### - - substitutions = [ - ('_target_subject_', ''), - ('_filtermotart_cleaned_bp_trans_masked', ''), - ('_filtermotart_cleaned_bp', ''), - ] - substitutions += [("_smooth%d" % i, "") for i in range(11)[::-1]] - substitutions += [("_ts_masker%d" % i, "") for i in range(11)[::-1]] - substitutions += [("_getsubcortts%d" % i, "") for i in range(11)[::-1]] - substitutions += [("_combiner%d" % i, "") for i in range(11)[::-1]] - substitutions += [("_filtermotion%d" % i, "") for i in range(11)[::-1]] - substitutions += [("_filter_noise_nosmooth%d" % i, "") - for i in range(11)[::-1]] - substitutions += [("_makecompcorfilter%d" % i, "") - for i in range(11)[::-1]] - substitutions += [("_get_aparc_tsnr%d/" % i, "run%d_" % (i + 1)) - for i in range(11)[::-1]] - - substitutions += [("T1_out_brain_pve_0_maths_warped", "compcor_csf"), - ("T1_out_brain_pve_1_maths_warped", - "compcor_gm"), ("T1_out_brain_pve_2_maths_warped", - "compcor_wm"), - ("output_warped_image_maths", - "target_brain_mask"), ("median_brain_mask", - "native_brain_mask"), ("corr_", - "")] - - regex_subs = [ - ('_combiner.*/sar', '/smooth/'), - ('_combiner.*/ar', '/unsmooth/'), - ('_aparc_ts.*/sar', '/smooth/'), - ('_aparc_ts.*/ar', '/unsmooth/'), - ('_getsubcortts.*/sar', '/smooth/'), - ('_getsubcortts.*/ar', '/unsmooth/'), - ('series/sar', 'series/smooth/'), - ('series/ar', 'series/unsmooth/'), - ('_inverse_transform./', ''), - ] - # Save the relevant 
data into an output directory - datasink = Node(interface=DataSink(), name="datasink") - datasink.inputs.base_directory = sink_directory - datasink.inputs.container = subject_id - datasink.inputs.substitutions = substitutions - datasink.inputs.regexp_substitutions = regex_subs # (r'(/_.*(\d+/))', r'/run\2') - wf.connect(realign, 'par_file', datasink, 'resting.qa.motion') - wf.connect(art, 'norm_files', datasink, 'resting.qa.art.@norm') - wf.connect(art, 'intensity_files', datasink, 'resting.qa.art.@intensity') - wf.connect(art, 'outlier_files', datasink, 'resting.qa.art.@outlier_files') - wf.connect(registration, 'outputspec.segmentation_files', datasink, - 'resting.mask_files') - wf.connect(registration, 'outputspec.anat2target', datasink, - 'resting.qa.ants') - wf.connect(mask, 'mask_file', datasink, 'resting.mask_files.@brainmask') - wf.connect(mask_target, 'out_file', datasink, 'resting.mask_files.target') - wf.connect(filter1, 'out_f', datasink, 'resting.qa.compmaps.@mc_F') - wf.connect(filter1, 'out_pf', datasink, 'resting.qa.compmaps.@mc_pF') - wf.connect(filter2, 'out_f', datasink, 'resting.qa.compmaps') - wf.connect(filter2, 'out_pf', datasink, 'resting.qa.compmaps.@p') - wf.connect(registration, 'outputspec.min_cost_file', datasink, - 'resting.qa.mincost') - wf.connect(tsnr, 'tsnr_file', datasink, 'resting.qa.tsnr.@map') - wf.connect([(get_roi_tsnr, datasink, - [('avgwf_txt_file', 'resting.qa.tsnr'), - ('summary_file', 'resting.qa.tsnr.@summary')])]) - - wf.connect(bandpass, 'out_files', datasink, - 'resting.timeseries.@bandpassed') - wf.connect(smooth, 'out_file', datasink, 'resting.timeseries.@smoothed') - wf.connect(createfilter1, 'out_files', datasink, - 'resting.regress.@regressors') - wf.connect(createfilter2, 'components_file', datasink, - 'resting.regress.@compcorr') - wf.connect(maskts, 'out_file', datasink, 'resting.timeseries.target') - wf.connect(sampleaparc, 'summary_file', datasink, - 'resting.parcellations.aparc') - wf.connect(sampleaparc, 'avgwf_txt_file', datasink, - 'resting.parcellations.aparc.@avgwf') - wf.connect(ts2txt, 'out_file', datasink, - 'resting.parcellations.grayo.@subcortical') - - datasink2 = Node(interface=DataSink(), name="datasink2") - datasink2.inputs.base_directory = sink_directory - datasink2.inputs.container = subject_id - datasink2.inputs.substitutions = substitutions - datasink2.inputs.regexp_substitutions = regex_subs # (r'(/_.*(\d+/))', r'/run\2') - wf.connect(combiner, 'out_file', datasink2, - 'resting.parcellations.grayo.@surface') - return wf - - -""" -Creates the full workflow including getting information from dicom files -""" - - -def create_resting_workflow(args, name=None): - TR = args.TR - slice_times = args.slice_times - if args.dicom_file: - TR, slice_times, slice_thickness = get_info(args.dicom_file) - slice_times = (np.array(slice_times) / 1000.).tolist() - - if name is None: - name = 'resting_' + args.subject_id - kwargs = dict( - files=[os.path.abspath(filename) for filename in args.files], - target_file=os.path.abspath(args.target_file), - subject_id=args.subject_id, - TR=TR, - slice_times=slice_times, - vol_fwhm=args.vol_fwhm, - surf_fwhm=args.surf_fwhm, - norm_threshold=2., - subjects_dir=os.path.abspath(args.fsdir), - target_subject=args.target_surfs, - lowpass_freq=args.lowpass_freq, - highpass_freq=args.highpass_freq, - sink_directory=os.path.abspath(args.sink), - name=name) - wf = create_workflow(**kwargs) - return wf - - -if __name__ == "__main__": - from argparse import ArgumentParser, RawTextHelpFormatter - 
defstr = ' (default %(default)s)' - parser = ArgumentParser( - description=__doc__, formatter_class=RawTextHelpFormatter) - parser.add_argument( - "-d", - "--dicom_file", - dest="dicom_file", - help="a SIEMENS example dicom file from the resting series") - parser.add_argument( - "-f", - "--files", - dest="files", - nargs="+", - help="4d nifti files for resting state", - required=True) - parser.add_argument( - "-t", - "--target", - dest="target_file", - help=("Target in MNI space. Best to use the MindBoggle " - "template - " - "OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz"), - required=True) - parser.add_argument( - "-s", - "--subject_id", - dest="subject_id", - help="FreeSurfer subject id", - required=True) - parser.add_argument( - "--subjects_dir", - dest="fsdir", - help="FreeSurfer subject directory", - required=True) - parser.add_argument( - "--target_surfaces", - dest="target_surfs", - nargs="+", - default=['fsaverage5'], - help="FreeSurfer target surfaces" + defstr) - parser.add_argument( - "--TR", - dest="TR", - default=None, - type=float, - help="TR if dicom not provided in seconds") - parser.add_argument( - "--slice_times", - dest="slice_times", - nargs="+", - type=float, - help="Slice onset times in seconds") - parser.add_argument( - '--vol_fwhm', - default=6., - dest='vol_fwhm', - type=float, - help="Spatial FWHM" + defstr) - parser.add_argument( - '--surf_fwhm', - default=15., - dest='surf_fwhm', - type=float, - help="Spatial FWHM" + defstr) - parser.add_argument( - "-l", - "--lowpass_freq", - dest="lowpass_freq", - default=0.1, - type=float, - help="Low pass frequency (Hz)" + defstr) - parser.add_argument( - "-u", - "--highpass_freq", - dest="highpass_freq", - default=0.01, - type=float, - help="High pass frequency (Hz)" + defstr) - parser.add_argument( - "-o", - "--output_dir", - dest="sink", - help="Output directory base", - required=True) - parser.add_argument( - "-w", "--work_dir", dest="work_dir", help="Output directory base") - parser.add_argument( - "-p", - "--plugin", - dest="plugin", - default='Linear', - help="Plugin to use") - parser.add_argument( - "--plugin_args", dest="plugin_args", help="Plugin arguments") - args = parser.parse_args() - - wf = create_resting_workflow(args) - - if args.work_dir: - work_dir = os.path.abspath(args.work_dir) - else: - work_dir = os.getcwd() - - wf.base_dir = work_dir - if args.plugin_args: - wf.run(args.plugin, plugin_args=eval(args.plugin_args)) - else: - wf.run(args.plugin) diff --git a/examples/smri_ants_build_template.py b/examples/smri_ants_build_template.py deleted file mode 100644 index 53f3981428..0000000000 --- a/examples/smri_ants_build_template.py +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -=============================================== -sMRI: Using new ANTS for creating a T1 template -=============================================== - -In this tutorial we will use ANTS (old version aka "ANTS") based workflow to -create a template out of multiple T1 volumes. - -1. Tell python where to find the appropriate functions. 
-""" - -from __future__ import print_function, unicode_literals -from builtins import open -from future import standard_library -standard_library.install_aliases() - -import os -import nipype.interfaces.utility as util -import nipype.interfaces.ants as ants -import nipype.interfaces.io as io -import nipype.pipeline.engine as pe # pypeline engine - -from niflow.nipype1.workflows.smri.ants import ANTSTemplateBuildSingleIterationWF -""" -2. Download T1 volumes into home directory -""" - -import urllib.request -import urllib.error -import urllib.parse -homeDir = os.getenv("HOME") -requestedPath = os.path.join(homeDir, 'nipypeTestPath') -mydatadir = os.path.realpath(requestedPath) -if not os.path.exists(mydatadir): - os.makedirs(mydatadir) -print(mydatadir) - -MyFileURLs = [ - ('http://slicer.kitware.com/midas3/download?bitstream=13121', - '01_T1_half.nii.gz'), - ('http://slicer.kitware.com/midas3/download?bitstream=13122', - '02_T1_half.nii.gz'), - ('http://slicer.kitware.com/midas3/download?bitstream=13124', - '03_T1_half.nii.gz'), - ('http://slicer.kitware.com/midas3/download?bitstream=13128', - '01_T1_inv_half.nii.gz'), - ('http://slicer.kitware.com/midas3/download?bitstream=13123', - '02_T1_inv_half.nii.gz'), - ('http://slicer.kitware.com/midas3/download?bitstream=13125', - '03_T1_inv_half.nii.gz'), -] -for tt in MyFileURLs: - myURL = tt[0] - localFilename = os.path.join(mydatadir, tt[1]) - if not os.path.exists(localFilename): - remotefile = urllib.request.urlopen(myURL) - - localFile = open(localFilename, 'wb') - localFile.write(remotefile.read()) - localFile.close() - print("Downloaded file: {0}".format(localFilename)) - else: - print("File previously downloaded {0}".format(localFilename)) - -input_images = [ - os.path.join(mydatadir, '01_T1_half.nii.gz'), - os.path.join(mydatadir, '02_T1_half.nii.gz'), - os.path.join(mydatadir, '03_T1_half.nii.gz') -] -input_passive_images = [{ - 'INV_T1': - os.path.join(mydatadir, '01_T1_inv_half.nii.gz') -}, { - 'INV_T1': - os.path.join(mydatadir, '02_T1_inv_half.nii.gz') -}, { - 'INV_T1': - os.path.join(mydatadir, '03_T1_inv_half.nii.gz') -}] -""" -3. Define the workflow and its working directory -""" - -tbuilder = pe.Workflow(name="ANTSTemplateBuilder") -tbuilder.base_dir = requestedPath -""" -4. Define data sources. In real life these would be replace by DataGrabbers -""" - -datasource = pe.Node( - interface=util.IdentityInterface( - fields=['imageList', 'passiveImagesDictionariesList']), - run_without_submitting=True, - name='InputImages') -datasource.inputs.imageList = input_images -datasource.inputs.passiveImagesDictionariesList = input_passive_images -datasource.inputs.sort_filelist = True -""" -5. Template is initialized by a simple average -""" - -initAvg = pe.Node(interface=ants.AverageImages(), name='initAvg') -initAvg.inputs.dimension = 3 -initAvg.inputs.normalize = True - -tbuilder.connect(datasource, "imageList", initAvg, "images") -""" -6. Define the first iteration of template building -""" - -buildTemplateIteration1 = ANTSTemplateBuildSingleIterationWF('iteration01') -tbuilder.connect(initAvg, 'output_average_image', buildTemplateIteration1, - 'inputspec.fixed_image') -tbuilder.connect(datasource, 'imageList', buildTemplateIteration1, - 'inputspec.images') -tbuilder.connect(datasource, 'passiveImagesDictionariesList', - buildTemplateIteration1, - 'inputspec.ListOfPassiveImagesDictionaries') -""" -7. 
Define the second iteration of template building -""" - -buildTemplateIteration2 = ANTSTemplateBuildSingleIterationWF('iteration02') -tbuilder.connect(buildTemplateIteration1, 'outputspec.template', - buildTemplateIteration2, 'inputspec.fixed_image') -tbuilder.connect(datasource, 'imageList', buildTemplateIteration2, - 'inputspec.images') -tbuilder.connect(datasource, 'passiveImagesDictionariesList', - buildTemplateIteration2, - 'inputspec.ListOfPassiveImagesDictionaries') -""" -8. Move selected files to a designated results folder -""" - -datasink = pe.Node(io.DataSink(), name="datasink") -datasink.inputs.base_directory = os.path.join(requestedPath, "results") - -tbuilder.connect(buildTemplateIteration2, 'outputspec.template', datasink, - 'PrimaryTemplate') -tbuilder.connect(buildTemplateIteration2, - 'outputspec.passive_deformed_templates', datasink, - 'PassiveTemplate') -tbuilder.connect(initAvg, 'output_average_image', datasink, - 'PreRegisterAverage') -""" -8. Run the workflow -""" - -tbuilder.run() diff --git a/examples/smri_ants_registration.py b/examples/smri_ants_registration.py deleted file mode 100644 index e7050b05b7..0000000000 --- a/examples/smri_ants_registration.py +++ /dev/null @@ -1,109 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -================================== -sMRI: Using ANTS for registration -================================== - -In this simple tutorial we will use the Registration interface from ANTS to -coregister two T1 volumes. - -1. Tell python where to find the appropriate functions. - -""" - -from __future__ import print_function, unicode_literals -from builtins import open - -from future import standard_library -standard_library.install_aliases() - -import os -import urllib.request -import urllib.error -import urllib.parse -from nipype.interfaces.ants import Registration -from nipype.testing import example_data -""" -2. Download T1 volumes into home directory - -""" - -homeDir = os.getenv("HOME") -requestedPath = os.path.join(homeDir, 'nipypeTestPath') -mydatadir = os.path.realpath(requestedPath) -if not os.path.exists(mydatadir): - os.makedirs(mydatadir) -print(mydatadir) - -MyFileURLs = [ - ('http://slicer.kitware.com/midas3/download?bitstream=13121', - '01_T1_half.nii.gz'), - ('http://slicer.kitware.com/midas3/download?bitstream=13122', - '02_T1_half.nii.gz'), -] -for tt in MyFileURLs: - myURL = tt[0] - localFilename = os.path.join(mydatadir, tt[1]) - if not os.path.exists(localFilename): - remotefile = urllib.request.urlopen(myURL) - - localFile = open(localFilename, 'wb') - localFile.write(remotefile.read()) - localFile.close() - print("Downloaded file: {0}".format(localFilename)) - else: - print("File previously downloaded {0}".format(localFilename)) - -input_images = [ - os.path.join(mydatadir, '01_T1_half.nii.gz'), - os.path.join(mydatadir, '02_T1_half.nii.gz'), -] -""" -3. Define the parameters of the registration. Settings are -found in the file ``smri_ants_registration_settings.json`` -distributed with the ``example_data`` of `nipype`. 
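The same ``from_file`` mechanism also works with a settings file of your own, assumed here to be a flat JSON mapping of ``Registration`` input names to values (the file name and the values below are hypothetical placeholders that echo inputs listed further down)::

    import json

    settings = {
        "dimension": 3,
        "transforms": ["Rigid", "Affine", "SyN"],
        "write_composite_transform": True,
        "output_warped_image": "INTERNAL_WARPED.nii.gz",
    }
    with open("my_ants_settings.json", "w") as fp:
        json.dump(settings, fp, indent=2)

    # reg = Registration(from_file="my_ants_settings.json")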
- -""" - -reg = Registration( - from_file=example_data('smri_ants_registration_settings.json')) -reg.inputs.fixed_image = input_images[0] -reg.inputs.moving_image = input_images[1] -""" -Alternatively to the use of the ``from_file`` feature to load ANTs settings, -the user can manually set all those inputs instead:: - - reg.inputs.output_transform_prefix = 'thisTransform' - reg.inputs.output_warped_image = 'INTERNAL_WARPED.nii.gz' - reg.inputs.output_transform_prefix = "output_" - reg.inputs.transforms = ['Translation', 'Rigid', 'Affine', 'SyN'] - reg.inputs.transform_parameters = [(0.1,), (0.1,), (0.1,), (0.2, 3.0, 0.0)] - reg.inputs.number_of_iterations = ([[10000, 111110, 11110]] * 3 + - [[100, 50, 30]]) - reg.inputs.dimension = 3 - reg.inputs.write_composite_transform = True - reg.inputs.collapse_output_transforms = False - reg.inputs.metric = ['Mattes'] * 3 + [['Mattes', 'CC']] - reg.inputs.metric_weight = [1] * 3 + [[0.5, 0.5]] - reg.inputs.radius_or_number_of_bins = [32] * 3 + [[32, 4]] - reg.inputs.sampling_strategy = ['Regular'] * 3 + [[None, None]] - reg.inputs.sampling_percentage = [0.3] * 3 + [[None, None]] - reg.inputs.convergence_threshold = [1.e-8] * 3 + [-0.01] - reg.inputs.convergence_window_size = [20] * 3 + [5] - reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 3 + [[1, 0.5, 0]] - reg.inputs.sigma_units = ['vox'] * 4 - reg.inputs.shrink_factors = [[6, 4, 2]] + [[3, 2, 1]] * 2 + [[4, 2, 1]] - reg.inputs.use_estimate_learning_rate_once = [True] * 4 - reg.inputs.use_histogram_matching = [False] * 3 + [True] - reg.inputs.initial_moving_transform_com = True - -""" - -print(reg.cmdline) -""" -3. Run the registration -""" - -reg.run() diff --git a/examples/smri_antsregistration_build_template.py b/examples/smri_antsregistration_build_template.py deleted file mode 100644 index e84fc5b509..0000000000 --- a/examples/smri_antsregistration_build_template.py +++ /dev/null @@ -1,222 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -====================================================== -sMRI: Using new ANTS for creating a T1 template (ITK4) -====================================================== - -In this tutorial we will use ANTS (new ITK4 version aka "antsRegistration") based workflow to -create a template out of multiple T1 volumes. We will also showcase how to fine tune SGE jobs requirements. - -1. Tell python where to find the appropriate functions. -""" - -from __future__ import print_function -from future import standard_library -standard_library.install_aliases() - -import os -import nipype.interfaces.utility as util -import nipype.interfaces.ants as ants -import nipype.interfaces.io as io -import nipype.pipeline.engine as pe # pypeline engine - -from niflow.nipype1.workflows.smri.ants import antsRegistrationTemplateBuildSingleIterationWF -""" -2. 
Download T1 volumes into home directory -""" - -import urllib.request -import urllib.error -import urllib.parse -homeDir = os.getenv("HOME") -requestedPath = os.path.join(homeDir, 'nipypeTestPath') -mydatadir = os.path.realpath(requestedPath) -if not os.path.exists(mydatadir): - os.makedirs(mydatadir) -print(mydatadir) - -MyFileURLs = [ - ('http://slicer.kitware.com/midas3/download?bitstream=13121', - '01_T1_half.nii.gz'), - ('http://slicer.kitware.com/midas3/download?bitstream=13122', - '02_T1_half.nii.gz'), - ('http://slicer.kitware.com/midas3/download?bitstream=13124', - '03_T1_half.nii.gz'), - ('http://slicer.kitware.com/midas3/download?bitstream=13128', - '01_T1_inv_half.nii.gz'), - ('http://slicer.kitware.com/midas3/download?bitstream=13123', - '02_T1_inv_half.nii.gz'), - ('http://slicer.kitware.com/midas3/download?bitstream=13125', - '03_T1_inv_half.nii.gz'), -] -for tt in MyFileURLs: - myURL = tt[0] - localFilename = os.path.join(mydatadir, tt[1]) - if not os.path.exists(localFilename): - remotefile = urllib.request.urlopen(myURL) - - localFile = open(localFilename, 'wb') - localFile.write(remotefile.read()) - localFile.close() - print("Downloaded file: {0}".format(localFilename)) - else: - print("File previously downloaded {0}".format(localFilename)) -""" -ListOfImagesDictionaries - a list of dictionaries where each dictionary is -for one scan session, and the mappings in the dictionary are for all the -co-aligned images for that one scan session -""" - -ListOfImagesDictionaries = [{ - 'T1': - os.path.join(mydatadir, '01_T1_half.nii.gz'), - 'INV_T1': - os.path.join(mydatadir, '01_T1_inv_half.nii.gz'), - 'LABEL_MAP': - os.path.join(mydatadir, '01_T1_inv_half.nii.gz') -}, { - 'T1': - os.path.join(mydatadir, '02_T1_half.nii.gz'), - 'INV_T1': - os.path.join(mydatadir, '02_T1_inv_half.nii.gz'), - 'LABEL_MAP': - os.path.join(mydatadir, '02_T1_inv_half.nii.gz') -}, { - 'T1': - os.path.join(mydatadir, '03_T1_half.nii.gz'), - 'INV_T1': - os.path.join(mydatadir, '03_T1_inv_half.nii.gz'), - 'LABEL_MAP': - os.path.join(mydatadir, '03_T1_inv_half.nii.gz') -}] -input_passive_images = [{ - 'INV_T1': - os.path.join(mydatadir, '01_T1_inv_half.nii.gz') -}, { - 'INV_T1': - os.path.join(mydatadir, '02_T1_inv_half.nii.gz') -}, { - 'INV_T1': - os.path.join(mydatadir, '03_T1_inv_half.nii.gz') -}] -""" -registrationImageTypes - A list of the image types to be used actively during -the estimation process of registration, any image type not in this list -will be passively resampled with the estimated transforms. -['T1','T2'] -""" - -registrationImageTypes = ['T1'] -""" -interpolationMap - A map of image types to interpolation modes. If an -image type is not listed, it will be linearly interpolated. -{ 'labelmap':'NearestNeighbor', 'FLAIR':'WindowedSinc' } -""" - -interpolationMapping = { - 'INV_T1': 'LanczosWindowedSinc', - 'LABEL_MAP': 'NearestNeighbor', - 'T1': 'Linear' -} -""" -3. Define the workflow and its working directory -""" - -tbuilder = pe.Workflow(name="antsRegistrationTemplateBuilder") -tbuilder.base_dir = requestedPath -""" -4. Define data sources. 
In real life these would be replace by DataGrabbers -""" - -InitialTemplateInputs = [mdict['T1'] for mdict in ListOfImagesDictionaries] - -datasource = pe.Node( - interface=util.IdentityInterface(fields=[ - 'InitialTemplateInputs', 'ListOfImagesDictionaries', - 'registrationImageTypes', 'interpolationMapping' - ]), - run_without_submitting=True, - name='InputImages') -datasource.inputs.InitialTemplateInputs = InitialTemplateInputs -datasource.inputs.ListOfImagesDictionaries = ListOfImagesDictionaries -datasource.inputs.registrationImageTypes = registrationImageTypes -datasource.inputs.interpolationMapping = interpolationMapping -datasource.inputs.sort_filelist = True -""" -5. Template is initialized by a simple average in this simple example, - any reference image could be used (i.e. a previously created template) -""" - -initAvg = pe.Node(interface=ants.AverageImages(), name='initAvg') -initAvg.inputs.dimension = 3 -initAvg.inputs.normalize = True - -tbuilder.connect(datasource, "InitialTemplateInputs", initAvg, "images") -""" -6. Define the first iteration of template building -""" - -buildTemplateIteration1 = antsRegistrationTemplateBuildSingleIterationWF( - 'iteration01') -""" -Here we are fine tuning parameters of the SGE job (memory limit, numebr of cores etc.) -""" - -BeginANTS = buildTemplateIteration1.get_node("BeginANTS") -BeginANTS.plugin_args = { - 'qsub_args': - '-S /bin/bash -pe smp1 8-12 -l mem_free=6000M -o /dev/null -e /dev/null queue_name', - 'overwrite': - True -} - -tbuilder.connect(initAvg, 'output_average_image', buildTemplateIteration1, - 'inputspec.fixed_image') -tbuilder.connect(datasource, 'ListOfImagesDictionaries', - buildTemplateIteration1, 'inputspec.ListOfImagesDictionaries') -tbuilder.connect(datasource, 'registrationImageTypes', buildTemplateIteration1, - 'inputspec.registrationImageTypes') -tbuilder.connect(datasource, 'interpolationMapping', buildTemplateIteration1, - 'inputspec.interpolationMapping') -""" -7. Define the second iteration of template building -""" - -buildTemplateIteration2 = antsRegistrationTemplateBuildSingleIterationWF( - 'iteration02') -BeginANTS = buildTemplateIteration2.get_node("BeginANTS") -BeginANTS.plugin_args = { - 'qsub_args': - '-S /bin/bash -pe smp1 8-12 -l mem_free=6000M -o /dev/null -e /dev/null queue_name', - 'overwrite': - True -} -tbuilder.connect(buildTemplateIteration1, 'outputspec.template', - buildTemplateIteration2, 'inputspec.fixed_image') -tbuilder.connect(datasource, 'ListOfImagesDictionaries', - buildTemplateIteration2, 'inputspec.ListOfImagesDictionaries') -tbuilder.connect(datasource, 'registrationImageTypes', buildTemplateIteration2, - 'inputspec.registrationImageTypes') -tbuilder.connect(datasource, 'interpolationMapping', buildTemplateIteration2, - 'inputspec.interpolationMapping') -""" -8. Move selected files to a designated results folder -""" - -datasink = pe.Node(io.DataSink(), name="datasink") -datasink.inputs.base_directory = os.path.join(requestedPath, "results") - -tbuilder.connect(buildTemplateIteration2, 'outputspec.template', datasink, - 'PrimaryTemplate') -tbuilder.connect(buildTemplateIteration2, - 'outputspec.passive_deformed_templates', datasink, - 'PassiveTemplate') -tbuilder.connect(initAvg, 'output_average_image', datasink, - 'PreRegisterAverage') -""" -9. 
Run the workflow -""" - -tbuilder.run(plugin="SGE") diff --git a/examples/smri_cbs_skullstripping.py b/examples/smri_cbs_skullstripping.py deleted file mode 100644 index 1471496576..0000000000 --- a/examples/smri_cbs_skullstripping.py +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -======================================== -sMRI: USing CBS Tools for skullstripping -======================================== - -This simple workflow uses SPECTRE2010 algorithm to skullstrip an MP2RAGE -anatomical scan. -""" - -import nipype.pipeline.engine as pe -from nipype.interfaces.mipav.developer import (JistIntensityMp2rageMasking, - MedicAlgorithmSPECTRE2010) - -wf = pe.Workflow("skullstripping") - -mask = pe.Node(JistIntensityMp2rageMasking(), name="masking") -folder_path = '/Users/filo/7t_trt/niftis/sub001/session_1/' -mask.inputs.inSecond = folder_path + "MP2RAGE_INV2.nii.gz" -mask.inputs.inQuantitative = folder_path + "MP2RAGE_UNI.nii.gz" -mask.inputs.inT1weighted = folder_path + "MP2RAGE_T1.nii.gz" -mask.inputs.outMasked = True -mask.inputs.outMasked2 = True -mask.inputs.outSignal = True -mask.inputs.outSignal2 = True - -skullstrip = pe.Node(MedicAlgorithmSPECTRE2010(), name="skullstrip") -skullstrip.inputs.outStripped = True -skullstrip.inputs.xDefaultMem = 6000 - -wf.connect(mask, 'outMasked', skullstrip, 'inInput') -wf.run() diff --git a/examples/smri_freesurfer.py b/examples/smri_freesurfer.py deleted file mode 100644 index d365b44dd5..0000000000 --- a/examples/smri_freesurfer.py +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env python -""" -================ -sMRI: FreeSurfer -================ - -This script, smri_freesurfer.py, demonstrates the ability to call reconall on -a set of subjects and then make an average subject:: - - python smri_freesurfer.py - -Import necessary modules from nipype. 
-""" - -import os - -import nipype.pipeline.engine as pe -import nipype.interfaces.io as nio -from nipype.interfaces.freesurfer.preprocess import ReconAll -from nipype.interfaces.freesurfer.utils import MakeAverageSubject - -subject_list = ['s1', 's3'] -data_dir = os.path.abspath('data') -subjects_dir = os.path.abspath('amri_freesurfer_tutorial/subjects_dir') - -wf = pe.Workflow(name="l1workflow") -wf.base_dir = os.path.abspath('amri_freesurfer_tutorial/workdir') -""" -Grab data -""" - -datasource = pe.MapNode( - interface=nio.DataGrabber(infields=['subject_id'], outfields=['struct']), - name='datasource', - iterfield=['subject_id']) -datasource.inputs.base_directory = data_dir -datasource.inputs.template = '%s/%s.nii' -datasource.inputs.template_args = dict(struct=[['subject_id', 'struct']]) -datasource.inputs.subject_id = subject_list -datasource.inputs.sort_filelist = True -""" -Run recon-all -""" - -recon_all = pe.MapNode( - interface=ReconAll(), - name='recon_all', - iterfield=['subject_id', 'T1_files']) -recon_all.inputs.subject_id = subject_list -if not os.path.exists(subjects_dir): - os.mkdir(subjects_dir) -recon_all.inputs.subjects_dir = subjects_dir - -wf.connect(datasource, 'struct', recon_all, 'T1_files') -""" -Make average subject -""" - -average = pe.Node(interface=MakeAverageSubject(), name="average") -average.inputs.subjects_dir = subjects_dir - -wf.connect(recon_all, 'subject_id', average, 'subjects_ids') - -wf.run("MultiProc", plugin_args={'n_procs': 4}) diff --git a/examples/smri_fsreconall.py b/examples/smri_fsreconall.py deleted file mode 100644 index 16d0b4c9f3..0000000000 --- a/examples/smri_fsreconall.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python -""" -================ -sMRI: FSReconAll -================ - -This script, smri_fsreconall.py, demonstrates the ability to use the -create_reconall_workflow function to create a workflow and then run it on a -set of subjects and then make an average subject:: - - python smri_fsreconall.py - -For an example on how to call FreeSurfer's reconall script in Nipype -see smri_freesurfer.py. - -Import necessary modules from nipype. 
-""" - -import os - -import nipype.pipeline.engine as pe -import nipype.interfaces.io as nio -from niflow.nipype1.workflows.smri.freesurfer import create_reconall_workflow -from nipype.interfaces.freesurfer.utils import MakeAverageSubject -from nipype.interfaces.utility import IdentityInterface -""" -Assign the tutorial directory -""" - -tutorial_dir = os.path.abspath('smri_fsreconall_tutorial') -if not os.path.isdir(tutorial_dir): - os.mkdir(tutorial_dir) -""" -Define the workflow directories -""" - -subject_list = ['s1', 's3'] -data_dir = os.path.abspath('data') -subjects_dir = os.path.join(tutorial_dir, 'subjects_dir') -if not os.path.exists(subjects_dir): - os.mkdir(subjects_dir) - -wf = pe.Workflow(name="l1workflow") -wf.base_dir = os.path.join(tutorial_dir, 'workdir') -""" -Create inputspec -""" - -inputspec = pe.Node( - interface=IdentityInterface(['subject_id']), name="inputspec") -inputspec.iterables = ("subject_id", subject_list) -""" -Grab data -""" - -datasource = pe.Node( - interface=nio.DataGrabber(infields=['subject_id'], outfields=['struct']), - name='datasource') -datasource.inputs.base_directory = data_dir -datasource.inputs.template = '%s/%s.nii' -datasource.inputs.template_args = dict(struct=[['subject_id', 'struct']]) -datasource.inputs.subject_id = subject_list -datasource.inputs.sort_filelist = True - -wf.connect(inputspec, 'subject_id', datasource, 'subject_id') -""" -Run recon-all -""" - -recon_all = create_reconall_workflow() -recon_all.inputs.inputspec.subjects_dir = subjects_dir - -wf.connect(datasource, 'struct', recon_all, 'inputspec.T1_files') -wf.connect(inputspec, 'subject_id', recon_all, 'inputspec.subject_id') -""" -Make average subject -""" - -average = pe.JoinNode( - interface=MakeAverageSubject(), - joinsource="inputspec", - joinfield="subjects_ids", - name="average") -average.inputs.subjects_dir = subjects_dir - -wf.connect(recon_all, 'postdatasink_outputspec.subject_id', average, - 'subjects_ids') - -wf.run("MultiProc", plugin_args={'n_procs': 4}) diff --git a/examples/tessellation_tutorial.py b/examples/tessellation_tutorial.py deleted file mode 100644 index 58bae095cc..0000000000 --- a/examples/tessellation_tutorial.py +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/env python -""" -================================================= -sMRI: Regional Tessellation and Surface Smoothing -================================================= - -Introduction -============ - -This script, tessellation_tutorial.py, demonstrates the use of create_tessellation_flow from niflow.nipype1.workflows.smri.freesurfer, and it can be run with:: - - python tessellation_tutorial.py - -This example requires that the user has Freesurfer installed, and that the Freesurfer directory for 'fsaverage' is present. - -.. seealso:: - - ConnectomeViewer - The Connectome Viewer connects Multi-Modal Multi-Scale Neuroimaging and Network Datasets For Analysis and Visualization in Python. - - http://www.geuz.org/gmsh/ - Gmsh: a three-dimensional finite element mesh generator with built-in pre- and post-processing facilities - - http://www.blender.org/ - Blender is the free open source 3D content creation suite, available for all major operating systems under the GNU General Public License. - -.. warning:: - - This workflow will take several hours to finish entirely, since smoothing the larger cortical surfaces is very time consuming. - -Packages and Data Setup -======================= - -Import the necessary modules and workflow from nipype. 
-""" - -import nipype.pipeline.engine as pe # pypeline engine -import nipype.interfaces.cmtk as cmtk -import nipype.interfaces.io as nio # Data i/o -import os -import os.path as op -from niflow.nipype1.workflows.smri.freesurfer import create_tessellation_flow -""" -Directories -=========== - -Set the default directory and lookup table (LUT) paths -""" - -fs_dir = os.environ['FREESURFER_HOME'] -lookup_file = op.join(fs_dir, 'FreeSurferColorLUT.txt') -subjects_dir = op.join(fs_dir, 'subjects/') -output_dir = './tessellate_tutorial' -""" -Inputs -====== - -Create the tessellation workflow and set inputs -Here we will choose Gifti (gii) as the output format, because -we want to able to view the surface in ConnectomeViewer. - -In you intend to view the meshes in gmsh or Blender, you should change -the workflow creation to use stereolithographic (stl) format. -""" - -tessflow = create_tessellation_flow(name='tessflow', out_format='gii') -tessflow.inputs.inputspec.subject_id = 'fsaverage' -tessflow.inputs.inputspec.subjects_dir = subjects_dir -tessflow.inputs.inputspec.lookup_file = lookup_file -""" -We also create a conditional node to package the surfaces for ConnectomeViewer. -Simply set cff to "False" to ignore this step. -""" - -cff = True -if cff: - cff = pe.Node(interface=cmtk.CFFConverter(), name='cff') - cff.inputs.out_file = 'Meshes.cff' -""" -Outputs -======= - -Create a datasink to organize the smoothed meshes -Using regular-expression substitutions we can remove the extraneous folders generated by the mapnode. -""" - -datasink = pe.Node(interface=nio.DataSink(), name="datasink") -datasink.inputs.base_directory = 'meshes' -datasink.inputs.regexp_substitutions = [('_smoother[\d]*/', '')] -""" -Execution -========= - -Finally, create and run another pipeline that connects the workflow and datasink -""" - -tesspipe = pe.Workflow(name='tessellate_tutorial') -tesspipe.base_dir = output_dir -tesspipe.connect([(tessflow, datasink, [('outputspec.meshes', - '@meshes.all')])]) -""" -If the surfaces are to be packaged, this will connect the CFFConverter -node to the tessellation and smoothing workflow, as well as to the datasink. -""" - -if cff: - tesspipe.connect([(tessflow, cff, [('outputspec.meshes', - 'gifti_surfaces')])]) - tesspipe.connect([(cff, datasink, [('connectome_file', '@cff')])]) - -tesspipe.run() diff --git a/examples/test_spm.py b/examples/test_spm.py deleted file mode 100644 index 4c31f144ed..0000000000 --- a/examples/test_spm.py +++ /dev/null @@ -1,77 +0,0 @@ -from __future__ import division -from builtins import range -import nipype.pipeline.engine as pe -from nipype.interfaces import spm -from nipype.interfaces import fsl -from nipype.interfaces import utility as niu -from nipype.interfaces import io as nio -from nipype.algorithms.misc import Gunzip - - -def _get_first(inlist): - if isinstance(inlist, (list, tuple)): - return inlist[0] - return inlist - - -def test_spm(name='test_spm_3d'): - """ - A simple workflow to test SPM's installation. By default will split the 4D volume in - time-steps. - """ - workflow = pe.Workflow(name=name) - - inputnode = pe.Node( - niu.IdentityInterface(fields=['in_data']), name='inputnode') - dgr = pe.Node( - nio.DataGrabber( - template="feeds/data/fmri.nii.gz", - outfields=['out_file'], - sort_filelist=False), - name='datasource') - - stc = pe.Node( - spm.SliceTiming( - num_slices=21, - time_repetition=1.0, - time_acquisition=2. - 2. 
/ 32, - slice_order=list(range(21, 0, -1)), - ref_slice=10), - name='stc') - realign_estimate = pe.Node( - spm.Realign(jobtype='estimate'), name='realign_estimate') - realign_write = pe.Node(spm.Realign(jobtype='write'), name='realign_write') - realign_estwrite = pe.Node( - spm.Realign(jobtype='estwrite'), name='realign_estwrite') - smooth = pe.Node(spm.Smooth(fwhm=[6, 6, 6]), name='smooth') - - if name == 'test_spm_3d': - split = pe.Node( - fsl.Split(dimension="t", output_type="NIFTI"), name="split") - workflow.connect([(dgr, split, [(('out_file', _get_first), - 'in_file')]), - (split, stc, [("out_files", "in_files")])]) - elif name == 'test_spm_4d': - gunzip = pe.Node(Gunzip(), name="gunzip") - workflow.connect([(dgr, gunzip, [(('out_file', _get_first), - 'in_file')]), - (gunzip, stc, [("out_file", "in_files")])]) - else: - raise NotImplementedError( - 'No implementation of the test workflow \'{}\' was found'.format( - name)) - - workflow.connect([(inputnode, dgr, [('in_data', 'base_directory')]), - (stc, realign_estimate, - [('timecorrected_files', - 'in_files')]), (realign_estimate, realign_write, - [('modified_in_files', 'in_files')]), - (stc, realign_estwrite, - [('timecorrected_files', - 'in_files')]), (realign_write, smooth, - [('realigned_files', 'in_files')])]) - return workflow - - -workflow3d = test_spm() -workflow4d = test_spm(name='test_spm_4d') diff --git a/examples/workshop_dartmouth_2010.py b/examples/workshop_dartmouth_2010.py deleted file mode 100644 index 931a633c52..0000000000 --- a/examples/workshop_dartmouth_2010.py +++ /dev/null @@ -1,288 +0,0 @@ -""" -================================ -Workshop: Dartmouth College 2010 -================================ - -First lets go to the directory with the data we'll be working on and start the interactive python interpreter -(with some nipype specific configuration). Note that nipype does not need to be run through ipython - it is -just much nicer to do interactive work in it. - -.. sourcecode:: bash - - cd $TDPATH - ipython -p nipype - -For every neuroimaging procedure supported by nipype there exists a wrapper - a small piece of code managing -the underlying software (FSL, SPM, AFNI etc.). We call those interfaces. They are standarised so we can hook them up -together. Lets have a look at some of them. - -.. 
sourcecode:: ipython - - In [1]: import nipype.interfaces.fsl as fsl - - In [2]: fsl.BET.help() - Inputs - ------ - - Mandatory: - in_file: input file to skull strip - - Optional: - args: Additional parameters to the command - center: center of gravity in voxels - environ: Environment variables (default={}) - frac: fractional intensity threshold - functional: apply to 4D fMRI data - mutually exclusive: functional, reduce_bias - mask: create binary mask image - mesh: generate a vtk mesh brain surface - no_output: Don't generate segmented output - out_file: name of output skull stripped image - outline: create surface outline image - output_type: FSL output type - radius: head radius - reduce_bias: bias field and neck cleanup - mutually exclusive: functional, reduce_bias - skull: create skull image - threshold: apply thresholding to segmented brain image and mask - vertical_gradient: vertical gradient in fractional intensity threshold (-1, 1) - - Outputs - ------- - mask_file: path/name of binary brain mask (if generated) - meshfile: path/name of vtk mesh file (if generated) - out_file: path/name of skullstripped file - outline_file: path/name of outline file (if generated) - - In [3]: import nipype.interfaces.freesurfer as fs - - In [4]: fs.Smooth.help() - Inputs - ------ - - Mandatory: - in_file: source volume - num_iters: number of iterations instead of fwhm - mutually exclusive: surface_fwhm - reg_file: registers volume to surface anatomical - surface_fwhm: surface FWHM in mm - mutually exclusive: num_iters - requires: reg_file - - Optional: - args: Additional parameters to the command - environ: Environment variables (default={}) - proj_frac: project frac of thickness a long surface normal - mutually exclusive: proj_frac_avg - proj_frac_avg: average a long normal min max delta - mutually exclusive: proj_frac - smoothed_file: output volume - subjects_dir: subjects directory - vol_fwhm: volumesmoothing outside of surface - - Outputs - ------- - args: Additional parameters to the command - environ: Environment variables - smoothed_file: smoothed input volume - subjects_dir: subjects directory - -You can read about all of the interfaces implemented in nipype at our online documentation at http://nipy.sourceforge.net/nipype/documentation.html#documentation . -Check it out now. - -Using interfaces ----------------- - -Having interfaces allows us to use third party software (like FSL BET) as function. Look how simple it is. -""" - -from __future__ import print_function -from builtins import str - -import nipype.interfaces.fsl as fsl -result = fsl.BET(in_file='data/s1/struct.nii').run() -print(result) -""" -Running a single program is not much of a breakthrough. Lets run motion correction followed by smoothing -(isotropic - in other words not using SUSAN). Notice that in the first line we are setting the output data type -for all FSL interfaces. -""" - -fsl.FSLCommand.set_default_output_type('NIFTI_GZ') -result1 = fsl.MCFLIRT(in_file='data/s1/f3.nii').run() -result2 = fsl.Smooth(in_file='f3_mcf.nii.gz', fwhm=6).run() -""" -Simple workflow ---------------- - -In the previous example we knew that fsl.MCFLIRT will produce a file called f3_mcf.nii.gz and we have hard coded -this as an input to fsl.Smooth. This is quite limited, but luckily nipype supports joining interfaces in pipelines. -This way output of one interface will be used as an input of another without having to hard code anything. Before -connecting Interfaces we need to put them into (separate) Nodes and give them unique names. 
This way every interface will -process data in a separate folder. -""" - -import nipype.pipeline.engine as pe -import os - -motion_correct = pe.Node( - interface=fsl.MCFLIRT(in_file=os.path.abspath('data/s1/f3.nii')), - name="motion_correct") -smooth = pe.Node(interface=fsl.Smooth(fwhm=6), name="smooth") - -motion_correct_and_smooth = pe.Workflow(name="motion_correct_and_smooth") -motion_correct_and_smooth.base_dir = os.path.abspath( - '.') # define where will be the root folder for the workflow -motion_correct_and_smooth.connect([(motion_correct, smooth, [('out_file', - 'in_file')])]) -# we are connecting 'out_file' output of motion_correct to 'in_file' input of smooth -motion_correct_and_smooth.run() -""" -Another workflow ----------------- - -Another example of a simple workflow (calculate the mean of fMRI signal and subtract it). -This time we'll be assigning inputs after defining the workflow. -""" - -calc_mean = pe.Node(interface=fsl.ImageMaths(), name="calc_mean") -calc_mean.inputs.op_string = "-Tmean" -subtract = pe.Node(interface=fsl.ImageMaths(), name="subtract") -subtract.inputs.op_string = "-sub" - -demean = pe.Workflow(name="demean") -demean.base_dir = os.path.abspath('.') -demean.connect([(calc_mean, subtract, [('out_file', 'in_file2')])]) - -demean.inputs.calc_mean.in_file = os.path.abspath('data/s1/f3.nii') -demean.inputs.subtract.in_file = os.path.abspath('data/s1/f3.nii') -demean.run() -""" -Reusing workflows ------------------ - -The beauty of the workflows is that they are reusable. We can just import a workflow made by someone -else and feed it with our data. -""" - -from fmri_fsl import preproc -preproc.base_dir = os.path.abspath('.') -preproc.inputs.inputspec.func = os.path.abspath('data/s1/f3.nii') -preproc.inputs.inputspec.struct = os.path.abspath('data/s1/struct.nii') -preproc.run() -""" -... and we can run it again and it won't actually rerun anything because none of -the parameters have changed. -""" - -preproc.run() -""" -... and we can change a parameter and run it again. Only the dependent nodes -are rerun and that too only if the input state has changed. -""" - -preproc.inputs.meanfuncmask.frac = 0.5 -preproc.run() -""" -Visualizing workflows 1 ------------------------ - -So what did we run in this precanned workflow -""" - -preproc.write_graph() -""" -Datasink --------- - -Datasink is a special interface for copying and arranging results. -""" - -import nipype.interfaces.io as nio - -preproc.inputs.inputspec.func = os.path.abspath('data/s1/f3.nii') -preproc.inputs.inputspec.struct = os.path.abspath('data/s1/struct.nii') -datasink = pe.Node(interface=nio.DataSink(), name='sinker') -preprocess = pe.Workflow(name='preprocout') -preprocess.base_dir = os.path.abspath('.') -preprocess.connect([(preproc, datasink, [('meanfunc2.out_file', 'meanfunc'), - ('maskfunc3.out_file', 'funcruns')])]) -preprocess.run() -""" -Datagrabber ------------ - -Datagrabber is (surprise, surprise) an interface for collecting files from hard drive. It is very flexible and -supports almost any file organisation of your data you can imagine. 
-""" - -datasource1 = nio.DataGrabber() -datasource1.inputs.template = 'data/s1/f3.nii' -datasource1.inputs.sort_filelist = True -results = datasource1.run() -print(results.outputs) - -datasource2 = nio.DataGrabber() -datasource2.inputs.template = 'data/s*/f*.nii' -datasource2.inputs.sort_filelist = True -results = datasource2.run() -print(results.outputs) - -datasource3 = nio.DataGrabber(infields=['run']) -datasource3.inputs.template = 'data/s1/f%d.nii' -datasource3.inputs.sort_filelist = True -datasource3.inputs.run = [3, 7] -results = datasource3.run() -print(results.outputs) - -datasource4 = nio.DataGrabber(infields=['subject_id', 'run']) -datasource4.inputs.template = 'data/%s/f%d.nii' -datasource4.inputs.sort_filelist = True -datasource4.inputs.run = [3, 7] -datasource4.inputs.subject_id = ['s1', 's3'] -results = datasource4.run() -print(results.outputs) -""" -Iterables ---------- - -Iterables is a special field of the Node class that enables to iterate all workfloes/nodes connected to it over -some parameters. Here we'll use it to iterate over two subjects. -""" - -import nipype.interfaces.utility as util -infosource = pe.Node( - interface=util.IdentityInterface(fields=['subject_id']), name="infosource") -infosource.iterables = ('subject_id', ['s1', 's3']) - -datasource = pe.Node( - nio.DataGrabber(infields=['subject_id'], outfields=['func', 'struct']), - name="datasource") -datasource.inputs.template = '%s/%s.nii' -datasource.inputs.base_directory = os.path.abspath('data') -datasource.inputs.template_args = dict( - func=[['subject_id', 'f3']], struct=[['subject_id', 'struct']]) -datasource.inputs.sort_filelist = True - -my_workflow = pe.Workflow(name="my_workflow") -my_workflow.base_dir = os.path.abspath('.') - -my_workflow.connect([(infosource, datasource, [('subject_id', 'subject_id')]), - (datasource, preproc, [('func', 'inputspec.func'), - ('struct', 'inputspec.struct')])]) -my_workflow.run() -""" -and we can change a node attribute and run it again - -""" - -smoothnode = my_workflow.get_node('preproc.smooth') -assert (str(smoothnode) == 'preproc.smooth') -smoothnode.iterables = ('fwhm', [5., 10.]) -my_workflow.run() -""" -Visualizing workflows 2 ------------------------ - -In the case of nested workflows, we might want to look at expanded forms of the workflow. -""" From f560ac7886d9f05a93e2ee66c51a304de8201d57 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 21 Feb 2020 08:58:13 -0500 Subject: [PATCH 0732/1665] DOC: Initial README pointing to examples niflow --- examples/README | 4 ---- examples/README.md | 4 ++++ 2 files changed, 4 insertions(+), 4 deletions(-) delete mode 100644 examples/README create mode 100644 examples/README.md diff --git a/examples/README b/examples/README deleted file mode 100644 index 3e482bb975..0000000000 --- a/examples/README +++ /dev/null @@ -1,4 +0,0 @@ -A dataset for use with these scripts can be downloaded from the nipype -website. At the time of writing, it's at: - -http://nipype.readthedocs.io/en/0.12.0/users/pipeline_tutorial.html diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 0000000000..c22dfdcc67 --- /dev/null +++ b/examples/README.md @@ -0,0 +1,4 @@ +The examples directory previously held a set of literate programming documents that demonstrated +solutions to various problems using Nipype. + +These examples have been moved to https://github.com/niflows/nipype1-examples. 
From 193918b65e512efbbe7f49976ec065dceb6cbe86 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 21 Feb 2020 09:00:53 -0500 Subject: [PATCH 0733/1665] CI: Remove example tests from CircleCI --- .circleci/config.yml | 121 ------------------ .circleci/test_fmri_fsl_feeds_linear_l1.sh | 3 - .circleci/test_fmri_fsl_reuse_linear_l1.sh | 3 - .../test_fmri_spm_dartel_multiproc_l1.sh | 3 - .../test_fmri_spm_dartel_multiproc_l2.sh | 3 - .circleci/test_fmri_spm_linear_3d.sh | 3 - .circleci/test_fmri_spm_linear_4d.sh | 3 - .../test_fmri_spm_nested_multiproc_l1.sh | 3 - .../test_fmri_spm_nested_multiproc_l2.sh | 3 - 9 files changed, 145 deletions(-) delete mode 100644 .circleci/test_fmri_fsl_feeds_linear_l1.sh delete mode 100644 .circleci/test_fmri_fsl_reuse_linear_l1.sh delete mode 100644 .circleci/test_fmri_spm_dartel_multiproc_l1.sh delete mode 100644 .circleci/test_fmri_spm_dartel_multiproc_l2.sh delete mode 100644 .circleci/test_fmri_spm_linear_3d.sh delete mode 100644 .circleci/test_fmri_spm_linear_4d.sh delete mode 100644 .circleci/test_fmri_spm_nested_multiproc_l1.sh delete mode 100644 .circleci/test_fmri_spm_nested_multiproc_l2.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index c8058c48b7..902649cd03 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -172,103 +172,6 @@ jobs: paths: - docker - test_fmri_fsl_spm: - machine: *machine_kwds - working_directory: /home/circleci/nipype - steps: - - checkout: - path: /home/circleci/nipype - - attach_workspace: - at: /tmp - - run: *set_pr_number - - run: *generate_dockerfiles - - run: *modify_nipype_version - - run: *get_base_image - - run: *build_main_image_py36 - - run: *_get_codecov - - run: *_download_test_data - - run: *prepare_working_directory - - run: - name: Run FSL reuse pipeline - no_output_timeout: 40m - environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_fmri_fsl_reuse_linear_l1.sh - - run: - name: Run SPM test workflow - 3D inputs - no_output_timeout: 40m - environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_fmri_spm_linear_3d.sh - - run: - name: Run SPM test workflow - 4D inputs - no_output_timeout: 40m - environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_fmri_spm_linear_4d.sh - - run: *_run_codecov_smoke - - store_artifacts: *store_artifacts_kwds - - test_fmri_spm_dartel_multiproc: - machine: *machine_kwds - working_directory: /home/circleci/nipype - steps: - - checkout: - path: /home/circleci/nipype - - attach_workspace: - at: /tmp - - run: *set_pr_number - - run: *generate_dockerfiles - - run: *modify_nipype_version - - run: *get_base_image - - run: *build_main_image_py36 - - run: *_get_codecov - - run: *_download_test_data - - run: *prepare_working_directory - - run: - name: Run SPM DARTEL Level 1 pipeline - no_output_timeout: 1h - environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_fmri_spm_dartel_multiproc_l1.sh - - run: - name: Run SPM DARTEL Level 2 pipeline - no_output_timeout: 30m - environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_fmri_spm_dartel_multiproc_l2.sh - - run: *_run_codecov_smoke - - store_artifacts: *store_artifacts_kwds - - test_fmri_spm_nested_fsl_feeds: - machine: *machine_kwds - working_directory: /home/circleci/nipype - steps: - - checkout: - path: /home/circleci/nipype - - attach_workspace: - at: /tmp - - run: *set_pr_number - - run: *generate_dockerfiles - - run: *modify_nipype_version - - 
run: *get_base_image - - run: *build_main_image_py36 - - run: *_get_codecov - - run: *_download_test_data - - run: *prepare_working_directory - - run: - name: Run SPM Nested Level 1 pipeline - no_output_timeout: 1h - environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_fmri_spm_nested_multiproc_l1.sh - - run: - name: Run SPM Nested Level 2 pipeline - no_output_timeout: 30m - environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_fmri_spm_nested_multiproc_l2.sh - - run: - name: Run FSL FEEDS pipeline - no_output_timeout: 40m - environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_fmri_fsl_feeds_linear_l1.sh - - run: *_run_codecov_smoke - - store_artifacts: *store_artifacts_kwds - deploy_dockerhub: docker: - image: docker:17.10.0-ce-git @@ -446,36 +349,12 @@ workflows: only: /.*/ requires: - compare_base_dockerfiles - - test_fmri_fsl_spm: - filters: - branches: - ignore: - - /docs?\/.*/ - requires: - - compare_base_dockerfiles - - test_fmri_spm_dartel_multiproc: - filters: - branches: - ignore: - - /docs?\/.*/ - requires: - - compare_base_dockerfiles - - test_fmri_spm_nested_fsl_feeds: - filters: - branches: - ignore: - - /docs?\/.*/ - requires: - - compare_base_dockerfiles - deploy_dockerhub: filters: branches: only: master requires: - test_pytest - - test_fmri_spm_nested_fsl_feeds - - test_fmri_fsl_spm - - test_fmri_spm_dartel_multiproc - deploy_pypi: filters: branches: diff --git a/.circleci/test_fmri_fsl_feeds_linear_l1.sh b/.circleci/test_fmri_fsl_feeds_linear_l1.sh deleted file mode 100644 index 9666829b74..0000000000 --- a/.circleci/test_fmri_fsl_feeds_linear_l1.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh fmri_fsl_feeds Linear /data/examples/ l1pipeline diff --git a/.circleci/test_fmri_fsl_reuse_linear_l1.sh b/.circleci/test_fmri_fsl_reuse_linear_l1.sh deleted file mode 100644 index 48be49d80d..0000000000 --- a/.circleci/test_fmri_fsl_reuse_linear_l1.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh fmri_fsl_reuse Linear /data/examples/ level1_workflow diff --git a/.circleci/test_fmri_spm_dartel_multiproc_l1.sh b/.circleci/test_fmri_spm_dartel_multiproc_l1.sh deleted file mode 100644 index 4208eed506..0000000000 --- a/.circleci/test_fmri_spm_dartel_multiproc_l1.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh fmri_spm_dartel MultiProc /data/examples/ level1 diff --git a/.circleci/test_fmri_spm_dartel_multiproc_l2.sh b/.circleci/test_fmri_spm_dartel_multiproc_l2.sh deleted file mode 100644 index 86119e7654..0000000000 --- a/.circleci/test_fmri_spm_dartel_multiproc_l2.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh fmri_spm_dartel MultiProc /data/examples/ l2pipeline diff --git a/.circleci/test_fmri_spm_linear_3d.sh b/.circleci/test_fmri_spm_linear_3d.sh deleted file mode 100644 index 27c2c92a1a..0000000000 --- a/.circleci/test_fmri_spm_linear_3d.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -docker run --rm=false -t -v $WORKDIR:/work -v 
$HOME/examples:/data/examples:ro -w /work "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh test_spm Linear /data/examples/ workflow3d diff --git a/.circleci/test_fmri_spm_linear_4d.sh b/.circleci/test_fmri_spm_linear_4d.sh deleted file mode 100644 index cd255d60ca..0000000000 --- a/.circleci/test_fmri_spm_linear_4d.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh test_spm Linear /data/examples/ workflow4d diff --git a/.circleci/test_fmri_spm_nested_multiproc_l1.sh b/.circleci/test_fmri_spm_nested_multiproc_l1.sh deleted file mode 100644 index a6d2133a42..0000000000 --- a/.circleci/test_fmri_spm_nested_multiproc_l1.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work -e NIPYPE_NUMBER_OF_CPUS=4 "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh fmri_spm_nested MultiProc /data/examples/ level1 diff --git a/.circleci/test_fmri_spm_nested_multiproc_l2.sh b/.circleci/test_fmri_spm_nested_multiproc_l2.sh deleted file mode 100644 index c0926be148..0000000000 --- a/.circleci/test_fmri_spm_nested_multiproc_l2.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work -e NIPYPE_NUMBER_OF_CPUS=4 -e NIPYPE_RESOURCE_MONITOR=1 "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh fmri_spm_nested MultiProc /data/examples/ l2pipeline From 7e1a423806e5b5b566fbff89b27b74a2e8fbaa81 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 21 Feb 2020 11:53:15 -0500 Subject: [PATCH 0734/1665] ENH: Import examples from niflow --- doc/.gitignore | 1 + doc/Makefile | 2 +- doc/conf.py | 37 ++++++++++++++++++++++++++++--------- tools/ex2rst | 2 +- 4 files changed, 31 insertions(+), 11 deletions(-) diff --git a/doc/.gitignore b/doc/.gitignore index 1f812bd420..d396f26e2d 100644 --- a/doc/.gitignore +++ b/doc/.gitignore @@ -1 +1,2 @@ /documentation.zip +_static/python diff --git a/doc/Makefile b/doc/Makefile index bcb7ac2e8f..9b3e3783ef 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -24,7 +24,7 @@ help: @echo " doctest run all doctests embedded in the documentation" clean: - -rm -rf _build/* *~ api/generated interfaces/generated users/examples documentation.zip + -rm -rf _build/* *~ api/generated interfaces/generated users/examples documentation.zip _static/python htmlonly: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) _build/html diff --git a/doc/conf.py b/doc/conf.py index 56d6935270..a75887431d 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -13,18 +13,37 @@ # serve to show the default. import os +from pathlib import Path +from tempfile import TemporaryDirectory +import shutil from packaging.version import Version import nipype +import subprocess as sp -doc_path = os.path.abspath(os.path.dirname(__file__)) -os.makedirs('users/examples', exist_ok=True) -os.chdir(os.path.join(doc_path, 'users', 'examples')) -os.system("""python ../../../tools/ex2rst -x ../../../examples/test_spm.py \ ---project Nipype --outdir . ../../../examples""") -os.system("""python ../../../tools/ex2rst --project Nipype --outdir . 
\ -../../../examples/frontiers_paper""") -os.chdir(doc_path) +conf_py = Path(__file__) + +example_dir = conf_py.parent / 'users' / 'examples' +shutil.rmtree(example_dir, ignore_errors=True) +example_dir.mkdir(parents=True) +python_dir = conf_py.parent / "_static" / "python" +shutil.rmtree(python_dir, ignore_errors=True) + +ex2rst = str(conf_py.parent.parent / "tools" / "ex2rst") + +with TemporaryDirectory() as tmpdir: + sp.run(["git", "clone", "--depth", "1", "https://github.com/niflows/nipype1-examples.git", + tmpdir], check=True) + source_dir = Path(tmpdir) / "package" / "niflow" / "nipype1" / "examples" + shutil.copytree(source_dir, python_dir) + +sp.run(["python", ex2rst, "--project", "Nipype", "--outdir", str(example_dir), + "-x", str(python_dir / "test_spm.py"), + "-x", str(python_dir / "__init__.py"), + "-x", str(python_dir / "cli.py"), + str(python_dir)], check=True) +sp.run(["python", ex2rst, "--project", "Nipype", "--outdir", str(example_dir), + str(python_dir / "frontiers_paper")], check=True) # If extensions (or modules to document with autodoc) are in another directory, @@ -98,7 +117,7 @@ # General information about the project. project = u'nipype' -copyright = u'2009-19, Neuroimaging in Python team' +copyright = u'2009-20, Neuroimaging in Python team' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/tools/ex2rst b/tools/ex2rst index dc3c6d5f37..346a725466 100755 --- a/tools/ex2rst +++ b/tools/ex2rst @@ -190,7 +190,7 @@ def exfile2rstfile(filename, opts): This same script is also included in the %s source distribution under the :file:`examples` directory. -""" % (filename, opts.project) +""" % (os.path.relpath(filename, opts.outdir), opts.project) dfile.write(msg) From 219f377e10aea533d4e797eb3ce73b22fa07831e Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 21 Feb 2020 12:38:23 -0500 Subject: [PATCH 0735/1665] DOC: Refer to niflow instead of source distribution --- doc/conf.py | 10 +++++----- tools/ex2rst | 9 ++++++--- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index a75887431d..c5898d1cfd 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -37,13 +37,13 @@ source_dir = Path(tmpdir) / "package" / "niflow" / "nipype1" / "examples" shutil.copytree(source_dir, python_dir) -sp.run(["python", ex2rst, "--project", "Nipype", "--outdir", str(example_dir), +sp.run(["python", ex2rst, "--outdir", str(example_dir), str(python_dir), "-x", str(python_dir / "test_spm.py"), "-x", str(python_dir / "__init__.py"), - "-x", str(python_dir / "cli.py"), - str(python_dir)], check=True) -sp.run(["python", ex2rst, "--project", "Nipype", "--outdir", str(example_dir), - str(python_dir / "frontiers_paper")], check=True) + "-x", str(python_dir / "cli.py")], + check=True) +sp.run(["python", ex2rst, "--outdir", str(example_dir), str(python_dir / "frontiers_paper")], + check=True) # If extensions (or modules to document with autodoc) are in another directory, diff --git a/tools/ex2rst b/tools/ex2rst index 346a725466..82653f80e5 100755 --- a/tools/ex2rst +++ b/tools/ex2rst @@ -187,10 +187,13 @@ def exfile2rstfile(filename, opts): .. admonition:: Example source code You can download :download:`the full source code of this example <%s>`. - This same script is also included in the %s source distribution under the - :file:`examples` directory. + This same script is also included in `%s <%s>`__ under the :file:`%s` + directory. 
-""" % (os.path.relpath(filename, opts.outdir), opts.project) +""" % (os.path.relpath(filename, opts.outdir), + "Nipype1 Examples Niflow", + "https://github.com/niflows/nipype1-examples", + "package/niflow/nipype1/examples") dfile.write(msg) From e530d445040259d9e6b031e11c617eec7b9fb8dd Mon Sep 17 00:00:00 2001 From: mathiasg Date: Fri, 21 Feb 2020 15:55:57 -0500 Subject: [PATCH 0736/1665] ENH: Detect values for EulerNumber interface --- nipype/interfaces/freesurfer/utils.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/freesurfer/utils.py b/nipype/interfaces/freesurfer/utils.py index a2b3b7a47b..5a4d6ca425 100644 --- a/nipype/interfaces/freesurfer/utils.py +++ b/nipype/interfaces/freesurfer/utils.py @@ -2592,7 +2592,9 @@ class EulerNumberInputSpec(FSTraitedSpec): class EulerNumberOutputSpec(TraitedSpec): - out_file = File(exists=False, desc="Output file for EulerNumber") + euler = traits.Int(desc="Euler number of cortical surface. A value of 2 signals a " + "topologically correct surface model with no holes") + defects = traits.Int(desc="Number of defects") class EulerNumber(FSCommand): @@ -2612,9 +2614,22 @@ class EulerNumber(FSCommand): input_spec = EulerNumberInputSpec output_spec = EulerNumberOutputSpec + def _run_interface(self, runtime): + runtime = super()._run_interface(runtime) + self._parse_output(runtime.stdout, runtime.stderr) + return runtime + + def _parse_output(self, stdout, stderr): + """Parse stdout / stderr and extract defects""" + m = re.search(r'(?<=total defect index = )\d+', stdout or stderr) + if m is None: + raise RuntimeError("Could not fetch defect index") + self._defects = int(m.group()) + def _list_outputs(self): outputs = self._outputs().get() - outputs["out_file"] = os.path.abspath(self.inputs.in_file) + outputs["defects"] = self._defects + outputs["euler"] = 2 - (2 * self._defects) return outputs From b03db6abedd468e67a5d3b5bde5ab542cdc1a4ad Mon Sep 17 00:00:00 2001 From: mathiasg Date: Fri, 21 Feb 2020 16:30:16 -0500 Subject: [PATCH 0737/1665] fix: forgotten autotest --- nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py b/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py index d31c9278bc..a90be6bca4 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py @@ -17,7 +17,7 @@ def test_EulerNumber_inputs(): def test_EulerNumber_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict(defects=dict(), euler=dict(),) outputs = EulerNumber.output_spec() for key, metadata in list(output_map.items()): From 2e135f6866210c1d73210b83e4a81fee66510806 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 22 Feb 2020 08:16:27 -0500 Subject: [PATCH 0738/1665] ENH: Make run_examples.py give helpful hints --- tools/run_examples.py | 71 +++++-------------------------------------- 1 file changed, 7 insertions(+), 64 deletions(-) diff --git a/tools/run_examples.py b/tools/run_examples.py index 09a97cc2d9..20382ef74d 100644 --- a/tools/run_examples.py +++ b/tools/run_examples.py @@ -1,69 +1,12 @@ # -*- coding: utf-8 -*- -import os import sys -from shutil import rmtree -from multiprocessing import cpu_count - - -def run_examples(example, pipelines, data_path, plugin=None, rm_base_dir=True): - from nipype import config - from nipype.interfaces.base import CommandLine 
- - if plugin is None: - plugin = "MultiProc" - - print("running example: %s with plugin: %s" % (example, plugin)) - config.enable_debug_mode() - config.enable_provenance() - CommandLine.set_default_terminal_output("stream") - - plugin_args = {} - if plugin == "MultiProc": - plugin_args["n_procs"] = int(os.getenv("NIPYPE_NUMBER_OF_CPUS", cpu_count())) - - __import__(example) - for pipeline in pipelines: - wf = getattr(sys.modules[example], pipeline) - wf.base_dir = os.path.join(os.getcwd(), "output", example, plugin) - - results_dir = os.path.join(wf.base_dir, wf.name) - if rm_base_dir and os.path.exists(results_dir): - rmtree(results_dir) - - # Handle a logging directory - log_dir = os.path.join(os.getcwd(), "logs", example) - if not os.path.exists(log_dir): - os.makedirs(log_dir) - wf.config = { - "execution": { - "hash_method": "timestamp", - "stop_on_first_rerun": "true", - "write_provenance": "true", - "poll_sleep_duration": 2, - }, - "logging": {"log_directory": log_dir, "log_to_file": True}, - } - try: - wf.inputs.inputnode.in_data = os.path.abspath(data_path) - except AttributeError: - pass # the workflow does not have inputnode.in_data - - wf.run(plugin=plugin, plugin_args=plugin_args) - # run twice to check if nothing is rerunning - wf.run(plugin=plugin) +from textwrap import dedent if __name__ == "__main__": - path, file = os.path.split(__file__) - sys.path.insert(0, os.path.realpath(os.path.join(path, "..", "examples"))) - examples = { - "fmri_fsl_reuse": ["level1_workflow"], - "fmri_spm_nested": ["level1", "l2pipeline"], - # 'fmri_spm_dartel':['level1','l2pipeline'], - # 'fmri_fsl_feeds':['l1pipeline'] - } - example = sys.argv[1] - plugin = sys.argv[2] - data_path = sys.argv[3] - pipelines = sys.argv[4:] - run_examples(example, pipelines, data_path, plugin) + print(dedent("""Nipype examples have been moved to niflow-nipype1-examples. + +Install with: pip install niflow-nipype1-examples""")) + if sys.argv[1:]: + print("Run this command with: niflow-nipype1-examples " + " ".join(sys.argv[1:])) + sys.exit(1) From 22cbbb8320c29c83a1dbfba11b5e6dd3b75c55f6 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 22 Feb 2020 08:18:07 -0500 Subject: [PATCH 0739/1665] DOC: Add links to examples/README --- examples/README.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/examples/README.md b/examples/README.md index c22dfdcc67..c2e506e87a 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,4 +1,7 @@ -The examples directory previously held a set of literate programming documents that demonstrated -solutions to various problems using Nipype. +The examples directory previously held a set of +[literate programming](https://en.wikipedia.org/wiki/Literate_programming) documents that +demonstrated solutions to various problems using Nipype. -These examples have been moved to https://github.com/niflows/nipype1-examples. +These examples have been moved to the +[Nipype1 Examples Niflow](https://github.com/niflows/nipype1-examples). Please refer to +that repository for more information, and report any issues with the examples there. 
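As a usage sketch to go with the README above: the replacement run_examples.py points at a pip package and a command-line tool. Assuming that tool accepts the same "example, plugin, data path, pipeline" arguments the old script took (the printed hint suggests so, but the niflow repository is authoritative), running the FSL reuse example would look like:

    pip install niflow-nipype1-examples
    niflow-nipype1-examples fmri_fsl_reuse Linear /data/examples/ level1_workflow
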
From 3bf557a6a4b0cb853d40eb76bb4b15a355efa995 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Sun, 23 Feb 2020 11:21:25 -0500
Subject: [PATCH 0740/1665] Update CONTRIBUTING.md

---
 CONTRIBUTING.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a4387bcddc..7ddb2c1253 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -6,7 +6,8 @@ These guidelines are designed to make it as easy as possible to get involved. If
 
 Before you start you'll need to set up a free [GitHub][link_github] account and sign in. Here are some [instructions][link_signupinstructions].
 If you are not familiar with version control systems such as git,
- introductions and tutorials may be found [here](http://www.reproducibleimaging.org/module-reproducible-basics/02-vcs/).
+we recommend the [VCS module](http://www.reproducibleimaging.org/module-reproducible-basics/02-vcs/)
+available from [ReproNim](http://www.reproducibleimaging.org/).
 
 Already know what you're looking for in this guide? Jump to the following sections:
 * [Understanding issue labels](#issue-labels)

From d431f6635381984e2c05189c36522582374de9b0 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Sun, 23 Feb 2020 12:23:58 -0500
Subject: [PATCH 0741/1665] RF: Add new interface to correctly handle CSD

---
 nipype/interfaces/mrtrix3/reconst.py | 67 +++++++++++++++++++++++++---
 1 file changed, 61 insertions(+), 6 deletions(-)

diff --git a/nipype/interfaces/mrtrix3/reconst.py b/nipype/interfaces/mrtrix3/reconst.py
index 551dd3ed5e..3f21e9ad54 100644
--- a/nipype/interfaces/mrtrix3/reconst.py
+++ b/nipype/interfaces/mrtrix3/reconst.py
@@ -108,9 +108,13 @@ class EstimateFODInputSpec(MRTrix3BaseInputSpec):
         desc="output WM ODF",
     )
     gm_txt = File(argstr="%s", position=-4, desc="GM response text file")
-    gm_odf = File(argstr="%s", position=-3, desc="output GM ODF")
+    gm_odf = File(
+        "gm.mif", usedefault=True, argstr="%s", position=-3, desc="output GM ODF"
+    )
     csf_txt = File(argstr="%s", position=-2, desc="CSF response text file")
-    csf_odf = File(argstr="%s", position=-1, desc="output CSF ODF")
+    csf_odf = File(
+        "csf.mif", usedefault=True, argstr="%s", position=-1, desc="output CSF ODF"
+    )
     mask_file = File(exists=True, argstr="-mask %s", desc="mask image")
 
     # DW Shell selection options
@@ -122,6 +126,8 @@ class EstimateFODInputSpec(MRTrix3BaseInputSpec):
     )
     max_sh = InputMultiObject(
         traits.Int,
+        value=[8],
+        usedefault=True,
         argstr="-lmax %s",
         sep=",",
         desc=(
@@ -150,18 +156,24 @@ class EstimateFOD(MRTrix3Base):
     """
     Estimate fibre orientation distributions from diffusion data using spherical deconvolution
 
+    .. warning::
+
+        The CSD algorithm does not work as intended, but fixing it in this interface could break
+        existing workflows. This interface has been superseded by
+        :py:class:`.ConstrainedSphericalDeconvolution`.
+ Example ------- >>> import nipype.interfaces.mrtrix3 as mrt >>> fod = mrt.EstimateFOD() - >>> fod.inputs.algorithm = 'csd' + >>> fod.inputs.algorithm = 'msmt_csd' >>> fod.inputs.in_file = 'dwi.mif' >>> fod.inputs.wm_txt = 'wm.txt' >>> fod.inputs.grad_fsl = ('bvecs', 'bvals') - >>> fod.cmdline # doctest: +ELLIPSIS - 'dwi2fod -fslgrad bvecs bvals csd dwi.mif wm.txt wm.mif' - >>> fod.run() # doctest: +SKIP + >>> fod.cmdline + 'dwi2fod -fslgrad bvecs bvals -lmax 8 msmt_csd dwi.mif wm.txt wm.mif gm.mif csf.mif' + >>> fod.run() # doctest: +SKIP """ _cmd = "dwi2fod" @@ -176,3 +188,46 @@ def _list_outputs(self): if self.inputs.csf_odf != Undefined: outputs["csf_odf"] = op.abspath(self.inputs.csf_odf) return outputs + + +class ConstrainedSphericalDeconvolutionInputSpec(EstimateFODInputSpec): + gm_odf = File(argstr="%s", position=-3, desc="output GM ODF") + csf_odf = File(argstr="%s", position=-1, desc="output CSF ODF") + max_sh = InputMultiObject( + traits.Int, + argstr="-lmax %s", + sep=",", + desc=( + "maximum harmonic degree of response function - single value for single-shell response, list for multi-shell response" + ), + ) + + +class ConstrainedSphericalDeconvolution(EstimateFOD): + """ + Estimate fibre orientation distributions from diffusion data using spherical deconvolution + + This interface supersedes :py:class:`.EstimateFOD`. + The old interface has contained a bug when using the CSD algorithm as opposed to the MSMT CSD + algorithm, but fixing it could potentially break existing workflows. The new interface works + the same, but does not populate the following inputs by default: + + * ``gm_odf`` + * ``csf_odf`` + * ``max_sh`` + + Example + ------- + + >>> import nipype.interfaces.mrtrix3 as mrt + >>> fod = mrt.ConstrainedSphericalDeconvolution() + >>> fod.inputs.algorithm = 'csd' + >>> fod.inputs.in_file = 'dwi.mif' + >>> fod.inputs.wm_txt = 'wm.txt' + >>> fod.inputs.grad_fsl = ('bvecs', 'bvals') + >>> fod.cmdline + 'dwi2fod -fslgrad bvecs bvals csd dwi.mif wm.txt wm.mif' + >>> fod.run() # doctest: +SKIP + """ + + input_spec = ConstrainedSphericalDeconvolutionInputSpec From 6c0967e9dbe41436adb12e086ee0c05be368b88b Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 23 Feb 2020 12:36:58 -0500 Subject: [PATCH 0742/1665] make specs --- ..._auto_ConstrainedSphericalDeconvolution.py | 47 +++++++++++++++++++ tools/checkspecs.py | 2 +- 2 files changed, 48 insertions(+), 1 deletion(-) create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_ConstrainedSphericalDeconvolution.py diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ConstrainedSphericalDeconvolution.py b/nipype/interfaces/mrtrix3/tests/test_auto_ConstrainedSphericalDeconvolution.py new file mode 100644 index 0000000000..9bcba81e49 --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ConstrainedSphericalDeconvolution.py @@ -0,0 +1,47 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from ..reconst import ConstrainedSphericalDeconvolution + + +def test_ConstrainedSphericalDeconvolution_inputs(): + input_map = dict( + algorithm=dict(argstr="%s", mandatory=True, position=-8,), + args=dict(argstr="%s",), + bval_scale=dict(argstr="-bvalue_scaling %s",), + csf_odf=dict(argstr="%s", extensions=None, position=-1,), + csf_txt=dict(argstr="%s", extensions=None, position=-2,), + environ=dict(nohash=True, usedefault=True,), + gm_odf=dict(argstr="%s", extensions=None, position=-3,), + gm_txt=dict(argstr="%s", extensions=None, position=-4,), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), 
+ grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), + in_bval=dict(extensions=None,), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), + in_dirs=dict(argstr="-directions %s", extensions=None,), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-7,), + mask_file=dict(argstr="-mask %s", extensions=None,), + max_sh=dict(argstr="-lmax %s", sep=",",), + nthreads=dict(argstr="-nthreads %d", nohash=True,), + shell=dict(argstr="-shell %s", sep=",",), + wm_odf=dict( + argstr="%s", extensions=None, mandatory=True, position=-5, usedefault=True, + ), + wm_txt=dict(argstr="%s", extensions=None, mandatory=True, position=-6,), + ) + inputs = ConstrainedSphericalDeconvolution.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_ConstrainedSphericalDeconvolution_outputs(): + output_map = dict( + csf_odf=dict(argstr="%s", extensions=None,), + gm_odf=dict(argstr="%s", extensions=None,), + wm_odf=dict(argstr="%s", extensions=None,), + ) + outputs = ConstrainedSphericalDeconvolution.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/tools/checkspecs.py b/tools/checkspecs.py index a4707bd375..e06f862338 100644 --- a/tools/checkspecs.py +++ b/tools/checkspecs.py @@ -324,7 +324,7 @@ def test_specs(self, uri): and "xor" not in trait.__dict__ ): if ( - trait.trait_type.__class__.__name__ is "Range" + trait.trait_type.__class__.__name__ == "Range" and trait.default == trait.trait_type._low ): continue From 523dad9d95666cdcbdfa901df0c7e100aaa19f85 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 23 Feb 2020 12:44:56 -0500 Subject: [PATCH 0743/1665] MNT: Disable telemetry during doc build (closes gh-3175) --- doc/conf.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/conf.py b/doc/conf.py index c5898d1cfd..eb34e3f8d8 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -20,6 +20,8 @@ import nipype import subprocess as sp +# Disable etelemetry during doc builds +os.environ["NIPYPE_NO_ET"] = "1" conf_py = Path(__file__) From 7f68dda982be0b5f4e805a5f10db1b24ff47ca21 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 23 Feb 2020 19:48:07 -0500 Subject: [PATCH 0744/1665] ENH: Import ConstrainedSphericalDeconvolution into interfaces.mrtrix3 --- nipype/interfaces/mrtrix3/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/__init__.py b/nipype/interfaces/mrtrix3/__init__.py index 2970918844..f60e837310 100644 --- a/nipype/interfaces/mrtrix3/__init__.py +++ b/nipype/interfaces/mrtrix3/__init__.py @@ -23,5 +23,5 @@ DWIBiasCorrect, ) from .tracking import Tractography -from .reconst import FitTensor, EstimateFOD +from .reconst import FitTensor, EstimateFOD, ConstrainedSphericalDeconvolution from .connectivity import LabelConfig, LabelConvert, BuildConnectome From 312ae3d5395ee9f550a70076a9979bb65e4e154a Mon Sep 17 00:00:00 2001 From: mathiasg Date: Sun, 23 Feb 2020 22:49:49 -0500 Subject: [PATCH 0745/1665] ENH: add testcase --- nipype/interfaces/freesurfer/tests/test_utils.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/nipype/interfaces/freesurfer/tests/test_utils.py b/nipype/interfaces/freesurfer/tests/test_utils.py index 52348bb5e0..996453d654 100644 --- a/nipype/interfaces/freesurfer/tests/test_utils.py +++ 
b/nipype/interfaces/freesurfer/tests/test_utils.py @@ -230,3 +230,19 @@ def test_mrisexpand(tmpdir): assert op.dirname(if_out_file) == op.dirname(fsavginfo["smoothwm"]) # Node places output in working directory assert op.dirname(nd_out_file) == nd_res.runtime.cwd + + +@pytest.mark.skipif(fs.no_freesurfer(), reason="freesurfer is not installed") +def test_eulernumber(tmpdir): + # grab a surface from fsaverage + fssrc = FreeSurferSource( + subjects_dir=fs.Info.subjectsdir(), subject_id="fsaverage", hemi="lh" + ) + pial = fssrc.run().outputs.pial + assert isinstance(pial, str), "Problem when fetching surface file" + + eu = fs.EulerNumber() + eu.inputs.in_file = pial + res = eu.run() + assert res.outputs.defects == 0 + assert res.outputs.euler == 2 From a67da289c1849a59eab77689cf6de189c96dd164 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 24 Feb 2020 09:07:41 -0500 Subject: [PATCH 0746/1665] RF: Prefer math.gcd to hand-rolled Euclid's algorithm --- nipype/algorithms/modelgen.py | 23 ++--------------------- 1 file changed, 2 insertions(+), 21 deletions(-) diff --git a/nipype/algorithms/modelgen.py b/nipype/algorithms/modelgen.py index 9d0f5d0de5..8b760d226b 100644 --- a/nipype/algorithms/modelgen.py +++ b/nipype/algorithms/modelgen.py @@ -30,25 +30,6 @@ iflogger = logging.getLogger("nipype.interface") -def gcd(a, b): - """ - Return the greatest common divisor of two integers (uses Euclid's algorithm). - - Examples - -------- - >>> gcd(4, 5) - 1 - >>> gcd(4, 8) - 4 - >>> gcd(22, 55) - 11 - - """ - while b > 0: - a, b = b, a % b - return a - - def spm_hrf(RT, P=None, fMRI_T=16): """ python implementation of spm_hrf @@ -813,10 +794,10 @@ def _gen_regress(self, i_onsets, i_durations, i_amplitudes, nscans): if len(durations) == 1: durations = durations * np.ones((len(i_onsets))) onsets = np.round(np.array(i_onsets) * 1000) - dttemp = gcd(TA, gcd(SILENCE, TR)) + dttemp = math.gcd(TA, math.gcd(SILENCE, TR)) if dt < dttemp: if dttemp % dt != 0: - dt = float(gcd(dttemp, dt)) + dt = float(math.gcd(dttemp, dt)) if dt < 1: raise Exception("Time multiple less than 1 ms") From 7a19ee09f21ae233ab71e27c8672cd13edea6ead Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 24 Feb 2020 14:42:20 -0500 Subject: [PATCH 0747/1665] FIX: Explicitly coerce millisecond slices to int --- nipype/algorithms/modelgen.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nipype/algorithms/modelgen.py b/nipype/algorithms/modelgen.py index 8b760d226b..f3a4bbd2fb 100644 --- a/nipype/algorithms/modelgen.py +++ b/nipype/algorithms/modelgen.py @@ -780,9 +780,9 @@ def _gen_regress(self, i_onsets, i_durations, i_amplitudes, nscans): matplotlib.use(config.get("execution", "matplotlib_backend")) import matplotlib.pyplot as plt - TR = np.round(self.inputs.time_repetition * 1000) # in ms + TR = int(np.round(self.inputs.time_repetition * 1000)) # in ms if self.inputs.time_acquisition: - TA = np.round(self.inputs.time_acquisition * 1000) # in ms + TA = int(np.round(self.inputs.time_acquisition * 1000)) # in ms else: TA = TR # in ms nvol = self.inputs.volumes_in_cluster @@ -797,7 +797,7 @@ def _gen_regress(self, i_onsets, i_durations, i_amplitudes, nscans): dttemp = math.gcd(TA, math.gcd(SILENCE, TR)) if dt < dttemp: if dttemp % dt != 0: - dt = float(math.gcd(dttemp, dt)) + dt = float(math.gcd(dttemp, int(dt))) if dt < 1: raise Exception("Time multiple less than 1 ms") From 909301e982afcf8146406515b5de6dff966f4b41 Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 27 Feb 2020 12:16:30 -0800 Subject: 
[PATCH 0748/1665] fix #3178: improve the parsing of ``Atropos`` --- nipype/interfaces/ants/segmentation.py | 169 ++++++++++++++++--------- 1 file changed, 111 insertions(+), 58 deletions(-) diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py index faba90dc82..8f6064b4a0 100644 --- a/nipype/interfaces/ants/segmentation.py +++ b/nipype/interfaces/ants/segmentation.py @@ -31,7 +31,10 @@ class AtroposInputSpec(ANTSCommandInputSpec): requires=["number_of_tissue_classes"], mandatory=True, ) - prior_probability_images = InputMultiPath(File(exists=True)) + kmeans_init_centers = traits.List(traits.Either(traits.Int, traits.Float), minlen=1) + prior_image = traits.Either( + File(exists=True), traits.Str, + desc="either a string pattern (e.g., 'prior%%02d.nii') or an existing vector-image file.") number_of_tissue_classes = traits.Int(mandatory=True) prior_weighting = traits.Float() prior_probability_threshold = traits.Float(requires=["prior_weighting"]) @@ -65,7 +68,10 @@ class AtroposOutputSpec(TraitedSpec): class Atropos(ANTSCommand): - """A finite mixture modeling (FMM) segmentation approach with possibilities for + """ + A multivariate n-class segmentation algorithm. + + A finite mixture modeling (FMM) segmentation approach with possibilities for specifying prior constraints. These prior constraints include the specification of a prior label image, prior probability images (one for each class), and/or an MRF prior to enforce spatial smoothing of the labels. Similar algorithms include @@ -73,32 +79,69 @@ class Atropos(ANTSCommand): Examples -------- - >>> from nipype.interfaces.ants import Atropos - >>> at = Atropos() - >>> at.inputs.dimension = 3 - >>> at.inputs.intensity_images = 'structural.nii' - >>> at.inputs.mask_image = 'mask.nii' + >>> at = Atropos( + ... dimension=3, intensity_images='structural.nii', mask_image='mask.nii', + ... number_of_tissue_classes=2, likelihood_model='Gaussian', save_posteriors=True, + ... mrf_smoothing_factor=0.2, mrf_radius=[1, 1, 1], icm_use_synchronous_update=True, + ... maximum_number_of_icm_terations=1, n_iterations=5, convergence_threshold=0.000001, + ... posterior_formulation='Socrates', use_mixture_model_proportions=True) + >>> at.inputs.initialization = 'Random' + >>> at.cmdline + 'Atropos --image-dimensionality 3 --icm [1,1] \ +--initialization Random[2] --intensity-image structural.nii \ +--likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06] \ +--output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1] \ +--use-random-seed 1' + + >>> at = Atropos( + ... dimension=3, intensity_images='structural.nii', mask_image='mask.nii', + ... number_of_tissue_classes=2, likelihood_model='Gaussian', save_posteriors=True, + ... mrf_smoothing_factor=0.2, mrf_radius=[1, 1, 1], icm_use_synchronous_update=True, + ... maximum_number_of_icm_terations=1, n_iterations=5, convergence_threshold=0.000001, + ... posterior_formulation='Socrates', use_mixture_model_proportions=True) + >>> at.inputs.initialization = 'KMeans' + >>> at.inputs.kmeans_init_centers = [100, 200] + >>> at.cmdline + 'Atropos --image-dimensionality 3 --icm [1,1] \ +--initialization KMeans[2,100,200] --intensity-image structural.nii \ +--likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06] \ +--output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1] \ +--use-random-seed 1' + + >>> at = Atropos( + ... 
dimension=3, intensity_images='structural.nii', mask_image='mask.nii', + ... number_of_tissue_classes=2, likelihood_model='Gaussian', save_posteriors=True, + ... mrf_smoothing_factor=0.2, mrf_radius=[1, 1, 1], icm_use_synchronous_update=True, + ... maximum_number_of_icm_terations=1, n_iterations=5, convergence_threshold=0.000001, + ... posterior_formulation='Socrates', use_mixture_model_proportions=True) >>> at.inputs.initialization = 'PriorProbabilityImages' - >>> at.inputs.prior_probability_images = ['rc1s1.nii', 'rc1s2.nii'] - >>> at.inputs.number_of_tissue_classes = 2 + >>> at.inputs.prior_image = 'BrainSegmentationPrior%02d.nii.gz' >>> at.inputs.prior_weighting = 0.8 >>> at.inputs.prior_probability_threshold = 0.0000001 - >>> at.inputs.likelihood_model = 'Gaussian' - >>> at.inputs.mrf_smoothing_factor = 0.2 - >>> at.inputs.mrf_radius = [1, 1, 1] - >>> at.inputs.icm_use_synchronous_update = True - >>> at.inputs.maximum_number_of_icm_terations = 1 - >>> at.inputs.n_iterations = 5 - >>> at.inputs.convergence_threshold = 0.000001 - >>> at.inputs.posterior_formulation = 'Socrates' - >>> at.inputs.use_mixture_model_proportions = True - >>> at.inputs.save_posteriors = True >>> at.cmdline 'Atropos --image-dimensionality 3 --icm [1,1] \ ---initialization PriorProbabilityImages[2,priors/priorProbImages%02d.nii,0.8,1e-07] --intensity-image structural.nii \ +--initialization PriorProbabilityImages[2,BrainSegmentationPrior%02d.nii.gz,0.8,1e-07] \ +--intensity-image structural.nii --likelihood-model Gaussian --mask-image mask.nii \ +--mrf [0.2,1x1x1] --convergence [5,1e-06] --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] \ +--posterior-formulation Socrates[1] --use-random-seed 1' + + >>> at = Atropos( + ... dimension=3, intensity_images='structural.nii', mask_image='mask.nii', + ... number_of_tissue_classes=2, likelihood_model='Gaussian', save_posteriors=True, + ... mrf_smoothing_factor=0.2, mrf_radius=[1, 1, 1], icm_use_synchronous_update=True, + ... maximum_number_of_icm_terations=1, n_iterations=5, convergence_threshold=0.000001, + ... 
posterior_formulation='Socrates', use_mixture_model_proportions=True) + >>> at.inputs.initialization = 'PriorLabelImage' + >>> at.inputs.prior_image = 'segmentation0.nii.gz' + >>> at.inputs.number_of_tissue_classes = 2 + >>> at.inputs.prior_weighting = 0.8 + >>> at.cmdline + 'Atropos --image-dimensionality 3 --icm [1,1] \ +--initialization PriorLabelImage[2,segmentation0.nii.gz,0.8] --intensity-image structural.nii \ --likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06] \ ---output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1] --use-random-seed 1' +--output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1] \ +--use-random-seed 1' """ @@ -108,20 +151,53 @@ class Atropos(ANTSCommand): def _format_arg(self, opt, spec, val): if opt == "initialization": - retval = "--initialization %s[%d" % ( - val, - self.inputs.number_of_tissue_classes, - ) - if val == "PriorProbabilityImages": - _, _, ext = split_filename(self.inputs.prior_probability_images[0]) - retval += ( - ",priors/priorProbImages%02d" - + ext - + ",%g" % self.inputs.prior_weighting - ) - if isdefined(self.inputs.prior_probability_threshold): - retval += ",%g" % self.inputs.prior_probability_threshold - return retval + "]" + n_classes = self.inputs.number_of_tissue_classes + brackets = ['%d' % n_classes] + if val == 'KMeans' and isdefined(self.inputs.kmeans_init_centers): + centers = sorted(set(self.inputs.kmeans_init_centers)) + if len(centers) != n_classes: + raise ValueError( + "KMeans initialization with initial cluster centers requires " + "the number of centers to match number_of_tissue_classes" + ) + brackets += ["%g" % c for c in centers] + + if val in ("PriorProbabilityImages", "PriorLabelImage"): + if ( + not isdefined(self.inputs.prior_image) + or not isdefined(self.inputs.prior_weighting) + ): + raise ValueError( + "'%s' initialization requires setting " + "prior_image and prior_weighting" % val + ) + + priors_paths = [self.inputs.prior_image] + if "%02d" in priors_paths[0]: + if val == "PriorLabelImage": + raise ValueError( + "'PriorLabelImage' initialization does not " + "accept patterns for prior_image." + ) + priors_paths = [ + priors_paths[0] % i + for i in range(1, n_classes + 1) + ] + + if not all([os.path.exists(p) for p in priors_paths]): + raise FileNotFoundError( + "One or more prior images do not exist: " + "%s." 
% ', '.join(priors_paths) + ) + brackets += [self.inputs.prior_image, + "%g" % self.inputs.prior_weighting] + + if ( + val == "PriorProbabilityImages" + and isdefined(self.inputs.prior_probability_threshold) + ): + brackets.append("%g" % self.inputs.prior_probability_threshold) + return "--initialization %s[%s]" % (val, ','.join(brackets)) if opt == "mrf_smoothing_factor": retval = "--mrf [%g" % val if isdefined(self.inputs.mrf_radius): @@ -151,29 +227,6 @@ def _format_arg(self, opt, spec, val): return retval + "]" return super(Atropos, self)._format_arg(opt, spec, val) - def _run_interface(self, runtime, correct_return_codes=[0]): - if self.inputs.initialization == "PriorProbabilityImages": - priors_directory = os.path.join(os.getcwd(), "priors") - if not os.path.exists(priors_directory): - os.makedirs(priors_directory) - _, _, ext = split_filename(self.inputs.prior_probability_images[0]) - for i, f in enumerate(self.inputs.prior_probability_images): - target = os.path.join( - priors_directory, "priorProbImages%02d" % (i + 1) + ext - ) - if not ( - os.path.exists(target) - and os.path.realpath(target) == os.path.abspath(f) - ): - copyfile( - os.path.abspath(f), - os.path.join( - priors_directory, "priorProbImages%02d" % (i + 1) + ext - ), - ) - runtime = super(Atropos, self)._run_interface(runtime) - return runtime - def _gen_filename(self, name): if name == "out_classified_image_name": output = self.inputs.out_classified_image_name From 8165242e61d605029b52d2f1a0189e7270148e90 Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 27 Feb 2020 15:00:51 -0800 Subject: [PATCH 0749/1665] enh: add a few new utilities to ANTs --- nipype/interfaces/ants/utils.py | 346 +++++++++++++++++++++++++- nipype/utils/imagemanip.py | 18 ++ nipype/utils/tests/test_imagemanip.py | 33 +++ 3 files changed, 390 insertions(+), 7 deletions(-) create mode 100644 nipype/utils/imagemanip.py create mode 100644 nipype/utils/tests/test_imagemanip.py diff --git a/nipype/interfaces/ants/utils.py b/nipype/interfaces/ants/utils.py index 5497535609..17415b77aa 100644 --- a/nipype/interfaces/ants/utils.py +++ b/nipype/interfaces/ants/utils.py @@ -1,11 +1,341 @@ -# -*- coding: utf-8 -*- -"""ANTS Apply Transforms interface -""" - +"""ANTs' utilities.""" import os +from ...utils.imagemanip import copy_header as _copy_header +from ..base import traits, isdefined, TraitedSpec, File, Str, InputMultiPath +from .base import ANTSCommandInputSpec, ANTSCommand + + +class _ImageMathInputSpec(ANTSCommandInputSpec): + dimension = traits.Int(3, usedefault=True, position=1, argstr='%d', + desc='dimension of output image') + output_image = File(position=2, argstr='%s', name_source=['op1'], + name_template='%s_maths', desc='output image file', + keep_extension=True) + operation = traits.Enum( + 'm', 'vm', '+', 'v+', '-', 'v-', '/', '^', 'max', 'exp', 'addtozero', + 'overadd', 'abs', 'total', 'mean', 'vtotal', 'Decision', 'Neg', + 'Project', 'G', 'MD', 'ME', 'MO', 'MC', 'GD', 'GE', 'GO', 'GC', + mandatory=True, position=3, argstr='%s', + desc='mathematical operations') + op1 = File(exists=True, mandatory=True, position=-2, argstr='%s', + desc='first operator') + op2 = traits.Either(File(exists=True), Str, position=-1, + argstr='%s', desc='second operator') + copy_header = traits.Bool( + True, usedefault=True, + desc='copy headers of the original image into the output (corrected) file') + + +class _ImageMathOuputSpec(TraitedSpec): + output_image = File(exists=True, desc='output image file') + + +class ImageMath(ANTSCommand): + """ + Operations 
over images. + + Example + ------- + >>> ImageMath( + ... op1='structural.nii', + ... operation='+', + ... op2='2').cmdline + 'ImageMath 3 structural_maths.nii + structural.nii 2' + + >>> ImageMath( + ... op1='structural.nii', + ... operation='Project', + ... op2='1 2').cmdline + 'ImageMath 3 structural_maths.nii Project structural.nii 1 2' + + >>> ImageMath( + ... op1='structural.nii', + ... operation='G', + ... op2='4').cmdline + 'ImageMath 3 structural_maths.nii G structural.nii 4' + + """ + + _cmd = 'ImageMath' + input_spec = _ImageMathInputSpec + output_spec = _ImageMathOuputSpec + + def _list_outputs(self): + outputs = super(ImageMath, self)._list_outputs() + if self.inputs.copy_header: # Fix headers + _copy_header(self.inputs.op1, outputs['output_image'], + keep_dtype=True) + return outputs + + +class _ResampleImageBySpacingInputSpec(ANTSCommandInputSpec): + dimension = traits.Int(3, usedefault=True, position=1, argstr='%d', + desc='dimension of output image') + input_image = File(exists=True, mandatory=True, position=2, argstr='%s', + desc='input image file') + output_image = File(position=3, argstr='%s', name_source=['input_image'], + name_template='%s_resampled', desc='output image file', + keep_extension=True) + out_spacing = traits.Either( + traits.List(traits.Float, minlen=2, maxlen=3), + traits.Tuple(traits.Float, traits.Float, traits.Float), + traits.Tuple(traits.Float, traits.Float), + position=4, argstr='%s', mandatory=True, desc='output spacing' + ) + apply_smoothing = traits.Bool(False, argstr='%d', position=5, + desc='smooth before resampling') + addvox = traits.Int(argstr='%d', position=6, requires=['apply_smoothing'], + desc='addvox pads each dimension by addvox') + nn_interp = traits.Bool(argstr='%d', desc='nn interpolation', + position=-1, requires=['addvox']) + + +class _ResampleImageBySpacingOutputSpec(TraitedSpec): + output_image = File(exists=True, desc='resampled file') + + +class ResampleImageBySpacing(ANTSCommand): + """ + Resample an image with a given spacing. 
+ + Examples + -------- + >>> res = ResampleImageBySpacing(dimension=3) + >>> res.inputs.input_image = 'structural.nii' + >>> res.inputs.output_image = 'output.nii.gz' + >>> res.inputs.out_spacing = (4, 4, 4) + >>> res.cmdline #doctest: +ELLIPSIS + 'ResampleImageBySpacing 3 structural.nii output.nii.gz 4 4 4' + + >>> res = ResampleImageBySpacing(dimension=3) + >>> res.inputs.input_image = 'structural.nii' + >>> res.inputs.output_image = 'output.nii.gz' + >>> res.inputs.out_spacing = (4, 4, 4) + >>> res.inputs.apply_smoothing = True + >>> res.cmdline #doctest: +ELLIPSIS + 'ResampleImageBySpacing 3 structural.nii output.nii.gz 4 4 4 1' + + >>> res = ResampleImageBySpacing(dimension=3) + >>> res.inputs.input_image = 'structural.nii' + >>> res.inputs.output_image = 'output.nii.gz' + >>> res.inputs.out_spacing = (0.4, 0.4, 0.4) + >>> res.inputs.apply_smoothing = True + >>> res.inputs.addvox = 2 + >>> res.inputs.nn_interp = False + >>> res.cmdline #doctest: +ELLIPSIS + 'ResampleImageBySpacing 3 structural.nii output.nii.gz 0.4 0.4 0.4 1 2 0' + + """ + + _cmd = 'ResampleImageBySpacing' + input_spec = _ResampleImageBySpacingInputSpec + output_spec = _ResampleImageBySpacingOutputSpec + + def _format_arg(self, name, trait_spec, value): + if name == 'out_spacing': + if len(value) != self.inputs.dimension: + raise ValueError('out_spacing dimensions should match dimension') + + value = ' '.join(['%g' % d for d in value]) + + return super(ResampleImageBySpacing, self)._format_arg( + name, trait_spec, value) + + +class _ThresholdImageInputSpec(ANTSCommandInputSpec): + dimension = traits.Int(3, usedefault=True, position=1, argstr='%d', + desc='dimension of output image') + input_image = File(exists=True, mandatory=True, position=2, argstr='%s', + desc='input image file') + output_image = File(position=3, argstr='%s', name_source=['input_image'], + name_template='%s_resampled', desc='output image file', + keep_extension=True) -from ..base import TraitedSpec, File, traits, InputMultiPath -from .base import ANTSCommand, ANTSCommandInputSpec + mode = traits.Enum('Otsu', 'Kmeans', argstr='%s', position=4, + requires=['num_thresholds'], xor=['th_low', 'th_high'], + desc='whether to run Otsu / Kmeans thresholding') + num_thresholds = traits.Int(position=5, argstr='%d', + desc='number of thresholds') + input_mask = File(exists=True, requires=['num_thresholds'], argstr='%s', + desc='input mask for Otsu, Kmeans') + + th_low = traits.Float(position=4, argstr='%f', xor=['mode'], + desc='lower threshold') + th_high = traits.Float(position=5, argstr='%f', xor=['mode'], + desc='upper threshold') + inside_value = traits.Float(1, position=6, argstr='%f', requires=['th_low'], + desc='inside value') + outside_value = traits.Float(0, position=7, argstr='%f', requires=['th_low'], + desc='outside value') + copy_header = traits.Bool( + True, mandatory=True, usedefault=True, + desc='copy headers of the original image into the output (corrected) file') + + +class _ThresholdImageOutputSpec(TraitedSpec): + output_image = File(exists=True, desc='resampled file') + + +class ThresholdImage(ANTSCommand): + """ + Apply thresholds on images. 
+ + Examples + -------- + >>> thres = ThresholdImage(dimension=3) + >>> thres.inputs.input_image = 'structural.nii' + >>> thres.inputs.output_image = 'output.nii.gz' + >>> thres.inputs.th_low = 0.5 + >>> thres.inputs.th_high = 1.0 + >>> thres.inputs.inside_value = 1.0 + >>> thres.inputs.outside_value = 0.0 + >>> thres.cmdline #doctest: +ELLIPSIS + 'ThresholdImage 3 structural.nii output.nii.gz 0.500000 1.000000 1.000000 0.000000' + + >>> thres = ThresholdImage(dimension=3) + >>> thres.inputs.input_image = 'structural.nii' + >>> thres.inputs.output_image = 'output.nii.gz' + >>> thres.inputs.mode = 'Kmeans' + >>> thres.inputs.num_thresholds = 4 + >>> thres.cmdline #doctest: +ELLIPSIS + 'ThresholdImage 3 structural.nii output.nii.gz Kmeans 4' + + """ + + _cmd = 'ThresholdImage' + input_spec = _ThresholdImageInputSpec + output_spec = _ThresholdImageOutputSpec + + def _list_outputs(self): + outputs = super(ThresholdImage, self)._list_outputs() + if self.inputs.copy_header: # Fix headers + _copy_header(self.inputs.input_image, outputs['output_image'], + keep_dtype=True) + return outputs + + +class _AIInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum(3, 2, usedefault=True, argstr='-d %d', + desc='dimension of output image') + verbose = traits.Bool(False, usedefault=True, argstr='-v %d', + desc='enable verbosity') + + fixed_image = File( + exists=True, mandatory=True, + desc='Image to which the moving_image should be transformed') + moving_image = File( + exists=True, mandatory=True, + desc='Image that will be transformed to fixed_image') + + fixed_image_mask = File( + exists=True, argstr='-x %s', desc='fixed mage mask') + moving_image_mask = File( + exists=True, requires=['fixed_image_mask'], + desc='moving mage mask') + + metric_trait = ( + traits.Enum("Mattes", "GC", "MI"), + traits.Int(32), + traits.Enum('Regular', 'Random', 'None'), + traits.Range(value=0.2, low=0.0, high=1.0) + ) + metric = traits.Tuple(*metric_trait, argstr='-m %s', mandatory=True, + desc='the metric(s) to use.') + + transform = traits.Tuple( + traits.Enum('Affine', 'Rigid', 'Similarity'), + traits.Range(value=0.1, low=0.0, exclude_low=True), + argstr='-t %s[%g]', usedefault=True, + desc='Several transform options are available') + + principal_axes = traits.Bool(False, usedefault=True, argstr='-p %d', xor=['blobs'], + desc='align using principal axes') + search_factor = traits.Tuple( + traits.Float(20), traits.Range(value=0.12, low=0.0, high=1.0), + usedefault=True, argstr='-s [%g,%g]', desc='search factor') + + search_grid = traits.Either( + traits.Tuple(traits.Float, traits.Tuple(traits.Float, traits.Float, traits.Float)), + traits.Tuple(traits.Float, traits.Tuple(traits.Float, traits.Float)), + argstr='-g %s', desc='Translation search grid in mm') + + convergence = traits.Tuple( + traits.Range(low=1, high=10000, value=10), + traits.Float(1e-6), + traits.Range(low=1, high=100, value=10), + usedefault=True, argstr='-c [%d,%g,%d]', desc='convergence') + + output_transform = File( + 'initialization.mat', usedefault=True, argstr='-o %s', + desc='output file name') + + +class _AIOuputSpec(TraitedSpec): + output_transform = File(exists=True, desc='output file name') + + +class AI(ANTSCommand): + """ + Calculate the optimal linear transform parameters for aligning two images. + + Examples + -------- + >>> AI( + ... fixed_image='structural.nii', + ... moving_image='epi.nii', + ... metric=('Mattes', 32, 'Regular', 1), + ... 
).cmdline + 'antsAI -c [10,1e-06,10] -d 3 -m Mattes[structural.nii,epi.nii,32,Regular,1] + -o initialization.mat -p 0 -s [20,0.12] -t Affine[0.1] -v 0' + + >>> AI( + ... fixed_image='structural.nii', + ... moving_image='epi.nii', + ... metric=('Mattes', 32, 'Regular', 1), + ... search_grid=(12, (1, 1, 1)), + ... ).cmdline + 'antsAI -c [10,1e-06,10] -d 3 -m Mattes[structural.nii,epi.nii,32,Regular,1] + -o initialization.mat -p 0 -s [20,0.12] -g [12.0,1x1x1] -t Affine[0.1] -v 0' + + """ + + _cmd = 'antsAI' + input_spec = _AIInputSpec + output_spec = _AIOuputSpec + + def _run_interface(self, runtime, correct_return_codes=(0, )): + runtime = super(AI, self)._run_interface( + runtime, correct_return_codes) + + setattr(self, '_output', { + 'output_transform': os.path.join( + runtime.cwd, + os.path.basename(self.inputs.output_transform)) + }) + return runtime + + def _format_arg(self, opt, spec, val): + if opt == 'metric': + val = '%s[{fixed_image},{moving_image},%d,%s,%g]' % val + val = val.format( + fixed_image=self.inputs.fixed_image, + moving_image=self.inputs.moving_image) + return spec.argstr % val + + if opt == 'search_grid': + val1 = 'x'.join(['%g' % v for v in val[1]]) + fmtval = '[%s]' % ','.join([str(val[0]), val1]) + return spec.argstr % fmtval + + if opt == 'fixed_image_mask': + if isdefined(self.inputs.moving_image_mask): + return spec.argstr % ('[%s,%s]' % ( + val, self.inputs.moving_image_mask)) + + return super(AI, self)._format_arg(opt, spec, val) + + def _list_outputs(self): + return getattr(self, '_output') class AverageAffineTransformInputSpec(ANTSCommandInputSpec): @@ -42,6 +372,7 @@ class AverageAffineTransform(ANTSCommand): >>> avg.inputs.output_affine_transform = 'MYtemplatewarp.mat' >>> avg.cmdline 'AverageAffineTransform 3 MYtemplatewarp.mat trans.mat func_to_struct.mat' + """ _cmd = "AverageAffineTransform" @@ -343,7 +674,8 @@ class ComposeMultiTransform(ANTSCommand): >>> compose_transform.inputs.dimension = 3 >>> compose_transform.inputs.transforms = ['struct_to_template.mat', 'func_to_struct.mat'] >>> compose_transform.cmdline - 'ComposeMultiTransform 3 struct_to_template_composed.mat struct_to_template.mat func_to_struct.mat' + 'ComposeMultiTransform 3 struct_to_template_composed.mat + struct_to_template.mat func_to_struct.mat' """ diff --git a/nipype/utils/imagemanip.py b/nipype/utils/imagemanip.py new file mode 100644 index 0000000000..4fe022973b --- /dev/null +++ b/nipype/utils/imagemanip.py @@ -0,0 +1,18 @@ +"""Image manipulation utilities (mostly, NiBabel manipulations).""" +import nibabel as nb + + +def copy_header(header_file, in_file, keep_dtype=True): + """Copy header from a reference image onto another image.""" + hdr_img = nb.load(header_file) + out_img = nb.load(in_file, mmap=False) + hdr = hdr_img.header.copy() + if keep_dtype: + hdr.set_data_dtype(out_img.get_data_dtype()) + + new_img = out_img.__class__(out_img.dataobj, None, hdr) + if not keep_dtype: + new_img.set_data_dtype(hdr_img.get_data_dtype()) + + new_img.to_filename(in_file) + return in_file diff --git a/nipype/utils/tests/test_imagemanip.py b/nipype/utils/tests/test_imagemanip.py new file mode 100644 index 0000000000..922d7681f1 --- /dev/null +++ b/nipype/utils/tests/test_imagemanip.py @@ -0,0 +1,33 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +import numpy as np +import nibabel as nb +import pytest +from ..imagemanip import copy_header + + +@pytest.mark.parametrize('keep_dtype', (True, False)) +def 
test_copy_header(tmp_path, keep_dtype):
+    """Cover copy_header."""
+    fname1 = tmp_path / 'reference.nii.gz'
+    fname2 = tmp_path / 'target.nii.gz'
+
+    nii = nb.Nifti1Image(
+        np.zeros((10, 10, 10), dtype='uint8'), None, None)
+    nii.set_qform(np.diag((1., 2., 3., 1.)), code=2)
+    nii.set_sform(np.diag((1., 2., 3., 1.)), code=1)
+    nii.to_filename(fname1)
+
+    nii.set_data_dtype('float32')
+    nii.set_qform(np.eye(4), code=1)
+    nii.to_filename(fname2)
+
+    copied = nb.load(
+        copy_header(fname1, fname2, keep_dtype=keep_dtype)
+    )
+    ref = nb.load(fname1)
+    assert np.all(copied.get_qform(coded=False) == ref.get_qform(coded=False))
+    assert np.all(copied.get_sform(coded=False) == ref.get_sform(coded=False))
+    assert copied.get_qform(coded=True)[1] == ref.get_qform(coded=True)[1]
+    assert copied.get_sform(coded=True)[1] == ref.get_sform(coded=True)[1]
+    assert (copied.header.get_data_dtype() == ref.header.get_data_dtype()) != keep_dtype

From 167f6c964ab0c1832bc1c80ae947d33779548cac Mon Sep 17 00:00:00 2001
From: oesteban
Date: Thu, 27 Feb 2020 15:01:24 -0800
Subject: [PATCH 0750/1665] enh: update ants' interfaces API

---
 nipype/interfaces/ants/__init__.py | 59 ++++++++++++++++++++++++------
 1 file changed, 48 insertions(+), 11 deletions(-)

diff --git a/nipype/interfaces/ants/__init__.py b/nipype/interfaces/ants/__init__.py
index 389a5f1371..0dd4c66adf 100644
--- a/nipype/interfaces/ants/__init__.py
+++ b/nipype/interfaces/ants/__init__.py
@@ -3,13 +3,13 @@
 # vi: set ft=python sts=4 ts=4 sw=4 et:
 """Top-level namespace for ants."""
 
-# Registraiton programs
+# Registration programs
 from .registration import (
     ANTS,
-    Registration,
-    RegistrationSynQuick,
     CompositeTransformUtil,
     MeasureImageSimilarity,
+    Registration,
+    RegistrationSynQuick,
 )
 
 # Resampling Programs
@@ -22,14 +22,14 @@
 
 # Segmentation Programs
 from .segmentation import (
+    AntsJointFusion,
     Atropos,
-    LaplacianThickness,
-    N4BiasFieldCorrection,
-    JointFusion,
-    CorticalThickness,
     BrainExtraction,
+    CorticalThickness,
     DenoiseImage,
-    AntsJointFusion,
+    JointFusion,
+    LaplacianThickness,
+    N4BiasFieldCorrection,
 )
 
 # Visualization Programs
@@ -37,11 +37,48 @@
 
 # Utility Programs
 from .utils import (
+    AffineInitializer,
+    AI,
     AverageAffineTransform,
     AverageImages,
-    MultiplyImages,
-    CreateJacobianDeterminantImage,
-    AffineInitializer,
     ComposeMultiTransform,
+    CreateJacobianDeterminantImage,
+    ImageMath,
     LabelGeometry,
+    MultiplyImages,
+    ResampleImageBySpacing,
+    ThresholdImage,
 )
+
+__all__ = [
+    "AffineInitializer",
+    "AI",
+    "ANTS",
+    "AntsJointFusion",
+    "ApplyTransforms",
+    "ApplyTransformsToPoints",
+    "Atropos",
+    "AverageAffineTransform",
+    "AverageImages",
+    "BrainExtraction",
+    "ComposeMultiTransform",
+    "CompositeTransformUtil",
+    "ConvertScalarImageToRGB",
+    "CorticalThickness",
+    "CreateJacobianDeterminantImage",
+    "CreateTiledMosaic",
+    "DenoiseImage",
+    "ImageMath",
+    "JointFusion",
+    "LabelGeometry",
+    "LaplacianThickness",
+    "MeasureImageSimilarity",
+    "MultiplyImages",
+    "N4BiasFieldCorrection",
+    "Registration",
+    "RegistrationSynQuick",
+    "ResampleImageBySpacing",
+    "ThresholdImage",
+    "WarpImageMultiTransform",
+    "WarpTimeSeriesImageMultiTransform",
+]

From 417b8897a116fcded5000e21e2b6ccbe29452a52 Mon Sep 17 00:00:00 2001
From: oesteban
Date: Thu, 27 Feb 2020 15:08:42 -0800
Subject: [PATCH 0751/1665] sty: black

---
 nipype/interfaces/ants/utils.py | 341 +++++++++++++++++++++----------
 1 file changed, 221 insertions(+), 120 deletions(-)

diff --git a/nipype/interfaces/ants/utils.py b/nipype/interfaces/ants/utils.py
index 
17415b77aa..28f4cbfd97 100644 --- a/nipype/interfaces/ants/utils.py +++ b/nipype/interfaces/ants/utils.py @@ -6,28 +6,66 @@ class _ImageMathInputSpec(ANTSCommandInputSpec): - dimension = traits.Int(3, usedefault=True, position=1, argstr='%d', - desc='dimension of output image') - output_image = File(position=2, argstr='%s', name_source=['op1'], - name_template='%s_maths', desc='output image file', - keep_extension=True) + dimension = traits.Int( + 3, usedefault=True, position=1, argstr="%d", desc="dimension of output image" + ) + output_image = File( + position=2, + argstr="%s", + name_source=["op1"], + name_template="%s_maths", + desc="output image file", + keep_extension=True, + ) operation = traits.Enum( - 'm', 'vm', '+', 'v+', '-', 'v-', '/', '^', 'max', 'exp', 'addtozero', - 'overadd', 'abs', 'total', 'mean', 'vtotal', 'Decision', 'Neg', - 'Project', 'G', 'MD', 'ME', 'MO', 'MC', 'GD', 'GE', 'GO', 'GC', - mandatory=True, position=3, argstr='%s', - desc='mathematical operations') - op1 = File(exists=True, mandatory=True, position=-2, argstr='%s', - desc='first operator') - op2 = traits.Either(File(exists=True), Str, position=-1, - argstr='%s', desc='second operator') + "m", + "vm", + "+", + "v+", + "-", + "v-", + "/", + "^", + "max", + "exp", + "addtozero", + "overadd", + "abs", + "total", + "mean", + "vtotal", + "Decision", + "Neg", + "Project", + "G", + "MD", + "ME", + "MO", + "MC", + "GD", + "GE", + "GO", + "GC", + mandatory=True, + position=3, + argstr="%s", + desc="mathematical operations", + ) + op1 = File( + exists=True, mandatory=True, position=-2, argstr="%s", desc="first operator" + ) + op2 = traits.Either( + File(exists=True), Str, position=-1, argstr="%s", desc="second operator" + ) copy_header = traits.Bool( - True, usedefault=True, - desc='copy headers of the original image into the output (corrected) file') + True, + usedefault=True, + desc="copy headers of the original image into the output (corrected) file", + ) class _ImageMathOuputSpec(TraitedSpec): - output_image = File(exists=True, desc='output image file') + output_image = File(exists=True, desc="output image file") class ImageMath(ANTSCommand): @@ -56,42 +94,57 @@ class ImageMath(ANTSCommand): """ - _cmd = 'ImageMath' + _cmd = "ImageMath" input_spec = _ImageMathInputSpec output_spec = _ImageMathOuputSpec def _list_outputs(self): outputs = super(ImageMath, self)._list_outputs() if self.inputs.copy_header: # Fix headers - _copy_header(self.inputs.op1, outputs['output_image'], - keep_dtype=True) + _copy_header(self.inputs.op1, outputs["output_image"], keep_dtype=True) return outputs class _ResampleImageBySpacingInputSpec(ANTSCommandInputSpec): - dimension = traits.Int(3, usedefault=True, position=1, argstr='%d', - desc='dimension of output image') - input_image = File(exists=True, mandatory=True, position=2, argstr='%s', - desc='input image file') - output_image = File(position=3, argstr='%s', name_source=['input_image'], - name_template='%s_resampled', desc='output image file', - keep_extension=True) + dimension = traits.Int( + 3, usedefault=True, position=1, argstr="%d", desc="dimension of output image" + ) + input_image = File( + exists=True, mandatory=True, position=2, argstr="%s", desc="input image file" + ) + output_image = File( + position=3, + argstr="%s", + name_source=["input_image"], + name_template="%s_resampled", + desc="output image file", + keep_extension=True, + ) out_spacing = traits.Either( traits.List(traits.Float, minlen=2, maxlen=3), traits.Tuple(traits.Float, traits.Float, traits.Float), 
traits.Tuple(traits.Float, traits.Float), - position=4, argstr='%s', mandatory=True, desc='output spacing' + position=4, + argstr="%s", + mandatory=True, + desc="output spacing", + ) + apply_smoothing = traits.Bool( + False, argstr="%d", position=5, desc="smooth before resampling" + ) + addvox = traits.Int( + argstr="%d", + position=6, + requires=["apply_smoothing"], + desc="addvox pads each dimension by addvox", + ) + nn_interp = traits.Bool( + argstr="%d", desc="nn interpolation", position=-1, requires=["addvox"] ) - apply_smoothing = traits.Bool(False, argstr='%d', position=5, - desc='smooth before resampling') - addvox = traits.Int(argstr='%d', position=6, requires=['apply_smoothing'], - desc='addvox pads each dimension by addvox') - nn_interp = traits.Bool(argstr='%d', desc='nn interpolation', - position=-1, requires=['addvox']) class _ResampleImageBySpacingOutputSpec(TraitedSpec): - output_image = File(exists=True, desc='resampled file') + output_image = File(exists=True, desc="resampled file") class ResampleImageBySpacing(ANTSCommand): @@ -127,53 +180,73 @@ class ResampleImageBySpacing(ANTSCommand): """ - _cmd = 'ResampleImageBySpacing' + _cmd = "ResampleImageBySpacing" input_spec = _ResampleImageBySpacingInputSpec output_spec = _ResampleImageBySpacingOutputSpec def _format_arg(self, name, trait_spec, value): - if name == 'out_spacing': + if name == "out_spacing": if len(value) != self.inputs.dimension: - raise ValueError('out_spacing dimensions should match dimension') + raise ValueError("out_spacing dimensions should match dimension") - value = ' '.join(['%g' % d for d in value]) + value = " ".join(["%g" % d for d in value]) - return super(ResampleImageBySpacing, self)._format_arg( - name, trait_spec, value) + return super(ResampleImageBySpacing, self)._format_arg(name, trait_spec, value) class _ThresholdImageInputSpec(ANTSCommandInputSpec): - dimension = traits.Int(3, usedefault=True, position=1, argstr='%d', - desc='dimension of output image') - input_image = File(exists=True, mandatory=True, position=2, argstr='%s', - desc='input image file') - output_image = File(position=3, argstr='%s', name_source=['input_image'], - name_template='%s_resampled', desc='output image file', - keep_extension=True) - - mode = traits.Enum('Otsu', 'Kmeans', argstr='%s', position=4, - requires=['num_thresholds'], xor=['th_low', 'th_high'], - desc='whether to run Otsu / Kmeans thresholding') - num_thresholds = traits.Int(position=5, argstr='%d', - desc='number of thresholds') - input_mask = File(exists=True, requires=['num_thresholds'], argstr='%s', - desc='input mask for Otsu, Kmeans') - - th_low = traits.Float(position=4, argstr='%f', xor=['mode'], - desc='lower threshold') - th_high = traits.Float(position=5, argstr='%f', xor=['mode'], - desc='upper threshold') - inside_value = traits.Float(1, position=6, argstr='%f', requires=['th_low'], - desc='inside value') - outside_value = traits.Float(0, position=7, argstr='%f', requires=['th_low'], - desc='outside value') + dimension = traits.Int( + 3, usedefault=True, position=1, argstr="%d", desc="dimension of output image" + ) + input_image = File( + exists=True, mandatory=True, position=2, argstr="%s", desc="input image file" + ) + output_image = File( + position=3, + argstr="%s", + name_source=["input_image"], + name_template="%s_resampled", + desc="output image file", + keep_extension=True, + ) + + mode = traits.Enum( + "Otsu", + "Kmeans", + argstr="%s", + position=4, + requires=["num_thresholds"], + xor=["th_low", "th_high"], + desc="whether to run 
Otsu / Kmeans thresholding", + ) + num_thresholds = traits.Int(position=5, argstr="%d", desc="number of thresholds") + input_mask = File( + exists=True, + requires=["num_thresholds"], + argstr="%s", + desc="input mask for Otsu, Kmeans", + ) + + th_low = traits.Float(position=4, argstr="%f", xor=["mode"], desc="lower threshold") + th_high = traits.Float( + position=5, argstr="%f", xor=["mode"], desc="upper threshold" + ) + inside_value = traits.Float( + 1, position=6, argstr="%f", requires=["th_low"], desc="inside value" + ) + outside_value = traits.Float( + 0, position=7, argstr="%f", requires=["th_low"], desc="outside value" + ) copy_header = traits.Bool( - True, mandatory=True, usedefault=True, - desc='copy headers of the original image into the output (corrected) file') + True, + mandatory=True, + usedefault=True, + desc="copy headers of the original image into the output (corrected) file", + ) class _ThresholdImageOutputSpec(TraitedSpec): - output_image = File(exists=True, desc='resampled file') + output_image = File(exists=True, desc="resampled file") class ThresholdImage(ANTSCommand): @@ -202,76 +275,101 @@ class ThresholdImage(ANTSCommand): """ - _cmd = 'ThresholdImage' + _cmd = "ThresholdImage" input_spec = _ThresholdImageInputSpec output_spec = _ThresholdImageOutputSpec def _list_outputs(self): outputs = super(ThresholdImage, self)._list_outputs() if self.inputs.copy_header: # Fix headers - _copy_header(self.inputs.input_image, outputs['output_image'], - keep_dtype=True) + _copy_header( + self.inputs.input_image, outputs["output_image"], keep_dtype=True + ) return outputs class _AIInputSpec(ANTSCommandInputSpec): - dimension = traits.Enum(3, 2, usedefault=True, argstr='-d %d', - desc='dimension of output image') - verbose = traits.Bool(False, usedefault=True, argstr='-v %d', - desc='enable verbosity') + dimension = traits.Enum( + 3, 2, usedefault=True, argstr="-d %d", desc="dimension of output image" + ) + verbose = traits.Bool( + False, usedefault=True, argstr="-v %d", desc="enable verbosity" + ) fixed_image = File( - exists=True, mandatory=True, - desc='Image to which the moving_image should be transformed') + exists=True, + mandatory=True, + desc="Image to which the moving_image should be transformed", + ) moving_image = File( - exists=True, mandatory=True, - desc='Image that will be transformed to fixed_image') + exists=True, + mandatory=True, + desc="Image that will be transformed to fixed_image", + ) - fixed_image_mask = File( - exists=True, argstr='-x %s', desc='fixed mage mask') + fixed_image_mask = File(exists=True, argstr="-x %s", desc="fixed mage mask") moving_image_mask = File( - exists=True, requires=['fixed_image_mask'], - desc='moving mage mask') + exists=True, requires=["fixed_image_mask"], desc="moving mage mask" + ) metric_trait = ( traits.Enum("Mattes", "GC", "MI"), traits.Int(32), - traits.Enum('Regular', 'Random', 'None'), - traits.Range(value=0.2, low=0.0, high=1.0) + traits.Enum("Regular", "Random", "None"), + traits.Range(value=0.2, low=0.0, high=1.0), + ) + metric = traits.Tuple( + *metric_trait, argstr="-m %s", mandatory=True, desc="the metric(s) to use." 
) - metric = traits.Tuple(*metric_trait, argstr='-m %s', mandatory=True, - desc='the metric(s) to use.') transform = traits.Tuple( - traits.Enum('Affine', 'Rigid', 'Similarity'), + traits.Enum("Affine", "Rigid", "Similarity"), traits.Range(value=0.1, low=0.0, exclude_low=True), - argstr='-t %s[%g]', usedefault=True, - desc='Several transform options are available') + argstr="-t %s[%g]", + usedefault=True, + desc="Several transform options are available", + ) - principal_axes = traits.Bool(False, usedefault=True, argstr='-p %d', xor=['blobs'], - desc='align using principal axes') + principal_axes = traits.Bool( + False, + usedefault=True, + argstr="-p %d", + xor=["blobs"], + desc="align using principal axes", + ) search_factor = traits.Tuple( - traits.Float(20), traits.Range(value=0.12, low=0.0, high=1.0), - usedefault=True, argstr='-s [%g,%g]', desc='search factor') + traits.Float(20), + traits.Range(value=0.12, low=0.0, high=1.0), + usedefault=True, + argstr="-s [%g,%g]", + desc="search factor", + ) search_grid = traits.Either( - traits.Tuple(traits.Float, traits.Tuple(traits.Float, traits.Float, traits.Float)), + traits.Tuple( + traits.Float, traits.Tuple(traits.Float, traits.Float, traits.Float) + ), traits.Tuple(traits.Float, traits.Tuple(traits.Float, traits.Float)), - argstr='-g %s', desc='Translation search grid in mm') + argstr="-g %s", + desc="Translation search grid in mm", + ) convergence = traits.Tuple( traits.Range(low=1, high=10000, value=10), traits.Float(1e-6), traits.Range(low=1, high=100, value=10), - usedefault=True, argstr='-c [%d,%g,%d]', desc='convergence') + usedefault=True, + argstr="-c [%d,%g,%d]", + desc="convergence", + ) output_transform = File( - 'initialization.mat', usedefault=True, argstr='-o %s', - desc='output file name') + "initialization.mat", usedefault=True, argstr="-o %s", desc="output file name" + ) class _AIOuputSpec(TraitedSpec): - output_transform = File(exists=True, desc='output file name') + output_transform = File(exists=True, desc="output file name") class AI(ANTSCommand): @@ -299,43 +397,46 @@ class AI(ANTSCommand): """ - _cmd = 'antsAI' + _cmd = "antsAI" input_spec = _AIInputSpec output_spec = _AIOuputSpec - def _run_interface(self, runtime, correct_return_codes=(0, )): - runtime = super(AI, self)._run_interface( - runtime, correct_return_codes) - - setattr(self, '_output', { - 'output_transform': os.path.join( - runtime.cwd, - os.path.basename(self.inputs.output_transform)) - }) + def _run_interface(self, runtime, correct_return_codes=(0,)): + runtime = super(AI, self)._run_interface(runtime, correct_return_codes) + + setattr( + self, + "_output", + { + "output_transform": os.path.join( + runtime.cwd, os.path.basename(self.inputs.output_transform) + ) + }, + ) return runtime def _format_arg(self, opt, spec, val): - if opt == 'metric': - val = '%s[{fixed_image},{moving_image},%d,%s,%g]' % val + if opt == "metric": + val = "%s[{fixed_image},{moving_image},%d,%s,%g]" % val val = val.format( fixed_image=self.inputs.fixed_image, - moving_image=self.inputs.moving_image) + moving_image=self.inputs.moving_image, + ) return spec.argstr % val - if opt == 'search_grid': - val1 = 'x'.join(['%g' % v for v in val[1]]) - fmtval = '[%s]' % ','.join([str(val[0]), val1]) + if opt == "search_grid": + val1 = "x".join(["%g" % v for v in val[1]]) + fmtval = "[%s]" % ",".join([str(val[0]), val1]) return spec.argstr % fmtval - if opt == 'fixed_image_mask': + if opt == "fixed_image_mask": if isdefined(self.inputs.moving_image_mask): - return spec.argstr % 
('[%s,%s]' % ( - val, self.inputs.moving_image_mask)) + return spec.argstr % ("[%s,%s]" % (val, self.inputs.moving_image_mask)) return super(AI, self)._format_arg(opt, spec, val) def _list_outputs(self): - return getattr(self, '_output') + return getattr(self, "_output") class AverageAffineTransformInputSpec(ANTSCommandInputSpec): From 1c5045d9acc2a26c8222ad84c65e6e60390df508 Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 27 Feb 2020 15:35:05 -0800 Subject: [PATCH 0752/1665] enh,sty: update antsJointFusion + black et al. --- nipype/interfaces/ants/segmentation.py | 454 +++++++----------- .../ants/tests/test_auto_JointFusion.py | 45 -- .../ants/tests/test_spec_JointFusion.py | 92 ---- 3 files changed, 169 insertions(+), 422 deletions(-) delete mode 100644 nipype/interfaces/ants/tests/test_auto_JointFusion.py delete mode 100644 nipype/interfaces/ants/tests/test_spec_JointFusion.py diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py index 8f6064b4a0..d2d75d900f 100644 --- a/nipype/interfaces/ants/segmentation.py +++ b/nipype/interfaces/ants/segmentation.py @@ -1,7 +1,6 @@ -# -*- coding: utf-8 -*- -"""The ants module provides basic functions for interfacing with ants functions. -""" +"""Wrappers for segmentation utilities within ANTs.""" import os +from glob import glob from ...external.due import BibTeX from ...utils.filemanip import split_filename, copyfile, which, fname_presuffix from ..base import TraitedSpec, File, traits, InputMultiPath, OutputMultiPath, isdefined @@ -33,8 +32,10 @@ class AtroposInputSpec(ANTSCommandInputSpec): ) kmeans_init_centers = traits.List(traits.Either(traits.Int, traits.Float), minlen=1) prior_image = traits.Either( - File(exists=True), traits.Str, - desc="either a string pattern (e.g., 'prior%%02d.nii') or an existing vector-image file.") + File(exists=True), + traits.Str, + desc="either a string pattern (e.g., 'prior%%02d.nii') or an existing vector-image file.", + ) number_of_tissue_classes = traits.Int(mandatory=True) prior_weighting = traits.Float() prior_probability_threshold = traits.Float(requires=["prior_weighting"]) @@ -88,11 +89,11 @@ class Atropos(ANTSCommand): ... posterior_formulation='Socrates', use_mixture_model_proportions=True) >>> at.inputs.initialization = 'Random' >>> at.cmdline - 'Atropos --image-dimensionality 3 --icm [1,1] \ ---initialization Random[2] --intensity-image structural.nii \ ---likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06] \ ---output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1] \ ---use-random-seed 1' + 'Atropos --image-dimensionality 3 --icm [1,1] + --initialization Random[2] --intensity-image structural.nii + --likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06] + --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1] + --use-random-seed 1' >>> at = Atropos( ... 
dimension=3, intensity_images='structural.nii', mask_image='mask.nii', @@ -103,11 +104,11 @@ class Atropos(ANTSCommand): >>> at.inputs.initialization = 'KMeans' >>> at.inputs.kmeans_init_centers = [100, 200] >>> at.cmdline - 'Atropos --image-dimensionality 3 --icm [1,1] \ ---initialization KMeans[2,100,200] --intensity-image structural.nii \ ---likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06] \ ---output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1] \ ---use-random-seed 1' + 'Atropos --image-dimensionality 3 --icm [1,1] + --initialization KMeans[2,100,200] --intensity-image structural.nii + --likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06] + --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1] + --use-random-seed 1' >>> at = Atropos( ... dimension=3, intensity_images='structural.nii', mask_image='mask.nii', @@ -120,11 +121,12 @@ class Atropos(ANTSCommand): >>> at.inputs.prior_weighting = 0.8 >>> at.inputs.prior_probability_threshold = 0.0000001 >>> at.cmdline - 'Atropos --image-dimensionality 3 --icm [1,1] \ ---initialization PriorProbabilityImages[2,BrainSegmentationPrior%02d.nii.gz,0.8,1e-07] \ ---intensity-image structural.nii --likelihood-model Gaussian --mask-image mask.nii \ ---mrf [0.2,1x1x1] --convergence [5,1e-06] --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] \ ---posterior-formulation Socrates[1] --use-random-seed 1' + 'Atropos --image-dimensionality 3 --icm [1,1] + --initialization PriorProbabilityImages[2,BrainSegmentationPrior%02d.nii.gz,0.8,1e-07] + --intensity-image structural.nii --likelihood-model Gaussian --mask-image mask.nii + --mrf [0.2,1x1x1] --convergence [5,1e-06] + --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] + --posterior-formulation Socrates[1] --use-random-seed 1' >>> at = Atropos( ... 
dimension=3, intensity_images='structural.nii', mask_image='mask.nii', @@ -137,11 +139,11 @@ class Atropos(ANTSCommand): >>> at.inputs.number_of_tissue_classes = 2 >>> at.inputs.prior_weighting = 0.8 >>> at.cmdline - 'Atropos --image-dimensionality 3 --icm [1,1] \ ---initialization PriorLabelImage[2,segmentation0.nii.gz,0.8] --intensity-image structural.nii \ ---likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06] \ ---output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1] \ ---use-random-seed 1' + 'Atropos --image-dimensionality 3 --icm [1,1] + --initialization PriorLabelImage[2,segmentation0.nii.gz,0.8] --intensity-image structural.nii + --likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06] + --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1] + --use-random-seed 1' """ @@ -152,8 +154,8 @@ class Atropos(ANTSCommand): def _format_arg(self, opt, spec, val): if opt == "initialization": n_classes = self.inputs.number_of_tissue_classes - brackets = ['%d' % n_classes] - if val == 'KMeans' and isdefined(self.inputs.kmeans_init_centers): + brackets = ["%d" % n_classes] + if val == "KMeans" and isdefined(self.inputs.kmeans_init_centers): centers = sorted(set(self.inputs.kmeans_init_centers)) if len(centers) != n_classes: raise ValueError( @@ -163,9 +165,8 @@ def _format_arg(self, opt, spec, val): brackets += ["%g" % c for c in centers] if val in ("PriorProbabilityImages", "PriorLabelImage"): - if ( - not isdefined(self.inputs.prior_image) - or not isdefined(self.inputs.prior_weighting) + if not isdefined(self.inputs.prior_image) or not isdefined( + self.inputs.prior_weighting ): raise ValueError( "'%s' initialization requires setting " @@ -180,24 +181,24 @@ def _format_arg(self, opt, spec, val): "accept patterns for prior_image." ) priors_paths = [ - priors_paths[0] % i - for i in range(1, n_classes + 1) + priors_paths[0] % i for i in range(1, n_classes + 1) ] if not all([os.path.exists(p) for p in priors_paths]): raise FileNotFoundError( "One or more prior images do not exist: " - "%s." % ', '.join(priors_paths) + "%s." 
% ", ".join(priors_paths) ) - brackets += [self.inputs.prior_image, - "%g" % self.inputs.prior_weighting] + brackets += [ + self.inputs.prior_image, + "%g" % self.inputs.prior_weighting, + ] - if ( - val == "PriorProbabilityImages" - and isdefined(self.inputs.prior_probability_threshold) + if val == "PriorProbabilityImages" and isdefined( + self.inputs.prior_probability_threshold ): brackets.append("%g" % self.inputs.prior_probability_threshold) - return "--initialization %s[%s]" % (val, ','.join(brackets)) + return "--initialization %s[%s]" % (val, ",".join(brackets)) if opt == "mrf_smoothing_factor": retval = "--mrf [%g" % val if isdefined(self.inputs.mrf_radius): @@ -448,42 +449,42 @@ class N4BiasFieldCorrection(ANTSCommand): >>> n4.inputs.shrink_factor = 3 >>> n4.inputs.n_iterations = [50,50,30,20] >>> n4.cmdline - 'N4BiasFieldCorrection --bspline-fitting [ 300 ] \ --d 3 --input-image structural.nii \ ---convergence [ 50x50x30x20 ] --output structural_corrected.nii \ ---shrink-factor 3' + 'N4BiasFieldCorrection --bspline-fitting [ 300 ] + -d 3 --input-image structural.nii + --convergence [ 50x50x30x20 ] --output structural_corrected.nii + --shrink-factor 3' >>> n4_2 = copy.deepcopy(n4) >>> n4_2.inputs.convergence_threshold = 1e-6 >>> n4_2.cmdline - 'N4BiasFieldCorrection --bspline-fitting [ 300 ] \ --d 3 --input-image structural.nii \ ---convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii \ ---shrink-factor 3' + 'N4BiasFieldCorrection --bspline-fitting [ 300 ] + -d 3 --input-image structural.nii + --convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii + --shrink-factor 3' >>> n4_3 = copy.deepcopy(n4_2) >>> n4_3.inputs.bspline_order = 5 >>> n4_3.cmdline - 'N4BiasFieldCorrection --bspline-fitting [ 300, 5 ] \ --d 3 --input-image structural.nii \ ---convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii \ ---shrink-factor 3' + 'N4BiasFieldCorrection --bspline-fitting [ 300, 5 ] + -d 3 --input-image structural.nii + --convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii + --shrink-factor 3' >>> n4_4 = N4BiasFieldCorrection() >>> n4_4.inputs.input_image = 'structural.nii' >>> n4_4.inputs.save_bias = True >>> n4_4.inputs.dimension = 3 >>> n4_4.cmdline - 'N4BiasFieldCorrection -d 3 --input-image structural.nii \ ---output [ structural_corrected.nii, structural_bias.nii ]' + 'N4BiasFieldCorrection -d 3 --input-image structural.nii + --output [ structural_corrected.nii, structural_bias.nii ]' >>> n4_5 = N4BiasFieldCorrection() >>> n4_5.inputs.input_image = 'structural.nii' >>> n4_5.inputs.dimension = 3 >>> n4_5.inputs.histogram_sharpening = (0.12, 0.02, 200) >>> n4_5.cmdline - 'N4BiasFieldCorrection -d 3 --histogram-sharpening [0.12,0.02,200] \ ---input-image structural.nii --output structural_corrected.nii' + 'N4BiasFieldCorrection -d 3 --histogram-sharpening [0.12,0.02,200] + --input-image structural.nii --output structural_corrected.nii' """ @@ -749,8 +750,9 @@ class CorticalThickness(ANTSCommand): ... 
'BrainSegmentationPrior04.nii.gz'] >>> corticalthickness.inputs.t1_registration_template = 'brain_study_template.nii.gz' >>> corticalthickness.cmdline - 'antsCorticalThickness.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz -e study_template.nii.gz -d 3 \ --s nii.gz -o antsCT_ -p nipype_priors/BrainSegmentationPrior%02d.nii.gz -t brain_study_template.nii.gz' + 'antsCorticalThickness.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz + -e study_template.nii.gz -d 3 -s nii.gz -o antsCT_ + -p nipype_priors/BrainSegmentationPrior%02d.nii.gz -t brain_study_template.nii.gz' """ @@ -990,7 +992,8 @@ class BrainExtraction(ANTSCommand): >>> brainextraction.inputs.brain_template = 'study_template.nii.gz' >>> brainextraction.inputs.brain_probability_mask ='ProbabilityMaskOfStudyTemplate.nii.gz' >>> brainextraction.cmdline - 'antsBrainExtraction.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz -e study_template.nii.gz -d 3 -s nii.gz -o highres001_' + 'antsBrainExtraction.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz + -e study_template.nii.gz -d 3 -s nii.gz -o highres001_' """ @@ -1143,155 +1146,6 @@ def _list_outputs(self): return outputs -class JointFusionInputSpec(ANTSCommandInputSpec): - dimension = traits.Enum( - 3, - 2, - 4, - argstr="%d", - position=0, - usedefault=True, - mandatory=True, - desc="image dimension (2, 3, or 4)", - ) - modalities = traits.Int( - argstr="%d", position=1, mandatory=True, desc="Number of modalities or features" - ) - warped_intensity_images = InputMultiPath( - File(exists=True), argstr="-g %s...", mandatory=True, desc="Warped atlas images" - ) - target_image = InputMultiPath( - File(exists=True), argstr="-tg %s...", mandatory=True, desc="Target image(s)" - ) - warped_label_images = InputMultiPath( - File(exists=True), - argstr="-l %s...", - mandatory=True, - desc="Warped atlas segmentations", - ) - method = traits.Str( - "Joint", - argstr="-m %s", - usedefault=True, - desc=( - "Select voting method. Options: Joint (Joint" - "Label Fusion). May be followed by optional" - "parameters in brackets, e.g., -m Joint[0.1,2]" - ), - ) - alpha = traits.Float( - 0.1, - usedefault=True, - requires=["method"], - desc=("Regularization term added to matrix Mx for inverse"), - ) - beta = traits.Int( - 2, - usedefault=True, - requires=["method"], - desc=("Exponent for mapping intensity difference to joint error"), - ) - output_label_image = File( - argstr="%s", - mandatory=True, - position=-1, - name_template="%s", - output_name="output_label_image", - desc="Output fusion label map image", - ) - patch_radius = traits.ListInt( - minlen=3, - maxlen=3, - argstr="-rp %s", - desc=("Patch radius for similarity measures, scalar or vector. Default: 2x2x2"), - ) - search_radius = traits.ListInt( - minlen=3, maxlen=3, argstr="-rs %s", desc="Local search radius. Default: 3x3x3" - ) - exclusion_region = File( - exists=True, - argstr="-x %s", - desc=("Specify an exclusion region for the given label."), - ) - atlas_group_id = traits.ListInt( - argstr="-gp %d...", desc="Assign a group ID for each atlas" - ) - atlas_group_weights = traits.ListInt( - argstr="-gpw %d...", desc=("Assign the voting weights to each atlas group") - ) - - -class JointFusionOutputSpec(TraitedSpec): - output_label_image = File(exists=True) - # TODO: optional outputs - output_posteriors, output_voting_weights - - -class JointFusion(ANTSCommand): - """ - Segmentation fusion tool. 
- - Examples - -------- - >>> from nipype.interfaces.ants import JointFusion - >>> at = JointFusion() - >>> at.inputs.dimension = 3 - >>> at.inputs.modalities = 1 - >>> at.inputs.method = 'Joint[0.1,2]' - >>> at.inputs.output_label_image ='fusion_labelimage_output.nii' - >>> at.inputs.warped_intensity_images = ['im1.nii', - ... 'im2.nii', - ... 'im3.nii'] - >>> at.inputs.warped_label_images = ['segmentation0.nii.gz', - ... 'segmentation1.nii.gz', - ... 'segmentation1.nii.gz'] - >>> at.inputs.target_image = 'T1.nii' - >>> at.cmdline - 'jointfusion 3 1 -m Joint[0.1,2] -tg T1.nii -g im1.nii -g im2.nii -g im3.nii -l segmentation0.nii.gz \ --l segmentation1.nii.gz -l segmentation1.nii.gz fusion_labelimage_output.nii' - - >>> at.inputs.method = 'Joint' - >>> at.inputs.alpha = 0.5 - >>> at.inputs.beta = 1 - >>> at.inputs.patch_radius = [3,2,1] - >>> at.inputs.search_radius = [1,2,3] - >>> at.cmdline - 'jointfusion 3 1 -m Joint[0.5,1] -rp 3x2x1 -rs 1x2x3 -tg T1.nii -g im1.nii -g im2.nii -g im3.nii \ --l segmentation0.nii.gz -l segmentation1.nii.gz -l segmentation1.nii.gz fusion_labelimage_output.nii' - - """ - - input_spec = JointFusionInputSpec - output_spec = JointFusionOutputSpec - _cmd = "jointfusion" - - def _format_arg(self, opt, spec, val): - if opt == "method": - if "[" in val: - retval = "-m {0}".format(val) - else: - retval = "-m {0}[{1},{2}]".format( - self.inputs.method, self.inputs.alpha, self.inputs.beta - ) - elif opt == "patch_radius": - retval = "-rp {0}".format(self._format_xarray(val)) - elif opt == "search_radius": - retval = "-rs {0}".format(self._format_xarray(val)) - else: - if opt == "warped_intensity_images": - assert len(val) == self.inputs.modalities * len( - self.inputs.warped_label_images - ), "Number of intensity images and label maps must be the same {0}!={1}".format( - len(val), len(self.inputs.warped_label_images) - ) - return super(JointFusion, self)._format_arg(opt, spec, val) - return retval - - def _list_outputs(self): - outputs = self._outputs().get() - outputs["output_label_image"] = os.path.abspath(self.inputs.output_label_image) - return outputs - - class DenoiseImageInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( 2, @@ -1403,7 +1257,7 @@ def _format_arg(self, name, trait_spec, value): return super(DenoiseImage, self)._format_arg(name, trait_spec, value) -class AntsJointFusionInputSpec(ANTSCommandInputSpec): +class _JointFusionInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( 3, 2, @@ -1549,75 +1403,91 @@ class AntsJointFusionInputSpec(ANTSCommandInputSpec): verbose = traits.Bool(False, argstr="-v", desc=("Verbose output.")) -class AntsJointFusionOutputSpec(TraitedSpec): +class _JointFusionOutputSpec(TraitedSpec): out_label_fusion = File(exists=True) - out_intensity_fusion_name_format = traits.Str() - out_label_post_prob_name_format = traits.Str() - out_atlas_voting_weight_name_format = traits.Str() + out_intensity_fusion = OutputMultiPath(File(exists=True)) + out_label_post_prob = OutputMultiPath(File(exists=True)) + out_atlas_voting_weight = OutputMultiPath(File(exists=True)) -class AntsJointFusion(ANTSCommand): +class JointFusion(ANTSCommand): """ + An image fusion algorithm. + + Developed by Hongzhi Wang and Paul Yushkevich, and it won segmentation challenges + at MICCAI 2012 and MICCAI 2013. + The original label fusion framework was extended to accommodate intensities by Brian + Avants. + This implementation is based on Paul's original ITK-style implementation + and Brian's ANTsR implementation. + + References include 1) H. Wang, J. 
W. Suh, S. + Das, J. Pluta, C. Craige, P. Yushkevich, Multi-atlas segmentation with joint + label fusion IEEE Trans. on Pattern Analysis and Machine Intelligence, 35(3), + 611-623, 2013. and 2) H. Wang and P. A. Yushkevich, Multi-atlas segmentation + with joint label fusion and corrective learning--an open source implementation, + Front. Neuroinform., 2013. + Examples -------- - >>> from nipype.interfaces.ants import AntsJointFusion - >>> antsjointfusion = AntsJointFusion() - >>> antsjointfusion.inputs.out_label_fusion = 'ants_fusion_label_output.nii' - >>> antsjointfusion.inputs.atlas_image = [ ['rc1s1.nii','rc1s2.nii'] ] - >>> antsjointfusion.inputs.atlas_segmentation_image = ['segmentation0.nii.gz'] - >>> antsjointfusion.inputs.target_image = ['im1.nii'] - >>> antsjointfusion.cmdline - "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -l segmentation0.nii.gz \ --b 2.0 -o ants_fusion_label_output.nii -s 3x3x3 -t ['im1.nii']" - - >>> antsjointfusion.inputs.target_image = [ ['im1.nii', 'im2.nii'] ] - >>> antsjointfusion.cmdline - "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -l segmentation0.nii.gz \ --b 2.0 -o ants_fusion_label_output.nii -s 3x3x3 -t ['im1.nii', 'im2.nii']" - - >>> antsjointfusion.inputs.atlas_image = [ ['rc1s1.nii','rc1s2.nii'], + >>> from nipype.interfaces.ants import JointFusion + >>> jf = JointFusion() + >>> jf.inputs.out_label_fusion = 'ants_fusion_label_output.nii' + >>> jf.inputs.atlas_image = [ ['rc1s1.nii','rc1s2.nii'] ] + >>> jf.inputs.atlas_segmentation_image = ['segmentation0.nii.gz'] + >>> jf.inputs.target_image = ['im1.nii'] + >>> jf.cmdline + "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -l segmentation0.nii.gz + -b 2.0 -o ants_fusion_label_output.nii -s 3x3x3 -t ['im1.nii']" + + >>> jf.inputs.target_image = [ ['im1.nii', 'im2.nii'] ] + >>> jf.cmdline + "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -l segmentation0.nii.gz + -b 2.0 -o ants_fusion_label_output.nii -s 3x3x3 -t ['im1.nii', 'im2.nii']" + + >>> jf.inputs.atlas_image = [ ['rc1s1.nii','rc1s2.nii'], ... ['rc2s1.nii','rc2s2.nii'] ] - >>> antsjointfusion.inputs.atlas_segmentation_image = ['segmentation0.nii.gz', + >>> jf.inputs.atlas_segmentation_image = ['segmentation0.nii.gz', ... 
'segmentation1.nii.gz'] - >>> antsjointfusion.cmdline - "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] \ --l segmentation0.nii.gz -l segmentation1.nii.gz -b 2.0 -o ants_fusion_label_output.nii \ --s 3x3x3 -t ['im1.nii', 'im2.nii']" - - >>> antsjointfusion.inputs.dimension = 3 - >>> antsjointfusion.inputs.alpha = 0.5 - >>> antsjointfusion.inputs.beta = 1.0 - >>> antsjointfusion.inputs.patch_radius = [3,2,1] - >>> antsjointfusion.inputs.search_radius = [3] - >>> antsjointfusion.cmdline - "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] \ --l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -o ants_fusion_label_output.nii \ --p 3x2x1 -s 3 -t ['im1.nii', 'im2.nii']" - - >>> antsjointfusion.inputs.search_radius = ['mask.nii'] - >>> antsjointfusion.inputs.verbose = True - >>> antsjointfusion.inputs.exclusion_image = ['roi01.nii', 'roi02.nii'] - >>> antsjointfusion.inputs.exclusion_image_label = ['1','2'] - >>> antsjointfusion.cmdline - "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] \ --l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -e 1[roi01.nii] -e 2[roi02.nii] \ --o ants_fusion_label_output.nii -p 3x2x1 -s mask.nii -t ['im1.nii', 'im2.nii'] -v" - - >>> antsjointfusion.inputs.out_label_fusion = 'ants_fusion_label_output.nii' - >>> antsjointfusion.inputs.out_intensity_fusion_name_format = 'ants_joint_fusion_intensity_%d.nii.gz' - >>> antsjointfusion.inputs.out_label_post_prob_name_format = 'ants_joint_fusion_posterior_%d.nii.gz' - >>> antsjointfusion.inputs.out_atlas_voting_weight_name_format = 'ants_joint_fusion_voting_weight_%d.nii.gz' - >>> antsjointfusion.cmdline - "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] \ --l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -e 1[roi01.nii] -e 2[roi02.nii] \ --o [ants_fusion_label_output.nii, ants_joint_fusion_intensity_%d.nii.gz, \ -ants_joint_fusion_posterior_%d.nii.gz, ants_joint_fusion_voting_weight_%d.nii.gz] \ --p 3x2x1 -s mask.nii -t ['im1.nii', 'im2.nii'] -v" + >>> jf.cmdline + "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] + -l segmentation0.nii.gz -l segmentation1.nii.gz -b 2.0 -o ants_fusion_label_output.nii + -s 3x3x3 -t ['im1.nii', 'im2.nii']" + + >>> jf.inputs.dimension = 3 + >>> jf.inputs.alpha = 0.5 + >>> jf.inputs.beta = 1.0 + >>> jf.inputs.patch_radius = [3,2,1] + >>> jf.inputs.search_radius = [3] + >>> jf.cmdline + "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] + -l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -o ants_fusion_label_output.nii + -p 3x2x1 -s 3 -t ['im1.nii', 'im2.nii']" + + >>> jf.inputs.search_radius = ['mask.nii'] + >>> jf.inputs.verbose = True + >>> jf.inputs.exclusion_image = ['roi01.nii', 'roi02.nii'] + >>> jf.inputs.exclusion_image_label = ['1','2'] + >>> jf.cmdline + "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] + -l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -e 1[roi01.nii] -e 2[roi02.nii] + -o ants_fusion_label_output.nii -p 3x2x1 -s mask.nii -t ['im1.nii', 'im2.nii'] -v" + + >>> jf.inputs.out_label_fusion = 'ants_fusion_label_output.nii' + >>> jf.inputs.out_intensity_fusion_name_format = 'ants_joint_fusion_intensity_%d.nii.gz' + >>> jf.inputs.out_label_post_prob_name_format = 'ants_joint_fusion_posterior_%d.nii.gz' + >>> jf.inputs.out_atlas_voting_weight_name_format = 
'ants_joint_fusion_voting_weight_%d.nii.gz' + >>> jf.cmdline + "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] + -l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -e 1[roi01.nii] -e 2[roi02.nii] + -o [ants_fusion_label_output.nii, ants_joint_fusion_intensity_%d.nii.gz, + ants_joint_fusion_posterior_%d.nii.gz, ants_joint_fusion_voting_weight_%d.nii.gz] + -p 3x2x1 -s mask.nii -t ['im1.nii', 'im2.nii'] -v" """ - input_spec = AntsJointFusionInputSpec - output_spec = AntsJointFusionOutputSpec + input_spec = _JointFusionInputSpec + output_spec = _JointFusionOutputSpec _cmd = "antsJointFusion" def _format_arg(self, opt, spec, val): @@ -1679,11 +1549,13 @@ def _format_arg(self, opt, spec, val): ) retval = target_image_cmd elif opt == "atlas_segmentation_image": - assert len(val) == len(self.inputs.atlas_image), ( - "Number of specified " - "segmentations should be identical to the number of atlas image " - "sets {0}!={1}".format(len(val), len(self.inputs.atlas_image)) - ) + if len(val) != len(self.inputs.atlas_image): + raise ValueError( + "Number of specified segmentations should be identical to the number " + "of atlas image sets {0}!={1}".format( + len(val), len(self.inputs.atlas_image) + ) + ) atlas_segmentation_image_cmd = " ".join( ["-l {0}".format(fn) for fn in self.inputs.atlas_segmentation_image] @@ -1699,21 +1571,32 @@ def _list_outputs(self): if isdefined(self.inputs.out_label_fusion): outputs["out_label_fusion"] = os.path.abspath(self.inputs.out_label_fusion) if isdefined(self.inputs.out_intensity_fusion_name_format): - outputs["out_intensity_fusion_name_format"] = os.path.abspath( - self.inputs.out_intensity_fusion_name_format + outputs["out_intensity_fusion"] = glob( + os.path.abspath( + self.inputs.out_intensity_fusion_name_format.replace("%d", "*") + ) ) if isdefined(self.inputs.out_label_post_prob_name_format): - outputs["out_label_post_prob_name_format"] = os.path.abspath( - self.inputs.out_label_post_prob_name_format + outputs["out_label_post_prob"] = glob( + os.path.abspath( + self.inputs.out_label_post_prob_name_format.replace("%d", "*") + ) ) if isdefined(self.inputs.out_atlas_voting_weight_name_format): - outputs["out_atlas_voting_weight_name_format"] = os.path.abspath( - self.inputs.out_atlas_voting_weight_name_format + outputs["out_atlas_voting_weight"] = glob( + os.path.abspath( + self.inputs.out_atlas_voting_weight_name_format.replace("%d", "*") + ) ) - return outputs +# For backwards compatibility +AntsJointFusion = JointFusion +AntsJointFusionInputSpec = _JointFusionInputSpec +AntsJointFusionOutputSpec = _JointFusionOutputSpec + + class KellyKapowskiInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( 3, @@ -1850,7 +1733,8 @@ class KellyKapowskiOutputSpec(TraitedSpec): class KellyKapowski(ANTSCommand): - """ Nipype Interface to ANTs' KellyKapowski, also known as DiReCT. + """ + Nipype Interface to ANTs' KellyKapowski, also known as DiReCT. DiReCT is a registration based estimate of cortical thickness. It was published in S. R. Das, B. B. Avants, M. Grossman, and J. C. 
Gee, Registration based @@ -1865,12 +1749,12 @@ class KellyKapowski(ANTSCommand): >>> kk.inputs.convergence = "[45,0.0,10]" >>> kk.inputs.thickness_prior_estimate = 10 >>> kk.cmdline - 'KellyKapowski --convergence "[45,0.0,10]" \ ---output "[segmentation0_cortical_thickness.nii.gz,segmentation0_warped_white_matter.nii.gz]" \ ---image-dimensionality 3 --gradient-step 0.025000 \ ---maximum-number-of-invert-displacement-field-iterations 20 --number-of-integration-points 10 \ ---segmentation-image "[segmentation0.nii.gz,2,3]" --smoothing-variance 1.000000 \ ---smoothing-velocity-field-parameter 1.500000 --thickness-prior-estimate 10.000000' + 'KellyKapowski --convergence "[45,0.0,10]" + --output "[segmentation0_cortical_thickness.nii.gz,segmentation0_warped_white_matter.nii.gz]" + --image-dimensionality 3 --gradient-step 0.025000 + --maximum-number-of-invert-displacement-field-iterations 20 --number-of-integration-points 10 + --segmentation-image "[segmentation0.nii.gz,2,3]" --smoothing-variance 1.000000 + --smoothing-velocity-field-parameter 1.500000 --thickness-prior-estimate 10.000000' """ diff --git a/nipype/interfaces/ants/tests/test_auto_JointFusion.py b/nipype/interfaces/ants/tests/test_auto_JointFusion.py deleted file mode 100644 index 4919b27a2d..0000000000 --- a/nipype/interfaces/ants/tests/test_auto_JointFusion.py +++ /dev/null @@ -1,45 +0,0 @@ -# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from ..segmentation import JointFusion - - -def test_JointFusion_inputs(): - input_map = dict( - alpha=dict(requires=["method"], usedefault=True,), - args=dict(argstr="%s",), - atlas_group_id=dict(argstr="-gp %d...",), - atlas_group_weights=dict(argstr="-gpw %d...",), - beta=dict(requires=["method"], usedefault=True,), - dimension=dict(argstr="%d", mandatory=True, position=0, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - exclusion_region=dict(argstr="-x %s", extensions=None,), - method=dict(argstr="-m %s", usedefault=True,), - modalities=dict(argstr="%d", mandatory=True, position=1,), - num_threads=dict(nohash=True, usedefault=True,), - output_label_image=dict( - argstr="%s", - extensions=None, - mandatory=True, - name_template="%s", - output_name="output_label_image", - position=-1, - ), - patch_radius=dict(argstr="-rp %s", maxlen=3, minlen=3,), - search_radius=dict(argstr="-rs %s", maxlen=3, minlen=3,), - target_image=dict(argstr="-tg %s...", mandatory=True,), - warped_intensity_images=dict(argstr="-g %s...", mandatory=True,), - warped_label_images=dict(argstr="-l %s...", mandatory=True,), - ) - inputs = JointFusion.input_spec() - - for key, metadata in list(input_map.items()): - for metakey, value in list(metadata.items()): - assert getattr(inputs.traits()[key], metakey) == value - - -def test_JointFusion_outputs(): - output_map = dict(output_label_image=dict(extensions=None,),) - outputs = JointFusion.output_spec() - - for key, metadata in list(output_map.items()): - for metakey, value in list(metadata.items()): - assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_spec_JointFusion.py b/nipype/interfaces/ants/tests/test_spec_JointFusion.py deleted file mode 100644 index a0276afbb0..0000000000 --- a/nipype/interfaces/ants/tests/test_spec_JointFusion.py +++ /dev/null @@ -1,92 +0,0 @@ -# -*- coding: utf-8 -*- -from nipype.testing import example_data -from nipype.interfaces.base import InputMultiPath -from traits.trait_errors import TraitError -from nipype.interfaces.ants import JointFusion -import pytest - - -def 
test_JointFusion_dimension(): - at = JointFusion() - set_dimension = lambda d: setattr(at.inputs, "dimension", int(d)) - for d in range(2, 5): - set_dimension(d) - assert at.inputs.dimension == int(d) - for d in [0, 1, 6, 7]: - with pytest.raises(TraitError): - set_dimension(d) - - -@pytest.mark.parametrize("m", range(1, 5)) -def test_JointFusion_modalities(m): - at = JointFusion() - setattr(at.inputs, "modalities", int(m)) - assert at.inputs.modalities == int(m) - - -@pytest.mark.parametrize("a, b", [(a, b) for a in range(10) for b in range(10)]) -def test_JointFusion_method(a, b): - at = JointFusion() - set_method = lambda a, b: setattr( - at.inputs, "method", "Joint[%.1f,%d]".format(a, b) - ) - _a = a / 10.0 - set_method(_a, b) - # set directly - assert at.inputs.method == "Joint[%.1f,%d]".format(_a, b) - aprime = _a + 0.1 - bprime = b + 1 - at.inputs.alpha = aprime - at.inputs.beta = bprime - # set with alpha/beta - assert at.inputs.method == "Joint[%.1f,%d]".format(aprime, bprime) - - -@pytest.mark.parametrize( - "attr, x", - [(attr, x) for attr in ["patch_radius", "search_radius"] for x in range(5)], -) -def test_JointFusion_radius(attr, x): - at = JointFusion() - setattr(at.inputs, attr, [x, x + 1, x ** x]) - assert at._format_arg(attr, None, getattr(at.inputs, attr))[ - 4: - ] == "{0}x{1}x{2}".format(x, x + 1, x ** x) - - -def test_JointFusion_cmd(): - at = JointFusion() - at.inputs.dimension = 3 - at.inputs.modalities = 1 - at.inputs.method = "Joint[0.1,2]" - at.inputs.output_label_image = "fusion_labelimage_output.nii" - warped_intensity_images = [example_data("im1.nii"), example_data("im2.nii")] - at.inputs.warped_intensity_images = warped_intensity_images - segmentation_images = [ - example_data("segmentation0.nii.gz"), - example_data("segmentation1.nii.gz"), - ] - at.inputs.warped_label_images = segmentation_images - T1_image = example_data("T1.nii") - at.inputs.target_image = T1_image - at.inputs.patch_radius = [3, 2, 1] - at.inputs.search_radius = [1, 2, 3] - expected_command = ( - "jointfusion 3 1 -m Joint[0.1,2] -rp 3x2x1 -rs 1x2x3" - " -tg %s -g %s -g %s -l %s -l %s" - " fusion_labelimage_output.nii" - ) % ( - T1_image, - warped_intensity_images[0], - warped_intensity_images[1], - segmentation_images[0], - segmentation_images[1], - ) - assert at.cmdline == expected_command - # setting intensity or labels with unequal lengths raises error - with pytest.raises(AssertionError): - at._format_arg( - "warped_intensity_images", - InputMultiPath, - warped_intensity_images + [example_data("im3.nii")], - ) From 9e7c99713b78b68f199df141a05b9fd7390eb3d6 Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 27 Feb 2020 16:43:47 -0800 Subject: [PATCH 0753/1665] fix: ``make specs`` --- nipype/interfaces/ants/__init__.py | 2 +- nipype/interfaces/ants/tests/test_auto_AI.py | 37 ++++++++++++ .../ants/tests/test_auto_Atropos.py | 3 +- .../ants/tests/test_auto_ImageMath.py | 37 ++++++++++++ .../ants/tests/test_auto_JointFusion.py | 59 +++++++++++++++++++ .../tests/test_auto_ResampleImageBySpacing.py | 38 ++++++++++++ .../ants/tests/test_auto_ThresholdImage.py | 47 +++++++++++++++ 7 files changed, 221 insertions(+), 2 deletions(-) create mode 100644 nipype/interfaces/ants/tests/test_auto_AI.py create mode 100644 nipype/interfaces/ants/tests/test_auto_ImageMath.py create mode 100644 nipype/interfaces/ants/tests/test_auto_JointFusion.py create mode 100644 nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py create mode 100644 nipype/interfaces/ants/tests/test_auto_ThresholdImage.py 
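Note: the regenerated ``test_auto_*`` modules below are produced by ``tools/checkspecs.py`` (the ``make specs`` target) and simply assert each trait's metadata against the interface spec. A minimal, illustrative sketch of the kind of check they encode is shown here; the trait name and expected values are taken from the regenerated JointFusion spec below, and it should only need an importable nipype, not the ANTs binaries:

    from nipype.interfaces.ants import JointFusion

    # Mirror one assertion from the regenerated test_auto_JointFusion.py:
    # the renamed interface exposes an ``alpha`` trait wired to the ``-a %s``
    # flag and carrying a default value.
    inputs = JointFusion.input_spec()
    assert inputs.traits()["alpha"].argstr == "-a %s"
    assert inputs.traits()["alpha"].usedefault is True
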
diff --git a/nipype/interfaces/ants/__init__.py b/nipype/interfaces/ants/__init__.py index 0dd4c66adf..dc96642f23 100644 --- a/nipype/interfaces/ants/__init__.py +++ b/nipype/interfaces/ants/__init__.py @@ -3,7 +3,7 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Top-level namespace for ants.""" -# RegistratIon programs +# Registration programs from .registration import ( ANTS, CompositeTransformUtil, diff --git a/nipype/interfaces/ants/tests/test_auto_AI.py b/nipype/interfaces/ants/tests/test_auto_AI.py new file mode 100644 index 0000000000..3a3e1485d1 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_AI.py @@ -0,0 +1,37 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from ..utils import AI + + +def test_AI_inputs(): + input_map = dict( + args=dict(argstr="%s",), + convergence=dict(argstr="-c [%d,%g,%d]", usedefault=True,), + dimension=dict(argstr="-d %d", usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + fixed_image=dict(extensions=None, mandatory=True,), + fixed_image_mask=dict(argstr="-x %s", extensions=None,), + metric=dict(argstr="-m %s", mandatory=True,), + moving_image=dict(extensions=None, mandatory=True,), + moving_image_mask=dict(extensions=None, requires=["fixed_image_mask"],), + num_threads=dict(nohash=True, usedefault=True,), + output_transform=dict(argstr="-o %s", extensions=None, usedefault=True,), + principal_axes=dict(argstr="-p %d", usedefault=True, xor=["blobs"],), + search_factor=dict(argstr="-s [%g,%g]", usedefault=True,), + search_grid=dict(argstr="-g %s",), + transform=dict(argstr="-t %s[%g]", usedefault=True,), + verbose=dict(argstr="-v %d", usedefault=True,), + ) + inputs = AI.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_AI_outputs(): + output_map = dict(output_transform=dict(extensions=None,),) + outputs = AI.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_Atropos.py b/nipype/interfaces/ants/tests/test_auto_Atropos.py index 021348bba0..09644907ab 100644 --- a/nipype/interfaces/ants/tests/test_auto_Atropos.py +++ b/nipype/interfaces/ants/tests/test_auto_Atropos.py @@ -13,6 +13,7 @@ def test_Atropos_inputs(): argstr="%s", mandatory=True, requires=["number_of_tissue_classes"], ), intensity_images=dict(argstr="--intensity-image %s...", mandatory=True,), + kmeans_init_centers=dict(), likelihood_model=dict(argstr="--likelihood-model %s",), mask_image=dict(argstr="--mask-image %s", extensions=None, mandatory=True,), maximum_number_of_icm_terations=dict(requires=["icm_use_synchronous_update"],), @@ -26,7 +27,7 @@ def test_Atropos_inputs(): ), output_posteriors_name_template=dict(usedefault=True,), posterior_formulation=dict(argstr="%s",), - prior_probability_images=dict(), + prior_image=dict(), prior_probability_threshold=dict(requires=["prior_weighting"],), prior_weighting=dict(), save_posteriors=dict(), diff --git a/nipype/interfaces/ants/tests/test_auto_ImageMath.py b/nipype/interfaces/ants/tests/test_auto_ImageMath.py new file mode 100644 index 0000000000..12aabf7a6e --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_ImageMath.py @@ -0,0 +1,37 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from ..utils import ImageMath + + +def test_ImageMath_inputs(): + input_map = dict( + args=dict(argstr="%s",), + 
copy_header=dict(usedefault=True,), + dimension=dict(argstr="%d", position=1, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + num_threads=dict(nohash=True, usedefault=True,), + op1=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + op2=dict(argstr="%s", position=-1,), + operation=dict(argstr="%s", mandatory=True, position=3,), + output_image=dict( + argstr="%s", + extensions=None, + keep_extension=True, + name_source=["op1"], + name_template="%s_maths", + position=2, + ), + ) + inputs = ImageMath.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_ImageMath_outputs(): + output_map = dict(output_image=dict(extensions=None,),) + outputs = ImageMath.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_JointFusion.py b/nipype/interfaces/ants/tests/test_auto_JointFusion.py new file mode 100644 index 0000000000..57af10173d --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_JointFusion.py @@ -0,0 +1,59 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from ..segmentation import JointFusion + + +def test_JointFusion_inputs(): + input_map = dict( + alpha=dict(argstr="-a %s", usedefault=True,), + args=dict(argstr="%s",), + atlas_image=dict(argstr="-g %s...", mandatory=True,), + atlas_segmentation_image=dict(argstr="-l %s...", mandatory=True,), + beta=dict(argstr="-b %s", usedefault=True,), + constrain_nonnegative=dict(argstr="-c", usedefault=True,), + dimension=dict(argstr="-d %d",), + environ=dict(nohash=True, usedefault=True,), + exclusion_image=dict(), + exclusion_image_label=dict(argstr="-e %s", requires=["exclusion_image"],), + mask_image=dict(argstr="-x %s", extensions=None,), + num_threads=dict(nohash=True, usedefault=True,), + out_atlas_voting_weight_name_format=dict( + requires=[ + "out_label_fusion", + "out_intensity_fusion_name_format", + "out_label_post_prob_name_format", + ], + ), + out_intensity_fusion_name_format=dict(argstr="",), + out_label_fusion=dict(argstr="%s", extensions=None, hash_files=False,), + out_label_post_prob_name_format=dict( + requires=["out_label_fusion", "out_intensity_fusion_name_format"], + ), + patch_metric=dict(argstr="-m %s",), + patch_radius=dict(argstr="-p %s", maxlen=3, minlen=3,), + retain_atlas_voting_images=dict(argstr="-f", usedefault=True,), + retain_label_posterior_images=dict( + argstr="-r", requires=["atlas_segmentation_image"], usedefault=True, + ), + search_radius=dict(argstr="-s %s", usedefault=True,), + target_image=dict(argstr="-t %s", mandatory=True,), + verbose=dict(argstr="-v",), + ) + inputs = JointFusion.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_JointFusion_outputs(): + output_map = dict( + out_atlas_voting_weight=dict(), + out_intensity_fusion=dict(), + out_label_fusion=dict(extensions=None,), + out_label_post_prob=dict(), + ) + outputs = JointFusion.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py 
b/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py new file mode 100644 index 0000000000..a2834bf36c --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py @@ -0,0 +1,38 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from ..utils import ResampleImageBySpacing + + +def test_ResampleImageBySpacing_inputs(): + input_map = dict( + addvox=dict(argstr="%d", position=6, requires=["apply_smoothing"],), + apply_smoothing=dict(argstr="%d", position=5,), + args=dict(argstr="%s",), + dimension=dict(argstr="%d", position=1, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + input_image=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + nn_interp=dict(argstr="%d", position=-1, requires=["addvox"],), + num_threads=dict(nohash=True, usedefault=True,), + out_spacing=dict(argstr="%s", mandatory=True, position=4,), + output_image=dict( + argstr="%s", + extensions=None, + keep_extension=True, + name_source=["input_image"], + name_template="%s_resampled", + position=3, + ), + ) + inputs = ResampleImageBySpacing.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_ResampleImageBySpacing_outputs(): + output_map = dict(output_image=dict(extensions=None,),) + outputs = ResampleImageBySpacing.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_ThresholdImage.py b/nipype/interfaces/ants/tests/test_auto_ThresholdImage.py new file mode 100644 index 0000000000..c8e399f644 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_ThresholdImage.py @@ -0,0 +1,47 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from ..utils import ThresholdImage + + +def test_ThresholdImage_inputs(): + input_map = dict( + args=dict(argstr="%s",), + copy_header=dict(mandatory=True, usedefault=True,), + dimension=dict(argstr="%d", position=1, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + input_image=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + input_mask=dict(argstr="%s", extensions=None, requires=["num_thresholds"],), + inside_value=dict(argstr="%f", position=6, requires=["th_low"],), + mode=dict( + argstr="%s", + position=4, + requires=["num_thresholds"], + xor=["th_low", "th_high"], + ), + num_threads=dict(nohash=True, usedefault=True,), + num_thresholds=dict(argstr="%d", position=5,), + output_image=dict( + argstr="%s", + extensions=None, + keep_extension=True, + name_source=["input_image"], + name_template="%s_resampled", + position=3, + ), + outside_value=dict(argstr="%f", position=7, requires=["th_low"],), + th_high=dict(argstr="%f", position=5, xor=["mode"],), + th_low=dict(argstr="%f", position=4, xor=["mode"],), + ) + inputs = ThresholdImage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_ThresholdImage_outputs(): + output_map = dict(output_image=dict(extensions=None,),) + outputs = ThresholdImage.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value From dfe85017cd1c1607e26f424376b0a27e668f8bcc Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: 
Thu, 27 Feb 2020 19:58:08 -0800 Subject: [PATCH 0754/1665] tst: reduce some complexity and increase coverage --- nipype/interfaces/ants/segmentation.py | 90 +++++++++----------------- 1 file changed, 32 insertions(+), 58 deletions(-) diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py index d2d75d900f..dc205ee4cf 100644 --- a/nipype/interfaces/ants/segmentation.py +++ b/nipype/interfaces/ants/segmentation.py @@ -2,6 +2,7 @@ import os from glob import glob from ...external.due import BibTeX +from ...utils.imagemanip import copy_header as _copy_header from ...utils.filemanip import split_filename, copyfile, which, fname_presuffix from ..base import TraitedSpec, File, traits, InputMultiPath, OutputMultiPath, isdefined from .base import ANTSCommand, ANTSCommandInputSpec @@ -235,7 +236,6 @@ def _gen_filename(self, name): _, name, ext = split_filename(self.inputs.intensity_images[0]) output = name + "_labeled" + ext return output - return None def _list_outputs(self): outputs = self._outputs().get() @@ -538,24 +538,15 @@ def _list_outputs(self): # Fix headers if self.inputs.copy_header: - self._copy_header(outputs["output_image"]) + _copy_header(self.inputs.input_image, outputs["output_image"], + keep_dtype=False) if self._out_bias_file: outputs["bias_image"] = os.path.abspath(self._out_bias_file) if self.inputs.copy_header: - self._copy_header(outputs["bias_image"]) + _copy_header(self.inputs.input_image, outputs["bias_image"]) return outputs - def _copy_header(self, fname): - """Copy header from input image to an output image.""" - import nibabel as nb - - in_img = nb.load(self.inputs.input_image) - out_img = nb.load(fname, mmap=False) - new_img = out_img.__class__(out_img.get_fdata(), in_img.affine, in_img.header) - new_img.set_data_dtype(out_img.get_data_dtype()) - new_img.to_filename(fname) - class CorticalThicknessInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( @@ -1500,55 +1491,44 @@ def _format_arg(self, opt, spec, val): self.inputs.exclusion_image[ii], ) ) - retval = " ".join(retval) - elif opt == "patch_radius": - retval = "-p {0}".format(self._format_xarray(val)) - elif opt == "search_radius": - retval = "-s {0}".format(self._format_xarray(val)) - elif opt == "out_label_fusion": - if isdefined(self.inputs.out_intensity_fusion_name_format): - if isdefined(self.inputs.out_label_post_prob_name_format): - if isdefined(self.inputs.out_atlas_voting_weight_name_format): - retval = "-o [{0}, {1}, {2}, {3}]".format( - self.inputs.out_label_fusion, - self.inputs.out_intensity_fusion_name_format, - self.inputs.out_label_post_prob_name_format, - self.inputs.out_atlas_voting_weight_name_format, - ) - else: - retval = "-o [{0}, {1}, {2}]".format( - self.inputs.out_label_fusion, - self.inputs.out_intensity_fusion_name_format, - self.inputs.out_label_post_prob_name_format, - ) + return " ".join(retval) + if opt == "patch_radius": + return "-p {0}".format(self._format_xarray(val)) + if opt == "search_radius": + return "-s {0}".format(self._format_xarray(val)) + if opt == "out_label_fusion": + args = [self.inputs.out_label_fusion] + for option in ( + self.inputs.out_intensity_fusion_name_format, + self.inputs.out_label_post_prob_name_format, + self.inputs.out_atlas_voting_weight_name_format + ): + if isdefined(option): + args.append(option) else: - retval = "-o [{0}, {1}]".format( - self.inputs.out_label_fusion, - self.inputs.out_intensity_fusion_name_format, - ) - else: - retval = "-o {0}".format(self.inputs.out_label_fusion) - elif opt == 
"out_intensity_fusion_name_format": - retval = "" + break + if len(args) == 1: + return " ".join(("-o", args[0])) + return "-o [{}]".format(", ".join(args)) + if opt == "out_intensity_fusion_name_format": if not isdefined(self.inputs.out_label_fusion): - retval = "-o {0}".format(self.inputs.out_intensity_fusion_name_format) - elif opt == "atlas_image": - atlas_image_cmd = " ".join( + return "-o {0}".format(self.inputs.out_intensity_fusion_name_format) + return "" + if opt == "atlas_image": + return " ".join( [ "-g [{0}]".format(", ".join("'%s'" % fn for fn in ai)) for ai in self.inputs.atlas_image ] ) - retval = atlas_image_cmd - elif opt == "target_image": - target_image_cmd = " ".join( + if opt == "target_image": + return " ".join( [ "-t [{0}]".format(", ".join("'%s'" % fn for fn in ai)) for ai in self.inputs.target_image ] ) - retval = target_image_cmd - elif opt == "atlas_segmentation_image": + if opt == "atlas_segmentation_image": if len(val) != len(self.inputs.atlas_image): raise ValueError( "Number of specified segmentations should be identical to the number " @@ -1557,14 +1537,10 @@ def _format_arg(self, opt, spec, val): ) ) - atlas_segmentation_image_cmd = " ".join( + return " ".join( ["-l {0}".format(fn) for fn in self.inputs.atlas_segmentation_image] ) - retval = atlas_segmentation_image_cmd - else: - - return super(AntsJointFusion, self)._format_arg(opt, spec, val) - return retval + return super(AntsJointFusion, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() @@ -1805,8 +1781,6 @@ def _gen_filename(self, name): output = name + "_warped_white_matter" + ext return output - return None - def _format_arg(self, opt, spec, val): if opt == "segmentation_image": newval = "[{0},{1},{2}]".format( From dffcb7815b7e1fc7aa073746adb01bd1cb8efc3f Mon Sep 17 00:00:00 2001 From: oesteban Date: Fri, 28 Feb 2020 14:54:12 -0800 Subject: [PATCH 0755/1665] fix: copy header for ResampleImageBySpacing Closes nipreps/niworkflows#459 --- .../ants/tests/test_auto_ResampleImageBySpacing.py | 1 + nipype/interfaces/ants/utils.py | 14 ++++++++++++++ 2 files changed, 15 insertions(+) diff --git a/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py b/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py index a2834bf36c..dce64b8930 100644 --- a/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py +++ b/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py @@ -7,6 +7,7 @@ def test_ResampleImageBySpacing_inputs(): addvox=dict(argstr="%d", position=6, requires=["apply_smoothing"],), apply_smoothing=dict(argstr="%d", position=5,), args=dict(argstr="%s",), + copy_header=dict(mandatory=True, usedefault=True,), dimension=dict(argstr="%d", position=1, usedefault=True,), environ=dict(nohash=True, usedefault=True,), input_image=dict(argstr="%s", extensions=None, mandatory=True, position=2,), diff --git a/nipype/interfaces/ants/utils.py b/nipype/interfaces/ants/utils.py index 28f4cbfd97..4322fadb7c 100644 --- a/nipype/interfaces/ants/utils.py +++ b/nipype/interfaces/ants/utils.py @@ -141,6 +141,12 @@ class _ResampleImageBySpacingInputSpec(ANTSCommandInputSpec): nn_interp = traits.Bool( argstr="%d", desc="nn interpolation", position=-1, requires=["addvox"] ) + copy_header = traits.Bool( + True, + mandatory=True, + usedefault=True, + desc="copy headers of the original image into the output (corrected) file", + ) class _ResampleImageBySpacingOutputSpec(TraitedSpec): @@ -193,6 +199,14 @@ def _format_arg(self, name, trait_spec, value): 
return super(ResampleImageBySpacing, self)._format_arg(name, trait_spec, value) + def _list_outputs(self): + outputs = super(ResampleImageBySpacing, self)._list_outputs() + if self.inputs.copy_header: # Fix headers + _copy_header( + self.inputs.input_image, outputs["output_image"], keep_dtype=True + ) + return outputs + class _ThresholdImageInputSpec(ANTSCommandInputSpec): dimension = traits.Int( From c1007723dc78d2bb15afb9439514fd2bf0a213d7 Mon Sep 17 00:00:00 2001 From: Daniel Geisler Date: Tue, 3 Mar 2020 18:06:36 +0100 Subject: [PATCH 0756/1665] FIX: check if result has an attribute outputs before accessing it --- nipype/pipeline/engine/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index 78d9417f9f..d8d2f1f9e8 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -291,7 +291,7 @@ def load_resultfile(results_file, resolve=True): raise FileNotFoundError(results_file) result = loadpkl(results_file) - if resolve and result.outputs: + if resolve and hasattr(result,"outputs") and result.outputs: try: outputs = result.outputs.get() except TypeError: # This is a Bunch From cf48cbd60452bd49deb68f5fe6d122b9f3f36467 Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Tue, 3 Mar 2020 11:09:01 -0800 Subject: [PATCH 0757/1665] Apply suggestions from code review Co-Authored-By: Chris Markiewicz --- nipype/interfaces/ants/segmentation.py | 2 +- nipype/interfaces/ants/utils.py | 28 ++++++++++---------------- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py index dc205ee4cf..7b35942bab 100644 --- a/nipype/interfaces/ants/segmentation.py +++ b/nipype/interfaces/ants/segmentation.py @@ -35,7 +35,7 @@ class AtroposInputSpec(ANTSCommandInputSpec): prior_image = traits.Either( File(exists=True), traits.Str, - desc="either a string pattern (e.g., 'prior%%02d.nii') or an existing vector-image file.", + desc="either a string pattern (e.g., 'prior%02d.nii') or an existing vector-image file.", ) number_of_tissue_classes = traits.Int(mandatory=True) prior_weighting = traits.Float() diff --git a/nipype/interfaces/ants/utils.py b/nipype/interfaces/ants/utils.py index 4322fadb7c..637c6260d9 100644 --- a/nipype/interfaces/ants/utils.py +++ b/nipype/interfaces/ants/utils.py @@ -1,7 +1,7 @@ """ANTs' utilities.""" import os from ...utils.imagemanip import copy_header as _copy_header -from ..base import traits, isdefined, TraitedSpec, File, Str, InputMultiPath +from ..base import traits, isdefined, TraitedSpec, File, Str, InputMultiObject from .base import ANTSCommandInputSpec, ANTSCommand @@ -400,11 +400,10 @@ class AI(ANTSCommand): 'antsAI -c [10,1e-06,10] -d 3 -m Mattes[structural.nii,epi.nii,32,Regular,1] -o initialization.mat -p 0 -s [20,0.12] -t Affine[0.1] -v 0' - >>> AI( - ... fixed_image='structural.nii', - ... moving_image='epi.nii', - ... metric=('Mattes', 32, 'Regular', 1), - ... search_grid=(12, (1, 1, 1)), + >>> AI(fixed_image='structural.nii', + ... moving_image='epi.nii', + ... metric=('Mattes', 32, 'Regular', 1), + ... search_grid=(12, (1, 1, 1)), ... 
).cmdline 'antsAI -c [10,1e-06,10] -d 3 -m Mattes[structural.nii,epi.nii,32,Regular,1] -o initialization.mat -p 0 -s [20,0.12] -g [12.0,1x1x1] -t Affine[0.1] -v 0' @@ -418,15 +417,11 @@ class AI(ANTSCommand): def _run_interface(self, runtime, correct_return_codes=(0,)): runtime = super(AI, self)._run_interface(runtime, correct_return_codes) - setattr( - self, - "_output", - { - "output_transform": os.path.join( - runtime.cwd, os.path.basename(self.inputs.output_transform) - ) - }, - ) + self._output = { + "output_transform": os.path.join( + runtime.cwd, os.path.basename(self.inputs.output_transform) + ) + } return runtime def _format_arg(self, opt, spec, val): @@ -439,8 +434,7 @@ def _format_arg(self, opt, spec, val): return spec.argstr % val if opt == "search_grid": - val1 = "x".join(["%g" % v for v in val[1]]) - fmtval = "[%s]" % ",".join([str(val[0]), val1]) + fmtval = "[%s,%s]" % (val[0], "x".join("%g" % v for v in val[1])) return spec.argstr % fmtval if opt == "fixed_image_mask": From 051c1e54f2c23c8c3279c60246c14593c987f1ee Mon Sep 17 00:00:00 2001 From: oesteban Date: Tue, 3 Mar 2020 18:11:38 -0800 Subject: [PATCH 0758/1665] fix(compat): ensure nibabel < 3 compatiblity of tests --- nipype/utils/tests/test_imagemanip.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nipype/utils/tests/test_imagemanip.py b/nipype/utils/tests/test_imagemanip.py index 922d7681f1..af4c708b34 100644 --- a/nipype/utils/tests/test_imagemanip.py +++ b/nipype/utils/tests/test_imagemanip.py @@ -16,16 +16,16 @@ def test_copy_header(tmp_path, keep_dtype): np.zeros((10, 10, 10), dtype='uint8'), None, None) nii.set_qform(np.diag((1., 2., 3., 1.)), code=2) nii.set_sform(np.diag((1., 2., 3., 1.)), code=1) - nii.to_filename(fname1) + nii.to_filename(str(fname1)) nii.set_data_dtype('float32') nii.set_qform(np.eye(4), code=1) - nii.to_filename(fname2) + nii.to_filename(str(fname2)) copied = nb.load( copy_header(fname1, fname2, keep_dtype=keep_dtype) ) - ref = nb.load(fname1) + ref = nb.load(str(fname1)) assert np.all(copied.get_qform(coded=False) == ref.get_qform(coded=False)) assert np.all(copied.get_sform(coded=False) == ref.get_sform(coded=False)) assert copied.get_qform(coded=True)[1] == ref.get_qform(coded=True)[1] From 774f41d3bd8bb93ff83ccdff775661ceda509e14 Mon Sep 17 00:00:00 2001 From: oesteban Date: Tue, 3 Mar 2020 18:16:54 -0800 Subject: [PATCH 0759/1665] fix: use ``aggregate_outputs`` instead of ``_list_outputs`` --- nipype/interfaces/ants/utils.py | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/nipype/interfaces/ants/utils.py b/nipype/interfaces/ants/utils.py index 637c6260d9..0dfbf83937 100644 --- a/nipype/interfaces/ants/utils.py +++ b/nipype/interfaces/ants/utils.py @@ -98,10 +98,14 @@ class ImageMath(ANTSCommand): input_spec = _ImageMathInputSpec output_spec = _ImageMathOuputSpec - def _list_outputs(self): - outputs = super(ImageMath, self)._list_outputs() + def aggregate_outputs(self, runtime=None, needed_outputs=None): + """Overload the aggregation with header replacement, if required.""" + outputs = super(ImageMath, self).aggregate_outputs( + runtime, needed_outputs) if self.inputs.copy_header: # Fix headers - _copy_header(self.inputs.op1, outputs["output_image"], keep_dtype=True) + _copy_header( + self.inputs.op1, outputs["output_image"], keep_dtype=True + ) return outputs @@ -199,8 +203,10 @@ def _format_arg(self, name, trait_spec, value): return super(ResampleImageBySpacing, self)._format_arg(name, trait_spec, value) - def 
_list_outputs(self): - outputs = super(ResampleImageBySpacing, self)._list_outputs() + def aggregate_outputs(self, runtime=None, needed_outputs=None): + """Overload the aggregation with header replacement, if required.""" + outputs = super(ResampleImageBySpacing, self).aggregate_outputs( + runtime, needed_outputs) if self.inputs.copy_header: # Fix headers _copy_header( self.inputs.input_image, outputs["output_image"], keep_dtype=True @@ -293,8 +299,10 @@ class ThresholdImage(ANTSCommand): input_spec = _ThresholdImageInputSpec output_spec = _ThresholdImageOutputSpec - def _list_outputs(self): - outputs = super(ThresholdImage, self)._list_outputs() + def aggregate_outputs(self, runtime=None, needed_outputs=None): + """Overload the aggregation with header replacement, if required.""" + outputs = super(ThresholdImage, self).aggregate_outputs( + runtime, needed_outputs) if self.inputs.copy_header: # Fix headers _copy_header( self.inputs.input_image, outputs["output_image"], keep_dtype=True From 05441243b0c444a4a36e7cdeebac21da8c0ba9b1 Mon Sep 17 00:00:00 2001 From: oesteban Date: Tue, 3 Mar 2020 18:21:56 -0800 Subject: [PATCH 0760/1665] enh: remove ``aggregate_outputs`` duplications, fix InputMultiPath --- nipype/interfaces/ants/base.py | 15 +++++++ .../tests/test_auto_FixHeaderANTSCommand.py | 15 +++++++ nipype/interfaces/ants/utils.py | 45 +++---------------- 3 files changed, 37 insertions(+), 38 deletions(-) create mode 100644 nipype/interfaces/ants/tests/test_auto_FixHeaderANTSCommand.py diff --git a/nipype/interfaces/ants/base.py b/nipype/interfaces/ants/base.py index 4b5e5ef8db..4d27fa619f 100644 --- a/nipype/interfaces/ants/base.py +++ b/nipype/interfaces/ants/base.py @@ -7,6 +7,7 @@ # Local imports from ... import logging, LooseVersion from ..base import CommandLine, CommandLineInputSpec, traits, isdefined, PackageInfo +from ...utils.imagemanip import copy_header as _copy_header iflogger = logging.getLogger("nipype.interface") @@ -121,3 +122,17 @@ def set_default_num_threads(cls, num_threads): @property def version(self): return Info.version() + + +class FixHeaderANTSCommand(ANTSCommand): + """Fix header if the copy_header input is on.""" + + def aggregate_outputs(self, runtime=None, needed_outputs=None): + """Overload the aggregation with header replacement, if required.""" + outputs = super(FixHeaderANTSCommand, self).aggregate_outputs( + runtime, needed_outputs) + if self.inputs.copy_header: # Fix headers + _copy_header( + self.inputs.op1, outputs["output_image"], keep_dtype=True + ) + return outputs diff --git a/nipype/interfaces/ants/tests/test_auto_FixHeaderANTSCommand.py b/nipype/interfaces/ants/tests/test_auto_FixHeaderANTSCommand.py new file mode 100644 index 0000000000..2f0d66dca9 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_FixHeaderANTSCommand.py @@ -0,0 +1,15 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from ..base import FixHeaderANTSCommand + + +def test_FixHeaderANTSCommand_inputs(): + input_map = dict( + args=dict(argstr="%s",), + environ=dict(nohash=True, usedefault=True,), + num_threads=dict(nohash=True, usedefault=True,), + ) + inputs = FixHeaderANTSCommand.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/utils.py b/nipype/interfaces/ants/utils.py index 0dfbf83937..da23bd8213 100644 --- a/nipype/interfaces/ants/utils.py +++ b/nipype/interfaces/ants/utils.py @@ -1,8 +1,7 @@ """ANTs' 
utilities.""" import os -from ...utils.imagemanip import copy_header as _copy_header from ..base import traits, isdefined, TraitedSpec, File, Str, InputMultiObject -from .base import ANTSCommandInputSpec, ANTSCommand +from .base import ANTSCommandInputSpec, ANTSCommand, FixHeaderANTSCommand class _ImageMathInputSpec(ANTSCommandInputSpec): @@ -68,7 +67,7 @@ class _ImageMathOuputSpec(TraitedSpec): output_image = File(exists=True, desc="output image file") -class ImageMath(ANTSCommand): +class ImageMath(FixHeaderANTSCommand): """ Operations over images. @@ -98,16 +97,6 @@ class ImageMath(ANTSCommand): input_spec = _ImageMathInputSpec output_spec = _ImageMathOuputSpec - def aggregate_outputs(self, runtime=None, needed_outputs=None): - """Overload the aggregation with header replacement, if required.""" - outputs = super(ImageMath, self).aggregate_outputs( - runtime, needed_outputs) - if self.inputs.copy_header: # Fix headers - _copy_header( - self.inputs.op1, outputs["output_image"], keep_dtype=True - ) - return outputs - class _ResampleImageBySpacingInputSpec(ANTSCommandInputSpec): dimension = traits.Int( @@ -157,7 +146,7 @@ class _ResampleImageBySpacingOutputSpec(TraitedSpec): output_image = File(exists=True, desc="resampled file") -class ResampleImageBySpacing(ANTSCommand): +class ResampleImageBySpacing(FixHeaderANTSCommand): """ Resample an image with a given spacing. @@ -203,16 +192,6 @@ def _format_arg(self, name, trait_spec, value): return super(ResampleImageBySpacing, self)._format_arg(name, trait_spec, value) - def aggregate_outputs(self, runtime=None, needed_outputs=None): - """Overload the aggregation with header replacement, if required.""" - outputs = super(ResampleImageBySpacing, self).aggregate_outputs( - runtime, needed_outputs) - if self.inputs.copy_header: # Fix headers - _copy_header( - self.inputs.input_image, outputs["output_image"], keep_dtype=True - ) - return outputs - class _ThresholdImageInputSpec(ANTSCommandInputSpec): dimension = traits.Int( @@ -269,7 +248,7 @@ class _ThresholdImageOutputSpec(TraitedSpec): output_image = File(exists=True, desc="resampled file") -class ThresholdImage(ANTSCommand): +class ThresholdImage(FixHeaderANTSCommand): """ Apply thresholds on images. @@ -299,16 +278,6 @@ class ThresholdImage(ANTSCommand): input_spec = _ThresholdImageInputSpec output_spec = _ThresholdImageOutputSpec - def aggregate_outputs(self, runtime=None, needed_outputs=None): - """Overload the aggregation with header replacement, if required.""" - outputs = super(ThresholdImage, self).aggregate_outputs( - runtime, needed_outputs) - if self.inputs.copy_header: # Fix headers - _copy_header( - self.inputs.input_image, outputs["output_image"], keep_dtype=True - ) - return outputs - class _AIInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( @@ -465,7 +434,7 @@ class AverageAffineTransformInputSpec(ANTSCommandInputSpec): position=1, desc="Outputfname.txt: the name of the resulting transform.", ) - transforms = InputMultiPath( + transforms = InputMultiObject( File(exists=True), argstr="%s", mandatory=True, @@ -526,7 +495,7 @@ class AverageImagesInputSpec(ANTSCommandInputSpec): desc="Normalize: if true, the 2nd image is divided by its mean. 
" "This will select the largest image to average into.", ) - images = InputMultiPath( + images = InputMultiObject( File(exists=True), argstr="%s", mandatory=True, @@ -767,7 +736,7 @@ class ComposeMultiTransformInputSpec(ANTSCommandInputSpec): position=2, desc="Reference image (only necessary when output is warpfield)", ) - transforms = InputMultiPath( + transforms = InputMultiObject( File(exists=True), argstr="%s", mandatory=True, From 75ce23df7495ba520ef5d5529e81fb92caeb0a97 Mon Sep 17 00:00:00 2001 From: Daniel Ge Date: Wed, 4 Mar 2020 16:21:42 +0100 Subject: [PATCH 0761/1665] Update nipype/pipeline/engine/utils.py Co-Authored-By: Chris Markiewicz --- nipype/pipeline/engine/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index d8d2f1f9e8..d7a65b74de 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -291,7 +291,7 @@ def load_resultfile(results_file, resolve=True): raise FileNotFoundError(results_file) result = loadpkl(results_file) - if resolve and hasattr(result,"outputs") and result.outputs: + if resolve and getattr(result, "outputs", None): try: outputs = result.outputs.get() except TypeError: # This is a Bunch From f88add41dca713c67645752b56b533b407ca258d Mon Sep 17 00:00:00 2001 From: oesteban Date: Wed, 4 Mar 2020 10:19:43 -0800 Subject: [PATCH 0762/1665] fix: roll back under-i/o specs --- nipype/interfaces/ants/segmentation.py | 12 +++++----- nipype/interfaces/ants/utils.py | 32 +++++++++++++------------- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py index 7b35942bab..06b9350dbc 100644 --- a/nipype/interfaces/ants/segmentation.py +++ b/nipype/interfaces/ants/segmentation.py @@ -1248,7 +1248,7 @@ def _format_arg(self, name, trait_spec, value): return super(DenoiseImage, self)._format_arg(name, trait_spec, value) -class _JointFusionInputSpec(ANTSCommandInputSpec): +class JointFusionInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( 3, 2, @@ -1394,7 +1394,7 @@ class _JointFusionInputSpec(ANTSCommandInputSpec): verbose = traits.Bool(False, argstr="-v", desc=("Verbose output.")) -class _JointFusionOutputSpec(TraitedSpec): +class JointFusionOutputSpec(TraitedSpec): out_label_fusion = File(exists=True) out_intensity_fusion = OutputMultiPath(File(exists=True)) out_label_post_prob = OutputMultiPath(File(exists=True)) @@ -1477,8 +1477,8 @@ class JointFusion(ANTSCommand): """ - input_spec = _JointFusionInputSpec - output_spec = _JointFusionOutputSpec + input_spec = JointFusionInputSpec + output_spec = JointFusionOutputSpec _cmd = "antsJointFusion" def _format_arg(self, opt, spec, val): @@ -1569,8 +1569,8 @@ def _list_outputs(self): # For backwards compatibility AntsJointFusion = JointFusion -AntsJointFusionInputSpec = _JointFusionInputSpec -AntsJointFusionOutputSpec = _JointFusionOutputSpec +AntsJointFusionInputSpec = JointFusionInputSpec +AntsJointFusionOutputSpec = JointFusionOutputSpec class KellyKapowskiInputSpec(ANTSCommandInputSpec): diff --git a/nipype/interfaces/ants/utils.py b/nipype/interfaces/ants/utils.py index da23bd8213..58c91f629f 100644 --- a/nipype/interfaces/ants/utils.py +++ b/nipype/interfaces/ants/utils.py @@ -4,7 +4,7 @@ from .base import ANTSCommandInputSpec, ANTSCommand, FixHeaderANTSCommand -class _ImageMathInputSpec(ANTSCommandInputSpec): +class ImageMathInputSpec(ANTSCommandInputSpec): dimension = traits.Int( 3, usedefault=True, 
position=1, argstr="%d", desc="dimension of output image" ) @@ -63,7 +63,7 @@ class _ImageMathInputSpec(ANTSCommandInputSpec): ) -class _ImageMathOuputSpec(TraitedSpec): +class ImageMathOuputSpec(TraitedSpec): output_image = File(exists=True, desc="output image file") @@ -94,11 +94,11 @@ class ImageMath(FixHeaderANTSCommand): """ _cmd = "ImageMath" - input_spec = _ImageMathInputSpec - output_spec = _ImageMathOuputSpec + input_spec = ImageMathInputSpec + output_spec = ImageMathOuputSpec -class _ResampleImageBySpacingInputSpec(ANTSCommandInputSpec): +class ResampleImageBySpacingInputSpec(ANTSCommandInputSpec): dimension = traits.Int( 3, usedefault=True, position=1, argstr="%d", desc="dimension of output image" ) @@ -142,7 +142,7 @@ class _ResampleImageBySpacingInputSpec(ANTSCommandInputSpec): ) -class _ResampleImageBySpacingOutputSpec(TraitedSpec): +class ResampleImageBySpacingOutputSpec(TraitedSpec): output_image = File(exists=True, desc="resampled file") @@ -180,8 +180,8 @@ class ResampleImageBySpacing(FixHeaderANTSCommand): """ _cmd = "ResampleImageBySpacing" - input_spec = _ResampleImageBySpacingInputSpec - output_spec = _ResampleImageBySpacingOutputSpec + input_spec = ResampleImageBySpacingInputSpec + output_spec = ResampleImageBySpacingOutputSpec def _format_arg(self, name, trait_spec, value): if name == "out_spacing": @@ -193,7 +193,7 @@ def _format_arg(self, name, trait_spec, value): return super(ResampleImageBySpacing, self)._format_arg(name, trait_spec, value) -class _ThresholdImageInputSpec(ANTSCommandInputSpec): +class ThresholdImageInputSpec(ANTSCommandInputSpec): dimension = traits.Int( 3, usedefault=True, position=1, argstr="%d", desc="dimension of output image" ) @@ -244,7 +244,7 @@ class _ThresholdImageInputSpec(ANTSCommandInputSpec): ) -class _ThresholdImageOutputSpec(TraitedSpec): +class ThresholdImageOutputSpec(TraitedSpec): output_image = File(exists=True, desc="resampled file") @@ -275,11 +275,11 @@ class ThresholdImage(FixHeaderANTSCommand): """ _cmd = "ThresholdImage" - input_spec = _ThresholdImageInputSpec - output_spec = _ThresholdImageOutputSpec + input_spec = ThresholdImageInputSpec + output_spec = ThresholdImageOutputSpec -class _AIInputSpec(ANTSCommandInputSpec): +class AIInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( 3, 2, usedefault=True, argstr="-d %d", desc="dimension of output image" ) @@ -359,7 +359,7 @@ class _AIInputSpec(ANTSCommandInputSpec): ) -class _AIOuputSpec(TraitedSpec): +class AIOuputSpec(TraitedSpec): output_transform = File(exists=True, desc="output file name") @@ -388,8 +388,8 @@ class AI(ANTSCommand): """ _cmd = "antsAI" - input_spec = _AIInputSpec - output_spec = _AIOuputSpec + input_spec = AIInputSpec + output_spec = AIOuputSpec def _run_interface(self, runtime, correct_return_codes=(0,)): runtime = super(AI, self)._run_interface(runtime, correct_return_codes) From 6979cbdcb041cb12b064513a57ff74b000bacdad Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 4 Mar 2020 16:30:11 -0500 Subject: [PATCH 0763/1665] RF: Move header copying to a mixin --- nipype/interfaces/ants/base.py | 14 --- nipype/interfaces/ants/segmentation.py | 24 ++--- nipype/interfaces/ants/utils.py | 12 ++- nipype/interfaces/mixins/__init__.py | 1 + nipype/interfaces/mixins/fixheader.py | 134 +++++++++++++++++++++++++ 5 files changed, 150 insertions(+), 35 deletions(-) create mode 100644 nipype/interfaces/mixins/fixheader.py diff --git a/nipype/interfaces/ants/base.py b/nipype/interfaces/ants/base.py index 4d27fa619f..df45dd6c28 100644 --- 
a/nipype/interfaces/ants/base.py +++ b/nipype/interfaces/ants/base.py @@ -122,17 +122,3 @@ def set_default_num_threads(cls, num_threads): @property def version(self): return Info.version() - - -class FixHeaderANTSCommand(ANTSCommand): - """Fix header if the copy_header input is on.""" - - def aggregate_outputs(self, runtime=None, needed_outputs=None): - """Overload the aggregation with header replacement, if required.""" - outputs = super(FixHeaderANTSCommand, self).aggregate_outputs( - runtime, needed_outputs) - if self.inputs.copy_header: # Fix headers - _copy_header( - self.inputs.op1, outputs["output_image"], keep_dtype=True - ) - return outputs diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py index 06b9350dbc..d3319010d1 100644 --- a/nipype/interfaces/ants/segmentation.py +++ b/nipype/interfaces/ants/segmentation.py @@ -2,9 +2,9 @@ import os from glob import glob from ...external.due import BibTeX -from ...utils.imagemanip import copy_header as _copy_header from ...utils.filemanip import split_filename, copyfile, which, fname_presuffix from ..base import TraitedSpec, File, traits, InputMultiPath, OutputMultiPath, isdefined +from ..mixins import CopyHeaderInterface from .base import ANTSCommand, ANTSCommandInputSpec @@ -420,7 +420,7 @@ class N4BiasFieldCorrectionOutputSpec(TraitedSpec): bias_image = File(exists=True, desc="Estimated bias") -class N4BiasFieldCorrection(ANTSCommand): +class N4BiasFieldCorrection(ANTSCommand, CopyHeaderInterface): """ Bias field correction. @@ -491,6 +491,10 @@ class N4BiasFieldCorrection(ANTSCommand): _cmd = "N4BiasFieldCorrection" input_spec = N4BiasFieldCorrectionInputSpec output_spec = N4BiasFieldCorrectionOutputSpec + _copy_header_map = { + "output_image": ("input_image", False), + "bias_image": ("input_image", True), + } def __init__(self, *args, **kwargs): """Instantiate the N4BiasFieldCorrection interface.""" @@ -533,20 +537,6 @@ def _parse_inputs(self, skip=None): self._out_bias_file = bias_image return super(N4BiasFieldCorrection, self)._parse_inputs(skip=skip) - def _list_outputs(self): - outputs = super(N4BiasFieldCorrection, self)._list_outputs() - - # Fix headers - if self.inputs.copy_header: - _copy_header(self.inputs.input_image, outputs["output_image"], - keep_dtype=False) - - if self._out_bias_file: - outputs["bias_image"] = os.path.abspath(self._out_bias_file) - if self.inputs.copy_header: - _copy_header(self.inputs.input_image, outputs["bias_image"]) - return outputs - class CorticalThicknessInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( @@ -1501,7 +1491,7 @@ def _format_arg(self, opt, spec, val): for option in ( self.inputs.out_intensity_fusion_name_format, self.inputs.out_label_post_prob_name_format, - self.inputs.out_atlas_voting_weight_name_format + self.inputs.out_atlas_voting_weight_name_format, ): if isdefined(option): args.append(option) diff --git a/nipype/interfaces/ants/utils.py b/nipype/interfaces/ants/utils.py index 58c91f629f..6c3c05e9c4 100644 --- a/nipype/interfaces/ants/utils.py +++ b/nipype/interfaces/ants/utils.py @@ -1,7 +1,8 @@ """ANTs' utilities.""" import os from ..base import traits, isdefined, TraitedSpec, File, Str, InputMultiObject -from .base import ANTSCommandInputSpec, ANTSCommand, FixHeaderANTSCommand +from ..mixins import CopyHeaderInterface +from .base import ANTSCommandInputSpec, ANTSCommand class ImageMathInputSpec(ANTSCommandInputSpec): @@ -67,7 +68,7 @@ class ImageMathOuputSpec(TraitedSpec): output_image = File(exists=True, desc="output image 
file") -class ImageMath(FixHeaderANTSCommand): +class ImageMath(ANTSCommand, CopyHeaderInterface): """ Operations over images. @@ -96,6 +97,7 @@ class ImageMath(FixHeaderANTSCommand): _cmd = "ImageMath" input_spec = ImageMathInputSpec output_spec = ImageMathOuputSpec + _copy_header_map = {"output_image": "op1"} class ResampleImageBySpacingInputSpec(ANTSCommandInputSpec): @@ -146,7 +148,7 @@ class ResampleImageBySpacingOutputSpec(TraitedSpec): output_image = File(exists=True, desc="resampled file") -class ResampleImageBySpacing(FixHeaderANTSCommand): +class ResampleImageBySpacing(ANTSCommand, CopyHeaderInterface): """ Resample an image with a given spacing. @@ -182,6 +184,7 @@ class ResampleImageBySpacing(FixHeaderANTSCommand): _cmd = "ResampleImageBySpacing" input_spec = ResampleImageBySpacingInputSpec output_spec = ResampleImageBySpacingOutputSpec + _copy_header_map = {"output_image": "input_image"} def _format_arg(self, name, trait_spec, value): if name == "out_spacing": @@ -248,7 +251,7 @@ class ThresholdImageOutputSpec(TraitedSpec): output_image = File(exists=True, desc="resampled file") -class ThresholdImage(FixHeaderANTSCommand): +class ThresholdImage(ANTSCommand, CopyHeaderInterface): """ Apply thresholds on images. @@ -277,6 +280,7 @@ class ThresholdImage(FixHeaderANTSCommand): _cmd = "ThresholdImage" input_spec = ThresholdImageInputSpec output_spec = ThresholdImageOutputSpec + _copy_header_map = {"output_image": "input_image"} class AIInputSpec(ANTSCommandInputSpec): diff --git a/nipype/interfaces/mixins/__init__.py b/nipype/interfaces/mixins/__init__.py index a64dc34ff2..e54986231f 100644 --- a/nipype/interfaces/mixins/__init__.py +++ b/nipype/interfaces/mixins/__init__.py @@ -3,3 +3,4 @@ ReportCapableInputSpec, ReportCapableOutputSpec, ) +from .fixheader import CopyHeaderInputSpec, CopyHeaderInterface diff --git a/nipype/interfaces/mixins/fixheader.py b/nipype/interfaces/mixins/fixheader.py new file mode 100644 index 0000000000..3eb15785a3 --- /dev/null +++ b/nipype/interfaces/mixins/fixheader.py @@ -0,0 +1,134 @@ +from ..base import BaseInterface, BaseInterfaceInputSpec, traits +from ...utils.imagemanip import copy_header as _copy_header + + +class CopyHeaderInputSpec(BaseInterfaceInputSpec): + copy_header = traits.Bool( + desc="Copy headers of the input image into the output image" + ) + + +class CopyHeaderInterface(BaseInterface): + """ Copy headers if the copy_header input is ``True`` + + This interface mixin adds a post-run hook that allows for copying + an input header to an output file. + The subclass should specify a ``_copy_header_map`` that maps the **output** + image to the **input** image whose header should be copied. + + This feature is intended for tools that are intended to adjust voxel data without + modifying the header, but for some reason do not reliably preserve the header. + + Here we show an example interface that takes advantage of the mixin by simply + setting the data block: + + >>> import os + >>> import numpy as np + >>> import nibabel as nb + >>> from nipype.interfaces.base import SimpleInterface, TraitedSpec, File + >>> from nipype.interfaces.mixins import CopyHeaderInputSpec, CopyHeaderInterface + + >>> class ZerofileInputSpec(CopyHeaderInputSpec): + ... in_file = File(mandatory=True, exists=True) + + >>> class ZerofileOutputSpec(TraitedSpec): + ... out_file = File() + + >>> class ZerofileInterface(SimpleInterface, CopyHeaderInterface): + ... input_spec = ZerofileInputSpec + ... output_spec = ZerofileOutputSpec + ... 
_copy_header_map = {'out_file': 'in_file'} + ... + ... def _run_interface(self, runtime): + ... img = nb.load(self.inputs.in_file) + ... # Just set the data. Let the CopyHeaderInterface mixin fix the affine and header. + ... nb.Nifti1Image(np.zeros(img.shape, dtype=np.uint8), None).to_filename('out.nii') + ... self._results = {'out_file': os.path.abspath('out.nii')} + ... return runtime + + Consider a file of all ones and a non-trivial affine: + + >>> in_file = 'test.nii' + >>> nb.Nifti1Image(np.ones((5,5,5), dtype=np.int16), + ... affine=np.diag((4, 3, 2, 1))).to_filename(in_file) + + The default behavior would produce a file with similar data: + + >>> res = ZerofileInterface(in_file=in_file).run() + >>> out_img = nb.load(res.outputs.out_file) + >>> out_img.shape + (5, 5, 5) + >>> np.all(out_img.get_fdata() == 0) + True + + An updated data type: + + >>> out_img.get_data_dtype() + dtype('uint8') + + But a different affine: + + >>> np.array_equal(out_img.affine, np.diag((4, 3, 2, 1))) + False + + With ``copy_header=True``, then the affine is also equal: + + >>> res = ZerofileInterface(in_file=in_file, copy_header=True).run() + >>> out_img = nb.load(res.outputs.out_file) + >>> np.array_equal(out_img.affine, np.diag((4, 3, 2, 1))) + True + + The data properties remain as expected: + + >>> out_img.shape + (5, 5, 5) + >>> out_img.get_data_dtype() + dtype('uint8') + >>> np.all(out_img.get_fdata() == 0) + True + + By default, the data type of the output file is permitted to vary from the + inputs. That is, the data type is preserved. + If the data type of the original file is preferred, the ``_copy_header_map`` + can indicate the output data type should **not** be preserved by providing a + tuple of the input and ``False``. + + >>> ZerofileInterface._copy_header_map['out_file'] = ('in_file', False) + + >>> res = ZerofileInterface(in_file=in_file, copy_header=True).run() + >>> out_img = nb.load(res.outputs.out_file) + >>> out_img.get_data_dtype() + dtype('<i2') + >>> np.array_equal(out_img.affine, np.diag((4, 3, 2, 1))) + True + >>> out_img.shape + (5, 5, 5) + >>> np.all(out_img.get_fdata() == 0) + True + + Providing a tuple where the second value is ``True`` is also permissible to + achieve the default behavior.
+ + """ + + _copy_header_map = None + + def _post_run_hook(self, runtime): + """Copy headers for outputs, if required.""" + runtime = super()._post_run_hook(runtime) + + if self._copy_header_map is None or not self.inputs.copy_header: + return runtime + + inputs = self.inputs.get_traitsfree() + outputs = self.aggregate_outputs(runtime=runtime).get_traitsfree() + for out, inp in self._copy_header_map.items(): + keep_dtype = True + if isinstance(inp, tuple): + inp, keep_dtype = inp + _copy_header(inputs[inp], outputs[out], keep_dtype=keep_dtype) + + return runtime From d50c1858564c0b3073fb23c54886a0454cb66afa Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 4 Mar 2020 16:30:39 -0500 Subject: [PATCH 0764/1665] STY: black --- nipype/utils/tests/test_imagemanip.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/nipype/utils/tests/test_imagemanip.py b/nipype/utils/tests/test_imagemanip.py index af4c708b34..a488633cab 100644 --- a/nipype/utils/tests/test_imagemanip.py +++ b/nipype/utils/tests/test_imagemanip.py @@ -6,25 +6,22 @@ from ..imagemanip import copy_header -@pytest.mark.parametrize('keep_dtype', (True, False)) +@pytest.mark.parametrize("keep_dtype", (True, False)) def test_copy_header(tmp_path, keep_dtype): """Cover copy_header.""" - fname1 = tmp_path / 'reference.nii.gz' - fname2 = tmp_path / 'target.nii.gz' + fname1 = tmp_path / "reference.nii.gz" + fname2 = tmp_path / "target.nii.gz" - nii = nb.Nifti1Image( - np.zeros((10, 10, 10), dtype='uint8'), None, None) - nii.set_qform(np.diag((1., 2., 3., 1.)), code=2) - nii.set_sform(np.diag((1., 2., 3., 1.)), code=1) + nii = nb.Nifti1Image(np.zeros((10, 10, 10), dtype="uint8"), None, None) + nii.set_qform(np.diag((1.0, 2.0, 3.0, 1.0)), code=2) + nii.set_sform(np.diag((1.0, 2.0, 3.0, 1.0)), code=1) nii.to_filename(str(fname1)) - nii.set_data_dtype('float32') + nii.set_data_dtype("float32") nii.set_qform(np.eye(4), code=1) nii.to_filename(str(fname2)) - copied = nb.load( - copy_header(fname1, fname2, keep_dtype=keep_dtype) - ) + copied = nb.load(copy_header(fname1, fname2, keep_dtype=keep_dtype)) ref = nb.load(str(fname1)) assert np.all(copied.get_qform(coded=False) == ref.get_qform(coded=False)) assert np.all(copied.get_sform(coded=False) == ref.get_sform(coded=False)) From 545a5fe1949d5f9d48af9ba001cbfbd639c313a0 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 4 Mar 2020 16:35:08 -0500 Subject: [PATCH 0765/1665] MNT: make specs --- .../ants/tests/test_auto_AntsJointFusion.py | 59 ------------------- .../tests/test_auto_FixHeaderANTSCommand.py | 15 ----- .../tests/test_auto_CopyHeaderInterface.py | 11 ++++ 3 files changed, 11 insertions(+), 74 deletions(-) delete mode 100644 nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py delete mode 100644 nipype/interfaces/ants/tests/test_auto_FixHeaderANTSCommand.py create mode 100644 nipype/interfaces/mixins/tests/test_auto_CopyHeaderInterface.py diff --git a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py deleted file mode 100644 index d86f7f84cb..0000000000 --- a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py +++ /dev/null @@ -1,59 +0,0 @@ -# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from ..segmentation import AntsJointFusion - - -def test_AntsJointFusion_inputs(): - input_map = dict( - alpha=dict(argstr="-a %s", usedefault=True,), - args=dict(argstr="%s",), - atlas_image=dict(argstr="-g %s...", mandatory=True,), - 
atlas_segmentation_image=dict(argstr="-l %s...", mandatory=True,), - beta=dict(argstr="-b %s", usedefault=True,), - constrain_nonnegative=dict(argstr="-c", usedefault=True,), - dimension=dict(argstr="-d %d",), - environ=dict(nohash=True, usedefault=True,), - exclusion_image=dict(), - exclusion_image_label=dict(argstr="-e %s", requires=["exclusion_image"],), - mask_image=dict(argstr="-x %s", extensions=None,), - num_threads=dict(nohash=True, usedefault=True,), - out_atlas_voting_weight_name_format=dict( - requires=[ - "out_label_fusion", - "out_intensity_fusion_name_format", - "out_label_post_prob_name_format", - ], - ), - out_intensity_fusion_name_format=dict(argstr="",), - out_label_fusion=dict(argstr="%s", extensions=None, hash_files=False,), - out_label_post_prob_name_format=dict( - requires=["out_label_fusion", "out_intensity_fusion_name_format"], - ), - patch_metric=dict(argstr="-m %s",), - patch_radius=dict(argstr="-p %s", maxlen=3, minlen=3,), - retain_atlas_voting_images=dict(argstr="-f", usedefault=True,), - retain_label_posterior_images=dict( - argstr="-r", requires=["atlas_segmentation_image"], usedefault=True, - ), - search_radius=dict(argstr="-s %s", usedefault=True,), - target_image=dict(argstr="-t %s", mandatory=True,), - verbose=dict(argstr="-v",), - ) - inputs = AntsJointFusion.input_spec() - - for key, metadata in list(input_map.items()): - for metakey, value in list(metadata.items()): - assert getattr(inputs.traits()[key], metakey) == value - - -def test_AntsJointFusion_outputs(): - output_map = dict( - out_atlas_voting_weight_name_format=dict(), - out_intensity_fusion_name_format=dict(), - out_label_fusion=dict(extensions=None,), - out_label_post_prob_name_format=dict(), - ) - outputs = AntsJointFusion.output_spec() - - for key, metadata in list(output_map.items()): - for metakey, value in list(metadata.items()): - assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_FixHeaderANTSCommand.py b/nipype/interfaces/ants/tests/test_auto_FixHeaderANTSCommand.py deleted file mode 100644 index 2f0d66dca9..0000000000 --- a/nipype/interfaces/ants/tests/test_auto_FixHeaderANTSCommand.py +++ /dev/null @@ -1,15 +0,0 @@ -# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from ..base import FixHeaderANTSCommand - - -def test_FixHeaderANTSCommand_inputs(): - input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - num_threads=dict(nohash=True, usedefault=True,), - ) - inputs = FixHeaderANTSCommand.input_spec() - - for key, metadata in list(input_map.items()): - for metakey, value in list(metadata.items()): - assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mixins/tests/test_auto_CopyHeaderInterface.py b/nipype/interfaces/mixins/tests/test_auto_CopyHeaderInterface.py new file mode 100644 index 0000000000..58f9bc0864 --- /dev/null +++ b/nipype/interfaces/mixins/tests/test_auto_CopyHeaderInterface.py @@ -0,0 +1,11 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from ..fixheader import CopyHeaderInterface + + +def test_CopyHeaderInterface_inputs(): + input_map = dict() + inputs = CopyHeaderInterface.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value From 135d900b52f8db60e8ec3b70a9d95828bfbef1b8 Mon Sep 17 00:00:00 2001 From: oesteban Date: Thu, 5 Mar 2020 00:03:16 -0800 Subject: [PATCH 0766/1665] sty: fix unused import --- 
nipype/interfaces/ants/base.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nipype/interfaces/ants/base.py b/nipype/interfaces/ants/base.py index df45dd6c28..4b5e5ef8db 100644 --- a/nipype/interfaces/ants/base.py +++ b/nipype/interfaces/ants/base.py @@ -7,7 +7,6 @@ # Local imports from ... import logging, LooseVersion from ..base import CommandLine, CommandLineInputSpec, traits, isdefined, PackageInfo -from ...utils.imagemanip import copy_header as _copy_header iflogger = logging.getLogger("nipype.interface") From 78c792d1aae2d1d1b406f5d0e5dabba990e3e167 Mon Sep 17 00:00:00 2001 From: Lea Waller Date: Sat, 7 Mar 2020 12:23:43 +0100 Subject: [PATCH 0767/1665] Re-write _check_inputs and _check_outputs to not require full inputs/outputs computation --- nipype/pipeline/engine/workflows.py | 44 ++++++++++++++++++++++++----- 1 file changed, 37 insertions(+), 7 deletions(-) diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py index d1fde0ba32..13cd611d59 100644 --- a/nipype/pipeline/engine/workflows.py +++ b/nipype/pipeline/engine/workflows.py @@ -770,16 +770,46 @@ def _check_nodes(self, nodes): def _has_attr(self, parameter, subtype="in"): """Checks if a parameter is available as an input or output """ + hierarchy = parameter.split(".") + attrname = hierarchy.pop() + nodename = hierarchy.pop() + + targetworkflow = self + for workflowname in hierarchy: + workflow = None + for node in targetworkflow._graph.nodes(): + if node.name == workflowname: + if isinstance(node, Workflow): + workflow = node + break + if workflow is None: + return False + targetworkflow = workflow + + targetnode = None + for node in targetworkflow._graph.nodes(): + if node.name == nodename: + if isinstance(node, Workflow): + return False + else: + targetnode = node + break + if targetnode is None: + return False + if subtype == "in": - subobject = self.inputs + if not hasattr(node.inputs, attrname): + return False else: - subobject = self.outputs - attrlist = parameter.split(".") - cur_out = subobject - for attr in attrlist: - if not hasattr(cur_out, attr): + if not hasattr(node.outputs, attrname): return False - cur_out = getattr(cur_out, attr) + + if subtype == "in": + for _, _, d in targetworkflow._graph.in_edges(nbunch=targetnode, data=True): + for cd in d["connect"]: + if attrname == cd[1]: + return False + return True def _get_parameter_node(self, parameter, subtype="in"): From 7b1a2f44ac8c206106ce1d03d476d4a0b971323c Mon Sep 17 00:00:00 2001 From: Daniel Geisler Date: Sat, 7 Mar 2020 23:33:48 +0100 Subject: [PATCH 0768/1665] First attempt of a SGE test --- nipype/pipeline/plugins/tests/test_sgelike.py | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 nipype/pipeline/plugins/tests/test_sgelike.py diff --git a/nipype/pipeline/plugins/tests/test_sgelike.py b/nipype/pipeline/plugins/tests/test_sgelike.py new file mode 100644 index 0000000000..4b1a9e10bd --- /dev/null +++ b/nipype/pipeline/plugins/tests/test_sgelike.py @@ -0,0 +1,42 @@ +from nipype.pipeline.plugins.base import SGELikeBatchManagerBase +from nipype.interfaces.utility import Function +import nipype.pipeline.engine as pe +from os.path import join +import os +from glob import glob +import pytest +from mock import patch +from tempfile import TemporaryDirectory +import subprocess + +def crasher(): + raise ValueError() + + +def submit_batchtask(self, scriptfile, node): + self._pending[1] = node.output_dir() + subprocess.call(["bash", scriptfile]) + return 1 + +def is_pending(taskid): + return False + 
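# Note on the two stubs above: when patched into SGELikeBatchManagerBase below,
# submit_batchtask runs the generated batch script locally with ``bash`` rather
# than handing it to a grid engine, and is_pending always reports the task as
# finished, so the plugin proceeds straight to collecting outputs (and any
# crash files) without needing a real SGE installation.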
+ +@patch.object(SGELikeBatchManagerBase, '_submit_batchtask', new=submit_batchtask) +@patch.object(SGELikeBatchManagerBase, '_is_pending', new=is_pending) +def test_crashfile_creation(): + cur_dir = os.getcwd() + with TemporaryDirectory(prefix="test_engine_", dir=cur_dir) as tmpdirname: + pipe = pe.Workflow(name="pipe", base_dir=tmpdirname) + pipe.config["execution"]["crashdump_dir"] = tmpdirname + pipe.add_nodes([pe.Node(interface=Function(function=crasher), + name="crasher")]) + sgelike_plugin = SGELikeBatchManagerBase("") + with pytest.raises(RuntimeError) as e: + assert pipe.run(plugin=sgelike_plugin) + assert (str(e.value) == + "Workflow did not execute cleanly. Check log for details") + + crashfiles = glob(join(tmpdirname,"crash*crasher*.pklz")) + assert len(crashfiles) == 1 + os.chdir(cur_dir) From 4e1d601e0c52187340bcc07bc8651376d3c31533 Mon Sep 17 00:00:00 2001 From: oesteban Date: Sat, 7 Mar 2020 16:36:50 -0800 Subject: [PATCH 0769/1665] fix: consider that some outputs might not be defined --- nipype/interfaces/mixins/fixheader.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/mixins/fixheader.py b/nipype/interfaces/mixins/fixheader.py index 3eb15785a3..ded1830582 100644 --- a/nipype/interfaces/mixins/fixheader.py +++ b/nipype/interfaces/mixins/fixheader.py @@ -125,7 +125,9 @@ def _post_run_hook(self, runtime): inputs = self.inputs.get_traitsfree() outputs = self.aggregate_outputs(runtime=runtime).get_traitsfree() - for out, inp in self._copy_header_map.items(): + defined_outputs = set(outputs.keys()).intersection(self._copy_header_map.keys()) + for out in defined_outputs: + inp = self._copy_header_map[out] keep_dtype = True if isinstance(inp, tuple): inp, keep_dtype = inp From 9c8f322f381ae51f19b5f34c75c52ba944362d49 Mon Sep 17 00:00:00 2001 From: Daniel Geisler Date: Sun, 8 Mar 2020 11:14:30 +0100 Subject: [PATCH 0770/1665] FIX: add missing argument to is_pending mock function --- nipype/pipeline/plugins/tests/test_sgelike.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/nipype/pipeline/plugins/tests/test_sgelike.py b/nipype/pipeline/plugins/tests/test_sgelike.py index 4b1a9e10bd..40649c9af9 100644 --- a/nipype/pipeline/plugins/tests/test_sgelike.py +++ b/nipype/pipeline/plugins/tests/test_sgelike.py @@ -9,6 +9,7 @@ from tempfile import TemporaryDirectory import subprocess + def crasher(): raise ValueError() @@ -18,7 +19,8 @@ def submit_batchtask(self, scriptfile, node): subprocess.call(["bash", scriptfile]) return 1 -def is_pending(taskid): + +def is_pending(self, taskid): return False @@ -40,3 +42,4 @@ def test_crashfile_creation(): crashfiles = glob(join(tmpdirname,"crash*crasher*.pklz")) assert len(crashfiles) == 1 os.chdir(cur_dir) + From 57ab1080f69d24acbed9cbdf55e98e8f5d9b5a5a Mon Sep 17 00:00:00 2001 From: Daniel Ge <3453485+daniel-ge@users.noreply.github.com> Date: Sun, 8 Mar 2020 14:25:29 +0100 Subject: [PATCH 0771/1665] Update nipype/pipeline/plugins/tests/test_sgelike.py Co-Authored-By: Chris Markiewicz --- nipype/pipeline/plugins/tests/test_sgelike.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nipype/pipeline/plugins/tests/test_sgelike.py b/nipype/pipeline/plugins/tests/test_sgelike.py index 40649c9af9..b973b1e841 100644 --- a/nipype/pipeline/plugins/tests/test_sgelike.py +++ b/nipype/pipeline/plugins/tests/test_sgelike.py @@ -5,7 +5,7 @@ import os from glob import glob import pytest -from mock import patch +from unittest.mock import patch from tempfile import 
TemporaryDirectory import subprocess @@ -42,4 +42,3 @@ def test_crashfile_creation(): crashfiles = glob(join(tmpdirname,"crash*crasher*.pklz")) assert len(crashfiles) == 1 os.chdir(cur_dir) - From 269ac56c21b170dce1af6ebead51dbf49302b48a Mon Sep 17 00:00:00 2001 From: Daniel Ge <3453485+daniel-ge@users.noreply.github.com> Date: Sun, 8 Mar 2020 14:25:39 +0100 Subject: [PATCH 0772/1665] Update nipype/pipeline/plugins/tests/test_sgelike.py Co-Authored-By: Chris Markiewicz --- nipype/pipeline/plugins/tests/test_sgelike.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/pipeline/plugins/tests/test_sgelike.py b/nipype/pipeline/plugins/tests/test_sgelike.py index b973b1e841..27cd733b39 100644 --- a/nipype/pipeline/plugins/tests/test_sgelike.py +++ b/nipype/pipeline/plugins/tests/test_sgelike.py @@ -26,7 +26,7 @@ def is_pending(self, taskid): @patch.object(SGELikeBatchManagerBase, '_submit_batchtask', new=submit_batchtask) @patch.object(SGELikeBatchManagerBase, '_is_pending', new=is_pending) -def test_crashfile_creation(): +def test_crashfile_creation(tmp_path): cur_dir = os.getcwd() with TemporaryDirectory(prefix="test_engine_", dir=cur_dir) as tmpdirname: pipe = pe.Workflow(name="pipe", base_dir=tmpdirname) From 98aeca13a312e0326355a4a0f9ab5a9a30f02601 Mon Sep 17 00:00:00 2001 From: Daniel Ge <3453485+daniel-ge@users.noreply.github.com> Date: Sun, 8 Mar 2020 14:25:50 +0100 Subject: [PATCH 0773/1665] Update nipype/pipeline/plugins/tests/test_sgelike.py Co-Authored-By: Chris Markiewicz --- nipype/pipeline/plugins/tests/test_sgelike.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nipype/pipeline/plugins/tests/test_sgelike.py b/nipype/pipeline/plugins/tests/test_sgelike.py index 27cd733b39..10c332bea1 100644 --- a/nipype/pipeline/plugins/tests/test_sgelike.py +++ b/nipype/pipeline/plugins/tests/test_sgelike.py @@ -27,8 +27,6 @@ def is_pending(self, taskid): @patch.object(SGELikeBatchManagerBase, '_submit_batchtask', new=submit_batchtask) @patch.object(SGELikeBatchManagerBase, '_is_pending', new=is_pending) def test_crashfile_creation(tmp_path): - cur_dir = os.getcwd() - with TemporaryDirectory(prefix="test_engine_", dir=cur_dir) as tmpdirname: pipe = pe.Workflow(name="pipe", base_dir=tmpdirname) pipe.config["execution"]["crashdump_dir"] = tmpdirname pipe.add_nodes([pe.Node(interface=Function(function=crasher), From 33befb1a960f3da4388005efdf5245b46ee27d8d Mon Sep 17 00:00:00 2001 From: Daniel Ge <3453485+daniel-ge@users.noreply.github.com> Date: Sun, 8 Mar 2020 14:25:59 +0100 Subject: [PATCH 0774/1665] Update nipype/pipeline/plugins/tests/test_sgelike.py Co-Authored-By: Chris Markiewicz --- nipype/pipeline/plugins/tests/test_sgelike.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nipype/pipeline/plugins/tests/test_sgelike.py b/nipype/pipeline/plugins/tests/test_sgelike.py index 10c332bea1..a29d0528c7 100644 --- a/nipype/pipeline/plugins/tests/test_sgelike.py +++ b/nipype/pipeline/plugins/tests/test_sgelike.py @@ -39,4 +39,3 @@ def test_crashfile_creation(tmp_path): crashfiles = glob(join(tmpdirname,"crash*crasher*.pklz")) assert len(crashfiles) == 1 - os.chdir(cur_dir) From ce22e364266d814fdeba5110860248f6d96fa534 Mon Sep 17 00:00:00 2001 From: Daniel Ge <3453485+daniel-ge@users.noreply.github.com> Date: Sun, 8 Mar 2020 14:26:14 +0100 Subject: [PATCH 0775/1665] Update nipype/pipeline/plugins/tests/test_sgelike.py Co-Authored-By: Chris Markiewicz --- nipype/pipeline/plugins/tests/test_sgelike.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/nipype/pipeline/plugins/tests/test_sgelike.py b/nipype/pipeline/plugins/tests/test_sgelike.py index a29d0528c7..143df8ddcc 100644 --- a/nipype/pipeline/plugins/tests/test_sgelike.py +++ b/nipype/pipeline/plugins/tests/test_sgelike.py @@ -27,7 +27,7 @@ def is_pending(self, taskid): @patch.object(SGELikeBatchManagerBase, '_submit_batchtask', new=submit_batchtask) @patch.object(SGELikeBatchManagerBase, '_is_pending', new=is_pending) def test_crashfile_creation(tmp_path): - pipe = pe.Workflow(name="pipe", base_dir=tmpdirname) + pipe = pe.Workflow(name="pipe", base_dir=str(tmp_path)) pipe.config["execution"]["crashdump_dir"] = tmpdirname pipe.add_nodes([pe.Node(interface=Function(function=crasher), name="crasher")]) From ef739f047615758e2552f793669846267cc2aa5b Mon Sep 17 00:00:00 2001 From: Daniel Ge <3453485+daniel-ge@users.noreply.github.com> Date: Sun, 8 Mar 2020 14:26:23 +0100 Subject: [PATCH 0776/1665] Update nipype/pipeline/plugins/tests/test_sgelike.py Co-Authored-By: Chris Markiewicz --- nipype/pipeline/plugins/tests/test_sgelike.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/pipeline/plugins/tests/test_sgelike.py b/nipype/pipeline/plugins/tests/test_sgelike.py index 143df8ddcc..025bba1133 100644 --- a/nipype/pipeline/plugins/tests/test_sgelike.py +++ b/nipype/pipeline/plugins/tests/test_sgelike.py @@ -28,7 +28,7 @@ def is_pending(self, taskid): @patch.object(SGELikeBatchManagerBase, '_is_pending', new=is_pending) def test_crashfile_creation(tmp_path): pipe = pe.Workflow(name="pipe", base_dir=str(tmp_path)) - pipe.config["execution"]["crashdump_dir"] = tmpdirname + pipe.config["execution"]["crashdump_dir"] = str(tmp_path) pipe.add_nodes([pe.Node(interface=Function(function=crasher), name="crasher")]) sgelike_plugin = SGELikeBatchManagerBase("") From 27a2472b16de5861b95238d414978033dcee77f3 Mon Sep 17 00:00:00 2001 From: Daniel Ge <3453485+daniel-ge@users.noreply.github.com> Date: Sun, 8 Mar 2020 14:26:33 +0100 Subject: [PATCH 0777/1665] Update nipype/pipeline/plugins/tests/test_sgelike.py Co-Authored-By: Chris Markiewicz --- nipype/pipeline/plugins/tests/test_sgelike.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/pipeline/plugins/tests/test_sgelike.py b/nipype/pipeline/plugins/tests/test_sgelike.py index 025bba1133..80debae87c 100644 --- a/nipype/pipeline/plugins/tests/test_sgelike.py +++ b/nipype/pipeline/plugins/tests/test_sgelike.py @@ -37,5 +37,5 @@ def test_crashfile_creation(tmp_path): assert (str(e.value) == "Workflow did not execute cleanly. 
Check log for details") - crashfiles = glob(join(tmpdirname,"crash*crasher*.pklz")) + crashfiles = tmp_path.glob("crash*crasher*.pklz") assert len(crashfiles) == 1 From 02991da67458b879d7c6360aa6457eb3c1bd5a07 Mon Sep 17 00:00:00 2001 From: Daniel <3453485+daniel-ge@users.noreply.github.com> Date: Sun, 8 Mar 2020 15:05:28 +0100 Subject: [PATCH 0778/1665] FIX: get length of generator + STY: Black --- nipype/pipeline/plugins/tests/test_sgelike.py | 30 +++++++++---------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/nipype/pipeline/plugins/tests/test_sgelike.py b/nipype/pipeline/plugins/tests/test_sgelike.py index 80debae87c..02fdf263d1 100644 --- a/nipype/pipeline/plugins/tests/test_sgelike.py +++ b/nipype/pipeline/plugins/tests/test_sgelike.py @@ -13,8 +13,8 @@ def crasher(): raise ValueError() - -def submit_batchtask(self, scriptfile, node): + +def submit_batchtask(self, scriptfile, node): self._pending[1] = node.output_dir() subprocess.call(["bash", scriptfile]) return 1 @@ -24,18 +24,16 @@ def is_pending(self, taskid): return False -@patch.object(SGELikeBatchManagerBase, '_submit_batchtask', new=submit_batchtask) -@patch.object(SGELikeBatchManagerBase, '_is_pending', new=is_pending) +@patch.object(SGELikeBatchManagerBase, "_submit_batchtask", new=submit_batchtask) +@patch.object(SGELikeBatchManagerBase, "_is_pending", new=is_pending) def test_crashfile_creation(tmp_path): - pipe = pe.Workflow(name="pipe", base_dir=str(tmp_path)) - pipe.config["execution"]["crashdump_dir"] = str(tmp_path) - pipe.add_nodes([pe.Node(interface=Function(function=crasher), - name="crasher")]) - sgelike_plugin = SGELikeBatchManagerBase("") - with pytest.raises(RuntimeError) as e: - assert pipe.run(plugin=sgelike_plugin) - assert (str(e.value) == - "Workflow did not execute cleanly. Check log for details") - - crashfiles = tmp_path.glob("crash*crasher*.pklz") - assert len(crashfiles) == 1 + pipe = pe.Workflow(name="pipe", base_dir=str(tmp_path)) + pipe.config["execution"]["crashdump_dir"] = str(tmp_path) + pipe.add_nodes([pe.Node(interface=Function(function=crasher), name="crasher")]) + sgelike_plugin = SGELikeBatchManagerBase("") + with pytest.raises(RuntimeError) as e: + assert pipe.run(plugin=sgelike_plugin) + assert str(e.value) == "Workflow did not execute cleanly. 
Check log for details" + + crashfiles = tmp_path.glob("crash*crasher*.pklz") + assert len(list(crashfiles)) == 1 From 69e107f4da45b4bf3f31814e5b0d1b43e08cf29a Mon Sep 17 00:00:00 2001 From: Lea Waller Date: Mon, 9 Mar 2020 12:24:14 +0100 Subject: [PATCH 0779/1665] Add my name to contributors --- .zenodo.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.zenodo.json b/.zenodo.json index 4471e71e7a..92c6f3ed17 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -157,6 +157,11 @@ "name": "De La Vega, Alejandro", "orcid": "0000-0001-9062-3778" }, + { + "affiliation": "Charite Universitatsmedizin Berlin, Germany", + "name": "Waller, Lea", + "orcid": "0000-0002-3239-6957" + }, { "affiliation": "MIT", "name": "Kaczmarzyk, Jakub", From 771ab93a0d822ab6c68bf52ea840910ce3a860ae Mon Sep 17 00:00:00 2001 From: Lea Waller Date: Mon, 9 Mar 2020 13:22:56 +0100 Subject: [PATCH 0780/1665] Apply suggestions from code review --- nipype/pipeline/engine/workflows.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py index 13cd611d59..a12c48a5fb 100644 --- a/nipype/pipeline/engine/workflows.py +++ b/nipype/pipeline/engine/workflows.py @@ -798,10 +798,10 @@ def _has_attr(self, parameter, subtype="in"): return False if subtype == "in": - if not hasattr(node.inputs, attrname): + if not hasattr(targetnode.inputs, attrname): return False else: - if not hasattr(node.outputs, attrname): + if not hasattr(targetnode.outputs, attrname): return False if subtype == "in": From 4a6d6b88a8aea55ffa3d318e4090d77556afb3cf Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 9 Mar 2020 09:51:12 -0400 Subject: [PATCH 0781/1665] TEST: Cleanup imports --- nipype/pipeline/plugins/tests/test_sgelike.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/nipype/pipeline/plugins/tests/test_sgelike.py b/nipype/pipeline/plugins/tests/test_sgelike.py index 02fdf263d1..9c7cdc1412 100644 --- a/nipype/pipeline/plugins/tests/test_sgelike.py +++ b/nipype/pipeline/plugins/tests/test_sgelike.py @@ -1,12 +1,8 @@ from nipype.pipeline.plugins.base import SGELikeBatchManagerBase from nipype.interfaces.utility import Function import nipype.pipeline.engine as pe -from os.path import join -import os -from glob import glob import pytest from unittest.mock import patch -from tempfile import TemporaryDirectory import subprocess From 03729829776277c9f3e10da7144590f209705d64 Mon Sep 17 00:00:00 2001 From: Lea Waller Date: Wed, 11 Mar 2020 18:41:55 +0100 Subject: [PATCH 0782/1665] Fix double connect on nested workflows --- .../pipeline/engine/tests/test_workflows.py | 16 ++++++++++ nipype/pipeline/engine/workflows.py | 32 ++++++++++++++++--- 2 files changed, 43 insertions(+), 5 deletions(-) diff --git a/nipype/pipeline/engine/tests/test_workflows.py b/nipype/pipeline/engine/tests/test_workflows.py index 75f77525f8..c6170f7ba8 100644 --- a/nipype/pipeline/engine/tests/test_workflows.py +++ b/nipype/pipeline/engine/tests/test_workflows.py @@ -83,6 +83,22 @@ def test_doubleconnect(): assert "Trying to connect" in str(excinfo.value) +def test_nested_workflow_doubleconnect(): + # double input with nested workflows + a = pe.Node(niu.IdentityInterface(fields=["a", "b"]), name="a") + b = pe.Node(niu.IdentityInterface(fields=["a", "b"]), name="b") + c = pe.Node(niu.IdentityInterface(fields=["a", "b"]), name="c") + flow1 = pe.Workflow(name="test1") + flow2 = pe.Workflow(name="test2") + flow3 = pe.Workflow(name="test3") + flow1.add_nodes([b]) + flow2.connect(a, "a", 
flow1, "b.a") + with pytest.raises(Exception) as excinfo: + flow3.connect(c, "a", flow2, "test1.b.a") + assert "Some connections were not found" in str(excinfo.value) + flow3.connect(c, "b", flow2, "test1.b.b") + + def test_duplicate_node_check(): wf = pe.Workflow(name="testidentity") diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py index a12c48a5fb..bc532ddf90 100644 --- a/nipype/pipeline/engine/workflows.py +++ b/nipype/pipeline/engine/workflows.py @@ -771,11 +771,25 @@ def _has_attr(self, parameter, subtype="in"): """Checks if a parameter is available as an input or output """ hierarchy = parameter.split(".") + + # Connecting to a workflow needs at least two values, + # the name of the child node and the name of the input/output + if len(hierarchy) < 2: + return False + attrname = hierarchy.pop() nodename = hierarchy.pop() + def _check_is_already_connected(workflow, node, attrname): + for _, _, d in workflow._graph.in_edges(nbunch=node, data=True): + for cd in d["connect"]: + if attrname == cd[1]: + return False + return True + targetworkflow = self - for workflowname in hierarchy: + while hierarchy: + workflowname = hierarchy.pop(0) workflow = None for node in targetworkflow._graph.nodes(): if node.name == workflowname: @@ -784,6 +798,13 @@ def _has_attr(self, parameter, subtype="in"): break if workflow is None: return False + # Verify input does not already have an incoming connection + # in the hierarchy of workflows + if subtype == "in": + hierattrname = ".".join(hierarchy + [nodename, attrname]) + if not _check_is_already_connected( + targetworkflow, workflow, hierattrname): + return False targetworkflow = workflow targetnode = None @@ -804,11 +825,12 @@ def _has_attr(self, parameter, subtype="in"): if not hasattr(targetnode.outputs, attrname): return False + # Verify input does not already have an incoming connection + # in the target workflow if subtype == "in": - for _, _, d in targetworkflow._graph.in_edges(nbunch=targetnode, data=True): - for cd in d["connect"]: - if attrname == cd[1]: - return False + if not _check_is_already_connected( + targetworkflow, targetnode, attrname): + return False return True From 7963e31e998e219cfcdf06645a453fcdea56dd52 Mon Sep 17 00:00:00 2001 From: oesteban Date: Fri, 13 Mar 2020 21:52:00 -0700 Subject: [PATCH 0783/1665] FIX: Partial rollback of N4BiasFieldCorrection In the last maintenance (#3180) of the interface, we eliminated an important section of the ``_list_outputs``: https://github.com/nipy/nipype/commit/6979cbdcb041cb12b064513a57ff74b000bacdad#diff-b6f33a19b0e06b91023416db5faf7323L544-L547 This PR addresses the problem: ``` Execution Outputs ----------------- * bias_image : * output_image : /home/oesteban/tmp/fmriprep-ds005/fprep-work/fmriprep_wf/single_subject_01_wf/anat_preproc_wf/brain_extraction_wf/inu_n4_final/mapflow/_inu_n4_final0/sub-01_T1w_cor rected.nii.gz Runtime info ------------ * cmdline : N4BiasFieldCorrection --bspline-fitting [ 200 ] -d 3 --input-image /oak/stanford/groups/russpold/data/openfmri/ds000005/sub-01/anat/sub-01_T1w.nii.gz --convergence [ 50x 50x50x50x50, 1e-07 ] --output [ sub-01_T1w_corrected.nii.gz, sub-01_T1w_bias.nii.gz ] -r --shrink-factor 4 --weight-image /home/oesteban/tmp/fmriprep-ds005/fprep-work/fmriprep_wf/single_subject_01_wf/anat_preproc_wf/brain_extraction_wf/atropos_wf/copy_xform/09_relabel_wm_maths_xform.nii.gz * duration : 15.334786 * hostname : dendrite * prev_wd : /home/oesteban/tmp/fmriprep-ds005 * working_dir : 
/home/oesteban/tmp/fmriprep-ds005/fprep-work/fmriprep_wf/single_subject_01_wf/anat_preproc_wf/brain_extraction_wf/inu_n4_final/mapflow/_inu_n4_final0 ``` (`bias_image` should be `'sub-01_T1w_bias.nii.gz'` given the `cmdline`) --- nipype/interfaces/ants/segmentation.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py index d3319010d1..387fd229a1 100644 --- a/nipype/interfaces/ants/segmentation.py +++ b/nipype/interfaces/ants/segmentation.py @@ -537,6 +537,11 @@ def _parse_inputs(self, skip=None): self._out_bias_file = bias_image return super(N4BiasFieldCorrection, self)._parse_inputs(skip=skip) + def _list_outputs(self): + outputs = super(N4BiasFieldCorrection, self)._list_outputs() + if self._out_bias_file: + outputs["bias_image"] = os.path.abspath(self._out_bias_file) + return outputs class CorticalThicknessInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( From cbb1ab9dfe797efec86e54b917b9272617b89622 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 14 Mar 2020 12:39:14 -0400 Subject: [PATCH 0784/1665] NEP29+1y: Set minimum python to 3.6, numpy to 1.13 --- nipype/info.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nipype/info.py b/nipype/info.py index 7a2e4ae70e..d73d04290a 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -54,12 +54,11 @@ def get_nipype_gitversion(): "License :: OSI Approved :: Apache Software License", "Operating System :: MacOS :: MacOS X", "Operating System :: POSIX :: Linux", - "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Topic :: Scientific/Engineering", ] -PYTHON_REQUIRES = ">= 3.5" +PYTHON_REQUIRES = ">= 3.6" description = "Neuroimaging in Python: Pipelines and Interfaces" @@ -100,7 +99,7 @@ def get_nipype_gitversion(): # versions NIBABEL_MIN_VERSION = "2.1.0" NETWORKX_MIN_VERSION = "1.9" -NUMPY_MIN_VERSION = "1.12" +NUMPY_MIN_VERSION = "1.13" # Numpy bug in python 3.7: # https://www.opensourceanswers.com/blog/you-shouldnt-use-python-37-for-data-science-right-now.html NUMPY_MIN_VERSION_37 = "1.15.3" From e925c6891df9214ad6f1ea17a8ea8f29b468e484 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 14 Mar 2020 12:40:22 -0400 Subject: [PATCH 0785/1665] DOC: Update changelog --- doc/changelog/1.X.X-changelog.rst | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/doc/changelog/1.X.X-changelog.rst b/doc/changelog/1.X.X-changelog.rst index 3da15d7a68..395ea50884 100644 --- a/doc/changelog/1.X.X-changelog.rst +++ b/doc/changelog/1.X.X-changelog.rst @@ -1,3 +1,28 @@ +1.5.0 (To be determined) +========================= + +New feature release in the 1.5.x series. + +In this release, the example scripts have been split out into their own package: +`niflow-nipype1-examples `__. 
+ +(`Full changelog `__) + + * FIX: ANTs' tools maintenance overhaul (https://github.com/nipy/nipype/pull/3180) + * FIX: load_resultfile crashes if open resultsfile from crashed job (https://github.com/nipy/nipype/pull/3182) + * FIX: FSL model.py make multiple F-tests (https://github.com/nipy/nipype/pull/3166) + * ENH: Improve workflow connect performance (https://github.com/nipy/nipype/pull/3184) + * ENH: Add ``ConstrainedSphericalDeconvolution`` interface to replace ``EstimateFOD`` for MRtrix3's ``dwi2fod`` (https://github.com/nipy/nipype/pull/3176) + * ENH: Detect values for EulerNumber interface (https://github.com/nipy/nipype/pull/3173) + * ENH: Remove examples from repository (https://github.com/nipy/nipype/pull/3172) + * REF: Prefer math.gcd to hand-rolled Euclid's algorithm (https://github.com/nipy/nipype/pull/3177) + * REF: Removed all uses of numpy_mmap (https://github.com/nipy/nipype/pull/3121) + * DOC: Update links, typos in contributing guide (https://github.com/nipy/nipype/pull/3160) + * DOC: Update SelectFiles docstring to match actual behavior (https://github.com/nipy/nipype/pull/3041) + * DOC: Updated .zenodo.json file (https://github.com/nipy/nipype/pull/3167) + * DOC: Update .zenodo.json (https://github.com/nipy/nipype/pull/3165) + * MNT: Update Zenodo ordering based on commit count (https://github.com/nipy/nipype/pull/3169) + 1.4.2 (February 14, 2020) ========================= (`Full changelog `__) From 21c5b19d25a33a9f18f41b3c2f2dffd91970beff Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 14 Mar 2020 12:41:02 -0400 Subject: [PATCH 0786/1665] MNT: Update Zenodo ordering, adding Adam Kimbler to Zenodo --- .zenodo.json | 74 +++++++++++++++++++++++++++++----------------------- 1 file changed, 42 insertions(+), 32 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index 92c6f3ed17..8a57735308 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -157,11 +157,6 @@ "name": "De La Vega, Alejandro", "orcid": "0000-0001-9062-3778" }, - { - "affiliation": "Charite Universitatsmedizin Berlin, Germany", - "name": "Waller, Lea", - "orcid": "0000-0002-3239-6957" - }, { "affiliation": "MIT", "name": "Kaczmarzyk, Jakub", @@ -183,6 +178,11 @@ { "name": "Erickson, Drew" }, + { + "affiliation": "Neuroscience Program, University of Iowa", + "name": "Kent, James D.", + "orcid": "0000-0002-4892-2659" + }, { "affiliation": "Otto-von-Guericke-University Magdeburg, Germany", "name": "Hanke, Michael", @@ -195,11 +195,6 @@ { "name": "Moloney, Brendan" }, - { - "affiliation": "Neuroscience Program, University of Iowa", - "name": "Kent, James D.", - "orcid": "0000-0002-4892-2659" - }, { "affiliation": "SRI International", "name": "Nichols, B. 
Nolan", @@ -246,6 +241,11 @@ { "name": "Mordom, David" }, + { + "affiliation": "University College London", + "name": "Mancini, Matteo", + "orcid": "0000-0001-7194-4568" + }, { "affiliation": "ARAMIS LAB, Brain and Spine Institute (ICM), Paris, France.", "name": "Guillon, Je\u0301re\u0301my", @@ -291,16 +291,16 @@ { "name": "Salvatore, John" }, - { - "affiliation": "University College London", - "name": "Mancini, Matteo", - "orcid": "0000-0001-7194-4568" - }, { "affiliation": "CNRS LTCI, Telecom ParisTech, Universit\u00e9 Paris-Saclay", "name": "Gramfort, Alexandre", "orcid": "0000-0001-9791-4404" }, + { + "affiliation": "Department of Psychology, University of Bielefeld, Bielefeld, Germany.", + "name": "Doll, Anna", + "orcid": "0000-0002-0799-0831" + }, { "name": "Buchanan, Colin" }, @@ -393,9 +393,14 @@ "orcid": "0000-0003-2766-8425" }, { - "affiliation": "Department of Psychology, University of Bielefeld, Bielefeld, Germany.", - "name": "Doll, Anna", - "orcid": "0000-0002-0799-0831" + "affiliation": "NIMH, Scientific and Statistical Computing Core", + "name": "Glen, Daniel", + "orcid": "0000-0001-8456-5647" + }, + { + "affiliation": "Technische Universit\u00e4t Dresden, Faculty of Medicine, Department of Child and Adolescent Psychiatry", + "name": "Geisler, Daniel", + "orcid": "0000-0003-2076-5329" }, { "affiliation": "University of Iowa", @@ -406,6 +411,11 @@ "name": "Triplett, William", "orcid": "0000-0002-9546-1306" }, + { + "affiliation": "The University of Iowa", + "name": "Ghayoor, Ali", + "orcid": "0000-0002-8858-1254" + }, { "affiliation": "Child Mind Institute", "name": "Craddock, R. Cameron", @@ -450,12 +460,12 @@ "name": "Rothmei, Simon" }, { - "name": "Weinstein, Alejandro" + "affiliation": "Korea Advanced Institute of Science and Technology", + "name": "Kim, Sin", + "orcid": "0000-0003-4652-3758" }, { - "affiliation": "The University of Iowa", - "name": "Ghayoor, Ali", - "orcid": "0000-0002-8858-1254" + "name": "Weinstein, Alejandro" }, { "affiliation": "University of Pennsylvania", @@ -508,6 +518,11 @@ "name": "Linkersd\u00f6rfer, Janosch", "orcid": "0000-0002-1577-1233" }, + { + "affiliation": "Charite Universitatsmedizin Berlin, Germany", + "name": "Waller, Lea", + "orcid": "0000-0002-3239-6957" + }, { "name": "Renfro, Mandy" }, @@ -521,11 +536,6 @@ { "name": "K\u00fcttner, Ren\u00e9" }, - { - "affiliation": "Korea Advanced Institute of Science and Technology", - "name": "Kim, Sin", - "orcid": "0000-0003-4652-3758" - }, { "affiliation": "California Institute of Technology", "name": "Pauli, Wolfgang M.", @@ -536,6 +546,11 @@ "name": "Glen, Daniel", "orcid": "0000-0001-8456-5647" }, + { + "affiliation": "Florida International University", + "name": "Kimbler, Adam", + "orcid": "0000-0001-5885-9596" + }, { "affiliation": "University of Pittsburgh", "name": "Meyers, Benjamin", @@ -544,11 +559,6 @@ { "name": "Tarbert, Claire" }, - { - "affiliation": "Technische Universit\u00e4t Dresden, Faculty of Medicine, Department of Child and Adolescent Psychiatry", - "name": "Geisler, Daniel", - "orcid": "0000-0003-2076-5329" - }, { "name": "Ginsburg, Daniel" }, From c9d59f4fb24f242c6608f88c517956b4e10f38c6 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 14 Mar 2020 12:44:02 -0400 Subject: [PATCH 0787/1665] MNT: Fix SyntaxError, and disable telemetry for checkspecs.py --- tools/checkspecs.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/checkspecs.py b/tools/checkspecs.py index e06f862338..032fd122cc 100644 --- a/tools/checkspecs.py +++ 
b/tools/checkspecs.py @@ -8,8 +8,6 @@ import sys import warnings -from nipype.interfaces.base import BaseInterface - import black @@ -182,6 +180,8 @@ def test_specs(self, uri): Returns ------- """ + from nipype.interfaces.base import BaseInterface + # get the names of all classes and functions _, classes = self._parse_module(uri) if not classes: @@ -480,6 +480,7 @@ def check_modules(self): if __name__ == "__main__": + os.environ["NIPYPE_NO_ET"] = "1" package = "nipype" ic = InterfaceChecker(package) # Packages that should not be included in generated API docs. From b8722157d0d4f7a8015cb02a9eb5538436efa736 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 14 Mar 2020 12:44:11 -0400 Subject: [PATCH 0788/1665] FIX: add usedefault metadata to trait with default --- nipype/interfaces/io.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index 5dd977dfd9..1156695b5a 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -3005,7 +3005,9 @@ class ExportFileInputSpec(BaseInterfaceInputSpec): in_file = File(exists=True, mandatory=True, desc="Input file name") out_file = File(mandatory=True, desc="Output file name") check_extension = traits.Bool( - True, desc="Ensure that the input and output file extensions match" + True, + usedefault=True, + desc="Ensure that the input and output file extensions match", ) clobber = traits.Bool(desc="Permit overwriting existing files") From 33f3a8028e5cf5b23dbc42a44c43952b563fd146 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 14 Mar 2020 12:44:15 -0400 Subject: [PATCH 0789/1665] TEST: make specs --- nipype/interfaces/tests/test_auto_ExportFile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/tests/test_auto_ExportFile.py b/nipype/interfaces/tests/test_auto_ExportFile.py index 8dd84b29b9..331b51d8c2 100644 --- a/nipype/interfaces/tests/test_auto_ExportFile.py +++ b/nipype/interfaces/tests/test_auto_ExportFile.py @@ -4,7 +4,7 @@ def test_ExportFile_inputs(): input_map = dict( - check_extension=dict(), + check_extension=dict(usedefault=True,), clobber=dict(), in_file=dict(extensions=None, mandatory=True,), out_file=dict(extensions=None, mandatory=True,), From faef7d0f93013a700c882f709e98fb3cd36ebb03 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 14 Mar 2020 12:44:20 -0400 Subject: [PATCH 0790/1665] STY: black --- nipype/interfaces/base/traits_extension.py | 1 + nipype/interfaces/freesurfer/utils.py | 8 +++--- nipype/interfaces/fsl/tests/test_model.py | 29 ++++++++++++++++------ nipype/interfaces/io.py | 4 +-- nipype/pipeline/engine/workflows.py | 6 ++--- 5 files changed, 33 insertions(+), 15 deletions(-) diff --git a/nipype/interfaces/base/traits_extension.py b/nipype/interfaces/base/traits_extension.py index cbfe24e676..e3b54eb7cb 100644 --- a/nipype/interfaces/base/traits_extension.py +++ b/nipype/interfaces/base/traits_extension.py @@ -26,6 +26,7 @@ import traits.api as traits from traits.api import TraitType, Unicode from traits.trait_base import _Undefined + try: # Moved in traits 6.0 from traits.trait_type import NoDefaultSpecified diff --git a/nipype/interfaces/freesurfer/utils.py b/nipype/interfaces/freesurfer/utils.py index 5a4d6ca425..4a526cdca8 100644 --- a/nipype/interfaces/freesurfer/utils.py +++ b/nipype/interfaces/freesurfer/utils.py @@ -2592,8 +2592,10 @@ class EulerNumberInputSpec(FSTraitedSpec): class EulerNumberOutputSpec(TraitedSpec): - euler = traits.Int(desc="Euler number of cortical surface. 
A value of 2 signals a " - "topologically correct surface model with no holes") + euler = traits.Int( + desc="Euler number of cortical surface. A value of 2 signals a " + "topologically correct surface model with no holes" + ) defects = traits.Int(desc="Number of defects") @@ -2621,7 +2623,7 @@ def _run_interface(self, runtime): def _parse_output(self, stdout, stderr): """Parse stdout / stderr and extract defects""" - m = re.search(r'(?<=total defect index = )\d+', stdout or stderr) + m = re.search(r"(?<=total defect index = )\d+", stdout or stderr) if m is None: raise RuntimeError("Could not fetch defect index") self._defects = int(m.group()) diff --git a/nipype/interfaces/fsl/tests/test_model.py b/nipype/interfaces/fsl/tests/test_model.py index 456e7b6492..8c12f04fa4 100644 --- a/nipype/interfaces/fsl/tests/test_model.py +++ b/nipype/interfaces/fsl/tests/test_model.py @@ -7,18 +7,25 @@ import nipype.interfaces.fsl.model as fsl from nipype.interfaces.fsl import no_fsl from pathlib import Path -from ....pipeline import engine as pe +from ....pipeline import engine as pe @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") def test_MultipleRegressDesign(tmpdir): - designer = pe.Node(fsl.MultipleRegressDesign(), name='designer', base_dir=str(tmpdir)) + designer = pe.Node( + fsl.MultipleRegressDesign(), name="designer", base_dir=str(tmpdir) + ) designer.inputs.regressors = dict( voice_stenght=[1, 1, 1], age=[0.2, 0.4, 0.5], BMI=[1, -1, 2] ) con1 = ["voice_and_age", "T", ["age", "voice_stenght"], [0.5, 0.5]] con2 = ["just_BMI", "T", ["BMI"], [1]] - designer.inputs.contrasts = [con1, con2, ["con3", "F", [con1, con2]], ["con4", "F", [con2]]] + designer.inputs.contrasts = [ + con1, + con2, + ["con3", "F", [con1, con2]], + ["con4", "F", [con2]], + ] res = designer.run() outputs = res.outputs.get_traitsfree() @@ -27,7 +34,9 @@ def test_MultipleRegressDesign(tmpdir): expected_content = {} - expected_content["design_mat"] = """/NumWaves 3 + expected_content[ + "design_mat" + ] = """/NumWaves 3 /NumPoints 3 /PPheights 3.000000e+00 5.000000e-01 1.000000e+00 @@ -37,7 +46,9 @@ def test_MultipleRegressDesign(tmpdir): 2.000000e+00 5.000000e-01 1.000000e+00 """ - expected_content["design_con"] = """/ContrastName1 voice_and_age + expected_content[ + "design_con" + ] = """/ContrastName1 voice_and_age /ContrastName2 just_BMI /NumWaves 3 /NumContrasts 2 @@ -49,7 +60,9 @@ def test_MultipleRegressDesign(tmpdir): 1.000000e+00 0.000000e+00 0.000000e+00 """ - expected_content["design_fts"] = """/NumWaves 2 + expected_content[ + "design_fts" + ] = """/NumWaves 2 /NumContrasts 2 /Matrix @@ -57,7 +70,9 @@ def test_MultipleRegressDesign(tmpdir): 0 1 """ - expected_content["design_grp"] = """/NumWaves 1 + expected_content[ + "design_grp" + ] = """/NumWaves 1 /NumPoints 3 /Matrix diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index 1156695b5a..d9b578caaa 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -450,7 +450,7 @@ def _check_s3_base_dir(self): s3_flag = base_directory.lower().startswith(s3_str) if s3_flag: - bucket_name = base_directory[len(s3_str):].partition('/')[0] + bucket_name = base_directory[len(s3_str) :].partition("/")[0] return s3_flag, bucket_name @@ -610,7 +610,7 @@ def _upload_to_s3(self, bucket, src, dst): # Explicitly lower-case the "s3" if dst.lower().startswith(s3_str): - dst = s3_str + dst[len(s3_str):] + dst = s3_str + dst[len(s3_str) :] # If src is a directory, collect files (this assumes dst is a dir too) if os.path.isdir(src): diff --git 
a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py index bc532ddf90..0f50b9a5aa 100644 --- a/nipype/pipeline/engine/workflows.py +++ b/nipype/pipeline/engine/workflows.py @@ -803,7 +803,8 @@ def _check_is_already_connected(workflow, node, attrname): if subtype == "in": hierattrname = ".".join(hierarchy + [nodename, attrname]) if not _check_is_already_connected( - targetworkflow, workflow, hierattrname): + targetworkflow, workflow, hierattrname + ): return False targetworkflow = workflow @@ -828,8 +829,7 @@ def _check_is_already_connected(workflow, node, attrname): # Verify input does not already have an incoming connection # in the target workflow if subtype == "in": - if not _check_is_already_connected( - targetworkflow, targetnode, attrname): + if not _check_is_already_connected(targetworkflow, targetnode, attrname): return False return True From 79f11d952c1437b05f54a8c1c8c745b3ea8eeded Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 14 Mar 2020 12:44:26 -0400 Subject: [PATCH 0791/1665] MNT: Add Zenodo blacklist --- tools/update_zenodo.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/update_zenodo.py b/tools/update_zenodo.py index cece8d53fd..e63c2ed2e7 100755 --- a/tools/update_zenodo.py +++ b/tools/update_zenodo.py @@ -14,6 +14,9 @@ def decommify(name): # These names should go last CREATORS_LAST = ["Krzysztof J. Gorgolewski", "Satrajit Ghosh"] +# Contributors that have requested not to be cited (or bothered) +BLACKLIST = {"Jonathan R. Williford"} + if __name__ == "__main__": git_root = Path(git.Repo(".", search_parent_directories=True).working_dir) zenodo_file = git_root / ".zenodo.json" @@ -44,7 +47,8 @@ def decommify(name): ) match, score = matches[0] if score <= 80: - print("No entry to sort:", committer) + if committer not in BLACKLIST: + print("No entry to sort:", committer) continue existing_creators.discard(match) committers.append(match) From 3c1c6fad6b5b12fbe9f27a23c6e271f1915976f4 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 14 Mar 2020 22:20:37 -0400 Subject: [PATCH 0792/1665] CI: Drop Python 3.5 tests --- .travis.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 3e4326c5a4..5ea5869f19 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,7 +4,6 @@ sudo: true language: python # our build matrix python: -- 3.5 - 3.6 - 3.7 From ff72cd7f58d557d46477e22aaff53e60f62d1ff7 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 15 Mar 2020 11:28:48 -0400 Subject: [PATCH 0793/1665] DOC: Update changelog --- doc/changelog/1.X.X-changelog.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/changelog/1.X.X-changelog.rst b/doc/changelog/1.X.X-changelog.rst index 395ea50884..239aa7d936 100644 --- a/doc/changelog/1.X.X-changelog.rst +++ b/doc/changelog/1.X.X-changelog.rst @@ -8,6 +8,7 @@ In this release, the example scripts have been split out into their own package: (`Full changelog `__) + * FIX: Partial rollback of N4BiasFieldCorrection (https://github.com/nipy/nipype/pull/3188) * FIX: ANTs' tools maintenance overhaul (https://github.com/nipy/nipype/pull/3180) * FIX: load_resultfile crashes if open resultsfile from crashed job (https://github.com/nipy/nipype/pull/3182) * FIX: FSL model.py make multiple F-tests (https://github.com/nipy/nipype/pull/3166) From d94a8da5ebb99b68ec6f4e79f0a4df891bf1ca02 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 15 Mar 2020 12:01:21 -0400 Subject: [PATCH 0794/1665] REL: 1.5.0-rc1 --- nipype/info.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/nipype/info.py b/nipype/info.py index d73d04290a..950ac2fddb 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -5,7 +5,7 @@ # nipype version information # Remove -dev for release -__version__ = "1.5.0-dev" +__version__ = "1.5.0-rc1" def get_nipype_gitversion(): From b17a21bf0fffd209f69efa9cb2e3275ff56d81f8 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 15 Mar 2020 12:09:56 -0400 Subject: [PATCH 0795/1665] MNT: 1.5.0-rc1.post-dev --- nipype/info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/info.py b/nipype/info.py index 950ac2fddb..0dce44deaa 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -5,7 +5,7 @@ # nipype version information # Remove -dev for release -__version__ = "1.5.0-rc1" +__version__ = "1.5.0-rc1.post-dev" def get_nipype_gitversion(): From 636452107d6d3aa83973220f2637d2600631ebf4 Mon Sep 17 00:00:00 2001 From: sulantha2006 Date: Fri, 20 Mar 2020 17:37:21 -0700 Subject: [PATCH 0796/1665] Adding reverse_forward_transforms and reverse_forward_invert_flags to ANTS Registration outputs. --- nipype/interfaces/ants/registration.py | 10 ++++++++++ nipype/interfaces/ants/tests/test_auto_Registration.py | 2 ++ 2 files changed, 12 insertions(+) diff --git a/nipype/interfaces/ants/registration.py b/nipype/interfaces/ants/registration.py index cb8e363c04..0f6e602591 100644 --- a/nipype/interfaces/ants/registration.py +++ b/nipype/interfaces/ants/registration.py @@ -591,12 +591,18 @@ class RegistrationOutputSpec(TraitedSpec): forward_transforms = traits.List( File(exists=True), desc="List of output transforms for forward registration" ) + reverse_forward_transforms = traits.List( + File(exists=True), desc="List of output transforms for forward registration reversed for antsApplyTransform" + ) reverse_transforms = traits.List( File(exists=True), desc="List of output transforms for reverse registration" ) forward_invert_flags = traits.List( traits.Bool(), desc="List of flags corresponding to the forward transforms" ) + reverse_forward_invert_flags = traits.List( + traits.Bool(), desc="List of flags corresponding to the forward transforms reversed for antsApplyTransform" + ) reverse_invert_flags = traits.List( traits.Bool(), desc="List of flags corresponding to the reverse transforms" ) @@ -1472,6 +1478,10 @@ def _list_outputs(self): outputs["metric_value"] = self._metric_value if self._elapsed_time: outputs["elapsed_time"] = self._elapsed_time + + outputs["reverse_forward_transforms"] = outputs["forward_transforms"][::-1] + outputs["reverse_forward_invert_flags"] = outputs["forward_invert_flags"][::-1] + return outputs diff --git a/nipype/interfaces/ants/tests/test_auto_Registration.py b/nipype/interfaces/ants/tests/test_auto_Registration.py index 33921e8638..dd49cf1eaf 100644 --- a/nipype/interfaces/ants/tests/test_auto_Registration.py +++ b/nipype/interfaces/ants/tests/test_auto_Registration.py @@ -94,6 +94,8 @@ def test_Registration_outputs(): elapsed_time=dict(), forward_invert_flags=dict(), forward_transforms=dict(), + reverse_forward_invert_flags=dict(), + reverse_forward_transforms=dict(), inverse_composite_transform=dict(extensions=None,), inverse_warped_image=dict(extensions=None,), metric_value=dict(), From fdd0955b250fcc1208703c2183437a2c6941df65 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 23 Mar 2020 15:39:36 -0400 Subject: [PATCH 0797/1665] TEST: Mock terminal output before testing changing default value --- nipype/interfaces/base/tests/test_core.py | 20 +++++++++++--------- 1 
file changed, 11 insertions(+), 9 deletions(-) diff --git a/nipype/interfaces/base/tests/test_core.py b/nipype/interfaces/base/tests/test_core.py index d7e2620c9b..e97a5bab79 100644 --- a/nipype/interfaces/base/tests/test_core.py +++ b/nipype/interfaces/base/tests/test_core.py @@ -5,6 +5,7 @@ import simplejson as json import pytest +from unittest import mock from .... import config from ....testing import example_data @@ -456,17 +457,18 @@ def test_global_CommandLine_output(tmpdir): ci = BET() assert ci.terminal_output == "stream" # default case - nib.CommandLine.set_default_terminal_output("allatonce") - ci = nib.CommandLine(command="ls -l") - assert ci.terminal_output == "allatonce" + with mock.patch.object(nib.CommandLine, '_terminal_output'): + nib.CommandLine.set_default_terminal_output("allatonce") + ci = nib.CommandLine(command="ls -l") + assert ci.terminal_output == "allatonce" - nib.CommandLine.set_default_terminal_output("file") - ci = nib.CommandLine(command="ls -l") - assert ci.terminal_output == "file" + nib.CommandLine.set_default_terminal_output("file") + ci = nib.CommandLine(command="ls -l") + assert ci.terminal_output == "file" - # Check default affects derived interfaces - ci = BET() - assert ci.terminal_output == "file" + # Check default affects derived interfaces + ci = BET() + assert ci.terminal_output == "file" def test_CommandLine_prefix(tmpdir): From 4061b76ecedd1e2f3576ad47ae47ed384379f6bd Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 23 Mar 2020 16:41:00 -0400 Subject: [PATCH 0798/1665] TEST: Remove xfail from test_dcm2niix_dti --- nipype/interfaces/tests/test_extra_dcm2nii.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nipype/interfaces/tests/test_extra_dcm2nii.py b/nipype/interfaces/tests/test_extra_dcm2nii.py index e76f300ec6..67e45d9626 100644 --- a/nipype/interfaces/tests/test_extra_dcm2nii.py +++ b/nipype/interfaces/tests/test_extra_dcm2nii.py @@ -32,7 +32,6 @@ def _fetch_data(datadir, dicoms): @pytest.mark.skipif(no_datalad, reason="Datalad required") @pytest.mark.skipif(no_dcm2niix, reason="Dcm2niix required") -@pytest.mark.xfail(reason="Intermittent failures. 
Let's come back to this later.") def test_dcm2niix_dti(fetch_data, tmpdir): tmpdir.chdir() datadir = tmpdir.mkdir("data").strpath From f8a2e53e79f3eab173693c82b45f43a07299d27b Mon Sep 17 00:00:00 2001 From: sulantha2006 Date: Mon, 23 Mar 2020 16:57:34 -0700 Subject: [PATCH 0799/1665] doctest: +ELLIPSIS fix --- nipype/interfaces/ants/registration.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/nipype/interfaces/ants/registration.py b/nipype/interfaces/ants/registration.py index 0f6e602591..b9b6700dcd 100644 --- a/nipype/interfaces/ants/registration.py +++ b/nipype/interfaces/ants/registration.py @@ -803,6 +803,8 @@ class Registration(ANTSCommand): 'inverse_composite_transform': '...data/output_InverseComposite.h5', 'inverse_warped_image': , 'metric_value': , + 'reverse_forward_invert_flags': [], + 'reverse_forward_transforms': [], 'reverse_invert_flags': [], 'reverse_transforms': [], 'save_state': '...data/trans.mat', @@ -832,6 +834,9 @@ class Registration(ANTSCommand): 'inverse_composite_transform': , 'inverse_warped_image': , 'metric_value': , + 'reverse_forward_invert_flags': [False, False], + 'reverse_forward_transforms': ['...data/output_1Warp.nii.gz', + '...data/output_0GenericAffine.mat'], 'reverse_invert_flags': [True, False], 'reverse_transforms': ['...data/output_0GenericAffine.mat', \ '...data/output_1InverseWarp.nii.gz'], From f6ba8094427fbcb928a2049b0a24f4624fa79214 Mon Sep 17 00:00:00 2001 From: sulantha2006 Date: Mon, 23 Mar 2020 17:31:24 -0700 Subject: [PATCH 0800/1665] Fix test_auto_Registration.py --- nipype/interfaces/ants/tests/test_auto_Registration.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/ants/tests/test_auto_Registration.py b/nipype/interfaces/ants/tests/test_auto_Registration.py index dd49cf1eaf..151a852820 100644 --- a/nipype/interfaces/ants/tests/test_auto_Registration.py +++ b/nipype/interfaces/ants/tests/test_auto_Registration.py @@ -94,11 +94,11 @@ def test_Registration_outputs(): elapsed_time=dict(), forward_invert_flags=dict(), forward_transforms=dict(), - reverse_forward_invert_flags=dict(), - reverse_forward_transforms=dict(), inverse_composite_transform=dict(extensions=None,), inverse_warped_image=dict(extensions=None,), metric_value=dict(), + reverse_forward_invert_flags=dict(), + reverse_forward_transforms=dict(), reverse_invert_flags=dict(), reverse_transforms=dict(), save_state=dict(extensions=None,), From 199dc2e3e1a6c5dd0f8f35b6a6497015325ee85f Mon Sep 17 00:00:00 2001 From: Fernando Perez-Garcia Date: Tue, 24 Mar 2020 17:31:59 +0000 Subject: [PATCH 0801/1665] Use PackageInfo to get NiftyReg version Resolves #2615. --- nipype/interfaces/niftyreg/base.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/nipype/interfaces/niftyreg/base.py b/nipype/interfaces/niftyreg/base.py index aa343dcfcb..cf928cf331 100644 --- a/nipype/interfaces/niftyreg/base.py +++ b/nipype/interfaces/niftyreg/base.py @@ -19,7 +19,8 @@ import os from ... 
import logging -from ..base import CommandLine, CommandLineInputSpec, traits, Undefined +from ..base import (CommandLine, CommandLineInputSpec, traits, Undefined, + PackageInfo) from ...utils.filemanip import split_filename iflogger = logging.getLogger("nipype.interface") @@ -29,6 +30,14 @@ def get_custom_path(command, env_dir="NIFTYREGDIR"): return os.path.join(os.getenv(env_dir, ""), command) +class Info(PackageInfo): + version_cmd = get_custom_path('reg_aladin') + ' --version' + + @staticmethod + def parse_version(raw_info): + return raw_info + + class NiftyRegCommandInputSpec(CommandLineInputSpec): """Input Spec for niftyreg interfaces.""" @@ -55,9 +64,8 @@ def __init__(self, required_version=None, **inputs): self.num_threads = 1 super(NiftyRegCommand, self).__init__(**inputs) self.required_version = required_version - _version = self.version_from_command() + _version = self.version if _version: - _version = _version.decode("utf-8") if self._min_version is not None and StrictVersion( _version ) < StrictVersion(self._min_version): @@ -91,7 +99,7 @@ def _environ_update(self): self.inputs.omp_core_val = Undefined def check_version(self): - _version = self.version_from_command() + _version = self.version if not _version: raise Exception("Niftyreg not found") # Decoding to string: @@ -107,10 +115,10 @@ def check_version(self): @property def version(self): - return self.version_from_command() + return Info.version() def exists(self): - return self.version_from_command() is not None + return self.version is not None def _format_arg(self, name, spec, value): if name == "omp_core_val": From 328bd4ee164e6f81b3048fbc13595ee5cfd62831 Mon Sep 17 00:00:00 2001 From: Fernando Perez-Garcia Date: Tue, 24 Mar 2020 19:32:06 +0000 Subject: [PATCH 0802/1665] Remove version decoding --- nipype/interfaces/niftyreg/base.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nipype/interfaces/niftyreg/base.py b/nipype/interfaces/niftyreg/base.py index cf928cf331..88e441d52a 100644 --- a/nipype/interfaces/niftyreg/base.py +++ b/nipype/interfaces/niftyreg/base.py @@ -102,8 +102,6 @@ def check_version(self): _version = self.version if not _version: raise Exception("Niftyreg not found") - # Decoding to string: - _version = _version.decode("utf-8") if StrictVersion(_version) < StrictVersion(self._min_version): err = "A later version of Niftyreg is required (%s < %s)" raise ValueError(err % (_version, self._min_version)) From 5c69cd187ff3eb170649cd56dda5ba52046298d1 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sat, 28 Mar 2020 22:13:05 -0400 Subject: [PATCH 0803/1665] TEST: Explicitly specify dataset in datalad get call --- nipype/interfaces/tests/test_extra_dcm2nii.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/tests/test_extra_dcm2nii.py b/nipype/interfaces/tests/test_extra_dcm2nii.py index 67e45d9626..0b0c132f7d 100644 --- a/nipype/interfaces/tests/test_extra_dcm2nii.py +++ b/nipype/interfaces/tests/test_extra_dcm2nii.py @@ -22,7 +22,7 @@ def _fetch_data(datadir, dicoms): """Fetches some test DICOMs using datalad""" api.install(path=datadir, source=DICOM_DIR) data = os.path.join(datadir, dicoms) - api.get(path=data) + api.get(path=data, dataset=datadir) except IncompleteResultsError as exc: pytest.skip("Failed to fetch test data: %s" % str(exc)) return data From 933f64f769d66670b750a16b77de6c45f9b05b80 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 24 Mar 2020 12:21:37 -0400 Subject: [PATCH 0804/1665] TEST: Skip PBS test in absence of qsub, use tmp_path fixture --- nipype/pipeline/plugins/tests/test_pbs.py | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/nipype/pipeline/plugins/tests/test_pbs.py b/nipype/pipeline/plugins/tests/test_pbs.py index bb85443940..64d0d77b5d 100644 --- a/nipype/pipeline/plugins/tests/test_pbs.py +++ b/nipype/pipeline/plugins/tests/test_pbs.py @@ -1,8 +1,5 @@ # -*- coding: utf-8 -*- -import os -from shutil import rmtree -from tempfile import mkdtemp -from time import sleep +from shutil import which import nipype.interfaces.base as nib import pytest @@ -32,22 +29,15 @@ def _list_outputs(self): return outputs -@pytest.mark.xfail(reason="not known") -def test_run_pbsgraph(): - cur_dir = os.getcwd() - temp_dir = mkdtemp(prefix="test_engine_") - os.chdir(temp_dir) - - pipe = pe.Workflow(name="pipe") +@pytest.mark.skipif(which('qsub') is None, reason="PBS not installed") +def test_run_pbsgraph(tmp_path): + pipe = pe.Workflow(name="pipe", base_dir=str(tmp_path)) mod1 = pe.Node(interface=PbsTestInterface(), name="mod1") mod2 = pe.MapNode(interface=PbsTestInterface(), iterfield=["input1"], name="mod2") pipe.connect([(mod1, mod2, [("output1", "input1")])]) - pipe.base_dir = os.getcwd() mod1.inputs.input1 = 1 execgraph = pipe.run(plugin="PBSGraph") names = [".".join((node._hierarchy, node.name)) for node in execgraph.nodes()] node = list(execgraph.nodes())[names.index("pipe.mod1")] result = node.get_output("output1") assert result == [1, 1] - os.chdir(cur_dir) - rmtree(temp_dir) From 46b505c1355ee7c45ba3d53d084229600652523c Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 24 Mar 2020 12:47:39 -0400 Subject: [PATCH 0805/1665] TEST: Skip OAR test in absence of oarsub, use tmp_path fixture --- nipype/pipeline/plugins/tests/test_oar.py | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/nipype/pipeline/plugins/tests/test_oar.py b/nipype/pipeline/plugins/tests/test_oar.py index 1024daaef9..cd3bf9606b 100644 --- a/nipype/pipeline/plugins/tests/test_oar.py +++ b/nipype/pipeline/plugins/tests/test_oar.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- -import os -from shutil import rmtree -from tempfile import mkdtemp +from shutil import which import nipype.interfaces.base as nib import pytest @@ -31,13 +29,9 @@ def _list_outputs(self): return outputs -@pytest.mark.xfail(reason="not known") -def test_run_oar(): - cur_dir = os.getcwd() - temp_dir = mkdtemp(prefix="test_engine_", dir=os.getcwd()) - os.chdir(temp_dir) - - pipe = 
pe.Workflow(name="pipe") +@pytest.mark.skipif(which('oarsub') is None, reason="OAR not installed") +def test_run_pbsgraph(tmp_path): + pipe = pe.Workflow(name="pipe", base_dir=str(tmp_path)) mod1 = pe.Node(interface=OarTestInterface(), name="mod1") mod2 = pe.MapNode(interface=OarTestInterface(), iterfield=["input1"], name="mod2") pipe.connect([(mod1, mod2, [("output1", "input1")])]) @@ -48,5 +42,3 @@ def test_run_oar(): node = list(execgraph.nodes())[names.index("pipe.mod1")] result = node.get_output("output1") assert result == [1, 1] - os.chdir(cur_dir) - rmtree(temp_dir) From 35f83b95667eed273587c5f70e8bbfcbc4d0ed82 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 25 Mar 2020 09:16:10 -0400 Subject: [PATCH 0806/1665] TEST: Raise SkipTest when TempFATFS fails --- nipype/testing/tests/test_utils.py | 10 ++++---- nipype/utils/tests/test_filemanip.py | 37 ++++++++++++++-------------- 2 files changed, 23 insertions(+), 24 deletions(-) diff --git a/nipype/testing/tests/test_utils.py b/nipype/testing/tests/test_utils.py index fb2992b7e6..b2c8a296d2 100644 --- a/nipype/testing/tests/test_utils.py +++ b/nipype/testing/tests/test_utils.py @@ -7,7 +7,8 @@ import os import warnings import subprocess -from mock import patch, MagicMock +from unittest.mock import patch, MagicMock +from unittest import SkipTest from nipype.testing.utils import TempFATFS @@ -15,10 +16,9 @@ def test_tempfatfs(): try: fatfs = TempFATFS() except (IOError, OSError): - warnings.warn("Cannot mount FAT filesystems with FUSE") - else: - with fatfs as tmp_dir: - assert os.path.exists(tmp_dir) + raise SkipTest("Cannot mount FAT filesystems with FUSE") + with fatfs as tmp_dir: + assert os.path.exists(tmp_dir) @patch( diff --git a/nipype/utils/tests/test_filemanip.py b/nipype/utils/tests/test_filemanip.py index e8e317935f..9c54ff02ee 100644 --- a/nipype/utils/tests/test_filemanip.py +++ b/nipype/utils/tests/test_filemanip.py @@ -3,10 +3,9 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: import os import time -import warnings from pathlib import Path -import mock +from unittest import mock, SkipTest import pytest from ...testing import TempFATFS from ...utils.filemanip import ( @@ -238,22 +237,22 @@ def test_copyfallback(_temp_analyze_files): try: fatfs = TempFATFS() except (IOError, OSError): - warnings.warn("Fuse mount failed. copyfile fallback tests skipped.") - else: - with fatfs as fatdir: - tgt_img = os.path.join(fatdir, imgname) - tgt_hdr = os.path.join(fatdir, hdrname) - for copy in (True, False): - for use_hardlink in (True, False): - copyfile(orig_img, tgt_img, copy=copy, use_hardlink=use_hardlink) - assert os.path.exists(tgt_img) - assert os.path.exists(tgt_hdr) - assert not os.path.islink(tgt_img) - assert not os.path.islink(tgt_hdr) - assert not os.path.samefile(orig_img, tgt_img) - assert not os.path.samefile(orig_hdr, tgt_hdr) - os.unlink(tgt_img) - os.unlink(tgt_hdr) + raise SkipTest("Fuse mount failed. 
copyfile fallback tests skipped.") + + with fatfs as fatdir: + tgt_img = os.path.join(fatdir, imgname) + tgt_hdr = os.path.join(fatdir, hdrname) + for copy in (True, False): + for use_hardlink in (True, False): + copyfile(orig_img, tgt_img, copy=copy, use_hardlink=use_hardlink) + assert os.path.exists(tgt_img) + assert os.path.exists(tgt_hdr) + assert not os.path.islink(tgt_img) + assert not os.path.islink(tgt_hdr) + assert not os.path.samefile(orig_img, tgt_img) + assert not os.path.samefile(orig_hdr, tgt_hdr) + os.unlink(tgt_img) + os.unlink(tgt_hdr) def test_get_related_files(_temp_analyze_files): @@ -295,7 +294,7 @@ def test_ensure_list(filename, expected): @pytest.mark.parametrize( - "list, expected", [(["foo.nii"], "foo.nii"), (["foo", "bar"], ["foo", "bar"]),] + "list, expected", [(["foo.nii"], "foo.nii"), (["foo", "bar"], ["foo", "bar"])] ) def test_simplify_list(list, expected): x = simplify_list(list) From 5fed3fbff0419c597591ca1d52a97303966f0ce7 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 25 Mar 2020 09:22:33 -0400 Subject: [PATCH 0807/1665] TEST: Require rdflib 5.0 for prov tests --- nipype/utils/tests/test_provenance.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/nipype/utils/tests/test_provenance.py b/nipype/utils/tests/test_provenance.py index 393a66b6b2..159a59ba7a 100644 --- a/nipype/utils/tests/test_provenance.py +++ b/nipype/utils/tests/test_provenance.py @@ -2,10 +2,19 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os +from nibabel.optpkg import optional_package +import pytest + +_, have_rdflib5, _ = optional_package("rdflib", min_version="5.0.0") from nipype.utils.provenance import ProvStore, safe_encode +needs_rdflib5 = pytest.mark.skipif( + not have_rdflib5, reason="Test requires rdflib 5.0.0 or higher" +) + +@needs_rdflib5 def test_provenance(tmpdir): from nipype.interfaces.base import CommandLine @@ -17,6 +26,7 @@ def test_provenance(tmpdir): assert "echo hello" in provn +@needs_rdflib5 def test_provenance_exists(tmpdir): tmpdir.chdir() from nipype import config From e78bda18c802f528b7122c127504f1ee2741c85f Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sat, 28 Mar 2020 23:12:10 -0400 Subject: [PATCH 0808/1665] TEST: Purge mock for unittest.mock --- nipype/algorithms/tests/test_TSNR.py | 6 ++---- nipype/info.py | 1 - nipype/pipeline/plugins/tests/test_tools.py | 2 +- nipype/utils/tests/test_config.py | 2 +- 4 files changed, 4 insertions(+), 7 deletions(-) diff --git a/nipype/algorithms/tests/test_TSNR.py b/nipype/algorithms/tests/test_TSNR.py index e00bf35e05..b9de248155 100644 --- a/nipype/algorithms/tests/test_TSNR.py +++ b/nipype/algorithms/tests/test_TSNR.py @@ -7,7 +7,7 @@ import pytest import numpy.testing as npt -import mock +from unittest import mock import nibabel as nb import numpy as np import os @@ -16,9 +16,7 @@ class TestTSNR: """ Note: Tests currently do a poor job of testing functionality """ - in_filenames = { - "in_file": "tsnrinfile.nii", - } + in_filenames = {"in_file": "tsnrinfile.nii"} out_filenames = { # default output file names "detrended_file": "detrend.nii.gz", diff --git a/nipype/info.py b/nipype/info.py index 7a2e4ae70e..6e1dc06742 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -158,7 +158,6 @@ def get_nipype_gitversion(): TESTS_REQUIRES = [ "codecov", "coverage<5", - "mock", "pytest", "pytest-cov", "pytest-env", diff --git a/nipype/pipeline/plugins/tests/test_tools.py b/nipype/pipeline/plugins/tests/test_tools.py index 17b435bbb6..b1ff7e09ac 100644 --- a/nipype/pipeline/plugins/tests/test_tools.py +++ b/nipype/pipeline/plugins/tests/test_tools.py @@ -7,7 +7,7 @@ import scipy.sparse as ssp import re -import mock +from unittest import mock from nipype.pipeline.plugins.tools import report_crash diff --git a/nipype/utils/tests/test_config.py b/nipype/utils/tests/test_config.py index 47dae20d88..5d9b5d57df 100644 --- a/nipype/utils/tests/test_config.py +++ b/nipype/utils/tests/test_config.py @@ -5,7 +5,7 @@ import sys import pytest from nipype import config -from mock import MagicMock +from unittest.mock import MagicMock try: import xvfbwrapper From f86426796af0dca01d8796b3a0808c7aac5c8cab Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 30 Mar 2020 09:35:09 -0400 Subject: [PATCH 0809/1665] TEST: Relax version constraints when checking FSL version --- nipype/interfaces/fsl/tests/test_base.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nipype/interfaces/fsl/tests/test_base.py b/nipype/interfaces/fsl/tests/test_base.py index 52f93b545f..b030a28a18 100644 --- a/nipype/interfaces/fsl/tests/test_base.py +++ b/nipype/interfaces/fsl/tests/test_base.py @@ -13,8 +13,7 @@ @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") def test_fslversion(): ver = fsl.Info.version() - ver = ver.split(".") - assert ver[0] in ["4", "5"] + assert ver.split(".", 1)[0].isdigit() @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") From 2b1957e0d2bd13c80b21bab6dc2922690da68d7a Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 30 Mar 2020 09:35:37 -0400 Subject: [PATCH 0810/1665] TEST: Validate NIPYPE_NO_ET when NIPYPE_NO_ET is set --- nipype/tests/test_nipype.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/nipype/tests/test_nipype.py b/nipype/tests/test_nipype.py index 60fa92d141..bb80da601f 100644 --- a/nipype/tests/test_nipype.py +++ b/nipype/tests/test_nipype.py @@ -1,3 +1,4 @@ +import os from .. 
import get_info from ..info import get_nipype_gitversion import pytest @@ -46,26 +47,28 @@ def test_no_et(tmp_path): from nipype.interfaces import utility as niu from nipype.interfaces.base import BaseInterface + et = os.getenv("NIPYPE_NO_ET") is None + # Pytest doesn't trigger this, so let's pretend it's there with patch.object(BaseInterface, "_etelemetry_version_data", {}): # Direct function call - environment not set f = niu.Function(function=_check_no_et) res = f.run() - assert res.outputs.out is True + assert res.outputs.out == et # Basic node - environment not set n = pe.Node( niu.Function(function=_check_no_et), name="n", base_dir=str(tmp_path) ) res = n.run() - assert res.outputs.out is True + assert res.outputs.out == et # Linear run - environment not set wf1 = pe.Workflow(name="wf1", base_dir=str(tmp_path)) wf1.add_nodes([pe.Node(niu.Function(function=_check_no_et), name="n")]) res = wf1.run() - assert next(iter(res.nodes)).result.outputs.out is True + assert next(iter(res.nodes)).result.outputs.out == et # MultiProc run - environment initialized with NIPYPE_NO_ET wf2 = pe.Workflow(name="wf2", base_dir=str(tmp_path)) @@ -91,9 +94,9 @@ def test_no_et(tmp_path): ] ) res = wf4.run(plugin="MultiProc", plugin_args={"n_procs": 1}) - assert next(iter(res.nodes)).result.outputs.out is True + assert next(iter(res.nodes)).result.outputs.out == et - # LegacyMultiProc run - environment initialized with NIPYPE_NO_ET + # run_without_submitting - environment not set wf5 = pe.Workflow(name="wf5", base_dir=str(tmp_path)) wf5.add_nodes( [ @@ -105,4 +108,4 @@ def test_no_et(tmp_path): ] ) res = wf5.run(plugin="LegacyMultiProc", plugin_args={"n_procs": 1}) - assert next(iter(res.nodes)).result.outputs.out is True + assert next(iter(res.nodes)).result.outputs.out == et From 68a7e4f030210dcca69d0d89814f638c58ec8afd Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 30 Mar 2020 10:29:53 -0400 Subject: [PATCH 0811/1665] CI: Clean up travis.yml --- .travis.yml | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index 3e4326c5a4..297a58cf65 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,5 @@ +os: linux dist: xenial -sudo: true language: python # our build matrix @@ -8,15 +8,13 @@ python: - 3.6 - 3.7 -# NOTE: Any changes to the matrix section should be duplicated below for -# Python 3.4 env: global: - EXTRA_WHEELS="https://5cf40426d9f06eb7461d-6fe47d9331aba7cd62fc36c7196769e4.ssl.cf2.rackcdn.com" - PRE_WHEELS="https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com" - EXTRA_PIP_FLAGS="--find-links=$EXTRA_WHEELS" - CHECK_TYPE=test - matrix: + jobs: - INSTALL_DEB_DEPENDECIES=true NIPYPE_EXTRAS="doc,tests,nipy,profiler" CI_SKIP_TEST=1 @@ -31,7 +29,7 @@ env: EXTRA_PIP_FLAGS="--pre $EXTRA_PIP_FLAGS --find-links $PRE_WHEELS --upgrade" CI_SKIP_TEST=1 -matrix: +jobs: include: - python: 3.7 env: From e6ed8c6c30fa1386d4820024ab29907274dd0109 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Mon, 30 Mar 2020 10:41:33 -0400 Subject: [PATCH 0812/1665] RF: Use etelemetry.check_available_version --- nipype/__init__.py | 38 +++----------------------------------- nipype/info.py | 2 +- 2 files changed, 4 insertions(+), 36 deletions(-) diff --git a/nipype/__init__.py b/nipype/__init__.py index 43e9011175..72b7241020 100644 --- a/nipype/__init__.py +++ b/nipype/__init__.py @@ -83,41 +83,9 @@ def check_latest_version(raise_exception=False): import etelemetry logger = logging.getLogger("nipype.utils") - - INIT_MSG = "Running {packname} version {version} (latest: {latest})".format - - latest = {"version": "Unknown", "bad_versions": []} - result = None - try: - result = etelemetry.get_project("nipy/nipype") - except Exception as e: - logger.warning("Could not check for version updates: \n%s", e) - finally: - if result: - latest.update(**result) - if LooseVersion(__version__) != LooseVersion(latest["version"]): - logger.info( - INIT_MSG( - packname="nipype", version=__version__, latest=latest["version"] - ) - ) - else: - logger.info("No new version available.") - if latest["bad_versions"] and any( - [ - LooseVersion(__version__) == LooseVersion(ver) - for ver in latest["bad_versions"] - ] - ): - message = ( - "You are using a version of Nipype with a critical " - "bug. Please use a different version." - ) - if raise_exception: - raise RuntimeError(message) - else: - logger.critical(message) - return latest + return etelemetry.check_available_version( + "nipy/nipype", __version__, logger, raise_exception + ) # Run telemetry on import for interactive sessions, such as IPython, Jupyter notebooks, Python REPL diff --git a/nipype/info.py b/nipype/info.py index 6e1dc06742..397ac3f939 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -148,7 +148,7 @@ def get_nipype_gitversion(): "simplejson>=%s" % SIMPLEJSON_MIN_VERSION, "traits>=%s,!=5.0" % TRAITS_MIN_VERSION, "filelock>=3.0.0", - "etelemetry", + "etelemetry>=0.2.0", ] # neurdflib has to come after prov From e2533c2d14a041ed0271384b78a14cad0fd0f919 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 18 Apr 2020 10:35:08 -0400 Subject: [PATCH 0813/1665] CI: Add artifacts redirector action --- .github/workflows/main.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 .github/workflows/main.yml diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 0000000000..90fda1aea7 --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,12 @@ +on: [status] +jobs: + circleci_artifacts_redirector_job: + runs-on: ubuntu-latest + name: Run CircleCI artifacts redirector + steps: + - name: GitHub Action step + uses: larsoner/circleci-artifacts-redirector-action@master + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + artifact-path: 0/tmp/src/nipype/doc/_build/html/index.html + circleci-jobs: build_docs From 3194918fe9102284d78f51d2c19e048ae8ae285c Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sat, 18 Apr 2020 10:36:50 -0400 Subject: [PATCH 0814/1665] RF: Use docutils 0.5-style directive definition --- nipype/sphinxext/plot_workflow.py | 469 +++++++++++++++--------------- 1 file changed, 231 insertions(+), 238 deletions(-) diff --git a/nipype/sphinxext/plot_workflow.py b/nipype/sphinxext/plot_workflow.py index 78b5f71384..931d6efec1 100644 --- a/nipype/sphinxext/plot_workflow.py +++ b/nipype/sphinxext/plot_workflow.py @@ -118,7 +118,7 @@ missing_imports = [] try: - from docutils.parsers.rst import directives + from docutils.parsers.rst import directives, Directive from docutils.parsers.rst.directives.images import Image align = Image.align @@ -146,26 +146,6 @@ def format_template(template, **kw): missing_imports.append(str(e)) -def wf_directive( - name, - arguments, - options, - content, - lineno, - content_offset, - block_text, - state, - state_machine, -): - if len(missing_imports) == 0: - return run(arguments, content, options, state_machine, state, lineno) - else: - raise ImportError("\n".join(missing_imports)) - - -wf_directive.__doc__ = __doc__ - - def _option_boolean(arg): if not arg or not arg.strip(): # no argument given, assume used as a flag @@ -232,12 +212,12 @@ def mark_wf_labels(app, document): break -def setup(app): - setup.app = app - setup.config = app.config - setup.confdir = app.confdir - - options = { +class WorkflowDirective(Directive): + has_content = True + required_arguments = 0 + optional_arguments = 2 + final_argument_whitespace = False + option_spec = { "alt": directives.unchanged, "height": directives.length_or_unitless, "width": directives.length_or_percentage_or_unitless, @@ -253,7 +233,230 @@ def setup(app): "simple_form": _option_boolean, } - app.add_directive("workflow", wf_directive, True, (0, 2, False), **options) + def run(self): + if missing_imports: + raise ImportError("\n".join(missing_imports)) + + document = self.state_machine.document + config = document.settings.env.config + nofigs = "nofigs" in self.options + + formats = get_wf_formats(config) + default_fmt = formats[0][0] + + graph2use = self.options.get("graph2use", "hierarchical") + simple_form = self.options.get("simple_form", True) + + self.options.setdefault("include-source", config.wf_include_source) + keep_context = "context" in self.options + context_opt = None if not keep_context else self.options["context"] + + rst_file = document.attributes["source"] + rst_dir = os.path.dirname(rst_file) + + if len(self.arguments): + if not config.wf_basedir: + source_file_name = os.path.join( + setup.app.builder.srcdir, directives.uri(self.arguments[0]) + ) + else: + source_file_name = os.path.join( + setup.confdir, config.wf_basedir, directives.uri(self.arguments[0]) + ) + + # If there is content, it will be passed as a caption. 
+ caption = "\n".join(self.content) + + # If the optional function name is provided, use it + if len(self.arguments) == 2: + function_name = self.arguments[1] + else: + function_name = None + + with io.open(source_file_name, "r", encoding="utf-8") as fd: + code = fd.read() + output_base = os.path.basename(source_file_name) + else: + source_file_name = rst_file + code = textwrap.dedent("\n".join([str(c) for c in self.content])) + counter = document.attributes.get("_wf_counter", 0) + 1 + document.attributes["_wf_counter"] = counter + base, _ = os.path.splitext(os.path.basename(source_file_name)) + output_base = "%s-%d.py" % (base, counter) + function_name = None + caption = "" + + base, source_ext = os.path.splitext(output_base) + if source_ext in (".py", ".rst", ".txt"): + output_base = base + else: + source_ext = "" + + # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames + output_base = output_base.replace(".", "-") + + # is it in doctest format? + is_doctest = contains_doctest(code) + if "format" in self.options: + if self.options["format"] == "python": + is_doctest = False + else: + is_doctest = True + + # determine output directory name fragment + source_rel_name = relpath(source_file_name, setup.confdir) + source_rel_dir = os.path.dirname(source_rel_name) + while source_rel_dir.startswith(os.path.sep): + source_rel_dir = source_rel_dir[1:] + + # build_dir: where to place output files (temporarily) + build_dir = os.path.join( + os.path.dirname(setup.app.doctreedir), "wf_directive", source_rel_dir + ) + # get rid of .. in paths, also changes pathsep + # see note in Python docs for warning about symbolic links on Windows. + # need to compare source and dest paths at end + build_dir = os.path.normpath(build_dir) + + if not os.path.exists(build_dir): + os.makedirs(build_dir) + + # output_dir: final location in the builder's directory + dest_dir = os.path.abspath( + os.path.join(setup.app.builder.outdir, source_rel_dir) + ) + if not os.path.exists(dest_dir): + os.makedirs(dest_dir) # no problem here for me, but just use built-ins + + # how to link to files from the RST file + dest_dir_link = os.path.join( + relpath(setup.confdir, rst_dir), source_rel_dir + ).replace(os.path.sep, "/") + try: + build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, "/") + except ValueError: + # on Windows, relpath raises ValueError when path and start are on + # different mounts/drives + build_dir_link = build_dir + source_link = dest_dir_link + "/" + output_base + source_ext + + # make figures + try: + results = render_figures( + code, + source_file_name, + build_dir, + output_base, + keep_context, + function_name, + config, + graph2use, + simple_form, + context_reset=context_opt == "reset", + close_figs=context_opt == "close-figs", + ) + errors = [] + except GraphError as err: + reporter = self.state.memo.reporter + sm = reporter.system_message( + 2, + "Exception occurred in plotting %s\n from %s:\n%s" + % (output_base, source_file_name, err), + line=self.lineno, + ) + results = [(code, [])] + errors = [sm] + + # Properly indent the caption + caption = "\n".join(" " + line.strip() for line in caption.split("\n")) + + # generate output restructuredtext + total_lines = [] + for j, (code_piece, images) in enumerate(results): + if self.options["include-source"]: + if is_doctest: + lines = [""] + lines += [row.rstrip() for row in code_piece.split("\n")] + else: + lines = [".. 
code-block:: python", ""] + lines += [" %s" % row.rstrip() for row in code_piece.split("\n")] + source_code = "\n".join(lines) + else: + source_code = "" + + if nofigs: + images = [] + + opts = [ + ":%s: %s" % (key, val) + for key, val in list(self.options.items()) + if key in ("alt", "height", "width", "scale", "align", "class") + ] + + only_html = ".. only:: html" + only_latex = ".. only:: latex" + only_texinfo = ".. only:: texinfo" + + # Not-None src_link signals the need for a source link in the generated + # html + if j == 0 and config.wf_html_show_source_link: + src_link = source_link + else: + src_link = None + + result = format_template( + config.wf_template or TEMPLATE, + default_fmt=default_fmt, + dest_dir=dest_dir_link, + build_dir=build_dir_link, + source_link=src_link, + multi_image=len(images) > 1, + only_html=only_html, + only_latex=only_latex, + only_texinfo=only_texinfo, + options=opts, + images=images, + source_code=source_code, + html_show_formats=config.wf_html_show_formats and len(images), + caption=caption, + ) + + total_lines.extend(result.split("\n")) + total_lines.extend("\n") + + if total_lines: + self.state_machine.insert_input(total_lines, source=source_file_name) + + # copy image files to builder's output directory, if necessary + os.makedirs(dest_dir, exist_ok=True) + for code_piece, images in results: + for img in images: + for fn in img.filenames(): + destimg = os.path.join(dest_dir, os.path.basename(fn)) + if fn != destimg: + shutil.copyfile(fn, destimg) + + # copy script (if necessary) + target_name = os.path.join(dest_dir, output_base + source_ext) + with io.open(target_name, "w", encoding="utf-8") as f: + if source_file_name == rst_file: + code_escaped = unescape_doctest(code) + else: + code_escaped = code + f.write(code_escaped) + + return errors + + +WorkflowDirective.__doc__ = __doc__ + + +def setup(app): + setup.app = app + setup.config = app.config + setup.confdir = app.confdir + + app.add_directive("workflow", WorkflowDirective) app.add_config_value("graph2use", "hierarchical", "html") app.add_config_value("simple_form", True, "html") app.add_config_value("wf_pre_code", None, True) @@ -548,213 +751,3 @@ def render_figures( img.formats.append(fmt) return [(code, [img])] - - -def run(arguments, content, options, state_machine, state, lineno): - document = state_machine.document - config = document.settings.env.config - nofigs = "nofigs" in options - - formats = get_wf_formats(config) - default_fmt = formats[0][0] - - graph2use = options.get("graph2use", "hierarchical") - simple_form = options.get("simple_form", True) - - options.setdefault("include-source", config.wf_include_source) - keep_context = "context" in options - context_opt = None if not keep_context else options["context"] - - rst_file = document.attributes["source"] - rst_dir = os.path.dirname(rst_file) - - if len(arguments): - if not config.wf_basedir: - source_file_name = os.path.join( - setup.app.builder.srcdir, directives.uri(arguments[0]) - ) - else: - source_file_name = os.path.join( - setup.confdir, config.wf_basedir, directives.uri(arguments[0]) - ) - - # If there is content, it will be passed as a caption. 
- caption = "\n".join(content) - - # If the optional function name is provided, use it - if len(arguments) == 2: - function_name = arguments[1] - else: - function_name = None - - with io.open(source_file_name, "r", encoding="utf-8") as fd: - code = fd.read() - output_base = os.path.basename(source_file_name) - else: - source_file_name = rst_file - code = textwrap.dedent("\n".join([str(c) for c in content])) - counter = document.attributes.get("_wf_counter", 0) + 1 - document.attributes["_wf_counter"] = counter - base, _ = os.path.splitext(os.path.basename(source_file_name)) - output_base = "%s-%d.py" % (base, counter) - function_name = None - caption = "" - - base, source_ext = os.path.splitext(output_base) - if source_ext in (".py", ".rst", ".txt"): - output_base = base - else: - source_ext = "" - - # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames - output_base = output_base.replace(".", "-") - - # is it in doctest format? - is_doctest = contains_doctest(code) - if "format" in options: - if options["format"] == "python": - is_doctest = False - else: - is_doctest = True - - # determine output directory name fragment - source_rel_name = relpath(source_file_name, setup.confdir) - source_rel_dir = os.path.dirname(source_rel_name) - while source_rel_dir.startswith(os.path.sep): - source_rel_dir = source_rel_dir[1:] - - # build_dir: where to place output files (temporarily) - build_dir = os.path.join( - os.path.dirname(setup.app.doctreedir), "wf_directive", source_rel_dir - ) - # get rid of .. in paths, also changes pathsep - # see note in Python docs for warning about symbolic links on Windows. - # need to compare source and dest paths at end - build_dir = os.path.normpath(build_dir) - - if not os.path.exists(build_dir): - os.makedirs(build_dir) - - # output_dir: final location in the builder's directory - dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir, source_rel_dir)) - if not os.path.exists(dest_dir): - os.makedirs(dest_dir) # no problem here for me, but just use built-ins - - # how to link to files from the RST file - dest_dir_link = os.path.join( - relpath(setup.confdir, rst_dir), source_rel_dir - ).replace(os.path.sep, "/") - try: - build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, "/") - except ValueError: - # on Windows, relpath raises ValueError when path and start are on - # different mounts/drives - build_dir_link = build_dir - source_link = dest_dir_link + "/" + output_base + source_ext - - # make figures - try: - results = render_figures( - code, - source_file_name, - build_dir, - output_base, - keep_context, - function_name, - config, - graph2use, - simple_form, - context_reset=context_opt == "reset", - close_figs=context_opt == "close-figs", - ) - errors = [] - except GraphError as err: - reporter = state.memo.reporter - sm = reporter.system_message( - 2, - "Exception occurred in plotting %s\n from %s:\n%s" - % (output_base, source_file_name, err), - line=lineno, - ) - results = [(code, [])] - errors = [sm] - - # Properly indent the caption - caption = "\n".join(" " + line.strip() for line in caption.split("\n")) - - # generate output restructuredtext - total_lines = [] - for j, (code_piece, images) in enumerate(results): - if options["include-source"]: - if is_doctest: - lines = [""] - lines += [row.rstrip() for row in code_piece.split("\n")] - else: - lines = [".. 
code-block:: python", ""] - lines += [" %s" % row.rstrip() for row in code_piece.split("\n")] - source_code = "\n".join(lines) - else: - source_code = "" - - if nofigs: - images = [] - - opts = [ - ":%s: %s" % (key, val) - for key, val in list(options.items()) - if key in ("alt", "height", "width", "scale", "align", "class") - ] - - only_html = ".. only:: html" - only_latex = ".. only:: latex" - only_texinfo = ".. only:: texinfo" - - # Not-None src_link signals the need for a source link in the generated - # html - if j == 0 and config.wf_html_show_source_link: - src_link = source_link - else: - src_link = None - - result = format_template( - config.wf_template or TEMPLATE, - default_fmt=default_fmt, - dest_dir=dest_dir_link, - build_dir=build_dir_link, - source_link=src_link, - multi_image=len(images) > 1, - only_html=only_html, - only_latex=only_latex, - only_texinfo=only_texinfo, - options=opts, - images=images, - source_code=source_code, - html_show_formats=config.wf_html_show_formats and len(images), - caption=caption, - ) - - total_lines.extend(result.split("\n")) - total_lines.extend("\n") - - if total_lines: - state_machine.insert_input(total_lines, source=source_file_name) - - # copy image files to builder's output directory, if necessary - os.makedirs(dest_dir, exist_ok=True) - for code_piece, images in results: - for img in images: - for fn in img.filenames(): - destimg = os.path.join(dest_dir, os.path.basename(fn)) - if fn != destimg: - shutil.copyfile(fn, destimg) - - # copy script (if necessary) - target_name = os.path.join(dest_dir, output_base + source_ext) - with io.open(target_name, "w", encoding="utf-8") as f: - if source_file_name == rst_file: - code_escaped = unescape_doctest(code) - else: - code_escaped = code - f.write(code_escaped) - - return errors From 747137f1587f938146f3bf9a1acc4d2eca44d402 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sat, 18 Apr 2020 12:05:45 -0400 Subject: [PATCH 0815/1665] DOC: Restore sphinxext docs --- doc/conf.py | 267 +++++++++++++++++++++++++------------------ doc/requirements.txt | 3 +- 2 files changed, 155 insertions(+), 115 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index eb34e3f8d8..91fd3086a3 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -25,7 +25,7 @@ conf_py = Path(__file__) -example_dir = conf_py.parent / 'users' / 'examples' +example_dir = conf_py.parent / "users" / "examples" shutil.rmtree(example_dir, ignore_errors=True) example_dir.mkdir(parents=True) python_dir = conf_py.parent / "_static" / "python" @@ -34,18 +34,46 @@ ex2rst = str(conf_py.parent.parent / "tools" / "ex2rst") with TemporaryDirectory() as tmpdir: - sp.run(["git", "clone", "--depth", "1", "https://github.com/niflows/nipype1-examples.git", - tmpdir], check=True) + sp.run( + [ + "git", + "clone", + "--depth", + "1", + "https://github.com/niflows/nipype1-examples.git", + tmpdir, + ], + check=True, + ) source_dir = Path(tmpdir) / "package" / "niflow" / "nipype1" / "examples" shutil.copytree(source_dir, python_dir) -sp.run(["python", ex2rst, "--outdir", str(example_dir), str(python_dir), - "-x", str(python_dir / "test_spm.py"), - "-x", str(python_dir / "__init__.py"), - "-x", str(python_dir / "cli.py")], - check=True) -sp.run(["python", ex2rst, "--outdir", str(example_dir), str(python_dir / "frontiers_paper")], - check=True) +sp.run( + [ + "python", + ex2rst, + "--outdir", + str(example_dir), + str(python_dir), + "-x", + str(python_dir / "test_spm.py"), + "-x", + str(python_dir / "__init__.py"), + "-x", + str(python_dir / "cli.py"), + ], + check=True, +) +sp.run( + [ + "python", + ex2rst, + "--outdir", + str(example_dir), + str(python_dir / "frontiers_paper"), + ], + check=True, +) # If extensions (or modules to document with autodoc) are in another directory, @@ -58,33 +86,33 @@ # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.doctest', - 'sphinx.ext.graphviz', - 'sphinx.ext.mathjax', - 'sphinx.ext.inheritance_diagram', - 'sphinx.ext.todo', - 'sphinxcontrib.apidoc', - 'matplotlib.sphinxext.plot_directive', - 'nbsphinx', - 'nipype.sphinxext.plot_workflow', - 'nipype.sphinxext.apidoc', - 'nipype.sphinxext.documenter', + "sphinx.ext.autodoc", + "sphinx.ext.doctest", + "sphinx.ext.graphviz", + "sphinx.ext.mathjax", + "sphinx.ext.inheritance_diagram", + "sphinx.ext.todo", + "sphinxcontrib.apidoc", + "matplotlib.sphinxext.plot_directive", + "nbsphinx", + "nipype.sphinxext.plot_workflow", + "nipype.sphinxext.apidoc", + "nipype.sphinxext.documenter", ] autodoc_mock_imports = [ - 'matplotlib', - 'nilearn', - 'nipy', - 'nitime', - 'numpy', - 'pandas', - 'seaborn', - 'skimage', - 'svgutils', - 'transforms3d', - 'tvtk', - 'vtk' + "matplotlib", + "nilearn", + "nipy", + "nitime", + "numpy", + "pandas", + "seaborn", + "skimage", + "svgutils", + "transforms3d", + "tvtk", + "vtk", ] # Accept custom section names to be parsed for numpy-style docstrings @@ -93,33 +121,33 @@ # https://github.com/sphinx-contrib/napoleon/pull/10 is merged. 
napoleon_use_param = False napoleon_custom_sections = [ - ('Inputs', 'Parameters'), - ('Outputs', 'Parameters'), - ('Attributes', 'Parameters'), - ('Mandatory Inputs', 'Parameters'), - ('Optional Inputs', 'Parameters'), + ("Inputs", "Parameters"), + ("Outputs", "Parameters"), + ("Attributes", "Parameters"), + ("Mandatory Inputs", "Parameters"), + ("Optional Inputs", "Parameters"), ] -on_rtd = os.environ.get('READTHEDOCS') == 'True' +on_rtd = os.environ.get("READTHEDOCS") == "True" if on_rtd: - extensions.append('readthedocs_ext.readthedocs') + extensions.append("readthedocs_ext.readthedocs") # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix of source filenames. -source_suffix = '.rst' +source_suffix = ".rst" # The encoding of source files. -#source_encoding = 'utf-8' +# source_encoding = 'utf-8' # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = u'nipype' -copyright = u'2009-20, Neuroimaging in Python team' +project = u"nipype" +copyright = u"2009-20, Neuroimaging in Python team" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -132,55 +160,55 @@ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. -#language = None +# language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -today_fmt = '%B %d, %Y, %H:%M PDT' +today_fmt = "%B %d, %Y, %H:%M PDT" # List of documents that shouldn't be included in the build. -unused_docs = ['api/generated/gen'] +unused_docs = ["api/generated/gen"] # List of directories, relative to source directory, that shouldn't be searched # for source files. -exclude_trees = ['_build'] +exclude_trees = ["_build"] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. -exclude_patterns = [ - '_build', 'Thumbs.db', '.DS_Store', -] +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). -#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. 
-#modindex_common_prefix = [] +# modindex_common_prefix = [] # -- Sphinxext configuration --------------------------------------------------- # Set attributes for layout of inheritance diagrams -inheritance_graph_attrs = dict(rankdir="LR", size='"6.0, 8.0"', fontsize=14, - ratio='compress') -inheritance_node_attrs = dict(shape='ellipse', fontsize=14, height=0.75, - color='dodgerblue1', style='filled') +inheritance_graph_attrs = dict( + rankdir="LR", size='"6.0, 8.0"', fontsize=14, ratio="compress" +) +inheritance_node_attrs = dict( + shape="ellipse", fontsize=14, height=0.75, color="dodgerblue1", style="filled" +) # Flag to show todo items in rendered output todo_include_todos = True @@ -189,70 +217,72 @@ # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. -html_theme = 'sphinxdoc' +html_theme = "sphinxdoc" # The style sheet to use for HTML and HTML Help pages. A file of that name # must exist either in Sphinx' static/ path, or in one of the custom paths # given in html_static_path. -html_style = 'nipype.css' +html_style = "nipype.css" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -#html_theme_options = {} +# html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] +# html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -html_title = 'nipy pipeline and interfaces package' +html_title = "nipy pipeline and interfaces package" # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' # Content template for the index page. -html_index = 'index.html' +html_index = "index.html" # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. -#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -html_sidebars = {'**': ['gse.html', 'localtoc.html', 'sidebar_versions.html', 'indexsidebar.html'], - 'searchresults': ['sidebar_versions.html', 'indexsidebar.html'], - 'version': []} +html_sidebars = { + "**": ["gse.html", "localtoc.html", "sidebar_versions.html", "indexsidebar.html"], + "searchresults": ["sidebar_versions.html", "indexsidebar.html"], + "version": [], +} # Additional templates that should be rendered to pages, maps page names to # template names. 
-#html_additional_pages = {'index': 'index.html'} +# html_additional_pages = {'index': 'index.html'} # If false, no module index is generated. -#html_use_modindex = True +# html_use_modindex = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False @@ -260,72 +290,81 @@ # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = '' +# html_file_suffix = '' # Output file base name for HTML help builder. -htmlhelp_basename = 'nipypedoc' +htmlhelp_basename = "nipypedoc" # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' +# latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' +# latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - ('interfaces', 'interfaces.tex', 'Nipype Interfaces Documentation', - 'Neuroimaging in Python team', 'manual'), + ( + "interfaces", + "interfaces.tex", + "Nipype Interfaces Documentation", + "Neuroimaging in Python team", + "manual", + ), # ('developers', 'developers.tex', 'Nipype API', # 'Neuroimaging in Python team', 'manual'), - ('examples', 'examples.tex', 'Nipype Examples', - 'Neuroimaging in Python team', 'manual'), + ( + "examples", + "examples.tex", + "Nipype Examples", + "Neuroimaging in Python team", + "manual", + ), ] # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # Additional stuff for the LaTeX preamble. -#latex_preamble = '' +# latex_preamble = '' # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. -#latex_use_modindex = True +# latex_use_modindex = True # -- apidoc extension configuration ------------------------------------------ -apidoc_module_dir = '../nipype' -apidoc_output_dir = 'api/generated' +apidoc_module_dir = "../nipype" +apidoc_output_dir = "api/generated" apidoc_excluded_paths = [ - '*/tests/*', 'tests/*', - 'external/*', - 'fixes/*', - 'scripts/*', - 'sphinxext/*', - 'testing/*', - 'workflows/*', - 'conftest.py', - 'info.py', - 'pkg_info.py', - 'refs.py', + "*/tests/*", + "tests/*", + "external/*", + "fixes/*", + "scripts/*", + "testing/*", + "workflows/*", + "conftest.py", + "info.py", + "pkg_info.py", + "refs.py", ] apidoc_separate_modules = True -apidoc_extra_args = ['--module-first', '-d 1', '-T'] +apidoc_extra_args = ["--module-first", "-d 1", "-T"] # -- Options for intersphinx extension --------------------------------------- # Example configuration for intersphinx: refer to the Python standard library. 
-intersphinx_mapping = {'http://docs.python.org/': None} - +intersphinx_mapping = {"http://docs.python.org/": None} diff --git a/doc/requirements.txt b/doc/requirements.txt index 057147c5b5..3b0b513ff7 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -5,4 +5,5 @@ nbsphinx sphinx-argparse sphinx>=2.1.2 sphinxcontrib-apidoc -sphinxcontrib-napoleon \ No newline at end of file +sphinxcontrib-napoleon +niflow-nipype1-workflows From 6f15e261b9ef596c7402e1aefb4cc936dc784b57 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 18 Apr 2020 12:08:30 -0400 Subject: [PATCH 0816/1665] DOC: Drop doubled-up docs --- nipype/sphinxext/plot_workflow.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/nipype/sphinxext/plot_workflow.py b/nipype/sphinxext/plot_workflow.py index 931d6efec1..95b87da157 100644 --- a/nipype/sphinxext/plot_workflow.py +++ b/nipype/sphinxext/plot_workflow.py @@ -448,9 +448,6 @@ def run(self): return errors -WorkflowDirective.__doc__ = __doc__ - - def setup(app): setup.app = app setup.config = app.config From 04fc7b0df0f6358b13bf24df3f299fbd3db775f0 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 15 Jan 2020 11:54:31 -0500 Subject: [PATCH 0817/1665] CI: Test Python 3.8 --- .travis.yml | 1 + nipype/info.py | 1 + 2 files changed, 2 insertions(+) diff --git a/.travis.yml b/.travis.yml index 64641b0f14..568c350085 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,6 +6,7 @@ language: python python: - 3.6 - 3.7 +- 3.8 env: global: diff --git a/nipype/info.py b/nipype/info.py index 099b53b735..c8cccff9c6 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -56,6 +56,7 @@ def get_nipype_gitversion(): "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", "Topic :: Scientific/Engineering", ] PYTHON_REQUIRES = ">= 3.6" From 7ebe6ec8bf2519272fd0dd228e65ead6f9d4ee54 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 23 Mar 2020 15:58:09 -0400 Subject: [PATCH 0818/1665] CI: Skip some multiproc tests in Py38 --- .../pipeline/plugins/tests/test_legacymultiproc_nondaemon.py | 2 ++ nipype/pipeline/plugins/tests/test_multiproc.py | 3 +++ 2 files changed, 5 insertions(+) diff --git a/nipype/pipeline/plugins/tests/test_legacymultiproc_nondaemon.py b/nipype/pipeline/plugins/tests/test_legacymultiproc_nondaemon.py index f490729485..2e814bbf3e 100644 --- a/nipype/pipeline/plugins/tests/test_legacymultiproc_nondaemon.py +++ b/nipype/pipeline/plugins/tests/test_legacymultiproc_nondaemon.py @@ -134,6 +134,7 @@ def run_multiproc_nondaemon_with_flag(nondaemon_flag): return result +@pytest.mark.skipif(sys.version_info >= (3, 8), reason="multiprocessing issues in Python 3.8") def test_run_multiproc_nondaemon_false(): """ This is the entry point for the test. 
Two times a pipe of several @@ -152,6 +153,7 @@ def test_run_multiproc_nondaemon_false(): assert shouldHaveFailed +@pytest.mark.skipif(sys.version_info >= (3, 8), reason="multiprocessing issues in Python 3.8") def test_run_multiproc_nondaemon_true(): # with nondaemon_flag = True, the execution should succeed result = run_multiproc_nondaemon_with_flag(True) diff --git a/nipype/pipeline/plugins/tests/test_multiproc.py b/nipype/pipeline/plugins/tests/test_multiproc.py index 2e8967cfbe..9916785e3d 100644 --- a/nipype/pipeline/plugins/tests/test_multiproc.py +++ b/nipype/pipeline/plugins/tests/test_multiproc.py @@ -4,6 +4,7 @@ """ Test the resource management of MultiProc """ +import sys import os import pytest from nipype.pipeline import engine as pe @@ -33,6 +34,7 @@ def _list_outputs(self): return outputs +@pytest.mark.skipif(sys.version_info >= (3, 8), reason="multiprocessing issues in Python 3.8") def test_run_multiproc(tmpdir): tmpdir.chdir() @@ -114,6 +116,7 @@ def test_no_more_threads_than_specified(tmpdir): pipe.run(plugin="MultiProc", plugin_args={"n_procs": max_threads}) +@pytest.mark.skipif(sys.version_info >= (3, 8), reason="multiprocessing issues in Python 3.8") def test_hold_job_until_procs_available(tmpdir): tmpdir.chdir() From 828cd92b123a59627714ca3bc2446b2b0aa70060 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 1 Apr 2020 12:55:46 -0400 Subject: [PATCH 0819/1665] TEST: Timeout provenance tests after 1min --- nipype/utils/tests/test_provenance.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nipype/utils/tests/test_provenance.py b/nipype/utils/tests/test_provenance.py index 159a59ba7a..8137c083f7 100644 --- a/nipype/utils/tests/test_provenance.py +++ b/nipype/utils/tests/test_provenance.py @@ -15,6 +15,7 @@ @needs_rdflib5 +@pytest.mark.timeout(60) def test_provenance(tmpdir): from nipype.interfaces.base import CommandLine @@ -27,6 +28,7 @@ def test_provenance(tmpdir): @needs_rdflib5 +@pytest.mark.timeout(60) def test_provenance_exists(tmpdir): tmpdir.chdir() from nipype import config From 58dde80feccadad54e8d7a08a97518afffa4f004 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Wed, 1 Apr 2020 10:46:59 -0400 Subject: [PATCH 0820/1665] MNT: Pin rdflib >=5 --- nipype/info.py | 6 ++---- requirements.txt | 6 +++--- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/nipype/info.py b/nipype/info.py index c8cccff9c6..1cedaa94f4 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -110,6 +110,7 @@ def get_nipype_gitversion(): FUTURE_MIN_VERSION = "0.16.0" SIMPLEJSON_MIN_VERSION = "3.8.0" PROV_VERSION = "1.5.2" +RDFLIB_MIN_VERSION = "5.0.0" CLICK_MIN_VERSION = "6.6.0" PYDOT_MIN_VERSION = "1.2.3" @@ -144,6 +145,7 @@ def get_nipype_gitversion(): "pydot>=%s" % PYDOT_MIN_VERSION, "pydotplus", "python-dateutil>=%s" % DATEUTIL_MIN_VERSION, + "rdflib>=%s" % RDFLIB_MIN_VERSION, "scipy>=%s" % SCIPY_MIN_VERSION, "simplejson>=%s" % SIMPLEJSON_MIN_VERSION, "traits>=%s,!=5.0" % TRAITS_MIN_VERSION, @@ -151,10 +153,6 @@ def get_nipype_gitversion(): "etelemetry>=0.2.0", ] -# neurdflib has to come after prov -# https://github.com/nipy/nipype/pull/2961#issuecomment-512035484 -REQUIRES += ["neurdflib"] - TESTS_REQUIRES = [ "codecov", "coverage<5", diff --git a/requirements.txt b/requirements.txt index 8f16c7873c..6cb09abdee 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,16 +2,16 @@ click>=6.6.0 networkx>=1.9 nibabel>=2.1.0 -numpy>=1.12 ; python_version < "3.7" +numpy>=1.13 ; python_version < "3.7" numpy>=1.15.3 ; python_version >= "3.7" packaging prov>=1.5.2 pydot>=1.2.3 pydotplus python-dateutil>=2.2 +rdflib>=5.0.0 scipy>=0.14 simplejson>=3.8.0 traits>=4.6,!=5.0 filelock>=3.0.0 -etelemetry -neurdflib +etelemetry>=0.2.0 From ab4298a7e267ff8ce35d6f69996f52b6c670c16b Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 18 Apr 2020 09:55:30 -0400 Subject: [PATCH 0821/1665] TEST: Set timeouts for PBS and OAR tests --- nipype/pipeline/plugins/tests/test_oar.py | 5 +++-- nipype/pipeline/plugins/tests/test_pbs.py | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/nipype/pipeline/plugins/tests/test_oar.py b/nipype/pipeline/plugins/tests/test_oar.py index cd3bf9606b..75cfddaa05 100644 --- a/nipype/pipeline/plugins/tests/test_oar.py +++ b/nipype/pipeline/plugins/tests/test_oar.py @@ -29,8 +29,9 @@ def _list_outputs(self): return outputs -@pytest.mark.skipif(which('oarsub') is None, reason="OAR not installed") -def test_run_pbsgraph(tmp_path): +@pytest.mark.skipif(which("oarsub") is None, reason="OAR not installed") +@pytest.mark.timeout(60) +def test_run_oargraph(tmp_path): pipe = pe.Workflow(name="pipe", base_dir=str(tmp_path)) mod1 = pe.Node(interface=OarTestInterface(), name="mod1") mod2 = pe.MapNode(interface=OarTestInterface(), iterfield=["input1"], name="mod2") diff --git a/nipype/pipeline/plugins/tests/test_pbs.py b/nipype/pipeline/plugins/tests/test_pbs.py index 64d0d77b5d..65662fd867 100644 --- a/nipype/pipeline/plugins/tests/test_pbs.py +++ b/nipype/pipeline/plugins/tests/test_pbs.py @@ -29,7 +29,8 @@ def _list_outputs(self): return outputs -@pytest.mark.skipif(which('qsub') is None, reason="PBS not installed") +@pytest.mark.skipif(which("qsub") is None, reason="PBS not installed") +@pytest.mark.timeout(60) def test_run_pbsgraph(tmp_path): pipe = pe.Workflow(name="pipe", base_dir=str(tmp_path)) mod1 = pe.Node(interface=PbsTestInterface(), name="mod1") From b58d7f341001fc9ed2a4be306040c52828a7d593 Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Sat, 18 Apr 2020 13:36:09 -0400 Subject: [PATCH 0822/1665] enh: add readthedocs markup for building docs --- .readthedocs.yml | 18 ++++++++++++++++++ 1 file changed, 18 
insertions(+) create mode 100644 .readthedocs.yml diff --git a/.readthedocs.yml b/.readthedocs.yml new file mode 100644 index 0000000000..6b05eeddbe --- /dev/null +++ b/.readthedocs.yml @@ -0,0 +1,18 @@ +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Build documentation in the docs/ directory with Sphinx +sphinx: + configuration: docs/conf.py + +# Optionally build your docs in additional formats such as PDF and ePub +formats: all + +# Optionally set the version of Python and requirements required to build your docs +python: + version: 3.7 + install: + - requirements: docs/requirements.txt From d3a965cd0f37cf05f8c560fb1c4d06805f99b3c3 Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Sat, 18 Apr 2020 13:40:09 -0400 Subject: [PATCH 0823/1665] fix: path --- .readthedocs.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.readthedocs.yml b/.readthedocs.yml index 6b05eeddbe..e139918f30 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -6,7 +6,7 @@ version: 2 # Build documentation in the docs/ directory with Sphinx sphinx: - configuration: docs/conf.py + configuration: doc/conf.py # Optionally build your docs in additional formats such as PDF and ePub formats: all @@ -15,4 +15,4 @@ formats: all python: version: 3.7 install: - - requirements: docs/requirements.txt + - requirements: doc/requirements.txt From 279422150dfb755a1be63b23e96361acb0bdf880 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 18 Apr 2020 16:35:16 -0400 Subject: [PATCH 0824/1665] DOC: Install local nipype for access to sphinxexts --- .readthedocs.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.readthedocs.yml b/.readthedocs.yml index e139918f30..d4dfc7493e 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -16,3 +16,7 @@ python: version: 3.7 install: - requirements: doc/requirements.txt + - method: pip + path: . + extra_requirements: + - docs From 7cb5ea39e0cbe3a92407287d98004cd1c7e03c0b Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 18 Apr 2020 16:38:31 -0400 Subject: [PATCH 0825/1665] DOC: Use doc extra, not docs --- .readthedocs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.readthedocs.yml b/.readthedocs.yml index d4dfc7493e..43f861701f 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -19,4 +19,4 @@ python: - method: pip path: . extra_requirements: - - docs + - doc From a10d3da97ef39d9bbd63a8b634ca0747d9571e06 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 18 Apr 2020 16:46:45 -0400 Subject: [PATCH 0826/1665] DOC: Ignore out-of-date IPython notebook for now --- doc/conf.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/conf.py b/doc/conf.py index 91fd3086a3..cef6952782 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -46,7 +46,11 @@ check=True, ) source_dir = Path(tmpdir) / "package" / "niflow" / "nipype1" / "examples" - shutil.copytree(source_dir, python_dir) + shutil.copytree( + source_dir, + python_dir, + ignore=lambda src, names: [n for n in names if n.endswith(".ipynb")], + ) sp.run( [ From 3bfd2f25764f0c768eceb0db67894c73fa4f15ae Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sat, 18 Apr 2020 16:59:04 -0400 Subject: [PATCH 0827/1665] DOC: Skip PDF and EPUB builds --- .readthedocs.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.readthedocs.yml b/.readthedocs.yml index 43f861701f..5a32188317 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -9,7 +9,8 @@ sphinx: configuration: doc/conf.py # Optionally build your docs in additional formats such as PDF and ePub -formats: all +formats: + - htmlzip # Optionally set the version of Python and requirements required to build your docs python: From e671d61246ebc9edad0f2938bb602951f7f295cb Mon Sep 17 00:00:00 2001 From: Yaroslav Halchenko Date: Mon, 20 Apr 2020 11:50:30 -0400 Subject: [PATCH 0828/1665] RF: use sys.executable not hardcoded "python" this would allow to consistently use the same python (which might be python3 with python corresponding to python v 2) --- doc/conf.py | 5 +++-- tools/toollib.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index cef6952782..9a4f050572 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -16,6 +16,7 @@ from pathlib import Path from tempfile import TemporaryDirectory import shutil +import sys from packaging.version import Version import nipype import subprocess as sp @@ -54,7 +55,7 @@ sp.run( [ - "python", + sys.executable, ex2rst, "--outdir", str(example_dir), @@ -70,7 +71,7 @@ ) sp.run( [ - "python", + sys.executable, ex2rst, "--outdir", str(example_dir), diff --git a/tools/toollib.py b/tools/toollib.py index 979f89c97f..77d864f142 100644 --- a/tools/toollib.py +++ b/tools/toollib.py @@ -31,7 +31,7 @@ def sh(cmd): def compile_tree(): """Compile all Python files below current directory.""" vstr = ".".join(map(str, sys.version_info[:2])) - stat = os.system("python %s/lib/python%s/compileall.py ." % (sys.prefix, vstr)) + stat = os.system("%s %s/lib/python%s/compileall.py ." % (sys.executable, sys.prefix, vstr)) if stat: msg = "*** ERROR: Some Python files in tree do NOT compile! ***\n" msg += "See messages above for the actual file that produced it.\n" From 02e32dc9ad96912bb3327120a532814b63d90d5e Mon Sep 17 00:00:00 2001 From: Yaroslav Halchenko Date: Mon, 20 Apr 2020 11:51:29 -0400 Subject: [PATCH 0829/1665] RF: DICOMConvert - use the same (basename of) python as of the host process May be basename is not desired? 
but it might not exist exactly the same on the execution box, so decided to go with the basename --- nipype/interfaces/freesurfer/preprocess.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/freesurfer/preprocess.py b/nipype/interfaces/freesurfer/preprocess.py index 39a444495c..92fbe7d7f7 100644 --- a/nipype/interfaces/freesurfer/preprocess.py +++ b/nipype/interfaces/freesurfer/preprocess.py @@ -7,6 +7,7 @@ import os.path as op from glob import glob import shutil +import sys import numpy as np from nibabel import load @@ -726,7 +727,7 @@ def cmdline(self): outdir = self._get_outdir() cmd = [] if not os.path.exists(outdir): - cmdstr = "python -c \"import os; os.makedirs('%s')\"" % outdir + cmdstr = "%s -c \"import os; os.makedirs('%s')\"" % (op.basename(sys.executable), outdir) cmd.extend([cmdstr]) infofile = os.path.join(outdir, "shortinfo.txt") if not os.path.exists(infofile): From f8447343d04274032ea99313c2723e900319f576 Mon Sep 17 00:00:00 2001 From: Yaroslav Halchenko Date: Mon, 20 Apr 2020 11:53:13 -0400 Subject: [PATCH 0830/1665] RF: Makefile - use $(PYTHON) consistently to centralize choice of Python to use --- Makefile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 48f2c112c2..b7107f31f0 100644 --- a/Makefile +++ b/Makefile @@ -11,18 +11,18 @@ zipdoc: html sdist: zipdoc @echo "Building source distribution..." - python setup.py sdist + $(PYTHON) setup.py sdist @echo "Done building source distribution." # XXX copy documentation.zip to dist directory. egg: zipdoc @echo "Building egg..." - python setup.py bdist_egg + $(PYTHON) setup.py bdist_egg @echo "Done building egg." upload_to_pypi: zipdoc @echo "Uploading to PyPi..." - python setup.py sdist --formats=zip,gztar upload + $(PYTHON) setup.py sdist --formats=zip,gztar upload trailing-spaces: find . -name "*[.py|.rst]" -type f | xargs perl -pi -e 's/[ \t]*$$//' @@ -70,7 +70,7 @@ html: specs: @echo "Checking specs and autogenerating spec tests" - env PYTHONPATH=".:$(PYTHONPATH)" python tools/checkspecs.py + env PYTHONPATH=".:$(PYTHONPATH)" $(PYTHON) tools/checkspecs.py check: check-before-commit # just a shortcut check-before-commit: specs trailing-spaces html test From c7efd5b79d3b7c3d88795cda4602615f7fc7ae96 Mon Sep 17 00:00:00 2001 From: Yaroslav Halchenko Date: Mon, 20 Apr 2020 11:53:42 -0400 Subject: [PATCH 0831/1665] RF: py.test -> $(PYTHON) -m pytest To ensure the pytest consistent with the chosen Python is used; py.test might correspond to Python 2 whenever Python 3 is used. --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index b7107f31f0..03c1152053 100644 --- a/Makefile +++ b/Makefile @@ -56,10 +56,10 @@ inplace: $(PYTHON) setup.py build_ext -i test-code: in - py.test --doctest-modules nipype + $(PYTHON) -m pytest --doctest-modules nipype test-coverage: clean-tests in - py.test --doctest-modules --cov-config .coveragerc --cov=nipype nipype + $(PYTHON) -m pytest --doctest-modules --cov-config .coveragerc --cov=nipype nipype test: tests # just another name tests: clean test-code From 5e560d9befe70b369b25c2f6eb42cefc1dc37579 Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Mon, 27 Apr 2020 08:44:54 -0700 Subject: [PATCH 0832/1665] ENH: Add ``"TruncateImageIntensity"`` operation to ``ants.utils.ImageMath`` ``"TruncateImageIntensity"`` is part of ``antsBrainExtraction.sh`` and therefore, having it supported by nipype is of great interest. This PR addresses a problem @eilidhmacnicol just discovered.
Having this merged will also allow us to call the upstreaming of ANTS interfaces from niworkflows done. --- nipype/interfaces/ants/utils.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/nipype/interfaces/ants/utils.py b/nipype/interfaces/ants/utils.py index 6c3c05e9c4..a158aa83d4 100644 --- a/nipype/interfaces/ants/utils.py +++ b/nipype/interfaces/ants/utils.py @@ -46,6 +46,7 @@ class ImageMathInputSpec(ANTSCommandInputSpec): "GE", "GO", "GC", + "TruncateImageIntensity", mandatory=True, position=3, argstr="%s", @@ -92,6 +93,12 @@ class ImageMath(ANTSCommand, CopyHeaderInterface): ... op2='4').cmdline 'ImageMath 3 structural_maths.nii G structural.nii 4' + >>> ImageMath( + ... op1='structural.nii', + ... operation='TruncateImageIntensity', + ... op2='0.005 0.999 256').cmdline + 'ImageMath 3 structural_maths.nii TruncateImageIntensity structural.nii 0.005 0.999 256' + """ _cmd = "ImageMath" From 96f1033b5aa567b39f5a338683ca82d69c73ebe0 Mon Sep 17 00:00:00 2001 From: Eric Condamine <37933899+servoz@users.noreply.github.com> Date: Fri, 15 May 2020 12:11:27 +0200 Subject: [PATCH 0833/1665] volterra_expansion_order documentation error For fMRI model specification, in SPM, matlabbatch{n}.spm.stats.fmri_spec.volt = 2 corresponds to 'Model Interactions' and matlabbatch{n}.spm.stats.fmri_spec.volt =1 corresponds to 'Do not model Interactions'. Currently, for the corresponding process in nipype, the Level1Design, we see volterra_expansion_order = traits.Enum(1, 2, field="volt", desc=("Model interactions - yes:1, no:2")). I guess the correct description should rather be volterra_expansion_order = traits.Enum(1, 2, field="volt", desc=("Model interactions - no:1, yes:2")) --- nipype/interfaces/spm/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/spm/model.py b/nipype/interfaces/spm/model.py index 2d09f23e90..de6dc15216 100644 --- a/nipype/interfaces/spm/model.py +++ b/nipype/interfaces/spm/model.py @@ -81,7 +81,7 @@ class Level1DesignInputSpec(SPMCommandInputSpec): mandatory=True, ) volterra_expansion_order = traits.Enum( - 1, 2, field="volt", desc=("Model interactions - yes:1, no:2") + 1, 2, field="volt", desc=("Model interactions - no:1, yes:2") ) global_intensity_normalization = traits.Enum( "none", From 5f85656c3fa55201f1e9a5e74b56336aa56984b2 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Sat, 16 May 2020 13:15:03 +0300 Subject: [PATCH 0834/1665] Added VSCode configuration directory and venv/ to .gitignore. --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 4213d07a68..f75fe9ef6e 100644 --- a/.gitignore +++ b/.gitignore @@ -29,3 +29,5 @@ __pycache__/ .ipynb_checkpoints/ .ruby-version .pytest_cache +.vscode/ +venv/ \ No newline at end of file From b344a58c55d4c3d70470ff3c030d7d3b70d1b807 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Sat, 16 May 2020 13:20:13 +0300 Subject: [PATCH 0835/1665] Fixed BET's returned output path bug, resolves #2916. 
--- nipype/interfaces/fsl/preprocess.py | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/nipype/interfaces/fsl/preprocess.py b/nipype/interfaces/fsl/preprocess.py index bf38ff3d1f..feebf4e742 100644 --- a/nipype/interfaces/fsl/preprocess.py +++ b/nipype/interfaces/fsl/preprocess.py @@ -173,45 +173,50 @@ def _gen_outfilename(self): def _list_outputs(self): outputs = self.output_spec().get() outputs["out_file"] = self._gen_outfilename() + + basename = os.path.basename(outputs['out_file']) + cwd = os.path.dirname(outputs['out_file']) + kwargs = {'basename': basename, 'cwd': cwd} + if (isdefined(self.inputs.mesh) and self.inputs.mesh) or ( isdefined(self.inputs.surfaces) and self.inputs.surfaces ): outputs["meshfile"] = self._gen_fname( - outputs["out_file"], suffix="_mesh.vtk", change_ext=False + suffix="_mesh.vtk", change_ext=False, **kwargs ) if (isdefined(self.inputs.mask) and self.inputs.mask) or ( isdefined(self.inputs.reduce_bias) and self.inputs.reduce_bias ): - outputs["mask_file"] = self._gen_fname(outputs["out_file"], suffix="_mask") + outputs["mask_file"] = self._gen_fname(suffix="_mask", **kwargs) if isdefined(self.inputs.outline) and self.inputs.outline: outputs["outline_file"] = self._gen_fname( - outputs["out_file"], suffix="_overlay" + suffix="_overlay", **kwargs ) if isdefined(self.inputs.surfaces) and self.inputs.surfaces: outputs["inskull_mask_file"] = self._gen_fname( - outputs["out_file"], suffix="_inskull_mask" + suffix="_inskull_mask", **kwargs ) outputs["inskull_mesh_file"] = self._gen_fname( - outputs["out_file"], suffix="_inskull_mesh" + suffix="_inskull_mesh", **kwargs ) outputs["outskull_mask_file"] = self._gen_fname( - outputs["out_file"], suffix="_outskull_mask" + suffix="_outskull_mask", **kwargs ) outputs["outskull_mesh_file"] = self._gen_fname( - outputs["out_file"], suffix="_outskull_mesh" + suffix="_outskull_mesh", **kwargs ) outputs["outskin_mask_file"] = self._gen_fname( - outputs["out_file"], suffix="_outskin_mask" + suffix="_outskin_mask", **kwargs ) outputs["outskin_mesh_file"] = self._gen_fname( - outputs["out_file"], suffix="_outskin_mesh" + suffix="_outskin_mesh", **kwargs ) outputs["skull_mask_file"] = self._gen_fname( - outputs["out_file"], suffix="_skull_mask" + suffix="_skull_mask", **kwargs ) if isdefined(self.inputs.skull) and self.inputs.skull: outputs["skull_file"] = self._gen_fname( - outputs["out_file"], suffix="_skull" + suffix="_skull", **kwargs ) if isdefined(self.inputs.no_output) and self.inputs.no_output: outputs["out_file"] = Undefined From f95d96025f69d5ce8cb3a5ff484b677e9702c0bd Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Tue, 19 May 2020 08:56:12 -0400 Subject: [PATCH 0836/1665] fix: allow parsing freesurfer 7 version string closes #3215 --- nipype/interfaces/freesurfer/base.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nipype/interfaces/freesurfer/base.py b/nipype/interfaces/freesurfer/base.py index 66023386da..e9fe62ee50 100644 --- a/nipype/interfaces/freesurfer/base.py +++ b/nipype/interfaces/freesurfer/base.py @@ -76,6 +76,9 @@ def looseversion(cls): vstr = "6.0.0-dev" + githash elif vinfo[5][0] == "v": vstr = vinfo[5][1:] + elif len([1 for val in vinfo[3] if val == '.']) == 2: + "version string: freesurfer-linux-centos7_x86_64-7.1.0-20200511-813297b" + vstr = vinfo[3] else: raise RuntimeError("Unknown version string: " + ver) # Retain pre-6.0.0 heuristics From 6270778ead7d44fa2d6285c5105574e731986182 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Wed, 2 Jan 2019 09:48:39 -0500 Subject: [PATCH 0837/1665] MAINT: Allow nilearn up to master --- nipype/info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/info.py b/nipype/info.py index 1cedaa94f4..0e7fd0f70b 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -175,7 +175,7 @@ def get_nipype_gitversion(): "sphinxcontrib-napoleon", ], "duecredit": ["duecredit"], - "nipy": ["nitime", "nilearn<0.5.0", "dipy", "nipy", "matplotlib"], + "nipy": ["nitime", "nilearn", "dipy", "nipy", "matplotlib"], "profiler": ["psutil>=5.0"], "pybids": ["pybids>=0.7.0"], "specs": ["black"], From ad44896c57ccf08a7a6d6e6001e360e8c36af19c Mon Sep 17 00:00:00 2001 From: mathiasg Date: Tue, 26 May 2020 23:15:02 -0400 Subject: [PATCH 0838/1665] FIX: report.rst terminal output --- nipype/utils/filemanip.py | 2 +- nipype/utils/tests/test_filemanip.py | 15 +++++++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 735cc610b6..46c4cc53be 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -736,7 +736,7 @@ def write_rst_header(header, level=0): def write_rst_list(items, prefix=""): out = [] - for item in items: + for item in ensure_list(items): out.append("{} {}".format(prefix, str(item))) return "\n".join(out) + "\n\n" diff --git a/nipype/utils/tests/test_filemanip.py b/nipype/utils/tests/test_filemanip.py index 9c54ff02ee..fed2462548 100644 --- a/nipype/utils/tests/test_filemanip.py +++ b/nipype/utils/tests/test_filemanip.py @@ -30,6 +30,7 @@ loadcrash, savepkl, path_resolve, + write_rst_list, ) @@ -652,3 +653,17 @@ def test_pickle(tmp_path, save_versioning): savepkl(pickle_fname, testobj, versioning=save_versioning) outobj = loadpkl(pickle_fname) assert outobj == testobj + + +@pytest.mark.parametrize("items,expected", [ + ('', ' \n\n'), + ('A string', ' A string\n\n'), + (['A list', 'Of strings'], ' A list\n Of strings\n\n'), + (None, TypeError), +]) +def test_write_rst_list(tmp_path, items, expected): + if items is not None: + assert write_rst_list(items) == expected + else: + with pytest.raises(expected): + write_rst_list(items) From 81b9c83ab022cc499325cdb5675fcce42581622f Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 2 Jun 2020 16:38:20 -0400 Subject: [PATCH 0839/1665] RESTORE ants.legacy, removed in 4a84a0275 --- nipype/interfaces/ants/legacy.py | 373 +++++++++++++++++++++++++++++++ 1 file changed, 373 insertions(+) create mode 100644 nipype/interfaces/ants/legacy.py diff --git a/nipype/interfaces/ants/legacy.py b/nipype/interfaces/ants/legacy.py new file mode 100644 index 0000000000..bdeaa02279 --- /dev/null +++ b/nipype/interfaces/ants/legacy.py @@ -0,0 +1,373 @@ +# -*- coding: utf-8 -*- +# NOTE: This implementation has been superceeded buy the antsApplyTransform +# implmeentation that more closely follows the strucutre and capabilities +# of the antsApplyTransform program. This implementation is here +# for backwards compatibility. 
+"""ANTS Apply Transforms interface +""" + +from builtins import range + +import os +from glob import glob + +from .base import ANTSCommand, ANTSCommandInputSpec +from ..base import TraitedSpec, File, traits, isdefined, OutputMultiPath +from ...utils.filemanip import split_filename + + +class antsIntroductionInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum( + 3, + 2, + argstr="-d %d", + usedefault=True, + desc="image dimension (2 or 3)", + position=1, + ) + reference_image = File( + exists=True, + argstr="-r %s", + desc="template file to warp to", + mandatory=True, + copyfile=True, + ) + input_image = File( + exists=True, + argstr="-i %s", + desc="input image to warp to template", + mandatory=True, + copyfile=False, + ) + force_proceed = traits.Bool( + argstr="-f 1", + desc=("force script to proceed even if headers " "may be incompatible"), + ) + inverse_warp_template_labels = traits.Bool( + argstr="-l", + desc=( + "Applies inverse warp to the template labels " + "to estimate label positions in target space (use " + "for template-based segmentation)" + ), + ) + max_iterations = traits.List( + traits.Int, + argstr="-m %s", + sep="x", + desc=( + "maximum number of iterations (must be " + "list of integers in the form [J,K,L...]: " + "J = coarsest resolution iterations, K = " + "middle resolution interations, L = fine " + "resolution iterations" + ), + ) + bias_field_correction = traits.Bool( + argstr="-n 1", desc=("Applies bias field correction to moving " "image") + ) + similarity_metric = traits.Enum( + "PR", + "CC", + "MI", + "MSQ", + argstr="-s %s", + desc=( + "Type of similartiy metric used for registration " + "(CC = cross correlation, MI = mutual information, " + "PR = probability mapping, MSQ = mean square difference)" + ), + ) + transformation_model = traits.Enum( + "GR", + "EL", + "SY", + "S2", + "EX", + "DD", + "RI", + "RA", + argstr="-t %s", + usedefault=True, + desc=( + "Type of transofmration model used for registration " + "(EL = elastic transformation model, SY = SyN with time, " + "arbitrary number of time points, S2 = SyN with time " + "optimized for 2 time points, GR = greedy SyN, EX = " + "exponential, DD = diffeomorphic demons style exponential " + "mapping, RI = purely rigid, RA = affine rigid" + ), + ) + out_prefix = traits.Str( + "ants_", + argstr="-o %s", + usedefault=True, + desc=("Prefix that is prepended to all output " "files (default = ants_)"), + ) + quality_check = traits.Bool( + argstr="-q 1", desc="Perform a quality check of the result" + ) + + +class antsIntroductionOutputSpec(TraitedSpec): + affine_transformation = File(exists=True, desc="affine (prefix_Affine.txt)") + warp_field = File(exists=True, desc="warp field (prefix_Warp.nii)") + inverse_warp_field = File( + exists=True, desc="inverse warp field (prefix_InverseWarp.nii)" + ) + input_file = File(exists=True, desc="input image (prefix_repaired.nii)") + output_file = File(exists=True, desc="output image (prefix_deformed.nii)") + + +class antsIntroduction(ANTSCommand): + """Uses ANTS to generate matrices to warp data from one space to another. 
+ + Examples + -------- + + >>> from nipype.interfaces.ants.legacy import antsIntroduction + >>> warp = antsIntroduction() + >>> warp.inputs.reference_image = 'Template_6.nii' + >>> warp.inputs.input_image = 'structural.nii' + >>> warp.inputs.max_iterations = [30,90,20] + >>> warp.cmdline + 'antsIntroduction.sh -d 3 -i structural.nii -m 30x90x20 -o ants_ -r Template_6.nii -t GR' + + """ + + _cmd = "antsIntroduction.sh" + input_spec = antsIntroductionInputSpec + output_spec = antsIntroductionOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + transmodel = self.inputs.transformation_model + + # When transform is set as 'RI'/'RA', wrap fields should not be expected + # The default transformation is GR, which outputs the wrap fields + if not isdefined(transmodel) or ( + isdefined(transmodel) and transmodel not in ["RI", "RA"] + ): + outputs["warp_field"] = os.path.join( + os.getcwd(), self.inputs.out_prefix + "Warp.nii.gz" + ) + outputs["inverse_warp_field"] = os.path.join( + os.getcwd(), self.inputs.out_prefix + "InverseWarp.nii.gz" + ) + + outputs["affine_transformation"] = os.path.join( + os.getcwd(), self.inputs.out_prefix + "Affine.txt" + ) + outputs["input_file"] = os.path.join( + os.getcwd(), self.inputs.out_prefix + "repaired.nii.gz" + ) + outputs["output_file"] = os.path.join( + os.getcwd(), self.inputs.out_prefix + "deformed.nii.gz" + ) + + return outputs + + +# How do we make a pass through so that GenWarpFields is just an alias for antsIntroduction ? + + +class GenWarpFields(antsIntroduction): + pass + + +class buildtemplateparallelInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum( + 3, + 2, + 4, + argstr="-d %d", + usedefault=True, + desc="image dimension (2, 3 or 4)", + position=1, + ) + out_prefix = traits.Str( + "antsTMPL_", + argstr="-o %s", + usedefault=True, + desc=("Prefix that is prepended to all output " "files (default = antsTMPL_)"), + ) + in_files = traits.List( + File(exists=True), + mandatory=True, + desc="list of images to generate template from", + argstr="%s", + position=-1, + ) + parallelization = traits.Enum( + 0, + 1, + 2, + argstr="-c %d", + usedefault=True, + desc=( + "control for parallel processing (0 = " + "serial, 1 = use PBS, 2 = use PEXEC, 3 = " + "use Apple XGrid" + ), + ) + gradient_step_size = traits.Float( + argstr="-g %f", + desc=("smaller magnitude results in " "more cautious steps (default = " ".25)"), + ) + iteration_limit = traits.Int( + 4, argstr="-i %d", usedefault=True, desc="iterations of template construction" + ) + num_cores = traits.Int( + argstr="-j %d", + requires=["parallelization"], + desc=( + "Requires parallelization = 2 (PEXEC). 
" "Sets number of cpu cores to use" + ), + ) + max_iterations = traits.List( + traits.Int, + argstr="-m %s", + sep="x", + desc=( + "maximum number of iterations (must be " + "list of integers in the form [J,K,L...]: " + "J = coarsest resolution iterations, K = " + "middle resolution interations, L = fine " + "resolution iterations" + ), + ) + bias_field_correction = traits.Bool( + argstr="-n 1", desc=("Applies bias field correction to moving " "image") + ) + rigid_body_registration = traits.Bool( + argstr="-r 1", + desc=( + "registers inputs before creating template " + "(useful if no initial template available)" + ), + ) + similarity_metric = traits.Enum( + "PR", + "CC", + "MI", + "MSQ", + argstr="-s %s", + desc=( + "Type of similartiy metric used for registration " + "(CC = cross correlation, MI = mutual information, " + "PR = probability mapping, MSQ = mean square difference)" + ), + ) + transformation_model = traits.Enum( + "GR", + "EL", + "SY", + "S2", + "EX", + "DD", + argstr="-t %s", + usedefault=True, + desc=( + "Type of transofmration model used for registration " + "(EL = elastic transformation model, SY = SyN with time, " + "arbitrary number of time points, S2 = SyN with time " + "optimized for 2 time points, GR = greedy SyN, EX = " + "exponential, DD = diffeomorphic demons style exponential " + "mapping" + ), + ) + use_first_as_target = traits.Bool( + desc=( + "uses first volume as target of " + "all inputs. When not used, an " + "unbiased average image is used " + "to start." + ) + ) + + +class buildtemplateparallelOutputSpec(TraitedSpec): + final_template_file = File(exists=True, desc="final ANTS template") + template_files = OutputMultiPath( + File(exists=True), desc="Templates from different stages of iteration" + ) + subject_outfiles = OutputMultiPath( + File(exists=True), + desc=( + "Outputs for each input image. Includes warp " + "field, inverse warp, Affine, original image " + "(repaired) and warped image (deformed)" + ), + ) + + +class buildtemplateparallel(ANTSCommand): + """Generate a optimal average template + + .. 
warning:: + + This can take a VERY long time to complete + + Examples + -------- + + >>> from nipype.interfaces.ants.legacy import buildtemplateparallel + >>> tmpl = buildtemplateparallel() + >>> tmpl.inputs.in_files = ['T1.nii', 'structural.nii'] + >>> tmpl.inputs.max_iterations = [30, 90, 20] + >>> tmpl.cmdline + 'buildtemplateparallel.sh -d 3 -i 4 -m 30x90x20 -o antsTMPL_ -c 0 -t GR T1.nii structural.nii' + + """ + + _cmd = "buildtemplateparallel.sh" + input_spec = buildtemplateparallelInputSpec + output_spec = buildtemplateparallelOutputSpec + + def _format_arg(self, opt, spec, val): + if opt == "num_cores": + if self.inputs.parallelization == 2: + return "-j " + str(val) + else: + return "" + if opt == "in_files": + if self.inputs.use_first_as_target: + start = "-z " + else: + start = "" + return start + " ".join(name for name in val) + return super(buildtemplateparallel, self)._format_arg(opt, spec, val) + + def _list_outputs(self): + outputs = self._outputs().get() + outputs["template_files"] = [] + for i in range(len(glob(os.path.realpath("*iteration*")))): + temp = os.path.realpath( + "%s_iteration_%d/%stemplate.nii.gz" + % (self.inputs.transformation_model, i, self.inputs.out_prefix) + ) + os.rename( + temp, + os.path.realpath( + "%s_iteration_%d/%stemplate_i%d.nii.gz" + % (self.inputs.transformation_model, i, self.inputs.out_prefix, i) + ), + ) + file_ = "%s_iteration_%d/%stemplate_i%d.nii.gz" % ( + self.inputs.transformation_model, + i, + self.inputs.out_prefix, + i, + ) + + outputs["template_files"].append(os.path.realpath(file_)) + outputs["final_template_file"] = os.path.realpath( + "%stemplate.nii.gz" % self.inputs.out_prefix + ) + outputs["subject_outfiles"] = [] + for filename in self.inputs.in_files: + _, base, _ = split_filename(filename) + temp = glob(os.path.realpath("%s%s*" % (self.inputs.out_prefix, base))) + for file_ in temp: + outputs["subject_outfiles"].append(file_) + return outputs From 124d4ef7668eaece4236b2c5a814148bb3c838f1 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 2 Jun 2020 16:42:09 -0400 Subject: [PATCH 0840/1665] make specs --- .../ants/tests/test_auto_GenWarpFields.py | 45 +++++++++++++++++++ .../ants/tests/test_auto_antsIntroduction.py | 45 +++++++++++++++++++ .../tests/test_auto_buildtemplateparallel.py | 41 +++++++++++++++++ 3 files changed, 131 insertions(+) create mode 100644 nipype/interfaces/ants/tests/test_auto_GenWarpFields.py create mode 100644 nipype/interfaces/ants/tests/test_auto_antsIntroduction.py create mode 100644 nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py diff --git a/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py b/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py new file mode 100644 index 0000000000..11fbe56ccd --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py @@ -0,0 +1,45 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from ..legacy import GenWarpFields + + +def test_GenWarpFields_inputs(): + input_map = dict( + args=dict(argstr="%s",), + bias_field_correction=dict(argstr="-n 1",), + dimension=dict(argstr="-d %d", position=1, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + force_proceed=dict(argstr="-f 1",), + input_image=dict( + argstr="-i %s", copyfile=False, extensions=None, mandatory=True, + ), + inverse_warp_template_labels=dict(argstr="-l",), + max_iterations=dict(argstr="-m %s", sep="x",), + num_threads=dict(nohash=True, usedefault=True,), + out_prefix=dict(argstr="-o %s", usedefault=True,), + 
quality_check=dict(argstr="-q 1",), + reference_image=dict( + argstr="-r %s", copyfile=True, extensions=None, mandatory=True, + ), + similarity_metric=dict(argstr="-s %s",), + transformation_model=dict(argstr="-t %s", usedefault=True,), + ) + inputs = GenWarpFields.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_GenWarpFields_outputs(): + output_map = dict( + affine_transformation=dict(extensions=None,), + input_file=dict(extensions=None,), + inverse_warp_field=dict(extensions=None,), + output_file=dict(extensions=None,), + warp_field=dict(extensions=None,), + ) + outputs = GenWarpFields.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py b/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py new file mode 100644 index 0000000000..d7950f38b5 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py @@ -0,0 +1,45 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from ..legacy import antsIntroduction + + +def test_antsIntroduction_inputs(): + input_map = dict( + args=dict(argstr="%s",), + bias_field_correction=dict(argstr="-n 1",), + dimension=dict(argstr="-d %d", position=1, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + force_proceed=dict(argstr="-f 1",), + input_image=dict( + argstr="-i %s", copyfile=False, extensions=None, mandatory=True, + ), + inverse_warp_template_labels=dict(argstr="-l",), + max_iterations=dict(argstr="-m %s", sep="x",), + num_threads=dict(nohash=True, usedefault=True,), + out_prefix=dict(argstr="-o %s", usedefault=True,), + quality_check=dict(argstr="-q 1",), + reference_image=dict( + argstr="-r %s", copyfile=True, extensions=None, mandatory=True, + ), + similarity_metric=dict(argstr="-s %s",), + transformation_model=dict(argstr="-t %s", usedefault=True,), + ) + inputs = antsIntroduction.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_antsIntroduction_outputs(): + output_map = dict( + affine_transformation=dict(extensions=None,), + input_file=dict(extensions=None,), + inverse_warp_field=dict(extensions=None,), + output_file=dict(extensions=None,), + warp_field=dict(extensions=None,), + ) + outputs = antsIntroduction.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py b/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py new file mode 100644 index 0000000000..35b225949b --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py @@ -0,0 +1,41 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from ..legacy import buildtemplateparallel + + +def test_buildtemplateparallel_inputs(): + input_map = dict( + args=dict(argstr="%s",), + bias_field_correction=dict(argstr="-n 1",), + dimension=dict(argstr="-d %d", position=1, usedefault=True,), + environ=dict(nohash=True, usedefault=True,), + gradient_step_size=dict(argstr="-g %f",), + in_files=dict(argstr="%s", mandatory=True, position=-1,), + iteration_limit=dict(argstr="-i %d", 
usedefault=True,), + max_iterations=dict(argstr="-m %s", sep="x",), + num_cores=dict(argstr="-j %d", requires=["parallelization"],), + num_threads=dict(nohash=True, usedefault=True,), + out_prefix=dict(argstr="-o %s", usedefault=True,), + parallelization=dict(argstr="-c %d", usedefault=True,), + rigid_body_registration=dict(argstr="-r 1",), + similarity_metric=dict(argstr="-s %s",), + transformation_model=dict(argstr="-t %s", usedefault=True,), + use_first_as_target=dict(), + ) + inputs = buildtemplateparallel.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_buildtemplateparallel_outputs(): + output_map = dict( + final_template_file=dict(extensions=None,), + subject_outfiles=dict(), + template_files=dict(), + ) + outputs = buildtemplateparallel.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value From 81e8f9ed6ff3636e7e2ec3916249b4084a066af7 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 2 Jun 2020 16:45:25 -0400 Subject: [PATCH 0841/1665] Update module description --- nipype/interfaces/ants/legacy.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/nipype/interfaces/ants/legacy.py b/nipype/interfaces/ants/legacy.py index bdeaa02279..0cc8b7e864 100644 --- a/nipype/interfaces/ants/legacy.py +++ b/nipype/interfaces/ants/legacy.py @@ -1,9 +1,8 @@ # -*- coding: utf-8 -*- -# NOTE: This implementation has been superceeded buy the antsApplyTransform -# implmeentation that more closely follows the strucutre and capabilities -# of the antsApplyTransform program. This implementation is here -# for backwards compatibility. -"""ANTS Apply Transforms interface +"""ANTS Legacy Interfaces + +These interfaces are for programs that have been deprecated by ANTs, but +are preserved for backwards compatibility. """ from builtins import range From 2808a2f0517da87024ab5f1725a9b62a904146d4 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 3 Jun 2020 10:24:39 -0400 Subject: [PATCH 0842/1665] Update changelog --- .zenodo.json | 10 +++++----- doc/changelog/1.X.X-changelog.rst | 18 +++++++++++++++++- 2 files changed, 22 insertions(+), 6 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index 8a57735308..c7af81ad78 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -635,6 +635,11 @@ "affiliation": "Vrije Universiteit Amsterdam", "name": "Ort, Eduard" }, + { + "affiliation": "CNRS, UMS3552 IRMaGe", + "name": "Condamine, Eric", + "orcid": "0000-0002-9533-3769" + }, { "affiliation": "Stanford University", "name": "Lerma-Usabiaga, Garikoitz", @@ -719,11 +724,6 @@ { "name": "Shachnev, Dmitry" }, - { - "affiliation": "CNRS, UMS3552 IRMaGe", - "name": "Condamine, Eric", - "orcid": "0000-0002-9533-3769" - }, { "name": "Flandin, Guillaume" }, diff --git a/doc/changelog/1.X.X-changelog.rst b/doc/changelog/1.X.X-changelog.rst index 239aa7d936..e10949cf08 100644 --- a/doc/changelog/1.X.X-changelog.rst +++ b/doc/changelog/1.X.X-changelog.rst @@ -1,4 +1,4 @@ -1.5.0 (To be determined) +1.5.0 (June 03, 2020) ========================= New feature release in the 1.5.x series. 
@@ -8,20 +8,36 @@ In this release, the example scripts have been split out into their own package: (`Full changelog `__) + * FIX: volterra_expansion_order documentation error (https://github.com/nipy/nipype/pull/3213) + * FIX: BET incorrect output paths (https://github.com/nipy/nipype/pull/3214) + * FIX: Terminal output in ``report.rst`` spreads one line per character (https://github.com/nipy/nipype/pull/3220) + * FIX: Allow parsing freesurfer 7 version string (https://github.com/nipy/nipype/pull/3216) + * FIX: Use PackageInfo to get NiftyReg version (https://github.com/nipy/nipype/pull/3194) * FIX: Partial rollback of N4BiasFieldCorrection (https://github.com/nipy/nipype/pull/3188) * FIX: ANTs' tools maintenance overhaul (https://github.com/nipy/nipype/pull/3180) * FIX: load_resultfile crashes if open resultsfile from crashed job (https://github.com/nipy/nipype/pull/3182) * FIX: FSL model.py make multiple F-tests (https://github.com/nipy/nipype/pull/3166) + * ENH: Restore ants.legacy interfaces (https://github.com/nipy/nipype/pull/3222) + * ENH: Add ``"TruncateImageIntensity"`` operation to ``ants.utils.Image.Math`` (https://github.com/nipy/nipype/pull/3210) + * ENH: SPM NewSegment multi-channel segmentation (https://github.com/nipy/nipype/pull/3162) + * ENH: Add reverse-ordered transform lists to ants.Registration outputs (https://github.com/nipy/nipype/pull/3192) * ENH: Improve workflow connect performance (https://github.com/nipy/nipype/pull/3184) * ENH: Add ``ConstrainedSphericalDeconvolution`` interface to replace ``EstimateFOD`` for MRtrix3's ``dwi2fod`` (https://github.com/nipy/nipype/pull/3176) * ENH: Detect values for EulerNumber interface (https://github.com/nipy/nipype/pull/3173) * ENH: Remove examples from repository (https://github.com/nipy/nipype/pull/3172) + * TEST: Clean up tests (https://github.com/nipy/nipype/pull/3195) + * TEST: Mock terminal output before testing changing default value (https://github.com/nipy/nipype/pull/3193) + * REF: make invocations of python and pytest consistent with the one used/desired python (https://github.com/nipy/nipype/pull/3208) * REF: Prefer math.gcd to hand-rolled Euclid's algorithm (https://github.com/nipy/nipype/pull/3177) * REF: Removed all uses of numpy_mmap (https://github.com/nipy/nipype/pull/3121) + * DOC: Sphinx 3 compatibility (https://github.com/nipy/nipype/pull/3206) * DOC: Update links, typos in contributing guide (https://github.com/nipy/nipype/pull/3160) * DOC: Update SelectFiles docstring to match actual behavior (https://github.com/nipy/nipype/pull/3041) * DOC: Updated .zenodo.json file (https://github.com/nipy/nipype/pull/3167) * DOC: Update .zenodo.json (https://github.com/nipy/nipype/pull/3165) + * MNT: Permit recent nilearns (https://github.com/nipy/nipype/pull/2841) + * MNT: Test Python 3.8 (https://github.com/nipy/nipype/pull/3154) + * MNT: Restore ReadTheDocs (https://github.com/nipy/nipype/pull/3207) * MNT: Update Zenodo ordering based on commit count (https://github.com/nipy/nipype/pull/3169) 1.4.2 (February 14, 2020) From f4c936a2bc3d4a57c568fdf26802e08424a966a6 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 3 Jun 2020 10:24:45 -0400 Subject: [PATCH 0843/1665] Update mailmap --- .mailmap | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.mailmap b/.mailmap index 4df0aff6e5..f603849d24 100644 --- a/.mailmap +++ b/.mailmap @@ -45,6 +45,8 @@ Colin Buchanan Daniel Brenner Daniel Clark Daniel Geisler +Daniel Geisler +Daniel Geisler <3453485+daniel-ge@users.noreply.github.com> Daniel Ginsburg Daniel 
McNamee David Ellis @@ -89,6 +91,7 @@ Joerg Stadler John A. Lee John A. Lee Joke Durnez +Jordi Huguet Josh Warner Junhao WEN Kai Schlamp @@ -117,6 +120,7 @@ Lukas Snoek Marcel Falkiewicz Martin Perez-Guevara Mathias Goncalves +Mathias Goncalves Mathieu Dubois Mathieu Dubois Matteo Mancini @@ -170,6 +174,7 @@ Steven Giavasis Steven Giavasis Steven Giavasis Steven Tilley +Sulantha Mathotaarachchi Tristan Glatard Victor Férat Victor Férat From fe34db606479c5774e462e6a46ddadfdf8fbd5ef Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 3 Jun 2020 10:24:58 -0400 Subject: [PATCH 0844/1665] Update Zenodo --- .zenodo.json | 44 ++++++++++++++++++++++++++------------------ 1 file changed, 26 insertions(+), 18 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index c7af81ad78..2ee43c9904 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -76,14 +76,14 @@ "name": "Dayan, Michael", "orcid": "0000-0002-2666-0969" }, - { - "name": "Loney, Fred" - }, { "affiliation": "Dartmouth College: Hanover, NH, United States", "name": "Halchenko, Yaroslav O.", "orcid": "0000-0003-3456-2493" }, + { + "name": "Loney, Fred" + }, { "affiliation": "Florida International University", "name": "Salo, Taylor", @@ -288,6 +288,11 @@ "name": "Kong, Xiang-Zhen", "orcid": "0000-0002-0805-1350" }, + { + "affiliation": "Division of Psychological and Social Medicine and Developmental Neuroscience, Faculty of Medicine, Technische Universit\u00e4t Dresden, Dresden, Germany", + "name": "Geisler, Daniel", + "orcid": "0000-0003-2076-5329" + }, { "name": "Salvatore, John" }, @@ -384,6 +389,11 @@ { "name": "Cumba, Chad" }, + { + "affiliation": "University College London", + "name": "P\u00e9rez-Garc\u00eda, Fernando", + "orcid": "0000-0001-9090-3024" + }, { "name": "Blair, Ross" }, @@ -392,16 +402,6 @@ "name": "Iqbal, Shariq", "orcid": "0000-0003-2766-8425" }, - { - "affiliation": "NIMH, Scientific and Statistical Computing Core", - "name": "Glen, Daniel", - "orcid": "0000-0001-8456-5647" - }, - { - "affiliation": "Technische Universit\u00e4t Dresden, Faculty of Medicine, Department of Child and Adolescent Psychiatry", - "name": "Geisler, Daniel", - "orcid": "0000-0003-2076-5329" - }, { "affiliation": "University of Iowa", "name": "Welch, David" @@ -429,11 +429,6 @@ "name": "Papadopoulos Orfanos, Dimitri", "orcid": "0000-0002-1242-8990" }, - { - "affiliation": "University College London", - "name": "P\u00e9rez-Garc\u00eda, Fernando", - "orcid": "0000-0001-9090-3024" - }, { "affiliation": "Leibniz Institute for Neurobiology", "name": "Stadler, J\u00f6rg", @@ -618,6 +613,10 @@ "name": "Gerhard, Stephan", "orcid": "0000-0003-4454-6171" }, + { + "affiliation": "Enigma Biomedical Group", + "name": "Mathotaarachchi, Sulantha" + }, { "name": "Saase, Victor" }, @@ -659,6 +658,11 @@ "name": "Pellman, John", "orcid": "0000-0001-6810-4461" }, + { + "affiliation": "BarcelonaBeta Brain Research Center", + "name": "Huguet, Jordi", + "orcid": "0000-0001-8420-4833" + }, { "affiliation": "University of Pennsylvania", "name": "Junhao WEN", @@ -689,6 +693,10 @@ "name": "Andberg, Sami Kristian", "orcid": "0000-0002-5650-3964" }, + { + "affiliation": "Sagol School of Neuroscience, Tel Aviv University", + "name": "Baratz, Zvi" + }, { "name": "Matsubara, K" }, From 94f25c1f627a3e600fbe34b622f848528c700c28 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 3 Jun 2020 10:25:48 -0400 Subject: [PATCH 0845/1665] REL: 1.5.0 --- nipype/info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/info.py b/nipype/info.py index 
0e7fd0f70b..69eb443f76 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -5,7 +5,7 @@ # nipype version information # Remove -dev for release -__version__ = "1.5.0-rc1.post-dev" +__version__ = "1.5.0" def get_nipype_gitversion(): From 742e7394f3a179e3f12857c8bad0cdb222c1ceb8 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 3 Jun 2020 14:26:46 -0400 Subject: [PATCH 0846/1665] MNT: 1.5.1-dev --- nipype/info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/info.py b/nipype/info.py index 69eb443f76..6c65d06d68 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -5,7 +5,7 @@ # nipype version information # Remove -dev for release -__version__ = "1.5.0" +__version__ = "1.5.1-dev" def get_nipype_gitversion(): From ac16f23d6fcc29c3778348573d566decb5ec7d37 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 3 Jun 2020 14:27:34 -0400 Subject: [PATCH 0847/1665] MNT: 1.6.0-dev --- nipype/info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/info.py b/nipype/info.py index 69eb443f76..73ad1b88d5 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -5,7 +5,7 @@ # nipype version information # Remove -dev for release -__version__ = "1.5.0" +__version__ = "1.6.0-dev" def get_nipype_gitversion(): From dc230ef7c9e6261164d66124f82918807473a81d Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 4 Jun 2020 13:30:11 -0400 Subject: [PATCH 0848/1665] DOC: Skip BIDSDataGrabber doctest if pybids is missing --- nipype/conftest.py | 1 + nipype/interfaces/io.py | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/nipype/conftest.py b/nipype/conftest.py index b099fd0078..7323e72846 100644 --- a/nipype/conftest.py +++ b/nipype/conftest.py @@ -17,6 +17,7 @@ def add_np(doctest_namespace): doctest_namespace["np"] = numpy doctest_namespace["os"] = os + doctest_namespace["pytest"] = pytest doctest_namespace["datadir"] = data_dir diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index d9b578caaa..2b2510f169 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -2895,6 +2895,14 @@ class BIDSDataGrabber(LibraryBaseInterface, IOBase): Examples -------- + + .. setup:: + + >>> try: + ... import bids + ... except ImportError: + ... pytest.skip() + By default, the BIDSDataGrabber fetches anatomical and functional images from a project, and makes BIDS entities (e.g. subject) available for filtering outputs. From 5116ee208205c8050f87fc2a45bb0f35e3dd1c0a Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Tue, 7 Jul 2020 22:21:29 -0400 Subject: [PATCH 0849/1665] add nipype svg brain --- doc/_static/snakebrain-nipype.svg | 108 ++++++++++++++++++++++++++++++ 1 file changed, 108 insertions(+) create mode 100644 doc/_static/snakebrain-nipype.svg diff --git a/doc/_static/snakebrain-nipype.svg b/doc/_static/snakebrain-nipype.svg new file mode 100644 index 0000000000..04bfc05362 --- /dev/null +++ b/doc/_static/snakebrain-nipype.svg @@ -0,0 +1,108 @@ + + + + + + + + image/svg+xml + + + + + + Michael Hanke <michael.hanke@gmail.com> + + + Artwork by Arno Klein + The image has been converted from a pixel-based artwork. + + + Original artwork created by Arno Klein <arno@binarybottle.com>. +Converted to SVG by Michael Hanke <michael.hanke@gmail.com>. 
+ + + + + + + + + + + + + + + + + + + + + + From 35875331ffc850690eb168b0e25142fddbd94cc4 Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Wed, 12 Aug 2020 11:27:49 +0200 Subject: [PATCH 0850/1665] ENH: Add more operations to ``ants.ImageMath`` These added operations are necessary for the antsBrainExtraction workflow we have in niworkflows (and hence niflows too). --- nipype/interfaces/ants/utils.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nipype/interfaces/ants/utils.py b/nipype/interfaces/ants/utils.py index a158aa83d4..b91d909b43 100644 --- a/nipype/interfaces/ants/utils.py +++ b/nipype/interfaces/ants/utils.py @@ -47,6 +47,10 @@ class ImageMathInputSpec(ANTSCommandInputSpec): "GO", "GC", "TruncateImageIntensity", + "Laplacian", + "GetLargestComponent", + "FillHoles", + "PadImage", mandatory=True, position=3, argstr="%s", From e59aaa68f3f223d9126056ffc8ad71ad49343a48 Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Wed, 12 Aug 2020 14:10:39 +0200 Subject: [PATCH 0851/1665] fix: address edge cases of the copyheader mixin - Disable ``copy_header`` for the ``PadImage`` operation in ``ImageMath`` - Drop the CopyHeader mixin and update auto test of ResampleImageBySpacing --- .../tests/test_auto_ResampleImageBySpacing.py | 1 - nipype/interfaces/ants/utils.py | 43 +++++++++++++++---- 2 files changed, 35 insertions(+), 9 deletions(-) diff --git a/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py b/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py index dce64b8930..a2834bf36c 100644 --- a/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py +++ b/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py @@ -7,7 +7,6 @@ def test_ResampleImageBySpacing_inputs(): addvox=dict(argstr="%d", position=6, requires=["apply_smoothing"],), apply_smoothing=dict(argstr="%d", position=5,), args=dict(argstr="%s",), - copy_header=dict(mandatory=True, usedefault=True,), dimension=dict(argstr="%d", position=1, usedefault=True,), environ=dict(nohash=True, usedefault=True,), input_image=dict(argstr="%s", extensions=None, mandatory=True, position=2,), diff --git a/nipype/interfaces/ants/utils.py b/nipype/interfaces/ants/utils.py index b91d909b43..751741e6d0 100644 --- a/nipype/interfaces/ants/utils.py +++ b/nipype/interfaces/ants/utils.py @@ -1,5 +1,6 @@ """ANTs' utilities.""" import os +from warnings import warn from ..base import traits, isdefined, TraitedSpec, File, Str, InputMultiObject from ..mixins import CopyHeaderInterface from .base import ANTSCommandInputSpec, ANTSCommand @@ -103,6 +104,22 @@ class ImageMath(ANTSCommand, CopyHeaderInterface): ... op2='0.005 0.999 256').cmdline 'ImageMath 3 structural_maths.nii TruncateImageIntensity structural.nii 0.005 0.999 256' + >>> pad = ImageMath( + ... op1='structural.nii', + ... operation='PadImage', + ... 
op2='0.005 0.999 256') + >>> pad.inputs.copy_header + False + + >>> pad.inputs.copy_header = True + >>> pad.inputs.copy_header + False + + >>> pad.inputs.operation = "ME" + >>> pad.inputs.copy_header = True + >>> pad.inputs.copy_header + True + """ _cmd = "ImageMath" @@ -110,6 +127,23 @@ class ImageMath(ANTSCommand, CopyHeaderInterface): output_spec = ImageMathOuputSpec _copy_header_map = {"output_image": "op1"} + def __init__(self, **inputs): + super(ImageMath, self).__init__(**inputs) + if self.inputs.operation in ("PadImage", ): + self.inputs.copy_header = False + + self.inputs.on_trait_change(self._operation_update, "operation") + self.inputs.on_trait_change(self._copyheader_update, "copy_header") + + def _operation_update(self): + if self.inputs.operation in ("PadImage", ): + self.inputs.copy_header = False + + def _copyheader_update(self): + if self.inputs.copy_header and self.inputs.operation in ("PadImage", ): + warn("copy_header cannot be updated to True with PadImage as operation.") + self.inputs.copy_header = False + class ResampleImageBySpacingInputSpec(ANTSCommandInputSpec): dimension = traits.Int( @@ -147,19 +181,13 @@ class ResampleImageBySpacingInputSpec(ANTSCommandInputSpec): nn_interp = traits.Bool( argstr="%d", desc="nn interpolation", position=-1, requires=["addvox"] ) - copy_header = traits.Bool( - True, - mandatory=True, - usedefault=True, - desc="copy headers of the original image into the output (corrected) file", - ) class ResampleImageBySpacingOutputSpec(TraitedSpec): output_image = File(exists=True, desc="resampled file") -class ResampleImageBySpacing(ANTSCommand, CopyHeaderInterface): +class ResampleImageBySpacing(ANTSCommand): """ Resample an image with a given spacing. @@ -195,7 +223,6 @@ class ResampleImageBySpacing(ANTSCommand, CopyHeaderInterface): _cmd = "ResampleImageBySpacing" input_spec = ResampleImageBySpacingInputSpec output_spec = ResampleImageBySpacingOutputSpec - _copy_header_map = {"output_image": "input_image"} def _format_arg(self, name, trait_spec, value): if name == "out_spacing": From 461bc5e66f2e5f595e864bb5e8402f342a2faf7f Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Thu, 13 Aug 2020 09:14:29 +0200 Subject: [PATCH 0852/1665] doc: add explanation to doctests [skip ci] --- nipype/interfaces/ants/utils.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/nipype/interfaces/ants/utils.py b/nipype/interfaces/ants/utils.py index 751741e6d0..6ca6af2d98 100644 --- a/nipype/interfaces/ants/utils.py +++ b/nipype/interfaces/ants/utils.py @@ -78,8 +78,8 @@ class ImageMath(ANTSCommand, CopyHeaderInterface): """ Operations over images. - Example - ------- + Examples + -------- >>> ImageMath( ... op1='structural.nii', ... operation='+', @@ -104,17 +104,26 @@ class ImageMath(ANTSCommand, CopyHeaderInterface): ... op2='0.005 0.999 256').cmdline 'ImageMath 3 structural_maths.nii TruncateImageIntensity structural.nii 0.005 0.999 256' + By default, Nipype copies headers from the first input image (``op1``) + to the output image. + For the ``PadImage`` operation, the header cannot be copied from inputs to + outputs, and so ``copy_header`` option is automatically set to ``False``. + >>> pad = ImageMath( ... op1='structural.nii', - ... operation='PadImage', - ... op2='0.005 0.999 256') + ... operation='PadImage') >>> pad.inputs.copy_header False + While the operation is set to ``PadImage``, + setting ``copy_header = True`` will have no effect. 
+ >>> pad.inputs.copy_header = True >>> pad.inputs.copy_header False + For any other operation, ``copy_header`` can be enabled/disabled normally: + >>> pad.inputs.operation = "ME" >>> pad.inputs.copy_header = True >>> pad.inputs.copy_header From 1cea2306546e05ca6f438e80d5025251db7e9344 Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Wed, 12 Aug 2020 11:27:49 +0200 Subject: [PATCH 0853/1665] ENH: Add more operations to ``ants.ImageMath`` These added operations are necessary for the antsBrainExtraction workflow we have in niworkflows (and hence niflows too). --- nipype/interfaces/ants/utils.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nipype/interfaces/ants/utils.py b/nipype/interfaces/ants/utils.py index a158aa83d4..b91d909b43 100644 --- a/nipype/interfaces/ants/utils.py +++ b/nipype/interfaces/ants/utils.py @@ -47,6 +47,10 @@ class ImageMathInputSpec(ANTSCommandInputSpec): "GO", "GC", "TruncateImageIntensity", + "Laplacian", + "GetLargestComponent", + "FillHoles", + "PadImage", mandatory=True, position=3, argstr="%s", From 4a7cae15c9a2f3c3a73932c368668a6d40b39f52 Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Wed, 12 Aug 2020 14:10:39 +0200 Subject: [PATCH 0854/1665] fix: address edge cases of the copyheader mixin - Disable ``copy_header`` for the ``PadImage`` operation in ``ImageMath`` - Drop the CopyHeader mixin and update auto test of ResampleImageBySpacing --- .../tests/test_auto_ResampleImageBySpacing.py | 1 - nipype/interfaces/ants/utils.py | 43 +++++++++++++++---- 2 files changed, 35 insertions(+), 9 deletions(-) diff --git a/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py b/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py index dce64b8930..a2834bf36c 100644 --- a/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py +++ b/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py @@ -7,7 +7,6 @@ def test_ResampleImageBySpacing_inputs(): addvox=dict(argstr="%d", position=6, requires=["apply_smoothing"],), apply_smoothing=dict(argstr="%d", position=5,), args=dict(argstr="%s",), - copy_header=dict(mandatory=True, usedefault=True,), dimension=dict(argstr="%d", position=1, usedefault=True,), environ=dict(nohash=True, usedefault=True,), input_image=dict(argstr="%s", extensions=None, mandatory=True, position=2,), diff --git a/nipype/interfaces/ants/utils.py b/nipype/interfaces/ants/utils.py index b91d909b43..751741e6d0 100644 --- a/nipype/interfaces/ants/utils.py +++ b/nipype/interfaces/ants/utils.py @@ -1,5 +1,6 @@ """ANTs' utilities.""" import os +from warnings import warn from ..base import traits, isdefined, TraitedSpec, File, Str, InputMultiObject from ..mixins import CopyHeaderInterface from .base import ANTSCommandInputSpec, ANTSCommand @@ -103,6 +104,22 @@ class ImageMath(ANTSCommand, CopyHeaderInterface): ... op2='0.005 0.999 256').cmdline 'ImageMath 3 structural_maths.nii TruncateImageIntensity structural.nii 0.005 0.999 256' + >>> pad = ImageMath( + ... op1='structural.nii', + ... operation='PadImage', + ... 
op2='0.005 0.999 256') + >>> pad.inputs.copy_header + False + + >>> pad.inputs.copy_header = True + >>> pad.inputs.copy_header + False + + >>> pad.inputs.operation = "ME" + >>> pad.inputs.copy_header = True + >>> pad.inputs.copy_header + True + """ _cmd = "ImageMath" @@ -110,6 +127,23 @@ class ImageMath(ANTSCommand, CopyHeaderInterface): output_spec = ImageMathOuputSpec _copy_header_map = {"output_image": "op1"} + def __init__(self, **inputs): + super(ImageMath, self).__init__(**inputs) + if self.inputs.operation in ("PadImage", ): + self.inputs.copy_header = False + + self.inputs.on_trait_change(self._operation_update, "operation") + self.inputs.on_trait_change(self._copyheader_update, "copy_header") + + def _operation_update(self): + if self.inputs.operation in ("PadImage", ): + self.inputs.copy_header = False + + def _copyheader_update(self): + if self.inputs.copy_header and self.inputs.operation in ("PadImage", ): + warn("copy_header cannot be updated to True with PadImage as operation.") + self.inputs.copy_header = False + class ResampleImageBySpacingInputSpec(ANTSCommandInputSpec): dimension = traits.Int( @@ -147,19 +181,13 @@ class ResampleImageBySpacingInputSpec(ANTSCommandInputSpec): nn_interp = traits.Bool( argstr="%d", desc="nn interpolation", position=-1, requires=["addvox"] ) - copy_header = traits.Bool( - True, - mandatory=True, - usedefault=True, - desc="copy headers of the original image into the output (corrected) file", - ) class ResampleImageBySpacingOutputSpec(TraitedSpec): output_image = File(exists=True, desc="resampled file") -class ResampleImageBySpacing(ANTSCommand, CopyHeaderInterface): +class ResampleImageBySpacing(ANTSCommand): """ Resample an image with a given spacing. @@ -195,7 +223,6 @@ class ResampleImageBySpacing(ANTSCommand, CopyHeaderInterface): _cmd = "ResampleImageBySpacing" input_spec = ResampleImageBySpacingInputSpec output_spec = ResampleImageBySpacingOutputSpec - _copy_header_map = {"output_image": "input_image"} def _format_arg(self, name, trait_spec, value): if name == "out_spacing": From c71b8ac1731874ef46a7e070adcd8bb3e5bd3caf Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Thu, 13 Aug 2020 09:14:29 +0200 Subject: [PATCH 0855/1665] doc: add explanation to doctests [skip ci] --- nipype/interfaces/ants/utils.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/nipype/interfaces/ants/utils.py b/nipype/interfaces/ants/utils.py index 751741e6d0..6ca6af2d98 100644 --- a/nipype/interfaces/ants/utils.py +++ b/nipype/interfaces/ants/utils.py @@ -78,8 +78,8 @@ class ImageMath(ANTSCommand, CopyHeaderInterface): """ Operations over images. - Example - ------- + Examples + -------- >>> ImageMath( ... op1='structural.nii', ... operation='+', @@ -104,17 +104,26 @@ class ImageMath(ANTSCommand, CopyHeaderInterface): ... op2='0.005 0.999 256').cmdline 'ImageMath 3 structural_maths.nii TruncateImageIntensity structural.nii 0.005 0.999 256' + By default, Nipype copies headers from the first input image (``op1``) + to the output image. + For the ``PadImage`` operation, the header cannot be copied from inputs to + outputs, and so ``copy_header`` option is automatically set to ``False``. + >>> pad = ImageMath( ... op1='structural.nii', - ... operation='PadImage', - ... op2='0.005 0.999 256') + ... operation='PadImage') >>> pad.inputs.copy_header False + While the operation is set to ``PadImage``, + setting ``copy_header = True`` will have no effect. 
+ >>> pad.inputs.copy_header = True >>> pad.inputs.copy_header False + For any other operation, ``copy_header`` can be enabled/disabled normally: + >>> pad.inputs.operation = "ME" >>> pad.inputs.copy_header = True >>> pad.inputs.copy_header From 05359e7f0935d95748a85f22ce46d73a561f1f02 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 15 Aug 2020 00:55:53 -0400 Subject: [PATCH 0856/1665] CI: Display stdout/stderr during tests --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 568c350085..63ff7c44d1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -71,7 +71,7 @@ install: script: - | if [ "$CHECK_TYPE" = "test" ]; then - py.test -v --cov nipype --cov-config .coveragerc --cov-report xml:cov.xml -c nipype/pytest.ini --doctest-modules nipype -n auto + py.test -sv --cov nipype --cov-config .coveragerc --cov-report xml:cov.xml -c nipype/pytest.ini --doctest-modules nipype -n auto fi - | if [ "$CHECK_TYPE" = "specs" ]; then From 75b405feb446a742366e615b868db7765c67f16d Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 15 Aug 2020 01:14:42 -0400 Subject: [PATCH 0857/1665] TEST: Parametrize multiproc test to track down error --- nipype/tests/test_nipype.py | 64 +++++++++++++++---------------------- 1 file changed, 26 insertions(+), 38 deletions(-) diff --git a/nipype/tests/test_nipype.py b/nipype/tests/test_nipype.py index bb80da601f..9ac517f9d7 100644 --- a/nipype/tests/test_nipype.py +++ b/nipype/tests/test_nipype.py @@ -41,7 +41,7 @@ def _check_no_et(): return et -def test_no_et(tmp_path): +def test_no_et_bare(tmp_path): from unittest.mock import patch from nipype.pipeline import engine as pe from nipype.interfaces import utility as niu @@ -70,42 +70,30 @@ def test_no_et(tmp_path): res = wf1.run() assert next(iter(res.nodes)).result.outputs.out == et - # MultiProc run - environment initialized with NIPYPE_NO_ET - wf2 = pe.Workflow(name="wf2", base_dir=str(tmp_path)) - wf2.add_nodes([pe.Node(niu.Function(function=_check_no_et), name="n")]) - res = wf2.run(plugin="MultiProc", plugin_args={"n_procs": 1}) - assert next(iter(res.nodes)).result.outputs.out is False - - # LegacyMultiProc run - environment initialized with NIPYPE_NO_ET - wf3 = pe.Workflow(name="wf3", base_dir=str(tmp_path)) - wf3.add_nodes([pe.Node(niu.Function(function=_check_no_et), name="n")]) - res = wf3.run(plugin="LegacyMultiProc", plugin_args={"n_procs": 1}) - assert next(iter(res.nodes)).result.outputs.out is False - - # run_without_submitting - environment not set - wf4 = pe.Workflow(name="wf4", base_dir=str(tmp_path)) - wf4.add_nodes( - [ - pe.Node( - niu.Function(function=_check_no_et), - run_without_submitting=True, - name="n", - ) - ] - ) - res = wf4.run(plugin="MultiProc", plugin_args={"n_procs": 1}) - assert next(iter(res.nodes)).result.outputs.out == et - # run_without_submitting - environment not set - wf5 = pe.Workflow(name="wf5", base_dir=str(tmp_path)) - wf5.add_nodes( - [ - pe.Node( - niu.Function(function=_check_no_et), - run_without_submitting=True, - name="n", - ) - ] +@pytest.mark.parametrize("plugin", ("MultiProc", "LegacyMultiProc")) +@pytest.mark.parametrize("run_without_submitting", (True, False)) +def test_no_et_multiproc(tmp_path, plugin, run_without_submitting): + from unittest.mock import patch + from nipype.pipeline import engine as pe + from nipype.interfaces import utility as niu + from nipype.interfaces.base import BaseInterface + + et = os.getenv("NIPYPE_NO_ET") is None + + # Multiprocessing runs 
initialize new processes with NIPYPE_NO_ET + # This does not apply to unsubmitted jobs, run by the main thread + expectation = et if run_without_submitting else False + + # Pytest doesn't trigger this, so let's pretend it's there + with patch.object(BaseInterface, "_etelemetry_version_data", {}): + + wf = pe.Workflow(name="wf2", base_dir=str(tmp_path)) + n = pe.Node( + niu.Function(function=_check_no_et), + run_without_submitting=run_without_submitting, + name="n", ) - res = wf5.run(plugin="LegacyMultiProc", plugin_args={"n_procs": 1}) - assert next(iter(res.nodes)).result.outputs.out == et + wf.add_nodes([n]) + res = wf.run(plugin=plugin, plugin_args={"n_procs": 1}) + assert next(iter(res.nodes)).result.outputs.out is expectation From 7400725fba61cdfab8ccd96d5766ad650c6dc41f Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 15 Aug 2020 01:53:12 -0400 Subject: [PATCH 0858/1665] CI: Run two jobs in parallel --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 63ff7c44d1..8fb76e5050 100644 --- a/.travis.yml +++ b/.travis.yml @@ -71,7 +71,7 @@ install: script: - | if [ "$CHECK_TYPE" = "test" ]; then - py.test -sv --cov nipype --cov-config .coveragerc --cov-report xml:cov.xml -c nipype/pytest.ini --doctest-modules nipype -n auto + py.test -sv --cov nipype --cov-config .coveragerc --cov-report xml:cov.xml -c nipype/pytest.ini --doctest-modules nipype -n 2 fi - | if [ "$CHECK_TYPE" = "specs" ]; then From e89e4ac40f4a32d8b46b3830af58090f44806fc9 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 14 Aug 2020 15:39:27 -0400 Subject: [PATCH 0859/1665] FIX: Permit identity transforms in list of transforms to antsApplyTransforms Convert to InputMultiObject while we're at it. --- nipype/interfaces/ants/resampling.py | 31 +++++++++++++++++++++------- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/nipype/interfaces/ants/resampling.py b/nipype/interfaces/ants/resampling.py index a5d6a52c04..1a03470a53 100644 --- a/nipype/interfaces/ants/resampling.py +++ b/nipype/interfaces/ants/resampling.py @@ -4,7 +4,7 @@ import os from .base import ANTSCommand, ANTSCommandInputSpec -from ..base import TraitedSpec, File, traits, isdefined, InputMultiPath +from ..base import TraitedSpec, File, traits, isdefined, InputMultiObject from ...utils.filemanip import split_filename @@ -52,7 +52,7 @@ class WarpTimeSeriesImageMultiTransformInputSpec(ANTSCommandInputSpec): use_bspline = traits.Bool( argstr="--use-Bspline", desc="Use 3rd order B-Spline interpolation" ) - transformation_series = InputMultiPath( + transformation_series = InputMultiObject( File(exists=True), argstr="%s", desc="transformation file(s) to be applied", @@ -204,7 +204,7 @@ class WarpImageMultiTransformInputSpec(ANTSCommandInputSpec): use_bspline = traits.Bool( argstr="--use-BSpline", desc="Use 3rd order B-Spline interpolation" ) - transformation_series = InputMultiPath( + transformation_series = InputMultiObject( File(exists=True), argstr="%s", desc="transformation file(s) to be applied", @@ -369,15 +369,14 @@ class ApplyTransformsInputSpec(ANTSCommandInputSpec): traits.Float(), traits.Float() # Gaussian/MultiLabel (sigma, alpha) ), ) - transforms = traits.Either( - InputMultiPath(File(exists=True)), - "identity", + transforms = InputMultiObject( + traits.Either(File(exists=True), "identity"), argstr="%s", mandatory=True, desc="transform files: will be applied in reverse order. 
For " "example, the last specified transform will be applied first.", ) - invert_transform_flags = InputMultiPath(traits.Bool()) + invert_transform_flags = InputMultiObject(traits.Bool()) default_value = traits.Float(0.0, argstr="--default-value %g", usedefault=True) print_out_composite_warp_file = traits.Bool( False, @@ -411,7 +410,7 @@ class ApplyTransforms(ANTSCommand): >>> at.cmdline 'antsApplyTransforms --default-value 0 --float 0 --input moving1.nii \ --interpolation Linear --output moving1_trans.nii \ ---reference-image fixed1.nii -t identity' +--reference-image fixed1.nii --transform identity' >>> at = ApplyTransforms() >>> at.inputs.dimension = 3 @@ -441,6 +440,22 @@ class ApplyTransforms(ANTSCommand): 'antsApplyTransforms --default-value 0 --dimensionality 3 --float 0 --input moving1.nii \ --interpolation BSpline[ 5 ] --output deformed_moving1.nii --reference-image fixed1.nii \ --transform [ ants_Warp.nii.gz, 0 ] --transform [ trans.mat, 0 ]' + + Identity transforms may be used as part of a chain: + + >>> at2 = ApplyTransforms() + >>> at2.inputs.dimension = 3 + >>> at2.inputs.input_image = 'moving1.nii' + >>> at2.inputs.reference_image = 'fixed1.nii' + >>> at2.inputs.output_image = 'deformed_moving1.nii' + >>> at2.inputs.interpolation = 'BSpline' + >>> at2.inputs.interpolation_parameters = (5,) + >>> at2.inputs.default_value = 0 + >>> at2.inputs.transforms = ['identity', 'ants_Warp.nii.gz', 'trans.mat'] + >>> at2.cmdline + 'antsApplyTransforms --default-value 0 --dimensionality 3 --float 0 --input moving1.nii \ +--interpolation BSpline[ 5 ] --output deformed_moving1.nii --reference-image fixed1.nii \ +--transform identity --transform ants_Warp.nii.gz --transform trans.mat' """ _cmd = "antsApplyTransforms" From 68bbf02fe862fcb8416385c12c004257cfb191ff Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 14 Aug 2020 15:40:38 -0400 Subject: [PATCH 0860/1665] RF: Simplify inversion logic, use ValueError --- nipype/interfaces/ants/resampling.py | 37 ++++++++++++---------------- 1 file changed, 16 insertions(+), 21 deletions(-) diff --git a/nipype/interfaces/ants/resampling.py b/nipype/interfaces/ants/resampling.py index 1a03470a53..48e944e091 100644 --- a/nipype/interfaces/ants/resampling.py +++ b/nipype/interfaces/ants/resampling.py @@ -420,11 +420,11 @@ class ApplyTransforms(ANTSCommand): >>> at.inputs.interpolation = 'Linear' >>> at.inputs.default_value = 0 >>> at.inputs.transforms = ['ants_Warp.nii.gz', 'trans.mat'] - >>> at.inputs.invert_transform_flags = [False, False] + >>> at.inputs.invert_transform_flags = [False, True] >>> at.cmdline 'antsApplyTransforms --default-value 0 --dimensionality 3 --float 0 --input moving1.nii \ --interpolation Linear --output deformed_moving1.nii --reference-image fixed1.nii \ ---transform [ ants_Warp.nii.gz, 0 ] --transform [ trans.mat, 0 ]' +--transform ants_Warp.nii.gz --transform [ trans.mat, 1 ]' >>> at1 = ApplyTransforms() >>> at1.inputs.dimension = 3 @@ -439,7 +439,7 @@ class ApplyTransforms(ANTSCommand): >>> at1.cmdline 'antsApplyTransforms --default-value 0 --dimensionality 3 --float 0 --input moving1.nii \ --interpolation BSpline[ 5 ] --output deformed_moving1.nii --reference-image fixed1.nii \ ---transform [ ants_Warp.nii.gz, 0 ] --transform [ trans.mat, 0 ]' +--transform ants_Warp.nii.gz --transform trans.mat' Identity transforms may be used as part of a chain: @@ -473,25 +473,20 @@ def _gen_filename(self, name): def _get_transform_filenames(self): retval = [] - for ii in range(len(self.inputs.transforms)): - if 
isdefined(self.inputs.invert_transform_flags): - if len(self.inputs.transforms) == len( - self.inputs.invert_transform_flags - ): - invert_code = 1 if self.inputs.invert_transform_flags[ii] else 0 - retval.append( - "--transform [ %s, %d ]" - % (self.inputs.transforms[ii], invert_code) - ) - else: - raise Exception( - ( - "ERROR: The useInverse list must have the same number " - "of entries as the transformsFileName list." - ) - ) + invert_flags = self.inputs.invert_transform_flags + if not isdefined(invert_flags): + invert_flags = [False] * len(self.inputs.transforms) + elif len(self.inputs.transforms) != len(invert_flags): + raise ValueError( + "ERROR: The invert_transform_flags list must have the same number " + "of entries as the transforms list." + ) + + for transform, invert in zip(self.inputs.transforms, invert_flags): + if invert: + retval.append(f"--transform [ {transform}, 1 ]") else: - retval.append("--transform %s" % self.inputs.transforms[ii]) + retval.append(f"--transform {transform}") return " ".join(retval) def _get_output_warped_filename(self): From c729c1ce761793715371b0295c9b39eeb878aeda Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 14 Aug 2020 15:42:01 -0400 Subject: [PATCH 0861/1665] STY: Drop unused identity branch --- nipype/interfaces/ants/resampling.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nipype/interfaces/ants/resampling.py b/nipype/interfaces/ants/resampling.py index 48e944e091..607202c109 100644 --- a/nipype/interfaces/ants/resampling.py +++ b/nipype/interfaces/ants/resampling.py @@ -502,8 +502,6 @@ def _format_arg(self, opt, spec, val): if opt == "output_image": return self._get_output_warped_filename() elif opt == "transforms": - if val == "identity": - return "-t identity" return self._get_transform_filenames() elif opt == "interpolation": if self.inputs.interpolation in [ From 14a304a0aae974dc75ce296506dbd6560b470dd9 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 15 Aug 2020 10:32:22 -0400 Subject: [PATCH 0862/1665] TEST: Reproduce gh-3230 --- nipype/pipeline/engine/tests/test_utils.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/nipype/pipeline/engine/tests/test_utils.py b/nipype/pipeline/engine/tests/test_utils.py index 1e86e5a071..0705f0ad53 100644 --- a/nipype/pipeline/engine/tests/test_utils.py +++ b/nipype/pipeline/engine/tests/test_utils.py @@ -11,7 +11,12 @@ from ....interfaces import base as nib from ....interfaces import utility as niu from .... import config -from ..utils import clean_working_directory, write_workflow_prov, load_resultfile +from ..utils import ( + clean_working_directory, + write_workflow_prov, + load_resultfile, + format_node, +) class InputSpec(nib.TraitedSpec): @@ -327,3 +332,11 @@ def test_save_load_resultfile(tmpdir, use_relative): ) config.set("execution", "use_relative_paths", old_use_relative) + + +def test_format_node(): + node = pe.Node(niu.IdentityInterface(fields=["a", "b"]), name="node") + serialized = format_node(node) + workspace = {"Node": pe.Node} + exec("\n".join(serialized), workspace) + assert workspace["node"].interface._fields == node.interface._fields From 6566e6eefae53787e03959e1785cf05a87932a75 Mon Sep 17 00:00:00 2001 From: Matteo Mancini Date: Mon, 3 Aug 2020 11:18:33 +0100 Subject: [PATCH 0863/1665] Removed line that excluded fields parameter during export. 
--- nipype/pipeline/engine/utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index d7a65b74de..3d0f078e35 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -365,7 +365,6 @@ def format_node(node, format="python", include_config=False): comment = "# Node: %s" % node.fullname spec = signature(node.interface.__init__) args = [p.name for p in list(spec.parameters.values())] - args = args[1:] if args: filled_args = [] for arg in args: From 7df2d625a8fb04f1884dd49ee50adaa51c2dc5d2 Mon Sep 17 00:00:00 2001 From: Matteo Mancini Date: Mon, 3 Aug 2020 11:23:50 +0100 Subject: [PATCH 0864/1665] Added if statement to correctly handle strings and list during export. --- nipype/pipeline/engine/utils.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index 3d0f078e35..6f73d62189 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -369,9 +369,15 @@ def format_node(node, format="python", include_config=False): filled_args = [] for arg in args: if hasattr(node.interface, "_%s" % arg): - filled_args.append( - "%s=%s" % (arg, getattr(node.interface, "_%s" % arg)) - ) + argval = getattr(node.interface, "_%s" % arg) + if isinstance(argval, str): + filled_args.append( + "%s='%s'" % (arg, argval) + ) + else: + filled_args.append( + "%s=%s" % (arg, argval) + ) args = ", ".join(filled_args) else: args = "" From fc8e991eb687383db9f2894a9ad1175ce382c782 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 15 Aug 2020 10:42:52 -0400 Subject: [PATCH 0865/1665] RF: Simplify serialization --- nipype/pipeline/engine/utils.py | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index 6f73d62189..f77f771ea7 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -364,23 +364,12 @@ def format_node(node, format="python", include_config=False): importline = "from %s import %s" % (klass.__module__, klass.__class__.__name__) comment = "# Node: %s" % node.fullname spec = signature(node.interface.__init__) - args = [p.name for p in list(spec.parameters.values())] - if args: - filled_args = [] - for arg in args: - if hasattr(node.interface, "_%s" % arg): - argval = getattr(node.interface, "_%s" % arg) - if isinstance(argval, str): - filled_args.append( - "%s='%s'" % (arg, argval) - ) - else: - filled_args.append( - "%s=%s" % (arg, argval) - ) - args = ", ".join(filled_args) - else: - args = "" + filled_args = [] + for param in spec.parameters.values(): + val = getattr(node.interface, f"_{param.name}", None) + if val is not None: + filled_args.append(f"{param.name}={val!r}") + args = ", ".join(filled_args) klass_name = klass.__class__.__name__ if isinstance(node, MapNode): nodedef = '%s = MapNode(%s(%s), iterfield=%s, name="%s")' % ( From 44497f278a2be55c301a46e6eaeeecb5762a52db Mon Sep 17 00:00:00 2001 From: Horea Christian Date: Fri, 7 Aug 2020 01:12:38 -0400 Subject: [PATCH 0866/1665] ENH: no more auto-failing on misparsed versions --- nipype/interfaces/base/core.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index 82da393a84..1e626fe1b5 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -276,6 +276,15 @@ def 
_check_version_requirements(self, trait_object, raise_exception=True): version = LooseVersion(str(self.version)) for name in names: min_ver = LooseVersion(str(trait_object.traits()[name].min_ver)) + try: + min_ver > version + except TypeError: + iflogger.warning( + 'Nipype is having issues parsing the package version ' + f'for Trait {name} ({self.__class__.__name__})' + f'You may want to check whether {version} is larger than {min_ver}' + ) + continue if min_ver > version: unavailable_traits.append(name) if not isdefined(getattr(trait_object, name)): @@ -293,6 +302,15 @@ def _check_version_requirements(self, trait_object, raise_exception=True): version = LooseVersion(str(self.version)) for name in names: max_ver = LooseVersion(str(trait_object.traits()[name].max_ver)) + try: + max_ver > version + except TypeError: + iflogger.warning( + 'Nipype is having issues parsing the package version ' + f'for Trait {name} ({self.__class__.__name__})' + f'You may want to check whether {version} is smaller than {max_ver}' + ) + continue if max_ver < version: unavailable_traits.append(name) if not isdefined(getattr(trait_object, name)): From 690c620705288a3be11e28ac1703c555b12fd9a7 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 14 Aug 2020 17:07:18 -0400 Subject: [PATCH 0867/1665] TEST: Validate bad version parse warning when hit with min/max version --- nipype/interfaces/base/tests/test_core.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/nipype/interfaces/base/tests/test_core.py b/nipype/interfaces/base/tests/test_core.py index e97a5bab79..e3dd5cf1de 100644 --- a/nipype/interfaces/base/tests/test_core.py +++ b/nipype/interfaces/base/tests/test_core.py @@ -3,6 +3,7 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: import os import simplejson as json +import logging import pytest from unittest import mock @@ -236,6 +237,21 @@ class DerivedInterface1(nib.BaseInterface): obj._check_version_requirements(obj.inputs) +def test_input_version_missing(caplog): + class DerivedInterface(nib.BaseInterface): + class input_spec(nib.TraitedSpec): + foo = nib.traits.Int(min_ver="0.9") + bar = nib.traits.Int(max_ver="0.9") + _version = "misparsed-garbage" + + obj = DerivedInterface() + obj.inputs.foo = 1 + obj.inputs.bar = 1 + with caplog.at_level(logging.WARNING, logger="nipype.interface"): + obj._check_version_requirements(obj.inputs) + assert len(caplog.records) == 2 + + def test_output_version(): class InputSpec(nib.TraitedSpec): foo = nib.traits.Int(desc="a random int") From c56a3b1d788b86ad402a45699d8588c09f87f3e8 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 14 Aug 2020 17:13:53 -0400 Subject: [PATCH 0868/1665] STY: Reuse comparison, rewrite warning --- nipype/interfaces/base/core.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index 1e626fe1b5..d03ea941b3 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -277,15 +277,15 @@ def _check_version_requirements(self, trait_object, raise_exception=True): for name in names: min_ver = LooseVersion(str(trait_object.traits()[name].min_ver)) try: - min_ver > version + too_old = min_ver > version except TypeError: iflogger.warning( - 'Nipype is having issues parsing the package version ' - f'for Trait {name} ({self.__class__.__name__})' - f'You may want to check whether {version} is larger than {min_ver}' - ) + f"Nipype cannot validate the package version {version!r} for " + 
f"{self.__class__.__name__}. Trait {name} requires version " + f">={min_ver}. Please verify validity." + ) continue - if min_ver > version: + if too_old: unavailable_traits.append(name) if not isdefined(getattr(trait_object, name)): continue @@ -303,15 +303,15 @@ def _check_version_requirements(self, trait_object, raise_exception=True): for name in names: max_ver = LooseVersion(str(trait_object.traits()[name].max_ver)) try: - max_ver > version + too_new = max_ver < version except TypeError: iflogger.warning( - 'Nipype is having issues parsing the package version ' - f'for Trait {name} ({self.__class__.__name__})' - f'You may want to check whether {version} is smaller than {max_ver}' - ) + f"Nipype cannot validate the package version {version!r} for " + f"{self.__class__.__name__}. Trait {name} requires version " + f"<={max_ver}. Please verify validity." + ) continue - if max_ver < version: + if too_new: unavailable_traits.append(name) if not isdefined(getattr(trait_object, name)): continue From bf24dbaf6a4bf8227de049879b69e053e854ac6c Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sun, 16 Aug 2020 12:58:00 -0400 Subject: [PATCH 0869/1665] RF: Raise ValueError if stop_on_unknown_version is set --- nipype/interfaces/base/core.py | 20 ++++++++++++-------- nipype/interfaces/base/tests/test_core.py | 22 +++++++++++++++++++++- 2 files changed, 33 insertions(+), 9 deletions(-) diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index d03ea941b3..54c4302c7f 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -278,12 +278,14 @@ def _check_version_requirements(self, trait_object, raise_exception=True): min_ver = LooseVersion(str(trait_object.traits()[name].min_ver)) try: too_old = min_ver > version - except TypeError: - iflogger.warning( + except TypeError as err: + msg = ( f"Nipype cannot validate the package version {version!r} for " - f"{self.__class__.__name__}. Trait {name} requires version " - f">={min_ver}. Please verify validity." + f"{self.__class__.__name__}. Trait {name} requires version >={min_ver}." ) + iflogger.warning(f"{msg}. Please verify validity.") + if config.getboolean("execution", "stop_on_unknown_version"): + raise ValueError(msg) from err continue if too_old: unavailable_traits.append(name) @@ -304,12 +306,14 @@ def _check_version_requirements(self, trait_object, raise_exception=True): max_ver = LooseVersion(str(trait_object.traits()[name].max_ver)) try: too_new = max_ver < version - except TypeError: - iflogger.warning( + except TypeError as err: + msg = ( f"Nipype cannot validate the package version {version!r} for " - f"{self.__class__.__name__}. Trait {name} requires version " - f"<={max_ver}. Please verify validity." + f"{self.__class__.__name__}. Trait {name} requires version <={max_ver}." ) + iflogger.warning(f"{msg}. 
Please verify validity.") + if config.getboolean("execution", "stop_on_unknown_version"): + raise ValueError(msg) from err continue if too_new: unavailable_traits.append(name) diff --git a/nipype/interfaces/base/tests/test_core.py b/nipype/interfaces/base/tests/test_core.py index e3dd5cf1de..165b3532ab 100644 --- a/nipype/interfaces/base/tests/test_core.py +++ b/nipype/interfaces/base/tests/test_core.py @@ -242,6 +242,7 @@ class DerivedInterface(nib.BaseInterface): class input_spec(nib.TraitedSpec): foo = nib.traits.Int(min_ver="0.9") bar = nib.traits.Int(max_ver="0.9") + _version = "misparsed-garbage" obj = DerivedInterface() @@ -252,6 +253,25 @@ class input_spec(nib.TraitedSpec): assert len(caplog.records) == 2 +def test_input_version_missing_error(): + from nipype import config + + class DerivedInterface(nib.BaseInterface): + class input_spec(nib.TraitedSpec): + foo = nib.traits.Int(min_ver="0.9") + bar = nib.traits.Int(max_ver="0.9") + + _version = "misparsed-garbage" + + with mock.patch.object(config, "getboolean", return_value=True): + obj = DerivedInterface(foo=1) + with pytest.raises(ValueError): + obj._check_version_requirements(obj.inputs) + obj = DerivedInterface(bar=1) + with pytest.raises(ValueError): + obj._check_version_requirements(obj.inputs) + + def test_output_version(): class InputSpec(nib.TraitedSpec): foo = nib.traits.Int(desc="a random int") @@ -473,7 +493,7 @@ def test_global_CommandLine_output(tmpdir): ci = BET() assert ci.terminal_output == "stream" # default case - with mock.patch.object(nib.CommandLine, '_terminal_output'): + with mock.patch.object(nib.CommandLine, "_terminal_output"): nib.CommandLine.set_default_terminal_output("allatonce") ci = nib.CommandLine(command="ls -l") assert ci.terminal_output == "allatonce" From a09d0a9a2e277be27aa9bcd608d698d1c7baea53 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 14 Aug 2020 16:25:18 -0400 Subject: [PATCH 0870/1665] MNT: Set version 1.5.1 --- nipype/info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/info.py b/nipype/info.py index 6c65d06d68..7ec5fef02e 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -5,7 +5,7 @@ # nipype version information # Remove -dev for release -__version__ = "1.5.1-dev" +__version__ = "1.5.1" def get_nipype_gitversion(): From 83358d7f17aac07cb90d0330f11ea2322e2974d8 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 14 Aug 2020 17:15:15 -0400 Subject: [PATCH 0871/1665] STY: Black --- nipype/interfaces/ants/registration.py | 6 ++++-- nipype/interfaces/ants/segmentation.py | 1 + nipype/interfaces/ants/utils.py | 6 +++--- nipype/interfaces/freesurfer/base.py | 2 +- nipype/interfaces/freesurfer/preprocess.py | 5 ++++- nipype/interfaces/fsl/preprocess.py | 18 ++++++------------ nipype/interfaces/niftyreg/base.py | 5 ++--- nipype/interfaces/spm/preprocess.py | 2 +- .../tests/test_legacymultiproc_nondaemon.py | 8 ++++++-- .../pipeline/plugins/tests/test_multiproc.py | 8 ++++++-- nipype/utils/tests/test_filemanip.py | 15 +++++++++------ 11 files changed, 43 insertions(+), 33 deletions(-) diff --git a/nipype/interfaces/ants/registration.py b/nipype/interfaces/ants/registration.py index b9b6700dcd..47b8d53cfc 100644 --- a/nipype/interfaces/ants/registration.py +++ b/nipype/interfaces/ants/registration.py @@ -592,7 +592,8 @@ class RegistrationOutputSpec(TraitedSpec): File(exists=True), desc="List of output transforms for forward registration" ) reverse_forward_transforms = traits.List( - File(exists=True), desc="List of output transforms for forward 
registration reversed for antsApplyTransform" + File(exists=True), + desc="List of output transforms for forward registration reversed for antsApplyTransform", ) reverse_transforms = traits.List( File(exists=True), desc="List of output transforms for reverse registration" @@ -601,7 +602,8 @@ class RegistrationOutputSpec(TraitedSpec): traits.Bool(), desc="List of flags corresponding to the forward transforms" ) reverse_forward_invert_flags = traits.List( - traits.Bool(), desc="List of flags corresponding to the forward transforms reversed for antsApplyTransform" + traits.Bool(), + desc="List of flags corresponding to the forward transforms reversed for antsApplyTransform", ) reverse_invert_flags = traits.List( traits.Bool(), desc="List of flags corresponding to the reverse transforms" diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py index 387fd229a1..555cdd1777 100644 --- a/nipype/interfaces/ants/segmentation.py +++ b/nipype/interfaces/ants/segmentation.py @@ -543,6 +543,7 @@ def _list_outputs(self): outputs["bias_image"] = os.path.abspath(self._out_bias_file) return outputs + class CorticalThicknessInputSpec(ANTSCommandInputSpec): dimension = traits.Enum( 3, 2, argstr="-d %d", usedefault=True, desc="image dimension (2 or 3)" diff --git a/nipype/interfaces/ants/utils.py b/nipype/interfaces/ants/utils.py index 6ca6af2d98..a6fdc5cf00 100644 --- a/nipype/interfaces/ants/utils.py +++ b/nipype/interfaces/ants/utils.py @@ -138,18 +138,18 @@ class ImageMath(ANTSCommand, CopyHeaderInterface): def __init__(self, **inputs): super(ImageMath, self).__init__(**inputs) - if self.inputs.operation in ("PadImage", ): + if self.inputs.operation in ("PadImage",): self.inputs.copy_header = False self.inputs.on_trait_change(self._operation_update, "operation") self.inputs.on_trait_change(self._copyheader_update, "copy_header") def _operation_update(self): - if self.inputs.operation in ("PadImage", ): + if self.inputs.operation in ("PadImage",): self.inputs.copy_header = False def _copyheader_update(self): - if self.inputs.copy_header and self.inputs.operation in ("PadImage", ): + if self.inputs.copy_header and self.inputs.operation in ("PadImage",): warn("copy_header cannot be updated to True with PadImage as operation.") self.inputs.copy_header = False diff --git a/nipype/interfaces/freesurfer/base.py b/nipype/interfaces/freesurfer/base.py index e9fe62ee50..1108cbf6bc 100644 --- a/nipype/interfaces/freesurfer/base.py +++ b/nipype/interfaces/freesurfer/base.py @@ -76,7 +76,7 @@ def looseversion(cls): vstr = "6.0.0-dev" + githash elif vinfo[5][0] == "v": vstr = vinfo[5][1:] - elif len([1 for val in vinfo[3] if val == '.']) == 2: + elif len([1 for val in vinfo[3] if val == "."]) == 2: "version string: freesurfer-linux-centos7_x86_64-7.1.0-20200511-813297b" vstr = vinfo[3] else: diff --git a/nipype/interfaces/freesurfer/preprocess.py b/nipype/interfaces/freesurfer/preprocess.py index 92fbe7d7f7..91eba956b5 100644 --- a/nipype/interfaces/freesurfer/preprocess.py +++ b/nipype/interfaces/freesurfer/preprocess.py @@ -727,7 +727,10 @@ def cmdline(self): outdir = self._get_outdir() cmd = [] if not os.path.exists(outdir): - cmdstr = "%s -c \"import os; os.makedirs('%s')\"" % (op.basename(sys.executable), outdir) + cmdstr = "%s -c \"import os; os.makedirs('%s')\"" % ( + op.basename(sys.executable), + outdir, + ) cmd.extend([cmdstr]) infofile = os.path.join(outdir, "shortinfo.txt") if not os.path.exists(infofile): diff --git a/nipype/interfaces/fsl/preprocess.py 
b/nipype/interfaces/fsl/preprocess.py index feebf4e742..f2fbe8c079 100644 --- a/nipype/interfaces/fsl/preprocess.py +++ b/nipype/interfaces/fsl/preprocess.py @@ -174,9 +174,9 @@ def _list_outputs(self): outputs = self.output_spec().get() outputs["out_file"] = self._gen_outfilename() - basename = os.path.basename(outputs['out_file']) - cwd = os.path.dirname(outputs['out_file']) - kwargs = {'basename': basename, 'cwd': cwd} + basename = os.path.basename(outputs["out_file"]) + cwd = os.path.dirname(outputs["out_file"]) + kwargs = {"basename": basename, "cwd": cwd} if (isdefined(self.inputs.mesh) and self.inputs.mesh) or ( isdefined(self.inputs.surfaces) and self.inputs.surfaces @@ -189,9 +189,7 @@ def _list_outputs(self): ): outputs["mask_file"] = self._gen_fname(suffix="_mask", **kwargs) if isdefined(self.inputs.outline) and self.inputs.outline: - outputs["outline_file"] = self._gen_fname( - suffix="_overlay", **kwargs - ) + outputs["outline_file"] = self._gen_fname(suffix="_overlay", **kwargs) if isdefined(self.inputs.surfaces) and self.inputs.surfaces: outputs["inskull_mask_file"] = self._gen_fname( suffix="_inskull_mask", **kwargs @@ -211,13 +209,9 @@ def _list_outputs(self): outputs["outskin_mesh_file"] = self._gen_fname( suffix="_outskin_mesh", **kwargs ) - outputs["skull_mask_file"] = self._gen_fname( - suffix="_skull_mask", **kwargs - ) + outputs["skull_mask_file"] = self._gen_fname(suffix="_skull_mask", **kwargs) if isdefined(self.inputs.skull) and self.inputs.skull: - outputs["skull_file"] = self._gen_fname( - suffix="_skull", **kwargs - ) + outputs["skull_file"] = self._gen_fname(suffix="_skull", **kwargs) if isdefined(self.inputs.no_output) and self.inputs.no_output: outputs["out_file"] = Undefined return outputs diff --git a/nipype/interfaces/niftyreg/base.py b/nipype/interfaces/niftyreg/base.py index 88e441d52a..375a3ada29 100644 --- a/nipype/interfaces/niftyreg/base.py +++ b/nipype/interfaces/niftyreg/base.py @@ -19,8 +19,7 @@ import os from ... 
import logging -from ..base import (CommandLine, CommandLineInputSpec, traits, Undefined, - PackageInfo) +from ..base import CommandLine, CommandLineInputSpec, traits, Undefined, PackageInfo from ...utils.filemanip import split_filename iflogger = logging.getLogger("nipype.interface") @@ -31,7 +30,7 @@ def get_custom_path(command, env_dir="NIFTYREGDIR"): class Info(PackageInfo): - version_cmd = get_custom_path('reg_aladin') + ' --version' + version_cmd = get_custom_path("reg_aladin") + " --version" @staticmethod def parse_version(raw_info): diff --git a/nipype/interfaces/spm/preprocess.py b/nipype/interfaces/spm/preprocess.py index 4f7df04c92..17dc03ff5a 100644 --- a/nipype/interfaces/spm/preprocess.py +++ b/nipype/interfaces/spm/preprocess.py @@ -1884,7 +1884,7 @@ class MultiChannelNewSegmentInputSpec(SPMCommandInputSpec): - FWHM of Gaussian smoothness of bias - which maps to save (Field, Corrected) - a tuple of two boolean values""", field="channel", - ) + ), ), desc="""A list of tuples (one per each channel) with the following fields: - a list of channel files (only 1rst channel files will be segmented) diff --git a/nipype/pipeline/plugins/tests/test_legacymultiproc_nondaemon.py b/nipype/pipeline/plugins/tests/test_legacymultiproc_nondaemon.py index 2e814bbf3e..5f33b025b2 100644 --- a/nipype/pipeline/plugins/tests/test_legacymultiproc_nondaemon.py +++ b/nipype/pipeline/plugins/tests/test_legacymultiproc_nondaemon.py @@ -134,7 +134,9 @@ def run_multiproc_nondaemon_with_flag(nondaemon_flag): return result -@pytest.mark.skipif(sys.version_info >= (3, 8), reason="multiprocessing issues in Python 3.8") +@pytest.mark.skipif( + sys.version_info >= (3, 8), reason="multiprocessing issues in Python 3.8" +) def test_run_multiproc_nondaemon_false(): """ This is the entry point for the test. 
Two times a pipe of several @@ -153,7 +155,9 @@ def test_run_multiproc_nondaemon_false(): assert shouldHaveFailed -@pytest.mark.skipif(sys.version_info >= (3, 8), reason="multiprocessing issues in Python 3.8") +@pytest.mark.skipif( + sys.version_info >= (3, 8), reason="multiprocessing issues in Python 3.8" +) def test_run_multiproc_nondaemon_true(): # with nondaemon_flag = True, the execution should succeed result = run_multiproc_nondaemon_with_flag(True) diff --git a/nipype/pipeline/plugins/tests/test_multiproc.py b/nipype/pipeline/plugins/tests/test_multiproc.py index 9916785e3d..142d108ebc 100644 --- a/nipype/pipeline/plugins/tests/test_multiproc.py +++ b/nipype/pipeline/plugins/tests/test_multiproc.py @@ -34,7 +34,9 @@ def _list_outputs(self): return outputs -@pytest.mark.skipif(sys.version_info >= (3, 8), reason="multiprocessing issues in Python 3.8") +@pytest.mark.skipif( + sys.version_info >= (3, 8), reason="multiprocessing issues in Python 3.8" +) def test_run_multiproc(tmpdir): tmpdir.chdir() @@ -116,7 +118,9 @@ def test_no_more_threads_than_specified(tmpdir): pipe.run(plugin="MultiProc", plugin_args={"n_procs": max_threads}) -@pytest.mark.skipif(sys.version_info >= (3, 8), reason="multiprocessing issues in Python 3.8") +@pytest.mark.skipif( + sys.version_info >= (3, 8), reason="multiprocessing issues in Python 3.8" +) def test_hold_job_until_procs_available(tmpdir): tmpdir.chdir() diff --git a/nipype/utils/tests/test_filemanip.py b/nipype/utils/tests/test_filemanip.py index fed2462548..e8da256261 100644 --- a/nipype/utils/tests/test_filemanip.py +++ b/nipype/utils/tests/test_filemanip.py @@ -655,12 +655,15 @@ def test_pickle(tmp_path, save_versioning): assert outobj == testobj -@pytest.mark.parametrize("items,expected", [ - ('', ' \n\n'), - ('A string', ' A string\n\n'), - (['A list', 'Of strings'], ' A list\n Of strings\n\n'), - (None, TypeError), -]) +@pytest.mark.parametrize( + "items,expected", + [ + ("", " \n\n"), + ("A string", " A string\n\n"), + (["A list", "Of strings"], " A list\n Of strings\n\n"), + (None, TypeError), + ], +) def test_write_rst_list(tmp_path, items, expected): if items is not None: assert write_rst_list(items) == expected From e1f5c0263ed775a51deb01811fa79a1ac57c51af Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sat, 15 Aug 2020 10:20:50 -0400 Subject: [PATCH 0872/1665] DOC: Update previous versions --- doc/interfaces.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/interfaces.rst b/doc/interfaces.rst index b612555a7c..85dbb9f779 100644 --- a/doc/interfaces.rst +++ b/doc/interfaces.rst @@ -8,7 +8,7 @@ Interfaces and Workflows :Release: |version| :Date: |today| -Previous versions: `1.4.2 `_ `1.4.1 `_ +Previous versions: `1.5.0 `_ `1.4.2 `_ Workflows --------- From c98bfbfc65ce2f846e52ae65ee1b261ee3afb68b Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 16 Aug 2020 13:34:28 -0400 Subject: [PATCH 0873/1665] DOC: Update changelog --- doc/changelog/1.X.X-changelog.rst | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/doc/changelog/1.X.X-changelog.rst b/doc/changelog/1.X.X-changelog.rst index e10949cf08..4855404401 100644 --- a/doc/changelog/1.X.X-changelog.rst +++ b/doc/changelog/1.X.X-changelog.rst @@ -1,5 +1,21 @@ +1.5.1 (August 16, 2020) +======================= + +Bug-fix release in the 1.5.x series. + +This release includes small updates to ANTs utilities that lie somewhere +between bug fixes and enhancements. 
+ +(`Full changelog `__) + + * FIX: Warn for min/max_ver traits when tool version can't be parsed (https://github.com/nipy/nipype/pull/3241) + * FIX: Serialize all interface arguments when exporting workflows (https://github.com/nipy/nipype/pull/3240) + * FIX: Permit identity transforms in list of transforms given to ants.ApplyTransforms (https://github.com/nipy/nipype/pull/3237) + * FIX: ANTs' utilities revision - bug fixes and add more operations to ``ants.ImageMath`` (https://github.com/nipy/nipype/pull/3236) + * DOC: Skip BIDSDataGrabber doctest if pybids is missing (https://github.com/nipy/nipype/pull/3224) + 1.5.0 (June 03, 2020) -========================= +===================== New feature release in the 1.5.x series. From a8367b03a6fa4f71c99fd2a056a27fc311e07922 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sun, 16 Aug 2020 13:34:51 -0400 Subject: [PATCH 0874/1665] DOC: Update Zenodo --- .zenodo.json | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index 2ee43c9904..7324bd942c 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -115,16 +115,16 @@ "name": "Hamalainen, Carlo", "orcid": "0000-0001-7655-3830" }, - { - "affiliation": "Stanford University", - "name": "\u0106iri\u0107 , Rastko", - "orcid": "0000-0001-6347-7939" - }, { "affiliation": "Institute for Biomedical Engineering, ETH and University of Zurich", "name": "Christian, Horea", "orcid": "0000-0001-7037-2449" }, + { + "affiliation": "Stanford University", + "name": "\u0106iri\u0107 , Rastko", + "orcid": "0000-0001-6347-7939" + }, { "name": "Dubois, Mathieu" }, @@ -225,6 +225,11 @@ { "name": "Millman, Jarrod" }, + { + "affiliation": "University College London", + "name": "Mancini, Matteo", + "orcid": "0000-0001-7194-4568" + }, { "affiliation": "National Institute of Mental Health", "name": "Nielson, Dylan M.", @@ -241,11 +246,6 @@ { "name": "Mordom, David" }, - { - "affiliation": "University College London", - "name": "Mancini, Matteo", - "orcid": "0000-0001-7194-4568" - }, { "affiliation": "ARAMIS LAB, Brain and Spine Institute (ICM), Paris, France.", "name": "Guillon, Je\u0301re\u0301my", From 1d6b91e237b9dd7b5dee80e4569e8b6e484944bf Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sun, 16 Aug 2020 14:11:25 -0400 Subject: [PATCH 0875/1665] MNT: 1.5.2-dev --- doc/interfaces.rst | 2 +- nipype/info.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/interfaces.rst b/doc/interfaces.rst index 85dbb9f779..d04fd88271 100644 --- a/doc/interfaces.rst +++ b/doc/interfaces.rst @@ -8,7 +8,7 @@ Interfaces and Workflows :Release: |version| :Date: |today| -Previous versions: `1.5.0 `_ `1.4.2 `_ +Previous versions: `1.5.1 `_ `1.5.0 `_ Workflows --------- diff --git a/nipype/info.py b/nipype/info.py index 7ec5fef02e..d652a3f72e 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -5,7 +5,7 @@ # nipype version information # Remove -dev for release -__version__ = "1.5.1" +__version__ = "1.5.2-dev" def get_nipype_gitversion(): From 5d1c1e9cbd4021d026b91a03cf7c70553db46d94 Mon Sep 17 00:00:00 2001 From: Hershkovitz-hub Date: Thu, 10 Sep 2020 19:10:09 +0300 Subject: [PATCH 0876/1665] Removed "-" from algorithm specification Resolves #3247 . 
--- nipype/interfaces/mrtrix3/preprocess.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py index 9384ef43c7..832d3256ac 100644 --- a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ -181,13 +181,13 @@ class DWIBiasCorrectInputSpec(MRTrix3BaseInputSpec): ) in_mask = File(argstr="-mask %s", desc="input mask image for bias field estimation") use_ants = traits.Bool( - argstr="-ants", + argstr="ants", mandatory=True, desc="use ANTS N4 to estimate the inhomogeneity field", xor=["use_fsl"], ) use_fsl = traits.Bool( - argstr="-fsl", + argstr="fsl", mandatory=True, desc="use FSL FAST to estimate the inhomogeneity field", xor=["use_ants"], From 42544d05d6753589f4dd8f59d54525a135976297 Mon Sep 17 00:00:00 2001 From: Hershkovitz-hub Date: Thu, 10 Sep 2020 19:17:43 +0300 Subject: [PATCH 0877/1665] Update .zenodo.json --- .zenodo.json | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.zenodo.json b/.zenodo.json index 7324bd942c..d59c95370b 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -697,6 +697,10 @@ "affiliation": "Sagol School of Neuroscience, Tel Aviv University", "name": "Baratz, Zvi" }, + { + "affiliation": "Sagol School of Neuroscience, Tel Aviv University", + "name": "Ben-Zvi,Gal" + }, { "name": "Matsubara, K" }, From 8c4a8b09669e076f128318f40006a3de1bb46611 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 17 Sep 2020 14:38:56 -0400 Subject: [PATCH 0878/1665] STY: Re-run make specs; change in black --- nipype/algorithms/tests/test_auto_ACompCor.py | 72 +++- .../tests/test_auto_ActivationCount.py | 21 +- .../tests/test_auto_AddCSVColumn.py | 16 +- .../algorithms/tests/test_auto_AddCSVRow.py | 14 +- nipype/algorithms/tests/test_auto_AddNoise.py | 33 +- .../tests/test_auto_ArtifactDetect.py | 70 +++- .../tests/test_auto_CalculateMedian.py | 10 +- .../test_auto_CalculateNormalizedMoments.py | 13 +- .../tests/test_auto_ComputeDVARS.py | 78 +++- .../tests/test_auto_ComputeMeshWarp.py | 36 +- .../algorithms/tests/test_auto_CreateNifti.py | 16 +- nipype/algorithms/tests/test_auto_Distance.py | 25 +- .../tests/test_auto_FramewiseDisplacement.py | 47 ++- .../tests/test_auto_FuzzyOverlap.py | 28 +- nipype/algorithms/tests/test_auto_Gunzip.py | 13 +- nipype/algorithms/tests/test_auto_ICC.py | 21 +- .../algorithms/tests/test_auto_Matlab2CSV.py | 13 +- .../tests/test_auto_MergeCSVFiles.py | 19 +- .../algorithms/tests/test_auto_MergeROIs.py | 12 +- .../tests/test_auto_MeshWarpMaths.py | 33 +- .../tests/test_auto_ModifyAffine.py | 11 +- .../tests/test_auto_NonSteadyStateDetector.py | 11 +- .../test_auto_NormalizeProbabilityMapSet.py | 11 +- .../algorithms/tests/test_auto_P2PDistance.py | 36 +- .../algorithms/tests/test_auto_PickAtlas.py | 27 +- .../algorithms/tests/test_auto_Similarity.py | 26 +- .../tests/test_auto_SimpleThreshold.py | 13 +- .../tests/test_auto_SpecifyModel.py | 48 ++- .../tests/test_auto_SpecifySPMModel.py | 56 ++- .../tests/test_auto_SpecifySparseModel.py | 74 +++- .../algorithms/tests/test_auto_SplitROIs.py | 15 +- .../tests/test_auto_StimulusCorrelation.py | 21 +- nipype/algorithms/tests/test_auto_TCompCor.py | 76 +++- .../algorithms/tests/test_auto_WarpPoints.py | 21 +- .../afni/tests/test_auto_ABoverlap.py | 50 ++- .../afni/tests/test_auto_AFNICommand.py | 14 +- .../afni/tests/test_auto_AFNICommandBase.py | 8 +- .../afni/tests/test_auto_AFNIPythonCommand.py | 14 +- .../afni/tests/test_auto_AFNItoNIFTI.py | 44 +- 
.../afni/tests/test_auto_AlignEpiAnatPy.py | 101 +++-- .../afni/tests/test_auto_Allineate.py | 227 ++++++++--- .../afni/tests/test_auto_AutoTLRC.py | 29 +- .../afni/tests/test_auto_AutoTcorrelate.py | 48 ++- .../afni/tests/test_auto_Autobox.py | 31 +- .../afni/tests/test_auto_Automask.py | 39 +- .../afni/tests/test_auto_Axialize.py | 49 ++- .../afni/tests/test_auto_Bandpass.py | 91 ++++- .../afni/tests/test_auto_BlurInMask.py | 52 ++- .../afni/tests/test_auto_BlurToFWHM.py | 48 ++- .../afni/tests/test_auto_BrickStat.py | 55 ++- .../interfaces/afni/tests/test_auto_Bucket.py | 32 +- .../interfaces/afni/tests/test_auto_Calc.py | 62 ++- nipype/interfaces/afni/tests/test_auto_Cat.py | 42 +- .../afni/tests/test_auto_CatMatvec.py | 41 +- .../afni/tests/test_auto_CenterMass.py | 48 ++- .../afni/tests/test_auto_ClipLevel.py | 38 +- .../afni/tests/test_auto_ConvertDset.py | 38 +- .../interfaces/afni/tests/test_auto_Copy.py | 30 +- .../afni/tests/test_auto_Deconvolve.py | 224 +++++++--- .../afni/tests/test_auto_DegreeCentrality.py | 56 ++- .../afni/tests/test_auto_Despike.py | 26 +- .../afni/tests/test_auto_Detrend.py | 26 +- nipype/interfaces/afni/tests/test_auto_Dot.py | 80 +++- nipype/interfaces/afni/tests/test_auto_ECM.py | 79 +++- .../interfaces/afni/tests/test_auto_Edge3.py | 52 ++- .../interfaces/afni/tests/test_auto_Eval.py | 62 ++- .../interfaces/afni/tests/test_auto_FWHMx.py | 79 +++- nipype/interfaces/afni/tests/test_auto_Fim.py | 35 +- .../afni/tests/test_auto_Fourier.py | 40 +- .../interfaces/afni/tests/test_auto_GCOR.py | 27 +- .../interfaces/afni/tests/test_auto_Hist.py | 44 +- .../interfaces/afni/tests/test_auto_LFCD.py | 47 ++- .../afni/tests/test_auto_LocalBistat.py | 60 ++- .../afni/tests/test_auto_Localstat.py | 72 +++- .../afni/tests/test_auto_MaskTool.py | 69 +++- .../afni/tests/test_auto_Maskave.py | 37 +- .../interfaces/afni/tests/test_auto_Means.py | 69 +++- .../interfaces/afni/tests/test_auto_Merge.py | 36 +- .../interfaces/afni/tests/test_auto_Notes.py | 53 ++- .../afni/tests/test_auto_NwarpAdjust.py | 29 +- .../afni/tests/test_auto_NwarpApply.py | 57 ++- .../afni/tests/test_auto_NwarpCat.py | 47 ++- .../afni/tests/test_auto_OneDToolPy.py | 65 ++- .../afni/tests/test_auto_OutlierCount.py | 69 +++- .../afni/tests/test_auto_QualityIndex.py | 59 ++- .../interfaces/afni/tests/test_auto_Qwarp.py | 241 ++++++++--- .../afni/tests/test_auto_QwarpPlusMinus.py | 241 ++++++++--- .../afni/tests/test_auto_ROIStats.py | 75 +++- .../interfaces/afni/tests/test_auto_ReHo.py | 53 ++- .../interfaces/afni/tests/test_auto_Refit.py | 88 +++- .../afni/tests/test_auto_Remlfit.py | 240 ++++++++--- .../afni/tests/test_auto_Resample.py | 37 +- .../afni/tests/test_auto_Retroicor.py | 58 ++- .../afni/tests/test_auto_SVMTest.py | 60 ++- .../afni/tests/test_auto_SVMTrain.py | 73 +++- nipype/interfaces/afni/tests/test_auto_Seg.py | 57 ++- .../afni/tests/test_auto_SkullStrip.py | 20 +- .../afni/tests/test_auto_Synthesize.py | 53 ++- .../interfaces/afni/tests/test_auto_TCat.py | 36 +- .../afni/tests/test_auto_TCatSubBrick.py | 38 +- .../afni/tests/test_auto_TCorr1D.py | 49 ++- .../afni/tests/test_auto_TCorrMap.py | 136 +++++-- .../afni/tests/test_auto_TCorrelate.py | 40 +- .../interfaces/afni/tests/test_auto_TNorm.py | 50 ++- .../afni/tests/test_auto_TProject.py | 85 +++- .../interfaces/afni/tests/test_auto_TShift.py | 71 +++- .../afni/tests/test_auto_TSmooth.py | 63 ++- .../interfaces/afni/tests/test_auto_TStat.py | 35 +- .../interfaces/afni/tests/test_auto_To3D.py | 46 ++- 
.../interfaces/afni/tests/test_auto_Undump.py | 59 ++- .../afni/tests/test_auto_Unifize.py | 64 ++- .../interfaces/afni/tests/test_auto_Volreg.py | 67 ++- .../interfaces/afni/tests/test_auto_Warp.py | 76 +++- .../interfaces/afni/tests/test_auto_ZCutUp.py | 30 +- .../interfaces/afni/tests/test_auto_Zcat.py | 45 ++- .../afni/tests/test_auto_Zeropad.py | 87 +++- nipype/interfaces/ants/tests/test_auto_AI.py | 86 +++- .../interfaces/ants/tests/test_auto_ANTS.py | 134 ++++-- .../ants/tests/test_auto_ANTSCommand.py | 14 +- .../ants/tests/test_auto_AffineInitializer.py | 71 +++- .../ants/tests/test_auto_ApplyTransforms.py | 72 +++- .../test_auto_ApplyTransformsToPoints.py | 35 +- .../ants/tests/test_auto_Atropos.py | 99 ++++- .../tests/test_auto_AverageAffineTransform.py | 37 +- .../ants/tests/test_auto_AverageImages.py | 44 +- .../ants/tests/test_auto_BrainExtraction.py | 141 +++++-- .../tests/test_auto_ComposeMultiTransform.py | 38 +- .../tests/test_auto_CompositeTransformUtil.py | 50 ++- .../test_auto_ConvertScalarImageToRGB.py | 83 +++- .../ants/tests/test_auto_CorticalThickness.py | 160 ++++++-- ...est_auto_CreateJacobianDeterminantImage.py | 50 ++- .../ants/tests/test_auto_CreateTiledMosaic.py | 70 +++- .../ants/tests/test_auto_DenoiseImage.py | 51 ++- .../ants/tests/test_auto_GenWarpFields.py | 85 +++- .../ants/tests/test_auto_ImageMath.py | 48 ++- .../ants/tests/test_auto_JointFusion.py | 100 ++++- .../ants/tests/test_auto_KellyKapowski.py | 76 +++- .../ants/tests/test_auto_LabelGeometry.py | 39 +- .../tests/test_auto_LaplacianThickness.py | 61 ++- .../tests/test_auto_MeasureImageSimilarity.py | 68 +++- .../ants/tests/test_auto_MultiplyImages.py | 44 +- .../tests/test_auto_N4BiasFieldCorrection.py | 88 +++- .../ants/tests/test_auto_Registration.py | 186 +++++++-- .../tests/test_auto_RegistrationSynQuick.py | 78 +++- .../tests/test_auto_ResampleImageBySpacing.py | 56 ++- .../ants/tests/test_auto_ThresholdImage.py | 73 +++- .../test_auto_WarpImageMultiTransform.py | 67 ++- ..._auto_WarpTimeSeriesImageMultiTransform.py | 67 ++- .../ants/tests/test_auto_antsIntroduction.py | 85 +++- .../tests/test_auto_buildtemplateparallel.py | 76 +++- .../base/tests/test_auto_CommandLine.py | 8 +- .../base/tests/test_auto_MpiCommandLine.py | 13 +- .../tests/test_auto_SEMLikeCommandLine.py | 8 +- .../base/tests/test_auto_StdOutCommandLine.py | 16 +- .../brainsuite/tests/test_auto_BDP.py | 176 ++++++-- .../brainsuite/tests/test_auto_Bfc.py | 127 ++++-- .../brainsuite/tests/test_auto_Bse.py | 112 +++-- .../brainsuite/tests/test_auto_Cerebro.py | 105 +++-- .../brainsuite/tests/test_auto_Cortex.py | 60 ++- .../brainsuite/tests/test_auto_Dewisp.py | 43 +- .../brainsuite/tests/test_auto_Dfs.py | 78 +++- .../brainsuite/tests/test_auto_Hemisplit.py | 74 +++- .../brainsuite/tests/test_auto_Pialmesh.py | 99 ++++- .../brainsuite/tests/test_auto_Pvc.py | 56 ++- .../brainsuite/tests/test_auto_SVReg.py | 95 +++-- .../brainsuite/tests/test_auto_Scrubmask.py | 49 ++- .../brainsuite/tests/test_auto_Skullfinder.py | 73 +++- .../brainsuite/tests/test_auto_Tca.py | 49 ++- .../tests/test_auto_ThicknessPVC.py | 14 +- .../camino/tests/test_auto_AnalyzeHeader.py | 133 ++++-- .../tests/test_auto_ComputeEigensystem.py | 47 ++- .../test_auto_ComputeFractionalAnisotropy.py | 47 ++- .../tests/test_auto_ComputeMeanDiffusivity.py | 47 ++- .../tests/test_auto_ComputeTensorTrace.py | 47 ++- .../camino/tests/test_auto_Conmat.py | 49 ++- .../camino/tests/test_auto_DT2NIfTI.py | 36 +- .../camino/tests/test_auto_DTIFit.py | 46 ++- 
.../camino/tests/test_auto_DTLUTGen.py | 76 +++- .../camino/tests/test_auto_DTMetric.py | 47 ++- .../camino/tests/test_auto_FSL2Scheme.py | 69 +++- .../camino/tests/test_auto_Image2Voxel.py | 33 +- .../camino/tests/test_auto_ImageStats.py | 38 +- .../camino/tests/test_auto_LinRecon.py | 56 ++- .../interfaces/camino/tests/test_auto_MESD.py | 67 ++- .../camino/tests/test_auto_ModelFit.py | 92 ++++- .../camino/tests/test_auto_NIfTIDT2Camino.py | 56 ++- .../camino/tests/test_auto_PicoPDFs.py | 60 ++- .../camino/tests/test_auto_ProcStreamlines.py | 173 ++++++-- .../camino/tests/test_auto_QBallMX.py | 53 ++- .../camino/tests/test_auto_SFLUTGen.py | 64 ++- .../camino/tests/test_auto_SFPICOCalibData.py | 84 +++- .../camino/tests/test_auto_SFPeaks.py | 87 +++- .../camino/tests/test_auto_Shredder.py | 47 ++- .../camino/tests/test_auto_Track.py | 113 ++++-- .../camino/tests/test_auto_TrackBallStick.py | 113 ++++-- .../camino/tests/test_auto_TrackBayesDirac.py | 153 +++++-- .../tests/test_auto_TrackBedpostxDeter.py | 123 ++++-- .../tests/test_auto_TrackBedpostxProba.py | 128 ++++-- .../camino/tests/test_auto_TrackBootstrap.py | 138 +++++-- .../camino/tests/test_auto_TrackDT.py | 113 ++++-- .../camino/tests/test_auto_TrackPICo.py | 122 ++++-- .../camino/tests/test_auto_TractShredder.py | 47 ++- .../camino/tests/test_auto_VtkStreamlines.py | 70 +++- .../tests/test_auto_Camino2Trackvis.py | 60 ++- .../tests/test_auto_Trackvis2Camino.py | 35 +- .../cmtk/tests/test_auto_AverageNetworks.py | 28 +- .../cmtk/tests/test_auto_CFFConverter.py | 19 +- .../cmtk/tests/test_auto_CreateMatrix.py | 114 ++++-- .../cmtk/tests/test_auto_CreateNodes.py | 21 +- .../cmtk/tests/test_auto_MergeCNetworks.py | 15 +- .../tests/test_auto_NetworkBasedStatistic.py | 44 +- .../cmtk/tests/test_auto_NetworkXMetrics.py | 76 +++- .../cmtk/tests/test_auto_Parcellate.py | 49 ++- .../interfaces/cmtk/tests/test_auto_ROIGen.py | 35 +- .../tests/test_auto_DTIRecon.py | 102 +++-- .../tests/test_auto_DTITracker.py | 111 +++-- .../tests/test_auto_HARDIMat.py | 61 ++- .../tests/test_auto_ODFRecon.py | 98 ++++- .../tests/test_auto_ODFTracker.py | 128 ++++-- .../tests/test_auto_SplineFilter.py | 35 +- .../tests/test_auto_TrackMerge.py | 28 +- .../dipy/tests/test_auto_APMQball.py | 29 +- nipype/interfaces/dipy/tests/test_auto_CSD.py | 48 ++- nipype/interfaces/dipy/tests/test_auto_DTI.py | 47 ++- .../dipy/tests/test_auto_Denoise.py | 36 +- .../tests/test_auto_DipyDiffusionInterface.py | 19 +- .../tests/test_auto_EstimateResponseSH.py | 63 ++- .../dipy/tests/test_auto_RESTORE.py | 55 ++- .../dipy/tests/test_auto_Resample.py | 16 +- .../tests/test_auto_SimulateMultiTensor.py | 93 ++++- .../tests/test_auto_StreamlineTractography.py | 71 +++- .../dipy/tests/test_auto_TensorMode.py | 29 +- .../dipy/tests/test_auto_TrackDensityMap.py | 24 +- .../dtitk/tests/test_auto_AffScalarVol.py | 47 ++- .../tests/test_auto_AffSymTensor3DVol.py | 52 ++- .../dtitk/tests/test_auto_Affine.py | 52 ++- .../dtitk/tests/test_auto_AffineTask.py | 52 ++- .../dtitk/tests/test_auto_BinThresh.py | 50 ++- .../dtitk/tests/test_auto_BinThreshTask.py | 50 ++- .../dtitk/tests/test_auto_CommandLineDtitk.py | 8 +- .../dtitk/tests/test_auto_ComposeXfm.py | 33 +- .../dtitk/tests/test_auto_ComposeXfmTask.py | 33 +- .../dtitk/tests/test_auto_Diffeo.py | 56 ++- .../dtitk/tests/test_auto_DiffeoScalarVol.py | 51 ++- .../tests/test_auto_DiffeoSymTensor3DVol.py | 61 ++- .../dtitk/tests/test_auto_DiffeoTask.py | 56 ++- .../interfaces/dtitk/tests/test_auto_Rigid.py | 52 ++- 
.../dtitk/tests/test_auto_RigidTask.py | 52 ++- .../dtitk/tests/test_auto_SVAdjustVoxSp.py | 35 +- .../tests/test_auto_SVAdjustVoxSpTask.py | 35 +- .../dtitk/tests/test_auto_SVResample.py | 40 +- .../dtitk/tests/test_auto_SVResampleTask.py | 40 +- .../tests/test_auto_TVAdjustOriginTask.py | 35 +- .../dtitk/tests/test_auto_TVAdjustVoxSp.py | 35 +- .../tests/test_auto_TVAdjustVoxSpTask.py | 35 +- .../dtitk/tests/test_auto_TVResample.py | 44 +- .../dtitk/tests/test_auto_TVResampleTask.py | 44 +- .../dtitk/tests/test_auto_TVtool.py | 31 +- .../dtitk/tests/test_auto_TVtoolTask.py | 31 +- .../dtitk/tests/test_auto_affScalarVolTask.py | 47 ++- .../tests/test_auto_affSymTensor3DVolTask.py | 52 ++- .../tests/test_auto_diffeoScalarVolTask.py | 51 ++- .../test_auto_diffeoSymTensor3DVolTask.py | 61 ++- .../elastix/tests/test_auto_AnalyzeWarp.py | 60 ++- .../elastix/tests/test_auto_ApplyWarp.py | 39 +- .../elastix/tests/test_auto_EditTransform.py | 32 +- .../elastix/tests/test_auto_PointsWarp.py | 39 +- .../elastix/tests/test_auto_Registration.py | 57 ++- .../tests/test_auto_AddXFormToHeader.py | 44 +- .../freesurfer/tests/test_auto_Aparc2Aseg.py | 107 ++++- .../freesurfer/tests/test_auto_Apas2Aseg.py | 28 +- .../freesurfer/tests/test_auto_ApplyMask.py | 64 ++- .../tests/test_auto_ApplyVolTransform.py | 61 ++- .../freesurfer/tests/test_auto_Binarize.py | 130 ++++-- .../freesurfer/tests/test_auto_CALabel.py | 79 +++- .../freesurfer/tests/test_auto_CANormalize.py | 52 ++- .../freesurfer/tests/test_auto_CARegister.py | 70 +++- .../test_auto_CheckTalairachAlignment.py | 27 +- .../freesurfer/tests/test_auto_Concatenate.py | 88 +++- .../tests/test_auto_ConcatenateLTA.py | 53 ++- .../freesurfer/tests/test_auto_Contrast.py | 62 ++- .../freesurfer/tests/test_auto_Curvature.py | 42 +- .../tests/test_auto_CurvatureStats.py | 59 ++- .../tests/test_auto_DICOMConvert.py | 37 +- .../freesurfer/tests/test_auto_EMRegister.py | 47 ++- .../tests/test_auto_EditWMwithAseg.py | 47 ++- .../freesurfer/tests/test_auto_EulerNumber.py | 21 +- .../tests/test_auto_ExtractMainComponent.py | 22 +- .../freesurfer/tests/test_auto_FSCommand.py | 9 +- .../tests/test_auto_FSCommandOpenMP.py | 9 +- .../tests/test_auto_FSScriptCommand.py | 9 +- .../freesurfer/tests/test_auto_FitMSParams.py | 33 +- .../freesurfer/tests/test_auto_FixTopology.py | 69 +++- .../tests/test_auto_FuseSegmentations.py | 47 ++- .../freesurfer/tests/test_auto_GLMFit.py | 280 ++++++++++--- .../freesurfer/tests/test_auto_ImageInfo.py | 19 +- .../freesurfer/tests/test_auto_Jacobian.py | 29 +- .../freesurfer/tests/test_auto_LTAConvert.py | 72 +++- .../freesurfer/tests/test_auto_Label2Annot.py | 54 ++- .../freesurfer/tests/test_auto_Label2Label.py | 62 ++- .../freesurfer/tests/test_auto_Label2Vol.py | 75 +++- .../tests/test_auto_MNIBiasCorrection.py | 56 ++- .../freesurfer/tests/test_auto_MPRtoMNI305.py | 34 +- .../freesurfer/tests/test_auto_MRIConvert.py | 359 ++++++++++++---- .../freesurfer/tests/test_auto_MRICoreg.py | 138 +++++-- .../freesurfer/tests/test_auto_MRIFill.py | 47 ++- .../tests/test_auto_MRIMarchingCubes.py | 41 +- .../freesurfer/tests/test_auto_MRIPretess.py | 48 ++- .../freesurfer/tests/test_auto_MRISPreproc.py | 83 +++- .../tests/test_auto_MRISPreprocReconAll.py | 93 ++++- .../tests/test_auto_MRITessellate.py | 43 +- .../freesurfer/tests/test_auto_MRIsCALabel.py | 71 +++- .../freesurfer/tests/test_auto_MRIsCalc.py | 51 ++- .../freesurfer/tests/test_auto_MRIsCombine.py | 27 +- .../freesurfer/tests/test_auto_MRIsConvert.py | 101 ++++- 
.../freesurfer/tests/test_auto_MRIsExpand.py | 74 +++- .../freesurfer/tests/test_auto_MRIsInflate.py | 34 +- .../freesurfer/tests/test_auto_MS_LDA.py | 63 ++- .../tests/test_auto_MakeAverageSubject.py | 25 +- .../tests/test_auto_MakeSurfaces.py | 125 ++++-- .../freesurfer/tests/test_auto_Normalize.py | 40 +- .../tests/test_auto_OneSampleTTest.py | 280 ++++++++++--- .../freesurfer/tests/test_auto_Paint.py | 33 +- .../tests/test_auto_ParcellationStats.py | 126 ++++-- .../tests/test_auto_ParseDICOMDir.py | 34 +- .../freesurfer/tests/test_auto_ReconAll.py | 382 ++++++++++++++---- .../freesurfer/tests/test_auto_Register.py | 51 ++- .../tests/test_auto_RegisterAVItoTalairach.py | 46 ++- .../tests/test_auto_RelabelHypointensities.py | 41 +- .../tests/test_auto_RemoveIntersection.py | 21 +- .../freesurfer/tests/test_auto_RemoveNeck.py | 40 +- .../freesurfer/tests/test_auto_Resample.py | 32 +- .../tests/test_auto_RobustRegister.py | 169 ++++++-- .../tests/test_auto_RobustTemplate.py | 73 +++- .../tests/test_auto_SampleToSurface.py | 148 +++++-- .../freesurfer/tests/test_auto_SegStats.py | 158 ++++++-- .../tests/test_auto_SegStatsReconAll.py | 214 +++++++--- .../freesurfer/tests/test_auto_SegmentCC.py | 40 +- .../freesurfer/tests/test_auto_SegmentWM.py | 29 +- .../freesurfer/tests/test_auto_Smooth.py | 53 ++- .../tests/test_auto_SmoothTessellation.py | 80 +++- .../freesurfer/tests/test_auto_Sphere.py | 34 +- .../tests/test_auto_SphericalAverage.py | 71 +++- .../tests/test_auto_Surface2VolTransform.py | 53 ++- .../tests/test_auto_SurfaceSmooth.py | 56 ++- .../tests/test_auto_SurfaceSnapshots.py | 138 +++++-- .../tests/test_auto_SurfaceTransform.py | 57 ++- .../tests/test_auto_SynthesizeFLASH.py | 58 ++- .../tests/test_auto_TalairachAVI.py | 37 +- .../freesurfer/tests/test_auto_TalairachQC.py | 23 +- .../freesurfer/tests/test_auto_Tkregister2.py | 97 ++++- .../tests/test_auto_UnpackSDICOMDir.py | 44 +- .../freesurfer/tests/test_auto_VolumeMask.py | 83 +++- .../tests/test_auto_WatershedSkullStrip.py | 44 +- .../fsl/tests/test_auto_AR1Image.py | 49 ++- .../fsl/tests/test_auto_AccuracyTester.py | 34 +- .../fsl/tests/test_auto_ApplyMask.py | 50 ++- .../fsl/tests/test_auto_ApplyTOPUP.py | 48 ++- .../fsl/tests/test_auto_ApplyWarp.py | 77 +++- .../fsl/tests/test_auto_ApplyXFM.py | 227 ++++++++--- .../interfaces/fsl/tests/test_auto_AvScale.py | 25 +- .../interfaces/fsl/tests/test_auto_B0Calc.py | 83 +++- .../fsl/tests/test_auto_BEDPOSTX5.py | 141 +++++-- nipype/interfaces/fsl/tests/test_auto_BET.py | 112 +++-- .../fsl/tests/test_auto_BinaryMaths.py | 54 ++- .../fsl/tests/test_auto_ChangeDataType.py | 44 +- .../fsl/tests/test_auto_Classifier.py | 37 +- .../interfaces/fsl/tests/test_auto_Cleaner.py | 56 ++- .../interfaces/fsl/tests/test_auto_Cluster.py | 162 ++++++-- .../interfaces/fsl/tests/test_auto_Complex.py | 75 +++- .../fsl/tests/test_auto_ContrastMgr.py | 54 ++- .../fsl/tests/test_auto_ConvertWarp.py | 89 +++- .../fsl/tests/test_auto_ConvertXFM.py | 28 +- .../fsl/tests/test_auto_CopyGeom.py | 27 +- .../interfaces/fsl/tests/test_auto_DTIFit.py | 137 +++++-- .../fsl/tests/test_auto_DilateImage.py | 65 ++- .../fsl/tests/test_auto_DistanceMap.py | 41 +- .../fsl/tests/test_auto_DualRegression.py | 61 ++- .../fsl/tests/test_auto_EPIDeWarp.py | 92 ++++- nipype/interfaces/fsl/tests/test_auto_Eddy.py | 282 ++++++++++--- .../fsl/tests/test_auto_EddyCorrect.py | 29 +- .../fsl/tests/test_auto_EddyQuad.py | 82 +++- .../interfaces/fsl/tests/test_auto_EpiReg.py | 132 ++++-- .../fsl/tests/test_auto_ErodeImage.py | 65 
++- .../fsl/tests/test_auto_ExtractROI.py | 70 +++- nipype/interfaces/fsl/tests/test_auto_FAST.py | 116 ++++-- nipype/interfaces/fsl/tests/test_auto_FEAT.py | 20 +- .../fsl/tests/test_auto_FEATModel.py | 42 +- .../fsl/tests/test_auto_FEATRegister.py | 19 +- .../interfaces/fsl/tests/test_auto_FIRST.py | 49 ++- .../interfaces/fsl/tests/test_auto_FLAMEO.py | 96 ++++- .../interfaces/fsl/tests/test_auto_FLIRT.py | 226 ++++++++--- .../interfaces/fsl/tests/test_auto_FNIRT.py | 224 +++++++--- .../fsl/tests/test_auto_FSLCommand.py | 9 +- .../fsl/tests/test_auto_FSLXCommand.py | 135 +++++-- .../interfaces/fsl/tests/test_auto_FUGUE.py | 156 +++++-- .../fsl/tests/test_auto_FeatureExtractor.py | 23 +- .../fsl/tests/test_auto_FilterRegressor.py | 58 ++- .../fsl/tests/test_auto_FindTheBiggest.py | 28 +- nipype/interfaces/fsl/tests/test_auto_GLM.py | 112 +++-- .../fsl/tests/test_auto_ICA_AROMA.py | 69 +++- .../fsl/tests/test_auto_ImageMaths.py | 49 ++- .../fsl/tests/test_auto_ImageMeants.py | 63 ++- .../fsl/tests/test_auto_ImageStats.py | 42 +- .../interfaces/fsl/tests/test_auto_InvWarp.py | 57 ++- .../fsl/tests/test_auto_IsotropicSmooth.py | 57 ++- .../interfaces/fsl/tests/test_auto_L2Model.py | 18 +- .../fsl/tests/test_auto_Level1Design.py | 25 +- .../interfaces/fsl/tests/test_auto_MCFLIRT.py | 115 ++++-- .../interfaces/fsl/tests/test_auto_MELODIC.py | 219 +++++++--- .../fsl/tests/test_auto_MakeDyadicVectors.py | 49 ++- .../fsl/tests/test_auto_MathsCommand.py | 43 +- .../fsl/tests/test_auto_MaxImage.py | 49 ++- .../fsl/tests/test_auto_MaxnImage.py | 49 ++- .../fsl/tests/test_auto_MeanImage.py | 49 ++- .../fsl/tests/test_auto_MedianImage.py | 49 ++- .../interfaces/fsl/tests/test_auto_Merge.py | 32 +- .../fsl/tests/test_auto_MinImage.py | 49 ++- .../fsl/tests/test_auto_MotionOutliers.py | 48 ++- .../fsl/tests/test_auto_MultiImageMaths.py | 53 ++- .../tests/test_auto_MultipleRegressDesign.py | 24 +- .../interfaces/fsl/tests/test_auto_Overlay.py | 72 +++- .../interfaces/fsl/tests/test_auto_PRELUDE.py | 77 +++- .../fsl/tests/test_auto_PercentileImage.py | 54 ++- .../fsl/tests/test_auto_PlotMotionParams.py | 41 +- .../fsl/tests/test_auto_PlotTimeSeries.py | 88 +++- .../fsl/tests/test_auto_PowerSpectrum.py | 28 +- .../fsl/tests/test_auto_PrepareFieldmap.py | 54 ++- .../fsl/tests/test_auto_ProbTrackX.py | 178 ++++++-- .../fsl/tests/test_auto_ProbTrackX2.py | 259 +++++++++--- .../fsl/tests/test_auto_ProjThresh.py | 25 +- .../fsl/tests/test_auto_Randomise.py | 129 ++++-- .../fsl/tests/test_auto_Reorient2Std.py | 28 +- .../fsl/tests/test_auto_RobustFOV.py | 27 +- nipype/interfaces/fsl/tests/test_auto_SMM.py | 26 +- .../interfaces/fsl/tests/test_auto_SUSAN.py | 58 ++- .../interfaces/fsl/tests/test_auto_SigLoss.py | 40 +- .../interfaces/fsl/tests/test_auto_Slice.py | 24 +- .../fsl/tests/test_auto_SliceTimer.py | 57 ++- .../interfaces/fsl/tests/test_auto_Slicer.py | 87 +++- .../interfaces/fsl/tests/test_auto_Smooth.py | 22 +- .../fsl/tests/test_auto_SmoothEstimate.py | 39 +- .../fsl/tests/test_auto_SpatialFilter.py | 65 ++- .../interfaces/fsl/tests/test_auto_Split.py | 31 +- .../fsl/tests/test_auto_StdImage.py | 49 ++- .../fsl/tests/test_auto_SwapDimensions.py | 34 +- .../interfaces/fsl/tests/test_auto_TOPUP.py | 124 ++++-- .../fsl/tests/test_auto_TemporalFilter.py | 55 ++- .../fsl/tests/test_auto_Threshold.py | 57 ++- .../fsl/tests/test_auto_TractSkeleton.py | 58 ++- .../fsl/tests/test_auto_Training.py | 31 +- .../fsl/tests/test_auto_TrainingSetCreator.py | 16 +- .../fsl/tests/test_auto_UnaryMaths.py | 49 ++- 
.../interfaces/fsl/tests/test_auto_VecReg.py | 68 +++- .../fsl/tests/test_auto_WarpPoints.py | 56 ++- .../fsl/tests/test_auto_WarpPointsFromStd.py | 56 ++- .../fsl/tests/test_auto_WarpPointsToStd.py | 61 ++- .../fsl/tests/test_auto_WarpUtils.py | 54 ++- .../fsl/tests/test_auto_XFibres5.py | 140 +++++-- .../minc/tests/test_auto_Average.py | 97 ++++- .../interfaces/minc/tests/test_auto_BBox.py | 55 ++- .../interfaces/minc/tests/test_auto_Beast.py | 106 ++++- .../minc/tests/test_auto_BestLinReg.py | 39 +- .../minc/tests/test_auto_BigAverage.py | 44 +- .../interfaces/minc/tests/test_auto_Blob.py | 38 +- .../interfaces/minc/tests/test_auto_Blur.py | 81 +++- .../interfaces/minc/tests/test_auto_Calc.py | 86 +++- .../minc/tests/test_auto_Convert.py | 43 +- .../interfaces/minc/tests/test_auto_Copy.py | 32 +- .../interfaces/minc/tests/test_auto_Dump.py | 64 ++- .../minc/tests/test_auto_Extract.py | 73 +++- .../minc/tests/test_auto_Gennlxfm.py | 38 +- .../interfaces/minc/tests/test_auto_Math.py | 180 ++++++--- .../interfaces/minc/tests/test_auto_NlpFit.py | 64 ++- .../interfaces/minc/tests/test_auto_Norm.py | 75 +++- nipype/interfaces/minc/tests/test_auto_Pik.py | 112 +++-- .../minc/tests/test_auto_Resample.py | 124 ++++-- .../minc/tests/test_auto_Reshape.py | 35 +- .../interfaces/minc/tests/test_auto_ToEcat.py | 50 ++- .../interfaces/minc/tests/test_auto_ToRaw.py | 51 ++- .../minc/tests/test_auto_VolSymm.py | 66 ++- .../minc/tests/test_auto_Volcentre.py | 43 +- .../interfaces/minc/tests/test_auto_Voliso.py | 43 +- .../interfaces/minc/tests/test_auto_Volpad.py | 51 ++- .../interfaces/minc/tests/test_auto_XfmAvg.py | 55 ++- .../minc/tests/test_auto_XfmConcat.py | 32 +- .../minc/tests/test_auto_XfmInvert.py | 39 +- .../test_auto_JistBrainMgdmSegmentation.py | 132 ++++-- ...est_auto_JistBrainMp2rageDuraEstimation.py | 55 ++- ...est_auto_JistBrainMp2rageSkullStripping.py | 86 +++- .../test_auto_JistBrainPartialVolumeFilter.py | 50 ++- ...est_auto_JistCortexSurfaceMeshInflation.py | 72 +++- .../test_auto_JistIntensityMp2rageMasking.py | 89 +++- .../test_auto_JistLaminarProfileCalculator.py | 51 ++- .../test_auto_JistLaminarProfileGeometry.py | 58 ++- .../test_auto_JistLaminarProfileSampling.py | 58 ++- .../test_auto_JistLaminarROIAveraging.py | 56 ++- ...test_auto_JistLaminarVolumetricLayering.py | 99 +++-- ...test_auto_MedicAlgorithmImageCalculator.py | 51 ++- .../test_auto_MedicAlgorithmLesionToads.py | 197 ++++++--- .../test_auto_MedicAlgorithmMipavReorient.py | 69 +++- .../mipav/tests/test_auto_MedicAlgorithmN3.py | 81 +++- .../test_auto_MedicAlgorithmSPECTRE2010.py | 245 ++++++++--- ...uto_MedicAlgorithmThresholdToBinaryMask.py | 48 ++- .../mipav/tests/test_auto_RandomVol.py | 73 +++- .../mne/tests/test_auto_WatershedBEM.py | 64 ++- ..._auto_ConstrainedSphericalDeconvolution.py | 85 +++- .../test_auto_DWI2SphericalHarmonicsImage.py | 43 +- .../mrtrix/tests/test_auto_DWI2Tensor.py | 49 ++- ...est_auto_DiffusionTensorStreamlineTrack.py | 108 +++-- .../tests/test_auto_Directions2Amplitude.py | 48 ++- .../mrtrix/tests/test_auto_Erode.py | 48 ++- .../tests/test_auto_EstimateResponseForSH.py | 59 ++- .../mrtrix/tests/test_auto_FSL2MRTrix.py | 33 +- .../mrtrix/tests/test_auto_FilterTracks.py | 53 ++- .../mrtrix/tests/test_auto_FindShPeaks.py | 57 ++- .../tests/test_auto_GenerateDirections.py | 41 +- .../test_auto_GenerateWhiteMatterMask.py | 43 +- .../mrtrix/tests/test_auto_MRConvert.py | 83 +++- .../mrtrix/tests/test_auto_MRMultiply.py | 38 +- .../mrtrix/tests/test_auto_MRTransform.py | 71 +++- 
.../mrtrix/tests/test_auto_MRTrix2TrackVis.py | 29 +- .../mrtrix/tests/test_auto_MRTrixInfo.py | 16 +- .../mrtrix/tests/test_auto_MRTrixViewer.py | 25 +- .../mrtrix/tests/test_auto_MedianFilter3D.py | 39 +- ...cSphericallyDeconvolutedStreamlineTrack.py | 107 +++-- ..._SphericallyDeconvolutedStreamlineTrack.py | 103 ++++- .../mrtrix/tests/test_auto_StreamlineTrack.py | 103 ++++- .../test_auto_Tensor2ApparentDiffusion.py | 39 +- .../test_auto_Tensor2FractionalAnisotropy.py | 39 +- .../mrtrix/tests/test_auto_Tensor2Vector.py | 39 +- .../mrtrix/tests/test_auto_Threshold.py | 57 ++- .../mrtrix/tests/test_auto_Tracks2Prob.py | 62 ++- .../mrtrix3/tests/test_auto_ACTPrepareFSL.py | 28 +- .../mrtrix3/tests/test_auto_BrainMask.py | 57 ++- .../tests/test_auto_BuildConnectome.py | 77 +++- .../mrtrix3/tests/test_auto_ComputeTDI.py | 103 ++++- ..._auto_ConstrainedSphericalDeconvolution.py | 123 ++++-- .../mrtrix3/tests/test_auto_DWIBiasCorrect.py | 76 +++- .../mrtrix3/tests/test_auto_DWIDenoise.py | 64 ++- .../mrtrix3/tests/test_auto_DWIExtract.py | 75 +++- .../mrtrix3/tests/test_auto_EstimateFOD.py | 126 ++++-- .../mrtrix3/tests/test_auto_FitTensor.py | 71 +++- .../mrtrix3/tests/test_auto_Generate5tt.py | 64 ++- .../mrtrix3/tests/test_auto_LabelConfig.py | 64 ++- .../mrtrix3/tests/test_auto_LabelConvert.py | 51 ++- .../mrtrix3/tests/test_auto_MRConvert.py | 77 +++- .../mrtrix3/tests/test_auto_MRDeGibbs.py | 74 +++- .../mrtrix3/tests/test_auto_MRMath.py | 68 +++- .../mrtrix3/tests/test_auto_MRResize.py | 64 ++- .../mrtrix3/tests/test_auto_MRTrix3Base.py | 8 +- .../mrtrix3/tests/test_auto_Mesh2PVE.py | 40 +- .../tests/test_auto_ReplaceFSwithFIRST.py | 41 +- .../mrtrix3/tests/test_auto_ResponseSD.py | 101 ++++- .../mrtrix3/tests/test_auto_TCK2VTK.py | 44 +- .../mrtrix3/tests/test_auto_TensorMetrics.py | 67 ++- .../mrtrix3/tests/test_auto_Tractography.py | 205 +++++++--- .../niftyfit/tests/test_auto_DwiTool.py | 75 +++- .../niftyfit/tests/test_auto_FitAsl.py | 166 ++++++-- .../niftyfit/tests/test_auto_FitDwi.py | 179 ++++++-- .../niftyfit/tests/test_auto_FitQt1.py | 164 ++++++-- .../tests/test_auto_NiftyFitCommand.py | 8 +- .../tests/test_auto_NiftyRegCommand.py | 14 +- .../niftyreg/tests/test_auto_RegAladin.py | 125 ++++-- .../niftyreg/tests/test_auto_RegAverage.py | 32 +- .../niftyreg/tests/test_auto_RegF3D.py | 230 ++++++++--- .../niftyreg/tests/test_auto_RegJacobian.py | 37 +- .../niftyreg/tests/test_auto_RegMeasure.py | 37 +- .../niftyreg/tests/test_auto_RegResample.py | 67 ++- .../niftyreg/tests/test_auto_RegTools.py | 88 +++- .../niftyreg/tests/test_auto_RegTransform.py | 49 ++- .../niftyseg/tests/test_auto_BinaryMaths.py | 33 +- .../tests/test_auto_BinaryMathsInteger.py | 39 +- .../niftyseg/tests/test_auto_BinaryStats.py | 42 +- .../niftyseg/tests/test_auto_CalcTopNCC.py | 43 +- .../interfaces/niftyseg/tests/test_auto_EM.py | 79 +++- .../niftyseg/tests/test_auto_FillLesions.py | 78 +++- .../niftyseg/tests/test_auto_LabelFusion.py | 88 +++- .../niftyseg/tests/test_auto_MathsCommand.py | 27 +- .../niftyseg/tests/test_auto_Merge.py | 37 +- .../tests/test_auto_NiftySegCommand.py | 8 +- .../niftyseg/tests/test_auto_PatchMatch.py | 54 ++- .../niftyseg/tests/test_auto_StatsCommand.py | 31 +- .../niftyseg/tests/test_auto_TupleMaths.py | 43 +- .../niftyseg/tests/test_auto_UnaryMaths.py | 33 +- .../niftyseg/tests/test_auto_UnaryStats.py | 37 +- .../nipy/tests/test_auto_ComputeMask.py | 15 +- .../nipy/tests/test_auto_EstimateContrast.py | 44 +- .../interfaces/nipy/tests/test_auto_FitGLM.py | 56 ++- 
.../nipy/tests/test_auto_Similarity.py | 26 +- .../tests/test_auto_SpaceTimeRealigner.py | 18 +- .../interfaces/nipy/tests/test_auto_Trim.py | 27 +- .../tests/test_auto_CoherenceAnalyzer.py | 45 ++- ...t_auto_BRAINSPosteriorToContinuousClass.py | 55 ++- .../brains/tests/test_auto_BRAINSTalairach.py | 67 ++- .../tests/test_auto_BRAINSTalairachMask.py | 43 +- .../tests/test_auto_GenerateEdgeMapImage.py | 54 ++- .../tests/test_auto_GeneratePurePlugMask.py | 33 +- .../test_auto_HistogramMatchingFilter.py | 58 ++- .../brains/tests/test_auto_SimilarityIndex.py | 28 +- .../diffusion/tests/test_auto_DWIConvert.py | 106 +++-- .../tests/test_auto_compareTractInclusion.py | 43 +- .../diffusion/tests/test_auto_dtiaverage.py | 32 +- .../diffusion/tests/test_auto_dtiestim.py | 105 +++-- .../diffusion/tests/test_auto_dtiprocess.py | 202 ++++++--- .../tests/test_auto_extractNrrdVectorIndex.py | 37 +- .../tests/test_auto_gtractAnisotropyMap.py | 33 +- .../tests/test_auto_gtractAverageBvalues.py | 37 +- .../tests/test_auto_gtractClipAnisotropy.py | 37 +- .../tests/test_auto_gtractCoRegAnatomy.py | 116 ++++-- .../tests/test_auto_gtractConcatDwi.py | 32 +- .../test_auto_gtractCopyImageOrientation.py | 34 +- .../tests/test_auto_gtractCoregBvalues.py | 84 +++- .../tests/test_auto_gtractCostFastMarching.py | 60 ++- .../tests/test_auto_gtractCreateGuideFiber.py | 37 +- .../test_auto_gtractFastMarchingTracking.py | 74 +++- .../tests/test_auto_gtractFiberTracking.py | 129 ++++-- .../tests/test_auto_gtractImageConformity.py | 34 +- .../test_auto_gtractInvertBSplineTransform.py | 39 +- ...test_auto_gtractInvertDisplacementField.py | 38 +- .../test_auto_gtractInvertRigidTransform.py | 29 +- .../test_auto_gtractResampleAnisotropy.py | 39 +- .../tests/test_auto_gtractResampleB0.py | 47 ++- .../test_auto_gtractResampleCodeImage.py | 43 +- .../test_auto_gtractResampleDWIInPlace.py | 59 ++- .../tests/test_auto_gtractResampleFibers.py | 39 +- .../diffusion/tests/test_auto_gtractTensor.py | 64 ++- ...auto_gtractTransformToDisplacementField.py | 32 +- .../diffusion/tests/test_auto_maxcurvature.py | 33 +- .../tests/test_auto_UKFTractography.py | 152 +++++-- .../tests/test_auto_fiberprocess.py | 78 +++- .../tests/test_auto_fiberstats.py | 18 +- .../tests/test_auto_fibertrack.py | 70 +++- .../filtering/tests/test_auto_CannyEdge.py | 37 +- ...to_CannySegmentationLevelSetImageFilter.py | 56 ++- .../filtering/tests/test_auto_DilateImage.py | 34 +- .../filtering/tests/test_auto_DilateMask.py | 38 +- .../filtering/tests/test_auto_DistanceMaps.py | 34 +- .../test_auto_DumpBinaryTrainingVectors.py | 19 +- .../filtering/tests/test_auto_ErodeImage.py | 34 +- .../tests/test_auto_FlippedDifference.py | 30 +- .../test_auto_GenerateBrainClippedImage.py | 34 +- .../test_auto_GenerateSummedGradientImage.py | 38 +- .../tests/test_auto_GenerateTestImage.py | 37 +- ...GradientAnisotropicDiffusionImageFilter.py | 37 +- .../tests/test_auto_HammerAttributeCreator.py | 36 +- .../tests/test_auto_NeighborhoodMean.py | 34 +- .../tests/test_auto_NeighborhoodMedian.py | 34 +- .../tests/test_auto_STAPLEAnalysis.py | 28 +- .../test_auto_TextureFromNoiseImageFilter.py | 29 +- .../tests/test_auto_TextureMeasureFilter.py | 38 +- .../tests/test_auto_UnbiasedNonLocalMeans.py | 50 ++- .../legacy/tests/test_auto_scalartransform.py | 48 ++- .../tests/test_auto_BRAINSDemonWarp.py | 183 ++++++--- .../registration/tests/test_auto_BRAINSFit.py | 311 ++++++++++---- .../tests/test_auto_BRAINSResample.py | 65 ++- .../tests/test_auto_BRAINSResize.py | 33 +- 
.../test_auto_BRAINSTransformFromFiducials.py | 46 ++- .../tests/test_auto_VBRAINSDemonWarp.py | 186 ++++++--- .../segmentation/tests/test_auto_BRAINSABC.py | 165 ++++++-- .../test_auto_BRAINSConstellationDetector.py | 213 +++++++--- ...BRAINSCreateLabelMapFromProbabilityMaps.py | 50 ++- .../segmentation/tests/test_auto_BRAINSCut.py | 79 +++- .../tests/test_auto_BRAINSMultiSTAPLE.py | 48 ++- .../tests/test_auto_BRAINSROIAuto.py | 63 ++- ...t_auto_BinaryMaskEditorBasedOnLandmarks.py | 42 +- .../segmentation/tests/test_auto_ESLR.py | 53 ++- .../semtools/tests/test_auto_DWICompare.py | 19 +- .../tests/test_auto_DWISimpleCompare.py | 23 +- ...o_GenerateCsfClippedFromClassifiedImage.py | 25 +- .../tests/test_auto_BRAINSAlignMSP.py | 66 ++- .../tests/test_auto_BRAINSClipInferior.py | 37 +- .../test_auto_BRAINSConstellationModeler.py | 66 ++- .../tests/test_auto_BRAINSEyeDetector.py | 33 +- ...est_auto_BRAINSInitializedControlPoints.py | 43 +- .../test_auto_BRAINSLandmarkInitializer.py | 29 +- .../test_auto_BRAINSLinearModelerEPCA.py | 18 +- .../tests/test_auto_BRAINSLmkTransform.py | 47 ++- .../utilities/tests/test_auto_BRAINSMush.py | 94 ++++- .../tests/test_auto_BRAINSSnapShotWriter.py | 42 +- .../tests/test_auto_BRAINSTransformConvert.py | 45 ++- ...st_auto_BRAINSTrimForegroundInDirection.py | 49 ++- .../tests/test_auto_CleanUpOverlapLabels.py | 20 +- .../tests/test_auto_FindCenterOfBrain.py | 103 +++-- ...auto_GenerateLabelMapFromProbabilityMap.py | 28 +- .../tests/test_auto_ImageRegionPlotter.py | 49 ++- .../tests/test_auto_JointHistogram.py | 33 +- .../tests/test_auto_ShuffleVectorsModule.py | 25 +- .../utilities/tests/test_auto_fcsv_to_hdf5.py | 42 +- .../tests/test_auto_insertMidACPCpoint.py | 25 +- ...test_auto_landmarksConstellationAligner.py | 23 +- ...test_auto_landmarksConstellationWeights.py | 35 +- .../diffusion/tests/test_auto_DTIexport.py | 28 +- .../diffusion/tests/test_auto_DTIimport.py | 32 +- .../test_auto_DWIJointRicianLMMSEFilter.py | 46 ++- .../tests/test_auto_DWIRicianLMMSEFilter.py | 70 +++- .../tests/test_auto_DWIToDTIEstimation.py | 50 ++- ..._auto_DiffusionTensorScalarMeasurements.py | 32 +- ...est_auto_DiffusionWeightedVolumeMasking.py | 45 ++- .../tests/test_auto_ResampleDTIVolume.py | 131 ++++-- .../test_auto_TractographyLabelMapSeeding.py | 89 +++- .../tests/test_auto_AddScalarVolumes.py | 38 +- .../tests/test_auto_CastScalarVolume.py | 32 +- .../tests/test_auto_CheckerBoardFilter.py | 39 +- ...test_auto_CurvatureAnisotropicDiffusion.py | 40 +- .../tests/test_auto_ExtractSkeleton.py | 44 +- .../test_auto_GaussianBlurImageFilter.py | 32 +- .../test_auto_GradientAnisotropicDiffusion.py | 40 +- .../test_auto_GrayscaleFillHoleImageFilter.py | 28 +- ...test_auto_GrayscaleGrindPeakImageFilter.py | 28 +- .../tests/test_auto_HistogramMatching.py | 46 ++- .../tests/test_auto_ImageLabelCombine.py | 38 +- .../tests/test_auto_MaskScalarVolume.py | 42 +- .../tests/test_auto_MedianImageFilter.py | 33 +- .../tests/test_auto_MultiplyScalarVolumes.py | 38 +- .../test_auto_N4ITKBiasFieldCorrection.py | 72 +++- ...test_auto_ResampleScalarVectorDWIVolume.py | 123 ++++-- .../tests/test_auto_SubtractScalarVolumes.py | 38 +- .../tests/test_auto_ThresholdScalarVolume.py | 48 ++- ...auto_VotingBinaryHoleFillingImageFilter.py | 45 ++- ...est_auto_DWIUnbiasedNonLocalMeansFilter.py | 51 ++- .../tests/test_auto_AffineRegistration.py | 66 ++- ...test_auto_BSplineDeformableRegistration.py | 79 +++- .../test_auto_BSplineToDeformationField.py | 30 +- .../test_auto_ExpertAutomatedRegistration.py 
| 136 +++++-- .../tests/test_auto_LinearRegistration.py | 72 +++- ..._auto_MultiResolutionAffineRegistration.py | 67 ++- .../test_auto_OtsuThresholdImageFilter.py | 40 +- .../test_auto_OtsuThresholdSegmentation.py | 44 +- .../tests/test_auto_ResampleScalarVolume.py | 37 +- .../tests/test_auto_RigidRegistration.py | 76 +++- .../test_auto_IntensityDifferenceMetric.py | 55 ++- ..._auto_PETStandardUptakeValueComputation.py | 59 ++- .../tests/test_auto_ACPCTransform.py | 32 +- .../tests/test_auto_BRAINSDemonWarp.py | 183 ++++++--- .../registration/tests/test_auto_BRAINSFit.py | 271 ++++++++++--- .../tests/test_auto_BRAINSResample.py | 65 ++- .../tests/test_auto_FiducialRegistration.py | 40 +- .../tests/test_auto_VBRAINSDemonWarp.py | 186 ++++++--- .../tests/test_auto_BRAINSROIAuto.py | 54 ++- .../tests/test_auto_EMSegmentCommandLine.py | 104 +++-- .../test_auto_RobustStatisticsSegmenter.py | 54 ++- ...st_auto_SimpleRegionGrowingSegmentation.py | 56 ++- .../tests/test_auto_DicomToNrrdConverter.py | 42 +- ...test_auto_EMSegmentTransformToNewFormat.py | 29 +- .../tests/test_auto_GrayscaleModelMaker.py | 52 ++- .../tests/test_auto_LabelMapSmoothing.py | 44 +- .../slicer/tests/test_auto_MergeModels.py | 34 +- .../slicer/tests/test_auto_ModelMaker.py | 90 ++++- .../slicer/tests/test_auto_ModelToLabelMap.py | 38 +- .../tests/test_auto_OrientScalarVolume.py | 32 +- .../tests/test_auto_ProbeVolumeWithModel.py | 34 +- .../tests/test_auto_SlicerCommandLine.py | 8 +- .../spm/tests/test_auto_Analyze2nii.py | 27 +- .../spm/tests/test_auto_ApplyDeformations.py | 28 +- .../test_auto_ApplyInverseDeformation.py | 39 +- .../spm/tests/test_auto_ApplyTransform.py | 31 +- .../spm/tests/test_auto_CalcCoregAffine.py | 37 +- .../spm/tests/test_auto_Coregister.py | 62 ++- .../spm/tests/test_auto_CreateWarped.py | 35 +- .../interfaces/spm/tests/test_auto_DARTEL.py | 36 +- .../spm/tests/test_auto_DARTELNorm2MNI.py | 39 +- .../spm/tests/test_auto_DicomImport.py | 38 +- .../spm/tests/test_auto_EstimateContrast.py | 41 +- .../spm/tests/test_auto_EstimateModel.py | 43 +- .../spm/tests/test_auto_FactorialDesign.py | 46 ++- .../spm/tests/test_auto_FieldMap.py | 117 ++++-- .../spm/tests/test_auto_Level1Design.py | 72 +++- .../tests/test_auto_MultiChannelNewSegment.py | 33 +- .../test_auto_MultipleRegressionDesign.py | 60 ++- .../spm/tests/test_auto_NewSegment.py | 39 +- .../spm/tests/test_auto_Normalize.py | 84 +++- .../spm/tests/test_auto_Normalize12.py | 63 ++- .../tests/test_auto_OneSampleTTestDesign.py | 51 ++- .../spm/tests/test_auto_PairedTTestDesign.py | 59 ++- .../interfaces/spm/tests/test_auto_Realign.py | 76 +++- .../spm/tests/test_auto_RealignUnwarp.py | 121 ++++-- .../interfaces/spm/tests/test_auto_Reslice.py | 33 +- .../spm/tests/test_auto_ResliceToReference.py | 35 +- .../spm/tests/test_auto_SPMCommand.py | 9 +- .../interfaces/spm/tests/test_auto_Segment.py | 126 ++++-- .../spm/tests/test_auto_SliceTiming.py | 49 ++- .../interfaces/spm/tests/test_auto_Smooth.py | 36 +- .../spm/tests/test_auto_Threshold.py | 61 ++- .../tests/test_auto_ThresholdStatistics.py | 33 +- .../tests/test_auto_TwoSampleTTestDesign.py | 64 ++- .../spm/tests/test_auto_VBMSegment.py | 150 +++++-- .../tests/test_auto_BIDSDataGrabber.py | 13 +- nipype/interfaces/tests/test_auto_Bru2.py | 42 +- nipype/interfaces/tests/test_auto_C3d.py | 66 ++- .../tests/test_auto_C3dAffineTool.py | 44 +- nipype/interfaces/tests/test_auto_CopyMeta.py | 16 +- .../interfaces/tests/test_auto_DataFinder.py | 12 +- .../interfaces/tests/test_auto_DataGrabber.py | 16 +- 
nipype/interfaces/tests/test_auto_DataSink.py | 16 +- nipype/interfaces/tests/test_auto_Dcm2nii.py | 89 +++- nipype/interfaces/tests/test_auto_Dcm2niix.py | 94 ++++- nipype/interfaces/tests/test_auto_DcmStack.py | 18 +- .../interfaces/tests/test_auto_ExportFile.py | 20 +- .../tests/test_auto_FreeSurferSource.py | 186 +++++++-- .../tests/test_auto_GroupAndStack.py | 16 +- .../tests/test_auto_JSONFileGrabber.py | 7 +- .../tests/test_auto_JSONFileSink.py | 18 +- .../interfaces/tests/test_auto_LookupMeta.py | 8 +- .../tests/test_auto_MatlabCommand.py | 57 ++- .../interfaces/tests/test_auto_MergeNifti.py | 14 +- nipype/interfaces/tests/test_auto_MeshFix.py | 127 ++++-- .../interfaces/tests/test_auto_MySQLSink.py | 14 +- nipype/interfaces/tests/test_auto_PETPVC.py | 79 +++- .../interfaces/tests/test_auto_Quickshear.py | 34 +- nipype/interfaces/tests/test_auto_Reorient.py | 16 +- nipype/interfaces/tests/test_auto_Rescale.py | 20 +- .../tests/test_auto_S3DataGrabber.py | 28 +- .../interfaces/tests/test_auto_SQLiteSink.py | 9 +- .../tests/test_auto_SSHDataGrabber.py | 36 +- .../interfaces/tests/test_auto_SelectFiles.py | 12 +- .../tests/test_auto_SignalExtraction.py | 36 +- .../tests/test_auto_SlicerCommandLine.py | 9 +- .../interfaces/tests/test_auto_SplitNifti.py | 13 +- nipype/interfaces/tests/test_auto_XNATSink.py | 40 +- .../interfaces/tests/test_auto_XNATSource.py | 20 +- .../utility/tests/test_auto_AssertEqual.py | 10 +- .../utility/tests/test_auto_CSVReader.py | 8 +- .../utility/tests/test_auto_Function.py | 6 +- .../utility/tests/test_auto_Merge.py | 16 +- .../utility/tests/test_auto_Rename.py | 19 +- .../utility/tests/test_auto_Select.py | 13 +- .../utility/tests/test_auto_Split.py | 12 +- .../vista/tests/test_auto_Vnifti2Image.py | 28 +- .../vista/tests/test_auto_VtoMat.py | 22 +- .../workbench/tests/test_auto_CiftiSmooth.py | 85 +++- .../tests/test_auto_MetricResample.py | 85 +++- .../workbench/tests/test_auto_WBCommand.py | 8 +- 833 files changed, 40312 insertions(+), 10893 deletions(-) diff --git a/nipype/algorithms/tests/test_auto_ACompCor.py b/nipype/algorithms/tests/test_auto_ACompCor.py index e2788e97d5..814aa71704 100644 --- a/nipype/algorithms/tests/test_auto_ACompCor.py +++ b/nipype/algorithms/tests/test_auto_ACompCor.py @@ -4,24 +4,56 @@ def test_ACompCor_inputs(): input_map = dict( - components_file=dict(usedefault=True,), - failure_mode=dict(usedefault=True,), + components_file=dict( + usedefault=True, + ), + failure_mode=dict( + usedefault=True, + ), header_prefix=dict(), - high_pass_cutoff=dict(usedefault=True,), - ignore_initial_volumes=dict(usedefault=True,), + high_pass_cutoff=dict( + usedefault=True, + ), + ignore_initial_volumes=dict( + usedefault=True, + ), mask_files=dict(), - mask_index=dict(requires=["mask_files"], xor=["merge_method"],), + mask_index=dict( + requires=["mask_files"], + xor=["merge_method"], + ), mask_names=dict(), - merge_method=dict(requires=["mask_files"], xor=["mask_index"],), - num_components=dict(xor=["variance_threshold"],), - pre_filter=dict(usedefault=True,), - realigned_file=dict(extensions=None, mandatory=True,), - regress_poly_degree=dict(usedefault=True,), + merge_method=dict( + requires=["mask_files"], + xor=["mask_index"], + ), + num_components=dict( + xor=["variance_threshold"], + ), + pre_filter=dict( + usedefault=True, + ), + realigned_file=dict( + extensions=None, + mandatory=True, + ), + regress_poly_degree=dict( + usedefault=True, + ), repetition_time=dict(), - save_metadata=dict(usedefault=True,), - 
save_pre_filter=dict(usedefault=True,), - use_regress_poly=dict(deprecated="0.15.0", new_name="pre_filter",), - variance_threshold=dict(xor=["num_components"],), + save_metadata=dict( + usedefault=True, + ), + save_pre_filter=dict( + usedefault=True, + ), + use_regress_poly=dict( + deprecated="0.15.0", + new_name="pre_filter", + ), + variance_threshold=dict( + xor=["num_components"], + ), ) inputs = ACompCor.input_spec() @@ -32,9 +64,15 @@ def test_ACompCor_inputs(): def test_ACompCor_outputs(): output_map = dict( - components_file=dict(extensions=None,), - metadata_file=dict(extensions=None,), - pre_filter_file=dict(extensions=None,), + components_file=dict( + extensions=None, + ), + metadata_file=dict( + extensions=None, + ), + pre_filter_file=dict( + extensions=None, + ), ) outputs = ACompCor.output_spec() diff --git a/nipype/algorithms/tests/test_auto_ActivationCount.py b/nipype/algorithms/tests/test_auto_ActivationCount.py index 6fc50301ac..7df84ee122 100644 --- a/nipype/algorithms/tests/test_auto_ActivationCount.py +++ b/nipype/algorithms/tests/test_auto_ActivationCount.py @@ -3,7 +3,14 @@ def test_ActivationCount_inputs(): - input_map = dict(in_files=dict(mandatory=True,), threshold=dict(mandatory=True,),) + input_map = dict( + in_files=dict( + mandatory=True, + ), + threshold=dict( + mandatory=True, + ), + ) inputs = ActivationCount.input_spec() for key, metadata in list(input_map.items()): @@ -13,9 +20,15 @@ def test_ActivationCount_inputs(): def test_ActivationCount_outputs(): output_map = dict( - acm_neg=dict(extensions=None,), - acm_pos=dict(extensions=None,), - out_file=dict(extensions=None,), + acm_neg=dict( + extensions=None, + ), + acm_pos=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), ) outputs = ActivationCount.output_spec() diff --git a/nipype/algorithms/tests/test_auto_AddCSVColumn.py b/nipype/algorithms/tests/test_auto_AddCSVColumn.py index a2d82b6eec..b76fd46457 100644 --- a/nipype/algorithms/tests/test_auto_AddCSVColumn.py +++ b/nipype/algorithms/tests/test_auto_AddCSVColumn.py @@ -6,8 +6,14 @@ def test_AddCSVColumn_inputs(): input_map = dict( extra_column_heading=dict(), extra_field=dict(), - in_file=dict(extensions=None, mandatory=True,), - out_file=dict(extensions=None, usedefault=True,), + in_file=dict( + extensions=None, + mandatory=True, + ), + out_file=dict( + extensions=None, + usedefault=True, + ), ) inputs = AddCSVColumn.input_spec() @@ -17,7 +23,11 @@ def test_AddCSVColumn_inputs(): def test_AddCSVColumn_outputs(): - output_map = dict(csv_file=dict(extensions=None,),) + output_map = dict( + csv_file=dict( + extensions=None, + ), + ) outputs = AddCSVColumn.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_AddCSVRow.py b/nipype/algorithms/tests/test_auto_AddCSVRow.py index 39d6d40abb..78976f418d 100644 --- a/nipype/algorithms/tests/test_auto_AddCSVRow.py +++ b/nipype/algorithms/tests/test_auto_AddCSVRow.py @@ -4,7 +4,13 @@ def test_AddCSVRow_inputs(): input_map = dict( - _outputs=dict(usedefault=True,), in_file=dict(extensions=None, mandatory=True,), + _outputs=dict( + usedefault=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = AddCSVRow.input_spec() @@ -14,7 +20,11 @@ def test_AddCSVRow_inputs(): def test_AddCSVRow_outputs(): - output_map = dict(csv_file=dict(extensions=None,),) + output_map = dict( + csv_file=dict( + extensions=None, + ), + ) outputs = AddCSVRow.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/algorithms/tests/test_auto_AddNoise.py b/nipype/algorithms/tests/test_auto_AddNoise.py index ad1e8734e8..5cf92e33f7 100644 --- a/nipype/algorithms/tests/test_auto_AddNoise.py +++ b/nipype/algorithms/tests/test_auto_AddNoise.py @@ -4,12 +4,27 @@ def test_AddNoise_inputs(): input_map = dict( - bg_dist=dict(mandatory=True, usedefault=True,), - dist=dict(mandatory=True, usedefault=True,), - in_file=dict(extensions=None, mandatory=True,), - in_mask=dict(extensions=None,), - out_file=dict(extensions=None,), - snr=dict(usedefault=True,), + bg_dist=dict( + mandatory=True, + usedefault=True, + ), + dist=dict( + mandatory=True, + usedefault=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + in_mask=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + snr=dict( + usedefault=True, + ), ) inputs = AddNoise.input_spec() @@ -19,7 +34,11 @@ def test_AddNoise_inputs(): def test_AddNoise_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = AddNoise.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_ArtifactDetect.py b/nipype/algorithms/tests/test_auto_ArtifactDetect.py index 9340982472..51010aea3a 100644 --- a/nipype/algorithms/tests/test_auto_ArtifactDetect.py +++ b/nipype/algorithms/tests/test_auto_ArtifactDetect.py @@ -4,25 +4,61 @@ def test_ArtifactDetect_inputs(): input_map = dict( - bound_by_brainmask=dict(usedefault=True,), - global_threshold=dict(usedefault=True,), - intersect_mask=dict(usedefault=True,), - mask_file=dict(extensions=None,), + bound_by_brainmask=dict( + usedefault=True, + ), + global_threshold=dict( + usedefault=True, + ), + intersect_mask=dict( + usedefault=True, + ), + mask_file=dict( + extensions=None, + ), mask_threshold=dict(), - mask_type=dict(mandatory=True,), + mask_type=dict( + mandatory=True, + ), norm_threshold=dict( - mandatory=True, xor=["rotation_threshold", "translation_threshold"], - ), - parameter_source=dict(mandatory=True,), - plot_type=dict(usedefault=True,), - realigned_files=dict(mandatory=True,), - realignment_parameters=dict(mandatory=True,), - rotation_threshold=dict(mandatory=True, xor=["norm_threshold"],), - save_plot=dict(usedefault=True,), - translation_threshold=dict(mandatory=True, xor=["norm_threshold"],), - use_differences=dict(maxlen=2, minlen=2, usedefault=True,), - use_norm=dict(requires=["norm_threshold"], usedefault=True,), - zintensity_threshold=dict(mandatory=True,), + mandatory=True, + xor=["rotation_threshold", "translation_threshold"], + ), + parameter_source=dict( + mandatory=True, + ), + plot_type=dict( + usedefault=True, + ), + realigned_files=dict( + mandatory=True, + ), + realignment_parameters=dict( + mandatory=True, + ), + rotation_threshold=dict( + mandatory=True, + xor=["norm_threshold"], + ), + save_plot=dict( + usedefault=True, + ), + translation_threshold=dict( + mandatory=True, + xor=["norm_threshold"], + ), + use_differences=dict( + maxlen=2, + minlen=2, + usedefault=True, + ), + use_norm=dict( + requires=["norm_threshold"], + usedefault=True, + ), + zintensity_threshold=dict( + mandatory=True, + ), ) inputs = ArtifactDetect.input_spec() diff --git a/nipype/algorithms/tests/test_auto_CalculateMedian.py b/nipype/algorithms/tests/test_auto_CalculateMedian.py index ff8d9edd7a..ddc8b9814d 100644 --- a/nipype/algorithms/tests/test_auto_CalculateMedian.py +++ b/nipype/algorithms/tests/test_auto_CalculateMedian.py @@ -4,7 +4,11 @@ def 
test_CalculateMedian_inputs(): input_map = dict( - in_files=dict(), median_file=dict(), median_per_file=dict(usedefault=True,), + in_files=dict(), + median_file=dict(), + median_per_file=dict( + usedefault=True, + ), ) inputs = CalculateMedian.input_spec() @@ -14,7 +18,9 @@ def test_CalculateMedian_inputs(): def test_CalculateMedian_outputs(): - output_map = dict(median_files=dict(),) + output_map = dict( + median_files=dict(), + ) outputs = CalculateMedian.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py b/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py index 102ec2c205..a67f959176 100644 --- a/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py +++ b/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py @@ -4,8 +4,13 @@ def test_CalculateNormalizedMoments_inputs(): input_map = dict( - moment=dict(mandatory=True,), - timeseries_file=dict(extensions=None, mandatory=True,), + moment=dict( + mandatory=True, + ), + timeseries_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = CalculateNormalizedMoments.input_spec() @@ -15,7 +20,9 @@ def test_CalculateNormalizedMoments_inputs(): def test_CalculateNormalizedMoments_outputs(): - output_map = dict(moments=dict(),) + output_map = dict( + moments=dict(), + ) outputs = CalculateNormalizedMoments.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_ComputeDVARS.py b/nipype/algorithms/tests/test_auto_ComputeDVARS.py index ed51de0b0a..5fe2d241b9 100644 --- a/nipype/algorithms/tests/test_auto_ComputeDVARS.py +++ b/nipype/algorithms/tests/test_auto_ComputeDVARS.py @@ -4,18 +4,44 @@ def test_ComputeDVARS_inputs(): input_map = dict( - figdpi=dict(usedefault=True,), - figformat=dict(usedefault=True,), - figsize=dict(usedefault=True,), - in_file=dict(extensions=None, mandatory=True,), - in_mask=dict(extensions=None, mandatory=True,), - intensity_normalization=dict(usedefault=True,), - remove_zerovariance=dict(usedefault=True,), - save_all=dict(usedefault=True,), - save_nstd=dict(usedefault=True,), - save_plot=dict(usedefault=True,), - save_std=dict(usedefault=True,), - save_vxstd=dict(usedefault=True,), + figdpi=dict( + usedefault=True, + ), + figformat=dict( + usedefault=True, + ), + figsize=dict( + usedefault=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + in_mask=dict( + extensions=None, + mandatory=True, + ), + intensity_normalization=dict( + usedefault=True, + ), + remove_zerovariance=dict( + usedefault=True, + ), + save_all=dict( + usedefault=True, + ), + save_nstd=dict( + usedefault=True, + ), + save_plot=dict( + usedefault=True, + ), + save_std=dict( + usedefault=True, + ), + save_vxstd=dict( + usedefault=True, + ), series_tr=dict(), ) inputs = ComputeDVARS.input_spec() @@ -30,13 +56,27 @@ def test_ComputeDVARS_outputs(): avg_nstd=dict(), avg_std=dict(), avg_vxstd=dict(), - fig_nstd=dict(extensions=None,), - fig_std=dict(extensions=None,), - fig_vxstd=dict(extensions=None,), - out_all=dict(extensions=None,), - out_nstd=dict(extensions=None,), - out_std=dict(extensions=None,), - out_vxstd=dict(extensions=None,), + fig_nstd=dict( + extensions=None, + ), + fig_std=dict( + extensions=None, + ), + fig_vxstd=dict( + extensions=None, + ), + out_all=dict( + extensions=None, + ), + out_nstd=dict( + extensions=None, + ), + out_std=dict( + extensions=None, + ), + out_vxstd=dict( + extensions=None, + ), ) outputs = ComputeDVARS.output_spec() diff 
--git a/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py b/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py index 871564b817..639f03770c 100644 --- a/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py +++ b/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py @@ -4,12 +4,28 @@ def test_ComputeMeshWarp_inputs(): input_map = dict( - metric=dict(usedefault=True,), - out_file=dict(extensions=None, usedefault=True,), - out_warp=dict(extensions=None, usedefault=True,), - surface1=dict(extensions=None, mandatory=True,), - surface2=dict(extensions=None, mandatory=True,), - weighting=dict(usedefault=True,), + metric=dict( + usedefault=True, + ), + out_file=dict( + extensions=None, + usedefault=True, + ), + out_warp=dict( + extensions=None, + usedefault=True, + ), + surface1=dict( + extensions=None, + mandatory=True, + ), + surface2=dict( + extensions=None, + mandatory=True, + ), + weighting=dict( + usedefault=True, + ), ) inputs = ComputeMeshWarp.input_spec() @@ -21,8 +37,12 @@ def test_ComputeMeshWarp_inputs(): def test_ComputeMeshWarp_outputs(): output_map = dict( distance=dict(), - out_file=dict(extensions=None,), - out_warp=dict(extensions=None,), + out_file=dict( + extensions=None, + ), + out_warp=dict( + extensions=None, + ), ) outputs = ComputeMeshWarp.output_spec() diff --git a/nipype/algorithms/tests/test_auto_CreateNifti.py b/nipype/algorithms/tests/test_auto_CreateNifti.py index d4989386b4..f5c5c4a2f5 100644 --- a/nipype/algorithms/tests/test_auto_CreateNifti.py +++ b/nipype/algorithms/tests/test_auto_CreateNifti.py @@ -5,8 +5,14 @@ def test_CreateNifti_inputs(): input_map = dict( affine=dict(), - data_file=dict(extensions=None, mandatory=True,), - header_file=dict(extensions=None, mandatory=True,), + data_file=dict( + extensions=None, + mandatory=True, + ), + header_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = CreateNifti.input_spec() @@ -16,7 +22,11 @@ def test_CreateNifti_inputs(): def test_CreateNifti_outputs(): - output_map = dict(nifti_file=dict(extensions=None,),) + output_map = dict( + nifti_file=dict( + extensions=None, + ), + ) outputs = CreateNifti.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_Distance.py b/nipype/algorithms/tests/test_auto_Distance.py index e334e3a0f1..46e48342c4 100644 --- a/nipype/algorithms/tests/test_auto_Distance.py +++ b/nipype/algorithms/tests/test_auto_Distance.py @@ -4,10 +4,20 @@ def test_Distance_inputs(): input_map = dict( - mask_volume=dict(extensions=None,), - method=dict(usedefault=True,), - volume1=dict(extensions=None, mandatory=True,), - volume2=dict(extensions=None, mandatory=True,), + mask_volume=dict( + extensions=None, + ), + method=dict( + usedefault=True, + ), + volume1=dict( + extensions=None, + mandatory=True, + ), + volume2=dict( + extensions=None, + mandatory=True, + ), ) inputs = Distance.input_spec() @@ -18,7 +28,12 @@ def test_Distance_inputs(): def test_Distance_outputs(): output_map = dict( - distance=dict(), histogram=dict(extensions=None,), point1=dict(), point2=dict(), + distance=dict(), + histogram=dict( + extensions=None, + ), + point1=dict(), + point2=dict(), ) outputs = Distance.output_spec() diff --git a/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py b/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py index 1bc46fba64..1308b4d97d 100644 --- a/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py +++ b/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py @@ -4,15 +4,36 @@ def 
test_FramewiseDisplacement_inputs(): input_map = dict( - figdpi=dict(usedefault=True,), - figsize=dict(usedefault=True,), - in_file=dict(extensions=None, mandatory=True,), - normalize=dict(usedefault=True,), - out_figure=dict(extensions=None, usedefault=True,), - out_file=dict(extensions=None, usedefault=True,), - parameter_source=dict(mandatory=True,), - radius=dict(usedefault=True,), - save_plot=dict(usedefault=True,), + figdpi=dict( + usedefault=True, + ), + figsize=dict( + usedefault=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + normalize=dict( + usedefault=True, + ), + out_figure=dict( + extensions=None, + usedefault=True, + ), + out_file=dict( + extensions=None, + usedefault=True, + ), + parameter_source=dict( + mandatory=True, + ), + radius=dict( + usedefault=True, + ), + save_plot=dict( + usedefault=True, + ), series_tr=dict(), ) inputs = FramewiseDisplacement.input_spec() @@ -25,8 +46,12 @@ def test_FramewiseDisplacement_inputs(): def test_FramewiseDisplacement_outputs(): output_map = dict( fd_average=dict(), - out_figure=dict(extensions=None,), - out_file=dict(extensions=None,), + out_figure=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), ) outputs = FramewiseDisplacement.output_spec() diff --git a/nipype/algorithms/tests/test_auto_FuzzyOverlap.py b/nipype/algorithms/tests/test_auto_FuzzyOverlap.py index 877f864bee..e8a7fe5ef1 100644 --- a/nipype/algorithms/tests/test_auto_FuzzyOverlap.py +++ b/nipype/algorithms/tests/test_auto_FuzzyOverlap.py @@ -4,11 +4,22 @@ def test_FuzzyOverlap_inputs(): input_map = dict( - in_mask=dict(extensions=None,), - in_ref=dict(mandatory=True,), - in_tst=dict(mandatory=True,), - out_file=dict(extensions=None, usedefault=True,), - weighting=dict(usedefault=True,), + in_mask=dict( + extensions=None, + ), + in_ref=dict( + mandatory=True, + ), + in_tst=dict( + mandatory=True, + ), + out_file=dict( + extensions=None, + usedefault=True, + ), + weighting=dict( + usedefault=True, + ), ) inputs = FuzzyOverlap.input_spec() @@ -18,7 +29,12 @@ def test_FuzzyOverlap_inputs(): def test_FuzzyOverlap_outputs(): - output_map = dict(class_fdi=dict(), class_fji=dict(), dice=dict(), jaccard=dict(),) + output_map = dict( + class_fdi=dict(), + class_fji=dict(), + dice=dict(), + jaccard=dict(), + ) outputs = FuzzyOverlap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_Gunzip.py b/nipype/algorithms/tests/test_auto_Gunzip.py index 40a1f44531..7629feb820 100644 --- a/nipype/algorithms/tests/test_auto_Gunzip.py +++ b/nipype/algorithms/tests/test_auto_Gunzip.py @@ -3,7 +3,12 @@ def test_Gunzip_inputs(): - input_map = dict(in_file=dict(extensions=None, mandatory=True,),) + input_map = dict( + in_file=dict( + extensions=None, + mandatory=True, + ), + ) inputs = Gunzip.input_spec() for key, metadata in list(input_map.items()): @@ -12,7 +17,11 @@ def test_Gunzip_inputs(): def test_Gunzip_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Gunzip.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_ICC.py b/nipype/algorithms/tests/test_auto_ICC.py index 9797fdb4af..4a2389202c 100644 --- a/nipype/algorithms/tests/test_auto_ICC.py +++ b/nipype/algorithms/tests/test_auto_ICC.py @@ -4,8 +4,13 @@ def test_ICC_inputs(): input_map = dict( - mask=dict(extensions=None, mandatory=True,), - subjects_sessions=dict(mandatory=True,), + mask=dict( + 
extensions=None, + mandatory=True, + ), + subjects_sessions=dict( + mandatory=True, + ), ) inputs = ICC.input_spec() @@ -16,9 +21,15 @@ def test_ICC_inputs(): def test_ICC_outputs(): output_map = dict( - icc_map=dict(extensions=None,), - session_var_map=dict(extensions=None,), - subject_var_map=dict(extensions=None,), + icc_map=dict( + extensions=None, + ), + session_var_map=dict( + extensions=None, + ), + subject_var_map=dict( + extensions=None, + ), ) outputs = ICC.output_spec() diff --git a/nipype/algorithms/tests/test_auto_Matlab2CSV.py b/nipype/algorithms/tests/test_auto_Matlab2CSV.py index 665dbc3fed..42acbd514a 100644 --- a/nipype/algorithms/tests/test_auto_Matlab2CSV.py +++ b/nipype/algorithms/tests/test_auto_Matlab2CSV.py @@ -4,8 +4,13 @@ def test_Matlab2CSV_inputs(): input_map = dict( - in_file=dict(extensions=None, mandatory=True,), - reshape_matrix=dict(usedefault=True,), + in_file=dict( + extensions=None, + mandatory=True, + ), + reshape_matrix=dict( + usedefault=True, + ), ) inputs = Matlab2CSV.input_spec() @@ -15,7 +20,9 @@ def test_Matlab2CSV_inputs(): def test_Matlab2CSV_outputs(): - output_map = dict(csv_files=dict(),) + output_map = dict( + csv_files=dict(), + ) outputs = Matlab2CSV.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_MergeCSVFiles.py b/nipype/algorithms/tests/test_auto_MergeCSVFiles.py index f4f7bc54a0..bb7e9ed65a 100644 --- a/nipype/algorithms/tests/test_auto_MergeCSVFiles.py +++ b/nipype/algorithms/tests/test_auto_MergeCSVFiles.py @@ -7,9 +7,16 @@ def test_MergeCSVFiles_inputs(): column_headings=dict(), extra_column_heading=dict(), extra_field=dict(), - in_files=dict(mandatory=True,), - out_file=dict(extensions=None, usedefault=True,), - row_heading_title=dict(usedefault=True,), + in_files=dict( + mandatory=True, + ), + out_file=dict( + extensions=None, + usedefault=True, + ), + row_heading_title=dict( + usedefault=True, + ), row_headings=dict(), ) inputs = MergeCSVFiles.input_spec() @@ -20,7 +27,11 @@ def test_MergeCSVFiles_inputs(): def test_MergeCSVFiles_outputs(): - output_map = dict(csv_file=dict(extensions=None,),) + output_map = dict( + csv_file=dict( + extensions=None, + ), + ) outputs = MergeCSVFiles.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_MergeROIs.py b/nipype/algorithms/tests/test_auto_MergeROIs.py index 7f56b9d08c..c43a33b686 100644 --- a/nipype/algorithms/tests/test_auto_MergeROIs.py +++ b/nipype/algorithms/tests/test_auto_MergeROIs.py @@ -4,7 +4,11 @@ def test_MergeROIs_inputs(): input_map = dict( - in_files=dict(), in_index=dict(), in_reference=dict(extensions=None,), + in_files=dict(), + in_index=dict(), + in_reference=dict( + extensions=None, + ), ) inputs = MergeROIs.input_spec() @@ -14,7 +18,11 @@ def test_MergeROIs_inputs(): def test_MergeROIs_outputs(): - output_map = dict(merged_file=dict(extensions=None,),) + output_map = dict( + merged_file=dict( + extensions=None, + ), + ) outputs = MergeROIs.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_MeshWarpMaths.py b/nipype/algorithms/tests/test_auto_MeshWarpMaths.py index be0de9e541..a4295b8f46 100644 --- a/nipype/algorithms/tests/test_auto_MeshWarpMaths.py +++ b/nipype/algorithms/tests/test_auto_MeshWarpMaths.py @@ -5,11 +5,25 @@ def test_MeshWarpMaths_inputs(): input_map = dict( float_trait=dict(), - in_surf=dict(extensions=None, mandatory=True,), - operation=dict(usedefault=True,), - 
operator=dict(mandatory=True, usedefault=True,), - out_file=dict(extensions=None, usedefault=True,), - out_warp=dict(extensions=None, usedefault=True,), + in_surf=dict( + extensions=None, + mandatory=True, + ), + operation=dict( + usedefault=True, + ), + operator=dict( + mandatory=True, + usedefault=True, + ), + out_file=dict( + extensions=None, + usedefault=True, + ), + out_warp=dict( + extensions=None, + usedefault=True, + ), ) inputs = MeshWarpMaths.input_spec() @@ -19,7 +33,14 @@ def test_MeshWarpMaths_inputs(): def test_MeshWarpMaths_outputs(): - output_map = dict(out_file=dict(extensions=None,), out_warp=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + out_warp=dict( + extensions=None, + ), + ) outputs = MeshWarpMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_ModifyAffine.py b/nipype/algorithms/tests/test_auto_ModifyAffine.py index a4a441e662..6592e28690 100644 --- a/nipype/algorithms/tests/test_auto_ModifyAffine.py +++ b/nipype/algorithms/tests/test_auto_ModifyAffine.py @@ -4,7 +4,12 @@ def test_ModifyAffine_inputs(): input_map = dict( - transformation_matrix=dict(usedefault=True,), volumes=dict(mandatory=True,), + transformation_matrix=dict( + usedefault=True, + ), + volumes=dict( + mandatory=True, + ), ) inputs = ModifyAffine.input_spec() @@ -14,7 +19,9 @@ def test_ModifyAffine_inputs(): def test_ModifyAffine_outputs(): - output_map = dict(transformed_volumes=dict(),) + output_map = dict( + transformed_volumes=dict(), + ) outputs = ModifyAffine.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py b/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py index 5d42bcf0e7..9e14e00595 100644 --- a/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py +++ b/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py @@ -3,7 +3,12 @@ def test_NonSteadyStateDetector_inputs(): - input_map = dict(in_file=dict(extensions=None, mandatory=True,),) + input_map = dict( + in_file=dict( + extensions=None, + mandatory=True, + ), + ) inputs = NonSteadyStateDetector.input_spec() for key, metadata in list(input_map.items()): @@ -12,7 +17,9 @@ def test_NonSteadyStateDetector_inputs(): def test_NonSteadyStateDetector_outputs(): - output_map = dict(n_volumes_to_discard=dict(),) + output_map = dict( + n_volumes_to_discard=dict(), + ) outputs = NonSteadyStateDetector.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py b/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py index 41b8cc030d..be18979a85 100644 --- a/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py +++ b/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py @@ -3,7 +3,12 @@ def test_NormalizeProbabilityMapSet_inputs(): - input_map = dict(in_files=dict(), in_mask=dict(extensions=None,),) + input_map = dict( + in_files=dict(), + in_mask=dict( + extensions=None, + ), + ) inputs = NormalizeProbabilityMapSet.input_spec() for key, metadata in list(input_map.items()): @@ -12,7 +17,9 @@ def test_NormalizeProbabilityMapSet_inputs(): def test_NormalizeProbabilityMapSet_outputs(): - output_map = dict(out_files=dict(),) + output_map = dict( + out_files=dict(), + ) outputs = NormalizeProbabilityMapSet.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_P2PDistance.py 
b/nipype/algorithms/tests/test_auto_P2PDistance.py index 0c11648576..a5623353ec 100644 --- a/nipype/algorithms/tests/test_auto_P2PDistance.py +++ b/nipype/algorithms/tests/test_auto_P2PDistance.py @@ -4,12 +4,28 @@ def test_P2PDistance_inputs(): input_map = dict( - metric=dict(usedefault=True,), - out_file=dict(extensions=None, usedefault=True,), - out_warp=dict(extensions=None, usedefault=True,), - surface1=dict(extensions=None, mandatory=True,), - surface2=dict(extensions=None, mandatory=True,), - weighting=dict(usedefault=True,), + metric=dict( + usedefault=True, + ), + out_file=dict( + extensions=None, + usedefault=True, + ), + out_warp=dict( + extensions=None, + usedefault=True, + ), + surface1=dict( + extensions=None, + mandatory=True, + ), + surface2=dict( + extensions=None, + mandatory=True, + ), + weighting=dict( + usedefault=True, + ), ) inputs = P2PDistance.input_spec() @@ -21,8 +37,12 @@ def test_P2PDistance_inputs(): def test_P2PDistance_outputs(): output_map = dict( distance=dict(), - out_file=dict(extensions=None,), - out_warp=dict(extensions=None,), + out_file=dict( + extensions=None, + ), + out_warp=dict( + extensions=None, + ), ) outputs = P2PDistance.output_spec() diff --git a/nipype/algorithms/tests/test_auto_PickAtlas.py b/nipype/algorithms/tests/test_auto_PickAtlas.py index 71a76aba5c..2a29ca8d23 100644 --- a/nipype/algorithms/tests/test_auto_PickAtlas.py +++ b/nipype/algorithms/tests/test_auto_PickAtlas.py @@ -4,11 +4,22 @@ def test_PickAtlas_inputs(): input_map = dict( - atlas=dict(extensions=None, mandatory=True,), - dilation_size=dict(usedefault=True,), - hemi=dict(usedefault=True,), - labels=dict(mandatory=True,), - output_file=dict(extensions=None,), + atlas=dict( + extensions=None, + mandatory=True, + ), + dilation_size=dict( + usedefault=True, + ), + hemi=dict( + usedefault=True, + ), + labels=dict( + mandatory=True, + ), + output_file=dict( + extensions=None, + ), ) inputs = PickAtlas.input_spec() @@ -18,7 +29,11 @@ def test_PickAtlas_inputs(): def test_PickAtlas_outputs(): - output_map = dict(mask_file=dict(extensions=None,),) + output_map = dict( + mask_file=dict( + extensions=None, + ), + ) outputs = PickAtlas.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_Similarity.py b/nipype/algorithms/tests/test_auto_Similarity.py index 3a851d0d30..a5e5f583d5 100644 --- a/nipype/algorithms/tests/test_auto_Similarity.py +++ b/nipype/algorithms/tests/test_auto_Similarity.py @@ -4,11 +4,23 @@ def test_Similarity_inputs(): input_map = dict( - mask1=dict(extensions=None,), - mask2=dict(extensions=None,), - metric=dict(usedefault=True,), - volume1=dict(extensions=None, mandatory=True,), - volume2=dict(extensions=None, mandatory=True,), + mask1=dict( + extensions=None, + ), + mask2=dict( + extensions=None, + ), + metric=dict( + usedefault=True, + ), + volume1=dict( + extensions=None, + mandatory=True, + ), + volume2=dict( + extensions=None, + mandatory=True, + ), ) inputs = Similarity.input_spec() @@ -18,7 +30,9 @@ def test_Similarity_inputs(): def test_Similarity_outputs(): - output_map = dict(similarity=dict(),) + output_map = dict( + similarity=dict(), + ) outputs = Similarity.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_SimpleThreshold.py b/nipype/algorithms/tests/test_auto_SimpleThreshold.py index 7a1c531c3d..ab7141f0de 100644 --- a/nipype/algorithms/tests/test_auto_SimpleThreshold.py +++ b/nipype/algorithms/tests/test_auto_SimpleThreshold.py @@ 
-3,7 +3,14 @@ def test_SimpleThreshold_inputs(): - input_map = dict(threshold=dict(mandatory=True,), volumes=dict(mandatory=True,),) + input_map = dict( + threshold=dict( + mandatory=True, + ), + volumes=dict( + mandatory=True, + ), + ) inputs = SimpleThreshold.input_spec() for key, metadata in list(input_map.items()): @@ -12,7 +19,9 @@ def test_SimpleThreshold_inputs(): def test_SimpleThreshold_outputs(): - output_map = dict(thresholded_volumes=dict(),) + output_map = dict( + thresholded_volumes=dict(), + ) outputs = SimpleThreshold.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_SpecifyModel.py b/nipype/algorithms/tests/test_auto_SpecifyModel.py index fd583f42bc..15d9e4994e 100644 --- a/nipype/algorithms/tests/test_auto_SpecifyModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifyModel.py @@ -5,23 +5,43 @@ def test_SpecifyModel_inputs(): input_map = dict( bids_amplitude_column=dict(), - bids_condition_column=dict(usedefault=True,), + bids_condition_column=dict( + usedefault=True, + ), bids_event_file=dict( - mandatory=True, xor=["subject_info", "event_files", "bids_event_file"], + mandatory=True, + xor=["subject_info", "event_files", "bids_event_file"], ), event_files=dict( - mandatory=True, xor=["subject_info", "event_files", "bids_event_file"], - ), - functional_runs=dict(copyfile=False, mandatory=True,), - high_pass_filter_cutoff=dict(mandatory=True,), - input_units=dict(mandatory=True,), - outlier_files=dict(copyfile=False,), - parameter_source=dict(usedefault=True,), - realignment_parameters=dict(copyfile=False,), + mandatory=True, + xor=["subject_info", "event_files", "bids_event_file"], + ), + functional_runs=dict( + copyfile=False, + mandatory=True, + ), + high_pass_filter_cutoff=dict( + mandatory=True, + ), + input_units=dict( + mandatory=True, + ), + outlier_files=dict( + copyfile=False, + ), + parameter_source=dict( + usedefault=True, + ), + realignment_parameters=dict( + copyfile=False, + ), subject_info=dict( - mandatory=True, xor=["subject_info", "event_files", "bids_event_file"], + mandatory=True, + xor=["subject_info", "event_files", "bids_event_file"], + ), + time_repetition=dict( + mandatory=True, ), - time_repetition=dict(mandatory=True,), ) inputs = SpecifyModel.input_spec() @@ -31,7 +51,9 @@ def test_SpecifyModel_inputs(): def test_SpecifyModel_outputs(): - output_map = dict(session_info=dict(),) + output_map = dict( + session_info=dict(), + ) outputs = SpecifyModel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_SpecifySPMModel.py b/nipype/algorithms/tests/test_auto_SpecifySPMModel.py index cb8c5f7a17..64bb206359 100644 --- a/nipype/algorithms/tests/test_auto_SpecifySPMModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifySPMModel.py @@ -5,25 +5,49 @@ def test_SpecifySPMModel_inputs(): input_map = dict( bids_amplitude_column=dict(), - bids_condition_column=dict(usedefault=True,), + bids_condition_column=dict( + usedefault=True, + ), bids_event_file=dict( - mandatory=True, xor=["subject_info", "event_files", "bids_event_file"], + mandatory=True, + xor=["subject_info", "event_files", "bids_event_file"], + ), + concatenate_runs=dict( + usedefault=True, ), - concatenate_runs=dict(usedefault=True,), event_files=dict( - mandatory=True, xor=["subject_info", "event_files", "bids_event_file"], - ), - functional_runs=dict(copyfile=False, mandatory=True,), - high_pass_filter_cutoff=dict(mandatory=True,), - input_units=dict(mandatory=True,), - 
outlier_files=dict(copyfile=False,), - output_units=dict(usedefault=True,), - parameter_source=dict(usedefault=True,), - realignment_parameters=dict(copyfile=False,), + mandatory=True, + xor=["subject_info", "event_files", "bids_event_file"], + ), + functional_runs=dict( + copyfile=False, + mandatory=True, + ), + high_pass_filter_cutoff=dict( + mandatory=True, + ), + input_units=dict( + mandatory=True, + ), + outlier_files=dict( + copyfile=False, + ), + output_units=dict( + usedefault=True, + ), + parameter_source=dict( + usedefault=True, + ), + realignment_parameters=dict( + copyfile=False, + ), subject_info=dict( - mandatory=True, xor=["subject_info", "event_files", "bids_event_file"], + mandatory=True, + xor=["subject_info", "event_files", "bids_event_file"], + ), + time_repetition=dict( + mandatory=True, ), - time_repetition=dict(mandatory=True,), ) inputs = SpecifySPMModel.input_spec() @@ -33,7 +57,9 @@ def test_SpecifySPMModel_inputs(): def test_SpecifySPMModel_outputs(): - output_map = dict(session_info=dict(),) + output_map = dict( + session_info=dict(), + ) outputs = SpecifySPMModel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_SpecifySparseModel.py b/nipype/algorithms/tests/test_auto_SpecifySparseModel.py index ad116a86b1..cac4ce5770 100644 --- a/nipype/algorithms/tests/test_auto_SpecifySparseModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifySparseModel.py @@ -5,31 +5,63 @@ def test_SpecifySparseModel_inputs(): input_map = dict( bids_amplitude_column=dict(), - bids_condition_column=dict(usedefault=True,), + bids_condition_column=dict( + usedefault=True, + ), bids_event_file=dict( - mandatory=True, xor=["subject_info", "event_files", "bids_event_file"], + mandatory=True, + xor=["subject_info", "event_files", "bids_event_file"], ), event_files=dict( - mandatory=True, xor=["subject_info", "event_files", "bids_event_file"], + mandatory=True, + xor=["subject_info", "event_files", "bids_event_file"], + ), + functional_runs=dict( + copyfile=False, + mandatory=True, + ), + high_pass_filter_cutoff=dict( + mandatory=True, + ), + input_units=dict( + mandatory=True, ), - functional_runs=dict(copyfile=False, mandatory=True,), - high_pass_filter_cutoff=dict(mandatory=True,), - input_units=dict(mandatory=True,), model_hrf=dict(), - outlier_files=dict(copyfile=False,), - parameter_source=dict(usedefault=True,), - realignment_parameters=dict(copyfile=False,), + outlier_files=dict( + copyfile=False, + ), + parameter_source=dict( + usedefault=True, + ), + realignment_parameters=dict( + copyfile=False, + ), save_plot=dict(), - scale_regressors=dict(usedefault=True,), - scan_onset=dict(usedefault=True,), - stimuli_as_impulses=dict(usedefault=True,), + scale_regressors=dict( + usedefault=True, + ), + scan_onset=dict( + usedefault=True, + ), + stimuli_as_impulses=dict( + usedefault=True, + ), subject_info=dict( - mandatory=True, xor=["subject_info", "event_files", "bids_event_file"], + mandatory=True, + xor=["subject_info", "event_files", "bids_event_file"], + ), + time_acquisition=dict( + mandatory=True, + ), + time_repetition=dict( + mandatory=True, + ), + use_temporal_deriv=dict( + requires=["model_hrf"], + ), + volumes_in_cluster=dict( + usedefault=True, ), - time_acquisition=dict(mandatory=True,), - time_repetition=dict(mandatory=True,), - use_temporal_deriv=dict(requires=["model_hrf"],), - volumes_in_cluster=dict(usedefault=True,), ) inputs = SpecifySparseModel.input_spec() @@ -41,8 +73,12 @@ def test_SpecifySparseModel_inputs(): 
def test_SpecifySparseModel_outputs(): output_map = dict( session_info=dict(), - sparse_png_file=dict(extensions=None,), - sparse_svg_file=dict(extensions=None,), + sparse_png_file=dict( + extensions=None, + ), + sparse_svg_file=dict( + extensions=None, + ), ) outputs = SpecifySparseModel.output_spec() diff --git a/nipype/algorithms/tests/test_auto_SplitROIs.py b/nipype/algorithms/tests/test_auto_SplitROIs.py index a9f3844775..c9eec86058 100644 --- a/nipype/algorithms/tests/test_auto_SplitROIs.py +++ b/nipype/algorithms/tests/test_auto_SplitROIs.py @@ -4,8 +4,13 @@ def test_SplitROIs_inputs(): input_map = dict( - in_file=dict(extensions=None, mandatory=True,), - in_mask=dict(extensions=None,), + in_file=dict( + extensions=None, + mandatory=True, + ), + in_mask=dict( + extensions=None, + ), roi_size=dict(), ) inputs = SplitROIs.input_spec() @@ -16,7 +21,11 @@ def test_SplitROIs_inputs(): def test_SplitROIs_outputs(): - output_map = dict(out_files=dict(), out_index=dict(), out_masks=dict(),) + output_map = dict( + out_files=dict(), + out_index=dict(), + out_masks=dict(), + ) outputs = SplitROIs.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_StimulusCorrelation.py b/nipype/algorithms/tests/test_auto_StimulusCorrelation.py index 2e95175ca8..19cec418c4 100644 --- a/nipype/algorithms/tests/test_auto_StimulusCorrelation.py +++ b/nipype/algorithms/tests/test_auto_StimulusCorrelation.py @@ -4,10 +4,19 @@ def test_StimulusCorrelation_inputs(): input_map = dict( - concatenated_design=dict(mandatory=True,), - intensity_values=dict(mandatory=True,), - realignment_parameters=dict(mandatory=True,), - spm_mat_file=dict(extensions=None, mandatory=True,), + concatenated_design=dict( + mandatory=True, + ), + intensity_values=dict( + mandatory=True, + ), + realignment_parameters=dict( + mandatory=True, + ), + spm_mat_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = StimulusCorrelation.input_spec() @@ -17,7 +26,9 @@ def test_StimulusCorrelation_inputs(): def test_StimulusCorrelation_outputs(): - output_map = dict(stimcorr_files=dict(),) + output_map = dict( + stimcorr_files=dict(), + ) outputs = StimulusCorrelation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_TCompCor.py b/nipype/algorithms/tests/test_auto_TCompCor.py index d8cb0d7ae6..0f802cc92e 100644 --- a/nipype/algorithms/tests/test_auto_TCompCor.py +++ b/nipype/algorithms/tests/test_auto_TCompCor.py @@ -4,25 +4,59 @@ def test_TCompCor_inputs(): input_map = dict( - components_file=dict(usedefault=True,), - failure_mode=dict(usedefault=True,), + components_file=dict( + usedefault=True, + ), + failure_mode=dict( + usedefault=True, + ), header_prefix=dict(), - high_pass_cutoff=dict(usedefault=True,), - ignore_initial_volumes=dict(usedefault=True,), + high_pass_cutoff=dict( + usedefault=True, + ), + ignore_initial_volumes=dict( + usedefault=True, + ), mask_files=dict(), - mask_index=dict(requires=["mask_files"], xor=["merge_method"],), + mask_index=dict( + requires=["mask_files"], + xor=["merge_method"], + ), mask_names=dict(), - merge_method=dict(requires=["mask_files"], xor=["mask_index"],), - num_components=dict(xor=["variance_threshold"],), - percentile_threshold=dict(usedefault=True,), - pre_filter=dict(usedefault=True,), - realigned_file=dict(extensions=None, mandatory=True,), - regress_poly_degree=dict(usedefault=True,), + merge_method=dict( + requires=["mask_files"], + xor=["mask_index"], + ), + 
num_components=dict( + xor=["variance_threshold"], + ), + percentile_threshold=dict( + usedefault=True, + ), + pre_filter=dict( + usedefault=True, + ), + realigned_file=dict( + extensions=None, + mandatory=True, + ), + regress_poly_degree=dict( + usedefault=True, + ), repetition_time=dict(), - save_metadata=dict(usedefault=True,), - save_pre_filter=dict(usedefault=True,), - use_regress_poly=dict(deprecated="0.15.0", new_name="pre_filter",), - variance_threshold=dict(xor=["num_components"],), + save_metadata=dict( + usedefault=True, + ), + save_pre_filter=dict( + usedefault=True, + ), + use_regress_poly=dict( + deprecated="0.15.0", + new_name="pre_filter", + ), + variance_threshold=dict( + xor=["num_components"], + ), ) inputs = TCompCor.input_spec() @@ -33,10 +67,16 @@ def test_TCompCor_inputs(): def test_TCompCor_outputs(): output_map = dict( - components_file=dict(extensions=None,), + components_file=dict( + extensions=None, + ), high_variance_masks=dict(), - metadata_file=dict(extensions=None,), - pre_filter_file=dict(extensions=None,), + metadata_file=dict( + extensions=None, + ), + pre_filter_file=dict( + extensions=None, + ), ) outputs = TCompCor.output_spec() diff --git a/nipype/algorithms/tests/test_auto_WarpPoints.py b/nipype/algorithms/tests/test_auto_WarpPoints.py index 493f9fb26f..fc72866d83 100644 --- a/nipype/algorithms/tests/test_auto_WarpPoints.py +++ b/nipype/algorithms/tests/test_auto_WarpPoints.py @@ -4,7 +4,10 @@ def test_WarpPoints_inputs(): input_map = dict( - interp=dict(mandatory=True, usedefault=True,), + interp=dict( + mandatory=True, + usedefault=True, + ), out_points=dict( extensions=None, keep_extension=True, @@ -12,8 +15,14 @@ def test_WarpPoints_inputs(): name_template="%s_warped", output_name="out_points", ), - points=dict(extensions=None, mandatory=True,), - warp=dict(extensions=None, mandatory=True,), + points=dict( + extensions=None, + mandatory=True, + ), + warp=dict( + extensions=None, + mandatory=True, + ), ) inputs = WarpPoints.input_spec() @@ -23,7 +32,11 @@ def test_WarpPoints_inputs(): def test_WarpPoints_outputs(): - output_map = dict(out_points=dict(extensions=None,),) + output_map = dict( + out_points=dict( + extensions=None, + ), + ) outputs = WarpPoints.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_ABoverlap.py b/nipype/interfaces/afni/tests/test_auto_ABoverlap.py index 63e2e8d652..501f5331b7 100644 --- a/nipype/interfaces/afni/tests/test_auto_ABoverlap.py +++ b/nipype/interfaces/afni/tests/test_auto_ABoverlap.py @@ -4,20 +4,46 @@ def test_ABoverlap_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file_a=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-3, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-3, ), in_file_b=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-2, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-2, + ), + no_automask=dict( + argstr="-no_automask", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr=" |& tee %s", + extensions=None, + position=-1, ), - no_automask=dict(argstr="-no_automask",), - num_threads=dict(nohash=True, usedefault=True,), - out_file=dict(argstr=" |& tee %s", extensions=None, position=-1,), outputtype=dict(), - 
quiet=dict(argstr="-quiet",), - verb=dict(argstr="-verb",), + quiet=dict( + argstr="-quiet", + ), + verb=dict( + argstr="-verb", + ), ) inputs = ABoverlap.input_spec() @@ -27,7 +53,11 @@ def test_ABoverlap_inputs(): def test_ABoverlap_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ABoverlap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_AFNICommand.py b/nipype/interfaces/afni/tests/test_auto_AFNICommand.py index 0764b4947b..941667f49f 100644 --- a/nipype/interfaces/afni/tests/test_auto_AFNICommand.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNICommand.py @@ -4,9 +4,17 @@ def test_AFNICommand_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - num_threads=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, diff --git a/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py b/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py index 58de0b425a..de23f6c05b 100644 --- a/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py @@ -4,7 +4,13 @@ def test_AFNICommandBase_inputs(): input_map = dict( - args=dict(argstr="%s",), environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), ) inputs = AFNICommandBase.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py b/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py index e437676286..fd4682947b 100644 --- a/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py @@ -4,9 +4,17 @@ def test_AFNIPythonCommand_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - num_threads=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, diff --git a/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py b/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py index 19012ff364..6983e839fb 100644 --- a/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py @@ -4,15 +4,35 @@ def test_AFNItoNIFTI_inputs(): input_map = dict( - args=dict(argstr="%s",), - denote=dict(argstr="-denote",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + denote=dict( + argstr="-denote", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + newid=dict( + argstr="-newid", + xor=["oldid"], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + oldid=dict( + argstr="-oldid", + xor=["newid"], ), - newid=dict(argstr="-newid", xor=["oldid"],), - num_threads=dict(nohash=True, usedefault=True,), - oldid=dict(argstr="-oldid", xor=["newid"],), out_file=dict( argstr="-prefix %s", extensions=None, @@ -21,7 +41,9 @@ def 
test_AFNItoNIFTI_inputs(): name_template="%s.nii", ), outputtype=dict(), - pure=dict(argstr="-pure",), + pure=dict( + argstr="-pure", + ), ) inputs = AFNItoNIFTI.input_spec() @@ -31,7 +53,11 @@ def test_AFNItoNIFTI_inputs(): def test_AFNItoNIFTI_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = AFNItoNIFTI.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py b/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py index e2b8f5a3c9..a3b376f55b 100644 --- a/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py +++ b/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py @@ -4,22 +4,57 @@ def test_AlignEpiAnatPy_inputs(): input_map = dict( - anat=dict(argstr="-anat %s", copyfile=False, extensions=None, mandatory=True,), - anat2epi=dict(argstr="-anat2epi",), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - epi2anat=dict(argstr="-epi2anat",), - epi_base=dict(argstr="-epi_base %s", mandatory=True,), - epi_strip=dict(argstr="-epi_strip %s",), + anat=dict( + argstr="-anat %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + anat2epi=dict( + argstr="-anat2epi", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + epi2anat=dict( + argstr="-epi2anat", + ), + epi_base=dict( + argstr="-epi_base %s", + mandatory=True, + ), + epi_strip=dict( + argstr="-epi_strip %s", + ), in_file=dict( - argstr="-epi %s", copyfile=False, extensions=None, mandatory=True, + argstr="-epi %s", + copyfile=False, + extensions=None, + mandatory=True, ), outputtype=dict(), - py27_path=dict(usedefault=True,), - save_skullstrip=dict(argstr="-save_skullstrip",), - suffix=dict(argstr="-suffix %s", usedefault=True,), - tshift=dict(argstr="-tshift %s", usedefault=True,), - volreg=dict(argstr="-volreg %s", usedefault=True,), + py27_path=dict( + usedefault=True, + ), + save_skullstrip=dict( + argstr="-save_skullstrip", + ), + suffix=dict( + argstr="-suffix %s", + usedefault=True, + ), + tshift=dict( + argstr="-tshift %s", + usedefault=True, + ), + volreg=dict( + argstr="-volreg %s", + usedefault=True, + ), ) inputs = AlignEpiAnatPy.input_spec() @@ -30,16 +65,36 @@ def test_AlignEpiAnatPy_inputs(): def test_AlignEpiAnatPy_outputs(): output_map = dict( - anat_al_mat=dict(extensions=None,), - anat_al_orig=dict(extensions=None,), - epi_al_mat=dict(extensions=None,), - epi_al_orig=dict(extensions=None,), - epi_al_tlrc_mat=dict(extensions=None,), - epi_reg_al_mat=dict(extensions=None,), - epi_tlrc_al=dict(extensions=None,), - epi_vr_al_mat=dict(extensions=None,), - epi_vr_motion=dict(extensions=None,), - skullstrip=dict(extensions=None,), + anat_al_mat=dict( + extensions=None, + ), + anat_al_orig=dict( + extensions=None, + ), + epi_al_mat=dict( + extensions=None, + ), + epi_al_orig=dict( + extensions=None, + ), + epi_al_tlrc_mat=dict( + extensions=None, + ), + epi_reg_al_mat=dict( + extensions=None, + ), + epi_tlrc_al=dict( + extensions=None, + ), + epi_vr_al_mat=dict( + extensions=None, + ), + epi_vr_motion=dict( + extensions=None, + ), + skullstrip=dict( + extensions=None, + ), ) outputs = AlignEpiAnatPy.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Allineate.py b/nipype/interfaces/afni/tests/test_auto_Allineate.py index 356e7d52a0..afe6c3f24d 100644 --- a/nipype/interfaces/afni/tests/test_auto_Allineate.py +++ b/nipype/interfaces/afni/tests/test_auto_Allineate.py @@ 
-10,20 +10,48 @@ def test_Allineate_inputs(): position=-1, xor=["out_file", "out_matrix", "out_param_file", "out_weight_file"], ), - args=dict(argstr="%s",), - autobox=dict(argstr="-autobox",), - automask=dict(argstr="-automask+%d",), - autoweight=dict(argstr="-autoweight%s",), - center_of_mass=dict(argstr="-cmass%s",), - check=dict(argstr="-check %s",), - convergence=dict(argstr="-conv %f",), - cost=dict(argstr="-cost %s",), - environ=dict(nohash=True, usedefault=True,), - epi=dict(argstr="-EPI",), - final_interpolation=dict(argstr="-final %s",), - fine_blur=dict(argstr="-fineblur %f",), + args=dict( + argstr="%s", + ), + autobox=dict( + argstr="-autobox", + ), + automask=dict( + argstr="-automask+%d", + ), + autoweight=dict( + argstr="-autoweight%s", + ), + center_of_mass=dict( + argstr="-cmass%s", + ), + check=dict( + argstr="-check %s", + ), + convergence=dict( + argstr="-conv %f", + ), + cost=dict( + argstr="-cost %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + epi=dict( + argstr="-EPI", + ), + final_interpolation=dict( + argstr="-final %s", + ), + fine_blur=dict( + argstr="-fineblur %f", + ), in_file=dict( - argstr="-source %s", copyfile=False, extensions=None, mandatory=True, + argstr="-source %s", + copyfile=False, + extensions=None, + mandatory=True, ), in_matrix=dict( argstr="-1Dmatrix_apply %s", @@ -32,23 +60,57 @@ def test_Allineate_inputs(): xor=["out_matrix"], ), in_param_file=dict( - argstr="-1Dparam_apply %s", extensions=None, xor=["out_param_file"], - ), - interpolation=dict(argstr="-interp %s",), - master=dict(argstr="-master %s", extensions=None,), - maxrot=dict(argstr="-maxrot %f",), - maxscl=dict(argstr="-maxscl %f",), - maxshf=dict(argstr="-maxshf %f",), - maxshr=dict(argstr="-maxshr %f",), - newgrid=dict(argstr="-newgrid %f",), - nmatch=dict(argstr="-nmatch %d",), - no_pad=dict(argstr="-nopad",), - nomask=dict(argstr="-nomask",), - num_threads=dict(nohash=True, usedefault=True,), - nwarp=dict(argstr="-nwarp %s",), - nwarp_fixdep=dict(argstr="-nwarp_fixdep%s...",), - nwarp_fixmot=dict(argstr="-nwarp_fixmot%s...",), - one_pass=dict(argstr="-onepass",), + argstr="-1Dparam_apply %s", + extensions=None, + xor=["out_param_file"], + ), + interpolation=dict( + argstr="-interp %s", + ), + master=dict( + argstr="-master %s", + extensions=None, + ), + maxrot=dict( + argstr="-maxrot %f", + ), + maxscl=dict( + argstr="-maxscl %f", + ), + maxshf=dict( + argstr="-maxshf %f", + ), + maxshr=dict( + argstr="-maxshr %f", + ), + newgrid=dict( + argstr="-newgrid %f", + ), + nmatch=dict( + argstr="-nmatch %d", + ), + no_pad=dict( + argstr="-nopad", + ), + nomask=dict( + argstr="-nomask", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + nwarp=dict( + argstr="-nwarp %s", + ), + nwarp_fixdep=dict( + argstr="-nwarp_fixdep%s...", + ), + nwarp_fixmot=dict( + argstr="-nwarp_fixmot%s...", + ), + one_pass=dict( + argstr="-onepass", + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -58,35 +120,80 @@ def test_Allineate_inputs(): xor=["allcostx"], ), out_matrix=dict( - argstr="-1Dmatrix_save %s", extensions=None, xor=["in_matrix", "allcostx"], + argstr="-1Dmatrix_save %s", + extensions=None, + xor=["in_matrix", "allcostx"], ), out_param_file=dict( argstr="-1Dparam_save %s", extensions=None, xor=["in_param_file", "allcostx"], ), - out_weight_file=dict(argstr="-wtprefix %s", extensions=None, xor=["allcostx"],), + out_weight_file=dict( + argstr="-wtprefix %s", + extensions=None, + xor=["allcostx"], + ), outputtype=dict(), - 
overwrite=dict(argstr="-overwrite",), - quiet=dict(argstr="-quiet",), - reference=dict(argstr="-base %s", extensions=None,), - replacebase=dict(argstr="-replacebase",), - replacemeth=dict(argstr="-replacemeth %s",), - source_automask=dict(argstr="-source_automask+%d",), - source_mask=dict(argstr="-source_mask %s", extensions=None,), - two_best=dict(argstr="-twobest %d",), - two_blur=dict(argstr="-twoblur %f",), - two_first=dict(argstr="-twofirst",), - two_pass=dict(argstr="-twopass",), - usetemp=dict(argstr="-usetemp",), - verbose=dict(argstr="-verb",), - warp_type=dict(argstr="-warp %s",), - warpfreeze=dict(argstr="-warpfreeze",), - weight=dict(argstr="-weight %s",), + overwrite=dict( + argstr="-overwrite", + ), + quiet=dict( + argstr="-quiet", + ), + reference=dict( + argstr="-base %s", + extensions=None, + ), + replacebase=dict( + argstr="-replacebase", + ), + replacemeth=dict( + argstr="-replacemeth %s", + ), + source_automask=dict( + argstr="-source_automask+%d", + ), + source_mask=dict( + argstr="-source_mask %s", + extensions=None, + ), + two_best=dict( + argstr="-twobest %d", + ), + two_blur=dict( + argstr="-twoblur %f", + ), + two_first=dict( + argstr="-twofirst", + ), + two_pass=dict( + argstr="-twopass", + ), + usetemp=dict( + argstr="-usetemp", + ), + verbose=dict( + argstr="-verb", + ), + warp_type=dict( + argstr="-warp %s", + ), + warpfreeze=dict( + argstr="-warpfreeze", + ), + weight=dict( + argstr="-weight %s", + ), weight_file=dict( - argstr="-weight %s", deprecated="1.0.0", extensions=None, new_name="weight", + argstr="-weight %s", + deprecated="1.0.0", + extensions=None, + new_name="weight", + ), + zclip=dict( + argstr="-zclip", ), - zclip=dict(argstr="-zclip",), ) inputs = Allineate.input_spec() @@ -97,11 +204,21 @@ def test_Allineate_inputs(): def test_Allineate_outputs(): output_map = dict( - allcostx=dict(extensions=None,), - out_file=dict(extensions=None,), - out_matrix=dict(extensions=None,), - out_param_file=dict(extensions=None,), - out_weight_file=dict(extensions=None,), + allcostx=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + out_matrix=dict( + extensions=None, + ), + out_param_file=dict( + extensions=None, + ), + out_weight_file=dict( + extensions=None, + ), ) outputs = Allineate.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py b/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py index a2b13596e6..eebfc73b6b 100644 --- a/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py +++ b/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py @@ -4,13 +4,26 @@ def test_AutoTLRC_inputs(): input_map = dict( - args=dict(argstr="%s",), - base=dict(argstr="-base %s", mandatory=True,), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + base=dict( + argstr="-base %s", + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="-input %s", copyfile=False, extensions=None, mandatory=True, + argstr="-input %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + no_ss=dict( + argstr="-no_ss", ), - no_ss=dict(argstr="-no_ss",), outputtype=dict(), ) inputs = AutoTLRC.input_spec() @@ -21,7 +34,11 @@ def test_AutoTLRC_inputs(): def test_AutoTLRC_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = AutoTLRC.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py 
b/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py index 344ec503ce..14c59cba0c 100644 --- a/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py +++ b/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py @@ -4,18 +4,40 @@ def test_AutoTcorrelate_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - eta2=dict(argstr="-eta2",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + eta2=dict( + argstr="-eta2", + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + mask_only_targets=dict( + argstr="-mask_only_targets", + xor=["mask_source"], ), - mask=dict(argstr="-mask %s", extensions=None,), - mask_only_targets=dict(argstr="-mask_only_targets", xor=["mask_source"],), mask_source=dict( - argstr="-mask_source %s", extensions=None, xor=["mask_only_targets"], + argstr="-mask_source %s", + extensions=None, + xor=["mask_only_targets"], + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - num_threads=dict(nohash=True, usedefault=True,), out_file=dict( argstr="-prefix %s", extensions=None, @@ -23,7 +45,9 @@ def test_AutoTcorrelate_inputs(): name_template="%s_similarity_matrix.1D", ), outputtype=dict(), - polort=dict(argstr="-polort %d",), + polort=dict( + argstr="-polort %d", + ), ) inputs = AutoTcorrelate.input_spec() @@ -33,7 +57,11 @@ def test_AutoTcorrelate_inputs(): def test_AutoTcorrelate_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = AutoTcorrelate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Autobox.py b/nipype/interfaces/afni/tests/test_auto_Autobox.py index 91eca4d811..8a13b14742 100644 --- a/nipype/interfaces/afni/tests/test_auto_Autobox.py +++ b/nipype/interfaces/afni/tests/test_auto_Autobox.py @@ -4,13 +4,26 @@ def test_Autobox_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="-input %s", copyfile=False, extensions=None, mandatory=True, + argstr="-input %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + no_clustering=dict( + argstr="-noclust", + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - no_clustering=dict(argstr="-noclust",), - num_threads=dict(nohash=True, usedefault=True,), out_file=dict( argstr="-prefix %s", extensions=None, @@ -18,7 +31,9 @@ def test_Autobox_inputs(): name_template="%s_autobox", ), outputtype=dict(), - padding=dict(argstr="-npad %d",), + padding=dict( + argstr="-npad %d", + ), ) inputs = Autobox.input_spec() @@ -29,7 +44,9 @@ def test_Autobox_inputs(): def test_Autobox_outputs(): output_map = dict( - out_file=dict(extensions=None,), + out_file=dict( + extensions=None, + ), x_max=dict(), x_min=dict(), y_max=dict(), diff --git a/nipype/interfaces/afni/tests/test_auto_Automask.py b/nipype/interfaces/afni/tests/test_auto_Automask.py index 27f8040695..1c2a3c4ee9 100644 --- a/nipype/interfaces/afni/tests/test_auto_Automask.py +++ b/nipype/interfaces/afni/tests/test_auto_Automask.py @@ -4,21 +4,39 @@ def test_Automask_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), 
brain_file=dict( argstr="-apply_prefix %s", extensions=None, name_source="in_file", name_template="%s_masked", ), - clfrac=dict(argstr="-clfrac %s",), - dilate=dict(argstr="-dilate %s",), - environ=dict(nohash=True, usedefault=True,), - erode=dict(argstr="-erode %s",), + clfrac=dict( + argstr="-clfrac %s", + ), + dilate=dict( + argstr="-dilate %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + erode=dict( + argstr="-erode %s", + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - num_threads=dict(nohash=True, usedefault=True,), out_file=dict( argstr="-prefix %s", extensions=None, @@ -36,7 +54,12 @@ def test_Automask_inputs(): def test_Automask_outputs(): output_map = dict( - brain_file=dict(extensions=None,), out_file=dict(extensions=None,), + brain_file=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), ) outputs = Automask.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Axialize.py b/nipype/interfaces/afni/tests/test_auto_Axialize.py index 2c4fafbb5b..bac640d601 100644 --- a/nipype/interfaces/afni/tests/test_auto_Axialize.py +++ b/nipype/interfaces/afni/tests/test_auto_Axialize.py @@ -4,15 +4,35 @@ def test_Axialize_inputs(): input_map = dict( - args=dict(argstr="%s",), - axial=dict(argstr="-axial", xor=["coronal", "sagittal"],), - coronal=dict(argstr="-coronal", xor=["sagittal", "axial"],), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + axial=dict( + argstr="-axial", + xor=["coronal", "sagittal"], + ), + coronal=dict( + argstr="-coronal", + xor=["sagittal", "axial"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-2, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-2, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + orientation=dict( + argstr="-orient %s", ), - num_threads=dict(nohash=True, usedefault=True,), - orientation=dict(argstr="-orient %s",), out_file=dict( argstr="-prefix %s", extensions=None, @@ -20,8 +40,13 @@ def test_Axialize_inputs(): name_template="%s_axialize", ), outputtype=dict(), - sagittal=dict(argstr="-sagittal", xor=["coronal", "axial"],), - verb=dict(argstr="-verb",), + sagittal=dict( + argstr="-sagittal", + xor=["coronal", "axial"], + ), + verb=dict( + argstr="-verb", + ), ) inputs = Axialize.input_spec() @@ -31,7 +56,11 @@ def test_Axialize_inputs(): def test_Axialize_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Axialize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Bandpass.py b/nipype/interfaces/afni/tests/test_auto_Bandpass.py index b17a8433ca..8ae9966240 100644 --- a/nipype/interfaces/afni/tests/test_auto_Bandpass.py +++ b/nipype/interfaces/afni/tests/test_auto_Bandpass.py @@ -4,25 +4,70 @@ def test_Bandpass_inputs(): input_map = dict( - args=dict(argstr="%s",), - automask=dict(argstr="-automask",), - blur=dict(argstr="-blur %f",), - despike=dict(argstr="-despike",), - environ=dict(nohash=True, usedefault=True,), - highpass=dict(argstr="%f", mandatory=True, position=-3,), + args=dict( + argstr="%s", + ), + automask=dict( + argstr="-automask", + ), + blur=dict( + argstr="-blur 
%f", + ), + despike=dict( + argstr="-despike", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + highpass=dict( + argstr="%f", + mandatory=True, + position=-3, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, - ), - localPV=dict(argstr="-localPV %f",), - lowpass=dict(argstr="%f", mandatory=True, position=-2,), - mask=dict(argstr="-mask %s", extensions=None, position=2,), - nfft=dict(argstr="-nfft %d",), - no_detrend=dict(argstr="-nodetrend",), - normalize=dict(argstr="-norm",), - notrans=dict(argstr="-notrans",), - num_threads=dict(nohash=True, usedefault=True,), - orthogonalize_dset=dict(argstr="-dsort %s", extensions=None,), - orthogonalize_file=dict(argstr="-ort %s",), + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + localPV=dict( + argstr="-localPV %f", + ), + lowpass=dict( + argstr="%f", + mandatory=True, + position=-2, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + position=2, + ), + nfft=dict( + argstr="-nfft %d", + ), + no_detrend=dict( + argstr="-nodetrend", + ), + normalize=dict( + argstr="-norm", + ), + notrans=dict( + argstr="-notrans", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + orthogonalize_dset=dict( + argstr="-dsort %s", + extensions=None, + ), + orthogonalize_file=dict( + argstr="-ort %s", + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -31,7 +76,9 @@ def test_Bandpass_inputs(): position=1, ), outputtype=dict(), - tr=dict(argstr="-dt %f",), + tr=dict( + argstr="-dt %f", + ), ) inputs = Bandpass.input_spec() @@ -41,7 +88,11 @@ def test_Bandpass_inputs(): def test_Bandpass_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Bandpass.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_BlurInMask.py b/nipype/interfaces/afni/tests/test_auto_BlurInMask.py index ec44fed3ee..91114611dc 100644 --- a/nipype/interfaces/afni/tests/test_auto_BlurInMask.py +++ b/nipype/interfaces/afni/tests/test_auto_BlurInMask.py @@ -4,11 +4,23 @@ def test_BlurInMask_inputs(): input_map = dict( - args=dict(argstr="%s",), - automask=dict(argstr="-automask",), - environ=dict(nohash=True, usedefault=True,), - float_out=dict(argstr="-float",), - fwhm=dict(argstr="-FWHM %f", mandatory=True,), + args=dict( + argstr="%s", + ), + automask=dict( + argstr="-automask", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + float_out=dict( + argstr="-float", + ), + fwhm=dict( + argstr="-FWHM %f", + mandatory=True, + ), in_file=dict( argstr="-input %s", copyfile=False, @@ -16,10 +28,22 @@ def test_BlurInMask_inputs(): mandatory=True, position=1, ), - mask=dict(argstr="-mask %s", extensions=None,), - multimask=dict(argstr="-Mmask %s", extensions=None,), - num_threads=dict(nohash=True, usedefault=True,), - options=dict(argstr="%s", position=2,), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + multimask=dict( + argstr="-Mmask %s", + extensions=None, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + options=dict( + argstr="%s", + position=2, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -28,7 +52,9 @@ def test_BlurInMask_inputs(): position=-1, ), outputtype=dict(), - preserve=dict(argstr="-preserve",), + preserve=dict( + argstr="-preserve", + ), ) inputs = BlurInMask.input_spec() @@ -38,7 +64,11 @@ def test_BlurInMask_inputs(): def test_BlurInMask_outputs(): - output_map = 
dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = BlurInMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py b/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py index 87788b8b3d..f164ae815e 100644 --- a/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py +++ b/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py @@ -4,15 +4,39 @@ def test_BlurToFWHM_inputs(): input_map = dict( - args=dict(argstr="%s",), - automask=dict(argstr="-automask",), - blurmaster=dict(argstr="-blurmaster %s", extensions=None,), - environ=dict(nohash=True, usedefault=True,), - fwhm=dict(argstr="-FWHM %f",), - fwhmxy=dict(argstr="-FWHMxy %f",), - in_file=dict(argstr="-input %s", extensions=None, mandatory=True,), - mask=dict(argstr="-mask %s", extensions=None,), - num_threads=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + automask=dict( + argstr="-automask", + ), + blurmaster=dict( + argstr="-blurmaster %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fwhm=dict( + argstr="-FWHM %f", + ), + fwhmxy=dict( + argstr="-FWHMxy %f", + ), + in_file=dict( + argstr="-input %s", + extensions=None, + mandatory=True, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -29,7 +53,11 @@ def test_BlurToFWHM_inputs(): def test_BlurToFWHM_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = BlurToFWHM.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_BrickStat.py b/nipype/interfaces/afni/tests/test_auto_BrickStat.py index 0d3bc11c21..a366953a5b 100644 --- a/nipype/interfaces/afni/tests/test_auto_BrickStat.py +++ b/nipype/interfaces/afni/tests/test_auto_BrickStat.py @@ -4,17 +4,46 @@ def test_BrickStat_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), - mask=dict(argstr="-mask %s", extensions=None, position=2,), - max=dict(argstr="-max",), - mean=dict(argstr="-mean",), - min=dict(argstr="-min", position=1,), - percentile=dict(argstr="-percentile %.3f %.3f %.3f",), - slow=dict(argstr="-slow",), - sum=dict(argstr="-sum",), - var=dict(argstr="-var",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + position=2, + ), + max=dict( + argstr="-max", + ), + mean=dict( + argstr="-mean", + ), + min=dict( + argstr="-min", + position=1, + ), + percentile=dict( + argstr="-percentile %.3f %.3f %.3f", + ), + slow=dict( + argstr="-slow", + ), + sum=dict( + argstr="-sum", + ), + var=dict( + argstr="-var", + ), ) inputs = BrickStat.input_spec() @@ -24,7 +53,9 @@ def test_BrickStat_inputs(): def test_BrickStat_outputs(): - output_map = dict(min_val=dict(),) + output_map = dict( + min_val=dict(), + ) outputs = BrickStat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Bucket.py b/nipype/interfaces/afni/tests/test_auto_Bucket.py index de301feaed..34dbd18bc2 100644 --- 
a/nipype/interfaces/afni/tests/test_auto_Bucket.py +++ b/nipype/interfaces/afni/tests/test_auto_Bucket.py @@ -4,11 +4,27 @@ def test_Bucket_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", mandatory=True, position=-1,), - num_threads=dict(nohash=True, usedefault=True,), - out_file=dict(argstr="-prefix %s", extensions=None, name_template="buck",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr="-prefix %s", + extensions=None, + name_template="buck", + ), outputtype=dict(), ) inputs = Bucket.input_spec() @@ -19,7 +35,11 @@ def test_Bucket_inputs(): def test_Bucket_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Bucket.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Calc.py b/nipype/interfaces/afni/tests/test_auto_Calc.py index 5b49623773..dc50380317 100644 --- a/nipype/interfaces/afni/tests/test_auto_Calc.py +++ b/nipype/interfaces/afni/tests/test_auto_Calc.py @@ -4,14 +4,42 @@ def test_Calc_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - expr=dict(argstr='-expr "%s"', mandatory=True, position=3,), - in_file_a=dict(argstr="-a %s", extensions=None, mandatory=True, position=0,), - in_file_b=dict(argstr="-b %s", extensions=None, position=1,), - in_file_c=dict(argstr="-c %s", extensions=None, position=2,), - num_threads=dict(nohash=True, usedefault=True,), - other=dict(argstr="", extensions=None,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + expr=dict( + argstr='-expr "%s"', + mandatory=True, + position=3, + ), + in_file_a=dict( + argstr="-a %s", + extensions=None, + mandatory=True, + position=0, + ), + in_file_b=dict( + argstr="-b %s", + extensions=None, + position=1, + ), + in_file_c=dict( + argstr="-c %s", + extensions=None, + position=2, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + other=dict( + argstr="", + extensions=None, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -19,10 +47,16 @@ def test_Calc_inputs(): name_template="%s_calc", ), outputtype=dict(), - overwrite=dict(argstr="-overwrite",), + overwrite=dict( + argstr="-overwrite", + ), single_idx=dict(), - start_idx=dict(requires=["stop_idx"],), - stop_idx=dict(requires=["start_idx"],), + start_idx=dict( + requires=["stop_idx"], + ), + stop_idx=dict( + requires=["start_idx"], + ), ) inputs = Calc.input_spec() @@ -32,7 +66,11 @@ def test_Calc_inputs(): def test_Calc_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Calc.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Cat.py b/nipype/interfaces/afni/tests/test_auto_Cat.py index 07a1d2979c..e5c76b34b1 100644 --- a/nipype/interfaces/afni/tests/test_auto_Cat.py +++ b/nipype/interfaces/afni/tests/test_auto_Cat.py @@ -4,12 +4,28 @@ def test_Cat_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_files=dict(argstr="%s", mandatory=True, position=-2,), - keepfree=dict(argstr="-nonfixed",), - num_threads=dict(nohash=True, 
usedefault=True,), - omitconst=dict(argstr="-nonconst",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr="%s", + mandatory=True, + position=-2, + ), + keepfree=dict( + argstr="-nonfixed", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + omitconst=dict( + argstr="-nonconst", + ), out_cint=dict( xor=["out_format", "out_nice", "out_double", "out_fint", "out_int"], ), @@ -41,8 +57,12 @@ def test_Cat_inputs(): xor=["out_format", "out_int", "out_double", "out_fint", "out_cint"], ), outputtype=dict(), - sel=dict(argstr="-sel %s",), - stack=dict(argstr="-stack",), + sel=dict( + argstr="-sel %s", + ), + stack=dict( + argstr="-stack", + ), ) inputs = Cat.input_spec() @@ -52,7 +72,11 @@ def test_Cat_inputs(): def test_Cat_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Cat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_CatMatvec.py b/nipype/interfaces/afni/tests/test_auto_CatMatvec.py index f1716f6084..6b6c2630f6 100644 --- a/nipype/interfaces/afni/tests/test_auto_CatMatvec.py +++ b/nipype/interfaces/afni/tests/test_auto_CatMatvec.py @@ -4,13 +4,34 @@ def test_CatMatvec_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - fourxfour=dict(argstr="-4x4", xor=["matrix", "oneline"],), - in_file=dict(argstr="%s", mandatory=True, position=-2,), - matrix=dict(argstr="-MATRIX", xor=["oneline", "fourxfour"],), - num_threads=dict(nohash=True, usedefault=True,), - oneline=dict(argstr="-ONELINE", xor=["matrix", "fourxfour"],), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fourxfour=dict( + argstr="-4x4", + xor=["matrix", "oneline"], + ), + in_file=dict( + argstr="%s", + mandatory=True, + position=-2, + ), + matrix=dict( + argstr="-MATRIX", + xor=["oneline", "fourxfour"], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + oneline=dict( + argstr="-ONELINE", + xor=["matrix", "fourxfour"], + ), out_file=dict( argstr=" > %s", extensions=None, @@ -30,7 +51,11 @@ def test_CatMatvec_inputs(): def test_CatMatvec_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = CatMatvec.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_CenterMass.py b/nipype/interfaces/afni/tests/test_auto_CenterMass.py index 07975af563..7ec95938b4 100644 --- a/nipype/interfaces/afni/tests/test_auto_CenterMass.py +++ b/nipype/interfaces/afni/tests/test_auto_CenterMass.py @@ -4,9 +4,15 @@ def test_CenterMass_inputs(): input_map = dict( - all_rois=dict(argstr="-all_rois",), - args=dict(argstr="%s",), - automask=dict(argstr="-automask",), + all_rois=dict( + argstr="-all_rois", + ), + args=dict( + argstr="%s", + ), + automask=dict( + argstr="-automask", + ), cm_file=dict( argstr="> %s", extensions=None, @@ -16,14 +22,30 @@ def test_CenterMass_inputs(): name_template="%s_cm.out", position=-1, ), - environ=dict(nohash=True, usedefault=True,), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-2, + argstr="%s", + copyfile=True, + extensions=None, + mandatory=True, + position=-2, + ), + local_ijk=dict( + argstr="-local_ijk", + ), + mask_file=dict( + argstr="-mask %s", + 
extensions=None, + ), + roi_vals=dict( + argstr="-roi_vals %s", + ), + set_cm=dict( + argstr="-set %f %f %f", ), - local_ijk=dict(argstr="-local_ijk",), - mask_file=dict(argstr="-mask %s", extensions=None,), - roi_vals=dict(argstr="-roi_vals %s",), - set_cm=dict(argstr="-set %f %f %f",), ) inputs = CenterMass.input_spec() @@ -34,7 +56,13 @@ def test_CenterMass_inputs(): def test_CenterMass_outputs(): output_map = dict( - cm=dict(), cm_file=dict(extensions=None,), out_file=dict(extensions=None,), + cm=dict(), + cm_file=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), ) outputs = CenterMass.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_ClipLevel.py b/nipype/interfaces/afni/tests/test_auto_ClipLevel.py index 576ea68f89..7a324fe7d4 100644 --- a/nipype/interfaces/afni/tests/test_auto_ClipLevel.py +++ b/nipype/interfaces/afni/tests/test_auto_ClipLevel.py @@ -4,12 +4,34 @@ def test_ClipLevel_inputs(): input_map = dict( - args=dict(argstr="%s",), - doall=dict(argstr="-doall", position=3, xor="grad",), - environ=dict(nohash=True, usedefault=True,), - grad=dict(argstr="-grad %s", extensions=None, position=3, xor="doall",), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), - mfrac=dict(argstr="-mfrac %s", position=2,), + args=dict( + argstr="%s", + ), + doall=dict( + argstr="-doall", + position=3, + xor="grad", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad=dict( + argstr="-grad %s", + extensions=None, + position=3, + xor="doall", + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), + mfrac=dict( + argstr="-mfrac %s", + position=2, + ), ) inputs = ClipLevel.input_spec() @@ -19,7 +41,9 @@ def test_ClipLevel_inputs(): def test_ClipLevel_outputs(): - output_map = dict(clip_val=dict(),) + output_map = dict( + clip_val=dict(), + ) outputs = ClipLevel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_ConvertDset.py b/nipype/interfaces/afni/tests/test_auto_ConvertDset.py index 249fe47843..226eac97b5 100644 --- a/nipype/interfaces/afni/tests/test_auto_ConvertDset.py +++ b/nipype/interfaces/afni/tests/test_auto_ConvertDset.py @@ -4,14 +4,34 @@ def test_ConvertDset_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-input %s", extensions=None, mandatory=True, position=-2,), - num_threads=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-input %s", + extensions=None, + mandatory=True, + position=-2, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( - argstr="-prefix %s", extensions=None, mandatory=True, position=-1, + argstr="-prefix %s", + extensions=None, + mandatory=True, + position=-1, + ), + out_type=dict( + argstr="-o_%s", + mandatory=True, + position=0, ), - out_type=dict(argstr="-o_%s", mandatory=True, position=0,), outputtype=dict(), ) inputs = ConvertDset.input_spec() @@ -22,7 +42,11 @@ def test_ConvertDset_inputs(): def test_ConvertDset_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ConvertDset.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Copy.py b/nipype/interfaces/afni/tests/test_auto_Copy.py index a601202c48..e96592b184 100644 --- 
a/nipype/interfaces/afni/tests/test_auto_Copy.py +++ b/nipype/interfaces/afni/tests/test_auto_Copy.py @@ -4,12 +4,24 @@ def test_Copy_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-2, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-2, + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - num_threads=dict(nohash=True, usedefault=True,), out_file=dict( argstr="%s", extensions=None, @@ -18,7 +30,9 @@ def test_Copy_inputs(): position=-1, ), outputtype=dict(), - verbose=dict(argstr="-verb",), + verbose=dict( + argstr="-verb", + ), ) inputs = Copy.input_spec() @@ -28,7 +42,11 @@ def test_Copy_inputs(): def test_Copy_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Copy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Deconvolve.py b/nipype/interfaces/afni/tests/test_auto_Deconvolve.py index c03fafcf46..c4195807eb 100644 --- a/nipype/interfaces/afni/tests/test_auto_Deconvolve.py +++ b/nipype/interfaces/afni/tests/test_auto_Deconvolve.py @@ -4,55 +4,169 @@ def test_Deconvolve_inputs(): input_map = dict( - STATmask=dict(argstr="-STATmask %s", extensions=None,), - TR_1D=dict(argstr="-TR_1D %f",), - allzero_OK=dict(argstr="-allzero_OK",), - args=dict(argstr="%s",), - automask=dict(argstr="-automask",), - cbucket=dict(argstr="-cbucket %s",), - censor=dict(argstr="-censor %s", extensions=None,), - dmbase=dict(argstr="-dmbase",), - dname=dict(argstr="-D%s=%s",), - environ=dict(nohash=True, usedefault=True,), - force_TR=dict(argstr="-force_TR %f", position=0,), - fout=dict(argstr="-fout",), - global_times=dict(argstr="-global_times", xor=["local_times"],), - glt_label=dict(argstr="-glt_label %d %s...", position=-1, requires=["gltsym"],), - gltsym=dict(argstr="-gltsym 'SYM: %s'...", position=-2,), - goforit=dict(argstr="-GOFORIT %i",), - in_files=dict(argstr="-input %s", copyfile=False, position=1, sep=" ",), - input1D=dict(argstr="-input1D %s", extensions=None,), - legendre=dict(argstr="-legendre",), - local_times=dict(argstr="-local_times", xor=["global_times"],), - mask=dict(argstr="-mask %s", extensions=None,), - noblock=dict(argstr="-noblock",), - nocond=dict(argstr="-nocond",), - nodmbase=dict(argstr="-nodmbase",), - nofdr=dict(argstr="-noFDR",), - nolegendre=dict(argstr="-nolegendre",), - nosvd=dict(argstr="-nosvd",), - num_glt=dict(argstr="-num_glt %d", position=-3,), - num_stimts=dict(argstr="-num_stimts %d", position=-6,), - num_threads=dict(argstr="-jobs %d", nohash=True,), - ortvec=dict(argstr="-ortvec %s %s",), - out_file=dict(argstr="-bucket %s", extensions=None,), + STATmask=dict( + argstr="-STATmask %s", + extensions=None, + ), + TR_1D=dict( + argstr="-TR_1D %f", + ), + allzero_OK=dict( + argstr="-allzero_OK", + ), + args=dict( + argstr="%s", + ), + automask=dict( + argstr="-automask", + ), + cbucket=dict( + argstr="-cbucket %s", + ), + censor=dict( + argstr="-censor %s", + extensions=None, + ), + dmbase=dict( + argstr="-dmbase", + ), + dname=dict( + argstr="-D%s=%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + force_TR=dict( + argstr="-force_TR %f", + position=0, + ), + fout=dict( + argstr="-fout", + ), + global_times=dict( + 
argstr="-global_times", + xor=["local_times"], + ), + glt_label=dict( + argstr="-glt_label %d %s...", + position=-1, + requires=["gltsym"], + ), + gltsym=dict( + argstr="-gltsym 'SYM: %s'...", + position=-2, + ), + goforit=dict( + argstr="-GOFORIT %i", + ), + in_files=dict( + argstr="-input %s", + copyfile=False, + position=1, + sep=" ", + ), + input1D=dict( + argstr="-input1D %s", + extensions=None, + ), + legendre=dict( + argstr="-legendre", + ), + local_times=dict( + argstr="-local_times", + xor=["global_times"], + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + noblock=dict( + argstr="-noblock", + ), + nocond=dict( + argstr="-nocond", + ), + nodmbase=dict( + argstr="-nodmbase", + ), + nofdr=dict( + argstr="-noFDR", + ), + nolegendre=dict( + argstr="-nolegendre", + ), + nosvd=dict( + argstr="-nosvd", + ), + num_glt=dict( + argstr="-num_glt %d", + position=-3, + ), + num_stimts=dict( + argstr="-num_stimts %d", + position=-6, + ), + num_threads=dict( + argstr="-jobs %d", + nohash=True, + ), + ortvec=dict( + argstr="-ortvec %s %s", + ), + out_file=dict( + argstr="-bucket %s", + extensions=None, + ), outputtype=dict(), - polort=dict(argstr="-polort %d",), - rmsmin=dict(argstr="-rmsmin %f",), - rout=dict(argstr="-rout",), - sat=dict(argstr="-sat", xor=["trans"],), - singvals=dict(argstr="-singvals",), + polort=dict( + argstr="-polort %d", + ), + rmsmin=dict( + argstr="-rmsmin %f", + ), + rout=dict( + argstr="-rout", + ), + sat=dict( + argstr="-sat", + xor=["trans"], + ), + singvals=dict( + argstr="-singvals", + ), stim_label=dict( - argstr="-stim_label %d %s...", position=-4, requires=["stim_times"], - ), - stim_times=dict(argstr="-stim_times %d %s '%s'...", position=-5,), - stim_times_subtract=dict(argstr="-stim_times_subtract %f",), - svd=dict(argstr="-svd",), - tout=dict(argstr="-tout",), - trans=dict(argstr="-trans", xor=["sat"],), - vout=dict(argstr="-vout",), - x1D=dict(argstr="-x1D %s", extensions=None,), - x1D_stop=dict(argstr="-x1D_stop",), + argstr="-stim_label %d %s...", + position=-4, + requires=["stim_times"], + ), + stim_times=dict( + argstr="-stim_times %d %s '%s'...", + position=-5, + ), + stim_times_subtract=dict( + argstr="-stim_times_subtract %f", + ), + svd=dict( + argstr="-svd", + ), + tout=dict( + argstr="-tout", + ), + trans=dict( + argstr="-trans", + xor=["sat"], + ), + vout=dict( + argstr="-vout", + ), + x1D=dict( + argstr="-x1D %s", + extensions=None, + ), + x1D_stop=dict( + argstr="-x1D_stop", + ), ) inputs = Deconvolve.input_spec() @@ -63,10 +177,18 @@ def test_Deconvolve_inputs(): def test_Deconvolve_outputs(): output_map = dict( - cbucket=dict(extensions=None,), - out_file=dict(extensions=None,), - reml_script=dict(extensions=None,), - x1D=dict(extensions=None,), + cbucket=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + reml_script=dict( + extensions=None, + ), + x1D=dict( + extensions=None, + ), ) outputs = Deconvolve.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py b/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py index 0fb10a0713..afbc5a7d4f 100644 --- a/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py +++ b/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py @@ -4,16 +4,37 @@ def test_DegreeCentrality_inputs(): input_map = dict( - args=dict(argstr="%s",), - autoclip=dict(argstr="-autoclip",), - automask=dict(argstr="-automask",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + autoclip=dict( + argstr="-autoclip", + ), + 
automask=dict( + argstr="-automask", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + oned_file=dict( + argstr="-out1D %s", ), - mask=dict(argstr="-mask %s", extensions=None,), - num_threads=dict(nohash=True, usedefault=True,), - oned_file=dict(argstr="-out1D %s",), out_file=dict( argstr="-prefix %s", extensions=None, @@ -21,9 +42,15 @@ def test_DegreeCentrality_inputs(): name_template="%s_afni", ), outputtype=dict(), - polort=dict(argstr="-polort %d",), - sparsity=dict(argstr="-sparsity %f",), - thresh=dict(argstr="-thresh %f",), + polort=dict( + argstr="-polort %d", + ), + sparsity=dict( + argstr="-sparsity %f", + ), + thresh=dict( + argstr="-thresh %f", + ), ) inputs = DegreeCentrality.input_spec() @@ -34,7 +61,12 @@ def test_DegreeCentrality_inputs(): def test_DegreeCentrality_outputs(): output_map = dict( - oned_file=dict(extensions=None,), out_file=dict(extensions=None,), + oned_file=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), ) outputs = DegreeCentrality.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Despike.py b/nipype/interfaces/afni/tests/test_auto_Despike.py index 00a6666894..8835dd7e07 100644 --- a/nipype/interfaces/afni/tests/test_auto_Despike.py +++ b/nipype/interfaces/afni/tests/test_auto_Despike.py @@ -4,12 +4,24 @@ def test_Despike_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - num_threads=dict(nohash=True, usedefault=True,), out_file=dict( argstr="-prefix %s", extensions=None, @@ -26,7 +38,11 @@ def test_Despike_inputs(): def test_Despike_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Despike.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Detrend.py b/nipype/interfaces/afni/tests/test_auto_Detrend.py index 275c45208b..5edbdd74ee 100644 --- a/nipype/interfaces/afni/tests/test_auto_Detrend.py +++ b/nipype/interfaces/afni/tests/test_auto_Detrend.py @@ -4,12 +4,24 @@ def test_Detrend_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - num_threads=dict(nohash=True, usedefault=True,), out_file=dict( argstr="-prefix %s", extensions=None, @@ -26,7 +38,11 @@ def test_Detrend_inputs(): def test_Detrend_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Detrend.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/afni/tests/test_auto_Dot.py b/nipype/interfaces/afni/tests/test_auto_Dot.py index d1c7812cdd..9cf8083ab9 100644 --- a/nipype/interfaces/afni/tests/test_auto_Dot.py +++ b/nipype/interfaces/afni/tests/test_auto_Dot.py @@ -4,24 +4,64 @@ def test_Dot_inputs(): input_map = dict( - args=dict(argstr="%s",), - demean=dict(argstr="-demean",), - docoef=dict(argstr="-docoef",), - docor=dict(argstr="-docor",), - dodice=dict(argstr="-dodice",), - dodot=dict(argstr="-dodot",), - doeta2=dict(argstr="-doeta2",), - dosums=dict(argstr="-dosums",), - environ=dict(nohash=True, usedefault=True,), - full=dict(argstr="-full",), - in_files=dict(argstr="%s ...", position=-2,), - mask=dict(argstr="-mask %s", extensions=None,), - mrange=dict(argstr="-mrange %s %s",), - num_threads=dict(nohash=True, usedefault=True,), - out_file=dict(argstr=" |& tee %s", extensions=None, position=-1,), + args=dict( + argstr="%s", + ), + demean=dict( + argstr="-demean", + ), + docoef=dict( + argstr="-docoef", + ), + docor=dict( + argstr="-docor", + ), + dodice=dict( + argstr="-dodice", + ), + dodot=dict( + argstr="-dodot", + ), + doeta2=dict( + argstr="-doeta2", + ), + dosums=dict( + argstr="-dosums", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + full=dict( + argstr="-full", + ), + in_files=dict( + argstr="%s ...", + position=-2, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + mrange=dict( + argstr="-mrange %s %s", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr=" |& tee %s", + extensions=None, + position=-1, + ), outputtype=dict(), - show_labels=dict(argstr="-show_labels",), - upper=dict(argstr="-upper",), + show_labels=dict( + argstr="-show_labels", + ), + upper=dict( + argstr="-upper", + ), ) inputs = Dot.input_spec() @@ -31,7 +71,11 @@ def test_Dot_inputs(): def test_Dot_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Dot.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_ECM.py b/nipype/interfaces/afni/tests/test_auto_ECM.py index 83d5508bae..030aaffe6a 100644 --- a/nipype/interfaces/afni/tests/test_auto_ECM.py +++ b/nipype/interfaces/afni/tests/test_auto_ECM.py @@ -4,20 +4,49 @@ def test_ECM_inputs(): input_map = dict( - args=dict(argstr="%s",), - autoclip=dict(argstr="-autoclip",), - automask=dict(argstr="-automask",), - environ=dict(nohash=True, usedefault=True,), - eps=dict(argstr="-eps %f",), - fecm=dict(argstr="-fecm",), - full=dict(argstr="-full",), + args=dict( + argstr="%s", + ), + autoclip=dict( + argstr="-autoclip", + ), + automask=dict( + argstr="-automask", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + eps=dict( + argstr="-eps %f", + ), + fecm=dict( + argstr="-fecm", + ), + full=dict( + argstr="-full", + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + max_iter=dict( + argstr="-max_iter %d", + ), + memory=dict( + argstr="-memory %f", + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - mask=dict(argstr="-mask %s", extensions=None,), - max_iter=dict(argstr="-max_iter %d",), - memory=dict(argstr="-memory %f",), - num_threads=dict(nohash=True, usedefault=True,), out_file=dict( argstr="-prefix %s", extensions=None, @@ -25,11 +54,21 @@ def test_ECM_inputs(): 
name_template="%s_afni", ), outputtype=dict(), - polort=dict(argstr="-polort %d",), - scale=dict(argstr="-scale %f",), - shift=dict(argstr="-shift %f",), - sparsity=dict(argstr="-sparsity %f",), - thresh=dict(argstr="-thresh %f",), + polort=dict( + argstr="-polort %d", + ), + scale=dict( + argstr="-scale %f", + ), + shift=dict( + argstr="-shift %f", + ), + sparsity=dict( + argstr="-sparsity %f", + ), + thresh=dict( + argstr="-thresh %f", + ), ) inputs = ECM.input_spec() @@ -39,7 +78,11 @@ def test_ECM_inputs(): def test_ECM_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ECM.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Edge3.py b/nipype/interfaces/afni/tests/test_auto_Edge3.py index 484920d596..45b49fd243 100644 --- a/nipype/interfaces/afni/tests/test_auto_Edge3.py +++ b/nipype/interfaces/afni/tests/test_auto_Edge3.py @@ -4,11 +4,24 @@ def test_Edge3_inputs(): input_map = dict( - args=dict(argstr="%s",), - datum=dict(argstr="-datum %s",), - environ=dict(nohash=True, usedefault=True,), - fscale=dict(argstr="-fscale", xor=["gscale", "nscale", "scale_floats"],), - gscale=dict(argstr="-gscale", xor=["fscale", "nscale", "scale_floats"],), + args=dict( + argstr="%s", + ), + datum=dict( + argstr="-datum %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fscale=dict( + argstr="-fscale", + xor=["gscale", "nscale", "scale_floats"], + ), + gscale=dict( + argstr="-gscale", + xor=["fscale", "nscale", "scale_floats"], + ), in_file=dict( argstr="-input %s", copyfile=False, @@ -16,14 +29,27 @@ def test_Edge3_inputs(): mandatory=True, position=0, ), - nscale=dict(argstr="-nscale", xor=["fscale", "gscale", "scale_floats"],), - num_threads=dict(nohash=True, usedefault=True,), - out_file=dict(argstr="-prefix %s", extensions=None, position=-1,), + nscale=dict( + argstr="-nscale", + xor=["fscale", "gscale", "scale_floats"], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr="-prefix %s", + extensions=None, + position=-1, + ), outputtype=dict(), scale_floats=dict( - argstr="-scale_floats %f", xor=["fscale", "gscale", "nscale"], + argstr="-scale_floats %f", + xor=["fscale", "gscale", "nscale"], + ), + verbose=dict( + argstr="-verbose", ), - verbose=dict(argstr="-verbose",), ) inputs = Edge3.input_spec() @@ -33,7 +59,11 @@ def test_Edge3_inputs(): def test_Edge3_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Edge3.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Eval.py b/nipype/interfaces/afni/tests/test_auto_Eval.py index 1bc22fcb6c..748bf05dfd 100644 --- a/nipype/interfaces/afni/tests/test_auto_Eval.py +++ b/nipype/interfaces/afni/tests/test_auto_Eval.py @@ -4,15 +4,45 @@ def test_Eval_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - expr=dict(argstr='-expr "%s"', mandatory=True, position=3,), - in_file_a=dict(argstr="-a %s", extensions=None, mandatory=True, position=0,), - in_file_b=dict(argstr="-b %s", extensions=None, position=1,), - in_file_c=dict(argstr="-c %s", extensions=None, position=2,), - num_threads=dict(nohash=True, usedefault=True,), - other=dict(argstr="", extensions=None,), - out1D=dict(argstr="-1D",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + 
usedefault=True, + ), + expr=dict( + argstr='-expr "%s"', + mandatory=True, + position=3, + ), + in_file_a=dict( + argstr="-a %s", + extensions=None, + mandatory=True, + position=0, + ), + in_file_b=dict( + argstr="-b %s", + extensions=None, + position=1, + ), + in_file_c=dict( + argstr="-c %s", + extensions=None, + position=2, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + other=dict( + argstr="", + extensions=None, + ), + out1D=dict( + argstr="-1D", + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -21,8 +51,12 @@ def test_Eval_inputs(): ), outputtype=dict(), single_idx=dict(), - start_idx=dict(requires=["stop_idx"],), - stop_idx=dict(requires=["start_idx"],), + start_idx=dict( + requires=["stop_idx"], + ), + stop_idx=dict( + requires=["start_idx"], + ), ) inputs = Eval.input_spec() @@ -32,7 +66,11 @@ def test_Eval_inputs(): def test_Eval_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Eval.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_FWHMx.py b/nipype/interfaces/afni/tests/test_auto_FWHMx.py index 1785a675b5..06151c569a 100644 --- a/nipype/interfaces/afni/tests/test_auto_FWHMx.py +++ b/nipype/interfaces/afni/tests/test_auto_FWHMx.py @@ -4,18 +4,53 @@ def test_FWHMx_inputs(): input_map = dict( - acf=dict(argstr="-acf", usedefault=True,), - args=dict(argstr="%s",), - arith=dict(argstr="-arith", xor=["geom"],), - automask=dict(argstr="-automask", usedefault=True,), - combine=dict(argstr="-combine",), - compat=dict(argstr="-compat",), - demed=dict(argstr="-demed", xor=["detrend"],), - detrend=dict(argstr="-detrend", usedefault=True, xor=["demed"],), - environ=dict(nohash=True, usedefault=True,), - geom=dict(argstr="-geom", xor=["arith"],), - in_file=dict(argstr="-input %s", extensions=None, mandatory=True,), - mask=dict(argstr="-mask %s", extensions=None,), + acf=dict( + argstr="-acf", + usedefault=True, + ), + args=dict( + argstr="%s", + ), + arith=dict( + argstr="-arith", + xor=["geom"], + ), + automask=dict( + argstr="-automask", + usedefault=True, + ), + combine=dict( + argstr="-combine", + ), + compat=dict( + argstr="-compat", + ), + demed=dict( + argstr="-demed", + xor=["detrend"], + ), + detrend=dict( + argstr="-detrend", + usedefault=True, + xor=["demed"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + geom=dict( + argstr="-geom", + xor=["arith"], + ), + in_file=dict( + argstr="-input %s", + extensions=None, + mandatory=True, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), out_detrend=dict( argstr="-detprefix %s", extensions=None, @@ -38,7 +73,9 @@ def test_FWHMx_inputs(): name_source="in_file", name_template="%s_subbricks.out", ), - unif=dict(argstr="-unif",), + unif=dict( + argstr="-unif", + ), ) inputs = FWHMx.input_spec() @@ -51,10 +88,18 @@ def test_FWHMx_outputs(): output_map = dict( acf_param=dict(), fwhm=dict(), - out_acf=dict(extensions=None,), - out_detrend=dict(extensions=None,), - out_file=dict(extensions=None,), - out_subbricks=dict(extensions=None,), + out_acf=dict( + extensions=None, + ), + out_detrend=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + out_subbricks=dict( + extensions=None, + ), ) outputs = FWHMx.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Fim.py b/nipype/interfaces/afni/tests/test_auto_Fim.py index 6a07de12e5..aea43391bc 100644 --- a/nipype/interfaces/afni/tests/test_auto_Fim.py +++ 
b/nipype/interfaces/afni/tests/test_auto_Fim.py @@ -4,11 +4,22 @@ def test_Fim_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - fim_thr=dict(argstr="-fim_thr %f", position=3,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fim_thr=dict( + argstr="-fim_thr %f", + position=3, + ), ideal_file=dict( - argstr="-ideal_file %s", extensions=None, mandatory=True, position=2, + argstr="-ideal_file %s", + extensions=None, + mandatory=True, + position=2, ), in_file=dict( argstr="-input %s", @@ -17,8 +28,14 @@ def test_Fim_inputs(): mandatory=True, position=1, ), - num_threads=dict(nohash=True, usedefault=True,), - out=dict(argstr="-out %s", position=4,), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out=dict( + argstr="-out %s", + position=4, + ), out_file=dict( argstr="-bucket %s", extensions=None, @@ -35,7 +52,11 @@ def test_Fim_inputs(): def test_Fim_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Fim.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Fourier.py b/nipype/interfaces/afni/tests/test_auto_Fourier.py index 0c648fb149..97764a2b9b 100644 --- a/nipype/interfaces/afni/tests/test_auto_Fourier.py +++ b/nipype/interfaces/afni/tests/test_auto_Fourier.py @@ -4,14 +4,32 @@ def test_Fourier_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - highpass=dict(argstr="-highpass %f", mandatory=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + highpass=dict( + argstr="-highpass %f", + mandatory=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + lowpass=dict( + argstr="-lowpass %f", + mandatory=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - lowpass=dict(argstr="-lowpass %f", mandatory=True,), - num_threads=dict(nohash=True, usedefault=True,), out_file=dict( argstr="-prefix %s", extensions=None, @@ -19,7 +37,9 @@ def test_Fourier_inputs(): name_template="%s_fourier", ), outputtype=dict(), - retrend=dict(argstr="-retrend",), + retrend=dict( + argstr="-retrend", + ), ) inputs = Fourier.input_spec() @@ -29,7 +49,11 @@ def test_Fourier_inputs(): def test_Fourier_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Fourier.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_GCOR.py b/nipype/interfaces/afni/tests/test_auto_GCOR.py index b63b269d6a..2e5f0f372d 100644 --- a/nipype/interfaces/afni/tests/test_auto_GCOR.py +++ b/nipype/interfaces/afni/tests/test_auto_GCOR.py @@ -4,8 +4,13 @@ def test_GCOR_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( argstr="-input %s", copyfile=False, @@ -13,9 +18,17 @@ def test_GCOR_inputs(): mandatory=True, position=-1, ), - mask=dict(argstr="-mask %s", copyfile=False, extensions=None,), - nfirst=dict(argstr="-nfirst %d",), - no_demean=dict(argstr="-no_demean",), + mask=dict( + argstr="-mask %s", + copyfile=False, + extensions=None, + ), + nfirst=dict( + 
argstr="-nfirst %d", + ), + no_demean=dict( + argstr="-no_demean", + ), ) inputs = GCOR.input_spec() @@ -25,7 +38,9 @@ def test_GCOR_inputs(): def test_GCOR_outputs(): - output_map = dict(out=dict(),) + output_map = dict( + out=dict(), + ) outputs = GCOR.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Hist.py b/nipype/interfaces/afni/tests/test_auto_Hist.py index 30443b3a44..2263f3632b 100644 --- a/nipype/interfaces/afni/tests/test_auto_Hist.py +++ b/nipype/interfaces/afni/tests/test_auto_Hist.py @@ -4,9 +4,16 @@ def test_Hist_inputs(): input_map = dict( - args=dict(argstr="%s",), - bin_width=dict(argstr="-binwidth %f",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + bin_width=dict( + argstr="-binwidth %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( argstr="-input %s", copyfile=False, @@ -14,10 +21,19 @@ def test_Hist_inputs(): mandatory=True, position=1, ), - mask=dict(argstr="-mask %s", extensions=None,), - max_value=dict(argstr="-max %f",), - min_value=dict(argstr="-min %f",), - nbin=dict(argstr="-nbin %d",), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + max_value=dict( + argstr="-max %f", + ), + min_value=dict( + argstr="-min %f", + ), + nbin=dict( + argstr="-nbin %d", + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -33,7 +49,10 @@ def test_Hist_inputs(): name_template="%s_hist.out", position=-1, ), - showhist=dict(argstr="-showhist", usedefault=True,), + showhist=dict( + argstr="-showhist", + usedefault=True, + ), ) inputs = Hist.input_spec() @@ -43,7 +62,14 @@ def test_Hist_inputs(): def test_Hist_outputs(): - output_map = dict(out_file=dict(extensions=None,), out_show=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + out_show=dict( + extensions=None, + ), + ) outputs = Hist.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_LFCD.py b/nipype/interfaces/afni/tests/test_auto_LFCD.py index aebe0dcded..bd4b76baee 100644 --- a/nipype/interfaces/afni/tests/test_auto_LFCD.py +++ b/nipype/interfaces/afni/tests/test_auto_LFCD.py @@ -4,15 +4,34 @@ def test_LFCD_inputs(): input_map = dict( - args=dict(argstr="%s",), - autoclip=dict(argstr="-autoclip",), - automask=dict(argstr="-automask",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + autoclip=dict( + argstr="-autoclip", + ), + automask=dict( + argstr="-automask", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - mask=dict(argstr="-mask %s", extensions=None,), - num_threads=dict(nohash=True, usedefault=True,), out_file=dict( argstr="-prefix %s", extensions=None, @@ -20,8 +39,12 @@ def test_LFCD_inputs(): name_template="%s_afni", ), outputtype=dict(), - polort=dict(argstr="-polort %d",), - thresh=dict(argstr="-thresh %f",), + polort=dict( + argstr="-polort %d", + ), + thresh=dict( + argstr="-thresh %f", + ), ) inputs = LFCD.input_spec() @@ -31,7 +54,11 @@ def test_LFCD_inputs(): def test_LFCD_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = LFCD.output_spec() 
for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_LocalBistat.py b/nipype/interfaces/afni/tests/test_auto_LocalBistat.py index ea718b9d1c..2ffe29dda0 100644 --- a/nipype/interfaces/afni/tests/test_auto_LocalBistat.py +++ b/nipype/interfaces/afni/tests/test_auto_LocalBistat.py @@ -4,14 +4,41 @@ def test_LocalBistat_inputs(): input_map = dict( - args=dict(argstr="%s",), - automask=dict(argstr="-automask", xor=["weight_file"],), - environ=dict(nohash=True, usedefault=True,), - in_file1=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - in_file2=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), - mask_file=dict(argstr="-mask %s", extensions=None,), - neighborhood=dict(argstr="-nbhd '%s(%s)'", mandatory=True,), - num_threads=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + automask=dict( + argstr="-automask", + xor=["weight_file"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file1=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + in_file2=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + neighborhood=dict( + argstr="-nbhd '%s(%s)'", + mandatory=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -21,8 +48,15 @@ def test_LocalBistat_inputs(): position=0, ), outputtype=dict(), - stat=dict(argstr="-stat %s...", mandatory=True,), - weight_file=dict(argstr="-weight %s", extensions=None, xor=["automask"],), + stat=dict( + argstr="-stat %s...", + mandatory=True, + ), + weight_file=dict( + argstr="-weight %s", + extensions=None, + xor=["automask"], + ), ) inputs = LocalBistat.input_spec() @@ -32,7 +66,11 @@ def test_LocalBistat_inputs(): def test_LocalBistat_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = LocalBistat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Localstat.py b/nipype/interfaces/afni/tests/test_auto_Localstat.py index 4b036e6d84..54c99b434d 100644 --- a/nipype/interfaces/afni/tests/test_auto_Localstat.py +++ b/nipype/interfaces/afni/tests/test_auto_Localstat.py @@ -4,15 +4,41 @@ def test_Localstat_inputs(): input_map = dict( - args=dict(argstr="%s",), - automask=dict(argstr="-automask",), - environ=dict(nohash=True, usedefault=True,), - grid_rmode=dict(argstr="-grid_rmode %s", requires=["reduce_restore_grid"],), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), - mask_file=dict(argstr="-mask %s", extensions=None,), - neighborhood=dict(argstr="-nbhd '%s(%s)'", mandatory=True,), - nonmask=dict(argstr="-use_nonmask",), - num_threads=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + automask=dict( + argstr="-automask", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grid_rmode=dict( + argstr="-grid_rmode %s", + requires=["reduce_restore_grid"], + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + neighborhood=dict( + argstr="-nbhd '%s(%s)'", + mandatory=True, + ), + nonmask=dict( + argstr="-use_nonmask", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -22,18 +48,28 @@ def 
test_Localstat_inputs(): position=0, ), outputtype=dict(), - overwrite=dict(argstr="-overwrite",), - quiet=dict(argstr="-quiet",), + overwrite=dict( + argstr="-overwrite", + ), + quiet=dict( + argstr="-quiet", + ), reduce_grid=dict( - argstr="-reduce_grid %s", xor=["reduce_restore_grid", "reduce_max_vox"], + argstr="-reduce_grid %s", + xor=["reduce_restore_grid", "reduce_max_vox"], ), reduce_max_vox=dict( - argstr="-reduce_max_vox %s", xor=["reduce_restore_grid", "reduce_grid"], + argstr="-reduce_max_vox %s", + xor=["reduce_restore_grid", "reduce_grid"], ), reduce_restore_grid=dict( - argstr="-reduce_restore_grid %s", xor=["reduce_max_vox", "reduce_grid"], + argstr="-reduce_restore_grid %s", + xor=["reduce_max_vox", "reduce_grid"], + ), + stat=dict( + argstr="-stat %s...", + mandatory=True, ), - stat=dict(argstr="-stat %s...", mandatory=True,), ) inputs = Localstat.input_spec() @@ -43,7 +79,11 @@ def test_Localstat_inputs(): def test_Localstat_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Localstat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_MaskTool.py b/nipype/interfaces/afni/tests/test_auto_MaskTool.py index dffe07ff32..a0520df606 100644 --- a/nipype/interfaces/afni/tests/test_auto_MaskTool.py +++ b/nipype/interfaces/afni/tests/test_auto_MaskTool.py @@ -4,18 +4,49 @@ def test_MaskTool_inputs(): input_map = dict( - args=dict(argstr="%s",), - count=dict(argstr="-count", position=2,), - datum=dict(argstr="-datum %s",), - dilate_inputs=dict(argstr="-dilate_inputs %s",), - dilate_results=dict(argstr="-dilate_results %s",), - environ=dict(nohash=True, usedefault=True,), - fill_dirs=dict(argstr="-fill_dirs %s", requires=["fill_holes"],), - fill_holes=dict(argstr="-fill_holes",), - frac=dict(argstr="-frac %s",), - in_file=dict(argstr="-input %s", copyfile=False, mandatory=True, position=-1,), - inter=dict(argstr="-inter",), - num_threads=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + count=dict( + argstr="-count", + position=2, + ), + datum=dict( + argstr="-datum %s", + ), + dilate_inputs=dict( + argstr="-dilate_inputs %s", + ), + dilate_results=dict( + argstr="-dilate_results %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fill_dirs=dict( + argstr="-fill_dirs %s", + requires=["fill_holes"], + ), + fill_holes=dict( + argstr="-fill_holes", + ), + frac=dict( + argstr="-frac %s", + ), + in_file=dict( + argstr="-input %s", + copyfile=False, + mandatory=True, + position=-1, + ), + inter=dict( + argstr="-inter", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -23,8 +54,12 @@ def test_MaskTool_inputs(): name_template="%s_mask", ), outputtype=dict(), - union=dict(argstr="-union",), - verbose=dict(argstr="-verb %s",), + union=dict( + argstr="-union", + ), + verbose=dict( + argstr="-verb %s", + ), ) inputs = MaskTool.input_spec() @@ -34,7 +69,11 @@ def test_MaskTool_inputs(): def test_MaskTool_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MaskTool.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Maskave.py b/nipype/interfaces/afni/tests/test_auto_Maskave.py index b882cfdba9..ce7a07c95e 100644 --- a/nipype/interfaces/afni/tests/test_auto_Maskave.py +++ 
b/nipype/interfaces/afni/tests/test_auto_Maskave.py @@ -4,13 +4,29 @@ def test_Maskave_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-2, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-2, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + position=1, + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - mask=dict(argstr="-mask %s", extensions=None, position=1,), - num_threads=dict(nohash=True, usedefault=True,), out_file=dict( argstr="> %s", extensions=None, @@ -20,7 +36,10 @@ def test_Maskave_inputs(): position=-1, ), outputtype=dict(), - quiet=dict(argstr="-quiet", position=2,), + quiet=dict( + argstr="-quiet", + position=2, + ), ) inputs = Maskave.input_spec() @@ -30,7 +49,11 @@ def test_Maskave_inputs(): def test_Maskave_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Maskave.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Means.py b/nipype/interfaces/afni/tests/test_auto_Means.py index b88957fef3..3aa3ada375 100644 --- a/nipype/interfaces/afni/tests/test_auto_Means.py +++ b/nipype/interfaces/afni/tests/test_auto_Means.py @@ -4,16 +4,43 @@ def test_Means_inputs(): input_map = dict( - args=dict(argstr="%s",), - count=dict(argstr="-count",), - datum=dict(argstr="-datum %s",), - environ=dict(nohash=True, usedefault=True,), - in_file_a=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - in_file_b=dict(argstr="%s", extensions=None, position=-1,), - mask_inter=dict(argstr="-mask_inter",), - mask_union=dict(argstr="-mask_union",), - non_zero=dict(argstr="-non_zero",), - num_threads=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + count=dict( + argstr="-count", + ), + datum=dict( + argstr="-datum %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file_a=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + in_file_b=dict( + argstr="%s", + extensions=None, + position=-1, + ), + mask_inter=dict( + argstr="-mask_inter", + ), + mask_union=dict( + argstr="-mask_union", + ), + non_zero=dict( + argstr="-non_zero", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -21,10 +48,18 @@ def test_Means_inputs(): name_template="%s_mean", ), outputtype=dict(), - scale=dict(argstr="-%sscale",), - sqr=dict(argstr="-sqr",), - std_dev=dict(argstr="-stdev",), - summ=dict(argstr="-sum",), + scale=dict( + argstr="-%sscale", + ), + sqr=dict( + argstr="-sqr", + ), + std_dev=dict( + argstr="-stdev", + ), + summ=dict( + argstr="-sum", + ), ) inputs = Means.input_spec() @@ -34,7 +69,11 @@ def test_Means_inputs(): def test_Means_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Means.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Merge.py b/nipype/interfaces/afni/tests/test_auto_Merge.py index 9ccaf8d4d4..ac974184ea 100644 --- a/nipype/interfaces/afni/tests/test_auto_Merge.py +++ b/nipype/interfaces/afni/tests/test_auto_Merge.py @@ -4,12 +4,30 @@ def test_Merge_inputs(): input_map = 
dict( - args=dict(argstr="%s",), - blurfwhm=dict(argstr="-1blur_fwhm %d", units="mm",), - doall=dict(argstr="-doall",), - environ=dict(nohash=True, usedefault=True,), - in_files=dict(argstr="%s", copyfile=False, mandatory=True, position=-1,), - num_threads=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + blurfwhm=dict( + argstr="-1blur_fwhm %d", + units="mm", + ), + doall=dict( + argstr="-doall", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr="%s", + copyfile=False, + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -26,7 +44,11 @@ def test_Merge_inputs(): def test_Merge_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Merge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Notes.py b/nipype/interfaces/afni/tests/test_auto_Notes.py index a51727578d..c83a70f0f2 100644 --- a/nipype/interfaces/afni/tests/test_auto_Notes.py +++ b/nipype/interfaces/afni/tests/test_auto_Notes.py @@ -4,19 +4,46 @@ def test_Notes_inputs(): input_map = dict( - add=dict(argstr='-a "%s"',), - add_history=dict(argstr='-h "%s"', xor=["rep_history"],), - args=dict(argstr="%s",), - delete=dict(argstr="-d %d",), - environ=dict(nohash=True, usedefault=True,), + add=dict( + argstr='-a "%s"', + ), + add_history=dict( + argstr='-h "%s"', + xor=["rep_history"], + ), + args=dict( + argstr="%s", + ), + delete=dict( + argstr="-d %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr="%s", + extensions=None, ), - num_threads=dict(nohash=True, usedefault=True,), - out_file=dict(argstr="%s", extensions=None,), outputtype=dict(), - rep_history=dict(argstr='-HH "%s"', xor=["add_history"],), - ses=dict(argstr="-ses",), + rep_history=dict( + argstr='-HH "%s"', + xor=["add_history"], + ), + ses=dict( + argstr="-ses", + ), ) inputs = Notes.input_spec() @@ -26,7 +53,11 @@ def test_Notes_inputs(): def test_Notes_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Notes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py b/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py index 265e3720a2..85fb2d3495 100644 --- a/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py +++ b/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py @@ -4,10 +4,20 @@ def test_NwarpAdjust_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_files=dict(argstr="-source %s",), - num_threads=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr="-source %s", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -17,7 +27,10 @@ def test_NwarpAdjust_inputs(): requires=["in_files"], ), outputtype=dict(), - warps=dict(argstr="-nwarp %s", mandatory=True,), + warps=dict( + argstr="-nwarp %s", + mandatory=True, + ), 
) inputs = NwarpAdjust.input_spec() @@ -27,7 +40,11 @@ def test_NwarpAdjust_inputs(): def test_NwarpAdjust_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = NwarpAdjust.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpApply.py b/nipype/interfaces/afni/tests/test_auto_NwarpApply.py index 727b210c4e..c9ebd2853e 100644 --- a/nipype/interfaces/afni/tests/test_auto_NwarpApply.py +++ b/nipype/interfaces/afni/tests/test_auto_NwarpApply.py @@ -4,23 +4,52 @@ def test_NwarpApply_inputs(): input_map = dict( - ainterp=dict(argstr="-ainterp %s",), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-source %s", mandatory=True,), - interp=dict(argstr="-interp %s", usedefault=True,), - inv_warp=dict(argstr="-iwarp",), - master=dict(argstr="-master %s", extensions=None,), + ainterp=dict( + argstr="-ainterp %s", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-source %s", + mandatory=True, + ), + interp=dict( + argstr="-interp %s", + usedefault=True, + ), + inv_warp=dict( + argstr="-iwarp", + ), + master=dict( + argstr="-master %s", + extensions=None, + ), out_file=dict( argstr="-prefix %s", extensions=None, name_source="in_file", name_template="%s_Nwarp", ), - quiet=dict(argstr="-quiet", xor=["verb"],), - short=dict(argstr="-short",), - verb=dict(argstr="-verb", xor=["quiet"],), - warp=dict(argstr="-nwarp %s", mandatory=True,), + quiet=dict( + argstr="-quiet", + xor=["verb"], + ), + short=dict( + argstr="-short", + ), + verb=dict( + argstr="-verb", + xor=["quiet"], + ), + warp=dict( + argstr="-nwarp %s", + mandatory=True, + ), ) inputs = NwarpApply.input_spec() @@ -30,7 +59,11 @@ def test_NwarpApply_inputs(): def test_NwarpApply_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = NwarpApply.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpCat.py b/nipype/interfaces/afni/tests/test_auto_NwarpCat.py index 82b1e6a125..b89aade9b0 100644 --- a/nipype/interfaces/afni/tests/test_auto_NwarpCat.py +++ b/nipype/interfaces/afni/tests/test_auto_NwarpCat.py @@ -4,13 +4,32 @@ def test_NwarpCat_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - expad=dict(argstr="-expad %d",), - in_files=dict(argstr="%s", mandatory=True, position=-1,), - interp=dict(argstr="-interp %s", usedefault=True,), - inv_warp=dict(argstr="-iwarp",), - num_threads=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + expad=dict( + argstr="-expad %d", + ), + in_files=dict( + argstr="%s", + mandatory=True, + position=-1, + ), + interp=dict( + argstr="-interp %s", + usedefault=True, + ), + inv_warp=dict( + argstr="-iwarp", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -18,8 +37,12 @@ def test_NwarpCat_inputs(): name_template="%s_NwarpCat", ), outputtype=dict(), - space=dict(argstr="-space %s",), - verb=dict(argstr="-verb",), + space=dict( + argstr="-space %s", + ), + verb=dict( + argstr="-verb", + ), ) inputs = NwarpCat.input_spec() @@ -29,7 +52,11 @@ def test_NwarpCat_inputs(): def test_NwarpCat_outputs(): - output_map = 
dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = NwarpCat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py b/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py index e9ae2472be..bb47517e27 100644 --- a/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py +++ b/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py @@ -4,29 +4,60 @@ def test_OneDToolPy_inputs(): input_map = dict( - args=dict(argstr="%s",), - censor_motion=dict(argstr="-censor_motion %f %s",), - censor_prev_TR=dict(argstr="-censor_prev_TR",), - demean=dict(argstr="-demean",), - derivative=dict(argstr="-derivative",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-infile %s", extensions=None, mandatory=True,), + args=dict( + argstr="%s", + ), + censor_motion=dict( + argstr="-censor_motion %f %s", + ), + censor_prev_TR=dict( + argstr="-censor_prev_TR", + ), + demean=dict( + argstr="-demean", + ), + derivative=dict( + argstr="-derivative", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-infile %s", + extensions=None, + mandatory=True, + ), out_file=dict( - argstr="-write %s", extensions=None, xor=["show_cormat_warnings"], + argstr="-write %s", + extensions=None, + xor=["show_cormat_warnings"], ), outputtype=dict(), - py27_path=dict(usedefault=True,), - set_nruns=dict(argstr="-set_nruns %d",), - show_censor_count=dict(argstr="-show_censor_count",), + py27_path=dict( + usedefault=True, + ), + set_nruns=dict( + argstr="-set_nruns %d", + ), + show_censor_count=dict( + argstr="-show_censor_count", + ), show_cormat_warnings=dict( argstr="-show_cormat_warnings |& tee %s", extensions=None, position=-1, xor=["out_file"], ), - show_indices_interest=dict(argstr="-show_indices_interest",), - show_trs_run=dict(argstr="-show_trs_run %d",), - show_trs_uncensored=dict(argstr="-show_trs_uncensored %s",), + show_indices_interest=dict( + argstr="-show_indices_interest", + ), + show_trs_run=dict( + argstr="-show_trs_run %d", + ), + show_trs_uncensored=dict( + argstr="-show_trs_uncensored %s", + ), ) inputs = OneDToolPy.input_spec() @@ -36,7 +67,11 @@ def test_OneDToolPy_inputs(): def test_OneDToolPy_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = OneDToolPy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_OutlierCount.py b/nipype/interfaces/afni/tests/test_auto_OutlierCount.py index 3b90e076d7..511c1ca8f2 100644 --- a/nipype/interfaces/afni/tests/test_auto_OutlierCount.py +++ b/nipype/interfaces/afni/tests/test_auto_OutlierCount.py @@ -4,15 +4,46 @@ def test_OutlierCount_inputs(): input_map = dict( - args=dict(argstr="%s",), - autoclip=dict(argstr="-autoclip", usedefault=True, xor=["mask"],), - automask=dict(argstr="-automask", usedefault=True, xor=["mask"],), - environ=dict(nohash=True, usedefault=True,), - fraction=dict(argstr="-fraction", usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - interval=dict(argstr="-range", usedefault=True,), - legendre=dict(argstr="-legendre", usedefault=True,), - mask=dict(argstr="-mask %s", extensions=None, xor=["autoclip", "automask"],), + args=dict( + argstr="%s", + ), + autoclip=dict( + argstr="-autoclip", + usedefault=True, + xor=["mask"], + ), + automask=dict( + argstr="-automask", + usedefault=True, + 
xor=["mask"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fraction=dict( + argstr="-fraction", + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + interval=dict( + argstr="-range", + usedefault=True, + ), + legendre=dict( + argstr="-legendre", + usedefault=True, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + xor=["autoclip", "automask"], + ), out_file=dict( extensions=None, keep_extension=False, @@ -27,9 +58,16 @@ def test_OutlierCount_inputs(): name_template="%s_outliers", output_name="out_outliers", ), - polort=dict(argstr="-polort %d",), - qthr=dict(argstr="-qthr %.5f", usedefault=True,), - save_outliers=dict(usedefault=True,), + polort=dict( + argstr="-polort %d", + ), + qthr=dict( + argstr="-qthr %.5f", + usedefault=True, + ), + save_outliers=dict( + usedefault=True, + ), ) inputs = OutlierCount.input_spec() @@ -40,7 +78,12 @@ def test_OutlierCount_inputs(): def test_OutlierCount_outputs(): output_map = dict( - out_file=dict(extensions=None,), out_outliers=dict(extensions=None,), + out_file=dict( + extensions=None, + ), + out_outliers=dict( + extensions=None, + ), ) outputs = OutlierCount.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_QualityIndex.py b/nipype/interfaces/afni/tests/test_auto_QualityIndex.py index 359743b19f..c759be87a4 100644 --- a/nipype/interfaces/afni/tests/test_auto_QualityIndex.py +++ b/nipype/interfaces/afni/tests/test_auto_QualityIndex.py @@ -4,14 +4,41 @@ def test_QualityIndex_inputs(): input_map = dict( - args=dict(argstr="%s",), - autoclip=dict(argstr="-autoclip", usedefault=True, xor=["mask"],), - automask=dict(argstr="-automask", usedefault=True, xor=["mask"],), - clip=dict(argstr="-clip %f",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - interval=dict(argstr="-range", usedefault=True,), - mask=dict(argstr="-mask %s", extensions=None, xor=["autoclip", "automask"],), + args=dict( + argstr="%s", + ), + autoclip=dict( + argstr="-autoclip", + usedefault=True, + xor=["mask"], + ), + automask=dict( + argstr="-automask", + usedefault=True, + xor=["mask"], + ), + clip=dict( + argstr="-clip %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + interval=dict( + argstr="-range", + usedefault=True, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + xor=["autoclip", "automask"], + ), out_file=dict( argstr="> %s", extensions=None, @@ -20,8 +47,14 @@ def test_QualityIndex_inputs(): name_template="%s_tqual", position=-1, ), - quadrant=dict(argstr="-quadrant", usedefault=True,), - spearman=dict(argstr="-spearman", usedefault=True,), + quadrant=dict( + argstr="-quadrant", + usedefault=True, + ), + spearman=dict( + argstr="-spearman", + usedefault=True, + ), ) inputs = QualityIndex.input_spec() @@ -31,7 +64,11 @@ def test_QualityIndex_inputs(): def test_QualityIndex_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = QualityIndex.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Qwarp.py b/nipype/interfaces/afni/tests/test_auto_Qwarp.py index e8659ec4c3..181f7217dd 100644 --- a/nipype/interfaces/afni/tests/test_auto_Qwarp.py +++ b/nipype/interfaces/afni/tests/test_auto_Qwarp.py @@ -4,74 +4,199 @@ def test_Qwarp_inputs(): input_map 
= dict( - Qfinal=dict(argstr="-Qfinal",), - Qonly=dict(argstr="-Qonly",), - allineate=dict(argstr="-allineate",), - allineate_opts=dict(argstr="-allineate_opts %s", requires=["allineate"],), - allsave=dict(argstr="-allsave", xor=["nopadWARP", "duplo", "plusminus"],), - args=dict(argstr="%s",), - ballopt=dict(argstr="-ballopt", xor=["workhard", "boxopt"],), + Qfinal=dict( + argstr="-Qfinal", + ), + Qonly=dict( + argstr="-Qonly", + ), + allineate=dict( + argstr="-allineate", + ), + allineate_opts=dict( + argstr="-allineate_opts %s", + requires=["allineate"], + ), + allsave=dict( + argstr="-allsave", + xor=["nopadWARP", "duplo", "plusminus"], + ), + args=dict( + argstr="%s", + ), + ballopt=dict( + argstr="-ballopt", + xor=["workhard", "boxopt"], + ), base_file=dict( - argstr="-base %s", copyfile=False, extensions=None, mandatory=True, + argstr="-base %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + baxopt=dict( + argstr="-boxopt", + xor=["workhard", "ballopt"], + ), + blur=dict( + argstr="-blur %s", ), - baxopt=dict(argstr="-boxopt", xor=["workhard", "ballopt"],), - blur=dict(argstr="-blur %s",), duplo=dict( argstr="-duplo", xor=["gridlist", "maxlev", "inilev", "iniwarp", "plusminus", "allsave"], ), - emask=dict(argstr="-emask %s", copyfile=False, extensions=None,), - environ=dict(nohash=True, usedefault=True,), - expad=dict(argstr="-expad %d", xor=["nopadWARP"],), + emask=dict( + argstr="-emask %s", + copyfile=False, + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + expad=dict( + argstr="-expad %d", + xor=["nopadWARP"], + ), gridlist=dict( argstr="-gridlist %s", copyfile=False, extensions=None, xor=["duplo", "plusminus"], ), - hel=dict(argstr="-hel", xor=["nmi", "mi", "lpc", "lpa", "pear"],), + hel=dict( + argstr="-hel", + xor=["nmi", "mi", "lpc", "lpa", "pear"], + ), in_file=dict( - argstr="-source %s", copyfile=False, extensions=None, mandatory=True, - ), - inilev=dict(argstr="-inilev %d", xor=["duplo"],), - iniwarp=dict(argstr="-iniwarp %s", xor=["duplo"],), - iwarp=dict(argstr="-iwarp", xor=["plusminus"],), - lpa=dict(argstr="-lpa", xor=["nmi", "mi", "lpc", "hel", "pear"],), - lpc=dict(argstr="-lpc", position=-2, xor=["nmi", "mi", "hel", "lpa", "pear"],), - maxlev=dict(argstr="-maxlev %d", position=-1, xor=["duplo"],), - mi=dict(argstr="-mi", xor=["mi", "hel", "lpc", "lpa", "pear"],), - minpatch=dict(argstr="-minpatch %d",), - nmi=dict(argstr="-nmi", xor=["nmi", "hel", "lpc", "lpa", "pear"],), - noXdis=dict(argstr="-noXdis",), - noYdis=dict(argstr="-noYdis",), - noZdis=dict(argstr="-noZdis",), - noneg=dict(argstr="-noneg",), - nopad=dict(argstr="-nopad",), - nopadWARP=dict(argstr="-nopadWARP", xor=["allsave", "expad"],), - nopenalty=dict(argstr="-nopenalty",), - nowarp=dict(argstr="-nowarp",), - noweight=dict(argstr="-noweight",), - num_threads=dict(nohash=True, usedefault=True,), + argstr="-source %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + inilev=dict( + argstr="-inilev %d", + xor=["duplo"], + ), + iniwarp=dict( + argstr="-iniwarp %s", + xor=["duplo"], + ), + iwarp=dict( + argstr="-iwarp", + xor=["plusminus"], + ), + lpa=dict( + argstr="-lpa", + xor=["nmi", "mi", "lpc", "hel", "pear"], + ), + lpc=dict( + argstr="-lpc", + position=-2, + xor=["nmi", "mi", "hel", "lpa", "pear"], + ), + maxlev=dict( + argstr="-maxlev %d", + position=-1, + xor=["duplo"], + ), + mi=dict( + argstr="-mi", + xor=["mi", "hel", "lpc", "lpa", "pear"], + ), + minpatch=dict( + argstr="-minpatch %d", + ), + nmi=dict( + argstr="-nmi", + 
xor=["nmi", "hel", "lpc", "lpa", "pear"], + ), + noXdis=dict( + argstr="-noXdis", + ), + noYdis=dict( + argstr="-noYdis", + ), + noZdis=dict( + argstr="-noZdis", + ), + noneg=dict( + argstr="-noneg", + ), + nopad=dict( + argstr="-nopad", + ), + nopadWARP=dict( + argstr="-nopadWARP", + xor=["allsave", "expad"], + ), + nopenalty=dict( + argstr="-nopenalty", + ), + nowarp=dict( + argstr="-nowarp", + ), + noweight=dict( + argstr="-noweight", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, name_source=["in_file"], name_template="ppp_%s", ), - out_weight_file=dict(argstr="-wtprefix %s", extensions=None,), + out_weight_file=dict( + argstr="-wtprefix %s", + extensions=None, + ), outputtype=dict(), - overwrite=dict(argstr="-overwrite",), - pblur=dict(argstr="-pblur %s",), - pear=dict(argstr="-pear",), - penfac=dict(argstr="-penfac %f",), - plusminus=dict(argstr="-plusminus", xor=["duplo", "allsave", "iwarp"],), - quiet=dict(argstr="-quiet", xor=["verb"],), - resample=dict(argstr="-resample",), - verb=dict(argstr="-verb", xor=["quiet"],), - wball=dict(argstr="-wball %s", xor=["wmask"],), - weight=dict(argstr="-weight %s", extensions=None,), - wmask=dict(argstr="-wpass %s %f", xor=["wball"],), - workhard=dict(argstr="-workhard", xor=["boxopt", "ballopt"],), + overwrite=dict( + argstr="-overwrite", + ), + pblur=dict( + argstr="-pblur %s", + ), + pear=dict( + argstr="-pear", + ), + penfac=dict( + argstr="-penfac %f", + ), + plusminus=dict( + argstr="-plusminus", + xor=["duplo", "allsave", "iwarp"], + ), + quiet=dict( + argstr="-quiet", + xor=["verb"], + ), + resample=dict( + argstr="-resample", + ), + verb=dict( + argstr="-verb", + xor=["quiet"], + ), + wball=dict( + argstr="-wball %s", + xor=["wmask"], + ), + weight=dict( + argstr="-weight %s", + extensions=None, + ), + wmask=dict( + argstr="-wpass %s %f", + xor=["wball"], + ), + workhard=dict( + argstr="-workhard", + xor=["boxopt", "ballopt"], + ), ) inputs = Qwarp.input_spec() @@ -82,11 +207,21 @@ def test_Qwarp_inputs(): def test_Qwarp_outputs(): output_map = dict( - base_warp=dict(extensions=None,), - source_warp=dict(extensions=None,), - warped_base=dict(extensions=None,), - warped_source=dict(extensions=None,), - weights=dict(extensions=None,), + base_warp=dict( + extensions=None, + ), + source_warp=dict( + extensions=None, + ), + warped_base=dict( + extensions=None, + ), + warped_source=dict( + extensions=None, + ), + weights=dict( + extensions=None, + ), ) outputs = Qwarp.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py b/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py index c74e192b8f..4f386ab63b 100644 --- a/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py +++ b/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py @@ -4,71 +4,181 @@ def test_QwarpPlusMinus_inputs(): input_map = dict( - Qfinal=dict(argstr="-Qfinal",), - Qonly=dict(argstr="-Qonly",), - allineate=dict(argstr="-allineate",), - allineate_opts=dict(argstr="-allineate_opts %s", requires=["allineate"],), - allsave=dict(argstr="-allsave", xor=["nopadWARP", "duplo", "plusminus"],), - args=dict(argstr="%s",), - ballopt=dict(argstr="-ballopt", xor=["workhard", "boxopt"],), + Qfinal=dict( + argstr="-Qfinal", + ), + Qonly=dict( + argstr="-Qonly", + ), + allineate=dict( + argstr="-allineate", + ), + allineate_opts=dict( + argstr="-allineate_opts %s", + requires=["allineate"], + ), + allsave=dict( + argstr="-allsave", + xor=["nopadWARP", "duplo", "plusminus"], + ), + 
args=dict( + argstr="%s", + ), + ballopt=dict( + argstr="-ballopt", + xor=["workhard", "boxopt"], + ), base_file=dict( - argstr="-base %s", copyfile=False, extensions=None, mandatory=True, + argstr="-base %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + baxopt=dict( + argstr="-boxopt", + xor=["workhard", "ballopt"], + ), + blur=dict( + argstr="-blur %s", ), - baxopt=dict(argstr="-boxopt", xor=["workhard", "ballopt"],), - blur=dict(argstr="-blur %s",), duplo=dict( argstr="-duplo", xor=["gridlist", "maxlev", "inilev", "iniwarp", "plusminus", "allsave"], ), - emask=dict(argstr="-emask %s", copyfile=False, extensions=None,), - environ=dict(nohash=True, usedefault=True,), - expad=dict(argstr="-expad %d", xor=["nopadWARP"],), + emask=dict( + argstr="-emask %s", + copyfile=False, + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + expad=dict( + argstr="-expad %d", + xor=["nopadWARP"], + ), gridlist=dict( argstr="-gridlist %s", copyfile=False, extensions=None, xor=["duplo", "plusminus"], ), - hel=dict(argstr="-hel", xor=["nmi", "mi", "lpc", "lpa", "pear"],), + hel=dict( + argstr="-hel", + xor=["nmi", "mi", "lpc", "lpa", "pear"], + ), in_file=dict( - argstr="-source %s", copyfile=False, extensions=None, mandatory=True, - ), - inilev=dict(argstr="-inilev %d", xor=["duplo"],), - iniwarp=dict(argstr="-iniwarp %s", xor=["duplo"],), - iwarp=dict(argstr="-iwarp", xor=["plusminus"],), - lpa=dict(argstr="-lpa", xor=["nmi", "mi", "lpc", "hel", "pear"],), - lpc=dict(argstr="-lpc", position=-2, xor=["nmi", "mi", "hel", "lpa", "pear"],), - maxlev=dict(argstr="-maxlev %d", position=-1, xor=["duplo"],), - mi=dict(argstr="-mi", xor=["mi", "hel", "lpc", "lpa", "pear"],), - minpatch=dict(argstr="-minpatch %d",), - nmi=dict(argstr="-nmi", xor=["nmi", "hel", "lpc", "lpa", "pear"],), - noXdis=dict(argstr="-noXdis",), - noYdis=dict(argstr="-noYdis",), - noZdis=dict(argstr="-noZdis",), - noneg=dict(argstr="-noneg",), - nopad=dict(argstr="-nopad",), - nopadWARP=dict(argstr="-nopadWARP", xor=["allsave", "expad"],), - nopenalty=dict(argstr="-nopenalty",), - nowarp=dict(argstr="-nowarp",), - noweight=dict(argstr="-noweight",), - num_threads=dict(nohash=True, usedefault=True,), + argstr="-source %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + inilev=dict( + argstr="-inilev %d", + xor=["duplo"], + ), + iniwarp=dict( + argstr="-iniwarp %s", + xor=["duplo"], + ), + iwarp=dict( + argstr="-iwarp", + xor=["plusminus"], + ), + lpa=dict( + argstr="-lpa", + xor=["nmi", "mi", "lpc", "hel", "pear"], + ), + lpc=dict( + argstr="-lpc", + position=-2, + xor=["nmi", "mi", "hel", "lpa", "pear"], + ), + maxlev=dict( + argstr="-maxlev %d", + position=-1, + xor=["duplo"], + ), + mi=dict( + argstr="-mi", + xor=["mi", "hel", "lpc", "lpa", "pear"], + ), + minpatch=dict( + argstr="-minpatch %d", + ), + nmi=dict( + argstr="-nmi", + xor=["nmi", "hel", "lpc", "lpa", "pear"], + ), + noXdis=dict( + argstr="-noXdis", + ), + noYdis=dict( + argstr="-noYdis", + ), + noZdis=dict( + argstr="-noZdis", + ), + noneg=dict( + argstr="-noneg", + ), + nopad=dict( + argstr="-nopad", + ), + nopadWARP=dict( + argstr="-nopadWARP", + xor=["allsave", "expad"], + ), + nopenalty=dict( + argstr="-nopenalty", + ), + nowarp=dict( + argstr="-nowarp", + ), + noweight=dict( + argstr="-noweight", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( - argstr="-prefix %s", extensions=None, position=0, usedefault=True, + argstr="-prefix %s", + extensions=None, + position=0, + usedefault=True, 
+ ), + out_weight_file=dict( + argstr="-wtprefix %s", + extensions=None, ), - out_weight_file=dict(argstr="-wtprefix %s", extensions=None,), outputtype=dict(), - overwrite=dict(argstr="-overwrite",), - pblur=dict(argstr="-pblur %s",), - pear=dict(argstr="-pear",), - penfac=dict(argstr="-penfac %f",), + overwrite=dict( + argstr="-overwrite", + ), + pblur=dict( + argstr="-pblur %s", + ), + pear=dict( + argstr="-pear", + ), + penfac=dict( + argstr="-penfac %f", + ), plusminus=dict( argstr="-plusminus", position=1, usedefault=True, xor=["duplo", "allsave", "iwarp"], ), - quiet=dict(argstr="-quiet", xor=["verb"],), - resample=dict(argstr="-resample",), + quiet=dict( + argstr="-quiet", + xor=["verb"], + ), + resample=dict( + argstr="-resample", + ), source_file=dict( argstr="-source %s", copyfile=False, @@ -76,11 +186,26 @@ def test_QwarpPlusMinus_inputs(): extensions=None, new_name="in_file", ), - verb=dict(argstr="-verb", xor=["quiet"],), - wball=dict(argstr="-wball %s", xor=["wmask"],), - weight=dict(argstr="-weight %s", extensions=None,), - wmask=dict(argstr="-wpass %s %f", xor=["wball"],), - workhard=dict(argstr="-workhard", xor=["boxopt", "ballopt"],), + verb=dict( + argstr="-verb", + xor=["quiet"], + ), + wball=dict( + argstr="-wball %s", + xor=["wmask"], + ), + weight=dict( + argstr="-weight %s", + extensions=None, + ), + wmask=dict( + argstr="-wpass %s %f", + xor=["wball"], + ), + workhard=dict( + argstr="-workhard", + xor=["boxopt", "ballopt"], + ), ) inputs = QwarpPlusMinus.input_spec() @@ -91,11 +216,21 @@ def test_QwarpPlusMinus_inputs(): def test_QwarpPlusMinus_outputs(): output_map = dict( - base_warp=dict(extensions=None,), - source_warp=dict(extensions=None,), - warped_base=dict(extensions=None,), - warped_source=dict(extensions=None,), - weights=dict(extensions=None,), + base_warp=dict( + extensions=None, + ), + source_warp=dict( + extensions=None, + ), + warped_base=dict( + extensions=None, + ), + warped_source=dict( + extensions=None, + ), + weights=dict( + extensions=None, + ), ) outputs = QwarpPlusMinus.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_ROIStats.py b/nipype/interfaces/afni/tests/test_auto_ROIStats.py index 026e3ff076..36e7546990 100644 --- a/nipype/interfaces/afni/tests/test_auto_ROIStats.py +++ b/nipype/interfaces/afni/tests/test_auto_ROIStats.py @@ -4,12 +4,30 @@ def test_ROIStats_inputs(): input_map = dict( - args=dict(argstr="%s",), - debug=dict(argstr="-debug",), - environ=dict(nohash=True, usedefault=True,), - format1D=dict(argstr="-1Dformat", xor=["format1DR"],), - format1DR=dict(argstr="-1DRformat", xor=["format1D"],), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + format1D=dict( + argstr="-1Dformat", + xor=["format1DR"], + ), + format1DR=dict( + argstr="-1DRformat", + xor=["format1D"], + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), mask=dict( argstr="-mask %s", deprecated="1.1.4", @@ -17,11 +35,22 @@ def test_ROIStats_inputs(): new_name="mask_file", position=3, ), - mask_f2short=dict(argstr="-mask_f2short",), - mask_file=dict(argstr="-mask %s", extensions=None,), - nobriklab=dict(argstr="-nobriklab",), - nomeanout=dict(argstr="-nomeanout",), - num_roi=dict(argstr="-numroi %s",), + mask_f2short=dict( + argstr="-mask_f2short", + ), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + nobriklab=dict( + argstr="-nobriklab", + 
), + nomeanout=dict( + argstr="-nomeanout", + ), + num_roi=dict( + argstr="-numroi %s", + ), out_file=dict( argstr="> %s", extensions=None, @@ -30,10 +59,20 @@ def test_ROIStats_inputs(): name_template="%s_roistat.1D", position=-1, ), - quiet=dict(argstr="-quiet",), - roisel=dict(argstr="-roisel %s", extensions=None,), - stat=dict(argstr="%s...",), - zerofill=dict(argstr="-zerofill %s", requires=["num_roi"],), + quiet=dict( + argstr="-quiet", + ), + roisel=dict( + argstr="-roisel %s", + extensions=None, + ), + stat=dict( + argstr="%s...", + ), + zerofill=dict( + argstr="-zerofill %s", + requires=["num_roi"], + ), ) inputs = ROIStats.input_spec() @@ -43,7 +82,11 @@ def test_ROIStats_inputs(): def test_ROIStats_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ROIStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_ReHo.py b/nipype/interfaces/afni/tests/test_auto_ReHo.py index 1200399a1d..f9a1993ce1 100644 --- a/nipype/interfaces/afni/tests/test_auto_ReHo.py +++ b/nipype/interfaces/afni/tests/test_auto_ReHo.py @@ -4,17 +4,38 @@ def test_ReHo_inputs(): input_map = dict( - args=dict(argstr="%s",), - chi_sq=dict(argstr="-chi_sq",), + args=dict( + argstr="%s", + ), + chi_sq=dict( + argstr="-chi_sq", + ), ellipsoid=dict( argstr="-neigh_X %s -neigh_Y %s -neigh_Z %s", xor=["sphere", "neighborhood"], ), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-inset %s", extensions=None, mandatory=True, position=1,), - label_set=dict(argstr="-in_rois %s", extensions=None,), - mask_file=dict(argstr="-mask %s", extensions=None,), - neighborhood=dict(argstr="-nneigh %s", xor=["sphere", "ellipsoid"],), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-inset %s", + extensions=None, + mandatory=True, + position=1, + ), + label_set=dict( + argstr="-in_rois %s", + extensions=None, + ), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + neighborhood=dict( + argstr="-nneigh %s", + xor=["sphere", "ellipsoid"], + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -23,8 +44,13 @@ def test_ReHo_inputs(): name_template="%s_reho", position=0, ), - overwrite=dict(argstr="-overwrite",), - sphere=dict(argstr="-neigh_RAD %s", xor=["neighborhood", "ellipsoid"],), + overwrite=dict( + argstr="-overwrite", + ), + sphere=dict( + argstr="-neigh_RAD %s", + xor=["neighborhood", "ellipsoid"], + ), ) inputs = ReHo.input_spec() @@ -34,7 +60,14 @@ def test_ReHo_inputs(): def test_ReHo_outputs(): - output_map = dict(out_file=dict(extensions=None,), out_vals=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + out_vals=dict( + extensions=None, + ), + ) outputs = ReHo.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Refit.py b/nipype/interfaces/afni/tests/test_auto_Refit.py index 65952d85fa..205f7e0190 100644 --- a/nipype/interfaces/afni/tests/test_auto_Refit.py +++ b/nipype/interfaces/afni/tests/test_auto_Refit.py @@ -4,27 +4,69 @@ def test_Refit_inputs(): input_map = dict( - args=dict(argstr="%s",), - atrcopy=dict(argstr="-atrcopy %s %s",), - atrfloat=dict(argstr="-atrfloat %s %s",), - atrint=dict(argstr="-atrint %s %s",), - atrstring=dict(argstr="-atrstring %s %s",), - deoblique=dict(argstr="-deoblique",), - duporigin_file=dict(argstr="-duporigin %s", extensions=None,), - environ=dict(nohash=True, 
usedefault=True,), + args=dict( + argstr="%s", + ), + atrcopy=dict( + argstr="-atrcopy %s %s", + ), + atrfloat=dict( + argstr="-atrfloat %s %s", + ), + atrint=dict( + argstr="-atrint %s %s", + ), + atrstring=dict( + argstr="-atrstring %s %s", + ), + deoblique=dict( + argstr="-deoblique", + ), + duporigin_file=dict( + argstr="-duporigin %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-1, - ), - nosaveatr=dict(argstr="-nosaveatr",), - saveatr=dict(argstr="-saveatr",), - space=dict(argstr="-space %s",), - xdel=dict(argstr="-xdel %f",), - xorigin=dict(argstr="-xorigin %s",), - xyzscale=dict(argstr="-xyzscale %f",), - ydel=dict(argstr="-ydel %f",), - yorigin=dict(argstr="-yorigin %s",), - zdel=dict(argstr="-zdel %f",), - zorigin=dict(argstr="-zorigin %s",), + argstr="%s", + copyfile=True, + extensions=None, + mandatory=True, + position=-1, + ), + nosaveatr=dict( + argstr="-nosaveatr", + ), + saveatr=dict( + argstr="-saveatr", + ), + space=dict( + argstr="-space %s", + ), + xdel=dict( + argstr="-xdel %f", + ), + xorigin=dict( + argstr="-xorigin %s", + ), + xyzscale=dict( + argstr="-xyzscale %f", + ), + ydel=dict( + argstr="-ydel %f", + ), + yorigin=dict( + argstr="-yorigin %s", + ), + zdel=dict( + argstr="-zdel %f", + ), + zorigin=dict( + argstr="-zorigin %s", + ), ) inputs = Refit.input_spec() @@ -34,7 +76,11 @@ def test_Refit_inputs(): def test_Refit_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Refit.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Remlfit.py b/nipype/interfaces/afni/tests/test_auto_Remlfit.py index 3040c1f48e..cfffeeb40e 100644 --- a/nipype/interfaces/afni/tests/test_auto_Remlfit.py +++ b/nipype/interfaces/afni/tests/test_auto_Remlfit.py @@ -4,46 +4,156 @@ def test_Remlfit_inputs(): input_map = dict( - STATmask=dict(argstr="-STATmask %s", extensions=None,), - addbase=dict(argstr="-addbase %s", copyfile=False, sep=" ",), - args=dict(argstr="%s",), - automask=dict(argstr="-automask", usedefault=True,), - dsort=dict(argstr="-dsort %s", copyfile=False, extensions=None,), - dsort_nods=dict(argstr="-dsort_nods", requires=["dsort"],), - environ=dict(nohash=True, usedefault=True,), - errts_file=dict(argstr="-Rerrts %s", extensions=None,), - fitts_file=dict(argstr="-Rfitts %s", extensions=None,), - fout=dict(argstr="-fout",), - glt_file=dict(argstr="-Rglt %s", extensions=None,), - gltsym=dict(argstr='-gltsym "%s" %s...',), - goforit=dict(argstr="-GOFORIT",), - in_files=dict(argstr='-input "%s"', copyfile=False, mandatory=True, sep=" ",), - mask=dict(argstr="-mask %s", extensions=None,), - matim=dict(argstr="-matim %s", extensions=None, xor=["matrix"],), - matrix=dict(argstr="-matrix %s", extensions=None, mandatory=True,), - nobout=dict(argstr="-nobout",), - nodmbase=dict(argstr="-nodmbase", requires=["addbase", "dsort"],), - nofdr=dict(argstr="-noFDR",), - num_threads=dict(nohash=True, usedefault=True,), - obeta=dict(argstr="-Obeta %s", extensions=None,), - obuck=dict(argstr="-Obuck %s", extensions=None,), - oerrts=dict(argstr="-Oerrts %s", extensions=None,), - ofitts=dict(argstr="-Ofitts %s", extensions=None,), - oglt=dict(argstr="-Oglt %s", extensions=None,), - out_file=dict(argstr="-Rbuck %s", extensions=None,), + STATmask=dict( + argstr="-STATmask %s", + extensions=None, + ), + addbase=dict( + 
argstr="-addbase %s", + copyfile=False, + sep=" ", + ), + args=dict( + argstr="%s", + ), + automask=dict( + argstr="-automask", + usedefault=True, + ), + dsort=dict( + argstr="-dsort %s", + copyfile=False, + extensions=None, + ), + dsort_nods=dict( + argstr="-dsort_nods", + requires=["dsort"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + errts_file=dict( + argstr="-Rerrts %s", + extensions=None, + ), + fitts_file=dict( + argstr="-Rfitts %s", + extensions=None, + ), + fout=dict( + argstr="-fout", + ), + glt_file=dict( + argstr="-Rglt %s", + extensions=None, + ), + gltsym=dict( + argstr='-gltsym "%s" %s...', + ), + goforit=dict( + argstr="-GOFORIT", + ), + in_files=dict( + argstr='-input "%s"', + copyfile=False, + mandatory=True, + sep=" ", + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + matim=dict( + argstr="-matim %s", + extensions=None, + xor=["matrix"], + ), + matrix=dict( + argstr="-matrix %s", + extensions=None, + mandatory=True, + ), + nobout=dict( + argstr="-nobout", + ), + nodmbase=dict( + argstr="-nodmbase", + requires=["addbase", "dsort"], + ), + nofdr=dict( + argstr="-noFDR", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + obeta=dict( + argstr="-Obeta %s", + extensions=None, + ), + obuck=dict( + argstr="-Obuck %s", + extensions=None, + ), + oerrts=dict( + argstr="-Oerrts %s", + extensions=None, + ), + ofitts=dict( + argstr="-Ofitts %s", + extensions=None, + ), + oglt=dict( + argstr="-Oglt %s", + extensions=None, + ), + out_file=dict( + argstr="-Rbuck %s", + extensions=None, + ), outputtype=dict(), - ovar=dict(argstr="-Ovar %s", extensions=None,), - polort=dict(argstr="-polort %d", xor=["matrix"],), - quiet=dict(argstr="-quiet",), - rbeta_file=dict(argstr="-Rbeta %s", extensions=None,), - rout=dict(argstr="-rout",), - slibase=dict(argstr="-slibase %s",), - slibase_sm=dict(argstr="-slibase_sm %s",), - tout=dict(argstr="-tout",), - usetemp=dict(argstr="-usetemp",), - var_file=dict(argstr="-Rvar %s", extensions=None,), - verb=dict(argstr="-verb",), - wherr_file=dict(argstr="-Rwherr %s", extensions=None,), + ovar=dict( + argstr="-Ovar %s", + extensions=None, + ), + polort=dict( + argstr="-polort %d", + xor=["matrix"], + ), + quiet=dict( + argstr="-quiet", + ), + rbeta_file=dict( + argstr="-Rbeta %s", + extensions=None, + ), + rout=dict( + argstr="-rout", + ), + slibase=dict( + argstr="-slibase %s", + ), + slibase_sm=dict( + argstr="-slibase_sm %s", + ), + tout=dict( + argstr="-tout", + ), + usetemp=dict( + argstr="-usetemp", + ), + var_file=dict( + argstr="-Rvar %s", + extensions=None, + ), + verb=dict( + argstr="-verb", + ), + wherr_file=dict( + argstr="-Rwherr %s", + extensions=None, + ), ) inputs = Remlfit.input_spec() @@ -54,19 +164,45 @@ def test_Remlfit_inputs(): def test_Remlfit_outputs(): output_map = dict( - errts_file=dict(extensions=None,), - fitts_file=dict(extensions=None,), - glt_file=dict(extensions=None,), - obeta=dict(extensions=None,), - obuck=dict(extensions=None,), - oerrts=dict(extensions=None,), - ofitts=dict(extensions=None,), - oglt=dict(extensions=None,), - out_file=dict(extensions=None,), - ovar=dict(extensions=None,), - rbeta_file=dict(extensions=None,), - var_file=dict(extensions=None,), - wherr_file=dict(extensions=None,), + errts_file=dict( + extensions=None, + ), + fitts_file=dict( + extensions=None, + ), + glt_file=dict( + extensions=None, + ), + obeta=dict( + extensions=None, + ), + obuck=dict( + extensions=None, + ), + oerrts=dict( + extensions=None, + ), + ofitts=dict( + extensions=None, + ), + 
oglt=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + ovar=dict( + extensions=None, + ), + rbeta_file=dict( + extensions=None, + ), + var_file=dict( + extensions=None, + ), + wherr_file=dict( + extensions=None, + ), ) outputs = Remlfit.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Resample.py b/nipype/interfaces/afni/tests/test_auto_Resample.py index 3053112816..792c03aa9f 100644 --- a/nipype/interfaces/afni/tests/test_auto_Resample.py +++ b/nipype/interfaces/afni/tests/test_auto_Resample.py @@ -4,8 +4,13 @@ def test_Resample_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( argstr="-inset %s", copyfile=False, @@ -13,9 +18,17 @@ def test_Resample_inputs(): mandatory=True, position=-1, ), - master=dict(argstr="-master %s", extensions=None,), - num_threads=dict(nohash=True, usedefault=True,), - orientation=dict(argstr="-orient %s",), + master=dict( + argstr="-master %s", + extensions=None, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + orientation=dict( + argstr="-orient %s", + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -23,8 +36,12 @@ def test_Resample_inputs(): name_template="%s_resample", ), outputtype=dict(), - resample_mode=dict(argstr="-rmode %s",), - voxel_size=dict(argstr="-dxyz %f %f %f",), + resample_mode=dict( + argstr="-rmode %s", + ), + voxel_size=dict( + argstr="-dxyz %f %f %f", + ), ) inputs = Resample.input_spec() @@ -34,7 +51,11 @@ def test_Resample_inputs(): def test_Resample_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Resample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Retroicor.py b/nipype/interfaces/afni/tests/test_auto_Retroicor.py index 9c95116d09..03039a291f 100644 --- a/nipype/interfaces/afni/tests/test_auto_Retroicor.py +++ b/nipype/interfaces/afni/tests/test_auto_Retroicor.py @@ -4,17 +4,39 @@ def test_Retroicor_inputs(): input_map = dict( - args=dict(argstr="%s",), - card=dict(argstr="-card %s", extensions=None, position=-2,), + args=dict( + argstr="%s", + ), + card=dict( + argstr="-card %s", + extensions=None, + position=-2, + ), cardphase=dict( - argstr="-cardphase %s", extensions=None, hash_files=False, position=-6, + argstr="-cardphase %s", + extensions=None, + hash_files=False, + position=-6, + ), + environ=dict( + nohash=True, + usedefault=True, ), - environ=dict(nohash=True, usedefault=True,), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + order=dict( + argstr="-order %s", + position=-5, ), - num_threads=dict(nohash=True, usedefault=True,), - order=dict(argstr="-order %s", position=-5,), out_file=dict( argstr="-prefix %s", extensions=None, @@ -23,11 +45,21 @@ def test_Retroicor_inputs(): position=1, ), outputtype=dict(), - resp=dict(argstr="-resp %s", extensions=None, position=-3,), + resp=dict( + argstr="-resp %s", + extensions=None, + position=-3, + ), respphase=dict( - argstr="-respphase %s", extensions=None, hash_files=False, position=-7, + argstr="-respphase %s", + extensions=None, + hash_files=False, + position=-7, + ), + threshold=dict( + argstr="-threshold %d", + 
position=-4, ), - threshold=dict(argstr="-threshold %d", position=-4,), ) inputs = Retroicor.input_spec() @@ -37,7 +69,11 @@ def test_Retroicor_inputs(): def test_Retroicor_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Retroicor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_SVMTest.py b/nipype/interfaces/afni/tests/test_auto_SVMTest.py index e4ab1e00bc..665a4a6156 100644 --- a/nipype/interfaces/afni/tests/test_auto_SVMTest.py +++ b/nipype/interfaces/afni/tests/test_auto_SVMTest.py @@ -4,21 +4,51 @@ def test_SVMTest_inputs(): input_map = dict( - args=dict(argstr="%s",), - classout=dict(argstr="-classout",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-testvol %s", extensions=None, mandatory=True,), - model=dict(argstr="-model %s", mandatory=True,), - multiclass=dict(argstr="-multiclass %s",), - nodetrend=dict(argstr="-nodetrend",), - nopredcensord=dict(argstr="-nopredcensord",), - num_threads=dict(nohash=True, usedefault=True,), - options=dict(argstr="%s",), + args=dict( + argstr="%s", + ), + classout=dict( + argstr="-classout", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-testvol %s", + extensions=None, + mandatory=True, + ), + model=dict( + argstr="-model %s", + mandatory=True, + ), + multiclass=dict( + argstr="-multiclass %s", + ), + nodetrend=dict( + argstr="-nodetrend", + ), + nopredcensord=dict( + argstr="-nopredcensord", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + options=dict( + argstr="%s", + ), out_file=dict( - argstr="-predictions %s", extensions=None, name_template="%s_predictions", + argstr="-predictions %s", + extensions=None, + name_template="%s_predictions", ), outputtype=dict(), - testlabels=dict(argstr="-testlabels %s", extensions=None,), + testlabels=dict( + argstr="-testlabels %s", + extensions=None, + ), ) inputs = SVMTest.input_spec() @@ -28,7 +58,11 @@ def test_SVMTest_inputs(): def test_SVMTest_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SVMTest.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_SVMTrain.py b/nipype/interfaces/afni/tests/test_auto_SVMTrain.py index 5edf36b7fa..f79bf1b9ac 100644 --- a/nipype/interfaces/afni/tests/test_auto_SVMTrain.py +++ b/nipype/interfaces/afni/tests/test_auto_SVMTrain.py @@ -11,15 +11,35 @@ def test_SVMTrain_inputs(): name_template="%s_alphas", suffix="_alphas", ), - args=dict(argstr="%s",), - censor=dict(argstr="-censor %s", extensions=None,), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + censor=dict( + argstr="-censor %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="-trainvol %s", copyfile=False, extensions=None, mandatory=True, + argstr="-trainvol %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + kernel=dict( + argstr="-kernel %s", + ), + mask=dict( + argstr="-mask %s", + copyfile=False, + extensions=None, + position=-1, + ), + max_iterations=dict( + argstr="-max_iterations %d", ), - kernel=dict(argstr="-kernel %s",), - mask=dict(argstr="-mask %s", copyfile=False, extensions=None, position=-1,), - max_iterations=dict(argstr="-max_iterations %d",), model=dict( argstr="-model %s", extensions=None, @@ -27,9 +47,16 @@ 
def test_SVMTrain_inputs(): name_template="%s_model", suffix="_model", ), - nomodelmask=dict(argstr="-nomodelmask",), - num_threads=dict(nohash=True, usedefault=True,), - options=dict(argstr="%s",), + nomodelmask=dict( + argstr="-nomodelmask", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + options=dict( + argstr="%s", + ), out_file=dict( argstr="-bucket %s", extensions=None, @@ -38,9 +65,17 @@ def test_SVMTrain_inputs(): suffix="_bucket", ), outputtype=dict(), - trainlabels=dict(argstr="-trainlabels %s", extensions=None,), - ttype=dict(argstr="-type %s", mandatory=True,), - w_out=dict(argstr="-wout",), + trainlabels=dict( + argstr="-trainlabels %s", + extensions=None, + ), + ttype=dict( + argstr="-type %s", + mandatory=True, + ), + w_out=dict( + argstr="-wout", + ), ) inputs = SVMTrain.input_spec() @@ -51,9 +86,15 @@ def test_SVMTrain_inputs(): def test_SVMTrain_outputs(): output_map = dict( - alphas=dict(extensions=None,), - model=dict(extensions=None,), - out_file=dict(extensions=None,), + alphas=dict( + extensions=None, + ), + model=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), ) outputs = SVMTrain.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Seg.py b/nipype/interfaces/afni/tests/test_auto_Seg.py index f243aac8c7..38b256d1ea 100644 --- a/nipype/interfaces/afni/tests/test_auto_Seg.py +++ b/nipype/interfaces/afni/tests/test_auto_Seg.py @@ -4,13 +4,28 @@ def test_Seg_inputs(): input_map = dict( - args=dict(argstr="%s",), - bias_classes=dict(argstr="-bias_classes %s",), - bias_fwhm=dict(argstr="-bias_fwhm %f",), - blur_meth=dict(argstr="-blur_meth %s",), - bmrf=dict(argstr="-bmrf %f",), - classes=dict(argstr="-classes %s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + bias_classes=dict( + argstr="-bias_classes %s", + ), + bias_fwhm=dict( + argstr="-bias_fwhm %f", + ), + blur_meth=dict( + argstr="-blur_meth %s", + ), + bmrf=dict( + argstr="-bmrf %f", + ), + classes=dict( + argstr="-classes %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( argstr="-anat %s", copyfile=True, @@ -18,11 +33,23 @@ def test_Seg_inputs(): mandatory=True, position=-1, ), - main_N=dict(argstr="-main_N %d",), - mask=dict(argstr="-mask %s", mandatory=True, position=-2,), - mixfloor=dict(argstr="-mixfloor %f",), - mixfrac=dict(argstr="-mixfrac %s",), - prefix=dict(argstr="-prefix %s",), + main_N=dict( + argstr="-main_N %d", + ), + mask=dict( + argstr="-mask %s", + mandatory=True, + position=-2, + ), + mixfloor=dict( + argstr="-mixfloor %f", + ), + mixfrac=dict( + argstr="-mixfrac %s", + ), + prefix=dict( + argstr="-prefix %s", + ), ) inputs = Seg.input_spec() @@ -32,7 +59,11 @@ def test_Seg_inputs(): def test_Seg_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Seg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_SkullStrip.py b/nipype/interfaces/afni/tests/test_auto_SkullStrip.py index 4fbf078da5..4f78254e47 100644 --- a/nipype/interfaces/afni/tests/test_auto_SkullStrip.py +++ b/nipype/interfaces/afni/tests/test_auto_SkullStrip.py @@ -4,8 +4,13 @@ def test_SkullStrip_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( argstr="-input %s", copyfile=False, @@ -13,7 +18,10 @@ def 
test_SkullStrip_inputs(): mandatory=True, position=1, ), - num_threads=dict(nohash=True, usedefault=True,), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -30,7 +38,11 @@ def test_SkullStrip_inputs(): def test_SkullStrip_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SkullStrip.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Synthesize.py b/nipype/interfaces/afni/tests/test_auto_Synthesize.py index df23d9923b..9f787edbfc 100644 --- a/nipype/interfaces/afni/tests/test_auto_Synthesize.py +++ b/nipype/interfaces/afni/tests/test_auto_Synthesize.py @@ -4,21 +4,48 @@ def test_Synthesize_inputs(): input_map = dict( - TR=dict(argstr="-TR %f",), - args=dict(argstr="%s",), + TR=dict( + argstr="-TR %f", + ), + args=dict( + argstr="%s", + ), cbucket=dict( - argstr="-cbucket %s", copyfile=False, extensions=None, mandatory=True, + argstr="-cbucket %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + cenfill=dict( + argstr="-cenfill %s", + ), + dry_run=dict( + argstr="-dry", + ), + environ=dict( + nohash=True, + usedefault=True, ), - cenfill=dict(argstr="-cenfill %s",), - dry_run=dict(argstr="-dry",), - environ=dict(nohash=True, usedefault=True,), matrix=dict( - argstr="-matrix %s", copyfile=False, extensions=None, mandatory=True, + argstr="-matrix %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr="-prefix %s", + extensions=None, + name_template="syn", ), - num_threads=dict(nohash=True, usedefault=True,), - out_file=dict(argstr="-prefix %s", extensions=None, name_template="syn",), outputtype=dict(), - select=dict(argstr="-select %s", mandatory=True,), + select=dict( + argstr="-select %s", + mandatory=True, + ), ) inputs = Synthesize.input_spec() @@ -28,7 +55,11 @@ def test_Synthesize_inputs(): def test_Synthesize_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Synthesize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TCat.py b/nipype/interfaces/afni/tests/test_auto_TCat.py index b9a4a16054..595e91383e 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCat.py +++ b/nipype/interfaces/afni/tests/test_auto_TCat.py @@ -4,10 +4,23 @@ def test_TCat_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_files=dict(argstr=" %s", copyfile=False, mandatory=True, position=-1,), - num_threads=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr=" %s", + copyfile=False, + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -15,8 +28,13 @@ def test_TCat_inputs(): name_template="%s_tcat", ), outputtype=dict(), - rlt=dict(argstr="-rlt%s", position=1,), - verbose=dict(argstr="-verb",), + rlt=dict( + argstr="-rlt%s", + position=1, + ), + verbose=dict( + argstr="-verb", + ), ) inputs = TCat.input_spec() @@ -26,7 +44,11 @@ def test_TCat_inputs(): def test_TCat_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) 
outputs = TCat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py b/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py index 7644b191a6..728d281d27 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py +++ b/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py @@ -4,13 +4,33 @@ def test_TCatSubBrick_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_files=dict(argstr="%s%s ...", copyfile=False, mandatory=True, position=-1,), - num_threads=dict(nohash=True, usedefault=True,), - out_file=dict(argstr="-prefix %s", extensions=None, genfile=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr="%s%s ...", + copyfile=False, + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr="-prefix %s", + extensions=None, + genfile=True, + ), outputtype=dict(), - rlt=dict(argstr="-rlt%s", position=1,), + rlt=dict( + argstr="-rlt%s", + position=1, + ), ) inputs = TCatSubBrick.input_spec() @@ -20,7 +40,11 @@ def test_TCatSubBrick_inputs(): def test_TCatSubBrick_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TCatSubBrick.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TCorr1D.py b/nipype/interfaces/afni/tests/test_auto_TCorr1D.py index 0a306ceca3..665a0dfc3d 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCorr1D.py +++ b/nipype/interfaces/afni/tests/test_auto_TCorr1D.py @@ -4,12 +4,22 @@ def test_TCorr1D_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), ktaub=dict( - argstr=" -ktaub", position=1, xor=["pearson", "spearman", "quadrant"], + argstr=" -ktaub", + position=1, + xor=["pearson", "spearman", "quadrant"], + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - num_threads=dict(nohash=True, usedefault=True,), out_file=dict( argstr="-prefix %s", extensions=None, @@ -19,18 +29,33 @@ def test_TCorr1D_inputs(): ), outputtype=dict(), pearson=dict( - argstr=" -pearson", position=1, xor=["spearman", "quadrant", "ktaub"], + argstr=" -pearson", + position=1, + xor=["spearman", "quadrant", "ktaub"], ), quadrant=dict( - argstr=" -quadrant", position=1, xor=["pearson", "spearman", "ktaub"], + argstr=" -quadrant", + position=1, + xor=["pearson", "spearman", "ktaub"], ), spearman=dict( - argstr=" -spearman", position=1, xor=["pearson", "quadrant", "ktaub"], + argstr=" -spearman", + position=1, + xor=["pearson", "quadrant", "ktaub"], ), xset=dict( - argstr=" %s", copyfile=False, extensions=None, mandatory=True, position=-2, + argstr=" %s", + copyfile=False, + extensions=None, + mandatory=True, + position=-2, + ), + y_1d=dict( + argstr=" %s", + extensions=None, + mandatory=True, + position=-1, ), - y_1d=dict(argstr=" %s", extensions=None, mandatory=True, position=-1,), ) inputs = TCorr1D.input_spec() @@ -40,7 +65,11 @@ def test_TCorr1D_inputs(): def test_TCorr1D_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TCorr1D.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/afni/tests/test_auto_TCorrMap.py b/nipype/interfaces/afni/tests/test_auto_TCorrMap.py index 5a03aa3732..8e6b1860ff 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCorrMap.py +++ b/nipype/interfaces/afni/tests/test_auto_TCorrMap.py @@ -15,8 +15,12 @@ def test_TCorrMap_inputs(): "var_absolute_threshold_normalize", ), ), - args=dict(argstr="%s",), - automask=dict(argstr="-automask",), + args=dict( + argstr="%s", + ), + automask=dict( + argstr="-automask", + ), average_expr=dict( argstr="-Aexpr %s %s", extensions=None, @@ -31,15 +35,26 @@ def test_TCorrMap_inputs(): suffix="_cexpr", xor=("average_expr", "average_expr_nonzero", "sum_expr"), ), - bandpass=dict(argstr="-bpass %f %f",), - blur_fwhm=dict(argstr="-Gblur %f",), + bandpass=dict( + argstr="-bpass %f %f", + ), + blur_fwhm=dict( + argstr="-Gblur %f", + ), correlation_maps=dict( - argstr="-CorrMap %s", extensions=None, name_source="in_file", + argstr="-CorrMap %s", + extensions=None, + name_source="in_file", ), correlation_maps_masked=dict( - argstr="-CorrMask %s", extensions=None, name_source="in_file", + argstr="-CorrMask %s", + extensions=None, + name_source="in_file", + ), + environ=dict( + nohash=True, + usedefault=True, ), - environ=dict(nohash=True, usedefault=True,), expr=dict(), histogram=dict( argstr="-Hist %d %s", @@ -49,13 +64,25 @@ def test_TCorrMap_inputs(): ), histogram_bin_numbers=dict(), in_file=dict( - argstr="-input %s", copyfile=False, extensions=None, mandatory=True, + argstr="-input %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + mask=dict( + argstr="-mask %s", + extensions=None, ), - mask=dict(argstr="-mask %s", extensions=None,), mean_file=dict( - argstr="-Mean %s", extensions=None, name_source="in_file", suffix="_mean", + argstr="-Mean %s", + extensions=None, + name_source="in_file", + suffix="_mean", + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - num_threads=dict(nohash=True, usedefault=True,), out_file=dict( argstr="-prefix %s", extensions=None, @@ -64,15 +91,33 @@ def test_TCorrMap_inputs(): ), outputtype=dict(), pmean=dict( - argstr="-Pmean %s", extensions=None, name_source="in_file", suffix="_pmean", + argstr="-Pmean %s", + extensions=None, + name_source="in_file", + suffix="_pmean", + ), + polort=dict( + argstr="-polort %d", ), - polort=dict(argstr="-polort %d",), qmean=dict( - argstr="-Qmean %s", extensions=None, name_source="in_file", suffix="_qmean", + argstr="-Qmean %s", + extensions=None, + name_source="in_file", + suffix="_qmean", + ), + regress_out_timeseries=dict( + argstr="-ort %s", + extensions=None, + ), + seeds=dict( + argstr="-seed %s", + extensions=None, + xor="seeds_width", + ), + seeds_width=dict( + argstr="-Mseed %f", + xor="seeds", ), - regress_out_timeseries=dict(argstr="-ort %s", extensions=None,), - seeds=dict(argstr="-seed %s", extensions=None, xor="seeds_width",), - seeds_width=dict(argstr="-Mseed %f", xor="seeds",), sum_expr=dict( argstr="-Sexpr %s %s", extensions=None, @@ -104,7 +149,10 @@ def test_TCorrMap_inputs(): ), ), zmean=dict( - argstr="-Zmean %s", extensions=None, name_source="in_file", suffix="_zmean", + argstr="-Zmean %s", + extensions=None, + name_source="in_file", + suffix="_zmean", ), ) inputs = TCorrMap.input_spec() @@ -116,19 +164,45 @@ def test_TCorrMap_inputs(): def test_TCorrMap_outputs(): output_map = dict( - absolute_threshold=dict(extensions=None,), - average_expr=dict(extensions=None,), - average_expr_nonzero=dict(extensions=None,), - correlation_maps=dict(extensions=None,), - 
correlation_maps_masked=dict(extensions=None,), - histogram=dict(extensions=None,), - mean_file=dict(extensions=None,), - pmean=dict(extensions=None,), - qmean=dict(extensions=None,), - sum_expr=dict(extensions=None,), - var_absolute_threshold=dict(extensions=None,), - var_absolute_threshold_normalize=dict(extensions=None,), - zmean=dict(extensions=None,), + absolute_threshold=dict( + extensions=None, + ), + average_expr=dict( + extensions=None, + ), + average_expr_nonzero=dict( + extensions=None, + ), + correlation_maps=dict( + extensions=None, + ), + correlation_maps_masked=dict( + extensions=None, + ), + histogram=dict( + extensions=None, + ), + mean_file=dict( + extensions=None, + ), + pmean=dict( + extensions=None, + ), + qmean=dict( + extensions=None, + ), + sum_expr=dict( + extensions=None, + ), + var_absolute_threshold=dict( + extensions=None, + ), + var_absolute_threshold_normalize=dict( + extensions=None, + ), + zmean=dict( + extensions=None, + ), ) outputs = TCorrMap.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_TCorrelate.py b/nipype/interfaces/afni/tests/test_auto_TCorrelate.py index 0bf794886b..1e85d44b68 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCorrelate.py +++ b/nipype/interfaces/afni/tests/test_auto_TCorrelate.py @@ -4,9 +4,17 @@ def test_TCorrelate_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - num_threads=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -14,13 +22,25 @@ def test_TCorrelate_inputs(): name_template="%s_tcorr", ), outputtype=dict(), - pearson=dict(argstr="-pearson",), - polort=dict(argstr="-polort %d",), + pearson=dict( + argstr="-pearson", + ), + polort=dict( + argstr="-polort %d", + ), xset=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-2, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-2, ), yset=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, ), ) inputs = TCorrelate.input_spec() @@ -31,7 +51,11 @@ def test_TCorrelate_inputs(): def test_TCorrelate_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TCorrelate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TNorm.py b/nipype/interfaces/afni/tests/test_auto_TNorm.py index 8a8493f927..975345d92a 100644 --- a/nipype/interfaces/afni/tests/test_auto_TNorm.py +++ b/nipype/interfaces/afni/tests/test_auto_TNorm.py @@ -4,17 +4,39 @@ def test_TNorm_inputs(): input_map = dict( - L1fit=dict(argstr="-L1fit",), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + L1fit=dict( + argstr="-L1fit", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + norm1=dict( + argstr="-norm1", + ), + norm2=dict( + argstr="-norm2", + ), + normR=dict( + argstr="-normR", + ), + normx=dict( + argstr="-normx", + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - 
norm1=dict(argstr="-norm1",), - norm2=dict(argstr="-norm2",), - normR=dict(argstr="-normR",), - normx=dict(argstr="-normx",), - num_threads=dict(nohash=True, usedefault=True,), out_file=dict( argstr="-prefix %s", extensions=None, @@ -22,7 +44,9 @@ def test_TNorm_inputs(): name_template="%s_tnorm", ), outputtype=dict(), - polort=dict(argstr="-polort %s",), + polort=dict( + argstr="-polort %s", + ), ) inputs = TNorm.input_spec() @@ -32,7 +56,11 @@ def test_TNorm_inputs(): def test_TNorm_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TNorm.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TProject.py b/nipype/interfaces/afni/tests/test_auto_TProject.py index e300cb16be..d8f9990817 100644 --- a/nipype/interfaces/afni/tests/test_auto_TProject.py +++ b/nipype/interfaces/afni/tests/test_auto_TProject.py @@ -4,17 +4,43 @@ def test_TProject_inputs(): input_map = dict( - TR=dict(argstr="-TR %g",), - args=dict(argstr="%s",), - automask=dict(argstr="-automask", xor=["mask"],), - bandpass=dict(argstr="-bandpass %g %g",), - blur=dict(argstr="-blur %g",), - cenmode=dict(argstr="-cenmode %s",), - censor=dict(argstr="-censor %s", extensions=None,), - censortr=dict(argstr="-CENSORTR %s",), - concat=dict(argstr="-concat %s", extensions=None,), - dsort=dict(argstr="-dsort %s...",), - environ=dict(nohash=True, usedefault=True,), + TR=dict( + argstr="-TR %g", + ), + args=dict( + argstr="%s", + ), + automask=dict( + argstr="-automask", + xor=["mask"], + ), + bandpass=dict( + argstr="-bandpass %g %g", + ), + blur=dict( + argstr="-blur %g", + ), + cenmode=dict( + argstr="-cenmode %s", + ), + censor=dict( + argstr="-censor %s", + extensions=None, + ), + censortr=dict( + argstr="-CENSORTR %s", + ), + concat=dict( + argstr="-concat %s", + extensions=None, + ), + dsort=dict( + argstr="-dsort %s...", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( argstr="-input %s", copyfile=False, @@ -22,11 +48,24 @@ def test_TProject_inputs(): mandatory=True, position=1, ), - mask=dict(argstr="-mask %s", extensions=None,), - noblock=dict(argstr="-noblock",), - norm=dict(argstr="-norm",), - num_threads=dict(nohash=True, usedefault=True,), - ort=dict(argstr="-ort %s", extensions=None,), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + noblock=dict( + argstr="-noblock", + ), + norm=dict( + argstr="-norm", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + ort=dict( + argstr="-ort %s", + extensions=None, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -35,8 +74,12 @@ def test_TProject_inputs(): position=-1, ), outputtype=dict(), - polort=dict(argstr="-polort %d",), - stopband=dict(argstr="-stopband %g %g",), + polort=dict( + argstr="-polort %d", + ), + stopband=dict( + argstr="-stopband %g %g", + ), ) inputs = TProject.input_spec() @@ -46,7 +89,11 @@ def test_TProject_inputs(): def test_TProject_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TProject.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TShift.py b/nipype/interfaces/afni/tests/test_auto_TShift.py index 9f1e6c3342..78af699c7c 100644 --- a/nipype/interfaces/afni/tests/test_auto_TShift.py +++ b/nipype/interfaces/afni/tests/test_auto_TShift.py @@ -4,14 +4,30 @@ def test_TShift_inputs(): input_map = dict( - 
args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - ignore=dict(argstr="-ignore %s",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ignore=dict( + argstr="-ignore %s", + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + interp=dict( + argstr="-%s", + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - interp=dict(argstr="-%s",), - num_threads=dict(nohash=True, usedefault=True,), out_file=dict( argstr="-prefix %s", extensions=None, @@ -19,14 +35,34 @@ def test_TShift_inputs(): name_template="%s_tshift", ), outputtype=dict(), - rlt=dict(argstr="-rlt",), - rltplus=dict(argstr="-rlt+",), - slice_encoding_direction=dict(usedefault=True,), - slice_timing=dict(argstr="-tpattern @%s", xor=["tpattern"],), - tpattern=dict(argstr="-tpattern %s", xor=["slice_timing"],), - tr=dict(argstr="-TR %s",), - tslice=dict(argstr="-slice %s", xor=["tzero"],), - tzero=dict(argstr="-tzero %s", xor=["tslice"],), + rlt=dict( + argstr="-rlt", + ), + rltplus=dict( + argstr="-rlt+", + ), + slice_encoding_direction=dict( + usedefault=True, + ), + slice_timing=dict( + argstr="-tpattern @%s", + xor=["tpattern"], + ), + tpattern=dict( + argstr="-tpattern %s", + xor=["slice_timing"], + ), + tr=dict( + argstr="-TR %s", + ), + tslice=dict( + argstr="-slice %s", + xor=["tzero"], + ), + tzero=dict( + argstr="-tzero %s", + xor=["tslice"], + ), ) inputs = TShift.input_spec() @@ -37,7 +73,12 @@ def test_TShift_inputs(): def test_TShift_outputs(): output_map = dict( - out_file=dict(extensions=None,), timing_file=dict(extensions=None,), + out_file=dict( + extensions=None, + ), + timing_file=dict( + extensions=None, + ), ) outputs = TShift.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_TSmooth.py b/nipype/interfaces/afni/tests/test_auto_TSmooth.py index 1223686b83..1836d31c22 100644 --- a/nipype/interfaces/afni/tests/test_auto_TSmooth.py +++ b/nipype/interfaces/afni/tests/test_auto_TSmooth.py @@ -4,21 +4,52 @@ def test_TSmooth_inputs(): input_map = dict( - adaptive=dict(argstr="-adaptive %d",), - args=dict(argstr="%s",), - blackman=dict(argstr="-blackman %d",), - custom=dict(argstr="-custom %s", extensions=None,), - datum=dict(argstr="-datum %s",), - environ=dict(nohash=True, usedefault=True,), - hamming=dict(argstr="-hamming %d",), + adaptive=dict( + argstr="-adaptive %d", + ), + args=dict( + argstr="%s", + ), + blackman=dict( + argstr="-blackman %d", + ), + custom=dict( + argstr="-custom %s", + extensions=None, + ), + datum=dict( + argstr="-datum %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hamming=dict( + argstr="-hamming %d", + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + lin=dict( + argstr="-lin", + ), + lin3=dict( + argstr="-3lin %d", + ), + med=dict( + argstr="-med", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + osf=dict( + argstr="-osf", ), - lin=dict(argstr="-lin",), - lin3=dict(argstr="-3lin %d",), - med=dict(argstr="-med",), - num_threads=dict(nohash=True, usedefault=True,), - osf=dict(argstr="-osf",), out_file=dict( argstr="-prefix %s", extensions=None, @@ -35,7 +66,11 @@ def test_TSmooth_inputs(): def test_TSmooth_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + 
out_file=dict( + extensions=None, + ), + ) outputs = TSmooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TStat.py b/nipype/interfaces/afni/tests/test_auto_TStat.py index 0a7a99da76..b7499c5442 100644 --- a/nipype/interfaces/afni/tests/test_auto_TStat.py +++ b/nipype/interfaces/afni/tests/test_auto_TStat.py @@ -4,14 +4,31 @@ def test_TStat_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + options=dict( + argstr="%s", ), - mask=dict(argstr="-mask %s", extensions=None,), - num_threads=dict(nohash=True, usedefault=True,), - options=dict(argstr="%s",), out_file=dict( argstr="-prefix %s", extensions=None, @@ -28,7 +45,11 @@ def test_TStat_inputs(): def test_TStat_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TStat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_To3D.py b/nipype/interfaces/afni/tests/test_auto_To3D.py index ffed9ba623..6121efbe57 100644 --- a/nipype/interfaces/afni/tests/test_auto_To3D.py +++ b/nipype/interfaces/afni/tests/test_auto_To3D.py @@ -4,14 +4,34 @@ def test_To3D_inputs(): input_map = dict( - args=dict(argstr="%s",), - assumemosaic=dict(argstr="-assume_dicom_mosaic",), - datatype=dict(argstr="-datum %s",), - environ=dict(nohash=True, usedefault=True,), - filetype=dict(argstr="-%s",), - funcparams=dict(argstr="-time:zt %s alt+z2",), - in_folder=dict(argstr="%s/*.dcm", mandatory=True, position=-1,), - num_threads=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + assumemosaic=dict( + argstr="-assume_dicom_mosaic", + ), + datatype=dict( + argstr="-datum %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + filetype=dict( + argstr="-%s", + ), + funcparams=dict( + argstr="-time:zt %s alt+z2", + ), + in_folder=dict( + argstr="%s/*.dcm", + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -19,7 +39,9 @@ def test_To3D_inputs(): name_template="%s", ), outputtype=dict(), - skipoutliers=dict(argstr="-skip_outliers",), + skipoutliers=dict( + argstr="-skip_outliers", + ), ) inputs = To3D.input_spec() @@ -29,7 +51,11 @@ def test_To3D_inputs(): def test_To3D_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = To3D.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Undump.py b/nipype/interfaces/afni/tests/test_auto_Undump.py index 3679a118fe..64c98cf25a 100644 --- a/nipype/interfaces/afni/tests/test_auto_Undump.py +++ b/nipype/interfaces/afni/tests/test_auto_Undump.py @@ -4,13 +4,28 @@ def test_Undump_inputs(): input_map = dict( - args=dict(argstr="%s",), - coordinates_specification=dict(argstr="-%s",), - datatype=dict(argstr="-datum %s",), - default_value=dict(argstr="-dval %f",), - environ=dict(nohash=True, usedefault=True,), - 
fill_value=dict(argstr="-fval %f",), - head_only=dict(argstr="-head_only",), + args=dict( + argstr="%s", + ), + coordinates_specification=dict( + argstr="-%s", + ), + datatype=dict( + argstr="-datum %s", + ), + default_value=dict( + argstr="-dval %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fill_value=dict( + argstr="-fval %f", + ), + head_only=dict( + argstr="-head_only", + ), in_file=dict( argstr="-master %s", copyfile=False, @@ -18,12 +33,26 @@ def test_Undump_inputs(): mandatory=True, position=-1, ), - mask_file=dict(argstr="-mask %s", extensions=None,), - num_threads=dict(nohash=True, usedefault=True,), - orient=dict(argstr="-orient %s",), - out_file=dict(argstr="-prefix %s", extensions=None, name_source="in_file",), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + orient=dict( + argstr="-orient %s", + ), + out_file=dict( + argstr="-prefix %s", + extensions=None, + name_source="in_file", + ), outputtype=dict(), - srad=dict(argstr="-srad %f",), + srad=dict( + argstr="-srad %f", + ), ) inputs = Undump.input_spec() @@ -33,7 +62,11 @@ def test_Undump_inputs(): def test_Undump_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Undump.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Unifize.py b/nipype/interfaces/afni/tests/test_auto_Unifize.py index 71331215b7..15832152c2 100644 --- a/nipype/interfaces/afni/tests/test_auto_Unifize.py +++ b/nipype/interfaces/afni/tests/test_auto_Unifize.py @@ -4,11 +4,24 @@ def test_Unifize_inputs(): input_map = dict( - args=dict(argstr="%s",), - cl_frac=dict(argstr="-clfrac %f",), - environ=dict(nohash=True, usedefault=True,), - epi=dict(argstr="-EPI", requires=["no_duplo", "t2"], xor=["gm"],), - gm=dict(argstr="-GM",), + args=dict( + argstr="%s", + ), + cl_frac=dict( + argstr="-clfrac %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + epi=dict( + argstr="-EPI", + requires=["no_duplo", "t2"], + xor=["gm"], + ), + gm=dict( + argstr="-GM", + ), in_file=dict( argstr="-input %s", copyfile=False, @@ -16,8 +29,13 @@ def test_Unifize_inputs(): mandatory=True, position=-1, ), - no_duplo=dict(argstr="-noduplo",), - num_threads=dict(nohash=True, usedefault=True,), + no_duplo=dict( + argstr="-noduplo", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -25,12 +43,25 @@ def test_Unifize_inputs(): name_template="%s_unifized", ), outputtype=dict(), - quiet=dict(argstr="-quiet",), - rbt=dict(argstr="-rbt %f %f %f",), - scale_file=dict(argstr="-ssave %s", extensions=None,), - t2=dict(argstr="-T2",), - t2_up=dict(argstr="-T2up %f",), - urad=dict(argstr="-Urad %s",), + quiet=dict( + argstr="-quiet", + ), + rbt=dict( + argstr="-rbt %f %f %f", + ), + scale_file=dict( + argstr="-ssave %s", + extensions=None, + ), + t2=dict( + argstr="-T2", + ), + t2_up=dict( + argstr="-T2up %f", + ), + urad=dict( + argstr="-Urad %s", + ), ) inputs = Unifize.input_spec() @@ -41,7 +72,12 @@ def test_Unifize_inputs(): def test_Unifize_outputs(): output_map = dict( - out_file=dict(extensions=None,), scale_file=dict(extensions=None,), + out_file=dict( + extensions=None, + ), + scale_file=dict( + extensions=None, + ), ) outputs = Unifize.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Volreg.py b/nipype/interfaces/afni/tests/test_auto_Volreg.py index 
9d7c6aa69b..658b933391 100644 --- a/nipype/interfaces/afni/tests/test_auto_Volreg.py +++ b/nipype/interfaces/afni/tests/test_auto_Volreg.py @@ -4,15 +4,34 @@ def test_Volreg_inputs(): input_map = dict( - args=dict(argstr="%s",), - basefile=dict(argstr="-base %s", extensions=None, position=-6,), - copyorigin=dict(argstr="-twodup",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + basefile=dict( + argstr="-base %s", + extensions=None, + position=-6, + ), + copyorigin=dict( + argstr="-twodup", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + in_weight_volume=dict( + argstr="-weight '%s[%d]'", + ), + interp=dict( + argstr="-%s", ), - in_weight_volume=dict(argstr="-weight '%s[%d]'",), - interp=dict(argstr="-%s",), md1d_file=dict( argstr="-maxdisp1D %s", extensions=None, @@ -21,7 +40,10 @@ def test_Volreg_inputs(): name_template="%s_md.1D", position=-4, ), - num_threads=dict(nohash=True, usedefault=True,), + num_threads=dict( + nohash=True, + usedefault=True, + ), oned_file=dict( argstr="-1Dfile %s", extensions=None, @@ -43,9 +65,16 @@ def test_Volreg_inputs(): name_template="%s_volreg", ), outputtype=dict(), - timeshift=dict(argstr="-tshift 0",), - verbose=dict(argstr="-verbose",), - zpad=dict(argstr="-zpad %d", position=-5,), + timeshift=dict( + argstr="-tshift 0", + ), + verbose=dict( + argstr="-verbose", + ), + zpad=dict( + argstr="-zpad %d", + position=-5, + ), ) inputs = Volreg.input_spec() @@ -56,10 +85,18 @@ def test_Volreg_inputs(): def test_Volreg_outputs(): output_map = dict( - md1d_file=dict(extensions=None,), - oned_file=dict(extensions=None,), - oned_matrix_save=dict(extensions=None,), - out_file=dict(extensions=None,), + md1d_file=dict( + extensions=None, + ), + oned_file=dict( + extensions=None, + ), + oned_matrix_save=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), ) outputs = Volreg.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Warp.py b/nipype/interfaces/afni/tests/test_auto_Warp.py index 5b5e9ded71..9d694f8425 100644 --- a/nipype/interfaces/afni/tests/test_auto_Warp.py +++ b/nipype/interfaces/afni/tests/test_auto_Warp.py @@ -4,19 +4,48 @@ def test_Warp_inputs(): input_map = dict( - args=dict(argstr="%s",), - deoblique=dict(argstr="-deoblique",), - environ=dict(nohash=True, usedefault=True,), - gridset=dict(argstr="-gridset %s", extensions=None,), + args=dict( + argstr="%s", + ), + deoblique=dict( + argstr="-deoblique", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gridset=dict( + argstr="-gridset %s", + extensions=None, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, - ), - interp=dict(argstr="-%s",), - matparent=dict(argstr="-matparent %s", extensions=None,), - mni2tta=dict(argstr="-mni2tta",), - newgrid=dict(argstr="-newgrid %f",), - num_threads=dict(nohash=True, usedefault=True,), - oblique_parent=dict(argstr="-oblique_parent %s", extensions=None,), + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + interp=dict( + argstr="-%s", + ), + matparent=dict( + argstr="-matparent %s", + extensions=None, + ), + mni2tta=dict( + argstr="-mni2tta", + ), + newgrid=dict( + argstr="-newgrid %f", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + oblique_parent=dict( + argstr="-oblique_parent %s", + 
extensions=None, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -25,10 +54,18 @@ def test_Warp_inputs(): name_template="%s_warp", ), outputtype=dict(), - save_warp=dict(requires=["verbose"],), - tta2mni=dict(argstr="-tta2mni",), - verbose=dict(argstr="-verb",), - zpad=dict(argstr="-zpad %d",), + save_warp=dict( + requires=["verbose"], + ), + tta2mni=dict( + argstr="-tta2mni", + ), + verbose=dict( + argstr="-verb", + ), + zpad=dict( + argstr="-zpad %d", + ), ) inputs = Warp.input_spec() @@ -39,7 +76,12 @@ def test_Warp_inputs(): def test_Warp_outputs(): output_map = dict( - out_file=dict(extensions=None,), warp_file=dict(extensions=None,), + out_file=dict( + extensions=None, + ), + warp_file=dict( + extensions=None, + ), ) outputs = Warp.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_ZCutUp.py b/nipype/interfaces/afni/tests/test_auto_ZCutUp.py index d234da5a50..3c51d6dd1d 100644 --- a/nipype/interfaces/afni/tests/test_auto_ZCutUp.py +++ b/nipype/interfaces/afni/tests/test_auto_ZCutUp.py @@ -4,13 +4,27 @@ def test_ZCutUp_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + keep=dict( + argstr="-keep %s", + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - keep=dict(argstr="-keep %s",), - num_threads=dict(nohash=True, usedefault=True,), out_file=dict( argstr="-prefix %s", extensions=None, @@ -27,7 +41,11 @@ def test_ZCutUp_inputs(): def test_ZCutUp_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ZCutUp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Zcat.py b/nipype/interfaces/afni/tests/test_auto_Zcat.py index 81251acfe8..e06f343591 100644 --- a/nipype/interfaces/afni/tests/test_auto_Zcat.py +++ b/nipype/interfaces/afni/tests/test_auto_Zcat.py @@ -4,13 +4,34 @@ def test_Zcat_inputs(): input_map = dict( - args=dict(argstr="%s",), - datum=dict(argstr="-datum %s",), - environ=dict(nohash=True, usedefault=True,), - fscale=dict(argstr="-fscale", xor=["nscale"],), - in_files=dict(argstr="%s", copyfile=False, mandatory=True, position=-1,), - nscale=dict(argstr="-nscale", xor=["fscale"],), - num_threads=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + datum=dict( + argstr="-datum %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fscale=dict( + argstr="-fscale", + xor=["nscale"], + ), + in_files=dict( + argstr="%s", + copyfile=False, + mandatory=True, + position=-1, + ), + nscale=dict( + argstr="-nscale", + xor=["fscale"], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -18,7 +39,9 @@ def test_Zcat_inputs(): name_template="%s_zcat", ), outputtype=dict(), - verb=dict(argstr="-verb",), + verb=dict( + argstr="-verb", + ), ) inputs = Zcat.input_spec() @@ -28,7 +51,11 @@ def test_Zcat_inputs(): def test_Zcat_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Zcat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Zeropad.py 
b/nipype/interfaces/afni/tests/test_auto_Zeropad.py index 6f59445034..4d6742f21e 100644 --- a/nipype/interfaces/afni/tests/test_auto_Zeropad.py +++ b/nipype/interfaces/afni/tests/test_auto_Zeropad.py @@ -4,30 +4,79 @@ def test_Zeropad_inputs(): input_map = dict( - A=dict(argstr="-A %i", xor=["master"],), - AP=dict(argstr="-AP %i", xor=["master"],), - I=dict(argstr="-I %i", xor=["master"],), - IS=dict(argstr="-IS %i", xor=["master"],), - L=dict(argstr="-L %i", xor=["master"],), - P=dict(argstr="-P %i", xor=["master"],), - R=dict(argstr="-R %i", xor=["master"],), - RL=dict(argstr="-RL %i", xor=["master"],), - S=dict(argstr="-S %i", xor=["master"],), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + A=dict( + argstr="-A %i", + xor=["master"], + ), + AP=dict( + argstr="-AP %i", + xor=["master"], + ), + I=dict( + argstr="-I %i", + xor=["master"], + ), + IS=dict( + argstr="-IS %i", + xor=["master"], + ), + L=dict( + argstr="-L %i", + xor=["master"], + ), + P=dict( + argstr="-P %i", + xor=["master"], + ), + R=dict( + argstr="-R %i", + xor=["master"], + ), + RL=dict( + argstr="-RL %i", + xor=["master"], + ), + S=dict( + argstr="-S %i", + xor=["master"], + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_files=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, ), master=dict( argstr="-master %s", extensions=None, xor=["I", "S", "A", "P", "L", "R", "z", "RL", "AP", "IS", "mm"], ), - mm=dict(argstr="-mm", xor=["master"],), - num_threads=dict(nohash=True, usedefault=True,), - out_file=dict(argstr="-prefix %s", extensions=None, name_template="zeropad",), + mm=dict( + argstr="-mm", + xor=["master"], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr="-prefix %s", + extensions=None, + name_template="zeropad", + ), outputtype=dict(), - z=dict(argstr="-z %i", xor=["master"],), + z=dict( + argstr="-z %i", + xor=["master"], + ), ) inputs = Zeropad.input_spec() @@ -37,7 +86,11 @@ def test_Zeropad_inputs(): def test_Zeropad_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Zeropad.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_AI.py b/nipype/interfaces/ants/tests/test_auto_AI.py index 3a3e1485d1..fa4cb42916 100644 --- a/nipype/interfaces/ants/tests/test_auto_AI.py +++ b/nipype/interfaces/ants/tests/test_auto_AI.py @@ -4,22 +4,70 @@ def test_AI_inputs(): input_map = dict( - args=dict(argstr="%s",), - convergence=dict(argstr="-c [%d,%g,%d]", usedefault=True,), - dimension=dict(argstr="-d %d", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - fixed_image=dict(extensions=None, mandatory=True,), - fixed_image_mask=dict(argstr="-x %s", extensions=None,), - metric=dict(argstr="-m %s", mandatory=True,), - moving_image=dict(extensions=None, mandatory=True,), - moving_image_mask=dict(extensions=None, requires=["fixed_image_mask"],), - num_threads=dict(nohash=True, usedefault=True,), - output_transform=dict(argstr="-o %s", extensions=None, usedefault=True,), - principal_axes=dict(argstr="-p %d", usedefault=True, xor=["blobs"],), - search_factor=dict(argstr="-s [%g,%g]", usedefault=True,), - search_grid=dict(argstr="-g %s",), - transform=dict(argstr="-t %s[%g]", usedefault=True,), - verbose=dict(argstr="-v %d", 
usedefault=True,), + args=dict( + argstr="%s", + ), + convergence=dict( + argstr="-c [%d,%g,%d]", + usedefault=True, + ), + dimension=dict( + argstr="-d %d", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_image=dict( + extensions=None, + mandatory=True, + ), + fixed_image_mask=dict( + argstr="-x %s", + extensions=None, + ), + metric=dict( + argstr="-m %s", + mandatory=True, + ), + moving_image=dict( + extensions=None, + mandatory=True, + ), + moving_image_mask=dict( + extensions=None, + requires=["fixed_image_mask"], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + output_transform=dict( + argstr="-o %s", + extensions=None, + usedefault=True, + ), + principal_axes=dict( + argstr="-p %d", + usedefault=True, + xor=["blobs"], + ), + search_factor=dict( + argstr="-s [%g,%g]", + usedefault=True, + ), + search_grid=dict( + argstr="-g %s", + ), + transform=dict( + argstr="-t %s[%g]", + usedefault=True, + ), + verbose=dict( + argstr="-v %d", + usedefault=True, + ), ) inputs = AI.input_spec() @@ -29,7 +77,11 @@ def test_AI_inputs(): def test_AI_outputs(): - output_map = dict(output_transform=dict(extensions=None,),) + output_map = dict( + output_transform=dict( + extensions=None, + ), + ) outputs = AI.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_ANTS.py b/nipype/interfaces/ants/tests/test_auto_ANTS.py index f3e329f957..17f456e0dd 100644 --- a/nipype/interfaces/ants/tests/test_auto_ANTS.py +++ b/nipype/interfaces/ants/tests/test_auto_ANTS.py @@ -4,35 +4,97 @@ def test_ANTS_inputs(): input_map = dict( - affine_gradient_descent_option=dict(argstr="%s",), - args=dict(argstr="%s",), - delta_time=dict(requires=["number_of_time_steps"],), - dimension=dict(argstr="%d", position=1,), - environ=dict(nohash=True, usedefault=True,), - fixed_image=dict(mandatory=True,), - gradient_step_length=dict(requires=["transformation_model"],), - metric=dict(mandatory=True,), - metric_weight=dict(mandatory=True, requires=["metric"], usedefault=True,), - mi_option=dict(argstr="--MI-option %s", sep="x",), - moving_image=dict(argstr="%s", mandatory=True,), - num_threads=dict(nohash=True, usedefault=True,), + affine_gradient_descent_option=dict( + argstr="%s", + ), + args=dict( + argstr="%s", + ), + delta_time=dict( + requires=["number_of_time_steps"], + ), + dimension=dict( + argstr="%d", + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_image=dict( + mandatory=True, + ), + gradient_step_length=dict( + requires=["transformation_model"], + ), + metric=dict( + mandatory=True, + ), + metric_weight=dict( + mandatory=True, + requires=["metric"], + usedefault=True, + ), + mi_option=dict( + argstr="--MI-option %s", + sep="x", + ), + moving_image=dict( + argstr="%s", + mandatory=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), number_of_affine_iterations=dict( - argstr="--number-of-affine-iterations %s", sep="x", + argstr="--number-of-affine-iterations %s", + sep="x", + ), + number_of_iterations=dict( + argstr="--number-of-iterations %s", + sep="x", + ), + number_of_time_steps=dict( + requires=["gradient_step_length"], ), - number_of_iterations=dict(argstr="--number-of-iterations %s", sep="x",), - number_of_time_steps=dict(requires=["gradient_step_length"],), output_transform_prefix=dict( - argstr="--output-naming %s", mandatory=True, usedefault=True, - ), - radius=dict(mandatory=True, requires=["metric"],), - regularization=dict(argstr="%s",), - 
regularization_deformation_field_sigma=dict(requires=["regularization"],), - regularization_gradient_field_sigma=dict(requires=["regularization"],), - smoothing_sigmas=dict(argstr="--gaussian-smoothing-sigmas %s", sep="x",), - subsampling_factors=dict(argstr="--subsampling-factors %s", sep="x",), - symmetry_type=dict(requires=["delta_time"],), - transformation_model=dict(argstr="%s", mandatory=True,), - use_histogram_matching=dict(argstr="%s", usedefault=True,), + argstr="--output-naming %s", + mandatory=True, + usedefault=True, + ), + radius=dict( + mandatory=True, + requires=["metric"], + ), + regularization=dict( + argstr="%s", + ), + regularization_deformation_field_sigma=dict( + requires=["regularization"], + ), + regularization_gradient_field_sigma=dict( + requires=["regularization"], + ), + smoothing_sigmas=dict( + argstr="--gaussian-smoothing-sigmas %s", + sep="x", + ), + subsampling_factors=dict( + argstr="--subsampling-factors %s", + sep="x", + ), + symmetry_type=dict( + requires=["delta_time"], + ), + transformation_model=dict( + argstr="%s", + mandatory=True, + ), + use_histogram_matching=dict( + argstr="%s", + usedefault=True, + ), ) inputs = ANTS.input_spec() @@ -43,11 +105,21 @@ def test_ANTS_inputs(): def test_ANTS_outputs(): output_map = dict( - affine_transform=dict(extensions=None,), - inverse_warp_transform=dict(extensions=None,), - metaheader=dict(extensions=None,), - metaheader_raw=dict(extensions=None,), - warp_transform=dict(extensions=None,), + affine_transform=dict( + extensions=None, + ), + inverse_warp_transform=dict( + extensions=None, + ), + metaheader=dict( + extensions=None, + ), + metaheader_raw=dict( + extensions=None, + ), + warp_transform=dict( + extensions=None, + ), ) outputs = ANTS.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py b/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py index 7a5ff5dec5..8907c4ab91 100644 --- a/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py +++ b/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py @@ -4,9 +4,17 @@ def test_ANTSCommand_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - num_threads=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), ) inputs = ANTSCommand.input_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py b/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py index f97fbe3352..24ef0d655e 100644 --- a/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py +++ b/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py @@ -4,17 +4,60 @@ def test_AffineInitializer_inputs(): input_map = dict( - args=dict(argstr="%s",), - dimension=dict(argstr="%s", position=0, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - fixed_image=dict(argstr="%s", extensions=None, mandatory=True, position=1,), - local_search=dict(argstr="%d", position=7, usedefault=True,), - moving_image=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - num_threads=dict(nohash=True, usedefault=True,), - out_file=dict(argstr="%s", extensions=None, position=3, usedefault=True,), - principal_axes=dict(argstr="%d", position=6, usedefault=True,), - radian_fraction=dict(argstr="%f", position=5, usedefault=True,), - search_factor=dict(argstr="%f", position=4, usedefault=True,), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="%s", + 
position=0, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_image=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + local_search=dict( + argstr="%d", + position=7, + usedefault=True, + ), + moving_image=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr="%s", + extensions=None, + position=3, + usedefault=True, + ), + principal_axes=dict( + argstr="%d", + position=6, + usedefault=True, + ), + radian_fraction=dict( + argstr="%f", + position=5, + usedefault=True, + ), + search_factor=dict( + argstr="%f", + position=4, + usedefault=True, + ), ) inputs = AffineInitializer.input_spec() @@ -24,7 +67,11 @@ def test_AffineInitializer_inputs(): def test_AffineInitializer_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = AffineInitializer.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py b/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py index 18add39b6c..a18a3b60b4 100644 --- a/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py +++ b/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py @@ -4,24 +4,62 @@ def test_ApplyTransforms_inputs(): input_map = dict( - args=dict(argstr="%s",), - default_value=dict(argstr="--default-value %g", usedefault=True,), - dimension=dict(argstr="--dimensionality %d",), - environ=dict(nohash=True, usedefault=True,), - float=dict(argstr="--float %d", usedefault=True,), - input_image=dict(argstr="--input %s", extensions=None, mandatory=True,), - input_image_type=dict(argstr="--input-image-type %d",), - interpolation=dict(argstr="%s", usedefault=True,), + args=dict( + argstr="%s", + ), + default_value=dict( + argstr="--default-value %g", + usedefault=True, + ), + dimension=dict( + argstr="--dimensionality %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + float=dict( + argstr="--float %d", + usedefault=True, + ), + input_image=dict( + argstr="--input %s", + extensions=None, + mandatory=True, + ), + input_image_type=dict( + argstr="--input-image-type %d", + ), + interpolation=dict( + argstr="%s", + usedefault=True, + ), interpolation_parameters=dict(), invert_transform_flags=dict(), - num_threads=dict(nohash=True, usedefault=True,), - out_postfix=dict(usedefault=True,), - output_image=dict(argstr="--output %s", genfile=True, hash_files=False,), - print_out_composite_warp_file=dict(requires=["output_image"],), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_postfix=dict( + usedefault=True, + ), + output_image=dict( + argstr="--output %s", + genfile=True, + hash_files=False, + ), + print_out_composite_warp_file=dict( + requires=["output_image"], + ), reference_image=dict( - argstr="--reference-image %s", extensions=None, mandatory=True, + argstr="--reference-image %s", + extensions=None, + mandatory=True, + ), + transforms=dict( + argstr="%s", + mandatory=True, ), - transforms=dict(argstr="%s", mandatory=True,), ) inputs = ApplyTransforms.input_spec() @@ -31,7 +69,11 @@ def test_ApplyTransforms_inputs(): def test_ApplyTransforms_outputs(): - output_map = dict(output_image=dict(extensions=None,),) + output_map = dict( + output_image=dict( + extensions=None, + ), + ) outputs = ApplyTransforms.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py b/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py index a0258471b8..472c22c8b2 100644 --- a/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py +++ b/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py @@ -4,19 +4,36 @@ def test_ApplyTransformsToPoints_inputs(): input_map = dict( - args=dict(argstr="%s",), - dimension=dict(argstr="--dimensionality %d",), - environ=dict(nohash=True, usedefault=True,), - input_file=dict(argstr="--input %s", extensions=None, mandatory=True,), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="--dimensionality %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr="--input %s", + extensions=None, + mandatory=True, + ), invert_transform_flags=dict(), - num_threads=dict(nohash=True, usedefault=True,), + num_threads=dict( + nohash=True, + usedefault=True, + ), output_file=dict( argstr="--output %s", hash_files=False, name_source=["input_file"], name_template="%s_transformed.csv", ), - transforms=dict(argstr="%s", mandatory=True,), + transforms=dict( + argstr="%s", + mandatory=True, + ), ) inputs = ApplyTransformsToPoints.input_spec() @@ -26,7 +43,11 @@ def test_ApplyTransformsToPoints_inputs(): def test_ApplyTransformsToPoints_outputs(): - output_map = dict(output_file=dict(extensions=None,),) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = ApplyTransformsToPoints.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_Atropos.py b/nipype/interfaces/ants/tests/test_auto_Atropos.py index 09644907ab..9f02bb5151 100644 --- a/nipype/interfaces/ants/tests/test_auto_Atropos.py +++ b/nipype/interfaces/ants/tests/test_auto_Atropos.py @@ -4,35 +4,85 @@ def test_Atropos_inputs(): input_map = dict( - args=dict(argstr="%s",), - convergence_threshold=dict(requires=["n_iterations"],), - dimension=dict(argstr="--image-dimensionality %d", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - icm_use_synchronous_update=dict(argstr="%s",), + args=dict( + argstr="%s", + ), + convergence_threshold=dict( + requires=["n_iterations"], + ), + dimension=dict( + argstr="--image-dimensionality %d", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + icm_use_synchronous_update=dict( + argstr="%s", + ), initialization=dict( - argstr="%s", mandatory=True, requires=["number_of_tissue_classes"], + argstr="%s", + mandatory=True, + requires=["number_of_tissue_classes"], + ), + intensity_images=dict( + argstr="--intensity-image %s...", + mandatory=True, ), - intensity_images=dict(argstr="--intensity-image %s...", mandatory=True,), kmeans_init_centers=dict(), - likelihood_model=dict(argstr="--likelihood-model %s",), - mask_image=dict(argstr="--mask-image %s", extensions=None, mandatory=True,), - maximum_number_of_icm_terations=dict(requires=["icm_use_synchronous_update"],), - mrf_radius=dict(requires=["mrf_smoothing_factor"],), - mrf_smoothing_factor=dict(argstr="%s",), - n_iterations=dict(argstr="%s",), - num_threads=dict(nohash=True, usedefault=True,), - number_of_tissue_classes=dict(mandatory=True,), + likelihood_model=dict( + argstr="--likelihood-model %s", + ), + mask_image=dict( + argstr="--mask-image %s", + extensions=None, + mandatory=True, + ), + maximum_number_of_icm_terations=dict( + requires=["icm_use_synchronous_update"], + ), + mrf_radius=dict( + requires=["mrf_smoothing_factor"], + ), + 
mrf_smoothing_factor=dict( + argstr="%s", + ), + n_iterations=dict( + argstr="%s", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + number_of_tissue_classes=dict( + mandatory=True, + ), out_classified_image_name=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + ), + output_posteriors_name_template=dict( + usedefault=True, + ), + posterior_formulation=dict( + argstr="%s", ), - output_posteriors_name_template=dict(usedefault=True,), - posterior_formulation=dict(argstr="%s",), prior_image=dict(), - prior_probability_threshold=dict(requires=["prior_weighting"],), + prior_probability_threshold=dict( + requires=["prior_weighting"], + ), prior_weighting=dict(), save_posteriors=dict(), - use_mixture_model_proportions=dict(requires=["posterior_formulation"],), - use_random_seed=dict(argstr="--use-random-seed %d", usedefault=True,), + use_mixture_model_proportions=dict( + requires=["posterior_formulation"], + ), + use_random_seed=dict( + argstr="--use-random-seed %d", + usedefault=True, + ), ) inputs = Atropos.input_spec() @@ -42,7 +92,12 @@ def test_Atropos_inputs(): def test_Atropos_outputs(): - output_map = dict(classified_image=dict(extensions=None,), posteriors=dict(),) + output_map = dict( + classified_image=dict( + extensions=None, + ), + posteriors=dict(), + ) outputs = Atropos.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py b/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py index 5d8b191931..3da6956de2 100644 --- a/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py @@ -4,14 +4,33 @@ def test_AverageAffineTransform_inputs(): input_map = dict( - args=dict(argstr="%s",), - dimension=dict(argstr="%d", mandatory=True, position=0,), - environ=dict(nohash=True, usedefault=True,), - num_threads=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="%d", + mandatory=True, + position=0, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), output_affine_transform=dict( - argstr="%s", extensions=None, mandatory=True, position=1, + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + transforms=dict( + argstr="%s", + mandatory=True, + position=3, ), - transforms=dict(argstr="%s", mandatory=True, position=3,), ) inputs = AverageAffineTransform.input_spec() @@ -21,7 +40,11 @@ def test_AverageAffineTransform_inputs(): def test_AverageAffineTransform_outputs(): - output_map = dict(affine_transform=dict(extensions=None,),) + output_map = dict( + affine_transform=dict( + extensions=None, + ), + ) outputs = AverageAffineTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_AverageImages.py b/nipype/interfaces/ants/tests/test_auto_AverageImages.py index 572407efbc..8eb03ea7b4 100644 --- a/nipype/interfaces/ants/tests/test_auto_AverageImages.py +++ b/nipype/interfaces/ants/tests/test_auto_AverageImages.py @@ -4,14 +4,38 @@ def test_AverageImages_inputs(): input_map = dict( - args=dict(argstr="%s",), - dimension=dict(argstr="%d", mandatory=True, position=0,), - environ=dict(nohash=True, usedefault=True,), - images=dict(argstr="%s", mandatory=True, position=3,), - normalize=dict(argstr="%d", mandatory=True, position=2,), - 
num_threads=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="%d", + mandatory=True, + position=0, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + images=dict( + argstr="%s", + mandatory=True, + position=3, + ), + normalize=dict( + argstr="%d", + mandatory=True, + position=2, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), output_average_image=dict( - argstr="%s", extensions=None, hash_files=False, position=1, usedefault=True, + argstr="%s", + extensions=None, + hash_files=False, + position=1, + usedefault=True, ), ) inputs = AverageImages.input_spec() @@ -22,7 +46,11 @@ def test_AverageImages_inputs(): def test_AverageImages_outputs(): - output_map = dict(output_average_image=dict(extensions=None,),) + output_map = dict( + output_average_image=dict( + extensions=None, + ), + ) outputs = AverageImages.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py b/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py index 8eb0293313..454a102f2d 100644 --- a/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py +++ b/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py @@ -4,22 +4,61 @@ def test_BrainExtraction_inputs(): input_map = dict( - anatomical_image=dict(argstr="-a %s", extensions=None, mandatory=True,), - args=dict(argstr="%s",), + anatomical_image=dict( + argstr="-a %s", + extensions=None, + mandatory=True, + ), + args=dict( + argstr="%s", + ), brain_probability_mask=dict( - argstr="-m %s", copyfile=False, extensions=None, mandatory=True, - ), - brain_template=dict(argstr="-e %s", extensions=None, mandatory=True,), - debug=dict(argstr="-z 1",), - dimension=dict(argstr="-d %d", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - extraction_registration_mask=dict(argstr="-f %s", extensions=None,), - image_suffix=dict(argstr="-s %s", usedefault=True,), - keep_temporary_files=dict(argstr="-k %d",), - num_threads=dict(nohash=True, usedefault=True,), - out_prefix=dict(argstr="-o %s", usedefault=True,), - use_floatingpoint_precision=dict(argstr="-q %d",), - use_random_seeding=dict(argstr="-u %d",), + argstr="-m %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + brain_template=dict( + argstr="-e %s", + extensions=None, + mandatory=True, + ), + debug=dict( + argstr="-z 1", + ), + dimension=dict( + argstr="-d %d", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + extraction_registration_mask=dict( + argstr="-f %s", + extensions=None, + ), + image_suffix=dict( + argstr="-s %s", + usedefault=True, + ), + keep_temporary_files=dict( + argstr="-k %d", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_prefix=dict( + argstr="-o %s", + usedefault=True, + ), + use_floatingpoint_precision=dict( + argstr="-q %d", + ), + use_random_seeding=dict( + argstr="-u %d", + ), ) inputs = BrainExtraction.input_spec() @@ -30,24 +69,60 @@ def test_BrainExtraction_inputs(): def test_BrainExtraction_outputs(): output_map = dict( - BrainExtractionBrain=dict(extensions=None,), - BrainExtractionCSF=dict(extensions=None,), - BrainExtractionGM=dict(extensions=None,), - BrainExtractionInitialAffine=dict(extensions=None,), - BrainExtractionInitialAffineFixed=dict(extensions=None,), - BrainExtractionInitialAffineMoving=dict(extensions=None,), - BrainExtractionLaplacian=dict(extensions=None,), - BrainExtractionMask=dict(extensions=None,), - 
BrainExtractionPrior0GenericAffine=dict(extensions=None,), - BrainExtractionPrior1InverseWarp=dict(extensions=None,), - BrainExtractionPrior1Warp=dict(extensions=None,), - BrainExtractionPriorWarped=dict(extensions=None,), - BrainExtractionSegmentation=dict(extensions=None,), - BrainExtractionTemplateLaplacian=dict(extensions=None,), - BrainExtractionTmp=dict(extensions=None,), - BrainExtractionWM=dict(extensions=None,), - N4Corrected0=dict(extensions=None,), - N4Truncated0=dict(extensions=None,), + BrainExtractionBrain=dict( + extensions=None, + ), + BrainExtractionCSF=dict( + extensions=None, + ), + BrainExtractionGM=dict( + extensions=None, + ), + BrainExtractionInitialAffine=dict( + extensions=None, + ), + BrainExtractionInitialAffineFixed=dict( + extensions=None, + ), + BrainExtractionInitialAffineMoving=dict( + extensions=None, + ), + BrainExtractionLaplacian=dict( + extensions=None, + ), + BrainExtractionMask=dict( + extensions=None, + ), + BrainExtractionPrior0GenericAffine=dict( + extensions=None, + ), + BrainExtractionPrior1InverseWarp=dict( + extensions=None, + ), + BrainExtractionPrior1Warp=dict( + extensions=None, + ), + BrainExtractionPriorWarped=dict( + extensions=None, + ), + BrainExtractionSegmentation=dict( + extensions=None, + ), + BrainExtractionTemplateLaplacian=dict( + extensions=None, + ), + BrainExtractionTmp=dict( + extensions=None, + ), + BrainExtractionWM=dict( + extensions=None, + ), + N4Corrected0=dict( + extensions=None, + ), + N4Truncated0=dict( + extensions=None, + ), ) outputs = BrainExtraction.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py b/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py index ccc54c6eb8..78afc21df2 100644 --- a/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py @@ -4,10 +4,22 @@ def test_ComposeMultiTransform_inputs(): input_map = dict( - args=dict(argstr="%s",), - dimension=dict(argstr="%d", position=0, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - num_threads=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="%d", + position=0, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), output_transform=dict( argstr="%s", extensions=None, @@ -16,8 +28,16 @@ def test_ComposeMultiTransform_inputs(): name_template="%s_composed", position=1, ), - reference_image=dict(argstr="%s", extensions=None, position=2,), - transforms=dict(argstr="%s", mandatory=True, position=3,), + reference_image=dict( + argstr="%s", + extensions=None, + position=2, + ), + transforms=dict( + argstr="%s", + mandatory=True, + position=3, + ), ) inputs = ComposeMultiTransform.input_spec() @@ -27,7 +47,11 @@ def test_ComposeMultiTransform_inputs(): def test_ComposeMultiTransform_outputs(): - output_map = dict(output_transform=dict(extensions=None,),) + output_map = dict( + output_transform=dict( + extensions=None, + ), + ) outputs = ComposeMultiTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_CompositeTransformUtil.py b/nipype/interfaces/ants/tests/test_auto_CompositeTransformUtil.py index 125c69f141..54b428db2a 100644 --- a/nipype/interfaces/ants/tests/test_auto_CompositeTransformUtil.py +++ b/nipype/interfaces/ants/tests/test_auto_CompositeTransformUtil.py @@ -4,13 +4,37 @@ def 
test_CompositeTransformUtil_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s...", mandatory=True, position=3,), - num_threads=dict(nohash=True, usedefault=True,), - out_file=dict(argstr="%s", extensions=None, position=2,), - output_prefix=dict(argstr="%s", position=4, usedefault=True,), - process=dict(argstr="--%s", position=1, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s...", + mandatory=True, + position=3, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr="%s", + extensions=None, + position=2, + ), + output_prefix=dict( + argstr="%s", + position=4, + usedefault=True, + ), + process=dict( + argstr="--%s", + position=1, + usedefault=True, + ), ) inputs = CompositeTransformUtil.input_spec() @@ -21,9 +45,15 @@ def test_CompositeTransformUtil_inputs(): def test_CompositeTransformUtil_outputs(): output_map = dict( - affine_transform=dict(extensions=None,), - displacement_field=dict(extensions=None,), - out_file=dict(extensions=None,), + affine_transform=dict( + extensions=None, + ), + displacement_field=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), ) outputs = CompositeTransformUtil.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py b/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py index eb0a7d48b0..a49239ebae 100644 --- a/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py +++ b/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py @@ -4,19 +4,70 @@ def test_ConvertScalarImageToRGB_inputs(): input_map = dict( - args=dict(argstr="%s",), - colormap=dict(argstr="%s", mandatory=True, position=4,), - custom_color_map_file=dict(argstr="%s", position=5, usedefault=True,), - dimension=dict(argstr="%d", mandatory=True, position=0, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - input_image=dict(argstr="%s", extensions=None, mandatory=True, position=1,), - mask_image=dict(argstr="%s", extensions=None, position=3, usedefault=True,), - maximum_RGB_output=dict(argstr="%d", position=9, usedefault=True,), - maximum_input=dict(argstr="%d", mandatory=True, position=7,), - minimum_RGB_output=dict(argstr="%d", position=8, usedefault=True,), - minimum_input=dict(argstr="%d", mandatory=True, position=6,), - num_threads=dict(nohash=True, usedefault=True,), - output_image=dict(argstr="%s", position=2, usedefault=True,), + args=dict( + argstr="%s", + ), + colormap=dict( + argstr="%s", + mandatory=True, + position=4, + ), + custom_color_map_file=dict( + argstr="%s", + position=5, + usedefault=True, + ), + dimension=dict( + argstr="%d", + mandatory=True, + position=0, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_image=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + mask_image=dict( + argstr="%s", + extensions=None, + position=3, + usedefault=True, + ), + maximum_RGB_output=dict( + argstr="%d", + position=9, + usedefault=True, + ), + maximum_input=dict( + argstr="%d", + mandatory=True, + position=7, + ), + minimum_RGB_output=dict( + argstr="%d", + position=8, + usedefault=True, + ), + minimum_input=dict( + argstr="%d", + mandatory=True, + position=6, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + output_image=dict( + argstr="%s", + position=2, + usedefault=True, + ), ) inputs = 
ConvertScalarImageToRGB.input_spec() @@ -26,7 +77,11 @@ def test_ConvertScalarImageToRGB_inputs(): def test_ConvertScalarImageToRGB_outputs(): - output_map = dict(output_image=dict(extensions=None,),) + output_map = dict( + output_image=dict( + extensions=None, + ), + ) outputs = ConvertScalarImageToRGB.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py b/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py index 92a3f04b57..a42551788b 100644 --- a/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py +++ b/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py @@ -4,32 +4,94 @@ def test_CorticalThickness_inputs(): input_map = dict( - anatomical_image=dict(argstr="-a %s", extensions=None, mandatory=True,), - args=dict(argstr="%s",), - b_spline_smoothing=dict(argstr="-v",), + anatomical_image=dict( + argstr="-a %s", + extensions=None, + mandatory=True, + ), + args=dict( + argstr="%s", + ), + b_spline_smoothing=dict( + argstr="-v", + ), brain_probability_mask=dict( - argstr="-m %s", copyfile=False, extensions=None, mandatory=True, - ), - brain_template=dict(argstr="-e %s", extensions=None, mandatory=True,), - cortical_label_image=dict(extensions=None,), - debug=dict(argstr="-z 1",), - dimension=dict(argstr="-d %d", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - extraction_registration_mask=dict(argstr="-f %s", extensions=None,), - image_suffix=dict(argstr="-s %s", usedefault=True,), - keep_temporary_files=dict(argstr="-k %d",), - label_propagation=dict(argstr="-l %s",), - max_iterations=dict(argstr="-i %d",), - num_threads=dict(nohash=True, usedefault=True,), - out_prefix=dict(argstr="-o %s", usedefault=True,), - posterior_formulation=dict(argstr="-b %s",), - prior_segmentation_weight=dict(argstr="-w %f",), - quick_registration=dict(argstr="-q 1",), - segmentation_iterations=dict(argstr="-n %d",), - segmentation_priors=dict(argstr="-p %s", mandatory=True,), - t1_registration_template=dict(argstr="-t %s", extensions=None, mandatory=True,), - use_floatingpoint_precision=dict(argstr="-j %d",), - use_random_seeding=dict(argstr="-u %d",), + argstr="-m %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + brain_template=dict( + argstr="-e %s", + extensions=None, + mandatory=True, + ), + cortical_label_image=dict( + extensions=None, + ), + debug=dict( + argstr="-z 1", + ), + dimension=dict( + argstr="-d %d", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + extraction_registration_mask=dict( + argstr="-f %s", + extensions=None, + ), + image_suffix=dict( + argstr="-s %s", + usedefault=True, + ), + keep_temporary_files=dict( + argstr="-k %d", + ), + label_propagation=dict( + argstr="-l %s", + ), + max_iterations=dict( + argstr="-i %d", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_prefix=dict( + argstr="-o %s", + usedefault=True, + ), + posterior_formulation=dict( + argstr="-b %s", + ), + prior_segmentation_weight=dict( + argstr="-w %f", + ), + quick_registration=dict( + argstr="-q 1", + ), + segmentation_iterations=dict( + argstr="-n %d", + ), + segmentation_priors=dict( + argstr="-p %s", + mandatory=True, + ), + t1_registration_template=dict( + argstr="-t %s", + extensions=None, + mandatory=True, + ), + use_floatingpoint_precision=dict( + argstr="-j %d", + ), + use_random_seeding=dict( + argstr="-u %d", + ), ) inputs = CorticalThickness.input_spec() @@ -40,19 +102,43 @@ def test_CorticalThickness_inputs(): def 
test_CorticalThickness_outputs(): output_map = dict( - BrainExtractionMask=dict(extensions=None,), - BrainSegmentation=dict(extensions=None,), - BrainSegmentationN4=dict(extensions=None,), + BrainExtractionMask=dict( + extensions=None, + ), + BrainSegmentation=dict( + extensions=None, + ), + BrainSegmentationN4=dict( + extensions=None, + ), BrainSegmentationPosteriors=dict(), - BrainVolumes=dict(extensions=None,), - CorticalThickness=dict(extensions=None,), - CorticalThicknessNormedToTemplate=dict(extensions=None,), - ExtractedBrainN4=dict(extensions=None,), - SubjectToTemplate0GenericAffine=dict(extensions=None,), - SubjectToTemplate1Warp=dict(extensions=None,), - SubjectToTemplateLogJacobian=dict(extensions=None,), - TemplateToSubject0Warp=dict(extensions=None,), - TemplateToSubject1GenericAffine=dict(extensions=None,), + BrainVolumes=dict( + extensions=None, + ), + CorticalThickness=dict( + extensions=None, + ), + CorticalThicknessNormedToTemplate=dict( + extensions=None, + ), + ExtractedBrainN4=dict( + extensions=None, + ), + SubjectToTemplate0GenericAffine=dict( + extensions=None, + ), + SubjectToTemplate1Warp=dict( + extensions=None, + ), + SubjectToTemplateLogJacobian=dict( + extensions=None, + ), + TemplateToSubject0Warp=dict( + extensions=None, + ), + TemplateToSubject1GenericAffine=dict( + extensions=None, + ), ) outputs = CorticalThickness.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py b/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py index be694c055f..197ec2ad23 100644 --- a/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py +++ b/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py @@ -4,16 +4,42 @@ def test_CreateJacobianDeterminantImage_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), deformationField=dict( - argstr="%s", extensions=None, mandatory=True, position=1, - ), - doLogJacobian=dict(argstr="%d", position=3,), - environ=dict(nohash=True, usedefault=True,), - imageDimension=dict(argstr="%d", mandatory=True, position=0,), - num_threads=dict(nohash=True, usedefault=True,), - outputImage=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - useGeometric=dict(argstr="%d", position=4,), + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + doLogJacobian=dict( + argstr="%d", + position=3, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + imageDimension=dict( + argstr="%d", + mandatory=True, + position=0, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + outputImage=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + useGeometric=dict( + argstr="%d", + position=4, + ), ) inputs = CreateJacobianDeterminantImage.input_spec() @@ -23,7 +49,11 @@ def test_CreateJacobianDeterminantImage_inputs(): def test_CreateJacobianDeterminantImage_outputs(): - output_map = dict(jacobian_image=dict(extensions=None,),) + output_map = dict( + jacobian_image=dict( + extensions=None, + ), + ) outputs = CreateJacobianDeterminantImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py b/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py index 3bb36c9d01..6516b03ad7 100644 --- a/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py +++ b/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py @@ -4,20 +4,56 @@ def test_CreateTiledMosaic_inputs(): input_map = 
dict( - alpha_value=dict(argstr="-a %.2f",), - args=dict(argstr="%s",), - direction=dict(argstr="-d %d",), - environ=dict(nohash=True, usedefault=True,), - flip_slice=dict(argstr="-f %s",), - input_image=dict(argstr="-i %s", extensions=None, mandatory=True,), - mask_image=dict(argstr="-x %s", extensions=None,), - num_threads=dict(nohash=True, usedefault=True,), - output_image=dict(argstr="-o %s", usedefault=True,), - pad_or_crop=dict(argstr="-p %s",), - permute_axes=dict(argstr="-g",), - rgb_image=dict(argstr="-r %s", extensions=None, mandatory=True,), - slices=dict(argstr="-s %s",), - tile_geometry=dict(argstr="-t %s",), + alpha_value=dict( + argstr="-a %.2f", + ), + args=dict( + argstr="%s", + ), + direction=dict( + argstr="-d %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flip_slice=dict( + argstr="-f %s", + ), + input_image=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + mask_image=dict( + argstr="-x %s", + extensions=None, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + output_image=dict( + argstr="-o %s", + usedefault=True, + ), + pad_or_crop=dict( + argstr="-p %s", + ), + permute_axes=dict( + argstr="-g", + ), + rgb_image=dict( + argstr="-r %s", + extensions=None, + mandatory=True, + ), + slices=dict( + argstr="-s %s", + ), + tile_geometry=dict( + argstr="-t %s", + ), ) inputs = CreateTiledMosaic.input_spec() @@ -27,7 +63,11 @@ def test_CreateTiledMosaic_inputs(): def test_CreateTiledMosaic_outputs(): - output_map = dict(output_image=dict(extensions=None,),) + output_map = dict( + output_image=dict( + extensions=None, + ), + ) outputs = CreateTiledMosaic.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py b/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py index 244b2ca778..7af4764633 100644 --- a/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py +++ b/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py @@ -4,10 +4,21 @@ def test_DenoiseImage_inputs(): input_map = dict( - args=dict(argstr="%s",), - dimension=dict(argstr="-d %d",), - environ=dict(nohash=True, usedefault=True,), - input_image=dict(argstr="-i %s", extensions=None, mandatory=True,), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="-d %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_image=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), noise_image=dict( extensions=None, hash_files=False, @@ -15,8 +26,14 @@ def test_DenoiseImage_inputs(): name_source=["input_image"], name_template="%s_noise", ), - noise_model=dict(argstr="-n %s", usedefault=True,), - num_threads=dict(nohash=True, usedefault=True,), + noise_model=dict( + argstr="-n %s", + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), output_image=dict( argstr="-o %s", extensions=None, @@ -25,9 +42,18 @@ def test_DenoiseImage_inputs(): name_source=["input_image"], name_template="%s_noise_corrected", ), - save_noise=dict(mandatory=True, usedefault=True, xor=["noise_image"],), - shrink_factor=dict(argstr="-s %s", usedefault=True,), - verbose=dict(argstr="-v",), + save_noise=dict( + mandatory=True, + usedefault=True, + xor=["noise_image"], + ), + shrink_factor=dict( + argstr="-s %s", + usedefault=True, + ), + verbose=dict( + argstr="-v", + ), ) inputs = DenoiseImage.input_spec() @@ -38,7 +64,12 @@ def test_DenoiseImage_inputs(): def test_DenoiseImage_outputs(): output_map = dict( - noise_image=dict(extensions=None,), 
output_image=dict(extensions=None,), + noise_image=dict( + extensions=None, + ), + output_image=dict( + extensions=None, + ), ) outputs = DenoiseImage.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py b/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py index 11fbe56ccd..6fde6f5b44 100644 --- a/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py +++ b/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py @@ -4,24 +4,61 @@ def test_GenWarpFields_inputs(): input_map = dict( - args=dict(argstr="%s",), - bias_field_correction=dict(argstr="-n 1",), - dimension=dict(argstr="-d %d", position=1, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - force_proceed=dict(argstr="-f 1",), + args=dict( + argstr="%s", + ), + bias_field_correction=dict( + argstr="-n 1", + ), + dimension=dict( + argstr="-d %d", + position=1, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + force_proceed=dict( + argstr="-f 1", + ), input_image=dict( - argstr="-i %s", copyfile=False, extensions=None, mandatory=True, + argstr="-i %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + inverse_warp_template_labels=dict( + argstr="-l", + ), + max_iterations=dict( + argstr="-m %s", + sep="x", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_prefix=dict( + argstr="-o %s", + usedefault=True, + ), + quality_check=dict( + argstr="-q 1", ), - inverse_warp_template_labels=dict(argstr="-l",), - max_iterations=dict(argstr="-m %s", sep="x",), - num_threads=dict(nohash=True, usedefault=True,), - out_prefix=dict(argstr="-o %s", usedefault=True,), - quality_check=dict(argstr="-q 1",), reference_image=dict( - argstr="-r %s", copyfile=True, extensions=None, mandatory=True, + argstr="-r %s", + copyfile=True, + extensions=None, + mandatory=True, + ), + similarity_metric=dict( + argstr="-s %s", + ), + transformation_model=dict( + argstr="-t %s", + usedefault=True, ), - similarity_metric=dict(argstr="-s %s",), - transformation_model=dict(argstr="-t %s", usedefault=True,), ) inputs = GenWarpFields.input_spec() @@ -32,11 +69,21 @@ def test_GenWarpFields_inputs(): def test_GenWarpFields_outputs(): output_map = dict( - affine_transformation=dict(extensions=None,), - input_file=dict(extensions=None,), - inverse_warp_field=dict(extensions=None,), - output_file=dict(extensions=None,), - warp_field=dict(extensions=None,), + affine_transformation=dict( + extensions=None, + ), + input_file=dict( + extensions=None, + ), + inverse_warp_field=dict( + extensions=None, + ), + output_file=dict( + extensions=None, + ), + warp_field=dict( + extensions=None, + ), ) outputs = GenWarpFields.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_ImageMath.py b/nipype/interfaces/ants/tests/test_auto_ImageMath.py index 12aabf7a6e..d71d4c476a 100644 --- a/nipype/interfaces/ants/tests/test_auto_ImageMath.py +++ b/nipype/interfaces/ants/tests/test_auto_ImageMath.py @@ -4,14 +4,40 @@ def test_ImageMath_inputs(): input_map = dict( - args=dict(argstr="%s",), - copy_header=dict(usedefault=True,), - dimension=dict(argstr="%d", position=1, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - num_threads=dict(nohash=True, usedefault=True,), - op1=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - op2=dict(argstr="%s", position=-1,), - operation=dict(argstr="%s", mandatory=True, position=3,), + args=dict( + argstr="%s", + ), + copy_header=dict( + usedefault=True, + ), + dimension=dict( + argstr="%d", + position=1, + 
usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + op1=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + op2=dict( + argstr="%s", + position=-1, + ), + operation=dict( + argstr="%s", + mandatory=True, + position=3, + ), output_image=dict( argstr="%s", extensions=None, @@ -29,7 +55,11 @@ def test_ImageMath_inputs(): def test_ImageMath_outputs(): - output_map = dict(output_image=dict(extensions=None,),) + output_map = dict( + output_image=dict( + extensions=None, + ), + ) outputs = ImageMath.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_JointFusion.py b/nipype/interfaces/ants/tests/test_auto_JointFusion.py index 57af10173d..f234ceea7c 100644 --- a/nipype/interfaces/ants/tests/test_auto_JointFusion.py +++ b/nipype/interfaces/ants/tests/test_auto_JointFusion.py @@ -4,18 +4,49 @@ def test_JointFusion_inputs(): input_map = dict( - alpha=dict(argstr="-a %s", usedefault=True,), - args=dict(argstr="%s",), - atlas_image=dict(argstr="-g %s...", mandatory=True,), - atlas_segmentation_image=dict(argstr="-l %s...", mandatory=True,), - beta=dict(argstr="-b %s", usedefault=True,), - constrain_nonnegative=dict(argstr="-c", usedefault=True,), - dimension=dict(argstr="-d %d",), - environ=dict(nohash=True, usedefault=True,), + alpha=dict( + argstr="-a %s", + usedefault=True, + ), + args=dict( + argstr="%s", + ), + atlas_image=dict( + argstr="-g %s...", + mandatory=True, + ), + atlas_segmentation_image=dict( + argstr="-l %s...", + mandatory=True, + ), + beta=dict( + argstr="-b %s", + usedefault=True, + ), + constrain_nonnegative=dict( + argstr="-c", + usedefault=True, + ), + dimension=dict( + argstr="-d %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), exclusion_image=dict(), - exclusion_image_label=dict(argstr="-e %s", requires=["exclusion_image"],), - mask_image=dict(argstr="-x %s", extensions=None,), - num_threads=dict(nohash=True, usedefault=True,), + exclusion_image_label=dict( + argstr="-e %s", + requires=["exclusion_image"], + ), + mask_image=dict( + argstr="-x %s", + extensions=None, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_atlas_voting_weight_name_format=dict( requires=[ "out_label_fusion", @@ -23,20 +54,45 @@ def test_JointFusion_inputs(): "out_label_post_prob_name_format", ], ), - out_intensity_fusion_name_format=dict(argstr="",), - out_label_fusion=dict(argstr="%s", extensions=None, hash_files=False,), + out_intensity_fusion_name_format=dict( + argstr="", + ), + out_label_fusion=dict( + argstr="%s", + extensions=None, + hash_files=False, + ), out_label_post_prob_name_format=dict( requires=["out_label_fusion", "out_intensity_fusion_name_format"], ), - patch_metric=dict(argstr="-m %s",), - patch_radius=dict(argstr="-p %s", maxlen=3, minlen=3,), - retain_atlas_voting_images=dict(argstr="-f", usedefault=True,), + patch_metric=dict( + argstr="-m %s", + ), + patch_radius=dict( + argstr="-p %s", + maxlen=3, + minlen=3, + ), + retain_atlas_voting_images=dict( + argstr="-f", + usedefault=True, + ), retain_label_posterior_images=dict( - argstr="-r", requires=["atlas_segmentation_image"], usedefault=True, + argstr="-r", + requires=["atlas_segmentation_image"], + usedefault=True, + ), + search_radius=dict( + argstr="-s %s", + usedefault=True, + ), + target_image=dict( + argstr="-t %s", + mandatory=True, + ), + verbose=dict( + argstr="-v", ), - search_radius=dict(argstr="-s %s", 
usedefault=True,), - target_image=dict(argstr="-t %s", mandatory=True,), - verbose=dict(argstr="-v",), ) inputs = JointFusion.input_spec() @@ -49,7 +105,9 @@ def test_JointFusion_outputs(): output_map = dict( out_atlas_voting_weight=dict(), out_intensity_fusion=dict(), - out_label_fusion=dict(extensions=None,), + out_label_fusion=dict( + extensions=None, + ), out_label_post_prob=dict(), ) outputs = JointFusion.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py b/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py index 94ce9e9abf..00c1ec53a9 100644 --- a/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py +++ b/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py @@ -4,8 +4,13 @@ def test_KellyKapowski_inputs(): input_map = dict( - args=dict(argstr="%s",), - convergence=dict(argstr='--convergence "%s"', usedefault=True,), + args=dict( + argstr="%s", + ), + convergence=dict( + argstr='--convergence "%s"', + usedefault=True, + ), cortical_thickness=dict( argstr='--output "%s"', extensions=None, @@ -14,35 +19,61 @@ def test_KellyKapowski_inputs(): name_source=["segmentation_image"], name_template="%s_cortical_thickness", ), - dimension=dict(argstr="--image-dimensionality %d", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - gradient_step=dict(argstr="--gradient-step %f", usedefault=True,), - gray_matter_label=dict(usedefault=True,), + dimension=dict( + argstr="--image-dimensionality %d", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gradient_step=dict( + argstr="--gradient-step %f", + usedefault=True, + ), + gray_matter_label=dict( + usedefault=True, + ), gray_matter_prob_image=dict( - argstr='--gray-matter-probability-image "%s"', extensions=None, + argstr='--gray-matter-probability-image "%s"', + extensions=None, ), max_invert_displacement_field_iters=dict( argstr="--maximum-number-of-invert-displacement-field-iterations %d", usedefault=True, ), - num_threads=dict(nohash=True, usedefault=True,), + num_threads=dict( + nohash=True, + usedefault=True, + ), number_integration_points=dict( - argstr="--number-of-integration-points %d", usedefault=True, + argstr="--number-of-integration-points %d", + usedefault=True, ), segmentation_image=dict( - argstr='--segmentation-image "%s"', extensions=None, mandatory=True, + argstr='--segmentation-image "%s"', + extensions=None, + mandatory=True, + ), + smoothing_variance=dict( + argstr="--smoothing-variance %f", + usedefault=True, ), - smoothing_variance=dict(argstr="--smoothing-variance %f", usedefault=True,), smoothing_velocity_field=dict( - argstr="--smoothing-velocity-field-parameter %f", usedefault=True, + argstr="--smoothing-velocity-field-parameter %f", + usedefault=True, ), thickness_prior_estimate=dict( - argstr="--thickness-prior-estimate %f", usedefault=True, + argstr="--thickness-prior-estimate %f", + usedefault=True, ), thickness_prior_image=dict( - argstr='--thickness-prior-image "%s"', extensions=None, + argstr='--thickness-prior-image "%s"', + extensions=None, + ), + use_bspline_smoothing=dict( + argstr="--use-bspline-smoothing 1", ), - use_bspline_smoothing=dict(argstr="--use-bspline-smoothing 1",), warped_white_matter=dict( extensions=None, hash_files=False, @@ -50,9 +81,12 @@ def test_KellyKapowski_inputs(): name_source=["segmentation_image"], name_template="%s_warped_white_matter", ), - white_matter_label=dict(usedefault=True,), + white_matter_label=dict( + usedefault=True, + ), white_matter_prob_image=dict( - 
argstr='--white-matter-probability-image "%s"', extensions=None, + argstr='--white-matter-probability-image "%s"', + extensions=None, ), ) inputs = KellyKapowski.input_spec() @@ -64,8 +98,12 @@ def test_KellyKapowski_inputs(): def test_KellyKapowski_outputs(): output_map = dict( - cortical_thickness=dict(extensions=None,), - warped_white_matter=dict(extensions=None,), + cortical_thickness=dict( + extensions=None, + ), + warped_white_matter=dict( + extensions=None, + ), ) outputs = KellyKapowski.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py b/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py index dfc4e0ff60..306c7aa17e 100644 --- a/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py +++ b/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py @@ -4,14 +4,35 @@ def test_LabelGeometry_inputs(): input_map = dict( - args=dict(argstr="%s",), - dimension=dict(argstr="%d", position=0, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="%d", + position=0, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), intensity_image=dict( - argstr="%s", extensions=None, mandatory=True, position=2, usedefault=True, + argstr="%s", + extensions=None, + mandatory=True, + position=2, + usedefault=True, + ), + label_image=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - label_image=dict(argstr="%s", extensions=None, mandatory=True, position=1,), - num_threads=dict(nohash=True, usedefault=True,), output_file=dict( argstr="%s", name_source=["label_image"], @@ -27,7 +48,11 @@ def test_LabelGeometry_inputs(): def test_LabelGeometry_outputs(): - output_map = dict(output_file=dict(extensions=None,),) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = LabelGeometry.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py b/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py index 3b18ca5d0f..74c9ef076d 100644 --- a/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py +++ b/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py @@ -4,16 +4,36 @@ def test_LaplacianThickness_inputs(): input_map = dict( - args=dict(argstr="%s",), - dT=dict(argstr="%s", position=6, requires=["prior_thickness"],), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + dT=dict( + argstr="%s", + position=6, + requires=["prior_thickness"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), input_gm=dict( - argstr="%s", copyfile=True, extensions=None, mandatory=True, position=2, + argstr="%s", + copyfile=True, + extensions=None, + mandatory=True, + position=2, ), input_wm=dict( - argstr="%s", copyfile=True, extensions=None, mandatory=True, position=1, + argstr="%s", + copyfile=True, + extensions=None, + mandatory=True, + position=1, + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - num_threads=dict(nohash=True, usedefault=True,), output_image=dict( argstr="%s", extensions=None, @@ -23,10 +43,25 @@ def test_LaplacianThickness_inputs(): name_template="%s_thickness", position=3, ), - prior_thickness=dict(argstr="%s", position=5, requires=["smooth_param"],), - smooth_param=dict(argstr="%s", position=4,), - sulcus_prior=dict(argstr="%s", position=7, requires=["dT"],), - tolerance=dict(argstr="%s", position=8, requires=["sulcus_prior"],), + 
prior_thickness=dict( + argstr="%s", + position=5, + requires=["smooth_param"], + ), + smooth_param=dict( + argstr="%s", + position=4, + ), + sulcus_prior=dict( + argstr="%s", + position=7, + requires=["dT"], + ), + tolerance=dict( + argstr="%s", + position=8, + requires=["sulcus_prior"], + ), ) inputs = LaplacianThickness.input_spec() @@ -36,7 +71,11 @@ def test_LaplacianThickness_inputs(): def test_LaplacianThickness_outputs(): - output_map = dict(output_image=dict(extensions=None,),) + output_map = dict( + output_image=dict( + extensions=None, + ), + ) outputs = LaplacianThickness.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py b/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py index 2d15c49afa..05279d8017 100644 --- a/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py +++ b/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py @@ -4,19 +4,57 @@ def test_MeasureImageSimilarity_inputs(): input_map = dict( - args=dict(argstr="%s",), - dimension=dict(argstr="--dimensionality %d", position=1,), - environ=dict(nohash=True, usedefault=True,), - fixed_image=dict(extensions=None, mandatory=True,), - fixed_image_mask=dict(argstr="%s", extensions=None,), - metric=dict(argstr="%s", mandatory=True,), - metric_weight=dict(requires=["metric"], usedefault=True,), - moving_image=dict(extensions=None, mandatory=True,), - moving_image_mask=dict(extensions=None, requires=["fixed_image_mask"],), - num_threads=dict(nohash=True, usedefault=True,), - radius_or_number_of_bins=dict(mandatory=True, requires=["metric"],), - sampling_percentage=dict(mandatory=True, requires=["metric"],), - sampling_strategy=dict(requires=["metric"], usedefault=True,), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="--dimensionality %d", + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_image=dict( + extensions=None, + mandatory=True, + ), + fixed_image_mask=dict( + argstr="%s", + extensions=None, + ), + metric=dict( + argstr="%s", + mandatory=True, + ), + metric_weight=dict( + requires=["metric"], + usedefault=True, + ), + moving_image=dict( + extensions=None, + mandatory=True, + ), + moving_image_mask=dict( + extensions=None, + requires=["fixed_image_mask"], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + radius_or_number_of_bins=dict( + mandatory=True, + requires=["metric"], + ), + sampling_percentage=dict( + mandatory=True, + requires=["metric"], + ), + sampling_strategy=dict( + requires=["metric"], + usedefault=True, + ), ) inputs = MeasureImageSimilarity.input_spec() @@ -26,7 +64,9 @@ def test_MeasureImageSimilarity_inputs(): def test_MeasureImageSimilarity_outputs(): - output_map = dict(similarity=dict(),) + output_map = dict( + similarity=dict(), + ) outputs = MeasureImageSimilarity.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py b/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py index 8a8d0958eb..89f8fa60ae 100644 --- a/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py +++ b/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py @@ -4,15 +4,39 @@ def test_MultiplyImages_inputs(): input_map = dict( - args=dict(argstr="%s",), - dimension=dict(argstr="%d", mandatory=True, position=0,), - environ=dict(nohash=True, usedefault=True,), - first_input=dict(argstr="%s", extensions=None, mandatory=True, position=1,), - 
num_threads=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="%d", + mandatory=True, + position=0, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + first_input=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), output_product_image=dict( - argstr="%s", extensions=None, mandatory=True, position=3, + argstr="%s", + extensions=None, + mandatory=True, + position=3, + ), + second_input=dict( + argstr="%s", + mandatory=True, + position=2, ), - second_input=dict(argstr="%s", mandatory=True, position=2,), ) inputs = MultiplyImages.input_spec() @@ -22,7 +46,11 @@ def test_MultiplyImages_inputs(): def test_MultiplyImages_outputs(): - output_map = dict(output_product_image=dict(extensions=None,),) + output_map = dict( + output_product_image=dict( + extensions=None, + ), + ) outputs = MultiplyImages.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py b/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py index 2426660455..59775df2ea 100644 --- a/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py +++ b/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py @@ -4,19 +4,53 @@ def test_N4BiasFieldCorrection_inputs(): input_map = dict( - args=dict(argstr="%s",), - bias_image=dict(extensions=None, hash_files=False,), - bspline_fitting_distance=dict(argstr="--bspline-fitting %s",), - bspline_order=dict(requires=["bspline_fitting_distance"],), - convergence_threshold=dict(requires=["n_iterations"],), - copy_header=dict(mandatory=True, usedefault=True,), - dimension=dict(argstr="-d %d", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - histogram_sharpening=dict(argstr="--histogram-sharpening [%g,%g,%d]",), - input_image=dict(argstr="--input-image %s", extensions=None, mandatory=True,), - mask_image=dict(argstr="--mask-image %s", extensions=None,), - n_iterations=dict(argstr="--convergence %s",), - num_threads=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + bias_image=dict( + extensions=None, + hash_files=False, + ), + bspline_fitting_distance=dict( + argstr="--bspline-fitting %s", + ), + bspline_order=dict( + requires=["bspline_fitting_distance"], + ), + convergence_threshold=dict( + requires=["n_iterations"], + ), + copy_header=dict( + mandatory=True, + usedefault=True, + ), + dimension=dict( + argstr="-d %d", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + histogram_sharpening=dict( + argstr="--histogram-sharpening [%g,%g,%d]", + ), + input_image=dict( + argstr="--input-image %s", + extensions=None, + mandatory=True, + ), + mask_image=dict( + argstr="--mask-image %s", + extensions=None, + ), + n_iterations=dict( + argstr="--convergence %s", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), output_image=dict( argstr="--output %s", hash_files=False, @@ -24,10 +58,23 @@ def test_N4BiasFieldCorrection_inputs(): name_source=["input_image"], name_template="%s_corrected", ), - rescale_intensities=dict(argstr="-r", min_ver="2.1.0", usedefault=True,), - save_bias=dict(mandatory=True, usedefault=True, xor=["bias_image"],), - shrink_factor=dict(argstr="--shrink-factor %d",), - weight_image=dict(argstr="--weight-image %s", extensions=None,), + rescale_intensities=dict( + argstr="-r", + min_ver="2.1.0", + usedefault=True, + ), + save_bias=dict( + mandatory=True, + 
usedefault=True, + xor=["bias_image"], + ), + shrink_factor=dict( + argstr="--shrink-factor %d", + ), + weight_image=dict( + argstr="--weight-image %s", + extensions=None, + ), ) inputs = N4BiasFieldCorrection.input_spec() @@ -38,7 +85,12 @@ def test_N4BiasFieldCorrection_inputs(): def test_N4BiasFieldCorrection_outputs(): output_map = dict( - bias_image=dict(extensions=None,), output_image=dict(extensions=None,), + bias_image=dict( + extensions=None, + ), + output_image=dict( + extensions=None, + ), ) outputs = N4BiasFieldCorrection.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_Registration.py b/nipype/interfaces/ants/tests/test_auto_Registration.py index 151a852820..7bc7c04a15 100644 --- a/nipype/interfaces/ants/tests/test_auto_Registration.py +++ b/nipype/interfaces/ants/tests/test_auto_Registration.py @@ -4,81 +4,169 @@ def test_Registration_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), collapse_output_transforms=dict( - argstr="--collapse-output-transforms %d", usedefault=True, + argstr="--collapse-output-transforms %d", + usedefault=True, + ), + convergence_threshold=dict( + requires=["number_of_iterations"], + usedefault=True, ), - convergence_threshold=dict(requires=["number_of_iterations"], usedefault=True,), convergence_window_size=dict( - requires=["convergence_threshold"], usedefault=True, + requires=["convergence_threshold"], + usedefault=True, + ), + dimension=dict( + argstr="--dimensionality %d", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_image=dict( + mandatory=True, ), - dimension=dict(argstr="--dimensionality %d", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - fixed_image=dict(mandatory=True,), fixed_image_mask=dict( - argstr="%s", extensions=None, max_ver="2.1.0", xor=["fixed_image_masks"], + argstr="%s", + extensions=None, + max_ver="2.1.0", + xor=["fixed_image_masks"], + ), + fixed_image_masks=dict( + min_ver="2.2.0", + xor=["fixed_image_mask"], + ), + float=dict( + argstr="--float %d", ), - fixed_image_masks=dict(min_ver="2.2.0", xor=["fixed_image_mask"],), - float=dict(argstr="--float %d",), initial_moving_transform=dict( - argstr="%s", xor=["initial_moving_transform_com"], + argstr="%s", + xor=["initial_moving_transform_com"], ), initial_moving_transform_com=dict( - argstr="%s", xor=["initial_moving_transform"], + argstr="%s", + xor=["initial_moving_transform"], ), initialize_transforms_per_stage=dict( - argstr="--initialize-transforms-per-stage %d", usedefault=True, + argstr="--initialize-transforms-per-stage %d", + usedefault=True, + ), + interpolation=dict( + argstr="%s", + usedefault=True, ), - interpolation=dict(argstr="%s", usedefault=True,), interpolation_parameters=dict(), invert_initial_moving_transform=dict( - requires=["initial_moving_transform"], xor=["initial_moving_transform_com"], + requires=["initial_moving_transform"], + xor=["initial_moving_transform_com"], + ), + metric=dict( + mandatory=True, ), - metric=dict(mandatory=True,), metric_item_trait=dict(), metric_stage_trait=dict(), - metric_weight=dict(mandatory=True, requires=["metric"], usedefault=True,), - metric_weight_item_trait=dict(usedefault=True,), + metric_weight=dict( + mandatory=True, + requires=["metric"], + usedefault=True, + ), + metric_weight_item_trait=dict( + usedefault=True, + ), metric_weight_stage_trait=dict(), - moving_image=dict(mandatory=True,), + moving_image=dict( + mandatory=True, + ), moving_image_mask=dict( extensions=None, max_ver="2.1.0", 
requires=["fixed_image_mask"], xor=["moving_image_masks"], ), - moving_image_masks=dict(min_ver="2.2.0", xor=["moving_image_mask"],), - num_threads=dict(nohash=True, usedefault=True,), + moving_image_masks=dict( + min_ver="2.2.0", + xor=["moving_image_mask"], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), number_of_iterations=dict(), output_inverse_warped_image=dict( - hash_files=False, requires=["output_warped_image"], + hash_files=False, + requires=["output_warped_image"], + ), + output_transform_prefix=dict( + argstr="%s", + usedefault=True, + ), + output_warped_image=dict( + hash_files=False, + ), + radius_bins_item_trait=dict( + usedefault=True, ), - output_transform_prefix=dict(argstr="%s", usedefault=True,), - output_warped_image=dict(hash_files=False,), - radius_bins_item_trait=dict(usedefault=True,), radius_bins_stage_trait=dict(), - radius_or_number_of_bins=dict(requires=["metric_weight"], usedefault=True,), - restore_state=dict(argstr="--restore-state %s", extensions=None,), + radius_or_number_of_bins=dict( + requires=["metric_weight"], + usedefault=True, + ), + restore_state=dict( + argstr="--restore-state %s", + extensions=None, + ), restrict_deformation=dict(), - sampling_percentage=dict(requires=["sampling_strategy"],), + sampling_percentage=dict( + requires=["sampling_strategy"], + ), sampling_percentage_item_trait=dict(), sampling_percentage_stage_trait=dict(), - sampling_strategy=dict(requires=["metric_weight"],), + sampling_strategy=dict( + requires=["metric_weight"], + ), sampling_strategy_item_trait=dict(), sampling_strategy_stage_trait=dict(), - save_state=dict(argstr="--save-state %s", extensions=None,), - shrink_factors=dict(mandatory=True,), - sigma_units=dict(requires=["smoothing_sigmas"],), - smoothing_sigmas=dict(mandatory=True,), + save_state=dict( + argstr="--save-state %s", + extensions=None, + ), + shrink_factors=dict( + mandatory=True, + ), + sigma_units=dict( + requires=["smoothing_sigmas"], + ), + smoothing_sigmas=dict( + mandatory=True, + ), transform_parameters=dict(), - transforms=dict(argstr="%s", mandatory=True,), + transforms=dict( + argstr="%s", + mandatory=True, + ), use_estimate_learning_rate_once=dict(), - use_histogram_matching=dict(usedefault=True,), - verbose=dict(argstr="-v", usedefault=True,), - winsorize_lower_quantile=dict(argstr="%s", usedefault=True,), - winsorize_upper_quantile=dict(argstr="%s", usedefault=True,), + use_histogram_matching=dict( + usedefault=True, + ), + verbose=dict( + argstr="-v", + usedefault=True, + ), + winsorize_lower_quantile=dict( + argstr="%s", + usedefault=True, + ), + winsorize_upper_quantile=dict( + argstr="%s", + usedefault=True, + ), write_composite_transform=dict( - argstr="--write-composite-transform %d", usedefault=True, + argstr="--write-composite-transform %d", + usedefault=True, ), ) inputs = Registration.input_spec() @@ -90,19 +178,29 @@ def test_Registration_inputs(): def test_Registration_outputs(): output_map = dict( - composite_transform=dict(extensions=None,), + composite_transform=dict( + extensions=None, + ), elapsed_time=dict(), forward_invert_flags=dict(), forward_transforms=dict(), - inverse_composite_transform=dict(extensions=None,), - inverse_warped_image=dict(extensions=None,), + inverse_composite_transform=dict( + extensions=None, + ), + inverse_warped_image=dict( + extensions=None, + ), metric_value=dict(), reverse_forward_invert_flags=dict(), reverse_forward_transforms=dict(), reverse_invert_flags=dict(), reverse_transforms=dict(), - 
save_state=dict(extensions=None,), - warped_image=dict(extensions=None,), + save_state=dict( + extensions=None, + ), + warped_image=dict( + extensions=None, + ), ) outputs = Registration.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py b/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py index 3bc1b8aa06..c09f08d17a 100644 --- a/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py +++ b/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py @@ -4,18 +4,52 @@ def test_RegistrationSynQuick_inputs(): input_map = dict( - args=dict(argstr="%s",), - dimension=dict(argstr="-d %d", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - fixed_image=dict(argstr="-f %s...", mandatory=True,), - histogram_bins=dict(argstr="-r %d", usedefault=True,), - moving_image=dict(argstr="-m %s...", mandatory=True,), - num_threads=dict(argstr="-n %d", usedefault=True,), - output_prefix=dict(argstr="-o %s", usedefault=True,), - precision_type=dict(argstr="-p %s", usedefault=True,), - spline_distance=dict(argstr="-s %d", usedefault=True,), - transform_type=dict(argstr="-t %s", usedefault=True,), - use_histogram_matching=dict(argstr="-j %d",), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="-d %d", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_image=dict( + argstr="-f %s...", + mandatory=True, + ), + histogram_bins=dict( + argstr="-r %d", + usedefault=True, + ), + moving_image=dict( + argstr="-m %s...", + mandatory=True, + ), + num_threads=dict( + argstr="-n %d", + usedefault=True, + ), + output_prefix=dict( + argstr="-o %s", + usedefault=True, + ), + precision_type=dict( + argstr="-p %s", + usedefault=True, + ), + spline_distance=dict( + argstr="-s %d", + usedefault=True, + ), + transform_type=dict( + argstr="-t %s", + usedefault=True, + ), + use_histogram_matching=dict( + argstr="-j %d", + ), ) inputs = RegistrationSynQuick.input_spec() @@ -26,11 +60,21 @@ def test_RegistrationSynQuick_inputs(): def test_RegistrationSynQuick_outputs(): output_map = dict( - forward_warp_field=dict(extensions=None,), - inverse_warp_field=dict(extensions=None,), - inverse_warped_image=dict(extensions=None,), - out_matrix=dict(extensions=None,), - warped_image=dict(extensions=None,), + forward_warp_field=dict( + extensions=None, + ), + inverse_warp_field=dict( + extensions=None, + ), + inverse_warped_image=dict( + extensions=None, + ), + out_matrix=dict( + extensions=None, + ), + warped_image=dict( + extensions=None, + ), ) outputs = RegistrationSynQuick.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py b/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py index a2834bf36c..9bf5c6c6a6 100644 --- a/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py +++ b/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py @@ -4,15 +4,47 @@ def test_ResampleImageBySpacing_inputs(): input_map = dict( - addvox=dict(argstr="%d", position=6, requires=["apply_smoothing"],), - apply_smoothing=dict(argstr="%d", position=5,), - args=dict(argstr="%s",), - dimension=dict(argstr="%d", position=1, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - input_image=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - nn_interp=dict(argstr="%d", position=-1, requires=["addvox"],), - num_threads=dict(nohash=True, usedefault=True,), - out_spacing=dict(argstr="%s", mandatory=True, position=4,), + addvox=dict( + argstr="%d", + 
position=6, + requires=["apply_smoothing"], + ), + apply_smoothing=dict( + argstr="%d", + position=5, + ), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="%d", + position=1, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_image=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + nn_interp=dict( + argstr="%d", + position=-1, + requires=["addvox"], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_spacing=dict( + argstr="%s", + mandatory=True, + position=4, + ), output_image=dict( argstr="%s", extensions=None, @@ -30,7 +62,11 @@ def test_ResampleImageBySpacing_inputs(): def test_ResampleImageBySpacing_outputs(): - output_map = dict(output_image=dict(extensions=None,),) + output_map = dict( + output_image=dict( + extensions=None, + ), + ) outputs = ResampleImageBySpacing.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_ThresholdImage.py b/nipype/interfaces/ants/tests/test_auto_ThresholdImage.py index c8e399f644..c17f340b69 100644 --- a/nipype/interfaces/ants/tests/test_auto_ThresholdImage.py +++ b/nipype/interfaces/ants/tests/test_auto_ThresholdImage.py @@ -4,21 +4,52 @@ def test_ThresholdImage_inputs(): input_map = dict( - args=dict(argstr="%s",), - copy_header=dict(mandatory=True, usedefault=True,), - dimension=dict(argstr="%d", position=1, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - input_image=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - input_mask=dict(argstr="%s", extensions=None, requires=["num_thresholds"],), - inside_value=dict(argstr="%f", position=6, requires=["th_low"],), + args=dict( + argstr="%s", + ), + copy_header=dict( + mandatory=True, + usedefault=True, + ), + dimension=dict( + argstr="%d", + position=1, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_image=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + input_mask=dict( + argstr="%s", + extensions=None, + requires=["num_thresholds"], + ), + inside_value=dict( + argstr="%f", + position=6, + requires=["th_low"], + ), mode=dict( argstr="%s", position=4, requires=["num_thresholds"], xor=["th_low", "th_high"], ), - num_threads=dict(nohash=True, usedefault=True,), - num_thresholds=dict(argstr="%d", position=5,), + num_threads=dict( + nohash=True, + usedefault=True, + ), + num_thresholds=dict( + argstr="%d", + position=5, + ), output_image=dict( argstr="%s", extensions=None, @@ -27,9 +58,21 @@ def test_ThresholdImage_inputs(): name_template="%s_resampled", position=3, ), - outside_value=dict(argstr="%f", position=7, requires=["th_low"],), - th_high=dict(argstr="%f", position=5, xor=["mode"],), - th_low=dict(argstr="%f", position=4, xor=["mode"],), + outside_value=dict( + argstr="%f", + position=7, + requires=["th_low"], + ), + th_high=dict( + argstr="%f", + position=5, + xor=["mode"], + ), + th_low=dict( + argstr="%f", + position=4, + xor=["mode"], + ), ) inputs = ThresholdImage.input_spec() @@ -39,7 +82,11 @@ def test_ThresholdImage_inputs(): def test_ThresholdImage_outputs(): - output_map = dict(output_image=dict(extensions=None,),) + output_map = dict( + output_image=dict( + extensions=None, + ), + ) outputs = ThresholdImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py b/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py index 
b9acca1442..66f45d0cd7 100644 --- a/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py @@ -4,14 +4,34 @@ def test_WarpImageMultiTransform_inputs(): input_map = dict( - args=dict(argstr="%s",), - dimension=dict(argstr="%d", position=1, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - input_image=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="%d", + position=1, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_image=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), invert_affine=dict(), - num_threads=dict(nohash=True, usedefault=True,), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_postfix=dict( - extensions=None, hash_files=False, usedefault=True, xor=["output_image"], + extensions=None, + hash_files=False, + usedefault=True, + xor=["output_image"], ), output_image=dict( argstr="%s", @@ -21,12 +41,29 @@ def test_WarpImageMultiTransform_inputs(): position=3, xor=["out_postfix"], ), - reference_image=dict(argstr="-R %s", extensions=None, xor=["tightest_box"],), - reslice_by_header=dict(argstr="--reslice-by-header",), - tightest_box=dict(argstr="--tightest-bounding-box", xor=["reference_image"],), - transformation_series=dict(argstr="%s", mandatory=True, position=-1,), - use_bspline=dict(argstr="--use-BSpline",), - use_nearest=dict(argstr="--use-NN",), + reference_image=dict( + argstr="-R %s", + extensions=None, + xor=["tightest_box"], + ), + reslice_by_header=dict( + argstr="--reslice-by-header", + ), + tightest_box=dict( + argstr="--tightest-bounding-box", + xor=["reference_image"], + ), + transformation_series=dict( + argstr="%s", + mandatory=True, + position=-1, + ), + use_bspline=dict( + argstr="--use-BSpline", + ), + use_nearest=dict( + argstr="--use-NN", + ), ) inputs = WarpImageMultiTransform.input_spec() @@ -36,7 +73,11 @@ def test_WarpImageMultiTransform_inputs(): def test_WarpImageMultiTransform_outputs(): - output_map = dict(output_image=dict(extensions=None,),) + output_map = dict( + output_image=dict( + extensions=None, + ), + ) outputs = WarpImageMultiTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py b/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py index e95d70c9ac..63d8d8365e 100644 --- a/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py @@ -4,19 +4,56 @@ def test_WarpTimeSeriesImageMultiTransform_inputs(): input_map = dict( - args=dict(argstr="%s",), - dimension=dict(argstr="%d", position=1, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - input_image=dict(argstr="%s", copyfile=True, extensions=None, mandatory=True,), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="%d", + position=1, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_image=dict( + argstr="%s", + copyfile=True, + extensions=None, + mandatory=True, + ), invert_affine=dict(), - num_threads=dict(nohash=True, usedefault=True,), - out_postfix=dict(argstr="%s", usedefault=True,), - reference_image=dict(argstr="-R %s", extensions=None, xor=["tightest_box"],), - reslice_by_header=dict(argstr="--reslice-by-header",), - 
tightest_box=dict(argstr="--tightest-bounding-box", xor=["reference_image"],), - transformation_series=dict(argstr="%s", copyfile=False, mandatory=True,), - use_bspline=dict(argstr="--use-Bspline",), - use_nearest=dict(argstr="--use-NN",), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_postfix=dict( + argstr="%s", + usedefault=True, + ), + reference_image=dict( + argstr="-R %s", + extensions=None, + xor=["tightest_box"], + ), + reslice_by_header=dict( + argstr="--reslice-by-header", + ), + tightest_box=dict( + argstr="--tightest-bounding-box", + xor=["reference_image"], + ), + transformation_series=dict( + argstr="%s", + copyfile=False, + mandatory=True, + ), + use_bspline=dict( + argstr="--use-Bspline", + ), + use_nearest=dict( + argstr="--use-NN", + ), ) inputs = WarpTimeSeriesImageMultiTransform.input_spec() @@ -26,7 +63,11 @@ def test_WarpTimeSeriesImageMultiTransform_inputs(): def test_WarpTimeSeriesImageMultiTransform_outputs(): - output_map = dict(output_image=dict(extensions=None,),) + output_map = dict( + output_image=dict( + extensions=None, + ), + ) outputs = WarpTimeSeriesImageMultiTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py b/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py index d7950f38b5..0318db7624 100644 --- a/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py +++ b/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py @@ -4,24 +4,61 @@ def test_antsIntroduction_inputs(): input_map = dict( - args=dict(argstr="%s",), - bias_field_correction=dict(argstr="-n 1",), - dimension=dict(argstr="-d %d", position=1, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - force_proceed=dict(argstr="-f 1",), + args=dict( + argstr="%s", + ), + bias_field_correction=dict( + argstr="-n 1", + ), + dimension=dict( + argstr="-d %d", + position=1, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + force_proceed=dict( + argstr="-f 1", + ), input_image=dict( - argstr="-i %s", copyfile=False, extensions=None, mandatory=True, + argstr="-i %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + inverse_warp_template_labels=dict( + argstr="-l", + ), + max_iterations=dict( + argstr="-m %s", + sep="x", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_prefix=dict( + argstr="-o %s", + usedefault=True, + ), + quality_check=dict( + argstr="-q 1", ), - inverse_warp_template_labels=dict(argstr="-l",), - max_iterations=dict(argstr="-m %s", sep="x",), - num_threads=dict(nohash=True, usedefault=True,), - out_prefix=dict(argstr="-o %s", usedefault=True,), - quality_check=dict(argstr="-q 1",), reference_image=dict( - argstr="-r %s", copyfile=True, extensions=None, mandatory=True, + argstr="-r %s", + copyfile=True, + extensions=None, + mandatory=True, + ), + similarity_metric=dict( + argstr="-s %s", + ), + transformation_model=dict( + argstr="-t %s", + usedefault=True, ), - similarity_metric=dict(argstr="-s %s",), - transformation_model=dict(argstr="-t %s", usedefault=True,), ) inputs = antsIntroduction.input_spec() @@ -32,11 +69,21 @@ def test_antsIntroduction_inputs(): def test_antsIntroduction_outputs(): output_map = dict( - affine_transformation=dict(extensions=None,), - input_file=dict(extensions=None,), - inverse_warp_field=dict(extensions=None,), - output_file=dict(extensions=None,), - warp_field=dict(extensions=None,), + affine_transformation=dict( + extensions=None, + ), + input_file=dict( + 
extensions=None, + ), + inverse_warp_field=dict( + extensions=None, + ), + output_file=dict( + extensions=None, + ), + warp_field=dict( + extensions=None, + ), ) outputs = antsIntroduction.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py b/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py index 35b225949b..2713c6af54 100644 --- a/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py +++ b/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py @@ -4,21 +4,63 @@ def test_buildtemplateparallel_inputs(): input_map = dict( - args=dict(argstr="%s",), - bias_field_correction=dict(argstr="-n 1",), - dimension=dict(argstr="-d %d", position=1, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - gradient_step_size=dict(argstr="-g %f",), - in_files=dict(argstr="%s", mandatory=True, position=-1,), - iteration_limit=dict(argstr="-i %d", usedefault=True,), - max_iterations=dict(argstr="-m %s", sep="x",), - num_cores=dict(argstr="-j %d", requires=["parallelization"],), - num_threads=dict(nohash=True, usedefault=True,), - out_prefix=dict(argstr="-o %s", usedefault=True,), - parallelization=dict(argstr="-c %d", usedefault=True,), - rigid_body_registration=dict(argstr="-r 1",), - similarity_metric=dict(argstr="-s %s",), - transformation_model=dict(argstr="-t %s", usedefault=True,), + args=dict( + argstr="%s", + ), + bias_field_correction=dict( + argstr="-n 1", + ), + dimension=dict( + argstr="-d %d", + position=1, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gradient_step_size=dict( + argstr="-g %f", + ), + in_files=dict( + argstr="%s", + mandatory=True, + position=-1, + ), + iteration_limit=dict( + argstr="-i %d", + usedefault=True, + ), + max_iterations=dict( + argstr="-m %s", + sep="x", + ), + num_cores=dict( + argstr="-j %d", + requires=["parallelization"], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_prefix=dict( + argstr="-o %s", + usedefault=True, + ), + parallelization=dict( + argstr="-c %d", + usedefault=True, + ), + rigid_body_registration=dict( + argstr="-r 1", + ), + similarity_metric=dict( + argstr="-s %s", + ), + transformation_model=dict( + argstr="-t %s", + usedefault=True, + ), use_first_as_target=dict(), ) inputs = buildtemplateparallel.input_spec() @@ -30,7 +72,9 @@ def test_buildtemplateparallel_inputs(): def test_buildtemplateparallel_outputs(): output_map = dict( - final_template_file=dict(extensions=None,), + final_template_file=dict( + extensions=None, + ), subject_outfiles=dict(), template_files=dict(), ) diff --git a/nipype/interfaces/base/tests/test_auto_CommandLine.py b/nipype/interfaces/base/tests/test_auto_CommandLine.py index b003543a3e..b03e4adfca 100644 --- a/nipype/interfaces/base/tests/test_auto_CommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_CommandLine.py @@ -4,7 +4,13 @@ def test_CommandLine_inputs(): input_map = dict( - args=dict(argstr="%s",), environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), ) inputs = CommandLine.input_spec() diff --git a/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py b/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py index 7ab181458f..908943c754 100644 --- a/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py @@ -4,10 +4,17 @@ def test_MpiCommandLine_inputs(): input_map = dict( - args=dict(argstr="%s",), - 
environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), n_procs=dict(), - use_mpi=dict(usedefault=True,), + use_mpi=dict( + usedefault=True, + ), ) inputs = MpiCommandLine.input_spec() diff --git a/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py b/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py index e17eed4db1..1197b2479c 100644 --- a/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py @@ -4,7 +4,13 @@ def test_SEMLikeCommandLine_inputs(): input_map = dict( - args=dict(argstr="%s",), environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), ) inputs = SEMLikeCommandLine.input_spec() diff --git a/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py b/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py index bc0771ac78..39b80d487b 100644 --- a/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py @@ -4,9 +4,19 @@ def test_StdOutCommandLine_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), ) inputs = StdOutCommandLine.input_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_BDP.py b/nipype/interfaces/brainsuite/tests/test_auto_BDP.py index 94f95c5c2a..0da29c372d 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_BDP.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_BDP.py @@ -10,7 +10,9 @@ def test_BDP_inputs(): position=-1, xor=["bMatrixFile"], ), - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), bMatrixFile=dict( argstr="--bmat %s", extensions=None, @@ -18,7 +20,9 @@ def test_BDP_inputs(): position=-1, xor=["BVecBValPair"], ), - bValRatioThreshold=dict(argstr="--bval-ratio-threshold %f",), + bValRatioThreshold=dict( + argstr="--bval-ratio-threshold %f", + ), bfcFile=dict( argstr="%s", extensions=None, @@ -27,68 +31,152 @@ def test_BDP_inputs(): xor=["noStructuralRegistration"], ), customDiffusionLabel=dict( - argstr="--custom-diffusion-label %s", extensions=None, - ), - customLabelXML=dict(argstr="--custom-label-xml %s", extensions=None,), - customT1Label=dict(argstr="--custom-t1-label %s", extensions=None,), - dataSinkDelay=dict(argstr="%s",), - dcorrRegMeasure=dict(argstr="--dcorr-reg-method %s",), - dcorrWeight=dict(argstr="--dcorr-regularization-wt %f",), - dwiMask=dict(argstr="--dwi-mask %s", extensions=None,), - echoSpacing=dict(argstr="--echo-spacing=%f",), - environ=dict(nohash=True, usedefault=True,), - estimateODF_3DShore=dict(argstr="--3dshore --diffusion_time_ms %f",), - estimateODF_FRACT=dict(argstr="--FRACT",), - estimateODF_FRT=dict(argstr="--FRT",), - estimateTensors=dict(argstr="--tensors",), + argstr="--custom-diffusion-label %s", + extensions=None, + ), + customLabelXML=dict( + argstr="--custom-label-xml %s", + extensions=None, + ), + customT1Label=dict( + argstr="--custom-t1-label %s", + extensions=None, + ), + dataSinkDelay=dict( + argstr="%s", + ), + dcorrRegMeasure=dict( + argstr="--dcorr-reg-method %s", + ), + dcorrWeight=dict( + argstr="--dcorr-regularization-wt %f", + ), + dwiMask=dict( + 
argstr="--dwi-mask %s", + extensions=None, + ), + echoSpacing=dict( + argstr="--echo-spacing=%f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + estimateODF_3DShore=dict( + argstr="--3dshore --diffusion_time_ms %f", + ), + estimateODF_FRACT=dict( + argstr="--FRACT", + ), + estimateODF_FRT=dict( + argstr="--FRT", + ), + estimateTensors=dict( + argstr="--tensors", + ), fieldmapCorrection=dict( argstr="--fieldmap-correction %s", extensions=None, requires=["echoSpacing"], ), fieldmapCorrectionMethod=dict( - argstr="--fieldmap-correction-method %s", xor=["skipIntensityCorr"], - ), - fieldmapSmooth=dict(argstr="--fieldmap-smooth3=%f",), - flagConfigFile=dict(argstr="--flag-conf-file %s", extensions=None,), - forcePartialROIStats=dict(argstr="--force-partial-roi-stats",), - generateStats=dict(argstr="--generate-stats",), - ignoreFieldmapFOV=dict(argstr="--ignore-fieldmap-fov",), - ignoreMemory=dict(argstr="--ignore-memory",), + argstr="--fieldmap-correction-method %s", + xor=["skipIntensityCorr"], + ), + fieldmapSmooth=dict( + argstr="--fieldmap-smooth3=%f", + ), + flagConfigFile=dict( + argstr="--flag-conf-file %s", + extensions=None, + ), + forcePartialROIStats=dict( + argstr="--force-partial-roi-stats", + ), + generateStats=dict( + argstr="--generate-stats", + ), + ignoreFieldmapFOV=dict( + argstr="--ignore-fieldmap-fov", + ), + ignoreMemory=dict( + argstr="--ignore-memory", + ), inputDiffusionData=dict( - argstr="--nii %s", extensions=None, mandatory=True, position=-2, + argstr="--nii %s", + extensions=None, + mandatory=True, + position=-2, + ), + lowMemory=dict( + argstr="--low-memory", ), - lowMemory=dict(argstr="--low-memory",), noStructuralRegistration=dict( argstr="--no-structural-registration", mandatory=True, position=0, xor=["bfcFile"], ), - odfLambta=dict(argstr="--odf-lambda ",), - onlyStats=dict(argstr="--generate-only-stats",), - outPrefix=dict(argstr="--output-fileprefix %s",), - outputDiffusionCoordinates=dict(argstr="--output-diffusion-coordinate",), - outputSubdir=dict(argstr="--output-subdir %s",), - phaseEncodingDirection=dict(argstr="--dir=%s",), - rigidRegMeasure=dict(argstr="--rigid-reg-measure %s",), - skipDistortionCorr=dict(argstr="--no-distortion-correction",), + odfLambta=dict( + argstr="--odf-lambda ", + ), + onlyStats=dict( + argstr="--generate-only-stats", + ), + outPrefix=dict( + argstr="--output-fileprefix %s", + ), + outputDiffusionCoordinates=dict( + argstr="--output-diffusion-coordinate", + ), + outputSubdir=dict( + argstr="--output-subdir %s", + ), + phaseEncodingDirection=dict( + argstr="--dir=%s", + ), + rigidRegMeasure=dict( + argstr="--rigid-reg-measure %s", + ), + skipDistortionCorr=dict( + argstr="--no-distortion-correction", + ), skipIntensityCorr=dict( - argstr="--no-intensity-correction", xor=["fieldmapCorrectionMethod"], + argstr="--no-intensity-correction", + xor=["fieldmapCorrectionMethod"], + ), + skipNonuniformityCorr=dict( + argstr="--no-nonuniformity-correction", + ), + t1Mask=dict( + argstr="--t1-mask %s", + extensions=None, + ), + threads=dict( + argstr="--threads=%d", + ), + transformDataOnly=dict( + argstr="--transform-data-only", ), - skipNonuniformityCorr=dict(argstr="--no-nonuniformity-correction",), - t1Mask=dict(argstr="--t1-mask %s", extensions=None,), - threads=dict(argstr="--threads=%d",), - transformDataOnly=dict(argstr="--transform-data-only",), transformDiffusionSurface=dict( - argstr="--transform-diffusion-surface %s", extensions=None, + argstr="--transform-diffusion-surface %s", + extensions=None, ), 
transformDiffusionVolume=dict( - argstr="--transform-diffusion-volume %s", extensions=None, + argstr="--transform-diffusion-volume %s", + extensions=None, + ), + transformInterpolation=dict( + argstr="--transform-interpolation %s", + ), + transformT1Surface=dict( + argstr="--transform-t1-surface %s", + extensions=None, + ), + transformT1Volume=dict( + argstr="--transform-t1-volume %s", + extensions=None, ), - transformInterpolation=dict(argstr="--transform-interpolation %s",), - transformT1Surface=dict(argstr="--transform-t1-surface %s", extensions=None,), - transformT1Volume=dict(argstr="--transform-t1-volume %s", extensions=None,), ) inputs = BDP.input_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py b/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py index 5e2588fd74..dbb1f3d839 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py @@ -4,35 +4,96 @@ def test_Bfc_inputs(): input_map = dict( - args=dict(argstr="%s",), - biasEstimateConvergenceThreshold=dict(argstr="--beps %f",), - biasEstimateSpacing=dict(argstr="-s %d",), - biasFieldEstimatesOutputPrefix=dict(argstr="--biasprefix %s",), - biasRange=dict(argstr="%s",), - controlPointSpacing=dict(argstr="-c %d",), - convergenceThreshold=dict(argstr="--eps %f",), - correctWholeVolume=dict(argstr="--extrapolate",), - correctedImagesOutputPrefix=dict(argstr="--prefix %s",), - correctionScheduleFile=dict(argstr="--schedule %s", extensions=None,), - environ=dict(nohash=True, usedefault=True,), - histogramRadius=dict(argstr="-r %d",), - histogramType=dict(argstr="%s",), - inputMRIFile=dict(argstr="-i %s", extensions=None, mandatory=True,), - inputMaskFile=dict(argstr="-m %s", extensions=None, hash_files=False,), - intermediate_file_type=dict(argstr="%s",), - iterativeMode=dict(argstr="--iterate",), - maxBias=dict(argstr="-U %f", usedefault=True,), - minBias=dict(argstr="-L %f", usedefault=True,), - outputBiasField=dict(argstr="--bias %s", extensions=None, hash_files=False,), + args=dict( + argstr="%s", + ), + biasEstimateConvergenceThreshold=dict( + argstr="--beps %f", + ), + biasEstimateSpacing=dict( + argstr="-s %d", + ), + biasFieldEstimatesOutputPrefix=dict( + argstr="--biasprefix %s", + ), + biasRange=dict( + argstr="%s", + ), + controlPointSpacing=dict( + argstr="-c %d", + ), + convergenceThreshold=dict( + argstr="--eps %f", + ), + correctWholeVolume=dict( + argstr="--extrapolate", + ), + correctedImagesOutputPrefix=dict( + argstr="--prefix %s", + ), + correctionScheduleFile=dict( + argstr="--schedule %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + histogramRadius=dict( + argstr="-r %d", + ), + histogramType=dict( + argstr="%s", + ), + inputMRIFile=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + inputMaskFile=dict( + argstr="-m %s", + extensions=None, + hash_files=False, + ), + intermediate_file_type=dict( + argstr="%s", + ), + iterativeMode=dict( + argstr="--iterate", + ), + maxBias=dict( + argstr="-U %f", + usedefault=True, + ), + minBias=dict( + argstr="-L %f", + usedefault=True, + ), + outputBiasField=dict( + argstr="--bias %s", + extensions=None, + hash_files=False, + ), outputMRIVolume=dict( - argstr="-o %s", extensions=None, genfile=True, hash_files=False, + argstr="-o %s", + extensions=None, + genfile=True, + hash_files=False, ), outputMaskedBiasField=dict( - argstr="--maskedbias %s", extensions=None, hash_files=False, + argstr="--maskedbias %s", + extensions=None, + 
hash_files=False, + ), + splineLambda=dict( + argstr="-w %f", + ), + timer=dict( + argstr="--timer", + ), + verbosityLevel=dict( + argstr="-v %d", ), - splineLambda=dict(argstr="-w %f",), - timer=dict(argstr="--timer",), - verbosityLevel=dict(argstr="-v %d",), ) inputs = Bfc.input_spec() @@ -43,10 +104,18 @@ def test_Bfc_inputs(): def test_Bfc_outputs(): output_map = dict( - correctionScheduleFile=dict(extensions=None,), - outputBiasField=dict(extensions=None,), - outputMRIVolume=dict(extensions=None,), - outputMaskedBiasField=dict(extensions=None,), + correctionScheduleFile=dict( + extensions=None, + ), + outputBiasField=dict( + extensions=None, + ), + outputMRIVolume=dict( + extensions=None, + ), + outputMaskedBiasField=dict( + extensions=None, + ), ) outputs = Bfc.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Bse.py b/nipype/interfaces/brainsuite/tests/test_auto_Bse.py index a980010ef0..748defcc00 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Bse.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Bse.py @@ -4,32 +4,84 @@ def test_Bse_inputs(): input_map = dict( - args=dict(argstr="%s",), - diffusionConstant=dict(argstr="-d %f", usedefault=True,), - diffusionIterations=dict(argstr="-n %d", usedefault=True,), - dilateFinalMask=dict(argstr="-p", usedefault=True,), - edgeDetectionConstant=dict(argstr="-s %f", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - inputMRIFile=dict(argstr="-i %s", extensions=None, mandatory=True,), - noRotate=dict(argstr="--norotate",), - outputCortexFile=dict(argstr="--cortex %s", extensions=None, hash_files=False,), + args=dict( + argstr="%s", + ), + diffusionConstant=dict( + argstr="-d %f", + usedefault=True, + ), + diffusionIterations=dict( + argstr="-n %d", + usedefault=True, + ), + dilateFinalMask=dict( + argstr="-p", + usedefault=True, + ), + edgeDetectionConstant=dict( + argstr="-s %f", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMRIFile=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + noRotate=dict( + argstr="--norotate", + ), + outputCortexFile=dict( + argstr="--cortex %s", + extensions=None, + hash_files=False, + ), outputDetailedBrainMask=dict( - argstr="--hires %s", extensions=None, hash_files=False, + argstr="--hires %s", + extensions=None, + hash_files=False, ), outputDiffusionFilter=dict( - argstr="--adf %s", extensions=None, hash_files=False, + argstr="--adf %s", + extensions=None, + hash_files=False, + ), + outputEdgeMap=dict( + argstr="--edge %s", + extensions=None, + hash_files=False, ), - outputEdgeMap=dict(argstr="--edge %s", extensions=None, hash_files=False,), outputMRIVolume=dict( - argstr="-o %s", extensions=None, genfile=True, hash_files=False, + argstr="-o %s", + extensions=None, + genfile=True, + hash_files=False, ), outputMaskFile=dict( - argstr="--mask %s", extensions=None, genfile=True, hash_files=False, + argstr="--mask %s", + extensions=None, + genfile=True, + hash_files=False, + ), + radius=dict( + argstr="-r %f", + usedefault=True, + ), + timer=dict( + argstr="--timer", + ), + trim=dict( + argstr="--trim", + usedefault=True, + ), + verbosityLevel=dict( + argstr="-v %f", + usedefault=True, ), - radius=dict(argstr="-r %f", usedefault=True,), - timer=dict(argstr="--timer",), - trim=dict(argstr="--trim", usedefault=True,), - verbosityLevel=dict(argstr="-v %f", usedefault=True,), ) inputs = Bse.input_spec() @@ -40,12 +92,24 @@ def test_Bse_inputs(): def test_Bse_outputs(): output_map = dict( - 
outputCortexFile=dict(extensions=None,), - outputDetailedBrainMask=dict(extensions=None,), - outputDiffusionFilter=dict(extensions=None,), - outputEdgeMap=dict(extensions=None,), - outputMRIVolume=dict(extensions=None,), - outputMaskFile=dict(extensions=None,), + outputCortexFile=dict( + extensions=None, + ), + outputDetailedBrainMask=dict( + extensions=None, + ), + outputDiffusionFilter=dict( + extensions=None, + ), + outputEdgeMap=dict( + extensions=None, + ), + outputMRIVolume=dict( + extensions=None, + ), + outputMaskFile=dict( + extensions=None, + ), ) outputs = Bse.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py b/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py index e2ff64c071..a924d7ce0f 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py @@ -4,31 +4,80 @@ def test_Cerebro_inputs(): input_map = dict( - args=dict(argstr="%s",), - costFunction=dict(argstr="-c %d", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + costFunction=dict( + argstr="-c %d", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), inputAtlasLabelFile=dict( - argstr="--atlaslabels %s", extensions=None, mandatory=True, + argstr="--atlaslabels %s", + extensions=None, + mandatory=True, + ), + inputAtlasMRIFile=dict( + argstr="--atlas %s", + extensions=None, + mandatory=True, + ), + inputBrainMaskFile=dict( + argstr="-m %s", + extensions=None, + ), + inputMRIFile=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + keepTempFiles=dict( + argstr="--keep", + ), + linearConvergence=dict( + argstr="--linconv %f", ), - inputAtlasMRIFile=dict(argstr="--atlas %s", extensions=None, mandatory=True,), - inputBrainMaskFile=dict(argstr="-m %s", extensions=None,), - inputMRIFile=dict(argstr="-i %s", extensions=None, mandatory=True,), - keepTempFiles=dict(argstr="--keep",), - linearConvergence=dict(argstr="--linconv %f",), outputAffineTransformFile=dict( - argstr="--air %s", extensions=None, genfile=True, + argstr="--air %s", + extensions=None, + genfile=True, + ), + outputCerebrumMaskFile=dict( + argstr="-o %s", + extensions=None, + genfile=True, + ), + outputLabelVolumeFile=dict( + argstr="-l %s", + extensions=None, + genfile=True, ), - outputCerebrumMaskFile=dict(argstr="-o %s", extensions=None, genfile=True,), - outputLabelVolumeFile=dict(argstr="-l %s", extensions=None, genfile=True,), outputWarpTransformFile=dict( - argstr="--warp %s", extensions=None, genfile=True, - ), - tempDirectory=dict(argstr="--tempdir %s",), - tempDirectoryBase=dict(argstr="--tempdirbase %s",), - useCentroids=dict(argstr="--centroids",), - verbosity=dict(argstr="-v %d",), - warpConvergence=dict(argstr="--warpconv %f",), - warpLabel=dict(argstr="--warplevel %d",), + argstr="--warp %s", + extensions=None, + genfile=True, + ), + tempDirectory=dict( + argstr="--tempdir %s", + ), + tempDirectoryBase=dict( + argstr="--tempdirbase %s", + ), + useCentroids=dict( + argstr="--centroids", + ), + verbosity=dict( + argstr="-v %d", + ), + warpConvergence=dict( + argstr="--warpconv %f", + ), + warpLabel=dict( + argstr="--warplevel %d", + ), ) inputs = Cerebro.input_spec() @@ -39,10 +88,18 @@ def test_Cerebro_inputs(): def test_Cerebro_outputs(): output_map = dict( - outputAffineTransformFile=dict(extensions=None,), - outputCerebrumMaskFile=dict(extensions=None,), - outputLabelVolumeFile=dict(extensions=None,), - 
outputWarpTransformFile=dict(extensions=None,), + outputAffineTransformFile=dict( + extensions=None, + ), + outputCerebrumMaskFile=dict( + extensions=None, + ), + outputLabelVolumeFile=dict( + extensions=None, + ), + outputWarpTransformFile=dict( + extensions=None, + ), ) outputs = Cerebro.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py b/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py index 75015d79ab..30287edf90 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py @@ -4,17 +4,49 @@ def test_Cortex_inputs(): input_map = dict( - args=dict(argstr="%s",), - computeGCBoundary=dict(argstr="-g",), - computeWGBoundary=dict(argstr="-w", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - includeAllSubcorticalAreas=dict(argstr="-a", usedefault=True,), - inputHemisphereLabelFile=dict(argstr="-h %s", extensions=None, mandatory=True,), - inputTissueFractionFile=dict(argstr="-f %s", extensions=None, mandatory=True,), - outputCerebrumMask=dict(argstr="-o %s", extensions=None, genfile=True,), - timer=dict(argstr="--timer",), - tissueFractionThreshold=dict(argstr="-p %f", usedefault=True,), - verbosity=dict(argstr="-v %d",), + args=dict( + argstr="%s", + ), + computeGCBoundary=dict( + argstr="-g", + ), + computeWGBoundary=dict( + argstr="-w", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + includeAllSubcorticalAreas=dict( + argstr="-a", + usedefault=True, + ), + inputHemisphereLabelFile=dict( + argstr="-h %s", + extensions=None, + mandatory=True, + ), + inputTissueFractionFile=dict( + argstr="-f %s", + extensions=None, + mandatory=True, + ), + outputCerebrumMask=dict( + argstr="-o %s", + extensions=None, + genfile=True, + ), + timer=dict( + argstr="--timer", + ), + tissueFractionThreshold=dict( + argstr="-p %f", + usedefault=True, + ), + verbosity=dict( + argstr="-v %d", + ), ) inputs = Cortex.input_spec() @@ -24,7 +56,11 @@ def test_Cortex_inputs(): def test_Cortex_outputs(): - output_map = dict(outputCerebrumMask=dict(extensions=None,),) + output_map = dict( + outputCerebrumMask=dict( + extensions=None, + ), + ) outputs = Cortex.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py b/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py index ed3b4c32f6..4d986e22f3 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py @@ -4,14 +4,35 @@ def test_Dewisp_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputMaskFile=dict(argstr="-i %s", extensions=None, mandatory=True,), - maximumIterations=dict(argstr="-n %d",), - outputMaskFile=dict(argstr="-o %s", extensions=None, genfile=True,), - sizeThreshold=dict(argstr="-t %d",), - timer=dict(argstr="--timer",), - verbosity=dict(argstr="-v %d",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMaskFile=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + maximumIterations=dict( + argstr="-n %d", + ), + outputMaskFile=dict( + argstr="-o %s", + extensions=None, + genfile=True, + ), + sizeThreshold=dict( + argstr="-t %d", + ), + timer=dict( + argstr="--timer", + ), + verbosity=dict( + argstr="-v %d", + ), ) inputs = Dewisp.input_spec() @@ -21,7 +42,11 @@ def test_Dewisp_inputs(): def test_Dewisp_outputs(): - output_map = 
dict(outputMaskFile=dict(extensions=None,),) + output_map = dict( + outputMaskFile=dict( + extensions=None, + ), + ) outputs = Dewisp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py b/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py index 00a35e8c82..3122791cf5 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py @@ -4,30 +4,70 @@ def test_Dfs_inputs(): input_map = dict( - args=dict(argstr="%s",), - curvatureWeighting=dict(argstr="-w %f", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - inputShadingVolume=dict(argstr="-c %s", extensions=None,), - inputVolumeFile=dict(argstr="-i %s", extensions=None, mandatory=True,), - noNormalsFlag=dict(argstr="--nonormals",), + args=dict( + argstr="%s", + ), + curvatureWeighting=dict( + argstr="-w %f", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputShadingVolume=dict( + argstr="-c %s", + extensions=None, + ), + inputVolumeFile=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + noNormalsFlag=dict( + argstr="--nonormals", + ), nonZeroTessellation=dict( - argstr="-nz", xor=("nonZeroTessellation", "specialTessellation"), + argstr="-nz", + xor=("nonZeroTessellation", "specialTessellation"), + ), + outputSurfaceFile=dict( + argstr="-o %s", + extensions=None, + genfile=True, + ), + postSmoothFlag=dict( + argstr="--postsmooth", + ), + scalingPercentile=dict( + argstr="-f %f", + ), + smoothingConstant=dict( + argstr="-a %f", + usedefault=True, + ), + smoothingIterations=dict( + argstr="-n %d", + usedefault=True, ), - outputSurfaceFile=dict(argstr="-o %s", extensions=None, genfile=True,), - postSmoothFlag=dict(argstr="--postsmooth",), - scalingPercentile=dict(argstr="-f %f",), - smoothingConstant=dict(argstr="-a %f", usedefault=True,), - smoothingIterations=dict(argstr="-n %d", usedefault=True,), specialTessellation=dict( argstr="%s", position=-1, requires=["tessellationThreshold"], xor=("nonZeroTessellation", "specialTessellation"), ), - tessellationThreshold=dict(argstr="%f",), - timer=dict(argstr="--timer",), - verbosity=dict(argstr="-v %d",), - zeroPadFlag=dict(argstr="-z",), + tessellationThreshold=dict( + argstr="%f", + ), + timer=dict( + argstr="--timer", + ), + verbosity=dict( + argstr="-v %d", + ), + zeroPadFlag=dict( + argstr="-z", + ), ) inputs = Dfs.input_spec() @@ -37,7 +77,11 @@ def test_Dfs_inputs(): def test_Dfs_outputs(): - output_map = dict(outputSurfaceFile=dict(extensions=None,),) + output_map = dict( + outputSurfaceFile=dict( + extensions=None, + ), + ) outputs = Dfs.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py b/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py index 761d049672..0696f11992 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py @@ -4,17 +4,53 @@ def test_Hemisplit_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputHemisphereLabelFile=dict(argstr="-l %s", extensions=None, mandatory=True,), - inputSurfaceFile=dict(argstr="-i %s", extensions=None, mandatory=True,), - outputLeftHemisphere=dict(argstr="--left %s", extensions=None, genfile=True,), - outputLeftPialHemisphere=dict(argstr="-pl %s", extensions=None, genfile=True,), - outputRightHemisphere=dict(argstr="--right %s", 
extensions=None, genfile=True,), - outputRightPialHemisphere=dict(argstr="-pr %s", extensions=None, genfile=True,), - pialSurfaceFile=dict(argstr="-p %s", extensions=None,), - timer=dict(argstr="--timer",), - verbosity=dict(argstr="-v %d",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputHemisphereLabelFile=dict( + argstr="-l %s", + extensions=None, + mandatory=True, + ), + inputSurfaceFile=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + outputLeftHemisphere=dict( + argstr="--left %s", + extensions=None, + genfile=True, + ), + outputLeftPialHemisphere=dict( + argstr="-pl %s", + extensions=None, + genfile=True, + ), + outputRightHemisphere=dict( + argstr="--right %s", + extensions=None, + genfile=True, + ), + outputRightPialHemisphere=dict( + argstr="-pr %s", + extensions=None, + genfile=True, + ), + pialSurfaceFile=dict( + argstr="-p %s", + extensions=None, + ), + timer=dict( + argstr="--timer", + ), + verbosity=dict( + argstr="-v %d", + ), ) inputs = Hemisplit.input_spec() @@ -25,10 +61,18 @@ def test_Hemisplit_inputs(): def test_Hemisplit_outputs(): output_map = dict( - outputLeftHemisphere=dict(extensions=None,), - outputLeftPialHemisphere=dict(extensions=None,), - outputRightHemisphere=dict(extensions=None,), - outputRightPialHemisphere=dict(extensions=None,), + outputLeftHemisphere=dict( + extensions=None, + ), + outputLeftPialHemisphere=dict( + extensions=None, + ), + outputRightHemisphere=dict( + extensions=None, + ), + outputRightPialHemisphere=dict( + extensions=None, + ), ) outputs = Hemisplit.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py b/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py index 9da07862f8..f5ba0725df 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py @@ -4,25 +4,80 @@ def test_Pialmesh_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - exportPrefix=dict(argstr="--prefix %s",), - inputMaskFile=dict(argstr="-m %s", extensions=None, mandatory=True,), - inputSurfaceFile=dict(argstr="-i %s", extensions=None, mandatory=True,), - inputTissueFractionFile=dict(argstr="-f %s", extensions=None, mandatory=True,), - laplacianSmoothing=dict(argstr="--smooth %f", usedefault=True,), - maxThickness=dict(argstr="--max %f", usedefault=True,), - normalSmoother=dict(argstr="--nc %f", usedefault=True,), - numIterations=dict(argstr="-n %d", usedefault=True,), - outputInterval=dict(argstr="--interval %d", usedefault=True,), - outputSurfaceFile=dict(argstr="-o %s", extensions=None, genfile=True,), - recomputeNormals=dict(argstr="--norm",), - searchRadius=dict(argstr="-r %f", usedefault=True,), - stepSize=dict(argstr="-s %f", usedefault=True,), - tangentSmoother=dict(argstr="--tc %f",), - timer=dict(argstr="--timer",), - tissueThreshold=dict(argstr="-t %f", usedefault=True,), - verbosity=dict(argstr="-v %d",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + exportPrefix=dict( + argstr="--prefix %s", + ), + inputMaskFile=dict( + argstr="-m %s", + extensions=None, + mandatory=True, + ), + inputSurfaceFile=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + inputTissueFractionFile=dict( + argstr="-f %s", + extensions=None, + mandatory=True, + ), + laplacianSmoothing=dict( + argstr="--smooth %f", + usedefault=True, + ), + maxThickness=dict( + argstr="--max %f", + usedefault=True, + ), + 
normalSmoother=dict( + argstr="--nc %f", + usedefault=True, + ), + numIterations=dict( + argstr="-n %d", + usedefault=True, + ), + outputInterval=dict( + argstr="--interval %d", + usedefault=True, + ), + outputSurfaceFile=dict( + argstr="-o %s", + extensions=None, + genfile=True, + ), + recomputeNormals=dict( + argstr="--norm", + ), + searchRadius=dict( + argstr="-r %f", + usedefault=True, + ), + stepSize=dict( + argstr="-s %f", + usedefault=True, + ), + tangentSmoother=dict( + argstr="--tc %f", + ), + timer=dict( + argstr="--timer", + ), + tissueThreshold=dict( + argstr="-t %f", + usedefault=True, + ), + verbosity=dict( + argstr="-v %d", + ), ) inputs = Pialmesh.input_spec() @@ -32,7 +87,11 @@ def test_Pialmesh_inputs(): def test_Pialmesh_outputs(): - output_map = dict(outputSurfaceFile=dict(extensions=None,),) + output_map = dict( + outputSurfaceFile=dict( + extensions=None, + ), + ) outputs = Pialmesh.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py b/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py index fa1c8bc8b7..a6f52a26a7 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py @@ -4,16 +4,44 @@ def test_Pvc_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputMRIFile=dict(argstr="-i %s", extensions=None, mandatory=True,), - inputMaskFile=dict(argstr="-m %s", extensions=None,), - outputLabelFile=dict(argstr="-o %s", extensions=None, genfile=True,), - outputTissueFractionFile=dict(argstr="-f %s", extensions=None, genfile=True,), - spatialPrior=dict(argstr="-l %f",), - threeClassFlag=dict(argstr="-3",), - timer=dict(argstr="--timer",), - verbosity=dict(argstr="-v %d",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMRIFile=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + inputMaskFile=dict( + argstr="-m %s", + extensions=None, + ), + outputLabelFile=dict( + argstr="-o %s", + extensions=None, + genfile=True, + ), + outputTissueFractionFile=dict( + argstr="-f %s", + extensions=None, + genfile=True, + ), + spatialPrior=dict( + argstr="-l %f", + ), + threeClassFlag=dict( + argstr="-3", + ), + timer=dict( + argstr="--timer", + ), + verbosity=dict( + argstr="-v %d", + ), ) inputs = Pvc.input_spec() @@ -24,8 +52,12 @@ def test_Pvc_inputs(): def test_Pvc_outputs(): output_map = dict( - outputLabelFile=dict(extensions=None,), - outputTissueFractionFile=dict(extensions=None,), + outputLabelFile=dict( + extensions=None, + ), + outputTissueFractionFile=dict( + extensions=None, + ), ) outputs = Pvc.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py b/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py index 2c19934e7c..4c29c2bfda 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py @@ -4,33 +4,82 @@ def test_SVReg_inputs(): input_map = dict( - args=dict(argstr="%s",), - atlasFilePrefix=dict(argstr="'%s'", position=1,), - curveMatchingInstructions=dict(argstr="'-cur %s'",), - dataSinkDelay=dict(argstr="%s",), - displayModuleName=dict(argstr="'-m'",), - displayTimestamps=dict(argstr="'-t'",), - environ=dict(nohash=True, usedefault=True,), - iterations=dict(argstr="'-H %d'",), - keepIntermediates=dict(argstr="'-k'",), - pialSurfaceMaskDilation=dict(argstr="'-D %d'",), - refineOutputs=dict(argstr="'-r'",), - 
shortMessages=dict(argstr="'-gui'",), - skipToIntensityReg=dict(argstr="'-p'",), - skipToVolumeReg=dict(argstr="'-s'",), - skipVolumetricProcessing=dict(argstr="'-S'",), - subjectFilePrefix=dict(argstr="'%s'", mandatory=True, position=0,), - useCerebrumMask=dict(argstr="'-C'",), - useManualMaskFile=dict(argstr="'-cbm'",), - useMultiThreading=dict(argstr="'-P'",), - useSingleThreading=dict(argstr="'-U'",), + args=dict( + argstr="%s", + ), + atlasFilePrefix=dict( + argstr="'%s'", + position=1, + ), + curveMatchingInstructions=dict( + argstr="'-cur %s'", + ), + dataSinkDelay=dict( + argstr="%s", + ), + displayModuleName=dict( + argstr="'-m'", + ), + displayTimestamps=dict( + argstr="'-t'", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + iterations=dict( + argstr="'-H %d'", + ), + keepIntermediates=dict( + argstr="'-k'", + ), + pialSurfaceMaskDilation=dict( + argstr="'-D %d'", + ), + refineOutputs=dict( + argstr="'-r'", + ), + shortMessages=dict( + argstr="'-gui'", + ), + skipToIntensityReg=dict( + argstr="'-p'", + ), + skipToVolumeReg=dict( + argstr="'-s'", + ), + skipVolumetricProcessing=dict( + argstr="'-S'", + ), + subjectFilePrefix=dict( + argstr="'%s'", + mandatory=True, + position=0, + ), + useCerebrumMask=dict( + argstr="'-C'", + ), + useManualMaskFile=dict( + argstr="'-cbm'", + ), + useMultiThreading=dict( + argstr="'-P'", + ), + useSingleThreading=dict( + argstr="'-U'", + ), verbosity0=dict( - argstr="'-v0'", xor=("verbosity0", "verbosity1", "verbosity2"), + argstr="'-v0'", + xor=("verbosity0", "verbosity1", "verbosity2"), ), verbosity1=dict( - argstr="'-v1'", xor=("verbosity0", "verbosity1", "verbosity2"), + argstr="'-v1'", + xor=("verbosity0", "verbosity1", "verbosity2"), + ), + verbosity2=dict( + argstr="'v2'", + xor=("verbosity0", "verbosity1", "verbosity2"), ), - verbosity2=dict(argstr="'v2'", xor=("verbosity0", "verbosity1", "verbosity2"),), ) inputs = SVReg.input_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py b/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py index b356b0335e..97094db018 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py @@ -4,15 +4,40 @@ def test_Scrubmask_inputs(): input_map = dict( - args=dict(argstr="%s",), - backgroundFillThreshold=dict(argstr="-b %d", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - foregroundTrimThreshold=dict(argstr="-f %d", usedefault=True,), - inputMaskFile=dict(argstr="-i %s", extensions=None, mandatory=True,), - numberIterations=dict(argstr="-n %d",), - outputMaskFile=dict(argstr="-o %s", extensions=None, genfile=True,), - timer=dict(argstr="--timer",), - verbosity=dict(argstr="-v %d",), + args=dict( + argstr="%s", + ), + backgroundFillThreshold=dict( + argstr="-b %d", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + foregroundTrimThreshold=dict( + argstr="-f %d", + usedefault=True, + ), + inputMaskFile=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + numberIterations=dict( + argstr="-n %d", + ), + outputMaskFile=dict( + argstr="-o %s", + extensions=None, + genfile=True, + ), + timer=dict( + argstr="--timer", + ), + verbosity=dict( + argstr="-v %d", + ), ) inputs = Scrubmask.input_spec() @@ -22,7 +47,11 @@ def test_Scrubmask_inputs(): def test_Scrubmask_outputs(): - output_map = dict(outputMaskFile=dict(extensions=None,),) + output_map = dict( + outputMaskFile=dict( + extensions=None, + ), + ) outputs = Scrubmask.output_spec() 
for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py b/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py index 06480f30e8..3120f00184 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py @@ -4,21 +4,58 @@ def test_Skullfinder_inputs(): input_map = dict( - args=dict(argstr="%s",), - bgLabelValue=dict(argstr="--bglabel %d",), - brainLabelValue=dict(argstr="--brainlabel %d",), - environ=dict(nohash=True, usedefault=True,), - inputMRIFile=dict(argstr="-i %s", extensions=None, mandatory=True,), - inputMaskFile=dict(argstr="-m %s", extensions=None, mandatory=True,), - lowerThreshold=dict(argstr="-l %d",), - outputLabelFile=dict(argstr="-o %s", extensions=None, genfile=True,), - performFinalOpening=dict(argstr="--finalOpening",), - scalpLabelValue=dict(argstr="--scalplabel %d",), - skullLabelValue=dict(argstr="--skulllabel %d",), - spaceLabelValue=dict(argstr="--spacelabel %d",), - surfaceFilePrefix=dict(argstr="-s %s",), - upperThreshold=dict(argstr="-u %d",), - verbosity=dict(argstr="-v %d",), + args=dict( + argstr="%s", + ), + bgLabelValue=dict( + argstr="--bglabel %d", + ), + brainLabelValue=dict( + argstr="--brainlabel %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMRIFile=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + inputMaskFile=dict( + argstr="-m %s", + extensions=None, + mandatory=True, + ), + lowerThreshold=dict( + argstr="-l %d", + ), + outputLabelFile=dict( + argstr="-o %s", + extensions=None, + genfile=True, + ), + performFinalOpening=dict( + argstr="--finalOpening", + ), + scalpLabelValue=dict( + argstr="--scalplabel %d", + ), + skullLabelValue=dict( + argstr="--skulllabel %d", + ), + spaceLabelValue=dict( + argstr="--spacelabel %d", + ), + surfaceFilePrefix=dict( + argstr="-s %s", + ), + upperThreshold=dict( + argstr="-u %d", + ), + verbosity=dict( + argstr="-v %d", + ), ) inputs = Skullfinder.input_spec() @@ -28,7 +65,11 @@ def test_Skullfinder_inputs(): def test_Skullfinder_outputs(): - output_map = dict(outputLabelFile=dict(extensions=None,),) + output_map = dict( + outputLabelFile=dict( + extensions=None, + ), + ) outputs = Skullfinder.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Tca.py b/nipype/interfaces/brainsuite/tests/test_auto_Tca.py index 66cb70ac8f..eaba6a1d5f 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Tca.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Tca.py @@ -4,15 +4,40 @@ def test_Tca_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - foregroundDelta=dict(argstr="--delta %d", usedefault=True,), - inputMaskFile=dict(argstr="-i %s", extensions=None, mandatory=True,), - maxCorrectionSize=dict(argstr="-n %d",), - minCorrectionSize=dict(argstr="-m %d", usedefault=True,), - outputMaskFile=dict(argstr="-o %s", extensions=None, genfile=True,), - timer=dict(argstr="--timer",), - verbosity=dict(argstr="-v %d",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + foregroundDelta=dict( + argstr="--delta %d", + usedefault=True, + ), + inputMaskFile=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + maxCorrectionSize=dict( + argstr="-n %d", + ), + minCorrectionSize=dict( + argstr="-m %d", + usedefault=True, + ), + outputMaskFile=dict( + argstr="-o %s", + extensions=None, 
+ genfile=True, + ), + timer=dict( + argstr="--timer", + ), + verbosity=dict( + argstr="-v %d", + ), ) inputs = Tca.input_spec() @@ -22,7 +47,11 @@ def test_Tca_inputs(): def test_Tca_outputs(): - output_map = dict(outputMaskFile=dict(extensions=None,),) + output_map = dict( + outputMaskFile=dict( + extensions=None, + ), + ) outputs = Tca.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py b/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py index c0265e7e6c..8b043c63c7 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py @@ -4,9 +4,17 @@ def test_ThicknessPVC_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - subjectFilePrefix=dict(argstr="%s", mandatory=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + subjectFilePrefix=dict( + argstr="%s", + mandatory=True, + ), ) inputs = ThicknessPVC.input_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py b/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py index 419330da13..9b6110d30d 100644 --- a/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py +++ b/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py @@ -4,32 +4,109 @@ def test_AnalyzeHeader_inputs(): input_map = dict( - args=dict(argstr="%s",), - centre=dict(argstr="-centre %s", units="mm",), - data_dims=dict(argstr="-datadims %s", units="voxels",), - datatype=dict(argstr="-datatype %s", mandatory=True,), - description=dict(argstr="-description %s",), - environ=dict(nohash=True, usedefault=True,), - greylevels=dict(argstr="-gl %s", units="NA",), - in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=1,), - initfromheader=dict(argstr="-initfromheader %s", extensions=None, position=3,), - intelbyteorder=dict(argstr="-intelbyteorder",), - networkbyteorder=dict(argstr="-networkbyteorder",), - nimages=dict(argstr="-nimages %d", units="NA",), - offset=dict(argstr="-offset %d", units="NA",), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), - picoseed=dict(argstr="-picoseed %s", units="mm",), - printbigendian=dict(argstr="-printbigendian %s", extensions=None, position=3,), - printimagedims=dict(argstr="-printimagedims %s", extensions=None, position=3,), + args=dict( + argstr="%s", + ), + centre=dict( + argstr="-centre %s", + units="mm", + ), + data_dims=dict( + argstr="-datadims %s", + units="voxels", + ), + datatype=dict( + argstr="-datatype %s", + mandatory=True, + ), + description=dict( + argstr="-description %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + greylevels=dict( + argstr="-gl %s", + units="NA", + ), + in_file=dict( + argstr="< %s", + extensions=None, + mandatory=True, + position=1, + ), + initfromheader=dict( + argstr="-initfromheader %s", + extensions=None, + position=3, + ), + intelbyteorder=dict( + argstr="-intelbyteorder", + ), + networkbyteorder=dict( + argstr="-networkbyteorder", + ), + nimages=dict( + argstr="-nimages %d", + units="NA", + ), + offset=dict( + argstr="-offset %d", + units="NA", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + picoseed=dict( + argstr="-picoseed %s", + units="mm", + ), + printbigendian=dict( + argstr="-printbigendian %s", + extensions=None, + position=3, + ), + printimagedims=dict( + argstr="-printimagedims %s", + 
extensions=None, + position=3, + ), printintelbyteorder=dict( - argstr="-printintelbyteorder %s", extensions=None, position=3, - ), - printprogargs=dict(argstr="-printprogargs %s", extensions=None, position=3,), - readheader=dict(argstr="-readheader %s", extensions=None, position=3,), - scaleinter=dict(argstr="-scaleinter %d", units="NA",), - scaleslope=dict(argstr="-scaleslope %d", units="NA",), - scheme_file=dict(argstr="%s", extensions=None, position=2,), - voxel_dims=dict(argstr="-voxeldims %s", units="mm",), + argstr="-printintelbyteorder %s", + extensions=None, + position=3, + ), + printprogargs=dict( + argstr="-printprogargs %s", + extensions=None, + position=3, + ), + readheader=dict( + argstr="-readheader %s", + extensions=None, + position=3, + ), + scaleinter=dict( + argstr="-scaleinter %d", + units="NA", + ), + scaleslope=dict( + argstr="-scaleslope %d", + units="NA", + ), + scheme_file=dict( + argstr="%s", + extensions=None, + position=2, + ), + voxel_dims=dict( + argstr="-voxeldims %s", + units="mm", + ), ) inputs = AnalyzeHeader.input_spec() @@ -39,7 +116,11 @@ def test_AnalyzeHeader_inputs(): def test_AnalyzeHeader_outputs(): - output_map = dict(header=dict(extensions=None,),) + output_map = dict( + header=dict( + extensions=None, + ), + ) outputs = AnalyzeHeader.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py b/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py index 70e1603a33..6181cf7541 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py @@ -4,14 +4,39 @@ def test_ComputeEigensystem_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=1,), - inputdatatype=dict(argstr="-inputdatatype %s", usedefault=True,), - inputmodel=dict(argstr="-inputmodel %s",), - maxcomponents=dict(argstr="-maxcomponents %d",), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), - outputdatatype=dict(argstr="-outputdatatype %s", usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="< %s", + extensions=None, + mandatory=True, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + usedefault=True, + ), + inputmodel=dict( + argstr="-inputmodel %s", + ), + maxcomponents=dict( + argstr="-maxcomponents %d", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + outputdatatype=dict( + argstr="-outputdatatype %s", + usedefault=True, + ), ) inputs = ComputeEigensystem.input_spec() @@ -21,7 +46,11 @@ def test_ComputeEigensystem_inputs(): def test_ComputeEigensystem_outputs(): - output_map = dict(eigen=dict(extensions=None,),) + output_map = dict( + eigen=dict( + extensions=None, + ), + ) outputs = ComputeEigensystem.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py b/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py index cecdcd6dd9..0a13ac4f64 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py @@ -4,14 +4,39 @@ def test_ComputeFractionalAnisotropy_inputs(): input_map = dict( - args=dict(argstr="%s",), - 
environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=1,), - inputdatatype=dict(argstr="-inputdatatype %s",), - inputmodel=dict(argstr="-inputmodel %s",), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), - outputdatatype=dict(argstr="-outputdatatype %s",), - scheme_file=dict(argstr="%s", extensions=None, position=2,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="< %s", + extensions=None, + mandatory=True, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + inputmodel=dict( + argstr="-inputmodel %s", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + outputdatatype=dict( + argstr="-outputdatatype %s", + ), + scheme_file=dict( + argstr="%s", + extensions=None, + position=2, + ), ) inputs = ComputeFractionalAnisotropy.input_spec() @@ -21,7 +46,11 @@ def test_ComputeFractionalAnisotropy_inputs(): def test_ComputeFractionalAnisotropy_outputs(): - output_map = dict(fa=dict(extensions=None,),) + output_map = dict( + fa=dict( + extensions=None, + ), + ) outputs = ComputeFractionalAnisotropy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py b/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py index 692d900494..822bd0306e 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py @@ -4,14 +4,39 @@ def test_ComputeMeanDiffusivity_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=1,), - inputdatatype=dict(argstr="-inputdatatype %s",), - inputmodel=dict(argstr="-inputmodel %s",), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), - outputdatatype=dict(argstr="-outputdatatype %s",), - scheme_file=dict(argstr="%s", extensions=None, position=2,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="< %s", + extensions=None, + mandatory=True, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + inputmodel=dict( + argstr="-inputmodel %s", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + outputdatatype=dict( + argstr="-outputdatatype %s", + ), + scheme_file=dict( + argstr="%s", + extensions=None, + position=2, + ), ) inputs = ComputeMeanDiffusivity.input_spec() @@ -21,7 +46,11 @@ def test_ComputeMeanDiffusivity_inputs(): def test_ComputeMeanDiffusivity_outputs(): - output_map = dict(md=dict(extensions=None,),) + output_map = dict( + md=dict( + extensions=None, + ), + ) outputs = ComputeMeanDiffusivity.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py b/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py index 3a7469378e..8a912685ae 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py @@ -4,14 +4,39 @@ def test_ComputeTensorTrace_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=1,), - 
inputdatatype=dict(argstr="-inputdatatype %s",), - inputmodel=dict(argstr="-inputmodel %s",), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), - outputdatatype=dict(argstr="-outputdatatype %s",), - scheme_file=dict(argstr="%s", extensions=None, position=2,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="< %s", + extensions=None, + mandatory=True, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + inputmodel=dict( + argstr="-inputmodel %s", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + outputdatatype=dict( + argstr="-outputdatatype %s", + ), + scheme_file=dict( + argstr="%s", + extensions=None, + position=2, + ), ) inputs = ComputeTensorTrace.input_spec() @@ -21,7 +46,11 @@ def test_ComputeTensorTrace_inputs(): def test_ComputeTensorTrace_outputs(): - output_map = dict(trace=dict(extensions=None,),) + output_map = dict( + trace=dict( + extensions=None, + ), + ) outputs = ComputeTensorTrace.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_Conmat.py b/nipype/interfaces/camino/tests/test_auto_Conmat.py index 65c84dc64c..a9ea16865d 100644 --- a/nipype/interfaces/camino/tests/test_auto_Conmat.py +++ b/nipype/interfaces/camino/tests/test_auto_Conmat.py @@ -4,16 +4,42 @@ def test_Conmat_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-inputfile %s", extensions=None, mandatory=True,), - output_root=dict(argstr="-outputroot %s", extensions=None, genfile=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-inputfile %s", + extensions=None, + mandatory=True, + ), + output_root=dict( + argstr="-outputroot %s", + extensions=None, + genfile=True, + ), scalar_file=dict( - argstr="-scalarfile %s", extensions=None, requires=["tract_stat"], + argstr="-scalarfile %s", + extensions=None, + requires=["tract_stat"], + ), + target_file=dict( + argstr="-targetfile %s", + extensions=None, + mandatory=True, + ), + targetname_file=dict( + argstr="-targetnamefile %s", + extensions=None, + ), + tract_prop=dict( + argstr="-tractstat %s", + units="NA", + xor=["tract_stat"], ), - target_file=dict(argstr="-targetfile %s", extensions=None, mandatory=True,), - targetname_file=dict(argstr="-targetnamefile %s", extensions=None,), - tract_prop=dict(argstr="-tractstat %s", units="NA", xor=["tract_stat"],), tract_stat=dict( argstr="-tractstat %s", requires=["scalar_file"], @@ -30,7 +56,12 @@ def test_Conmat_inputs(): def test_Conmat_outputs(): output_map = dict( - conmat_sc=dict(extensions=None,), conmat_ts=dict(extensions=None,), + conmat_sc=dict( + extensions=None, + ), + conmat_ts=dict( + extensions=None, + ), ) outputs = Conmat.output_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py b/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py index fce7560dd2..b88fe01ba9 100644 --- a/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py +++ b/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py @@ -4,16 +4,30 @@ def test_DT2NIfTI_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), header_file=dict( - argstr="-header %s", extensions=None, mandatory=True, position=3, + argstr="-header %s", + 
extensions=None, + mandatory=True, + position=3, ), in_file=dict( - argstr="-inputfile %s", extensions=None, mandatory=True, position=1, + argstr="-inputfile %s", + extensions=None, + mandatory=True, + position=1, ), output_root=dict( - argstr="-outputroot %s", extensions=None, genfile=True, position=2, + argstr="-outputroot %s", + extensions=None, + genfile=True, + position=2, ), ) inputs = DT2NIfTI.input_spec() @@ -25,9 +39,15 @@ def test_DT2NIfTI_inputs(): def test_DT2NIfTI_outputs(): output_map = dict( - dt=dict(extensions=None,), - exitcode=dict(extensions=None,), - lns0=dict(extensions=None,), + dt=dict( + extensions=None, + ), + exitcode=dict( + extensions=None, + ), + lns0=dict( + extensions=None, + ), ) outputs = DT2NIfTI.output_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_DTIFit.py b/nipype/interfaces/camino/tests/test_auto_DTIFit.py index 467e2d54ea..757f870fe3 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTIFit.py +++ b/nipype/interfaces/camino/tests/test_auto_DTIFit.py @@ -4,13 +4,39 @@ def test_DTIFit_inputs(): input_map = dict( - args=dict(argstr="%s",), - bgmask=dict(argstr="-bgmask %s", extensions=None,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1,), - non_linear=dict(argstr="-nonlinear", position=3,), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), - scheme_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + args=dict( + argstr="%s", + ), + bgmask=dict( + argstr="-bgmask %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + non_linear=dict( + argstr="-nonlinear", + position=3, + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + scheme_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), ) inputs = DTIFit.input_spec() @@ -20,7 +46,11 @@ def test_DTIFit_inputs(): def test_DTIFit_outputs(): - output_map = dict(tensor_fitted=dict(extensions=None,),) + output_map = dict( + tensor_fitted=dict( + extensions=None, + ), + ) outputs = DTIFit.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py b/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py index 674d38a37b..0ee1ffea8f 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py +++ b/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py @@ -4,22 +4,64 @@ def test_DTLUTGen_inputs(): input_map = dict( - acg=dict(argstr="-acg",), - args=dict(argstr="%s",), - bingham=dict(argstr="-bingham",), - environ=dict(nohash=True, usedefault=True,), - frange=dict(argstr="-frange %s", position=1, units="NA",), - inversion=dict(argstr="-inversion %d", units="NA",), - lrange=dict(argstr="-lrange %s", position=1, units="NA",), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), - samples=dict(argstr="-samples %d", units="NA",), + acg=dict( + argstr="-acg", + ), + args=dict( + argstr="%s", + ), + bingham=dict( + argstr="-bingham", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + frange=dict( + argstr="-frange %s", + position=1, + units="NA", + ), + inversion=dict( + argstr="-inversion %d", + units="NA", + ), + lrange=dict( + argstr="-lrange %s", + position=1, + units="NA", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + samples=dict( + 
argstr="-samples %d", + units="NA", + ), scheme_file=dict( - argstr="-schemefile %s", extensions=None, mandatory=True, position=2, + argstr="-schemefile %s", + extensions=None, + mandatory=True, + position=2, + ), + snr=dict( + argstr="-snr %f", + units="NA", + ), + step=dict( + argstr="-step %f", + units="NA", + ), + trace=dict( + argstr="-trace %G", + units="NA", + ), + watson=dict( + argstr="-watson", ), - snr=dict(argstr="-snr %f", units="NA",), - step=dict(argstr="-step %f", units="NA",), - trace=dict(argstr="-trace %G", units="NA",), - watson=dict(argstr="-watson",), ) inputs = DTLUTGen.input_spec() @@ -29,7 +71,11 @@ def test_DTLUTGen_inputs(): def test_DTLUTGen_outputs(): - output_map = dict(dtLUT=dict(extensions=None,),) + output_map = dict( + dtLUT=dict( + extensions=None, + ), + ) outputs = DTLUTGen.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_DTMetric.py b/nipype/interfaces/camino/tests/test_auto_DTMetric.py index fd62a3d329..11e971b28b 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTMetric.py +++ b/nipype/interfaces/camino/tests/test_auto_DTMetric.py @@ -4,14 +4,39 @@ def test_DTMetric_inputs(): input_map = dict( - args=dict(argstr="%s",), - data_header=dict(argstr="-header %s", extensions=None,), - eigen_data=dict(argstr="-inputfile %s", extensions=None, mandatory=True,), - environ=dict(nohash=True, usedefault=True,), - inputdatatype=dict(argstr="-inputdatatype %s", usedefault=True,), - metric=dict(argstr="-stat %s", mandatory=True,), - outputdatatype=dict(argstr="-outputdatatype %s", usedefault=True,), - outputfile=dict(argstr="-outputfile %s", extensions=None, genfile=True,), + args=dict( + argstr="%s", + ), + data_header=dict( + argstr="-header %s", + extensions=None, + ), + eigen_data=dict( + argstr="-inputfile %s", + extensions=None, + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + usedefault=True, + ), + metric=dict( + argstr="-stat %s", + mandatory=True, + ), + outputdatatype=dict( + argstr="-outputdatatype %s", + usedefault=True, + ), + outputfile=dict( + argstr="-outputfile %s", + extensions=None, + genfile=True, + ), ) inputs = DTMetric.input_spec() @@ -21,7 +46,11 @@ def test_DTMetric_inputs(): def test_DTMetric_outputs(): - output_map = dict(metric_stats=dict(extensions=None,),) + output_map = dict( + metric_stats=dict( + extensions=None, + ), + ) outputs = DTMetric.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py b/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py index 00b11eb751..376fa1bf3e 100644 --- a/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py +++ b/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py @@ -4,23 +4,58 @@ def test_FSL2Scheme_inputs(): input_map = dict( - args=dict(argstr="%s",), - bscale=dict(argstr="-bscale %d", units="NA",), + args=dict( + argstr="%s", + ), + bscale=dict( + argstr="-bscale %d", + units="NA", + ), bval_file=dict( - argstr="-bvalfile %s", extensions=None, mandatory=True, position=2, + argstr="-bvalfile %s", + extensions=None, + mandatory=True, + position=2, ), bvec_file=dict( - argstr="-bvecfile %s", extensions=None, mandatory=True, position=1, - ), - diffusiontime=dict(argstr="-diffusiontime %f", units="NA",), - environ=dict(nohash=True, usedefault=True,), - flipx=dict(argstr="-flipx",), - flipy=dict(argstr="-flipy",), - flipz=dict(argstr="-flipz",), - 
interleave=dict(argstr="-interleave",), - numscans=dict(argstr="-numscans %d", units="NA",), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), - usegradmod=dict(argstr="-usegradmod",), + argstr="-bvecfile %s", + extensions=None, + mandatory=True, + position=1, + ), + diffusiontime=dict( + argstr="-diffusiontime %f", + units="NA", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flipx=dict( + argstr="-flipx", + ), + flipy=dict( + argstr="-flipy", + ), + flipz=dict( + argstr="-flipz", + ), + interleave=dict( + argstr="-interleave", + ), + numscans=dict( + argstr="-numscans %d", + units="NA", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + usegradmod=dict( + argstr="-usegradmod", + ), ) inputs = FSL2Scheme.input_spec() @@ -30,7 +65,11 @@ def test_FSL2Scheme_inputs(): def test_FSL2Scheme_outputs(): - output_map = dict(scheme=dict(extensions=None,),) + output_map = dict( + scheme=dict( + extensions=None, + ), + ) outputs = FSL2Scheme.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py b/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py index 47379c7f54..ae49936d02 100644 --- a/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py +++ b/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py @@ -4,13 +4,30 @@ def test_Image2Voxel_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="-4dimage %s", extensions=None, mandatory=True, position=1, + argstr="-4dimage %s", + extensions=None, + mandatory=True, + position=1, + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + out_type=dict( + argstr="-outputdatatype %s", + position=2, + usedefault=True, ), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), - out_type=dict(argstr="-outputdatatype %s", position=2, usedefault=True,), ) inputs = Image2Voxel.input_spec() @@ -20,7 +37,11 @@ def test_Image2Voxel_inputs(): def test_Image2Voxel_outputs(): - output_map = dict(voxel_order=dict(extensions=None,),) + output_map = dict( + voxel_order=dict( + extensions=None, + ), + ) outputs = Image2Voxel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ImageStats.py b/nipype/interfaces/camino/tests/test_auto_ImageStats.py index 9d817f4ca9..4bc6aa941b 100644 --- a/nipype/interfaces/camino/tests/test_auto_ImageStats.py +++ b/nipype/interfaces/camino/tests/test_auto_ImageStats.py @@ -4,12 +4,32 @@ def test_ImageStats_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_files=dict(argstr="-images %s", mandatory=True, position=-1,), - out_type=dict(argstr="-outputdatatype %s", usedefault=True,), - output_root=dict(argstr="-outputroot %s", extensions=None, mandatory=True,), - stat=dict(argstr="-stat %s", mandatory=True, units="NA",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr="-images %s", + mandatory=True, + position=-1, + ), + out_type=dict( + argstr="-outputdatatype %s", + usedefault=True, + ), + output_root=dict( + argstr="-outputroot %s", + extensions=None, + mandatory=True, + ), + stat=dict( + argstr="-stat %s", + mandatory=True, + units="NA", + ), ) inputs = ImageStats.input_spec() @@ 
-19,7 +39,11 @@ def test_ImageStats_inputs(): def test_ImageStats_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ImageStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_LinRecon.py b/nipype/interfaces/camino/tests/test_auto_LinRecon.py index a8a7731b70..1ed5bbbe6b 100644 --- a/nipype/interfaces/camino/tests/test_auto_LinRecon.py +++ b/nipype/interfaces/camino/tests/test_auto_LinRecon.py @@ -4,15 +4,47 @@ def test_LinRecon_inputs(): input_map = dict( - args=dict(argstr="%s",), - bgmask=dict(argstr="-bgmask %s", extensions=None,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1,), - log=dict(argstr="-log",), - normalize=dict(argstr="-normalize",), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), - qball_mat=dict(argstr="%s", extensions=None, mandatory=True, position=3,), - scheme_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + args=dict( + argstr="%s", + ), + bgmask=dict( + argstr="-bgmask %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + log=dict( + argstr="-log", + ), + normalize=dict( + argstr="-normalize", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + qball_mat=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=3, + ), + scheme_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), ) inputs = LinRecon.input_spec() @@ -22,7 +54,11 @@ def test_LinRecon_inputs(): def test_LinRecon_outputs(): - output_map = dict(recon_data=dict(extensions=None,),) + output_map = dict( + recon_data=dict( + extensions=None, + ), + ) outputs = LinRecon.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_MESD.py b/nipype/interfaces/camino/tests/test_auto_MESD.py index dd91241d0a..189dd2e2d4 100644 --- a/nipype/interfaces/camino/tests/test_auto_MESD.py +++ b/nipype/interfaces/camino/tests/test_auto_MESD.py @@ -4,19 +4,56 @@ def test_MESD_inputs(): input_map = dict( - args=dict(argstr="%s",), - bgmask=dict(argstr="-bgmask %s", extensions=None,), - environ=dict(nohash=True, usedefault=True,), - fastmesd=dict(argstr="-fastmesd", requires=["mepointset"],), + args=dict( + argstr="%s", + ), + bgmask=dict( + argstr="-bgmask %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fastmesd=dict( + argstr="-fastmesd", + requires=["mepointset"], + ), in_file=dict( - argstr="-inputfile %s", extensions=None, mandatory=True, position=1, - ), - inputdatatype=dict(argstr="-inputdatatype %s",), - inverter=dict(argstr="-filter %s", mandatory=True, position=2,), - inverter_param=dict(argstr="%f", mandatory=True, position=3, units="NA",), - mepointset=dict(argstr="-mepointset %d", units="NA",), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), - scheme_file=dict(argstr="-schemefile %s", extensions=None, mandatory=True,), + argstr="-inputfile %s", + extensions=None, + mandatory=True, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + inverter=dict( + argstr="-filter %s", + mandatory=True, + position=2, + ), + inverter_param=dict( + argstr="%f", + mandatory=True, + position=3, + units="NA", + ), 
+ mepointset=dict( + argstr="-mepointset %d", + units="NA", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + scheme_file=dict( + argstr="-schemefile %s", + extensions=None, + mandatory=True, + ), ) inputs = MESD.input_spec() @@ -26,7 +63,11 @@ def test_MESD_inputs(): def test_MESD_outputs(): - output_map = dict(mesd_data=dict(extensions=None,),) + output_map = dict( + mesd_data=dict( + extensions=None, + ), + ) outputs = MESD.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ModelFit.py b/nipype/interfaces/camino/tests/test_auto_ModelFit.py index ca5ba4a9d6..82bd1a8400 100644 --- a/nipype/interfaces/camino/tests/test_auto_ModelFit.py +++ b/nipype/interfaces/camino/tests/test_auto_ModelFit.py @@ -4,24 +4,74 @@ def test_ModelFit_inputs(): input_map = dict( - args=dict(argstr="%s",), - bgmask=dict(argstr="-bgmask %s", extensions=None,), - bgthresh=dict(argstr="-bgthresh %G",), - cfthresh=dict(argstr="-csfthresh %G",), - environ=dict(nohash=True, usedefault=True,), - fixedbvalue=dict(argstr="-fixedbvalue %s",), - fixedmodq=dict(argstr="-fixedmod %s",), - in_file=dict(argstr="-inputfile %s", extensions=None, mandatory=True,), - inputdatatype=dict(argstr="-inputdatatype %s",), - model=dict(argstr="-model %s", mandatory=True,), - noisemap=dict(argstr="-noisemap %s", extensions=None,), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), - outlier=dict(argstr="-outliermap %s", extensions=None,), - outputfile=dict(argstr="-outputfile %s", extensions=None,), - residualmap=dict(argstr="-residualmap %s", extensions=None,), - scheme_file=dict(argstr="-schemefile %s", extensions=None, mandatory=True,), - sigma=dict(argstr="-sigma %G",), - tau=dict(argstr="-tau %G",), + args=dict( + argstr="%s", + ), + bgmask=dict( + argstr="-bgmask %s", + extensions=None, + ), + bgthresh=dict( + argstr="-bgthresh %G", + ), + cfthresh=dict( + argstr="-csfthresh %G", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedbvalue=dict( + argstr="-fixedbvalue %s", + ), + fixedmodq=dict( + argstr="-fixedmod %s", + ), + in_file=dict( + argstr="-inputfile %s", + extensions=None, + mandatory=True, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + model=dict( + argstr="-model %s", + mandatory=True, + ), + noisemap=dict( + argstr="-noisemap %s", + extensions=None, + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + outlier=dict( + argstr="-outliermap %s", + extensions=None, + ), + outputfile=dict( + argstr="-outputfile %s", + extensions=None, + ), + residualmap=dict( + argstr="-residualmap %s", + extensions=None, + ), + scheme_file=dict( + argstr="-schemefile %s", + extensions=None, + mandatory=True, + ), + sigma=dict( + argstr="-sigma %G", + ), + tau=dict( + argstr="-tau %G", + ), ) inputs = ModelFit.input_spec() @@ -31,7 +81,11 @@ def test_ModelFit_inputs(): def test_ModelFit_outputs(): - output_map = dict(fitted_data=dict(extensions=None,),) + output_map = dict( + fitted_data=dict( + extensions=None, + ), + ) outputs = ModelFit.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py b/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py index 951e4bdc0e..82b4276a0f 100644 --- a/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py +++ b/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py @@ -4,18 +4,46 @@ def 
test_NIfTIDT2Camino_inputs(): input_map = dict( - args=dict(argstr="%s",), - bgmask=dict(argstr="-bgmask %s", extensions=None,), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + bgmask=dict( + argstr="-bgmask %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="-inputfile %s", extensions=None, mandatory=True, position=1, - ), - lns0_file=dict(argstr="-lns0 %s", extensions=None,), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), - s0_file=dict(argstr="-s0 %s", extensions=None,), - scaleinter=dict(argstr="-scaleinter %s",), - scaleslope=dict(argstr="-scaleslope %s",), - uppertriangular=dict(argstr="-uppertriangular %s",), + argstr="-inputfile %s", + extensions=None, + mandatory=True, + position=1, + ), + lns0_file=dict( + argstr="-lns0 %s", + extensions=None, + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + s0_file=dict( + argstr="-s0 %s", + extensions=None, + ), + scaleinter=dict( + argstr="-scaleinter %s", + ), + scaleslope=dict( + argstr="-scaleslope %s", + ), + uppertriangular=dict( + argstr="-uppertriangular %s", + ), ) inputs = NIfTIDT2Camino.input_spec() @@ -25,7 +53,11 @@ def test_NIfTIDT2Camino_inputs(): def test_NIfTIDT2Camino_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = NIfTIDT2Camino.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py b/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py index 5a321dddba..09f3a93cac 100644 --- a/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py +++ b/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py @@ -4,16 +4,50 @@ def test_PicoPDFs_inputs(): input_map = dict( - args=dict(argstr="%s",), - directmap=dict(argstr="-directmap",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=1,), - inputmodel=dict(argstr="-inputmodel %s", position=2, usedefault=True,), - luts=dict(argstr="-luts %s", mandatory=True,), - maxcomponents=dict(argstr="-maxcomponents %d", units="NA",), - numpds=dict(argstr="-numpds %d", units="NA",), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), - pdf=dict(argstr="-pdf %s", position=4, usedefault=True,), + args=dict( + argstr="%s", + ), + directmap=dict( + argstr="-directmap", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="< %s", + extensions=None, + mandatory=True, + position=1, + ), + inputmodel=dict( + argstr="-inputmodel %s", + position=2, + usedefault=True, + ), + luts=dict( + argstr="-luts %s", + mandatory=True, + ), + maxcomponents=dict( + argstr="-maxcomponents %d", + units="NA", + ), + numpds=dict( + argstr="-numpds %d", + units="NA", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + pdf=dict( + argstr="-pdf %s", + position=4, + usedefault=True, + ), ) inputs = PicoPDFs.input_spec() @@ -23,7 +57,11 @@ def test_PicoPDFs_inputs(): def test_PicoPDFs_outputs(): - output_map = dict(pdfs=dict(extensions=None,),) + output_map = dict( + pdfs=dict( + extensions=None, + ), + ) outputs = PicoPDFs.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py b/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py index 7ccd071c99..b1b9fda588 
100644 --- a/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py +++ b/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py @@ -4,44 +4,138 @@ def test_ProcStreamlines_inputs(): input_map = dict( - allowmultitargets=dict(argstr="-allowmultitargets",), - args=dict(argstr="%s",), - datadims=dict(argstr="-datadims %s", units="voxels",), - directional=dict(argstr="-directional %s", units="NA",), - discardloops=dict(argstr="-discardloops",), - endpointfile=dict(argstr="-endpointfile %s", extensions=None,), - environ=dict(nohash=True, usedefault=True,), - exclusionfile=dict(argstr="-exclusionfile %s", extensions=None,), - gzip=dict(argstr="-gzip",), + allowmultitargets=dict( + argstr="-allowmultitargets", + ), + args=dict( + argstr="%s", + ), + datadims=dict( + argstr="-datadims %s", + units="voxels", + ), + directional=dict( + argstr="-directional %s", + units="NA", + ), + discardloops=dict( + argstr="-discardloops", + ), + endpointfile=dict( + argstr="-endpointfile %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + exclusionfile=dict( + argstr="-exclusionfile %s", + extensions=None, + ), + gzip=dict( + argstr="-gzip", + ), in_file=dict( - argstr="-inputfile %s", extensions=None, mandatory=True, position=1, - ), - inputmodel=dict(argstr="-inputmodel %s", usedefault=True,), - iterations=dict(argstr="-iterations %d", units="NA",), - maxtractlength=dict(argstr="-maxtractlength %d", units="mm",), - maxtractpoints=dict(argstr="-maxtractpoints %d", units="NA",), - mintractlength=dict(argstr="-mintractlength %d", units="mm",), - mintractpoints=dict(argstr="-mintractpoints %d", units="NA",), - noresample=dict(argstr="-noresample",), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), - outputacm=dict(argstr="-outputacm", requires=["outputroot", "seedfile"],), + argstr="-inputfile %s", + extensions=None, + mandatory=True, + position=1, + ), + inputmodel=dict( + argstr="-inputmodel %s", + usedefault=True, + ), + iterations=dict( + argstr="-iterations %d", + units="NA", + ), + maxtractlength=dict( + argstr="-maxtractlength %d", + units="mm", + ), + maxtractpoints=dict( + argstr="-maxtractpoints %d", + units="NA", + ), + mintractlength=dict( + argstr="-mintractlength %d", + units="mm", + ), + mintractpoints=dict( + argstr="-mintractpoints %d", + units="NA", + ), + noresample=dict( + argstr="-noresample", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + outputacm=dict( + argstr="-outputacm", + requires=["outputroot", "seedfile"], + ), outputcbs=dict( - argstr="-outputcbs", requires=["outputroot", "targetfile", "seedfile"], - ), - outputcp=dict(argstr="-outputcp", requires=["outputroot", "seedfile"],), - outputroot=dict(argstr="-outputroot %s", extensions=None,), - outputsc=dict(argstr="-outputsc", requires=["outputroot", "seedfile"],), - outputtracts=dict(argstr="-outputtracts",), - regionindex=dict(argstr="-regionindex %d", units="mm",), - resamplestepsize=dict(argstr="-resamplestepsize %d", units="NA",), - seedfile=dict(argstr="-seedfile %s", extensions=None,), - seedpointmm=dict(argstr="-seedpointmm %s", units="mm",), - seedpointvox=dict(argstr="-seedpointvox %s", units="voxels",), - targetfile=dict(argstr="-targetfile %s", extensions=None,), - truncateinexclusion=dict(argstr="-truncateinexclusion",), - truncateloops=dict(argstr="-truncateloops",), - voxeldims=dict(argstr="-voxeldims %s", units="mm",), - waypointfile=dict(argstr="-waypointfile %s", extensions=None,), + 
argstr="-outputcbs", + requires=["outputroot", "targetfile", "seedfile"], + ), + outputcp=dict( + argstr="-outputcp", + requires=["outputroot", "seedfile"], + ), + outputroot=dict( + argstr="-outputroot %s", + extensions=None, + ), + outputsc=dict( + argstr="-outputsc", + requires=["outputroot", "seedfile"], + ), + outputtracts=dict( + argstr="-outputtracts", + ), + regionindex=dict( + argstr="-regionindex %d", + units="mm", + ), + resamplestepsize=dict( + argstr="-resamplestepsize %d", + units="NA", + ), + seedfile=dict( + argstr="-seedfile %s", + extensions=None, + ), + seedpointmm=dict( + argstr="-seedpointmm %s", + units="mm", + ), + seedpointvox=dict( + argstr="-seedpointvox %s", + units="voxels", + ), + targetfile=dict( + argstr="-targetfile %s", + extensions=None, + ), + truncateinexclusion=dict( + argstr="-truncateinexclusion", + ), + truncateloops=dict( + argstr="-truncateloops", + ), + voxeldims=dict( + argstr="-voxeldims %s", + units="mm", + ), + waypointfile=dict( + argstr="-waypointfile %s", + extensions=None, + ), ) inputs = ProcStreamlines.input_spec() @@ -51,7 +145,12 @@ def test_ProcStreamlines_inputs(): def test_ProcStreamlines_outputs(): - output_map = dict(outputroot_files=dict(), proc=dict(extensions=None,),) + output_map = dict( + outputroot_files=dict(), + proc=dict( + extensions=None, + ), + ) outputs = ProcStreamlines.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_QBallMX.py b/nipype/interfaces/camino/tests/test_auto_QBallMX.py index 180e5c6f83..30fda3a483 100644 --- a/nipype/interfaces/camino/tests/test_auto_QBallMX.py +++ b/nipype/interfaces/camino/tests/test_auto_QBallMX.py @@ -4,15 +4,44 @@ def test_QBallMX_inputs(): input_map = dict( - args=dict(argstr="%s",), - basistype=dict(argstr="-basistype %s", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - order=dict(argstr="-order %d", units="NA",), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), - rbfpointset=dict(argstr="-rbfpointset %d", units="NA",), - rbfsigma=dict(argstr="-rbfsigma %f", units="NA",), - scheme_file=dict(argstr="-schemefile %s", extensions=None, mandatory=True,), - smoothingsigma=dict(argstr="-smoothingsigma %f", units="NA",), + args=dict( + argstr="%s", + ), + basistype=dict( + argstr="-basistype %s", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + order=dict( + argstr="-order %d", + units="NA", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + rbfpointset=dict( + argstr="-rbfpointset %d", + units="NA", + ), + rbfsigma=dict( + argstr="-rbfsigma %f", + units="NA", + ), + scheme_file=dict( + argstr="-schemefile %s", + extensions=None, + mandatory=True, + ), + smoothingsigma=dict( + argstr="-smoothingsigma %f", + units="NA", + ), ) inputs = QBallMX.input_spec() @@ -22,7 +51,11 @@ def test_QBallMX_inputs(): def test_QBallMX_outputs(): - output_map = dict(qmat=dict(extensions=None,),) + output_map = dict( + qmat=dict( + extensions=None, + ), + ) outputs = QBallMX.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py b/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py index 96dd1c2e5e..fc58b2f2e9 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py +++ b/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py @@ -4,17 +4,52 @@ def test_SFLUTGen_inputs(): input_map = dict( - args=dict(argstr="%s",), - 
binincsize=dict(argstr="-binincsize %d", units="NA",), - directmap=dict(argstr="-directmap",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-inputfile %s", extensions=None, mandatory=True,), - info_file=dict(argstr="-infofile %s", extensions=None, mandatory=True,), - minvectsperbin=dict(argstr="-minvectsperbin %d", units="NA",), - order=dict(argstr="-order %d", units="NA",), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), - outputstem=dict(argstr="-outputstem %s", usedefault=True,), - pdf=dict(argstr="-pdf %s", usedefault=True,), + args=dict( + argstr="%s", + ), + binincsize=dict( + argstr="-binincsize %d", + units="NA", + ), + directmap=dict( + argstr="-directmap", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-inputfile %s", + extensions=None, + mandatory=True, + ), + info_file=dict( + argstr="-infofile %s", + extensions=None, + mandatory=True, + ), + minvectsperbin=dict( + argstr="-minvectsperbin %d", + units="NA", + ), + order=dict( + argstr="-order %d", + units="NA", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + outputstem=dict( + argstr="-outputstem %s", + usedefault=True, + ), + pdf=dict( + argstr="-pdf %s", + usedefault=True, + ), ) inputs = SFLUTGen.input_spec() @@ -25,7 +60,12 @@ def test_SFLUTGen_inputs(): def test_SFLUTGen_outputs(): output_map = dict( - lut_one_fibre=dict(extensions=None,), lut_two_fibres=dict(extensions=None,), + lut_one_fibre=dict( + extensions=None, + ), + lut_two_fibres=dict( + extensions=None, + ), ) outputs = SFLUTGen.output_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py b/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py index b6a032e66d..5c20399cbc 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py +++ b/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py @@ -4,8 +4,13 @@ def test_SFPICOCalibData_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), info_file=dict( argstr="-infooutputfile %s", extensions=None, @@ -13,19 +18,61 @@ def test_SFPICOCalibData_inputs(): hash_files=False, mandatory=True, ), - onedtfarange=dict(argstr="-onedtfarange %s", units="NA",), - onedtfastep=dict(argstr="-onedtfastep %f", units="NA",), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), - scheme_file=dict(argstr="-schemefile %s", extensions=None, mandatory=True,), - seed=dict(argstr="-seed %f", units="NA",), - snr=dict(argstr="-snr %f", units="NA",), - trace=dict(argstr="-trace %f", units="NA",), - twodtanglerange=dict(argstr="-twodtanglerange %s", units="NA",), - twodtanglestep=dict(argstr="-twodtanglestep %f", units="NA",), - twodtfarange=dict(argstr="-twodtfarange %s", units="NA",), - twodtfastep=dict(argstr="-twodtfastep %f", units="NA",), - twodtmixmax=dict(argstr="-twodtmixmax %f", units="NA",), - twodtmixstep=dict(argstr="-twodtmixstep %f", units="NA",), + onedtfarange=dict( + argstr="-onedtfarange %s", + units="NA", + ), + onedtfastep=dict( + argstr="-onedtfastep %f", + units="NA", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + scheme_file=dict( + argstr="-schemefile %s", + extensions=None, + mandatory=True, + ), + seed=dict( + argstr="-seed %f", + units="NA", + ), + snr=dict( + argstr="-snr %f", + units="NA", + ), + trace=dict( + 
argstr="-trace %f", + units="NA", + ), + twodtanglerange=dict( + argstr="-twodtanglerange %s", + units="NA", + ), + twodtanglestep=dict( + argstr="-twodtanglestep %f", + units="NA", + ), + twodtfarange=dict( + argstr="-twodtfarange %s", + units="NA", + ), + twodtfastep=dict( + argstr="-twodtfastep %f", + units="NA", + ), + twodtmixmax=dict( + argstr="-twodtmixmax %f", + units="NA", + ), + twodtmixstep=dict( + argstr="-twodtmixstep %f", + units="NA", + ), ) inputs = SFPICOCalibData.input_spec() @@ -36,7 +83,12 @@ def test_SFPICOCalibData_inputs(): def test_SFPICOCalibData_outputs(): output_map = dict( - PICOCalib=dict(extensions=None,), calib_info=dict(extensions=None,), + PICOCalib=dict( + extensions=None, + ), + calib_info=dict( + extensions=None, + ), ) outputs = SFPICOCalibData.output_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_SFPeaks.py b/nipype/interfaces/camino/tests/test_auto_SFPeaks.py index 8012e56d5d..775a9061e6 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFPeaks.py +++ b/nipype/interfaces/camino/tests/test_auto_SFPeaks.py @@ -4,22 +4,71 @@ def test_SFPeaks_inputs(): input_map = dict( - args=dict(argstr="%s",), - density=dict(argstr="-density %d", units="NA",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-inputfile %s", extensions=None, mandatory=True,), - inputmodel=dict(argstr="-inputmodel %s", mandatory=True,), - mepointset=dict(argstr="-mepointset %d", units="NA",), - noconsistencycheck=dict(argstr="-noconsistencycheck",), - numpds=dict(argstr="-numpds %d", units="NA",), - order=dict(argstr="-order %d", units="NA",), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), - pdthresh=dict(argstr="-pdthresh %f", units="NA",), - pointset=dict(argstr="-pointset %d", units="NA",), - rbfpointset=dict(argstr="-rbfpointset %d", units="NA",), - scheme_file=dict(argstr="%s", extensions=None,), - searchradius=dict(argstr="-searchradius %f", units="NA",), - stdsfrommean=dict(argstr="-stdsfrommean %f", units="NA",), + args=dict( + argstr="%s", + ), + density=dict( + argstr="-density %d", + units="NA", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-inputfile %s", + extensions=None, + mandatory=True, + ), + inputmodel=dict( + argstr="-inputmodel %s", + mandatory=True, + ), + mepointset=dict( + argstr="-mepointset %d", + units="NA", + ), + noconsistencycheck=dict( + argstr="-noconsistencycheck", + ), + numpds=dict( + argstr="-numpds %d", + units="NA", + ), + order=dict( + argstr="-order %d", + units="NA", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + pdthresh=dict( + argstr="-pdthresh %f", + units="NA", + ), + pointset=dict( + argstr="-pointset %d", + units="NA", + ), + rbfpointset=dict( + argstr="-rbfpointset %d", + units="NA", + ), + scheme_file=dict( + argstr="%s", + extensions=None, + ), + searchradius=dict( + argstr="-searchradius %f", + units="NA", + ), + stdsfrommean=dict( + argstr="-stdsfrommean %f", + units="NA", + ), ) inputs = SFPeaks.input_spec() @@ -29,7 +78,11 @@ def test_SFPeaks_inputs(): def test_SFPeaks_outputs(): - output_map = dict(peaks=dict(extensions=None,),) + output_map = dict( + peaks=dict( + extensions=None, + ), + ) outputs = SFPeaks.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_Shredder.py b/nipype/interfaces/camino/tests/test_auto_Shredder.py index c7e82afbad..bf0f9dc9dc 100644 --- 
a/nipype/interfaces/camino/tests/test_auto_Shredder.py +++ b/nipype/interfaces/camino/tests/test_auto_Shredder.py @@ -4,13 +4,40 @@ def test_Shredder_inputs(): input_map = dict( - args=dict(argstr="%s",), - chunksize=dict(argstr="%d", position=2, units="NA",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=-2,), - offset=dict(argstr="%d", position=1, units="NA",), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), - space=dict(argstr="%d", position=3, units="NA",), + args=dict( + argstr="%s", + ), + chunksize=dict( + argstr="%d", + position=2, + units="NA", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="< %s", + extensions=None, + mandatory=True, + position=-2, + ), + offset=dict( + argstr="%d", + position=1, + units="NA", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + space=dict( + argstr="%d", + position=3, + units="NA", + ), ) inputs = Shredder.input_spec() @@ -20,7 +47,11 @@ def test_Shredder_inputs(): def test_Shredder_outputs(): - output_map = dict(shredded=dict(extensions=None,),) + output_map = dict( + shredded=dict( + extensions=None, + ), + ) outputs = Shredder.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_Track.py b/nipype/interfaces/camino/tests/test_auto_Track.py index 99f42e95c7..697b2d5275 100644 --- a/nipype/interfaces/camino/tests/test_auto_Track.py +++ b/nipype/interfaces/camino/tests/test_auto_Track.py @@ -4,30 +4,91 @@ def test_Track_inputs(): input_map = dict( - anisfile=dict(argstr="-anisfile %s", extensions=None,), - anisthresh=dict(argstr="-anisthresh %f",), - args=dict(argstr="%s",), - curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"],), - curvethresh=dict(argstr="-curvethresh %f",), - data_dims=dict(argstr="-datadims %s", units="voxels",), - environ=dict(nohash=True, usedefault=True,), - gzip=dict(argstr="-gzip",), - in_file=dict(argstr="-inputfile %s", extensions=None, position=1,), - inputdatatype=dict(argstr="-inputdatatype %s",), - inputmodel=dict(argstr="-inputmodel %s", usedefault=True,), - interpolator=dict(argstr="-interpolator %s",), - ipthresh=dict(argstr="-ipthresh %f",), - maxcomponents=dict(argstr="-maxcomponents %d", units="NA",), - numpds=dict(argstr="-numpds %d", units="NA",), + anisfile=dict( + argstr="-anisfile %s", + extensions=None, + ), + anisthresh=dict( + argstr="-anisthresh %f", + ), + args=dict( + argstr="%s", + ), + curveinterval=dict( + argstr="-curveinterval %f", + requires=["curvethresh"], + ), + curvethresh=dict( + argstr="-curvethresh %f", + ), + data_dims=dict( + argstr="-datadims %s", + units="voxels", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gzip=dict( + argstr="-gzip", + ), + in_file=dict( + argstr="-inputfile %s", + extensions=None, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + inputmodel=dict( + argstr="-inputmodel %s", + usedefault=True, + ), + interpolator=dict( + argstr="-interpolator %s", + ), + ipthresh=dict( + argstr="-ipthresh %f", + ), + maxcomponents=dict( + argstr="-maxcomponents %d", + units="NA", + ), + numpds=dict( + argstr="-numpds %d", + units="NA", + ), out_file=dict( - argstr="-outputfile %s", extensions=None, genfile=True, position=-1, - ), - output_root=dict(argstr="-outputroot %s", extensions=None, position=-1,), - outputtracts=dict(argstr="-outputtracts %s",), - 
seed_file=dict(argstr="-seedfile %s", extensions=None, position=2,), - stepsize=dict(argstr="-stepsize %f", requires=["tracker"],), - tracker=dict(argstr="-tracker %s", usedefault=True,), - voxel_dims=dict(argstr="-voxeldims %s", units="mm",), + argstr="-outputfile %s", + extensions=None, + genfile=True, + position=-1, + ), + output_root=dict( + argstr="-outputroot %s", + extensions=None, + position=-1, + ), + outputtracts=dict( + argstr="-outputtracts %s", + ), + seed_file=dict( + argstr="-seedfile %s", + extensions=None, + position=2, + ), + stepsize=dict( + argstr="-stepsize %f", + requires=["tracker"], + ), + tracker=dict( + argstr="-tracker %s", + usedefault=True, + ), + voxel_dims=dict( + argstr="-voxeldims %s", + units="mm", + ), ) inputs = Track.input_spec() @@ -37,7 +98,11 @@ def test_Track_inputs(): def test_Track_outputs(): - output_map = dict(tracked=dict(extensions=None,),) + output_map = dict( + tracked=dict( + extensions=None, + ), + ) outputs = Track.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py b/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py index ff13fbe241..a117d5d782 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py @@ -4,30 +4,91 @@ def test_TrackBallStick_inputs(): input_map = dict( - anisfile=dict(argstr="-anisfile %s", extensions=None,), - anisthresh=dict(argstr="-anisthresh %f",), - args=dict(argstr="%s",), - curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"],), - curvethresh=dict(argstr="-curvethresh %f",), - data_dims=dict(argstr="-datadims %s", units="voxels",), - environ=dict(nohash=True, usedefault=True,), - gzip=dict(argstr="-gzip",), - in_file=dict(argstr="-inputfile %s", extensions=None, position=1,), - inputdatatype=dict(argstr="-inputdatatype %s",), - inputmodel=dict(argstr="-inputmodel %s", usedefault=True,), - interpolator=dict(argstr="-interpolator %s",), - ipthresh=dict(argstr="-ipthresh %f",), - maxcomponents=dict(argstr="-maxcomponents %d", units="NA",), - numpds=dict(argstr="-numpds %d", units="NA",), + anisfile=dict( + argstr="-anisfile %s", + extensions=None, + ), + anisthresh=dict( + argstr="-anisthresh %f", + ), + args=dict( + argstr="%s", + ), + curveinterval=dict( + argstr="-curveinterval %f", + requires=["curvethresh"], + ), + curvethresh=dict( + argstr="-curvethresh %f", + ), + data_dims=dict( + argstr="-datadims %s", + units="voxels", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gzip=dict( + argstr="-gzip", + ), + in_file=dict( + argstr="-inputfile %s", + extensions=None, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + inputmodel=dict( + argstr="-inputmodel %s", + usedefault=True, + ), + interpolator=dict( + argstr="-interpolator %s", + ), + ipthresh=dict( + argstr="-ipthresh %f", + ), + maxcomponents=dict( + argstr="-maxcomponents %d", + units="NA", + ), + numpds=dict( + argstr="-numpds %d", + units="NA", + ), out_file=dict( - argstr="-outputfile %s", extensions=None, genfile=True, position=-1, - ), - output_root=dict(argstr="-outputroot %s", extensions=None, position=-1,), - outputtracts=dict(argstr="-outputtracts %s",), - seed_file=dict(argstr="-seedfile %s", extensions=None, position=2,), - stepsize=dict(argstr="-stepsize %f", requires=["tracker"],), - tracker=dict(argstr="-tracker %s", usedefault=True,), - voxel_dims=dict(argstr="-voxeldims %s", units="mm",), + argstr="-outputfile 
%s", + extensions=None, + genfile=True, + position=-1, + ), + output_root=dict( + argstr="-outputroot %s", + extensions=None, + position=-1, + ), + outputtracts=dict( + argstr="-outputtracts %s", + ), + seed_file=dict( + argstr="-seedfile %s", + extensions=None, + position=2, + ), + stepsize=dict( + argstr="-stepsize %f", + requires=["tracker"], + ), + tracker=dict( + argstr="-tracker %s", + usedefault=True, + ), + voxel_dims=dict( + argstr="-voxeldims %s", + units="mm", + ), ) inputs = TrackBallStick.input_spec() @@ -37,7 +98,11 @@ def test_TrackBallStick_inputs(): def test_TrackBallStick_outputs(): - output_map = dict(tracked=dict(extensions=None,),) + output_map = dict( + tracked=dict( + extensions=None, + ), + ) outputs = TrackBallStick.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py b/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py index 7f174486d5..56ca8ece97 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py @@ -4,39 +4,122 @@ def test_TrackBayesDirac_inputs(): input_map = dict( - anisfile=dict(argstr="-anisfile %s", extensions=None,), - anisthresh=dict(argstr="-anisthresh %f",), - args=dict(argstr="%s",), - curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"],), - curvepriorg=dict(argstr="-curvepriorg %G",), - curvepriork=dict(argstr="-curvepriork %G",), - curvethresh=dict(argstr="-curvethresh %f",), - data_dims=dict(argstr="-datadims %s", units="voxels",), - datamodel=dict(argstr="-datamodel %s",), - environ=dict(nohash=True, usedefault=True,), - extpriordatatype=dict(argstr="-extpriordatatype %s",), - extpriorfile=dict(argstr="-extpriorfile %s", extensions=None,), - gzip=dict(argstr="-gzip",), - in_file=dict(argstr="-inputfile %s", extensions=None, position=1,), - inputdatatype=dict(argstr="-inputdatatype %s",), - inputmodel=dict(argstr="-inputmodel %s", usedefault=True,), - interpolator=dict(argstr="-interpolator %s",), - ipthresh=dict(argstr="-ipthresh %f",), - iterations=dict(argstr="-iterations %d", units="NA",), - maxcomponents=dict(argstr="-maxcomponents %d", units="NA",), - numpds=dict(argstr="-numpds %d", units="NA",), + anisfile=dict( + argstr="-anisfile %s", + extensions=None, + ), + anisthresh=dict( + argstr="-anisthresh %f", + ), + args=dict( + argstr="%s", + ), + curveinterval=dict( + argstr="-curveinterval %f", + requires=["curvethresh"], + ), + curvepriorg=dict( + argstr="-curvepriorg %G", + ), + curvepriork=dict( + argstr="-curvepriork %G", + ), + curvethresh=dict( + argstr="-curvethresh %f", + ), + data_dims=dict( + argstr="-datadims %s", + units="voxels", + ), + datamodel=dict( + argstr="-datamodel %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + extpriordatatype=dict( + argstr="-extpriordatatype %s", + ), + extpriorfile=dict( + argstr="-extpriorfile %s", + extensions=None, + ), + gzip=dict( + argstr="-gzip", + ), + in_file=dict( + argstr="-inputfile %s", + extensions=None, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + inputmodel=dict( + argstr="-inputmodel %s", + usedefault=True, + ), + interpolator=dict( + argstr="-interpolator %s", + ), + ipthresh=dict( + argstr="-ipthresh %f", + ), + iterations=dict( + argstr="-iterations %d", + units="NA", + ), + maxcomponents=dict( + argstr="-maxcomponents %d", + units="NA", + ), + numpds=dict( + argstr="-numpds %d", + units="NA", + ), out_file=dict( - argstr="-outputfile 
%s", extensions=None, genfile=True, position=-1, - ), - output_root=dict(argstr="-outputroot %s", extensions=None, position=-1,), - outputtracts=dict(argstr="-outputtracts %s",), - pdf=dict(argstr="-pdf %s",), - pointset=dict(argstr="-pointset %s",), - scheme_file=dict(argstr="-schemefile %s", extensions=None, mandatory=True,), - seed_file=dict(argstr="-seedfile %s", extensions=None, position=2,), - stepsize=dict(argstr="-stepsize %f", requires=["tracker"],), - tracker=dict(argstr="-tracker %s", usedefault=True,), - voxel_dims=dict(argstr="-voxeldims %s", units="mm",), + argstr="-outputfile %s", + extensions=None, + genfile=True, + position=-1, + ), + output_root=dict( + argstr="-outputroot %s", + extensions=None, + position=-1, + ), + outputtracts=dict( + argstr="-outputtracts %s", + ), + pdf=dict( + argstr="-pdf %s", + ), + pointset=dict( + argstr="-pointset %s", + ), + scheme_file=dict( + argstr="-schemefile %s", + extensions=None, + mandatory=True, + ), + seed_file=dict( + argstr="-seedfile %s", + extensions=None, + position=2, + ), + stepsize=dict( + argstr="-stepsize %f", + requires=["tracker"], + ), + tracker=dict( + argstr="-tracker %s", + usedefault=True, + ), + voxel_dims=dict( + argstr="-voxeldims %s", + units="mm", + ), ) inputs = TrackBayesDirac.input_spec() @@ -46,7 +129,11 @@ def test_TrackBayesDirac_inputs(): def test_TrackBayesDirac_outputs(): - output_map = dict(tracked=dict(extensions=None,),) + output_map = dict( + tracked=dict( + extensions=None, + ), + ) outputs = TrackBayesDirac.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py index 91489a1d84..dbd8f89478 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py @@ -4,32 +4,99 @@ def test_TrackBedpostxDeter_inputs(): input_map = dict( - anisfile=dict(argstr="-anisfile %s", extensions=None,), - anisthresh=dict(argstr="-anisthresh %f",), - args=dict(argstr="%s",), - bedpostxdir=dict(argstr="-bedpostxdir %s", mandatory=True,), - curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"],), - curvethresh=dict(argstr="-curvethresh %f",), - data_dims=dict(argstr="-datadims %s", units="voxels",), - environ=dict(nohash=True, usedefault=True,), - gzip=dict(argstr="-gzip",), - in_file=dict(argstr="-inputfile %s", extensions=None, position=1,), - inputdatatype=dict(argstr="-inputdatatype %s",), - inputmodel=dict(argstr="-inputmodel %s", usedefault=True,), - interpolator=dict(argstr="-interpolator %s",), - ipthresh=dict(argstr="-ipthresh %f",), - maxcomponents=dict(argstr="-maxcomponents %d", units="NA",), - min_vol_frac=dict(argstr="-bedpostxminf %d", units="NA",), - numpds=dict(argstr="-numpds %d", units="NA",), + anisfile=dict( + argstr="-anisfile %s", + extensions=None, + ), + anisthresh=dict( + argstr="-anisthresh %f", + ), + args=dict( + argstr="%s", + ), + bedpostxdir=dict( + argstr="-bedpostxdir %s", + mandatory=True, + ), + curveinterval=dict( + argstr="-curveinterval %f", + requires=["curvethresh"], + ), + curvethresh=dict( + argstr="-curvethresh %f", + ), + data_dims=dict( + argstr="-datadims %s", + units="voxels", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gzip=dict( + argstr="-gzip", + ), + in_file=dict( + argstr="-inputfile %s", + extensions=None, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + inputmodel=dict( + 
argstr="-inputmodel %s", + usedefault=True, + ), + interpolator=dict( + argstr="-interpolator %s", + ), + ipthresh=dict( + argstr="-ipthresh %f", + ), + maxcomponents=dict( + argstr="-maxcomponents %d", + units="NA", + ), + min_vol_frac=dict( + argstr="-bedpostxminf %d", + units="NA", + ), + numpds=dict( + argstr="-numpds %d", + units="NA", + ), out_file=dict( - argstr="-outputfile %s", extensions=None, genfile=True, position=-1, - ), - output_root=dict(argstr="-outputroot %s", extensions=None, position=-1,), - outputtracts=dict(argstr="-outputtracts %s",), - seed_file=dict(argstr="-seedfile %s", extensions=None, position=2,), - stepsize=dict(argstr="-stepsize %f", requires=["tracker"],), - tracker=dict(argstr="-tracker %s", usedefault=True,), - voxel_dims=dict(argstr="-voxeldims %s", units="mm",), + argstr="-outputfile %s", + extensions=None, + genfile=True, + position=-1, + ), + output_root=dict( + argstr="-outputroot %s", + extensions=None, + position=-1, + ), + outputtracts=dict( + argstr="-outputtracts %s", + ), + seed_file=dict( + argstr="-seedfile %s", + extensions=None, + position=2, + ), + stepsize=dict( + argstr="-stepsize %f", + requires=["tracker"], + ), + tracker=dict( + argstr="-tracker %s", + usedefault=True, + ), + voxel_dims=dict( + argstr="-voxeldims %s", + units="mm", + ), ) inputs = TrackBedpostxDeter.input_spec() @@ -39,7 +106,11 @@ def test_TrackBedpostxDeter_inputs(): def test_TrackBedpostxDeter_outputs(): - output_map = dict(tracked=dict(extensions=None,),) + output_map = dict( + tracked=dict( + extensions=None, + ), + ) outputs = TrackBedpostxDeter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py index 92f02879da..7d1baa0e43 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py @@ -4,33 +4,103 @@ def test_TrackBedpostxProba_inputs(): input_map = dict( - anisfile=dict(argstr="-anisfile %s", extensions=None,), - anisthresh=dict(argstr="-anisthresh %f",), - args=dict(argstr="%s",), - bedpostxdir=dict(argstr="-bedpostxdir %s", mandatory=True,), - curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"],), - curvethresh=dict(argstr="-curvethresh %f",), - data_dims=dict(argstr="-datadims %s", units="voxels",), - environ=dict(nohash=True, usedefault=True,), - gzip=dict(argstr="-gzip",), - in_file=dict(argstr="-inputfile %s", extensions=None, position=1,), - inputdatatype=dict(argstr="-inputdatatype %s",), - inputmodel=dict(argstr="-inputmodel %s", usedefault=True,), - interpolator=dict(argstr="-interpolator %s",), - ipthresh=dict(argstr="-ipthresh %f",), - iterations=dict(argstr="-iterations %d", units="NA",), - maxcomponents=dict(argstr="-maxcomponents %d", units="NA",), - min_vol_frac=dict(argstr="-bedpostxminf %d", units="NA",), - numpds=dict(argstr="-numpds %d", units="NA",), + anisfile=dict( + argstr="-anisfile %s", + extensions=None, + ), + anisthresh=dict( + argstr="-anisthresh %f", + ), + args=dict( + argstr="%s", + ), + bedpostxdir=dict( + argstr="-bedpostxdir %s", + mandatory=True, + ), + curveinterval=dict( + argstr="-curveinterval %f", + requires=["curvethresh"], + ), + curvethresh=dict( + argstr="-curvethresh %f", + ), + data_dims=dict( + argstr="-datadims %s", + units="voxels", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gzip=dict( + argstr="-gzip", + ), + in_file=dict( + 
argstr="-inputfile %s", + extensions=None, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + inputmodel=dict( + argstr="-inputmodel %s", + usedefault=True, + ), + interpolator=dict( + argstr="-interpolator %s", + ), + ipthresh=dict( + argstr="-ipthresh %f", + ), + iterations=dict( + argstr="-iterations %d", + units="NA", + ), + maxcomponents=dict( + argstr="-maxcomponents %d", + units="NA", + ), + min_vol_frac=dict( + argstr="-bedpostxminf %d", + units="NA", + ), + numpds=dict( + argstr="-numpds %d", + units="NA", + ), out_file=dict( - argstr="-outputfile %s", extensions=None, genfile=True, position=-1, - ), - output_root=dict(argstr="-outputroot %s", extensions=None, position=-1,), - outputtracts=dict(argstr="-outputtracts %s",), - seed_file=dict(argstr="-seedfile %s", extensions=None, position=2,), - stepsize=dict(argstr="-stepsize %f", requires=["tracker"],), - tracker=dict(argstr="-tracker %s", usedefault=True,), - voxel_dims=dict(argstr="-voxeldims %s", units="mm",), + argstr="-outputfile %s", + extensions=None, + genfile=True, + position=-1, + ), + output_root=dict( + argstr="-outputroot %s", + extensions=None, + position=-1, + ), + outputtracts=dict( + argstr="-outputtracts %s", + ), + seed_file=dict( + argstr="-seedfile %s", + extensions=None, + position=2, + ), + stepsize=dict( + argstr="-stepsize %f", + requires=["tracker"], + ), + tracker=dict( + argstr="-tracker %s", + usedefault=True, + ), + voxel_dims=dict( + argstr="-voxeldims %s", + units="mm", + ), ) inputs = TrackBedpostxProba.input_spec() @@ -40,7 +110,11 @@ def test_TrackBedpostxProba_inputs(): def test_TrackBedpostxProba_outputs(): - output_map = dict(tracked=dict(extensions=None,),) + output_map = dict( + tracked=dict( + extensions=None, + ), + ) outputs = TrackBedpostxProba.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py b/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py index 8cd35bab8a..75cd2e3d11 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py @@ -4,35 +4,111 @@ def test_TrackBootstrap_inputs(): input_map = dict( - anisfile=dict(argstr="-anisfile %s", extensions=None,), - anisthresh=dict(argstr="-anisthresh %f",), - args=dict(argstr="%s",), - bgmask=dict(argstr="-bgmask %s", extensions=None,), - bsdatafiles=dict(argstr="-bsdatafile %s", mandatory=True,), - curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"],), - curvethresh=dict(argstr="-curvethresh %f",), - data_dims=dict(argstr="-datadims %s", units="voxels",), - environ=dict(nohash=True, usedefault=True,), - gzip=dict(argstr="-gzip",), - in_file=dict(argstr="-inputfile %s", extensions=None, position=1,), - inputdatatype=dict(argstr="-inputdatatype %s",), - inputmodel=dict(argstr="-inputmodel %s", usedefault=True,), - interpolator=dict(argstr="-interpolator %s",), - inversion=dict(argstr="-inversion %s",), - ipthresh=dict(argstr="-ipthresh %f",), - iterations=dict(argstr="-iterations %d", units="NA",), - maxcomponents=dict(argstr="-maxcomponents %d", units="NA",), - numpds=dict(argstr="-numpds %d", units="NA",), + anisfile=dict( + argstr="-anisfile %s", + extensions=None, + ), + anisthresh=dict( + argstr="-anisthresh %f", + ), + args=dict( + argstr="%s", + ), + bgmask=dict( + argstr="-bgmask %s", + extensions=None, + ), + bsdatafiles=dict( + argstr="-bsdatafile %s", + mandatory=True, + ), + curveinterval=dict( + argstr="-curveinterval 
%f", + requires=["curvethresh"], + ), + curvethresh=dict( + argstr="-curvethresh %f", + ), + data_dims=dict( + argstr="-datadims %s", + units="voxels", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gzip=dict( + argstr="-gzip", + ), + in_file=dict( + argstr="-inputfile %s", + extensions=None, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + inputmodel=dict( + argstr="-inputmodel %s", + usedefault=True, + ), + interpolator=dict( + argstr="-interpolator %s", + ), + inversion=dict( + argstr="-inversion %s", + ), + ipthresh=dict( + argstr="-ipthresh %f", + ), + iterations=dict( + argstr="-iterations %d", + units="NA", + ), + maxcomponents=dict( + argstr="-maxcomponents %d", + units="NA", + ), + numpds=dict( + argstr="-numpds %d", + units="NA", + ), out_file=dict( - argstr="-outputfile %s", extensions=None, genfile=True, position=-1, - ), - output_root=dict(argstr="-outputroot %s", extensions=None, position=-1,), - outputtracts=dict(argstr="-outputtracts %s",), - scheme_file=dict(argstr="-schemefile %s", extensions=None, mandatory=True,), - seed_file=dict(argstr="-seedfile %s", extensions=None, position=2,), - stepsize=dict(argstr="-stepsize %f", requires=["tracker"],), - tracker=dict(argstr="-tracker %s", usedefault=True,), - voxel_dims=dict(argstr="-voxeldims %s", units="mm",), + argstr="-outputfile %s", + extensions=None, + genfile=True, + position=-1, + ), + output_root=dict( + argstr="-outputroot %s", + extensions=None, + position=-1, + ), + outputtracts=dict( + argstr="-outputtracts %s", + ), + scheme_file=dict( + argstr="-schemefile %s", + extensions=None, + mandatory=True, + ), + seed_file=dict( + argstr="-seedfile %s", + extensions=None, + position=2, + ), + stepsize=dict( + argstr="-stepsize %f", + requires=["tracker"], + ), + tracker=dict( + argstr="-tracker %s", + usedefault=True, + ), + voxel_dims=dict( + argstr="-voxeldims %s", + units="mm", + ), ) inputs = TrackBootstrap.input_spec() @@ -42,7 +118,11 @@ def test_TrackBootstrap_inputs(): def test_TrackBootstrap_outputs(): - output_map = dict(tracked=dict(extensions=None,),) + output_map = dict( + tracked=dict( + extensions=None, + ), + ) outputs = TrackBootstrap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackDT.py b/nipype/interfaces/camino/tests/test_auto_TrackDT.py index 736dc5fc5f..c60ba7b5f5 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackDT.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackDT.py @@ -4,30 +4,91 @@ def test_TrackDT_inputs(): input_map = dict( - anisfile=dict(argstr="-anisfile %s", extensions=None,), - anisthresh=dict(argstr="-anisthresh %f",), - args=dict(argstr="%s",), - curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"],), - curvethresh=dict(argstr="-curvethresh %f",), - data_dims=dict(argstr="-datadims %s", units="voxels",), - environ=dict(nohash=True, usedefault=True,), - gzip=dict(argstr="-gzip",), - in_file=dict(argstr="-inputfile %s", extensions=None, position=1,), - inputdatatype=dict(argstr="-inputdatatype %s",), - inputmodel=dict(argstr="-inputmodel %s", usedefault=True,), - interpolator=dict(argstr="-interpolator %s",), - ipthresh=dict(argstr="-ipthresh %f",), - maxcomponents=dict(argstr="-maxcomponents %d", units="NA",), - numpds=dict(argstr="-numpds %d", units="NA",), + anisfile=dict( + argstr="-anisfile %s", + extensions=None, + ), + anisthresh=dict( + argstr="-anisthresh %f", + ), + args=dict( + argstr="%s", + ), + curveinterval=dict( + 
argstr="-curveinterval %f", + requires=["curvethresh"], + ), + curvethresh=dict( + argstr="-curvethresh %f", + ), + data_dims=dict( + argstr="-datadims %s", + units="voxels", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gzip=dict( + argstr="-gzip", + ), + in_file=dict( + argstr="-inputfile %s", + extensions=None, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + inputmodel=dict( + argstr="-inputmodel %s", + usedefault=True, + ), + interpolator=dict( + argstr="-interpolator %s", + ), + ipthresh=dict( + argstr="-ipthresh %f", + ), + maxcomponents=dict( + argstr="-maxcomponents %d", + units="NA", + ), + numpds=dict( + argstr="-numpds %d", + units="NA", + ), out_file=dict( - argstr="-outputfile %s", extensions=None, genfile=True, position=-1, - ), - output_root=dict(argstr="-outputroot %s", extensions=None, position=-1,), - outputtracts=dict(argstr="-outputtracts %s",), - seed_file=dict(argstr="-seedfile %s", extensions=None, position=2,), - stepsize=dict(argstr="-stepsize %f", requires=["tracker"],), - tracker=dict(argstr="-tracker %s", usedefault=True,), - voxel_dims=dict(argstr="-voxeldims %s", units="mm",), + argstr="-outputfile %s", + extensions=None, + genfile=True, + position=-1, + ), + output_root=dict( + argstr="-outputroot %s", + extensions=None, + position=-1, + ), + outputtracts=dict( + argstr="-outputtracts %s", + ), + seed_file=dict( + argstr="-seedfile %s", + extensions=None, + position=2, + ), + stepsize=dict( + argstr="-stepsize %f", + requires=["tracker"], + ), + tracker=dict( + argstr="-tracker %s", + usedefault=True, + ), + voxel_dims=dict( + argstr="-voxeldims %s", + units="mm", + ), ) inputs = TrackDT.input_spec() @@ -37,7 +98,11 @@ def test_TrackDT_inputs(): def test_TrackDT_outputs(): - output_map = dict(tracked=dict(extensions=None,),) + output_map = dict( + tracked=dict( + extensions=None, + ), + ) outputs = TrackDT.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackPICo.py b/nipype/interfaces/camino/tests/test_auto_TrackPICo.py index 37d4a95179..1d3647e151 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackPICo.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackPICo.py @@ -4,32 +4,98 @@ def test_TrackPICo_inputs(): input_map = dict( - anisfile=dict(argstr="-anisfile %s", extensions=None,), - anisthresh=dict(argstr="-anisthresh %f",), - args=dict(argstr="%s",), - curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"],), - curvethresh=dict(argstr="-curvethresh %f",), - data_dims=dict(argstr="-datadims %s", units="voxels",), - environ=dict(nohash=True, usedefault=True,), - gzip=dict(argstr="-gzip",), - in_file=dict(argstr="-inputfile %s", extensions=None, position=1,), - inputdatatype=dict(argstr="-inputdatatype %s",), - inputmodel=dict(argstr="-inputmodel %s", usedefault=True,), - interpolator=dict(argstr="-interpolator %s",), - ipthresh=dict(argstr="-ipthresh %f",), - iterations=dict(argstr="-iterations %d", units="NA",), - maxcomponents=dict(argstr="-maxcomponents %d", units="NA",), - numpds=dict(argstr="-numpds %d", units="NA",), + anisfile=dict( + argstr="-anisfile %s", + extensions=None, + ), + anisthresh=dict( + argstr="-anisthresh %f", + ), + args=dict( + argstr="%s", + ), + curveinterval=dict( + argstr="-curveinterval %f", + requires=["curvethresh"], + ), + curvethresh=dict( + argstr="-curvethresh %f", + ), + data_dims=dict( + argstr="-datadims %s", + units="voxels", + ), + environ=dict( + nohash=True, + usedefault=True, 
+ ), + gzip=dict( + argstr="-gzip", + ), + in_file=dict( + argstr="-inputfile %s", + extensions=None, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + inputmodel=dict( + argstr="-inputmodel %s", + usedefault=True, + ), + interpolator=dict( + argstr="-interpolator %s", + ), + ipthresh=dict( + argstr="-ipthresh %f", + ), + iterations=dict( + argstr="-iterations %d", + units="NA", + ), + maxcomponents=dict( + argstr="-maxcomponents %d", + units="NA", + ), + numpds=dict( + argstr="-numpds %d", + units="NA", + ), out_file=dict( - argstr="-outputfile %s", extensions=None, genfile=True, position=-1, - ), - output_root=dict(argstr="-outputroot %s", extensions=None, position=-1,), - outputtracts=dict(argstr="-outputtracts %s",), - pdf=dict(argstr="-pdf %s",), - seed_file=dict(argstr="-seedfile %s", extensions=None, position=2,), - stepsize=dict(argstr="-stepsize %f", requires=["tracker"],), - tracker=dict(argstr="-tracker %s", usedefault=True,), - voxel_dims=dict(argstr="-voxeldims %s", units="mm",), + argstr="-outputfile %s", + extensions=None, + genfile=True, + position=-1, + ), + output_root=dict( + argstr="-outputroot %s", + extensions=None, + position=-1, + ), + outputtracts=dict( + argstr="-outputtracts %s", + ), + pdf=dict( + argstr="-pdf %s", + ), + seed_file=dict( + argstr="-seedfile %s", + extensions=None, + position=2, + ), + stepsize=dict( + argstr="-stepsize %f", + requires=["tracker"], + ), + tracker=dict( + argstr="-tracker %s", + usedefault=True, + ), + voxel_dims=dict( + argstr="-voxeldims %s", + units="mm", + ), ) inputs = TrackPICo.input_spec() @@ -39,7 +105,11 @@ def test_TrackPICo_inputs(): def test_TrackPICo_outputs(): - output_map = dict(tracked=dict(extensions=None,),) + output_map = dict( + tracked=dict( + extensions=None, + ), + ) outputs = TrackPICo.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TractShredder.py b/nipype/interfaces/camino/tests/test_auto_TractShredder.py index b8a95c9569..07678c5d64 100644 --- a/nipype/interfaces/camino/tests/test_auto_TractShredder.py +++ b/nipype/interfaces/camino/tests/test_auto_TractShredder.py @@ -4,13 +4,40 @@ def test_TractShredder_inputs(): input_map = dict( - args=dict(argstr="%s",), - bunchsize=dict(argstr="%d", position=2, units="NA",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=-2,), - offset=dict(argstr="%d", position=1, units="NA",), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), - space=dict(argstr="%d", position=3, units="NA",), + args=dict( + argstr="%s", + ), + bunchsize=dict( + argstr="%d", + position=2, + units="NA", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="< %s", + extensions=None, + mandatory=True, + position=-2, + ), + offset=dict( + argstr="%d", + position=1, + units="NA", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + space=dict( + argstr="%d", + position=3, + units="NA", + ), ) inputs = TractShredder.input_spec() @@ -20,7 +47,11 @@ def test_TractShredder_inputs(): def test_TractShredder_outputs(): - output_map = dict(shredded=dict(extensions=None,),) + output_map = dict( + shredded=dict( + extensions=None, + ), + ) outputs = TractShredder.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py 
b/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py index cee10a70db..194f233cc1 100644 --- a/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py +++ b/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py @@ -4,18 +4,58 @@ def test_VtkStreamlines_inputs(): input_map = dict( - args=dict(argstr="%s",), - colourorient=dict(argstr="-colourorient",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr=" < %s", extensions=None, mandatory=True, position=-2,), - inputmodel=dict(argstr="-inputmodel %s", usedefault=True,), - interpolate=dict(argstr="-interpolate",), - interpolatescalars=dict(argstr="-interpolatescalars",), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), - scalar_file=dict(argstr="-scalarfile %s", extensions=None, position=3,), - seed_file=dict(argstr="-seedfile %s", extensions=None, position=1,), - target_file=dict(argstr="-targetfile %s", extensions=None, position=2,), - voxeldims=dict(argstr="-voxeldims %s", position=4, units="mm",), + args=dict( + argstr="%s", + ), + colourorient=dict( + argstr="-colourorient", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr=" < %s", + extensions=None, + mandatory=True, + position=-2, + ), + inputmodel=dict( + argstr="-inputmodel %s", + usedefault=True, + ), + interpolate=dict( + argstr="-interpolate", + ), + interpolatescalars=dict( + argstr="-interpolatescalars", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + scalar_file=dict( + argstr="-scalarfile %s", + extensions=None, + position=3, + ), + seed_file=dict( + argstr="-seedfile %s", + extensions=None, + position=1, + ), + target_file=dict( + argstr="-targetfile %s", + extensions=None, + position=2, + ), + voxeldims=dict( + argstr="-voxeldims %s", + position=4, + units="mm", + ), ) inputs = VtkStreamlines.input_spec() @@ -25,7 +65,11 @@ def test_VtkStreamlines_inputs(): def test_VtkStreamlines_outputs(): - output_map = dict(vtk=dict(extensions=None,),) + output_map = dict( + vtk=dict( + extensions=None, + ), + ) outputs = VtkStreamlines.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py b/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py index 3cd618eb10..fb076c1107 100644 --- a/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py +++ b/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py @@ -4,16 +4,52 @@ def test_Camino2Trackvis_inputs(): input_map = dict( - args=dict(argstr="%s",), - data_dims=dict(argstr="-d %s", mandatory=True, position=4, sep=",",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=1,), - min_length=dict(argstr="-l %d", position=3, units="mm",), - nifti_file=dict(argstr="--nifti %s", extensions=None, position=7,), - out_file=dict(argstr="-o %s", extensions=None, genfile=True, position=2,), - voxel_dims=dict(argstr="-x %s", mandatory=True, position=5, sep=",",), + args=dict( + argstr="%s", + ), + data_dims=dict( + argstr="-d %s", + mandatory=True, + position=4, + sep=",", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + position=1, + ), + min_length=dict( + argstr="-l %d", + position=3, + units="mm", + ), + nifti_file=dict( + argstr="--nifti %s", + extensions=None, + position=7, + ), + out_file=dict( + argstr="-o %s", + 
extensions=None, + genfile=True, + position=2, + ), + voxel_dims=dict( + argstr="-x %s", + mandatory=True, + position=5, + sep=",", + ), voxel_order=dict( - argstr="--voxel-order %s", extensions=None, mandatory=True, position=6, + argstr="--voxel-order %s", + extensions=None, + mandatory=True, + position=6, ), ) inputs = Camino2Trackvis.input_spec() @@ -24,7 +60,11 @@ def test_Camino2Trackvis_inputs(): def test_Camino2Trackvis_outputs(): - output_map = dict(trackvis=dict(extensions=None,),) + output_map = dict( + trackvis=dict( + extensions=None, + ), + ) outputs = Camino2Trackvis.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py b/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py index b2869c08e3..ec7ed22d0c 100644 --- a/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py +++ b/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py @@ -4,11 +4,30 @@ def test_Trackvis2Camino_inputs(): input_map = dict( - append_file=dict(argstr="-a %s", extensions=None, position=2,), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=1,), - out_file=dict(argstr="-o %s", extensions=None, genfile=True, position=2,), + append_file=dict( + argstr="-a %s", + extensions=None, + position=2, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + position=1, + ), + out_file=dict( + argstr="-o %s", + extensions=None, + genfile=True, + position=2, + ), ) inputs = Trackvis2Camino.input_spec() @@ -18,7 +37,11 @@ def test_Trackvis2Camino_inputs(): def test_Trackvis2Camino_outputs(): - output_map = dict(camino=dict(extensions=None,),) + output_map = dict( + camino=dict( + extensions=None, + ), + ) outputs = Trackvis2Camino.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py b/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py index 41d3f6ecce..e115acad83 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py +++ b/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py @@ -4,11 +4,21 @@ def test_AverageNetworks_inputs(): input_map = dict( - group_id=dict(usedefault=True,), - in_files=dict(mandatory=True,), - out_gexf_groupavg=dict(extensions=None,), - out_gpickled_groupavg=dict(extensions=None,), - resolution_network_file=dict(extensions=None,), + group_id=dict( + usedefault=True, + ), + in_files=dict( + mandatory=True, + ), + out_gexf_groupavg=dict( + extensions=None, + ), + out_gpickled_groupavg=dict( + extensions=None, + ), + resolution_network_file=dict( + extensions=None, + ), ) inputs = AverageNetworks.input_spec() @@ -19,8 +29,12 @@ def test_AverageNetworks_inputs(): def test_AverageNetworks_outputs(): output_map = dict( - gexf_groupavg=dict(extensions=None,), - gpickled_groupavg=dict(extensions=None,), + gexf_groupavg=dict( + extensions=None, + ), + gpickled_groupavg=dict( + extensions=None, + ), matlab_groupavgs=dict(), ) outputs = AverageNetworks.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py b/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py index 43240defab..ee7b0459ef 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py +++ b/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py @@ -6,7 +6,9 @@ def 
test_CFFConverter_inputs(): input_map = dict( creator=dict(), data_files=dict(), - description=dict(usedefault=True,), + description=dict( + usedefault=True, + ), email=dict(), gifti_labels=dict(), gifti_surfaces=dict(), @@ -14,13 +16,18 @@ def test_CFFConverter_inputs(): graphml_networks=dict(), license=dict(), nifti_volumes=dict(), - out_file=dict(extensions=None, usedefault=True,), + out_file=dict( + extensions=None, + usedefault=True, + ), publisher=dict(), references=dict(), relation=dict(), rights=dict(), script_files=dict(), - species=dict(usedefault=True,), + species=dict( + usedefault=True, + ), timeseries_files=dict(), title=dict(), tract_files=dict(), @@ -33,7 +40,11 @@ def test_CFFConverter_inputs(): def test_CFFConverter_outputs(): - output_map = dict(connectome_file=dict(extensions=None,),) + output_map = dict( + connectome_file=dict( + extensions=None, + ), + ) outputs = CFFConverter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py b/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py index 3e68292557..a9466f91be 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py +++ b/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py @@ -4,17 +4,49 @@ def test_CreateMatrix_inputs(): input_map = dict( - count_region_intersections=dict(usedefault=True,), - out_endpoint_array_name=dict(extensions=None, genfile=True,), - out_fiber_length_std_matrix_mat_file=dict(extensions=None, genfile=True,), - out_intersection_matrix_mat_file=dict(extensions=None, genfile=True,), - out_matrix_file=dict(extensions=None, genfile=True,), - out_matrix_mat_file=dict(extensions=None, usedefault=True,), - out_mean_fiber_length_matrix_mat_file=dict(extensions=None, genfile=True,), - out_median_fiber_length_matrix_mat_file=dict(extensions=None, genfile=True,), - resolution_network_file=dict(extensions=None, mandatory=True,), - roi_file=dict(extensions=None, mandatory=True,), - tract_file=dict(extensions=None, mandatory=True,), + count_region_intersections=dict( + usedefault=True, + ), + out_endpoint_array_name=dict( + extensions=None, + genfile=True, + ), + out_fiber_length_std_matrix_mat_file=dict( + extensions=None, + genfile=True, + ), + out_intersection_matrix_mat_file=dict( + extensions=None, + genfile=True, + ), + out_matrix_file=dict( + extensions=None, + genfile=True, + ), + out_matrix_mat_file=dict( + extensions=None, + usedefault=True, + ), + out_mean_fiber_length_matrix_mat_file=dict( + extensions=None, + genfile=True, + ), + out_median_fiber_length_matrix_mat_file=dict( + extensions=None, + genfile=True, + ), + resolution_network_file=dict( + extensions=None, + mandatory=True, + ), + roi_file=dict( + extensions=None, + mandatory=True, + ), + tract_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = CreateMatrix.input_spec() @@ -25,24 +57,54 @@ def test_CreateMatrix_inputs(): def test_CreateMatrix_outputs(): output_map = dict( - endpoint_file=dict(extensions=None,), - endpoint_file_mm=dict(extensions=None,), - fiber_label_file=dict(extensions=None,), - fiber_labels_noorphans=dict(extensions=None,), - fiber_length_file=dict(extensions=None,), - fiber_length_std_matrix_mat_file=dict(extensions=None,), + endpoint_file=dict( + extensions=None, + ), + endpoint_file_mm=dict( + extensions=None, + ), + fiber_label_file=dict( + extensions=None, + ), + fiber_labels_noorphans=dict( + extensions=None, + ), + fiber_length_file=dict( + extensions=None, + ), + fiber_length_std_matrix_mat_file=dict( + 
extensions=None, + ), filtered_tractographies=dict(), - filtered_tractography=dict(extensions=None,), - filtered_tractography_by_intersections=dict(extensions=None,), - intersection_matrix_file=dict(extensions=None,), - intersection_matrix_mat_file=dict(extensions=None,), + filtered_tractography=dict( + extensions=None, + ), + filtered_tractography_by_intersections=dict( + extensions=None, + ), + intersection_matrix_file=dict( + extensions=None, + ), + intersection_matrix_mat_file=dict( + extensions=None, + ), matlab_matrix_files=dict(), - matrix_file=dict(extensions=None,), + matrix_file=dict( + extensions=None, + ), matrix_files=dict(), - matrix_mat_file=dict(extensions=None,), - mean_fiber_length_matrix_mat_file=dict(extensions=None,), - median_fiber_length_matrix_mat_file=dict(extensions=None,), - stats_file=dict(extensions=None,), + matrix_mat_file=dict( + extensions=None, + ), + mean_fiber_length_matrix_mat_file=dict( + extensions=None, + ), + median_fiber_length_matrix_mat_file=dict( + extensions=None, + ), + stats_file=dict( + extensions=None, + ), ) outputs = CreateMatrix.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py b/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py index 95023590d2..f88950d758 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py +++ b/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py @@ -4,9 +4,18 @@ def test_CreateNodes_inputs(): input_map = dict( - out_filename=dict(extensions=None, usedefault=True,), - resolution_network_file=dict(extensions=None, mandatory=True,), - roi_file=dict(extensions=None, mandatory=True,), + out_filename=dict( + extensions=None, + usedefault=True, + ), + resolution_network_file=dict( + extensions=None, + mandatory=True, + ), + roi_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = CreateNodes.input_spec() @@ -16,7 +25,11 @@ def test_CreateNodes_inputs(): def test_CreateNodes_outputs(): - output_map = dict(node_network=dict(extensions=None,),) + output_map = dict( + node_network=dict( + extensions=None, + ), + ) outputs = CreateNodes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py b/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py index 30aae80243..17f8990a08 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py +++ b/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py @@ -4,8 +4,13 @@ def test_MergeCNetworks_inputs(): input_map = dict( - in_files=dict(mandatory=True,), - out_file=dict(extensions=None, usedefault=True,), + in_files=dict( + mandatory=True, + ), + out_file=dict( + extensions=None, + usedefault=True, + ), ) inputs = MergeCNetworks.input_spec() @@ -15,7 +20,11 @@ def test_MergeCNetworks_inputs(): def test_MergeCNetworks_outputs(): - output_map = dict(connectome_file=dict(extensions=None,),) + output_map = dict( + connectome_file=dict( + extensions=None, + ), + ) outputs = MergeCNetworks.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py b/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py index af1c68fca2..975e4741cd 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py +++ b/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py @@ -4,15 +4,33 @@ def test_NetworkBasedStatistic_inputs(): input_map = dict( - edge_key=dict(usedefault=True,), - in_group1=dict(mandatory=True,), - in_group2=dict(mandatory=True,), - 
node_position_network=dict(extensions=None,), - number_of_permutations=dict(usedefault=True,), - out_nbs_network=dict(extensions=None,), - out_nbs_pval_network=dict(extensions=None,), - t_tail=dict(usedefault=True,), - threshold=dict(usedefault=True,), + edge_key=dict( + usedefault=True, + ), + in_group1=dict( + mandatory=True, + ), + in_group2=dict( + mandatory=True, + ), + node_position_network=dict( + extensions=None, + ), + number_of_permutations=dict( + usedefault=True, + ), + out_nbs_network=dict( + extensions=None, + ), + out_nbs_pval_network=dict( + extensions=None, + ), + t_tail=dict( + usedefault=True, + ), + threshold=dict( + usedefault=True, + ), ) inputs = NetworkBasedStatistic.input_spec() @@ -23,8 +41,12 @@ def test_NetworkBasedStatistic_inputs(): def test_NetworkBasedStatistic_outputs(): output_map = dict( - nbs_network=dict(extensions=None,), - nbs_pval_network=dict(extensions=None,), + nbs_network=dict( + extensions=None, + ), + nbs_pval_network=dict( + extensions=None, + ), network_files=dict(), ) outputs = NetworkBasedStatistic.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py b/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py index 820b447885..d171e6ab7a 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py +++ b/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py @@ -4,16 +4,44 @@ def test_NetworkXMetrics_inputs(): input_map = dict( - compute_clique_related_measures=dict(usedefault=True,), - in_file=dict(extensions=None, mandatory=True,), - out_edge_metrics_matlab=dict(extensions=None, genfile=True,), - out_global_metrics_matlab=dict(extensions=None, genfile=True,), - out_k_core=dict(extensions=None, usedefault=True,), - out_k_crust=dict(extensions=None, usedefault=True,), - out_k_shell=dict(extensions=None, usedefault=True,), - out_node_metrics_matlab=dict(extensions=None, genfile=True,), - out_pickled_extra_measures=dict(extensions=None, usedefault=True,), - treat_as_weighted_graph=dict(usedefault=True,), + compute_clique_related_measures=dict( + usedefault=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + out_edge_metrics_matlab=dict( + extensions=None, + genfile=True, + ), + out_global_metrics_matlab=dict( + extensions=None, + genfile=True, + ), + out_k_core=dict( + extensions=None, + usedefault=True, + ), + out_k_crust=dict( + extensions=None, + usedefault=True, + ), + out_k_shell=dict( + extensions=None, + usedefault=True, + ), + out_node_metrics_matlab=dict( + extensions=None, + genfile=True, + ), + out_pickled_extra_measures=dict( + extensions=None, + usedefault=True, + ), + treat_as_weighted_graph=dict( + usedefault=True, + ), ) inputs = NetworkXMetrics.input_spec() @@ -25,18 +53,32 @@ def test_NetworkXMetrics_inputs(): def test_NetworkXMetrics_outputs(): output_map = dict( edge_measure_networks=dict(), - edge_measures_matlab=dict(extensions=None,), - global_measures_matlab=dict(extensions=None,), + edge_measures_matlab=dict( + extensions=None, + ), + global_measures_matlab=dict( + extensions=None, + ), gpickled_network_files=dict(), - k_core=dict(extensions=None,), - k_crust=dict(extensions=None,), + k_core=dict( + extensions=None, + ), + k_crust=dict( + extensions=None, + ), k_networks=dict(), - k_shell=dict(extensions=None,), + k_shell=dict( + extensions=None, + ), matlab_dict_measures=dict(), matlab_matrix_files=dict(), node_measure_networks=dict(), - node_measures_matlab=dict(extensions=None,), - pickled_extra_measures=dict(extensions=None,), + 
node_measures_matlab=dict( + extensions=None, + ), + pickled_extra_measures=dict( + extensions=None, + ), ) outputs = NetworkXMetrics.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py b/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py index 8c380c5704..800b5b516b 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py +++ b/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py @@ -4,11 +4,20 @@ def test_Parcellate_inputs(): input_map = dict( - dilation=dict(usedefault=True,), + dilation=dict( + usedefault=True, + ), freesurfer_dir=dict(), - out_roi_file=dict(extensions=None, genfile=True,), - parcellation_name=dict(usedefault=True,), - subject_id=dict(mandatory=True,), + out_roi_file=dict( + extensions=None, + genfile=True, + ), + parcellation_name=dict( + usedefault=True, + ), + subject_id=dict( + mandatory=True, + ), subjects_dir=dict(), ) inputs = Parcellate.input_spec() @@ -20,14 +29,30 @@ def test_Parcellate_inputs(): def test_Parcellate_outputs(): output_map = dict( - aseg_file=dict(extensions=None,), - cc_unknown_file=dict(extensions=None,), - dilated_roi_file_in_structural_space=dict(extensions=None,), - ribbon_file=dict(extensions=None,), - roi_file=dict(extensions=None,), - roi_file_in_structural_space=dict(extensions=None,), - roiv_file=dict(extensions=None,), - white_matter_mask_file=dict(extensions=None,), + aseg_file=dict( + extensions=None, + ), + cc_unknown_file=dict( + extensions=None, + ), + dilated_roi_file_in_structural_space=dict( + extensions=None, + ), + ribbon_file=dict( + extensions=None, + ), + roi_file=dict( + extensions=None, + ), + roi_file_in_structural_space=dict( + extensions=None, + ), + roiv_file=dict( + extensions=None, + ), + white_matter_mask_file=dict( + extensions=None, + ), ) outputs = Parcellate.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py b/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py index 2191f940ac..54fd9e46e9 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py +++ b/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py @@ -4,12 +4,28 @@ def test_ROIGen_inputs(): input_map = dict( - LUT_file=dict(extensions=None, xor=["use_freesurfer_LUT"],), - aparc_aseg_file=dict(extensions=None, mandatory=True,), - freesurfer_dir=dict(requires=["use_freesurfer_LUT"],), - out_dict_file=dict(extensions=None, genfile=True,), - out_roi_file=dict(extensions=None, genfile=True,), - use_freesurfer_LUT=dict(xor=["LUT_file"],), + LUT_file=dict( + extensions=None, + xor=["use_freesurfer_LUT"], + ), + aparc_aseg_file=dict( + extensions=None, + mandatory=True, + ), + freesurfer_dir=dict( + requires=["use_freesurfer_LUT"], + ), + out_dict_file=dict( + extensions=None, + genfile=True, + ), + out_roi_file=dict( + extensions=None, + genfile=True, + ), + use_freesurfer_LUT=dict( + xor=["LUT_file"], + ), ) inputs = ROIGen.input_spec() @@ -20,7 +36,12 @@ def test_ROIGen_inputs(): def test_ROIGen_outputs(): output_map = dict( - dict_file=dict(extensions=None,), roi_file=dict(extensions=None,), + dict_file=dict( + extensions=None, + ), + roi_file=dict( + extensions=None, + ), ) outputs = ROIGen.output_spec() diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py index 91e164f35a..f7bf46f327 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py @@ -4,17 +4,49 @@ def test_DTIRecon_inputs(): input_map = dict( - 
DWI=dict(argstr="%s", extensions=None, mandatory=True, position=1,), - args=dict(argstr="%s",), - b0_threshold=dict(argstr="-b0_th",), - bvals=dict(extensions=None, mandatory=True,), - bvecs=dict(argstr="-gm %s", extensions=None, mandatory=True,), - environ=dict(nohash=True, usedefault=True,), - image_orientation_vectors=dict(argstr="-iop %f",), - n_averages=dict(argstr="-nex %s",), - oblique_correction=dict(argstr="-oc",), - out_prefix=dict(argstr="%s", position=2, usedefault=True,), - output_type=dict(argstr="-ot %s", usedefault=True,), + DWI=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + args=dict( + argstr="%s", + ), + b0_threshold=dict( + argstr="-b0_th", + ), + bvals=dict( + extensions=None, + mandatory=True, + ), + bvecs=dict( + argstr="-gm %s", + extensions=None, + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + image_orientation_vectors=dict( + argstr="-iop %f", + ), + n_averages=dict( + argstr="-nex %s", + ), + oblique_correction=dict( + argstr="-oc", + ), + out_prefix=dict( + argstr="%s", + position=2, + usedefault=True, + ), + output_type=dict( + argstr="-ot %s", + usedefault=True, + ), ) inputs = DTIRecon.input_spec() @@ -25,18 +57,42 @@ def test_DTIRecon_inputs(): def test_DTIRecon_outputs(): output_map = dict( - ADC=dict(extensions=None,), - B0=dict(extensions=None,), - FA=dict(extensions=None,), - FA_color=dict(extensions=None,), - L1=dict(extensions=None,), - L2=dict(extensions=None,), - L3=dict(extensions=None,), - V1=dict(extensions=None,), - V2=dict(extensions=None,), - V3=dict(extensions=None,), - exp=dict(extensions=None,), - tensor=dict(extensions=None,), + ADC=dict( + extensions=None, + ), + B0=dict( + extensions=None, + ), + FA=dict( + extensions=None, + ), + FA_color=dict( + extensions=None, + ), + L1=dict( + extensions=None, + ), + L2=dict( + extensions=None, + ), + L3=dict( + extensions=None, + ), + V1=dict( + extensions=None, + ), + V2=dict( + extensions=None, + ), + V3=dict( + extensions=None, + ), + exp=dict( + extensions=None, + ), + tensor=dict( + extensions=None, + ), ) outputs = DTIRecon.output_spec() diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py index d1fd3bd1e8..e550bc4b27 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py @@ -4,29 +4,87 @@ def test_DTITracker_inputs(): input_map = dict( - angle_threshold=dict(argstr="-at %f",), - angle_threshold_weight=dict(argstr="-atw %f",), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - input_data_prefix=dict(argstr="%s", position=0, usedefault=True,), - input_type=dict(argstr="-it %s",), - invert_x=dict(argstr="-ix",), - invert_y=dict(argstr="-iy",), - invert_z=dict(argstr="-iz",), - mask1_file=dict(argstr="-m %s", extensions=None, mandatory=True, position=2,), - mask1_threshold=dict(position=3,), - mask2_file=dict(argstr="-m2 %s", extensions=None, position=4,), - mask2_threshold=dict(position=5,), - output_file=dict(argstr="%s", extensions=None, position=1, usedefault=True,), - output_mask=dict(argstr="-om %s", extensions=None,), - primary_vector=dict(argstr="-%s",), - random_seed=dict(argstr="-rseed %d",), - step_length=dict(argstr="-l %f",), - swap_xy=dict(argstr="-sxy",), - swap_yz=dict(argstr="-syz",), - swap_zx=dict(argstr="-szx",), - tensor_file=dict(extensions=None,), - tracking_method=dict(argstr="-%s",), + 
angle_threshold=dict( + argstr="-at %f", + ), + angle_threshold_weight=dict( + argstr="-atw %f", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_data_prefix=dict( + argstr="%s", + position=0, + usedefault=True, + ), + input_type=dict( + argstr="-it %s", + ), + invert_x=dict( + argstr="-ix", + ), + invert_y=dict( + argstr="-iy", + ), + invert_z=dict( + argstr="-iz", + ), + mask1_file=dict( + argstr="-m %s", + extensions=None, + mandatory=True, + position=2, + ), + mask1_threshold=dict( + position=3, + ), + mask2_file=dict( + argstr="-m2 %s", + extensions=None, + position=4, + ), + mask2_threshold=dict( + position=5, + ), + output_file=dict( + argstr="%s", + extensions=None, + position=1, + usedefault=True, + ), + output_mask=dict( + argstr="-om %s", + extensions=None, + ), + primary_vector=dict( + argstr="-%s", + ), + random_seed=dict( + argstr="-rseed %d", + ), + step_length=dict( + argstr="-l %f", + ), + swap_xy=dict( + argstr="-sxy", + ), + swap_yz=dict( + argstr="-syz", + ), + swap_zx=dict( + argstr="-szx", + ), + tensor_file=dict( + extensions=None, + ), + tracking_method=dict( + argstr="-%s", + ), ) inputs = DTITracker.input_spec() @@ -37,7 +95,12 @@ def test_DTITracker_inputs(): def test_DTITracker_outputs(): output_map = dict( - mask_file=dict(extensions=None,), track_file=dict(extensions=None,), + mask_file=dict( + extensions=None, + ), + track_file=dict( + extensions=None, + ), ) outputs = DTITracker.output_spec() diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py index 6cddb31fdc..a933495672 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py @@ -4,17 +4,50 @@ def test_HARDIMat_inputs(): input_map = dict( - args=dict(argstr="%s",), - bvals=dict(extensions=None, mandatory=True,), - bvecs=dict(argstr="%s", extensions=None, mandatory=True, position=1,), - environ=dict(nohash=True, usedefault=True,), - image_info=dict(argstr="-info %s", extensions=None,), - image_orientation_vectors=dict(argstr="-iop %f",), - oblique_correction=dict(argstr="-oc",), - odf_file=dict(argstr="-odf %s", extensions=None,), - order=dict(argstr="-order %s",), - out_file=dict(argstr="%s", extensions=None, position=2, usedefault=True,), - reference_file=dict(argstr="-ref %s", extensions=None,), + args=dict( + argstr="%s", + ), + bvals=dict( + extensions=None, + mandatory=True, + ), + bvecs=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + image_info=dict( + argstr="-info %s", + extensions=None, + ), + image_orientation_vectors=dict( + argstr="-iop %f", + ), + oblique_correction=dict( + argstr="-oc", + ), + odf_file=dict( + argstr="-odf %s", + extensions=None, + ), + order=dict( + argstr="-order %s", + ), + out_file=dict( + argstr="%s", + extensions=None, + position=2, + usedefault=True, + ), + reference_file=dict( + argstr="-ref %s", + extensions=None, + ), ) inputs = HARDIMat.input_spec() @@ -24,7 +57,11 @@ def test_HARDIMat_inputs(): def test_HARDIMat_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = HARDIMat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py 
b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py index 0e8132a7f6..b6a18aaf77 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py @@ -4,22 +4,68 @@ def test_ODFRecon_inputs(): input_map = dict( - DWI=dict(argstr="%s", extensions=None, mandatory=True, position=1,), - args=dict(argstr="%s",), - dsi=dict(argstr="-dsi",), - environ=dict(nohash=True, usedefault=True,), - filter=dict(argstr="-f",), - image_orientation_vectors=dict(argstr="-iop %f",), - matrix=dict(argstr="-mat %s", extensions=None, mandatory=True,), - n_b0=dict(argstr="-b0 %s", mandatory=True,), - n_directions=dict(argstr="%s", mandatory=True, position=2,), - n_output_directions=dict(argstr="%s", mandatory=True, position=3,), - oblique_correction=dict(argstr="-oc",), - out_prefix=dict(argstr="%s", position=4, usedefault=True,), - output_entropy=dict(argstr="-oe",), - output_type=dict(argstr="-ot %s", usedefault=True,), - sharpness=dict(argstr="-s %f",), - subtract_background=dict(argstr="-bg",), + DWI=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + args=dict( + argstr="%s", + ), + dsi=dict( + argstr="-dsi", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + filter=dict( + argstr="-f", + ), + image_orientation_vectors=dict( + argstr="-iop %f", + ), + matrix=dict( + argstr="-mat %s", + extensions=None, + mandatory=True, + ), + n_b0=dict( + argstr="-b0 %s", + mandatory=True, + ), + n_directions=dict( + argstr="%s", + mandatory=True, + position=2, + ), + n_output_directions=dict( + argstr="%s", + mandatory=True, + position=3, + ), + oblique_correction=dict( + argstr="-oc", + ), + out_prefix=dict( + argstr="%s", + position=4, + usedefault=True, + ), + output_entropy=dict( + argstr="-oe", + ), + output_type=dict( + argstr="-ot %s", + usedefault=True, + ), + sharpness=dict( + argstr="-s %f", + ), + subtract_background=dict( + argstr="-bg", + ), ) inputs = ODFRecon.input_spec() @@ -30,11 +76,21 @@ def test_ODFRecon_inputs(): def test_ODFRecon_outputs(): output_map = dict( - B0=dict(extensions=None,), - DWI=dict(extensions=None,), - ODF=dict(extensions=None,), - entropy=dict(extensions=None,), - max=dict(extensions=None,), + B0=dict( + extensions=None, + ), + DWI=dict( + extensions=None, + ), + ODF=dict( + extensions=None, + ), + entropy=dict( + extensions=None, + ), + max=dict( + extensions=None, + ), ) outputs = ODFRecon.output_spec() diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py index 233aea3e3a..2118745f3f 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py @@ -4,33 +4,101 @@ def test_ODFTracker_inputs(): input_map = dict( - ODF=dict(extensions=None, mandatory=True,), - angle_threshold=dict(argstr="-at %f",), - args=dict(argstr="%s",), - disc=dict(argstr="-disc",), - dsi=dict(argstr="-dsi",), - environ=dict(nohash=True, usedefault=True,), - image_orientation_vectors=dict(argstr="-iop %f",), - input_data_prefix=dict(argstr="%s", position=0, usedefault=True,), - input_output_type=dict(argstr="-it %s", usedefault=True,), - invert_x=dict(argstr="-ix",), - invert_y=dict(argstr="-iy",), - invert_z=dict(argstr="-iz",), - limit=dict(argstr="-limit %d",), - mask1_file=dict(argstr="-m %s", extensions=None, mandatory=True, position=2,), - mask1_threshold=dict(position=3,), - 
mask2_file=dict(argstr="-m2 %s", extensions=None, position=4,), - mask2_threshold=dict(position=5,), - max=dict(extensions=None, mandatory=True,), - out_file=dict(argstr="%s", extensions=None, position=1, usedefault=True,), - random_seed=dict(argstr="-rseed %s",), - runge_kutta2=dict(argstr="-rk2",), - slice_order=dict(argstr="-sorder %d",), - step_length=dict(argstr="-l %f",), - swap_xy=dict(argstr="-sxy",), - swap_yz=dict(argstr="-syz",), - swap_zx=dict(argstr="-szx",), - voxel_order=dict(argstr="-vorder %s",), + ODF=dict( + extensions=None, + mandatory=True, + ), + angle_threshold=dict( + argstr="-at %f", + ), + args=dict( + argstr="%s", + ), + disc=dict( + argstr="-disc", + ), + dsi=dict( + argstr="-dsi", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + image_orientation_vectors=dict( + argstr="-iop %f", + ), + input_data_prefix=dict( + argstr="%s", + position=0, + usedefault=True, + ), + input_output_type=dict( + argstr="-it %s", + usedefault=True, + ), + invert_x=dict( + argstr="-ix", + ), + invert_y=dict( + argstr="-iy", + ), + invert_z=dict( + argstr="-iz", + ), + limit=dict( + argstr="-limit %d", + ), + mask1_file=dict( + argstr="-m %s", + extensions=None, + mandatory=True, + position=2, + ), + mask1_threshold=dict( + position=3, + ), + mask2_file=dict( + argstr="-m2 %s", + extensions=None, + position=4, + ), + mask2_threshold=dict( + position=5, + ), + max=dict( + extensions=None, + mandatory=True, + ), + out_file=dict( + argstr="%s", + extensions=None, + position=1, + usedefault=True, + ), + random_seed=dict( + argstr="-rseed %s", + ), + runge_kutta2=dict( + argstr="-rk2", + ), + slice_order=dict( + argstr="-sorder %d", + ), + step_length=dict( + argstr="-l %f", + ), + swap_xy=dict( + argstr="-sxy", + ), + swap_yz=dict( + argstr="-syz", + ), + swap_zx=dict( + argstr="-szx", + ), + voxel_order=dict( + argstr="-vorder %s", + ), ) inputs = ODFTracker.input_spec() @@ -40,7 +108,11 @@ def test_ODFTracker_inputs(): def test_ODFTracker_outputs(): - output_map = dict(track_file=dict(extensions=None,),) + output_map = dict( + track_file=dict( + extensions=None, + ), + ) outputs = ODFTracker.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py index ddff69b5de..65450952a4 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py @@ -4,11 +4,30 @@ def test_SplineFilter_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - output_file=dict(argstr="%s", extensions=None, position=2, usedefault=True,), - step_length=dict(argstr="%f", mandatory=True, position=1,), - track_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + output_file=dict( + argstr="%s", + extensions=None, + position=2, + usedefault=True, + ), + step_length=dict( + argstr="%f", + mandatory=True, + position=1, + ), + track_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), ) inputs = SplineFilter.input_spec() @@ -18,7 +37,11 @@ def test_SplineFilter_inputs(): def test_SplineFilter_outputs(): - output_map = dict(smoothed_track_file=dict(extensions=None,),) + output_map = dict( + smoothed_track_file=dict( + extensions=None, + ), + ) outputs = SplineFilter.output_spec() for key, 
metadata in list(output_map.items()): diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py index 1c274533f0..7f668df568 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py @@ -4,10 +4,24 @@ def test_TrackMerge_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - output_file=dict(argstr="%s", extensions=None, position=-1, usedefault=True,), - track_files=dict(argstr="%s...", mandatory=True, position=0,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + output_file=dict( + argstr="%s", + extensions=None, + position=-1, + usedefault=True, + ), + track_files=dict( + argstr="%s...", + mandatory=True, + position=0, + ), ) inputs = TrackMerge.input_spec() @@ -17,7 +31,11 @@ def test_TrackMerge_inputs(): def test_TrackMerge_outputs(): - output_map = dict(track_file=dict(extensions=None,),) + output_map = dict( + track_file=dict( + extensions=None, + ), + ) outputs = TrackMerge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_APMQball.py b/nipype/interfaces/dipy/tests/test_auto_APMQball.py index fcc97ebf70..81ff397cb8 100644 --- a/nipype/interfaces/dipy/tests/test_auto_APMQball.py +++ b/nipype/interfaces/dipy/tests/test_auto_APMQball.py @@ -4,11 +4,24 @@ def test_APMQball_inputs(): input_map = dict( - b0_thres=dict(usedefault=True,), - in_bval=dict(extensions=None, mandatory=True,), - in_bvec=dict(extensions=None, mandatory=True,), - in_file=dict(extensions=None, mandatory=True,), - mask_file=dict(extensions=None,), + b0_thres=dict( + usedefault=True, + ), + in_bval=dict( + extensions=None, + mandatory=True, + ), + in_bvec=dict( + extensions=None, + mandatory=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + mask_file=dict( + extensions=None, + ), out_prefix=dict(), ) inputs = APMQball.input_spec() @@ -19,7 +32,11 @@ def test_APMQball_inputs(): def test_APMQball_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = APMQball.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_CSD.py b/nipype/interfaces/dipy/tests/test_auto_CSD.py index a9c92b02b1..7cdab47f9b 100644 --- a/nipype/interfaces/dipy/tests/test_auto_CSD.py +++ b/nipype/interfaces/dipy/tests/test_auto_CSD.py @@ -4,16 +4,37 @@ def test_CSD_inputs(): input_map = dict( - b0_thres=dict(usedefault=True,), - in_bval=dict(extensions=None, mandatory=True,), - in_bvec=dict(extensions=None, mandatory=True,), - in_file=dict(extensions=None, mandatory=True,), - in_mask=dict(extensions=None,), - out_fods=dict(extensions=None,), + b0_thres=dict( + usedefault=True, + ), + in_bval=dict( + extensions=None, + mandatory=True, + ), + in_bvec=dict( + extensions=None, + mandatory=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + in_mask=dict( + extensions=None, + ), + out_fods=dict( + extensions=None, + ), out_prefix=dict(), - response=dict(extensions=None,), - save_fods=dict(usedefault=True,), - sh_order=dict(usedefault=True,), + response=dict( + extensions=None, + ), + save_fods=dict( + usedefault=True, + ), + sh_order=dict( + usedefault=True, + ), ) inputs = CSD.input_spec() @@ -23,7 +44,14 @@ def test_CSD_inputs(): def 
test_CSD_outputs(): - output_map = dict(model=dict(extensions=None,), out_fods=dict(extensions=None,),) + output_map = dict( + model=dict( + extensions=None, + ), + out_fods=dict( + extensions=None, + ), + ) outputs = CSD.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_DTI.py b/nipype/interfaces/dipy/tests/test_auto_DTI.py index 8e2482b129..1cea142a36 100644 --- a/nipype/interfaces/dipy/tests/test_auto_DTI.py +++ b/nipype/interfaces/dipy/tests/test_auto_DTI.py @@ -4,11 +4,24 @@ def test_DTI_inputs(): input_map = dict( - b0_thres=dict(usedefault=True,), - in_bval=dict(extensions=None, mandatory=True,), - in_bvec=dict(extensions=None, mandatory=True,), - in_file=dict(extensions=None, mandatory=True,), - mask_file=dict(extensions=None,), + b0_thres=dict( + usedefault=True, + ), + in_bval=dict( + extensions=None, + mandatory=True, + ), + in_bvec=dict( + extensions=None, + mandatory=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + mask_file=dict( + extensions=None, + ), out_prefix=dict(), ) inputs = DTI.input_spec() @@ -20,12 +33,24 @@ def test_DTI_inputs(): def test_DTI_outputs(): output_map = dict( - ad_file=dict(extensions=None,), - color_fa_file=dict(extensions=None,), - fa_file=dict(extensions=None,), - md_file=dict(extensions=None,), - out_file=dict(extensions=None,), - rd_file=dict(extensions=None,), + ad_file=dict( + extensions=None, + ), + color_fa_file=dict( + extensions=None, + ), + fa_file=dict( + extensions=None, + ), + md_file=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + rd_file=dict( + extensions=None, + ), ) outputs = DTI.output_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_Denoise.py b/nipype/interfaces/dipy/tests/test_auto_Denoise.py index 453e794f39..e85d2644c2 100644 --- a/nipype/interfaces/dipy/tests/test_auto_Denoise.py +++ b/nipype/interfaces/dipy/tests/test_auto_Denoise.py @@ -4,13 +4,29 @@ def test_Denoise_inputs(): input_map = dict( - block_radius=dict(usedefault=True,), - in_file=dict(extensions=None, mandatory=True,), - in_mask=dict(extensions=None,), - noise_mask=dict(extensions=None,), - noise_model=dict(mandatory=True, usedefault=True,), - patch_radius=dict(usedefault=True,), - signal_mask=dict(extensions=None,), + block_radius=dict( + usedefault=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + in_mask=dict( + extensions=None, + ), + noise_mask=dict( + extensions=None, + ), + noise_model=dict( + mandatory=True, + usedefault=True, + ), + patch_radius=dict( + usedefault=True, + ), + signal_mask=dict( + extensions=None, + ), snr=dict(), ) inputs = Denoise.input_spec() @@ -21,7 +37,11 @@ def test_Denoise_inputs(): def test_Denoise_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Denoise.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py b/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py index 657128b050..e292135ba0 100644 --- a/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py +++ b/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py @@ -4,10 +4,21 @@ def test_DipyDiffusionInterface_inputs(): input_map = dict( - b0_thres=dict(usedefault=True,), - in_bval=dict(extensions=None, mandatory=True,), - in_bvec=dict(extensions=None, mandatory=True,), - in_file=dict(extensions=None, mandatory=True,), + 
b0_thres=dict( + usedefault=True, + ), + in_bval=dict( + extensions=None, + mandatory=True, + ), + in_bvec=dict( + extensions=None, + mandatory=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), out_prefix=dict(), ) inputs = DipyDiffusionInterface.input_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py b/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py index 9b9cf49d6f..00c8c1ba0d 100644 --- a/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py +++ b/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py @@ -4,19 +4,49 @@ def test_EstimateResponseSH_inputs(): input_map = dict( - auto=dict(xor=["recursive"],), - b0_thres=dict(usedefault=True,), - fa_thresh=dict(usedefault=True,), - in_bval=dict(extensions=None, mandatory=True,), - in_bvec=dict(extensions=None, mandatory=True,), - in_evals=dict(extensions=None, mandatory=True,), - in_file=dict(extensions=None, mandatory=True,), - in_mask=dict(extensions=None,), - out_mask=dict(extensions=None, usedefault=True,), + auto=dict( + xor=["recursive"], + ), + b0_thres=dict( + usedefault=True, + ), + fa_thresh=dict( + usedefault=True, + ), + in_bval=dict( + extensions=None, + mandatory=True, + ), + in_bvec=dict( + extensions=None, + mandatory=True, + ), + in_evals=dict( + extensions=None, + mandatory=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + in_mask=dict( + extensions=None, + ), + out_mask=dict( + extensions=None, + usedefault=True, + ), out_prefix=dict(), - recursive=dict(xor=["auto"],), - response=dict(extensions=None, usedefault=True,), - roi_radius=dict(usedefault=True,), + recursive=dict( + xor=["auto"], + ), + response=dict( + extensions=None, + usedefault=True, + ), + roi_radius=dict( + usedefault=True, + ), ) inputs = EstimateResponseSH.input_spec() @@ -26,7 +56,14 @@ def test_EstimateResponseSH_inputs(): def test_EstimateResponseSH_outputs(): - output_map = dict(out_mask=dict(extensions=None,), response=dict(extensions=None,),) + output_map = dict( + out_mask=dict( + extensions=None, + ), + response=dict( + extensions=None, + ), + ) outputs = EstimateResponseSH.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_RESTORE.py b/nipype/interfaces/dipy/tests/test_auto_RESTORE.py index a172847174..f25127f9c9 100644 --- a/nipype/interfaces/dipy/tests/test_auto_RESTORE.py +++ b/nipype/interfaces/dipy/tests/test_auto_RESTORE.py @@ -4,12 +4,27 @@ def test_RESTORE_inputs(): input_map = dict( - b0_thres=dict(usedefault=True,), - in_bval=dict(extensions=None, mandatory=True,), - in_bvec=dict(extensions=None, mandatory=True,), - in_file=dict(extensions=None, mandatory=True,), - in_mask=dict(extensions=None,), - noise_mask=dict(extensions=None,), + b0_thres=dict( + usedefault=True, + ), + in_bval=dict( + extensions=None, + mandatory=True, + ), + in_bvec=dict( + extensions=None, + mandatory=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + in_mask=dict( + extensions=None, + ), + noise_mask=dict( + extensions=None, + ), out_prefix=dict(), ) inputs = RESTORE.input_spec() @@ -21,13 +36,27 @@ def test_RESTORE_inputs(): def test_RESTORE_outputs(): output_map = dict( - evals=dict(extensions=None,), - evecs=dict(extensions=None,), - fa=dict(extensions=None,), - md=dict(extensions=None,), - mode=dict(extensions=None,), - rd=dict(extensions=None,), - trace=dict(extensions=None,), + evals=dict( + extensions=None, + ), + evecs=dict( + extensions=None, + ), + fa=dict( + 
extensions=None, + ), + md=dict( + extensions=None, + ), + mode=dict( + extensions=None, + ), + rd=dict( + extensions=None, + ), + trace=dict( + extensions=None, + ), ) outputs = RESTORE.output_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_Resample.py b/nipype/interfaces/dipy/tests/test_auto_Resample.py index ac1b6ce9cd..6c765b2fa9 100644 --- a/nipype/interfaces/dipy/tests/test_auto_Resample.py +++ b/nipype/interfaces/dipy/tests/test_auto_Resample.py @@ -4,8 +4,14 @@ def test_Resample_inputs(): input_map = dict( - in_file=dict(extensions=None, mandatory=True,), - interp=dict(mandatory=True, usedefault=True,), + in_file=dict( + extensions=None, + mandatory=True, + ), + interp=dict( + mandatory=True, + usedefault=True, + ), vox_size=dict(), ) inputs = Resample.input_spec() @@ -16,7 +22,11 @@ def test_Resample_inputs(): def test_Resample_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Resample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py b/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py index 3202306026..16fff2aeff 100644 --- a/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py +++ b/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py @@ -4,24 +4,65 @@ def test_SimulateMultiTensor_inputs(): input_map = dict( - baseline=dict(extensions=None, mandatory=True,), - bvalues=dict(usedefault=True,), - diff_iso=dict(usedefault=True,), - diff_sf=dict(usedefault=True,), - gradients=dict(extensions=None,), - in_bval=dict(extensions=None,), - in_bvec=dict(extensions=None,), - in_dirs=dict(mandatory=True,), - in_frac=dict(mandatory=True,), - in_mask=dict(extensions=None,), - in_vfms=dict(mandatory=True,), - n_proc=dict(usedefault=True,), - num_dirs=dict(usedefault=True,), - out_bval=dict(extensions=None, usedefault=True,), - out_bvec=dict(extensions=None, usedefault=True,), - out_file=dict(extensions=None, usedefault=True,), - out_mask=dict(extensions=None, usedefault=True,), - snr=dict(usedefault=True,), + baseline=dict( + extensions=None, + mandatory=True, + ), + bvalues=dict( + usedefault=True, + ), + diff_iso=dict( + usedefault=True, + ), + diff_sf=dict( + usedefault=True, + ), + gradients=dict( + extensions=None, + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + extensions=None, + ), + in_dirs=dict( + mandatory=True, + ), + in_frac=dict( + mandatory=True, + ), + in_mask=dict( + extensions=None, + ), + in_vfms=dict( + mandatory=True, + ), + n_proc=dict( + usedefault=True, + ), + num_dirs=dict( + usedefault=True, + ), + out_bval=dict( + extensions=None, + usedefault=True, + ), + out_bvec=dict( + extensions=None, + usedefault=True, + ), + out_file=dict( + extensions=None, + usedefault=True, + ), + out_mask=dict( + extensions=None, + usedefault=True, + ), + snr=dict( + usedefault=True, + ), ) inputs = SimulateMultiTensor.input_spec() @@ -32,10 +73,18 @@ def test_SimulateMultiTensor_inputs(): def test_SimulateMultiTensor_outputs(): output_map = dict( - out_bval=dict(extensions=None,), - out_bvec=dict(extensions=None,), - out_file=dict(extensions=None,), - out_mask=dict(extensions=None,), + out_bval=dict( + extensions=None, + ), + out_bvec=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + out_mask=dict( + extensions=None, + ), ) outputs = SimulateMultiTensor.output_spec() diff --git 
a/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py b/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py index bbe4abce94..ad97523ce2 100644 --- a/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py +++ b/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py @@ -4,19 +4,50 @@ def test_StreamlineTractography_inputs(): input_map = dict( - gfa_thresh=dict(mandatory=True, usedefault=True,), - in_file=dict(extensions=None, mandatory=True,), - in_model=dict(extensions=None,), - in_peaks=dict(extensions=None,), - min_angle=dict(mandatory=True, usedefault=True,), - multiprocess=dict(mandatory=True, usedefault=True,), - num_seeds=dict(mandatory=True, usedefault=True,), + gfa_thresh=dict( + mandatory=True, + usedefault=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + in_model=dict( + extensions=None, + ), + in_peaks=dict( + extensions=None, + ), + min_angle=dict( + mandatory=True, + usedefault=True, + ), + multiprocess=dict( + mandatory=True, + usedefault=True, + ), + num_seeds=dict( + mandatory=True, + usedefault=True, + ), out_prefix=dict(), - peak_threshold=dict(mandatory=True, usedefault=True,), - save_seeds=dict(mandatory=True, usedefault=True,), - seed_coord=dict(extensions=None,), - seed_mask=dict(extensions=None,), - tracking_mask=dict(extensions=None,), + peak_threshold=dict( + mandatory=True, + usedefault=True, + ), + save_seeds=dict( + mandatory=True, + usedefault=True, + ), + seed_coord=dict( + extensions=None, + ), + seed_mask=dict( + extensions=None, + ), + tracking_mask=dict( + extensions=None, + ), ) inputs = StreamlineTractography.input_spec() @@ -27,10 +58,18 @@ def test_StreamlineTractography_inputs(): def test_StreamlineTractography_outputs(): output_map = dict( - gfa=dict(extensions=None,), - odf_peaks=dict(extensions=None,), - out_seeds=dict(extensions=None,), - tracks=dict(extensions=None,), + gfa=dict( + extensions=None, + ), + odf_peaks=dict( + extensions=None, + ), + out_seeds=dict( + extensions=None, + ), + tracks=dict( + extensions=None, + ), ) outputs = StreamlineTractography.output_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_TensorMode.py b/nipype/interfaces/dipy/tests/test_auto_TensorMode.py index 29a01e4a75..59b1b30e8b 100644 --- a/nipype/interfaces/dipy/tests/test_auto_TensorMode.py +++ b/nipype/interfaces/dipy/tests/test_auto_TensorMode.py @@ -4,11 +4,24 @@ def test_TensorMode_inputs(): input_map = dict( - b0_thres=dict(usedefault=True,), - in_bval=dict(extensions=None, mandatory=True,), - in_bvec=dict(extensions=None, mandatory=True,), - in_file=dict(extensions=None, mandatory=True,), - mask_file=dict(extensions=None,), + b0_thres=dict( + usedefault=True, + ), + in_bval=dict( + extensions=None, + mandatory=True, + ), + in_bvec=dict( + extensions=None, + mandatory=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + mask_file=dict( + extensions=None, + ), out_prefix=dict(), ) inputs = TensorMode.input_spec() @@ -19,7 +32,11 @@ def test_TensorMode_inputs(): def test_TensorMode_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TensorMode.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py b/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py index 06265ffd0f..0c7855c507 100644 --- a/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py +++ 
b/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py @@ -5,10 +5,20 @@ def test_TrackDensityMap_inputs(): input_map = dict( data_dims=dict(), - in_file=dict(extensions=None, mandatory=True,), - out_filename=dict(extensions=None, usedefault=True,), - points_space=dict(usedefault=True,), - reference=dict(extensions=None,), + in_file=dict( + extensions=None, + mandatory=True, + ), + out_filename=dict( + extensions=None, + usedefault=True, + ), + points_space=dict( + usedefault=True, + ), + reference=dict( + extensions=None, + ), voxel_dims=dict(), ) inputs = TrackDensityMap.input_spec() @@ -19,7 +29,11 @@ def test_TrackDensityMap_inputs(): def test_TrackDensityMap_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TrackDensityMap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py b/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py index e48312d3f2..2988e44e8d 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py +++ b/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py @@ -4,12 +4,30 @@ def test_AffScalarVol_inputs(): input_map = dict( - args=dict(argstr="%s",), - deformation=dict(argstr="-deformation %g %g %g %g %g %g", xor=["transform"],), - environ=dict(nohash=True, usedefault=True,), - euler=dict(argstr="-euler %g %g %g", xor=["transform"],), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), - interpolation=dict(argstr="-interp %s", usedefault=True,), + args=dict( + argstr="%s", + ), + deformation=dict( + argstr="-deformation %g %g %g %g %g %g", + xor=["transform"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + euler=dict( + argstr="-euler %g %g %g", + xor=["transform"], + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + interpolation=dict( + argstr="-interp %s", + usedefault=True, + ), out_file=dict( argstr="-out %s", extensions=None, @@ -17,13 +35,20 @@ def test_AffScalarVol_inputs(): name_source="in_file", name_template="%s_affxfmd", ), - target=dict(argstr="-target %s", extensions=None, xor=["transform"],), + target=dict( + argstr="-target %s", + extensions=None, + xor=["transform"], + ), transform=dict( argstr="-trans %s", extensions=None, xor=["target", "translation", "euler", "deformation"], ), - translation=dict(argstr="-translation %g %g %g", xor=["transform"],), + translation=dict( + argstr="-translation %g %g %g", + xor=["transform"], + ), ) inputs = AffScalarVol.input_spec() @@ -33,7 +58,11 @@ def test_AffScalarVol_inputs(): def test_AffScalarVol_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = AffScalarVol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py b/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py index 54a562e8be..d1ba18a8ac 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py +++ b/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py @@ -4,12 +4,30 @@ def test_AffSymTensor3DVol_inputs(): input_map = dict( - args=dict(argstr="%s",), - deformation=dict(argstr="-deformation %g %g %g %g %g %g", xor=["transform"],), - environ=dict(nohash=True, usedefault=True,), - euler=dict(argstr="-euler %g %g %g", xor=["transform"],), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), - 
interpolation=dict(argstr="-interp %s", usedefault=True,), + args=dict( + argstr="%s", + ), + deformation=dict( + argstr="-deformation %g %g %g %g %g %g", + xor=["transform"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + euler=dict( + argstr="-euler %g %g %g", + xor=["transform"], + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + interpolation=dict( + argstr="-interp %s", + usedefault=True, + ), out_file=dict( argstr="-out %s", extensions=None, @@ -17,14 +35,24 @@ def test_AffSymTensor3DVol_inputs(): name_source="in_file", name_template="%s_affxfmd", ), - reorient=dict(argstr="-reorient %s", usedefault=True,), - target=dict(argstr="-target %s", extensions=None, xor=["transform"],), + reorient=dict( + argstr="-reorient %s", + usedefault=True, + ), + target=dict( + argstr="-target %s", + extensions=None, + xor=["transform"], + ), transform=dict( argstr="-trans %s", extensions=None, xor=["target", "translation", "euler", "deformation"], ), - translation=dict(argstr="-translation %g %g %g", xor=["transform"],), + translation=dict( + argstr="-translation %g %g %g", + xor=["transform"], + ), ) inputs = AffSymTensor3DVol.input_spec() @@ -34,7 +62,11 @@ def test_AffSymTensor3DVol_inputs(): def test_AffSymTensor3DVol_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = AffSymTensor3DVol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_Affine.py b/nipype/interfaces/dtitk/tests/test_auto_Affine.py index e095ce3922..0ff24b788c 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_Affine.py +++ b/nipype/interfaces/dtitk/tests/test_auto_Affine.py @@ -4,21 +4,50 @@ def test_Affine_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), fixed_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=0, + ), + ftol=dict( + argstr="%g", + mandatory=True, + position=4, + usedefault=True, + ), + initialize_xfm=dict( + argstr="%s", + copyfile=True, + extensions=None, + position=5, ), - ftol=dict(argstr="%g", mandatory=True, position=4, usedefault=True,), - initialize_xfm=dict(argstr="%s", copyfile=True, extensions=None, position=5,), moving_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=1, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=1, ), sampling_xyz=dict( - argstr="%g %g %g", mandatory=True, position=3, usedefault=True, + argstr="%g %g %g", + mandatory=True, + position=3, + usedefault=True, ), similarity_metric=dict( - argstr="%s", mandatory=True, position=2, usedefault=True, + argstr="%s", + mandatory=True, + position=2, + usedefault=True, ), ) inputs = Affine.input_spec() @@ -30,7 +59,12 @@ def test_Affine_inputs(): def test_Affine_outputs(): output_map = dict( - out_file=dict(extensions=None,), out_file_xfm=dict(extensions=None,), + out_file=dict( + extensions=None, + ), + out_file_xfm=dict( + extensions=None, + ), ) outputs = Affine.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py b/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py index 31820fc56d..c680c08815 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py +++ 
b/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py @@ -4,21 +4,50 @@ def test_AffineTask_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), fixed_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=0, + ), + ftol=dict( + argstr="%g", + mandatory=True, + position=4, + usedefault=True, + ), + initialize_xfm=dict( + argstr="%s", + copyfile=True, + extensions=None, + position=5, ), - ftol=dict(argstr="%g", mandatory=True, position=4, usedefault=True,), - initialize_xfm=dict(argstr="%s", copyfile=True, extensions=None, position=5,), moving_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=1, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=1, ), sampling_xyz=dict( - argstr="%g %g %g", mandatory=True, position=3, usedefault=True, + argstr="%g %g %g", + mandatory=True, + position=3, + usedefault=True, ), similarity_metric=dict( - argstr="%s", mandatory=True, position=2, usedefault=True, + argstr="%s", + mandatory=True, + position=2, + usedefault=True, ), ) inputs = AffineTask.input_spec() @@ -30,7 +59,12 @@ def test_AffineTask_inputs(): def test_AffineTask_outputs(): output_map = dict( - out_file=dict(extensions=None,), out_file_xfm=dict(extensions=None,), + out_file=dict( + extensions=None, + ), + out_file_xfm=dict( + extensions=None, + ), ) outputs = AffineTask.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py b/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py index d1780ffe5c..994c8a2b8d 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py +++ b/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py @@ -4,11 +4,31 @@ def test_BinThresh_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), - inside_value=dict(argstr="%g", mandatory=True, position=4, usedefault=True,), - lower_bound=dict(argstr="%g", mandatory=True, position=2, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), + inside_value=dict( + argstr="%g", + mandatory=True, + position=4, + usedefault=True, + ), + lower_bound=dict( + argstr="%g", + mandatory=True, + position=2, + usedefault=True, + ), out_file=dict( argstr="%s", extensions=None, @@ -17,8 +37,18 @@ def test_BinThresh_inputs(): name_template="%s_thrbin", position=1, ), - outside_value=dict(argstr="%g", mandatory=True, position=5, usedefault=True,), - upper_bound=dict(argstr="%g", mandatory=True, position=3, usedefault=True,), + outside_value=dict( + argstr="%g", + mandatory=True, + position=5, + usedefault=True, + ), + upper_bound=dict( + argstr="%g", + mandatory=True, + position=3, + usedefault=True, + ), ) inputs = BinThresh.input_spec() @@ -28,7 +58,11 @@ def test_BinThresh_inputs(): def test_BinThresh_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = BinThresh.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py b/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py index 
b8e03f023b..cccf7eed27 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py @@ -4,11 +4,31 @@ def test_BinThreshTask_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), - inside_value=dict(argstr="%g", mandatory=True, position=4, usedefault=True,), - lower_bound=dict(argstr="%g", mandatory=True, position=2, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), + inside_value=dict( + argstr="%g", + mandatory=True, + position=4, + usedefault=True, + ), + lower_bound=dict( + argstr="%g", + mandatory=True, + position=2, + usedefault=True, + ), out_file=dict( argstr="%s", extensions=None, @@ -17,8 +37,18 @@ def test_BinThreshTask_inputs(): name_template="%s_thrbin", position=1, ), - outside_value=dict(argstr="%g", mandatory=True, position=5, usedefault=True,), - upper_bound=dict(argstr="%g", mandatory=True, position=3, usedefault=True,), + outside_value=dict( + argstr="%g", + mandatory=True, + position=5, + usedefault=True, + ), + upper_bound=dict( + argstr="%g", + mandatory=True, + position=3, + usedefault=True, + ), ) inputs = BinThreshTask.input_spec() @@ -28,7 +58,11 @@ def test_BinThreshTask_inputs(): def test_BinThreshTask_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = BinThreshTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_CommandLineDtitk.py b/nipype/interfaces/dtitk/tests/test_auto_CommandLineDtitk.py index ba76666b1f..03044b6bc4 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_CommandLineDtitk.py +++ b/nipype/interfaces/dtitk/tests/test_auto_CommandLineDtitk.py @@ -4,7 +4,13 @@ def test_CommandLineDtitk_inputs(): input_map = dict( - args=dict(argstr="%s",), environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), ) inputs = CommandLineDtitk.input_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py index 88dac765f2..fa34fdcdb3 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py +++ b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py @@ -4,11 +4,28 @@ def test_ComposeXfm_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_aff=dict(argstr="-aff %s", extensions=None, mandatory=True,), - in_df=dict(argstr="-df %s", extensions=None, mandatory=True,), - out_file=dict(argstr="-out %s", extensions=None, genfile=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_aff=dict( + argstr="-aff %s", + extensions=None, + mandatory=True, + ), + in_df=dict( + argstr="-df %s", + extensions=None, + mandatory=True, + ), + out_file=dict( + argstr="-out %s", + extensions=None, + genfile=True, + ), ) inputs = ComposeXfm.input_spec() @@ -18,7 +35,11 @@ def test_ComposeXfm_inputs(): def test_ComposeXfm_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ComposeXfm.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py index 3f573a1815..b4b2f7509e 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py @@ -4,11 +4,28 @@ def test_ComposeXfmTask_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_aff=dict(argstr="-aff %s", extensions=None, mandatory=True,), - in_df=dict(argstr="-df %s", extensions=None, mandatory=True,), - out_file=dict(argstr="-out %s", extensions=None, genfile=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_aff=dict( + argstr="-aff %s", + extensions=None, + mandatory=True, + ), + in_df=dict( + argstr="-df %s", + extensions=None, + mandatory=True, + ), + out_file=dict( + argstr="-out %s", + extensions=None, + genfile=True, + ), ) inputs = ComposeXfmTask.input_spec() @@ -18,7 +35,11 @@ def test_ComposeXfmTask_inputs(): def test_ComposeXfmTask_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ComposeXfmTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py b/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py index cc354b0cde..89a1bdcfc8 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py +++ b/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py @@ -4,14 +4,47 @@ def test_Diffeo_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - fixed_file=dict(argstr="%s", extensions=None, position=0,), - ftol=dict(argstr="%g", mandatory=True, position=5, usedefault=True,), - legacy=dict(argstr="%d", mandatory=True, position=3, usedefault=True,), - mask_file=dict(argstr="%s", extensions=None, position=2,), - moving_file=dict(argstr="%s", copyfile=False, extensions=None, position=1,), - n_iters=dict(argstr="%d", mandatory=True, position=4, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_file=dict( + argstr="%s", + extensions=None, + position=0, + ), + ftol=dict( + argstr="%g", + mandatory=True, + position=5, + usedefault=True, + ), + legacy=dict( + argstr="%d", + mandatory=True, + position=3, + usedefault=True, + ), + mask_file=dict( + argstr="%s", + extensions=None, + position=2, + ), + moving_file=dict( + argstr="%s", + copyfile=False, + extensions=None, + position=1, + ), + n_iters=dict( + argstr="%d", + mandatory=True, + position=4, + usedefault=True, + ), ) inputs = Diffeo.input_spec() @@ -22,7 +55,12 @@ def test_Diffeo_inputs(): def test_Diffeo_outputs(): output_map = dict( - out_file=dict(extensions=None,), out_file_xfm=dict(extensions=None,), + out_file=dict( + extensions=None, + ), + out_file_xfm=dict( + extensions=None, + ), ) outputs = Diffeo.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py b/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py index cc85e03870..39255fb5c1 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py +++ b/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py @@ -4,11 +4,25 @@ def test_DiffeoScalarVol_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - flip=dict(argstr="-flip %d %d %d",), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), - 
interpolation=dict(argstr="-interp %s", usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flip=dict( + argstr="-flip %d %d %d", + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + interpolation=dict( + argstr="-interp %s", + usedefault=True, + ), out_file=dict( argstr="-out %s", extensions=None, @@ -16,10 +30,23 @@ def test_DiffeoScalarVol_inputs(): name_source="in_file", name_template="%s_diffeoxfmd", ), - resampling_type=dict(argstr="-type %s",), - target=dict(argstr="-target %s", extensions=None, xor=["voxel_size"],), - transform=dict(argstr="-trans %s", extensions=None, mandatory=True,), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target"],), + resampling_type=dict( + argstr="-type %s", + ), + target=dict( + argstr="-target %s", + extensions=None, + xor=["voxel_size"], + ), + transform=dict( + argstr="-trans %s", + extensions=None, + mandatory=True, + ), + voxel_size=dict( + argstr="-vsize %g %g %g", + xor=["target"], + ), ) inputs = DiffeoScalarVol.input_spec() @@ -29,7 +56,11 @@ def test_DiffeoScalarVol_inputs(): def test_DiffeoScalarVol_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = DiffeoScalarVol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py b/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py index 67626f3ce6..123b741645 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py +++ b/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py @@ -4,12 +4,29 @@ def test_DiffeoSymTensor3DVol_inputs(): input_map = dict( - args=dict(argstr="%s",), - df=dict(argstr="-df %s", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - flip=dict(argstr="-flip %d %d %d",), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), - interpolation=dict(argstr="-interp %s", usedefault=True,), + args=dict( + argstr="%s", + ), + df=dict( + argstr="-df %s", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flip=dict( + argstr="-flip %d %d %d", + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + interpolation=dict( + argstr="-interp %s", + usedefault=True, + ), out_file=dict( argstr="-out %s", extensions=None, @@ -17,11 +34,27 @@ def test_DiffeoSymTensor3DVol_inputs(): name_source="in_file", name_template="%s_diffeoxfmd", ), - reorient=dict(argstr="-reorient %s", usedefault=True,), - resampling_type=dict(argstr="-type %s",), - target=dict(argstr="-target %s", extensions=None, xor=["voxel_size"],), - transform=dict(argstr="-trans %s", extensions=None, mandatory=True,), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target"],), + reorient=dict( + argstr="-reorient %s", + usedefault=True, + ), + resampling_type=dict( + argstr="-type %s", + ), + target=dict( + argstr="-target %s", + extensions=None, + xor=["voxel_size"], + ), + transform=dict( + argstr="-trans %s", + extensions=None, + mandatory=True, + ), + voxel_size=dict( + argstr="-vsize %g %g %g", + xor=["target"], + ), ) inputs = DiffeoSymTensor3DVol.input_spec() @@ -31,7 +64,11 @@ def test_DiffeoSymTensor3DVol_inputs(): def test_DiffeoSymTensor3DVol_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = DiffeoSymTensor3DVol.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py b/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py index 0eb20d64d6..f7914ab9cc 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py @@ -4,14 +4,47 @@ def test_DiffeoTask_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - fixed_file=dict(argstr="%s", extensions=None, position=0,), - ftol=dict(argstr="%g", mandatory=True, position=5, usedefault=True,), - legacy=dict(argstr="%d", mandatory=True, position=3, usedefault=True,), - mask_file=dict(argstr="%s", extensions=None, position=2,), - moving_file=dict(argstr="%s", copyfile=False, extensions=None, position=1,), - n_iters=dict(argstr="%d", mandatory=True, position=4, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_file=dict( + argstr="%s", + extensions=None, + position=0, + ), + ftol=dict( + argstr="%g", + mandatory=True, + position=5, + usedefault=True, + ), + legacy=dict( + argstr="%d", + mandatory=True, + position=3, + usedefault=True, + ), + mask_file=dict( + argstr="%s", + extensions=None, + position=2, + ), + moving_file=dict( + argstr="%s", + copyfile=False, + extensions=None, + position=1, + ), + n_iters=dict( + argstr="%d", + mandatory=True, + position=4, + usedefault=True, + ), ) inputs = DiffeoTask.input_spec() @@ -22,7 +55,12 @@ def test_DiffeoTask_inputs(): def test_DiffeoTask_outputs(): output_map = dict( - out_file=dict(extensions=None,), out_file_xfm=dict(extensions=None,), + out_file=dict( + extensions=None, + ), + out_file_xfm=dict( + extensions=None, + ), ) outputs = DiffeoTask.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_Rigid.py b/nipype/interfaces/dtitk/tests/test_auto_Rigid.py index 629fd5b780..dbcc6f0fcf 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_Rigid.py +++ b/nipype/interfaces/dtitk/tests/test_auto_Rigid.py @@ -4,21 +4,50 @@ def test_Rigid_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), fixed_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=0, + ), + ftol=dict( + argstr="%g", + mandatory=True, + position=4, + usedefault=True, + ), + initialize_xfm=dict( + argstr="%s", + copyfile=True, + extensions=None, + position=5, ), - ftol=dict(argstr="%g", mandatory=True, position=4, usedefault=True,), - initialize_xfm=dict(argstr="%s", copyfile=True, extensions=None, position=5,), moving_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=1, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=1, ), sampling_xyz=dict( - argstr="%g %g %g", mandatory=True, position=3, usedefault=True, + argstr="%g %g %g", + mandatory=True, + position=3, + usedefault=True, ), similarity_metric=dict( - argstr="%s", mandatory=True, position=2, usedefault=True, + argstr="%s", + mandatory=True, + position=2, + usedefault=True, ), ) inputs = Rigid.input_spec() @@ -30,7 +59,12 @@ def test_Rigid_inputs(): def test_Rigid_outputs(): output_map = dict( - out_file=dict(extensions=None,), out_file_xfm=dict(extensions=None,), + out_file=dict( + extensions=None, + ), + out_file_xfm=dict( + extensions=None, + ), ) outputs = 
Rigid.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py b/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py index ec280e06a9..6c5236607c 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py @@ -4,21 +4,50 @@ def test_RigidTask_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), fixed_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=0, + ), + ftol=dict( + argstr="%g", + mandatory=True, + position=4, + usedefault=True, + ), + initialize_xfm=dict( + argstr="%s", + copyfile=True, + extensions=None, + position=5, ), - ftol=dict(argstr="%g", mandatory=True, position=4, usedefault=True,), - initialize_xfm=dict(argstr="%s", copyfile=True, extensions=None, position=5,), moving_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=1, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=1, ), sampling_xyz=dict( - argstr="%g %g %g", mandatory=True, position=3, usedefault=True, + argstr="%g %g %g", + mandatory=True, + position=3, + usedefault=True, ), similarity_metric=dict( - argstr="%s", mandatory=True, position=2, usedefault=True, + argstr="%s", + mandatory=True, + position=2, + usedefault=True, ), ) inputs = RigidTask.input_spec() @@ -30,7 +59,12 @@ def test_RigidTask_inputs(): def test_RigidTask_outputs(): output_map = dict( - out_file=dict(extensions=None,), out_file_xfm=dict(extensions=None,), + out_file=dict( + extensions=None, + ), + out_file_xfm=dict( + extensions=None, + ), ) outputs = RigidTask.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py index c01e08a66e..3d32a314bd 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py +++ b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py @@ -4,10 +4,22 @@ def test_SVAdjustVoxSp_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), - origin=dict(argstr="-origin %g %g %g", xor=["target_file"],), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + origin=dict( + argstr="-origin %g %g %g", + xor=["target_file"], + ), out_file=dict( argstr="-out %s", extensions=None, @@ -16,9 +28,14 @@ def test_SVAdjustVoxSp_inputs(): name_template="%s_avs", ), target_file=dict( - argstr="-target %s", extensions=None, xor=["voxel_size", "origin"], + argstr="-target %s", + extensions=None, + xor=["voxel_size", "origin"], + ), + voxel_size=dict( + argstr="-vsize %g %g %g", + xor=["target_file"], ), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"],), ) inputs = SVAdjustVoxSp.input_spec() @@ -28,7 +45,11 @@ def test_SVAdjustVoxSp_inputs(): def test_SVAdjustVoxSp_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SVAdjustVoxSp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py 
index 0ca4e416ae..cedc693a24 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py @@ -4,10 +4,22 @@ def test_SVAdjustVoxSpTask_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), - origin=dict(argstr="-origin %g %g %g", xor=["target_file"],), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + origin=dict( + argstr="-origin %g %g %g", + xor=["target_file"], + ), out_file=dict( argstr="-out %s", extensions=None, @@ -16,9 +28,14 @@ def test_SVAdjustVoxSpTask_inputs(): name_template="%s_avs", ), target_file=dict( - argstr="-target %s", extensions=None, xor=["voxel_size", "origin"], + argstr="-target %s", + extensions=None, + xor=["voxel_size", "origin"], + ), + voxel_size=dict( + argstr="-vsize %g %g %g", + xor=["target_file"], ), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"],), ) inputs = SVAdjustVoxSpTask.input_spec() @@ -28,7 +45,11 @@ def test_SVAdjustVoxSpTask_inputs(): def test_SVAdjustVoxSpTask_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SVAdjustVoxSpTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVResample.py b/nipype/interfaces/dtitk/tests/test_auto_SVResample.py index c08df4bfbb..fc880cd3e7 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_SVResample.py +++ b/nipype/interfaces/dtitk/tests/test_auto_SVResample.py @@ -4,12 +4,29 @@ def test_SVResample_inputs(): input_map = dict( - align=dict(argstr="-align %s",), - args=dict(argstr="%s",), - array_size=dict(argstr="-size %d %d %d", xor=["target_file"],), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), - origin=dict(argstr="-origin %g %g %g", xor=["target_file"],), + align=dict( + argstr="-align %s", + ), + args=dict( + argstr="%s", + ), + array_size=dict( + argstr="-size %d %d %d", + xor=["target_file"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + origin=dict( + argstr="-origin %g %g %g", + xor=["target_file"], + ), out_file=dict( argstr="-out %s", extensions=None, @@ -22,7 +39,10 @@ def test_SVResample_inputs(): extensions=None, xor=["array_size", "voxel_size", "origin"], ), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"],), + voxel_size=dict( + argstr="-vsize %g %g %g", + xor=["target_file"], + ), ) inputs = SVResample.input_spec() @@ -32,7 +52,11 @@ def test_SVResample_inputs(): def test_SVResample_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SVResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py b/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py index 467163504b..88412e68ae 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py @@ -4,12 +4,29 @@ def test_SVResampleTask_inputs(): input_map = dict( - align=dict(argstr="-align %s",), - args=dict(argstr="%s",), - array_size=dict(argstr="-size 
%d %d %d", xor=["target_file"],), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), - origin=dict(argstr="-origin %g %g %g", xor=["target_file"],), + align=dict( + argstr="-align %s", + ), + args=dict( + argstr="%s", + ), + array_size=dict( + argstr="-size %d %d %d", + xor=["target_file"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + origin=dict( + argstr="-origin %g %g %g", + xor=["target_file"], + ), out_file=dict( argstr="-out %s", extensions=None, @@ -22,7 +39,10 @@ def test_SVResampleTask_inputs(): extensions=None, xor=["array_size", "voxel_size", "origin"], ), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"],), + voxel_size=dict( + argstr="-vsize %g %g %g", + xor=["target_file"], + ), ) inputs = SVResampleTask.input_spec() @@ -32,7 +52,11 @@ def test_SVResampleTask_inputs(): def test_SVResampleTask_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SVResampleTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py index d22aa78c9c..333e05628d 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py @@ -4,10 +4,22 @@ def test_TVAdjustOriginTask_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), - origin=dict(argstr="-origin %g %g %g", xor=["target_file"],), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + origin=dict( + argstr="-origin %g %g %g", + xor=["target_file"], + ), out_file=dict( argstr="-out %s", extensions=None, @@ -16,9 +28,14 @@ def test_TVAdjustOriginTask_inputs(): name_template="%s_avs", ), target_file=dict( - argstr="-target %s", extensions=None, xor=["voxel_size", "origin"], + argstr="-target %s", + extensions=None, + xor=["voxel_size", "origin"], + ), + voxel_size=dict( + argstr="-vsize %g %g %g", + xor=["target_file"], ), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"],), ) inputs = TVAdjustOriginTask.input_spec() @@ -28,7 +45,11 @@ def test_TVAdjustOriginTask_inputs(): def test_TVAdjustOriginTask_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TVAdjustOriginTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py index 70dc59c5a3..9c6596042b 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py @@ -4,10 +4,22 @@ def test_TVAdjustVoxSp_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), - origin=dict(argstr="-origin %g %g %g", xor=["target_file"],), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + 
origin=dict( + argstr="-origin %g %g %g", + xor=["target_file"], + ), out_file=dict( argstr="-out %s", extensions=None, @@ -16,9 +28,14 @@ def test_TVAdjustVoxSp_inputs(): name_template="%s_avs", ), target_file=dict( - argstr="-target %s", extensions=None, xor=["voxel_size", "origin"], + argstr="-target %s", + extensions=None, + xor=["voxel_size", "origin"], + ), + voxel_size=dict( + argstr="-vsize %g %g %g", + xor=["target_file"], ), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"],), ) inputs = TVAdjustVoxSp.input_spec() @@ -28,7 +45,11 @@ def test_TVAdjustVoxSp_inputs(): def test_TVAdjustVoxSp_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TVAdjustVoxSp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py index a23056e502..f34a76ae7b 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py @@ -4,10 +4,22 @@ def test_TVAdjustVoxSpTask_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), - origin=dict(argstr="-origin %g %g %g", xor=["target_file"],), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + origin=dict( + argstr="-origin %g %g %g", + xor=["target_file"], + ), out_file=dict( argstr="-out %s", extensions=None, @@ -16,9 +28,14 @@ def test_TVAdjustVoxSpTask_inputs(): name_template="%s_avs", ), target_file=dict( - argstr="-target %s", extensions=None, xor=["voxel_size", "origin"], + argstr="-target %s", + extensions=None, + xor=["voxel_size", "origin"], + ), + voxel_size=dict( + argstr="-vsize %g %g %g", + xor=["target_file"], ), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"],), ) inputs = TVAdjustVoxSpTask.input_spec() @@ -28,7 +45,11 @@ def test_TVAdjustVoxSpTask_inputs(): def test_TVAdjustVoxSpTask_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TVAdjustVoxSpTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVResample.py b/nipype/interfaces/dtitk/tests/test_auto_TVResample.py index 13142572f9..2ca99176f8 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVResample.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVResample.py @@ -4,13 +4,32 @@ def test_TVResample_inputs(): input_map = dict( - align=dict(argstr="-align %s",), - args=dict(argstr="%s",), - array_size=dict(argstr="-size %d %d %d", xor=["target_file"],), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), - interpolation=dict(argstr="-interp %s",), - origin=dict(argstr="-origin %g %g %g", xor=["target_file"],), + align=dict( + argstr="-align %s", + ), + args=dict( + argstr="%s", + ), + array_size=dict( + argstr="-size %d %d %d", + xor=["target_file"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + interpolation=dict( + argstr="-interp %s", + ), + origin=dict( + argstr="-origin %g %g %g", + xor=["target_file"], + ), out_file=dict( 
argstr="-out %s", extensions=None, @@ -23,7 +42,10 @@ def test_TVResample_inputs(): extensions=None, xor=["array_size", "voxel_size", "origin"], ), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"],), + voxel_size=dict( + argstr="-vsize %g %g %g", + xor=["target_file"], + ), ) inputs = TVResample.input_spec() @@ -33,7 +55,11 @@ def test_TVResample_inputs(): def test_TVResample_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TVResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py index 3bf6221d24..d1f908fca8 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py @@ -4,13 +4,32 @@ def test_TVResampleTask_inputs(): input_map = dict( - align=dict(argstr="-align %s",), - args=dict(argstr="%s",), - array_size=dict(argstr="-size %d %d %d", xor=["target_file"],), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), - interpolation=dict(argstr="-interp %s",), - origin=dict(argstr="-origin %g %g %g", xor=["target_file"],), + align=dict( + argstr="-align %s", + ), + args=dict( + argstr="%s", + ), + array_size=dict( + argstr="-size %d %d %d", + xor=["target_file"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + interpolation=dict( + argstr="-interp %s", + ), + origin=dict( + argstr="-origin %g %g %g", + xor=["target_file"], + ), out_file=dict( argstr="-out %s", extensions=None, @@ -23,7 +42,10 @@ def test_TVResampleTask_inputs(): extensions=None, xor=["array_size", "voxel_size", "origin"], ), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"],), + voxel_size=dict( + argstr="-vsize %g %g %g", + xor=["target_file"], + ), ) inputs = TVResampleTask.input_spec() @@ -33,7 +55,11 @@ def test_TVResampleTask_inputs(): def test_TVResampleTask_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TVResampleTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVtool.py b/nipype/interfaces/dtitk/tests/test_auto_TVtool.py index 812049f83b..11e2d05acc 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVtool.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVtool.py @@ -4,11 +4,26 @@ def test_TVtool_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), - in_flag=dict(argstr="-%s",), - out_file=dict(argstr="-out %s", extensions=None, genfile=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + in_flag=dict( + argstr="-%s", + ), + out_file=dict( + argstr="-out %s", + extensions=None, + genfile=True, + ), ) inputs = TVtool.input_spec() @@ -18,7 +33,11 @@ def test_TVtool_inputs(): def test_TVtool_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TVtool.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py index a26d2e76f2..7af7bcb75b 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py @@ -4,11 +4,26 @@ def test_TVtoolTask_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), - in_flag=dict(argstr="-%s",), - out_file=dict(argstr="-out %s", extensions=None, genfile=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + in_flag=dict( + argstr="-%s", + ), + out_file=dict( + argstr="-out %s", + extensions=None, + genfile=True, + ), ) inputs = TVtoolTask.input_spec() @@ -18,7 +33,11 @@ def test_TVtoolTask_inputs(): def test_TVtoolTask_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TVtoolTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py index 67b1d0efbb..8d3ebfad98 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py @@ -4,12 +4,30 @@ def test_affScalarVolTask_inputs(): input_map = dict( - args=dict(argstr="%s",), - deformation=dict(argstr="-deformation %g %g %g %g %g %g", xor=["transform"],), - environ=dict(nohash=True, usedefault=True,), - euler=dict(argstr="-euler %g %g %g", xor=["transform"],), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), - interpolation=dict(argstr="-interp %s", usedefault=True,), + args=dict( + argstr="%s", + ), + deformation=dict( + argstr="-deformation %g %g %g %g %g %g", + xor=["transform"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + euler=dict( + argstr="-euler %g %g %g", + xor=["transform"], + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + interpolation=dict( + argstr="-interp %s", + usedefault=True, + ), out_file=dict( argstr="-out %s", extensions=None, @@ -17,13 +35,20 @@ def test_affScalarVolTask_inputs(): name_source="in_file", name_template="%s_affxfmd", ), - target=dict(argstr="-target %s", extensions=None, xor=["transform"],), + target=dict( + argstr="-target %s", + extensions=None, + xor=["transform"], + ), transform=dict( argstr="-trans %s", extensions=None, xor=["target", "translation", "euler", "deformation"], ), - translation=dict(argstr="-translation %g %g %g", xor=["transform"],), + translation=dict( + argstr="-translation %g %g %g", + xor=["transform"], + ), ) inputs = affScalarVolTask.input_spec() @@ -33,7 +58,11 @@ def test_affScalarVolTask_inputs(): def test_affScalarVolTask_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = affScalarVolTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py index 33778b661e..fbfde68e86 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py @@ -4,12 +4,30 @@ def 
test_affSymTensor3DVolTask_inputs(): input_map = dict( - args=dict(argstr="%s",), - deformation=dict(argstr="-deformation %g %g %g %g %g %g", xor=["transform"],), - environ=dict(nohash=True, usedefault=True,), - euler=dict(argstr="-euler %g %g %g", xor=["transform"],), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), - interpolation=dict(argstr="-interp %s", usedefault=True,), + args=dict( + argstr="%s", + ), + deformation=dict( + argstr="-deformation %g %g %g %g %g %g", + xor=["transform"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + euler=dict( + argstr="-euler %g %g %g", + xor=["transform"], + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + interpolation=dict( + argstr="-interp %s", + usedefault=True, + ), out_file=dict( argstr="-out %s", extensions=None, @@ -17,14 +35,24 @@ def test_affSymTensor3DVolTask_inputs(): name_source="in_file", name_template="%s_affxfmd", ), - reorient=dict(argstr="-reorient %s", usedefault=True,), - target=dict(argstr="-target %s", extensions=None, xor=["transform"],), + reorient=dict( + argstr="-reorient %s", + usedefault=True, + ), + target=dict( + argstr="-target %s", + extensions=None, + xor=["transform"], + ), transform=dict( argstr="-trans %s", extensions=None, xor=["target", "translation", "euler", "deformation"], ), - translation=dict(argstr="-translation %g %g %g", xor=["transform"],), + translation=dict( + argstr="-translation %g %g %g", + xor=["transform"], + ), ) inputs = affSymTensor3DVolTask.input_spec() @@ -34,7 +62,11 @@ def test_affSymTensor3DVolTask_inputs(): def test_affSymTensor3DVolTask_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = affSymTensor3DVolTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py index e1e2468b5c..4a7ffee6f4 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py @@ -4,11 +4,25 @@ def test_diffeoScalarVolTask_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - flip=dict(argstr="-flip %d %d %d",), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), - interpolation=dict(argstr="-interp %s", usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flip=dict( + argstr="-flip %d %d %d", + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + interpolation=dict( + argstr="-interp %s", + usedefault=True, + ), out_file=dict( argstr="-out %s", extensions=None, @@ -16,10 +30,23 @@ def test_diffeoScalarVolTask_inputs(): name_source="in_file", name_template="%s_diffeoxfmd", ), - resampling_type=dict(argstr="-type %s",), - target=dict(argstr="-target %s", extensions=None, xor=["voxel_size"],), - transform=dict(argstr="-trans %s", extensions=None, mandatory=True,), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target"],), + resampling_type=dict( + argstr="-type %s", + ), + target=dict( + argstr="-target %s", + extensions=None, + xor=["voxel_size"], + ), + transform=dict( + argstr="-trans %s", + extensions=None, + mandatory=True, + ), + voxel_size=dict( + argstr="-vsize %g %g %g", + xor=["target"], + ), ) inputs = diffeoScalarVolTask.input_spec() @@ -29,7 +56,11 @@ def 
test_diffeoScalarVolTask_inputs(): def test_diffeoScalarVolTask_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = diffeoScalarVolTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py index 0956ec615a..6724343e69 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py @@ -4,12 +4,29 @@ def test_diffeoSymTensor3DVolTask_inputs(): input_map = dict( - args=dict(argstr="%s",), - df=dict(argstr="-df %s", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - flip=dict(argstr="-flip %d %d %d",), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), - interpolation=dict(argstr="-interp %s", usedefault=True,), + args=dict( + argstr="%s", + ), + df=dict( + argstr="-df %s", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flip=dict( + argstr="-flip %d %d %d", + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + interpolation=dict( + argstr="-interp %s", + usedefault=True, + ), out_file=dict( argstr="-out %s", extensions=None, @@ -17,11 +34,27 @@ def test_diffeoSymTensor3DVolTask_inputs(): name_source="in_file", name_template="%s_diffeoxfmd", ), - reorient=dict(argstr="-reorient %s", usedefault=True,), - resampling_type=dict(argstr="-type %s",), - target=dict(argstr="-target %s", extensions=None, xor=["voxel_size"],), - transform=dict(argstr="-trans %s", extensions=None, mandatory=True,), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target"],), + reorient=dict( + argstr="-reorient %s", + usedefault=True, + ), + resampling_type=dict( + argstr="-type %s", + ), + target=dict( + argstr="-target %s", + extensions=None, + xor=["voxel_size"], + ), + transform=dict( + argstr="-trans %s", + extensions=None, + mandatory=True, + ), + voxel_size=dict( + argstr="-vsize %g %g %g", + xor=["target"], + ), ) inputs = diffeoSymTensor3DVolTask.input_spec() @@ -31,7 +64,11 @@ def test_diffeoSymTensor3DVolTask_inputs(): def test_diffeoSymTensor3DVolTask_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = diffeoSymTensor3DVolTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py b/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py index 509c0a5a77..b7895e1cf1 100644 --- a/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py +++ b/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py @@ -4,15 +4,45 @@ def test_AnalyzeWarp_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - jac=dict(argstr="-jac %s", usedefault=True,), - jacmat=dict(argstr="-jacmat %s", usedefault=True,), - moving_image=dict(argstr="-in %s", extensions=None,), - num_threads=dict(argstr="-threads %01d", nohash=True, usedefault=True,), - output_path=dict(argstr="-out %s", mandatory=True, usedefault=True,), - points=dict(argstr="-def %s", position=0, usedefault=True,), - transform_file=dict(argstr="-tp %s", extensions=None, mandatory=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + jac=dict( + argstr="-jac %s", + usedefault=True, + ), + 
jacmat=dict( + argstr="-jacmat %s", + usedefault=True, + ), + moving_image=dict( + argstr="-in %s", + extensions=None, + ), + num_threads=dict( + argstr="-threads %01d", + nohash=True, + usedefault=True, + ), + output_path=dict( + argstr="-out %s", + mandatory=True, + usedefault=True, + ), + points=dict( + argstr="-def %s", + position=0, + usedefault=True, + ), + transform_file=dict( + argstr="-tp %s", + extensions=None, + mandatory=True, + ), ) inputs = AnalyzeWarp.input_spec() @@ -23,9 +53,15 @@ def test_AnalyzeWarp_inputs(): def test_AnalyzeWarp_outputs(): output_map = dict( - disp_field=dict(extensions=None,), - jacdet_map=dict(extensions=None,), - jacmat_map=dict(extensions=None,), + disp_field=dict( + extensions=None, + ), + jacdet_map=dict( + extensions=None, + ), + jacmat_map=dict( + extensions=None, + ), ) outputs = AnalyzeWarp.output_spec() diff --git a/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py b/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py index fd77478270..0eebfb0665 100644 --- a/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py +++ b/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py @@ -4,12 +4,33 @@ def test_ApplyWarp_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - moving_image=dict(argstr="-in %s", extensions=None, mandatory=True,), - num_threads=dict(argstr="-threads %01d", nohash=True, usedefault=True,), - output_path=dict(argstr="-out %s", mandatory=True, usedefault=True,), - transform_file=dict(argstr="-tp %s", extensions=None, mandatory=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + moving_image=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + num_threads=dict( + argstr="-threads %01d", + nohash=True, + usedefault=True, + ), + output_path=dict( + argstr="-out %s", + mandatory=True, + usedefault=True, + ), + transform_file=dict( + argstr="-tp %s", + extensions=None, + mandatory=True, + ), ) inputs = ApplyWarp.input_spec() @@ -19,7 +40,11 @@ def test_ApplyWarp_inputs(): def test_ApplyWarp_outputs(): - output_map = dict(warped_file=dict(extensions=None,),) + output_map = dict( + warped_file=dict( + extensions=None, + ), + ) outputs = ApplyWarp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/elastix/tests/test_auto_EditTransform.py b/nipype/interfaces/elastix/tests/test_auto_EditTransform.py index 687906f8b0..a8617c4988 100644 --- a/nipype/interfaces/elastix/tests/test_auto_EditTransform.py +++ b/nipype/interfaces/elastix/tests/test_auto_EditTransform.py @@ -4,12 +4,26 @@ def test_EditTransform_inputs(): input_map = dict( - interpolation=dict(argstr="FinalBSplineInterpolationOrder", usedefault=True,), - output_file=dict(extensions=None,), - output_format=dict(argstr="ResultImageFormat",), - output_type=dict(argstr="ResultImagePixelType",), - reference_image=dict(extensions=None,), - transform_file=dict(extensions=None, mandatory=True,), + interpolation=dict( + argstr="FinalBSplineInterpolationOrder", + usedefault=True, + ), + output_file=dict( + extensions=None, + ), + output_format=dict( + argstr="ResultImageFormat", + ), + output_type=dict( + argstr="ResultImagePixelType", + ), + reference_image=dict( + extensions=None, + ), + transform_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = EditTransform.input_spec() @@ -19,7 +33,11 @@ def test_EditTransform_inputs(): def test_EditTransform_outputs(): - output_map = dict(output_file=dict(extensions=None,),) + 
output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = EditTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py b/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py index 83e3092f28..bce2a3c662 100644 --- a/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py +++ b/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py @@ -4,12 +4,33 @@ def test_PointsWarp_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - num_threads=dict(argstr="-threads %01d", nohash=True, usedefault=True,), - output_path=dict(argstr="-out %s", mandatory=True, usedefault=True,), - points_file=dict(argstr="-def %s", extensions=None, mandatory=True,), - transform_file=dict(argstr="-tp %s", extensions=None, mandatory=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict( + argstr="-threads %01d", + nohash=True, + usedefault=True, + ), + output_path=dict( + argstr="-out %s", + mandatory=True, + usedefault=True, + ), + points_file=dict( + argstr="-def %s", + extensions=None, + mandatory=True, + ), + transform_file=dict( + argstr="-tp %s", + extensions=None, + mandatory=True, + ), ) inputs = PointsWarp.input_spec() @@ -19,7 +40,11 @@ def test_PointsWarp_inputs(): def test_PointsWarp_outputs(): - output_map = dict(warped_file=dict(extensions=None,),) + output_map = dict( + warped_file=dict( + extensions=None, + ), + ) outputs = PointsWarp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/elastix/tests/test_auto_Registration.py b/nipype/interfaces/elastix/tests/test_auto_Registration.py index 18ad8f93a2..ae6d929950 100644 --- a/nipype/interfaces/elastix/tests/test_auto_Registration.py +++ b/nipype/interfaces/elastix/tests/test_auto_Registration.py @@ -4,16 +4,49 @@ def test_Registration_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - fixed_image=dict(argstr="-f %s", extensions=None, mandatory=True,), - fixed_mask=dict(argstr="-fMask %s", extensions=None,), - initial_transform=dict(argstr="-t0 %s", extensions=None,), - moving_image=dict(argstr="-m %s", extensions=None, mandatory=True,), - moving_mask=dict(argstr="-mMask %s", extensions=None,), - num_threads=dict(argstr="-threads %01d", nohash=True, usedefault=True,), - output_path=dict(argstr="-out %s", mandatory=True, usedefault=True,), - parameters=dict(argstr="-p %s...", mandatory=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_image=dict( + argstr="-f %s", + extensions=None, + mandatory=True, + ), + fixed_mask=dict( + argstr="-fMask %s", + extensions=None, + ), + initial_transform=dict( + argstr="-t0 %s", + extensions=None, + ), + moving_image=dict( + argstr="-m %s", + extensions=None, + mandatory=True, + ), + moving_mask=dict( + argstr="-mMask %s", + extensions=None, + ), + num_threads=dict( + argstr="-threads %01d", + nohash=True, + usedefault=True, + ), + output_path=dict( + argstr="-out %s", + mandatory=True, + usedefault=True, + ), + parameters=dict( + argstr="-p %s...", + mandatory=True, + ), ) inputs = Registration.input_spec() @@ -25,7 +58,9 @@ def test_Registration_inputs(): def test_Registration_outputs(): output_map = dict( transform=dict(), - warped_file=dict(extensions=None,), + warped_file=dict( + extensions=None, + ), warped_files=dict(), warped_files_flags=dict(), ) diff --git 
a/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py b/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py index 499d85a437..23618b2aa8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py @@ -4,14 +4,38 @@ def test_AddXFormToHeader_inputs(): input_map = dict( - args=dict(argstr="%s",), - copy_name=dict(argstr="-c",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - out_file=dict(argstr="%s", extensions=None, position=-1, usedefault=True,), + args=dict( + argstr="%s", + ), + copy_name=dict( + argstr="-c", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + out_file=dict( + argstr="%s", + extensions=None, + position=-1, + usedefault=True, + ), subjects_dir=dict(), - transform=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), - verbose=dict(argstr="-v",), + transform=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + verbose=dict( + argstr="-v", + ), ) inputs = AddXFormToHeader.input_spec() @@ -21,7 +45,11 @@ def test_AddXFormToHeader_inputs(): def test_AddXFormToHeader_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = AddXFormToHeader.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py b/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py index 2c0d0ad5ef..bc65ee096c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py @@ -4,29 +4,87 @@ def test_Aparc2Aseg_inputs(): input_map = dict( - a2009s=dict(argstr="--a2009s",), - args=dict(argstr="%s",), - aseg=dict(argstr="--aseg %s", extensions=None,), + a2009s=dict( + argstr="--a2009s", + ), + args=dict( + argstr="%s", + ), + aseg=dict( + argstr="--aseg %s", + extensions=None, + ), copy_inputs=dict(), - ctxseg=dict(argstr="--ctxseg %s", extensions=None,), - environ=dict(nohash=True, usedefault=True,), - filled=dict(extensions=None,), - hypo_wm=dict(argstr="--hypo-as-wm",), - label_wm=dict(argstr="--labelwm",), - lh_annotation=dict(extensions=None, mandatory=True,), - lh_pial=dict(extensions=None, mandatory=True,), - lh_ribbon=dict(extensions=None, mandatory=True,), - lh_white=dict(extensions=None, mandatory=True,), - out_file=dict(argstr="--o %s", extensions=None, mandatory=True,), - rh_annotation=dict(extensions=None, mandatory=True,), - rh_pial=dict(extensions=None, mandatory=True,), - rh_ribbon=dict(extensions=None, mandatory=True,), - rh_white=dict(extensions=None, mandatory=True,), - ribbon=dict(extensions=None, mandatory=True,), - rip_unknown=dict(argstr="--rip-unknown",), - subject_id=dict(argstr="--s %s", mandatory=True, usedefault=True,), + ctxseg=dict( + argstr="--ctxseg %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + filled=dict( + extensions=None, + ), + hypo_wm=dict( + argstr="--hypo-as-wm", + ), + label_wm=dict( + argstr="--labelwm", + ), + lh_annotation=dict( + extensions=None, + mandatory=True, + ), + lh_pial=dict( + extensions=None, + mandatory=True, + ), + lh_ribbon=dict( + extensions=None, + mandatory=True, + ), + lh_white=dict( + extensions=None, + mandatory=True, + ), + out_file=dict( + 
argstr="--o %s", + extensions=None, + mandatory=True, + ), + rh_annotation=dict( + extensions=None, + mandatory=True, + ), + rh_pial=dict( + extensions=None, + mandatory=True, + ), + rh_ribbon=dict( + extensions=None, + mandatory=True, + ), + rh_white=dict( + extensions=None, + mandatory=True, + ), + ribbon=dict( + extensions=None, + mandatory=True, + ), + rip_unknown=dict( + argstr="--rip-unknown", + ), + subject_id=dict( + argstr="--s %s", + mandatory=True, + usedefault=True, + ), subjects_dir=dict(), - volmask=dict(argstr="--volmask",), + volmask=dict( + argstr="--volmask", + ), ) inputs = Aparc2Aseg.input_spec() @@ -36,7 +94,12 @@ def test_Aparc2Aseg_inputs(): def test_Aparc2Aseg_outputs(): - output_map = dict(out_file=dict(argstr="%s", extensions=None,),) + output_map = dict( + out_file=dict( + argstr="%s", + extensions=None, + ), + ) outputs = Aparc2Aseg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py b/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py index 295a376884..e2738ac746 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py @@ -4,10 +4,23 @@ def test_Apas2Aseg_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="--i %s", extensions=None, mandatory=True,), - out_file=dict(argstr="--o %s", extensions=None, mandatory=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="--i %s", + extensions=None, + mandatory=True, + ), + out_file=dict( + argstr="--o %s", + extensions=None, + mandatory=True, + ), subjects_dir=dict(), ) inputs = Apas2Aseg.input_spec() @@ -18,7 +31,12 @@ def test_Apas2Aseg_inputs(): def test_Apas2Aseg_outputs(): - output_map = dict(out_file=dict(argstr="%s", extensions=None,),) + output_map = dict( + out_file=dict( + argstr="%s", + extensions=None, + ), + ) outputs = Apas2Aseg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py b/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py index 44bd9eba25..b3a8e80806 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py @@ -4,13 +4,34 @@ def test_ApplyMask_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), - invert_xfm=dict(argstr="-invert",), - keep_mask_deletion_edits=dict(argstr="-keep_mask_deletion_edits",), - mask_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - mask_thresh=dict(argstr="-T %.4f",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + invert_xfm=dict( + argstr="-invert", + ), + keep_mask_deletion_edits=dict( + argstr="-keep_mask_deletion_edits", + ), + mask_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + mask_thresh=dict( + argstr="-T %.4f", + ), out_file=dict( argstr="%s", extensions=None, @@ -21,11 +42,24 @@ def test_ApplyMask_inputs(): position=-1, ), subjects_dir=dict(), - transfer=dict(argstr="-transfer %d",), - use_abs=dict(argstr="-abs",), - xfm_file=dict(argstr="-xform %s", extensions=None,), - 
xfm_source=dict(argstr="-lta_src %s", extensions=None,), - xfm_target=dict(argstr="-lta_dst %s", extensions=None,), + transfer=dict( + argstr="-transfer %d", + ), + use_abs=dict( + argstr="-abs", + ), + xfm_file=dict( + argstr="-xform %s", + extensions=None, + ), + xfm_source=dict( + argstr="-lta_src %s", + extensions=None, + ), + xfm_target=dict( + argstr="-lta_dst %s", + extensions=None, + ), ) inputs = ApplyMask.input_spec() @@ -35,7 +69,11 @@ def test_ApplyMask_inputs(): def test_ApplyMask_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ApplyMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py b/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py index 1eedade07a..2aa4ae59f8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py @@ -4,8 +4,13 @@ def test_ApplyVolTransform_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), fs_target=dict( argstr="--fstarg", mandatory=True, @@ -27,9 +32,16 @@ def test_ApplyVolTransform_inputs(): "subject", ), ), - interp=dict(argstr="--interp %s",), - inverse=dict(argstr="--inv",), - invert_morph=dict(argstr="--inv-morph", requires=["m3z_file"],), + interp=dict( + argstr="--interp %s", + ), + inverse=dict( + argstr="--inv", + ), + invert_morph=dict( + argstr="--inv-morph", + requires=["m3z_file"], + ), lta_file=dict( argstr="--lta %s", extensions=None, @@ -60,7 +72,10 @@ def test_ApplyVolTransform_inputs(): "subject", ), ), - m3z_file=dict(argstr="--m3z %s", extensions=None,), + m3z_file=dict( + argstr="--m3z %s", + extensions=None, + ), mni_152_reg=dict( argstr="--regheader", mandatory=True, @@ -75,8 +90,13 @@ def test_ApplyVolTransform_inputs(): "subject", ), ), - no_ded_m3z_path=dict(argstr="--noDefM3zPath", requires=["m3z_file"],), - no_resample=dict(argstr="--no-resample",), + no_ded_m3z_path=dict( + argstr="--noDefM3zPath", + requires=["m3z_file"], + ), + no_resample=dict( + argstr="--no-resample", + ), reg_file=dict( argstr="--reg %s", extensions=None, @@ -107,7 +127,10 @@ def test_ApplyVolTransform_inputs(): ), ), source_file=dict( - argstr="--mov %s", copyfile=False, extensions=None, mandatory=True, + argstr="--mov %s", + copyfile=False, + extensions=None, + mandatory=True, ), subject=dict( argstr="--s %s", @@ -125,16 +148,24 @@ def test_ApplyVolTransform_inputs(): ), subjects_dir=dict(), tal=dict( - argstr="--tal", mandatory=True, xor=("target_file", "tal", "fs_target"), + argstr="--tal", + mandatory=True, + xor=("target_file", "tal", "fs_target"), + ), + tal_resolution=dict( + argstr="--talres %.10f", ), - tal_resolution=dict(argstr="--talres %.10f",), target_file=dict( argstr="--targ %s", extensions=None, mandatory=True, xor=("target_file", "tal", "fs_target"), ), - transformed_file=dict(argstr="--o %s", extensions=None, genfile=True,), + transformed_file=dict( + argstr="--o %s", + extensions=None, + genfile=True, + ), xfm_reg_file=dict( argstr="--xfm %s", extensions=None, @@ -159,7 +190,11 @@ def test_ApplyVolTransform_inputs(): def test_ApplyVolTransform_outputs(): - output_map = dict(transformed_file=dict(extensions=None,),) + output_map = dict( + transformed_file=dict( + extensions=None, + ), + ) outputs = 
ApplyVolTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py b/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py index 28211d63db..e7a95d1a3d 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py @@ -4,35 +4,102 @@ def test_Binarize_inputs(): input_map = dict( - abs=dict(argstr="--abs",), - args=dict(argstr="%s",), - bin_col_num=dict(argstr="--bincol",), - bin_val=dict(argstr="--binval %d",), - bin_val_not=dict(argstr="--binvalnot %d",), - binary_file=dict(argstr="--o %s", extensions=None, genfile=True,), - count_file=dict(argstr="--count %s",), - dilate=dict(argstr="--dilate %d",), - environ=dict(nohash=True, usedefault=True,), - erode=dict(argstr="--erode %d",), - erode2d=dict(argstr="--erode2d %d",), - frame_no=dict(argstr="--frame %s",), - in_file=dict(argstr="--i %s", copyfile=False, extensions=None, mandatory=True,), - invert=dict(argstr="--inv",), - mask_file=dict(argstr="--mask maskvol", extensions=None,), - mask_thresh=dict(argstr="--mask-thresh %f",), - match=dict(argstr="--match %d...",), - max=dict(argstr="--max %f", xor=["wm_ven_csf"],), - merge_file=dict(argstr="--merge %s", extensions=None,), - min=dict(argstr="--min %f", xor=["wm_ven_csf"],), - out_type=dict(argstr="",), - rmax=dict(argstr="--rmax %f",), - rmin=dict(argstr="--rmin %f",), + abs=dict( + argstr="--abs", + ), + args=dict( + argstr="%s", + ), + bin_col_num=dict( + argstr="--bincol", + ), + bin_val=dict( + argstr="--binval %d", + ), + bin_val_not=dict( + argstr="--binvalnot %d", + ), + binary_file=dict( + argstr="--o %s", + extensions=None, + genfile=True, + ), + count_file=dict( + argstr="--count %s", + ), + dilate=dict( + argstr="--dilate %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + erode=dict( + argstr="--erode %d", + ), + erode2d=dict( + argstr="--erode2d %d", + ), + frame_no=dict( + argstr="--frame %s", + ), + in_file=dict( + argstr="--i %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + invert=dict( + argstr="--inv", + ), + mask_file=dict( + argstr="--mask maskvol", + extensions=None, + ), + mask_thresh=dict( + argstr="--mask-thresh %f", + ), + match=dict( + argstr="--match %d...", + ), + max=dict( + argstr="--max %f", + xor=["wm_ven_csf"], + ), + merge_file=dict( + argstr="--merge %s", + extensions=None, + ), + min=dict( + argstr="--min %f", + xor=["wm_ven_csf"], + ), + out_type=dict( + argstr="", + ), + rmax=dict( + argstr="--rmax %f", + ), + rmin=dict( + argstr="--rmin %f", + ), subjects_dir=dict(), - ventricles=dict(argstr="--ventricles",), - wm=dict(argstr="--wm",), - wm_ven_csf=dict(argstr="--wm+vcsf", xor=["min", "max"],), - zero_edges=dict(argstr="--zero-edges",), - zero_slice_edge=dict(argstr="--zero-slice-edges",), + ventricles=dict( + argstr="--ventricles", + ), + wm=dict( + argstr="--wm", + ), + wm_ven_csf=dict( + argstr="--wm+vcsf", + xor=["min", "max"], + ), + zero_edges=dict( + argstr="--zero-edges", + ), + zero_slice_edge=dict( + argstr="--zero-slice-edges", + ), ) inputs = Binarize.input_spec() @@ -43,7 +110,12 @@ def test_Binarize_inputs(): def test_Binarize_outputs(): output_map = dict( - binary_file=dict(extensions=None,), count_file=dict(extensions=None,), + binary_file=dict( + extensions=None, + ), + count_file=dict( + extensions=None, + ), ) outputs = Binarize.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py 
b/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py index dfc1f86d97..d89e51841b 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py @@ -4,22 +4,67 @@ def test_CALabel_inputs(): input_map = dict( - align=dict(argstr="-align",), - args=dict(argstr="%s",), - aseg=dict(argstr="-aseg %s", extensions=None,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-4,), - in_vol=dict(argstr="-r %s", extensions=None,), - intensities=dict(argstr="-r %s", extensions=None,), - label=dict(argstr="-l %s", extensions=None,), - no_big_ventricles=dict(argstr="-nobigventricles",), + align=dict( + argstr="-align", + ), + args=dict( + argstr="%s", + ), + aseg=dict( + argstr="-aseg %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-4, + ), + in_vol=dict( + argstr="-r %s", + extensions=None, + ), + intensities=dict( + argstr="-r %s", + extensions=None, + ), + label=dict( + argstr="-l %s", + extensions=None, + ), + no_big_ventricles=dict( + argstr="-nobigventricles", + ), num_threads=dict(), - out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), - prior=dict(argstr="-prior %.1f",), - relabel_unlikely=dict(argstr="-relabel_unlikely %d %.1f",), + out_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), + prior=dict( + argstr="-prior %.1f", + ), + relabel_unlikely=dict( + argstr="-relabel_unlikely %d %.1f", + ), subjects_dir=dict(), - template=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - transform=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + template=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + transform=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), ) inputs = CALabel.input_spec() @@ -29,7 +74,11 @@ def test_CALabel_inputs(): def test_CALabel_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = CALabel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py b/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py index 84018f9a01..58788da3a4 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py @@ -4,13 +4,37 @@ def test_CANormalize_inputs(): input_map = dict( - args=dict(argstr="%s",), - atlas=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), - control_points=dict(argstr="-c %s", extensions=None,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-4,), - long_file=dict(argstr="-long %s", extensions=None,), - mask=dict(argstr="-mask %s", extensions=None,), + args=dict( + argstr="%s", + ), + atlas=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + control_points=dict( + argstr="-c %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-4, + ), + long_file=dict( + argstr="-long %s", + extensions=None, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), out_file=dict( argstr="%s", extensions=None, @@ -21,7 +45,12 @@ def 
test_CANormalize_inputs(): position=-1, ), subjects_dir=dict(), - transform=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + transform=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), ) inputs = CANormalize.input_spec() @@ -32,7 +61,12 @@ def test_CANormalize_inputs(): def test_CANormalize_outputs(): output_map = dict( - control_points=dict(extensions=None,), out_file=dict(extensions=None,), + control_points=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), ) outputs = CANormalize.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py b/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py index 5ce22ecfa0..3f410524f0 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py @@ -4,21 +4,59 @@ def test_CARegister_inputs(): input_map = dict( - A=dict(argstr="-A %d",), - align=dict(argstr="-align-%s",), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), - invert_and_save=dict(argstr="-invert-and-save", position=-4,), - l_files=dict(argstr="-l %s",), - levels=dict(argstr="-levels %d",), - mask=dict(argstr="-mask %s", extensions=None,), - no_big_ventricles=dict(argstr="-nobigventricles",), + A=dict( + argstr="-A %d", + ), + align=dict( + argstr="-align-%s", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + invert_and_save=dict( + argstr="-invert-and-save", + position=-4, + ), + l_files=dict( + argstr="-l %s", + ), + levels=dict( + argstr="-levels %d", + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + no_big_ventricles=dict( + argstr="-nobigventricles", + ), num_threads=dict(), - out_file=dict(argstr="%s", extensions=None, genfile=True, position=-1,), + out_file=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), subjects_dir=dict(), - template=dict(argstr="%s", extensions=None, position=-2,), - transform=dict(argstr="-T %s", extensions=None,), + template=dict( + argstr="%s", + extensions=None, + position=-2, + ), + transform=dict( + argstr="-T %s", + extensions=None, + ), ) inputs = CARegister.input_spec() @@ -28,7 +66,11 @@ def test_CARegister_inputs(): def test_CARegister_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = CARegister.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py b/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py index 8dcede267e..d14e203079 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py @@ -4,8 +4,13 @@ def test_CheckTalairachAlignment_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( argstr="-xfm %s", extensions=None, @@ -13,9 +18,17 @@ def test_CheckTalairachAlignment_inputs(): position=-1, xor=["subject"], ), - subject=dict(argstr="-subj %s", mandatory=True, position=-1, xor=["in_file"],), + subject=dict( + argstr="-subj %s", + mandatory=True, + 
position=-1, + xor=["in_file"], + ), subjects_dir=dict(), - threshold=dict(argstr="-T %.3f", usedefault=True,), + threshold=dict( + argstr="-T %.3f", + usedefault=True, + ), ) inputs = CheckTalairachAlignment.input_spec() @@ -25,7 +38,11 @@ def test_CheckTalairachAlignment_inputs(): def test_CheckTalairachAlignment_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = CheckTalairachAlignment.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py b/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py index c841be04ad..992f3e308c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py @@ -4,26 +4,70 @@ def test_Concatenate_inputs(): input_map = dict( - add_val=dict(argstr="--add %f",), - args=dict(argstr="%s",), - combine=dict(argstr="--combine",), - concatenated_file=dict(argstr="--o %s", extensions=None, genfile=True,), - environ=dict(nohash=True, usedefault=True,), - gmean=dict(argstr="--gmean %d",), - in_files=dict(argstr="--i %s...", mandatory=True,), - keep_dtype=dict(argstr="--keep-datatype",), - mask_file=dict(argstr="--mask %s", extensions=None,), - max_bonfcor=dict(argstr="--max-bonfcor",), - max_index=dict(argstr="--max-index",), - mean_div_n=dict(argstr="--mean-div-n",), - multiply_by=dict(argstr="--mul %f",), - multiply_matrix_file=dict(argstr="--mtx %s", extensions=None,), - paired_stats=dict(argstr="--paired-%s",), - sign=dict(argstr="--%s",), - sort=dict(argstr="--sort",), - stats=dict(argstr="--%s",), + add_val=dict( + argstr="--add %f", + ), + args=dict( + argstr="%s", + ), + combine=dict( + argstr="--combine", + ), + concatenated_file=dict( + argstr="--o %s", + extensions=None, + genfile=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gmean=dict( + argstr="--gmean %d", + ), + in_files=dict( + argstr="--i %s...", + mandatory=True, + ), + keep_dtype=dict( + argstr="--keep-datatype", + ), + mask_file=dict( + argstr="--mask %s", + extensions=None, + ), + max_bonfcor=dict( + argstr="--max-bonfcor", + ), + max_index=dict( + argstr="--max-index", + ), + mean_div_n=dict( + argstr="--mean-div-n", + ), + multiply_by=dict( + argstr="--mul %f", + ), + multiply_matrix_file=dict( + argstr="--mtx %s", + extensions=None, + ), + paired_stats=dict( + argstr="--paired-%s", + ), + sign=dict( + argstr="--%s", + ), + sort=dict( + argstr="--sort", + ), + stats=dict( + argstr="--%s", + ), subjects_dir=dict(), - vote=dict(argstr="--vote",), + vote=dict( + argstr="--vote", + ), ) inputs = Concatenate.input_spec() @@ -33,7 +77,11 @@ def test_Concatenate_inputs(): def test_Concatenate_outputs(): - output_map = dict(concatenated_file=dict(extensions=None,),) + output_map = dict( + concatenated_file=dict( + extensions=None, + ), + ) outputs = Concatenate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py b/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py index b9750aecbf..dad221b734 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py @@ -4,13 +4,33 @@ def test_ConcatenateLTA_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_lta1=dict(argstr="%s", extensions=None, mandatory=True, 
position=-3,), - in_lta2=dict(argstr="%s", mandatory=True, position=-2,), - invert_1=dict(argstr="-invert1",), - invert_2=dict(argstr="-invert2",), - invert_out=dict(argstr="-invertout",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_lta1=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + in_lta2=dict( + argstr="%s", + mandatory=True, + position=-2, + ), + invert_1=dict( + argstr="-invert1", + ), + invert_2=dict( + argstr="-invert2", + ), + invert_out=dict( + argstr="-invertout", + ), out_file=dict( argstr="%s", extensions=None, @@ -20,8 +40,12 @@ def test_ConcatenateLTA_inputs(): name_template="%s_concat", position=-1, ), - out_type=dict(argstr="-out_type %d",), - subject=dict(argstr="-subject %s",), + out_type=dict( + argstr="-out_type %d", + ), + subject=dict( + argstr="-subject %s", + ), subjects_dir=dict(), tal_source_file=dict( argstr="-tal %s", @@ -30,7 +54,10 @@ def test_ConcatenateLTA_inputs(): requires=["tal_template_file"], ), tal_template_file=dict( - argstr="%s", extensions=None, position=-4, requires=["tal_source_file"], + argstr="%s", + extensions=None, + position=-4, + requires=["tal_source_file"], ), ) inputs = ConcatenateLTA.input_spec() @@ -41,7 +68,11 @@ def test_ConcatenateLTA_inputs(): def test_ConcatenateLTA_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ConcatenateLTA.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py b/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py index 68e450315d..8409f26757 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py @@ -4,18 +4,48 @@ def test_Contrast_inputs(): input_map = dict( - annotation=dict(extensions=None, mandatory=True,), - args=dict(argstr="%s",), + annotation=dict( + extensions=None, + mandatory=True, + ), + args=dict( + argstr="%s", + ), copy_inputs=dict(), - cortex=dict(extensions=None, mandatory=True,), - environ=dict(nohash=True, usedefault=True,), - hemisphere=dict(argstr="--%s-only", mandatory=True,), - orig=dict(extensions=None, mandatory=True,), - rawavg=dict(extensions=None, mandatory=True,), - subject_id=dict(argstr="--s %s", mandatory=True, usedefault=True,), + cortex=dict( + extensions=None, + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hemisphere=dict( + argstr="--%s-only", + mandatory=True, + ), + orig=dict( + extensions=None, + mandatory=True, + ), + rawavg=dict( + extensions=None, + mandatory=True, + ), + subject_id=dict( + argstr="--s %s", + mandatory=True, + usedefault=True, + ), subjects_dir=dict(), - thickness=dict(extensions=None, mandatory=True,), - white=dict(extensions=None, mandatory=True,), + thickness=dict( + extensions=None, + mandatory=True, + ), + white=dict( + extensions=None, + mandatory=True, + ), ) inputs = Contrast.input_spec() @@ -26,9 +56,15 @@ def test_Contrast_inputs(): def test_Contrast_outputs(): output_map = dict( - out_contrast=dict(extensions=None,), - out_log=dict(extensions=None,), - out_stats=dict(extensions=None,), + out_contrast=dict( + extensions=None, + ), + out_log=dict( + extensions=None, + ), + out_stats=dict( + extensions=None, + ), ) outputs = Contrast.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py b/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py index 
906d961740..c230edb8ba 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py @@ -4,18 +4,37 @@ def test_Curvature_inputs(): input_map = dict( - args=dict(argstr="%s",), - averages=dict(argstr="-a %d",), + args=dict( + argstr="%s", + ), + averages=dict( + argstr="-a %d", + ), copy_input=dict(), - distances=dict(argstr="-distances %d %d",), - environ=dict(nohash=True, usedefault=True,), + distances=dict( + argstr="-distances %d %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-2, + argstr="%s", + copyfile=True, + extensions=None, + mandatory=True, + position=-2, + ), + n=dict( + argstr="-n", + ), + save=dict( + argstr="-w", ), - n=dict(argstr="-n",), - save=dict(argstr="-w",), subjects_dir=dict(), - threshold=dict(argstr="-thresh %.3f",), + threshold=dict( + argstr="-thresh %.3f", + ), ) inputs = Curvature.input_spec() @@ -26,7 +45,12 @@ def test_Curvature_inputs(): def test_Curvature_outputs(): output_map = dict( - out_gauss=dict(extensions=None,), out_mean=dict(extensions=None,), + out_gauss=dict( + extensions=None, + ), + out_mean=dict( + extensions=None, + ), ) outputs = Curvature.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py b/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py index 3b69b41def..4e8e3d5bc2 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py @@ -4,13 +4,34 @@ def test_CurvatureStats_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), copy_inputs=dict(), - curvfile1=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - curvfile2=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), - environ=dict(nohash=True, usedefault=True,), - hemisphere=dict(argstr="%s", mandatory=True, position=-3,), - min_max=dict(argstr="-m",), + curvfile1=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + curvfile2=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hemisphere=dict( + argstr="%s", + mandatory=True, + position=-3, + ), + min_max=dict( + argstr="-m", + ), out_file=dict( argstr="-o %s", extensions=None, @@ -18,11 +39,23 @@ def test_CurvatureStats_inputs(): name_source=["hemisphere"], name_template="%s.curv.stats", ), - subject_id=dict(argstr="%s", mandatory=True, position=-4, usedefault=True,), + subject_id=dict( + argstr="%s", + mandatory=True, + position=-4, + usedefault=True, + ), subjects_dir=dict(), - surface=dict(argstr="-F %s", extensions=None,), - values=dict(argstr="-G",), - write=dict(argstr="--writeCurvatureFiles",), + surface=dict( + argstr="-F %s", + extensions=None, + ), + values=dict( + argstr="-G", + ), + write=dict( + argstr="--writeCurvatureFiles", + ), ) inputs = CurvatureStats.input_spec() @@ -32,7 +65,11 @@ def test_CurvatureStats_inputs(): def test_CurvatureStats_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = CurvatureStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py index 3eae700dc6..bda2620fe1 100644 --- 
a/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py @@ -4,16 +4,35 @@ def test_DICOMConvert_inputs(): input_map = dict( - args=dict(argstr="%s",), - base_output_dir=dict(mandatory=True,), - dicom_dir=dict(mandatory=True,), - dicom_info=dict(extensions=None,), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + base_output_dir=dict( + mandatory=True, + ), + dicom_dir=dict( + mandatory=True, + ), + dicom_info=dict( + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), file_mapping=dict(), - ignore_single_slice=dict(requires=["dicom_info"],), - out_type=dict(usedefault=True,), - seq_list=dict(requires=["dicom_info"],), - subject_dir_template=dict(usedefault=True,), + ignore_single_slice=dict( + requires=["dicom_info"], + ), + out_type=dict( + usedefault=True, + ), + seq_list=dict( + requires=["dicom_info"], + ), + subject_dir_template=dict( + usedefault=True, + ), subject_id=dict(), subjects_dir=dict(), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py b/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py index d87052cebc..7bf1b895f1 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py @@ -4,11 +4,26 @@ def test_EMRegister_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), - mask=dict(argstr="-mask %s", extensions=None,), - nbrspacing=dict(argstr="-uns %d",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + nbrspacing=dict( + argstr="-uns %d", + ), num_threads=dict(), out_file=dict( argstr="%s", @@ -19,10 +34,20 @@ def test_EMRegister_inputs(): name_template="%s_transform.lta", position=-1, ), - skull=dict(argstr="-skull",), + skull=dict( + argstr="-skull", + ), subjects_dir=dict(), - template=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - transform=dict(argstr="-t %s", extensions=None,), + template=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + transform=dict( + argstr="-t %s", + extensions=None, + ), ) inputs = EMRegister.input_spec() @@ -32,7 +57,11 @@ def test_EMRegister_inputs(): def test_EMRegister_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = EMRegister.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py b/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py index 725980b7ab..5c51ed848d 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py @@ -4,13 +4,40 @@ def test_EditWMwithAseg_inputs(): input_map = dict( - args=dict(argstr="%s",), - brain_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-4,), - keep_in=dict(argstr="-keep-in",), - out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), - seg_file=dict(argstr="%s", extensions=None, mandatory=True, 
position=-2,), + args=dict( + argstr="%s", + ), + brain_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-4, + ), + keep_in=dict( + argstr="-keep-in", + ), + out_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), + seg_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), subjects_dir=dict(), ) inputs = EditWMwithAseg.input_spec() @@ -21,7 +48,11 @@ def test_EditWMwithAseg_inputs(): def test_EditWMwithAseg_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = EditWMwithAseg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py b/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py index a90be6bca4..9d05019824 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py @@ -4,9 +4,19 @@ def test_EulerNumber_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), subjects_dir=dict(), ) inputs = EulerNumber.input_spec() @@ -17,7 +27,10 @@ def test_EulerNumber_inputs(): def test_EulerNumber_outputs(): - output_map = dict(defects=dict(), euler=dict(),) + output_map = dict( + defects=dict(), + euler=dict(), + ) outputs = EulerNumber.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py b/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py index 424d6bdb23..59997ad5b4 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py @@ -4,9 +4,19 @@ def test_ExtractMainComponent_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), out_file=dict( argstr="%s", extensions=None, @@ -23,7 +33,11 @@ def test_ExtractMainComponent_inputs(): def test_ExtractMainComponent_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ExtractMainComponent.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py b/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py index 87f836e34b..aa53727cc8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py @@ -4,8 +4,13 @@ def test_FSCommand_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), subjects_dir=dict(), ) inputs = FSCommand.input_spec() 
diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py b/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py index 165191e96c..f61b52c1ea 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py @@ -4,8 +4,13 @@ def test_FSCommandOpenMP_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), num_threads=dict(), subjects_dir=dict(), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py b/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py index 162962f578..03cf55eb69 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py @@ -4,8 +4,13 @@ def test_FSScriptCommand_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), subjects_dir=dict(), ) inputs = FSScriptCommand.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py b/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py index 3133f52445..7842c5333a 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py @@ -4,11 +4,24 @@ def test_FitMSParams_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), flip_list=dict(), - in_files=dict(argstr="%s", mandatory=True, position=-2,), - out_dir=dict(argstr="%s", genfile=True, position=-1,), + in_files=dict( + argstr="%s", + mandatory=True, + position=-2, + ), + out_dir=dict( + argstr="%s", + genfile=True, + position=-1, + ), subjects_dir=dict(), te_list=dict(), tr_list=dict(), @@ -23,9 +36,15 @@ def test_FitMSParams_inputs(): def test_FitMSParams_outputs(): output_map = dict( - pd_image=dict(extensions=None,), - t1_image=dict(extensions=None,), - t2star_image=dict(extensions=None,), + pd_image=dict( + extensions=None, + ), + t1_image=dict( + extensions=None, + ), + t2star_image=dict( + extensions=None, + ), ) outputs = FitMSParams.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py b/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py index bfdb140216..0037c02270 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py @@ -4,19 +4,56 @@ def test_FixTopology_inputs(): input_map = dict( - args=dict(argstr="%s",), - copy_inputs=dict(mandatory=True,), - environ=dict(nohash=True, usedefault=True,), - ga=dict(argstr="-ga",), - hemisphere=dict(argstr="%s", mandatory=True, position=-1,), - in_brain=dict(extensions=None, mandatory=True,), - in_inflated=dict(extensions=None, mandatory=True,), - in_orig=dict(extensions=None, mandatory=True,), - in_wm=dict(extensions=None, mandatory=True,), - mgz=dict(argstr="-mgz",), - seed=dict(argstr="-seed %d",), - sphere=dict(argstr="-sphere %s", extensions=None,), - subject_id=dict(argstr="%s", mandatory=True, position=-2, usedefault=True,), + args=dict( + argstr="%s", + ), + copy_inputs=dict( + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ga=dict( + 
argstr="-ga", + ), + hemisphere=dict( + argstr="%s", + mandatory=True, + position=-1, + ), + in_brain=dict( + extensions=None, + mandatory=True, + ), + in_inflated=dict( + extensions=None, + mandatory=True, + ), + in_orig=dict( + extensions=None, + mandatory=True, + ), + in_wm=dict( + extensions=None, + mandatory=True, + ), + mgz=dict( + argstr="-mgz", + ), + seed=dict( + argstr="-seed %d", + ), + sphere=dict( + argstr="-sphere %s", + extensions=None, + ), + subject_id=dict( + argstr="%s", + mandatory=True, + position=-2, + usedefault=True, + ), subjects_dir=dict(), ) inputs = FixTopology.input_spec() @@ -27,7 +64,11 @@ def test_FixTopology_inputs(): def test_FixTopology_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = FixTopology.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py b/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py index 2ffc84eada..12550be8b3 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py @@ -4,15 +4,40 @@ def test_FuseSegmentations_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_norms=dict(argstr="-n %s", mandatory=True,), - in_segmentations=dict(argstr="-a %s", mandatory=True,), - in_segmentations_noCC=dict(argstr="-c %s", mandatory=True,), - out_file=dict(extensions=None, mandatory=True, position=-1,), - subject_id=dict(argstr="%s", position=-3,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_norms=dict( + argstr="-n %s", + mandatory=True, + ), + in_segmentations=dict( + argstr="-a %s", + mandatory=True, + ), + in_segmentations_noCC=dict( + argstr="-c %s", + mandatory=True, + ), + out_file=dict( + extensions=None, + mandatory=True, + position=-1, + ), + subject_id=dict( + argstr="%s", + position=-3, + ), subjects_dir=dict(), - timepoints=dict(argstr="%s", mandatory=True, position=-2,), + timepoints=dict( + argstr="%s", + mandatory=True, + position=-2, + ), ) inputs = FuseSegmentations.input_spec() @@ -22,7 +47,11 @@ def test_FuseSegmentations_inputs(): def test_FuseSegmentations_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = FuseSegmentations.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py b/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py index a883f39732..21c41eb691 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py @@ -4,68 +4,192 @@ def test_GLMFit_inputs(): input_map = dict( - allow_ill_cond=dict(argstr="--illcond",), - allow_repeated_subjects=dict(argstr="--allowsubjrep",), - args=dict(argstr="%s",), - calc_AR1=dict(argstr="--tar1",), - check_opts=dict(argstr="--checkopts",), - compute_log_y=dict(argstr="--logy",), - contrast=dict(argstr="--C %s...",), - cortex=dict(argstr="--cortex", xor=["label_file"],), - debug=dict(argstr="--debug",), + allow_ill_cond=dict( + argstr="--illcond", + ), + allow_repeated_subjects=dict( + argstr="--allowsubjrep", + ), + args=dict( + argstr="%s", + ), + calc_AR1=dict( + argstr="--tar1", + ), + check_opts=dict( + argstr="--checkopts", + ), + compute_log_y=dict( + argstr="--logy", 
+ ), + contrast=dict( + argstr="--C %s...", + ), + cortex=dict( + argstr="--cortex", + xor=["label_file"], + ), + debug=dict( + argstr="--debug", + ), design=dict( - argstr="--X %s", extensions=None, xor=("fsgd", "design", "one_sample"), + argstr="--X %s", + extensions=None, + xor=("fsgd", "design", "one_sample"), + ), + diag=dict( + argstr="--diag %d", + ), + diag_cluster=dict( + argstr="--diag-cluster", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_fx_dof=dict( + argstr="--ffxdof %d", + xor=["fixed_fx_dof_file"], ), - diag=dict(argstr="--diag %d",), - diag_cluster=dict(argstr="--diag-cluster",), - environ=dict(nohash=True, usedefault=True,), - fixed_fx_dof=dict(argstr="--ffxdof %d", xor=["fixed_fx_dof_file"],), fixed_fx_dof_file=dict( - argstr="--ffxdofdat %d", extensions=None, xor=["fixed_fx_dof"], + argstr="--ffxdofdat %d", + extensions=None, + xor=["fixed_fx_dof"], + ), + fixed_fx_var=dict( + argstr="--yffxvar %s", + extensions=None, + ), + force_perm=dict( + argstr="--perm-force", + ), + fsgd=dict( + argstr="--fsgd %s %s", + xor=("fsgd", "design", "one_sample"), + ), + fwhm=dict( + argstr="--fwhm %f", + ), + glm_dir=dict( + argstr="--glmdir %s", + genfile=True, ), - fixed_fx_var=dict(argstr="--yffxvar %s", extensions=None,), - force_perm=dict(argstr="--perm-force",), - fsgd=dict(argstr="--fsgd %s %s", xor=("fsgd", "design", "one_sample"),), - fwhm=dict(argstr="--fwhm %f",), - glm_dir=dict(argstr="--glmdir %s", genfile=True,), hemi=dict(), - in_file=dict(argstr="--y %s", copyfile=False, extensions=None, mandatory=True,), - invert_mask=dict(argstr="--mask-inv",), - label_file=dict(argstr="--label %s", extensions=None, xor=["cortex"],), - mask_file=dict(argstr="--mask %s", extensions=None,), - no_contrast_ok=dict(argstr="--no-contrasts-ok",), - no_est_fwhm=dict(argstr="--no-est-fwhm",), - no_mask_smooth=dict(argstr="--no-mask-smooth",), - no_prune=dict(argstr="--no-prune", xor=["prunethresh"],), + in_file=dict( + argstr="--y %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + invert_mask=dict( + argstr="--mask-inv", + ), + label_file=dict( + argstr="--label %s", + extensions=None, + xor=["cortex"], + ), + mask_file=dict( + argstr="--mask %s", + extensions=None, + ), + no_contrast_ok=dict( + argstr="--no-contrasts-ok", + ), + no_est_fwhm=dict( + argstr="--no-est-fwhm", + ), + no_mask_smooth=dict( + argstr="--no-mask-smooth", + ), + no_prune=dict( + argstr="--no-prune", + xor=["prunethresh"], + ), one_sample=dict( - argstr="--osgm", xor=("one_sample", "fsgd", "design", "contrast"), - ), - pca=dict(argstr="--pca",), - per_voxel_reg=dict(argstr="--pvr %s...",), - profile=dict(argstr="--profile %d",), - prune=dict(argstr="--prune",), - prune_thresh=dict(argstr="--prune_thr %f", xor=["noprune"],), - resynth_test=dict(argstr="--resynthtest %d",), - save_cond=dict(argstr="--save-cond",), - save_estimate=dict(argstr="--yhat-save",), - save_res_corr_mtx=dict(argstr="--eres-scm",), - save_residual=dict(argstr="--eres-save",), - seed=dict(argstr="--seed %d",), - self_reg=dict(argstr="--selfreg %d %d %d",), - sim_done_file=dict(argstr="--sim-done %s", extensions=None,), - sim_sign=dict(argstr="--sim-sign %s",), - simulation=dict(argstr="--sim %s %d %f %s",), + argstr="--osgm", + xor=("one_sample", "fsgd", "design", "contrast"), + ), + pca=dict( + argstr="--pca", + ), + per_voxel_reg=dict( + argstr="--pvr %s...", + ), + profile=dict( + argstr="--profile %d", + ), + prune=dict( + argstr="--prune", + ), + prune_thresh=dict( + argstr="--prune_thr %f", + 
xor=["noprune"], + ), + resynth_test=dict( + argstr="--resynthtest %d", + ), + save_cond=dict( + argstr="--save-cond", + ), + save_estimate=dict( + argstr="--yhat-save", + ), + save_res_corr_mtx=dict( + argstr="--eres-scm", + ), + save_residual=dict( + argstr="--eres-save", + ), + seed=dict( + argstr="--seed %d", + ), + self_reg=dict( + argstr="--selfreg %d %d %d", + ), + sim_done_file=dict( + argstr="--sim-done %s", + extensions=None, + ), + sim_sign=dict( + argstr="--sim-sign %s", + ), + simulation=dict( + argstr="--sim %s %d %f %s", + ), subject_id=dict(), subjects_dir=dict(), - surf=dict(argstr="--surf %s %s %s", requires=["subject_id", "hemi"],), - surf_geo=dict(usedefault=True,), - synth=dict(argstr="--synth",), - uniform=dict(argstr="--uniform %f %f",), - var_fwhm=dict(argstr="--var-fwhm %f",), - vox_dump=dict(argstr="--voxdump %d %d %d",), - weight_file=dict(extensions=None, xor=["weighted_ls"],), - weight_inv=dict(argstr="--w-inv", xor=["weighted_ls"],), - weight_sqrt=dict(argstr="--w-sqrt", xor=["weighted_ls"],), + surf=dict( + argstr="--surf %s %s %s", + requires=["subject_id", "hemi"], + ), + surf_geo=dict( + usedefault=True, + ), + synth=dict( + argstr="--synth", + ), + uniform=dict( + argstr="--uniform %f %f", + ), + var_fwhm=dict( + argstr="--var-fwhm %f", + ), + vox_dump=dict( + argstr="--voxdump %d %d %d", + ), + weight_file=dict( + extensions=None, + xor=["weighted_ls"], + ), + weight_inv=dict( + argstr="--w-inv", + xor=["weighted_ls"], + ), + weight_sqrt=dict( + argstr="--w-sqrt", + xor=["weighted_ls"], + ), weighted_ls=dict( argstr="--wls %s", extensions=None, @@ -81,23 +205,47 @@ def test_GLMFit_inputs(): def test_GLMFit_outputs(): output_map = dict( - beta_file=dict(extensions=None,), - dof_file=dict(extensions=None,), - error_file=dict(extensions=None,), - error_stddev_file=dict(extensions=None,), - error_var_file=dict(extensions=None,), - estimate_file=dict(extensions=None,), - frame_eigenvectors=dict(extensions=None,), + beta_file=dict( + extensions=None, + ), + dof_file=dict( + extensions=None, + ), + error_file=dict( + extensions=None, + ), + error_stddev_file=dict( + extensions=None, + ), + error_var_file=dict( + extensions=None, + ), + estimate_file=dict( + extensions=None, + ), + frame_eigenvectors=dict( + extensions=None, + ), ftest_file=dict(), - fwhm_file=dict(extensions=None,), + fwhm_file=dict( + extensions=None, + ), gamma_file=dict(), gamma_var_file=dict(), glm_dir=dict(), - mask_file=dict(extensions=None,), + mask_file=dict( + extensions=None, + ), sig_file=dict(), - singular_values=dict(extensions=None,), - spatial_eigenvectors=dict(extensions=None,), - svd_stats_file=dict(extensions=None,), + singular_values=dict( + extensions=None, + ), + spatial_eigenvectors=dict( + extensions=None, + ), + svd_stats_file=dict( + extensions=None, + ), ) outputs = GLMFit.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py b/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py index 2a80c0743b..aa6d5d302e 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py @@ -4,9 +4,18 @@ def test_ImageInfo_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, position=1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + position=1, + ), subjects_dir=dict(), ) inputs = 
ImageInfo.input_spec() @@ -26,7 +35,9 @@ def test_ImageInfo_outputs(): file_format=dict(), info=dict(), orientation=dict(), - out_file=dict(extensions=None,), + out_file=dict( + extensions=None, + ), ph_enc_dir=dict(), vox_sizes=dict(), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py b/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py index 14cd9fa9f0..f62c085839 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py @@ -4,10 +4,25 @@ def test_Jacobian_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_mappedsurf=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - in_origsurf=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_mappedsurf=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + in_origsurf=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), out_file=dict( argstr="%s", extensions=None, @@ -27,7 +42,11 @@ def test_Jacobian_inputs(): def test_Jacobian_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Jacobian.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py index ab59b01867..b70bd34c45 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py @@ -4,8 +4,13 @@ def test_LTAConvert_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_fsl=dict( argstr="--infsl %s", extensions=None, @@ -41,16 +46,39 @@ def test_LTAConvert_inputs(): mandatory=True, xor=("in_lta", "in_fsl", "in_mni", "in_reg", "in_niftyreg", "in_itk"), ), - invert=dict(argstr="--invert",), - ltavox2vox=dict(argstr="--ltavox2vox", requires=["out_lta"],), - out_fsl=dict(argstr="--outfsl %s",), - out_itk=dict(argstr="--outitk %s",), - out_lta=dict(argstr="--outlta %s",), - out_mni=dict(argstr="--outmni %s",), - out_reg=dict(argstr="--outreg %s",), - source_file=dict(argstr="--src %s", extensions=None,), - target_conform=dict(argstr="--trgconform",), - target_file=dict(argstr="--trg %s", extensions=None,), + invert=dict( + argstr="--invert", + ), + ltavox2vox=dict( + argstr="--ltavox2vox", + requires=["out_lta"], + ), + out_fsl=dict( + argstr="--outfsl %s", + ), + out_itk=dict( + argstr="--outitk %s", + ), + out_lta=dict( + argstr="--outlta %s", + ), + out_mni=dict( + argstr="--outmni %s", + ), + out_reg=dict( + argstr="--outreg %s", + ), + source_file=dict( + argstr="--src %s", + extensions=None, + ), + target_conform=dict( + argstr="--trgconform", + ), + target_file=dict( + argstr="--trg %s", + extensions=None, + ), ) inputs = LTAConvert.input_spec() @@ -61,11 +89,21 @@ def test_LTAConvert_inputs(): def test_LTAConvert_outputs(): output_map = dict( - out_fsl=dict(extensions=None,), - out_itk=dict(extensions=None,), - out_lta=dict(extensions=None,), - out_mni=dict(extensions=None,), - out_reg=dict(extensions=None,), + out_fsl=dict( + extensions=None, + ), + out_itk=dict( + extensions=None, + ), + out_lta=dict( + extensions=None, + ), + 
out_mni=dict( + extensions=None, + ), + out_reg=dict( + extensions=None, + ), ) outputs = LTAConvert.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py b/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py index f800c560f7..7e1caf88cc 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py @@ -4,18 +4,46 @@ def test_Label2Annot_inputs(): input_map = dict( - args=dict(argstr="%s",), - color_table=dict(argstr="--ctab %s", extensions=None,), + args=dict( + argstr="%s", + ), + color_table=dict( + argstr="--ctab %s", + extensions=None, + ), copy_inputs=dict(), - environ=dict(nohash=True, usedefault=True,), - hemisphere=dict(argstr="--hemi %s", mandatory=True,), - in_labels=dict(argstr="--l %s...", mandatory=True,), - keep_max=dict(argstr="--maxstatwinner",), - orig=dict(extensions=None, mandatory=True,), - out_annot=dict(argstr="--a %s", mandatory=True,), - subject_id=dict(argstr="--s %s", mandatory=True, usedefault=True,), + environ=dict( + nohash=True, + usedefault=True, + ), + hemisphere=dict( + argstr="--hemi %s", + mandatory=True, + ), + in_labels=dict( + argstr="--l %s...", + mandatory=True, + ), + keep_max=dict( + argstr="--maxstatwinner", + ), + orig=dict( + extensions=None, + mandatory=True, + ), + out_annot=dict( + argstr="--a %s", + mandatory=True, + ), + subject_id=dict( + argstr="--s %s", + mandatory=True, + usedefault=True, + ), subjects_dir=dict(), - verbose_off=dict(argstr="--noverbose",), + verbose_off=dict( + argstr="--noverbose", + ), ) inputs = Label2Annot.input_spec() @@ -25,7 +53,11 @@ def test_Label2Annot_inputs(): def test_Label2Annot_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Label2Annot.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py b/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py index e5e227c5a9..34f99e1a24 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py @@ -4,10 +4,18 @@ def test_Label2Label_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), copy_inputs=dict(), - environ=dict(nohash=True, usedefault=True,), - hemisphere=dict(argstr="--hemi %s", mandatory=True,), + environ=dict( + nohash=True, + usedefault=True, + ), + hemisphere=dict( + argstr="--hemi %s", + mandatory=True, + ), out_file=dict( argstr="--trglabel %s", extensions=None, @@ -16,15 +24,41 @@ def test_Label2Label_inputs(): name_source=["source_label"], name_template="%s_converted", ), - registration_method=dict(argstr="--regmethod %s", usedefault=True,), - source_label=dict(argstr="--srclabel %s", extensions=None, mandatory=True,), - source_sphere_reg=dict(extensions=None, mandatory=True,), - source_subject=dict(argstr="--srcsubject %s", mandatory=True,), - source_white=dict(extensions=None, mandatory=True,), - sphere_reg=dict(extensions=None, mandatory=True,), - subject_id=dict(argstr="--trgsubject %s", mandatory=True, usedefault=True,), + registration_method=dict( + argstr="--regmethod %s", + usedefault=True, + ), + source_label=dict( + argstr="--srclabel %s", + extensions=None, + mandatory=True, + ), + source_sphere_reg=dict( + extensions=None, + mandatory=True, + ), + source_subject=dict( + argstr="--srcsubject %s", + mandatory=True, + ), + 
source_white=dict( + extensions=None, + mandatory=True, + ), + sphere_reg=dict( + extensions=None, + mandatory=True, + ), + subject_id=dict( + argstr="--trgsubject %s", + mandatory=True, + usedefault=True, + ), subjects_dir=dict(), - white=dict(extensions=None, mandatory=True,), + white=dict( + extensions=None, + mandatory=True, + ), ) inputs = Label2Label.input_spec() @@ -34,7 +68,11 @@ def test_Label2Label_inputs(): def test_Label2Label_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Label2Label.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py b/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py index dd890531c9..aa1b19f564 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py @@ -17,23 +17,50 @@ def test_Label2Vol_inputs(): mandatory=True, xor=("label_file", "annot_file", "seg_file", "aparc_aseg"), ), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - fill_thresh=dict(argstr="--fillthresh %g",), - hemi=dict(argstr="--hemi %s",), - identity=dict(argstr="--identity", xor=("reg_file", "reg_header", "identity"),), - invert_mtx=dict(argstr="--invertmtx",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fill_thresh=dict( + argstr="--fillthresh %g", + ), + hemi=dict( + argstr="--hemi %s", + ), + identity=dict( + argstr="--identity", + xor=("reg_file", "reg_header", "identity"), + ), + invert_mtx=dict( + argstr="--invertmtx", + ), label_file=dict( argstr="--label %s...", copyfile=False, mandatory=True, xor=("label_file", "annot_file", "seg_file", "aparc_aseg"), ), - label_hit_file=dict(argstr="--hits %s", extensions=None,), - label_voxel_volume=dict(argstr="--labvoxvol %f",), - map_label_stat=dict(argstr="--label-stat %s", extensions=None,), - native_vox2ras=dict(argstr="--native-vox2ras",), - proj=dict(argstr="--proj %s %f %f %f", requires=("subject_id", "hemi"),), + label_hit_file=dict( + argstr="--hits %s", + extensions=None, + ), + label_voxel_volume=dict( + argstr="--labvoxvol %f", + ), + map_label_stat=dict( + argstr="--label-stat %s", + extensions=None, + ), + native_vox2ras=dict( + argstr="--native-vox2ras", + ), + proj=dict( + argstr="--proj %s %f %f %f", + requires=("subject_id", "hemi"), + ), reg_file=dict( argstr="--reg %s", extensions=None, @@ -51,11 +78,23 @@ def test_Label2Vol_inputs(): mandatory=True, xor=("label_file", "annot_file", "seg_file", "aparc_aseg"), ), - subject_id=dict(argstr="--subject %s",), + subject_id=dict( + argstr="--subject %s", + ), subjects_dir=dict(), - surface=dict(argstr="--surf %s",), - template_file=dict(argstr="--temp %s", extensions=None, mandatory=True,), - vol_label_file=dict(argstr="--o %s", extensions=None, genfile=True,), + surface=dict( + argstr="--surf %s", + ), + template_file=dict( + argstr="--temp %s", + extensions=None, + mandatory=True, + ), + vol_label_file=dict( + argstr="--o %s", + extensions=None, + genfile=True, + ), ) inputs = Label2Vol.input_spec() @@ -65,7 +104,11 @@ def test_Label2Vol_inputs(): def test_Label2Vol_outputs(): - output_map = dict(vol_label_file=dict(extensions=None,),) + output_map = dict( + vol_label_file=dict( + extensions=None, + ), + ) outputs = Label2Vol.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py b/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py index 16ed15d093..3b3c2f0852 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py @@ -4,13 +4,32 @@ def test_MNIBiasCorrection_inputs(): input_map = dict( - args=dict(argstr="%s",), - distance=dict(argstr="--distance %d",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="--i %s", extensions=None, mandatory=True,), - iterations=dict(argstr="--n %d", usedefault=True,), - mask=dict(argstr="--mask %s", extensions=None,), - no_rescale=dict(argstr="--no-rescale",), + args=dict( + argstr="%s", + ), + distance=dict( + argstr="--distance %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="--i %s", + extensions=None, + mandatory=True, + ), + iterations=dict( + argstr="--n %d", + usedefault=True, + ), + mask=dict( + argstr="--mask %s", + extensions=None, + ), + no_rescale=dict( + argstr="--no-rescale", + ), out_file=dict( argstr="--o %s", extensions=None, @@ -19,11 +38,20 @@ def test_MNIBiasCorrection_inputs(): name_source=["in_file"], name_template="%s_output", ), - protocol_iterations=dict(argstr="--proto-iters %d",), - shrink=dict(argstr="--shrink %d",), - stop=dict(argstr="--stop %f",), + protocol_iterations=dict( + argstr="--proto-iters %d", + ), + shrink=dict( + argstr="--shrink %d", + ), + stop=dict( + argstr="--stop %f", + ), subjects_dir=dict(), - transform=dict(argstr="--uchar %s", extensions=None,), + transform=dict( + argstr="--uchar %s", + extensions=None, + ), ) inputs = MNIBiasCorrection.input_spec() @@ -33,7 +61,11 @@ def test_MNIBiasCorrection_inputs(): def test_MNIBiasCorrection_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MNIBiasCorrection.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py b/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py index ae81998809..4f21cc2f61 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py @@ -4,12 +4,27 @@ def test_MPRtoMNI305_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, usedefault=True,), - reference_dir=dict(mandatory=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + usedefault=True, + ), + reference_dir=dict( + mandatory=True, + usedefault=True, + ), subjects_dir=dict(), - target=dict(mandatory=True, usedefault=True,), + target=dict( + mandatory=True, + usedefault=True, + ), ) inputs = MPRtoMNI305.input_spec() @@ -20,8 +35,13 @@ def test_MPRtoMNI305_inputs(): def test_MPRtoMNI305_outputs(): output_map = dict( - log_file=dict(extensions=None, usedefault=True,), - out_file=dict(extensions=None,), + log_file=dict( + extensions=None, + usedefault=True, + ), + out_file=dict( + extensions=None, + ), ) outputs = MPRtoMNI305.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py index b75f338f31..9e229078ef 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py +++ 
b/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py @@ -5,97 +5,276 @@ def test_MRIConvert_inputs(): input_map = dict( apply_inv_transform=dict( - argstr="--apply_inverse_transform %s", extensions=None, - ), - apply_transform=dict(argstr="--apply_transform %s", extensions=None,), - args=dict(argstr="%s",), - ascii=dict(argstr="--ascii",), - autoalign_matrix=dict(argstr="--autoalign %s", extensions=None,), - color_file=dict(argstr="--color_file %s", extensions=None,), - conform=dict(argstr="--conform",), - conform_min=dict(argstr="--conform_min",), - conform_size=dict(argstr="--conform_size %s",), - crop_center=dict(argstr="--crop %d %d %d",), - crop_gdf=dict(argstr="--crop_gdf",), - crop_size=dict(argstr="--cropsize %d %d %d",), - cut_ends=dict(argstr="--cutends %d",), - cw256=dict(argstr="--cw256",), - devolve_transform=dict(argstr="--devolvexfm %s",), - drop_n=dict(argstr="--ndrop %d",), - environ=dict(nohash=True, usedefault=True,), - fill_parcellation=dict(argstr="--fill_parcellation",), - force_ras=dict(argstr="--force_ras_good",), - frame=dict(argstr="--frame %d",), - frame_subsample=dict(argstr="--fsubsample %d %d %d",), - fwhm=dict(argstr="--fwhm %f",), - in_center=dict(argstr="--in_center %s",), + argstr="--apply_inverse_transform %s", + extensions=None, + ), + apply_transform=dict( + argstr="--apply_transform %s", + extensions=None, + ), + args=dict( + argstr="%s", + ), + ascii=dict( + argstr="--ascii", + ), + autoalign_matrix=dict( + argstr="--autoalign %s", + extensions=None, + ), + color_file=dict( + argstr="--color_file %s", + extensions=None, + ), + conform=dict( + argstr="--conform", + ), + conform_min=dict( + argstr="--conform_min", + ), + conform_size=dict( + argstr="--conform_size %s", + ), + crop_center=dict( + argstr="--crop %d %d %d", + ), + crop_gdf=dict( + argstr="--crop_gdf", + ), + crop_size=dict( + argstr="--cropsize %d %d %d", + ), + cut_ends=dict( + argstr="--cutends %d", + ), + cw256=dict( + argstr="--cw256", + ), + devolve_transform=dict( + argstr="--devolvexfm %s", + ), + drop_n=dict( + argstr="--ndrop %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fill_parcellation=dict( + argstr="--fill_parcellation", + ), + force_ras=dict( + argstr="--force_ras_good", + ), + frame=dict( + argstr="--frame %d", + ), + frame_subsample=dict( + argstr="--fsubsample %d %d %d", + ), + fwhm=dict( + argstr="--fwhm %f", + ), + in_center=dict( + argstr="--in_center %s", + ), in_file=dict( - argstr="--input_volume %s", extensions=None, mandatory=True, position=-2, - ), - in_i_dir=dict(argstr="--in_i_direction %f %f %f",), - in_i_size=dict(argstr="--in_i_size %d",), - in_info=dict(argstr="--in_info",), - in_j_dir=dict(argstr="--in_j_direction %f %f %f",), - in_j_size=dict(argstr="--in_j_size %d",), - in_k_dir=dict(argstr="--in_k_direction %f %f %f",), - in_k_size=dict(argstr="--in_k_size %d",), - in_like=dict(argstr="--in_like %s", extensions=None,), - in_matrix=dict(argstr="--in_matrix",), - in_orientation=dict(argstr="--in_orientation %s",), - in_scale=dict(argstr="--scale %f",), - in_stats=dict(argstr="--in_stats",), - in_type=dict(argstr="--in_type %s",), - invert_contrast=dict(argstr="--invert_contrast %f",), - midframe=dict(argstr="--mid-frame",), - no_change=dict(argstr="--nochange",), - no_scale=dict(argstr="--no_scale 1",), - no_translate=dict(argstr="--no_translate",), - no_write=dict(argstr="--no_write",), - out_center=dict(argstr="--out_center %f %f %f",), - out_datatype=dict(argstr="--out_data_type %s",), + argstr="--input_volume %s", + 
extensions=None, + mandatory=True, + position=-2, + ), + in_i_dir=dict( + argstr="--in_i_direction %f %f %f", + ), + in_i_size=dict( + argstr="--in_i_size %d", + ), + in_info=dict( + argstr="--in_info", + ), + in_j_dir=dict( + argstr="--in_j_direction %f %f %f", + ), + in_j_size=dict( + argstr="--in_j_size %d", + ), + in_k_dir=dict( + argstr="--in_k_direction %f %f %f", + ), + in_k_size=dict( + argstr="--in_k_size %d", + ), + in_like=dict( + argstr="--in_like %s", + extensions=None, + ), + in_matrix=dict( + argstr="--in_matrix", + ), + in_orientation=dict( + argstr="--in_orientation %s", + ), + in_scale=dict( + argstr="--scale %f", + ), + in_stats=dict( + argstr="--in_stats", + ), + in_type=dict( + argstr="--in_type %s", + ), + invert_contrast=dict( + argstr="--invert_contrast %f", + ), + midframe=dict( + argstr="--mid-frame", + ), + no_change=dict( + argstr="--nochange", + ), + no_scale=dict( + argstr="--no_scale 1", + ), + no_translate=dict( + argstr="--no_translate", + ), + no_write=dict( + argstr="--no_write", + ), + out_center=dict( + argstr="--out_center %f %f %f", + ), + out_datatype=dict( + argstr="--out_data_type %s", + ), out_file=dict( - argstr="--output_volume %s", extensions=None, genfile=True, position=-1, - ), - out_i_count=dict(argstr="--out_i_count %d",), - out_i_dir=dict(argstr="--out_i_direction %f %f %f",), - out_i_size=dict(argstr="--out_i_size %d",), - out_info=dict(argstr="--out_info",), - out_j_count=dict(argstr="--out_j_count %d",), - out_j_dir=dict(argstr="--out_j_direction %f %f %f",), - out_j_size=dict(argstr="--out_j_size %d",), - out_k_count=dict(argstr="--out_k_count %d",), - out_k_dir=dict(argstr="--out_k_direction %f %f %f",), - out_k_size=dict(argstr="--out_k_size %d",), - out_matrix=dict(argstr="--out_matrix",), - out_orientation=dict(argstr="--out_orientation %s",), - out_scale=dict(argstr="--out-scale %d",), - out_stats=dict(argstr="--out_stats",), - out_type=dict(argstr="--out_type %s",), - parse_only=dict(argstr="--parse_only",), - read_only=dict(argstr="--read_only",), - reorder=dict(argstr="--reorder %d %d %d",), - resample_type=dict(argstr="--resample_type %s",), - reslice_like=dict(argstr="--reslice_like %s", extensions=None,), - sdcm_list=dict(argstr="--sdcmlist %s", extensions=None,), - skip_n=dict(argstr="--nskip %d",), - slice_bias=dict(argstr="--slice-bias %f",), - slice_crop=dict(argstr="--slice-crop %d %d",), - slice_reverse=dict(argstr="--slice-reverse",), - smooth_parcellation=dict(argstr="--smooth_parcellation",), - sphinx=dict(argstr="--sphinx",), - split=dict(argstr="--split",), - status_file=dict(argstr="--status %s", extensions=None,), - subject_name=dict(argstr="--subject_name %s",), + argstr="--output_volume %s", + extensions=None, + genfile=True, + position=-1, + ), + out_i_count=dict( + argstr="--out_i_count %d", + ), + out_i_dir=dict( + argstr="--out_i_direction %f %f %f", + ), + out_i_size=dict( + argstr="--out_i_size %d", + ), + out_info=dict( + argstr="--out_info", + ), + out_j_count=dict( + argstr="--out_j_count %d", + ), + out_j_dir=dict( + argstr="--out_j_direction %f %f %f", + ), + out_j_size=dict( + argstr="--out_j_size %d", + ), + out_k_count=dict( + argstr="--out_k_count %d", + ), + out_k_dir=dict( + argstr="--out_k_direction %f %f %f", + ), + out_k_size=dict( + argstr="--out_k_size %d", + ), + out_matrix=dict( + argstr="--out_matrix", + ), + out_orientation=dict( + argstr="--out_orientation %s", + ), + out_scale=dict( + argstr="--out-scale %d", + ), + out_stats=dict( + argstr="--out_stats", + ), + out_type=dict( + 
argstr="--out_type %s", + ), + parse_only=dict( + argstr="--parse_only", + ), + read_only=dict( + argstr="--read_only", + ), + reorder=dict( + argstr="--reorder %d %d %d", + ), + resample_type=dict( + argstr="--resample_type %s", + ), + reslice_like=dict( + argstr="--reslice_like %s", + extensions=None, + ), + sdcm_list=dict( + argstr="--sdcmlist %s", + extensions=None, + ), + skip_n=dict( + argstr="--nskip %d", + ), + slice_bias=dict( + argstr="--slice-bias %f", + ), + slice_crop=dict( + argstr="--slice-crop %d %d", + ), + slice_reverse=dict( + argstr="--slice-reverse", + ), + smooth_parcellation=dict( + argstr="--smooth_parcellation", + ), + sphinx=dict( + argstr="--sphinx", + ), + split=dict( + argstr="--split", + ), + status_file=dict( + argstr="--status %s", + extensions=None, + ), + subject_name=dict( + argstr="--subject_name %s", + ), subjects_dir=dict(), - te=dict(argstr="-te %d",), - template_info=dict(argstr="--template_info",), - template_type=dict(argstr="--template_type %s",), - ti=dict(argstr="-ti %d",), - tr=dict(argstr="-tr %d",), - unwarp_gradient=dict(argstr="--unwarp_gradient_nonlinearity",), - vox_size=dict(argstr="-voxsize %f %f %f",), - zero_ge_z_offset=dict(argstr="--zero_ge_z_offset",), - zero_outlines=dict(argstr="--zero_outlines",), + te=dict( + argstr="-te %d", + ), + template_info=dict( + argstr="--template_info", + ), + template_type=dict( + argstr="--template_type %s", + ), + ti=dict( + argstr="-ti %d", + ), + tr=dict( + argstr="-tr %d", + ), + unwarp_gradient=dict( + argstr="--unwarp_gradient_nonlinearity", + ), + vox_size=dict( + argstr="-voxsize %f %f %f", + ), + zero_ge_z_offset=dict( + argstr="--zero_ge_z_offset", + ), + zero_outlines=dict( + argstr="--zero_outlines", + ), ) inputs = MRIConvert.input_spec() @@ -105,7 +284,9 @@ def test_MRIConvert_inputs(): def test_MRIConvert_outputs(): - output_map = dict(out_file=dict(),) + output_map = dict( + out_file=dict(), + ) outputs = MRIConvert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py b/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py index 1cef259c82..3d85129f3d 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py @@ -4,29 +4,79 @@ def test_MRICoreg_inputs(): input_map = dict( - args=dict(argstr="%s",), - brute_force_limit=dict(argstr="--bf-lim %g", xor=["no_brute_force"],), - brute_force_samples=dict(argstr="--bf-nsamp %d", xor=["no_brute_force"],), - conform_reference=dict(argstr="--conf-ref",), - dof=dict(argstr="--dof %d",), - environ=dict(nohash=True, usedefault=True,), - ftol=dict(argstr="--ftol %e",), - initial_rotation=dict(argstr="--rot %g %g %g",), - initial_scale=dict(argstr="--scale %g %g %g",), - initial_shear=dict(argstr="--shear %g %g %g",), - initial_translation=dict(argstr="--trans %g %g %g",), - linmintol=dict(argstr="--linmintol %e",), - max_iters=dict(argstr="--nitersmax %d",), - no_brute_force=dict(argstr="--no-bf",), - no_coord_dithering=dict(argstr="--no-coord-dither",), - no_cras0=dict(argstr="--no-cras0",), - no_intensity_dithering=dict(argstr="--no-intensity-dither",), - no_smooth=dict(argstr="--no-smooth",), - num_threads=dict(argstr="--threads %d",), - out_lta_file=dict(argstr="--lta %s", usedefault=True,), - out_params_file=dict(argstr="--params %s",), - out_reg_file=dict(argstr="--regdat %s",), - ref_fwhm=dict(argstr="--ref-fwhm",), + args=dict( + argstr="%s", + ), + brute_force_limit=dict( + 
argstr="--bf-lim %g", + xor=["no_brute_force"], + ), + brute_force_samples=dict( + argstr="--bf-nsamp %d", + xor=["no_brute_force"], + ), + conform_reference=dict( + argstr="--conf-ref", + ), + dof=dict( + argstr="--dof %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ftol=dict( + argstr="--ftol %e", + ), + initial_rotation=dict( + argstr="--rot %g %g %g", + ), + initial_scale=dict( + argstr="--scale %g %g %g", + ), + initial_shear=dict( + argstr="--shear %g %g %g", + ), + initial_translation=dict( + argstr="--trans %g %g %g", + ), + linmintol=dict( + argstr="--linmintol %e", + ), + max_iters=dict( + argstr="--nitersmax %d", + ), + no_brute_force=dict( + argstr="--no-bf", + ), + no_coord_dithering=dict( + argstr="--no-coord-dither", + ), + no_cras0=dict( + argstr="--no-cras0", + ), + no_intensity_dithering=dict( + argstr="--no-intensity-dither", + ), + no_smooth=dict( + argstr="--no-smooth", + ), + num_threads=dict( + argstr="--threads %d", + ), + out_lta_file=dict( + argstr="--lta %s", + usedefault=True, + ), + out_params_file=dict( + argstr="--params %s", + ), + out_reg_file=dict( + argstr="--regdat %s", + ), + ref_fwhm=dict( + argstr="--ref-fwhm", + ), reference_file=dict( argstr="--ref %s", copyfile=False, @@ -34,14 +84,28 @@ def test_MRICoreg_inputs(): mandatory=True, xor=["subject_id"], ), - reference_mask=dict(argstr="--ref-mask %s", position=2,), - saturation_threshold=dict(argstr="--sat %g",), - sep=dict(argstr="--sep %s...",), + reference_mask=dict( + argstr="--ref-mask %s", + position=2, + ), + saturation_threshold=dict( + argstr="--sat %g", + ), + sep=dict( + argstr="--sep %s...", + ), source_file=dict( - argstr="--mov %s", copyfile=False, extensions=None, mandatory=True, + argstr="--mov %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + source_mask=dict( + argstr="--mov-mask", + ), + source_oob=dict( + argstr="--mov-oob", ), - source_mask=dict(argstr="--mov-mask",), - source_oob=dict(argstr="--mov-oob",), subject_id=dict( argstr="--s %s", mandatory=True, @@ -49,7 +113,9 @@ def test_MRICoreg_inputs(): requires=["subjects_dir"], xor=["reference_file"], ), - subjects_dir=dict(argstr="--sd %s",), + subjects_dir=dict( + argstr="--sd %s", + ), ) inputs = MRICoreg.input_spec() @@ -60,9 +126,15 @@ def test_MRICoreg_inputs(): def test_MRICoreg_outputs(): output_map = dict( - out_lta_file=dict(extensions=None,), - out_params_file=dict(extensions=None,), - out_reg_file=dict(extensions=None,), + out_lta_file=dict( + extensions=None, + ), + out_params_file=dict( + extensions=None, + ), + out_reg_file=dict( + extensions=None, + ), ) outputs = MRICoreg.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py index c8a2f7090c..bf359364ba 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py @@ -4,14 +4,38 @@ def test_MRIFill_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - log_file=dict(argstr="-a %s", extensions=None,), - out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), - segmentation=dict(argstr="-segmentation %s", extensions=None,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + log_file=dict( + argstr="-a %s", + 
extensions=None, + ), + out_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), + segmentation=dict( + argstr="-segmentation %s", + extensions=None, + ), subjects_dir=dict(), - transform=dict(argstr="-xform %s", extensions=None,), + transform=dict( + argstr="-xform %s", + extensions=None, + ), ) inputs = MRIFill.input_spec() @@ -21,7 +45,14 @@ def test_MRIFill_inputs(): def test_MRIFill_outputs(): - output_map = dict(log_file=dict(extensions=None,), out_file=dict(extensions=None,),) + output_map = dict( + log_file=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + ) outputs = MRIFill.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py index 25137a53a8..ccb2ab4388 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py @@ -4,12 +4,35 @@ def test_MRIMarchingCubes_inputs(): input_map = dict( - args=dict(argstr="%s",), - connectivity_value=dict(argstr="%d", position=-1, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1,), - label_value=dict(argstr="%d", mandatory=True, position=2,), - out_file=dict(argstr="./%s", extensions=None, genfile=True, position=-2,), + args=dict( + argstr="%s", + ), + connectivity_value=dict( + argstr="%d", + position=-1, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + label_value=dict( + argstr="%d", + mandatory=True, + position=2, + ), + out_file=dict( + argstr="./%s", + extensions=None, + genfile=True, + position=-2, + ), subjects_dir=dict(), ) inputs = MRIMarchingCubes.input_spec() @@ -20,7 +43,11 @@ def test_MRIMarchingCubes_inputs(): def test_MRIMarchingCubes_outputs(): - output_map = dict(surface=dict(extensions=None,),) + output_map = dict( + surface=dict( + extensions=None, + ), + ) outputs = MRIMarchingCubes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py index 195472d4ad..e6a239fbd5 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py @@ -4,13 +4,37 @@ def test_MRIPretess_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_filled=dict(argstr="%s", extensions=None, mandatory=True, position=-4,), - in_norm=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - keep=dict(argstr="-keep",), - label=dict(argstr="%s", mandatory=True, position=-3, usedefault=True,), - nocorners=dict(argstr="-nocorners",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_filled=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-4, + ), + in_norm=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + keep=dict( + argstr="-keep", + ), + label=dict( + argstr="%s", + mandatory=True, + position=-3, + usedefault=True, + ), + nocorners=dict( + argstr="-nocorners", + ), out_file=dict( argstr="%s", extensions=None, @@ -20,7 +44,9 @@ def test_MRIPretess_inputs(): position=-1, ), subjects_dir=dict(), - 
test=dict(argstr="-test",), + test=dict( + argstr="-test", + ), ) inputs = MRIPretess.input_spec() @@ -30,7 +56,11 @@ def test_MRIPretess_inputs(): def test_MRIPretess_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRIPretess.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py index 03d9ccd2e4..845e6c6c3c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py @@ -4,43 +4,84 @@ def test_MRISPreproc_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), fsgd_file=dict( argstr="--fsgd %s", extensions=None, xor=("subjects", "fsgd_file", "subject_file"), ), - fwhm=dict(argstr="--fwhm %f", xor=["num_iters"],), - fwhm_source=dict(argstr="--fwhm-src %f", xor=["num_iters_source"],), - hemi=dict(argstr="--hemi %s", mandatory=True,), - num_iters=dict(argstr="--niters %d", xor=["fwhm"],), - num_iters_source=dict(argstr="--niterssrc %d", xor=["fwhm_source"],), - out_file=dict(argstr="--out %s", extensions=None, genfile=True,), - proj_frac=dict(argstr="--projfrac %s",), - smooth_cortex_only=dict(argstr="--smooth-cortex-only",), - source_format=dict(argstr="--srcfmt %s",), + fwhm=dict( + argstr="--fwhm %f", + xor=["num_iters"], + ), + fwhm_source=dict( + argstr="--fwhm-src %f", + xor=["num_iters_source"], + ), + hemi=dict( + argstr="--hemi %s", + mandatory=True, + ), + num_iters=dict( + argstr="--niters %d", + xor=["fwhm"], + ), + num_iters_source=dict( + argstr="--niterssrc %d", + xor=["fwhm_source"], + ), + out_file=dict( + argstr="--out %s", + extensions=None, + genfile=True, + ), + proj_frac=dict( + argstr="--projfrac %s", + ), + smooth_cortex_only=dict( + argstr="--smooth-cortex-only", + ), + source_format=dict( + argstr="--srcfmt %s", + ), subject_file=dict( argstr="--f %s", extensions=None, xor=("subjects", "fsgd_file", "subject_file"), ), subjects=dict( - argstr="--s %s...", xor=("subjects", "fsgd_file", "subject_file"), + argstr="--s %s...", + xor=("subjects", "fsgd_file", "subject_file"), ), subjects_dir=dict(), surf_area=dict( - argstr="--area %s", xor=("surf_measure", "surf_measure_file", "surf_area"), + argstr="--area %s", + xor=("surf_measure", "surf_measure_file", "surf_area"), + ), + surf_dir=dict( + argstr="--surfdir %s", ), - surf_dir=dict(argstr="--surfdir %s",), surf_measure=dict( - argstr="--meas %s", xor=("surf_measure", "surf_measure_file", "surf_area"), + argstr="--meas %s", + xor=("surf_measure", "surf_measure_file", "surf_area"), ), surf_measure_file=dict( - argstr="--is %s...", xor=("surf_measure", "surf_measure_file", "surf_area"), + argstr="--is %s...", + xor=("surf_measure", "surf_measure_file", "surf_area"), + ), + target=dict( + argstr="--target %s", + mandatory=True, + ), + vol_measure_file=dict( + argstr="--iv %s %s...", ), - target=dict(argstr="--target %s", mandatory=True,), - vol_measure_file=dict(argstr="--iv %s %s...",), ) inputs = MRISPreproc.input_spec() @@ -50,7 +91,11 @@ def test_MRISPreproc_inputs(): def test_MRISPreproc_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRISPreproc.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py index 5a7a711263..5bdb0614e5 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py @@ -4,25 +4,61 @@ def test_MRISPreprocReconAll_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), copy_inputs=dict(), - environ=dict(nohash=True, usedefault=True,), + environ=dict( + nohash=True, + usedefault=True, + ), fsgd_file=dict( argstr="--fsgd %s", extensions=None, xor=("subjects", "fsgd_file", "subject_file"), ), - fwhm=dict(argstr="--fwhm %f", xor=["num_iters"],), - fwhm_source=dict(argstr="--fwhm-src %f", xor=["num_iters_source"],), - hemi=dict(argstr="--hemi %s", mandatory=True,), - lh_surfreg_target=dict(extensions=None, requires=["surfreg_files"],), - num_iters=dict(argstr="--niters %d", xor=["fwhm"],), - num_iters_source=dict(argstr="--niterssrc %d", xor=["fwhm_source"],), - out_file=dict(argstr="--out %s", extensions=None, genfile=True,), - proj_frac=dict(argstr="--projfrac %s",), - rh_surfreg_target=dict(extensions=None, requires=["surfreg_files"],), - smooth_cortex_only=dict(argstr="--smooth-cortex-only",), - source_format=dict(argstr="--srcfmt %s",), + fwhm=dict( + argstr="--fwhm %f", + xor=["num_iters"], + ), + fwhm_source=dict( + argstr="--fwhm-src %f", + xor=["num_iters_source"], + ), + hemi=dict( + argstr="--hemi %s", + mandatory=True, + ), + lh_surfreg_target=dict( + extensions=None, + requires=["surfreg_files"], + ), + num_iters=dict( + argstr="--niters %d", + xor=["fwhm"], + ), + num_iters_source=dict( + argstr="--niterssrc %d", + xor=["fwhm_source"], + ), + out_file=dict( + argstr="--out %s", + extensions=None, + genfile=True, + ), + proj_frac=dict( + argstr="--projfrac %s", + ), + rh_surfreg_target=dict( + extensions=None, + requires=["surfreg_files"], + ), + smooth_cortex_only=dict( + argstr="--smooth-cortex-only", + ), + source_format=dict( + argstr="--srcfmt %s", + ), subject_file=dict( argstr="--f %s", extensions=None, @@ -34,15 +70,20 @@ def test_MRISPreprocReconAll_inputs(): xor=("subjects", "fsgd_file", "subject_file", "subject_id"), ), subjects=dict( - argstr="--s %s...", xor=("subjects", "fsgd_file", "subject_file"), + argstr="--s %s...", + xor=("subjects", "fsgd_file", "subject_file"), ), subjects_dir=dict(), surf_area=dict( - argstr="--area %s", xor=("surf_measure", "surf_measure_file", "surf_area"), + argstr="--area %s", + xor=("surf_measure", "surf_measure_file", "surf_area"), + ), + surf_dir=dict( + argstr="--surfdir %s", ), - surf_dir=dict(argstr="--surfdir %s",), surf_measure=dict( - argstr="--meas %s", xor=("surf_measure", "surf_measure_file", "surf_area"), + argstr="--meas %s", + xor=("surf_measure", "surf_measure_file", "surf_area"), ), surf_measure_file=dict( argstr="--meas %s", @@ -50,10 +91,16 @@ def test_MRISPreprocReconAll_inputs(): xor=("surf_measure", "surf_measure_file", "surf_area"), ), surfreg_files=dict( - argstr="--surfreg %s", requires=["lh_surfreg_target", "rh_surfreg_target"], + argstr="--surfreg %s", + requires=["lh_surfreg_target", "rh_surfreg_target"], + ), + target=dict( + argstr="--target %s", + mandatory=True, + ), + vol_measure_file=dict( + argstr="--iv %s %s...", ), - target=dict(argstr="--target %s", mandatory=True,), - vol_measure_file=dict(argstr="--iv %s %s...",), ) inputs = MRISPreprocReconAll.input_spec() @@ -63,7 +110,11 @@ 
def test_MRISPreprocReconAll_inputs(): def test_MRISPreprocReconAll_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRISPreprocReconAll.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py b/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py index 8bba694bf7..8aa7210d0e 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py @@ -4,14 +4,37 @@ def test_MRITessellate_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), - label_value=dict(argstr="%d", mandatory=True, position=-2,), - out_file=dict(argstr="%s", extensions=None, genfile=True, position=-1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + label_value=dict( + argstr="%d", + mandatory=True, + position=-2, + ), + out_file=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), subjects_dir=dict(), - tesselate_all_voxels=dict(argstr="-a",), - use_real_RAS_coordinates=dict(argstr="-n",), + tesselate_all_voxels=dict( + argstr="-a", + ), + use_real_RAS_coordinates=dict( + argstr="-n", + ), ) inputs = MRITessellate.input_spec() @@ -21,7 +44,11 @@ def test_MRITessellate_inputs(): def test_MRITessellate_outputs(): - output_map = dict(surface=dict(extensions=None,),) + output_map = dict( + surface=dict( + extensions=None, + ), + ) outputs = MRITessellate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py index 560f7e4fce..e37cf0723a 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py @@ -4,15 +4,43 @@ def test_MRIsCALabel_inputs(): input_map = dict( - args=dict(argstr="%s",), - aseg=dict(argstr="-aseg %s", extensions=None,), - canonsurf=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), - classifier=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + args=dict( + argstr="%s", + ), + aseg=dict( + argstr="-aseg %s", + extensions=None, + ), + canonsurf=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + classifier=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), copy_inputs=dict(), - curv=dict(extensions=None, mandatory=True,), - environ=dict(nohash=True, usedefault=True,), - hemisphere=dict(argstr="%s", mandatory=True, position=-4,), - label=dict(argstr="-l %s", extensions=None,), + curv=dict( + extensions=None, + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hemisphere=dict( + argstr="%s", + mandatory=True, + position=-4, + ), + label=dict( + argstr="-l %s", + extensions=None, + ), num_threads=dict(), out_file=dict( argstr="%s", @@ -23,11 +51,24 @@ def test_MRIsCALabel_inputs(): name_template="%s.aparc.annot", position=-1, ), - seed=dict(argstr="-seed %d",), - smoothwm=dict(extensions=None, mandatory=True,), - subject_id=dict(argstr="%s", mandatory=True, position=-5, usedefault=True,), + seed=dict( + argstr="-seed %d", + ), + smoothwm=dict( + 
extensions=None, + mandatory=True, + ), + subject_id=dict( + argstr="%s", + mandatory=True, + position=-5, + usedefault=True, + ), subjects_dir=dict(), - sulc=dict(extensions=None, mandatory=True,), + sulc=dict( + extensions=None, + mandatory=True, + ), ) inputs = MRIsCALabel.input_spec() @@ -37,7 +78,11 @@ def test_MRIsCALabel_inputs(): def test_MRIsCALabel_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRIsCALabel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py index 521c1d5d6c..1ef9c95c46 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py @@ -4,16 +4,45 @@ def test_MRIsCalc_inputs(): input_map = dict( - action=dict(argstr="%s", mandatory=True, position=-2,), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file1=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + action=dict( + argstr="%s", + mandatory=True, + position=-2, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file1=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), in_file2=dict( - argstr="%s", extensions=None, position=-1, xor=["in_float", "in_int"], + argstr="%s", + extensions=None, + position=-1, + xor=["in_float", "in_int"], + ), + in_float=dict( + argstr="%f", + position=-1, + xor=["in_file2", "in_int"], + ), + in_int=dict( + argstr="%d", + position=-1, + xor=["in_file2", "in_float"], + ), + out_file=dict( + argstr="-o %s", + extensions=None, + mandatory=True, ), - in_float=dict(argstr="%f", position=-1, xor=["in_file2", "in_int"],), - in_int=dict(argstr="%d", position=-1, xor=["in_file2", "in_float"],), - out_file=dict(argstr="-o %s", extensions=None, mandatory=True,), subjects_dir=dict(), ) inputs = MRIsCalc.input_spec() @@ -24,7 +53,11 @@ def test_MRIsCalc_inputs(): def test_MRIsCalc_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRIsCalc.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py index 56fd270efc..01aef41a01 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py @@ -4,11 +4,24 @@ def test_MRIsCombine_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_files=dict(argstr="--combinesurfs %s", mandatory=True, position=1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr="--combinesurfs %s", + mandatory=True, + position=1, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, mandatory=True, position=-1, + argstr="%s", + extensions=None, + genfile=True, + mandatory=True, + position=-1, ), subjects_dir=dict(), ) @@ -20,7 +33,11 @@ def test_MRIsCombine_inputs(): def test_MRIsCombine_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRIsCombine.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py index 6972ae4f33..daf4462ff8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py @@ -4,17 +4,48 @@ def test_MRIsConvert_inputs(): input_map = dict( - annot_file=dict(argstr="--annot %s", extensions=None,), - args=dict(argstr="%s",), - dataarray_num=dict(argstr="--da_num %d",), - environ=dict(nohash=True, usedefault=True,), - functional_file=dict(argstr="-f %s", extensions=None,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - label_file=dict(argstr="--label %s", extensions=None,), - labelstats_outfile=dict(argstr="--labelstats %s", extensions=None,), - normal=dict(argstr="-n",), - origname=dict(argstr="-o %s",), - out_datatype=dict(mandatory=True, xor=["out_file"],), + annot_file=dict( + argstr="--annot %s", + extensions=None, + ), + args=dict( + argstr="%s", + ), + dataarray_num=dict( + argstr="--da_num %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + functional_file=dict( + argstr="-f %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + label_file=dict( + argstr="--label %s", + extensions=None, + ), + labelstats_outfile=dict( + argstr="--labelstats %s", + extensions=None, + ), + normal=dict( + argstr="-n", + ), + origname=dict( + argstr="-o %s", + ), + out_datatype=dict( + mandatory=True, + xor=["out_file"], + ), out_file=dict( argstr="%s", extensions=None, @@ -23,17 +54,39 @@ def test_MRIsConvert_inputs(): position=-1, xor=["out_datatype"], ), - parcstats_file=dict(argstr="--parcstats %s", extensions=None,), - patch=dict(argstr="-p",), - rescale=dict(argstr="-r",), - scalarcurv_file=dict(argstr="-c %s", extensions=None,), - scale=dict(argstr="-s %.3f",), + parcstats_file=dict( + argstr="--parcstats %s", + extensions=None, + ), + patch=dict( + argstr="-p", + ), + rescale=dict( + argstr="-r", + ), + scalarcurv_file=dict( + argstr="-c %s", + extensions=None, + ), + scale=dict( + argstr="-s %.3f", + ), subjects_dir=dict(), - talairachxfm_subjid=dict(argstr="-t %s",), - to_scanner=dict(argstr="--to-scanner",), - to_tkr=dict(argstr="--to-tkr",), - vertex=dict(argstr="-v",), - xyz_ascii=dict(argstr="-a",), + talairachxfm_subjid=dict( + argstr="-t %s", + ), + to_scanner=dict( + argstr="--to-scanner", + ), + to_tkr=dict( + argstr="--to-tkr", + ), + vertex=dict( + argstr="-v", + ), + xyz_ascii=dict( + argstr="-a", + ), ) inputs = MRIsConvert.input_spec() @@ -43,7 +96,11 @@ def test_MRIsConvert_inputs(): def test_MRIsConvert_outputs(): - output_map = dict(converted=dict(extensions=None,),) + output_map = dict( + converted=dict( + extensions=None, + ), + ) outputs = MRIsConvert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py index b2d97f0d48..05e34a29b5 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py @@ -4,23 +4,61 @@ def test_MRIsExpand_inputs(): input_map = dict( - args=dict(argstr="%s",), - distance=dict(argstr="%g", mandatory=True, position=-2,), - dt=dict(argstr="-T %g",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + distance=dict( + argstr="%g", + mandatory=True, + position=-2, + ), + dt=dict( + argstr="-T 
%g", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-3, - ), - nsurfaces=dict(argstr="-N %d",), - out_name=dict(argstr="%s", position=-1, usedefault=True,), - pial=dict(argstr="-pial %s", copyfile=False,), - smooth_averages=dict(argstr="-A %d",), - sphere=dict(copyfile=False, usedefault=True,), - spring=dict(argstr="-S %g",), + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-3, + ), + nsurfaces=dict( + argstr="-N %d", + ), + out_name=dict( + argstr="%s", + position=-1, + usedefault=True, + ), + pial=dict( + argstr="-pial %s", + copyfile=False, + ), + smooth_averages=dict( + argstr="-A %d", + ), + sphere=dict( + copyfile=False, + usedefault=True, + ), + spring=dict( + argstr="-S %g", + ), subjects_dir=dict(), - thickness=dict(argstr="-thickness",), - thickness_name=dict(argstr="-thickness_name %s", copyfile=False,), - write_iterations=dict(argstr="-W %d",), + thickness=dict( + argstr="-thickness", + ), + thickness_name=dict( + argstr="-thickness_name %s", + copyfile=False, + ), + write_iterations=dict( + argstr="-W %d", + ), ) inputs = MRIsExpand.input_spec() @@ -30,7 +68,11 @@ def test_MRIsExpand_inputs(): def test_MRIsExpand_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRIsExpand.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py index aead890eff..9cc45189a0 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py @@ -4,12 +4,24 @@ def test_MRIsInflate_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-2, + argstr="%s", + copyfile=True, + extensions=None, + mandatory=True, + position=-2, + ), + no_save_sulc=dict( + argstr="-no-save-sulc", + xor=["out_sulc"], ), - no_save_sulc=dict(argstr="-no-save-sulc", xor=["out_sulc"],), out_file=dict( argstr="%s", extensions=None, @@ -19,7 +31,10 @@ def test_MRIsInflate_inputs(): name_template="%s.inflated", position=-1, ), - out_sulc=dict(extensions=None, xor=["no_save_sulc"],), + out_sulc=dict( + extensions=None, + xor=["no_save_sulc"], + ), subjects_dir=dict(), ) inputs = MRIsInflate.input_spec() @@ -30,7 +45,14 @@ def test_MRIsInflate_inputs(): def test_MRIsInflate_outputs(): - output_map = dict(out_file=dict(extensions=None,), out_sulc=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + out_sulc=dict( + extensions=None, + ), + ) outputs = MRIsInflate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py b/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py index 47575cf851..093dd3d9b8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py @@ -4,18 +4,52 @@ def test_MS_LDA_inputs(): input_map = dict( - args=dict(argstr="%s",), - conform=dict(argstr="-conform",), - environ=dict(nohash=True, usedefault=True,), - images=dict(argstr="%s", copyfile=False, mandatory=True, position=-1,), - 
label_file=dict(argstr="-label %s", extensions=None,), - lda_labels=dict(argstr="-lda %s", mandatory=True, sep=" ",), - mask_file=dict(argstr="-mask %s", extensions=None,), - shift=dict(argstr="-shift %d",), + args=dict( + argstr="%s", + ), + conform=dict( + argstr="-conform", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + images=dict( + argstr="%s", + copyfile=False, + mandatory=True, + position=-1, + ), + label_file=dict( + argstr="-label %s", + extensions=None, + ), + lda_labels=dict( + argstr="-lda %s", + mandatory=True, + sep=" ", + ), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + shift=dict( + argstr="-shift %d", + ), subjects_dir=dict(), - use_weights=dict(argstr="-W",), - vol_synth_file=dict(argstr="-synth %s", extensions=None, mandatory=True,), - weight_file=dict(argstr="-weight %s", extensions=None, mandatory=True,), + use_weights=dict( + argstr="-W", + ), + vol_synth_file=dict( + argstr="-synth %s", + extensions=None, + mandatory=True, + ), + weight_file=dict( + argstr="-weight %s", + extensions=None, + mandatory=True, + ), ) inputs = MS_LDA.input_spec() @@ -26,7 +60,12 @@ def test_MS_LDA_inputs(): def test_MS_LDA_outputs(): output_map = dict( - vol_synth_file=dict(extensions=None,), weight_file=dict(extensions=None,), + vol_synth_file=dict( + extensions=None, + ), + weight_file=dict( + extensions=None, + ), ) outputs = MS_LDA.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py b/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py index 3f7b6ac9ab..e3778911e6 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py @@ -4,11 +4,24 @@ def test_MakeAverageSubject_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - out_name=dict(argstr="--out %s", extensions=None, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + out_name=dict( + argstr="--out %s", + extensions=None, + usedefault=True, + ), subjects_dir=dict(), - subjects_ids=dict(argstr="--subjects %s", mandatory=True, sep=" ",), + subjects_ids=dict( + argstr="--subjects %s", + mandatory=True, + sep=" ", + ), ) inputs = MakeAverageSubject.input_spec() @@ -18,7 +31,9 @@ def test_MakeAverageSubject_inputs(): def test_MakeAverageSubject_outputs(): - output_map = dict(average_subject_name=dict(),) + output_map = dict( + average_subject_name=dict(), + ) outputs = MakeAverageSubject.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py b/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py index 219150aef9..06316d071a 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py @@ -4,29 +4,88 @@ def test_MakeSurfaces_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), copy_inputs=dict(), - environ=dict(nohash=True, usedefault=True,), - fix_mtl=dict(argstr="-fix_mtl",), - hemisphere=dict(argstr="%s", mandatory=True, position=-1,), - in_T1=dict(argstr="-T1 %s", extensions=None,), - in_aseg=dict(argstr="-aseg %s", extensions=None,), - in_filled=dict(extensions=None, mandatory=True,), - in_label=dict(extensions=None, xor=["noaparc"],), - in_orig=dict(argstr="-orig %s", extensions=None, mandatory=True,), - in_white=dict(extensions=None,), - 
in_wm=dict(extensions=None, mandatory=True,), - longitudinal=dict(argstr="-long",), - maximum=dict(argstr="-max %.1f",), - mgz=dict(argstr="-mgz",), - no_white=dict(argstr="-nowhite",), - noaparc=dict(argstr="-noaparc", xor=["in_label"],), - orig_pial=dict(argstr="-orig_pial %s", extensions=None, requires=["in_label"],), - orig_white=dict(argstr="-orig_white %s", extensions=None,), - subject_id=dict(argstr="%s", mandatory=True, position=-2, usedefault=True,), + environ=dict( + nohash=True, + usedefault=True, + ), + fix_mtl=dict( + argstr="-fix_mtl", + ), + hemisphere=dict( + argstr="%s", + mandatory=True, + position=-1, + ), + in_T1=dict( + argstr="-T1 %s", + extensions=None, + ), + in_aseg=dict( + argstr="-aseg %s", + extensions=None, + ), + in_filled=dict( + extensions=None, + mandatory=True, + ), + in_label=dict( + extensions=None, + xor=["noaparc"], + ), + in_orig=dict( + argstr="-orig %s", + extensions=None, + mandatory=True, + ), + in_white=dict( + extensions=None, + ), + in_wm=dict( + extensions=None, + mandatory=True, + ), + longitudinal=dict( + argstr="-long", + ), + maximum=dict( + argstr="-max %.1f", + ), + mgz=dict( + argstr="-mgz", + ), + no_white=dict( + argstr="-nowhite", + ), + noaparc=dict( + argstr="-noaparc", + xor=["in_label"], + ), + orig_pial=dict( + argstr="-orig_pial %s", + extensions=None, + requires=["in_label"], + ), + orig_white=dict( + argstr="-orig_white %s", + extensions=None, + ), + subject_id=dict( + argstr="%s", + mandatory=True, + position=-2, + usedefault=True, + ), subjects_dir=dict(), - white=dict(argstr="-white %s",), - white_only=dict(argstr="-whiteonly",), + white=dict( + argstr="-white %s", + ), + white_only=dict( + argstr="-whiteonly", + ), ) inputs = MakeSurfaces.input_spec() @@ -37,12 +96,24 @@ def test_MakeSurfaces_inputs(): def test_MakeSurfaces_outputs(): output_map = dict( - out_area=dict(extensions=None,), - out_cortex=dict(extensions=None,), - out_curv=dict(extensions=None,), - out_pial=dict(extensions=None,), - out_thickness=dict(extensions=None,), - out_white=dict(extensions=None,), + out_area=dict( + extensions=None, + ), + out_cortex=dict( + extensions=None, + ), + out_curv=dict( + extensions=None, + ), + out_pial=dict( + extensions=None, + ), + out_thickness=dict( + extensions=None, + ), + out_white=dict( + extensions=None, + ), ) outputs = MakeSurfaces.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py b/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py index f639141960..271f0bb328 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py @@ -4,11 +4,26 @@ def test_Normalize_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - gradient=dict(argstr="-g %d",), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - mask=dict(argstr="-mask %s", extensions=None,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gradient=dict( + argstr="-g %d", + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), out_file=dict( argstr="%s", extensions=None, @@ -18,9 +33,14 @@ def test_Normalize_inputs(): name_template="%s_norm", position=-1, ), - segmentation=dict(argstr="-aseg %s", extensions=None,), + segmentation=dict( + argstr="-aseg %s", + extensions=None, + ), subjects_dir=dict(), - transform=dict(extensions=None,), + 
transform=dict( + extensions=None, + ), ) inputs = Normalize.input_spec() @@ -30,7 +50,11 @@ def test_Normalize_inputs(): def test_Normalize_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Normalize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py b/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py index da476e1cb3..533c0a17a9 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py @@ -4,68 +4,192 @@ def test_OneSampleTTest_inputs(): input_map = dict( - allow_ill_cond=dict(argstr="--illcond",), - allow_repeated_subjects=dict(argstr="--allowsubjrep",), - args=dict(argstr="%s",), - calc_AR1=dict(argstr="--tar1",), - check_opts=dict(argstr="--checkopts",), - compute_log_y=dict(argstr="--logy",), - contrast=dict(argstr="--C %s...",), - cortex=dict(argstr="--cortex", xor=["label_file"],), - debug=dict(argstr="--debug",), + allow_ill_cond=dict( + argstr="--illcond", + ), + allow_repeated_subjects=dict( + argstr="--allowsubjrep", + ), + args=dict( + argstr="%s", + ), + calc_AR1=dict( + argstr="--tar1", + ), + check_opts=dict( + argstr="--checkopts", + ), + compute_log_y=dict( + argstr="--logy", + ), + contrast=dict( + argstr="--C %s...", + ), + cortex=dict( + argstr="--cortex", + xor=["label_file"], + ), + debug=dict( + argstr="--debug", + ), design=dict( - argstr="--X %s", extensions=None, xor=("fsgd", "design", "one_sample"), + argstr="--X %s", + extensions=None, + xor=("fsgd", "design", "one_sample"), + ), + diag=dict( + argstr="--diag %d", + ), + diag_cluster=dict( + argstr="--diag-cluster", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_fx_dof=dict( + argstr="--ffxdof %d", + xor=["fixed_fx_dof_file"], ), - diag=dict(argstr="--diag %d",), - diag_cluster=dict(argstr="--diag-cluster",), - environ=dict(nohash=True, usedefault=True,), - fixed_fx_dof=dict(argstr="--ffxdof %d", xor=["fixed_fx_dof_file"],), fixed_fx_dof_file=dict( - argstr="--ffxdofdat %d", extensions=None, xor=["fixed_fx_dof"], + argstr="--ffxdofdat %d", + extensions=None, + xor=["fixed_fx_dof"], + ), + fixed_fx_var=dict( + argstr="--yffxvar %s", + extensions=None, + ), + force_perm=dict( + argstr="--perm-force", + ), + fsgd=dict( + argstr="--fsgd %s %s", + xor=("fsgd", "design", "one_sample"), + ), + fwhm=dict( + argstr="--fwhm %f", + ), + glm_dir=dict( + argstr="--glmdir %s", + genfile=True, ), - fixed_fx_var=dict(argstr="--yffxvar %s", extensions=None,), - force_perm=dict(argstr="--perm-force",), - fsgd=dict(argstr="--fsgd %s %s", xor=("fsgd", "design", "one_sample"),), - fwhm=dict(argstr="--fwhm %f",), - glm_dir=dict(argstr="--glmdir %s", genfile=True,), hemi=dict(), - in_file=dict(argstr="--y %s", copyfile=False, extensions=None, mandatory=True,), - invert_mask=dict(argstr="--mask-inv",), - label_file=dict(argstr="--label %s", extensions=None, xor=["cortex"],), - mask_file=dict(argstr="--mask %s", extensions=None,), - no_contrast_ok=dict(argstr="--no-contrasts-ok",), - no_est_fwhm=dict(argstr="--no-est-fwhm",), - no_mask_smooth=dict(argstr="--no-mask-smooth",), - no_prune=dict(argstr="--no-prune", xor=["prunethresh"],), + in_file=dict( + argstr="--y %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + invert_mask=dict( + argstr="--mask-inv", + ), + label_file=dict( + argstr="--label %s", + extensions=None, + 
xor=["cortex"], + ), + mask_file=dict( + argstr="--mask %s", + extensions=None, + ), + no_contrast_ok=dict( + argstr="--no-contrasts-ok", + ), + no_est_fwhm=dict( + argstr="--no-est-fwhm", + ), + no_mask_smooth=dict( + argstr="--no-mask-smooth", + ), + no_prune=dict( + argstr="--no-prune", + xor=["prunethresh"], + ), one_sample=dict( - argstr="--osgm", xor=("one_sample", "fsgd", "design", "contrast"), - ), - pca=dict(argstr="--pca",), - per_voxel_reg=dict(argstr="--pvr %s...",), - profile=dict(argstr="--profile %d",), - prune=dict(argstr="--prune",), - prune_thresh=dict(argstr="--prune_thr %f", xor=["noprune"],), - resynth_test=dict(argstr="--resynthtest %d",), - save_cond=dict(argstr="--save-cond",), - save_estimate=dict(argstr="--yhat-save",), - save_res_corr_mtx=dict(argstr="--eres-scm",), - save_residual=dict(argstr="--eres-save",), - seed=dict(argstr="--seed %d",), - self_reg=dict(argstr="--selfreg %d %d %d",), - sim_done_file=dict(argstr="--sim-done %s", extensions=None,), - sim_sign=dict(argstr="--sim-sign %s",), - simulation=dict(argstr="--sim %s %d %f %s",), + argstr="--osgm", + xor=("one_sample", "fsgd", "design", "contrast"), + ), + pca=dict( + argstr="--pca", + ), + per_voxel_reg=dict( + argstr="--pvr %s...", + ), + profile=dict( + argstr="--profile %d", + ), + prune=dict( + argstr="--prune", + ), + prune_thresh=dict( + argstr="--prune_thr %f", + xor=["noprune"], + ), + resynth_test=dict( + argstr="--resynthtest %d", + ), + save_cond=dict( + argstr="--save-cond", + ), + save_estimate=dict( + argstr="--yhat-save", + ), + save_res_corr_mtx=dict( + argstr="--eres-scm", + ), + save_residual=dict( + argstr="--eres-save", + ), + seed=dict( + argstr="--seed %d", + ), + self_reg=dict( + argstr="--selfreg %d %d %d", + ), + sim_done_file=dict( + argstr="--sim-done %s", + extensions=None, + ), + sim_sign=dict( + argstr="--sim-sign %s", + ), + simulation=dict( + argstr="--sim %s %d %f %s", + ), subject_id=dict(), subjects_dir=dict(), - surf=dict(argstr="--surf %s %s %s", requires=["subject_id", "hemi"],), - surf_geo=dict(usedefault=True,), - synth=dict(argstr="--synth",), - uniform=dict(argstr="--uniform %f %f",), - var_fwhm=dict(argstr="--var-fwhm %f",), - vox_dump=dict(argstr="--voxdump %d %d %d",), - weight_file=dict(extensions=None, xor=["weighted_ls"],), - weight_inv=dict(argstr="--w-inv", xor=["weighted_ls"],), - weight_sqrt=dict(argstr="--w-sqrt", xor=["weighted_ls"],), + surf=dict( + argstr="--surf %s %s %s", + requires=["subject_id", "hemi"], + ), + surf_geo=dict( + usedefault=True, + ), + synth=dict( + argstr="--synth", + ), + uniform=dict( + argstr="--uniform %f %f", + ), + var_fwhm=dict( + argstr="--var-fwhm %f", + ), + vox_dump=dict( + argstr="--voxdump %d %d %d", + ), + weight_file=dict( + extensions=None, + xor=["weighted_ls"], + ), + weight_inv=dict( + argstr="--w-inv", + xor=["weighted_ls"], + ), + weight_sqrt=dict( + argstr="--w-sqrt", + xor=["weighted_ls"], + ), weighted_ls=dict( argstr="--wls %s", extensions=None, @@ -81,23 +205,47 @@ def test_OneSampleTTest_inputs(): def test_OneSampleTTest_outputs(): output_map = dict( - beta_file=dict(extensions=None,), - dof_file=dict(extensions=None,), - error_file=dict(extensions=None,), - error_stddev_file=dict(extensions=None,), - error_var_file=dict(extensions=None,), - estimate_file=dict(extensions=None,), - frame_eigenvectors=dict(extensions=None,), + beta_file=dict( + extensions=None, + ), + dof_file=dict( + extensions=None, + ), + error_file=dict( + extensions=None, + ), + error_stddev_file=dict( + extensions=None, + ), + 
error_var_file=dict( + extensions=None, + ), + estimate_file=dict( + extensions=None, + ), + frame_eigenvectors=dict( + extensions=None, + ), ftest_file=dict(), - fwhm_file=dict(extensions=None,), + fwhm_file=dict( + extensions=None, + ), gamma_file=dict(), gamma_var_file=dict(), glm_dir=dict(), - mask_file=dict(extensions=None,), + mask_file=dict( + extensions=None, + ), sig_file=dict(), - singular_values=dict(extensions=None,), - spatial_eigenvectors=dict(extensions=None,), - svd_stats_file=dict(extensions=None,), + singular_values=dict( + extensions=None, + ), + spatial_eigenvectors=dict( + extensions=None, + ), + svd_stats_file=dict( + extensions=None, + ), ) outputs = OneSampleTTest.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Paint.py b/nipype/interfaces/freesurfer/tests/test_auto_Paint.py index 0a93abec78..d95c4c9fa3 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Paint.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Paint.py @@ -4,10 +4,22 @@ def test_Paint_inputs(): input_map = dict( - args=dict(argstr="%s",), - averages=dict(argstr="-a %d",), - environ=dict(nohash=True, usedefault=True,), - in_surf=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + args=dict( + argstr="%s", + ), + averages=dict( + argstr="-a %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_surf=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), out_file=dict( argstr="%s", extensions=None, @@ -18,7 +30,12 @@ def test_Paint_inputs(): position=-1, ), subjects_dir=dict(), - template=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + template=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), template_param=dict(), ) inputs = Paint.input_spec() @@ -29,7 +46,11 @@ def test_Paint_inputs(): def test_Paint_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Paint.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py b/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py index cf42abe8b6..e1632020b5 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py @@ -4,38 +4,109 @@ def test_ParcellationStats_inputs(): input_map = dict( - args=dict(argstr="%s",), - aseg=dict(extensions=None, mandatory=True,), - brainmask=dict(extensions=None, mandatory=True,), + args=dict( + argstr="%s", + ), + aseg=dict( + extensions=None, + mandatory=True, + ), + brainmask=dict( + extensions=None, + mandatory=True, + ), copy_inputs=dict(), - cortex_label=dict(extensions=None,), - environ=dict(nohash=True, usedefault=True,), - hemisphere=dict(argstr="%s", mandatory=True, position=-2,), - in_annotation=dict(argstr="-a %s", extensions=None, xor=["in_label"],), - in_cortex=dict(argstr="-cortex %s", extensions=None,), + cortex_label=dict( + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hemisphere=dict( + argstr="%s", + mandatory=True, + position=-2, + ), + in_annotation=dict( + argstr="-a %s", + extensions=None, + xor=["in_label"], + ), + in_cortex=dict( + argstr="-cortex %s", + extensions=None, + ), in_label=dict( - argstr="-l %s", extensions=None, xor=["in_annotatoin", "out_color"], + argstr="-l %s", + extensions=None, + xor=["in_annotatoin", "out_color"], + ), + lh_pial=dict( + 
extensions=None, + mandatory=True, + ), + lh_white=dict( + extensions=None, + mandatory=True, + ), + mgz=dict( + argstr="-mgz", ), - lh_pial=dict(extensions=None, mandatory=True,), - lh_white=dict(extensions=None, mandatory=True,), - mgz=dict(argstr="-mgz",), out_color=dict( - argstr="-c %s", extensions=None, genfile=True, xor=["in_label"], + argstr="-c %s", + extensions=None, + genfile=True, + xor=["in_label"], ), out_table=dict( - argstr="-f %s", extensions=None, genfile=True, requires=["tabular_output"], + argstr="-f %s", + extensions=None, + genfile=True, + requires=["tabular_output"], + ), + rh_pial=dict( + extensions=None, + mandatory=True, + ), + rh_white=dict( + extensions=None, + mandatory=True, + ), + ribbon=dict( + extensions=None, + mandatory=True, + ), + subject_id=dict( + argstr="%s", + mandatory=True, + position=-3, + usedefault=True, ), - rh_pial=dict(extensions=None, mandatory=True,), - rh_white=dict(extensions=None, mandatory=True,), - ribbon=dict(extensions=None, mandatory=True,), - subject_id=dict(argstr="%s", mandatory=True, position=-3, usedefault=True,), subjects_dir=dict(), - surface=dict(argstr="%s", position=-1,), - tabular_output=dict(argstr="-b",), - th3=dict(argstr="-th3", requires=["cortex_label"],), - thickness=dict(extensions=None, mandatory=True,), - transform=dict(extensions=None, mandatory=True,), - wm=dict(extensions=None, mandatory=True,), + surface=dict( + argstr="%s", + position=-1, + ), + tabular_output=dict( + argstr="-b", + ), + th3=dict( + argstr="-th3", + requires=["cortex_label"], + ), + thickness=dict( + extensions=None, + mandatory=True, + ), + transform=dict( + extensions=None, + mandatory=True, + ), + wm=dict( + extensions=None, + mandatory=True, + ), ) inputs = ParcellationStats.input_spec() @@ -46,7 +117,12 @@ def test_ParcellationStats_inputs(): def test_ParcellationStats_outputs(): output_map = dict( - out_color=dict(extensions=None,), out_table=dict(extensions=None,), + out_color=dict( + extensions=None, + ), + out_table=dict( + extensions=None, + ), ) outputs = ParcellationStats.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py b/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py index 243e769266..3168ac64ec 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py @@ -4,13 +4,29 @@ def test_ParseDICOMDir_inputs(): input_map = dict( - args=dict(argstr="%s",), - dicom_dir=dict(argstr="--d %s", mandatory=True,), - dicom_info_file=dict(argstr="--o %s", extensions=None, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - sortbyrun=dict(argstr="--sortbyrun",), + args=dict( + argstr="%s", + ), + dicom_dir=dict( + argstr="--d %s", + mandatory=True, + ), + dicom_info_file=dict( + argstr="--o %s", + extensions=None, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + sortbyrun=dict( + argstr="--sortbyrun", + ), subjects_dir=dict(), - summarize=dict(argstr="--summarize",), + summarize=dict( + argstr="--summarize", + ), ) inputs = ParseDICOMDir.input_spec() @@ -20,7 +36,11 @@ def test_ParseDICOMDir_inputs(): def test_ParseDICOMDir_outputs(): - output_map = dict(dicom_info_file=dict(extensions=None,),) + output_map = dict( + dicom_info_file=dict( + extensions=None, + ), + ) outputs = ParseDICOMDir.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py 
b/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py index 29a9f0006b..aa270f30b3 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py @@ -4,58 +4,168 @@ def test_ReconAll_inputs(): input_map = dict( - FLAIR_file=dict(argstr="-FLAIR %s", extensions=None, min_ver="5.3.0",), - T1_files=dict(argstr="-i %s...",), - T2_file=dict(argstr="-T2 %s", extensions=None, min_ver="5.3.0",), - args=dict(argstr="%s",), - big_ventricles=dict(argstr="-bigventricles",), - brainstem=dict(argstr="-brainstem-structures",), - directive=dict(argstr="-%s", position=0, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - expert=dict(argstr="-expert %s", extensions=None,), - flags=dict(argstr="%s",), - hemi=dict(argstr="-hemi %s",), + FLAIR_file=dict( + argstr="-FLAIR %s", + extensions=None, + min_ver="5.3.0", + ), + T1_files=dict( + argstr="-i %s...", + ), + T2_file=dict( + argstr="-T2 %s", + extensions=None, + min_ver="5.3.0", + ), + args=dict( + argstr="%s", + ), + big_ventricles=dict( + argstr="-bigventricles", + ), + brainstem=dict( + argstr="-brainstem-structures", + ), + directive=dict( + argstr="-%s", + position=0, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + expert=dict( + argstr="-expert %s", + extensions=None, + ), + flags=dict( + argstr="%s", + ), + hemi=dict( + argstr="-hemi %s", + ), hippocampal_subfields_T1=dict( - argstr="-hippocampal-subfields-T1", min_ver="6.0.0", + argstr="-hippocampal-subfields-T1", + min_ver="6.0.0", ), hippocampal_subfields_T2=dict( - argstr="-hippocampal-subfields-T2 %s %s", min_ver="6.0.0", - ), - hires=dict(argstr="-hires", min_ver="6.0.0",), - mprage=dict(argstr="-mprage",), - mri_aparc2aseg=dict(xor=["expert"],), - mri_ca_label=dict(xor=["expert"],), - mri_ca_normalize=dict(xor=["expert"],), - mri_ca_register=dict(xor=["expert"],), - mri_edit_wm_with_aseg=dict(xor=["expert"],), - mri_em_register=dict(xor=["expert"],), - mri_fill=dict(xor=["expert"],), - mri_mask=dict(xor=["expert"],), - mri_normalize=dict(xor=["expert"],), - mri_pretess=dict(xor=["expert"],), - mri_remove_neck=dict(xor=["expert"],), - mri_segment=dict(xor=["expert"],), - mri_segstats=dict(xor=["expert"],), - mri_tessellate=dict(xor=["expert"],), - mri_watershed=dict(xor=["expert"],), - mris_anatomical_stats=dict(xor=["expert"],), - mris_ca_label=dict(xor=["expert"],), - mris_fix_topology=dict(xor=["expert"],), - mris_inflate=dict(xor=["expert"],), - mris_make_surfaces=dict(xor=["expert"],), - mris_register=dict(xor=["expert"],), - mris_smooth=dict(xor=["expert"],), - mris_sphere=dict(xor=["expert"],), - mris_surf2vol=dict(xor=["expert"],), - mrisp_paint=dict(xor=["expert"],), - openmp=dict(argstr="-openmp %d",), - parallel=dict(argstr="-parallel",), - subject_id=dict(argstr="-subjid %s", usedefault=True,), - subjects_dir=dict(argstr="-sd %s", genfile=True, hash_files=False,), - talairach=dict(xor=["expert"],), - use_FLAIR=dict(argstr="-FLAIRpial", min_ver="5.3.0", xor=["use_T2"],), - use_T2=dict(argstr="-T2pial", min_ver="5.3.0", xor=["use_FLAIR"],), - xopts=dict(argstr="-xopts-%s",), + argstr="-hippocampal-subfields-T2 %s %s", + min_ver="6.0.0", + ), + hires=dict( + argstr="-hires", + min_ver="6.0.0", + ), + mprage=dict( + argstr="-mprage", + ), + mri_aparc2aseg=dict( + xor=["expert"], + ), + mri_ca_label=dict( + xor=["expert"], + ), + mri_ca_normalize=dict( + xor=["expert"], + ), + mri_ca_register=dict( + xor=["expert"], + ), + mri_edit_wm_with_aseg=dict( + 
xor=["expert"], + ), + mri_em_register=dict( + xor=["expert"], + ), + mri_fill=dict( + xor=["expert"], + ), + mri_mask=dict( + xor=["expert"], + ), + mri_normalize=dict( + xor=["expert"], + ), + mri_pretess=dict( + xor=["expert"], + ), + mri_remove_neck=dict( + xor=["expert"], + ), + mri_segment=dict( + xor=["expert"], + ), + mri_segstats=dict( + xor=["expert"], + ), + mri_tessellate=dict( + xor=["expert"], + ), + mri_watershed=dict( + xor=["expert"], + ), + mris_anatomical_stats=dict( + xor=["expert"], + ), + mris_ca_label=dict( + xor=["expert"], + ), + mris_fix_topology=dict( + xor=["expert"], + ), + mris_inflate=dict( + xor=["expert"], + ), + mris_make_surfaces=dict( + xor=["expert"], + ), + mris_register=dict( + xor=["expert"], + ), + mris_smooth=dict( + xor=["expert"], + ), + mris_sphere=dict( + xor=["expert"], + ), + mris_surf2vol=dict( + xor=["expert"], + ), + mrisp_paint=dict( + xor=["expert"], + ), + openmp=dict( + argstr="-openmp %d", + ), + parallel=dict( + argstr="-parallel", + ), + subject_id=dict( + argstr="-subjid %s", + usedefault=True, + ), + subjects_dir=dict( + argstr="-sd %s", + genfile=True, + hash_files=False, + ), + talairach=dict( + xor=["expert"], + ), + use_FLAIR=dict( + argstr="-FLAIRpial", + min_ver="5.3.0", + xor=["use_T2"], + ), + use_T2=dict( + argstr="-T2pial", + min_ver="5.3.0", + xor=["use_FLAIR"], + ), + xopts=dict( + argstr="-xopts-%s", + ), ) inputs = ReconAll.input_spec() @@ -66,45 +176,145 @@ def test_ReconAll_inputs(): def test_ReconAll_outputs(): output_map = dict( - BA_stats=dict(altkey="BA", loc="stats",), - T1=dict(extensions=None, loc="mri",), - annot=dict(altkey="*annot", loc="label",), - aparc_a2009s_stats=dict(altkey="aparc.a2009s", loc="stats",), - aparc_aseg=dict(altkey="aparc*aseg", loc="mri",), - aparc_stats=dict(altkey="aparc", loc="stats",), - area_pial=dict(altkey="area.pial", loc="surf",), - aseg=dict(extensions=None, loc="mri",), - aseg_stats=dict(altkey="aseg", loc="stats",), - avg_curv=dict(loc="surf",), - brain=dict(extensions=None, loc="mri",), - brainmask=dict(extensions=None, loc="mri",), - curv=dict(loc="surf",), - curv_pial=dict(altkey="curv.pial", loc="surf",), - curv_stats=dict(altkey="curv", loc="stats",), - entorhinal_exvivo_stats=dict(altkey="entorhinal_exvivo", loc="stats",), - filled=dict(extensions=None, loc="mri",), - graymid=dict(altkey=["graymid", "midthickness"], loc="surf",), - inflated=dict(loc="surf",), - jacobian_white=dict(loc="surf",), - label=dict(altkey="*label", loc="label",), - norm=dict(extensions=None, loc="mri",), - nu=dict(extensions=None, loc="mri",), - orig=dict(extensions=None, loc="mri",), - pial=dict(loc="surf",), - rawavg=dict(extensions=None, loc="mri",), - ribbon=dict(altkey="*ribbon", loc="mri",), - smoothwm=dict(loc="surf",), - sphere=dict(loc="surf",), - sphere_reg=dict(altkey="sphere.reg", loc="surf",), + BA_stats=dict( + altkey="BA", + loc="stats", + ), + T1=dict( + extensions=None, + loc="mri", + ), + annot=dict( + altkey="*annot", + loc="label", + ), + aparc_a2009s_stats=dict( + altkey="aparc.a2009s", + loc="stats", + ), + aparc_aseg=dict( + altkey="aparc*aseg", + loc="mri", + ), + aparc_stats=dict( + altkey="aparc", + loc="stats", + ), + area_pial=dict( + altkey="area.pial", + loc="surf", + ), + aseg=dict( + extensions=None, + loc="mri", + ), + aseg_stats=dict( + altkey="aseg", + loc="stats", + ), + avg_curv=dict( + loc="surf", + ), + brain=dict( + extensions=None, + loc="mri", + ), + brainmask=dict( + extensions=None, + loc="mri", + ), + curv=dict( + loc="surf", + ), + 
curv_pial=dict( + altkey="curv.pial", + loc="surf", + ), + curv_stats=dict( + altkey="curv", + loc="stats", + ), + entorhinal_exvivo_stats=dict( + altkey="entorhinal_exvivo", + loc="stats", + ), + filled=dict( + extensions=None, + loc="mri", + ), + graymid=dict( + altkey=["graymid", "midthickness"], + loc="surf", + ), + inflated=dict( + loc="surf", + ), + jacobian_white=dict( + loc="surf", + ), + label=dict( + altkey="*label", + loc="label", + ), + norm=dict( + extensions=None, + loc="mri", + ), + nu=dict( + extensions=None, + loc="mri", + ), + orig=dict( + extensions=None, + loc="mri", + ), + pial=dict( + loc="surf", + ), + rawavg=dict( + extensions=None, + loc="mri", + ), + ribbon=dict( + altkey="*ribbon", + loc="mri", + ), + smoothwm=dict( + loc="surf", + ), + sphere=dict( + loc="surf", + ), + sphere_reg=dict( + altkey="sphere.reg", + loc="surf", + ), subject_id=dict(), subjects_dir=dict(), - sulc=dict(loc="surf",), - thickness=dict(loc="surf",), - volume=dict(loc="surf",), - white=dict(loc="surf",), - wm=dict(extensions=None, loc="mri",), - wmparc=dict(extensions=None, loc="mri",), - wmparc_stats=dict(altkey="wmparc", loc="stats",), + sulc=dict( + loc="surf", + ), + thickness=dict( + loc="surf", + ), + volume=dict( + loc="surf", + ), + white=dict( + loc="surf", + ), + wm=dict( + extensions=None, + loc="mri", + ), + wmparc=dict( + extensions=None, + loc="mri", + ), + wmparc_stats=dict( + altkey="wmparc", + loc="stats", + ), ) outputs = ReconAll.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Register.py b/nipype/interfaces/freesurfer/tests/test_auto_Register.py index d2add3db5b..c10daabd58 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Register.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Register.py @@ -4,17 +4,46 @@ def test_Register_inputs(): input_map = dict( - args=dict(argstr="%s",), - curv=dict(argstr="-curv", requires=["in_smoothwm"],), - environ=dict(nohash=True, usedefault=True,), - in_smoothwm=dict(copyfile=True, extensions=None,), - in_sulc=dict(copyfile=True, extensions=None, mandatory=True,), + args=dict( + argstr="%s", + ), + curv=dict( + argstr="-curv", + requires=["in_smoothwm"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_smoothwm=dict( + copyfile=True, + extensions=None, + ), + in_sulc=dict( + copyfile=True, + extensions=None, + mandatory=True, + ), in_surf=dict( - argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-3, + argstr="%s", + copyfile=True, + extensions=None, + mandatory=True, + position=-3, + ), + out_file=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, ), - out_file=dict(argstr="%s", extensions=None, genfile=True, position=-1,), subjects_dir=dict(), - target=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + target=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), ) inputs = Register.input_spec() @@ -24,7 +53,11 @@ def test_Register_inputs(): def test_Register_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Register.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py b/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py index 39a7e754bb..f66ac1bda7 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py 
@@ -4,13 +4,38 @@ def test_RegisterAVItoTalairach_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), - out_file=dict(argstr="%s", extensions=None, position=3, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), + out_file=dict( + argstr="%s", + extensions=None, + position=3, + usedefault=True, + ), subjects_dir=dict(), - target=dict(argstr="%s", extensions=None, mandatory=True, position=1,), - vox2vox=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + target=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + vox2vox=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), ) inputs = RegisterAVItoTalairach.input_spec() @@ -21,8 +46,13 @@ def test_RegisterAVItoTalairach_inputs(): def test_RegisterAVItoTalairach_outputs(): output_map = dict( - log_file=dict(extensions=None, usedefault=True,), - out_file=dict(extensions=None,), + log_file=dict( + extensions=None, + usedefault=True, + ), + out_file=dict( + extensions=None, + ), ) outputs = RegisterAVItoTalairach.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py b/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py index fd459f14a7..eeac74f722 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py @@ -4,10 +4,24 @@ def test_RelabelHypointensities_inputs(): input_map = dict( - args=dict(argstr="%s",), - aseg=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), - environ=dict(nohash=True, usedefault=True,), - lh_white=dict(copyfile=True, extensions=None, mandatory=True,), + args=dict( + argstr="%s", + ), + aseg=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + lh_white=dict( + copyfile=True, + extensions=None, + mandatory=True, + ), out_file=dict( argstr="%s", extensions=None, @@ -17,9 +31,17 @@ def test_RelabelHypointensities_inputs(): name_template="%s.hypos.mgz", position=-1, ), - rh_white=dict(copyfile=True, extensions=None, mandatory=True,), + rh_white=dict( + copyfile=True, + extensions=None, + mandatory=True, + ), subjects_dir=dict(), - surf_directory=dict(argstr="%s", position=-2, usedefault=True,), + surf_directory=dict( + argstr="%s", + position=-2, + usedefault=True, + ), ) inputs = RelabelHypointensities.input_spec() @@ -29,7 +51,12 @@ def test_RelabelHypointensities_inputs(): def test_RelabelHypointensities_outputs(): - output_map = dict(out_file=dict(argstr="%s", extensions=None,),) + output_map = dict( + out_file=dict( + argstr="%s", + extensions=None, + ), + ) outputs = RelabelHypointensities.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py b/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py index d94124a82b..735ea7b84a 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py @@ -4,10 +4,19 @@ def test_RemoveIntersection_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + 
argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-2, + argstr="%s", + copyfile=True, + extensions=None, + mandatory=True, + position=-2, ), out_file=dict( argstr="%s", @@ -28,7 +37,11 @@ def test_RemoveIntersection_inputs(): def test_RemoveIntersection_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = RemoveIntersection.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py b/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py index 4050db776b..3d2ce30cbd 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py @@ -4,9 +4,19 @@ def test_RemoveNeck_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-4,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-4, + ), out_file=dict( argstr="%s", extensions=None, @@ -16,10 +26,22 @@ def test_RemoveNeck_inputs(): name_template="%s_noneck", position=-1, ), - radius=dict(argstr="-radius %d",), + radius=dict( + argstr="-radius %d", + ), subjects_dir=dict(), - template=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - transform=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + template=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + transform=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), ) inputs = RemoveNeck.input_spec() @@ -29,7 +51,11 @@ def test_RemoveNeck_inputs(): def test_RemoveNeck_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = RemoveNeck.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Resample.py b/nipype/interfaces/freesurfer/tests/test_auto_Resample.py index 0dccad303c..280a8a4cc1 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Resample.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Resample.py @@ -4,14 +4,30 @@ def test_Resample_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=-2,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + position=-2, + ), resampled_file=dict( - argstr="-o %s", extensions=None, genfile=True, position=-1, + argstr="-o %s", + extensions=None, + genfile=True, + position=-1, ), subjects_dir=dict(), - voxel_size=dict(argstr="-vs %.2f %.2f %.2f", mandatory=True,), + voxel_size=dict( + argstr="-vs %.2f %.2f %.2f", + mandatory=True, + ), ) inputs = Resample.input_spec() @@ -21,7 +37,11 @@ def test_Resample_inputs(): def test_Resample_outputs(): - output_map = dict(resampled_file=dict(extensions=None,),) + output_map = dict( + resampled_file=dict( + extensions=None, + ), + ) outputs = Resample.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py b/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py index 7dde230eb7..3f7e1b96a0 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py @@ -4,38 +4,113 @@ def test_RobustRegister_inputs(): input_map = dict( - args=dict(argstr="%s",), - auto_sens=dict(argstr="--satit", mandatory=True, xor=["outlier_sens"],), - environ=dict(nohash=True, usedefault=True,), - est_int_scale=dict(argstr="--iscale",), - force_double=dict(argstr="--doubleprec",), - force_float=dict(argstr="--floattype",), - half_source=dict(argstr="--halfmov %s",), - half_source_xfm=dict(argstr="--halfmovlta %s",), - half_targ=dict(argstr="--halfdst %s",), - half_targ_xfm=dict(argstr="--halfdstlta %s",), - half_weights=dict(argstr="--halfweights %s",), - high_iterations=dict(argstr="--highit %d",), - in_xfm_file=dict(argstr="--transform", extensions=None,), - init_orient=dict(argstr="--initorient",), - iteration_thresh=dict(argstr="--epsit %.3f",), - least_squares=dict(argstr="--leastsquares",), - mask_source=dict(argstr="--maskmov %s", extensions=None,), - mask_target=dict(argstr="--maskdst %s", extensions=None,), - max_iterations=dict(argstr="--maxit %d",), - no_init=dict(argstr="--noinit",), - no_multi=dict(argstr="--nomulti",), - out_reg_file=dict(argstr="--lta %s", usedefault=True,), - outlier_limit=dict(argstr="--wlimit %.3f",), - outlier_sens=dict(argstr="--sat %.4f", mandatory=True, xor=["auto_sens"],), - registered_file=dict(argstr="--warp %s",), - source_file=dict(argstr="--mov %s", extensions=None, mandatory=True,), + args=dict( + argstr="%s", + ), + auto_sens=dict( + argstr="--satit", + mandatory=True, + xor=["outlier_sens"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + est_int_scale=dict( + argstr="--iscale", + ), + force_double=dict( + argstr="--doubleprec", + ), + force_float=dict( + argstr="--floattype", + ), + half_source=dict( + argstr="--halfmov %s", + ), + half_source_xfm=dict( + argstr="--halfmovlta %s", + ), + half_targ=dict( + argstr="--halfdst %s", + ), + half_targ_xfm=dict( + argstr="--halfdstlta %s", + ), + half_weights=dict( + argstr="--halfweights %s", + ), + high_iterations=dict( + argstr="--highit %d", + ), + in_xfm_file=dict( + argstr="--transform", + extensions=None, + ), + init_orient=dict( + argstr="--initorient", + ), + iteration_thresh=dict( + argstr="--epsit %.3f", + ), + least_squares=dict( + argstr="--leastsquares", + ), + mask_source=dict( + argstr="--maskmov %s", + extensions=None, + ), + mask_target=dict( + argstr="--maskdst %s", + extensions=None, + ), + max_iterations=dict( + argstr="--maxit %d", + ), + no_init=dict( + argstr="--noinit", + ), + no_multi=dict( + argstr="--nomulti", + ), + out_reg_file=dict( + argstr="--lta %s", + usedefault=True, + ), + outlier_limit=dict( + argstr="--wlimit %.3f", + ), + outlier_sens=dict( + argstr="--sat %.4f", + mandatory=True, + xor=["auto_sens"], + ), + registered_file=dict( + argstr="--warp %s", + ), + source_file=dict( + argstr="--mov %s", + extensions=None, + mandatory=True, + ), subjects_dir=dict(), - subsample_thresh=dict(argstr="--subsample %d",), - target_file=dict(argstr="--dst %s", extensions=None, mandatory=True,), - trans_only=dict(argstr="--transonly",), - weights_file=dict(argstr="--weights %s",), - write_vo2vox=dict(argstr="--vox2vox",), + subsample_thresh=dict( + argstr="--subsample %d", + ), + target_file=dict( + argstr="--dst %s", + extensions=None, + 
mandatory=True, + ), + trans_only=dict( + argstr="--transonly", + ), + weights_file=dict( + argstr="--weights %s", + ), + write_vo2vox=dict( + argstr="--vox2vox", + ), ) inputs = RobustRegister.input_spec() @@ -46,14 +121,30 @@ def test_RobustRegister_inputs(): def test_RobustRegister_outputs(): output_map = dict( - half_source=dict(extensions=None,), - half_source_xfm=dict(extensions=None,), - half_targ=dict(extensions=None,), - half_targ_xfm=dict(extensions=None,), - half_weights=dict(extensions=None,), - out_reg_file=dict(extensions=None,), - registered_file=dict(extensions=None,), - weights_file=dict(extensions=None,), + half_source=dict( + extensions=None, + ), + half_source_xfm=dict( + extensions=None, + ), + half_targ=dict( + extensions=None, + ), + half_targ_xfm=dict( + extensions=None, + ), + half_weights=dict( + extensions=None, + ), + out_reg_file=dict( + extensions=None, + ), + registered_file=dict( + extensions=None, + ), + weights_file=dict( + extensions=None, + ), ) outputs = RobustRegister.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py b/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py index 3ee33a567e..8c180332db 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py @@ -4,30 +4,65 @@ def test_RobustTemplate_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), auto_detect_sensitivity=dict( - argstr="--satit", mandatory=True, xor=["outlier_sensitivity"], - ), - average_metric=dict(argstr="--average %d",), - environ=dict(nohash=True, usedefault=True,), - fixed_timepoint=dict(argstr="--fixtp",), - in_files=dict(argstr="--mov %s", mandatory=True,), - in_intensity_scales=dict(argstr="--iscalein %s",), - initial_timepoint=dict(argstr="--inittp %d",), - initial_transforms=dict(argstr="--ixforms %s",), - intensity_scaling=dict(argstr="--iscale",), - no_iteration=dict(argstr="--noit",), + argstr="--satit", + mandatory=True, + xor=["outlier_sensitivity"], + ), + average_metric=dict( + argstr="--average %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_timepoint=dict( + argstr="--fixtp", + ), + in_files=dict( + argstr="--mov %s", + mandatory=True, + ), + in_intensity_scales=dict( + argstr="--iscalein %s", + ), + initial_timepoint=dict( + argstr="--inittp %d", + ), + initial_transforms=dict( + argstr="--ixforms %s", + ), + intensity_scaling=dict( + argstr="--iscale", + ), + no_iteration=dict( + argstr="--noit", + ), num_threads=dict(), out_file=dict( - argstr="--template %s", extensions=None, mandatory=True, usedefault=True, + argstr="--template %s", + extensions=None, + mandatory=True, + usedefault=True, ), outlier_sensitivity=dict( - argstr="--sat %.4f", mandatory=True, xor=["auto_detect_sensitivity"], + argstr="--sat %.4f", + mandatory=True, + xor=["auto_detect_sensitivity"], + ), + scaled_intensity_outputs=dict( + argstr="--iscaleout %s", ), - scaled_intensity_outputs=dict(argstr="--iscaleout %s",), subjects_dir=dict(), - subsample_threshold=dict(argstr="--subsample %d",), - transform_outputs=dict(argstr="--lta %s",), + subsample_threshold=dict( + argstr="--subsample %d", + ), + transform_outputs=dict( + argstr="--lta %s", + ), ) inputs = RobustTemplate.input_spec() @@ -38,7 +73,9 @@ def test_RobustTemplate_inputs(): def test_RobustTemplate_outputs(): output_map = dict( - out_file=dict(extensions=None,), + out_file=dict( + extensions=None, + ), scaled_intensity_outputs=dict(), 
transform_outputs=dict(), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py b/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py index 67cc1fd244..de9ffe2485 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py @@ -4,31 +4,83 @@ def test_SampleToSurface_inputs(): input_map = dict( - apply_rot=dict(argstr="--rot %.3f %.3f %.3f",), - apply_trans=dict(argstr="--trans %.3f %.3f %.3f",), - args=dict(argstr="%s",), - cortex_mask=dict(argstr="--cortex", xor=["mask_label"],), - environ=dict(nohash=True, usedefault=True,), - fix_tk_reg=dict(argstr="--fixtkreg",), - float2int_method=dict(argstr="--float2int %s",), - frame=dict(argstr="--frame %d",), - hemi=dict(argstr="--hemi %s", mandatory=True,), - hits_file=dict(argstr="--srchit %s",), - hits_type=dict(argstr="--srchit_type",), - ico_order=dict(argstr="--icoorder %d", requires=["target_subject"],), - interp_method=dict(argstr="--interp %s",), - mask_label=dict(argstr="--mask %s", extensions=None, xor=["cortex_mask"],), + apply_rot=dict( + argstr="--rot %.3f %.3f %.3f", + ), + apply_trans=dict( + argstr="--trans %.3f %.3f %.3f", + ), + args=dict( + argstr="%s", + ), + cortex_mask=dict( + argstr="--cortex", + xor=["mask_label"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fix_tk_reg=dict( + argstr="--fixtkreg", + ), + float2int_method=dict( + argstr="--float2int %s", + ), + frame=dict( + argstr="--frame %d", + ), + hemi=dict( + argstr="--hemi %s", + mandatory=True, + ), + hits_file=dict( + argstr="--srchit %s", + ), + hits_type=dict( + argstr="--srchit_type", + ), + ico_order=dict( + argstr="--icoorder %d", + requires=["target_subject"], + ), + interp_method=dict( + argstr="--interp %s", + ), + mask_label=dict( + argstr="--mask %s", + extensions=None, + xor=["cortex_mask"], + ), mni152reg=dict( argstr="--mni152reg", mandatory=True, xor=["reg_file", "reg_header", "mni152reg"], ), - no_reshape=dict(argstr="--noreshape", xor=["reshape"],), - out_file=dict(argstr="--o %s", extensions=None, genfile=True,), - out_type=dict(argstr="--out_type %s",), - override_reg_subj=dict(argstr="--srcsubject %s", requires=["subject_id"],), - projection_stem=dict(mandatory=True, xor=["sampling_method"],), - reference_file=dict(argstr="--ref %s", extensions=None,), + no_reshape=dict( + argstr="--noreshape", + xor=["reshape"], + ), + out_file=dict( + argstr="--o %s", + extensions=None, + genfile=True, + ), + out_type=dict( + argstr="--out_type %s", + ), + override_reg_subj=dict( + argstr="--srcsubject %s", + requires=["subject_id"], + ), + projection_stem=dict( + mandatory=True, + xor=["sampling_method"], + ), + reference_file=dict( + argstr="--ref %s", + extensions=None, + ), reg_file=dict( argstr="--reg %s", extensions=None, @@ -41,8 +93,13 @@ def test_SampleToSurface_inputs(): requires=["subject_id"], xor=["reg_file", "reg_header", "mni152reg"], ), - reshape=dict(argstr="--reshape", xor=["no_reshape"],), - reshape_slices=dict(argstr="--rf %d",), + reshape=dict( + argstr="--reshape", + xor=["no_reshape"], + ), + reshape_slices=dict( + argstr="--rf %d", + ), sampling_method=dict( argstr="%s", mandatory=True, @@ -51,16 +108,35 @@ def test_SampleToSurface_inputs(): ), sampling_range=dict(), sampling_units=dict(), - scale_input=dict(argstr="--scale %.3f",), - smooth_surf=dict(argstr="--surf-fwhm %.3f",), - smooth_vol=dict(argstr="--fwhm %.3f",), - source_file=dict(argstr="--mov %s", extensions=None, mandatory=True,), + 
scale_input=dict( + argstr="--scale %.3f", + ), + smooth_surf=dict( + argstr="--surf-fwhm %.3f", + ), + smooth_vol=dict( + argstr="--fwhm %.3f", + ), + source_file=dict( + argstr="--mov %s", + extensions=None, + mandatory=True, + ), subject_id=dict(), subjects_dir=dict(), - surf_reg=dict(argstr="--surfreg %s", requires=["target_subject"],), - surface=dict(argstr="--surf %s",), - target_subject=dict(argstr="--trgsubject %s",), - vox_file=dict(argstr="--nvox %s",), + surf_reg=dict( + argstr="--surfreg %s", + requires=["target_subject"], + ), + surface=dict( + argstr="--surf %s", + ), + target_subject=dict( + argstr="--trgsubject %s", + ), + vox_file=dict( + argstr="--nvox %s", + ), ) inputs = SampleToSurface.input_spec() @@ -71,9 +147,15 @@ def test_SampleToSurface_inputs(): def test_SampleToSurface_outputs(): output_map = dict( - hits_file=dict(extensions=None,), - out_file=dict(extensions=None,), - vox_file=dict(extensions=None,), + hits_file=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + vox_file=dict( + extensions=None, + ), ) outputs = SampleToSurface.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py b/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py index 5cb7321d50..dfb82e8b85 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py @@ -9,72 +9,146 @@ def test_SegStats_inputs(): mandatory=True, xor=("segmentation_file", "annot", "surf_label"), ), - args=dict(argstr="%s",), - avgwf_file=dict(argstr="--avgwfvol %s",), - avgwf_txt_file=dict(argstr="--avgwf %s",), - brain_vol=dict(argstr="--%s",), - brainmask_file=dict(argstr="--brainmask %s", extensions=None,), - calc_power=dict(argstr="--%s",), - calc_snr=dict(argstr="--snr",), + args=dict( + argstr="%s", + ), + avgwf_file=dict( + argstr="--avgwfvol %s", + ), + avgwf_txt_file=dict( + argstr="--avgwf %s", + ), + brain_vol=dict( + argstr="--%s", + ), + brainmask_file=dict( + argstr="--brainmask %s", + extensions=None, + ), + calc_power=dict( + argstr="--%s", + ), + calc_snr=dict( + argstr="--snr", + ), color_table_file=dict( argstr="--ctab %s", extensions=None, xor=("color_table_file", "default_color_table", "gca_color_table"), ), - cortex_vol_from_surf=dict(argstr="--surf-ctx-vol",), + cortex_vol_from_surf=dict( + argstr="--surf-ctx-vol", + ), default_color_table=dict( argstr="--ctab-default", xor=("color_table_file", "default_color_table", "gca_color_table"), ), - empty=dict(argstr="--empty",), - environ=dict(nohash=True, usedefault=True,), - etiv=dict(argstr="--etiv",), + empty=dict( + argstr="--empty", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + etiv=dict( + argstr="--etiv", + ), etiv_only=dict(), - euler=dict(argstr="--euler",), - exclude_ctx_gm_wm=dict(argstr="--excl-ctxgmwm",), - exclude_id=dict(argstr="--excludeid %d",), - frame=dict(argstr="--frame %d",), + euler=dict( + argstr="--euler", + ), + exclude_ctx_gm_wm=dict( + argstr="--excl-ctxgmwm", + ), + exclude_id=dict( + argstr="--excludeid %d", + ), + frame=dict( + argstr="--frame %d", + ), gca_color_table=dict( argstr="--ctab-gca %s", extensions=None, xor=("color_table_file", "default_color_table", "gca_color_table"), ), - in_file=dict(argstr="--i %s", extensions=None,), - in_intensity=dict(argstr="--in %s --in-intensity-name %s", extensions=None,), + in_file=dict( + argstr="--i %s", + extensions=None, + ), + in_intensity=dict( + argstr="--in %s --in-intensity-name %s", + extensions=None, + ), intensity_units=dict( - 
argstr="--in-intensity-units %s", requires=["in_intensity"], + argstr="--in-intensity-units %s", + requires=["in_intensity"], + ), + mask_erode=dict( + argstr="--maskerode %d", + ), + mask_file=dict( + argstr="--mask %s", + extensions=None, + ), + mask_frame=dict( + requires=["mask_file"], + ), + mask_invert=dict( + argstr="--maskinvert", ), - mask_erode=dict(argstr="--maskerode %d",), - mask_file=dict(argstr="--mask %s", extensions=None,), - mask_frame=dict(requires=["mask_file"],), - mask_invert=dict(argstr="--maskinvert",), mask_sign=dict(), - mask_thresh=dict(argstr="--maskthresh %f",), - multiply=dict(argstr="--mul %f",), - non_empty_only=dict(argstr="--nonempty",), - partial_volume_file=dict(argstr="--pv %s", extensions=None,), - segment_id=dict(argstr="--id %s...",), + mask_thresh=dict( + argstr="--maskthresh %f", + ), + multiply=dict( + argstr="--mul %f", + ), + non_empty_only=dict( + argstr="--nonempty", + ), + partial_volume_file=dict( + argstr="--pv %s", + extensions=None, + ), + segment_id=dict( + argstr="--id %s...", + ), segmentation_file=dict( argstr="--seg %s", extensions=None, mandatory=True, xor=("segmentation_file", "annot", "surf_label"), ), - sf_avg_file=dict(argstr="--sfavg %s",), - subcort_gm=dict(argstr="--subcortgray",), + sf_avg_file=dict( + argstr="--sfavg %s", + ), + subcort_gm=dict( + argstr="--subcortgray", + ), subjects_dir=dict(), summary_file=dict( - argstr="--sum %s", extensions=None, genfile=True, position=-1, + argstr="--sum %s", + extensions=None, + genfile=True, + position=-1, + ), + supratent=dict( + argstr="--supratent", ), - supratent=dict(argstr="--supratent",), surf_label=dict( argstr="--slabel %s %s %s", mandatory=True, xor=("segmentation_file", "annot", "surf_label"), ), - total_gray=dict(argstr="--totalgray",), - vox=dict(argstr="--vox %s",), - wm_vol_from_surf=dict(argstr="--surf-wm-vol",), + total_gray=dict( + argstr="--totalgray", + ), + vox=dict( + argstr="--vox %s", + ), + wm_vol_from_surf=dict( + argstr="--surf-wm-vol", + ), ) inputs = SegStats.input_spec() @@ -85,10 +159,18 @@ def test_SegStats_inputs(): def test_SegStats_outputs(): output_map = dict( - avgwf_file=dict(extensions=None,), - avgwf_txt_file=dict(extensions=None,), - sf_avg_file=dict(extensions=None,), - summary_file=dict(extensions=None,), + avgwf_file=dict( + extensions=None, + ), + avgwf_txt_file=dict( + extensions=None, + ), + sf_avg_file=dict( + extensions=None, + ), + summary_file=dict( + extensions=None, + ), ) outputs = SegStats.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py index 2301087a3b..0121dd7d9e 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py @@ -9,84 +9,190 @@ def test_SegStatsReconAll_inputs(): mandatory=True, xor=("segmentation_file", "annot", "surf_label"), ), - args=dict(argstr="%s",), - aseg=dict(extensions=None,), - avgwf_file=dict(argstr="--avgwfvol %s",), - avgwf_txt_file=dict(argstr="--avgwf %s",), - brain_vol=dict(argstr="--%s",), - brainmask_file=dict(argstr="--brainmask %s", extensions=None,), - calc_power=dict(argstr="--%s",), - calc_snr=dict(argstr="--snr",), + args=dict( + argstr="%s", + ), + aseg=dict( + extensions=None, + ), + avgwf_file=dict( + argstr="--avgwfvol %s", + ), + avgwf_txt_file=dict( + argstr="--avgwf %s", + ), + brain_vol=dict( + argstr="--%s", + ), + brainmask_file=dict( + argstr="--brainmask %s", + extensions=None, + 
), + calc_power=dict( + argstr="--%s", + ), + calc_snr=dict( + argstr="--snr", + ), color_table_file=dict( argstr="--ctab %s", extensions=None, xor=("color_table_file", "default_color_table", "gca_color_table"), ), copy_inputs=dict(), - cortex_vol_from_surf=dict(argstr="--surf-ctx-vol",), + cortex_vol_from_surf=dict( + argstr="--surf-ctx-vol", + ), default_color_table=dict( argstr="--ctab-default", xor=("color_table_file", "default_color_table", "gca_color_table"), ), - empty=dict(argstr="--empty",), - environ=dict(nohash=True, usedefault=True,), - etiv=dict(argstr="--etiv",), + empty=dict( + argstr="--empty", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + etiv=dict( + argstr="--etiv", + ), etiv_only=dict(), - euler=dict(argstr="--euler",), - exclude_ctx_gm_wm=dict(argstr="--excl-ctxgmwm",), - exclude_id=dict(argstr="--excludeid %d",), - frame=dict(argstr="--frame %d",), + euler=dict( + argstr="--euler", + ), + exclude_ctx_gm_wm=dict( + argstr="--excl-ctxgmwm", + ), + exclude_id=dict( + argstr="--excludeid %d", + ), + frame=dict( + argstr="--frame %d", + ), gca_color_table=dict( argstr="--ctab-gca %s", extensions=None, xor=("color_table_file", "default_color_table", "gca_color_table"), ), - in_file=dict(argstr="--i %s", extensions=None,), - in_intensity=dict(argstr="--in %s --in-intensity-name %s", extensions=None,), + in_file=dict( + argstr="--i %s", + extensions=None, + ), + in_intensity=dict( + argstr="--in %s --in-intensity-name %s", + extensions=None, + ), intensity_units=dict( - argstr="--in-intensity-units %s", requires=["in_intensity"], - ), - lh_orig_nofix=dict(extensions=None, mandatory=True,), - lh_pial=dict(extensions=None, mandatory=True,), - lh_white=dict(extensions=None, mandatory=True,), - mask_erode=dict(argstr="--maskerode %d",), - mask_file=dict(argstr="--mask %s", extensions=None,), - mask_frame=dict(requires=["mask_file"],), - mask_invert=dict(argstr="--maskinvert",), + argstr="--in-intensity-units %s", + requires=["in_intensity"], + ), + lh_orig_nofix=dict( + extensions=None, + mandatory=True, + ), + lh_pial=dict( + extensions=None, + mandatory=True, + ), + lh_white=dict( + extensions=None, + mandatory=True, + ), + mask_erode=dict( + argstr="--maskerode %d", + ), + mask_file=dict( + argstr="--mask %s", + extensions=None, + ), + mask_frame=dict( + requires=["mask_file"], + ), + mask_invert=dict( + argstr="--maskinvert", + ), mask_sign=dict(), - mask_thresh=dict(argstr="--maskthresh %f",), - multiply=dict(argstr="--mul %f",), - non_empty_only=dict(argstr="--nonempty",), - partial_volume_file=dict(argstr="--pv %s", extensions=None,), - presurf_seg=dict(extensions=None,), - rh_orig_nofix=dict(extensions=None, mandatory=True,), - rh_pial=dict(extensions=None, mandatory=True,), - rh_white=dict(extensions=None, mandatory=True,), - ribbon=dict(extensions=None, mandatory=True,), - segment_id=dict(argstr="--id %s...",), + mask_thresh=dict( + argstr="--maskthresh %f", + ), + multiply=dict( + argstr="--mul %f", + ), + non_empty_only=dict( + argstr="--nonempty", + ), + partial_volume_file=dict( + argstr="--pv %s", + extensions=None, + ), + presurf_seg=dict( + extensions=None, + ), + rh_orig_nofix=dict( + extensions=None, + mandatory=True, + ), + rh_pial=dict( + extensions=None, + mandatory=True, + ), + rh_white=dict( + extensions=None, + mandatory=True, + ), + ribbon=dict( + extensions=None, + mandatory=True, + ), + segment_id=dict( + argstr="--id %s...", + ), segmentation_file=dict( argstr="--seg %s", extensions=None, mandatory=True, xor=("segmentation_file", 
"annot", "surf_label"), ), - sf_avg_file=dict(argstr="--sfavg %s",), - subcort_gm=dict(argstr="--subcortgray",), - subject_id=dict(argstr="--subject %s", mandatory=True, usedefault=True,), + sf_avg_file=dict( + argstr="--sfavg %s", + ), + subcort_gm=dict( + argstr="--subcortgray", + ), + subject_id=dict( + argstr="--subject %s", + mandatory=True, + usedefault=True, + ), subjects_dir=dict(), summary_file=dict( - argstr="--sum %s", extensions=None, genfile=True, position=-1, + argstr="--sum %s", + extensions=None, + genfile=True, + position=-1, + ), + supratent=dict( + argstr="--supratent", ), - supratent=dict(argstr="--supratent",), surf_label=dict( argstr="--slabel %s %s %s", mandatory=True, xor=("segmentation_file", "annot", "surf_label"), ), - total_gray=dict(argstr="--totalgray",), - transform=dict(extensions=None, mandatory=True,), - vox=dict(argstr="--vox %s",), - wm_vol_from_surf=dict(argstr="--surf-wm-vol",), + total_gray=dict( + argstr="--totalgray", + ), + transform=dict( + extensions=None, + mandatory=True, + ), + vox=dict( + argstr="--vox %s", + ), + wm_vol_from_surf=dict( + argstr="--surf-wm-vol", + ), ) inputs = SegStatsReconAll.input_spec() @@ -97,10 +203,18 @@ def test_SegStatsReconAll_inputs(): def test_SegStatsReconAll_outputs(): output_map = dict( - avgwf_file=dict(extensions=None,), - avgwf_txt_file=dict(extensions=None,), - sf_avg_file=dict(extensions=None,), - summary_file=dict(extensions=None,), + avgwf_file=dict( + extensions=None, + ), + avgwf_txt_file=dict( + extensions=None, + ), + sf_avg_file=dict( + extensions=None, + ), + summary_file=dict( + extensions=None, + ), ) outputs = SegStatsReconAll.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py b/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py index 9343177c30..7c16a1f476 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py @@ -4,11 +4,23 @@ def test_SegmentCC_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), copy_inputs=dict(), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-aseg %s", extensions=None, mandatory=True,), - in_norm=dict(extensions=None, mandatory=True,), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-aseg %s", + extensions=None, + mandatory=True, + ), + in_norm=dict( + extensions=None, + mandatory=True, + ), out_file=dict( argstr="-o %s", extensions=None, @@ -17,8 +29,17 @@ def test_SegmentCC_inputs(): name_source=["in_file"], name_template="%s.auto.mgz", ), - out_rotation=dict(argstr="-lta %s", extensions=None, mandatory=True,), - subject_id=dict(argstr="%s", mandatory=True, position=-1, usedefault=True,), + out_rotation=dict( + argstr="-lta %s", + extensions=None, + mandatory=True, + ), + subject_id=dict( + argstr="%s", + mandatory=True, + position=-1, + usedefault=True, + ), subjects_dir=dict(), ) inputs = SegmentCC.input_spec() @@ -30,7 +51,12 @@ def test_SegmentCC_inputs(): def test_SegmentCC_outputs(): output_map = dict( - out_file=dict(extensions=None,), out_rotation=dict(extensions=None,), + out_file=dict( + extensions=None, + ), + out_rotation=dict( + extensions=None, + ), ) outputs = SegmentCC.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py b/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py index 87f4af54c0..8aac066c26 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py +++ 
b/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py @@ -4,10 +4,25 @@ def test_SegmentWM_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + out_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), subjects_dir=dict(), ) inputs = SegmentWM.input_spec() @@ -18,7 +33,11 @@ def test_SegmentWM_inputs(): def test_SegmentWM_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SegmentWM.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py b/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py index bc180a00f6..e20de1c795 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py @@ -4,14 +4,41 @@ def test_Smooth_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="--i %s", extensions=None, mandatory=True,), - num_iters=dict(argstr="--niters %d", mandatory=True, xor=["surface_fwhm"],), - proj_frac=dict(argstr="--projfrac %s", xor=["proj_frac_avg"],), - proj_frac_avg=dict(argstr="--projfrac-avg %.2f %.2f %.2f", xor=["proj_frac"],), - reg_file=dict(argstr="--reg %s", extensions=None, mandatory=True,), - smoothed_file=dict(argstr="--o %s", extensions=None, genfile=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="--i %s", + extensions=None, + mandatory=True, + ), + num_iters=dict( + argstr="--niters %d", + mandatory=True, + xor=["surface_fwhm"], + ), + proj_frac=dict( + argstr="--projfrac %s", + xor=["proj_frac_avg"], + ), + proj_frac_avg=dict( + argstr="--projfrac-avg %.2f %.2f %.2f", + xor=["proj_frac"], + ), + reg_file=dict( + argstr="--reg %s", + extensions=None, + mandatory=True, + ), + smoothed_file=dict( + argstr="--o %s", + extensions=None, + genfile=True, + ), subjects_dir=dict(), surface_fwhm=dict( argstr="--fwhm %f", @@ -19,7 +46,9 @@ def test_Smooth_inputs(): requires=["reg_file"], xor=["num_iters"], ), - vol_fwhm=dict(argstr="--vol-fwhm %f",), + vol_fwhm=dict( + argstr="--vol-fwhm %f", + ), ) inputs = Smooth.input_spec() @@ -29,7 +58,11 @@ def test_Smooth_inputs(): def test_Smooth_outputs(): - output_map = dict(smoothed_file=dict(extensions=None,),) + output_map = dict( + smoothed_file=dict( + extensions=None, + ), + ) outputs = Smooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py b/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py index 517a223432..5f97cc281b 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py @@ -4,25 +4,65 @@ def test_SmoothTessellation_inputs(): input_map = dict( - args=dict(argstr="%s",), - curvature_averaging_iterations=dict(argstr="-a %d",), - disable_estimates=dict(argstr="-nw",), - environ=dict(nohash=True, usedefault=True,), - 
gaussian_curvature_norm_steps=dict(argstr="%d",), - gaussian_curvature_smoothing_steps=dict(argstr=" %d",), + args=dict( + argstr="%s", + ), + curvature_averaging_iterations=dict( + argstr="-a %d", + ), + disable_estimates=dict( + argstr="-nw", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gaussian_curvature_norm_steps=dict( + argstr="%d", + ), + gaussian_curvature_smoothing_steps=dict( + argstr=" %d", + ), in_file=dict( - argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-2, - ), - normalize_area=dict(argstr="-area",), - out_area_file=dict(argstr="-b %s", extensions=None,), - out_curvature_file=dict(argstr="-c %s", extensions=None,), - out_file=dict(argstr="%s", extensions=None, genfile=True, position=-1,), - seed=dict(argstr="-seed %d",), - smoothing_iterations=dict(argstr="-n %d",), - snapshot_writing_iterations=dict(argstr="-w %d",), + argstr="%s", + copyfile=True, + extensions=None, + mandatory=True, + position=-2, + ), + normalize_area=dict( + argstr="-area", + ), + out_area_file=dict( + argstr="-b %s", + extensions=None, + ), + out_curvature_file=dict( + argstr="-c %s", + extensions=None, + ), + out_file=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + seed=dict( + argstr="-seed %d", + ), + smoothing_iterations=dict( + argstr="-n %d", + ), + snapshot_writing_iterations=dict( + argstr="-w %d", + ), subjects_dir=dict(), - use_gaussian_curvature_smoothing=dict(argstr="-g",), - use_momentum=dict(argstr="-m",), + use_gaussian_curvature_smoothing=dict( + argstr="-g", + ), + use_momentum=dict( + argstr="-m", + ), ) inputs = SmoothTessellation.input_spec() @@ -32,7 +72,11 @@ def test_SmoothTessellation_inputs(): def test_SmoothTessellation_outputs(): - output_map = dict(surface=dict(extensions=None,),) + output_map = dict( + surface=dict( + extensions=None, + ), + ) outputs = SmoothTessellation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py b/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py index 407354fbfe..84673e2951 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py @@ -4,13 +4,27 @@ def test_Sphere_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-2, + argstr="%s", + copyfile=True, + extensions=None, + mandatory=True, + position=-2, + ), + in_smoothwm=dict( + copyfile=True, + extensions=None, + ), + magic=dict( + argstr="-q", ), - in_smoothwm=dict(copyfile=True, extensions=None,), - magic=dict(argstr="-q",), num_threads=dict(), out_file=dict( argstr="%s", @@ -20,7 +34,9 @@ def test_Sphere_inputs(): name_template="%s.sphere", position=-1, ), - seed=dict(argstr="-seed %d",), + seed=dict( + argstr="-seed %d", + ), subjects_dir=dict(), ) inputs = Sphere.input_spec() @@ -31,7 +47,11 @@ def test_Sphere_inputs(): def test_Sphere_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Sphere.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py b/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py index 41c61ea0fa..73f5b3efd7 100644 --- 
a/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py @@ -4,19 +4,60 @@ def test_SphericalAverage_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - erode=dict(argstr="-erode %d",), - fname=dict(argstr="%s", mandatory=True, position=-5,), - hemisphere=dict(argstr="%s", mandatory=True, position=-4,), - in_average=dict(argstr="%s", genfile=True, position=-2,), - in_orig=dict(argstr="-orig %s", extensions=None,), - in_surf=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), - out_file=dict(argstr="%s", extensions=None, genfile=True, position=-1,), - subject_id=dict(argstr="-o %s", mandatory=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + erode=dict( + argstr="-erode %d", + ), + fname=dict( + argstr="%s", + mandatory=True, + position=-5, + ), + hemisphere=dict( + argstr="%s", + mandatory=True, + position=-4, + ), + in_average=dict( + argstr="%s", + genfile=True, + position=-2, + ), + in_orig=dict( + argstr="-orig %s", + extensions=None, + ), + in_surf=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + out_file=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + subject_id=dict( + argstr="-o %s", + mandatory=True, + ), subjects_dir=dict(), - threshold=dict(argstr="-t %.1f",), - which=dict(argstr="%s", mandatory=True, position=-6,), + threshold=dict( + argstr="-t %.1f", + ), + which=dict( + argstr="%s", + mandatory=True, + position=-6, + ), ) inputs = SphericalAverage.input_spec() @@ -26,7 +67,11 @@ def test_SphericalAverage_inputs(): def test_SphericalAverage_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SphericalAverage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py b/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py index c9e20a00b5..01dc354710 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py @@ -4,13 +4,29 @@ def test_Surface2VolTransform_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - hemi=dict(argstr="--hemi %s", mandatory=True,), - mkmask=dict(argstr="--mkmask", xor=["source_file"],), - projfrac=dict(argstr="--projfrac %s",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hemi=dict( + argstr="--hemi %s", + mandatory=True, + ), + mkmask=dict( + argstr="--mkmask", + xor=["source_file"], + ), + projfrac=dict( + argstr="--projfrac %s", + ), reg_file=dict( - argstr="--volreg %s", extensions=None, mandatory=True, xor=["subject_id"], + argstr="--volreg %s", + extensions=None, + mandatory=True, + xor=["subject_id"], ), source_file=dict( argstr="--surfval %s", @@ -19,10 +35,20 @@ def test_Surface2VolTransform_inputs(): mandatory=True, xor=["mkmask"], ), - subject_id=dict(argstr="--identity %s", xor=["reg_file"],), - subjects_dir=dict(argstr="--sd %s",), - surf_name=dict(argstr="--surf %s",), - template_file=dict(argstr="--template %s", extensions=None,), + subject_id=dict( + argstr="--identity %s", + xor=["reg_file"], + ), + subjects_dir=dict( + argstr="--sd %s", + ), + surf_name=dict( + argstr="--surf %s", + ), + template_file=dict( + 
argstr="--template %s", + extensions=None, + ), transformed_file=dict( argstr="--outvol %s", extensions=None, @@ -47,7 +73,12 @@ def test_Surface2VolTransform_inputs(): def test_Surface2VolTransform_outputs(): output_map = dict( - transformed_file=dict(extensions=None,), vertexvol_file=dict(extensions=None,), + transformed_file=dict( + extensions=None, + ), + vertexvol_file=dict( + extensions=None, + ), ) outputs = Surface2VolTransform.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py index 020f7af7a8..7876dfa1cc 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py @@ -4,16 +4,46 @@ def test_SurfaceSmooth_inputs(): input_map = dict( - args=dict(argstr="%s",), - cortex=dict(argstr="--cortex", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - fwhm=dict(argstr="--fwhm %.4f", xor=["smooth_iters"],), - hemi=dict(argstr="--hemi %s", mandatory=True,), - in_file=dict(argstr="--sval %s", extensions=None, mandatory=True,), - out_file=dict(argstr="--tval %s", extensions=None, genfile=True,), - reshape=dict(argstr="--reshape",), - smooth_iters=dict(argstr="--smooth %d", xor=["fwhm"],), - subject_id=dict(argstr="--s %s", mandatory=True,), + args=dict( + argstr="%s", + ), + cortex=dict( + argstr="--cortex", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fwhm=dict( + argstr="--fwhm %.4f", + xor=["smooth_iters"], + ), + hemi=dict( + argstr="--hemi %s", + mandatory=True, + ), + in_file=dict( + argstr="--sval %s", + extensions=None, + mandatory=True, + ), + out_file=dict( + argstr="--tval %s", + extensions=None, + genfile=True, + ), + reshape=dict( + argstr="--reshape", + ), + smooth_iters=dict( + argstr="--smooth %d", + xor=["fwhm"], + ), + subject_id=dict( + argstr="--s %s", + mandatory=True, + ), subjects_dir=dict(), ) inputs = SurfaceSmooth.input_spec() @@ -24,7 +54,11 @@ def test_SurfaceSmooth_inputs(): def test_SurfaceSmooth_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SurfaceSmooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py index b4b3e8a5e2..c778bcc959 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py @@ -4,51 +4,127 @@ def test_SurfaceSnapshots_inputs(): input_map = dict( - annot_file=dict(argstr="-annotation %s", extensions=None, xor=["annot_name"],), - annot_name=dict(argstr="-annotation %s", xor=["annot_file"],), - args=dict(argstr="%s",), - colortable=dict(argstr="-colortable %s", extensions=None,), - demean_overlay=dict(argstr="-zm",), - environ=dict(nohash=True, usedefault=True,), - hemi=dict(argstr="%s", mandatory=True, position=2,), + annot_file=dict( + argstr="-annotation %s", + extensions=None, + xor=["annot_name"], + ), + annot_name=dict( + argstr="-annotation %s", + xor=["annot_file"], + ), + args=dict( + argstr="%s", + ), + colortable=dict( + argstr="-colortable %s", + extensions=None, + ), + demean_overlay=dict( + argstr="-zm", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hemi=dict( + argstr="%s", + mandatory=True, + position=2, + ), identity_reg=dict( 
argstr="-overlay-reg-identity", xor=["overlay_reg", "identity_reg", "mni152_reg"], ), - invert_overlay=dict(argstr="-invphaseflag 1",), - label_file=dict(argstr="-label %s", extensions=None, xor=["label_name"],), - label_name=dict(argstr="-label %s", xor=["label_file"],), - label_outline=dict(argstr="-label-outline",), - label_under=dict(argstr="-labels-under",), + invert_overlay=dict( + argstr="-invphaseflag 1", + ), + label_file=dict( + argstr="-label %s", + extensions=None, + xor=["label_name"], + ), + label_name=dict( + argstr="-label %s", + xor=["label_file"], + ), + label_outline=dict( + argstr="-label-outline", + ), + label_under=dict( + argstr="-labels-under", + ), mni152_reg=dict( - argstr="-mni152reg", xor=["overlay_reg", "identity_reg", "mni152_reg"], + argstr="-mni152reg", + xor=["overlay_reg", "identity_reg", "mni152_reg"], + ), + orig_suffix=dict( + argstr="-orig %s", ), - orig_suffix=dict(argstr="-orig %s",), overlay=dict( - argstr="-overlay %s", extensions=None, requires=["overlay_range"], + argstr="-overlay %s", + extensions=None, + requires=["overlay_range"], + ), + overlay_range=dict( + argstr="%s", + ), + overlay_range_offset=dict( + argstr="-foffset %.3f", ), - overlay_range=dict(argstr="%s",), - overlay_range_offset=dict(argstr="-foffset %.3f",), overlay_reg=dict( argstr="-overlay-reg %s", extensions=None, xor=["overlay_reg", "identity_reg", "mni152_reg"], ), - patch_file=dict(argstr="-patch %s", extensions=None,), - reverse_overlay=dict(argstr="-revphaseflag 1",), + patch_file=dict( + argstr="-patch %s", + extensions=None, + ), + reverse_overlay=dict( + argstr="-revphaseflag 1", + ), screenshot_stem=dict(), - show_color_scale=dict(argstr="-colscalebarflag 1",), - show_color_text=dict(argstr="-colscaletext 1",), - show_curv=dict(argstr="-curv", xor=["show_gray_curv"],), - show_gray_curv=dict(argstr="-gray", xor=["show_curv"],), + show_color_scale=dict( + argstr="-colscalebarflag 1", + ), + show_color_text=dict( + argstr="-colscaletext 1", + ), + show_curv=dict( + argstr="-curv", + xor=["show_gray_curv"], + ), + show_gray_curv=dict( + argstr="-gray", + xor=["show_curv"], + ), six_images=dict(), - sphere_suffix=dict(argstr="-sphere %s",), - stem_template_args=dict(requires=["screenshot_stem"],), - subject_id=dict(argstr="%s", mandatory=True, position=1,), + sphere_suffix=dict( + argstr="-sphere %s", + ), + stem_template_args=dict( + requires=["screenshot_stem"], + ), + subject_id=dict( + argstr="%s", + mandatory=True, + position=1, + ), subjects_dir=dict(), - surface=dict(argstr="%s", mandatory=True, position=3,), - tcl_script=dict(argstr="%s", extensions=None, genfile=True,), - truncate_overlay=dict(argstr="-truncphaseflag 1",), + surface=dict( + argstr="%s", + mandatory=True, + position=3, + ), + tcl_script=dict( + argstr="%s", + extensions=None, + genfile=True, + ), + truncate_overlay=dict( + argstr="-truncphaseflag 1", + ), ) inputs = SurfaceSnapshots.input_spec() @@ -58,7 +134,9 @@ def test_SurfaceSnapshots_inputs(): def test_SurfaceSnapshots_outputs(): - output_map = dict(snapshots=dict(),) + output_map = dict( + snapshots=dict(), + ) outputs = SurfaceSnapshots.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py index d8bcf6eb28..88923befd4 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py @@ -4,12 +4,28 @@ def 
test_SurfaceTransform_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - hemi=dict(argstr="--hemi %s", mandatory=True,), - out_file=dict(argstr="--tval %s", extensions=None, genfile=True,), - reshape=dict(argstr="--reshape",), - reshape_factor=dict(argstr="--reshape-factor",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hemi=dict( + argstr="--hemi %s", + mandatory=True, + ), + out_file=dict( + argstr="--tval %s", + extensions=None, + genfile=True, + ), + reshape=dict( + argstr="--reshape", + ), + reshape_factor=dict( + argstr="--reshape-factor", + ), source_annot_file=dict( argstr="--sval-annot %s", extensions=None, @@ -22,12 +38,25 @@ def test_SurfaceTransform_inputs(): mandatory=True, xor=["source_annot_file"], ), - source_subject=dict(argstr="--srcsubject %s", mandatory=True,), - source_type=dict(argstr="--sfmt %s", requires=["source_file"],), + source_subject=dict( + argstr="--srcsubject %s", + mandatory=True, + ), + source_type=dict( + argstr="--sfmt %s", + requires=["source_file"], + ), subjects_dir=dict(), - target_ico_order=dict(argstr="--trgicoorder %d",), - target_subject=dict(argstr="--trgsubject %s", mandatory=True,), - target_type=dict(argstr="--tfmt %s",), + target_ico_order=dict( + argstr="--trgicoorder %d", + ), + target_subject=dict( + argstr="--trgsubject %s", + mandatory=True, + ), + target_type=dict( + argstr="--tfmt %s", + ), ) inputs = SurfaceTransform.input_spec() @@ -37,7 +66,11 @@ def test_SurfaceTransform_inputs(): def test_SurfaceTransform_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SurfaceTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py b/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py index 396d46e1a9..7d52c994bc 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py @@ -4,16 +4,50 @@ def test_SynthesizeFLASH_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - fixed_weighting=dict(argstr="-w", position=1,), - flip_angle=dict(argstr="%.2f", mandatory=True, position=3,), - out_file=dict(argstr="%s", extensions=None, genfile=True,), - pd_image=dict(argstr="%s", extensions=None, mandatory=True, position=6,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_weighting=dict( + argstr="-w", + position=1, + ), + flip_angle=dict( + argstr="%.2f", + mandatory=True, + position=3, + ), + out_file=dict( + argstr="%s", + extensions=None, + genfile=True, + ), + pd_image=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=6, + ), subjects_dir=dict(), - t1_image=dict(argstr="%s", extensions=None, mandatory=True, position=5,), - te=dict(argstr="%.3f", mandatory=True, position=4,), - tr=dict(argstr="%.2f", mandatory=True, position=2,), + t1_image=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=5, + ), + te=dict( + argstr="%.3f", + mandatory=True, + position=4, + ), + tr=dict( + argstr="%.2f", + mandatory=True, + position=2, + ), ) inputs = SynthesizeFLASH.input_spec() @@ -23,7 +57,11 @@ def test_SynthesizeFLASH_inputs(): def test_SynthesizeFLASH_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + 
extensions=None, + ), + ) outputs = SynthesizeFLASH.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py b/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py index 41ad7fef4e..384f44edd2 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py @@ -4,11 +4,26 @@ def test_TalairachAVI_inputs(): input_map = dict( - args=dict(argstr="%s",), - atlas=dict(argstr="--atlas %s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="--i %s", extensions=None, mandatory=True,), - out_file=dict(argstr="--xfm %s", extensions=None, mandatory=True,), + args=dict( + argstr="%s", + ), + atlas=dict( + argstr="--atlas %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="--i %s", + extensions=None, + mandatory=True, + ), + out_file=dict( + argstr="--xfm %s", + extensions=None, + mandatory=True, + ), subjects_dir=dict(), ) inputs = TalairachAVI.input_spec() @@ -20,9 +35,15 @@ def test_TalairachAVI_inputs(): def test_TalairachAVI_outputs(): output_map = dict( - out_file=dict(extensions=None,), - out_log=dict(extensions=None,), - out_txt=dict(extensions=None,), + out_file=dict( + extensions=None, + ), + out_log=dict( + extensions=None, + ), + out_txt=dict( + extensions=None, + ), ) outputs = TalairachAVI.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py b/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py index 52b07074e1..c6536186aa 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py @@ -4,9 +4,19 @@ def test_TalairachQC_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - log_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + log_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), subjects_dir=dict(), ) inputs = TalairachQC.input_spec() @@ -17,7 +27,12 @@ def test_TalairachQC_inputs(): def test_TalairachQC_outputs(): - output_map = dict(log_file=dict(extensions=None, usedefault=True,),) + output_map = dict( + log_file=dict( + extensions=None, + usedefault=True, + ), + ) outputs = TalairachQC.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py b/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py index 0e80196220..31cdedb679 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py @@ -4,27 +4,76 @@ def test_Tkregister2_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - fsl_in_matrix=dict(argstr="--fsl %s", extensions=None,), - fsl_out=dict(argstr="--fslregout %s",), - fstal=dict(argstr="--fstal", xor=["target_image", "moving_image", "reg_file"],), - fstarg=dict(argstr="--fstarg", xor=["target_image"],), - invert_lta_in=dict(requires=["lta_in"],), - invert_lta_out=dict(argstr="--ltaout-inv", requires=["lta_in"],), - lta_in=dict(argstr="--lta %s", extensions=None,), - lta_out=dict(argstr="--ltaout %s",), - moving_image=dict(argstr="--mov %s", extensions=None, mandatory=True,), - movscale=dict(argstr="--movscale %f",), - 
noedit=dict(argstr="--noedit", usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fsl_in_matrix=dict( + argstr="--fsl %s", + extensions=None, + ), + fsl_out=dict( + argstr="--fslregout %s", + ), + fstal=dict( + argstr="--fstal", + xor=["target_image", "moving_image", "reg_file"], + ), + fstarg=dict( + argstr="--fstarg", + xor=["target_image"], + ), + invert_lta_in=dict( + requires=["lta_in"], + ), + invert_lta_out=dict( + argstr="--ltaout-inv", + requires=["lta_in"], + ), + lta_in=dict( + argstr="--lta %s", + extensions=None, + ), + lta_out=dict( + argstr="--ltaout %s", + ), + moving_image=dict( + argstr="--mov %s", + extensions=None, + mandatory=True, + ), + movscale=dict( + argstr="--movscale %f", + ), + noedit=dict( + argstr="--noedit", + usedefault=True, + ), reg_file=dict( - argstr="--reg %s", extensions=None, mandatory=True, usedefault=True, + argstr="--reg %s", + extensions=None, + mandatory=True, + usedefault=True, + ), + reg_header=dict( + argstr="--regheader", + ), + subject_id=dict( + argstr="--s %s", ), - reg_header=dict(argstr="--regheader",), - subject_id=dict(argstr="--s %s",), subjects_dir=dict(), - target_image=dict(argstr="--targ %s", extensions=None, xor=["fstarg"],), - xfm=dict(argstr="--xfm %s", extensions=None,), + target_image=dict( + argstr="--targ %s", + extensions=None, + xor=["fstarg"], + ), + xfm=dict( + argstr="--xfm %s", + extensions=None, + ), ) inputs = Tkregister2.input_spec() @@ -35,9 +84,15 @@ def test_Tkregister2_inputs(): def test_Tkregister2_outputs(): output_map = dict( - fsl_file=dict(extensions=None,), - lta_file=dict(extensions=None,), - reg_file=dict(extensions=None,), + fsl_file=dict( + extensions=None, + ), + lta_file=dict( + extensions=None, + ), + reg_file=dict( + extensions=None, + ), ) outputs = Tkregister2.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py b/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py index b4378f3cac..b6b27e67b8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py @@ -4,33 +4,57 @@ def test_UnpackSDICOMDir_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), config=dict( argstr="-cfg %s", extensions=None, mandatory=True, xor=("run_info", "config", "seq_config"), ), - dir_structure=dict(argstr="-%s",), - environ=dict(nohash=True, usedefault=True,), - log_file=dict(argstr="-log %s", extensions=None,), - no_info_dump=dict(argstr="-noinfodump",), - no_unpack_err=dict(argstr="-no-unpackerr",), - output_dir=dict(argstr="-targ %s",), + dir_structure=dict( + argstr="-%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + log_file=dict( + argstr="-log %s", + extensions=None, + ), + no_info_dump=dict( + argstr="-noinfodump", + ), + no_unpack_err=dict( + argstr="-no-unpackerr", + ), + output_dir=dict( + argstr="-targ %s", + ), run_info=dict( argstr="-run %d %s %s %s", mandatory=True, xor=("run_info", "config", "seq_config"), ), - scan_only=dict(argstr="-scanonly %s", extensions=None,), + scan_only=dict( + argstr="-scanonly %s", + extensions=None, + ), seq_config=dict( argstr="-seqcfg %s", extensions=None, mandatory=True, xor=("run_info", "config", "seq_config"), ), - source_dir=dict(argstr="-src %s", mandatory=True,), - spm_zeropad=dict(argstr="-nspmzeropad %d",), + source_dir=dict( + argstr="-src %s", + mandatory=True, + ), + spm_zeropad=dict( + argstr="-nspmzeropad %d", + ), 
subjects_dir=dict(), ) inputs = UnpackSDICOMDir.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py b/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py index cef9ddfedc..152f03eaa8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py @@ -4,21 +4,64 @@ def test_VolumeMask_inputs(): input_map = dict( - args=dict(argstr="%s",), - aseg=dict(extensions=None, xor=["in_aseg"],), + args=dict( + argstr="%s", + ), + aseg=dict( + extensions=None, + xor=["in_aseg"], + ), copy_inputs=dict(), - environ=dict(nohash=True, usedefault=True,), - in_aseg=dict(argstr="--aseg_name %s", extensions=None, xor=["aseg"],), - left_ribbonlabel=dict(argstr="--label_left_ribbon %d", mandatory=True,), - left_whitelabel=dict(argstr="--label_left_white %d", mandatory=True,), - lh_pial=dict(extensions=None, mandatory=True,), - lh_white=dict(extensions=None, mandatory=True,), - rh_pial=dict(extensions=None, mandatory=True,), - rh_white=dict(extensions=None, mandatory=True,), - right_ribbonlabel=dict(argstr="--label_right_ribbon %d", mandatory=True,), - right_whitelabel=dict(argstr="--label_right_white %d", mandatory=True,), - save_ribbon=dict(argstr="--save_ribbon",), - subject_id=dict(argstr="%s", mandatory=True, position=-1, usedefault=True,), + environ=dict( + nohash=True, + usedefault=True, + ), + in_aseg=dict( + argstr="--aseg_name %s", + extensions=None, + xor=["aseg"], + ), + left_ribbonlabel=dict( + argstr="--label_left_ribbon %d", + mandatory=True, + ), + left_whitelabel=dict( + argstr="--label_left_white %d", + mandatory=True, + ), + lh_pial=dict( + extensions=None, + mandatory=True, + ), + lh_white=dict( + extensions=None, + mandatory=True, + ), + rh_pial=dict( + extensions=None, + mandatory=True, + ), + rh_white=dict( + extensions=None, + mandatory=True, + ), + right_ribbonlabel=dict( + argstr="--label_right_ribbon %d", + mandatory=True, + ), + right_whitelabel=dict( + argstr="--label_right_white %d", + mandatory=True, + ), + save_ribbon=dict( + argstr="--save_ribbon", + ), + subject_id=dict( + argstr="%s", + mandatory=True, + position=-1, + usedefault=True, + ), subjects_dir=dict(), ) inputs = VolumeMask.input_spec() @@ -30,9 +73,15 @@ def test_VolumeMask_inputs(): def test_VolumeMask_outputs(): output_map = dict( - lh_ribbon=dict(extensions=None,), - out_ribbon=dict(extensions=None,), - rh_ribbon=dict(extensions=None,), + lh_ribbon=dict( + extensions=None, + ), + out_ribbon=dict( + extensions=None, + ), + rh_ribbon=dict( + extensions=None, + ), ) outputs = VolumeMask.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py b/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py index 649e4e497b..5e8609c4c6 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py @@ -4,16 +4,40 @@ def test_WatershedSkullStrip_inputs(): input_map = dict( - args=dict(argstr="%s",), - brain_atlas=dict(argstr="-brain_atlas %s", extensions=None, position=-4,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + args=dict( + argstr="%s", + ), + brain_atlas=dict( + argstr="-brain_atlas %s", + extensions=None, + position=-4, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), out_file=dict( - 
argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True, + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + usedefault=True, ), subjects_dir=dict(), - t1=dict(argstr="-T1",), - transform=dict(argstr="%s", extensions=None, position=-3,), + t1=dict( + argstr="-T1", + ), + transform=dict( + argstr="%s", + extensions=None, + position=-3, + ), ) inputs = WatershedSkullStrip.input_spec() @@ -23,7 +47,11 @@ def test_WatershedSkullStrip_inputs(): def test_WatershedSkullStrip_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = WatershedSkullStrip.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_AR1Image.py b/nipype/interfaces/fsl/tests/test_auto_AR1Image.py index 23e6a19b6c..0f4cfc2bcc 100644 --- a/nipype/interfaces/fsl/tests/test_auto_AR1Image.py +++ b/nipype/interfaces/fsl/tests/test_auto_AR1Image.py @@ -4,16 +4,43 @@ def test_AR1Image_inputs(): input_map = dict( - args=dict(argstr="%s",), - dimension=dict(argstr="-%sar1", position=4, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - internal_datatype=dict(argstr="-dt %s", position=1,), - nan2zeros=dict(argstr="-nan", position=3,), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="-%sar1", + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = AR1Image.input_spec() @@ -24,7 +51,11 @@ def test_AR1Image_inputs(): def test_AR1Image_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = AR1Image.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py b/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py index 6faf6d5d27..359a076f2d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py +++ b/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py @@ -4,12 +4,29 @@ def test_AccuracyTester_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - mel_icas=dict(argstr="%s", copyfile=False, mandatory=True, position=3,), - output_directory=dict(argstr="%s", mandatory=True, position=2,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + mel_icas=dict( + argstr="%s", + copyfile=False, + mandatory=True, + position=3, + ), + output_directory=dict( + argstr="%s", + mandatory=True, + position=2, + ), trained_wts_file=dict( - argstr="%s", extensions=None, mandatory=True, position=1, + argstr="%s", + extensions=None, + mandatory=True, + position=1, ), ) inputs = AccuracyTester.input_spec() @@ -20,7 +37,12 @@ def test_AccuracyTester_inputs(): def test_AccuracyTester_outputs(): - output_map = dict(output_directory=dict(argstr="%s", position=1,),) + 
output_map = dict( + output_directory=dict( + argstr="%s", + position=1, + ), + ) outputs = AccuracyTester.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py b/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py index dcd7fc6081..f38990e572 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py @@ -4,16 +4,44 @@ def test_ApplyMask_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - internal_datatype=dict(argstr="-dt %s", position=1,), - mask_file=dict(argstr="-mas %s", extensions=None, mandatory=True, position=4,), - nan2zeros=dict(argstr="-nan", position=3,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + mask_file=dict( + argstr="-mas %s", + extensions=None, + mandatory=True, + position=4, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = ApplyMask.input_spec() @@ -24,7 +52,11 @@ def test_ApplyMask_inputs(): def test_ApplyMask_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ApplyMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py b/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py index a600c425e6..4c5bcc13a7 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py @@ -4,12 +4,30 @@ def test_ApplyTOPUP_inputs(): input_map = dict( - args=dict(argstr="%s",), - datatype=dict(argstr="-d=%s",), - encoding_file=dict(argstr="--datain=%s", extensions=None, mandatory=True,), - environ=dict(nohash=True, usedefault=True,), - in_files=dict(argstr="--imain=%s", mandatory=True, sep=",",), - in_index=dict(argstr="--inindex=%s", sep=",",), + args=dict( + argstr="%s", + ), + datatype=dict( + argstr="-d=%s", + ), + encoding_file=dict( + argstr="--datain=%s", + extensions=None, + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr="--imain=%s", + mandatory=True, + sep=",", + ), + in_index=dict( + argstr="--inindex=%s", + sep=",", + ), in_topup_fieldcoef=dict( argstr="--topup=%s", copyfile=False, @@ -17,10 +35,16 @@ def test_ApplyTOPUP_inputs(): requires=["in_topup_movpar"], ), in_topup_movpar=dict( - copyfile=False, extensions=None, requires=["in_topup_fieldcoef"], + copyfile=False, + extensions=None, + requires=["in_topup_fieldcoef"], + ), + interp=dict( + argstr="--interp=%s", + ), + method=dict( + argstr="--method=%s", ), - interp=dict(argstr="--interp=%s",), - method=dict(argstr="--method=%s",), out_corrected=dict( argstr="--out=%s", extensions=None, @@ -37,7 +61,11 @@ def test_ApplyTOPUP_inputs(): def test_ApplyTOPUP_outputs(): - output_map = dict(out_corrected=dict(extensions=None,),) + output_map = dict( + out_corrected=dict( + 
extensions=None, + ), + ) outputs = ApplyTOPUP.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py b/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py index 8deaf747c2..1eaf3eb2e7 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py @@ -4,14 +4,38 @@ def test_ApplyWarp_inputs(): input_map = dict( - abswarp=dict(argstr="--abs", xor=["relwarp"],), - args=dict(argstr="%s",), - datatype=dict(argstr="--datatype=%s",), - environ=dict(nohash=True, usedefault=True,), - field_file=dict(argstr="--warp=%s", extensions=None,), - in_file=dict(argstr="--in=%s", extensions=None, mandatory=True, position=0,), - interp=dict(argstr="--interp=%s", position=-2,), - mask_file=dict(argstr="--mask=%s", extensions=None,), + abswarp=dict( + argstr="--abs", + xor=["relwarp"], + ), + args=dict( + argstr="%s", + ), + datatype=dict( + argstr="--datatype=%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + field_file=dict( + argstr="--warp=%s", + extensions=None, + ), + in_file=dict( + argstr="--in=%s", + extensions=None, + mandatory=True, + position=0, + ), + interp=dict( + argstr="--interp=%s", + position=-2, + ), + mask_file=dict( + argstr="--mask=%s", + extensions=None, + ), out_file=dict( argstr="--out=%s", extensions=None, @@ -20,12 +44,31 @@ def test_ApplyWarp_inputs(): position=2, ), output_type=dict(), - postmat=dict(argstr="--postmat=%s", extensions=None,), - premat=dict(argstr="--premat=%s", extensions=None,), - ref_file=dict(argstr="--ref=%s", extensions=None, mandatory=True, position=1,), - relwarp=dict(argstr="--rel", position=-1, xor=["abswarp"],), - superlevel=dict(argstr="--superlevel=%s",), - supersample=dict(argstr="--super",), + postmat=dict( + argstr="--postmat=%s", + extensions=None, + ), + premat=dict( + argstr="--premat=%s", + extensions=None, + ), + ref_file=dict( + argstr="--ref=%s", + extensions=None, + mandatory=True, + position=1, + ), + relwarp=dict( + argstr="--rel", + position=-1, + xor=["abswarp"], + ), + superlevel=dict( + argstr="--superlevel=%s", + ), + supersample=dict( + argstr="--super", + ), ) inputs = ApplyWarp.input_spec() @@ -35,7 +78,11 @@ def test_ApplyWarp_inputs(): def test_ApplyWarp_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ApplyWarp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py b/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py index 116748d4c9..0fc914cdde 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py @@ -4,35 +4,111 @@ def test_ApplyXFM_inputs(): input_map = dict( - angle_rep=dict(argstr="-anglerep %s",), - apply_isoxfm=dict(argstr="-applyisoxfm %f", xor=["apply_xfm"],), - apply_xfm=dict(argstr="-applyxfm", usedefault=True,), - args=dict(argstr="%s",), - bbrslope=dict(argstr="-bbrslope %f", min_ver="5.0.0",), - bbrtype=dict(argstr="-bbrtype %s", min_ver="5.0.0",), - bgvalue=dict(argstr="-setbackground %f",), - bins=dict(argstr="-bins %d",), - coarse_search=dict(argstr="-coarsesearch %d", units="degrees",), - cost=dict(argstr="-cost %s",), - cost_func=dict(argstr="-searchcost %s",), - datatype=dict(argstr="-datatype %s",), - display_init=dict(argstr="-displayinit",), - dof=dict(argstr="-dof %d",), - echospacing=dict(argstr="-echospacing %f", min_ver="5.0.0",), - 
environ=dict(nohash=True, usedefault=True,), - fieldmap=dict(argstr="-fieldmap %s", extensions=None, min_ver="5.0.0",), - fieldmapmask=dict(argstr="-fieldmapmask %s", extensions=None, min_ver="5.0.0",), - fine_search=dict(argstr="-finesearch %d", units="degrees",), - force_scaling=dict(argstr="-forcescaling",), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=0,), - in_matrix_file=dict(argstr="-init %s", extensions=None,), - in_weight=dict(argstr="-inweight %s", extensions=None,), - interp=dict(argstr="-interp %s",), - min_sampling=dict(argstr="-minsampling %f", units="mm",), - no_clamp=dict(argstr="-noclamp",), - no_resample=dict(argstr="-noresample",), - no_resample_blur=dict(argstr="-noresampblur",), - no_search=dict(argstr="-nosearch",), + angle_rep=dict( + argstr="-anglerep %s", + ), + apply_isoxfm=dict( + argstr="-applyisoxfm %f", + xor=["apply_xfm"], + ), + apply_xfm=dict( + argstr="-applyxfm", + usedefault=True, + ), + args=dict( + argstr="%s", + ), + bbrslope=dict( + argstr="-bbrslope %f", + min_ver="5.0.0", + ), + bbrtype=dict( + argstr="-bbrtype %s", + min_ver="5.0.0", + ), + bgvalue=dict( + argstr="-setbackground %f", + ), + bins=dict( + argstr="-bins %d", + ), + coarse_search=dict( + argstr="-coarsesearch %d", + units="degrees", + ), + cost=dict( + argstr="-cost %s", + ), + cost_func=dict( + argstr="-searchcost %s", + ), + datatype=dict( + argstr="-datatype %s", + ), + display_init=dict( + argstr="-displayinit", + ), + dof=dict( + argstr="-dof %d", + ), + echospacing=dict( + argstr="-echospacing %f", + min_ver="5.0.0", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fieldmap=dict( + argstr="-fieldmap %s", + extensions=None, + min_ver="5.0.0", + ), + fieldmapmask=dict( + argstr="-fieldmapmask %s", + extensions=None, + min_ver="5.0.0", + ), + fine_search=dict( + argstr="-finesearch %d", + units="degrees", + ), + force_scaling=dict( + argstr="-forcescaling", + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + position=0, + ), + in_matrix_file=dict( + argstr="-init %s", + extensions=None, + ), + in_weight=dict( + argstr="-inweight %s", + extensions=None, + ), + interp=dict( + argstr="-interp %s", + ), + min_sampling=dict( + argstr="-minsampling %f", + units="mm", + ), + no_clamp=dict( + argstr="-noclamp", + ), + no_resample=dict( + argstr="-noresample", + ), + no_resample_blur=dict( + argstr="-noresampblur", + ), + no_search=dict( + argstr="-nosearch", + ), out_file=dict( argstr="-out %s", extensions=None, @@ -58,23 +134,72 @@ def test_ApplyXFM_inputs(): position=3, ), output_type=dict(), - padding_size=dict(argstr="-paddingsize %d", units="voxels",), - pedir=dict(argstr="-pedir %d", min_ver="5.0.0",), - ref_weight=dict(argstr="-refweight %s", extensions=None,), - reference=dict(argstr="-ref %s", extensions=None, mandatory=True, position=1,), - rigid2D=dict(argstr="-2D",), + padding_size=dict( + argstr="-paddingsize %d", + units="voxels", + ), + pedir=dict( + argstr="-pedir %d", + min_ver="5.0.0", + ), + ref_weight=dict( + argstr="-refweight %s", + extensions=None, + ), + reference=dict( + argstr="-ref %s", + extensions=None, + mandatory=True, + position=1, + ), + rigid2D=dict( + argstr="-2D", + ), save_log=dict(), - schedule=dict(argstr="-schedule %s", extensions=None,), - searchr_x=dict(argstr="-searchrx %s", units="degrees",), - searchr_y=dict(argstr="-searchry %s", units="degrees",), - searchr_z=dict(argstr="-searchrz %s", units="degrees",), - sinc_width=dict(argstr="-sincwidth %d", units="voxels",), - 
sinc_window=dict(argstr="-sincwindow %s",), - uses_qform=dict(argstr="-usesqform",), - verbose=dict(argstr="-verbose %d",), - wm_seg=dict(argstr="-wmseg %s", extensions=None, min_ver="5.0.0",), - wmcoords=dict(argstr="-wmcoords %s", extensions=None, min_ver="5.0.0",), - wmnorms=dict(argstr="-wmnorms %s", extensions=None, min_ver="5.0.0",), + schedule=dict( + argstr="-schedule %s", + extensions=None, + ), + searchr_x=dict( + argstr="-searchrx %s", + units="degrees", + ), + searchr_y=dict( + argstr="-searchry %s", + units="degrees", + ), + searchr_z=dict( + argstr="-searchrz %s", + units="degrees", + ), + sinc_width=dict( + argstr="-sincwidth %d", + units="voxels", + ), + sinc_window=dict( + argstr="-sincwindow %s", + ), + uses_qform=dict( + argstr="-usesqform", + ), + verbose=dict( + argstr="-verbose %d", + ), + wm_seg=dict( + argstr="-wmseg %s", + extensions=None, + min_ver="5.0.0", + ), + wmcoords=dict( + argstr="-wmcoords %s", + extensions=None, + min_ver="5.0.0", + ), + wmnorms=dict( + argstr="-wmnorms %s", + extensions=None, + min_ver="5.0.0", + ), ) inputs = ApplyXFM.input_spec() @@ -85,9 +210,15 @@ def test_ApplyXFM_inputs(): def test_ApplyXFM_outputs(): output_map = dict( - out_file=dict(extensions=None,), - out_log=dict(extensions=None,), - out_matrix_file=dict(extensions=None,), + out_file=dict( + extensions=None, + ), + out_log=dict( + extensions=None, + ), + out_matrix_file=dict( + extensions=None, + ), ) outputs = ApplyXFM.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_AvScale.py b/nipype/interfaces/fsl/tests/test_auto_AvScale.py index 5f636ec453..4748d6a9bc 100644 --- a/nipype/interfaces/fsl/tests/test_auto_AvScale.py +++ b/nipype/interfaces/fsl/tests/test_auto_AvScale.py @@ -4,11 +4,26 @@ def test_AvScale_inputs(): input_map = dict( - all_param=dict(argstr="--allparams",), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - mat_file=dict(argstr="%s", extensions=None, position=-2,), - ref_file=dict(argstr="%s", extensions=None, position=-1,), + all_param=dict( + argstr="--allparams", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + mat_file=dict( + argstr="%s", + extensions=None, + position=-2, + ), + ref_file=dict( + argstr="%s", + extensions=None, + position=-1, + ), ) inputs = AvScale.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_B0Calc.py b/nipype/interfaces/fsl/tests/test_auto_B0Calc.py index 85175fd428..01727179ff 100644 --- a/nipype/interfaces/fsl/tests/test_auto_B0Calc.py +++ b/nipype/interfaces/fsl/tests/test_auto_B0Calc.py @@ -4,14 +4,39 @@ def test_B0Calc_inputs(): input_map = dict( - args=dict(argstr="%s",), - chi_air=dict(argstr="--chi0=%e", usedefault=True,), - compute_xyz=dict(argstr="--xyz", usedefault=True,), - delta=dict(argstr="-d %e", usedefault=True,), - directconv=dict(argstr="--directconv", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - extendboundary=dict(argstr="--extendboundary=%0.2f", usedefault=True,), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=0,), + args=dict( + argstr="%s", + ), + chi_air=dict( + argstr="--chi0=%e", + usedefault=True, + ), + compute_xyz=dict( + argstr="--xyz", + usedefault=True, + ), + delta=dict( + argstr="-d %e", + usedefault=True, + ), + directconv=dict( + argstr="--directconv", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + extendboundary=dict( + argstr="--extendboundary=%0.2f", + usedefault=True, + ), + in_file=dict( + argstr="-i %s", + 
extensions=None, + mandatory=True, + position=0, + ), out_file=dict( argstr="-o %s", extensions=None, @@ -21,15 +46,37 @@ def test_B0Calc_inputs(): position=1, ), output_type=dict(), - x_b0=dict(argstr="--b0x=%0.2f", usedefault=True, xor=["xyz_b0"],), - x_grad=dict(argstr="--gx=%0.4f", usedefault=True,), + x_b0=dict( + argstr="--b0x=%0.2f", + usedefault=True, + xor=["xyz_b0"], + ), + x_grad=dict( + argstr="--gx=%0.4f", + usedefault=True, + ), xyz_b0=dict( - argstr="--b0x=%0.2f --b0y=%0.2f --b0=%0.2f", xor=["x_b0", "y_b0", "z_b0"], + argstr="--b0x=%0.2f --b0y=%0.2f --b0=%0.2f", + xor=["x_b0", "y_b0", "z_b0"], + ), + y_b0=dict( + argstr="--b0y=%0.2f", + usedefault=True, + xor=["xyz_b0"], + ), + y_grad=dict( + argstr="--gy=%0.4f", + usedefault=True, + ), + z_b0=dict( + argstr="--b0=%0.2f", + usedefault=True, + xor=["xyz_b0"], + ), + z_grad=dict( + argstr="--gz=%0.4f", + usedefault=True, ), - y_b0=dict(argstr="--b0y=%0.2f", usedefault=True, xor=["xyz_b0"],), - y_grad=dict(argstr="--gy=%0.4f", usedefault=True,), - z_b0=dict(argstr="--b0=%0.2f", usedefault=True, xor=["xyz_b0"],), - z_grad=dict(argstr="--gz=%0.4f", usedefault=True,), ) inputs = B0Calc.input_spec() @@ -39,7 +86,11 @@ def test_B0Calc_inputs(): def test_B0Calc_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = B0Calc.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py b/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py index 8941e775c6..2424307165 100644 --- a/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py +++ b/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py @@ -4,39 +4,114 @@ def test_BEDPOSTX5_inputs(): input_map = dict( - all_ard=dict(argstr="--allard", xor=("no_ard", "all_ard"),), - args=dict(argstr="%s",), - burn_in=dict(argstr="-b %d", usedefault=True,), - burn_in_no_ard=dict(argstr="--burnin_noard=%d", usedefault=True,), - bvals=dict(extensions=None, mandatory=True,), - bvecs=dict(extensions=None, mandatory=True,), + all_ard=dict( + argstr="--allard", + xor=("no_ard", "all_ard"), + ), + args=dict( + argstr="%s", + ), + burn_in=dict( + argstr="-b %d", + usedefault=True, + ), + burn_in_no_ard=dict( + argstr="--burnin_noard=%d", + usedefault=True, + ), + bvals=dict( + extensions=None, + mandatory=True, + ), + bvecs=dict( + extensions=None, + mandatory=True, + ), cnlinear=dict( - argstr="--cnonlinear", xor=("no_spat", "non_linear", "cnlinear"), - ), - dwi=dict(extensions=None, mandatory=True,), - environ=dict(nohash=True, usedefault=True,), - f0_ard=dict(argstr="--f0 --ardf0", xor=["f0_noard", "f0_ard", "all_ard"],), - f0_noard=dict(argstr="--f0", xor=["f0_noard", "f0_ard"],), - force_dir=dict(argstr="--forcedir", usedefault=True,), - fudge=dict(argstr="-w %d",), - grad_dev=dict(extensions=None,), - gradnonlin=dict(argstr="-g",), - logdir=dict(argstr="--logdir=%s",), - mask=dict(extensions=None, mandatory=True,), - model=dict(argstr="-model %d",), - n_fibres=dict(argstr="-n %d", mandatory=True, usedefault=True,), - n_jumps=dict(argstr="-j %d", usedefault=True,), - no_ard=dict(argstr="--noard", xor=("no_ard", "all_ard"),), - no_spat=dict(argstr="--nospat", xor=("no_spat", "non_linear", "cnlinear"),), + argstr="--cnonlinear", + xor=("no_spat", "non_linear", "cnlinear"), + ), + dwi=dict( + extensions=None, + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + f0_ard=dict( + argstr="--f0 --ardf0", + xor=["f0_noard", "f0_ard", "all_ard"], + 
), + f0_noard=dict( + argstr="--f0", + xor=["f0_noard", "f0_ard"], + ), + force_dir=dict( + argstr="--forcedir", + usedefault=True, + ), + fudge=dict( + argstr="-w %d", + ), + grad_dev=dict( + extensions=None, + ), + gradnonlin=dict( + argstr="-g", + ), + logdir=dict( + argstr="--logdir=%s", + ), + mask=dict( + extensions=None, + mandatory=True, + ), + model=dict( + argstr="-model %d", + ), + n_fibres=dict( + argstr="-n %d", + mandatory=True, + usedefault=True, + ), + n_jumps=dict( + argstr="-j %d", + usedefault=True, + ), + no_ard=dict( + argstr="--noard", + xor=("no_ard", "all_ard"), + ), + no_spat=dict( + argstr="--nospat", + xor=("no_spat", "non_linear", "cnlinear"), + ), non_linear=dict( - argstr="--nonlinear", xor=("no_spat", "non_linear", "cnlinear"), + argstr="--nonlinear", + xor=("no_spat", "non_linear", "cnlinear"), + ), + out_dir=dict( + argstr="%s", + mandatory=True, + position=1, + usedefault=True, ), - out_dir=dict(argstr="%s", mandatory=True, position=1, usedefault=True,), output_type=dict(), - rician=dict(argstr="--rician",), - sample_every=dict(argstr="-s %d", usedefault=True,), - seed=dict(argstr="--seed=%d",), - update_proposal_every=dict(argstr="--updateproposalevery=%d", usedefault=True,), + rician=dict( + argstr="--rician", + ), + sample_every=dict( + argstr="-s %d", + usedefault=True, + ), + seed=dict( + argstr="--seed=%d", + ), + update_proposal_every=dict( + argstr="--updateproposalevery=%d", + usedefault=True, + ), use_gpu=dict(), ) inputs = BEDPOSTX5.input_spec() @@ -50,8 +125,12 @@ def test_BEDPOSTX5_outputs(): output_map = dict( dyads=dict(), dyads_dispersion=dict(), - mean_S0samples=dict(extensions=None,), - mean_dsamples=dict(extensions=None,), + mean_S0samples=dict( + extensions=None, + ), + mean_dsamples=dict( + extensions=None, + ), mean_fsamples=dict(), mean_phsamples=dict(), mean_thsamples=dict(), diff --git a/nipype/interfaces/fsl/tests/test_auto_BET.py b/nipype/interfaces/fsl/tests/test_auto_BET.py index 8d06376d94..9d1b18846d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_BET.py +++ b/nipype/interfaces/fsl/tests/test_auto_BET.py @@ -4,10 +4,20 @@ def test_BET_inputs(): input_map = dict( - args=dict(argstr="%s",), - center=dict(argstr="-c %s", units="voxels",), - environ=dict(nohash=True, usedefault=True,), - frac=dict(argstr="-f %.2f",), + args=dict( + argstr="%s", + ), + center=dict( + argstr="-c %s", + units="voxels", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + frac=dict( + argstr="-f %.2f", + ), functional=dict( argstr="-F", xor=( @@ -20,14 +30,31 @@ def test_BET_inputs(): "t2_guided", ), ), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), - mask=dict(argstr="-m",), - mesh=dict(argstr="-e",), - no_output=dict(argstr="-n",), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), + mask=dict( + argstr="-m", + ), + mesh=dict( + argstr="-e", + ), + no_output=dict( + argstr="-n", + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=1, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=1, + ), + outline=dict( + argstr="-o", ), - outline=dict(argstr="-o",), output_type=dict(), padding=dict( argstr="-Z", @@ -41,7 +68,10 @@ def test_BET_inputs(): "t2_guided", ), ), - radius=dict(argstr="-r %d", units="mm",), + radius=dict( + argstr="-r %d", + units="mm", + ), reduce_bias=dict( argstr="-B", xor=( @@ -78,7 +108,9 @@ def test_BET_inputs(): "t2_guided", ), ), - skull=dict(argstr="-s",), + skull=dict( + 
argstr="-s", + ), surfaces=dict( argstr="-A", xor=( @@ -104,8 +136,12 @@ def test_BET_inputs(): "t2_guided", ), ), - threshold=dict(argstr="-t",), - vertical_gradient=dict(argstr="-g %.2f",), + threshold=dict( + argstr="-t", + ), + vertical_gradient=dict( + argstr="-g %.2f", + ), ) inputs = BET.input_spec() @@ -116,18 +152,42 @@ def test_BET_inputs(): def test_BET_outputs(): output_map = dict( - inskull_mask_file=dict(extensions=None,), - inskull_mesh_file=dict(extensions=None,), - mask_file=dict(extensions=None,), - meshfile=dict(extensions=None,), - out_file=dict(extensions=None,), - outline_file=dict(extensions=None,), - outskin_mask_file=dict(extensions=None,), - outskin_mesh_file=dict(extensions=None,), - outskull_mask_file=dict(extensions=None,), - outskull_mesh_file=dict(extensions=None,), - skull_file=dict(extensions=None,), - skull_mask_file=dict(extensions=None,), + inskull_mask_file=dict( + extensions=None, + ), + inskull_mesh_file=dict( + extensions=None, + ), + mask_file=dict( + extensions=None, + ), + meshfile=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + outline_file=dict( + extensions=None, + ), + outskin_mask_file=dict( + extensions=None, + ), + outskin_mesh_file=dict( + extensions=None, + ), + outskull_mask_file=dict( + extensions=None, + ), + outskull_mesh_file=dict( + extensions=None, + ), + skull_file=dict( + extensions=None, + ), + skull_mask_file=dict( + extensions=None, + ), ) outputs = BET.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py b/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py index 685058f2d4..405cd592f6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py @@ -4,11 +4,27 @@ def test_BinaryMaths_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - internal_datatype=dict(argstr="-dt %s", position=1,), - nan2zeros=dict(argstr="-nan", position=3,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), operand_file=dict( argstr="%s", extensions=None, @@ -17,13 +33,27 @@ def test_BinaryMaths_inputs(): xor=["operand_value"], ), operand_value=dict( - argstr="%.8f", mandatory=True, position=5, xor=["operand_file"], + argstr="%.8f", + mandatory=True, + position=5, + xor=["operand_file"], + ), + operation=dict( + argstr="-%s", + mandatory=True, + position=4, ), - operation=dict(argstr="-%s", mandatory=True, position=4,), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = BinaryMaths.input_spec() @@ -34,7 +64,11 @@ def test_BinaryMaths_inputs(): def test_BinaryMaths_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = BinaryMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py b/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py 
index 9dfe5ed83f..61f27be950 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py +++ b/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py @@ -4,15 +4,39 @@ def test_ChangeDataType_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - internal_datatype=dict(argstr="-dt %s", position=1,), - nan2zeros=dict(argstr="-nan", position=3,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + mandatory=True, + position=-1, ), - output_datatype=dict(argstr="-odt %s", mandatory=True, position=-1,), output_type=dict(), ) inputs = ChangeDataType.input_spec() @@ -23,7 +47,11 @@ def test_ChangeDataType_inputs(): def test_ChangeDataType_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ChangeDataType.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Classifier.py b/nipype/interfaces/fsl/tests/test_auto_Classifier.py index e81bb72096..44fde8ae1a 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Classifier.py +++ b/nipype/interfaces/fsl/tests/test_auto_Classifier.py @@ -4,13 +4,32 @@ def test_Classifier_inputs(): input_map = dict( - args=dict(argstr="%s",), - artifacts_list_file=dict(extensions=None,), - environ=dict(nohash=True, usedefault=True,), - mel_ica=dict(argstr="%s", copyfile=False, position=1,), - thresh=dict(argstr="%d", mandatory=True, position=-1,), + args=dict( + argstr="%s", + ), + artifacts_list_file=dict( + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + mel_ica=dict( + argstr="%s", + copyfile=False, + position=1, + ), + thresh=dict( + argstr="%d", + mandatory=True, + position=-1, + ), trained_wts_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=2, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=2, ), ) inputs = Classifier.input_spec() @@ -21,7 +40,11 @@ def test_Classifier_inputs(): def test_Classifier_outputs(): - output_map = dict(artifacts_list_file=dict(extensions=None,),) + output_map = dict( + artifacts_list_file=dict( + extensions=None, + ), + ) outputs = Classifier.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Cleaner.py b/nipype/interfaces/fsl/tests/test_auto_Cleaner.py index d5462a23d8..55e6851603 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Cleaner.py +++ b/nipype/interfaces/fsl/tests/test_auto_Cleaner.py @@ -4,17 +4,47 @@ def test_Cleaner_inputs(): input_map = dict( - aggressive=dict(argstr="-A", position=3,), - args=dict(argstr="%s",), + aggressive=dict( + argstr="-A", + position=3, + ), + args=dict( + argstr="%s", + ), artifacts_list_file=dict( - argstr="%s", extensions=None, mandatory=True, position=1, - ), - cleanup_motion=dict(argstr="-m", position=2,), - confound_file=dict(argstr="-x %s", extensions=None, position=4,), - 
confound_file_1=dict(argstr="-x %s", extensions=None, position=5,), - confound_file_2=dict(argstr="-x %s", extensions=None, position=6,), - environ=dict(nohash=True, usedefault=True,), - highpass=dict(argstr="-m -h %f", position=2, usedefault=True,), + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + cleanup_motion=dict( + argstr="-m", + position=2, + ), + confound_file=dict( + argstr="-x %s", + extensions=None, + position=4, + ), + confound_file_1=dict( + argstr="-x %s", + extensions=None, + position=5, + ), + confound_file_2=dict( + argstr="-x %s", + extensions=None, + position=6, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + highpass=dict( + argstr="-m -h %f", + position=2, + usedefault=True, + ), ) inputs = Cleaner.input_spec() @@ -24,7 +54,11 @@ def test_Cleaner_inputs(): def test_Cleaner_outputs(): - output_map = dict(cleaned_functional_file=dict(extensions=None,),) + output_map = dict( + cleaned_functional_file=dict( + extensions=None, + ), + ) outputs = Cleaner.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Cluster.py b/nipype/interfaces/fsl/tests/test_auto_Cluster.py index dee38d4875..ba4dfc8ae3 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Cluster.py +++ b/nipype/interfaces/fsl/tests/test_auto_Cluster.py @@ -4,34 +4,110 @@ def test_Cluster_inputs(): input_map = dict( - args=dict(argstr="%s",), - connectivity=dict(argstr="--connectivity=%d",), - cope_file=dict(argstr="--cope=%s", extensions=None,), - dlh=dict(argstr="--dlh=%.10f",), - environ=dict(nohash=True, usedefault=True,), - find_min=dict(argstr="--min", usedefault=True,), - fractional=dict(argstr="--fractional", usedefault=True,), - in_file=dict(argstr="--in=%s", extensions=None, mandatory=True,), - minclustersize=dict(argstr="--minclustersize", usedefault=True,), - no_table=dict(argstr="--no_table", usedefault=True,), - num_maxima=dict(argstr="--num=%d",), - out_index_file=dict(argstr="--oindex=%s", hash_files=False,), - out_localmax_txt_file=dict(argstr="--olmax=%s", hash_files=False,), - out_localmax_vol_file=dict(argstr="--olmaxim=%s", hash_files=False,), - out_max_file=dict(argstr="--omax=%s", hash_files=False,), - out_mean_file=dict(argstr="--omean=%s", hash_files=False,), - out_pval_file=dict(argstr="--opvals=%s", hash_files=False,), - out_size_file=dict(argstr="--osize=%s", hash_files=False,), - out_threshold_file=dict(argstr="--othresh=%s", hash_files=False,), + args=dict( + argstr="%s", + ), + connectivity=dict( + argstr="--connectivity=%d", + ), + cope_file=dict( + argstr="--cope=%s", + extensions=None, + ), + dlh=dict( + argstr="--dlh=%.10f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + find_min=dict( + argstr="--min", + usedefault=True, + ), + fractional=dict( + argstr="--fractional", + usedefault=True, + ), + in_file=dict( + argstr="--in=%s", + extensions=None, + mandatory=True, + ), + minclustersize=dict( + argstr="--minclustersize", + usedefault=True, + ), + no_table=dict( + argstr="--no_table", + usedefault=True, + ), + num_maxima=dict( + argstr="--num=%d", + ), + out_index_file=dict( + argstr="--oindex=%s", + hash_files=False, + ), + out_localmax_txt_file=dict( + argstr="--olmax=%s", + hash_files=False, + ), + out_localmax_vol_file=dict( + argstr="--olmaxim=%s", + hash_files=False, + ), + out_max_file=dict( + argstr="--omax=%s", + hash_files=False, + ), + out_mean_file=dict( + argstr="--omean=%s", + hash_files=False, + ), + out_pval_file=dict( + argstr="--opvals=%s", + 
hash_files=False, + ), + out_size_file=dict( + argstr="--osize=%s", + hash_files=False, + ), + out_threshold_file=dict( + argstr="--othresh=%s", + hash_files=False, + ), output_type=dict(), - peak_distance=dict(argstr="--peakdist=%.10f",), - pthreshold=dict(argstr="--pthresh=%.10f", requires=["dlh", "volume"],), - std_space_file=dict(argstr="--stdvol=%s", extensions=None,), - threshold=dict(argstr="--thresh=%.10f", mandatory=True,), - use_mm=dict(argstr="--mm", usedefault=True,), - volume=dict(argstr="--volume=%d",), - warpfield_file=dict(argstr="--warpvol=%s", extensions=None,), - xfm_file=dict(argstr="--xfm=%s", extensions=None,), + peak_distance=dict( + argstr="--peakdist=%.10f", + ), + pthreshold=dict( + argstr="--pthresh=%.10f", + requires=["dlh", "volume"], + ), + std_space_file=dict( + argstr="--stdvol=%s", + extensions=None, + ), + threshold=dict( + argstr="--thresh=%.10f", + mandatory=True, + ), + use_mm=dict( + argstr="--mm", + usedefault=True, + ), + volume=dict( + argstr="--volume=%d", + ), + warpfield_file=dict( + argstr="--warpvol=%s", + extensions=None, + ), + xfm_file=dict( + argstr="--xfm=%s", + extensions=None, + ), ) inputs = Cluster.input_spec() @@ -42,14 +118,30 @@ def test_Cluster_inputs(): def test_Cluster_outputs(): output_map = dict( - index_file=dict(extensions=None,), - localmax_txt_file=dict(extensions=None,), - localmax_vol_file=dict(extensions=None,), - max_file=dict(extensions=None,), - mean_file=dict(extensions=None,), - pval_file=dict(extensions=None,), - size_file=dict(extensions=None,), - threshold_file=dict(extensions=None,), + index_file=dict( + extensions=None, + ), + localmax_txt_file=dict( + extensions=None, + ), + localmax_vol_file=dict( + extensions=None, + ), + max_file=dict( + extensions=None, + ), + mean_file=dict( + extensions=None, + ), + pval_file=dict( + extensions=None, + ), + size_file=dict( + extensions=None, + ), + threshold_file=dict( + extensions=None, + ), ) outputs = Cluster.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Complex.py b/nipype/interfaces/fsl/tests/test_auto_Complex.py index ac631cde80..80d6f20fe6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Complex.py +++ b/nipype/interfaces/fsl/tests/test_auto_Complex.py @@ -4,7 +4,9 @@ def test_Complex_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), complex_cartesian=dict( argstr="-complex", position=1, @@ -17,8 +19,16 @@ def test_Complex_inputs(): "complex_merge", ], ), - complex_in_file=dict(argstr="%s", extensions=None, position=2,), - complex_in_file2=dict(argstr="%s", extensions=None, position=3,), + complex_in_file=dict( + argstr="%s", + extensions=None, + position=2, + ), + complex_in_file2=dict( + argstr="%s", + extensions=None, + position=3, + ), complex_merge=dict( argstr="-complexmerge", position=1, @@ -72,9 +82,19 @@ def test_Complex_inputs(): "complex_merge", ], ), - end_vol=dict(argstr="%d", position=-1,), - environ=dict(nohash=True, usedefault=True,), - imaginary_in_file=dict(argstr="%s", extensions=None, position=3,), + end_vol=dict( + argstr="%d", + position=-1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + imaginary_in_file=dict( + argstr="%s", + extensions=None, + position=3, + ), imaginary_out_file=dict( argstr="%s", extensions=None, @@ -91,7 +111,11 @@ def test_Complex_inputs(): "complex_merge", ], ), - magnitude_in_file=dict(argstr="%s", extensions=None, position=2,), + magnitude_in_file=dict( + argstr="%s", + extensions=None, + position=2, + ), magnitude_out_file=dict( argstr="%s", 
extensions=None, @@ -109,7 +133,11 @@ def test_Complex_inputs(): ], ), output_type=dict(), - phase_in_file=dict(argstr="%s", extensions=None, position=3,), + phase_in_file=dict( + argstr="%s", + extensions=None, + position=3, + ), phase_out_file=dict( argstr="%s", extensions=None, @@ -138,7 +166,11 @@ def test_Complex_inputs(): "complex_merge", ], ), - real_in_file=dict(argstr="%s", extensions=None, position=2,), + real_in_file=dict( + argstr="%s", + extensions=None, + position=2, + ), real_out_file=dict( argstr="%s", extensions=None, @@ -167,7 +199,10 @@ def test_Complex_inputs(): "complex_merge", ], ), - start_vol=dict(argstr="%d", position=-2,), + start_vol=dict( + argstr="%d", + position=-2, + ), ) inputs = Complex.input_spec() @@ -178,11 +213,21 @@ def test_Complex_inputs(): def test_Complex_outputs(): output_map = dict( - complex_out_file=dict(extensions=None,), - imaginary_out_file=dict(extensions=None,), - magnitude_out_file=dict(extensions=None,), - phase_out_file=dict(extensions=None,), - real_out_file=dict(extensions=None,), + complex_out_file=dict( + extensions=None, + ), + imaginary_out_file=dict( + extensions=None, + ), + magnitude_out_file=dict( + extensions=None, + ), + phase_out_file=dict( + extensions=None, + ), + real_out_file=dict( + extensions=None, + ), ) outputs = Complex.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py b/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py index 185c089889..35e5bd43a7 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py +++ b/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py @@ -4,19 +4,53 @@ def test_ContrastMgr_inputs(): input_map = dict( - args=dict(argstr="%s",), - contrast_num=dict(argstr="-cope",), - corrections=dict(copyfile=False, extensions=None, mandatory=True,), - dof_file=dict(argstr="", copyfile=False, extensions=None, mandatory=True,), - environ=dict(nohash=True, usedefault=True,), - fcon_file=dict(argstr="-f %s", extensions=None,), + args=dict( + argstr="%s", + ), + contrast_num=dict( + argstr="-cope", + ), + corrections=dict( + copyfile=False, + extensions=None, + mandatory=True, + ), + dof_file=dict( + argstr="", + copyfile=False, + extensions=None, + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fcon_file=dict( + argstr="-f %s", + extensions=None, + ), output_type=dict(), - param_estimates=dict(argstr="", copyfile=False, mandatory=True,), + param_estimates=dict( + argstr="", + copyfile=False, + mandatory=True, + ), sigmasquareds=dict( - argstr="", copyfile=False, extensions=None, mandatory=True, position=-2, + argstr="", + copyfile=False, + extensions=None, + mandatory=True, + position=-2, + ), + suffix=dict( + argstr="-suffix %s", + ), + tcon_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, ), - suffix=dict(argstr="-suffix %s",), - tcon_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), ) inputs = ContrastMgr.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py b/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py index dcfc562258..7ae7f7471b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py @@ -4,14 +4,34 @@ def test_ConvertWarp_inputs(): input_map = dict( - abswarp=dict(argstr="--abs", xor=["relwarp"],), - args=dict(argstr="%s",), - cons_jacobian=dict(argstr="--constrainj",), - environ=dict(nohash=True, usedefault=True,), - jacobian_max=dict(argstr="--jmax=%f",), - 
jacobian_min=dict(argstr="--jmin=%f",), - midmat=dict(argstr="--midmat=%s", extensions=None,), - out_abswarp=dict(argstr="--absout", xor=["out_relwarp"],), + abswarp=dict( + argstr="--abs", + xor=["relwarp"], + ), + args=dict( + argstr="%s", + ), + cons_jacobian=dict( + argstr="--constrainj", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + jacobian_max=dict( + argstr="--jmax=%f", + ), + jacobian_min=dict( + argstr="--jmin=%f", + ), + midmat=dict( + argstr="--midmat=%s", + extensions=None, + ), + out_abswarp=dict( + argstr="--absout", + xor=["out_relwarp"], + ), out_file=dict( argstr="--out=%s", extensions=None, @@ -20,16 +40,45 @@ def test_ConvertWarp_inputs(): output_name="out_file", position=-1, ), - out_relwarp=dict(argstr="--relout", xor=["out_abswarp"],), + out_relwarp=dict( + argstr="--relout", + xor=["out_abswarp"], + ), output_type=dict(), - postmat=dict(argstr="--postmat=%s", extensions=None,), - premat=dict(argstr="--premat=%s", extensions=None,), - reference=dict(argstr="--ref=%s", extensions=None, mandatory=True, position=1,), - relwarp=dict(argstr="--rel", xor=["abswarp"],), - shift_direction=dict(argstr="--shiftdir=%s", requires=["shift_in_file"],), - shift_in_file=dict(argstr="--shiftmap=%s", extensions=None,), - warp1=dict(argstr="--warp1=%s", extensions=None,), - warp2=dict(argstr="--warp2=%s", extensions=None,), + postmat=dict( + argstr="--postmat=%s", + extensions=None, + ), + premat=dict( + argstr="--premat=%s", + extensions=None, + ), + reference=dict( + argstr="--ref=%s", + extensions=None, + mandatory=True, + position=1, + ), + relwarp=dict( + argstr="--rel", + xor=["abswarp"], + ), + shift_direction=dict( + argstr="--shiftdir=%s", + requires=["shift_in_file"], + ), + shift_in_file=dict( + argstr="--shiftmap=%s", + extensions=None, + ), + warp1=dict( + argstr="--warp1=%s", + extensions=None, + ), + warp2=dict( + argstr="--warp2=%s", + extensions=None, + ), ) inputs = ConvertWarp.input_spec() @@ -39,7 +88,11 @@ def test_ConvertWarp_inputs(): def test_ConvertWarp_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ConvertWarp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py b/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py index b4311d0d37..60e1d7553b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py +++ b/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py @@ -4,22 +4,36 @@ def test_ConvertXFM_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), concat_xfm=dict( argstr="-concat", position=-3, requires=["in_file2"], xor=["invert_xfm", "concat_xfm", "fix_scale_skew"], ), - environ=dict(nohash=True, usedefault=True,), + environ=dict( + nohash=True, + usedefault=True, + ), fix_scale_skew=dict( argstr="-fixscaleskew", position=-3, requires=["in_file2"], xor=["invert_xfm", "concat_xfm", "fix_scale_skew"], ), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), - in_file2=dict(argstr="%s", extensions=None, position=-2,), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), + in_file2=dict( + argstr="%s", + extensions=None, + position=-2, + ), invert_xfm=dict( argstr="-inverse", position=-3, @@ -42,7 +56,11 @@ def test_ConvertXFM_inputs(): def test_ConvertXFM_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) 
outputs = ConvertXFM.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py b/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py index ade301a00e..6f15d47bfc 100644 --- a/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py +++ b/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py @@ -4,7 +4,9 @@ def test_CopyGeom_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), dest_file=dict( argstr="%s", copyfile=True, @@ -15,9 +17,20 @@ def test_CopyGeom_inputs(): output_name="out_file", position=1, ), - environ=dict(nohash=True, usedefault=True,), - ignore_dims=dict(argstr="-d", position="-1",), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), + environ=dict( + nohash=True, + usedefault=True, + ), + ignore_dims=dict( + argstr="-d", + position="-1", + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), output_type=dict(), ) inputs = CopyGeom.input_spec() @@ -28,7 +41,11 @@ def test_CopyGeom_inputs(): def test_CopyGeom_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = CopyGeom.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_DTIFit.py b/nipype/interfaces/fsl/tests/test_auto_DTIFit.py index a0cf704d87..546ffa6848 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DTIFit.py +++ b/nipype/interfaces/fsl/tests/test_auto_DTIFit.py @@ -4,25 +4,78 @@ def test_DTIFit_inputs(): input_map = dict( - args=dict(argstr="%s",), - base_name=dict(argstr="-o %s", position=1, usedefault=True,), - bvals=dict(argstr="-b %s", extensions=None, mandatory=True, position=4,), - bvecs=dict(argstr="-r %s", extensions=None, mandatory=True, position=3,), - cni=dict(argstr="--cni=%s", extensions=None,), - dwi=dict(argstr="-k %s", extensions=None, mandatory=True, position=0,), - environ=dict(nohash=True, usedefault=True,), - gradnonlin=dict(argstr="--gradnonlin=%s", extensions=None,), - little_bit=dict(argstr="--littlebit",), - mask=dict(argstr="-m %s", extensions=None, mandatory=True, position=2,), - max_x=dict(argstr="-X %d",), - max_y=dict(argstr="-Y %d",), - max_z=dict(argstr="-Z %d",), - min_x=dict(argstr="-x %d",), - min_y=dict(argstr="-y %d",), - min_z=dict(argstr="-z %d",), + args=dict( + argstr="%s", + ), + base_name=dict( + argstr="-o %s", + position=1, + usedefault=True, + ), + bvals=dict( + argstr="-b %s", + extensions=None, + mandatory=True, + position=4, + ), + bvecs=dict( + argstr="-r %s", + extensions=None, + mandatory=True, + position=3, + ), + cni=dict( + argstr="--cni=%s", + extensions=None, + ), + dwi=dict( + argstr="-k %s", + extensions=None, + mandatory=True, + position=0, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gradnonlin=dict( + argstr="--gradnonlin=%s", + extensions=None, + ), + little_bit=dict( + argstr="--littlebit", + ), + mask=dict( + argstr="-m %s", + extensions=None, + mandatory=True, + position=2, + ), + max_x=dict( + argstr="-X %d", + ), + max_y=dict( + argstr="-Y %d", + ), + max_z=dict( + argstr="-Z %d", + ), + min_x=dict( + argstr="-x %d", + ), + min_y=dict( + argstr="-y %d", + ), + min_z=dict( + argstr="-z %d", + ), output_type=dict(), - save_tensor=dict(argstr="--save_tensor",), - sse=dict(argstr="--sse",), + save_tensor=dict( + argstr="--save_tensor", + ), + sse=dict( + argstr="--sse", + ), ) inputs = DTIFit.input_spec() @@ -33,18 +86,42 @@ def 
test_DTIFit_inputs(): def test_DTIFit_outputs(): output_map = dict( - FA=dict(extensions=None,), - L1=dict(extensions=None,), - L2=dict(extensions=None,), - L3=dict(extensions=None,), - MD=dict(extensions=None,), - MO=dict(extensions=None,), - S0=dict(extensions=None,), - V1=dict(extensions=None,), - V2=dict(extensions=None,), - V3=dict(extensions=None,), - sse=dict(extensions=None,), - tensor=dict(extensions=None,), + FA=dict( + extensions=None, + ), + L1=dict( + extensions=None, + ), + L2=dict( + extensions=None, + ), + L3=dict( + extensions=None, + ), + MD=dict( + extensions=None, + ), + MO=dict( + extensions=None, + ), + S0=dict( + extensions=None, + ), + V1=dict( + extensions=None, + ), + V2=dict( + extensions=None, + ), + V3=dict( + extensions=None, + ), + sse=dict( + extensions=None, + ), + tensor=dict( + extensions=None, + ), ) outputs = DTIFit.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_DilateImage.py b/nipype/interfaces/fsl/tests/test_auto_DilateImage.py index 4a9a49eb45..ac204e5a38 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DilateImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_DilateImage.py @@ -4,21 +4,58 @@ def test_DilateImage_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - internal_datatype=dict(argstr="-dt %s", position=1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), kernel_file=dict( - argstr="%s", extensions=None, position=5, xor=["kernel_size"], + argstr="%s", + extensions=None, + position=5, + xor=["kernel_size"], + ), + kernel_shape=dict( + argstr="-kernel %s", + position=4, + ), + kernel_size=dict( + argstr="%.4f", + position=5, + xor=["kernel_file"], + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), + operation=dict( + argstr="-dil%s", + mandatory=True, + position=6, ), - kernel_shape=dict(argstr="-kernel %s", position=4,), - kernel_size=dict(argstr="%.4f", position=5, xor=["kernel_file"],), - nan2zeros=dict(argstr="-nan", position=3,), - operation=dict(argstr="-dil%s", mandatory=True, position=6,), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = DilateImage.input_spec() @@ -29,7 +66,11 @@ def test_DilateImage_inputs(): def test_DilateImage_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = DilateImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py b/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py index bd2134ec5d..74ea024917 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py +++ b/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py @@ -4,15 +4,35 @@ def test_DistanceMap_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), distance_map=dict( - argstr="--out=%s", extensions=None, genfile=True, hash_files=False, + argstr="--out=%s", + extensions=None, + genfile=True, + hash_files=False, + ), + environ=dict( 
+ nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="--in=%s", + extensions=None, + mandatory=True, + ), + invert_input=dict( + argstr="--invert", + ), + local_max_file=dict( + argstr="--localmax=%s", + hash_files=False, + ), + mask_file=dict( + argstr="--mask=%s", + extensions=None, ), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="--in=%s", extensions=None, mandatory=True,), - invert_input=dict(argstr="--invert",), - local_max_file=dict(argstr="--localmax=%s", hash_files=False,), - mask_file=dict(argstr="--mask=%s", extensions=None,), output_type=dict(), ) inputs = DistanceMap.input_spec() @@ -24,7 +44,12 @@ def test_DistanceMap_inputs(): def test_DistanceMap_outputs(): output_map = dict( - distance_map=dict(extensions=None,), local_max_file=dict(extensions=None,), + distance_map=dict( + extensions=None, + ), + local_max_file=dict( + extensions=None, + ), ) outputs = DistanceMap.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_DualRegression.py b/nipype/interfaces/fsl/tests/test_auto_DualRegression.py index 99e6a0f23d..4b3d8f6851 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DualRegression.py +++ b/nipype/interfaces/fsl/tests/test_auto_DualRegression.py @@ -4,18 +4,55 @@ def test_DualRegression_inputs(): input_map = dict( - args=dict(argstr="%s",), - con_file=dict(argstr="%s", extensions=None, position=4,), - des_norm=dict(argstr="%i", position=2, usedefault=True,), - design_file=dict(argstr="%s", extensions=None, position=3,), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + con_file=dict( + argstr="%s", + extensions=None, + position=4, + ), + des_norm=dict( + argstr="%i", + position=2, + usedefault=True, + ), + design_file=dict( + argstr="%s", + extensions=None, + position=3, + ), + environ=dict( + nohash=True, + usedefault=True, + ), group_IC_maps_4D=dict( - argstr="%s", extensions=None, mandatory=True, position=1, + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + in_files=dict( + argstr="%s", + mandatory=True, + position=-1, + sep=" ", + ), + n_perm=dict( + argstr="%i", + mandatory=True, + position=5, + ), + one_sample_group_mean=dict( + argstr="-1", + position=3, + ), + out_dir=dict( + argstr="%s", + genfile=True, + position=6, + usedefault=True, ), - in_files=dict(argstr="%s", mandatory=True, position=-1, sep=" ",), - n_perm=dict(argstr="%i", mandatory=True, position=5,), - one_sample_group_mean=dict(argstr="-1", position=3,), - out_dir=dict(argstr="%s", genfile=True, position=6, usedefault=True,), output_type=dict(), ) inputs = DualRegression.input_spec() @@ -26,7 +63,9 @@ def test_DualRegression_inputs(): def test_DualRegression_outputs(): - output_map = dict(out_dir=dict(),) + output_map = dict( + out_dir=dict(), + ) outputs = DualRegression.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py b/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py index 39476ab324..0462fa9cbe 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py @@ -4,22 +4,68 @@ def test_EPIDeWarp_inputs(): input_map = dict( - args=dict(argstr="%s",), - cleanup=dict(argstr="--cleanup",), - dph_file=dict(argstr="--dph %s", extensions=None, mandatory=True,), - environ=dict(nohash=True, usedefault=True,), - epi_file=dict(argstr="--epi %s", extensions=None,), - epidw=dict(argstr="--epidw %s", genfile=False,), - esp=dict(argstr="--esp %s", usedefault=True,), - 
exf_file=dict(argstr="--exf %s", extensions=None,), - exfdw=dict(argstr="--exfdw %s", genfile=True,), - mag_file=dict(argstr="--mag %s", extensions=None, mandatory=True, position=0,), - nocleanup=dict(argstr="--nocleanup", usedefault=True,), + args=dict( + argstr="%s", + ), + cleanup=dict( + argstr="--cleanup", + ), + dph_file=dict( + argstr="--dph %s", + extensions=None, + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + epi_file=dict( + argstr="--epi %s", + extensions=None, + ), + epidw=dict( + argstr="--epidw %s", + genfile=False, + ), + esp=dict( + argstr="--esp %s", + usedefault=True, + ), + exf_file=dict( + argstr="--exf %s", + extensions=None, + ), + exfdw=dict( + argstr="--exfdw %s", + genfile=True, + ), + mag_file=dict( + argstr="--mag %s", + extensions=None, + mandatory=True, + position=0, + ), + nocleanup=dict( + argstr="--nocleanup", + usedefault=True, + ), output_type=dict(), - sigma=dict(argstr="--sigma %s", usedefault=True,), - tediff=dict(argstr="--tediff %s", usedefault=True,), - tmpdir=dict(argstr="--tmpdir %s", genfile=True,), - vsm=dict(argstr="--vsm %s", genfile=True,), + sigma=dict( + argstr="--sigma %s", + usedefault=True, + ), + tediff=dict( + argstr="--tediff %s", + usedefault=True, + ), + tmpdir=dict( + argstr="--tmpdir %s", + genfile=True, + ), + vsm=dict( + argstr="--vsm %s", + genfile=True, + ), ) inputs = EPIDeWarp.input_spec() @@ -30,10 +76,18 @@ def test_EPIDeWarp_inputs(): def test_EPIDeWarp_outputs(): output_map = dict( - exf_mask=dict(extensions=None,), - exfdw=dict(extensions=None,), - unwarped_file=dict(extensions=None,), - vsm_file=dict(extensions=None,), + exf_mask=dict( + extensions=None, + ), + exfdw=dict( + extensions=None, + ), + unwarped_file=dict( + extensions=None, + ), + vsm_file=dict( + extensions=None, + ), ) outputs = EPIDeWarp.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Eddy.py b/nipype/interfaces/fsl/tests/test_auto_Eddy.py index 4a2d245a23..0005085474 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Eddy.py +++ b/nipype/interfaces/fsl/tests/test_auto_Eddy.py @@ -4,33 +4,99 @@ def test_Eddy_inputs(): input_map = dict( - args=dict(argstr="%s",), - cnr_maps=dict(argstr="--cnr_maps", min_ver="5.0.10",), - dont_peas=dict(argstr="--dont_peas",), - dont_sep_offs_move=dict(argstr="--dont_sep_offs_move",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + cnr_maps=dict( + argstr="--cnr_maps", + min_ver="5.0.10", + ), + dont_peas=dict( + argstr="--dont_peas", + ), + dont_sep_offs_move=dict( + argstr="--dont_sep_offs_move", + ), + environ=dict( + nohash=True, + usedefault=True, + ), estimate_move_by_susceptibility=dict( - argstr="--estimate_move_by_susceptibility", min_ver="6.0.1", - ), - fep=dict(argstr="--fep",), - field=dict(argstr="--field=%s", extensions=None,), - field_mat=dict(argstr="--field_mat=%s", extensions=None,), - flm=dict(argstr="--flm=%s", usedefault=True,), - fudge_factor=dict(argstr="--ff=%s", usedefault=True,), - fwhm=dict(argstr="--fwhm=%s",), - in_acqp=dict(argstr="--acqp=%s", extensions=None, mandatory=True,), - in_bval=dict(argstr="--bvals=%s", extensions=None, mandatory=True,), - in_bvec=dict(argstr="--bvecs=%s", extensions=None, mandatory=True,), - in_file=dict(argstr="--imain=%s", extensions=None, mandatory=True,), - in_index=dict(argstr="--index=%s", extensions=None, mandatory=True,), - in_mask=dict(argstr="--mask=%s", extensions=None, mandatory=True,), + argstr="--estimate_move_by_susceptibility", + min_ver="6.0.1", + ), + fep=dict( + 
argstr="--fep", + ), + field=dict( + argstr="--field=%s", + extensions=None, + ), + field_mat=dict( + argstr="--field_mat=%s", + extensions=None, + ), + flm=dict( + argstr="--flm=%s", + usedefault=True, + ), + fudge_factor=dict( + argstr="--ff=%s", + usedefault=True, + ), + fwhm=dict( + argstr="--fwhm=%s", + ), + in_acqp=dict( + argstr="--acqp=%s", + extensions=None, + mandatory=True, + ), + in_bval=dict( + argstr="--bvals=%s", + extensions=None, + mandatory=True, + ), + in_bvec=dict( + argstr="--bvecs=%s", + extensions=None, + mandatory=True, + ), + in_file=dict( + argstr="--imain=%s", + extensions=None, + mandatory=True, + ), + in_index=dict( + argstr="--index=%s", + extensions=None, + mandatory=True, + ), + in_mask=dict( + argstr="--mask=%s", + extensions=None, + mandatory=True, + ), in_topup_fieldcoef=dict( - argstr="--topup=%s", extensions=None, requires=["in_topup_movpar"], + argstr="--topup=%s", + extensions=None, + requires=["in_topup_movpar"], + ), + in_topup_movpar=dict( + extensions=None, + requires=["in_topup_fieldcoef"], + ), + initrand=dict( + argstr="--initrand", + min_ver="5.0.10", + ), + interp=dict( + argstr="--interp=%s", + usedefault=True, + ), + is_shelled=dict( + argstr="--data_is_shelled", ), - in_topup_movpar=dict(extensions=None, requires=["in_topup_fieldcoef"],), - initrand=dict(argstr="--initrand", min_ver="5.0.10",), - interp=dict(argstr="--interp=%s", usedefault=True,), - is_shelled=dict(argstr="--data_is_shelled",), json=dict( argstr="--json=%s", min_ver="6.0.1", @@ -52,38 +118,102 @@ def test_Eddy_inputs(): min_ver="6.0.1", requires=["estimate_move_by_susceptibility"], ), - method=dict(argstr="--resamp=%s", usedefault=True,), - mporder=dict(argstr="--mporder=%s", min_ver="5.0.11", requires=["use_cuda"],), - multiband_factor=dict(argstr="--mb=%s", min_ver="5.0.10",), + method=dict( + argstr="--resamp=%s", + usedefault=True, + ), + mporder=dict( + argstr="--mporder=%s", + min_ver="5.0.11", + requires=["use_cuda"], + ), + multiband_factor=dict( + argstr="--mb=%s", + min_ver="5.0.10", + ), multiband_offset=dict( - argstr="--mb_offs=%d", min_ver="5.0.10", requires=["multiband_factor"], - ), - niter=dict(argstr="--niter=%s", usedefault=True,), - num_threads=dict(nohash=True, usedefault=True,), - nvoxhp=dict(argstr="--nvoxhp=%s", usedefault=True,), - out_base=dict(argstr="--out=%s", usedefault=True,), - outlier_nstd=dict(argstr="--ol_nstd", min_ver="5.0.10", requires=["repol"],), - outlier_nvox=dict(argstr="--ol_nvox", min_ver="5.0.10", requires=["repol"],), - outlier_pos=dict(argstr="--ol_pos", min_ver="5.0.10", requires=["repol"],), - outlier_sqr=dict(argstr="--ol_sqr", min_ver="5.0.10", requires=["repol"],), - outlier_type=dict(argstr="--ol_type", min_ver="5.0.10", requires=["repol"],), + argstr="--mb_offs=%d", + min_ver="5.0.10", + requires=["multiband_factor"], + ), + niter=dict( + argstr="--niter=%s", + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + nvoxhp=dict( + argstr="--nvoxhp=%s", + usedefault=True, + ), + out_base=dict( + argstr="--out=%s", + usedefault=True, + ), + outlier_nstd=dict( + argstr="--ol_nstd", + min_ver="5.0.10", + requires=["repol"], + ), + outlier_nvox=dict( + argstr="--ol_nvox", + min_ver="5.0.10", + requires=["repol"], + ), + outlier_pos=dict( + argstr="--ol_pos", + min_ver="5.0.10", + requires=["repol"], + ), + outlier_sqr=dict( + argstr="--ol_sqr", + min_ver="5.0.10", + requires=["repol"], + ), + outlier_type=dict( + argstr="--ol_type", + min_ver="5.0.10", + requires=["repol"], + ), 
output_type=dict(), - repol=dict(argstr="--repol",), - residuals=dict(argstr="--residuals", min_ver="5.0.10",), - session=dict(argstr="--session=%s", extensions=None,), + repol=dict( + argstr="--repol", + ), + residuals=dict( + argstr="--residuals", + min_ver="5.0.10", + ), + session=dict( + argstr="--session=%s", + extensions=None, + ), slice2vol_interp=dict( - argstr="--s2v_interp=%s", min_ver="5.0.11", requires=["mporder"], + argstr="--s2v_interp=%s", + min_ver="5.0.11", + requires=["mporder"], ), slice2vol_lambda=dict( - argstr="--s2v_lambda=%d", min_ver="5.0.11", requires=["mporder"], + argstr="--s2v_lambda=%d", + min_ver="5.0.11", + requires=["mporder"], ), slice2vol_niter=dict( - argstr="--s2v_niter=%d", min_ver="5.0.11", requires=["mporder"], + argstr="--s2v_niter=%d", + min_ver="5.0.11", + requires=["mporder"], ), slice_order=dict( - argstr="--slspec=%s", min_ver="5.0.11", requires=["mporder"], xor=["json"], + argstr="--slspec=%s", + min_ver="5.0.11", + requires=["mporder"], + xor=["json"], + ), + slm=dict( + argstr="--slm=%s", + usedefault=True, ), - slm=dict(argstr="--slm=%s", usedefault=True,), use_cuda=dict(), ) inputs = Eddy.input_spec() @@ -95,21 +225,51 @@ def test_Eddy_inputs(): def test_Eddy_outputs(): output_map = dict( - out_cnr_maps=dict(extensions=None,), - out_corrected=dict(extensions=None,), - out_movement_over_time=dict(extensions=None,), - out_movement_rms=dict(extensions=None,), - out_outlier_free=dict(extensions=None,), - out_outlier_map=dict(extensions=None,), - out_outlier_n_sqr_stdev_map=dict(extensions=None,), - out_outlier_n_stdev_map=dict(extensions=None,), - out_outlier_report=dict(extensions=None,), - out_parameter=dict(extensions=None,), - out_residuals=dict(extensions=None,), - out_restricted_movement_rms=dict(extensions=None,), - out_rotated_bvecs=dict(extensions=None,), - out_shell_alignment_parameters=dict(extensions=None,), - out_shell_pe_translation_parameters=dict(extensions=None,), + out_cnr_maps=dict( + extensions=None, + ), + out_corrected=dict( + extensions=None, + ), + out_movement_over_time=dict( + extensions=None, + ), + out_movement_rms=dict( + extensions=None, + ), + out_outlier_free=dict( + extensions=None, + ), + out_outlier_map=dict( + extensions=None, + ), + out_outlier_n_sqr_stdev_map=dict( + extensions=None, + ), + out_outlier_n_stdev_map=dict( + extensions=None, + ), + out_outlier_report=dict( + extensions=None, + ), + out_parameter=dict( + extensions=None, + ), + out_residuals=dict( + extensions=None, + ), + out_restricted_movement_rms=dict( + extensions=None, + ), + out_rotated_bvecs=dict( + extensions=None, + ), + out_shell_alignment_parameters=dict( + extensions=None, + ), + out_shell_pe_translation_parameters=dict( + extensions=None, + ), ) outputs = Eddy.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py b/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py index fbd2af30cb..e2ce1c0a3b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py +++ b/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py @@ -4,9 +4,19 @@ def test_EddyCorrect_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), out_file=dict( argstr="%s", extensions=None, @@ -16,7 +26,12 @@ def test_EddyCorrect_inputs(): 
position=1, ), output_type=dict(), - ref_num=dict(argstr="%d", mandatory=True, position=2, usedefault=True,), + ref_num=dict( + argstr="%d", + mandatory=True, + position=2, + usedefault=True, + ), ) inputs = EddyCorrect.input_spec() @@ -26,7 +41,11 @@ def test_EddyCorrect_inputs(): def test_EddyCorrect_outputs(): - output_map = dict(eddy_corrected=dict(extensions=None,),) + output_map = dict( + eddy_corrected=dict( + extensions=None, + ), + ) outputs = EddyCorrect.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py b/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py index 14b6ca3a28..3d9756a4be 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py +++ b/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py @@ -4,21 +4,59 @@ def test_EddyQuad_inputs(): input_map = dict( - args=dict(argstr="%s",), - base_name=dict(argstr="%s", position=0, usedefault=True,), - bval_file=dict(argstr="--bvals %s", extensions=None, mandatory=True,), - bvec_file=dict(argstr="--bvecs %s", extensions=None,), - environ=dict(nohash=True, usedefault=True,), - field=dict(argstr="--field %s", extensions=None,), - idx_file=dict(argstr="--eddyIdx %s", extensions=None, mandatory=True,), - mask_file=dict(argstr="--mask %s", extensions=None, mandatory=True,), + args=dict( + argstr="%s", + ), + base_name=dict( + argstr="%s", + position=0, + usedefault=True, + ), + bval_file=dict( + argstr="--bvals %s", + extensions=None, + mandatory=True, + ), + bvec_file=dict( + argstr="--bvecs %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + field=dict( + argstr="--field %s", + extensions=None, + ), + idx_file=dict( + argstr="--eddyIdx %s", + extensions=None, + mandatory=True, + ), + mask_file=dict( + argstr="--mask %s", + extensions=None, + mandatory=True, + ), output_dir=dict( - argstr="--output-dir %s", name_source=["base_name"], name_template="%s.qc", + argstr="--output-dir %s", + name_source=["base_name"], + name_template="%s.qc", ), output_type=dict(), - param_file=dict(argstr="--eddyParams %s", extensions=None, mandatory=True,), - slice_spec=dict(argstr="--slspec %s", extensions=None,), - verbose=dict(argstr="--verbose",), + param_file=dict( + argstr="--eddyParams %s", + extensions=None, + mandatory=True, + ), + slice_spec=dict( + argstr="--slspec %s", + extensions=None, + ), + verbose=dict( + argstr="--verbose", + ), ) inputs = EddyQuad.input_spec() @@ -31,12 +69,22 @@ def test_EddyQuad_outputs(): output_map = dict( avg_b0_pe_png=dict(), avg_b_png=dict(), - clean_volumes=dict(extensions=None,), + clean_volumes=dict( + extensions=None, + ), cnr_png=dict(), - qc_json=dict(extensions=None,), - qc_pdf=dict(extensions=None,), - residuals=dict(extensions=None,), - vdm_png=dict(extensions=None,), + qc_json=dict( + extensions=None, + ), + qc_pdf=dict( + extensions=None, + ), + residuals=dict( + extensions=None, + ), + vdm_png=dict( + extensions=None, + ), ) outputs = EddyQuad.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_EpiReg.py b/nipype/interfaces/fsl/tests/test_auto_EpiReg.py index f1f1482260..242c2e6040 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EpiReg.py +++ b/nipype/interfaces/fsl/tests/test_auto_EpiReg.py @@ -4,24 +4,70 @@ def test_EpiReg_inputs(): input_map = dict( - args=dict(argstr="%s",), - echospacing=dict(argstr="--echospacing=%f",), - environ=dict(nohash=True, usedefault=True,), - epi=dict(argstr="--epi=%s", extensions=None, mandatory=True, position=-4,), - fmap=dict(argstr="--fmap=%s", 
extensions=None,), - fmapmag=dict(argstr="--fmapmag=%s", extensions=None,), - fmapmagbrain=dict(argstr="--fmapmagbrain=%s", extensions=None,), - no_clean=dict(argstr="--noclean", usedefault=True,), - no_fmapreg=dict(argstr="--nofmapreg",), - out_base=dict(argstr="--out=%s", position=-1, usedefault=True,), + args=dict( + argstr="%s", + ), + echospacing=dict( + argstr="--echospacing=%f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + epi=dict( + argstr="--epi=%s", + extensions=None, + mandatory=True, + position=-4, + ), + fmap=dict( + argstr="--fmap=%s", + extensions=None, + ), + fmapmag=dict( + argstr="--fmapmag=%s", + extensions=None, + ), + fmapmagbrain=dict( + argstr="--fmapmagbrain=%s", + extensions=None, + ), + no_clean=dict( + argstr="--noclean", + usedefault=True, + ), + no_fmapreg=dict( + argstr="--nofmapreg", + ), + out_base=dict( + argstr="--out=%s", + position=-1, + usedefault=True, + ), output_type=dict(), - pedir=dict(argstr="--pedir=%s",), + pedir=dict( + argstr="--pedir=%s", + ), t1_brain=dict( - argstr="--t1brain=%s", extensions=None, mandatory=True, position=-2, + argstr="--t1brain=%s", + extensions=None, + mandatory=True, + position=-2, + ), + t1_head=dict( + argstr="--t1=%s", + extensions=None, + mandatory=True, + position=-3, + ), + weight_image=dict( + argstr="--weight=%s", + extensions=None, + ), + wmseg=dict( + argstr="--wmseg=%s", + extensions=None, ), - t1_head=dict(argstr="--t1=%s", extensions=None, mandatory=True, position=-3,), - weight_image=dict(argstr="--weight=%s", extensions=None,), - wmseg=dict(argstr="--wmseg=%s", extensions=None,), ) inputs = EpiReg.input_spec() @@ -32,20 +78,48 @@ def test_EpiReg_inputs(): def test_EpiReg_outputs(): output_map = dict( - epi2str_inv=dict(extensions=None,), - epi2str_mat=dict(extensions=None,), - fmap2epi_mat=dict(extensions=None,), - fmap2str_mat=dict(extensions=None,), - fmap_epi=dict(extensions=None,), - fmap_str=dict(extensions=None,), - fmapmag_str=dict(extensions=None,), - fullwarp=dict(extensions=None,), - out_1vol=dict(extensions=None,), - out_file=dict(extensions=None,), - seg=dict(extensions=None,), - shiftmap=dict(extensions=None,), - wmedge=dict(extensions=None,), - wmseg=dict(extensions=None,), + epi2str_inv=dict( + extensions=None, + ), + epi2str_mat=dict( + extensions=None, + ), + fmap2epi_mat=dict( + extensions=None, + ), + fmap2str_mat=dict( + extensions=None, + ), + fmap_epi=dict( + extensions=None, + ), + fmap_str=dict( + extensions=None, + ), + fmapmag_str=dict( + extensions=None, + ), + fullwarp=dict( + extensions=None, + ), + out_1vol=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + seg=dict( + extensions=None, + ), + shiftmap=dict( + extensions=None, + ), + wmedge=dict( + extensions=None, + ), + wmseg=dict( + extensions=None, + ), ) outputs = EpiReg.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py b/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py index 1b813812c3..1aad31cd16 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py @@ -4,21 +4,58 @@ def test_ErodeImage_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - internal_datatype=dict(argstr="-dt %s", position=1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, 
+ ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), kernel_file=dict( - argstr="%s", extensions=None, position=5, xor=["kernel_size"], + argstr="%s", + extensions=None, + position=5, + xor=["kernel_size"], + ), + kernel_shape=dict( + argstr="-kernel %s", + position=4, + ), + kernel_size=dict( + argstr="%.4f", + position=5, + xor=["kernel_file"], + ), + minimum_filter=dict( + argstr="%s", + position=6, + usedefault=True, + ), + nan2zeros=dict( + argstr="-nan", + position=3, ), - kernel_shape=dict(argstr="-kernel %s", position=4,), - kernel_size=dict(argstr="%.4f", position=5, xor=["kernel_file"],), - minimum_filter=dict(argstr="%s", position=6, usedefault=True,), - nan2zeros=dict(argstr="-nan", position=3,), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = ErodeImage.input_spec() @@ -29,7 +66,11 @@ def test_ErodeImage_inputs(): def test_ErodeImage_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ErodeImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py b/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py index 4039eb50a2..bd6acb137c 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py +++ b/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py @@ -4,7 +4,9 @@ def test_ExtractROI_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), crop_list=dict( argstr="%s", position=2, @@ -19,20 +21,56 @@ def test_ExtractROI_inputs(): "t_size", ], ), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), output_type=dict(), roi_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=1, - ), - t_min=dict(argstr="%d", position=8,), - t_size=dict(argstr="%d", position=9,), - x_min=dict(argstr="%d", position=2,), - x_size=dict(argstr="%d", position=3,), - y_min=dict(argstr="%d", position=4,), - y_size=dict(argstr="%d", position=5,), - z_min=dict(argstr="%d", position=6,), - z_size=dict(argstr="%d", position=7,), + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=1, + ), + t_min=dict( + argstr="%d", + position=8, + ), + t_size=dict( + argstr="%d", + position=9, + ), + x_min=dict( + argstr="%d", + position=2, + ), + x_size=dict( + argstr="%d", + position=3, + ), + y_min=dict( + argstr="%d", + position=4, + ), + y_size=dict( + argstr="%d", + position=5, + ), + z_min=dict( + argstr="%d", + position=6, + ), + z_size=dict( + argstr="%d", + position=7, + ), ) inputs = ExtractROI.input_spec() @@ -42,7 +80,11 @@ def test_ExtractROI_inputs(): def test_ExtractROI_outputs(): - output_map = dict(roi_file=dict(extensions=None,),) + output_map = dict( + roi_file=dict( + extensions=None, + ), + ) outputs = ExtractROI.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FAST.py b/nipype/interfaces/fsl/tests/test_auto_FAST.py index 3c245682a6..e775d97b35 100644 --- 
a/nipype/interfaces/fsl/tests/test_auto_FAST.py +++ b/nipype/interfaces/fsl/tests/test_auto_FAST.py @@ -4,31 +4,87 @@ def test_FAST_inputs(): input_map = dict( - args=dict(argstr="%s",), - bias_iters=dict(argstr="-I %d",), - bias_lowpass=dict(argstr="-l %d", units="mm",), - environ=dict(nohash=True, usedefault=True,), - hyper=dict(argstr="-H %.2f",), - img_type=dict(argstr="-t %d",), - in_files=dict(argstr="%s", copyfile=False, mandatory=True, position=-1,), - init_seg_smooth=dict(argstr="-f %.3f",), - init_transform=dict(argstr="-a %s", extensions=None,), - iters_afterbias=dict(argstr="-O %d",), - manual_seg=dict(argstr="-s %s", extensions=None,), - mixel_smooth=dict(argstr="-R %.2f",), - no_bias=dict(argstr="-N",), - no_pve=dict(argstr="--nopve",), - number_classes=dict(argstr="-n %d",), - other_priors=dict(argstr="-A %s",), - out_basename=dict(argstr="-o %s", extensions=None,), - output_biascorrected=dict(argstr="-B",), - output_biasfield=dict(argstr="-b",), + args=dict( + argstr="%s", + ), + bias_iters=dict( + argstr="-I %d", + ), + bias_lowpass=dict( + argstr="-l %d", + units="mm", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hyper=dict( + argstr="-H %.2f", + ), + img_type=dict( + argstr="-t %d", + ), + in_files=dict( + argstr="%s", + copyfile=False, + mandatory=True, + position=-1, + ), + init_seg_smooth=dict( + argstr="-f %.3f", + ), + init_transform=dict( + argstr="-a %s", + extensions=None, + ), + iters_afterbias=dict( + argstr="-O %d", + ), + manual_seg=dict( + argstr="-s %s", + extensions=None, + ), + mixel_smooth=dict( + argstr="-R %.2f", + ), + no_bias=dict( + argstr="-N", + ), + no_pve=dict( + argstr="--nopve", + ), + number_classes=dict( + argstr="-n %d", + ), + other_priors=dict( + argstr="-A %s", + ), + out_basename=dict( + argstr="-o %s", + extensions=None, + ), + output_biascorrected=dict( + argstr="-B", + ), + output_biasfield=dict( + argstr="-b", + ), output_type=dict(), - probability_maps=dict(argstr="-p",), - segment_iters=dict(argstr="-W %d",), - segments=dict(argstr="-g",), - use_priors=dict(argstr="-P",), - verbose=dict(argstr="-v",), + probability_maps=dict( + argstr="-p", + ), + segment_iters=dict( + argstr="-W %d", + ), + segments=dict( + argstr="-g", + ), + use_priors=dict( + argstr="-P", + ), + verbose=dict( + argstr="-v", + ), ) inputs = FAST.input_spec() @@ -40,13 +96,19 @@ def test_FAST_inputs(): def test_FAST_outputs(): output_map = dict( bias_field=dict(), - mixeltype=dict(extensions=None,), + mixeltype=dict( + extensions=None, + ), partial_volume_files=dict(), - partial_volume_map=dict(extensions=None,), + partial_volume_map=dict( + extensions=None, + ), probability_maps=dict(), restored_image=dict(), tissue_class_files=dict(), - tissue_class_map=dict(extensions=None,), + tissue_class_map=dict( + extensions=None, + ), ) outputs = FAST.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FEAT.py b/nipype/interfaces/fsl/tests/test_auto_FEAT.py index f2d4e1a90d..b363dd290f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FEAT.py +++ b/nipype/interfaces/fsl/tests/test_auto_FEAT.py @@ -4,9 +4,19 @@ def test_FEAT_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - fsf_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fsf_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), output_type=dict(), ) inputs = FEAT.input_spec() @@ -17,7 +27,9 @@ 
def test_FEAT_inputs(): def test_FEAT_outputs(): - output_map = dict(feat_dir=dict(),) + output_map = dict( + feat_dir=dict(), + ) outputs = FEAT.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FEATModel.py b/nipype/interfaces/fsl/tests/test_auto_FEATModel.py index 46e959a2a0..0e6c2f9e33 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FEATModel.py +++ b/nipype/interfaces/fsl/tests/test_auto_FEATModel.py @@ -4,11 +4,25 @@ def test_FEATModel_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - ev_files=dict(argstr="%s", copyfile=False, mandatory=True, position=1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ev_files=dict( + argstr="%s", + copyfile=False, + mandatory=True, + position=1, + ), fsf_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=0, ), output_type=dict(), ) @@ -21,11 +35,21 @@ def test_FEATModel_inputs(): def test_FEATModel_outputs(): output_map = dict( - con_file=dict(extensions=None,), - design_cov=dict(extensions=None,), - design_file=dict(extensions=None,), - design_image=dict(extensions=None,), - fcon_file=dict(extensions=None,), + con_file=dict( + extensions=None, + ), + design_cov=dict( + extensions=None, + ), + design_file=dict( + extensions=None, + ), + design_image=dict( + extensions=None, + ), + fcon_file=dict( + extensions=None, + ), ) outputs = FEATModel.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py b/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py index bd7ae5f7c3..fe09c468ec 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py +++ b/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py @@ -4,9 +4,16 @@ def test_FEATRegister_inputs(): input_map = dict( - feat_dirs=dict(mandatory=True,), - reg_dof=dict(usedefault=True,), - reg_image=dict(extensions=None, mandatory=True,), + feat_dirs=dict( + mandatory=True, + ), + reg_dof=dict( + usedefault=True, + ), + reg_image=dict( + extensions=None, + mandatory=True, + ), ) inputs = FEATRegister.input_spec() @@ -16,7 +23,11 @@ def test_FEATRegister_inputs(): def test_FEATRegister_outputs(): - output_map = dict(fsf_file=dict(extensions=None,),) + output_map = dict( + fsf_file=dict( + extensions=None, + ), + ) outputs = FEATRegister.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FIRST.py b/nipype/interfaces/fsl/tests/test_auto_FIRST.py index c34f1737d2..42ba79e799 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FIRST.py +++ b/nipype/interfaces/fsl/tests/test_auto_FIRST.py @@ -4,10 +4,22 @@ def test_FIRST_inputs(): input_map = dict( - affine_file=dict(argstr="-a %s", extensions=None, position=6,), - args=dict(argstr="%s",), - brain_extracted=dict(argstr="-b", position=2,), - environ=dict(nohash=True, usedefault=True,), + affine_file=dict( + argstr="-a %s", + extensions=None, + position=6, + ), + args=dict( + argstr="%s", + ), + brain_extracted=dict( + argstr="-b", + position=2, + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( argstr="-i %s", copyfile=False, @@ -15,15 +27,25 @@ def test_FIRST_inputs(): mandatory=True, position=-2, ), - list_of_specific_structures=dict(argstr="-s %s", position=5, sep=",",), + list_of_specific_structures=dict( + argstr="-s %s", + position=5, + sep=",", + ), method=dict( 
argstr="-m %s", position=4, usedefault=True, xor=["method_as_numerical_threshold"], ), - method_as_numerical_threshold=dict(argstr="-m %.4f", position=4,), - no_cleanup=dict(argstr="-d", position=3,), + method_as_numerical_threshold=dict( + argstr="-m %.4f", + position=4, + ), + no_cleanup=dict( + argstr="-d", + position=3, + ), out_file=dict( argstr="-o %s", extensions=None, @@ -33,7 +55,10 @@ def test_FIRST_inputs(): usedefault=True, ), output_type=dict(), - verbose=dict(argstr="-v", position=1,), + verbose=dict( + argstr="-v", + position=1, + ), ) inputs = FIRST.input_spec() @@ -45,8 +70,12 @@ def test_FIRST_inputs(): def test_FIRST_outputs(): output_map = dict( bvars=dict(), - original_segmentations=dict(extensions=None,), - segmentation_file=dict(extensions=None,), + original_segmentations=dict( + extensions=None, + ), + segmentation_file=dict( + extensions=None, + ), vtk_surfaces=dict(), ) outputs = FIRST.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py b/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py index 13690485a5..f25b225d6e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py +++ b/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py @@ -4,29 +4,83 @@ def test_FLAMEO_inputs(): input_map = dict( - args=dict(argstr="%s",), - burnin=dict(argstr="--burnin=%d",), - cope_file=dict(argstr="--copefile=%s", extensions=None, mandatory=True,), + args=dict( + argstr="%s", + ), + burnin=dict( + argstr="--burnin=%d", + ), + cope_file=dict( + argstr="--copefile=%s", + extensions=None, + mandatory=True, + ), cov_split_file=dict( - argstr="--covsplitfile=%s", extensions=None, mandatory=True, - ), - design_file=dict(argstr="--designfile=%s", extensions=None, mandatory=True,), - dof_var_cope_file=dict(argstr="--dofvarcopefile=%s", extensions=None,), - environ=dict(nohash=True, usedefault=True,), - f_con_file=dict(argstr="--fcontrastsfile=%s", extensions=None,), - fix_mean=dict(argstr="--fixmean",), - infer_outliers=dict(argstr="--inferoutliers",), - log_dir=dict(argstr="--ld=%s", usedefault=True,), - mask_file=dict(argstr="--maskfile=%s", extensions=None, mandatory=True,), - n_jumps=dict(argstr="--njumps=%d",), - no_pe_outputs=dict(argstr="--nopeoutput",), - outlier_iter=dict(argstr="--ioni=%d",), + argstr="--covsplitfile=%s", + extensions=None, + mandatory=True, + ), + design_file=dict( + argstr="--designfile=%s", + extensions=None, + mandatory=True, + ), + dof_var_cope_file=dict( + argstr="--dofvarcopefile=%s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + f_con_file=dict( + argstr="--fcontrastsfile=%s", + extensions=None, + ), + fix_mean=dict( + argstr="--fixmean", + ), + infer_outliers=dict( + argstr="--inferoutliers", + ), + log_dir=dict( + argstr="--ld=%s", + usedefault=True, + ), + mask_file=dict( + argstr="--maskfile=%s", + extensions=None, + mandatory=True, + ), + n_jumps=dict( + argstr="--njumps=%d", + ), + no_pe_outputs=dict( + argstr="--nopeoutput", + ), + outlier_iter=dict( + argstr="--ioni=%d", + ), output_type=dict(), - run_mode=dict(argstr="--runmode=%s", mandatory=True,), - sample_every=dict(argstr="--sampleevery=%d",), - sigma_dofs=dict(argstr="--sigma_dofs=%d",), - t_con_file=dict(argstr="--tcontrastsfile=%s", extensions=None, mandatory=True,), - var_cope_file=dict(argstr="--varcopefile=%s", extensions=None,), + run_mode=dict( + argstr="--runmode=%s", + mandatory=True, + ), + sample_every=dict( + argstr="--sampleevery=%d", + ), + sigma_dofs=dict( + argstr="--sigma_dofs=%d", + ), + t_con_file=dict( + 
argstr="--tcontrastsfile=%s", + extensions=None, + mandatory=True, + ), + var_cope_file=dict( + argstr="--varcopefile=%s", + extensions=None, + ), ) inputs = FLAMEO.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FLIRT.py b/nipype/interfaces/fsl/tests/test_auto_FLIRT.py index ce2fca2486..a9bdc38477 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FLIRT.py +++ b/nipype/interfaces/fsl/tests/test_auto_FLIRT.py @@ -4,35 +4,110 @@ def test_FLIRT_inputs(): input_map = dict( - angle_rep=dict(argstr="-anglerep %s",), - apply_isoxfm=dict(argstr="-applyisoxfm %f", xor=["apply_xfm"],), - apply_xfm=dict(argstr="-applyxfm",), - args=dict(argstr="%s",), - bbrslope=dict(argstr="-bbrslope %f", min_ver="5.0.0",), - bbrtype=dict(argstr="-bbrtype %s", min_ver="5.0.0",), - bgvalue=dict(argstr="-setbackground %f",), - bins=dict(argstr="-bins %d",), - coarse_search=dict(argstr="-coarsesearch %d", units="degrees",), - cost=dict(argstr="-cost %s",), - cost_func=dict(argstr="-searchcost %s",), - datatype=dict(argstr="-datatype %s",), - display_init=dict(argstr="-displayinit",), - dof=dict(argstr="-dof %d",), - echospacing=dict(argstr="-echospacing %f", min_ver="5.0.0",), - environ=dict(nohash=True, usedefault=True,), - fieldmap=dict(argstr="-fieldmap %s", extensions=None, min_ver="5.0.0",), - fieldmapmask=dict(argstr="-fieldmapmask %s", extensions=None, min_ver="5.0.0",), - fine_search=dict(argstr="-finesearch %d", units="degrees",), - force_scaling=dict(argstr="-forcescaling",), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=0,), - in_matrix_file=dict(argstr="-init %s", extensions=None,), - in_weight=dict(argstr="-inweight %s", extensions=None,), - interp=dict(argstr="-interp %s",), - min_sampling=dict(argstr="-minsampling %f", units="mm",), - no_clamp=dict(argstr="-noclamp",), - no_resample=dict(argstr="-noresample",), - no_resample_blur=dict(argstr="-noresampblur",), - no_search=dict(argstr="-nosearch",), + angle_rep=dict( + argstr="-anglerep %s", + ), + apply_isoxfm=dict( + argstr="-applyisoxfm %f", + xor=["apply_xfm"], + ), + apply_xfm=dict( + argstr="-applyxfm", + ), + args=dict( + argstr="%s", + ), + bbrslope=dict( + argstr="-bbrslope %f", + min_ver="5.0.0", + ), + bbrtype=dict( + argstr="-bbrtype %s", + min_ver="5.0.0", + ), + bgvalue=dict( + argstr="-setbackground %f", + ), + bins=dict( + argstr="-bins %d", + ), + coarse_search=dict( + argstr="-coarsesearch %d", + units="degrees", + ), + cost=dict( + argstr="-cost %s", + ), + cost_func=dict( + argstr="-searchcost %s", + ), + datatype=dict( + argstr="-datatype %s", + ), + display_init=dict( + argstr="-displayinit", + ), + dof=dict( + argstr="-dof %d", + ), + echospacing=dict( + argstr="-echospacing %f", + min_ver="5.0.0", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fieldmap=dict( + argstr="-fieldmap %s", + extensions=None, + min_ver="5.0.0", + ), + fieldmapmask=dict( + argstr="-fieldmapmask %s", + extensions=None, + min_ver="5.0.0", + ), + fine_search=dict( + argstr="-finesearch %d", + units="degrees", + ), + force_scaling=dict( + argstr="-forcescaling", + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + position=0, + ), + in_matrix_file=dict( + argstr="-init %s", + extensions=None, + ), + in_weight=dict( + argstr="-inweight %s", + extensions=None, + ), + interp=dict( + argstr="-interp %s", + ), + min_sampling=dict( + argstr="-minsampling %f", + units="mm", + ), + no_clamp=dict( + argstr="-noclamp", + ), + no_resample=dict( + argstr="-noresample", + ), + 
no_resample_blur=dict( + argstr="-noresampblur", + ), + no_search=dict( + argstr="-nosearch", + ), out_file=dict( argstr="-out %s", extensions=None, @@ -58,23 +133,72 @@ def test_FLIRT_inputs(): position=3, ), output_type=dict(), - padding_size=dict(argstr="-paddingsize %d", units="voxels",), - pedir=dict(argstr="-pedir %d", min_ver="5.0.0",), - ref_weight=dict(argstr="-refweight %s", extensions=None,), - reference=dict(argstr="-ref %s", extensions=None, mandatory=True, position=1,), - rigid2D=dict(argstr="-2D",), + padding_size=dict( + argstr="-paddingsize %d", + units="voxels", + ), + pedir=dict( + argstr="-pedir %d", + min_ver="5.0.0", + ), + ref_weight=dict( + argstr="-refweight %s", + extensions=None, + ), + reference=dict( + argstr="-ref %s", + extensions=None, + mandatory=True, + position=1, + ), + rigid2D=dict( + argstr="-2D", + ), save_log=dict(), - schedule=dict(argstr="-schedule %s", extensions=None,), - searchr_x=dict(argstr="-searchrx %s", units="degrees",), - searchr_y=dict(argstr="-searchry %s", units="degrees",), - searchr_z=dict(argstr="-searchrz %s", units="degrees",), - sinc_width=dict(argstr="-sincwidth %d", units="voxels",), - sinc_window=dict(argstr="-sincwindow %s",), - uses_qform=dict(argstr="-usesqform",), - verbose=dict(argstr="-verbose %d",), - wm_seg=dict(argstr="-wmseg %s", extensions=None, min_ver="5.0.0",), - wmcoords=dict(argstr="-wmcoords %s", extensions=None, min_ver="5.0.0",), - wmnorms=dict(argstr="-wmnorms %s", extensions=None, min_ver="5.0.0",), + schedule=dict( + argstr="-schedule %s", + extensions=None, + ), + searchr_x=dict( + argstr="-searchrx %s", + units="degrees", + ), + searchr_y=dict( + argstr="-searchry %s", + units="degrees", + ), + searchr_z=dict( + argstr="-searchrz %s", + units="degrees", + ), + sinc_width=dict( + argstr="-sincwidth %d", + units="voxels", + ), + sinc_window=dict( + argstr="-sincwindow %s", + ), + uses_qform=dict( + argstr="-usesqform", + ), + verbose=dict( + argstr="-verbose %d", + ), + wm_seg=dict( + argstr="-wmseg %s", + extensions=None, + min_ver="5.0.0", + ), + wmcoords=dict( + argstr="-wmcoords %s", + extensions=None, + min_ver="5.0.0", + ), + wmnorms=dict( + argstr="-wmnorms %s", + extensions=None, + min_ver="5.0.0", + ), ) inputs = FLIRT.input_spec() @@ -85,9 +209,15 @@ def test_FLIRT_inputs(): def test_FLIRT_outputs(): output_map = dict( - out_file=dict(extensions=None,), - out_log=dict(extensions=None,), - out_matrix_file=dict(extensions=None,), + out_file=dict( + extensions=None, + ), + out_log=dict( + extensions=None, + ), + out_matrix_file=dict( + extensions=None, + ), ) outputs = FLIRT.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FNIRT.py b/nipype/interfaces/fsl/tests/test_auto_FNIRT.py index 794ae7d5f0..eb6ae1f714 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FNIRT.py +++ b/nipype/interfaces/fsl/tests/test_auto_FNIRT.py @@ -4,57 +4,169 @@ def test_FNIRT_inputs(): input_map = dict( - affine_file=dict(argstr="--aff=%s", extensions=None,), - apply_inmask=dict(argstr="--applyinmask=%s", sep=",", xor=["skip_inmask"],), + affine_file=dict( + argstr="--aff=%s", + extensions=None, + ), + apply_inmask=dict( + argstr="--applyinmask=%s", + sep=",", + xor=["skip_inmask"], + ), apply_intensity_mapping=dict( - argstr="--estint=%s", sep=",", xor=["skip_intensity_mapping"], - ), - apply_refmask=dict(argstr="--applyrefmask=%s", sep=",", xor=["skip_refmask"],), - args=dict(argstr="%s",), - bias_regularization_lambda=dict(argstr="--biaslambda=%f",), - 
biasfield_resolution=dict(argstr="--biasres=%d,%d,%d",), - config_file=dict(argstr="--config=%s",), - derive_from_ref=dict(argstr="--refderiv",), - environ=dict(nohash=True, usedefault=True,), - field_file=dict(argstr="--fout=%s", hash_files=False,), - fieldcoeff_file=dict(argstr="--cout=%s",), - hessian_precision=dict(argstr="--numprec=%s",), - in_file=dict(argstr="--in=%s", extensions=None, mandatory=True,), - in_fwhm=dict(argstr="--infwhm=%s", sep=",",), - in_intensitymap_file=dict(argstr="--intin=%s", copyfile=False,), - inmask_file=dict(argstr="--inmask=%s", extensions=None,), - inmask_val=dict(argstr="--impinval=%f",), - intensity_mapping_model=dict(argstr="--intmod=%s",), - intensity_mapping_order=dict(argstr="--intorder=%d",), - inwarp_file=dict(argstr="--inwarp=%s", extensions=None,), - jacobian_file=dict(argstr="--jout=%s", hash_files=False,), - jacobian_range=dict(argstr="--jacrange=%f,%f",), + argstr="--estint=%s", + sep=",", + xor=["skip_intensity_mapping"], + ), + apply_refmask=dict( + argstr="--applyrefmask=%s", + sep=",", + xor=["skip_refmask"], + ), + args=dict( + argstr="%s", + ), + bias_regularization_lambda=dict( + argstr="--biaslambda=%f", + ), + biasfield_resolution=dict( + argstr="--biasres=%d,%d,%d", + ), + config_file=dict( + argstr="--config=%s", + ), + derive_from_ref=dict( + argstr="--refderiv", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + field_file=dict( + argstr="--fout=%s", + hash_files=False, + ), + fieldcoeff_file=dict( + argstr="--cout=%s", + ), + hessian_precision=dict( + argstr="--numprec=%s", + ), + in_file=dict( + argstr="--in=%s", + extensions=None, + mandatory=True, + ), + in_fwhm=dict( + argstr="--infwhm=%s", + sep=",", + ), + in_intensitymap_file=dict( + argstr="--intin=%s", + copyfile=False, + ), + inmask_file=dict( + argstr="--inmask=%s", + extensions=None, + ), + inmask_val=dict( + argstr="--impinval=%f", + ), + intensity_mapping_model=dict( + argstr="--intmod=%s", + ), + intensity_mapping_order=dict( + argstr="--intorder=%d", + ), + inwarp_file=dict( + argstr="--inwarp=%s", + extensions=None, + ), + jacobian_file=dict( + argstr="--jout=%s", + hash_files=False, + ), + jacobian_range=dict( + argstr="--jacrange=%f,%f", + ), log_file=dict( - argstr="--logout=%s", extensions=None, genfile=True, hash_files=False, + argstr="--logout=%s", + extensions=None, + genfile=True, + hash_files=False, + ), + max_nonlin_iter=dict( + argstr="--miter=%s", + sep=",", + ), + modulatedref_file=dict( + argstr="--refout=%s", + hash_files=False, + ), + out_intensitymap_file=dict( + argstr="--intout=%s", + hash_files=False, ), - max_nonlin_iter=dict(argstr="--miter=%s", sep=",",), - modulatedref_file=dict(argstr="--refout=%s", hash_files=False,), - out_intensitymap_file=dict(argstr="--intout=%s", hash_files=False,), output_type=dict(), - ref_file=dict(argstr="--ref=%s", extensions=None, mandatory=True,), - ref_fwhm=dict(argstr="--reffwhm=%s", sep=",",), - refmask_file=dict(argstr="--refmask=%s", extensions=None,), - refmask_val=dict(argstr="--imprefval=%f",), - regularization_lambda=dict(argstr="--lambda=%s", sep=",",), - regularization_model=dict(argstr="--regmod=%s",), - skip_implicit_in_masking=dict(argstr="--impinm=0",), - skip_implicit_ref_masking=dict(argstr="--imprefm=0",), - skip_inmask=dict(argstr="--applyinmask=0", xor=["apply_inmask"],), + ref_file=dict( + argstr="--ref=%s", + extensions=None, + mandatory=True, + ), + ref_fwhm=dict( + argstr="--reffwhm=%s", + sep=",", + ), + refmask_file=dict( + argstr="--refmask=%s", + extensions=None, + ), 
+ refmask_val=dict( + argstr="--imprefval=%f", + ), + regularization_lambda=dict( + argstr="--lambda=%s", + sep=",", + ), + regularization_model=dict( + argstr="--regmod=%s", + ), + skip_implicit_in_masking=dict( + argstr="--impinm=0", + ), + skip_implicit_ref_masking=dict( + argstr="--imprefm=0", + ), + skip_inmask=dict( + argstr="--applyinmask=0", + xor=["apply_inmask"], + ), skip_intensity_mapping=dict( - argstr="--estint=0", xor=["apply_intensity_mapping"], + argstr="--estint=0", + xor=["apply_intensity_mapping"], + ), + skip_lambda_ssq=dict( + argstr="--ssqlambda=0", + ), + skip_refmask=dict( + argstr="--applyrefmask=0", + xor=["apply_refmask"], + ), + spline_order=dict( + argstr="--splineorder=%d", + ), + subsampling_scheme=dict( + argstr="--subsamp=%s", + sep=",", + ), + warp_resolution=dict( + argstr="--warpres=%d,%d,%d", ), - skip_lambda_ssq=dict(argstr="--ssqlambda=0",), - skip_refmask=dict(argstr="--applyrefmask=0", xor=["apply_refmask"],), - spline_order=dict(argstr="--splineorder=%d",), - subsampling_scheme=dict(argstr="--subsamp=%s", sep=",",), - warp_resolution=dict(argstr="--warpres=%d,%d,%d",), warped_file=dict( - argstr="--iout=%s", extensions=None, genfile=True, hash_files=False, + argstr="--iout=%s", + extensions=None, + genfile=True, + hash_files=False, ), ) inputs = FNIRT.input_spec() @@ -66,13 +178,25 @@ def test_FNIRT_inputs(): def test_FNIRT_outputs(): output_map = dict( - field_file=dict(extensions=None,), - fieldcoeff_file=dict(extensions=None,), - jacobian_file=dict(extensions=None,), - log_file=dict(extensions=None,), - modulatedref_file=dict(extensions=None,), + field_file=dict( + extensions=None, + ), + fieldcoeff_file=dict( + extensions=None, + ), + jacobian_file=dict( + extensions=None, + ), + log_file=dict( + extensions=None, + ), + modulatedref_file=dict( + extensions=None, + ), out_intensitymap_file=dict(), - warped_file=dict(extensions=None,), + warped_file=dict( + extensions=None, + ), ) outputs = FNIRT.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py b/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py index bc4c0443ee..1b444c381e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py +++ b/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py @@ -4,8 +4,13 @@ def test_FSLCommand_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), output_type=dict(), ) inputs = FSLCommand.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py b/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py index d9c30cd262..3948f3d650 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py +++ b/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py @@ -4,36 +4,107 @@ def test_FSLXCommand_inputs(): input_map = dict( - all_ard=dict(argstr="--allard", xor=("no_ard", "all_ard"),), - args=dict(argstr="%s",), - burn_in=dict(argstr="--burnin=%d", usedefault=True,), - burn_in_no_ard=dict(argstr="--burnin_noard=%d", usedefault=True,), - bvals=dict(argstr="--bvals=%s", extensions=None, mandatory=True,), - bvecs=dict(argstr="--bvecs=%s", extensions=None, mandatory=True,), + all_ard=dict( + argstr="--allard", + xor=("no_ard", "all_ard"), + ), + args=dict( + argstr="%s", + ), + burn_in=dict( + argstr="--burnin=%d", + usedefault=True, + ), + burn_in_no_ard=dict( + argstr="--burnin_noard=%d", + usedefault=True, + ), + bvals=dict( + argstr="--bvals=%s", + extensions=None, + 
mandatory=True, + ), + bvecs=dict( + argstr="--bvecs=%s", + extensions=None, + mandatory=True, + ), cnlinear=dict( - argstr="--cnonlinear", xor=("no_spat", "non_linear", "cnlinear"), - ), - dwi=dict(argstr="--data=%s", extensions=None, mandatory=True,), - environ=dict(nohash=True, usedefault=True,), - f0_ard=dict(argstr="--f0 --ardf0", xor=["f0_noard", "f0_ard", "all_ard"],), - f0_noard=dict(argstr="--f0", xor=["f0_noard", "f0_ard"],), - force_dir=dict(argstr="--forcedir", usedefault=True,), - fudge=dict(argstr="--fudge=%d",), - logdir=dict(argstr="--logdir=%s", usedefault=True,), - mask=dict(argstr="--mask=%s", extensions=None, mandatory=True,), - model=dict(argstr="--model=%d",), - n_fibres=dict(argstr="--nfibres=%d", mandatory=True, usedefault=True,), - n_jumps=dict(argstr="--njumps=%d", usedefault=True,), - no_ard=dict(argstr="--noard", xor=("no_ard", "all_ard"),), - no_spat=dict(argstr="--nospat", xor=("no_spat", "non_linear", "cnlinear"),), + argstr="--cnonlinear", + xor=("no_spat", "non_linear", "cnlinear"), + ), + dwi=dict( + argstr="--data=%s", + extensions=None, + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + f0_ard=dict( + argstr="--f0 --ardf0", + xor=["f0_noard", "f0_ard", "all_ard"], + ), + f0_noard=dict( + argstr="--f0", + xor=["f0_noard", "f0_ard"], + ), + force_dir=dict( + argstr="--forcedir", + usedefault=True, + ), + fudge=dict( + argstr="--fudge=%d", + ), + logdir=dict( + argstr="--logdir=%s", + usedefault=True, + ), + mask=dict( + argstr="--mask=%s", + extensions=None, + mandatory=True, + ), + model=dict( + argstr="--model=%d", + ), + n_fibres=dict( + argstr="--nfibres=%d", + mandatory=True, + usedefault=True, + ), + n_jumps=dict( + argstr="--njumps=%d", + usedefault=True, + ), + no_ard=dict( + argstr="--noard", + xor=("no_ard", "all_ard"), + ), + no_spat=dict( + argstr="--nospat", + xor=("no_spat", "non_linear", "cnlinear"), + ), non_linear=dict( - argstr="--nonlinear", xor=("no_spat", "non_linear", "cnlinear"), + argstr="--nonlinear", + xor=("no_spat", "non_linear", "cnlinear"), ), output_type=dict(), - rician=dict(argstr="--rician",), - sample_every=dict(argstr="--sampleevery=%d", usedefault=True,), - seed=dict(argstr="--seed=%d",), - update_proposal_every=dict(argstr="--updateproposalevery=%d", usedefault=True,), + rician=dict( + argstr="--rician", + ), + sample_every=dict( + argstr="--sampleevery=%d", + usedefault=True, + ), + seed=dict( + argstr="--seed=%d", + ), + update_proposal_every=dict( + argstr="--updateproposalevery=%d", + usedefault=True, + ), ) inputs = FSLXCommand.input_spec() @@ -46,10 +117,16 @@ def test_FSLXCommand_outputs(): output_map = dict( dyads=dict(), fsamples=dict(), - mean_S0samples=dict(extensions=None,), - mean_dsamples=dict(extensions=None,), + mean_S0samples=dict( + extensions=None, + ), + mean_dsamples=dict( + extensions=None, + ), mean_fsamples=dict(), - mean_tausamples=dict(extensions=None,), + mean_tausamples=dict( + extensions=None, + ), phsamples=dict(), thsamples=dict(), ) diff --git a/nipype/interfaces/fsl/tests/test_auto_FUGUE.py b/nipype/interfaces/fsl/tests/test_auto_FUGUE.py index a1f6873658..841bb2021f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FUGUE.py +++ b/nipype/interfaces/fsl/tests/test_auto_FUGUE.py @@ -4,39 +4,115 @@ def test_FUGUE_inputs(): input_map = dict( - args=dict(argstr="%s",), - asym_se_time=dict(argstr="--asym=%.10f",), - despike_2dfilter=dict(argstr="--despike",), - despike_threshold=dict(argstr="--despikethreshold=%s",), - dwell_time=dict(argstr="--dwell=%.10f",), - 
dwell_to_asym_ratio=dict(argstr="--dwelltoasym=%.10f",), - environ=dict(nohash=True, usedefault=True,), - fmap_in_file=dict(argstr="--loadfmap=%s", extensions=None,), - fmap_out_file=dict(argstr="--savefmap=%s", extensions=None,), - forward_warping=dict(usedefault=True,), - fourier_order=dict(argstr="--fourier=%d",), - icorr=dict(argstr="--icorr", requires=["shift_in_file"],), - icorr_only=dict(argstr="--icorronly", requires=["unwarped_file"],), - in_file=dict(argstr="--in=%s", extensions=None,), - mask_file=dict(argstr="--mask=%s", extensions=None,), - median_2dfilter=dict(argstr="--median",), - no_extend=dict(argstr="--noextend",), - no_gap_fill=dict(argstr="--nofill",), - nokspace=dict(argstr="--nokspace",), + args=dict( + argstr="%s", + ), + asym_se_time=dict( + argstr="--asym=%.10f", + ), + despike_2dfilter=dict( + argstr="--despike", + ), + despike_threshold=dict( + argstr="--despikethreshold=%s", + ), + dwell_time=dict( + argstr="--dwell=%.10f", + ), + dwell_to_asym_ratio=dict( + argstr="--dwelltoasym=%.10f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fmap_in_file=dict( + argstr="--loadfmap=%s", + extensions=None, + ), + fmap_out_file=dict( + argstr="--savefmap=%s", + extensions=None, + ), + forward_warping=dict( + usedefault=True, + ), + fourier_order=dict( + argstr="--fourier=%d", + ), + icorr=dict( + argstr="--icorr", + requires=["shift_in_file"], + ), + icorr_only=dict( + argstr="--icorronly", + requires=["unwarped_file"], + ), + in_file=dict( + argstr="--in=%s", + extensions=None, + ), + mask_file=dict( + argstr="--mask=%s", + extensions=None, + ), + median_2dfilter=dict( + argstr="--median", + ), + no_extend=dict( + argstr="--noextend", + ), + no_gap_fill=dict( + argstr="--nofill", + ), + nokspace=dict( + argstr="--nokspace", + ), output_type=dict(), - pava=dict(argstr="--pava",), - phase_conjugate=dict(argstr="--phaseconj",), - phasemap_in_file=dict(argstr="--phasemap=%s", extensions=None,), - poly_order=dict(argstr="--poly=%d",), - save_fmap=dict(xor=["save_unmasked_fmap"],), - save_shift=dict(xor=["save_unmasked_shift"],), - save_unmasked_fmap=dict(argstr="--unmaskfmap", xor=["save_fmap"],), - save_unmasked_shift=dict(argstr="--unmaskshift", xor=["save_shift"],), - shift_in_file=dict(argstr="--loadshift=%s", extensions=None,), - shift_out_file=dict(argstr="--saveshift=%s", extensions=None,), - smooth2d=dict(argstr="--smooth2=%.2f",), - smooth3d=dict(argstr="--smooth3=%.2f",), - unwarp_direction=dict(argstr="--unwarpdir=%s",), + pava=dict( + argstr="--pava", + ), + phase_conjugate=dict( + argstr="--phaseconj", + ), + phasemap_in_file=dict( + argstr="--phasemap=%s", + extensions=None, + ), + poly_order=dict( + argstr="--poly=%d", + ), + save_fmap=dict( + xor=["save_unmasked_fmap"], + ), + save_shift=dict( + xor=["save_unmasked_shift"], + ), + save_unmasked_fmap=dict( + argstr="--unmaskfmap", + xor=["save_fmap"], + ), + save_unmasked_shift=dict( + argstr="--unmaskshift", + xor=["save_shift"], + ), + shift_in_file=dict( + argstr="--loadshift=%s", + extensions=None, + ), + shift_out_file=dict( + argstr="--saveshift=%s", + extensions=None, + ), + smooth2d=dict( + argstr="--smooth2=%.2f", + ), + smooth3d=dict( + argstr="--smooth3=%.2f", + ), + unwarp_direction=dict( + argstr="--unwarpdir=%s", + ), unwarped_file=dict( argstr="--unwarp=%s", extensions=None, @@ -59,10 +135,18 @@ def test_FUGUE_inputs(): def test_FUGUE_outputs(): output_map = dict( - fmap_out_file=dict(extensions=None,), - shift_out_file=dict(extensions=None,), - 
unwarped_file=dict(extensions=None,), - warped_file=dict(extensions=None,), + fmap_out_file=dict( + extensions=None, + ), + shift_out_file=dict( + extensions=None, + ), + unwarped_file=dict( + extensions=None, + ), + warped_file=dict( + extensions=None, + ), ) outputs = FUGUE.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py b/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py index 03d627a0bf..8531fe17c4 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py +++ b/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py @@ -4,9 +4,18 @@ def test_FeatureExtractor_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - mel_ica=dict(argstr="%s", copyfile=False, position=-1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + mel_ica=dict( + argstr="%s", + copyfile=False, + position=-1, + ), ) inputs = FeatureExtractor.input_spec() @@ -16,7 +25,13 @@ def test_FeatureExtractor_inputs(): def test_FeatureExtractor_outputs(): - output_map = dict(mel_ica=dict(argstr="%s", copyfile=False, position=-1,),) + output_map = dict( + mel_ica=dict( + argstr="%s", + copyfile=False, + position=-1, + ), + ) outputs = FeatureExtractor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py b/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py index d63a61ea1c..e4826db355 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py +++ b/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py @@ -4,23 +4,55 @@ def test_FilterRegressor_inputs(): input_map = dict( - args=dict(argstr="%s",), - design_file=dict(argstr="-d %s", extensions=None, mandatory=True, position=3,), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + design_file=dict( + argstr="-d %s", + extensions=None, + mandatory=True, + position=3, + ), + environ=dict( + nohash=True, + usedefault=True, + ), filter_all=dict( - argstr="-f '%s'", mandatory=True, position=4, xor=["filter_columns"], + argstr="-f '%s'", + mandatory=True, + position=4, + xor=["filter_columns"], ), filter_columns=dict( - argstr="-f '%s'", mandatory=True, position=4, xor=["filter_all"], + argstr="-f '%s'", + mandatory=True, + position=4, + xor=["filter_all"], + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + position=1, + ), + mask=dict( + argstr="-m %s", + extensions=None, ), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=1,), - mask=dict(argstr="-m %s", extensions=None,), out_file=dict( - argstr="-o %s", extensions=None, genfile=True, hash_files=False, position=2, + argstr="-o %s", + extensions=None, + genfile=True, + hash_files=False, + position=2, + ), + out_vnscales=dict( + argstr="--out_vnscales", ), - out_vnscales=dict(argstr="--out_vnscales",), output_type=dict(), - var_norm=dict(argstr="--vn",), + var_norm=dict( + argstr="--vn", + ), ) inputs = FilterRegressor.input_spec() @@ -30,7 +62,11 @@ def test_FilterRegressor_inputs(): def test_FilterRegressor_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = FilterRegressor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py b/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py index 0152e34ed0..14b3bbb8da 100644 --- 
a/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py +++ b/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py @@ -4,11 +4,24 @@ def test_FindTheBiggest_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_files=dict(argstr="%s", mandatory=True, position=0,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr="%s", + mandatory=True, + position=0, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=2, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=2, ), output_type=dict(), ) @@ -20,7 +33,12 @@ def test_FindTheBiggest_inputs(): def test_FindTheBiggest_outputs(): - output_map = dict(out_file=dict(argstr="%s", extensions=None,),) + output_map = dict( + out_file=dict( + argstr="%s", + extensions=None, + ), + ) outputs = FindTheBiggest.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_GLM.py b/nipype/interfaces/fsl/tests/test_auto_GLM.py index 61a550884d..63105f128d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_GLM.py +++ b/nipype/interfaces/fsl/tests/test_auto_GLM.py @@ -4,19 +4,57 @@ def test_GLM_inputs(): input_map = dict( - args=dict(argstr="%s",), - contrasts=dict(argstr="-c %s", extensions=None,), - dat_norm=dict(argstr="--dat_norm",), - demean=dict(argstr="--demean",), - des_norm=dict(argstr="--des_norm",), - design=dict(argstr="-d %s", extensions=None, mandatory=True, position=2,), - dof=dict(argstr="--dof=%d",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=1,), - mask=dict(argstr="-m %s", extensions=None,), - out_cope=dict(argstr="--out_cope=%s", extensions=None,), - out_data_name=dict(argstr="--out_data=%s", extensions=None,), - out_f_name=dict(argstr="--out_f=%s", extensions=None,), + args=dict( + argstr="%s", + ), + contrasts=dict( + argstr="-c %s", + extensions=None, + ), + dat_norm=dict( + argstr="--dat_norm", + ), + demean=dict( + argstr="--demean", + ), + des_norm=dict( + argstr="--des_norm", + ), + design=dict( + argstr="-d %s", + extensions=None, + mandatory=True, + position=2, + ), + dof=dict( + argstr="--dof=%d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + position=1, + ), + mask=dict( + argstr="-m %s", + extensions=None, + ), + out_cope=dict( + argstr="--out_cope=%s", + extensions=None, + ), + out_data_name=dict( + argstr="--out_data=%s", + extensions=None, + ), + out_f_name=dict( + argstr="--out_f=%s", + extensions=None, + ), out_file=dict( argstr="-o %s", extensions=None, @@ -25,16 +63,42 @@ def test_GLM_inputs(): name_template="%s_glm", position=3, ), - out_p_name=dict(argstr="--out_p=%s", extensions=None,), - out_pf_name=dict(argstr="--out_pf=%s", extensions=None,), - out_res_name=dict(argstr="--out_res=%s", extensions=None,), - out_sigsq_name=dict(argstr="--out_sigsq=%s", extensions=None,), - out_t_name=dict(argstr="--out_t=%s", extensions=None,), - out_varcb_name=dict(argstr="--out_varcb=%s", extensions=None,), - out_vnscales_name=dict(argstr="--out_vnscales=%s", extensions=None,), - out_z_name=dict(argstr="--out_z=%s", extensions=None,), + out_p_name=dict( + argstr="--out_p=%s", + extensions=None, + ), + out_pf_name=dict( + argstr="--out_pf=%s", + extensions=None, + ), + out_res_name=dict( + argstr="--out_res=%s", + extensions=None, + ), + 
out_sigsq_name=dict( + argstr="--out_sigsq=%s", + extensions=None, + ), + out_t_name=dict( + argstr="--out_t=%s", + extensions=None, + ), + out_varcb_name=dict( + argstr="--out_varcb=%s", + extensions=None, + ), + out_vnscales_name=dict( + argstr="--out_vnscales=%s", + extensions=None, + ), + out_z_name=dict( + argstr="--out_z=%s", + extensions=None, + ), output_type=dict(), - var_norm=dict(argstr="--vn",), + var_norm=dict( + argstr="--vn", + ), ) inputs = GLM.input_spec() @@ -48,7 +112,9 @@ def test_GLM_outputs(): out_cope=dict(), out_data=dict(), out_f=dict(), - out_file=dict(extensions=None,), + out_file=dict( + extensions=None, + ), out_p=dict(), out_pf=dict(), out_res=dict(), diff --git a/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py b/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py index 51975c5bef..b49813e24d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py +++ b/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py @@ -4,27 +4,64 @@ def test_ICA_AROMA_inputs(): input_map = dict( - TR=dict(argstr="-tr %.3f",), - args=dict(argstr="%s",), - denoise_type=dict(argstr="-den %s", mandatory=True, usedefault=True,), - dim=dict(argstr="-dim %d",), - environ=dict(nohash=True, usedefault=True,), + TR=dict( + argstr="-tr %.3f", + ), + args=dict( + argstr="%s", + ), + denoise_type=dict( + argstr="-den %s", + mandatory=True, + usedefault=True, + ), + dim=dict( + argstr="-dim %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), feat_dir=dict( argstr="-feat %s", mandatory=True, xor=["in_file", "mat_file", "fnirt_warp_file", "motion_parameters"], ), - fnirt_warp_file=dict(argstr="-warp %s", extensions=None, xor=["feat_dir"],), + fnirt_warp_file=dict( + argstr="-warp %s", + extensions=None, + xor=["feat_dir"], + ), in_file=dict( - argstr="-i %s", extensions=None, mandatory=True, xor=["feat_dir"], + argstr="-i %s", + extensions=None, + mandatory=True, + xor=["feat_dir"], + ), + mask=dict( + argstr="-m %s", + extensions=None, + xor=["feat_dir"], + ), + mat_file=dict( + argstr="-affmat %s", + extensions=None, + xor=["feat_dir"], + ), + melodic_dir=dict( + argstr="-meldir %s", ), - mask=dict(argstr="-m %s", extensions=None, xor=["feat_dir"],), - mat_file=dict(argstr="-affmat %s", extensions=None, xor=["feat_dir"],), - melodic_dir=dict(argstr="-meldir %s",), motion_parameters=dict( - argstr="-mc %s", extensions=None, mandatory=True, xor=["feat_dir"], + argstr="-mc %s", + extensions=None, + mandatory=True, + xor=["feat_dir"], + ), + out_dir=dict( + argstr="-o %s", + mandatory=True, + usedefault=True, ), - out_dir=dict(argstr="-o %s", mandatory=True, usedefault=True,), ) inputs = ICA_AROMA.input_spec() @@ -35,8 +72,12 @@ def test_ICA_AROMA_inputs(): def test_ICA_AROMA_outputs(): output_map = dict( - aggr_denoised_file=dict(extensions=None,), - nonaggr_denoised_file=dict(extensions=None,), + aggr_denoised_file=dict( + extensions=None, + ), + nonaggr_denoised_file=dict( + extensions=None, + ), out_dir=dict(), ) outputs = ICA_AROMA.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py b/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py index 02a73d2662..d2c4737d65 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py @@ -4,15 +4,42 @@ def test_ImageMaths_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1,), - in_file2=dict(argstr="%s", extensions=None, 
position=3,), - mask_file=dict(argstr="-mas %s", extensions=None,), - op_string=dict(argstr="%s", position=2,), - out_data_type=dict(argstr="-odt %s", position=-1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + in_file2=dict( + argstr="%s", + extensions=None, + position=3, + ), + mask_file=dict( + argstr="-mas %s", + extensions=None, + ), + op_string=dict( + argstr="%s", + position=2, + ), + out_data_type=dict( + argstr="-odt %s", + position=-1, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, ), output_type=dict(), suffix=dict(), @@ -25,7 +52,11 @@ def test_ImageMaths_inputs(): def test_ImageMaths_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ImageMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py b/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py index 2d53d25c1f..b050d8f50b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py +++ b/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py @@ -4,19 +4,52 @@ def test_ImageMeants_inputs(): input_map = dict( - args=dict(argstr="%s",), - eig=dict(argstr="--eig",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=0,), - mask=dict(argstr="-m %s", extensions=None,), - nobin=dict(argstr="--no_bin",), - order=dict(argstr="--order=%d", usedefault=True,), - out_file=dict(argstr="-o %s", extensions=None, genfile=True, hash_files=False,), + args=dict( + argstr="%s", + ), + eig=dict( + argstr="--eig", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + position=0, + ), + mask=dict( + argstr="-m %s", + extensions=None, + ), + nobin=dict( + argstr="--no_bin", + ), + order=dict( + argstr="--order=%d", + usedefault=True, + ), + out_file=dict( + argstr="-o %s", + extensions=None, + genfile=True, + hash_files=False, + ), output_type=dict(), - show_all=dict(argstr="--showall",), - spatial_coord=dict(argstr="-c %s",), - transpose=dict(argstr="--transpose",), - use_mm=dict(argstr="--usemm",), + show_all=dict( + argstr="--showall", + ), + spatial_coord=dict( + argstr="-c %s", + ), + transpose=dict( + argstr="--transpose", + ), + use_mm=dict( + argstr="--usemm", + ), ) inputs = ImageMeants.input_spec() @@ -26,7 +59,11 @@ def test_ImageMeants_inputs(): def test_ImageMeants_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ImageMeants.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageStats.py b/nipype/interfaces/fsl/tests/test_auto_ImageStats.py index 0c309880bb..e4ddf5f06d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ImageStats.py +++ b/nipype/interfaces/fsl/tests/test_auto_ImageStats.py @@ -4,14 +4,38 @@ def test_ImageStats_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=3,), - index_mask_file=dict(argstr="-K %s", extensions=None, position=2,), - mask_file=dict(argstr="", 
extensions=None,), - op_string=dict(argstr="%s", mandatory=True, position=4,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=3, + ), + index_mask_file=dict( + argstr="-K %s", + extensions=None, + position=2, + ), + mask_file=dict( + argstr="", + extensions=None, + ), + op_string=dict( + argstr="%s", + mandatory=True, + position=4, + ), output_type=dict(), - split_4d=dict(argstr="-t", position=1,), + split_4d=dict( + argstr="-t", + position=1, + ), ) inputs = ImageStats.input_spec() @@ -21,7 +45,9 @@ def test_ImageStats_inputs(): def test_ImageStats_outputs(): - output_map = dict(out_stat=dict(),) + output_map = dict( + out_stat=dict(), + ) outputs = ImageStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_InvWarp.py b/nipype/interfaces/fsl/tests/test_auto_InvWarp.py index b116f19737..1dba5e578a 100644 --- a/nipype/interfaces/fsl/tests/test_auto_InvWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_InvWarp.py @@ -4,9 +4,17 @@ def test_InvWarp_inputs(): input_map = dict( - absolute=dict(argstr="--abs", xor=["relative"],), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + absolute=dict( + argstr="--abs", + xor=["relative"], + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), inverse_warp=dict( argstr="--out=%s", extensions=None, @@ -14,15 +22,36 @@ def test_InvWarp_inputs(): name_source=["warp"], name_template="%s_inverse", ), - jacobian_max=dict(argstr="--jmax=%f",), - jacobian_min=dict(argstr="--jmin=%f",), - niter=dict(argstr="--niter=%d",), - noconstraint=dict(argstr="--noconstraint",), + jacobian_max=dict( + argstr="--jmax=%f", + ), + jacobian_min=dict( + argstr="--jmin=%f", + ), + niter=dict( + argstr="--niter=%d", + ), + noconstraint=dict( + argstr="--noconstraint", + ), output_type=dict(), - reference=dict(argstr="--ref=%s", extensions=None, mandatory=True,), - regularise=dict(argstr="--regularise=%f",), - relative=dict(argstr="--rel", xor=["absolute"],), - warp=dict(argstr="--warp=%s", extensions=None, mandatory=True,), + reference=dict( + argstr="--ref=%s", + extensions=None, + mandatory=True, + ), + regularise=dict( + argstr="--regularise=%f", + ), + relative=dict( + argstr="--rel", + xor=["absolute"], + ), + warp=dict( + argstr="--warp=%s", + extensions=None, + mandatory=True, + ), ) inputs = InvWarp.input_spec() @@ -32,7 +61,11 @@ def test_InvWarp_inputs(): def test_InvWarp_outputs(): - output_map = dict(inverse_warp=dict(extensions=None,),) + output_map = dict( + inverse_warp=dict( + extensions=None, + ), + ) outputs = InvWarp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py b/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py index 58186672ec..f9c5432d40 100644 --- a/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py +++ b/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py @@ -4,18 +4,51 @@ def test_IsotropicSmooth_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - fwhm=dict(argstr="-s %.5f", mandatory=True, position=4, xor=["sigma"],), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - internal_datatype=dict(argstr="-dt %s", position=1,), - nan2zeros=dict(argstr="-nan", position=3,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + 
usedefault=True, + ), + fwhm=dict( + argstr="-s %.5f", + mandatory=True, + position=4, + xor=["sigma"], + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), - sigma=dict(argstr="-s %.5f", mandatory=True, position=4, xor=["fwhm"],), + sigma=dict( + argstr="-s %.5f", + mandatory=True, + position=4, + xor=["fwhm"], + ), ) inputs = IsotropicSmooth.input_spec() @@ -25,7 +58,11 @@ def test_IsotropicSmooth_inputs(): def test_IsotropicSmooth_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = IsotropicSmooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_L2Model.py b/nipype/interfaces/fsl/tests/test_auto_L2Model.py index 6d16cc6038..c4547fc7a2 100644 --- a/nipype/interfaces/fsl/tests/test_auto_L2Model.py +++ b/nipype/interfaces/fsl/tests/test_auto_L2Model.py @@ -3,7 +3,11 @@ def test_L2Model_inputs(): - input_map = dict(num_copes=dict(mandatory=True,),) + input_map = dict( + num_copes=dict( + mandatory=True, + ), + ) inputs = L2Model.input_spec() for key, metadata in list(input_map.items()): @@ -13,9 +17,15 @@ def test_L2Model_inputs(): def test_L2Model_outputs(): output_map = dict( - design_con=dict(extensions=None,), - design_grp=dict(extensions=None,), - design_mat=dict(extensions=None,), + design_con=dict( + extensions=None, + ), + design_grp=dict( + extensions=None, + ), + design_mat=dict( + extensions=None, + ), ) outputs = L2Model.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Level1Design.py b/nipype/interfaces/fsl/tests/test_auto_Level1Design.py index f8ed336e43..5a43989601 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Level1Design.py +++ b/nipype/interfaces/fsl/tests/test_auto_Level1Design.py @@ -4,12 +4,22 @@ def test_Level1Design_inputs(): input_map = dict( - bases=dict(mandatory=True,), + bases=dict( + mandatory=True, + ), contrasts=dict(), - interscan_interval=dict(mandatory=True,), - model_serial_correlations=dict(mandatory=True,), - orthogonalization=dict(usedefault=True,), - session_info=dict(mandatory=True,), + interscan_interval=dict( + mandatory=True, + ), + model_serial_correlations=dict( + mandatory=True, + ), + orthogonalization=dict( + usedefault=True, + ), + session_info=dict( + mandatory=True, + ), ) inputs = Level1Design.input_spec() @@ -19,7 +29,10 @@ def test_Level1Design_inputs(): def test_Level1Design_outputs(): - output_map = dict(ev_files=dict(), fsf_files=dict(),) + output_map = dict( + ev_files=dict(), + fsf_files=dict(), + ) outputs = Level1Design.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py b/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py index 8f52f40eb0..768c52a7f4 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py +++ b/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py @@ -4,31 +4,82 @@ def test_MCFLIRT_inputs(): input_map = dict( - args=dict(argstr="%s",), - bins=dict(argstr="-bins %d",), - cost=dict(argstr="-cost %s",), - 
dof=dict(argstr="-dof %d",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=0,), - init=dict(argstr="-init %s", extensions=None,), - interpolation=dict(argstr="-%s_final",), - mean_vol=dict(argstr="-meanvol",), + args=dict( + argstr="%s", + ), + bins=dict( + argstr="-bins %d", + ), + cost=dict( + argstr="-cost %s", + ), + dof=dict( + argstr="-dof %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + position=0, + ), + init=dict( + argstr="-init %s", + extensions=None, + ), + interpolation=dict( + argstr="-%s_final", + ), + mean_vol=dict( + argstr="-meanvol", + ), out_file=dict( - argstr="-out %s", extensions=None, genfile=True, hash_files=False, + argstr="-out %s", + extensions=None, + genfile=True, + hash_files=False, ), output_type=dict(), - ref_file=dict(argstr="-reffile %s", extensions=None,), - ref_vol=dict(argstr="-refvol %d",), - rotation=dict(argstr="-rotation %d",), - save_mats=dict(argstr="-mats",), - save_plots=dict(argstr="-plots",), - save_rms=dict(argstr="-rmsabs -rmsrel",), - scaling=dict(argstr="-scaling %.2f",), - smooth=dict(argstr="-smooth %.2f",), - stages=dict(argstr="-stages %d",), - stats_imgs=dict(argstr="-stats",), - use_contour=dict(argstr="-edge",), - use_gradient=dict(argstr="-gdt",), + ref_file=dict( + argstr="-reffile %s", + extensions=None, + ), + ref_vol=dict( + argstr="-refvol %d", + ), + rotation=dict( + argstr="-rotation %d", + ), + save_mats=dict( + argstr="-mats", + ), + save_plots=dict( + argstr="-plots", + ), + save_rms=dict( + argstr="-rmsabs -rmsrel", + ), + scaling=dict( + argstr="-scaling %.2f", + ), + smooth=dict( + argstr="-smooth %.2f", + ), + stages=dict( + argstr="-stages %d", + ), + stats_imgs=dict( + argstr="-stats", + ), + use_contour=dict( + argstr="-edge", + ), + use_gradient=dict( + argstr="-gdt", + ), ) inputs = MCFLIRT.input_spec() @@ -40,12 +91,22 @@ def test_MCFLIRT_inputs(): def test_MCFLIRT_outputs(): output_map = dict( mat_file=dict(), - mean_img=dict(extensions=None,), - out_file=dict(extensions=None,), - par_file=dict(extensions=None,), + mean_img=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + par_file=dict( + extensions=None, + ), rms_files=dict(), - std_img=dict(extensions=None,), - variance_img=dict(extensions=None,), + std_img=dict( + extensions=None, + ), + variance_img=dict( + extensions=None, + ), ) outputs = MCFLIRT.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_MELODIC.py b/nipype/interfaces/fsl/tests/test_auto_MELODIC.py index 86e4e0e2a2..db2406e30f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MELODIC.py +++ b/nipype/interfaces/fsl/tests/test_auto_MELODIC.py @@ -4,57 +4,171 @@ def test_MELODIC_inputs(): input_map = dict( - ICs=dict(argstr="--ICs=%s", extensions=None,), - approach=dict(argstr="-a %s",), - args=dict(argstr="%s",), - bg_image=dict(argstr="--bgimage=%s", extensions=None,), - bg_threshold=dict(argstr="--bgthreshold=%f",), - cov_weight=dict(argstr="--covarweight=%f",), - dim=dict(argstr="-d %d",), - dim_est=dict(argstr="--dimest=%s",), - environ=dict(nohash=True, usedefault=True,), - epsilon=dict(argstr="--eps=%f",), - epsilonS=dict(argstr="--epsS=%f",), - in_files=dict(argstr="-i %s", mandatory=True, position=0, sep=",",), - log_power=dict(argstr="--logPower",), - mask=dict(argstr="-m %s", extensions=None,), - max_restart=dict(argstr="--maxrestart=%d",), - maxit=dict(argstr="--maxit=%d",), - 
migp=dict(argstr="--migp",), - migpN=dict(argstr="--migpN %d",), - migp_factor=dict(argstr="--migp_factor %d",), - migp_shuffle=dict(argstr="--migp_shuffle",), - mix=dict(argstr="--mix=%s", extensions=None,), - mm_thresh=dict(argstr="--mmthresh=%f",), - no_bet=dict(argstr="--nobet",), - no_mask=dict(argstr="--nomask",), - no_mm=dict(argstr="--no_mm",), - non_linearity=dict(argstr="--nl=%s",), - num_ICs=dict(argstr="-n %d",), - out_all=dict(argstr="--Oall",), - out_dir=dict(argstr="-o %s", genfile=True,), - out_mean=dict(argstr="--Omean",), - out_orig=dict(argstr="--Oorig",), - out_pca=dict(argstr="--Opca",), - out_stats=dict(argstr="--Ostats",), - out_unmix=dict(argstr="--Ounmix",), - out_white=dict(argstr="--Owhite",), + ICs=dict( + argstr="--ICs=%s", + extensions=None, + ), + approach=dict( + argstr="-a %s", + ), + args=dict( + argstr="%s", + ), + bg_image=dict( + argstr="--bgimage=%s", + extensions=None, + ), + bg_threshold=dict( + argstr="--bgthreshold=%f", + ), + cov_weight=dict( + argstr="--covarweight=%f", + ), + dim=dict( + argstr="-d %d", + ), + dim_est=dict( + argstr="--dimest=%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + epsilon=dict( + argstr="--eps=%f", + ), + epsilonS=dict( + argstr="--epsS=%f", + ), + in_files=dict( + argstr="-i %s", + mandatory=True, + position=0, + sep=",", + ), + log_power=dict( + argstr="--logPower", + ), + mask=dict( + argstr="-m %s", + extensions=None, + ), + max_restart=dict( + argstr="--maxrestart=%d", + ), + maxit=dict( + argstr="--maxit=%d", + ), + migp=dict( + argstr="--migp", + ), + migpN=dict( + argstr="--migpN %d", + ), + migp_factor=dict( + argstr="--migp_factor %d", + ), + migp_shuffle=dict( + argstr="--migp_shuffle", + ), + mix=dict( + argstr="--mix=%s", + extensions=None, + ), + mm_thresh=dict( + argstr="--mmthresh=%f", + ), + no_bet=dict( + argstr="--nobet", + ), + no_mask=dict( + argstr="--nomask", + ), + no_mm=dict( + argstr="--no_mm", + ), + non_linearity=dict( + argstr="--nl=%s", + ), + num_ICs=dict( + argstr="-n %d", + ), + out_all=dict( + argstr="--Oall", + ), + out_dir=dict( + argstr="-o %s", + genfile=True, + ), + out_mean=dict( + argstr="--Omean", + ), + out_orig=dict( + argstr="--Oorig", + ), + out_pca=dict( + argstr="--Opca", + ), + out_stats=dict( + argstr="--Ostats", + ), + out_unmix=dict( + argstr="--Ounmix", + ), + out_white=dict( + argstr="--Owhite", + ), output_type=dict(), - pbsc=dict(argstr="--pbsc",), - rem_cmp=dict(argstr="-f %d",), - remove_deriv=dict(argstr="--remove_deriv",), - report=dict(argstr="--report",), - report_maps=dict(argstr="--report_maps=%s",), - s_con=dict(argstr="--Scon=%s", extensions=None,), - s_des=dict(argstr="--Sdes=%s", extensions=None,), - sep_vn=dict(argstr="--sep_vn",), - sep_whiten=dict(argstr="--sep_whiten",), - smode=dict(argstr="--smode=%s", extensions=None,), - t_con=dict(argstr="--Tcon=%s", extensions=None,), - t_des=dict(argstr="--Tdes=%s", extensions=None,), - tr_sec=dict(argstr="--tr=%f",), - update_mask=dict(argstr="--update_mask",), - var_norm=dict(argstr="--vn",), + pbsc=dict( + argstr="--pbsc", + ), + rem_cmp=dict( + argstr="-f %d", + ), + remove_deriv=dict( + argstr="--remove_deriv", + ), + report=dict( + argstr="--report", + ), + report_maps=dict( + argstr="--report_maps=%s", + ), + s_con=dict( + argstr="--Scon=%s", + extensions=None, + ), + s_des=dict( + argstr="--Sdes=%s", + extensions=None, + ), + sep_vn=dict( + argstr="--sep_vn", + ), + sep_whiten=dict( + argstr="--sep_whiten", + ), + smode=dict( + argstr="--smode=%s", + extensions=None, + ), + 
t_con=dict( + argstr="--Tcon=%s", + extensions=None, + ), + t_des=dict( + argstr="--Tdes=%s", + extensions=None, + ), + tr_sec=dict( + argstr="--tr=%f", + ), + update_mask=dict( + argstr="--update_mask", + ), + var_norm=dict( + argstr="--vn", + ), ) inputs = MELODIC.input_spec() @@ -64,7 +178,10 @@ def test_MELODIC_inputs(): def test_MELODIC_outputs(): - output_map = dict(out_dir=dict(), report_dir=dict(),) + output_map = dict( + out_dir=dict(), + report_dir=dict(), + ) outputs = MELODIC.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py b/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py index 760072bab9..bfdb32146e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py +++ b/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py @@ -4,16 +4,42 @@ def test_MakeDyadicVectors_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - mask=dict(argstr="%s", extensions=None, position=2,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + mask=dict( + argstr="%s", + extensions=None, + position=2, + ), output=dict( - argstr="%s", extensions=None, hash_files=False, position=3, usedefault=True, + argstr="%s", + extensions=None, + hash_files=False, + position=3, + usedefault=True, ), output_type=dict(), - perc=dict(argstr="%f", position=4,), - phi_vol=dict(argstr="%s", extensions=None, mandatory=True, position=1,), - theta_vol=dict(argstr="%s", extensions=None, mandatory=True, position=0,), + perc=dict( + argstr="%f", + position=4, + ), + phi_vol=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + theta_vol=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), ) inputs = MakeDyadicVectors.input_spec() @@ -23,7 +49,14 @@ def test_MakeDyadicVectors_inputs(): def test_MakeDyadicVectors_outputs(): - output_map = dict(dispersion=dict(extensions=None,), dyads=dict(extensions=None,),) + output_map = dict( + dispersion=dict( + extensions=None, + ), + dyads=dict( + extensions=None, + ), + ) outputs = MakeDyadicVectors.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py b/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py index ce7f058663..e14e4a4005 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py +++ b/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py @@ -4,15 +4,38 @@ def test_MathsCommand_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - internal_datatype=dict(argstr="-dt %s", position=1,), - nan2zeros=dict(argstr="-nan", position=3,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = MathsCommand.input_spec() @@ -23,7 +46,11 @@ def test_MathsCommand_inputs(): def 
test_MathsCommand_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MathsCommand.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MaxImage.py b/nipype/interfaces/fsl/tests/test_auto_MaxImage.py index 1baa75963b..f96f931fcf 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MaxImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MaxImage.py @@ -4,16 +4,43 @@ def test_MaxImage_inputs(): input_map = dict( - args=dict(argstr="%s",), - dimension=dict(argstr="-%smax", position=4, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - internal_datatype=dict(argstr="-dt %s", position=1,), - nan2zeros=dict(argstr="-nan", position=3,), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="-%smax", + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = MaxImage.input_spec() @@ -24,7 +51,11 @@ def test_MaxImage_inputs(): def test_MaxImage_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MaxImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py b/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py index aa52ba3bb7..30ada25d79 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py @@ -4,16 +4,43 @@ def test_MaxnImage_inputs(): input_map = dict( - args=dict(argstr="%s",), - dimension=dict(argstr="-%smaxn", position=4, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - internal_datatype=dict(argstr="-dt %s", position=1,), - nan2zeros=dict(argstr="-nan", position=3,), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="-%smaxn", + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = MaxnImage.input_spec() @@ -24,7 +51,11 @@ def test_MaxnImage_inputs(): def test_MaxnImage_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MaxnImage.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/fsl/tests/test_auto_MeanImage.py b/nipype/interfaces/fsl/tests/test_auto_MeanImage.py index 076cb08a76..e29104476c 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MeanImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MeanImage.py @@ -4,16 +4,43 @@ def test_MeanImage_inputs(): input_map = dict( - args=dict(argstr="%s",), - dimension=dict(argstr="-%smean", position=4, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - internal_datatype=dict(argstr="-dt %s", position=1,), - nan2zeros=dict(argstr="-nan", position=3,), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="-%smean", + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = MeanImage.input_spec() @@ -24,7 +51,11 @@ def test_MeanImage_inputs(): def test_MeanImage_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MeanImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MedianImage.py b/nipype/interfaces/fsl/tests/test_auto_MedianImage.py index a70ff14b2f..7c8052fd31 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MedianImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MedianImage.py @@ -4,16 +4,43 @@ def test_MedianImage_inputs(): input_map = dict( - args=dict(argstr="%s",), - dimension=dict(argstr="-%smedian", position=4, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - internal_datatype=dict(argstr="-dt %s", position=1,), - nan2zeros=dict(argstr="-nan", position=3,), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="-%smedian", + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = MedianImage.input_spec() @@ -24,7 +51,11 @@ def test_MedianImage_inputs(): def test_MedianImage_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MedianImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Merge.py b/nipype/interfaces/fsl/tests/test_auto_Merge.py index 45db6482a9..847f9b7bd3 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Merge.py +++ 
b/nipype/interfaces/fsl/tests/test_auto_Merge.py @@ -4,10 +4,23 @@ def test_Merge_inputs(): input_map = dict( - args=dict(argstr="%s",), - dimension=dict(argstr="-%s", mandatory=True, position=0,), - environ=dict(nohash=True, usedefault=True,), - in_files=dict(argstr="%s", mandatory=True, position=2,), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="-%s", + mandatory=True, + position=0, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr="%s", + mandatory=True, + position=2, + ), merged_file=dict( argstr="%s", extensions=None, @@ -17,7 +30,10 @@ def test_Merge_inputs(): position=1, ), output_type=dict(), - tr=dict(argstr="%.2f", position=-1,), + tr=dict( + argstr="%.2f", + position=-1, + ), ) inputs = Merge.input_spec() @@ -27,7 +43,11 @@ def test_Merge_inputs(): def test_Merge_outputs(): - output_map = dict(merged_file=dict(extensions=None,),) + output_map = dict( + merged_file=dict( + extensions=None, + ), + ) outputs = Merge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MinImage.py b/nipype/interfaces/fsl/tests/test_auto_MinImage.py index 9d5416bd15..bde76c1afc 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MinImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MinImage.py @@ -4,16 +4,43 @@ def test_MinImage_inputs(): input_map = dict( - args=dict(argstr="%s",), - dimension=dict(argstr="-%smin", position=4, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - internal_datatype=dict(argstr="-dt %s", position=1,), - nan2zeros=dict(argstr="-nan", position=3,), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="-%smin", + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = MinImage.input_spec() @@ -24,7 +51,11 @@ def test_MinImage_inputs(): def test_MinImage_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MinImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py b/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py index 4c8ce55636..9a5773336f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py +++ b/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py @@ -4,13 +4,31 @@ def test_MotionOutliers_inputs(): input_map = dict( - args=dict(argstr="%s",), - dummy=dict(argstr="--dummy=%d",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True,), - mask=dict(argstr="-m %s", extensions=None,), - metric=dict(argstr="--%s",), - no_motion_correction=dict(argstr="--nomoco",), + args=dict( + argstr="%s", + ), + dummy=dict( + argstr="--dummy=%d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + 
), + mask=dict( + argstr="-m %s", + extensions=None, + ), + metric=dict( + argstr="--%s", + ), + no_motion_correction=dict( + argstr="--nomoco", + ), out_file=dict( argstr="-o %s", extensions=None, @@ -36,7 +54,9 @@ def test_MotionOutliers_inputs(): name_template="%s_metrics.txt", ), output_type=dict(), - threshold=dict(argstr="--thresh=%g",), + threshold=dict( + argstr="--thresh=%g", + ), ) inputs = MotionOutliers.input_spec() @@ -47,9 +67,15 @@ def test_MotionOutliers_inputs(): def test_MotionOutliers_outputs(): output_map = dict( - out_file=dict(extensions=None,), - out_metric_plot=dict(extensions=None,), - out_metric_values=dict(extensions=None,), + out_file=dict( + extensions=None, + ), + out_metric_plot=dict( + extensions=None, + ), + out_metric_values=dict( + extensions=None, + ), ) outputs = MotionOutliers.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py b/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py index ba96daf994..95de40d023 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py @@ -4,17 +4,46 @@ def test_MultiImageMaths_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - internal_datatype=dict(argstr="-dt %s", position=1,), - nan2zeros=dict(argstr="-nan", position=3,), - op_string=dict(argstr="%s", mandatory=True, position=4,), - operand_files=dict(mandatory=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), + op_string=dict( + argstr="%s", + mandatory=True, + position=4, + ), + operand_files=dict( + mandatory=True, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = MultiImageMaths.input_spec() @@ -25,7 +54,11 @@ def test_MultiImageMaths_inputs(): def test_MultiImageMaths_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MultiImageMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py b/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py index fe3ce1b0b1..cae5e90cd4 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py +++ b/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py @@ -4,9 +4,13 @@ def test_MultipleRegressDesign_inputs(): input_map = dict( - contrasts=dict(mandatory=True,), + contrasts=dict( + mandatory=True, + ), groups=dict(), - regressors=dict(mandatory=True,), + regressors=dict( + mandatory=True, + ), ) inputs = MultipleRegressDesign.input_spec() @@ -17,10 +21,18 @@ def test_MultipleRegressDesign_inputs(): def test_MultipleRegressDesign_outputs(): output_map = dict( - design_con=dict(extensions=None,), - design_fts=dict(extensions=None,), - design_grp=dict(extensions=None,), - design_mat=dict(extensions=None,), + design_con=dict( + extensions=None, + ), + design_fts=dict( 
+ extensions=None, + ), + design_grp=dict( + extensions=None, + ), + design_mat=dict( + extensions=None, + ), ) outputs = MultipleRegressDesign.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Overlay.py b/nipype/interfaces/fsl/tests/test_auto_Overlay.py index e09ef17541..22c4f08a44 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Overlay.py +++ b/nipype/interfaces/fsl/tests/test_auto_Overlay.py @@ -4,7 +4,9 @@ def test_Overlay_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), auto_thresh_bg=dict( argstr="-a", mandatory=True, @@ -12,7 +14,10 @@ def test_Overlay_inputs(): xor=("auto_thresh_bg", "full_bg_range", "bg_thresh"), ), background_image=dict( - argstr="%s", extensions=None, mandatory=True, position=4, + argstr="%s", + extensions=None, + mandatory=True, + position=4, ), bg_thresh=dict( argstr="%.3f %.3f", @@ -20,7 +25,10 @@ def test_Overlay_inputs(): position=5, xor=("auto_thresh_bg", "full_bg_range", "bg_thresh"), ), - environ=dict(nohash=True, usedefault=True,), + environ=dict( + nohash=True, + usedefault=True, + ), full_bg_range=dict( argstr="-A", mandatory=True, @@ -28,19 +36,53 @@ def test_Overlay_inputs(): xor=("auto_thresh_bg", "full_bg_range", "bg_thresh"), ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-1, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-1, + ), + out_type=dict( + argstr="%s", + position=2, + usedefault=True, ), - out_type=dict(argstr="%s", position=2, usedefault=True,), output_type=dict(), - show_negative_stats=dict(argstr="%s", position=8, xor=["stat_image2"],), - stat_image=dict(argstr="%s", extensions=None, mandatory=True, position=6,), + show_negative_stats=dict( + argstr="%s", + position=8, + xor=["stat_image2"], + ), + stat_image=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=6, + ), stat_image2=dict( - argstr="%s", extensions=None, position=9, xor=["show_negative_stats"], + argstr="%s", + extensions=None, + position=9, + xor=["show_negative_stats"], + ), + stat_thresh=dict( + argstr="%.2f %.2f", + mandatory=True, + position=7, + ), + stat_thresh2=dict( + argstr="%.2f %.2f", + position=10, + ), + transparency=dict( + argstr="%s", + position=1, + usedefault=True, + ), + use_checkerboard=dict( + argstr="-c", + position=3, ), - stat_thresh=dict(argstr="%.2f %.2f", mandatory=True, position=7,), - stat_thresh2=dict(argstr="%.2f %.2f", position=10,), - transparency=dict(argstr="%s", position=1, usedefault=True,), - use_checkerboard=dict(argstr="-c", position=3,), ) inputs = Overlay.input_spec() @@ -50,7 +92,11 @@ def test_Overlay_inputs(): def test_Overlay_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Overlay.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py b/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py index 2c3623a76a..0194526c70 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py +++ b/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py @@ -4,25 +4,43 @@ def test_PRELUDE_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), complex_phase_file=dict( argstr="--complex=%s", extensions=None, mandatory=True, xor=["magnitude_file", "phase_file"], ), - end=dict(argstr="--end=%d",), - environ=dict(nohash=True, usedefault=True,), - label_file=dict(argstr="--labels=%s", extensions=None, 
hash_files=False,), - labelprocess2d=dict(argstr="--labelslices",), + end=dict( + argstr="--end=%d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + label_file=dict( + argstr="--labels=%s", + extensions=None, + hash_files=False, + ), + labelprocess2d=dict( + argstr="--labelslices", + ), magnitude_file=dict( argstr="--abs=%s", extensions=None, mandatory=True, xor=["complex_phase_file"], ), - mask_file=dict(argstr="--mask=%s", extensions=None,), - num_partitions=dict(argstr="--numphasesplit=%d",), + mask_file=dict( + argstr="--mask=%s", + extensions=None, + ), + num_partitions=dict( + argstr="--numphasesplit=%d", + ), output_type=dict(), phase_file=dict( argstr="--phase=%s", @@ -30,15 +48,38 @@ def test_PRELUDE_inputs(): mandatory=True, xor=["complex_phase_file"], ), - process2d=dict(argstr="--slices", xor=["labelprocess2d"],), - process3d=dict(argstr="--force3D", xor=["labelprocess2d", "process2d"],), - rawphase_file=dict(argstr="--rawphase=%s", extensions=None, hash_files=False,), - removeramps=dict(argstr="--removeramps",), - savemask_file=dict(argstr="--savemask=%s", extensions=None, hash_files=False,), - start=dict(argstr="--start=%d",), - threshold=dict(argstr="--thresh=%.10f",), + process2d=dict( + argstr="--slices", + xor=["labelprocess2d"], + ), + process3d=dict( + argstr="--force3D", + xor=["labelprocess2d", "process2d"], + ), + rawphase_file=dict( + argstr="--rawphase=%s", + extensions=None, + hash_files=False, + ), + removeramps=dict( + argstr="--removeramps", + ), + savemask_file=dict( + argstr="--savemask=%s", + extensions=None, + hash_files=False, + ), + start=dict( + argstr="--start=%d", + ), + threshold=dict( + argstr="--thresh=%.10f", + ), unwrapped_phase_file=dict( - argstr="--unwrap=%s", extensions=None, genfile=True, hash_files=False, + argstr="--unwrap=%s", + extensions=None, + genfile=True, + hash_files=False, ), ) inputs = PRELUDE.input_spec() @@ -49,7 +90,11 @@ def test_PRELUDE_inputs(): def test_PRELUDE_outputs(): - output_map = dict(unwrapped_phase_file=dict(extensions=None,),) + output_map = dict( + unwrapped_phase_file=dict( + extensions=None, + ), + ) outputs = PRELUDE.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py b/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py index 2b272b006c..3a3ae14a78 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py @@ -4,18 +4,48 @@ def test_PercentileImage_inputs(): input_map = dict( - args=dict(argstr="%s",), - dimension=dict(argstr="-%sperc", position=4, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - internal_datatype=dict(argstr="-dt %s", position=1,), - nan2zeros=dict(argstr="-nan", position=3,), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="-%sperc", + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", 
position=-1,), output_type=dict(), - perc=dict(argstr="%f", position=5,), + perc=dict( + argstr="%f", + position=5, + ), ) inputs = PercentileImage.input_spec() @@ -25,7 +55,11 @@ def test_PercentileImage_inputs(): def test_PercentileImage_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = PercentileImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py b/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py index c12494e50b..8cf1d2e214 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py +++ b/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py @@ -4,14 +4,35 @@ def test_PlotMotionParams_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", mandatory=True, position=1,), - in_source=dict(mandatory=True,), - out_file=dict(argstr="-o %s", extensions=None, genfile=True, hash_files=False,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + mandatory=True, + position=1, + ), + in_source=dict( + mandatory=True, + ), + out_file=dict( + argstr="-o %s", + extensions=None, + genfile=True, + hash_files=False, + ), output_type=dict(), - plot_size=dict(argstr="%s",), - plot_type=dict(argstr="%s", mandatory=True,), + plot_size=dict( + argstr="%s", + ), + plot_type=dict( + argstr="%s", + mandatory=True, + ), ) inputs = PlotMotionParams.input_spec() @@ -21,7 +42,11 @@ def test_PlotMotionParams_inputs(): def test_PlotMotionParams_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = PlotMotionParams.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py b/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py index 0f3954fcf2..5b4ebc46aa 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py +++ b/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py @@ -4,24 +4,72 @@ def test_PlotTimeSeries_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", mandatory=True, position=1,), - labels=dict(argstr="%s",), - legend_file=dict(argstr="--legend=%s", extensions=None,), - out_file=dict(argstr="-o %s", extensions=None, genfile=True, hash_files=False,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + mandatory=True, + position=1, + ), + labels=dict( + argstr="%s", + ), + legend_file=dict( + argstr="--legend=%s", + extensions=None, + ), + out_file=dict( + argstr="-o %s", + extensions=None, + genfile=True, + hash_files=False, + ), output_type=dict(), - plot_finish=dict(argstr="--finish=%d", xor=("plot_range",),), - plot_range=dict(argstr="%s", xor=("plot_start", "plot_finish"),), - plot_size=dict(argstr="%s",), - plot_start=dict(argstr="--start=%d", xor=("plot_range",),), - sci_notation=dict(argstr="--sci",), - title=dict(argstr="%s",), - x_precision=dict(argstr="--precision=%d",), - x_units=dict(argstr="-u %d", usedefault=True,), - y_max=dict(argstr="--ymax=%.2f", xor=("y_range",),), - y_min=dict(argstr="--ymin=%.2f", xor=("y_range",),), - y_range=dict(argstr="%s", xor=("y_min", "y_max"),), + plot_finish=dict( + argstr="--finish=%d", + 
xor=("plot_range",), + ), + plot_range=dict( + argstr="%s", + xor=("plot_start", "plot_finish"), + ), + plot_size=dict( + argstr="%s", + ), + plot_start=dict( + argstr="--start=%d", + xor=("plot_range",), + ), + sci_notation=dict( + argstr="--sci", + ), + title=dict( + argstr="%s", + ), + x_precision=dict( + argstr="--precision=%d", + ), + x_units=dict( + argstr="-u %d", + usedefault=True, + ), + y_max=dict( + argstr="--ymax=%.2f", + xor=("y_range",), + ), + y_min=dict( + argstr="--ymin=%.2f", + xor=("y_range",), + ), + y_range=dict( + argstr="%s", + xor=("y_min", "y_max"), + ), ) inputs = PlotTimeSeries.input_spec() @@ -31,7 +79,11 @@ def test_PlotTimeSeries_inputs(): def test_PlotTimeSeries_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = PlotTimeSeries.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py b/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py index 5aa19309fc..874cbcf0e8 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py +++ b/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py @@ -4,11 +4,25 @@ def test_PowerSpectrum_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=1, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=1, ), output_type=dict(), ) @@ -20,7 +34,11 @@ def test_PowerSpectrum_inputs(): def test_PowerSpectrum_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = PowerSpectrum.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py b/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py index d6d39b595c..2286dad026 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py +++ b/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py @@ -4,15 +4,47 @@ def test_PrepareFieldmap_inputs(): input_map = dict( - args=dict(argstr="%s",), - delta_TE=dict(argstr="%f", mandatory=True, position=-2, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - in_magnitude=dict(argstr="%s", extensions=None, mandatory=True, position=3,), - in_phase=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - nocheck=dict(argstr="--nocheck", position=-1, usedefault=True,), - out_fieldmap=dict(argstr="%s", extensions=None, position=4,), + args=dict( + argstr="%s", + ), + delta_TE=dict( + argstr="%f", + mandatory=True, + position=-2, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_magnitude=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=3, + ), + in_phase=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + nocheck=dict( + argstr="--nocheck", + position=-1, + usedefault=True, + ), + out_fieldmap=dict( + argstr="%s", + extensions=None, + position=4, + ), output_type=dict(), - scanner=dict(argstr="%s", position=1, usedefault=True,), + scanner=dict( + argstr="%s", + position=1, + usedefault=True, + ), ) inputs = 
PrepareFieldmap.input_spec() @@ -22,7 +54,11 @@ def test_PrepareFieldmap_inputs(): def test_PrepareFieldmap_outputs(): - output_map = dict(out_fieldmap=dict(extensions=None,),) + output_map = dict( + out_fieldmap=dict( + extensions=None, + ), + ) outputs = PrepareFieldmap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py index 1e2d0f5486..aae5d80c57 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py +++ b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py @@ -4,45 +4,139 @@ def test_ProbTrackX_inputs(): input_map = dict( - args=dict(argstr="%s",), - avoid_mp=dict(argstr="--avoid=%s", extensions=None,), - c_thresh=dict(argstr="--cthr=%.3f",), - correct_path_distribution=dict(argstr="--pd",), - dist_thresh=dict(argstr="--distthresh=%.3f",), - environ=dict(nohash=True, usedefault=True,), - fibst=dict(argstr="--fibst=%d",), - force_dir=dict(argstr="--forcedir", usedefault=True,), - fsamples=dict(mandatory=True,), - inv_xfm=dict(argstr="--invxfm=%s", extensions=None,), - loop_check=dict(argstr="--loopcheck",), - mask=dict(argstr="-m %s", extensions=None, mandatory=True,), - mask2=dict(argstr="--mask2=%s", extensions=None,), - mesh=dict(argstr="--mesh=%s", extensions=None,), - mod_euler=dict(argstr="--modeuler",), - mode=dict(argstr="--mode=%s", genfile=True,), - n_samples=dict(argstr="--nsamples=%d", usedefault=True,), - n_steps=dict(argstr="--nsteps=%d",), - network=dict(argstr="--network",), - opd=dict(argstr="--opd", usedefault=True,), - os2t=dict(argstr="--os2t",), - out_dir=dict(argstr="--dir=%s", genfile=True,), + args=dict( + argstr="%s", + ), + avoid_mp=dict( + argstr="--avoid=%s", + extensions=None, + ), + c_thresh=dict( + argstr="--cthr=%.3f", + ), + correct_path_distribution=dict( + argstr="--pd", + ), + dist_thresh=dict( + argstr="--distthresh=%.3f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fibst=dict( + argstr="--fibst=%d", + ), + force_dir=dict( + argstr="--forcedir", + usedefault=True, + ), + fsamples=dict( + mandatory=True, + ), + inv_xfm=dict( + argstr="--invxfm=%s", + extensions=None, + ), + loop_check=dict( + argstr="--loopcheck", + ), + mask=dict( + argstr="-m %s", + extensions=None, + mandatory=True, + ), + mask2=dict( + argstr="--mask2=%s", + extensions=None, + ), + mesh=dict( + argstr="--mesh=%s", + extensions=None, + ), + mod_euler=dict( + argstr="--modeuler", + ), + mode=dict( + argstr="--mode=%s", + genfile=True, + ), + n_samples=dict( + argstr="--nsamples=%d", + usedefault=True, + ), + n_steps=dict( + argstr="--nsteps=%d", + ), + network=dict( + argstr="--network", + ), + opd=dict( + argstr="--opd", + usedefault=True, + ), + os2t=dict( + argstr="--os2t", + ), + out_dir=dict( + argstr="--dir=%s", + genfile=True, + ), output_type=dict(), - phsamples=dict(mandatory=True,), - rand_fib=dict(argstr="--randfib=%d",), - random_seed=dict(argstr="--rseed",), - s2tastext=dict(argstr="--s2tastext",), - sample_random_points=dict(argstr="--sampvox",), - samples_base_name=dict(argstr="--samples=%s", usedefault=True,), - seed=dict(argstr="--seed=%s", mandatory=True,), - seed_ref=dict(argstr="--seedref=%s", extensions=None,), - step_length=dict(argstr="--steplength=%.3f",), - stop_mask=dict(argstr="--stop=%s", extensions=None,), - target_masks=dict(argstr="--targetmasks=%s",), - thsamples=dict(mandatory=True,), - use_anisotropy=dict(argstr="--usef",), - verbose=dict(argstr="--verbose=%d",), - 
waypoints=dict(argstr="--waypoints=%s", extensions=None,), - xfm=dict(argstr="--xfm=%s", extensions=None,), + phsamples=dict( + mandatory=True, + ), + rand_fib=dict( + argstr="--randfib=%d", + ), + random_seed=dict( + argstr="--rseed", + ), + s2tastext=dict( + argstr="--s2tastext", + ), + sample_random_points=dict( + argstr="--sampvox", + ), + samples_base_name=dict( + argstr="--samples=%s", + usedefault=True, + ), + seed=dict( + argstr="--seed=%s", + mandatory=True, + ), + seed_ref=dict( + argstr="--seedref=%s", + extensions=None, + ), + step_length=dict( + argstr="--steplength=%.3f", + ), + stop_mask=dict( + argstr="--stop=%s", + extensions=None, + ), + target_masks=dict( + argstr="--targetmasks=%s", + ), + thsamples=dict( + mandatory=True, + ), + use_anisotropy=dict( + argstr="--usef", + ), + verbose=dict( + argstr="--verbose=%d", + ), + waypoints=dict( + argstr="--waypoints=%s", + extensions=None, + ), + xfm=dict( + argstr="--xfm=%s", + extensions=None, + ), ) inputs = ProbTrackX.input_spec() @@ -54,10 +148,14 @@ def test_ProbTrackX_inputs(): def test_ProbTrackX_outputs(): output_map = dict( fdt_paths=dict(), - log=dict(extensions=None,), + log=dict( + extensions=None, + ), particle_files=dict(), targets=dict(), - way_total=dict(extensions=None,), + way_total=dict( + extensions=None, + ), ) outputs = ProbTrackX.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py index 56bff1e5ac..1813bd3c9c 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py +++ b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py @@ -4,59 +4,186 @@ def test_ProbTrackX2_inputs(): input_map = dict( - args=dict(argstr="%s",), - avoid_mp=dict(argstr="--avoid=%s", extensions=None,), - c_thresh=dict(argstr="--cthr=%.3f",), - colmask4=dict(argstr="--colmask4=%s", extensions=None,), - correct_path_distribution=dict(argstr="--pd",), - dist_thresh=dict(argstr="--distthresh=%.3f",), - distthresh1=dict(argstr="--distthresh1=%.3f",), - distthresh3=dict(argstr="--distthresh3=%.3f",), - environ=dict(nohash=True, usedefault=True,), - fibst=dict(argstr="--fibst=%d",), - fopd=dict(argstr="--fopd=%s", extensions=None,), - force_dir=dict(argstr="--forcedir", usedefault=True,), - fsamples=dict(mandatory=True,), - inv_xfm=dict(argstr="--invxfm=%s", extensions=None,), - loop_check=dict(argstr="--loopcheck",), - lrtarget3=dict(argstr="--lrtarget3=%s", extensions=None,), - mask=dict(argstr="-m %s", extensions=None, mandatory=True,), - meshspace=dict(argstr="--meshspace=%s",), - mod_euler=dict(argstr="--modeuler",), - n_samples=dict(argstr="--nsamples=%d", usedefault=True,), - n_steps=dict(argstr="--nsteps=%d",), - network=dict(argstr="--network",), - omatrix1=dict(argstr="--omatrix1",), - omatrix2=dict(argstr="--omatrix2", requires=["target2"],), - omatrix3=dict(argstr="--omatrix3", requires=["target3", "lrtarget3"],), - omatrix4=dict(argstr="--omatrix4",), - onewaycondition=dict(argstr="--onewaycondition",), - opd=dict(argstr="--opd", usedefault=True,), - os2t=dict(argstr="--os2t",), - out_dir=dict(argstr="--dir=%s", genfile=True,), + args=dict( + argstr="%s", + ), + avoid_mp=dict( + argstr="--avoid=%s", + extensions=None, + ), + c_thresh=dict( + argstr="--cthr=%.3f", + ), + colmask4=dict( + argstr="--colmask4=%s", + extensions=None, + ), + correct_path_distribution=dict( + argstr="--pd", + ), + dist_thresh=dict( + argstr="--distthresh=%.3f", + ), + distthresh1=dict( + argstr="--distthresh1=%.3f", + ), + distthresh3=dict( + 
argstr="--distthresh3=%.3f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fibst=dict( + argstr="--fibst=%d", + ), + fopd=dict( + argstr="--fopd=%s", + extensions=None, + ), + force_dir=dict( + argstr="--forcedir", + usedefault=True, + ), + fsamples=dict( + mandatory=True, + ), + inv_xfm=dict( + argstr="--invxfm=%s", + extensions=None, + ), + loop_check=dict( + argstr="--loopcheck", + ), + lrtarget3=dict( + argstr="--lrtarget3=%s", + extensions=None, + ), + mask=dict( + argstr="-m %s", + extensions=None, + mandatory=True, + ), + meshspace=dict( + argstr="--meshspace=%s", + ), + mod_euler=dict( + argstr="--modeuler", + ), + n_samples=dict( + argstr="--nsamples=%d", + usedefault=True, + ), + n_steps=dict( + argstr="--nsteps=%d", + ), + network=dict( + argstr="--network", + ), + omatrix1=dict( + argstr="--omatrix1", + ), + omatrix2=dict( + argstr="--omatrix2", + requires=["target2"], + ), + omatrix3=dict( + argstr="--omatrix3", + requires=["target3", "lrtarget3"], + ), + omatrix4=dict( + argstr="--omatrix4", + ), + onewaycondition=dict( + argstr="--onewaycondition", + ), + opd=dict( + argstr="--opd", + usedefault=True, + ), + os2t=dict( + argstr="--os2t", + ), + out_dir=dict( + argstr="--dir=%s", + genfile=True, + ), output_type=dict(), - phsamples=dict(mandatory=True,), - rand_fib=dict(argstr="--randfib=%d",), - random_seed=dict(argstr="--rseed",), - s2tastext=dict(argstr="--s2tastext",), - sample_random_points=dict(argstr="--sampvox",), - samples_base_name=dict(argstr="--samples=%s", usedefault=True,), - seed=dict(argstr="--seed=%s", mandatory=True,), - seed_ref=dict(argstr="--seedref=%s", extensions=None,), - simple=dict(argstr="--simple",), - step_length=dict(argstr="--steplength=%.3f",), - stop_mask=dict(argstr="--stop=%s", extensions=None,), - target2=dict(argstr="--target2=%s", extensions=None,), - target3=dict(argstr="--target3=%s", extensions=None,), - target4=dict(argstr="--target4=%s", extensions=None,), - target_masks=dict(argstr="--targetmasks=%s",), - thsamples=dict(mandatory=True,), - use_anisotropy=dict(argstr="--usef",), - verbose=dict(argstr="--verbose=%d",), - waycond=dict(argstr="--waycond=%s",), - wayorder=dict(argstr="--wayorder",), - waypoints=dict(argstr="--waypoints=%s", extensions=None,), - xfm=dict(argstr="--xfm=%s", extensions=None,), + phsamples=dict( + mandatory=True, + ), + rand_fib=dict( + argstr="--randfib=%d", + ), + random_seed=dict( + argstr="--rseed", + ), + s2tastext=dict( + argstr="--s2tastext", + ), + sample_random_points=dict( + argstr="--sampvox", + ), + samples_base_name=dict( + argstr="--samples=%s", + usedefault=True, + ), + seed=dict( + argstr="--seed=%s", + mandatory=True, + ), + seed_ref=dict( + argstr="--seedref=%s", + extensions=None, + ), + simple=dict( + argstr="--simple", + ), + step_length=dict( + argstr="--steplength=%.3f", + ), + stop_mask=dict( + argstr="--stop=%s", + extensions=None, + ), + target2=dict( + argstr="--target2=%s", + extensions=None, + ), + target3=dict( + argstr="--target3=%s", + extensions=None, + ), + target4=dict( + argstr="--target4=%s", + extensions=None, + ), + target_masks=dict( + argstr="--targetmasks=%s", + ), + thsamples=dict( + mandatory=True, + ), + use_anisotropy=dict( + argstr="--usef", + ), + verbose=dict( + argstr="--verbose=%d", + ), + waycond=dict( + argstr="--waycond=%s", + ), + wayorder=dict( + argstr="--wayorder", + ), + waypoints=dict( + argstr="--waypoints=%s", + extensions=None, + ), + xfm=dict( + argstr="--xfm=%s", + extensions=None, + ), ) inputs = ProbTrackX2.input_spec() @@ 
-68,15 +195,29 @@ def test_ProbTrackX2_inputs(): def test_ProbTrackX2_outputs(): output_map = dict( fdt_paths=dict(), - log=dict(extensions=None,), - lookup_tractspace=dict(extensions=None,), - matrix1_dot=dict(extensions=None,), - matrix2_dot=dict(extensions=None,), - matrix3_dot=dict(extensions=None,), - network_matrix=dict(extensions=None,), + log=dict( + extensions=None, + ), + lookup_tractspace=dict( + extensions=None, + ), + matrix1_dot=dict( + extensions=None, + ), + matrix2_dot=dict( + extensions=None, + ), + matrix3_dot=dict( + extensions=None, + ), + network_matrix=dict( + extensions=None, + ), particle_files=dict(), targets=dict(), - way_total=dict(extensions=None,), + way_total=dict( + extensions=None, + ), ) outputs = ProbTrackX2.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py b/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py index cc1a6a03ac..420eacb9c2 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py +++ b/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py @@ -4,11 +4,24 @@ def test_ProjThresh_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_files=dict(argstr="%s", mandatory=True, position=0,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr="%s", + mandatory=True, + position=0, + ), output_type=dict(), - threshold=dict(argstr="%d", mandatory=True, position=1,), + threshold=dict( + argstr="%d", + mandatory=True, + position=1, + ), ) inputs = ProjThresh.input_spec() @@ -18,7 +31,9 @@ def test_ProjThresh_inputs(): def test_ProjThresh_outputs(): - output_map = dict(out_files=dict(),) + output_map = dict( + out_files=dict(), + ) outputs = ProjThresh.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Randomise.py b/nipype/interfaces/fsl/tests/test_auto_Randomise.py index 95c1cf7d59..cf816c56de 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Randomise.py +++ b/nipype/interfaces/fsl/tests/test_auto_Randomise.py @@ -4,36 +4,107 @@ def test_Randomise_inputs(): input_map = dict( - args=dict(argstr="%s",), - base_name=dict(argstr='-o "%s"', position=1, usedefault=True,), - c_thresh=dict(argstr="-c %.1f",), - cm_thresh=dict(argstr="-C %.1f",), - demean=dict(argstr="-D",), - design_mat=dict(argstr="-d %s", extensions=None, position=2,), - environ=dict(nohash=True, usedefault=True,), - f_c_thresh=dict(argstr="-F %.2f",), - f_cm_thresh=dict(argstr="-S %.2f",), - f_only=dict(argstr="--f_only",), - fcon=dict(argstr="-f %s", extensions=None,), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=0,), - mask=dict(argstr="-m %s", extensions=None,), - num_perm=dict(argstr="-n %d",), - one_sample_group_mean=dict(argstr="-1",), + args=dict( + argstr="%s", + ), + base_name=dict( + argstr='-o "%s"', + position=1, + usedefault=True, + ), + c_thresh=dict( + argstr="-c %.1f", + ), + cm_thresh=dict( + argstr="-C %.1f", + ), + demean=dict( + argstr="-D", + ), + design_mat=dict( + argstr="-d %s", + extensions=None, + position=2, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + f_c_thresh=dict( + argstr="-F %.2f", + ), + f_cm_thresh=dict( + argstr="-S %.2f", + ), + f_only=dict( + argstr="--f_only", + ), + fcon=dict( + argstr="-f %s", + extensions=None, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + position=0, + ), + mask=dict( + argstr="-m %s", + extensions=None, + ), + num_perm=dict( + argstr="-n %d", + ), 
+ one_sample_group_mean=dict( + argstr="-1", + ), output_type=dict(), - p_vec_n_dist_files=dict(argstr="-P",), - raw_stats_imgs=dict(argstr="-R",), - seed=dict(argstr="--seed=%d",), - show_info_parallel_mode=dict(argstr="-Q",), - show_total_perms=dict(argstr="-q",), - tcon=dict(argstr="-t %s", extensions=None, position=3,), - tfce=dict(argstr="-T",), - tfce2D=dict(argstr="--T2",), - tfce_C=dict(argstr="--tfce_C=%.2f",), - tfce_E=dict(argstr="--tfce_E=%.2f",), - tfce_H=dict(argstr="--tfce_H=%.2f",), - var_smooth=dict(argstr="-v %d",), - vox_p_values=dict(argstr="-x",), - x_block_labels=dict(argstr="-e %s", extensions=None,), + p_vec_n_dist_files=dict( + argstr="-P", + ), + raw_stats_imgs=dict( + argstr="-R", + ), + seed=dict( + argstr="--seed=%d", + ), + show_info_parallel_mode=dict( + argstr="-Q", + ), + show_total_perms=dict( + argstr="-q", + ), + tcon=dict( + argstr="-t %s", + extensions=None, + position=3, + ), + tfce=dict( + argstr="-T", + ), + tfce2D=dict( + argstr="--T2", + ), + tfce_C=dict( + argstr="--tfce_C=%.2f", + ), + tfce_E=dict( + argstr="--tfce_E=%.2f", + ), + tfce_H=dict( + argstr="--tfce_H=%.2f", + ), + var_smooth=dict( + argstr="-v %d", + ), + vox_p_values=dict( + argstr="-x", + ), + x_block_labels=dict( + argstr="-e %s", + extensions=None, + ), ) inputs = Randomise.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py b/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py index d81874e76a..e008eb44e6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py +++ b/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py @@ -4,10 +4,24 @@ def test_Reorient2Std_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True,), - out_file=dict(argstr="%s", extensions=None, genfile=True, hash_files=False,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + ), + out_file=dict( + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + ), output_type=dict(), ) inputs = Reorient2Std.input_spec() @@ -18,7 +32,11 @@ def test_Reorient2Std_inputs(): def test_Reorient2Std_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Reorient2Std.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py b/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py index fbadb82c99..b5598f0de4 100644 --- a/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py +++ b/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py @@ -4,10 +4,22 @@ def test_RobustFOV_inputs(): input_map = dict( - args=dict(argstr="%s",), - brainsize=dict(argstr="-b %d",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=0,), + args=dict( + argstr="%s", + ), + brainsize=dict( + argstr="-b %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + position=0, + ), out_roi=dict( argstr="-r %s", extensions=None, @@ -33,7 +45,12 @@ def test_RobustFOV_inputs(): def test_RobustFOV_outputs(): output_map = dict( - out_roi=dict(extensions=None,), out_transform=dict(extensions=None,), + out_roi=dict( + extensions=None, + ), + out_transform=dict( + extensions=None, + ), ) outputs = 
RobustFOV.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_SMM.py b/nipype/interfaces/fsl/tests/test_auto_SMM.py index 2042d0845f..51777eaed9 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SMM.py +++ b/nipype/interfaces/fsl/tests/test_auto_SMM.py @@ -4,8 +4,13 @@ def test_SMM_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), mask=dict( argstr='--mask="%s"', copyfile=False, @@ -13,7 +18,10 @@ def test_SMM_inputs(): mandatory=True, position=1, ), - no_deactivation_class=dict(argstr="--zfstatmode", position=2,), + no_deactivation_class=dict( + argstr="--zfstatmode", + position=2, + ), output_type=dict(), spatial_data_file=dict( argstr='--sdf="%s"', @@ -32,9 +40,15 @@ def test_SMM_inputs(): def test_SMM_outputs(): output_map = dict( - activation_p_map=dict(extensions=None,), - deactivation_p_map=dict(extensions=None,), - null_p_map=dict(extensions=None,), + activation_p_map=dict( + extensions=None, + ), + deactivation_p_map=dict( + extensions=None, + ), + null_p_map=dict( + extensions=None, + ), ) outputs = SMM.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_SUSAN.py b/nipype/interfaces/fsl/tests/test_auto_SUSAN.py index 7f7f270be1..427b770222 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SUSAN.py +++ b/nipype/interfaces/fsl/tests/test_auto_SUSAN.py @@ -4,18 +4,52 @@ def test_SUSAN_inputs(): input_map = dict( - args=dict(argstr="%s",), - brightness_threshold=dict(argstr="%.10f", mandatory=True, position=2,), - dimension=dict(argstr="%d", position=4, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - fwhm=dict(argstr="%.10f", mandatory=True, position=3,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1,), + args=dict( + argstr="%s", + ), + brightness_threshold=dict( + argstr="%.10f", + mandatory=True, + position=2, + ), + dimension=dict( + argstr="%d", + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fwhm=dict( + argstr="%.10f", + mandatory=True, + position=3, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-1, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-1, ), output_type=dict(), - usans=dict(argstr="", position=6, usedefault=True,), - use_median=dict(argstr="%d", position=5, usedefault=True,), + usans=dict( + argstr="", + position=6, + usedefault=True, + ), + use_median=dict( + argstr="%d", + position=5, + usedefault=True, + ), ) inputs = SUSAN.input_spec() @@ -25,7 +59,11 @@ def test_SUSAN_inputs(): def test_SUSAN_outputs(): - output_map = dict(smoothed_file=dict(extensions=None,),) + output_map = dict( + smoothed_file=dict( + extensions=None, + ), + ) outputs = SUSAN.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_SigLoss.py b/nipype/interfaces/fsl/tests/test_auto_SigLoss.py index fe63fdce23..11be93c5b9 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SigLoss.py +++ b/nipype/interfaces/fsl/tests/test_auto_SigLoss.py @@ -4,14 +4,34 @@ def test_SigLoss_inputs(): input_map = dict( - args=dict(argstr="%s",), - echo_time=dict(argstr="--te=%f",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True,), - mask_file=dict(argstr="-m %s", 
extensions=None,), - out_file=dict(argstr="-s %s", extensions=None, genfile=True,), + args=dict( + argstr="%s", + ), + echo_time=dict( + argstr="--te=%f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + mask_file=dict( + argstr="-m %s", + extensions=None, + ), + out_file=dict( + argstr="-s %s", + extensions=None, + genfile=True, + ), output_type=dict(), - slice_direction=dict(argstr="-d %s",), + slice_direction=dict( + argstr="-d %s", + ), ) inputs = SigLoss.input_spec() @@ -21,7 +41,11 @@ def test_SigLoss_inputs(): def test_SigLoss_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SigLoss.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Slice.py b/nipype/interfaces/fsl/tests/test_auto_Slice.py index f96ee854b3..f5360716c6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Slice.py +++ b/nipype/interfaces/fsl/tests/test_auto_Slice.py @@ -4,12 +4,24 @@ def test_Slice_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0, + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=0, + ), + out_base_name=dict( + argstr="%s", + position=1, ), - out_base_name=dict(argstr="%s", position=1,), output_type=dict(), ) inputs = Slice.input_spec() @@ -20,7 +32,9 @@ def test_Slice_inputs(): def test_Slice_outputs(): - output_map = dict(out_files=dict(),) + output_map = dict( + out_files=dict(), + ) outputs = Slice.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py b/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py index 0d0c0fc0f4..acdbd8c2ca 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py +++ b/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py @@ -4,20 +4,49 @@ def test_SliceTimer_inputs(): input_map = dict( - args=dict(argstr="%s",), - custom_order=dict(argstr="--ocustom=%s", extensions=None,), - custom_timings=dict(argstr="--tcustom=%s", extensions=None,), - environ=dict(nohash=True, usedefault=True,), - global_shift=dict(argstr="--tglobal",), - in_file=dict(argstr="--in=%s", extensions=None, mandatory=True, position=0,), - index_dir=dict(argstr="--down",), - interleaved=dict(argstr="--odd",), + args=dict( + argstr="%s", + ), + custom_order=dict( + argstr="--ocustom=%s", + extensions=None, + ), + custom_timings=dict( + argstr="--tcustom=%s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + global_shift=dict( + argstr="--tglobal", + ), + in_file=dict( + argstr="--in=%s", + extensions=None, + mandatory=True, + position=0, + ), + index_dir=dict( + argstr="--down", + ), + interleaved=dict( + argstr="--odd", + ), out_file=dict( - argstr="--out=%s", extensions=None, genfile=True, hash_files=False, + argstr="--out=%s", + extensions=None, + genfile=True, + hash_files=False, ), output_type=dict(), - slice_direction=dict(argstr="--direction=%d",), - time_repetition=dict(argstr="--repeat=%f",), + slice_direction=dict( + argstr="--direction=%d", + ), + time_repetition=dict( + argstr="--repeat=%f", + ), ) inputs = SliceTimer.input_spec() @@ -27,7 +56,11 @@ def test_SliceTimer_inputs(): def 
test_SliceTimer_outputs(): - output_map = dict(slice_time_corrected_file=dict(extensions=None,),) + output_map = dict( + slice_time_corrected_file=dict( + extensions=None, + ), + ) outputs = SliceTimer.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Slicer.py b/nipype/interfaces/fsl/tests/test_auto_Slicer.py index 205aab061b..8e3195fd39 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Slicer.py +++ b/nipype/interfaces/fsl/tests/test_auto_Slicer.py @@ -10,23 +10,61 @@ def test_Slicer_inputs(): requires=["image_width"], xor=("single_slice", "middle_slices", "all_axial", "sample_axial"), ), - args=dict(argstr="%s",), - colour_map=dict(argstr="-l %s", extensions=None, position=4,), - dither_edges=dict(argstr="-t", position=7,), - environ=dict(nohash=True, usedefault=True,), - image_edges=dict(argstr="%s", extensions=None, position=2,), - image_width=dict(argstr="%d", position=-2,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1,), - intensity_range=dict(argstr="-i %.3f %.3f", position=5,), - label_slices=dict(argstr="-L", position=3, usedefault=True,), + args=dict( + argstr="%s", + ), + colour_map=dict( + argstr="-l %s", + extensions=None, + position=4, + ), + dither_edges=dict( + argstr="-t", + position=7, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + image_edges=dict( + argstr="%s", + extensions=None, + position=2, + ), + image_width=dict( + argstr="%d", + position=-2, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + intensity_range=dict( + argstr="-i %.3f %.3f", + position=5, + ), + label_slices=dict( + argstr="-L", + position=3, + usedefault=True, + ), middle_slices=dict( argstr="-a", position=10, xor=("single_slice", "middle_slices", "all_axial", "sample_axial"), ), - nearest_neighbour=dict(argstr="-n", position=8,), + nearest_neighbour=dict( + argstr="-n", + position=8, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-1, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-1, ), output_type=dict(), sample_axial=dict( @@ -35,16 +73,29 @@ def test_Slicer_inputs(): requires=["image_width"], xor=("single_slice", "middle_slices", "all_axial", "sample_axial"), ), - scaling=dict(argstr="-s %f", position=0,), - show_orientation=dict(argstr="%s", position=9, usedefault=True,), + scaling=dict( + argstr="-s %f", + position=0, + ), + show_orientation=dict( + argstr="%s", + position=9, + usedefault=True, + ), single_slice=dict( argstr="-%s", position=10, requires=["slice_number"], xor=("single_slice", "middle_slices", "all_axial", "sample_axial"), ), - slice_number=dict(argstr="-%d", position=11,), - threshold_edges=dict(argstr="-e %.3f", position=6,), + slice_number=dict( + argstr="-%d", + position=11, + ), + threshold_edges=dict( + argstr="-e %.3f", + position=6, + ), ) inputs = Slicer.input_spec() @@ -54,7 +105,11 @@ def test_Slicer_inputs(): def test_Slicer_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Slicer.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Smooth.py b/nipype/interfaces/fsl/tests/test_auto_Smooth.py index 733f0e83f1..9d9324770b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Smooth.py +++ b/nipype/interfaces/fsl/tests/test_auto_Smooth.py @@ -4,15 +4,25 @@ def test_Smooth_inputs(): input_map = dict( - 
args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), fwhm=dict( argstr="-kernel gauss %.03f -fmean", mandatory=True, position=1, xor=["sigma"], ), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), output_type=dict(), sigma=dict( argstr="-kernel gauss %.03f -fmean", @@ -37,7 +47,11 @@ def test_Smooth_inputs(): def test_Smooth_outputs(): - output_map = dict(smoothed_file=dict(extensions=None,),) + output_map = dict( + smoothed_file=dict( + extensions=None, + ), + ) outputs = Smooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py b/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py index b6fac84352..bf21438d1d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py +++ b/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py @@ -4,13 +4,34 @@ def test_SmoothEstimate_inputs(): input_map = dict( - args=dict(argstr="%s",), - dof=dict(argstr="--dof=%d", mandatory=True, xor=["zstat_file"],), - environ=dict(nohash=True, usedefault=True,), - mask_file=dict(argstr="--mask=%s", extensions=None, mandatory=True,), + args=dict( + argstr="%s", + ), + dof=dict( + argstr="--dof=%d", + mandatory=True, + xor=["zstat_file"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + mask_file=dict( + argstr="--mask=%s", + extensions=None, + mandatory=True, + ), output_type=dict(), - residual_fit_file=dict(argstr="--res=%s", extensions=None, requires=["dof"],), - zstat_file=dict(argstr="--zstat=%s", extensions=None, xor=["dof"],), + residual_fit_file=dict( + argstr="--res=%s", + extensions=None, + requires=["dof"], + ), + zstat_file=dict( + argstr="--zstat=%s", + extensions=None, + xor=["dof"], + ), ) inputs = SmoothEstimate.input_spec() @@ -20,7 +41,11 @@ def test_SmoothEstimate_inputs(): def test_SmoothEstimate_outputs(): - output_map = dict(dlh=dict(), resels=dict(), volume=dict(),) + output_map = dict( + dlh=dict(), + resels=dict(), + volume=dict(), + ) outputs = SmoothEstimate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py b/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py index ec2b59ba6d..0d6f68cbea 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py +++ b/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py @@ -4,21 +4,58 @@ def test_SpatialFilter_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - internal_datatype=dict(argstr="-dt %s", position=1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), kernel_file=dict( - argstr="%s", extensions=None, position=5, xor=["kernel_size"], + argstr="%s", + extensions=None, + position=5, + xor=["kernel_size"], + ), + kernel_shape=dict( + argstr="-kernel %s", + position=4, + ), + kernel_size=dict( + argstr="%.4f", + position=5, + xor=["kernel_file"], + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), + operation=dict( + argstr="-f%s", + mandatory=True, + position=6, ), - kernel_shape=dict(argstr="-kernel %s", position=4,), - 
kernel_size=dict(argstr="%.4f", position=5, xor=["kernel_file"],), - nan2zeros=dict(argstr="-nan", position=3,), - operation=dict(argstr="-f%s", mandatory=True, position=6,), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = SpatialFilter.input_spec() @@ -29,7 +66,11 @@ def test_SpatialFilter_inputs(): def test_SpatialFilter_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SpatialFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Split.py b/nipype/interfaces/fsl/tests/test_auto_Split.py index 26b814b9c0..79aa3a7ade 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Split.py +++ b/nipype/interfaces/fsl/tests/test_auto_Split.py @@ -4,11 +4,28 @@ def test_Split_inputs(): input_map = dict( - args=dict(argstr="%s",), - dimension=dict(argstr="-%s", mandatory=True, position=2,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), - out_base_name=dict(argstr="%s", position=1,), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="-%s", + mandatory=True, + position=2, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), + out_base_name=dict( + argstr="%s", + position=1, + ), output_type=dict(), ) inputs = Split.input_spec() @@ -19,7 +36,9 @@ def test_Split_inputs(): def test_Split_outputs(): - output_map = dict(out_files=dict(),) + output_map = dict( + out_files=dict(), + ) outputs = Split.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_StdImage.py b/nipype/interfaces/fsl/tests/test_auto_StdImage.py index 073ebfa7ee..226abb5e5e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_StdImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_StdImage.py @@ -4,16 +4,43 @@ def test_StdImage_inputs(): input_map = dict( - args=dict(argstr="%s",), - dimension=dict(argstr="-%sstd", position=4, usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - internal_datatype=dict(argstr="-dt %s", position=1,), - nan2zeros=dict(argstr="-nan", position=3,), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="-%sstd", + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = StdImage.input_spec() @@ -24,7 +51,11 @@ def test_StdImage_inputs(): def test_StdImage_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = 
StdImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py b/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py index ac56fad17e..26b3b3ff54 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py +++ b/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py @@ -4,11 +4,29 @@ def test_SwapDimensions_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position="1",), - new_dims=dict(argstr="%s %s %s", mandatory=True,), - out_file=dict(argstr="%s", extensions=None, genfile=True, hash_files=False,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position="1", + ), + new_dims=dict( + argstr="%s %s %s", + mandatory=True, + ), + out_file=dict( + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + ), output_type=dict(), ) inputs = SwapDimensions.input_spec() @@ -19,7 +37,11 @@ def test_SwapDimensions_inputs(): def test_SwapDimensions_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SwapDimensions.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_TOPUP.py b/nipype/interfaces/fsl/tests/test_auto_TOPUP.py index f34023f799..3358190dfb 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TOPUP.py +++ b/nipype/interfaces/fsl/tests/test_auto_TOPUP.py @@ -4,8 +4,13 @@ def test_TOPUP_inputs(): input_map = dict( - args=dict(argstr="%s",), - config=dict(argstr="--config=%s", usedefault=True,), + args=dict( + argstr="%s", + ), + config=dict( + argstr="--config=%s", + usedefault=True, + ), encoding_direction=dict( argstr="--datain=%s", mandatory=True, @@ -18,14 +23,33 @@ def test_TOPUP_inputs(): mandatory=True, xor=["encoding_direction"], ), - environ=dict(nohash=True, usedefault=True,), - estmov=dict(argstr="--estmov=%d",), - fwhm=dict(argstr="--fwhm=%f",), - in_file=dict(argstr="--imain=%s", extensions=None, mandatory=True,), - interp=dict(argstr="--interp=%s",), - max_iter=dict(argstr="--miter=%d",), - minmet=dict(argstr="--minmet=%d",), - numprec=dict(argstr="--numprec=%s",), + environ=dict( + nohash=True, + usedefault=True, + ), + estmov=dict( + argstr="--estmov=%d", + ), + fwhm=dict( + argstr="--fwhm=%f", + ), + in_file=dict( + argstr="--imain=%s", + extensions=None, + mandatory=True, + ), + interp=dict( + argstr="--interp=%s", + ), + max_iter=dict( + argstr="--miter=%d", + ), + minmet=dict( + argstr="--minmet=%d", + ), + numprec=dict( + argstr="--numprec=%s", + ), out_base=dict( argstr="--out=%s", extensions=None, @@ -47,7 +71,11 @@ def test_TOPUP_inputs(): name_source=["in_file"], name_template="%s_field", ), - out_jac_prefix=dict(argstr="--jacout=%s", hash_files=False, usedefault=True,), + out_jac_prefix=dict( + argstr="--jacout=%s", + hash_files=False, + usedefault=True, + ), out_logfile=dict( argstr="--logout=%s", extensions=None, @@ -56,20 +84,46 @@ def test_TOPUP_inputs(): name_source=["in_file"], name_template="%s_topup.log", ), - out_mat_prefix=dict(argstr="--rbmout=%s", hash_files=False, usedefault=True,), - out_warp_prefix=dict(argstr="--dfout=%s", hash_files=False, usedefault=True,), + out_mat_prefix=dict( + argstr="--rbmout=%s", + hash_files=False, + usedefault=True, + ), + out_warp_prefix=dict( + 
argstr="--dfout=%s", + hash_files=False, + usedefault=True, + ), output_type=dict(), readout_times=dict( - mandatory=True, requires=["encoding_direction"], xor=["encoding_file"], - ), - reg_lambda=dict(argstr="--lambda=%0.f",), - regmod=dict(argstr="--regmod=%s",), - regrid=dict(argstr="--regrid=%d",), - scale=dict(argstr="--scale=%d",), - splineorder=dict(argstr="--splineorder=%d",), - ssqlambda=dict(argstr="--ssqlambda=%d",), - subsamp=dict(argstr="--subsamp=%d",), - warp_res=dict(argstr="--warpres=%f",), + mandatory=True, + requires=["encoding_direction"], + xor=["encoding_file"], + ), + reg_lambda=dict( + argstr="--lambda=%0.f", + ), + regmod=dict( + argstr="--regmod=%s", + ), + regrid=dict( + argstr="--regrid=%d", + ), + scale=dict( + argstr="--scale=%d", + ), + splineorder=dict( + argstr="--splineorder=%d", + ), + ssqlambda=dict( + argstr="--ssqlambda=%d", + ), + subsamp=dict( + argstr="--subsamp=%d", + ), + warp_res=dict( + argstr="--warpres=%f", + ), ) inputs = TOPUP.input_spec() @@ -80,14 +134,26 @@ def test_TOPUP_inputs(): def test_TOPUP_outputs(): output_map = dict( - out_corrected=dict(extensions=None,), - out_enc_file=dict(extensions=None,), - out_field=dict(extensions=None,), - out_fieldcoef=dict(extensions=None,), + out_corrected=dict( + extensions=None, + ), + out_enc_file=dict( + extensions=None, + ), + out_field=dict( + extensions=None, + ), + out_fieldcoef=dict( + extensions=None, + ), out_jacs=dict(), - out_logfile=dict(extensions=None,), + out_logfile=dict( + extensions=None, + ), out_mats=dict(), - out_movpar=dict(extensions=None,), + out_movpar=dict( + extensions=None, + ), out_warps=dict(), ) outputs = TOPUP.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py b/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py index a764c6c1a6..3a825e4e45 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py +++ b/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py @@ -4,17 +4,48 @@ def test_TemporalFilter_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - highpass_sigma=dict(argstr="-bptf %.6f", position=4, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - internal_datatype=dict(argstr="-dt %s", position=1,), - lowpass_sigma=dict(argstr="%.6f", position=5, usedefault=True,), - nan2zeros=dict(argstr="-nan", position=3,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + highpass_sigma=dict( + argstr="-bptf %.6f", + position=4, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + lowpass_sigma=dict( + argstr="%.6f", + position=5, + usedefault=True, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = TemporalFilter.input_spec() @@ -25,7 +56,11 @@ def test_TemporalFilter_inputs(): def test_TemporalFilter_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TemporalFilter.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/fsl/tests/test_auto_Threshold.py b/nipype/interfaces/fsl/tests/test_auto_Threshold.py index 4156b8f82b..cc3446bd47 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Threshold.py +++ b/nipype/interfaces/fsl/tests/test_auto_Threshold.py @@ -4,19 +4,50 @@ def test_Threshold_inputs(): input_map = dict( - args=dict(argstr="%s",), - direction=dict(usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - internal_datatype=dict(argstr="-dt %s", position=1,), - nan2zeros=dict(argstr="-nan", position=3,), + args=dict( + argstr="%s", + ), + direction=dict( + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), - thresh=dict(argstr="%s", mandatory=True, position=4,), - use_nonzero_voxels=dict(requires=["use_robust_range"],), + thresh=dict( + argstr="%s", + mandatory=True, + position=4, + ), + use_nonzero_voxels=dict( + requires=["use_robust_range"], + ), use_robust_range=dict(), ) inputs = Threshold.input_spec() @@ -27,7 +58,11 @@ def test_Threshold_inputs(): def test_Threshold_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Threshold.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py b/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py index f5c6c38f35..631741da49 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py +++ b/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py @@ -4,23 +4,52 @@ def test_TractSkeleton_inputs(): input_map = dict( - alt_data_file=dict(argstr="-a %s", extensions=None,), - alt_skeleton=dict(argstr="-s %s", extensions=None,), - args=dict(argstr="%s",), - data_file=dict(extensions=None,), - distance_map=dict(extensions=None,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True,), + alt_data_file=dict( + argstr="-a %s", + extensions=None, + ), + alt_skeleton=dict( + argstr="-s %s", + extensions=None, + ), + args=dict( + argstr="%s", + ), + data_file=dict( + extensions=None, + ), + distance_map=dict( + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), output_type=dict(), project_data=dict( argstr="-p %.3f %s %s %s %s", requires=["threshold", "distance_map", "data_file"], ), - projected_data=dict(extensions=None,), - search_mask_file=dict(extensions=None, xor=["use_cingulum_mask"],), - skeleton_file=dict(argstr="-o %s",), + projected_data=dict( + extensions=None, + ), + search_mask_file=dict( + extensions=None, + xor=["use_cingulum_mask"], + ), + skeleton_file=dict( + argstr="-o %s", + ), threshold=dict(), - use_cingulum_mask=dict(usedefault=True, xor=["search_mask_file"],), + use_cingulum_mask=dict( + usedefault=True, + xor=["search_mask_file"], + ), ) inputs = TractSkeleton.input_spec() 
@@ -31,7 +60,12 @@ def test_TractSkeleton_inputs(): def test_TractSkeleton_outputs(): output_map = dict( - projected_data=dict(extensions=None,), skeleton_file=dict(extensions=None,), + projected_data=dict( + extensions=None, + ), + skeleton_file=dict( + extensions=None, + ), ) outputs = TractSkeleton.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Training.py b/nipype/interfaces/fsl/tests/test_auto_Training.py index 5626f3e483..82a53d1408 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Training.py +++ b/nipype/interfaces/fsl/tests/test_auto_Training.py @@ -4,11 +4,26 @@ def test_Training_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - loo=dict(argstr="-l", position=2,), - mel_icas=dict(argstr="%s", copyfile=False, position=-1,), - trained_wts_filestem=dict(argstr="%s", position=1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + loo=dict( + argstr="-l", + position=2, + ), + mel_icas=dict( + argstr="%s", + copyfile=False, + position=-1, + ), + trained_wts_filestem=dict( + argstr="%s", + position=1, + ), ) inputs = Training.input_spec() @@ -18,7 +33,11 @@ def test_Training_inputs(): def test_Training_outputs(): - output_map = dict(trained_wts_file=dict(extensions=None,),) + output_map = dict( + trained_wts_file=dict( + extensions=None, + ), + ) outputs = Training.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py b/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py index df4c1c2257..3ab307d6a8 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py +++ b/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py @@ -3,7 +3,13 @@ def test_TrainingSetCreator_inputs(): - input_map = dict(mel_icas_in=dict(argstr="%s", copyfile=False, position=-1,),) + input_map = dict( + mel_icas_in=dict( + argstr="%s", + copyfile=False, + position=-1, + ), + ) inputs = TrainingSetCreator.input_spec() for key, metadata in list(input_map.items()): @@ -12,7 +18,13 @@ def test_TrainingSetCreator_inputs(): def test_TrainingSetCreator_outputs(): - output_map = dict(mel_icas_out=dict(argstr="%s", copyfile=False, position=-1,),) + output_map = dict( + mel_icas_out=dict( + argstr="%s", + copyfile=False, + position=-1, + ), + ) outputs = TrainingSetCreator.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py b/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py index 67662a8c7f..cb27a76876 100644 --- a/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py @@ -4,16 +4,43 @@ def test_UnaryMaths_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - internal_datatype=dict(argstr="-dt %s", position=1,), - nan2zeros=dict(argstr="-nan", position=3,), - operation=dict(argstr="-%s", mandatory=True, position=4,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), + operation=dict( + argstr="-%s", + mandatory=True, + position=4, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, 
position=-2, + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1,), output_type=dict(), ) inputs = UnaryMaths.input_spec() @@ -24,7 +51,11 @@ def test_UnaryMaths_inputs(): def test_UnaryMaths_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = UnaryMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_VecReg.py b/nipype/interfaces/fsl/tests/test_auto_VecReg.py index 4f802628cd..9564241cc3 100644 --- a/nipype/interfaces/fsl/tests/test_auto_VecReg.py +++ b/nipype/interfaces/fsl/tests/test_auto_VecReg.py @@ -4,19 +4,57 @@ def test_VecReg_inputs(): input_map = dict( - affine_mat=dict(argstr="-t %s", extensions=None,), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True,), - interpolation=dict(argstr="--interp=%s",), - mask=dict(argstr="-m %s", extensions=None,), - out_file=dict(argstr="-o %s", extensions=None, genfile=True, hash_files=False,), + affine_mat=dict( + argstr="-t %s", + extensions=None, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + interpolation=dict( + argstr="--interp=%s", + ), + mask=dict( + argstr="-m %s", + extensions=None, + ), + out_file=dict( + argstr="-o %s", + extensions=None, + genfile=True, + hash_files=False, + ), output_type=dict(), - ref_mask=dict(argstr="--refmask=%s", extensions=None,), - ref_vol=dict(argstr="-r %s", extensions=None, mandatory=True,), - rotation_mat=dict(argstr="--rotmat=%s", extensions=None,), - rotation_warp=dict(argstr="--rotwarp=%s", extensions=None,), - warp_field=dict(argstr="-w %s", extensions=None,), + ref_mask=dict( + argstr="--refmask=%s", + extensions=None, + ), + ref_vol=dict( + argstr="-r %s", + extensions=None, + mandatory=True, + ), + rotation_mat=dict( + argstr="--rotmat=%s", + extensions=None, + ), + rotation_warp=dict( + argstr="--rotwarp=%s", + extensions=None, + ), + warp_field=dict( + argstr="-w %s", + extensions=None, + ), ) inputs = VecReg.input_spec() @@ -26,7 +64,11 @@ def test_VecReg_inputs(): def test_VecReg_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = VecReg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py b/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py index 70980aa1be..b7f7fc7d87 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py @@ -4,21 +4,53 @@ def test_WarpPoints_inputs(): input_map = dict( - args=dict(argstr="%s",), - coord_mm=dict(argstr="-mm", xor=["coord_vox"],), - coord_vox=dict(argstr="-vox", xor=["coord_mm"],), - dest_file=dict(argstr="-dest %s", extensions=None, mandatory=True,), - environ=dict(nohash=True, usedefault=True,), - in_coords=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), + args=dict( + argstr="%s", + ), + coord_mm=dict( + argstr="-mm", + xor=["coord_vox"], + ), + coord_vox=dict( + argstr="-vox", + xor=["coord_mm"], + ), + dest_file=dict( + argstr="-dest %s", + extensions=None, + mandatory=True, + ), + environ=dict( + 
nohash=True, + usedefault=True, + ), + in_coords=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), out_file=dict( extensions=None, name_source="in_coords", name_template="%s_warped", output_name="out_file", ), - src_file=dict(argstr="-src %s", extensions=None, mandatory=True,), - warp_file=dict(argstr="-warp %s", extensions=None, xor=["xfm_file"],), - xfm_file=dict(argstr="-xfm %s", extensions=None, xor=["warp_file"],), + src_file=dict( + argstr="-src %s", + extensions=None, + mandatory=True, + ), + warp_file=dict( + argstr="-warp %s", + extensions=None, + xor=["xfm_file"], + ), + xfm_file=dict( + argstr="-xfm %s", + extensions=None, + xor=["warp_file"], + ), ) inputs = WarpPoints.input_spec() @@ -28,7 +60,11 @@ def test_WarpPoints_inputs(): def test_WarpPoints_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = WarpPoints.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py b/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py index a6fa949890..b8f4cbef97 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py @@ -4,15 +4,47 @@ def test_WarpPointsFromStd_inputs(): input_map = dict( - args=dict(argstr="%s",), - coord_mm=dict(argstr="-mm", xor=["coord_vox"],), - coord_vox=dict(argstr="-vox", xor=["coord_mm"],), - environ=dict(nohash=True, usedefault=True,), - img_file=dict(argstr="-img %s", extensions=None, mandatory=True,), - in_coords=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - std_file=dict(argstr="-std %s", extensions=None, mandatory=True,), - warp_file=dict(argstr="-warp %s", extensions=None, xor=["xfm_file"],), - xfm_file=dict(argstr="-xfm %s", extensions=None, xor=["warp_file"],), + args=dict( + argstr="%s", + ), + coord_mm=dict( + argstr="-mm", + xor=["coord_vox"], + ), + coord_vox=dict( + argstr="-vox", + xor=["coord_mm"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + img_file=dict( + argstr="-img %s", + extensions=None, + mandatory=True, + ), + in_coords=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + std_file=dict( + argstr="-std %s", + extensions=None, + mandatory=True, + ), + warp_file=dict( + argstr="-warp %s", + extensions=None, + xor=["xfm_file"], + ), + xfm_file=dict( + argstr="-xfm %s", + extensions=None, + xor=["warp_file"], + ), ) inputs = WarpPointsFromStd.input_spec() @@ -22,7 +54,11 @@ def test_WarpPointsFromStd_inputs(): def test_WarpPointsFromStd_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = WarpPointsFromStd.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py b/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py index 9debbe6a74..0b5881c776 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py @@ -4,22 +4,57 @@ def test_WarpPointsToStd_inputs(): input_map = dict( - args=dict(argstr="%s",), - coord_mm=dict(argstr="-mm", xor=["coord_vox"],), - coord_vox=dict(argstr="-vox", xor=["coord_mm"],), - environ=dict(nohash=True, usedefault=True,), - img_file=dict(argstr="-img %s", extensions=None, mandatory=True,), - in_coords=dict(argstr="%s", extensions=None, 
mandatory=True, position=-1,), + args=dict( + argstr="%s", + ), + coord_mm=dict( + argstr="-mm", + xor=["coord_vox"], + ), + coord_vox=dict( + argstr="-vox", + xor=["coord_mm"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + img_file=dict( + argstr="-img %s", + extensions=None, + mandatory=True, + ), + in_coords=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), out_file=dict( extensions=None, name_source="in_coords", name_template="%s_warped", output_name="out_file", ), - premat_file=dict(argstr="-premat %s", extensions=None,), - std_file=dict(argstr="-std %s", extensions=None, mandatory=True,), - warp_file=dict(argstr="-warp %s", extensions=None, xor=["xfm_file"],), - xfm_file=dict(argstr="-xfm %s", extensions=None, xor=["warp_file"],), + premat_file=dict( + argstr="-premat %s", + extensions=None, + ), + std_file=dict( + argstr="-std %s", + extensions=None, + mandatory=True, + ), + warp_file=dict( + argstr="-warp %s", + extensions=None, + xor=["xfm_file"], + ), + xfm_file=dict( + argstr="-xfm %s", + extensions=None, + xor=["warp_file"], + ), ) inputs = WarpPointsToStd.input_spec() @@ -29,7 +64,11 @@ def test_WarpPointsToStd_inputs(): def test_WarpPointsToStd_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = WarpPointsToStd.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py b/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py index cdb0e86e64..c8caa8da84 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py @@ -4,10 +4,21 @@ def test_WarpUtils_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="--in=%s", extensions=None, mandatory=True,), - knot_space=dict(argstr="--knotspace=%d,%d,%d",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="--in=%s", + extensions=None, + mandatory=True, + ), + knot_space=dict( + argstr="--knotspace=%d,%d,%d", + ), out_file=dict( argstr="--out=%s", extensions=None, @@ -15,13 +26,29 @@ def test_WarpUtils_inputs(): output_name="out_file", position=-1, ), - out_format=dict(argstr="--outformat=%s",), - out_jacobian=dict(argstr="--jac=%s", extensions=None,), + out_format=dict( + argstr="--outformat=%s", + ), + out_jacobian=dict( + argstr="--jac=%s", + extensions=None, + ), output_type=dict(), - reference=dict(argstr="--ref=%s", extensions=None, mandatory=True,), - warp_resolution=dict(argstr="--warpres=%0.4f,%0.4f,%0.4f",), - with_affine=dict(argstr="--withaff",), - write_jacobian=dict(mandatory=True, usedefault=True,), + reference=dict( + argstr="--ref=%s", + extensions=None, + mandatory=True, + ), + warp_resolution=dict( + argstr="--warpres=%0.4f,%0.4f,%0.4f", + ), + with_affine=dict( + argstr="--withaff", + ), + write_jacobian=dict( + mandatory=True, + usedefault=True, + ), ) inputs = WarpUtils.input_spec() @@ -32,7 +59,12 @@ def test_WarpUtils_inputs(): def test_WarpUtils_outputs(): output_map = dict( - out_file=dict(extensions=None,), out_jacobian=dict(extensions=None,), + out_file=dict( + extensions=None, + ), + out_jacobian=dict( + extensions=None, + ), ) outputs = WarpUtils.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_XFibres5.py b/nipype/interfaces/fsl/tests/test_auto_XFibres5.py index 9a4e973569..349b2f52e7 100644 --- 
a/nipype/interfaces/fsl/tests/test_auto_XFibres5.py +++ b/nipype/interfaces/fsl/tests/test_auto_XFibres5.py @@ -4,37 +4,111 @@ def test_XFibres5_inputs(): input_map = dict( - all_ard=dict(argstr="--allard", xor=("no_ard", "all_ard"),), - args=dict(argstr="%s",), - burn_in=dict(argstr="--burnin=%d", usedefault=True,), - burn_in_no_ard=dict(argstr="--burnin_noard=%d", usedefault=True,), - bvals=dict(argstr="--bvals=%s", extensions=None, mandatory=True,), - bvecs=dict(argstr="--bvecs=%s", extensions=None, mandatory=True,), + all_ard=dict( + argstr="--allard", + xor=("no_ard", "all_ard"), + ), + args=dict( + argstr="%s", + ), + burn_in=dict( + argstr="--burnin=%d", + usedefault=True, + ), + burn_in_no_ard=dict( + argstr="--burnin_noard=%d", + usedefault=True, + ), + bvals=dict( + argstr="--bvals=%s", + extensions=None, + mandatory=True, + ), + bvecs=dict( + argstr="--bvecs=%s", + extensions=None, + mandatory=True, + ), cnlinear=dict( - argstr="--cnonlinear", xor=("no_spat", "non_linear", "cnlinear"), - ), - dwi=dict(argstr="--data=%s", extensions=None, mandatory=True,), - environ=dict(nohash=True, usedefault=True,), - f0_ard=dict(argstr="--f0 --ardf0", xor=["f0_noard", "f0_ard", "all_ard"],), - f0_noard=dict(argstr="--f0", xor=["f0_noard", "f0_ard"],), - force_dir=dict(argstr="--forcedir", usedefault=True,), - fudge=dict(argstr="--fudge=%d",), - gradnonlin=dict(argstr="--gradnonlin=%s", extensions=None,), - logdir=dict(argstr="--logdir=%s", usedefault=True,), - mask=dict(argstr="--mask=%s", extensions=None, mandatory=True,), - model=dict(argstr="--model=%d",), - n_fibres=dict(argstr="--nfibres=%d", mandatory=True, usedefault=True,), - n_jumps=dict(argstr="--njumps=%d", usedefault=True,), - no_ard=dict(argstr="--noard", xor=("no_ard", "all_ard"),), - no_spat=dict(argstr="--nospat", xor=("no_spat", "non_linear", "cnlinear"),), + argstr="--cnonlinear", + xor=("no_spat", "non_linear", "cnlinear"), + ), + dwi=dict( + argstr="--data=%s", + extensions=None, + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + f0_ard=dict( + argstr="--f0 --ardf0", + xor=["f0_noard", "f0_ard", "all_ard"], + ), + f0_noard=dict( + argstr="--f0", + xor=["f0_noard", "f0_ard"], + ), + force_dir=dict( + argstr="--forcedir", + usedefault=True, + ), + fudge=dict( + argstr="--fudge=%d", + ), + gradnonlin=dict( + argstr="--gradnonlin=%s", + extensions=None, + ), + logdir=dict( + argstr="--logdir=%s", + usedefault=True, + ), + mask=dict( + argstr="--mask=%s", + extensions=None, + mandatory=True, + ), + model=dict( + argstr="--model=%d", + ), + n_fibres=dict( + argstr="--nfibres=%d", + mandatory=True, + usedefault=True, + ), + n_jumps=dict( + argstr="--njumps=%d", + usedefault=True, + ), + no_ard=dict( + argstr="--noard", + xor=("no_ard", "all_ard"), + ), + no_spat=dict( + argstr="--nospat", + xor=("no_spat", "non_linear", "cnlinear"), + ), non_linear=dict( - argstr="--nonlinear", xor=("no_spat", "non_linear", "cnlinear"), + argstr="--nonlinear", + xor=("no_spat", "non_linear", "cnlinear"), ), output_type=dict(), - rician=dict(argstr="--rician",), - sample_every=dict(argstr="--sampleevery=%d", usedefault=True,), - seed=dict(argstr="--seed=%d",), - update_proposal_every=dict(argstr="--updateproposalevery=%d", usedefault=True,), + rician=dict( + argstr="--rician", + ), + sample_every=dict( + argstr="--sampleevery=%d", + usedefault=True, + ), + seed=dict( + argstr="--seed=%d", + ), + update_proposal_every=dict( + argstr="--updateproposalevery=%d", + usedefault=True, + ), ) inputs = XFibres5.input_spec() @@ 
-47,10 +121,16 @@ def test_XFibres5_outputs(): output_map = dict( dyads=dict(), fsamples=dict(), - mean_S0samples=dict(extensions=None,), - mean_dsamples=dict(extensions=None,), + mean_S0samples=dict( + extensions=None, + ), + mean_dsamples=dict( + extensions=None, + ), mean_fsamples=dict(), - mean_tausamples=dict(extensions=None,), + mean_tausamples=dict( + extensions=None, + ), phsamples=dict(), thsamples=dict(), ) diff --git a/nipype/interfaces/minc/tests/test_auto_Average.py b/nipype/interfaces/minc/tests/test_auto_Average.py index c9066611dd..7017967d61 100644 --- a/nipype/interfaces/minc/tests/test_auto_Average.py +++ b/nipype/interfaces/minc/tests/test_auto_Average.py @@ -4,18 +4,40 @@ def test_Average_inputs(): input_map = dict( - args=dict(argstr="%s",), - avgdim=dict(argstr="-avgdim %s",), - binarize=dict(argstr="-binarize",), - binrange=dict(argstr="-binrange %s %s",), - binvalue=dict(argstr="-binvalue %s",), + args=dict( + argstr="%s", + ), + avgdim=dict( + argstr="-avgdim %s", + ), + binarize=dict( + argstr="-binarize", + ), + binrange=dict( + argstr="-binrange %s %s", + ), + binvalue=dict( + argstr="-binvalue %s", + ), check_dimensions=dict( - argstr="-check_dimensions", xor=("check_dimensions", "no_check_dimensions"), + argstr="-check_dimensions", + xor=("check_dimensions", "no_check_dimensions"), + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + copy_header=dict( + argstr="-copy_header", + xor=("copy_header", "no_copy_header"), + ), + debug=dict( + argstr="-debug", + ), + environ=dict( + nohash=True, + usedefault=True, ), - clobber=dict(argstr="-clobber", usedefault=True,), - copy_header=dict(argstr="-copy_header", xor=("copy_header", "no_copy_header"),), - debug=dict(argstr="-debug",), - environ=dict(nohash=True, usedefault=True,), filelist=dict( argstr="-filelist %s", extensions=None, @@ -156,17 +178,25 @@ def test_Average_inputs(): xor=("input_files", "filelist"), ), max_buffer_size_in_kb=dict( - argstr="-max_buffer_size_in_kb %d", usedefault=True, + argstr="-max_buffer_size_in_kb %d", + usedefault=True, ), no_check_dimensions=dict( argstr="-nocheck_dimensions", xor=("check_dimensions", "no_check_dimensions"), ), no_copy_header=dict( - argstr="-nocopy_header", xor=("copy_header", "no_copy_header"), + argstr="-nocopy_header", + xor=("copy_header", "no_copy_header"), + ), + nonormalize=dict( + argstr="-nonormalize", + xor=("normalize", "nonormalize"), + ), + normalize=dict( + argstr="-normalize", + xor=("normalize", "nonormalize"), ), - nonormalize=dict(argstr="-nonormalize", xor=("normalize", "nonormalize"),), - normalize=dict(argstr="-normalize", xor=("normalize", "nonormalize"),), output_file=dict( argstr="%s", extensions=None, @@ -176,13 +206,32 @@ def test_Average_inputs(): name_template="%s_averaged.mnc", position=-1, ), - quiet=dict(argstr="-quiet", xor=("verbose", "quiet"),), - sdfile=dict(argstr="-sdfile %s", extensions=None,), - two=dict(argstr="-2",), - verbose=dict(argstr="-verbose", xor=("verbose", "quiet"),), - voxel_range=dict(argstr="-range %d %d",), - weights=dict(argstr="-weights %s", sep=",",), - width_weighted=dict(argstr="-width_weighted", requires=("avgdim",),), + quiet=dict( + argstr="-quiet", + xor=("verbose", "quiet"), + ), + sdfile=dict( + argstr="-sdfile %s", + extensions=None, + ), + two=dict( + argstr="-2", + ), + verbose=dict( + argstr="-verbose", + xor=("verbose", "quiet"), + ), + voxel_range=dict( + argstr="-range %d %d", + ), + weights=dict( + argstr="-weights %s", + sep=",", + ), + width_weighted=dict( + 
argstr="-width_weighted", + requires=("avgdim",), + ), ) inputs = Average.input_spec() @@ -192,7 +241,11 @@ def test_Average_inputs(): def test_Average_outputs(): - output_map = dict(output_file=dict(extensions=None,),) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Average.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_BBox.py b/nipype/interfaces/minc/tests/test_auto_BBox.py index c1dfbb370a..532cb14d5f 100644 --- a/nipype/interfaces/minc/tests/test_auto_BBox.py +++ b/nipype/interfaces/minc/tests/test_auto_BBox.py @@ -4,14 +4,38 @@ def test_BBox_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - format_minccrop=dict(argstr="-minccrop",), - format_mincresample=dict(argstr="-mincresample",), - format_mincreshape=dict(argstr="-mincreshape",), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - one_line=dict(argstr="-one_line", xor=("one_line", "two_lines"),), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + format_minccrop=dict( + argstr="-minccrop", + ), + format_mincresample=dict( + argstr="-mincresample", + ), + format_mincreshape=dict( + argstr="-mincreshape", + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + one_line=dict( + argstr="-one_line", + xor=("one_line", "two_lines"), + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), output_file=dict( extensions=None, hash_files=False, @@ -20,8 +44,13 @@ def test_BBox_inputs(): name_template="%s_bbox.txt", position=-1, ), - threshold=dict(argstr="-threshold",), - two_lines=dict(argstr="-two_lines", xor=("one_line", "two_lines"),), + threshold=dict( + argstr="-threshold", + ), + two_lines=dict( + argstr="-two_lines", + xor=("one_line", "two_lines"), + ), ) inputs = BBox.input_spec() @@ -31,7 +60,11 @@ def test_BBox_inputs(): def test_BBox_outputs(): - output_map = dict(output_file=dict(extensions=None,),) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = BBox.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Beast.py b/nipype/interfaces/minc/tests/test_auto_Beast.py index 5aed48440f..487550a33a 100644 --- a/nipype/interfaces/minc/tests/test_auto_Beast.py +++ b/nipype/interfaces/minc/tests/test_auto_Beast.py @@ -4,20 +4,59 @@ def test_Beast_inputs(): input_map = dict( - abspath=dict(argstr="-abspath", usedefault=True,), - args=dict(argstr="%s",), - clobber=dict(argstr="-clobber", usedefault=True,), - confidence_level_alpha=dict(argstr="-alpha %s", usedefault=True,), - configuration_file=dict(argstr="-configuration %s", extensions=None,), - environ=dict(nohash=True, usedefault=True,), - fill_holes=dict(argstr="-fill",), - flip_images=dict(argstr="-flip",), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - library_dir=dict(argstr="%s", mandatory=True, position=-3,), - load_moments=dict(argstr="-load_moments",), - median_filter=dict(argstr="-median",), - nlm_filter=dict(argstr="-nlm_filter",), - number_selected_images=dict(argstr="-selection_num %s", usedefault=True,), + abspath=dict( + argstr="-abspath", + usedefault=True, + ), + args=dict( + argstr="%s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + 
confidence_level_alpha=dict( + argstr="-alpha %s", + usedefault=True, + ), + configuration_file=dict( + argstr="-configuration %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fill_holes=dict( + argstr="-fill", + ), + flip_images=dict( + argstr="-flip", + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + library_dir=dict( + argstr="%s", + mandatory=True, + position=-3, + ), + load_moments=dict( + argstr="-load_moments", + ), + median_filter=dict( + argstr="-median", + ), + nlm_filter=dict( + argstr="-nlm_filter", + ), + number_selected_images=dict( + argstr="-selection_num %s", + usedefault=True, + ), output_file=dict( argstr="%s", extensions=None, @@ -26,13 +65,32 @@ def test_Beast_inputs(): name_template="%s_beast_mask.mnc", position=-1, ), - patch_size=dict(argstr="-patch_size %s", usedefault=True,), - probability_map=dict(argstr="-probability",), - same_resolution=dict(argstr="-same_resolution",), - search_area=dict(argstr="-search_area %s", usedefault=True,), - smoothness_factor_beta=dict(argstr="-beta %s", usedefault=True,), - threshold_patch_selection=dict(argstr="-threshold %s", usedefault=True,), - voxel_size=dict(argstr="-voxel_size %s", usedefault=True,), + patch_size=dict( + argstr="-patch_size %s", + usedefault=True, + ), + probability_map=dict( + argstr="-probability", + ), + same_resolution=dict( + argstr="-same_resolution", + ), + search_area=dict( + argstr="-search_area %s", + usedefault=True, + ), + smoothness_factor_beta=dict( + argstr="-beta %s", + usedefault=True, + ), + threshold_patch_selection=dict( + argstr="-threshold %s", + usedefault=True, + ), + voxel_size=dict( + argstr="-voxel_size %s", + usedefault=True, + ), ) inputs = Beast.input_spec() @@ -42,7 +100,11 @@ def test_Beast_inputs(): def test_Beast_outputs(): - output_map = dict(output_file=dict(extensions=None,),) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Beast.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_BestLinReg.py b/nipype/interfaces/minc/tests/test_auto_BestLinReg.py index 3e765b0e52..57a8929878 100644 --- a/nipype/interfaces/minc/tests/test_auto_BestLinReg.py +++ b/nipype/interfaces/minc/tests/test_auto_BestLinReg.py @@ -4,9 +4,17 @@ def test_BestLinReg_inputs(): input_map = dict( - args=dict(argstr="%s",), - clobber=dict(argstr="-clobber", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), output_mnc=dict( argstr="%s", extensions=None, @@ -27,9 +35,21 @@ def test_BestLinReg_inputs(): name_template="%s_bestlinreg.xfm", position=-2, ), - source=dict(argstr="%s", extensions=None, mandatory=True, position=-4,), - target=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), - verbose=dict(argstr="-verbose",), + source=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-4, + ), + target=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + verbose=dict( + argstr="-verbose", + ), ) inputs = BestLinReg.input_spec() @@ -40,7 +60,12 @@ def test_BestLinReg_inputs(): def test_BestLinReg_outputs(): output_map = dict( - output_mnc=dict(extensions=None,), output_xfm=dict(extensions=None,), + output_mnc=dict( + extensions=None, + ), + output_xfm=dict( + extensions=None, + ), ) outputs = BestLinReg.output_spec() diff 
--git a/nipype/interfaces/minc/tests/test_auto_BigAverage.py b/nipype/interfaces/minc/tests/test_auto_BigAverage.py index 539ae73488..1eefb273d6 100644 --- a/nipype/interfaces/minc/tests/test_auto_BigAverage.py +++ b/nipype/interfaces/minc/tests/test_auto_BigAverage.py @@ -4,10 +4,23 @@ def test_BigAverage_inputs(): input_map = dict( - args=dict(argstr="%s",), - clobber=dict(argstr="--clobber", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - input_files=dict(argstr="%s", mandatory=True, position=-2, sep=" ",), + args=dict( + argstr="%s", + ), + clobber=dict( + argstr="--clobber", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_files=dict( + argstr="%s", + mandatory=True, + position=-2, + sep=" ", + ), output_file=dict( argstr="%s", extensions=None, @@ -17,8 +30,12 @@ def test_BigAverage_inputs(): name_template="%s_bigaverage.mnc", position=-1, ), - output_float=dict(argstr="--float",), - robust=dict(argstr="-robust",), + output_float=dict( + argstr="--float", + ), + robust=dict( + argstr="-robust", + ), sd_file=dict( argstr="--sdfile %s", extensions=None, @@ -26,8 +43,12 @@ def test_BigAverage_inputs(): name_source=["input_files"], name_template="%s_bigaverage_stdev.mnc", ), - tmpdir=dict(argstr="-tmpdir %s",), - verbose=dict(argstr="--verbose",), + tmpdir=dict( + argstr="-tmpdir %s", + ), + verbose=dict( + argstr="--verbose", + ), ) inputs = BigAverage.input_spec() @@ -38,7 +59,12 @@ def test_BigAverage_inputs(): def test_BigAverage_outputs(): output_map = dict( - output_file=dict(extensions=None,), sd_file=dict(extensions=None,), + output_file=dict( + extensions=None, + ), + sd_file=dict( + extensions=None, + ), ) outputs = BigAverage.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Blob.py b/nipype/interfaces/minc/tests/test_auto_Blob.py index f51c3693f6..ae2b445c73 100644 --- a/nipype/interfaces/minc/tests/test_auto_Blob.py +++ b/nipype/interfaces/minc/tests/test_auto_Blob.py @@ -4,11 +4,25 @@ def test_Blob_inputs(): input_map = dict( - args=dict(argstr="%s",), - determinant=dict(argstr="-determinant",), - environ=dict(nohash=True, usedefault=True,), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - magnitude=dict(argstr="-magnitude",), + args=dict( + argstr="%s", + ), + determinant=dict( + argstr="-determinant", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + magnitude=dict( + argstr="-magnitude", + ), output_file=dict( argstr="%s", extensions=None, @@ -18,8 +32,12 @@ def test_Blob_inputs(): name_template="%s_blob.mnc", position=-1, ), - trace=dict(argstr="-trace",), - translation=dict(argstr="-translation",), + trace=dict( + argstr="-trace", + ), + translation=dict( + argstr="-translation", + ), ) inputs = Blob.input_spec() @@ -29,7 +47,11 @@ def test_Blob_inputs(): def test_Blob_outputs(): - output_map = dict(output_file=dict(extensions=None,),) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Blob.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Blur.py b/nipype/interfaces/minc/tests/test_auto_Blur.py index b37942f768..87647b5f62 100644 --- a/nipype/interfaces/minc/tests/test_auto_Blur.py +++ b/nipype/interfaces/minc/tests/test_auto_Blur.py @@ -4,25 +4,58 @@ def test_Blur_inputs(): input_map = dict( - args=dict(argstr="%s",), - clobber=dict(argstr="-clobber", usedefault=True,), - 
dimensions=dict(argstr="-dimensions %s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + dimensions=dict( + argstr="-dimensions %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), fwhm=dict( - argstr="-fwhm %s", mandatory=True, xor=("fwhm", "fwhm3d", "standard_dev"), + argstr="-fwhm %s", + mandatory=True, + xor=("fwhm", "fwhm3d", "standard_dev"), ), fwhm3d=dict( argstr="-3dfwhm %s %s %s", mandatory=True, xor=("fwhm", "fwhm3d", "standard_dev"), ), - gaussian=dict(argstr="-gaussian", xor=("gaussian", "rect"),), - gradient=dict(argstr="-gradient",), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - no_apodize=dict(argstr="-no_apodize",), - output_file_base=dict(argstr="%s", extensions=None, position=-1,), - partial=dict(argstr="-partial",), - rect=dict(argstr="-rect", xor=("gaussian", "rect"),), + gaussian=dict( + argstr="-gaussian", + xor=("gaussian", "rect"), + ), + gradient=dict( + argstr="-gradient", + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + no_apodize=dict( + argstr="-no_apodize", + ), + output_file_base=dict( + argstr="%s", + extensions=None, + position=-1, + ), + partial=dict( + argstr="-partial", + ), + rect=dict( + argstr="-rect", + xor=("gaussian", "rect"), + ), standard_dev=dict( argstr="-standarddev %s", mandatory=True, @@ -38,12 +71,24 @@ def test_Blur_inputs(): def test_Blur_outputs(): output_map = dict( - gradient_dxyz=dict(extensions=None,), - output_file=dict(extensions=None,), - partial_dx=dict(extensions=None,), - partial_dxyz=dict(extensions=None,), - partial_dy=dict(extensions=None,), - partial_dz=dict(extensions=None,), + gradient_dxyz=dict( + extensions=None, + ), + output_file=dict( + extensions=None, + ), + partial_dx=dict( + extensions=None, + ), + partial_dxyz=dict( + extensions=None, + ), + partial_dy=dict( + extensions=None, + ), + partial_dz=dict( + extensions=None, + ), ) outputs = Blur.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Calc.py b/nipype/interfaces/minc/tests/test_auto_Calc.py index 670278dfa9..d0d4f61fbe 100644 --- a/nipype/interfaces/minc/tests/test_auto_Calc.py +++ b/nipype/interfaces/minc/tests/test_auto_Calc.py @@ -4,15 +4,31 @@ def test_Calc_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), check_dimensions=dict( - argstr="-check_dimensions", xor=("check_dimensions", "no_check_dimensions"), + argstr="-check_dimensions", + xor=("check_dimensions", "no_check_dimensions"), + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + copy_header=dict( + argstr="-copy_header", + xor=("copy_header", "no_copy_header"), + ), + debug=dict( + argstr="-debug", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + eval_width=dict( + argstr="-eval_width %s", ), - clobber=dict(argstr="-clobber", usedefault=True,), - copy_header=dict(argstr="-copy_header", xor=("copy_header", "no_copy_header"),), - debug=dict(argstr="-debug",), - environ=dict(nohash=True, usedefault=True,), - eval_width=dict(argstr="-eval_width %s",), expfile=dict( argstr="-expfile %s", extensions=None, @@ -20,7 +36,9 @@ def test_Calc_inputs(): xor=("expression", "expfile"), ), expression=dict( - argstr="-expression '%s'", mandatory=True, xor=("expression", "expfile"), + argstr="-expression '%s'", + mandatory=True, + xor=("expression", "expfile"), ), filelist=dict( argstr="-filelist %s", @@ -154,15 +172,25 @@ def 
test_Calc_inputs(): "format_unsigned", ), ), - ignore_nan=dict(argstr="-ignore_nan",), - input_files=dict(argstr="%s", mandatory=True, position=-2, sep=" ",), - max_buffer_size_in_kb=dict(argstr="-max_buffer_size_in_kb %d",), + ignore_nan=dict( + argstr="-ignore_nan", + ), + input_files=dict( + argstr="%s", + mandatory=True, + position=-2, + sep=" ", + ), + max_buffer_size_in_kb=dict( + argstr="-max_buffer_size_in_kb %d", + ), no_check_dimensions=dict( argstr="-nocheck_dimensions", xor=("check_dimensions", "no_check_dimensions"), ), no_copy_header=dict( - argstr="-nocopy_header", xor=("copy_header", "no_copy_header"), + argstr="-nocopy_header", + xor=("copy_header", "no_copy_header"), ), outfiles=dict(), output_file=dict( @@ -179,16 +207,30 @@ def test_Calc_inputs(): xor=("output_nan", "output_zero", "output_illegal_value"), ), output_nan=dict( - argstr="-nan", xor=("output_nan", "output_zero", "output_illegal_value"), + argstr="-nan", + xor=("output_nan", "output_zero", "output_illegal_value"), ), output_zero=dict( - argstr="-zero", xor=("output_nan", "output_zero", "output_illegal_value"), + argstr="-zero", + xor=("output_nan", "output_zero", "output_illegal_value"), + ), + propagate_nan=dict( + argstr="-propagate_nan", + ), + quiet=dict( + argstr="-quiet", + xor=("verbose", "quiet"), + ), + two=dict( + argstr="-2", + ), + verbose=dict( + argstr="-verbose", + xor=("verbose", "quiet"), + ), + voxel_range=dict( + argstr="-range %d %d", ), - propagate_nan=dict(argstr="-propagate_nan",), - quiet=dict(argstr="-quiet", xor=("verbose", "quiet"),), - two=dict(argstr="-2",), - verbose=dict(argstr="-verbose", xor=("verbose", "quiet"),), - voxel_range=dict(argstr="-range %d %d",), ) inputs = Calc.input_spec() @@ -198,7 +240,11 @@ def test_Calc_inputs(): def test_Calc_outputs(): - output_map = dict(output_file=dict(extensions=None,),) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Calc.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Convert.py b/nipype/interfaces/minc/tests/test_auto_Convert.py index 695d371b47..57963b9b81 100644 --- a/nipype/interfaces/minc/tests/test_auto_Convert.py +++ b/nipype/interfaces/minc/tests/test_auto_Convert.py @@ -4,12 +4,29 @@ def test_Convert_inputs(): input_map = dict( - args=dict(argstr="%s",), - chunk=dict(argstr="-chunk %d",), - clobber=dict(argstr="-clobber", usedefault=True,), - compression=dict(argstr="-compress %s",), - environ=dict(nohash=True, usedefault=True,), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + args=dict( + argstr="%s", + ), + chunk=dict( + argstr="-chunk %d", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + compression=dict( + argstr="-compress %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), output_file=dict( argstr="%s", extensions=None, @@ -19,8 +36,12 @@ def test_Convert_inputs(): name_template="%s_convert_output.mnc", position=-1, ), - template=dict(argstr="-template",), - two=dict(argstr="-2",), + template=dict( + argstr="-template", + ), + two=dict( + argstr="-2", + ), ) inputs = Convert.input_spec() @@ -30,7 +51,11 @@ def test_Convert_inputs(): def test_Convert_outputs(): - output_map = dict(output_file=dict(extensions=None,),) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Convert.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/minc/tests/test_auto_Copy.py b/nipype/interfaces/minc/tests/test_auto_Copy.py index 91736a67b3..a6bb527e7a 100644 --- a/nipype/interfaces/minc/tests/test_auto_Copy.py +++ b/nipype/interfaces/minc/tests/test_auto_Copy.py @@ -4,9 +4,19 @@ def test_Copy_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), output_file=dict( argstr="%s", extensions=None, @@ -16,8 +26,14 @@ def test_Copy_inputs(): name_template="%s_copy.mnc", position=-1, ), - pixel_values=dict(argstr="-pixel_values", xor=("pixel_values", "real_values"),), - real_values=dict(argstr="-real_values", xor=("pixel_values", "real_values"),), + pixel_values=dict( + argstr="-pixel_values", + xor=("pixel_values", "real_values"), + ), + real_values=dict( + argstr="-real_values", + xor=("pixel_values", "real_values"), + ), ) inputs = Copy.input_spec() @@ -27,7 +43,11 @@ def test_Copy_inputs(): def test_Copy_outputs(): - output_map = dict(output_file=dict(extensions=None,),) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Copy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Dump.py b/nipype/interfaces/minc/tests/test_auto_Dump.py index eb1fe2c6a7..4253bc20cc 100644 --- a/nipype/interfaces/minc/tests/test_auto_Dump.py +++ b/nipype/interfaces/minc/tests/test_auto_Dump.py @@ -5,19 +5,46 @@ def test_Dump_inputs(): input_map = dict( annotations_brief=dict( - argstr="-b %s", xor=("annotations_brief", "annotations_full"), + argstr="-b %s", + xor=("annotations_brief", "annotations_full"), ), annotations_full=dict( - argstr="-f %s", xor=("annotations_brief", "annotations_full"), - ), - args=dict(argstr="%s",), - coordinate_data=dict(argstr="-c", xor=("coordinate_data", "header_data"),), - environ=dict(nohash=True, usedefault=True,), - header_data=dict(argstr="-h", xor=("coordinate_data", "header_data"),), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - line_length=dict(argstr="-l %d",), - netcdf_name=dict(argstr="-n %s",), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + argstr="-f %s", + xor=("annotations_brief", "annotations_full"), + ), + args=dict( + argstr="%s", + ), + coordinate_data=dict( + argstr="-c", + xor=("coordinate_data", "header_data"), + ), + environ=dict( + nohash=True, + usedefault=True, + ), + header_data=dict( + argstr="-h", + xor=("coordinate_data", "header_data"), + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + line_length=dict( + argstr="-l %d", + ), + netcdf_name=dict( + argstr="-n %s", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), output_file=dict( extensions=None, hash_files=False, @@ -26,8 +53,13 @@ def test_Dump_inputs(): name_template="%s_dump.txt", position=-1, ), - precision=dict(argstr="%s",), - variables=dict(argstr="-v %s", sep=",",), + precision=dict( + argstr="%s", + ), + variables=dict( + argstr="-v %s", + sep=",", + ), ) inputs = Dump.input_spec() @@ -37,7 +69,11 @@ def test_Dump_inputs(): def test_Dump_outputs(): - output_map = dict(output_file=dict(extensions=None,),) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) 
outputs = Dump.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Extract.py b/nipype/interfaces/minc/tests/test_auto_Extract.py index 0c05d4ab5f..6c34b443f0 100644 --- a/nipype/interfaces/minc/tests/test_auto_Extract.py +++ b/nipype/interfaces/minc/tests/test_auto_Extract.py @@ -4,9 +4,17 @@ def test_Extract_inputs(): input_map = dict( - args=dict(argstr="%s",), - count=dict(argstr="-count %s", sep=",",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + count=dict( + argstr="-count %s", + sep=",", + ), + environ=dict( + nohash=True, + usedefault=True, + ), flip_any_direction=dict( argstr="-any_direction", xor=( @@ -67,13 +75,35 @@ def test_Extract_inputs(): argstr="+zdirection", xor=("flip_z_positive", "flip_z_negative", "flip_z_any"), ), - image_maximum=dict(argstr="-image_maximum %s",), - image_minimum=dict(argstr="-image_minimum %s",), - image_range=dict(argstr="-image_range %s %s",), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - nonormalize=dict(argstr="-nonormalize", xor=("normalize", "nonormalize"),), - normalize=dict(argstr="-normalize", xor=("normalize", "nonormalize"),), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + image_maximum=dict( + argstr="-image_maximum %s", + ), + image_minimum=dict( + argstr="-image_minimum %s", + ), + image_range=dict( + argstr="-image_range %s %s", + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + nonormalize=dict( + argstr="-nonormalize", + xor=("normalize", "nonormalize"), + ), + normalize=dict( + argstr="-normalize", + xor=("normalize", "nonormalize"), + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), output_file=dict( extensions=None, hash_files=False, @@ -82,7 +112,10 @@ def test_Extract_inputs(): name_template="%s.raw", position=-1, ), - start=dict(argstr="-start %s", sep=",",), + start=dict( + argstr="-start %s", + sep=",", + ), write_ascii=dict( argstr="-ascii", xor=( @@ -173,7 +206,9 @@ def test_Extract_inputs(): "write_unsigned", ), ), - write_range=dict(argstr="-range %s %s",), + write_range=dict( + argstr="-range %s %s", + ), write_short=dict( argstr="-short", xor=( @@ -189,9 +224,13 @@ def test_Extract_inputs(): "write_unsigned", ), ), - write_signed=dict(argstr="-signed", xor=("write_signed", "write_unsigned"),), + write_signed=dict( + argstr="-signed", + xor=("write_signed", "write_unsigned"), + ), write_unsigned=dict( - argstr="-unsigned", xor=("write_signed", "write_unsigned"), + argstr="-unsigned", + xor=("write_signed", "write_unsigned"), ), ) inputs = Extract.input_spec() @@ -202,7 +241,11 @@ def test_Extract_inputs(): def test_Extract_outputs(): - output_map = dict(output_file=dict(extensions=None,),) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Extract.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py b/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py index deb6449d3d..91bcc57e3c 100644 --- a/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py +++ b/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py @@ -4,11 +4,24 @@ def test_Gennlxfm_inputs(): input_map = dict( - args=dict(argstr="%s",), - clobber=dict(argstr="-clobber", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - ident=dict(argstr="-ident",), - like=dict(argstr="-like %s", extensions=None,), + 
args=dict( + argstr="%s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ident=dict( + argstr="-ident", + ), + like=dict( + argstr="-like %s", + extensions=None, + ), output_file=dict( argstr="%s", extensions=None, @@ -18,8 +31,12 @@ def test_Gennlxfm_inputs(): name_template="%s_gennlxfm.xfm", position=-1, ), - step=dict(argstr="-step %s",), - verbose=dict(argstr="-verbose",), + step=dict( + argstr="-step %s", + ), + verbose=dict( + argstr="-verbose", + ), ) inputs = Gennlxfm.input_spec() @@ -30,7 +47,12 @@ def test_Gennlxfm_inputs(): def test_Gennlxfm_outputs(): output_map = dict( - output_file=dict(extensions=None,), output_grid=dict(extensions=None,), + output_file=dict( + extensions=None, + ), + output_grid=dict( + extensions=None, + ), ) outputs = Gennlxfm.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Math.py b/nipype/interfaces/minc/tests/test_auto_Math.py index 32a5f68d66..86858235cd 100644 --- a/nipype/interfaces/minc/tests/test_auto_Math.py +++ b/nipype/interfaces/minc/tests/test_auto_Math.py @@ -4,25 +4,61 @@ def test_Math_inputs(): input_map = dict( - abs=dict(argstr="-abs",), - args=dict(argstr="%s",), - calc_add=dict(argstr="-add",), - calc_and=dict(argstr="-and",), - calc_div=dict(argstr="-div",), - calc_mul=dict(argstr="-mult",), - calc_not=dict(argstr="-not",), - calc_or=dict(argstr="-or",), - calc_sub=dict(argstr="-sub",), + abs=dict( + argstr="-abs", + ), + args=dict( + argstr="%s", + ), + calc_add=dict( + argstr="-add", + ), + calc_and=dict( + argstr="-and", + ), + calc_div=dict( + argstr="-div", + ), + calc_mul=dict( + argstr="-mult", + ), + calc_not=dict( + argstr="-not", + ), + calc_or=dict( + argstr="-or", + ), + calc_sub=dict( + argstr="-sub", + ), check_dimensions=dict( - argstr="-check_dimensions", xor=("check_dimensions", "no_check_dimensions"), - ), - clamp=dict(argstr="-clamp -const2 %s %s",), - clobber=dict(argstr="-clobber", usedefault=True,), - copy_header=dict(argstr="-copy_header", xor=("copy_header", "no_copy_header"),), - count_valid=dict(argstr="-count_valid",), - dimension=dict(argstr="-dimension %s",), - environ=dict(nohash=True, usedefault=True,), - exp=dict(argstr="-exp -const2 %s %s",), + argstr="-check_dimensions", + xor=("check_dimensions", "no_check_dimensions"), + ), + clamp=dict( + argstr="-clamp -const2 %s %s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + copy_header=dict( + argstr="-copy_header", + xor=("copy_header", "no_copy_header"), + ), + count_valid=dict( + argstr="-count_valid", + ), + dimension=dict( + argstr="-dimension %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + exp=dict( + argstr="-exp -const2 %s %s", + ), filelist=dict( argstr="-filelist %s", extensions=None, @@ -155,7 +191,9 @@ def test_Math_inputs(): "format_unsigned", ), ), - ignore_nan=dict(argstr="-ignore_nan",), + ignore_nan=dict( + argstr="-ignore_nan", + ), input_files=dict( argstr="%s", mandatory=True, @@ -163,23 +201,39 @@ def test_Math_inputs(): sep=" ", xor=("input_files", "filelist"), ), - invert=dict(argstr="-invert -const %s",), - isnan=dict(argstr="-isnan",), - log=dict(argstr="-log -const2 %s %s",), + invert=dict( + argstr="-invert -const %s", + ), + isnan=dict( + argstr="-isnan", + ), + log=dict( + argstr="-log -const2 %s %s", + ), max_buffer_size_in_kb=dict( - argstr="-max_buffer_size_in_kb %d", usedefault=True, + argstr="-max_buffer_size_in_kb %d", + usedefault=True, + ), + maximum=dict( + argstr="-maximum", + ), + 
minimum=dict( + argstr="-minimum", + ), + nisnan=dict( + argstr="-nisnan", ), - maximum=dict(argstr="-maximum",), - minimum=dict(argstr="-minimum",), - nisnan=dict(argstr="-nisnan",), no_check_dimensions=dict( argstr="-nocheck_dimensions", xor=("check_dimensions", "no_check_dimensions"), ), no_copy_header=dict( - argstr="-nocopy_header", xor=("copy_header", "no_copy_header"), + argstr="-nocopy_header", + xor=("copy_header", "no_copy_header"), + ), + nsegment=dict( + argstr="-nsegment -const2 %s %s", ), - nsegment=dict(argstr="-nsegment -const2 %s %s",), output_file=dict( argstr="%s", extensions=None, @@ -194,25 +248,55 @@ def test_Math_inputs(): xor=("output_nan", "output_zero", "output_illegal_value"), ), output_nan=dict( - argstr="-nan", xor=("output_nan", "output_zero", "output_illegal_value"), + argstr="-nan", + xor=("output_nan", "output_zero", "output_illegal_value"), ), output_zero=dict( - argstr="-zero", xor=("output_nan", "output_zero", "output_illegal_value"), - ), - percentdiff=dict(argstr="-percentdiff",), - propagate_nan=dict(argstr="-propagate_nan",), - scale=dict(argstr="-scale -const2 %s %s",), - segment=dict(argstr="-segment -const2 %s %s",), - sqrt=dict(argstr="-sqrt",), - square=dict(argstr="-square",), - test_eq=dict(argstr="-eq",), - test_ge=dict(argstr="-ge",), - test_gt=dict(argstr="-gt",), - test_le=dict(argstr="-le",), - test_lt=dict(argstr="-lt",), - test_ne=dict(argstr="-ne",), - two=dict(argstr="-2",), - voxel_range=dict(argstr="-range %d %d",), + argstr="-zero", + xor=("output_nan", "output_zero", "output_illegal_value"), + ), + percentdiff=dict( + argstr="-percentdiff", + ), + propagate_nan=dict( + argstr="-propagate_nan", + ), + scale=dict( + argstr="-scale -const2 %s %s", + ), + segment=dict( + argstr="-segment -const2 %s %s", + ), + sqrt=dict( + argstr="-sqrt", + ), + square=dict( + argstr="-square", + ), + test_eq=dict( + argstr="-eq", + ), + test_ge=dict( + argstr="-ge", + ), + test_gt=dict( + argstr="-gt", + ), + test_le=dict( + argstr="-le", + ), + test_lt=dict( + argstr="-lt", + ), + test_ne=dict( + argstr="-ne", + ), + two=dict( + argstr="-2", + ), + voxel_range=dict( + argstr="-range %d %d", + ), ) inputs = Math.input_spec() @@ -222,7 +306,11 @@ def test_Math_inputs(): def test_Math_outputs(): - output_map = dict(output_file=dict(extensions=None,),) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Math.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_NlpFit.py b/nipype/interfaces/minc/tests/test_auto_NlpFit.py index 58e9e985db..39b1df4743 100644 --- a/nipype/interfaces/minc/tests/test_auto_NlpFit.py +++ b/nipype/interfaces/minc/tests/test_auto_NlpFit.py @@ -4,17 +4,54 @@ def test_NlpFit_inputs(): input_map = dict( - args=dict(argstr="%s",), - clobber=dict(argstr="-clobber", usedefault=True,), - config_file=dict(argstr="-config_file %s", extensions=None, mandatory=True,), - environ=dict(nohash=True, usedefault=True,), - init_xfm=dict(argstr="-init_xfm %s", extensions=None, mandatory=True,), + args=dict( + argstr="%s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + config_file=dict( + argstr="-config_file %s", + extensions=None, + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + init_xfm=dict( + argstr="-init_xfm %s", + extensions=None, + mandatory=True, + ), input_grid_files=dict(), - output_xfm=dict(argstr="%s", extensions=None, genfile=True, position=-1,), - source=dict(argstr="%s", extensions=None, 
mandatory=True, position=-3,), - source_mask=dict(argstr="-source_mask %s", extensions=None, mandatory=True,), - target=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - verbose=dict(argstr="-verbose",), + output_xfm=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + source=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + source_mask=dict( + argstr="-source_mask %s", + extensions=None, + mandatory=True, + ), + target=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + verbose=dict( + argstr="-verbose", + ), ) inputs = NlpFit.input_spec() @@ -25,7 +62,12 @@ def test_NlpFit_inputs(): def test_NlpFit_outputs(): output_map = dict( - output_grid=dict(extensions=None,), output_xfm=dict(extensions=None,), + output_grid=dict( + extensions=None, + ), + output_xfm=dict( + extensions=None, + ), ) outputs = NlpFit.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Norm.py b/nipype/interfaces/minc/tests/test_auto_Norm.py index 462b61459f..f8c2060250 100644 --- a/nipype/interfaces/minc/tests/test_auto_Norm.py +++ b/nipype/interfaces/minc/tests/test_auto_Norm.py @@ -4,16 +4,43 @@ def test_Norm_inputs(): input_map = dict( - args=dict(argstr="%s",), - clamp=dict(argstr="-clamp", usedefault=True,), - clobber=dict(argstr="-clobber", usedefault=True,), - cutoff=dict(argstr="-cutoff %s",), - environ=dict(nohash=True, usedefault=True,), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - lower=dict(argstr="-lower %s",), - mask=dict(argstr="-mask %s", extensions=None,), - out_ceil=dict(argstr="-out_ceil %s",), - out_floor=dict(argstr="-out_floor %s",), + args=dict( + argstr="%s", + ), + clamp=dict( + argstr="-clamp", + usedefault=True, + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + cutoff=dict( + argstr="-cutoff %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + lower=dict( + argstr="-lower %s", + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + out_ceil=dict( + argstr="-out_ceil %s", + ), + out_floor=dict( + argstr="-out_floor %s", + ), output_file=dict( argstr="%s", extensions=None, @@ -30,11 +57,21 @@ def test_Norm_inputs(): name_source=["input_file"], name_template="%s_norm_threshold_mask.mnc", ), - threshold=dict(argstr="-threshold",), - threshold_blur=dict(argstr="-threshold_blur %s",), - threshold_bmt=dict(argstr="-threshold_bmt",), - threshold_perc=dict(argstr="-threshold_perc %s",), - upper=dict(argstr="-upper %s",), + threshold=dict( + argstr="-threshold", + ), + threshold_blur=dict( + argstr="-threshold_blur %s", + ), + threshold_bmt=dict( + argstr="-threshold_bmt", + ), + threshold_perc=dict( + argstr="-threshold_perc %s", + ), + upper=dict( + argstr="-upper %s", + ), ) inputs = Norm.input_spec() @@ -45,8 +82,12 @@ def test_Norm_inputs(): def test_Norm_outputs(): output_map = dict( - output_file=dict(extensions=None,), - output_threshold_mask=dict(extensions=None,), + output_file=dict( + extensions=None, + ), + output_threshold_mask=dict( + extensions=None, + ), ) outputs = Norm.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Pik.py b/nipype/interfaces/minc/tests/test_auto_Pik.py index 530ead0317..3323ef74a1 100644 --- a/nipype/interfaces/minc/tests/test_auto_Pik.py +++ b/nipype/interfaces/minc/tests/test_auto_Pik.py @@ -4,23 +4,50 @@ def test_Pik_inputs(): input_map = dict( - 
annotated_bar=dict(argstr="--anot_bar",), - args=dict(argstr="%s",), - auto_range=dict(argstr="--auto_range", xor=("image_range", "auto_range"),), - clobber=dict(argstr="-clobber", usedefault=True,), - depth=dict(argstr="--depth %s",), - environ=dict(nohash=True, usedefault=True,), + annotated_bar=dict( + argstr="--anot_bar", + ), + args=dict( + argstr="%s", + ), + auto_range=dict( + argstr="--auto_range", + xor=("image_range", "auto_range"), + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + depth=dict( + argstr="--depth %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), horizontal_triplanar_view=dict( argstr="--horizontal", xor=("vertical_triplanar_view", "horizontal_triplanar_view"), ), image_range=dict( - argstr="--image_range %s %s", xor=("image_range", "auto_range"), + argstr="--image_range %s %s", + xor=("image_range", "auto_range"), + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + jpg=dict( + xor=("jpg", "png"), + ), + lookup=dict( + argstr="--lookup %s", + ), + minc_range=dict( + argstr="--range %s %s", ), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - jpg=dict(xor=("jpg", "png"),), - lookup=dict(argstr="--lookup %s",), - minc_range=dict(argstr="--range %s %s",), output_file=dict( argstr="%s", extensions=None, @@ -31,23 +58,54 @@ def test_Pik_inputs(): name_template="%s.png", position=-1, ), - png=dict(xor=("jpg", "png"),), - sagittal_offset=dict(argstr="--sagittal_offset %s",), - sagittal_offset_perc=dict(argstr="--sagittal_offset_perc %d",), - scale=dict(argstr="--scale %s", usedefault=True,), - slice_x=dict(argstr="-x", xor=("slice_z", "slice_y", "slice_x"),), - slice_y=dict(argstr="-y", xor=("slice_z", "slice_y", "slice_x"),), - slice_z=dict(argstr="-z", xor=("slice_z", "slice_y", "slice_x"),), - start=dict(argstr="--slice %s",), - tile_size=dict(argstr="--tilesize %s",), - title=dict(argstr="%s",), - title_size=dict(argstr="--title_size %s", requires=["title"],), - triplanar=dict(argstr="--triplanar",), + png=dict( + xor=("jpg", "png"), + ), + sagittal_offset=dict( + argstr="--sagittal_offset %s", + ), + sagittal_offset_perc=dict( + argstr="--sagittal_offset_perc %d", + ), + scale=dict( + argstr="--scale %s", + usedefault=True, + ), + slice_x=dict( + argstr="-x", + xor=("slice_z", "slice_y", "slice_x"), + ), + slice_y=dict( + argstr="-y", + xor=("slice_z", "slice_y", "slice_x"), + ), + slice_z=dict( + argstr="-z", + xor=("slice_z", "slice_y", "slice_x"), + ), + start=dict( + argstr="--slice %s", + ), + tile_size=dict( + argstr="--tilesize %s", + ), + title=dict( + argstr="%s", + ), + title_size=dict( + argstr="--title_size %s", + requires=["title"], + ), + triplanar=dict( + argstr="--triplanar", + ), vertical_triplanar_view=dict( argstr="--vertical", xor=("vertical_triplanar_view", "horizontal_triplanar_view"), ), - width=dict(argstr="--width %s",), + width=dict( + argstr="--width %s", + ), ) inputs = Pik.input_spec() @@ -57,7 +115,11 @@ def test_Pik_inputs(): def test_Pik_outputs(): - output_map = dict(output_file=dict(extensions=None,),) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Pik.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Resample.py b/nipype/interfaces/minc/tests/test_auto_Resample.py index 32385be6c9..59f2ae180d 100644 --- a/nipype/interfaces/minc/tests/test_auto_Resample.py +++ b/nipype/interfaces/minc/tests/test_auto_Resample.py @@ -4,17 +4,33 @@ def 
test_Resample_inputs(): input_map = dict( - args=dict(argstr="%s",), - clobber=dict(argstr="-clobber", usedefault=True,), + args=dict( + argstr="%s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), coronal_slices=dict( - argstr="-coronal", xor=("transverse", "sagittal", "coronal"), + argstr="-coronal", + xor=("transverse", "sagittal", "coronal"), ), dircos=dict( - argstr="-dircos %s %s %s", xor=("nelements", "nelements_x_y_or_z"), + argstr="-dircos %s %s %s", + xor=("nelements", "nelements_x_y_or_z"), + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fill=dict( + argstr="-fill", + xor=("nofill", "fill"), + ), + fill_value=dict( + argstr="-fillvalue %s", + requires=["fill"], ), - environ=dict(nohash=True, usedefault=True,), - fill=dict(argstr="-fill", xor=("nofill", "fill"),), - fill_value=dict(argstr="-fillvalue %s", requires=["fill"],), format_byte=dict( argstr="-byte", xor=( @@ -120,15 +136,27 @@ def test_Resample_inputs(): ), ), half_width_sinc_window=dict( - argstr="-width %s", requires=["sinc_interpolation"], + argstr="-width %s", + requires=["sinc_interpolation"], + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, ), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), input_grid_files=dict(), - invert_transformation=dict(argstr="-invert_transformation",), + invert_transformation=dict( + argstr="-invert_transformation", + ), keep_real_range=dict( - argstr="-keep_real_range", xor=("keep_real_range", "nokeep_real_range"), + argstr="-keep_real_range", + xor=("keep_real_range", "nokeep_real_range"), + ), + like=dict( + argstr="-like %s", + extensions=None, ), - like=dict(argstr="-like %s", extensions=None,), nearest_neighbour_interpolation=dict( argstr="-nearest_neighbour", xor=( @@ -139,16 +167,24 @@ def test_Resample_inputs(): ), ), nelements=dict( - argstr="-nelements %s %s %s", xor=("nelements", "nelements_x_y_or_z"), + argstr="-nelements %s %s %s", + xor=("nelements", "nelements_x_y_or_z"), + ), + no_fill=dict( + argstr="-nofill", + xor=("nofill", "fill"), ), - no_fill=dict(argstr="-nofill", xor=("nofill", "fill"),), no_input_sampling=dict( - argstr="-use_input_sampling", xor=("vio_transform", "no_input_sampling"), + argstr="-use_input_sampling", + xor=("vio_transform", "no_input_sampling"), ), nokeep_real_range=dict( - argstr="-nokeep_real_range", xor=("keep_real_range", "nokeep_real_range"), + argstr="-nokeep_real_range", + xor=("keep_real_range", "nokeep_real_range"), + ), + origin=dict( + argstr="-origin %s %s %s", ), - origin=dict(argstr="-origin %s %s %s",), output_file=dict( argstr="%s", extensions=None, @@ -158,9 +194,12 @@ def test_Resample_inputs(): name_template="%s_resample.mnc", position=-1, ), - output_range=dict(argstr="-range %s %s",), + output_range=dict( + argstr="-range %s %s", + ), sagittal_slices=dict( - argstr="-sagittal", xor=("transverse", "sagittal", "coronal"), + argstr="-sagittal", + xor=("transverse", "sagittal", "coronal"), ), sinc_interpolation=dict( argstr="-sinc", @@ -181,14 +220,30 @@ def test_Resample_inputs(): requires=["sinc_interpolation"], xor=("sinc_window_hanning", "sinc_window_hamming"), ), - spacetype=dict(argstr="-spacetype %s",), - standard_sampling=dict(argstr="-standard_sampling",), - start=dict(argstr="-start %s %s %s", xor=("nelements", "nelements_x_y_or_z"),), - step=dict(argstr="-step %s %s %s", xor=("nelements", "nelements_x_y_or_z"),), - talairach=dict(argstr="-talairach",), - transformation=dict(argstr="-transformation %s", 
extensions=None,), + spacetype=dict( + argstr="-spacetype %s", + ), + standard_sampling=dict( + argstr="-standard_sampling", + ), + start=dict( + argstr="-start %s %s %s", + xor=("nelements", "nelements_x_y_or_z"), + ), + step=dict( + argstr="-step %s %s %s", + xor=("nelements", "nelements_x_y_or_z"), + ), + talairach=dict( + argstr="-talairach", + ), + transformation=dict( + argstr="-transformation %s", + extensions=None, + ), transverse_slices=dict( - argstr="-transverse", xor=("transverse", "sagittal", "coronal"), + argstr="-transverse", + xor=("transverse", "sagittal", "coronal"), ), tricubic_interpolation=dict( argstr="-tricubic", @@ -208,10 +263,15 @@ def test_Resample_inputs(): "sinc_interpolation", ), ), - two=dict(argstr="-2",), - units=dict(argstr="-units %s",), + two=dict( + argstr="-2", + ), + units=dict( + argstr="-units %s", + ), vio_transform=dict( - argstr="-tfm_input_sampling", xor=("vio_transform", "no_input_sampling"), + argstr="-tfm_input_sampling", + xor=("vio_transform", "no_input_sampling"), ), xdircos=dict( argstr="-xdircos %s", @@ -282,7 +342,11 @@ def test_Resample_inputs(): def test_Resample_outputs(): - output_map = dict(output_file=dict(extensions=None,),) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Resample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Reshape.py b/nipype/interfaces/minc/tests/test_auto_Reshape.py index 92b0e5862e..50286b41e5 100644 --- a/nipype/interfaces/minc/tests/test_auto_Reshape.py +++ b/nipype/interfaces/minc/tests/test_auto_Reshape.py @@ -4,10 +4,23 @@ def test_Reshape_inputs(): input_map = dict( - args=dict(argstr="%s",), - clobber=dict(argstr="-clobber", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + args=dict( + argstr="%s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), output_file=dict( argstr="%s", extensions=None, @@ -17,8 +30,12 @@ def test_Reshape_inputs(): name_template="%s_reshape.mnc", position=-1, ), - verbose=dict(argstr="-verbose",), - write_short=dict(argstr="-short",), + verbose=dict( + argstr="-verbose", + ), + write_short=dict( + argstr="-short", + ), ) inputs = Reshape.input_spec() @@ -28,7 +45,11 @@ def test_Reshape_inputs(): def test_Reshape_outputs(): - output_map = dict(output_file=dict(extensions=None,),) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Reshape.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_ToEcat.py b/nipype/interfaces/minc/tests/test_auto_ToEcat.py index 02936ae4f4..f6ce521232 100644 --- a/nipype/interfaces/minc/tests/test_auto_ToEcat.py +++ b/nipype/interfaces/minc/tests/test_auto_ToEcat.py @@ -4,18 +4,40 @@ def test_ToEcat_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - ignore_acquisition_variable=dict(argstr="-ignore_acquisition_variable",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ignore_acquisition_variable=dict( + argstr="-ignore_acquisition_variable", + ), ignore_ecat_acquisition_variable=dict( argstr="-ignore_ecat_acquisition_variable", ), - ignore_ecat_main=dict(argstr="-ignore_ecat_main",), - 
ignore_ecat_subheader_variable=dict(argstr="-ignore_ecat_subheader_variable",), - ignore_patient_variable=dict(argstr="-ignore_patient_variable",), - ignore_study_variable=dict(argstr="-ignore_study_variable",), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - no_decay_corr_fctr=dict(argstr="-no_decay_corr_fctr",), + ignore_ecat_main=dict( + argstr="-ignore_ecat_main", + ), + ignore_ecat_subheader_variable=dict( + argstr="-ignore_ecat_subheader_variable", + ), + ignore_patient_variable=dict( + argstr="-ignore_patient_variable", + ), + ignore_study_variable=dict( + argstr="-ignore_study_variable", + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + no_decay_corr_fctr=dict( + argstr="-no_decay_corr_fctr", + ), output_file=dict( argstr="%s", extensions=None, @@ -26,7 +48,9 @@ def test_ToEcat_inputs(): name_template="%s_to_ecat.v", position=-1, ), - voxels_as_integers=dict(argstr="-label",), + voxels_as_integers=dict( + argstr="-label", + ), ) inputs = ToEcat.input_spec() @@ -36,7 +60,11 @@ def test_ToEcat_inputs(): def test_ToEcat_outputs(): - output_map = dict(output_file=dict(extensions=None,),) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = ToEcat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_ToRaw.py b/nipype/interfaces/minc/tests/test_auto_ToRaw.py index 7a15e49f65..39940170f6 100644 --- a/nipype/interfaces/minc/tests/test_auto_ToRaw.py +++ b/nipype/interfaces/minc/tests/test_auto_ToRaw.py @@ -4,12 +4,33 @@ def test_ToRaw_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - nonormalize=dict(argstr="-nonormalize", xor=("normalize", "nonormalize"),), - normalize=dict(argstr="-normalize", xor=("normalize", "nonormalize"),), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + nonormalize=dict( + argstr="-nonormalize", + xor=("normalize", "nonormalize"), + ), + normalize=dict( + argstr="-normalize", + xor=("normalize", "nonormalize"), + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), output_file=dict( extensions=None, hash_files=False, @@ -73,7 +94,9 @@ def test_ToRaw_inputs(): "write_double", ), ), - write_range=dict(argstr="-range %s %s",), + write_range=dict( + argstr="-range %s %s", + ), write_short=dict( argstr="-short", xor=( @@ -85,9 +108,13 @@ def test_ToRaw_inputs(): "write_double", ), ), - write_signed=dict(argstr="-signed", xor=("write_signed", "write_unsigned"),), + write_signed=dict( + argstr="-signed", + xor=("write_signed", "write_unsigned"), + ), write_unsigned=dict( - argstr="-unsigned", xor=("write_signed", "write_unsigned"), + argstr="-unsigned", + xor=("write_signed", "write_unsigned"), ), ) inputs = ToRaw.input_spec() @@ -98,7 +125,11 @@ def test_ToRaw_inputs(): def test_ToRaw_outputs(): - output_map = dict(output_file=dict(extensions=None,),) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = ToRaw.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_VolSymm.py b/nipype/interfaces/minc/tests/test_auto_VolSymm.py index 
aeb8e6d23a..a1b89616f2 100644 --- a/nipype/interfaces/minc/tests/test_auto_VolSymm.py +++ b/nipype/interfaces/minc/tests/test_auto_VolSymm.py @@ -4,15 +4,37 @@ def test_VolSymm_inputs(): input_map = dict( - args=dict(argstr="%s",), - clobber=dict(argstr="-clobber", usedefault=True,), - config_file=dict(argstr="-config_file %s", extensions=None,), - environ=dict(nohash=True, usedefault=True,), - fit_linear=dict(argstr="-linear",), - fit_nonlinear=dict(argstr="-nonlinear",), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + args=dict( + argstr="%s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + config_file=dict( + argstr="-config_file %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fit_linear=dict( + argstr="-linear", + ), + fit_nonlinear=dict( + argstr="-nonlinear", + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), input_grid_files=dict(), - nofit=dict(argstr="-nofit",), + nofit=dict( + argstr="-nofit", + ), output_file=dict( argstr="%s", extensions=None, @@ -32,10 +54,18 @@ def test_VolSymm_inputs(): name_template="%s_vol_symm.xfm", position=-2, ), - verbose=dict(argstr="-verbose",), - x=dict(argstr="-x",), - y=dict(argstr="-y",), - z=dict(argstr="-z",), + verbose=dict( + argstr="-verbose", + ), + x=dict( + argstr="-x", + ), + y=dict( + argstr="-y", + ), + z=dict( + argstr="-z", + ), ) inputs = VolSymm.input_spec() @@ -46,9 +76,15 @@ def test_VolSymm_inputs(): def test_VolSymm_outputs(): output_map = dict( - output_file=dict(extensions=None,), - output_grid=dict(extensions=None,), - trans_file=dict(extensions=None,), + output_file=dict( + extensions=None, + ), + output_grid=dict( + extensions=None, + ), + trans_file=dict( + extensions=None, + ), ) outputs = VolSymm.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Volcentre.py b/nipype/interfaces/minc/tests/test_auto_Volcentre.py index 492714adf4..cf9f777f70 100644 --- a/nipype/interfaces/minc/tests/test_auto_Volcentre.py +++ b/nipype/interfaces/minc/tests/test_auto_Volcentre.py @@ -4,12 +4,29 @@ def test_Volcentre_inputs(): input_map = dict( - args=dict(argstr="%s",), - centre=dict(argstr="-centre %s %s %s",), - clobber=dict(argstr="-clobber", usedefault=True,), - com=dict(argstr="-com",), - environ=dict(nohash=True, usedefault=True,), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + args=dict( + argstr="%s", + ), + centre=dict( + argstr="-centre %s %s %s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + com=dict( + argstr="-com", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), output_file=dict( argstr="%s", extensions=None, @@ -19,8 +36,12 @@ def test_Volcentre_inputs(): name_template="%s_volcentre.mnc", position=-1, ), - verbose=dict(argstr="-verbose",), - zero_dircos=dict(argstr="-zero_dircos",), + verbose=dict( + argstr="-verbose", + ), + zero_dircos=dict( + argstr="-zero_dircos", + ), ) inputs = Volcentre.input_spec() @@ -30,7 +51,11 @@ def test_Volcentre_inputs(): def test_Volcentre_outputs(): - output_map = dict(output_file=dict(extensions=None,),) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Volcentre.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Voliso.py b/nipype/interfaces/minc/tests/test_auto_Voliso.py index 
534315d0cf..40d01b5767 100644 --- a/nipype/interfaces/minc/tests/test_auto_Voliso.py +++ b/nipype/interfaces/minc/tests/test_auto_Voliso.py @@ -4,13 +4,32 @@ def test_Voliso_inputs(): input_map = dict( - args=dict(argstr="%s",), - avgstep=dict(argstr="--avgstep",), - clobber=dict(argstr="--clobber", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - maxstep=dict(argstr="--maxstep %s",), - minstep=dict(argstr="--minstep %s",), + args=dict( + argstr="%s", + ), + avgstep=dict( + argstr="--avgstep", + ), + clobber=dict( + argstr="--clobber", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + maxstep=dict( + argstr="--maxstep %s", + ), + minstep=dict( + argstr="--minstep %s", + ), output_file=dict( argstr="%s", extensions=None, @@ -20,7 +39,9 @@ def test_Voliso_inputs(): name_template="%s_voliso.mnc", position=-1, ), - verbose=dict(argstr="--verbose",), + verbose=dict( + argstr="--verbose", + ), ) inputs = Voliso.input_spec() @@ -30,7 +51,11 @@ def test_Voliso_inputs(): def test_Voliso_outputs(): - output_map = dict(output_file=dict(extensions=None,),) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Voliso.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Volpad.py b/nipype/interfaces/minc/tests/test_auto_Volpad.py index ce67c4ef73..5102199657 100644 --- a/nipype/interfaces/minc/tests/test_auto_Volpad.py +++ b/nipype/interfaces/minc/tests/test_auto_Volpad.py @@ -4,13 +4,32 @@ def test_Volpad_inputs(): input_map = dict( - args=dict(argstr="%s",), - auto=dict(argstr="-auto",), - auto_freq=dict(argstr="-auto_freq %s",), - clobber=dict(argstr="-clobber", usedefault=True,), - distance=dict(argstr="-distance %s",), - environ=dict(nohash=True, usedefault=True,), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + args=dict( + argstr="%s", + ), + auto=dict( + argstr="-auto", + ), + auto_freq=dict( + argstr="-auto_freq %s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + distance=dict( + argstr="-distance %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), output_file=dict( argstr="%s", extensions=None, @@ -20,9 +39,15 @@ def test_Volpad_inputs(): name_template="%s_volpad.mnc", position=-1, ), - smooth=dict(argstr="-smooth",), - smooth_distance=dict(argstr="-smooth_distance %s",), - verbose=dict(argstr="-verbose",), + smooth=dict( + argstr="-smooth", + ), + smooth_distance=dict( + argstr="-smooth_distance %s", + ), + verbose=dict( + argstr="-verbose", + ), ) inputs = Volpad.input_spec() @@ -32,7 +57,11 @@ def test_Volpad_inputs(): def test_Volpad_outputs(): - output_map = dict(output_file=dict(extensions=None,),) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Volpad.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_XfmAvg.py b/nipype/interfaces/minc/tests/test_auto_XfmAvg.py index f688494751..f5df6f4d54 100644 --- a/nipype/interfaces/minc/tests/test_auto_XfmAvg.py +++ b/nipype/interfaces/minc/tests/test_auto_XfmAvg.py @@ -4,17 +4,45 @@ def test_XfmAvg_inputs(): input_map = dict( - args=dict(argstr="%s",), - avg_linear=dict(argstr="-avg_linear",), - 
avg_nonlinear=dict(argstr="-avg_nonlinear",), - clobber=dict(argstr="-clobber", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - ignore_linear=dict(argstr="-ignore_linear",), - ignore_nonlinear=dict(argstr="-ignore_nonline",), - input_files=dict(argstr="%s", mandatory=True, position=-2, sep=" ",), + args=dict( + argstr="%s", + ), + avg_linear=dict( + argstr="-avg_linear", + ), + avg_nonlinear=dict( + argstr="-avg_nonlinear", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ignore_linear=dict( + argstr="-ignore_linear", + ), + ignore_nonlinear=dict( + argstr="-ignore_nonline", + ), + input_files=dict( + argstr="%s", + mandatory=True, + position=-2, + sep=" ", + ), input_grid_files=dict(), - output_file=dict(argstr="%s", extensions=None, genfile=True, position=-1,), - verbose=dict(argstr="-verbose",), + output_file=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + verbose=dict( + argstr="-verbose", + ), ) inputs = XfmAvg.input_spec() @@ -25,7 +53,12 @@ def test_XfmAvg_inputs(): def test_XfmAvg_outputs(): output_map = dict( - output_file=dict(extensions=None,), output_grid=dict(extensions=None,), + output_file=dict( + extensions=None, + ), + output_grid=dict( + extensions=None, + ), ) outputs = XfmAvg.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_XfmConcat.py b/nipype/interfaces/minc/tests/test_auto_XfmConcat.py index 23642895da..58144779b8 100644 --- a/nipype/interfaces/minc/tests/test_auto_XfmConcat.py +++ b/nipype/interfaces/minc/tests/test_auto_XfmConcat.py @@ -4,10 +4,23 @@ def test_XfmConcat_inputs(): input_map = dict( - args=dict(argstr="%s",), - clobber=dict(argstr="-clobber", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - input_files=dict(argstr="%s", mandatory=True, position=-2, sep=" ",), + args=dict( + argstr="%s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_files=dict( + argstr="%s", + mandatory=True, + position=-2, + sep=" ", + ), input_grid_files=dict(), output_file=dict( argstr="%s", @@ -18,7 +31,9 @@ def test_XfmConcat_inputs(): name_template="%s_xfmconcat.xfm", position=-1, ), - verbose=dict(argstr="-verbose",), + verbose=dict( + argstr="-verbose", + ), ) inputs = XfmConcat.input_spec() @@ -28,7 +43,12 @@ def test_XfmConcat_inputs(): def test_XfmConcat_outputs(): - output_map = dict(output_file=dict(extensions=None,), output_grids=dict(),) + output_map = dict( + output_file=dict( + extensions=None, + ), + output_grids=dict(), + ) outputs = XfmConcat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_XfmInvert.py b/nipype/interfaces/minc/tests/test_auto_XfmInvert.py index 7f0c42c433..aa8fb61ccd 100644 --- a/nipype/interfaces/minc/tests/test_auto_XfmInvert.py +++ b/nipype/interfaces/minc/tests/test_auto_XfmInvert.py @@ -4,12 +4,32 @@ def test_XfmInvert_inputs(): input_map = dict( - args=dict(argstr="%s",), - clobber=dict(argstr="-clobber", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - output_file=dict(argstr="%s", extensions=None, genfile=True, position=-1,), - verbose=dict(argstr="-verbose",), + args=dict( + argstr="%s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr="%s", 
+ extensions=None, + mandatory=True, + position=-2, + ), + output_file=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + verbose=dict( + argstr="-verbose", + ), ) inputs = XfmInvert.input_spec() @@ -20,7 +40,12 @@ def test_XfmInvert_inputs(): def test_XfmInvert_outputs(): output_map = dict( - output_file=dict(extensions=None,), output_grid=dict(extensions=None,), + output_file=dict( + extensions=None, + ), + output_grid=dict( + extensions=None, + ), ) outputs = XfmInvert.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py index d97c5d904b..9daa1b996e 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py @@ -4,32 +4,96 @@ def test_JistBrainMgdmSegmentation_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inAdjust=dict(argstr="--inAdjust %s",), - inAtlas=dict(argstr="--inAtlas %s", extensions=None,), - inCompute=dict(argstr="--inCompute %s",), - inCurvature=dict(argstr="--inCurvature %f",), - inData=dict(argstr="--inData %f",), - inFLAIR=dict(argstr="--inFLAIR %s", extensions=None,), - inMP2RAGE=dict(argstr="--inMP2RAGE %s", extensions=None,), - inMP2RAGE2=dict(argstr="--inMP2RAGE2 %s", extensions=None,), - inMPRAGE=dict(argstr="--inMPRAGE %s", extensions=None,), - inMax=dict(argstr="--inMax %d",), - inMin=dict(argstr="--inMin %f",), - inOutput=dict(argstr="--inOutput %s",), - inPV=dict(argstr="--inPV %s", extensions=None,), - inPosterior=dict(argstr="--inPosterior %f",), - inSteps=dict(argstr="--inSteps %d",), - inTopology=dict(argstr="--inTopology %s",), - null=dict(argstr="--null %s",), - outLevelset=dict(argstr="--outLevelset %s", hash_files=False,), - outPosterior2=dict(argstr="--outPosterior2 %s", hash_files=False,), - outPosterior3=dict(argstr="--outPosterior3 %s", hash_files=False,), - outSegmented=dict(argstr="--outSegmented %s", hash_files=False,), - xDefaultMem=dict(argstr="-xDefaultMem %d",), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), - xPrefExt=dict(argstr="--xPrefExt %s",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inAdjust=dict( + argstr="--inAdjust %s", + ), + inAtlas=dict( + argstr="--inAtlas %s", + extensions=None, + ), + inCompute=dict( + argstr="--inCompute %s", + ), + inCurvature=dict( + argstr="--inCurvature %f", + ), + inData=dict( + argstr="--inData %f", + ), + inFLAIR=dict( + argstr="--inFLAIR %s", + extensions=None, + ), + inMP2RAGE=dict( + argstr="--inMP2RAGE %s", + extensions=None, + ), + inMP2RAGE2=dict( + argstr="--inMP2RAGE2 %s", + extensions=None, + ), + inMPRAGE=dict( + argstr="--inMPRAGE %s", + extensions=None, + ), + inMax=dict( + argstr="--inMax %d", + ), + inMin=dict( + argstr="--inMin %f", + ), + inOutput=dict( + argstr="--inOutput %s", + ), + inPV=dict( + argstr="--inPV %s", + extensions=None, + ), + inPosterior=dict( + argstr="--inPosterior %f", + ), + inSteps=dict( + argstr="--inSteps %d", + ), + inTopology=dict( + argstr="--inTopology %s", + ), + null=dict( + argstr="--null %s", + ), + outLevelset=dict( + argstr="--outLevelset %s", + hash_files=False, + ), + outPosterior2=dict( + argstr="--outPosterior2 %s", + hash_files=False, + ), + outPosterior3=dict( + argstr="--outPosterior3 %s", + hash_files=False, + ), + outSegmented=dict( + argstr="--outSegmented %s", + hash_files=False, + ), + 
xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = JistBrainMgdmSegmentation.input_spec() @@ -40,10 +104,18 @@ def test_JistBrainMgdmSegmentation_inputs(): def test_JistBrainMgdmSegmentation_outputs(): output_map = dict( - outLevelset=dict(extensions=None,), - outPosterior2=dict(extensions=None,), - outPosterior3=dict(extensions=None,), - outSegmented=dict(extensions=None,), + outLevelset=dict( + extensions=None, + ), + outPosterior2=dict( + extensions=None, + ), + outPosterior3=dict( + extensions=None, + ), + outSegmented=dict( + extensions=None, + ), ) outputs = JistBrainMgdmSegmentation.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py index fa55aa0d75..e7706b16b5 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py @@ -4,17 +4,44 @@ def test_JistBrainMp2rageDuraEstimation_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inDistance=dict(argstr="--inDistance %f",), - inSecond=dict(argstr="--inSecond %s", extensions=None,), - inSkull=dict(argstr="--inSkull %s", extensions=None,), - inoutput=dict(argstr="--inoutput %s",), - null=dict(argstr="--null %s",), - outDura=dict(argstr="--outDura %s", hash_files=False,), - xDefaultMem=dict(argstr="-xDefaultMem %d",), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), - xPrefExt=dict(argstr="--xPrefExt %s",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inDistance=dict( + argstr="--inDistance %f", + ), + inSecond=dict( + argstr="--inSecond %s", + extensions=None, + ), + inSkull=dict( + argstr="--inSkull %s", + extensions=None, + ), + inoutput=dict( + argstr="--inoutput %s", + ), + null=dict( + argstr="--null %s", + ), + outDura=dict( + argstr="--outDura %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = JistBrainMp2rageDuraEstimation.input_spec() @@ -24,7 +51,11 @@ def test_JistBrainMp2rageDuraEstimation_inputs(): def test_JistBrainMp2rageDuraEstimation_outputs(): - output_map = dict(outDura=dict(extensions=None,),) + output_map = dict( + outDura=dict( + extensions=None, + ), + ) outputs = JistBrainMp2rageDuraEstimation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py index 5f4a6eb616..637b50dcad 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py @@ -4,21 +4,61 @@ def test_JistBrainMp2rageSkullStripping_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inFilter=dict(argstr="--inFilter %s", extensions=None,), - inSecond=dict(argstr="--inSecond %s", extensions=None,), - inSkip=dict(argstr="--inSkip %s",), - inT1=dict(argstr="--inT1 %s", extensions=None,), - inT1weighted=dict(argstr="--inT1weighted %s", extensions=None,), - null=dict(argstr="--null %s",), - 
outBrain=dict(argstr="--outBrain %s", hash_files=False,), - outMasked=dict(argstr="--outMasked %s", hash_files=False,), - outMasked2=dict(argstr="--outMasked2 %s", hash_files=False,), - outMasked3=dict(argstr="--outMasked3 %s", hash_files=False,), - xDefaultMem=dict(argstr="-xDefaultMem %d",), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), - xPrefExt=dict(argstr="--xPrefExt %s",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inFilter=dict( + argstr="--inFilter %s", + extensions=None, + ), + inSecond=dict( + argstr="--inSecond %s", + extensions=None, + ), + inSkip=dict( + argstr="--inSkip %s", + ), + inT1=dict( + argstr="--inT1 %s", + extensions=None, + ), + inT1weighted=dict( + argstr="--inT1weighted %s", + extensions=None, + ), + null=dict( + argstr="--null %s", + ), + outBrain=dict( + argstr="--outBrain %s", + hash_files=False, + ), + outMasked=dict( + argstr="--outMasked %s", + hash_files=False, + ), + outMasked2=dict( + argstr="--outMasked2 %s", + hash_files=False, + ), + outMasked3=dict( + argstr="--outMasked3 %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = JistBrainMp2rageSkullStripping.input_spec() @@ -29,10 +69,18 @@ def test_JistBrainMp2rageSkullStripping_inputs(): def test_JistBrainMp2rageSkullStripping_outputs(): output_map = dict( - outBrain=dict(extensions=None,), - outMasked=dict(extensions=None,), - outMasked2=dict(extensions=None,), - outMasked3=dict(extensions=None,), + outBrain=dict( + extensions=None, + ), + outMasked=dict( + extensions=None, + ), + outMasked2=dict( + extensions=None, + ), + outMasked3=dict( + extensions=None, + ), ) outputs = JistBrainMp2rageSkullStripping.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py index 016b18f1e7..61a3e2b074 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py @@ -4,16 +4,40 @@ def test_JistBrainPartialVolumeFilter_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inInput=dict(argstr="--inInput %s", extensions=None,), - inPV=dict(argstr="--inPV %s",), - inoutput=dict(argstr="--inoutput %s",), - null=dict(argstr="--null %s",), - outPartial=dict(argstr="--outPartial %s", hash_files=False,), - xDefaultMem=dict(argstr="-xDefaultMem %d",), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), - xPrefExt=dict(argstr="--xPrefExt %s",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inInput=dict( + argstr="--inInput %s", + extensions=None, + ), + inPV=dict( + argstr="--inPV %s", + ), + inoutput=dict( + argstr="--inoutput %s", + ), + null=dict( + argstr="--null %s", + ), + outPartial=dict( + argstr="--outPartial %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = JistBrainPartialVolumeFilter.input_spec() @@ -23,7 +47,11 @@ def test_JistBrainPartialVolumeFilter_inputs(): def test_JistBrainPartialVolumeFilter_outputs(): - output_map = dict(outPartial=dict(extensions=None,),) + output_map = dict( + outPartial=dict( + 
extensions=None, + ), + ) outputs = JistBrainPartialVolumeFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py b/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py index 88ae68161c..41ae9c5cce 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py @@ -4,21 +4,56 @@ def test_JistCortexSurfaceMeshInflation_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inLevelset=dict(argstr="--inLevelset %s", extensions=None,), - inLorentzian=dict(argstr="--inLorentzian %s",), - inMax=dict(argstr="--inMax %d",), - inMean=dict(argstr="--inMean %f",), - inSOR=dict(argstr="--inSOR %f",), - inStep=dict(argstr="--inStep %d",), - inTopology=dict(argstr="--inTopology %s",), - null=dict(argstr="--null %s",), - outInflated=dict(argstr="--outInflated %s", hash_files=False,), - outOriginal=dict(argstr="--outOriginal %s", hash_files=False,), - xDefaultMem=dict(argstr="-xDefaultMem %d",), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), - xPrefExt=dict(argstr="--xPrefExt %s",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inLevelset=dict( + argstr="--inLevelset %s", + extensions=None, + ), + inLorentzian=dict( + argstr="--inLorentzian %s", + ), + inMax=dict( + argstr="--inMax %d", + ), + inMean=dict( + argstr="--inMean %f", + ), + inSOR=dict( + argstr="--inSOR %f", + ), + inStep=dict( + argstr="--inStep %d", + ), + inTopology=dict( + argstr="--inTopology %s", + ), + null=dict( + argstr="--null %s", + ), + outInflated=dict( + argstr="--outInflated %s", + hash_files=False, + ), + outOriginal=dict( + argstr="--outOriginal %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = JistCortexSurfaceMeshInflation.input_spec() @@ -29,7 +64,12 @@ def test_JistCortexSurfaceMeshInflation_inputs(): def test_JistCortexSurfaceMeshInflation_outputs(): output_map = dict( - outInflated=dict(extensions=None,), outOriginal=dict(extensions=None,), + outInflated=dict( + extensions=None, + ), + outOriginal=dict( + extensions=None, + ), ) outputs = JistCortexSurfaceMeshInflation.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py b/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py index aaab939380..94d9cc525a 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py @@ -4,22 +4,63 @@ def test_JistIntensityMp2rageMasking_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inBackground=dict(argstr="--inBackground %s",), - inMasking=dict(argstr="--inMasking %s",), - inQuantitative=dict(argstr="--inQuantitative %s", extensions=None,), - inSecond=dict(argstr="--inSecond %s", extensions=None,), - inSkip=dict(argstr="--inSkip %s",), - inT1weighted=dict(argstr="--inT1weighted %s", extensions=None,), - null=dict(argstr="--null %s",), - outMasked=dict(argstr="--outMasked_T1_Map %s", hash_files=False,), - outMasked2=dict(argstr="--outMasked_T1weighted %s", hash_files=False,), - outSignal=dict(argstr="--outSignal_Proba %s", 
hash_files=False,), - outSignal2=dict(argstr="--outSignal_Mask %s", hash_files=False,), - xDefaultMem=dict(argstr="-xDefaultMem %d",), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), - xPrefExt=dict(argstr="--xPrefExt %s",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inBackground=dict( + argstr="--inBackground %s", + ), + inMasking=dict( + argstr="--inMasking %s", + ), + inQuantitative=dict( + argstr="--inQuantitative %s", + extensions=None, + ), + inSecond=dict( + argstr="--inSecond %s", + extensions=None, + ), + inSkip=dict( + argstr="--inSkip %s", + ), + inT1weighted=dict( + argstr="--inT1weighted %s", + extensions=None, + ), + null=dict( + argstr="--null %s", + ), + outMasked=dict( + argstr="--outMasked_T1_Map %s", + hash_files=False, + ), + outMasked2=dict( + argstr="--outMasked_T1weighted %s", + hash_files=False, + ), + outSignal=dict( + argstr="--outSignal_Proba %s", + hash_files=False, + ), + outSignal2=dict( + argstr="--outSignal_Mask %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = JistIntensityMp2rageMasking.input_spec() @@ -30,10 +71,18 @@ def test_JistIntensityMp2rageMasking_inputs(): def test_JistIntensityMp2rageMasking_outputs(): output_map = dict( - outMasked=dict(extensions=None,), - outMasked2=dict(extensions=None,), - outSignal=dict(extensions=None,), - outSignal2=dict(extensions=None,), + outMasked=dict( + extensions=None, + ), + outMasked2=dict( + extensions=None, + ), + outSignal=dict( + extensions=None, + ), + outSignal2=dict( + extensions=None, + ), ) outputs = JistIntensityMp2rageMasking.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py index d8447b9773..0cc1501e4f 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py @@ -4,16 +4,41 @@ def test_JistLaminarProfileCalculator_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inIntensity=dict(argstr="--inIntensity %s", extensions=None,), - inMask=dict(argstr="--inMask %s", extensions=None,), - incomputed=dict(argstr="--incomputed %s",), - null=dict(argstr="--null %s",), - outResult=dict(argstr="--outResult %s", hash_files=False,), - xDefaultMem=dict(argstr="-xDefaultMem %d",), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), - xPrefExt=dict(argstr="--xPrefExt %s",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inIntensity=dict( + argstr="--inIntensity %s", + extensions=None, + ), + inMask=dict( + argstr="--inMask %s", + extensions=None, + ), + incomputed=dict( + argstr="--incomputed %s", + ), + null=dict( + argstr="--null %s", + ), + outResult=dict( + argstr="--outResult %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = JistLaminarProfileCalculator.input_spec() @@ -23,7 +48,11 @@ def test_JistLaminarProfileCalculator_inputs(): def test_JistLaminarProfileCalculator_outputs(): - output_map = dict(outResult=dict(extensions=None,),) + output_map = dict( + outResult=dict( + extensions=None, + ), + ) 
outputs = JistLaminarProfileCalculator.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py index 4a0c4f392a..758d331935 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py @@ -4,18 +4,46 @@ def test_JistLaminarProfileGeometry_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inProfile=dict(argstr="--inProfile %s", extensions=None,), - incomputed=dict(argstr="--incomputed %s",), - inoutside=dict(argstr="--inoutside %f",), - inregularization=dict(argstr="--inregularization %s",), - insmoothing=dict(argstr="--insmoothing %f",), - null=dict(argstr="--null %s",), - outResult=dict(argstr="--outResult %s", hash_files=False,), - xDefaultMem=dict(argstr="-xDefaultMem %d",), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), - xPrefExt=dict(argstr="--xPrefExt %s",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inProfile=dict( + argstr="--inProfile %s", + extensions=None, + ), + incomputed=dict( + argstr="--incomputed %s", + ), + inoutside=dict( + argstr="--inoutside %f", + ), + inregularization=dict( + argstr="--inregularization %s", + ), + insmoothing=dict( + argstr="--insmoothing %f", + ), + null=dict( + argstr="--null %s", + ), + outResult=dict( + argstr="--outResult %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = JistLaminarProfileGeometry.input_spec() @@ -25,7 +53,11 @@ def test_JistLaminarProfileGeometry_inputs(): def test_JistLaminarProfileGeometry_outputs(): - output_map = dict(outResult=dict(extensions=None,),) + output_map = dict( + outResult=dict( + extensions=None, + ), + ) outputs = JistLaminarProfileGeometry.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py index 3ad28793f0..65841c48a9 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py @@ -4,17 +4,46 @@ def test_JistLaminarProfileSampling_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inCortex=dict(argstr="--inCortex %s", extensions=None,), - inIntensity=dict(argstr="--inIntensity %s", extensions=None,), - inProfile=dict(argstr="--inProfile %s", extensions=None,), - null=dict(argstr="--null %s",), - outProfile2=dict(argstr="--outProfile2 %s", hash_files=False,), - outProfilemapped=dict(argstr="--outProfilemapped %s", hash_files=False,), - xDefaultMem=dict(argstr="-xDefaultMem %d",), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), - xPrefExt=dict(argstr="--xPrefExt %s",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inCortex=dict( + argstr="--inCortex %s", + extensions=None, + ), + inIntensity=dict( + argstr="--inIntensity %s", + extensions=None, + ), + inProfile=dict( + argstr="--inProfile %s", + extensions=None, + ), + null=dict( + argstr="--null %s", + ), + outProfile2=dict( + argstr="--outProfile2 %s", + 
hash_files=False, + ), + outProfilemapped=dict( + argstr="--outProfilemapped %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = JistLaminarProfileSampling.input_spec() @@ -25,7 +54,12 @@ def test_JistLaminarProfileSampling_inputs(): def test_JistLaminarProfileSampling_outputs(): output_map = dict( - outProfile2=dict(extensions=None,), outProfilemapped=dict(extensions=None,), + outProfile2=dict( + extensions=None, + ), + outProfilemapped=dict( + extensions=None, + ), ) outputs = JistLaminarProfileSampling.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py index 7120db3045..fed4abfca1 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py @@ -4,17 +4,45 @@ def test_JistLaminarROIAveraging_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inIntensity=dict(argstr="--inIntensity %s", extensions=None,), - inMask=dict(argstr="--inMask %s", extensions=None,), - inROI=dict(argstr="--inROI %s", extensions=None,), - inROI2=dict(argstr="--inROI2 %s",), - null=dict(argstr="--null %s",), - outROI3=dict(argstr="--outROI3 %s", hash_files=False,), - xDefaultMem=dict(argstr="-xDefaultMem %d",), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), - xPrefExt=dict(argstr="--xPrefExt %s",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inIntensity=dict( + argstr="--inIntensity %s", + extensions=None, + ), + inMask=dict( + argstr="--inMask %s", + extensions=None, + ), + inROI=dict( + argstr="--inROI %s", + extensions=None, + ), + inROI2=dict( + argstr="--inROI2 %s", + ), + null=dict( + argstr="--null %s", + ), + outROI3=dict( + argstr="--outROI3 %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = JistLaminarROIAveraging.input_spec() @@ -24,7 +52,11 @@ def test_JistLaminarROIAveraging_inputs(): def test_JistLaminarROIAveraging_outputs(): - output_map = dict(outROI3=dict(extensions=None,),) + output_map = dict( + outROI3=dict( + extensions=None, + ), + ) outputs = JistLaminarROIAveraging.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py index 7b57e483ef..31d34ae32e 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py @@ -4,26 +4,73 @@ def test_JistLaminarVolumetricLayering_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inInner=dict(argstr="--inInner %s", extensions=None,), - inLayering=dict(argstr="--inLayering %s",), - inLayering2=dict(argstr="--inLayering2 %s",), - inMax=dict(argstr="--inMax %d",), - inMin=dict(argstr="--inMin %f",), - inNumber=dict(argstr="--inNumber %d",), - inOuter=dict(argstr="--inOuter %s", extensions=None,), - inTopology=dict(argstr="--inTopology %s",), - incurvature=dict(argstr="--incurvature %d",), - 
inpresmooth=dict(argstr="--inpresmooth %s",), - inratio=dict(argstr="--inratio %f",), - null=dict(argstr="--null %s",), - outContinuous=dict(argstr="--outContinuous %s", hash_files=False,), - outDiscrete=dict(argstr="--outDiscrete %s", hash_files=False,), - outLayer=dict(argstr="--outLayer %s", hash_files=False,), - xDefaultMem=dict(argstr="-xDefaultMem %d",), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), - xPrefExt=dict(argstr="--xPrefExt %s",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inInner=dict( + argstr="--inInner %s", + extensions=None, + ), + inLayering=dict( + argstr="--inLayering %s", + ), + inLayering2=dict( + argstr="--inLayering2 %s", + ), + inMax=dict( + argstr="--inMax %d", + ), + inMin=dict( + argstr="--inMin %f", + ), + inNumber=dict( + argstr="--inNumber %d", + ), + inOuter=dict( + argstr="--inOuter %s", + extensions=None, + ), + inTopology=dict( + argstr="--inTopology %s", + ), + incurvature=dict( + argstr="--incurvature %d", + ), + inpresmooth=dict( + argstr="--inpresmooth %s", + ), + inratio=dict( + argstr="--inratio %f", + ), + null=dict( + argstr="--null %s", + ), + outContinuous=dict( + argstr="--outContinuous %s", + hash_files=False, + ), + outDiscrete=dict( + argstr="--outDiscrete %s", + hash_files=False, + ), + outLayer=dict( + argstr="--outLayer %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = JistLaminarVolumetricLayering.input_spec() @@ -34,9 +81,15 @@ def test_JistLaminarVolumetricLayering_inputs(): def test_JistLaminarVolumetricLayering_outputs(): output_map = dict( - outContinuous=dict(extensions=None,), - outDiscrete=dict(extensions=None,), - outLayer=dict(extensions=None,), + outContinuous=dict( + extensions=None, + ), + outDiscrete=dict( + extensions=None, + ), + outLayer=dict( + extensions=None, + ), ) outputs = JistLaminarVolumetricLayering.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py index 9d7ff807d6..7b9a0fc859 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py @@ -4,16 +4,41 @@ def test_MedicAlgorithmImageCalculator_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inOperation=dict(argstr="--inOperation %s",), - inVolume=dict(argstr="--inVolume %s", extensions=None,), - inVolume2=dict(argstr="--inVolume2 %s", extensions=None,), - null=dict(argstr="--null %s",), - outResult=dict(argstr="--outResult %s", hash_files=False,), - xDefaultMem=dict(argstr="-xDefaultMem %d",), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), - xPrefExt=dict(argstr="--xPrefExt %s",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inOperation=dict( + argstr="--inOperation %s", + ), + inVolume=dict( + argstr="--inVolume %s", + extensions=None, + ), + inVolume2=dict( + argstr="--inVolume2 %s", + extensions=None, + ), + null=dict( + argstr="--null %s", + ), + outResult=dict( + argstr="--outResult %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) 
inputs = MedicAlgorithmImageCalculator.input_spec() @@ -23,7 +48,11 @@ def test_MedicAlgorithmImageCalculator_inputs(): def test_MedicAlgorithmImageCalculator_outputs(): - output_map = dict(outResult=dict(extensions=None,),) + output_map = dict( + outResult=dict( + extensions=None, + ), + ) outputs = MedicAlgorithmImageCalculator.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py index f9036d1207..4fb5f2567b 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py @@ -4,42 +4,131 @@ def test_MedicAlgorithmLesionToads_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inAtlas=dict(argstr="--inAtlas %s",), - inAtlas2=dict(argstr="--inAtlas2 %s", extensions=None,), - inAtlas3=dict(argstr="--inAtlas3 %s", extensions=None,), - inAtlas4=dict(argstr="--inAtlas4 %s", extensions=None,), - inAtlas5=dict(argstr="--inAtlas5 %f",), - inAtlas6=dict(argstr="--inAtlas6 %s",), - inConnectivity=dict(argstr="--inConnectivity %s",), - inCorrect=dict(argstr="--inCorrect %s",), - inFLAIR=dict(argstr="--inFLAIR %s", extensions=None,), - inInclude=dict(argstr="--inInclude %s",), - inMaximum=dict(argstr="--inMaximum %d",), - inMaximum2=dict(argstr="--inMaximum2 %d",), - inMaximum3=dict(argstr="--inMaximum3 %d",), - inMaximum4=dict(argstr="--inMaximum4 %f",), - inMaximum5=dict(argstr="--inMaximum5 %d",), - inOutput=dict(argstr="--inOutput %s",), - inOutput2=dict(argstr="--inOutput2 %s",), - inOutput3=dict(argstr="--inOutput3 %s",), - inSmooting=dict(argstr="--inSmooting %f",), - inT1_MPRAGE=dict(argstr="--inT1_MPRAGE %s", extensions=None,), - inT1_SPGR=dict(argstr="--inT1_SPGR %s", extensions=None,), - null=dict(argstr="--null %s",), - outCortical=dict(argstr="--outCortical %s", hash_files=False,), - outFilled=dict(argstr="--outFilled %s", hash_files=False,), - outHard=dict(argstr="--outHard %s", hash_files=False,), - outHard2=dict(argstr="--outHard2 %s", hash_files=False,), - outInhomogeneity=dict(argstr="--outInhomogeneity %s", hash_files=False,), - outLesion=dict(argstr="--outLesion %s", hash_files=False,), - outMembership=dict(argstr="--outMembership %s", hash_files=False,), - outSulcal=dict(argstr="--outSulcal %s", hash_files=False,), - outWM=dict(argstr="--outWM %s", hash_files=False,), - xDefaultMem=dict(argstr="-xDefaultMem %d",), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), - xPrefExt=dict(argstr="--xPrefExt %s",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inAtlas=dict( + argstr="--inAtlas %s", + ), + inAtlas2=dict( + argstr="--inAtlas2 %s", + extensions=None, + ), + inAtlas3=dict( + argstr="--inAtlas3 %s", + extensions=None, + ), + inAtlas4=dict( + argstr="--inAtlas4 %s", + extensions=None, + ), + inAtlas5=dict( + argstr="--inAtlas5 %f", + ), + inAtlas6=dict( + argstr="--inAtlas6 %s", + ), + inConnectivity=dict( + argstr="--inConnectivity %s", + ), + inCorrect=dict( + argstr="--inCorrect %s", + ), + inFLAIR=dict( + argstr="--inFLAIR %s", + extensions=None, + ), + inInclude=dict( + argstr="--inInclude %s", + ), + inMaximum=dict( + argstr="--inMaximum %d", + ), + inMaximum2=dict( + argstr="--inMaximum2 %d", + ), + inMaximum3=dict( + argstr="--inMaximum3 %d", + ), + inMaximum4=dict( + argstr="--inMaximum4 %f", + ), + 
inMaximum5=dict( + argstr="--inMaximum5 %d", + ), + inOutput=dict( + argstr="--inOutput %s", + ), + inOutput2=dict( + argstr="--inOutput2 %s", + ), + inOutput3=dict( + argstr="--inOutput3 %s", + ), + inSmooting=dict( + argstr="--inSmooting %f", + ), + inT1_MPRAGE=dict( + argstr="--inT1_MPRAGE %s", + extensions=None, + ), + inT1_SPGR=dict( + argstr="--inT1_SPGR %s", + extensions=None, + ), + null=dict( + argstr="--null %s", + ), + outCortical=dict( + argstr="--outCortical %s", + hash_files=False, + ), + outFilled=dict( + argstr="--outFilled %s", + hash_files=False, + ), + outHard=dict( + argstr="--outHard %s", + hash_files=False, + ), + outHard2=dict( + argstr="--outHard2 %s", + hash_files=False, + ), + outInhomogeneity=dict( + argstr="--outInhomogeneity %s", + hash_files=False, + ), + outLesion=dict( + argstr="--outLesion %s", + hash_files=False, + ), + outMembership=dict( + argstr="--outMembership %s", + hash_files=False, + ), + outSulcal=dict( + argstr="--outSulcal %s", + hash_files=False, + ), + outWM=dict( + argstr="--outWM %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = MedicAlgorithmLesionToads.input_spec() @@ -50,15 +139,33 @@ def test_MedicAlgorithmLesionToads_inputs(): def test_MedicAlgorithmLesionToads_outputs(): output_map = dict( - outCortical=dict(extensions=None,), - outFilled=dict(extensions=None,), - outHard=dict(extensions=None,), - outHard2=dict(extensions=None,), - outInhomogeneity=dict(extensions=None,), - outLesion=dict(extensions=None,), - outMembership=dict(extensions=None,), - outSulcal=dict(extensions=None,), - outWM=dict(extensions=None,), + outCortical=dict( + extensions=None, + ), + outFilled=dict( + extensions=None, + ), + outHard=dict( + extensions=None, + ), + outHard2=dict( + extensions=None, + ), + outInhomogeneity=dict( + extensions=None, + ), + outLesion=dict( + extensions=None, + ), + outMembership=dict( + extensions=None, + ), + outSulcal=dict( + extensions=None, + ), + outWM=dict( + extensions=None, + ), ) outputs = MedicAlgorithmLesionToads.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py index 0c941fdbc7..49c307f27f 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py @@ -4,22 +4,59 @@ def test_MedicAlgorithmMipavReorient_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inInterpolation=dict(argstr="--inInterpolation %s",), - inNew=dict(argstr="--inNew %s",), - inResolution=dict(argstr="--inResolution %s",), - inSource=dict(argstr="--inSource %s", sep=";",), - inTemplate=dict(argstr="--inTemplate %s", extensions=None,), - inUser=dict(argstr="--inUser %s",), - inUser2=dict(argstr="--inUser2 %s",), - inUser3=dict(argstr="--inUser3 %s",), - inUser4=dict(argstr="--inUser4 %s",), - null=dict(argstr="--null %s",), - outReoriented=dict(argstr="--outReoriented %s", sep=";",), - xDefaultMem=dict(argstr="-xDefaultMem %d",), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), - xPrefExt=dict(argstr="--xPrefExt %s",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inInterpolation=dict( + argstr="--inInterpolation %s", + ), + inNew=dict( + argstr="--inNew %s", + ), + 
inResolution=dict( + argstr="--inResolution %s", + ), + inSource=dict( + argstr="--inSource %s", + sep=";", + ), + inTemplate=dict( + argstr="--inTemplate %s", + extensions=None, + ), + inUser=dict( + argstr="--inUser %s", + ), + inUser2=dict( + argstr="--inUser2 %s", + ), + inUser3=dict( + argstr="--inUser3 %s", + ), + inUser4=dict( + argstr="--inUser4 %s", + ), + null=dict( + argstr="--null %s", + ), + outReoriented=dict( + argstr="--outReoriented %s", + sep=";", + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = MedicAlgorithmMipavReorient.input_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py index d9a8a25023..bf895247a6 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py @@ -4,23 +4,62 @@ def test_MedicAlgorithmN3_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inAutomatic=dict(argstr="--inAutomatic %s",), - inEnd=dict(argstr="--inEnd %f",), - inField=dict(argstr="--inField %f",), - inInput=dict(argstr="--inInput %s", extensions=None,), - inKernel=dict(argstr="--inKernel %f",), - inMaximum=dict(argstr="--inMaximum %d",), - inSignal=dict(argstr="--inSignal %f",), - inSubsample=dict(argstr="--inSubsample %f",), - inWeiner=dict(argstr="--inWeiner %f",), - null=dict(argstr="--null %s",), - outInhomogeneity=dict(argstr="--outInhomogeneity %s", hash_files=False,), - outInhomogeneity2=dict(argstr="--outInhomogeneity2 %s", hash_files=False,), - xDefaultMem=dict(argstr="-xDefaultMem %d",), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), - xPrefExt=dict(argstr="--xPrefExt %s",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inAutomatic=dict( + argstr="--inAutomatic %s", + ), + inEnd=dict( + argstr="--inEnd %f", + ), + inField=dict( + argstr="--inField %f", + ), + inInput=dict( + argstr="--inInput %s", + extensions=None, + ), + inKernel=dict( + argstr="--inKernel %f", + ), + inMaximum=dict( + argstr="--inMaximum %d", + ), + inSignal=dict( + argstr="--inSignal %f", + ), + inSubsample=dict( + argstr="--inSubsample %f", + ), + inWeiner=dict( + argstr="--inWeiner %f", + ), + null=dict( + argstr="--null %s", + ), + outInhomogeneity=dict( + argstr="--outInhomogeneity %s", + hash_files=False, + ), + outInhomogeneity2=dict( + argstr="--outInhomogeneity2 %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = MedicAlgorithmN3.input_spec() @@ -31,8 +70,12 @@ def test_MedicAlgorithmN3_inputs(): def test_MedicAlgorithmN3_outputs(): output_map = dict( - outInhomogeneity=dict(extensions=None,), - outInhomogeneity2=dict(extensions=None,), + outInhomogeneity=dict( + extensions=None, + ), + outInhomogeneity2=dict( + extensions=None, + ), ) outputs = MedicAlgorithmN3.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py index 8e51a9c7bc..b62def8a4f 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py @@ -4,55 +4,166 @@ 
def test_MedicAlgorithmSPECTRE2010_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inApply=dict(argstr="--inApply %s",), - inAtlas=dict(argstr="--inAtlas %s", extensions=None,), - inBackground=dict(argstr="--inBackground %f",), - inCoarse=dict(argstr="--inCoarse %f",), - inCost=dict(argstr="--inCost %s",), - inDegrees=dict(argstr="--inDegrees %s",), - inFind=dict(argstr="--inFind %s",), - inFine=dict(argstr="--inFine %f",), - inImage=dict(argstr="--inImage %s",), - inInhomogeneity=dict(argstr="--inInhomogeneity %s",), - inInitial=dict(argstr="--inInitial %d",), - inInitial2=dict(argstr="--inInitial2 %f",), - inInput=dict(argstr="--inInput %s", extensions=None,), - inMMC=dict(argstr="--inMMC %d",), - inMMC2=dict(argstr="--inMMC2 %d",), - inMaximum=dict(argstr="--inMaximum %f",), - inMinimum=dict(argstr="--inMinimum %f",), - inMinimum2=dict(argstr="--inMinimum2 %f",), - inMultiple=dict(argstr="--inMultiple %d",), - inMultithreading=dict(argstr="--inMultithreading %s",), - inNumber=dict(argstr="--inNumber %d",), - inNumber2=dict(argstr="--inNumber2 %d",), - inOutput=dict(argstr="--inOutput %s",), - inOutput2=dict(argstr="--inOutput2 %s",), - inOutput3=dict(argstr="--inOutput3 %s",), - inOutput4=dict(argstr="--inOutput4 %s",), - inOutput5=dict(argstr="--inOutput5 %s",), - inRegistration=dict(argstr="--inRegistration %s",), - inResample=dict(argstr="--inResample %s",), - inRun=dict(argstr="--inRun %s",), - inSkip=dict(argstr="--inSkip %s",), - inSmoothing=dict(argstr="--inSmoothing %f",), - inSubsample=dict(argstr="--inSubsample %s",), - inUse=dict(argstr="--inUse %s",), - null=dict(argstr="--null %s",), - outFANTASM=dict(argstr="--outFANTASM %s", hash_files=False,), - outMask=dict(argstr="--outMask %s", hash_files=False,), - outMidsagittal=dict(argstr="--outMidsagittal %s", hash_files=False,), - outOriginal=dict(argstr="--outOriginal %s", hash_files=False,), - outPrior=dict(argstr="--outPrior %s", hash_files=False,), - outSegmentation=dict(argstr="--outSegmentation %s", hash_files=False,), - outSplitHalves=dict(argstr="--outSplitHalves %s", hash_files=False,), - outStripped=dict(argstr="--outStripped %s", hash_files=False,), - outd0=dict(argstr="--outd0 %s", hash_files=False,), - xDefaultMem=dict(argstr="-xDefaultMem %d",), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), - xPrefExt=dict(argstr="--xPrefExt %s",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inApply=dict( + argstr="--inApply %s", + ), + inAtlas=dict( + argstr="--inAtlas %s", + extensions=None, + ), + inBackground=dict( + argstr="--inBackground %f", + ), + inCoarse=dict( + argstr="--inCoarse %f", + ), + inCost=dict( + argstr="--inCost %s", + ), + inDegrees=dict( + argstr="--inDegrees %s", + ), + inFind=dict( + argstr="--inFind %s", + ), + inFine=dict( + argstr="--inFine %f", + ), + inImage=dict( + argstr="--inImage %s", + ), + inInhomogeneity=dict( + argstr="--inInhomogeneity %s", + ), + inInitial=dict( + argstr="--inInitial %d", + ), + inInitial2=dict( + argstr="--inInitial2 %f", + ), + inInput=dict( + argstr="--inInput %s", + extensions=None, + ), + inMMC=dict( + argstr="--inMMC %d", + ), + inMMC2=dict( + argstr="--inMMC2 %d", + ), + inMaximum=dict( + argstr="--inMaximum %f", + ), + inMinimum=dict( + argstr="--inMinimum %f", + ), + inMinimum2=dict( + argstr="--inMinimum2 %f", + ), + inMultiple=dict( + argstr="--inMultiple %d", + ), + inMultithreading=dict( + argstr="--inMultithreading %s", + ), + inNumber=dict( + 
argstr="--inNumber %d", + ), + inNumber2=dict( + argstr="--inNumber2 %d", + ), + inOutput=dict( + argstr="--inOutput %s", + ), + inOutput2=dict( + argstr="--inOutput2 %s", + ), + inOutput3=dict( + argstr="--inOutput3 %s", + ), + inOutput4=dict( + argstr="--inOutput4 %s", + ), + inOutput5=dict( + argstr="--inOutput5 %s", + ), + inRegistration=dict( + argstr="--inRegistration %s", + ), + inResample=dict( + argstr="--inResample %s", + ), + inRun=dict( + argstr="--inRun %s", + ), + inSkip=dict( + argstr="--inSkip %s", + ), + inSmoothing=dict( + argstr="--inSmoothing %f", + ), + inSubsample=dict( + argstr="--inSubsample %s", + ), + inUse=dict( + argstr="--inUse %s", + ), + null=dict( + argstr="--null %s", + ), + outFANTASM=dict( + argstr="--outFANTASM %s", + hash_files=False, + ), + outMask=dict( + argstr="--outMask %s", + hash_files=False, + ), + outMidsagittal=dict( + argstr="--outMidsagittal %s", + hash_files=False, + ), + outOriginal=dict( + argstr="--outOriginal %s", + hash_files=False, + ), + outPrior=dict( + argstr="--outPrior %s", + hash_files=False, + ), + outSegmentation=dict( + argstr="--outSegmentation %s", + hash_files=False, + ), + outSplitHalves=dict( + argstr="--outSplitHalves %s", + hash_files=False, + ), + outStripped=dict( + argstr="--outStripped %s", + hash_files=False, + ), + outd0=dict( + argstr="--outd0 %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = MedicAlgorithmSPECTRE2010.input_spec() @@ -63,15 +174,33 @@ def test_MedicAlgorithmSPECTRE2010_inputs(): def test_MedicAlgorithmSPECTRE2010_outputs(): output_map = dict( - outFANTASM=dict(extensions=None,), - outMask=dict(extensions=None,), - outMidsagittal=dict(extensions=None,), - outOriginal=dict(extensions=None,), - outPrior=dict(extensions=None,), - outSegmentation=dict(extensions=None,), - outSplitHalves=dict(extensions=None,), - outStripped=dict(extensions=None,), - outd0=dict(extensions=None,), + outFANTASM=dict( + extensions=None, + ), + outMask=dict( + extensions=None, + ), + outMidsagittal=dict( + extensions=None, + ), + outOriginal=dict( + extensions=None, + ), + outPrior=dict( + extensions=None, + ), + outSegmentation=dict( + extensions=None, + ), + outSplitHalves=dict( + extensions=None, + ), + outStripped=dict( + extensions=None, + ), + outd0=dict( + extensions=None, + ), ) outputs = MedicAlgorithmSPECTRE2010.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py index ca0179d231..bdd6e4e1b6 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py @@ -4,17 +4,43 @@ def test_MedicAlgorithmThresholdToBinaryMask_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inLabel=dict(argstr="--inLabel %s", sep=";",), - inMaximum=dict(argstr="--inMaximum %f",), - inMinimum=dict(argstr="--inMinimum %f",), - inUse=dict(argstr="--inUse %s",), - null=dict(argstr="--null %s",), - outBinary=dict(argstr="--outBinary %s", sep=";",), - xDefaultMem=dict(argstr="-xDefaultMem %d",), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), - xPrefExt=dict(argstr="--xPrefExt %s",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), 
+ inLabel=dict( + argstr="--inLabel %s", + sep=";", + ), + inMaximum=dict( + argstr="--inMaximum %f", + ), + inMinimum=dict( + argstr="--inMinimum %f", + ), + inUse=dict( + argstr="--inUse %s", + ), + null=dict( + argstr="--null %s", + ), + outBinary=dict( + argstr="--outBinary %s", + sep=";", + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = MedicAlgorithmThresholdToBinaryMask.input_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_RandomVol.py b/nipype/interfaces/mipav/tests/test_auto_RandomVol.py index 1dd7520626..4929f54d6a 100644 --- a/nipype/interfaces/mipav/tests/test_auto_RandomVol.py +++ b/nipype/interfaces/mipav/tests/test_auto_RandomVol.py @@ -4,22 +4,57 @@ def test_RandomVol_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inField=dict(argstr="--inField %s",), - inLambda=dict(argstr="--inLambda %f",), - inMaximum=dict(argstr="--inMaximum %d",), - inMinimum=dict(argstr="--inMinimum %d",), - inSize=dict(argstr="--inSize %d",), - inSize2=dict(argstr="--inSize2 %d",), - inSize3=dict(argstr="--inSize3 %d",), - inSize4=dict(argstr="--inSize4 %d",), - inStandard=dict(argstr="--inStandard %d",), - null=dict(argstr="--null %s",), - outRand1=dict(argstr="--outRand1 %s", hash_files=False,), - xDefaultMem=dict(argstr="-xDefaultMem %d",), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True,), - xPrefExt=dict(argstr="--xPrefExt %s",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inField=dict( + argstr="--inField %s", + ), + inLambda=dict( + argstr="--inLambda %f", + ), + inMaximum=dict( + argstr="--inMaximum %d", + ), + inMinimum=dict( + argstr="--inMinimum %d", + ), + inSize=dict( + argstr="--inSize %d", + ), + inSize2=dict( + argstr="--inSize2 %d", + ), + inSize3=dict( + argstr="--inSize3 %d", + ), + inSize4=dict( + argstr="--inSize4 %d", + ), + inStandard=dict( + argstr="--inStandard %d", + ), + null=dict( + argstr="--null %s", + ), + outRand1=dict( + argstr="--outRand1 %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = RandomVol.input_spec() @@ -29,7 +64,11 @@ def test_RandomVol_inputs(): def test_RandomVol_outputs(): - output_map = dict(outRand1=dict(extensions=None,),) + output_map = dict( + outRand1=dict( + extensions=None, + ), + ) outputs = RandomVol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py b/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py index 30e6a1de7e..57f1b40e4d 100644 --- a/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py +++ b/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py @@ -4,13 +4,32 @@ def test_WatershedBEM_inputs(): input_map = dict( - args=dict(argstr="%s",), - atlas_mode=dict(argstr="--atlas",), - environ=dict(nohash=True, usedefault=True,), - overwrite=dict(argstr="--overwrite", usedefault=True,), - subject_id=dict(argstr="--subject %s", mandatory=True,), - subjects_dir=dict(mandatory=True, usedefault=True,), - volume=dict(argstr="--volume %s", usedefault=True,), + args=dict( + argstr="%s", + ), + atlas_mode=dict( + argstr="--atlas", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + overwrite=dict( + argstr="--overwrite", + 
usedefault=True, + ), + subject_id=dict( + argstr="--subject %s", + mandatory=True, + ), + subjects_dir=dict( + mandatory=True, + usedefault=True, + ), + volume=dict( + argstr="--volume %s", + usedefault=True, + ), ) inputs = WatershedBEM.input_spec() @@ -21,13 +40,32 @@ def test_WatershedBEM_inputs(): def test_WatershedBEM_outputs(): output_map = dict( - brain_surface=dict(extensions=None, loc="bem/watershed",), - cor_files=dict(altkey="COR", loc="bem/watershed/ws",), - fif_file=dict(altkey="fif", extensions=None, loc="bem",), - inner_skull_surface=dict(extensions=None, loc="bem/watershed",), + brain_surface=dict( + extensions=None, + loc="bem/watershed", + ), + cor_files=dict( + altkey="COR", + loc="bem/watershed/ws", + ), + fif_file=dict( + altkey="fif", + extensions=None, + loc="bem", + ), + inner_skull_surface=dict( + extensions=None, + loc="bem/watershed", + ), mesh_files=dict(), - outer_skin_surface=dict(extensions=None, loc="bem/watershed",), - outer_skull_surface=dict(extensions=None, loc="bem/watershed",), + outer_skin_surface=dict( + extensions=None, + loc="bem/watershed", + ), + outer_skull_surface=dict( + extensions=None, + loc="bem/watershed", + ), ) outputs = WatershedBEM.output_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py b/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py index cee549fae9..ad93f35b9a 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py @@ -4,21 +4,70 @@ def test_ConstrainedSphericalDeconvolution_inputs(): input_map = dict( - args=dict(argstr="%s",), - debug=dict(argstr="-debug",), - directions_file=dict(argstr="-directions %s", extensions=None, position=-2,), - encoding_file=dict(argstr="-grad %s", extensions=None, position=1,), - environ=dict(nohash=True, usedefault=True,), - filter_file=dict(argstr="-filter %s", extensions=None, position=-2,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), - iterations=dict(argstr="-niter %s",), - lambda_value=dict(argstr="-lambda %s",), - mask_image=dict(argstr="-mask %s", extensions=None, position=2,), - maximum_harmonic_order=dict(argstr="-lmax %s",), - normalise=dict(argstr="-normalise", position=3,), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), - response_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - threshold_value=dict(argstr="-threshold %s",), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + ), + directions_file=dict( + argstr="-directions %s", + extensions=None, + position=-2, + ), + encoding_file=dict( + argstr="-grad %s", + extensions=None, + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + filter_file=dict( + argstr="-filter %s", + extensions=None, + position=-2, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + iterations=dict( + argstr="-niter %s", + ), + lambda_value=dict( + argstr="-lambda %s", + ), + mask_image=dict( + argstr="-mask %s", + extensions=None, + position=2, + ), + maximum_harmonic_order=dict( + argstr="-lmax %s", + ), + normalise=dict( + argstr="-normalise", + position=3, + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + response_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + threshold_value=dict( + argstr="-threshold %s", + ), ) 
inputs = ConstrainedSphericalDeconvolution.input_spec() @@ -28,7 +77,11 @@ def test_ConstrainedSphericalDeconvolution_inputs(): def test_ConstrainedSphericalDeconvolution_outputs(): - output_map = dict(spherical_harmonics_image=dict(extensions=None,),) + output_map = dict( + spherical_harmonics_image=dict( + extensions=None, + ), + ) outputs = ConstrainedSphericalDeconvolution.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py b/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py index 7bd1399f30..97b5885baf 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py @@ -4,15 +4,38 @@ def test_DWI2SphericalHarmonicsImage_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), encoding_file=dict( - argstr="-grad %s", extensions=None, mandatory=True, position=1, + argstr="-grad %s", + extensions=None, + mandatory=True, + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + maximum_harmonic_order=dict( + argstr="-lmax %s", + ), + normalise=dict( + argstr="-normalise", + position=3, + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, ), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - maximum_harmonic_order=dict(argstr="-lmax %s",), - normalise=dict(argstr="-normalise", position=3,), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), ) inputs = DWI2SphericalHarmonicsImage.input_spec() @@ -22,7 +45,11 @@ def test_DWI2SphericalHarmonicsImage_inputs(): def test_DWI2SphericalHarmonicsImage_outputs(): - output_map = dict(spherical_harmonics_image=dict(extensions=None,),) + output_map = dict( + spherical_harmonics_image=dict( + extensions=None, + ), + ) outputs = DWI2SphericalHarmonicsImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py b/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py index 476f68e9ef..25bae449e3 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py @@ -4,13 +4,37 @@ def test_DWI2Tensor_inputs(): input_map = dict( - args=dict(argstr="%s",), - debug=dict(argstr="-debug", position=1,), - encoding_file=dict(argstr="-grad %s", extensions=None, position=2,), - environ=dict(nohash=True, usedefault=True,), - ignore_slice_by_volume=dict(argstr="-ignoreslices %s", position=2, sep=" ",), - ignore_volumes=dict(argstr="-ignorevolumes %s", position=2, sep=" ",), - in_file=dict(argstr="%s", mandatory=True, position=-2,), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + position=1, + ), + encoding_file=dict( + argstr="-grad %s", + extensions=None, + position=2, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ignore_slice_by_volume=dict( + argstr="-ignoreslices %s", + position=2, + sep=" ", + ), + ignore_volumes=dict( + argstr="-ignorevolumes %s", + position=2, + sep=" ", + ), + in_file=dict( + argstr="%s", + mandatory=True, + position=-2, + ), out_filename=dict( argstr="%s", extensions=None, @@ -19,7 +43,10 @@ def test_DWI2Tensor_inputs(): output_name="tensor", position=-1, ), - quiet=dict(argstr="-quiet", 
position=1,), + quiet=dict( + argstr="-quiet", + position=1, + ), ) inputs = DWI2Tensor.input_spec() @@ -29,7 +56,11 @@ def test_DWI2Tensor_inputs(): def test_DWI2Tensor_outputs(): - output_map = dict(tensor=dict(extensions=None,),) + output_map = dict( + tensor=dict( + extensions=None, + ), + ) outputs = DWI2Tensor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py index 66122fcfdb..c6fe4f586e 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py @@ -4,13 +4,27 @@ def test_DiffusionTensorStreamlineTrack_inputs(): input_map = dict( - args=dict(argstr="%s",), - cutoff_value=dict(argstr="-cutoff %s", units="NA",), - desired_number_of_tracks=dict(argstr="-number %d",), - do_not_precompute=dict(argstr="-noprecomputed",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + cutoff_value=dict( + argstr="-cutoff %s", + units="NA", + ), + desired_number_of_tracks=dict( + argstr="-number %d", + ), + do_not_precompute=dict( + argstr="-noprecomputed", + ), + environ=dict( + nohash=True, + usedefault=True, + ), exclude_file=dict( - argstr="-exclude %s", extensions=None, xor=["exclude_file", "exclude_spec"], + argstr="-exclude %s", + extensions=None, + xor=["exclude_file", "exclude_spec"], ), exclude_spec=dict( argstr="-exclude %s", @@ -20,11 +34,21 @@ def test_DiffusionTensorStreamlineTrack_inputs(): xor=["exclude_file", "exclude_spec"], ), gradient_encoding_file=dict( - argstr="-grad %s", extensions=None, mandatory=True, position=-2, + argstr="-grad %s", + extensions=None, + mandatory=True, + position=-2, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, ), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), include_file=dict( - argstr="-include %s", extensions=None, xor=["include_file", "include_spec"], + argstr="-include %s", + extensions=None, + xor=["include_file", "include_spec"], ), include_spec=dict( argstr="-include %s", @@ -33,11 +57,23 @@ def test_DiffusionTensorStreamlineTrack_inputs(): units="mm", xor=["include_file", "include_spec"], ), - initial_cutoff_value=dict(argstr="-initcutoff %s", units="NA",), - initial_direction=dict(argstr="-initdirection %s", units="voxels",), - inputmodel=dict(argstr="%s", position=-3, usedefault=True,), + initial_cutoff_value=dict( + argstr="-initcutoff %s", + units="NA", + ), + initial_direction=dict( + argstr="-initdirection %s", + units="voxels", + ), + inputmodel=dict( + argstr="%s", + position=-3, + usedefault=True, + ), mask_file=dict( - argstr="-mask %s", extensions=None, xor=["mask_file", "mask_spec"], + argstr="-mask %s", + extensions=None, + xor=["mask_file", "mask_spec"], ), mask_spec=dict( argstr="-mask %s", @@ -46,11 +82,24 @@ def test_DiffusionTensorStreamlineTrack_inputs(): units="mm", xor=["mask_file", "mask_spec"], ), - maximum_number_of_tracks=dict(argstr="-maxnum %d",), - maximum_tract_length=dict(argstr="-length %s", units="mm",), - minimum_radius_of_curvature=dict(argstr="-curvature %s", units="mm",), - minimum_tract_length=dict(argstr="-minlength %s", units="mm",), - no_mask_interpolation=dict(argstr="-nomaskinterp",), + maximum_number_of_tracks=dict( + argstr="-maxnum %d", + ), + maximum_tract_length=dict( + argstr="-length %s", + units="mm", + ), + 
minimum_radius_of_curvature=dict( + argstr="-curvature %s", + units="mm", + ), + minimum_tract_length=dict( + argstr="-minlength %s", + units="mm", + ), + no_mask_interpolation=dict( + argstr="-nomaskinterp", + ), out_file=dict( argstr="%s", extensions=None, @@ -60,7 +109,9 @@ def test_DiffusionTensorStreamlineTrack_inputs(): position=-1, ), seed_file=dict( - argstr="-seed %s", extensions=None, xor=["seed_file", "seed_spec"], + argstr="-seed %s", + extensions=None, + xor=["seed_file", "seed_spec"], ), seed_spec=dict( argstr="-seed %s", @@ -69,9 +120,16 @@ def test_DiffusionTensorStreamlineTrack_inputs(): units="mm", xor=["seed_file", "seed_spec"], ), - step_size=dict(argstr="-step %s", units="mm",), - stop=dict(argstr="-stop",), - unidirectional=dict(argstr="-unidirectional",), + step_size=dict( + argstr="-step %s", + units="mm", + ), + stop=dict( + argstr="-stop", + ), + unidirectional=dict( + argstr="-unidirectional", + ), ) inputs = DiffusionTensorStreamlineTrack.input_spec() @@ -81,7 +139,11 @@ def test_DiffusionTensorStreamlineTrack_inputs(): def test_DiffusionTensorStreamlineTrack_outputs(): - output_map = dict(tracked=dict(extensions=None,),) + output_map = dict( + tracked=dict( + extensions=None, + ), + ) outputs = DiffusionTensorStreamlineTrack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py b/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py index 66c75db7dc..4685b0e9d6 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py @@ -4,12 +4,28 @@ def test_Directions2Amplitude_inputs(): input_map = dict( - args=dict(argstr="%s",), - display_debug=dict(argstr="-debug",), - display_info=dict(argstr="-info",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - num_peaks=dict(argstr="-num %s",), + args=dict( + argstr="%s", + ), + display_debug=dict( + argstr="-debug", + ), + display_info=dict( + argstr="-info", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + num_peaks=dict( + argstr="-num %s", + ), out_file=dict( argstr="%s", extensions=None, @@ -19,9 +35,17 @@ def test_Directions2Amplitude_inputs(): name_template="%s_amplitudes.mif", position=-1, ), - peak_directions=dict(argstr="-direction %s", sep=" ",), - peaks_image=dict(argstr="-peaks %s", extensions=None,), - quiet_display=dict(argstr="-quiet",), + peak_directions=dict( + argstr="-direction %s", + sep=" ", + ), + peaks_image=dict( + argstr="-peaks %s", + extensions=None, + ), + quiet_display=dict( + argstr="-quiet", + ), ) inputs = Directions2Amplitude.input_spec() @@ -31,7 +55,11 @@ def test_Directions2Amplitude_inputs(): def test_Directions2Amplitude_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Directions2Amplitude.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Erode.py b/nipype/interfaces/mrtrix/tests/test_auto_Erode.py index cb038461ad..86a6a3d0b1 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Erode.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Erode.py @@ -4,14 +4,40 @@ def test_Erode_inputs(): input_map = dict( - args=dict(argstr="%s",), - debug=dict(argstr="-debug", position=1,), - 
dilate=dict(argstr="-dilate", position=1,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - number_of_passes=dict(argstr="-npass %s",), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), - quiet=dict(argstr="-quiet", position=1,), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + position=1, + ), + dilate=dict( + argstr="-dilate", + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + number_of_passes=dict( + argstr="-npass %s", + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + quiet=dict( + argstr="-quiet", + position=1, + ), ) inputs = Erode.input_spec() @@ -21,7 +47,11 @@ def test_Erode_inputs(): def test_Erode_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Erode.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py b/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py index 4040fe9479..e93a7744fc 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py @@ -4,18 +4,49 @@ def test_EstimateResponseForSH_inputs(): input_map = dict( - args=dict(argstr="%s",), - debug=dict(argstr="-debug",), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + ), encoding_file=dict( - argstr="-grad %s", extensions=None, mandatory=True, position=1, - ), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), - mask_image=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - maximum_harmonic_order=dict(argstr="-lmax %s",), - normalise=dict(argstr="-normalise",), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), - quiet=dict(argstr="-quiet",), + argstr="-grad %s", + extensions=None, + mandatory=True, + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + mask_image=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + maximum_harmonic_order=dict( + argstr="-lmax %s", + ), + normalise=dict( + argstr="-normalise", + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + quiet=dict( + argstr="-quiet", + ), ) inputs = EstimateResponseForSH.input_spec() @@ -25,7 +56,11 @@ def test_EstimateResponseForSH_inputs(): def test_EstimateResponseForSH_outputs(): - output_map = dict(response=dict(extensions=None,),) + output_map = dict( + response=dict( + extensions=None, + ), + ) outputs = EstimateResponseForSH.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py b/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py index 4772abcbc4..3e11a7db45 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py @@ -4,12 +4,27 @@ def test_FSL2MRTrix_inputs(): input_map = dict( - bval_file=dict(extensions=None, mandatory=True,), - bvec_file=dict(extensions=None, mandatory=True,), - invert_x=dict(usedefault=True,), - 
invert_y=dict(usedefault=True,), - invert_z=dict(usedefault=True,), - out_encoding_file=dict(extensions=None, genfile=True,), + bval_file=dict( + extensions=None, + mandatory=True, + ), + bvec_file=dict( + extensions=None, + mandatory=True, + ), + invert_x=dict( + usedefault=True, + ), + invert_y=dict( + usedefault=True, + ), + invert_z=dict( + usedefault=True, + ), + out_encoding_file=dict( + extensions=None, + genfile=True, + ), ) inputs = FSL2MRTrix.input_spec() @@ -19,7 +34,11 @@ def test_FSL2MRTrix_inputs(): def test_FSL2MRTrix_outputs(): - output_map = dict(encoding_file=dict(extensions=None,),) + output_map = dict( + encoding_file=dict( + extensions=None, + ), + ) outputs = FSL2MRTrix.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py b/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py index 0ce949eb41..c7b9c19d24 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py @@ -4,11 +4,21 @@ def test_FilterTracks_inputs(): input_map = dict( - args=dict(argstr="%s",), - debug=dict(argstr="-debug", position=1,), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), exclude_file=dict( - argstr="-exclude %s", extensions=None, xor=["exclude_file", "exclude_spec"], + argstr="-exclude %s", + extensions=None, + xor=["exclude_file", "exclude_spec"], ), exclude_spec=dict( argstr="-exclude %s", @@ -17,9 +27,16 @@ def test_FilterTracks_inputs(): units="mm", xor=["exclude_file", "exclude_spec"], ), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), include_file=dict( - argstr="-include %s", extensions=None, xor=["include_file", "include_spec"], + argstr="-include %s", + extensions=None, + xor=["include_file", "include_spec"], ), include_spec=dict( argstr="-include %s", @@ -28,9 +45,16 @@ def test_FilterTracks_inputs(): units="mm", xor=["include_file", "include_spec"], ), - invert=dict(argstr="-invert",), - minimum_tract_length=dict(argstr="-minlength %s", units="mm",), - no_mask_interpolation=dict(argstr="-nomaskinterp",), + invert=dict( + argstr="-invert", + ), + minimum_tract_length=dict( + argstr="-minlength %s", + units="mm", + ), + no_mask_interpolation=dict( + argstr="-nomaskinterp", + ), out_file=dict( argstr="%s", extensions=None, @@ -39,7 +63,10 @@ def test_FilterTracks_inputs(): name_template="%s_filt", position=-1, ), - quiet=dict(argstr="-quiet", position=1,), + quiet=dict( + argstr="-quiet", + position=1, + ), ) inputs = FilterTracks.input_spec() @@ -49,7 +76,11 @@ def test_FilterTracks_inputs(): def test_FilterTracks_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = FilterTracks.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py b/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py index c8e6a9cb3d..62132f795a 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py @@ -4,15 +4,34 @@ def test_FindShPeaks_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), directions_file=dict( - argstr="%s", 
extensions=None, mandatory=True, position=-2, + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + display_debug=dict( + argstr="-debug", + ), + display_info=dict( + argstr="-info", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + num_peaks=dict( + argstr="-num %s", ), - display_debug=dict(argstr="-debug",), - display_info=dict(argstr="-info",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), - num_peaks=dict(argstr="-num %s",), out_file=dict( argstr="%s", extensions=None, @@ -22,10 +41,20 @@ def test_FindShPeaks_inputs(): name_template="%s_peak_dirs.mif", position=-1, ), - peak_directions=dict(argstr="-direction %s", sep=" ",), - peak_threshold=dict(argstr="-threshold %s",), - peaks_image=dict(argstr="-peaks %s", extensions=None,), - quiet_display=dict(argstr="-quiet",), + peak_directions=dict( + argstr="-direction %s", + sep=" ", + ), + peak_threshold=dict( + argstr="-threshold %s", + ), + peaks_image=dict( + argstr="-peaks %s", + extensions=None, + ), + quiet_display=dict( + argstr="-quiet", + ), ) inputs = FindShPeaks.input_spec() @@ -35,7 +64,11 @@ def test_FindShPeaks_inputs(): def test_FindShPeaks_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = FindShPeaks.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py b/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py index f980f9386b..63b66ab1e6 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py @@ -4,12 +4,27 @@ def test_GenerateDirections_inputs(): input_map = dict( - args=dict(argstr="%s",), - display_debug=dict(argstr="-debug",), - display_info=dict(argstr="-info",), - environ=dict(nohash=True, usedefault=True,), - niter=dict(argstr="-niter %s",), - num_dirs=dict(argstr="%s", mandatory=True, position=-2,), + args=dict( + argstr="%s", + ), + display_debug=dict( + argstr="-debug", + ), + display_info=dict( + argstr="-info", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + niter=dict( + argstr="-niter %s", + ), + num_dirs=dict( + argstr="%s", + mandatory=True, + position=-2, + ), out_file=dict( argstr="%s", extensions=None, @@ -18,8 +33,12 @@ def test_GenerateDirections_inputs(): name_template="directions_%d.txt", position=-1, ), - power=dict(argstr="-power %s",), - quiet_display=dict(argstr="-quiet",), + power=dict( + argstr="-power %s", + ), + quiet_display=dict( + argstr="-quiet", + ), ) inputs = GenerateDirections.input_spec() @@ -29,7 +48,11 @@ def test_GenerateDirections_inputs(): def test_GenerateDirections_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = GenerateDirections.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py b/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py index dc58ac51d2..09b893f105 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py @@ -4,16 +4,39 @@ def test_GenerateWhiteMatterMask_inputs(): input_map = dict( - 
args=dict(argstr="%s",), - binary_mask=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + args=dict( + argstr="%s", + ), + binary_mask=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), encoding_file=dict( - argstr="-grad %s", extensions=None, mandatory=True, position=1, + argstr="-grad %s", + extensions=None, + mandatory=True, + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + noise_level_margin=dict( + argstr="-margin %s", ), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), - noise_level_margin=dict(argstr="-margin %s",), out_WMProb_filename=dict( - argstr="%s", extensions=None, genfile=True, position=-1, + argstr="%s", + extensions=None, + genfile=True, + position=-1, ), ) inputs = GenerateWhiteMatterMask.input_spec() @@ -24,7 +47,11 @@ def test_GenerateWhiteMatterMask_inputs(): def test_GenerateWhiteMatterMask_outputs(): - output_map = dict(WMprobabilitymap=dict(extensions=None,),) + output_map = dict( + WMprobabilitymap=dict( + extensions=None, + ), + ) outputs = GenerateWhiteMatterMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py b/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py index 792beb6aa0..7e819a66da 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py @@ -4,20 +4,69 @@ def test_MRConvert_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - extension=dict(position=2, usedefault=True,), - extract_at_axis=dict(argstr="-coord %s", position=1,), - extract_at_coordinate=dict(argstr="%s", position=2, sep=",",), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - layout=dict(argstr="-output %s", position=2,), - offset_bias=dict(argstr="-scale %d", position=3, units="mm",), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), - output_datatype=dict(argstr="-output %s", position=2,), - prs=dict(argstr="-prs", position=3,), - replace_NaN_with_zero=dict(argstr="-zero", position=3,), - resample=dict(argstr="-scale %d", position=3, units="mm",), - voxel_dims=dict(argstr="-vox %s", position=3, sep=",",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + extension=dict( + position=2, + usedefault=True, + ), + extract_at_axis=dict( + argstr="-coord %s", + position=1, + ), + extract_at_coordinate=dict( + argstr="%s", + position=2, + sep=",", + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + layout=dict( + argstr="-output %s", + position=2, + ), + offset_bias=dict( + argstr="-scale %d", + position=3, + units="mm", + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + output_datatype=dict( + argstr="-output %s", + position=2, + ), + prs=dict( + argstr="-prs", + position=3, + ), + replace_NaN_with_zero=dict( + argstr="-zero", + position=3, + ), + resample=dict( + argstr="-scale %d", + position=3, + units="mm", + ), + voxel_dims=dict( + argstr="-vox %s", + position=3, + sep=",", + ), ) inputs = MRConvert.input_spec() @@ -27,7 +76,11 @@ def test_MRConvert_inputs(): def test_MRConvert_outputs(): - output_map = dict(converted=dict(extensions=None,),) + output_map = dict( + 
converted=dict( + extensions=None, + ), + ) outputs = MRConvert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py b/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py index 5525ef1130..daa1231f06 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py @@ -4,12 +4,32 @@ def test_MRMultiply_inputs(): input_map = dict( - args=dict(argstr="%s",), - debug=dict(argstr="-debug", position=1,), - environ=dict(nohash=True, usedefault=True,), - in_files=dict(argstr="%s", mandatory=True, position=-2,), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), - quiet=dict(argstr="-quiet", position=1,), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr="%s", + mandatory=True, + position=-2, + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + quiet=dict( + argstr="-quiet", + position=1, + ), ) inputs = MRMultiply.input_spec() @@ -19,7 +39,11 @@ def test_MRMultiply_inputs(): def test_MRMultiply_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRMultiply.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py index 60e0f452ac..c1e91da1c1 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py @@ -4,18 +4,59 @@ def test_MRTransform_inputs(): input_map = dict( - args=dict(argstr="%s",), - debug=dict(argstr="-debug", position=1,), - environ=dict(nohash=True, usedefault=True,), - flip_x=dict(argstr="-flipx", position=1,), - in_files=dict(argstr="%s", mandatory=True, position=-2,), - invert=dict(argstr="-inverse", position=1,), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), - quiet=dict(argstr="-quiet", position=1,), - reference_image=dict(argstr="-reference %s", extensions=None, position=1,), - replace_transform=dict(argstr="-replace", position=1,), - template_image=dict(argstr="-template %s", extensions=None, position=1,), - transformation_file=dict(argstr="-transform %s", extensions=None, position=1,), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flip_x=dict( + argstr="-flipx", + position=1, + ), + in_files=dict( + argstr="%s", + mandatory=True, + position=-2, + ), + invert=dict( + argstr="-inverse", + position=1, + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + quiet=dict( + argstr="-quiet", + position=1, + ), + reference_image=dict( + argstr="-reference %s", + extensions=None, + position=1, + ), + replace_transform=dict( + argstr="-replace", + position=1, + ), + template_image=dict( + argstr="-template %s", + extensions=None, + position=1, + ), + transformation_file=dict( + argstr="-transform %s", + extensions=None, + position=1, + ), ) inputs = MRTransform.input_spec() @@ -25,7 +66,11 @@ def test_MRTransform_inputs(): def test_MRTransform_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRTransform.output_spec() 
for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py index 05f80b4646..0fb54a3020 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py @@ -4,11 +4,24 @@ def test_MRTrix2TrackVis_inputs(): input_map = dict( - image_file=dict(extensions=None,), - in_file=dict(extensions=None, mandatory=True,), - matrix_file=dict(extensions=None,), - out_filename=dict(extensions=None, genfile=True, usedefault=True,), - registration_image_file=dict(extensions=None,), + image_file=dict( + extensions=None, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + matrix_file=dict( + extensions=None, + ), + out_filename=dict( + extensions=None, + genfile=True, + usedefault=True, + ), + registration_image_file=dict( + extensions=None, + ), ) inputs = MRTrix2TrackVis.input_spec() @@ -18,7 +31,11 @@ def test_MRTrix2TrackVis_inputs(): def test_MRTrix2TrackVis_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRTrix2TrackVis.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py index 5e3fd2882e..eecdc39bf5 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py @@ -4,9 +4,19 @@ def test_MRTrixInfo_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), ) inputs = MRTrixInfo.input_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py index 711191bd16..8eab033221 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py @@ -4,11 +4,26 @@ def test_MRTrixViewer_inputs(): input_map = dict( - args=dict(argstr="%s",), - debug=dict(argstr="-debug", position=1,), - environ=dict(nohash=True, usedefault=True,), - in_files=dict(argstr="%s", mandatory=True, position=-2,), - quiet=dict(argstr="-quiet", position=1,), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr="%s", + mandatory=True, + position=-2, + ), + quiet=dict( + argstr="-quiet", + position=1, + ), ) inputs = MRTrixViewer.input_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py b/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py index 0b3f38dcbc..7a0974d9b8 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py @@ -4,12 +4,33 @@ def test_MedianFilter3D_inputs(): input_map = dict( - args=dict(argstr="%s",), - debug=dict(argstr="-debug", position=1,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), - quiet=dict(argstr="-quiet", 
position=1,), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + quiet=dict( + argstr="-quiet", + position=1, + ), ) inputs = MedianFilter3D.input_spec() @@ -19,7 +40,11 @@ def test_MedianFilter3D_inputs(): def test_MedianFilter3D_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MedianFilter3D.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py index e640da1306..8dded55576 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py @@ -4,13 +4,27 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): input_map = dict( - args=dict(argstr="%s",), - cutoff_value=dict(argstr="-cutoff %s", units="NA",), - desired_number_of_tracks=dict(argstr="-number %d",), - do_not_precompute=dict(argstr="-noprecomputed",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + cutoff_value=dict( + argstr="-cutoff %s", + units="NA", + ), + desired_number_of_tracks=dict( + argstr="-number %d", + ), + do_not_precompute=dict( + argstr="-noprecomputed", + ), + environ=dict( + nohash=True, + usedefault=True, + ), exclude_file=dict( - argstr="-exclude %s", extensions=None, xor=["exclude_file", "exclude_spec"], + argstr="-exclude %s", + extensions=None, + xor=["exclude_file", "exclude_spec"], ), exclude_spec=dict( argstr="-exclude %s", @@ -19,9 +33,16 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): units="mm", xor=["exclude_file", "exclude_spec"], ), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), include_file=dict( - argstr="-include %s", extensions=None, xor=["include_file", "include_spec"], + argstr="-include %s", + extensions=None, + xor=["include_file", "include_spec"], ), include_spec=dict( argstr="-include %s", @@ -30,11 +51,23 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): units="mm", xor=["include_file", "include_spec"], ), - initial_cutoff_value=dict(argstr="-initcutoff %s", units="NA",), - initial_direction=dict(argstr="-initdirection %s", units="voxels",), - inputmodel=dict(argstr="%s", position=-3, usedefault=True,), + initial_cutoff_value=dict( + argstr="-initcutoff %s", + units="NA", + ), + initial_direction=dict( + argstr="-initdirection %s", + units="voxels", + ), + inputmodel=dict( + argstr="%s", + position=-3, + usedefault=True, + ), mask_file=dict( - argstr="-mask %s", extensions=None, xor=["mask_file", "mask_spec"], + argstr="-mask %s", + extensions=None, + xor=["mask_file", "mask_spec"], ), mask_spec=dict( argstr="-mask %s", @@ -43,12 +76,27 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): units="mm", xor=["mask_file", "mask_spec"], ), - maximum_number_of_tracks=dict(argstr="-maxnum %d",), - maximum_number_of_trials=dict(argstr="-trials %s",), - 
maximum_tract_length=dict(argstr="-length %s", units="mm",), - minimum_radius_of_curvature=dict(argstr="-curvature %s", units="mm",), - minimum_tract_length=dict(argstr="-minlength %s", units="mm",), - no_mask_interpolation=dict(argstr="-nomaskinterp",), + maximum_number_of_tracks=dict( + argstr="-maxnum %d", + ), + maximum_number_of_trials=dict( + argstr="-trials %s", + ), + maximum_tract_length=dict( + argstr="-length %s", + units="mm", + ), + minimum_radius_of_curvature=dict( + argstr="-curvature %s", + units="mm", + ), + minimum_tract_length=dict( + argstr="-minlength %s", + units="mm", + ), + no_mask_interpolation=dict( + argstr="-nomaskinterp", + ), out_file=dict( argstr="%s", extensions=None, @@ -58,7 +106,9 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): position=-1, ), seed_file=dict( - argstr="-seed %s", extensions=None, xor=["seed_file", "seed_spec"], + argstr="-seed %s", + extensions=None, + xor=["seed_file", "seed_spec"], ), seed_spec=dict( argstr="-seed %s", @@ -67,9 +117,16 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): units="mm", xor=["seed_file", "seed_spec"], ), - step_size=dict(argstr="-step %s", units="mm",), - stop=dict(argstr="-stop",), - unidirectional=dict(argstr="-unidirectional",), + step_size=dict( + argstr="-step %s", + units="mm", + ), + stop=dict( + argstr="-stop", + ), + unidirectional=dict( + argstr="-unidirectional", + ), ) inputs = ProbabilisticSphericallyDeconvolutedStreamlineTrack.input_spec() @@ -79,7 +136,11 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_outputs(): - output_map = dict(tracked=dict(extensions=None,),) + output_map = dict( + tracked=dict( + extensions=None, + ), + ) outputs = ProbabilisticSphericallyDeconvolutedStreamlineTrack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py index bc32741331..c717eb628b 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py @@ -4,13 +4,27 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): input_map = dict( - args=dict(argstr="%s",), - cutoff_value=dict(argstr="-cutoff %s", units="NA",), - desired_number_of_tracks=dict(argstr="-number %d",), - do_not_precompute=dict(argstr="-noprecomputed",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + cutoff_value=dict( + argstr="-cutoff %s", + units="NA", + ), + desired_number_of_tracks=dict( + argstr="-number %d", + ), + do_not_precompute=dict( + argstr="-noprecomputed", + ), + environ=dict( + nohash=True, + usedefault=True, + ), exclude_file=dict( - argstr="-exclude %s", extensions=None, xor=["exclude_file", "exclude_spec"], + argstr="-exclude %s", + extensions=None, + xor=["exclude_file", "exclude_spec"], ), exclude_spec=dict( argstr="-exclude %s", @@ -19,9 +33,16 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): units="mm", xor=["exclude_file", "exclude_spec"], ), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), include_file=dict( - argstr="-include %s", extensions=None, xor=["include_file", "include_spec"], + argstr="-include %s", + 
extensions=None, + xor=["include_file", "include_spec"], ), include_spec=dict( argstr="-include %s", @@ -30,11 +51,23 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): units="mm", xor=["include_file", "include_spec"], ), - initial_cutoff_value=dict(argstr="-initcutoff %s", units="NA",), - initial_direction=dict(argstr="-initdirection %s", units="voxels",), - inputmodel=dict(argstr="%s", position=-3, usedefault=True,), + initial_cutoff_value=dict( + argstr="-initcutoff %s", + units="NA", + ), + initial_direction=dict( + argstr="-initdirection %s", + units="voxels", + ), + inputmodel=dict( + argstr="%s", + position=-3, + usedefault=True, + ), mask_file=dict( - argstr="-mask %s", extensions=None, xor=["mask_file", "mask_spec"], + argstr="-mask %s", + extensions=None, + xor=["mask_file", "mask_spec"], ), mask_spec=dict( argstr="-mask %s", @@ -43,11 +76,24 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): units="mm", xor=["mask_file", "mask_spec"], ), - maximum_number_of_tracks=dict(argstr="-maxnum %d",), - maximum_tract_length=dict(argstr="-length %s", units="mm",), - minimum_radius_of_curvature=dict(argstr="-curvature %s", units="mm",), - minimum_tract_length=dict(argstr="-minlength %s", units="mm",), - no_mask_interpolation=dict(argstr="-nomaskinterp",), + maximum_number_of_tracks=dict( + argstr="-maxnum %d", + ), + maximum_tract_length=dict( + argstr="-length %s", + units="mm", + ), + minimum_radius_of_curvature=dict( + argstr="-curvature %s", + units="mm", + ), + minimum_tract_length=dict( + argstr="-minlength %s", + units="mm", + ), + no_mask_interpolation=dict( + argstr="-nomaskinterp", + ), out_file=dict( argstr="%s", extensions=None, @@ -57,7 +103,9 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): position=-1, ), seed_file=dict( - argstr="-seed %s", extensions=None, xor=["seed_file", "seed_spec"], + argstr="-seed %s", + extensions=None, + xor=["seed_file", "seed_spec"], ), seed_spec=dict( argstr="-seed %s", @@ -66,9 +114,16 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): units="mm", xor=["seed_file", "seed_spec"], ), - step_size=dict(argstr="-step %s", units="mm",), - stop=dict(argstr="-stop",), - unidirectional=dict(argstr="-unidirectional",), + step_size=dict( + argstr="-step %s", + units="mm", + ), + stop=dict( + argstr="-stop", + ), + unidirectional=dict( + argstr="-unidirectional", + ), ) inputs = SphericallyDeconvolutedStreamlineTrack.input_spec() @@ -78,7 +133,11 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): def test_SphericallyDeconvolutedStreamlineTrack_outputs(): - output_map = dict(tracked=dict(extensions=None,),) + output_map = dict( + tracked=dict( + extensions=None, + ), + ) outputs = SphericallyDeconvolutedStreamlineTrack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py index bf58f3fcd0..d8f1a5c869 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py @@ -4,13 +4,27 @@ def test_StreamlineTrack_inputs(): input_map = dict( - args=dict(argstr="%s",), - cutoff_value=dict(argstr="-cutoff %s", units="NA",), - desired_number_of_tracks=dict(argstr="-number %d",), - do_not_precompute=dict(argstr="-noprecomputed",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + cutoff_value=dict( + argstr="-cutoff %s", + units="NA", + ), + desired_number_of_tracks=dict( + 
argstr="-number %d", + ), + do_not_precompute=dict( + argstr="-noprecomputed", + ), + environ=dict( + nohash=True, + usedefault=True, + ), exclude_file=dict( - argstr="-exclude %s", extensions=None, xor=["exclude_file", "exclude_spec"], + argstr="-exclude %s", + extensions=None, + xor=["exclude_file", "exclude_spec"], ), exclude_spec=dict( argstr="-exclude %s", @@ -19,9 +33,16 @@ def test_StreamlineTrack_inputs(): units="mm", xor=["exclude_file", "exclude_spec"], ), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), include_file=dict( - argstr="-include %s", extensions=None, xor=["include_file", "include_spec"], + argstr="-include %s", + extensions=None, + xor=["include_file", "include_spec"], ), include_spec=dict( argstr="-include %s", @@ -30,11 +51,23 @@ def test_StreamlineTrack_inputs(): units="mm", xor=["include_file", "include_spec"], ), - initial_cutoff_value=dict(argstr="-initcutoff %s", units="NA",), - initial_direction=dict(argstr="-initdirection %s", units="voxels",), - inputmodel=dict(argstr="%s", position=-3, usedefault=True,), + initial_cutoff_value=dict( + argstr="-initcutoff %s", + units="NA", + ), + initial_direction=dict( + argstr="-initdirection %s", + units="voxels", + ), + inputmodel=dict( + argstr="%s", + position=-3, + usedefault=True, + ), mask_file=dict( - argstr="-mask %s", extensions=None, xor=["mask_file", "mask_spec"], + argstr="-mask %s", + extensions=None, + xor=["mask_file", "mask_spec"], ), mask_spec=dict( argstr="-mask %s", @@ -43,11 +76,24 @@ def test_StreamlineTrack_inputs(): units="mm", xor=["mask_file", "mask_spec"], ), - maximum_number_of_tracks=dict(argstr="-maxnum %d",), - maximum_tract_length=dict(argstr="-length %s", units="mm",), - minimum_radius_of_curvature=dict(argstr="-curvature %s", units="mm",), - minimum_tract_length=dict(argstr="-minlength %s", units="mm",), - no_mask_interpolation=dict(argstr="-nomaskinterp",), + maximum_number_of_tracks=dict( + argstr="-maxnum %d", + ), + maximum_tract_length=dict( + argstr="-length %s", + units="mm", + ), + minimum_radius_of_curvature=dict( + argstr="-curvature %s", + units="mm", + ), + minimum_tract_length=dict( + argstr="-minlength %s", + units="mm", + ), + no_mask_interpolation=dict( + argstr="-nomaskinterp", + ), out_file=dict( argstr="%s", extensions=None, @@ -57,7 +103,9 @@ def test_StreamlineTrack_inputs(): position=-1, ), seed_file=dict( - argstr="-seed %s", extensions=None, xor=["seed_file", "seed_spec"], + argstr="-seed %s", + extensions=None, + xor=["seed_file", "seed_spec"], ), seed_spec=dict( argstr="-seed %s", @@ -66,9 +114,16 @@ def test_StreamlineTrack_inputs(): units="mm", xor=["seed_file", "seed_spec"], ), - step_size=dict(argstr="-step %s", units="mm",), - stop=dict(argstr="-stop",), - unidirectional=dict(argstr="-unidirectional",), + step_size=dict( + argstr="-step %s", + units="mm", + ), + stop=dict( + argstr="-stop", + ), + unidirectional=dict( + argstr="-unidirectional", + ), ) inputs = StreamlineTrack.input_spec() @@ -78,7 +133,11 @@ def test_StreamlineTrack_inputs(): def test_StreamlineTrack_outputs(): - output_map = dict(tracked=dict(extensions=None,),) + output_map = dict( + tracked=dict( + extensions=None, + ), + ) outputs = StreamlineTrack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py index 
da5225cc42..c32daa3574 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py @@ -4,12 +4,33 @@ def test_Tensor2ApparentDiffusion_inputs(): input_map = dict( - args=dict(argstr="%s",), - debug=dict(argstr="-debug", position=1,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), - quiet=dict(argstr="-quiet", position=1,), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + quiet=dict( + argstr="-quiet", + position=1, + ), ) inputs = Tensor2ApparentDiffusion.input_spec() @@ -19,7 +40,11 @@ def test_Tensor2ApparentDiffusion_inputs(): def test_Tensor2ApparentDiffusion_outputs(): - output_map = dict(ADC=dict(extensions=None,),) + output_map = dict( + ADC=dict( + extensions=None, + ), + ) outputs = Tensor2ApparentDiffusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py index 8f9937b550..bf90806f74 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py @@ -4,12 +4,33 @@ def test_Tensor2FractionalAnisotropy_inputs(): input_map = dict( - args=dict(argstr="%s",), - debug=dict(argstr="-debug", position=1,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), - quiet=dict(argstr="-quiet", position=1,), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + quiet=dict( + argstr="-quiet", + position=1, + ), ) inputs = Tensor2FractionalAnisotropy.input_spec() @@ -19,7 +40,11 @@ def test_Tensor2FractionalAnisotropy_inputs(): def test_Tensor2FractionalAnisotropy_outputs(): - output_map = dict(FA=dict(extensions=None,),) + output_map = dict( + FA=dict( + extensions=None, + ), + ) outputs = Tensor2FractionalAnisotropy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py index dcc58860a4..a87eefef7e 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py @@ -4,12 +4,33 @@ def test_Tensor2Vector_inputs(): input_map = dict( - args=dict(argstr="%s",), - debug=dict(argstr="-debug", position=1,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), - quiet=dict(argstr="-quiet", position=1,), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + 
position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + quiet=dict( + argstr="-quiet", + position=1, + ), ) inputs = Tensor2Vector.input_spec() @@ -19,7 +40,11 @@ def test_Tensor2Vector_inputs(): def test_Tensor2Vector_outputs(): - output_map = dict(vector=dict(extensions=None,),) + output_map = dict( + vector=dict( + extensions=None, + ), + ) outputs = Tensor2Vector.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py b/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py index b1e9a27016..0a67f4db56 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py @@ -4,16 +4,47 @@ def test_Threshold_inputs(): input_map = dict( - absolute_threshold_value=dict(argstr="-abs %s",), - args=dict(argstr="%s",), - debug=dict(argstr="-debug", position=1,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - invert=dict(argstr="-invert", position=1,), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), - percentage_threshold_value=dict(argstr="-percent %s",), - quiet=dict(argstr="-quiet", position=1,), - replace_zeros_with_NaN=dict(argstr="-nan", position=1,), + absolute_threshold_value=dict( + argstr="-abs %s", + ), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + invert=dict( + argstr="-invert", + position=1, + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + percentage_threshold_value=dict( + argstr="-percent %s", + ), + quiet=dict( + argstr="-quiet", + position=1, + ), + replace_zeros_with_NaN=dict( + argstr="-nan", + position=1, + ), ) inputs = Threshold.input_spec() @@ -23,7 +54,11 @@ def test_Threshold_inputs(): def test_Threshold_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Threshold.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py b/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py index b8bc425de7..9323ba74f2 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py @@ -4,16 +4,52 @@ def test_Tracks2Prob_inputs(): input_map = dict( - args=dict(argstr="%s",), - colour=dict(argstr="-colour", position=3,), - environ=dict(nohash=True, usedefault=True,), - fraction=dict(argstr="-fraction", position=3,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1,), - output_datatype=dict(argstr="-datatype %s", position=2,), - resample=dict(argstr="-resample %d", position=3, units="mm",), - template_file=dict(argstr="-template %s", extensions=None, position=1,), - voxel_dims=dict(argstr="-vox %s", position=2, sep=",",), + args=dict( + argstr="%s", + ), + colour=dict( + argstr="-colour", + position=3, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fraction=dict( + argstr="-fraction", + 
position=3, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + output_datatype=dict( + argstr="-datatype %s", + position=2, + ), + resample=dict( + argstr="-resample %d", + position=3, + units="mm", + ), + template_file=dict( + argstr="-template %s", + extensions=None, + position=1, + ), + voxel_dims=dict( + argstr="-vox %s", + position=2, + sep=",", + ), ) inputs = Tracks2Prob.input_spec() @@ -23,7 +59,11 @@ def test_Tracks2Prob_inputs(): def test_Tracks2Prob_outputs(): - output_map = dict(tract_image=dict(extensions=None,),) + output_map = dict( + tract_image=dict( + extensions=None, + ), + ) outputs = Tracks2Prob.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py b/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py index 8064175d65..a9334f5412 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py @@ -4,11 +4,25 @@ def test_ACTPrepareFSL_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), out_file=dict( - argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True, + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + usedefault=True, ), ) inputs = ACTPrepareFSL.input_spec() @@ -19,7 +33,11 @@ def test_ACTPrepareFSL_inputs(): def test_ACTPrepareFSL_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ACTPrepareFSL.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py b/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py index c7ce6cc9af..ddf96a9c5f 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py @@ -4,17 +4,48 @@ def test_BrainMask_inputs(): input_map = dict( - args=dict(argstr="%s",), - bval_scale=dict(argstr="-bvalue_scaling %s",), - environ=dict(nohash=True, usedefault=True,), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), - in_bval=dict(extensions=None,), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - nthreads=dict(argstr="-nthreads %d", nohash=True,), + args=dict( + argstr="%s", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), out_file=dict( - argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True, + argstr="%s", + extensions=None, + mandatory=True, + 
position=-1, + usedefault=True, ), ) inputs = BrainMask.input_spec() @@ -25,7 +56,11 @@ def test_BrainMask_inputs(): def test_BrainMask_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = BrainMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py b/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py index 95aae6fc03..bfb0b1f0c9 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py @@ -4,23 +4,64 @@ def test_BuildConnectome_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), - in_parc=dict(argstr="%s", extensions=None, position=-2,), - in_scalar=dict(argstr="-image %s", extensions=None,), - in_weights=dict(argstr="-tck_weights_in %s", extensions=None,), - keep_unassigned=dict(argstr="-keep_unassigned",), - metric=dict(argstr="-metric %s",), - nthreads=dict(argstr="-nthreads %d", nohash=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + in_parc=dict( + argstr="%s", + extensions=None, + position=-2, + ), + in_scalar=dict( + argstr="-image %s", + extensions=None, + ), + in_weights=dict( + argstr="-tck_weights_in %s", + extensions=None, + ), + keep_unassigned=dict( + argstr="-keep_unassigned", + ), + metric=dict( + argstr="-metric %s", + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), out_file=dict( - argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True, + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + usedefault=True, + ), + search_forward=dict( + argstr="-assignment_forward_search %f", + ), + search_radius=dict( + argstr="-assignment_radial_search %f", + ), + search_reverse=dict( + argstr="-assignment_reverse_search %f", + ), + vox_lookup=dict( + argstr="-assignment_voxel_lookup", + ), + zero_diagonal=dict( + argstr="-zero_diagonal", ), - search_forward=dict(argstr="-assignment_forward_search %f",), - search_radius=dict(argstr="-assignment_radial_search %f",), - search_reverse=dict(argstr="-assignment_reverse_search %f",), - vox_lookup=dict(argstr="-assignment_voxel_lookup",), - zero_diagonal=dict(argstr="-zero_diagonal",), ) inputs = BuildConnectome.input_spec() @@ -30,7 +71,11 @@ def test_BuildConnectome_inputs(): def test_BuildConnectome_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = BuildConnectome.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py b/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py index f6a8734cef..ab1d984425 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py @@ -4,27 +4,82 @@ def test_ComputeTDI_inputs(): input_map = dict( - args=dict(argstr="%s",), - contrast=dict(argstr="-constrast %s",), - data_type=dict(argstr="-datatype %s",), - dixel=dict(argstr="-dixel %s", extensions=None,), - ends_only=dict(argstr="-ends_only",), - environ=dict(nohash=True, usedefault=True,), - fwhm_tck=dict(argstr="-fwhm_tck %f",), - in_file=dict(argstr="%s", 
extensions=None, mandatory=True, position=-2,), - in_map=dict(argstr="-image %s", extensions=None,), - map_zero=dict(argstr="-map_zero",), - max_tod=dict(argstr="-tod %d",), - nthreads=dict(argstr="-nthreads %d", nohash=True,), - out_file=dict(argstr="%s", extensions=None, position=-1, usedefault=True,), - precise=dict(argstr="-precise",), - reference=dict(argstr="-template %s", extensions=None,), - stat_tck=dict(argstr="-stat_tck %s",), - stat_vox=dict(argstr="-stat_vox %s",), - tck_weights=dict(argstr="-tck_weights_in %s", extensions=None,), - upsample=dict(argstr="-upsample %d",), - use_dec=dict(argstr="-dec",), - vox_size=dict(argstr="-vox %s", sep=",",), + args=dict( + argstr="%s", + ), + contrast=dict( + argstr="-constrast %s", + ), + data_type=dict( + argstr="-datatype %s", + ), + dixel=dict( + argstr="-dixel %s", + extensions=None, + ), + ends_only=dict( + argstr="-ends_only", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fwhm_tck=dict( + argstr="-fwhm_tck %f", + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + in_map=dict( + argstr="-image %s", + extensions=None, + ), + map_zero=dict( + argstr="-map_zero", + ), + max_tod=dict( + argstr="-tod %d", + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), + out_file=dict( + argstr="%s", + extensions=None, + position=-1, + usedefault=True, + ), + precise=dict( + argstr="-precise", + ), + reference=dict( + argstr="-template %s", + extensions=None, + ), + stat_tck=dict( + argstr="-stat_tck %s", + ), + stat_vox=dict( + argstr="-stat_vox %s", + ), + tck_weights=dict( + argstr="-tck_weights_in %s", + extensions=None, + ), + upsample=dict( + argstr="-upsample %d", + ), + use_dec=dict( + argstr="-dec", + ), + vox_size=dict( + argstr="-vox %s", + sep=",", + ), ) inputs = ComputeTDI.input_spec() @@ -34,7 +89,11 @@ def test_ComputeTDI_inputs(): def test_ComputeTDI_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ComputeTDI.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ConstrainedSphericalDeconvolution.py b/nipype/interfaces/mrtrix3/tests/test_auto_ConstrainedSphericalDeconvolution.py index 9bcba81e49..1e87ebc377 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ConstrainedSphericalDeconvolution.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ConstrainedSphericalDeconvolution.py @@ -4,28 +4,96 @@ def test_ConstrainedSphericalDeconvolution_inputs(): input_map = dict( - algorithm=dict(argstr="%s", mandatory=True, position=-8,), - args=dict(argstr="%s",), - bval_scale=dict(argstr="-bvalue_scaling %s",), - csf_odf=dict(argstr="%s", extensions=None, position=-1,), - csf_txt=dict(argstr="%s", extensions=None, position=-2,), - environ=dict(nohash=True, usedefault=True,), - gm_odf=dict(argstr="%s", extensions=None, position=-3,), - gm_txt=dict(argstr="%s", extensions=None, position=-4,), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), - in_bval=dict(extensions=None,), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), - in_dirs=dict(argstr="-directions %s", extensions=None,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-7,), - mask_file=dict(argstr="-mask %s", extensions=None,), - max_sh=dict(argstr="-lmax %s", sep=",",), - nthreads=dict(argstr="-nthreads %d", nohash=True,), - 
shell=dict(argstr="-shell %s", sep=",",), + algorithm=dict( + argstr="%s", + mandatory=True, + position=-8, + ), + args=dict( + argstr="%s", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + csf_odf=dict( + argstr="%s", + extensions=None, + position=-1, + ), + csf_txt=dict( + argstr="%s", + extensions=None, + position=-2, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gm_odf=dict( + argstr="%s", + extensions=None, + position=-3, + ), + gm_txt=dict( + argstr="%s", + extensions=None, + position=-4, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_dirs=dict( + argstr="-directions %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-7, + ), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + max_sh=dict( + argstr="-lmax %s", + sep=",", + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), + shell=dict( + argstr="-shell %s", + sep=",", + ), wm_odf=dict( - argstr="%s", extensions=None, mandatory=True, position=-5, usedefault=True, + argstr="%s", + extensions=None, + mandatory=True, + position=-5, + usedefault=True, + ), + wm_txt=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-6, ), - wm_txt=dict(argstr="%s", extensions=None, mandatory=True, position=-6,), ) inputs = ConstrainedSphericalDeconvolution.input_spec() @@ -36,9 +104,18 @@ def test_ConstrainedSphericalDeconvolution_inputs(): def test_ConstrainedSphericalDeconvolution_outputs(): output_map = dict( - csf_odf=dict(argstr="%s", extensions=None,), - gm_odf=dict(argstr="%s", extensions=None,), - wm_odf=dict(argstr="%s", extensions=None,), + csf_odf=dict( + argstr="%s", + extensions=None, + ), + gm_odf=dict( + argstr="%s", + extensions=None, + ), + wm_odf=dict( + argstr="%s", + extensions=None, + ), ) outputs = ConstrainedSphericalDeconvolution.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py index 210b39b141..f67dd54667 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py @@ -4,17 +4,50 @@ def test_DWIBiasCorrect_inputs(): input_map = dict( - args=dict(argstr="%s",), - bias=dict(argstr="-bias %s", extensions=None,), - bval_scale=dict(argstr="-bvalue_scaling %s",), - environ=dict(nohash=True, usedefault=True,), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), - in_bval=dict(extensions=None,), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - in_mask=dict(argstr="-mask %s", extensions=None,), - nthreads=dict(argstr="-nthreads %d", nohash=True,), + args=dict( + argstr="%s", + ), + bias=dict( + argstr="-bias %s", + extensions=None, + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, 
+ mandatory=True, + position=-2, + ), + in_mask=dict( + argstr="-mask %s", + extensions=None, + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), out_file=dict( argstr="%s", extensions=None, @@ -24,8 +57,16 @@ def test_DWIBiasCorrect_inputs(): name_template="%s_biascorr", position=-1, ), - use_ants=dict(argstr="-ants", mandatory=True, xor=["use_fsl"],), - use_fsl=dict(argstr="-fsl", mandatory=True, xor=["use_ants"],), + use_ants=dict( + argstr="-ants", + mandatory=True, + xor=["use_fsl"], + ), + use_fsl=dict( + argstr="-fsl", + mandatory=True, + xor=["use_ants"], + ), ) inputs = DWIBiasCorrect.input_spec() @@ -35,7 +76,14 @@ def test_DWIBiasCorrect_inputs(): def test_DWIBiasCorrect_outputs(): - output_map = dict(bias=dict(extensions=None,), out_file=dict(extensions=None,),) + output_map = dict( + bias=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + ) outputs = DWIBiasCorrect.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py index d5050327aa..efa722c81d 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py @@ -4,16 +4,46 @@ def test_DWIDenoise_inputs(): input_map = dict( - args=dict(argstr="%s",), - bval_scale=dict(argstr="-bvalue_scaling %s",), - environ=dict(nohash=True, usedefault=True,), - extent=dict(argstr="-extent %d,%d,%d",), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), - in_bval=dict(extensions=None,), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - mask=dict(argstr="-mask %s", extensions=None, position=1,), + args=dict( + argstr="%s", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + extent=dict( + argstr="-extent %d,%d,%d", + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + position=1, + ), noise=dict( argstr="-noise %s", extensions=None, @@ -21,7 +51,10 @@ def test_DWIDenoise_inputs(): name_source="in_file", name_template="%s_noise", ), - nthreads=dict(argstr="-nthreads %d", nohash=True,), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), out_file=dict( argstr="%s", extensions=None, @@ -39,7 +72,14 @@ def test_DWIDenoise_inputs(): def test_DWIDenoise_outputs(): - output_map = dict(noise=dict(extensions=None,), out_file=dict(extensions=None,),) + output_map = dict( + noise=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + ) outputs = DWIDenoise.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py index 18fa49c260..379e67d397 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py @@ -4,20 +4,61 @@ def test_DWIExtract_inputs(): input_map = dict( - args=dict(argstr="%s",), - 
bval_scale=dict(argstr="-bvalue_scaling %s",), - bzero=dict(argstr="-bzero",), - environ=dict(nohash=True, usedefault=True,), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), - in_bval=dict(extensions=None,), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - nobzero=dict(argstr="-no_bzero",), - nthreads=dict(argstr="-nthreads %d", nohash=True,), - out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), - shell=dict(argstr="-shell %s", sep=",",), - singleshell=dict(argstr="-singleshell",), + args=dict( + argstr="%s", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + bzero=dict( + argstr="-bzero", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + nobzero=dict( + argstr="-no_bzero", + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), + out_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), + shell=dict( + argstr="-shell %s", + sep=",", + ), + singleshell=dict( + argstr="-singleshell", + ), ) inputs = DWIExtract.input_spec() @@ -27,7 +68,11 @@ def test_DWIExtract_inputs(): def test_DWIExtract_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = DWIExtract.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py index bfadae423f..d91d270c4d 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py @@ -4,28 +4,99 @@ def test_EstimateFOD_inputs(): input_map = dict( - algorithm=dict(argstr="%s", mandatory=True, position=-8,), - args=dict(argstr="%s",), - bval_scale=dict(argstr="-bvalue_scaling %s",), - csf_odf=dict(argstr="%s", extensions=None, position=-1, usedefault=True,), - csf_txt=dict(argstr="%s", extensions=None, position=-2,), - environ=dict(nohash=True, usedefault=True,), - gm_odf=dict(argstr="%s", extensions=None, position=-3, usedefault=True,), - gm_txt=dict(argstr="%s", extensions=None, position=-4,), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), - in_bval=dict(extensions=None,), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), - in_dirs=dict(argstr="-directions %s", extensions=None,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-7,), - mask_file=dict(argstr="-mask %s", extensions=None,), - max_sh=dict(argstr="-lmax %s", sep=",", usedefault=True,), - nthreads=dict(argstr="-nthreads %d", nohash=True,), - shell=dict(argstr="-shell %s", sep=",",), + algorithm=dict( + argstr="%s", + mandatory=True, + position=-8, + ), + args=dict( + argstr="%s", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + csf_odf=dict( + argstr="%s", + extensions=None, + position=-1, + usedefault=True, + ), + csf_txt=dict( + argstr="%s", + extensions=None, + position=-2, + ), + environ=dict( + 
nohash=True, + usedefault=True, + ), + gm_odf=dict( + argstr="%s", + extensions=None, + position=-3, + usedefault=True, + ), + gm_txt=dict( + argstr="%s", + extensions=None, + position=-4, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_dirs=dict( + argstr="-directions %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-7, + ), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + max_sh=dict( + argstr="-lmax %s", + sep=",", + usedefault=True, + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), + shell=dict( + argstr="-shell %s", + sep=",", + ), wm_odf=dict( - argstr="%s", extensions=None, mandatory=True, position=-5, usedefault=True, + argstr="%s", + extensions=None, + mandatory=True, + position=-5, + usedefault=True, + ), + wm_txt=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-6, ), - wm_txt=dict(argstr="%s", extensions=None, mandatory=True, position=-6,), ) inputs = EstimateFOD.input_spec() @@ -36,9 +107,18 @@ def test_EstimateFOD_inputs(): def test_EstimateFOD_outputs(): output_map = dict( - csf_odf=dict(argstr="%s", extensions=None,), - gm_odf=dict(argstr="%s", extensions=None,), - wm_odf=dict(argstr="%s", extensions=None,), + csf_odf=dict( + argstr="%s", + extensions=None, + ), + gm_odf=dict( + argstr="%s", + extensions=None, + ), + wm_odf=dict( + argstr="%s", + extensions=None, + ), ) outputs = EstimateFOD.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py b/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py index d586dbaf59..cbadd78fa6 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py @@ -4,21 +4,60 @@ def test_FitTensor_inputs(): input_map = dict( - args=dict(argstr="%s",), - bval_scale=dict(argstr="-bvalue_scaling %s",), - environ=dict(nohash=True, usedefault=True,), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), - in_bval=dict(extensions=None,), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - in_mask=dict(argstr="-mask %s", extensions=None,), - method=dict(argstr="-method %s",), - nthreads=dict(argstr="-nthreads %d", nohash=True,), + args=dict( + argstr="%s", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + in_mask=dict( + argstr="-mask %s", + extensions=None, + ), + method=dict( + argstr="-method %s", + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), out_file=dict( - argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True, + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + usedefault=True, + ), + reg_term=dict( + argstr="-regularisation %f", + max_ver="0.3.13", ), - reg_term=dict(argstr="-regularisation %f", 
max_ver="0.3.13",), ) inputs = FitTensor.input_spec() @@ -28,7 +67,11 @@ def test_FitTensor_inputs(): def test_FitTensor_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = FitTensor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py b/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py index d41fd52a11..1b135a5917 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py @@ -4,17 +4,53 @@ def test_Generate5tt_inputs(): input_map = dict( - algorithm=dict(argstr="%s", mandatory=True, position=-3,), - args=dict(argstr="%s",), - bval_scale=dict(argstr="-bvalue_scaling %s",), - environ=dict(nohash=True, usedefault=True,), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), - in_bval=dict(extensions=None,), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - nthreads=dict(argstr="-nthreads %d", nohash=True,), - out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), + algorithm=dict( + argstr="%s", + mandatory=True, + position=-3, + ), + args=dict( + argstr="%s", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), + out_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), ) inputs = Generate5tt.input_spec() @@ -24,7 +60,11 @@ def test_Generate5tt_inputs(): def test_Generate5tt_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Generate5tt.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py index 2c37a6bc93..15116f9bb8 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py @@ -4,19 +4,55 @@ def test_LabelConfig_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_config=dict(argstr="%s", extensions=None, position=-2,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), - lut_aal=dict(argstr="-lut_aal %s", extensions=None,), - lut_basic=dict(argstr="-lut_basic %s", extensions=None,), - lut_fs=dict(argstr="-lut_freesurfer %s", extensions=None,), - lut_itksnap=dict(argstr="-lut_itksnap %s", extensions=None,), - nthreads=dict(argstr="-nthreads %d", nohash=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_config=dict( + argstr="%s", + extensions=None, + position=-2, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + lut_aal=dict( + argstr="-lut_aal %s", + extensions=None, + ), + lut_basic=dict( + 
argstr="-lut_basic %s", + extensions=None, + ), + lut_fs=dict( + argstr="-lut_freesurfer %s", + extensions=None, + ), + lut_itksnap=dict( + argstr="-lut_itksnap %s", + extensions=None, + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), out_file=dict( - argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True, + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + usedefault=True, + ), + spine=dict( + argstr="-spine %s", + extensions=None, ), - spine=dict(argstr="-spine %s", extensions=None,), ) inputs = LabelConfig.input_spec() @@ -26,7 +62,11 @@ def test_LabelConfig_inputs(): def test_LabelConfig_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = LabelConfig.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py index 9db9bb4df5..98512ac317 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py @@ -4,16 +4,45 @@ def test_LabelConvert_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_config=dict(argstr="%s", extensions=None, position=-2,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-4,), - in_lut=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), - num_threads=dict(argstr="-nthreads %d", nohash=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_config=dict( + argstr="%s", + extensions=None, + position=-2, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-4, + ), + in_lut=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + num_threads=dict( + argstr="-nthreads %d", + nohash=True, + ), out_file=dict( - argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True, + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + usedefault=True, + ), + spine=dict( + argstr="-spine %s", + extensions=None, ), - spine=dict(argstr="-spine %s", extensions=None,), ) inputs = LabelConvert.input_spec() @@ -23,7 +52,11 @@ def test_LabelConvert_inputs(): def test_LabelConvert_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = LabelConvert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py index 2440113e20..578ffb9b1a 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py @@ -4,22 +4,65 @@ def test_MRConvert_inputs(): input_map = dict( - args=dict(argstr="%s",), - axes=dict(argstr="-axes %s", sep=",",), - bval_scale=dict(argstr="-bvalue_scaling %s",), - coord=dict(argstr="-coord %s", sep=" ",), - environ=dict(nohash=True, usedefault=True,), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), - in_bval=dict(extensions=None,), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - nthreads=dict(argstr="-nthreads %d", nohash=True,), + args=dict( + argstr="%s", + ), + axes=dict( 
+ argstr="-axes %s", + sep=",", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + coord=dict( + argstr="-coord %s", + sep=" ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), out_file=dict( - argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True, + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + usedefault=True, + ), + scaling=dict( + argstr="-scaling %s", + sep=",", + ), + vox=dict( + argstr="-vox %s", + sep=",", ), - scaling=dict(argstr="-scaling %s", sep=",",), - vox=dict(argstr="-vox %s", sep=",",), ) inputs = MRConvert.input_spec() @@ -29,7 +72,11 @@ def test_MRConvert_inputs(): def test_MRConvert_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRConvert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py index 439e834eb2..f1ef52ab88 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py @@ -4,19 +4,61 @@ def test_MRDeGibbs_inputs(): input_map = dict( - args=dict(argstr="%s",), - axes=dict(argstr="-axes %s", maxlen=2, minlen=2, sep=",", usedefault=True,), - bval_scale=dict(argstr="-bvalue_scaling %s",), - environ=dict(nohash=True, usedefault=True,), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), - in_bval=dict(extensions=None,), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - maxW=dict(argstr="-maxW %d", usedefault=True,), - minW=dict(argstr="-minW %d", usedefault=True,), - nshifts=dict(argstr="-nshifts %d", usedefault=True,), - nthreads=dict(argstr="-nthreads %d", nohash=True,), + args=dict( + argstr="%s", + ), + axes=dict( + argstr="-axes %s", + maxlen=2, + minlen=2, + sep=",", + usedefault=True, + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + maxW=dict( + argstr="-maxW %d", + usedefault=True, + ), + minW=dict( + argstr="-minW %d", + usedefault=True, + ), + nshifts=dict( + argstr="-nshifts %d", + usedefault=True, + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), out_file=dict( argstr="%s", extensions=None, @@ -34,7 +76,11 @@ def test_MRDeGibbs_inputs(): def test_MRDeGibbs_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRDeGibbs.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py index 33f9c82d22..6446b2ceda 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py @@ -4,18 +4,56 @@ def test_MRMath_inputs(): input_map = dict( - args=dict(argstr="%s",), - axis=dict(argstr="-axis %d",), - bval_scale=dict(argstr="-bvalue_scaling %s",), - environ=dict(nohash=True, usedefault=True,), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), - in_bval=dict(extensions=None,), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), - nthreads=dict(argstr="-nthreads %d", nohash=True,), - operation=dict(argstr="%s", mandatory=True, position=-2,), - out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), + args=dict( + argstr="%s", + ), + axis=dict( + argstr="-axis %d", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), + operation=dict( + argstr="%s", + mandatory=True, + position=-2, + ), + out_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), ) inputs = MRMath.input_spec() @@ -25,7 +63,11 @@ def test_MRMath_inputs(): def test_MRMath_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRMath.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py index cf41dfe856..ee982c9561 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py @@ -4,19 +4,51 @@ def test_MRResize_inputs(): input_map = dict( - args=dict(argstr="%s",), - bval_scale=dict(argstr="-bvalue_scaling %s",), - environ=dict(nohash=True, usedefault=True,), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), + args=dict( + argstr="%s", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), image_size=dict( - argstr="-size %d,%d,%d", mandatory=True, xor=["voxel_size", "scale_factor"], + argstr="-size %d,%d,%d", + mandatory=True, + xor=["voxel_size", "scale_factor"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + interpolation=dict( + argstr="-interp %s", + usedefault=True, + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, ), - in_bval=dict(extensions=None,), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), - in_file=dict(argstr="%s", 
extensions=None, mandatory=True, position=-2,), - interpolation=dict(argstr="-interp %s", usedefault=True,), - nthreads=dict(argstr="-nthreads %d", nohash=True,), out_file=dict( argstr="%s", extensions=None, @@ -26,7 +58,9 @@ def test_MRResize_inputs(): position=-1, ), scale_factor=dict( - argstr="-scale %g,%g,%g", mandatory=True, xor=["image_size", "voxel_size"], + argstr="-scale %g,%g,%g", + mandatory=True, + xor=["image_size", "voxel_size"], ), voxel_size=dict( argstr="-voxel %g,%g,%g", @@ -42,7 +76,11 @@ def test_MRResize_inputs(): def test_MRResize_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRResize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py index a5042e58d9..7689f14f11 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py @@ -4,7 +4,13 @@ def test_MRTrix3Base_inputs(): input_map = dict( - args=dict(argstr="%s",), environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), ) inputs = MRTrix3Base.input_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py b/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py index 602d3c0228..0fd63be8af 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py @@ -4,14 +4,36 @@ def test_Mesh2PVE_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), - in_first=dict(argstr="-first %s", extensions=None,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + in_first=dict( + argstr="-first %s", + extensions=None, + ), out_file=dict( - argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True, + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + usedefault=True, + ), + reference=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, ), - reference=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), ) inputs = Mesh2PVE.input_spec() @@ -21,7 +43,11 @@ def test_Mesh2PVE_inputs(): def test_Mesh2PVE_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Mesh2PVE.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py b/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py index 802f2fd64a..ef3053cede 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py @@ -4,13 +4,36 @@ def test_ReplaceFSwithFIRST_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_config=dict(argstr="%s", extensions=None, position=-2,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-4,), - in_t1w=dict(argstr="%s", extensions=None, mandatory=True, position=-3,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + 
in_config=dict( + argstr="%s", + extensions=None, + position=-2, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-4, + ), + in_t1w=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), out_file=dict( - argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True, + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + usedefault=True, ), ) inputs = ReplaceFSwithFIRST.input_spec() @@ -21,7 +44,11 @@ def test_ReplaceFSwithFIRST_inputs(): def test_ReplaceFSwithFIRST_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ReplaceFSwithFIRST.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py b/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py index b35f6529e7..f7e556f466 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py @@ -4,22 +4,76 @@ def test_ResponseSD_inputs(): input_map = dict( - algorithm=dict(argstr="%s", mandatory=True, position=1,), - args=dict(argstr="%s",), - bval_scale=dict(argstr="-bvalue_scaling %s",), - csf_file=dict(argstr="%s", extensions=None, position=-1,), - environ=dict(nohash=True, usedefault=True,), - gm_file=dict(argstr="%s", extensions=None, position=-2,), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), - in_bval=dict(extensions=None,), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-5,), - in_mask=dict(argstr="-mask %s", extensions=None,), - max_sh=dict(argstr="-lmax %s", sep=",",), - mtt_file=dict(argstr="%s", extensions=None, position=-4,), - nthreads=dict(argstr="-nthreads %d", nohash=True,), - wm_file=dict(argstr="%s", extensions=None, position=-3, usedefault=True,), + algorithm=dict( + argstr="%s", + mandatory=True, + position=1, + ), + args=dict( + argstr="%s", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + csf_file=dict( + argstr="%s", + extensions=None, + position=-1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gm_file=dict( + argstr="%s", + extensions=None, + position=-2, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-5, + ), + in_mask=dict( + argstr="-mask %s", + extensions=None, + ), + max_sh=dict( + argstr="-lmax %s", + sep=",", + ), + mtt_file=dict( + argstr="%s", + extensions=None, + position=-4, + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), + wm_file=dict( + argstr="%s", + extensions=None, + position=-3, + usedefault=True, + ), ) inputs = ResponseSD.input_spec() @@ -30,9 +84,18 @@ def test_ResponseSD_inputs(): def test_ResponseSD_outputs(): output_map = dict( - csf_file=dict(argstr="%s", extensions=None,), - gm_file=dict(argstr="%s", extensions=None,), - wm_file=dict(argstr="%s", extensions=None,), + csf_file=dict( + argstr="%s", + extensions=None, + ), + gm_file=dict( + argstr="%s", + extensions=None, + ), + wm_file=dict( + argstr="%s", + extensions=None, + ), ) outputs = ResponseSD.output_spec() diff --git 
a/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py b/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py index 2c72dee012..215dafedc0 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py @@ -4,13 +4,37 @@ def test_TCK2VTK_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - nthreads=dict(argstr="-nthreads %d", nohash=True,), - out_file=dict(argstr="%s", extensions=None, position=-1, usedefault=True,), - reference=dict(argstr="-image %s", extensions=None,), - voxel=dict(argstr="-image %s", extensions=None,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), + out_file=dict( + argstr="%s", + extensions=None, + position=-1, + usedefault=True, + ), + reference=dict( + argstr="-image %s", + extensions=None, + ), + voxel=dict( + argstr="-image %s", + extensions=None, + ), ) inputs = TCK2VTK.input_spec() @@ -20,7 +44,11 @@ def test_TCK2VTK_inputs(): def test_TCK2VTK_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TCK2VTK.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py b/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py index be6736cecb..b23813aaf4 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py @@ -4,16 +4,47 @@ def test_TensorMetrics_inputs(): input_map = dict( - args=dict(argstr="%s",), - component=dict(argstr="-num %s", sep=",", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1,), - in_mask=dict(argstr="-mask %s", extensions=None,), - modulate=dict(argstr="-modulate %s",), - out_adc=dict(argstr="-adc %s", extensions=None,), - out_eval=dict(argstr="-value %s", extensions=None,), - out_evec=dict(argstr="-vector %s", extensions=None,), - out_fa=dict(argstr="-fa %s", extensions=None,), + args=dict( + argstr="%s", + ), + component=dict( + argstr="-num %s", + sep=",", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), + in_mask=dict( + argstr="-mask %s", + extensions=None, + ), + modulate=dict( + argstr="-modulate %s", + ), + out_adc=dict( + argstr="-adc %s", + extensions=None, + ), + out_eval=dict( + argstr="-value %s", + extensions=None, + ), + out_evec=dict( + argstr="-vector %s", + extensions=None, + ), + out_fa=dict( + argstr="-fa %s", + extensions=None, + ), ) inputs = TensorMetrics.input_spec() @@ -24,10 +55,18 @@ def test_TensorMetrics_inputs(): def test_TensorMetrics_outputs(): output_map = dict( - out_adc=dict(extensions=None,), - out_eval=dict(extensions=None,), - out_evec=dict(extensions=None,), - out_fa=dict(extensions=None,), + out_adc=dict( + extensions=None, + ), + out_eval=dict( + extensions=None, + ), + out_evec=dict( + extensions=None, + ), + out_fa=dict( + extensions=None, + ), ) outputs = TensorMetrics.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py 
b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py index 9c1b51d363..2a70fe09f6 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py @@ -4,60 +4,168 @@ def test_Tractography_inputs(): input_map = dict( - act_file=dict(argstr="-act %s", extensions=None,), - algorithm=dict(argstr="-algorithm %s", usedefault=True,), - angle=dict(argstr="-angle %f",), - args=dict(argstr="%s",), - backtrack=dict(argstr="-backtrack",), - bval_scale=dict(argstr="-bvalue_scaling %s",), - crop_at_gmwmi=dict(argstr="-crop_at_gmwmi",), - cutoff=dict(argstr="-cutoff %f",), - cutoff_init=dict(argstr="-initcutoff %f",), - downsample=dict(argstr="-downsample %f",), - environ=dict(nohash=True, usedefault=True,), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), - in_bval=dict(extensions=None,), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - init_dir=dict(argstr="-initdirection %f,%f,%f",), - max_length=dict(argstr="-maxlength %f",), - max_seed_attempts=dict(argstr="-max_seed_attempts %d",), - max_tracks=dict(argstr="-maxnum %d",), - min_length=dict(argstr="-minlength %f",), - n_samples=dict(argstr="-samples %d", usedefault=True,), - n_tracks=dict(argstr="-number %d", max_ver="0.4",), - n_trials=dict(argstr="-trials %d",), - noprecompt=dict(argstr="-noprecomputed",), - nthreads=dict(argstr="-nthreads %d", nohash=True,), + act_file=dict( + argstr="-act %s", + extensions=None, + ), + algorithm=dict( + argstr="-algorithm %s", + usedefault=True, + ), + angle=dict( + argstr="-angle %f", + ), + args=dict( + argstr="%s", + ), + backtrack=dict( + argstr="-backtrack", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + crop_at_gmwmi=dict( + argstr="-crop_at_gmwmi", + ), + cutoff=dict( + argstr="-cutoff %f", + ), + cutoff_init=dict( + argstr="-initcutoff %f", + ), + downsample=dict( + argstr="-downsample %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + init_dir=dict( + argstr="-initdirection %f,%f,%f", + ), + max_length=dict( + argstr="-maxlength %f", + ), + max_seed_attempts=dict( + argstr="-max_seed_attempts %d", + ), + max_tracks=dict( + argstr="-maxnum %d", + ), + min_length=dict( + argstr="-minlength %f", + ), + n_samples=dict( + argstr="-samples %d", + usedefault=True, + ), + n_tracks=dict( + argstr="-number %d", + max_ver="0.4", + ), + n_trials=dict( + argstr="-trials %d", + ), + noprecompt=dict( + argstr="-noprecomputed", + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), out_file=dict( - argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True, - ), - out_seeds=dict(argstr="-output_seeds %s", extensions=None, usedefault=True,), - power=dict(argstr="-power %d",), - roi_excl=dict(argstr="-exclude %s",), - roi_incl=dict(argstr="-include %s",), - roi_mask=dict(argstr="-mask %s",), - seed_dynamic=dict(argstr="-seed_dynamic %s", extensions=None,), + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + usedefault=True, + ), + out_seeds=dict( + 
argstr="-output_seeds %s", + extensions=None, + usedefault=True, + ), + power=dict( + argstr="-power %d", + ), + roi_excl=dict( + argstr="-exclude %s", + ), + roi_incl=dict( + argstr="-include %s", + ), + roi_mask=dict( + argstr="-mask %s", + ), + seed_dynamic=dict( + argstr="-seed_dynamic %s", + extensions=None, + ), seed_gmwmi=dict( - argstr="-seed_gmwmi %s", extensions=None, requires=["act_file"], + argstr="-seed_gmwmi %s", + extensions=None, + requires=["act_file"], ), seed_grid_voxel=dict( - argstr="-seed_grid_per_voxel %s %d", xor=["seed_image", "seed_rnd_voxel"], + argstr="-seed_grid_per_voxel %s %d", + xor=["seed_image", "seed_rnd_voxel"], + ), + seed_image=dict( + argstr="-seed_image %s", + extensions=None, + ), + seed_rejection=dict( + argstr="-seed_rejection %s", + extensions=None, ), - seed_image=dict(argstr="-seed_image %s", extensions=None,), - seed_rejection=dict(argstr="-seed_rejection %s", extensions=None,), seed_rnd_voxel=dict( argstr="-seed_random_per_voxel %s %d", xor=["seed_image", "seed_grid_voxel"], ), - seed_sphere=dict(argstr="-seed_sphere %f,%f,%f,%f",), - select=dict(argstr="-select %d", min_ver="3",), - sph_trait=dict(argstr="%f,%f,%f,%f",), - step_size=dict(argstr="-step %f",), - stop=dict(argstr="-stop",), - unidirectional=dict(argstr="-unidirectional",), - use_rk4=dict(argstr="-rk4",), + seed_sphere=dict( + argstr="-seed_sphere %f,%f,%f,%f", + ), + select=dict( + argstr="-select %d", + min_ver="3", + ), + sph_trait=dict( + argstr="%f,%f,%f,%f", + ), + step_size=dict( + argstr="-step %f", + ), + stop=dict( + argstr="-stop", + ), + unidirectional=dict( + argstr="-unidirectional", + ), + use_rk4=dict( + argstr="-rk4", + ), ) inputs = Tractography.input_spec() @@ -68,7 +176,12 @@ def test_Tractography_inputs(): def test_Tractography_outputs(): output_map = dict( - out_file=dict(extensions=None,), out_seeds=dict(extensions=None,), + out_file=dict( + extensions=None, + ), + out_seeds=dict( + extensions=None, + ), ) outputs = Tractography.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py b/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py index cf81acdc55..ca14384031 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py @@ -4,8 +4,14 @@ def test_DwiTool_inputs(): input_map = dict( - args=dict(argstr="%s",), - b0_file=dict(argstr="-b0 %s", extensions=None, position=4,), + args=dict( + argstr="%s", + ), + b0_file=dict( + argstr="-b0 %s", + extensions=None, + position=4, + ), ball_flag=dict( argstr="-ball", position=6, @@ -32,10 +38,23 @@ def test_DwiTool_inputs(): "nodv_flag", ], ), - bval_file=dict(argstr="-bval %s", extensions=None, mandatory=True, position=2,), - bvec_file=dict(argstr="-bvec %s", extensions=None, position=3,), - diso_val=dict(argstr="-diso %f",), - dpr_val=dict(argstr="-dpr %f",), + bval_file=dict( + argstr="-bval %s", + extensions=None, + mandatory=True, + position=2, + ), + bvec_file=dict( + argstr="-bvec %s", + extensions=None, + position=3, + ), + diso_val=dict( + argstr="-diso %f", + ), + dpr_val=dict( + argstr="-dpr %f", + ), dti_flag=dict( argstr="-dti", position=6, @@ -62,7 +81,10 @@ def test_DwiTool_inputs(): "nodv_flag", ], ), - environ=dict(nohash=True, usedefault=True,), + environ=dict( + nohash=True, + usedefault=True, + ), famap_file=dict( argstr="-famap %s", extensions=None, @@ -88,7 +110,11 @@ def test_DwiTool_inputs(): name_source=["source_file"], name_template="%s_logdti2.nii.gz", ), - mask_file=dict(argstr="-mask %s", 
extensions=None, position=5,), + mask_file=dict( + argstr="-mask %s", + extensions=None, + position=5, + ), mcmap_file=dict( argstr="-mcmap %s", extensions=None, @@ -147,7 +173,10 @@ def test_DwiTool_inputs(): name_template="%s_rgbmap.nii.gz", ), source_file=dict( - argstr="-source %s", extensions=None, mandatory=True, position=1, + argstr="-source %s", + extensions=None, + mandatory=True, + position=1, ), syn_file=dict( argstr="-syn %s", @@ -172,13 +201,27 @@ def test_DwiTool_inputs(): def test_DwiTool_outputs(): output_map = dict( - famap_file=dict(extensions=None,), - logdti_file=dict(extensions=None,), - mcmap_file=dict(extensions=None,), - mdmap_file=dict(extensions=None,), - rgbmap_file=dict(extensions=None,), - syn_file=dict(extensions=None,), - v1map_file=dict(extensions=None,), + famap_file=dict( + extensions=None, + ), + logdti_file=dict( + extensions=None, + ), + mcmap_file=dict( + extensions=None, + ), + mdmap_file=dict( + extensions=None, + ), + rgbmap_file=dict( + extensions=None, + ), + syn_file=dict( + extensions=None, + ), + v1map_file=dict( + extensions=None, + ), ) outputs = DwiTool.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py b/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py index 567831f9f4..14093322cc 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py @@ -4,48 +4,115 @@ def test_FitAsl_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), cbf_file=dict( argstr="-cbf %s", extensions=None, name_source=["source_file"], name_template="%s_cbf.nii.gz", ), - dpld=dict(argstr="-dPLD %f",), - dt_inv2=dict(argstr="-dTinv2 %f",), - eff=dict(argstr="-eff %f",), - environ=dict(nohash=True, usedefault=True,), + dpld=dict( + argstr="-dPLD %f", + ), + dt_inv2=dict( + argstr="-dTinv2 %f", + ), + eff=dict( + argstr="-eff %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), error_file=dict( argstr="-error %s", extensions=None, name_source=["source_file"], name_template="%s_error.nii.gz", ), - gm_plasma=dict(argstr="-gmL %f",), - gm_t1=dict(argstr="-gmT1 %f",), - gm_ttt=dict(argstr="-gmTTT %f",), - ir_output=dict(argstr="-IRoutput %s", extensions=None,), - ir_volume=dict(argstr="-IRvolume %s", extensions=None,), - ldd=dict(argstr="-LDD %f",), - m0map=dict(argstr="-m0map %s", extensions=None,), - m0mape=dict(argstr="-m0mape %s", extensions=None,), - mask=dict(argstr="-mask %s", extensions=None, position=2,), - mul=dict(argstr="-mul %f",), - mulgm=dict(argstr="-sig",), - out=dict(argstr="-out %f",), - pasl=dict(argstr="-pasl",), - pcasl=dict(argstr="-pcasl",), - plasma_coeff=dict(argstr="-L %f",), - pld=dict(argstr="-PLD %f",), - pv0=dict(argstr="-pv0 %d",), - pv2=dict(argstr="-pv2 %d",), - pv3=dict(argstr="-pv3 %d %d %d",), - pv_threshold=dict(argstr="-pvthreshold",), - seg=dict(argstr="-seg %s", extensions=None,), - segstyle=dict(argstr="-segstyle",), - sig=dict(argstr="-sig",), + gm_plasma=dict( + argstr="-gmL %f", + ), + gm_t1=dict( + argstr="-gmT1 %f", + ), + gm_ttt=dict( + argstr="-gmTTT %f", + ), + ir_output=dict( + argstr="-IRoutput %s", + extensions=None, + ), + ir_volume=dict( + argstr="-IRvolume %s", + extensions=None, + ), + ldd=dict( + argstr="-LDD %f", + ), + m0map=dict( + argstr="-m0map %s", + extensions=None, + ), + m0mape=dict( + argstr="-m0mape %s", + extensions=None, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + position=2, + ), + mul=dict( + argstr="-mul %f", + ), + mulgm=dict( + argstr="-sig", + ), 
+ out=dict( + argstr="-out %f", + ), + pasl=dict( + argstr="-pasl", + ), + pcasl=dict( + argstr="-pcasl", + ), + plasma_coeff=dict( + argstr="-L %f", + ), + pld=dict( + argstr="-PLD %f", + ), + pv0=dict( + argstr="-pv0 %d", + ), + pv2=dict( + argstr="-pv2 %d", + ), + pv3=dict( + argstr="-pv3 %d %d %d", + ), + pv_threshold=dict( + argstr="-pvthreshold", + ), + seg=dict( + argstr="-seg %s", + extensions=None, + ), + segstyle=dict( + argstr="-segstyle", + ), + sig=dict( + argstr="-sig", + ), source_file=dict( - argstr="-source %s", extensions=None, mandatory=True, position=1, + argstr="-source %s", + extensions=None, + mandatory=True, + position=1, ), syn_file=dict( argstr="-syn %s", @@ -53,13 +120,28 @@ def test_FitAsl_inputs(): name_source=["source_file"], name_template="%s_syn.nii.gz", ), - t1_art_cmp=dict(argstr="-T1a %f",), - t1map=dict(argstr="-t1map %s", extensions=None,), - t_inv1=dict(argstr="-Tinv1 %f",), - t_inv2=dict(argstr="-Tinv2 %f",), - wm_plasma=dict(argstr="-wmL %f",), - wm_t1=dict(argstr="-wmT1 %f",), - wm_ttt=dict(argstr="-wmTTT %f",), + t1_art_cmp=dict( + argstr="-T1a %f", + ), + t1map=dict( + argstr="-t1map %s", + extensions=None, + ), + t_inv1=dict( + argstr="-Tinv1 %f", + ), + t_inv2=dict( + argstr="-Tinv2 %f", + ), + wm_plasma=dict( + argstr="-wmL %f", + ), + wm_t1=dict( + argstr="-wmT1 %f", + ), + wm_ttt=dict( + argstr="-wmTTT %f", + ), ) inputs = FitAsl.input_spec() @@ -70,9 +152,15 @@ def test_FitAsl_inputs(): def test_FitAsl_outputs(): output_map = dict( - cbf_file=dict(extensions=None,), - error_file=dict(extensions=None,), - syn_file=dict(extensions=None,), + cbf_file=dict( + extensions=None, + ), + error_file=dict( + extensions=None, + ), + syn_file=dict( + extensions=None, + ), ) outputs = FitAsl.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py b/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py index 57bee972e1..87650ffbd6 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py @@ -4,8 +4,12 @@ def test_FitDwi_inputs(): input_map = dict( - acceptance=dict(argstr="-accpetance %f",), - args=dict(argstr="%s",), + acceptance=dict( + argstr="-accpetance %f", + ), + args=dict( + argstr="%s", + ), ball_flag=dict( argstr="-ball", position=4, @@ -30,12 +34,31 @@ def test_FitDwi_inputs(): "nodv_flag", ], ), - bval_file=dict(argstr="-bval %s", extensions=None, mandatory=True, position=2,), - bvec_file=dict(argstr="-bvec %s", extensions=None, mandatory=True, position=3,), - cov_file=dict(argstr="-cov %s", extensions=None,), - csf_t2_val=dict(argstr="-csfT2 %f",), - diso_val=dict(argstr="-diso %f",), - dpr_val=dict(argstr="-dpr %f",), + bval_file=dict( + argstr="-bval %s", + extensions=None, + mandatory=True, + position=2, + ), + bvec_file=dict( + argstr="-bvec %s", + extensions=None, + mandatory=True, + position=3, + ), + cov_file=dict( + argstr="-cov %s", + extensions=None, + ), + csf_t2_val=dict( + argstr="-csfT2 %f", + ), + diso_val=dict( + argstr="-diso %f", + ), + dpr_val=dict( + argstr="-dpr %f", + ), dti_flag=dict( argstr="-dti", position=4, @@ -48,7 +71,10 @@ def test_FitDwi_inputs(): "nodv_flag", ], ), - environ=dict(nohash=True, usedefault=True,), + environ=dict( + nohash=True, + usedefault=True, + ), error_file=dict( argstr="-error %s", extensions=None, @@ -61,7 +87,10 @@ def test_FitDwi_inputs(): name_source=["source_file"], name_template="%s_famap.nii.gz", ), - gn_flag=dict(argstr="-gn", xor=["wls_flag"],), + gn_flag=dict( + argstr="-gn", + 
xor=["wls_flag"], + ), ivim_flag=dict( argstr="-ivim", position=4, @@ -74,9 +103,18 @@ def test_FitDwi_inputs(): "nodv_flag", ], ), - lm_vals=dict(argstr="-lm %f %f", requires=["gn_flag"],), - mask_file=dict(argstr="-mask %s", extensions=None,), - maxit_val=dict(argstr="-maxit %d", requires=["gn_flag"],), + lm_vals=dict( + argstr="-lm %f %f", + requires=["gn_flag"], + ), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + maxit_val=dict( + argstr="-maxit %d", + requires=["gn_flag"], + ), mcmap_file=dict( argstr="-mcmap %s", extensions=None, @@ -84,14 +122,18 @@ def test_FitDwi_inputs(): name_template="%s_mcmap.nii.gz", requires=["nodv_flag"], ), - mcmaxit=dict(argstr="-mcmaxit %d",), + mcmaxit=dict( + argstr="-mcmaxit %d", + ), mcout=dict( argstr="-mcout %s", extensions=None, name_source=["source_file"], name_template="%s_mcout.txt", ), - mcsamples=dict(argstr="-mcsamples %d",), + mcsamples=dict( + argstr="-mcsamples %d", + ), mdmap_file=dict( argstr="-mdmap %s", extensions=None, @@ -140,8 +182,13 @@ def test_FitDwi_inputs(): "nod_flag", ], ), - perf_thr=dict(argstr="-perfthreshold %f",), - prior_file=dict(argstr="-prior %s", extensions=None,), + perf_thr=dict( + argstr="-perfthreshold %f", + ), + prior_file=dict( + argstr="-prior %s", + extensions=None, + ), res_file=dict( argstr="-res %s", extensions=None, @@ -155,21 +202,40 @@ def test_FitDwi_inputs(): name_template="%s_rgbmap.nii.gz", requires=["dti_flag"], ), - rot_sform_flag=dict(argstr="-rotsform %d",), - slice_no=dict(argstr="-slice %d",), + rot_sform_flag=dict( + argstr="-rotsform %d", + ), + slice_no=dict( + argstr="-slice %d", + ), source_file=dict( - argstr="-source %s", extensions=None, mandatory=True, position=1, + argstr="-source %s", + extensions=None, + mandatory=True, + position=1, + ), + swls_val=dict( + argstr="-swls %f", ), - swls_val=dict(argstr="-swls %f",), syn_file=dict( argstr="-syn %s", extensions=None, name_source=["source_file"], name_template="%s_syn.nii.gz", ), - te_file=dict(argstr="-TE %s", extensions=None, xor=["te_file"],), - te_value=dict(argstr="-TE %s", extensions=None, xor=["te_file"],), - ten_type=dict(usedefault=True,), + te_file=dict( + argstr="-TE %s", + extensions=None, + xor=["te_file"], + ), + te_value=dict( + argstr="-TE %s", + extensions=None, + xor=["te_file"], + ), + ten_type=dict( + usedefault=True, + ), tenmap2_file=dict( argstr="-tenmap2 %s", extensions=None, @@ -190,10 +256,19 @@ def test_FitDwi_inputs(): name_source=["source_file"], name_template="%s_v1map.nii.gz", ), - vb_flag=dict(argstr="-vb",), - voxel=dict(argstr="-voxel %d %d %d",), - wls_flag=dict(argstr="-wls", xor=["gn_flag"],), - wm_t2_val=dict(argstr="-wmT2 %f",), + vb_flag=dict( + argstr="-vb", + ), + voxel=dict( + argstr="-voxel %d %d %d", + ), + wls_flag=dict( + argstr="-wls", + xor=["gn_flag"], + ), + wm_t2_val=dict( + argstr="-wmT2 %f", + ), ) inputs = FitDwi.input_spec() @@ -204,18 +279,42 @@ def test_FitDwi_inputs(): def test_FitDwi_outputs(): output_map = dict( - error_file=dict(extensions=None,), - famap_file=dict(extensions=None,), - mcmap_file=dict(extensions=None,), - mcout=dict(extensions=None,), - mdmap_file=dict(extensions=None,), - nodiff_file=dict(extensions=None,), - res_file=dict(extensions=None,), - rgbmap_file=dict(extensions=None,), - syn_file=dict(extensions=None,), - tenmap2_file=dict(extensions=None,), - tenmap_file=dict(extensions=None,), - v1map_file=dict(extensions=None,), + error_file=dict( + extensions=None, + ), + famap_file=dict( + extensions=None, + ), + mcmap_file=dict( + 
extensions=None, + ), + mcout=dict( + extensions=None, + ), + mdmap_file=dict( + extensions=None, + ), + nodiff_file=dict( + extensions=None, + ), + res_file=dict( + extensions=None, + ), + rgbmap_file=dict( + extensions=None, + ), + syn_file=dict( + extensions=None, + ), + tenmap2_file=dict( + extensions=None, + ), + tenmap_file=dict( + extensions=None, + ), + v1map_file=dict( + extensions=None, + ), ) outputs = FitDwi.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py b/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py index be09fc90fc..ee82b5c900 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py @@ -4,78 +4,158 @@ def test_FitQt1_inputs(): input_map = dict( - acceptance=dict(argstr="-acceptance %f",), - args=dict(argstr="%s",), - b1map=dict(argstr="-b1map %s", extensions=None,), + acceptance=dict( + argstr="-acceptance %f", + ), + args=dict( + argstr="%s", + ), + b1map=dict( + argstr="-b1map %s", + extensions=None, + ), comp_file=dict( argstr="-comp %s", extensions=None, name_source=["source_file"], name_template="%s_comp.nii.gz", ), - environ=dict(nohash=True, usedefault=True,), + environ=dict( + nohash=True, + usedefault=True, + ), error_file=dict( argstr="-error %s", extensions=None, name_source=["source_file"], name_template="%s_error.nii.gz", ), - flips=dict(argstr="-flips %s", sep=" ",), - flips_list=dict(argstr="-fliplist %s", extensions=None,), - gn_flag=dict(argstr="-gn", position=8,), - ir_flag=dict(argstr="-IR", position=13,), - lm_val=dict(argstr="-lm %f %f", position=7,), + flips=dict( + argstr="-flips %s", + sep=" ", + ), + flips_list=dict( + argstr="-fliplist %s", + extensions=None, + ), + gn_flag=dict( + argstr="-gn", + position=8, + ), + ir_flag=dict( + argstr="-IR", + position=13, + ), + lm_val=dict( + argstr="-lm %f %f", + position=7, + ), m0map_file=dict( argstr="-m0map %s", extensions=None, name_source=["source_file"], name_template="%s_m0map.nii.gz", ), - mask=dict(argstr="-mask %s", extensions=None, position=2,), - maxit=dict(argstr="-maxit %d", position=11,), + mask=dict( + argstr="-mask %s", + extensions=None, + position=2, + ), + maxit=dict( + argstr="-maxit %d", + position=11, + ), mcmap_file=dict( argstr="-mcmap %s", extensions=None, name_source=["source_file"], name_template="%s_mcmap.nii.gz", ), - mcmaxit=dict(argstr="-mcmaxit %d",), - mcout=dict(argstr="-mcout %s", extensions=None,), - mcsamples=dict(argstr="-mcsamples %d",), - nb_comp=dict(argstr="-nc %d", position=6,), - prior=dict(argstr="-prior %s", extensions=None, position=3,), + mcmaxit=dict( + argstr="-mcmaxit %d", + ), + mcout=dict( + argstr="-mcout %s", + extensions=None, + ), + mcsamples=dict( + argstr="-mcsamples %d", + ), + nb_comp=dict( + argstr="-nc %d", + position=6, + ), + prior=dict( + argstr="-prior %s", + extensions=None, + position=3, + ), res_file=dict( argstr="-res %s", extensions=None, name_source=["source_file"], name_template="%s_res.nii.gz", ), - slice_no=dict(argstr="-slice %d", position=9,), + slice_no=dict( + argstr="-slice %d", + position=9, + ), source_file=dict( - argstr="-source %s", extensions=None, mandatory=True, position=1, + argstr="-source %s", + extensions=None, + mandatory=True, + position=1, + ), + spgr=dict( + argstr="-SPGR", + ), + sr_flag=dict( + argstr="-SR", + position=12, ), - spgr=dict(argstr="-SPGR",), - sr_flag=dict(argstr="-SR", position=12,), syn_file=dict( argstr="-syn %s", extensions=None, name_source=["source_file"], 
name_template="%s_syn.nii.gz", ), - t1_list=dict(argstr="-T1list %s", extensions=None,), + t1_list=dict( + argstr="-T1list %s", + extensions=None, + ), t1map_file=dict( argstr="-t1map %s", extensions=None, name_source=["source_file"], name_template="%s_t1map.nii.gz", ), - t1max=dict(argstr="-T1max %f",), - t1min=dict(argstr="-T1min %f",), - te_value=dict(argstr="-TE %f", position=4,), - tis=dict(argstr="-TIs %s", position=14, sep=" ",), - tis_list=dict(argstr="-TIlist %s", extensions=None,), - tr_value=dict(argstr="-TR %f", position=5,), - voxel=dict(argstr="-voxel %d %d %d", position=10,), + t1max=dict( + argstr="-T1max %f", + ), + t1min=dict( + argstr="-T1min %f", + ), + te_value=dict( + argstr="-TE %f", + position=4, + ), + tis=dict( + argstr="-TIs %s", + position=14, + sep=" ", + ), + tis_list=dict( + argstr="-TIlist %s", + extensions=None, + ), + tr_value=dict( + argstr="-TR %f", + position=5, + ), + voxel=dict( + argstr="-voxel %d %d %d", + position=10, + ), ) inputs = FitQt1.input_spec() @@ -86,13 +166,27 @@ def test_FitQt1_inputs(): def test_FitQt1_outputs(): output_map = dict( - comp_file=dict(extensions=None,), - error_file=dict(extensions=None,), - m0map_file=dict(extensions=None,), - mcmap_file=dict(extensions=None,), - res_file=dict(extensions=None,), - syn_file=dict(extensions=None,), - t1map_file=dict(extensions=None,), + comp_file=dict( + extensions=None, + ), + error_file=dict( + extensions=None, + ), + m0map_file=dict( + extensions=None, + ), + mcmap_file=dict( + extensions=None, + ), + res_file=dict( + extensions=None, + ), + syn_file=dict( + extensions=None, + ), + t1map_file=dict( + extensions=None, + ), ) outputs = FitQt1.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py b/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py index e6fb0b0bbb..f12ccad480 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py @@ -4,7 +4,13 @@ def test_NiftyFitCommand_inputs(): input_map = dict( - args=dict(argstr="%s",), environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), ) inputs = NiftyFitCommand.input_spec() diff --git a/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py b/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py index 75e103edbe..43f72df69f 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py @@ -4,9 +4,17 @@ def test_NiftyRegCommand_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - omp_core_val=dict(argstr="-omp %i", usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + omp_core_val=dict( + argstr="-omp %i", + usedefault=True, + ), ) inputs = NiftyRegCommand.input_spec() diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py b/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py index 16ca83bdba..a4485d0e20 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py @@ -4,45 +4,108 @@ def test_RegAladin_inputs(): input_map = dict( - aff_direct_flag=dict(argstr="-affDirect",), + aff_direct_flag=dict( + argstr="-affDirect", + ), aff_file=dict( argstr="-aff %s", extensions=None, name_source=["flo_file"], name_template="%s_aff.txt", ), - 
args=dict(argstr="%s",), - cog_flag=dict(argstr="-cog",), - environ=dict(nohash=True, usedefault=True,), - flo_file=dict(argstr="-flo %s", extensions=None, mandatory=True,), - flo_low_val=dict(argstr="-floLowThr %f",), - flo_up_val=dict(argstr="-floUpThr %f",), - fmask_file=dict(argstr="-fmask %s", extensions=None,), - gpuid_val=dict(argstr="-gpuid %i",), - i_val=dict(argstr="-pi %d",), - in_aff_file=dict(argstr="-inaff %s", extensions=None,), - ln_val=dict(argstr="-ln %d",), - lp_val=dict(argstr="-lp %d",), - maxit_val=dict(argstr="-maxit %d",), - nac_flag=dict(argstr="-nac",), - nosym_flag=dict(argstr="-noSym",), - omp_core_val=dict(argstr="-omp %i", usedefault=True,), - platform_val=dict(argstr="-platf %i",), - ref_file=dict(argstr="-ref %s", extensions=None, mandatory=True,), - ref_low_val=dict(argstr="-refLowThr %f",), - ref_up_val=dict(argstr="-refUpThr %f",), + args=dict( + argstr="%s", + ), + cog_flag=dict( + argstr="-cog", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flo_file=dict( + argstr="-flo %s", + extensions=None, + mandatory=True, + ), + flo_low_val=dict( + argstr="-floLowThr %f", + ), + flo_up_val=dict( + argstr="-floUpThr %f", + ), + fmask_file=dict( + argstr="-fmask %s", + extensions=None, + ), + gpuid_val=dict( + argstr="-gpuid %i", + ), + i_val=dict( + argstr="-pi %d", + ), + in_aff_file=dict( + argstr="-inaff %s", + extensions=None, + ), + ln_val=dict( + argstr="-ln %d", + ), + lp_val=dict( + argstr="-lp %d", + ), + maxit_val=dict( + argstr="-maxit %d", + ), + nac_flag=dict( + argstr="-nac", + ), + nosym_flag=dict( + argstr="-noSym", + ), + omp_core_val=dict( + argstr="-omp %i", + usedefault=True, + ), + platform_val=dict( + argstr="-platf %i", + ), + ref_file=dict( + argstr="-ref %s", + extensions=None, + mandatory=True, + ), + ref_low_val=dict( + argstr="-refLowThr %f", + ), + ref_up_val=dict( + argstr="-refUpThr %f", + ), res_file=dict( argstr="-res %s", extensions=None, name_source=["flo_file"], name_template="%s_res.nii.gz", ), - rig_only_flag=dict(argstr="-rigOnly",), - rmask_file=dict(argstr="-rmask %s", extensions=None,), - smoo_f_val=dict(argstr="-smooF %f",), - smoo_r_val=dict(argstr="-smooR %f",), - v_val=dict(argstr="-pv %d",), - verbosity_off_flag=dict(argstr="-voff",), + rig_only_flag=dict( + argstr="-rigOnly", + ), + rmask_file=dict( + argstr="-rmask %s", + extensions=None, + ), + smoo_f_val=dict( + argstr="-smooF %f", + ), + smoo_r_val=dict( + argstr="-smooR %f", + ), + v_val=dict( + argstr="-pv %d", + ), + verbosity_off_flag=dict( + argstr="-voff", + ), ) inputs = RegAladin.input_spec() @@ -53,9 +116,13 @@ def test_RegAladin_inputs(): def test_RegAladin_outputs(): output_map = dict( - aff_file=dict(extensions=None,), + aff_file=dict( + extensions=None, + ), avg_output=dict(), - res_file=dict(extensions=None,), + res_file=dict( + extensions=None, + ), ) outputs = RegAladin.output_spec() diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py b/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py index 3ee172453f..0077b85faa 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py @@ -4,7 +4,9 @@ def test_RegAverage_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), avg_files=dict( argstr="-avg %s", position=1, @@ -83,11 +85,25 @@ def test_RegAverage_inputs(): "demean2_ref_file", ], ), - environ=dict(nohash=True, usedefault=True,), - omp_core_val=dict(argstr="-omp %i", usedefault=True,), - 
out_file=dict(argstr="%s", extensions=None, genfile=True, position=0,), + environ=dict( + nohash=True, + usedefault=True, + ), + omp_core_val=dict( + argstr="-omp %i", + usedefault=True, + ), + out_file=dict( + argstr="%s", + extensions=None, + genfile=True, + position=0, + ), warp_files=dict( - argstr="%s", position=-1, sep=" ", xor=["avg_files", "avg_lts_files"], + argstr="%s", + position=-1, + sep=" ", + xor=["avg_files", "avg_lts_files"], ), ) inputs = RegAverage.input_spec() @@ -98,7 +114,11 @@ def test_RegAverage_inputs(): def test_RegAverage_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = RegAverage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py b/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py index a70318cd43..b760ebb3d1 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py @@ -4,69 +4,181 @@ def test_RegF3D_inputs(): input_map = dict( - aff_file=dict(argstr="-aff %s", extensions=None,), - amc_flag=dict(argstr="-amc",), - args=dict(argstr="%s",), - be_val=dict(argstr="-be %f",), + aff_file=dict( + argstr="-aff %s", + extensions=None, + ), + amc_flag=dict( + argstr="-amc", + ), + args=dict( + argstr="%s", + ), + be_val=dict( + argstr="-be %f", + ), cpp_file=dict( argstr="-cpp %s", extensions=None, name_source=["flo_file"], name_template="%s_cpp.nii.gz", ), - environ=dict(nohash=True, usedefault=True,), - fbn2_val=dict(argstr="-fbn %d %d",), - fbn_val=dict(argstr="--fbn %d",), - flo_file=dict(argstr="-flo %s", extensions=None, mandatory=True,), - flo_smooth_val=dict(argstr="-smooF %f",), - flwth2_thr_val=dict(argstr="-fLwTh %d %f",), - flwth_thr_val=dict(argstr="--fLwTh %f",), - fmask_file=dict(argstr="-fmask %s", extensions=None,), - fupth2_thr_val=dict(argstr="-fUpTh %d %f",), - fupth_thr_val=dict(argstr="--fUpTh %f",), - incpp_file=dict(argstr="-incpp %s", extensions=None,), - jl_val=dict(argstr="-jl %f",), - kld2_flag=dict(argstr="-kld %d",), - kld_flag=dict(argstr="--kld",), - le_val=dict(argstr="-le %f",), - ln_val=dict(argstr="-ln %d",), - lncc2_val=dict(argstr="-lncc %d %f",), - lncc_val=dict(argstr="--lncc %f",), - lp_val=dict(argstr="-lp %d",), - maxit_val=dict(argstr="-maxit %d",), - nmi_flag=dict(argstr="--nmi",), - no_app_jl_flag=dict(argstr="-noAppJL",), - noconj_flag=dict(argstr="-noConj",), - nopy_flag=dict(argstr="-nopy",), - nox_flag=dict(argstr="-nox",), - noy_flag=dict(argstr="-noy",), - noz_flag=dict(argstr="-noz",), - omp_core_val=dict(argstr="-omp %i", usedefault=True,), - pad_val=dict(argstr="-pad %f",), - pert_val=dict(argstr="-pert %d",), - rbn2_val=dict(argstr="-rbn %d %d",), - rbn_val=dict(argstr="--rbn %d",), - ref_file=dict(argstr="-ref %s", extensions=None, mandatory=True,), - ref_smooth_val=dict(argstr="-smooR %f",), + environ=dict( + nohash=True, + usedefault=True, + ), + fbn2_val=dict( + argstr="-fbn %d %d", + ), + fbn_val=dict( + argstr="--fbn %d", + ), + flo_file=dict( + argstr="-flo %s", + extensions=None, + mandatory=True, + ), + flo_smooth_val=dict( + argstr="-smooF %f", + ), + flwth2_thr_val=dict( + argstr="-fLwTh %d %f", + ), + flwth_thr_val=dict( + argstr="--fLwTh %f", + ), + fmask_file=dict( + argstr="-fmask %s", + extensions=None, + ), + fupth2_thr_val=dict( + argstr="-fUpTh %d %f", + ), + fupth_thr_val=dict( + argstr="--fUpTh %f", + ), + incpp_file=dict( + argstr="-incpp %s", + extensions=None, 
+ ), + jl_val=dict( + argstr="-jl %f", + ), + kld2_flag=dict( + argstr="-kld %d", + ), + kld_flag=dict( + argstr="--kld", + ), + le_val=dict( + argstr="-le %f", + ), + ln_val=dict( + argstr="-ln %d", + ), + lncc2_val=dict( + argstr="-lncc %d %f", + ), + lncc_val=dict( + argstr="--lncc %f", + ), + lp_val=dict( + argstr="-lp %d", + ), + maxit_val=dict( + argstr="-maxit %d", + ), + nmi_flag=dict( + argstr="--nmi", + ), + no_app_jl_flag=dict( + argstr="-noAppJL", + ), + noconj_flag=dict( + argstr="-noConj", + ), + nopy_flag=dict( + argstr="-nopy", + ), + nox_flag=dict( + argstr="-nox", + ), + noy_flag=dict( + argstr="-noy", + ), + noz_flag=dict( + argstr="-noz", + ), + omp_core_val=dict( + argstr="-omp %i", + usedefault=True, + ), + pad_val=dict( + argstr="-pad %f", + ), + pert_val=dict( + argstr="-pert %d", + ), + rbn2_val=dict( + argstr="-rbn %d %d", + ), + rbn_val=dict( + argstr="--rbn %d", + ), + ref_file=dict( + argstr="-ref %s", + extensions=None, + mandatory=True, + ), + ref_smooth_val=dict( + argstr="-smooR %f", + ), res_file=dict( argstr="-res %s", extensions=None, name_source=["flo_file"], name_template="%s_res.nii.gz", ), - rlwth2_thr_val=dict(argstr="-rLwTh %d %f",), - rlwth_thr_val=dict(argstr="--rLwTh %f",), - rmask_file=dict(argstr="-rmask %s", extensions=None,), - rupth2_thr_val=dict(argstr="-rUpTh %d %f",), - rupth_thr_val=dict(argstr="--rUpTh %f",), - smooth_grad_val=dict(argstr="-smoothGrad %f",), - ssd2_flag=dict(argstr="-ssd %d",), - ssd_flag=dict(argstr="--ssd",), - sx_val=dict(argstr="-sx %f",), - sy_val=dict(argstr="-sy %f",), - sz_val=dict(argstr="-sz %f",), - vel_flag=dict(argstr="-vel",), - verbosity_off_flag=dict(argstr="-voff",), + rlwth2_thr_val=dict( + argstr="-rLwTh %d %f", + ), + rlwth_thr_val=dict( + argstr="--rLwTh %f", + ), + rmask_file=dict( + argstr="-rmask %s", + extensions=None, + ), + rupth2_thr_val=dict( + argstr="-rUpTh %d %f", + ), + rupth_thr_val=dict( + argstr="--rUpTh %f", + ), + smooth_grad_val=dict( + argstr="-smoothGrad %f", + ), + ssd2_flag=dict( + argstr="-ssd %d", + ), + ssd_flag=dict( + argstr="--ssd", + ), + sx_val=dict( + argstr="-sx %f", + ), + sy_val=dict( + argstr="-sy %f", + ), + sz_val=dict( + argstr="-sz %f", + ), + vel_flag=dict( + argstr="-vel", + ), + verbosity_off_flag=dict( + argstr="-voff", + ), ) inputs = RegF3D.input_spec() @@ -78,10 +190,18 @@ def test_RegF3D_inputs(): def test_RegF3D_outputs(): output_map = dict( avg_output=dict(), - cpp_file=dict(extensions=None,), - invcpp_file=dict(extensions=None,), - invres_file=dict(extensions=None,), - res_file=dict(extensions=None,), + cpp_file=dict( + extensions=None, + ), + invcpp_file=dict( + extensions=None, + ), + invres_file=dict( + extensions=None, + ), + res_file=dict( + extensions=None, + ), ) outputs = RegF3D.output_spec() diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py b/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py index 5a0291e1af..60c8ce5c08 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py @@ -4,9 +4,17 @@ def test_RegJacobian_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - omp_core_val=dict(argstr="-omp %i", usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + omp_core_val=dict( + argstr="-omp %i", + usedefault=True, + ), out_file=dict( argstr="%s", extensions=None, @@ -14,9 +22,20 @@ def test_RegJacobian_inputs(): name_template="%s", 
position=-1, ), - ref_file=dict(argstr="-ref %s", extensions=None,), - trans_file=dict(argstr="-trans %s", extensions=None, mandatory=True,), - type=dict(argstr="-%s", position=-2, usedefault=True,), + ref_file=dict( + argstr="-ref %s", + extensions=None, + ), + trans_file=dict( + argstr="-trans %s", + extensions=None, + mandatory=True, + ), + type=dict( + argstr="-%s", + position=-2, + usedefault=True, + ), ) inputs = RegJacobian.input_spec() @@ -26,7 +45,11 @@ def test_RegJacobian_inputs(): def test_RegJacobian_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = RegJacobian.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py b/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py index 8ae16aa9c8..8a7e470e6c 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py @@ -4,18 +4,37 @@ def test_RegMeasure_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - flo_file=dict(argstr="-flo %s", extensions=None, mandatory=True,), - measure_type=dict(argstr="-%s", mandatory=True,), - omp_core_val=dict(argstr="-omp %i", usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flo_file=dict( + argstr="-flo %s", + extensions=None, + mandatory=True, + ), + measure_type=dict( + argstr="-%s", + mandatory=True, + ), + omp_core_val=dict( + argstr="-omp %i", + usedefault=True, + ), out_file=dict( argstr="-out %s", extensions=None, name_source=["flo_file"], name_template="%s", ), - ref_file=dict(argstr="-ref %s", extensions=None, mandatory=True,), + ref_file=dict( + argstr="-ref %s", + extensions=None, + mandatory=True, + ), ) inputs = RegMeasure.input_spec() @@ -25,7 +44,11 @@ def test_RegMeasure_inputs(): def test_RegMeasure_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = RegMeasure.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py b/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py index 2836efb4f8..6d9c9a93e5 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py @@ -4,11 +4,25 @@ def test_RegResample_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - flo_file=dict(argstr="-flo %s", extensions=None, mandatory=True,), - inter_val=dict(argstr="-inter %d",), - omp_core_val=dict(argstr="-omp %i", usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flo_file=dict( + argstr="-flo %s", + extensions=None, + mandatory=True, + ), + inter_val=dict( + argstr="-inter %d", + ), + omp_core_val=dict( + argstr="-omp %i", + usedefault=True, + ), out_file=dict( argstr="%s", extensions=None, @@ -16,14 +30,35 @@ def test_RegResample_inputs(): name_template="%s", position=-1, ), - pad_val=dict(argstr="-pad %f",), - psf_alg=dict(argstr="-psf_alg %d",), - psf_flag=dict(argstr="-psf",), - ref_file=dict(argstr="-ref %s", extensions=None, mandatory=True,), - tensor_flag=dict(argstr="-tensor ",), - trans_file=dict(argstr="-trans %s", extensions=None,), - type=dict(argstr="-%s", position=-2, usedefault=True,), - 
verbosity_off_flag=dict(argstr="-voff",), + pad_val=dict( + argstr="-pad %f", + ), + psf_alg=dict( + argstr="-psf_alg %d", + ), + psf_flag=dict( + argstr="-psf", + ), + ref_file=dict( + argstr="-ref %s", + extensions=None, + mandatory=True, + ), + tensor_flag=dict( + argstr="-tensor ", + ), + trans_file=dict( + argstr="-trans %s", + extensions=None, + ), + type=dict( + argstr="-%s", + position=-2, + usedefault=True, + ), + verbosity_off_flag=dict( + argstr="-voff", + ), ) inputs = RegResample.input_spec() @@ -33,7 +68,11 @@ def test_RegResample_inputs(): def test_RegResample_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = RegResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py b/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py index 0b0513ef4d..9abf8184ec 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py @@ -4,31 +4,75 @@ def test_RegTools_inputs(): input_map = dict( - add_val=dict(argstr="-add %s",), - args=dict(argstr="%s",), - bin_flag=dict(argstr="-bin",), - chg_res_val=dict(argstr="-chgres %f %f %f",), - div_val=dict(argstr="-div %s",), - down_flag=dict(argstr="-down",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True,), - inter_val=dict(argstr="-interp %d",), - iso_flag=dict(argstr="-iso",), - mask_file=dict(argstr="-nan %s", extensions=None,), - mul_val=dict(argstr="-mul %s",), - noscl_flag=dict(argstr="-noscl",), - omp_core_val=dict(argstr="-omp %i", usedefault=True,), + add_val=dict( + argstr="-add %s", + ), + args=dict( + argstr="%s", + ), + bin_flag=dict( + argstr="-bin", + ), + chg_res_val=dict( + argstr="-chgres %f %f %f", + ), + div_val=dict( + argstr="-div %s", + ), + down_flag=dict( + argstr="-down", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + inter_val=dict( + argstr="-interp %d", + ), + iso_flag=dict( + argstr="-iso", + ), + mask_file=dict( + argstr="-nan %s", + extensions=None, + ), + mul_val=dict( + argstr="-mul %s", + ), + noscl_flag=dict( + argstr="-noscl", + ), + omp_core_val=dict( + argstr="-omp %i", + usedefault=True, + ), out_file=dict( argstr="-out %s", extensions=None, name_source=["in_file"], name_template="%s_tools.nii.gz", ), - rms_val=dict(argstr="-rms %s", extensions=None,), - smo_g_val=dict(argstr="-smoG %f %f %f",), - smo_s_val=dict(argstr="-smoS %f %f %f",), - sub_val=dict(argstr="-sub %s",), - thr_val=dict(argstr="-thr %f",), + rms_val=dict( + argstr="-rms %s", + extensions=None, + ), + smo_g_val=dict( + argstr="-smoG %f %f %f", + ), + smo_s_val=dict( + argstr="-smoS %f %f %f", + ), + sub_val=dict( + argstr="-sub %s", + ), + thr_val=dict( + argstr="-thr %f", + ), ) inputs = RegTools.input_spec() @@ -38,7 +82,11 @@ def test_RegTools_inputs(): def test_RegTools_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = RegTools.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py b/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py index 7a16c6e452..b9ee8bf2af 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py +++ 
b/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py @@ -21,7 +21,9 @@ def test_RegTransform_inputs(): "flirt_2_nr_input", ], ), - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), comp_input=dict( argstr="-comp %s", extensions=None, @@ -40,7 +42,11 @@ def test_RegTransform_inputs(): "flirt_2_nr_input", ], ), - comp_input2=dict(argstr="%s", extensions=None, position=-2,), + comp_input2=dict( + argstr="%s", + extensions=None, + position=-2, + ), def_input=dict( argstr="-def %s", extensions=None, @@ -75,7 +81,10 @@ def test_RegTransform_inputs(): "flirt_2_nr_input", ], ), - environ=dict(nohash=True, usedefault=True,), + environ=dict( + nohash=True, + usedefault=True, + ), flirt_2_nr_input=dict( argstr="-flirtAff2NR %s %s %s", position=-2, @@ -175,11 +184,26 @@ def test_RegTransform_inputs(): "flirt_2_nr_input", ], ), - omp_core_val=dict(argstr="-omp %i", usedefault=True,), - out_file=dict(argstr="%s", extensions=None, genfile=True, position=-1,), - ref1_file=dict(argstr="-ref %s", extensions=None, position=0,), + omp_core_val=dict( + argstr="-omp %i", + usedefault=True, + ), + out_file=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + ref1_file=dict( + argstr="-ref %s", + extensions=None, + position=0, + ), ref2_file=dict( - argstr="-ref2 %s", extensions=None, position=1, requires=["ref1_file"], + argstr="-ref2 %s", + extensions=None, + position=1, + requires=["ref1_file"], ), upd_s_form_input=dict( argstr="-updSform %s", @@ -200,7 +224,10 @@ def test_RegTransform_inputs(): ], ), upd_s_form_input2=dict( - argstr="%s", extensions=None, position=-2, requires=["upd_s_form_input"], + argstr="%s", + extensions=None, + position=-2, + requires=["upd_s_form_input"], ), ) inputs = RegTransform.input_spec() @@ -211,7 +238,11 @@ def test_RegTransform_inputs(): def test_RegTransform_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = RegTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py index ae7bb8a8ef..4c0a962a21 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py @@ -4,9 +4,19 @@ def test_BinaryMaths_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), operand_file=dict( argstr="%s", extensions=None, @@ -26,7 +36,11 @@ def test_BinaryMaths_inputs(): position=5, xor=["operand_file", "operand_str"], ), - operation=dict(argstr="-%s", mandatory=True, position=4,), + operation=dict( + argstr="-%s", + mandatory=True, + position=4, + ), out_file=dict( argstr="%s", extensions=None, @@ -34,7 +48,10 @@ def test_BinaryMaths_inputs(): name_template="%s", position=-2, ), - output_datatype=dict(argstr="-odt %s", position=-3,), + output_datatype=dict( + argstr="-odt %s", + position=-3, + ), ) inputs = BinaryMaths.input_spec() @@ -44,7 +61,11 @@ def test_BinaryMaths_inputs(): def test_BinaryMaths_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = BinaryMaths.output_spec() 
for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py index 195a361f58..440cb92bbc 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py @@ -4,11 +4,29 @@ def test_BinaryMathsInteger_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - operand_value=dict(argstr="%d", mandatory=True, position=5,), - operation=dict(argstr="-%s", mandatory=True, position=4,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + operand_value=dict( + argstr="%d", + mandatory=True, + position=5, + ), + operation=dict( + argstr="-%s", + mandatory=True, + position=4, + ), out_file=dict( argstr="%s", extensions=None, @@ -16,7 +34,10 @@ def test_BinaryMathsInteger_inputs(): name_template="%s", position=-2, ), - output_datatype=dict(argstr="-odt %s", position=-3,), + output_datatype=dict( + argstr="-odt %s", + position=-3, + ), ) inputs = BinaryMathsInteger.input_spec() @@ -26,7 +47,11 @@ def test_BinaryMathsInteger_inputs(): def test_BinaryMathsInteger_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = BinaryMathsInteger.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py b/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py index 61ef530418..5a5ac7298b 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py @@ -4,11 +4,28 @@ def test_BinaryStats_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - larger_voxel=dict(argstr="-t %f", position=-3,), - mask_file=dict(argstr="-m %s", extensions=None, position=-2,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + larger_voxel=dict( + argstr="-t %f", + position=-3, + ), + mask_file=dict( + argstr="-m %s", + extensions=None, + position=-2, + ), operand_file=dict( argstr="%s", extensions=None, @@ -17,9 +34,16 @@ def test_BinaryStats_inputs(): xor=["operand_value"], ), operand_value=dict( - argstr="%.8f", mandatory=True, position=5, xor=["operand_file"], + argstr="%.8f", + mandatory=True, + position=5, + xor=["operand_file"], + ), + operation=dict( + argstr="-%s", + mandatory=True, + position=4, ), - operation=dict(argstr="-%s", mandatory=True, position=4,), ) inputs = BinaryStats.input_spec() @@ -29,7 +53,9 @@ def test_BinaryStats_inputs(): def test_BinaryStats_outputs(): - output_map = dict(output=dict(),) + output_map = dict( + output=dict(), + ) outputs = BinaryStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py b/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py index f1c16859eb..e0943be61e 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py +++ 
b/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py @@ -4,13 +4,38 @@ def test_CalcTopNCC_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-target %s", extensions=None, mandatory=True, position=1,), - in_templates=dict(argstr="%s", mandatory=True, position=3,), - mask_file=dict(argstr="-mask %s", extensions=None,), - num_templates=dict(argstr="-templates %s", mandatory=True, position=2,), - top_templates=dict(argstr="-n %s", mandatory=True, position=4,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-target %s", + extensions=None, + mandatory=True, + position=1, + ), + in_templates=dict( + argstr="%s", + mandatory=True, + position=3, + ), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + num_templates=dict( + argstr="-templates %s", + mandatory=True, + position=2, + ), + top_templates=dict( + argstr="-n %s", + mandatory=True, + position=4, + ), ) inputs = CalcTopNCC.input_spec() @@ -20,7 +45,9 @@ def test_CalcTopNCC_inputs(): def test_CalcTopNCC_outputs(): - output_map = dict(out_files=dict(),) + output_map = dict( + out_files=dict(), + ) outputs = CalcTopNCC.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_EM.py b/nipype/interfaces/niftyseg/tests/test_auto_EM.py index ac340e89ff..a0394b174e 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_EM.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_EM.py @@ -4,17 +4,46 @@ def test_EM_inputs(): input_map = dict( - args=dict(argstr="%s",), - bc_order_val=dict(argstr="-bc_order %s", usedefault=True,), - bc_thresh_val=dict(argstr="-bc_thresh %s", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=4,), - mask_file=dict(argstr="-mask %s", extensions=None,), - max_iter=dict(argstr="-max_iter %s", usedefault=True,), - min_iter=dict(argstr="-min_iter %s", usedefault=True,), - mrf_beta_val=dict(argstr="-mrf_beta %s",), + args=dict( + argstr="%s", + ), + bc_order_val=dict( + argstr="-bc_order %s", + usedefault=True, + ), + bc_thresh_val=dict( + argstr="-bc_thresh %s", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + position=4, + ), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + max_iter=dict( + argstr="-max_iter %s", + usedefault=True, + ), + min_iter=dict( + argstr="-min_iter %s", + usedefault=True, + ), + mrf_beta_val=dict( + argstr="-mrf_beta %s", + ), no_prior=dict( - argstr="-nopriors %s", mandatory=True, xor=["prior_4D", "priors"], + argstr="-nopriors %s", + mandatory=True, + xor=["prior_4D", "priors"], ), out_bc_file=dict( argstr="-bc_out %s", @@ -34,16 +63,26 @@ def test_EM_inputs(): name_source=["in_file"], name_template="%s_outlier_em.nii.gz", ), - outlier_val=dict(argstr="-outlier %s %s",), + outlier_val=dict( + argstr="-outlier %s %s", + ), prior_4D=dict( argstr="-prior4D %s", extensions=None, mandatory=True, xor=["no_prior", "priors"], ), - priors=dict(argstr="%s", mandatory=True, xor=["no_prior", "prior_4D"],), - reg_val=dict(argstr="-reg %s",), - relax_priors=dict(argstr="-rf %s %s",), + priors=dict( + argstr="%s", + mandatory=True, + xor=["no_prior", "prior_4D"], + ), + reg_val=dict( + argstr="-reg %s", + ), + relax_priors=dict( + argstr="-rf %s %s", + ), ) inputs = EM.input_spec() @@ -54,9 
+93,15 @@ def test_EM_inputs(): def test_EM_outputs(): output_map = dict( - out_bc_file=dict(extensions=None,), - out_file=dict(extensions=None,), - out_outlier_file=dict(extensions=None,), + out_bc_file=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + out_outlier_file=dict( + extensions=None, + ), ) outputs = EM.output_spec() diff --git a/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py b/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py index 0e4c3d65bf..9e1b06a892 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py @@ -4,17 +4,47 @@ def test_FillLesions_inputs(): input_map = dict( - args=dict(argstr="%s",), - bin_mask=dict(argstr="-mask %s", extensions=None,), - cwf=dict(argstr="-cwf %f",), - debug=dict(argstr="-debug",), - environ=dict(nohash=True, usedefault=True,), - in_dilation=dict(argstr="-dil %d",), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=1,), - lesion_mask=dict(argstr="-l %s", extensions=None, mandatory=True, position=2,), - match=dict(argstr="-match %f",), - other=dict(argstr="-other",), - out_datatype=dict(argstr="-odt %s",), + args=dict( + argstr="%s", + ), + bin_mask=dict( + argstr="-mask %s", + extensions=None, + ), + cwf=dict( + argstr="-cwf %f", + ), + debug=dict( + argstr="-debug", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_dilation=dict( + argstr="-dil %d", + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + position=1, + ), + lesion_mask=dict( + argstr="-l %s", + extensions=None, + mandatory=True, + position=2, + ), + match=dict( + argstr="-match %f", + ), + other=dict( + argstr="-other", + ), + out_datatype=dict( + argstr="-odt %s", + ), out_file=dict( argstr="-o %s", extensions=None, @@ -22,11 +52,21 @@ def test_FillLesions_inputs(): name_template="%s_lesions_filled.nii.gz", position=3, ), - search=dict(argstr="-search %f",), - size=dict(argstr="-size %d",), - smooth=dict(argstr="-smo %f",), - use_2d=dict(argstr="-2D",), - verbose=dict(argstr="-v",), + search=dict( + argstr="-search %f", + ), + size=dict( + argstr="-size %d", + ), + smooth=dict( + argstr="-smo %f", + ), + use_2d=dict( + argstr="-2D", + ), + verbose=dict( + argstr="-v", + ), ) inputs = FillLesions.input_spec() @@ -36,7 +76,11 @@ def test_FillLesions_inputs(): def test_FillLesions_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = FillLesions.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py b/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py index ba319d3475..dc4590a15b 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py @@ -4,33 +4,79 @@ def test_LabelFusion_inputs(): input_map = dict( - args=dict(argstr="%s",), - classifier_type=dict(argstr="-%s", mandatory=True, position=2,), - conv=dict(argstr="-conv %f",), + args=dict( + argstr="%s", + ), + classifier_type=dict( + argstr="-%s", + mandatory=True, + position=2, + ), + conv=dict( + argstr="-conv %f", + ), dilation_roi=dict(), - environ=dict(nohash=True, usedefault=True,), - file_to_seg=dict(extensions=None, mandatory=True,), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=1,), + environ=dict( + nohash=True, + usedefault=True, + ), + file_to_seg=dict( + 
extensions=None, + mandatory=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + position=1, + ), kernel_size=dict(), - mask_file=dict(argstr="-mask %s", extensions=None,), - max_iter=dict(argstr="-max_iter %d",), - mrf_value=dict(argstr="-MRF_beta %f",), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + max_iter=dict( + argstr="-max_iter %d", + ), + mrf_value=dict( + argstr="-MRF_beta %f", + ), out_file=dict( argstr="-out %s", extensions=None, name_source=["in_file"], name_template="%s", ), - prob_flag=dict(argstr="-outProb",), - prob_update_flag=dict(argstr="-prop_update",), - proportion=dict(argstr="-prop %s",), - set_pq=dict(argstr="-setPQ %f %f",), - sm_ranking=dict(argstr="-%s", position=3, usedefault=True,), - template_file=dict(extensions=None,), + prob_flag=dict( + argstr="-outProb", + ), + prob_update_flag=dict( + argstr="-prop_update", + ), + proportion=dict( + argstr="-prop %s", + ), + set_pq=dict( + argstr="-setPQ %f %f", + ), + sm_ranking=dict( + argstr="-%s", + position=3, + usedefault=True, + ), + template_file=dict( + extensions=None, + ), template_num=dict(), - unc=dict(argstr="-unc",), - unc_thresh=dict(argstr="-uncthres %f",), - verbose=dict(argstr="-v %s",), + unc=dict( + argstr="-unc", + ), + unc_thresh=dict( + argstr="-uncthres %f", + ), + verbose=dict( + argstr="-v %s", + ), ) inputs = LabelFusion.input_spec() @@ -40,7 +86,11 @@ def test_LabelFusion_inputs(): def test_LabelFusion_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = LabelFusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py b/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py index f8f7bcf95d..963ddf96f8 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py @@ -4,9 +4,19 @@ def test_MathsCommand_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), out_file=dict( argstr="%s", extensions=None, @@ -14,7 +24,10 @@ def test_MathsCommand_inputs(): name_template="%s", position=-2, ), - output_datatype=dict(argstr="-odt %s", position=-3,), + output_datatype=dict( + argstr="-odt %s", + position=-3, + ), ) inputs = MathsCommand.input_spec() @@ -24,7 +37,11 @@ def test_MathsCommand_inputs(): def test_MathsCommand_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MathsCommand.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_Merge.py b/nipype/interfaces/niftyseg/tests/test_auto_Merge.py index cfeb8a01d0..de8dc903e6 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_Merge.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_Merge.py @@ -4,11 +4,27 @@ def test_Merge_inputs(): input_map = dict( - args=dict(argstr="%s",), - dimension=dict(mandatory=True,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - merge_files=dict(argstr="%s", mandatory=True, position=4,), + args=dict( + 
argstr="%s", + ), + dimension=dict( + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + merge_files=dict( + argstr="%s", + mandatory=True, + position=4, + ), out_file=dict( argstr="%s", extensions=None, @@ -16,7 +32,10 @@ def test_Merge_inputs(): name_template="%s", position=-2, ), - output_datatype=dict(argstr="-odt %s", position=-3,), + output_datatype=dict( + argstr="-odt %s", + position=-3, + ), ) inputs = Merge.input_spec() @@ -26,7 +45,11 @@ def test_Merge_inputs(): def test_Merge_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Merge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py b/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py index e78d913a4c..37a6ee059c 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py @@ -4,7 +4,13 @@ def test_NiftySegCommand_inputs(): input_map = dict( - args=dict(argstr="%s",), environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), ) inputs = NiftySegCommand.input_spec() diff --git a/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py b/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py index 3832a197f6..c5b9dba115 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py @@ -4,16 +4,40 @@ def test_PatchMatch_inputs(): input_map = dict( - args=dict(argstr="%s",), - cs_size=dict(argstr="-cs %i",), + args=dict( + argstr="%s", + ), + cs_size=dict( + argstr="-cs %i", + ), database_file=dict( - argstr="-db %s", extensions=None, mandatory=True, position=3, + argstr="-db %s", + extensions=None, + mandatory=True, + position=3, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + position=1, + ), + it_num=dict( + argstr="-it %i", + ), + mask_file=dict( + argstr="-m %s", + extensions=None, + mandatory=True, + position=2, + ), + match_num=dict( + argstr="-match %i", ), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=1,), - it_num=dict(argstr="-it %i",), - mask_file=dict(argstr="-m %s", extensions=None, mandatory=True, position=2,), - match_num=dict(argstr="-match %i",), out_file=dict( argstr="-o %s", extensions=None, @@ -21,8 +45,12 @@ def test_PatchMatch_inputs(): name_template="%s_pm.nii.gz", position=4, ), - patch_size=dict(argstr="-size %i",), - pm_num=dict(argstr="-pm %i",), + patch_size=dict( + argstr="-size %i", + ), + pm_num=dict( + argstr="-pm %i", + ), ) inputs = PatchMatch.input_spec() @@ -32,7 +60,11 @@ def test_PatchMatch_inputs(): def test_PatchMatch_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = PatchMatch.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py b/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py index b0332f1a46..6b173663a9 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py +++ 
b/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py @@ -4,11 +4,28 @@ def test_StatsCommand_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - larger_voxel=dict(argstr="-t %f", position=-3,), - mask_file=dict(argstr="-m %s", extensions=None, position=-2,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + larger_voxel=dict( + argstr="-t %f", + position=-3, + ), + mask_file=dict( + argstr="-m %s", + extensions=None, + position=-2, + ), ) inputs = StatsCommand.input_spec() @@ -18,7 +35,9 @@ def test_StatsCommand_inputs(): def test_StatsCommand_outputs(): - output_map = dict(output=dict(),) + output_map = dict( + output=dict(), + ) outputs = StatsCommand.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py b/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py index 9fc193b442..ef1d4c401f 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py @@ -4,9 +4,19 @@ def test_TupleMaths_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), operand_file1=dict( argstr="%s", extensions=None, @@ -22,12 +32,22 @@ def test_TupleMaths_inputs(): xor=["operand_value2"], ), operand_value1=dict( - argstr="%.8f", mandatory=True, position=5, xor=["operand_file1"], + argstr="%.8f", + mandatory=True, + position=5, + xor=["operand_file1"], ), operand_value2=dict( - argstr="%.8f", mandatory=True, position=6, xor=["operand_file2"], + argstr="%.8f", + mandatory=True, + position=6, + xor=["operand_file2"], + ), + operation=dict( + argstr="-%s", + mandatory=True, + position=4, ), - operation=dict(argstr="-%s", mandatory=True, position=4,), out_file=dict( argstr="%s", extensions=None, @@ -35,7 +55,10 @@ def test_TupleMaths_inputs(): name_template="%s", position=-2, ), - output_datatype=dict(argstr="-odt %s", position=-3,), + output_datatype=dict( + argstr="-odt %s", + position=-3, + ), ) inputs = TupleMaths.input_spec() @@ -45,7 +68,11 @@ def test_TupleMaths_inputs(): def test_TupleMaths_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TupleMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py b/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py index 0409efb5c7..f8189f0f84 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py @@ -4,10 +4,24 @@ def test_UnaryMaths_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - operation=dict(argstr="-%s", mandatory=True, position=4,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + 
position=2, + ), + operation=dict( + argstr="-%s", + mandatory=True, + position=4, + ), out_file=dict( argstr="%s", extensions=None, @@ -15,7 +29,10 @@ def test_UnaryMaths_inputs(): name_template="%s", position=-2, ), - output_datatype=dict(argstr="-odt %s", position=-3,), + output_datatype=dict( + argstr="-odt %s", + position=-3, + ), ) inputs = UnaryMaths.input_spec() @@ -25,7 +42,11 @@ def test_UnaryMaths_inputs(): def test_UnaryMaths_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = UnaryMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py b/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py index 177f044fd1..117ab819b6 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py @@ -4,12 +4,33 @@ def test_UnaryStats_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), - larger_voxel=dict(argstr="-t %f", position=-3,), - mask_file=dict(argstr="-m %s", extensions=None, position=-2,), - operation=dict(argstr="-%s", mandatory=True, position=4,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + larger_voxel=dict( + argstr="-t %f", + position=-3, + ), + mask_file=dict( + argstr="-m %s", + extensions=None, + position=-2, + ), + operation=dict( + argstr="-%s", + mandatory=True, + position=4, + ), ) inputs = UnaryStats.input_spec() @@ -19,7 +40,9 @@ def test_UnaryStats_inputs(): def test_UnaryStats_outputs(): - output_map = dict(output=dict(),) + output_map = dict( + output=dict(), + ) outputs = UnaryStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py b/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py index aa34d55caf..db1b784a03 100644 --- a/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py +++ b/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py @@ -7,8 +7,13 @@ def test_ComputeMask_inputs(): M=dict(), cc=dict(), m=dict(), - mean_volume=dict(extensions=None, mandatory=True,), - reference_volume=dict(extensions=None,), + mean_volume=dict( + extensions=None, + mandatory=True, + ), + reference_volume=dict( + extensions=None, + ), ) inputs = ComputeMask.input_spec() @@ -18,7 +23,11 @@ def test_ComputeMask_inputs(): def test_ComputeMask_outputs(): - output_map = dict(brain_mask=dict(extensions=None,),) + output_map = dict( + brain_mask=dict( + extensions=None, + ), + ) outputs = ComputeMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py b/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py index c89423bc74..172f2205fd 100644 --- a/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py +++ b/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py @@ -4,15 +4,35 @@ def test_EstimateContrast_inputs(): input_map = dict( - axis=dict(mandatory=True,), - beta=dict(extensions=None, mandatory=True,), - constants=dict(mandatory=True,), - contrasts=dict(mandatory=True,), - dof=dict(mandatory=True,), - mask=dict(extensions=None,), - nvbeta=dict(mandatory=True,), - reg_names=dict(mandatory=True,), - s2=dict(extensions=None, 
mandatory=True,), + axis=dict( + mandatory=True, + ), + beta=dict( + extensions=None, + mandatory=True, + ), + constants=dict( + mandatory=True, + ), + contrasts=dict( + mandatory=True, + ), + dof=dict( + mandatory=True, + ), + mask=dict( + extensions=None, + ), + nvbeta=dict( + mandatory=True, + ), + reg_names=dict( + mandatory=True, + ), + s2=dict( + extensions=None, + mandatory=True, + ), ) inputs = EstimateContrast.input_spec() @@ -22,7 +42,11 @@ def test_EstimateContrast_inputs(): def test_EstimateContrast_outputs(): - output_map = dict(p_maps=dict(), stat_maps=dict(), z_maps=dict(),) + output_map = dict( + p_maps=dict(), + stat_maps=dict(), + z_maps=dict(), + ) outputs = EstimateContrast.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nipy/tests/test_auto_FitGLM.py b/nipype/interfaces/nipy/tests/test_auto_FitGLM.py index 7aa96870c7..f04081214c 100644 --- a/nipype/interfaces/nipy/tests/test_auto_FitGLM.py +++ b/nipype/interfaces/nipy/tests/test_auto_FitGLM.py @@ -4,16 +4,36 @@ def test_FitGLM_inputs(): input_map = dict( - TR=dict(mandatory=True,), - drift_model=dict(usedefault=True,), - hrf_model=dict(usedefault=True,), - mask=dict(extensions=None,), - method=dict(usedefault=True,), - model=dict(usedefault=True,), - normalize_design_matrix=dict(usedefault=True,), - plot_design_matrix=dict(usedefault=True,), - save_residuals=dict(usedefault=True,), - session_info=dict(mandatory=True,), + TR=dict( + mandatory=True, + ), + drift_model=dict( + usedefault=True, + ), + hrf_model=dict( + usedefault=True, + ), + mask=dict( + extensions=None, + ), + method=dict( + usedefault=True, + ), + model=dict( + usedefault=True, + ), + normalize_design_matrix=dict( + usedefault=True, + ), + plot_design_matrix=dict( + usedefault=True, + ), + save_residuals=dict( + usedefault=True, + ), + session_info=dict( + mandatory=True, + ), ) inputs = FitGLM.input_spec() @@ -24,15 +44,23 @@ def test_FitGLM_inputs(): def test_FitGLM_outputs(): output_map = dict( - a=dict(extensions=None,), + a=dict( + extensions=None, + ), axis=dict(), - beta=dict(extensions=None,), + beta=dict( + extensions=None, + ), constants=dict(), dof=dict(), nvbeta=dict(), reg_names=dict(), - residuals=dict(extensions=None,), - s2=dict(extensions=None,), + residuals=dict( + extensions=None, + ), + s2=dict( + extensions=None, + ), ) outputs = FitGLM.output_spec() diff --git a/nipype/interfaces/nipy/tests/test_auto_Similarity.py b/nipype/interfaces/nipy/tests/test_auto_Similarity.py index ac0b3c853a..81e8622078 100644 --- a/nipype/interfaces/nipy/tests/test_auto_Similarity.py +++ b/nipype/interfaces/nipy/tests/test_auto_Similarity.py @@ -4,11 +4,23 @@ def test_Similarity_inputs(): input_map = dict( - mask1=dict(extensions=None,), - mask2=dict(extensions=None,), - metric=dict(usedefault=True,), - volume1=dict(extensions=None, mandatory=True,), - volume2=dict(extensions=None, mandatory=True,), + mask1=dict( + extensions=None, + ), + mask2=dict( + extensions=None, + ), + metric=dict( + usedefault=True, + ), + volume1=dict( + extensions=None, + mandatory=True, + ), + volume2=dict( + extensions=None, + mandatory=True, + ), ) inputs = Similarity.input_spec() @@ -18,7 +30,9 @@ def test_Similarity_inputs(): def test_Similarity_outputs(): - output_map = dict(similarity=dict(),) + output_map = dict( + similarity=dict(), + ) outputs = Similarity.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py 
b/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py index 2025d62498..fd65848f72 100644 --- a/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py +++ b/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py @@ -4,10 +4,17 @@ def test_SpaceTimeRealigner_inputs(): input_map = dict( - in_file=dict(mandatory=True, min_ver="0.4.0.dev",), - slice_info=dict(requires=["slice_times"],), + in_file=dict( + mandatory=True, + min_ver="0.4.0.dev", + ), + slice_info=dict( + requires=["slice_times"], + ), slice_times=dict(), - tr=dict(requires=["slice_times"],), + tr=dict( + requires=["slice_times"], + ), ) inputs = SpaceTimeRealigner.input_spec() @@ -17,7 +24,10 @@ def test_SpaceTimeRealigner_inputs(): def test_SpaceTimeRealigner_outputs(): - output_map = dict(out_file=dict(), par_file=dict(),) + output_map = dict( + out_file=dict(), + par_file=dict(), + ) outputs = SpaceTimeRealigner.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nipy/tests/test_auto_Trim.py b/nipype/interfaces/nipy/tests/test_auto_Trim.py index 4b33c8b4b2..c4ecee3007 100644 --- a/nipype/interfaces/nipy/tests/test_auto_Trim.py +++ b/nipype/interfaces/nipy/tests/test_auto_Trim.py @@ -4,11 +4,22 @@ def test_Trim_inputs(): input_map = dict( - begin_index=dict(usedefault=True,), - end_index=dict(usedefault=True,), - in_file=dict(extensions=None, mandatory=True,), - out_file=dict(extensions=None,), - suffix=dict(usedefault=True,), + begin_index=dict( + usedefault=True, + ), + end_index=dict( + usedefault=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + out_file=dict( + extensions=None, + ), + suffix=dict( + usedefault=True, + ), ) inputs = Trim.input_spec() @@ -18,7 +29,11 @@ def test_Trim_inputs(): def test_Trim_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Trim.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py b/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py index d188c27800..8c70d059ab 100644 --- a/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py +++ b/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py @@ -4,15 +4,30 @@ def test_CoherenceAnalyzer_inputs(): input_map = dict( - NFFT=dict(usedefault=True,), + NFFT=dict( + usedefault=True, + ), TR=dict(), - figure_type=dict(usedefault=True,), - frequency_range=dict(usedefault=True,), + figure_type=dict( + usedefault=True, + ), + frequency_range=dict( + usedefault=True, + ), in_TS=dict(), - in_file=dict(extensions=None, requires=("TR",),), - n_overlap=dict(usedefault=True,), - output_csv_file=dict(extensions=None,), - output_figure_file=dict(extensions=None,), + in_file=dict( + extensions=None, + requires=("TR",), + ), + n_overlap=dict( + usedefault=True, + ), + output_csv_file=dict( + extensions=None, + ), + output_figure_file=dict( + extensions=None, + ), ) inputs = CoherenceAnalyzer.input_spec() @@ -24,11 +39,19 @@ def test_CoherenceAnalyzer_inputs(): def test_CoherenceAnalyzer_outputs(): output_map = dict( coherence_array=dict(), - coherence_csv=dict(extensions=None,), - coherence_fig=dict(extensions=None,), + coherence_csv=dict( + extensions=None, + ), + coherence_fig=dict( + extensions=None, + ), timedelay_array=dict(), - timedelay_csv=dict(extensions=None,), - timedelay_fig=dict(extensions=None,), + timedelay_csv=dict( + extensions=None, + ), + timedelay_fig=dict( + 
extensions=None, + ), ) outputs = CoherenceAnalyzer.output_spec() diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py index fd22f39bba..9098ee2640 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py @@ -4,16 +4,45 @@ def test_BRAINSPosteriorToContinuousClass_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputBasalGmVolume=dict(argstr="--inputBasalGmVolume %s", extensions=None,), - inputCrblGmVolume=dict(argstr="--inputCrblGmVolume %s", extensions=None,), - inputCrblWmVolume=dict(argstr="--inputCrblWmVolume %s", extensions=None,), - inputCsfVolume=dict(argstr="--inputCsfVolume %s", extensions=None,), - inputSurfaceGmVolume=dict(argstr="--inputSurfaceGmVolume %s", extensions=None,), - inputVbVolume=dict(argstr="--inputVbVolume %s", extensions=None,), - inputWhiteVolume=dict(argstr="--inputWhiteVolume %s", extensions=None,), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputBasalGmVolume=dict( + argstr="--inputBasalGmVolume %s", + extensions=None, + ), + inputCrblGmVolume=dict( + argstr="--inputCrblGmVolume %s", + extensions=None, + ), + inputCrblWmVolume=dict( + argstr="--inputCrblWmVolume %s", + extensions=None, + ), + inputCsfVolume=dict( + argstr="--inputCsfVolume %s", + extensions=None, + ), + inputSurfaceGmVolume=dict( + argstr="--inputSurfaceGmVolume %s", + extensions=None, + ), + inputVbVolume=dict( + argstr="--inputVbVolume %s", + extensions=None, + ), + inputWhiteVolume=dict( + argstr="--inputWhiteVolume %s", + extensions=None, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = BRAINSPosteriorToContinuousClass.input_spec() @@ -23,7 +52,11 @@ def test_BRAINSPosteriorToContinuousClass_inputs(): def test_BRAINSPosteriorToContinuousClass_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = BRAINSPosteriorToContinuousClass.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py index 0999af73d0..195ebdcad0 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py @@ -4,19 +4,53 @@ def test_BRAINSTalairach_inputs(): input_map = dict( - AC=dict(argstr="--AC %s", sep=",",), - ACisIndex=dict(argstr="--ACisIndex ",), - IRP=dict(argstr="--IRP %s", sep=",",), - IRPisIndex=dict(argstr="--IRPisIndex ",), - PC=dict(argstr="--PC %s", sep=",",), - PCisIndex=dict(argstr="--PCisIndex ",), - SLA=dict(argstr="--SLA %s", sep=",",), - SLAisIndex=dict(argstr="--SLAisIndex ",), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - outputBox=dict(argstr="--outputBox %s", hash_files=False,), - outputGrid=dict(argstr="--outputGrid %s", hash_files=False,), + AC=dict( + argstr="--AC %s", + sep=",", + ), + ACisIndex=dict( + argstr="--ACisIndex ", + ), + IRP=dict( + argstr="--IRP %s", + sep=",", + ), + 
IRPisIndex=dict( + argstr="--IRPisIndex ", + ), + PC=dict( + argstr="--PC %s", + sep=",", + ), + PCisIndex=dict( + argstr="--PCisIndex ", + ), + SLA=dict( + argstr="--SLA %s", + sep=",", + ), + SLAisIndex=dict( + argstr="--SLAisIndex ", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + outputBox=dict( + argstr="--outputBox %s", + hash_files=False, + ), + outputGrid=dict( + argstr="--outputGrid %s", + hash_files=False, + ), ) inputs = BRAINSTalairach.input_spec() @@ -27,7 +61,12 @@ def test_BRAINSTalairach_inputs(): def test_BRAINSTalairach_outputs(): output_map = dict( - outputBox=dict(extensions=None,), outputGrid=dict(extensions=None,), + outputBox=dict( + extensions=None, + ), + outputGrid=dict( + extensions=None, + ), ) outputs = BRAINSTalairach.output_spec() diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py index 959733ce42..2470e42f47 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py @@ -4,14 +4,35 @@ def test_BRAINSTalairachMask_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - expand=dict(argstr="--expand ",), - hemisphereMode=dict(argstr="--hemisphereMode %s",), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - talairachBox=dict(argstr="--talairachBox %s", extensions=None,), - talairachParameters=dict(argstr="--talairachParameters %s", extensions=None,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + expand=dict( + argstr="--expand ", + ), + hemisphereMode=dict( + argstr="--hemisphereMode %s", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + talairachBox=dict( + argstr="--talairachBox %s", + extensions=None, + ), + talairachParameters=dict( + argstr="--talairachParameters %s", + extensions=None, + ), ) inputs = BRAINSTalairachMask.input_spec() @@ -21,7 +42,11 @@ def test_BRAINSTalairachMask_inputs(): def test_BRAINSTalairachMask_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = BRAINSTalairachMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py b/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py index 766c9c7a2b..218c67a4b0 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py @@ -4,19 +4,43 @@ def test_GenerateEdgeMapImage_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputMRVolumes=dict(argstr="--inputMRVolumes %s...",), - inputMask=dict(argstr="--inputMask %s", extensions=None,), - lowerPercentileMatching=dict(argstr="--lowerPercentileMatching %f",), - maximumOutputRange=dict(argstr="--maximumOutputRange %d",), - minimumOutputRange=dict(argstr="--minimumOutputRange %d",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputEdgeMap=dict(argstr="--outputEdgeMap 
%s", hash_files=False,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMRVolumes=dict( + argstr="--inputMRVolumes %s...", + ), + inputMask=dict( + argstr="--inputMask %s", + extensions=None, + ), + lowerPercentileMatching=dict( + argstr="--lowerPercentileMatching %f", + ), + maximumOutputRange=dict( + argstr="--maximumOutputRange %d", + ), + minimumOutputRange=dict( + argstr="--minimumOutputRange %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputEdgeMap=dict( + argstr="--outputEdgeMap %s", + hash_files=False, + ), outputMaximumGradientImage=dict( - argstr="--outputMaximumGradientImage %s", hash_files=False, + argstr="--outputMaximumGradientImage %s", + hash_files=False, + ), + upperPercentileMatching=dict( + argstr="--upperPercentileMatching %f", ), - upperPercentileMatching=dict(argstr="--upperPercentileMatching %f",), ) inputs = GenerateEdgeMapImage.input_spec() @@ -27,8 +51,12 @@ def test_GenerateEdgeMapImage_inputs(): def test_GenerateEdgeMapImage_outputs(): output_map = dict( - outputEdgeMap=dict(extensions=None,), - outputMaximumGradientImage=dict(extensions=None,), + outputEdgeMap=dict( + extensions=None, + ), + outputMaximumGradientImage=dict( + extensions=None, + ), ) outputs = GenerateEdgeMapImage.output_spec() diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py b/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py index 5cba5f42d9..e68b03dcf9 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py @@ -4,12 +4,27 @@ def test_GeneratePurePlugMask_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputImageModalities=dict(argstr="--inputImageModalities %s...",), - numberOfSubSamples=dict(argstr="--numberOfSubSamples %s", sep=",",), - outputMaskFile=dict(argstr="--outputMaskFile %s", hash_files=False,), - threshold=dict(argstr="--threshold %f",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputImageModalities=dict( + argstr="--inputImageModalities %s...", + ), + numberOfSubSamples=dict( + argstr="--numberOfSubSamples %s", + sep=",", + ), + outputMaskFile=dict( + argstr="--outputMaskFile %s", + hash_files=False, + ), + threshold=dict( + argstr="--threshold %f", + ), ) inputs = GeneratePurePlugMask.input_spec() @@ -19,7 +34,11 @@ def test_GeneratePurePlugMask_inputs(): def test_GeneratePurePlugMask_outputs(): - output_map = dict(outputMaskFile=dict(extensions=None,),) + output_map = dict( + outputMaskFile=dict( + extensions=None, + ), + ) outputs = GeneratePurePlugMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py b/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py index 746857c627..110aec4891 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py @@ -4,20 +4,48 @@ def test_HistogramMatchingFilter_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - histogramAlgorithm=dict(argstr="--histogramAlgorithm %s",), - inputBinaryVolume=dict(argstr="--inputBinaryVolume %s", extensions=None,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), 
- numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d",), - numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + histogramAlgorithm=dict( + argstr="--histogramAlgorithm %s", + ), + inputBinaryVolume=dict( + argstr="--inputBinaryVolume %s", + extensions=None, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfHistogramBins=dict( + argstr="--numberOfHistogramBins %d", + ), + numberOfMatchPoints=dict( + argstr="--numberOfMatchPoints %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), referenceBinaryVolume=dict( - argstr="--referenceBinaryVolume %s", extensions=None, + argstr="--referenceBinaryVolume %s", + extensions=None, + ), + referenceVolume=dict( + argstr="--referenceVolume %s", + extensions=None, + ), + verbose=dict( + argstr="--verbose ", + ), + writeHistogram=dict( + argstr="--writeHistogram %s", ), - referenceVolume=dict(argstr="--referenceVolume %s", extensions=None,), - verbose=dict(argstr="--verbose ",), - writeHistogram=dict(argstr="--writeHistogram %s",), ) inputs = HistogramMatchingFilter.input_spec() @@ -27,7 +55,11 @@ def test_HistogramMatchingFilter_inputs(): def test_HistogramMatchingFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = HistogramMatchingFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py b/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py index 348ecf67d0..881e3379de 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py @@ -4,12 +4,28 @@ def test_SimilarityIndex_inputs(): input_map = dict( - ANNContinuousVolume=dict(argstr="--ANNContinuousVolume %s", extensions=None,), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputManualVolume=dict(argstr="--inputManualVolume %s", extensions=None,), - outputCSVFilename=dict(argstr="--outputCSVFilename %s", extensions=None,), - thresholdInterval=dict(argstr="--thresholdInterval %f",), + ANNContinuousVolume=dict( + argstr="--ANNContinuousVolume %s", + extensions=None, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputManualVolume=dict( + argstr="--inputManualVolume %s", + extensions=None, + ), + outputCSVFilename=dict( + argstr="--outputCSVFilename %s", + extensions=None, + ), + thresholdInterval=dict( + argstr="--thresholdInterval %f", + ), ) inputs = SimilarityIndex.input_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py index d1f8c33324..4dfb6943cb 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py @@ -4,26 +4,76 @@ def test_DWIConvert_inputs(): input_map = dict( - allowLossyConversion=dict(argstr="--allowLossyConversion ",), - args=dict(argstr="%s",), - conversionMode=dict(argstr="--conversionMode %s",), - environ=dict(nohash=True, usedefault=True,), - fMRI=dict(argstr="--fMRI ",), - fslNIFTIFile=dict(argstr="--fslNIFTIFile %s", extensions=None,), - 
gradientVectorFile=dict(argstr="--gradientVectorFile %s", hash_files=False,), - inputBValues=dict(argstr="--inputBValues %s", extensions=None,), - inputBVectors=dict(argstr="--inputBVectors %s", extensions=None,), - inputDicomDirectory=dict(argstr="--inputDicomDirectory %s",), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - outputBValues=dict(argstr="--outputBValues %s", hash_files=False,), - outputBVectors=dict(argstr="--outputBVectors %s", hash_files=False,), - outputDirectory=dict(argstr="--outputDirectory %s", hash_files=False,), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - smallGradientThreshold=dict(argstr="--smallGradientThreshold %f",), - transposeInputBVectors=dict(argstr="--transposeInputBVectors ",), - useBMatrixGradientDirections=dict(argstr="--useBMatrixGradientDirections ",), - useIdentityMeaseurementFrame=dict(argstr="--useIdentityMeaseurementFrame ",), - writeProtocolGradientsFile=dict(argstr="--writeProtocolGradientsFile ",), + allowLossyConversion=dict( + argstr="--allowLossyConversion ", + ), + args=dict( + argstr="%s", + ), + conversionMode=dict( + argstr="--conversionMode %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fMRI=dict( + argstr="--fMRI ", + ), + fslNIFTIFile=dict( + argstr="--fslNIFTIFile %s", + extensions=None, + ), + gradientVectorFile=dict( + argstr="--gradientVectorFile %s", + hash_files=False, + ), + inputBValues=dict( + argstr="--inputBValues %s", + extensions=None, + ), + inputBVectors=dict( + argstr="--inputBVectors %s", + extensions=None, + ), + inputDicomDirectory=dict( + argstr="--inputDicomDirectory %s", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + outputBValues=dict( + argstr="--outputBValues %s", + hash_files=False, + ), + outputBVectors=dict( + argstr="--outputBVectors %s", + hash_files=False, + ), + outputDirectory=dict( + argstr="--outputDirectory %s", + hash_files=False, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + smallGradientThreshold=dict( + argstr="--smallGradientThreshold %f", + ), + transposeInputBVectors=dict( + argstr="--transposeInputBVectors ", + ), + useBMatrixGradientDirections=dict( + argstr="--useBMatrixGradientDirections ", + ), + useIdentityMeaseurementFrame=dict( + argstr="--useIdentityMeaseurementFrame ", + ), + writeProtocolGradientsFile=dict( + argstr="--writeProtocolGradientsFile ", + ), ) inputs = DWIConvert.input_spec() @@ -34,11 +84,19 @@ def test_DWIConvert_inputs(): def test_DWIConvert_outputs(): output_map = dict( - gradientVectorFile=dict(extensions=None,), - outputBValues=dict(extensions=None,), - outputBVectors=dict(extensions=None,), + gradientVectorFile=dict( + extensions=None, + ), + outputBValues=dict( + extensions=None, + ), + outputBVectors=dict( + extensions=None, + ), outputDirectory=dict(), - outputVolume=dict(extensions=None,), + outputVolume=dict( + extensions=None, + ), ) outputs = DWIConvert.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py index ed184ae4f2..38e8f92b0b 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py @@ -4,16 +4,39 @@ def test_compareTractInclusion_inputs(): input_map = dict( - args=dict(argstr="%s",), - closeness=dict(argstr="--closeness %f",), - environ=dict(nohash=True, usedefault=True,), 
- numberOfPoints=dict(argstr="--numberOfPoints %d",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - standardFiber=dict(argstr="--standardFiber %s", extensions=None,), - testFiber=dict(argstr="--testFiber %s", extensions=None,), - testForBijection=dict(argstr="--testForBijection ",), - testForFiberCardinality=dict(argstr="--testForFiberCardinality ",), - writeXMLPolyDataFile=dict(argstr="--writeXMLPolyDataFile ",), + args=dict( + argstr="%s", + ), + closeness=dict( + argstr="--closeness %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + numberOfPoints=dict( + argstr="--numberOfPoints %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + standardFiber=dict( + argstr="--standardFiber %s", + extensions=None, + ), + testFiber=dict( + argstr="--testFiber %s", + extensions=None, + ), + testForBijection=dict( + argstr="--testForBijection ", + ), + testForFiberCardinality=dict( + argstr="--testForFiberCardinality ", + ), + writeXMLPolyDataFile=dict( + argstr="--writeXMLPolyDataFile ", + ), ) inputs = compareTractInclusion.input_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py index fe4e00032b..17d4d19b4c 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py @@ -4,12 +4,26 @@ def test_dtiaverage_inputs(): input_map = dict( - DTI_double=dict(argstr="--DTI_double ",), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputs=dict(argstr="--inputs %s...",), - tensor_output=dict(argstr="--tensor_output %s", hash_files=False,), - verbose=dict(argstr="--verbose ",), + DTI_double=dict( + argstr="--DTI_double ", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputs=dict( + argstr="--inputs %s...", + ), + tensor_output=dict( + argstr="--tensor_output %s", + hash_files=False, + ), + verbose=dict( + argstr="--verbose ", + ), ) inputs = dtiaverage.input_spec() @@ -19,7 +33,11 @@ def test_dtiaverage_inputs(): def test_dtiaverage_outputs(): - output_map = dict(tensor_output=dict(extensions=None,),) + output_map = dict( + tensor_output=dict( + extensions=None, + ), + ) outputs = dtiaverage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py index c7586fc34d..0a36716e87 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py @@ -4,26 +4,75 @@ def test_dtiestim_inputs(): input_map = dict( - B0=dict(argstr="--B0 %s", hash_files=False,), - B0_mask_output=dict(argstr="--B0_mask_output %s", hash_files=False,), - DTI_double=dict(argstr="--DTI_double ",), - args=dict(argstr="%s",), - bad_region_mask=dict(argstr="--bad_region_mask %s", extensions=None,), - brain_mask=dict(argstr="--brain_mask %s", extensions=None,), - correction=dict(argstr="--correction %s",), - defaultTensor=dict(argstr="--defaultTensor %s", sep=",",), - dwi_image=dict(argstr="--dwi_image %s", extensions=None,), - environ=dict(nohash=True, usedefault=True,), - idwi=dict(argstr="--idwi %s", hash_files=False,), - method=dict(argstr="--method %s",), - shiftNeg=dict(argstr="--shiftNeg ",), - shiftNegCoeff=dict(argstr="--shiftNegCoeff %f",), - sigma=dict(argstr="--sigma %f",), - 
step=dict(argstr="--step %f",), - tensor_output=dict(argstr="--tensor_output %s", hash_files=False,), - threshold=dict(argstr="--threshold %d",), - verbose=dict(argstr="--verbose ",), - weight_iterations=dict(argstr="--weight_iterations %d",), + B0=dict( + argstr="--B0 %s", + hash_files=False, + ), + B0_mask_output=dict( + argstr="--B0_mask_output %s", + hash_files=False, + ), + DTI_double=dict( + argstr="--DTI_double ", + ), + args=dict( + argstr="%s", + ), + bad_region_mask=dict( + argstr="--bad_region_mask %s", + extensions=None, + ), + brain_mask=dict( + argstr="--brain_mask %s", + extensions=None, + ), + correction=dict( + argstr="--correction %s", + ), + defaultTensor=dict( + argstr="--defaultTensor %s", + sep=",", + ), + dwi_image=dict( + argstr="--dwi_image %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + idwi=dict( + argstr="--idwi %s", + hash_files=False, + ), + method=dict( + argstr="--method %s", + ), + shiftNeg=dict( + argstr="--shiftNeg ", + ), + shiftNegCoeff=dict( + argstr="--shiftNegCoeff %f", + ), + sigma=dict( + argstr="--sigma %f", + ), + step=dict( + argstr="--step %f", + ), + tensor_output=dict( + argstr="--tensor_output %s", + hash_files=False, + ), + threshold=dict( + argstr="--threshold %d", + ), + verbose=dict( + argstr="--verbose ", + ), + weight_iterations=dict( + argstr="--weight_iterations %d", + ), ) inputs = dtiestim.input_spec() @@ -34,10 +83,18 @@ def test_dtiestim_inputs(): def test_dtiestim_outputs(): output_map = dict( - B0=dict(extensions=None,), - B0_mask_output=dict(extensions=None,), - idwi=dict(extensions=None,), - tensor_output=dict(extensions=None,), + B0=dict( + extensions=None, + ), + B0_mask_output=dict( + extensions=None, + ), + idwi=dict( + extensions=None, + ), + tensor_output=dict( + extensions=None, + ), ) outputs = dtiestim.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py index 01a53b18cc..24352abbe3 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py @@ -4,43 +4,121 @@ def test_dtiprocess_inputs(): input_map = dict( - DTI_double=dict(argstr="--DTI_double ",), - RD_output=dict(argstr="--RD_output %s", hash_files=False,), - affineitk_file=dict(argstr="--affineitk_file %s", extensions=None,), - args=dict(argstr="%s",), - color_fa_output=dict(argstr="--color_fa_output %s", hash_files=False,), - correction=dict(argstr="--correction %s",), - deformation_output=dict(argstr="--deformation_output %s", hash_files=False,), - dof_file=dict(argstr="--dof_file %s", extensions=None,), - dti_image=dict(argstr="--dti_image %s", extensions=None,), - environ=dict(nohash=True, usedefault=True,), - fa_gradient_output=dict(argstr="--fa_gradient_output %s", hash_files=False,), - fa_gradmag_output=dict(argstr="--fa_gradmag_output %s", hash_files=False,), - fa_output=dict(argstr="--fa_output %s", hash_files=False,), - forward=dict(argstr="--forward %s", extensions=None,), + DTI_double=dict( + argstr="--DTI_double ", + ), + RD_output=dict( + argstr="--RD_output %s", + hash_files=False, + ), + affineitk_file=dict( + argstr="--affineitk_file %s", + extensions=None, + ), + args=dict( + argstr="%s", + ), + color_fa_output=dict( + argstr="--color_fa_output %s", + hash_files=False, + ), + correction=dict( + argstr="--correction %s", + ), + deformation_output=dict( + argstr="--deformation_output %s", + hash_files=False, + 
), + dof_file=dict( + argstr="--dof_file %s", + extensions=None, + ), + dti_image=dict( + argstr="--dti_image %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fa_gradient_output=dict( + argstr="--fa_gradient_output %s", + hash_files=False, + ), + fa_gradmag_output=dict( + argstr="--fa_gradmag_output %s", + hash_files=False, + ), + fa_output=dict( + argstr="--fa_output %s", + hash_files=False, + ), + forward=dict( + argstr="--forward %s", + extensions=None, + ), frobenius_norm_output=dict( - argstr="--frobenius_norm_output %s", hash_files=False, - ), - hField=dict(argstr="--hField ",), - interpolation=dict(argstr="--interpolation %s",), - lambda1_output=dict(argstr="--lambda1_output %s", hash_files=False,), - lambda2_output=dict(argstr="--lambda2_output %s", hash_files=False,), - lambda3_output=dict(argstr="--lambda3_output %s", hash_files=False,), - mask=dict(argstr="--mask %s", extensions=None,), - md_output=dict(argstr="--md_output %s", hash_files=False,), + argstr="--frobenius_norm_output %s", + hash_files=False, + ), + hField=dict( + argstr="--hField ", + ), + interpolation=dict( + argstr="--interpolation %s", + ), + lambda1_output=dict( + argstr="--lambda1_output %s", + hash_files=False, + ), + lambda2_output=dict( + argstr="--lambda2_output %s", + hash_files=False, + ), + lambda3_output=dict( + argstr="--lambda3_output %s", + hash_files=False, + ), + mask=dict( + argstr="--mask %s", + extensions=None, + ), + md_output=dict( + argstr="--md_output %s", + hash_files=False, + ), negative_eigenvector_output=dict( - argstr="--negative_eigenvector_output %s", hash_files=False, + argstr="--negative_eigenvector_output %s", + hash_files=False, + ), + newdof_file=dict( + argstr="--newdof_file %s", + extensions=None, + ), + outmask=dict( + argstr="--outmask %s", + hash_files=False, ), - newdof_file=dict(argstr="--newdof_file %s", extensions=None,), - outmask=dict(argstr="--outmask %s", hash_files=False,), principal_eigenvector_output=dict( - argstr="--principal_eigenvector_output %s", hash_files=False, + argstr="--principal_eigenvector_output %s", + hash_files=False, + ), + reorientation=dict( + argstr="--reorientation %s", + ), + rot_output=dict( + argstr="--rot_output %s", + hash_files=False, + ), + scalar_float=dict( + argstr="--scalar_float ", + ), + sigma=dict( + argstr="--sigma %f", + ), + verbose=dict( + argstr="--verbose ", ), - reorientation=dict(argstr="--reorientation %s",), - rot_output=dict(argstr="--rot_output %s", hash_files=False,), - scalar_float=dict(argstr="--scalar_float ",), - sigma=dict(argstr="--sigma %f",), - verbose=dict(argstr="--verbose ",), ) inputs = dtiprocess.input_spec() @@ -51,21 +129,51 @@ def test_dtiprocess_inputs(): def test_dtiprocess_outputs(): output_map = dict( - RD_output=dict(extensions=None,), - color_fa_output=dict(extensions=None,), - deformation_output=dict(extensions=None,), - fa_gradient_output=dict(extensions=None,), - fa_gradmag_output=dict(extensions=None,), - fa_output=dict(extensions=None,), - frobenius_norm_output=dict(extensions=None,), - lambda1_output=dict(extensions=None,), - lambda2_output=dict(extensions=None,), - lambda3_output=dict(extensions=None,), - md_output=dict(extensions=None,), - negative_eigenvector_output=dict(extensions=None,), - outmask=dict(extensions=None,), - principal_eigenvector_output=dict(extensions=None,), - rot_output=dict(extensions=None,), + RD_output=dict( + extensions=None, + ), + color_fa_output=dict( + extensions=None, + ), + deformation_output=dict( + extensions=None, + 
), + fa_gradient_output=dict( + extensions=None, + ), + fa_gradmag_output=dict( + extensions=None, + ), + fa_output=dict( + extensions=None, + ), + frobenius_norm_output=dict( + extensions=None, + ), + lambda1_output=dict( + extensions=None, + ), + lambda2_output=dict( + extensions=None, + ), + lambda3_output=dict( + extensions=None, + ), + md_output=dict( + extensions=None, + ), + negative_eigenvector_output=dict( + extensions=None, + ), + outmask=dict( + extensions=None, + ), + principal_eigenvector_output=dict( + extensions=None, + ), + rot_output=dict( + extensions=None, + ), ) outputs = dtiprocess.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py index 51ec99b1b8..aaa516e9dc 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py @@ -4,13 +4,30 @@ def test_extractNrrdVectorIndex_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - setImageOrientation=dict(argstr="--setImageOrientation %s",), - vectorIndex=dict(argstr="--vectorIndex %d",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + setImageOrientation=dict( + argstr="--setImageOrientation %s", + ), + vectorIndex=dict( + argstr="--vectorIndex %d", + ), ) inputs = extractNrrdVectorIndex.input_spec() @@ -20,7 +37,11 @@ def test_extractNrrdVectorIndex_inputs(): def test_extractNrrdVectorIndex_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = extractNrrdVectorIndex.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py index 3af3c53648..da3e02c37b 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py @@ -4,12 +4,27 @@ def test_gtractAnisotropyMap_inputs(): input_map = dict( - anisotropyType=dict(argstr="--anisotropyType %s",), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputTensorVolume=dict(argstr="--inputTensorVolume %s", extensions=None,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + anisotropyType=dict( + argstr="--anisotropyType %s", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputTensorVolume=dict( + argstr="--inputTensorVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = gtractAnisotropyMap.input_spec() @@ -19,7 +34,11 @@ def test_gtractAnisotropyMap_inputs(): def test_gtractAnisotropyMap_outputs(): - output_map = 
dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = gtractAnisotropyMap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py index 1155f11628..a37b0e65ce 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py @@ -4,13 +4,30 @@ def test_gtractAverageBvalues_inputs(): input_map = dict( - args=dict(argstr="%s",), - averageB0only=dict(argstr="--averageB0only ",), - directionsTolerance=dict(argstr="--directionsTolerance %f",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + args=dict( + argstr="%s", + ), + averageB0only=dict( + argstr="--averageB0only ", + ), + directionsTolerance=dict( + argstr="--directionsTolerance %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = gtractAverageBvalues.input_spec() @@ -20,7 +37,11 @@ def test_gtractAverageBvalues_inputs(): def test_gtractAverageBvalues_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = gtractAverageBvalues.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py index 00fc963f69..3d6e24aee3 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py @@ -4,13 +4,30 @@ def test_gtractClipAnisotropy_inputs(): input_map = dict( - args=dict(argstr="%s",), - clipFirstSlice=dict(argstr="--clipFirstSlice ",), - clipLastSlice=dict(argstr="--clipLastSlice ",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + args=dict( + argstr="%s", + ), + clipFirstSlice=dict( + argstr="--clipFirstSlice ", + ), + clipLastSlice=dict( + argstr="--clipLastSlice ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = gtractClipAnisotropy.input_spec() @@ -20,7 +37,11 @@ def test_gtractClipAnisotropy_inputs(): def test_gtractClipAnisotropy_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = gtractClipAnisotropy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py 
b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py index a5d2337c44..1ab780c1b9 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py @@ -4,34 +4,90 @@ def test_gtractCoRegAnatomy_inputs(): input_map = dict( - args=dict(argstr="%s",), - borderSize=dict(argstr="--borderSize %d",), - convergence=dict(argstr="--convergence %f",), - environ=dict(nohash=True, usedefault=True,), - gradientTolerance=dict(argstr="--gradientTolerance %f",), - gridSize=dict(argstr="--gridSize %s", sep=",",), + args=dict( + argstr="%s", + ), + borderSize=dict( + argstr="--borderSize %d", + ), + convergence=dict( + argstr="--convergence %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gradientTolerance=dict( + argstr="--gradientTolerance %f", + ), + gridSize=dict( + argstr="--gridSize %s", + sep=",", + ), inputAnatomicalVolume=dict( - argstr="--inputAnatomicalVolume %s", extensions=None, - ), - inputRigidTransform=dict(argstr="--inputRigidTransform %s", extensions=None,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - maxBSplineDisplacement=dict(argstr="--maxBSplineDisplacement %f",), - maximumStepSize=dict(argstr="--maximumStepSize %f",), - minimumStepSize=dict(argstr="--minimumStepSize %f",), - numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d",), - numberOfIterations=dict(argstr="--numberOfIterations %d",), - numberOfSamples=dict(argstr="--numberOfSamples %d",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputTransformName=dict(argstr="--outputTransformName %s", hash_files=False,), - relaxationFactor=dict(argstr="--relaxationFactor %f",), - samplingPercentage=dict(argstr="--samplingPercentage %f",), - spatialScale=dict(argstr="--spatialScale %d",), - transformType=dict(argstr="--transformType %s",), - translationScale=dict(argstr="--translationScale %f",), - useCenterOfHeadAlign=dict(argstr="--useCenterOfHeadAlign ",), - useGeometryAlign=dict(argstr="--useGeometryAlign ",), - useMomentsAlign=dict(argstr="--useMomentsAlign ",), - vectorIndex=dict(argstr="--vectorIndex %d",), + argstr="--inputAnatomicalVolume %s", + extensions=None, + ), + inputRigidTransform=dict( + argstr="--inputRigidTransform %s", + extensions=None, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + maxBSplineDisplacement=dict( + argstr="--maxBSplineDisplacement %f", + ), + maximumStepSize=dict( + argstr="--maximumStepSize %f", + ), + minimumStepSize=dict( + argstr="--minimumStepSize %f", + ), + numberOfHistogramBins=dict( + argstr="--numberOfHistogramBins %d", + ), + numberOfIterations=dict( + argstr="--numberOfIterations %d", + ), + numberOfSamples=dict( + argstr="--numberOfSamples %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputTransformName=dict( + argstr="--outputTransformName %s", + hash_files=False, + ), + relaxationFactor=dict( + argstr="--relaxationFactor %f", + ), + samplingPercentage=dict( + argstr="--samplingPercentage %f", + ), + spatialScale=dict( + argstr="--spatialScale %d", + ), + transformType=dict( + argstr="--transformType %s", + ), + translationScale=dict( + argstr="--translationScale %f", + ), + useCenterOfHeadAlign=dict( + argstr="--useCenterOfHeadAlign ", + ), + useGeometryAlign=dict( + argstr="--useGeometryAlign ", + ), + useMomentsAlign=dict( + argstr="--useMomentsAlign ", + ), + vectorIndex=dict( + argstr="--vectorIndex %d", + ), ) inputs = 
gtractCoRegAnatomy.input_spec() @@ -41,7 +97,11 @@ def test_gtractCoRegAnatomy_inputs(): def test_gtractCoRegAnatomy_outputs(): - output_map = dict(outputTransformName=dict(extensions=None,),) + output_map = dict( + outputTransformName=dict( + extensions=None, + ), + ) outputs = gtractCoRegAnatomy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py index 8fd46f9ab6..d2a6ca3288 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py @@ -4,12 +4,26 @@ def test_gtractConcatDwi_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - ignoreOrigins=dict(argstr="--ignoreOrigins ",), - inputVolume=dict(argstr="--inputVolume %s...",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ignoreOrigins=dict( + argstr="--ignoreOrigins ", + ), + inputVolume=dict( + argstr="--inputVolume %s...", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = gtractConcatDwi.input_spec() @@ -19,7 +33,11 @@ def test_gtractConcatDwi_inputs(): def test_gtractConcatDwi_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = gtractConcatDwi.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py index 4ce50c9faa..ba03837015 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py @@ -4,12 +4,28 @@ def test_gtractCopyImageOrientation_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputReferenceVolume=dict(argstr="--inputReferenceVolume %s", extensions=None,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputReferenceVolume=dict( + argstr="--inputReferenceVolume %s", + extensions=None, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = gtractCopyImageOrientation.input_spec() @@ -19,7 +35,11 @@ def test_gtractCopyImageOrientation_inputs(): def test_gtractCopyImageOrientation_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = gtractCopyImageOrientation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py index 
639dc8cd69..0122bf7636 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py @@ -4,24 +4,65 @@ def test_gtractCoregBvalues_inputs(): input_map = dict( - args=dict(argstr="%s",), - debugLevel=dict(argstr="--debugLevel %d",), - eddyCurrentCorrection=dict(argstr="--eddyCurrentCorrection ",), - environ=dict(nohash=True, usedefault=True,), - fixedVolume=dict(argstr="--fixedVolume %s", extensions=None,), - fixedVolumeIndex=dict(argstr="--fixedVolumeIndex %d",), - maximumStepSize=dict(argstr="--maximumStepSize %f",), - minimumStepSize=dict(argstr="--minimumStepSize %f",), - movingVolume=dict(argstr="--movingVolume %s", extensions=None,), - numberOfIterations=dict(argstr="--numberOfIterations %d",), - numberOfSpatialSamples=dict(argstr="--numberOfSpatialSamples %d",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputTransform=dict(argstr="--outputTransform %s", hash_files=False,), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - registerB0Only=dict(argstr="--registerB0Only ",), - relaxationFactor=dict(argstr="--relaxationFactor %f",), - samplingPercentage=dict(argstr="--samplingPercentage %f",), - spatialScale=dict(argstr="--spatialScale %f",), + args=dict( + argstr="%s", + ), + debugLevel=dict( + argstr="--debugLevel %d", + ), + eddyCurrentCorrection=dict( + argstr="--eddyCurrentCorrection ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedVolume=dict( + argstr="--fixedVolume %s", + extensions=None, + ), + fixedVolumeIndex=dict( + argstr="--fixedVolumeIndex %d", + ), + maximumStepSize=dict( + argstr="--maximumStepSize %f", + ), + minimumStepSize=dict( + argstr="--minimumStepSize %f", + ), + movingVolume=dict( + argstr="--movingVolume %s", + extensions=None, + ), + numberOfIterations=dict( + argstr="--numberOfIterations %d", + ), + numberOfSpatialSamples=dict( + argstr="--numberOfSpatialSamples %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputTransform=dict( + argstr="--outputTransform %s", + hash_files=False, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + registerB0Only=dict( + argstr="--registerB0Only ", + ), + relaxationFactor=dict( + argstr="--relaxationFactor %f", + ), + samplingPercentage=dict( + argstr="--samplingPercentage %f", + ), + spatialScale=dict( + argstr="--spatialScale %f", + ), ) inputs = gtractCoregBvalues.input_spec() @@ -32,7 +73,12 @@ def test_gtractCoregBvalues_inputs(): def test_gtractCoregBvalues_outputs(): output_map = dict( - outputTransform=dict(extensions=None,), outputVolume=dict(extensions=None,), + outputTransform=dict( + extensions=None, + ), + outputVolume=dict( + extensions=None, + ), ) outputs = gtractCoregBvalues.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py index cd5d34952d..7d086cd7c0 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py @@ -4,22 +4,48 @@ def test_gtractCostFastMarching_inputs(): input_map = dict( - anisotropyWeight=dict(argstr="--anisotropyWeight %f",), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + anisotropyWeight=dict( + argstr="--anisotropyWeight %f", + ), + args=dict( + argstr="%s", + ), + environ=dict( + 
nohash=True, + usedefault=True, + ), inputAnisotropyVolume=dict( - argstr="--inputAnisotropyVolume %s", extensions=None, + argstr="--inputAnisotropyVolume %s", + extensions=None, ), inputStartingSeedsLabelMapVolume=dict( - argstr="--inputStartingSeedsLabelMapVolume %s", extensions=None, - ), - inputTensorVolume=dict(argstr="--inputTensorVolume %s", extensions=None,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputCostVolume=dict(argstr="--outputCostVolume %s", hash_files=False,), - outputSpeedVolume=dict(argstr="--outputSpeedVolume %s", hash_files=False,), - seedThreshold=dict(argstr="--seedThreshold %f",), - startingSeedsLabel=dict(argstr="--startingSeedsLabel %d",), - stoppingValue=dict(argstr="--stoppingValue %f",), + argstr="--inputStartingSeedsLabelMapVolume %s", + extensions=None, + ), + inputTensorVolume=dict( + argstr="--inputTensorVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputCostVolume=dict( + argstr="--outputCostVolume %s", + hash_files=False, + ), + outputSpeedVolume=dict( + argstr="--outputSpeedVolume %s", + hash_files=False, + ), + seedThreshold=dict( + argstr="--seedThreshold %f", + ), + startingSeedsLabel=dict( + argstr="--startingSeedsLabel %d", + ), + stoppingValue=dict( + argstr="--stoppingValue %f", + ), ) inputs = gtractCostFastMarching.input_spec() @@ -30,8 +56,12 @@ def test_gtractCostFastMarching_inputs(): def test_gtractCostFastMarching_outputs(): output_map = dict( - outputCostVolume=dict(extensions=None,), - outputSpeedVolume=dict(extensions=None,), + outputCostVolume=dict( + extensions=None, + ), + outputSpeedVolume=dict( + extensions=None, + ), ) outputs = gtractCostFastMarching.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py index 6b2b0a31e5..1990cc2057 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py @@ -4,13 +4,30 @@ def test_gtractCreateGuideFiber_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputFiber=dict(argstr="--inputFiber %s", extensions=None,), - numberOfPoints=dict(argstr="--numberOfPoints %d",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputFiber=dict(argstr="--outputFiber %s", hash_files=False,), - writeXMLPolyDataFile=dict(argstr="--writeXMLPolyDataFile ",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputFiber=dict( + argstr="--inputFiber %s", + extensions=None, + ), + numberOfPoints=dict( + argstr="--numberOfPoints %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputFiber=dict( + argstr="--outputFiber %s", + hash_files=False, + ), + writeXMLPolyDataFile=dict( + argstr="--writeXMLPolyDataFile ", + ), ) inputs = gtractCreateGuideFiber.input_spec() @@ -20,7 +37,11 @@ def test_gtractCreateGuideFiber_inputs(): def test_gtractCreateGuideFiber_outputs(): - output_map = dict(outputFiber=dict(extensions=None,),) + output_map = dict( + outputFiber=dict( + extensions=None, + ), + ) outputs = gtractCreateGuideFiber.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py index 
15ee3053f0..4059d45f6a 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py @@ -4,26 +4,60 @@ def test_gtractFastMarchingTracking_inputs(): input_map = dict( - args=dict(argstr="%s",), - costStepSize=dict(argstr="--costStepSize %f",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + costStepSize=dict( + argstr="--costStepSize %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), inputAnisotropyVolume=dict( - argstr="--inputAnisotropyVolume %s", extensions=None, + argstr="--inputAnisotropyVolume %s", + extensions=None, + ), + inputCostVolume=dict( + argstr="--inputCostVolume %s", + extensions=None, ), - inputCostVolume=dict(argstr="--inputCostVolume %s", extensions=None,), inputStartingSeedsLabelMapVolume=dict( - argstr="--inputStartingSeedsLabelMapVolume %s", extensions=None, - ), - inputTensorVolume=dict(argstr="--inputTensorVolume %s", extensions=None,), - maximumStepSize=dict(argstr="--maximumStepSize %f",), - minimumStepSize=dict(argstr="--minimumStepSize %f",), - numberOfIterations=dict(argstr="--numberOfIterations %d",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputTract=dict(argstr="--outputTract %s", hash_files=False,), - seedThreshold=dict(argstr="--seedThreshold %f",), - startingSeedsLabel=dict(argstr="--startingSeedsLabel %d",), - trackingThreshold=dict(argstr="--trackingThreshold %f",), - writeXMLPolyDataFile=dict(argstr="--writeXMLPolyDataFile ",), + argstr="--inputStartingSeedsLabelMapVolume %s", + extensions=None, + ), + inputTensorVolume=dict( + argstr="--inputTensorVolume %s", + extensions=None, + ), + maximumStepSize=dict( + argstr="--maximumStepSize %f", + ), + minimumStepSize=dict( + argstr="--minimumStepSize %f", + ), + numberOfIterations=dict( + argstr="--numberOfIterations %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputTract=dict( + argstr="--outputTract %s", + hash_files=False, + ), + seedThreshold=dict( + argstr="--seedThreshold %f", + ), + startingSeedsLabel=dict( + argstr="--startingSeedsLabel %d", + ), + trackingThreshold=dict( + argstr="--trackingThreshold %f", + ), + writeXMLPolyDataFile=dict( + argstr="--writeXMLPolyDataFile ", + ), ) inputs = gtractFastMarchingTracking.input_spec() @@ -33,7 +67,11 @@ def test_gtractFastMarchingTracking_inputs(): def test_gtractFastMarchingTracking_outputs(): - output_map = dict(outputTract=dict(extensions=None,),) + output_map = dict( + outputTract=dict( + extensions=None, + ), + ) outputs = gtractFastMarchingTracking.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py index 510c00013a..9837774d3e 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py @@ -4,42 +4,103 @@ def test_gtractFiberTracking_inputs(): input_map = dict( - args=dict(argstr="%s",), - branchingAngle=dict(argstr="--branchingAngle %f",), - branchingThreshold=dict(argstr="--branchingThreshold %f",), - curvatureThreshold=dict(argstr="--curvatureThreshold %f",), - endingSeedsLabel=dict(argstr="--endingSeedsLabel %d",), - environ=dict(nohash=True, usedefault=True,), - guidedCurvatureThreshold=dict(argstr="--guidedCurvatureThreshold %f",), + args=dict( 
+ argstr="%s", + ), + branchingAngle=dict( + argstr="--branchingAngle %f", + ), + branchingThreshold=dict( + argstr="--branchingThreshold %f", + ), + curvatureThreshold=dict( + argstr="--curvatureThreshold %f", + ), + endingSeedsLabel=dict( + argstr="--endingSeedsLabel %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + guidedCurvatureThreshold=dict( + argstr="--guidedCurvatureThreshold %f", + ), inputAnisotropyVolume=dict( - argstr="--inputAnisotropyVolume %s", extensions=None, + argstr="--inputAnisotropyVolume %s", + extensions=None, ), inputEndingSeedsLabelMapVolume=dict( - argstr="--inputEndingSeedsLabelMapVolume %s", extensions=None, + argstr="--inputEndingSeedsLabelMapVolume %s", + extensions=None, ), inputStartingSeedsLabelMapVolume=dict( - argstr="--inputStartingSeedsLabelMapVolume %s", extensions=None, - ), - inputTensorVolume=dict(argstr="--inputTensorVolume %s", extensions=None,), - inputTract=dict(argstr="--inputTract %s", extensions=None,), - maximumBranchPoints=dict(argstr="--maximumBranchPoints %d",), - maximumGuideDistance=dict(argstr="--maximumGuideDistance %f",), - maximumLength=dict(argstr="--maximumLength %f",), - minimumLength=dict(argstr="--minimumLength %f",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputTract=dict(argstr="--outputTract %s", hash_files=False,), - randomSeed=dict(argstr="--randomSeed %d",), - seedThreshold=dict(argstr="--seedThreshold %f",), - startingSeedsLabel=dict(argstr="--startingSeedsLabel %d",), - stepSize=dict(argstr="--stepSize %f",), - tendF=dict(argstr="--tendF %f",), - tendG=dict(argstr="--tendG %f",), - trackingMethod=dict(argstr="--trackingMethod %s",), - trackingThreshold=dict(argstr="--trackingThreshold %f",), - useLoopDetection=dict(argstr="--useLoopDetection ",), - useRandomWalk=dict(argstr="--useRandomWalk ",), - useTend=dict(argstr="--useTend ",), - writeXMLPolyDataFile=dict(argstr="--writeXMLPolyDataFile ",), + argstr="--inputStartingSeedsLabelMapVolume %s", + extensions=None, + ), + inputTensorVolume=dict( + argstr="--inputTensorVolume %s", + extensions=None, + ), + inputTract=dict( + argstr="--inputTract %s", + extensions=None, + ), + maximumBranchPoints=dict( + argstr="--maximumBranchPoints %d", + ), + maximumGuideDistance=dict( + argstr="--maximumGuideDistance %f", + ), + maximumLength=dict( + argstr="--maximumLength %f", + ), + minimumLength=dict( + argstr="--minimumLength %f", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputTract=dict( + argstr="--outputTract %s", + hash_files=False, + ), + randomSeed=dict( + argstr="--randomSeed %d", + ), + seedThreshold=dict( + argstr="--seedThreshold %f", + ), + startingSeedsLabel=dict( + argstr="--startingSeedsLabel %d", + ), + stepSize=dict( + argstr="--stepSize %f", + ), + tendF=dict( + argstr="--tendF %f", + ), + tendG=dict( + argstr="--tendG %f", + ), + trackingMethod=dict( + argstr="--trackingMethod %s", + ), + trackingThreshold=dict( + argstr="--trackingThreshold %f", + ), + useLoopDetection=dict( + argstr="--useLoopDetection ", + ), + useRandomWalk=dict( + argstr="--useRandomWalk ", + ), + useTend=dict( + argstr="--useTend ", + ), + writeXMLPolyDataFile=dict( + argstr="--writeXMLPolyDataFile ", + ), ) inputs = gtractFiberTracking.input_spec() @@ -49,7 +110,11 @@ def test_gtractFiberTracking_inputs(): def test_gtractFiberTracking_outputs(): - output_map = dict(outputTract=dict(extensions=None,),) + output_map = dict( + outputTract=dict( + extensions=None, + ), + ) outputs = gtractFiberTracking.output_spec() for key, 
metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py index b382e97133..64b896e0ca 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py @@ -4,12 +4,28 @@ def test_gtractImageConformity_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputReferenceVolume=dict(argstr="--inputReferenceVolume %s", extensions=None,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputReferenceVolume=dict( + argstr="--inputReferenceVolume %s", + extensions=None, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = gtractImageConformity.input_spec() @@ -19,7 +35,11 @@ def test_gtractImageConformity_inputs(): def test_gtractImageConformity_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = gtractImageConformity.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py index 9affd39654..ed43c90dc6 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py @@ -4,13 +4,32 @@ def test_gtractInvertBSplineTransform_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputReferenceVolume=dict(argstr="--inputReferenceVolume %s", extensions=None,), - inputTransform=dict(argstr="--inputTransform %s", extensions=None,), - landmarkDensity=dict(argstr="--landmarkDensity %s", sep=",",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputTransform=dict(argstr="--outputTransform %s", hash_files=False,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputReferenceVolume=dict( + argstr="--inputReferenceVolume %s", + extensions=None, + ), + inputTransform=dict( + argstr="--inputTransform %s", + extensions=None, + ), + landmarkDensity=dict( + argstr="--landmarkDensity %s", + sep=",", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputTransform=dict( + argstr="--outputTransform %s", + hash_files=False, + ), ) inputs = gtractInvertBSplineTransform.input_spec() @@ -20,7 +39,11 @@ def test_gtractInvertBSplineTransform_inputs(): def test_gtractInvertBSplineTransform_outputs(): - output_map = dict(outputTransform=dict(extensions=None,),) + output_map = dict( + outputTransform=dict( + extensions=None, + ), + ) outputs = gtractInvertBSplineTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py 
b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py index 2ec1e53e42..83129902aa 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py @@ -4,13 +4,31 @@ def test_gtractInvertDisplacementField_inputs(): input_map = dict( - args=dict(argstr="%s",), - baseImage=dict(argstr="--baseImage %s", extensions=None,), - deformationImage=dict(argstr="--deformationImage %s", extensions=None,), - environ=dict(nohash=True, usedefault=True,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - subsamplingFactor=dict(argstr="--subsamplingFactor %d",), + args=dict( + argstr="%s", + ), + baseImage=dict( + argstr="--baseImage %s", + extensions=None, + ), + deformationImage=dict( + argstr="--deformationImage %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + subsamplingFactor=dict( + argstr="--subsamplingFactor %d", + ), ) inputs = gtractInvertDisplacementField.input_spec() @@ -20,7 +38,11 @@ def test_gtractInvertDisplacementField_inputs(): def test_gtractInvertDisplacementField_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = gtractInvertDisplacementField.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py index bbf3b5b260..73ba9c576f 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py @@ -4,11 +4,24 @@ def test_gtractInvertRigidTransform_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputTransform=dict(argstr="--inputTransform %s", extensions=None,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputTransform=dict(argstr="--outputTransform %s", hash_files=False,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputTransform=dict( + argstr="--inputTransform %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputTransform=dict( + argstr="--outputTransform %s", + hash_files=False, + ), ) inputs = gtractInvertRigidTransform.input_spec() @@ -18,7 +31,11 @@ def test_gtractInvertRigidTransform_inputs(): def test_gtractInvertRigidTransform_outputs(): - output_map = dict(outputTransform=dict(extensions=None,),) + output_map = dict( + outputTransform=dict( + extensions=None, + ), + ) outputs = gtractInvertRigidTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py index dd30cda525..7b38abe0b5 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py @@ -4,18 +4,35 @@ def test_gtractResampleAnisotropy_inputs(): input_map = 
dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), inputAnatomicalVolume=dict( - argstr="--inputAnatomicalVolume %s", extensions=None, + argstr="--inputAnatomicalVolume %s", + extensions=None, ), inputAnisotropyVolume=dict( - argstr="--inputAnisotropyVolume %s", extensions=None, + argstr="--inputAnisotropyVolume %s", + extensions=None, + ), + inputTransform=dict( + argstr="--inputTransform %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + transformType=dict( + argstr="--transformType %s", ), - inputTransform=dict(argstr="--inputTransform %s", extensions=None,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - transformType=dict(argstr="--transformType %s",), ) inputs = gtractResampleAnisotropy.input_spec() @@ -25,7 +42,11 @@ def test_gtractResampleAnisotropy_inputs(): def test_gtractResampleAnisotropy_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = gtractResampleAnisotropy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py index e512fed7b5..7271e8a42a 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py @@ -4,17 +4,38 @@ def test_gtractResampleB0_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), inputAnatomicalVolume=dict( - argstr="--inputAnatomicalVolume %s", extensions=None, - ), - inputTransform=dict(argstr="--inputTransform %s", extensions=None,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - transformType=dict(argstr="--transformType %s",), - vectorIndex=dict(argstr="--vectorIndex %d",), + argstr="--inputAnatomicalVolume %s", + extensions=None, + ), + inputTransform=dict( + argstr="--inputTransform %s", + extensions=None, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + transformType=dict( + argstr="--transformType %s", + ), + vectorIndex=dict( + argstr="--vectorIndex %d", + ), ) inputs = gtractResampleB0.input_spec() @@ -24,7 +45,11 @@ def test_gtractResampleB0_inputs(): def test_gtractResampleB0_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = gtractResampleB0.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py index 4cc5c30e4f..6649ecfc1f 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py +++ 
b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py @@ -4,14 +4,35 @@ def test_gtractResampleCodeImage_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputCodeVolume=dict(argstr="--inputCodeVolume %s", extensions=None,), - inputReferenceVolume=dict(argstr="--inputReferenceVolume %s", extensions=None,), - inputTransform=dict(argstr="--inputTransform %s", extensions=None,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - transformType=dict(argstr="--transformType %s",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputCodeVolume=dict( + argstr="--inputCodeVolume %s", + extensions=None, + ), + inputReferenceVolume=dict( + argstr="--inputReferenceVolume %s", + extensions=None, + ), + inputTransform=dict( + argstr="--inputTransform %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + transformType=dict( + argstr="--transformType %s", + ), ) inputs = gtractResampleCodeImage.input_spec() @@ -21,7 +42,11 @@ def test_gtractResampleCodeImage_inputs(): def test_gtractResampleCodeImage_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = gtractResampleCodeImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py index f87aa364cc..3b61312e54 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py @@ -4,17 +4,47 @@ def test_gtractResampleDWIInPlace_inputs(): input_map = dict( - args=dict(argstr="%s",), - debugLevel=dict(argstr="--debugLevel %d",), - environ=dict(nohash=True, usedefault=True,), - imageOutputSize=dict(argstr="--imageOutputSize %s", sep=",",), - inputTransform=dict(argstr="--inputTransform %s", extensions=None,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputResampledB0=dict(argstr="--outputResampledB0 %s", hash_files=False,), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - referenceVolume=dict(argstr="--referenceVolume %s", extensions=None,), - warpDWITransform=dict(argstr="--warpDWITransform %s", extensions=None,), + args=dict( + argstr="%s", + ), + debugLevel=dict( + argstr="--debugLevel %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + imageOutputSize=dict( + argstr="--imageOutputSize %s", + sep=",", + ), + inputTransform=dict( + argstr="--inputTransform %s", + extensions=None, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputResampledB0=dict( + argstr="--outputResampledB0 %s", + hash_files=False, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + referenceVolume=dict( + argstr="--referenceVolume %s", + extensions=None, + ), + warpDWITransform=dict( + argstr="--warpDWITransform %s", + extensions=None, + ), ) inputs = gtractResampleDWIInPlace.input_spec() @@ -25,7 +55,12 @@ def test_gtractResampleDWIInPlace_inputs(): 
def test_gtractResampleDWIInPlace_outputs(): output_map = dict( - outputResampledB0=dict(extensions=None,), outputVolume=dict(extensions=None,), + outputResampledB0=dict( + extensions=None, + ), + outputVolume=dict( + extensions=None, + ), ) outputs = gtractResampleDWIInPlace.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py index 3c7a6b33b8..d64d2d8581 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py @@ -4,18 +4,35 @@ def test_gtractResampleFibers_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), inputForwardDeformationFieldVolume=dict( - argstr="--inputForwardDeformationFieldVolume %s", extensions=None, + argstr="--inputForwardDeformationFieldVolume %s", + extensions=None, ), inputReverseDeformationFieldVolume=dict( - argstr="--inputReverseDeformationFieldVolume %s", extensions=None, + argstr="--inputReverseDeformationFieldVolume %s", + extensions=None, + ), + inputTract=dict( + argstr="--inputTract %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputTract=dict( + argstr="--outputTract %s", + hash_files=False, + ), + writeXMLPolyDataFile=dict( + argstr="--writeXMLPolyDataFile ", ), - inputTract=dict(argstr="--inputTract %s", extensions=None,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputTract=dict(argstr="--outputTract %s", hash_files=False,), - writeXMLPolyDataFile=dict(argstr="--writeXMLPolyDataFile ",), ) inputs = gtractResampleFibers.input_spec() @@ -25,7 +42,11 @@ def test_gtractResampleFibers_inputs(): def test_gtractResampleFibers_outputs(): - output_map = dict(outputTract=dict(extensions=None,),) + output_map = dict( + outputTract=dict( + extensions=None, + ), + ) outputs = gtractResampleFibers.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py index 2372b9599f..eabe7c6f50 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py @@ -4,22 +4,54 @@ def test_gtractTensor_inputs(): input_map = dict( - applyMeasurementFrame=dict(argstr="--applyMeasurementFrame ",), - args=dict(argstr="%s",), - b0Index=dict(argstr="--b0Index %d",), + applyMeasurementFrame=dict( + argstr="--applyMeasurementFrame ", + ), + args=dict( + argstr="%s", + ), + b0Index=dict( + argstr="--b0Index %d", + ), backgroundSuppressingThreshold=dict( argstr="--backgroundSuppressingThreshold %d", ), - environ=dict(nohash=True, usedefault=True,), - ignoreIndex=dict(argstr="--ignoreIndex %s", sep=",",), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - maskProcessingMode=dict(argstr="--maskProcessingMode %s",), - maskVolume=dict(argstr="--maskVolume %s", extensions=None,), - medianFilterSize=dict(argstr="--medianFilterSize %s", sep=",",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - resampleIsotropic=dict(argstr="--resampleIsotropic ",), - size=dict(argstr="--size %f",), + environ=dict( + nohash=True, + 
usedefault=True, + ), + ignoreIndex=dict( + argstr="--ignoreIndex %s", + sep=",", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + maskProcessingMode=dict( + argstr="--maskProcessingMode %s", + ), + maskVolume=dict( + argstr="--maskVolume %s", + extensions=None, + ), + medianFilterSize=dict( + argstr="--medianFilterSize %s", + sep=",", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + resampleIsotropic=dict( + argstr="--resampleIsotropic ", + ), + size=dict( + argstr="--size %f", + ), ) inputs = gtractTensor.input_spec() @@ -29,7 +61,11 @@ def test_gtractTensor_inputs(): def test_gtractTensor_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = gtractTensor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py index 3999871191..64daec32fb 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py @@ -4,13 +4,27 @@ def test_gtractTransformToDisplacementField_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputReferenceVolume=dict(argstr="--inputReferenceVolume %s", extensions=None,), - inputTransform=dict(argstr="--inputTransform %s", extensions=None,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputReferenceVolume=dict( + argstr="--inputReferenceVolume %s", + extensions=None, + ), + inputTransform=dict( + argstr="--inputTransform %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), outputDeformationFieldVolume=dict( - argstr="--outputDeformationFieldVolume %s", hash_files=False, + argstr="--outputDeformationFieldVolume %s", + hash_files=False, ), ) inputs = gtractTransformToDisplacementField.input_spec() @@ -21,7 +35,11 @@ def test_gtractTransformToDisplacementField_inputs(): def test_gtractTransformToDisplacementField_outputs(): - output_map = dict(outputDeformationFieldVolume=dict(extensions=None,),) + output_map = dict( + outputDeformationFieldVolume=dict( + extensions=None, + ), + ) outputs = gtractTransformToDisplacementField.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py index 9ec247675e..6638ef34cd 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py @@ -4,12 +4,27 @@ def test_maxcurvature_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - image=dict(argstr="--image %s", extensions=None,), - output=dict(argstr="--output %s", hash_files=False,), - sigma=dict(argstr="--sigma %f",), - verbose=dict(argstr="--verbose ",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + image=dict( + argstr="--image %s", + extensions=None, + ), + output=dict( + argstr="--output %s", + 
hash_files=False, + ), + sigma=dict( + argstr="--sigma %f", + ), + verbose=dict( + argstr="--verbose ", + ), ) inputs = maxcurvature.input_spec() @@ -19,7 +34,11 @@ def test_maxcurvature_inputs(): def test_maxcurvature_outputs(): - output_map = dict(output=dict(extensions=None,),) + output_map = dict( + output=dict( + extensions=None, + ), + ) outputs = maxcurvature.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py index a7e86c79b8..92050c6e43 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py @@ -4,43 +4,118 @@ def test_UKFTractography_inputs(): input_map = dict( - Ql=dict(argstr="--Ql %f",), - Qm=dict(argstr="--Qm %f",), - Qw=dict(argstr="--Qw %f",), - Rs=dict(argstr="--Rs %f",), - args=dict(argstr="%s",), - dwiFile=dict(argstr="--dwiFile %s", extensions=None,), - environ=dict(nohash=True, usedefault=True,), - freeWater=dict(argstr="--freeWater ",), - fullTensorModel=dict(argstr="--fullTensorModel ",), - labels=dict(argstr="--labels %s", sep=",",), - maskFile=dict(argstr="--maskFile %s", extensions=None,), - maxBranchingAngle=dict(argstr="--maxBranchingAngle %f",), - maxHalfFiberLength=dict(argstr="--maxHalfFiberLength %f",), - minBranchingAngle=dict(argstr="--minBranchingAngle %f",), - minFA=dict(argstr="--minFA %f",), - minGA=dict(argstr="--minGA %f",), - numTensor=dict(argstr="--numTensor %s",), - numThreads=dict(argstr="--numThreads %d",), - recordCovariance=dict(argstr="--recordCovariance ",), - recordFA=dict(argstr="--recordFA ",), - recordFreeWater=dict(argstr="--recordFreeWater ",), - recordLength=dict(argstr="--recordLength %f",), - recordNMSE=dict(argstr="--recordNMSE ",), - recordState=dict(argstr="--recordState ",), - recordTensors=dict(argstr="--recordTensors ",), - recordTrace=dict(argstr="--recordTrace ",), - seedFALimit=dict(argstr="--seedFALimit %f",), - seedsFile=dict(argstr="--seedsFile %s", extensions=None,), - seedsPerVoxel=dict(argstr="--seedsPerVoxel %d",), - stepLength=dict(argstr="--stepLength %f",), - storeGlyphs=dict(argstr="--storeGlyphs ",), - tracts=dict(argstr="--tracts %s", hash_files=False,), + Ql=dict( + argstr="--Ql %f", + ), + Qm=dict( + argstr="--Qm %f", + ), + Qw=dict( + argstr="--Qw %f", + ), + Rs=dict( + argstr="--Rs %f", + ), + args=dict( + argstr="%s", + ), + dwiFile=dict( + argstr="--dwiFile %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + freeWater=dict( + argstr="--freeWater ", + ), + fullTensorModel=dict( + argstr="--fullTensorModel ", + ), + labels=dict( + argstr="--labels %s", + sep=",", + ), + maskFile=dict( + argstr="--maskFile %s", + extensions=None, + ), + maxBranchingAngle=dict( + argstr="--maxBranchingAngle %f", + ), + maxHalfFiberLength=dict( + argstr="--maxHalfFiberLength %f", + ), + minBranchingAngle=dict( + argstr="--minBranchingAngle %f", + ), + minFA=dict( + argstr="--minFA %f", + ), + minGA=dict( + argstr="--minGA %f", + ), + numTensor=dict( + argstr="--numTensor %s", + ), + numThreads=dict( + argstr="--numThreads %d", + ), + recordCovariance=dict( + argstr="--recordCovariance ", + ), + recordFA=dict( + argstr="--recordFA ", + ), + recordFreeWater=dict( + argstr="--recordFreeWater ", + ), + recordLength=dict( + argstr="--recordLength %f", + ), + recordNMSE=dict( + 
argstr="--recordNMSE ", + ), + recordState=dict( + argstr="--recordState ", + ), + recordTensors=dict( + argstr="--recordTensors ", + ), + recordTrace=dict( + argstr="--recordTrace ", + ), + seedFALimit=dict( + argstr="--seedFALimit %f", + ), + seedsFile=dict( + argstr="--seedsFile %s", + extensions=None, + ), + seedsPerVoxel=dict( + argstr="--seedsPerVoxel %d", + ), + stepLength=dict( + argstr="--stepLength %f", + ), + storeGlyphs=dict( + argstr="--storeGlyphs ", + ), + tracts=dict( + argstr="--tracts %s", + hash_files=False, + ), tractsWithSecondTensor=dict( - argstr="--tractsWithSecondTensor %s", hash_files=False, + argstr="--tractsWithSecondTensor %s", + hash_files=False, + ), + writeAsciiTracts=dict( + argstr="--writeAsciiTracts ", + ), + writeUncompressedTracts=dict( + argstr="--writeUncompressedTracts ", ), - writeAsciiTracts=dict(argstr="--writeAsciiTracts ",), - writeUncompressedTracts=dict(argstr="--writeUncompressedTracts ",), ) inputs = UKFTractography.input_spec() @@ -51,7 +126,12 @@ def test_UKFTractography_inputs(): def test_UKFTractography_outputs(): output_map = dict( - tracts=dict(extensions=None,), tractsWithSecondTensor=dict(extensions=None,), + tracts=dict( + extensions=None, + ), + tractsWithSecondTensor=dict( + extensions=None, + ), ) outputs = UKFTractography.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py index d25c1a10ca..506d3f8f90 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py @@ -4,22 +4,61 @@ def test_fiberprocess_inputs(): input_map = dict( - args=dict(argstr="%s",), - displacement_field=dict(argstr="--displacement_field %s", extensions=None,), - environ=dict(nohash=True, usedefault=True,), - fiber_file=dict(argstr="--fiber_file %s", extensions=None,), - fiber_output=dict(argstr="--fiber_output %s", hash_files=False,), - fiber_radius=dict(argstr="--fiber_radius %f",), - h_field=dict(argstr="--h_field %s", extensions=None,), - index_space=dict(argstr="--index_space ",), - noDataChange=dict(argstr="--noDataChange ",), - no_warp=dict(argstr="--no_warp ",), - saveProperties=dict(argstr="--saveProperties ",), - tensor_volume=dict(argstr="--tensor_volume %s", extensions=None,), - verbose=dict(argstr="--verbose ",), - voxel_label=dict(argstr="--voxel_label %d",), - voxelize=dict(argstr="--voxelize %s", hash_files=False,), - voxelize_count_fibers=dict(argstr="--voxelize_count_fibers ",), + args=dict( + argstr="%s", + ), + displacement_field=dict( + argstr="--displacement_field %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fiber_file=dict( + argstr="--fiber_file %s", + extensions=None, + ), + fiber_output=dict( + argstr="--fiber_output %s", + hash_files=False, + ), + fiber_radius=dict( + argstr="--fiber_radius %f", + ), + h_field=dict( + argstr="--h_field %s", + extensions=None, + ), + index_space=dict( + argstr="--index_space ", + ), + noDataChange=dict( + argstr="--noDataChange ", + ), + no_warp=dict( + argstr="--no_warp ", + ), + saveProperties=dict( + argstr="--saveProperties ", + ), + tensor_volume=dict( + argstr="--tensor_volume %s", + extensions=None, + ), + verbose=dict( + argstr="--verbose ", + ), + voxel_label=dict( + argstr="--voxel_label %d", + ), + voxelize=dict( + argstr="--voxelize %s", + hash_files=False, + ), + 
voxelize_count_fibers=dict( + argstr="--voxelize_count_fibers ", + ), ) inputs = fiberprocess.input_spec() @@ -30,7 +69,12 @@ def test_fiberprocess_inputs(): def test_fiberprocess_outputs(): output_map = dict( - fiber_output=dict(extensions=None,), voxelize=dict(extensions=None,), + fiber_output=dict( + extensions=None, + ), + voxelize=dict( + extensions=None, + ), ) outputs = fiberprocess.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py index 570ea316c7..a9df738d28 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py @@ -4,10 +4,20 @@ def test_fiberstats_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - fiber_file=dict(argstr="--fiber_file %s", extensions=None,), - verbose=dict(argstr="--verbose ",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fiber_file=dict( + argstr="--fiber_file %s", + extensions=None, + ), + verbose=dict( + argstr="--verbose ", + ), ) inputs = fiberstats.input_spec() diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py index d3994690d1..28798b14ff 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py @@ -4,21 +4,55 @@ def test_fibertrack_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - forbidden_label=dict(argstr="--forbidden_label %d",), - force=dict(argstr="--force ",), - input_roi_file=dict(argstr="--input_roi_file %s", extensions=None,), - input_tensor_file=dict(argstr="--input_tensor_file %s", extensions=None,), - max_angle=dict(argstr="--max_angle %f",), - min_fa=dict(argstr="--min_fa %f",), - output_fiber_file=dict(argstr="--output_fiber_file %s", hash_files=False,), - really_verbose=dict(argstr="--really_verbose ",), - source_label=dict(argstr="--source_label %d",), - step_size=dict(argstr="--step_size %f",), - target_label=dict(argstr="--target_label %d",), - verbose=dict(argstr="--verbose ",), - whole_brain=dict(argstr="--whole_brain ",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + forbidden_label=dict( + argstr="--forbidden_label %d", + ), + force=dict( + argstr="--force ", + ), + input_roi_file=dict( + argstr="--input_roi_file %s", + extensions=None, + ), + input_tensor_file=dict( + argstr="--input_tensor_file %s", + extensions=None, + ), + max_angle=dict( + argstr="--max_angle %f", + ), + min_fa=dict( + argstr="--min_fa %f", + ), + output_fiber_file=dict( + argstr="--output_fiber_file %s", + hash_files=False, + ), + really_verbose=dict( + argstr="--really_verbose ", + ), + source_label=dict( + argstr="--source_label %d", + ), + step_size=dict( + argstr="--step_size %f", + ), + target_label=dict( + argstr="--target_label %d", + ), + verbose=dict( + argstr="--verbose ", + ), + whole_brain=dict( + argstr="--whole_brain ", + ), ) inputs = fibertrack.input_spec() @@ -28,7 +62,11 @@ def test_fibertrack_inputs(): def test_fibertrack_outputs(): - output_map = dict(output_fiber_file=dict(extensions=None,),) + output_map = dict( + output_fiber_file=dict( + 
extensions=None, + ), + ) outputs = fibertrack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py b/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py index 66d4da8a6c..222c460279 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py @@ -4,13 +4,30 @@ def test_CannyEdge_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - lowerThreshold=dict(argstr="--lowerThreshold %f",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - upperThreshold=dict(argstr="--upperThreshold %f",), - variance=dict(argstr="--variance %f",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + lowerThreshold=dict( + argstr="--lowerThreshold %f", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + upperThreshold=dict( + argstr="--upperThreshold %f", + ), + variance=dict( + argstr="--variance %f", + ), ) inputs = CannyEdge.input_spec() @@ -20,7 +37,11 @@ def test_CannyEdge_inputs(): def test_CannyEdge_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = CannyEdge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py index f77517fbb5..21f8e5da6f 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py @@ -4,17 +4,44 @@ def test_CannySegmentationLevelSetImageFilter_inputs(): input_map = dict( - advectionWeight=dict(argstr="--advectionWeight %f",), - args=dict(argstr="%s",), - cannyThreshold=dict(argstr="--cannyThreshold %f",), - cannyVariance=dict(argstr="--cannyVariance %f",), - environ=dict(nohash=True, usedefault=True,), - initialModel=dict(argstr="--initialModel %s", extensions=None,), - initialModelIsovalue=dict(argstr="--initialModelIsovalue %f",), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - maxIterations=dict(argstr="--maxIterations %d",), - outputSpeedVolume=dict(argstr="--outputSpeedVolume %s", hash_files=False,), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + advectionWeight=dict( + argstr="--advectionWeight %f", + ), + args=dict( + argstr="%s", + ), + cannyThreshold=dict( + argstr="--cannyThreshold %f", + ), + cannyVariance=dict( + argstr="--cannyVariance %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + initialModel=dict( + argstr="--initialModel %s", + extensions=None, + ), + initialModelIsovalue=dict( + argstr="--initialModelIsovalue %f", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + maxIterations=dict( + argstr="--maxIterations %d", + ), + outputSpeedVolume=dict( + argstr="--outputSpeedVolume %s", + hash_files=False, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = CannySegmentationLevelSetImageFilter.input_spec() @@ -25,7 +52,12 @@ def 
test_CannySegmentationLevelSetImageFilter_inputs(): def test_CannySegmentationLevelSetImageFilter_outputs(): output_map = dict( - outputSpeedVolume=dict(extensions=None,), outputVolume=dict(extensions=None,), + outputSpeedVolume=dict( + extensions=None, + ), + outputVolume=dict( + extensions=None, + ), ) outputs = CannySegmentationLevelSetImageFilter.output_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py index 901aef8d82..2be2940aeb 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py @@ -4,12 +4,28 @@ def test_DilateImage_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None,), - inputRadius=dict(argstr="--inputRadius %d",), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMaskVolume=dict( + argstr="--inputMaskVolume %s", + extensions=None, + ), + inputRadius=dict( + argstr="--inputRadius %d", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = DilateImage.input_spec() @@ -19,7 +35,11 @@ def test_DilateImage_inputs(): def test_DilateImage_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = DilateImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py index 0311b115fb..66cc444a94 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py @@ -4,13 +4,31 @@ def test_DilateMask_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputBinaryVolume=dict(argstr="--inputBinaryVolume %s", extensions=None,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - lowerThreshold=dict(argstr="--lowerThreshold %f",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - sizeStructuralElement=dict(argstr="--sizeStructuralElement %d",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputBinaryVolume=dict( + argstr="--inputBinaryVolume %s", + extensions=None, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + lowerThreshold=dict( + argstr="--lowerThreshold %f", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + sizeStructuralElement=dict( + argstr="--sizeStructuralElement %d", + ), ) inputs = DilateMask.input_spec() @@ -20,7 +38,11 @@ def test_DilateMask_inputs(): def test_DilateMask_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = DilateMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py index 
730644e779..255145cfc6 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py @@ -4,12 +4,28 @@ def test_DistanceMaps_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputLabelVolume=dict(argstr="--inputLabelVolume %s", extensions=None,), - inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None,), - inputTissueLabel=dict(argstr="--inputTissueLabel %d",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputLabelVolume=dict( + argstr="--inputLabelVolume %s", + extensions=None, + ), + inputMaskVolume=dict( + argstr="--inputMaskVolume %s", + extensions=None, + ), + inputTissueLabel=dict( + argstr="--inputTissueLabel %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = DistanceMaps.input_spec() @@ -19,7 +35,11 @@ def test_DistanceMaps_inputs(): def test_DistanceMaps_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = DistanceMaps.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py index 0d7df6bf80..b62a21fdbe 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py @@ -4,10 +4,21 @@ def test_DumpBinaryTrainingVectors_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputHeaderFilename=dict(argstr="--inputHeaderFilename %s", extensions=None,), - inputVectorFilename=dict(argstr="--inputVectorFilename %s", extensions=None,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputHeaderFilename=dict( + argstr="--inputHeaderFilename %s", + extensions=None, + ), + inputVectorFilename=dict( + argstr="--inputVectorFilename %s", + extensions=None, + ), ) inputs = DumpBinaryTrainingVectors.input_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py index 568cafee66..ae7140754f 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py @@ -4,12 +4,28 @@ def test_ErodeImage_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None,), - inputRadius=dict(argstr="--inputRadius %d",), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMaskVolume=dict( + argstr="--inputMaskVolume %s", + extensions=None, + ), + inputRadius=dict( + argstr="--inputRadius %d", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = ErodeImage.input_spec() @@ -19,7 +35,11 @@ def test_ErodeImage_inputs(): def 
test_ErodeImage_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = ErodeImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py b/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py index 5498ea004d..704bc01820 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py @@ -4,11 +4,25 @@ def test_FlippedDifference_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMaskVolume=dict( + argstr="--inputMaskVolume %s", + extensions=None, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = FlippedDifference.input_spec() @@ -18,7 +32,11 @@ def test_FlippedDifference_inputs(): def test_FlippedDifference_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = FlippedDifference.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py index 661bd6c1af..e48d622ab3 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py @@ -4,12 +4,28 @@ def test_GenerateBrainClippedImage_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputImg=dict(argstr="--inputImg %s", extensions=None,), - inputMsk=dict(argstr="--inputMsk %s", extensions=None,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputFileName=dict(argstr="--outputFileName %s", hash_files=False,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputImg=dict( + argstr="--inputImg %s", + extensions=None, + ), + inputMsk=dict( + argstr="--inputMsk %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputFileName=dict( + argstr="--outputFileName %s", + hash_files=False, + ), ) inputs = GenerateBrainClippedImage.input_spec() @@ -19,7 +35,11 @@ def test_GenerateBrainClippedImage_inputs(): def test_GenerateBrainClippedImage_outputs(): - output_map = dict(outputFileName=dict(extensions=None,),) + output_map = dict( + outputFileName=dict( + extensions=None, + ), + ) outputs = GenerateBrainClippedImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py index 2eedb1831c..b53396f396 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py +++ 
b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py @@ -4,13 +4,31 @@ def test_GenerateSummedGradientImage_inputs(): input_map = dict( - MaximumGradient=dict(argstr="--MaximumGradient ",), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume1=dict(argstr="--inputVolume1 %s", extensions=None,), - inputVolume2=dict(argstr="--inputVolume2 %s", extensions=None,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputFileName=dict(argstr="--outputFileName %s", hash_files=False,), + MaximumGradient=dict( + argstr="--MaximumGradient ", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume1=dict( + argstr="--inputVolume1 %s", + extensions=None, + ), + inputVolume2=dict( + argstr="--inputVolume2 %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputFileName=dict( + argstr="--outputFileName %s", + hash_files=False, + ), ) inputs = GenerateSummedGradientImage.input_spec() @@ -20,7 +38,11 @@ def test_GenerateSummedGradientImage_inputs(): def test_GenerateSummedGradientImage_outputs(): - output_map = dict(outputFileName=dict(extensions=None,),) + output_map = dict( + outputFileName=dict( + extensions=None, + ), + ) outputs = GenerateSummedGradientImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py index 364ef50be4..c6e8fdfb12 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py @@ -4,13 +4,30 @@ def test_GenerateTestImage_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - lowerBoundOfOutputVolume=dict(argstr="--lowerBoundOfOutputVolume %f",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - outputVolumeSize=dict(argstr="--outputVolumeSize %f",), - upperBoundOfOutputVolume=dict(argstr="--upperBoundOfOutputVolume %f",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + lowerBoundOfOutputVolume=dict( + argstr="--lowerBoundOfOutputVolume %f", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + outputVolumeSize=dict( + argstr="--outputVolumeSize %f", + ), + upperBoundOfOutputVolume=dict( + argstr="--upperBoundOfOutputVolume %f", + ), ) inputs = GenerateTestImage.input_spec() @@ -20,7 +37,11 @@ def test_GenerateTestImage_inputs(): def test_GenerateTestImage_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = GenerateTestImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py index 383a836a20..c7828aaccc 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py @@ -4,13 +4,30 @@ def 
test_GradientAnisotropicDiffusionImageFilter_inputs(): input_map = dict( - args=dict(argstr="%s",), - conductance=dict(argstr="--conductance %f",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - numberOfIterations=dict(argstr="--numberOfIterations %d",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - timeStep=dict(argstr="--timeStep %f",), + args=dict( + argstr="%s", + ), + conductance=dict( + argstr="--conductance %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfIterations=dict( + argstr="--numberOfIterations %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + timeStep=dict( + argstr="--timeStep %f", + ), ) inputs = GradientAnisotropicDiffusionImageFilter.input_spec() @@ -20,7 +37,11 @@ def test_GradientAnisotropicDiffusionImageFilter_inputs(): def test_GradientAnisotropicDiffusionImageFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = GradientAnisotropicDiffusionImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py b/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py index 9e77140c41..8188ad0432 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py @@ -4,14 +4,34 @@ def test_HammerAttributeCreator_inputs(): input_map = dict( - Scale=dict(argstr="--Scale %d",), - Strength=dict(argstr="--Strength %f",), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputCSFVolume=dict(argstr="--inputCSFVolume %s", extensions=None,), - inputGMVolume=dict(argstr="--inputGMVolume %s", extensions=None,), - inputWMVolume=dict(argstr="--inputWMVolume %s", extensions=None,), - outputVolumeBase=dict(argstr="--outputVolumeBase %s",), + Scale=dict( + argstr="--Scale %d", + ), + Strength=dict( + argstr="--Strength %f", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputCSFVolume=dict( + argstr="--inputCSFVolume %s", + extensions=None, + ), + inputGMVolume=dict( + argstr="--inputGMVolume %s", + extensions=None, + ), + inputWMVolume=dict( + argstr="--inputWMVolume %s", + extensions=None, + ), + outputVolumeBase=dict( + argstr="--outputVolumeBase %s", + ), ) inputs = HammerAttributeCreator.input_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py index 9b4652a60c..839bfe5f11 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py @@ -4,12 +4,28 @@ def test_NeighborhoodMean_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None,), - inputRadius=dict(argstr="--inputRadius %d",), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMaskVolume=dict( + 
argstr="--inputMaskVolume %s", + extensions=None, + ), + inputRadius=dict( + argstr="--inputRadius %d", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = NeighborhoodMean.input_spec() @@ -19,7 +35,11 @@ def test_NeighborhoodMean_inputs(): def test_NeighborhoodMean_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = NeighborhoodMean.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py index ae1fc8266d..476a93595e 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py @@ -4,12 +4,28 @@ def test_NeighborhoodMedian_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None,), - inputRadius=dict(argstr="--inputRadius %d",), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMaskVolume=dict( + argstr="--inputMaskVolume %s", + extensions=None, + ), + inputRadius=dict( + argstr="--inputRadius %d", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = NeighborhoodMedian.input_spec() @@ -19,7 +35,11 @@ def test_NeighborhoodMedian_inputs(): def test_NeighborhoodMedian_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = NeighborhoodMedian.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py b/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py index 9d841a6052..f6ee369414 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py @@ -4,11 +4,23 @@ def test_STAPLEAnalysis_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputDimension=dict(argstr="--inputDimension %d",), - inputLabelVolume=dict(argstr="--inputLabelVolume %s...",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputDimension=dict( + argstr="--inputDimension %d", + ), + inputLabelVolume=dict( + argstr="--inputLabelVolume %s...", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = STAPLEAnalysis.input_spec() @@ -18,7 +30,11 @@ def test_STAPLEAnalysis_inputs(): def test_STAPLEAnalysis_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = STAPLEAnalysis.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py 
b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py index 9d820afb58..e16d7a9522 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py @@ -4,11 +4,24 @@ def test_TextureFromNoiseImageFilter_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputRadius=dict(argstr="--inputRadius %d",), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputRadius=dict( + argstr="--inputRadius %d", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = TextureFromNoiseImageFilter.input_spec() @@ -18,7 +31,11 @@ def test_TextureFromNoiseImageFilter_inputs(): def test_TextureFromNoiseImageFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = TextureFromNoiseImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py index ea9e8843d5..7e6c4f6263 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py @@ -4,13 +4,31 @@ def test_TextureMeasureFilter_inputs(): input_map = dict( - args=dict(argstr="%s",), - distance=dict(argstr="--distance %d",), - environ=dict(nohash=True, usedefault=True,), - inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - insideROIValue=dict(argstr="--insideROIValue %f",), - outputFilename=dict(argstr="--outputFilename %s", hash_files=False,), + args=dict( + argstr="%s", + ), + distance=dict( + argstr="--distance %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMaskVolume=dict( + argstr="--inputMaskVolume %s", + extensions=None, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + insideROIValue=dict( + argstr="--insideROIValue %f", + ), + outputFilename=dict( + argstr="--outputFilename %s", + hash_files=False, + ), ) inputs = TextureMeasureFilter.input_spec() @@ -20,7 +38,11 @@ def test_TextureMeasureFilter_inputs(): def test_TextureMeasureFilter_outputs(): - output_map = dict(outputFilename=dict(extensions=None,),) + output_map = dict( + outputFilename=dict( + extensions=None, + ), + ) outputs = TextureMeasureFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py b/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py index 113d9607c2..194d556a8e 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py @@ -4,15 +4,40 @@ def test_UnbiasedNonLocalMeans_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - hp=dict(argstr="--hp %f",), - inputVolume=dict(argstr="%s", extensions=None, 
position=-2,), - outputVolume=dict(argstr="%s", hash_files=False, position=-1,), - ps=dict(argstr="--ps %f",), - rc=dict(argstr="--rc %s", sep=",",), - rs=dict(argstr="--rs %s", sep=",",), - sigma=dict(argstr="--sigma %f",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hp=dict( + argstr="--hp %f", + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + ps=dict( + argstr="--ps %f", + ), + rc=dict( + argstr="--rc %s", + sep=",", + ), + rs=dict( + argstr="--rs %s", + sep=",", + ), + sigma=dict( + argstr="--sigma %f", + ), ) inputs = UnbiasedNonLocalMeans.input_spec() @@ -22,7 +47,12 @@ def test_UnbiasedNonLocalMeans_inputs(): def test_UnbiasedNonLocalMeans_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = UnbiasedNonLocalMeans.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py b/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py index 248c98df3d..6af2e14039 100644 --- a/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py +++ b/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py @@ -4,15 +4,38 @@ def test_scalartransform_inputs(): input_map = dict( - args=dict(argstr="%s",), - deformation=dict(argstr="--deformation %s", extensions=None,), - environ=dict(nohash=True, usedefault=True,), - h_field=dict(argstr="--h_field ",), - input_image=dict(argstr="--input_image %s", extensions=None,), - interpolation=dict(argstr="--interpolation %s",), - invert=dict(argstr="--invert ",), - output_image=dict(argstr="--output_image %s", hash_files=False,), - transformation=dict(argstr="--transformation %s", hash_files=False,), + args=dict( + argstr="%s", + ), + deformation=dict( + argstr="--deformation %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + h_field=dict( + argstr="--h_field ", + ), + input_image=dict( + argstr="--input_image %s", + extensions=None, + ), + interpolation=dict( + argstr="--interpolation %s", + ), + invert=dict( + argstr="--invert ", + ), + output_image=dict( + argstr="--output_image %s", + hash_files=False, + ), + transformation=dict( + argstr="--transformation %s", + hash_files=False, + ), ) inputs = scalartransform.input_spec() @@ -23,7 +46,12 @@ def test_scalartransform_inputs(): def test_scalartransform_outputs(): output_map = dict( - output_image=dict(extensions=None,), transformation=dict(extensions=None,), + output_image=dict( + extensions=None, + ), + transformation=dict( + extensions=None, + ), ) outputs = scalartransform.output_spec() diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py index 4136c8105d..d1c8055df3 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py @@ -4,64 +4,149 @@ def test_BRAINSDemonWarp_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), arrayOfPyramidLevelIterations=dict( - argstr="--arrayOfPyramidLevelIterations %s", sep=",", + argstr="--arrayOfPyramidLevelIterations %s", + sep=",", + ), + backgroundFillValue=dict( + argstr="--backgroundFillValue 
%d", ), - backgroundFillValue=dict(argstr="--backgroundFillValue %d",), checkerboardPatternSubdivisions=dict( - argstr="--checkerboardPatternSubdivisions %s", sep=",", - ), - environ=dict(nohash=True, usedefault=True,), - fixedBinaryVolume=dict(argstr="--fixedBinaryVolume %s", extensions=None,), - fixedVolume=dict(argstr="--fixedVolume %s", extensions=None,), - gradient_type=dict(argstr="--gradient_type %s",), - gui=dict(argstr="--gui ",), - histogramMatch=dict(argstr="--histogramMatch ",), + argstr="--checkerboardPatternSubdivisions %s", + sep=",", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedBinaryVolume=dict( + argstr="--fixedBinaryVolume %s", + extensions=None, + ), + fixedVolume=dict( + argstr="--fixedVolume %s", + extensions=None, + ), + gradient_type=dict( + argstr="--gradient_type %s", + ), + gui=dict( + argstr="--gui ", + ), + histogramMatch=dict( + argstr="--histogramMatch ", + ), initializeWithDisplacementField=dict( - argstr="--initializeWithDisplacementField %s", extensions=None, + argstr="--initializeWithDisplacementField %s", + extensions=None, ), initializeWithTransform=dict( - argstr="--initializeWithTransform %s", extensions=None, - ), - inputPixelType=dict(argstr="--inputPixelType %s",), - interpolationMode=dict(argstr="--interpolationMode %s",), - lowerThresholdForBOBF=dict(argstr="--lowerThresholdForBOBF %d",), - maskProcessingMode=dict(argstr="--maskProcessingMode %s",), - max_step_length=dict(argstr="--max_step_length %f",), - medianFilterSize=dict(argstr="--medianFilterSize %s", sep=",",), - minimumFixedPyramid=dict(argstr="--minimumFixedPyramid %s", sep=",",), - minimumMovingPyramid=dict(argstr="--minimumMovingPyramid %s", sep=",",), - movingBinaryVolume=dict(argstr="--movingBinaryVolume %s", extensions=None,), - movingVolume=dict(argstr="--movingVolume %s", extensions=None,), - neighborhoodForBOBF=dict(argstr="--neighborhoodForBOBF %s", sep=",",), + argstr="--initializeWithTransform %s", + extensions=None, + ), + inputPixelType=dict( + argstr="--inputPixelType %s", + ), + interpolationMode=dict( + argstr="--interpolationMode %s", + ), + lowerThresholdForBOBF=dict( + argstr="--lowerThresholdForBOBF %d", + ), + maskProcessingMode=dict( + argstr="--maskProcessingMode %s", + ), + max_step_length=dict( + argstr="--max_step_length %f", + ), + medianFilterSize=dict( + argstr="--medianFilterSize %s", + sep=",", + ), + minimumFixedPyramid=dict( + argstr="--minimumFixedPyramid %s", + sep=",", + ), + minimumMovingPyramid=dict( + argstr="--minimumMovingPyramid %s", + sep=",", + ), + movingBinaryVolume=dict( + argstr="--movingBinaryVolume %s", + extensions=None, + ), + movingVolume=dict( + argstr="--movingVolume %s", + extensions=None, + ), + neighborhoodForBOBF=dict( + argstr="--neighborhoodForBOBF %s", + sep=",", + ), numberOfBCHApproximationTerms=dict( argstr="--numberOfBCHApproximationTerms %d", ), - numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d",), - numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d",), - numberOfPyramidLevels=dict(argstr="--numberOfPyramidLevels %d",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), + numberOfHistogramBins=dict( + argstr="--numberOfHistogramBins %d", + ), + numberOfMatchPoints=dict( + argstr="--numberOfMatchPoints %d", + ), + numberOfPyramidLevels=dict( + argstr="--numberOfPyramidLevels %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), outputCheckerboardVolume=dict( - argstr="--outputCheckerboardVolume %s", hash_files=False, + argstr="--outputCheckerboardVolume 
%s", + hash_files=False, + ), + outputDebug=dict( + argstr="--outputDebug ", ), - outputDebug=dict(argstr="--outputDebug ",), outputDisplacementFieldPrefix=dict( argstr="--outputDisplacementFieldPrefix %s", ), outputDisplacementFieldVolume=dict( - argstr="--outputDisplacementFieldVolume %s", hash_files=False, - ), - outputNormalized=dict(argstr="--outputNormalized ",), - outputPixelType=dict(argstr="--outputPixelType %s",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - promptUser=dict(argstr="--promptUser ",), - registrationFilterType=dict(argstr="--registrationFilterType %s",), - seedForBOBF=dict(argstr="--seedForBOBF %s", sep=",",), - smoothDisplacementFieldSigma=dict(argstr="--smoothDisplacementFieldSigma %f",), - upFieldSmoothing=dict(argstr="--upFieldSmoothing %f",), - upperThresholdForBOBF=dict(argstr="--upperThresholdForBOBF %d",), - use_vanilla_dem=dict(argstr="--use_vanilla_dem ",), + argstr="--outputDisplacementFieldVolume %s", + hash_files=False, + ), + outputNormalized=dict( + argstr="--outputNormalized ", + ), + outputPixelType=dict( + argstr="--outputPixelType %s", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + promptUser=dict( + argstr="--promptUser ", + ), + registrationFilterType=dict( + argstr="--registrationFilterType %s", + ), + seedForBOBF=dict( + argstr="--seedForBOBF %s", + sep=",", + ), + smoothDisplacementFieldSigma=dict( + argstr="--smoothDisplacementFieldSigma %f", + ), + upFieldSmoothing=dict( + argstr="--upFieldSmoothing %f", + ), + upperThresholdForBOBF=dict( + argstr="--upperThresholdForBOBF %d", + ), + use_vanilla_dem=dict( + argstr="--use_vanilla_dem ", + ), ) inputs = BRAINSDemonWarp.input_spec() @@ -72,9 +157,15 @@ def test_BRAINSDemonWarp_inputs(): def test_BRAINSDemonWarp_outputs(): output_map = dict( - outputCheckerboardVolume=dict(extensions=None,), - outputDisplacementFieldVolume=dict(extensions=None,), - outputVolume=dict(extensions=None,), + outputCheckerboardVolume=dict( + extensions=None, + ), + outputDisplacementFieldVolume=dict( + extensions=None, + ), + outputVolume=dict( + extensions=None, + ), ) outputs = BRAINSDemonWarp.output_spec() diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py index 683d34694b..69111d9212 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py @@ -4,83 +4,228 @@ def test_BRAINSFit_inputs(): input_map = dict( - ROIAutoClosingSize=dict(argstr="--ROIAutoClosingSize %f",), - ROIAutoDilateSize=dict(argstr="--ROIAutoDilateSize %f",), - args=dict(argstr="%s",), - backgroundFillValue=dict(argstr="--backgroundFillValue %f",), - bsplineTransform=dict(argstr="--bsplineTransform %s", hash_files=False,), + ROIAutoClosingSize=dict( + argstr="--ROIAutoClosingSize %f", + ), + ROIAutoDilateSize=dict( + argstr="--ROIAutoDilateSize %f", + ), + args=dict( + argstr="%s", + ), + backgroundFillValue=dict( + argstr="--backgroundFillValue %f", + ), + bsplineTransform=dict( + argstr="--bsplineTransform %s", + hash_files=False, + ), costFunctionConvergenceFactor=dict( argstr="--costFunctionConvergenceFactor %f", ), - costMetric=dict(argstr="--costMetric %s",), - debugLevel=dict(argstr="--debugLevel %d",), - environ=dict(nohash=True, usedefault=True,), - failureExitCode=dict(argstr="--failureExitCode %d",), - fixedBinaryVolume=dict(argstr="--fixedBinaryVolume %s", extensions=None,), - 
fixedVolume=dict(argstr="--fixedVolume %s", extensions=None,), - fixedVolume2=dict(argstr="--fixedVolume2 %s", extensions=None,), - fixedVolumeTimeIndex=dict(argstr="--fixedVolumeTimeIndex %d",), - gui=dict(argstr="--gui ",), - histogramMatch=dict(argstr="--histogramMatch ",), - initialTransform=dict(argstr="--initialTransform %s", extensions=None,), + costMetric=dict( + argstr="--costMetric %s", + ), + debugLevel=dict( + argstr="--debugLevel %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + failureExitCode=dict( + argstr="--failureExitCode %d", + ), + fixedBinaryVolume=dict( + argstr="--fixedBinaryVolume %s", + extensions=None, + ), + fixedVolume=dict( + argstr="--fixedVolume %s", + extensions=None, + ), + fixedVolume2=dict( + argstr="--fixedVolume2 %s", + extensions=None, + ), + fixedVolumeTimeIndex=dict( + argstr="--fixedVolumeTimeIndex %d", + ), + gui=dict( + argstr="--gui ", + ), + histogramMatch=dict( + argstr="--histogramMatch ", + ), + initialTransform=dict( + argstr="--initialTransform %s", + extensions=None, + ), initializeRegistrationByCurrentGenericTransform=dict( argstr="--initializeRegistrationByCurrentGenericTransform ", ), - initializeTransformMode=dict(argstr="--initializeTransformMode %s",), - interpolationMode=dict(argstr="--interpolationMode %s",), - linearTransform=dict(argstr="--linearTransform %s", hash_files=False,), - logFileReport=dict(argstr="--logFileReport %s", hash_files=False,), - maskInferiorCutOffFromCenter=dict(argstr="--maskInferiorCutOffFromCenter %f",), - maskProcessingMode=dict(argstr="--maskProcessingMode %s",), - maxBSplineDisplacement=dict(argstr="--maxBSplineDisplacement %f",), - maximumNumberOfCorrections=dict(argstr="--maximumNumberOfCorrections %d",), - maximumNumberOfEvaluations=dict(argstr="--maximumNumberOfEvaluations %d",), - maximumStepLength=dict(argstr="--maximumStepLength %f",), - medianFilterSize=dict(argstr="--medianFilterSize %s", sep=",",), - metricSamplingStrategy=dict(argstr="--metricSamplingStrategy %s",), - minimumStepLength=dict(argstr="--minimumStepLength %s", sep=",",), - movingBinaryVolume=dict(argstr="--movingBinaryVolume %s", extensions=None,), - movingVolume=dict(argstr="--movingVolume %s", extensions=None,), - movingVolume2=dict(argstr="--movingVolume2 %s", extensions=None,), - movingVolumeTimeIndex=dict(argstr="--movingVolumeTimeIndex %d",), - numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d",), - numberOfIterations=dict(argstr="--numberOfIterations %s", sep=",",), - numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d",), - numberOfSamples=dict(argstr="--numberOfSamples %d",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), + initializeTransformMode=dict( + argstr="--initializeTransformMode %s", + ), + interpolationMode=dict( + argstr="--interpolationMode %s", + ), + linearTransform=dict( + argstr="--linearTransform %s", + hash_files=False, + ), + logFileReport=dict( + argstr="--logFileReport %s", + hash_files=False, + ), + maskInferiorCutOffFromCenter=dict( + argstr="--maskInferiorCutOffFromCenter %f", + ), + maskProcessingMode=dict( + argstr="--maskProcessingMode %s", + ), + maxBSplineDisplacement=dict( + argstr="--maxBSplineDisplacement %f", + ), + maximumNumberOfCorrections=dict( + argstr="--maximumNumberOfCorrections %d", + ), + maximumNumberOfEvaluations=dict( + argstr="--maximumNumberOfEvaluations %d", + ), + maximumStepLength=dict( + argstr="--maximumStepLength %f", + ), + medianFilterSize=dict( + argstr="--medianFilterSize %s", + sep=",", + ), + 
metricSamplingStrategy=dict( + argstr="--metricSamplingStrategy %s", + ), + minimumStepLength=dict( + argstr="--minimumStepLength %s", + sep=",", + ), + movingBinaryVolume=dict( + argstr="--movingBinaryVolume %s", + extensions=None, + ), + movingVolume=dict( + argstr="--movingVolume %s", + extensions=None, + ), + movingVolume2=dict( + argstr="--movingVolume2 %s", + extensions=None, + ), + movingVolumeTimeIndex=dict( + argstr="--movingVolumeTimeIndex %d", + ), + numberOfHistogramBins=dict( + argstr="--numberOfHistogramBins %d", + ), + numberOfIterations=dict( + argstr="--numberOfIterations %s", + sep=",", + ), + numberOfMatchPoints=dict( + argstr="--numberOfMatchPoints %d", + ), + numberOfSamples=dict( + argstr="--numberOfSamples %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), outputFixedVolumeROI=dict( - argstr="--outputFixedVolumeROI %s", hash_files=False, + argstr="--outputFixedVolumeROI %s", + hash_files=False, ), outputMovingVolumeROI=dict( - argstr="--outputMovingVolumeROI %s", hash_files=False, - ), - outputTransform=dict(argstr="--outputTransform %s", hash_files=False,), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - outputVolumePixelType=dict(argstr="--outputVolumePixelType %s",), - projectedGradientTolerance=dict(argstr="--projectedGradientTolerance %f",), - promptUser=dict(argstr="--promptUser ",), - relaxationFactor=dict(argstr="--relaxationFactor %f",), - removeIntensityOutliers=dict(argstr="--removeIntensityOutliers %f",), - reproportionScale=dict(argstr="--reproportionScale %f",), - samplingPercentage=dict(argstr="--samplingPercentage %f",), - scaleOutputValues=dict(argstr="--scaleOutputValues ",), - skewScale=dict(argstr="--skewScale %f",), - splineGridSize=dict(argstr="--splineGridSize %s", sep=",",), + argstr="--outputMovingVolumeROI %s", + hash_files=False, + ), + outputTransform=dict( + argstr="--outputTransform %s", + hash_files=False, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + outputVolumePixelType=dict( + argstr="--outputVolumePixelType %s", + ), + projectedGradientTolerance=dict( + argstr="--projectedGradientTolerance %f", + ), + promptUser=dict( + argstr="--promptUser ", + ), + relaxationFactor=dict( + argstr="--relaxationFactor %f", + ), + removeIntensityOutliers=dict( + argstr="--removeIntensityOutliers %f", + ), + reproportionScale=dict( + argstr="--reproportionScale %f", + ), + samplingPercentage=dict( + argstr="--samplingPercentage %f", + ), + scaleOutputValues=dict( + argstr="--scaleOutputValues ", + ), + skewScale=dict( + argstr="--skewScale %f", + ), + splineGridSize=dict( + argstr="--splineGridSize %s", + sep=",", + ), strippedOutputTransform=dict( - argstr="--strippedOutputTransform %s", hash_files=False, - ), - transformType=dict(argstr="--transformType %s", sep=",",), - translationScale=dict(argstr="--translationScale %f",), - useAffine=dict(argstr="--useAffine ",), - useBSpline=dict(argstr="--useBSpline ",), - useComposite=dict(argstr="--useComposite ",), - useROIBSpline=dict(argstr="--useROIBSpline ",), - useRigid=dict(argstr="--useRigid ",), - useScaleSkewVersor3D=dict(argstr="--useScaleSkewVersor3D ",), - useScaleVersor3D=dict(argstr="--useScaleVersor3D ",), - useSyN=dict(argstr="--useSyN ",), - writeOutputTransformInFloat=dict(argstr="--writeOutputTransformInFloat ",), - writeTransformOnFailure=dict(argstr="--writeTransformOnFailure ",), + argstr="--strippedOutputTransform %s", + hash_files=False, + ), + transformType=dict( + argstr="--transformType %s", + 
sep=",", + ), + translationScale=dict( + argstr="--translationScale %f", + ), + useAffine=dict( + argstr="--useAffine ", + ), + useBSpline=dict( + argstr="--useBSpline ", + ), + useComposite=dict( + argstr="--useComposite ", + ), + useROIBSpline=dict( + argstr="--useROIBSpline ", + ), + useRigid=dict( + argstr="--useRigid ", + ), + useScaleSkewVersor3D=dict( + argstr="--useScaleSkewVersor3D ", + ), + useScaleVersor3D=dict( + argstr="--useScaleVersor3D ", + ), + useSyN=dict( + argstr="--useSyN ", + ), + writeOutputTransformInFloat=dict( + argstr="--writeOutputTransformInFloat ", + ), + writeTransformOnFailure=dict( + argstr="--writeTransformOnFailure ", + ), ) inputs = BRAINSFit.input_spec() @@ -91,14 +236,30 @@ def test_BRAINSFit_inputs(): def test_BRAINSFit_outputs(): output_map = dict( - bsplineTransform=dict(extensions=None,), - linearTransform=dict(extensions=None,), - logFileReport=dict(extensions=None,), - outputFixedVolumeROI=dict(extensions=None,), - outputMovingVolumeROI=dict(extensions=None,), - outputTransform=dict(extensions=None,), - outputVolume=dict(extensions=None,), - strippedOutputTransform=dict(extensions=None,), + bsplineTransform=dict( + extensions=None, + ), + linearTransform=dict( + extensions=None, + ), + logFileReport=dict( + extensions=None, + ), + outputFixedVolumeROI=dict( + extensions=None, + ), + outputMovingVolumeROI=dict( + extensions=None, + ), + outputTransform=dict( + extensions=None, + ), + outputVolume=dict( + extensions=None, + ), + strippedOutputTransform=dict( + extensions=None, + ), ) outputs = BRAINSFit.output_spec() diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py index 2d94f19a5f..46d175da07 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py @@ -4,19 +4,52 @@ def test_BRAINSResample_inputs(): input_map = dict( - args=dict(argstr="%s",), - defaultValue=dict(argstr="--defaultValue %f",), - deformationVolume=dict(argstr="--deformationVolume %s", extensions=None,), - environ=dict(nohash=True, usedefault=True,), - gridSpacing=dict(argstr="--gridSpacing %s", sep=",",), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - interpolationMode=dict(argstr="--interpolationMode %s",), - inverseTransform=dict(argstr="--inverseTransform ",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - pixelType=dict(argstr="--pixelType %s",), - referenceVolume=dict(argstr="--referenceVolume %s", extensions=None,), - warpTransform=dict(argstr="--warpTransform %s", extensions=None,), + args=dict( + argstr="%s", + ), + defaultValue=dict( + argstr="--defaultValue %f", + ), + deformationVolume=dict( + argstr="--deformationVolume %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gridSpacing=dict( + argstr="--gridSpacing %s", + sep=",", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + interpolationMode=dict( + argstr="--interpolationMode %s", + ), + inverseTransform=dict( + argstr="--inverseTransform ", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + pixelType=dict( + argstr="--pixelType %s", + ), + referenceVolume=dict( + argstr="--referenceVolume %s", + extensions=None, + ), + warpTransform=dict( + 
argstr="--warpTransform %s", + extensions=None, + ), ) inputs = BRAINSResample.input_spec() @@ -26,7 +59,11 @@ def test_BRAINSResample_inputs(): def test_BRAINSResample_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = BRAINSResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py index db477b2593..52d24e6fff 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py @@ -4,12 +4,27 @@ def test_BRAINSResize_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - pixelType=dict(argstr="--pixelType %s",), - scaleFactor=dict(argstr="--scaleFactor %f",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + pixelType=dict( + argstr="--pixelType %s", + ), + scaleFactor=dict( + argstr="--scaleFactor %f", + ), ) inputs = BRAINSResize.input_spec() @@ -19,7 +34,11 @@ def test_BRAINSResize_inputs(): def test_BRAINSResize_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = BRAINSResize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py index 4e5c6ae239..bc98fc2763 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py @@ -4,15 +4,37 @@ def test_BRAINSTransformFromFiducials_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - fixedLandmarks=dict(argstr="--fixedLandmarks %s...",), - fixedLandmarksFile=dict(argstr="--fixedLandmarksFile %s", extensions=None,), - movingLandmarks=dict(argstr="--movingLandmarks %s...",), - movingLandmarksFile=dict(argstr="--movingLandmarksFile %s", extensions=None,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - saveTransform=dict(argstr="--saveTransform %s", hash_files=False,), - transformType=dict(argstr="--transformType %s",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedLandmarks=dict( + argstr="--fixedLandmarks %s...", + ), + fixedLandmarksFile=dict( + argstr="--fixedLandmarksFile %s", + extensions=None, + ), + movingLandmarks=dict( + argstr="--movingLandmarks %s...", + ), + movingLandmarksFile=dict( + argstr="--movingLandmarksFile %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + saveTransform=dict( + argstr="--saveTransform %s", + hash_files=False, + ), + transformType=dict( + argstr="--transformType %s", + ), ) inputs = BRAINSTransformFromFiducials.input_spec() @@ -22,7 +44,11 @@ def test_BRAINSTransformFromFiducials_inputs(): def 
test_BRAINSTransformFromFiducials_outputs(): - output_map = dict(saveTransform=dict(extensions=None,),) + output_map = dict( + saveTransform=dict( + extensions=None, + ), + ) outputs = BRAINSTransformFromFiducials.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py b/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py index 098360c1e3..af8bac8680 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py @@ -4,65 +4,151 @@ def test_VBRAINSDemonWarp_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), arrayOfPyramidLevelIterations=dict( - argstr="--arrayOfPyramidLevelIterations %s", sep=",", + argstr="--arrayOfPyramidLevelIterations %s", + sep=",", + ), + backgroundFillValue=dict( + argstr="--backgroundFillValue %d", ), - backgroundFillValue=dict(argstr="--backgroundFillValue %d",), checkerboardPatternSubdivisions=dict( - argstr="--checkerboardPatternSubdivisions %s", sep=",", - ), - environ=dict(nohash=True, usedefault=True,), - fixedBinaryVolume=dict(argstr="--fixedBinaryVolume %s", extensions=None,), - fixedVolume=dict(argstr="--fixedVolume %s...",), - gradient_type=dict(argstr="--gradient_type %s",), - gui=dict(argstr="--gui ",), - histogramMatch=dict(argstr="--histogramMatch ",), + argstr="--checkerboardPatternSubdivisions %s", + sep=",", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedBinaryVolume=dict( + argstr="--fixedBinaryVolume %s", + extensions=None, + ), + fixedVolume=dict( + argstr="--fixedVolume %s...", + ), + gradient_type=dict( + argstr="--gradient_type %s", + ), + gui=dict( + argstr="--gui ", + ), + histogramMatch=dict( + argstr="--histogramMatch ", + ), initializeWithDisplacementField=dict( - argstr="--initializeWithDisplacementField %s", extensions=None, + argstr="--initializeWithDisplacementField %s", + extensions=None, ), initializeWithTransform=dict( - argstr="--initializeWithTransform %s", extensions=None, - ), - inputPixelType=dict(argstr="--inputPixelType %s",), - interpolationMode=dict(argstr="--interpolationMode %s",), - lowerThresholdForBOBF=dict(argstr="--lowerThresholdForBOBF %d",), - makeBOBF=dict(argstr="--makeBOBF ",), - max_step_length=dict(argstr="--max_step_length %f",), - medianFilterSize=dict(argstr="--medianFilterSize %s", sep=",",), - minimumFixedPyramid=dict(argstr="--minimumFixedPyramid %s", sep=",",), - minimumMovingPyramid=dict(argstr="--minimumMovingPyramid %s", sep=",",), - movingBinaryVolume=dict(argstr="--movingBinaryVolume %s", extensions=None,), - movingVolume=dict(argstr="--movingVolume %s...",), - neighborhoodForBOBF=dict(argstr="--neighborhoodForBOBF %s", sep=",",), + argstr="--initializeWithTransform %s", + extensions=None, + ), + inputPixelType=dict( + argstr="--inputPixelType %s", + ), + interpolationMode=dict( + argstr="--interpolationMode %s", + ), + lowerThresholdForBOBF=dict( + argstr="--lowerThresholdForBOBF %d", + ), + makeBOBF=dict( + argstr="--makeBOBF ", + ), + max_step_length=dict( + argstr="--max_step_length %f", + ), + medianFilterSize=dict( + argstr="--medianFilterSize %s", + sep=",", + ), + minimumFixedPyramid=dict( + argstr="--minimumFixedPyramid %s", + sep=",", + ), + minimumMovingPyramid=dict( + argstr="--minimumMovingPyramid %s", + sep=",", + ), + movingBinaryVolume=dict( + argstr="--movingBinaryVolume %s", + extensions=None, + ), + 
movingVolume=dict( + argstr="--movingVolume %s...", + ), + neighborhoodForBOBF=dict( + argstr="--neighborhoodForBOBF %s", + sep=",", + ), numberOfBCHApproximationTerms=dict( argstr="--numberOfBCHApproximationTerms %d", ), - numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d",), - numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d",), - numberOfPyramidLevels=dict(argstr="--numberOfPyramidLevels %d",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), + numberOfHistogramBins=dict( + argstr="--numberOfHistogramBins %d", + ), + numberOfMatchPoints=dict( + argstr="--numberOfMatchPoints %d", + ), + numberOfPyramidLevels=dict( + argstr="--numberOfPyramidLevels %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), outputCheckerboardVolume=dict( - argstr="--outputCheckerboardVolume %s", hash_files=False, + argstr="--outputCheckerboardVolume %s", + hash_files=False, + ), + outputDebug=dict( + argstr="--outputDebug ", ), - outputDebug=dict(argstr="--outputDebug ",), outputDisplacementFieldPrefix=dict( argstr="--outputDisplacementFieldPrefix %s", ), outputDisplacementFieldVolume=dict( - argstr="--outputDisplacementFieldVolume %s", hash_files=False, - ), - outputNormalized=dict(argstr="--outputNormalized ",), - outputPixelType=dict(argstr="--outputPixelType %s",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - promptUser=dict(argstr="--promptUser ",), - registrationFilterType=dict(argstr="--registrationFilterType %s",), - seedForBOBF=dict(argstr="--seedForBOBF %s", sep=",",), - smoothDisplacementFieldSigma=dict(argstr="--smoothDisplacementFieldSigma %f",), - upFieldSmoothing=dict(argstr="--upFieldSmoothing %f",), - upperThresholdForBOBF=dict(argstr="--upperThresholdForBOBF %d",), - use_vanilla_dem=dict(argstr="--use_vanilla_dem ",), - weightFactors=dict(argstr="--weightFactors %s", sep=",",), + argstr="--outputDisplacementFieldVolume %s", + hash_files=False, + ), + outputNormalized=dict( + argstr="--outputNormalized ", + ), + outputPixelType=dict( + argstr="--outputPixelType %s", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + promptUser=dict( + argstr="--promptUser ", + ), + registrationFilterType=dict( + argstr="--registrationFilterType %s", + ), + seedForBOBF=dict( + argstr="--seedForBOBF %s", + sep=",", + ), + smoothDisplacementFieldSigma=dict( + argstr="--smoothDisplacementFieldSigma %f", + ), + upFieldSmoothing=dict( + argstr="--upFieldSmoothing %f", + ), + upperThresholdForBOBF=dict( + argstr="--upperThresholdForBOBF %d", + ), + use_vanilla_dem=dict( + argstr="--use_vanilla_dem ", + ), + weightFactors=dict( + argstr="--weightFactors %s", + sep=",", + ), ) inputs = VBRAINSDemonWarp.input_spec() @@ -73,9 +159,15 @@ def test_VBRAINSDemonWarp_inputs(): def test_VBRAINSDemonWarp_outputs(): output_map = dict( - outputCheckerboardVolume=dict(extensions=None,), - outputDisplacementFieldVolume=dict(extensions=None,), - outputVolume=dict(extensions=None,), + outputCheckerboardVolume=dict( + extensions=None, + ), + outputDisplacementFieldVolume=dict( + extensions=None, + ), + outputVolume=dict( + extensions=None, + ), ) outputs = VBRAINSDemonWarp.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py index e43cdf412c..5e66b2d82c 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py @@ -4,48 +4,123 
@@ def test_BRAINSABC_inputs(): input_map = dict( - args=dict(argstr="%s",), - atlasDefinition=dict(argstr="--atlasDefinition %s", extensions=None,), + args=dict( + argstr="%s", + ), + atlasDefinition=dict( + argstr="--atlasDefinition %s", + extensions=None, + ), atlasToSubjectInitialTransform=dict( - argstr="--atlasToSubjectInitialTransform %s", hash_files=False, + argstr="--atlasToSubjectInitialTransform %s", + hash_files=False, ), atlasToSubjectTransform=dict( - argstr="--atlasToSubjectTransform %s", hash_files=False, - ), - atlasToSubjectTransformType=dict(argstr="--atlasToSubjectTransformType %s",), - atlasWarpingOff=dict(argstr="--atlasWarpingOff ",), - debuglevel=dict(argstr="--debuglevel %d",), - defaultSuffix=dict(argstr="--defaultSuffix %s",), - environ=dict(nohash=True, usedefault=True,), - filterIteration=dict(argstr="--filterIteration %d",), - filterMethod=dict(argstr="--filterMethod %s",), - filterTimeStep=dict(argstr="--filterTimeStep %f",), - gridSize=dict(argstr="--gridSize %s", sep=",",), - implicitOutputs=dict(argstr="--implicitOutputs %s...", hash_files=False,), - inputVolumeTypes=dict(argstr="--inputVolumeTypes %s", sep=",",), - inputVolumes=dict(argstr="--inputVolumes %s...",), - interpolationMode=dict(argstr="--interpolationMode %s",), - maxBiasDegree=dict(argstr="--maxBiasDegree %d",), - maxIterations=dict(argstr="--maxIterations %d",), - medianFilterSize=dict(argstr="--medianFilterSize %s", sep=",",), + argstr="--atlasToSubjectTransform %s", + hash_files=False, + ), + atlasToSubjectTransformType=dict( + argstr="--atlasToSubjectTransformType %s", + ), + atlasWarpingOff=dict( + argstr="--atlasWarpingOff ", + ), + debuglevel=dict( + argstr="--debuglevel %d", + ), + defaultSuffix=dict( + argstr="--defaultSuffix %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + filterIteration=dict( + argstr="--filterIteration %d", + ), + filterMethod=dict( + argstr="--filterMethod %s", + ), + filterTimeStep=dict( + argstr="--filterTimeStep %f", + ), + gridSize=dict( + argstr="--gridSize %s", + sep=",", + ), + implicitOutputs=dict( + argstr="--implicitOutputs %s...", + hash_files=False, + ), + inputVolumeTypes=dict( + argstr="--inputVolumeTypes %s", + sep=",", + ), + inputVolumes=dict( + argstr="--inputVolumes %s...", + ), + interpolationMode=dict( + argstr="--interpolationMode %s", + ), + maxBiasDegree=dict( + argstr="--maxBiasDegree %d", + ), + maxIterations=dict( + argstr="--maxIterations %d", + ), + medianFilterSize=dict( + argstr="--medianFilterSize %s", + sep=",", + ), numberOfSubSamplesInEachPlugArea=dict( - argstr="--numberOfSubSamplesInEachPlugArea %s", sep=",", - ), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputDir=dict(argstr="--outputDir %s", hash_files=False,), - outputDirtyLabels=dict(argstr="--outputDirtyLabels %s", hash_files=False,), - outputFormat=dict(argstr="--outputFormat %s",), - outputLabels=dict(argstr="--outputLabels %s", hash_files=False,), - outputVolumes=dict(argstr="--outputVolumes %s...", hash_files=False,), - posteriorTemplate=dict(argstr="--posteriorTemplate %s",), - purePlugsThreshold=dict(argstr="--purePlugsThreshold %f",), - restoreState=dict(argstr="--restoreState %s", extensions=None,), - saveState=dict(argstr="--saveState %s", hash_files=False,), + argstr="--numberOfSubSamplesInEachPlugArea %s", + sep=",", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputDir=dict( + argstr="--outputDir %s", + hash_files=False, + ), + outputDirtyLabels=dict( + argstr="--outputDirtyLabels %s", + 
hash_files=False, + ), + outputFormat=dict( + argstr="--outputFormat %s", + ), + outputLabels=dict( + argstr="--outputLabels %s", + hash_files=False, + ), + outputVolumes=dict( + argstr="--outputVolumes %s...", + hash_files=False, + ), + posteriorTemplate=dict( + argstr="--posteriorTemplate %s", + ), + purePlugsThreshold=dict( + argstr="--purePlugsThreshold %f", + ), + restoreState=dict( + argstr="--restoreState %s", + extensions=None, + ), + saveState=dict( + argstr="--saveState %s", + hash_files=False, + ), subjectIntermodeTransformType=dict( argstr="--subjectIntermodeTransformType %s", ), - useKNN=dict(argstr="--useKNN ",), - writeLess=dict(argstr="--writeLess ",), + useKNN=dict( + argstr="--useKNN ", + ), + writeLess=dict( + argstr="--writeLess ", + ), ) inputs = BRAINSABC.input_spec() @@ -56,14 +131,24 @@ def test_BRAINSABC_inputs(): def test_BRAINSABC_outputs(): output_map = dict( - atlasToSubjectInitialTransform=dict(extensions=None,), - atlasToSubjectTransform=dict(extensions=None,), + atlasToSubjectInitialTransform=dict( + extensions=None, + ), + atlasToSubjectTransform=dict( + extensions=None, + ), implicitOutputs=dict(), outputDir=dict(), - outputDirtyLabels=dict(extensions=None,), - outputLabels=dict(extensions=None,), + outputDirtyLabels=dict( + extensions=None, + ), + outputLabels=dict( + extensions=None, + ), outputVolumes=dict(), - saveState=dict(extensions=None,), + saveState=dict( + extensions=None, + ), ) outputs = BRAINSABC.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py index a2eb766db3..2f77f419e7 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py @@ -4,62 +4,155 @@ def test_BRAINSConstellationDetector_inputs(): input_map = dict( - BackgroundFillValue=dict(argstr="--BackgroundFillValue %s",), - LLSModel=dict(argstr="--LLSModel %s", extensions=None,), - acLowerBound=dict(argstr="--acLowerBound %f",), - args=dict(argstr="%s",), - atlasLandmarkWeights=dict(argstr="--atlasLandmarkWeights %s", extensions=None,), - atlasLandmarks=dict(argstr="--atlasLandmarks %s", extensions=None,), - atlasVolume=dict(argstr="--atlasVolume %s", extensions=None,), - cutOutHeadInOutputVolume=dict(argstr="--cutOutHeadInOutputVolume ",), - debug=dict(argstr="--debug ",), - environ=dict(nohash=True, usedefault=True,), - forceACPoint=dict(argstr="--forceACPoint %s", sep=",",), + BackgroundFillValue=dict( + argstr="--BackgroundFillValue %s", + ), + LLSModel=dict( + argstr="--LLSModel %s", + extensions=None, + ), + acLowerBound=dict( + argstr="--acLowerBound %f", + ), + args=dict( + argstr="%s", + ), + atlasLandmarkWeights=dict( + argstr="--atlasLandmarkWeights %s", + extensions=None, + ), + atlasLandmarks=dict( + argstr="--atlasLandmarks %s", + extensions=None, + ), + atlasVolume=dict( + argstr="--atlasVolume %s", + extensions=None, + ), + cutOutHeadInOutputVolume=dict( + argstr="--cutOutHeadInOutputVolume ", + ), + debug=dict( + argstr="--debug ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + forceACPoint=dict( + argstr="--forceACPoint %s", + sep=",", + ), forceHoughEyeDetectorReportFailure=dict( argstr="--forceHoughEyeDetectorReportFailure ", ), - forcePCPoint=dict(argstr="--forcePCPoint %s", sep=",",), - forceRPPoint=dict(argstr="--forceRPPoint %s", sep=",",), - 
forceVN4Point=dict(argstr="--forceVN4Point %s", sep=",",), - houghEyeDetectorMode=dict(argstr="--houghEyeDetectorMode %d",), - inputLandmarksEMSP=dict(argstr="--inputLandmarksEMSP %s", extensions=None,), - inputTemplateModel=dict(argstr="--inputTemplateModel %s", extensions=None,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - interpolationMode=dict(argstr="--interpolationMode %s",), - mspQualityLevel=dict(argstr="--mspQualityLevel %d",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - otsuPercentileThreshold=dict(argstr="--otsuPercentileThreshold %f",), + forcePCPoint=dict( + argstr="--forcePCPoint %s", + sep=",", + ), + forceRPPoint=dict( + argstr="--forceRPPoint %s", + sep=",", + ), + forceVN4Point=dict( + argstr="--forceVN4Point %s", + sep=",", + ), + houghEyeDetectorMode=dict( + argstr="--houghEyeDetectorMode %d", + ), + inputLandmarksEMSP=dict( + argstr="--inputLandmarksEMSP %s", + extensions=None, + ), + inputTemplateModel=dict( + argstr="--inputTemplateModel %s", + extensions=None, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + interpolationMode=dict( + argstr="--interpolationMode %s", + ), + mspQualityLevel=dict( + argstr="--mspQualityLevel %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + otsuPercentileThreshold=dict( + argstr="--otsuPercentileThreshold %f", + ), outputLandmarksInACPCAlignedSpace=dict( - argstr="--outputLandmarksInACPCAlignedSpace %s", hash_files=False, + argstr="--outputLandmarksInACPCAlignedSpace %s", + hash_files=False, ), outputLandmarksInInputSpace=dict( - argstr="--outputLandmarksInInputSpace %s", hash_files=False, + argstr="--outputLandmarksInInputSpace %s", + hash_files=False, + ), + outputMRML=dict( + argstr="--outputMRML %s", + hash_files=False, ), - outputMRML=dict(argstr="--outputMRML %s", hash_files=False,), outputResampledVolume=dict( - argstr="--outputResampledVolume %s", hash_files=False, + argstr="--outputResampledVolume %s", + hash_files=False, + ), + outputTransform=dict( + argstr="--outputTransform %s", + hash_files=False, ), - outputTransform=dict(argstr="--outputTransform %s", hash_files=False,), outputUntransformedClippedVolume=dict( - argstr="--outputUntransformedClippedVolume %s", hash_files=False, + argstr="--outputUntransformedClippedVolume %s", + hash_files=False, ), outputVerificationScript=dict( - argstr="--outputVerificationScript %s", hash_files=False, + argstr="--outputVerificationScript %s", + hash_files=False, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + rVN4=dict( + argstr="--rVN4 %f", + ), + rac=dict( + argstr="--rac %f", + ), + rescaleIntensities=dict( + argstr="--rescaleIntensities ", ), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - rVN4=dict(argstr="--rVN4 %f",), - rac=dict(argstr="--rac %f",), - rescaleIntensities=dict(argstr="--rescaleIntensities ",), rescaleIntensitiesOutputRange=dict( - argstr="--rescaleIntensitiesOutputRange %s", sep=",", - ), - resultsDir=dict(argstr="--resultsDir %s", hash_files=False,), - rmpj=dict(argstr="--rmpj %f",), - rpc=dict(argstr="--rpc %f",), - trimRescaledIntensities=dict(argstr="--trimRescaledIntensities %f",), - verbose=dict(argstr="--verbose ",), - writeBranded2DImage=dict(argstr="--writeBranded2DImage %s", hash_files=False,), - writedebuggingImagesLevel=dict(argstr="--writedebuggingImagesLevel %d",), + argstr="--rescaleIntensitiesOutputRange %s", + sep=",", + ), + resultsDir=dict( + argstr="--resultsDir %s", + hash_files=False, + ), + 
rmpj=dict( + argstr="--rmpj %f", + ), + rpc=dict( + argstr="--rpc %f", + ), + trimRescaledIntensities=dict( + argstr="--trimRescaledIntensities %f", + ), + verbose=dict( + argstr="--verbose ", + ), + writeBranded2DImage=dict( + argstr="--writeBranded2DImage %s", + hash_files=False, + ), + writedebuggingImagesLevel=dict( + argstr="--writedebuggingImagesLevel %d", + ), ) inputs = BRAINSConstellationDetector.input_spec() @@ -70,16 +163,34 @@ def test_BRAINSConstellationDetector_inputs(): def test_BRAINSConstellationDetector_outputs(): output_map = dict( - outputLandmarksInACPCAlignedSpace=dict(extensions=None,), - outputLandmarksInInputSpace=dict(extensions=None,), - outputMRML=dict(extensions=None,), - outputResampledVolume=dict(extensions=None,), - outputTransform=dict(extensions=None,), - outputUntransformedClippedVolume=dict(extensions=None,), - outputVerificationScript=dict(extensions=None,), - outputVolume=dict(extensions=None,), + outputLandmarksInACPCAlignedSpace=dict( + extensions=None, + ), + outputLandmarksInInputSpace=dict( + extensions=None, + ), + outputMRML=dict( + extensions=None, + ), + outputResampledVolume=dict( + extensions=None, + ), + outputTransform=dict( + extensions=None, + ), + outputUntransformedClippedVolume=dict( + extensions=None, + ), + outputVerificationScript=dict( + extensions=None, + ), + outputVolume=dict( + extensions=None, + ), resultsDir=dict(), - writeBranded2DImage=dict(extensions=None,), + writeBranded2DImage=dict( + extensions=None, + ), ) outputs = BRAINSConstellationDetector.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py index 06bf0165ca..4a395fbc14 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py @@ -4,15 +4,39 @@ def test_BRAINSCreateLabelMapFromProbabilityMaps_inputs(): input_map = dict( - args=dict(argstr="%s",), - cleanLabelVolume=dict(argstr="--cleanLabelVolume %s", hash_files=False,), - dirtyLabelVolume=dict(argstr="--dirtyLabelVolume %s", hash_files=False,), - environ=dict(nohash=True, usedefault=True,), - foregroundPriors=dict(argstr="--foregroundPriors %s", sep=",",), - inclusionThreshold=dict(argstr="--inclusionThreshold %f",), - inputProbabilityVolume=dict(argstr="--inputProbabilityVolume %s...",), - nonAirRegionMask=dict(argstr="--nonAirRegionMask %s", extensions=None,), - priorLabelCodes=dict(argstr="--priorLabelCodes %s", sep=",",), + args=dict( + argstr="%s", + ), + cleanLabelVolume=dict( + argstr="--cleanLabelVolume %s", + hash_files=False, + ), + dirtyLabelVolume=dict( + argstr="--dirtyLabelVolume %s", + hash_files=False, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + foregroundPriors=dict( + argstr="--foregroundPriors %s", + sep=",", + ), + inclusionThreshold=dict( + argstr="--inclusionThreshold %f", + ), + inputProbabilityVolume=dict( + argstr="--inputProbabilityVolume %s...", + ), + nonAirRegionMask=dict( + argstr="--nonAirRegionMask %s", + extensions=None, + ), + priorLabelCodes=dict( + argstr="--priorLabelCodes %s", + sep=",", + ), ) inputs = BRAINSCreateLabelMapFromProbabilityMaps.input_spec() @@ -23,8 +47,12 @@ def test_BRAINSCreateLabelMapFromProbabilityMaps_inputs(): def test_BRAINSCreateLabelMapFromProbabilityMaps_outputs(): output_map = dict( - 
cleanLabelVolume=dict(extensions=None,), - dirtyLabelVolume=dict(extensions=None,), + cleanLabelVolume=dict( + extensions=None, + ), + dirtyLabelVolume=dict( + extensions=None, + ), ) outputs = BRAINSCreateLabelMapFromProbabilityMaps.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py index c72579c470..e101cde2cc 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py @@ -4,27 +4,66 @@ def test_BRAINSCut_inputs(): input_map = dict( - NoTrainingVectorShuffling=dict(argstr="--NoTrainingVectorShuffling ",), - applyModel=dict(argstr="--applyModel ",), - args=dict(argstr="%s",), - computeSSEOn=dict(argstr="--computeSSEOn ",), - createVectors=dict(argstr="--createVectors ",), - environ=dict(nohash=True, usedefault=True,), - generateProbability=dict(argstr="--generateProbability ",), - histogramEqualization=dict(argstr="--histogramEqualization ",), - method=dict(argstr="--method %s",), + NoTrainingVectorShuffling=dict( + argstr="--NoTrainingVectorShuffling ", + ), + applyModel=dict( + argstr="--applyModel ", + ), + args=dict( + argstr="%s", + ), + computeSSEOn=dict( + argstr="--computeSSEOn ", + ), + createVectors=dict( + argstr="--createVectors ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + generateProbability=dict( + argstr="--generateProbability ", + ), + histogramEqualization=dict( + argstr="--histogramEqualization ", + ), + method=dict( + argstr="--method %s", + ), modelConfigurationFilename=dict( - argstr="--modelConfigurationFilename %s", extensions=None, - ), - modelFilename=dict(argstr="--modelFilename %s",), - multiStructureThreshold=dict(argstr="--multiStructureThreshold ",), - netConfiguration=dict(argstr="--netConfiguration %s", extensions=None,), - numberOfTrees=dict(argstr="--numberOfTrees %d",), - randomTreeDepth=dict(argstr="--randomTreeDepth %d",), - trainModel=dict(argstr="--trainModel ",), - trainModelStartIndex=dict(argstr="--trainModelStartIndex %d",), - validate=dict(argstr="--validate ",), - verbose=dict(argstr="--verbose %d",), + argstr="--modelConfigurationFilename %s", + extensions=None, + ), + modelFilename=dict( + argstr="--modelFilename %s", + ), + multiStructureThreshold=dict( + argstr="--multiStructureThreshold ", + ), + netConfiguration=dict( + argstr="--netConfiguration %s", + extensions=None, + ), + numberOfTrees=dict( + argstr="--numberOfTrees %d", + ), + randomTreeDepth=dict( + argstr="--randomTreeDepth %d", + ), + trainModel=dict( + argstr="--trainModel ", + ), + trainModelStartIndex=dict( + argstr="--trainModelStartIndex %d", + ), + validate=dict( + argstr="--validate ", + ), + verbose=dict( + argstr="--verbose %d", + ), ) inputs = BRAINSCut.input_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py index 7f91e4e803..6ff468ba47 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py @@ -4,20 +4,40 @@ def test_BRAINSMultiSTAPLE_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), inputCompositeT1Volume=dict( - argstr="--inputCompositeT1Volume %s", 
extensions=None, + argstr="--inputCompositeT1Volume %s", + extensions=None, + ), + inputLabelVolume=dict( + argstr="--inputLabelVolume %s...", + ), + inputTransform=dict( + argstr="--inputTransform %s...", + ), + labelForUndecidedPixels=dict( + argstr="--labelForUndecidedPixels %d", ), - inputLabelVolume=dict(argstr="--inputLabelVolume %s...",), - inputTransform=dict(argstr="--inputTransform %s...",), - labelForUndecidedPixels=dict(argstr="--labelForUndecidedPixels %d",), outputConfusionMatrix=dict( - argstr="--outputConfusionMatrix %s", hash_files=False, + argstr="--outputConfusionMatrix %s", + hash_files=False, + ), + outputMultiSTAPLE=dict( + argstr="--outputMultiSTAPLE %s", + hash_files=False, + ), + resampledVolumePrefix=dict( + argstr="--resampledVolumePrefix %s", + ), + skipResampling=dict( + argstr="--skipResampling ", ), - outputMultiSTAPLE=dict(argstr="--outputMultiSTAPLE %s", hash_files=False,), - resampledVolumePrefix=dict(argstr="--resampledVolumePrefix %s",), - skipResampling=dict(argstr="--skipResampling ",), ) inputs = BRAINSMultiSTAPLE.input_spec() @@ -28,8 +48,12 @@ def test_BRAINSMultiSTAPLE_inputs(): def test_BRAINSMultiSTAPLE_outputs(): output_map = dict( - outputConfusionMatrix=dict(extensions=None,), - outputMultiSTAPLE=dict(extensions=None,), + outputConfusionMatrix=dict( + extensions=None, + ), + outputMultiSTAPLE=dict( + extensions=None, + ), ) outputs = BRAINSMultiSTAPLE.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py index 69562cc9ce..f65c8c9b88 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py @@ -4,19 +4,49 @@ def test_BRAINSROIAuto_inputs(): input_map = dict( - ROIAutoDilateSize=dict(argstr="--ROIAutoDilateSize %f",), - args=dict(argstr="%s",), - closingSize=dict(argstr="--closingSize %f",), - cropOutput=dict(argstr="--cropOutput ",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - maskOutput=dict(argstr="--maskOutput ",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - otsuPercentileThreshold=dict(argstr="--otsuPercentileThreshold %f",), - outputROIMaskVolume=dict(argstr="--outputROIMaskVolume %s", hash_files=False,), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - outputVolumePixelType=dict(argstr="--outputVolumePixelType %s",), - thresholdCorrectionFactor=dict(argstr="--thresholdCorrectionFactor %f",), + ROIAutoDilateSize=dict( + argstr="--ROIAutoDilateSize %f", + ), + args=dict( + argstr="%s", + ), + closingSize=dict( + argstr="--closingSize %f", + ), + cropOutput=dict( + argstr="--cropOutput ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + maskOutput=dict( + argstr="--maskOutput ", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + otsuPercentileThreshold=dict( + argstr="--otsuPercentileThreshold %f", + ), + outputROIMaskVolume=dict( + argstr="--outputROIMaskVolume %s", + hash_files=False, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + outputVolumePixelType=dict( + argstr="--outputVolumePixelType %s", + ), + thresholdCorrectionFactor=dict( + argstr="--thresholdCorrectionFactor %f", + ), ) inputs = BRAINSROIAuto.input_spec() @@ -27,7 +57,12 @@ def test_BRAINSROIAuto_inputs(): def 
test_BRAINSROIAuto_outputs(): output_map = dict( - outputROIMaskVolume=dict(extensions=None,), outputVolume=dict(extensions=None,), + outputROIMaskVolume=dict( + extensions=None, + ), + outputVolume=dict( + extensions=None, + ), ) outputs = BRAINSROIAuto.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py index fd28644cdc..1dd20adecd 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py @@ -4,22 +4,40 @@ def test_BinaryMaskEditorBasedOnLandmarks_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputBinaryVolume=dict(argstr="--inputBinaryVolume %s", extensions=None,), - inputLandmarkNames=dict(argstr="--inputLandmarkNames %s", sep=",",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputBinaryVolume=dict( + argstr="--inputBinaryVolume %s", + extensions=None, + ), + inputLandmarkNames=dict( + argstr="--inputLandmarkNames %s", + sep=",", + ), inputLandmarkNamesForObliquePlane=dict( - argstr="--inputLandmarkNamesForObliquePlane %s", sep=",", + argstr="--inputLandmarkNamesForObliquePlane %s", + sep=",", ), inputLandmarksFilename=dict( - argstr="--inputLandmarksFilename %s", extensions=None, + argstr="--inputLandmarksFilename %s", + extensions=None, + ), + outputBinaryVolume=dict( + argstr="--outputBinaryVolume %s", + hash_files=False, ), - outputBinaryVolume=dict(argstr="--outputBinaryVolume %s", hash_files=False,), setCutDirectionForLandmark=dict( - argstr="--setCutDirectionForLandmark %s", sep=",", + argstr="--setCutDirectionForLandmark %s", + sep=",", ), setCutDirectionForObliquePlane=dict( - argstr="--setCutDirectionForObliquePlane %s", sep=",", + argstr="--setCutDirectionForObliquePlane %s", + sep=",", ), ) inputs = BinaryMaskEditorBasedOnLandmarks.input_spec() @@ -30,7 +48,11 @@ def test_BinaryMaskEditorBasedOnLandmarks_inputs(): def test_BinaryMaskEditorBasedOnLandmarks_outputs(): - output_map = dict(outputBinaryVolume=dict(extensions=None,),) + output_map = dict( + outputBinaryVolume=dict( + extensions=None, + ), + ) outputs = BinaryMaskEditorBasedOnLandmarks.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py index 2fe0fc16ce..272327f4db 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py @@ -4,17 +4,42 @@ def test_ESLR_inputs(): input_map = dict( - args=dict(argstr="%s",), - closingSize=dict(argstr="--closingSize %d",), - environ=dict(nohash=True, usedefault=True,), - high=dict(argstr="--high %d",), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - low=dict(argstr="--low %d",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - openingSize=dict(argstr="--openingSize %d",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - preserveOutside=dict(argstr="--preserveOutside ",), - safetySize=dict(argstr="--safetySize %d",), + args=dict( + argstr="%s", + ), + closingSize=dict( + argstr="--closingSize %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + high=dict( + argstr="--high %d", + ), 
+ inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + low=dict( + argstr="--low %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + openingSize=dict( + argstr="--openingSize %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + preserveOutside=dict( + argstr="--preserveOutside ", + ), + safetySize=dict( + argstr="--safetySize %d", + ), ) inputs = ESLR.input_spec() @@ -24,7 +49,11 @@ def test_ESLR_inputs(): def test_ESLR_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = ESLR.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/tests/test_auto_DWICompare.py b/nipype/interfaces/semtools/tests/test_auto_DWICompare.py index a9a270d6a5..b691c097a8 100644 --- a/nipype/interfaces/semtools/tests/test_auto_DWICompare.py +++ b/nipype/interfaces/semtools/tests/test_auto_DWICompare.py @@ -4,10 +4,21 @@ def test_DWICompare_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume1=dict(argstr="--inputVolume1 %s", extensions=None,), - inputVolume2=dict(argstr="--inputVolume2 %s", extensions=None,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume1=dict( + argstr="--inputVolume1 %s", + extensions=None, + ), + inputVolume2=dict( + argstr="--inputVolume2 %s", + extensions=None, + ), ) inputs = DWICompare.input_spec() diff --git a/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py b/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py index f8f099104d..ad4dcb12d4 100644 --- a/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py +++ b/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py @@ -4,11 +4,24 @@ def test_DWISimpleCompare_inputs(): input_map = dict( - args=dict(argstr="%s",), - checkDWIData=dict(argstr="--checkDWIData ",), - environ=dict(nohash=True, usedefault=True,), - inputVolume1=dict(argstr="--inputVolume1 %s", extensions=None,), - inputVolume2=dict(argstr="--inputVolume2 %s", extensions=None,), + args=dict( + argstr="%s", + ), + checkDWIData=dict( + argstr="--checkDWIData ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume1=dict( + argstr="--inputVolume1 %s", + extensions=None, + ), + inputVolume2=dict( + argstr="--inputVolume2 %s", + extensions=None, + ), ) inputs = DWISimpleCompare.input_spec() diff --git a/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py b/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py index 99d8d5d226..3f857d8085 100644 --- a/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py +++ b/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py @@ -4,10 +4,21 @@ def test_GenerateCsfClippedFromClassifiedImage_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputCassifiedVolume=dict(argstr="--inputCassifiedVolume %s", extensions=None,), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputCassifiedVolume=dict( + argstr="--inputCassifiedVolume %s", + extensions=None, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = 
GenerateCsfClippedFromClassifiedImage.input_spec() @@ -17,7 +28,11 @@ def test_GenerateCsfClippedFromClassifiedImage_inputs(): def test_GenerateCsfClippedFromClassifiedImage_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = GenerateCsfClippedFromClassifiedImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py index 2dabdd4b6d..81fee98e93 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py @@ -4,22 +4,53 @@ def test_BRAINSAlignMSP_inputs(): input_map = dict( - BackgroundFillValue=dict(argstr="--BackgroundFillValue %s",), - OutputresampleMSP=dict(argstr="--OutputresampleMSP %s", hash_files=False,), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - interpolationMode=dict(argstr="--interpolationMode %s",), - mspQualityLevel=dict(argstr="--mspQualityLevel %d",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - rescaleIntensities=dict(argstr="--rescaleIntensities ",), + BackgroundFillValue=dict( + argstr="--BackgroundFillValue %s", + ), + OutputresampleMSP=dict( + argstr="--OutputresampleMSP %s", + hash_files=False, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + interpolationMode=dict( + argstr="--interpolationMode %s", + ), + mspQualityLevel=dict( + argstr="--mspQualityLevel %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + rescaleIntensities=dict( + argstr="--rescaleIntensities ", + ), rescaleIntensitiesOutputRange=dict( - argstr="--rescaleIntensitiesOutputRange %s", sep=",", + argstr="--rescaleIntensitiesOutputRange %s", + sep=",", + ), + resultsDir=dict( + argstr="--resultsDir %s", + hash_files=False, + ), + trimRescaledIntensities=dict( + argstr="--trimRescaledIntensities %f", + ), + verbose=dict( + argstr="--verbose ", + ), + writedebuggingImagesLevel=dict( + argstr="--writedebuggingImagesLevel %d", ), - resultsDir=dict(argstr="--resultsDir %s", hash_files=False,), - trimRescaledIntensities=dict(argstr="--trimRescaledIntensities %f",), - verbose=dict(argstr="--verbose ",), - writedebuggingImagesLevel=dict(argstr="--writedebuggingImagesLevel %d",), ) inputs = BRAINSAlignMSP.input_spec() @@ -29,7 +60,12 @@ def test_BRAINSAlignMSP_inputs(): def test_BRAINSAlignMSP_outputs(): - output_map = dict(OutputresampleMSP=dict(extensions=None,), resultsDir=dict(),) + output_map = dict( + OutputresampleMSP=dict( + extensions=None, + ), + resultsDir=dict(), + ) outputs = BRAINSAlignMSP.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py index 76958e0b2f..92e45758b1 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py @@ -4,13 +4,30 @@ def test_BRAINSClipInferior_inputs(): input_map = dict( - BackgroundFillValue=dict(argstr="--BackgroundFillValue %s",), - acLowerBound=dict(argstr="--acLowerBound %f",), - 
args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + BackgroundFillValue=dict( + argstr="--BackgroundFillValue %s", + ), + acLowerBound=dict( + argstr="--acLowerBound %f", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = BRAINSClipInferior.input_spec() @@ -20,7 +37,11 @@ def test_BRAINSClipInferior_inputs(): def test_BRAINSClipInferior_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = BRAINSClipInferior.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py index 61129f62cc..2e26a91e05 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py @@ -4,25 +4,56 @@ def test_BRAINSConstellationModeler_inputs(): input_map = dict( - BackgroundFillValue=dict(argstr="--BackgroundFillValue %s",), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputTrainingList=dict(argstr="--inputTrainingList %s", extensions=None,), - mspQualityLevel=dict(argstr="--mspQualityLevel %d",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), + BackgroundFillValue=dict( + argstr="--BackgroundFillValue %s", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputTrainingList=dict( + argstr="--inputTrainingList %s", + extensions=None, + ), + mspQualityLevel=dict( + argstr="--mspQualityLevel %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), optimizedLandmarksFilenameExtender=dict( argstr="--optimizedLandmarksFilenameExtender %s", ), - outputModel=dict(argstr="--outputModel %s", hash_files=False,), - rescaleIntensities=dict(argstr="--rescaleIntensities ",), + outputModel=dict( + argstr="--outputModel %s", + hash_files=False, + ), + rescaleIntensities=dict( + argstr="--rescaleIntensities ", + ), rescaleIntensitiesOutputRange=dict( - argstr="--rescaleIntensitiesOutputRange %s", sep=",", + argstr="--rescaleIntensitiesOutputRange %s", + sep=",", + ), + resultsDir=dict( + argstr="--resultsDir %s", + hash_files=False, + ), + saveOptimizedLandmarks=dict( + argstr="--saveOptimizedLandmarks ", + ), + trimRescaledIntensities=dict( + argstr="--trimRescaledIntensities %f", + ), + verbose=dict( + argstr="--verbose ", + ), + writedebuggingImagesLevel=dict( + argstr="--writedebuggingImagesLevel %d", ), - resultsDir=dict(argstr="--resultsDir %s", hash_files=False,), - saveOptimizedLandmarks=dict(argstr="--saveOptimizedLandmarks ",), - trimRescaledIntensities=dict(argstr="--trimRescaledIntensities %f",), - verbose=dict(argstr="--verbose ",), - writedebuggingImagesLevel=dict(argstr="--writedebuggingImagesLevel %d",), ) inputs = BRAINSConstellationModeler.input_spec() @@ -32,7 +63,12 @@ def test_BRAINSConstellationModeler_inputs(): def 
test_BRAINSConstellationModeler_outputs(): - output_map = dict(outputModel=dict(extensions=None,), resultsDir=dict(),) + output_map = dict( + outputModel=dict( + extensions=None, + ), + resultsDir=dict(), + ) outputs = BRAINSConstellationModeler.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py index 8ac3c8a21a..c511be64c5 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py @@ -4,12 +4,27 @@ def test_BRAINSEyeDetector_inputs(): input_map = dict( - args=dict(argstr="%s",), - debugDir=dict(argstr="--debugDir %s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + args=dict( + argstr="%s", + ), + debugDir=dict( + argstr="--debugDir %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = BRAINSEyeDetector.input_spec() @@ -19,7 +34,11 @@ def test_BRAINSEyeDetector_inputs(): def test_BRAINSEyeDetector_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = BRAINSEyeDetector.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py index cf20b5a37b..05ba1ae7d6 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py @@ -4,14 +4,35 @@ def test_BRAINSInitializedControlPoints_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputLandmarksFile=dict(argstr="--outputLandmarksFile %s",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - permuteOrder=dict(argstr="--permuteOrder %s", sep=",",), - splineGridSize=dict(argstr="--splineGridSize %s", sep=",",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputLandmarksFile=dict( + argstr="--outputLandmarksFile %s", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + permuteOrder=dict( + argstr="--permuteOrder %s", + sep=",", + ), + splineGridSize=dict( + argstr="--splineGridSize %s", + sep=",", + ), ) inputs = BRAINSInitializedControlPoints.input_spec() @@ -21,7 +42,11 @@ def test_BRAINSInitializedControlPoints_inputs(): def test_BRAINSInitializedControlPoints_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = BRAINSInitializedControlPoints.output_spec() 
for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py index cdb17c6232..efaa484008 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py @@ -4,17 +4,28 @@ def test_BRAINSLandmarkInitializer_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), inputFixedLandmarkFilename=dict( - argstr="--inputFixedLandmarkFilename %s", extensions=None, + argstr="--inputFixedLandmarkFilename %s", + extensions=None, ), inputMovingLandmarkFilename=dict( - argstr="--inputMovingLandmarkFilename %s", extensions=None, + argstr="--inputMovingLandmarkFilename %s", + extensions=None, + ), + inputWeightFilename=dict( + argstr="--inputWeightFilename %s", + extensions=None, ), - inputWeightFilename=dict(argstr="--inputWeightFilename %s", extensions=None,), outputTransformFilename=dict( - argstr="--outputTransformFilename %s", hash_files=False, + argstr="--outputTransformFilename %s", + hash_files=False, ), ) inputs = BRAINSLandmarkInitializer.input_spec() @@ -25,7 +36,11 @@ def test_BRAINSLandmarkInitializer_inputs(): def test_BRAINSLandmarkInitializer_outputs(): - output_map = dict(outputTransformFilename=dict(extensions=None,),) + output_map = dict( + outputTransformFilename=dict( + extensions=None, + ), + ) outputs = BRAINSLandmarkInitializer.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py index a9fdee554b..beb7a5f664 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py @@ -4,10 +4,20 @@ def test_BRAINSLinearModelerEPCA_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputTrainingList=dict(argstr="--inputTrainingList %s", extensions=None,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputTrainingList=dict( + argstr="--inputTrainingList %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), ) inputs = BRAINSLinearModelerEPCA.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py index f459589580..d0cdc8cc7f 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py @@ -4,18 +4,39 @@ def test_BRAINSLmkTransform_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputFixedLandmarks=dict(argstr="--inputFixedLandmarks %s", extensions=None,), - inputMovingLandmarks=dict(argstr="--inputMovingLandmarks %s", extensions=None,), - inputMovingVolume=dict(argstr="--inputMovingVolume %s", extensions=None,), - inputReferenceVolume=dict(argstr="--inputReferenceVolume %s", extensions=None,), - 
numberOfThreads=dict(argstr="--numberOfThreads %d",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputFixedLandmarks=dict( + argstr="--inputFixedLandmarks %s", + extensions=None, + ), + inputMovingLandmarks=dict( + argstr="--inputMovingLandmarks %s", + extensions=None, + ), + inputMovingVolume=dict( + argstr="--inputMovingVolume %s", + extensions=None, + ), + inputReferenceVolume=dict( + argstr="--inputReferenceVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), outputAffineTransform=dict( - argstr="--outputAffineTransform %s", hash_files=False, + argstr="--outputAffineTransform %s", + hash_files=False, ), outputResampledVolume=dict( - argstr="--outputResampledVolume %s", hash_files=False, + argstr="--outputResampledVolume %s", + hash_files=False, ), ) inputs = BRAINSLmkTransform.input_spec() @@ -27,8 +48,12 @@ def test_BRAINSLmkTransform_inputs(): def test_BRAINSLmkTransform_outputs(): output_map = dict( - outputAffineTransform=dict(extensions=None,), - outputResampledVolume=dict(extensions=None,), + outputAffineTransform=dict( + extensions=None, + ), + outputResampledVolume=dict( + extensions=None, + ), ) outputs = BRAINSLmkTransform.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py index b16829ef8f..e6eac9cf2d 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py @@ -4,24 +4,70 @@ def test_BRAINSMush_inputs(): input_map = dict( - args=dict(argstr="%s",), - boundingBoxSize=dict(argstr="--boundingBoxSize %s", sep=",",), - boundingBoxStart=dict(argstr="--boundingBoxStart %s", sep=",",), - desiredMean=dict(argstr="--desiredMean %f",), - desiredVariance=dict(argstr="--desiredVariance %f",), - environ=dict(nohash=True, usedefault=True,), - inputFirstVolume=dict(argstr="--inputFirstVolume %s", extensions=None,), - inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None,), - inputSecondVolume=dict(argstr="--inputSecondVolume %s", extensions=None,), - lowerThresholdFactor=dict(argstr="--lowerThresholdFactor %f",), - lowerThresholdFactorPre=dict(argstr="--lowerThresholdFactorPre %f",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputMask=dict(argstr="--outputMask %s", hash_files=False,), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - outputWeightsFile=dict(argstr="--outputWeightsFile %s", hash_files=False,), - seed=dict(argstr="--seed %s", sep=",",), - upperThresholdFactor=dict(argstr="--upperThresholdFactor %f",), - upperThresholdFactorPre=dict(argstr="--upperThresholdFactorPre %f",), + args=dict( + argstr="%s", + ), + boundingBoxSize=dict( + argstr="--boundingBoxSize %s", + sep=",", + ), + boundingBoxStart=dict( + argstr="--boundingBoxStart %s", + sep=",", + ), + desiredMean=dict( + argstr="--desiredMean %f", + ), + desiredVariance=dict( + argstr="--desiredVariance %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputFirstVolume=dict( + argstr="--inputFirstVolume %s", + extensions=None, + ), + inputMaskVolume=dict( + argstr="--inputMaskVolume %s", + extensions=None, + ), + inputSecondVolume=dict( + argstr="--inputSecondVolume %s", + extensions=None, + ), + lowerThresholdFactor=dict( + argstr="--lowerThresholdFactor %f", + ), + lowerThresholdFactorPre=dict( + argstr="--lowerThresholdFactorPre %f", + ), + numberOfThreads=dict( + 
argstr="--numberOfThreads %d", + ), + outputMask=dict( + argstr="--outputMask %s", + hash_files=False, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + outputWeightsFile=dict( + argstr="--outputWeightsFile %s", + hash_files=False, + ), + seed=dict( + argstr="--seed %s", + sep=",", + ), + upperThresholdFactor=dict( + argstr="--upperThresholdFactor %f", + ), + upperThresholdFactorPre=dict( + argstr="--upperThresholdFactorPre %f", + ), ) inputs = BRAINSMush.input_spec() @@ -32,9 +78,15 @@ def test_BRAINSMush_inputs(): def test_BRAINSMush_outputs(): output_map = dict( - outputMask=dict(extensions=None,), - outputVolume=dict(extensions=None,), - outputWeightsFile=dict(extensions=None,), + outputMask=dict( + extensions=None, + ), + outputVolume=dict( + extensions=None, + ), + outputWeightsFile=dict( + extensions=None, + ), ) outputs = BRAINSMush.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py index 30bf49a6ad..a120b062a4 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py @@ -4,21 +4,39 @@ def test_BRAINSSnapShotWriter_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputBinaryVolumes=dict(argstr="--inputBinaryVolumes %s...",), - inputPlaneDirection=dict(argstr="--inputPlaneDirection %s", sep=",",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputBinaryVolumes=dict( + argstr="--inputBinaryVolumes %s...", + ), + inputPlaneDirection=dict( + argstr="--inputPlaneDirection %s", + sep=",", + ), inputSliceToExtractInIndex=dict( - argstr="--inputSliceToExtractInIndex %s", sep=",", + argstr="--inputSliceToExtractInIndex %s", + sep=",", ), inputSliceToExtractInPercent=dict( - argstr="--inputSliceToExtractInPercent %s", sep=",", + argstr="--inputSliceToExtractInPercent %s", + sep=",", ), inputSliceToExtractInPhysicalPoint=dict( - argstr="--inputSliceToExtractInPhysicalPoint %s", sep=",", + argstr="--inputSliceToExtractInPhysicalPoint %s", + sep=",", + ), + inputVolumes=dict( + argstr="--inputVolumes %s...", + ), + outputFilename=dict( + argstr="--outputFilename %s", + hash_files=False, ), - inputVolumes=dict(argstr="--inputVolumes %s...",), - outputFilename=dict(argstr="--outputFilename %s", hash_files=False,), ) inputs = BRAINSSnapShotWriter.input_spec() @@ -28,7 +46,11 @@ def test_BRAINSSnapShotWriter_inputs(): def test_BRAINSSnapShotWriter_outputs(): - output_map = dict(outputFilename=dict(extensions=None,),) + output_map = dict( + outputFilename=dict( + extensions=None, + ), + ) outputs = BRAINSSnapShotWriter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py index 4316f4561b..1eebbb0cec 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py @@ -4,14 +4,35 @@ def test_BRAINSTransformConvert_inputs(): input_map = dict( - args=dict(argstr="%s",), - displacementVolume=dict(argstr="--displacementVolume %s", hash_files=False,), - environ=dict(nohash=True, usedefault=True,), - inputTransform=dict(argstr="--inputTransform 
%s", extensions=None,), - outputPrecisionType=dict(argstr="--outputPrecisionType %s",), - outputTransform=dict(argstr="--outputTransform %s", hash_files=False,), - outputTransformType=dict(argstr="--outputTransformType %s",), - referenceVolume=dict(argstr="--referenceVolume %s", extensions=None,), + args=dict( + argstr="%s", + ), + displacementVolume=dict( + argstr="--displacementVolume %s", + hash_files=False, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputTransform=dict( + argstr="--inputTransform %s", + extensions=None, + ), + outputPrecisionType=dict( + argstr="--outputPrecisionType %s", + ), + outputTransform=dict( + argstr="--outputTransform %s", + hash_files=False, + ), + outputTransformType=dict( + argstr="--outputTransformType %s", + ), + referenceVolume=dict( + argstr="--referenceVolume %s", + extensions=None, + ), ) inputs = BRAINSTransformConvert.input_spec() @@ -22,8 +43,12 @@ def test_BRAINSTransformConvert_inputs(): def test_BRAINSTransformConvert_outputs(): output_map = dict( - displacementVolume=dict(extensions=None,), - outputTransform=dict(extensions=None,), + displacementVolume=dict( + extensions=None, + ), + outputTransform=dict( + extensions=None, + ), ) outputs = BRAINSTransformConvert.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py index ed3180746c..fa68f51e21 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py @@ -4,16 +4,39 @@ def test_BRAINSTrimForegroundInDirection_inputs(): input_map = dict( - BackgroundFillValue=dict(argstr="--BackgroundFillValue %s",), - args=dict(argstr="%s",), - closingSize=dict(argstr="--closingSize %d",), - directionCode=dict(argstr="--directionCode %d",), - environ=dict(nohash=True, usedefault=True,), - headSizeLimit=dict(argstr="--headSizeLimit %f",), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - otsuPercentileThreshold=dict(argstr="--otsuPercentileThreshold %f",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), + BackgroundFillValue=dict( + argstr="--BackgroundFillValue %s", + ), + args=dict( + argstr="%s", + ), + closingSize=dict( + argstr="--closingSize %d", + ), + directionCode=dict( + argstr="--directionCode %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + headSizeLimit=dict( + argstr="--headSizeLimit %f", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + otsuPercentileThreshold=dict( + argstr="--otsuPercentileThreshold %f", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = BRAINSTrimForegroundInDirection.input_spec() @@ -23,7 +46,11 @@ def test_BRAINSTrimForegroundInDirection_inputs(): def test_BRAINSTrimForegroundInDirection_outputs(): - output_map = dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = BRAINSTrimForegroundInDirection.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py b/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py index b7a9167092..14ec09298c 100644 --- 
a/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py @@ -4,11 +4,19 @@ def test_CleanUpOverlapLabels_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputBinaryVolumes=dict(argstr="--inputBinaryVolumes %s...",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputBinaryVolumes=dict( + argstr="--inputBinaryVolumes %s...", + ), outputBinaryVolumes=dict( - argstr="--outputBinaryVolumes %s...", hash_files=False, + argstr="--outputBinaryVolumes %s...", + hash_files=False, ), ) inputs = CleanUpOverlapLabels.input_spec() @@ -19,7 +27,9 @@ def test_CleanUpOverlapLabels_inputs(): def test_CleanUpOverlapLabels_outputs(): - output_map = dict(outputBinaryVolumes=dict(),) + output_map = dict( + outputBinaryVolumes=dict(), + ) outputs = CleanUpOverlapLabels.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py b/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py index bf2d9ab03d..4a0d7c89c2 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py @@ -4,28 +4,69 @@ def test_FindCenterOfBrain_inputs(): input_map = dict( - args=dict(argstr="%s",), - axis=dict(argstr="--axis %d",), - backgroundValue=dict(argstr="--backgroundValue %d",), - clippedImageMask=dict(argstr="--clippedImageMask %s", hash_files=False,), - closingSize=dict(argstr="--closingSize %d",), + args=dict( + argstr="%s", + ), + axis=dict( + argstr="--axis %d", + ), + backgroundValue=dict( + argstr="--backgroundValue %d", + ), + clippedImageMask=dict( + argstr="--clippedImageMask %s", + hash_files=False, + ), + closingSize=dict( + argstr="--closingSize %d", + ), debugAfterGridComputationsForegroundImage=dict( - argstr="--debugAfterGridComputationsForegroundImage %s", hash_files=False, + argstr="--debugAfterGridComputationsForegroundImage %s", + hash_files=False, ), debugClippedImageMask=dict( - argstr="--debugClippedImageMask %s", hash_files=False, - ), - debugDistanceImage=dict(argstr="--debugDistanceImage %s", hash_files=False,), - debugGridImage=dict(argstr="--debugGridImage %s", hash_files=False,), - debugTrimmedImage=dict(argstr="--debugTrimmedImage %s", hash_files=False,), - environ=dict(nohash=True, usedefault=True,), - generateDebugImages=dict(argstr="--generateDebugImages ",), - headSizeEstimate=dict(argstr="--headSizeEstimate %f",), - headSizeLimit=dict(argstr="--headSizeLimit %f",), - imageMask=dict(argstr="--imageMask %s", extensions=None,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - maximize=dict(argstr="--maximize ",), - otsuPercentileThreshold=dict(argstr="--otsuPercentileThreshold %f",), + argstr="--debugClippedImageMask %s", + hash_files=False, + ), + debugDistanceImage=dict( + argstr="--debugDistanceImage %s", + hash_files=False, + ), + debugGridImage=dict( + argstr="--debugGridImage %s", + hash_files=False, + ), + debugTrimmedImage=dict( + argstr="--debugTrimmedImage %s", + hash_files=False, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + generateDebugImages=dict( + argstr="--generateDebugImages ", + ), + headSizeEstimate=dict( + argstr="--headSizeEstimate %f", + ), + headSizeLimit=dict( + argstr="--headSizeLimit %f", + ), + imageMask=dict( + 
argstr="--imageMask %s", + extensions=None, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + maximize=dict( + argstr="--maximize ", + ), + otsuPercentileThreshold=dict( + argstr="--otsuPercentileThreshold %f", + ), ) inputs = FindCenterOfBrain.input_spec() @@ -36,12 +77,24 @@ def test_FindCenterOfBrain_inputs(): def test_FindCenterOfBrain_outputs(): output_map = dict( - clippedImageMask=dict(extensions=None,), - debugAfterGridComputationsForegroundImage=dict(extensions=None,), - debugClippedImageMask=dict(extensions=None,), - debugDistanceImage=dict(extensions=None,), - debugGridImage=dict(extensions=None,), - debugTrimmedImage=dict(extensions=None,), + clippedImageMask=dict( + extensions=None, + ), + debugAfterGridComputationsForegroundImage=dict( + extensions=None, + ), + debugClippedImageMask=dict( + extensions=None, + ), + debugDistanceImage=dict( + extensions=None, + ), + debugGridImage=dict( + extensions=None, + ), + debugTrimmedImage=dict( + extensions=None, + ), ) outputs = FindCenterOfBrain.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py b/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py index 289473e902..d3840ec7bd 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py @@ -4,11 +4,23 @@ def test_GenerateLabelMapFromProbabilityMap_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputVolumes=dict(argstr="--inputVolumes %s...",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputLabelVolume=dict(argstr="--outputLabelVolume %s", hash_files=False,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolumes=dict( + argstr="--inputVolumes %s...", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputLabelVolume=dict( + argstr="--outputLabelVolume %s", + hash_files=False, + ), ) inputs = GenerateLabelMapFromProbabilityMap.input_spec() @@ -18,7 +30,11 @@ def test_GenerateLabelMapFromProbabilityMap_inputs(): def test_GenerateLabelMapFromProbabilityMap_outputs(): - output_map = dict(outputLabelVolume=dict(extensions=None,),) + output_map = dict( + outputLabelVolume=dict( + extensions=None, + ), + ) outputs = GenerateLabelMapFromProbabilityMap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py b/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py index 7d9f72aedb..5b71204a67 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py @@ -4,17 +4,44 @@ def test_ImageRegionPlotter_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputBinaryROIVolume=dict(argstr="--inputBinaryROIVolume %s", extensions=None,), - inputLabelVolume=dict(argstr="--inputLabelVolume %s", extensions=None,), - inputVolume1=dict(argstr="--inputVolume1 %s", extensions=None,), - inputVolume2=dict(argstr="--inputVolume2 %s", extensions=None,), - numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d",), - outputJointHistogramData=dict(argstr="--outputJointHistogramData %s",), - 
useIntensityForHistogram=dict(argstr="--useIntensityForHistogram ",), - useROIAUTO=dict(argstr="--useROIAUTO ",), - verbose=dict(argstr="--verbose ",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputBinaryROIVolume=dict( + argstr="--inputBinaryROIVolume %s", + extensions=None, + ), + inputLabelVolume=dict( + argstr="--inputLabelVolume %s", + extensions=None, + ), + inputVolume1=dict( + argstr="--inputVolume1 %s", + extensions=None, + ), + inputVolume2=dict( + argstr="--inputVolume2 %s", + extensions=None, + ), + numberOfHistogramBins=dict( + argstr="--numberOfHistogramBins %d", + ), + outputJointHistogramData=dict( + argstr="--outputJointHistogramData %s", + ), + useIntensityForHistogram=dict( + argstr="--useIntensityForHistogram ", + ), + useROIAUTO=dict( + argstr="--useROIAUTO ", + ), + verbose=dict( + argstr="--verbose ", + ), ) inputs = ImageRegionPlotter.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py b/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py index 6ac46c3695..d5e61c867a 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py @@ -4,18 +4,35 @@ def test_JointHistogram_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), inputMaskVolumeInXAxis=dict( - argstr="--inputMaskVolumeInXAxis %s", extensions=None, + argstr="--inputMaskVolumeInXAxis %s", + extensions=None, ), inputMaskVolumeInYAxis=dict( - argstr="--inputMaskVolumeInYAxis %s", extensions=None, + argstr="--inputMaskVolumeInYAxis %s", + extensions=None, + ), + inputVolumeInXAxis=dict( + argstr="--inputVolumeInXAxis %s", + extensions=None, + ), + inputVolumeInYAxis=dict( + argstr="--inputVolumeInYAxis %s", + extensions=None, + ), + outputJointHistogramImage=dict( + argstr="--outputJointHistogramImage %s", + ), + verbose=dict( + argstr="--verbose ", ), - inputVolumeInXAxis=dict(argstr="--inputVolumeInXAxis %s", extensions=None,), - inputVolumeInYAxis=dict(argstr="--inputVolumeInYAxis %s", extensions=None,), - outputJointHistogramImage=dict(argstr="--outputJointHistogramImage %s",), - verbose=dict(argstr="--verbose ",), ) inputs = JointHistogram.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py b/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py index f3541344a0..4b7d3431bd 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py @@ -4,15 +4,24 @@ def test_ShuffleVectorsModule_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), inputVectorFileBaseName=dict( - argstr="--inputVectorFileBaseName %s", extensions=None, + argstr="--inputVectorFileBaseName %s", + extensions=None, ), outputVectorFileBaseName=dict( - argstr="--outputVectorFileBaseName %s", hash_files=False, + argstr="--outputVectorFileBaseName %s", + hash_files=False, + ), + resampleProportion=dict( + argstr="--resampleProportion %f", ), - resampleProportion=dict(argstr="--resampleProportion %f",), ) inputs = ShuffleVectorsModule.input_spec() @@ -22,7 +31,11 @@ def 
test_ShuffleVectorsModule_inputs(): def test_ShuffleVectorsModule_outputs(): - output_map = dict(outputVectorFileBaseName=dict(extensions=None,),) + output_map = dict( + outputVectorFileBaseName=dict( + extensions=None, + ), + ) outputs = ShuffleVectorsModule.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py b/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py index f64c302328..896f630839 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py @@ -4,16 +4,34 @@ def test_fcsv_to_hdf5_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - landmarkGlobPattern=dict(argstr="--landmarkGlobPattern %s",), - landmarkTypesList=dict(argstr="--landmarkTypesList %s", extensions=None,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + landmarkGlobPattern=dict( + argstr="--landmarkGlobPattern %s", + ), + landmarkTypesList=dict( + argstr="--landmarkTypesList %s", + extensions=None, + ), landmarksInformationFile=dict( - argstr="--landmarksInformationFile %s", hash_files=False, + argstr="--landmarksInformationFile %s", + hash_files=False, + ), + modelFile=dict( + argstr="--modelFile %s", + hash_files=False, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + versionID=dict( + argstr="--versionID %s", ), - modelFile=dict(argstr="--modelFile %s", hash_files=False,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - versionID=dict(argstr="--versionID %s",), ) inputs = fcsv_to_hdf5.input_spec() @@ -24,8 +42,12 @@ def test_fcsv_to_hdf5_inputs(): def test_fcsv_to_hdf5_outputs(): output_map = dict( - landmarksInformationFile=dict(extensions=None,), - modelFile=dict(extensions=None,), + landmarksInformationFile=dict( + extensions=None, + ), + modelFile=dict( + extensions=None, + ), ) outputs = fcsv_to_hdf5.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py b/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py index 7e02bfc9e1..05aa2a3910 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py @@ -4,10 +4,21 @@ def test_insertMidACPCpoint_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputLandmarkFile=dict(argstr="--inputLandmarkFile %s", extensions=None,), - outputLandmarkFile=dict(argstr="--outputLandmarkFile %s", hash_files=False,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputLandmarkFile=dict( + argstr="--inputLandmarkFile %s", + extensions=None, + ), + outputLandmarkFile=dict( + argstr="--outputLandmarkFile %s", + hash_files=False, + ), ) inputs = insertMidACPCpoint.input_spec() @@ -17,7 +28,11 @@ def test_insertMidACPCpoint_inputs(): def test_insertMidACPCpoint_outputs(): - output_map = dict(outputLandmarkFile=dict(extensions=None,),) + output_map = dict( + outputLandmarkFile=dict( + extensions=None, + ), + ) outputs = insertMidACPCpoint.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py index 
79fedd5b68..472a1326e0 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py @@ -4,11 +4,20 @@ def test_landmarksConstellationAligner_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputLandmarksPaired=dict(argstr="--inputLandmarksPaired %s", extensions=None,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputLandmarksPaired=dict( + argstr="--inputLandmarksPaired %s", + extensions=None, + ), outputLandmarksPaired=dict( - argstr="--outputLandmarksPaired %s", hash_files=False, + argstr="--outputLandmarksPaired %s", + hash_files=False, ), ) inputs = landmarksConstellationAligner.input_spec() @@ -19,7 +28,11 @@ def test_landmarksConstellationAligner_inputs(): def test_landmarksConstellationAligner_outputs(): - output_map = dict(outputLandmarksPaired=dict(extensions=None,),) + output_map = dict( + outputLandmarksPaired=dict( + extensions=None, + ), + ) outputs = landmarksConstellationAligner.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py index 5bd27aa957..c14fdff775 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py @@ -4,12 +4,29 @@ def test_landmarksConstellationWeights_inputs(): input_map = dict( - LLSModel=dict(argstr="--LLSModel %s", extensions=None,), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputTemplateModel=dict(argstr="--inputTemplateModel %s", extensions=None,), - inputTrainingList=dict(argstr="--inputTrainingList %s", extensions=None,), - outputWeightsList=dict(argstr="--outputWeightsList %s", hash_files=False,), + LLSModel=dict( + argstr="--LLSModel %s", + extensions=None, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputTemplateModel=dict( + argstr="--inputTemplateModel %s", + extensions=None, + ), + inputTrainingList=dict( + argstr="--inputTrainingList %s", + extensions=None, + ), + outputWeightsList=dict( + argstr="--outputWeightsList %s", + hash_files=False, + ), ) inputs = landmarksConstellationWeights.input_spec() @@ -19,7 +36,11 @@ def test_landmarksConstellationWeights_inputs(): def test_landmarksConstellationWeights_outputs(): - output_map = dict(outputWeightsList=dict(extensions=None,),) + output_map = dict( + outputWeightsList=dict( + extensions=None, + ), + ) outputs = landmarksConstellationWeights.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py index 7c7f02cacc..9017db6760 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py @@ -4,10 +4,23 @@ def test_DTIexport_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputTensor=dict(argstr="%s", extensions=None, position=-2,), - outputFile=dict(argstr="%s", hash_files=False, position=-1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + 
), + inputTensor=dict( + argstr="%s", + extensions=None, + position=-2, + ), + outputFile=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = DTIexport.input_spec() @@ -17,7 +30,12 @@ def test_DTIexport_inputs(): def test_DTIexport_outputs(): - output_map = dict(outputFile=dict(extensions=None, position=-1,),) + output_map = dict( + outputFile=dict( + extensions=None, + position=-1, + ), + ) outputs = DTIexport.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py index 65ed035819..802d4ce9bc 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py @@ -4,11 +4,26 @@ def test_DTIimport_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputFile=dict(argstr="%s", extensions=None, position=-2,), - outputTensor=dict(argstr="%s", hash_files=False, position=-1,), - testingmode=dict(argstr="--testingmode ",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputFile=dict( + argstr="%s", + extensions=None, + position=-2, + ), + outputTensor=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + testingmode=dict( + argstr="--testingmode ", + ), ) inputs = DTIimport.input_spec() @@ -18,7 +33,12 @@ def test_DTIimport_inputs(): def test_DTIimport_outputs(): - output_map = dict(outputTensor=dict(extensions=None, position=-1,),) + output_map = dict( + outputTensor=dict( + extensions=None, + position=-1, + ), + ) outputs = DTIimport.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py index 3c0d7c8861..e004599c12 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py @@ -4,14 +4,37 @@ def test_DWIJointRicianLMMSEFilter_inputs(): input_map = dict( - args=dict(argstr="%s",), - compressOutput=dict(argstr="--compressOutput ",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="%s", extensions=None, position=-2,), - ng=dict(argstr="--ng %d",), - outputVolume=dict(argstr="%s", hash_files=False, position=-1,), - re=dict(argstr="--re %s", sep=",",), - rf=dict(argstr="--rf %s", sep=",",), + args=dict( + argstr="%s", + ), + compressOutput=dict( + argstr="--compressOutput ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + ng=dict( + argstr="--ng %d", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + re=dict( + argstr="--re %s", + sep=",", + ), + rf=dict( + argstr="--rf %s", + sep=",", + ), ) inputs = DWIJointRicianLMMSEFilter.input_spec() @@ -21,7 +44,12 @@ def test_DWIJointRicianLMMSEFilter_inputs(): def test_DWIJointRicianLMMSEFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = DWIJointRicianLMMSEFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py 
b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py index 8dda7d3105..3c38117737 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py @@ -4,20 +4,55 @@ def test_DWIRicianLMMSEFilter_inputs(): input_map = dict( - args=dict(argstr="%s",), - compressOutput=dict(argstr="--compressOutput ",), - environ=dict(nohash=True, usedefault=True,), - hrf=dict(argstr="--hrf %f",), - inputVolume=dict(argstr="%s", extensions=None, position=-2,), - iter=dict(argstr="--iter %d",), - maxnstd=dict(argstr="--maxnstd %d",), - minnstd=dict(argstr="--minnstd %d",), - mnve=dict(argstr="--mnve %d",), - mnvf=dict(argstr="--mnvf %d",), - outputVolume=dict(argstr="%s", hash_files=False, position=-1,), - re=dict(argstr="--re %s", sep=",",), - rf=dict(argstr="--rf %s", sep=",",), - uav=dict(argstr="--uav ",), + args=dict( + argstr="%s", + ), + compressOutput=dict( + argstr="--compressOutput ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hrf=dict( + argstr="--hrf %f", + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + iter=dict( + argstr="--iter %d", + ), + maxnstd=dict( + argstr="--maxnstd %d", + ), + minnstd=dict( + argstr="--minnstd %d", + ), + mnve=dict( + argstr="--mnve %d", + ), + mnvf=dict( + argstr="--mnvf %d", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + re=dict( + argstr="--re %s", + sep=",", + ), + rf=dict( + argstr="--rf %s", + sep=",", + ), + uav=dict( + argstr="--uav ", + ), ) inputs = DWIRicianLMMSEFilter.input_spec() @@ -27,7 +62,12 @@ def test_DWIRicianLMMSEFilter_inputs(): def test_DWIRicianLMMSEFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = DWIRicianLMMSEFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py index ecb618c2ff..76b305283c 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py @@ -4,14 +4,38 @@ def test_DWIToDTIEstimation_inputs(): input_map = dict( - args=dict(argstr="%s",), - enumeration=dict(argstr="--enumeration %s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="%s", extensions=None, position=-3,), - mask=dict(argstr="--mask %s", extensions=None,), - outputBaseline=dict(argstr="%s", hash_files=False, position=-1,), - outputTensor=dict(argstr="%s", hash_files=False, position=-2,), - shiftNeg=dict(argstr="--shiftNeg ",), + args=dict( + argstr="%s", + ), + enumeration=dict( + argstr="--enumeration %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-3, + ), + mask=dict( + argstr="--mask %s", + extensions=None, + ), + outputBaseline=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + outputTensor=dict( + argstr="%s", + hash_files=False, + position=-2, + ), + shiftNeg=dict( + argstr="--shiftNeg ", + ), ) inputs = DWIToDTIEstimation.input_spec() @@ -22,8 +46,14 @@ def test_DWIToDTIEstimation_inputs(): def test_DWIToDTIEstimation_outputs(): output_map = dict( - outputBaseline=dict(extensions=None, position=-1,), - 
outputTensor=dict(extensions=None, position=-2,), + outputBaseline=dict( + extensions=None, + position=-1, + ), + outputTensor=dict( + extensions=None, + position=-2, + ), ) outputs = DWIToDTIEstimation.output_spec() diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py index 70df302ae6..143194f493 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py @@ -4,11 +4,26 @@ def test_DiffusionTensorScalarMeasurements_inputs(): input_map = dict( - args=dict(argstr="%s",), - enumeration=dict(argstr="--enumeration %s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="%s", extensions=None, position=-3,), - outputScalar=dict(argstr="%s", hash_files=False, position=-1,), + args=dict( + argstr="%s", + ), + enumeration=dict( + argstr="--enumeration %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-3, + ), + outputScalar=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = DiffusionTensorScalarMeasurements.input_spec() @@ -18,7 +33,12 @@ def test_DiffusionTensorScalarMeasurements_inputs(): def test_DiffusionTensorScalarMeasurements_outputs(): - output_map = dict(outputScalar=dict(extensions=None, position=-1,),) + output_map = dict( + outputScalar=dict( + extensions=None, + position=-1, + ), + ) outputs = DiffusionTensorScalarMeasurements.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py index 659f86a642..5b11d2f578 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py @@ -4,13 +4,34 @@ def test_DiffusionWeightedVolumeMasking_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="%s", extensions=None, position=-4,), - otsuomegathreshold=dict(argstr="--otsuomegathreshold %f",), - outputBaseline=dict(argstr="%s", hash_files=False, position=-2,), - removeislands=dict(argstr="--removeislands ",), - thresholdMask=dict(argstr="%s", hash_files=False, position=-1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-4, + ), + otsuomegathreshold=dict( + argstr="--otsuomegathreshold %f", + ), + outputBaseline=dict( + argstr="%s", + hash_files=False, + position=-2, + ), + removeislands=dict( + argstr="--removeislands ", + ), + thresholdMask=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = DiffusionWeightedVolumeMasking.input_spec() @@ -21,8 +42,14 @@ def test_DiffusionWeightedVolumeMasking_inputs(): def test_DiffusionWeightedVolumeMasking_outputs(): output_map = dict( - outputBaseline=dict(extensions=None, position=-2,), - thresholdMask=dict(extensions=None, position=-1,), + outputBaseline=dict( + extensions=None, + position=-2, + ), + thresholdMask=dict( + extensions=None, + position=-1, + ), ) outputs = DiffusionWeightedVolumeMasking.output_spec() diff --git 
a/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py index 8db387f985..c52bb5357d 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py @@ -4,34 +4,102 @@ def test_ResampleDTIVolume_inputs(): input_map = dict( - Inverse_ITK_Transformation=dict(argstr="--Inverse_ITK_Transformation ",), - Reference=dict(argstr="--Reference %s", extensions=None,), - args=dict(argstr="%s",), - centered_transform=dict(argstr="--centered_transform ",), - correction=dict(argstr="--correction %s",), - defField=dict(argstr="--defField %s", extensions=None,), - default_pixel_value=dict(argstr="--default_pixel_value %f",), - direction_matrix=dict(argstr="--direction_matrix %s", sep=",",), - environ=dict(nohash=True, usedefault=True,), - hfieldtype=dict(argstr="--hfieldtype %s",), - image_center=dict(argstr="--image_center %s",), - inputVolume=dict(argstr="%s", extensions=None, position=-2,), - interpolation=dict(argstr="--interpolation %s",), - notbulk=dict(argstr="--notbulk ",), - number_of_thread=dict(argstr="--number_of_thread %d",), - origin=dict(argstr="--origin %s",), - outputVolume=dict(argstr="%s", hash_files=False, position=-1,), - rotation_point=dict(argstr="--rotation_point %s",), - size=dict(argstr="--size %s", sep=",",), - spaceChange=dict(argstr="--spaceChange ",), - spacing=dict(argstr="--spacing %s", sep=",",), - spline_order=dict(argstr="--spline_order %d",), - transform=dict(argstr="--transform %s",), - transform_matrix=dict(argstr="--transform_matrix %s", sep=",",), - transform_order=dict(argstr="--transform_order %s",), - transform_tensor_method=dict(argstr="--transform_tensor_method %s",), - transformationFile=dict(argstr="--transformationFile %s", extensions=None,), - window_function=dict(argstr="--window_function %s",), + Inverse_ITK_Transformation=dict( + argstr="--Inverse_ITK_Transformation ", + ), + Reference=dict( + argstr="--Reference %s", + extensions=None, + ), + args=dict( + argstr="%s", + ), + centered_transform=dict( + argstr="--centered_transform ", + ), + correction=dict( + argstr="--correction %s", + ), + defField=dict( + argstr="--defField %s", + extensions=None, + ), + default_pixel_value=dict( + argstr="--default_pixel_value %f", + ), + direction_matrix=dict( + argstr="--direction_matrix %s", + sep=",", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hfieldtype=dict( + argstr="--hfieldtype %s", + ), + image_center=dict( + argstr="--image_center %s", + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + interpolation=dict( + argstr="--interpolation %s", + ), + notbulk=dict( + argstr="--notbulk ", + ), + number_of_thread=dict( + argstr="--number_of_thread %d", + ), + origin=dict( + argstr="--origin %s", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + rotation_point=dict( + argstr="--rotation_point %s", + ), + size=dict( + argstr="--size %s", + sep=",", + ), + spaceChange=dict( + argstr="--spaceChange ", + ), + spacing=dict( + argstr="--spacing %s", + sep=",", + ), + spline_order=dict( + argstr="--spline_order %d", + ), + transform=dict( + argstr="--transform %s", + ), + transform_matrix=dict( + argstr="--transform_matrix %s", + sep=",", + ), + transform_order=dict( + argstr="--transform_order %s", + ), + transform_tensor_method=dict( + argstr="--transform_tensor_method %s", + ), + transformationFile=dict( 
+ argstr="--transformationFile %s", + extensions=None, + ), + window_function=dict( + argstr="--window_function %s", + ), ) inputs = ResampleDTIVolume.input_spec() @@ -41,7 +109,12 @@ def test_ResampleDTIVolume_inputs(): def test_ResampleDTIVolume_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = ResampleDTIVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py index 6f36ac2a63..f8b1a3ddff 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py @@ -4,25 +4,70 @@ def test_TractographyLabelMapSeeding_inputs(): input_map = dict( - InputVolume=dict(argstr="%s", extensions=None, position=-2,), - OutputFibers=dict(argstr="%s", hash_files=False, position=-1,), - args=dict(argstr="%s",), - clthreshold=dict(argstr="--clthreshold %f",), - environ=dict(nohash=True, usedefault=True,), - inputroi=dict(argstr="--inputroi %s", extensions=None,), - integrationsteplength=dict(argstr="--integrationsteplength %f",), - label=dict(argstr="--label %d",), - maximumlength=dict(argstr="--maximumlength %f",), - minimumlength=dict(argstr="--minimumlength %f",), - name=dict(argstr="--name %s",), - outputdirectory=dict(argstr="--outputdirectory %s", hash_files=False,), - randomgrid=dict(argstr="--randomgrid ",), - seedspacing=dict(argstr="--seedspacing %f",), - stoppingcurvature=dict(argstr="--stoppingcurvature %f",), - stoppingmode=dict(argstr="--stoppingmode %s",), - stoppingvalue=dict(argstr="--stoppingvalue %f",), - useindexspace=dict(argstr="--useindexspace ",), - writetofile=dict(argstr="--writetofile ",), + InputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + OutputFibers=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + args=dict( + argstr="%s", + ), + clthreshold=dict( + argstr="--clthreshold %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputroi=dict( + argstr="--inputroi %s", + extensions=None, + ), + integrationsteplength=dict( + argstr="--integrationsteplength %f", + ), + label=dict( + argstr="--label %d", + ), + maximumlength=dict( + argstr="--maximumlength %f", + ), + minimumlength=dict( + argstr="--minimumlength %f", + ), + name=dict( + argstr="--name %s", + ), + outputdirectory=dict( + argstr="--outputdirectory %s", + hash_files=False, + ), + randomgrid=dict( + argstr="--randomgrid ", + ), + seedspacing=dict( + argstr="--seedspacing %f", + ), + stoppingcurvature=dict( + argstr="--stoppingcurvature %f", + ), + stoppingmode=dict( + argstr="--stoppingmode %s", + ), + stoppingvalue=dict( + argstr="--stoppingvalue %f", + ), + useindexspace=dict( + argstr="--useindexspace ", + ), + writetofile=dict( + argstr="--writetofile ", + ), ) inputs = TractographyLabelMapSeeding.input_spec() @@ -33,7 +78,11 @@ def test_TractographyLabelMapSeeding_inputs(): def test_TractographyLabelMapSeeding_outputs(): output_map = dict( - OutputFibers=dict(extensions=None, position=-1,), outputdirectory=dict(), + OutputFibers=dict( + extensions=None, + position=-1, + ), + outputdirectory=dict(), ) outputs = TractographyLabelMapSeeding.output_spec() diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py 
b/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py index 3ea978e771..2cd0ac229d 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py @@ -4,12 +4,31 @@ def test_AddScalarVolumes_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume1=dict(argstr="%s", extensions=None, position=-3,), - inputVolume2=dict(argstr="%s", extensions=None, position=-2,), - order=dict(argstr="--order %s",), - outputVolume=dict(argstr="%s", hash_files=False, position=-1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume1=dict( + argstr="%s", + extensions=None, + position=-3, + ), + inputVolume2=dict( + argstr="%s", + extensions=None, + position=-2, + ), + order=dict( + argstr="--order %s", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = AddScalarVolumes.input_spec() @@ -19,7 +38,12 @@ def test_AddScalarVolumes_inputs(): def test_AddScalarVolumes_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = AddScalarVolumes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py index e57c2a691b..8417ab1a90 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py @@ -4,11 +4,26 @@ def test_CastScalarVolume_inputs(): input_map = dict( - InputVolume=dict(argstr="%s", extensions=None, position=-2,), - OutputVolume=dict(argstr="%s", hash_files=False, position=-1,), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - type=dict(argstr="--type %s",), + InputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + OutputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + type=dict( + argstr="--type %s", + ), ) inputs = CastScalarVolume.input_spec() @@ -18,7 +33,12 @@ def test_CastScalarVolume_inputs(): def test_CastScalarVolume_outputs(): - output_map = dict(OutputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + OutputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = CastScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py index 564e2e14f8..49b5133faa 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py @@ -4,12 +4,32 @@ def test_CheckerBoardFilter_inputs(): input_map = dict( - args=dict(argstr="%s",), - checkerPattern=dict(argstr="--checkerPattern %s", sep=",",), - environ=dict(nohash=True, usedefault=True,), - inputVolume1=dict(argstr="%s", extensions=None, position=-3,), - inputVolume2=dict(argstr="%s", extensions=None, position=-2,), - outputVolume=dict(argstr="%s", hash_files=False, position=-1,), + args=dict( + argstr="%s", + ), + checkerPattern=dict( + argstr="--checkerPattern %s", + sep=",", + ), + 
environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume1=dict( + argstr="%s", + extensions=None, + position=-3, + ), + inputVolume2=dict( + argstr="%s", + extensions=None, + position=-2, + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = CheckerBoardFilter.input_spec() @@ -19,7 +39,12 @@ def test_CheckerBoardFilter_inputs(): def test_CheckerBoardFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = CheckerBoardFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py index 189bd459a0..48421b7c21 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py @@ -4,13 +4,32 @@ def test_CurvatureAnisotropicDiffusion_inputs(): input_map = dict( - args=dict(argstr="%s",), - conductance=dict(argstr="--conductance %f",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="%s", extensions=None, position=-2,), - iterations=dict(argstr="--iterations %d",), - outputVolume=dict(argstr="%s", hash_files=False, position=-1,), - timeStep=dict(argstr="--timeStep %f",), + args=dict( + argstr="%s", + ), + conductance=dict( + argstr="--conductance %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + iterations=dict( + argstr="--iterations %d", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + timeStep=dict( + argstr="--timeStep %f", + ), ) inputs = CurvatureAnisotropicDiffusion.input_spec() @@ -20,7 +39,12 @@ def test_CurvatureAnisotropicDiffusion_inputs(): def test_CurvatureAnisotropicDiffusion_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = CurvatureAnisotropicDiffusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py index 93861cefa5..430e299787 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py @@ -4,14 +4,35 @@ def test_ExtractSkeleton_inputs(): input_map = dict( - InputImageFileName=dict(argstr="%s", extensions=None, position=-2,), - OutputImageFileName=dict(argstr="%s", hash_files=False, position=-1,), - args=dict(argstr="%s",), - dontPrune=dict(argstr="--dontPrune ",), - environ=dict(nohash=True, usedefault=True,), - numPoints=dict(argstr="--numPoints %d",), - pointsFile=dict(argstr="--pointsFile %s",), - type=dict(argstr="--type %s",), + InputImageFileName=dict( + argstr="%s", + extensions=None, + position=-2, + ), + OutputImageFileName=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + args=dict( + argstr="%s", + ), + dontPrune=dict( + argstr="--dontPrune ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + numPoints=dict( + argstr="--numPoints %d", + ), + pointsFile=dict( + argstr="--pointsFile %s", + ), + type=dict( + argstr="--type %s", + ), ) 
inputs = ExtractSkeleton.input_spec() @@ -21,7 +42,12 @@ def test_ExtractSkeleton_inputs(): def test_ExtractSkeleton_outputs(): - output_map = dict(OutputImageFileName=dict(extensions=None, position=-1,),) + output_map = dict( + OutputImageFileName=dict( + extensions=None, + position=-1, + ), + ) outputs = ExtractSkeleton.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py index a0655cfb6e..113490472d 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py @@ -4,11 +4,26 @@ def test_GaussianBlurImageFilter_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="%s", extensions=None, position=-2,), - outputVolume=dict(argstr="%s", hash_files=False, position=-1,), - sigma=dict(argstr="--sigma %f",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + sigma=dict( + argstr="--sigma %f", + ), ) inputs = GaussianBlurImageFilter.input_spec() @@ -18,7 +33,12 @@ def test_GaussianBlurImageFilter_inputs(): def test_GaussianBlurImageFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = GaussianBlurImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py index 2b6e77d1d8..95810788c7 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py @@ -4,13 +4,32 @@ def test_GradientAnisotropicDiffusion_inputs(): input_map = dict( - args=dict(argstr="%s",), - conductance=dict(argstr="--conductance %f",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="%s", extensions=None, position=-2,), - iterations=dict(argstr="--iterations %d",), - outputVolume=dict(argstr="%s", hash_files=False, position=-1,), - timeStep=dict(argstr="--timeStep %f",), + args=dict( + argstr="%s", + ), + conductance=dict( + argstr="--conductance %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + iterations=dict( + argstr="--iterations %d", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + timeStep=dict( + argstr="--timeStep %f", + ), ) inputs = GradientAnisotropicDiffusion.input_spec() @@ -20,7 +39,12 @@ def test_GradientAnisotropicDiffusion_inputs(): def test_GradientAnisotropicDiffusion_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = GradientAnisotropicDiffusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py 
b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py index a12177d820..8891232347 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py @@ -4,10 +4,23 @@ def test_GrayscaleFillHoleImageFilter_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="%s", extensions=None, position=-2,), - outputVolume=dict(argstr="%s", hash_files=False, position=-1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = GrayscaleFillHoleImageFilter.input_spec() @@ -17,7 +30,12 @@ def test_GrayscaleFillHoleImageFilter_inputs(): def test_GrayscaleFillHoleImageFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = GrayscaleFillHoleImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py index ab1c23f716..d48d9ded63 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py @@ -4,10 +4,23 @@ def test_GrayscaleGrindPeakImageFilter_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="%s", extensions=None, position=-2,), - outputVolume=dict(argstr="%s", hash_files=False, position=-1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = GrayscaleGrindPeakImageFilter.input_spec() @@ -17,7 +30,12 @@ def test_GrayscaleGrindPeakImageFilter_inputs(): def test_GrayscaleGrindPeakImageFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = GrayscaleGrindPeakImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py b/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py index 003ec4c8d8..cf2a959ff9 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py @@ -4,14 +4,37 @@ def test_HistogramMatching_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="%s", extensions=None, position=-3,), - numberOfHistogramLevels=dict(argstr="--numberOfHistogramLevels %d",), - numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d",), - outputVolume=dict(argstr="%s", hash_files=False, position=-1,), - referenceVolume=dict(argstr="%s", extensions=None, position=-2,), - threshold=dict(argstr="--threshold ",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + 
usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-3, + ), + numberOfHistogramLevels=dict( + argstr="--numberOfHistogramLevels %d", + ), + numberOfMatchPoints=dict( + argstr="--numberOfMatchPoints %d", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + referenceVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + threshold=dict( + argstr="--threshold ", + ), ) inputs = HistogramMatching.input_spec() @@ -21,7 +44,12 @@ def test_HistogramMatching_inputs(): def test_HistogramMatching_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = HistogramMatching.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py index 8fb1596420..802baf5f38 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py @@ -4,12 +4,31 @@ def test_ImageLabelCombine_inputs(): input_map = dict( - InputLabelMap_A=dict(argstr="%s", extensions=None, position=-3,), - InputLabelMap_B=dict(argstr="%s", extensions=None, position=-2,), - OutputLabelMap=dict(argstr="%s", hash_files=False, position=-1,), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - first_overwrites=dict(argstr="--first_overwrites ",), + InputLabelMap_A=dict( + argstr="%s", + extensions=None, + position=-3, + ), + InputLabelMap_B=dict( + argstr="%s", + extensions=None, + position=-2, + ), + OutputLabelMap=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + first_overwrites=dict( + argstr="--first_overwrites ", + ), ) inputs = ImageLabelCombine.input_spec() @@ -19,7 +38,12 @@ def test_ImageLabelCombine_inputs(): def test_ImageLabelCombine_outputs(): - output_map = dict(OutputLabelMap=dict(extensions=None, position=-1,),) + output_map = dict( + OutputLabelMap=dict( + extensions=None, + position=-1, + ), + ) outputs = ImageLabelCombine.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py index fd8bf6aaf7..5070718d66 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py @@ -4,13 +4,34 @@ def test_MaskScalarVolume_inputs(): input_map = dict( - InputVolume=dict(argstr="%s", extensions=None, position=-3,), - MaskVolume=dict(argstr="%s", extensions=None, position=-2,), - OutputVolume=dict(argstr="%s", hash_files=False, position=-1,), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - label=dict(argstr="--label %d",), - replace=dict(argstr="--replace %d",), + InputVolume=dict( + argstr="%s", + extensions=None, + position=-3, + ), + MaskVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + OutputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + label=dict( + argstr="--label %d", + ), + replace=dict( + argstr="--replace %d", + ), ) inputs = 
MaskScalarVolume.input_spec() @@ -20,7 +41,12 @@ def test_MaskScalarVolume_inputs(): def test_MaskScalarVolume_outputs(): - output_map = dict(OutputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + OutputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = MaskScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py index 969a0dead6..8b86a90c3b 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py @@ -4,11 +4,27 @@ def test_MedianImageFilter_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="%s", extensions=None, position=-2,), - neighborhood=dict(argstr="--neighborhood %s", sep=",",), - outputVolume=dict(argstr="%s", hash_files=False, position=-1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + neighborhood=dict( + argstr="--neighborhood %s", + sep=",", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = MedianImageFilter.input_spec() @@ -18,7 +34,12 @@ def test_MedianImageFilter_inputs(): def test_MedianImageFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = MedianImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py index f6e521fe8e..817fdbbe95 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py @@ -4,12 +4,31 @@ def test_MultiplyScalarVolumes_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume1=dict(argstr="%s", extensions=None, position=-3,), - inputVolume2=dict(argstr="%s", extensions=None, position=-2,), - order=dict(argstr="--order %s",), - outputVolume=dict(argstr="%s", hash_files=False, position=-1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume1=dict( + argstr="%s", + extensions=None, + position=-3, + ), + inputVolume2=dict( + argstr="%s", + extensions=None, + position=-2, + ), + order=dict( + argstr="--order %s", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = MultiplyScalarVolumes.input_spec() @@ -19,7 +38,12 @@ def test_MultiplyScalarVolumes_inputs(): def test_MultiplyScalarVolumes_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = MultiplyScalarVolumes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py b/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py index 435b4d0f6a..43038036d5 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py +++ 
b/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py @@ -4,20 +4,57 @@ def test_N4ITKBiasFieldCorrection_inputs(): input_map = dict( - args=dict(argstr="%s",), - bsplineorder=dict(argstr="--bsplineorder %d",), - convergencethreshold=dict(argstr="--convergencethreshold %f",), - environ=dict(nohash=True, usedefault=True,), - histogramsharpening=dict(argstr="--histogramsharpening %s", sep=",",), - inputimage=dict(argstr="--inputimage %s", extensions=None,), - iterations=dict(argstr="--iterations %s", sep=",",), - maskimage=dict(argstr="--maskimage %s", extensions=None,), - meshresolution=dict(argstr="--meshresolution %s", sep=",",), - outputbiasfield=dict(argstr="--outputbiasfield %s", hash_files=False,), - outputimage=dict(argstr="--outputimage %s", hash_files=False,), - shrinkfactor=dict(argstr="--shrinkfactor %d",), - splinedistance=dict(argstr="--splinedistance %f",), - weightimage=dict(argstr="--weightimage %s", extensions=None,), + args=dict( + argstr="%s", + ), + bsplineorder=dict( + argstr="--bsplineorder %d", + ), + convergencethreshold=dict( + argstr="--convergencethreshold %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + histogramsharpening=dict( + argstr="--histogramsharpening %s", + sep=",", + ), + inputimage=dict( + argstr="--inputimage %s", + extensions=None, + ), + iterations=dict( + argstr="--iterations %s", + sep=",", + ), + maskimage=dict( + argstr="--maskimage %s", + extensions=None, + ), + meshresolution=dict( + argstr="--meshresolution %s", + sep=",", + ), + outputbiasfield=dict( + argstr="--outputbiasfield %s", + hash_files=False, + ), + outputimage=dict( + argstr="--outputimage %s", + hash_files=False, + ), + shrinkfactor=dict( + argstr="--shrinkfactor %d", + ), + splinedistance=dict( + argstr="--splinedistance %f", + ), + weightimage=dict( + argstr="--weightimage %s", + extensions=None, + ), ) inputs = N4ITKBiasFieldCorrection.input_spec() @@ -28,7 +65,12 @@ def test_N4ITKBiasFieldCorrection_inputs(): def test_N4ITKBiasFieldCorrection_outputs(): output_map = dict( - outputbiasfield=dict(extensions=None,), outputimage=dict(extensions=None,), + outputbiasfield=dict( + extensions=None, + ), + outputimage=dict( + extensions=None, + ), ) outputs = N4ITKBiasFieldCorrection.output_spec() diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py index d1a28f3374..37dca6437c 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py @@ -4,32 +4,96 @@ def test_ResampleScalarVectorDWIVolume_inputs(): input_map = dict( - Inverse_ITK_Transformation=dict(argstr="--Inverse_ITK_Transformation ",), - Reference=dict(argstr="--Reference %s", extensions=None,), - args=dict(argstr="%s",), - centered_transform=dict(argstr="--centered_transform ",), - defField=dict(argstr="--defField %s", extensions=None,), - default_pixel_value=dict(argstr="--default_pixel_value %f",), - direction_matrix=dict(argstr="--direction_matrix %s", sep=",",), - environ=dict(nohash=True, usedefault=True,), - hfieldtype=dict(argstr="--hfieldtype %s",), - image_center=dict(argstr="--image_center %s",), - inputVolume=dict(argstr="%s", extensions=None, position=-2,), - interpolation=dict(argstr="--interpolation %s",), - notbulk=dict(argstr="--notbulk ",), - number_of_thread=dict(argstr="--number_of_thread %d",), - 
origin=dict(argstr="--origin %s",), - outputVolume=dict(argstr="%s", hash_files=False, position=-1,), - rotation_point=dict(argstr="--rotation_point %s",), - size=dict(argstr="--size %s", sep=",",), - spaceChange=dict(argstr="--spaceChange ",), - spacing=dict(argstr="--spacing %s", sep=",",), - spline_order=dict(argstr="--spline_order %d",), - transform=dict(argstr="--transform %s",), - transform_matrix=dict(argstr="--transform_matrix %s", sep=",",), - transform_order=dict(argstr="--transform_order %s",), - transformationFile=dict(argstr="--transformationFile %s", extensions=None,), - window_function=dict(argstr="--window_function %s",), + Inverse_ITK_Transformation=dict( + argstr="--Inverse_ITK_Transformation ", + ), + Reference=dict( + argstr="--Reference %s", + extensions=None, + ), + args=dict( + argstr="%s", + ), + centered_transform=dict( + argstr="--centered_transform ", + ), + defField=dict( + argstr="--defField %s", + extensions=None, + ), + default_pixel_value=dict( + argstr="--default_pixel_value %f", + ), + direction_matrix=dict( + argstr="--direction_matrix %s", + sep=",", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hfieldtype=dict( + argstr="--hfieldtype %s", + ), + image_center=dict( + argstr="--image_center %s", + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + interpolation=dict( + argstr="--interpolation %s", + ), + notbulk=dict( + argstr="--notbulk ", + ), + number_of_thread=dict( + argstr="--number_of_thread %d", + ), + origin=dict( + argstr="--origin %s", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + rotation_point=dict( + argstr="--rotation_point %s", + ), + size=dict( + argstr="--size %s", + sep=",", + ), + spaceChange=dict( + argstr="--spaceChange ", + ), + spacing=dict( + argstr="--spacing %s", + sep=",", + ), + spline_order=dict( + argstr="--spline_order %d", + ), + transform=dict( + argstr="--transform %s", + ), + transform_matrix=dict( + argstr="--transform_matrix %s", + sep=",", + ), + transform_order=dict( + argstr="--transform_order %s", + ), + transformationFile=dict( + argstr="--transformationFile %s", + extensions=None, + ), + window_function=dict( + argstr="--window_function %s", + ), ) inputs = ResampleScalarVectorDWIVolume.input_spec() @@ -39,7 +103,12 @@ def test_ResampleScalarVectorDWIVolume_inputs(): def test_ResampleScalarVectorDWIVolume_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = ResampleScalarVectorDWIVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py index 6d8ae8ad73..abe3d9ad00 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py @@ -4,12 +4,31 @@ def test_SubtractScalarVolumes_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume1=dict(argstr="%s", extensions=None, position=-3,), - inputVolume2=dict(argstr="%s", extensions=None, position=-2,), - order=dict(argstr="--order %s",), - outputVolume=dict(argstr="%s", hash_files=False, position=-1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume1=dict( + argstr="%s", + 
extensions=None, + position=-3, + ), + inputVolume2=dict( + argstr="%s", + extensions=None, + position=-2, + ), + order=dict( + argstr="--order %s", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = SubtractScalarVolumes.input_spec() @@ -19,7 +38,12 @@ def test_SubtractScalarVolumes_inputs(): def test_SubtractScalarVolumes_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = SubtractScalarVolumes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py index eec13b435e..0aaab0ff7a 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py @@ -4,15 +4,38 @@ def test_ThresholdScalarVolume_inputs(): input_map = dict( - InputVolume=dict(argstr="%s", extensions=None, position=-2,), - OutputVolume=dict(argstr="%s", hash_files=False, position=-1,), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - lower=dict(argstr="--lower %d",), - outsidevalue=dict(argstr="--outsidevalue %d",), - threshold=dict(argstr="--threshold %d",), - thresholdtype=dict(argstr="--thresholdtype %s",), - upper=dict(argstr="--upper %d",), + InputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + OutputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + lower=dict( + argstr="--lower %d", + ), + outsidevalue=dict( + argstr="--outsidevalue %d", + ), + threshold=dict( + argstr="--threshold %d", + ), + thresholdtype=dict( + argstr="--thresholdtype %s", + ), + upper=dict( + argstr="--upper %d", + ), ) inputs = ThresholdScalarVolume.input_spec() @@ -22,7 +45,12 @@ def test_ThresholdScalarVolume_inputs(): def test_ThresholdScalarVolume_outputs(): - output_map = dict(OutputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + OutputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = ThresholdScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py index d45159cc1b..bbaa19f848 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py @@ -4,14 +4,36 @@ def test_VotingBinaryHoleFillingImageFilter_inputs(): input_map = dict( - args=dict(argstr="%s",), - background=dict(argstr="--background %d",), - environ=dict(nohash=True, usedefault=True,), - foreground=dict(argstr="--foreground %d",), - inputVolume=dict(argstr="%s", extensions=None, position=-2,), - majorityThreshold=dict(argstr="--majorityThreshold %d",), - outputVolume=dict(argstr="%s", hash_files=False, position=-1,), - radius=dict(argstr="--radius %s", sep=",",), + args=dict( + argstr="%s", + ), + background=dict( + argstr="--background %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + foreground=dict( + argstr="--foreground %d", + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), 
+ majorityThreshold=dict( + argstr="--majorityThreshold %d", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + radius=dict( + argstr="--radius %s", + sep=",", + ), ) inputs = VotingBinaryHoleFillingImageFilter.input_spec() @@ -21,7 +43,12 @@ def test_VotingBinaryHoleFillingImageFilter_inputs(): def test_VotingBinaryHoleFillingImageFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = VotingBinaryHoleFillingImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py b/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py index 81f61c1bbc..8aa18dc6a3 100644 --- a/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py +++ b/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py @@ -4,15 +4,41 @@ def test_DWIUnbiasedNonLocalMeansFilter_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - hp=dict(argstr="--hp %f",), - inputVolume=dict(argstr="%s", extensions=None, position=-2,), - ng=dict(argstr="--ng %d",), - outputVolume=dict(argstr="%s", hash_files=False, position=-1,), - rc=dict(argstr="--rc %s", sep=",",), - re=dict(argstr="--re %s", sep=",",), - rs=dict(argstr="--rs %s", sep=",",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hp=dict( + argstr="--hp %f", + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + ng=dict( + argstr="--ng %d", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + rc=dict( + argstr="--rc %s", + sep=",", + ), + re=dict( + argstr="--re %s", + sep=",", + ), + rs=dict( + argstr="--rs %s", + sep=",", + ), ) inputs = DWIUnbiasedNonLocalMeansFilter.input_spec() @@ -22,7 +48,12 @@ def test_DWIUnbiasedNonLocalMeansFilter_inputs(): def test_DWIUnbiasedNonLocalMeansFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = DWIUnbiasedNonLocalMeansFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py index 44857bf3a8..bb2de08cfb 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py @@ -4,21 +4,53 @@ def test_AffineRegistration_inputs(): input_map = dict( - FixedImageFileName=dict(argstr="%s", extensions=None, position=-2,), - MovingImageFileName=dict(argstr="%s", extensions=None, position=-1,), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - fixedsmoothingfactor=dict(argstr="--fixedsmoothingfactor %d",), - histogrambins=dict(argstr="--histogrambins %d",), - initialtransform=dict(argstr="--initialtransform %s", extensions=None,), - iterations=dict(argstr="--iterations %d",), - movingsmoothingfactor=dict(argstr="--movingsmoothingfactor %d",), - outputtransform=dict(argstr="--outputtransform %s", hash_files=False,), + FixedImageFileName=dict( + argstr="%s", + extensions=None, + position=-2, + ), + MovingImageFileName=dict( + 
argstr="%s", + extensions=None, + position=-1, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedsmoothingfactor=dict( + argstr="--fixedsmoothingfactor %d", + ), + histogrambins=dict( + argstr="--histogrambins %d", + ), + initialtransform=dict( + argstr="--initialtransform %s", + extensions=None, + ), + iterations=dict( + argstr="--iterations %d", + ), + movingsmoothingfactor=dict( + argstr="--movingsmoothingfactor %d", + ), + outputtransform=dict( + argstr="--outputtransform %s", + hash_files=False, + ), resampledmovingfilename=dict( - argstr="--resampledmovingfilename %s", hash_files=False, + argstr="--resampledmovingfilename %s", + hash_files=False, + ), + spatialsamples=dict( + argstr="--spatialsamples %d", + ), + translationscale=dict( + argstr="--translationscale %f", ), - spatialsamples=dict(argstr="--spatialsamples %d",), - translationscale=dict(argstr="--translationscale %f",), ) inputs = AffineRegistration.input_spec() @@ -29,8 +61,12 @@ def test_AffineRegistration_inputs(): def test_AffineRegistration_outputs(): output_map = dict( - outputtransform=dict(extensions=None,), - resampledmovingfilename=dict(extensions=None,), + outputtransform=dict( + extensions=None, + ), + resampledmovingfilename=dict( + extensions=None, + ), ) outputs = AffineRegistration.output_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py index 7c777ebca3..41b316e7dc 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py @@ -4,23 +4,60 @@ def test_BSplineDeformableRegistration_inputs(): input_map = dict( - FixedImageFileName=dict(argstr="%s", extensions=None, position=-2,), - MovingImageFileName=dict(argstr="%s", extensions=None, position=-1,), - args=dict(argstr="%s",), - constrain=dict(argstr="--constrain ",), - default=dict(argstr="--default %d",), - environ=dict(nohash=True, usedefault=True,), - gridSize=dict(argstr="--gridSize %d",), - histogrambins=dict(argstr="--histogrambins %d",), - initialtransform=dict(argstr="--initialtransform %s", extensions=None,), - iterations=dict(argstr="--iterations %d",), - maximumDeformation=dict(argstr="--maximumDeformation %f",), - outputtransform=dict(argstr="--outputtransform %s", hash_files=False,), - outputwarp=dict(argstr="--outputwarp %s", hash_files=False,), + FixedImageFileName=dict( + argstr="%s", + extensions=None, + position=-2, + ), + MovingImageFileName=dict( + argstr="%s", + extensions=None, + position=-1, + ), + args=dict( + argstr="%s", + ), + constrain=dict( + argstr="--constrain ", + ), + default=dict( + argstr="--default %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gridSize=dict( + argstr="--gridSize %d", + ), + histogrambins=dict( + argstr="--histogrambins %d", + ), + initialtransform=dict( + argstr="--initialtransform %s", + extensions=None, + ), + iterations=dict( + argstr="--iterations %d", + ), + maximumDeformation=dict( + argstr="--maximumDeformation %f", + ), + outputtransform=dict( + argstr="--outputtransform %s", + hash_files=False, + ), + outputwarp=dict( + argstr="--outputwarp %s", + hash_files=False, + ), resampledmovingfilename=dict( - argstr="--resampledmovingfilename %s", hash_files=False, + argstr="--resampledmovingfilename %s", + hash_files=False, + ), + spatialsamples=dict( + argstr="--spatialsamples %d", 
), - spatialsamples=dict(argstr="--spatialsamples %d",), ) inputs = BSplineDeformableRegistration.input_spec() @@ -31,9 +68,15 @@ def test_BSplineDeformableRegistration_inputs(): def test_BSplineDeformableRegistration_outputs(): output_map = dict( - outputtransform=dict(extensions=None,), - outputwarp=dict(extensions=None,), - resampledmovingfilename=dict(extensions=None,), + outputtransform=dict( + extensions=None, + ), + outputwarp=dict( + extensions=None, + ), + resampledmovingfilename=dict( + extensions=None, + ), ) outputs = BSplineDeformableRegistration.output_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py index c4dc0f8969..fbd37eeb8e 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py @@ -4,11 +4,25 @@ def test_BSplineToDeformationField_inputs(): input_map = dict( - args=dict(argstr="%s",), - defImage=dict(argstr="--defImage %s", hash_files=False,), - environ=dict(nohash=True, usedefault=True,), - refImage=dict(argstr="--refImage %s", extensions=None,), - tfm=dict(argstr="--tfm %s", extensions=None,), + args=dict( + argstr="%s", + ), + defImage=dict( + argstr="--defImage %s", + hash_files=False, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + refImage=dict( + argstr="--refImage %s", + extensions=None, + ), + tfm=dict( + argstr="--tfm %s", + extensions=None, + ), ) inputs = BSplineToDeformationField.input_spec() @@ -18,7 +32,11 @@ def test_BSplineToDeformationField_inputs(): def test_BSplineToDeformationField_outputs(): - output_map = dict(defImage=dict(extensions=None,),) + output_map = dict( + defImage=dict( + extensions=None, + ), + ) outputs = BSplineToDeformationField.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py index 6536d699b0..28f4e19d7b 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py @@ -4,36 +4,105 @@ def test_ExpertAutomatedRegistration_inputs(): input_map = dict( - affineMaxIterations=dict(argstr="--affineMaxIterations %d",), - affineSamplingRatio=dict(argstr="--affineSamplingRatio %f",), - args=dict(argstr="%s",), - bsplineMaxIterations=dict(argstr="--bsplineMaxIterations %d",), - bsplineSamplingRatio=dict(argstr="--bsplineSamplingRatio %f",), - controlPointSpacing=dict(argstr="--controlPointSpacing %d",), - environ=dict(nohash=True, usedefault=True,), - expectedOffset=dict(argstr="--expectedOffset %f",), - expectedRotation=dict(argstr="--expectedRotation %f",), - expectedScale=dict(argstr="--expectedScale %f",), - expectedSkew=dict(argstr="--expectedSkew %f",), - fixedImage=dict(argstr="%s", extensions=None, position=-2,), - fixedImageMask=dict(argstr="--fixedImageMask %s", extensions=None,), - fixedLandmarks=dict(argstr="--fixedLandmarks %s...",), - initialization=dict(argstr="--initialization %s",), - interpolation=dict(argstr="--interpolation %s",), - loadTransform=dict(argstr="--loadTransform %s", extensions=None,), - metric=dict(argstr="--metric %s",), - minimizeMemory=dict(argstr="--minimizeMemory ",), - movingImage=dict(argstr="%s", extensions=None, position=-1,), - 
movingLandmarks=dict(argstr="--movingLandmarks %s...",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - randomNumberSeed=dict(argstr="--randomNumberSeed %d",), - registration=dict(argstr="--registration %s",), - resampledImage=dict(argstr="--resampledImage %s", hash_files=False,), - rigidMaxIterations=dict(argstr="--rigidMaxIterations %d",), - rigidSamplingRatio=dict(argstr="--rigidSamplingRatio %f",), - sampleFromOverlap=dict(argstr="--sampleFromOverlap ",), - saveTransform=dict(argstr="--saveTransform %s", hash_files=False,), - verbosityLevel=dict(argstr="--verbosityLevel %s",), + affineMaxIterations=dict( + argstr="--affineMaxIterations %d", + ), + affineSamplingRatio=dict( + argstr="--affineSamplingRatio %f", + ), + args=dict( + argstr="%s", + ), + bsplineMaxIterations=dict( + argstr="--bsplineMaxIterations %d", + ), + bsplineSamplingRatio=dict( + argstr="--bsplineSamplingRatio %f", + ), + controlPointSpacing=dict( + argstr="--controlPointSpacing %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + expectedOffset=dict( + argstr="--expectedOffset %f", + ), + expectedRotation=dict( + argstr="--expectedRotation %f", + ), + expectedScale=dict( + argstr="--expectedScale %f", + ), + expectedSkew=dict( + argstr="--expectedSkew %f", + ), + fixedImage=dict( + argstr="%s", + extensions=None, + position=-2, + ), + fixedImageMask=dict( + argstr="--fixedImageMask %s", + extensions=None, + ), + fixedLandmarks=dict( + argstr="--fixedLandmarks %s...", + ), + initialization=dict( + argstr="--initialization %s", + ), + interpolation=dict( + argstr="--interpolation %s", + ), + loadTransform=dict( + argstr="--loadTransform %s", + extensions=None, + ), + metric=dict( + argstr="--metric %s", + ), + minimizeMemory=dict( + argstr="--minimizeMemory ", + ), + movingImage=dict( + argstr="%s", + extensions=None, + position=-1, + ), + movingLandmarks=dict( + argstr="--movingLandmarks %s...", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + randomNumberSeed=dict( + argstr="--randomNumberSeed %d", + ), + registration=dict( + argstr="--registration %s", + ), + resampledImage=dict( + argstr="--resampledImage %s", + hash_files=False, + ), + rigidMaxIterations=dict( + argstr="--rigidMaxIterations %d", + ), + rigidSamplingRatio=dict( + argstr="--rigidSamplingRatio %f", + ), + sampleFromOverlap=dict( + argstr="--sampleFromOverlap ", + ), + saveTransform=dict( + argstr="--saveTransform %s", + hash_files=False, + ), + verbosityLevel=dict( + argstr="--verbosityLevel %s", + ), ) inputs = ExpertAutomatedRegistration.input_spec() @@ -44,7 +113,12 @@ def test_ExpertAutomatedRegistration_inputs(): def test_ExpertAutomatedRegistration_outputs(): output_map = dict( - resampledImage=dict(extensions=None,), saveTransform=dict(extensions=None,), + resampledImage=dict( + extensions=None, + ), + saveTransform=dict( + extensions=None, + ), ) outputs = ExpertAutomatedRegistration.output_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py index b19d3be344..77fb5d69f6 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py @@ -4,22 +4,58 @@ def test_LinearRegistration_inputs(): input_map = dict( - FixedImageFileName=dict(argstr="%s", extensions=None, position=-2,), - MovingImageFileName=dict(argstr="%s", extensions=None, position=-1,), - args=dict(argstr="%s",), - 
environ=dict(nohash=True, usedefault=True,), - fixedsmoothingfactor=dict(argstr="--fixedsmoothingfactor %d",), - histogrambins=dict(argstr="--histogrambins %d",), - initialtransform=dict(argstr="--initialtransform %s", extensions=None,), - iterations=dict(argstr="--iterations %s", sep=",",), - learningrate=dict(argstr="--learningrate %s", sep=",",), - movingsmoothingfactor=dict(argstr="--movingsmoothingfactor %d",), - outputtransform=dict(argstr="--outputtransform %s", hash_files=False,), + FixedImageFileName=dict( + argstr="%s", + extensions=None, + position=-2, + ), + MovingImageFileName=dict( + argstr="%s", + extensions=None, + position=-1, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedsmoothingfactor=dict( + argstr="--fixedsmoothingfactor %d", + ), + histogrambins=dict( + argstr="--histogrambins %d", + ), + initialtransform=dict( + argstr="--initialtransform %s", + extensions=None, + ), + iterations=dict( + argstr="--iterations %s", + sep=",", + ), + learningrate=dict( + argstr="--learningrate %s", + sep=",", + ), + movingsmoothingfactor=dict( + argstr="--movingsmoothingfactor %d", + ), + outputtransform=dict( + argstr="--outputtransform %s", + hash_files=False, + ), resampledmovingfilename=dict( - argstr="--resampledmovingfilename %s", hash_files=False, + argstr="--resampledmovingfilename %s", + hash_files=False, + ), + spatialsamples=dict( + argstr="--spatialsamples %d", + ), + translationscale=dict( + argstr="--translationscale %f", ), - spatialsamples=dict(argstr="--spatialsamples %d",), - translationscale=dict(argstr="--translationscale %f",), ) inputs = LinearRegistration.input_spec() @@ -30,8 +66,12 @@ def test_LinearRegistration_inputs(): def test_LinearRegistration_outputs(): output_map = dict( - outputtransform=dict(extensions=None,), - resampledmovingfilename=dict(extensions=None,), + outputtransform=dict( + extensions=None, + ), + resampledmovingfilename=dict( + extensions=None, + ), ) outputs = LinearRegistration.output_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py index ad7f89e35d..0f1e19d4ba 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py @@ -4,19 +4,53 @@ def test_MultiResolutionAffineRegistration_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - fixedImage=dict(argstr="%s", extensions=None, position=-2,), - fixedImageMask=dict(argstr="--fixedImageMask %s", extensions=None,), - fixedImageROI=dict(argstr="--fixedImageROI %s",), - metricTolerance=dict(argstr="--metricTolerance %f",), - movingImage=dict(argstr="%s", extensions=None, position=-1,), - numIterations=dict(argstr="--numIterations %d",), - numLineIterations=dict(argstr="--numLineIterations %d",), - resampledImage=dict(argstr="--resampledImage %s", hash_files=False,), - saveTransform=dict(argstr="--saveTransform %s", hash_files=False,), - stepSize=dict(argstr="--stepSize %f",), - stepTolerance=dict(argstr="--stepTolerance %f",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedImage=dict( + argstr="%s", + extensions=None, + position=-2, + ), + fixedImageMask=dict( + argstr="--fixedImageMask %s", + extensions=None, + ), + fixedImageROI=dict( + argstr="--fixedImageROI %s", + ), + 
metricTolerance=dict( + argstr="--metricTolerance %f", + ), + movingImage=dict( + argstr="%s", + extensions=None, + position=-1, + ), + numIterations=dict( + argstr="--numIterations %d", + ), + numLineIterations=dict( + argstr="--numLineIterations %d", + ), + resampledImage=dict( + argstr="--resampledImage %s", + hash_files=False, + ), + saveTransform=dict( + argstr="--saveTransform %s", + hash_files=False, + ), + stepSize=dict( + argstr="--stepSize %f", + ), + stepTolerance=dict( + argstr="--stepTolerance %f", + ), ) inputs = MultiResolutionAffineRegistration.input_spec() @@ -27,7 +61,12 @@ def test_MultiResolutionAffineRegistration_inputs(): def test_MultiResolutionAffineRegistration_outputs(): output_map = dict( - resampledImage=dict(extensions=None,), saveTransform=dict(extensions=None,), + resampledImage=dict( + extensions=None, + ), + saveTransform=dict( + extensions=None, + ), ) outputs = MultiResolutionAffineRegistration.output_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py index 853fbb5caa..672d971471 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py @@ -4,13 +4,32 @@ def test_OtsuThresholdImageFilter_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="%s", extensions=None, position=-2,), - insideValue=dict(argstr="--insideValue %d",), - numberOfBins=dict(argstr="--numberOfBins %d",), - outputVolume=dict(argstr="%s", hash_files=False, position=-1,), - outsideValue=dict(argstr="--outsideValue %d",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + insideValue=dict( + argstr="--insideValue %d", + ), + numberOfBins=dict( + argstr="--numberOfBins %d", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + outsideValue=dict( + argstr="--outsideValue %d", + ), ) inputs = OtsuThresholdImageFilter.input_spec() @@ -20,7 +39,12 @@ def test_OtsuThresholdImageFilter_inputs(): def test_OtsuThresholdImageFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = OtsuThresholdImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py index 561bb246f2..a28c8231c8 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py @@ -4,14 +4,35 @@ def test_OtsuThresholdSegmentation_inputs(): input_map = dict( - args=dict(argstr="%s",), - brightObjects=dict(argstr="--brightObjects ",), - environ=dict(nohash=True, usedefault=True,), - faceConnected=dict(argstr="--faceConnected ",), - inputVolume=dict(argstr="%s", extensions=None, position=-2,), - minimumObjectSize=dict(argstr="--minimumObjectSize %d",), - numberOfBins=dict(argstr="--numberOfBins %d",), - outputVolume=dict(argstr="%s", hash_files=False, position=-1,), + args=dict( + argstr="%s", + ), + brightObjects=dict( + argstr="--brightObjects ", + ), + environ=dict( + nohash=True, 
+ usedefault=True, + ), + faceConnected=dict( + argstr="--faceConnected ", + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + minimumObjectSize=dict( + argstr="--minimumObjectSize %d", + ), + numberOfBins=dict( + argstr="--numberOfBins %d", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = OtsuThresholdSegmentation.input_spec() @@ -21,7 +42,12 @@ def test_OtsuThresholdSegmentation_inputs(): def test_OtsuThresholdSegmentation_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = OtsuThresholdSegmentation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py b/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py index a95967feca..2f47b3bd16 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py @@ -4,12 +4,30 @@ def test_ResampleScalarVolume_inputs(): input_map = dict( - InputVolume=dict(argstr="%s", extensions=None, position=-2,), - OutputVolume=dict(argstr="%s", hash_files=False, position=-1,), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - interpolation=dict(argstr="--interpolation %s",), - spacing=dict(argstr="--spacing %s", sep=",",), + InputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + OutputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + interpolation=dict( + argstr="--interpolation %s", + ), + spacing=dict( + argstr="--spacing %s", + sep=",", + ), ) inputs = ResampleScalarVolume.input_spec() @@ -19,7 +37,12 @@ def test_ResampleScalarVolume_inputs(): def test_ResampleScalarVolume_outputs(): - output_map = dict(OutputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + OutputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = ResampleScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py index ea4b5eda53..3e8aba0e4d 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py @@ -4,23 +4,61 @@ def test_RigidRegistration_inputs(): input_map = dict( - FixedImageFileName=dict(argstr="%s", extensions=None, position=-2,), - MovingImageFileName=dict(argstr="%s", extensions=None, position=-1,), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - fixedsmoothingfactor=dict(argstr="--fixedsmoothingfactor %d",), - histogrambins=dict(argstr="--histogrambins %d",), - initialtransform=dict(argstr="--initialtransform %s", extensions=None,), - iterations=dict(argstr="--iterations %s", sep=",",), - learningrate=dict(argstr="--learningrate %s", sep=",",), - movingsmoothingfactor=dict(argstr="--movingsmoothingfactor %d",), - outputtransform=dict(argstr="--outputtransform %s", hash_files=False,), + FixedImageFileName=dict( + argstr="%s", + extensions=None, + position=-2, + ), + MovingImageFileName=dict( + argstr="%s", + extensions=None, + position=-1, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + 
usedefault=True, + ), + fixedsmoothingfactor=dict( + argstr="--fixedsmoothingfactor %d", + ), + histogrambins=dict( + argstr="--histogrambins %d", + ), + initialtransform=dict( + argstr="--initialtransform %s", + extensions=None, + ), + iterations=dict( + argstr="--iterations %s", + sep=",", + ), + learningrate=dict( + argstr="--learningrate %s", + sep=",", + ), + movingsmoothingfactor=dict( + argstr="--movingsmoothingfactor %d", + ), + outputtransform=dict( + argstr="--outputtransform %s", + hash_files=False, + ), resampledmovingfilename=dict( - argstr="--resampledmovingfilename %s", hash_files=False, + argstr="--resampledmovingfilename %s", + hash_files=False, + ), + spatialsamples=dict( + argstr="--spatialsamples %d", + ), + testingmode=dict( + argstr="--testingmode ", + ), + translationscale=dict( + argstr="--translationscale %f", ), - spatialsamples=dict(argstr="--spatialsamples %d",), - testingmode=dict(argstr="--testingmode ",), - translationscale=dict(argstr="--translationscale %f",), ) inputs = RigidRegistration.input_spec() @@ -31,8 +69,12 @@ def test_RigidRegistration_inputs(): def test_RigidRegistration_outputs(): output_map = dict( - outputtransform=dict(extensions=None,), - resampledmovingfilename=dict(extensions=None,), + outputtransform=dict( + extensions=None, + ), + resampledmovingfilename=dict( + extensions=None, + ), ) outputs = RigidRegistration.output_spec() diff --git a/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py b/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py index 512991571e..6bc91e4d5e 100644 --- a/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py +++ b/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py @@ -4,15 +4,43 @@ def test_IntensityDifferenceMetric_inputs(): input_map = dict( - args=dict(argstr="%s",), - baselineSegmentationVolume=dict(argstr="%s", extensions=None, position=-3,), - baselineVolume=dict(argstr="%s", extensions=None, position=-4,), - changingBandSize=dict(argstr="--changingBandSize %d",), - environ=dict(nohash=True, usedefault=True,), - followupVolume=dict(argstr="%s", extensions=None, position=-2,), - outputVolume=dict(argstr="%s", hash_files=False, position=-1,), - reportFileName=dict(argstr="--reportFileName %s", hash_files=False,), - sensitivityThreshold=dict(argstr="--sensitivityThreshold %f",), + args=dict( + argstr="%s", + ), + baselineSegmentationVolume=dict( + argstr="%s", + extensions=None, + position=-3, + ), + baselineVolume=dict( + argstr="%s", + extensions=None, + position=-4, + ), + changingBandSize=dict( + argstr="--changingBandSize %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + followupVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + reportFileName=dict( + argstr="--reportFileName %s", + hash_files=False, + ), + sensitivityThreshold=dict( + argstr="--sensitivityThreshold %f", + ), ) inputs = IntensityDifferenceMetric.input_spec() @@ -23,8 +51,13 @@ def test_IntensityDifferenceMetric_inputs(): def test_IntensityDifferenceMetric_outputs(): output_map = dict( - outputVolume=dict(extensions=None, position=-1,), - reportFileName=dict(extensions=None,), + outputVolume=dict( + extensions=None, + position=-1, + ), + reportFileName=dict( + extensions=None, + ), ) outputs = IntensityDifferenceMetric.output_spec() diff --git 
a/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py b/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py index 61ee94ec6e..aec22b541f 100644 --- a/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py +++ b/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py @@ -4,18 +4,47 @@ def test_PETStandardUptakeValueComputation_inputs(): input_map = dict( - OutputLabel=dict(argstr="--OutputLabel %s",), - OutputLabelValue=dict(argstr="--OutputLabelValue %s",), - SUVMax=dict(argstr="--SUVMax %s",), - SUVMean=dict(argstr="--SUVMean %s",), - SUVMin=dict(argstr="--SUVMin %s",), - args=dict(argstr="%s",), - color=dict(argstr="--color %s", extensions=None,), - csvFile=dict(argstr="--csvFile %s", hash_files=False,), - environ=dict(nohash=True, usedefault=True,), - labelMap=dict(argstr="--labelMap %s", extensions=None,), - petDICOMPath=dict(argstr="--petDICOMPath %s",), - petVolume=dict(argstr="--petVolume %s", extensions=None,), + OutputLabel=dict( + argstr="--OutputLabel %s", + ), + OutputLabelValue=dict( + argstr="--OutputLabelValue %s", + ), + SUVMax=dict( + argstr="--SUVMax %s", + ), + SUVMean=dict( + argstr="--SUVMean %s", + ), + SUVMin=dict( + argstr="--SUVMin %s", + ), + args=dict( + argstr="%s", + ), + color=dict( + argstr="--color %s", + extensions=None, + ), + csvFile=dict( + argstr="--csvFile %s", + hash_files=False, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + labelMap=dict( + argstr="--labelMap %s", + extensions=None, + ), + petDICOMPath=dict( + argstr="--petDICOMPath %s", + ), + petVolume=dict( + argstr="--petVolume %s", + extensions=None, + ), ) inputs = PETStandardUptakeValueComputation.input_spec() @@ -25,7 +54,11 @@ def test_PETStandardUptakeValueComputation_inputs(): def test_PETStandardUptakeValueComputation_outputs(): - output_map = dict(csvFile=dict(extensions=None,),) + output_map = dict( + csvFile=dict( + extensions=None, + ), + ) outputs = PETStandardUptakeValueComputation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py b/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py index e36498110b..58c7c49f32 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py @@ -4,12 +4,26 @@ def test_ACPCTransform_inputs(): input_map = dict( - acpc=dict(argstr="--acpc %s...",), - args=dict(argstr="%s",), - debugSwitch=dict(argstr="--debugSwitch ",), - environ=dict(nohash=True, usedefault=True,), - midline=dict(argstr="--midline %s...",), - outputTransform=dict(argstr="--outputTransform %s", hash_files=False,), + acpc=dict( + argstr="--acpc %s...", + ), + args=dict( + argstr="%s", + ), + debugSwitch=dict( + argstr="--debugSwitch ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + midline=dict( + argstr="--midline %s...", + ), + outputTransform=dict( + argstr="--outputTransform %s", + hash_files=False, + ), ) inputs = ACPCTransform.input_spec() @@ -19,7 +33,11 @@ def test_ACPCTransform_inputs(): def test_ACPCTransform_outputs(): - output_map = dict(outputTransform=dict(extensions=None,),) + output_map = dict( + outputTransform=dict( + extensions=None, + ), + ) outputs = ACPCTransform.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py index 4136c8105d..d1c8055df3 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py @@ -4,64 +4,149 @@ def test_BRAINSDemonWarp_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), arrayOfPyramidLevelIterations=dict( - argstr="--arrayOfPyramidLevelIterations %s", sep=",", + argstr="--arrayOfPyramidLevelIterations %s", + sep=",", + ), + backgroundFillValue=dict( + argstr="--backgroundFillValue %d", ), - backgroundFillValue=dict(argstr="--backgroundFillValue %d",), checkerboardPatternSubdivisions=dict( - argstr="--checkerboardPatternSubdivisions %s", sep=",", - ), - environ=dict(nohash=True, usedefault=True,), - fixedBinaryVolume=dict(argstr="--fixedBinaryVolume %s", extensions=None,), - fixedVolume=dict(argstr="--fixedVolume %s", extensions=None,), - gradient_type=dict(argstr="--gradient_type %s",), - gui=dict(argstr="--gui ",), - histogramMatch=dict(argstr="--histogramMatch ",), + argstr="--checkerboardPatternSubdivisions %s", + sep=",", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedBinaryVolume=dict( + argstr="--fixedBinaryVolume %s", + extensions=None, + ), + fixedVolume=dict( + argstr="--fixedVolume %s", + extensions=None, + ), + gradient_type=dict( + argstr="--gradient_type %s", + ), + gui=dict( + argstr="--gui ", + ), + histogramMatch=dict( + argstr="--histogramMatch ", + ), initializeWithDisplacementField=dict( - argstr="--initializeWithDisplacementField %s", extensions=None, + argstr="--initializeWithDisplacementField %s", + extensions=None, ), initializeWithTransform=dict( - argstr="--initializeWithTransform %s", extensions=None, - ), - inputPixelType=dict(argstr="--inputPixelType %s",), - interpolationMode=dict(argstr="--interpolationMode %s",), - lowerThresholdForBOBF=dict(argstr="--lowerThresholdForBOBF %d",), - maskProcessingMode=dict(argstr="--maskProcessingMode %s",), - max_step_length=dict(argstr="--max_step_length %f",), - medianFilterSize=dict(argstr="--medianFilterSize %s", sep=",",), - minimumFixedPyramid=dict(argstr="--minimumFixedPyramid %s", sep=",",), - minimumMovingPyramid=dict(argstr="--minimumMovingPyramid %s", sep=",",), - movingBinaryVolume=dict(argstr="--movingBinaryVolume %s", extensions=None,), - movingVolume=dict(argstr="--movingVolume %s", extensions=None,), - neighborhoodForBOBF=dict(argstr="--neighborhoodForBOBF %s", sep=",",), + argstr="--initializeWithTransform %s", + extensions=None, + ), + inputPixelType=dict( + argstr="--inputPixelType %s", + ), + interpolationMode=dict( + argstr="--interpolationMode %s", + ), + lowerThresholdForBOBF=dict( + argstr="--lowerThresholdForBOBF %d", + ), + maskProcessingMode=dict( + argstr="--maskProcessingMode %s", + ), + max_step_length=dict( + argstr="--max_step_length %f", + ), + medianFilterSize=dict( + argstr="--medianFilterSize %s", + sep=",", + ), + minimumFixedPyramid=dict( + argstr="--minimumFixedPyramid %s", + sep=",", + ), + minimumMovingPyramid=dict( + argstr="--minimumMovingPyramid %s", + sep=",", + ), + movingBinaryVolume=dict( + argstr="--movingBinaryVolume %s", + extensions=None, + ), + movingVolume=dict( + argstr="--movingVolume %s", + extensions=None, + ), + neighborhoodForBOBF=dict( + argstr="--neighborhoodForBOBF %s", + sep=",", + ), numberOfBCHApproximationTerms=dict( 
argstr="--numberOfBCHApproximationTerms %d", ), - numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d",), - numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d",), - numberOfPyramidLevels=dict(argstr="--numberOfPyramidLevels %d",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), + numberOfHistogramBins=dict( + argstr="--numberOfHistogramBins %d", + ), + numberOfMatchPoints=dict( + argstr="--numberOfMatchPoints %d", + ), + numberOfPyramidLevels=dict( + argstr="--numberOfPyramidLevels %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), outputCheckerboardVolume=dict( - argstr="--outputCheckerboardVolume %s", hash_files=False, + argstr="--outputCheckerboardVolume %s", + hash_files=False, + ), + outputDebug=dict( + argstr="--outputDebug ", ), - outputDebug=dict(argstr="--outputDebug ",), outputDisplacementFieldPrefix=dict( argstr="--outputDisplacementFieldPrefix %s", ), outputDisplacementFieldVolume=dict( - argstr="--outputDisplacementFieldVolume %s", hash_files=False, - ), - outputNormalized=dict(argstr="--outputNormalized ",), - outputPixelType=dict(argstr="--outputPixelType %s",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - promptUser=dict(argstr="--promptUser ",), - registrationFilterType=dict(argstr="--registrationFilterType %s",), - seedForBOBF=dict(argstr="--seedForBOBF %s", sep=",",), - smoothDisplacementFieldSigma=dict(argstr="--smoothDisplacementFieldSigma %f",), - upFieldSmoothing=dict(argstr="--upFieldSmoothing %f",), - upperThresholdForBOBF=dict(argstr="--upperThresholdForBOBF %d",), - use_vanilla_dem=dict(argstr="--use_vanilla_dem ",), + argstr="--outputDisplacementFieldVolume %s", + hash_files=False, + ), + outputNormalized=dict( + argstr="--outputNormalized ", + ), + outputPixelType=dict( + argstr="--outputPixelType %s", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + promptUser=dict( + argstr="--promptUser ", + ), + registrationFilterType=dict( + argstr="--registrationFilterType %s", + ), + seedForBOBF=dict( + argstr="--seedForBOBF %s", + sep=",", + ), + smoothDisplacementFieldSigma=dict( + argstr="--smoothDisplacementFieldSigma %f", + ), + upFieldSmoothing=dict( + argstr="--upFieldSmoothing %f", + ), + upperThresholdForBOBF=dict( + argstr="--upperThresholdForBOBF %d", + ), + use_vanilla_dem=dict( + argstr="--use_vanilla_dem ", + ), ) inputs = BRAINSDemonWarp.input_spec() @@ -72,9 +157,15 @@ def test_BRAINSDemonWarp_inputs(): def test_BRAINSDemonWarp_outputs(): output_map = dict( - outputCheckerboardVolume=dict(extensions=None,), - outputDisplacementFieldVolume=dict(extensions=None,), - outputVolume=dict(extensions=None,), + outputCheckerboardVolume=dict( + extensions=None, + ), + outputDisplacementFieldVolume=dict( + extensions=None, + ), + outputVolume=dict( + extensions=None, + ), ) outputs = BRAINSDemonWarp.output_spec() diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py index 9d558f1e68..0d7b124635 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py @@ -13,78 +13,205 @@ def test_BRAINSFit_inputs(): NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_02=dict( argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_02 ", ), - ROIAutoClosingSize=dict(argstr="--ROIAutoClosingSize %f",), - ROIAutoDilateSize=dict(argstr="--ROIAutoDilateSize %f",), - args=dict(argstr="%s",), - 
backgroundFillValue=dict(argstr="--backgroundFillValue %f",), - bsplineTransform=dict(argstr="--bsplineTransform %s", hash_files=False,), + ROIAutoClosingSize=dict( + argstr="--ROIAutoClosingSize %f", + ), + ROIAutoDilateSize=dict( + argstr="--ROIAutoDilateSize %f", + ), + args=dict( + argstr="%s", + ), + backgroundFillValue=dict( + argstr="--backgroundFillValue %f", + ), + bsplineTransform=dict( + argstr="--bsplineTransform %s", + hash_files=False, + ), costFunctionConvergenceFactor=dict( argstr="--costFunctionConvergenceFactor %f", ), - costMetric=dict(argstr="--costMetric %s",), - debugLevel=dict(argstr="--debugLevel %d",), - environ=dict(nohash=True, usedefault=True,), - failureExitCode=dict(argstr="--failureExitCode %d",), - fixedBinaryVolume=dict(argstr="--fixedBinaryVolume %s", extensions=None,), - fixedVolume=dict(argstr="--fixedVolume %s", extensions=None,), - fixedVolumeTimeIndex=dict(argstr="--fixedVolumeTimeIndex %d",), - forceMINumberOfThreads=dict(argstr="--forceMINumberOfThreads %d",), - gui=dict(argstr="--gui ",), - histogramMatch=dict(argstr="--histogramMatch ",), - initialTransform=dict(argstr="--initialTransform %s", extensions=None,), - initializeTransformMode=dict(argstr="--initializeTransformMode %s",), - interpolationMode=dict(argstr="--interpolationMode %s",), - linearTransform=dict(argstr="--linearTransform %s", hash_files=False,), - maskInferiorCutOffFromCenter=dict(argstr="--maskInferiorCutOffFromCenter %f",), - maskProcessingMode=dict(argstr="--maskProcessingMode %s",), - maxBSplineDisplacement=dict(argstr="--maxBSplineDisplacement %f",), - maximumStepLength=dict(argstr="--maximumStepLength %f",), - medianFilterSize=dict(argstr="--medianFilterSize %s", sep=",",), - minimumStepLength=dict(argstr="--minimumStepLength %s", sep=",",), - movingBinaryVolume=dict(argstr="--movingBinaryVolume %s", extensions=None,), - movingVolume=dict(argstr="--movingVolume %s", extensions=None,), - movingVolumeTimeIndex=dict(argstr="--movingVolumeTimeIndex %d",), - numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d",), - numberOfIterations=dict(argstr="--numberOfIterations %s", sep=",",), - numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d",), - numberOfSamples=dict(argstr="--numberOfSamples %d",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), + costMetric=dict( + argstr="--costMetric %s", + ), + debugLevel=dict( + argstr="--debugLevel %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + failureExitCode=dict( + argstr="--failureExitCode %d", + ), + fixedBinaryVolume=dict( + argstr="--fixedBinaryVolume %s", + extensions=None, + ), + fixedVolume=dict( + argstr="--fixedVolume %s", + extensions=None, + ), + fixedVolumeTimeIndex=dict( + argstr="--fixedVolumeTimeIndex %d", + ), + forceMINumberOfThreads=dict( + argstr="--forceMINumberOfThreads %d", + ), + gui=dict( + argstr="--gui ", + ), + histogramMatch=dict( + argstr="--histogramMatch ", + ), + initialTransform=dict( + argstr="--initialTransform %s", + extensions=None, + ), + initializeTransformMode=dict( + argstr="--initializeTransformMode %s", + ), + interpolationMode=dict( + argstr="--interpolationMode %s", + ), + linearTransform=dict( + argstr="--linearTransform %s", + hash_files=False, + ), + maskInferiorCutOffFromCenter=dict( + argstr="--maskInferiorCutOffFromCenter %f", + ), + maskProcessingMode=dict( + argstr="--maskProcessingMode %s", + ), + maxBSplineDisplacement=dict( + argstr="--maxBSplineDisplacement %f", + ), + maximumStepLength=dict( + argstr="--maximumStepLength %f", + ), + 
medianFilterSize=dict( + argstr="--medianFilterSize %s", + sep=",", + ), + minimumStepLength=dict( + argstr="--minimumStepLength %s", + sep=",", + ), + movingBinaryVolume=dict( + argstr="--movingBinaryVolume %s", + extensions=None, + ), + movingVolume=dict( + argstr="--movingVolume %s", + extensions=None, + ), + movingVolumeTimeIndex=dict( + argstr="--movingVolumeTimeIndex %d", + ), + numberOfHistogramBins=dict( + argstr="--numberOfHistogramBins %d", + ), + numberOfIterations=dict( + argstr="--numberOfIterations %s", + sep=",", + ), + numberOfMatchPoints=dict( + argstr="--numberOfMatchPoints %d", + ), + numberOfSamples=dict( + argstr="--numberOfSamples %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), outputFixedVolumeROI=dict( - argstr="--outputFixedVolumeROI %s", hash_files=False, + argstr="--outputFixedVolumeROI %s", + hash_files=False, ), outputMovingVolumeROI=dict( - argstr="--outputMovingVolumeROI %s", hash_files=False, - ), - outputTransform=dict(argstr="--outputTransform %s", hash_files=False,), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - outputVolumePixelType=dict(argstr="--outputVolumePixelType %s",), - permitParameterVariation=dict(argstr="--permitParameterVariation %s", sep=",",), - projectedGradientTolerance=dict(argstr="--projectedGradientTolerance %f",), - promptUser=dict(argstr="--promptUser ",), - relaxationFactor=dict(argstr="--relaxationFactor %f",), - removeIntensityOutliers=dict(argstr="--removeIntensityOutliers %f",), - reproportionScale=dict(argstr="--reproportionScale %f",), - scaleOutputValues=dict(argstr="--scaleOutputValues ",), - skewScale=dict(argstr="--skewScale %f",), - splineGridSize=dict(argstr="--splineGridSize %s", sep=",",), + argstr="--outputMovingVolumeROI %s", + hash_files=False, + ), + outputTransform=dict( + argstr="--outputTransform %s", + hash_files=False, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + outputVolumePixelType=dict( + argstr="--outputVolumePixelType %s", + ), + permitParameterVariation=dict( + argstr="--permitParameterVariation %s", + sep=",", + ), + projectedGradientTolerance=dict( + argstr="--projectedGradientTolerance %f", + ), + promptUser=dict( + argstr="--promptUser ", + ), + relaxationFactor=dict( + argstr="--relaxationFactor %f", + ), + removeIntensityOutliers=dict( + argstr="--removeIntensityOutliers %f", + ), + reproportionScale=dict( + argstr="--reproportionScale %f", + ), + scaleOutputValues=dict( + argstr="--scaleOutputValues ", + ), + skewScale=dict( + argstr="--skewScale %f", + ), + splineGridSize=dict( + argstr="--splineGridSize %s", + sep=",", + ), strippedOutputTransform=dict( - argstr="--strippedOutputTransform %s", hash_files=False, + argstr="--strippedOutputTransform %s", + hash_files=False, + ), + transformType=dict( + argstr="--transformType %s", + sep=",", + ), + translationScale=dict( + argstr="--translationScale %f", + ), + useAffine=dict( + argstr="--useAffine ", + ), + useBSpline=dict( + argstr="--useBSpline ", ), - transformType=dict(argstr="--transformType %s", sep=",",), - translationScale=dict(argstr="--translationScale %f",), - useAffine=dict(argstr="--useAffine ",), - useBSpline=dict(argstr="--useBSpline ",), useCachingOfBSplineWeightsMode=dict( argstr="--useCachingOfBSplineWeightsMode %s", ), useExplicitPDFDerivativesMode=dict( argstr="--useExplicitPDFDerivativesMode %s", ), - useRigid=dict(argstr="--useRigid ",), - useScaleSkewVersor3D=dict(argstr="--useScaleSkewVersor3D ",), - 
useScaleVersor3D=dict(argstr="--useScaleVersor3D ",), - writeOutputTransformInFloat=dict(argstr="--writeOutputTransformInFloat ",), - writeTransformOnFailure=dict(argstr="--writeTransformOnFailure ",), + useRigid=dict( + argstr="--useRigid ", + ), + useScaleSkewVersor3D=dict( + argstr="--useScaleSkewVersor3D ", + ), + useScaleVersor3D=dict( + argstr="--useScaleVersor3D ", + ), + writeOutputTransformInFloat=dict( + argstr="--writeOutputTransformInFloat ", + ), + writeTransformOnFailure=dict( + argstr="--writeTransformOnFailure ", + ), ) inputs = BRAINSFit.input_spec() @@ -95,13 +222,27 @@ def test_BRAINSFit_inputs(): def test_BRAINSFit_outputs(): output_map = dict( - bsplineTransform=dict(extensions=None,), - linearTransform=dict(extensions=None,), - outputFixedVolumeROI=dict(extensions=None,), - outputMovingVolumeROI=dict(extensions=None,), - outputTransform=dict(extensions=None,), - outputVolume=dict(extensions=None,), - strippedOutputTransform=dict(extensions=None,), + bsplineTransform=dict( + extensions=None, + ), + linearTransform=dict( + extensions=None, + ), + outputFixedVolumeROI=dict( + extensions=None, + ), + outputMovingVolumeROI=dict( + extensions=None, + ), + outputTransform=dict( + extensions=None, + ), + outputVolume=dict( + extensions=None, + ), + strippedOutputTransform=dict( + extensions=None, + ), ) outputs = BRAINSFit.output_spec() diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py index 2d94f19a5f..46d175da07 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py @@ -4,19 +4,52 @@ def test_BRAINSResample_inputs(): input_map = dict( - args=dict(argstr="%s",), - defaultValue=dict(argstr="--defaultValue %f",), - deformationVolume=dict(argstr="--deformationVolume %s", extensions=None,), - environ=dict(nohash=True, usedefault=True,), - gridSpacing=dict(argstr="--gridSpacing %s", sep=",",), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - interpolationMode=dict(argstr="--interpolationMode %s",), - inverseTransform=dict(argstr="--inverseTransform ",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - pixelType=dict(argstr="--pixelType %s",), - referenceVolume=dict(argstr="--referenceVolume %s", extensions=None,), - warpTransform=dict(argstr="--warpTransform %s", extensions=None,), + args=dict( + argstr="%s", + ), + defaultValue=dict( + argstr="--defaultValue %f", + ), + deformationVolume=dict( + argstr="--deformationVolume %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gridSpacing=dict( + argstr="--gridSpacing %s", + sep=",", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + interpolationMode=dict( + argstr="--interpolationMode %s", + ), + inverseTransform=dict( + argstr="--inverseTransform ", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + pixelType=dict( + argstr="--pixelType %s", + ), + referenceVolume=dict( + argstr="--referenceVolume %s", + extensions=None, + ), + warpTransform=dict( + argstr="--warpTransform %s", + extensions=None, + ), ) inputs = BRAINSResample.input_spec() @@ -26,7 +59,11 @@ def test_BRAINSResample_inputs(): def test_BRAINSResample_outputs(): - output_map = 
dict(outputVolume=dict(extensions=None,),) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = BRAINSResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py b/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py index 65c6016db6..6b511790c7 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py @@ -4,14 +4,32 @@ def test_FiducialRegistration_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - fixedLandmarks=dict(argstr="--fixedLandmarks %s...",), - movingLandmarks=dict(argstr="--movingLandmarks %s...",), - outputMessage=dict(argstr="--outputMessage %s",), - rms=dict(argstr="--rms %f",), - saveTransform=dict(argstr="--saveTransform %s", hash_files=False,), - transformType=dict(argstr="--transformType %s",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedLandmarks=dict( + argstr="--fixedLandmarks %s...", + ), + movingLandmarks=dict( + argstr="--movingLandmarks %s...", + ), + outputMessage=dict( + argstr="--outputMessage %s", + ), + rms=dict( + argstr="--rms %f", + ), + saveTransform=dict( + argstr="--saveTransform %s", + hash_files=False, + ), + transformType=dict( + argstr="--transformType %s", + ), ) inputs = FiducialRegistration.input_spec() @@ -21,7 +39,11 @@ def test_FiducialRegistration_inputs(): def test_FiducialRegistration_outputs(): - output_map = dict(saveTransform=dict(extensions=None,),) + output_map = dict( + saveTransform=dict( + extensions=None, + ), + ) outputs = FiducialRegistration.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py b/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py index 098360c1e3..af8bac8680 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py @@ -4,65 +4,151 @@ def test_VBRAINSDemonWarp_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), arrayOfPyramidLevelIterations=dict( - argstr="--arrayOfPyramidLevelIterations %s", sep=",", + argstr="--arrayOfPyramidLevelIterations %s", + sep=",", + ), + backgroundFillValue=dict( + argstr="--backgroundFillValue %d", ), - backgroundFillValue=dict(argstr="--backgroundFillValue %d",), checkerboardPatternSubdivisions=dict( - argstr="--checkerboardPatternSubdivisions %s", sep=",", - ), - environ=dict(nohash=True, usedefault=True,), - fixedBinaryVolume=dict(argstr="--fixedBinaryVolume %s", extensions=None,), - fixedVolume=dict(argstr="--fixedVolume %s...",), - gradient_type=dict(argstr="--gradient_type %s",), - gui=dict(argstr="--gui ",), - histogramMatch=dict(argstr="--histogramMatch ",), + argstr="--checkerboardPatternSubdivisions %s", + sep=",", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedBinaryVolume=dict( + argstr="--fixedBinaryVolume %s", + extensions=None, + ), + fixedVolume=dict( + argstr="--fixedVolume %s...", + ), + gradient_type=dict( + argstr="--gradient_type %s", + ), + gui=dict( + argstr="--gui ", + ), + histogramMatch=dict( + argstr="--histogramMatch ", + ), initializeWithDisplacementField=dict( - argstr="--initializeWithDisplacementField %s", 
extensions=None, + argstr="--initializeWithDisplacementField %s", + extensions=None, ), initializeWithTransform=dict( - argstr="--initializeWithTransform %s", extensions=None, - ), - inputPixelType=dict(argstr="--inputPixelType %s",), - interpolationMode=dict(argstr="--interpolationMode %s",), - lowerThresholdForBOBF=dict(argstr="--lowerThresholdForBOBF %d",), - makeBOBF=dict(argstr="--makeBOBF ",), - max_step_length=dict(argstr="--max_step_length %f",), - medianFilterSize=dict(argstr="--medianFilterSize %s", sep=",",), - minimumFixedPyramid=dict(argstr="--minimumFixedPyramid %s", sep=",",), - minimumMovingPyramid=dict(argstr="--minimumMovingPyramid %s", sep=",",), - movingBinaryVolume=dict(argstr="--movingBinaryVolume %s", extensions=None,), - movingVolume=dict(argstr="--movingVolume %s...",), - neighborhoodForBOBF=dict(argstr="--neighborhoodForBOBF %s", sep=",",), + argstr="--initializeWithTransform %s", + extensions=None, + ), + inputPixelType=dict( + argstr="--inputPixelType %s", + ), + interpolationMode=dict( + argstr="--interpolationMode %s", + ), + lowerThresholdForBOBF=dict( + argstr="--lowerThresholdForBOBF %d", + ), + makeBOBF=dict( + argstr="--makeBOBF ", + ), + max_step_length=dict( + argstr="--max_step_length %f", + ), + medianFilterSize=dict( + argstr="--medianFilterSize %s", + sep=",", + ), + minimumFixedPyramid=dict( + argstr="--minimumFixedPyramid %s", + sep=",", + ), + minimumMovingPyramid=dict( + argstr="--minimumMovingPyramid %s", + sep=",", + ), + movingBinaryVolume=dict( + argstr="--movingBinaryVolume %s", + extensions=None, + ), + movingVolume=dict( + argstr="--movingVolume %s...", + ), + neighborhoodForBOBF=dict( + argstr="--neighborhoodForBOBF %s", + sep=",", + ), numberOfBCHApproximationTerms=dict( argstr="--numberOfBCHApproximationTerms %d", ), - numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d",), - numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d",), - numberOfPyramidLevels=dict(argstr="--numberOfPyramidLevels %d",), - numberOfThreads=dict(argstr="--numberOfThreads %d",), + numberOfHistogramBins=dict( + argstr="--numberOfHistogramBins %d", + ), + numberOfMatchPoints=dict( + argstr="--numberOfMatchPoints %d", + ), + numberOfPyramidLevels=dict( + argstr="--numberOfPyramidLevels %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), outputCheckerboardVolume=dict( - argstr="--outputCheckerboardVolume %s", hash_files=False, + argstr="--outputCheckerboardVolume %s", + hash_files=False, + ), + outputDebug=dict( + argstr="--outputDebug ", ), - outputDebug=dict(argstr="--outputDebug ",), outputDisplacementFieldPrefix=dict( argstr="--outputDisplacementFieldPrefix %s", ), outputDisplacementFieldVolume=dict( - argstr="--outputDisplacementFieldVolume %s", hash_files=False, - ), - outputNormalized=dict(argstr="--outputNormalized ",), - outputPixelType=dict(argstr="--outputPixelType %s",), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False,), - promptUser=dict(argstr="--promptUser ",), - registrationFilterType=dict(argstr="--registrationFilterType %s",), - seedForBOBF=dict(argstr="--seedForBOBF %s", sep=",",), - smoothDisplacementFieldSigma=dict(argstr="--smoothDisplacementFieldSigma %f",), - upFieldSmoothing=dict(argstr="--upFieldSmoothing %f",), - upperThresholdForBOBF=dict(argstr="--upperThresholdForBOBF %d",), - use_vanilla_dem=dict(argstr="--use_vanilla_dem ",), - weightFactors=dict(argstr="--weightFactors %s", sep=",",), + argstr="--outputDisplacementFieldVolume %s", + hash_files=False, + ), + outputNormalized=dict( + 
argstr="--outputNormalized ", + ), + outputPixelType=dict( + argstr="--outputPixelType %s", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + promptUser=dict( + argstr="--promptUser ", + ), + registrationFilterType=dict( + argstr="--registrationFilterType %s", + ), + seedForBOBF=dict( + argstr="--seedForBOBF %s", + sep=",", + ), + smoothDisplacementFieldSigma=dict( + argstr="--smoothDisplacementFieldSigma %f", + ), + upFieldSmoothing=dict( + argstr="--upFieldSmoothing %f", + ), + upperThresholdForBOBF=dict( + argstr="--upperThresholdForBOBF %d", + ), + use_vanilla_dem=dict( + argstr="--use_vanilla_dem ", + ), + weightFactors=dict( + argstr="--weightFactors %s", + sep=",", + ), ) inputs = VBRAINSDemonWarp.input_spec() @@ -73,9 +159,15 @@ def test_VBRAINSDemonWarp_inputs(): def test_VBRAINSDemonWarp_outputs(): output_map = dict( - outputCheckerboardVolume=dict(extensions=None,), - outputDisplacementFieldVolume=dict(extensions=None,), - outputVolume=dict(extensions=None,), + outputCheckerboardVolume=dict( + extensions=None, + ), + outputDisplacementFieldVolume=dict( + extensions=None, + ), + outputVolume=dict( + extensions=None, + ), ) outputs = VBRAINSDemonWarp.output_spec() diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py index c88798ec2d..8990caaf1a 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py @@ -4,19 +4,43 @@ def test_BRAINSROIAuto_inputs(): input_map = dict( - ROIAutoDilateSize=dict(argstr="--ROIAutoDilateSize %f",), - args=dict(argstr="%s",), - closingSize=dict(argstr="--closingSize %f",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="--inputVolume %s", extensions=None,), - numberOfThreads=dict(argstr="--numberOfThreads %d",), - otsuPercentileThreshold=dict(argstr="--otsuPercentileThreshold %f",), + ROIAutoDilateSize=dict( + argstr="--ROIAutoDilateSize %f", + ), + args=dict( + argstr="%s", + ), + closingSize=dict( + argstr="--closingSize %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + otsuPercentileThreshold=dict( + argstr="--otsuPercentileThreshold %f", + ), outputClippedVolumeROI=dict( - argstr="--outputClippedVolumeROI %s", hash_files=False, + argstr="--outputClippedVolumeROI %s", + hash_files=False, + ), + outputROIMaskVolume=dict( + argstr="--outputROIMaskVolume %s", + hash_files=False, + ), + outputVolumePixelType=dict( + argstr="--outputVolumePixelType %s", + ), + thresholdCorrectionFactor=dict( + argstr="--thresholdCorrectionFactor %f", ), - outputROIMaskVolume=dict(argstr="--outputROIMaskVolume %s", hash_files=False,), - outputVolumePixelType=dict(argstr="--outputVolumePixelType %s",), - thresholdCorrectionFactor=dict(argstr="--thresholdCorrectionFactor %f",), ) inputs = BRAINSROIAuto.input_spec() @@ -27,8 +51,12 @@ def test_BRAINSROIAuto_inputs(): def test_BRAINSROIAuto_outputs(): output_map = dict( - outputClippedVolumeROI=dict(extensions=None,), - outputROIMaskVolume=dict(extensions=None,), + outputClippedVolumeROI=dict( + extensions=None, + ), + outputROIMaskVolume=dict( + extensions=None, + ), ) outputs = BRAINSROIAuto.output_spec() diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py 
b/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py index 241f58c6c0..2ed2595d4e 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py @@ -4,37 +4,81 @@ def test_EMSegmentCommandLine_inputs(): input_map = dict( - args=dict(argstr="%s",), - atlasVolumeFileNames=dict(argstr="--atlasVolumeFileNames %s...",), - disableCompression=dict(argstr="--disableCompression ",), - disableMultithreading=dict(argstr="--disableMultithreading %d",), - dontUpdateIntermediateData=dict(argstr="--dontUpdateIntermediateData %d",), - dontWriteResults=dict(argstr="--dontWriteResults ",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + atlasVolumeFileNames=dict( + argstr="--atlasVolumeFileNames %s...", + ), + disableCompression=dict( + argstr="--disableCompression ", + ), + disableMultithreading=dict( + argstr="--disableMultithreading %d", + ), + dontUpdateIntermediateData=dict( + argstr="--dontUpdateIntermediateData %d", + ), + dontWriteResults=dict( + argstr="--dontWriteResults ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), generateEmptyMRMLSceneAndQuit=dict( - argstr="--generateEmptyMRMLSceneAndQuit %s", hash_files=False, - ), - intermediateResultsDirectory=dict(argstr="--intermediateResultsDirectory %s",), - keepTempFiles=dict(argstr="--keepTempFiles ",), - loadAtlasNonCentered=dict(argstr="--loadAtlasNonCentered ",), - loadTargetCentered=dict(argstr="--loadTargetCentered ",), - mrmlSceneFileName=dict(argstr="--mrmlSceneFileName %s", extensions=None,), - parametersMRMLNodeName=dict(argstr="--parametersMRMLNodeName %s",), - registrationAffineType=dict(argstr="--registrationAffineType %d",), - registrationDeformableType=dict(argstr="--registrationDeformableType %d",), - registrationPackage=dict(argstr="--registrationPackage %s",), + argstr="--generateEmptyMRMLSceneAndQuit %s", + hash_files=False, + ), + intermediateResultsDirectory=dict( + argstr="--intermediateResultsDirectory %s", + ), + keepTempFiles=dict( + argstr="--keepTempFiles ", + ), + loadAtlasNonCentered=dict( + argstr="--loadAtlasNonCentered ", + ), + loadTargetCentered=dict( + argstr="--loadTargetCentered ", + ), + mrmlSceneFileName=dict( + argstr="--mrmlSceneFileName %s", + extensions=None, + ), + parametersMRMLNodeName=dict( + argstr="--parametersMRMLNodeName %s", + ), + registrationAffineType=dict( + argstr="--registrationAffineType %d", + ), + registrationDeformableType=dict( + argstr="--registrationDeformableType %d", + ), + registrationPackage=dict( + argstr="--registrationPackage %s", + ), resultMRMLSceneFileName=dict( - argstr="--resultMRMLSceneFileName %s", hash_files=False, + argstr="--resultMRMLSceneFileName %s", + hash_files=False, ), resultStandardVolumeFileName=dict( - argstr="--resultStandardVolumeFileName %s", extensions=None, + argstr="--resultStandardVolumeFileName %s", + extensions=None, ), resultVolumeFileName=dict( - argstr="--resultVolumeFileName %s", hash_files=False, + argstr="--resultVolumeFileName %s", + hash_files=False, + ), + targetVolumeFileNames=dict( + argstr="--targetVolumeFileNames %s...", + ), + taskPreProcessingSetting=dict( + argstr="--taskPreProcessingSetting %s", + ), + verbose=dict( + argstr="--verbose ", ), - targetVolumeFileNames=dict(argstr="--targetVolumeFileNames %s...",), - taskPreProcessingSetting=dict(argstr="--taskPreProcessingSetting %s",), - verbose=dict(argstr="--verbose ",), ) inputs = 
EMSegmentCommandLine.input_spec() @@ -45,9 +89,15 @@ def test_EMSegmentCommandLine_inputs(): def test_EMSegmentCommandLine_outputs(): output_map = dict( - generateEmptyMRMLSceneAndQuit=dict(extensions=None,), - resultMRMLSceneFileName=dict(extensions=None,), - resultVolumeFileName=dict(extensions=None,), + generateEmptyMRMLSceneAndQuit=dict( + extensions=None, + ), + resultMRMLSceneFileName=dict( + extensions=None, + ), + resultVolumeFileName=dict( + extensions=None, + ), ) outputs = EMSegmentCommandLine.output_spec() diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py index deed609acb..4bd05c6fc2 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py @@ -4,16 +4,43 @@ def test_RobustStatisticsSegmenter_inputs(): input_map = dict( - args=dict(argstr="%s",), - curvatureWeight=dict(argstr="--curvatureWeight %f",), - environ=dict(nohash=True, usedefault=True,), - expectedVolume=dict(argstr="--expectedVolume %f",), - intensityHomogeneity=dict(argstr="--intensityHomogeneity %f",), - labelImageFileName=dict(argstr="%s", extensions=None, position=-2,), - labelValue=dict(argstr="--labelValue %d",), - maxRunningTime=dict(argstr="--maxRunningTime %f",), - originalImageFileName=dict(argstr="%s", extensions=None, position=-3,), - segmentedImageFileName=dict(argstr="%s", hash_files=False, position=-1,), + args=dict( + argstr="%s", + ), + curvatureWeight=dict( + argstr="--curvatureWeight %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + expectedVolume=dict( + argstr="--expectedVolume %f", + ), + intensityHomogeneity=dict( + argstr="--intensityHomogeneity %f", + ), + labelImageFileName=dict( + argstr="%s", + extensions=None, + position=-2, + ), + labelValue=dict( + argstr="--labelValue %d", + ), + maxRunningTime=dict( + argstr="--maxRunningTime %f", + ), + originalImageFileName=dict( + argstr="%s", + extensions=None, + position=-3, + ), + segmentedImageFileName=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = RobustStatisticsSegmenter.input_spec() @@ -23,7 +50,12 @@ def test_RobustStatisticsSegmenter_inputs(): def test_RobustStatisticsSegmenter_outputs(): - output_map = dict(segmentedImageFileName=dict(extensions=None, position=-1,),) + output_map = dict( + segmentedImageFileName=dict( + extensions=None, + position=-1, + ), + ) outputs = RobustStatisticsSegmenter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py index 9da82507cc..9a308ec959 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py @@ -4,17 +4,44 @@ def test_SimpleRegionGrowingSegmentation_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume=dict(argstr="%s", extensions=None, position=-2,), - iterations=dict(argstr="--iterations %d",), - labelvalue=dict(argstr="--labelvalue %d",), - multiplier=dict(argstr="--multiplier %f",), - neighborhood=dict(argstr="--neighborhood %d",), - outputVolume=dict(argstr="%s", hash_files=False, position=-1,), - 
seed=dict(argstr="--seed %s...",), - smoothingIterations=dict(argstr="--smoothingIterations %d",), - timestep=dict(argstr="--timestep %f",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + iterations=dict( + argstr="--iterations %d", + ), + labelvalue=dict( + argstr="--labelvalue %d", + ), + multiplier=dict( + argstr="--multiplier %f", + ), + neighborhood=dict( + argstr="--neighborhood %d", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + seed=dict( + argstr="--seed %s...", + ), + smoothingIterations=dict( + argstr="--smoothingIterations %d", + ), + timestep=dict( + argstr="--timestep %f", + ), ) inputs = SimpleRegionGrowingSegmentation.input_spec() @@ -24,7 +51,12 @@ def test_SimpleRegionGrowingSegmentation_inputs(): def test_SimpleRegionGrowingSegmentation_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = SimpleRegionGrowingSegmentation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py b/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py index 6411e0ee54..b02dfd595d 100644 --- a/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py +++ b/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py @@ -4,15 +4,35 @@ def test_DicomToNrrdConverter_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputDicomDirectory=dict(argstr="--inputDicomDirectory %s",), - outputDirectory=dict(argstr="--outputDirectory %s", hash_files=False,), - outputVolume=dict(argstr="--outputVolume %s",), - smallGradientThreshold=dict(argstr="--smallGradientThreshold %f",), - useBMatrixGradientDirections=dict(argstr="--useBMatrixGradientDirections ",), - useIdentityMeaseurementFrame=dict(argstr="--useIdentityMeaseurementFrame ",), - writeProtocolGradientsFile=dict(argstr="--writeProtocolGradientsFile ",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputDicomDirectory=dict( + argstr="--inputDicomDirectory %s", + ), + outputDirectory=dict( + argstr="--outputDirectory %s", + hash_files=False, + ), + outputVolume=dict( + argstr="--outputVolume %s", + ), + smallGradientThreshold=dict( + argstr="--smallGradientThreshold %f", + ), + useBMatrixGradientDirections=dict( + argstr="--useBMatrixGradientDirections ", + ), + useIdentityMeaseurementFrame=dict( + argstr="--useIdentityMeaseurementFrame ", + ), + writeProtocolGradientsFile=dict( + argstr="--writeProtocolGradientsFile ", + ), ) inputs = DicomToNrrdConverter.input_spec() @@ -22,7 +42,9 @@ def test_DicomToNrrdConverter_inputs(): def test_DicomToNrrdConverter_outputs(): - output_map = dict(outputDirectory=dict(),) + output_map = dict( + outputDirectory=dict(), + ) outputs = DicomToNrrdConverter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py b/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py index d65723f4af..338fa49cae 100644 --- a/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py +++ b/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py @@ -4,11 +4,24 @@ def test_EMSegmentTransformToNewFormat_inputs(): input_map = 
dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputMRMLFileName=dict(argstr="--inputMRMLFileName %s", extensions=None,), - outputMRMLFileName=dict(argstr="--outputMRMLFileName %s", hash_files=False,), - templateFlag=dict(argstr="--templateFlag ",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMRMLFileName=dict( + argstr="--inputMRMLFileName %s", + extensions=None, + ), + outputMRMLFileName=dict( + argstr="--outputMRMLFileName %s", + hash_files=False, + ), + templateFlag=dict( + argstr="--templateFlag ", + ), ) inputs = EMSegmentTransformToNewFormat.input_spec() @@ -18,7 +31,11 @@ def test_EMSegmentTransformToNewFormat_inputs(): def test_EMSegmentTransformToNewFormat_outputs(): - output_map = dict(outputMRMLFileName=dict(extensions=None,),) + output_map = dict( + outputMRMLFileName=dict( + extensions=None, + ), + ) outputs = EMSegmentTransformToNewFormat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py b/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py index eaaa00d788..8bab4bd963 100644 --- a/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py +++ b/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py @@ -4,16 +4,41 @@ def test_GrayscaleModelMaker_inputs(): input_map = dict( - InputVolume=dict(argstr="%s", extensions=None, position=-2,), - OutputGeometry=dict(argstr="%s", hash_files=False, position=-1,), - args=dict(argstr="%s",), - decimate=dict(argstr="--decimate %f",), - environ=dict(nohash=True, usedefault=True,), - name=dict(argstr="--name %s",), - pointnormals=dict(argstr="--pointnormals ",), - smooth=dict(argstr="--smooth %d",), - splitnormals=dict(argstr="--splitnormals ",), - threshold=dict(argstr="--threshold %f",), + InputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + OutputGeometry=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + args=dict( + argstr="%s", + ), + decimate=dict( + argstr="--decimate %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + name=dict( + argstr="--name %s", + ), + pointnormals=dict( + argstr="--pointnormals ", + ), + smooth=dict( + argstr="--smooth %d", + ), + splitnormals=dict( + argstr="--splitnormals ", + ), + threshold=dict( + argstr="--threshold %f", + ), ) inputs = GrayscaleModelMaker.input_spec() @@ -23,7 +48,12 @@ def test_GrayscaleModelMaker_inputs(): def test_GrayscaleModelMaker_outputs(): - output_map = dict(OutputGeometry=dict(extensions=None, position=-1,),) + output_map = dict( + OutputGeometry=dict( + extensions=None, + position=-1, + ), + ) outputs = GrayscaleModelMaker.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py b/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py index 5077a0f23c..3dab7b1498 100644 --- a/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py +++ b/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py @@ -4,14 +4,35 @@ def test_LabelMapSmoothing_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - gaussianSigma=dict(argstr="--gaussianSigma %f",), - inputVolume=dict(argstr="%s", extensions=None, position=-2,), - labelToSmooth=dict(argstr="--labelToSmooth %d",), - maxRMSError=dict(argstr="--maxRMSError %f",), - numberOfIterations=dict(argstr="--numberOfIterations %d",), - 
outputVolume=dict(argstr="%s", hash_files=False, position=-1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gaussianSigma=dict( + argstr="--gaussianSigma %f", + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + labelToSmooth=dict( + argstr="--labelToSmooth %d", + ), + maxRMSError=dict( + argstr="--maxRMSError %f", + ), + numberOfIterations=dict( + argstr="--numberOfIterations %d", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = LabelMapSmoothing.input_spec() @@ -21,7 +42,12 @@ def test_LabelMapSmoothing_inputs(): def test_LabelMapSmoothing_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = LabelMapSmoothing.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_MergeModels.py b/nipype/interfaces/slicer/tests/test_auto_MergeModels.py index 43398aa45f..dc93147248 100644 --- a/nipype/interfaces/slicer/tests/test_auto_MergeModels.py +++ b/nipype/interfaces/slicer/tests/test_auto_MergeModels.py @@ -4,11 +4,28 @@ def test_MergeModels_inputs(): input_map = dict( - Model1=dict(argstr="%s", extensions=None, position=-3,), - Model2=dict(argstr="%s", extensions=None, position=-2,), - ModelOutput=dict(argstr="%s", hash_files=False, position=-1,), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + Model1=dict( + argstr="%s", + extensions=None, + position=-3, + ), + Model2=dict( + argstr="%s", + extensions=None, + position=-2, + ), + ModelOutput=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), ) inputs = MergeModels.input_spec() @@ -18,7 +35,12 @@ def test_MergeModels_inputs(): def test_MergeModels_outputs(): - output_map = dict(ModelOutput=dict(extensions=None, position=-1,),) + output_map = dict( + ModelOutput=dict( + extensions=None, + position=-1, + ), + ) outputs = MergeModels.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py b/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py index 888b7e0477..905b1417e9 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py +++ b/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py @@ -4,26 +4,72 @@ def test_ModelMaker_inputs(): input_map = dict( - InputVolume=dict(argstr="%s", extensions=None, position=-1,), - args=dict(argstr="%s",), - color=dict(argstr="--color %s", extensions=None,), - debug=dict(argstr="--debug ",), - decimate=dict(argstr="--decimate %f",), - end=dict(argstr="--end %d",), - environ=dict(nohash=True, usedefault=True,), - filtertype=dict(argstr="--filtertype %s",), - generateAll=dict(argstr="--generateAll ",), - jointsmooth=dict(argstr="--jointsmooth ",), - labels=dict(argstr="--labels %s", sep=",",), - modelSceneFile=dict(argstr="--modelSceneFile %s...", hash_files=False,), - name=dict(argstr="--name %s",), - pad=dict(argstr="--pad ",), - pointnormals=dict(argstr="--pointnormals ",), - saveIntermediateModels=dict(argstr="--saveIntermediateModels ",), - skipUnNamed=dict(argstr="--skipUnNamed ",), - smooth=dict(argstr="--smooth %d",), - splitnormals=dict(argstr="--splitnormals ",), - start=dict(argstr="--start %d",), + InputVolume=dict( + argstr="%s", + extensions=None, + position=-1, + ), + args=dict( + argstr="%s", + ), + 
color=dict( + argstr="--color %s", + extensions=None, + ), + debug=dict( + argstr="--debug ", + ), + decimate=dict( + argstr="--decimate %f", + ), + end=dict( + argstr="--end %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + filtertype=dict( + argstr="--filtertype %s", + ), + generateAll=dict( + argstr="--generateAll ", + ), + jointsmooth=dict( + argstr="--jointsmooth ", + ), + labels=dict( + argstr="--labels %s", + sep=",", + ), + modelSceneFile=dict( + argstr="--modelSceneFile %s...", + hash_files=False, + ), + name=dict( + argstr="--name %s", + ), + pad=dict( + argstr="--pad ", + ), + pointnormals=dict( + argstr="--pointnormals ", + ), + saveIntermediateModels=dict( + argstr="--saveIntermediateModels ", + ), + skipUnNamed=dict( + argstr="--skipUnNamed ", + ), + smooth=dict( + argstr="--smooth %d", + ), + splitnormals=dict( + argstr="--splitnormals ", + ), + start=dict( + argstr="--start %d", + ), ) inputs = ModelMaker.input_spec() @@ -33,7 +79,9 @@ def test_ModelMaker_inputs(): def test_ModelMaker_outputs(): - output_map = dict(modelSceneFile=dict(),) + output_map = dict( + modelSceneFile=dict(), + ) outputs = ModelMaker.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py b/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py index ad7c305824..8449c15fce 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py +++ b/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py @@ -4,12 +4,31 @@ def test_ModelToLabelMap_inputs(): input_map = dict( - InputVolume=dict(argstr="%s", extensions=None, position=-3,), - OutputVolume=dict(argstr="%s", hash_files=False, position=-1,), - args=dict(argstr="%s",), - distance=dict(argstr="--distance %f",), - environ=dict(nohash=True, usedefault=True,), - surface=dict(argstr="%s", extensions=None, position=-2,), + InputVolume=dict( + argstr="%s", + extensions=None, + position=-3, + ), + OutputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + args=dict( + argstr="%s", + ), + distance=dict( + argstr="--distance %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + surface=dict( + argstr="%s", + extensions=None, + position=-2, + ), ) inputs = ModelToLabelMap.input_spec() @@ -19,7 +38,12 @@ def test_ModelToLabelMap_inputs(): def test_ModelToLabelMap_outputs(): - output_map = dict(OutputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + OutputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = ModelToLabelMap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py b/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py index 97e7d4ae38..e2b4a1a2f7 100644 --- a/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py +++ b/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py @@ -4,11 +4,26 @@ def test_OrientScalarVolume_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - inputVolume1=dict(argstr="%s", extensions=None, position=-2,), - orientation=dict(argstr="--orientation %s",), - outputVolume=dict(argstr="%s", hash_files=False, position=-1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume1=dict( + argstr="%s", + extensions=None, + position=-2, + ), + orientation=dict( + argstr="--orientation %s", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + 
position=-1, + ), ) inputs = OrientScalarVolume.input_spec() @@ -18,7 +33,12 @@ def test_OrientScalarVolume_inputs(): def test_OrientScalarVolume_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1,),) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = OrientScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py b/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py index c4b12dc7a0..77498c0b08 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py +++ b/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py @@ -4,11 +4,28 @@ def test_ProbeVolumeWithModel_inputs(): input_map = dict( - InputModel=dict(argstr="%s", extensions=None, position=-2,), - InputVolume=dict(argstr="%s", extensions=None, position=-3,), - OutputModel=dict(argstr="%s", hash_files=False, position=-1,), - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + InputModel=dict( + argstr="%s", + extensions=None, + position=-2, + ), + InputVolume=dict( + argstr="%s", + extensions=None, + position=-3, + ), + OutputModel=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), ) inputs = ProbeVolumeWithModel.input_spec() @@ -18,7 +35,12 @@ def test_ProbeVolumeWithModel_inputs(): def test_ProbeVolumeWithModel_outputs(): - output_map = dict(OutputModel=dict(extensions=None, position=-1,),) + output_map = dict( + OutputModel=dict( + extensions=None, + position=-1, + ), + ) outputs = ProbeVolumeWithModel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py b/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py index dda2e3d8f8..7a16ed38bb 100644 --- a/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py +++ b/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py @@ -4,7 +4,13 @@ def test_SlicerCommandLine_inputs(): input_map = dict( - args=dict(argstr="%s",), environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), ) inputs = SlicerCommandLine.input_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py b/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py index 771cb640b1..15fe9399ed 100644 --- a/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py +++ b/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py @@ -4,12 +4,20 @@ def test_Analyze2nii_inputs(): input_map = dict( - analyze_file=dict(extensions=None, mandatory=True,), + analyze_file=dict( + extensions=None, + mandatory=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), + mfile=dict( + usedefault=True, + ), paths=dict(), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = Analyze2nii.input_spec() @@ -21,11 +29,18 @@ def test_Analyze2nii_inputs(): def test_Analyze2nii_outputs(): output_map = dict( matlab_cmd=dict(), - mfile=dict(usedefault=True,), - nifti_file=dict(extensions=None,), + mfile=dict( + usedefault=True, + ), + nifti_file=dict( + extensions=None, + ), paths=dict(), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) outputs = Analyze2nii.output_spec() diff --git 
a/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py b/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py index 6e741ae607..d41a0fb4b6 100644 --- a/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py +++ b/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py @@ -4,11 +4,22 @@ def test_ApplyDeformations_inputs(): input_map = dict( - deformation_field=dict(extensions=None, field="comp{1}.def", mandatory=True,), - in_files=dict(field="fnames", mandatory=True,), - interp=dict(field="interp",), + deformation_field=dict( + extensions=None, + field="comp{1}.def", + mandatory=True, + ), + in_files=dict( + field="fnames", + mandatory=True, + ), + interp=dict( + field="interp", + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), + mfile=dict( + usedefault=True, + ), paths=dict(), reference_volume=dict( extensions=[".hdr", ".img", ".img.gz", ".nii"], @@ -16,7 +27,10 @@ def test_ApplyDeformations_inputs(): mandatory=True, ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = ApplyDeformations.input_spec() @@ -26,7 +40,9 @@ def test_ApplyDeformations_inputs(): def test_ApplyDeformations_outputs(): - output_map = dict(out_files=dict(),) + output_map = dict( + out_files=dict(), + ) outputs = ApplyDeformations.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py b/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py index b9389091b3..c652bd7e12 100644 --- a/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py +++ b/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py @@ -4,24 +4,43 @@ def test_ApplyInverseDeformation_inputs(): input_map = dict( - bounding_box=dict(field="comp{1}.inv.comp{1}.sn2def.bb",), + bounding_box=dict( + field="comp{1}.inv.comp{1}.sn2def.bb", + ), deformation=dict( extensions=None, field="comp{1}.inv.comp{1}.sn2def.matname", xor=["deformation_field"], ), deformation_field=dict( - extensions=None, field="comp{1}.inv.comp{1}.def", xor=["deformation"], + extensions=None, + field="comp{1}.inv.comp{1}.def", + xor=["deformation"], + ), + in_files=dict( + field="fnames", + mandatory=True, + ), + interpolation=dict( + field="interp", ), - in_files=dict(field="fnames", mandatory=True,), - interpolation=dict(field="interp",), matlab_cmd=dict(), - mfile=dict(usedefault=True,), + mfile=dict( + usedefault=True, + ), paths=dict(), - target=dict(extensions=None, field="comp{1}.inv.space",), + target=dict( + extensions=None, + field="comp{1}.inv.space", + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), - voxel_sizes=dict(field="comp{1}.inv.comp{1}.sn2def.vox",), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + voxel_sizes=dict( + field="comp{1}.inv.comp{1}.sn2def.vox", + ), ) inputs = ApplyInverseDeformation.input_spec() @@ -31,7 +50,9 @@ def test_ApplyInverseDeformation_inputs(): def test_ApplyInverseDeformation_outputs(): - output_map = dict(out_files=dict(),) + output_map = dict( + out_files=dict(), + ) outputs = ApplyInverseDeformation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py b/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py index 99f140ecac..ae0516370d 100644 --- a/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py +++ b/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py @@ -4,14 +4,29 @@ def 
test_ApplyTransform_inputs(): input_map = dict( - in_file=dict(copyfile=True, extensions=None, mandatory=True,), - mat=dict(extensions=None, mandatory=True,), + in_file=dict( + copyfile=True, + extensions=None, + mandatory=True, + ), + mat=dict( + extensions=None, + mandatory=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), - out_file=dict(extensions=None, genfile=True,), + mfile=dict( + usedefault=True, + ), + out_file=dict( + extensions=None, + genfile=True, + ), paths=dict(), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = ApplyTransform.input_spec() @@ -21,7 +36,11 @@ def test_ApplyTransform_inputs(): def test_ApplyTransform_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ApplyTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py b/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py index 3c67fe75c6..156591cbb9 100644 --- a/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py +++ b/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py @@ -4,15 +4,31 @@ def test_CalcCoregAffine_inputs(): input_map = dict( - invmat=dict(extensions=None,), - mat=dict(extensions=None,), + invmat=dict( + extensions=None, + ), + mat=dict( + extensions=None, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), - moving=dict(copyfile=False, extensions=None, mandatory=True,), + mfile=dict( + usedefault=True, + ), + moving=dict( + copyfile=False, + extensions=None, + mandatory=True, + ), paths=dict(), - target=dict(extensions=None, mandatory=True,), + target=dict( + extensions=None, + mandatory=True, + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = CalcCoregAffine.input_spec() @@ -22,7 +38,14 @@ def test_CalcCoregAffine_inputs(): def test_CalcCoregAffine_outputs(): - output_map = dict(invmat=dict(extensions=None,), mat=dict(extensions=None,),) + output_map = dict( + invmat=dict( + extensions=None, + ), + mat=dict( + extensions=None, + ), + ) outputs = CalcCoregAffine.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_Coregister.py b/nipype/interfaces/spm/tests/test_auto_Coregister.py index 04e39ce23b..940f69ebbf 100644 --- a/nipype/interfaces/spm/tests/test_auto_Coregister.py +++ b/nipype/interfaces/spm/tests/test_auto_Coregister.py @@ -4,28 +4,59 @@ def test_Coregister_inputs(): input_map = dict( - apply_to_files=dict(copyfile=True, field="other",), - cost_function=dict(field="eoptions.cost_fun",), - fwhm=dict(field="eoptions.fwhm",), - jobtype=dict(usedefault=True,), + apply_to_files=dict( + copyfile=True, + field="other", + ), + cost_function=dict( + field="eoptions.cost_fun", + ), + fwhm=dict( + field="eoptions.fwhm", + ), + jobtype=dict( + usedefault=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), - out_prefix=dict(field="roptions.prefix", usedefault=True,), + mfile=dict( + usedefault=True, + ), + out_prefix=dict( + field="roptions.prefix", + usedefault=True, + ), paths=dict(), - separation=dict(field="eoptions.sep",), - source=dict(copyfile=True, field="source", mandatory=True,), + separation=dict( + field="eoptions.sep", + ), + source=dict( + copyfile=True, + field="source", + mandatory=True, + ), target=dict( copyfile=False, extensions=[".hdr", 
".img", ".img.gz", ".nii"], field="ref", mandatory=True, ), - tolerance=dict(field="eoptions.tol",), + tolerance=dict( + field="eoptions.tol", + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), - write_interp=dict(field="roptions.interp",), - write_mask=dict(field="roptions.mask",), - write_wrap=dict(field="roptions.wrap",), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + write_interp=dict( + field="roptions.interp", + ), + write_mask=dict( + field="roptions.mask", + ), + write_wrap=dict( + field="roptions.wrap", + ), ) inputs = Coregister.input_spec() @@ -35,7 +66,10 @@ def test_Coregister_inputs(): def test_Coregister_outputs(): - output_map = dict(coregistered_files=dict(), coregistered_source=dict(),) + output_map = dict( + coregistered_files=dict(), + coregistered_source=dict(), + ) outputs = Coregister.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_CreateWarped.py b/nipype/interfaces/spm/tests/test_auto_CreateWarped.py index b172972fe7..f62694641c 100644 --- a/nipype/interfaces/spm/tests/test_auto_CreateWarped.py +++ b/nipype/interfaces/spm/tests/test_auto_CreateWarped.py @@ -5,17 +5,34 @@ def test_CreateWarped_inputs(): input_map = dict( flowfield_files=dict( - copyfile=False, field="crt_warped.flowfields", mandatory=True, + copyfile=False, + field="crt_warped.flowfields", + mandatory=True, + ), + image_files=dict( + copyfile=False, + field="crt_warped.images", + mandatory=True, + ), + interp=dict( + field="crt_warped.interp", + ), + iterations=dict( + field="crt_warped.K", ), - image_files=dict(copyfile=False, field="crt_warped.images", mandatory=True,), - interp=dict(field="crt_warped.interp",), - iterations=dict(field="crt_warped.K",), matlab_cmd=dict(), - mfile=dict(usedefault=True,), - modulate=dict(field="crt_warped.jactransf",), + mfile=dict( + usedefault=True, + ), + modulate=dict( + field="crt_warped.jactransf", + ), paths=dict(), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = CreateWarped.input_spec() @@ -25,7 +42,9 @@ def test_CreateWarped_inputs(): def test_CreateWarped_outputs(): - output_map = dict(warped_files=dict(),) + output_map = dict( + warped_files=dict(), + ) outputs = CreateWarped.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_DARTEL.py b/nipype/interfaces/spm/tests/test_auto_DARTEL.py index ca031dfd1e..5fbba0c287 100644 --- a/nipype/interfaces/spm/tests/test_auto_DARTEL.py +++ b/nipype/interfaces/spm/tests/test_auto_DARTEL.py @@ -4,16 +4,34 @@ def test_DARTEL_inputs(): input_map = dict( - image_files=dict(copyfile=False, field="warp.images", mandatory=True,), - iteration_parameters=dict(field="warp.settings.param",), + image_files=dict( + copyfile=False, + field="warp.images", + mandatory=True, + ), + iteration_parameters=dict( + field="warp.settings.param", + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), - optimization_parameters=dict(field="warp.settings.optim",), + mfile=dict( + usedefault=True, + ), + optimization_parameters=dict( + field="warp.settings.optim", + ), paths=dict(), - regularization_form=dict(field="warp.settings.rform",), - template_prefix=dict(field="warp.settings.template", usedefault=True,), + regularization_form=dict( + field="warp.settings.rform", + ), + template_prefix=dict( + field="warp.settings.template", + usedefault=True, + ), use_mcr=dict(), - 
use_v8struct=dict(min_ver="8", usedefault=True,), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = DARTEL.input_spec() @@ -25,7 +43,9 @@ def test_DARTEL_inputs(): def test_DARTEL_outputs(): output_map = dict( dartel_flow_fields=dict(), - final_template_file=dict(extensions=None,), + final_template_file=dict( + extensions=None, + ), template_files=dict(), ) outputs = DARTEL.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py b/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py index 3b406b3c27..d4b8ad8a78 100644 --- a/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py +++ b/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py @@ -5,14 +5,27 @@ def test_DARTELNorm2MNI_inputs(): input_map = dict( apply_to_files=dict( - copyfile=False, field="mni_norm.data.subjs.images", mandatory=True, + copyfile=False, + field="mni_norm.data.subjs.images", + mandatory=True, + ), + bounding_box=dict( + field="mni_norm.bb", + ), + flowfield_files=dict( + field="mni_norm.data.subjs.flowfields", + mandatory=True, + ), + fwhm=dict( + field="mni_norm.fwhm", ), - bounding_box=dict(field="mni_norm.bb",), - flowfield_files=dict(field="mni_norm.data.subjs.flowfields", mandatory=True,), - fwhm=dict(field="mni_norm.fwhm",), matlab_cmd=dict(), - mfile=dict(usedefault=True,), - modulate=dict(field="mni_norm.preserve",), + mfile=dict( + usedefault=True, + ), + modulate=dict( + field="mni_norm.preserve", + ), paths=dict(), template_file=dict( copyfile=False, @@ -21,8 +34,13 @@ def test_DARTELNorm2MNI_inputs(): mandatory=True, ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), - voxel_size=dict(field="mni_norm.vox",), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + voxel_size=dict( + field="mni_norm.vox", + ), ) inputs = DARTELNorm2MNI.input_spec() @@ -33,7 +51,10 @@ def test_DARTELNorm2MNI_inputs(): def test_DARTELNorm2MNI_outputs(): output_map = dict( - normalization_parameter_file=dict(extensions=None,), normalized_files=dict(), + normalization_parameter_file=dict( + extensions=None, + ), + normalized_files=dict(), ) outputs = DARTELNorm2MNI.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_DicomImport.py b/nipype/interfaces/spm/tests/test_auto_DicomImport.py index ddb3f81c78..e0459d467f 100644 --- a/nipype/interfaces/spm/tests/test_auto_DicomImport.py +++ b/nipype/interfaces/spm/tests/test_auto_DicomImport.py @@ -4,16 +4,36 @@ def test_DicomImport_inputs(): input_map = dict( - format=dict(field="convopts.format", usedefault=True,), - icedims=dict(field="convopts.icedims", usedefault=True,), - in_files=dict(field="data", mandatory=True,), + format=dict( + field="convopts.format", + usedefault=True, + ), + icedims=dict( + field="convopts.icedims", + usedefault=True, + ), + in_files=dict( + field="data", + mandatory=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), - output_dir=dict(field="outdir", usedefault=True,), - output_dir_struct=dict(field="root", usedefault=True,), + mfile=dict( + usedefault=True, + ), + output_dir=dict( + field="outdir", + usedefault=True, + ), + output_dir_struct=dict( + field="root", + usedefault=True, + ), paths=dict(), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = DicomImport.input_spec() @@ -23,7 +43,9 @@ def test_DicomImport_inputs(): def test_DicomImport_outputs(): - output_map = dict(out_files=dict(),) + output_map = dict( + out_files=dict(), + ) outputs = 
DicomImport.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py b/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py index 323cb3707c..84fb496e9c 100644 --- a/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py +++ b/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py @@ -4,19 +4,40 @@ def test_EstimateContrast_inputs(): input_map = dict( - beta_images=dict(copyfile=False, mandatory=True,), - contrasts=dict(mandatory=True,), - group_contrast=dict(xor=["use_derivs"],), + beta_images=dict( + copyfile=False, + mandatory=True, + ), + contrasts=dict( + mandatory=True, + ), + group_contrast=dict( + xor=["use_derivs"], + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), + mfile=dict( + usedefault=True, + ), paths=dict(), - residual_image=dict(copyfile=False, extensions=None, mandatory=True,), + residual_image=dict( + copyfile=False, + extensions=None, + mandatory=True, + ), spm_mat_file=dict( - copyfile=True, extensions=None, field="spmmat", mandatory=True, + copyfile=True, + extensions=None, + field="spmmat", + mandatory=True, + ), + use_derivs=dict( + xor=["group_contrast"], ), - use_derivs=dict(xor=["group_contrast"],), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = EstimateContrast.input_spec() @@ -31,7 +52,9 @@ def test_EstimateContrast_outputs(): ess_images=dict(), spmF_images=dict(), spmT_images=dict(), - spm_mat_file=dict(extensions=None,), + spm_mat_file=dict( + extensions=None, + ), ) outputs = EstimateContrast.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_EstimateModel.py b/nipype/interfaces/spm/tests/test_auto_EstimateModel.py index 5e2b25e0c7..c78924de2f 100644 --- a/nipype/interfaces/spm/tests/test_auto_EstimateModel.py +++ b/nipype/interfaces/spm/tests/test_auto_EstimateModel.py @@ -4,17 +4,30 @@ def test_EstimateModel_inputs(): input_map = dict( - estimation_method=dict(field="method", mandatory=True,), + estimation_method=dict( + field="method", + mandatory=True, + ), flags=dict(), matlab_cmd=dict(), - mfile=dict(usedefault=True,), + mfile=dict( + usedefault=True, + ), paths=dict(), spm_mat_file=dict( - copyfile=True, extensions=None, field="spmmat", mandatory=True, + copyfile=True, + extensions=None, + field="spmmat", + mandatory=True, ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), - write_residuals=dict(field="write_residuals",), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + write_residuals=dict( + field="write_residuals", + ), ) inputs = EstimateModel.input_spec() @@ -27,15 +40,25 @@ def test_EstimateModel_outputs(): output_map = dict( ARcoef=dict(), Cbetas=dict(), - RPVimage=dict(extensions=[".hdr", ".img", ".img.gz", ".nii"],), + RPVimage=dict( + extensions=[".hdr", ".img", ".img.gz", ".nii"], + ), SDbetas=dict(), SDerror=dict(), beta_images=dict(), - labels=dict(extensions=[".hdr", ".img", ".img.gz", ".nii"],), - mask_image=dict(extensions=[".hdr", ".img", ".img.gz", ".nii"],), - residual_image=dict(extensions=[".hdr", ".img", ".img.gz", ".nii"],), + labels=dict( + extensions=[".hdr", ".img", ".img.gz", ".nii"], + ), + mask_image=dict( + extensions=[".hdr", ".img", ".img.gz", ".nii"], + ), + residual_image=dict( + extensions=[".hdr", ".img", ".img.gz", ".nii"], + ), residual_images=dict(), - spm_mat_file=dict(extensions=None,), + spm_mat_file=dict( + extensions=None, + ), ) outputs = EstimateModel.output_spec() diff --git 
a/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py b/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py index 0fb35c010f..de435ea771 100644 --- a/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py @@ -4,24 +4,39 @@ def test_FactorialDesign_inputs(): input_map = dict( - covariates=dict(field="cov",), - explicit_mask_file=dict(extensions=None, field="masking.em",), + covariates=dict( + field="cov", + ), + explicit_mask_file=dict( + extensions=None, + field="masking.em", + ), global_calc_mean=dict( - field="globalc.g_mean", xor=["global_calc_omit", "global_calc_values"], + field="globalc.g_mean", + xor=["global_calc_omit", "global_calc_values"], ), global_calc_omit=dict( - field="globalc.g_omit", xor=["global_calc_mean", "global_calc_values"], + field="globalc.g_omit", + xor=["global_calc_mean", "global_calc_values"], ), global_calc_values=dict( field="globalc.g_user.global_uval", xor=["global_calc_mean", "global_calc_omit"], ), - global_normalization=dict(field="globalm.glonorm",), + global_normalization=dict( + field="globalm.glonorm", + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), - no_grand_mean_scaling=dict(field="globalm.gmsca.gmsca_no",), + mfile=dict( + usedefault=True, + ), + no_grand_mean_scaling=dict( + field="globalm.gmsca.gmsca_no", + ), paths=dict(), - spm_mat_dir=dict(field="dir",), + spm_mat_dir=dict( + field="dir", + ), threshold_mask_absolute=dict( field="masking.tm.tma.athresh", xor=["threshold_mask_none", "threshold_mask_relative"], @@ -34,9 +49,14 @@ def test_FactorialDesign_inputs(): field="masking.tm.tmr.rthresh", xor=["threshold_mask_absolute", "threshold_mask_none"], ), - use_implicit_threshold=dict(field="masking.im",), + use_implicit_threshold=dict( + field="masking.im", + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = FactorialDesign.input_spec() @@ -46,7 +66,11 @@ def test_FactorialDesign_inputs(): def test_FactorialDesign_outputs(): - output_map = dict(spm_mat_file=dict(extensions=None,),) + output_map = dict( + spm_mat_file=dict( + extensions=None, + ), + ) outputs = FactorialDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_FieldMap.py b/nipype/interfaces/spm/tests/test_auto_FieldMap.py index 19d0bc3e19..a91eec64d8 100644 --- a/nipype/interfaces/spm/tests/test_auto_FieldMap.py +++ b/nipype/interfaces/spm/tests/test_auto_FieldMap.py @@ -4,35 +4,78 @@ def test_FieldMap_inputs(): input_map = dict( - anat_file=dict(copyfile=False, extensions=None, field="subj.anat",), - blip_direction=dict(field="subj.defaults.defaultsval.blipdir", mandatory=True,), - echo_times=dict(field="subj.defaults.defaultsval.et", mandatory=True,), + anat_file=dict( + copyfile=False, + extensions=None, + field="subj.anat", + ), + blip_direction=dict( + field="subj.defaults.defaultsval.blipdir", + mandatory=True, + ), + echo_times=dict( + field="subj.defaults.defaultsval.et", + mandatory=True, + ), epi_file=dict( - copyfile=False, extensions=None, field="subj.session.epi", mandatory=True, + copyfile=False, + extensions=None, + field="subj.session.epi", + mandatory=True, + ), + epifm=dict( + field="subj.defaults.defaultsval.epifm", + usedefault=True, ), - epifm=dict(field="subj.defaults.defaultsval.epifm", usedefault=True,), jacobian_modulation=dict( - field="subj.defaults.defaultsval.ajm", usedefault=True, + 
field="subj.defaults.defaultsval.ajm", + usedefault=True, + ), + jobtype=dict( + usedefault=True, ), - jobtype=dict(usedefault=True,), magnitude_file=dict( copyfile=False, extensions=None, field="subj.data.presubphasemag.magnitude", mandatory=True, ), - mask_fwhm=dict(field="subj.defaults.defaultsval.mflags.fwhm", usedefault=True,), - maskbrain=dict(field="subj.defaults.defaultsval.maskbrain", usedefault=True,), - matchanat=dict(field="subj.matchanat", usedefault=True,), - matchvdm=dict(field="subj.matchvdm", usedefault=True,), + mask_fwhm=dict( + field="subj.defaults.defaultsval.mflags.fwhm", + usedefault=True, + ), + maskbrain=dict( + field="subj.defaults.defaultsval.maskbrain", + usedefault=True, + ), + matchanat=dict( + field="subj.matchanat", + usedefault=True, + ), + matchvdm=dict( + field="subj.matchvdm", + usedefault=True, + ), matlab_cmd=dict(), - method=dict(field="subj.defaults.defaultsval.uflags.method", usedefault=True,), - mfile=dict(usedefault=True,), + method=dict( + field="subj.defaults.defaultsval.uflags.method", + usedefault=True, + ), + mfile=dict( + usedefault=True, + ), ndilate=dict( - field="subj.defaults.defaultsval.mflags.ndilate", usedefault=True, + field="subj.defaults.defaultsval.mflags.ndilate", + usedefault=True, + ), + nerode=dict( + field="subj.defaults.defaultsval.mflags.nerode", + usedefault=True, + ), + pad=dict( + field="subj.defaults.defaultsval.uflags.pad", + usedefault=True, ), - nerode=dict(field="subj.defaults.defaultsval.mflags.nerode", usedefault=True,), - pad=dict(field="subj.defaults.defaultsval.uflags.pad", usedefault=True,), paths=dict(), phase_file=dict( copyfile=False, @@ -40,24 +83,44 @@ def test_FieldMap_inputs(): field="subj.data.presubphasemag.phase", mandatory=True, ), - reg=dict(field="subj.defaults.defaultsval.mflags.reg", usedefault=True,), - sessname=dict(field="subj.sessname", usedefault=True,), + reg=dict( + field="subj.defaults.defaultsval.mflags.reg", + usedefault=True, + ), + sessname=dict( + field="subj.sessname", + usedefault=True, + ), template=dict( copyfile=False, extensions=None, field="subj.defaults.defaultsval.mflags.template", ), - thresh=dict(field="subj.defaults.defaultsval.mflags.thresh", usedefault=True,), + thresh=dict( + field="subj.defaults.defaultsval.mflags.thresh", + usedefault=True, + ), total_readout_time=dict( - field="subj.defaults.defaultsval.tert", mandatory=True, + field="subj.defaults.defaultsval.tert", + mandatory=True, ), unwarp_fwhm=dict( - field="subj.defaults.defaultsval.uflags.fwhm", usedefault=True, + field="subj.defaults.defaultsval.uflags.fwhm", + usedefault=True, ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), - writeunwarped=dict(field="subj.writeunwarped", usedefault=True,), - ws=dict(field="subj.defaults.defaultsval.uflags.ws", usedefault=True,), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + writeunwarped=dict( + field="subj.writeunwarped", + usedefault=True, + ), + ws=dict( + field="subj.defaults.defaultsval.uflags.ws", + usedefault=True, + ), ) inputs = FieldMap.input_spec() @@ -67,7 +130,11 @@ def test_FieldMap_inputs(): def test_FieldMap_outputs(): - output_map = dict(vdm=dict(extensions=None,),) + output_map = dict( + vdm=dict( + extensions=None, + ), + ) outputs = FieldMap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_Level1Design.py b/nipype/interfaces/spm/tests/test_auto_Level1Design.py index 8a57a7b86e..04c9f315ef 100644 --- 
a/nipype/interfaces/spm/tests/test_auto_Level1Design.py +++ b/nipype/interfaces/spm/tests/test_auto_Level1Design.py @@ -4,25 +4,61 @@ def test_Level1Design_inputs(): input_map = dict( - bases=dict(field="bases", mandatory=True,), - factor_info=dict(field="fact",), + bases=dict( + field="bases", + mandatory=True, + ), + factor_info=dict( + field="fact", + ), flags=dict(), - global_intensity_normalization=dict(field="global",), - interscan_interval=dict(field="timing.RT", mandatory=True,), - mask_image=dict(extensions=None, field="mask",), - mask_threshold=dict(usedefault=True,), + global_intensity_normalization=dict( + field="global", + ), + interscan_interval=dict( + field="timing.RT", + mandatory=True, + ), + mask_image=dict( + extensions=None, + field="mask", + ), + mask_threshold=dict( + usedefault=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), - microtime_onset=dict(field="timing.fmri_t0",), - microtime_resolution=dict(field="timing.fmri_t",), - model_serial_correlations=dict(field="cvi",), + mfile=dict( + usedefault=True, + ), + microtime_onset=dict( + field="timing.fmri_t0", + ), + microtime_resolution=dict( + field="timing.fmri_t", + ), + model_serial_correlations=dict( + field="cvi", + ), paths=dict(), - session_info=dict(field="sess", mandatory=True,), - spm_mat_dir=dict(field="dir",), - timing_units=dict(field="timing.units", mandatory=True,), + session_info=dict( + field="sess", + mandatory=True, + ), + spm_mat_dir=dict( + field="dir", + ), + timing_units=dict( + field="timing.units", + mandatory=True, + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), - volterra_expansion_order=dict(field="volt",), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + volterra_expansion_order=dict( + field="volt", + ), ) inputs = Level1Design.input_spec() @@ -32,7 +68,11 @@ def test_Level1Design_inputs(): def test_Level1Design_outputs(): - output_map = dict(spm_mat_file=dict(extensions=None,),) + output_map = dict( + spm_mat_file=dict( + extensions=None, + ), + ) outputs = Level1Design.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_MultiChannelNewSegment.py b/nipype/interfaces/spm/tests/test_auto_MultiChannelNewSegment.py index 29606f798c..9c8f743d45 100644 --- a/nipype/interfaces/spm/tests/test_auto_MultiChannelNewSegment.py +++ b/nipype/interfaces/spm/tests/test_auto_MultiChannelNewSegment.py @@ -4,17 +4,34 @@ def test_MultiChannelNewSegment_inputs(): input_map = dict( - affine_regularization=dict(field="warp.affreg",), - channels=dict(field="channel",), + affine_regularization=dict( + field="warp.affreg", + ), + channels=dict( + field="channel", + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), + mfile=dict( + usedefault=True, + ), paths=dict(), - sampling_distance=dict(field="warp.samp",), - tissues=dict(field="tissue",), + sampling_distance=dict( + field="warp.samp", + ), + tissues=dict( + field="tissue", + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), - warping_regularization=dict(field="warp.reg",), - write_deformation_fields=dict(field="warp.write",), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + warping_regularization=dict( + field="warp.reg", + ), + write_deformation_fields=dict( + field="warp.write", + ), ) inputs = MultiChannelNewSegment.input_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py b/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py index 478c869474..61d3b38a74 
100644 --- a/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py @@ -4,26 +4,47 @@ def test_MultipleRegressionDesign_inputs(): input_map = dict( - covariates=dict(field="cov",), - explicit_mask_file=dict(extensions=None, field="masking.em",), + covariates=dict( + field="cov", + ), + explicit_mask_file=dict( + extensions=None, + field="masking.em", + ), global_calc_mean=dict( - field="globalc.g_mean", xor=["global_calc_omit", "global_calc_values"], + field="globalc.g_mean", + xor=["global_calc_omit", "global_calc_values"], ), global_calc_omit=dict( - field="globalc.g_omit", xor=["global_calc_mean", "global_calc_values"], + field="globalc.g_omit", + xor=["global_calc_mean", "global_calc_values"], ), global_calc_values=dict( field="globalc.g_user.global_uval", xor=["global_calc_mean", "global_calc_omit"], ), - global_normalization=dict(field="globalm.glonorm",), - in_files=dict(field="des.mreg.scans", mandatory=True,), - include_intercept=dict(field="des.mreg.incint", usedefault=True,), + global_normalization=dict( + field="globalm.glonorm", + ), + in_files=dict( + field="des.mreg.scans", + mandatory=True, + ), + include_intercept=dict( + field="des.mreg.incint", + usedefault=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), - no_grand_mean_scaling=dict(field="globalm.gmsca.gmsca_no",), + mfile=dict( + usedefault=True, + ), + no_grand_mean_scaling=dict( + field="globalm.gmsca.gmsca_no", + ), paths=dict(), - spm_mat_dir=dict(field="dir",), + spm_mat_dir=dict( + field="dir", + ), threshold_mask_absolute=dict( field="masking.tm.tma.athresh", xor=["threshold_mask_none", "threshold_mask_relative"], @@ -36,10 +57,17 @@ def test_MultipleRegressionDesign_inputs(): field="masking.tm.tmr.rthresh", xor=["threshold_mask_absolute", "threshold_mask_none"], ), - use_implicit_threshold=dict(field="masking.im",), + use_implicit_threshold=dict( + field="masking.im", + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), - user_covariates=dict(field="des.mreg.mcov",), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + user_covariates=dict( + field="des.mreg.mcov", + ), ) inputs = MultipleRegressionDesign.input_spec() @@ -49,7 +77,11 @@ def test_MultipleRegressionDesign_inputs(): def test_MultipleRegressionDesign_outputs(): - output_map = dict(spm_mat_file=dict(extensions=None,),) + output_map = dict( + spm_mat_file=dict( + extensions=None, + ), + ) outputs = MultipleRegressionDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_NewSegment.py b/nipype/interfaces/spm/tests/test_auto_NewSegment.py index e05643b92e..dae310f6e2 100644 --- a/nipype/interfaces/spm/tests/test_auto_NewSegment.py +++ b/nipype/interfaces/spm/tests/test_auto_NewSegment.py @@ -4,18 +4,39 @@ def test_NewSegment_inputs(): input_map = dict( - affine_regularization=dict(field="warp.affreg",), - channel_files=dict(copyfile=False, field="channel", mandatory=True,), - channel_info=dict(field="channel",), + affine_regularization=dict( + field="warp.affreg", + ), + channel_files=dict( + copyfile=False, + field="channel", + mandatory=True, + ), + channel_info=dict( + field="channel", + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), + mfile=dict( + usedefault=True, + ), paths=dict(), - sampling_distance=dict(field="warp.samp",), - tissues=dict(field="tissue",), + sampling_distance=dict( + field="warp.samp", + ), + tissues=dict( + field="tissue", + ), 
use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), - warping_regularization=dict(field="warp.reg",), - write_deformation_fields=dict(field="warp.write",), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + warping_regularization=dict( + field="warp.reg", + ), + write_deformation_fields=dict( + field="warp.write", + ), ) inputs = NewSegment.input_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_Normalize.py b/nipype/interfaces/spm/tests/test_auto_Normalize.py index e028c609c9..caa063d923 100644 --- a/nipype/interfaces/spm/tests/test_auto_Normalize.py +++ b/nipype/interfaces/spm/tests/test_auto_Normalize.py @@ -4,15 +4,33 @@ def test_Normalize_inputs(): input_map = dict( - DCT_period_cutoff=dict(field="eoptions.cutoff",), - affine_regularization_type=dict(field="eoptions.regtype",), - apply_to_files=dict(copyfile=True, field="subj.resample",), - jobtype=dict(usedefault=True,), + DCT_period_cutoff=dict( + field="eoptions.cutoff", + ), + affine_regularization_type=dict( + field="eoptions.regtype", + ), + apply_to_files=dict( + copyfile=True, + field="subj.resample", + ), + jobtype=dict( + usedefault=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), - nonlinear_iterations=dict(field="eoptions.nits",), - nonlinear_regularization=dict(field="eoptions.reg",), - out_prefix=dict(field="roptions.prefix", usedefault=True,), + mfile=dict( + usedefault=True, + ), + nonlinear_iterations=dict( + field="eoptions.nits", + ), + nonlinear_regularization=dict( + field="eoptions.reg", + ), + out_prefix=dict( + field="roptions.prefix", + usedefault=True, + ), parameter_file=dict( copyfile=False, extensions=None, @@ -22,10 +40,19 @@ def test_Normalize_inputs(): ), paths=dict(), source=dict( - copyfile=True, field="subj.source", mandatory=True, xor=["parameter_file"], + copyfile=True, + field="subj.source", + mandatory=True, + xor=["parameter_file"], + ), + source_image_smoothing=dict( + field="eoptions.smosrc", + ), + source_weight=dict( + copyfile=False, + extensions=None, + field="subj.wtsrc", ), - source_image_smoothing=dict(field="eoptions.smosrc",), - source_weight=dict(copyfile=False, extensions=None, field="subj.wtsrc",), template=dict( copyfile=False, extensions=None, @@ -33,15 +60,34 @@ def test_Normalize_inputs(): mandatory=True, xor=["parameter_file"], ), - template_image_smoothing=dict(field="eoptions.smoref",), - template_weight=dict(copyfile=False, extensions=None, field="eoptions.weight",), + template_image_smoothing=dict( + field="eoptions.smoref", + ), + template_weight=dict( + copyfile=False, + extensions=None, + field="eoptions.weight", + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), - write_bounding_box=dict(field="roptions.bb",), - write_interp=dict(field="roptions.interp",), - write_preserve=dict(field="roptions.preserve",), - write_voxel_sizes=dict(field="roptions.vox",), - write_wrap=dict(field="roptions.wrap",), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + write_bounding_box=dict( + field="roptions.bb", + ), + write_interp=dict( + field="roptions.interp", + ), + write_preserve=dict( + field="roptions.preserve", + ), + write_voxel_sizes=dict( + field="roptions.vox", + ), + write_wrap=dict( + field="roptions.wrap", + ), ) inputs = Normalize.input_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_Normalize12.py b/nipype/interfaces/spm/tests/test_auto_Normalize12.py index b64475eab3..965ffafec9 100644 --- a/nipype/interfaces/spm/tests/test_auto_Normalize12.py +++ 
b/nipype/interfaces/spm/tests/test_auto_Normalize12.py @@ -4,10 +4,19 @@ def test_Normalize12_inputs(): input_map = dict( - affine_regularization_type=dict(field="eoptions.affreg",), - apply_to_files=dict(copyfile=True, field="subj.resample",), - bias_fwhm=dict(field="eoptions.biasfwhm",), - bias_regularization=dict(field="eoptions.biasreg",), + affine_regularization_type=dict( + field="eoptions.affreg", + ), + apply_to_files=dict( + copyfile=True, + field="subj.resample", + ), + bias_fwhm=dict( + field="eoptions.biasfwhm", + ), + bias_regularization=dict( + field="eoptions.biasreg", + ), deformation_file=dict( copyfile=False, extensions=[".hdr", ".img", ".img.gz", ".nii"], @@ -22,13 +31,24 @@ def test_Normalize12_inputs(): mandatory=True, xor=["deformation_file"], ), - jobtype=dict(usedefault=True,), + jobtype=dict( + usedefault=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), - out_prefix=dict(field="woptions.prefix", usedefault=True,), + mfile=dict( + usedefault=True, + ), + out_prefix=dict( + field="woptions.prefix", + usedefault=True, + ), paths=dict(), - sampling_distance=dict(field="eoptions.samp",), - smoothness=dict(field="eoptions.fwhm",), + sampling_distance=dict( + field="eoptions.samp", + ), + smoothness=dict( + field="eoptions.fwhm", + ), tpm=dict( copyfile=False, extensions=None, @@ -36,11 +56,22 @@ def test_Normalize12_inputs(): xor=["deformation_file"], ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), - warping_regularization=dict(field="eoptions.reg",), - write_bounding_box=dict(field="woptions.bb",), - write_interp=dict(field="woptions.interp",), - write_voxel_sizes=dict(field="woptions.vox",), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + warping_regularization=dict( + field="eoptions.reg", + ), + write_bounding_box=dict( + field="woptions.bb", + ), + write_interp=dict( + field="woptions.interp", + ), + write_voxel_sizes=dict( + field="woptions.vox", + ), ) inputs = Normalize12.input_spec() @@ -51,7 +82,9 @@ def test_Normalize12_inputs(): def test_Normalize12_outputs(): output_map = dict( - deformation_field=dict(), normalized_files=dict(), normalized_image=dict(), + deformation_field=dict(), + normalized_files=dict(), + normalized_image=dict(), ) outputs = Normalize12.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py b/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py index cd5197602c..9b77ab0af7 100644 --- a/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py @@ -4,25 +4,43 @@ def test_OneSampleTTestDesign_inputs(): input_map = dict( - covariates=dict(field="cov",), - explicit_mask_file=dict(extensions=None, field="masking.em",), + covariates=dict( + field="cov", + ), + explicit_mask_file=dict( + extensions=None, + field="masking.em", + ), global_calc_mean=dict( - field="globalc.g_mean", xor=["global_calc_omit", "global_calc_values"], + field="globalc.g_mean", + xor=["global_calc_omit", "global_calc_values"], ), global_calc_omit=dict( - field="globalc.g_omit", xor=["global_calc_mean", "global_calc_values"], + field="globalc.g_omit", + xor=["global_calc_mean", "global_calc_values"], ), global_calc_values=dict( field="globalc.g_user.global_uval", xor=["global_calc_mean", "global_calc_omit"], ), - global_normalization=dict(field="globalm.glonorm",), - in_files=dict(field="des.t1.scans", mandatory=True,), + global_normalization=dict( + field="globalm.glonorm", + ), + in_files=dict( + 
field="des.t1.scans", + mandatory=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), - no_grand_mean_scaling=dict(field="globalm.gmsca.gmsca_no",), + mfile=dict( + usedefault=True, + ), + no_grand_mean_scaling=dict( + field="globalm.gmsca.gmsca_no", + ), paths=dict(), - spm_mat_dir=dict(field="dir",), + spm_mat_dir=dict( + field="dir", + ), threshold_mask_absolute=dict( field="masking.tm.tma.athresh", xor=["threshold_mask_none", "threshold_mask_relative"], @@ -35,9 +53,14 @@ def test_OneSampleTTestDesign_inputs(): field="masking.tm.tmr.rthresh", xor=["threshold_mask_absolute", "threshold_mask_none"], ), - use_implicit_threshold=dict(field="masking.im",), + use_implicit_threshold=dict( + field="masking.im", + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = OneSampleTTestDesign.input_spec() @@ -47,7 +70,11 @@ def test_OneSampleTTestDesign_inputs(): def test_OneSampleTTestDesign_outputs(): - output_map = dict(spm_mat_file=dict(extensions=None,),) + output_map = dict( + spm_mat_file=dict( + extensions=None, + ), + ) outputs = OneSampleTTestDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py b/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py index bb516488ee..6be1f6ec01 100644 --- a/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py @@ -4,27 +4,49 @@ def test_PairedTTestDesign_inputs(): input_map = dict( - ancova=dict(field="des.pt.ancova",), - covariates=dict(field="cov",), - explicit_mask_file=dict(extensions=None, field="masking.em",), + ancova=dict( + field="des.pt.ancova", + ), + covariates=dict( + field="cov", + ), + explicit_mask_file=dict( + extensions=None, + field="masking.em", + ), global_calc_mean=dict( - field="globalc.g_mean", xor=["global_calc_omit", "global_calc_values"], + field="globalc.g_mean", + xor=["global_calc_omit", "global_calc_values"], ), global_calc_omit=dict( - field="globalc.g_omit", xor=["global_calc_mean", "global_calc_values"], + field="globalc.g_omit", + xor=["global_calc_mean", "global_calc_values"], ), global_calc_values=dict( field="globalc.g_user.global_uval", xor=["global_calc_mean", "global_calc_omit"], ), - global_normalization=dict(field="globalm.glonorm",), - grand_mean_scaling=dict(field="des.pt.gmsca",), + global_normalization=dict( + field="globalm.glonorm", + ), + grand_mean_scaling=dict( + field="des.pt.gmsca", + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), - no_grand_mean_scaling=dict(field="globalm.gmsca.gmsca_no",), - paired_files=dict(field="des.pt.pair", mandatory=True,), + mfile=dict( + usedefault=True, + ), + no_grand_mean_scaling=dict( + field="globalm.gmsca.gmsca_no", + ), + paired_files=dict( + field="des.pt.pair", + mandatory=True, + ), paths=dict(), - spm_mat_dir=dict(field="dir",), + spm_mat_dir=dict( + field="dir", + ), threshold_mask_absolute=dict( field="masking.tm.tma.athresh", xor=["threshold_mask_none", "threshold_mask_relative"], @@ -37,9 +59,14 @@ def test_PairedTTestDesign_inputs(): field="masking.tm.tmr.rthresh", xor=["threshold_mask_absolute", "threshold_mask_none"], ), - use_implicit_threshold=dict(field="masking.im",), + use_implicit_threshold=dict( + field="masking.im", + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = PairedTTestDesign.input_spec() 
@@ -49,7 +76,11 @@ def test_PairedTTestDesign_inputs(): def test_PairedTTestDesign_outputs(): - output_map = dict(spm_mat_file=dict(extensions=None,),) + output_map = dict( + spm_mat_file=dict( + extensions=None, + ), + ) outputs = PairedTTestDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_Realign.py b/nipype/interfaces/spm/tests/test_auto_Realign.py index ab76f6a82d..5165d6f33e 100644 --- a/nipype/interfaces/spm/tests/test_auto_Realign.py +++ b/nipype/interfaces/spm/tests/test_auto_Realign.py @@ -4,25 +4,65 @@ def test_Realign_inputs(): input_map = dict( - fwhm=dict(field="eoptions.fwhm",), - in_files=dict(copyfile=True, field="data", mandatory=True,), - interp=dict(field="eoptions.interp",), - jobtype=dict(usedefault=True,), + fwhm=dict( + field="eoptions.fwhm", + ), + in_files=dict( + copyfile=True, + field="data", + mandatory=True, + ), + interp=dict( + field="eoptions.interp", + ), + jobtype=dict( + usedefault=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), - out_prefix=dict(field="roptions.prefix", usedefault=True,), + mfile=dict( + usedefault=True, + ), + out_prefix=dict( + field="roptions.prefix", + usedefault=True, + ), paths=dict(), - quality=dict(field="eoptions.quality",), - register_to_mean=dict(field="eoptions.rtm",), - separation=dict(field="eoptions.sep",), + quality=dict( + field="eoptions.quality", + ), + register_to_mean=dict( + field="eoptions.rtm", + ), + separation=dict( + field="eoptions.sep", + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), - weight_img=dict(extensions=None, field="eoptions.weight",), - wrap=dict(field="eoptions.wrap",), - write_interp=dict(field="roptions.interp",), - write_mask=dict(field="roptions.mask",), - write_which=dict(field="roptions.which", maxlen=2, minlen=2, usedefault=True,), - write_wrap=dict(field="roptions.wrap",), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + weight_img=dict( + extensions=None, + field="eoptions.weight", + ), + wrap=dict( + field="eoptions.wrap", + ), + write_interp=dict( + field="roptions.interp", + ), + write_mask=dict( + field="roptions.mask", + ), + write_which=dict( + field="roptions.which", + maxlen=2, + minlen=2, + usedefault=True, + ), + write_wrap=dict( + field="roptions.wrap", + ), ) inputs = Realign.input_spec() @@ -33,7 +73,9 @@ def test_Realign_inputs(): def test_Realign_outputs(): output_map = dict( - mean_image=dict(extensions=None,), + mean_image=dict( + extensions=None, + ), modified_in_files=dict(), realigned_files=dict(), realignment_parameters=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_RealignUnwarp.py b/nipype/interfaces/spm/tests/test_auto_RealignUnwarp.py index fd4e420423..bb27419547 100644 --- a/nipype/interfaces/spm/tests/test_auto_RealignUnwarp.py +++ b/nipype/interfaces/spm/tests/test_auto_RealignUnwarp.py @@ -4,41 +4,104 @@ def test_RealignUnwarp_inputs(): input_map = dict( - est_basis_func=dict(field="uweoptions.basfcn",), - est_first_order_effects=dict(field="uweoptions.fot",), - est_jacobian_deformations=dict(field="uweoptions.jm",), + est_basis_func=dict( + field="uweoptions.basfcn", + ), + est_first_order_effects=dict( + field="uweoptions.fot", + ), + est_jacobian_deformations=dict( + field="uweoptions.jm", + ), est_num_of_iterations=dict( - field="uweoptions.noi", maxlen=1, minlen=1, usedefault=True, + field="uweoptions.noi", + maxlen=1, + minlen=1, + usedefault=True, + ), + est_re_est_mov_par=dict( + field="uweoptions.rem", ), - 
est_re_est_mov_par=dict(field="uweoptions.rem",), est_reg_factor=dict( - field="uweoptions.lambda", maxlen=1, minlen=1, usedefault=True, - ), - est_reg_order=dict(field="uweoptions.regorder",), - est_second_order_effects=dict(field="uweoptions.sot",), - est_taylor_expansion_point=dict(field="uweoptions.expround", usedefault=True,), - est_unwarp_fwhm=dict(field="uweoptions.uwfwhm",), - fwhm=dict(field="eoptions.fwhm",), - in_files=dict(copyfile=True, field="data.scans", mandatory=True,), - interp=dict(field="eoptions.einterp",), + field="uweoptions.lambda", + maxlen=1, + minlen=1, + usedefault=True, + ), + est_reg_order=dict( + field="uweoptions.regorder", + ), + est_second_order_effects=dict( + field="uweoptions.sot", + ), + est_taylor_expansion_point=dict( + field="uweoptions.expround", + usedefault=True, + ), + est_unwarp_fwhm=dict( + field="uweoptions.uwfwhm", + ), + fwhm=dict( + field="eoptions.fwhm", + ), + in_files=dict( + copyfile=True, + field="data.scans", + mandatory=True, + ), + interp=dict( + field="eoptions.einterp", + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), - out_prefix=dict(field="uwroptions.prefix", usedefault=True,), + mfile=dict( + usedefault=True, + ), + out_prefix=dict( + field="uwroptions.prefix", + usedefault=True, + ), paths=dict(), - phase_map=dict(copyfile=False, extensions=None, field="data.pmscan",), - quality=dict(field="eoptions.quality",), - register_to_mean=dict(field="eoptions.rtm",), - reslice_interp=dict(field="uwroptions.rinterp",), - reslice_mask=dict(field="uwroptions.mask",), + phase_map=dict( + copyfile=False, + extensions=None, + field="data.pmscan", + ), + quality=dict( + field="eoptions.quality", + ), + register_to_mean=dict( + field="eoptions.rtm", + ), + reslice_interp=dict( + field="uwroptions.rinterp", + ), + reslice_mask=dict( + field="uwroptions.mask", + ), reslice_which=dict( - field="uwroptions.uwwhich", maxlen=2, minlen=2, usedefault=True, + field="uwroptions.uwwhich", + maxlen=2, + minlen=2, + usedefault=True, + ), + reslice_wrap=dict( + field="uwroptions.wrap", + ), + separation=dict( + field="eoptions.sep", ), - reslice_wrap=dict(field="uwroptions.wrap",), - separation=dict(field="eoptions.sep",), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), - weight_img=dict(extensions=None, field="eoptions.weight",), - wrap=dict(field="eoptions.ewrap",), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + weight_img=dict( + extensions=None, + field="eoptions.weight", + ), + wrap=dict( + field="eoptions.ewrap", + ), ) inputs = RealignUnwarp.input_spec() @@ -49,7 +112,9 @@ def test_RealignUnwarp_inputs(): def test_RealignUnwarp_outputs(): output_map = dict( - mean_image=dict(extensions=None,), + mean_image=dict( + extensions=None, + ), modified_in_files=dict(), realigned_unwarped_files=dict(), realignment_parameters=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_Reslice.py b/nipype/interfaces/spm/tests/test_auto_Reslice.py index 46083f8192..c48d1a4b88 100644 --- a/nipype/interfaces/spm/tests/test_auto_Reslice.py +++ b/nipype/interfaces/spm/tests/test_auto_Reslice.py @@ -4,15 +4,30 @@ def test_Reslice_inputs(): input_map = dict( - in_file=dict(extensions=None, mandatory=True,), - interp=dict(usedefault=True,), + in_file=dict( + extensions=None, + mandatory=True, + ), + interp=dict( + usedefault=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), - out_file=dict(extensions=None,), + mfile=dict( + usedefault=True, + ), + out_file=dict( + extensions=None, + ), paths=dict(), - 
space_defining=dict(extensions=None, mandatory=True,), + space_defining=dict( + extensions=None, + mandatory=True, + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = Reslice.input_spec() @@ -22,7 +37,11 @@ def test_Reslice_inputs(): def test_Reslice_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Reslice.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py b/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py index ebea9a0bf4..8bbb1d1307 100644 --- a/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py +++ b/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py @@ -4,16 +4,33 @@ def test_ResliceToReference_inputs(): input_map = dict( - bounding_box=dict(field="comp{2}.idbbvox.bb",), - in_files=dict(field="fnames", mandatory=True,), - interpolation=dict(field="interp",), + bounding_box=dict( + field="comp{2}.idbbvox.bb", + ), + in_files=dict( + field="fnames", + mandatory=True, + ), + interpolation=dict( + field="interp", + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), + mfile=dict( + usedefault=True, + ), paths=dict(), - target=dict(extensions=None, field="comp{1}.id.space",), + target=dict( + extensions=None, + field="comp{1}.id.space", + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), - voxel_sizes=dict(field="comp{2}.idbbvox.vox",), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + voxel_sizes=dict( + field="comp{2}.idbbvox.vox", + ), ) inputs = ResliceToReference.input_spec() @@ -23,7 +40,9 @@ def test_ResliceToReference_inputs(): def test_ResliceToReference_outputs(): - output_map = dict(out_files=dict(),) + output_map = dict( + out_files=dict(), + ) outputs = ResliceToReference.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_SPMCommand.py b/nipype/interfaces/spm/tests/test_auto_SPMCommand.py index bde05ad1be..0c0a8d7506 100644 --- a/nipype/interfaces/spm/tests/test_auto_SPMCommand.py +++ b/nipype/interfaces/spm/tests/test_auto_SPMCommand.py @@ -5,10 +5,15 @@ def test_SPMCommand_inputs(): input_map = dict( matlab_cmd=dict(), - mfile=dict(usedefault=True,), + mfile=dict( + usedefault=True, + ), paths=dict(), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = SPMCommand.input_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_Segment.py b/nipype/interfaces/spm/tests/test_auto_Segment.py index ea07881981..4859c76a00 100644 --- a/nipype/interfaces/spm/tests/test_auto_Segment.py +++ b/nipype/interfaces/spm/tests/test_auto_Segment.py @@ -4,26 +4,64 @@ def test_Segment_inputs(): input_map = dict( - affine_regularization=dict(field="opts.regtype",), - bias_fwhm=dict(field="opts.biasfwhm",), - bias_regularization=dict(field="opts.biasreg",), - clean_masks=dict(field="output.cleanup",), - csf_output_type=dict(field="output.CSF",), - data=dict(copyfile=False, field="data", mandatory=True,), - gaussians_per_class=dict(field="opts.ngaus",), - gm_output_type=dict(field="output.GM",), - mask_image=dict(extensions=None, field="opts.msk",), + affine_regularization=dict( + field="opts.regtype", + ), + bias_fwhm=dict( + field="opts.biasfwhm", + ), + bias_regularization=dict( + field="opts.biasreg", + ), + clean_masks=dict( 
+ field="output.cleanup", + ), + csf_output_type=dict( + field="output.CSF", + ), + data=dict( + copyfile=False, + field="data", + mandatory=True, + ), + gaussians_per_class=dict( + field="opts.ngaus", + ), + gm_output_type=dict( + field="output.GM", + ), + mask_image=dict( + extensions=None, + field="opts.msk", + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), + mfile=dict( + usedefault=True, + ), paths=dict(), - sampling_distance=dict(field="opts.samp",), - save_bias_corrected=dict(field="output.biascor",), - tissue_prob_maps=dict(field="opts.tpm",), + sampling_distance=dict( + field="opts.samp", + ), + save_bias_corrected=dict( + field="output.biascor", + ), + tissue_prob_maps=dict( + field="opts.tpm", + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), - warp_frequency_cutoff=dict(field="opts.warpco",), - warping_regularization=dict(field="opts.warpreg",), - wm_output_type=dict(field="output.WM",), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + warp_frequency_cutoff=dict( + field="opts.warpco", + ), + warping_regularization=dict( + field="opts.warpreg", + ), + wm_output_type=dict( + field="output.WM", + ), ) inputs = Segment.input_spec() @@ -34,21 +72,47 @@ def test_Segment_inputs(): def test_Segment_outputs(): output_map = dict( - bias_corrected_image=dict(extensions=None,), - inverse_transformation_mat=dict(extensions=None,), - modulated_csf_image=dict(extensions=None,), - modulated_gm_image=dict(extensions=None,), + bias_corrected_image=dict( + extensions=None, + ), + inverse_transformation_mat=dict( + extensions=None, + ), + modulated_csf_image=dict( + extensions=None, + ), + modulated_gm_image=dict( + extensions=None, + ), modulated_input_image=dict( - deprecated="0.10", extensions=None, new_name="bias_corrected_image", - ), - modulated_wm_image=dict(extensions=None,), - native_csf_image=dict(extensions=None,), - native_gm_image=dict(extensions=None,), - native_wm_image=dict(extensions=None,), - normalized_csf_image=dict(extensions=None,), - normalized_gm_image=dict(extensions=None,), - normalized_wm_image=dict(extensions=None,), - transformation_mat=dict(extensions=None,), + deprecated="0.10", + extensions=None, + new_name="bias_corrected_image", + ), + modulated_wm_image=dict( + extensions=None, + ), + native_csf_image=dict( + extensions=None, + ), + native_gm_image=dict( + extensions=None, + ), + native_wm_image=dict( + extensions=None, + ), + normalized_csf_image=dict( + extensions=None, + ), + normalized_gm_image=dict( + extensions=None, + ), + normalized_wm_image=dict( + extensions=None, + ), + transformation_mat=dict( + extensions=None, + ), ) outputs = Segment.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_SliceTiming.py b/nipype/interfaces/spm/tests/test_auto_SliceTiming.py index 8c99e4c4e3..85ddf03c52 100644 --- a/nipype/interfaces/spm/tests/test_auto_SliceTiming.py +++ b/nipype/interfaces/spm/tests/test_auto_SliceTiming.py @@ -4,18 +4,45 @@ def test_SliceTiming_inputs(): input_map = dict( - in_files=dict(copyfile=False, field="scans", mandatory=True,), + in_files=dict( + copyfile=False, + field="scans", + mandatory=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), - num_slices=dict(field="nslices", mandatory=True,), - out_prefix=dict(field="prefix", usedefault=True,), + mfile=dict( + usedefault=True, + ), + num_slices=dict( + field="nslices", + mandatory=True, + ), + out_prefix=dict( + field="prefix", + usedefault=True, + ), paths=dict(), - ref_slice=dict(field="refslice", mandatory=True,), - 
slice_order=dict(field="so", mandatory=True,), - time_acquisition=dict(field="ta", mandatory=True,), - time_repetition=dict(field="tr", mandatory=True,), + ref_slice=dict( + field="refslice", + mandatory=True, + ), + slice_order=dict( + field="so", + mandatory=True, + ), + time_acquisition=dict( + field="ta", + mandatory=True, + ), + time_repetition=dict( + field="tr", + mandatory=True, + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = SliceTiming.input_spec() @@ -25,7 +52,9 @@ def test_SliceTiming_inputs(): def test_SliceTiming_outputs(): - output_map = dict(timecorrected_files=dict(),) + output_map = dict( + timecorrected_files=dict(), + ) outputs = SliceTiming.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_Smooth.py b/nipype/interfaces/spm/tests/test_auto_Smooth.py index 4e0025a292..5ed7aa57c0 100644 --- a/nipype/interfaces/spm/tests/test_auto_Smooth.py +++ b/nipype/interfaces/spm/tests/test_auto_Smooth.py @@ -4,16 +4,34 @@ def test_Smooth_inputs(): input_map = dict( - data_type=dict(field="dtype",), - fwhm=dict(field="fwhm",), - implicit_masking=dict(field="im",), - in_files=dict(copyfile=False, field="data", mandatory=True,), + data_type=dict( + field="dtype", + ), + fwhm=dict( + field="fwhm", + ), + implicit_masking=dict( + field="im", + ), + in_files=dict( + copyfile=False, + field="data", + mandatory=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), - out_prefix=dict(field="prefix", usedefault=True,), + mfile=dict( + usedefault=True, + ), + out_prefix=dict( + field="prefix", + usedefault=True, + ), paths=dict(), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = Smooth.input_spec() @@ -23,7 +41,9 @@ def test_Smooth_inputs(): def test_Smooth_outputs(): - output_map = dict(smoothed_files=dict(),) + output_map = dict( + smoothed_files=dict(), + ) outputs = Smooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_Threshold.py b/nipype/interfaces/spm/tests/test_auto_Threshold.py index 75fcbf06e2..470ae7f2d1 100644 --- a/nipype/interfaces/spm/tests/test_auto_Threshold.py +++ b/nipype/interfaces/spm/tests/test_auto_Threshold.py @@ -4,21 +4,50 @@ def test_Threshold_inputs(): input_map = dict( - contrast_index=dict(mandatory=True,), - extent_fdr_p_threshold=dict(usedefault=True,), - extent_threshold=dict(usedefault=True,), - force_activation=dict(usedefault=True,), - height_threshold=dict(usedefault=True,), - height_threshold_type=dict(usedefault=True,), + contrast_index=dict( + mandatory=True, + ), + extent_fdr_p_threshold=dict( + usedefault=True, + ), + extent_threshold=dict( + usedefault=True, + ), + force_activation=dict( + usedefault=True, + ), + height_threshold=dict( + usedefault=True, + ), + height_threshold_type=dict( + usedefault=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), + mfile=dict( + usedefault=True, + ), paths=dict(), - spm_mat_file=dict(copyfile=True, extensions=None, mandatory=True,), - stat_image=dict(copyfile=False, extensions=None, mandatory=True,), - use_fwe_correction=dict(usedefault=True,), + spm_mat_file=dict( + copyfile=True, + extensions=None, + mandatory=True, + ), + stat_image=dict( + copyfile=False, + extensions=None, + mandatory=True, + ), + use_fwe_correction=dict( + usedefault=True, + ), use_mcr=dict(), - 
use_topo_fdr=dict(usedefault=True,), - use_v8struct=dict(min_ver="8", usedefault=True,), + use_topo_fdr=dict( + usedefault=True, + ), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = Threshold.input_spec() @@ -32,9 +61,13 @@ def test_Threshold_outputs(): activation_forced=dict(), cluster_forming_thr=dict(), n_clusters=dict(), - pre_topo_fdr_map=dict(extensions=None,), + pre_topo_fdr_map=dict( + extensions=None, + ), pre_topo_n_clusters=dict(), - thresholded_map=dict(extensions=None,), + thresholded_map=dict( + extensions=None, + ), ) outputs = Threshold.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py b/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py index c654be7b3d..89c5a42e57 100644 --- a/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py +++ b/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py @@ -4,16 +4,35 @@ def test_ThresholdStatistics_inputs(): input_map = dict( - contrast_index=dict(mandatory=True,), - extent_threshold=dict(usedefault=True,), - height_threshold=dict(mandatory=True,), + contrast_index=dict( + mandatory=True, + ), + extent_threshold=dict( + usedefault=True, + ), + height_threshold=dict( + mandatory=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), + mfile=dict( + usedefault=True, + ), paths=dict(), - spm_mat_file=dict(copyfile=True, extensions=None, mandatory=True,), - stat_image=dict(copyfile=False, extensions=None, mandatory=True,), + spm_mat_file=dict( + copyfile=True, + extensions=None, + mandatory=True, + ), + stat_image=dict( + copyfile=False, + extensions=None, + mandatory=True, + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = ThresholdStatistics.input_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py b/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py index fa0cc9e331..fd03e6c867 100644 --- a/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py @@ -4,27 +4,50 @@ def test_TwoSampleTTestDesign_inputs(): input_map = dict( - covariates=dict(field="cov",), - dependent=dict(field="des.t2.dept",), - explicit_mask_file=dict(extensions=None, field="masking.em",), + covariates=dict( + field="cov", + ), + dependent=dict( + field="des.t2.dept", + ), + explicit_mask_file=dict( + extensions=None, + field="masking.em", + ), global_calc_mean=dict( - field="globalc.g_mean", xor=["global_calc_omit", "global_calc_values"], + field="globalc.g_mean", + xor=["global_calc_omit", "global_calc_values"], ), global_calc_omit=dict( - field="globalc.g_omit", xor=["global_calc_mean", "global_calc_values"], + field="globalc.g_omit", + xor=["global_calc_mean", "global_calc_values"], ), global_calc_values=dict( field="globalc.g_user.global_uval", xor=["global_calc_mean", "global_calc_omit"], ), - global_normalization=dict(field="globalm.glonorm",), - group1_files=dict(field="des.t2.scans1", mandatory=True,), - group2_files=dict(field="des.t2.scans2", mandatory=True,), + global_normalization=dict( + field="globalm.glonorm", + ), + group1_files=dict( + field="des.t2.scans1", + mandatory=True, + ), + group2_files=dict( + field="des.t2.scans2", + mandatory=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True,), - no_grand_mean_scaling=dict(field="globalm.gmsca.gmsca_no",), + mfile=dict( + usedefault=True, + ), + no_grand_mean_scaling=dict( + 
field="globalm.gmsca.gmsca_no", + ), paths=dict(), - spm_mat_dir=dict(field="dir",), + spm_mat_dir=dict( + field="dir", + ), threshold_mask_absolute=dict( field="masking.tm.tma.athresh", xor=["threshold_mask_none", "threshold_mask_relative"], @@ -37,10 +60,17 @@ def test_TwoSampleTTestDesign_inputs(): field="masking.tm.tmr.rthresh", xor=["threshold_mask_absolute", "threshold_mask_none"], ), - unequal_variance=dict(field="des.t2.variance",), - use_implicit_threshold=dict(field="masking.im",), + unequal_variance=dict( + field="des.t2.variance", + ), + use_implicit_threshold=dict( + field="masking.im", + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True,), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = TwoSampleTTestDesign.input_spec() @@ -50,7 +80,11 @@ def test_TwoSampleTTestDesign_inputs(): def test_TwoSampleTTestDesign_outputs(): - output_map = dict(spm_mat_file=dict(extensions=None,),) + output_map = dict( + spm_mat_file=dict( + extensions=None, + ), + ) outputs = TwoSampleTTestDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_VBMSegment.py b/nipype/interfaces/spm/tests/test_auto_VBMSegment.py index 626ce9893f..4bc4664c27 100644 --- a/nipype/interfaces/spm/tests/test_auto_VBMSegment.py +++ b/nipype/interfaces/spm/tests/test_auto_VBMSegment.py @@ -5,64 +5,146 @@ def test_VBMSegment_inputs(): input_map = dict( bias_corrected_affine=dict( - field="estwrite.output.bias.affine", usedefault=True, + field="estwrite.output.bias.affine", + usedefault=True, ), bias_corrected_native=dict( - field="estwrite.output.bias.native", usedefault=True, + field="estwrite.output.bias.native", + usedefault=True, ), bias_corrected_normalized=dict( - field="estwrite.output.bias.warped", usedefault=True, + field="estwrite.output.bias.warped", + usedefault=True, + ), + bias_fwhm=dict( + field="estwrite.opts.biasfwhm", + usedefault=True, + ), + bias_regularization=dict( + field="estwrite.opts.biasreg", + usedefault=True, + ), + cleanup_partitions=dict( + field="estwrite.extopts.cleanup", + usedefault=True, + ), + csf_dartel=dict( + field="estwrite.output.CSF.dartel", + usedefault=True, ), - bias_fwhm=dict(field="estwrite.opts.biasfwhm", usedefault=True,), - bias_regularization=dict(field="estwrite.opts.biasreg", usedefault=True,), - cleanup_partitions=dict(field="estwrite.extopts.cleanup", usedefault=True,), - csf_dartel=dict(field="estwrite.output.CSF.dartel", usedefault=True,), csf_modulated_normalized=dict( - field="estwrite.output.CSF.modulated", usedefault=True, + field="estwrite.output.CSF.modulated", + usedefault=True, + ), + csf_native=dict( + field="estwrite.output.CSF.native", + usedefault=True, + ), + csf_normalized=dict( + field="estwrite.output.CSF.warped", + usedefault=True, ), - csf_native=dict(field="estwrite.output.CSF.native", usedefault=True,), - csf_normalized=dict(field="estwrite.output.CSF.warped", usedefault=True,), dartel_template=dict( extensions=[".hdr", ".img", ".img.gz", ".nii"], field="estwrite.extopts.dartelwarp.normhigh.darteltpm", ), - deformation_field=dict(field="estwrite.output.warps", usedefault=True,), - display_results=dict(field="estwrite.extopts.print", usedefault=True,), - gaussians_per_class=dict(usedefault=True,), - gm_dartel=dict(field="estwrite.output.GM.dartel", usedefault=True,), + deformation_field=dict( + field="estwrite.output.warps", + usedefault=True, + ), + display_results=dict( + field="estwrite.extopts.print", + usedefault=True, + ), + 
gaussians_per_class=dict( + usedefault=True, + ), + gm_dartel=dict( + field="estwrite.output.GM.dartel", + usedefault=True, + ), gm_modulated_normalized=dict( - field="estwrite.output.GM.modulated", usedefault=True, + field="estwrite.output.GM.modulated", + usedefault=True, + ), + gm_native=dict( + field="estwrite.output.GM.native", + usedefault=True, + ), + gm_normalized=dict( + field="estwrite.output.GM.warped", + usedefault=True, + ), + in_files=dict( + copyfile=False, + field="estwrite.data", + mandatory=True, + ), + jacobian_determinant=dict( + field="estwrite.jacobian.warped", + usedefault=True, ), - gm_native=dict(field="estwrite.output.GM.native", usedefault=True,), - gm_normalized=dict(field="estwrite.output.GM.warped", usedefault=True,), - in_files=dict(copyfile=False, field="estwrite.data", mandatory=True,), - jacobian_determinant=dict(field="estwrite.jacobian.warped", usedefault=True,), matlab_cmd=dict(), - mfile=dict(usedefault=True,), - mrf_weighting=dict(field="estwrite.extopts.mrf", usedefault=True,), + mfile=dict( + usedefault=True, + ), + mrf_weighting=dict( + field="estwrite.extopts.mrf", + usedefault=True, + ), paths=dict(), - pve_label_dartel=dict(field="estwrite.output.label.dartel", usedefault=True,), - pve_label_native=dict(field="estwrite.output.label.native", usedefault=True,), + pve_label_dartel=dict( + field="estwrite.output.label.dartel", + usedefault=True, + ), + pve_label_native=dict( + field="estwrite.output.label.native", + usedefault=True, + ), pve_label_normalized=dict( - field="estwrite.output.label.warped", usedefault=True, + field="estwrite.output.label.warped", + usedefault=True, + ), + sampling_distance=dict( + field="estwrite.opts.samp", + usedefault=True, + ), + spatial_normalization=dict( + usedefault=True, ), - sampling_distance=dict(field="estwrite.opts.samp", usedefault=True,), - spatial_normalization=dict(usedefault=True,), tissues=dict( - extensions=[".hdr", ".img", ".img.gz", ".nii"], field="estwrite.tpm", + extensions=[".hdr", ".img", ".img.gz", ".nii"], + field="estwrite.tpm", ), use_mcr=dict(), use_sanlm_denoising_filter=dict( - field="estwrite.extopts.sanlm", usedefault=True, + field="estwrite.extopts.sanlm", + usedefault=True, + ), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + warping_regularization=dict( + field="estwrite.opts.warpreg", + usedefault=True, + ), + wm_dartel=dict( + field="estwrite.output.WM.dartel", + usedefault=True, ), - use_v8struct=dict(min_ver="8", usedefault=True,), - warping_regularization=dict(field="estwrite.opts.warpreg", usedefault=True,), - wm_dartel=dict(field="estwrite.output.WM.dartel", usedefault=True,), wm_modulated_normalized=dict( - field="estwrite.output.WM.modulated", usedefault=True, + field="estwrite.output.WM.modulated", + usedefault=True, + ), + wm_native=dict( + field="estwrite.output.WM.native", + usedefault=True, + ), + wm_normalized=dict( + field="estwrite.output.WM.warped", + usedefault=True, ), - wm_native=dict(field="estwrite.output.WM.native", usedefault=True,), - wm_normalized=dict(field="estwrite.output.WM.warped", usedefault=True,), ) inputs = VBMSegment.input_spec() diff --git a/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py b/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py index 0263177c5e..dd7d45e42b 100644 --- a/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py +++ b/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py @@ -4,11 +4,18 @@ def test_BIDSDataGrabber_inputs(): input_map = dict( - base_dir=dict(mandatory=True,), + base_dir=dict( + 
mandatory=True, + ), extra_derivatives=dict(), - index_derivatives=dict(mandatory=True, usedefault=True,), + index_derivatives=dict( + mandatory=True, + usedefault=True, + ), output_query=dict(), - raise_on_empty=dict(usedefault=True,), + raise_on_empty=dict( + usedefault=True, + ), ) inputs = BIDSDataGrabber.input_spec() diff --git a/nipype/interfaces/tests/test_auto_Bru2.py b/nipype/interfaces/tests/test_auto_Bru2.py index 5256b2732f..7935d2fc97 100644 --- a/nipype/interfaces/tests/test_auto_Bru2.py +++ b/nipype/interfaces/tests/test_auto_Bru2.py @@ -4,14 +4,34 @@ def test_Bru2_inputs(): input_map = dict( - actual_size=dict(argstr="-a",), - append_protocol_name=dict(argstr="-p",), - args=dict(argstr="%s",), - compress=dict(argstr="-z",), - environ=dict(nohash=True, usedefault=True,), - force_conversion=dict(argstr="-f",), - input_dir=dict(argstr="%s", mandatory=True, position=-1,), - output_filename=dict(argstr="-o %s", genfile=True,), + actual_size=dict( + argstr="-a", + ), + append_protocol_name=dict( + argstr="-p", + ), + args=dict( + argstr="%s", + ), + compress=dict( + argstr="-z", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + force_conversion=dict( + argstr="-f", + ), + input_dir=dict( + argstr="%s", + mandatory=True, + position=-1, + ), + output_filename=dict( + argstr="-o %s", + genfile=True, + ), ) inputs = Bru2.input_spec() @@ -21,7 +41,11 @@ def test_Bru2_inputs(): def test_Bru2_outputs(): - output_map = dict(nii_file=dict(extensions=None,),) + output_map = dict( + nii_file=dict( + extensions=None, + ), + ) outputs = Bru2.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_C3d.py b/nipype/interfaces/tests/test_auto_C3d.py index d55216b454..d4d26e8264 100644 --- a/nipype/interfaces/tests/test_auto_C3d.py +++ b/nipype/interfaces/tests/test_auto_C3d.py @@ -4,19 +4,55 @@ def test_C3d_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", mandatory=True, position=1,), - interp=dict(argstr="-interpolation %s",), - is_4d=dict(usedefault=True,), - multicomp_split=dict(argstr="-mcr", position=0, usedefault=True,), - out_file=dict(argstr="-o %s", extensions=None, position=-1, xor=["out_files"],), - out_files=dict(argstr="-oo %s", position=-1, xor=["out_file"],), - pix_type=dict(argstr="-type %s",), - resample=dict(argstr="-resample %s",), - scale=dict(argstr="-scale %s",), - shift=dict(argstr="-shift %s",), - smooth=dict(argstr="-smooth %s",), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + mandatory=True, + position=1, + ), + interp=dict( + argstr="-interpolation %s", + ), + is_4d=dict( + usedefault=True, + ), + multicomp_split=dict( + argstr="-mcr", + position=0, + usedefault=True, + ), + out_file=dict( + argstr="-o %s", + extensions=None, + position=-1, + xor=["out_files"], + ), + out_files=dict( + argstr="-oo %s", + position=-1, + xor=["out_file"], + ), + pix_type=dict( + argstr="-type %s", + ), + resample=dict( + argstr="-resample %s", + ), + scale=dict( + argstr="-scale %s", + ), + shift=dict( + argstr="-shift %s", + ), + smooth=dict( + argstr="-smooth %s", + ), ) inputs = C3d.input_spec() @@ -26,7 +62,9 @@ def test_C3d_inputs(): def test_C3d_outputs(): - output_map = dict(out_files=dict(),) + output_map = dict( + out_files=dict(), + ) outputs = C3d.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/tests/test_auto_C3dAffineTool.py b/nipype/interfaces/tests/test_auto_C3dAffineTool.py index 963d2f7931..153f6090a7 100644 --- a/nipype/interfaces/tests/test_auto_C3dAffineTool.py +++ b/nipype/interfaces/tests/test_auto_C3dAffineTool.py @@ -4,13 +4,37 @@ def test_C3dAffineTool_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - fsl2ras=dict(argstr="-fsl2ras", position=4,), - itk_transform=dict(argstr="-oitk %s", hash_files=False, position=5,), - reference_file=dict(argstr="-ref %s", extensions=None, position=1,), - source_file=dict(argstr="-src %s", extensions=None, position=2,), - transform_file=dict(argstr="%s", extensions=None, position=3,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fsl2ras=dict( + argstr="-fsl2ras", + position=4, + ), + itk_transform=dict( + argstr="-oitk %s", + hash_files=False, + position=5, + ), + reference_file=dict( + argstr="-ref %s", + extensions=None, + position=1, + ), + source_file=dict( + argstr="-src %s", + extensions=None, + position=2, + ), + transform_file=dict( + argstr="%s", + extensions=None, + position=3, + ), ) inputs = C3dAffineTool.input_spec() @@ -20,7 +44,11 @@ def test_C3dAffineTool_inputs(): def test_C3dAffineTool_outputs(): - output_map = dict(itk_transform=dict(extensions=None,),) + output_map = dict( + itk_transform=dict( + extensions=None, + ), + ) outputs = C3dAffineTool.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_CopyMeta.py b/nipype/interfaces/tests/test_auto_CopyMeta.py index 6415514c3d..f7a554226e 100644 --- a/nipype/interfaces/tests/test_auto_CopyMeta.py +++ b/nipype/interfaces/tests/test_auto_CopyMeta.py @@ -4,10 +4,16 @@ def test_CopyMeta_inputs(): input_map = dict( - dest_file=dict(extensions=None, mandatory=True,), + dest_file=dict( + extensions=None, + mandatory=True, + ), exclude_classes=dict(), include_classes=dict(), - src_file=dict(extensions=None, mandatory=True,), + src_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = CopyMeta.input_spec() @@ -17,7 +23,11 @@ def test_CopyMeta_inputs(): def test_CopyMeta_outputs(): - output_map = dict(dest_file=dict(extensions=None,),) + output_map = dict( + dest_file=dict( + extensions=None, + ), + ) outputs = CopyMeta.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_DataFinder.py b/nipype/interfaces/tests/test_auto_DataFinder.py index 81cdc92ac8..e1f24b3517 100644 --- a/nipype/interfaces/tests/test_auto_DataFinder.py +++ b/nipype/interfaces/tests/test_auto_DataFinder.py @@ -5,11 +5,17 @@ def test_DataFinder_inputs(): input_map = dict( ignore_regexes=dict(), - match_regex=dict(usedefault=True,), + match_regex=dict( + usedefault=True, + ), max_depth=dict(), min_depth=dict(), - root_paths=dict(mandatory=True,), - unpack_single=dict(usedefault=True,), + root_paths=dict( + mandatory=True, + ), + unpack_single=dict( + usedefault=True, + ), ) inputs = DataFinder.input_spec() diff --git a/nipype/interfaces/tests/test_auto_DataGrabber.py b/nipype/interfaces/tests/test_auto_DataGrabber.py index c3817e43e7..a58e4dd434 100644 --- a/nipype/interfaces/tests/test_auto_DataGrabber.py +++ b/nipype/interfaces/tests/test_auto_DataGrabber.py @@ -5,10 +5,18 @@ def test_DataGrabber_inputs(): input_map = dict( base_directory=dict(), - drop_blank_outputs=dict(usedefault=True,), - raise_on_empty=dict(usedefault=True,), - sort_filelist=dict(mandatory=True,), - 
template=dict(mandatory=True,), + drop_blank_outputs=dict( + usedefault=True, + ), + raise_on_empty=dict( + usedefault=True, + ), + sort_filelist=dict( + mandatory=True, + ), + template=dict( + mandatory=True, + ), template_args=dict(), ) inputs = DataGrabber.input_spec() diff --git a/nipype/interfaces/tests/test_auto_DataSink.py b/nipype/interfaces/tests/test_auto_DataSink.py index 870dbc3c85..1ce4183b70 100644 --- a/nipype/interfaces/tests/test_auto_DataSink.py +++ b/nipype/interfaces/tests/test_auto_DataSink.py @@ -4,16 +4,22 @@ def test_DataSink_inputs(): input_map = dict( - _outputs=dict(usedefault=True,), + _outputs=dict( + usedefault=True, + ), base_directory=dict(), bucket=dict(), container=dict(), creds_path=dict(), encrypt_bucket_keys=dict(), local_copy=dict(), - parameterization=dict(usedefault=True,), + parameterization=dict( + usedefault=True, + ), regexp_substitutions=dict(), - remove_dest_dir=dict(usedefault=True,), + remove_dest_dir=dict( + usedefault=True, + ), strip_dir=dict(), substitutions=dict(), ) @@ -25,7 +31,9 @@ def test_DataSink_inputs(): def test_DataSink_outputs(): - output_map = dict(out_file=dict(),) + output_map = dict( + out_file=dict(), + ) outputs = DataSink.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_Dcm2nii.py b/nipype/interfaces/tests/test_auto_Dcm2nii.py index 9aca885a64..948aafa083 100644 --- a/nipype/interfaces/tests/test_auto_Dcm2nii.py +++ b/nipype/interfaces/tests/test_auto_Dcm2nii.py @@ -4,25 +4,75 @@ def test_Dcm2nii_inputs(): input_map = dict( - anonymize=dict(argstr="-a", usedefault=True,), - args=dict(argstr="%s",), - collapse_folders=dict(argstr="-c", usedefault=True,), - config_file=dict(argstr="-b %s", extensions=None, genfile=True,), - convert_all_pars=dict(argstr="-v", usedefault=True,), - date_in_filename=dict(argstr="-d", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - events_in_filename=dict(argstr="-e", usedefault=True,), - gzip_output=dict(argstr="-g", usedefault=True,), - id_in_filename=dict(argstr="-i", usedefault=True,), - nii_output=dict(argstr="-n", usedefault=True,), - output_dir=dict(argstr="-o %s", genfile=True,), - protocol_in_filename=dict(argstr="-p", usedefault=True,), - reorient=dict(argstr="-r",), - reorient_and_crop=dict(argstr="-x", usedefault=True,), + anonymize=dict( + argstr="-a", + usedefault=True, + ), + args=dict( + argstr="%s", + ), + collapse_folders=dict( + argstr="-c", + usedefault=True, + ), + config_file=dict( + argstr="-b %s", + extensions=None, + genfile=True, + ), + convert_all_pars=dict( + argstr="-v", + usedefault=True, + ), + date_in_filename=dict( + argstr="-d", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + events_in_filename=dict( + argstr="-e", + usedefault=True, + ), + gzip_output=dict( + argstr="-g", + usedefault=True, + ), + id_in_filename=dict( + argstr="-i", + usedefault=True, + ), + nii_output=dict( + argstr="-n", + usedefault=True, + ), + output_dir=dict( + argstr="-o %s", + genfile=True, + ), + protocol_in_filename=dict( + argstr="-p", + usedefault=True, + ), + reorient=dict( + argstr="-r", + ), + reorient_and_crop=dict( + argstr="-x", + usedefault=True, + ), source_dir=dict( - argstr="%s", mandatory=True, position=-1, xor=["source_names"], + argstr="%s", + mandatory=True, + position=-1, + xor=["source_names"], + ), + source_in_filename=dict( + argstr="-f", + usedefault=True, ), - source_in_filename=dict(argstr="-f", usedefault=True,), source_names=dict( argstr="%s", 
copyfile=False, @@ -30,7 +80,10 @@ def test_Dcm2nii_inputs(): position=-1, xor=["source_dir"], ), - spm_analyze=dict(argstr="-s", xor=["nii_output"],), + spm_analyze=dict( + argstr="-s", + xor=["nii_output"], + ), ) inputs = Dcm2nii.input_spec() diff --git a/nipype/interfaces/tests/test_auto_Dcm2niix.py b/nipype/interfaces/tests/test_auto_Dcm2niix.py index dfaa46d36a..dec95abcff 100644 --- a/nipype/interfaces/tests/test_auto_Dcm2niix.py +++ b/nipype/interfaces/tests/test_auto_Dcm2niix.py @@ -4,24 +4,68 @@ def test_Dcm2niix_inputs(): input_map = dict( - anon_bids=dict(argstr="-ba", requires=["bids_format"],), - args=dict(argstr="%s",), - bids_format=dict(argstr="-b", usedefault=True,), - comment=dict(argstr="-c %s",), - compress=dict(argstr="-z %s", usedefault=True,), - compression=dict(argstr="-%d",), - crop=dict(argstr="-x", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - has_private=dict(argstr="-t", usedefault=True,), - ignore_deriv=dict(argstr="-i",), - merge_imgs=dict(argstr="-m", usedefault=True,), - out_filename=dict(argstr="-f %s",), - output_dir=dict(argstr="-o %s", usedefault=True,), - philips_float=dict(argstr="-p",), - series_numbers=dict(argstr="-n %s...",), - single_file=dict(argstr="-s", usedefault=True,), + anon_bids=dict( + argstr="-ba", + requires=["bids_format"], + ), + args=dict( + argstr="%s", + ), + bids_format=dict( + argstr="-b", + usedefault=True, + ), + comment=dict( + argstr="-c %s", + ), + compress=dict( + argstr="-z %s", + usedefault=True, + ), + compression=dict( + argstr="-%d", + ), + crop=dict( + argstr="-x", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + has_private=dict( + argstr="-t", + usedefault=True, + ), + ignore_deriv=dict( + argstr="-i", + ), + merge_imgs=dict( + argstr="-m", + usedefault=True, + ), + out_filename=dict( + argstr="-f %s", + ), + output_dir=dict( + argstr="-o %s", + usedefault=True, + ), + philips_float=dict( + argstr="-p", + ), + series_numbers=dict( + argstr="-n %s...", + ), + single_file=dict( + argstr="-s", + usedefault=True, + ), source_dir=dict( - argstr="%s", mandatory=True, position=-1, xor=["source_names"], + argstr="%s", + mandatory=True, + position=-1, + xor=["source_names"], ), source_names=dict( argstr="%s", @@ -30,8 +74,13 @@ def test_Dcm2niix_inputs(): position=-1, xor=["source_dir"], ), - to_nrrd=dict(argstr="-e",), - verbose=dict(argstr="-v", usedefault=True,), + to_nrrd=dict( + argstr="-e", + ), + verbose=dict( + argstr="-v", + usedefault=True, + ), ) inputs = Dcm2niix.input_spec() @@ -41,7 +90,12 @@ def test_Dcm2niix_inputs(): def test_Dcm2niix_outputs(): - output_map = dict(bids=dict(), bvals=dict(), bvecs=dict(), converted_files=dict(),) + output_map = dict( + bids=dict(), + bvals=dict(), + bvecs=dict(), + converted_files=dict(), + ) outputs = Dcm2niix.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_DcmStack.py b/nipype/interfaces/tests/test_auto_DcmStack.py index af0ff91495..02bf268904 100644 --- a/nipype/interfaces/tests/test_auto_DcmStack.py +++ b/nipype/interfaces/tests/test_auto_DcmStack.py @@ -4,12 +4,18 @@ def test_DcmStack_inputs(): input_map = dict( - dicom_files=dict(mandatory=True,), + dicom_files=dict( + mandatory=True, + ), embed_meta=dict(), exclude_regexes=dict(), - force_read=dict(usedefault=True,), + force_read=dict( + usedefault=True, + ), include_regexes=dict(), - out_ext=dict(usedefault=True,), + out_ext=dict( + usedefault=True, + ), out_format=dict(), out_path=dict(), ) @@ -21,7 
+27,11 @@ def test_DcmStack_inputs(): def test_DcmStack_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = DcmStack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_ExportFile.py b/nipype/interfaces/tests/test_auto_ExportFile.py index 331b51d8c2..adac52e161 100644 --- a/nipype/interfaces/tests/test_auto_ExportFile.py +++ b/nipype/interfaces/tests/test_auto_ExportFile.py @@ -4,10 +4,18 @@ def test_ExportFile_inputs(): input_map = dict( - check_extension=dict(usedefault=True,), + check_extension=dict( + usedefault=True, + ), clobber=dict(), - in_file=dict(extensions=None, mandatory=True,), - out_file=dict(extensions=None, mandatory=True,), + in_file=dict( + extensions=None, + mandatory=True, + ), + out_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = ExportFile.input_spec() @@ -17,7 +25,11 @@ def test_ExportFile_inputs(): def test_ExportFile_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ExportFile.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_FreeSurferSource.py b/nipype/interfaces/tests/test_auto_FreeSurferSource.py index c7102512ae..875380f75f 100644 --- a/nipype/interfaces/tests/test_auto_FreeSurferSource.py +++ b/nipype/interfaces/tests/test_auto_FreeSurferSource.py @@ -4,9 +4,15 @@ def test_FreeSurferSource_inputs(): input_map = dict( - hemi=dict(usedefault=True,), - subject_id=dict(mandatory=True,), - subjects_dir=dict(mandatory=True,), + hemi=dict( + usedefault=True, + ), + subject_id=dict( + mandatory=True, + ), + subjects_dir=dict( + mandatory=True, + ), ) inputs = FreeSurferSource.input_spec() @@ -17,43 +23,143 @@ def test_FreeSurferSource_inputs(): def test_FreeSurferSource_outputs(): output_map = dict( - BA_stats=dict(altkey="BA", loc="stats",), - T1=dict(extensions=None, loc="mri",), - annot=dict(altkey="*annot", loc="label",), - aparc_a2009s_stats=dict(altkey="aparc.a2009s", loc="stats",), - aparc_aseg=dict(altkey="aparc*aseg", loc="mri",), - aparc_stats=dict(altkey="aparc", loc="stats",), - area_pial=dict(altkey="area.pial", loc="surf",), - aseg=dict(extensions=None, loc="mri",), - aseg_stats=dict(altkey="aseg", loc="stats",), - avg_curv=dict(loc="surf",), - brain=dict(extensions=None, loc="mri",), - brainmask=dict(extensions=None, loc="mri",), - curv=dict(loc="surf",), - curv_pial=dict(altkey="curv.pial", loc="surf",), - curv_stats=dict(altkey="curv", loc="stats",), - entorhinal_exvivo_stats=dict(altkey="entorhinal_exvivo", loc="stats",), - filled=dict(extensions=None, loc="mri",), - graymid=dict(altkey=["graymid", "midthickness"], loc="surf",), - inflated=dict(loc="surf",), - jacobian_white=dict(loc="surf",), - label=dict(altkey="*label", loc="label",), - norm=dict(extensions=None, loc="mri",), - nu=dict(extensions=None, loc="mri",), - orig=dict(extensions=None, loc="mri",), - pial=dict(loc="surf",), - rawavg=dict(extensions=None, loc="mri",), - ribbon=dict(altkey="*ribbon", loc="mri",), - smoothwm=dict(loc="surf",), - sphere=dict(loc="surf",), - sphere_reg=dict(altkey="sphere.reg", loc="surf",), - sulc=dict(loc="surf",), - thickness=dict(loc="surf",), - volume=dict(loc="surf",), - white=dict(loc="surf",), - wm=dict(extensions=None, loc="mri",), - wmparc=dict(extensions=None, loc="mri",), - wmparc_stats=dict(altkey="wmparc", loc="stats",), + BA_stats=dict( + 
altkey="BA", + loc="stats", + ), + T1=dict( + extensions=None, + loc="mri", + ), + annot=dict( + altkey="*annot", + loc="label", + ), + aparc_a2009s_stats=dict( + altkey="aparc.a2009s", + loc="stats", + ), + aparc_aseg=dict( + altkey="aparc*aseg", + loc="mri", + ), + aparc_stats=dict( + altkey="aparc", + loc="stats", + ), + area_pial=dict( + altkey="area.pial", + loc="surf", + ), + aseg=dict( + extensions=None, + loc="mri", + ), + aseg_stats=dict( + altkey="aseg", + loc="stats", + ), + avg_curv=dict( + loc="surf", + ), + brain=dict( + extensions=None, + loc="mri", + ), + brainmask=dict( + extensions=None, + loc="mri", + ), + curv=dict( + loc="surf", + ), + curv_pial=dict( + altkey="curv.pial", + loc="surf", + ), + curv_stats=dict( + altkey="curv", + loc="stats", + ), + entorhinal_exvivo_stats=dict( + altkey="entorhinal_exvivo", + loc="stats", + ), + filled=dict( + extensions=None, + loc="mri", + ), + graymid=dict( + altkey=["graymid", "midthickness"], + loc="surf", + ), + inflated=dict( + loc="surf", + ), + jacobian_white=dict( + loc="surf", + ), + label=dict( + altkey="*label", + loc="label", + ), + norm=dict( + extensions=None, + loc="mri", + ), + nu=dict( + extensions=None, + loc="mri", + ), + orig=dict( + extensions=None, + loc="mri", + ), + pial=dict( + loc="surf", + ), + rawavg=dict( + extensions=None, + loc="mri", + ), + ribbon=dict( + altkey="*ribbon", + loc="mri", + ), + smoothwm=dict( + loc="surf", + ), + sphere=dict( + loc="surf", + ), + sphere_reg=dict( + altkey="sphere.reg", + loc="surf", + ), + sulc=dict( + loc="surf", + ), + thickness=dict( + loc="surf", + ), + volume=dict( + loc="surf", + ), + white=dict( + loc="surf", + ), + wm=dict( + extensions=None, + loc="mri", + ), + wmparc=dict( + extensions=None, + loc="mri", + ), + wmparc_stats=dict( + altkey="wmparc", + loc="stats", + ), ) outputs = FreeSurferSource.output_spec() diff --git a/nipype/interfaces/tests/test_auto_GroupAndStack.py b/nipype/interfaces/tests/test_auto_GroupAndStack.py index f330efde20..7282e23a8b 100644 --- a/nipype/interfaces/tests/test_auto_GroupAndStack.py +++ b/nipype/interfaces/tests/test_auto_GroupAndStack.py @@ -4,12 +4,18 @@ def test_GroupAndStack_inputs(): input_map = dict( - dicom_files=dict(mandatory=True,), + dicom_files=dict( + mandatory=True, + ), embed_meta=dict(), exclude_regexes=dict(), - force_read=dict(usedefault=True,), + force_read=dict( + usedefault=True, + ), include_regexes=dict(), - out_ext=dict(usedefault=True,), + out_ext=dict( + usedefault=True, + ), out_format=dict(), out_path=dict(), ) @@ -21,7 +27,9 @@ def test_GroupAndStack_inputs(): def test_GroupAndStack_outputs(): - output_map = dict(out_list=dict(),) + output_map = dict( + out_list=dict(), + ) outputs = GroupAndStack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_JSONFileGrabber.py b/nipype/interfaces/tests/test_auto_JSONFileGrabber.py index c01a584949..6e548f17c1 100644 --- a/nipype/interfaces/tests/test_auto_JSONFileGrabber.py +++ b/nipype/interfaces/tests/test_auto_JSONFileGrabber.py @@ -3,7 +3,12 @@ def test_JSONFileGrabber_inputs(): - input_map = dict(defaults=dict(), in_file=dict(extensions=None,),) + input_map = dict( + defaults=dict(), + in_file=dict( + extensions=None, + ), + ) inputs = JSONFileGrabber.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/interfaces/tests/test_auto_JSONFileSink.py b/nipype/interfaces/tests/test_auto_JSONFileSink.py index c88faba852..cdfa32195b 100644 --- 
a/nipype/interfaces/tests/test_auto_JSONFileSink.py +++ b/nipype/interfaces/tests/test_auto_JSONFileSink.py @@ -4,9 +4,15 @@ def test_JSONFileSink_inputs(): input_map = dict( - _outputs=dict(usedefault=True,), - in_dict=dict(usedefault=True,), - out_file=dict(extensions=None,), + _outputs=dict( + usedefault=True, + ), + in_dict=dict( + usedefault=True, + ), + out_file=dict( + extensions=None, + ), ) inputs = JSONFileSink.input_spec() @@ -16,7 +22,11 @@ def test_JSONFileSink_inputs(): def test_JSONFileSink_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = JSONFileSink.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_LookupMeta.py b/nipype/interfaces/tests/test_auto_LookupMeta.py index fa0129798e..22f6678734 100644 --- a/nipype/interfaces/tests/test_auto_LookupMeta.py +++ b/nipype/interfaces/tests/test_auto_LookupMeta.py @@ -4,7 +4,13 @@ def test_LookupMeta_inputs(): input_map = dict( - in_file=dict(extensions=None, mandatory=True,), meta_keys=dict(mandatory=True,), + in_file=dict( + extensions=None, + mandatory=True, + ), + meta_keys=dict( + mandatory=True, + ), ) inputs = LookupMeta.input_spec() diff --git a/nipype/interfaces/tests/test_auto_MatlabCommand.py b/nipype/interfaces/tests/test_auto_MatlabCommand.py index 0dea244ec8..1dfd9c1dde 100644 --- a/nipype/interfaces/tests/test_auto_MatlabCommand.py +++ b/nipype/interfaces/tests/test_auto_MatlabCommand.py @@ -4,20 +4,53 @@ def test_MatlabCommand_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - logfile=dict(argstr="-logfile %s", extensions=None,), - mfile=dict(usedefault=True,), - nodesktop=dict(argstr="-nodesktop", nohash=True, usedefault=True,), - nosplash=dict(argstr="-nosplash", nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + logfile=dict( + argstr="-logfile %s", + extensions=None, + ), + mfile=dict( + usedefault=True, + ), + nodesktop=dict( + argstr="-nodesktop", + nohash=True, + usedefault=True, + ), + nosplash=dict( + argstr="-nosplash", + nohash=True, + usedefault=True, + ), paths=dict(), - postscript=dict(usedefault=True,), - prescript=dict(usedefault=True,), - script=dict(argstr='-r "%s;exit"', mandatory=True, position=-1,), - script_file=dict(extensions=None, usedefault=True,), - single_comp_thread=dict(argstr="-singleCompThread", nohash=True,), + postscript=dict( + usedefault=True, + ), + prescript=dict( + usedefault=True, + ), + script=dict( + argstr='-r "%s;exit"', + mandatory=True, + position=-1, + ), + script_file=dict( + extensions=None, + usedefault=True, + ), + single_comp_thread=dict( + argstr="-singleCompThread", + nohash=True, + ), uses_mcr=dict( - nohash=True, xor=["nodesktop", "nosplash", "single_comp_thread"], + nohash=True, + xor=["nodesktop", "nosplash", "single_comp_thread"], ), ) inputs = MatlabCommand.input_spec() diff --git a/nipype/interfaces/tests/test_auto_MergeNifti.py b/nipype/interfaces/tests/test_auto_MergeNifti.py index 38c0f39f72..f199fc5da6 100644 --- a/nipype/interfaces/tests/test_auto_MergeNifti.py +++ b/nipype/interfaces/tests/test_auto_MergeNifti.py @@ -4,9 +4,13 @@ def test_MergeNifti_inputs(): input_map = dict( - in_files=dict(mandatory=True,), + in_files=dict( + mandatory=True, + ), merge_dim=dict(), - out_ext=dict(usedefault=True,), + out_ext=dict( + usedefault=True, + ), out_format=dict(), out_path=dict(), 
sort_order=dict(), @@ -19,7 +23,11 @@ def test_MergeNifti_inputs(): def test_MergeNifti_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MergeNifti.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_MeshFix.py b/nipype/interfaces/tests/test_auto_MeshFix.py index 9a69fc0859..1ae3b2b67c 100644 --- a/nipype/interfaces/tests/test_auto_MeshFix.py +++ b/nipype/interfaces/tests/test_auto_MeshFix.py @@ -4,18 +4,41 @@ def test_MeshFix_inputs(): input_map = dict( - args=dict(argstr="%s",), - cut_inner=dict(argstr="--cut-inner %d",), - cut_outer=dict(argstr="--cut-outer %d",), - decouple_inin=dict(argstr="--decouple-inin %d",), - decouple_outin=dict(argstr="--decouple-outin %d",), - decouple_outout=dict(argstr="--decouple-outout %d",), - dilation=dict(argstr="--dilate %d",), - dont_clean=dict(argstr="--no-clean",), - environ=dict(nohash=True, usedefault=True,), - epsilon_angle=dict(argstr="-a %f",), + args=dict( + argstr="%s", + ), + cut_inner=dict( + argstr="--cut-inner %d", + ), + cut_outer=dict( + argstr="--cut-outer %d", + ), + decouple_inin=dict( + argstr="--decouple-inin %d", + ), + decouple_outin=dict( + argstr="--decouple-outin %d", + ), + decouple_outout=dict( + argstr="--decouple-outout %d", + ), + dilation=dict( + argstr="--dilate %d", + ), + dont_clean=dict( + argstr="--no-clean", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + epsilon_angle=dict( + argstr="-a %f", + ), finetuning_distance=dict( - argstr="%f", position=-2, requires=["finetuning_substeps"], + argstr="%f", + position=-2, + requires=["finetuning_substeps"], ), finetuning_inwards=dict( argstr="--fineTuneIn ", @@ -29,37 +52,75 @@ def test_MeshFix_inputs(): xor=["finetuning_inwards"], ), finetuning_substeps=dict( - argstr="%d", position=-1, requires=["finetuning_distance"], + argstr="%d", + position=-1, + requires=["finetuning_distance"], + ), + in_file1=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + in_file2=dict( + argstr="%s", + extensions=None, + position=2, + ), + join_closest_components=dict( + argstr="-jc", + xor=["join_closest_components"], ), - in_file1=dict(argstr="%s", extensions=None, mandatory=True, position=1,), - in_file2=dict(argstr="%s", extensions=None, position=2,), - join_closest_components=dict(argstr="-jc", xor=["join_closest_components"],), join_overlapping_largest_components=dict( - argstr="-j", xor=["join_closest_components"], - ), - laplacian_smoothing_steps=dict(argstr="--smooth %d",), - number_of_biggest_shells=dict(argstr="--shells %d",), - out_filename=dict(argstr="-o %s", extensions=None, genfile=True,), - output_type=dict(usedefault=True,), - quiet_mode=dict(argstr="-q",), - remove_handles=dict(argstr="--remove-handles",), + argstr="-j", + xor=["join_closest_components"], + ), + laplacian_smoothing_steps=dict( + argstr="--smooth %d", + ), + number_of_biggest_shells=dict( + argstr="--shells %d", + ), + out_filename=dict( + argstr="-o %s", + extensions=None, + genfile=True, + ), + output_type=dict( + usedefault=True, + ), + quiet_mode=dict( + argstr="-q", + ), + remove_handles=dict( + argstr="--remove-handles", + ), save_as_freesurfer_mesh=dict( - argstr="--fsmesh", xor=["save_as_vrml", "save_as_stl"], + argstr="--fsmesh", + xor=["save_as_vrml", "save_as_stl"], ), save_as_stl=dict( - argstr="--stl", xor=["save_as_vrml", "save_as_freesurfer_mesh"], + argstr="--stl", + xor=["save_as_vrml", 
"save_as_freesurfer_mesh"], ), save_as_vrml=dict( - argstr="--wrl", xor=["save_as_stl", "save_as_freesurfer_mesh"], + argstr="--wrl", + xor=["save_as_stl", "save_as_freesurfer_mesh"], + ), + set_intersections_to_one=dict( + argstr="--intersect", ), - set_intersections_to_one=dict(argstr="--intersect",), uniform_remeshing_steps=dict( - argstr="-u %d", requires=["uniform_remeshing_vertices"], + argstr="-u %d", + requires=["uniform_remeshing_vertices"], ), uniform_remeshing_vertices=dict( - argstr="--vertices %d", requires=["uniform_remeshing_steps"], + argstr="--vertices %d", + requires=["uniform_remeshing_steps"], + ), + x_shift=dict( + argstr="--smooth %d", ), - x_shift=dict(argstr="--smooth %d",), ) inputs = MeshFix.input_spec() @@ -69,7 +130,11 @@ def test_MeshFix_inputs(): def test_MeshFix_outputs(): - output_map = dict(mesh_file=dict(extensions=None,),) + output_map = dict( + mesh_file=dict( + extensions=None, + ), + ) outputs = MeshFix.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_MySQLSink.py b/nipype/interfaces/tests/test_auto_MySQLSink.py index 30bf18ef26..702a21e9ce 100644 --- a/nipype/interfaces/tests/test_auto_MySQLSink.py +++ b/nipype/interfaces/tests/test_auto_MySQLSink.py @@ -4,8 +4,14 @@ def test_MySQLSink_inputs(): input_map = dict( - config=dict(extensions=None, mandatory=True, xor=["host"],), - database_name=dict(mandatory=True,), + config=dict( + extensions=None, + mandatory=True, + xor=["host"], + ), + database_name=dict( + mandatory=True, + ), host=dict( mandatory=True, requires=["username", "password"], @@ -13,7 +19,9 @@ def test_MySQLSink_inputs(): xor=["config"], ), password=dict(), - table_name=dict(mandatory=True,), + table_name=dict( + mandatory=True, + ), username=dict(), ) inputs = MySQLSink.input_spec() diff --git a/nipype/interfaces/tests/test_auto_PETPVC.py b/nipype/interfaces/tests/test_auto_PETPVC.py index 49ba858e4a..f7da7f45dd 100644 --- a/nipype/interfaces/tests/test_auto_PETPVC.py +++ b/nipype/interfaces/tests/test_auto_PETPVC.py @@ -4,20 +4,65 @@ def test_PETPVC_inputs(): input_map = dict( - alpha=dict(argstr="-a %.4f", usedefault=True,), - args=dict(argstr="%s",), - debug=dict(argstr="-d", usedefault=True,), - environ=dict(nohash=True, usedefault=True,), - fwhm_x=dict(argstr="-x %.4f", mandatory=True,), - fwhm_y=dict(argstr="-y %.4f", mandatory=True,), - fwhm_z=dict(argstr="-z %.4f", mandatory=True,), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True,), - mask_file=dict(argstr="-m %s", extensions=None, mandatory=True,), - n_deconv=dict(argstr="-k %d", usedefault=True,), - n_iter=dict(argstr="-n %d", usedefault=True,), - out_file=dict(argstr="-o %s", extensions=None, genfile=True, hash_files=False,), - pvc=dict(argstr="-p %s", mandatory=True,), - stop_crit=dict(argstr="-s %.4f", usedefault=True,), + alpha=dict( + argstr="-a %.4f", + usedefault=True, + ), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-d", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fwhm_x=dict( + argstr="-x %.4f", + mandatory=True, + ), + fwhm_y=dict( + argstr="-y %.4f", + mandatory=True, + ), + fwhm_z=dict( + argstr="-z %.4f", + mandatory=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + mask_file=dict( + argstr="-m %s", + extensions=None, + mandatory=True, + ), + n_deconv=dict( + argstr="-k %d", + usedefault=True, + ), + n_iter=dict( + argstr="-n %d", + usedefault=True, + ), + out_file=dict( + argstr="-o %s", + 
extensions=None, + genfile=True, + hash_files=False, + ), + pvc=dict( + argstr="-p %s", + mandatory=True, + ), + stop_crit=dict( + argstr="-s %.4f", + usedefault=True, + ), ) inputs = PETPVC.input_spec() @@ -27,7 +72,11 @@ def test_PETPVC_inputs(): def test_PETPVC_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = PETPVC.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_Quickshear.py b/nipype/interfaces/tests/test_auto_Quickshear.py index 06b829af9a..92212335dc 100644 --- a/nipype/interfaces/tests/test_auto_Quickshear.py +++ b/nipype/interfaces/tests/test_auto_Quickshear.py @@ -4,11 +4,29 @@ def test_Quickshear_inputs(): input_map = dict( - args=dict(argstr="%s",), - buff=dict(argstr="%d", position=4,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1,), - mask_file=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + args=dict( + argstr="%s", + ), + buff=dict( + argstr="%d", + position=4, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + mask_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), out_file=dict( argstr="%s", extensions=None, @@ -26,7 +44,11 @@ def test_Quickshear_inputs(): def test_Quickshear_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Quickshear.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_Reorient.py b/nipype/interfaces/tests/test_auto_Reorient.py index bb4660a05c..eb7a0ce5ff 100644 --- a/nipype/interfaces/tests/test_auto_Reorient.py +++ b/nipype/interfaces/tests/test_auto_Reorient.py @@ -4,8 +4,13 @@ def test_Reorient_inputs(): input_map = dict( - in_file=dict(extensions=None, mandatory=True,), - orientation=dict(usedefault=True,), + in_file=dict( + extensions=None, + mandatory=True, + ), + orientation=dict( + usedefault=True, + ), ) inputs = Reorient.input_spec() @@ -16,7 +21,12 @@ def test_Reorient_inputs(): def test_Reorient_outputs(): output_map = dict( - out_file=dict(extensions=None,), transform=dict(extensions=None,), + out_file=dict( + extensions=None, + ), + transform=dict( + extensions=None, + ), ) outputs = Reorient.output_spec() diff --git a/nipype/interfaces/tests/test_auto_Rescale.py b/nipype/interfaces/tests/test_auto_Rescale.py index d8c83c24ca..5b14cfc8c0 100644 --- a/nipype/interfaces/tests/test_auto_Rescale.py +++ b/nipype/interfaces/tests/test_auto_Rescale.py @@ -4,10 +4,18 @@ def test_Rescale_inputs(): input_map = dict( - in_file=dict(extensions=None, mandatory=True,), + in_file=dict( + extensions=None, + mandatory=True, + ), invert=dict(), - percentile=dict(usedefault=True,), - ref_file=dict(extensions=None, mandatory=True,), + percentile=dict( + usedefault=True, + ), + ref_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = Rescale.input_spec() @@ -17,7 +25,11 @@ def test_Rescale_inputs(): def test_Rescale_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Rescale.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_S3DataGrabber.py b/nipype/interfaces/tests/test_auto_S3DataGrabber.py 
index 96b32701e2..4b71fe49c3 100644 --- a/nipype/interfaces/tests/test_auto_S3DataGrabber.py +++ b/nipype/interfaces/tests/test_auto_S3DataGrabber.py @@ -4,14 +4,28 @@ def test_S3DataGrabber_inputs(): input_map = dict( - anon=dict(usedefault=True,), - bucket=dict(mandatory=True,), - bucket_path=dict(usedefault=True,), + anon=dict( + usedefault=True, + ), + bucket=dict( + mandatory=True, + ), + bucket_path=dict( + usedefault=True, + ), local_directory=dict(), - raise_on_empty=dict(usedefault=True,), - region=dict(usedefault=True,), - sort_filelist=dict(mandatory=True,), - template=dict(mandatory=True,), + raise_on_empty=dict( + usedefault=True, + ), + region=dict( + usedefault=True, + ), + sort_filelist=dict( + mandatory=True, + ), + template=dict( + mandatory=True, + ), template_args=dict(), ) inputs = S3DataGrabber.input_spec() diff --git a/nipype/interfaces/tests/test_auto_SQLiteSink.py b/nipype/interfaces/tests/test_auto_SQLiteSink.py index f1a62ead15..fe33b65675 100644 --- a/nipype/interfaces/tests/test_auto_SQLiteSink.py +++ b/nipype/interfaces/tests/test_auto_SQLiteSink.py @@ -4,8 +4,13 @@ def test_SQLiteSink_inputs(): input_map = dict( - database_file=dict(extensions=None, mandatory=True,), - table_name=dict(mandatory=True,), + database_file=dict( + extensions=None, + mandatory=True, + ), + table_name=dict( + mandatory=True, + ), ) inputs = SQLiteSink.input_spec() diff --git a/nipype/interfaces/tests/test_auto_SSHDataGrabber.py b/nipype/interfaces/tests/test_auto_SSHDataGrabber.py index 7a759cb07f..5a58cad276 100644 --- a/nipype/interfaces/tests/test_auto_SSHDataGrabber.py +++ b/nipype/interfaces/tests/test_auto_SSHDataGrabber.py @@ -4,17 +4,35 @@ def test_SSHDataGrabber_inputs(): input_map = dict( - base_directory=dict(mandatory=True,), - download_files=dict(usedefault=True,), - drop_blank_outputs=dict(usedefault=True,), - hostname=dict(mandatory=True,), + base_directory=dict( + mandatory=True, + ), + download_files=dict( + usedefault=True, + ), + drop_blank_outputs=dict( + usedefault=True, + ), + hostname=dict( + mandatory=True, + ), password=dict(), - raise_on_empty=dict(usedefault=True,), - sort_filelist=dict(mandatory=True,), - ssh_log_to_file=dict(usedefault=True,), - template=dict(mandatory=True,), + raise_on_empty=dict( + usedefault=True, + ), + sort_filelist=dict( + mandatory=True, + ), + ssh_log_to_file=dict( + usedefault=True, + ), + template=dict( + mandatory=True, + ), template_args=dict(), - template_expression=dict(usedefault=True,), + template_expression=dict( + usedefault=True, + ), username=dict(), ) inputs = SSHDataGrabber.input_spec() diff --git a/nipype/interfaces/tests/test_auto_SelectFiles.py b/nipype/interfaces/tests/test_auto_SelectFiles.py index 722c431b8f..06f86c36f0 100644 --- a/nipype/interfaces/tests/test_auto_SelectFiles.py +++ b/nipype/interfaces/tests/test_auto_SelectFiles.py @@ -5,9 +5,15 @@ def test_SelectFiles_inputs(): input_map = dict( base_directory=dict(), - force_lists=dict(usedefault=True,), - raise_on_empty=dict(usedefault=True,), - sort_filelist=dict(usedefault=True,), + force_lists=dict( + usedefault=True, + ), + raise_on_empty=dict( + usedefault=True, + ), + sort_filelist=dict( + usedefault=True, + ), ) inputs = SelectFiles.input_spec() diff --git a/nipype/interfaces/tests/test_auto_SignalExtraction.py b/nipype/interfaces/tests/test_auto_SignalExtraction.py index 2af759b50a..272d94f54d 100644 --- a/nipype/interfaces/tests/test_auto_SignalExtraction.py +++ b/nipype/interfaces/tests/test_auto_SignalExtraction.py @@ -4,13 +4,29 @@ def 
test_SignalExtraction_inputs(): input_map = dict( - class_labels=dict(mandatory=True,), - detrend=dict(usedefault=True,), - in_file=dict(extensions=None, mandatory=True,), - incl_shared_variance=dict(usedefault=True,), - include_global=dict(usedefault=True,), - label_files=dict(mandatory=True,), - out_file=dict(extensions=None, usedefault=True,), + class_labels=dict( + mandatory=True, + ), + detrend=dict( + usedefault=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + incl_shared_variance=dict( + usedefault=True, + ), + include_global=dict( + usedefault=True, + ), + label_files=dict( + mandatory=True, + ), + out_file=dict( + extensions=None, + usedefault=True, + ), ) inputs = SignalExtraction.input_spec() @@ -20,7 +36,11 @@ def test_SignalExtraction_inputs(): def test_SignalExtraction_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SignalExtraction.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_SlicerCommandLine.py b/nipype/interfaces/tests/test_auto_SlicerCommandLine.py index 99bd82a8f3..c13466483b 100644 --- a/nipype/interfaces/tests/test_auto_SlicerCommandLine.py +++ b/nipype/interfaces/tests/test_auto_SlicerCommandLine.py @@ -4,8 +4,13 @@ def test_SlicerCommandLine_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), module=dict(), ) inputs = SlicerCommandLine.input_spec() diff --git a/nipype/interfaces/tests/test_auto_SplitNifti.py b/nipype/interfaces/tests/test_auto_SplitNifti.py index 219f378a82..14ccc6bdb0 100644 --- a/nipype/interfaces/tests/test_auto_SplitNifti.py +++ b/nipype/interfaces/tests/test_auto_SplitNifti.py @@ -4,8 +4,13 @@ def test_SplitNifti_inputs(): input_map = dict( - in_file=dict(extensions=None, mandatory=True,), - out_ext=dict(usedefault=True,), + in_file=dict( + extensions=None, + mandatory=True, + ), + out_ext=dict( + usedefault=True, + ), out_format=dict(), out_path=dict(), split_dim=dict(), @@ -18,7 +23,9 @@ def test_SplitNifti_inputs(): def test_SplitNifti_outputs(): - output_map = dict(out_list=dict(),) + output_map = dict( + out_list=dict(), + ) outputs = SplitNifti.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_XNATSink.py b/nipype/interfaces/tests/test_auto_XNATSink.py index d22e2a1a63..ec6f920f57 100644 --- a/nipype/interfaces/tests/test_auto_XNATSink.py +++ b/nipype/interfaces/tests/test_auto_XNATSink.py @@ -4,17 +4,39 @@ def test_XNATSink_inputs(): input_map = dict( - _outputs=dict(usedefault=True,), - assessor_id=dict(xor=["reconstruction_id"],), + _outputs=dict( + usedefault=True, + ), + assessor_id=dict( + xor=["reconstruction_id"], + ), cache_dir=dict(), - config=dict(extensions=None, mandatory=True, xor=["server"],), - experiment_id=dict(mandatory=True,), - project_id=dict(mandatory=True,), + config=dict( + extensions=None, + mandatory=True, + xor=["server"], + ), + experiment_id=dict( + mandatory=True, + ), + project_id=dict( + mandatory=True, + ), pwd=dict(), - reconstruction_id=dict(xor=["assessor_id"],), - server=dict(mandatory=True, requires=["user", "pwd"], xor=["config"],), - share=dict(usedefault=True,), - subject_id=dict(mandatory=True,), + reconstruction_id=dict( + xor=["assessor_id"], + ), + server=dict( + mandatory=True, + requires=["user", "pwd"], + xor=["config"], + 
), + share=dict( + usedefault=True, + ), + subject_id=dict( + mandatory=True, + ), user=dict(), ) inputs = XNATSink.input_spec() diff --git a/nipype/interfaces/tests/test_auto_XNATSource.py b/nipype/interfaces/tests/test_auto_XNATSource.py index a60af06b6a..f115657c9f 100644 --- a/nipype/interfaces/tests/test_auto_XNATSource.py +++ b/nipype/interfaces/tests/test_auto_XNATSource.py @@ -5,11 +5,23 @@ def test_XNATSource_inputs(): input_map = dict( cache_dir=dict(), - config=dict(extensions=None, mandatory=True, xor=["server"],), + config=dict( + extensions=None, + mandatory=True, + xor=["server"], + ), pwd=dict(), - query_template=dict(mandatory=True,), - query_template_args=dict(usedefault=True,), - server=dict(mandatory=True, requires=["user", "pwd"], xor=["config"],), + query_template=dict( + mandatory=True, + ), + query_template_args=dict( + usedefault=True, + ), + server=dict( + mandatory=True, + requires=["user", "pwd"], + xor=["config"], + ), user=dict(), ) inputs = XNATSource.input_spec() diff --git a/nipype/interfaces/utility/tests/test_auto_AssertEqual.py b/nipype/interfaces/utility/tests/test_auto_AssertEqual.py index 2045d9149a..c550a5efba 100644 --- a/nipype/interfaces/utility/tests/test_auto_AssertEqual.py +++ b/nipype/interfaces/utility/tests/test_auto_AssertEqual.py @@ -4,8 +4,14 @@ def test_AssertEqual_inputs(): input_map = dict( - volume1=dict(extensions=None, mandatory=True,), - volume2=dict(extensions=None, mandatory=True,), + volume1=dict( + extensions=None, + mandatory=True, + ), + volume2=dict( + extensions=None, + mandatory=True, + ), ) inputs = AssertEqual.input_spec() diff --git a/nipype/interfaces/utility/tests/test_auto_CSVReader.py b/nipype/interfaces/utility/tests/test_auto_CSVReader.py index 98adf59f6d..be24c59eb4 100644 --- a/nipype/interfaces/utility/tests/test_auto_CSVReader.py +++ b/nipype/interfaces/utility/tests/test_auto_CSVReader.py @@ -4,7 +4,13 @@ def test_CSVReader_inputs(): input_map = dict( - header=dict(usedefault=True,), in_file=dict(extensions=None, mandatory=True,), + header=dict( + usedefault=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = CSVReader.input_spec() diff --git a/nipype/interfaces/utility/tests/test_auto_Function.py b/nipype/interfaces/utility/tests/test_auto_Function.py index f2713a4407..fdbccc3098 100644 --- a/nipype/interfaces/utility/tests/test_auto_Function.py +++ b/nipype/interfaces/utility/tests/test_auto_Function.py @@ -3,7 +3,11 @@ def test_Function_inputs(): - input_map = dict(function_str=dict(mandatory=True,),) + input_map = dict( + function_str=dict( + mandatory=True, + ), + ) inputs = Function.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/interfaces/utility/tests/test_auto_Merge.py b/nipype/interfaces/utility/tests/test_auto_Merge.py index 7658529a9d..f9304a9897 100644 --- a/nipype/interfaces/utility/tests/test_auto_Merge.py +++ b/nipype/interfaces/utility/tests/test_auto_Merge.py @@ -4,9 +4,15 @@ def test_Merge_inputs(): input_map = dict( - axis=dict(usedefault=True,), - no_flatten=dict(usedefault=True,), - ravel_inputs=dict(usedefault=True,), + axis=dict( + usedefault=True, + ), + no_flatten=dict( + usedefault=True, + ), + ravel_inputs=dict( + usedefault=True, + ), ) inputs = Merge.input_spec() @@ -16,7 +22,9 @@ def test_Merge_inputs(): def test_Merge_outputs(): - output_map = dict(out=dict(),) + output_map = dict( + out=dict(), + ) outputs = Merge.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/utility/tests/test_auto_Rename.py b/nipype/interfaces/utility/tests/test_auto_Rename.py index a722afac0e..5b9a183796 100644 --- a/nipype/interfaces/utility/tests/test_auto_Rename.py +++ b/nipype/interfaces/utility/tests/test_auto_Rename.py @@ -4,11 +4,18 @@ def test_Rename_inputs(): input_map = dict( - format_string=dict(mandatory=True,), - in_file=dict(extensions=None, mandatory=True,), + format_string=dict( + mandatory=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), keep_ext=dict(), parse_string=dict(), - use_fullpath=dict(usedefault=True,), + use_fullpath=dict( + usedefault=True, + ), ) inputs = Rename.input_spec() @@ -18,7 +25,11 @@ def test_Rename_inputs(): def test_Rename_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Rename.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/utility/tests/test_auto_Select.py b/nipype/interfaces/utility/tests/test_auto_Select.py index 76f9061446..21dcb91670 100644 --- a/nipype/interfaces/utility/tests/test_auto_Select.py +++ b/nipype/interfaces/utility/tests/test_auto_Select.py @@ -3,7 +3,14 @@ def test_Select_inputs(): - input_map = dict(index=dict(mandatory=True,), inlist=dict(mandatory=True,),) + input_map = dict( + index=dict( + mandatory=True, + ), + inlist=dict( + mandatory=True, + ), + ) inputs = Select.input_spec() for key, metadata in list(input_map.items()): @@ -12,7 +19,9 @@ def test_Select_inputs(): def test_Select_outputs(): - output_map = dict(out=dict(),) + output_map = dict( + out=dict(), + ) outputs = Select.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/utility/tests/test_auto_Split.py b/nipype/interfaces/utility/tests/test_auto_Split.py index 901162ecab..60f46fb73c 100644 --- a/nipype/interfaces/utility/tests/test_auto_Split.py +++ b/nipype/interfaces/utility/tests/test_auto_Split.py @@ -4,9 +4,15 @@ def test_Split_inputs(): input_map = dict( - inlist=dict(mandatory=True,), - splits=dict(mandatory=True,), - squeeze=dict(usedefault=True,), + inlist=dict( + mandatory=True, + ), + splits=dict( + mandatory=True, + ), + squeeze=dict( + usedefault=True, + ), ) inputs = Split.input_spec() diff --git a/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py b/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py index 56bf94b7cc..f06a118fc2 100644 --- a/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py +++ b/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py @@ -4,10 +4,24 @@ def test_Vnifti2Image_inputs(): input_map = dict( - args=dict(argstr="%s",), - attributes=dict(argstr="-attr %s", extensions=None, position=2,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=1,), + args=dict( + argstr="%s", + ), + attributes=dict( + argstr="-attr %s", + extensions=None, + position=2, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + position=1, + ), out_file=dict( argstr="-out %s", extensions=None, @@ -26,7 +40,11 @@ def test_Vnifti2Image_inputs(): def test_Vnifti2Image_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Vnifti2Image.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/vista/tests/test_auto_VtoMat.py 
b/nipype/interfaces/vista/tests/test_auto_VtoMat.py index 46cac1b5b6..8d2b53ab0c 100644 --- a/nipype/interfaces/vista/tests/test_auto_VtoMat.py +++ b/nipype/interfaces/vista/tests/test_auto_VtoMat.py @@ -4,9 +4,19 @@ def test_VtoMat_inputs(): input_map = dict( - args=dict(argstr="%s",), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=1,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + position=1, + ), out_file=dict( argstr="-out %s", extensions=None, @@ -25,7 +35,11 @@ def test_VtoMat_inputs(): def test_VtoMat_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = VtoMat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/workbench/tests/test_auto_CiftiSmooth.py b/nipype/interfaces/workbench/tests/test_auto_CiftiSmooth.py index 4be8b4aba7..03beac887a 100644 --- a/nipype/interfaces/workbench/tests/test_auto_CiftiSmooth.py +++ b/nipype/interfaces/workbench/tests/test_auto_CiftiSmooth.py @@ -4,7 +4,9 @@ def test_CiftiSmooth_inputs(): input_map = dict( - args=dict(argstr="%s",), + args=dict( + argstr="%s", + ), cerebellum_corrected_areas=dict( argstr="cerebellum-corrected-areas %s", extensions=None, @@ -12,21 +14,53 @@ def test_CiftiSmooth_inputs(): requires=["cerebellum_surf"], ), cerebellum_surf=dict( - argstr="-cerebellum-surface %s", extensions=None, position=9, - ), - cifti_roi=dict(argstr="-cifti-roi %s", extensions=None, position=11,), - direction=dict(argstr="%s", mandatory=True, position=3,), - environ=dict(nohash=True, usedefault=True,), - fix_zeros_surf=dict(argstr="-fix-zeros-surface", position=13,), - fix_zeros_vol=dict(argstr="-fix-zeros-volume", position=12,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), + argstr="-cerebellum-surface %s", + extensions=None, + position=9, + ), + cifti_roi=dict( + argstr="-cifti-roi %s", + extensions=None, + position=11, + ), + direction=dict( + argstr="%s", + mandatory=True, + position=3, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fix_zeros_surf=dict( + argstr="-fix-zeros-surface", + position=13, + ), + fix_zeros_vol=dict( + argstr="-fix-zeros-volume", + position=12, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), left_corrected_areas=dict( - argstr="-left-corrected-areas %s", extensions=None, position=6, + argstr="-left-corrected-areas %s", + extensions=None, + position=6, ), left_surf=dict( - argstr="-left-surface %s", extensions=None, mandatory=True, position=5, + argstr="-left-surface %s", + extensions=None, + mandatory=True, + position=5, + ), + merged_volume=dict( + argstr="-merged-volume", + position=14, ), - merged_volume=dict(argstr="-merged-volume", position=14,), out_file=dict( argstr="%s", extensions=None, @@ -36,13 +70,26 @@ def test_CiftiSmooth_inputs(): position=4, ), right_corrected_areas=dict( - argstr="-right-corrected-areas %s", extensions=None, position=8, + argstr="-right-corrected-areas %s", + extensions=None, + position=8, ), right_surf=dict( - argstr="-right-surface %s", extensions=None, mandatory=True, position=7, + argstr="-right-surface %s", + extensions=None, + mandatory=True, + position=7, + ), + sigma_surf=dict( + argstr="%s", + mandatory=True, + position=1, + ), + sigma_vol=dict( + argstr="%s", + mandatory=True, + 
position=2, ), - sigma_surf=dict(argstr="%s", mandatory=True, position=1,), - sigma_vol=dict(argstr="%s", mandatory=True, position=2,), ) inputs = CiftiSmooth.input_spec() @@ -52,7 +99,11 @@ def test_CiftiSmooth_inputs(): def test_CiftiSmooth_outputs(): - output_map = dict(out_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = CiftiSmooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/workbench/tests/test_auto_MetricResample.py b/nipype/interfaces/workbench/tests/test_auto_MetricResample.py index eb9201b7f1..e7a861963b 100644 --- a/nipype/interfaces/workbench/tests/test_auto_MetricResample.py +++ b/nipype/interfaces/workbench/tests/test_auto_MetricResample.py @@ -4,17 +4,60 @@ def test_MetricResample_inputs(): input_map = dict( - area_metrics=dict(argstr="-area-metrics", position=5, xor=["area_surfs"],), - area_surfs=dict(argstr="-area-surfs", position=5, xor=["area_metrics"],), - args=dict(argstr="%s",), - current_area=dict(argstr="%s", extensions=None, position=6,), - current_sphere=dict(argstr="%s", extensions=None, mandatory=True, position=1,), - environ=dict(nohash=True, usedefault=True,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0,), - largest=dict(argstr="-largest", position=10,), - method=dict(argstr="%s", mandatory=True, position=3,), - new_area=dict(argstr="%s", extensions=None, position=7,), - new_sphere=dict(argstr="%s", extensions=None, mandatory=True, position=2,), + area_metrics=dict( + argstr="-area-metrics", + position=5, + xor=["area_surfs"], + ), + area_surfs=dict( + argstr="-area-surfs", + position=5, + xor=["area_metrics"], + ), + args=dict( + argstr="%s", + ), + current_area=dict( + argstr="%s", + extensions=None, + position=6, + ), + current_sphere=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), + largest=dict( + argstr="-largest", + position=10, + ), + method=dict( + argstr="%s", + mandatory=True, + position=3, + ), + new_area=dict( + argstr="%s", + extensions=None, + position=7, + ), + new_sphere=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), out_file=dict( argstr="%s", extensions=None, @@ -23,8 +66,15 @@ def test_MetricResample_inputs(): name_template="%s.out", position=4, ), - roi_metric=dict(argstr="-current-roi %s", extensions=None, position=8,), - valid_roi_out=dict(argstr="-valid-roi-out", position=9,), + roi_metric=dict( + argstr="-current-roi %s", + extensions=None, + position=8, + ), + valid_roi_out=dict( + argstr="-valid-roi-out", + position=9, + ), ) inputs = MetricResample.input_spec() @@ -34,7 +84,14 @@ def test_MetricResample_inputs(): def test_MetricResample_outputs(): - output_map = dict(out_file=dict(extensions=None,), roi_file=dict(extensions=None,),) + output_map = dict( + out_file=dict( + extensions=None, + ), + roi_file=dict( + extensions=None, + ), + ) outputs = MetricResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/workbench/tests/test_auto_WBCommand.py b/nipype/interfaces/workbench/tests/test_auto_WBCommand.py index 0a32276e5f..d2cc9a6b96 100644 --- a/nipype/interfaces/workbench/tests/test_auto_WBCommand.py +++ b/nipype/interfaces/workbench/tests/test_auto_WBCommand.py @@ -4,7 +4,13 @@ def test_WBCommand_inputs(): input_map = dict( - args=dict(argstr="%s",), 
environ=dict(nohash=True, usedefault=True,), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), ) inputs = WBCommand.input_spec() From f4f91dc52b99ac0302163f6118e78b6cede5d25c Mon Sep 17 00:00:00 2001 From: Horea Christian Date: Thu, 17 Sep 2020 12:05:58 -0400 Subject: [PATCH 0879/1665] FIX: No longer depending on pydotplus (networkx >=2.0 update) Fixes: https://github.com/nipy/nipype/issues/3244 --- nipype/info.py | 3 +-- nipype/interfaces/cmtk/nx.py | 10 ++-------- requirements.txt | 3 +-- 3 files changed, 4 insertions(+), 12 deletions(-) diff --git a/nipype/info.py b/nipype/info.py index 73ad1b88d5..396b24cea7 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -99,7 +99,7 @@ def get_nipype_gitversion(): # versions NIBABEL_MIN_VERSION = "2.1.0" -NETWORKX_MIN_VERSION = "1.9" +NETWORKX_MIN_VERSION = "2.0" NUMPY_MIN_VERSION = "1.13" # Numpy bug in python 3.7: # https://www.opensourceanswers.com/blog/you-shouldnt-use-python-37-for-data-science-right-now.html @@ -143,7 +143,6 @@ def get_nipype_gitversion(): "packaging", "prov>=%s" % PROV_VERSION, "pydot>=%s" % PYDOT_MIN_VERSION, - "pydotplus", "python-dateutil>=%s" % DATEUTIL_MIN_VERSION, "rdflib>=%s" % RDFLIB_MIN_VERSION, "scipy>=%s" % SCIPY_MIN_VERSION, diff --git a/nipype/interfaces/cmtk/nx.py b/nipype/interfaces/cmtk/nx.py index 3886fe8844..aaf4bece39 100644 --- a/nipype/interfaces/cmtk/nx.py +++ b/nipype/interfaces/cmtk/nx.py @@ -289,15 +289,9 @@ def compute_singlevalued_measures(ntwk, weighted=True, calculate_cliques=False): iflogger.info("Computing single valued measures:") measures = {} iflogger.info("...Computing degree assortativity (pearson number) ...") - try: - measures["degree_pearsonr"] = nx.degree_pearsonr(ntwk) - except AttributeError: # For NetworkX 1.6 - measures["degree_pearsonr"] = nx.degree_pearson_correlation_coefficient(ntwk) + measures["degree_pearsonr"] = nx.degree_pearson_correlation_coefficient(ntwk) iflogger.info("...Computing degree assortativity...") - try: - measures["degree_assortativity"] = nx.degree_assortativity(ntwk) - except AttributeError: - measures["degree_assortativity"] = nx.degree_assortativity_coefficient(ntwk) + measures["degree_assortativity"] = nx.degree_assortativity_coefficient(ntwk) iflogger.info("...Computing transitivity...") measures["transitivity"] = nx.transitivity(ntwk) iflogger.info("...Computing number of connected_components...") diff --git a/requirements.txt b/requirements.txt index 6cb09abdee..afec34ebfd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,13 +1,12 @@ # Auto-generated by tools/update_requirements.py click>=6.6.0 -networkx>=1.9 +networkx>=2.0 nibabel>=2.1.0 numpy>=1.13 ; python_version < "3.7" numpy>=1.15.3 ; python_version >= "3.7" packaging prov>=1.5.2 pydot>=1.2.3 -pydotplus python-dateutil>=2.2 rdflib>=5.0.0 scipy>=0.14 From 3e2f4580532a325f4afe76d6e8e833d161b32cdb Mon Sep 17 00:00:00 2001 From: Eric Condamine <37933899+servoz@users.noreply.github.com> Date: Mon, 21 Sep 2020 15:04:15 +0200 Subject: [PATCH 0880/1665] SPM SliceTiming must accept either Int or float for ref_slice and slice_order input parameter --- nipype/interfaces/spm/preprocess.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/spm/preprocess.py b/nipype/interfaces/spm/preprocess.py index 17dc03ff5a..a9d211aa83 100644 --- a/nipype/interfaces/spm/preprocess.py +++ b/nipype/interfaces/spm/preprocess.py @@ -276,12 +276,14 @@ class SliceTimingInputSpec(SPMCommandInputSpec): mandatory=True, ) slice_order = 
traits.List( - traits.Float(), + traits.Either(traits.Int(),traits.Float()), field="so", desc=("1-based order or onset (in ms) in which slices are acquired"), mandatory=True, ) - ref_slice = traits.Int( + ref_slice = traits.Either( + traits.Int(), + traits.Float(), field="refslice", desc="1-based Number of the reference slice or " "reference time point if slice_order is in " From 2096a09c71710e7d2ee01106ccd532d3391fae4f Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Wed, 23 Sep 2020 13:47:25 +0200 Subject: [PATCH 0881/1665] FIX: Raise version error when using ``-g`` with ``antsAI`` < 2.3.0 --- nipype/interfaces/ants/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nipype/interfaces/ants/utils.py b/nipype/interfaces/ants/utils.py index a6fdc5cf00..8770158736 100644 --- a/nipype/interfaces/ants/utils.py +++ b/nipype/interfaces/ants/utils.py @@ -394,6 +394,7 @@ class AIInputSpec(ANTSCommandInputSpec): traits.Tuple(traits.Float, traits.Tuple(traits.Float, traits.Float)), argstr="-g %s", desc="Translation search grid in mm", + min_ver="2.3.0", ) convergence = traits.Tuple( From 0df1dff1581d1a8b551a5e10bf624fb9b57b3067 Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Fri, 25 Sep 2020 06:25:17 -0700 Subject: [PATCH 0882/1665] chore(specs): update --- nipype/interfaces/ants/tests/test_auto_AI.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nipype/interfaces/ants/tests/test_auto_AI.py b/nipype/interfaces/ants/tests/test_auto_AI.py index fa4cb42916..bef56b7ee6 100644 --- a/nipype/interfaces/ants/tests/test_auto_AI.py +++ b/nipype/interfaces/ants/tests/test_auto_AI.py @@ -59,6 +59,7 @@ def test_AI_inputs(): ), search_grid=dict( argstr="-g %s", + min_ver="2.3.0", ), transform=dict( argstr="-t %s[%g]", From bfc21fd4805a06274dd92ee9e264e8d216556df3 Mon Sep 17 00:00:00 2001 From: Lea Waller Date: Sat, 8 Aug 2020 13:57:55 +0200 Subject: [PATCH 0883/1665] [REF] Optimize _get_parameter_node performance - Traverse nested workflows in a loop - Avoid constructing the entire workflow.inputs or workflow.outputs data structure --- nipype/pipeline/engine/workflows.py | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py index 0f50b9a5aa..d7b6c11b1c 100644 --- a/nipype/pipeline/engine/workflows.py +++ b/nipype/pipeline/engine/workflows.py @@ -838,15 +838,28 @@ def _get_parameter_node(self, parameter, subtype="in"): """Returns the underlying node corresponding to an input or output parameter """ - if subtype == "in": - subobject = self.inputs - else: - subobject = self.outputs attrlist = parameter.split(".") - cur_out = subobject - for attr in attrlist[:-1]: - cur_out = getattr(cur_out, attr) - return cur_out.traits()[attrlist[-1]].node + _ = attrlist.pop() # take attribute name + nodename = attrlist.pop() + + targetworkflow = self + while attrlist: + workflowname = attrlist.pop(0) + workflow = None + for node in targetworkflow._graph.nodes(): + if node.name == workflowname and isinstance(node, Workflow): + workflow = node + break + if workflow is None: + return + targetworkflow = workflow + + for node in targetworkflow._graph.nodes(): + if node.name == nodename: + if isinstance(node, Workflow): + return + else: + return node def _check_outputs(self, parameter): return self._has_attr(parameter, subtype="out") From 84c791b9d03832a65a1c47ac1b10c9ebfec60d4c Mon Sep 17 00:00:00 2001 From: Lea Waller Date: Mon, 19 Oct 2020 22:24:31 +0200 Subject: [PATCH 0884/1665] Replace 
_get_parameter_node with get_node --- nipype/pipeline/engine/workflows.py | 31 ++--------------------------- 1 file changed, 2 insertions(+), 29 deletions(-) diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py index d7b6c11b1c..30350e986d 100644 --- a/nipype/pipeline/engine/workflows.py +++ b/nipype/pipeline/engine/workflows.py @@ -834,33 +834,6 @@ def _check_is_already_connected(workflow, node, attrname): return True - def _get_parameter_node(self, parameter, subtype="in"): - """Returns the underlying node corresponding to an input or - output parameter - """ - attrlist = parameter.split(".") - _ = attrlist.pop() # take attribute name - nodename = attrlist.pop() - - targetworkflow = self - while attrlist: - workflowname = attrlist.pop(0) - workflow = None - for node in targetworkflow._graph.nodes(): - if node.name == workflowname and isinstance(node, Workflow): - workflow = node - break - if workflow is None: - return - targetworkflow = workflow - - for node in targetworkflow._graph.nodes(): - if node.name == nodename: - if isinstance(node, Workflow): - return - else: - return node - def _check_outputs(self, parameter): return self._has_attr(parameter, subtype="out") @@ -989,7 +962,7 @@ def _generate_flatgraph(self): logger.debug("in: connections-> %s", str(d["connect"])) for cd in deepcopy(d["connect"]): logger.debug("in: %s", str(cd)) - dstnode = node._get_parameter_node(cd[1], subtype="in") + dstnode = node.get_node(parameter.rsplit(".", 1)[0]) srcnode = u srcout = cd[0] dstin = cd[1].split(".")[-1] @@ -1009,7 +982,7 @@ def _generate_flatgraph(self): parameter = cd[0][0] else: parameter = cd[0] - srcnode = node._get_parameter_node(parameter, subtype="out") + srcnode = node.get_node(parameter.rsplit(".", 1)[0]) if isinstance(cd[0], tuple): srcout = list(cd[0]) srcout[0] = parameter.split(".")[-1] From d5a88de71aae60a9e6350e1c3ef2548cf058f7d0 Mon Sep 17 00:00:00 2001 From: Lea Waller Date: Tue, 20 Oct 2020 10:08:23 +0200 Subject: [PATCH 0885/1665] Fix pass the correct argument to get_node --- nipype/pipeline/engine/workflows.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py index 30350e986d..9b6e60ffaf 100644 --- a/nipype/pipeline/engine/workflows.py +++ b/nipype/pipeline/engine/workflows.py @@ -962,7 +962,7 @@ def _generate_flatgraph(self): logger.debug("in: connections-> %s", str(d["connect"])) for cd in deepcopy(d["connect"]): logger.debug("in: %s", str(cd)) - dstnode = node.get_node(parameter.rsplit(".", 1)[0]) + dstnode = node.get_node(cd[1].rsplit(".", 1)[0]) srcnode = u srcout = cd[0] dstin = cd[1].split(".")[-1] From a722ca4b7e7e24f055d1161c12e0924879607e70 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 21 Oct 2020 16:00:56 -0400 Subject: [PATCH 0886/1665] FIX: Bad format string --- nipype/algorithms/misc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/algorithms/misc.py b/nipype/algorithms/misc.py index 756cdb8a10..50ae18f5be 100644 --- a/nipype/algorithms/misc.py +++ b/nipype/algorithms/misc.py @@ -1408,7 +1408,7 @@ def merge_rois(in_files, in_idxs, in_ref, dtype=None, out_file=None): # to avoid memory errors if op.splitext(in_ref)[1] == ".gz": try: - iflogger.info("uncompress %i", in_ref) + iflogger.info("uncompress %s", in_ref) sp.check_call(["gunzip", in_ref], stdout=sp.PIPE, shell=True) in_ref = op.splitext(in_ref)[0] except: From 9dc7f43f560092b5dbd079d559ac8530ea3d847c Mon Sep 17 00:00:00 2001 From: Chris 
Markiewicz Date: Wed, 21 Oct 2020 16:48:10 -0400 Subject: [PATCH 0887/1665] CI: Upgrade Linux distribution Focal is latest, but Neurodebian only supports through Bionic --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 8fb76e5050..40d5937c02 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,5 @@ os: linux -dist: xenial +dist: bionic language: python # our build matrix From 0021bd3e7945975f73c019416371f87002a53e9a Mon Sep 17 00:00:00 2001 From: Gal Ben-Zvi Date: Mon, 26 Oct 2020 11:59:25 +0200 Subject: [PATCH 0888/1665] Update nipype/interfaces/mrtrix3/preprocess.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/mrtrix3/preprocess.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py index 832d3256ac..e0ed0b9be3 100644 --- a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ -184,6 +184,7 @@ class DWIBiasCorrectInputSpec(MRTrix3BaseInputSpec): argstr="ants", mandatory=True, desc="use ANTS N4 to estimate the inhomogeneity field", + position=0, xor=["use_fsl"], ) use_fsl = traits.Bool( From 524482e9908f876ce3dff18bfb42d3653ec892c9 Mon Sep 17 00:00:00 2001 From: Gal Ben-Zvi Date: Mon, 26 Oct 2020 11:59:36 +0200 Subject: [PATCH 0889/1665] Update nipype/interfaces/mrtrix3/preprocess.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/mrtrix3/preprocess.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py index e0ed0b9be3..cc34557420 100644 --- a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ -191,6 +191,7 @@ class DWIBiasCorrectInputSpec(MRTrix3BaseInputSpec): argstr="fsl", mandatory=True, desc="use FSL FAST to estimate the inhomogeneity field", + position=0, xor=["use_ants"], ) bias = File(argstr="-bias %s", desc="bias field") From 613d647e49b2832b38f7cc8c6d90e533a39b36e6 Mon Sep 17 00:00:00 2001 From: Gal Ben-Zvi Date: Mon, 26 Oct 2020 11:59:44 +0200 Subject: [PATCH 0890/1665] Update .zenodo.json Co-authored-by: Chris Markiewicz --- .zenodo.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zenodo.json b/.zenodo.json index d59c95370b..06ed334796 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -699,7 +699,7 @@ }, { "affiliation": "Sagol School of Neuroscience, Tel Aviv University", - "name": "Ben-Zvi,Gal" + "name": "Ben-Zvi, Gal" }, { "name": "Matsubara, K" From a8a4c7fcb3a851f5a6554c9e6480993d54eaf6a0 Mon Sep 17 00:00:00 2001 From: Gal Ben-Zvi Date: Mon, 26 Oct 2020 12:07:01 +0200 Subject: [PATCH 0891/1665] Edited file according to Chris's requests --- nipype/interfaces/mrtrix3/preprocess.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py index cc34557420..2af0fec354 100644 --- a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ -226,14 +226,20 @@ class DWIBiasCorrect(MRTrix3Base): >>> bias_correct.inputs.in_file = 'dwi.mif' >>> bias_correct.inputs.use_ants = True >>> bias_correct.cmdline - 'dwibiascorrect -ants dwi.mif dwi_biascorr.mif' + 'dwibiascorrect ants dwi.mif dwi_biascorr.mif' >>> bias_correct.run() # doctest: +SKIP """ _cmd = "dwibiascorrect" input_spec = DWIBiasCorrectInputSpec output_spec = DWIBiasCorrectOutputSpec - + def _format_arg(self, name, trait_spec, value): + if name in ("use_ants", 
"use_fsl"): + ver = self.version + # Changed in version 3.0, after release candidates + if ver is not None and (ver[0] < "3" or ver.startswith("3.0_RC")): + return f"-{trait_spec.argstr}" + super()._format_arg(name, trait_spec, value) class ResponseSDInputSpec(MRTrix3BaseInputSpec): algorithm = traits.Enum( From 5ddcc0d6821811b3626c39bea67fdc552b013a4f Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 27 Oct 2020 08:35:10 -0400 Subject: [PATCH 0892/1665] Update nipype/interfaces/mrtrix3/preprocess.py --- nipype/interfaces/mrtrix3/preprocess.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py index 2af0fec354..aa3347c7f9 100644 --- a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ -239,7 +239,7 @@ def _format_arg(self, name, trait_spec, value): # Changed in version 3.0, after release candidates if ver is not None and (ver[0] < "3" or ver.startswith("3.0_RC")): return f"-{trait_spec.argstr}" - super()._format_arg(name, trait_spec, value) + return super()._format_arg(name, trait_spec, value) class ResponseSDInputSpec(MRTrix3BaseInputSpec): algorithm = traits.Enum( From 4f8aa4b66bd317742c42f0f6e8024d96157a762b Mon Sep 17 00:00:00 2001 From: Oliver Contier Date: Thu, 29 Oct 2020 16:11:51 +0100 Subject: [PATCH 0893/1665] changed _list_outputs() method of EpiReg interface to check whether 'seg' output exists in case 'wmseg' input is already provided --- nipype/interfaces/fsl/epi.py | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index e5dbe6de19..f6772aa53b 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -1306,20 +1306,19 @@ def _list_outputs(self): outputs["epi2str_inv"] = os.path.join( os.getcwd(), self.inputs.out_base + "_inv.mat" ) - + if not isdefined(self.inputs.wmseg): + outputs["wmedge"] = os.path.join( + os.getcwd(), self.inputs.out_base + "_fast_wmedge.nii.gz" + ) + outputs["wmseg"] = os.path.join( + os.getcwd(), self.inputs.out_base + "_fast_wmseg.nii.gz" + ) + outputs["seg"] = os.path.join( + os.getcwd(), self.inputs.out_base + "_fast_seg.nii.gz" + ) outputs["epi2str_mat"] = os.path.join( os.getcwd(), self.inputs.out_base + ".mat" ) - outputs["wmedge"] = os.path.join( - os.getcwd(), self.inputs.out_base + "_fast_wmedge.nii.gz" - ) - outputs["wmseg"] = os.path.join( - os.getcwd(), self.inputs.out_base + "_fast_wmseg.nii.gz" - ) - outputs["seg"] = os.path.join( - os.getcwd(), self.inputs.out_base + "_fast_seg.nii.gz" - ) - return outputs From 177724544abeee1872d0a4054d4d599299aa9281 Mon Sep 17 00:00:00 2001 From: Oliver Contier Date: Thu, 29 Oct 2020 16:42:49 +0100 Subject: [PATCH 0894/1665] updated affiliation --- .zenodo.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zenodo.json b/.zenodo.json index 7324bd942c..cb9ffacc3c 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -487,7 +487,7 @@ "name": "Perkins, L. 
Nathan" }, { - "affiliation": "Otto-von-Guericke-University Magdeburg, Germany", + "affiliation": "Max Planck Institute for Human Cognitive and Brain Sciences", "name": "Contier, Oliver", "orcid": "0000-0002-2983-4709" }, From 73e98d2e97cc0f860aef2eee14e8aaeb14077398 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 8 Nov 2020 09:00:09 -0500 Subject: [PATCH 0895/1665] TEST: make specs --- .../mrtrix3/tests/test_auto_DWIBiasCorrect.py | 78 +++++++++++++++---- 1 file changed, 64 insertions(+), 14 deletions(-) diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py index 210b39b141..0028748ab9 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py @@ -4,17 +4,50 @@ def test_DWIBiasCorrect_inputs(): input_map = dict( - args=dict(argstr="%s",), - bias=dict(argstr="-bias %s", extensions=None,), - bval_scale=dict(argstr="-bvalue_scaling %s",), - environ=dict(nohash=True, usedefault=True,), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"],), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"],), - in_bval=dict(extensions=None,), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None,), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2,), - in_mask=dict(argstr="-mask %s", extensions=None,), - nthreads=dict(argstr="-nthreads %d", nohash=True,), + args=dict( + argstr="%s", + ), + bias=dict( + argstr="-bias %s", + extensions=None, + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + in_mask=dict( + argstr="-mask %s", + extensions=None, + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), out_file=dict( argstr="%s", extensions=None, @@ -24,8 +57,18 @@ def test_DWIBiasCorrect_inputs(): name_template="%s_biascorr", position=-1, ), - use_ants=dict(argstr="-ants", mandatory=True, xor=["use_fsl"],), - use_fsl=dict(argstr="-fsl", mandatory=True, xor=["use_ants"],), + use_ants=dict( + argstr="ants", + mandatory=True, + position=0, + xor=["use_fsl"], + ), + use_fsl=dict( + argstr="fsl", + mandatory=True, + position=0, + xor=["use_ants"], + ), ) inputs = DWIBiasCorrect.input_spec() @@ -35,7 +78,14 @@ def test_DWIBiasCorrect_inputs(): def test_DWIBiasCorrect_outputs(): - output_map = dict(bias=dict(extensions=None,), out_file=dict(extensions=None,),) + output_map = dict( + bias=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + ) outputs = DWIBiasCorrect.output_spec() for key, metadata in list(output_map.items()): From bf8899d90d70bd4873301d61bd5790a051fbe6d3 Mon Sep 17 00:00:00 2001 From: "Michael R. 
Crusoe" <1330696+mr-c@users.noreply.github.com> Date: Fri, 20 Nov 2020 10:36:31 +0100 Subject: [PATCH 0896/1665] Remove myself (@mr-c) from the zenodo metadata --- .zenodo.json | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index a8ae4c70ee..0baed9eaa9 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -761,10 +761,6 @@ { "name": "Noel, Maxime" }, - { - "name": "Crusoe, Michael R.", - "orcid": "0000-0002-2961-9670" - }, { "affiliation": "Medical Imaging & Biomarkers, Bioclinica, Newark, CA, USA.", "name": "Pannetier, Nicolas", From 15d25b323abc2c3c9d650e36c680844c8572a6a5 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 21 Nov 2020 08:43:39 -0500 Subject: [PATCH 0897/1665] FIX: Purge BaseException.message --- nipype/interfaces/base/tests/test_specs.py | 4 ++-- nipype/interfaces/io.py | 21 ++++++--------------- nipype/utils/filemanip.py | 8 +++----- 3 files changed, 11 insertions(+), 22 deletions(-) diff --git a/nipype/interfaces/base/tests/test_specs.py b/nipype/interfaces/base/tests/test_specs.py index d94f97ed1b..b088c95716 100644 --- a/nipype/interfaces/base/tests/test_specs.py +++ b/nipype/interfaces/base/tests/test_specs.py @@ -183,7 +183,7 @@ class DeprecationSpec3(nib.TraitedSpec): except nib.TraitError: not_raised = False assert not_raised - assert len(w) == 1, "deprecated warning 1 %s" % [w1.message for w1 in w] + assert len(w) == 1, f"deprecated warning 1 {[str(w1) for w1 in w]}" with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always", "", UserWarning) @@ -201,7 +201,7 @@ class DeprecationSpec3(nib.TraitedSpec): assert not_raised assert spec_instance.foo == Undefined assert spec_instance.bar == 1 - assert len(w) == 1, "deprecated warning 2 %s" % [w1.message for w1 in w] + assert len(w) == 1, f"deprecated warning 2 {[str(w1) for w1 in w]}" def test_namesource(setup_file): diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index 2b2510f169..67baa8cbf5 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -1010,11 +1010,8 @@ def _list_outputs(self): try: filledtemplate = template % tuple(argtuple) except TypeError as e: - raise TypeError( - e.message - + ": Template %s failed to convert with args %s" - % (template, str(tuple(argtuple))) - ) + raise TypeError(f"{e}: Template {template} failed to convert " + f"with args {tuple(argtuple)}") outfiles = [] for fname in bkt_files: if re.match(filledtemplate, fname): @@ -1286,11 +1283,8 @@ def _list_outputs(self): try: filledtemplate = template % tuple(argtuple) except TypeError as e: - raise TypeError( - e.message - + ": Template %s failed to convert with args %s" - % (template, str(tuple(argtuple))) - ) + raise TypeError(f"{e}: Template {template} failed to convert " + f"with args {tuple(argtuple)}") outfiles = glob.glob(filledtemplate) if len(outfiles) == 0: msg = "Output key: %s Template: %s returned no files" % ( @@ -2664,11 +2658,8 @@ def _list_outputs(self): try: filledtemplate = template % tuple(argtuple) except TypeError as e: - raise TypeError( - e.message - + ": Template %s failed to convert with args %s" - % (template, str(tuple(argtuple))) - ) + raise TypeError(f"{e}: Template {template} failed to convert " + f"with args {tuple(argtuple)}") outputs[key].append(self._get_files_over_ssh(filledtemplate)) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 46c4cc53be..656f4d23af 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -439,7 +439,7 @@ def copyfile( fmlogger.debug("Copying File: 
%s->%s", newfile, originalfile) shutil.copyfile(originalfile, newfile) except shutil.Error as e: - fmlogger.warning(e.message) + fmlogger.warning(str(e)) # Associated files if copy_related_files: @@ -870,10 +870,8 @@ def get_dependencies(name, environ): o, e = proc.communicate() deps = o.rstrip() except Exception as ex: - deps = '"%s" failed' % command - fmlogger.warning( - "Could not get dependencies of %s. Error:\n%s", name, ex.message - ) + deps = f'{command!r} failed' + fmlogger.warning(f"Could not get dependencies of {name}s. Error:\n{ex}") return deps From 77913ee8cb016885103568071d09d811d00b6d2c Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 21 Nov 2020 09:02:26 -0500 Subject: [PATCH 0898/1665] FIX: Get OSError.errno by name, not index --- nipype/utils/subprocess.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/utils/subprocess.py b/nipype/utils/subprocess.py index 2c8c38a2f8..f590b337a0 100644 --- a/nipype/utils/subprocess.py +++ b/nipype/utils/subprocess.py @@ -124,7 +124,7 @@ def _process(drain=0): res = select.select(streams, [], [], timeout) except select.error as e: iflogger.info(e) - if e[0] == errno.EINTR: + if e.errno == errno.EINTR: return else: raise From e8977681401bfd54a563fea08a912706d17299d5 Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Sat, 21 Nov 2020 09:41:14 -0500 Subject: [PATCH 0899/1665] Update core.py Potential hack for #3138 --- nipype/interfaces/base/core.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index 54c4302c7f..dcdb636994 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -31,7 +31,7 @@ from ...external.due import due -from .traits_extension import traits, isdefined +from .traits_extension import traits, isdefined, Undefined from .specs import ( BaseInterfaceInputSpec, CommandLineInputSpec, @@ -371,8 +371,10 @@ def run(self, cwd=None, ignore_exception=None, **inputs): enable_rm = config.resource_monitor and self.resource_monitor self.inputs.trait_set(**inputs) + unavailable_traits = self._check_version_requirements(self.inputs) + if unavailable_traits: + self.inputs.traitset(**{k: Undefined for k in unavailable_traits}) self._check_mandatory_inputs() - self._check_version_requirements(self.inputs) interface = self.__class__ self._duecredit_cite() From 737e2202e4469d1a32ec9b9a55bc39776b558133 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 21 Nov 2020 10:13:20 -0500 Subject: [PATCH 0900/1665] RF: Unset default traits for out-of-version inputs at init --- nipype/interfaces/base/core.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index dcdb636994..1063c6a0df 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -180,7 +180,14 @@ def __init__( if not self.input_spec: raise Exception("No input_spec in class: %s" % self.__class__.__name__) - self.inputs = self.input_spec(**inputs) + # Create input spec, disable any defaults that are unavailable due to + # version, and then apply the inputs that were passed. 
+ self.inputs = self.input_spec() + unavailable_traits = self._check_version_requirements(self.inputs, raise_exception=False) + if unavailable_traits: + self.inputs.trait_set(**{k: Undefined for k in unavailable_traits}) + self.inputs.trait_set(**inputs) + self.ignore_exception = ignore_exception if resource_monitor is not None: @@ -371,10 +378,8 @@ def run(self, cwd=None, ignore_exception=None, **inputs): enable_rm = config.resource_monitor and self.resource_monitor self.inputs.trait_set(**inputs) - unavailable_traits = self._check_version_requirements(self.inputs) - if unavailable_traits: - self.inputs.traitset(**{k: Undefined for k in unavailable_traits}) self._check_mandatory_inputs() + self._check_version_requirements(self.inputs) interface = self.__class__ self._duecredit_cite() From eef368740a61f26545c868438f94ed25a1e1b171 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 21 Nov 2020 12:13:30 -0500 Subject: [PATCH 0901/1665] TEST: Verify behavior when an input has a default and max_ver --- nipype/interfaces/base/tests/test_core.py | 40 +++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/nipype/interfaces/base/tests/test_core.py b/nipype/interfaces/base/tests/test_core.py index 165b3532ab..e8da1f16b2 100644 --- a/nipype/interfaces/base/tests/test_core.py +++ b/nipype/interfaces/base/tests/test_core.py @@ -272,6 +272,46 @@ class input_spec(nib.TraitedSpec): obj._check_version_requirements(obj.inputs) +def test_unavailable_input(): + class WithInput(nib.BaseInterface): + class input_spec(nib.TraitedSpec): + foo = nib.traits.Int(3, usedefault=True, max_ver="0.5") + + _version = "0.4" + + def _run_interface(self, runtime): + return runtime + + class WithoutInput(WithInput): + _version = "0.6" + + has = WithInput() + hasnt = WithoutInput() + trying_anyway = WithoutInput(foo=3) + assert has.inputs.foo == 3 + assert not nib.isdefined(hasnt.inputs.foo) + assert trying_anyway.inputs.foo == 3 + + has.run() + hasnt.run() + with pytest.raises(Exception): + trying_anyway.run() + + # Still settable + has.inputs.foo = 4 + hasnt.inputs.foo = 4 + trying_anyway.inputs.foo = 4 + assert has.inputs.foo == 4 + assert hasnt.inputs.foo == 4 + assert trying_anyway.inputs.foo == 4 + + has.run() + with pytest.raises(Exception): + hasnt.run() + with pytest.raises(Exception): + trying_anyway.run() + + def test_output_version(): class InputSpec(nib.TraitedSpec): foo = nib.traits.Int(desc="a random int") From a29a041851b051d6b8e04628adb95310aa45cee2 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 21 Nov 2020 13:20:14 -0500 Subject: [PATCH 0902/1665] TEST: Expect more warnings --- nipype/interfaces/base/tests/test_core.py | 26 ++++++++++++++--------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/nipype/interfaces/base/tests/test_core.py b/nipype/interfaces/base/tests/test_core.py index e8da1f16b2..233635a972 100644 --- a/nipype/interfaces/base/tests/test_core.py +++ b/nipype/interfaces/base/tests/test_core.py @@ -245,15 +245,17 @@ class input_spec(nib.TraitedSpec): _version = "misparsed-garbage" - obj = DerivedInterface() + with caplog.at_level(logging.WARNING, logger="nipype.interface"): + obj = DerivedInterface() + assert len(caplog.records) == 2 obj.inputs.foo = 1 obj.inputs.bar = 1 with caplog.at_level(logging.WARNING, logger="nipype.interface"): obj._check_version_requirements(obj.inputs) - assert len(caplog.records) == 2 + assert len(caplog.records) == 4 -def test_input_version_missing_error(): +def test_input_version_missing_error(caplog): from nipype 
import config class DerivedInterface(nib.BaseInterface): @@ -263,13 +265,17 @@ class input_spec(nib.TraitedSpec): _version = "misparsed-garbage" - with mock.patch.object(config, "getboolean", return_value=True): - obj = DerivedInterface(foo=1) - with pytest.raises(ValueError): - obj._check_version_requirements(obj.inputs) - obj = DerivedInterface(bar=1) - with pytest.raises(ValueError): - obj._check_version_requirements(obj.inputs) + with caplog.at_level(logging.WARNING, logger="nipype.interface"): + obj1 = DerivedInterface(foo=1) + obj2 = DerivedInterface(bar=1) + assert len(caplog.records) == 4 + with caplog.at_level(logging.WARNING, logger="nipype.interface"): + with mock.patch.object(config, "getboolean", return_value=True): + with pytest.raises(ValueError): + obj1._check_version_requirements(obj1.inputs) + with pytest.raises(ValueError): + obj2._check_version_requirements(obj2.inputs) + assert len(caplog.records) == 6 def test_unavailable_input(): From 8db5b25c7e952ccbcae926fa85f0a95234db8556 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 21 Nov 2020 19:07:23 -0500 Subject: [PATCH 0903/1665] RF: Change raise_exception to permissive; suppress warnings and exceptions with same mechanism --- nipype/interfaces/base/core.py | 20 ++++++++++++++------ nipype/interfaces/base/tests/test_core.py | 14 +++++--------- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index 1063c6a0df..afafbebf84 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -183,7 +183,9 @@ def __init__( # Create input spec, disable any defaults that are unavailable due to # version, and then apply the inputs that were passed. self.inputs = self.input_spec() - unavailable_traits = self._check_version_requirements(self.inputs, raise_exception=False) + unavailable_traits = self._check_version_requirements( + self.inputs, permissive=True + ) if unavailable_traits: self.inputs.trait_set(**{k: Undefined for k in unavailable_traits}) self.inputs.trait_set(**inputs) @@ -271,8 +273,12 @@ def _check_mandatory_inputs(self): ): self._check_requires(spec, name, getattr(self.inputs, name)) - def _check_version_requirements(self, trait_object, raise_exception=True): + def _check_version_requirements(self, trait_object, permissive=False): """ Raises an exception on version mismatch + + Set the ``permissive`` attribute to True to suppress warnings and exceptions. + This is currently only used in __init__ to silently identify unavailable + traits. """ unavailable_traits = [] # check minimum version @@ -290,7 +296,8 @@ def _check_version_requirements(self, trait_object, raise_exception=True): f"Nipype cannot validate the package version {version!r} for " f"{self.__class__.__name__}. Trait {name} requires version >={min_ver}." ) - iflogger.warning(f"{msg}. Please verify validity.") + if not permissive: + iflogger.warning(f"{msg}. 
Please verify validity.") if config.getboolean("execution", "stop_on_unknown_version"): raise ValueError(msg) from err continue @@ -298,7 +305,7 @@ def _check_version_requirements(self, trait_object, raise_exception=True): unavailable_traits.append(name) if not isdefined(getattr(trait_object, name)): continue - if raise_exception: + if not permissive: raise Exception( "Trait %s (%s) (version %s < required %s)" % (name, self.__class__.__name__, version, min_ver) @@ -318,7 +325,8 @@ def _check_version_requirements(self, trait_object, raise_exception=True): f"Nipype cannot validate the package version {version!r} for " f"{self.__class__.__name__}. Trait {name} requires version <={max_ver}." ) - iflogger.warning(f"{msg}. Please verify validity.") + if not permissive: + iflogger.warning(f"{msg}. Please verify validity.") if config.getboolean("execution", "stop_on_unknown_version"): raise ValueError(msg) from err continue @@ -326,7 +334,7 @@ def _check_version_requirements(self, trait_object, raise_exception=True): unavailable_traits.append(name) if not isdefined(getattr(trait_object, name)): continue - if raise_exception: + if not permissive: raise Exception( "Trait %s (%s) (version %s > required %s)" % (name, self.__class__.__name__, version, max_ver) diff --git a/nipype/interfaces/base/tests/test_core.py b/nipype/interfaces/base/tests/test_core.py index 233635a972..6b587554fa 100644 --- a/nipype/interfaces/base/tests/test_core.py +++ b/nipype/interfaces/base/tests/test_core.py @@ -245,14 +245,12 @@ class input_spec(nib.TraitedSpec): _version = "misparsed-garbage" - with caplog.at_level(logging.WARNING, logger="nipype.interface"): - obj = DerivedInterface() - assert len(caplog.records) == 2 + obj = DerivedInterface() obj.inputs.foo = 1 obj.inputs.bar = 1 with caplog.at_level(logging.WARNING, logger="nipype.interface"): obj._check_version_requirements(obj.inputs) - assert len(caplog.records) == 4 + assert len(caplog.records) == 2 def test_input_version_missing_error(caplog): @@ -265,17 +263,15 @@ class input_spec(nib.TraitedSpec): _version = "misparsed-garbage" - with caplog.at_level(logging.WARNING, logger="nipype.interface"): - obj1 = DerivedInterface(foo=1) - obj2 = DerivedInterface(bar=1) - assert len(caplog.records) == 4 + obj1 = DerivedInterface(foo=1) + obj2 = DerivedInterface(bar=1) with caplog.at_level(logging.WARNING, logger="nipype.interface"): with mock.patch.object(config, "getboolean", return_value=True): with pytest.raises(ValueError): obj1._check_version_requirements(obj1.inputs) with pytest.raises(ValueError): obj2._check_version_requirements(obj2.inputs) - assert len(caplog.records) == 6 + assert len(caplog.records) == 2 def test_unavailable_input(): From 35ea6354aedbfd21f9432be5ddc27262bcfdfccf Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 21 Nov 2020 20:00:34 -0500 Subject: [PATCH 0904/1665] TEST/FIX: Bad input value now causing problems --- nipype/interfaces/tests/test_io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/tests/test_io.py b/nipype/interfaces/tests/test_io.py index ef210de030..f2afedd492 100644 --- a/nipype/interfaces/tests/test_io.py +++ b/nipype/interfaces/tests/test_io.py @@ -465,7 +465,7 @@ def test_datasink_substitutions(tmpdir): files.append(f) open(f, "w") ds = nio.DataSink( - parametrization=False, + parameterization=False, base_directory=str(outdir), substitutions=[("ababab", "ABABAB")], # end archoring ($) is used to assure operation on the filename From d5be8c0b646f3953a57d6b4921d322aecef725b3 Mon 
Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 2 Nov 2020 16:23:46 -0500 Subject: [PATCH 0905/1665] FIX: Canonicalize environment in deprecated version_from_command() --- nipype/interfaces/base/core.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index afafbebf84..755a33317c 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -26,7 +26,7 @@ from ... import config, logging, LooseVersion from ...utils.provenance import write_provenance from ...utils.misc import str2bool, rgetcwd -from ...utils.filemanip import split_filename, which, get_dependencies +from ...utils.filemanip import split_filename, which, get_dependencies, canonicalize_env from ...utils.subprocess import run_command from ...external.due import due @@ -778,7 +778,7 @@ def version_from_command(self, flag="-v", cmd=None): proc = sp.Popen( " ".join((cmd, flag)), shell=True, - env=env, + env=canonicalize_env(env), stdout=sp.PIPE, stderr=sp.PIPE, ) From 0dbcedda8c9e38f9c8ca90f8d9b6e0f5d0437fd8 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 2 Nov 2020 16:56:02 -0500 Subject: [PATCH 0906/1665] FIX: Windows actually wants strings --- nipype/utils/filemanip.py | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 656f4d23af..9b3741ae96 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -26,11 +26,7 @@ fmlogger = logging.getLogger("nipype.utils") -related_filetype_sets = [ - (".hdr", ".img", ".mat"), - (".nii", ".mat"), - (".BRIK", ".HEAD"), -] +related_filetype_sets = [(".hdr", ".img", ".mat"), (".nii", ".mat"), (".BRIK", ".HEAD")] def _resolve_with_filenotfound(path, **kwargs): @@ -876,7 +872,7 @@ def get_dependencies(name, environ): def canonicalize_env(env): - """Windows requires that environment be dicts with bytes as keys and values + """Windows requires that environment be dicts with str as keys and values This function converts any unicode entries for Windows only, returning the dictionary untouched in other environments. 
@@ -888,7 +884,7 @@ def canonicalize_env(env): Returns ------- env : dict - Windows: environment dictionary with bytes keys and values + Windows: environment dictionary with str keys and values Other: untouched input ``env`` """ if os.name != "nt": @@ -896,10 +892,10 @@ def canonicalize_env(env): out_env = {} for key, val in env.items(): - if not isinstance(key, bytes): - key = key.encode("utf-8") - if not isinstance(val, bytes): - val = val.encode("utf-8") + if not isinstance(key, str): + key = key.decode("utf-8") + if not isinstance(val, str): + val = val.decode("utf-8") out_env[key] = val return out_env From 72da248abf0f4dd17d52b714706dae264dc18e38 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 24 Nov 2020 10:21:50 -0500 Subject: [PATCH 0907/1665] MNT: 1.6.0 --- nipype/info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/info.py b/nipype/info.py index 396b24cea7..c9e49f53cb 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -5,7 +5,7 @@ # nipype version information # Remove -dev for release -__version__ = "1.6.0-dev" +__version__ = "1.6.0" def get_nipype_gitversion(): From aef6fb24ac62d81eaeccc0c8a4928b575eebabf7 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 24 Nov 2020 10:28:54 -0500 Subject: [PATCH 0908/1665] MNT: Update Zenodo ordering --- .zenodo.json | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index 0baed9eaa9..6672d5bde0 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -406,6 +406,16 @@ "affiliation": "University of Iowa", "name": "Welch, David" }, + { + "affiliation": "Charite Universitatsmedizin Berlin, Germany", + "name": "Waller, Lea", + "orcid": "0000-0002-3239-6957" + }, + { + "affiliation": "Max Planck Institute for Human Cognitive and Brain Sciences", + "name": "Contier, Oliver", + "orcid": "0000-0002-2983-4709" + }, { "affiliation": "Department of Psychology, Stanford University", "name": "Triplett, William", @@ -486,11 +496,6 @@ "affiliation": "Boston University", "name": "Perkins, L. 
Nathan" }, - { - "affiliation": "Max Planck Institute for Human Cognitive and Brain Sciences", - "name": "Contier, Oliver", - "orcid": "0000-0002-2983-4709" - }, { "name": "Zhou, Dale" }, @@ -498,6 +503,10 @@ "name": "Bielievtsov, Dmytro", "orcid": "0000-0003-3846-7696" }, + { + "affiliation": "Sagol School of Neuroscience, Tel Aviv University", + "name": "Ben-Zvi, Gal" + }, { "affiliation": "University of Newcastle, Australia", "name": "Cooper, Gavin", @@ -513,11 +522,6 @@ "name": "Linkersd\u00f6rfer, Janosch", "orcid": "0000-0002-1577-1233" }, - { - "affiliation": "Charite Universitatsmedizin Berlin, Germany", - "name": "Waller, Lea", - "orcid": "0000-0002-3239-6957" - }, { "name": "Renfro, Mandy" }, @@ -565,6 +569,11 @@ "name": "Margulies, Daniel S.", "orcid": "0000-0002-8880-9204" }, + { + "affiliation": "CNRS, UMS3552 IRMaGe", + "name": "Condamine, Eric", + "orcid": "0000-0002-9533-3769" + }, { "affiliation": "Dartmouth College", "name": "Ma, Feilong", @@ -634,11 +643,6 @@ "affiliation": "Vrije Universiteit Amsterdam", "name": "Ort, Eduard" }, - { - "affiliation": "CNRS, UMS3552 IRMaGe", - "name": "Condamine, Eric", - "orcid": "0000-0002-9533-3769" - }, { "affiliation": "Stanford University", "name": "Lerma-Usabiaga, Garikoitz", @@ -697,10 +701,6 @@ "affiliation": "Sagol School of Neuroscience, Tel Aviv University", "name": "Baratz, Zvi" }, - { - "affiliation": "Sagol School of Neuroscience, Tel Aviv University", - "name": "Ben-Zvi, Gal" - }, { "name": "Matsubara, K" }, From d32e5bf7ff2fc931b4981dcd7fa091403902ec29 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 27 Nov 2020 19:44:34 -0500 Subject: [PATCH 0909/1665] DOC: 1.6.0 changelog --- doc/changelog/1.X.X-changelog.rst | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/doc/changelog/1.X.X-changelog.rst b/doc/changelog/1.X.X-changelog.rst index 4855404401..ade3f2dccd 100644 --- a/doc/changelog/1.X.X-changelog.rst +++ b/doc/changelog/1.X.X-changelog.rst @@ -1,3 +1,26 @@ +1.6.0 (November 27, 2020) +========================= + +New feature release in the 1.6.x series. + +In addition to the usual bug fixes, significant reductions were made +in workflow startup costs. 
+ +(`Full changelog `__) + + * FIX: Canonicalize environment dicts to strings in Windows (https://github.com/nipy/nipype/pull/3267) + * FIX: Purge deprecated exception content accesses (https://github.com/nipy/nipype/pull/3272) + * FIX: Handle changes in CLI structure of mrtrix3.DWIBiasCorrect (https://github.com/nipy/nipype/pull/3248) + * FIX: EpiReg changed to not list certain outputs when 'wmseg' input is specified (https://github.com/nipy/nipype/pull/3265) + * FIX: CI issues (https://github.com/nipy/nipype/pull/3262) + * FIX: SPM SliceTiming must accept either Int or float for ref_slice and sli… (https://github.com/nipy/nipype/pull/3255) + * FIX: Raise version error when using ``-g`` with ``antsAI`` < 2.3.0 (https://github.com/nipy/nipype/pull/3256) + * FIX: No longer depending on pydotplus (networkx >=2.0 update) (https://github.com/nipy/nipype/pull/3251) + * FIX: ANTs' utilities revision - bug fixes and add more operations to ``ants.ImageMath`` (https://github.com/nipy/nipype/pull/3236) + * ENH: Handle unavailable traits due to version differences (https://github.com/nipy/nipype/pull/3273) + * ENH: Optimize workflow.run performance (https://github.com/nipy/nipype/pull/3260) + * DOC: Remove myself (@mr-c) from the zenodo metadata (https://github.com/nipy/nipype/pull/3271) + 1.5.1 (August 16, 2020) ======================= From 1f3d251c885a17bd834e6a501bbf5cebde47935f Mon Sep 17 00:00:00 2001 From: Tom Close Date: Tue, 29 Sep 2020 17:06:19 +1000 Subject: [PATCH 0910/1665] added predicted_signal option to tensor added predicted signal to estimate output spec --- nipype/interfaces/mrtrix3/reconst.py | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/reconst.py b/nipype/interfaces/mrtrix3/reconst.py index 3f21e9ad54..d13c29d453 100644 --- a/nipype/interfaces/mrtrix3/reconst.py +++ b/nipype/interfaces/mrtrix3/reconst.py @@ -4,7 +4,8 @@ import os.path as op -from ..base import traits, TraitedSpec, File, Undefined, InputMultiObject +from ..base import ( + traits, TraitedSpec, File, Undefined, InputMultiObject, isdefined) from .base import MRTrix3BaseInputSpec, MRTrix3Base @@ -50,10 +51,16 @@ class FitTensorInputSpec(MRTrix3BaseInputSpec): "only applies to the non-linear methods" ), ) + predicted_signal = File( + argstr='-predicted_signal %s', + desc=( + "specify a file to contain the predicted signal from the tensor " + "fits. This can be used to calculate the residual signal")) class FitTensorOutputSpec(TraitedSpec): out_file = File(exists=True, desc="the output DTI file") + predicted_signal = File(desc="Predicted signal from fitted tensors") class FitTensor(MRTrix3Base): @@ -81,6 +88,9 @@ class FitTensor(MRTrix3Base): def _list_outputs(self): outputs = self.output_spec().get() outputs["out_file"] = op.abspath(self.inputs.out_file) + if isdefined(self.inputs.predicted_signal): + outputs['predicted_signal'] = op.abspath( + self.inputs.predicted_signal) return outputs @@ -144,12 +154,18 @@ class EstimateFODInputSpec(MRTrix3BaseInputSpec): "[ az el ] pairs for the directions." ), ) + predicted_signal = File( + argstr='-predicted_signal %s', + desc=( + "specify a file to contain the predicted signal from the FOD " + "estimates. 
This can be used to calculate the residual signal")) class EstimateFODOutputSpec(TraitedSpec): wm_odf = File(argstr="%s", desc="output WM ODF") gm_odf = File(argstr="%s", desc="output GM ODF") csf_odf = File(argstr="%s", desc="output CSF ODF") + predicted_signal = File(desc="output predicted signal") class EstimateFOD(MRTrix3Base): @@ -187,6 +203,9 @@ def _list_outputs(self): outputs["gm_odf"] = op.abspath(self.inputs.gm_odf) if self.inputs.csf_odf != Undefined: outputs["csf_odf"] = op.abspath(self.inputs.csf_odf) + if self.inputs.predicted_signal != Undefined: + outputs["predicted_signal"] = op.abspath( + self.inputs.predicted_signal) return outputs From a9a89046d8d4cd2552ac07f477365575f542a710 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Wed, 2 Dec 2020 16:41:18 +1100 Subject: [PATCH 0911/1665] added SHConv and SH2Amp interfaces to mrtrix utils --- nipype/interfaces/mrtrix3/utils.py | 126 +++++++++++++++++++++++++++++ 1 file changed, 126 insertions(+) diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index d13b5d0ce7..3d312c2753 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -765,3 +765,129 @@ class MRResize(MRTrix3Base): _cmd = "mrresize" input_spec = MRResizeInputSpec output_spec = MRResizeOutputSpec + + +class SHConvInputSpec(TraitedSpec): + in_file = File( + exists=True, + argstr="%s", + mandatory=True, + position=-3, + desc="input ODF image", + ) + + # General options + response = File( + exists=True, + mandatory=True, + argstr="%s", + position=-2, + desc=("The response function"), + ) + + out_file = File( + "sh.mif", + argstr="%s", + mandatory=True, + position=-1, + usedefault=True, + desc="the output spherical harmonics", + ) + + +class SHConvOutputSpec(TraitedSpec): + out_file = File(exists=True, + desc="the output convoluted spherical harmonics file") + + +class SHConv(CommandLine): + """ + Convert diffusion-weighted images to tensor images + + + Example + ------- + + >>> import nipype.interfaces.mrtrix3 as mrt + >>> tsr = mrt.SHConv() + >>> tsr.inputs.in_file = 'odf.mif' + >>> tsr.inputs.response = 'response.txt' + >>> tsr.inputs.grad_fsl = ('bvecs', 'bvals') + >>> tsr.cmdline # doctest: +ELLIPSIS + 'dwi2tensor -fslgrad bvecs bvals -mask mask.nii.gz dwi.mif dti.mif' + >>> tsr.run() # doctest: +SKIP + """ + + _cmd = "shconv" + input_spec = SHConvInputSpec + output_spec = SHConvOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs["out_file"] = op.abspath(self.inputs.out_file) + return outputs + + + +class SH2AmpInputSpec(MRTrix3BaseInputSpec): + in_file = File( + exists=True, + argstr="%s", + mandatory=True, + position=-3, + desc="input ODF image", + ) + + # General options + directions = File( + exists=True, + mandatory=True, + argstr="%s", + position=-2, + desc=("The directions along which to sample the function"), + ) + + out_file = File( + "amp.mif", + argstr="%s", + mandatory=True, + position=-1, + usedefault=True, + desc="the output spherical harmonics", + ) + + nonnegative = traits.Bool( + argstr='-nonnegative', + desc="cap all negative amplitudes to zero") + + +class SH2AmpOutputSpec(TraitedSpec): + out_file = File(exists=True, + desc="the output convoluted spherical harmonics file") + + +class SH2Amp(MRTrix3Base): + """ + Convert diffusion-weighted images to tensor images + + + Example + ------- + + >>> import nipype.interfaces.mrtrix3 as mrt + >>> sha = mrt.SH2Amp() + >>> sha.inputs.in_file = 'odf.mif' + >>> sha.inputs.response = 'response.txt' + >>> 
sha.cmdline # doctest: +ELLIPSIS + 'dwi2tensor -fslgrad bvecs bvals -mask mask.nii.gz dwi.mif dti.mif' + >>> sha.run() # doctest: +SKIP + """ + + _cmd = "sh2amp" + input_spec = SH2AmpInputSpec + output_spec = SH2AmpOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs["out_file"] = op.abspath(self.inputs.out_file) + return outputs From ac7490c27a789e15d3513ceba8046ef7ee11c82f Mon Sep 17 00:00:00 2001 From: Tom Close Date: Wed, 2 Dec 2020 16:44:10 +1100 Subject: [PATCH 0912/1665] added name to authors list --- .zenodo.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.zenodo.json b/.zenodo.json index 6672d5bde0..89a3823172 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -711,6 +711,11 @@ { "name": "Marina, Ana" }, + { + "affiliation": "University of Sydney", + "name": "Close, Thomas", + "orcid": "0000-0002-4160-2134" + }, { "name": "Davison, Andrew" }, From 974ca38cc3b3709044580eee5f2e756a828dae04 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Wed, 2 Dec 2020 17:11:45 +1100 Subject: [PATCH 0913/1665] touched up docs and made commands inherit from commandline instead of mrtrixbase --- nipype/interfaces/mrtrix3/__init__.py | 2 ++ nipype/interfaces/mrtrix3/utils.py | 41 ++++++++++++++------------- 2 files changed, 24 insertions(+), 19 deletions(-) diff --git a/nipype/interfaces/mrtrix3/__init__.py b/nipype/interfaces/mrtrix3/__init__.py index f60e837310..dd0317591e 100644 --- a/nipype/interfaces/mrtrix3/__init__.py +++ b/nipype/interfaces/mrtrix3/__init__.py @@ -13,6 +13,8 @@ MRConvert, MRResize, DWIExtract, + SHConv, + SH2Amp ) from .preprocess import ( ResponseSD, diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index 3d312c2753..fcd1b7842f 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -767,7 +767,8 @@ class MRResize(MRTrix3Base): output_spec = MRResizeOutputSpec -class SHConvInputSpec(TraitedSpec): +class SHConvInputSpec(CommandLineInputSpec): + in_file = File( exists=True, argstr="%s", @@ -802,20 +803,20 @@ class SHConvOutputSpec(TraitedSpec): class SHConv(CommandLine): """ - Convert diffusion-weighted images to tensor images + Convolve spherical harmonics with a tissue response function. Useful for + checking residuals of ODF estimates. 
Example ------- >>> import nipype.interfaces.mrtrix3 as mrt - >>> tsr = mrt.SHConv() - >>> tsr.inputs.in_file = 'odf.mif' - >>> tsr.inputs.response = 'response.txt' - >>> tsr.inputs.grad_fsl = ('bvecs', 'bvals') - >>> tsr.cmdline # doctest: +ELLIPSIS - 'dwi2tensor -fslgrad bvecs bvals -mask mask.nii.gz dwi.mif dti.mif' - >>> tsr.run() # doctest: +SKIP + >>> sh = mrt.SHConv() + >>> sh.inputs.in_file = 'odf.mif' + >>> sh.inputs.response = 'response.txt' + >>> sh.cmdline # doctest: +ELLIPSIS + 'shconv odf.mif response.txt sh.mif' + >>> sh.run() # doctest: +SKIP """ _cmd = "shconv" @@ -829,7 +830,7 @@ def _list_outputs(self): -class SH2AmpInputSpec(MRTrix3BaseInputSpec): +class SH2AmpInputSpec(CommandLineInputSpec): in_file = File( exists=True, argstr="%s", @@ -844,7 +845,8 @@ class SH2AmpInputSpec(MRTrix3BaseInputSpec): mandatory=True, argstr="%s", position=-2, - desc=("The directions along which to sample the function"), + desc=("The gradient directions along which to sample the spherical " + "harmonics MRtrix format"), ) out_file = File( @@ -866,21 +868,22 @@ class SH2AmpOutputSpec(TraitedSpec): desc="the output convoluted spherical harmonics file") -class SH2Amp(MRTrix3Base): +class SH2Amp(CommandLine): """ - Convert diffusion-weighted images to tensor images + Sample spherical harmonics on a set of gradient orientations. Useful for + checking residuals of ODF estimates. Example ------- >>> import nipype.interfaces.mrtrix3 as mrt - >>> sha = mrt.SH2Amp() - >>> sha.inputs.in_file = 'odf.mif' - >>> sha.inputs.response = 'response.txt' - >>> sha.cmdline # doctest: +ELLIPSIS - 'dwi2tensor -fslgrad bvecs bvals -mask mask.nii.gz dwi.mif dti.mif' - >>> sha.run() # doctest: +SKIP + >>> sh = mrt.SH2Amp() + >>> sh.inputs.in_file = 'sh.mif' + >>> sh.inputs.directions = 'grads.txt' + >>> sh.cmdline # doctest: +ELLIPSIS + 'sh2amp sh.mif grads.txt amp.mif' + >>> sh.run() # doctest: +SKIP """ _cmd = "sh2amp" From ccd25c6df463fa7090e9f368468cf22823e174d5 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Thu, 3 Dec 2020 09:16:04 +1100 Subject: [PATCH 0914/1665] changed isdefined checks of EstimateFOD and check for correct algorithm when using predicted_signal --- nipype/interfaces/mrtrix3/reconst.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/nipype/interfaces/mrtrix3/reconst.py b/nipype/interfaces/mrtrix3/reconst.py index d13c29d453..9ca170d23f 100644 --- a/nipype/interfaces/mrtrix3/reconst.py +++ b/nipype/interfaces/mrtrix3/reconst.py @@ -158,7 +158,10 @@ class EstimateFODInputSpec(MRTrix3BaseInputSpec): argstr='-predicted_signal %s', desc=( "specify a file to contain the predicted signal from the FOD " - "estimates. This can be used to calculate the residual signal")) + "estimates. This can be used to calculate the residual signal." + "Note that this is only valid if algorithm == 'msmt_csd'. 
" + "For single shell reconstructions use a combination of SHConv " + "and SH2Amp instead.")) class EstimateFODOutputSpec(TraitedSpec): @@ -199,11 +202,15 @@ class EstimateFOD(MRTrix3Base): def _list_outputs(self): outputs = self.output_spec().get() outputs["wm_odf"] = op.abspath(self.inputs.wm_odf) - if self.inputs.gm_odf != Undefined: + if isdefined(self.inputs.gm_odf): outputs["gm_odf"] = op.abspath(self.inputs.gm_odf) - if self.inputs.csf_odf != Undefined: + if isdefined(self.inputs.csf_odf): outputs["csf_odf"] = op.abspath(self.inputs.csf_odf) - if self.inputs.predicted_signal != Undefined: + if isdefined(self.inputs.predicted_signal): + if self.inputs.algorithm != 'msmt_csd': + raise Exception( + "'predicted_signal' option can only be used with " + "the 'msmt_csd' algorithm") outputs["predicted_signal"] = op.abspath( self.inputs.predicted_signal) return outputs From f245ba4985b3014a6d1dbdb753056760fcd07d13 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Thu, 3 Dec 2020 09:21:18 +1100 Subject: [PATCH 0915/1665] fixed up doctest strings and added dummy files --- nipype/interfaces/mrtrix3/utils.py | 6 +++--- nipype/testing/data/grads.txt | 0 nipype/testing/data/sh.mif | 0 3 files changed, 3 insertions(+), 3 deletions(-) create mode 100644 nipype/testing/data/grads.txt create mode 100644 nipype/testing/data/sh.mif diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index fcd1b7842f..bcd5be3572 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -812,9 +812,9 @@ class SHConv(CommandLine): >>> import nipype.interfaces.mrtrix3 as mrt >>> sh = mrt.SHConv() - >>> sh.inputs.in_file = 'odf.mif' + >>> sh.inputs.in_file = 'csd.mif' >>> sh.inputs.response = 'response.txt' - >>> sh.cmdline # doctest: +ELLIPSIS + >>> sh.cmdline 'shconv odf.mif response.txt sh.mif' >>> sh.run() # doctest: +SKIP """ @@ -881,7 +881,7 @@ class SH2Amp(CommandLine): >>> sh = mrt.SH2Amp() >>> sh.inputs.in_file = 'sh.mif' >>> sh.inputs.directions = 'grads.txt' - >>> sh.cmdline # doctest: +ELLIPSIS + >>> sh.cmdline 'sh2amp sh.mif grads.txt amp.mif' >>> sh.run() # doctest: +SKIP """ diff --git a/nipype/testing/data/grads.txt b/nipype/testing/data/grads.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/sh.mif b/nipype/testing/data/sh.mif new file mode 100644 index 0000000000..e69de29bb2 From c81259bc3b28baa1f18f95f6b056c228c6bfd115 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Thu, 3 Dec 2020 09:24:05 +1100 Subject: [PATCH 0916/1665] run black over touched files --- nipype/interfaces/mrtrix3/__init__.py | 2 +- nipype/interfaces/mrtrix3/reconst.py | 26 ++++++++++++++------------ nipype/interfaces/mrtrix3/utils.py | 17 ++++++++--------- 3 files changed, 23 insertions(+), 22 deletions(-) diff --git a/nipype/interfaces/mrtrix3/__init__.py b/nipype/interfaces/mrtrix3/__init__.py index dd0317591e..ea201a18a6 100644 --- a/nipype/interfaces/mrtrix3/__init__.py +++ b/nipype/interfaces/mrtrix3/__init__.py @@ -14,7 +14,7 @@ MRResize, DWIExtract, SHConv, - SH2Amp + SH2Amp, ) from .preprocess import ( ResponseSD, diff --git a/nipype/interfaces/mrtrix3/reconst.py b/nipype/interfaces/mrtrix3/reconst.py index 9ca170d23f..0e5523fa1d 100644 --- a/nipype/interfaces/mrtrix3/reconst.py +++ b/nipype/interfaces/mrtrix3/reconst.py @@ -4,8 +4,7 @@ import os.path as op -from ..base import ( - traits, TraitedSpec, File, Undefined, InputMultiObject, isdefined) +from ..base import traits, TraitedSpec, File, Undefined, InputMultiObject, isdefined 
from .base import MRTrix3BaseInputSpec, MRTrix3Base @@ -52,10 +51,12 @@ class FitTensorInputSpec(MRTrix3BaseInputSpec): ), ) predicted_signal = File( - argstr='-predicted_signal %s', + argstr="-predicted_signal %s", desc=( "specify a file to contain the predicted signal from the tensor " - "fits. This can be used to calculate the residual signal")) + "fits. This can be used to calculate the residual signal" + ), + ) class FitTensorOutputSpec(TraitedSpec): @@ -89,8 +90,7 @@ def _list_outputs(self): outputs = self.output_spec().get() outputs["out_file"] = op.abspath(self.inputs.out_file) if isdefined(self.inputs.predicted_signal): - outputs['predicted_signal'] = op.abspath( - self.inputs.predicted_signal) + outputs["predicted_signal"] = op.abspath(self.inputs.predicted_signal) return outputs @@ -155,13 +155,15 @@ class EstimateFODInputSpec(MRTrix3BaseInputSpec): ), ) predicted_signal = File( - argstr='-predicted_signal %s', + argstr="-predicted_signal %s", desc=( "specify a file to contain the predicted signal from the FOD " "estimates. This can be used to calculate the residual signal." "Note that this is only valid if algorithm == 'msmt_csd'. " "For single shell reconstructions use a combination of SHConv " - "and SH2Amp instead.")) + "and SH2Amp instead." + ), + ) class EstimateFODOutputSpec(TraitedSpec): @@ -207,12 +209,12 @@ def _list_outputs(self): if isdefined(self.inputs.csf_odf): outputs["csf_odf"] = op.abspath(self.inputs.csf_odf) if isdefined(self.inputs.predicted_signal): - if self.inputs.algorithm != 'msmt_csd': + if self.inputs.algorithm != "msmt_csd": raise Exception( "'predicted_signal' option can only be used with " - "the 'msmt_csd' algorithm") - outputs["predicted_signal"] = op.abspath( - self.inputs.predicted_signal) + "the 'msmt_csd' algorithm" + ) + outputs["predicted_signal"] = op.abspath(self.inputs.predicted_signal) return outputs diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index bcd5be3572..4af0b5380f 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -797,8 +797,7 @@ class SHConvInputSpec(CommandLineInputSpec): class SHConvOutputSpec(TraitedSpec): - out_file = File(exists=True, - desc="the output convoluted spherical harmonics file") + out_file = File(exists=True, desc="the output convoluted spherical harmonics file") class SHConv(CommandLine): @@ -829,7 +828,6 @@ def _list_outputs(self): return outputs - class SH2AmpInputSpec(CommandLineInputSpec): in_file = File( exists=True, @@ -845,8 +843,10 @@ class SH2AmpInputSpec(CommandLineInputSpec): mandatory=True, argstr="%s", position=-2, - desc=("The gradient directions along which to sample the spherical " - "harmonics MRtrix format"), + desc=( + "The gradient directions along which to sample the spherical " + "harmonics MRtrix format" + ), ) out_file = File( @@ -859,13 +859,12 @@ class SH2AmpInputSpec(CommandLineInputSpec): ) nonnegative = traits.Bool( - argstr='-nonnegative', - desc="cap all negative amplitudes to zero") + argstr="-nonnegative", desc="cap all negative amplitudes to zero" + ) class SH2AmpOutputSpec(TraitedSpec): - out_file = File(exists=True, - desc="the output convoluted spherical harmonics file") + out_file = File(exists=True, desc="the output convoluted spherical harmonics file") class SH2Amp(CommandLine): From ab96460859a95e4a5a68519fcbe7f2184d72ebe5 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Thu, 3 Dec 2020 09:27:06 +1100 Subject: [PATCH 0917/1665] ran make specs --- 
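The test_auto_* modules updated below are produced by tools/checkspecs.py (the
"make specs" step named in the subject) and are not edited by hand; each
generated test asserts that the trait metadata declared in an interface's
input_spec/output_spec still matches the recorded expectations, so accidental
spec drift shows up as a test failure. A minimal sketch of that assertion
pattern, using an illustrative ExampleInputSpec rather than a real nipype
interface:

    import nipype.interfaces.base as nib

    class ExampleInputSpec(nib.TraitedSpec):
        # the trait metadata here mirrors what the generated tests record per trait
        in_file = nib.File(argstr="%s", mandatory=True, position=-1)

    def test_example_inputs():
        expected = dict(in_file=dict(argstr="%s", mandatory=True, position=-1))
        spec = ExampleInputSpec()
        for name, metadata in expected.items():
            for key, value in metadata.items():
                assert getattr(spec.traits()[name], key) == value
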
..._auto_ConstrainedSphericalDeconvolution.py | 7 +++ .../mrtrix3/tests/test_auto_EstimateFOD.py | 7 +++ .../mrtrix3/tests/test_auto_FitTensor.py | 7 +++ .../mrtrix3/tests/test_auto_SH2Amp.py | 54 +++++++++++++++++++ .../mrtrix3/tests/test_auto_SHConv.py | 51 ++++++++++++++++++ 5 files changed, 126 insertions(+) create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_SH2Amp.py create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_SHConv.py diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ConstrainedSphericalDeconvolution.py b/nipype/interfaces/mrtrix3/tests/test_auto_ConstrainedSphericalDeconvolution.py index 1e87ebc377..c395f0d1c8 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ConstrainedSphericalDeconvolution.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ConstrainedSphericalDeconvolution.py @@ -77,6 +77,10 @@ def test_ConstrainedSphericalDeconvolution_inputs(): argstr="-nthreads %d", nohash=True, ), + predicted_signal=dict( + argstr="-predicted_signal %s", + extensions=None, + ), shell=dict( argstr="-shell %s", sep=",", @@ -112,6 +116,9 @@ def test_ConstrainedSphericalDeconvolution_outputs(): argstr="%s", extensions=None, ), + predicted_signal=dict( + extensions=None, + ), wm_odf=dict( argstr="%s", extensions=None, diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py index d91d270c4d..2d15207571 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py @@ -80,6 +80,10 @@ def test_EstimateFOD_inputs(): argstr="-nthreads %d", nohash=True, ), + predicted_signal=dict( + argstr="-predicted_signal %s", + extensions=None, + ), shell=dict( argstr="-shell %s", sep=",", @@ -115,6 +119,9 @@ def test_EstimateFOD_outputs(): argstr="%s", extensions=None, ), + predicted_signal=dict( + extensions=None, + ), wm_odf=dict( argstr="%s", extensions=None, diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py b/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py index cbadd78fa6..7cf38faf8c 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py @@ -54,6 +54,10 @@ def test_FitTensor_inputs(): position=-1, usedefault=True, ), + predicted_signal=dict( + argstr="-predicted_signal %s", + extensions=None, + ), reg_term=dict( argstr="-regularisation %f", max_ver="0.3.13", @@ -71,6 +75,9 @@ def test_FitTensor_outputs(): out_file=dict( extensions=None, ), + predicted_signal=dict( + extensions=None, + ), ) outputs = FitTensor.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_SH2Amp.py b/nipype/interfaces/mrtrix3/tests/test_auto_SH2Amp.py new file mode 100644 index 0000000000..b2af3f51e6 --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_SH2Amp.py @@ -0,0 +1,54 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from ..utils import SH2Amp + + +def test_SH2Amp_inputs(): + input_map = dict( + args=dict( + argstr="%s", + ), + directions=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + nonnegative=dict( + argstr="-nonnegative", + ), + out_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + usedefault=True, + ), + ) + inputs = SH2Amp.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in 
list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_SH2Amp_outputs(): + output_map = dict( + out_file=dict( + extensions=None, + ), + ) + outputs = SH2Amp.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_SHConv.py b/nipype/interfaces/mrtrix3/tests/test_auto_SHConv.py new file mode 100644 index 0000000000..96e696768a --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_SHConv.py @@ -0,0 +1,51 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from ..utils import SHConv + + +def test_SHConv_inputs(): + input_map = dict( + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + out_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + usedefault=True, + ), + response=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + ) + inputs = SHConv.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_SHConv_outputs(): + output_map = dict( + out_file=dict( + extensions=None, + ), + ) + outputs = SHConv.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value From cd4bbc612a44ae8082de53c3249ed507be0fe3c6 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Thu, 3 Dec 2020 10:08:57 +1100 Subject: [PATCH 0918/1665] fixed up doctest failure --- nipype/interfaces/mrtrix3/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index 4af0b5380f..d2dd5c5906 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -814,7 +814,7 @@ class SHConv(CommandLine): >>> sh.inputs.in_file = 'csd.mif' >>> sh.inputs.response = 'response.txt' >>> sh.cmdline - 'shconv odf.mif response.txt sh.mif' + 'shconv csd.mif response.txt sh.mif' >>> sh.run() # doctest: +SKIP """ From 47fe00b387e205f3f884691a208ef7f20df4119f Mon Sep 17 00:00:00 2001 From: Xihe Xie Date: Tue, 8 Dec 2020 12:16:40 -0500 Subject: [PATCH 0919/1665] ENH: Add new `dwifslpreproc` interface for MRtrix3 (#3278) * added mrtrix3 dwifslpreproc as interface * use defaults added for outputs * Added dwifslpreproc interface to __init__ * edited doc string to correct order * added doctest skip * added MRtrix3 gradient empty test data * specified positions, requires for gradient exports * finalized docstring example and pytest * added entry to contribution list * simplify phase-encoding design input Moved `-rpe_` to `argstr` for simpler input. Co-authored-by: Chris Markiewicz * Apply suggestions from code review Fixed typos and improved `-eddy` and `-topup` optional inputs. 
Co-authored-by: Chris Markiewicz * Update nipype/interfaces/mrtrix3/preprocess.py Co-authored-by: Xihe Xie * fixed input positions according to review * fixed doc string for new positions Co-authored-by: Chris Markiewicz --- .zenodo.json | 5 + nipype/interfaces/mrtrix3/__init__.py | 1 + nipype/interfaces/mrtrix3/preprocess.py | 134 ++++++++++++++++++ .../mrtrix3/tests/test_auto_DWIPreproc.py | 54 +++++++ nipype/testing/data/grad.b | 0 5 files changed, 194 insertions(+) create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_DWIPreproc.py create mode 100644 nipype/testing/data/grad.b diff --git a/.zenodo.json b/.zenodo.json index 6672d5bde0..e525f14326 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -788,6 +788,11 @@ { "name": "Tambini, Arielle" }, + { + "affiliation": "Weill Cornell Medicine", + "name": "Xie, Xihe", + "orcid": "0000-0001-6595-2473" + }, { "affiliation": "Max Planck Institute for Human Cognitive and Brain Sciences, Leipzig, Germany.", "name": "Mihai, Paul Glad", diff --git a/nipype/interfaces/mrtrix3/__init__.py b/nipype/interfaces/mrtrix3/__init__.py index f60e837310..53af56ef65 100644 --- a/nipype/interfaces/mrtrix3/__init__.py +++ b/nipype/interfaces/mrtrix3/__init__.py @@ -18,6 +18,7 @@ ResponseSD, ACTPrepareFSL, ReplaceFSwithFIRST, + DWIPreproc, DWIDenoise, MRDeGibbs, DWIBiasCorrect, diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py index aa3347c7f9..ef67365f0b 100644 --- a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ -233,6 +233,7 @@ class DWIBiasCorrect(MRTrix3Base): _cmd = "dwibiascorrect" input_spec = DWIBiasCorrectInputSpec output_spec = DWIBiasCorrectOutputSpec + def _format_arg(self, name, trait_spec, value): if name in ("use_ants", "use_fsl"): ver = self.version @@ -241,6 +242,139 @@ def _format_arg(self, name, trait_spec, value): return f"-{trait_spec.argstr}" return super()._format_arg(name, trait_spec, value) + +class DWIPreprocInputSpec(MRTrix3BaseInputSpec): + in_file = File( + exists=True, argstr="%s", position=0, mandatory=True, desc="input DWI image" + ) + out_file = File( + "preproc.mif", + argstr="%s", + mandatory=True, + position=1, + usedefault=True, + desc="output file after preprocessing", + ) + rpe_options = traits.Enum( + "none", + "pair", + "all", + "header", + argstr="-rpe_%s", + position=2, + mandatory=True, + desc='Specify acquisition phase-encoding design. "none" for no reversed phase-encoding image, "all" for all DWIs have opposing phase-encoding acquisition, "pair" for using a pair of b0 volumes for inhomogeneity field estimation only, and "header" for phase-encoding information can be found in the image header(s)', + ) + pe_dir = traits.Str( + argstr="-pe_dir %s", + mandatory=True, + desc="Specify the phase encoding direction of the input series, can be a signed axis number (e.g. -0, 1, +2), an axis designator (e.g. RL, PA, IS), or NIfTI axis codes (e.g. i-, j, k)", + ) + ro_time = traits.Float( + argstr="-readout_time %f", + desc="Total readout time of input series (in seconds)", + ) + in_epi = File( + exists=True, + argstr="-se_epi %s", + desc="Provide an additional image series consisting of spin-echo EPI images, which is to be used exclusively by topup for estimating the inhomogeneity field (i.e. 
it will not form part of the output image series)", + ) + align_seepi = traits.Bool( + argstr="-align_seepi", + desc="Achieve alignment between the SE-EPI images used for inhomogeneity field estimation, and the DWIs", + ) + eddy_options = traits.Str( + argstr='-eddy_options "%s"', + desc="Manually provide additional command-line options to the eddy command", + ) + topup_options = traits.Str( + argstr='-topup_options "%s"', + desc="Manually provide additional command-line options to the topup command", + ) + export_grad_mrtrix = traits.Bool( + argstr="-export_grad_mrtrix", desc="export new gradient files in mrtrix format" + ) + export_grad_fsl = traits.Bool( + argstr="-export_grad_fsl", desc="export gradient files in FSL format" + ) + out_grad_mrtrix = File( + "grad.b", + argstr="%s", + usedefault=True, + requires=["export_grad_mrtrix"], + desc="name of new gradient file", + ) + out_grad_fsl = traits.Tuple( + File("grad.bvecs", usedefault=True, desc="bvecs"), + File("grad.bvals", usedefault=True, desc="bvals"), + argstr="%s, %s", + requires=["export_grad_fsl"], + desc="Output (bvecs, bvals) gradients FSL format", + ) + + +class DWIPreprocOutputSpec(TraitedSpec): + out_file = File(argstr="%s", desc="output preprocessed image series") + out_grad_mrtrix = File( + "grad.b", + argstr="%s", + usedefault=True, + desc="preprocessed gradient file in mrtrix3 format", + ) + out_fsl_bvec = File( + "grad.bvecs", + argstr="%s", + usedefault=True, + desc="exported fsl gradient bvec file", + ) + out_fsl_bval = File( + "grad.bvals", + argstr="%s", + usedefault=True, + desc="exported fsl gradient bval file", + ) + + +class DWIPreproc(MRTrix3Base): + """ + Perform diffusion image pre-processing using FSL's eddy tool; including inhomogeneity distortion correction using FSL's topup tool if possible + + For more information, see + + + Example + ------- + + >>> import nipype.interfaces.mrtrix3 as mrt + >>> preproc = mrt.DWIPreproc() + >>> preproc.inputs.in_file = 'dwi.mif' + >>> preproc.inputs.rpe_options = 'none' + >>> preproc.inputs.out_file = "preproc.mif" + >>> preproc.inputs.eddy_options = '--slm=linear --repol' # linear second level model and replace outliers + >>> preproc.inputs.export_grad_mrtrix = True # export final gradient table in MRtrix format + >>> preproc.inputs.ro_time = 0.165240 # 'TotalReadoutTime' in BIDS JSON metadata files + >>> preproc.inputs.pe_dir = 'j' # 'PhaseEncodingDirection' in BIDS JSON metadata files + >>> preproc.cmdline + 'dwifslpreproc dwi.mif preproc.mif -rpe_none -eddy_options "--slm=linear --repol" -export_grad_mrtrix grad.b -pe_dir j -readout_time 0.165240' + >>> preproc.run() # doctest: +SKIP + """ + + _cmd = "dwifslpreproc" + input_spec = DWIPreprocInputSpec + output_spec = DWIPreprocOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs["out_file"] = op.abspath(self.inputs.out_file) + if self.inputs.export_grad_mrtrix: + outputs["out_grad_mrtrix"] = op.abspath(self.inputs.out_grad_mrtrix) + if self.inputs.export_grad_fsl: + outputs["out_fsl_bvec"] = op.abspath(self.inputs.out_grad_fsl[0]) + outputs["out_fsl_bval"] = op.abspath(self.inputs.out_grad_fsl[1]) + + return outputs + + class ResponseSDInputSpec(MRTrix3BaseInputSpec): algorithm = traits.Enum( "msmt_5tt", diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIPreproc.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIPreproc.py new file mode 100644 index 0000000000..76fae6548f --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIPreproc.py @@ -0,0 +1,54 @@ +# 
AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from ..preprocess import DWIPreproc + + +def test_DWIPreproc_inputs(): + input_map = dict( + align_seepi=dict(argstr="-align_seepi"), + args=dict(argstr="%s"), + bval_scale=dict(argstr="-bvalue_scaling %s"), + eddy_options=dict(argstr='-eddy_options "%s"'), + environ=dict(nohash=True, usedefault=True), + export_grad_fsl=dict(argstr="-export_grad_fsl"), + export_grad_mrtrix=dict(argstr="-export_grad_mrtrix"), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), + in_bval=dict(extensions=None), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), + in_epi=dict(argstr="-se_epi %s", extensions=None), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), + nthreads=dict(argstr="-nthreads %d", nohash=True), + out_file=dict( + argstr="%s", extensions=None, mandatory=True, position=1, usedefault=True + ), + out_grad_fsl=dict(argstr="%s, %s", requires=["export_grad_fsl"]), + out_grad_mrtrix=dict( + argstr="%s", + extensions=None, + requires=["export_grad_mrtrix"], + usedefault=True, + ), + pe_dir=dict(argstr="-pe_dir %s", mandatory=True), + ro_time=dict(argstr="-readout_time %f"), + rpe_options=dict(argstr="-rpe_%s", mandatory=True, position=2), + topup_options=dict(argstr='-topup_options "%s"'), + ) + inputs = DWIPreproc.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_DWIPreproc_outputs(): + output_map = dict( + out_file=dict(argstr="%s", extensions=None), + out_fsl_bval=dict(argstr="%s", extensions=None, usedefault=True), + out_fsl_bvec=dict(argstr="%s", extensions=None, usedefault=True), + out_grad_mrtrix=dict(argstr="%s", extensions=None, usedefault=True), + ) + outputs = DWIPreproc.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/testing/data/grad.b b/nipype/testing/data/grad.b new file mode 100644 index 0000000000..e69de29bb2 From 82d1ad2b90bd507d9a5491a64473f0b492341a6f Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sun, 20 Dec 2020 15:49:42 -0500 Subject: [PATCH 0920/1665] MNT: Drop support for numpy < 1.15.3 --- nipype/info.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/nipype/info.py b/nipype/info.py index c9e49f53cb..ef0817ba36 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -100,10 +100,9 @@ def get_nipype_gitversion(): # versions NIBABEL_MIN_VERSION = "2.1.0" NETWORKX_MIN_VERSION = "2.0" -NUMPY_MIN_VERSION = "1.13" # Numpy bug in python 3.7: # https://www.opensourceanswers.com/blog/you-shouldnt-use-python-37-for-data-science-right-now.html -NUMPY_MIN_VERSION_37 = "1.15.3" +NUMPY_MIN_VERSION = "1.15.3" SCIPY_MIN_VERSION = "0.14" TRAITS_MIN_VERSION = "4.6" DATEUTIL_MIN_VERSION = "2.2" @@ -138,8 +137,7 @@ def get_nipype_gitversion(): "click>=%s" % CLICK_MIN_VERSION, "networkx>=%s" % NETWORKX_MIN_VERSION, "nibabel>=%s" % NIBABEL_MIN_VERSION, - 'numpy>=%s ; python_version < "3.7"' % NUMPY_MIN_VERSION, - 'numpy>=%s ; python_version >= "3.7"' % NUMPY_MIN_VERSION_37, + 'numpy>=%s' % NUMPY_MIN_VERSION, "packaging", "prov>=%s" % PROV_VERSION, "pydot>=%s" % PYDOT_MIN_VERSION, From ed6ad57ca857f8d9715a285bfb83d4a1f1da0812 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Tue, 5 Jan 2021 08:01:18 -0500 Subject: [PATCH 0921/1665] CI: Build docker images with Python 3.8 --- docker/generate_dockerfiles.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/generate_dockerfiles.sh b/docker/generate_dockerfiles.sh index dd31b804ac..e8af4d0042 100755 --- a/docker/generate_dockerfiles.sh +++ b/docker/generate_dockerfiles.sh @@ -87,7 +87,7 @@ function generate_main_dockerfile() { --label maintainer="The nipype developers https://github.com/nipy/nipype" \ --env MKL_NUM_THREADS=1 \ OMP_NUM_THREADS=1 \ - --arg PYTHON_VERSION_MAJOR=3 PYTHON_VERSION_MINOR=6 BUILD_DATE VCS_REF VERSION \ + --arg PYTHON_VERSION_MAJOR=3 PYTHON_VERSION_MINOR=8 BUILD_DATE VCS_REF VERSION \ --user neuro \ --workdir /home/neuro \ --miniconda create_env=neuro \ From 931a766bba6361afd44daa2d15ddfae3a49c49a9 Mon Sep 17 00:00:00 2001 From: Tim Date: Mon, 11 Jan 2021 00:33:58 -0500 Subject: [PATCH 0922/1665] interface for R mostly a copy-paste of the matlab interface --- nipype/interfaces/r.py | 129 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 129 insertions(+) create mode 100644 nipype/interfaces/r.py diff --git a/nipype/interfaces/r.py b/nipype/interfaces/r.py new file mode 100644 index 0000000000..37010853d8 --- /dev/null +++ b/nipype/interfaces/r.py @@ -0,0 +1,129 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Interfaces to run R scripts.""" +import os + +from .. import config +from .base import ( + CommandLineInputSpec, + InputMultiPath, + isdefined, + CommandLine, + traits, + File, + Directory, +) + + +def get_r_command(): + if "NIPYPE_NO_R" in os.environ: + return None + try: + r_cmd = os.environ["RCMD"] + except: + r_cmd = "R" + return r_cmd + + +no_r = get_r_command() is None + + +class RInputSpec(CommandLineInputSpec): + """ Basic expected inputs to R interface """ + + script = traits.Str( + argstr='-e "%s"', desc="R code to run", mandatory=True, position=-1 + ) + # non-commandline options + rfile = traits.Bool(True, desc="Run R using R script", usedefault=True) + script_file = File( + "pyscript.R", usedefault=True, desc="Name of file to write R code to" + ) + + +class RCommand(CommandLine): + """Interface that runs R code + + >>> import nipype.interfaces.r as r + >>> r = r.RCommand(rfile=False) # don't write script file + >>> r.inputs.script = "Sys.getenv('USER')" + >>> out = r.run() # doctest: +SKIP + """ + + _cmd = "R" + _default_r_cmd = None + _default_rfile = None + input_spec = RInputSpec + + def __init__(self, r_cmd=None, **inputs): + """initializes interface to r + (default 'R') + """ + super(RCommand, self).__init__(**inputs) + if r_cmd and isdefined(r_cmd): + self._cmd = r_cmd + elif self._default_r_cmd: + self._cmd = self._default_r_cmd + + if self._default_rfile and not isdefined(self.inputs.rfile): + self.inputs.rfile = self._default_rfile + + # For r commands force all output to be returned since r + # does not have a clean way of notifying an error + self.terminal_output = "allatonce" + + @classmethod + def set_default_r_cmd(cls, r_cmd): + """Set the default R command line for R classes. + + This method is used to set values for all R + subclasses. However, setting this will not update the output + type for any existing instances. For these, assign the + .inputs.r_cmd. + """ + cls._default_r_cmd = r_cmd + + @classmethod + def set_default_rfile(cls, rfile): + """Set the default R script file format for R classes. 
+ + This method is used to set values for all R + subclasses. However, setting this will not update the output + type for any existing instances. For these, assign the + .inputs.rfile. + """ + cls._default_rfile = rfile + + def _run_interface(self, runtime): + self.terminal_output = "allatonce" + runtime = super(RCommand, self)._run_interface(runtime) + if "R code threw an exception" in runtime.stderr: + self.raise_exception(runtime) + return runtime + + def _format_arg(self, name, trait_spec, value): + if name in ["script"]: + argstr = trait_spec.argstr + return self._gen_r_command(argstr, value) + return super(RCommand, self)._format_arg(name, trait_spec, value) + + def _gen_r_command(self, argstr, script_lines): + """ Generates commands and, if rfile specified, writes it to disk.""" + if not self.inputs.rfile: + # replace newlines with ;, strip comments + script = "; ".join([ + line + for line in script_lines.split("\n") + if not line.strip().startswith("#") + ]) + # escape " and $ + script = script.replace('"','\\"') + script = script.replace('$','\\$') + else: + script_path = os.path.join(os.getcwd(), self.inputs.script_file) + with open(script_path, "wt") as rfile: + rfile.write(script_lines) + script = "source('%s')" % script_path + + return argstr % script From 533850b33117768e05ee8bc80f6dd46425cbea42 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A1tima=20Machado?= Date: Mon, 1 Mar 2021 15:21:38 +0000 Subject: [PATCH 0923/1665] Added cat12 interface. --- nipype/interfaces/cat12/__init__.py | 0 nipype/interfaces/cat12/preprocess.py | 216 ++++++++++++++++++++++++++ 2 files changed, 216 insertions(+) create mode 100644 nipype/interfaces/cat12/__init__.py create mode 100644 nipype/interfaces/cat12/preprocess.py diff --git a/nipype/interfaces/cat12/__init__.py b/nipype/interfaces/cat12/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py new file mode 100644 index 0000000000..5fd58f09a5 --- /dev/null +++ b/nipype/interfaces/cat12/preprocess.py @@ -0,0 +1,216 @@ +import os +import sys +import traits +from nipype.interfaces.base import InputMultiPath, TraitedSpec, isdefined +from nipype.interfaces.spm import SPMCommand +from nipype.interfaces.spm.base import SPMCommandInputSpec, ImageFileSPM, scans_for_fnames, scans_for_fname +from nipype.utils.filemanip import split_filename, fname_presuffix +from traits.trait_types import Int, File +from traits.trait_types import List + + +class CAT12SegmentInputSpec(SPMCommandInputSpec): + in_files = InputMultiPath(ImageFileSPM(exists=True), field="data", desc="file to segment", mandatory=True, + copyfile=False) + + tpm = InputMultiPath(ImageFileSPM(exists=True), field="tpm", desc="Tissue Probability Maps", mandatory=False, + copyfile=False) + + n_jobs = traits.trait_types.Int(1, usedefault=True, mandatory=True, field="nproc", desc="Number of threads") + use_prior = traits.trait_types.Str(field="useprior", usedefault=True) + + affine_regularization = traits.trait_types.Str(default_value="mni", + field="opts.affreg", usedefault=True) + + power_spm_inhomogeneity_correction = traits.trait_types.Float(default_value=0.5, field='opts.biasacc', + usedefault=True) + # Extended options for CAT12 preprocessing + affine_preprocessing = traits.trait_types.Int(1070, field="extopts.APP", usedefault=True) + initial_segmentation = traits.trait_types.Int(0, field="extopts.spm_kamap", usedefault=True) + local_adaptive_seg = traits.trait_types.Float(0.5, 
field="extopts.LASstr", usedefault=True) + skull_strip = traits.trait_types.Float(2, field="extopts.gcutstr", usedefault=True) + wm_hyper_intensity_correction = traits.trait_types.Int(1, field="extopts.WMHC", usedefault=True) + spatial_registration = traits.trait_types.Int(1, field="extopts.WMHC", usedefault=True) + voxel_size = traits.trait_types.Float(1.5, field="extopts.vox", usedefault=True) + internal_resampling_process = traits.trait_types.Tuple(traits.trait_types.Float(1), traits.trait_types.Float(0.1), + minlen=2, maxlen=2, + field="extopts.restypes.optimal", usedefault=True) + ignore_errors = traits.trait_types.Int(1, field="extopts.ignoreErrors", usedefault=True) + + # Writing options + surface_and_thickness_estimation = traits.trait_types.Int(1, field="surface", usedefault=True) + surface_measures = traits.trait_types.Int(1, field="output.surf_measures", usedefault=True) + + # Templates + neuromorphometrics = traits.trait_types.Bool(True, field="output.ROImenu.atlases.neuromorphometrics", + usedefault=True) + lpba40 = traits.trait_types.Bool(False, field="output.ROImenu.atlases.lpba40", usedefault=True) + cobra = traits.trait_types.Bool(True, field="output.ROImenu.atlases.hammers", usedefault=True) + hammers = traits.trait_types.Bool(False, field="output.ROImenu.atlases.cobra", usedefault=True) + own_atlas = InputMultiPath(ImageFileSPM(exists=True), field="output.ROImenu.atlases.ownatlas", + desc="Own Atlas", mandatory=False, copyfile=False) + + # Grey matter + gm_output_native = traits.trait_types.Bool(False, field="output.GM.native", usedefault=True) + gm_output_modulated = traits.trait_types.Bool(True, field="output.GM.mod", usedefault=True) + gm_output_dartel = traits.trait_types.Bool(False, field="output.GM.dartel", usedefault=True) + + # White matter + wm_output_native = traits.trait_types.Bool(False, field="output.WM.native", usedefault=True) + wm_output_modulated = traits.trait_types.Bool(True, field="output.WM.mod", usedefault=True) + wm_output_dartel = traits.trait_types.Bool(False, field="output.WM.dartel", usedefault=True) + + # CSF matter + csf_output_native = traits.trait_types.Bool(False, field="output.CSF.native", usedefault=True) + csf_output_modulated = traits.trait_types.Bool(True, field="output.CSF.mod", usedefault=True) + csf_output_dartel = traits.trait_types.Bool(False, field="output.CSF.dartel", usedefault=True) + + # Labels + label_native = traits.trait_types.Bool(False, field="output.label.native", usedefault=True) + label_warped = traits.trait_types.Bool(True, field="output.label.warped", usedefault=True) + label_dartel = traits.trait_types.Bool(False, field="output.label.dartel", usedefault=True) + output_labelnative = traits.trait_types.Bool(False, field="output.labelnative", usedefault=True) + + # Bias + save_bias_corrected = traits.trait_types.Bool(True, field="output.bias.warped", usedefault=True) + + # las + las_native = traits.trait_types.Bool(False, field="output.las.native", usedefault=True) + las_warped = traits.trait_types.Bool(True, field="output.las.warped", usedefault=True) + las_dartel = traits.trait_types.Bool(False, field="output.las.dartel", usedefault=True) + + # Jacobian Warped + jacobianwarped = traits.trait_types.Bool(True, field="output.jacobianwarped", usedefault=True) + + # Deformation Fields + warps = traits.trait_types.Tuple(traits.trait_types.Int(1), traits.trait_types.Int(0), minlen=2, maxlen=2, + field="output.warps", usedefault=True) + + +class CAT12SegmentOutputSpec(TraitedSpec): + 
########################################## + # Label XML files + ########################################## + label_files = List(File(exists=True)) + + label_rois = File(exists=True, desc="ROIs Volumes") + label_roi = File(exists=True, desc="ROI volumes") + + ########################################## + # MRI .nii files + ########################################## + + mri_images = List(File(exists=True)) + + # Grey Matter + gm_modulated_image = File(exists=True) + gm_dartel_image = File(exists=True) + gm_native_image = File(exists=True) + + # White Matter + wm_modulated_image = File(exists=True) + wm_dartel_image = File(exists=True) + wm_native_image = File(exists=True) + + # CSF + csf_modulated_image = File(exists=True) + csf_dartel_image = File(exists=True) + csf_native_image = File(exists=True) + + bias_corrected_image = File(exists=True) + ########################################## + # Surface files + ########################################## + + surface_files = List(File(exists=True)) + + # Right hemisphere + rh_central_surface = File(exists=True) + rh_sphere_surface = File(exists=True) + + # Left hemisphere + lh_central_surface = File(exists=True) + lh_sphere_surface = File(exists=True) + + # Report files + report_files = List(File(exists=True)) + report = File(exists=True) + + +class CAT12Segment(SPMCommand): + input_spec = CAT12SegmentInputSpec + output_spec = CAT12SegmentOutputSpec + + def __init__(self, **inputs): + _local_version = SPMCommand().version + if _local_version and "12." in _local_version: + self._jobtype = "tools" + self._jobname = "cat.estwrite" + + SPMCommand.__init__(self, **inputs) + + def _format_arg(self, opt, spec, val): + """Convert input to appropriate format for spm + """ + if opt in ["in_files"]: + if isinstance(val, list): + return scans_for_fnames(val) + else: + return scans_for_fname(val) + return super(CAT12Segment, self)._format_arg(opt, spec, val) + + def _list_outputs(self): + outputs = self._outputs().get() + f = self.inputs.in_files[0] + pth, base, ext = split_filename(f) + + outputs["mri_images"] = [os.path.join(os.path.join(pth, "mri"), f) for f in + os.listdir(os.path.join(pth, "mri")) + if os.path.isfile(os.path.join(os.path.join(pth, "mri"), f))] + + for tidx, tissue in enumerate(["gm", "wm", "csf"]): + + for idx, (image, prefix) in enumerate([("modulated", "mw"), ("dartel", "r"), ("native", "")]): + outtype = f'{tissue}_output_{image}' + if isdefined(getattr(self.inputs, outtype)) and getattr(self.inputs, outtype): + outfield = f'{tissue}_{image}_image' + prefix = os.path.join("mri", f'{prefix}p{tidx + 1}') + if image != "dartel": + outputs[outfield] = fname_presuffix(f, prefix=prefix) + else: + outputs[outfield] = fname_presuffix(f, prefix=prefix, suffix="_rigid") + + if isdefined(self.inputs.save_bias_corrected) and self.inputs.save_bias_corrected: + outputs["bias_corrected_image"] = fname_presuffix(f, prefix=os.path.join("mri", 'mi')) + + outputs["surface_files"] = [os.path.join(os.path.join(pth, "surf"), f) for f in + os.listdir(os.path.join(pth, "surf")) + if os.path.isfile(os.path.join(os.path.join(pth, "surf"), f))] + + for tidx, hemisphere in enumerate(["rh", "lh"]): + for idx, suffix in enumerate(["central", "sphere"]): + outfield = f'{hemisphere}_{suffix}_surface' + outputs[outfield] = fname_presuffix(f, prefix=os.path.join("surf", f'{hemisphere}.{suffix}.'), + suffix=".gii", use_ext=False) + + outputs["report_files"] = [os.path.join(os.path.join(pth, "report"), f) for f in + os.listdir(os.path.join(pth, "report")) + if 
os.path.isfile(os.path.join(os.path.join(pth, "report"), f))] + outputs[f'report'] = fname_presuffix(f, prefix=os.path.join("report", f'cat_'), suffix=".xml", use_ext=False) + + outputs["label_files"] = [os.path.join(os.path.join(pth, "label"), f) for f in + os.listdir(os.path.join(pth, "label")) + if os.path.isfile(os.path.join(os.path.join(pth, "label"), f))] + + outputs['label_rois'] = fname_presuffix(f, prefix=os.path.join("label", f'catROIs_'), suffix=".xml", + use_ext=False) + outputs['label_roi'] = fname_presuffix(f, prefix=os.path.join("label", f'catROI_'), suffix=".xml", + use_ext=False) + + return outputs + + +if __name__ == '__main__': + path_mr = sys.argv[1] + cat = CAT12Segment(in_files=path_mr) + cat.run() From eb80b27033fb268a939a6a848d0627d77499549e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A1tima=20Machado?= Date: Mon, 1 Mar 2021 16:02:10 +0000 Subject: [PATCH 0924/1665] Updated .zenodo.json. --- .zenodo.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.zenodo.json b/.zenodo.json index e525f14326..d539f2aa95 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -807,6 +807,11 @@ "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", "orcid": "0000-0002-5312-6729" + }, + { + "affiliation": "CIBIT, UC", + "name": "Machado, Fátima", + "orcid": "0000-0001-8878-1750" } ], "keywords": [ From a8ee022d6c3d0217aae9837147d16fad110f18db Mon Sep 17 00:00:00 2001 From: Ali Ghayoor Date: Wed, 21 Oct 2020 12:22:15 -0400 Subject: [PATCH 0925/1665] ENH: added afni NetCorr Afni 3dNetCorr is added to nipype. For more details, please see: https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dNetCorr.html --- nipype/interfaces/afni/__init__.py | 1 + nipype/interfaces/afni/preprocess.py | 73 ++++++++++++++++++++++++++++ 2 files changed, 74 insertions(+) diff --git a/nipype/interfaces/afni/__init__.py b/nipype/interfaces/afni/__init__.py index d5f2bb4361..3629090ac0 100644 --- a/nipype/interfaces/afni/__init__.py +++ b/nipype/interfaces/afni/__init__.py @@ -29,6 +29,7 @@ LFCD, Maskave, Means, + NetCorr, OutlierCount, QualityIndex, ROIStats, diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index 1d53aac98c..03bee38ef2 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -2556,6 +2556,79 @@ def _format_arg(self, name, trait_spec, value): return super(TCorrMap, self)._format_arg(name, trait_spec, value) +class NetCorrInputSpec(AFNICommandInputSpec): + in_file = File(exists=True, argstr="-inset %s", mandatory=True) + in_rois = File(exists=True, argstr="-in_rois %s", mandatory=True) + mask = File(exists=True, argstr="-mask %s") + weight_ts = File(exists=True, argstr="-weight_ts %s") + fish_z = traits.Bool(argstr="-fish_z") + part_corr = traits.Bool(argstr="-part_corr") + ts_out = traits.Bool(argstr="-ts_out") + ts_label = traits.Bool(argstr="-ts_label") + ts_indiv = traits.Bool(argstr="-ts_indiv") + ts_wb_corr = traits.Bool(argstr="-ts_wb_corr") + ts_wb_Z = traits.Bool(argstr="-ts_wb_Z") + ts_wb_strlabel = traits.Bool(argstr="-ts_wb_strlabel") + nifti = traits.Bool(argstr="-nifti") + output_mask_nonnull = traits.Bool(argstr="-output_mask_nonnull") + push_thru_many_zeros = traits.Bool(argstr="-push_thru_many_zeros") + ignore_LT = traits.Bool(argstr="-ignore_LT") + out_file = File( + name_template="%s_netcorr", + desc="output file name part", + argstr="-prefix %s", + position=1, + name_source="in_file", + ) + +class NetCorrOutputSpec(TraitedSpec): + out_matrix = File(desc="output text file for correlation stats") 
+ +class NetCorr(AFNICommand): + """Calculate correlation matrix of a set of ROIs (using mean time series of + each). Several networks may be analyzed simultaneously, one per brick. + + For complete details, see the `3dTcorrMap Documentation. + `_ + + Examples + -------- + >>> from nipype.interfaces import afni + >>> ncorr = afni.NetCorr() + >>> ncorr.inputs.in_file = 'functional.nii' + >>> ncorr.inputs.mask = 'mask.nii' + >>> ncorr.inputs.in_rois = 'rois.nii' + >>> ncorr.inputs.ts_wb_corr = True + >>> ncorr.inputs.ts_wb_Z = True + >>> ncorr.inputs.fish_z = True + >>> ncorr.inputs.prefix = 'sub0.tp1.ncorr' + >>> ncorr.cmdline # doctest: +SKIP + '3dNetCorr -prefix sub0.tp1.ncorr -inset functional.nii -mask mask.nii -in_rois rois.nii -ts_wb_corr -ts_wb_Z -fish_z' + >>> res = ncorr.run() # doctest: +SKIP + + """ + + _cmd = "3dNetCorr" + input_spec = NetCorrInputSpec + output_spec = NetCorrOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + + if not isdefined(self.inputs.out_file): + prefix = self._gen_fname(self.inputs.in_file, suffix="_netcorr") + else: + prefix = self.inputs.out_file + + # All outputs should be in the same directory as the prefix + out_dir = os.path.dirname(os.path.abspath(prefix)) + + outputs["out_matrix"] = ( + fname_presuffix(prefix, suffix="_000", use_ext=False, newpath=out_dir) + ".netcc" + ) + return outputs + + class TCorrelateInputSpec(AFNICommandInputSpec): xset = File( desc="input xset", From 6da8ee8865fa9b81a271884559769d042344f541 Mon Sep 17 00:00:00 2001 From: Ali Ghayoor Date: Mon, 1 Mar 2021 14:49:46 -0500 Subject: [PATCH 0926/1665] ENH: modified afni NetCorr development --- nipype/interfaces/afni/preprocess.py | 148 ++++++++++++++++++++++----- 1 file changed, 125 insertions(+), 23 deletions(-) diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index 03bee38ef2..9ca4c3948b 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -2557,25 +2557,127 @@ def _format_arg(self, name, trait_spec, value): class NetCorrInputSpec(AFNICommandInputSpec): - in_file = File(exists=True, argstr="-inset %s", mandatory=True) - in_rois = File(exists=True, argstr="-in_rois %s", mandatory=True) - mask = File(exists=True, argstr="-mask %s") - weight_ts = File(exists=True, argstr="-weight_ts %s") - fish_z = traits.Bool(argstr="-fish_z") - part_corr = traits.Bool(argstr="-part_corr") - ts_out = traits.Bool(argstr="-ts_out") - ts_label = traits.Bool(argstr="-ts_label") - ts_indiv = traits.Bool(argstr="-ts_indiv") - ts_wb_corr = traits.Bool(argstr="-ts_wb_corr") - ts_wb_Z = traits.Bool(argstr="-ts_wb_Z") - ts_wb_strlabel = traits.Bool(argstr="-ts_wb_strlabel") - nifti = traits.Bool(argstr="-nifti") - output_mask_nonnull = traits.Bool(argstr="-output_mask_nonnull") - push_thru_many_zeros = traits.Bool(argstr="-push_thru_many_zeros") - ignore_LT = traits.Bool(argstr="-ignore_LT") + in_file = File( + desc="input time series file (4D data set)", + exists=True, + argstr="-inset %s", + mandatory=True) + in_rois = File( + desc="input set of ROIs, each labelled with distinct integers", + exists=True, + argstr="-in_rois %s", + mandatory=True) + mask = File( + desc="can include a whole brain mask within which to " + "calculate correlation. Otherwise, data should be " + "masked already", + exists=True, + argstr="-mask %s") + weight_ts = File( + desc="input a 1D file WTS of weights that will be applied " + "multiplicatively to each ROI's average time series. 
" + "WTS can be a column- or row-file of values, but it " + "must have the same length as the input time series " + "volume. " + "If the initial average time series was A[n] for " + "n=0,..,(N-1) time points, then applying a set of " + "weights W[n] of the same length from WTS would " + "produce a new time series: B[n] = A[n] * W[n]", + exists=True, + argstr="-weight_ts %s") + fish_z = traits.Bool( + desc="switch to also output a matrix of Fisher Z-transform " + "values for the corr coefs (r): " + "Z = atanh(r) , " + "(with Z=4 being output along matrix diagonals where " + "r=1, as the r-to-Z conversion is ceilinged at " + "Z = atanh(r=0.999329) = 4, which is still *quite* a " + "high Pearson-r value", + argstr="-fish_z") + part_corr = traits.Bool( + desc="output the partial correlation matrix", + argstr="-part_corr") + ts_out = traits.Bool( + desc="switch to output the mean time series of the ROIs that " + "have been used to generate the correlation matrices. " + "Output filenames mirror those of the correlation " + "matrix files, with a '.netts' postfix", + argstr="-ts_out") + ts_label = traits.Bool( + desc="additional switch when using '-ts_out'. Using this " + "option will insert the integer ROI label at the start " + "of each line of the *.netts file created. Thus, for " + "a time series of length N, each line will have N+1 " + "numbers, where the first is the integer ROI label " + "and the subsequent N are scientific notation values", + argstr="-ts_label") + ts_indiv = traits.Bool( + desc="switch to create a directory for each network that " + "contains the average time series for each ROI in " + "individual files (each file has one line). " + "The directories are labelled PREFIX_000_INDIV/, " + "PREFIX_001_INDIV/, etc. (one per network). Within each " + "directory, the files are labelled ROI_001.netts, " + "ROI_002.netts, etc., with the numbers given by the " + "actual ROI integer labels", + argstr="-ts_indiv") + ts_wb_corr = traits.Bool( + desc="switch to create a set of whole brain correlation maps. " + "Performs whole brain correlation for each " + "ROI's average time series; this will automatically " + "create a directory for each network that contains the " + "set of whole brain correlation maps (Pearson 'r's). " + "The directories are labelled as above for '-ts_indiv' " + "Within each directory, the files are labelled " + "WB_CORR_ROI_001+orig, WB_CORR_ROI_002+orig, etc., with " + "the numbers given by the actual ROI integer labels", + argstr="-ts_wb_corr") + ts_wb_Z = traits.Bool( + desc="same as above in '-ts_wb_corr', except that the maps " + "have been Fisher transformed to Z-scores the relation: " + "Z=atanh(r). " + "To avoid infinities in the transform, Pearson values " + "are effectively capped at |r| = 0.999329 (where |Z| = 4.0). " + "Files are labelled WB_Z_ROI_001+orig, etc", + argstr="-ts_wb_Z") + ts_wb_strlabel = traits.Bool( + desc="by default, '-ts_wb_{corr,Z}' output files are named " + "using the int number of a given ROI, such as: " + "WB_Z_ROI_001+orig. " + "With this option, one can replace the int (such as '001') " + "with the string label (such as 'L-thalamus') " + "*if* one has a labeltable attached to the file", + argstr="-ts_wb_strlabel") + nifti = traits.Bool( + desc="output any correlation map files as NIFTI files " + "(default is BRIK/HEAD). 
Only useful if using " + "'-ts_wb_corr' and/or '-ts_wb_Z'", + argstr="-nifti") + output_mask_nonnull = traits.Bool( + desc="internally, this program checks for where there are " + "nonnull time series, because we don't like those, in " + "general. With this flag, the user can output the " + "determined mask of non-null time series.", + argstr="-output_mask_nonnull") + push_thru_many_zeros = traits.Bool( + desc="by default, this program will grind to a halt and " + "refuse to calculate if any ROI contains >10 percent " + "of voxels with null times series (i.e., each point is " + "0), as of April, 2017. This is because it seems most " + "likely that hidden badness is responsible. However, " + "if the user still wants to carry on the calculation " + "anyways, then this option will allow one to push on " + "through. However, if any ROI *only* has null time " + "series, then the program will not calculate and the " + "user will really, really, really need to address their masking", + argstr="-push_thru_many_zeros") + ignore_LT = traits.Bool( + desc="switch to ignore any label table labels in the " + "'-in_rois' file, if there are any labels attached", + argstr="-ignore_LT") out_file = File( - name_template="%s_netcorr", desc="output file name part", + name_template="%s_netcorr", argstr="-prefix %s", position=1, name_source="in_file", @@ -2588,8 +2690,8 @@ class NetCorr(AFNICommand): """Calculate correlation matrix of a set of ROIs (using mean time series of each). Several networks may be analyzed simultaneously, one per brick. - For complete details, see the `3dTcorrMap Documentation. - `_ + For complete details, see the `3dNetCorr Documentation + `_. Examples -------- @@ -2597,13 +2699,13 @@ class NetCorr(AFNICommand): >>> ncorr = afni.NetCorr() >>> ncorr.inputs.in_file = 'functional.nii' >>> ncorr.inputs.mask = 'mask.nii' - >>> ncorr.inputs.in_rois = 'rois.nii' + >>> ncorr.inputs.in_rois = 'maps.nii' >>> ncorr.inputs.ts_wb_corr = True >>> ncorr.inputs.ts_wb_Z = True >>> ncorr.inputs.fish_z = True - >>> ncorr.inputs.prefix = 'sub0.tp1.ncorr' - >>> ncorr.cmdline # doctest: +SKIP - '3dNetCorr -prefix sub0.tp1.ncorr -inset functional.nii -mask mask.nii -in_rois rois.nii -ts_wb_corr -ts_wb_Z -fish_z' + >>> ncorr.inputs.out_file = 'sub0.tp1.ncorr' + >>> ncorr.cmdline + '3dNetCorr -prefix sub0.tp1.ncorr -inset functional.nii -mask mask.nii -in_rois maps.nii -ts_wb_corr -ts_wb_Z -fish_z' >>> res = ncorr.run() # doctest: +SKIP """ From 380d798c1e7b5211add0577ccc31f39cc1afac39 Mon Sep 17 00:00:00 2001 From: Ali Ghayoor Date: Tue, 2 Mar 2021 15:40:56 -0500 Subject: [PATCH 0927/1665] ENH: AFNI NetCorr sorted commandline Co-authored-by: Chris Markiewicz --- nipype/interfaces/afni/preprocess.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index 9ca4c3948b..1b55e87e10 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -2705,7 +2705,7 @@ class NetCorr(AFNICommand): >>> ncorr.inputs.fish_z = True >>> ncorr.inputs.out_file = 'sub0.tp1.ncorr' >>> ncorr.cmdline - '3dNetCorr -prefix sub0.tp1.ncorr -inset functional.nii -mask mask.nii -in_rois maps.nii -ts_wb_corr -ts_wb_Z -fish_z' + '3dNetCorr -prefix sub0.tp1.ncorr -fish_z -inset functional.nii -in_rois maps.nii -mask mask.nii -ts_wb_Z -ts_wb_corr' >>> res = ncorr.run() # doctest: +SKIP """ From bfcfa94e3668ff57c5a7139dd76571e0cde429c8 Mon Sep 17 00:00:00 2001 From: Ali Ghayoor Date: Wed, 3 Mar 2021 19:12:36 -0500 Subject: 
[PATCH 0928/1665] ENH: added the correlation maps to the output afni NetCorr --- nipype/interfaces/afni/preprocess.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index 1b55e87e10..ada49fc9ab 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -2684,8 +2684,9 @@ class NetCorrInputSpec(AFNICommandInputSpec): ) class NetCorrOutputSpec(TraitedSpec): - out_matrix = File(desc="output text file for correlation stats") - + out_corr_matrix = File(desc="output correlation matrix between ROIs written to a text file with .netcc suffix") + out_corr_maps = traits.List(File(), desc="output correlation maps in Pearson and/or Z-scores") + class NetCorr(AFNICommand): """Calculate correlation matrix of a set of ROIs (using mean time series of each). Several networks may be analyzed simultaneously, one per brick. @@ -2715,6 +2716,8 @@ class NetCorr(AFNICommand): output_spec = NetCorrOutputSpec def _list_outputs(self): + import glob + outputs = self.output_spec().get() if not isdefined(self.inputs.out_file): @@ -2723,11 +2726,13 @@ def _list_outputs(self): prefix = self.inputs.out_file # All outputs should be in the same directory as the prefix - out_dir = os.path.dirname(os.path.abspath(prefix)) + odir = os.path.dirname(os.path.abspath(prefix)) + outputs["out_corr_matrix"] = glob.glob(os.path.join(odir, "*.netcc"))[0] + + if isdefined(self.inputs.ts_wb_corr) or isdefined(self.inputs.ts_Z_corr): + corrdir = os.path.join(odir, prefix + "_000_INDIV") + outputs["out_corr_maps"] = glob.glob(os.path.join(corrdir, "*.nii.gz")) - outputs["out_matrix"] = ( - fname_presuffix(prefix, suffix="_000", use_ext=False, newpath=out_dir) + ".netcc" - ) return outputs From c00dd2fdce6b1ec09f2e97afd8a9276e5c8b3f25 Mon Sep 17 00:00:00 2001 From: Ali Ghayoor Date: Wed, 3 Mar 2021 21:27:58 -0500 Subject: [PATCH 0929/1665] ENH: added test_auto_NetCorr This file was automatically generated by running "make specs" --- .../afni/tests/test_auto_NetCorr.py | 99 +++++++++++++++++++ 1 file changed, 99 insertions(+) create mode 100644 nipype/interfaces/afni/tests/test_auto_NetCorr.py diff --git a/nipype/interfaces/afni/tests/test_auto_NetCorr.py b/nipype/interfaces/afni/tests/test_auto_NetCorr.py new file mode 100644 index 0000000000..e613dc13eb --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_NetCorr.py @@ -0,0 +1,99 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from ..preprocess import NetCorr + + +def test_NetCorr_inputs(): + input_map = dict( + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fish_z=dict( + argstr="-fish_z", + ), + ignore_LT=dict( + argstr="-ignore_LT", + ), + in_file=dict( + argstr="-inset %s", + extensions=None, + mandatory=True, + ), + in_rois=dict( + argstr="-in_rois %s", + extensions=None, + mandatory=True, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + nifti=dict( + argstr="-nifti", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr="-prefix %s", + extensions=None, + name_source="in_file", + name_template="%s_netcorr", + position=1, + ), + output_mask_nonnull=dict( + argstr="-output_mask_nonnull", + ), + outputtype=dict(), + part_corr=dict( + argstr="-part_corr", + ), + push_thru_many_zeros=dict( + argstr="-push_thru_many_zeros", + ), + ts_indiv=dict( + argstr="-ts_indiv", + ), + ts_label=dict( + argstr="-ts_label", + ), + ts_out=dict( + 
argstr="-ts_out", + ), + ts_wb_Z=dict( + argstr="-ts_wb_Z", + ), + ts_wb_corr=dict( + argstr="-ts_wb_corr", + ), + ts_wb_strlabel=dict( + argstr="-ts_wb_strlabel", + ), + weight_ts=dict( + argstr="-weight_ts %s", + extensions=None, + ), + ) + inputs = NetCorr.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_NetCorr_outputs(): + output_map = dict( + out_corr_maps=dict(), + out_corr_matrix=dict( + extensions=None, + ), + ) + outputs = NetCorr.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value From 83828578cfd991d9140c5e22d995b78fc4256241 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Thu, 4 Mar 2021 08:13:41 +0200 Subject: [PATCH 0930/1665] Added copyfile=False metadata to BETInputSpec in_file specification and applied black formatting. --- nipype/interfaces/fsl/preprocess.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/nipype/interfaces/fsl/preprocess.py b/nipype/interfaces/fsl/preprocess.py index f2fbe8c079..f54b6c6e18 100644 --- a/nipype/interfaces/fsl/preprocess.py +++ b/nipype/interfaces/fsl/preprocess.py @@ -35,6 +35,7 @@ class BETInputSpec(FSLCommandInputSpec): argstr="%s", position=0, mandatory=True, + copyfile=False, ) out_file = File( desc="name of output skull stripped image", @@ -1309,10 +1310,7 @@ def _list_outputs(self): if key == "out_intensitymap_file" and isdefined(outputs[key]): basename = FNIRT.intensitymap_file_basename(outputs[key]) - outputs[key] = [ - outputs[key], - "%s.txt" % basename, - ] + outputs[key] = [outputs[key], "%s.txt" % basename] return outputs def _format_arg(self, name, spec, value): From 6c889e518314615963cf85ceffaac4c2ed46ca5d Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Thu, 4 Mar 2021 09:56:12 +0200 Subject: [PATCH 0931/1665] Changed BET interface to generate relative and list absolute out_file path. --- nipype/interfaces/fsl/preprocess.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/fsl/preprocess.py b/nipype/interfaces/fsl/preprocess.py index f54b6c6e18..64bc8ad54c 100644 --- a/nipype/interfaces/fsl/preprocess.py +++ b/nipype/interfaces/fsl/preprocess.py @@ -169,11 +169,11 @@ def _gen_outfilename(self): out_file = self.inputs.out_file if not isdefined(out_file) and isdefined(self.inputs.in_file): out_file = self._gen_fname(self.inputs.in_file, suffix="_brain") - return os.path.abspath(out_file) + return op.relpath(out_file, start=os.getcwd()) def _list_outputs(self): outputs = self.output_spec().get() - outputs["out_file"] = self._gen_outfilename() + outputs["out_file"] = os.path.abspath(self._gen_outfilename()) basename = os.path.basename(outputs["out_file"]) cwd = os.path.dirname(outputs["out_file"]) From 7a42b631c8a0991852dfdd5176b8c3bcd4bd36f4 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Thu, 4 Mar 2021 10:37:51 +0200 Subject: [PATCH 0932/1665] Updated BET tests to assert relative out_file path. 
--- nipype/interfaces/fsl/tests/test_preprocess.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/nipype/interfaces/fsl/tests/test_preprocess.py b/nipype/interfaces/fsl/tests/test_preprocess.py index 438f3f0ec4..dd4d827ce7 100644 --- a/nipype/interfaces/fsl/tests/test_preprocess.py +++ b/nipype/interfaces/fsl/tests/test_preprocess.py @@ -41,8 +41,7 @@ def test_bet(setup_infile): # Test generated outfile name better.inputs.in_file = tmp_infile outfile = fsl_name(better, "foo_brain") - outpath = os.path.join(os.getcwd(), outfile) - realcmd = "bet %s %s" % (tmp_infile, outpath) + realcmd = "bet %s %s" % (tmp_infile, outfile) assert better.cmdline == realcmd # Test specified outfile name outfile = fsl_name(better, "/newdata/bar") @@ -79,12 +78,11 @@ def func(): # test each of our arguments better = fsl.BET() outfile = fsl_name(better, "foo_brain") - outpath = os.path.join(os.getcwd(), outfile) for name, settings in list(opt_map.items()): better = fsl.BET(**{name: settings[1]}) # Add mandatory input better.inputs.in_file = tmp_infile - realcmd = " ".join([better.cmd, tmp_infile, outpath, settings[0]]) + realcmd = " ".join([better.cmd, tmp_infile, outfile, settings[0]]) assert better.cmdline == realcmd From e66bc41eb1b6636be3c84e1b006ebf4411982644 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Thu, 4 Mar 2021 10:56:33 +0200 Subject: [PATCH 0933/1665] Changed BET interface's _gen_outfilename() to return relative path only for generated out_file. --- nipype/interfaces/fsl/preprocess.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/fsl/preprocess.py b/nipype/interfaces/fsl/preprocess.py index 64bc8ad54c..0f7e402445 100644 --- a/nipype/interfaces/fsl/preprocess.py +++ b/nipype/interfaces/fsl/preprocess.py @@ -169,7 +169,8 @@ def _gen_outfilename(self): out_file = self.inputs.out_file if not isdefined(out_file) and isdefined(self.inputs.in_file): out_file = self._gen_fname(self.inputs.in_file, suffix="_brain") - return op.relpath(out_file, start=os.getcwd()) + return op.relpath(out_file, start=os.getcwd()) + return out_file def _list_outputs(self): outputs = self.output_spec().get() From f1091633c0b36e36971296d48651148dd5d7a70d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A1tima=20Machado?= Date: Mon, 8 Mar 2021 18:24:30 +0000 Subject: [PATCH 0934/1665] Added interfaces to extract measures from the surface using CAT12. 
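A minimal workflow sketch (illustrative only; the node names and the "structural.nii" input are placeholders, and running it requires MATLAB/SPM with the CAT12 toolbox) of how the surface interface added below can be fed by CAT12Segment when both run as Nodes, which is the case the docstrings refer to when they say surface_files is only required inside a Node:

    # Hypothetical wiring; field names follow the specs introduced in this series,
    # everything else is an assumption.
    import nipype.pipeline.engine as pe
    from nipype.interfaces.cat12.preprocess import CAT12Segment
    from nipype.interfaces.cat12.surface import ExtractAdditionalSurfaceParameters

    seg = pe.Node(CAT12Segment(), name="cat12_segment")
    seg.inputs.in_files = "structural.nii"   # placeholder; file must exist

    surf = pe.Node(ExtractAdditionalSurfaceParameters(), name="surf_measures")

    wf = pe.Workflow(name="cat12_surface_sketch")
    wf.connect([
        (seg, surf, [("lh_central_surface", "left_central_surfaces"),
                     ("surface_files", "surface_files")]),
    ])
    # wf.run()  # needs SPM + CAT12 available to nipype

The connected field names (lh_central_surface, surface_files, left_central_surfaces) are taken from the input and output specs in these patches; the workflow itself is only a sketch.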
--- nipype/interfaces/cat12/surface.py | 234 +++++++++++++++++++++++++++++ 1 file changed, 234 insertions(+) create mode 100644 nipype/interfaces/cat12/surface.py diff --git a/nipype/interfaces/cat12/surface.py b/nipype/interfaces/cat12/surface.py new file mode 100644 index 0000000000..34b7dbf892 --- /dev/null +++ b/nipype/interfaces/cat12/surface.py @@ -0,0 +1,234 @@ +import os +import sys + +import traits +from traits.trait_base import _Undefined +from traits.trait_types import List + +from nipype.interfaces.base import File, InputMultiPath, TraitedSpec +from nipype.interfaces.spm import SPMCommand +from nipype.interfaces.spm.base import SPMCommandInputSpec +from nipype.utils.filemanip import split_filename + + +class ExtractAdditionalSurfaceParametersInputSpec(SPMCommandInputSpec): + left_central_surfaces = InputMultiPath(File(exists=True), field="data_surf", + desc="Left and central surfaces files", mandatory=True, copyfile=False) + surface_files = InputMultiPath(File(exists=True), + desc="All surface files", mandatory=False, copyfile=False) + + gyrification = traits.trait_types.Bool(True, field="GI", usedefault=True, + desc="Extract gyrification index (GI) based on absolute mean curvature. The" + " method is described in Luders et al. Neuroimage, 29:1224-1230, 2006") + gmv = traits.trait_types.Bool(True, field="gmv", usedefault=True, desc="Extract volume") + area = traits.trait_types.Bool(True, field="area", usedefault=True, desc="Extract area surface") + depth = traits.trait_types.Bool(False, field="SD", usedefault=True, + desc="Extract sulcus depth based on euclidian distance between the central " + "surface anf its convex hull.") + fractal_dimension = traits.trait_types.Bool(False, field="FD", usedefault=True, + desc="Extract cortical complexity (fractal dimension) which is " + "described in Yotter ar al. Neuroimage, 56(3): 961-973, 2011") + + +class ExtractAdditionalSurfaceParametersOutputSpec(TraitedSpec): + lh_extracted_files = List(File(exists=True)) + rh_extracted_files = List(File(exists=True)) + + lh_gyrification = List(File(exists=True)) + rh_gyrification = List(File(exists=True)) + + lh_gyrification_resampled = List(File(exists=True)) + rh_gyrification_resampled = List(File(exists=True)) + + lh_gmv = List(File(exists=True)) + rh_gmv = List(File(exists=True)) + + lh_area = List(File(exists=True)) + rh_area = List(File(exists=True)) + + lh_depth = List(File(exists=True)) + rh_depth = List(File(exists=True)) + + lh_fractaldimension = List(File(exists=True)) + rh_fractaldimension = List(File(exists=True)) + + +class ExtractAdditionalSurfaceParameters(SPMCommand): + """ + Additional surface parameters can be extracted that can be used for statistical analysis, such as: + + * Central surfaces + * Surface area + * Surface GM volume + * Gyrification Index + * Sulcus depth + * Toro's gyrification index + * Shaer's local gyrification index + * Laplacian gyrification indeces + * Addicional surfaces + * Measure normalization + * Lazy processing + + http://www.neuro.uni-jena.de/cat12/CAT12-Manual.pdf#page=53 + + Examples + -------- + # Set the left surface files, both will be processed + lh_path_central = ... 
+ + # Put here all surface files generated by CAT12 Segment, this is only required if the this approach is putted in + # a Node + surf_files = [rh.sphere..., lh.sphere..., etc] + + extract_additional_measures = ExtractAdditionalSurfaceParameters(left_central_surfaces=lh_path_central, + surface_files=surf_files) + extract_additional_measures.run() + + """ + input_spec = ExtractAdditionalSurfaceParametersInputSpec + output_spec = ExtractAdditionalSurfaceParametersOutputSpec + + def __init__(self, **inputs): + _local_version = SPMCommand().version + if _local_version and "12." in _local_version: + self._jobtype = "tools" + self._jobname = "cat.stools.surfextract" + + super().__init__(**inputs) + + def _list_outputs(self): + outputs = self._outputs().get() + + names_outputs = [(self.inputs.gyrification, 'gyrification'), (self.inputs.gmv, 'gmv'), + (self.inputs.area, 'area'), (self.inputs.depth, 'depth'), + (self.inputs.fractal_dimension, 'fractaldimension')] + + for filename in self.inputs.left_central_surfaces: + pth, base, ext = split_filename(filename) + # The first part of the filename is rh.central or lh.central + original_filename = base.split(".", 2)[-1] + for i, (extracted_parameter, parameter_name) in enumerate(names_outputs): + if extracted_parameter: + for hemisphere in ["rh", "lh"]: + all_files_hemisphere = hemisphere + '_extracted_files' + name_hemisphere = hemisphere + "_" + parameter_name + if isinstance(outputs[name_hemisphere], _Undefined): + outputs[name_hemisphere] = [] + if isinstance(outputs[all_files_hemisphere], _Undefined): + outputs[all_files_hemisphere] = [] + generated_filename = ".".join([hemisphere, parameter_name, original_filename]) + outputs[name_hemisphere].append(os.path.join(pth, generated_filename)) + + # Add all hemisphere files into one list, this is important because only the left hemisphere + # files are used as input in the Surface ROI Tools, fpr instance. + outputs[all_files_hemisphere].append(os.path.join(pth, generated_filename)) + + return outputs + + def _format_arg(self, opt, spec, val): + if opt == "left_central_surfaces": + return Cell2Str(val) + return super(ExtractAdditionalSurfaceParameters, self)._format_arg(opt, spec, val) + + +class ExtractROIBasedSurfaceMeasuresInputSpec(SPMCommandInputSpec): + # Only these files are given as input, yet the right hemisphere (rh) files should also be on the processing + # directory. + + surface_files = InputMultiPath(File(exists=True), desc="Surface data files. This variable should be a list " + "with all", mandatory=False, copyfile=False) + lh_roi_atlas = InputMultiPath(File(exists=True), field="rdata", desc="(Left) ROI Atlas. These are the ROI's ", + mandatory=True, copyfile=False) + + lh_surface_measure = InputMultiPath(File(exists=True), field="cdata", desc="(Left) Surface data files. ", + mandatory=True, copyfile=False) + rh_surface_measure = InputMultiPath(File(exists=True), desc="(Right) Surface data files.", + mandatory=False, copyfile=False) + + rh_roi_atlas = InputMultiPath(File(exists=True), desc="(Right) ROI Atlas. These are the ROI's ", + mandatory=False, copyfile=False) + + +class ExtractROIBasedSurfaceMeasures(SPMCommand): + """ + Extract ROI-based surface values + While ROI-based values for VBM (volume) data are automatically saved in the label folder as XML file it is + necessary to additionally extract these values for surface data (except for thickness which is automatically + extracted during segmentation). This has to be done after preprocessing the data and creating cortical surfaces. 
+ + You can extract ROI-based values for cortical thickness but also for any other surface parameter that was extracted + using the Extract Additional Surface Parameters such as volume, area, depth, gyrification and fractal dimension. + + + http://www.neuro.uni-jena.de/cat12/CAT12-Manual.pdf#page=53 + + Examples + -------- + # Template surface files + lh_atlas = sys.argv[2] + rh_atlas = sys.argv[3] + + # Put here all surface files generated by CAT12 Segment, this is only required if the this approach is putted in + # a Node + surf_files = [rh.sphere..., lh.sphere..., etc] + + # Set the path to the left hemisphere measure file, both will be processed + lh_measure = .... + + extract_additional_measures = ExtractROIBasedSurfaceMeasures(surface_files=surf_files, + lh_surface_measure=lh_measure, + lh_roi_atlas=lh_atlas, + rh_roi_atlas=rh_atlas) + extract_additional_measures.run() + + + """ + + input_spec = ExtractROIBasedSurfaceMeasuresInputSpec + output_spec = None + + def __init__(self, **inputs): + _local_version = SPMCommand().version + if _local_version and "12." in _local_version: + self._jobtype = "tools" + self._jobname = "cat.stools.surf2roi" + + SPMCommand.__init__(self, **inputs) + + def _format_arg(self, opt, spec, val): + if opt == "lh_surface_measure": + return NestedCell(val) + elif opt == "lh_roi_atlas": + return Cell2Str(val) + + return super(ExtractROIBasedSurfaceMeasures, self)._format_arg(opt, spec, val) + + def _list_outputs(self): + pass + + +class Cell: + def __init__(self, arg): + self.arg = arg + + def to_string(self): + if isinstance(self.arg, list): + v = '\n'.join([f"'{el}'" for el in self.arg]) + else: + v = self.arg + return v + + +class NestedCell(Cell): + + def __str__(self): + return "{{%s}}" % self.to_string() + + +class Cell2Str(Cell): + + def __str__(self): + """Convert input to appropriate format for cat12 + """ + return "{%s}" % self.to_string() + From d8ca991d249f5a9436991e8b95d5a4c4213dff41 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A1tima=20Machado?= Date: Tue, 9 Mar 2021 10:02:38 +0000 Subject: [PATCH 0935/1665] Improved documentation of the surface measures extraction. --- nipype/interfaces/cat12/surface.py | 47 +++++++++++++++--------------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/nipype/interfaces/cat12/surface.py b/nipype/interfaces/cat12/surface.py index 34b7dbf892..08cc2b2d7a 100644 --- a/nipype/interfaces/cat12/surface.py +++ b/nipype/interfaces/cat12/surface.py @@ -73,16 +73,16 @@ class ExtractAdditionalSurfaceParameters(SPMCommand): Examples -------- - # Set the left surface files, both will be processed - lh_path_central = ... 
- - # Put here all surface files generated by CAT12 Segment, this is only required if the this approach is putted in - # a Node - surf_files = [rh.sphere..., lh.sphere..., etc] - - extract_additional_measures = ExtractAdditionalSurfaceParameters(left_central_surfaces=lh_path_central, - surface_files=surf_files) - extract_additional_measures.run() + >>> # Set the left surface files, both will be processed + >>> lh_path_central = 'lh.central.subject_filename.gii' + >>> # Put here all surface files generated by CAT12 Segment, this is only required if the this approach is putted in + >>> surf_files = ["lh.sphere.reg.subject_filename.gii", "rh.sphere.reg.subject_filename.gii", \ + "lh.sphere.subject_filename.gii", "rh.sphere.subject_filename.gii", \ + 'rh.central.subject_filename.gii', \ + 'lh.pbt.subject_filename', 'rh.pbt.subject_filename'] + >>> extract_additional_measures = ExtractAdditionalSurfaceParameters(left_central_surfaces=lh_path_central, \ + surface_files=surf_files) + >>> extract_additional_measures.run() # doctest: +SKIP """ input_spec = ExtractAdditionalSurfaceParametersInputSpec @@ -164,22 +164,23 @@ class ExtractROIBasedSurfaceMeasures(SPMCommand): Examples -------- - # Template surface files - lh_atlas = sys.argv[2] - rh_atlas = sys.argv[3] - - # Put here all surface files generated by CAT12 Segment, this is only required if the this approach is putted in - # a Node - surf_files = [rh.sphere..., lh.sphere..., etc] - + >>> # Template surface files + >>> lh_atlas = "lh.aparc_a2009s.freesurfer.annot" + >>> rh_atlas = "rh.aparc_a2009s.freesurfer.annot" + + >>> # Put here all surface files generated by CAT12 Segment, this is only required if the this approach is putted in a Node + >>> surf_files = ["lh.sphere.reg.subject_filename.gii", "rh.sphere.reg.subject_filename.gii", \ + "lh.sphere.subject_filename.gii", "rh.sphere.subject_filename.gii", \ + 'lh.central.subject_filename.gii', 'rh.central.subject_filename.gii', \ + 'lh.pbt.subject_filename', 'rh.pbt.subject_filename'] # Set the path to the left hemisphere measure file, both will be processed - lh_measure = .... + >>> lh_measure = "lh.area.subject_filename" - extract_additional_measures = ExtractROIBasedSurfaceMeasures(surface_files=surf_files, - lh_surface_measure=lh_measure, - lh_roi_atlas=lh_atlas, + >>> extract_additional_measures = ExtractROIBasedSurfaceMeasures(surface_files=surf_files, \ + lh_surface_measure=lh_measure, \ + lh_roi_atlas=lh_atlas, \ rh_roi_atlas=rh_atlas) - extract_additional_measures.run() + >>> extract_additional_measures.run() # doctest: +SKIP """ From c906740d8ea66f951866adc36e4d30a878fc402a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A1tima=20Machado?= Date: Tue, 9 Mar 2021 11:31:48 +0000 Subject: [PATCH 0936/1665] - Added examples to the segmentation. --- nipype/interfaces/cat12/preprocess.py | 222 +++++++++++++++++++++----- 1 file changed, 185 insertions(+), 37 deletions(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index 5fd58f09a5..eb3c3c5d4b 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -10,35 +10,159 @@ class CAT12SegmentInputSpec(SPMCommandInputSpec): + """ + CAT12: Segmentation + This toolbox is an extension to the default segmentation in SPM12, but uses a completely different segmentation + approach. + The segmentation approach is based on an Adaptive Maximum A Posterior (MAP) technique without the need for a priori + information about tissue probabilities. 
That is, the Tissue Probability Maps (TPM) are not used constantly in the + sense of the classical Unified Segmentation approach (Ashburner et. al. 2005), but just for spatial normalization. + The following AMAP estimation is adaptive in the sense that local variations of the parameters (i.e., means and + variance) are modeled as slowly varying spatial functions (Rajapakse et al. 1997). This not only accounts for + intensity inhomogeneities but also for other local variations of intensity. + Additionally, the segmentation approach uses a Partial Volume Estimation (PVE) with a simplified mixed model of at + most two tissue types (Tohka et al. 2004). We start with an initial segmentation into three pure classes: gray + matter (GM), white matter (WM), and cerebrospinal fluid (CSF) based on the above described AMAP estimation. The + initial segmentation is followed by a PVE of two additional mixed classes: GM-WM and GM-CSF. This results in an + estimation of the amount (or fraction) of each pure tissue type present in every voxel (as single voxels - given by + Another important extension to the SPM12 segmentation is the integration of the Dartel or Geodesic Shooting + registration into the toolbox by an already existing Dartel/Shooting template in MNI space. This template was + derived from 555 healthy control subjects of the IXI-database (http://www.brain-development.org) and provides the + several Dartel or Shooting iterations. Thus, for the majority of studies the creation of sample-specific templates + is not necessary anymore and is mainly recommended for children data.'}; + + http://www.neuro.uni-jena.de/cat12/CAT12-Manual.pdf#page=15 + + Examples + -------- + path_mr = 'structural.nii' + cat = CAT12Segment(in_files=path_mr) + cat.run() # doctest: +SKIP + """ + in_files = InputMultiPath(ImageFileSPM(exists=True), field="data", desc="file to segment", mandatory=True, copyfile=False) - tpm = InputMultiPath(ImageFileSPM(exists=True), field="tpm", desc="Tissue Probability Maps", mandatory=False, + help_tpm = 'Tissue Probability Map. Select the tissue probability image that includes 6 tissue probability ' \ + 'classes for (1) grey matter, (2) white matter, (3) cerebrospinal fluid, (4) bone, (5) non-brain soft ' \ + 'tissue, and (6) the background. CAT uses the TPM only for the initial SPM segmentation.' + tpm = InputMultiPath(ImageFileSPM(exists=True), field="tpm", desc=help_tpm, mandatory=False, copyfile=False) n_jobs = traits.trait_types.Int(1, usedefault=True, mandatory=True, field="nproc", desc="Number of threads") use_prior = traits.trait_types.Str(field="useprior", usedefault=True) + affine_reg_help = 'Affine Regularization. The procedure is a local optimisation, so it needs reasonable initial ' \ + 'starting estimates. Images should be placed in approximate alignment using the Display ' \ + 'function of SPM before beginning. A Mutual Information affine registration with the tissue ' \ + 'probability maps (D''Agostino et al, 2004) is used to achieve approximate alignment.' affine_regularization = traits.trait_types.Str(default_value="mni", - field="opts.affreg", usedefault=True) + field="opts.affreg", usedefault=True, desc=affine_reg_help) + bias_acc_help = "Strength of the SPM inhomogeneity (bias) correction that simultaneously controls the SPM biasreg, " \ + "biasfwhm, samp (resolution), and tol (iteration) parameter." 
power_spm_inhomogeneity_correction = traits.trait_types.Float(default_value=0.5, field='opts.biasacc', - usedefault=True) + usedefault=True, + desc=bias_acc_help) # Extended options for CAT12 preprocessing - affine_preprocessing = traits.trait_types.Int(1070, field="extopts.APP", usedefault=True) - initial_segmentation = traits.trait_types.Int(0, field="extopts.spm_kamap", usedefault=True) - local_adaptive_seg = traits.trait_types.Float(0.5, field="extopts.LASstr", usedefault=True) - skull_strip = traits.trait_types.Float(2, field="extopts.gcutstr", usedefault=True) - wm_hyper_intensity_correction = traits.trait_types.Int(1, field="extopts.WMHC", usedefault=True) - spatial_registration = traits.trait_types.Int(1, field="extopts.WMHC", usedefault=True) - voxel_size = traits.trait_types.Float(1.5, field="extopts.vox", usedefault=True) + help_app = 'Affine registration and SPM preprocessing can fail in some subjects with deviating anatomy (e.g. ' \ + 'other species/neonates) or in images with strong signal inhomogeneities, or untypical intensities ' \ + '(e.g. synthetic images). An initial bias correction can help to reduce such problems (see details ' \ + 'below). Recommended are the "default" and "full" option.' + affine_preprocessing = traits.trait_types.Int(1070, field="extopts.APP", desc=help_app, usedefault=True) + + help_initial_seg = 'In rare cases the Unified Segmentation can fail in highly abnormal brains, where e.g. the ' \ + 'cerebrospinal fluid of superlarge ventricles (hydrocephalus) were classified as white matter.' \ + ' However, if the affine registration is correct, the AMAP segmentation with an ' \ + 'prior-independent k-means initialization can be used to replace the SPM brain tissue ' \ + 'classification. Moreover, if the default Dartel and Shooting registrations will fail then the' \ + ' "Optimized Shooting - superlarge ventricles" option for "Spatial registration" is required! ' \ + 'Values: \nnone: 0;\nlight: 1;\nfull: 2;\ndefault: 1070.' + initial_segmentation = traits.trait_types.Int(0, field="extopts.spm_kamap", desc=help_initial_seg, usedefault=True) + + help_las = 'Additionally to WM-inhomogeneities, GM intensity can vary across different regions such as the motor' \ + ' cortex, the basal ganglia, or the occipital lobe. These changes have an anatomical background ' \ + '(e.g. iron content, myelinization), but are dependent on the MR-protocol and often lead to ' \ + 'underestimation of GM at higher intensities and overestimation of CSF at lower intensities. ' \ + 'Therefore, a local intensity transformation of all tissue classes is used to reduce these effects in ' \ + 'the image. This local adaptive segmentation (LAS) is applied before the final AMAP segmentation.' \ + 'Possible Values: \nSPM Unified Segmentation: 0 \nk-means AMAP: 2' + local_adaptive_seg = traits.trait_types.Float(0.5, field="extopts.LASstr", usedefault=True, desc=help_las) + + help_gcutstr = 'Method of initial skull-stripping before AMAP segmentation. The SPM approach works quite stable ' \ + 'for the majority of data. However, in some rare cases parts of GM (i.e. in frontal lobe) might ' \ + 'be cut. If this happens the GCUT approach is a good alternative. GCUT is a graph-cut/region-' \ + 'growing approach starting from the WM area. APRG (adaptive probability region-growing) is a new' \ + ' method that refines the probability maps of the SPM approach by region-growing techniques of ' \ + 'the gcut approach with a final surface-based optimization strategy. 
This is currently the method ' \ + 'with the most accurate and reliable results. If you use already skull-stripped data you can turn' \ + ' off skull-stripping although this is automaticaly detected in most cases. Please note that the' \ + ' choice of the skull-stripping method will also influence the estimation of TIV, because the' \ + ' methods mainly differ in the handling of the outer CSF around the cortical surface. ' \ + '\nPossible Values:\n - none (already skull-stripped): -1;\n - SPM approach: 0; ' \ + '\n - GCUT approach: 0.50; \n - APRG approach: 2' + skull_strip = traits.trait_types.Float(2, field="extopts.gcutstr", desc=help_gcutstr, usedefault=True) + + help_wmhc = 'WARNING: Please note that the detection of WM hyperintensies is still under development and does ' \ + 'not have the same accuracy as approaches that additionally consider FLAIR images (e.g. Lesion ' \ + 'Segmentation Toolbox)! In aging or (neurodegenerative) diseases WM intensity can be reduced ' \ + 'locally in T1 or increased in T2/PD images. These so-called WM hyperintensies (WMHs) can lead to ' \ + 'preprocessing errors. Large GM areas next to the ventricle can cause normalization problems. ' \ + 'Therefore, a temporary correction for normalization is useful if WMHs are expected. CAT allows ' \ + 'different ways to handle WMHs: ' \ + '\n0) No Correction (handled as GM). \n1) Temporary (internal) correction as WM for spatial ' \ + 'normalization and estimation of cortical thickness. \n2) Permanent correction to WM. ' + wm_hyper_intensity_correction = traits.trait_types.Int(1, field="extopts.WMHC", desc=help_wmhc, usedefault=True) + + help_vox = 'The (isotropic) voxel sizes of any spatially normalised written images. A non-finite value will be ' \ + 'replaced by the average voxel size of the tissue probability maps used by the segmentation.' + voxel_size = traits.trait_types.Float(1.5, field="extopts.vox", desc=help_vox, usedefault=True) + + help_resampling = 'Internal resampling for preprocessing.\n The default fixed image resolution offers a good ' \ + 'trade-off between optimal quality and preprocessing time and memory demands. Standard ' \ + 'structural data with a voxel resolution around 1 mm or even data with high in-plane resolution' \ + ' and large slice thickness (e.g. 0.5x0.5x1.5 mm) will benefit from this setting. If you have' \ + ' higher native resolutions the highres option "Fixed 0.8 mm" will sometimes offer slightly' \ + ' better preprocessing quality with an increase of preprocessing time and memory demands. In' \ + ' case of even higher resolutions and high signal-to-noise ratio (e.g. for 7 T data) the ' \ + '"Best native" option will process the data on the highest native resolution. I.e. a resolution' \ + ' of 0.4x0.7x1.0 mm will be interpolated to 0.4x0.4x0.4 mm. A tolerance range of 0.1 mm is used' \ + ' to avoid interpolation artifacts, i.e. a resolution of 0.95x1.01x1.08 mm will not be ' \ + 'interpolated in case of the "Fixed 1.0 mm"! This "optimal" option prefers an isotropic voxel ' \ + 'size with at least 1.1 mm that is controlled by the median voxel size and a volume term that ' \ + 'penalizes highly anisotropic voxels.' 
\ + 'Values:\nOptimal: [1.0 0.1]\nFixed 1.0 mm: [1.0 0.1];\nFixed 0.8 mm:[0.8 0.1]' \ + '\nBest native: [0.5 0.1]' internal_resampling_process = traits.trait_types.Tuple(traits.trait_types.Float(1), traits.trait_types.Float(0.1), minlen=2, maxlen=2, - field="extopts.restypes.optimal", usedefault=True) - ignore_errors = traits.trait_types.Int(1, field="extopts.ignoreErrors", usedefault=True) + field="extopts.restypes.optimal", desc="help_resampling", + usedefault=True) + errors_help = 'Error handling.\nTry to catch preprocessing errors and continue with the next data set or ignore ' \ + 'all warnings (e.g., bad intensities) and use an experimental pipeline which is still in ' \ + 'development. In case of errors, CAT continues with the next subject if this option is enabled. If ' \ + 'the experimental option with backup functions is selected and warnings occur, CAT will try to use' \ + ' backup routines and skip some processing steps which require good T1 contrasts (e.g., LAS). If ' \ + 'you want to avoid processing of critical data and ensure that only the main pipeline is used then' \ + ' select the option "Ignore errors (continue with the next subject)". It is strongly recommended to' \ + ' check for preprocessing problems, especially with non-T1 contrasts. ' \ + '\nValues:\nnone: 0,\ndefault: 1,\ndetails: 2.' + ignore_errors = traits.trait_types.Int(1, field="extopts.ignoreErrors", desc=errors_help, usedefault=True) # Writing options - surface_and_thickness_estimation = traits.trait_types.Int(1, field="surface", usedefault=True) + help_surf = 'Surface and thickness estimation. \nUse projection-based thickness (PBT) (Dahnke et al. 2012) to' \ + ' estimate cortical thickness and to create the central cortical surface for left and right ' \ + 'hemisphere. Surface reconstruction includes topology correction (Yotter et al. 2011), spherical ' \ + 'inflation (Yotter et al.) and spherical registration. Additionally you can also estimate surface ' \ + 'parameters such as gyrification, cortical complexity or sulcal depth that can be subsequently ' \ + 'analyzed at each vertex of the surface. Please note, that surface reconstruction and spherical ' \ + 'registration additionally requires about 20-60 min of computation time. A fast (1-3 min) surface ' \ + 'pipeline is available for visual preview (e.g., to check preprocessing quality) in the ' \ + 'cross-sectional, but not in the longitudinal pipeline. Only the initial surfaces are created with a' \ + ' lower resolution and without topology correction, spherical mapping and surface registration. ' \ + 'Please note that the files with the estimated surface thickness can therefore not be used for ' \ + 'further analysis! For distinction, these files contain "preview" in their filename and they' \ + ' are not available as batch dependencies objects. ' + surface_and_thickness_estimation = traits.trait_types.Int(1, field="surface", desc=help_surf, usedefault=True) surface_measures = traits.trait_types.Int(1, field="output.surf_measures", usedefault=True) # Templates @@ -50,41 +174,71 @@ class CAT12SegmentInputSpec(SPMCommandInputSpec): own_atlas = InputMultiPath(ImageFileSPM(exists=True), field="output.ROImenu.atlases.ownatlas", desc="Own Atlas", mandatory=False, copyfile=False) + dartel_help = 'This option is to export data into a form that can be used with DARTEL. The SPM default is to ' \ + 'only apply rigid body transformation. 
However, a more appropriate option is to apply affine ' \ + 'transformation, because the additional scaling of the images requires less deformations to ' \ + 'non-linearly register brains to the template.' + # Grey matter - gm_output_native = traits.trait_types.Bool(False, field="output.GM.native", usedefault=True) - gm_output_modulated = traits.trait_types.Bool(True, field="output.GM.mod", usedefault=True) - gm_output_dartel = traits.trait_types.Bool(False, field="output.GM.dartel", usedefault=True) + gm_desc = 'Options to save grey matter images.' + gm_output_native = traits.trait_types.Bool(False, field="output.GM.native", usedefault=True, desc=gm_desc) + gm_output_modulated = traits.trait_types.Bool(True, field="output.GM.mod", usedefault=True, desc=gm_desc) + gm_output_dartel = traits.trait_types.Bool(False, field="output.GM.dartel", usedefault=True, desc=gm_desc) # White matter - wm_output_native = traits.trait_types.Bool(False, field="output.WM.native", usedefault=True) - wm_output_modulated = traits.trait_types.Bool(True, field="output.WM.mod", usedefault=True) - wm_output_dartel = traits.trait_types.Bool(False, field="output.WM.dartel", usedefault=True) + wm_desc = 'Options to save white matter images.' + wm_output_native = traits.trait_types.Bool(False, field="output.WM.native", usedefault=True, desc=wm_desc) + wm_output_modulated = traits.trait_types.Bool(True, field="output.WM.mod", usedefault=True, desc=wm_desc) + wm_output_dartel = traits.trait_types.Bool(False, field="output.WM.dartel", usedefault=True, desc=wm_desc) # CSF matter - csf_output_native = traits.trait_types.Bool(False, field="output.CSF.native", usedefault=True) - csf_output_modulated = traits.trait_types.Bool(True, field="output.CSF.mod", usedefault=True) - csf_output_dartel = traits.trait_types.Bool(False, field="output.CSF.dartel", usedefault=True) + csf_desc = 'Options to save CSF images.' + csf_output_native = traits.trait_types.Bool(False, field="output.CSF.native", usedefault=True, desc=csf_desc) + csf_output_modulated = traits.trait_types.Bool(True, field="output.CSF.mod", usedefault=True, desc=csf_desc) + csf_output_dartel = traits.trait_types.Bool(False, field="output.CSF.dartel", usedefault=True, desc=csf_desc) # Labels - label_native = traits.trait_types.Bool(False, field="output.label.native", usedefault=True) - label_warped = traits.trait_types.Bool(True, field="output.label.warped", usedefault=True) - label_dartel = traits.trait_types.Bool(False, field="output.label.dartel", usedefault=True) - output_labelnative = traits.trait_types.Bool(False, field="output.labelnative", usedefault=True) + label_desc = 'This is the option to save a labeled version of your segmentations for fast visual comparision. ' \ + 'Labels are saved as Partial Volume Estimation (PVE) values with different mix classes for GM-WM ' \ + '(2.5) and GM-CSF (1.5). 
BG=0, CSF=1, GM=2, WM=3, WMH=4 (if WMHC=3), SL=1.5 (if SLC)' + label_native = traits.trait_types.Bool(False, field="output.label.native", usedefault=True, desc=label_desc) + label_warped = traits.trait_types.Bool(True, field="output.label.warped", usedefault=True, desc=label_desc) + label_dartel = traits.trait_types.Bool(False, field="output.label.dartel", usedefault=True, desc=label_desc) + output_labelnative = traits.trait_types.Bool(False, field="output.labelnative", usedefault=True, desc=label_desc) # Bias save_bias_corrected = traits.trait_types.Bool(True, field="output.bias.warped", usedefault=True) # las - las_native = traits.trait_types.Bool(False, field="output.las.native", usedefault=True) - las_warped = traits.trait_types.Bool(True, field="output.las.warped", usedefault=True) - las_dartel = traits.trait_types.Bool(False, field="output.las.dartel", usedefault=True) + las_desc = 'This is the option to save a bias, noise, and local intensity corrected version of the original T1' \ + ' image. MR images are usually corrupted by a smooth, spatially varying artifact that modulates the' \ + ' intensity of the image (bias). These artifacts, although not usually a problem for visual ' \ + 'inspection, can impede automated processing of the images. The bias corrected version should have ' \ + 'more uniform intensities within the different types of tissues and can be saved in native space ' \ + 'and/or normalised. Noise is corrected by an adaptive non-local mean (NLM) filter (Manjon 2008, ' \ + 'Medical Image Analysis 12).' + las_native = traits.trait_types.Bool(False, field="output.las.native", usedefault=True, desc=las_desc) + las_warped = traits.trait_types.Bool(True, field="output.las.warped", usedefault=True, desc=las_desc) + las_dartel = traits.trait_types.Bool(False, field="output.las.dartel", usedefault=True, desc=las_desc) # Jacobian Warped - jacobianwarped = traits.trait_types.Bool(True, field="output.jacobianwarped", usedefault=True) + help_jacobian = 'This is the option to save the Jacobian determinant, which expresses local volume changes. This' \ + ' image can be used in a pure deformation based morphometry (DBM) design. Please note that the' \ + ' affine part of the deformation field is ignored. Thus, there is no need for any additional' \ + ' correction for different brain sizes using ICV.' + jacobianwarped = traits.trait_types.Bool(True, field="output.jacobianwarped", usedefault=True, desc=help_jacobian) # Deformation Fields + help_warp = 'Deformation fields can be saved to disk, and used by the Deformations Utility and/or applied to ' \ + 'coregistered data from other modalities (e.g. fMRI). For spatially normalising images to MNI space,' \ + ' you will need the forward deformation, whereas for spatially normalising (eg) GIFTI surface files,' \ + ' you''ll need the inverse. It is also possible to transform data in MNI space on to the individual' \ + ' subject, which also requires the inverse transform. Deformations are saved as .nii files, which' \ + ' contain three volumes to encode the x, y and z coordinates.' 
\ + '\nValues: No:[0 0];\nImage->Template (forward): [1 0];\nTemplate->Image (inverse): [0 1]; ' \ + '\ninverse + forward: [1 1]' warps = traits.trait_types.Tuple(traits.trait_types.Int(1), traits.trait_types.Int(0), minlen=2, maxlen=2, - field="output.warps", usedefault=True) + field="output.warps", usedefault=True, desc=help_warp) class CAT12SegmentOutputSpec(TraitedSpec): @@ -208,9 +362,3 @@ def _list_outputs(self): use_ext=False) return outputs - - -if __name__ == '__main__': - path_mr = sys.argv[1] - cat = CAT12Segment(in_files=path_mr) - cat.run() From 2885499e7fe935a96d9ed4660596359e472e1a4c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A1tima=20Machado?= Date: Tue, 9 Mar 2021 11:49:50 +0000 Subject: [PATCH 0937/1665] Improved documentation. --- nipype/interfaces/cat12/preprocess.py | 48 +++++++++++++-------------- nipype/interfaces/cat12/surface.py | 27 +++++++-------- 2 files changed, 36 insertions(+), 39 deletions(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index eb3c3c5d4b..767ad58f29 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -35,9 +35,9 @@ class CAT12SegmentInputSpec(SPMCommandInputSpec): Examples -------- - path_mr = 'structural.nii' - cat = CAT12Segment(in_files=path_mr) - cat.run() # doctest: +SKIP + >>> path_mr = 'structural.nii' + >>> cat = CAT12Segment(in_files=path_mr) + >>> cat.run() # doctest: +SKIP """ in_files = InputMultiPath(ImageFileSPM(exists=True), field="data", desc="file to segment", mandatory=True, @@ -245,50 +245,50 @@ class CAT12SegmentOutputSpec(TraitedSpec): ########################################## # Label XML files ########################################## - label_files = List(File(exists=True)) + label_files = List(File(exists=True), desc="Files with the labeled version of the segmentations.") - label_rois = File(exists=True, desc="ROIs Volumes") - label_roi = File(exists=True, desc="ROI volumes") + label_rois = File(exists=True, desc="Files with labeled version of segmentations of ROIs Volumes") + label_roi = File(exists=True, desc="Files with labeled version of segmentations of ROI Volumes") ########################################## # MRI .nii files ########################################## - mri_images = List(File(exists=True)) + mri_images = List(File(exists=True), desc="Different segmented images.") # Grey Matter - gm_modulated_image = File(exists=True) - gm_dartel_image = File(exists=True) - gm_native_image = File(exists=True) + gm_modulated_image = File(exists=True, desc="Grey matter modulated image.") + gm_dartel_image = File(exists=True, desc="Grey matter dartel image.") + gm_native_image = File(exists=True, desc="Grey matter native space.") # White Matter - wm_modulated_image = File(exists=True) - wm_dartel_image = File(exists=True) - wm_native_image = File(exists=True) + wm_modulated_image = File(exists=True, desc="White matter modulated image.") + wm_dartel_image = File(exists=True, desc="White matter dartel image.") + wm_native_image = File(exists=True, desc="White matter in native space.") # CSF - csf_modulated_image = File(exists=True) - csf_dartel_image = File(exists=True) - csf_native_image = File(exists=True) + csf_modulated_image = File(exists=True, desc="CSF modulated image.") + csf_dartel_image = File(exists=True, desc="CSF dartel image.") + csf_native_image = File(exists=True, desc="CSF in native space.") - bias_corrected_image = File(exists=True) + bias_corrected_image = File(exists=True, desc="Bias corrected image") 
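# Editorial sketch, not part of this diff: a typical way the output traits documented
# in this spec are consumed after a run. 'structural.nii' is a hypothetical input and
# running the interface assumes MATLAB/SPM with the CAT12 toolbox installed.
from nipype.interfaces.cat12.preprocess import CAT12Segment

seg = CAT12Segment(in_files='structural.nii')
res = seg.run()
gm = res.outputs.gm_modulated_image   # modulated, warped grey matter (CAT12 typically writes mwp1*.nii)
qc = res.outputs.report               # XML report, useful for quality control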
########################################## # Surface files ########################################## - surface_files = List(File(exists=True)) + surface_files = List(File(exists=True), desc="Surface files") # Right hemisphere - rh_central_surface = File(exists=True) - rh_sphere_surface = File(exists=True) + rh_central_surface = File(exists=True, desc="Central right hemisphere files") + rh_sphere_surface = File(exists=True, desc="Sphere right hemisphere files") # Left hemisphere - lh_central_surface = File(exists=True) - lh_sphere_surface = File(exists=True) + lh_central_surface = File(exists=True, desc="Central left hemisphere files") + lh_sphere_surface = File(exists=True, desc="Sphere left hemisphere files") # Report files - report_files = List(File(exists=True)) - report = File(exists=True) + report_files = List(File(exists=True), desc="Report files.") + report = File(exists=True, desc="Report file.") class CAT12Segment(SPMCommand): diff --git a/nipype/interfaces/cat12/surface.py b/nipype/interfaces/cat12/surface.py index 08cc2b2d7a..312245b8ce 100644 --- a/nipype/interfaces/cat12/surface.py +++ b/nipype/interfaces/cat12/surface.py @@ -31,26 +31,23 @@ class ExtractAdditionalSurfaceParametersInputSpec(SPMCommandInputSpec): class ExtractAdditionalSurfaceParametersOutputSpec(TraitedSpec): - lh_extracted_files = List(File(exists=True)) - rh_extracted_files = List(File(exists=True)) + lh_extracted_files = List(File(exists=True), desc="Files of left Hemisphere extracted measures") + rh_extracted_files = List(File(exists=True), desc="Files of right Hemisphere extracted measures") - lh_gyrification = List(File(exists=True)) - rh_gyrification = List(File(exists=True)) + lh_gyrification = List(File(exists=True), desc="Gyrification of left Hemisphere") + rh_gyrification = List(File(exists=True), desc="Gyrification of right Hemisphere") - lh_gyrification_resampled = List(File(exists=True)) - rh_gyrification_resampled = List(File(exists=True)) + lh_gmv = List(File(exists=True), desc="Grey matter volume of left Hemisphere") + rh_gmv = List(File(exists=True), desc="Grey matter volume of right Hemisphere") - lh_gmv = List(File(exists=True)) - rh_gmv = List(File(exists=True)) + lh_area = List(File(exists=True), desc="Area of left Hemisphere") + rh_area = List(File(exists=True), desc="Area of right Hemisphere") - lh_area = List(File(exists=True)) - rh_area = List(File(exists=True)) + lh_depth = List(File(exists=True), desc="Depth of left Hemisphere") + rh_depth = List(File(exists=True), desc="Depth of right Hemisphere") - lh_depth = List(File(exists=True)) - rh_depth = List(File(exists=True)) - - lh_fractaldimension = List(File(exists=True)) - rh_fractaldimension = List(File(exists=True)) + lh_fractaldimension = List(File(exists=True), desc="Fractal Dimension of left Hemisphere") + rh_fractaldimension = List(File(exists=True), desc="Fractal Dimension of right Hemisphere") class ExtractAdditionalSurfaceParameters(SPMCommand): From 98f118dcaf959fc77240853224961beb2771d928 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A1tima=20Machado?= Date: Tue, 9 Mar 2021 12:02:20 +0000 Subject: [PATCH 0938/1665] Added outputs to ExtractROIBasedSurfaceMeasures interface. 
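A minimal usage sketch (added editorially, not part of this commit) of what the new output spec exposes after a run. The filenames are the placeholder test data introduced later in this series, and running the interface assumes MATLAB/SPM with CAT12 installed.

    from nipype.interfaces.cat12.surface import ExtractROIBasedSurfaceMeasures

    roi = ExtractROIBasedSurfaceMeasures(lh_roi_atlas='lh.aparc_a2009s.freesurfer.annot',
                                         rh_roi_atlas='rh.aparc_a2009s.freesurfer.annot',
                                         lh_surface_measure='lh.area.structural')
    res = roi.run()
    print(res.outputs.label_files)  # ROI tables collected from the sibling 'label' directory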
--- nipype/interfaces/cat12/preprocess.py | 6 +++--- nipype/interfaces/cat12/surface.py | 16 ++++++++++++++-- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index 767ad58f29..c6746f110e 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -245,10 +245,10 @@ class CAT12SegmentOutputSpec(TraitedSpec): ########################################## # Label XML files ########################################## - label_files = List(File(exists=True), desc="Files with the labeled version of the segmentations.") + label_files = List(File(exists=True), desc="Files with the measures extracted for OI ands ROIs") - label_rois = File(exists=True, desc="Files with labeled version of segmentations of ROIs Volumes") - label_roi = File(exists=True, desc="Files with labeled version of segmentations of ROI Volumes") + label_rois = File(exists=True, desc="Files with thickness values of ROIs.") + label_roi = File(exists=True, desc="Files with thickness values of ROI.") ########################################## # MRI .nii files diff --git a/nipype/interfaces/cat12/surface.py b/nipype/interfaces/cat12/surface.py index 312245b8ce..aab38d1aa0 100644 --- a/nipype/interfaces/cat12/surface.py +++ b/nipype/interfaces/cat12/surface.py @@ -146,6 +146,10 @@ class ExtractROIBasedSurfaceMeasuresInputSpec(SPMCommandInputSpec): mandatory=False, copyfile=False) +class ExtractROIBasedSurfaceMeasuresOutputSpec(TraitedSpec): + label_files = List(File(exists=True), desc="Files with the measures extracted for ROIs.") + + class ExtractROIBasedSurfaceMeasures(SPMCommand): """ Extract ROI-based surface values @@ -183,7 +187,7 @@ class ExtractROIBasedSurfaceMeasures(SPMCommand): """ input_spec = ExtractROIBasedSurfaceMeasuresInputSpec - output_spec = None + output_spec = ExtractROIBasedSurfaceMeasuresOutputSpec def __init__(self, **inputs): _local_version = SPMCommand().version @@ -202,7 +206,15 @@ def _format_arg(self, opt, spec, val): return super(ExtractROIBasedSurfaceMeasures, self)._format_arg(opt, spec, val) def _list_outputs(self): - pass + outputs = self._outputs().get() + + outputs["label_files"] = [] + for f in self.inputs.lh_roi_atlas: + pth, base, ext = split_filename(f) + + outputs["label_files"].extend([os.path.join(os.path.join(pth, "label"), f) for f in + os.listdir(os.path.join(pth, "label")) + if os.path.isfile(os.path.join(os.path.join(pth, "label"), f))]) class Cell: From 7713f788845faab77c11925711551c0dfdde95a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A1tima=20Machado?= Date: Tue, 9 Mar 2021 14:20:47 +0000 Subject: [PATCH 0939/1665] Added files to test the interfaces. 
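For context (editorial note, not part of this commit): the new test files are zero-byte placeholders, which is enough because, as I understand the doctest setup, nipype doctests run with nipype/testing/data as the working directory and only need `File(exists=True)` traits to validate. A sketch that reproduces such placeholders:

    from pathlib import Path

    data_dir = Path('nipype/testing/data')   # repository-relative path (assumed)
    for name in ('lh.central.structural.gii', 'lh.pbt.structural', 'lh.area.structural'):
        (data_dir / name).touch()            # empty file, mirroring the additions in this patch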
--- nipype/interfaces/cat12/surface.py | 20 +++++++++---------- .../data/lh.aparc_a2009s.freesurfer.annot | 0 nipype/testing/data/lh.area.structural | 0 nipype/testing/data/lh.central.structural.gii | 0 nipype/testing/data/lh.pbt.structural | 0 .../testing/data/lh.sphere.reg.structural.gii | 0 nipype/testing/data/lh.sphere.structural.gii | 0 .../data/rh.aparc_a2009s.freesurfer.annot | 0 nipype/testing/data/rh.central.structural.gii | 0 nipype/testing/data/rh.pbt.structural | 0 .../testing/data/rh.sphere.reg.structural.gii | 0 nipype/testing/data/rh.sphere.structural.gii | 0 12 files changed, 10 insertions(+), 10 deletions(-) create mode 100644 nipype/testing/data/lh.aparc_a2009s.freesurfer.annot create mode 100644 nipype/testing/data/lh.area.structural create mode 100644 nipype/testing/data/lh.central.structural.gii create mode 100644 nipype/testing/data/lh.pbt.structural create mode 100644 nipype/testing/data/lh.sphere.reg.structural.gii create mode 100644 nipype/testing/data/lh.sphere.structural.gii create mode 100644 nipype/testing/data/rh.aparc_a2009s.freesurfer.annot create mode 100644 nipype/testing/data/rh.central.structural.gii create mode 100644 nipype/testing/data/rh.pbt.structural create mode 100644 nipype/testing/data/rh.sphere.reg.structural.gii create mode 100644 nipype/testing/data/rh.sphere.structural.gii diff --git a/nipype/interfaces/cat12/surface.py b/nipype/interfaces/cat12/surface.py index aab38d1aa0..b8583c3e9b 100644 --- a/nipype/interfaces/cat12/surface.py +++ b/nipype/interfaces/cat12/surface.py @@ -71,12 +71,12 @@ class ExtractAdditionalSurfaceParameters(SPMCommand): Examples -------- >>> # Set the left surface files, both will be processed - >>> lh_path_central = 'lh.central.subject_filename.gii' + >>> lh_path_central = 'lh.central.structural.gii' >>> # Put here all surface files generated by CAT12 Segment, this is only required if the this approach is putted in - >>> surf_files = ["lh.sphere.reg.subject_filename.gii", "rh.sphere.reg.subject_filename.gii", \ - "lh.sphere.subject_filename.gii", "rh.sphere.subject_filename.gii", \ - 'rh.central.subject_filename.gii', \ - 'lh.pbt.subject_filename', 'rh.pbt.subject_filename'] + >>> surf_files = ["lh.sphere.reg.structural.gii", "rh.sphere.reg.structural.gii", \ + "lh.sphere.structural.gii", "rh.sphere.structural.gii", \ + 'rh.central.structural.gii', \ + 'lh.pbt.structural', 'rh.pbt.structural'] >>> extract_additional_measures = ExtractAdditionalSurfaceParameters(left_central_surfaces=lh_path_central, \ surface_files=surf_files) >>> extract_additional_measures.run() # doctest: +SKIP @@ -170,12 +170,12 @@ class ExtractROIBasedSurfaceMeasures(SPMCommand): >>> rh_atlas = "rh.aparc_a2009s.freesurfer.annot" >>> # Put here all surface files generated by CAT12 Segment, this is only required if the this approach is putted in a Node - >>> surf_files = ["lh.sphere.reg.subject_filename.gii", "rh.sphere.reg.subject_filename.gii", \ - "lh.sphere.subject_filename.gii", "rh.sphere.subject_filename.gii", \ - 'lh.central.subject_filename.gii', 'rh.central.subject_filename.gii', \ - 'lh.pbt.subject_filename', 'rh.pbt.subject_filename'] + >>> surf_files = ["lh.sphere.reg.structural.gii", "rh.sphere.reg.structural.gii", \ + "lh.sphere.structural.gii", "rh.sphere.structural.gii", \ + 'lh.central.structural.gii', 'rh.central.structural.gii', \ + 'lh.pbt.structural', 'rh.pbt.structural'] # Set the path to the left hemisphere measure file, both will be processed - >>> lh_measure = "lh.area.subject_filename" + >>> lh_measure = 
"lh.area.structural" >>> extract_additional_measures = ExtractROIBasedSurfaceMeasures(surface_files=surf_files, \ lh_surface_measure=lh_measure, \ diff --git a/nipype/testing/data/lh.aparc_a2009s.freesurfer.annot b/nipype/testing/data/lh.aparc_a2009s.freesurfer.annot new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/lh.area.structural b/nipype/testing/data/lh.area.structural new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/lh.central.structural.gii b/nipype/testing/data/lh.central.structural.gii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/lh.pbt.structural b/nipype/testing/data/lh.pbt.structural new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/lh.sphere.reg.structural.gii b/nipype/testing/data/lh.sphere.reg.structural.gii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/lh.sphere.structural.gii b/nipype/testing/data/lh.sphere.structural.gii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/rh.aparc_a2009s.freesurfer.annot b/nipype/testing/data/rh.aparc_a2009s.freesurfer.annot new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/rh.central.structural.gii b/nipype/testing/data/rh.central.structural.gii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/rh.pbt.structural b/nipype/testing/data/rh.pbt.structural new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/rh.sphere.reg.structural.gii b/nipype/testing/data/rh.sphere.reg.structural.gii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/rh.sphere.structural.gii b/nipype/testing/data/rh.sphere.structural.gii new file mode 100644 index 0000000000..e69de29bb2 From 44b860534dbcca7fc1a756fad18a3432ff1e9521 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A1tima=20Machado?= Date: Tue, 9 Mar 2021 14:20:47 +0000 Subject: [PATCH 0940/1665] Added files to test the interfaces. 
--- nipype/interfaces/cat12/surface.py | 20 +++++++++---------- .../data/lh.aparc_a2009s.freesurfer.annot | 0 nipype/testing/data/lh.area.structural | 0 nipype/testing/data/lh.central.structural.gii | 0 nipype/testing/data/lh.pbt.structural | 0 .../testing/data/lh.sphere.reg.structural.gii | 0 nipype/testing/data/lh.sphere.structural.gii | 0 .../data/rh.aparc_a2009s.freesurfer.annot | 0 nipype/testing/data/rh.central.structural.gii | 0 nipype/testing/data/rh.pbt.structural | 0 .../testing/data/rh.sphere.reg.structural.gii | 0 nipype/testing/data/rh.sphere.structural.gii | 0 12 files changed, 10 insertions(+), 10 deletions(-) create mode 100644 nipype/testing/data/lh.aparc_a2009s.freesurfer.annot create mode 100644 nipype/testing/data/lh.area.structural create mode 100644 nipype/testing/data/lh.central.structural.gii create mode 100644 nipype/testing/data/lh.pbt.structural create mode 100644 nipype/testing/data/lh.sphere.reg.structural.gii create mode 100644 nipype/testing/data/lh.sphere.structural.gii create mode 100644 nipype/testing/data/rh.aparc_a2009s.freesurfer.annot create mode 100644 nipype/testing/data/rh.central.structural.gii create mode 100644 nipype/testing/data/rh.pbt.structural create mode 100644 nipype/testing/data/rh.sphere.reg.structural.gii create mode 100644 nipype/testing/data/rh.sphere.structural.gii diff --git a/nipype/interfaces/cat12/surface.py b/nipype/interfaces/cat12/surface.py index aab38d1aa0..b8583c3e9b 100644 --- a/nipype/interfaces/cat12/surface.py +++ b/nipype/interfaces/cat12/surface.py @@ -71,12 +71,12 @@ class ExtractAdditionalSurfaceParameters(SPMCommand): Examples -------- >>> # Set the left surface files, both will be processed - >>> lh_path_central = 'lh.central.subject_filename.gii' + >>> lh_path_central = 'lh.central.structural.gii' >>> # Put here all surface files generated by CAT12 Segment, this is only required if the this approach is putted in - >>> surf_files = ["lh.sphere.reg.subject_filename.gii", "rh.sphere.reg.subject_filename.gii", \ - "lh.sphere.subject_filename.gii", "rh.sphere.subject_filename.gii", \ - 'rh.central.subject_filename.gii', \ - 'lh.pbt.subject_filename', 'rh.pbt.subject_filename'] + >>> surf_files = ["lh.sphere.reg.structural.gii", "rh.sphere.reg.structural.gii", \ + "lh.sphere.structural.gii", "rh.sphere.structural.gii", \ + 'rh.central.structural.gii', \ + 'lh.pbt.structural', 'rh.pbt.structural'] >>> extract_additional_measures = ExtractAdditionalSurfaceParameters(left_central_surfaces=lh_path_central, \ surface_files=surf_files) >>> extract_additional_measures.run() # doctest: +SKIP @@ -170,12 +170,12 @@ class ExtractROIBasedSurfaceMeasures(SPMCommand): >>> rh_atlas = "rh.aparc_a2009s.freesurfer.annot" >>> # Put here all surface files generated by CAT12 Segment, this is only required if the this approach is putted in a Node - >>> surf_files = ["lh.sphere.reg.subject_filename.gii", "rh.sphere.reg.subject_filename.gii", \ - "lh.sphere.subject_filename.gii", "rh.sphere.subject_filename.gii", \ - 'lh.central.subject_filename.gii', 'rh.central.subject_filename.gii', \ - 'lh.pbt.subject_filename', 'rh.pbt.subject_filename'] + >>> surf_files = ["lh.sphere.reg.structural.gii", "rh.sphere.reg.structural.gii", \ + "lh.sphere.structural.gii", "rh.sphere.structural.gii", \ + 'lh.central.structural.gii', 'rh.central.structural.gii', \ + 'lh.pbt.structural', 'rh.pbt.structural'] # Set the path to the left hemisphere measure file, both will be processed - >>> lh_measure = "lh.area.subject_filename" + >>> lh_measure = 
"lh.area.structural" >>> extract_additional_measures = ExtractROIBasedSurfaceMeasures(surface_files=surf_files, \ lh_surface_measure=lh_measure, \ diff --git a/nipype/testing/data/lh.aparc_a2009s.freesurfer.annot b/nipype/testing/data/lh.aparc_a2009s.freesurfer.annot new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/lh.area.structural b/nipype/testing/data/lh.area.structural new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/lh.central.structural.gii b/nipype/testing/data/lh.central.structural.gii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/lh.pbt.structural b/nipype/testing/data/lh.pbt.structural new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/lh.sphere.reg.structural.gii b/nipype/testing/data/lh.sphere.reg.structural.gii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/lh.sphere.structural.gii b/nipype/testing/data/lh.sphere.structural.gii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/rh.aparc_a2009s.freesurfer.annot b/nipype/testing/data/rh.aparc_a2009s.freesurfer.annot new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/rh.central.structural.gii b/nipype/testing/data/rh.central.structural.gii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/rh.pbt.structural b/nipype/testing/data/rh.pbt.structural new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/rh.sphere.reg.structural.gii b/nipype/testing/data/rh.sphere.reg.structural.gii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/rh.sphere.structural.gii b/nipype/testing/data/rh.sphere.structural.gii new file mode 100644 index 0000000000..e69de29bb2 From 1bed8ede05890c941ea3bed76ecef9ad88bf2534 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A1tima=20Machado?= Date: Tue, 9 Mar 2021 14:59:18 +0000 Subject: [PATCH 0941/1665] Minor fix --- nipype/interfaces/cat12/surface.py | 28 +++++++--------------------- 1 file changed, 7 insertions(+), 21 deletions(-) diff --git a/nipype/interfaces/cat12/surface.py b/nipype/interfaces/cat12/surface.py index b8583c3e9b..fcc77df97f 100644 --- a/nipype/interfaces/cat12/surface.py +++ b/nipype/interfaces/cat12/surface.py @@ -73,12 +73,8 @@ class ExtractAdditionalSurfaceParameters(SPMCommand): >>> # Set the left surface files, both will be processed >>> lh_path_central = 'lh.central.structural.gii' >>> # Put here all surface files generated by CAT12 Segment, this is only required if the this approach is putted in - >>> surf_files = ["lh.sphere.reg.structural.gii", "rh.sphere.reg.structural.gii", \ - "lh.sphere.structural.gii", "rh.sphere.structural.gii", \ - 'rh.central.structural.gii', \ - 'lh.pbt.structural', 'rh.pbt.structural'] - >>> extract_additional_measures = ExtractAdditionalSurfaceParameters(left_central_surfaces=lh_path_central, \ - surface_files=surf_files) + >>> surf_files = ['lh.sphere.reg.structural.gii', 'rh.sphere.reg.structural.gii', 'lh.sphere.structural.gii', 'rh.sphere.structural.gii', 'rh.central.structural.gii', 'lh.pbt.structural', 'rh.pbt.structural'] + >>> extract_additional_measures = ExtractAdditionalSurfaceParameters(left_central_surfaces=lh_path_central, surface_files=surf_files) >>> extract_additional_measures.run() # doctest: +SKIP """ @@ -166,21 +162,11 @@ class ExtractROIBasedSurfaceMeasures(SPMCommand): Examples -------- >>> # Template surface 
files - >>> lh_atlas = "lh.aparc_a2009s.freesurfer.annot" - >>> rh_atlas = "rh.aparc_a2009s.freesurfer.annot" - - >>> # Put here all surface files generated by CAT12 Segment, this is only required if the this approach is putted in a Node - >>> surf_files = ["lh.sphere.reg.structural.gii", "rh.sphere.reg.structural.gii", \ - "lh.sphere.structural.gii", "rh.sphere.structural.gii", \ - 'lh.central.structural.gii', 'rh.central.structural.gii', \ - 'lh.pbt.structural', 'rh.pbt.structural'] - # Set the path to the left hemisphere measure file, both will be processed - >>> lh_measure = "lh.area.structural" - - >>> extract_additional_measures = ExtractROIBasedSurfaceMeasures(surface_files=surf_files, \ - lh_surface_measure=lh_measure, \ - lh_roi_atlas=lh_atlas, \ - rh_roi_atlas=rh_atlas) + >>> lh_atlas = 'lh.aparc_a2009s.freesurfer.annot' + >>> rh_atlas = 'rh.aparc_a2009s.freesurfer.annot' + >>> surf_files = ['lh.sphere.reg.structural.gii', 'rh.sphere.reg.structural.gii', 'lh.sphere.structural.gii', 'rh.sphere.structural.gii', 'lh.central.structural.gii', 'rh.central.structural.gii', 'lh.pbt.structural', 'rh.pbt.structural'] + >>> lh_measure = 'lh.area.structural' + >>> extract_additional_measures = ExtractROIBasedSurfaceMeasures(surface_files=surf_files, lh_surface_measure=lh_measure, lh_roi_atlas=lh_atlas, rh_roi_atlas=rh_atlas) >>> extract_additional_measures.run() # doctest: +SKIP From 6fec887755154d3fda209c4492eafa8798d62cfd Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Wed, 10 Mar 2021 09:59:33 +0200 Subject: [PATCH 0942/1665] Added BET's in_file conversion to relative path. --- nipype/interfaces/fsl/preprocess.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/nipype/interfaces/fsl/preprocess.py b/nipype/interfaces/fsl/preprocess.py index 0f7e402445..54a41be039 100644 --- a/nipype/interfaces/fsl/preprocess.py +++ b/nipype/interfaces/fsl/preprocess.py @@ -165,10 +165,21 @@ def _run_interface(self, runtime): self.raise_exception(runtime) return runtime + def _format_arg(self, name, spec, value): + formatted = super(BET, self)._format_arg(name, spec, value) + if name == "in_file": + # Convert to relative path to prevent BET failure + # with long paths. + return op.relpath(formatted, start=os.getcwd()) + return formatted + def _gen_outfilename(self): out_file = self.inputs.out_file + # Generate default output filename if non specified. if not isdefined(out_file) and isdefined(self.inputs.in_file): out_file = self._gen_fname(self.inputs.in_file, suffix="_brain") + # Convert to relative path to prevent BET failure + # with long paths. return op.relpath(out_file, start=os.getcwd()) return out_file From 489a48d968947bc7b210296fd4669a0f1e72c151 Mon Sep 17 00:00:00 2001 From: Zvi Baratz Date: Wed, 10 Mar 2021 10:33:49 +0200 Subject: [PATCH 0943/1665] Trying to fix test_bet to assert the relative path. --- nipype/interfaces/fsl/tests/test_preprocess.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nipype/interfaces/fsl/tests/test_preprocess.py b/nipype/interfaces/fsl/tests/test_preprocess.py index dd4d827ce7..6b1b6cb610 100644 --- a/nipype/interfaces/fsl/tests/test_preprocess.py +++ b/nipype/interfaces/fsl/tests/test_preprocess.py @@ -31,6 +31,9 @@ def setup_infile(tmpdir): @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") def test_bet(setup_infile): tmp_infile, tp_dir = setup_infile + # BET converts the in_file path to be relative to prevent + # failure with long paths. 
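    # For instance (editorial illustration, not part of this change; paths are hypothetical):
    #     os.path.relpath('/home/user/project/data/T1.nii.gz', start='/home/user/project')
    # returns 'data/T1.nii.gz', so the argument handed to bet stays short even when the
    # absolute path would be very long.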
+ tmp_infile = os.path.relpath(tmp_infile, start=os.getcwd()) better = fsl.BET() assert better.cmd == "bet" From aa41da61fe65c4a900c9796635e7d7a6337cac56 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A1tima=20Machado?= Date: Mon, 15 Mar 2021 21:53:40 +0000 Subject: [PATCH 0944/1665] Bug fix in the output of ROIMeasures. --- nipype/interfaces/cat12/surface.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/nipype/interfaces/cat12/surface.py b/nipype/interfaces/cat12/surface.py index fcc77df97f..02fe3d906e 100644 --- a/nipype/interfaces/cat12/surface.py +++ b/nipype/interfaces/cat12/surface.py @@ -133,14 +133,14 @@ class ExtractROIBasedSurfaceMeasuresInputSpec(SPMCommandInputSpec): lh_roi_atlas = InputMultiPath(File(exists=True), field="rdata", desc="(Left) ROI Atlas. These are the ROI's ", mandatory=True, copyfile=False) + rh_roi_atlas = InputMultiPath(File(exists=True), desc="(Right) ROI Atlas. These are the ROI's ", + mandatory=False, copyfile=False) + lh_surface_measure = InputMultiPath(File(exists=True), field="cdata", desc="(Left) Surface data files. ", mandatory=True, copyfile=False) rh_surface_measure = InputMultiPath(File(exists=True), desc="(Right) Surface data files.", mandatory=False, copyfile=False) - rh_roi_atlas = InputMultiPath(File(exists=True), desc="(Right) ROI Atlas. These are the ROI's ", - mandatory=False, copyfile=False) - class ExtractROIBasedSurfaceMeasuresOutputSpec(TraitedSpec): label_files = List(File(exists=True), desc="Files with the measures extracted for ROIs.") @@ -194,13 +194,12 @@ def _format_arg(self, opt, spec, val): def _list_outputs(self): outputs = self._outputs().get() - outputs["label_files"] = [] - for f in self.inputs.lh_roi_atlas: - pth, base, ext = split_filename(f) + pth, base, ext = split_filename(self.inputs.lh_surface_measure[0]) - outputs["label_files"].extend([os.path.join(os.path.join(pth, "label"), f) for f in - os.listdir(os.path.join(pth, "label")) - if os.path.isfile(os.path.join(os.path.join(pth, "label"), f))]) + outputs["label_files"] = [os.path.join(os.path.join(pth, "label"), f) for f in + os.listdir(os.path.join(pth, "label")) + if os.path.isfile(os.path.join(os.path.join(pth, "label"), f))] + return outputs class Cell: @@ -227,4 +226,3 @@ def __str__(self): """Convert input to appropriate format for cat12 """ return "{%s}" % self.to_string() - From b4877aaaaa55aa4f2e3b5368eb331c816759dc90 Mon Sep 17 00:00:00 2001 From: Lea Waller Date: Tue, 30 Mar 2021 12:53:54 +0200 Subject: [PATCH 0945/1665] Make SmoothEstimateOutputSpec consistent with smoothest documentation --- nipype/interfaces/fsl/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/fsl/model.py b/nipype/interfaces/fsl/model.py index e06377cb4d..d8dc107e99 100644 --- a/nipype/interfaces/fsl/model.py +++ b/nipype/interfaces/fsl/model.py @@ -1886,7 +1886,7 @@ class SmoothEstimateInputSpec(FSLCommandInputSpec): class SmoothEstimateOutputSpec(TraitedSpec): dlh = traits.Float(desc="smoothness estimate sqrt(det(Lambda))") volume = traits.Int(desc="number of voxels in mask") - resels = traits.Float(desc="number of resels") + resels = traits.Float(desc="number of voxels per resel") class SmoothEstimate(FSLCommand): From 5af8c0e2eda2c5b521c55a8e4bad7b42cdc29ccb Mon Sep 17 00:00:00 2001 From: Lea Waller Date: Tue, 30 Mar 2021 19:11:19 +0200 Subject: [PATCH 0946/1665] Update nipype/interfaces/fsl/model.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/fsl/model.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/fsl/model.py b/nipype/interfaces/fsl/model.py index d8dc107e99..d7484c0f99 100644 --- a/nipype/interfaces/fsl/model.py +++ b/nipype/interfaces/fsl/model.py @@ -1886,7 +1886,7 @@ class SmoothEstimateInputSpec(FSLCommandInputSpec): class SmoothEstimateOutputSpec(TraitedSpec): dlh = traits.Float(desc="smoothness estimate sqrt(det(Lambda))") volume = traits.Int(desc="number of voxels in mask") - resels = traits.Float(desc="number of voxels per resel") + resels = traits.Float(desc="volume of resel, in voxels, defined as FWHM_x * FWHM_y * FWHM_z") class SmoothEstimate(FSLCommand): From dd152ee08e458c696087087450eae68fa51ffa9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A1tima=20Machado?= Date: Wed, 31 Mar 2021 11:16:56 +0100 Subject: [PATCH 0947/1665] Bug fix bias corrected file. --- nipype/interfaces/cat12/preprocess.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index c6746f110e..64c32ad53e 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -1,5 +1,4 @@ import os -import sys import traits from nipype.interfaces.base import InputMultiPath, TraitedSpec, isdefined from nipype.interfaces.spm import SPMCommand @@ -59,8 +58,8 @@ class CAT12SegmentInputSpec(SPMCommandInputSpec): affine_regularization = traits.trait_types.Str(default_value="mni", field="opts.affreg", usedefault=True, desc=affine_reg_help) - bias_acc_help = "Strength of the SPM inhomogeneity (bias) correction that simultaneously controls the SPM biasreg, " \ - "biasfwhm, samp (resolution), and tol (iteration) parameter." + bias_acc_help = "Strength of the SPM inhomogeneity (bias) correction that simultaneously controls the SPM " \ + "biasreg, biasfwhm, samp (resolution), and tol (iteration) parameter." 
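# Editorial aside on the `resels` description in the SmoothEstimate patches above
# (a sketch with made-up numbers): if smoothest estimates a FWHM of 7.5 voxels along
# each axis, then
resel_volume = 7.5 * 7.5 * 7.5        # 421.875 voxels per resel
n_resels = 100000 / resel_volume      # ~237 resels for a hypothetical 100,000-voxel mask (the `volume` output)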
power_spm_inhomogeneity_correction = traits.trait_types.Float(default_value=0.5, field='opts.biasacc', usedefault=True, desc=bias_acc_help) @@ -168,9 +167,9 @@ class CAT12SegmentInputSpec(SPMCommandInputSpec): # Templates neuromorphometrics = traits.trait_types.Bool(True, field="output.ROImenu.atlases.neuromorphometrics", usedefault=True) - lpba40 = traits.trait_types.Bool(False, field="output.ROImenu.atlases.lpba40", usedefault=True) + lpba40 = traits.trait_types.Bool(True, field="output.ROImenu.atlases.lpba40", usedefault=True) cobra = traits.trait_types.Bool(True, field="output.ROImenu.atlases.hammers", usedefault=True) - hammers = traits.trait_types.Bool(False, field="output.ROImenu.atlases.cobra", usedefault=True) + hammers = traits.trait_types.Bool(True, field="output.ROImenu.atlases.cobra", usedefault=True) own_atlas = InputMultiPath(ImageFileSPM(exists=True), field="output.ROImenu.atlases.ownatlas", desc="Own Atlas", mandatory=False, copyfile=False) @@ -335,7 +334,7 @@ def _list_outputs(self): outputs[outfield] = fname_presuffix(f, prefix=prefix, suffix="_rigid") if isdefined(self.inputs.save_bias_corrected) and self.inputs.save_bias_corrected: - outputs["bias_corrected_image"] = fname_presuffix(f, prefix=os.path.join("mri", 'mi')) + outputs["bias_corrected_image"] = fname_presuffix(f, prefix=os.path.join("mri", 'wmi')) outputs["surface_files"] = [os.path.join(os.path.join(pth, "surf"), f) for f in os.listdir(os.path.join(pth, "surf")) From 615887cc3d5a6e50d8db9a7626f2334b389e03c4 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 31 Mar 2021 14:07:27 -0400 Subject: [PATCH 0948/1665] CI: Move from Travis to GitHub actions --- .github/workflows/tests.yml | 89 ++++++++++++++++++++++++++++ tools/ci/activate.sh | 9 +++ tools/ci/build_archive.sh | 28 +++++++++ tools/ci/check.sh | 25 ++++++++ tools/ci/create_venv.sh | 24 ++++++++ tools/ci/env.sh | 17 ++++++ tools/ci/install.sh | 39 ++++++++++++ tools/ci/install_deb_dependencies.sh | 13 ++++ tools/ci/install_dependencies.sh | 26 ++++++++ 9 files changed, 270 insertions(+) create mode 100644 .github/workflows/tests.yml create mode 100644 tools/ci/activate.sh create mode 100755 tools/ci/build_archive.sh create mode 100755 tools/ci/check.sh create mode 100755 tools/ci/create_venv.sh create mode 100644 tools/ci/env.sh create mode 100755 tools/ci/install.sh create mode 100755 tools/ci/install_deb_dependencies.sh create mode 100755 tools/ci/install_dependencies.sh diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 0000000000..82676ee28c --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,89 @@ +name: Stable tests + +# This file tests the claimed support range of nipype including +# +# * Operating systems: Linux, OSX +# * Dependencies: minimum requirements, optional requirements +# * Installation methods: setup.py, sdist, wheel, archive + +on: + push: + branches: + - master + - maint/* + pull_request: + branches: + - master + - maint/* + +defaults: + run: + shell: bash + +jobs: + stable: + # Check each OS, all supported Python, minimum versions and latest releases + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: ['ubuntu-18.04'] + python-version: [3.6, 3.7, 3.8, 3.9] + check: ['test'] + pip-flags: [''] + depends: ['REQUIREMENTS'] + deb-depends: [false] + nipype-extras: ['doc,tests,profiler'] + include: + - os: ubuntu-18.04 + python-version: 3.8 + check: test + pip-flags: '' + depends: REQUIREMENTS + deb-depends: true + nipype-extras: doc,tests,nipy,profiler,duecredit,ssh + env: 
+ DEPENDS: ${{ matrix.depends }} + CHECK_TYPE: ${{ matrix.check }} + EXTRA_PIP_FLAGS: ${{ matrix.pip-flags }} + INSTALL_DEB_DEPENDENCIES: ${{ matrix.deb-depends }} + NIPYPE_EXTRAS: ${{ matrix.nipype-extras }} + INSTALL_TYPE: pip + CI_SKIP_TEST: 1 + + steps: + - uses: actions/checkout@v2 + with: + submodules: recursive + fetch-depth: 0 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Display Python version + run: python -c "import sys; print(sys.version)" + - name: Create virtual environment + run: tools/ci/create_venv.sh + - name: Build archive + run: | + source tools/ci/build_archive.sh + echo "ARCHIVE=$ARCHIVE" >> $GITHUB_ENV + - name: Install Debian dependencies + run: tools/ci/install_deb_dependencies.sh + if: ${{ matrix.os == 'ubuntu-18.04' }} + - name: Install dependencies + run: tools/ci/install_dependencies.sh + - name: Install Nipype + run: tools/ci/install.sh + - name: Run tests + run: tools/ci/check.sh + if: ${{ matrix.check != 'skiptests' }} + - uses: codecov/codecov-action@v1 + with: + file: coverage.xml + if: ${{ always() }} + - name: Upload pytest test results + uses: actions/upload-artifact@v2 + with: + name: pytest-results-${{ matrix.os }}-${{ matrix.python-version }} + path: test-results.xml + if: ${{ always() && matrix.check == 'test' }} diff --git a/tools/ci/activate.sh b/tools/ci/activate.sh new file mode 100644 index 0000000000..567e13a67b --- /dev/null +++ b/tools/ci/activate.sh @@ -0,0 +1,9 @@ +if [ -e virtenv/bin/activate ]; then + source virtenv/bin/activate +elif [ -e virtenv/Scripts/activate ]; then + source virtenv/Scripts/activate +else + echo Cannot activate virtual environment + ls -R virtenv + false +fi diff --git a/tools/ci/build_archive.sh b/tools/ci/build_archive.sh new file mode 100755 index 0000000000..484ed19f6b --- /dev/null +++ b/tools/ci/build_archive.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +echo "Building archive" + +source tools/ci/activate.sh + +set -eu + +# Required dependencies +echo "INSTALL_TYPE = $INSTALL_TYPE" + +set -x + +if [ "$INSTALL_TYPE" == "sdist" ]; then + python setup.py egg_info # check egg_info while we're here + python setup.py sdist + export ARCHIVE=$( ls dist/*.tar.gz ) +elif [ "$INSTALL_TYPE" == "wheel" ]; then + python setup.py bdist_wheel + export ARCHIVE=$( ls dist/*.whl ) +elif [ "$INSTALL_TYPE" == "archive" ]; then + export ARCHIVE="package.tar.gz" + git archive -o $ARCHIVE HEAD +elif [ "$INSTALL_TYPE" == "pip" ]; then + export ARCHIVE="." 
+fi + +set +eux diff --git a/tools/ci/check.sh b/tools/ci/check.sh new file mode 100755 index 0000000000..d180752524 --- /dev/null +++ b/tools/ci/check.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +echo Running tests + +source tools/ci/activate.sh +source tools/ci/env.sh + +set -eu + +# Required variables +echo CHECK_TYPE = $CHECK_TYPE + +set -x + +if [ "${CHECK_TYPE}" == "test" ]; then + pytest --capture=no --verbose --doctest-modules -c nipype/pytest.ini \ + --cov-config .coveragerc --cov nipype --cov-report xml \ + --junitxml=test-results.xml nipype +else + false +fi + +set +eux + +echo Done running tests diff --git a/tools/ci/create_venv.sh b/tools/ci/create_venv.sh new file mode 100755 index 0000000000..7a28767396 --- /dev/null +++ b/tools/ci/create_venv.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +echo Creating isolated virtual environment + +source tools/ci/env.sh + +set -eu + +# Required variables +echo SETUP_REQUIRES = $SETUP_REQUIRES + +set -x + +python -m pip install --upgrade pip virtualenv +virtualenv --python=python virtenv +source tools/ci/activate.sh +python --version +python -m pip install -U $SETUP_REQUIRES +which python +which pip + +set +eux + +echo Done creating isolated virtual environment diff --git a/tools/ci/env.sh b/tools/ci/env.sh new file mode 100644 index 0000000000..87e766b8c2 --- /dev/null +++ b/tools/ci/env.sh @@ -0,0 +1,17 @@ +SETUP_REQUIRES="pip setuptools>=30.3.0 wheel" + +# Minimum requirements +REQUIREMENTS="-r requirements.txt" +# Minimum versions of minimum requirements +MIN_REQUIREMENTS="-r min-requirements.txt" + +# Numpy and scipy upload nightly/weekly/intermittent wheels +NIGHTLY_WHEELS="https://pypi.anaconda.org/scipy-wheels-nightly/simple" +STAGING_WHEELS="https://pypi.anaconda.org/multibuild-wheels-staging/simple" +PRE_PIP_FLAGS="--pre --extra-index-url $NIGHTLY_WHEELS --extra-index-url $STAGING_WHEELS" + +for CONF in (/etc/fsl/fsl.sh /etc/afni/afni.sh); do + if [ -r $CONF ]; then source $CONF; fi +done + +FSLOUTPUTTYPE=NIFTI_GZ diff --git a/tools/ci/install.sh b/tools/ci/install.sh new file mode 100755 index 0000000000..428ffc8b8c --- /dev/null +++ b/tools/ci/install.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +echo Installing nipype + +source tools/ci/activate.sh +source tools/ci/env.sh + +set -eu + +# Required variables +echo INSTALL_TYPE = $INSTALL_TYPE +echo CHECK_TYPE = $CHECK_TYPE +echo NIPYPE_EXTRAS = $NIPYPE_EXTRAS +echo EXTRA_PIP_FLAGS = $EXTRA_PIP_FLAGS + +set -x + +if [ -n "$EXTRA_PIP_FLAGS" ]; then + EXTRA_PIP_FLAGS=${!EXTRA_PIP_FLAGS} +fi + +if [ "$INSTALL_TYPE" == "setup" ]; then + python setup.py install +else + pip install $EXTRA_PIP_FLAGS $ARCHIVE +fi + +# Basic import check +python -c 'import nipype; print(nipype.__version__)' + +if [ "$CHECK_TYPE" == "skiptests" ]; then + exit 0 +fi + +pip install $EXTRA_PIP_FLAGS "nipype[$NIPYPE_EXTRAS]" + +set +eux + +echo Done installing nipype diff --git a/tools/ci/install_deb_dependencies.sh b/tools/ci/install_deb_dependencies.sh new file mode 100755 index 0000000000..ff1e67732b --- /dev/null +++ b/tools/ci/install_deb_dependencies.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +echo "Installing NeuroDebian dependencies" + +set -eu + +echo "INSTALL_DEB_DEPENDENCIES = $INSTALL_DEB_DEPENDENCIES" + +if $INSTALL_DEB_DEPENDENCIES; then + bash <(wget -q -O- http://neuro.debian.net/_files/neurodebian-travis.sh) + sudo apt update + sudo apt install -y -qq fsl afni elastix fsl-atlases xvfb fusefat graphviz +fi diff --git a/tools/ci/install_dependencies.sh b/tools/ci/install_dependencies.sh new file mode 100755 index 
0000000000..617389cb5e --- /dev/null +++ b/tools/ci/install_dependencies.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +echo Installing dependencies + +source tools/ci/activate.sh +source tools/ci/env.sh + +set -eu + +# Required variables +echo EXTRA_PIP_FLAGS = $EXTRA_PIP_FLAGS +echo DEPENDS = $DEPENDS + +set -x + +if [ -n "$EXTRA_PIP_FLAGS" ]; then + EXTRA_PIP_FLAGS=${!EXTRA_PIP_FLAGS} +fi + +if [ -n "$DEPENDS" ]; then + pip install ${EXTRA_PIP_FLAGS} --prefer-binary ${!DEPENDS} +fi + +set +eux + +echo Done installing dependencies From 1e9b61c3a23c96591218f5ee318cf2c25560cf2e Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 31 Mar 2021 15:26:43 -0400 Subject: [PATCH 0949/1665] CI: Add specs and style checks --- .github/workflows/contrib.yml | 76 +++++++++++++++++++++++++++++++++++ tools/ci/check.sh | 6 +++ 2 files changed, 82 insertions(+) create mode 100644 .github/workflows/contrib.yml diff --git a/.github/workflows/contrib.yml b/.github/workflows/contrib.yml new file mode 100644 index 0000000000..cb4f9117d6 --- /dev/null +++ b/.github/workflows/contrib.yml @@ -0,0 +1,76 @@ +name: Contribution checks + +# This checks validate contributions meet baseline checks +# +# * specs - Ensure make + +on: + push: + branches: + - master + - maint/* + pull_request: + branches: + - master + - maint/* + +defaults: + run: + shell: bash + +jobs: + stable: + # Check each OS, all supported Python, minimum versions and latest releases + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: ['ubuntu-18.04'] + python-version: [3.8] + nipype-extras: ['dev'] + check: ['specs', 'style'] + env: + DEPENDS: "" + CHECK_TYPE: ${{ matrix.check }} + NIPYPE_EXTRAS: ${{ matrix.nipype-extras }} + EXTRA_PIP_FLAGS: "" + INSTALL_DEB_DEPENDENCIES: false + INSTALL_TYPE: pip + CI_SKIP_TEST: 1 + + steps: + - uses: actions/checkout@v2 + with: + submodules: recursive + fetch-depth: 0 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Display Python version + run: python -c "import sys; print(sys.version)" + - name: Create virtual environment + run: tools/ci/create_venv.sh + - name: Build archive + run: | + source tools/ci/build_archive.sh + echo "ARCHIVE=$ARCHIVE" >> $GITHUB_ENV + - name: Install Debian dependencies + run: tools/ci/install_deb_dependencies.sh + if: ${{ matrix.os == 'ubuntu-18.04' }} + - name: Install dependencies + run: tools/ci/install_dependencies.sh + - name: Install Nipype + run: tools/ci/install.sh + - name: Run tests + run: tools/ci/check.sh + if: ${{ matrix.check != 'skiptests' }} + - uses: codecov/codecov-action@v1 + with: + file: coverage.xml + if: ${{ always() }} + - name: Upload pytest test results + uses: actions/upload-artifact@v2 + with: + name: pytest-results-${{ matrix.os }}-${{ matrix.python-version }} + path: test-results.xml + if: ${{ always() && matrix.check == 'test' }} diff --git a/tools/ci/check.sh b/tools/ci/check.sh index d180752524..5151854902 100755 --- a/tools/ci/check.sh +++ b/tools/ci/check.sh @@ -16,6 +16,12 @@ if [ "${CHECK_TYPE}" == "test" ]; then pytest --capture=no --verbose --doctest-modules -c nipype/pytest.ini \ --cov-config .coveragerc --cov nipype --cov-report xml \ --junitxml=test-results.xml nipype +elif [ "$CHECK_TYPE" = "specs" ]; then + make specs + git status -s + test -z "$(git status -s)" +elif [ "$CHECK_TYPE" = "style" ]; then + black --check nipype setup.py else false fi From 4ef8325288d7974c2fe47599e7780e93b0af298e Mon Sep 17 00:00:00 2001 From: Chris Markiewicz 
Date: Thu, 1 Apr 2021 09:23:29 -0400 Subject: [PATCH 0950/1665] STY: Eliminate tabs in docstrings --- nipype/algorithms/modelgen.py | 14 +++++++------- nipype/interfaces/minc/minc.py | 2 +- nipype/interfaces/mne/base.py | 2 +- nipype/interfaces/mrtrix/tensors.py | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/nipype/algorithms/modelgen.py b/nipype/algorithms/modelgen.py index f3a4bbd2fb..29f31c60c0 100644 --- a/nipype/algorithms/modelgen.py +++ b/nipype/algorithms/modelgen.py @@ -40,13 +40,13 @@ def spm_hrf(RT, P=None, fMRI_T=16): % p - parameters of the response function (two gamma % functions) % defaults (seconds) - % p(0) - delay of response (relative to onset) 6 - % p(1) - delay of undershoot (relative to onset) 16 - % p(2) - dispersion of response 1 - % p(3) - dispersion of undershoot 1 - % p(4) - ratio of response to undershoot 6 - % p(5) - onset (seconds) 0 - % p(6) - length of kernel (seconds) 32 + % p(0) - delay of response (relative to onset) 6 + % p(1) - delay of undershoot (relative to onset) 16 + % p(2) - dispersion of response 1 + % p(3) - dispersion of undershoot 1 + % p(4) - ratio of response to undershoot 6 + % p(5) - onset (seconds) 0 + % p(6) - length of kernel (seconds) 32 % % hrf - hemodynamic response function % p - parameters of the response function diff --git a/nipype/interfaces/minc/minc.py b/nipype/interfaces/minc/minc.py index 14c29f7b1b..74fdbfb031 100644 --- a/nipype/interfaces/minc/minc.py +++ b/nipype/interfaces/minc/minc.py @@ -1327,7 +1327,7 @@ class BeastInputSpec(CommandLineInputSpec): Generic options for all commands: -help: Print summary of command-line options and abort -version: Print version number of program and exit - Copyright (C) 2011 Simon Fristed Eskildsen, Vladimir Fonov, + Copyright (C) 2011 Simon Fristed Eskildsen, Vladimir Fonov, Pierrick Coupe, Jose V. Manjon This program comes with ABSOLUTELY NO WARRANTY; for details type 'cat COPYING'. diff --git a/nipype/interfaces/mne/base.py b/nipype/interfaces/mne/base.py index f410da794a..b06384fb4e 100644 --- a/nipype/interfaces/mne/base.py +++ b/nipype/interfaces/mne/base.py @@ -98,7 +98,7 @@ class WatershedBEM(FSCommand): >>> bem.inputs.subjects_dir = '.' >>> bem.cmdline 'mne watershed_bem --overwrite --subject subj1 --volume T1' - >>> bem.run() # doctest: +SKIP + >>> bem.run() # doctest: +SKIP """ diff --git a/nipype/interfaces/mrtrix/tensors.py b/nipype/interfaces/mrtrix/tensors.py index d7cbb5f7a8..70b789d4e3 100644 --- a/nipype/interfaces/mrtrix/tensors.py +++ b/nipype/interfaces/mrtrix/tensors.py @@ -191,7 +191,7 @@ class ConstrainedSphericalDeconvolution(CommandLine): First, the fact the signal attenuation profile is real implies that it has conjugate symmetry, i.e. Y(l,-m) = Y(l,m)* (where * denotes the complex conjugate). Second, the diffusion profile should be antipodally symmetric (i.e. S(x) = S(-x)), implying that all odd l components should be zero. - Therefore, this program only computes the even elements. Note that the spherical harmonics equations used here + Therefore, this program only computes the even elements. Note that the spherical harmonics equations used here differ slightly from those conventionally used, in that the (-1)^m factor has been omitted. This should be taken into account in all subsequent calculations. 
Each volume in the output image corresponds to a different spherical harmonic component, according to the following convention: From 82c9276261b91a1170250c8046dd140bd259982e Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 1 Apr 2021 09:41:18 -0400 Subject: [PATCH 0951/1665] MNT: Update pre-commit --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c429bfa6d9..d5c5a4a51a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,13 +2,13 @@ # See https://pre-commit.com/hooks.html for more hooks repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v2.0.0 + rev: v3.4.0 hooks: - id: trailing-whitespace - id: end-of-file-fixer - id: check-yaml - id: check-added-large-files - repo: https://github.com/psf/black - rev: 19.3b0 + rev: 20.8b1 hooks: - id: black From ea72f5e58ba3a7c1dc5f310ce634baf5243732d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A1tima=20Machado?= Date: Thu, 1 Apr 2021 15:41:20 +0100 Subject: [PATCH 0952/1665] Update nipype/interfaces/cat12/preprocess.py Update nipype/interfaces/cat12/surface.py --- nipype/interfaces/cat12/preprocess.py | 438 +++++++++++++------------- nipype/interfaces/cat12/surface.py | 46 ++- 2 files changed, 236 insertions(+), 248 deletions(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index 64c32ad53e..18da122061 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -1,250 +1,218 @@ import os -import traits -from nipype.interfaces.base import InputMultiPath, TraitedSpec, isdefined +from pathlib import Path + +from nipype.interfaces.base import InputMultiPath, TraitedSpec, traits, isdefined, File, Str from nipype.interfaces.spm import SPMCommand from nipype.interfaces.spm.base import SPMCommandInputSpec, ImageFileSPM, scans_for_fnames, scans_for_fname from nipype.utils.filemanip import split_filename, fname_presuffix -from traits.trait_types import Int, File -from traits.trait_types import List class CAT12SegmentInputSpec(SPMCommandInputSpec): - """ - CAT12: Segmentation - This toolbox is an extension to the default segmentation in SPM12, but uses a completely different segmentation - approach. - The segmentation approach is based on an Adaptive Maximum A Posterior (MAP) technique without the need for a priori - information about tissue probabilities. That is, the Tissue Probability Maps (TPM) are not used constantly in the - sense of the classical Unified Segmentation approach (Ashburner et. al. 2005), but just for spatial normalization. - The following AMAP estimation is adaptive in the sense that local variations of the parameters (i.e., means and - variance) are modeled as slowly varying spatial functions (Rajapakse et al. 1997). This not only accounts for - intensity inhomogeneities but also for other local variations of intensity. - Additionally, the segmentation approach uses a Partial Volume Estimation (PVE) with a simplified mixed model of at - most two tissue types (Tohka et al. 2004). We start with an initial segmentation into three pure classes: gray - matter (GM), white matter (WM), and cerebrospinal fluid (CSF) based on the above described AMAP estimation. The - initial segmentation is followed by a PVE of two additional mixed classes: GM-WM and GM-CSF. 
This results in an - estimation of the amount (or fraction) of each pure tissue type present in every voxel (as single voxels - given by - Another important extension to the SPM12 segmentation is the integration of the Dartel or Geodesic Shooting - registration into the toolbox by an already existing Dartel/Shooting template in MNI space. This template was - derived from 555 healthy control subjects of the IXI-database (http://www.brain-development.org) and provides the - several Dartel or Shooting iterations. Thus, for the majority of studies the creation of sample-specific templates - is not necessary anymore and is mainly recommended for children data.'}; - - http://www.neuro.uni-jena.de/cat12/CAT12-Manual.pdf#page=15 - - Examples - -------- - >>> path_mr = 'structural.nii' - >>> cat = CAT12Segment(in_files=path_mr) - >>> cat.run() # doctest: +SKIP - """ - in_files = InputMultiPath(ImageFileSPM(exists=True), field="data", desc="file to segment", mandatory=True, copyfile=False) - help_tpm = 'Tissue Probability Map. Select the tissue probability image that includes 6 tissue probability ' \ - 'classes for (1) grey matter, (2) white matter, (3) cerebrospinal fluid, (4) bone, (5) non-brain soft ' \ - 'tissue, and (6) the background. CAT uses the TPM only for the initial SPM segmentation.' - tpm = InputMultiPath(ImageFileSPM(exists=True), field="tpm", desc=help_tpm, mandatory=False, + _help_tpm = 'Tissue Probability Map. Select the tissue probability image that includes 6 tissue probability ' \ + 'classes for (1) grey matter, (2) white matter, (3) cerebrospinal fluid, (4) bone, (5) non-brain ' \ + 'soft tissue, and (6) the background. CAT uses the TPM only for the initial SPM segmentation.' + tpm = InputMultiPath(ImageFileSPM(exists=True), field="tpm", desc=_help_tpm, mandatory=False, copyfile=False) - n_jobs = traits.trait_types.Int(1, usedefault=True, mandatory=True, field="nproc", desc="Number of threads") - use_prior = traits.trait_types.Str(field="useprior", usedefault=True) - - affine_reg_help = 'Affine Regularization. The procedure is a local optimisation, so it needs reasonable initial ' \ - 'starting estimates. Images should be placed in approximate alignment using the Display ' \ - 'function of SPM before beginning. A Mutual Information affine registration with the tissue ' \ - 'probability maps (D''Agostino et al, 2004) is used to achieve approximate alignment.' - affine_regularization = traits.trait_types.Str(default_value="mni", - field="opts.affreg", usedefault=True, desc=affine_reg_help) - - bias_acc_help = "Strength of the SPM inhomogeneity (bias) correction that simultaneously controls the SPM " \ - "biasreg, biasfwhm, samp (resolution), and tol (iteration) parameter." - power_spm_inhomogeneity_correction = traits.trait_types.Float(default_value=0.5, field='opts.biasacc', - usedefault=True, - desc=bias_acc_help) + n_jobs = traits.Int(1, usedefault=True, mandatory=True, field="nproc", desc="Number of threads") + use_prior = Str(field="useprior", usedefault=True) + + _help_affine_reg = 'Affine Regularization. The procedure is a local optimisation, so it needs reasonable initial ' \ + 'starting estimates. Images should be placed in approximate alignment using the Display ' \ + 'function of SPM before beginning. A Mutual Information affine registration with the tissue ' \ + 'probability maps (D''Agostino et al, 2004) is used to achieve approximate alignment.' 
+ affine_regularization = Str(default_value="mni", + field="opts.affreg", usedefault=True, desc=_help_affine_reg) + + _help_bias_acc = "Strength of the SPM inhomogeneity (bias) correction that simultaneously controls the SPM " \ + "biasreg, biasfwhm, samp (resolution), and tol (iteration) parameter." + power_spm_inhomogeneity_correction = traits.Float(default_value=0.5, field='opts.biasacc', + usedefault=True, + desc=_help_bias_acc) # Extended options for CAT12 preprocessing - help_app = 'Affine registration and SPM preprocessing can fail in some subjects with deviating anatomy (e.g. ' \ - 'other species/neonates) or in images with strong signal inhomogeneities, or untypical intensities ' \ - '(e.g. synthetic images). An initial bias correction can help to reduce such problems (see details ' \ - 'below). Recommended are the "default" and "full" option.' - affine_preprocessing = traits.trait_types.Int(1070, field="extopts.APP", desc=help_app, usedefault=True) - - help_initial_seg = 'In rare cases the Unified Segmentation can fail in highly abnormal brains, where e.g. the ' \ - 'cerebrospinal fluid of superlarge ventricles (hydrocephalus) were classified as white matter.' \ - ' However, if the affine registration is correct, the AMAP segmentation with an ' \ - 'prior-independent k-means initialization can be used to replace the SPM brain tissue ' \ - 'classification. Moreover, if the default Dartel and Shooting registrations will fail then the' \ - ' "Optimized Shooting - superlarge ventricles" option for "Spatial registration" is required! ' \ - 'Values: \nnone: 0;\nlight: 1;\nfull: 2;\ndefault: 1070.' - initial_segmentation = traits.trait_types.Int(0, field="extopts.spm_kamap", desc=help_initial_seg, usedefault=True) - - help_las = 'Additionally to WM-inhomogeneities, GM intensity can vary across different regions such as the motor' \ - ' cortex, the basal ganglia, or the occipital lobe. These changes have an anatomical background ' \ - '(e.g. iron content, myelinization), but are dependent on the MR-protocol and often lead to ' \ - 'underestimation of GM at higher intensities and overestimation of CSF at lower intensities. ' \ - 'Therefore, a local intensity transformation of all tissue classes is used to reduce these effects in ' \ - 'the image. This local adaptive segmentation (LAS) is applied before the final AMAP segmentation.' \ - 'Possible Values: \nSPM Unified Segmentation: 0 \nk-means AMAP: 2' - local_adaptive_seg = traits.trait_types.Float(0.5, field="extopts.LASstr", usedefault=True, desc=help_las) - - help_gcutstr = 'Method of initial skull-stripping before AMAP segmentation. The SPM approach works quite stable ' \ - 'for the majority of data. However, in some rare cases parts of GM (i.e. in frontal lobe) might ' \ - 'be cut. If this happens the GCUT approach is a good alternative. GCUT is a graph-cut/region-' \ - 'growing approach starting from the WM area. APRG (adaptive probability region-growing) is a new' \ - ' method that refines the probability maps of the SPM approach by region-growing techniques of ' \ - 'the gcut approach with a final surface-based optimization strategy. This is currently the method ' \ - 'with the most accurate and reliable results. If you use already skull-stripped data you can turn' \ - ' off skull-stripping although this is automaticaly detected in most cases. 
Please note that the' \ - ' choice of the skull-stripping method will also influence the estimation of TIV, because the' \ - ' methods mainly differ in the handling of the outer CSF around the cortical surface. ' \ - '\nPossible Values:\n - none (already skull-stripped): -1;\n - SPM approach: 0; ' \ - '\n - GCUT approach: 0.50; \n - APRG approach: 2' - skull_strip = traits.trait_types.Float(2, field="extopts.gcutstr", desc=help_gcutstr, usedefault=True) - - help_wmhc = 'WARNING: Please note that the detection of WM hyperintensies is still under development and does ' \ - 'not have the same accuracy as approaches that additionally consider FLAIR images (e.g. Lesion ' \ - 'Segmentation Toolbox)! In aging or (neurodegenerative) diseases WM intensity can be reduced ' \ - 'locally in T1 or increased in T2/PD images. These so-called WM hyperintensies (WMHs) can lead to ' \ - 'preprocessing errors. Large GM areas next to the ventricle can cause normalization problems. ' \ - 'Therefore, a temporary correction for normalization is useful if WMHs are expected. CAT allows ' \ - 'different ways to handle WMHs: ' \ - '\n0) No Correction (handled as GM). \n1) Temporary (internal) correction as WM for spatial ' \ - 'normalization and estimation of cortical thickness. \n2) Permanent correction to WM. ' - wm_hyper_intensity_correction = traits.trait_types.Int(1, field="extopts.WMHC", desc=help_wmhc, usedefault=True) - - help_vox = 'The (isotropic) voxel sizes of any spatially normalised written images. A non-finite value will be ' \ - 'replaced by the average voxel size of the tissue probability maps used by the segmentation.' - voxel_size = traits.trait_types.Float(1.5, field="extopts.vox", desc=help_vox, usedefault=True) - - help_resampling = 'Internal resampling for preprocessing.\n The default fixed image resolution offers a good ' \ - 'trade-off between optimal quality and preprocessing time and memory demands. Standard ' \ - 'structural data with a voxel resolution around 1 mm or even data with high in-plane resolution' \ - ' and large slice thickness (e.g. 0.5x0.5x1.5 mm) will benefit from this setting. If you have' \ - ' higher native resolutions the highres option "Fixed 0.8 mm" will sometimes offer slightly' \ - ' better preprocessing quality with an increase of preprocessing time and memory demands. In' \ - ' case of even higher resolutions and high signal-to-noise ratio (e.g. for 7 T data) the ' \ - '"Best native" option will process the data on the highest native resolution. I.e. a resolution' \ - ' of 0.4x0.7x1.0 mm will be interpolated to 0.4x0.4x0.4 mm. A tolerance range of 0.1 mm is used' \ - ' to avoid interpolation artifacts, i.e. a resolution of 0.95x1.01x1.08 mm will not be ' \ - 'interpolated in case of the "Fixed 1.0 mm"! This "optimal" option prefers an isotropic voxel ' \ - 'size with at least 1.1 mm that is controlled by the median voxel size and a volume term that ' \ - 'penalizes highly anisotropic voxels.' \ - 'Values:\nOptimal: [1.0 0.1]\nFixed 1.0 mm: [1.0 0.1];\nFixed 0.8 mm:[0.8 0.1]' \ - '\nBest native: [0.5 0.1]' - internal_resampling_process = traits.trait_types.Tuple(traits.trait_types.Float(1), traits.trait_types.Float(0.1), - minlen=2, maxlen=2, - field="extopts.restypes.optimal", desc="help_resampling", - usedefault=True) - errors_help = 'Error handling.\nTry to catch preprocessing errors and continue with the next data set or ignore ' \ - 'all warnings (e.g., bad intensities) and use an experimental pipeline which is still in ' \ - 'development. 
In case of errors, CAT continues with the next subject if this option is enabled. If ' \
                   'the experimental option with backup functions is selected and warnings occur, CAT will try to use' \
                   ' backup routines and skip some processing steps which require good T1 contrasts (e.g., LAS). If ' \
                   'you want to avoid processing of critical data and ensure that only the main pipeline is used then' \
                   ' select the option "Ignore errors (continue with the next subject)". It is strongly recommended to' \
                   ' check for preprocessing problems, especially with non-T1 contrasts. ' \
                   '\nValues:\nnone: 0,\ndefault: 1,\ndetails: 2.'
-    ignore_errors = traits.trait_types.Int(1, field="extopts.ignoreErrors", desc=errors_help, usedefault=True)
+    _help_app = 'Affine registration and SPM preprocessing can fail in some subjects with deviating anatomy (e.g. ' \
+                'other species/neonates) or in images with strong signal inhomogeneities, or untypical intensities ' \
+                '(e.g. synthetic images). An initial bias correction can help to reduce such problems (see details ' \
+                'below). Recommended are the "default" and "full" option.'
+    affine_preprocessing = traits.Int(1070, field="extopts.APP", desc=_help_app, usedefault=True)
+
+    _help_initial_seg = 'In rare cases the Unified Segmentation can fail in highly abnormal brains, where e.g. the ' \
+                        'cerebrospinal fluid of superlarge ventricles (hydrocephalus) were classified as white ' \
+                        'matter. However, if the affine registration is correct, the AMAP segmentation with a ' \
+                        'prior-independent k-means initialization can be used to replace the SPM brain tissue ' \
+                        'classification. Moreover, if the default Dartel and Shooting registrations will fail then ' \
+                        'the "Optimized Shooting - superlarge ventricles" option for "Spatial registration" is ' \
+                        'required! Values: \nnone: 0;\nlight: 1;\nfull: 2;\ndefault: 1070.'
+    initial_segmentation = traits.Int(0, field="extopts.spm_kamap", desc=_help_initial_seg, usedefault=True)
+
+    _help_las = 'Additionally to WM-inhomogeneities, GM intensity can vary across different regions such as the motor' \
+                ' cortex, the basal ganglia, or the occipital lobe. These changes have an anatomical background ' \
+                '(e.g. iron content, myelinization), but are dependent on the MR-protocol and often lead to ' \
+                'underestimation of GM at higher intensities and overestimation of CSF at lower intensities. ' \
+                'Therefore, a local intensity transformation of all tissue classes is used to reduce these effects in' \
+                ' the image. This local adaptive segmentation (LAS) is applied before the final AMAP segmentation.' \
+                'Possible Values: \nSPM Unified Segmentation: 0 \nk-means AMAP: 2'
+    local_adaptive_seg = traits.Float(0.5, field="extopts.LASstr", usedefault=True, desc=_help_las)
+
+    _help_gcutstr = 'Method of initial skull-stripping before AMAP segmentation. The SPM approach works quite stable ' \
+                    'for the majority of data. However, in some rare cases parts of GM (i.e. in frontal lobe) might ' \
+                    'be cut. If this happens the GCUT approach is a good alternative. GCUT is a graph-cut/region-' \
+                    'growing approach starting from the WM area. APRG (adaptive probability region-growing) is a new' \
+                    ' method that refines the probability maps of the SPM approach by region-growing techniques of ' \
+                    'the gcut approach with a final surface-based optimization strategy. This is currently the method' \
+                    ' with the most accurate and reliable results.
If you use already skull-stripped data you can ' \ + 'turn off skull-stripping although this is automaticaly detected in most cases. Please note that ' \ + 'the choice of the skull-stripping method will also influence the estimation of TIV, because the' \ + ' methods mainly differ in the handling of the outer CSF around the cortical surface. ' \ + '\nPossible Values:\n - none (already skull-stripped): -1;\n - SPM approach: 0; ' \ + '\n - GCUT approach: 0.50; \n - APRG approach: 2' + skull_strip = traits.Float(2, field="extopts.gcutstr", desc=_help_gcutstr, usedefault=True) + + _help_wmhc = 'WARNING: Please note that the detection of WM hyperintensies is still under development and does ' \ + 'not have the same accuracy as approaches that additionally consider FLAIR images (e.g. Lesion ' \ + 'Segmentation Toolbox)! In aging or (neurodegenerative) diseases WM intensity can be reduced ' \ + 'locally in T1 or increased in T2/PD images. These so-called WM hyperintensies (WMHs) can lead to ' \ + 'preprocessing errors. Large GM areas next to the ventricle can cause normalization problems. ' \ + 'Therefore, a temporary correction for normalization is useful if WMHs are expected. CAT allows ' \ + 'different ways to handle WMHs: ' \ + '\n0) No Correction (handled as GM). \n1) Temporary (internal) correction as WM for spatial ' \ + 'normalization and estimation of cortical thickness. \n2) Permanent correction to WM. ' + wm_hyper_intensity_correction = traits.Int(1, field="extopts.WMHC", desc=_help_wmhc, usedefault=True) + + _help_vox = 'The (isotropic) voxel sizes of any spatially normalised written images. A non-finite value will be ' \ + 'replaced by the average voxel size of the tissue probability maps used by the segmentation.' + voxel_size = traits.Float(1.5, field="extopts.vox", desc=_help_vox, usedefault=True) + + _help_resampling = 'Internal resampling for preprocessing.\n The default fixed image resolution offers a good ' \ + 'trade-off between optimal quality and preprocessing time and memory demands. Standard ' \ + 'structural data with a voxel resolution around 1mm or even data with high in-plane resolution' \ + ' and large slice thickness (e.g. 0.5x0.5x1.5 mm) will benefit from this setting. If you have' \ + ' higher native resolutions the highres option "Fixed 0.8 mm" will sometimes offer slightly' \ + ' better preprocessing quality with an increase of preprocessing time and memory demands. In' \ + ' case of even higher resolutions and high signal-to-noise ratio (e.g. for 7 T data) the ' \ + '"Best native" option will process the data on the highest native resolution. A resolution' \ + ' of 0.4x0.7x1.0 mm will be interpolated to 0.4x0.4x0.4 mm. A tolerance range of 0.1 mm is ' \ + 'used to avoid interpolation artifacts, i.e. a resolution of 0.95x1.01x1.08 mm will not be ' \ + 'interpolated in case of the "Fixed 1.0 mm"! This "optimal" option prefers an isotropic voxel ' \ + 'size with at least 1.1 mm that is controlled by the median voxel size and a volume term that ' \ + 'penalizes highly anisotropic voxels.' 
\ + 'Values:\nOptimal: [1.0 0.1]\nFixed 1.0 mm: [1.0 0.1];\nFixed 0.8 mm:[0.8 0.1]' \ + '\nBest native: [0.5 0.1]' + internal_resampling_process = traits.Tuple(traits.Float(1), traits.Float(0.1), minlen=2, maxlen=2, usedefault=True, + field="extopts.restypes.optimal", desc="help_resampling") + _errors_help = 'Error handling.\nTry to catch preprocessing errors and continue with the next data set or ignore ' \ + 'all warnings (e.g., bad intensities) and use an experimental pipeline which is still in ' \ + 'development. In case of errors, CAT continues with the next subject if this option is enabled. If ' \ + 'the experimental option with backup functions is selected and warnings occur, CAT will try to use' \ + ' backup routines and skip some processing steps which require good T1 contrasts (e.g., LAS). If ' \ + 'you want to avoid processing of critical data and ensure that only the main pipeline is used then' \ + ' select the option "Ignore errors (continue with the next subject)". It is strongly recommended ' \ + 'to check for preprocessing problems, especially with non-T1 contrasts. ' \ + '\nValues:\nnone: 0,\ndefault: 1,\ndetails: 2.' + ignore_errors = traits.Int(1, field="extopts.ignoreErrors", desc=_errors_help, usedefault=True) # Writing options - help_surf = 'Surface and thickness estimation. \nUse projection-based thickness (PBT) (Dahnke et al. 2012) to' \ - ' estimate cortical thickness and to create the central cortical surface for left and right ' \ - 'hemisphere. Surface reconstruction includes topology correction (Yotter et al. 2011), spherical ' \ - 'inflation (Yotter et al.) and spherical registration. Additionally you can also estimate surface ' \ - 'parameters such as gyrification, cortical complexity or sulcal depth that can be subsequently ' \ - 'analyzed at each vertex of the surface. Please note, that surface reconstruction and spherical ' \ - 'registration additionally requires about 20-60 min of computation time. A fast (1-3 min) surface ' \ - 'pipeline is available for visual preview (e.g., to check preprocessing quality) in the ' \ - 'cross-sectional, but not in the longitudinal pipeline. Only the initial surfaces are created with a' \ - ' lower resolution and without topology correction, spherical mapping and surface registration. ' \ - 'Please note that the files with the estimated surface thickness can therefore not be used for ' \ - 'further analysis! For distinction, these files contain "preview" in their filename and they' \ - ' are not available as batch dependencies objects. ' - surface_and_thickness_estimation = traits.trait_types.Int(1, field="surface", desc=help_surf, usedefault=True) - surface_measures = traits.trait_types.Int(1, field="output.surf_measures", usedefault=True) + _help_surf = 'Surface and thickness estimation. \nUse projection-based thickness (PBT) (Dahnke et al. 2012) to' \ + ' estimate cortical thickness and to create the central cortical surface for left and right ' \ + 'hemisphere. Surface reconstruction includes topology correction (Yotter et al. 2011), spherical ' \ + 'inflation (Yotter et al.) and spherical registration. Additionally you can also estimate surface ' \ + 'parameters such as gyrification, cortical complexity or sulcal depth that can be subsequently ' \ + 'analyzed at each vertex of the surface. Please note, that surface reconstruction and spherical ' \ + 'registration additionally requires about 20-60 min of computation time. 
A fast (1-3 min) surface ' \ + 'pipeline is available for visual preview (e.g., to check preprocessing quality) in the ' \ + 'cross-sectional, but not in the longitudinal pipeline. Only the initial surfaces are created with ' \ + 'a lower resolution and without topology correction, spherical mapping and surface registration. ' \ + 'Please note that the files with the estimated surface thickness can therefore not be used for ' \ + 'further analysis! For distinction, these files contain "preview" in their filename and they' \ + ' are not available as batch dependencies objects. ' + surface_and_thickness_estimation = traits.Int(1, field="surface", desc=_help_surf, usedefault=True) + surface_measures = traits.Int(1, field="output.surf_measures", usedefault=True) # Templates - neuromorphometrics = traits.trait_types.Bool(True, field="output.ROImenu.atlases.neuromorphometrics", - usedefault=True) - lpba40 = traits.trait_types.Bool(True, field="output.ROImenu.atlases.lpba40", usedefault=True) - cobra = traits.trait_types.Bool(True, field="output.ROImenu.atlases.hammers", usedefault=True) - hammers = traits.trait_types.Bool(True, field="output.ROImenu.atlases.cobra", usedefault=True) + neuromorphometrics = traits.Bool(True, field="output.ROImenu.atlases.neuromorphometrics", + usedefault=True) + lpba40 = traits.Bool(True, field="output.ROImenu.atlases.lpba40", usedefault=True) + cobra = traits.Bool(True, field="output.ROImenu.atlases.hammers", usedefault=True) + hammers = traits.Bool(True, field="output.ROImenu.atlases.cobra", usedefault=True) own_atlas = InputMultiPath(ImageFileSPM(exists=True), field="output.ROImenu.atlases.ownatlas", desc="Own Atlas", mandatory=False, copyfile=False) - dartel_help = 'This option is to export data into a form that can be used with DARTEL. The SPM default is to ' \ - 'only apply rigid body transformation. However, a more appropriate option is to apply affine ' \ - 'transformation, because the additional scaling of the images requires less deformations to ' \ - 'non-linearly register brains to the template.' + _dartel_help = 'This option is to export data into a form that can be used with DARTEL. The SPM default is to ' \ + 'only apply rigid body transformation. However, a more appropriate option is to apply affine ' \ + 'transformation, because the additional scaling of the images requires less deformations to ' \ + 'non-linearly register brains to the template.' # Grey matter - gm_desc = 'Options to save grey matter images.' - gm_output_native = traits.trait_types.Bool(False, field="output.GM.native", usedefault=True, desc=gm_desc) - gm_output_modulated = traits.trait_types.Bool(True, field="output.GM.mod", usedefault=True, desc=gm_desc) - gm_output_dartel = traits.trait_types.Bool(False, field="output.GM.dartel", usedefault=True, desc=gm_desc) + _gm_desc = 'Options to save grey matter images.' + gm_output_native = traits.Bool(False, field="output.GM.native", usedefault=True, desc=_gm_desc) + gm_output_modulated = traits.Bool(True, field="output.GM.mod", usedefault=True, desc=_gm_desc) + gm_output_dartel = traits.Bool(False, field="output.GM.dartel", usedefault=True, desc=_gm_desc) # White matter - wm_desc = 'Options to save white matter images.' 
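A hedged usage sketch (not part of the patch) of how the output toggles defined above are driven from user code; it assumes CAT12Segment is exported from nipype.interfaces.cat12, that SPM12 with the CAT12 toolbox is reachable from MATLAB, and the input filename is purely illustrative:

from nipype.interfaces.cat12 import CAT12Segment

seg = CAT12Segment()
seg.inputs.in_files = "structural.nii"            # illustrative input volume
seg.inputs.gm_output_native = True                # also write native-space grey matter
seg.inputs.surface_and_thickness_estimation = 0   # skip the 20-60 min surface step
seg.inputs.n_jobs = 4                             # threads passed to CAT's nproc field
seg.run()  # doctest: +SKIP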
- wm_output_native = traits.trait_types.Bool(False, field="output.WM.native", usedefault=True, desc=wm_desc) - wm_output_modulated = traits.trait_types.Bool(True, field="output.WM.mod", usedefault=True, desc=wm_desc) - wm_output_dartel = traits.trait_types.Bool(False, field="output.WM.dartel", usedefault=True, desc=wm_desc) + _wm_desc = 'Options to save white matter images.' + wm_output_native = traits.Bool(False, field="output.WM.native", usedefault=True, desc=_wm_desc) + wm_output_modulated = traits.Bool(True, field="output.WM.mod", usedefault=True, desc=_wm_desc) + wm_output_dartel = traits.Bool(False, field="output.WM.dartel", usedefault=True, desc=_wm_desc) # CSF matter - csf_desc = 'Options to save CSF images.' - csf_output_native = traits.trait_types.Bool(False, field="output.CSF.native", usedefault=True, desc=csf_desc) - csf_output_modulated = traits.trait_types.Bool(True, field="output.CSF.mod", usedefault=True, desc=csf_desc) - csf_output_dartel = traits.trait_types.Bool(False, field="output.CSF.dartel", usedefault=True, desc=csf_desc) + _csf_desc = 'Options to save CSF images.' + csf_output_native = traits.Bool(False, field="output.CSF.native", usedefault=True, desc=_csf_desc) + csf_output_modulated = traits.Bool(True, field="output.CSF.mod", usedefault=True, desc=_csf_desc) + csf_output_dartel = traits.Bool(False, field="output.CSF.dartel", usedefault=True, desc=_csf_desc) # Labels - label_desc = 'This is the option to save a labeled version of your segmentations for fast visual comparision. ' \ - 'Labels are saved as Partial Volume Estimation (PVE) values with different mix classes for GM-WM ' \ - '(2.5) and GM-CSF (1.5). BG=0, CSF=1, GM=2, WM=3, WMH=4 (if WMHC=3), SL=1.5 (if SLC)' - label_native = traits.trait_types.Bool(False, field="output.label.native", usedefault=True, desc=label_desc) - label_warped = traits.trait_types.Bool(True, field="output.label.warped", usedefault=True, desc=label_desc) - label_dartel = traits.trait_types.Bool(False, field="output.label.dartel", usedefault=True, desc=label_desc) - output_labelnative = traits.trait_types.Bool(False, field="output.labelnative", usedefault=True, desc=label_desc) + _help_label_desc = 'This is the option to save a labeled version of your segmentations for fast visual ' \ + 'comparision. Labels are saved as Partial Volume Estimation (PVE) values with different mix ' \ + 'classes for GM-WM (2.5) and GM-CSF (1.5). BG=0, CSF=1, GM=2, WM=3, WMH=4 (if WMHC=3), ' \ + 'SL=1.5 (if SLC)' + label_native = traits.Bool(False, field="output.label.native", usedefault=True, desc=_help_label_desc) + label_warped = traits.Bool(True, field="output.label.warped", usedefault=True, desc=_help_label_desc) + label_dartel = traits.Bool(False, field="output.label.dartel", usedefault=True, desc=_help_label_desc) + output_labelnative = traits.Bool(False, field="output.labelnative", usedefault=True, desc=_help_label_desc) # Bias - save_bias_corrected = traits.trait_types.Bool(True, field="output.bias.warped", usedefault=True) + save_bias_corrected = traits.Bool(True, field="output.bias.warped", usedefault=True) # las - las_desc = 'This is the option to save a bias, noise, and local intensity corrected version of the original T1' \ - ' image. MR images are usually corrupted by a smooth, spatially varying artifact that modulates the' \ - ' intensity of the image (bias). These artifacts, although not usually a problem for visual ' \ - 'inspection, can impede automated processing of the images. 
The bias corrected version should have ' \ - 'more uniform intensities within the different types of tissues and can be saved in native space ' \ - 'and/or normalised. Noise is corrected by an adaptive non-local mean (NLM) filter (Manjon 2008, ' \ - 'Medical Image Analysis 12).' - las_native = traits.trait_types.Bool(False, field="output.las.native", usedefault=True, desc=las_desc) - las_warped = traits.trait_types.Bool(True, field="output.las.warped", usedefault=True, desc=las_desc) - las_dartel = traits.trait_types.Bool(False, field="output.las.dartel", usedefault=True, desc=las_desc) + _las_desc = 'This is the option to save a bias, noise, and local intensity corrected version of the original T1' \ + ' image. MR images are usually corrupted by a smooth, spatially varying artifact that modulates the' \ + ' intensity of the image (bias). These artifacts, although not usually a problem for visual ' \ + 'inspection, can impede automated processing of the images. The bias corrected version should have ' \ + 'more uniform intensities within the different types of tissues and can be saved in native space ' \ + 'and/or normalised. Noise is corrected by an adaptive non-local mean (NLM) filter (Manjon 2008, ' \ + 'Medical Image Analysis 12).' + las_native = traits.Bool(False, field="output.las.native", usedefault=True, desc=_las_desc) + las_warped = traits.Bool(True, field="output.las.warped", usedefault=True, desc=_las_desc) + las_dartel = traits.Bool(False, field="output.las.dartel", usedefault=True, desc=_las_desc) # Jacobian Warped - help_jacobian = 'This is the option to save the Jacobian determinant, which expresses local volume changes. This' \ - ' image can be used in a pure deformation based morphometry (DBM) design. Please note that the' \ - ' affine part of the deformation field is ignored. Thus, there is no need for any additional' \ - ' correction for different brain sizes using ICV.' - jacobianwarped = traits.trait_types.Bool(True, field="output.jacobianwarped", usedefault=True, desc=help_jacobian) + _help_jacobian = 'This is the option to save the Jacobian determinant, which expresses local volume changes. This' \ + ' image can be used in a pure deformation based morphometry (DBM) design. Please note that the' \ + ' affine part of the deformation field is ignored. Thus, there is no need for any additional' \ + ' correction for different brain sizes using ICV.' + jacobianwarped = traits.Bool(True, field="output.jacobianwarped", usedefault=True, desc=_help_jacobian) # Deformation Fields - help_warp = 'Deformation fields can be saved to disk, and used by the Deformations Utility and/or applied to ' \ - 'coregistered data from other modalities (e.g. fMRI). For spatially normalising images to MNI space,' \ - ' you will need the forward deformation, whereas for spatially normalising (eg) GIFTI surface files,' \ - ' you''ll need the inverse. It is also possible to transform data in MNI space on to the individual' \ - ' subject, which also requires the inverse transform. Deformations are saved as .nii files, which' \ - ' contain three volumes to encode the x, y and z coordinates.' 
\ - '\nValues: No:[0 0];\nImage->Template (forward): [1 0];\nTemplate->Image (inverse): [0 1]; ' \ - '\ninverse + forward: [1 1]' - warps = traits.trait_types.Tuple(traits.trait_types.Int(1), traits.trait_types.Int(0), minlen=2, maxlen=2, - field="output.warps", usedefault=True, desc=help_warp) + _help_warp = 'Deformation fields can be saved to disk, and used by the Deformations Utility and/or applied to ' \ + 'coregistered data from other modalities (e.g. fMRI). For spatially normalising images to MNI space,' \ + ' you will need the forward deformation, whereas for spatially normalising (eg) GIFTI surface files,' \ + ' you''ll need the inverse. It is also possible to transform data in MNI space on to the individual' \ + ' subject, which also requires the inverse transform. Deformations are saved as .nii files, which' \ + ' contain three volumes to encode the x, y and z coordinates.' \ + '\nValues: No:[0 0];\nImage->Template (forward): [1 0];\nTemplate->Image (inverse): [0 1]; ' \ + '\ninverse + forward: [1 1]' + warps = traits.Tuple(traits.Int(1), traits.Int(0), minlen=2, maxlen=2, + field="output.warps", usedefault=True, desc=_help_warp) class CAT12SegmentOutputSpec(TraitedSpec): ########################################## # Label XML files ########################################## - label_files = List(File(exists=True), desc="Files with the measures extracted for OI ands ROIs") + label_files = traits.List(File(exists=True), desc="Files with the measures extracted for OI ands ROIs") label_rois = File(exists=True, desc="Files with thickness values of ROIs.") label_roi = File(exists=True, desc="Files with thickness values of ROI.") @@ -253,7 +221,7 @@ class CAT12SegmentOutputSpec(TraitedSpec): # MRI .nii files ########################################## - mri_images = List(File(exists=True), desc="Different segmented images.") + mri_images = traits.List(File(exists=True), desc="Different segmented images.") # Grey Matter gm_modulated_image = File(exists=True, desc="Grey matter modulated image.") @@ -275,7 +243,7 @@ class CAT12SegmentOutputSpec(TraitedSpec): # Surface files ########################################## - surface_files = List(File(exists=True), desc="Surface files") + surface_files = traits.List(File(exists=True), desc="Surface files") # Right hemisphere rh_central_surface = File(exists=True, desc="Central right hemisphere files") @@ -286,11 +254,41 @@ class CAT12SegmentOutputSpec(TraitedSpec): lh_sphere_surface = File(exists=True, desc="Sphere left hemisphere files") # Report files - report_files = List(File(exists=True), desc="Report files.") + report_files = traits.List(File(exists=True), desc="Report files.") report = File(exists=True, desc="Report file.") class CAT12Segment(SPMCommand): + """ + CAT12: Segmentation + This toolbox is an extension to the default segmentation in SPM12, but uses a completely different segmentation + approach. + The segmentation approach is based on an Adaptive Maximum A Posterior (MAP) technique without the need for a priori + information about tissue probabilities. That is, the Tissue Probability Maps (TPM) are not used constantly in the + sense of the classical Unified Segmentation approach (Ashburner et. al. 2005), but just for spatial normalization. + The following AMAP estimation is adaptive in the sense that local variations of the parameters (i.e., means and + variance) are modeled as slowly varying spatial functions (Rajapakse et al. 1997). 
This not only accounts for + intensity inhomogeneities but also for other local variations of intensity. + Additionally, the segmentation approach uses a Partial Volume Estimation (PVE) with a simplified mixed model of at + most two tissue types (Tohka et al. 2004). We start with an initial segmentation into three pure classes: gray + matter (GM), white matter (WM), and cerebrospinal fluid (CSF) based on the above described AMAP estimation. The + initial segmentation is followed by a PVE of two additional mixed classes: GM-WM and GM-CSF. This results in an + estimation of the amount (or fraction) of each pure tissue type present in every voxel (as single voxels - given by + Another important extension to the SPM12 segmentation is the integration of the Dartel or Geodesic Shooting + registration into the toolbox by an already existing Dartel/Shooting template in MNI space. This template was + derived from 555 healthy control subjects of the IXI-database (http://www.brain-development.org) and provides the + several Dartel or Shooting iterations. Thus, for the majority of studies the creation of sample-specific templates + is not necessary anymore and is mainly recommended for children data.'}; + + http://www.neuro.uni-jena.de/cat12/CAT12-Manual.pdf#page=15 + + Examples + -------- + >>> path_mr = 'structural.nii' + >>> cat = CAT12Segment(in_files=path_mr) + >>> cat.run() # doctest: +SKIP + """ + input_spec = CAT12SegmentInputSpec output_spec = CAT12SegmentOutputSpec @@ -317,9 +315,7 @@ def _list_outputs(self): f = self.inputs.in_files[0] pth, base, ext = split_filename(f) - outputs["mri_images"] = [os.path.join(os.path.join(pth, "mri"), f) for f in - os.listdir(os.path.join(pth, "mri")) - if os.path.isfile(os.path.join(os.path.join(pth, "mri"), f))] + outputs["mri_images"] = [str(mri) for mri in Path(pth).glob("mri/*") if mri.is_file()] for tidx, tissue in enumerate(["gm", "wm", "csf"]): @@ -333,27 +329,23 @@ def _list_outputs(self): else: outputs[outfield] = fname_presuffix(f, prefix=prefix, suffix="_rigid") - if isdefined(self.inputs.save_bias_corrected) and self.inputs.save_bias_corrected: + if self.inputs.save_bias_corrected: outputs["bias_corrected_image"] = fname_presuffix(f, prefix=os.path.join("mri", 'wmi')) - outputs["surface_files"] = [os.path.join(os.path.join(pth, "surf"), f) for f in - os.listdir(os.path.join(pth, "surf")) - if os.path.isfile(os.path.join(os.path.join(pth, "surf"), f))] + outputs["surface_files"] = [str(surf) for surf in Path(pth).glob("surf/*") if surf.is_file()] - for tidx, hemisphere in enumerate(["rh", "lh"]): - for idx, suffix in enumerate(["central", "sphere"]): + for hemisphere in ["rh", "lh"]: + for suffix in ["central", "sphere"]: outfield = f'{hemisphere}_{suffix}_surface' outputs[outfield] = fname_presuffix(f, prefix=os.path.join("surf", f'{hemisphere}.{suffix}.'), suffix=".gii", use_ext=False) - outputs["report_files"] = [os.path.join(os.path.join(pth, "report"), f) for f in - os.listdir(os.path.join(pth, "report")) - if os.path.isfile(os.path.join(os.path.join(pth, "report"), f))] + outputs["report_files"] = outputs["report_files"] = [str(report) for report in Path(pth).glob("report/*") + if report.is_file()] + outputs[f'report'] = fname_presuffix(f, prefix=os.path.join("report", f'cat_'), suffix=".xml", use_ext=False) - outputs["label_files"] = [os.path.join(os.path.join(pth, "label"), f) for f in - os.listdir(os.path.join(pth, "label")) - if os.path.isfile(os.path.join(os.path.join(pth, "label"), f))] + outputs["label_files"] = [str(label) for 
label in Path(pth).glob("label/*") if label.is_file()] outputs['label_rois'] = fname_presuffix(f, prefix=os.path.join("label", f'catROIs_'), suffix=".xml", use_ext=False) diff --git a/nipype/interfaces/cat12/surface.py b/nipype/interfaces/cat12/surface.py index 02fe3d906e..dedaa53b7c 100644 --- a/nipype/interfaces/cat12/surface.py +++ b/nipype/interfaces/cat12/surface.py @@ -1,11 +1,9 @@ import os -import sys +from pathlib import Path -import traits from traits.trait_base import _Undefined -from traits.trait_types import List -from nipype.interfaces.base import File, InputMultiPath, TraitedSpec +from nipype.interfaces.base import File, InputMultiPath, TraitedSpec, traits from nipype.interfaces.spm import SPMCommand from nipype.interfaces.spm.base import SPMCommandInputSpec from nipype.utils.filemanip import split_filename @@ -17,37 +15,37 @@ class ExtractAdditionalSurfaceParametersInputSpec(SPMCommandInputSpec): surface_files = InputMultiPath(File(exists=True), desc="All surface files", mandatory=False, copyfile=False) - gyrification = traits.trait_types.Bool(True, field="GI", usedefault=True, + gyrification = traits.Bool(True, field="GI", usedefault=True, desc="Extract gyrification index (GI) based on absolute mean curvature. The" " method is described in Luders et al. Neuroimage, 29:1224-1230, 2006") - gmv = traits.trait_types.Bool(True, field="gmv", usedefault=True, desc="Extract volume") - area = traits.trait_types.Bool(True, field="area", usedefault=True, desc="Extract area surface") - depth = traits.trait_types.Bool(False, field="SD", usedefault=True, + gmv = traits.Bool(True, field="gmv", usedefault=True, desc="Extract volume") + area = traits.Bool(True, field="area", usedefault=True, desc="Extract area surface") + depth = traits.Bool(False, field="SD", usedefault=True, desc="Extract sulcus depth based on euclidian distance between the central " "surface anf its convex hull.") - fractal_dimension = traits.trait_types.Bool(False, field="FD", usedefault=True, + fractal_dimension = traits.Bool(False, field="FD", usedefault=True, desc="Extract cortical complexity (fractal dimension) which is " "described in Yotter ar al. 
Neuroimage, 56(3): 961-973, 2011") class ExtractAdditionalSurfaceParametersOutputSpec(TraitedSpec): - lh_extracted_files = List(File(exists=True), desc="Files of left Hemisphere extracted measures") - rh_extracted_files = List(File(exists=True), desc="Files of right Hemisphere extracted measures") + lh_extracted_files = traits.List(File(exists=True), desc="Files of left Hemisphere extracted measures") + rh_extracted_files = traits.List(File(exists=True), desc="Files of right Hemisphere extracted measures") - lh_gyrification = List(File(exists=True), desc="Gyrification of left Hemisphere") - rh_gyrification = List(File(exists=True), desc="Gyrification of right Hemisphere") + lh_gyrification = traits.List(File(exists=True), desc="Gyrification of left Hemisphere") + rh_gyrification = traits.List(File(exists=True), desc="Gyrification of right Hemisphere") - lh_gmv = List(File(exists=True), desc="Grey matter volume of left Hemisphere") - rh_gmv = List(File(exists=True), desc="Grey matter volume of right Hemisphere") + lh_gmv = traits.List(File(exists=True), desc="Grey matter volume of left Hemisphere") + rh_gmv = traits.List(File(exists=True), desc="Grey matter volume of right Hemisphere") - lh_area = List(File(exists=True), desc="Area of left Hemisphere") - rh_area = List(File(exists=True), desc="Area of right Hemisphere") + lh_area = traits.List(File(exists=True), desc="Area of left Hemisphere") + rh_area = traits.List(File(exists=True), desc="Area of right Hemisphere") - lh_depth = List(File(exists=True), desc="Depth of left Hemisphere") - rh_depth = List(File(exists=True), desc="Depth of right Hemisphere") + lh_depth = traits.List(File(exists=True), desc="Depth of left Hemisphere") + rh_depth = traits.List(File(exists=True), desc="Depth of right Hemisphere") - lh_fractaldimension = List(File(exists=True), desc="Fractal Dimension of left Hemisphere") - rh_fractaldimension = List(File(exists=True), desc="Fractal Dimension of right Hemisphere") + lh_fractaldimension = traits.List(File(exists=True), desc="Fractal Dimension of left Hemisphere") + rh_fractaldimension = traits.List(File(exists=True), desc="Fractal Dimension of right Hemisphere") class ExtractAdditionalSurfaceParameters(SPMCommand): @@ -143,7 +141,7 @@ class ExtractROIBasedSurfaceMeasuresInputSpec(SPMCommandInputSpec): class ExtractROIBasedSurfaceMeasuresOutputSpec(TraitedSpec): - label_files = List(File(exists=True), desc="Files with the measures extracted for ROIs.") + label_files = traits.List(File(exists=True), desc="Files with the measures extracted for ROIs.") class ExtractROIBasedSurfaceMeasures(SPMCommand): @@ -196,9 +194,7 @@ def _list_outputs(self): pth, base, ext = split_filename(self.inputs.lh_surface_measure[0]) - outputs["label_files"] = [os.path.join(os.path.join(pth, "label"), f) for f in - os.listdir(os.path.join(pth, "label")) - if os.path.isfile(os.path.join(os.path.join(pth, "label"), f))] + outputs["label_files"] = [str(label) for label in Path(pth).glob("label/*") if label.is_file()] return outputs From f64bf338f630a9ee5cbe7a3ec98c68292897e720 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 1 Apr 2021 12:26:08 -0400 Subject: [PATCH 0953/1665] STY: black --- nipype/algorithms/metrics.py | 5 +- nipype/algorithms/misc.py | 6 +- nipype/algorithms/modelgen.py | 16 +- nipype/algorithms/rapidart.py | 6 +- nipype/algorithms/tests/test_TSNR.py | 4 +- nipype/algorithms/tests/test_modelgen.py | 6 +- nipype/caching/memory.py | 183 ++++---- nipype/external/cloghandler.py | 18 +- nipype/info.py | 2 +- 
nipype/interfaces/afni/preprocess.py | 206 +++++---- nipype/interfaces/afni/utils.py | 19 +- nipype/interfaces/ants/base.py | 10 +- nipype/interfaces/ants/registration.py | 55 ++- nipype/interfaces/ants/resampling.py | 14 +- nipype/interfaces/base/core.py | 27 +- nipype/interfaces/base/specs.py | 20 +- nipype/interfaces/base/support.py | 6 +- nipype/interfaces/base/traits_extension.py | 7 +- nipype/interfaces/brainsuite/brainsuite.py | 28 +- nipype/interfaces/camino2trackvis/convert.py | 4 +- nipype/interfaces/cmtk/cmtk.py | 5 +- nipype/interfaces/cmtk/convert.py | 2 +- nipype/interfaces/cmtk/parcellation.py | 21 +- nipype/interfaces/dcm2nii.py | 5 +- nipype/interfaces/dcmstack.py | 3 +- nipype/interfaces/diffusion_toolkit/base.py | 2 +- nipype/interfaces/diffusion_toolkit/dti.py | 3 +- nipype/interfaces/diffusion_toolkit/odf.py | 9 +- nipype/interfaces/freesurfer/base.py | 9 +- nipype/interfaces/freesurfer/longitudinal.py | 4 +- nipype/interfaces/freesurfer/model.py | 4 +- nipype/interfaces/freesurfer/preprocess.py | 26 +- nipype/interfaces/freesurfer/registration.py | 6 +- .../freesurfer/tests/test_BBRegister.py | 76 ++-- .../freesurfer/tests/test_preprocess.py | 3 +- nipype/interfaces/fsl/base.py | 4 +- nipype/interfaces/fsl/dti.py | 4 +- nipype/interfaces/fsl/epi.py | 13 +- nipype/interfaces/fsl/maths.py | 44 +- nipype/interfaces/fsl/model.py | 52 ++- nipype/interfaces/fsl/tests/test_FILMGLS.py | 48 +-- .../interfaces/fsl/tests/test_preprocess.py | 7 +- nipype/interfaces/fsl/utils.py | 20 +- nipype/interfaces/image.py | 87 ++-- nipype/interfaces/io.py | 51 ++- nipype/interfaces/minc/base.py | 13 +- nipype/interfaces/minc/minc.py | 124 ++---- nipype/interfaces/minc/testdata.py | 2 +- nipype/interfaces/mixins/fixheader.py | 2 +- nipype/interfaces/mne/base.py | 2 +- nipype/interfaces/mrtrix3/base.py | 2 +- nipype/interfaces/niftyseg/label_fusion.py | 22 +- .../niftyseg/tests/test_label_fusion.py | 2 +- nipype/interfaces/nilearn.py | 6 +- nipype/interfaces/nipy/preprocess.py | 2 +- nipype/interfaces/nitime/tests/test_nitime.py | 2 +- nipype/interfaces/semtools/brains/classify.py | 17 +- .../semtools/brains/segmentation.py | 47 +- .../interfaces/semtools/brains/utilities.py | 33 +- nipype/interfaces/semtools/converters.py | 34 +- .../semtools/diffusion/diffusion.py | 134 +++--- .../interfaces/semtools/diffusion/gtract.py | 408 +++++++++--------- .../semtools/diffusion/maxcurvature.py | 25 +- .../diffusion/tractography/commandlineonly.py | 25 +- .../diffusion/tractography/fiberprocess.py | 27 +- .../diffusion/tractography/fibertrack.py | 31 +- .../diffusion/tractography/ukftractography.py | 15 +- nipype/interfaces/semtools/featurecreator.py | 15 +- .../semtools/filtering/denoising.py | 31 +- .../semtools/filtering/featuredetection.py | 258 ++++++----- .../semtools/legacy/registration.py | 21 +- .../semtools/registration/brainsfit.py | 17 +- .../semtools/registration/brainsresample.py | 17 +- .../semtools/registration/brainsresize.py | 15 +- .../semtools/registration/specialized.py | 49 +-- .../semtools/segmentation/specialized.py | 80 ++-- .../semtools/testing/featuredetection.py | 11 +- .../testing/generateaveragelmkfile.py | 9 +- .../semtools/testing/landmarkscompare.py | 9 +- .../interfaces/semtools/utilities/brains.py | 244 +++++------ nipype/interfaces/slicer/converters.py | 32 +- .../interfaces/slicer/diffusion/diffusion.py | 153 ++++--- .../interfaces/slicer/filtering/arithmetic.py | 79 ++-- .../slicer/filtering/checkerboardfilter.py | 15 +- 
.../interfaces/slicer/filtering/denoising.py | 66 ++- .../slicer/filtering/extractskeleton.py | 15 +- .../slicer/filtering/histogrammatching.py | 21 +- .../slicer/filtering/imagelabelcombine.py | 13 +- .../interfaces/slicer/filtering/morphology.py | 48 +-- .../filtering/n4itkbiasfieldcorrection.py | 15 +- .../resamplescalarvectordwivolume.py | 19 +- .../slicer/filtering/thresholdscalarvolume.py | 15 +- .../votingbinaryholefillingimagefilter.py | 15 +- nipype/interfaces/slicer/generate_classes.py | 28 +- nipype/interfaces/slicer/legacy/converters.py | 15 +- .../slicer/legacy/diffusion/denoising.py | 23 +- nipype/interfaces/slicer/legacy/filtering.py | 34 +- .../interfaces/slicer/legacy/registration.py | 102 +++-- .../interfaces/slicer/legacy/segmentation.py | 15 +- .../quantification/changequantification.py | 19 +- .../petstandarduptakevaluecomputation.py | 15 +- .../slicer/registration/brainsfit.py | 17 +- .../slicer/registration/brainsresample.py | 19 +- .../slicer/registration/specialized.py | 70 ++- .../simpleregiongrowingsegmentation.py | 15 +- .../slicer/segmentation/specialized.py | 49 +-- nipype/interfaces/slicer/surface.py | 96 ++--- nipype/interfaces/slicer/utilities.py | 13 +- nipype/interfaces/spm/base.py | 2 +- nipype/interfaces/spm/model.py | 37 +- nipype/interfaces/spm/preprocess.py | 94 ++-- nipype/interfaces/spm/utils.py | 17 +- nipype/interfaces/tests/test_io.py | 3 +- nipype/interfaces/tests/test_nilearn.py | 2 +- .../interfaces/utility/tests/test_wrappers.py | 6 +- nipype/pipeline/engine/nodes.py | 3 +- nipype/pipeline/engine/tests/test_engine.py | 3 +- nipype/pipeline/engine/utils.py | 20 +- nipype/pipeline/engine/workflows.py | 37 +- nipype/pipeline/plugins/base.py | 23 +- nipype/pipeline/plugins/debug.py | 3 +- nipype/pipeline/plugins/ipython.py | 3 +- nipype/pipeline/plugins/linear.py | 3 +- nipype/pipeline/plugins/sge.py | 26 +- nipype/pipeline/plugins/somaflow.py | 3 +- nipype/pipeline/plugins/tools.py | 3 +- nipype/pkg_info.py | 4 +- nipype/sphinxext/apidoc/__init__.py | 2 +- nipype/sphinxext/gh.py | 2 +- nipype/testing/decorators.py | 2 +- nipype/utils/draw_gantt_chart.py | 3 +- nipype/utils/filemanip.py | 10 +- nipype/utils/logger.py | 3 +- nipype/utils/onetime.py | 15 +- setup.py | 2 +- 135 files changed, 1961 insertions(+), 2307 deletions(-) diff --git a/nipype/algorithms/metrics.py b/nipype/algorithms/metrics.py index 611ec57af5..e399becb65 100644 --- a/nipype/algorithms/metrics.py +++ b/nipype/algorithms/metrics.py @@ -63,8 +63,7 @@ class DistanceOutputSpec(TraitedSpec): class Distance(BaseInterface): - """Calculates distance between two volumes. - """ + """Calculates distance between two volumes.""" input_spec = DistanceInputSpec output_spec = DistanceOutputSpec @@ -542,7 +541,7 @@ class ErrorMapOutputSpec(TraitedSpec): class ErrorMap(BaseInterface): - """ Calculates the error (distance) map between two input volumes. + """Calculates the error (distance) map between two input volumes. 
Example ------- diff --git a/nipype/algorithms/misc.py b/nipype/algorithms/misc.py index 50ae18f5be..dac75d960f 100644 --- a/nipype/algorithms/misc.py +++ b/nipype/algorithms/misc.py @@ -141,8 +141,7 @@ class SimpleThresholdOutputSpec(TraitedSpec): class SimpleThreshold(BaseInterface): - """Applies a threshold to input volumes - """ + """Applies a threshold to input volumes""" input_spec = SimpleThresholdInputSpec output_spec = SimpleThresholdOutputSpec @@ -240,8 +239,7 @@ class CreateNiftiOutputSpec(TraitedSpec): class CreateNifti(BaseInterface): - """Creates a nifti volume - """ + """Creates a nifti volume""" input_spec = CreateNiftiInputSpec output_spec = CreateNiftiOutputSpec diff --git a/nipype/algorithms/modelgen.py b/nipype/algorithms/modelgen.py index 29f31c60c0..a04ef04b3a 100644 --- a/nipype/algorithms/modelgen.py +++ b/nipype/algorithms/modelgen.py @@ -135,7 +135,7 @@ def scale_timings(timelist, input_units, output_units, time_repetition): def bids_gen_info( - bids_event_files, condition_column="", amplitude_column=None, time_repetition=False, + bids_event_files, condition_column="", amplitude_column=None, time_repetition=False ): """ Generate a subject_info structure from a list of BIDS .tsv event files. @@ -471,8 +471,7 @@ def _generate_standard_design( return sessinfo def _generate_design(self, infolist=None): - """Generate design specification for a typical fmri paradigm - """ + """Generate design specification for a typical fmri paradigm""" realignment_parameters = [] if isdefined(self.inputs.realignment_parameters): for parfile in self.inputs.realignment_parameters: @@ -517,8 +516,7 @@ def _generate_design(self, infolist=None): ) def _run_interface(self, runtime): - """ - """ + """""" self._sessioninfo = None self._generate_design() return runtime @@ -739,7 +737,7 @@ class SpecifySparseModelOutputSpec(SpecifyModelOutputSpec): class SpecifySparseModel(SpecifyModel): - """ Specify a sparse model that is compatible with SPM/FSL designers [1]_. + """Specify a sparse model that is compatible with SPM/FSL designers [1]_. Examples -------- @@ -770,8 +768,7 @@ class SpecifySparseModel(SpecifyModel): output_spec = SpecifySparseModelOutputSpec def _gen_regress(self, i_onsets, i_durations, i_amplitudes, nscans): - """Generates a regressor for a sparse/clustered-sparse acquisition - """ + """Generates a regressor for a sparse/clustered-sparse acquisition""" bplot = False if isdefined(self.inputs.save_plot) and self.inputs.save_plot: bplot = True @@ -901,8 +898,7 @@ def _gen_regress(self, i_onsets, i_durations, i_amplitudes, nscans): return reg def _cond_to_regress(self, info, nscans): - """Converts condition information to full regressors - """ + """Converts condition information to full regressors""" reg = [] regnames = [] for i, cond in enumerate(info.conditions): diff --git a/nipype/algorithms/rapidart.py b/nipype/algorithms/rapidart.py index b3cdbc8a23..15e691856a 100644 --- a/nipype/algorithms/rapidart.py +++ b/nipype/algorithms/rapidart.py @@ -686,8 +686,7 @@ def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None): save_json(statsfile, stats) def _run_interface(self, runtime): - """Execute this module. - """ + """Execute this module.""" funcfilelist = ensure_list(self.inputs.realigned_files) motparamlist = ensure_list(self.inputs.realignment_parameters) for i, imgf in enumerate(funcfilelist): @@ -814,8 +813,7 @@ def _get_spm_submatrix(self, spmmat, sessidx, rows=None): return outmatrix def _run_interface(self, runtime): - """Execute this module. 
- """ + """Execute this module.""" import scipy.io as sio motparamlist = self.inputs.realignment_parameters diff --git a/nipype/algorithms/tests/test_TSNR.py b/nipype/algorithms/tests/test_TSNR.py index b9de248155..7da36d3661 100644 --- a/nipype/algorithms/tests/test_TSNR.py +++ b/nipype/algorithms/tests/test_TSNR.py @@ -93,8 +93,8 @@ def test_tsnr_withpoly3(self): @mock.patch("warnings.warn") def test_warning(self, mock_warn): - """ test that usage of misc.TSNR trips a warning to use - confounds.TSNR instead """ + """test that usage of misc.TSNR trips a warning to use + confounds.TSNR instead""" # run misc.TSNR(in_file=self.in_filenames["in_file"]) diff --git a/nipype/algorithms/tests/test_modelgen.py b/nipype/algorithms/tests/test_modelgen.py index a2c85f6747..5931fd894e 100644 --- a/nipype/algorithms/tests/test_modelgen.py +++ b/nipype/algorithms/tests/test_modelgen.py @@ -105,7 +105,7 @@ def test_modelgen1(tmpdir): np.array([6.0, 6.0]), ) npt.assert_almost_equal( - np.array(res.outputs.session_info[0]["cond"][1]["duration"]), np.array([6.0,]) + np.array(res.outputs.session_info[0]["cond"][1]["duration"]), np.array([6.0]) ) npt.assert_almost_equal( np.array(res.outputs.session_info[1]["cond"][1]["duration"]), @@ -191,14 +191,14 @@ def test_modelgen_spm_concat(tmpdir): np.array([1.0, 1.0]), ) npt.assert_almost_equal( - np.array(res.outputs.session_info[0]["cond"][1]["duration"]), np.array([1.0,]) + np.array(res.outputs.session_info[0]["cond"][1]["duration"]), np.array([1.0]) ) npt.assert_almost_equal( np.array(res.outputs.session_info[1]["cond"][1]["duration"]), np.array([1.0, 1.0]), ) npt.assert_almost_equal( - np.array(res.outputs.session_info[2]["cond"][1]["duration"]), np.array([1.0,]) + np.array(res.outputs.session_info[2]["cond"][1]["duration"]), np.array([1.0]) ) # Test case for variable number of events in concatenated runs, sometimes unique. diff --git a/nipype/caching/memory.py b/nipype/caching/memory.py index 4f773f0c3c..34d5ac1927 100644 --- a/nipype/caching/memory.py +++ b/nipype/caching/memory.py @@ -19,28 +19,28 @@ class PipeFunc(object): - """ Callable interface to nipype.interface objects + """Callable interface to nipype.interface objects - Use this to wrap nipype.interface object and call them - specifying their input with keyword arguments:: + Use this to wrap nipype.interface object and call them + specifying their input with keyword arguments:: - fsl_merge = PipeFunc(fsl.Merge, base_dir='.') - out = fsl_merge(in_files=files, dimension='t') + fsl_merge = PipeFunc(fsl.Merge, base_dir='.') + out = fsl_merge(in_files=files, dimension='t') """ def __init__(self, interface, base_dir, callback=None): """ - Parameters - =========== - interface: a nipype interface class - The interface class to wrap - base_dir: a string - The directory in which the computation will be - stored - callback: a callable - An optional callable called each time after the function - is called. + Parameters + =========== + interface: a nipype interface class + The interface class to wrap + base_dir: a string + The directory in which the computation will be + stored + callback: a callable + An optional callable called each time after the function + is called. 
""" if not (isinstance(interface, type) and issubclass(interface, BaseInterface)): raise ValueError( @@ -113,14 +113,14 @@ def read_log(filename, run_dict=None): def rm_all_but(base_dir, dirs_to_keep, warn=False): - """ Remove all the sub-directories of base_dir, but those listed - - Parameters - ============ - base_dir: string - The base directory - dirs_to_keep: set - The names of the directories to keep + """Remove all the sub-directories of base_dir, but those listed + + Parameters + ============ + base_dir: string + The base directory + dirs_to_keep: set + The names of the directories to keep """ try: all_dirs = os.listdir(base_dir) @@ -148,23 +148,23 @@ def __call__(self, dir_name, job_name): class Memory(object): - """ Memory context to provide caching for interfaces - - Parameters - ========== - base_dir: string - The directory name of the location for the caching - - Methods - ======= - cache - Creates a cacheable function from an nipype Interface class - clear_previous_runs - Removes from the disk all the runs that where not used after - the creation time of the specific Memory instance - clear_previous_runs - Removes from the disk all the runs that where not used after - the given time + """Memory context to provide caching for interfaces + + Parameters + ========== + base_dir: string + The directory name of the location for the caching + + Methods + ======= + cache + Creates a cacheable function from an nipype Interface class + clear_previous_runs + Removes from the disk all the runs that where not used after + the creation time of the specific Memory instance + clear_previous_runs + Removes from the disk all the runs that where not used after + the given time """ def __init__(self, base_dir): @@ -177,49 +177,48 @@ def __init__(self, base_dir): open(os.path.join(base_dir, "log.current"), "a").close() def cache(self, interface): - """ Returns a callable that caches the output of an interface + """Returns a callable that caches the output of an interface - Parameters - ========== - interface: nipype interface - The nipype interface class to be wrapped and cached + Parameters + ========== + interface: nipype interface + The nipype interface class to be wrapped and cached - Returns - ======= - pipe_func: a PipeFunc callable object - An object that can be used as a function to apply the - interface to arguments. Inputs of the interface are given - as keyword arguments, bearing the same name as the name - in the inputs specs of the interface. + Returns + ======= + pipe_func: a PipeFunc callable object + An object that can be used as a function to apply the + interface to arguments. Inputs of the interface are given + as keyword arguments, bearing the same name as the name + in the inputs specs of the interface. - Examples - ======== + Examples + ======== - >>> from tempfile import mkdtemp - >>> mem = Memory(mkdtemp()) - >>> from nipype.interfaces import fsl + >>> from tempfile import mkdtemp + >>> mem = Memory(mkdtemp()) + >>> from nipype.interfaces import fsl - Here we create a callable that can be used to apply an - fsl.Merge interface to files + Here we create a callable that can be used to apply an + fsl.Merge interface to files - >>> fsl_merge = mem.cache(fsl.Merge) + >>> fsl_merge = mem.cache(fsl.Merge) - Now we apply it to a list of files. We need to specify the - list of input files and the dimension along which the files - should be merged. + Now we apply it to a list of files. 
We need to specify the + list of input files and the dimension along which the files + should be merged. - >>> results = fsl_merge(in_files=['a.nii', 'b.nii'], - ... dimension='t') # doctest: +SKIP + >>> results = fsl_merge(in_files=['a.nii', 'b.nii'], + ... dimension='t') # doctest: +SKIP - We can retrieve the resulting file from the outputs: - >>> results.outputs.merged_file # doctest: +SKIP - '...' + We can retrieve the resulting file from the outputs: + >>> results.outputs.merged_file # doctest: +SKIP + '...' """ return PipeFunc(interface, self.base_dir, _MemoryCallback(self)) def _log_name(self, dir_name, job_name): - """ Increment counters tracking which cached function get executed. - """ + """Increment counters tracking which cached function get executed.""" base_dir = self.base_dir # Every counter is a file opened in append mode and closed # immediately to avoid race conditions in parallel computing: @@ -243,32 +242,32 @@ def _log_name(self, dir_name, job_name): rotatefile.write("%s/%s\n" % (dir_name, job_name)) def clear_previous_runs(self, warn=True): - """ Remove all the cache that where not used in the latest run of - the memory object: i.e. since the corresponding Python object - was created. - - Parameters - ========== - warn: boolean, optional - If true, echoes warning messages for all directory - removed + """Remove all the cache that where not used in the latest run of + the memory object: i.e. since the corresponding Python object + was created. + + Parameters + ========== + warn: boolean, optional + If true, echoes warning messages for all directory + removed """ base_dir = self.base_dir latest_runs = read_log(os.path.join(base_dir, "log.current")) self._clear_all_but(latest_runs, warn=warn) def clear_runs_since(self, day=None, month=None, year=None, warn=True): - """ Remove all the cache that where not used since the given date - - Parameters - ========== - day, month, year: integers, optional - The integers specifying the latest day (in localtime) that - a node should have been accessed to be kept. If not - given, the current date is used. - warn: boolean, optional - If true, echoes warning messages for all directory - removed + """Remove all the cache that where not used since the given date + + Parameters + ========== + day, month, year: integers, optional + The integers specifying the latest day (in localtime) that + a node should have been accessed to be kept. If not + given, the current date is used. + warn: boolean, optional + If true, echoes warning messages for all directory + removed """ t = time.localtime() day = day if day is not None else t.tm_mday @@ -288,8 +287,8 @@ def clear_runs_since(self, day=None, month=None, year=None, warn=True): os.remove(log_name) def _clear_all_but(self, runs, warn=True): - """ Remove all the runs appart from those given to the function - input. + """Remove all the runs appart from those given to the function + input. 
""" rm_all_but(self.base_dir, set(runs.keys()), warn=warn) for dir_name, job_names in list(runs.items()): diff --git a/nipype/external/cloghandler.py b/nipype/external/cloghandler.py index c5ee9d7a6f..c25670e600 100644 --- a/nipype/external/cloghandler.py +++ b/nipype/external/cloghandler.py @@ -44,9 +44,7 @@ __version__ = "$Id: cloghandler.py 6175 2009-11-02 18:40:35Z lowell $" __author__ = "Lowell Alleman" -__all__ = [ - "ConcurrentRotatingFileHandler", -] +__all__ = ["ConcurrentRotatingFileHandler"] import os import sys @@ -175,8 +173,8 @@ def _openFile(self, mode): self.stream = open(self.baseFilename, mode) def acquire(self): - """ Acquire thread and file locks. Also re-opening log file when running - in 'degraded' mode. """ + """Acquire thread and file locks. Also re-opening log file when running + in 'degraded' mode.""" # handle thread lock Handler.acquire(self) self.stream_lock.acquire() @@ -184,8 +182,8 @@ def acquire(self): self._openFile(self.mode) def release(self): - """ Release file and thread locks. Flush stream and take care of closing - stream in 'degraded' mode. """ + """Release file and thread locks. Flush stream and take care of closing + stream in 'degraded' mode.""" try: if not self.stream.closed: self.stream.flush() @@ -211,7 +209,7 @@ def close(self): Handler.close(self) def flush(self): - """ flush(): Do nothing. + """flush(): Do nothing. Since a flush is issued in release(), we don't do it here. To do a flush here, it would be necessary to re-lock everything, and it is just easier @@ -220,7 +218,7 @@ def flush(self): Doing a flush() here would also introduces a window of opportunity for another process to write to the log file in between calling - stream.write() and stream.flush(), which seems like a bad thing. """ + stream.write() and stream.flush(), which seems like a bad thing.""" pass def _degrade(self, degrade, msg, *args): @@ -229,7 +227,7 @@ def _degrade(self, degrade, msg, *args): del msg, args # avoid pychecker warnings def _degrade_debug(self, degrade, msg, *args): - """ A more colorful version of _degade(). (This is enabled by passing + """A more colorful version of _degade(). (This is enabled by passing "debug=True" at initialization). 
""" if degrade: diff --git a/nipype/info.py b/nipype/info.py index ef0817ba36..f5a43334c9 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -137,7 +137,7 @@ def get_nipype_gitversion(): "click>=%s" % CLICK_MIN_VERSION, "networkx>=%s" % NETWORKX_MIN_VERSION, "nibabel>=%s" % NIBABEL_MIN_VERSION, - 'numpy>=%s' % NUMPY_MIN_VERSION, + "numpy>=%s" % NUMPY_MIN_VERSION, "packaging", "prov>=%s" % PROV_VERSION, "pydot>=%s" % PYDOT_MIN_VERSION, diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index ada49fc9ab..fad5cbdf2f 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -37,8 +37,7 @@ class CentralityInputSpec(AFNICommandInputSpec): - """Common input spec class for all centrality-related commands - """ + """Common input spec class for all centrality-related commands""" mask = File(desc="mask file to mask input data", argstr="-mask %s", exists=True) thresh = traits.Float( @@ -1163,8 +1162,7 @@ def aggregate_outputs(self, runtime=None, needed_outputs=None): class DegreeCentralityInputSpec(CentralityInputSpec): - """DegreeCentrality inputspec - """ + """DegreeCentrality inputspec""" in_file = File( desc="input file to 3dDegreeCentrality", @@ -1183,8 +1181,7 @@ class DegreeCentralityInputSpec(CentralityInputSpec): class DegreeCentralityOutputSpec(AFNICommandOutputSpec): - """DegreeCentrality outputspec - """ + """DegreeCentrality outputspec""" oned_file = File( desc="The text output of the similarity matrix computed after " @@ -1310,8 +1307,7 @@ class Detrend(AFNICommand): class ECMInputSpec(CentralityInputSpec): - """ECM inputspec - """ + """ECM inputspec""" in_file = File( desc="input file to 3dECM", @@ -1583,8 +1579,7 @@ def _list_outputs(self): class LFCDInputSpec(CentralityInputSpec): - """LFCD inputspec - """ + """LFCD inputspec""" in_file = File( desc="input file to 3dLFCD", @@ -2561,120 +2556,135 @@ class NetCorrInputSpec(AFNICommandInputSpec): desc="input time series file (4D data set)", exists=True, argstr="-inset %s", - mandatory=True) + mandatory=True, + ) in_rois = File( desc="input set of ROIs, each labelled with distinct integers", exists=True, argstr="-in_rois %s", - mandatory=True) + mandatory=True, + ) mask = File( desc="can include a whole brain mask within which to " - "calculate correlation. Otherwise, data should be " - "masked already", + "calculate correlation. Otherwise, data should be " + "masked already", exists=True, - argstr="-mask %s") + argstr="-mask %s", + ) weight_ts = File( desc="input a 1D file WTS of weights that will be applied " - "multiplicatively to each ROI's average time series. " - "WTS can be a column- or row-file of values, but it " - "must have the same length as the input time series " - "volume. " - "If the initial average time series was A[n] for " - "n=0,..,(N-1) time points, then applying a set of " - "weights W[n] of the same length from WTS would " - "produce a new time series: B[n] = A[n] * W[n]", + "multiplicatively to each ROI's average time series. " + "WTS can be a column- or row-file of values, but it " + "must have the same length as the input time series " + "volume. 
" + "If the initial average time series was A[n] for " + "n=0,..,(N-1) time points, then applying a set of " + "weights W[n] of the same length from WTS would " + "produce a new time series: B[n] = A[n] * W[n]", exists=True, - argstr="-weight_ts %s") + argstr="-weight_ts %s", + ) fish_z = traits.Bool( desc="switch to also output a matrix of Fisher Z-transform " - "values for the corr coefs (r): " - "Z = atanh(r) , " - "(with Z=4 being output along matrix diagonals where " - "r=1, as the r-to-Z conversion is ceilinged at " - "Z = atanh(r=0.999329) = 4, which is still *quite* a " - "high Pearson-r value", - argstr="-fish_z") + "values for the corr coefs (r): " + "Z = atanh(r) , " + "(with Z=4 being output along matrix diagonals where " + "r=1, as the r-to-Z conversion is ceilinged at " + "Z = atanh(r=0.999329) = 4, which is still *quite* a " + "high Pearson-r value", + argstr="-fish_z", + ) part_corr = traits.Bool( - desc="output the partial correlation matrix", - argstr="-part_corr") + desc="output the partial correlation matrix", argstr="-part_corr" + ) ts_out = traits.Bool( desc="switch to output the mean time series of the ROIs that " - "have been used to generate the correlation matrices. " - "Output filenames mirror those of the correlation " - "matrix files, with a '.netts' postfix", - argstr="-ts_out") + "have been used to generate the correlation matrices. " + "Output filenames mirror those of the correlation " + "matrix files, with a '.netts' postfix", + argstr="-ts_out", + ) ts_label = traits.Bool( desc="additional switch when using '-ts_out'. Using this " - "option will insert the integer ROI label at the start " - "of each line of the *.netts file created. Thus, for " - "a time series of length N, each line will have N+1 " - "numbers, where the first is the integer ROI label " - "and the subsequent N are scientific notation values", - argstr="-ts_label") + "option will insert the integer ROI label at the start " + "of each line of the *.netts file created. Thus, for " + "a time series of length N, each line will have N+1 " + "numbers, where the first is the integer ROI label " + "and the subsequent N are scientific notation values", + argstr="-ts_label", + ) ts_indiv = traits.Bool( desc="switch to create a directory for each network that " - "contains the average time series for each ROI in " - "individual files (each file has one line). " - "The directories are labelled PREFIX_000_INDIV/, " - "PREFIX_001_INDIV/, etc. (one per network). Within each " - "directory, the files are labelled ROI_001.netts, " - "ROI_002.netts, etc., with the numbers given by the " - "actual ROI integer labels", - argstr="-ts_indiv") + "contains the average time series for each ROI in " + "individual files (each file has one line). " + "The directories are labelled PREFIX_000_INDIV/, " + "PREFIX_001_INDIV/, etc. (one per network). Within each " + "directory, the files are labelled ROI_001.netts, " + "ROI_002.netts, etc., with the numbers given by the " + "actual ROI integer labels", + argstr="-ts_indiv", + ) ts_wb_corr = traits.Bool( desc="switch to create a set of whole brain correlation maps. " - "Performs whole brain correlation for each " - "ROI's average time series; this will automatically " - "create a directory for each network that contains the " - "set of whole brain correlation maps (Pearson 'r's). 
" - "The directories are labelled as above for '-ts_indiv' " - "Within each directory, the files are labelled " - "WB_CORR_ROI_001+orig, WB_CORR_ROI_002+orig, etc., with " - "the numbers given by the actual ROI integer labels", - argstr="-ts_wb_corr") + "Performs whole brain correlation for each " + "ROI's average time series; this will automatically " + "create a directory for each network that contains the " + "set of whole brain correlation maps (Pearson 'r's). " + "The directories are labelled as above for '-ts_indiv' " + "Within each directory, the files are labelled " + "WB_CORR_ROI_001+orig, WB_CORR_ROI_002+orig, etc., with " + "the numbers given by the actual ROI integer labels", + argstr="-ts_wb_corr", + ) ts_wb_Z = traits.Bool( desc="same as above in '-ts_wb_corr', except that the maps " - "have been Fisher transformed to Z-scores the relation: " - "Z=atanh(r). " - "To avoid infinities in the transform, Pearson values " - "are effectively capped at |r| = 0.999329 (where |Z| = 4.0). " - "Files are labelled WB_Z_ROI_001+orig, etc", - argstr="-ts_wb_Z") + "have been Fisher transformed to Z-scores the relation: " + "Z=atanh(r). " + "To avoid infinities in the transform, Pearson values " + "are effectively capped at |r| = 0.999329 (where |Z| = 4.0). " + "Files are labelled WB_Z_ROI_001+orig, etc", + argstr="-ts_wb_Z", + ) ts_wb_strlabel = traits.Bool( desc="by default, '-ts_wb_{corr,Z}' output files are named " - "using the int number of a given ROI, such as: " - "WB_Z_ROI_001+orig. " - "With this option, one can replace the int (such as '001') " - "with the string label (such as 'L-thalamus') " - "*if* one has a labeltable attached to the file", - argstr="-ts_wb_strlabel") + "using the int number of a given ROI, such as: " + "WB_Z_ROI_001+orig. " + "With this option, one can replace the int (such as '001') " + "with the string label (such as 'L-thalamus') " + "*if* one has a labeltable attached to the file", + argstr="-ts_wb_strlabel", + ) nifti = traits.Bool( desc="output any correlation map files as NIFTI files " - "(default is BRIK/HEAD). Only useful if using " - "'-ts_wb_corr' and/or '-ts_wb_Z'", - argstr="-nifti") + "(default is BRIK/HEAD). Only useful if using " + "'-ts_wb_corr' and/or '-ts_wb_Z'", + argstr="-nifti", + ) output_mask_nonnull = traits.Bool( desc="internally, this program checks for where there are " - "nonnull time series, because we don't like those, in " - "general. With this flag, the user can output the " - "determined mask of non-null time series.", - argstr="-output_mask_nonnull") + "nonnull time series, because we don't like those, in " + "general. With this flag, the user can output the " + "determined mask of non-null time series.", + argstr="-output_mask_nonnull", + ) push_thru_many_zeros = traits.Bool( desc="by default, this program will grind to a halt and " - "refuse to calculate if any ROI contains >10 percent " - "of voxels with null times series (i.e., each point is " - "0), as of April, 2017. This is because it seems most " - "likely that hidden badness is responsible. However, " - "if the user still wants to carry on the calculation " - "anyways, then this option will allow one to push on " - "through. However, if any ROI *only* has null time " - "series, then the program will not calculate and the " - "user will really, really, really need to address their masking", - argstr="-push_thru_many_zeros") + "refuse to calculate if any ROI contains >10 percent " + "of voxels with null times series (i.e., each point is " + "0), as of April, 2017. 
This is because it seems most " + "likely that hidden badness is responsible. However, " + "if the user still wants to carry on the calculation " + "anyways, then this option will allow one to push on " + "through. However, if any ROI *only* has null time " + "series, then the program will not calculate and the " + "user will really, really, really need to address their masking", + argstr="-push_thru_many_zeros", + ) ignore_LT = traits.Bool( desc="switch to ignore any label table labels in the " - "'-in_rois' file, if there are any labels attached", - argstr="-ignore_LT") + "'-in_rois' file, if there are any labels attached", + argstr="-ignore_LT", + ) out_file = File( desc="output file name part", name_template="%s_netcorr", @@ -2683,10 +2693,16 @@ class NetCorrInputSpec(AFNICommandInputSpec): name_source="in_file", ) + class NetCorrOutputSpec(TraitedSpec): - out_corr_matrix = File(desc="output correlation matrix between ROIs written to a text file with .netcc suffix") - out_corr_maps = traits.List(File(), desc="output correlation maps in Pearson and/or Z-scores") - + out_corr_matrix = File( + desc="output correlation matrix between ROIs written to a text file with .netcc suffix" + ) + out_corr_maps = traits.List( + File(), desc="output correlation maps in Pearson and/or Z-scores" + ) + + class NetCorr(AFNICommand): """Calculate correlation matrix of a set of ROIs (using mean time series of each). Several networks may be analyzed simultaneously, one per brick. @@ -2717,7 +2733,7 @@ class NetCorr(AFNICommand): def _list_outputs(self): import glob - + outputs = self.output_spec().get() if not isdefined(self.inputs.out_file): diff --git a/nipype/interfaces/afni/utils.py b/nipype/interfaces/afni/utils.py index 8ece6a8765..e4b6f37778 100644 --- a/nipype/interfaces/afni/utils.py +++ b/nipype/interfaces/afni/utils.py @@ -496,8 +496,7 @@ def _format_arg(self, name, trait_spec, value): return super(Calc, self)._format_arg(name, trait_spec, value) def _parse_inputs(self, skip=None): - """Skip the arguments without argstr metadata - """ + """Skip the arguments without argstr metadata""" return super(Calc, self)._parse_inputs(skip=("start_idx", "stop_idx", "other")) @@ -1130,8 +1129,7 @@ def _format_arg(self, name, trait_spec, value): return super(Eval, self)._format_arg(name, trait_spec, value) def _parse_inputs(self, skip=None): - """Skip the arguments without argstr metadata - """ + """Skip the arguments without argstr metadata""" return super(Eval, self)._parse_inputs(skip=("start_idx", "stop_idx", "other")) @@ -1354,7 +1352,7 @@ class FWHMx(AFNICommandBase): "}" ), "tags": ["method"], - }, + } ] _acf = True @@ -2325,8 +2323,7 @@ class OneDToolPy(AFNIPythonCommand): >>> odt.inputs.out_file = 'motion_dmean.1D' >>> odt.cmdline # doctest: +ELLIPSIS 'python2 ...1d_tool.py -demean -infile f1.1D -write motion_dmean.1D -set_nruns 3' - >>> res = odt.run() # doctest: +SKIP -""" + >>> res = odt.run() # doctest: +SKIP""" _cmd = "1d_tool.py" @@ -2591,11 +2588,7 @@ def _list_outputs(self): return outputs def _format_arg(self, name, spec, value): - _neigh_dict = { - "faces": 7, - "edges": 19, - "vertices": 27, - } + _neigh_dict = {"faces": 7, "edges": 19, "vertices": 27} if name == "neighborhood": value = _neigh_dict[value] return super(ReHo, self)._format_arg(name, spec, value) @@ -2892,7 +2885,7 @@ class To3D(AFNICommand): 'to3d -datum float -anat -prefix dicomdir.nii ./*.dcm' >>> res = to3d.run() # doctest: +SKIP - """ + """ _cmd = "to3d" input_spec = To3DInputSpec diff --git 
a/nipype/interfaces/ants/base.py b/nipype/interfaces/ants/base.py index 4b5e5ef8db..a3b0109c7b 100644 --- a/nipype/interfaces/ants/base.py +++ b/nipype/interfaces/ants/base.py @@ -53,8 +53,7 @@ def parse_version(raw_info): class ANTSCommandInputSpec(CommandLineInputSpec): - """Base Input Specification for all ANTS Commands - """ + """Base Input Specification for all ANTS Commands""" num_threads = traits.Int( LOCAL_DEFAULT_NUMBER_OF_THREADS, @@ -65,8 +64,7 @@ class ANTSCommandInputSpec(CommandLineInputSpec): class ANTSCommand(CommandLine): - """Base class for ANTS interfaces - """ + """Base class for ANTS interfaces""" input_spec = ANTSCommandInputSpec _num_threads = LOCAL_DEFAULT_NUMBER_OF_THREADS @@ -103,8 +101,8 @@ def _num_threads_update(self): @staticmethod def _format_xarray(val): - """ Convenience method for converting input arrays [1,2,3] to - commandline format '1x2x3' """ + """Convenience method for converting input arrays [1,2,3] to + commandline format '1x2x3'""" return "x".join([str(x) for x in val]) @classmethod diff --git a/nipype/interfaces/ants/registration.py b/nipype/interfaces/ants/registration.py index 47b8d53cfc..049d5c5882 100644 --- a/nipype/interfaces/ants/registration.py +++ b/nipype/interfaces/ants/registration.py @@ -1289,11 +1289,15 @@ def _format_arg(self, opt, spec, val): do_center_of_mass_init, ) elif opt == "interpolation": - if self.inputs.interpolation in [ - "BSpline", - "MultiLabel", - "Gaussian", - ] and isdefined(self.inputs.interpolation_parameters): + if ( + self.inputs.interpolation + in [ + "BSpline", + "MultiLabel", + "Gaussian", + ] + and isdefined(self.inputs.interpolation_parameters) + ): return "--interpolation %s[ %s ]" % ( self.inputs.interpolation, ", ".join( @@ -1386,10 +1390,11 @@ def _list_outputs(self): self.inputs.initial_moving_transform + outputs["reverse_transforms"] ) - outputs["reverse_invert_flags"] = ( - [not e for e in invert_initial_moving_transform] - + outputs["reverse_invert_flags"] - ) # Prepend + outputs["reverse_invert_flags"] = [ + not e for e in invert_initial_moving_transform + ] + outputs[ + "reverse_invert_flags" + ] # Prepend transform_count += len(self.inputs.initial_moving_transform) elif isdefined(self.inputs.initial_moving_transform_com): forward_filename, forward_inversemode = self._output_filenames( @@ -1502,7 +1507,7 @@ class MeasureImageSimilarityInputSpec(ANTSCommandInputSpec): desc="Dimensionality of the fixed/moving image pair", ) fixed_image = File( - exists=True, mandatory=True, desc="Image to which the moving image is warped", + exists=True, mandatory=True, desc="Image to which the moving image is warped" ) moving_image = File( exists=True, @@ -1510,14 +1515,7 @@ class MeasureImageSimilarityInputSpec(ANTSCommandInputSpec): desc="Image to apply transformation to (generally a coregistered functional)", ) metric = traits.Enum( - "CC", - "MI", - "Mattes", - "MeanSquares", - "Demons", - "GC", - argstr="%s", - mandatory=True, + "CC", "MI", "Mattes", "MeanSquares", "Demons", "GC", argstr="%s", mandatory=True ) metric_weight = traits.Float( requires=["metric"], @@ -1592,17 +1590,14 @@ class MeasureImageSimilarity(ANTSCommand): output_spec = MeasureImageSimilarityOutputSpec def _metric_constructor(self): - retval = ( - '--metric {metric}["{fixed_image}","{moving_image}",{metric_weight},' - "{radius_or_number_of_bins},{sampling_strategy},{sampling_percentage}]".format( - metric=self.inputs.metric, - fixed_image=self.inputs.fixed_image, - moving_image=self.inputs.moving_image, - 
metric_weight=self.inputs.metric_weight, - radius_or_number_of_bins=self.inputs.radius_or_number_of_bins, - sampling_strategy=self.inputs.sampling_strategy, - sampling_percentage=self.inputs.sampling_percentage, - ) + retval = '--metric {metric}["{fixed_image}","{moving_image}",{metric_weight},' "{radius_or_number_of_bins},{sampling_strategy},{sampling_percentage}]".format( + metric=self.inputs.metric, + fixed_image=self.inputs.fixed_image, + moving_image=self.inputs.moving_image, + metric_weight=self.inputs.metric_weight, + radius_or_number_of_bins=self.inputs.radius_or_number_of_bins, + sampling_strategy=self.inputs.sampling_strategy, + sampling_percentage=self.inputs.sampling_percentage, ) return retval @@ -1614,7 +1609,7 @@ def _mask_constructor(self): ) else: retval = '--masks "{fixed_image_mask}"'.format( - fixed_image_mask=self.inputs.fixed_image_mask, + fixed_image_mask=self.inputs.fixed_image_mask ) return retval diff --git a/nipype/interfaces/ants/resampling.py b/nipype/interfaces/ants/resampling.py index 607202c109..3b602bad5b 100644 --- a/nipype/interfaces/ants/resampling.py +++ b/nipype/interfaces/ants/resampling.py @@ -504,11 +504,15 @@ def _format_arg(self, opt, spec, val): elif opt == "transforms": return self._get_transform_filenames() elif opt == "interpolation": - if self.inputs.interpolation in [ - "BSpline", - "MultiLabel", - "Gaussian", - ] and isdefined(self.inputs.interpolation_parameters): + if ( + self.inputs.interpolation + in [ + "BSpline", + "MultiLabel", + "Gaussian", + ] + and isdefined(self.inputs.interpolation_parameters) + ): return "--interpolation %s[ %s ]" % ( self.inputs.interpolation, ", ".join( diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index 755a33317c..6459a9c6c8 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -202,8 +202,7 @@ def __init__( setattr(self.inputs, name, value) def _outputs(self): - """ Returns a bunch containing output fields for the class - """ + """Returns a bunch containing output fields for the class""" outputs = None if self.output_spec: outputs = self.output_spec() @@ -211,8 +210,7 @@ def _outputs(self): return outputs def _check_requires(self, spec, name, value): - """ check if required inputs are satisfied - """ + """check if required inputs are satisfied""" if spec.requires: values = [ not isdefined(getattr(self.inputs, field)) for field in spec.requires @@ -237,8 +235,7 @@ def _check_requires(self, spec, name, value): raise ValueError(msg) def _check_xor(self, spec, name, value): - """ check if mutually exclusive inputs are satisfied - """ + """check if mutually exclusive inputs are satisfied""" if spec.xor: values = [isdefined(getattr(self.inputs, field)) for field in spec.xor] if not any(values) and not isdefined(value): @@ -254,8 +251,7 @@ def _check_xor(self, spec, name, value): raise ValueError(msg) def _check_mandatory_inputs(self): - """ Raises an exception if a mandatory input is Undefined - """ + """Raises an exception if a mandatory input is Undefined""" for name, spec in list(self.inputs.traits(mandatory=True).items()): value = getattr(self.inputs, name) self._check_xor(spec, name, value) @@ -274,7 +270,7 @@ def _check_mandatory_inputs(self): self._check_requires(spec, name, getattr(self.inputs, name)) def _check_version_requirements(self, trait_object, permissive=False): - """ Raises an exception on version mismatch + """Raises an exception on version mismatch Set the ``permissive`` attribute to True to suppress warnings and exceptions. 
This is currently only used in __init__ to silently identify unavailable @@ -342,13 +338,11 @@ def _check_version_requirements(self, trait_object, permissive=False): return unavailable_traits def _run_interface(self, runtime): - """ Core function that executes interface - """ + """Core function that executes interface""" raise NotImplementedError def _duecredit_cite(self): - """ Add the interface references to the duecredit citations - """ + """Add the interface references to the duecredit citations""" for r in self.references_: r["path"] = self.__module__ due.cite(**r) @@ -500,8 +494,7 @@ def run(self, cwd=None, ignore_exception=None, **inputs): return results def _list_outputs(self): - """ List the expected outputs - """ + """List the expected outputs""" if self.output_spec: raise NotImplementedError else: @@ -604,7 +597,7 @@ def _post_run_hook(self, runtime): class SimpleInterface(BaseInterface): - """ An interface pattern that allows outputs to be set in a dictionary + """An interface pattern that allows outputs to be set in a dictionary called ``_results`` that is automatically interpreted by ``_list_outputs()`` to find the outputs. @@ -733,7 +726,7 @@ def cmd(self): @property def cmdline(self): - """ `command` plus any arguments (args) + """`command` plus any arguments (args) validates arguments and generates command line""" self._check_mandatory_inputs() allargs = [self._cmd_prefix + self.cmd] + self._parse_inputs() diff --git a/nipype/interfaces/base/specs.py b/nipype/interfaces/base/specs.py index b42a73f501..dda80d7697 100644 --- a/nipype/interfaces/base/specs.py +++ b/nipype/interfaces/base/specs.py @@ -74,8 +74,7 @@ def __init__(self, **kwargs): self.trait_set(**kwargs) def items(self): - """ Name, trait generator for user modifiable traits - """ + """Name, trait generator for user modifiable traits""" for name in sorted(self.copyable_trait_names()): yield name, self.traits()[name] @@ -100,8 +99,7 @@ def _generate_handlers(self): self.on_trait_change(self._deprecated_warn, elem) def _xor_warn(self, obj, name, old, new): - """ Generates warnings for xor traits - """ + """Generates warnings for xor traits""" if isdefined(new): trait_spec = self.traits()[name] # for each xor, set to default_value @@ -120,8 +118,7 @@ def _xor_warn(self, obj, name, old, new): raise IOError(msg) def _deprecated_warn(self, obj, name, old, new): - """Checks if a user assigns a value to a deprecated trait - """ + """Checks if a user assigns a value to a deprecated trait""" if isdefined(new): trait_spec = self.traits()[name] msg1 = "Input %s in interface %s is deprecated." % ( @@ -157,7 +154,7 @@ def _deprecated_warn(self, obj, name, old, new): ) def trait_get(self, **kwargs): - """ Returns traited class as a dict + """Returns traited class as a dict Augments the trait get function to return a dictionary without notification handles @@ -169,7 +166,7 @@ def trait_get(self, **kwargs): get = trait_get def get_traitsfree(self, **kwargs): - """ Returns traited class as a dict + """Returns traited class as a dict Augments the trait get function to return a dictionary without any traits. The dictionary does not contain any attributes that @@ -180,8 +177,7 @@ def get_traitsfree(self, **kwargs): return out def _clean_container(self, objekt, undefinedval=None, skipundefined=False): - """Convert a traited obejct into a pure python representation. 
- """ + """Convert a traited obejct into a pure python representation.""" if isinstance(objekt, TraitDictObject) or isinstance(objekt, dict): out = {} for key, val in list(objekt.items()): @@ -361,7 +357,7 @@ def __getstate__(self): class TraitedSpec(BaseTraitedSpec): - """ Create a subclass with strict traits. + """Create a subclass with strict traits. This is used in 90% of the cases. """ @@ -374,7 +370,7 @@ class BaseInterfaceInputSpec(TraitedSpec): class DynamicTraitedSpec(BaseTraitedSpec): - """ A subclass to handle dynamic traits + """A subclass to handle dynamic traits This class is a workaround for add_traits and clone_traits not functioning well together. diff --git a/nipype/interfaces/base/support.py b/nipype/interfaces/base/support.py index 80835604a0..88359354fd 100644 --- a/nipype/interfaces/base/support.py +++ b/nipype/interfaces/base/support.py @@ -75,13 +75,11 @@ def iteritems(self): return list(self.items()) def get(self, *args): - """Support dictionary get() functionality - """ + """Support dictionary get() functionality""" return self.__dict__.get(*args) def set(self, **kwargs): - """Support dictionary get() functionality - """ + """Support dictionary get() functionality""" return self.__dict__.update(**kwargs) def dictcopy(self): diff --git a/nipype/interfaces/base/traits_extension.py b/nipype/interfaces/base/traits_extension.py index e3b54eb7cb..b513a17eee 100644 --- a/nipype/interfaces/base/traits_extension.py +++ b/nipype/interfaces/base/traits_extension.py @@ -402,8 +402,7 @@ def has_metadata(trait, metadata, value=None, recursive=True): class MultiObject(traits.List): - """ Abstract class - shared functionality of input and output MultiObject - """ + """Abstract class - shared functionality of input and output MultiObject""" def validate(self, objekt, name, value): @@ -432,7 +431,7 @@ def validate(self, objekt, name, value): class OutputMultiObject(MultiObject): - """ Implements a user friendly traits that accepts one or more + """Implements a user friendly traits that accepts one or more paths to files or directories. This is the output version which return a single string whenever possible (when it was set to a single value or a list of length 1). Default value of this trait @@ -478,7 +477,7 @@ def set(self, objekt, name, value): class InputMultiObject(MultiObject): - """ Implements a user friendly traits that accepts one or more + """Implements a user friendly traits that accepts one or more paths to files or directories. This is the input version which always returns a list. Default value of this trait is _Undefined. It does not accept empty lists. 
diff --git a/nipype/interfaces/brainsuite/brainsuite.py b/nipype/interfaces/brainsuite/brainsuite.py index 84177a16ad..0b40745215 100644 --- a/nipype/interfaces/brainsuite/brainsuite.py +++ b/nipype/interfaces/brainsuite/brainsuite.py @@ -260,15 +260,12 @@ def _format_arg(self, name, spec, value): % {"low": "--low", "medium": "--medium", "high": "--high"}[value] ) if name == "intermediate_file_type": - return ( - spec.argstr - % { - "analyze": "--analyze", - "nifti": "--nifti", - "gzippedAnalyze": "--analyzegz", - "gzippedNifti": "--niftigz", - }[value] - ) + return spec.argstr % { + "analyze": "--analyze", + "nifti": "--nifti", + "gzippedAnalyze": "--analyzegz", + "gzippedNifti": "--niftigz", + }[value] return super(Bfc, self)._format_arg(name, spec, value) @@ -775,14 +772,11 @@ def _format_arg(self, name, spec, value): return "" # blank argstr if name == "specialTessellation": threshold = self.inputs.tessellationThreshold - return ( - spec.argstr - % { - "greater_than": "".join(("-gt %f" % threshold)), - "less_than": "".join(("-lt %f" % threshold)), - "equal_to": "".join(("-eq %f" % threshold)), - }[value] - ) + return spec.argstr % { + "greater_than": "".join(("-gt %f" % threshold)), + "less_than": "".join(("-lt %f" % threshold)), + "equal_to": "".join(("-eq %f" % threshold)), + }[value] return super(Dfs, self)._format_arg(name, spec, value) def _gen_filename(self, name): diff --git a/nipype/interfaces/camino2trackvis/convert.py b/nipype/interfaces/camino2trackvis/convert.py index 63d7a385da..a4db0b59ef 100644 --- a/nipype/interfaces/camino2trackvis/convert.py +++ b/nipype/interfaces/camino2trackvis/convert.py @@ -81,7 +81,7 @@ class Camino2TrackvisOutputSpec(TraitedSpec): class Camino2Trackvis(CommandLine): - """ Wraps camino_to_trackvis from Camino-Trackvis + """Wraps camino_to_trackvis from Camino-Trackvis Convert files from camino .Bfloat format to trackvis .trk format. @@ -120,7 +120,7 @@ def _gen_outfilename(self): class Trackvis2CaminoInputSpec(CommandLineInputSpec): - """ Wraps trackvis_to_camino from Camino-Trackvis + """Wraps trackvis_to_camino from Camino-Trackvis Convert files from camino .Bfloat format to trackvis .trk format. diff --git a/nipype/interfaces/cmtk/cmtk.py b/nipype/interfaces/cmtk/cmtk.py index 6587dcc291..1ba9d29a4c 100644 --- a/nipype/interfaces/cmtk/cmtk.py +++ b/nipype/interfaces/cmtk/cmtk.py @@ -99,8 +99,7 @@ def get_connectivity_matrix(n_rois, list_of_roi_crossed_lists): def create_allpoints_cmat(streamlines, roiData, voxelSize, n_rois): - """ Create the intersection arrays for each fiber - """ + """Create the intersection arrays for each fiber""" n_fib = len(streamlines) pc = -1 # Computation for each fiber @@ -133,7 +132,7 @@ def create_allpoints_cmat(streamlines, roiData, voxelSize, n_rois): def create_endpoints_array(fib, voxelSize): - """ Create the endpoints arrays for each fiber. + """Create the endpoints arrays for each fiber. Parameters ---------- diff --git a/nipype/interfaces/cmtk/convert.py b/nipype/interfaces/cmtk/convert.py index a0f956b6f9..321a40fbba 100644 --- a/nipype/interfaces/cmtk/convert.py +++ b/nipype/interfaces/cmtk/convert.py @@ -234,7 +234,7 @@ class MergeCNetworksOutputSpec(TraitedSpec): class MergeCNetworks(CFFBaseInterface): - """ Merges networks from multiple CFF files into one new CFF file. + """Merges networks from multiple CFF files into one new CFF file. 
Example ------- diff --git a/nipype/interfaces/cmtk/parcellation.py b/nipype/interfaces/cmtk/parcellation.py index 77e22bd6f7..2b4b4f84ed 100644 --- a/nipype/interfaces/cmtk/parcellation.py +++ b/nipype/interfaces/cmtk/parcellation.py @@ -303,16 +303,13 @@ def create_annot_label(subject_id, subjects_dir, fs_dir, parcellation_name): op.join(output_dir, "regenerated_lh_60", "lh.corpuscallosum.label"), lhco ) - mri_cmd = ( - """mri_label2vol --label "%s" --label "%s" --label "%s" --label "%s" --temp "%s" --o "%s" --identity """ - % ( - rhun, - lhun, - rhco, - lhco, - op.join(op.join(subjects_dir, subject_id), "mri", "orig.mgz"), - op.join(fs_label_dir, "cc_unknown.nii.gz"), - ) + mri_cmd = """mri_label2vol --label "%s" --label "%s" --label "%s" --label "%s" --temp "%s" --o "%s" --identity """ % ( + rhun, + lhun, + rhco, + lhco, + op.join(op.join(subjects_dir, subject_id), "mri", "orig.mgz"), + op.join(fs_label_dir, "cc_unknown.nii.gz"), ) runCmd(mri_cmd, log) runCmd("mris_volmask %s" % subject_id, log) @@ -331,8 +328,8 @@ def create_annot_label(subject_id, subjects_dir, fs_dir, parcellation_name): def create_roi(subject_id, subjects_dir, fs_dir, parcellation_name, dilation): - """ Creates the ROI_%s.nii.gz files using the given parcellation information - from networks. Iteratively create volume. """ + """Creates the ROI_%s.nii.gz files using the given parcellation information + from networks. Iteratively create volume.""" import cmp from cmp.util import runCmd diff --git a/nipype/interfaces/dcm2nii.py b/nipype/interfaces/dcm2nii.py index c88f11ba6a..2ab83e2f39 100644 --- a/nipype/interfaces/dcm2nii.py +++ b/nipype/interfaces/dcm2nii.py @@ -118,8 +118,7 @@ class Dcm2nii(CommandLine): >>> converter.inputs.gzip_output = True >>> converter.inputs.output_dir = '.' >>> converter.cmdline # doctest: +ELLIPSIS - 'dcm2nii -a y -c y -b config.ini -v y -d y -e y -g y -i n -n y -o . -p y -x n -f n functional_1.dcm' -""" + 'dcm2nii -a y -c y -b config.ini -v y -d y -e y -g y -i n -n y -o . -p y -x n -f n functional_1.dcm'""" input_spec = Dcm2niiInputSpec output_spec = Dcm2niiOutputSpec @@ -442,7 +441,7 @@ def _format_arg(self, opt, spec, val): def _run_interface(self, runtime): # may use return code 1 despite conversion runtime = super(Dcm2niix, self)._run_interface( - runtime, correct_return_codes=(0, 1,) + runtime, correct_return_codes=(0, 1) ) self._parse_files(self._parse_stdout(runtime.stdout)) return runtime diff --git a/nipype/interfaces/dcmstack.py b/nipype/interfaces/dcmstack.py index bf77158896..bc18659c93 100644 --- a/nipype/interfaces/dcmstack.py +++ b/nipype/interfaces/dcmstack.py @@ -177,8 +177,7 @@ class GroupAndStackOutputSpec(TraitedSpec): class GroupAndStack(DcmStack): - """Create (potentially) multiple Nifti files for a set of DICOM files. - """ + """Create (potentially) multiple Nifti files for a set of DICOM files.""" input_spec = DcmStackInputSpec output_spec = GroupAndStackOutputSpec diff --git a/nipype/interfaces/diffusion_toolkit/base.py b/nipype/interfaces/diffusion_toolkit/base.py index 02c7deceb1..b4b5ba1893 100644 --- a/nipype/interfaces/diffusion_toolkit/base.py +++ b/nipype/interfaces/diffusion_toolkit/base.py @@ -20,7 +20,7 @@ class Info(object): - """ Handle dtk output type and version information. + """Handle dtk output type and version information. 
Examples -------- diff --git a/nipype/interfaces/diffusion_toolkit/dti.py b/nipype/interfaces/diffusion_toolkit/dti.py index f5715fb443..9fc409f8f4 100644 --- a/nipype/interfaces/diffusion_toolkit/dti.py +++ b/nipype/interfaces/diffusion_toolkit/dti.py @@ -88,8 +88,7 @@ class DTIReconOutputSpec(TraitedSpec): class DTIRecon(CommandLine): - """Use dti_recon to generate tensors and other maps - """ + """Use dti_recon to generate tensors and other maps""" input_spec = DTIReconInputSpec output_spec = DTIReconOutputSpec diff --git a/nipype/interfaces/diffusion_toolkit/odf.py b/nipype/interfaces/diffusion_toolkit/odf.py index b3d6574099..e1819912b6 100644 --- a/nipype/interfaces/diffusion_toolkit/odf.py +++ b/nipype/interfaces/diffusion_toolkit/odf.py @@ -90,8 +90,7 @@ class HARDIMatOutputSpec(TraitedSpec): class HARDIMat(CommandLine): - """Use hardi_mat to calculate a reconstruction matrix from a gradient table - """ + """Use hardi_mat to calculate a reconstruction matrix from a gradient table""" input_spec = HARDIMatInputSpec output_spec = HARDIMatOutputSpec @@ -210,8 +209,7 @@ class ODFReconOutputSpec(TraitedSpec): class ODFRecon(CommandLine): - """Use odf_recon to generate tensors and other maps - """ + """Use odf_recon to generate tensors and other maps""" input_spec = ODFReconInputSpec output_spec = ODFReconOutputSpec @@ -368,8 +366,7 @@ class ODFTrackerOutputSpec(TraitedSpec): class ODFTracker(CommandLine): - """Use odf_tracker to generate track file - """ + """Use odf_tracker to generate track file""" input_spec = ODFTrackerInputSpec output_spec = ODFTrackerOutputSpec diff --git a/nipype/interfaces/freesurfer/base.py b/nipype/interfaces/freesurfer/base.py index 1108cbf6bc..9527e97192 100644 --- a/nipype/interfaces/freesurfer/base.py +++ b/nipype/interfaces/freesurfer/base.py @@ -33,7 +33,7 @@ class Info(PackageInfo): - """ Freesurfer subject directory and version information. + """Freesurfer subject directory and version information. Examples -------- @@ -53,7 +53,7 @@ def parse_version(raw_info): @classmethod def looseversion(cls): - """ Return a comparable version object + """Return a comparable version object If no version found, use LooseVersion('0.0.0') """ @@ -118,7 +118,7 @@ class FSTraitedSpec(CommandLineInputSpec): class FSCommand(CommandLine): """General support for FreeSurfer commands. - Every FS command accepts 'subjects_dir' input. + Every FS command accepts 'subjects_dir' input. 
""" input_spec = FSTraitedSpec @@ -208,8 +208,7 @@ def _associated_file(in_file, out_name): class FSScriptCommand(FSCommand): - """ Support for Freesurfer script commands with log terminal_output - """ + """Support for Freesurfer script commands with log terminal_output""" _terminal_output = "file" _always_run = False diff --git a/nipype/interfaces/freesurfer/longitudinal.py b/nipype/interfaces/freesurfer/longitudinal.py index aa5f928550..899a67bb50 100644 --- a/nipype/interfaces/freesurfer/longitudinal.py +++ b/nipype/interfaces/freesurfer/longitudinal.py @@ -108,7 +108,7 @@ class RobustTemplateOutputSpec(TraitedSpec): class RobustTemplate(FSCommandOpenMP): - """ construct an unbiased robust template for longitudinal volumes + """construct an unbiased robust template for longitudinal volumes Examples -------- @@ -221,7 +221,7 @@ class FuseSegmentationsOutputSpec(TraitedSpec): class FuseSegmentations(FSCommand): - """ fuse segmentations together from multiple timepoints + """fuse segmentations together from multiple timepoints Examples -------- diff --git a/nipype/interfaces/freesurfer/model.py b/nipype/interfaces/freesurfer/model.py index 9e7d35d096..81758b6ac3 100644 --- a/nipype/interfaces/freesurfer/model.py +++ b/nipype/interfaces/freesurfer/model.py @@ -617,7 +617,7 @@ class Binarize(FSCommand): >>> binvol.cmdline 'mri_binarize --o foo_out.nii --i structural.nii --min 10.000000' - """ + """ _cmd = "mri_binarize" input_spec = BinarizeInputSpec @@ -1249,7 +1249,7 @@ class Label2Vol(FSCommand): >>> binvol.cmdline 'mri_label2vol --fillthresh 0.5 --label cortex.label --reg register.dat --temp structural.nii --o foo_out.nii' - """ + """ _cmd = "mri_label2vol" input_spec = Label2VolInputSpec diff --git a/nipype/interfaces/freesurfer/preprocess.py b/nipype/interfaces/freesurfer/preprocess.py index 91eba956b5..cf4e97c9c9 100644 --- a/nipype/interfaces/freesurfer/preprocess.py +++ b/nipype/interfaces/freesurfer/preprocess.py @@ -74,7 +74,7 @@ class ParseDICOMDir(FSCommand): >>> dcminfo.cmdline 'mri_parse_sdcmdir --d . --o dicominfo.txt --sortbyrun --summarize' - """ + """ _cmd = "mri_parse_sdcmdir" input_spec = ParseDICOMDirInputSpec @@ -721,7 +721,7 @@ def _get_filelist(self, outdir): @property def cmdline(self): - """ `command` plus any arguments (args) + """`command` plus any arguments (args) validates arguments and generates command line""" self._check_mandatory_inputs() outdir = self._get_outdir() @@ -1764,12 +1764,16 @@ def _list_outputs(self): return outputs def _format_arg(self, name, spec, value): - if name in ( - "registered_file", - "out_fsl_file", - "out_lta_file", - "init_cost_file", - ) and isinstance(value, bool): + if ( + name + in ( + "registered_file", + "out_fsl_file", + "out_lta_file", + "init_cost_file", + ) + and isinstance(value, bool) + ): value = self._list_outputs()[name] return super(BBRegister, self)._format_arg(name, spec, value) @@ -2475,7 +2479,7 @@ class MNIBiasCorrectionOutputSpec(TraitedSpec): class MNIBiasCorrection(FSCommand): - """ Wrapper for nu_correct, a program from the Montreal Neurological Insitute (MNI) + """Wrapper for nu_correct, a program from the Montreal Neurological Insitute (MNI) used for correcting intensity non-uniformity (ie, bias fields). You must have the MNI software installed on your system to run this. See [www.bic.mni.mcgill.ca/software/N3] for more info. 
@@ -2533,7 +2537,7 @@ class WatershedSkullStripOutputSpec(TraitedSpec): class WatershedSkullStrip(FSCommand): - """ This program strips skull and other outer non-brain tissue and + """This program strips skull and other outer non-brain tissue and produces the brain volume from T1 volume or the scanned volume. The "watershed" segmentation algorithm was used to dertermine the @@ -3339,7 +3343,7 @@ class ConcatenateLTAOutputSpec(TraitedSpec): class ConcatenateLTA(FSCommand): - """ Concatenates two consecutive LTA transformations into one overall + """Concatenates two consecutive LTA transformations into one overall transformation Out = LTA2*LTA1 diff --git a/nipype/interfaces/freesurfer/registration.py b/nipype/interfaces/freesurfer/registration.py index d6fd82cc15..5d7780f85c 100644 --- a/nipype/interfaces/freesurfer/registration.py +++ b/nipype/interfaces/freesurfer/registration.py @@ -211,7 +211,7 @@ class EMRegisterOutputSpec(TraitedSpec): class EMRegister(FSCommandOpenMP): - """ This program creates a tranform in lta format + """This program creates a tranform in lta format Examples ======== @@ -285,7 +285,7 @@ class RegisterOutputSpec(TraitedSpec): class Register(FSCommand): - """ This program registers a surface to an average surface template. + """This program registers a surface to an average surface template. Examples ======== @@ -543,7 +543,7 @@ class MRICoregOutputSpec(TraitedSpec): class MRICoreg(FSCommand): - """ This program registers one volume to another + """This program registers one volume to another mri_coreg is a C reimplementation of spm_coreg in FreeSurfer diff --git a/nipype/interfaces/freesurfer/tests/test_BBRegister.py b/nipype/interfaces/freesurfer/tests/test_BBRegister.py index b9ed6a8bcd..1b3496c516 100644 --- a/nipype/interfaces/freesurfer/tests/test_BBRegister.py +++ b/nipype/interfaces/freesurfer/tests/test_BBRegister.py @@ -3,47 +3,47 @@ def test_BBRegister_inputs(): input_map_5_3 = dict( - args=dict(argstr="%s",), - contrast_type=dict(argstr="--%s", mandatory=True,), - dof=dict(argstr="--%d",), - environ=dict(nohash=True, usedefault=True,), - epi_mask=dict(argstr="--epi-mask",), - fsldof=dict(argstr="--fsl-dof %d",), - init=dict(argstr="--init-%s", mandatory=True, xor=["init_reg_file"],), - init_cost_file=dict(argstr="--initcost %s",), - init_reg_file=dict(argstr="--init-reg %s", mandatory=True, xor=["init"],), - intermediate_file=dict(argstr="--int %s",), - out_fsl_file=dict(argstr="--fslmat %s",), - out_lta_file=dict(argstr="--lta %s", min_ver="5.2.0",), - out_reg_file=dict(argstr="--reg %s", genfile=True,), - reg_frame=dict(argstr="--frame %d", xor=["reg_middle_frame"],), - reg_middle_frame=dict(argstr="--mid-frame", xor=["reg_frame"],), - registered_file=dict(argstr="--o %s",), - source_file=dict(argstr="--mov %s", copyfile=False, mandatory=True,), - spm_nifti=dict(argstr="--spm-nii",), - subject_id=dict(argstr="--s %s", mandatory=True,), + args=dict(argstr="%s"), + contrast_type=dict(argstr="--%s", mandatory=True), + dof=dict(argstr="--%d"), + environ=dict(nohash=True, usedefault=True), + epi_mask=dict(argstr="--epi-mask"), + fsldof=dict(argstr="--fsl-dof %d"), + init=dict(argstr="--init-%s", mandatory=True, xor=["init_reg_file"]), + init_cost_file=dict(argstr="--initcost %s"), + init_reg_file=dict(argstr="--init-reg %s", mandatory=True, xor=["init"]), + intermediate_file=dict(argstr="--int %s"), + out_fsl_file=dict(argstr="--fslmat %s"), + out_lta_file=dict(argstr="--lta %s", min_ver="5.2.0"), + out_reg_file=dict(argstr="--reg %s", genfile=True), + 
reg_frame=dict(argstr="--frame %d", xor=["reg_middle_frame"]), + reg_middle_frame=dict(argstr="--mid-frame", xor=["reg_frame"]), + registered_file=dict(argstr="--o %s"), + source_file=dict(argstr="--mov %s", copyfile=False, mandatory=True), + spm_nifti=dict(argstr="--spm-nii"), + subject_id=dict(argstr="--s %s", mandatory=True), subjects_dir=dict(), ) input_map_6_0 = dict( - args=dict(argstr="%s",), - contrast_type=dict(argstr="--%s", mandatory=True,), - dof=dict(argstr="--%d",), - environ=dict(nohash=True, usedefault=True,), - epi_mask=dict(argstr="--epi-mask",), - fsldof=dict(argstr="--fsl-dof %d",), - init=dict(argstr="--init-%s", xor=["init_reg_file"],), - init_reg_file=dict(argstr="--init-reg %s", xor=["init"],), - init_cost_file=dict(argstr="--initcost %s",), - intermediate_file=dict(argstr="--int %s",), - out_fsl_file=dict(argstr="--fslmat %s",), - out_lta_file=dict(argstr="--lta %s", min_ver="5.2.0",), - out_reg_file=dict(argstr="--reg %s", genfile=True,), - reg_frame=dict(argstr="--frame %d", xor=["reg_middle_frame"],), - reg_middle_frame=dict(argstr="--mid-frame", xor=["reg_frame"],), - registered_file=dict(argstr="--o %s",), - source_file=dict(argstr="--mov %s", copyfile=False, mandatory=True,), - spm_nifti=dict(argstr="--spm-nii",), - subject_id=dict(argstr="--s %s", mandatory=True,), + args=dict(argstr="%s"), + contrast_type=dict(argstr="--%s", mandatory=True), + dof=dict(argstr="--%d"), + environ=dict(nohash=True, usedefault=True), + epi_mask=dict(argstr="--epi-mask"), + fsldof=dict(argstr="--fsl-dof %d"), + init=dict(argstr="--init-%s", xor=["init_reg_file"]), + init_reg_file=dict(argstr="--init-reg %s", xor=["init"]), + init_cost_file=dict(argstr="--initcost %s"), + intermediate_file=dict(argstr="--int %s"), + out_fsl_file=dict(argstr="--fslmat %s"), + out_lta_file=dict(argstr="--lta %s", min_ver="5.2.0"), + out_reg_file=dict(argstr="--reg %s", genfile=True), + reg_frame=dict(argstr="--frame %d", xor=["reg_middle_frame"]), + reg_middle_frame=dict(argstr="--mid-frame", xor=["reg_frame"]), + registered_file=dict(argstr="--o %s"), + source_file=dict(argstr="--mov %s", copyfile=False, mandatory=True), + spm_nifti=dict(argstr="--spm-nii"), + subject_id=dict(argstr="--s %s", mandatory=True), subjects_dir=dict(), ) diff --git a/nipype/interfaces/freesurfer/tests/test_preprocess.py b/nipype/interfaces/freesurfer/tests/test_preprocess.py index 518d60b9a5..9743f7bf95 100644 --- a/nipype/interfaces/freesurfer/tests/test_preprocess.py +++ b/nipype/interfaces/freesurfer/tests/test_preprocess.py @@ -192,7 +192,6 @@ def test_bbregister(create_files_in_directory): def test_FSVersion(): - """Check that FSVersion is a string that can be compared with LooseVersion - """ + """Check that FSVersion is a string that can be compared with LooseVersion""" assert isinstance(freesurfer.preprocess.FSVersion, str) assert LooseVersion(freesurfer.preprocess.FSVersion) >= LooseVersion("0") diff --git a/nipype/interfaces/fsl/base.py b/nipype/interfaces/fsl/base.py index 43ad7b9f9c..34127cadef 100644 --- a/nipype/interfaces/fsl/base.py +++ b/nipype/interfaces/fsl/base.py @@ -146,9 +146,7 @@ class FSLCommandInputSpec(CommandLineInputSpec): class FSLCommand(CommandLine): - """Base support for FSL commands. 
- - """ + """Base support for FSL commands.""" input_spec = FSLCommandInputSpec _output_type = None diff --git a/nipype/interfaces/fsl/dti.py b/nipype/interfaces/fsl/dti.py index 90f05e3bab..e1e6c2af8d 100644 --- a/nipype/interfaces/fsl/dti.py +++ b/nipype/interfaces/fsl/dti.py @@ -89,7 +89,7 @@ class DTIFitOutputSpec(TraitedSpec): class DTIFit(FSLCommand): - """ Use FSL dtifit command for fitting a diffusion tensor model at each + """Use FSL dtifit command for fitting a diffusion tensor model at each voxel Example @@ -1045,7 +1045,7 @@ class ProbTrackX2OutputSpec(ProbTrackXOutputSpec): class ProbTrackX2(ProbTrackX): - """ Use FSL probtrackx2 for tractography on bedpostx results + """Use FSL probtrackx2 for tractography on bedpostx results Examples -------- diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index f6772aa53b..99ba0fce0b 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -402,8 +402,7 @@ def _get_encfilename(self): return out_file def _generate_encfile(self): - """Generate a topup compatible encoding file based on given directions - """ + """Generate a topup compatible encoding file based on given directions""" out_file = self._get_encfilename() durations = self.inputs.readout_times if len(self.inputs.encoding_direction) != len(durations): @@ -605,9 +604,7 @@ class EddyInputSpec(FSLCommandInputSpec): desc="Topup results file containing the movement parameters (movpar.txt)", ) field = File( - exists=True, - argstr="--field=%s", - desc="Non-topup derived fieldmap scaled in Hz", + exists=True, argstr="--field=%s", desc="Non-topup derived fieldmap scaled in Hz" ) field_mat = File( exists=True, @@ -1575,15 +1572,13 @@ class EddyQuadInputSpec(FSLCommandInputSpec): argstr="--output-dir %s", desc="Output directory - default = '.qc'", ) - field = File( - exists=True, argstr="--field %s", desc="TOPUP estimated field (in Hz)", - ) + field = File(exists=True, argstr="--field %s", desc="TOPUP estimated field (in Hz)") slice_spec = File( exists=True, argstr="--slspec %s", desc="Text file specifying slice/group acquisition", ) - verbose = traits.Bool(argstr="--verbose", desc="Display debug messages",) + verbose = traits.Bool(argstr="--verbose", desc="Display debug messages") class EddyQuadOutputSpec(TraitedSpec): diff --git a/nipype/interfaces/fsl/maths.py b/nipype/interfaces/fsl/maths.py index 1b64511f9e..9e05b4d102 100644 --- a/nipype/interfaces/fsl/maths.py +++ b/nipype/interfaces/fsl/maths.py @@ -76,9 +76,7 @@ class ChangeDataTypeInput(MathsInput): class ChangeDataType(MathsCommand): - """Use fslmaths to change the datatype of an image. - - """ + """Use fslmaths to change the datatype of an image.""" input_spec = ChangeDataTypeInput _suffix = "_chdt" @@ -105,9 +103,7 @@ class ThresholdInputSpec(MathsInput): class Threshold(MathsCommand): - """Use fslmaths to apply a threshold to an image in a variety of ways. - - """ + """Use fslmaths to apply a threshold to an image in a variety of ways.""" input_spec = ThresholdInputSpec _suffix = "_thresh" @@ -167,9 +163,7 @@ class MeanImageInput(MathsInput): class MeanImage(MathsCommand): - """Use fslmaths to generate a mean image across a given dimension. - - """ + """Use fslmaths to generate a mean image across a given dimension.""" input_spec = MeanImageInput _suffix = "_mean" @@ -286,9 +280,7 @@ class MinImageInput(MathsInput): class MinImage(MathsCommand): - """Use fslmaths to generate a minimum image across a given dimension. 
- - """ + """Use fslmaths to generate a minimum image across a given dimension.""" input_spec = MinImageInput _suffix = "_min" @@ -309,9 +301,7 @@ class MedianImageInput(MathsInput): class MedianImage(MathsCommand): - """Use fslmaths to generate a median image across a given dimension. - - """ + """Use fslmaths to generate a median image across a given dimension.""" input_spec = MedianImageInput _suffix = "_median" @@ -360,9 +350,7 @@ class IsotropicSmoothInput(MathsInput): class IsotropicSmooth(MathsCommand): - """Use fslmaths to spatially smooth an image with a gaussian kernel. - - """ + """Use fslmaths to spatially smooth an image with a gaussian kernel.""" input_spec = IsotropicSmoothInput _suffix = "_smooth" @@ -386,9 +374,7 @@ class ApplyMaskInput(MathsInput): class ApplyMask(MathsCommand): - """Use fslmaths to apply a binary mask to another image. - - """ + """Use fslmaths to apply a binary mask to another image.""" input_spec = ApplyMaskInput _suffix = "_masked" @@ -439,9 +425,7 @@ class DilateInput(KernelInput): class DilateImage(MathsCommand): - """Use fslmaths to perform a spatial dilation of an image. - - """ + """Use fslmaths to perform a spatial dilation of an image.""" input_spec = DilateInput _suffix = "_dil" @@ -464,9 +448,7 @@ class ErodeInput(KernelInput): class ErodeImage(MathsCommand): - """Use fslmaths to perform a spatial erosion of an image. - - """ + """Use fslmaths to perform a spatial erosion of an image.""" input_spec = ErodeInput _suffix = "_ero" @@ -493,9 +475,7 @@ class SpatialFilterInput(KernelInput): class SpatialFilter(MathsCommand): - """Use fslmaths to spatially filter an image. - - """ + """Use fslmaths to spatially filter an image.""" input_spec = SpatialFilterInput _suffix = "_filt" @@ -535,9 +515,7 @@ class UnaryMathsInput(MathsInput): class UnaryMaths(MathsCommand): - """Use fslmaths to perorm a variety of mathematical operations on an image. - - """ + """Use fslmaths to perorm a variety of mathematical operations on an image.""" input_spec = UnaryMathsInput diff --git a/nipype/interfaces/fsl/model.py b/nipype/interfaces/fsl/model.py index d7484c0f99..8c269caac7 100644 --- a/nipype/interfaces/fsl/model.py +++ b/nipype/interfaces/fsl/model.py @@ -165,21 +165,21 @@ def _create_ev_files( ): """Creates EV files from condition and regressor information. - Parameters: - ----------- - - runinfo : dict - Generated by `SpecifyModel` and contains information - about events and other regressors. - runidx : int - Index to run number - ev_parameters : dict - A dictionary containing the model parameters for the - given design type. - orthogonalization : dict - A dictionary of dictionaries specifying orthogonal EVs. - contrasts : list of lists - Information on contrasts to be evaluated + Parameters: + ----------- + + runinfo : dict + Generated by `SpecifyModel` and contains information + about events and other regressors. + runidx : int + Index to run number + ev_parameters : dict + A dictionary containing the model parameters for the + given design type. + orthogonalization : dict + A dictionary of dictionaries specifying orthogonal EVs. 
+ contrasts : list of lists + Information on contrasts to be evaluated """ conds = {} evname = [] @@ -346,8 +346,7 @@ def _format_session_info(self, session_info): return session_info def _get_func_files(self, session_info): - """Returns functional files in the order of runs - """ + """Returns functional files in the order of runs""" func_files = [] for i, info in enumerate(session_info): func_files.insert(i, info["scans"]) @@ -457,8 +456,7 @@ class FEATOutputSpec(TraitedSpec): class FEAT(FSLCommand): - """Uses FSL feat to calculate first level stats - """ + """Uses FSL feat to calculate first level stats""" _cmd = "feat" input_spec = FEATInputSpec @@ -518,8 +516,7 @@ class FEATModelOutpuSpec(TraitedSpec): class FEATModel(FSLCommand): - """Uses FSL feat_model to generate design.mat files - """ + """Uses FSL feat_model to generate design.mat files""" _cmd = "feat_model" input_spec = FEATModelInputSpec @@ -937,8 +934,7 @@ class FEATRegisterOutputSpec(TraitedSpec): class FEATRegister(BaseInterface): - """Register feat directories to a specific standard - """ + """Register feat directories to a specific standard""" input_spec = FEATRegisterInputSpec output_spec = FEATRegisterOutputSpec @@ -1458,7 +1454,7 @@ class MultipleRegressDesignInputSpec(BaseInterfaceInputSpec): traits.Enum("T"), traits.List(traits.Str), traits.List(traits.Float), - ), + ) ), ), ), @@ -1886,11 +1882,13 @@ class SmoothEstimateInputSpec(FSLCommandInputSpec): class SmoothEstimateOutputSpec(TraitedSpec): dlh = traits.Float(desc="smoothness estimate sqrt(det(Lambda))") volume = traits.Int(desc="number of voxels in mask") - resels = traits.Float(desc="volume of resel, in voxels, defined as FWHM_x * FWHM_y * FWHM_z") + resels = traits.Float( + desc="volume of resel, in voxels, defined as FWHM_x * FWHM_y * FWHM_z" + ) class SmoothEstimate(FSLCommand): - """ Estimates the smoothness of an image + """Estimates the smoothness of an image Examples -------- @@ -2044,7 +2042,7 @@ class ClusterOutputSpec(TraitedSpec): class Cluster(FSLCommand): - """ Uses FSL cluster to perform clustering on statistical output + """Uses FSL cluster to perform clustering on statistical output Examples -------- diff --git a/nipype/interfaces/fsl/tests/test_FILMGLS.py b/nipype/interfaces/fsl/tests/test_FILMGLS.py index b4934f4d7a..a308e9da50 100644 --- a/nipype/interfaces/fsl/tests/test_FILMGLS.py +++ b/nipype/interfaces/fsl/tests/test_FILMGLS.py @@ -4,7 +4,7 @@ def test_filmgls(): input_map = dict( - args=dict(argstr="%s",), + args=dict(argstr="%s"), autocorr_estimate_only=dict( xor=[ "autocorr_estimate_only", @@ -27,9 +27,9 @@ def test_filmgls(): ], argstr="-noest", ), - brightness_threshold=dict(argstr="-epith %d",), - design_file=dict(argstr="%s",), - environ=dict(usedefault=True,), + brightness_threshold=dict(argstr="-epith %d"), + design_file=dict(argstr="%s"), + environ=dict(usedefault=True), fit_armodel=dict( xor=[ "autocorr_estimate_only", @@ -41,9 +41,9 @@ def test_filmgls(): ], argstr="-ar", ), - full_data=dict(argstr="-v",), - in_file=dict(mandatory=True, argstr="%s",), - mask_size=dict(argstr="-ms %d",), + full_data=dict(argstr="-v"), + in_file=dict(mandatory=True, argstr="%s"), + mask_size=dict(argstr="-ms %d"), multitaper_product=dict( xor=[ "autocorr_estimate_only", @@ -55,11 +55,11 @@ def test_filmgls(): ], argstr="-mt %d", ), - output_pwdata=dict(argstr="-output_pwdata",), + output_pwdata=dict(argstr="-output_pwdata"), output_type=dict(), - results_dir=dict(usedefault=True, argstr="-rn %s",), - smooth_autocorr=dict(argstr="-sa",), - 
threshold=dict(argstr="%f",), + results_dir=dict(usedefault=True, argstr="-rn %s"), + smooth_autocorr=dict(argstr="-sa"), + threshold=dict(argstr="%f"), tukey_window=dict( xor=[ "autocorr_estimate_only", @@ -71,10 +71,10 @@ def test_filmgls(): ], argstr="-tukey %d", ), - use_pava=dict(argstr="-pava",), + use_pava=dict(argstr="-pava"), ) input_map2 = dict( - args=dict(argstr="%s",), + args=dict(argstr="%s"), autocorr_estimate_only=dict( xor=[ "autocorr_estimate_only", @@ -97,9 +97,9 @@ def test_filmgls(): ], argstr="--noest", ), - brightness_threshold=dict(argstr="--epith=%d",), - design_file=dict(argstr="--pd=%s",), - environ=dict(usedefault=True,), + brightness_threshold=dict(argstr="--epith=%d"), + design_file=dict(argstr="--pd=%s"), + environ=dict(usedefault=True), fit_armodel=dict( xor=[ "autocorr_estimate_only", @@ -111,9 +111,9 @@ def test_filmgls(): ], argstr="--ar", ), - full_data=dict(argstr="-v",), - in_file=dict(mandatory=True, argstr="--in=%s",), - mask_size=dict(argstr="--ms=%d",), + full_data=dict(argstr="-v"), + in_file=dict(mandatory=True, argstr="--in=%s"), + mask_size=dict(argstr="--ms=%d"), multitaper_product=dict( xor=[ "autocorr_estimate_only", @@ -125,11 +125,11 @@ def test_filmgls(): ], argstr="--mt=%d", ), - output_pwdata=dict(argstr="--outputPWdata",), + output_pwdata=dict(argstr="--outputPWdata"), output_type=dict(), - results_dir=dict(argstr="--rn=%s", usedefault=True,), - smooth_autocorr=dict(argstr="--sa",), - threshold=dict(usedefault=True, argstr="--thr=%f",), + results_dir=dict(argstr="--rn=%s", usedefault=True), + smooth_autocorr=dict(argstr="--sa"), + threshold=dict(usedefault=True, argstr="--thr=%f"), tukey_window=dict( xor=[ "autocorr_estimate_only", @@ -141,7 +141,7 @@ def test_filmgls(): ], argstr="--tukey=%d", ), - use_pava=dict(argstr="--pava",), + use_pava=dict(argstr="--pava"), ) instance = FILMGLS() if isinstance(instance.inputs, FILMGLSInputSpec): diff --git a/nipype/interfaces/fsl/tests/test_preprocess.py b/nipype/interfaces/fsl/tests/test_preprocess.py index 6b1b6cb610..23ae7a6824 100644 --- a/nipype/interfaces/fsl/tests/test_preprocess.py +++ b/nipype/interfaces/fsl/tests/test_preprocess.py @@ -14,8 +14,7 @@ def fsl_name(obj, fname): - """Create valid fsl name, including file extension for output type. - """ + """Create valid fsl name, including file extension for output type.""" ext = Info.output_type_to_ext(obj.inputs.output_type) return fname + ext @@ -152,9 +151,9 @@ def test_fast(setup_infile): @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") def test_fast_list_outputs(setup_infile, tmpdir): - """ By default (no -o), FSL's fast command outputs files into the same + """By default (no -o), FSL's fast command outputs files into the same directory as the input files. If the flag -o is set, it outputs files into - the cwd """ + the cwd""" def _run_and_test(opts, output_base): outputs = fsl.FAST(**opts)._list_outputs() diff --git a/nipype/interfaces/fsl/utils.py b/nipype/interfaces/fsl/utils.py index 6ada44e046..cf9e4c68f0 100644 --- a/nipype/interfaces/fsl/utils.py +++ b/nipype/interfaces/fsl/utils.py @@ -161,9 +161,9 @@ class ImageMeantsOutputSpec(TraitedSpec): class ImageMeants(FSLCommand): - """ Use fslmeants for printing the average timeseries (intensities) to - the screen (or saves to a file). The average is taken over all voxels - in the mask (or all voxels in the image if no mask is specified) + """Use fslmeants for printing the average timeseries (intensities) to + the screen (or saves to a file). 
The average is taken over all voxels + in the mask (or all voxels in the image if no mask is specified) """ @@ -1036,7 +1036,7 @@ class OverlayOutputSpec(TraitedSpec): class Overlay(FSLCommand): - """ Use FSL's overlay command to combine background and statistical images + """Use FSL's overlay command to combine background and statistical images into one volume @@ -1980,19 +1980,19 @@ class ComplexInputSpec(FSLCommandInputSpec): start_vol = traits.Int(position=-2, argstr="%d") end_vol = traits.Int(position=-1, argstr="%d") - real_polar = traits.Bool(argstr="-realpolar", xor=_conversion, position=1,) + real_polar = traits.Bool(argstr="-realpolar", xor=_conversion, position=1) # requires=['complex_in_file','magnitude_out_file','phase_out_file']) - real_cartesian = traits.Bool(argstr="-realcartesian", xor=_conversion, position=1,) + real_cartesian = traits.Bool(argstr="-realcartesian", xor=_conversion, position=1) # requires=['complex_in_file','real_out_file','imaginary_out_file']) - complex_cartesian = traits.Bool(argstr="-complex", xor=_conversion, position=1,) + complex_cartesian = traits.Bool(argstr="-complex", xor=_conversion, position=1) # requires=['real_in_file','imaginary_in_file','complex_out_file']) - complex_polar = traits.Bool(argstr="-complexpolar", xor=_conversion, position=1,) + complex_polar = traits.Bool(argstr="-complexpolar", xor=_conversion, position=1) # requires=['magnitude_in_file','phase_in_file', # 'magnitude_out_file','phase_out_file']) - complex_split = traits.Bool(argstr="-complexsplit", xor=_conversion, position=1,) + complex_split = traits.Bool(argstr="-complexsplit", xor=_conversion, position=1) # requires=['complex_in_file','complex_out_file']) complex_merge = traits.Bool( - argstr="-complexmerge", xor=_conversion + ["start_vol", "end_vol"], position=1, + argstr="-complexmerge", xor=_conversion + ["start_vol", "end_vol"], position=1 ) diff --git a/nipype/interfaces/image.py b/nipype/interfaces/image.py index a984efa159..8ea33647c5 100644 --- a/nipype/interfaces/image.py +++ b/nipype/interfaces/image.py @@ -122,63 +122,62 @@ class ReorientOutputSpec(TraitedSpec): class Reorient(SimpleInterface): """Conform an image to a given orientation -Flips and reorder the image data array so that the axes match the -directions indicated in ``orientation``. -The default ``RAS`` orientation corresponds to the first axis being ordered -from left to right, the second axis from posterior to anterior, and the -third axis from inferior to superior. + Flips and reorder the image data array so that the axes match the + directions indicated in ``orientation``. + The default ``RAS`` orientation corresponds to the first axis being ordered + from left to right, the second axis from posterior to anterior, and the + third axis from inferior to superior. -For oblique images, the original orientation is considered to be the -closest plumb orientation. + For oblique images, the original orientation is considered to be the + closest plumb orientation. -No resampling is performed, and thus the output image is not de-obliqued -or registered to any other image or template. + No resampling is performed, and thus the output image is not de-obliqued + or registered to any other image or template. -The effective transform is calculated from the original affine matrix to -the reoriented affine matrix. + The effective transform is calculated from the original affine matrix to + the reoriented affine matrix. 
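
The Reorient docstring reindented above describes a pure axis permutation/flip with no resampling, plus an "effective transform" between the original and reoriented affines. As a rough illustration of the same idea outside nipype (a minimal sketch only: the file name is hypothetical, and the exact expression the interface stores in its transform output may differ), the nibabel orientation utilities it builds on can be used directly:

    import numpy as np
    import nibabel as nib
    from nibabel.orientations import axcodes2ornt, io_orientation, ornt_transform

    img = nib.load("input.nii.gz")                    # hypothetical input file
    orig_ornt = io_orientation(img.affine)            # current axis orientation
    targ_ornt = axcodes2ornt(tuple("LPS"))            # requested orientation codes
    ornt_xfm = ornt_transform(orig_ornt, targ_ornt)   # permutation/flip table
    reoriented = img.as_reoriented(ornt_xfm)          # reorders/flips data, no resampling
    # One way to express an effective transform between the two affines:
    transform = np.linalg.inv(img.affine).dot(reoriented.affine)
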
-Examples --------- - -If an image is not reoriented, the original file is not modified + Examples + -------- -.. testsetup:: + If an image is not reoriented, the original file is not modified - >>> def print_affine(matrix): - ... print(str(matrix).replace(']', ' ').replace('[', ' ')) + .. testsetup:: ->>> import numpy as np ->>> from nipype.interfaces.image import Reorient ->>> reorient = Reorient(orientation='LPS') ->>> reorient.inputs.in_file = 'segmentation0.nii.gz' ->>> res = reorient.run() ->>> res.outputs.out_file -'segmentation0.nii.gz' + >>> def print_affine(matrix): + ... print(str(matrix).replace(']', ' ').replace('[', ' ')) ->>> print_affine(np.loadtxt(res.outputs.transform)) -1. 0. 0. 0. -0. 1. 0. 0. -0. 0. 1. 0. -0. 0. 0. 1. + >>> import numpy as np + >>> from nipype.interfaces.image import Reorient + >>> reorient = Reorient(orientation='LPS') + >>> reorient.inputs.in_file = 'segmentation0.nii.gz' + >>> res = reorient.run() + >>> res.outputs.out_file + 'segmentation0.nii.gz' ->>> reorient.inputs.orientation = 'RAS' ->>> res = reorient.run() ->>> res.outputs.out_file # doctest: +ELLIPSIS -'.../segmentation0_ras.nii.gz' + >>> print_affine(np.loadtxt(res.outputs.transform)) + 1. 0. 0. 0. + 0. 1. 0. 0. + 0. 0. 1. 0. + 0. 0. 0. 1. ->>> print_affine(np.loadtxt(res.outputs.transform)) --1. 0. 0. 60. - 0. -1. 0. 72. - 0. 0. 1. 0. - 0. 0. 0. 1. + >>> reorient.inputs.orientation = 'RAS' + >>> res = reorient.run() + >>> res.outputs.out_file # doctest: +ELLIPSIS + '.../segmentation0_ras.nii.gz' -.. testcleanup:: + >>> print_affine(np.loadtxt(res.outputs.transform)) + -1. 0. 0. 60. + 0. -1. 0. 72. + 0. 0. 1. 0. + 0. 0. 0. 1. - >>> import os - >>> os.unlink(res.outputs.out_file) - >>> os.unlink(res.outputs.transform) + .. testcleanup:: -""" + >>> import os + >>> os.unlink(res.outputs.out_file) + >>> os.unlink(res.outputs.transform) + """ input_spec = ReorientInputSpec output_spec = ReorientOutputSpec diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index 67baa8cbf5..f6c6a893ad 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -96,7 +96,7 @@ def copytree(src, dst, use_hardlink=False): def add_traits(base, names, trait_type=None): - """ Add traits to a traited class. + """Add traits to a traited class. All traits are set to Undefined by default """ @@ -114,7 +114,7 @@ def add_traits(base, names, trait_type=None): def _get_head_bucket(s3_resource, bucket_name): - """ Try to get the header info of a bucket, in order to + """Try to get the header info of a bucket, in order to check if it exists and its permissions """ @@ -169,8 +169,7 @@ class ProgressPercentage(object): """ def __init__(self, filename): - """ - """ + """""" # Import packages import threading @@ -182,8 +181,7 @@ def __init__(self, filename): self._lock = threading.Lock() def __call__(self, bytes_amount): - """ - """ + """""" # Import packages import sys @@ -208,8 +206,7 @@ def __call__(self, bytes_amount): # DataSink inputs class DataSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): - """ - """ + """""" # Init inputspec data attributes base_directory = Str(desc="Path to the base directory for storing data.") @@ -661,8 +658,7 @@ def _upload_to_s3(self, bucket, src, dst): # List outputs, main run routine def _list_outputs(self): - """Execute this module. 
- """ + """Execute this module.""" # Init variables outputs = self.output_spec().get() @@ -1010,8 +1006,10 @@ def _list_outputs(self): try: filledtemplate = template % tuple(argtuple) except TypeError as e: - raise TypeError(f"{e}: Template {template} failed to convert " - f"with args {tuple(argtuple)}") + raise TypeError( + f"{e}: Template {template} failed to convert " + f"with args {tuple(argtuple)}" + ) outfiles = [] for fname in bkt_files: if re.match(filledtemplate, fname): @@ -1283,8 +1281,10 @@ def _list_outputs(self): try: filledtemplate = template % tuple(argtuple) except TypeError as e: - raise TypeError(f"{e}: Template {template} failed to convert " - f"with args {tuple(argtuple)}") + raise TypeError( + f"{e}: Template {template} failed to convert " + f"with args {tuple(argtuple)}" + ) outfiles = glob.glob(filledtemplate) if len(outfiles) == 0: msg = "Output key: %s Template: %s returned no files" % ( @@ -1489,7 +1489,7 @@ def _list_outputs(self): class DataFinderInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): - root_paths = traits.Either(traits.List(), Str(), mandatory=True,) + root_paths = traits.Either(traits.List(), Str(), mandatory=True) match_regex = Str( "(.+)", usedefault=True, desc=("Regular expression for matching paths.") ) @@ -2115,17 +2115,16 @@ def __setattr__(self, key, value): class XNATSink(LibraryBaseInterface, IOBase): - """ Generic datasink module that takes a directory containing a - list of nifti files and provides a set of structured output - fields. + """Generic datasink module that takes a directory containing a + list of nifti files and provides a set of structured output + fields. """ input_spec = XNATSinkInputSpec _pkg = "pyxnat" def _list_outputs(self): - """Execute this module. - """ + """Execute this module.""" import pyxnat # setup XNAT connection @@ -2326,8 +2325,7 @@ def __init__(self, input_names, **inputs): add_traits(self.inputs, [name for name in self._input_names]) def _list_outputs(self): - """Execute this module. - """ + """Execute this module.""" import sqlite3 conn = sqlite3.connect(self.inputs.database_file, check_same_thread=False) @@ -2390,8 +2388,7 @@ def __init__(self, input_names, **inputs): add_traits(self.inputs, [name for name in self._input_names]) def _list_outputs(self): - """Execute this module. - """ + """Execute this module.""" import MySQLdb if isdefined(self.inputs.config): @@ -2658,8 +2655,10 @@ def _list_outputs(self): try: filledtemplate = template % tuple(argtuple) except TypeError as e: - raise TypeError(f"{e}: Template {template} failed to convert " - f"with args {tuple(argtuple)}") + raise TypeError( + f"{e}: Template {template} failed to convert " + f"with args {tuple(argtuple)}" + ) outputs[key].append(self._get_files_over_ssh(filledtemplate)) diff --git a/nipype/interfaces/minc/base.py b/nipype/interfaces/minc/base.py index 5aca3e434e..3de0112614 100644 --- a/nipype/interfaces/minc/base.py +++ b/nipype/interfaces/minc/base.py @@ -18,15 +18,13 @@ def check_minc(): - """Returns True if and only if MINC is installed.' - """ + """Returns True if and only if MINC is installed.'""" return Info.version() is not None def no_minc(): - """Returns True if and only if MINC is *not* installed. 
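
The DataGrabber-style _list_outputs hunks above (nipype/interfaces/io.py) fill a %-style path template from the collected argument tuple and re-raise a clearer TypeError when the arguments do not match the template. A minimal self-contained illustration of that mechanism (the template and arguments below are made up for the example, not taken from the patch):

    template = "s%03d/f%d.nii"        # hypothetical path template
    argtuple = (3, 1)
    try:
        filledtemplate = template % tuple(argtuple)
    except TypeError as e:
        raise TypeError(
            f"{e}: Template {template} failed to convert "
            f"with args {tuple(argtuple)}"
        )
    print(filledtemplate)             # prints: s003/f1.nii
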
- """ + """Returns True if and only if MINC is *not* installed.""" return not check_minc() @@ -79,12 +77,7 @@ def read_hdf5_version(s): return s.split(":")[1].strip() return None - versions = { - "minc": None, - "libminc": None, - "netcdf": None, - "hdf5": None, - } + versions = {"minc": None, "libminc": None, "netcdf": None, "hdf5": None} for l in out.split("\n"): for (name, f) in [ diff --git a/nipype/interfaces/minc/minc.py b/nipype/interfaces/minc/minc.py index 74fdbfb031..0d4c302f94 100644 --- a/nipype/interfaces/minc/minc.py +++ b/nipype/interfaces/minc/minc.py @@ -33,7 +33,7 @@ class ExtractInputSpec(StdOutCommandLineInputSpec): input_file = File( - desc="input file", exists=True, mandatory=True, argstr="%s", position=-2, + desc="input file", exists=True, mandatory=True, argstr="%s", position=-2 ) output_file = File( @@ -109,10 +109,7 @@ class ExtractInputSpec(StdOutCommandLineInputSpec): desc="Specify the range of output values\nDefault value: 1.79769e+308 1.79769e+308.", ) - _xor_normalize = ( - "normalize", - "nonormalize", - ) + _xor_normalize = ("normalize", "nonormalize") normalize = traits.Bool( desc="Normalize integer pixel values to file max and min.", @@ -265,7 +262,7 @@ class Extract(StdOutCommandLine): class ToRawInputSpec(StdOutCommandLineInputSpec): input_file = File( - desc="input file", exists=True, mandatory=True, argstr="%s", position=-2, + desc="input file", exists=True, mandatory=True, argstr="%s", position=-2 ) output_file = File( @@ -334,10 +331,7 @@ class ToRawInputSpec(StdOutCommandLineInputSpec): ), ) - _xor_normalize = ( - "normalize", - "nonormalize", - ) + _xor_normalize = ("normalize", "nonormalize") normalize = traits.Bool( desc="Normalize integer pixel values to file max and min.", @@ -456,11 +450,7 @@ class Convert(CommandLine): class CopyInputSpec(CommandLineInputSpec): input_file = File( - desc="input file to copy", - exists=True, - mandatory=True, - argstr="%s", - position=-2, + desc="input file to copy", exists=True, mandatory=True, argstr="%s", position=-2 ) output_file = File( @@ -558,8 +548,7 @@ class ToEcatInputSpec(CommandLineInputSpec): ) no_decay_corr_fctr = traits.Bool( - desc="Do not compute the decay correction factors", - argstr="-no_decay_corr_fctr", + desc="Do not compute the decay correction factors", argstr="-no_decay_corr_fctr" ) voxels_as_integers = traits.Bool( @@ -600,7 +589,7 @@ class ToEcat(CommandLine): class DumpInputSpec(StdOutCommandLineInputSpec): input_file = File( - desc="input file", exists=True, mandatory=True, argstr="%s", position=-2, + desc="input file", exists=True, mandatory=True, argstr="%s", position=-2 ) output_file = File( @@ -612,10 +601,7 @@ class DumpInputSpec(StdOutCommandLineInputSpec): keep_extension=False, ) - _xor_coords_or_header = ( - "coordinate_data", - "header_data", - ) + _xor_coords_or_header = ("coordinate_data", "header_data") coordinate_data = traits.Bool( desc="Coordinate variable data and header information.", @@ -627,10 +613,7 @@ class DumpInputSpec(StdOutCommandLineInputSpec): desc="Header information only, no data.", argstr="-h", xor=_xor_coords_or_header ) - _xor_annotations = ( - "annotations_brief", - "annotations_full", - ) + _xor_annotations = ("annotations_brief", "annotations_full") annotations_brief = traits.Enum( "c", @@ -705,7 +688,7 @@ def _format_arg(self, name, spec, value): and isinstance(value[0], int) and isinstance(value[1], int) ): - return "-p %d,%d" % (value[0], value[1],) + return "-p %d,%d" % (value[0], value[1]) else: raise ValueError("Invalid precision argument: 
" + str(value)) return super(Dump, self)._format_arg(name, spec, value) @@ -751,10 +734,7 @@ class AverageInputSpec(CommandLineInputSpec): default_value=True, ) - _xor_verbose = ( - "verbose", - "quiet", - ) + _xor_verbose = ("verbose", "quiet") verbose = traits.Bool( desc="Print out log messages (default).", argstr="-verbose", xor=_xor_verbose @@ -765,10 +745,7 @@ class AverageInputSpec(CommandLineInputSpec): debug = traits.Bool(desc="Print out debugging messages.", argstr="-debug") - _xor_check_dimensions = ( - "check_dimensions", - "no_check_dimensions", - ) + _xor_check_dimensions = ("check_dimensions", "no_check_dimensions") check_dimensions = traits.Bool( desc="Check that dimension info matches across files (default).", @@ -837,10 +814,7 @@ class AverageInputSpec(CommandLineInputSpec): argstr="-max_buffer_size_in_kb %d", ) - _xor_normalize = ( - "normalize", - "nonormalize", - ) + _xor_normalize = ("normalize", "nonormalize") normalize = traits.Bool( desc="Normalize data sets for mean intensity.", @@ -939,11 +913,7 @@ class Average(CommandLine): class BlobInputSpec(CommandLineInputSpec): input_file = File( - desc="input file to blob", - exists=True, - mandatory=True, - argstr="%s", - position=-2, + desc="input file to blob", exists=True, mandatory=True, argstr="%s", position=-2 ) output_file = File( @@ -1025,10 +995,7 @@ class CalcInputSpec(CommandLineInputSpec): default_value=True, ) - _xor_verbose = ( - "verbose", - "quiet", - ) + _xor_verbose = ("verbose", "quiet") verbose = traits.Bool( desc="Print out log messages (default).", argstr="-verbose", xor=_xor_verbose @@ -1120,10 +1087,7 @@ class CalcInputSpec(CommandLineInputSpec): argstr="-max_buffer_size_in_kb %d", ) - _xor_check_dimensions = ( - "check_dimensions", - "no_check_dimensions", - ) + _xor_check_dimensions = ("check_dimensions", "no_check_dimensions") check_dimensions = traits.Bool( desc="Check that files have matching dimensions (default).", @@ -1236,7 +1200,7 @@ class Calc(CommandLine): class BBoxInputSpec(StdOutCommandLineInputSpec): input_file = File( - desc="input file", exists=True, mandatory=True, argstr="%s", position=-2, + desc="input file", exists=True, mandatory=True, argstr="%s", position=-2 ) output_file = File( @@ -1493,7 +1457,7 @@ class Beast(CommandLine): class PikInputSpec(CommandLineInputSpec): input_file = File( - desc="input file", exists=True, mandatory=True, argstr="%s", position=-2, + desc="input file", exists=True, mandatory=True, argstr="%s", position=-2 ) _xor_image_type = ("jpg", "png") @@ -1677,7 +1641,7 @@ def _format_arg(self, name, spec, value): class BlurInputSpec(CommandLineInputSpec): input_file = File( - desc="input file", exists=True, mandatory=True, argstr="%s", position=-2, + desc="input file", exists=True, mandatory=True, argstr="%s", position=-2 ) output_file_base = File(desc="output file base", argstr="%s", position=-1) @@ -1966,10 +1930,7 @@ class MathInputSpec(CommandLineInputSpec): argstr="-max_buffer_size_in_kb %d", ) - _xor_check_dimensions = ( - "check_dimensions", - "no_check_dimensions", - ) + _xor_check_dimensions = ("check_dimensions", "no_check_dimensions") check_dimensions = traits.Bool( desc="Check that dimension info matches across files (default).", @@ -2246,15 +2207,14 @@ def _format_arg(self, name, spec, value): elif isinstance(value, bool) and not value: raise ValueError("Does not make sense to specify %s=False" % (name,)) elif isinstance(value, float): - return "%s -const %s" % (spec.argstr, value,) + return "%s -const %s" % (spec.argstr, value) else: - raise 
ValueError("Invalid %s argument: %s" % (name, value,)) + raise ValueError("Invalid %s argument: %s" % (name, value)) return super(Math, self)._format_arg(name, spec, value) def _parse_inputs(self): - """A number of the command line options expect precisely one or two files. - """ + """A number of the command line options expect precisely one or two files.""" nr_input_files = len(self.inputs.input_files) @@ -2266,13 +2226,13 @@ def _parse_inputs(self): if nr_input_files != 2: raise ValueError( "Due to the %s option we expected 2 files but input_files is of length %d" - % (n, nr_input_files,) + % (n, nr_input_files) ) elif isinstance(t, float): if nr_input_files != 1: raise ValueError( "Due to the %s option we expected 1 file but input_files is of length %d" - % (n, nr_input_files,) + % (n, nr_input_files) ) else: raise ValueError( @@ -2286,7 +2246,7 @@ def _parse_inputs(self): if nr_input_files != 1: raise ValueError( "Due to the %s option we expected 1 file but input_files is of length %d" - % (n, nr_input_files,) + % (n, nr_input_files) ) for n in self.input_spec.two_volume_traits: @@ -2296,7 +2256,7 @@ def _parse_inputs(self): if nr_input_files != 2: raise ValueError( "Due to the %s option we expected 2 files but input_files is of length %d" - % (n, nr_input_files,) + % (n, nr_input_files) ) for n in self.input_spec.n_volume_traits: @@ -2306,7 +2266,7 @@ def _parse_inputs(self): if not nr_input_files >= 1: raise ValueError( "Due to the %s option we expected at least one file but input_files is of length %d" - % (n, nr_input_files,) + % (n, nr_input_files) ) return super(Math, self)._parse_inputs() @@ -2341,7 +2301,7 @@ class ResampleInputSpec(CommandLineInputSpec): ) # This is a dummy input. - input_grid_files = InputMultiPath(File, desc="input grid file(s)",) + input_grid_files = InputMultiPath(File, desc="input grid file(s)") two = traits.Bool(desc="Create a MINC 2 output file.", argstr="-2") @@ -3164,9 +3124,7 @@ class GennlxfmInputSpec(CommandLineInputSpec): ) step = traits.Int(desc="Output ident xfm step [default: 1].", argstr="-step %s") - like = File( - desc="Generate a nlxfm like this file.", exists=True, argstr="-like %s", - ) + like = File(desc="Generate a nlxfm like this file.", exists=True, argstr="-like %s") class GennlxfmOutputSpec(TraitedSpec): @@ -3215,7 +3173,7 @@ class XfmConcatInputSpec(CommandLineInputSpec): ) # This is a dummy input. 
- input_grid_files = InputMultiPath(File, desc="input grid file(s)",) + input_grid_files = InputMultiPath(File, desc="input grid file(s)") output_file = File( desc="output file", @@ -3275,11 +3233,11 @@ def _list_outputs(self): class BestLinRegInputSpec(CommandLineInputSpec): source = File( - desc="source Minc file", exists=True, mandatory=True, argstr="%s", position=-4, + desc="source Minc file", exists=True, mandatory=True, argstr="%s", position=-4 ) target = File( - desc="target Minc file", exists=True, mandatory=True, argstr="%s", position=-3, + desc="target Minc file", exists=True, mandatory=True, argstr="%s", position=-3 ) output_xfm = File( @@ -3356,17 +3314,17 @@ class BestLinReg(CommandLine): class NlpFitInputSpec(CommandLineInputSpec): source = File( - desc="source Minc file", exists=True, mandatory=True, argstr="%s", position=-3, + desc="source Minc file", exists=True, mandatory=True, argstr="%s", position=-3 ) target = File( - desc="target Minc file", exists=True, mandatory=True, argstr="%s", position=-2, + desc="target Minc file", exists=True, mandatory=True, argstr="%s", position=-2 ) - output_xfm = File(desc="output xfm file", genfile=True, argstr="%s", position=-1,) + output_xfm = File(desc="output xfm file", genfile=True, argstr="%s", position=-1) # This is a dummy input. - input_grid_files = InputMultiPath(File, desc="input grid file(s)",) + input_grid_files = InputMultiPath(File, desc="input grid file(s)") config_file = File( desc="File containing the fitting configuration use.", @@ -3472,9 +3430,9 @@ class XfmAvgInputSpec(CommandLineInputSpec): ) # This is a dummy input. - input_grid_files = InputMultiPath(File, desc="input grid file(s)",) + input_grid_files = InputMultiPath(File, desc="input grid file(s)") - output_file = File(desc="output file", genfile=True, argstr="%s", position=-1,) + output_file = File(desc="output file", genfile=True, argstr="%s", position=-1) verbose = traits.Bool( desc="Print out log messages. Default: False.", argstr="-verbose" @@ -3568,7 +3526,7 @@ class XfmInvertInputSpec(CommandLineInputSpec): desc="input file", exists=True, mandatory=True, argstr="%s", position=-2 ) - output_file = File(desc="output file", genfile=True, argstr="%s", position=-1,) + output_file = File(desc="output file", genfile=True, argstr="%s", position=-1) verbose = traits.Bool( desc="Print out log messages. Default: False.", argstr="-verbose" @@ -3818,7 +3776,7 @@ class VolSymmInputSpec(CommandLineInputSpec): ) # This is a dummy input. - input_grid_files = InputMultiPath(File, desc="input grid file(s)",) + input_grid_files = InputMultiPath(File, desc="input grid file(s)") verbose = traits.Bool( desc="Print out log messages. 
Default: False.", argstr="-verbose" diff --git a/nipype/interfaces/minc/testdata.py b/nipype/interfaces/minc/testdata.py index f4e2836d65..1c33fe2b2b 100644 --- a/nipype/interfaces/minc/testdata.py +++ b/nipype/interfaces/minc/testdata.py @@ -10,4 +10,4 @@ def nonempty_minc_data(i, shape="2D"): - return example_data("minc_test_%s_%.2d.mnc" % (shape, i,)) + return example_data("minc_test_%s_%.2d.mnc" % (shape, i)) diff --git a/nipype/interfaces/mixins/fixheader.py b/nipype/interfaces/mixins/fixheader.py index ded1830582..7bbff18f2b 100644 --- a/nipype/interfaces/mixins/fixheader.py +++ b/nipype/interfaces/mixins/fixheader.py @@ -9,7 +9,7 @@ class CopyHeaderInputSpec(BaseInterfaceInputSpec): class CopyHeaderInterface(BaseInterface): - """ Copy headers if the copy_header input is ``True`` + """Copy headers if the copy_header input is ``True`` This interface mixin adds a post-run hook that allows for copying an input header to an output file. diff --git a/nipype/interfaces/mne/base.py b/nipype/interfaces/mne/base.py index b06384fb4e..9fa880d44c 100644 --- a/nipype/interfaces/mne/base.py +++ b/nipype/interfaces/mne/base.py @@ -100,7 +100,7 @@ class WatershedBEM(FSCommand): 'mne watershed_bem --overwrite --subject subj1 --volume T1' >>> bem.run() # doctest: +SKIP - """ + """ _cmd = "mne watershed_bem" input_spec = WatershedBEMInputSpec diff --git a/nipype/interfaces/mrtrix3/base.py b/nipype/interfaces/mrtrix3/base.py index 15b208b4cd..7684e06cd7 100644 --- a/nipype/interfaces/mrtrix3/base.py +++ b/nipype/interfaces/mrtrix3/base.py @@ -36,7 +36,7 @@ def parse_version(raw_info): @classmethod def looseversion(cls): - """ Return a comparable version object + """Return a comparable version object If no version found, use LooseVersion('0.0.0') """ diff --git a/nipype/interfaces/niftyseg/label_fusion.py b/nipype/interfaces/niftyseg/label_fusion.py index aa255247d2..c406451e4e 100644 --- a/nipype/interfaces/niftyseg/label_fusion.py +++ b/nipype/interfaces/niftyseg/label_fusion.py @@ -168,15 +168,19 @@ class LabelFusion(NiftySegCommand): def _format_arg(self, opt, spec, val): """Convert input to appropriate format for seg_maths.""" # Remove options if not STAPLE or STEPS as fusion type: - if opt in [ - "proportion", - "prob_update_flag", - "set_pq", - "mrf_value", - "max_iter", - "unc_thresh", - "conv", - ] and self.inputs.classifier_type not in ["STAPLE", "STEPS"]: + if ( + opt + in [ + "proportion", + "prob_update_flag", + "set_pq", + "mrf_value", + "max_iter", + "unc_thresh", + "conv", + ] + and self.inputs.classifier_type not in ["STAPLE", "STEPS"] + ): return "" if opt == "sm_ranking": diff --git a/nipype/interfaces/niftyseg/tests/test_label_fusion.py b/nipype/interfaces/niftyseg/tests/test_label_fusion.py index 0a469a774e..a7a4fa7c91 100644 --- a/nipype/interfaces/niftyseg/tests/test_label_fusion.py +++ b/nipype/interfaces/niftyseg/tests/test_label_fusion.py @@ -120,6 +120,6 @@ def test_seg_calctopncc(): calctopncc.inputs.top_templates = 1 cmd_tmp = "{cmd} -target {in_file} -templates 2 {file1} {file2} -n 1" - expected_cmd = cmd_tmp.format(cmd=cmd, in_file=in_file, file1=file1, file2=file2,) + expected_cmd = cmd_tmp.format(cmd=cmd, in_file=in_file, file1=file1, file2=file2) assert calctopncc.cmdline == expected_cmd diff --git a/nipype/interfaces/nilearn.py b/nipype/interfaces/nilearn.py index 55f6982213..053902e2bd 100644 --- a/nipype/interfaces/nilearn.py +++ b/nipype/interfaces/nilearn.py @@ -112,7 +112,7 @@ def _run_interface(self, runtime): return runtime def _process_inputs(self): - """ 
validate and process inputs into useful form. + """validate and process inputs into useful form. Returns a list of nilearn maskers and the list of corresponding label names.""" import nilearn.input_data as nl @@ -170,6 +170,6 @@ def _process_inputs(self): return maskers def _4d(self, array, affine): - """ takes a 3-dimensional numpy array and an affine, - returns the equivalent 4th dimensional nifti file """ + """takes a 3-dimensional numpy array and an affine, + returns the equivalent 4th dimensional nifti file""" return nb.Nifti1Image(array[:, :, :, np.newaxis], affine) diff --git a/nipype/interfaces/nipy/preprocess.py b/nipype/interfaces/nipy/preprocess.py index 115b7f93ad..d9e7f65ade 100644 --- a/nipype/interfaces/nipy/preprocess.py +++ b/nipype/interfaces/nipy/preprocess.py @@ -240,7 +240,7 @@ class TrimOutputSpec(TraitedSpec): class Trim(NipyBaseInterface): - """ Simple interface to trim a few volumes from a 4d fmri nifti file + """Simple interface to trim a few volumes from a 4d fmri nifti file Examples -------- diff --git a/nipype/interfaces/nitime/tests/test_nitime.py b/nipype/interfaces/nitime/tests/test_nitime.py index 507e1856ae..0c72d63860 100644 --- a/nipype/interfaces/nitime/tests/test_nitime.py +++ b/nipype/interfaces/nitime/tests/test_nitime.py @@ -17,7 +17,7 @@ @pytest.mark.skipif(no_nitime, reason="nitime is not installed") def test_read_csv(): """Test that reading the data from csv file gives you back a reasonable - time-series object """ + time-series object""" CA = nitime.CoherenceAnalyzer() CA.inputs.TR = 1.89 # bogus value just to pass traits test CA.inputs.in_file = example_data("fmri_timeseries_nolabels.csv") diff --git a/nipype/interfaces/semtools/brains/classify.py b/nipype/interfaces/semtools/brains/classify.py index f59b53183e..bc46613693 100644 --- a/nipype/interfaces/semtools/brains/classify.py +++ b/nipype/interfaces/semtools/brains/classify.py @@ -67,21 +67,20 @@ class BRAINSPosteriorToContinuousClassOutputSpec(TraitedSpec): class BRAINSPosteriorToContinuousClass(SEMLikeCommandLine): """title: Tissue Classification -category: BRAINS.Classify + category: BRAINS.Classify -description: This program will generate an 8-bit continuous tissue classified image based on BRAINSABC posterior images. + description: This program will generate an 8-bit continuous tissue classified image based on BRAINSABC posterior images. -version: 3.0 + version: 3.0 -documentation-url: http://www.nitrc.org/plugins/mwiki/index.php/brains:BRAINSClassify + documentation-url: http://www.nitrc.org/plugins/mwiki/index.php/brains:BRAINSClassify -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -contributor: Vincent A. Magnotta + contributor: Vincent A. 
Magnotta -acknowledgements: Funding for this work was provided by NIH/NINDS award NS050568 - -""" + acknowledgements: Funding for this work was provided by NIH/NINDS award NS050568 + """ input_spec = BRAINSPosteriorToContinuousClassInputSpec output_spec = BRAINSPosteriorToContinuousClassOutputSpec diff --git a/nipype/interfaces/semtools/brains/segmentation.py b/nipype/interfaces/semtools/brains/segmentation.py index 5b0a901277..2c97b86842 100644 --- a/nipype/interfaces/semtools/brains/segmentation.py +++ b/nipype/interfaces/semtools/brains/segmentation.py @@ -46,17 +46,16 @@ class SimilarityIndexOutputSpec(TraitedSpec): class SimilarityIndex(SEMLikeCommandLine): """title: BRAINSCut:SimilarityIndexComputation -category: BRAINS.Segmentation + category: BRAINS.Segmentation -description: Automatic analysis of BRAINSCut Output + description: Automatic analysis of BRAINSCut Output -version: 1.0 + version: 1.0 -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -contributor: Eunyoung Regin Kim - -""" + contributor: Eunyoung Regin Kim + """ input_spec = SimilarityIndexInputSpec output_spec = SimilarityIndexOutputSpec @@ -113,21 +112,20 @@ class BRAINSTalairachOutputSpec(TraitedSpec): class BRAINSTalairach(SEMLikeCommandLine): """title: BRAINS Talairach -category: BRAINS.Segmentation - -description: This program creates a VTK structured grid defining the Talairach coordinate system based on four points: AC, PC, IRP, and SLA. The resulting structred grid can be written as either a classic VTK file or the new VTK XML file format. Two representations of the resulting grid can be written. The first is a bounding box representation that also contains the location of the AC and PC points. The second representation is the full Talairach grid representation that includes the additional rows of boxes added to the inferior allowing full coverage of the cerebellum. + category: BRAINS.Segmentation -version: 0.1 + description: This program creates a VTK structured grid defining the Talairach coordinate system based on four points: AC, PC, IRP, and SLA. The resulting structred grid can be written as either a classic VTK file or the new VTK XML file format. Two representations of the resulting grid can be written. The first is a bounding box representation that also contains the location of the AC and PC points. The second representation is the full Talairach grid representation that includes the additional rows of boxes added to the inferior allowing full coverage of the cerebellum. 
-documentation-url: http://www.nitrc.org/plugins/mwiki/index.php/brains:BRAINSTalairach + version: 0.1 -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + documentation-url: http://www.nitrc.org/plugins/mwiki/index.php/brains:BRAINSTalairach -contributor: Steven Dunn and Vincent Magnotta + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -acknowledgements: Funding for this work was provided by NIH/NINDS award NS050568 + contributor: Steven Dunn and Vincent Magnotta -""" + acknowledgements: Funding for this work was provided by NIH/NINDS award NS050568 + """ input_spec = BRAINSTalairachInputSpec output_spec = BRAINSTalairachOutputSpec @@ -178,21 +176,20 @@ class BRAINSTalairachMaskOutputSpec(TraitedSpec): class BRAINSTalairachMask(SEMLikeCommandLine): """title: Talairach Mask -category: BRAINS.Segmentation - -description: This program creates a binary image representing the specified Talairach region. The input is an example image to define the physical space for the resulting image, the Talairach grid representation in VTK format, and the file containing the Talairach box definitions to be generated. These can be combined in BRAINS to create a label map using the procedure Brains::WorkupUtils::CreateLabelMapFromBinaryImages. + category: BRAINS.Segmentation -version: 0.1 + description: This program creates a binary image representing the specified Talairach region. The input is an example image to define the physical space for the resulting image, the Talairach grid representation in VTK format, and the file containing the Talairach box definitions to be generated. These can be combined in BRAINS to create a label map using the procedure Brains::WorkupUtils::CreateLabelMapFromBinaryImages. -documentation-url: http://www.nitrc.org/plugins/mwiki/index.php/brains:BRAINSTalairachMask + version: 0.1 -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + documentation-url: http://www.nitrc.org/plugins/mwiki/index.php/brains:BRAINSTalairachMask -contributor: Steven Dunn and Vincent Magnotta + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -acknowledgements: Funding for this work was provided by NIH/NINDS award NS050568 + contributor: Steven Dunn and Vincent Magnotta -""" + acknowledgements: Funding for this work was provided by NIH/NINDS award NS050568 + """ input_spec = BRAINSTalairachMaskInputSpec output_spec = BRAINSTalairachMaskOutputSpec diff --git a/nipype/interfaces/semtools/brains/utilities.py b/nipype/interfaces/semtools/brains/utilities.py index 7c6351eaab..bed7438271 100644 --- a/nipype/interfaces/semtools/brains/utilities.py +++ b/nipype/interfaces/semtools/brains/utilities.py @@ -70,15 +70,14 @@ class HistogramMatchingFilterOutputSpec(TraitedSpec): class HistogramMatchingFilter(SEMLikeCommandLine): """title: Write Out Image Intensities -category: BRAINS.Utilities + category: BRAINS.Utilities -description: For Analysis + description: For Analysis -version: 0.1 + version: 0.1 -contributor: University of Iowa Department of Psychiatry, http:://www.psychiatry.uiowa.edu - -""" + contributor: University of Iowa Department of Psychiatry, http:://www.psychiatry.uiowa.edu + """ input_spec = HistogramMatchingFilterInputSpec output_spec = HistogramMatchingFilterOutputSpec @@ -144,15 +143,14 @@ class GenerateEdgeMapImageOutputSpec(TraitedSpec): class GenerateEdgeMapImage(SEMLikeCommandLine): """title: GenerateEdgeMapImage -category: BRAINS.Utilities - -description: Automatic edgemap generation for 
edge-guided super-resolution reconstruction + category: BRAINS.Utilities -version: 1.0 + description: Automatic edgemap generation for edge-guided super-resolution reconstruction -contributor: Ali Ghayoor + version: 1.0 -""" + contributor: Ali Ghayoor + """ input_spec = GenerateEdgeMapImageInputSpec output_spec = GenerateEdgeMapImageOutputSpec @@ -195,15 +193,14 @@ class GeneratePurePlugMaskOutputSpec(TraitedSpec): class GeneratePurePlugMask(SEMLikeCommandLine): """title: GeneratePurePlugMask -category: BRAINS.Utilities - -description: This program gets several modality image files and returns a binary mask that defines the pure plugs + category: BRAINS.Utilities -version: 1.0 + description: This program gets several modality image files and returns a binary mask that defines the pure plugs -contributor: Ali Ghayoor + version: 1.0 -""" + contributor: Ali Ghayoor + """ input_spec = GeneratePurePlugMaskInputSpec output_spec = GeneratePurePlugMaskOutputSpec diff --git a/nipype/interfaces/semtools/converters.py b/nipype/interfaces/semtools/converters.py index f3c1d432f5..4df811eff5 100644 --- a/nipype/interfaces/semtools/converters.py +++ b/nipype/interfaces/semtools/converters.py @@ -43,21 +43,20 @@ class DWISimpleCompareOutputSpec(TraitedSpec): class DWISimpleCompare(SEMLikeCommandLine): """title: Nrrd DWI comparison -category: Converters + category: Converters -description: Compares two nrrd format DWI images and verifies that gradient magnitudes, gradient directions, measurement frame, and max B0 value are identicle. Used for testing DWIConvert. + description: Compares two nrrd format DWI images and verifies that gradient magnitudes, gradient directions, measurement frame, and max B0 value are identicle. Used for testing DWIConvert. -version: 0.1.0.$Revision: 916 $(alpha) + version: 0.1.0.$Revision: 916 $(alpha) -documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DWIConvert + documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DWIConvert -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -contributor: Mark Scully (UIowa) + contributor: Mark Scully (UIowa) -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. Additional support for DTI data produced on Philips scanners was contributed by Vincent Magnotta and Hans Johnson at the University of Iowa. - -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. Additional support for DTI data produced on Philips scanners was contributed by Vincent Magnotta and Hans Johnson at the University of Iowa. + """ input_spec = DWISimpleCompareInputSpec output_spec = DWISimpleCompareOutputSpec @@ -86,21 +85,20 @@ class DWICompareOutputSpec(TraitedSpec): class DWICompare(SEMLikeCommandLine): """title: Nrrd DWI comparison -category: Converters - -description: Compares two nrrd format DWI images and verifies that gradient magnitudes, gradient directions, measurement frame, and max B0 value are identicle. Used for testing DWIConvert. 
+ category: Converters -version: 0.1.0.$Revision: 916 $(alpha) + description: Compares two nrrd format DWI images and verifies that gradient magnitudes, gradient directions, measurement frame, and max B0 value are identicle. Used for testing DWIConvert. -documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DWIConvert + version: 0.1.0.$Revision: 916 $(alpha) -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DWIConvert -contributor: Mark Scully (UIowa) + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. Additional support for DTI data produced on Philips scanners was contributed by Vincent Magnotta and Hans Johnson at the University of Iowa. + contributor: Mark Scully (UIowa) -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. Additional support for DTI data produced on Philips scanners was contributed by Vincent Magnotta and Hans Johnson at the University of Iowa. + """ input_spec = DWICompareInputSpec output_spec = DWICompareOutputSpec diff --git a/nipype/interfaces/semtools/diffusion/diffusion.py b/nipype/interfaces/semtools/diffusion/diffusion.py index 90377b8ee0..8cc5a320e6 100644 --- a/nipype/interfaces/semtools/diffusion/diffusion.py +++ b/nipype/interfaces/semtools/diffusion/diffusion.py @@ -46,24 +46,23 @@ class dtiaverageOutputSpec(TraitedSpec): class dtiaverage(SEMLikeCommandLine): """title: DTIAverage (DTIProcess) -category: Diffusion.Diffusion Tensor Images.CommandLineOnly + category: Diffusion.Diffusion Tensor Images.CommandLineOnly -description: dtiaverage is a program that allows to compute the average of an arbitrary number of tensor fields (listed after the --inputs option) This program is used in our pipeline as the last step of the atlas building processing. When all the tensor fields have been deformed in the same space, to create the average tensor field (--tensor_output) we use dtiaverage. - Several average method can be used (specified by the --method option): euclidian, log-euclidian and pga. The default being euclidian. + description: dtiaverage is a program that allows to compute the average of an arbitrary number of tensor fields (listed after the --inputs option) This program is used in our pipeline as the last step of the atlas building processing. When all the tensor fields have been deformed in the same space, to create the average tensor field (--tensor_output) we use dtiaverage. + Several average method can be used (specified by the --method option): euclidian, log-euclidian and pga. The default being euclidian. -version: 1.0.0 + version: 1.0.0 -documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess + documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess -license: Copyright (c) Casey Goodlett. All rights reserved. - See http://www.ia.unc.edu/dev/Copyright.htm for details. - This software is distributed WITHOUT ANY WARRANTY; without even - the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR - PURPOSE. 
See the above copyright notices for more information. + license: Copyright (c) Casey Goodlett. All rights reserved. + See http://www.ia.unc.edu/dev/Copyright.htm for details. + This software is distributed WITHOUT ANY WARRANTY; without even + the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + PURPOSE. See the above copyright notices for more information. -contributor: Casey Goodlett - -""" + contributor: Casey Goodlett + """ input_spec = dtiaverageInputSpec output_spec = dtiaverageOutputSpec @@ -179,45 +178,44 @@ class dtiestimOutputSpec(TraitedSpec): class dtiestim(SEMLikeCommandLine): """title: DTIEstim (DTIProcess) -category: Diffusion.Diffusion Weighted Images - -description: dtiestim is a tool that takes in a set of DWIs (with --dwi_image option) in nrrd format and estimates a tensor field out of it. The output tensor file name is specified with the --tensor_output option -There are several methods to estimate the tensors which you can specify with the option --method lls|wls|nls|ml . Here is a short description of the different methods: + category: Diffusion.Diffusion Weighted Images -lls - Linear least squares. Standard estimation technique that recovers the tensor parameters by multiplying the log of the normalized signal intensities by the pseudo-inverse of the gradient matrix. Default option. + description: dtiestim is a tool that takes in a set of DWIs (with --dwi_image option) in nrrd format and estimates a tensor field out of it. The output tensor file name is specified with the --tensor_output option + There are several methods to estimate the tensors which you can specify with the option --method lls|wls|nls|ml . Here is a short description of the different methods: -wls - Weighted least squares. This method is similar to the linear least squares method except that the gradient matrix is weighted by the original lls estimate. (See Salvador, R., Pena, A., Menon, D. K., Carpenter, T. A., Pickard, J. D., and Bullmore, E. T. Formal characterization and extension of the linearized diffusion tensor model. Human Brain Mapping 24, 2 (Feb. 2005), 144-155. for more information on this method). This method is recommended for most applications. The weight for each iteration can be specified with the --weight_iterations. It is not currently the default due to occasional matrix singularities. -nls - Non-linear least squares. This method does not take the log of the signal and requires an optimization based on levenberg-marquadt to optimize the parameters of the signal. The lls estimate is used as an initialization. For this method the step size can be specified with the --step option. -ml - Maximum likelihood estimation. This method is experimental and is not currently recommended. For this ml method the sigma can be specified with the option --sigma and the step size can be specified with the --step option. + lls + Linear least squares. Standard estimation technique that recovers the tensor parameters by multiplying the log of the normalized signal intensities by the pseudo-inverse of the gradient matrix. Default option. -You can set a threshold (--threshold) to have the tensor estimated to only a subset of voxels. All the baseline voxel value higher than the threshold define the voxels where the tensors are computed. If not specified the threshold is calculated using an OTSU threshold on the baseline image.The masked generated by the -t option or by the otsu value can be saved with the --B0_mask_output option. + wls + Weighted least squares. 
This method is similar to the linear least squares method except that the gradient matrix is weighted by the original lls estimate. (See Salvador, R., Pena, A., Menon, D. K., Carpenter, T. A., Pickard, J. D., and Bullmore, E. T. Formal characterization and extension of the linearized diffusion tensor model. Human Brain Mapping 24, 2 (Feb. 2005), 144-155. for more information on this method). This method is recommended for most applications. The weight for each iteration can be specified with the --weight_iterations. It is not currently the default due to occasional matrix singularities. + nls + Non-linear least squares. This method does not take the log of the signal and requires an optimization based on levenberg-marquadt to optimize the parameters of the signal. The lls estimate is used as an initialization. For this method the step size can be specified with the --step option. + ml + Maximum likelihood estimation. This method is experimental and is not currently recommended. For this ml method the sigma can be specified with the option --sigma and the step size can be specified with the --step option. -dtiestim also can extract a few scalar images out of the DWI set of images: + You can set a threshold (--threshold) to have the tensor estimated to only a subset of voxels. All the baseline voxel value higher than the threshold define the voxels where the tensors are computed. If not specified the threshold is calculated using an OTSU threshold on the baseline image.The masked generated by the -t option or by the otsu value can be saved with the --B0_mask_output option. - - the average baseline image (--B0) which is the average of all the B0s. - - the IDWI (--idwi)which is the geometric mean of the diffusion images. + dtiestim also can extract a few scalar images out of the DWI set of images: -You can also load a mask if you want to compute the tensors only where the voxels are non-zero (--brain_mask) or a negative mask and the tensors will be estimated where the negative mask has zero values (--bad_region_mask) + - the average baseline image (--B0) which is the average of all the B0s. + - the IDWI (--idwi)which is the geometric mean of the diffusion images. -version: 1.2.0 + You can also load a mask if you want to compute the tensors only where the voxels are non-zero (--brain_mask) or a negative mask and the tensors will be estimated where the negative mask has zero values (--bad_region_mask) -documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess + version: 1.2.0 -license: Copyright (c) Casey Goodlett. All rights reserved. - See http://www.ia.unc.edu/dev/Copyright.htm for details. - This software is distributed WITHOUT ANY WARRANTY; without even - the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR - PURPOSE. See the above copyright notices for more information. + documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess -contributor: Casey Goodlett, Francois Budin + license: Copyright (c) Casey Goodlett. All rights reserved. + See http://www.ia.unc.edu/dev/Copyright.htm for details. + This software is distributed WITHOUT ANY WARRANTY; without even + the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + PURPOSE. See the above copyright notices for more information. 
-acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering) provided conversions to make DTIProcess compatible with Slicer execution, and simplified the stand-alone build requirements by removing the dependancies on boost and a fortran compiler. + contributor: Casey Goodlett, Francois Budin -""" + acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering) provided conversions to make DTIProcess compatible with Slicer execution, and simplified the stand-alone build requirements by removing the dependancies on boost and a fortran compiler. + """ input_spec = dtiestimInputSpec output_spec = dtiestimOutputSpec @@ -436,35 +434,34 @@ class dtiprocessOutputSpec(TraitedSpec): class dtiprocess(SEMLikeCommandLine): """title: DTIProcess (DTIProcess) -category: Diffusion.Diffusion Tensor Images + category: Diffusion.Diffusion Tensor Images -description: dtiprocess is a tool that handles tensor fields. It takes as an input a tensor field in nrrd format. -It can generate diffusion scalar properties out of the tensor field such as : FA (--fa_output), Gradient FA image (--fa_gradient_output), color FA (--color_fa_output), MD (--md_output), Frobenius norm (--frobenius_norm_output), lbd1, lbd2, lbd3 (--lambda{1,2,3}_output), binary map of voxel where if any of the eigenvalue is negative, the voxel is set to 1 (--negative_eigenvector_output) + description: dtiprocess is a tool that handles tensor fields. It takes as an input a tensor field in nrrd format. + It can generate diffusion scalar properties out of the tensor field such as : FA (--fa_output), Gradient FA image (--fa_gradient_output), color FA (--color_fa_output), MD (--md_output), Frobenius norm (--frobenius_norm_output), lbd1, lbd2, lbd3 (--lambda{1,2,3}_output), binary map of voxel where if any of the eigenvalue is negative, the voxel is set to 1 (--negative_eigenvector_output) -It also creates 4D images out of the tensor field such as: Highest eigenvector map (highest eigenvector at each voxel) (--principal_eigenvector_output) + It also creates 4D images out of the tensor field such as: Highest eigenvector map (highest eigenvector at each voxel) (--principal_eigenvector_output) -Masking capabilities: For any of the processing done with dtiprocess, it's possible to apply it on a masked region of the tensor field. You need to use the --mask option for any of the option to be applied on that tensor field sub-region only. If you want to save the masked tensor field use the option --outmask and specify the new masked tensor field file name. -dtiprocess also allows a range of transformations on the tensor fields. The transformed tensor field file name is specified with the option --deformation_output. There are 3 resampling interpolation methods specified with the tag --interpolation followed by the type to use (nearestneighbor, linear, cubic) Then you have several transformations possible to apply: + Masking capabilities: For any of the processing done with dtiprocess, it's possible to apply it on a masked region of the tensor field. You need to use the --mask option for any of the option to be applied on that tensor field sub-region only. 
If you want to save the masked tensor field use the option --outmask and specify the new masked tensor field file name. + dtiprocess also allows a range of transformations on the tensor fields. The transformed tensor field file name is specified with the option --deformation_output. There are 3 resampling interpolation methods specified with the tag --interpolation followed by the type to use (nearestneighbor, linear, cubic) Then you have several transformations possible to apply: - - Affine transformations using as an input - - itk affine transformation file (based on the itkAffineTransform class) - - Affine transformations using rview (details and download at http://www.doc.ic.ac.uk/~dr/software/). There are 2 versions of rview both creating transformation files called dof files. The old version of rview outputs text files containing the transformation parameters. It can be read in with the --dof_file option. The new version outputs binary dof files. These dof files can be transformed into human readable file with the dof2mat tool which is part of the rview package. So you need to save the output of dof2mat into a text file which can then be used with the -- newdof_file option. Usage example: dof2mat mynewdoffile.dof >> mynewdoffile.txt dtiprocess --dti_image mytensorfield.nhdr --newdof_file mynewdoffile.txt --rot_output myaffinetensorfield.nhdr + - Affine transformations using as an input + - itk affine transformation file (based on the itkAffineTransform class) + - Affine transformations using rview (details and download at http://www.doc.ic.ac.uk/~dr/software/). There are 2 versions of rview both creating transformation files called dof files. The old version of rview outputs text files containing the transformation parameters. It can be read in with the --dof_file option. The new version outputs binary dof files. These dof files can be transformed into human readable file with the dof2mat tool which is part of the rview package. So you need to save the output of dof2mat into a text file which can then be used with the -- newdof_file option. Usage example: dof2mat mynewdoffile.dof >> mynewdoffile.txt dtiprocess --dti_image mytensorfield.nhdr --newdof_file mynewdoffile.txt --rot_output myaffinetensorfield.nhdr -Non linear transformations as an input: The default transformation file type is d-field (displacement field) in nrrd format. The option to use is --forward with the name of the file. If the transformation file is a h-field you have to add the option --hField. + Non linear transformations as an input: The default transformation file type is d-field (displacement field) in nrrd format. The option to use is --forward with the name of the file. If the transformation file is a h-field you have to add the option --hField. -version: 1.0.1 + version: 1.0.1 -documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess + documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess -license: Copyright (c) Casey Goodlett. All rights reserved. - See http://www.ia.unc.edu/dev/Copyright.htm for details. - This software is distributed WITHOUT ANY WARRANTY; without even - the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR - PURPOSE. See the above copyright notices for more information. + license: Copyright (c) Casey Goodlett. All rights reserved. + See http://www.ia.unc.edu/dev/Copyright.htm for details. 
+ This software is distributed WITHOUT ANY WARRANTY; without even + the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + PURPOSE. See the above copyright notices for more information. -contributor: Casey Goodlett - -""" + contributor: Casey Goodlett + """ input_spec = dtiprocessInputSpec output_spec = dtiprocessOutputSpec @@ -606,21 +603,20 @@ class DWIConvertOutputSpec(TraitedSpec): class DWIConvert(SEMLikeCommandLine): """title: DWIConverter -category: Diffusion.Diffusion Data Conversion - -description: Converts diffusion weighted MR images in dicom series into Nrrd format for analysis in Slicer. This program has been tested on only a limited subset of DTI dicom formats available from Siemens, GE, and Phillips scanners. Work in progress to support dicom multi-frame data. The program parses dicom header to extract necessary information about measurement frame, diffusion weighting directions, b-values, etc, and write out a nrrd image. For non-diffusion weighted dicom images, it loads in an entire dicom series and writes out a single dicom volume in a .nhdr/.raw pair. + category: Diffusion.Diffusion Data Conversion -version: Version 1.0 + description: Converts diffusion weighted MR images in dicom series into Nrrd format for analysis in Slicer. This program has been tested on only a limited subset of DTI dicom formats available from Siemens, GE, and Phillips scanners. Work in progress to support dicom multi-frame data. The program parses dicom header to extract necessary information about measurement frame, diffusion weighting directions, b-values, etc, and write out a nrrd image. For non-diffusion weighted dicom images, it loads in an entire dicom series and writes out a single dicom volume in a .nhdr/.raw pair. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DWIConverter + version: Version 1.0 -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DWIConverter -contributor: Vince Magnotta (UIowa), Hans Johnson (UIowa), Joy Matsui (UIowa), Kent Williams (UIowa), Mark Scully (Uiowa), Xiaodong Tao (GE) + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. Additional support for DTI data produced on Philips scanners was contributed by Vincent Magnotta and Hans Johnson at the University of Iowa. + contributor: Vince Magnotta (UIowa), Hans Johnson (UIowa), Joy Matsui (UIowa), Kent Williams (UIowa), Mark Scully (Uiowa), Xiaodong Tao (GE) -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. Additional support for DTI data produced on Philips scanners was contributed by Vincent Magnotta and Hans Johnson at the University of Iowa. 
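For orientation, the DWIConvert wrapper above is driven like any other nipype command-line interface: instantiate it, set inputs, and call run(). A minimal sketch follows; the trait names (conversionMode, inputDicomDirectory, outputVolume) are assumed to mirror the tool's long command-line flags, and the paths are placeholders.

>>> from nipype.interfaces.semtools import DWIConvert
>>> convert = DWIConvert()
>>> convert.inputs.conversionMode = 'DicomToNrrd'         # assumed enum value
>>> convert.inputs.inputDicomDirectory = 'dwi_dicom_dir'  # hypothetical DICOM series directory
>>> convert.inputs.outputVolume = 'dwi.nrrd'
>>> convert.run()  # doctest: +SKIP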
+ """ input_spec = DWIConvertInputSpec output_spec = DWIConvertOutputSpec diff --git a/nipype/interfaces/semtools/diffusion/gtract.py b/nipype/interfaces/semtools/diffusion/gtract.py index 0ad255b536..eb8e05f4f5 100644 --- a/nipype/interfaces/semtools/diffusion/gtract.py +++ b/nipype/interfaces/semtools/diffusion/gtract.py @@ -48,21 +48,20 @@ class gtractTransformToDisplacementFieldOutputSpec(TraitedSpec): class gtractTransformToDisplacementField(SEMLikeCommandLine): """title: Create Displacement Field -category: Diffusion.GTRACT + category: Diffusion.GTRACT -description: This program will compute forward deformation from the given Transform. The size of the DF is equal to MNI space + description: This program will compute forward deformation from the given Transform. The size of the DF is equal to MNI space -version: 4.0.0 + version: 4.0.0 -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT -license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt -contributor: This tool was developed by Vincent Magnotta, Madhura Ingalhalikar, and Greg Harris + contributor: This tool was developed by Vincent Magnotta, Madhura Ingalhalikar, and Greg Harris -acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 - -""" + acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + """ input_spec = gtractTransformToDisplacementFieldInputSpec output_spec = gtractTransformToDisplacementFieldOutputSpec @@ -110,21 +109,20 @@ class gtractInvertBSplineTransformOutputSpec(TraitedSpec): class gtractInvertBSplineTransform(SEMLikeCommandLine): """title: B-Spline Transform Inversion -category: Diffusion.GTRACT - -description: This program will invert a B-Spline transform using a thin-plate spline approximation. + category: Diffusion.GTRACT -version: 4.0.0 + description: This program will invert a B-Spline transform using a thin-plate spline approximation. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + version: 4.0.0 -license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT -contributor: This tool was developed by Vincent Magnotta and Greg Harris. + license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt -acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + contributor: This tool was developed by Vincent Magnotta and Greg Harris. -""" + acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + """ input_spec = gtractInvertBSplineTransformInputSpec output_spec = gtractInvertBSplineTransformOutputSpec @@ -166,21 +164,20 @@ class gtractConcatDwiOutputSpec(TraitedSpec): class gtractConcatDwi(SEMLikeCommandLine): """title: Concat DWI Images -category: Diffusion.GTRACT - -description: This program will concatenate two DTI runs together. + category: Diffusion.GTRACT -version: 4.0.0 + description: This program will concatenate two DTI runs together. 
-documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + version: 4.0.0 -license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT -contributor: This tool was developed by Vincent Magnotta and Greg Harris. + license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt -acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + contributor: This tool was developed by Vincent Magnotta and Greg Harris. -""" + acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + """ input_spec = gtractConcatDwiInputSpec output_spec = gtractConcatDwiOutputSpec @@ -226,21 +223,20 @@ class gtractAverageBvaluesOutputSpec(TraitedSpec): class gtractAverageBvalues(SEMLikeCommandLine): """title: Average B-Values -category: Diffusion.GTRACT + category: Diffusion.GTRACT -description: This program will directly average together the baseline gradients (b value equals 0) within a DWI scan. This is usually used after gtractCoregBvalues. + description: This program will directly average together the baseline gradients (b value equals 0) within a DWI scan. This is usually used after gtractCoregBvalues. -version: 4.0.0 + version: 4.0.0 -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT -license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt -contributor: This tool was developed by Vincent Magnotta and Greg Harris. + contributor: This tool was developed by Vincent Magnotta and Greg Harris. -acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 - -""" + acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + """ input_spec = gtractAverageBvaluesInputSpec output_spec = gtractAverageBvaluesOutputSpec @@ -336,21 +332,20 @@ class gtractCoregBvaluesOutputSpec(TraitedSpec): class gtractCoregBvalues(SEMLikeCommandLine): """title: Coregister B-Values -category: Diffusion.GTRACT - -description: This step should be performed after converting DWI scans from DICOM to NRRD format. This program will register all gradients in a NRRD diffusion weighted 4D vector image (moving image) to a specified index in a fixed image. It also supports co-registration with a T2 weighted image or field map in the same plane as the DWI data. The fixed image for the registration should be a b0 image. A mutual information metric cost function is used for the registration because of the differences in signal intensity as a result of the diffusion gradients. The full affine allows the registration procedure to correct for eddy current distortions that may exist in the data. If the eddyCurrentCorrection is enabled, relaxationFactor (0.25) and maximumStepSize (0.1) should be adjusted. + category: Diffusion.GTRACT -version: 4.0.0 + description: This step should be performed after converting DWI scans from DICOM to NRRD format. This program will register all gradients in a NRRD diffusion weighted 4D vector image (moving image) to a specified index in a fixed image. It also supports co-registration with a T2 weighted image or field map in the same plane as the DWI data. The fixed image for the registration should be a b0 image. 
A mutual information metric cost function is used for the registration because of the differences in signal intensity as a result of the diffusion gradients. The full affine allows the registration procedure to correct for eddy current distortions that may exist in the data. If the eddyCurrentCorrection is enabled, relaxationFactor (0.25) and maximumStepSize (0.1) should be adjusted. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + version: 4.0.0 -license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT -contributor: This tool was developed by Vincent Magnotta and Greg Harris. + license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt -acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + contributor: This tool was developed by Vincent Magnotta and Greg Harris. -""" + acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + """ input_spec = gtractCoregBvaluesInputSpec output_spec = gtractCoregBvaluesOutputSpec @@ -407,21 +402,20 @@ class gtractResampleAnisotropyOutputSpec(TraitedSpec): class gtractResampleAnisotropy(SEMLikeCommandLine): """title: Resample Anisotropy -category: Diffusion.GTRACT - -description: This program will resample a floating point image using either the Rigid or B-Spline transform. You may want to save the aligned B0 image after each of the anisotropy map co-registration steps with the anatomical image to check the registration quality with another tool. + category: Diffusion.GTRACT -version: 4.0.0 + description: This program will resample a floating point image using either the Rigid or B-Spline transform. You may want to save the aligned B0 image after each of the anisotropy map co-registration steps with the anatomical image to check the registration quality with another tool. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + version: 4.0.0 -license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT -contributor: This tool was developed by Vincent Magnotta and Greg Harris. + license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt -acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + contributor: This tool was developed by Vincent Magnotta and Greg Harris. -""" + acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + """ input_spec = gtractResampleAnisotropyInputSpec output_spec = gtractResampleAnisotropyOutputSpec @@ -478,21 +472,20 @@ class gtractResampleCodeImageOutputSpec(TraitedSpec): class gtractResampleCodeImage(SEMLikeCommandLine): """title: Resample Code Image -category: Diffusion.GTRACT + category: Diffusion.GTRACT -description: This program will resample a short integer code image using either the Rigid or Inverse-B-Spline transform. The reference image is the DTI tensor anisotropy image space, and the input code image is in anatomical space. + description: This program will resample a short integer code image using either the Rigid or Inverse-B-Spline transform. The reference image is the DTI tensor anisotropy image space, and the input code image is in anatomical space. 
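To make the gtractCoregBvalues step described a little earlier more concrete, a hedged usage sketch is shown below; all trait names are assumed to mirror the command-line flags, and the relaxationFactor and maximumStepSize values are the ones its description suggests when eddy-current correction is enabled.

>>> from nipype.interfaces.semtools import gtractCoregBvalues
>>> coreg = gtractCoregBvalues()
>>> coreg.inputs.movingVolume = 'dwi.nrrd'   # 4D DWI vector image to register (hypothetical path)
>>> coreg.inputs.fixedVolume = 'dwi.nrrd'
>>> coreg.inputs.fixedVolumeIndex = 0        # b0 gradient used as the fixed image
>>> coreg.inputs.eddyCurrentCorrection = True
>>> coreg.inputs.relaxationFactor = 0.25
>>> coreg.inputs.maximumStepSize = 0.1
>>> coreg.inputs.outputVolume = 'dwi_coreg.nrrd'
>>> coreg.run()  # doctest: +SKIP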
-version: 4.0.0 + version: 4.0.0 -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT -license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt -contributor: This tool was developed by Vincent Magnotta and Greg Harris. + contributor: This tool was developed by Vincent Magnotta and Greg Harris. -acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 - -""" + acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + """ input_spec = gtractResampleCodeImageInputSpec output_spec = gtractResampleCodeImageOutputSpec @@ -535,21 +528,20 @@ class gtractCopyImageOrientationOutputSpec(TraitedSpec): class gtractCopyImageOrientation(SEMLikeCommandLine): """title: Copy Image Orientation -category: Diffusion.GTRACT - -description: This program will copy the orientation from the reference image into the moving image. Currently, the registration process requires that the diffusion weighted images and the anatomical images have the same image orientation (i.e. Axial, Coronal, Sagittal). It is suggested that you copy the image orientation from the diffusion weighted images and apply this to the anatomical image. This image can be subsequently removed after the registration step is complete. We anticipate that this limitation will be removed in future versions of the registration programs. + category: Diffusion.GTRACT -version: 4.0.0 + description: This program will copy the orientation from the reference image into the moving image. Currently, the registration process requires that the diffusion weighted images and the anatomical images have the same image orientation (i.e. Axial, Coronal, Sagittal). It is suggested that you copy the image orientation from the diffusion weighted images and apply this to the anatomical image. This image can be subsequently removed after the registration step is complete. We anticipate that this limitation will be removed in future versions of the registration programs. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + version: 4.0.0 -license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT -contributor: This tool was developed by Vincent Magnotta and Greg Harris. + license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt -acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + contributor: This tool was developed by Vincent Magnotta and Greg Harris. -""" + acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + """ input_spec = gtractCopyImageOrientationInputSpec output_spec = gtractCopyImageOrientationOutputSpec @@ -591,21 +583,20 @@ class gtractCreateGuideFiberOutputSpec(TraitedSpec): class gtractCreateGuideFiber(SEMLikeCommandLine): """title: Create Guide Fiber -category: Diffusion.GTRACT - -description: This program will create a guide fiber by averaging fibers from a previously generated tract. + category: Diffusion.GTRACT -version: 4.0.0 + description: This program will create a guide fiber by averaging fibers from a previously generated tract. 
-documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + version: 4.0.0 -license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT -contributor: This tool was developed by Vincent Magnotta and Greg Harris. + license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt -acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + contributor: This tool was developed by Vincent Magnotta and Greg Harris. -""" + acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + """ input_spec = gtractCreateGuideFiberInputSpec output_spec = gtractCreateGuideFiberOutputSpec @@ -654,21 +645,20 @@ class gtractAnisotropyMapOutputSpec(TraitedSpec): class gtractAnisotropyMap(SEMLikeCommandLine): """title: Anisotropy Map -category: Diffusion.GTRACT + category: Diffusion.GTRACT -description: This program will generate a scalar map of anisotropy, given a tensor representation. Anisotropy images are used for fiber tracking, but the anisotropy scalars are not defined along the path. Instead, the tensor representation is included as point data allowing all of these metrics to be computed using only the fiber tract point data. The images can be saved in any ITK supported format, but it is suggested that you use an image format that supports the definition of the image origin. This includes NRRD, NifTI, and Meta formats. These images can also be used for scalar analysis including regional anisotropy measures or VBM style analysis. + description: This program will generate a scalar map of anisotropy, given a tensor representation. Anisotropy images are used for fiber tracking, but the anisotropy scalars are not defined along the path. Instead, the tensor representation is included as point data allowing all of these metrics to be computed using only the fiber tract point data. The images can be saved in any ITK supported format, but it is suggested that you use an image format that supports the definition of the image origin. This includes NRRD, NifTI, and Meta formats. These images can also be used for scalar analysis including regional anisotropy measures or VBM style analysis. -version: 4.0.0 + version: 4.0.0 -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT -license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt -contributor: This tool was developed by Vincent Magnotta and Greg Harris. + contributor: This tool was developed by Vincent Magnotta and Greg Harris. -acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 - -""" + acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + """ input_spec = gtractAnisotropyMapInputSpec output_spec = gtractAnisotropyMapOutputSpec @@ -710,21 +700,20 @@ class gtractClipAnisotropyOutputSpec(TraitedSpec): class gtractClipAnisotropy(SEMLikeCommandLine): """title: Clip Anisotropy -category: Diffusion.GTRACT - -description: This program will zero the first and/or last slice of an anisotropy image, creating a clipped anisotropy image. 
+ category: Diffusion.GTRACT -version: 4.0.0 + description: This program will zero the first and/or last slice of an anisotropy image, creating a clipped anisotropy image. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + version: 4.0.0 -license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT -contributor: This tool was developed by Vincent Magnotta and Greg Harris. + license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt -acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + contributor: This tool was developed by Vincent Magnotta and Greg Harris. -""" + acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + """ input_spec = gtractClipAnisotropyInputSpec output_spec = gtractClipAnisotropyOutputSpec @@ -782,21 +771,20 @@ class gtractResampleB0OutputSpec(TraitedSpec): class gtractResampleB0(SEMLikeCommandLine): """title: Resample B0 -category: Diffusion.GTRACT - -description: This program will resample a signed short image using either a Rigid or B-Spline transform. The user must specify a template image that will be used to define the origin, orientation, spacing, and size of the resampled image. + category: Diffusion.GTRACT -version: 4.0.0 + description: This program will resample a signed short image using either a Rigid or B-Spline transform. The user must specify a template image that will be used to define the origin, orientation, spacing, and size of the resampled image. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + version: 4.0.0 -license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT -contributor: This tool was developed by Vincent Magnotta and Greg Harris. + license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt -acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + contributor: This tool was developed by Vincent Magnotta and Greg Harris. -""" + acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + """ input_spec = gtractResampleB0InputSpec output_spec = gtractResampleB0OutputSpec @@ -831,21 +819,20 @@ class gtractInvertRigidTransformOutputSpec(TraitedSpec): class gtractInvertRigidTransform(SEMLikeCommandLine): """title: Rigid Transform Inversion -category: Diffusion.GTRACT + category: Diffusion.GTRACT -description: This program will invert a Rigid transform. + description: This program will invert a Rigid transform. -version: 4.0.0 + version: 4.0.0 -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT -license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt -contributor: This tool was developed by Vincent Magnotta and Greg Harris. + contributor: This tool was developed by Vincent Magnotta and Greg Harris. 
-acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 - -""" + acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + """ input_spec = gtractInvertRigidTransformInputSpec output_spec = gtractInvertRigidTransformOutputSpec @@ -888,21 +875,20 @@ class gtractImageConformityOutputSpec(TraitedSpec): class gtractImageConformity(SEMLikeCommandLine): """title: Image Conformity -category: Diffusion.GTRACT - -description: This program will straighten out the Direction and Origin to match the Reference Image. + category: Diffusion.GTRACT -version: 4.0.0 + description: This program will straighten out the Direction and Origin to match the Reference Image. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + version: 4.0.0 -license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT -contributor: This tool was developed by Vincent Magnotta and Greg Harris. + license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt -acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + contributor: This tool was developed by Vincent Magnotta and Greg Harris. -""" + acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + """ input_spec = gtractImageConformityInputSpec output_spec = gtractImageConformityOutputSpec @@ -954,21 +940,20 @@ class compareTractInclusionOutputSpec(TraitedSpec): class compareTractInclusion(SEMLikeCommandLine): """title: Compare Tracts -category: Diffusion.GTRACT - -description: This program will halt with a status code indicating whether a test tract is nearly enough included in a standard tract in the sense that every fiber in the test tract has a low enough sum of squares distance to some fiber in the standard tract modulo spline resampling of every fiber to a fixed number of points. + category: Diffusion.GTRACT -version: 4.0.0 + description: This program will halt with a status code indicating whether a test tract is nearly enough included in a standard tract in the sense that every fiber in the test tract has a low enough sum of squares distance to some fiber in the standard tract modulo spline resampling of every fiber to a fixed number of points. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + version: 4.0.0 -license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT -contributor: This tool was developed by Vincent Magnotta and Greg Harris. + license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt -acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + contributor: This tool was developed by Vincent Magnotta and Greg Harris. 
-""" + acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + """ input_spec = compareTractInclusionInputSpec output_spec = compareTractInclusionOutputSpec @@ -1048,21 +1033,20 @@ class gtractFastMarchingTrackingOutputSpec(TraitedSpec): class gtractFastMarchingTracking(SEMLikeCommandLine): """title: Fast Marching Tracking -category: Diffusion.GTRACT + category: Diffusion.GTRACT -description: This program will use a fast marching fiber tracking algorithm to identify fiber tracts from a tensor image. This program is the second portion of the algorithm. The user must first run gtractCostFastMarching to generate the vcl_cost image. The second step of the algorithm implemented here is a gradient descent soplution from the defined ending region back to the seed points specified in gtractCostFastMarching. This algorithm is roughly based on the work by G. Parker et al. from IEEE Transactions On Medical Imaging, 21(5): 505-512, 2002. An additional feature of including anisotropy into the vcl_cost function calculation is included. + description: This program will use a fast marching fiber tracking algorithm to identify fiber tracts from a tensor image. This program is the second portion of the algorithm. The user must first run gtractCostFastMarching to generate the vcl_cost image. The second step of the algorithm implemented here is a gradient descent soplution from the defined ending region back to the seed points specified in gtractCostFastMarching. This algorithm is roughly based on the work by G. Parker et al. from IEEE Transactions On Medical Imaging, 21(5): 505-512, 2002. An additional feature of including anisotropy into the vcl_cost function calculation is included. -version: 4.0.0 + version: 4.0.0 -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT -license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt -contributor: This tool was developed by Vincent Magnotta and Greg Harris. The original code here was developed by Daisy Espino. + contributor: This tool was developed by Vincent Magnotta and Greg Harris. The original code here was developed by Daisy Espino. -acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 - -""" + acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + """ input_spec = gtractFastMarchingTrackingInputSpec output_spec = gtractFastMarchingTrackingOutputSpec @@ -1106,21 +1090,20 @@ class gtractInvertDisplacementFieldOutputSpec(TraitedSpec): class gtractInvertDisplacementField(SEMLikeCommandLine): """title: Invert Displacement Field -category: Diffusion.GTRACT - -description: This program will invert a deformatrion field. The size of the deformation field is defined by an example image provided by the user + category: Diffusion.GTRACT -version: 4.0.0 + description: This program will invert a deformatrion field. The size of the deformation field is defined by an example image provided by the user -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + version: 4.0.0 -license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT -contributor: This tool was developed by Vincent Magnotta. 
+ license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt -acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + contributor: This tool was developed by Vincent Magnotta. -""" + acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + """ input_spec = gtractInvertDisplacementFieldInputSpec output_spec = gtractInvertDisplacementFieldOutputSpec @@ -1239,21 +1222,20 @@ class gtractCoRegAnatomyOutputSpec(TraitedSpec): class gtractCoRegAnatomy(SEMLikeCommandLine): """title: Coregister B0 to Anatomy B-Spline -category: Diffusion.GTRACT - -description: This program will register a Nrrd diffusion weighted 4D vector image to a fixed anatomical image. Two registration methods are supported for alignment with anatomical images: Rigid and B-Spline. The rigid registration performs a rigid body registration with the anatomical images and should be done as well to initialize the B-Spline transform. The B-SPline transform is the deformable transform, where the user can control the amount of deformation based on the number of control points as well as the maximum distance that these points can move. The B-Spline registration places a low dimensional grid in the image, which is deformed. This allows for some susceptibility related distortions to be removed from the diffusion weighted images. In general the amount of motion in the slice selection and read-out directions direction should be kept low. The distortion is in the phase encoding direction in the images. It is recommended that skull stripped (i.e. image containing only brain with skull removed) images shoud be used for image co-registration with the B-Spline transform. + category: Diffusion.GTRACT -version: 4.0.0 + description: This program will register a Nrrd diffusion weighted 4D vector image to a fixed anatomical image. Two registration methods are supported for alignment with anatomical images: Rigid and B-Spline. The rigid registration performs a rigid body registration with the anatomical images and should be done as well to initialize the B-Spline transform. The B-SPline transform is the deformable transform, where the user can control the amount of deformation based on the number of control points as well as the maximum distance that these points can move. The B-Spline registration places a low dimensional grid in the image, which is deformed. This allows for some susceptibility related distortions to be removed from the diffusion weighted images. In general the amount of motion in the slice selection and read-out directions direction should be kept low. The distortion is in the phase encoding direction in the images. It is recommended that skull stripped (i.e. image containing only brain with skull removed) images shoud be used for image co-registration with the B-Spline transform. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + version: 4.0.0 -license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT -contributor: This tool was developed by Vincent Magnotta and Greg Harris. + license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt -acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + contributor: This tool was developed by Vincent Magnotta and Greg Harris. 
-""" + acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + """ input_spec = gtractCoRegAnatomyInputSpec output_spec = gtractCoRegAnatomyOutputSpec @@ -1327,21 +1309,20 @@ class gtractResampleDWIInPlaceOutputSpec(TraitedSpec): class gtractResampleDWIInPlace(SEMLikeCommandLine): """title: Resample DWI In Place -category: Diffusion.GTRACT + category: Diffusion.GTRACT -description: Resamples DWI image to structural image. + description: Resamples DWI image to structural image. -version: 4.0.0 + version: 4.0.0 -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT -license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt -contributor: This tool was developed by Vincent Magnotta, Greg Harris, Hans Johnson, and Joy Matsui. + contributor: This tool was developed by Vincent Magnotta, Greg Harris, Hans Johnson, and Joy Matsui. -acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 - -""" + acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + """ input_spec = gtractResampleDWIInPlaceInputSpec output_spec = gtractResampleDWIInPlaceOutputSpec @@ -1411,21 +1392,20 @@ class gtractCostFastMarchingOutputSpec(TraitedSpec): class gtractCostFastMarching(SEMLikeCommandLine): """title: Cost Fast Marching -category: Diffusion.GTRACT - -description: This program will use a fast marching fiber tracking algorithm to identify fiber tracts from a tensor image. This program is the first portion of the algorithm. The user must first run gtractFastMarchingTracking to generate the actual fiber tracts. This algorithm is roughly based on the work by G. Parker et al. from IEEE Transactions On Medical Imaging, 21(5): 505-512, 2002. An additional feature of including anisotropy into the vcl_cost function calculation is included. + category: Diffusion.GTRACT -version: 4.0.0 + description: This program will use a fast marching fiber tracking algorithm to identify fiber tracts from a tensor image. This program is the first portion of the algorithm. The user must first run gtractFastMarchingTracking to generate the actual fiber tracts. This algorithm is roughly based on the work by G. Parker et al. from IEEE Transactions On Medical Imaging, 21(5): 505-512, 2002. An additional feature of including anisotropy into the vcl_cost function calculation is included. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + version: 4.0.0 -license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT -contributor: This tool was developed by Vincent Magnotta and Greg Harris. The original code here was developed by Daisy Espino. + license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt -acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + contributor: This tool was developed by Vincent Magnotta and Greg Harris. The original code here was developed by Daisy Espino. 
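The gtractCostFastMarching and gtractFastMarchingTracking docstrings above describe a two-stage algorithm: the cost image is computed first, then a gradient-descent tracking pass follows. One hedged way to express that ordering with nipype's pipeline engine is sketched below; the connected field names are assumptions, not verified against the generated specs.

>>> import nipype.pipeline.engine as pe
>>> from nipype.interfaces.semtools import gtractCostFastMarching, gtractFastMarchingTracking
>>> cost = pe.Node(gtractCostFastMarching(), name='cost_fast_marching')
>>> track = pe.Node(gtractFastMarchingTracking(), name='fast_marching_tracking')
>>> wf = pe.Workflow(name='gtract_fast_marching')
>>> wf.connect(cost, 'outputCostVolume', track, 'inputCostVolume')  # field names assumed
>>> wf.run()  # doctest: +SKIP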
-""" + acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + """ input_spec = gtractCostFastMarchingInputSpec output_spec = gtractCostFastMarchingOutputSpec @@ -1560,21 +1540,20 @@ class gtractFiberTrackingOutputSpec(TraitedSpec): class gtractFiberTracking(SEMLikeCommandLine): """title: Fiber Tracking -category: Diffusion.GTRACT - -description: This program implements four fiber tracking methods (Free, Streamline, GraphSearch, Guided). The output of the fiber tracking is vtkPolyData (i.e. Polylines) that can be loaded into Slicer3 for visualization. The poly data can be saved in either old VTK format files (.vtk) or in the new VTK XML format (.xml). The polylines contain point data that defines ther Tensor at each point along the fiber tract. This can then be used to rendered as glyphs in Slicer3 and can be used to define severeal scalar measures without referencing back to the anisotropy images. (1) Free tracking is a basic streamlines algorithm. This is a direct implementation of the method original proposed by Basser et al. The tracking follows the primarty eigenvector. The tracking begins with seed points in the starting region. Only those voxels above the specified anisotropy threshold in the starting region are used as seed points. Tracking terminates either as a result of maximum fiber length, low ansiotropy, or large curvature. This is a great way to explore your data. (2) The streamlines algorithm is a direct implementation of the method originally proposed by Basser et al. The tracking follows the primary eigenvector. The tracking begins with seed points in the starting region. Only those voxels above the specified anisotropy threshold in the starting region are used as seed points. Tracking terminates either by reaching the ending region or reaching some stopping criteria. Stopping criteria are specified using the following parameters: tracking threshold, curvature threshold, and max length. Only paths terminating in the ending region are kept in this method. The TEND algorithm proposed by Lazar et al. (Human Brain Mapping 18:306-321, 2003) has been instrumented. This can be enabled using the --useTend option while performing Streamlines tracking. This utilizes the entire diffusion tensor to deflect the incoming vector instead of simply following the primary eigenvector. The TEND parameters are set using the --tendF and --tendG options. (3) Graph Search tracking is the first step in the full GTRACT algorithm developed by Cheng et al. (NeuroImage 31(3): 1075-1085, 2006) for finding the tracks in a tensor image. This method was developed to generate fibers in a Tensor representation where crossing fibers occur. The graph search algorithm follows the primary eigenvector in non-ambigous regions and utilizes branching and a graph search algorithm in ambigous regions. Ambiguous tracking regions are defined based on two criteria: Branching Al Threshold (anisotropy values below this value and above the traching threshold) and Curvature Major Eigen (angles of the primary eigenvector direction and the current tracking direction). In regions that meet this criteria, two or three tracking paths are considered. The first is the standard primary eigenvector direction. The second is the seconadary eigenvector direction. This is based on the assumption that these regions may be prolate regions. If the Random Walk option is selected then a third direction is also considered. 
This direction is defined by a cone pointing from the current position to the centroid of the ending region. The interior angle of the cone is specified by the user with the Branch/Guide Angle parameter. A vector contained inside of the cone is selected at random and used as the third direction. This method can also utilize the TEND option where the primary tracking direction is that specified by the TEND method instead of the primary eigenvector. The parameter '--maximumBranchPoints' allows the tracking to have this number of branches being considered at a time. If this number of branch points is exceeded at any time, then the algorithm will revert back to a streamline alogrithm until the number of branches is reduced. This allows the user to constrain the computational complexity of the algorithm. (4) The second phase of the GTRACT algorithm is Guided Tracking. This method incorporates anatomical information about the track orientation using an initial guess of the fiber track. In the originally proposed GTRACT method, this would be created from the fibers resulting from the Graph Search tracking. However, in practice this can be created using any method and could be defined manually. To create the guide fiber the program gtractCreateGuideFiber can be used. This program will load a fiber tract that has been generated and create a centerline representation of the fiber tract (i.e. a single fiber). In this method, the fiber tracking follows the primary eigenvector direction unless it deviates from the guide fiber track by a angle greater than that specified by the '--guidedCurvatureThreshold' parameter. The user must specify the guide fiber when running this program. + category: Diffusion.GTRACT -version: 4.0.0 + description: This program implements four fiber tracking methods (Free, Streamline, GraphSearch, Guided). The output of the fiber tracking is vtkPolyData (i.e. Polylines) that can be loaded into Slicer3 for visualization. The poly data can be saved in either old VTK format files (.vtk) or in the new VTK XML format (.xml). The polylines contain point data that defines ther Tensor at each point along the fiber tract. This can then be used to rendered as glyphs in Slicer3 and can be used to define severeal scalar measures without referencing back to the anisotropy images. (1) Free tracking is a basic streamlines algorithm. This is a direct implementation of the method original proposed by Basser et al. The tracking follows the primarty eigenvector. The tracking begins with seed points in the starting region. Only those voxels above the specified anisotropy threshold in the starting region are used as seed points. Tracking terminates either as a result of maximum fiber length, low ansiotropy, or large curvature. This is a great way to explore your data. (2) The streamlines algorithm is a direct implementation of the method originally proposed by Basser et al. The tracking follows the primary eigenvector. The tracking begins with seed points in the starting region. Only those voxels above the specified anisotropy threshold in the starting region are used as seed points. Tracking terminates either by reaching the ending region or reaching some stopping criteria. Stopping criteria are specified using the following parameters: tracking threshold, curvature threshold, and max length. Only paths terminating in the ending region are kept in this method. The TEND algorithm proposed by Lazar et al. (Human Brain Mapping 18:306-321, 2003) has been instrumented. 
This can be enabled using the --useTend option while performing Streamlines tracking. This utilizes the entire diffusion tensor to deflect the incoming vector instead of simply following the primary eigenvector. The TEND parameters are set using the --tendF and --tendG options. (3) Graph Search tracking is the first step in the full GTRACT algorithm developed by Cheng et al. (NeuroImage 31(3): 1075-1085, 2006) for finding the tracks in a tensor image. This method was developed to generate fibers in a Tensor representation where crossing fibers occur. The graph search algorithm follows the primary eigenvector in non-ambigous regions and utilizes branching and a graph search algorithm in ambigous regions. Ambiguous tracking regions are defined based on two criteria: Branching Al Threshold (anisotropy values below this value and above the traching threshold) and Curvature Major Eigen (angles of the primary eigenvector direction and the current tracking direction). In regions that meet this criteria, two or three tracking paths are considered. The first is the standard primary eigenvector direction. The second is the seconadary eigenvector direction. This is based on the assumption that these regions may be prolate regions. If the Random Walk option is selected then a third direction is also considered. This direction is defined by a cone pointing from the current position to the centroid of the ending region. The interior angle of the cone is specified by the user with the Branch/Guide Angle parameter. A vector contained inside of the cone is selected at random and used as the third direction. This method can also utilize the TEND option where the primary tracking direction is that specified by the TEND method instead of the primary eigenvector. The parameter '--maximumBranchPoints' allows the tracking to have this number of branches being considered at a time. If this number of branch points is exceeded at any time, then the algorithm will revert back to a streamline alogrithm until the number of branches is reduced. This allows the user to constrain the computational complexity of the algorithm. (4) The second phase of the GTRACT algorithm is Guided Tracking. This method incorporates anatomical information about the track orientation using an initial guess of the fiber track. In the originally proposed GTRACT method, this would be created from the fibers resulting from the Graph Search tracking. However, in practice this can be created using any method and could be defined manually. To create the guide fiber the program gtractCreateGuideFiber can be used. This program will load a fiber tract that has been generated and create a centerline representation of the fiber tract (i.e. a single fiber). In this method, the fiber tracking follows the primary eigenvector direction unless it deviates from the guide fiber track by a angle greater than that specified by the '--guidedCurvatureThreshold' parameter. The user must specify the guide fiber when running this program. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + version: 4.0.0 -license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT -contributor: This tool was developed by Vincent Magnotta, Greg Harris and Yongqiang Zhao. 
+ license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt -acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + contributor: This tool was developed by Vincent Magnotta, Greg Harris and Yongqiang Zhao. -""" + acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + """ input_spec = gtractFiberTrackingInputSpec output_spec = gtractFiberTrackingOutputSpec @@ -1623,21 +1602,20 @@ class extractNrrdVectorIndexOutputSpec(TraitedSpec): class extractNrrdVectorIndex(SEMLikeCommandLine): """title: Extract Nrrd Index -category: Diffusion.GTRACT + category: Diffusion.GTRACT -description: This program will extract a 3D image (single vector) from a vector 3D image at a given vector index. + description: This program will extract a 3D image (single vector) from a vector 3D image at a given vector index. -version: 4.0.0 + version: 4.0.0 -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT -license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt -contributor: This tool was developed by Vincent Magnotta and Greg Harris. + contributor: This tool was developed by Vincent Magnotta and Greg Harris. -acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 - -""" + acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + """ input_spec = extractNrrdVectorIndexInputSpec output_spec = extractNrrdVectorIndexOutputSpec @@ -1689,21 +1667,20 @@ class gtractResampleFibersOutputSpec(TraitedSpec): class gtractResampleFibers(SEMLikeCommandLine): """title: Resample Fibers -category: Diffusion.GTRACT - -description: This program will resample a fiber tract with respect to a pair of deformation fields that represent the forward and reverse deformation fields. + category: Diffusion.GTRACT -version: 4.0.0 + description: This program will resample a fiber tract with respect to a pair of deformation fields that represent the forward and reverse deformation fields. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + version: 4.0.0 -license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT -contributor: This tool was developed by Vincent Magnotta and Greg Harris. + license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt -acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + contributor: This tool was developed by Vincent Magnotta and Greg Harris. -""" + acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + """ input_spec = gtractResampleFibersInputSpec output_spec = gtractResampleFibersOutputSpec @@ -1781,21 +1758,20 @@ class gtractTensorOutputSpec(TraitedSpec): class gtractTensor(SEMLikeCommandLine): """title: Tensor Estimation -category: Diffusion.GTRACT - -description: This step will convert a b-value averaged diffusion tensor image to a 3x3 tensor voxel image. This step takes the diffusion tensor image data and generates a tensor representation of the data based on the signal intensity decay, b values applied, and the diffusion difrections. 
The apparent diffusion coefficient for a given orientation is computed on a pixel-by-pixel basis by fitting the image data (voxel intensities) to the Stejskal-Tanner equation. If at least 6 diffusion directions are used, then the diffusion tensor can be computed. This program uses itk::DiffusionTensor3DReconstructionImageFilter. The user can adjust background threshold, median filter, and isotropic resampling. + category: Diffusion.GTRACT -version: 4.0.0 + description: This step will convert a b-value averaged diffusion tensor image to a 3x3 tensor voxel image. This step takes the diffusion tensor image data and generates a tensor representation of the data based on the signal intensity decay, b values applied, and the diffusion difrections. The apparent diffusion coefficient for a given orientation is computed on a pixel-by-pixel basis by fitting the image data (voxel intensities) to the Stejskal-Tanner equation. If at least 6 diffusion directions are used, then the diffusion tensor can be computed. This program uses itk::DiffusionTensor3DReconstructionImageFilter. The user can adjust background threshold, median filter, and isotropic resampling. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + version: 4.0.0 -license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT -contributor: This tool was developed by Vincent Magnotta and Greg Harris. + license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt -acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + contributor: This tool was developed by Vincent Magnotta and Greg Harris. -""" + acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + """ input_spec = gtractTensorInputSpec output_spec = gtractTensorOutputSpec diff --git a/nipype/interfaces/semtools/diffusion/maxcurvature.py b/nipype/interfaces/semtools/diffusion/maxcurvature.py index fdd5057097..c4f170e9cb 100644 --- a/nipype/interfaces/semtools/diffusion/maxcurvature.py +++ b/nipype/interfaces/semtools/diffusion/maxcurvature.py @@ -35,25 +35,24 @@ class maxcurvatureOutputSpec(TraitedSpec): class maxcurvature(SEMLikeCommandLine): """title: MaxCurvature-Hessian (DTIProcess) -category: Diffusion + category: Diffusion -description: This program computes the Hessian of the FA image (--image). We use this scalar image as a registration input when doing DTI atlas building. For most adult FA we use a sigma of 2 whereas for neonate or primate images and sigma of 1 or 1.5 is more appropriate. For really noisy images, 2.5 - 4 can be considered. The final image (--output) shows the main feature of the input image. + description: This program computes the Hessian of the FA image (--image). We use this scalar image as a registration input when doing DTI atlas building. For most adult FA we use a sigma of 2 whereas for neonate or primate images and sigma of 1 or 1.5 is more appropriate. For really noisy images, 2.5 - 4 can be considered. The final image (--output) shows the main feature of the input image. -version: 1.1.0 + version: 1.1.0 -documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess + documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess -license: Copyright (c) Casey Goodlett. All rights reserved. 
- See http://www.ia.unc.edu/dev/Copyright.htm for details. - This software is distributed WITHOUT ANY WARRANTY; without even - the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR - PURPOSE. See the above copyright notices for more information. + license: Copyright (c) Casey Goodlett. All rights reserved. + See http://www.ia.unc.edu/dev/Copyright.htm for details. + This software is distributed WITHOUT ANY WARRANTY; without even + the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + PURPOSE. See the above copyright notices for more information. -contributor: Casey Goodlett + contributor: Casey Goodlett -acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering) provided conversions to make DTIProcess compatible with Slicer execution, and simplified the stand-alone build requirements by removing the dependancies on boost and a fortran compiler. - -""" + acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering) provided conversions to make DTIProcess compatible with Slicer execution, and simplified the stand-alone build requirements by removing the dependancies on boost and a fortran compiler. + """ input_spec = maxcurvatureInputSpec output_spec = maxcurvatureOutputSpec diff --git a/nipype/interfaces/semtools/diffusion/tractography/commandlineonly.py b/nipype/interfaces/semtools/diffusion/tractography/commandlineonly.py index becf1466e9..cbf58623dc 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/commandlineonly.py +++ b/nipype/interfaces/semtools/diffusion/tractography/commandlineonly.py @@ -31,25 +31,24 @@ class fiberstatsOutputSpec(TraitedSpec): class fiberstats(SEMLikeCommandLine): """title: FiberStats (DTIProcess) -category: Diffusion.Tractography.CommandLineOnly + category: Diffusion.Tractography.CommandLineOnly -description: Obsolete tool - Not used anymore + description: Obsolete tool - Not used anymore -version: 1.1.0 + version: 1.1.0 -documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess + documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess -license: Copyright (c) Casey Goodlett. All rights reserved. - See http://www.ia.unc.edu/dev/Copyright.htm for details. - This software is distributed WITHOUT ANY WARRANTY; without even - the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR - PURPOSE. See the above copyright notices for more information. + license: Copyright (c) Casey Goodlett. All rights reserved. + See http://www.ia.unc.edu/dev/Copyright.htm for details. + This software is distributed WITHOUT ANY WARRANTY; without even + the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + PURPOSE. See the above copyright notices for more information. 
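Returning to the maxcurvature interface shown above, a minimal usage sketch follows; the image, output, and sigma trait names are assumed from the flags and guidance in its description (a sigma of 2 for most adult FA images), and the paths are placeholders.

>>> from nipype.interfaces.semtools import maxcurvature
>>> mc = maxcurvature()
>>> mc.inputs.image = 'fa.nrrd'           # FA image (trait name assumed from --image)
>>> mc.inputs.output = 'fa_hessian.nrrd'  # Hessian-based feature image to write
>>> mc.inputs.sigma = 2.0                 # assumed trait; value suggested for adult FA
>>> mc.run()  # doctest: +SKIP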
-contributor: Casey Goodlett + contributor: Casey Goodlett -acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering) provided conversions to make DTIProcess compatible with Slicer execution, and simplified the stand-alone build requirements by removing the dependancies on boost and a fortran compiler. - -""" + acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering) provided conversions to make DTIProcess compatible with Slicer execution, and simplified the stand-alone build requirements by removing the dependancies on boost and a fortran compiler. + """ input_spec = fiberstatsInputSpec output_spec = fiberstatsOutputSpec diff --git a/nipype/interfaces/semtools/diffusion/tractography/fiberprocess.py b/nipype/interfaces/semtools/diffusion/tractography/fiberprocess.py index 1798ead449..e069c8d6b7 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/fiberprocess.py +++ b/nipype/interfaces/semtools/diffusion/tractography/fiberprocess.py @@ -90,25 +90,24 @@ class fiberprocessOutputSpec(TraitedSpec): class fiberprocess(SEMLikeCommandLine): """title: FiberProcess (DTIProcess) -category: Diffusion.Tractography + category: Diffusion.Tractography -description: fiberprocess is a tool that manage fiber files extracted from the fibertrack tool or any fiber tracking algorithm. It takes as an input .fib and .vtk files (--fiber_file) and saves the changed fibers (--fiber_output) into the 2 same formats. The main purpose of this tool is to deform the fiber file with a transformation field as an input (--displacement_field or --h_field depending if you deal with dfield or hfield). To use that option you need to specify the tensor field from which the fiber file was extracted with the option --tensor_volume. The transformation applied on the fiber file is the inverse of the one input. If the transformation is from one case to an atlas, fiberprocess assumes that the fiber file is in the atlas space and you want it in the original case space, so it's the inverse of the transformation which has been computed. -You have 2 options for fiber modification. You can either deform the fibers (their geometry) into the space OR you can keep the same geometry but map the diffusion properties (fa, md, lbd's...) of the original tensor field along the fibers at the corresponding locations. This is triggered by the --no_warp option. To use the previous example: when you have a tensor field in the original space and the deformed tensor field in the atlas space, you want to track the fibers in the atlas space, keeping this geometry but with the original case diffusion properties. Then you can specify the transformations field (from original case -> atlas) and the original tensor field with the --tensor_volume option. -With fiberprocess you can also binarize a fiber file. Using the --voxelize option will create an image where each voxel through which a fiber is passing is set to 1. The output is going to be a binary image with the values 0 or 1 by default but the 1 value voxel can be set to any number with the --voxel_label option. Finally you can create an image where the value at the voxel is the number of fiber passing through. 
(--voxelize_count_fibers) + description: fiberprocess is a tool that manage fiber files extracted from the fibertrack tool or any fiber tracking algorithm. It takes as an input .fib and .vtk files (--fiber_file) and saves the changed fibers (--fiber_output) into the 2 same formats. The main purpose of this tool is to deform the fiber file with a transformation field as an input (--displacement_field or --h_field depending if you deal with dfield or hfield). To use that option you need to specify the tensor field from which the fiber file was extracted with the option --tensor_volume. The transformation applied on the fiber file is the inverse of the one input. If the transformation is from one case to an atlas, fiberprocess assumes that the fiber file is in the atlas space and you want it in the original case space, so it's the inverse of the transformation which has been computed. + You have 2 options for fiber modification. You can either deform the fibers (their geometry) into the space OR you can keep the same geometry but map the diffusion properties (fa, md, lbd's...) of the original tensor field along the fibers at the corresponding locations. This is triggered by the --no_warp option. To use the previous example: when you have a tensor field in the original space and the deformed tensor field in the atlas space, you want to track the fibers in the atlas space, keeping this geometry but with the original case diffusion properties. Then you can specify the transformations field (from original case -> atlas) and the original tensor field with the --tensor_volume option. + With fiberprocess you can also binarize a fiber file. Using the --voxelize option will create an image where each voxel through which a fiber is passing is set to 1. The output is going to be a binary image with the values 0 or 1 by default but the 1 value voxel can be set to any number with the --voxel_label option. Finally you can create an image where the value at the voxel is the number of fiber passing through. (--voxelize_count_fibers) -version: 1.0.0 + version: 1.0.0 -documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess + documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess -license: Copyright (c) Casey Goodlett. All rights reserved. - See http://www.ia.unc.edu/dev/Copyright.htm for details. - This software is distributed WITHOUT ANY WARRANTY; without even - the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR - PURPOSE. See the above copyright notices for more information. + license: Copyright (c) Casey Goodlett. All rights reserved. + See http://www.ia.unc.edu/dev/Copyright.htm for details. + This software is distributed WITHOUT ANY WARRANTY; without even + the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + PURPOSE. See the above copyright notices for more information. 
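A hedged usage sketch for fiberprocess, assuming trait names that mirror the flags quoted in the description (fiber_file, tensor_volume, displacement_field, fiber_output, voxelize); all file names are hypothetical placeholders.

    from nipype.interfaces.semtools.diffusion.tractography.fiberprocess import fiberprocess

    fp = fiberprocess()
    fp.inputs.fiber_file = 'atlas_tracts.vtk'                   # fibers tracked in atlas space (--fiber_file)
    fp.inputs.tensor_volume = 'case_tensors.nrrd'               # tensor field the fibers came from (--tensor_volume)
    fp.inputs.displacement_field = 'case_to_atlas_dfield.nrrd'  # the inverse transform is applied (--displacement_field)
    fp.inputs.fiber_output = 'case_tracts.vtk'                  # deformed fibers (--fiber_output)
    # Alternatively, rasterize the fibers into a mask instead of warping them:
    # fp.inputs.voxelize = 'tract_mask.nrrd'                    # --voxelize
    print(fp.cmdline)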
-contributor: Casey Goodlett - -""" + contributor: Casey Goodlett + """ input_spec = fiberprocessInputSpec output_spec = fiberprocessOutputSpec diff --git a/nipype/interfaces/semtools/diffusion/tractography/fibertrack.py b/nipype/interfaces/semtools/diffusion/tractography/fibertrack.py index c6eb7f13e0..caddd16e22 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/fibertrack.py +++ b/nipype/interfaces/semtools/diffusion/tractography/fibertrack.py @@ -74,28 +74,27 @@ class fibertrackOutputSpec(TraitedSpec): class fibertrack(SEMLikeCommandLine): """title: FiberTrack (DTIProcess) -category: Diffusion.Tractography + category: Diffusion.Tractography -description: This program implements a simple streamline tractography method based on the principal eigenvector of the tensor field. A fourth order Runge-Kutta integration rule used to advance the streamlines. -As a first parameter you have to input the tensor field (with the --input_tensor_file option). Then the region of interest image file is set with the --input_roi_file. Next you want to set the output fiber file name after the --output_fiber_file option. -You can specify the label value in the input_roi_file with the --target_label, --source_label and --fobidden_label options. By default target label is 1, source label is 2 and forbidden label is 0. The source label is where the streamlines are seeded, the target label defines the voxels through which the fibers must pass by to be kept in the final fiber file and the forbidden label defines the voxels where the streamlines are stopped if they pass through it. There is also a --whole_brain option which, if enabled, consider both target and source labels of the roi image as target labels and all the voxels of the image are considered as sources. -During the tractography, the --fa_min parameter is used as the minimum value needed at different voxel for the tracking to keep going along a streamline. The --step_size parameter is used for each iteration of the tracking algorithm and defines the length of each step. The --max_angle option defines the maximum angle allowed between two successive segments along the tracked fiber. + description: This program implements a simple streamline tractography method based on the principal eigenvector of the tensor field. A fourth order Runge-Kutta integration rule used to advance the streamlines. + As a first parameter you have to input the tensor field (with the --input_tensor_file option). Then the region of interest image file is set with the --input_roi_file. Next you want to set the output fiber file name after the --output_fiber_file option. + You can specify the label value in the input_roi_file with the --target_label, --source_label and --fobidden_label options. By default target label is 1, source label is 2 and forbidden label is 0. The source label is where the streamlines are seeded, the target label defines the voxels through which the fibers must pass by to be kept in the final fiber file and the forbidden label defines the voxels where the streamlines are stopped if they pass through it. There is also a --whole_brain option which, if enabled, consider both target and source labels of the roi image as target labels and all the voxels of the image are considered as sources. + During the tractography, the --fa_min parameter is used as the minimum value needed at different voxel for the tracking to keep going along a streamline. 
The --step_size parameter is used for each iteration of the tracking algorithm and defines the length of each step. The --max_angle option defines the maximum angle allowed between two successive segments along the tracked fiber. -version: 1.1.0 + version: 1.1.0 -documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess + documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess -license: Copyright (c) Casey Goodlett. All rights reserved. - See http://www.ia.unc.edu/dev/Copyright.htm for details. - This software is distributed WITHOUT ANY WARRANTY; without even - the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR - PURPOSE. See the above copyright notices for more information. + license: Copyright (c) Casey Goodlett. All rights reserved. + See http://www.ia.unc.edu/dev/Copyright.htm for details. + This software is distributed WITHOUT ANY WARRANTY; without even + the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + PURPOSE. See the above copyright notices for more information. -contributor: Casey Goodlett + contributor: Casey Goodlett -acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering) provided conversions to make DTIProcess compatible with Slicer execution, and simplified the stand-alone build requirements by removing the dependancies on boost and a fortran compiler. - -""" + acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering) provided conversions to make DTIProcess compatible with Slicer execution, and simplified the stand-alone build requirements by removing the dependancies on boost and a fortran compiler. + """ input_spec = fibertrackInputSpec output_spec = fibertrackOutputSpec diff --git a/nipype/interfaces/semtools/diffusion/tractography/ukftractography.py b/nipype/interfaces/semtools/diffusion/tractography/ukftractography.py index 228d162560..67026cb890 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/ukftractography.py +++ b/nipype/interfaces/semtools/diffusion/tractography/ukftractography.py @@ -157,19 +157,18 @@ class UKFTractographyOutputSpec(TraitedSpec): class UKFTractography(SEMLikeCommandLine): """title: UKF Tractography -category: Diffusion.Tractography + category: Diffusion.Tractography -description: This module traces fibers in a DWI Volume using the multiple tensor unscented Kalman Filter methology. For more informations check the documentation. + description: This module traces fibers in a DWI Volume using the multiple tensor unscented Kalman Filter methology. For more informations check the documentation. -version: 1.0 + version: 1.0 -documentation-url: http://www.nitrc.org/plugins/mwiki/index.php/ukftractography:MainPage + documentation-url: http://www.nitrc.org/plugins/mwiki/index.php/ukftractography:MainPage -contributor: Yogesh Rathi, Stefan Lienhard, Yinpeng Li, Martin Styner, Ipek Oguz, Yundi Shi, Christian Baumgartner, Kent Williams, Hans Johnson, Peter Savadjiev, Carl-Fredrik Westin. 
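A hedged usage sketch for fibertrack, again assuming trait names that mirror the flags quoted in the description (input_tensor_file, input_roi_file, output_fiber_file, fa_min, step_size, max_angle); file names and parameter values are hypothetical.

    from nipype.interfaces.semtools.diffusion.tractography.fibertrack import fibertrack

    ft = fibertrack()
    ft.inputs.input_tensor_file = 'tensors.nrrd'  # --input_tensor_file
    ft.inputs.input_roi_file = 'roi_labels.nrrd'  # --input_roi_file (2 = source, 1 = target, 0 = forbidden by default)
    ft.inputs.output_fiber_file = 'tracts.vtk'    # --output_fiber_file
    ft.inputs.fa_min = 0.2                        # assumed name, mirroring the --fa_min flag in the description
    ft.inputs.step_size = 0.5                     # assumed name, mirroring --step_size
    ft.inputs.max_angle = 30.0                    # assumed name, mirroring --max_angle
    print(ft.cmdline)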
+ contributor: Yogesh Rathi, Stefan Lienhard, Yinpeng Li, Martin Styner, Ipek Oguz, Yundi Shi, Christian Baumgartner, Kent Williams, Hans Johnson, Peter Savadjiev, Carl-Fredrik Westin. -acknowledgements: The development of this module was supported by NIH grants R01 MH097979 (PI Rathi), R01 MH092862 (PIs Westin and Verma), U01 NS083223 (PI Westin), R01 MH074794 (PI Westin) and P41 EB015902 (PI Kikinis). - -""" + acknowledgements: The development of this module was supported by NIH grants R01 MH097979 (PI Rathi), R01 MH092862 (PIs Westin and Verma), U01 NS083223 (PI Westin), R01 MH074794 (PI Westin) and P41 EB015902 (PI Kikinis). + """ input_spec = UKFTractographyInputSpec output_spec = UKFTractographyOutputSpec diff --git a/nipype/interfaces/semtools/featurecreator.py b/nipype/interfaces/semtools/featurecreator.py index f02d19fda8..1e5b01f252 100644 --- a/nipype/interfaces/semtools/featurecreator.py +++ b/nipype/interfaces/semtools/featurecreator.py @@ -41,19 +41,18 @@ class GenerateCsfClippedFromClassifiedImageOutputSpec(TraitedSpec): class GenerateCsfClippedFromClassifiedImage(SEMLikeCommandLine): """title: GenerateCsfClippedFromClassifiedImage -category: FeatureCreator + category: FeatureCreator -description: Get the distance from a voxel to the nearest voxel of a given tissue type. + description: Get the distance from a voxel to the nearest voxel of a given tissue type. -version: 0.1.0.$Revision: 1 $(alpha) + version: 0.1.0.$Revision: 1 $(alpha) -documentation-url: http:://www.na-mic.org/ + documentation-url: http:://www.na-mic.org/ -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -contributor: This tool was written by Hans J. Johnson. - -""" + contributor: This tool was written by Hans J. Johnson. + """ input_spec = GenerateCsfClippedFromClassifiedImageInputSpec output_spec = GenerateCsfClippedFromClassifiedImageOutputSpec diff --git a/nipype/interfaces/semtools/filtering/denoising.py b/nipype/interfaces/semtools/filtering/denoising.py index 2ca6840128..24b9055afc 100644 --- a/nipype/interfaces/semtools/filtering/denoising.py +++ b/nipype/interfaces/semtools/filtering/denoising.py @@ -62,27 +62,26 @@ class UnbiasedNonLocalMeansOutputSpec(TraitedSpec): class UnbiasedNonLocalMeans(SEMLikeCommandLine): """title: Unbiased NLM for MRI -category: Filtering.Denoising + category: Filtering.Denoising -description: This module implements a fast version of the popular Non-Local Means filter for image denoising. This algorithm filters each pixel as a weighted average of its neighbors in a large vicinity. The weights are computed based on the similarity of each neighbor with the voxel to be denoised. - In the original formulation a patch with a certain radius is centered in each of the voxels, and the Mean Squared Error between each pair of corresponding voxels is computed. In this implementation, only the mean value and gradient components are compared. This, together with an efficient memory management, can attain a speed-up of nearly 20x. Besides, the filtering is more accurate than the original with poor SNR. - This code is intended for its use with MRI (or any other Rician-distributed modality): the second order moment is estimated, then we subtract twice the squared power of noise, and finally we take the square root of the result to remove the Rician bias. - The original implementation of the NLM filter may be found in: - A. Buades, B. Coll, J. 
Morel, "A review of image denoising algorithms, with a new one", Multiscale Modelling and Simulation 4(2): 490-530. 2005. - The correction of the Rician bias is described in the following reference (among others): - S. Aja-Fernandez, K. Krissian, "An unbiased Non-Local Means scheme for DWI filtering", in: Proceedings of the MICCAI Workshop on Computational Diffusion MRI, 2008, pp. 277-284. - The whole description of this version may be found in the following paper (please, cite it if you are willing to use this software): - A. Tristan-Vega, V. Garcia Perez, S. Aja-Fenandez, and C.-F. Westin, "Efficient and Robust Nonlocal Means Denoising of MR Data Based on Salient Features Matching", Computer Methods and Programs in Biomedicine. (Accepted for publication) 2011. + description: This module implements a fast version of the popular Non-Local Means filter for image denoising. This algorithm filters each pixel as a weighted average of its neighbors in a large vicinity. The weights are computed based on the similarity of each neighbor with the voxel to be denoised. + In the original formulation a patch with a certain radius is centered in each of the voxels, and the Mean Squared Error between each pair of corresponding voxels is computed. In this implementation, only the mean value and gradient components are compared. This, together with an efficient memory management, can attain a speed-up of nearly 20x. Besides, the filtering is more accurate than the original with poor SNR. + This code is intended for its use with MRI (or any other Rician-distributed modality): the second order moment is estimated, then we subtract twice the squared power of noise, and finally we take the square root of the result to remove the Rician bias. + The original implementation of the NLM filter may be found in: + A. Buades, B. Coll, J. Morel, "A review of image denoising algorithms, with a new one", Multiscale Modelling and Simulation 4(2): 490-530. 2005. + The correction of the Rician bias is described in the following reference (among others): + S. Aja-Fernandez, K. Krissian, "An unbiased Non-Local Means scheme for DWI filtering", in: Proceedings of the MICCAI Workshop on Computational Diffusion MRI, 2008, pp. 277-284. + The whole description of this version may be found in the following paper (please, cite it if you are willing to use this software): + A. Tristan-Vega, V. Garcia Perez, S. Aja-Fenandez, and C.-F. Westin, "Efficient and Robust Nonlocal Means Denoising of MR Data Based on Salient Features Matching", Computer Methods and Programs in Biomedicine. (Accepted for publication) 2011. 
-version: 0.0.1.$Revision: 1 $(beta) + version: 0.0.1.$Revision: 1 $(beta) -documentation-url: http://www.slicer.org/slicerWiki/index.php/Modules:UnbiasedNonLocalMeans-Documentation-3.6 + documentation-url: http://www.slicer.org/slicerWiki/index.php/Modules:UnbiasedNonLocalMeans-Documentation-3.6 -contributor: Antonio Tristan Vega, Veronica Garcia-Perez, Santiago Aja-Fernandez, Carl-Fredrik Westin + contributor: Antonio Tristan Vega, Veronica Garcia-Perez, Santiago Aja-Fernandez, Carl-Fredrik Westin -acknowledgements: Supported by grant number FMECD-2010/71131616E from the Spanish Ministry of Education/Fulbright Committee - -""" + acknowledgements: Supported by grant number FMECD-2010/71131616E from the Spanish Ministry of Education/Fulbright Committee + """ input_spec = UnbiasedNonLocalMeansInputSpec output_spec = UnbiasedNonLocalMeansOutputSpec diff --git a/nipype/interfaces/semtools/filtering/featuredetection.py b/nipype/interfaces/semtools/filtering/featuredetection.py index e15e1de6b0..37a44ae4d5 100644 --- a/nipype/interfaces/semtools/filtering/featuredetection.py +++ b/nipype/interfaces/semtools/filtering/featuredetection.py @@ -50,17 +50,16 @@ class GenerateSummedGradientImageOutputSpec(TraitedSpec): class GenerateSummedGradientImage(SEMLikeCommandLine): """title: GenerateSummedGradient -category: Filtering.FeatureDetection + category: Filtering.FeatureDetection -description: Automatic FeatureImages using neural networks + description: Automatic FeatureImages using neural networks -version: 1.0 + version: 1.0 -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -contributor: Greg Harris, Eun Young Kim - -""" + contributor: Greg Harris, Eun Young Kim + """ input_spec = GenerateSummedGradientImageInputSpec output_spec = GenerateSummedGradientImageOutputSpec @@ -101,19 +100,18 @@ class CannySegmentationLevelSetImageFilterOutputSpec(TraitedSpec): class CannySegmentationLevelSetImageFilter(SEMLikeCommandLine): """title: Canny Level Set Image Filter -category: Filtering.FeatureDetection - -description: The CannySegmentationLevelSet is commonly used to refine a manually generated manual mask. + category: Filtering.FeatureDetection -version: 0.3.0 + description: The CannySegmentationLevelSet is commonly used to refine a manually generated manual mask. -license: CC + version: 0.3.0 -contributor: Regina Kim + license: CC -acknowledgements: This command module was derived from Insight/Examples/Segmentation/CannySegmentationLevelSetImageFilter.cxx (copyright) Insight Software Consortium. See http://wiki.na-mic.org/Wiki/index.php/Slicer3:Execution_Model_Documentation for more detailed descriptions. + contributor: Regina Kim -""" + acknowledgements: This command module was derived from Insight/Examples/Segmentation/CannySegmentationLevelSetImageFilter.cxx (copyright) Insight Software Consortium. See http://wiki.na-mic.org/Wiki/index.php/Slicer3:Execution_Model_Documentation for more detailed descriptions. + """ input_spec = CannySegmentationLevelSetImageFilterInputSpec output_spec = CannySegmentationLevelSetImageFilterOutputSpec @@ -153,19 +151,18 @@ class DilateImageOutputSpec(TraitedSpec): class DilateImage(SEMLikeCommandLine): """title: Dilate Image -category: Filtering.FeatureDetection - -description: Uses mathematical morphology to dilate the input images. 
+ category: Filtering.FeatureDetection -version: 0.1.0.$Revision: 1 $(alpha) + description: Uses mathematical morphology to dilate the input images. -documentation-url: http:://www.na-mic.org/ + version: 0.1.0.$Revision: 1 $(alpha) -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + documentation-url: http:://www.na-mic.org/ -contributor: This tool was developed by Mark Scully and Jeremy Bockholt. + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -""" + contributor: This tool was developed by Mark Scully and Jeremy Bockholt. + """ input_spec = DilateImageInputSpec output_spec = DilateImageOutputSpec @@ -197,19 +194,18 @@ class TextureFromNoiseImageFilterOutputSpec(TraitedSpec): class TextureFromNoiseImageFilter(SEMLikeCommandLine): """title: TextureFromNoiseImageFilter -category: Filtering.FeatureDetection + category: Filtering.FeatureDetection -description: Calculate the local noise in an image. + description: Calculate the local noise in an image. -version: 0.1.0.$Revision: 1 $(alpha) + version: 0.1.0.$Revision: 1 $(alpha) -documentation-url: http:://www.na-mic.org/ + documentation-url: http:://www.na-mic.org/ -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -contributor: This tool was developed by Eunyoung Regina Kim - -""" + contributor: This tool was developed by Eunyoung Regina Kim + """ input_spec = TextureFromNoiseImageFilterInputSpec output_spec = TextureFromNoiseImageFilterOutputSpec @@ -243,19 +239,18 @@ class FlippedDifferenceOutputSpec(TraitedSpec): class FlippedDifference(SEMLikeCommandLine): """title: Flip Image -category: Filtering.FeatureDetection - -description: Difference between an image and the axially flipped version of that image. + category: Filtering.FeatureDetection -version: 0.1.0.$Revision: 1 $(alpha) + description: Difference between an image and the axially flipped version of that image. -documentation-url: http:://www.na-mic.org/ + version: 0.1.0.$Revision: 1 $(alpha) -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + documentation-url: http:://www.na-mic.org/ -contributor: This tool was developed by Mark Scully and Jeremy Bockholt. + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -""" + contributor: This tool was developed by Mark Scully and Jeremy Bockholt. + """ input_spec = FlippedDifferenceInputSpec output_spec = FlippedDifferenceOutputSpec @@ -292,19 +287,18 @@ class ErodeImageOutputSpec(TraitedSpec): class ErodeImage(SEMLikeCommandLine): """title: Erode Image -category: Filtering.FeatureDetection + category: Filtering.FeatureDetection -description: Uses mathematical morphology to erode the input images. + description: Uses mathematical morphology to erode the input images. -version: 0.1.0.$Revision: 1 $(alpha) + version: 0.1.0.$Revision: 1 $(alpha) -documentation-url: http:://www.na-mic.org/ + documentation-url: http:://www.na-mic.org/ -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -contributor: This tool was developed by Mark Scully and Jeremy Bockholt. - -""" + contributor: This tool was developed by Mark Scully and Jeremy Bockholt. 
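DilateImage, ErodeImage and the other feature-detection wrappers above all follow the same auto-generated pattern; when in doubt about the exact input names, nipype can print the generated spec directly rather than guessing:

    from nipype.interfaces.semtools.filtering.featuredetection import DilateImage

    # Interface.help() prints the full list of inputs and outputs that were
    # generated from the tool's SEM description.
    DilateImage.help()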
+ """ input_spec = ErodeImageInputSpec output_spec = ErodeImageOutputSpec @@ -340,17 +334,16 @@ class GenerateBrainClippedImageOutputSpec(TraitedSpec): class GenerateBrainClippedImage(SEMLikeCommandLine): """title: GenerateBrainClippedImage -category: Filtering.FeatureDetection - -description: Automatic FeatureImages using neural networks + category: Filtering.FeatureDetection -version: 1.0 + description: Automatic FeatureImages using neural networks -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + version: 1.0 -contributor: Eun Young Kim + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -""" + contributor: Eun Young Kim + """ input_spec = GenerateBrainClippedImageInputSpec output_spec = GenerateBrainClippedImageOutputSpec @@ -387,19 +380,18 @@ class NeighborhoodMedianOutputSpec(TraitedSpec): class NeighborhoodMedian(SEMLikeCommandLine): """title: Neighborhood Median -category: Filtering.FeatureDetection + category: Filtering.FeatureDetection -description: Calculates the median, for the given neighborhood size, at each voxel of the input image. + description: Calculates the median, for the given neighborhood size, at each voxel of the input image. -version: 0.1.0.$Revision: 1 $(alpha) + version: 0.1.0.$Revision: 1 $(alpha) -documentation-url: http:://www.na-mic.org/ + documentation-url: http:://www.na-mic.org/ -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -contributor: This tool was developed by Mark Scully and Jeremy Bockholt. - -""" + contributor: This tool was developed by Mark Scully and Jeremy Bockholt. + """ input_spec = NeighborhoodMedianInputSpec output_spec = NeighborhoodMedianOutputSpec @@ -433,17 +425,16 @@ class GenerateTestImageOutputSpec(TraitedSpec): class GenerateTestImage(SEMLikeCommandLine): """title: DownSampleImage -category: Filtering.FeatureDetection - -description: Down sample image for testing + category: Filtering.FeatureDetection -version: 1.0 + description: Down sample image for testing -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + version: 1.0 -contributor: Eun Young Kim + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -""" + contributor: Eun Young Kim + """ input_spec = GenerateTestImageInputSpec output_spec = GenerateTestImageOutputSpec @@ -480,19 +471,18 @@ class NeighborhoodMeanOutputSpec(TraitedSpec): class NeighborhoodMean(SEMLikeCommandLine): """title: Neighborhood Mean -category: Filtering.FeatureDetection + category: Filtering.FeatureDetection -description: Calculates the mean, for the given neighborhood size, at each voxel of the T1, T2, and FLAIR. + description: Calculates the mean, for the given neighborhood size, at each voxel of the T1, T2, and FLAIR. -version: 0.1.0.$Revision: 1 $(alpha) + version: 0.1.0.$Revision: 1 $(alpha) -documentation-url: http:://www.na-mic.org/ + documentation-url: http:://www.na-mic.org/ -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -contributor: This tool was developed by Mark Scully and Jeremy Bockholt. - -""" + contributor: This tool was developed by Mark Scully and Jeremy Bockholt. 
+ """ input_spec = NeighborhoodMeanInputSpec output_spec = NeighborhoodMeanOutputSpec @@ -532,19 +522,18 @@ class HammerAttributeCreatorOutputSpec(TraitedSpec): class HammerAttributeCreator(SEMLikeCommandLine): """title: HAMMER Feature Vectors -category: Filtering.FeatureDetection - -description: Create the feature vectors used by HAMMER. + category: Filtering.FeatureDetection -version: 0.1.0.$Revision: 1 $(alpha) + description: Create the feature vectors used by HAMMER. -documentation-url: http:://www.na-mic.org/ + version: 0.1.0.$Revision: 1 $(alpha) -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + documentation-url: http:://www.na-mic.org/ -contributor: This was extracted from the Hammer Registration source code, and wrapped up by Hans J. Johnson. + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -""" + contributor: This was extracted from the Hammer Registration source code, and wrapped up by Hans J. Johnson. + """ input_spec = HammerAttributeCreatorInputSpec output_spec = HammerAttributeCreatorOutputSpec @@ -570,19 +559,18 @@ class TextureMeasureFilterOutputSpec(TraitedSpec): class TextureMeasureFilter(SEMLikeCommandLine): """title: Canny Level Set Image Filter -category: Filtering.FeatureDetection - -description: The CannySegmentationLevelSet is commonly used to refine a manually generated manual mask. + category: Filtering.FeatureDetection -version: 0.3.0 + description: The CannySegmentationLevelSet is commonly used to refine a manually generated manual mask. -license: CC + version: 0.3.0 -contributor: Regina Kim + license: CC -acknowledgements: This command module was derived from Insight/Examples/Segmentation/CannySegmentationLevelSetImageFilter.cxx (copyright) Insight Software Consortium. See http://wiki.na-mic.org/Wiki/index.php/Slicer3:Execution_Model_Documentation for more detailed descriptions. + contributor: Regina Kim -""" + acknowledgements: This command module was derived from Insight/Examples/Segmentation/CannySegmentationLevelSetImageFilter.cxx (copyright) Insight Software Consortium. See http://wiki.na-mic.org/Wiki/index.php/Slicer3:Execution_Model_Documentation for more detailed descriptions. + """ input_spec = TextureMeasureFilterInputSpec output_spec = TextureMeasureFilterOutputSpec @@ -623,19 +611,18 @@ class DilateMaskOutputSpec(TraitedSpec): class DilateMask(SEMLikeCommandLine): """title: Dilate Image -category: Filtering.FeatureDetection + category: Filtering.FeatureDetection -description: Uses mathematical morphology to dilate the input images. + description: Uses mathematical morphology to dilate the input images. -version: 0.1.0.$Revision: 1 $(alpha) + version: 0.1.0.$Revision: 1 $(alpha) -documentation-url: http:://www.na-mic.org/ + documentation-url: http:://www.na-mic.org/ -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -contributor: This tool was developed by Mark Scully and Jeremy Bockholt. - -""" + contributor: This tool was developed by Mark Scully and Jeremy Bockholt. + """ input_spec = DilateMaskInputSpec output_spec = DilateMaskOutputSpec @@ -664,19 +651,18 @@ class DumpBinaryTrainingVectorsOutputSpec(TraitedSpec): class DumpBinaryTrainingVectors(SEMLikeCommandLine): """title: Erode Image -category: Filtering.FeatureDetection - -description: Uses mathematical morphology to erode the input images. 
+ category: Filtering.FeatureDetection -version: 0.1.0.$Revision: 1 $(alpha) + description: Uses mathematical morphology to erode the input images. -documentation-url: http:://www.na-mic.org/ + version: 0.1.0.$Revision: 1 $(alpha) -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + documentation-url: http:://www.na-mic.org/ -contributor: This tool was developed by Mark Scully and Jeremy Bockholt. + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -""" + contributor: This tool was developed by Mark Scully and Jeremy Bockholt. + """ input_spec = DumpBinaryTrainingVectorsInputSpec output_spec = DumpBinaryTrainingVectorsOutputSpec @@ -716,19 +702,18 @@ class DistanceMapsOutputSpec(TraitedSpec): class DistanceMaps(SEMLikeCommandLine): """title: Mauerer Distance -category: Filtering.FeatureDetection + category: Filtering.FeatureDetection -description: Get the distance from a voxel to the nearest voxel of a given tissue type. + description: Get the distance from a voxel to the nearest voxel of a given tissue type. -version: 0.1.0.$Revision: 1 $(alpha) + version: 0.1.0.$Revision: 1 $(alpha) -documentation-url: http:://www.na-mic.org/ + documentation-url: http:://www.na-mic.org/ -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -contributor: This tool was developed by Mark Scully and Jeremy Bockholt. - -""" + contributor: This tool was developed by Mark Scully and Jeremy Bockholt. + """ input_spec = DistanceMapsInputSpec output_spec = DistanceMapsOutputSpec @@ -762,19 +747,18 @@ class STAPLEAnalysisOutputSpec(TraitedSpec): class STAPLEAnalysis(SEMLikeCommandLine): """title: Dilate Image -category: Filtering.FeatureDetection - -description: Uses mathematical morphology to dilate the input images. + category: Filtering.FeatureDetection -version: 0.1.0.$Revision: 1 $(alpha) + description: Uses mathematical morphology to dilate the input images. -documentation-url: http:://www.na-mic.org/ + version: 0.1.0.$Revision: 1 $(alpha) -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + documentation-url: http:://www.na-mic.org/ -contributor: This tool was developed by Mark Scully and Jeremy Bockholt. + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -""" + contributor: This tool was developed by Mark Scully and Jeremy Bockholt. + """ input_spec = STAPLEAnalysisInputSpec output_spec = STAPLEAnalysisOutputSpec @@ -812,13 +796,12 @@ class GradientAnisotropicDiffusionImageFilterOutputSpec(TraitedSpec): class GradientAnisotropicDiffusionImageFilter(SEMLikeCommandLine): """title: GradientAnisopropicDiffusionFilter -category: Filtering.FeatureDetection + category: Filtering.FeatureDetection -description: Image Smoothing using Gradient Anisotropic Diffuesion Filer + description: Image Smoothing using Gradient Anisotropic Diffuesion Filer -contributor: This tool was developed by Eun Young Kim by modifying ITK Example - -""" + contributor: This tool was developed by Eun Young Kim by modifying ITK Example + """ input_spec = GradientAnisotropicDiffusionImageFilterInputSpec output_spec = GradientAnisotropicDiffusionImageFilterOutputSpec @@ -861,19 +844,18 @@ class CannyEdgeOutputSpec(TraitedSpec): class CannyEdge(SEMLikeCommandLine): """title: Canny Edge Detection -category: Filtering.FeatureDetection - -description: Get the distance from a voxel to the nearest voxel of a given tissue type. 
+ category: Filtering.FeatureDetection -version: 0.1.0.(alpha) + description: Get the distance from a voxel to the nearest voxel of a given tissue type. -documentation-url: http:://www.na-mic.org/ + version: 0.1.0.(alpha) -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + documentation-url: http:://www.na-mic.org/ -contributor: This tool was written by Hans J. Johnson. + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -""" + contributor: This tool was written by Hans J. Johnson. + """ input_spec = CannyEdgeInputSpec output_spec = CannyEdgeOutputSpec diff --git a/nipype/interfaces/semtools/legacy/registration.py b/nipype/interfaces/semtools/legacy/registration.py index 9835889040..cb65aa12f5 100644 --- a/nipype/interfaces/semtools/legacy/registration.py +++ b/nipype/interfaces/semtools/legacy/registration.py @@ -57,21 +57,20 @@ class scalartransformOutputSpec(TraitedSpec): class scalartransform(SEMLikeCommandLine): """title: ScalarTransform (DTIProcess) -category: Legacy.Registration + category: Legacy.Registration -version: 1.0.0 + version: 1.0.0 -documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess + documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess -license: Copyright (c) Casey Goodlett. All rights reserved. - See http://www.ia.unc.edu/dev/Copyright.htm for details. - This software is distributed WITHOUT ANY WARRANTY; without even - the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR - PURPOSE. See the above copyright notices for more information. + license: Copyright (c) Casey Goodlett. All rights reserved. + See http://www.ia.unc.edu/dev/Copyright.htm for details. + This software is distributed WITHOUT ANY WARRANTY; without even + the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + PURPOSE. See the above copyright notices for more information. -contributor: Casey Goodlett - -""" + contributor: Casey Goodlett + """ input_spec = scalartransformInputSpec output_spec = scalartransformOutputSpec diff --git a/nipype/interfaces/semtools/registration/brainsfit.py b/nipype/interfaces/semtools/registration/brainsfit.py index 343edd2155..b319ce1c86 100644 --- a/nipype/interfaces/semtools/registration/brainsfit.py +++ b/nipype/interfaces/semtools/registration/brainsfit.py @@ -390,21 +390,20 @@ class BRAINSFitOutputSpec(TraitedSpec): class BRAINSFit(SEMLikeCommandLine): """title: General Registration (BRAINS) -category: Registration + category: Registration -description: Register a three-dimensional volume to a reference volume (Mattes Mutual Information by default). Full documentation avalable here: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSFit. Method described in BRAINSFit: Mutual Information Registrations of Whole-Brain 3D Images, Using the Insight Toolkit, Johnson H.J., Harris G., Williams K., The Insight Journal, 2007. http://hdl.handle.net/1926/1291 + description: Register a three-dimensional volume to a reference volume (Mattes Mutual Information by default). Full documentation avalable here: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSFit. Method described in BRAINSFit: Mutual Information Registrations of Whole-Brain 3D Images, Using the Insight Toolkit, Johnson H.J., Harris G., Williams K., The Insight Journal, 2007. 
http://hdl.handle.net/1926/1291 -version: 3.0.0 + version: 3.0.0 -documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSFit + documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSFit -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -contributor: Hans J. Johnson, hans-johnson -at- uiowa.edu, http://www.psychiatry.uiowa.edu + contributor: Hans J. Johnson, hans-johnson -at- uiowa.edu, http://www.psychiatry.uiowa.edu -acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); Gregory Harris(1), Vincent Magnotta(1,2,3); Andriy Fedorov(5) 1=University of Iowa Department of Psychiatry, 2=University of Iowa Department of Radiology, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering, 5=Surgical Planning Lab, Harvard - -""" + acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); Gregory Harris(1), Vincent Magnotta(1,2,3); Andriy Fedorov(5) 1=University of Iowa Department of Psychiatry, 2=University of Iowa Department of Radiology, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering, 5=Surgical Planning Lab, Harvard + """ input_spec = BRAINSFitInputSpec output_spec = BRAINSFitOutputSpec diff --git a/nipype/interfaces/semtools/registration/brainsresample.py b/nipype/interfaces/semtools/registration/brainsresample.py index 0eb6b5f29c..e8ac045936 100644 --- a/nipype/interfaces/semtools/registration/brainsresample.py +++ b/nipype/interfaces/semtools/registration/brainsresample.py @@ -92,21 +92,20 @@ class BRAINSResampleOutputSpec(TraitedSpec): class BRAINSResample(SEMLikeCommandLine): """title: Resample Image (BRAINS) -category: Registration + category: Registration -description: This program collects together three common image processing tasks that all involve resampling an image volume: Resampling to a new resolution and spacing, applying a transformation (using an ITK transform IO mechanisms) and Warping (using a vector image deformation field). Full documentation available here: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSResample. + description: This program collects together three common image processing tasks that all involve resampling an image volume: Resampling to a new resolution and spacing, applying a transformation (using an ITK transform IO mechanisms) and Warping (using a vector image deformation field). Full documentation available here: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSResample. -version: 3.0.0 + version: 3.0.0 -documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSResample + documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSResample -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -contributor: This tool was developed by Vincent Magnotta, Greg Harris, and Hans Johnson. + contributor: This tool was developed by Vincent Magnotta, Greg Harris, and Hans Johnson. 
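A minimal, hedged BRAINSFit sketch; the trait names below (fixedVolume, movingVolume, outputTransform, useRigid, useAffine) are assumed to mirror the BRAINSFit command-line flags, and the file names are hypothetical.

    from nipype.interfaces.semtools.registration.brainsfit import BRAINSFit

    fit = BRAINSFit()
    fit.inputs.fixedVolume = 'reference_t1.nii.gz'    # hypothetical reference volume
    fit.inputs.movingVolume = 'subject_t1.nii.gz'     # hypothetical volume to register
    fit.inputs.outputTransform = 'subject_to_ref.h5'  # hypothetical transform output
    fit.inputs.useRigid = True                        # assumed flags requesting a rigid then affine fit
    fit.inputs.useAffine = True
    print(fit.cmdline)                                # Mattes mutual information is the default metric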
-acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. - -""" + acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. + """ input_spec = BRAINSResampleInputSpec output_spec = BRAINSResampleOutputSpec diff --git a/nipype/interfaces/semtools/registration/brainsresize.py b/nipype/interfaces/semtools/registration/brainsresize.py index a81ee204b5..8de4d616b9 100644 --- a/nipype/interfaces/semtools/registration/brainsresize.py +++ b/nipype/interfaces/semtools/registration/brainsresize.py @@ -51,19 +51,18 @@ class BRAINSResizeOutputSpec(TraitedSpec): class BRAINSResize(SEMLikeCommandLine): """title: Resize Image (BRAINS) -category: Registration + category: Registration -description: This program is useful for downsampling an image by a constant scale factor. + description: This program is useful for downsampling an image by a constant scale factor. -version: 3.0.0 + version: 3.0.0 -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -contributor: This tool was developed by Hans Johnson. + contributor: This tool was developed by Hans Johnson. -acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. - -""" + acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. + """ input_spec = BRAINSResizeInputSpec output_spec = BRAINSResizeOutputSpec diff --git a/nipype/interfaces/semtools/registration/specialized.py b/nipype/interfaces/semtools/registration/specialized.py index 5a6daf5250..0726ab807c 100644 --- a/nipype/interfaces/semtools/registration/specialized.py +++ b/nipype/interfaces/semtools/registration/specialized.py @@ -257,21 +257,20 @@ class VBRAINSDemonWarpOutputSpec(TraitedSpec): class VBRAINSDemonWarp(SEMLikeCommandLine): """title: Vector Demon Registration (BRAINS) -category: Registration.Specialized + category: Registration.Specialized -description: This program finds a deformation field to warp a moving image onto a fixed image. The images must be of the same signal kind, and contain an image of the same kind of object. This program uses the Thirion Demons warp software in ITK, the Insight Toolkit. Additional information is available at: http://www.nitrc.org/projects/brainsdemonwarp. + description: This program finds a deformation field to warp a moving image onto a fixed image. The images must be of the same signal kind, and contain an image of the same kind of object. This program uses the Thirion Demons warp software in ITK, the Insight Toolkit. Additional information is available at: http://www.nitrc.org/projects/brainsdemonwarp. 
-version: 3.0.0 + version: 3.0.0 -documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSDemonWarp + documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSDemonWarp -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -contributor: This tool was developed by Hans J. Johnson and Greg Harris. + contributor: This tool was developed by Hans J. Johnson and Greg Harris. -acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. - -""" + acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. + """ input_spec = VBRAINSDemonWarpInputSpec output_spec = VBRAINSDemonWarpOutputSpec @@ -516,21 +515,20 @@ class BRAINSDemonWarpOutputSpec(TraitedSpec): class BRAINSDemonWarp(SEMLikeCommandLine): """title: Demon Registration (BRAINS) -category: Registration.Specialized - -description: This program finds a deformation field to warp a moving image onto a fixed image. The images must be of the same signal kind, and contain an image of the same kind of object. This program uses the Thirion Demons warp software in ITK, the Insight Toolkit. Additional information is available at: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSDemonWarp. + category: Registration.Specialized -version: 3.0.0 + description: This program finds a deformation field to warp a moving image onto a fixed image. The images must be of the same signal kind, and contain an image of the same kind of object. This program uses the Thirion Demons warp software in ITK, the Insight Toolkit. Additional information is available at: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSDemonWarp. -documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSDemonWarp + version: 3.0.0 -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSDemonWarp -contributor: This tool was developed by Hans J. Johnson and Greg Harris. + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. + contributor: This tool was developed by Hans J. Johnson and Greg Harris. -""" + acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. 
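A hedged sketch for BRAINSDemonWarp, the scalar counterpart of VBRAINSDemonWarp above; the trait names are assumed to mirror the usual BRAINSDemonWarp flags (fixedVolume, movingVolume, outputVolume, outputDisplacementFieldVolume), the file names are placeholders, and the two images must be of the same signal kind as the description requires.

    from nipype.interfaces.semtools.registration.specialized import BRAINSDemonWarp

    warp = BRAINSDemonWarp()
    warp.inputs.fixedVolume = 'template_t1.nii.gz'                  # hypothetical fixed image
    warp.inputs.movingVolume = 'subject_t1.nii.gz'                  # hypothetical moving image, same modality
    warp.inputs.outputVolume = 'subject_warped.nii.gz'              # assumed output-image flag
    warp.inputs.outputDisplacementFieldVolume = 'deformation.nrrd'  # assumed displacement-field output flag
    print(warp.cmdline)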
+ """ input_spec = BRAINSDemonWarpInputSpec output_spec = BRAINSDemonWarpOutputSpec @@ -593,19 +591,18 @@ class BRAINSTransformFromFiducialsOutputSpec(TraitedSpec): class BRAINSTransformFromFiducials(SEMLikeCommandLine): """title: Fiducial Registration (BRAINS) -category: Registration.Specialized - -description: Computes a rigid, similarity or affine transform from a matched list of fiducials + category: Registration.Specialized -version: 0.1.0.$Revision$ + description: Computes a rigid, similarity or affine transform from a matched list of fiducials -documentation-url: http://www.slicer.org/slicerWiki/index.php/Modules:TransformFromFiducials-Documentation-3.6 + version: 0.1.0.$Revision$ -contributor: Casey B Goodlett + documentation-url: http://www.slicer.org/slicerWiki/index.php/Modules:TransformFromFiducials-Documentation-3.6 -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + contributor: Casey B Goodlett -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = BRAINSTransformFromFiducialsInputSpec output_spec = BRAINSTransformFromFiducialsOutputSpec diff --git a/nipype/interfaces/semtools/segmentation/specialized.py b/nipype/interfaces/semtools/segmentation/specialized.py index 483b5470fe..0b1f46f420 100644 --- a/nipype/interfaces/semtools/segmentation/specialized.py +++ b/nipype/interfaces/semtools/segmentation/specialized.py @@ -86,17 +86,16 @@ class BRAINSCutOutputSpec(TraitedSpec): class BRAINSCut(SEMLikeCommandLine): """title: BRAINSCut (BRAINS) -category: Segmentation.Specialized + category: Segmentation.Specialized -description: Automatic Segmentation using neural networks + description: Automatic Segmentation using neural networks -version: 1.0 + version: 1.0 -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -contributor: Vince Magnotta, Hans Johnson, Greg Harris, Kent Williams, Eunyoung Regina Kim - -""" + contributor: Vince Magnotta, Hans Johnson, Greg Harris, Kent Williams, Eunyoung Regina Kim + """ input_spec = BRAINSCutInputSpec output_spec = BRAINSCutOutputSpec @@ -177,19 +176,18 @@ class BRAINSROIAutoOutputSpec(TraitedSpec): class BRAINSROIAuto(SEMLikeCommandLine): """title: Foreground masking (BRAINS) -category: Segmentation.Specialized - -description: This program is used to create a mask over the most prominant forground region in an image. This is accomplished via a combination of otsu thresholding and a closing operation. More documentation is available here: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ForegroundMasking. + category: Segmentation.Specialized -version: 2.4.1 + description: This program is used to create a mask over the most prominant forground region in an image. This is accomplished via a combination of otsu thresholding and a closing operation. More documentation is available here: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ForegroundMasking. -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + version: 2.4.1 -contributor: Hans J. 
Johnson, hans-johnson -at- uiowa.edu, http://www.psychiatry.uiowa.edu + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); Gregory Harris(1), Vincent Magnotta(1,2,3); Andriy Fedorov(5), fedorov -at- bwh.harvard.edu (Slicer integration); (1=University of Iowa Department of Psychiatry, 2=University of Iowa Department of Radiology, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering, 5=Surgical Planning Lab, Harvard) + contributor: Hans J. Johnson, hans-johnson -at- uiowa.edu, http://www.psychiatry.uiowa.edu -""" + acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); Gregory Harris(1), Vincent Magnotta(1,2,3); Andriy Fedorov(5), fedorov -at- bwh.harvard.edu (Slicer integration); (1=University of Iowa Department of Psychiatry, 2=University of Iowa Department of Radiology, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering, 5=Surgical Planning Lab, Harvard) + """ input_spec = BRAINSROIAutoInputSpec output_spec = BRAINSROIAutoOutputSpec @@ -467,15 +465,14 @@ class BRAINSConstellationDetectorOutputSpec(TraitedSpec): class BRAINSConstellationDetector(SEMLikeCommandLine): """title: Brain Landmark Constellation Detector (BRAINS) -category: Segmentation.Specialized + category: Segmentation.Specialized -description: This program will find the mid-sagittal plane, a constellation of landmarks in a volume, and create an AC/PC aligned data set with the AC point at the center of the voxel lattice (labeled at the origin of the image physical space.) Part of this work is an extention of the algorithms originally described by Dr. Babak A. Ardekani, Alvin H. Bachman, Model-based automatic detection of the anterior and posterior commissures on MRI scans, NeuroImage, Volume 46, Issue 3, 1 July 2009, Pages 677-682, ISSN 1053-8119, DOI: 10.1016/j.neuroimage.2009.02.030. (http://www.sciencedirect.com/science/article/B6WNP-4VRP25C-4/2/8207b962a38aa83c822c6379bc43fe4c) + description: This program will find the mid-sagittal plane, a constellation of landmarks in a volume, and create an AC/PC aligned data set with the AC point at the center of the voxel lattice (labeled at the origin of the image physical space.) Part of this work is an extention of the algorithms originally described by Dr. Babak A. Ardekani, Alvin H. Bachman, Model-based automatic detection of the anterior and posterior commissures on MRI scans, NeuroImage, Volume 46, Issue 3, 1 July 2009, Pages 677-682, ISSN 1053-8119, DOI: 10.1016/j.neuroimage.2009.02.030. (http://www.sciencedirect.com/science/article/B6WNP-4VRP25C-4/2/8207b962a38aa83c822c6379bc43fe4c) -version: 1.0 + version: 1.0 -documentation-url: http://www.nitrc.org/projects/brainscdetector/ - -""" + documentation-url: http://www.nitrc.org/projects/brainscdetector/ + """ input_spec = BRAINSConstellationDetectorInputSpec output_spec = BRAINSConstellationDetectorOutputSpec @@ -545,11 +542,10 @@ class BRAINSCreateLabelMapFromProbabilityMapsOutputSpec(TraitedSpec): class BRAINSCreateLabelMapFromProbabilityMaps(SEMLikeCommandLine): """title: Create Label Map From Probability Maps (BRAINS) -category: Segmentation.Specialized - -description: Given A list of Probability Maps, generate a LabelMap. + category: Segmentation.Specialized -""" + description: Given A list of Probability Maps, generate a LabelMap. 
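A hedged BRAINSROIAuto sketch based on the description above (Otsu thresholding followed by a closing operation); inputVolume, outputROIMaskVolume, otsuPercentileThreshold and closingSize are assumed trait names mirroring the CLI flags, and the values shown are placeholders.

    from nipype.interfaces.semtools.segmentation.specialized import BRAINSROIAuto

    roi = BRAINSROIAuto()
    roi.inputs.inputVolume = 'subject_t1.nii.gz'         # hypothetical input image
    roi.inputs.outputROIMaskVolume = 'brain_mask.nii.gz'  # assumed mask-output flag
    roi.inputs.otsuPercentileThreshold = 0.01             # assumed Otsu threshold parameter
    roi.inputs.closingSize = 9.0                          # assumed closing-size parameter
    print(roi.cmdline)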
+ """ input_spec = BRAINSCreateLabelMapFromProbabilityMapsInputSpec output_spec = BRAINSCreateLabelMapFromProbabilityMapsOutputSpec @@ -614,13 +610,12 @@ class BinaryMaskEditorBasedOnLandmarksOutputSpec(TraitedSpec): class BinaryMaskEditorBasedOnLandmarks(SEMLikeCommandLine): """title: BRAINS Binary Mask Editor Based On Landmarks(BRAINS) -category: Segmentation.Specialized + category: Segmentation.Specialized -version: 1.0 + version: 1.0 -documentation-url: http://www.nitrc.org/projects/brainscdetector/ - -""" + documentation-url: http://www.nitrc.org/projects/brainscdetector/ + """ input_spec = BinaryMaskEditorBasedOnLandmarksInputSpec output_spec = BinaryMaskEditorBasedOnLandmarksOutputSpec @@ -681,11 +676,10 @@ class BRAINSMultiSTAPLEOutputSpec(TraitedSpec): class BRAINSMultiSTAPLE(SEMLikeCommandLine): """title: Create best representative label map) -category: Segmentation.Specialized - -description: given a list of label map images, create a representative/average label map. + category: Segmentation.Specialized -""" + description: given a list of label map images, create a representative/average label map. + """ input_spec = BRAINSMultiSTAPLEInputSpec output_spec = BRAINSMultiSTAPLEOutputSpec @@ -766,7 +760,7 @@ class BRAINSABCInputSpec(CommandLineInputSpec): ) outputVolumes = traits.Either( traits.Bool, - InputMultiPath(File(),), + InputMultiPath(File()), hash_files=False, desc="Corrected Output Images: should specify the same number of images as inputVolume, if only one element is given, then it is used as a file pattern where %s is replaced by the imageVolumeType, and %d by the index list location.", argstr="--outputVolumes %s...", @@ -854,7 +848,7 @@ class BRAINSABCInputSpec(CommandLineInputSpec): defaultSuffix = traits.Str(argstr="--defaultSuffix %s") implicitOutputs = traits.Either( traits.Bool, - InputMultiPath(File(),), + InputMultiPath(File()), hash_files=False, desc="Outputs to be made available to NiPype. Needed because not all BRAINSABC outputs have command line arguments.", argstr="--implicitOutputs %s...", @@ -900,11 +894,10 @@ class BRAINSABCOutputSpec(TraitedSpec): class BRAINSABC(SEMLikeCommandLine): """title: Intra-subject registration, bias Correction, and tissue classification (BRAINS) -category: Segmentation.Specialized + category: Segmentation.Specialized -description: Atlas-based tissue segmentation method. This is an algorithmic extension of work done by XXXX at UNC and Utah XXXX need more description here. - -""" + description: Atlas-based tissue segmentation method. This is an algorithmic extension of work done by XXXX at UNC and Utah XXXX need more description here. 
+ """ input_spec = BRAINSABCInputSpec output_spec = BRAINSABCOutputSpec @@ -965,11 +958,10 @@ class ESLROutputSpec(TraitedSpec): class ESLR(SEMLikeCommandLine): """title: Clean Contiguous Label Map (BRAINS) -category: Segmentation.Specialized - -description: From a range of label map values, extract the largest contiguous region of those labels + category: Segmentation.Specialized -""" + description: From a range of label map values, extract the largest contiguous region of those labels + """ input_spec = ESLRInputSpec output_spec = ESLROutputSpec diff --git a/nipype/interfaces/semtools/testing/featuredetection.py b/nipype/interfaces/semtools/testing/featuredetection.py index 19e5076b0a..16735b3bb8 100644 --- a/nipype/interfaces/semtools/testing/featuredetection.py +++ b/nipype/interfaces/semtools/testing/featuredetection.py @@ -34,15 +34,14 @@ class SphericalCoordinateGenerationOutputSpec(TraitedSpec): class SphericalCoordinateGeneration(SEMLikeCommandLine): """title: Spherical Coordinate Generation -category: Testing.FeatureDetection + category: Testing.FeatureDetection -description: get the atlas image as input and generates the rho, phi and theta images. + description: get the atlas image as input and generates the rho, phi and theta images. -version: 0.1.0.$Revision: 1 $(alpha) + version: 0.1.0.$Revision: 1 $(alpha) -contributor: Ali Ghayoor - -""" + contributor: Ali Ghayoor + """ input_spec = SphericalCoordinateGenerationInputSpec output_spec = SphericalCoordinateGenerationOutputSpec diff --git a/nipype/interfaces/semtools/testing/generateaveragelmkfile.py b/nipype/interfaces/semtools/testing/generateaveragelmkfile.py index 3995a9b73d..7138dc37d3 100644 --- a/nipype/interfaces/semtools/testing/generateaveragelmkfile.py +++ b/nipype/interfaces/semtools/testing/generateaveragelmkfile.py @@ -44,13 +44,12 @@ class GenerateAverageLmkFileOutputSpec(TraitedSpec): class GenerateAverageLmkFile(SEMLikeCommandLine): """title: Average Fiducials -category: Testing + category: Testing -description: This program gets several fcsv file each one contains several landmarks with the same name but slightly different coordinates. For EACH landmark we compute the average coordination. + description: This program gets several fcsv file each one contains several landmarks with the same name but slightly different coordinates. For EACH landmark we compute the average coordination. -contributor: Ali Ghayoor - -""" + contributor: Ali Ghayoor + """ input_spec = GenerateAverageLmkFileInputSpec output_spec = GenerateAverageLmkFileOutputSpec diff --git a/nipype/interfaces/semtools/testing/landmarkscompare.py b/nipype/interfaces/semtools/testing/landmarkscompare.py index 066a92f24b..9a5ad26883 100644 --- a/nipype/interfaces/semtools/testing/landmarkscompare.py +++ b/nipype/interfaces/semtools/testing/landmarkscompare.py @@ -42,13 +42,12 @@ class LandmarksCompareOutputSpec(TraitedSpec): class LandmarksCompare(SEMLikeCommandLine): """title: Compare Fiducials -category: Testing + category: Testing -description: Compares two .fcsv or .wts text files and verifies that they are identicle. Used for testing landmarks files. + description: Compares two .fcsv or .wts text files and verifies that they are identicle. Used for testing landmarks files. 
-contributor: Ali Ghayoor - -""" + contributor: Ali Ghayoor + """ input_spec = LandmarksCompareInputSpec output_spec = LandmarksCompareOutputSpec diff --git a/nipype/interfaces/semtools/utilities/brains.py b/nipype/interfaces/semtools/utilities/brains.py index 59a61a1137..5ff0f9aa35 100644 --- a/nipype/interfaces/semtools/utilities/brains.py +++ b/nipype/interfaces/semtools/utilities/brains.py @@ -97,11 +97,10 @@ class BRAINSConstellationModelerOutputSpec(TraitedSpec): class BRAINSConstellationModeler(SEMLikeCommandLine): """title: Generate Landmarks Model (BRAINS) -category: Utilities.BRAINS + category: Utilities.BRAINS -description: Train up a model for BRAINSConstellationDetector - -""" + description: Train up a model for BRAINSConstellationDetector + """ input_spec = BRAINSConstellationModelerInputSpec output_spec = BRAINSConstellationModelerOutputSpec @@ -145,11 +144,10 @@ class landmarksConstellationWeightsOutputSpec(TraitedSpec): class landmarksConstellationWeights(SEMLikeCommandLine): """title: Generate Landmarks Weights (BRAINS) -category: Utilities.BRAINS - -description: Train up a list of Weights for the Landmarks in BRAINSConstellationDetector + category: Utilities.BRAINS -""" + description: Train up a list of Weights for the Landmarks in BRAINSConstellationDetector + """ input_spec = landmarksConstellationWeightsInputSpec output_spec = landmarksConstellationWeightsOutputSpec @@ -207,15 +205,14 @@ class BRAINSTrimForegroundInDirectionOutputSpec(TraitedSpec): class BRAINSTrimForegroundInDirection(SEMLikeCommandLine): """title: Trim Foreground In Direction (BRAINS) -category: Utilities.BRAINS - -description: This program will trim off the neck and also air-filling noise from the inputImage. + category: Utilities.BRAINS -version: 0.1 + description: This program will trim off the neck and also air-filling noise from the inputImage. -documentation-url: http://www.nitrc.org/projects/art/ + version: 0.1 -""" + documentation-url: http://www.nitrc.org/projects/art/ + """ input_spec = BRAINSTrimForegroundInDirectionInputSpec output_spec = BRAINSTrimForegroundInDirectionOutputSpec @@ -278,15 +275,14 @@ class BRAINSLmkTransformOutputSpec(TraitedSpec): class BRAINSLmkTransform(SEMLikeCommandLine): """title: Landmark Transform (BRAINS) -category: Utilities.BRAINS + category: Utilities.BRAINS -description: This utility program estimates the affine transform to align the fixed landmarks to the moving landmarks, and then generate the resampled moving image to the same physical space as that of the reference image. + description: This utility program estimates the affine transform to align the fixed landmarks to the moving landmarks, and then generate the resampled moving image to the same physical space as that of the reference image. 
-version: 1.0 + version: 1.0 -documentation-url: http://www.nitrc.org/projects/brainscdetector/ - -""" + documentation-url: http://www.nitrc.org/projects/brainscdetector/ + """ input_spec = BRAINSLmkTransformInputSpec output_spec = BRAINSLmkTransformOutputSpec @@ -396,21 +392,20 @@ class BRAINSMushOutputSpec(TraitedSpec): class BRAINSMush(SEMLikeCommandLine): """title: Brain Extraction from T1/T2 image (BRAINS) -category: Utilities.BRAINS - -description: This program: 1) generates a weighted mixture image optimizing the mean and variance and 2) produces a mask of the brain volume + category: Utilities.BRAINS -version: 0.1.0.$Revision: 1.4 $(alpha) + description: This program: 1) generates a weighted mixture image optimizing the mean and variance and 2) produces a mask of the brain volume -documentation-url: http:://mri.radiology.uiowa.edu + version: 0.1.0.$Revision: 1.4 $(alpha) -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + documentation-url: http:://mri.radiology.uiowa.edu -contributor: This tool is a modification by Steven Dunn of a program developed by Greg Harris and Ron Pierson. + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -acknowledgements: This work was developed by the University of Iowa Departments of Radiology and Psychiatry. This software was supported in part of NIH/NINDS award NS050568. + contributor: This tool is a modification by Steven Dunn of a program developed by Greg Harris and Ron Pierson. -""" + acknowledgements: This work was developed by the University of Iowa Departments of Radiology and Psychiatry. This software was supported in part of NIH/NINDS award NS050568. + """ input_spec = BRAINSMushInputSpec output_spec = BRAINSMushOutputSpec @@ -458,19 +453,18 @@ class BRAINSTransformConvertOutputSpec(TraitedSpec): class BRAINSTransformConvert(SEMLikeCommandLine): """title: BRAINS Transform Convert -category: Utilities.BRAINS - -description: Convert ITK transforms to higher order transforms + category: Utilities.BRAINS -version: 1.0 + description: Convert ITK transforms to higher order transforms -documentation-url: A utility to convert between transform file formats. + version: 1.0 -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + documentation-url: A utility to convert between transform file formats. -contributor: Hans J. Johnson,Kent Williams, Ali Ghayoor + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -""" + contributor: Hans J. 
Johnson,Kent Williams, Ali Ghayoor + """ input_spec = BRAINSTransformConvertInputSpec output_spec = BRAINSTransformConvertOutputSpec @@ -504,13 +498,12 @@ class landmarksConstellationAlignerOutputSpec(TraitedSpec): class landmarksConstellationAligner(SEMLikeCommandLine): """title: MidACPC Landmark Insertion -category: Utilities.BRAINS + category: Utilities.BRAINS -description: This program converts the original landmark files to the acpc-aligned landmark files + description: This program converts the original landmark files to the acpc-aligned landmark files -contributor: Ali Ghayoor - -""" + contributor: Ali Ghayoor + """ input_spec = landmarksConstellationAlignerInputSpec output_spec = landmarksConstellationAlignerOutputSpec @@ -542,13 +535,12 @@ class BRAINSEyeDetectorOutputSpec(TraitedSpec): class BRAINSEyeDetector(SEMLikeCommandLine): """title: Eye Detector (BRAINS) -category: Utilities.BRAINS - -version: 1.0 + category: Utilities.BRAINS -documentation-url: http://www.nitrc.org/projects/brainscdetector/ + version: 1.0 -""" + documentation-url: http://www.nitrc.org/projects/brainscdetector/ + """ input_spec = BRAINSEyeDetectorInputSpec output_spec = BRAINSEyeDetectorOutputSpec @@ -576,15 +568,14 @@ class BRAINSLinearModelerEPCAOutputSpec(TraitedSpec): class BRAINSLinearModelerEPCA(SEMLikeCommandLine): """title: Landmark Linear Modeler (BRAINS) -category: Utilities.BRAINS - -description: Training linear model using EPCA. Implementation based on my MS thesis, "A METHOD FOR AUTOMATED LANDMARK CONSTELLATION DETECTION USING EVOLUTIONARY PRINCIPAL COMPONENTS AND STATISTICAL SHAPE MODELS" + category: Utilities.BRAINS -version: 1.0 + description: Training linear model using EPCA. Implementation based on my MS thesis, "A METHOD FOR AUTOMATED LANDMARK CONSTELLATION DETECTION USING EVOLUTIONARY PRINCIPAL COMPONENTS AND STATISTICAL SHAPE MODELS" -documentation-url: http://www.nitrc.org/projects/brainscdetector/ + version: 1.0 -""" + documentation-url: http://www.nitrc.org/projects/brainscdetector/ + """ input_spec = BRAINSLinearModelerEPCAInputSpec output_spec = BRAINSLinearModelerEPCAOutputSpec @@ -630,19 +621,18 @@ class BRAINSInitializedControlPointsOutputSpec(TraitedSpec): class BRAINSInitializedControlPoints(SEMLikeCommandLine): """title: Initialized Control Points (BRAINS) -category: Utilities.BRAINS + category: Utilities.BRAINS -description: Outputs bspline control points as landmarks + description: Outputs bspline control points as landmarks -version: 0.1.0.$Revision: 916 $(alpha) + version: 0.1.0.$Revision: 916 $(alpha) -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -contributor: Mark Scully + contributor: Mark Scully -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. Additional support for Mark Scully and Hans Johnson at the University of Iowa. - -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. Additional support for Mark Scully and Hans Johnson at the University of Iowa. 
+ """ input_spec = BRAINSInitializedControlPointsInputSpec output_spec = BRAINSInitializedControlPointsOutputSpec @@ -659,7 +649,7 @@ class CleanUpOverlapLabelsInputSpec(CommandLineInputSpec): ) outputBinaryVolumes = traits.Either( traits.Bool, - InputMultiPath(File(),), + InputMultiPath(File()), hash_files=False, desc="The output label map images, with integer values in it. Each label value specified in the inputLabels is combined into this output label map volume", argstr="--outputBinaryVolumes %s...", @@ -676,15 +666,14 @@ class CleanUpOverlapLabelsOutputSpec(TraitedSpec): class CleanUpOverlapLabels(SEMLikeCommandLine): """title: Clean Up Overla Labels -category: Utilities.BRAINS - -description: Take a series of input binary images and clean up for those overlapped area. Binary volumes given first always wins out + category: Utilities.BRAINS -version: 0.1.0 + description: Take a series of input binary images and clean up for those overlapped area. Binary volumes given first always wins out -contributor: Eun Young Kim + version: 0.1.0 -""" + contributor: Eun Young Kim + """ input_spec = CleanUpOverlapLabelsInputSpec output_spec = CleanUpOverlapLabelsOutputSpec @@ -730,13 +719,12 @@ class BRAINSClipInferiorOutputSpec(TraitedSpec): class BRAINSClipInferior(SEMLikeCommandLine): """title: Clip Inferior of Center of Brain (BRAINS) -category: Utilities.BRAINS + category: Utilities.BRAINS -description: This program will read the inputVolume as a short int image, write the BackgroundFillValue everywhere inferior to the lower bound, and write the resulting clipped short int image in the outputVolume. + description: This program will read the inputVolume as a short int image, write the BackgroundFillValue everywhere inferior to the lower bound, and write the resulting clipped short int image in the outputVolume. -version: 1.0 - -""" + version: 1.0 + """ input_spec = BRAINSClipInferiorInputSpec output_spec = BRAINSClipInferiorOutputSpec @@ -773,15 +761,14 @@ class GenerateLabelMapFromProbabilityMapOutputSpec(TraitedSpec): class GenerateLabelMapFromProbabilityMap(SEMLikeCommandLine): """title: Label Map from Probability Images -category: Utilities.BRAINS - -description: Given a list of probability maps for labels, create a discrete label map where only the highest probability region is used for the labeling. + category: Utilities.BRAINS -version: 0.1 + description: Given a list of probability maps for labels, create a discrete label map where only the highest probability region is used for the labeling. 
-contributor: University of Iowa Department of Psychiatry, http:://www.psychiatry.uiowa.edu + version: 0.1 -""" + contributor: University of Iowa Department of Psychiatry, http:://www.psychiatry.uiowa.edu + """ input_spec = GenerateLabelMapFromProbabilityMapInputSpec output_spec = GenerateLabelMapFromProbabilityMapOutputSpec @@ -872,11 +859,10 @@ class BRAINSAlignMSPOutputSpec(TraitedSpec): class BRAINSAlignMSP(SEMLikeCommandLine): """title: Align Mid Saggital Brain (BRAINS) -category: Utilities.BRAINS - -description: Resample an image into ACPC alignement ACPCDetect + category: Utilities.BRAINS -""" + description: Resample an image into ACPC alignement ACPCDetect + """ input_spec = BRAINSAlignMSPInputSpec output_spec = BRAINSAlignMSPOutputSpec @@ -922,17 +908,16 @@ class BRAINSLandmarkInitializerOutputSpec(TraitedSpec): class BRAINSLandmarkInitializer(SEMLikeCommandLine): """title: BRAINSLandmarkInitializer -category: Utilities.BRAINS + category: Utilities.BRAINS -description: Create transformation file (*mat) from a pair of landmarks (*fcsv) files. + description: Create transformation file (*mat) from a pair of landmarks (*fcsv) files. -version: 1.0 + version: 1.0 -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -contributor: Eunyoung Regina Kim - -""" + contributor: Eunyoung Regina Kim + """ input_spec = BRAINSLandmarkInitializerInputSpec output_spec = BRAINSLandmarkInitializerOutputSpec @@ -961,13 +946,12 @@ class insertMidACPCpointOutputSpec(TraitedSpec): class insertMidACPCpoint(SEMLikeCommandLine): """title: MidACPC Landmark Insertion -category: Utilities.BRAINS - -description: This program gets a landmark fcsv file and adds a new landmark as the midpoint between AC and PC points to the output landmark fcsv file + category: Utilities.BRAINS -contributor: Ali Ghayoor + description: This program gets a landmark fcsv file and adds a new landmark as the midpoint between AC and PC points to the output landmark fcsv file -""" + contributor: Ali Ghayoor + """ input_spec = insertMidACPCpointInputSpec output_spec = insertMidACPCpointOutputSpec @@ -1027,17 +1011,16 @@ class BRAINSSnapShotWriterOutputSpec(TraitedSpec): class BRAINSSnapShotWriter(SEMLikeCommandLine): """title: BRAINSSnapShotWriter -category: Utilities.BRAINS - -description: Create 2D snapshot of input images. Mask images are color-coded + category: Utilities.BRAINS -version: 1.0 + description: Create 2D snapshot of input images. 
Mask images are color-coded -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + version: 1.0 -contributor: Eunyoung Regina Kim + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -""" + contributor: Eunyoung Regina Kim + """ input_spec = BRAINSSnapShotWriterInputSpec output_spec = BRAINSSnapShotWriterOutputSpec @@ -1083,15 +1066,14 @@ class JointHistogramOutputSpec(TraitedSpec): class JointHistogram(SEMLikeCommandLine): """title: Write Out Image Intensities -category: Utilities.BRAINS + category: Utilities.BRAINS -description: For Analysis + description: For Analysis -version: 0.1 + version: 0.1 -contributor: University of Iowa Department of Psychiatry, http:://www.psychiatry.uiowa.edu - -""" + contributor: University of Iowa Department of Psychiatry, http:://www.psychiatry.uiowa.edu + """ input_spec = JointHistogramInputSpec output_spec = JointHistogramOutputSpec @@ -1129,17 +1111,16 @@ class ShuffleVectorsModuleOutputSpec(TraitedSpec): class ShuffleVectorsModule(SEMLikeCommandLine): """title: ShuffleVectors -category: Utilities.BRAINS - -description: Automatic Segmentation using neural networks + category: Utilities.BRAINS -version: 1.0 + description: Automatic Segmentation using neural networks -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + version: 1.0 -contributor: Hans Johnson + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -""" + contributor: Hans Johnson + """ input_spec = ShuffleVectorsModuleInputSpec output_spec = ShuffleVectorsModuleOutputSpec @@ -1193,15 +1174,14 @@ class ImageRegionPlotterOutputSpec(TraitedSpec): class ImageRegionPlotter(SEMLikeCommandLine): """title: Write Out Image Intensities -category: Utilities.BRAINS - -description: For Analysis + category: Utilities.BRAINS -version: 0.1 + description: For Analysis -contributor: University of Iowa Department of Psychiatry, http:://www.psychiatry.uiowa.edu + version: 0.1 -""" + contributor: University of Iowa Department of Psychiatry, http:://www.psychiatry.uiowa.edu + """ input_spec = ImageRegionPlotterInputSpec output_spec = ImageRegionPlotterOutputSpec @@ -1256,11 +1236,10 @@ class fcsv_to_hdf5OutputSpec(TraitedSpec): class fcsv_to_hdf5(SEMLikeCommandLine): """title: fcsv_to_hdf5 (BRAINS) -category: Utilities.BRAINS + category: Utilities.BRAINS -description: Convert a collection of fcsv files to a HDF5 format file - -""" + description: Convert a collection of fcsv files to a HDF5 format file + """ input_spec = fcsv_to_hdf5InputSpec output_spec = fcsv_to_hdf5OutputSpec @@ -1322,19 +1301,18 @@ class FindCenterOfBrainOutputSpec(TraitedSpec): class FindCenterOfBrain(SEMLikeCommandLine): """title: Center Of Brain (BRAINS) -category: Utilities.BRAINS - -description: Finds the center point of a brain + category: Utilities.BRAINS -version: 3.0.0 + description: Finds the center point of a brain -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + version: 3.0.0 -contributor: Hans J. Johnson, hans-johnson -at- uiowa.edu, http://wwww.psychiatry.uiowa.edu + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering + contributor: Hans J. 
Johnson, hans-johnson -at- uiowa.edu, http://wwww.psychiatry.uiowa.edu -""" + acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering + """ input_spec = FindCenterOfBrainInputSpec output_spec = FindCenterOfBrainOutputSpec diff --git a/nipype/interfaces/slicer/converters.py b/nipype/interfaces/slicer/converters.py index cc477e99d0..6d4a824eea 100644 --- a/nipype/interfaces/slicer/converters.py +++ b/nipype/interfaces/slicer/converters.py @@ -61,21 +61,20 @@ class DicomToNrrdConverterOutputSpec(TraitedSpec): class DicomToNrrdConverter(SEMLikeCommandLine): """title: DICOM to NRRD Converter -category: Converters + category: Converters -description: Converts diffusion weighted MR images in dicom series into Nrrd format for analysis in Slicer. This program has been tested on only a limited subset of DTI dicom formats available from Siemens, GE, and Phillips scanners. Work in progress to support dicom multi-frame data. The program parses dicom header to extract necessary information about measurement frame, diffusion weighting directions, b-values, etc, and write out a nrrd image. For non-diffusion weighted dicom images, it loads in an entire dicom series and writes out a single dicom volume in a .nhdr/.raw pair. + description: Converts diffusion weighted MR images in dicom series into Nrrd format for analysis in Slicer. This program has been tested on only a limited subset of DTI dicom formats available from Siemens, GE, and Phillips scanners. Work in progress to support dicom multi-frame data. The program parses dicom header to extract necessary information about measurement frame, diffusion weighting directions, b-values, etc, and write out a nrrd image. For non-diffusion weighted dicom images, it loads in an entire dicom series and writes out a single dicom volume in a .nhdr/.raw pair. -version: 0.2.0.$Revision: 916 $(alpha) + version: 0.2.0.$Revision: 916 $(alpha) -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DicomToNrrdConverter + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DicomToNrrdConverter -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -contributor: Xiaodong Tao (GE), Vince Magnotta (UIowa), Hans Johnson (UIowa) + contributor: Xiaodong Tao (GE), Vince Magnotta (UIowa), Hans Johnson (UIowa) -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. Additional support for DTI data produced on Philips scanners was contributed by Vincent Magnotta and Hans Johnson at the University of Iowa. - -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. Additional support for DTI data produced on Philips scanners was contributed by Vincent Magnotta and Hans Johnson at the University of Iowa. 
+ """ input_spec = DicomToNrrdConverterInputSpec output_spec = DicomToNrrdConverterOutputSpec @@ -157,19 +156,18 @@ class OrientScalarVolumeOutputSpec(TraitedSpec): class OrientScalarVolume(SEMLikeCommandLine): """title: Orient Scalar Volume -category: Converters - -description: Orients an output volume. Rearranges the slices in a volume according to the selected orientation. The slices are not interpolated. They are just reordered and/or permuted. The resulting volume will cover the original volume. NOTE: since Slicer takes into account the orientation of a volume, the re-oriented volume will not show any difference from the original volume, To see the difference, save the volume and display it with a system that either ignores the orientation of the image (e.g. Paraview) or displays individual images. + category: Converters -version: 0.1.0.$Revision: 19608 $(alpha) + description: Orients an output volume. Rearranges the slices in a volume according to the selected orientation. The slices are not interpolated. They are just reordered and/or permuted. The resulting volume will cover the original volume. NOTE: since Slicer takes into account the orientation of a volume, the re-oriented volume will not show any difference from the original volume, To see the difference, save the volume and display it with a system that either ignores the orientation of the image (e.g. Paraview) or displays individual images. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/OrientImage + version: 0.1.0.$Revision: 19608 $(alpha) -contributor: Bill Lorensen (GE) + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/OrientImage -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + contributor: Bill Lorensen (GE) -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = OrientScalarVolumeInputSpec output_spec = OrientScalarVolumeOutputSpec diff --git a/nipype/interfaces/slicer/diffusion/diffusion.py b/nipype/interfaces/slicer/diffusion/diffusion.py index 8fc0f0c6c9..2668f060c4 100644 --- a/nipype/interfaces/slicer/diffusion/diffusion.py +++ b/nipype/interfaces/slicer/diffusion/diffusion.py @@ -162,19 +162,18 @@ class ResampleDTIVolumeOutputSpec(TraitedSpec): class ResampleDTIVolume(SEMLikeCommandLine): """title: Resample DTI Volume -category: Diffusion.Diffusion Tensor Images + category: Diffusion.Diffusion Tensor Images -description: Resampling an image is a very important task in image analysis. It is especially important in the frame of image registration. This module implements DT image resampling through the use of itk Transforms. The resampling is controlled by the Output Spacing. "Resampling" is performed in space coordinates, not pixel/grid coordinates. It is quite important to ensure that image spacing is properly set on the images involved. The interpolator is required since the mapping from one space to the other will often require evaluation of the intensity of the image at non-grid positions. + description: Resampling an image is a very important task in image analysis. It is especially important in the frame of image registration. 
This module implements DT image resampling through the use of itk Transforms. The resampling is controlled by the Output Spacing. "Resampling" is performed in space coordinates, not pixel/grid coordinates. It is quite important to ensure that image spacing is properly set on the images involved. The interpolator is required since the mapping from one space to the other will often require evaluation of the intensity of the image at non-grid positions. -version: 0.1 + version: 0.1 -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ResampleDTI + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ResampleDTI -contributor: Francois Budin (UNC) + contributor: Francois Budin (UNC) -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. Information on the National Centers for Biomedical Computing can be obtained from http://nihroadmap.nih.gov/bioinformatics - -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. Information on the National Centers for Biomedical Computing can be obtained from http://nihroadmap.nih.gov/bioinformatics + """ input_spec = ResampleDTIVolumeInputSpec output_spec = ResampleDTIVolumeOutputSpec @@ -232,22 +231,21 @@ class DWIRicianLMMSEFilterOutputSpec(TraitedSpec): class DWIRicianLMMSEFilter(SEMLikeCommandLine): """title: DWI Rician LMMSE Filter -category: Diffusion.Diffusion Weighted Images - -description: This module reduces noise (or unwanted detail) on a set of diffusion weighted images. For this, it filters the image in the mean squared error sense using a Rician noise model. Images corresponding to each gradient direction, including baseline, are processed individually. The noise parameter is automatically estimated (noise estimation improved but slower). -Note that this is a general purpose filter for MRi images. The module jointLMMSE has been specifically designed for DWI volumes and shows a better performance, so its use is recommended instead. -A complete description of the algorithm in this module can be found in: -S. Aja-Fernandez, M. Niethammer, M. Kubicki, M. Shenton, and C.-F. Westin. Restoration of DWI data using a Rician LMMSE estimator. IEEE Transactions on Medical Imaging, 27(10): pp. 1389-1403, Oct. 2008. + category: Diffusion.Diffusion Weighted Images -version: 0.1.1.$Revision: 1 $(alpha) + description: This module reduces noise (or unwanted detail) on a set of diffusion weighted images. For this, it filters the image in the mean squared error sense using a Rician noise model. Images corresponding to each gradient direction, including baseline, are processed individually. The noise parameter is automatically estimated (noise estimation improved but slower). + Note that this is a general purpose filter for MRi images. The module jointLMMSE has been specifically designed for DWI volumes and shows a better performance, so its use is recommended instead. + A complete description of the algorithm in this module can be found in: + S. Aja-Fernandez, M. Niethammer, M. Kubicki, M. Shenton, and C.-F. Westin. Restoration of DWI data using a Rician LMMSE estimator. IEEE Transactions on Medical Imaging, 27(10): pp. 1389-1403, Oct. 2008. 
-documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/RicianLMMSEImageFilter + version: 0.1.1.$Revision: 1 $(alpha) -contributor: Antonio Tristan Vega (UVa), Santiago Aja Fernandez (UVa), Marc Niethammer (UNC) + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/RicianLMMSEImageFilter -acknowledgements: Partially founded by grant number TEC2007-67073/TCM from the Comision Interministerial de Ciencia y Tecnologia (Spain). + contributor: Antonio Tristan Vega (UVa), Santiago Aja Fernandez (UVa), Marc Niethammer (UNC) -""" + acknowledgements: Partially founded by grant number TEC2007-67073/TCM from the Comision Interministerial de Ciencia y Tecnologia (Spain). + """ input_spec = DWIRicianLMMSEFilterInputSpec output_spec = DWIRicianLMMSEFilterOutputSpec @@ -330,21 +328,20 @@ class TractographyLabelMapSeedingOutputSpec(TraitedSpec): class TractographyLabelMapSeeding(SEMLikeCommandLine): """title: Tractography Label Map Seeding -category: Diffusion.Diffusion Tensor Images - -description: Seed tracts on a Diffusion Tensor Image (DT) from a label map + category: Diffusion.Diffusion Tensor Images -version: 0.1.0.$Revision: 1892 $(alpha) + description: Seed tracts on a Diffusion Tensor Image (DT) from a label map -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Seeding + version: 0.1.0.$Revision: 1892 $(alpha) -license: slicer3 + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Seeding -contributor: Raul San Jose (SPL, BWH), Demian Wassermann (SPL, BWH) + license: slicer3 -acknowledgements: Laboratory of Mathematics in Imaging. This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + contributor: Raul San Jose (SPL, BWH), Demian Wassermann (SPL, BWH) -""" + acknowledgements: Laboratory of Mathematics in Imaging. This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = TractographyLabelMapSeedingInputSpec output_spec = TractographyLabelMapSeedingOutputSpec @@ -386,22 +383,21 @@ class DWIJointRicianLMMSEFilterOutputSpec(TraitedSpec): class DWIJointRicianLMMSEFilter(SEMLikeCommandLine): """title: DWI Joint Rician LMMSE Filter -category: Diffusion.Diffusion Weighted Images + category: Diffusion.Diffusion Weighted Images -description: This module reduces Rician noise (or unwanted detail) on a set of diffusion weighted images. For this, it filters the image in the mean squared error sense using a Rician noise model. The N closest gradient directions to the direction being processed are filtered together to improve the results: the noise-free signal is seen as an n-diemensional vector which has to be estimated with the LMMSE method from a set of corrupted measurements. To that end, the covariance matrix of the noise-free vector and the cross covariance between this signal and the noise have to be estimated, which is done taking into account the image formation process. -The noise parameter is automatically estimated from a rough segmentation of the background of the image. In this area the signal is simply 0, so that Rician statistics reduce to Rayleigh and the noise power can be easily estimated from the mode of the histogram. 
-A complete description of the algorithm may be found in: -Antonio Tristan-Vega and Santiago Aja-Fernandez, DWI filtering using joint information for DTI and HARDI, Medical Image Analysis, Volume 14, Issue 2, Pages 205-218. 2010. + description: This module reduces Rician noise (or unwanted detail) on a set of diffusion weighted images. For this, it filters the image in the mean squared error sense using a Rician noise model. The N closest gradient directions to the direction being processed are filtered together to improve the results: the noise-free signal is seen as an n-diemensional vector which has to be estimated with the LMMSE method from a set of corrupted measurements. To that end, the covariance matrix of the noise-free vector and the cross covariance between this signal and the noise have to be estimated, which is done taking into account the image formation process. + The noise parameter is automatically estimated from a rough segmentation of the background of the image. In this area the signal is simply 0, so that Rician statistics reduce to Rayleigh and the noise power can be easily estimated from the mode of the histogram. + A complete description of the algorithm may be found in: + Antonio Tristan-Vega and Santiago Aja-Fernandez, DWI filtering using joint information for DTI and HARDI, Medical Image Analysis, Volume 14, Issue 2, Pages 205-218. 2010. -version: 0.1.1.$Revision: 1 $(alpha) + version: 0.1.1.$Revision: 1 $(alpha) -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/JointRicianLMMSEImageFilter + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/JointRicianLMMSEImageFilter -contributor: Antonio Tristan Vega (UVa), Santiago Aja Fernandez (UVa) + contributor: Antonio Tristan Vega (UVa), Santiago Aja Fernandez (UVa) -acknowledgements: Partially founded by grant number TEC2007-67073/TCM from the Comision Interministerial de Ciencia y Tecnologia (Spain). - -""" + acknowledgements: Partially founded by grant number TEC2007-67073/TCM from the Comision Interministerial de Ciencia y Tecnologia (Spain). + """ input_spec = DWIJointRicianLMMSEFilterInputSpec output_spec = DWIJointRicianLMMSEFilterOutputSpec @@ -444,19 +440,18 @@ class DiffusionWeightedVolumeMaskingOutputSpec(TraitedSpec): class DiffusionWeightedVolumeMasking(SEMLikeCommandLine): """title: Diffusion Weighted Volume Masking -category: Diffusion.Diffusion Weighted Images - -description:

Performs a mask calculation from a diffusion weighted (DW) image.

Starting from a dw image, this module computes the baseline image averaging all the images without diffusion weighting and then applies the otsu segmentation algorithm in order to produce a mask. this mask can then be used when estimating the diffusion tensor (dt) image, not to estimate tensors all over the volume.

+ category: Diffusion.Diffusion Weighted Images -version: 0.1.0.$Revision: 1892 $(alpha) + description:

Performs a mask calculation from a diffusion weighted (DW) image.

Starting from a DW image, this module computes the baseline image by averaging all the images without diffusion weighting, and then applies the Otsu segmentation algorithm to produce a mask. This mask can then be used when estimating the diffusion tensor (DT) image, so that tensors are not estimated over the entire volume.

-documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DiffusionWeightedMasking + version: 0.1.0.$Revision: 1892 $(alpha) -license: slicer3 + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DiffusionWeightedMasking -contributor: Demian Wassermann (SPL, BWH) + license: slicer3 -""" + contributor: Demian Wassermann (SPL, BWH) + """ input_spec = DiffusionWeightedVolumeMaskingInputSpec output_spec = DiffusionWeightedVolumeMaskingOutputSpec @@ -490,19 +485,18 @@ class DTIimportOutputSpec(TraitedSpec): class DTIimport(SEMLikeCommandLine): """title: DTIimport -category: Diffusion.Diffusion Data Conversion + category: Diffusion.Diffusion Data Conversion -description: Import tensor datasets from various formats, including the NifTi file format + description: Import tensor datasets from various formats, including the NifTi file format -version: 1.0 + version: 1.0 -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DTIImport + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DTIImport -contributor: Sonia Pujol (SPL, BWH) + contributor: Sonia Pujol (SPL, BWH) -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NA-MIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. - -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NA-MIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = DTIimportInputSpec output_spec = DTIimportOutputSpec @@ -551,23 +545,22 @@ class DWIToDTIEstimationOutputSpec(TraitedSpec): class DWIToDTIEstimation(SEMLikeCommandLine): """title: DWI to DTI Estimation -category: Diffusion.Diffusion Weighted Images - -description: Performs a tensor model estimation from diffusion weighted images. + category: Diffusion.Diffusion Weighted Images -There are three estimation methods available: least squares, weigthed least squares and non-linear estimation. The first method is the traditional method for tensor estimation and the fastest one. Weighted least squares takes into account the noise characteristics of the MRI images to weight the DWI samples used in the estimation based on its intensity magnitude. The last method is the more complex. + description: Performs a tensor model estimation from diffusion weighted images. -version: 0.1.0.$Revision: 1892 $(alpha) + There are three estimation methods available: least squares, weigthed least squares and non-linear estimation. The first method is the traditional method for tensor estimation and the fastest one. Weighted least squares takes into account the noise characteristics of the MRI images to weight the DWI samples used in the estimation based on its intensity magnitude. The last method is the more complex. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DiffusionTensorEstimation + version: 0.1.0.$Revision: 1892 $(alpha) -license: slicer3 + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DiffusionTensorEstimation -contributor: Raul San Jose (SPL, BWH) + license: slicer3 -acknowledgements: This command module is based on the estimation functionality provided by the Teem library. 
This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + contributor: Raul San Jose (SPL, BWH) -""" + acknowledgements: This command module is based on the estimation functionality provided by the Teem library. This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = DWIToDTIEstimationInputSpec output_spec = DWIToDTIEstimationOutputSpec @@ -628,19 +621,18 @@ class DiffusionTensorScalarMeasurementsOutputSpec(TraitedSpec): class DiffusionTensorScalarMeasurements(SEMLikeCommandLine): """title: Diffusion Tensor Scalar Measurements -category: Diffusion.Diffusion Tensor Images + category: Diffusion.Diffusion Tensor Images -description: Compute a set of different scalar measurements from a tensor field, specially oriented for Diffusion Tensors where some rotationally invariant measurements, like Fractional Anisotropy, are highly used to describe the anistropic behaviour of the tensor. + description: Compute a set of different scalar measurements from a tensor field, specially oriented for Diffusion Tensors where some rotationally invariant measurements, like Fractional Anisotropy, are highly used to describe the anistropic behaviour of the tensor. -version: 0.1.0.$Revision: 1892 $(alpha) + version: 0.1.0.$Revision: 1892 $(alpha) -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DiffusionTensorMathematics + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DiffusionTensorMathematics -contributor: Raul San Jose (SPL, BWH) + contributor: Raul San Jose (SPL, BWH) -acknowledgements: LMI - -""" + acknowledgements: LMI + """ input_spec = DiffusionTensorScalarMeasurementsInputSpec output_spec = DiffusionTensorScalarMeasurementsOutputSpec @@ -667,19 +659,18 @@ class DTIexportOutputSpec(TraitedSpec): class DTIexport(SEMLikeCommandLine): """title: DTIexport -category: Diffusion.Diffusion Data Conversion - -description: Export DTI data to various file formats + category: Diffusion.Diffusion Data Conversion -version: 1.0 + description: Export DTI data to various file formats -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DTIExport + version: 1.0 -contributor: Sonia Pujol (SPL, BWH) + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DTIExport -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NA-MIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + contributor: Sonia Pujol (SPL, BWH) -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NA-MIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
+ """ input_spec = DTIexportInputSpec output_spec = DTIexportOutputSpec diff --git a/nipype/interfaces/slicer/filtering/arithmetic.py b/nipype/interfaces/slicer/filtering/arithmetic.py index 64d60feef3..f57d7adf37 100644 --- a/nipype/interfaces/slicer/filtering/arithmetic.py +++ b/nipype/interfaces/slicer/filtering/arithmetic.py @@ -46,19 +46,18 @@ class MultiplyScalarVolumesOutputSpec(TraitedSpec): class MultiplyScalarVolumes(SEMLikeCommandLine): """title: Multiply Scalar Volumes -category: Filtering.Arithmetic + category: Filtering.Arithmetic -description: Multiplies two images. Although all image types are supported on input, only signed types are produced. The two images do not have to have the same dimensions. + description: Multiplies two images. Although all image types are supported on input, only signed types are produced. The two images do not have to have the same dimensions. -version: 0.1.0.$Revision: 8595 $(alpha) + version: 0.1.0.$Revision: 8595 $(alpha) -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Multiply + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Multiply -contributor: Bill Lorensen (GE) + contributor: Bill Lorensen (GE) -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. - -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = MultiplyScalarVolumesInputSpec output_spec = MultiplyScalarVolumesOutputSpec @@ -101,19 +100,18 @@ class MaskScalarVolumeOutputSpec(TraitedSpec): class MaskScalarVolume(SEMLikeCommandLine): """title: Mask Scalar Volume -category: Filtering.Arithmetic - -description: Masks two images. The output image is set to 0 everywhere except where the chosen label from the mask volume is present, at which point it will retain it's original values. Although all image types are supported on input, only signed types are produced. The two images do not have to have the same dimensions. + category: Filtering.Arithmetic -version: 0.1.0.$Revision: 8595 $(alpha) + description: Masks two images. The output image is set to 0 everywhere except where the chosen label from the mask volume is present, at which point it will retain it's original values. Although all image types are supported on input, only signed types are produced. The two images do not have to have the same dimensions. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Mask + version: 0.1.0.$Revision: 8595 $(alpha) -contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH) + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Mask -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH) -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
+ """ input_spec = MaskScalarVolumeInputSpec output_spec = MaskScalarVolumeOutputSpec @@ -149,19 +147,18 @@ class SubtractScalarVolumesOutputSpec(TraitedSpec): class SubtractScalarVolumes(SEMLikeCommandLine): """title: Subtract Scalar Volumes -category: Filtering.Arithmetic - -description: Subtracts two images. Although all image types are supported on input, only signed types are produced. The two images do not have to have the same dimensions. + category: Filtering.Arithmetic -version: 0.1.0.$Revision: 19608 $(alpha) + description: Subtracts two images. Although all image types are supported on input, only signed types are produced. The two images do not have to have the same dimensions. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Subtract + version: 0.1.0.$Revision: 19608 $(alpha) -contributor: Bill Lorensen (GE) + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Subtract -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + contributor: Bill Lorensen (GE) -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = SubtractScalarVolumesInputSpec output_spec = SubtractScalarVolumesOutputSpec @@ -197,19 +194,18 @@ class AddScalarVolumesOutputSpec(TraitedSpec): class AddScalarVolumes(SEMLikeCommandLine): """title: Add Scalar Volumes -category: Filtering.Arithmetic + category: Filtering.Arithmetic -description: Adds two images. Although all image types are supported on input, only signed types are produced. The two images do not have to have the same dimensions. + description: Adds two images. Although all image types are supported on input, only signed types are produced. The two images do not have to have the same dimensions. -version: 0.1.0.$Revision: 19608 $(alpha) + version: 0.1.0.$Revision: 19608 $(alpha) -documentation-url: http://slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Add + documentation-url: http://slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Add -contributor: Bill Lorensen (GE) + contributor: Bill Lorensen (GE) -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. - -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = AddScalarVolumesInputSpec output_spec = AddScalarVolumesOutputSpec @@ -252,21 +248,20 @@ class CastScalarVolumeOutputSpec(TraitedSpec): class CastScalarVolume(SEMLikeCommandLine): """title: Cast Scalar Volume -category: Filtering.Arithmetic - -description: Cast a volume to a given data type. -Use at your own risk when casting an input volume into a lower precision type! -Allows casting to the same type as the input volume. + category: Filtering.Arithmetic -version: 0.1.0.$Revision: 2104 $(alpha) + description: Cast a volume to a given data type. + Use at your own risk when casting an input volume into a lower precision type! + Allows casting to the same type as the input volume. 
-documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Cast + version: 0.1.0.$Revision: 2104 $(alpha) -contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH) + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Cast -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH) -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = CastScalarVolumeInputSpec output_spec = CastScalarVolumeOutputSpec diff --git a/nipype/interfaces/slicer/filtering/checkerboardfilter.py b/nipype/interfaces/slicer/filtering/checkerboardfilter.py index 1789359e7d..6566f15248 100644 --- a/nipype/interfaces/slicer/filtering/checkerboardfilter.py +++ b/nipype/interfaces/slicer/filtering/checkerboardfilter.py @@ -48,19 +48,18 @@ class CheckerBoardFilterOutputSpec(TraitedSpec): class CheckerBoardFilter(SEMLikeCommandLine): """title: CheckerBoard Filter -category: Filtering + category: Filtering -description: Create a checkerboard volume of two volumes. The output volume will show the two inputs alternating according to the user supplied checkerPattern. This filter is often used to compare the results of image registration. Note that the second input is resampled to the same origin, spacing and direction before it is composed with the first input. The scalar type of the output volume will be the same as the input image scalar type. + description: Create a checkerboard volume of two volumes. The output volume will show the two inputs alternating according to the user supplied checkerPattern. This filter is often used to compare the results of image registration. Note that the second input is resampled to the same origin, spacing and direction before it is composed with the first input. The scalar type of the output volume will be the same as the input image scalar type. -version: 0.1.0.$Revision: 19608 $(alpha) + version: 0.1.0.$Revision: 19608 $(alpha) -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/CheckerBoard + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/CheckerBoard -contributor: Bill Lorensen (GE) + contributor: Bill Lorensen (GE) -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. - -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
+ """ input_spec = CheckerBoardFilterInputSpec output_spec = CheckerBoardFilterOutputSpec diff --git a/nipype/interfaces/slicer/filtering/denoising.py b/nipype/interfaces/slicer/filtering/denoising.py index 6c26b74618..c28fc0746d 100644 --- a/nipype/interfaces/slicer/filtering/denoising.py +++ b/nipype/interfaces/slicer/filtering/denoising.py @@ -51,21 +51,20 @@ class GradientAnisotropicDiffusionOutputSpec(TraitedSpec): class GradientAnisotropicDiffusion(SEMLikeCommandLine): """title: Gradient Anisotropic Diffusion -category: Filtering.Denoising + category: Filtering.Denoising -description: Runs gradient anisotropic diffusion on a volume. + description: Runs gradient anisotropic diffusion on a volume. -Anisotropic diffusion methods reduce noise (or unwanted detail) in images while preserving specific image features, like edges. For many applications, there is an assumption that light-dark transitions (edges) are interesting. Standard isotropic diffusion methods move and blur light-dark boundaries. Anisotropic diffusion methods are formulated to specifically preserve edges. The conductance term for this implementation is a function of the gradient magnitude of the image at each point, reducing the strength of diffusion at edges. The numerical implementation of this equation is similar to that described in the Perona-Malik paper, but uses a more robust technique for gradient magnitude estimation and has been generalized to N-dimensions. + Anisotropic diffusion methods reduce noise (or unwanted detail) in images while preserving specific image features, like edges. For many applications, there is an assumption that light-dark transitions (edges) are interesting. Standard isotropic diffusion methods move and blur light-dark boundaries. Anisotropic diffusion methods are formulated to specifically preserve edges. The conductance term for this implementation is a function of the gradient magnitude of the image at each point, reducing the strength of diffusion at edges. The numerical implementation of this equation is similar to that described in the Perona-Malik paper, but uses a more robust technique for gradient magnitude estimation and has been generalized to N-dimensions. -version: 0.1.0.$Revision: 19608 $(alpha) + version: 0.1.0.$Revision: 19608 $(alpha) -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GradientAnisotropicDiffusion + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GradientAnisotropicDiffusion -contributor: Bill Lorensen (GE) + contributor: Bill Lorensen (GE) -acknowledgements: This command module was derived from Insight/Examples (copyright) Insight Software Consortium - -""" + acknowledgements: This command module was derived from Insight/Examples (copyright) Insight Software Consortium + """ input_spec = GradientAnisotropicDiffusionInputSpec output_spec = GradientAnisotropicDiffusionOutputSpec @@ -106,23 +105,22 @@ class CurvatureAnisotropicDiffusionOutputSpec(TraitedSpec): class CurvatureAnisotropicDiffusion(SEMLikeCommandLine): """title: Curvature Anisotropic Diffusion -category: Filtering.Denoising - -description: Performs anisotropic diffusion on an image using a modified curvature diffusion equation (MCDE). + category: Filtering.Denoising -MCDE does not exhibit the edge enhancing properties of classic anisotropic diffusion, which can under certain conditions undergo a 'negative' diffusion, which enhances the contrast of edges. 
Equations of the form of MCDE always undergo positive diffusion, with the conductance term only varying the strength of that diffusion. + description: Performs anisotropic diffusion on an image using a modified curvature diffusion equation (MCDE). - Qualitatively, MCDE compares well with other non-linear diffusion techniques. It is less sensitive to contrast than classic Perona-Malik style diffusion, and preserves finer detailed structures in images. There is a potential speed trade-off for using this function in place of Gradient Anisotropic Diffusion. Each iteration of the solution takes roughly twice as long. Fewer iterations, however, may be required to reach an acceptable solution. + MCDE does not exhibit the edge enhancing properties of classic anisotropic diffusion, which can under certain conditions undergo a 'negative' diffusion, which enhances the contrast of edges. Equations of the form of MCDE always undergo positive diffusion, with the conductance term only varying the strength of that diffusion. -version: 0.1.0.$Revision: 19608 $(alpha) + Qualitatively, MCDE compares well with other non-linear diffusion techniques. It is less sensitive to contrast than classic Perona-Malik style diffusion, and preserves finer detailed structures in images. There is a potential speed trade-off for using this function in place of Gradient Anisotropic Diffusion. Each iteration of the solution takes roughly twice as long. Fewer iterations, however, may be required to reach an acceptable solution. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/CurvatureAnisotropicDiffusion + version: 0.1.0.$Revision: 19608 $(alpha) -contributor: Bill Lorensen (GE) + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/CurvatureAnisotropicDiffusion -acknowledgements: This command module was derived from Insight/Examples (copyright) Insight Software Consortium + contributor: Bill Lorensen (GE) -""" + acknowledgements: This command module was derived from Insight/Examples (copyright) Insight Software Consortium + """ input_spec = CurvatureAnisotropicDiffusionInputSpec output_spec = CurvatureAnisotropicDiffusionOutputSpec @@ -153,19 +151,18 @@ class GaussianBlurImageFilterOutputSpec(TraitedSpec): class GaussianBlurImageFilter(SEMLikeCommandLine): """title: Gaussian Blur Image Filter -category: Filtering.Denoising + category: Filtering.Denoising -description: Apply a gaussian blurr to an image + description: Apply a gaussian blurr to an image -version: 0.1.0.$Revision: 1.1 $(alpha) + version: 0.1.0.$Revision: 1.1 $(alpha) -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GaussianBlurImageFilter + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GaussianBlurImageFilter -contributor: Julien Jomier (Kitware), Stephen Aylward (Kitware) + contributor: Julien Jomier (Kitware), Stephen Aylward (Kitware) -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. - -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
+ """ input_spec = GaussianBlurImageFilterInputSpec output_spec = GaussianBlurImageFilterOutputSpec @@ -200,19 +197,18 @@ class MedianImageFilterOutputSpec(TraitedSpec): class MedianImageFilter(SEMLikeCommandLine): """title: Median Image Filter -category: Filtering.Denoising - -description: The MedianImageFilter is commonly used as a robust approach for noise reduction. This filter is particularly efficient against "salt-and-pepper" noise. In other words, it is robust to the presence of gray-level outliers. MedianImageFilter computes the value of each output pixel as the statistical median of the neighborhood of values around the corresponding input pixel. + category: Filtering.Denoising -version: 0.1.0.$Revision: 19608 $(alpha) + description: The MedianImageFilter is commonly used as a robust approach for noise reduction. This filter is particularly efficient against "salt-and-pepper" noise. In other words, it is robust to the presence of gray-level outliers. MedianImageFilter computes the value of each output pixel as the statistical median of the neighborhood of values around the corresponding input pixel. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/MedianImageFilter + version: 0.1.0.$Revision: 19608 $(alpha) -contributor: Bill Lorensen (GE) + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/MedianImageFilter -acknowledgements: This command module was derived from Insight/Examples/Filtering/MedianImageFilter (copyright) Insight Software Consortium + contributor: Bill Lorensen (GE) -""" + acknowledgements: This command module was derived from Insight/Examples/Filtering/MedianImageFilter (copyright) Insight Software Consortium + """ input_spec = MedianImageFilterInputSpec output_spec = MedianImageFilterOutputSpec diff --git a/nipype/interfaces/slicer/filtering/extractskeleton.py b/nipype/interfaces/slicer/filtering/extractskeleton.py index 7900be472c..d6cca550e8 100644 --- a/nipype/interfaces/slicer/filtering/extractskeleton.py +++ b/nipype/interfaces/slicer/filtering/extractskeleton.py @@ -53,19 +53,18 @@ class ExtractSkeletonOutputSpec(TraitedSpec): class ExtractSkeleton(SEMLikeCommandLine): """title: Extract Skeleton -category: Filtering + category: Filtering -description: Extract the skeleton of a binary object. The skeleton can be limited to being a 1D curve or allowed to be a full 2D manifold. The branches of the skeleton can be pruned so that only the maximal center skeleton is returned. + description: Extract the skeleton of a binary object. The skeleton can be limited to being a 1D curve or allowed to be a full 2D manifold. The branches of the skeleton can be pruned so that only the maximal center skeleton is returned. -version: 0.1.0.$Revision: 2104 $(alpha) + version: 0.1.0.$Revision: 2104 $(alpha) -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ExtractSkeleton + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ExtractSkeleton -contributor: Pierre Seroul (UNC), Martin Styner (UNC), Guido Gerig (UNC), Stephen Aylward (Kitware) + contributor: Pierre Seroul (UNC), Martin Styner (UNC), Guido Gerig (UNC), Stephen Aylward (Kitware) -acknowledgements: The original implementation of this method was provided by ETH Zurich, Image Analysis Laboratory of Profs Olaf Kuebler, Gabor Szekely and Guido Gerig. Martin Styner at UNC, Chapel Hill made enhancements. 
Wrapping for Slicer was provided by Pierre Seroul and Stephen Aylward at Kitware, Inc. - -""" + acknowledgements: The original implementation of this method was provided by ETH Zurich, Image Analysis Laboratory of Profs Olaf Kuebler, Gabor Szekely and Guido Gerig. Martin Styner at UNC, Chapel Hill made enhancements. Wrapping for Slicer was provided by Pierre Seroul and Stephen Aylward at Kitware, Inc. + """ input_spec = ExtractSkeletonInputSpec output_spec = ExtractSkeletonOutputSpec diff --git a/nipype/interfaces/slicer/filtering/histogrammatching.py b/nipype/interfaces/slicer/filtering/histogrammatching.py index 9b6cb17813..e6132bd987 100644 --- a/nipype/interfaces/slicer/filtering/histogrammatching.py +++ b/nipype/interfaces/slicer/filtering/histogrammatching.py @@ -60,25 +60,24 @@ class HistogramMatchingOutputSpec(TraitedSpec): class HistogramMatching(SEMLikeCommandLine): """title: Histogram Matching -category: Filtering + category: Filtering -description: Normalizes the grayscale values of a source image based on the grayscale values of a reference image. This filter uses a histogram matching technique where the histograms of the two images are matched only at a specified number of quantile values. + description: Normalizes the grayscale values of a source image based on the grayscale values of a reference image. This filter uses a histogram matching technique where the histograms of the two images are matched only at a specified number of quantile values. -The filter was orginally designed to normalize MR images of the sameMR protocol and same body part. The algorithm works best if background pixels are excluded from both the source and reference histograms. A simple background exclusion method is to exclude all pixels whose grayscale values are smaller than the mean grayscale value. ThresholdAtMeanIntensity switches on this simple background exclusion method. + The filter was orginally designed to normalize MR images of the sameMR protocol and same body part. The algorithm works best if background pixels are excluded from both the source and reference histograms. A simple background exclusion method is to exclude all pixels whose grayscale values are smaller than the mean grayscale value. ThresholdAtMeanIntensity switches on this simple background exclusion method. -Number of match points governs the number of quantile values to be matched. + Number of match points governs the number of quantile values to be matched. -The filter assumes that both the source and reference are of the same type and that the input and output image type have the same number of dimension and have scalar pixel types. + The filter assumes that both the source and reference are of the same type and that the input and output image type have the same number of dimension and have scalar pixel types. -version: 0.1.0.$Revision: 19608 $(alpha) + version: 0.1.0.$Revision: 19608 $(alpha) -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/HistogramMatching + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/HistogramMatching -contributor: Bill Lorensen (GE) + contributor: Bill Lorensen (GE) -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
- -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = HistogramMatchingInputSpec output_spec = HistogramMatchingOutputSpec diff --git a/nipype/interfaces/slicer/filtering/imagelabelcombine.py b/nipype/interfaces/slicer/filtering/imagelabelcombine.py index 666385845d..b8990f3e21 100644 --- a/nipype/interfaces/slicer/filtering/imagelabelcombine.py +++ b/nipype/interfaces/slicer/filtering/imagelabelcombine.py @@ -46,17 +46,16 @@ class ImageLabelCombineOutputSpec(TraitedSpec): class ImageLabelCombine(SEMLikeCommandLine): """title: Image Label Combine -category: Filtering + category: Filtering -description: Combine two label maps into one + description: Combine two label maps into one -version: 0.1.0 + version: 0.1.0 -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ImageLabelCombine + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ImageLabelCombine -contributor: Alex Yarmarkovich (SPL, BWH) - -""" + contributor: Alex Yarmarkovich (SPL, BWH) + """ input_spec = ImageLabelCombineInputSpec output_spec = ImageLabelCombineOutputSpec diff --git a/nipype/interfaces/slicer/filtering/morphology.py b/nipype/interfaces/slicer/filtering/morphology.py index b33d3e9c6d..40e4960d2b 100644 --- a/nipype/interfaces/slicer/filtering/morphology.py +++ b/nipype/interfaces/slicer/filtering/morphology.py @@ -39,29 +39,28 @@ class GrayscaleGrindPeakImageFilterOutputSpec(TraitedSpec): class GrayscaleGrindPeakImageFilter(SEMLikeCommandLine): """title: Grayscale Grind Peak Image Filter -category: Filtering.Morphology + category: Filtering.Morphology -description: GrayscaleGrindPeakImageFilter removes peaks in a grayscale image. Peaks are local maxima in the grayscale topography that are not connected to boundaries of the image. Gray level values adjacent to a peak are extrapolated through the peak. + description: GrayscaleGrindPeakImageFilter removes peaks in a grayscale image. Peaks are local maxima in the grayscale topography that are not connected to boundaries of the image. Gray level values adjacent to a peak are extrapolated through the peak. -This filter is used to smooth over local maxima without affecting the values of local minima. If you take the difference between the output of this filter and the original image (and perhaps threshold the difference above a small value), you'll obtain a map of the local maxima. + This filter is used to smooth over local maxima without affecting the values of local minima. If you take the difference between the output of this filter and the original image (and perhaps threshold the difference above a small value), you'll obtain a map of the local maxima. -This filter uses the GrayscaleGeodesicDilateImageFilter. It provides its own input as the "mask" input to the geodesic erosion. The "marker" image for the geodesic erosion is constructed such that boundary pixels match the boundary pixels of the input image and the interior pixels are set to the minimum pixel value in the input image. + This filter uses the GrayscaleGeodesicDilateImageFilter. It provides its own input as the "mask" input to the geodesic erosion. The "marker" image for the geodesic erosion is constructed such that boundary pixels match the boundary pixels of the input image and the interior pixels are set to the minimum pixel value in the input image. 
-This filter is the dual to the GrayscaleFillholeImageFilter which implements the Fillhole algorithm. Since it is a dual, it is somewhat superfluous but is provided as a convenience. + This filter is the dual to the GrayscaleFillholeImageFilter which implements the Fillhole algorithm. Since it is a dual, it is somewhat superfluous but is provided as a convenience. -Geodesic morphology and the Fillhole algorithm is described in Chapter 6 of Pierre Soille's book "Morphological Image Analysis: Principles and Applications", Second Edition, Springer, 2003. + Geodesic morphology and the Fillhole algorithm is described in Chapter 6 of Pierre Soille's book "Morphological Image Analysis: Principles and Applications", Second Edition, Springer, 2003. -A companion filter, Grayscale Fill Hole, fills holes in grayscale images. + A companion filter, Grayscale Fill Hole, fills holes in grayscale images. -version: 0.1.0.$Revision: 19608 $(alpha) + version: 0.1.0.$Revision: 19608 $(alpha) -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GrayscaleGrindPeakImageFilter + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GrayscaleGrindPeakImageFilter -contributor: Bill Lorensen (GE) + contributor: Bill Lorensen (GE) -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. - -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = GrayscaleGrindPeakImageFilterInputSpec output_spec = GrayscaleGrindPeakImageFilterOutputSpec @@ -90,27 +89,26 @@ class GrayscaleFillHoleImageFilterOutputSpec(TraitedSpec): class GrayscaleFillHoleImageFilter(SEMLikeCommandLine): """title: Grayscale Fill Hole Image Filter -category: Filtering.Morphology - -description: GrayscaleFillholeImageFilter fills holes in a grayscale image. Holes are local minima in the grayscale topography that are not connected to boundaries of the image. Gray level values adjacent to a hole are extrapolated across the hole. + category: Filtering.Morphology -This filter is used to smooth over local minima without affecting the values of local maxima. If you take the difference between the output of this filter and the original image (and perhaps threshold the difference above a small value), you'll obtain a map of the local minima. + description: GrayscaleFillholeImageFilter fills holes in a grayscale image. Holes are local minima in the grayscale topography that are not connected to boundaries of the image. Gray level values adjacent to a hole are extrapolated across the hole. -This filter uses the itkGrayscaleGeodesicErodeImageFilter. It provides its own input as the "mask" input to the geodesic erosion. The "marker" image for the geodesic erosion is constructed such that boundary pixels match the boundary pixels of the input image and the interior pixels are set to the maximum pixel value in the input image. + This filter is used to smooth over local minima without affecting the values of local maxima. If you take the difference between the output of this filter and the original image (and perhaps threshold the difference above a small value), you'll obtain a map of the local minima. 
- Geodesic morphology and the Fillhole algorithm is described in Chapter 6 of Pierre Soille's book "Morphological Image Analysis: Principles and Applications", Second Edition, Springer, 2003. + This filter uses the itkGrayscaleGeodesicErodeImageFilter. It provides its own input as the "mask" input to the geodesic erosion. The "marker" image for the geodesic erosion is constructed such that boundary pixels match the boundary pixels of the input image and the interior pixels are set to the maximum pixel value in the input image. - A companion filter, Grayscale Grind Peak, removes peaks in grayscale images. + Geodesic morphology and the Fillhole algorithm is described in Chapter 6 of Pierre Soille's book "Morphological Image Analysis: Principles and Applications", Second Edition, Springer, 2003. -version: 0.1.0.$Revision: 19608 $(alpha) + A companion filter, Grayscale Grind Peak, removes peaks in grayscale images. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GrayscaleFillHoleImageFilter + version: 0.1.0.$Revision: 19608 $(alpha) -contributor: Bill Lorensen (GE) + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GrayscaleFillHoleImageFilter -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + contributor: Bill Lorensen (GE) -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = GrayscaleFillHoleImageFilterInputSpec output_spec = GrayscaleFillHoleImageFilterOutputSpec diff --git a/nipype/interfaces/slicer/filtering/n4itkbiasfieldcorrection.py b/nipype/interfaces/slicer/filtering/n4itkbiasfieldcorrection.py index b1243160b5..1de5e87e2f 100644 --- a/nipype/interfaces/slicer/filtering/n4itkbiasfieldcorrection.py +++ b/nipype/interfaces/slicer/filtering/n4itkbiasfieldcorrection.py @@ -88,19 +88,18 @@ class N4ITKBiasFieldCorrectionOutputSpec(TraitedSpec): class N4ITKBiasFieldCorrection(SEMLikeCommandLine): """title: N4ITK MRI Bias correction -category: Filtering + category: Filtering -description: Performs image bias correction using N4 algorithm. This module is based on the ITK filters contributed in the following publication: Tustison N, Gee J "N4ITK: Nick's N3 ITK Implementation For MRI Bias Field Correction", The Insight Journal 2009 January-June, http://hdl.handle.net/10380/3053 + description: Performs image bias correction using N4 algorithm. 
This module is based on the ITK filters contributed in the following publication: Tustison N, Gee J "N4ITK: Nick's N3 ITK Implementation For MRI Bias Field Correction", The Insight Journal 2009 January-June, http://hdl.handle.net/10380/3053 -version: 9 + version: 9 -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/N4ITKBiasFieldCorrection + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/N4ITKBiasFieldCorrection -contributor: Nick Tustison (UPenn), Andrey Fedorov (SPL, BWH), Ron Kikinis (SPL, BWH) + contributor: Nick Tustison (UPenn), Andrey Fedorov (SPL, BWH), Ron Kikinis (SPL, BWH) -acknowledgements: The development of this module was partially supported by NIH grants R01 AA016748-01, R01 CA111288 and U01 CA151261 as well as by NA-MIC, NAC, NCIGT and the Slicer community. - -""" + acknowledgements: The development of this module was partially supported by NIH grants R01 AA016748-01, R01 CA111288 and U01 CA151261 as well as by NA-MIC, NAC, NCIGT and the Slicer community. + """ input_spec = N4ITKBiasFieldCorrectionInputSpec output_spec = N4ITKBiasFieldCorrectionOutputSpec diff --git a/nipype/interfaces/slicer/filtering/resamplescalarvectordwivolume.py b/nipype/interfaces/slicer/filtering/resamplescalarvectordwivolume.py index c41827cb39..0d7c0777dd 100644 --- a/nipype/interfaces/slicer/filtering/resamplescalarvectordwivolume.py +++ b/nipype/interfaces/slicer/filtering/resamplescalarvectordwivolume.py @@ -145,23 +145,22 @@ class ResampleScalarVectorDWIVolumeOutputSpec(TraitedSpec): class ResampleScalarVectorDWIVolume(SEMLikeCommandLine): """title: Resample Scalar/Vector/DWI Volume -category: Filtering + category: Filtering -description: This module implements image and vector-image resampling through the use of itk Transforms.It can also handle diffusion weighted MRI image resampling. "Resampling" is performed in space coordinates, not pixel/grid coordinates. It is quite important to ensure that image spacing is properly set on the images involved. The interpolator is required since the mapping from one space to the other will often require evaluation of the intensity of the image at non-grid positions. + description: This module implements image and vector-image resampling through the use of itk Transforms.It can also handle diffusion weighted MRI image resampling. "Resampling" is performed in space coordinates, not pixel/grid coordinates. It is quite important to ensure that image spacing is properly set on the images involved. The interpolator is required since the mapping from one space to the other will often require evaluation of the intensity of the image at non-grid positions. -Warning: To resample DWMR Images, use nrrd input and output files. + Warning: To resample DWMR Images, use nrrd input and output files. 
-Warning: Do not use to resample Diffusion Tensor Images, tensors would not be reoriented + Warning: Do not use to resample Diffusion Tensor Images, tensors would not be reoriented -version: 0.1 + version: 0.1 -documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ResampleScalarVectorDWIVolume + documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ResampleScalarVectorDWIVolume -contributor: Francois Budin (UNC) + contributor: Francois Budin (UNC) -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. Information on the National Centers for Biomedical Computing can be obtained from http://nihroadmap.nih.gov/bioinformatics - -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. Information on the National Centers for Biomedical Computing can be obtained from http://nihroadmap.nih.gov/bioinformatics + """ input_spec = ResampleScalarVectorDWIVolumeInputSpec output_spec = ResampleScalarVectorDWIVolumeOutputSpec diff --git a/nipype/interfaces/slicer/filtering/thresholdscalarvolume.py b/nipype/interfaces/slicer/filtering/thresholdscalarvolume.py index e72284456b..2fdfc76d52 100644 --- a/nipype/interfaces/slicer/filtering/thresholdscalarvolume.py +++ b/nipype/interfaces/slicer/filtering/thresholdscalarvolume.py @@ -51,19 +51,18 @@ class ThresholdScalarVolumeOutputSpec(TraitedSpec): class ThresholdScalarVolume(SEMLikeCommandLine): """title: Threshold Scalar Volume -category: Filtering + category: Filtering -description:

Threshold an image.

Set image values to a user-specified outside value if they are below, above, or between simple threshold values.

ThresholdAbove: The values greater than or equal to the threshold value are set to OutsideValue.

ThresholdBelow: The values less than or equal to the threshold value are set to OutsideValue.

ThresholdOutside: The values outside the range Lower-Upper are set to OutsideValue.

Although all image types are supported on input, only signed types are produced.

+ description:

Threshold an image.

Set image values to a user-specified outside value if they are below, above, or between simple threshold values.

ThresholdAbove: The values greater than or equal to the threshold value are set to OutsideValue.

ThresholdBelow: The values less than or equal to the threshold value are set to OutsideValue.

ThresholdOutside: The values outside the range Lower-Upper are set to OutsideValue.

Although all image types are supported on input, only signed types are produced.

-version: 0.1.0.$Revision: 2104 $(alpha) + version: 0.1.0.$Revision: 2104 $(alpha) -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Threshold + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Threshold -contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH) + contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH) -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. - -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = ThresholdScalarVolumeInputSpec output_spec = ThresholdScalarVolumeOutputSpec diff --git a/nipype/interfaces/slicer/filtering/votingbinaryholefillingimagefilter.py b/nipype/interfaces/slicer/filtering/votingbinaryholefillingimagefilter.py index 2ed3736d1b..d8756b0b05 100644 --- a/nipype/interfaces/slicer/filtering/votingbinaryholefillingimagefilter.py +++ b/nipype/interfaces/slicer/filtering/votingbinaryholefillingimagefilter.py @@ -57,19 +57,18 @@ class VotingBinaryHoleFillingImageFilterOutputSpec(TraitedSpec): class VotingBinaryHoleFillingImageFilter(SEMLikeCommandLine): """title: Voting Binary Hole Filling Image Filter -category: Filtering + category: Filtering -description: Applies a voting operation in order to fill-in cavities. This can be used for smoothing contours and for filling holes in binary images. This technique is used frequently when segmenting complete organs that may have ducts or vasculature that may not have been included in the initial segmentation, e.g. lungs, kidneys, liver. + description: Applies a voting operation in order to fill-in cavities. This can be used for smoothing contours and for filling holes in binary images. This technique is used frequently when segmenting complete organs that may have ducts or vasculature that may not have been included in the initial segmentation, e.g. lungs, kidneys, liver. 
-version: 0.1.0.$Revision: 19608 $(alpha) + version: 0.1.0.$Revision: 19608 $(alpha) -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/VotingBinaryHoleFillingImageFilter + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/VotingBinaryHoleFillingImageFilter -contributor: Bill Lorensen (GE) + contributor: Bill Lorensen (GE) -acknowledgements: This command module was derived from Insight/Examples/Filtering/VotingBinaryHoleFillingImageFilter (copyright) Insight Software Consortium - -""" + acknowledgements: This command module was derived from Insight/Examples/Filtering/VotingBinaryHoleFillingImageFilter (copyright) Insight Software Consortium + """ input_spec = VotingBinaryHoleFillingImageFilterInputSpec output_spec = VotingBinaryHoleFillingImageFilterOutputSpec diff --git a/nipype/interfaces/slicer/generate_classes.py b/nipype/interfaces/slicer/generate_classes.py index 54eeb0b089..b725cbc589 100644 --- a/nipype/interfaces/slicer/generate_classes.py +++ b/nipype/interfaces/slicer/generate_classes.py @@ -16,7 +16,7 @@ def force_to_valid_python_variable_name(old_name): - """ Valid c++ names are not always valid in python, so + """Valid c++ names are not always valid in python, so provide alternate naming >>> force_to_valid_python_variable_name('lambda') @@ -120,9 +120,9 @@ def configuration(parent_package='',top_path=None): def generate_all_classes( modules_list=[], launcher=[], redirect_x=False, mipav_hacks=False ): - """ modules_list contains all the SEM compliant tools that should have wrappers created for them. - launcher containtains the command line prefix wrapper arugments needed to prepare - a proper environment for each of the modules. + """modules_list contains all the SEM compliant tools that should have wrappers created for them. + launcher containtains the command line prefix wrapper arugments needed to prepare + a proper environment for each of the modules. """ all_code = {} for module in modules_list: @@ -367,14 +367,18 @@ def generate_class( param.getElementsByTagName("channel")[0].firstChild.nodeValue == "input" ): - if param.nodeName in [ - "file", - "directory", - "image", - "geometry", - "transform", - "table", - ] and type not in ["InputMultiPath", "traits.List"]: + if ( + param.nodeName + in [ + "file", + "directory", + "image", + "geometry", + "transform", + "table", + ] + and type not in ["InputMultiPath", "traits.List"] + ): traitsParams["exists"] = True inputTraits.append( "%s = %s(%s%s)" diff --git a/nipype/interfaces/slicer/legacy/converters.py b/nipype/interfaces/slicer/legacy/converters.py index aadd840d71..490eb5b23c 100644 --- a/nipype/interfaces/slicer/legacy/converters.py +++ b/nipype/interfaces/slicer/legacy/converters.py @@ -33,19 +33,18 @@ class BSplineToDeformationFieldOutputSpec(TraitedSpec): class BSplineToDeformationField(SEMLikeCommandLine): """title: BSpline to deformation field -category: Legacy.Converters + category: Legacy.Converters -description: Create a dense deformation field from a bspline+bulk transform. + description: Create a dense deformation field from a bspline+bulk transform. 
-version: 0.1.0.$Revision: 2104 $(alpha) + version: 0.1.0.$Revision: 2104 $(alpha) -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BSplineToDeformationField + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BSplineToDeformationField -contributor: Andrey Fedorov (SPL, BWH) + contributor: Andrey Fedorov (SPL, BWH) -acknowledgements: This work is funded by NIH grants R01 CA111288 and U01 CA151261. - -""" + acknowledgements: This work is funded by NIH grants R01 CA111288 and U01 CA151261. + """ input_spec = BSplineToDeformationFieldInputSpec output_spec = BSplineToDeformationFieldOutputSpec diff --git a/nipype/interfaces/slicer/legacy/diffusion/denoising.py b/nipype/interfaces/slicer/legacy/diffusion/denoising.py index 0cde8fe64e..b868193511 100644 --- a/nipype/interfaces/slicer/legacy/diffusion/denoising.py +++ b/nipype/interfaces/slicer/legacy/diffusion/denoising.py @@ -63,23 +63,22 @@ class DWIUnbiasedNonLocalMeansFilterOutputSpec(TraitedSpec): class DWIUnbiasedNonLocalMeansFilter(SEMLikeCommandLine): """title: DWI Unbiased Non Local Means Filter -category: Legacy.Diffusion.Denoising + category: Legacy.Diffusion.Denoising -description: This module reduces noise (or unwanted detail) on a set of diffusion weighted images. For this, it filters the images using a Unbiased Non Local Means for Rician noise algorithm. It exploits not only the spatial redundancy, but the redundancy in similar gradient directions as well; it takes into account the N closest gradient directions to the direction being processed (a maximum of 5 gradient directions is allowed to keep a reasonable computational load, since we do not use neither similarity maps nor block-wise implementation). -The noise parameter is automatically estimated in the same way as in the jointLMMSE module. -A complete description of the algorithm may be found in: -Antonio Tristan-Vega and Santiago Aja-Fernandez, DWI filtering using joint information for DTI and HARDI, Medical Image Analysis, Volume 14, Issue 2, Pages 205-218. 2010. -Please, note that the execution of this filter is extremely slow, son only very conservative parameters (block size and search size as small as possible) should be used. Even so, its execution may take several hours. The advantage of this filter over joint LMMSE is its better preservation of edges and fine structures. + description: This module reduces noise (or unwanted detail) on a set of diffusion weighted images. For this, it filters the images using a Unbiased Non Local Means for Rician noise algorithm. It exploits not only the spatial redundancy, but the redundancy in similar gradient directions as well; it takes into account the N closest gradient directions to the direction being processed (a maximum of 5 gradient directions is allowed to keep a reasonable computational load, since we do not use neither similarity maps nor block-wise implementation). + The noise parameter is automatically estimated in the same way as in the jointLMMSE module. + A complete description of the algorithm may be found in: + Antonio Tristan-Vega and Santiago Aja-Fernandez, DWI filtering using joint information for DTI and HARDI, Medical Image Analysis, Volume 14, Issue 2, Pages 205-218. 2010. + Please, note that the execution of this filter is extremely slow, son only very conservative parameters (block size and search size as small as possible) should be used. Even so, its execution may take several hours. 
The advantage of this filter over joint LMMSE is its better preservation of edges and fine structures. -version: 0.0.1.$Revision: 1 $(alpha) + version: 0.0.1.$Revision: 1 $(alpha) -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/UnbiasedNonLocalMeansFilterForDWI + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/UnbiasedNonLocalMeansFilterForDWI -contributor: Antonio Tristan Vega (UVa), Santiago Aja Fernandez (UVa) + contributor: Antonio Tristan Vega (UVa), Santiago Aja Fernandez (UVa) -acknowledgements: Partially founded by grant number TEC2007-67073/TCM from the Comision Interministerial de Ciencia y Tecnologia (Spain). - -""" + acknowledgements: Partially founded by grant number TEC2007-67073/TCM from the Comision Interministerial de Ciencia y Tecnologia (Spain). + """ input_spec = DWIUnbiasedNonLocalMeansFilterInputSpec output_spec = DWIUnbiasedNonLocalMeansFilterOutputSpec diff --git a/nipype/interfaces/slicer/legacy/filtering.py b/nipype/interfaces/slicer/legacy/filtering.py index ee041bbc50..dda29920b2 100644 --- a/nipype/interfaces/slicer/legacy/filtering.py +++ b/nipype/interfaces/slicer/legacy/filtering.py @@ -50,23 +50,22 @@ class OtsuThresholdImageFilterOutputSpec(TraitedSpec): class OtsuThresholdImageFilter(SEMLikeCommandLine): """title: Otsu Threshold Image Filter -category: Legacy.Filtering + category: Legacy.Filtering -description: This filter creates a binary thresholded image that separates an image into foreground and background components. The filter calculates the optimum threshold separating those two classes so that their combined spread (intra-class variance) is minimal (see http://en.wikipedia.org/wiki/Otsu%27s_method). Then the filter applies that threshold to the input image using the itkBinaryThresholdImageFilter. The numberOfHistogram bins can be set for the Otsu Calculator. The insideValue and outsideValue can be set for the BinaryThresholdImageFilter. The filter produces a labeled volume. + description: This filter creates a binary thresholded image that separates an image into foreground and background components. The filter calculates the optimum threshold separating those two classes so that their combined spread (intra-class variance) is minimal (see http://en.wikipedia.org/wiki/Otsu%27s_method). Then the filter applies that threshold to the input image using the itkBinaryThresholdImageFilter. The numberOfHistogram bins can be set for the Otsu Calculator. The insideValue and outsideValue can be set for the BinaryThresholdImageFilter. The filter produces a labeled volume. -The original reference is: + The original reference is: -N.Otsu, A threshold selection method from gray level histograms, IEEE Trans.Syst.ManCybern.SMC-9,62–66 1979. + N.Otsu, A threshold selection method from gray level histograms, IEEE Trans.Syst.ManCybern.SMC-9,62–66 1979. 
-version: 0.1.0.$Revision: 19608 $(alpha) + version: 0.1.0.$Revision: 19608 $(alpha) -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/OtsuThresholdImageFilter + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/OtsuThresholdImageFilter -contributor: Bill Lorensen (GE) + contributor: Bill Lorensen (GE) -acknowledgements: This command module was derived from Insight/Examples (copyright) Insight Software Consortium - -""" + acknowledgements: This command module was derived from Insight/Examples (copyright) Insight Software Consortium + """ input_spec = OtsuThresholdImageFilterInputSpec output_spec = OtsuThresholdImageFilterOutputSpec @@ -113,19 +112,18 @@ class ResampleScalarVolumeOutputSpec(TraitedSpec): class ResampleScalarVolume(SEMLikeCommandLine): """title: Resample Scalar Volume -category: Legacy.Filtering - -description: Resampling an image is an important task in image analysis. It is especially important in the frame of image registration. This module implements image resampling through the use of itk Transforms. This module uses an Identity Transform. The resampling is controlled by the Output Spacing. "Resampling" is performed in space coordinates, not pixel/grid coordinates. It is quite important to ensure that image spacing is properly set on the images involved. The interpolator is required since the mapping from one space to the other will often require evaluation of the intensity of the image at non-grid positions. Several interpolators are available: linear, nearest neighbor, bspline and five flavors of sinc. The sinc interpolators, although more precise, are much slower than the linear and nearest neighbor interpolator. To resample label volumnes, nearest neighbor interpolation should be used exclusively. + category: Legacy.Filtering -version: 0.1.0.$Revision: 20594 $(alpha) + description: Resampling an image is an important task in image analysis. It is especially important in the frame of image registration. This module implements image resampling through the use of itk Transforms. This module uses an Identity Transform. The resampling is controlled by the Output Spacing. "Resampling" is performed in space coordinates, not pixel/grid coordinates. It is quite important to ensure that image spacing is properly set on the images involved. The interpolator is required since the mapping from one space to the other will often require evaluation of the intensity of the image at non-grid positions. Several interpolators are available: linear, nearest neighbor, bspline and five flavors of sinc. The sinc interpolators, although more precise, are much slower than the linear and nearest neighbor interpolator. To resample label volumnes, nearest neighbor interpolation should be used exclusively. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ResampleVolume + version: 0.1.0.$Revision: 20594 $(alpha) -contributor: Bill Lorensen (GE) + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ResampleVolume -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
+ contributor: Bill Lorensen (GE) -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = ResampleScalarVolumeInputSpec output_spec = ResampleScalarVolumeOutputSpec diff --git a/nipype/interfaces/slicer/legacy/registration.py b/nipype/interfaces/slicer/legacy/registration.py index 3bfd2377d4..109b5c0464 100644 --- a/nipype/interfaces/slicer/legacy/registration.py +++ b/nipype/interfaces/slicer/legacy/registration.py @@ -96,19 +96,18 @@ class BSplineDeformableRegistrationOutputSpec(TraitedSpec): class BSplineDeformableRegistration(SEMLikeCommandLine): """title: BSpline Deformable Registration -category: Legacy.Registration + category: Legacy.Registration -description: Registers two images together using BSpline transform and mutual information. + description: Registers two images together using BSpline transform and mutual information. -version: 0.1.0.$Revision: 19608 $(alpha) + version: 0.1.0.$Revision: 19608 $(alpha) -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BSplineDeformableRegistration + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BSplineDeformableRegistration -contributor: Bill Lorensen (GE) + contributor: Bill Lorensen (GE) -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. - -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = BSplineDeformableRegistrationInputSpec output_spec = BSplineDeformableRegistrationOutputSpec @@ -183,25 +182,24 @@ class AffineRegistrationOutputSpec(TraitedSpec): class AffineRegistration(SEMLikeCommandLine): """title: Affine Registration -category: Legacy.Registration - -description: Registers two images together using an affine transform and mutual information. This module is often used to align images of different subjects or images of the same subject from different modalities. + category: Legacy.Registration -This module can smooth images prior to registration to mitigate noise and improve convergence. Many of the registration parameters require a working knowledge of the algorithm although the default parameters are sufficient for many registration tasks. + description: Registers two images together using an affine transform and mutual information. This module is often used to align images of different subjects or images of the same subject from different modalities. + This module can smooth images prior to registration to mitigate noise and improve convergence. Many of the registration parameters require a working knowledge of the algorithm although the default parameters are sufficient for many registration tasks. 
-version: 0.1.0.$Revision: 19608 $(alpha) -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/AffineRegistration + version: 0.1.0.$Revision: 19608 $(alpha) -contributor: Daniel Blezek (GE) + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/AffineRegistration -acknowledgements: This module was developed by Daniel Blezek while at GE Research with contributions from Jim Miller. + contributor: Daniel Blezek (GE) -This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + acknowledgements: This module was developed by Daniel Blezek while at GE Research with contributions from Jim Miller. -""" + This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = AffineRegistrationInputSpec output_spec = AffineRegistrationOutputSpec @@ -276,19 +274,18 @@ class MultiResolutionAffineRegistrationOutputSpec(TraitedSpec): class MultiResolutionAffineRegistration(SEMLikeCommandLine): """title: Robust Multiresolution Affine Registration -category: Legacy.Registration - -description: Provides affine registration using multiple resolution levels and decomposed affine transforms. + category: Legacy.Registration -version: 0.1.0.$Revision: 2104 $(alpha) + description: Provides affine registration using multiple resolution levels and decomposed affine transforms. -documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/MultiResolutionAffineRegistration + version: 0.1.0.$Revision: 2104 $(alpha) -contributor: Casey B Goodlett (Utah) + documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/MultiResolutionAffineRegistration -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + contributor: Casey B Goodlett (Utah) -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = MultiResolutionAffineRegistrationInputSpec output_spec = MultiResolutionAffineRegistrationOutputSpec @@ -377,29 +374,28 @@ class RigidRegistrationOutputSpec(TraitedSpec): class RigidRegistration(SEMLikeCommandLine): """title: Rigid Registration -category: Legacy.Registration + category: Legacy.Registration -description: Registers two images together using a rigid transform and mutual information. + description: Registers two images together using a rigid transform and mutual information. -This module was originally distributed as "Linear registration" but has been renamed to eliminate confusion with the "Affine registration" module. + This module was originally distributed as "Linear registration" but has been renamed to eliminate confusion with the "Affine registration" module. -This module is often used to align images of different subjects or images of the same subject from different modalities. + This module is often used to align images of different subjects or images of the same subject from different modalities. -This module can smooth images prior to registration to mitigate noise and improve convergence. 
Many of the registration parameters require a working knowledge of the algorithm although the default parameters are sufficient for many registration tasks. + This module can smooth images prior to registration to mitigate noise and improve convergence. Many of the registration parameters require a working knowledge of the algorithm although the default parameters are sufficient for many registration tasks. -version: 0.1.0.$Revision: 19608 $(alpha) + version: 0.1.0.$Revision: 19608 $(alpha) -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/RigidRegistration + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/RigidRegistration -contributor: Daniel Blezek (GE) + contributor: Daniel Blezek (GE) -acknowledgements: This module was developed by Daniel Blezek while at GE Research with contributions from Jim Miller. + acknowledgements: This module was developed by Daniel Blezek while at GE Research with contributions from Jim Miller. -This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. - -""" + This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = RigidRegistrationInputSpec output_spec = RigidRegistrationOutputSpec @@ -484,19 +480,18 @@ class LinearRegistrationOutputSpec(TraitedSpec): class LinearRegistration(SEMLikeCommandLine): """title: Linear Registration -category: Legacy.Registration - -description: Registers two images together using a rigid transform and mutual information. + category: Legacy.Registration -version: 0.1.0.$Revision: 19608 $(alpha) + description: Registers two images together using a rigid transform and mutual information. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/LinearRegistration + version: 0.1.0.$Revision: 19608 $(alpha) -contributor: Daniel Blezek (GE) + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/LinearRegistration -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + contributor: Daniel Blezek (GE) -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
+ """ input_spec = LinearRegistrationInputSpec output_spec = LinearRegistrationOutputSpec @@ -664,19 +659,18 @@ class ExpertAutomatedRegistrationOutputSpec(TraitedSpec): class ExpertAutomatedRegistration(SEMLikeCommandLine): """title: Expert Automated Registration -category: Legacy.Registration - -description: Provides rigid, affine, and BSpline registration methods via a simple GUI + category: Legacy.Registration -version: 0.1.0.$Revision: 2104 $(alpha) + description: Provides rigid, affine, and BSpline registration methods via a simple GUI -documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ExpertAutomatedRegistration + version: 0.1.0.$Revision: 2104 $(alpha) -contributor: Stephen R Aylward (Kitware), Casey B Goodlett (Kitware) + documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ExpertAutomatedRegistration -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + contributor: Stephen R Aylward (Kitware), Casey B Goodlett (Kitware) -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = ExpertAutomatedRegistrationInputSpec output_spec = ExpertAutomatedRegistrationOutputSpec diff --git a/nipype/interfaces/slicer/legacy/segmentation.py b/nipype/interfaces/slicer/legacy/segmentation.py index 118ffbcb77..8440c776c0 100644 --- a/nipype/interfaces/slicer/legacy/segmentation.py +++ b/nipype/interfaces/slicer/legacy/segmentation.py @@ -55,19 +55,18 @@ class OtsuThresholdSegmentationOutputSpec(TraitedSpec): class OtsuThresholdSegmentation(SEMLikeCommandLine): """title: Otsu Threshold Segmentation -category: Legacy.Segmentation + category: Legacy.Segmentation -description: This filter creates a labeled image from a grayscale image. First, it calculates an optimal threshold that separates the image into foreground and background. This threshold separates those two classes so that their intra-class variance is minimal (see http://en.wikipedia.org/wiki/Otsu%27s_method). Then the filter runs a connected component algorithm to generate unique labels for each connected region of the foreground. Finally, the resulting image is relabeled to provide consecutive numbering. + description: This filter creates a labeled image from a grayscale image. First, it calculates an optimal threshold that separates the image into foreground and background. This threshold separates those two classes so that their intra-class variance is minimal (see http://en.wikipedia.org/wiki/Otsu%27s_method). Then the filter runs a connected component algorithm to generate unique labels for each connected region of the foreground. Finally, the resulting image is relabeled to provide consecutive numbering. -version: 1.0 + version: 1.0 -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/OtsuThresholdSegmentation + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/OtsuThresholdSegmentation -contributor: Bill Lorensen (GE) + contributor: Bill Lorensen (GE) -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
- -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = OtsuThresholdSegmentationInputSpec output_spec = OtsuThresholdSegmentationOutputSpec diff --git a/nipype/interfaces/slicer/quantification/changequantification.py b/nipype/interfaces/slicer/quantification/changequantification.py index c0e36b9bf9..8b529fa33b 100644 --- a/nipype/interfaces/slicer/quantification/changequantification.py +++ b/nipype/interfaces/slicer/quantification/changequantification.py @@ -70,25 +70,24 @@ class IntensityDifferenceMetricOutputSpec(TraitedSpec): class IntensityDifferenceMetric(SEMLikeCommandLine): """title: - Intensity Difference Change Detection (FAST) + Intensity Difference Change Detection (FAST) -category: - Quantification.ChangeQuantification + category: + Quantification.ChangeQuantification -description: - Quantifies the changes between two spatially aligned images based on the pixel-wise difference of image intensities. + description: + Quantifies the changes between two spatially aligned images based on the pixel-wise difference of image intensities. -version: 0.1 + version: 0.1 -contributor: Andrey Fedorov + contributor: Andrey Fedorov -acknowledgements: + acknowledgements: - -""" + """ input_spec = IntensityDifferenceMetricInputSpec output_spec = IntensityDifferenceMetricOutputSpec diff --git a/nipype/interfaces/slicer/quantification/petstandarduptakevaluecomputation.py b/nipype/interfaces/slicer/quantification/petstandarduptakevaluecomputation.py index 9cf01c5359..2fe281f09f 100644 --- a/nipype/interfaces/slicer/quantification/petstandarduptakevaluecomputation.py +++ b/nipype/interfaces/slicer/quantification/petstandarduptakevaluecomputation.py @@ -69,19 +69,18 @@ class PETStandardUptakeValueComputationOutputSpec(TraitedSpec): class PETStandardUptakeValueComputation(SEMLikeCommandLine): """title: PET Standard Uptake Value Computation -category: Quantification + category: Quantification -description: Computes the standardized uptake value based on body weight. Takes an input PET image in DICOM and NRRD format (DICOM header must contain Radiopharmaceutical parameters). Produces a CSV file that contains patientID, studyDate, dose, labelID, suvmin, suvmax, suvmean, labelName for each volume of interest. It also displays some of the information as output strings in the GUI, the CSV file is optional in that case. The CSV file is appended to on each execution of the CLI. + description: Computes the standardized uptake value based on body weight. Takes an input PET image in DICOM and NRRD format (DICOM header must contain Radiopharmaceutical parameters). Produces a CSV file that contains patientID, studyDate, dose, labelID, suvmin, suvmax, suvmean, labelName for each volume of interest. It also displays some of the information as output strings in the GUI, the CSV file is optional in that case. The CSV file is appended to on each execution of the CLI. 
-version: 0.1.0.$Revision: 8595 $(alpha) + version: 0.1.0.$Revision: 8595 $(alpha) -documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ComputeSUVBodyWeight + documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ComputeSUVBodyWeight -contributor: Wendy Plesniak (SPL, BWH), Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH) + contributor: Wendy Plesniak (SPL, BWH), Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH) -acknowledgements: This work is funded by the Harvard Catalyst, and the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. - -""" + acknowledgements: This work is funded by the Harvard Catalyst, and the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = PETStandardUptakeValueComputationInputSpec output_spec = PETStandardUptakeValueComputationOutputSpec diff --git a/nipype/interfaces/slicer/registration/brainsfit.py b/nipype/interfaces/slicer/registration/brainsfit.py index 2ca7f09d3c..e26c7036a2 100644 --- a/nipype/interfaces/slicer/registration/brainsfit.py +++ b/nipype/interfaces/slicer/registration/brainsfit.py @@ -365,21 +365,20 @@ class BRAINSFitOutputSpec(TraitedSpec): class BRAINSFit(SEMLikeCommandLine): """title: General Registration (BRAINS) -category: Registration + category: Registration -description: Register a three-dimensional volume to a reference volume (Mattes Mutual Information by default). Described in BRAINSFit: Mutual Information Registrations of Whole-Brain 3D Images, Using the Insight Toolkit, Johnson H.J., Harris G., Williams K., The Insight Journal, 2007. http://hdl.handle.net/1926/1291 + description: Register a three-dimensional volume to a reference volume (Mattes Mutual Information by default). Described in BRAINSFit: Mutual Information Registrations of Whole-Brain 3D Images, Using the Insight Toolkit, Johnson H.J., Harris G., Williams K., The Insight Journal, 2007. http://hdl.handle.net/1926/1291 -version: 3.0.0 + version: 3.0.0 -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:BRAINSFit + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:BRAINSFit -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -contributor: Hans J. Johnson, hans-johnson -at- uiowa.edu, http://wwww.psychiatry.uiowa.edu + contributor: Hans J. 
Johnson, hans-johnson -at- uiowa.edu, http://wwww.psychiatry.uiowa.edu -acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); Gregory Harris(1), Vincent Magnotta(1,2,3); Andriy Fedorov(5) 1=University of Iowa Department of Psychiatry, 2=University of Iowa Department of Radiology, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering, 5=Surgical Planning Lab, Harvard - -""" + acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); Gregory Harris(1), Vincent Magnotta(1,2,3); Andriy Fedorov(5) 1=University of Iowa Department of Psychiatry, 2=University of Iowa Department of Radiology, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering, 5=Surgical Planning Lab, Harvard + """ input_spec = BRAINSFitInputSpec output_spec = BRAINSFitOutputSpec diff --git a/nipype/interfaces/slicer/registration/brainsresample.py b/nipype/interfaces/slicer/registration/brainsresample.py index 9031b86d8c..bf7b4f5547 100644 --- a/nipype/interfaces/slicer/registration/brainsresample.py +++ b/nipype/interfaces/slicer/registration/brainsresample.py @@ -91,23 +91,22 @@ class BRAINSResampleOutputSpec(TraitedSpec): class BRAINSResample(SEMLikeCommandLine): """title: Resample Image (BRAINS) -category: Registration + category: Registration -description: - This program resamples an image image using a deformation field or a transform (BSpline, Affine, Rigid, etc.). + description: + This program resamples an image image using a deformation field or a transform (BSpline, Affine, Rigid, etc.). -version: 3.0.0 + version: 3.0.0 -documentation-url: http://www.slicer.org/slicerWiki/index.php/Modules:BRAINSResample + documentation-url: http://www.slicer.org/slicerWiki/index.php/Modules:BRAINSResample -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -contributor: This tool was developed by Vincent Magnotta, Greg Harris, and Hans Johnson. + contributor: This tool was developed by Vincent Magnotta, Greg Harris, and Hans Johnson. -acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. - -""" + acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. + """ input_spec = BRAINSResampleInputSpec output_spec = BRAINSResampleOutputSpec diff --git a/nipype/interfaces/slicer/registration/specialized.py b/nipype/interfaces/slicer/registration/specialized.py index 12835b0cef..01cfafbc6a 100644 --- a/nipype/interfaces/slicer/registration/specialized.py +++ b/nipype/interfaces/slicer/registration/specialized.py @@ -51,21 +51,20 @@ class ACPCTransformOutputSpec(TraitedSpec): class ACPCTransform(SEMLikeCommandLine): """title: ACPC Transform -category: Registration.Specialized + category: Registration.Specialized -description:

Calculate a transformation from two lists of fiducial points.

ACPC line is two fiducial points, one at the anterior commissure and one at the posterior commissure. The resulting transform will bring the line connecting them to horizontal to the AP axis.

The midline is a series of points defining the division between the hemispheres of the brain (the mid sagittal plane). The resulting transform will put the output volume with the mid sagittal plane lined up with the AS plane.

Use the Filtering module Resample Scalar/Vector/DWI Volume to apply the transformation to a volume.

+ description:

Calculate a transformation from two lists of fiducial points.

ACPC line is two fiducial points, one at the anterior commissure and one at the posterior commissure. The resulting transform will bring the line connecting them to horizontal to the AP axis.

The midline is a series of points defining the division between the hemispheres of the brain (the mid sagittal plane). The resulting transform will put the output volume with the mid sagittal plane lined up with the AS plane.

Use the Filtering module Resample Scalar/Vector/DWI Volume to apply the transformation to a volume.

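As a rough illustration of how such a Slicer module is driven through its nipype wrapper (a sketch only: the trait names acpc, midline, and outputTransform are assumptions inferred from the CLI parameters described above, and a local 3D Slicer installation is needed to actually execute it):

    from nipype.interfaces.slicer.registration.specialized import ACPCTransform

    acpc = ACPCTransform()
    # Two fiducials defining the AC-PC line (assumed trait name: acpc)
    acpc.inputs.acpc = [[0.0, 1.0, 0.0], [0.0, -25.0, 0.0]]
    # Points along the mid-sagittal plane (assumed trait name: midline)
    acpc.inputs.midline = [[0.0, 0.0, 0.0], [0.0, 0.0, 40.0], [0.0, -20.0, 40.0]]
    acpc.inputs.outputTransform = "acpc_output.tfm"  # assumed trait name
    # acpc.run()  # commented out: requires 3D Slicer on the PATH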
-version: 1.0 + version: 1.0 -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ACPCTransform + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ACPCTransform -license: slicer3 + license: slicer3 -contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH) + contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH) -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. - -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = ACPCTransformInputSpec output_spec = ACPCTransformOutputSpec @@ -113,19 +112,18 @@ class FiducialRegistrationOutputSpec(TraitedSpec): class FiducialRegistration(SEMLikeCommandLine): """title: Fiducial Registration -category: Registration.Specialized - -description: Computes a rigid, similarity or affine transform from a matched list of fiducials + category: Registration.Specialized -version: 0.1.0.$Revision$ + description: Computes a rigid, similarity or affine transform from a matched list of fiducials -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/TransformFromFiducials + version: 0.1.0.$Revision$ -contributor: Casey B Goodlett (Kitware), Dominik Meier (SPL, BWH) + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/TransformFromFiducials -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + contributor: Casey B Goodlett (Kitware), Dominik Meier (SPL, BWH) -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = FiducialRegistrationInputSpec output_spec = FiducialRegistrationOutputSpec @@ -371,24 +369,23 @@ class VBRAINSDemonWarpOutputSpec(TraitedSpec): class VBRAINSDemonWarp(SEMLikeCommandLine): """title: Vector Demon Registration (BRAINS) -category: Registration.Specialized + category: Registration.Specialized -description: - This program finds a deformation field to warp a moving image onto a fixed image. The images must be of the same signal kind, and contain an image of the same kind of object. This program uses the Thirion Demons warp software in ITK, the Insight Toolkit. Additional information is available at: http://www.nitrc.org/projects/brainsdemonwarp. + description: + This program finds a deformation field to warp a moving image onto a fixed image. The images must be of the same signal kind, and contain an image of the same kind of object. This program uses the Thirion Demons warp software in ITK, the Insight Toolkit. Additional information is available at: http://www.nitrc.org/projects/brainsdemonwarp. 
-version: 3.0.0 + version: 3.0.0 -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:BRAINSDemonWarp + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:BRAINSDemonWarp -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -contributor: This tool was developed by Hans J. Johnson and Greg Harris. + contributor: This tool was developed by Hans J. Johnson and Greg Harris. -acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. - -""" + acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. + """ input_spec = VBRAINSDemonWarpInputSpec output_spec = VBRAINSDemonWarpOutputSpec @@ -632,24 +629,23 @@ class BRAINSDemonWarpOutputSpec(TraitedSpec): class BRAINSDemonWarp(SEMLikeCommandLine): """title: Demon Registration (BRAINS) -category: Registration.Specialized - -description: - This program finds a deformation field to warp a moving image onto a fixed image. The images must be of the same signal kind, and contain an image of the same kind of object. This program uses the Thirion Demons warp software in ITK, the Insight Toolkit. Additional information is available at: http://www.nitrc.org/projects/brainsdemonwarp. + category: Registration.Specialized + description: + This program finds a deformation field to warp a moving image onto a fixed image. The images must be of the same signal kind, and contain an image of the same kind of object. This program uses the Thirion Demons warp software in ITK, the Insight Toolkit. Additional information is available at: http://www.nitrc.org/projects/brainsdemonwarp. -version: 3.0.0 -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:BRAINSDemonWarp + version: 3.0.0 -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:BRAINSDemonWarp -contributor: This tool was developed by Hans J. Johnson and Greg Harris. + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. + contributor: This tool was developed by Hans J. Johnson and Greg Harris. -""" + acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. 
+ """ input_spec = BRAINSDemonWarpInputSpec output_spec = BRAINSDemonWarpOutputSpec diff --git a/nipype/interfaces/slicer/segmentation/simpleregiongrowingsegmentation.py b/nipype/interfaces/slicer/segmentation/simpleregiongrowingsegmentation.py index a32a8dde5c..6d58a63ea9 100644 --- a/nipype/interfaces/slicer/segmentation/simpleregiongrowingsegmentation.py +++ b/nipype/interfaces/slicer/segmentation/simpleregiongrowingsegmentation.py @@ -63,19 +63,18 @@ class SimpleRegionGrowingSegmentationOutputSpec(TraitedSpec): class SimpleRegionGrowingSegmentation(SEMLikeCommandLine): """title: Simple Region Growing Segmentation -category: Segmentation + category: Segmentation -description: A simple region growing segmentation algorithm based on intensity statistics. To create a list of fiducials (Seeds) for this algorithm, click on the tool bar icon of an arrow pointing to a starburst fiducial to enter the 'place a new object mode' and then use the fiducials module. This module uses the Slicer Command Line Interface (CLI) and the ITK filters CurvatureFlowImageFilter and ConfidenceConnectedImageFilter. + description: A simple region growing segmentation algorithm based on intensity statistics. To create a list of fiducials (Seeds) for this algorithm, click on the tool bar icon of an arrow pointing to a starburst fiducial to enter the 'place a new object mode' and then use the fiducials module. This module uses the Slicer Command Line Interface (CLI) and the ITK filters CurvatureFlowImageFilter and ConfidenceConnectedImageFilter. -version: 0.1.0.$Revision: 19904 $(alpha) + version: 0.1.0.$Revision: 19904 $(alpha) -documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/SimpleRegionGrowingSegmentation + documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/SimpleRegionGrowingSegmentation -contributor: Jim Miller (GE) + contributor: Jim Miller (GE) -acknowledgements: This command module was derived from Insight/Examples (copyright) Insight Software Consortium - -""" + acknowledgements: This command module was derived from Insight/Examples (copyright) Insight Software Consortium + """ input_spec = SimpleRegionGrowingSegmentationInputSpec output_spec = SimpleRegionGrowingSegmentationOutputSpec diff --git a/nipype/interfaces/slicer/segmentation/specialized.py b/nipype/interfaces/slicer/segmentation/specialized.py index 0ae90d0334..3abab602dc 100644 --- a/nipype/interfaces/slicer/segmentation/specialized.py +++ b/nipype/interfaces/slicer/segmentation/specialized.py @@ -61,19 +61,18 @@ class RobustStatisticsSegmenterOutputSpec(TraitedSpec): class RobustStatisticsSegmenter(SEMLikeCommandLine): """title: Robust Statistics Segmenter -category: Segmentation.Specialized + category: Segmentation.Specialized -description: Active contour segmentation using robust statistic. + description: Active contour segmentation using robust statistic. 
-version: 1.0 + version: 1.0 -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/RobustStatisticsSegmenter + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/RobustStatisticsSegmenter -contributor: Yi Gao (gatech), Allen Tannenbaum (gatech), Ron Kikinis (SPL, BWH) + contributor: Yi Gao (gatech), Allen Tannenbaum (gatech), Ron Kikinis (SPL, BWH) -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health - -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health + """ input_spec = RobustStatisticsSegmenterInputSpec output_spec = RobustStatisticsSegmenterOutputSpec @@ -194,25 +193,24 @@ class EMSegmentCommandLineOutputSpec(TraitedSpec): class EMSegmentCommandLine(SEMLikeCommandLine): """title: - EMSegment Command-line - + EMSegment Command-line -category: - Segmentation.Specialized + category: + Segmentation.Specialized -description: - This module is used to simplify the process of segmenting large collections of images by providing a command line interface to the EMSegment algorithm for script and batch processing. + description: + This module is used to simplify the process of segmenting large collections of images by providing a command line interface to the EMSegment algorithm for script and batch processing. -documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.0/EMSegment_Command-line -contributor: Sebastien Barre, Brad Davis, Kilian Pohl, Polina Golland, Yumin Yuan, Daniel Haehn + documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.0/EMSegment_Command-line -acknowledgements: Many people and organizations have contributed to the funding, design, and development of the EMSegment algorithm and its various implementations. + contributor: Sebastien Barre, Brad Davis, Kilian Pohl, Polina Golland, Yumin Yuan, Daniel Haehn + acknowledgements: Many people and organizations have contributed to the funding, design, and development of the EMSegment algorithm and its various implementations. -""" + """ input_spec = EMSegmentCommandLineInputSpec output_spec = EMSegmentCommandLineOutputSpec @@ -288,20 +286,19 @@ class BRAINSROIAutoOutputSpec(TraitedSpec): class BRAINSROIAuto(SEMLikeCommandLine): """title: Foreground masking (BRAINS) -category: Segmentation.Specialized - -description: This tool uses a combination of otsu thresholding and a closing operations to identify the most prominant foreground region in an image. + category: Segmentation.Specialized + description: This tool uses a combination of otsu thresholding and a closing operations to identify the most prominant foreground region in an image. -version: 2.4.1 -license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + version: 2.4.1 -contributor: Hans J. 
Johnson, hans-johnson -at- uiowa.edu, http://wwww.psychiatry.uiowa.edu + license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt -acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); Gregory Harris(1), Vincent Magnotta(1,2,3); Andriy Fedorov(5), fedorov -at- bwh.harvard.edu (Slicer integration); (1=University of Iowa Department of Psychiatry, 2=University of Iowa Department of Radiology, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering, 5=Surgical Planning Lab, Harvard) + contributor: Hans J. Johnson, hans-johnson -at- uiowa.edu, http://wwww.psychiatry.uiowa.edu -""" + acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); Gregory Harris(1), Vincent Magnotta(1,2,3); Andriy Fedorov(5), fedorov -at- bwh.harvard.edu (Slicer integration); (1=University of Iowa Department of Psychiatry, 2=University of Iowa Department of Radiology, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering, 5=Surgical Planning Lab, Harvard) + """ input_spec = BRAINSROIAutoInputSpec output_spec = BRAINSROIAutoOutputSpec diff --git a/nipype/interfaces/slicer/surface.py b/nipype/interfaces/slicer/surface.py index 970d067157..d2ebe4d15f 100644 --- a/nipype/interfaces/slicer/surface.py +++ b/nipype/interfaces/slicer/surface.py @@ -33,19 +33,18 @@ class MergeModelsOutputSpec(TraitedSpec): class MergeModels(SEMLikeCommandLine): """title: Merge Models -category: Surface Models + category: Surface Models -description: Merge the polydata from two input models and output a new model with the added polydata. Uses the vtkAppendPolyData filter. Works on .vtp and .vtk surface files. + description: Merge the polydata from two input models and output a new model with the added polydata. Uses the vtkAppendPolyData filter. Works on .vtp and .vtk surface files. -version: $Revision$ + version: $Revision$ -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/MergeModels + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/MergeModels -contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH), Daniel Haehn (SPL, BWH) + contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH), Daniel Haehn (SPL, BWH) -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. - -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = MergeModelsInputSpec output_spec = MergeModelsOutputSpec @@ -74,19 +73,18 @@ class ModelToLabelMapOutputSpec(TraitedSpec): class ModelToLabelMap(SEMLikeCommandLine): """title: Model To Label Map -category: Surface Models - -description: Intersects an input model with an reference volume and produces an output label map. + category: Surface Models -version: 0.1.0.$Revision: 8643 $(alpha) + description: Intersects an input model with an reference volume and produces an output label map. 
-documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/PolyDataToLabelMap + version: 0.1.0.$Revision: 8643 $(alpha) -contributor: Nicole Aucoin (SPL, BWH), Xiaodong Tao (GE) + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/PolyDataToLabelMap -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + contributor: Nicole Aucoin (SPL, BWH), Xiaodong Tao (GE) -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = ModelToLabelMapInputSpec output_spec = ModelToLabelMapOutputSpec @@ -141,21 +139,20 @@ class GrayscaleModelMakerOutputSpec(TraitedSpec): class GrayscaleModelMaker(SEMLikeCommandLine): """title: Grayscale Model Maker -category: Surface Models - -description: Create 3D surface models from grayscale data. This module uses Marching Cubes to create an isosurface at a given threshold. The resulting surface consists of triangles that separate a volume into regions below and above the threshold. The resulting surface can be smoothed and decimated. This model works on continuous data while the module Model Maker works on labeled (or discrete) data. + category: Surface Models -version: 3.0 + description: Create 3D surface models from grayscale data. This module uses Marching Cubes to create an isosurface at a given threshold. The resulting surface consists of triangles that separate a volume into regions below and above the threshold. The resulting surface can be smoothed and decimated. This model works on continuous data while the module Model Maker works on labeled (or discrete) data. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GrayscaleModelMaker + version: 3.0 -license: slicer3 + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GrayscaleModelMaker -contributor: Nicole Aucoin (SPL, BWH), Bill Lorensen (GE) + license: slicer3 -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + contributor: Nicole Aucoin (SPL, BWH), Bill Lorensen (GE) -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = GrayscaleModelMakerInputSpec output_spec = GrayscaleModelMakerOutputSpec @@ -185,19 +182,18 @@ class ProbeVolumeWithModelOutputSpec(TraitedSpec): class ProbeVolumeWithModel(SEMLikeCommandLine): """title: Probe Volume With Model -category: Surface Models + category: Surface Models -description: Paint a model by a volume (using vtkProbeFilter). + description: Paint a model by a volume (using vtkProbeFilter). 
-version: 0.1.0.$Revision: 1892 $(alpha) + version: 0.1.0.$Revision: 1892 $(alpha) -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ProbeVolumeWithModel + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ProbeVolumeWithModel -contributor: Lauren O'Donnell (SPL, BWH) + contributor: Lauren O'Donnell (SPL, BWH) -acknowledgements: BWH, NCIGT/LMI - -""" + acknowledgements: BWH, NCIGT/LMI + """ input_spec = ProbeVolumeWithModelInputSpec output_spec = ProbeVolumeWithModelOutputSpec @@ -239,19 +235,18 @@ class LabelMapSmoothingOutputSpec(TraitedSpec): class LabelMapSmoothing(SEMLikeCommandLine): """title: Label Map Smoothing -category: Surface Models - -description: This filter smoothes a binary label map. With a label map as input, this filter runs an anti-alising algorithm followed by a Gaussian smoothing algorithm. The output is a smoothed label map. + category: Surface Models -version: 1.0 + description: This filter smoothes a binary label map. With a label map as input, this filter runs an anti-alising algorithm followed by a Gaussian smoothing algorithm. The output is a smoothed label map. -documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/LabelMapSmoothing + version: 1.0 -contributor: Dirk Padfield (GE), Josh Cates (Utah), Ross Whitaker (Utah) + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/LabelMapSmoothing -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. This filter is based on work developed at the University of Utah, and implemented at GE Research. + contributor: Dirk Padfield (GE), Josh Cates (Utah), Ross Whitaker (Utah) -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. This filter is based on work developed at the University of Utah, and implemented at GE Research. + """ input_spec = LabelMapSmoothingInputSpec output_spec = LabelMapSmoothingOutputSpec @@ -273,7 +268,7 @@ class ModelMakerInputSpec(CommandLineInputSpec): ) modelSceneFile = traits.Either( traits.Bool, - InputMultiPath(File(),), + InputMultiPath(File()), hash_files=False, desc="Generated models, under a model hierarchy node. Models are imported into Slicer under a model hierarchy node, and their colors are set by the color table associated with the input label map volume. The model hierarchy node must be created before running the model maker, by selecting Create New ModelHierarchy from the Models drop down menu. If you're running from the command line, a model hierarchy node in a new mrml scene will be created for you.", argstr="--modelSceneFile %s...", @@ -354,21 +349,20 @@ class ModelMakerOutputSpec(TraitedSpec): class ModelMaker(SEMLikeCommandLine): """title: Model Maker -category: Surface Models - -description: Create 3D surface models from segmented data.

Models are imported into Slicer under a model hierarchy node in a MRML scene. The model colors are set by the color table associated with the input volume (these colours will only be visible if you load the model scene file).

Create Multiple:

If you specify a list of Labels, it will override any start/end label settings.

If you click Generate All, it will override the list of labels and any start/end label settings.

Model Maker Settings:

You can set the number of smoothing iterations, target reduction in number of polygons (decimal percentage). Use 0 and 1 if you wish no smoothing nor decimation.
You can set the flags to split normals or generate point normals in this pane as well.
You can save a copy of the models after intermediate steps (marching cubes, smoothing, and decimation if not joint smoothing, otherwise just after decimation); these models are not saved in the mrml file, turn off deleting temporary files first in the python window:
slicer.modules.modelmaker.cliModuleLogic().DeleteTemporaryFilesOff()

+ category: Surface Models -version: 4.1 + description: Create 3D surface models from segmented data.

Models are imported into Slicer under a model hierarchy node in a MRML scene. The model colors are set by the color table associated with the input volume (these colours will only be visible if you load the model scene file).

Create Multiple:

If you specify a list of Labels, it will override any start/end label settings.

If you click Generate All, it will override the list of labels and any start/end label settings.

Model Maker Settings:

You can set the number of smoothing iterations, target reduction in number of polygons (decimal percentage). Use 0 and 1 if you wish no smoothing nor decimation.
You can set the flags to split normals or generate point normals in this pane as well.
You can save a copy of the models after intermediate steps (marching cubes, smoothing, and decimation if not joint smoothing, otherwise just after decimation); these models are not saved in the mrml file, turn off deleting temporary files first in the python window:
slicer.modules.modelmaker.cliModuleLogic().DeleteTemporaryFilesOff()

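For orientation, a minimal usage sketch of this wrapper follows (illustrative only: modelSceneFile appears in the input spec above, while InputVolume and labels are assumed trait names, and running the node requires a local 3D Slicer installation):

    from nipype.interfaces.slicer.surface import ModelMaker

    mm = ModelMaker()
    mm.inputs.InputVolume = "aseg_labels.nrrd"    # assumed trait name for the input label map
    mm.inputs.labels = [17, 53]                   # assumed trait name; restrict model generation to these labels
    mm.inputs.modelSceneFile = ["models.mrml"]    # trait shown in the input spec above
    # mm.run()  # commented out: requires 3D Slicer on the PATH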
-documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ModelMaker + version: 4.1 -license: slicer4 + documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ModelMaker -contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH), Bill Lorensen (GE) + license: slicer4 -acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH), Bill Lorensen (GE) -""" + acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + """ input_spec = ModelMakerInputSpec output_spec = ModelMakerOutputSpec diff --git a/nipype/interfaces/slicer/utilities.py b/nipype/interfaces/slicer/utilities.py index ad998e58fe..01f469f259 100644 --- a/nipype/interfaces/slicer/utilities.py +++ b/nipype/interfaces/slicer/utilities.py @@ -46,18 +46,17 @@ class EMSegmentTransformToNewFormatOutputSpec(TraitedSpec): class EMSegmentTransformToNewFormat(SEMLikeCommandLine): """title: - Transform MRML Files to New EMSegmenter Standard + Transform MRML Files to New EMSegmenter Standard -category: - Utilities + category: + Utilities -description: - Transform MRML Files to New EMSegmenter Standard + description: + Transform MRML Files to New EMSegmenter Standard - -""" + """ input_spec = EMSegmentTransformToNewFormatInputSpec output_spec = EMSegmentTransformToNewFormatOutputSpec diff --git a/nipype/interfaces/spm/base.py b/nipype/interfaces/spm/base.py index f168ce3329..147d63894c 100644 --- a/nipype/interfaces/spm/base.py +++ b/nipype/interfaces/spm/base.py @@ -251,7 +251,7 @@ def getinfo(klass, matlab_cmd=None, paths=None, use_mcr=None): def no_spm(): - """ Checks if SPM is NOT installed + """Checks if SPM is NOT installed used with pytest.mark.skipif decorator to skip tests that will fail if spm is not installed""" diff --git a/nipype/interfaces/spm/model.py b/nipype/interfaces/spm/model.py index de6dc15216..260742f5b0 100644 --- a/nipype/interfaces/spm/model.py +++ b/nipype/interfaces/spm/model.py @@ -145,8 +145,7 @@ class Level1Design(SPMCommand): _jobname = "fmri_spec" def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt in ["spm_mat_dir", "mask_image"]: return np.array([str(val)], dtype=object) if opt in ["session_info"]: # , 'factor_info']: @@ -157,8 +156,7 @@ def _format_arg(self, opt, spec, val): return super(Level1Design, self)._format_arg(opt, spec, val) def _parse_inputs(self): - """validate spm realign options if set to None ignore - """ + """validate spm realign options if set to None ignore""" einputs = super(Level1Design, self)._parse_inputs( skip=("mask_threshold", "flags") ) @@ -273,8 +271,7 @@ class EstimateModel(SPMCommand): _jobname = "fmri_est" def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt == "spm_mat_file": return np.array([str(val)], dtype=object) if opt == "estimation_method": @@ -285,8 +282,7 @@ def _format_arg(self, opt, spec, val): return super(EstimateModel, self)._format_arg(opt, spec, val) def _parse_inputs(self): - """validate spm realign options if set to None ignore - """ + 
"""validate spm realign options if set to None ignore""" einputs = super(EstimateModel, self)._parse_inputs(skip=("flags")) if isdefined(self.inputs.flags): einputs[0].update({flag: val for (flag, val) in self.inputs.flags.items()}) @@ -514,12 +510,12 @@ def _make_matlab_command(self, _): script += ["sidx = find(condsess(idx)==%d);" % (sno + 1)] script += [ "consess{%d}.tcon.convec(idx(sidx)) = %f;" - % (i + 1, sw * contrast.weights[c0],) + % (i + 1, sw * contrast.weights[c0]) ] else: script += [ "consess{%d}.tcon.convec(idx) = %f;" - % (i + 1, contrast.weights[c0],) + % (i + 1, contrast.weights[c0]) ] for i, contrast in enumerate(contrasts): if contrast.stat == "F": @@ -830,8 +826,7 @@ class ThresholdStatisticsInputSpec(SPMCommandInputSpec): mandatory=True, desc="which contrast in the SPM.mat to use" ) height_threshold = traits.Float( - desc=("stat value for initial thresholding (defining clusters)"), - mandatory=True, + desc=("stat value for initial thresholding (defining clusters)"), mandatory=True ) extent_threshold = traits.Int( 0, usedefault=True, desc="Minimum cluster size in voxels" @@ -1020,8 +1015,7 @@ class FactorialDesign(SPMCommand): _jobname = "factorial_design" def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt in ["spm_mat_dir", "explicit_mask_file"]: return np.array([str(val)], dtype=object) if opt in ["covariates"]: @@ -1041,8 +1035,7 @@ def _format_arg(self, opt, spec, val): return super(FactorialDesign, self)._format_arg(opt, spec, val) def _parse_inputs(self): - """validate spm realign options if set to None ignore - """ + """validate spm realign options if set to None ignore""" einputs = super(FactorialDesign, self)._parse_inputs() if not isdefined(self.inputs.spm_mat_dir): einputs[0]["dir"] = np.array([str(os.getcwd())], dtype=object) @@ -1079,8 +1072,7 @@ class OneSampleTTestDesign(FactorialDesign): input_spec = OneSampleTTestDesignInputSpec def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt in ["in_files"]: return np.array(val, dtype=object) return super(OneSampleTTestDesign, self)._format_arg(opt, spec, val) @@ -1127,8 +1119,7 @@ class TwoSampleTTestDesign(FactorialDesign): input_spec = TwoSampleTTestDesignInputSpec def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt in ["group1_files", "group2_files"]: return np.array(val, dtype=object) return super(TwoSampleTTestDesign, self)._format_arg(opt, spec, val) @@ -1164,8 +1155,7 @@ class PairedTTestDesign(FactorialDesign): input_spec = PairedTTestDesignInputSpec def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt in ["paired_files"]: return [dict(scans=np.array(files, dtype=object)) for files in val] return super(PairedTTestDesign, self)._format_arg(opt, spec, val) @@ -1206,8 +1196,7 @@ class MultipleRegressionDesign(FactorialDesign): input_spec = MultipleRegressionDesignInputSpec def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt in ["in_files"]: return np.array(val, dtype=object) if opt in ["user_covariates"]: diff --git a/nipype/interfaces/spm/preprocess.py b/nipype/interfaces/spm/preprocess.py index 
a9d211aa83..5dc2a8fa3e 100644 --- a/nipype/interfaces/spm/preprocess.py +++ b/nipype/interfaces/spm/preprocess.py @@ -230,16 +230,14 @@ class FieldMap(SPMCommand): _jobname = "fieldmap" def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt in ["phase_file", "magnitude_file", "anat_file", "epi_file"]: return scans_for_fname(ensure_list(val)) return super(FieldMap, self)._format_arg(opt, spec, val) def _parse_inputs(self): - """validate spm fieldmap options if set to None ignore - """ + """validate spm fieldmap options if set to None ignore""" einputs = super(FieldMap, self)._parse_inputs() return [{self.inputs.jobtype: einputs[0]}] @@ -276,7 +274,7 @@ class SliceTimingInputSpec(SPMCommandInputSpec): mandatory=True, ) slice_order = traits.List( - traits.Either(traits.Int(),traits.Float()), + traits.Either(traits.Int(), traits.Float()), field="so", desc=("1-based order or onset (in ms) in which slices are acquired"), mandatory=True, @@ -329,8 +327,7 @@ class SliceTiming(SPMCommand): _jobname = "st" def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt == "in_files": return scans_for_fnames( ensure_list(val), keep4d=False, separate_sessions=True @@ -478,8 +475,7 @@ class Realign(SPMCommand): _jobname = "realign" def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt == "in_files": if self.inputs.jobtype == "write": separate_sessions = False @@ -491,8 +487,7 @@ def _format_arg(self, opt, spec, val): return super(Realign, self)._format_arg(opt, spec, val) def _parse_inputs(self): - """validate spm realign options if set to None ignore - """ + """validate spm realign options if set to None ignore""" einputs = super(Realign, self)._parse_inputs() return [{"%s" % (self.inputs.jobtype): einputs[0]}] @@ -745,8 +740,7 @@ class RealignUnwarp(SPMCommand): _jobname = "realignunwarp" def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt == "in_files": return scans_for_fnames( ensure_list(val), keep4d=False, separate_sessions=True @@ -921,8 +915,7 @@ class Coregister(SPMCommand): _jobname = "coreg" def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt == "target" or (opt == "source" and self.inputs.jobtype != "write"): return scans_for_fnames(ensure_list(val), keep4d=True) if opt == "apply_to_files": @@ -935,8 +928,7 @@ def _format_arg(self, opt, spec, val): return super(Coregister, self)._format_arg(opt, spec, val) def _parse_inputs(self): - """validate spm coregister options if set to None ignore - """ + """validate spm coregister options if set to None ignore""" if self.inputs.jobtype == "write": einputs = super(Coregister, self)._parse_inputs( skip=("jobtype", "apply_to_files") @@ -1070,7 +1062,7 @@ class NormalizeInputSpec(SPMCommandInputSpec): class NormalizeOutputSpec(TraitedSpec): normalization_parameters = OutputMultiPath( - File(exists=True), desc=("MAT files containing the normalization parameters"), + File(exists=True), desc=("MAT files containing the normalization parameters") ) normalized_source = OutputMultiPath( File(exists=True), desc="Normalized source files" @@ -1098,8 +1090,7 @@ class 
Normalize(SPMCommand): _jobname = "normalise" def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt == "template": return scans_for_fname(ensure_list(val)) if opt == "source": @@ -1114,8 +1105,7 @@ def _format_arg(self, opt, spec, val): return super(Normalize, self)._format_arg(opt, spec, val) def _parse_inputs(self): - """Validate spm normalize options if set to None ignore - """ + """Validate spm normalize options if set to None ignore""" einputs = super(Normalize, self)._parse_inputs( skip=("jobtype", "apply_to_files") ) @@ -1333,8 +1323,7 @@ class Normalize12(SPMCommand): _jobname = "normalise" def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt == "tpm": return scans_for_fname(ensure_list(val)) if opt == "image_to_align": @@ -1349,8 +1338,7 @@ def _format_arg(self, opt, spec, val): return super(Normalize12, self)._format_arg(opt, spec, val) def _parse_inputs(self, skip=()): - """validate spm normalize options if set to None ignore - """ + """validate spm normalize options if set to None ignore""" einputs = super(Normalize12, self)._parse_inputs( skip=("jobtype", "apply_to_files") ) @@ -1526,7 +1514,7 @@ class SegmentInputSpec(SPMCommandInputSpec): desc="FWHM of Gaussian smoothness of bias", ) sampling_distance = traits.Float( - field="opts.samp", desc=("Sampling distance on data for parameter estimation"), + field="opts.samp", desc=("Sampling distance on data for parameter estimation") ) mask_image = File( exists=True, @@ -1537,7 +1525,7 @@ class SegmentInputSpec(SPMCommandInputSpec): class SegmentOutputSpec(TraitedSpec): native_gm_image = File(desc="native space grey probability map") - normalized_gm_image = File(desc="normalized grey probability map",) + normalized_gm_image = File(desc="normalized grey probability map") modulated_gm_image = File(desc=("modulated, normalized grey probability map")) native_wm_image = File(desc="native space white probability map") normalized_wm_image = File(desc="normalized white probability map") @@ -1585,8 +1573,7 @@ def __init__(self, **inputs): SPMCommand.__init__(self, **inputs) def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" clean_masks_dict = {"no": 0, "light": 1, "thorough": 2} if opt in ["data", "tissue_prob_maps"]: @@ -1682,7 +1669,7 @@ class NewSegmentInputSpec(SPMCommandInputSpec): ), ) sampling_distance = traits.Float( - field="warp.samp", desc=("Sampling distance on data for parameter estimation"), + field="warp.samp", desc=("Sampling distance on data for parameter estimation") ) write_deformation_fields = traits.List( traits.Bool(), @@ -1763,8 +1750,7 @@ def __init__(self, **inputs): SPMCommand.__init__(self, **inputs) def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt in ["channel_files", "channel_info"]: # structure have to be recreated because of some weird traits error @@ -1931,7 +1917,7 @@ class MultiChannelNewSegmentInputSpec(SPMCommandInputSpec): ), ) sampling_distance = traits.Float( - field="warp.samp", desc=("Sampling distance on data for parameter estimation"), + field="warp.samp", desc=("Sampling distance on data for parameter estimation") ) write_deformation_fields = traits.List( traits.Bool(), @@ -2011,8 +1997,7 @@ def 
__init__(self, **inputs): SPMCommand.__init__(self, **inputs) def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt == "channels": # structure have to be recreated because of some weird traits error @@ -2279,8 +2264,7 @@ class DARTEL(SPMCommand): _jobname = "dartel" def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt in ["image_files"]: return scans_for_fnames(val, keep4d=True, separate_sessions=True) @@ -2407,8 +2391,7 @@ class DARTELNorm2MNI(SPMCommand): _jobname = "dartel" def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt in ["template_file"]: return np.array([val], dtype=object) elif opt in ["flowfield_files"]: @@ -2501,8 +2484,7 @@ class CreateWarped(SPMCommand): _jobname = "dartel" def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt in ["image_files"]: return scans_for_fnames(val, keep4d=True, separate_sessions=True) @@ -2546,8 +2528,7 @@ class ApplyDeformations(SPMCommand): _jobname = "defs" def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt in ["deformation_field", "reference_volume"]: val = [val] @@ -2615,7 +2596,7 @@ class VBMSegmentInputSpec(SPMCommandInputSpec): desc="Controls balance between parameters and data", ) - spatial_normalization = traits.Enum("high", "low", usedefault=True,) + spatial_normalization = traits.Enum("high", "low", usedefault=True) dartel_template = ImageFileSPM( exists=True, field="estwrite.extopts.dartelwarp.normhigh.darteltpm" ) @@ -2636,9 +2617,9 @@ class VBMSegmentInputSpec(SPMCommandInputSpec): ) display_results = traits.Bool(True, usedefault=True, field="estwrite.extopts.print") - gm_native = traits.Bool(False, usedefault=True, field="estwrite.output.GM.native",) + gm_native = traits.Bool(False, usedefault=True, field="estwrite.output.GM.native") gm_normalized = traits.Bool( - False, usedefault=True, field="estwrite.output.GM.warped", + False, usedefault=True, field="estwrite.output.GM.warped" ) gm_modulated_normalized = traits.Range( 0, @@ -2657,9 +2638,9 @@ class VBMSegmentInputSpec(SPMCommandInputSpec): desc="0=None,1=rigid(SPM8 default),2=affine", ) - wm_native = traits.Bool(False, usedefault=True, field="estwrite.output.WM.native",) + wm_native = traits.Bool(False, usedefault=True, field="estwrite.output.WM.native") wm_normalized = traits.Bool( - False, usedefault=True, field="estwrite.output.WM.warped", + False, usedefault=True, field="estwrite.output.WM.warped" ) wm_modulated_normalized = traits.Range( 0, @@ -2678,11 +2659,9 @@ class VBMSegmentInputSpec(SPMCommandInputSpec): desc="0=None,1=rigid(SPM8 default),2=affine", ) - csf_native = traits.Bool( - False, usedefault=True, field="estwrite.output.CSF.native", - ) + csf_native = traits.Bool(False, usedefault=True, field="estwrite.output.CSF.native") csf_normalized = traits.Bool( - False, usedefault=True, field="estwrite.output.CSF.warped", + False, usedefault=True, field="estwrite.output.CSF.warped" ) csf_modulated_normalized = traits.Range( 0, @@ -2702,13 +2681,13 @@ class VBMSegmentInputSpec(SPMCommandInputSpec): ) bias_corrected_native = traits.Bool( - False, usedefault=True, 
field="estwrite.output.bias.native", + False, usedefault=True, field="estwrite.output.bias.native" ) bias_corrected_normalized = traits.Bool( - True, usedefault=True, field="estwrite.output.bias.warped", + True, usedefault=True, field="estwrite.output.bias.warped" ) bias_corrected_affine = traits.Bool( - False, usedefault=True, field="estwrite.output.bias.affine", + False, usedefault=True, field="estwrite.output.bias.affine" ) pve_label_native = traits.Bool( @@ -2906,8 +2885,7 @@ def _list_outputs(self): return outputs def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt in ["in_files"]: return scans_for_fnames(val, keep4d=True) elif opt in ["spatial_normalization"]: diff --git a/nipype/interfaces/spm/utils.py b/nipype/interfaces/spm/utils.py index 99e3d57d3b..74355f6b3f 100644 --- a/nipype/interfaces/spm/utils.py +++ b/nipype/interfaces/spm/utils.py @@ -64,7 +64,7 @@ class CalcCoregAffineOutputSpec(TraitedSpec): class CalcCoregAffine(SPMCommand): - """ Uses SPM (spm_coreg) to calculate the transform mapping + """Uses SPM (spm_coreg) to calculate the transform mapping moving to target. Saves Transform in mat (matlab binary file) Also saves inverse transform @@ -148,7 +148,7 @@ class ApplyTransformOutputSpec(TraitedSpec): class ApplyTransform(SPMCommand): - """ Uses SPM to apply transform stored in a .mat file to given file + """Uses SPM to apply transform stored in a .mat file to given file Examples -------- @@ -305,7 +305,7 @@ class ApplyInverseDeformationOutput(TraitedSpec): class ApplyInverseDeformation(SPMCommand): - """ Uses spm to apply inverse deformation stored in a .mat file or a + """Uses spm to apply inverse deformation stored in a .mat file or a deformation field to a given file Examples @@ -326,8 +326,7 @@ class ApplyInverseDeformation(SPMCommand): _jobname = "defs" def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt == "in_files": return scans_for_fnames(ensure_list(val)) if opt == "target": @@ -402,8 +401,7 @@ class ResliceToReference(SPMCommand): _jobname = "defs" def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt == "in_files": return scans_for_fnames(ensure_list(val)) if opt == "target": @@ -466,7 +464,7 @@ class DicomImportOutputSpec(TraitedSpec): class DicomImport(SPMCommand): - """ Uses spm to convert DICOM files to nii or img+hdr. + """Uses spm to convert DICOM files to nii or img+hdr. Examples -------- @@ -484,8 +482,7 @@ class DicomImport(SPMCommand): _jobname = "dicom" def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt == "in_files": return np.array(val, dtype=object) if opt == "output_dir": diff --git a/nipype/interfaces/tests/test_io.py b/nipype/interfaces/tests/test_io.py index f2afedd492..9816a44a4d 100644 --- a/nipype/interfaces/tests/test_io.py +++ b/nipype/interfaces/tests/test_io.py @@ -661,8 +661,7 @@ def test_bids_infields_outfields(tmpdir): @pytest.mark.skipif(no_paramiko, reason="paramiko library is not available") @pytest.mark.skipif(no_local_ssh, reason="SSH Server is not running") def test_SSHDataGrabber(tmpdir): - """Test SSHDataGrabber by connecting to localhost and collecting some data. 
- """ + """Test SSHDataGrabber by connecting to localhost and collecting some data.""" old_cwd = tmpdir.chdir() source_dir = tmpdir.mkdir("source") diff --git a/nipype/interfaces/tests/test_nilearn.py b/nipype/interfaces/tests/test_nilearn.py index 6c3a52a670..edf17ea058 100644 --- a/nipype/interfaces/tests/test_nilearn.py +++ b/nipype/interfaces/tests/test_nilearn.py @@ -118,7 +118,7 @@ def test_signal_extr_shared(self): self._test_4d_label(wanted, self.fake_4d_label_data) def test_signal_extr_traits_valid(self): - """ Test a node using the SignalExtraction interface. + """Test a node using the SignalExtraction interface. Unlike interface.run(), node.run() checks the traits """ # run diff --git a/nipype/interfaces/utility/tests/test_wrappers.py b/nipype/interfaces/utility/tests/test_wrappers.py index 98ee7c7959..76413e5760 100644 --- a/nipype/interfaces/utility/tests/test_wrappers.py +++ b/nipype/interfaces/utility/tests/test_wrappers.py @@ -95,7 +95,7 @@ def test_function_with_imports(tmpdir): def test_aux_connect_function(tmpdir): - """ This tests excution nodes with multiple inputs and auxiliary + """This tests excution nodes with multiple inputs and auxiliary function inside the Workflow connect function. """ tmpdir.chdir() @@ -103,7 +103,7 @@ def test_aux_connect_function(tmpdir): wf = pe.Workflow(name="test_workflow") def _gen_tuple(size): - return [1,] * size + return [1] * size def _sum_and_sub_mul(a, b, c): return (a + b) * c, (a - b) * c @@ -138,7 +138,7 @@ def _inc(x): (params, gen_tuple, [(("size", _inc), "size")]), (params, ssm, [(("num", _inc), "c")]), (gen_tuple, split, [("tuple", "inlist")]), - (split, ssm, [(("out1", _inc), "a"), ("out2", "b"),]), + (split, ssm, [(("out1", _inc), "a"), ("out2", "b")]), ] ) diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index aeff5f12da..b458b3f820 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -1126,8 +1126,7 @@ def __init__( self._serial = serial def _create_dynamic_traits(self, basetraits, fields=None, nitems=None): - """Convert specific fields of a trait to accept multiple inputs - """ + """Convert specific fields of a trait to accept multiple inputs""" output = DynamicTraitedSpec() if fields is None: fields = basetraits.copyable_trait_names() diff --git a/nipype/pipeline/engine/tests/test_engine.py b/nipype/pipeline/engine/tests/test_engine.py index f28b0f3bf3..3668d2d574 100644 --- a/nipype/pipeline/engine/tests/test_engine.py +++ b/nipype/pipeline/engine/tests/test_engine.py @@ -326,8 +326,7 @@ def func2(a): def test_mapnode_json(tmpdir): - """Tests that mapnodes don't generate excess jsons - """ + """Tests that mapnodes don't generate excess jsons""" tmpdir.chdir() wd = os.getcwd() from nipype import MapNode, Function, Workflow diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index f77f771ea7..a79858947f 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -825,8 +825,7 @@ def _merge_graphs( def _connect_nodes(graph, srcnode, destnode, connection_info): - """Add a connection between two nodes - """ + """Add a connection between two nodes""" data = graph.get_edge_data(srcnode, destnode, default=None) if not data: data = {"connect": connection_info} @@ -867,8 +866,7 @@ def _identity_nodes(graph, include_iterables): def _remove_identity_node(graph, node): - """Remove identity nodes from an execution graph - """ + """Remove identity nodes from an execution graph""" portinputs, portoutputs = 
_node_ports(graph, node) for field, connections in list(portoutputs.items()): if portinputs: @@ -1337,7 +1335,7 @@ def export_graph( format="png", simple_form=True, ): - """ Displays the graph layout of the pipeline + """Displays the graph layout of the pipeline This function requires that pygraphviz and matplotlib are available on the system. @@ -1433,8 +1431,7 @@ def get_all_files(infile): def walk_outputs(object): - """Extract every file and directory from a python structure - """ + """Extract every file and directory from a python structure""" out = [] if isinstance(object, dict): for _, val in sorted(object.items()): @@ -1462,8 +1459,7 @@ def walk_files(cwd): def clean_working_directory( outputs, cwd, inputs, needed_outputs, config, files2keep=None, dirs2keep=None ): - """Removes all files not needed for further analysis from the directory - """ + """Removes all files not needed for further analysis from the directory""" if not outputs: return outputs_to_keep = list(outputs.trait_get().keys()) @@ -1577,8 +1573,7 @@ def merge_bundles(g1, g2): def write_workflow_prov(graph, filename=None, format="all"): - """Write W3C PROV Model JSON file - """ + """Write W3C PROV Model JSON file""" if not filename: filename = os.path.join(os.getcwd(), "workflow_provenance") @@ -1728,8 +1723,7 @@ def write_workflow_resources(graph, filename=None, append=None): def topological_sort(graph, depth_first=False): - """Returns a depth first sorted order if depth_first is True - """ + """Returns a depth first sorted order if depth_first is True""" import networkx as nx nodesort = list(nx.topological_sort(graph)) diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py index 9b6e60ffaf..5eea2d51ec 100644 --- a/nipype/pipeline/engine/workflows.py +++ b/nipype/pipeline/engine/workflows.py @@ -315,7 +315,7 @@ def disconnect(self, *args): self._graph.add_edges_from([(srcnode, dstnode, edge_data)]) def add_nodes(self, nodes): - """ Add nodes to a workflow + """Add nodes to a workflow Parameters ---------- @@ -348,7 +348,7 @@ def add_nodes(self, nodes): self._graph.add_nodes_from(newnodes) def remove_nodes(self, nodes): - """ Remove nodes from a workflow + """Remove nodes from a workflow Parameters ---------- @@ -367,8 +367,7 @@ def outputs(self): return self._get_outputs() def get_node(self, name): - """Return an internal node by name - """ + """Return an internal node by name""" nodenames = name.split(".") nodename = nodenames[0] outnode = [ @@ -383,8 +382,7 @@ def get_node(self, name): return outnode def list_node_names(self): - """List names of all nodes in a workflow - """ + """List names of all nodes in a workflow""" import networkx as nx outlist = [] @@ -587,7 +585,7 @@ def export( return all_lines def run(self, plugin=None, plugin_args=None, updatehash=False): - """ Execute the workflow + """Execute the workflow Parameters ---------- @@ -736,8 +734,7 @@ def _set_needed_outputs(self, graph): node.needed_outputs = sorted(node.needed_outputs) def _configure_exec_nodes(self, graph): - """Ensure that each node knows where to get inputs from - """ + """Ensure that each node knows where to get inputs from""" for node in graph.nodes(): node.input_source = {} for edge in graph.in_edges(node): @@ -749,9 +746,7 @@ def _configure_exec_nodes(self, graph): ) def _check_nodes(self, nodes): - """Checks if any of the nodes are already in the graph - - """ + """Checks if any of the nodes are already in the graph""" node_names = [node.name for node in self._graph.nodes()] node_lineage = 
[node._hierarchy for node in self._graph.nodes()] for node in nodes: @@ -768,8 +763,7 @@ def _check_nodes(self, nodes): node_names.append(node.name) def _has_attr(self, parameter, subtype="in"): - """Checks if a parameter is available as an input or output - """ + """Checks if a parameter is available as an input or output""" hierarchy = parameter.split(".") # Connecting to a workflow needs at least two values, @@ -867,8 +861,7 @@ def _get_inputs(self): return inputdict def _get_outputs(self): - """Returns all possible output ports that are not already connected - """ + """Returns all possible output ports that are not already connected""" outputdict = TraitedSpec() for node in self._graph.nodes(): outputdict.add_trait(node.name, traits.Instance(TraitedSpec)) @@ -883,8 +876,7 @@ def _get_outputs(self): return outputdict def _set_input(self, objekt, name, newvalue): - """Trait callback function to update a node input - """ + """Trait callback function to update a node input""" objekt.traits()[name].node.set_input(name, newvalue) def _set_node_input(self, node, param, source, sourceinfo): @@ -928,8 +920,7 @@ def _create_flat_graph(self): return workflowcopy._graph def _reset_hierarchy(self): - """Reset the hierarchy on a graph - """ + """Reset the hierarchy on a graph""" for node in self._graph.nodes(): if isinstance(node, Workflow): node._reset_hierarchy() @@ -939,8 +930,7 @@ def _reset_hierarchy(self): node._hierarchy = self.name def _generate_flatgraph(self): - """Generate a graph containing only Nodes or MapNodes - """ + """Generate a graph containing only Nodes or MapNodes""" import networkx as nx logger.debug("expanding workflow: %s", self) @@ -1009,8 +999,7 @@ def _generate_flatgraph(self): def _get_dot( self, prefix=None, hierarchy=None, colored=False, simple_form=True, level=0 ): - """Create a dot file with connection info - """ + """Create a dot file with connection info""" import networkx as nx if prefix is None: diff --git a/nipype/pipeline/plugins/base.py b/nipype/pipeline/plugins/base.py index 4be8eb232b..8949d36be3 100644 --- a/nipype/pipeline/plugins/base.py +++ b/nipype/pipeline/plugins/base.py @@ -392,7 +392,7 @@ def _local_hash_check(self, jobid, graph): return False def _task_finished_cb(self, jobid, cached=False): - """ Extract outputs and assign to inputs of dependent tasks + """Extract outputs and assign to inputs of dependent tasks This is called when a job is completed. """ @@ -413,8 +413,7 @@ def _task_finished_cb(self, jobid, cached=False): self.refidx[self.refidx[:, jobid].nonzero()[0], jobid] = 0 def _generate_dependency_list(self, graph): - """ Generates a dependency list for a list of graphs. 
- """ + """Generates a dependency list for a list of graphs.""" import networkx as nx self.procs, _ = topological_sort(graph) @@ -444,8 +443,7 @@ def _remove_node_deps(self, jobid, crashfile, graph): return dict(node=self.procs[jobid], dependents=subnodes, crashfile=crashfile) def _remove_node_dirs(self): - """Removes directories whose outputs have already been used up - """ + """Removes directories whose outputs have already been used up""" if str2bool(self._config["execution"]["remove_node_directories"]): indices = np.nonzero((self.refidx.sum(axis=1) == 0).__array__())[0] for idx in indices: @@ -465,8 +463,7 @@ def _remove_node_dirs(self): class SGELikeBatchManagerBase(DistributedPluginBase): - """Execute workflow with SGE/OGE/PBS like batch system - """ + """Execute workflow with SGE/OGE/PBS like batch system""" def __init__(self, template, plugin_args=None): super(SGELikeBatchManagerBase, self).__init__(plugin_args=plugin_args) @@ -483,13 +480,11 @@ def __init__(self, template, plugin_args=None): self._pending = {} def _is_pending(self, taskid): - """Check if a task is pending in the batch system - """ + """Check if a task is pending in the batch system""" raise NotImplementedError def _submit_batchtask(self, scriptfile, node): - """Submit a task to the batch system - """ + """Submit a task to the batch system""" raise NotImplementedError def _get_result(self, taskid): @@ -544,8 +539,7 @@ def _get_result(self, taskid): return result_out def _submit_job(self, node, updatehash=False): - """submit job and return taskid - """ + """submit job and return taskid""" pyscript = create_pyscript(node, updatehash=updatehash) batch_dir, name = os.path.split(pyscript) name = ".".join(name.split(".")[:-1]) @@ -560,8 +554,7 @@ def _clear_task(self, taskid): class GraphPluginBase(PluginBase): - """Base class for plugins that distribute graphs to workflows - """ + """Base class for plugins that distribute graphs to workflows""" def __init__(self, plugin_args=None): if plugin_args and plugin_args.get("status_callback"): diff --git a/nipype/pipeline/plugins/debug.py b/nipype/pipeline/plugins/debug.py index 16ea8f44ee..31ce4e08e5 100644 --- a/nipype/pipeline/plugins/debug.py +++ b/nipype/pipeline/plugins/debug.py @@ -9,8 +9,7 @@ class DebugPlugin(PluginBase): - """Execute workflow in series - """ + """Execute workflow in series""" def __init__(self, plugin_args=None): super(DebugPlugin, self).__init__(plugin_args=plugin_args) diff --git a/nipype/pipeline/plugins/ipython.py b/nipype/pipeline/plugins/ipython.py index 8a786a16f1..b22a5ea4e5 100644 --- a/nipype/pipeline/plugins/ipython.py +++ b/nipype/pipeline/plugins/ipython.py @@ -44,8 +44,7 @@ def execute_task(pckld_task, node_config, updatehash): class IPythonPlugin(DistributedPluginBase): - """Execute workflow with ipython - """ + """Execute workflow with ipython""" def __init__(self, plugin_args=None): if IPython_not_loaded: diff --git a/nipype/pipeline/plugins/linear.py b/nipype/pipeline/plugins/linear.py index 650bff280f..8471a187d3 100644 --- a/nipype/pipeline/plugins/linear.py +++ b/nipype/pipeline/plugins/linear.py @@ -10,8 +10,7 @@ class LinearPlugin(PluginBase): - """Execute workflow in series - """ + """Execute workflow in series""" def run(self, graph, config, updatehash=False): """Executes a pre-defined pipeline in a serial order. 
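The base.py hunks earlier in this patch leave SGELikeBatchManagerBase._is_pending and _submit_batchtask as NotImplementedError stubs, which is the extension point that concrete batch plugins fill in. A rough sketch of that pattern follows; the FakeBatchManager name and the qstat/qsub invocations are illustrative assumptions, not the actual nipype SGE or PBS plugins.

import subprocess

from nipype.pipeline.plugins.base import SGELikeBatchManagerBase


class FakeBatchManager(SGELikeBatchManagerBase):
    def _is_pending(self, taskid):
        # Treat the job as pending/running while the scheduler still knows its id.
        proc = subprocess.run(
            ["qstat", "-j", str(taskid)], capture_output=True, text=True
        )
        return proc.returncode == 0

    def _submit_batchtask(self, scriptfile, node):
        # Submit the generated script and hand back the scheduler's job id.
        proc = subprocess.run(
            ["qsub", "-terse", scriptfile], capture_output=True, text=True
        )
        return int(proc.stdout.strip())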
diff --git a/nipype/pipeline/plugins/sge.py b/nipype/pipeline/plugins/sge.py index 17a5093ae2..2690e78fcf 100644 --- a/nipype/pipeline/plugins/sge.py +++ b/nipype/pipeline/plugins/sge.py @@ -20,8 +20,7 @@ def sge_debug_print(message): - """ Needed for debugging on big jobs. Once this is fully vetted, it can be removed. - """ + """Needed for debugging on big jobs. Once this is fully vetted, it can be removed.""" logger.debug(DEBUGGING_PREFIX + " " + "=!" * 3 + " " + message) # print DEBUGGING_PREFIX + " " + "=!" * 3 + " " + message @@ -79,8 +78,7 @@ def is_pending(self): return self._job_queue_state == "pending" def is_job_state_pending(self): - """ Return True, unless job is in the "zombie" status - """ + """Return True, unless job is in the "zombie" status""" time_diff = time.time() - self._job_info_creation_time if self.is_zombie(): sge_debug_print( @@ -133,7 +131,7 @@ def __init__( self._remove_old_jobs() def _remove_old_jobs(self): - """ This is only called during initialization of the function for the purpose + """This is only called during initialization of the function for the purpose of identifying jobs that are not part of this run of nipype. They are jobs that existed prior to starting a new jobs, so they are irrelevant. """ @@ -154,8 +152,8 @@ def add_startup_job(self, taskid, qsub_command_line): @staticmethod def _qacct_verified_complete(taskid): - """ request definitive job completion information for the current job - from the qacct report + """request definitive job completion information for the current job + from the qacct report """ sge_debug_print( "WARNING: " @@ -280,12 +278,12 @@ def _parse_qstat_job_list(self, xml_job_list): pass def _run_qstat(self, reason_for_qstat, force_instant=True): - """ request all job information for the current user in xmlformat. - See documentation from java documentation: - http://arc.liv.ac.uk/SGE/javadocs/jgdi/com/sun/grid/jgdi/monitoring/filter/JobStateFilter.html - -s r gives running jobs - -s z gives recently completed jobs (**recently** is very ambiguous) - -s s suspended jobs + """request all job information for the current user in xmlformat. + See documentation from java documentation: + http://arc.liv.ac.uk/SGE/javadocs/jgdi/com/sun/grid/jgdi/monitoring/filter/JobStateFilter.html + -s r gives running jobs + -s z gives recently completed jobs (**recently** is very ambiguous) + -s s suspended jobs """ sge_debug_print( "WARNING: CONTACTING qmaster for jobs, " @@ -373,7 +371,7 @@ def is_job_pending(self, task_id): def qsub_sanitize_job_name(testjobname): - """ Ensure that qsub job names must begin with a letter. + """Ensure that qsub job names must begin with a letter. Numbers and punctuation are not allowed. 
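The sge.py hunk above only reflows the qsub_sanitize_job_name docstring, so the function body is not shown; a minimal sketch that satisfies the stated constraint could look like the following (prepending a letter is an assumption about the behaviour, not the verified upstream code).

def qsub_sanitize_job_name(testjobname):
    """Ensure the job name begins with a letter, as qsub requires."""
    if testjobname and testjobname[0].isalpha():
        return testjobname
    # Prepend a letter when the name starts with a digit or punctuation.
    return "j" + testjobname

For example, qsub_sanitize_job_name("01_smooth") would return "j01_smooth", while "smooth01" is returned unchanged.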
diff --git a/nipype/pipeline/plugins/somaflow.py b/nipype/pipeline/plugins/somaflow.py index d621c7967a..62076d9f65 100644 --- a/nipype/pipeline/plugins/somaflow.py +++ b/nipype/pipeline/plugins/somaflow.py @@ -15,8 +15,7 @@ class SomaFlowPlugin(GraphPluginBase): - """Execute using Soma workflow - """ + """Execute using Soma workflow""" def __init__(self, plugin_args=None): if soma_not_loaded: diff --git a/nipype/pipeline/plugins/tools.py b/nipype/pipeline/plugins/tools.py index ef213be36d..b816e61463 100644 --- a/nipype/pipeline/plugins/tools.py +++ b/nipype/pipeline/plugins/tools.py @@ -18,8 +18,7 @@ def report_crash(node, traceback=None, hostname=None): - """Writes crash related information to a file - """ + """Writes crash related information to a file""" name = node._id host = None traceback = traceback or format_exception(*sys.exc_info()) diff --git a/nipype/pkg_info.py b/nipype/pkg_info.py index fbba2f138c..e80fde9d76 100644 --- a/nipype/pkg_info.py +++ b/nipype/pkg_info.py @@ -9,7 +9,7 @@ def pkg_commit_hash(pkg_path): - """ Get short form of commit hash given directory `pkg_path` + """Get short form of commit hash given directory `pkg_path` There should be a file called 'COMMIT_INFO.txt' in `pkg_path`. This is a file in INI file format, with at least one section: ``commit hash`` and two @@ -66,7 +66,7 @@ def pkg_commit_hash(pkg_path): def get_pkg_info(pkg_path): - """ Return dict describing the context of this package + """Return dict describing the context of this package Parameters ---------- diff --git a/nipype/sphinxext/apidoc/__init__.py b/nipype/sphinxext/apidoc/__init__.py index 9c64cb4fb9..e3a8670d94 100644 --- a/nipype/sphinxext/apidoc/__init__.py +++ b/nipype/sphinxext/apidoc/__init__.py @@ -40,7 +40,7 @@ class Config(NapoleonConfig): """ _config_values = { "nipype_skip_classes": ( - ["Tester", "InputSpec", "OutputSpec", "Numpy", "NipypeTester",], + ["Tester", "InputSpec", "OutputSpec", "Numpy", "NipypeTester"], "env", ), **NapoleonConfig._config_values, diff --git a/nipype/sphinxext/gh.py b/nipype/sphinxext/gh.py index c373f84c1a..3d1f4a9f43 100644 --- a/nipype/sphinxext/gh.py +++ b/nipype/sphinxext/gh.py @@ -23,7 +23,7 @@ def get_url(obj): revision = _get_git_revision() if revision is not None: shortfile = os.path.join("nipype", filename.split("nipype/")[-1]) - uri = "http://github.com/nipy/nipype/blob/%s/%s" % (revision, shortfile,) + uri = "http://github.com/nipy/nipype/blob/%s/%s" % (revision, shortfile) lines, lstart = inspect.getsourcelines(obj) lend = len(lines) + lstart return "%s#L%d-L%d" % (uri, lstart, lend) diff --git a/nipype/testing/decorators.py b/nipype/testing/decorators.py index 36c647634c..a0e4c2ede1 100644 --- a/nipype/testing/decorators.py +++ b/nipype/testing/decorators.py @@ -71,7 +71,7 @@ def decor(f): def needs_review(msg): - """ Skip a test that needs further review. + """Skip a test that needs further review. Parameters ---------- diff --git a/nipype/utils/draw_gantt_chart.py b/nipype/utils/draw_gantt_chart.py index 0da078af84..b5994c0375 100644 --- a/nipype/utils/draw_gantt_chart.py +++ b/nipype/utils/draw_gantt_chart.py @@ -322,8 +322,7 @@ def draw_resource_bar( left, resource, ): - """ - """ + """""" # Memory header result = "
%s
" % (left, resource) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 9b3741ae96..d8933e078d 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -148,8 +148,7 @@ def fname_presuffix(fname, prefix="", suffix="", newpath=None, use_ext=True): def fnames_presuffix(fnames, prefix="", suffix="", newpath=None, use_ext=True): - """Calls fname_presuffix for a list of files. - """ + """Calls fname_presuffix for a list of files.""" f2 = [] for fname in fnames: f2.append(fname_presuffix(fname, prefix, suffix, newpath, use_ext)) @@ -517,8 +516,7 @@ def copyfiles(filelist, dest, copy=False, create_new=False): def ensure_list(filename): - """Returns a list given either a string or a list - """ + """Returns a list given either a string or a list""" if isinstance(filename, (str, bytes)): return [filename] elif isinstance(filename, list): @@ -531,7 +529,7 @@ def ensure_list(filename): def simplify_list(filelist): """Returns a list if filelist is a list of length greater than 1, - otherwise returns the first element + otherwise returns the first element """ if len(filelist) > 1: return filelist @@ -866,7 +864,7 @@ def get_dependencies(name, environ): o, e = proc.communicate() deps = o.rstrip() except Exception as ex: - deps = f'{command!r} failed' + deps = f"{command!r} failed" fmlogger.warning(f"Could not get dependencies of {name}s. Error:\n{ex}") return deps diff --git a/nipype/utils/logger.py b/nipype/utils/logger.py index 3638843722..bfa23628a4 100644 --- a/nipype/utils/logger.py +++ b/nipype/utils/logger.py @@ -17,8 +17,7 @@ class Logging(object): - """Nipype logging class - """ + """Nipype logging class""" fmt = "%(asctime)s,%(msecs)d %(name)-2s " "%(levelname)-2s:\n\t %(message)s" datefmt = "%y%m%d-%H:%M:%S" diff --git a/nipype/utils/onetime.py b/nipype/utils/onetime.py index e9a905254b..e0bf9e7747 100644 --- a/nipype/utils/onetime.py +++ b/nipype/utils/onetime.py @@ -20,19 +20,18 @@ class OneTimeProperty(object): - """A descriptor to make special properties that become normal attributes. - """ + """A descriptor to make special properties that become normal attributes.""" def __init__(self, func): """Create a OneTimeProperty instance. - Parameters - ---------- - func : method + Parameters + ---------- + func : method - The method that will be called the first time to compute a value. - Afterwards, the method's name will be a standard attribute holding - the value of this computation. + The method that will be called the first time to compute a value. + Afterwards, the method's name will be a standard attribute holding + the value of this computation. """ self.getter = func self.name = func.__name__ diff --git a/setup.py b/setup.py index dc6ef0d973..1a666c6115 100755 --- a/setup.py +++ b/setup.py @@ -20,7 +20,7 @@ class BuildWithCommitInfoCommand(build_py): - """ Return extended build command class for recording commit + """Return extended build command class for recording commit The extended command tries to run git to find the current commit, getting the empty string if it fails. 
It then writes the commit hash into a file From c518433959bd6f3a6f82f7a8dbe501352ccc40d1 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 1 Apr 2021 12:28:26 -0400 Subject: [PATCH 0954/1665] make specs --- nipype/interfaces/fsl/tests/test_auto_BET.py | 1 + .../mrtrix3/tests/test_auto_DWIPreproc.py | 119 ++++++++++++++---- 2 files changed, 96 insertions(+), 24 deletions(-) diff --git a/nipype/interfaces/fsl/tests/test_auto_BET.py b/nipype/interfaces/fsl/tests/test_auto_BET.py index 9d1b18846d..82757a10a6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_BET.py +++ b/nipype/interfaces/fsl/tests/test_auto_BET.py @@ -32,6 +32,7 @@ def test_BET_inputs(): ), in_file=dict( argstr="%s", + copyfile=False, extensions=None, mandatory=True, position=0, diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIPreproc.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIPreproc.py index 76fae6548f..7f226fe3cd 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIPreproc.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIPreproc.py @@ -4,34 +4,90 @@ def test_DWIPreproc_inputs(): input_map = dict( - align_seepi=dict(argstr="-align_seepi"), - args=dict(argstr="%s"), - bval_scale=dict(argstr="-bvalue_scaling %s"), - eddy_options=dict(argstr='-eddy_options "%s"'), - environ=dict(nohash=True, usedefault=True), - export_grad_fsl=dict(argstr="-export_grad_fsl"), - export_grad_mrtrix=dict(argstr="-export_grad_mrtrix"), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), - in_bval=dict(extensions=None), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), - in_epi=dict(argstr="-se_epi %s", extensions=None), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), - nthreads=dict(argstr="-nthreads %d", nohash=True), + align_seepi=dict( + argstr="-align_seepi", + ), + args=dict( + argstr="%s", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + eddy_options=dict( + argstr='-eddy_options "%s"', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + export_grad_fsl=dict( + argstr="-export_grad_fsl", + ), + export_grad_mrtrix=dict( + argstr="-export_grad_mrtrix", + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_epi=dict( + argstr="-se_epi %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), out_file=dict( - argstr="%s", extensions=None, mandatory=True, position=1, usedefault=True + argstr="%s", + extensions=None, + mandatory=True, + position=1, + usedefault=True, + ), + out_grad_fsl=dict( + argstr="%s, %s", + requires=["export_grad_fsl"], ), - out_grad_fsl=dict(argstr="%s, %s", requires=["export_grad_fsl"]), out_grad_mrtrix=dict( argstr="%s", extensions=None, requires=["export_grad_mrtrix"], usedefault=True, ), - pe_dir=dict(argstr="-pe_dir %s", mandatory=True), - ro_time=dict(argstr="-readout_time %f"), - rpe_options=dict(argstr="-rpe_%s", mandatory=True, position=2), - topup_options=dict(argstr='-topup_options "%s"'), + pe_dir=dict( + argstr="-pe_dir %s", + mandatory=True, + ), + ro_time=dict( + argstr="-readout_time %f", + ), + rpe_options=dict( + argstr="-rpe_%s", + mandatory=True, + position=2, + ), + topup_options=dict( + 
argstr='-topup_options "%s"', + ), ) inputs = DWIPreproc.input_spec() @@ -42,10 +98,25 @@ def test_DWIPreproc_inputs(): def test_DWIPreproc_outputs(): output_map = dict( - out_file=dict(argstr="%s", extensions=None), - out_fsl_bval=dict(argstr="%s", extensions=None, usedefault=True), - out_fsl_bvec=dict(argstr="%s", extensions=None, usedefault=True), - out_grad_mrtrix=dict(argstr="%s", extensions=None, usedefault=True), + out_file=dict( + argstr="%s", + extensions=None, + ), + out_fsl_bval=dict( + argstr="%s", + extensions=None, + usedefault=True, + ), + out_fsl_bvec=dict( + argstr="%s", + extensions=None, + usedefault=True, + ), + out_grad_mrtrix=dict( + argstr="%s", + extensions=None, + usedefault=True, + ), ) outputs = DWIPreproc.output_spec() From c6168de36d43d35c30247082fa58882f15c84aa2 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 1 Apr 2021 13:13:10 -0400 Subject: [PATCH 0955/1665] MNT: Add .git-blame-ignore-revs To ignore style-only commits, run: git config blame.ignoreRevsFile .git-blame-ignore-revs --- .git-blame-ignore-revs | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 .git-blame-ignore-revs diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000000..d5b7c0107a --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,18 @@ +# Commits with messages like "STY: black" or "run black" +f64bf338f630a9ee5cbe7a3ec98c68292897e720 +83358d7f17aac07cb90d0330f11ea2322e2974d8 +faef7d0f93013a700c882f709e98fb3cd36ebb03 +d50c1858564c0b3073fb23c54886a0454cb66afa +417b8897a116fcded5000e21e2b6ccbe29452a52 +aaf677a87f64c485f3e305799e4a5dc73b69e5fb +f763008442d88d8ce00ec266698268389415f8d6 +b1eccafd4edc8503b02d715f5b5f6f783520fdf9 +70db90349598cc7f26a4a513779529fba7d0a797 +6c1d91d71f6f0db0e985bd2adc34206442b0653d +97bdbd5f48ab242de5288ba4715192a27619a803 +78fa360f5b785224349b8b85b07e510d2233bb63 +7f85f43a34de8bff8e634232c939b17cee8e8fc5 +9c50b5daa797def5672dd057155b0e2c658853e2 +47194993ae14aceeec436cfb3769def667196668 +75653feadc6667d5313d83e9c62a5d5819771a9c +497b44d680eee0892fa59c6aaaae22a17d70a536 From 29e2760dfb5b262fba458004193e0e2fd04f9f6d Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 1 Apr 2021 13:29:36 -0400 Subject: [PATCH 0956/1665] CI: Fix syntax error in env.sh --- tools/ci/env.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/ci/env.sh b/tools/ci/env.sh index 87e766b8c2..15e12275b6 100644 --- a/tools/ci/env.sh +++ b/tools/ci/env.sh @@ -10,7 +10,7 @@ NIGHTLY_WHEELS="https://pypi.anaconda.org/scipy-wheels-nightly/simple" STAGING_WHEELS="https://pypi.anaconda.org/multibuild-wheels-staging/simple" PRE_PIP_FLAGS="--pre --extra-index-url $NIGHTLY_WHEELS --extra-index-url $STAGING_WHEELS" -for CONF in (/etc/fsl/fsl.sh /etc/afni/afni.sh); do +for CONF in /etc/fsl/fsl.sh /etc/afni/afni.sh; do if [ -r $CONF ]; then source $CONF; fi done From fe4cf08856a521159430aefb031fc4db119cd1fb Mon Sep 17 00:00:00 2001 From: henrymj Date: Fri, 9 Apr 2021 09:02:04 -0700 Subject: [PATCH 0957/1665] change fsl interface randomise --f_only to --fonly for #3322 --- nipype/interfaces/fsl/model.py | 2 +- nipype/interfaces/fsl/tests/test_auto_Randomise.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/fsl/model.py b/nipype/interfaces/fsl/model.py index 8c269caac7..15fb36f6c0 100644 --- a/nipype/interfaces/fsl/model.py +++ b/nipype/interfaces/fsl/model.py @@ -2245,7 +2245,7 @@ class RandomiseInputSpec(FSLCommandInputSpec): desc=("carry out Threshold-Free Cluster 
Enhancement with 2D " "optimisation"), argstr="--T2", ) - f_only = traits.Bool(desc="calculate f-statistics only", argstr="--f_only") + f_only = traits.Bool(desc="calculate f-statistics only", argstr="--fonly") raw_stats_imgs = traits.Bool( desc="output raw ( unpermuted ) statistic images", argstr="-R" ) diff --git a/nipype/interfaces/fsl/tests/test_auto_Randomise.py b/nipype/interfaces/fsl/tests/test_auto_Randomise.py index cf816c56de..9b0b74bf28 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Randomise.py +++ b/nipype/interfaces/fsl/tests/test_auto_Randomise.py @@ -37,7 +37,7 @@ def test_Randomise_inputs(): argstr="-S %.2f", ), f_only=dict( - argstr="--f_only", + argstr="--fonly", ), fcon=dict( argstr="-f %s", From 5413e77315e270b6c43274f2af80c93ec2d13f59 Mon Sep 17 00:00:00 2001 From: Raunak Jalan Date: Thu, 22 Apr 2021 10:22:15 -0400 Subject: [PATCH 0958/1665] Fix:#3329 Fixed Developer Setup Link in install.rst --- doc/users/install.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/users/install.rst b/doc/users/install.rst index a16d41c5df..1e55e22907 100644 --- a/doc/users/install.rst +++ b/doc/users/install.rst @@ -116,7 +116,7 @@ NeuroDocker_. Installation for developers --------------------------- -Developers should start `here <../devel/testing_nipype.html>`_. +Developers should start `here <../devel/testing_nipype.rst>`_. Developers can also use this docker container: `docker pull nipype/nipype:master` From fe35b216bb6e20b90cea6775bccab5305f467de8 Mon Sep 17 00:00:00 2001 From: Raunak Jalan <41023976+RaunakJalan@users.noreply.github.com> Date: Thu, 22 Apr 2021 23:58:40 +0530 Subject: [PATCH 0959/1665] Adding Raunak Jalan as contributor --- .zenodo.json | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.zenodo.json b/.zenodo.json index e525f14326..b28a449044 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -807,6 +807,9 @@ "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", "orcid": "0000-0002-5312-6729" + }, + { + "name": "Jalan, Raunak", } ], "keywords": [ From dc57459bf6f183c1ac5b937708f599f882432a0e Mon Sep 17 00:00:00 2001 From: Raunak Jalan <41023976+RaunakJalan@users.noreply.github.com> Date: Fri, 23 Apr 2021 00:52:35 +0530 Subject: [PATCH 0960/1665] Adding Raunak Jalan as Contributor. --- .zenodo.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index b28a449044..1c82949afb 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -798,6 +798,9 @@ "name": "Mihai, Paul Glad", "orcid": "0000-0001-5715-6442" }, + { + "name": "Jalan, Raunak", + }, { "affiliation": "Department of Psychology, Stanford University", "name": "Gorgolewski, Krzysztof J.", @@ -807,9 +810,6 @@ "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", "orcid": "0000-0002-5312-6729" - }, - { - "name": "Jalan, Raunak", } ], "keywords": [ From e2a996e60328ec1397f44505bc5fd3a7cbaf1111 Mon Sep 17 00:00:00 2001 From: Lea Waller Date: Wed, 28 Apr 2021 11:27:46 +0200 Subject: [PATCH 0961/1665] Improve performance for _has_node - Networkx represents graphs as a dictionary where the nodes are the keys. 
As such, we can use the built-in __contains__ function for fast lookup - We can also iterate the graphs directly without constructing a NodeView using self._graph.nodes() --- nipype/pipeline/engine/workflows.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py index 9b6e60ffaf..30f61a81ae 100644 --- a/nipype/pipeline/engine/workflows.py +++ b/nipype/pipeline/engine/workflows.py @@ -912,10 +912,12 @@ def _get_all_nodes(self): return allnodes def _has_node(self, wanted_node): - for node in self._graph.nodes(): + if wanted_node in self._graph: + return True # best case scenario + for node in self._graph: # iterate otherwise if wanted_node == node: return True - if isinstance(node, Workflow): + if hasattr(node, "_has_node"): # hasattr is faster than isinstance if node._has_node(wanted_node): return True return False From 176cff010b7d30f96f573f620df4fd020f83ba10 Mon Sep 17 00:00:00 2001 From: Lea Waller Date: Wed, 28 Apr 2021 20:14:42 +0200 Subject: [PATCH 0962/1665] Use set instead of np.unique to avoid sorting - Also avoids casting the list to np.array --- nipype/pipeline/engine/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index f77f771ea7..3557475ffe 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -753,7 +753,7 @@ def _merge_graphs( # nodes of the supergraph. supernodes = supergraph.nodes() ids = [n._hierarchy + n._id for n in supernodes] - if len(np.unique(ids)) != len(ids): + if len(set(ids)) != len(ids): # This should trap the problem of miswiring when multiple iterables are # used at the same level. The use of the template below for naming # updates to nodes is the general solution. From 1c789ad95f88e8401c8e19d6f94fe4ac162d4825 Mon Sep 17 00:00:00 2001 From: Lea Waller Date: Thu, 29 Apr 2021 09:41:43 +0200 Subject: [PATCH 0963/1665] Add a cache for nested workflows - Update every time we connect/disconenct or add/remove a node - Keep track of which nodes are workflows and which are not - As a result, we do not need to iterate all nodes to determine the result of `_has_node`, we can use O(1) set operations --- nipype/pipeline/engine/workflows.py | 44 ++++++++++++++++++++--------- 1 file changed, 31 insertions(+), 13 deletions(-) diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py index 30f61a81ae..69ab287536 100644 --- a/nipype/pipeline/engine/workflows.py +++ b/nipype/pipeline/engine/workflows.py @@ -59,6 +59,9 @@ def __init__(self, name, base_dir=None): super(Workflow, self).__init__(name, base_dir) self._graph = nx.DiGraph() + self._nodes_cache = set() + self._nested_workflows_cache = set() + # PUBLIC API def clone(self, name): """Clone a workflow @@ -269,6 +272,8 @@ def connect(self, *args, **kwargs): "(%s, %s): new edge data: %s", srcnode, destnode, str(edge_data) ) + self._update_node_cache() + def disconnect(self, *args): """Disconnect nodes See the docstring for connect for format. 
@@ -314,6 +319,8 @@ def disconnect(self, *args): else: self._graph.add_edges_from([(srcnode, dstnode, edge_data)]) + self._update_node_cache() + def add_nodes(self, nodes): """ Add nodes to a workflow @@ -346,6 +353,7 @@ def add_nodes(self, nodes): if node._hierarchy is None: node._hierarchy = self.name self._graph.add_nodes_from(newnodes) + self._update_node_cache() def remove_nodes(self, nodes): """ Remove nodes from a workflow @@ -356,6 +364,7 @@ def remove_nodes(self, nodes): A list of EngineBase-based objects """ self._graph.remove_nodes_from(nodes) + self._update_node_cache() # Input-Output access @property @@ -903,23 +912,32 @@ def _set_node_input(self, node, param, source, sourceinfo): node.set_input(param, deepcopy(newval)) def _get_all_nodes(self): - allnodes = [] - for node in self._graph.nodes(): - if isinstance(node, Workflow): - allnodes.extend(node._get_all_nodes()) - else: - allnodes.append(node) + allnodes = [ + *self._nodes_cache.difference(self._nested_workflows_cache) + ] # all nodes that are not workflows + for node in self._nested_workflows_cache: + allnodes.extend(node._get_all_nodes()) return allnodes + def _update_node_cache(self): + nodes = set(self._graph) + + added_nodes = nodes.difference(self._nodes_cache) + removed_nodes = self._nodes_cache.difference(nodes) + + self._nodes_cache = nodes + self._nested_workflows_cache.difference_update(removed_nodes) + + for node in added_nodes: + if isinstance(node, Workflow): + self._nested_workflows_cache.add(node) + def _has_node(self, wanted_node): - if wanted_node in self._graph: - return True # best case scenario - for node in self._graph: # iterate otherwise - if wanted_node == node: + if wanted_node in self._nodes_cache: + return True + for node in self._nested_workflows_cache: + if node._has_node(wanted_node): return True - if hasattr(node, "_has_node"): # hasattr is faster than isinstance - if node._has_node(wanted_node): - return True return False def _create_flat_graph(self): From bc85f3e7a4c2f9b5f7013938c07b7f3e94f66c58 Mon Sep 17 00:00:00 2001 From: Lea Waller Date: Thu, 29 Apr 2021 09:43:21 +0200 Subject: [PATCH 0964/1665] Use set for newnodes to make adding faster - Do not need to loop over entries - Faster O(1) __contains__ --- nipype/pipeline/engine/workflows.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py index 69ab287536..eb73afc2ee 100644 --- a/nipype/pipeline/engine/workflows.py +++ b/nipype/pipeline/engine/workflows.py @@ -144,7 +144,7 @@ def connect(self, *args, **kwargs): self.disconnect(connection_list) return - newnodes = [] + newnodes = set() for srcnode, destnode, _ in connection_list: if self in [srcnode, destnode]: msg = ( @@ -154,9 +154,9 @@ def connect(self, *args, **kwargs): raise IOError(msg) if (srcnode not in newnodes) and not self._has_node(srcnode): - newnodes.append(srcnode) + newnodes.add(srcnode) if (destnode not in newnodes) and not self._has_node(destnode): - newnodes.append(destnode) + newnodes.add(destnode) if newnodes: self._check_nodes(newnodes) for node in newnodes: From e9fb94e97e9428b108b01df859cf3f9f95c31968 Mon Sep 17 00:00:00 2001 From: Lea Waller Date: Thu, 29 Apr 2021 09:44:08 +0200 Subject: [PATCH 0965/1665] Use sets for connected_ports - Faster operations for update and contains --- nipype/pipeline/engine/workflows.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py 
index eb73afc2ee..1498b07c55 100644 --- a/nipype/pipeline/engine/workflows.py +++ b/nipype/pipeline/engine/workflows.py @@ -166,15 +166,16 @@ def connect(self, *args, **kwargs): connected_ports = {} for srcnode, destnode, connects in connection_list: if destnode not in connected_ports: - connected_ports[destnode] = [] + connected_ports[destnode] = set() # check to see which ports of destnode are already # connected. if not disconnect and (destnode in self._graph.nodes()): for edge in self._graph.in_edges(destnode): data = self._graph.get_edge_data(*edge) - for sourceinfo, destname in data["connect"]: - if destname not in connected_ports[destnode]: - connected_ports[destnode] += [destname] + connected_ports[destnode].update( + destname + for _, destname in data["connect"] + ) for source, dest in connects: # Currently datasource/sink/grabber.io modules # determine their inputs/outputs depending on @@ -229,7 +230,7 @@ def connect(self, *args, **kwargs): ) if sourcename and not srcnode._check_outputs(sourcename): not_found.append(["out", srcnode.name, sourcename]) - connected_ports[destnode] += [dest] + connected_ports[destnode].add(dest) infostr = [] for info in not_found: infostr += [ From 89d3059b7be8e026fdc5c384836c6ab94a0a7020 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A1tima=20Machado?= Date: Thu, 29 Apr 2021 17:35:44 +0100 Subject: [PATCH 0966/1665] Added more variable descriptions. --- nipype/interfaces/cat12/preprocess.py | 64 ++++++++++++++++----------- 1 file changed, 37 insertions(+), 27 deletions(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index 18da122061..2fab299f15 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -18,7 +18,6 @@ class CAT12SegmentInputSpec(SPMCommandInputSpec): copyfile=False) n_jobs = traits.Int(1, usedefault=True, mandatory=True, field="nproc", desc="Number of threads") - use_prior = Str(field="useprior", usedefault=True) _help_affine_reg = 'Affine Regularization. The procedure is a local optimisation, so it needs reasonable initial ' \ 'starting estimates. Images should be placed in approximate alignment using the Display ' \ @@ -129,16 +128,19 @@ class CAT12SegmentInputSpec(SPMCommandInputSpec): 'further analysis! For distinction, these files contain "preview" in their filename and they' \ ' are not available as batch dependencies objects. 
' surface_and_thickness_estimation = traits.Int(1, field="surface", desc=_help_surf, usedefault=True) - surface_measures = traits.Int(1, field="output.surf_measures", usedefault=True) + surface_measures = traits.Int(1, field="output.surf_measures", usedefault=True, desc="Extract surface measures") # Templates neuromorphometrics = traits.Bool(True, field="output.ROImenu.atlases.neuromorphometrics", - usedefault=True) - lpba40 = traits.Bool(True, field="output.ROImenu.atlases.lpba40", usedefault=True) - cobra = traits.Bool(True, field="output.ROImenu.atlases.hammers", usedefault=True) - hammers = traits.Bool(True, field="output.ROImenu.atlases.cobra", usedefault=True) + usedefault=True, desc="Extract brain measures for Neuromorphometrics template") + lpba40 = traits.Bool(True, field="output.ROImenu.atlases.lpba40", usedefault=True, + desc="Extract brain measures for LPBA40 template") + cobra = traits.Bool(True, field="output.ROImenu.atlases.hammers", usedefault=True, + desc="Extract brain measures for COBRA template") + hammers = traits.Bool(True, field="output.ROImenu.atlases.cobra", usedefault=True, + desc="Extract brain measures for Hammers template") own_atlas = InputMultiPath(ImageFileSPM(exists=True), field="output.ROImenu.atlases.ownatlas", - desc="Own Atlas", mandatory=False, copyfile=False) + desc="Extract brain measures for a given template", mandatory=False, copyfile=False) _dartel_help = 'This option is to export data into a form that can be used with DARTEL. The SPM default is to ' \ 'only apply rigid body transformation. However, a more appropriate option is to apply affine ' \ @@ -146,47 +148,55 @@ class CAT12SegmentInputSpec(SPMCommandInputSpec): 'non-linearly register brains to the template.' # Grey matter - _gm_desc = 'Options to save grey matter images.' - gm_output_native = traits.Bool(False, field="output.GM.native", usedefault=True, desc=_gm_desc) - gm_output_modulated = traits.Bool(True, field="output.GM.mod", usedefault=True, desc=_gm_desc) - gm_output_dartel = traits.Bool(False, field="output.GM.dartel", usedefault=True, desc=_gm_desc) + gm_output_native = traits.Bool(False, field="output.GM.native", usedefault=True, + desc='Save modulated grey matter images.') + gm_output_modulated = traits.Bool(True, field="output.GM.mod", usedefault=True, + desc='Save native grey matter images.') + gm_output_dartel = traits.Bool(False, field="output.GM.dartel", usedefault=True, + desc='Save dartel grey matter images.') # White matter _wm_desc = 'Options to save white matter images.' - wm_output_native = traits.Bool(False, field="output.WM.native", usedefault=True, desc=_wm_desc) - wm_output_modulated = traits.Bool(True, field="output.WM.mod", usedefault=True, desc=_wm_desc) - wm_output_dartel = traits.Bool(False, field="output.WM.dartel", usedefault=True, desc=_wm_desc) + wm_output_native = traits.Bool(False, field="output.WM.native", usedefault=True, + desc='Save dartel white matter images.') + wm_output_modulated = traits.Bool(True, field="output.WM.mod", usedefault=True, + desc='Save dartel white matter images.') + wm_output_dartel = traits.Bool(False, field="output.WM.dartel", usedefault=True, + desc='Save dartel white matter images.') # CSF matter _csf_desc = 'Options to save CSF images.' 
- csf_output_native = traits.Bool(False, field="output.CSF.native", usedefault=True, desc=_csf_desc) - csf_output_modulated = traits.Bool(True, field="output.CSF.mod", usedefault=True, desc=_csf_desc) - csf_output_dartel = traits.Bool(False, field="output.CSF.dartel", usedefault=True, desc=_csf_desc) + csf_output_native = traits.Bool(False, field="output.CSF.native", usedefault=True, + desc='Save dartel CSF images.') + csf_output_modulated = traits.Bool(True, field="output.CSF.mod", usedefault=True, + desc='Save dartel CSF images.') + csf_output_dartel = traits.Bool(False, field="output.CSF.dartel", usedefault=True, + desc='Save dartel CSF images.') # Labels - _help_label_desc = 'This is the option to save a labeled version of your segmentations for fast visual ' \ + _help_label_desc = 'This is the option to save a labeled version of your segmentations in the %s space for fast visual ' \ 'comparision. Labels are saved as Partial Volume Estimation (PVE) values with different mix ' \ 'classes for GM-WM (2.5) and GM-CSF (1.5). BG=0, CSF=1, GM=2, WM=3, WMH=4 (if WMHC=3), ' \ 'SL=1.5 (if SLC)' - label_native = traits.Bool(False, field="output.label.native", usedefault=True, desc=_help_label_desc) - label_warped = traits.Bool(True, field="output.label.warped", usedefault=True, desc=_help_label_desc) - label_dartel = traits.Bool(False, field="output.label.dartel", usedefault=True, desc=_help_label_desc) - output_labelnative = traits.Bool(False, field="output.labelnative", usedefault=True, desc=_help_label_desc) + label_native = traits.Bool(False, field="output.label.native", usedefault=True, desc=_help_label_desc % "native") + label_warped = traits.Bool(True, field="output.label.warped", usedefault=True, desc=_help_label_desc % "warped") + label_dartel = traits.Bool(False, field="output.label.dartel", usedefault=True, desc=_help_label_desc % "dartel") + output_labelnative = traits.Bool(False, field="output.labelnative", usedefault=True, desc=_help_label_desc % "native") # Bias - save_bias_corrected = traits.Bool(True, field="output.bias.warped", usedefault=True) + save_bias_corrected = traits.Bool(True, field="output.bias.warped", usedefault=True, desc="Save bias corrected image") # las _las_desc = 'This is the option to save a bias, noise, and local intensity corrected version of the original T1' \ - ' image. MR images are usually corrupted by a smooth, spatially varying artifact that modulates the' \ + ' image in the %s space. MR images are usually corrupted by a smooth, spatially varying artifact that modulates the' \ ' intensity of the image (bias). These artifacts, although not usually a problem for visual ' \ 'inspection, can impede automated processing of the images. The bias corrected version should have ' \ 'more uniform intensities within the different types of tissues and can be saved in native space ' \ 'and/or normalised. Noise is corrected by an adaptive non-local mean (NLM) filter (Manjon 2008, ' \ 'Medical Image Analysis 12).' 
- las_native = traits.Bool(False, field="output.las.native", usedefault=True, desc=_las_desc) - las_warped = traits.Bool(True, field="output.las.warped", usedefault=True, desc=_las_desc) - las_dartel = traits.Bool(False, field="output.las.dartel", usedefault=True, desc=_las_desc) + las_native = traits.Bool(False, field="output.las.native", usedefault=True, desc=_las_desc % "native") + las_warped = traits.Bool(True, field="output.las.warped", usedefault=True, desc=_las_desc % "warped") + las_dartel = traits.Bool(False, field="output.las.dartel", usedefault=True, desc=_las_desc % "dartel") # Jacobian Warped _help_jacobian = 'This is the option to save the Jacobian determinant, which expresses local volume changes. This' \ From 3fa52a93ed5cbb90ee9603116514972e67769cc3 Mon Sep 17 00:00:00 2001 From: Lea Waller Date: Fri, 30 Apr 2021 09:43:49 +0200 Subject: [PATCH 0967/1665] Remove topological sort for _generate_flatgraph - The function is only adding/removing nodes and adjusting their connections - As such, there are no serial dependencies, and we can iterate in any order --- nipype/pipeline/engine/workflows.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py index 1498b07c55..0a8000bfda 100644 --- a/nipype/pipeline/engine/workflows.py +++ b/nipype/pipeline/engine/workflows.py @@ -970,7 +970,7 @@ def _generate_flatgraph(self): raise Exception( ("Workflow: %s is not a directed acyclic graph " "(DAG)") % self.name ) - nodes = list(nx.topological_sort(self._graph)) + nodes = list(self._graph.nodes) for node in nodes: logger.debug("processing node: %s", node) if isinstance(node, Workflow): From 49685aa8edb05afd2e11f1abb7ce5059404b7ae5 Mon Sep 17 00:00:00 2001 From: Lea Waller Date: Fri, 30 Apr 2021 09:47:26 +0200 Subject: [PATCH 0968/1665] Remove regular expression re-compiles - The only thing that changes about the regular expression is the prefix - We can also detect the prefix with startswith, and then use the same regular expression across the loop - This means that Python can cache the compiled regex internally, and we save some time --- nipype/pipeline/engine/utils.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index 3557475ffe..de769607c2 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -1100,11 +1100,12 @@ def make_field_func(*pair): old_edge_dict = jedge_dict[jnode] # the edge source node replicates expansions = defaultdict(list) - for node in graph_in.nodes(): + for node in graph_in: for src_id in list(old_edge_dict.keys()): # Drop the original JoinNodes; only concerned with # generated Nodes - if hasattr(node, "joinfield") and node.itername == src_id: + itername = node.itername + if hasattr(node, "joinfield") and itername == src_id: continue # Patterns: # - src_id : Non-iterable node @@ -1113,10 +1114,12 @@ def make_field_func(*pair): # - src_id.[a-z]I.[a-z]\d+ : # Non-IdentityInterface w/ iterables # - src_idJ\d+ : JoinNode(IdentityInterface) - if re.match( - src_id + r"((\.[a-z](I\.[a-z])?|J)\d+)?$", node.itername - ): - expansions[src_id].append(node) + if itername.startswith(src_id): + itername = itername[len(src_id):] + if re.fullmatch( + r"((\.[a-z](I\.[a-z])?|J)\d+)?", itername + ): + expansions[src_id].append(node) for in_id, in_nodes in list(expansions.items()): logger.debug( "The join node %s input %s was expanded" " to %d nodes.", From 
54aa6adb7b2670fb7361687e1e83db171cfd407e Mon Sep 17 00:00:00 2001 From: Lea Waller Date: Fri, 30 Apr 2021 17:14:46 +0200 Subject: [PATCH 0969/1665] Apply suggestions from code review Co-authored-by: Chris Markiewicz --- nipype/pipeline/engine/utils.py | 6 ++---- nipype/pipeline/engine/workflows.py | 6 ++---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index de769607c2..4e8b6d2e8c 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -1115,10 +1115,8 @@ def make_field_func(*pair): # Non-IdentityInterface w/ iterables # - src_idJ\d+ : JoinNode(IdentityInterface) if itername.startswith(src_id): - itername = itername[len(src_id):] - if re.fullmatch( - r"((\.[a-z](I\.[a-z])?|J)\d+)?", itername - ): + suffix = itername[len(src_id):] + if re.fullmatch(r"((\.[a-z](I\.[a-z])?|J)\d+)?", suffix): expansions[src_id].append(node) for in_id, in_nodes in list(expansions.items()): logger.debug( diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py index 0a8000bfda..30878b9b12 100644 --- a/nipype/pipeline/engine/workflows.py +++ b/nipype/pipeline/engine/workflows.py @@ -913,11 +913,9 @@ def _set_node_input(self, node, param, source, sourceinfo): node.set_input(param, deepcopy(newval)) def _get_all_nodes(self): - allnodes = [ - *self._nodes_cache.difference(self._nested_workflows_cache) - ] # all nodes that are not workflows + allnodes = self._nodes_cache - self._nested_workflows_cache for node in self._nested_workflows_cache: - allnodes.extend(node._get_all_nodes()) + allnodes |= node._get_all_nodes() return allnodes def _update_node_cache(self): From 0b49a70a007d9de7387b63db20dcea43fb263d49 Mon Sep 17 00:00:00 2001 From: Lea Waller Date: Fri, 30 Apr 2021 17:25:50 +0200 Subject: [PATCH 0970/1665] Apply formatting suggestion from code review --- nipype/pipeline/engine/workflows.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py index 30878b9b12..4dd204fba2 100644 --- a/nipype/pipeline/engine/workflows.py +++ b/nipype/pipeline/engine/workflows.py @@ -932,12 +932,13 @@ def _update_node_cache(self): self._nested_workflows_cache.add(node) def _has_node(self, wanted_node): - if wanted_node in self._nodes_cache: - return True - for node in self._nested_workflows_cache: - if node._has_node(wanted_node): - return True - return False + return ( + wanted_node in self._nodes_cache or + any( + wf._has_node(wanted_node) + for wf in self._nested_workflows_cache + ) + ) def _create_flat_graph(self): """Make a simple DAG where no node is a workflow.""" From 672a2340ee09fbb05b808d454f9038548569a312 Mon Sep 17 00:00:00 2001 From: Lea Waller Date: Fri, 30 Apr 2021 18:42:20 +0200 Subject: [PATCH 0971/1665] Add suggestion from code review for `add_nodes` --- nipype/pipeline/engine/workflows.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py index 4dd204fba2..32be25f003 100644 --- a/nipype/pipeline/engine/workflows.py +++ b/nipype/pipeline/engine/workflows.py @@ -333,7 +333,7 @@ def add_nodes(self, nodes): newnodes = [] all_nodes = self._get_all_nodes() for node in nodes: - if self._has_node(node): + if node in all_nodes: raise IOError("Node %s already exists in the workflow" % node) if isinstance(node, Workflow): for subnode in node._get_all_nodes(): From 
0c485d3d1a5511421d85e1f4a44794b254932063 Mon Sep 17 00:00:00 2001 From: Lea Waller Date: Fri, 30 Apr 2021 22:05:11 +0200 Subject: [PATCH 0972/1665] Remove unnecessary calls to `_update_node_cache` - Apply suggestions from code review Co-authored-by: Chris Markiewicz --- nipype/pipeline/engine/workflows.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py index 32be25f003..184cfd5a57 100644 --- a/nipype/pipeline/engine/workflows.py +++ b/nipype/pipeline/engine/workflows.py @@ -273,7 +273,8 @@ def connect(self, *args, **kwargs): "(%s, %s): new edge data: %s", srcnode, destnode, str(edge_data) ) - self._update_node_cache() + if newnodes: + self._update_node_cache() def disconnect(self, *args): """Disconnect nodes @@ -320,8 +321,6 @@ def disconnect(self, *args): else: self._graph.add_edges_from([(srcnode, dstnode, edge_data)]) - self._update_node_cache() - def add_nodes(self, nodes): """ Add nodes to a workflow From 12deb959cccc431fb8222cc5854f1c92a0080021 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Fri, 30 Apr 2021 16:50:00 -0400 Subject: [PATCH 0973/1665] STY: black --- nipype/algorithms/confounds.py | 6 ++--- nipype/algorithms/mesh.py | 2 +- nipype/algorithms/modelgen.py | 2 +- nipype/algorithms/tests/test_CompCor.py | 2 +- nipype/algorithms/tests/test_TSNR.py | 2 +- nipype/external/cloghandler.py | 2 +- nipype/interfaces/base/core.py | 6 ++--- nipype/interfaces/base/specs.py | 4 +-- nipype/interfaces/cmtk/cmtk.py | 4 +-- nipype/interfaces/dipy/base.py | 4 +-- nipype/interfaces/fsl/fix.py | 2 +- nipype/interfaces/fsl/preprocess.py | 2 +- nipype/interfaces/io.py | 6 ++--- nipype/interfaces/matlab.py | 4 +-- nipype/interfaces/niftyfit/asl.py | 4 +-- nipype/interfaces/niftyfit/base.py | 2 +- nipype/interfaces/niftyfit/dwi.py | 8 +++--- nipype/interfaces/niftyfit/qt1.py | 4 +-- nipype/interfaces/niftyfit/tests/test_asl.py | 2 +- nipype/interfaces/niftyfit/tests/test_dwi.py | 4 +-- nipype/interfaces/niftyfit/tests/test_qt1.py | 2 +- nipype/interfaces/niftyreg/reg.py | 8 +++--- nipype/interfaces/niftyreg/regutils.py | 26 +++++++++---------- nipype/interfaces/niftyreg/tests/test_reg.py | 4 +-- .../niftyreg/tests/test_regutils.py | 12 ++++----- .../niftyseg/tests/test_label_fusion.py | 4 +-- .../interfaces/niftyseg/tests/test_stats.py | 4 +-- nipype/interfaces/nitime/analysis.py | 2 +- nipype/interfaces/nitime/tests/test_nitime.py | 2 +- nipype/interfaces/spm/utils.py | 8 +++--- nipype/interfaces/vtkbase.py | 10 +++---- nipype/pipeline/engine/tests/test_engine.py | 4 +-- nipype/pipeline/engine/utils.py | 2 +- nipype/pipeline/engine/workflows.py | 11 +++----- nipype/pipeline/plugins/legacymultiproc.py | 2 +- nipype/pipeline/plugins/multiproc.py | 2 +- nipype/utils/draw_gantt_chart.py | 2 +- nipype/utils/filemanip.py | 6 ++--- nipype/utils/matlabtools.py | 2 +- nipype/utils/misc.py | 2 +- nipype/utils/onetime.py | 2 +- 41 files changed, 92 insertions(+), 97 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index 8f7e31061b..1aa88d6e62 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -577,7 +577,7 @@ class CompCor(SimpleInterface): ] def __init__(self, *args, **kwargs): - """ exactly the same as compcor except the header """ + """exactly the same as compcor except the header""" super(CompCor, self).__init__(*args, **kwargs) self._header = "CompCor" @@ -774,7 +774,7 @@ class ACompCor(CompCor): """ def 
__init__(self, *args, **kwargs): - """ exactly the same as compcor except the header """ + """exactly the same as compcor except the header""" super(ACompCor, self).__init__(*args, **kwargs) self._header = "aCompCor" @@ -825,7 +825,7 @@ class TCompCor(CompCor): output_spec = TCompCorOutputSpec def __init__(self, *args, **kwargs): - """ exactly the same as compcor except the header """ + """exactly the same as compcor except the header""" super(TCompCor, self).__init__(*args, **kwargs) self._header = "tCompCor" self._mask_files = [] diff --git a/nipype/algorithms/mesh.py b/nipype/algorithms/mesh.py index ab70237030..188cc3ec7c 100644 --- a/nipype/algorithms/mesh.py +++ b/nipype/algorithms/mesh.py @@ -23,7 +23,7 @@ class TVTKBaseInterface(BaseInterface): - """ A base class for interfaces using VTK """ + """A base class for interfaces using VTK""" _redirect_x = True diff --git a/nipype/algorithms/modelgen.py b/nipype/algorithms/modelgen.py index a04ef04b3a..b6ceba68a2 100644 --- a/nipype/algorithms/modelgen.py +++ b/nipype/algorithms/modelgen.py @@ -516,7 +516,7 @@ def _generate_design(self, infolist=None): ) def _run_interface(self, runtime): - """""" + """ """ self._sessioninfo = None self._generate_design() return runtime diff --git a/nipype/algorithms/tests/test_CompCor.py b/nipype/algorithms/tests/test_CompCor.py index 314b9ed9bf..4d00f1b0c1 100644 --- a/nipype/algorithms/tests/test_CompCor.py +++ b/nipype/algorithms/tests/test_CompCor.py @@ -11,7 +11,7 @@ class TestCompCor: - """ Note: Tests currently do a poor job of testing functionality """ + """Note: Tests currently do a poor job of testing functionality""" filenames = { "functionalnii": "compcorfunc.nii", diff --git a/nipype/algorithms/tests/test_TSNR.py b/nipype/algorithms/tests/test_TSNR.py index 7da36d3661..26c1019b63 100644 --- a/nipype/algorithms/tests/test_TSNR.py +++ b/nipype/algorithms/tests/test_TSNR.py @@ -14,7 +14,7 @@ class TestTSNR: - """ Note: Tests currently do a poor job of testing functionality """ + """Note: Tests currently do a poor job of testing functionality""" in_filenames = {"in_file": "tsnrinfile.nii"} diff --git a/nipype/external/cloghandler.py b/nipype/external/cloghandler.py index c25670e600..08db7a3a0a 100644 --- a/nipype/external/cloghandler.py +++ b/nipype/external/cloghandler.py @@ -222,7 +222,7 @@ def flush(self): pass def _degrade(self, degrade, msg, *args): - """ Set degrade mode or not. Ignore msg. """ + """Set degrade mode or not. 
Ignore msg.""" self._rotateFailed = degrade del msg, args # avoid pychecker warnings diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index 6459a9c6c8..57e889da9b 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -87,12 +87,12 @@ def version(self): @classmethod def _outputs(cls): - """ Initializes outputs""" + """Initializes outputs""" raise NotImplementedError @classmethod def help(cls, returnhelp=False): - """ Prints class help """ + """Prints class help""" allhelp = format_help(cls) if returnhelp: return allhelp @@ -112,7 +112,7 @@ def aggregate_outputs(self, runtime=None, needed_outputs=None): raise NotImplementedError def _list_outputs(self): - """ List expected outputs""" + """List expected outputs""" raise NotImplementedError @classmethod diff --git a/nipype/interfaces/base/specs.py b/nipype/interfaces/base/specs.py index dda80d7697..5c92c7ec69 100644 --- a/nipype/interfaces/base/specs.py +++ b/nipype/interfaces/base/specs.py @@ -58,7 +58,7 @@ class BaseTraitedSpec(traits.HasTraits): package_version = nipype_version def __init__(self, **kwargs): - """ Initialize handlers and inputs""" + """Initialize handlers and inputs""" # NOTE: In python 2.6, object.__init__ no longer accepts input # arguments. HasTraits does not define an __init__ and # therefore these args were being ignored. @@ -79,7 +79,7 @@ def items(self): yield name, self.traits()[name] def __repr__(self): - """ Return a well-formatted representation of the traits """ + """Return a well-formatted representation of the traits""" outstr = [] for name, value in sorted(self.trait_get().items()): outstr.append("%s = %s" % (name, value)) diff --git a/nipype/interfaces/cmtk/cmtk.py b/nipype/interfaces/cmtk/cmtk.py index 1ba9d29a4c..a2718b92b5 100644 --- a/nipype/interfaces/cmtk/cmtk.py +++ b/nipype/interfaces/cmtk/cmtk.py @@ -190,7 +190,7 @@ def cmat( endpoint_name, intersections=False, ): - """ Create the connection matrix for each resolution using fibers and ROIs. 
""" + """Create the connection matrix for each resolution using fibers and ROIs.""" import scipy.io as sio stats = {} @@ -460,7 +460,7 @@ def cmat( def save_fibers(oldhdr, oldfib, fname, indices): - """ Stores a new trackvis file fname using only given indices """ + """Stores a new trackvis file fname using only given indices""" hdrnew = oldhdr.copy() outstreams = [] for i in indices: diff --git a/nipype/interfaces/dipy/base.py b/nipype/interfaces/dipy/base.py index a850d730f0..db0cb05e2d 100644 --- a/nipype/interfaces/dipy/base.py +++ b/nipype/interfaces/dipy/base.py @@ -27,13 +27,13 @@ def no_dipy(): - """ Check if dipy is available """ + """Check if dipy is available""" global HAVE_DIPY return not HAVE_DIPY def dipy_version(): - """ Check dipy version """ + """Check dipy version""" if no_dipy(): return None diff --git a/nipype/interfaces/fsl/fix.py b/nipype/interfaces/fsl/fix.py index 769513f8c3..ddfc01d19c 100644 --- a/nipype/interfaces/fsl/fix.py +++ b/nipype/interfaces/fsl/fix.py @@ -375,7 +375,7 @@ class Cleaner(CommandLine): cmd = "fix -a" def _get_cleaned_functional_filename(self, artifacts_list_filename): - """ extract the proper filename from the first line of the artifacts file """ + """extract the proper filename from the first line of the artifacts file""" artifacts_list_file = open(artifacts_list_filename, "r") functional_filename, extension = artifacts_list_file.readline().split(".") artifacts_list_file_path, artifacts_list_filename = os.path.split( diff --git a/nipype/interfaces/fsl/preprocess.py b/nipype/interfaces/fsl/preprocess.py index 54a41be039..5ab9a92010 100644 --- a/nipype/interfaces/fsl/preprocess.py +++ b/nipype/interfaces/fsl/preprocess.py @@ -236,7 +236,7 @@ def _gen_filename(self, name): class FASTInputSpec(FSLCommandInputSpec): - """ Defines inputs (trait classes) for FAST """ + """Defines inputs (trait classes) for FAST""" in_files = InputMultiPath( File(exists=True), diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index f6c6a893ad..bfa9353b98 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -169,7 +169,7 @@ class ProgressPercentage(object): """ def __init__(self, filename): - """""" + """ """ # Import packages import threading @@ -181,7 +181,7 @@ def __init__(self, filename): self._lock = threading.Lock() def __call__(self, bytes_amount): - """""" + """ """ # Import packages import sys @@ -206,7 +206,7 @@ def __call__(self, bytes_amount): # DataSink inputs class DataSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): - """""" + """ """ # Init inputspec data attributes base_directory = Str(desc="Path to the base directory for storing data.") diff --git a/nipype/interfaces/matlab.py b/nipype/interfaces/matlab.py index 59c36eb478..543b1e7a55 100644 --- a/nipype/interfaces/matlab.py +++ b/nipype/interfaces/matlab.py @@ -42,7 +42,7 @@ def get_matlab_command(): class MatlabInputSpec(CommandLineInputSpec): - """ Basic expected inputs to Matlab interface """ + """Basic expected inputs to Matlab interface""" script = traits.Str( argstr='-r "%s;exit"', desc="m-code to run", mandatory=True, position=-1 @@ -187,7 +187,7 @@ def _format_arg(self, name, trait_spec, value): return super(MatlabCommand, self)._format_arg(name, trait_spec, value) def _gen_matlab_command(self, argstr, script_lines): - """ Generates commands and, if mfile specified, writes it to disk.""" + """Generates commands and, if mfile specified, writes it to disk.""" cwd = os.getcwd() mfile = self.inputs.mfile or self.inputs.uses_mcr paths = [] diff --git 
a/nipype/interfaces/niftyfit/asl.py b/nipype/interfaces/niftyfit/asl.py index 5c23769e8f..c3d073d579 100644 --- a/nipype/interfaces/niftyfit/asl.py +++ b/nipype/interfaces/niftyfit/asl.py @@ -10,7 +10,7 @@ class FitAslInputSpec(CommandLineInputSpec): - """ Input Spec for FitAsl. """ + """Input Spec for FitAsl.""" desc = "Filename of the 4D ASL (control/label) source image (mandatory)." source_file = File( @@ -129,7 +129,7 @@ class FitAslInputSpec(CommandLineInputSpec): class FitAslOutputSpec(TraitedSpec): - """ Output Spec for FitAsl. """ + """Output Spec for FitAsl.""" desc = "Filename of the Cerebral Blood Flow map (in ml/100g/min)." cbf_file = File(exists=True, desc=desc) diff --git a/nipype/interfaces/niftyfit/base.py b/nipype/interfaces/niftyfit/base.py index 7af72cd30d..cdd116eb38 100644 --- a/nipype/interfaces/niftyfit/base.py +++ b/nipype/interfaces/niftyfit/base.py @@ -31,7 +31,7 @@ class NiftyFitCommand(CommandLine): _suffix = "_nf" def __init__(self, **inputs): - """ Init method calling super. No version to be checked.""" + """Init method calling super. No version to be checked.""" super(NiftyFitCommand, self).__init__(**inputs) def _gen_fname(self, basename, out_dir=None, suffix=None, ext=None): diff --git a/nipype/interfaces/niftyfit/dwi.py b/nipype/interfaces/niftyfit/dwi.py index 1cfc8826d8..9adb6b3817 100644 --- a/nipype/interfaces/niftyfit/dwi.py +++ b/nipype/interfaces/niftyfit/dwi.py @@ -10,7 +10,7 @@ class FitDwiInputSpec(CommandLineInputSpec): - """ Input Spec for FitDwi. """ + """Input Spec for FitDwi.""" # Inputs options source_file = File( @@ -281,7 +281,7 @@ class FitDwiInputSpec(CommandLineInputSpec): class FitDwiOutputSpec(TraitedSpec): - """ Output Spec for FitDwi. """ + """Output Spec for FitDwi.""" error_file = File(desc="Filename of parameter error maps") res_file = File(desc="Filename of model residual map") @@ -342,7 +342,7 @@ def _format_arg(self, name, trait_spec, value): class DwiToolInputSpec(CommandLineInputSpec): - """ Input Spec for DwiTool. """ + """Input Spec for DwiTool.""" desc = "The source image containing the fitted model." source_file = File( @@ -540,7 +540,7 @@ class DwiToolInputSpec(CommandLineInputSpec): class DwiToolOutputSpec(TraitedSpec): - """ Output Spec for DwiTool. """ + """Output Spec for DwiTool.""" desc = "Filename of multi-compartment model parameter map \ (-ivim,-ball,-nod)" diff --git a/nipype/interfaces/niftyfit/qt1.py b/nipype/interfaces/niftyfit/qt1.py index 9337de2306..870130234e 100644 --- a/nipype/interfaces/niftyfit/qt1.py +++ b/nipype/interfaces/niftyfit/qt1.py @@ -11,7 +11,7 @@ class FitQt1InputSpec(CommandLineInputSpec): - """ Input Spec for FitQt1. """ + """Input Spec for FitQt1.""" desc = "Filename of the 4D Multi-Echo T1 source image." source_file = File( @@ -144,7 +144,7 @@ class FitQt1InputSpec(CommandLineInputSpec): class FitQt1OutputSpec(TraitedSpec): - """ Output Spec for FitQt1. 
""" + """Output Spec for FitQt1.""" t1map_file = File(desc="Filename of the estimated output T1 map (in ms)") m0map_file = File(desc="Filename of the m0 map") diff --git a/nipype/interfaces/niftyfit/tests/test_asl.py b/nipype/interfaces/niftyfit/tests/test_asl.py index bbcfd4f01a..b500a9aa68 100644 --- a/nipype/interfaces/niftyfit/tests/test_asl.py +++ b/nipype/interfaces/niftyfit/tests/test_asl.py @@ -13,7 +13,7 @@ @pytest.mark.skipif(no_nifty_tool(cmd="fit_asl"), reason="niftyfit is not installed") def test_fit_asl(): - """ Testing FitAsl interface.""" + """Testing FitAsl interface.""" # Create the test node fit_asl = FitAsl() diff --git a/nipype/interfaces/niftyfit/tests/test_dwi.py b/nipype/interfaces/niftyfit/tests/test_dwi.py index a726301203..e3bfab3d66 100644 --- a/nipype/interfaces/niftyfit/tests/test_dwi.py +++ b/nipype/interfaces/niftyfit/tests/test_dwi.py @@ -12,7 +12,7 @@ @pytest.mark.skipif(no_nifty_tool(cmd="fit_dwi"), reason="niftyfit is not installed") def test_fit_dwi(): - """ Testing FitDwi interface.""" + """Testing FitDwi interface.""" # Create a node object fit_dwi = FitDwi() @@ -60,7 +60,7 @@ def test_fit_dwi(): @pytest.mark.skipif(no_nifty_tool(cmd="dwi_tool"), reason="niftyfit is not installed") def test_dwi_tool(): - """ Testing DwiTool interface.""" + """Testing DwiTool interface.""" # Create a node object dwi_tool = DwiTool() diff --git a/nipype/interfaces/niftyfit/tests/test_qt1.py b/nipype/interfaces/niftyfit/tests/test_qt1.py index 9146e7e97f..930176467f 100644 --- a/nipype/interfaces/niftyfit/tests/test_qt1.py +++ b/nipype/interfaces/niftyfit/tests/test_qt1.py @@ -12,7 +12,7 @@ @pytest.mark.skipif(no_nifty_tool(cmd="fit_qt1"), reason="niftyfit is not installed") def test_fit_qt1(): - """ Testing FitQt1 interface.""" + """Testing FitQt1 interface.""" # Create a node object fit_qt1 = FitQt1() diff --git a/nipype/interfaces/niftyreg/reg.py b/nipype/interfaces/niftyreg/reg.py index f149006d49..1fc357227f 100644 --- a/nipype/interfaces/niftyreg/reg.py +++ b/nipype/interfaces/niftyreg/reg.py @@ -15,7 +15,7 @@ class RegAladinInputSpec(NiftyRegCommandInputSpec): - """ Input Spec for RegAladin. """ + """Input Spec for RegAladin.""" # Input reference file ref_file = File( @@ -116,7 +116,7 @@ class RegAladinInputSpec(NiftyRegCommandInputSpec): class RegAladinOutputSpec(TraitedSpec): - """ Output Spec for RegAladin. """ + """Output Spec for RegAladin.""" aff_file = File(desc="The output affine file") res_file = File(desc="The output transformed image") @@ -163,7 +163,7 @@ def _list_outputs(self): class RegF3DInputSpec(NiftyRegCommandInputSpec): - """ Input Spec for RegF3D. """ + """Input Spec for RegF3D.""" # Input reference file ref_file = File( @@ -343,7 +343,7 @@ class RegF3DInputSpec(NiftyRegCommandInputSpec): class RegF3DOutputSpec(TraitedSpec): - """ Output Spec for RegF3D. """ + """Output Spec for RegF3D.""" cpp_file = File(desc="The output CPP file") res_file = File(desc="The output resampled image") diff --git a/nipype/interfaces/niftyreg/regutils.py b/nipype/interfaces/niftyreg/regutils.py index 032f106933..30799d8f2e 100644 --- a/nipype/interfaces/niftyreg/regutils.py +++ b/nipype/interfaces/niftyreg/regutils.py @@ -14,7 +14,7 @@ class RegResampleInputSpec(NiftyRegCommandInputSpec): - """ Input Spec for RegResample. """ + """Input Spec for RegResample.""" # Input reference file ref_file = File( @@ -78,7 +78,7 @@ class RegResampleInputSpec(NiftyRegCommandInputSpec): class RegResampleOutputSpec(TraitedSpec): - """ Output Spec for RegResample. 
""" + """Output Spec for RegResample.""" out_file = File(desc="The output filename of the transformed image") @@ -126,7 +126,7 @@ def _overload_extension(self, value, name=None): class RegJacobianInputSpec(NiftyRegCommandInputSpec): - """ Input Spec for RegJacobian. """ + """Input Spec for RegJacobian.""" # Reference file name desc = "Reference/target file (required if specifying CPP transformations." @@ -157,7 +157,7 @@ class RegJacobianInputSpec(NiftyRegCommandInputSpec): class RegJacobianOutputSpec(TraitedSpec): - """ Output Spec for RegJacobian. """ + """Output Spec for RegJacobian.""" out_file = File(desc="The output file") @@ -194,7 +194,7 @@ def _overload_extension(self, value, name=None): class RegToolsInputSpec(NiftyRegCommandInputSpec): - """ Input Spec for RegTools. """ + """Input Spec for RegTools.""" # Input image file in_file = File( @@ -302,7 +302,7 @@ class RegToolsInputSpec(NiftyRegCommandInputSpec): class RegToolsOutputSpec(TraitedSpec): - """ Output Spec for RegTools. """ + """Output Spec for RegTools.""" out_file = File(desc="The output file", exists=True) @@ -343,7 +343,7 @@ def _format_arg(self, name, spec, value): class RegAverageInputSpec(NiftyRegCommandInputSpec): - """ Input Spec for RegAverage. """ + """Input Spec for RegAverage.""" avg_files = traits.List( File(exist=True), @@ -462,7 +462,7 @@ class RegAverageInputSpec(NiftyRegCommandInputSpec): class RegAverageOutputSpec(TraitedSpec): - """ Output Spec for RegAverage. """ + """Output Spec for RegAverage.""" out_file = File(desc="Output file name") @@ -522,7 +522,7 @@ def _list_outputs(self): @property def cmdline(self): - """ Rewrite the cmdline to write options in text_file.""" + """Rewrite the cmdline to write options in text_file.""" argv = super(RegAverage, self).cmdline reg_average_cmd = os.path.join(os.getcwd(), "reg_average_cmd") with open(reg_average_cmd, "w") as f: @@ -531,7 +531,7 @@ def cmdline(self): class RegTransformInputSpec(NiftyRegCommandInputSpec): - """ Input Spec for RegTransform. """ + """Input Spec for RegTransform.""" ref1_file = File( exists=True, @@ -794,7 +794,7 @@ class RegTransformInputSpec(NiftyRegCommandInputSpec): class RegTransformOutputSpec(TraitedSpec): - """ Output Spec for RegTransform. """ + """Output Spec for RegTransform.""" out_file = File(desc="Output File (transformation in any format)") @@ -894,7 +894,7 @@ def _list_outputs(self): class RegMeasureInputSpec(NiftyRegCommandInputSpec): - """ Input Spec for RegMeasure. """ + """Input Spec for RegMeasure.""" # Input reference file ref_file = File( @@ -928,7 +928,7 @@ class RegMeasureInputSpec(NiftyRegCommandInputSpec): class RegMeasureOutputSpec(TraitedSpec): - """ Output Spec for RegMeasure. """ + """Output Spec for RegMeasure.""" out_file = File(desc="The output text file containing the measure") diff --git a/nipype/interfaces/niftyreg/tests/test_reg.py b/nipype/interfaces/niftyreg/tests/test_reg.py index 77b56e21da..59773c880e 100644 --- a/nipype/interfaces/niftyreg/tests/test_reg.py +++ b/nipype/interfaces/niftyreg/tests/test_reg.py @@ -14,7 +14,7 @@ reason="niftyreg is not installed. reg_aladin not found.", ) def test_reg_aladin(): - """ tests for reg_aladin interface""" + """tests for reg_aladin interface""" # Create a reg_aladin object nr_aladin = RegAladin() @@ -53,7 +53,7 @@ def test_reg_aladin(): no_nifty_tool(cmd="reg_f3d"), reason="niftyreg is not installed. reg_f3d not found." 
) def test_reg_f3d(): - """ tests for reg_f3d interface""" + """tests for reg_f3d interface""" # Create a reg_f3d object nr_f3d = RegF3D() diff --git a/nipype/interfaces/niftyreg/tests/test_regutils.py b/nipype/interfaces/niftyreg/tests/test_regutils.py index 26431ddc44..3efc9efb56 100644 --- a/nipype/interfaces/niftyreg/tests/test_regutils.py +++ b/nipype/interfaces/niftyreg/tests/test_regutils.py @@ -26,7 +26,7 @@ def no_nifty_tool(cmd=None): reason="niftyreg is not installed. reg_resample not found.", ) def test_reg_resample_res(): - """ tests for reg_resample interface """ + """tests for reg_resample interface""" # Create a reg_resample object nr_resample = RegResample() @@ -88,7 +88,7 @@ def test_reg_resample_res(): reason="niftyreg is not installed. reg_jacobian not found.", ) def test_reg_jacobian_jac(): - """ Test interface for RegJacobian """ + """Test interface for RegJacobian""" # Create a reg_jacobian object nr_jacobian = RegJacobian() @@ -156,7 +156,7 @@ def test_reg_jacobian_jac(): reason="niftyreg is not installed. reg_tools not found.", ) def test_reg_tools_mul(): - """ tests for reg_tools interface """ + """tests for reg_tools interface""" # Create a reg_tools object nr_tools = RegTools() @@ -198,7 +198,7 @@ def test_reg_tools_mul(): reason="niftyreg is not installed. reg_average not found.", ) def test_reg_average(): - """ tests for reg_average interface """ + """tests for reg_average interface""" # Create a reg_average object nr_average = RegAverage() @@ -384,7 +384,7 @@ def test_reg_average(): reason="niftyreg is not installed. reg_transform not found.", ) def test_reg_transform_def(): - """ tests for reg_transform interface """ + """tests for reg_transform interface""" # Create a reg_transform object nr_transform = RegTransform() @@ -500,7 +500,7 @@ def test_reg_transform_def(): reason="niftyreg is not installed. 
reg_measure not found.", ) def test_reg_measure(): - """ tests for reg_measure interface """ + """tests for reg_measure interface""" # Create a reg_measure object nr_measure = RegMeasure() diff --git a/nipype/interfaces/niftyseg/tests/test_label_fusion.py b/nipype/interfaces/niftyseg/tests/test_label_fusion.py index a7a4fa7c91..824b98c230 100644 --- a/nipype/interfaces/niftyseg/tests/test_label_fusion.py +++ b/nipype/interfaces/niftyseg/tests/test_label_fusion.py @@ -13,7 +13,7 @@ no_nifty_tool(cmd="seg_LabFusion"), reason="niftyseg is not installed" ) def test_seg_lab_fusion(): - """ Test interfaces for seg_labfusion""" + """Test interfaces for seg_labfusion""" # Create a node object steps = LabelFusion() @@ -98,7 +98,7 @@ def test_seg_lab_fusion(): no_nifty_tool(cmd="seg_CalcTopNCC"), reason="niftyseg is not installed" ) def test_seg_calctopncc(): - """ Test interfaces for seg_CalctoNCC""" + """Test interfaces for seg_CalctoNCC""" # Create a node object calctopncc = CalcTopNCC() diff --git a/nipype/interfaces/niftyseg/tests/test_stats.py b/nipype/interfaces/niftyseg/tests/test_stats.py index cfeefe404a..7f3824e01a 100644 --- a/nipype/interfaces/niftyseg/tests/test_stats.py +++ b/nipype/interfaces/niftyseg/tests/test_stats.py @@ -11,7 +11,7 @@ @pytest.mark.skipif(no_nifty_tool(cmd="seg_stats"), reason="niftyseg is not installed") def test_unary_stats(): - """ Test for the seg_stats interfaces """ + """Test for the seg_stats interfaces""" # Create a node object unarys = UnaryStats() @@ -35,7 +35,7 @@ def test_unary_stats(): @pytest.mark.skipif(no_nifty_tool(cmd="seg_stats"), reason="niftyseg is not installed") def test_binary_stats(): - """ Test for the seg_stats interfaces """ + """Test for the seg_stats interfaces""" # Create a node object binarys = BinaryStats() diff --git a/nipype/interfaces/nitime/analysis.py b/nipype/interfaces/nitime/analysis.py index ffaf5380ce..f6c7aa1f61 100644 --- a/nipype/interfaces/nitime/analysis.py +++ b/nipype/interfaces/nitime/analysis.py @@ -159,7 +159,7 @@ def _read_csv(self): return data, roi_names def _csv2ts(self): - """ Read data from the in_file and generate a nitime TimeSeries object""" + """Read data from the in_file and generate a nitime TimeSeries object""" from nitime.timeseries import TimeSeries data, roi_names = self._read_csv() diff --git a/nipype/interfaces/nitime/tests/test_nitime.py b/nipype/interfaces/nitime/tests/test_nitime.py index 0c72d63860..2f94ccd2d2 100644 --- a/nipype/interfaces/nitime/tests/test_nitime.py +++ b/nipype/interfaces/nitime/tests/test_nitime.py @@ -32,7 +32,7 @@ def test_read_csv(): @pytest.mark.skipif(no_nitime, reason="nitime is not installed") def test_coherence_analysis(tmpdir): - """Test that the coherence analyzer works """ + """Test that the coherence analyzer works""" import nitime.analysis as nta import nitime.timeseries as ts diff --git a/nipype/interfaces/spm/utils.py b/nipype/interfaces/spm/utils.py index 74355f6b3f..543a0d3024 100644 --- a/nipype/interfaces/spm/utils.py +++ b/nipype/interfaces/spm/utils.py @@ -90,12 +90,12 @@ class CalcCoregAffine(SPMCommand): output_spec = CalcCoregAffineOutputSpec def _make_inv_file(self): - """ makes filename to hold inverse transform if not specified""" + """makes filename to hold inverse transform if not specified""" invmat = fname_presuffix(self.inputs.mat, prefix="inverse_") return invmat def _make_mat_file(self): - """ makes name for matfile if doesn exist""" + """makes name for matfile if doesn exist""" pth, mv, _ = split_filename(self.inputs.moving) _, tgt, _ 
= split_filename(self.inputs.target) mat = os.path.join(pth, "%s_to_%s.mat" % (mv, tgt)) @@ -228,13 +228,13 @@ class ResliceOutputSpec(TraitedSpec): class Reslice(SPMCommand): - """ uses spm_reslice to resample in_file into space of space_defining""" + """uses spm_reslice to resample in_file into space of space_defining""" input_spec = ResliceInputSpec output_spec = ResliceOutputSpec def _make_matlab_command(self, _): - """ generates script""" + """generates script""" if not isdefined(self.inputs.out_file): self.inputs.out_file = fname_presuffix(self.inputs.in_file, prefix="r") script = """ diff --git a/nipype/interfaces/vtkbase.py b/nipype/interfaces/vtkbase.py index 875ccb61d5..cb4d74e62f 100644 --- a/nipype/interfaces/vtkbase.py +++ b/nipype/interfaces/vtkbase.py @@ -43,25 +43,25 @@ def vtk_version(): - """ Get VTK version """ + """Get VTK version""" global _vtk_version return _vtk_version def no_vtk(): - """ Checks if VTK is installed and the python wrapper is functional """ + """Checks if VTK is installed and the python wrapper is functional""" global _vtk_version return _vtk_version is None def no_tvtk(): - """ Checks if tvtk was found """ + """Checks if tvtk was found""" global _have_tvtk return not _have_tvtk def vtk_old(): - """ Checks if VTK uses the old-style pipeline (VTK<6.0) """ + """Checks if VTK uses the old-style pipeline (VTK<6.0)""" global _vtk_version if _vtk_version is None: raise RuntimeException("VTK is not correctly installed.") @@ -80,7 +80,7 @@ def configure_input_data(obj, data): def vtk_output(obj): - """ Configure the input data for vtk pipeline object obj.""" + """Configure the input data for vtk pipeline object obj.""" if vtk_old(): return obj.output return obj.get_output() diff --git a/nipype/pipeline/engine/tests/test_engine.py b/nipype/pipeline/engine/tests/test_engine.py index 3668d2d574..0f076af380 100644 --- a/nipype/pipeline/engine/tests/test_engine.py +++ b/nipype/pipeline/engine/tests/test_engine.py @@ -538,7 +538,7 @@ def test_deep_nested_write_graph_runs(tmpdir): "graph_type", ["orig", "flat", "exec", "hierarchical", "colored"] ) def test_write_graph_dotfile(tmpdir, graph_type, simple): - """ checking dot files for a workflow without iterables""" + """checking dot files for a workflow without iterables""" tmpdir.chdir() pipe = pe.Workflow(name="pipe") @@ -631,7 +631,7 @@ def test_write_graph_dotfile(tmpdir, graph_type, simple): "graph_type", ["orig", "flat", "exec", "hierarchical", "colored"] ) def test_write_graph_dotfile_iterables(tmpdir, graph_type, simple): - """ checking dot files for a workflow with iterables""" + """checking dot files for a workflow with iterables""" tmpdir.chdir() pipe = pe.Workflow(name="pipe") diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index 12ebfef614..a7ba7f5f34 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -1113,7 +1113,7 @@ def make_field_func(*pair): # Non-IdentityInterface w/ iterables # - src_idJ\d+ : JoinNode(IdentityInterface) if itername.startswith(src_id): - suffix = itername[len(src_id):] + suffix = itername[len(src_id) :] if re.fullmatch(r"((\.[a-z](I\.[a-z])?|J)\d+)?", suffix): expansions[src_id].append(node) for in_id, in_nodes in list(expansions.items()): diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py index 7e1021b8ee..563ce6a840 100644 --- a/nipype/pipeline/engine/workflows.py +++ b/nipype/pipeline/engine/workflows.py @@ -173,8 +173,7 @@ def connect(self, *args, **kwargs): for edge in 
self._graph.in_edges(destnode): data = self._graph.get_edge_data(*edge) connected_ports[destnode].update( - destname - for _, destname in data["connect"] + destname for _, destname in data["connect"] ) for source, dest in connects: # Currently datasource/sink/grabber.io modules @@ -923,12 +922,8 @@ def _update_node_cache(self): self._nested_workflows_cache.add(node) def _has_node(self, wanted_node): - return ( - wanted_node in self._nodes_cache or - any( - wf._has_node(wanted_node) - for wf in self._nested_workflows_cache - ) + return wanted_node in self._nodes_cache or any( + wf._has_node(wanted_node) for wf in self._nested_workflows_cache ) def _create_flat_graph(self): diff --git a/nipype/pipeline/plugins/legacymultiproc.py b/nipype/pipeline/plugins/legacymultiproc.py index 528184472d..c51df4935b 100644 --- a/nipype/pipeline/plugins/legacymultiproc.py +++ b/nipype/pipeline/plugins/legacymultiproc.py @@ -28,7 +28,7 @@ except ImportError: def indent(text, prefix): - """ A textwrap.indent replacement for Python < 3.3 """ + """A textwrap.indent replacement for Python < 3.3""" if not prefix: return text splittext = text.splitlines(True) diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index eac662533c..ca7820939d 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -28,7 +28,7 @@ except ImportError: def indent(text, prefix): - """ A textwrap.indent replacement for Python < 3.3 """ + """A textwrap.indent replacement for Python < 3.3""" if not prefix: return text splittext = text.splitlines(True) diff --git a/nipype/utils/draw_gantt_chart.py b/nipype/utils/draw_gantt_chart.py index b5994c0375..a13d596bf7 100644 --- a/nipype/utils/draw_gantt_chart.py +++ b/nipype/utils/draw_gantt_chart.py @@ -322,7 +322,7 @@ def draw_resource_bar( left, resource, ): - """""" + """ """ # Memory header result = "

<p class='time' style='top:198px;left:%dpx;'>%s</p>
" % (left, resource) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index d8933e078d..f02efa163f 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -30,7 +30,7 @@ def _resolve_with_filenotfound(path, **kwargs): - """ Raise FileNotFoundError instead of OSError """ + """Raise FileNotFoundError instead of OSError""" try: return path.resolve(**kwargs) except OSError as e: @@ -210,7 +210,7 @@ def hash_infile(afile, chunk_len=8192, crypto=hashlib.md5, raise_notfound=False) def hash_timestamp(afile): - """ Computes md5 hash of the timestamp of a file """ + """Computes md5 hash of the timestamp of a file""" md5hex = None if op.isfile(afile): md5obj = md5() @@ -672,7 +672,7 @@ def loadpkl(infile): def crash2txt(filename, record): - """ Write out plain text crash file """ + """Write out plain text crash file""" with open(filename, "w") as fp: if "node" in record: node = record["node"] diff --git a/nipype/utils/matlabtools.py b/nipype/utils/matlabtools.py index 4a25b6f180..4919c39c2b 100644 --- a/nipype/utils/matlabtools.py +++ b/nipype/utils/matlabtools.py @@ -13,7 +13,7 @@ def fltcols(vals): - """ Trivial little function to make 1xN float vector """ + """Trivial little function to make 1xN float vector""" return np.atleast_2d(np.array(vals, dtype=float)) diff --git a/nipype/utils/misc.py b/nipype/utils/misc.py index 85743ba01d..3531610800 100644 --- a/nipype/utils/misc.py +++ b/nipype/utils/misc.py @@ -18,7 +18,7 @@ except ImportError: def textwrap_indent(text, prefix): - """ A textwrap.indent replacement for Python < 3.3 """ + """A textwrap.indent replacement for Python < 3.3""" if not prefix: return text splittext = text.splitlines(True) diff --git a/nipype/utils/onetime.py b/nipype/utils/onetime.py index e0bf9e7747..bb721dc7e8 100644 --- a/nipype/utils/onetime.py +++ b/nipype/utils/onetime.py @@ -37,7 +37,7 @@ def __init__(self, func): self.name = func.__name__ def __get__(self, obj, type=None): - """ Called on attribute access on the class or instance. """ + """Called on attribute access on the class or instance.""" if obj is None: # Being called on the class, return the original function. # This way, introspection works on the class. From ea117f2518f840a0be54bfb486ca12a51c8e3030 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Fri, 30 Apr 2021 16:50:53 -0400 Subject: [PATCH 0974/1665] STY: Ignore 12deb95 --- .git-blame-ignore-revs | 1 + 1 file changed, 1 insertion(+) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index d5b7c0107a..4c410e9b0d 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -1,4 +1,5 @@ # Commits with messages like "STY: black" or "run black" +12deb959cccc431fb8222cc5854f1c92a0080021 f64bf338f630a9ee5cbe7a3ec98c68292897e720 83358d7f17aac07cb90d0330f11ea2322e2974d8 faef7d0f93013a700c882f709e98fb3cd36ebb03 From 04718ac71436b6f283af7575dda0f6998b64f893 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Fri, 30 Apr 2021 17:19:55 -0400 Subject: [PATCH 0975/1665] STY: black --- nipype/interfaces/cat12/preprocess.py | 633 +++++++++++++++++--------- nipype/interfaces/cat12/surface.py | 172 ++++--- 2 files changed, 540 insertions(+), 265 deletions(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index 2fab299f15..caf337a211 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -1,228 +1,411 @@ import os from pathlib import Path -from nipype.interfaces.base import InputMultiPath, TraitedSpec, traits, isdefined, File, Str +from nipype.interfaces.base import ( + InputMultiPath, + TraitedSpec, + traits, + isdefined, + File, + Str, +) from nipype.interfaces.spm import SPMCommand -from nipype.interfaces.spm.base import SPMCommandInputSpec, ImageFileSPM, scans_for_fnames, scans_for_fname +from nipype.interfaces.spm.base import ( + SPMCommandInputSpec, + ImageFileSPM, + scans_for_fnames, + scans_for_fname, +) from nipype.utils.filemanip import split_filename, fname_presuffix class CAT12SegmentInputSpec(SPMCommandInputSpec): - in_files = InputMultiPath(ImageFileSPM(exists=True), field="data", desc="file to segment", mandatory=True, - copyfile=False) - - _help_tpm = 'Tissue Probability Map. Select the tissue probability image that includes 6 tissue probability ' \ - 'classes for (1) grey matter, (2) white matter, (3) cerebrospinal fluid, (4) bone, (5) non-brain ' \ - 'soft tissue, and (6) the background. CAT uses the TPM only for the initial SPM segmentation.' - tpm = InputMultiPath(ImageFileSPM(exists=True), field="tpm", desc=_help_tpm, mandatory=False, - copyfile=False) - - n_jobs = traits.Int(1, usedefault=True, mandatory=True, field="nproc", desc="Number of threads") - - _help_affine_reg = 'Affine Regularization. The procedure is a local optimisation, so it needs reasonable initial ' \ - 'starting estimates. Images should be placed in approximate alignment using the Display ' \ - 'function of SPM before beginning. A Mutual Information affine registration with the tissue ' \ - 'probability maps (D''Agostino et al, 2004) is used to achieve approximate alignment.' - affine_regularization = Str(default_value="mni", - field="opts.affreg", usedefault=True, desc=_help_affine_reg) - - _help_bias_acc = "Strength of the SPM inhomogeneity (bias) correction that simultaneously controls the SPM " \ - "biasreg, biasfwhm, samp (resolution), and tol (iteration) parameter." - power_spm_inhomogeneity_correction = traits.Float(default_value=0.5, field='opts.biasacc', - usedefault=True, - desc=_help_bias_acc) + in_files = InputMultiPath( + ImageFileSPM(exists=True), + field="data", + desc="file to segment", + mandatory=True, + copyfile=False, + ) + + _help_tpm = ( + "Tissue Probability Map. Select the tissue probability image that includes 6 tissue probability " + "classes for (1) grey matter, (2) white matter, (3) cerebrospinal fluid, (4) bone, (5) non-brain " + "soft tissue, and (6) the background. CAT uses the TPM only for the initial SPM segmentation." + ) + tpm = InputMultiPath( + ImageFileSPM(exists=True), + field="tpm", + desc=_help_tpm, + mandatory=False, + copyfile=False, + ) + + n_jobs = traits.Int( + 1, usedefault=True, mandatory=True, field="nproc", desc="Number of threads" + ) + + _help_affine_reg = ( + "Affine Regularization. The procedure is a local optimisation, so it needs reasonable initial " + "starting estimates. 
Images should be placed in approximate alignment using the Display " + "function of SPM before beginning. A Mutual Information affine registration with the tissue " + "probability maps (D" + "Agostino et al, 2004) is used to achieve approximate alignment." + ) + affine_regularization = Str( + default_value="mni", field="opts.affreg", usedefault=True, desc=_help_affine_reg + ) + + _help_bias_acc = ( + "Strength of the SPM inhomogeneity (bias) correction that simultaneously controls the SPM " + "biasreg, biasfwhm, samp (resolution), and tol (iteration) parameter." + ) + power_spm_inhomogeneity_correction = traits.Float( + default_value=0.5, field="opts.biasacc", usedefault=True, desc=_help_bias_acc + ) # Extended options for CAT12 preprocessing - _help_app = 'Affine registration and SPM preprocessing can fail in some subjects with deviating anatomy (e.g. ' \ - 'other species/neonates) or in images with strong signal inhomogeneities, or untypical intensities ' \ - '(e.g. synthetic images). An initial bias correction can help to reduce such problems (see details ' \ - 'below). Recommended are the "default" and "full" option.' - affine_preprocessing = traits.Int(1070, field="extopts.APP", desc=_help_app, usedefault=True) - - _help_initial_seg = 'In rare cases the Unified Segmentation can fail in highly abnormal brains, where e.g. the ' \ - 'cerebrospinal fluid of superlarge ventricles (hydrocephalus) were classified as white ' \ - 'matter. However, if the affine registration is correct, the AMAP segmentation with an ' \ - 'prior-independent k-means initialization can be used to replace the SPM brain tissue ' \ - 'classification. Moreover, if the default Dartel and Shooting registrations will fail then ' \ - 'rhe "Optimized Shooting - superlarge ventricles" option for "Spatial registration" is ! ' \ - 'required Values: \nnone: 0;\nlight: 1;\nfull: 2;\ndefault: 1070.' - initial_segmentation = traits.Int(0, field="extopts.spm_kamap", desc=_help_initial_seg, usedefault=True) - - _help_las = 'Additionally to WM-inhomogeneities, GM intensity can vary across different regions such as the motor' \ - ' cortex, the basal ganglia, or the occipital lobe. These changes have an anatomical background ' \ - '(e.g. iron content, myelinization), but are dependent on the MR-protocol and often lead to ' \ - 'underestimation of GM at higher intensities and overestimation of CSF at lower intensities. ' \ - 'Therefore, a local intensity transformation of all tissue classes is used to reduce these effects in' \ - ' the image. This local adaptive segmentation (LAS) is applied before the final AMAP segmentation.' \ - 'Possible Values: \nSPM Unified Segmentation: 0 \nk-means AMAP: 2' - local_adaptive_seg = traits.Float(0.5, field="extopts.LASstr", usedefault=True, desc=_help_las) - - _help_gcutstr = 'Method of initial skull-stripping before AMAP segmentation. The SPM approach works quite stable ' \ - 'for the majority of data. However, in some rare cases parts of GM (i.e. in frontal lobe) might ' \ - 'be cut. If this happens the GCUT approach is a good alternative. GCUT is a graph-cut/region-' \ - 'growing approach starting from the WM area. APRG (adaptive probability region-growing) is a new' \ - ' method that refines the probability maps of the SPM approach by region-growing techniques of ' \ - 'the gcut approach with a final surface-based optimization strategy. This is currently the method' \ - ' with the most accurate and reliable results. 
If you use already skull-stripped data you can ' \ - 'turn off skull-stripping although this is automaticaly detected in most cases. Please note that ' \ - 'the choice of the skull-stripping method will also influence the estimation of TIV, because the' \ - ' methods mainly differ in the handling of the outer CSF around the cortical surface. ' \ - '\nPossible Values:\n - none (already skull-stripped): -1;\n - SPM approach: 0; ' \ - '\n - GCUT approach: 0.50; \n - APRG approach: 2' - skull_strip = traits.Float(2, field="extopts.gcutstr", desc=_help_gcutstr, usedefault=True) - - _help_wmhc = 'WARNING: Please note that the detection of WM hyperintensies is still under development and does ' \ - 'not have the same accuracy as approaches that additionally consider FLAIR images (e.g. Lesion ' \ - 'Segmentation Toolbox)! In aging or (neurodegenerative) diseases WM intensity can be reduced ' \ - 'locally in T1 or increased in T2/PD images. These so-called WM hyperintensies (WMHs) can lead to ' \ - 'preprocessing errors. Large GM areas next to the ventricle can cause normalization problems. ' \ - 'Therefore, a temporary correction for normalization is useful if WMHs are expected. CAT allows ' \ - 'different ways to handle WMHs: ' \ - '\n0) No Correction (handled as GM). \n1) Temporary (internal) correction as WM for spatial ' \ - 'normalization and estimation of cortical thickness. \n2) Permanent correction to WM. ' - wm_hyper_intensity_correction = traits.Int(1, field="extopts.WMHC", desc=_help_wmhc, usedefault=True) - - _help_vox = 'The (isotropic) voxel sizes of any spatially normalised written images. A non-finite value will be ' \ - 'replaced by the average voxel size of the tissue probability maps used by the segmentation.' + _help_app = ( + "Affine registration and SPM preprocessing can fail in some subjects with deviating anatomy (e.g. " + "other species/neonates) or in images with strong signal inhomogeneities, or untypical intensities " + "(e.g. synthetic images). An initial bias correction can help to reduce such problems (see details " + 'below). Recommended are the "default" and "full" option.' + ) + affine_preprocessing = traits.Int( + 1070, field="extopts.APP", desc=_help_app, usedefault=True + ) + + _help_initial_seg = ( + "In rare cases the Unified Segmentation can fail in highly abnormal brains, where e.g. the " + "cerebrospinal fluid of superlarge ventricles (hydrocephalus) were classified as white " + "matter. However, if the affine registration is correct, the AMAP segmentation with an " + "prior-independent k-means initialization can be used to replace the SPM brain tissue " + "classification. Moreover, if the default Dartel and Shooting registrations will fail then " + 'rhe "Optimized Shooting - superlarge ventricles" option for "Spatial registration" is ! ' + "required Values: \nnone: 0;\nlight: 1;\nfull: 2;\ndefault: 1070." + ) + initial_segmentation = traits.Int( + 0, field="extopts.spm_kamap", desc=_help_initial_seg, usedefault=True + ) + + _help_las = ( + "Additionally to WM-inhomogeneities, GM intensity can vary across different regions such as the motor" + " cortex, the basal ganglia, or the occipital lobe. These changes have an anatomical background " + "(e.g. iron content, myelinization), but are dependent on the MR-protocol and often lead to " + "underestimation of GM at higher intensities and overestimation of CSF at lower intensities. " + "Therefore, a local intensity transformation of all tissue classes is used to reduce these effects in" + " the image. 
This local adaptive segmentation (LAS) is applied before the final AMAP segmentation." + "Possible Values: \nSPM Unified Segmentation: 0 \nk-means AMAP: 2" + ) + local_adaptive_seg = traits.Float( + 0.5, field="extopts.LASstr", usedefault=True, desc=_help_las + ) + + _help_gcutstr = ( + "Method of initial skull-stripping before AMAP segmentation. The SPM approach works quite stable " + "for the majority of data. However, in some rare cases parts of GM (i.e. in frontal lobe) might " + "be cut. If this happens the GCUT approach is a good alternative. GCUT is a graph-cut/region-" + "growing approach starting from the WM area. APRG (adaptive probability region-growing) is a new" + " method that refines the probability maps of the SPM approach by region-growing techniques of " + "the gcut approach with a final surface-based optimization strategy. This is currently the method" + " with the most accurate and reliable results. If you use already skull-stripped data you can " + "turn off skull-stripping although this is automaticaly detected in most cases. Please note that " + "the choice of the skull-stripping method will also influence the estimation of TIV, because the" + " methods mainly differ in the handling of the outer CSF around the cortical surface. " + "\nPossible Values:\n - none (already skull-stripped): -1;\n - SPM approach: 0; " + "\n - GCUT approach: 0.50; \n - APRG approach: 2" + ) + skull_strip = traits.Float( + 2, field="extopts.gcutstr", desc=_help_gcutstr, usedefault=True + ) + + _help_wmhc = ( + "WARNING: Please note that the detection of WM hyperintensies is still under development and does " + "not have the same accuracy as approaches that additionally consider FLAIR images (e.g. Lesion " + "Segmentation Toolbox)! In aging or (neurodegenerative) diseases WM intensity can be reduced " + "locally in T1 or increased in T2/PD images. These so-called WM hyperintensies (WMHs) can lead to " + "preprocessing errors. Large GM areas next to the ventricle can cause normalization problems. " + "Therefore, a temporary correction for normalization is useful if WMHs are expected. CAT allows " + "different ways to handle WMHs: " + "\n0) No Correction (handled as GM). \n1) Temporary (internal) correction as WM for spatial " + "normalization and estimation of cortical thickness. \n2) Permanent correction to WM. " + ) + wm_hyper_intensity_correction = traits.Int( + 1, field="extopts.WMHC", desc=_help_wmhc, usedefault=True + ) + + _help_vox = ( + "The (isotropic) voxel sizes of any spatially normalised written images. A non-finite value will be " + "replaced by the average voxel size of the tissue probability maps used by the segmentation." + ) voxel_size = traits.Float(1.5, field="extopts.vox", desc=_help_vox, usedefault=True) - _help_resampling = 'Internal resampling for preprocessing.\n The default fixed image resolution offers a good ' \ - 'trade-off between optimal quality and preprocessing time and memory demands. Standard ' \ - 'structural data with a voxel resolution around 1mm or even data with high in-plane resolution' \ - ' and large slice thickness (e.g. 0.5x0.5x1.5 mm) will benefit from this setting. If you have' \ - ' higher native resolutions the highres option "Fixed 0.8 mm" will sometimes offer slightly' \ - ' better preprocessing quality with an increase of preprocessing time and memory demands. In' \ - ' case of even higher resolutions and high signal-to-noise ratio (e.g. for 7 T data) the ' \ - '"Best native" option will process the data on the highest native resolution. 
A resolution' \ - ' of 0.4x0.7x1.0 mm will be interpolated to 0.4x0.4x0.4 mm. A tolerance range of 0.1 mm is ' \ - 'used to avoid interpolation artifacts, i.e. a resolution of 0.95x1.01x1.08 mm will not be ' \ - 'interpolated in case of the "Fixed 1.0 mm"! This "optimal" option prefers an isotropic voxel ' \ - 'size with at least 1.1 mm that is controlled by the median voxel size and a volume term that ' \ - 'penalizes highly anisotropic voxels.' \ - 'Values:\nOptimal: [1.0 0.1]\nFixed 1.0 mm: [1.0 0.1];\nFixed 0.8 mm:[0.8 0.1]' \ - '\nBest native: [0.5 0.1]' - internal_resampling_process = traits.Tuple(traits.Float(1), traits.Float(0.1), minlen=2, maxlen=2, usedefault=True, - field="extopts.restypes.optimal", desc="help_resampling") - _errors_help = 'Error handling.\nTry to catch preprocessing errors and continue with the next data set or ignore ' \ - 'all warnings (e.g., bad intensities) and use an experimental pipeline which is still in ' \ - 'development. In case of errors, CAT continues with the next subject if this option is enabled. If ' \ - 'the experimental option with backup functions is selected and warnings occur, CAT will try to use' \ - ' backup routines and skip some processing steps which require good T1 contrasts (e.g., LAS). If ' \ - 'you want to avoid processing of critical data and ensure that only the main pipeline is used then' \ - ' select the option "Ignore errors (continue with the next subject)". It is strongly recommended ' \ - 'to check for preprocessing problems, especially with non-T1 contrasts. ' \ - '\nValues:\nnone: 0,\ndefault: 1,\ndetails: 2.' - ignore_errors = traits.Int(1, field="extopts.ignoreErrors", desc=_errors_help, usedefault=True) + _help_resampling = ( + "Internal resampling for preprocessing.\n The default fixed image resolution offers a good " + "trade-off between optimal quality and preprocessing time and memory demands. Standard " + "structural data with a voxel resolution around 1mm or even data with high in-plane resolution" + " and large slice thickness (e.g. 0.5x0.5x1.5 mm) will benefit from this setting. If you have" + ' higher native resolutions the highres option "Fixed 0.8 mm" will sometimes offer slightly' + " better preprocessing quality with an increase of preprocessing time and memory demands. In" + " case of even higher resolutions and high signal-to-noise ratio (e.g. for 7 T data) the " + '"Best native" option will process the data on the highest native resolution. A resolution' + " of 0.4x0.7x1.0 mm will be interpolated to 0.4x0.4x0.4 mm. A tolerance range of 0.1 mm is " + "used to avoid interpolation artifacts, i.e. a resolution of 0.95x1.01x1.08 mm will not be " + 'interpolated in case of the "Fixed 1.0 mm"! This "optimal" option prefers an isotropic voxel ' + "size with at least 1.1 mm that is controlled by the median voxel size and a volume term that " + "penalizes highly anisotropic voxels." + "Values:\nOptimal: [1.0 0.1]\nFixed 1.0 mm: [1.0 0.1];\nFixed 0.8 mm:[0.8 0.1]" + "\nBest native: [0.5 0.1]" + ) + internal_resampling_process = traits.Tuple( + traits.Float(1), + traits.Float(0.1), + minlen=2, + maxlen=2, + usedefault=True, + field="extopts.restypes.optimal", + desc="help_resampling", + ) + _errors_help = ( + "Error handling.\nTry to catch preprocessing errors and continue with the next data set or ignore " + "all warnings (e.g., bad intensities) and use an experimental pipeline which is still in " + "development. In case of errors, CAT continues with the next subject if this option is enabled. 
If " + "the experimental option with backup functions is selected and warnings occur, CAT will try to use" + " backup routines and skip some processing steps which require good T1 contrasts (e.g., LAS). If " + "you want to avoid processing of critical data and ensure that only the main pipeline is used then" + ' select the option "Ignore errors (continue with the next subject)". It is strongly recommended ' + "to check for preprocessing problems, especially with non-T1 contrasts. " + "\nValues:\nnone: 0,\ndefault: 1,\ndetails: 2." + ) + ignore_errors = traits.Int( + 1, field="extopts.ignoreErrors", desc=_errors_help, usedefault=True + ) # Writing options - _help_surf = 'Surface and thickness estimation. \nUse projection-based thickness (PBT) (Dahnke et al. 2012) to' \ - ' estimate cortical thickness and to create the central cortical surface for left and right ' \ - 'hemisphere. Surface reconstruction includes topology correction (Yotter et al. 2011), spherical ' \ - 'inflation (Yotter et al.) and spherical registration. Additionally you can also estimate surface ' \ - 'parameters such as gyrification, cortical complexity or sulcal depth that can be subsequently ' \ - 'analyzed at each vertex of the surface. Please note, that surface reconstruction and spherical ' \ - 'registration additionally requires about 20-60 min of computation time. A fast (1-3 min) surface ' \ - 'pipeline is available for visual preview (e.g., to check preprocessing quality) in the ' \ - 'cross-sectional, but not in the longitudinal pipeline. Only the initial surfaces are created with ' \ - 'a lower resolution and without topology correction, spherical mapping and surface registration. ' \ - 'Please note that the files with the estimated surface thickness can therefore not be used for ' \ - 'further analysis! For distinction, these files contain "preview" in their filename and they' \ - ' are not available as batch dependencies objects. ' - surface_and_thickness_estimation = traits.Int(1, field="surface", desc=_help_surf, usedefault=True) - surface_measures = traits.Int(1, field="output.surf_measures", usedefault=True, desc="Extract surface measures") + _help_surf = ( + "Surface and thickness estimation. \nUse projection-based thickness (PBT) (Dahnke et al. 2012) to" + " estimate cortical thickness and to create the central cortical surface for left and right " + "hemisphere. Surface reconstruction includes topology correction (Yotter et al. 2011), spherical " + "inflation (Yotter et al.) and spherical registration. Additionally you can also estimate surface " + "parameters such as gyrification, cortical complexity or sulcal depth that can be subsequently " + "analyzed at each vertex of the surface. Please note, that surface reconstruction and spherical " + "registration additionally requires about 20-60 min of computation time. A fast (1-3 min) surface " + "pipeline is available for visual preview (e.g., to check preprocessing quality) in the " + "cross-sectional, but not in the longitudinal pipeline. Only the initial surfaces are created with " + "a lower resolution and without topology correction, spherical mapping and surface registration. " + "Please note that the files with the estimated surface thickness can therefore not be used for " + 'further analysis! For distinction, these files contain "preview" in their filename and they' + " are not available as batch dependencies objects. 
" + ) + surface_and_thickness_estimation = traits.Int( + 1, field="surface", desc=_help_surf, usedefault=True + ) + surface_measures = traits.Int( + 1, + field="output.surf_measures", + usedefault=True, + desc="Extract surface measures", + ) # Templates - neuromorphometrics = traits.Bool(True, field="output.ROImenu.atlases.neuromorphometrics", - usedefault=True, desc="Extract brain measures for Neuromorphometrics template") - lpba40 = traits.Bool(True, field="output.ROImenu.atlases.lpba40", usedefault=True, - desc="Extract brain measures for LPBA40 template") - cobra = traits.Bool(True, field="output.ROImenu.atlases.hammers", usedefault=True, - desc="Extract brain measures for COBRA template") - hammers = traits.Bool(True, field="output.ROImenu.atlases.cobra", usedefault=True, - desc="Extract brain measures for Hammers template") - own_atlas = InputMultiPath(ImageFileSPM(exists=True), field="output.ROImenu.atlases.ownatlas", - desc="Extract brain measures for a given template", mandatory=False, copyfile=False) - - _dartel_help = 'This option is to export data into a form that can be used with DARTEL. The SPM default is to ' \ - 'only apply rigid body transformation. However, a more appropriate option is to apply affine ' \ - 'transformation, because the additional scaling of the images requires less deformations to ' \ - 'non-linearly register brains to the template.' + neuromorphometrics = traits.Bool( + True, + field="output.ROImenu.atlases.neuromorphometrics", + usedefault=True, + desc="Extract brain measures for Neuromorphometrics template", + ) + lpba40 = traits.Bool( + True, + field="output.ROImenu.atlases.lpba40", + usedefault=True, + desc="Extract brain measures for LPBA40 template", + ) + cobra = traits.Bool( + True, + field="output.ROImenu.atlases.hammers", + usedefault=True, + desc="Extract brain measures for COBRA template", + ) + hammers = traits.Bool( + True, + field="output.ROImenu.atlases.cobra", + usedefault=True, + desc="Extract brain measures for Hammers template", + ) + own_atlas = InputMultiPath( + ImageFileSPM(exists=True), + field="output.ROImenu.atlases.ownatlas", + desc="Extract brain measures for a given template", + mandatory=False, + copyfile=False, + ) + + _dartel_help = ( + "This option is to export data into a form that can be used with DARTEL. The SPM default is to " + "only apply rigid body transformation. However, a more appropriate option is to apply affine " + "transformation, because the additional scaling of the images requires less deformations to " + "non-linearly register brains to the template." + ) # Grey matter - gm_output_native = traits.Bool(False, field="output.GM.native", usedefault=True, - desc='Save modulated grey matter images.') - gm_output_modulated = traits.Bool(True, field="output.GM.mod", usedefault=True, - desc='Save native grey matter images.') - gm_output_dartel = traits.Bool(False, field="output.GM.dartel", usedefault=True, - desc='Save dartel grey matter images.') + gm_output_native = traits.Bool( + False, + field="output.GM.native", + usedefault=True, + desc="Save modulated grey matter images.", + ) + gm_output_modulated = traits.Bool( + True, + field="output.GM.mod", + usedefault=True, + desc="Save native grey matter images.", + ) + gm_output_dartel = traits.Bool( + False, + field="output.GM.dartel", + usedefault=True, + desc="Save dartel grey matter images.", + ) # White matter - _wm_desc = 'Options to save white matter images.' 
- wm_output_native = traits.Bool(False, field="output.WM.native", usedefault=True, - desc='Save dartel white matter images.') - wm_output_modulated = traits.Bool(True, field="output.WM.mod", usedefault=True, - desc='Save dartel white matter images.') - wm_output_dartel = traits.Bool(False, field="output.WM.dartel", usedefault=True, - desc='Save dartel white matter images.') + _wm_desc = "Options to save white matter images." + wm_output_native = traits.Bool( + False, + field="output.WM.native", + usedefault=True, + desc="Save dartel white matter images.", + ) + wm_output_modulated = traits.Bool( + True, + field="output.WM.mod", + usedefault=True, + desc="Save dartel white matter images.", + ) + wm_output_dartel = traits.Bool( + False, + field="output.WM.dartel", + usedefault=True, + desc="Save dartel white matter images.", + ) # CSF matter - _csf_desc = 'Options to save CSF images.' - csf_output_native = traits.Bool(False, field="output.CSF.native", usedefault=True, - desc='Save dartel CSF images.') - csf_output_modulated = traits.Bool(True, field="output.CSF.mod", usedefault=True, - desc='Save dartel CSF images.') - csf_output_dartel = traits.Bool(False, field="output.CSF.dartel", usedefault=True, - desc='Save dartel CSF images.') + _csf_desc = "Options to save CSF images." + csf_output_native = traits.Bool( + False, + field="output.CSF.native", + usedefault=True, + desc="Save dartel CSF images.", + ) + csf_output_modulated = traits.Bool( + True, field="output.CSF.mod", usedefault=True, desc="Save dartel CSF images." + ) + csf_output_dartel = traits.Bool( + False, + field="output.CSF.dartel", + usedefault=True, + desc="Save dartel CSF images.", + ) # Labels - _help_label_desc = 'This is the option to save a labeled version of your segmentations in the %s space for fast visual ' \ - 'comparision. Labels are saved as Partial Volume Estimation (PVE) values with different mix ' \ - 'classes for GM-WM (2.5) and GM-CSF (1.5). BG=0, CSF=1, GM=2, WM=3, WMH=4 (if WMHC=3), ' \ - 'SL=1.5 (if SLC)' - label_native = traits.Bool(False, field="output.label.native", usedefault=True, desc=_help_label_desc % "native") - label_warped = traits.Bool(True, field="output.label.warped", usedefault=True, desc=_help_label_desc % "warped") - label_dartel = traits.Bool(False, field="output.label.dartel", usedefault=True, desc=_help_label_desc % "dartel") - output_labelnative = traits.Bool(False, field="output.labelnative", usedefault=True, desc=_help_label_desc % "native") + _help_label_desc = ( + "This is the option to save a labeled version of your segmentations in the %s space for fast visual " + "comparision. Labels are saved as Partial Volume Estimation (PVE) values with different mix " + "classes for GM-WM (2.5) and GM-CSF (1.5). 
BG=0, CSF=1, GM=2, WM=3, WMH=4 (if WMHC=3), " + "SL=1.5 (if SLC)" + ) + label_native = traits.Bool( + False, + field="output.label.native", + usedefault=True, + desc=_help_label_desc % "native", + ) + label_warped = traits.Bool( + True, + field="output.label.warped", + usedefault=True, + desc=_help_label_desc % "warped", + ) + label_dartel = traits.Bool( + False, + field="output.label.dartel", + usedefault=True, + desc=_help_label_desc % "dartel", + ) + output_labelnative = traits.Bool( + False, + field="output.labelnative", + usedefault=True, + desc=_help_label_desc % "native", + ) # Bias - save_bias_corrected = traits.Bool(True, field="output.bias.warped", usedefault=True, desc="Save bias corrected image") + save_bias_corrected = traits.Bool( + True, + field="output.bias.warped", + usedefault=True, + desc="Save bias corrected image", + ) # las - _las_desc = 'This is the option to save a bias, noise, and local intensity corrected version of the original T1' \ - ' image in the %s space. MR images are usually corrupted by a smooth, spatially varying artifact that modulates the' \ - ' intensity of the image (bias). These artifacts, although not usually a problem for visual ' \ - 'inspection, can impede automated processing of the images. The bias corrected version should have ' \ - 'more uniform intensities within the different types of tissues and can be saved in native space ' \ - 'and/or normalised. Noise is corrected by an adaptive non-local mean (NLM) filter (Manjon 2008, ' \ - 'Medical Image Analysis 12).' - las_native = traits.Bool(False, field="output.las.native", usedefault=True, desc=_las_desc % "native") - las_warped = traits.Bool(True, field="output.las.warped", usedefault=True, desc=_las_desc % "warped") - las_dartel = traits.Bool(False, field="output.las.dartel", usedefault=True, desc=_las_desc % "dartel") + _las_desc = ( + "This is the option to save a bias, noise, and local intensity corrected version of the original T1" + " image in the %s space. MR images are usually corrupted by a smooth, spatially varying artifact that modulates the" + " intensity of the image (bias). These artifacts, although not usually a problem for visual " + "inspection, can impede automated processing of the images. The bias corrected version should have " + "more uniform intensities within the different types of tissues and can be saved in native space " + "and/or normalised. Noise is corrected by an adaptive non-local mean (NLM) filter (Manjon 2008, " + "Medical Image Analysis 12)." + ) + las_native = traits.Bool( + False, field="output.las.native", usedefault=True, desc=_las_desc % "native" + ) + las_warped = traits.Bool( + True, field="output.las.warped", usedefault=True, desc=_las_desc % "warped" + ) + las_dartel = traits.Bool( + False, field="output.las.dartel", usedefault=True, desc=_las_desc % "dartel" + ) # Jacobian Warped - _help_jacobian = 'This is the option to save the Jacobian determinant, which expresses local volume changes. This' \ - ' image can be used in a pure deformation based morphometry (DBM) design. Please note that the' \ - ' affine part of the deformation field is ignored. Thus, there is no need for any additional' \ - ' correction for different brain sizes using ICV.' - jacobianwarped = traits.Bool(True, field="output.jacobianwarped", usedefault=True, desc=_help_jacobian) + _help_jacobian = ( + "This is the option to save the Jacobian determinant, which expresses local volume changes. This" + " image can be used in a pure deformation based morphometry (DBM) design. 
Please note that the" + " affine part of the deformation field is ignored. Thus, there is no need for any additional" + " correction for different brain sizes using ICV." + ) + jacobianwarped = traits.Bool( + True, field="output.jacobianwarped", usedefault=True, desc=_help_jacobian + ) # Deformation Fields - _help_warp = 'Deformation fields can be saved to disk, and used by the Deformations Utility and/or applied to ' \ - 'coregistered data from other modalities (e.g. fMRI). For spatially normalising images to MNI space,' \ - ' you will need the forward deformation, whereas for spatially normalising (eg) GIFTI surface files,' \ - ' you''ll need the inverse. It is also possible to transform data in MNI space on to the individual' \ - ' subject, which also requires the inverse transform. Deformations are saved as .nii files, which' \ - ' contain three volumes to encode the x, y and z coordinates.' \ - '\nValues: No:[0 0];\nImage->Template (forward): [1 0];\nTemplate->Image (inverse): [0 1]; ' \ - '\ninverse + forward: [1 1]' - warps = traits.Tuple(traits.Int(1), traits.Int(0), minlen=2, maxlen=2, - field="output.warps", usedefault=True, desc=_help_warp) + _help_warp = ( + "Deformation fields can be saved to disk, and used by the Deformations Utility and/or applied to " + "coregistered data from other modalities (e.g. fMRI). For spatially normalising images to MNI space," + " you will need the forward deformation, whereas for spatially normalising (eg) GIFTI surface files," + " you" + "ll need the inverse. It is also possible to transform data in MNI space on to the individual" + " subject, which also requires the inverse transform. Deformations are saved as .nii files, which" + " contain three volumes to encode the x, y and z coordinates." + "\nValues: No:[0 0];\nImage->Template (forward): [1 0];\nTemplate->Image (inverse): [0 1]; " + "\ninverse + forward: [1 1]" + ) + warps = traits.Tuple( + traits.Int(1), + traits.Int(0), + minlen=2, + maxlen=2, + field="output.warps", + usedefault=True, + desc=_help_warp, + ) class CAT12SegmentOutputSpec(TraitedSpec): ########################################## # Label XML files ########################################## - label_files = traits.List(File(exists=True), desc="Files with the measures extracted for OI ands ROIs") + label_files = traits.List( + File(exists=True), desc="Files with the measures extracted for OI ands ROIs" + ) label_rois = File(exists=True, desc="Files with thickness values of ROIs.") label_roi = File(exists=True, desc="Files with thickness values of ROI.") @@ -311,8 +494,7 @@ def __init__(self, **inputs): SPMCommand.__init__(self, **inputs) def _format_arg(self, opt, spec, val): - """Convert input to appropriate format for spm - """ + """Convert input to appropriate format for spm""" if opt in ["in_files"]: if isinstance(val, list): return scans_for_fnames(val) @@ -325,41 +507,64 @@ def _list_outputs(self): f = self.inputs.in_files[0] pth, base, ext = split_filename(f) - outputs["mri_images"] = [str(mri) for mri in Path(pth).glob("mri/*") if mri.is_file()] + outputs["mri_images"] = [ + str(mri) for mri in Path(pth).glob("mri/*") if mri.is_file() + ] for tidx, tissue in enumerate(["gm", "wm", "csf"]): - for idx, (image, prefix) in enumerate([("modulated", "mw"), ("dartel", "r"), ("native", "")]): - outtype = f'{tissue}_output_{image}' - if isdefined(getattr(self.inputs, outtype)) and getattr(self.inputs, outtype): - outfield = f'{tissue}_{image}_image' - prefix = os.path.join("mri", f'{prefix}p{tidx + 1}') + for idx, (image, 
prefix) in enumerate( + [("modulated", "mw"), ("dartel", "r"), ("native", "")] + ): + outtype = f"{tissue}_output_{image}" + if isdefined(getattr(self.inputs, outtype)) and getattr( + self.inputs, outtype + ): + outfield = f"{tissue}_{image}_image" + prefix = os.path.join("mri", f"{prefix}p{tidx + 1}") if image != "dartel": outputs[outfield] = fname_presuffix(f, prefix=prefix) else: - outputs[outfield] = fname_presuffix(f, prefix=prefix, suffix="_rigid") + outputs[outfield] = fname_presuffix( + f, prefix=prefix, suffix="_rigid" + ) if self.inputs.save_bias_corrected: - outputs["bias_corrected_image"] = fname_presuffix(f, prefix=os.path.join("mri", 'wmi')) + outputs["bias_corrected_image"] = fname_presuffix( + f, prefix=os.path.join("mri", "wmi") + ) - outputs["surface_files"] = [str(surf) for surf in Path(pth).glob("surf/*") if surf.is_file()] + outputs["surface_files"] = [ + str(surf) for surf in Path(pth).glob("surf/*") if surf.is_file() + ] for hemisphere in ["rh", "lh"]: for suffix in ["central", "sphere"]: - outfield = f'{hemisphere}_{suffix}_surface' - outputs[outfield] = fname_presuffix(f, prefix=os.path.join("surf", f'{hemisphere}.{suffix}.'), - suffix=".gii", use_ext=False) - - outputs["report_files"] = outputs["report_files"] = [str(report) for report in Path(pth).glob("report/*") - if report.is_file()] - - outputs[f'report'] = fname_presuffix(f, prefix=os.path.join("report", f'cat_'), suffix=".xml", use_ext=False) - - outputs["label_files"] = [str(label) for label in Path(pth).glob("label/*") if label.is_file()] - - outputs['label_rois'] = fname_presuffix(f, prefix=os.path.join("label", f'catROIs_'), suffix=".xml", - use_ext=False) - outputs['label_roi'] = fname_presuffix(f, prefix=os.path.join("label", f'catROI_'), suffix=".xml", - use_ext=False) + outfield = f"{hemisphere}_{suffix}_surface" + outputs[outfield] = fname_presuffix( + f, + prefix=os.path.join("surf", f"{hemisphere}.{suffix}."), + suffix=".gii", + use_ext=False, + ) + + outputs["report_files"] = outputs["report_files"] = [ + str(report) for report in Path(pth).glob("report/*") if report.is_file() + ] + + outputs[f"report"] = fname_presuffix( + f, prefix=os.path.join("report", f"cat_"), suffix=".xml", use_ext=False + ) + + outputs["label_files"] = [ + str(label) for label in Path(pth).glob("label/*") if label.is_file() + ] + + outputs["label_rois"] = fname_presuffix( + f, prefix=os.path.join("label", f"catROIs_"), suffix=".xml", use_ext=False + ) + outputs["label_roi"] = fname_presuffix( + f, prefix=os.path.join("label", f"catROI_"), suffix=".xml", use_ext=False + ) return outputs diff --git a/nipype/interfaces/cat12/surface.py b/nipype/interfaces/cat12/surface.py index dedaa53b7c..39925e6f29 100644 --- a/nipype/interfaces/cat12/surface.py +++ b/nipype/interfaces/cat12/surface.py @@ -10,33 +10,63 @@ class ExtractAdditionalSurfaceParametersInputSpec(SPMCommandInputSpec): - left_central_surfaces = InputMultiPath(File(exists=True), field="data_surf", - desc="Left and central surfaces files", mandatory=True, copyfile=False) - surface_files = InputMultiPath(File(exists=True), - desc="All surface files", mandatory=False, copyfile=False) - - gyrification = traits.Bool(True, field="GI", usedefault=True, - desc="Extract gyrification index (GI) based on absolute mean curvature. The" - " method is described in Luders et al. 
Neuroimage, 29:1224-1230, 2006") + left_central_surfaces = InputMultiPath( + File(exists=True), + field="data_surf", + desc="Left and central surfaces files", + mandatory=True, + copyfile=False, + ) + surface_files = InputMultiPath( + File(exists=True), desc="All surface files", mandatory=False, copyfile=False + ) + + gyrification = traits.Bool( + True, + field="GI", + usedefault=True, + desc="Extract gyrification index (GI) based on absolute mean curvature. The" + " method is described in Luders et al. Neuroimage, 29:1224-1230, 2006", + ) gmv = traits.Bool(True, field="gmv", usedefault=True, desc="Extract volume") area = traits.Bool(True, field="area", usedefault=True, desc="Extract area surface") - depth = traits.Bool(False, field="SD", usedefault=True, - desc="Extract sulcus depth based on euclidian distance between the central " - "surface anf its convex hull.") - fractal_dimension = traits.Bool(False, field="FD", usedefault=True, - desc="Extract cortical complexity (fractal dimension) which is " - "described in Yotter ar al. Neuroimage, 56(3): 961-973, 2011") + depth = traits.Bool( + False, + field="SD", + usedefault=True, + desc="Extract sulcus depth based on euclidian distance between the central " + "surface anf its convex hull.", + ) + fractal_dimension = traits.Bool( + False, + field="FD", + usedefault=True, + desc="Extract cortical complexity (fractal dimension) which is " + "described in Yotter ar al. Neuroimage, 56(3): 961-973, 2011", + ) class ExtractAdditionalSurfaceParametersOutputSpec(TraitedSpec): - lh_extracted_files = traits.List(File(exists=True), desc="Files of left Hemisphere extracted measures") - rh_extracted_files = traits.List(File(exists=True), desc="Files of right Hemisphere extracted measures") - - lh_gyrification = traits.List(File(exists=True), desc="Gyrification of left Hemisphere") - rh_gyrification = traits.List(File(exists=True), desc="Gyrification of right Hemisphere") - - lh_gmv = traits.List(File(exists=True), desc="Grey matter volume of left Hemisphere") - rh_gmv = traits.List(File(exists=True), desc="Grey matter volume of right Hemisphere") + lh_extracted_files = traits.List( + File(exists=True), desc="Files of left Hemisphere extracted measures" + ) + rh_extracted_files = traits.List( + File(exists=True), desc="Files of right Hemisphere extracted measures" + ) + + lh_gyrification = traits.List( + File(exists=True), desc="Gyrification of left Hemisphere" + ) + rh_gyrification = traits.List( + File(exists=True), desc="Gyrification of right Hemisphere" + ) + + lh_gmv = traits.List( + File(exists=True), desc="Grey matter volume of left Hemisphere" + ) + rh_gmv = traits.List( + File(exists=True), desc="Grey matter volume of right Hemisphere" + ) lh_area = traits.List(File(exists=True), desc="Area of left Hemisphere") rh_area = traits.List(File(exists=True), desc="Area of right Hemisphere") @@ -44,8 +74,12 @@ class ExtractAdditionalSurfaceParametersOutputSpec(TraitedSpec): lh_depth = traits.List(File(exists=True), desc="Depth of left Hemisphere") rh_depth = traits.List(File(exists=True), desc="Depth of right Hemisphere") - lh_fractaldimension = traits.List(File(exists=True), desc="Fractal Dimension of left Hemisphere") - rh_fractaldimension = traits.List(File(exists=True), desc="Fractal Dimension of right Hemisphere") + lh_fractaldimension = traits.List( + File(exists=True), desc="Fractal Dimension of left Hemisphere" + ) + rh_fractaldimension = traits.List( + File(exists=True), desc="Fractal Dimension of right Hemisphere" + ) class 
ExtractAdditionalSurfaceParameters(SPMCommand): @@ -76,6 +110,7 @@ class ExtractAdditionalSurfaceParameters(SPMCommand): >>> extract_additional_measures.run() # doctest: +SKIP """ + input_spec = ExtractAdditionalSurfaceParametersInputSpec output_spec = ExtractAdditionalSurfaceParametersOutputSpec @@ -90,9 +125,13 @@ def __init__(self, **inputs): def _list_outputs(self): outputs = self._outputs().get() - names_outputs = [(self.inputs.gyrification, 'gyrification'), (self.inputs.gmv, 'gmv'), - (self.inputs.area, 'area'), (self.inputs.depth, 'depth'), - (self.inputs.fractal_dimension, 'fractaldimension')] + names_outputs = [ + (self.inputs.gyrification, "gyrification"), + (self.inputs.gmv, "gmv"), + (self.inputs.area, "area"), + (self.inputs.depth, "depth"), + (self.inputs.fractal_dimension, "fractaldimension"), + ] for filename in self.inputs.left_central_surfaces: pth, base, ext = split_filename(filename) @@ -101,47 +140,79 @@ def _list_outputs(self): for i, (extracted_parameter, parameter_name) in enumerate(names_outputs): if extracted_parameter: for hemisphere in ["rh", "lh"]: - all_files_hemisphere = hemisphere + '_extracted_files' + all_files_hemisphere = hemisphere + "_extracted_files" name_hemisphere = hemisphere + "_" + parameter_name if isinstance(outputs[name_hemisphere], _Undefined): outputs[name_hemisphere] = [] if isinstance(outputs[all_files_hemisphere], _Undefined): outputs[all_files_hemisphere] = [] - generated_filename = ".".join([hemisphere, parameter_name, original_filename]) - outputs[name_hemisphere].append(os.path.join(pth, generated_filename)) + generated_filename = ".".join( + [hemisphere, parameter_name, original_filename] + ) + outputs[name_hemisphere].append( + os.path.join(pth, generated_filename) + ) # Add all hemisphere files into one list, this is important because only the left hemisphere # files are used as input in the Surface ROI Tools, fpr instance. - outputs[all_files_hemisphere].append(os.path.join(pth, generated_filename)) + outputs[all_files_hemisphere].append( + os.path.join(pth, generated_filename) + ) return outputs def _format_arg(self, opt, spec, val): if opt == "left_central_surfaces": return Cell2Str(val) - return super(ExtractAdditionalSurfaceParameters, self)._format_arg(opt, spec, val) + return super(ExtractAdditionalSurfaceParameters, self)._format_arg( + opt, spec, val + ) class ExtractROIBasedSurfaceMeasuresInputSpec(SPMCommandInputSpec): # Only these files are given as input, yet the right hemisphere (rh) files should also be on the processing # directory. - surface_files = InputMultiPath(File(exists=True), desc="Surface data files. This variable should be a list " - "with all", mandatory=False, copyfile=False) - lh_roi_atlas = InputMultiPath(File(exists=True), field="rdata", desc="(Left) ROI Atlas. These are the ROI's ", - mandatory=True, copyfile=False) - - rh_roi_atlas = InputMultiPath(File(exists=True), desc="(Right) ROI Atlas. These are the ROI's ", - mandatory=False, copyfile=False) - - lh_surface_measure = InputMultiPath(File(exists=True), field="cdata", desc="(Left) Surface data files. ", - mandatory=True, copyfile=False) - rh_surface_measure = InputMultiPath(File(exists=True), desc="(Right) Surface data files.", - mandatory=False, copyfile=False) + surface_files = InputMultiPath( + File(exists=True), + desc="Surface data files. This variable should be a list " "with all", + mandatory=False, + copyfile=False, + ) + lh_roi_atlas = InputMultiPath( + File(exists=True), + field="rdata", + desc="(Left) ROI Atlas. 
These are the ROI's ", + mandatory=True, + copyfile=False, + ) + + rh_roi_atlas = InputMultiPath( + File(exists=True), + desc="(Right) ROI Atlas. These are the ROI's ", + mandatory=False, + copyfile=False, + ) + + lh_surface_measure = InputMultiPath( + File(exists=True), + field="cdata", + desc="(Left) Surface data files. ", + mandatory=True, + copyfile=False, + ) + rh_surface_measure = InputMultiPath( + File(exists=True), + desc="(Right) Surface data files.", + mandatory=False, + copyfile=False, + ) class ExtractROIBasedSurfaceMeasuresOutputSpec(TraitedSpec): - label_files = traits.List(File(exists=True), desc="Files with the measures extracted for ROIs.") + label_files = traits.List( + File(exists=True), desc="Files with the measures extracted for ROIs." + ) class ExtractROIBasedSurfaceMeasures(SPMCommand): @@ -194,7 +265,9 @@ def _list_outputs(self): pth, base, ext = split_filename(self.inputs.lh_surface_measure[0]) - outputs["label_files"] = [str(label) for label in Path(pth).glob("label/*") if label.is_file()] + outputs["label_files"] = [ + str(label) for label in Path(pth).glob("label/*") if label.is_file() + ] return outputs @@ -204,21 +277,18 @@ def __init__(self, arg): def to_string(self): if isinstance(self.arg, list): - v = '\n'.join([f"'{el}'" for el in self.arg]) + v = "\n".join([f"'{el}'" for el in self.arg]) else: v = self.arg return v class NestedCell(Cell): - def __str__(self): return "{{%s}}" % self.to_string() class Cell2Str(Cell): - def __str__(self): - """Convert input to appropriate format for cat12 - """ + """Convert input to appropriate format for cat12""" return "{%s}" % self.to_string() From 8a44b4696b9fd70aaeb9c1384997ecf731758821 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Fri, 30 Apr 2021 17:20:10 -0400 Subject: [PATCH 0976/1665] TEST: make specs --- nipype/interfaces/cat12/tests/__init__.py | 0 .../cat12/tests/test_auto_CAT12Segment.py | 251 ++++++++++++++++++ ...auto_ExtractAdditionalSurfaceParameters.py | 73 +++++ ...est_auto_ExtractROIBasedSurfaceMeasures.py | 55 ++++ 4 files changed, 379 insertions(+) create mode 100644 nipype/interfaces/cat12/tests/__init__.py create mode 100644 nipype/interfaces/cat12/tests/test_auto_CAT12Segment.py create mode 100644 nipype/interfaces/cat12/tests/test_auto_ExtractAdditionalSurfaceParameters.py create mode 100644 nipype/interfaces/cat12/tests/test_auto_ExtractROIBasedSurfaceMeasures.py diff --git a/nipype/interfaces/cat12/tests/__init__.py b/nipype/interfaces/cat12/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/interfaces/cat12/tests/test_auto_CAT12Segment.py b/nipype/interfaces/cat12/tests/test_auto_CAT12Segment.py new file mode 100644 index 0000000000..67afbb573e --- /dev/null +++ b/nipype/interfaces/cat12/tests/test_auto_CAT12Segment.py @@ -0,0 +1,251 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from ..preprocess import CAT12Segment + + +def test_CAT12Segment_inputs(): + input_map = dict( + affine_preprocessing=dict( + field="extopts.APP", + usedefault=True, + ), + affine_regularization=dict( + field="opts.affreg", + usedefault=True, + ), + cobra=dict( + field="output.ROImenu.atlases.hammers", + usedefault=True, + ), + csf_output_dartel=dict( + field="output.CSF.dartel", + usedefault=True, + ), + csf_output_modulated=dict( + field="output.CSF.mod", + usedefault=True, + ), + csf_output_native=dict( + field="output.CSF.native", + usedefault=True, + ), + gm_output_dartel=dict( + field="output.GM.dartel", + usedefault=True, + ), + 
gm_output_modulated=dict( + field="output.GM.mod", + usedefault=True, + ), + gm_output_native=dict( + field="output.GM.native", + usedefault=True, + ), + hammers=dict( + field="output.ROImenu.atlases.cobra", + usedefault=True, + ), + ignore_errors=dict( + field="extopts.ignoreErrors", + usedefault=True, + ), + in_files=dict( + copyfile=False, + field="data", + mandatory=True, + ), + initial_segmentation=dict( + field="extopts.spm_kamap", + usedefault=True, + ), + internal_resampling_process=dict( + field="extopts.restypes.optimal", + maxlen=2, + minlen=2, + usedefault=True, + ), + jacobianwarped=dict( + field="output.jacobianwarped", + usedefault=True, + ), + label_dartel=dict( + field="output.label.dartel", + usedefault=True, + ), + label_native=dict( + field="output.label.native", + usedefault=True, + ), + label_warped=dict( + field="output.label.warped", + usedefault=True, + ), + las_dartel=dict( + field="output.las.dartel", + usedefault=True, + ), + las_native=dict( + field="output.las.native", + usedefault=True, + ), + las_warped=dict( + field="output.las.warped", + usedefault=True, + ), + local_adaptive_seg=dict( + field="extopts.LASstr", + usedefault=True, + ), + lpba40=dict( + field="output.ROImenu.atlases.lpba40", + usedefault=True, + ), + matlab_cmd=dict(), + mfile=dict( + usedefault=True, + ), + n_jobs=dict( + field="nproc", + mandatory=True, + usedefault=True, + ), + neuromorphometrics=dict( + field="output.ROImenu.atlases.neuromorphometrics", + usedefault=True, + ), + output_labelnative=dict( + field="output.labelnative", + usedefault=True, + ), + own_atlas=dict( + copyfile=False, + field="output.ROImenu.atlases.ownatlas", + mandatory=False, + ), + paths=dict(), + power_spm_inhomogeneity_correction=dict( + field="opts.biasacc", + usedefault=True, + ), + save_bias_corrected=dict( + field="output.bias.warped", + usedefault=True, + ), + skull_strip=dict( + field="extopts.gcutstr", + usedefault=True, + ), + surface_and_thickness_estimation=dict( + field="surface", + usedefault=True, + ), + surface_measures=dict( + field="output.surf_measures", + usedefault=True, + ), + tpm=dict( + copyfile=False, + field="tpm", + mandatory=False, + ), + use_mcr=dict(), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + voxel_size=dict( + field="extopts.vox", + usedefault=True, + ), + warps=dict( + field="output.warps", + maxlen=2, + minlen=2, + usedefault=True, + ), + wm_hyper_intensity_correction=dict( + field="extopts.WMHC", + usedefault=True, + ), + wm_output_dartel=dict( + field="output.WM.dartel", + usedefault=True, + ), + wm_output_modulated=dict( + field="output.WM.mod", + usedefault=True, + ), + wm_output_native=dict( + field="output.WM.native", + usedefault=True, + ), + ) + inputs = CAT12Segment.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_CAT12Segment_outputs(): + output_map = dict( + bias_corrected_image=dict( + extensions=None, + ), + csf_dartel_image=dict( + extensions=None, + ), + csf_modulated_image=dict( + extensions=None, + ), + csf_native_image=dict( + extensions=None, + ), + gm_dartel_image=dict( + extensions=None, + ), + gm_modulated_image=dict( + extensions=None, + ), + gm_native_image=dict( + extensions=None, + ), + label_files=dict(), + label_roi=dict( + extensions=None, + ), + label_rois=dict( + extensions=None, + ), + lh_central_surface=dict( + extensions=None, + ), + lh_sphere_surface=dict( + extensions=None, + ), + 
mri_images=dict(), + report=dict( + extensions=None, + ), + report_files=dict(), + rh_central_surface=dict( + extensions=None, + ), + rh_sphere_surface=dict( + extensions=None, + ), + surface_files=dict(), + wm_dartel_image=dict( + extensions=None, + ), + wm_modulated_image=dict( + extensions=None, + ), + wm_native_image=dict( + extensions=None, + ), + ) + outputs = CAT12Segment.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/cat12/tests/test_auto_ExtractAdditionalSurfaceParameters.py b/nipype/interfaces/cat12/tests/test_auto_ExtractAdditionalSurfaceParameters.py new file mode 100644 index 0000000000..cde7f2057e --- /dev/null +++ b/nipype/interfaces/cat12/tests/test_auto_ExtractAdditionalSurfaceParameters.py @@ -0,0 +1,73 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from ..surface import ExtractAdditionalSurfaceParameters + + +def test_ExtractAdditionalSurfaceParameters_inputs(): + input_map = dict( + area=dict( + field="area", + usedefault=True, + ), + depth=dict( + field="SD", + usedefault=True, + ), + fractal_dimension=dict( + field="FD", + usedefault=True, + ), + gmv=dict( + field="gmv", + usedefault=True, + ), + gyrification=dict( + field="GI", + usedefault=True, + ), + left_central_surfaces=dict( + copyfile=False, + field="data_surf", + mandatory=True, + ), + matlab_cmd=dict(), + mfile=dict( + usedefault=True, + ), + paths=dict(), + surface_files=dict( + copyfile=False, + mandatory=False, + ), + use_mcr=dict(), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + ) + inputs = ExtractAdditionalSurfaceParameters.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_ExtractAdditionalSurfaceParameters_outputs(): + output_map = dict( + lh_area=dict(), + lh_depth=dict(), + lh_extracted_files=dict(), + lh_fractaldimension=dict(), + lh_gmv=dict(), + lh_gyrification=dict(), + rh_area=dict(), + rh_depth=dict(), + rh_extracted_files=dict(), + rh_fractaldimension=dict(), + rh_gmv=dict(), + rh_gyrification=dict(), + ) + outputs = ExtractAdditionalSurfaceParameters.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/cat12/tests/test_auto_ExtractROIBasedSurfaceMeasures.py b/nipype/interfaces/cat12/tests/test_auto_ExtractROIBasedSurfaceMeasures.py new file mode 100644 index 0000000000..ffc18324aa --- /dev/null +++ b/nipype/interfaces/cat12/tests/test_auto_ExtractROIBasedSurfaceMeasures.py @@ -0,0 +1,55 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from ..surface import ExtractROIBasedSurfaceMeasures + + +def test_ExtractROIBasedSurfaceMeasures_inputs(): + input_map = dict( + lh_roi_atlas=dict( + copyfile=False, + field="rdata", + mandatory=True, + ), + lh_surface_measure=dict( + copyfile=False, + field="cdata", + mandatory=True, + ), + matlab_cmd=dict(), + mfile=dict( + usedefault=True, + ), + paths=dict(), + rh_roi_atlas=dict( + copyfile=False, + mandatory=False, + ), + rh_surface_measure=dict( + copyfile=False, + mandatory=False, + ), + surface_files=dict( + copyfile=False, + mandatory=False, + ), + use_mcr=dict(), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + ) + inputs = ExtractROIBasedSurfaceMeasures.input_spec() + + 
for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_ExtractROIBasedSurfaceMeasures_outputs(): + output_map = dict( + label_files=dict(), + ) + outputs = ExtractROIBasedSurfaceMeasures.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value From b7879d75a63b6500b2e7d2c3eba5aa7670339274 Mon Sep 17 00:00:00 2001 From: d-vogel Date: Sat, 1 May 2021 14:45:20 +0200 Subject: [PATCH 0977/1665] ENH: Skip newline before Python call in batch submission to facilitate containerized runs (#3297) * Avoid empty line in bash script output Remove the newline at the end of template to avoid having am empty line after joining with the node command. This allows to have the node command integrated in more complex call, such as a `singularity exec ...` call. * Rather remove the newlines than trim the last char of the template. As per @effigies suggestion. Co-authored-by: Chris Markiewicz Co-authored-by: Christopher J. Markiewicz Co-authored-by: Chris Markiewicz --- nipype/pipeline/plugins/base.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nipype/pipeline/plugins/base.py b/nipype/pipeline/plugins/base.py index 8949d36be3..becc6b4364 100644 --- a/nipype/pipeline/plugins/base.py +++ b/nipype/pipeline/plugins/base.py @@ -543,7 +543,9 @@ def _submit_job(self, node, updatehash=False): pyscript = create_pyscript(node, updatehash=updatehash) batch_dir, name = os.path.split(pyscript) name = ".".join(name.split(".")[:-1]) - batchscript = "\n".join((self._template, "%s %s" % (sys.executable, pyscript))) + batchscript = "\n".join( + (self._template.rstrip("\n"), "%s %s" % (sys.executable, pyscript)) + ) batchscriptfile = os.path.join(batch_dir, "batchscript_%s.sh" % name) with open(batchscriptfile, "wt") as fp: fp.writelines(batchscript) From 35a762f4e0705ee314d100da7eb9df0da9632ee9 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Thu, 6 May 2021 11:13:40 +1000 Subject: [PATCH 0978/1665] Update nipype/interfaces/mrtrix3/reconst.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/mrtrix3/reconst.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/reconst.py b/nipype/interfaces/mrtrix3/reconst.py index 0e5523fa1d..be0832a3f8 100644 --- a/nipype/interfaces/mrtrix3/reconst.py +++ b/nipype/interfaces/mrtrix3/reconst.py @@ -4,7 +4,7 @@ import os.path as op -from ..base import traits, TraitedSpec, File, Undefined, InputMultiObject, isdefined +from ..base import traits, TraitedSpec, File, InputMultiObject, isdefined from .base import MRTrix3BaseInputSpec, MRTrix3Base From 7e670ffa6ea76113c2d6ad9496042d6d568673ed Mon Sep 17 00:00:00 2001 From: Tom Close Date: Thu, 6 May 2021 11:13:51 +1000 Subject: [PATCH 0979/1665] Update nipype/interfaces/mrtrix3/utils.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/mrtrix3/utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index d2dd5c5906..c5c1d8ec21 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -768,7 +768,6 @@ class MRResize(MRTrix3Base): class SHConvInputSpec(CommandLineInputSpec): - in_file = File( exists=True, argstr="%s", From 8671983f2e4bdf84ef03dd1dc8332af7411f91ab Mon Sep 17 00:00:00 2001 From: Tom Close Date: Thu, 6 May 2021 11:13:59 +1000 Subject: [PATCH 
0980/1665] Update nipype/interfaces/mrtrix3/utils.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/mrtrix3/utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index c5c1d8ec21..6a343f1d95 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -775,7 +775,6 @@ class SHConvInputSpec(CommandLineInputSpec): position=-3, desc="input ODF image", ) - # General options response = File( exists=True, From 308436d63563d7a989b24e92fd48ef40205bcd8d Mon Sep 17 00:00:00 2001 From: Tom Close Date: Thu, 6 May 2021 11:14:06 +1000 Subject: [PATCH 0981/1665] Update nipype/interfaces/mrtrix3/utils.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/mrtrix3/utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index 6a343f1d95..5cb1d7bcdd 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -783,7 +783,6 @@ class SHConvInputSpec(CommandLineInputSpec): position=-2, desc=("The response function"), ) - out_file = File( "sh.mif", argstr="%s", From f4eefebcb5118c80842cca3cd8baabb8eee222ea Mon Sep 17 00:00:00 2001 From: Tom Close Date: Thu, 6 May 2021 11:14:11 +1000 Subject: [PATCH 0982/1665] Update nipype/interfaces/mrtrix3/utils.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/mrtrix3/utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index 5cb1d7bcdd..fb0e749d63 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -833,7 +833,6 @@ class SH2AmpInputSpec(CommandLineInputSpec): position=-3, desc="input ODF image", ) - # General options directions = File( exists=True, From 70e3bcfc14c9ab13ce32adcb0972a0525341691d Mon Sep 17 00:00:00 2001 From: Tom Close Date: Thu, 6 May 2021 11:14:29 +1000 Subject: [PATCH 0983/1665] Update nipype/interfaces/mrtrix3/utils.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/mrtrix3/utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index fb0e749d63..187e853683 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -844,7 +844,6 @@ class SH2AmpInputSpec(CommandLineInputSpec): "harmonics MRtrix format" ), ) - out_file = File( "amp.mif", argstr="%s", From a6e440da59cf9234cc9eab92dae396e75b71ad40 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Thu, 6 May 2021 11:14:36 +1000 Subject: [PATCH 0984/1665] Update nipype/interfaces/mrtrix3/utils.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/mrtrix3/utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index 187e853683..956f0a78ad 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -852,7 +852,6 @@ class SH2AmpInputSpec(CommandLineInputSpec): usedefault=True, desc="the output spherical harmonics", ) - nonnegative = traits.Bool( argstr="-nonnegative", desc="cap all negative amplitudes to zero" ) From 5266a7cc5039cf950e034336e2af11c0c51a545c Mon Sep 17 00:00:00 2001 From: Tom Close Date: Thu, 6 May 2021 11:15:10 +1000 Subject: [PATCH 0985/1665] Update nipype/interfaces/mrtrix3/utils.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/mrtrix3/utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index 956f0a78ad..72287cd4a2 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -845,7 +845,8 @@ class SH2AmpInputSpec(CommandLineInputSpec): ), ) out_file = File( - "amp.mif", + name_template="%s_amp.mif", + name_source=["in_file"], argstr="%s", mandatory=True, position=-1, From c5d7ffe5480d316710b987d045bb81cc74b242b5 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Thu, 6 May 2021 11:15:16 +1000 Subject: [PATCH 0986/1665] Update nipype/interfaces/mrtrix3/utils.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/mrtrix3/utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index 72287cd4a2..f605e3c5e4 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -784,7 +784,8 @@ class SHConvInputSpec(CommandLineInputSpec): desc=("The response function"), ) out_file = File( - "sh.mif", + name_template="%s_shconv.mif", + name_source=["in_file"], argstr="%s", mandatory=True, position=-1, From 424400b101efe8a992a3d46b2374138c2a8cd33e Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 25 May 2021 10:06:48 -0400 Subject: [PATCH 0987/1665] CI: Drop Travis (#3332) --- .travis.yml | 84 ----------------------------------------------------- 1 file changed, 84 deletions(-) delete mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 40d5937c02..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,84 +0,0 @@ -os: linux -dist: bionic - -language: python -# our build matrix -python: -- 3.6 -- 3.7 -- 3.8 - -env: - global: - - EXTRA_WHEELS="https://5cf40426d9f06eb7461d-6fe47d9331aba7cd62fc36c7196769e4.ssl.cf2.rackcdn.com" - - PRE_WHEELS="https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com" - - EXTRA_PIP_FLAGS="--find-links=$EXTRA_WHEELS" - - CHECK_TYPE=test - jobs: - - INSTALL_DEB_DEPENDECIES=true - NIPYPE_EXTRAS="doc,tests,nipy,profiler" - CI_SKIP_TEST=1 - - INSTALL_DEB_DEPENDECIES=false - NIPYPE_EXTRAS="doc,tests,profiler" - CI_SKIP_TEST=1 - - INSTALL_DEB_DEPENDECIES=true - NIPYPE_EXTRAS="doc,tests,nipy,profiler,duecredit,ssh" - CI_SKIP_TEST=1 - - INSTALL_DEB_DEPENDECIES=true - NIPYPE_EXTRAS="doc,tests,nipy,profiler" - EXTRA_PIP_FLAGS="--pre $EXTRA_PIP_FLAGS --find-links $PRE_WHEELS --upgrade" - CI_SKIP_TEST=1 - -jobs: - include: - - python: 3.7 - env: - - NIPYPE_EXTRAS=dev - CHECK_TYPE=specs - -addons: - apt: - packages: - - xvfb - - fusefat - - graphviz - -cache: - directories: - - ${HOME}/.cache - -before_install: -- if $INSTALL_DEB_DEPENDECIES; then sudo rm -rf /dev/shm; sudo ln -s /run/shm /dev/shm; fi -- travis_retry bash <(wget -q -O- http://neuro.debian.net/_files/neurodebian-travis.sh); -- if $INSTALL_DEB_DEPENDECIES; then - travis_retry sudo apt-get -y update && - travis_retry sudo apt-get install -y -qq fsl afni elastix fsl-atlases; - fi; -- if $INSTALL_DEB_DEPENDECIES; then - source /etc/fsl/fsl.sh; - source /etc/afni/afni.sh; - export FSLOUTPUTTYPE=NIFTI_GZ; - fi; - -- travis_retry pip install $EXTRA_PIP_FLAGS -r requirements.txt -- travis_retry pip install grabbit==0.2.6 -- travis_retry pip install -e git+https://github.com/bids-standard/pybids.git@0.7.0#egg=pybids - -install: -- travis_retry pip install $EXTRA_PIP_FLAGS -e .[$NIPYPE_EXTRAS] -- travis_retry pip install pytest-xdist - -script: -- | - if [ "$CHECK_TYPE" = "test" ]; then - py.test -sv --cov nipype --cov-config 
.coveragerc --cov-report xml:cov.xml -c nipype/pytest.ini --doctest-modules nipype -n 2 - fi -- | - if [ "$CHECK_TYPE" = "specs" ]; then - make specs - git add nipype - test "$( git diff --cached | wc -l )" -eq 0 || ( git diff --cached && false ) - fi - -after_script: -- codecov --file cov.xml --flags unittests -e TRAVIS_JOB_NUMBER From f980447086cf14b65a77ffb73e53760f80db7b58 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Tue, 25 May 2021 10:35:29 -0400 Subject: [PATCH 0988/1665] MNT: Blacklist Dipy 1.4.1 (#3335) --- nipype/info.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/info.py b/nipype/info.py index f5a43334c9..7f6899cdd0 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -162,7 +162,7 @@ def get_nipype_gitversion(): EXTRA_REQUIRES = { "data": ["datalad"], "doc": [ - "dipy", + "dipy!=1.4.1", "ipython", "matplotlib", "nbsphinx", @@ -172,7 +172,7 @@ def get_nipype_gitversion(): "sphinxcontrib-napoleon", ], "duecredit": ["duecredit"], - "nipy": ["nitime", "nilearn", "dipy", "nipy", "matplotlib"], + "nipy": ["nitime", "nilearn", "dipy!=1.4.1", "nipy", "matplotlib"], "profiler": ["psutil>=5.0"], "pybids": ["pybids>=0.7.0"], "specs": ["black"], From ea4a2d722ce0f1f253d6d39eb54d67f9cc4f3f48 Mon Sep 17 00:00:00 2001 From: Michael Date: Thu, 27 May 2021 15:15:28 +0200 Subject: [PATCH 0989/1665] CI: push tags to dockerhub (#3283) --- .circleci/config.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 902649cd03..8d67282867 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -52,6 +52,7 @@ _build_main_image_py36: &build_main_image_py36 --rm=false \ --tag nipype/nipype:latest \ --tag nipype/nipype:py36 \ + $(test -z "${CIRCLE_TAG}" || echo --tag nipype/nipype:"${CIRCLE_TAG}") \ --build-arg BUILD_DATE="$(date -u +"%Y-%m-%dT%H:%M:%SZ")" \ --build-arg VCS_REF="$(git rev-parse --short HEAD)" \ --build-arg VERSION="${CIRCLE_TAG}" /home/circleci/nipype @@ -192,6 +193,7 @@ jobs: docker push nipype/nipype:base docker push nipype/nipype:latest docker push nipype/nipype:py36 + test -z "${CIRCLE_TAG}" || docker push nipype/nipype:"${CIRCLE_TAG}" - run: name: Move pruned Dockerfile to /tmp/docker/cache directory command: | @@ -353,6 +355,8 @@ workflows: filters: branches: only: master + tags: + only: /.*/ requires: - test_pytest - deploy_pypi: From d02750a3f8b7aedebf807c88d1cf27c0e1d7e79c Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 27 May 2021 09:17:22 -0400 Subject: [PATCH 0990/1665] CI: Drop Circle doc builds (#3338) --- .circleci/config.yml | 34 ---------------------------------- .github/workflows/main.yml | 12 ------------ 2 files changed, 46 deletions(-) delete mode 100644 .github/workflows/main.yml diff --git a/.circleci/config.yml b/.circleci/config.yml index 8d67282867..a1effa019c 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -291,44 +291,10 @@ jobs: ssh-add ~/.ssh/id_ed25519 /home/circleci/nipype/tools/feedstock.sh - build_docs: - docker: - - image: python:3.7.4 - working_directory: /tmp/src/nipype - environment: - - FSLOUTPUTTYPE: 'NIFTI' - steps: - - checkout - - run: - name: Check Python version and upgrade pip - command: | - python --version - python -m pip install -U pip - - run: - name: Install graphviz - command: | - apt-get update - apt-get install -y graphviz - - run: - name: Install Requirements (may contain pinned versions) - command: python -m pip install -r doc/requirements.txt - - run: - name: Install NiPype - command: python -m pip install 
".[doc]" - - run: - name: Build documentation - command: make -C doc html - - store_artifacts: - path: /tmp/src/nipype/doc/_build/html - workflows: version: 2 build_test_deploy: jobs: - - build_docs: - filters: - tags: - only: /.*/ - pypi_precheck: filters: branches: diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml deleted file mode 100644 index 90fda1aea7..0000000000 --- a/.github/workflows/main.yml +++ /dev/null @@ -1,12 +0,0 @@ -on: [status] -jobs: - circleci_artifacts_redirector_job: - runs-on: ubuntu-latest - name: Run CircleCI artifacts redirector - steps: - - name: GitHub Action step - uses: larsoner/circleci-artifacts-redirector-action@master - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - artifact-path: 0/tmp/src/nipype/doc/_build/html/index.html - circleci-jobs: build_docs From b3361325022c591a86f09579a24234457a269b47 Mon Sep 17 00:00:00 2001 From: Tim Robert-Fitzgerald Date: Thu, 27 May 2021 23:18:44 -0400 Subject: [PATCH 0991/1665] return None if R can't be found Co-authored-by: Chris Markiewicz --- nipype/interfaces/r.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/nipype/interfaces/r.py b/nipype/interfaces/r.py index 37010853d8..06ee212644 100644 --- a/nipype/interfaces/r.py +++ b/nipype/interfaces/r.py @@ -19,11 +19,9 @@ def get_r_command(): if "NIPYPE_NO_R" in os.environ: return None - try: - r_cmd = os.environ["RCMD"] - except: - r_cmd = "R" - return r_cmd + r_cmd = os.getenv("RCMD", default="R") + + return r_cmd if which(r_cmd) else None no_r = get_r_command() is None From 2638489e2c02dff35fc583306a6c93e0bc5a3b1c Mon Sep 17 00:00:00 2001 From: Tim Robert-Fitzgerald Date: Thu, 27 May 2021 23:19:20 -0400 Subject: [PATCH 0992/1665] remove hardcoded variable Co-authored-by: Chris Markiewicz --- nipype/interfaces/r.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/r.py b/nipype/interfaces/r.py index 06ee212644..c9fd51b392 100644 --- a/nipype/interfaces/r.py +++ b/nipype/interfaces/r.py @@ -49,7 +49,7 @@ class RCommand(CommandLine): >>> out = r.run() # doctest: +SKIP """ - _cmd = "R" + _cmd = get_r_command() _default_r_cmd = None _default_rfile = None input_spec = RInputSpec From f7cb9b2f6d2e9607d5cc2670856d6227ce0d208e Mon Sep 17 00:00:00 2001 From: Terf Date: Thu, 27 May 2021 23:20:06 -0400 Subject: [PATCH 0993/1665] R interface tests --- nipype/interfaces/tests/test_r.py | 100 ++++++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 nipype/interfaces/tests/test_r.py diff --git a/nipype/interfaces/tests/test_r.py b/nipype/interfaces/tests/test_r.py new file mode 100644 index 0000000000..a851e4df6b --- /dev/null +++ b/nipype/interfaces/tests/test_r.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +import os + +import pytest +import nipype.interfaces.r as r + +r_cmd = r.get_r_command() +no_r = r_cmd is None +if not no_r: + r.RCommand.set_default_r_cmd(r_cmd) + + +def clean_workspace_and_get_default_script_file(): + # Make sure things are clean. + default_script_file = r.RInputSpec().script_file + if os.path.exists(default_script_file): + os.remove( + default_script_file + ) # raise Exception('Default script file needed for tests; please remove %s!' 
% default_script_file) + return default_script_file + + +@pytest.mark.skipif(no_r, reason="R is not available") +def test_cmdline(): + default_script_file = clean_workspace_and_get_default_script_file() + + ri = r.RCommand(script="1 + 1", script_file="testscript", rfile=False) + + assert ri.cmdline == r_cmd + ( + ' -e "1 + 1"' + ) + + assert ri.inputs.script == "1 + 1" + assert ri.inputs.script_file == "testscript" + assert not os.path.exists(ri.inputs.script_file), "scriptfile should not exist" + assert not os.path.exists( + default_script_file + ), "default scriptfile should not exist." + +@pytest.mark.skipif(no_r, reason="R is not available") +def test_r_init(): + default_script_file = clean_workspace_and_get_default_script_file() + + assert r.RCommand._cmd == "R" + assert r.RCommand.input_spec == r.RInputSpec + + assert r.RCommand().cmd == r_cmd + rc = r.RCommand(r_cmd="foo_m") + assert rc.cmd == "foo_m" + + +@pytest.mark.skipif(no_r, reason="R is not available") +def test_run_interface(tmpdir): + default_script_file = clean_workspace_and_get_default_script_file() + + rc = r.RCommand(r_cmd="foo_m") + assert not os.path.exists(default_script_file), "scriptfile should not exist 1." + with pytest.raises(ValueError): + rc.run() # script is mandatory + assert not os.path.exists(default_script_file), "scriptfile should not exist 2." + if os.path.exists(default_script_file): # cleanup + os.remove(default_script_file) + + rc.inputs.script = "a=1;" + assert not os.path.exists(default_script_file), "scriptfile should not exist 3." + with pytest.raises(IOError): + rc.run() # foo_m is not an executable + assert os.path.exists(default_script_file), "scriptfile should exist 3." + if os.path.exists(default_script_file): # cleanup + os.remove(default_script_file) + + cwd = tmpdir.chdir() + + # bypasses ubuntu dash issue + rc = r.RCommand(script="foo;", rfile=True) + assert not os.path.exists(default_script_file), "scriptfile should not exist 4." + with pytest.raises(RuntimeError): + rc.run() + assert os.path.exists(default_script_file), "scriptfile should exist 4." + if os.path.exists(default_script_file): # cleanup + os.remove(default_script_file) + + # bypasses ubuntu dash issue + res = r.RCommand(script="a=1;", rfile=True).run() + assert res.runtime.returncode == 0 + assert os.path.exists(default_script_file), "scriptfile should exist 5." + cwd.chdir() + + +@pytest.mark.skipif(no_r, reason="R is not available") +def test_set_rcmd(): + default_script_file = clean_workspace_and_get_default_script_file() + + ri = r.RCommand() + ri.set_default_r_cmd("foo") + assert not os.path.exists(default_script_file), "scriptfile should not exist." + assert ri._default_r_cmd == "foo" + ri.set_default_r_cmd(r_cmd) From f250f5ce1ff5976e3db3473b3393b6bccbec9467 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sun, 30 May 2021 09:16:42 -0400 Subject: [PATCH 0994/1665] FIX: Set DistributedPluginBase.refidx type correctly --- nipype/pipeline/plugins/base.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/nipype/pipeline/plugins/base.py b/nipype/pipeline/plugins/base.py index becc6b4364..a33a40aab6 100644 --- a/nipype/pipeline/plugins/base.py +++ b/nipype/pipeline/plugins/base.py @@ -417,14 +417,10 @@ def _generate_dependency_list(self, graph): import networkx as nx self.procs, _ = topological_sort(graph) - try: - self.depidx = nx.to_scipy_sparse_matrix( - graph, nodelist=self.procs, format="lil" - ) - except: - self.depidx = nx.to_scipy_sparse_matrix(graph, nodelist=self.procs) - self.refidx = deepcopy(self.depidx) - self.refidx.astype = np.int + self.depidx = nx.to_scipy_sparse_matrix( + graph, nodelist=self.procs, format="lil" + ) + self.refidx = self.depidx.astype(int) self.proc_done = np.zeros(len(self.procs), dtype=bool) self.proc_pending = np.zeros(len(self.procs), dtype=bool) From 12d06fdbdec27f7113469f910d12b4a5a1357358 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sun, 30 May 2021 10:17:44 -0400 Subject: [PATCH 0995/1665] FIX: Specify dtype object when making arrays from list that might contain lists (#3342) --- nipype/interfaces/utility/base.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/utility/base.py b/nipype/interfaces/utility/base.py index 5a59d95bad..641489ecc4 100644 --- a/nipype/interfaces/utility/base.py +++ b/nipype/interfaces/utility/base.py @@ -362,7 +362,9 @@ def _list_outputs(self): splits.extend(self.inputs.splits) splits = np.cumsum(splits) for i in range(len(splits) - 1): - val = np.array(self.inputs.inlist)[splits[i] : splits[i + 1]].tolist() + val = np.array(self.inputs.inlist, dtype=object)[ + splits[i] : splits[i + 1] + ].tolist() if self.inputs.squeeze and len(val) == 1: val = val[0] outputs["out%d" % (i + 1)] = val @@ -407,7 +409,9 @@ class Select(IOBase): def _list_outputs(self): outputs = self._outputs().get() - out = np.array(self.inputs.inlist)[np.array(self.inputs.index)].tolist() + out = np.array(self.inputs.inlist, dtype=object)[ + np.array(self.inputs.index) + ].tolist() outputs["out"] = out return outputs From e40fcb6422bdae94266d206fd5a2d9761aacd931 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A1tima=20Machado?= Date: Sun, 30 May 2021 22:58:05 +0100 Subject: [PATCH 0996/1665] Correct TPM and dartel shooting template format; Improved code. --- nipype/interfaces/cat12/preprocess.py | 53 ++++++++++++++++++++------- nipype/interfaces/cat12/surface.py | 10 ++--- 2 files changed, 43 insertions(+), 20 deletions(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index caf337a211..4a754d974b 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -18,6 +18,8 @@ ) from nipype.utils.filemanip import split_filename, fname_presuffix +from src.interfaces.cat12.surface import Cell + class CAT12SegmentInputSpec(SPMCommandInputSpec): in_files = InputMultiPath( @@ -41,6 +43,27 @@ class CAT12SegmentInputSpec(SPMCommandInputSpec): copyfile=False, ) + _help_shoots_tpm = ( + 'Shooting Template %d. The Shooting template must be in multi-volume nifti format and should contain GM,' + ' WM, and background segmentations and have to be saved with at least 16 bit. 
' + ) + + shooting_tpm = ImageFileSPM(exists=True, field="extopts.registration.shooting.shootingtpm", + desc=_help_shoots_tpm % 0, mandatory=False, copyfile=False) + + shooting_tpm_template_1 = ImageFileSPM( + exists=True, desc=_help_shoots_tpm % 1, mandatory=False, copyfile=False + ) + shooting_tpm_template_2 = ImageFileSPM( + exists=True, desc=_help_shoots_tpm % 2, mandatory=False, copyfile=False + ) + shooting_tpm_template_3 = ImageFileSPM( + exists=True, desc=_help_shoots_tpm % 3, mandatory=False, copyfile=False + ) + shooting_tpm_template_4 = ImageFileSPM( + exists=True, desc=_help_shoots_tpm % 4, mandatory=False, copyfile=False + ) + n_jobs = traits.Int( 1, usedefault=True, mandatory=True, field="nproc", desc="Number of threads" ) @@ -239,13 +262,6 @@ class CAT12SegmentInputSpec(SPMCommandInputSpec): copyfile=False, ) - _dartel_help = ( - "This option is to export data into a form that can be used with DARTEL. The SPM default is to " - "only apply rigid body transformation. However, a more appropriate option is to apply affine " - "transformation, because the additional scaling of the images requires less deformations to " - "non-linearly register brains to the template." - ) - # Grey matter gm_output_native = traits.Bool( False, @@ -495,11 +511,14 @@ def __init__(self, **inputs): def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm""" - if opt in ["in_files"]: + if opt == "in_files": if isinstance(val, list): return scans_for_fnames(val) else: return scans_for_fname(val) + elif opt in ["tpm", "shooting_tpm"]: + return Cell2Str(val) + return super(CAT12Segment, self)._format_arg(opt, spec, val) def _list_outputs(self): @@ -513,9 +532,7 @@ def _list_outputs(self): for tidx, tissue in enumerate(["gm", "wm", "csf"]): - for idx, (image, prefix) in enumerate( - [("modulated", "mw"), ("dartel", "r"), ("native", "")] - ): + for image, prefix in [("modulated", "mw"), ("dartel", "r"), ("native", "")]: outtype = f"{tissue}_output_{image}" if isdefined(getattr(self.inputs, outtype)) and getattr( self.inputs, outtype @@ -552,7 +569,7 @@ def _list_outputs(self): str(report) for report in Path(pth).glob("report/*") if report.is_file() ] - outputs[f"report"] = fname_presuffix( + outputs["report"] = fname_presuffix( f, prefix=os.path.join("report", f"cat_"), suffix=".xml", use_ext=False ) @@ -561,10 +578,18 @@ def _list_outputs(self): ] outputs["label_rois"] = fname_presuffix( - f, prefix=os.path.join("label", f"catROIs_"), suffix=".xml", use_ext=False + f, prefix=os.path.join("label", "catROIs_"), suffix=".xml", use_ext=False ) outputs["label_roi"] = fname_presuffix( - f, prefix=os.path.join("label", f"catROI_"), suffix=".xml", use_ext=False + f, prefix=os.path.join("label", "catROI_"), suffix=".xml", use_ext=False ) return outputs + + +class Cell2Str(Cell): + + def __str__(self): + """Convert input to appropriate format for cat12 + """ + return "{'%s'}" % self.to_string() diff --git a/nipype/interfaces/cat12/surface.py b/nipype/interfaces/cat12/surface.py index 39925e6f29..69afc4e423 100644 --- a/nipype/interfaces/cat12/surface.py +++ b/nipype/interfaces/cat12/surface.py @@ -1,9 +1,7 @@ import os from pathlib import Path -from traits.trait_base import _Undefined - -from nipype.interfaces.base import File, InputMultiPath, TraitedSpec, traits +from nipype.interfaces.base import File, InputMultiPath, TraitedSpec, traits, isdefined from nipype.interfaces.spm import SPMCommand from nipype.interfaces.spm.base import SPMCommandInputSpec from nipype.utils.filemanip import 
split_filename @@ -137,14 +135,14 @@ def _list_outputs(self): pth, base, ext = split_filename(filename) # The first part of the filename is rh.central or lh.central original_filename = base.split(".", 2)[-1] - for i, (extracted_parameter, parameter_name) in enumerate(names_outputs): + for extracted_parameter, parameter_name in names_outputs: if extracted_parameter: for hemisphere in ["rh", "lh"]: all_files_hemisphere = hemisphere + "_extracted_files" name_hemisphere = hemisphere + "_" + parameter_name - if isinstance(outputs[name_hemisphere], _Undefined): + if not isdefined(outputs[name_hemisphere]): outputs[name_hemisphere] = [] - if isinstance(outputs[all_files_hemisphere], _Undefined): + if not isdefined(outputs[all_files_hemisphere]): outputs[all_files_hemisphere] = [] generated_filename = ".".join( [hemisphere, parameter_name, original_filename] From 920b87967958d7050fa4de79bf0862624659210c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A1tima=20Machado?= Date: Sun, 30 May 2021 23:48:01 +0100 Subject: [PATCH 0997/1665] Added format utils. --- nipype/interfaces/cat12/format_utils.py | 15 +++++++++++++++ nipype/interfaces/cat12/preprocess.py | 4 ++-- nipype/interfaces/cat12/surface.py | 18 +----------------- 3 files changed, 18 insertions(+), 19 deletions(-) create mode 100644 nipype/interfaces/cat12/format_utils.py diff --git a/nipype/interfaces/cat12/format_utils.py b/nipype/interfaces/cat12/format_utils.py new file mode 100644 index 0000000000..03091f1fb9 --- /dev/null +++ b/nipype/interfaces/cat12/format_utils.py @@ -0,0 +1,15 @@ +class Cell: + def __init__(self, arg): + self.arg = arg + + def to_string(self): + if isinstance(self.arg, list): + v = "\n".join([f"'{el}'" for el in self.arg]) + else: + v = self.arg + return v + + +class NestedCell(Cell): + def __str__(self): + return "{{%s}}" % self.to_string() diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index 4a754d974b..5ee458af0a 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -9,6 +9,8 @@ File, Str, ) +from nipype.interfaces.cat12.format_utils import Cell + from nipype.interfaces.spm import SPMCommand from nipype.interfaces.spm.base import ( SPMCommandInputSpec, @@ -18,8 +20,6 @@ ) from nipype.utils.filemanip import split_filename, fname_presuffix -from src.interfaces.cat12.surface import Cell - class CAT12SegmentInputSpec(SPMCommandInputSpec): in_files = InputMultiPath( diff --git a/nipype/interfaces/cat12/surface.py b/nipype/interfaces/cat12/surface.py index 69afc4e423..810b2da7e7 100644 --- a/nipype/interfaces/cat12/surface.py +++ b/nipype/interfaces/cat12/surface.py @@ -2,6 +2,7 @@ from pathlib import Path from nipype.interfaces.base import File, InputMultiPath, TraitedSpec, traits, isdefined +from nipype.interfaces.cat12.format_utils import NestedCell, Cell from nipype.interfaces.spm import SPMCommand from nipype.interfaces.spm.base import SPMCommandInputSpec from nipype.utils.filemanip import split_filename @@ -269,23 +270,6 @@ def _list_outputs(self): return outputs -class Cell: - def __init__(self, arg): - self.arg = arg - - def to_string(self): - if isinstance(self.arg, list): - v = "\n".join([f"'{el}'" for el in self.arg]) - else: - v = self.arg - return v - - -class NestedCell(Cell): - def __str__(self): - return "{{%s}}" % self.to_string() - - class Cell2Str(Cell): def __str__(self): """Convert input to appropriate format for cat12""" From 56a02f284caf58ceda3edc83b8aab5a2b20562f6 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?F=C3=A1tima=20Machado?= Date: Mon, 31 May 2021 13:57:03 +0100 Subject: [PATCH 0998/1665] Reformatted preprocess.py. --- nipype/interfaces/cat12/preprocess.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index 5ee458af0a..61b5785f48 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -44,12 +44,17 @@ class CAT12SegmentInputSpec(SPMCommandInputSpec): ) _help_shoots_tpm = ( - 'Shooting Template %d. The Shooting template must be in multi-volume nifti format and should contain GM,' - ' WM, and background segmentations and have to be saved with at least 16 bit. ' + "Shooting Template %d. The Shooting template must be in multi-volume nifti format and should contain GM," + " WM, and background segmentations and have to be saved with at least 16 bit. " ) - shooting_tpm = ImageFileSPM(exists=True, field="extopts.registration.shooting.shootingtpm", - desc=_help_shoots_tpm % 0, mandatory=False, copyfile=False) + shooting_tpm = ImageFileSPM( + exists=True, + field="extopts.registration.shooting.shootingtpm", + desc=_help_shoots_tpm % 0, + mandatory=False, + copyfile=False, + ) shooting_tpm_template_1 = ImageFileSPM( exists=True, desc=_help_shoots_tpm % 1, mandatory=False, copyfile=False @@ -588,8 +593,6 @@ def _list_outputs(self): class Cell2Str(Cell): - def __str__(self): - """Convert input to appropriate format for cat12 - """ + """Convert input to appropriate format for cat12""" return "{'%s'}" % self.to_string() From 8a0ccae5674c01a4cb5efce6245ccc0578f416d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A1tima=20Machado?= Date: Mon, 31 May 2021 16:29:11 +0100 Subject: [PATCH 0999/1665] Updated test_auto_CAT12Segment.py. --- .../cat12/tests/test_auto_CAT12Segment.py | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/nipype/interfaces/cat12/tests/test_auto_CAT12Segment.py b/nipype/interfaces/cat12/tests/test_auto_CAT12Segment.py index 67afbb573e..979b3afa6b 100644 --- a/nipype/interfaces/cat12/tests/test_auto_CAT12Segment.py +++ b/nipype/interfaces/cat12/tests/test_auto_CAT12Segment.py @@ -130,6 +130,32 @@ def test_CAT12Segment_inputs(): field="output.bias.warped", usedefault=True, ), + shooting_tpm=dict( + copyfile=False, + extensions=[".hdr", ".img", ".img.gz", ".nii"], + field="extopts.registration.shooting.shootingtpm", + mandatory=False, + ), + shooting_tpm_template_1=dict( + copyfile=False, + extensions=[".hdr", ".img", ".img.gz", ".nii"], + mandatory=False, + ), + shooting_tpm_template_2=dict( + copyfile=False, + extensions=[".hdr", ".img", ".img.gz", ".nii"], + mandatory=False, + ), + shooting_tpm_template_3=dict( + copyfile=False, + extensions=[".hdr", ".img", ".img.gz", ".nii"], + mandatory=False, + ), + shooting_tpm_template_4=dict( + copyfile=False, + extensions=[".hdr", ".img", ".img.gz", ".nii"], + mandatory=False, + ), skull_strip=dict( field="extopts.gcutstr", usedefault=True, From cdcdba9eccc11ed674cfb9e9ca12eac86858e7f9 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Mon, 31 May 2021 17:13:05 -0400 Subject: [PATCH 1000/1665] DOC: Add CAT12 to interfaces list --- doc/interfaces.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/interfaces.rst b/doc/interfaces.rst index d04fd88271..dc83450b5a 100644 --- a/doc/interfaces.rst +++ b/doc/interfaces.rst @@ -54,6 +54,9 @@ Nipype provides interfaces for the following **third-party** tools: is an open-source software toolkit for diffusion MRI processing. * `Camino-TrackVis `__ allows interoperability between Camino and TrackVis. + * `CAT12 `__ + (Computational Anatomy Toolbox) extends SPM12 to provide computational + anatomy. * `Connectome Mapper (CMP) `__ implements a full processing pipeline for creating multi-variate and multi-resolution connectomes with dMRI data. From 0dcf413071ae0d868ac60cd9ecb553d3a5c4e668 Mon Sep 17 00:00:00 2001 From: Tim Robert-Fitzgerald Date: Mon, 31 May 2021 19:19:41 -0400 Subject: [PATCH 1001/1665] Update nipype/interfaces/r.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/r.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nipype/interfaces/r.py b/nipype/interfaces/r.py index c9fd51b392..d9507b2f94 100644 --- a/nipype/interfaces/r.py +++ b/nipype/interfaces/r.py @@ -3,6 +3,7 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Interfaces to run R scripts.""" import os +from shutil import which from .. import config from .base import ( From 190b6f43a43cffed8bca9387a0d6d859570041af Mon Sep 17 00:00:00 2001 From: Tim Robert-Fitzgerald Date: Mon, 31 May 2021 19:20:07 -0400 Subject: [PATCH 1002/1665] Update nipype/interfaces/tests/test_r.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/tests/test_r.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/tests/test_r.py b/nipype/interfaces/tests/test_r.py index a851e4df6b..52878f78d1 100644 --- a/nipype/interfaces/tests/test_r.py +++ b/nipype/interfaces/tests/test_r.py @@ -4,7 +4,7 @@ import os import pytest -import nipype.interfaces.r as r +from nipype.interfaces import r r_cmd = r.get_r_command() no_r = r_cmd is None From 02104a75a75784685774c3932371ceaaf4f4a67d Mon Sep 17 00:00:00 2001 From: Tim Robert-Fitzgerald Date: Mon, 31 May 2021 19:20:33 -0400 Subject: [PATCH 1003/1665] Update nipype/interfaces/tests/test_r.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/tests/test_r.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nipype/interfaces/tests/test_r.py b/nipype/interfaces/tests/test_r.py index 52878f78d1..a8a36ad3f0 100644 --- a/nipype/interfaces/tests/test_r.py +++ b/nipype/interfaces/tests/test_r.py @@ -6,8 +6,7 @@ import pytest from nipype.interfaces import r -r_cmd = r.get_r_command() -no_r = r_cmd is None +no_r = r.no_r if not no_r: r.RCommand.set_default_r_cmd(r_cmd) From 6fcf05b89e70b602aacdb7eb7128dcbaabbf09d9 Mon Sep 17 00:00:00 2001 From: Tim Robert-Fitzgerald Date: Mon, 31 May 2021 19:22:01 -0400 Subject: [PATCH 1004/1665] Update nipype/interfaces/tests/test_r.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/tests/test_r.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nipype/interfaces/tests/test_r.py b/nipype/interfaces/tests/test_r.py index a8a36ad3f0..10f42302b7 100644 --- a/nipype/interfaces/tests/test_r.py +++ b/nipype/interfaces/tests/test_r.py @@ -7,8 +7,6 @@ from nipype.interfaces import r no_r = r.no_r -if not no_r: - r.RCommand.set_default_r_cmd(r_cmd) def clean_workspace_and_get_default_script_file(): From b74fc859f3680af3364886d4ec8214bd76c9416b Mon Sep 17 00:00:00 2001 From: Tim 
Robert-Fitzgerald Date: Mon, 31 May 2021 19:37:40 -0400 Subject: [PATCH 1005/1665] Update nipype/interfaces/tests/test_r.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/tests/test_r.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/tests/test_r.py b/nipype/interfaces/tests/test_r.py index 10f42302b7..0444d98b71 100644 --- a/nipype/interfaces/tests/test_r.py +++ b/nipype/interfaces/tests/test_r.py @@ -91,7 +91,8 @@ def test_set_rcmd(): default_script_file = clean_workspace_and_get_default_script_file() ri = r.RCommand() + _default_r_cmd = ri._default_r_cmd ri.set_default_r_cmd("foo") assert not os.path.exists(default_script_file), "scriptfile should not exist." assert ri._default_r_cmd == "foo" - ri.set_default_r_cmd(r_cmd) + ri.set_default_r_cmd(_default_r_cmd) From f72b5106d17d149b7f0b48f5ff7ecdfe604534b0 Mon Sep 17 00:00:00 2001 From: Tim Robert-Fitzgerald Date: Mon, 31 May 2021 19:38:25 -0400 Subject: [PATCH 1006/1665] Update nipype/interfaces/tests/test_r.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/tests/test_r.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/nipype/interfaces/tests/test_r.py b/nipype/interfaces/tests/test_r.py index 0444d98b71..ae8cb49402 100644 --- a/nipype/interfaces/tests/test_r.py +++ b/nipype/interfaces/tests/test_r.py @@ -20,17 +20,15 @@ def clean_workspace_and_get_default_script_file(): @pytest.mark.skipif(no_r, reason="R is not available") -def test_cmdline(): - default_script_file = clean_workspace_and_get_default_script_file() - - ri = r.RCommand(script="1 + 1", script_file="testscript", rfile=False) +def test_cmdline(tmp_path): + ri = r.RCommand(script="1 + 1", script_file=str(tmp_path / "testscript"), rfile=False) assert ri.cmdline == r_cmd + ( ' -e "1 + 1"' ) assert ri.inputs.script == "1 + 1" - assert ri.inputs.script_file == "testscript" + assert ri.inputs.script_file == str(tmp_path / "testscript") assert not os.path.exists(ri.inputs.script_file), "scriptfile should not exist" assert not os.path.exists( default_script_file From 3d64c265b8ea580fc2d11cf056585264c9b9f4b0 Mon Sep 17 00:00:00 2001 From: Tim Robert-Fitzgerald Date: Mon, 31 May 2021 19:38:59 -0400 Subject: [PATCH 1007/1665] Update nipype/interfaces/tests/test_r.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/tests/test_r.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/tests/test_r.py b/nipype/interfaces/tests/test_r.py index ae8cb49402..ae5ea21bb2 100644 --- a/nipype/interfaces/tests/test_r.py +++ b/nipype/interfaces/tests/test_r.py @@ -38,7 +38,7 @@ def test_cmdline(tmp_path): def test_r_init(): default_script_file = clean_workspace_and_get_default_script_file() - assert r.RCommand._cmd == "R" + assert r.RCommand._cmd == r.get_r_command() assert r.RCommand.input_spec == r.RInputSpec assert r.RCommand().cmd == r_cmd From c4cb5396efc7feb99b08763ebc5080b4d3fb8a10 Mon Sep 17 00:00:00 2001 From: Tim Robert-Fitzgerald Date: Mon, 31 May 2021 19:39:22 -0400 Subject: [PATCH 1008/1665] Update nipype/interfaces/tests/test_r.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/tests/test_r.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nipype/interfaces/tests/test_r.py b/nipype/interfaces/tests/test_r.py index ae5ea21bb2..9c7c370f28 100644 --- a/nipype/interfaces/tests/test_r.py +++ b/nipype/interfaces/tests/test_r.py @@ -34,6 +34,7 @@ def test_cmdline(tmp_path): default_script_file ), "default scriptfile should not exist." 
+ @pytest.mark.skipif(no_r, reason="R is not available") def test_r_init(): default_script_file = clean_workspace_and_get_default_script_file() From 20629a09e620c6af801efd2db7faaeee9ed2cfe4 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 31 May 2021 21:38:22 -0400 Subject: [PATCH 1009/1665] RF: Move format_utils to base --- nipype/interfaces/cat12/{format_utils.py => base.py} | 0 nipype/interfaces/cat12/preprocess.py | 2 +- nipype/interfaces/cat12/surface.py | 2 +- 3 files changed, 2 insertions(+), 2 deletions(-) rename nipype/interfaces/cat12/{format_utils.py => base.py} (100%) diff --git a/nipype/interfaces/cat12/format_utils.py b/nipype/interfaces/cat12/base.py similarity index 100% rename from nipype/interfaces/cat12/format_utils.py rename to nipype/interfaces/cat12/base.py diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index 61b5785f48..9c02701542 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -9,7 +9,7 @@ File, Str, ) -from nipype.interfaces.cat12.format_utils import Cell +from nipype.interfaces.cat12.base import Cell from nipype.interfaces.spm import SPMCommand from nipype.interfaces.spm.base import ( diff --git a/nipype/interfaces/cat12/surface.py b/nipype/interfaces/cat12/surface.py index 810b2da7e7..83307ebee6 100644 --- a/nipype/interfaces/cat12/surface.py +++ b/nipype/interfaces/cat12/surface.py @@ -2,7 +2,7 @@ from pathlib import Path from nipype.interfaces.base import File, InputMultiPath, TraitedSpec, traits, isdefined -from nipype.interfaces.cat12.format_utils import NestedCell, Cell +from nipype.interfaces.cat12.base import NestedCell, Cell from nipype.interfaces.spm import SPMCommand from nipype.interfaces.spm.base import SPMCommandInputSpec from nipype.utils.filemanip import split_filename From fc4f82daa7e092cf5faad0bd1597c1c2e2591dfc Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 31 May 2021 21:38:40 -0400 Subject: [PATCH 1010/1665] ENH: Add cat12 interfaces to __init__ --- nipype/interfaces/cat12/__init__.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/nipype/interfaces/cat12/__init__.py b/nipype/interfaces/cat12/__init__.py index e69de29bb2..99498dc922 100644 --- a/nipype/interfaces/cat12/__init__.py +++ b/nipype/interfaces/cat12/__init__.py @@ -0,0 +1,5 @@ +from .preprocess import CAT12Segment +from .surface import ( + ExtractAdditionalSurfaceParameters, + ExtractROIBasedSurfaceMeasures, +) From 16f62fc0d782655c85335d205a3fcde9fb8871c1 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 31 May 2021 21:41:18 -0400 Subject: [PATCH 1011/1665] DOC: Minor cleanups --- nipype/interfaces/cat12/preprocess.py | 1 + nipype/interfaces/cat12/surface.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index 9c02701542..5a73f42443 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -475,6 +475,7 @@ class CAT12SegmentOutputSpec(TraitedSpec): class CAT12Segment(SPMCommand): """ CAT12: Segmentation + This toolbox is an extension to the default segmentation in SPM12, but uses a completely different segmentation approach. 
The segmentation approach is based on an Adaptive Maximum A Posterior (MAP) technique without the need for a priori diff --git a/nipype/interfaces/cat12/surface.py b/nipype/interfaces/cat12/surface.py index 83307ebee6..41f8a5f680 100644 --- a/nipype/interfaces/cat12/surface.py +++ b/nipype/interfaces/cat12/surface.py @@ -217,7 +217,7 @@ class ExtractROIBasedSurfaceMeasuresOutputSpec(TraitedSpec): class ExtractROIBasedSurfaceMeasures(SPMCommand): """ Extract ROI-based surface values - While ROI-based values for VBM (volume) data are automatically saved in the label folder as XML file it is + While ROI-based values for VBM (volume) data are automatically saved in the ``label`` folder as XML file it is necessary to additionally extract these values for surface data (except for thickness which is automatically extracted during segmentation). This has to be done after preprocessing the data and creating cortical surfaces. From 9cad3cdddc66f8d0d2f8dc6c5c8a22cb3e071c0a Mon Sep 17 00:00:00 2001 From: Terf Date: Tue, 1 Jun 2021 10:52:10 -0400 Subject: [PATCH 1012/1665] @effigies R interface suggestions --- nipype/interfaces/r.py | 25 ++++---------- nipype/interfaces/tests/test_r.py | 57 ++++++------------------------- 2 files changed, 17 insertions(+), 65 deletions(-) diff --git a/nipype/interfaces/r.py b/nipype/interfaces/r.py index d9507b2f94..89b799b0ce 100644 --- a/nipype/interfaces/r.py +++ b/nipype/interfaces/r.py @@ -51,8 +51,6 @@ class RCommand(CommandLine): """ _cmd = get_r_command() - _default_r_cmd = None - _default_rfile = None input_spec = RInputSpec def __init__(self, r_cmd=None, **inputs): @@ -62,37 +60,26 @@ def __init__(self, r_cmd=None, **inputs): super(RCommand, self).__init__(**inputs) if r_cmd and isdefined(r_cmd): self._cmd = r_cmd - elif self._default_r_cmd: - self._cmd = self._default_r_cmd - - if self._default_rfile and not isdefined(self.inputs.rfile): - self.inputs.rfile = self._default_rfile # For r commands force all output to be returned since r # does not have a clean way of notifying an error self.terminal_output = "allatonce" - @classmethod - def set_default_r_cmd(cls, r_cmd): + def set_default_r_cmd(self, r_cmd): """Set the default R command line for R classes. This method is used to set values for all R - subclasses. However, setting this will not update the output - type for any existing instances. For these, assign the - .inputs.r_cmd. + subclasses. """ - cls._default_r_cmd = r_cmd + self._cmd = r_cmd - @classmethod - def set_default_rfile(cls, rfile): + def set_default_rfile(self, rfile): """Set the default R script file format for R classes. This method is used to set values for all R - subclasses. However, setting this will not update the output - type for any existing instances. For these, assign the - .inputs.rfile. + subclasses. """ - cls._default_rfile = rfile + self._rfile = rfile def _run_interface(self, runtime): self.terminal_output = "allatonce" diff --git a/nipype/interfaces/tests/test_r.py b/nipype/interfaces/tests/test_r.py index 9c7c370f28..fdf7528d40 100644 --- a/nipype/interfaces/tests/test_r.py +++ b/nipype/interfaces/tests/test_r.py @@ -8,48 +8,28 @@ no_r = r.no_r - -def clean_workspace_and_get_default_script_file(): - # Make sure things are clean. - default_script_file = r.RInputSpec().script_file - if os.path.exists(default_script_file): - os.remove( - default_script_file - ) # raise Exception('Default script file needed for tests; please remove %s!' 
% default_script_file) - return default_script_file - - @pytest.mark.skipif(no_r, reason="R is not available") def test_cmdline(tmp_path): - ri = r.RCommand(script="1 + 1", script_file=str(tmp_path / "testscript"), rfile=False) + default_script_file = str(tmp_path / "testscript") + ri = r.RCommand(script="1 + 1", script_file=default_script_file, rfile=False) + r_cmd = r.get_r_command() assert ri.cmdline == r_cmd + ( ' -e "1 + 1"' ) assert ri.inputs.script == "1 + 1" - assert ri.inputs.script_file == str(tmp_path / "testscript") + assert ri.inputs.script_file == default_script_file assert not os.path.exists(ri.inputs.script_file), "scriptfile should not exist" assert not os.path.exists( default_script_file ), "default scriptfile should not exist." -@pytest.mark.skipif(no_r, reason="R is not available") -def test_r_init(): - default_script_file = clean_workspace_and_get_default_script_file() - - assert r.RCommand._cmd == r.get_r_command() - assert r.RCommand.input_spec == r.RInputSpec - - assert r.RCommand().cmd == r_cmd - rc = r.RCommand(r_cmd="foo_m") - assert rc.cmd == "foo_m" - - @pytest.mark.skipif(no_r, reason="R is not available") def test_run_interface(tmpdir): - default_script_file = clean_workspace_and_get_default_script_file() + os.chdir(tmpdir) + default_script_file = r.RInputSpec().script_file rc = r.RCommand(r_cmd="foo_m") assert not os.path.exists(default_script_file), "scriptfile should not exist 1." @@ -67,31 +47,16 @@ def test_run_interface(tmpdir): if os.path.exists(default_script_file): # cleanup os.remove(default_script_file) - cwd = tmpdir.chdir() - - # bypasses ubuntu dash issue - rc = r.RCommand(script="foo;", rfile=True) - assert not os.path.exists(default_script_file), "scriptfile should not exist 4." - with pytest.raises(RuntimeError): - rc.run() - assert os.path.exists(default_script_file), "scriptfile should exist 4." - if os.path.exists(default_script_file): # cleanup - os.remove(default_script_file) - - # bypasses ubuntu dash issue - res = r.RCommand(script="a=1;", rfile=True).run() - assert res.runtime.returncode == 0 - assert os.path.exists(default_script_file), "scriptfile should exist 5." - cwd.chdir() @pytest.mark.skipif(no_r, reason="R is not available") -def test_set_rcmd(): - default_script_file = clean_workspace_and_get_default_script_file() +def test_set_rcmd(tmpdir): + os.chdir(tmpdir) + default_script_file = r.RInputSpec().script_file ri = r.RCommand() - _default_r_cmd = ri._default_r_cmd + _default_r_cmd = ri._cmd ri.set_default_r_cmd("foo") assert not os.path.exists(default_script_file), "scriptfile should not exist." 
- assert ri._default_r_cmd == "foo" + assert ri._cmd == "foo" ri.set_default_r_cmd(_default_r_cmd) From 6aab778ead6f0cd4be34956a1dca7b93ba621531 Mon Sep 17 00:00:00 2001 From: Tim Robert-Fitzgerald Date: Tue, 1 Jun 2021 12:17:34 -0400 Subject: [PATCH 1013/1665] Update nipype/interfaces/tests/test_r.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/tests/test_r.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/tests/test_r.py b/nipype/interfaces/tests/test_r.py index fdf7528d40..9f7477777e 100644 --- a/nipype/interfaces/tests/test_r.py +++ b/nipype/interfaces/tests/test_r.py @@ -51,7 +51,7 @@ def test_run_interface(tmpdir): @pytest.mark.skipif(no_r, reason="R is not available") def test_set_rcmd(tmpdir): - os.chdir(tmpdir) + cwd = tmpdir.chdir() default_script_file = r.RInputSpec().script_file ri = r.RCommand() @@ -60,3 +60,4 @@ def test_set_rcmd(tmpdir): assert not os.path.exists(default_script_file), "scriptfile should not exist." assert ri._cmd == "foo" ri.set_default_r_cmd(_default_r_cmd) + cwd.chdir() From 7f3713b9e702dc194fdcfae2151f4189cafd3e89 Mon Sep 17 00:00:00 2001 From: Terf Date: Tue, 1 Jun 2021 12:28:14 -0400 Subject: [PATCH 1014/1665] ran `make specs` --- nipype/interfaces/tests/test_auto_RCommand.py | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 nipype/interfaces/tests/test_auto_RCommand.py diff --git a/nipype/interfaces/tests/test_auto_RCommand.py b/nipype/interfaces/tests/test_auto_RCommand.py new file mode 100644 index 0000000000..adfcf36cf0 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_RCommand.py @@ -0,0 +1,31 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from ..r import RCommand + + +def test_RCommand_inputs(): + input_map = dict( + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + rfile=dict( + usedefault=True, + ), + script=dict( + argstr='-e "%s"', + mandatory=True, + position=-1, + ), + script_file=dict( + extensions=None, + usedefault=True, + ), + ) + inputs = RCommand.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value From 7f92f638d5813c5267c44e6f8a39f9b45051c0bb Mon Sep 17 00:00:00 2001 From: Terf Date: Tue, 1 Jun 2021 12:53:53 -0400 Subject: [PATCH 1015/1665] un-chdir R test --- nipype/interfaces/tests/test_r.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/tests/test_r.py b/nipype/interfaces/tests/test_r.py index 9f7477777e..f15eadac55 100644 --- a/nipype/interfaces/tests/test_r.py +++ b/nipype/interfaces/tests/test_r.py @@ -28,7 +28,7 @@ def test_cmdline(tmp_path): @pytest.mark.skipif(no_r, reason="R is not available") def test_run_interface(tmpdir): - os.chdir(tmpdir) + cwd = tmpdir.chdir() default_script_file = r.RInputSpec().script_file rc = r.RCommand(r_cmd="foo_m") @@ -46,6 +46,7 @@ def test_run_interface(tmpdir): assert os.path.exists(default_script_file), "scriptfile should exist 3." 
if os.path.exists(default_script_file): # cleanup os.remove(default_script_file) + cwd.chdir() From 52e1a983826c1fd040620b5546e798ddaf4a81dc Mon Sep 17 00:00:00 2001 From: Terf Date: Tue, 1 Jun 2021 14:35:43 -0400 Subject: [PATCH 1016/1665] ran `black .` --- nipype/interfaces/r.py | 26 ++++++++++++++------------ nipype/interfaces/tests/test_r.py | 6 ++---- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/nipype/interfaces/r.py b/nipype/interfaces/r.py index 89b799b0ce..f9719ce777 100644 --- a/nipype/interfaces/r.py +++ b/nipype/interfaces/r.py @@ -21,7 +21,7 @@ def get_r_command(): if "NIPYPE_NO_R" in os.environ: return None r_cmd = os.getenv("RCMD", default="R") - + return r_cmd if which(r_cmd) else None @@ -29,7 +29,7 @@ def get_r_command(): class RInputSpec(CommandLineInputSpec): - """ Basic expected inputs to R interface """ + """Basic expected inputs to R interface""" script = traits.Str( argstr='-e "%s"', desc="R code to run", mandatory=True, position=-1 @@ -39,7 +39,7 @@ class RInputSpec(CommandLineInputSpec): script_file = File( "pyscript.R", usedefault=True, desc="Name of file to write R code to" ) - + class RCommand(CommandLine): """Interface that runs R code @@ -69,7 +69,7 @@ def set_default_r_cmd(self, r_cmd): """Set the default R command line for R classes. This method is used to set values for all R - subclasses. + subclasses. """ self._cmd = r_cmd @@ -95,17 +95,19 @@ def _format_arg(self, name, trait_spec, value): return super(RCommand, self)._format_arg(name, trait_spec, value) def _gen_r_command(self, argstr, script_lines): - """ Generates commands and, if rfile specified, writes it to disk.""" + """Generates commands and, if rfile specified, writes it to disk.""" if not self.inputs.rfile: # replace newlines with ;, strip comments - script = "; ".join([ - line - for line in script_lines.split("\n") - if not line.strip().startswith("#") - ]) + script = "; ".join( + [ + line + for line in script_lines.split("\n") + if not line.strip().startswith("#") + ] + ) # escape " and $ - script = script.replace('"','\\"') - script = script.replace('$','\\$') + script = script.replace('"', '\\"') + script = script.replace("$", "\\$") else: script_path = os.path.join(os.getcwd(), self.inputs.script_file) with open(script_path, "wt") as rfile: diff --git a/nipype/interfaces/tests/test_r.py b/nipype/interfaces/tests/test_r.py index f15eadac55..6550a32747 100644 --- a/nipype/interfaces/tests/test_r.py +++ b/nipype/interfaces/tests/test_r.py @@ -8,15 +8,14 @@ no_r = r.no_r + @pytest.mark.skipif(no_r, reason="R is not available") def test_cmdline(tmp_path): default_script_file = str(tmp_path / "testscript") ri = r.RCommand(script="1 + 1", script_file=default_script_file, rfile=False) r_cmd = r.get_r_command() - assert ri.cmdline == r_cmd + ( - ' -e "1 + 1"' - ) + assert ri.cmdline == r_cmd + (' -e "1 + 1"') assert ri.inputs.script == "1 + 1" assert ri.inputs.script_file == default_script_file @@ -49,7 +48,6 @@ def test_run_interface(tmpdir): cwd.chdir() - @pytest.mark.skipif(no_r, reason="R is not available") def test_set_rcmd(tmpdir): cwd = tmpdir.chdir() From e576c9057bdbb7403fe7672225af784a06f28d2c Mon Sep 17 00:00:00 2001 From: Terf Date: Tue, 1 Jun 2021 15:22:09 -0400 Subject: [PATCH 1017/1665] skip in CI as R isn't installed --- nipype/interfaces/r.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/r.py b/nipype/interfaces/r.py index f9719ce777..a586de183c 100644 --- a/nipype/interfaces/r.py +++ b/nipype/interfaces/r.py @@ -45,8 
+45,8 @@ class RCommand(CommandLine): """Interface that runs R code >>> import nipype.interfaces.r as r - >>> r = r.RCommand(rfile=False) # don't write script file - >>> r.inputs.script = "Sys.getenv('USER')" + >>> r = r.RCommand(rfile=False) # doctest: +SKIP + >>> r.inputs.script = "Sys.getenv('USER')" # doctest: +SKIP >>> out = r.run() # doctest: +SKIP """ From 97f21276902f1c99312c0e88ce143c40626bff08 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 2 Jun 2021 08:59:56 -0400 Subject: [PATCH 1018/1665] MNT: Remove deprecated np.float usage (#3341) --- nipype/algorithms/modelgen.py | 2 +- nipype/algorithms/rapidart.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/algorithms/modelgen.py b/nipype/algorithms/modelgen.py index b6ceba68a2..afd6841c59 100644 --- a/nipype/algorithms/modelgen.py +++ b/nipype/algorithms/modelgen.py @@ -383,7 +383,7 @@ def _generate_standard_design( for i, info in enumerate(infolist): sessinfo.insert(i, dict(cond=[])) if isdefined(self.inputs.high_pass_filter_cutoff): - sessinfo[i]["hpf"] = np.float(self.inputs.high_pass_filter_cutoff) + sessinfo[i]["hpf"] = float(self.inputs.high_pass_filter_cutoff) if hasattr(info, "conditions") and info.conditions is not None: for cid, cond in enumerate(info.conditions): diff --git a/nipype/algorithms/rapidart.py b/nipype/algorithms/rapidart.py index 15e691856a..0a819c466b 100644 --- a/nipype/algorithms/rapidart.py +++ b/nipype/algorithms/rapidart.py @@ -583,7 +583,7 @@ def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None): tidx = find_indices(normval > self.inputs.norm_threshold) ridx = find_indices(normval < 0) if displacement is not None: - dmap = np.zeros((x, y, z, timepoints), dtype=np.float) + dmap = np.zeros((x, y, z, timepoints), dtype=np.float64) for i in range(timepoints): dmap[ voxel_coords[0], voxel_coords[1], voxel_coords[2], i From fd8cc40efbbad401221071b7b3a1401ac3d75c8d Mon Sep 17 00:00:00 2001 From: Tom Close Date: Fri, 11 Jun 2021 20:10:12 +1000 Subject: [PATCH 1019/1665] Update nipype/interfaces/mrtrix3/utils.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/mrtrix3/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index f605e3c5e4..9609c937e7 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -812,7 +812,7 @@ class SHConv(CommandLine): >>> sh.inputs.in_file = 'csd.mif' >>> sh.inputs.response = 'response.txt' >>> sh.cmdline - 'shconv csd.mif response.txt sh.mif' + 'shconv csd.mif response.txt csd_shconv.mif' >>> sh.run() # doctest: +SKIP """ From 76ccb7732da48fe6614cc81f48b77a3453853125 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Fri, 11 Jun 2021 20:10:20 +1000 Subject: [PATCH 1020/1665] Update nipype/interfaces/mrtrix3/utils.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/mrtrix3/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index 9609c937e7..690a0904c1 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -877,7 +877,7 @@ class SH2Amp(CommandLine): >>> sh.inputs.in_file = 'sh.mif' >>> sh.inputs.directions = 'grads.txt' >>> sh.cmdline - 'sh2amp sh.mif grads.txt amp.mif' + 'sh2amp sh.mif grads.txt sh_amp.mif' >>> sh.run() # doctest: +SKIP """ From af8e01facff59ab3caf1e4f45b1bb7c0e2e1518f Mon Sep 17 00:00:00 2001 From: Tom Close Date: Fri, 11 Jun 2021 20:10:29 
+1000 Subject: [PATCH 1021/1665] Update nipype/interfaces/mrtrix3/utils.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/mrtrix3/utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index 690a0904c1..86208d1000 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -787,7 +787,6 @@ class SHConvInputSpec(CommandLineInputSpec): name_template="%s_shconv.mif", name_source=["in_file"], argstr="%s", - mandatory=True, position=-1, usedefault=True, desc="the output spherical harmonics", From f698f4b1fe4f5ea59b13e8d1908861e6ddda902f Mon Sep 17 00:00:00 2001 From: Tom Close Date: Fri, 11 Jun 2021 20:10:36 +1000 Subject: [PATCH 1022/1665] Update nipype/interfaces/mrtrix3/utils.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/mrtrix3/utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index 86208d1000..f999db7001 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -848,7 +848,6 @@ class SH2AmpInputSpec(CommandLineInputSpec): name_template="%s_amp.mif", name_source=["in_file"], argstr="%s", - mandatory=True, position=-1, usedefault=True, desc="the output spherical harmonics", From e096c19a47d8cce521da886f01caadb4a236c188 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Tue, 15 Jun 2021 16:44:31 -0400 Subject: [PATCH 1023/1665] TEST: make specs --- nipype/interfaces/mrtrix3/tests/test_auto_SH2Amp.py | 3 ++- nipype/interfaces/mrtrix3/tests/test_auto_SHConv.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_SH2Amp.py b/nipype/interfaces/mrtrix3/tests/test_auto_SH2Amp.py index b2af3f51e6..ab75fc1f8a 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_SH2Amp.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_SH2Amp.py @@ -29,7 +29,8 @@ def test_SH2Amp_inputs(): out_file=dict( argstr="%s", extensions=None, - mandatory=True, + name_source=["in_file"], + name_template="%s_amp.mif", position=-1, usedefault=True, ), diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_SHConv.py b/nipype/interfaces/mrtrix3/tests/test_auto_SHConv.py index 96e696768a..992e6984a8 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_SHConv.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_SHConv.py @@ -20,7 +20,8 @@ def test_SHConv_inputs(): out_file=dict( argstr="%s", extensions=None, - mandatory=True, + name_source=["in_file"], + name_template="%s_shconv.mif", position=-1, usedefault=True, ), From 6e3387dcd2447e78db4d8e6e703cfc146dcbed22 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 26 May 2021 10:27:31 -0400 Subject: [PATCH 1024/1665] MNT: 1.6.1 --- nipype/info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/info.py b/nipype/info.py index 7f6899cdd0..4f065939ed 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -5,7 +5,7 @@ # nipype version information # Remove -dev for release -__version__ = "1.6.0" +__version__ = "1.6.1" def get_nipype_gitversion(): From 05adb41676fda13db5e7327025316c12207ae9f8 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Wed, 26 May 2021 10:48:17 -0400 Subject: [PATCH 1025/1665] MNT: Update mailmap --- .mailmap | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.mailmap b/.mailmap index f603849d24..a233bff1e0 100644 --- a/.mailmap +++ b/.mailmap @@ -55,6 +55,7 @@ David Mordom David Welch Dimitri Papadopoulos Orfanos Dmytro Belevtsoff +Dorian Vogel Dylan M. Nielson Dylan M. Nielson Eduard Ort @@ -73,6 +74,7 @@ Gilles de Hollander Gio Piantoni Guillaume Flandin Hans Johnson +Henry Jones Horea Christian Hrvoje Stojic Isaac Schwabacher @@ -153,6 +155,8 @@ Ranjit Khanuja Rastko Ćirić Rastko Ćirić Rastko Ćirić +Raunak Jalan +Raunak Jalan <41023976+RaunakJalan@users.noreply.github.com> Ross Markello Russell Poldrack Russell Poldrack @@ -175,6 +179,9 @@ Steven Giavasis Steven Tilley Sulantha Mathotaarachchi +Tim Robert-Fitzgerald +Tom Close +Tom Close Tristan Glatard Victor Férat Victor Férat From 5ef61f87dc9477e41934d7394506e0adb4f8b275 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 26 May 2021 10:48:55 -0400 Subject: [PATCH 1026/1665] MNT: Update Zenodo ordering --- .zenodo.json | 75 +++++++++++++++++++++++++++++++--------------------- 1 file changed, 45 insertions(+), 30 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index 6c8db536d2..83764c5417 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -230,6 +230,11 @@ "name": "Mancini, Matteo", "orcid": "0000-0001-7194-4568" }, + { + "affiliation": "University of Sydney", + "name": "Close, Thomas", + "orcid": "0000-0002-4160-2134" + }, { "affiliation": "National Institute of Mental Health", "name": "Nielson, Dylan M.", @@ -246,15 +251,30 @@ { "name": "Mordom, David" }, + { + "affiliation": "CIBIT, UC", + "name": "Machado, F\u00e1tima", + "orcid": "0000-0001-8878-1750" + }, { "affiliation": "ARAMIS LAB, Brain and Spine Institute (ICM), Paris, France.", "name": "Guillon, Je\u0301re\u0301my", "orcid": "0000-0002-2672-7510" }, + { + "affiliation": "Charite Universitatsmedizin Berlin, Germany", + "name": "Waller, Lea", + "orcid": "0000-0002-3239-6957" + }, { "affiliation": "Indiana University, IN, USA", "name": "Koudoro, Serge" }, + { + "affiliation": "Penn Statistics in Imaging and Visualization Endeavor, University of Pennsylvania", + "name": "Robert-Fitzgerald, Timothy", + "orcid": "0000-0001-8303-8001" + }, { "affiliation": "Donders Institute for Brain, Cognition and Behavior, Center for Cognitive Neuroimaging", "name": "Chetverikov, Andrey", @@ -334,6 +354,11 @@ { "name": "Schwartz, Yannick" }, + { + "affiliation": "The University of Iowa", + "name": "Ghayoor, Ali", + "orcid": "0000-0002-8858-1254" + }, { "affiliation": "NIMH IRP", "name": "Lee, John A.", @@ -403,13 +428,12 @@ "orcid": "0000-0003-2766-8425" }, { - "affiliation": "University of Iowa", - "name": "Welch, David" + "affiliation": "Sagol School of Neuroscience, Tel Aviv University", + "name": "Baratz, Zvi" }, { - "affiliation": "Charite Universitatsmedizin Berlin, Germany", - "name": "Waller, Lea", - "orcid": "0000-0002-3239-6957" + "affiliation": "University of Iowa", + "name": "Welch, David" }, { "affiliation": "Max Planck Institute for Human Cognitive and Brain Sciences", @@ -421,11 +445,6 @@ "name": "Triplett, William", "orcid": "0000-0002-9546-1306" }, - { - "affiliation": "The University of Iowa", - "name": "Ghayoor, Ali", - "orcid": "0000-0002-8858-1254" - }, { "affiliation": "Child Mind Institute", "name": "Craddock, R. 
Cameron", @@ -614,6 +633,9 @@ "name": "Lee, Nat", "orcid": "0000-0001-9308-9988" }, + { + "name": "Jalan, Raunak" + }, { "name": "Inati, Souheil" }, @@ -697,10 +719,6 @@ "name": "Andberg, Sami Kristian", "orcid": "0000-0002-5650-3964" }, - { - "affiliation": "Sagol School of Neuroscience, Tel Aviv University", - "name": "Baratz, Zvi" - }, { "name": "Matsubara, K" }, @@ -711,11 +729,6 @@ { "name": "Marina, Ana" }, - { - "affiliation": "University of Sydney", - "name": "Close, Thomas", - "orcid": "0000-0002-4160-2134" - }, { "name": "Davison, Andrew" }, @@ -741,9 +754,19 @@ { "name": "Shachnev, Dmitry" }, + { + "affiliation": "University of Applied Sciences and Arts Northwestern Switzerland", + "name": "Vogel, Dorian", + "orcid": "0000-0003-3445-576X" + }, { "name": "Flandin, Guillaume" }, + { + "affiliation": "Stanford University and the University of Chicago", + "name": "Jones, Henry", + "orcid": "0000-0001-7719-3646" + }, { "affiliation": "Athinoula A. Martinos Center for Biomedical Imaging, Department of Radiology, Massachusetts General Hospital, Charlestown, MA, USA", "name": "Gonzalez, Ivan", @@ -790,22 +813,19 @@ "name": "Broderick, William", "orcid": "0000-0002-8999-9003" }, - { - "name": "Tambini, Arielle" - }, { "affiliation": "Weill Cornell Medicine", "name": "Xie, Xihe", "orcid": "0000-0001-6595-2473" }, + { + "name": "Tambini, Arielle" + }, { "affiliation": "Max Planck Institute for Human Cognitive and Brain Sciences, Leipzig, Germany.", "name": "Mihai, Paul Glad", "orcid": "0000-0001-5715-6442" }, - { - "name": "Jalan, Raunak", - }, { "affiliation": "Department of Psychology, Stanford University", "name": "Gorgolewski, Krzysztof J.", @@ -815,11 +835,6 @@ "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", "orcid": "0000-0002-5312-6729" - }, - { - "affiliation": "CIBIT, UC", - "name": "Machado, Fátima", - "orcid": "0000-0001-8878-1750" } ], "keywords": [ From b333b13e8df7f81b2a25e986ae9047c64d07c18d Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 26 May 2021 10:49:00 -0400 Subject: [PATCH 1027/1665] DOC: 1.6.1 changelog --- doc/changelog/1.X.X-changelog.rst | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/doc/changelog/1.X.X-changelog.rst b/doc/changelog/1.X.X-changelog.rst index 822e22547f..6d8cd64652 100644 --- a/doc/changelog/1.X.X-changelog.rst +++ b/doc/changelog/1.X.X-changelog.rst @@ -1,3 +1,31 @@ +1.6.1 (June 16, 2021) +===================== + +Bug-fix release in the 1.6.x series. + +(`Full changelog `__) + + * FIX: Set DistributedPluginBase.refidx type correctly (https://github.com/nipy/nipype/pull/3340) + * FIX: change fsl interface randomise --f_only to --fonly for #3322 (https://github.com/nipy/nipype/pull/3325) + * FIX: BET raising "No image files match: ..." 
with very long file names (https://github.com/nipy/nipype/pull/3309) + * FIX: Update SmoothEstimateOutputSpec resels description (https://github.com/nipy/nipype/pull/3316) + * ENH: Adds interfaces for MRtrix utils shconv and sh2amp (https://github.com/nipy/nipype/pull/3280) + * ENH: Interface for R (https://github.com/nipy/nipype/pull/3291) + * ENH: Add CAT12 interfaces (https://github.com/nipy/nipype/pull/3310) + * ENH: AFNI 3dNetCorr as afni.NetCorr (https://github.com/nipy/nipype/pull/3263) + * ENH: Skip newline before Python call in batch submission to facilitate containerized runs (https://github.com/nipy/nipype/pull/3297) + * ENH: Add new dwifslpreproc interface for MRtrix3 (https://github.com/nipy/nipype/pull/3278) + * REF: Cache nodes in workflow to speed up construction, other optimizations (https://github.com/nipy/nipype/pull/3331) + * DOC: Fixed Developer Setup Link in install.rst (https://github.com/nipy/nipype/pull/3330) + * MNT: Blacklist Dipy 1.4.1 (https://github.com/nipy/nipype/pull/3335) + * MNT: Drop support for numpy < 1.15.3 (https://github.com/nipy/nipype/pull/3284) + * CI: Build docker images with Python 3.8 (https://github.com/nipy/nipype/pull/3287) + * CI: Drop Circle doc builds (https://github.com/nipy/nipype/pull/3338) + * CI: Drop Travis (https://github.com/nipy/nipype/pull/3332) + * CI: Build docker images with Python 3.8 (https://github.com/nipy/nipype/pull/3287) + * CI: Add specs and style checks (https://github.com/nipy/nipype/pull/3321) + * CI: Move from Travis to GitHub actions (https://github.com/nipy/nipype/pull/3318) + 1.6.0 (November 28, 2020) ========================= From 77f7659e07d63a48d8b292c1dc32ad0ac23b1699 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 26 May 2021 10:50:39 -0400 Subject: [PATCH 1028/1665] DOC: Update previous releases in interfaces.rst --- doc/interfaces.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/interfaces.rst b/doc/interfaces.rst index dc83450b5a..107eb6519b 100644 --- a/doc/interfaces.rst +++ b/doc/interfaces.rst @@ -8,7 +8,7 @@ Interfaces and Workflows :Release: |version| :Date: |today| -Previous versions: `1.5.1 `_ `1.5.0 `_ +Previous versions: `1.6.0 `_ `1.5.1 `_ Workflows --------- From ba8ac9c62f4a51c9cdd6e8a9418c6dafbc22be21 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Thu, 27 May 2021 10:06:06 -0400 Subject: [PATCH 1029/1665] Squashed commit of the following: commit 36b721c3af8e672b32f0b1deb577edd5300ae409 Author: Christopher J. Markiewicz Date: Thu May 27 09:59:04 2021 -0400 TMP: Use Twine directly for now, skip test PyPI commit 41d942b16405b991f67e93163dba9e8010b373b4 Author: Christopher J. Markiewicz Date: Thu May 27 09:08:02 2021 -0400 MNT: Add content type for long description (text/x-rst) commit 4a7d600ee50d88674dc9574c783910023ea18244 Author: Christopher J. Markiewicz Date: Thu May 27 08:48:31 2021 -0400 CI: Avoid updating feedstock on non-numeric rel branches commit 5e8af43e305f0b746fe142f292e107fd2938f439 Author: Christopher J. 
Markiewicz Date: Thu May 27 08:46:06 2021 -0400 CI: Move packaging from Circle to GitHub actions --- .circleci/config.yml | 72 +---------------------------------- .github/workflows/package.yml | 61 +++++++++++++++++++++++++++++ setup.py | 1 + 3 files changed, 63 insertions(+), 71 deletions(-) create mode 100644 .github/workflows/package.yml diff --git a/.circleci/config.yml b/.circleci/config.yml index a1effa019c..26369371b1 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -204,61 +204,6 @@ jobs: - /tmp/docker/cache/Dockerfile.base-pruned key: dockerfile-cache-v1-{{ .Branch }}-{{ checksum "/tmp/docker/cache/Dockerfile.base-pruned" }} - pypi_precheck: - machine: *machine_kwds - working_directory: /home/circleci/nipype - steps: - - checkout: - path: /home/circleci/nipype - - run: - name: Check pypi preconditions - command: | - pyenv local 3.6.5 - pip install --upgrade pip twine future wheel readme_renderer setuptools - python setup.py sdist bdist_wheel - twine check dist/* - - run: - name: Validate Python 3 installation - command: | - pyenv local 3.6.5 - pip install --upgrade pip - pip install dist/nipype-*-py3-none-any.whl - - run: - name: Validate Python 3.7 installation - command: | - pyenv local 3.7.0 - pip install --upgrade pip - # Pre-install a version of numpy that will not pass - pip install numpy==1.15.0 - pip install dist/nipype-*-py3-none-any.whl - # Numpy should be upgraded to >= 1.15.3 - test "$(pip show numpy | grep Version)" \> "Version: 1.15.2" - - run: - name: Check python_requires prevents installation on Python 3.3 - command: | - pyenv install 3.3.7 - pyenv local 3.3.7 - FAIL=false - pip install dist/nipype-*-py2.py3-none-any.whl || FAIL=true - $FAIL - - store_artifacts: - path: /home/circleci/nipype/dist - - deploy_pypi: - machine: *machine_kwds - working_directory: /home/circleci/nipype - steps: - - checkout: - path: /home/circleci/nipype - - run: - name: Deploy to PyPI - command: | - pyenv local 3.6.5 - pip install --upgrade twine wheel readme_renderer setuptools - python setup.py check -r -s - python setup.py sdist bdist_wheel - twine upload dist/* - update_feedstock: machine: *machine_kwds working_directory: /home/circleci/nipype @@ -295,12 +240,6 @@ workflows: version: 2 build_test_deploy: jobs: - - pypi_precheck: - filters: - branches: - only: /(rel|dev)\/.*/ - tags: - only: /.*/ - compare_base_dockerfiles: filters: branches: @@ -325,19 +264,10 @@ workflows: only: /.*/ requires: - test_pytest - - deploy_pypi: - filters: - branches: - ignore: /.*/ - tags: - only: /.*/ - requires: - - pypi_precheck - - test_pytest - update_feedstock: context: nipybot filters: branches: - only: /rel\/.*/ + only: /rel\/\d.*/ tags: only: /.*/ diff --git a/.github/workflows/package.yml b/.github/workflows/package.yml new file mode 100644 index 0000000000..fe1fbe50d2 --- /dev/null +++ b/.github/workflows/package.yml @@ -0,0 +1,61 @@ +name: Packaging + +on: + push: + branches: + - master + - maint/* + - rel/* + tags: + - '*' + +defaults: + run: + shell: bash + +jobs: + package: + # Build packages and upload + runs-on: ${{ matrix.os }} + strategy: + matrix: + include: + - os: ubuntu-latest + python-version: 3.8 + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Display Python version + run: python -c "import sys; print(sys.version)" + - name: Create virtual environment + run: tools/ci/create_venv.sh + - name: Build sdist + run: 
tools/ci/build_archive.sh + env: + INSTALL_TYPE: sdist + - name: Build wheel + run: tools/ci/build_archive.sh + env: + INSTALL_TYPE: wheel + ### Temporary + - name: Check packages with twine + run: | + pip install twine + twine check dist/* + ### Switch back to this if we figure out who has permissions on test.pypi.org + # - name: Test PyPI upload + # uses: pypa/gh-action-pypi-publish@master + # with: + # user: __token__ + # password: ${{ secrets.TEST_PYPI_API_TOKEN }} + # repository_url: https://test.pypi.org/legacy/ + # skip_existing: true + - name: Upload to PyPI (on tags) + if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') + uses: pypa/gh-action-pypi-publish@master + with: + user: __token__ + password: ${{ secrets.PYPI_API_TOKEN }} diff --git a/setup.py b/setup.py index 1a666c6115..046124fde8 100755 --- a/setup.py +++ b/setup.py @@ -119,6 +119,7 @@ def main(): maintainer_email=ldict["MAINTAINER_EMAIL"], description=ldict["DESCRIPTION"], long_description=ldict["LONG_DESCRIPTION"], + long_description_content_type="text/x-rst", url=ldict["URL"], download_url=ldict["DOWNLOAD_URL"], license=ldict["LICENSE"], From 9745ebdd96ed3b6c3750aa1a5fbd3aae96aa67b8 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Sun, 30 May 2021 09:07:54 -0400 Subject: [PATCH 1030/1665] MNT: Clean up deprecations --- nipype/interfaces/base/traits_extension.py | 2 +- nipype/utils/misc.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/nipype/interfaces/base/traits_extension.py b/nipype/interfaces/base/traits_extension.py index b513a17eee..9ac4aa1839 100644 --- a/nipype/interfaces/base/traits_extension.py +++ b/nipype/interfaces/base/traits_extension.py @@ -19,7 +19,7 @@ (usually by Robert Kern). """ -from collections import Sequence +from collections.abc import Sequence # perform all external trait imports here from traits import __version__ as traits_version diff --git a/nipype/utils/misc.py b/nipype/utils/misc.py index 3531610800..8ec6ee5342 100644 --- a/nipype/utils/misc.py +++ b/nipype/utils/misc.py @@ -6,7 +6,7 @@ import os import sys import re -from collections import Iterator +from collections.abc import Iterator from warnings import warn from distutils.version import LooseVersion @@ -34,7 +34,7 @@ def atoi(text): def natural_keys(text): if isinstance(text, tuple): text = text[0] - return [atoi(c) for c in re.split("(\d+)", text)] + return [atoi(c) for c in re.split(r"(\d+)", text)] return sorted(l, key=natural_keys) From da278a6a0546b4ec9f84abebad4f0234e70fca87 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sun, 30 May 2021 09:31:11 -0400 Subject: [PATCH 1031/1665] MNT: np.bool -> bool --- nipype/algorithms/confounds.py | 6 +++--- nipype/algorithms/metrics.py | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index 1aa88d6e62..18c268b461 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -613,7 +613,7 @@ def _run_interface(self, runtime): if len(mask_images) == 0: img = nb.Nifti1Image( - np.ones(imgseries.shape[:3], dtype=np.bool), + np.ones(imgseries.shape[:3], dtype=bool), affine=imgseries.affine, header=imgseries.header, ) @@ -835,7 +835,7 @@ def _process_masks(self, mask_images, timeseries=None): self._mask_files = [] timeseries = np.asanyarray(timeseries) for i, img in enumerate(mask_images): - mask = np.asanyarray(img.dataobj).astype(np.bool) + mask = np.asanyarray(img.dataobj).astype(bool) imgseries = timeseries[mask, :] imgseries = regress_poly(2, imgseries)[0] tSTD = _compute_tSTD(imgseries, 0, axis=-1) @@ -1379,7 +1379,7 @@ def compute_noise_components( md_retained = [] for name, img in zip(mask_names, mask_images): - mask = np.asanyarray(nb.squeeze_image(img).dataobj).astype(np.bool) + mask = np.asanyarray(nb.squeeze_image(img).dataobj).astype(bool) if imgseries.shape[:3] != mask.shape: raise ValueError( "Inputs for CompCor, timeseries and mask, do not have " diff --git a/nipype/algorithms/metrics.py b/nipype/algorithms/metrics.py index e399becb65..b314197f77 100644 --- a/nipype/algorithms/metrics.py +++ b/nipype/algorithms/metrics.py @@ -88,10 +88,10 @@ def _get_coordinates(self, data, affine): def _eucl_min(self, nii1, nii2): from scipy.spatial.distance import cdist, euclidean - origdata1 = np.asanyarray(nii1.dataobj).astype(np.bool) + origdata1 = np.asanyarray(nii1.dataobj).astype(bool) border1 = self._find_border(origdata1) - origdata2 = np.asanyarray(nii2.dataobj).astype(np.bool) + origdata2 = np.asanyarray(nii2.dataobj).astype(bool) border2 = self._find_border(origdata2) set1_coordinates = self._get_coordinates(border1, nii1.affine) @@ -134,10 +134,10 @@ def _eucl_cog(self, nii1, nii2): def _eucl_mean(self, nii1, nii2, weighted=False): from scipy.spatial.distance import cdist - origdata1 = np.asanyarray(nii1.dataobj).astype(np.bool) + origdata1 = np.asanyarray(nii1.dataobj).astype(bool) border1 = self._find_border(origdata1) - origdata2 = np.asanyarray(nii2.dataobj).astype(np.bool) + origdata2 = np.asanyarray(nii2.dataobj).astype(bool) set1_coordinates = self._get_coordinates(border1, nii1.affine) set2_coordinates = self._get_coordinates(origdata2, nii2.affine) From 9fd978536697b25123b9be153c103f0f9367f748 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Sun, 30 May 2021 09:34:08 -0400 Subject: [PATCH 1032/1665] MNT: Drop invalid "is not" usage --- nipype/interfaces/afni/base.py | 2 +- nipype/interfaces/diffusion_toolkit/base.py | 2 +- nipype/utils/tests/test_filemanip.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nipype/interfaces/afni/base.py b/nipype/interfaces/afni/base.py index 20a4a9b4d6..31a9f7585d 100644 --- a/nipype/interfaces/afni/base.py +++ b/nipype/interfaces/afni/base.py @@ -98,7 +98,7 @@ def standard_image(img_name): resource_monitor=False, terminal_output="allatonce", ).run() - if clout.runtime.returncode is not 0: + if clout.runtime.returncode != 0: return None out = clout.runtime.stdout diff --git a/nipype/interfaces/diffusion_toolkit/base.py b/nipype/interfaces/diffusion_toolkit/base.py index b4b5ba1893..2068f18988 100644 --- a/nipype/interfaces/diffusion_toolkit/base.py +++ b/nipype/interfaces/diffusion_toolkit/base.py @@ -47,7 +47,7 @@ def version(): """ clout = CommandLine(command="dti_recon", terminal_output="allatonce").run() - if clout.runtime.returncode is not 0: + if clout.runtime.returncode != 0: return None dtirecon = clout.runtime.stdout diff --git a/nipype/utils/tests/test_filemanip.py b/nipype/utils/tests/test_filemanip.py index e8da256261..299029a8d2 100644 --- a/nipype/utils/tests/test_filemanip.py +++ b/nipype/utils/tests/test_filemanip.py @@ -154,7 +154,7 @@ def test_copyfiles(_temp_analyze_files, _temp_analyze_files_prime): def test_linkchain(_temp_analyze_files): - if os.name is not "posix": + if os.name != "posix": return orig_img, orig_hdr = _temp_analyze_files pth, fname = os.path.split(orig_img) @@ -230,7 +230,7 @@ def test_recopy(_temp_analyze_files): def test_copyfallback(_temp_analyze_files): - if os.name is not "posix": + if os.name != "posix": return orig_img, orig_hdr = _temp_analyze_files pth, imgname = os.path.split(orig_img) From 7bf9de392f4eff9dccc83910a3a3f51a6b88dc3c Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Tue, 22 Jun 2021 09:02:53 +0200 Subject: [PATCH 1033/1665] STY: Make private member name consistent with the rest of them --- nipype/algorithms/confounds.py | 6 +++--- nipype/interfaces/afni/base.py | 2 +- nipype/interfaces/afni/utils.py | 4 ++-- nipype/interfaces/ants/segmentation.py | 2 +- nipype/interfaces/base/core.py | 4 ++-- nipype/interfaces/base/support.py | 2 +- nipype/interfaces/fsl/base.py | 2 +- nipype/interfaces/fsl/model.py | 2 +- nipype/interfaces/petpvc.py | 2 +- nipype/interfaces/quickshear.py | 2 +- nipype/interfaces/spm/base.py | 2 +- 11 files changed, 15 insertions(+), 15 deletions(-) diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index 18c268b461..476ade8dbe 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -104,7 +104,7 @@ class ComputeDVARS(BaseInterface): input_spec = ComputeDVARSInputSpec output_spec = ComputeDVARSOutputSpec - references_ = [ + _references = [ { "entry": BibTeX( """\ @@ -311,7 +311,7 @@ class FramewiseDisplacement(BaseInterface): input_spec = FramewiseDisplacementInputSpec output_spec = FramewiseDisplacementOutputSpec - references_ = [ + _references = [ { "entry": BibTeX( """\ @@ -556,7 +556,7 @@ class CompCor(SimpleInterface): input_spec = CompCorInputSpec output_spec = CompCorOutputSpec - references_ = [ + _references = [ { "tags": ["method", "implementation"], "entry": BibTeX( diff --git a/nipype/interfaces/afni/base.py b/nipype/interfaces/afni/base.py index 31a9f7585d..3f338eb0ce 100644 --- 
a/nipype/interfaces/afni/base.py +++ b/nipype/interfaces/afni/base.py @@ -151,7 +151,7 @@ class AFNICommand(AFNICommandBase): input_spec = AFNICommandInputSpec _outputtype = None - references_ = [ + _references = [ { "entry": BibTeX( "@article{Cox1996," diff --git a/nipype/interfaces/afni/utils.py b/nipype/interfaces/afni/utils.py index e4b6f37778..9c44a40fd8 100644 --- a/nipype/interfaces/afni/utils.py +++ b/nipype/interfaces/afni/utils.py @@ -1032,7 +1032,7 @@ class Edge3(AFNICommand): _cmd = "3dedge3" input_spec = Edge3InputSpec output_spec = AFNICommandOutputSpec - references_ = [ + _references = [ { "entry": BibTeX( """\ @@ -1341,7 +1341,7 @@ class FWHMx(AFNICommandBase): input_spec = FWHMxInputSpec output_spec = FWHMxOutputSpec - references_ = [ + _references = [ { "entry": BibTeX( "@article{CoxReynoldsTaylor2016," diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py index 555cdd1777..33c92fc005 100644 --- a/nipype/interfaces/ants/segmentation.py +++ b/nipype/interfaces/ants/segmentation.py @@ -1734,7 +1734,7 @@ class KellyKapowski(ANTSCommand): input_spec = KellyKapowskiInputSpec output_spec = KellyKapowskiOutputSpec - references_ = [ + _references = [ { "entry": BibTeX( """\ diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index 57e889da9b..605021384a 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -161,7 +161,7 @@ class BaseInterface(Interface): _version = None _additional_metadata = [] _redirect_x = False - references_ = [] + _references = [] resource_monitor = True # Enabled for this interface IFF enabled in the config _etelemetry_version_data = None @@ -343,7 +343,7 @@ def _run_interface(self, runtime): def _duecredit_cite(self): """Add the interface references to the duecredit citations""" - for r in self.references_: + for r in self._references: r["path"] = self.__module__ due.cite(**r) diff --git a/nipype/interfaces/base/support.py b/nipype/interfaces/base/support.py index 88359354fd..df357481fb 100644 --- a/nipype/interfaces/base/support.py +++ b/nipype/interfaces/base/support.py @@ -322,7 +322,7 @@ def _outputs_help(cls): def _refs_help(cls): """Prints interface references.""" - references = getattr(cls, "references_", None) + references = getattr(cls, "_references", None) if not references: return [] diff --git a/nipype/interfaces/fsl/base.py b/nipype/interfaces/fsl/base.py index 34127cadef..351be33ade 100644 --- a/nipype/interfaces/fsl/base.py +++ b/nipype/interfaces/fsl/base.py @@ -151,7 +151,7 @@ class FSLCommand(CommandLine): input_spec = FSLCommandInputSpec _output_type = None - references_ = [ + _references = [ { "entry": BibTeX( "@article{JenkinsonBeckmannBehrensWoolrichSmith2012," diff --git a/nipype/interfaces/fsl/model.py b/nipype/interfaces/fsl/model.py index 15fb36f6c0..059c597ce6 100644 --- a/nipype/interfaces/fsl/model.py +++ b/nipype/interfaces/fsl/model.py @@ -1100,7 +1100,7 @@ class FLAMEO(FSLCommand): input_spec = FLAMEOInputSpec output_spec = FLAMEOOutputSpec - references_ = [ + _references = [ { "entry": BibTeX( "@article{BeckmannJenkinsonSmith2003," diff --git a/nipype/interfaces/petpvc.py b/nipype/interfaces/petpvc.py index 6d0b8f7e04..f315e9fc7c 100644 --- a/nipype/interfaces/petpvc.py +++ b/nipype/interfaces/petpvc.py @@ -155,7 +155,7 @@ class PETPVC(CommandLine): output_spec = PETPVCOutputSpec _cmd = "petpvc" - references_ = [ + _references = [ { "entry": BibTeX( "@article{0031-9155-61-22-7975," diff --git a/nipype/interfaces/quickshear.py 
b/nipype/interfaces/quickshear.py index b7409fdbf3..feb9ee22f4 100644 --- a/nipype/interfaces/quickshear.py +++ b/nipype/interfaces/quickshear.py @@ -75,7 +75,7 @@ class Quickshear(CommandLine): input_spec = QuickshearInputSpec output_spec = QuickshearOutputSpec - references_ = [ + _references = [ { "entry": BibTeX( "@inproceedings{Schimke2011," diff --git a/nipype/interfaces/spm/base.py b/nipype/interfaces/spm/base.py index 147d63894c..2347d718ae 100644 --- a/nipype/interfaces/spm/base.py +++ b/nipype/interfaces/spm/base.py @@ -290,7 +290,7 @@ class SPMCommand(BaseInterface): _paths = None _use_mcr = None - references_ = [ + _references = [ { "entry": BibTeX( "@book{FrackowiakFristonFrithDolanMazziotta1997," From c5093fb53d75dab22255973ec0a52834f1d3e203 Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Tue, 22 Jun 2021 08:54:29 +0200 Subject: [PATCH 1034/1665] ENH: Clean-up the BaseInterface ``run()`` function using context Python contexts seem the most appropriate pattern to follow. --- nipype/interfaces/base/core.py | 172 ++++++++---------------------- nipype/interfaces/base/support.py | 98 ++++++++++++++++- nipype/utils/profiler.py | 40 ++++++- 3 files changed, 179 insertions(+), 131 deletions(-) diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index 57e889da9b..f23e11e7bd 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -1,31 +1,24 @@ -# -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Nipype interfaces core ...................... - Defines the ``Interface`` API and the body of the most basic interfaces. The I/O specifications corresponding to these base interfaces are found in the ``specs`` module. """ -from copy import deepcopy -from datetime import datetime as dt import os -import platform import subprocess as sp import shlex -import sys import simplejson as json -from dateutil.parser import parse as parseutc from traits.trait_errors import TraitError from ... import config, logging, LooseVersion from ...utils.provenance import write_provenance -from ...utils.misc import str2bool, rgetcwd +from ...utils.misc import str2bool from ...utils.filemanip import split_filename, which, get_dependencies, canonicalize_env from ...utils.subprocess import run_command @@ -39,7 +32,12 @@ MpiCommandLineInputSpec, get_filecopy_info, ) -from .support import Bunch, InterfaceResult, NipypeInterfaceError, format_help +from .support import ( + RuntimeContext, + InterfaceResult, + NipypeInterfaceError, + format_help, +) iflogger = logging.getLogger("nipype.interface") @@ -63,8 +61,15 @@ class Interface(object): """ - input_spec = None # A traited input specification - output_spec = None # A traited output specification + input_spec = None + """ + The specification of the input, defined by a :py:class:`~traits.has_traits.HasTraits` class. + """ + output_spec = None + """ + The specification of the output, defined by a :py:class:`~traits.has_traits.HasTraits` class. 
+ """ + _can_resume = False # See property below _always_run = False # See property below @@ -365,131 +370,42 @@ def run(self, cwd=None, ignore_exception=None, **inputs): if successful, results """ - from ...utils.profiler import ResourceMonitor - - # if ignore_exception is not provided, taking self.ignore_exception - if ignore_exception is None: - ignore_exception = self.ignore_exception - - # Tear-up: get current and prev directories - syscwd = rgetcwd(error=False) # Recover when wd does not exist - if cwd is None: - cwd = syscwd - - os.chdir(cwd) # Change to the interface wd - - enable_rm = config.resource_monitor and self.resource_monitor - self.inputs.trait_set(**inputs) - self._check_mandatory_inputs() - self._check_version_requirements(self.inputs) - interface = self.__class__ - self._duecredit_cite() - - # initialize provenance tracking - store_provenance = str2bool( - config.get("execution", "write_provenance", "false") - ) - env = deepcopy(dict(os.environ)) - if self._redirect_x: - env["DISPLAY"] = config.get_display() - - runtime = Bunch( - cwd=cwd, - prevcwd=syscwd, - returncode=None, - duration=None, - environ=env, - startTime=dt.isoformat(dt.utcnow()), - endTime=None, - platform=platform.platform(), - hostname=platform.node(), - version=self.version, + rtc = RuntimeContext( + resource_monitor=config.resource_monitor and self.resource_monitor, + ignore_exception=ignore_exception + if ignore_exception is not None + else self.ignore_exception, ) - runtime_attrs = set(runtime.dictcopy()) - - mon_sp = None - if enable_rm: - mon_freq = float(config.get("execution", "resource_monitor_frequency", 1)) - proc_pid = os.getpid() - iflogger.debug( - "Creating a ResourceMonitor on a %s interface, PID=%d.", - self.__class__.__name__, - proc_pid, - ) - mon_sp = ResourceMonitor(proc_pid, freq=mon_freq) - mon_sp.start() - # Grab inputs now, as they should not change during execution - inputs = self.inputs.get_traitsfree() - outputs = None + with rtc(self, cwd=cwd, redirect_x=self._redirect_x) as runtime: + self.inputs.trait_set(**inputs) + self._check_mandatory_inputs() + self._check_version_requirements(self.inputs) - try: + # Grab inputs now, as they should not change during execution + inputs = self.inputs.get_traitsfree() + outputs = None + # Run interface runtime = self._pre_run_hook(runtime) runtime = self._run_interface(runtime) runtime = self._post_run_hook(runtime) + # Collect outputs outputs = self.aggregate_outputs(runtime) - except Exception as e: - import traceback - - # Retrieve the maximum info fast - runtime.traceback = traceback.format_exc() - # Gather up the exception arguments and append nipype info. - exc_args = e.args if getattr(e, "args") else tuple() - exc_args += ( - "An exception of type %s occurred while running interface %s." 
- % (type(e).__name__, self.__class__.__name__), - ) - if config.get("logging", "interface_level", "info").lower() == "debug": - exc_args += ("Inputs: %s" % str(self.inputs),) - - runtime.traceback_args = ("\n".join(["%s" % arg for arg in exc_args]),) - - if not ignore_exception: - raise - finally: - if runtime is None or runtime_attrs - set(runtime.dictcopy()): - raise RuntimeError( - "{} interface failed to return valid " - "runtime object".format(interface.__class__.__name__) - ) - # This needs to be done always - runtime.endTime = dt.isoformat(dt.utcnow()) - timediff = parseutc(runtime.endTime) - parseutc(runtime.startTime) - runtime.duration = ( - timediff.days * 86400 + timediff.seconds + timediff.microseconds / 1e6 - ) - results = InterfaceResult( - interface, runtime, inputs=inputs, outputs=outputs, provenance=None - ) - # Add provenance (if required) - if store_provenance: - # Provenance will only throw a warning if something went wrong - results.provenance = write_provenance(results) - - # Make sure runtime profiler is shut down - if enable_rm: - import numpy as np - - mon_sp.stop() - - runtime.mem_peak_gb = None - runtime.cpu_percent = None - - # Read .prof file in and set runtime values - vals = np.loadtxt(mon_sp.fname, delimiter=",") - if vals.size: - vals = np.atleast_2d(vals) - runtime.mem_peak_gb = vals[:, 2].max() / 1024 - runtime.cpu_percent = vals[:, 1].max() - - runtime.prof_dict = { - "time": vals[:, 0].tolist(), - "cpus": vals[:, 1].tolist(), - "rss_GiB": (vals[:, 2] / 1024).tolist(), - "vms_GiB": (vals[:, 3] / 1024).tolist(), - } - os.chdir(syscwd) + results = InterfaceResult( + self.__class__, + rtc.runtime, + inputs=inputs, + outputs=outputs, + provenance=None, + ) + + # Add provenance (if required) + if str2bool(config.get("execution", "write_provenance", "false")): + # Provenance will only throw a warning if something went wrong + results.provenance = write_provenance(results) + + self._duecredit_cite() return results diff --git a/nipype/interfaces/base/support.py b/nipype/interfaces/base/support.py index 88359354fd..4b01769e75 100644 --- a/nipype/interfaces/base/support.py +++ b/nipype/interfaces/base/support.py @@ -8,12 +8,16 @@ """ import os +from contextlib import AbstractContextManager from copy import deepcopy from textwrap import wrap import re +from datetime import datetime as dt +from dateutil.parser import parse as parseutc +import platform -from ... import logging -from ...utils.misc import is_container +from ... 
import logging, config +from ...utils.misc import is_container, rgetcwd from ...utils.filemanip import md5, hash_infile iflogger = logging.getLogger("nipype.interface") @@ -21,6 +25,96 @@ HELP_LINEWIDTH = 70 +class RuntimeContext(AbstractContextManager): + """A context manager to run NiPype interfaces.""" + + __slots__ = ("_runtime", "_resmon", "_ignore_exc") + + def __init__(self, resource_monitor=False, ignore_exception=False): + """Initialize the context manager object.""" + self._ignore_exc = ignore_exception + _proc_pid = os.getpid() + if resource_monitor: + from ...utils.profiler import ResourceMonitor + else: + from ...utils.profiler import ResourceMonitorMock as ResourceMonitor + + self._resmon = ResourceMonitor( + _proc_pid, + freq=float(config.get("execution", "resource_monitor_frequency", 1)), + ) + + def __call__(self, interface, cwd=None, redirect_x=False): + """Generate a new runtime object.""" + # Tear-up: get current and prev directories + _syscwd = rgetcwd(error=False) # Recover when wd does not exist + if cwd is None: + cwd = _syscwd + + self._runtime = Bunch( + cwd=str(cwd), + duration=None, + endTime=None, + environ=deepcopy(dict(os.environ)), + hostname=platform.node(), + interface=interface.__class__.__name__, + platform=platform.platform(), + prevcwd=str(_syscwd), + redirect_x=redirect_x, + resmon=self._resmon.fname or "off", + returncode=None, + startTime=None, + version=interface.version, + ) + return self + + def __enter__(self): + """Tear-up the execution of an interface.""" + if self._runtime.redirect_x: + self._runtime.environ["DISPLAY"] = config.get_display() + + self._runtime.startTime = dt.isoformat(dt.utcnow()) + self._resmon.start() + # TODO: Perhaps clean-up path and ensure it exists? + os.chdir(self._runtime.cwd) + return self._runtime + + def __exit__(self, exc_type, exc_value, exc_tb): + """Tear-down interface execution.""" + self._runtime.endTime = dt.isoformat(dt.utcnow()) + timediff = parseutc(self._runtime.endTime) - parseutc(self._runtime.startTime) + self._runtime.duration = ( + timediff.days * 86400 + timediff.seconds + timediff.microseconds / 1e6 + ) + # Collect monitored data + for k, v in self._resmon.stop(): + setattr(self._runtime, k, v) + + os.chdir(self._runtime.prevcwd) + + if exc_type is not None or exc_value is not None or exc_tb is not None: + import traceback + + # Retrieve the maximum info fast + self._runtime.traceback = "".join( + traceback.format_exception(exc_type, exc_value, exc_tb) + ) + # Gather up the exception arguments and append nipype info. 
+ exc_args = exc_value.args if getattr(exc_value, "args") else tuple() + exc_args += ( + f"An exception of type {exc_type.__name__} occurred while " + f"running interface {self._runtime.interface}.", + ) + self._runtime.traceback_args = ("\n".join([f"{arg}" for arg in exc_args]),) + + if self._ignore_exc: + return True + + @property + def runtime(self): + return self._runtime + + class NipypeInterfaceError(Exception): """Custom error for interfaces""" diff --git a/nipype/utils/profiler.py b/nipype/utils/profiler.py index 20e024693f..9ed380eb04 100644 --- a/nipype/utils/profiler.py +++ b/nipype/utils/profiler.py @@ -5,6 +5,7 @@ Utilities to keep track of performance """ import os +import numpy as np import threading from time import time @@ -23,6 +24,24 @@ _MB = 1024.0 ** 2 +class ResourceMonitorMock: + """A mock class to use when the monitor is disabled.""" + + @property + def fname(self): + """Get/set the internal filename""" + return None + + def __init__(self, pid, freq=5, fname=None, python=True): + pass + + def start(self): + pass + + def stop(self): + return {} + + class ResourceMonitor(threading.Thread): """ A ``Thread`` to monitor a specific PID with a certain frequence @@ -57,7 +76,7 @@ def fname(self): return self._fname def stop(self): - """Stop monitoring""" + """Stop monitoring.""" if not self._event.is_set(): self._event.set() self.join() @@ -65,6 +84,25 @@ def stop(self): self._logfile.flush() self._logfile.close() + retval = { + "mem_peak_gb": None, + "cpu_percent": None, + } + + # Read .prof file in and set runtime values + vals = np.loadtxt(self._fname, delimiter=",") + if vals.size: + vals = np.atleast_2d(vals) + retval["mem_peak_gb"] = vals[:, 2].max() / 1024 + retval["cpu_percent"] = vals[:, 1].max() + retval["prof_dict"] = { + "time": vals[:, 0].tolist(), + "cpus": vals[:, 1].tolist(), + "rss_GiB": (vals[:, 2] / 1024).tolist(), + "vms_GiB": (vals[:, 3] / 1024).tolist(), + } + return retval + def _sample(self, cpu_interval=None): cpu = 0.0 rss = 0.0 From 24f2cbced7cb27364ca4c8725954f8ec53d16fac Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Wed, 21 Jul 2021 16:01:31 +0200 Subject: [PATCH 1035/1665] enh: roll back to a more constant behavior with errors --- nipype/interfaces/base/core.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index f23e11e7bd..7eb81b2c40 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -19,7 +19,13 @@ from ... 
import config, logging, LooseVersion from ...utils.provenance import write_provenance from ...utils.misc import str2bool -from ...utils.filemanip import split_filename, which, get_dependencies, canonicalize_env +from ...utils.filemanip import ( + canonicalize_env, + get_dependencies, + indirectory, + split_filename, + which, +) from ...utils.subprocess import run_command from ...external.due import due @@ -377,10 +383,12 @@ def run(self, cwd=None, ignore_exception=None, **inputs): else self.ignore_exception, ) - with rtc(self, cwd=cwd, redirect_x=self._redirect_x) as runtime: + with indirectory(cwd or os.getcwd()): self.inputs.trait_set(**inputs) - self._check_mandatory_inputs() - self._check_version_requirements(self.inputs) + self._check_mandatory_inputs() + self._check_version_requirements(self.inputs) + + with rtc(self, cwd=cwd, redirect_x=self._redirect_x) as runtime: # Grab inputs now, as they should not change during execution inputs = self.inputs.get_traitsfree() From cc7b3ae7151519bc36bdf6ffbf35b18a1458413c Mon Sep 17 00:00:00 2001 From: Christian Hinge Date: Fri, 6 Aug 2021 19:06:19 +0200 Subject: [PATCH 1036/1665] Changes the type of ConvertScalarImageToRGBInputSpec.mask_file from File to traits.Str --- .../ants/tests/test_auto_ConvertScalarImageToRGB.py | 1 - nipype/interfaces/ants/visualization.py | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py b/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py index a49239ebae..0ffdaef6fc 100644 --- a/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py +++ b/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py @@ -35,7 +35,6 @@ def test_ConvertScalarImageToRGB_inputs(): ), mask_image=dict( argstr="%s", - extensions=None, position=3, usedefault=True, ), diff --git a/nipype/interfaces/ants/visualization.py b/nipype/interfaces/ants/visualization.py index a443a76651..3d9ee6c120 100644 --- a/nipype/interfaces/ants/visualization.py +++ b/nipype/interfaces/ants/visualization.py @@ -28,8 +28,8 @@ class ConvertScalarImageToRGBInputSpec(ANTSCommandInputSpec): output_image = traits.Str( "rgb.nii.gz", argstr="%s", usedefault=True, desc="rgb output image", position=2 ) - mask_image = File( - "none", argstr="%s", exists=True, desc="mask image", position=3, usedefault=True + mask_image = traits.Str( + "none", argstr="%s", desc="mask image", position=3, usedefault=True ) colormap = traits.Enum( "grey", From bd0a2f8a164bd56bc4b445dbbcfb9ceb4bff9e5e Mon Sep 17 00:00:00 2001 From: mnoergaard Date: Tue, 10 Aug 2021 13:45:40 +0200 Subject: [PATCH 1037/1665] Add mri_gtmseg to freesurfer preprocess module --- nipype/interfaces/freesurfer/preprocess.py | 35 + nipype/testing/data/.surf01.vtk.icloud | Bin 0 -> 160 bytes nipype/testing/data/surf01.vtk | 93104 ------------------- 3 files changed, 35 insertions(+), 93104 deletions(-) create mode 100644 nipype/testing/data/.surf01.vtk.icloud delete mode 100644 nipype/testing/data/surf01.vtk diff --git a/nipype/interfaces/freesurfer/preprocess.py b/nipype/interfaces/freesurfer/preprocess.py index cf4e97c9c9..0cf0fb156c 100644 --- a/nipype/interfaces/freesurfer/preprocess.py +++ b/nipype/interfaces/freesurfer/preprocess.py @@ -3380,3 +3380,38 @@ def _format_arg(self, name, spec, value): if name == "out_type": value = {"VOX2VOX": 0, "RAS2RAS": 1}[value] return super(ConcatenateLTA, self)._format_arg(name, spec, value) + +class MRIGtmSegInputSpec(FSTraitedSpec): + subject_id = traits.Str( + 
argstr="--s %s", desc="freesurfer subject id", mandatory=True + ) + + +class MRIGtmSeg(FSCommand): + """ + This program performs a high-resolution segmentation to be used for PET partial volume correction. + + + Examples + ======== + >>> ' + """ + + _cmd = "mri_gtmseg" + input_spec = MRIGtmSegInputSpec + + +class MRIGtmPVC(FSCommand): + """ + This program performs partial volume correction using the output from gtmseg. + + + Examples + ======== + >>> ' + """ + + _cmd = "mri_gtmpvc" + + + diff --git a/nipype/testing/data/.surf01.vtk.icloud b/nipype/testing/data/.surf01.vtk.icloud new file mode 100644 index 0000000000000000000000000000000000000000..ec68059205cdb66494d00b79f488fa48933a7df3 GIT binary patch literal 160 zcmYc)$jK}&F)+By$i&RT$`<1n92(@~mzbOComv?$AOPmNW#*&?XI4RkB;Z0psm1xF zMaiill?5QFsN&M1Gy_AuvXX2e2DQDD;ss=|Dh*0aFU?6T;^3Fj_X>?-00Txw2+hC_ IrD0Sh0N8CSGXMYp literal 0 HcmV?d00001 diff --git a/nipype/testing/data/surf01.vtk b/nipype/testing/data/surf01.vtk deleted file mode 100644 index c4d30e66b6..0000000000 --- a/nipype/testing/data/surf01.vtk +++ /dev/null @@ -1,93104 +0,0 @@ -# vtk DataFile Version 1.0 -vtk output -ASCII -DATASET POLYDATA -POINTS 31034 float --32.035343170 -33.840930939 -35.797355652 --31.336599350 -33.903381348 -35.879737854 --30.442840576 -33.946811676 -35.937088013 --29.481933594 -33.968864441 -35.965759277 --28.495168686 -33.977611542 -35.976860046 --27.498950958 -33.980327606 -35.980201721 --26.499826431 -33.980976105 -35.980960846 --25.499979019 -33.981090546 -35.981090546 --24.499998093 -33.981101990 -35.981101990 --23.500000000 -33.981101990 -35.981101990 --22.500000000 -33.981101990 -35.981101990 --21.500000000 -33.981101990 -35.981101990 --20.500000000 -33.981101990 -35.981101990 --19.500000000 -33.981101990 -35.981101990 --18.500000000 -33.981101990 -35.981101990 --17.500000000 -33.981101990 -35.981101990 --16.500000000 -33.981101990 -35.981101990 --15.500000000 -33.981101990 -35.981101990 --14.500000000 -33.981101990 -35.981101990 --13.500000000 -33.981101990 -35.981101990 --12.500000000 -33.981101990 -35.981101990 --11.500000000 -33.981101990 -35.981101990 --10.500000000 -33.981101990 -35.981101990 --9.500000000 -33.981101990 -35.981101990 --8.500000000 -33.981101990 -35.981101990 --7.500000000 -33.981101990 -35.981101990 --6.500000000 -33.981101990 -35.981101990 --5.500000000 -33.981101990 -35.981101990 --4.500000000 -33.981101990 -35.981101990 --3.500000000 -33.981101990 -35.981101990 --2.500000000 -33.981101990 -35.981101990 --1.500000000 -33.981101990 -35.981101990 --0.500000000 -33.981101990 -35.981101990 -0.500000000 -33.981101990 -35.981101990 -1.500000000 -33.981101990 -35.981101990 -2.500000000 -33.981101990 -35.981101990 -3.500000000 -33.981101990 -35.981101990 -4.500000000 -33.981101990 -35.981101990 -5.500000000 -33.981101990 -35.981101990 -6.500000000 -33.981101990 -35.981101990 -7.500000000 -33.981101990 -35.981101990 -8.500000000 -33.981101990 -35.981101990 -9.500000000 -33.981101990 -35.981101990 -10.500000000 -33.981101990 -35.981101990 -11.500000000 -33.981101990 -35.981101990 -12.500000000 -33.981101990 -35.981101990 -13.500000000 -33.981101990 -35.981101990 -14.500000000 -33.981101990 -35.981101990 -15.500000000 -33.981101990 -35.981101990 -16.500000000 -33.981101990 -35.981101990 -17.500000000 -33.981101990 -35.981101990 -18.500000000 -33.981101990 -35.981101990 -19.500000000 -33.981101990 -35.981101990 -20.500000000 -33.981101990 -35.981101990 -21.500000000 -33.981101990 -35.981101990 -22.500000000 -33.981101990 -35.981101990 
-23.499998093 -33.981101990 -35.981101990 -24.499979019 -33.981090546 -35.981090546 -25.499824524 -33.980976105 -35.980960846 -26.498950958 -33.980335236 -35.980201721 -27.495168686 -33.977619171 -35.976860046 -28.481933594 -33.968864441 -35.965766907 -29.442840576 -33.946807861 -35.937091827 -30.336599350 -33.903377533 -35.879737854 -31.035345078 -33.840930939 -35.797355652 --33.030693054 -33.030693054 -35.846313477 --32.334243774 -33.254276276 -35.954723358 --31.405673981 -33.216838837 -36.112342834 --30.460596085 -33.220722198 -36.185966492 --29.486719131 -33.227870941 -36.217514038 --28.496377945 -33.231601715 -36.228958130 --27.499221802 -33.232868195 -36.232299805 --26.499872208 -33.233169556 -36.233074188 --25.499984741 -33.233219147 -36.233207703 --24.500000000 -33.233222961 -36.233222961 --23.500000000 -33.233222961 -36.233222961 --22.500000000 -33.233222961 -36.233222961 --21.500000000 -33.233222961 -36.233222961 --20.500000000 -33.233222961 -36.233222961 --19.500000000 -33.233222961 -36.233222961 --18.500000000 -33.233222961 -36.233222961 --17.500000000 -33.233222961 -36.233222961 --16.500000000 -33.233222961 -36.233222961 --15.500000000 -33.233222961 -36.233222961 --14.500000000 -33.233222961 -36.233222961 --13.500000000 -33.233222961 -36.233222961 --12.500000000 -33.233222961 -36.233222961 --11.500000000 -33.233222961 -36.233222961 --10.500000000 -33.233222961 -36.233222961 --9.500000000 -33.233222961 -36.233222961 --8.500000000 -33.233222961 -36.233222961 --7.500000000 -33.233222961 -36.233222961 --6.500000000 -33.233222961 -36.233222961 --5.500000000 -33.233222961 -36.233222961 --4.500000000 -33.233222961 -36.233222961 --3.500000000 -33.233222961 -36.233222961 --2.500000000 -33.233222961 -36.233222961 --1.500000000 -33.233222961 -36.233222961 --0.500000000 -33.233222961 -36.233222961 -0.500000000 -33.233222961 -36.233222961 -1.500000000 -33.233222961 -36.233222961 -2.500000000 -33.233222961 -36.233222961 -3.500000000 -33.233222961 -36.233222961 -4.500000000 -33.233222961 -36.233222961 -5.500000000 -33.233222961 -36.233222961 -6.500000000 -33.233222961 -36.233222961 -7.500000000 -33.233222961 -36.233222961 -8.500000000 -33.233222961 -36.233222961 -9.500000000 -33.233222961 -36.233222961 -10.500000000 -33.233222961 -36.233222961 -11.500000000 -33.233222961 -36.233222961 -12.500000000 -33.233222961 -36.233222961 -13.500000000 -33.233222961 -36.233222961 -14.500000000 -33.233222961 -36.233222961 -15.500000000 -33.233222961 -36.233222961 -16.500000000 -33.233222961 -36.233222961 -17.500000000 -33.233222961 -36.233222961 -18.500000000 -33.233222961 -36.233222961 -19.500000000 -33.233222961 -36.233222961 -20.500000000 -33.233222961 -36.233222961 -21.500000000 -33.233222961 -36.233222961 -22.500000000 -33.233222961 -36.233222961 -23.500000000 -33.233222961 -36.233222961 -24.499984741 -33.233222961 -36.233207703 -25.499872208 -33.233173370 -36.233074188 -26.499225616 -33.232868195 -36.232299805 -27.496377945 -33.231605530 -36.228958130 -28.486719131 -33.227874756 -36.217514038 -29.460596085 -33.220710754 -36.185966492 -30.405673981 -33.216838837 -36.112346649 -31.334243774 -33.254272461 -35.954723358 -32.030693054 -33.030693054 -35.846317291 --33.840927124 -32.035346985 -35.797351837 --33.254272461 -32.334243774 -35.954719543 --32.371025085 -32.371025085 -36.154674530 --31.438953400 -32.375358582 -36.280395508 --30.476003647 -32.379226685 -36.344284058 --29.492319107 -32.382312775 -36.371490479 --28.498050690 -32.383808136 -36.381023407 --27.499622345 -32.384300232 -36.383724213 
--26.499948502 -32.384407043 -36.384311676 --25.499996185 -32.384422302 -36.384407043 --24.500000000 -32.384422302 -36.384422302 --23.500000000 -32.384422302 -36.384422302 --22.500000000 -32.384422302 -36.384422302 --21.500000000 -32.384422302 -36.384422302 --20.500000000 -32.384422302 -36.384422302 --19.500000000 -32.384422302 -36.384422302 --18.500000000 -32.384422302 -36.384422302 --17.500000000 -32.384422302 -36.384422302 --16.500000000 -32.384422302 -36.384422302 --15.500000000 -32.384422302 -36.384422302 --14.500000000 -32.384422302 -36.384422302 --13.500000000 -32.384422302 -36.384422302 --12.500000000 -32.384422302 -36.384422302 --11.500000000 -32.384422302 -36.384422302 --10.500000000 -32.384422302 -36.384422302 --9.500000000 -32.384422302 -36.384422302 --8.500000000 -32.384422302 -36.384422302 --7.500000000 -32.384422302 -36.384422302 --6.500000000 -32.384422302 -36.384422302 --5.500000000 -32.384422302 -36.384422302 --4.500000000 -32.384422302 -36.384422302 --3.500000000 -32.384422302 -36.384422302 --2.500000000 -32.384422302 -36.384422302 --1.500000000 -32.384422302 -36.384422302 --0.500000000 -32.384422302 -36.384422302 -0.500000000 -32.384422302 -36.384422302 -1.500000000 -32.384422302 -36.384422302 -2.500000000 -32.384422302 -36.384422302 -3.500000000 -32.384422302 -36.384422302 -4.500000000 -32.384422302 -36.384422302 -5.500000000 -32.384422302 -36.384422302 -6.500000000 -32.384422302 -36.384422302 -7.500000000 -32.384422302 -36.384422302 -8.500000000 -32.384422302 -36.384422302 -9.500000000 -32.384422302 -36.384422302 -10.500000000 -32.384422302 -36.384422302 -11.500000000 -32.384422302 -36.384422302 -12.500000000 -32.384422302 -36.384422302 -13.500000000 -32.384422302 -36.384422302 -14.500000000 -32.384422302 -36.384422302 -15.500000000 -32.384422302 -36.384422302 -16.500000000 -32.384422302 -36.384422302 -17.500000000 -32.384422302 -36.384422302 -18.500000000 -32.384422302 -36.384422302 -19.500000000 -32.384422302 -36.384422302 -20.500000000 -32.384422302 -36.384422302 -21.500000000 -32.384422302 -36.384422302 -22.500000000 -32.384422302 -36.384422302 -23.500000000 -32.384422302 -36.384422302 -24.499996185 -32.384422302 -36.384410858 -25.499948502 -32.384407043 -36.384315491 -26.499622345 -32.384300232 -36.383724213 -27.498050690 -32.383811951 -36.381023407 -28.492321014 -32.382312775 -36.371490479 -29.476007462 -32.379222870 -36.344287872 -30.438953400 -32.375358582 -36.280395508 -31.371026993 -32.371025085 -36.154674530 -32.254276276 -32.334243774 -35.954723358 -32.840930939 -32.035343170 -35.797355652 --33.903373718 -31.336603165 -35.879737854 --33.216838837 -31.405673981 -36.112342834 --32.375358582 -31.438953400 -36.280395508 --31.451213837 -31.451221466 -36.380989075 --30.483785629 -31.456089020 -36.430477142 --29.495611191 -31.458080292 -36.450424194 --28.499073029 -31.458770752 -36.456943512 --27.499858856 -31.458948135 -36.458606720 --26.499988556 -31.458978653 -36.458934784 --25.500000000 -31.458980560 -36.458976746 --24.500000000 -31.458980560 -36.458984375 --23.500000000 -31.458980560 -36.458984375 --22.500000000 -31.458980560 -36.458984375 --21.500000000 -31.458980560 -36.458984375 --20.500000000 -31.458980560 -36.458984375 --19.500000000 -31.458980560 -36.458984375 --18.500000000 -31.458980560 -36.458984375 --17.500000000 -31.458980560 -36.458984375 --16.500000000 -31.458980560 -36.458984375 --15.500000000 -31.458980560 -36.458984375 --14.500000000 -31.458980560 -36.458984375 --13.500000000 -31.458980560 -36.458984375 --12.500000000 -31.458980560 
-36.458984375 --11.500000000 -31.458980560 -36.458984375 --10.500000000 -31.458980560 -36.458984375 --9.500000000 -31.458980560 -36.458984375 --8.500000000 -31.458980560 -36.458984375 --7.500000000 -31.458980560 -36.458984375 --6.500000000 -31.458980560 -36.458984375 --5.500000000 -31.458980560 -36.458984375 --4.500000000 -31.458980560 -36.458984375 --3.500000000 -31.458980560 -36.458984375 --2.500000000 -31.458980560 -36.458984375 --1.500000000 -31.458980560 -36.458984375 --0.500000000 -31.458980560 -36.458984375 -0.500000000 -31.458980560 -36.458984375 -1.500000000 -31.458980560 -36.458984375 -2.500000000 -31.458980560 -36.458984375 -3.500000000 -31.458980560 -36.458984375 -4.500000000 -31.458980560 -36.458984375 -5.500000000 -31.458980560 -36.458984375 -6.500000000 -31.458980560 -36.458984375 -7.500000000 -31.458980560 -36.458984375 -8.500000000 -31.458980560 -36.458984375 -9.500000000 -31.458980560 -36.458984375 -10.500000000 -31.458980560 -36.458984375 -11.500000000 -31.458980560 -36.458984375 -12.500000000 -31.458980560 -36.458984375 -13.500000000 -31.458980560 -36.458984375 -14.500000000 -31.458980560 -36.458984375 -15.500000000 -31.458980560 -36.458984375 -16.500000000 -31.458980560 -36.458984375 -17.500000000 -31.458980560 -36.458984375 -18.500000000 -31.458980560 -36.458984375 -19.500000000 -31.458980560 -36.458984375 -20.500000000 -31.458980560 -36.458984375 -21.500000000 -31.458980560 -36.458984375 -22.500000000 -31.458980560 -36.458984375 -23.500000000 -31.458980560 -36.458984375 -24.500000000 -31.458980560 -36.458976746 -25.499988556 -31.458978653 -36.458934784 -26.499858856 -31.458948135 -36.458606720 -27.499073029 -31.458770752 -36.456943512 -28.495611191 -31.458080292 -36.450424194 -29.483785629 -31.456085205 -36.430473328 -30.451217651 -31.451217651 -36.380989075 -31.375360489 -31.438953400 -36.280395508 -32.216842651 -31.405675888 -36.112342834 -32.903381348 -31.336603165 -35.879737854 --33.946807861 -30.442840576 -35.937091827 --33.220714569 -30.460592270 -36.185966492 --32.379222870 -30.476007462 -36.344284058 --31.456085205 -30.483789444 -36.430477142 --30.486968994 -30.486968994 -36.469234467 --29.496957779 -30.488048553 -36.483337402 --28.499475479 -30.488342285 -36.487373352 --27.499938965 -30.488397598 -36.488258362 --26.499996185 -30.488403320 -36.488391876 --25.500000000 -30.488403320 -36.488410950 --24.500000000 -30.488403320 -36.488410950 --23.500000000 -30.488403320 -36.488410950 --22.500000000 -30.488403320 -36.488410950 --21.500000000 -30.488403320 -36.488410950 --20.500000000 -30.488403320 -36.488410950 --19.500000000 -30.488403320 -36.488410950 --18.500000000 -30.488403320 -36.488410950 --17.500000000 -30.488403320 -36.488410950 --16.500000000 -30.488403320 -36.488410950 --15.500000000 -30.488403320 -36.488410950 --14.500000000 -30.488403320 -36.488410950 --13.500000000 -30.488403320 -36.488410950 --12.500000000 -30.488403320 -36.488410950 --11.500000000 -30.488403320 -36.488410950 --10.500000000 -30.488403320 -36.488410950 --9.500000000 -30.488403320 -36.488410950 --8.500000000 -30.488403320 -36.488410950 --7.500000000 -30.488403320 -36.488410950 --6.500000000 -30.488403320 -36.488410950 --5.500000000 -30.488403320 -36.488410950 --4.500000000 -30.488403320 -36.488410950 --3.500000000 -30.488403320 -36.488410950 --2.500000000 -30.488403320 -36.488410950 --1.500000000 -30.488403320 -36.488410950 --0.500000000 -30.488403320 -36.488410950 -0.500000000 -30.488403320 -36.488410950 -1.500000000 -30.488403320 -36.488410950 -2.500000000 -30.488403320 
-36.488410950 -3.500000000 -30.488403320 -36.488410950 -4.500000000 -30.488403320 -36.488410950 -5.500000000 -30.488403320 -36.488410950 -6.500000000 -30.488403320 -36.488410950 -7.500000000 -30.488403320 -36.488410950 -8.500000000 -30.488403320 -36.488410950 -9.500000000 -30.488403320 -36.488410950 -10.500000000 -30.488403320 -36.488410950 -11.500000000 -30.488403320 -36.488410950 -12.500000000 -30.488403320 -36.488410950 -13.500000000 -30.488403320 -36.488410950 -14.500000000 -30.488403320 -36.488410950 -15.500000000 -30.488403320 -36.488410950 -16.500000000 -30.488403320 -36.488410950 -17.500000000 -30.488403320 -36.488410950 -18.500000000 -30.488403320 -36.488410950 -19.500000000 -30.488403320 -36.488410950 -20.500000000 -30.488403320 -36.488410950 -21.500000000 -30.488403320 -36.488410950 -22.500000000 -30.488403320 -36.488410950 -23.500000000 -30.488403320 -36.488410950 -24.500000000 -30.488403320 -36.488410950 -25.499996185 -30.488403320 -36.488395691 -26.499938965 -30.488399506 -36.488258362 -27.499477386 -30.488346100 -36.487373352 -28.496959686 -30.488052368 -36.483337402 -29.486968994 -30.486968994 -36.469234467 -30.456085205 -30.483785629 -36.430477142 -31.379222870 -30.476007462 -36.344284058 -32.220714569 -30.460596085 -36.185966492 -32.946811676 -30.442840576 -35.937088013 --33.968864441 -29.481933594 -35.965759277 --33.227867126 -29.486715317 -36.217510223 --32.382312775 -29.492319107 -36.371490479 --31.458074570 -29.495611191 -36.450428009 --30.488048553 -29.496957779 -36.483337402 --29.497375488 -29.497371674 -36.494171143 --28.499578476 -29.497457504 -36.496910095 --27.499954224 -29.497470856 -36.497406006 --26.499996185 -29.497470856 -36.497467041 --25.500000000 -29.497470856 -36.497470856 --24.500000000 -29.497470856 -36.497470856 --23.500000000 -29.497470856 -36.497470856 --22.500000000 -29.497470856 -36.497470856 --21.500000000 -29.497470856 -36.497470856 --20.500000000 -29.497470856 -36.497470856 --19.500000000 -29.497470856 -36.497470856 --18.500000000 -29.497470856 -36.497470856 --17.500000000 -29.497470856 -36.497470856 --16.500000000 -29.497470856 -36.497470856 --15.500000000 -29.497470856 -36.497470856 --14.500000000 -29.497470856 -36.497470856 --13.500000000 -29.497470856 -36.497470856 --12.500000000 -29.497470856 -36.497470856 --11.500000000 -29.497470856 -36.497470856 --10.500000000 -29.497470856 -36.497470856 --9.500000000 -29.497470856 -36.497470856 --8.500000000 -29.497470856 -36.497470856 --7.500000000 -29.497470856 -36.497470856 --6.500000000 -29.497470856 -36.497470856 --5.500000000 -29.497470856 -36.497470856 --4.500000000 -29.497470856 -36.497470856 --3.500000000 -29.497470856 -36.497470856 --2.500000000 -29.497470856 -36.497470856 --1.500000000 -29.497470856 -36.497470856 --0.500000000 -29.497470856 -36.497470856 -0.500000000 -29.497470856 -36.497470856 -1.500000000 -29.497470856 -36.497470856 -2.500000000 -29.497470856 -36.497470856 -3.500000000 -29.497470856 -36.497470856 -4.500000000 -29.497470856 -36.497470856 -5.500000000 -29.497470856 -36.497470856 -6.500000000 -29.497470856 -36.497470856 -7.500000000 -29.497470856 -36.497470856 -8.500000000 -29.497470856 -36.497470856 -9.500000000 -29.497470856 -36.497470856 -10.500000000 -29.497470856 -36.497470856 -11.500000000 -29.497470856 -36.497470856 -12.500000000 -29.497470856 -36.497470856 -13.500000000 -29.497470856 -36.497470856 -14.500000000 -29.497470856 -36.497470856 -15.500000000 -29.497470856 -36.497470856 -16.500000000 -29.497470856 -36.497470856 -17.500000000 -29.497470856 -36.497470856 
-18.500000000 -29.497470856 -36.497470856 -19.500000000 -29.497470856 -36.497470856 -20.500000000 -29.497470856 -36.497470856 -21.500000000 -29.497470856 -36.497470856 -22.500000000 -29.497470856 -36.497470856 -23.500000000 -29.497470856 -36.497470856 -24.500000000 -29.497470856 -36.497470856 -25.499996185 -29.497470856 -36.497467041 -26.499954224 -29.497470856 -36.497409821 -27.499576569 -29.497461319 -36.496910095 -28.497371674 -29.497371674 -36.494174957 -29.488048553 -29.496959686 -36.483337402 -30.458074570 -29.495611191 -36.450428009 -31.382312775 -29.492319107 -36.371490479 -32.227874756 -29.486715317 -36.217514038 -32.968864441 -29.481933594 -35.965759277 --33.977611542 -28.495168686 -35.976860046 --33.231597900 -28.496377945 -36.228954315 --32.383808136 -28.498052597 -36.381023407 --31.458766937 -28.499073029 -36.456939697 --30.488346100 -28.499475479 -36.487373352 --29.497461319 -28.499576569 -36.496910095 --28.499593735 -28.499591827 -36.499160767 --27.499954224 -28.499591827 -36.499546051 --26.499996185 -28.499591827 -36.499588013 --25.500000000 -28.499591827 -36.499595642 --24.500000000 -28.499591827 -36.499595642 --23.500000000 -28.499591827 -36.499595642 --22.500000000 -28.499591827 -36.499595642 --21.500000000 -28.499591827 -36.499595642 --20.500000000 -28.499591827 -36.499595642 --19.500000000 -28.499591827 -36.499595642 --18.500000000 -28.499591827 -36.499595642 --17.500000000 -28.499591827 -36.499595642 --16.500000000 -28.499591827 -36.499595642 --15.500000000 -28.499591827 -36.499595642 --14.500000000 -28.499591827 -36.499595642 --13.500000000 -28.499591827 -36.499595642 --12.500000000 -28.499591827 -36.499595642 --11.500000000 -28.499591827 -36.499595642 --10.500000000 -28.499591827 -36.499595642 --9.500000000 -28.499591827 -36.499595642 --8.500000000 -28.499591827 -36.499595642 --7.500000000 -28.499591827 -36.499595642 --6.500000000 -28.499591827 -36.499595642 --5.500000000 -28.499591827 -36.499595642 --4.500000000 -28.499591827 -36.499595642 --3.500000000 -28.499591827 -36.499595642 --2.500000000 -28.499591827 -36.499595642 --1.500000000 -28.499591827 -36.499595642 --0.500000000 -28.499591827 -36.499595642 -0.500000000 -28.499591827 -36.499595642 -1.500000000 -28.499591827 -36.499595642 -2.500000000 -28.499591827 -36.499595642 -3.500000000 -28.499591827 -36.499595642 -4.500000000 -28.499591827 -36.499595642 -5.500000000 -28.499591827 -36.499595642 -6.500000000 -28.499591827 -36.499595642 -7.500000000 -28.499591827 -36.499595642 -8.500000000 -28.499591827 -36.499595642 -9.500000000 -28.499591827 -36.499595642 -10.500000000 -28.499591827 -36.499595642 -11.500000000 -28.499591827 -36.499595642 -12.500000000 -28.499591827 -36.499595642 -13.500000000 -28.499591827 -36.499595642 -14.500000000 -28.499591827 -36.499595642 -15.500000000 -28.499591827 -36.499595642 -16.500000000 -28.499591827 -36.499595642 -17.500000000 -28.499591827 -36.499595642 -18.500000000 -28.499591827 -36.499595642 -19.500000000 -28.499591827 -36.499595642 -20.500000000 -28.499591827 -36.499595642 -21.500000000 -28.499591827 -36.499595642 -22.500000000 -28.499591827 -36.499595642 -23.500000000 -28.499591827 -36.499595642 -24.500000000 -28.499591827 -36.499595642 -25.499996185 -28.499591827 -36.499588013 -26.499954224 -28.499591827 -36.499546051 -27.499591827 -28.499591827 -36.499160767 -28.497461319 -28.499576569 -36.496910095 -29.488346100 -28.499475479 -36.487373352 -30.458766937 -28.499073029 -36.456939697 -31.383810043 -28.498050690 -36.381031036 -32.231597900 -28.496377945 -36.228958130 
-32.977611542 -28.495168686 -35.976860046 --33.980331421 -27.498950958 -35.980201721 --33.232864380 -27.499221802 -36.232307434 --32.384300232 -27.499622345 -36.383720398 --31.458948135 -27.499858856 -36.458606720 --30.488399506 -27.499938965 -36.488258362 --29.497472763 -27.499954224 -36.497406006 --28.499593735 -27.499954224 -36.499546051 --27.499954224 -27.499954224 -36.499908447 --26.499996185 -27.499954224 -36.499950409 --25.500000000 -27.499954224 -36.499954224 --24.500000000 -27.499954224 -36.499954224 --23.500000000 -27.499954224 -36.499954224 --22.500000000 -27.499954224 -36.499954224 --21.500000000 -27.499954224 -36.499954224 --20.500000000 -27.499954224 -36.499954224 --19.500000000 -27.499954224 -36.499954224 --18.500000000 -27.499954224 -36.499954224 --17.500000000 -27.499954224 -36.499954224 --16.500000000 -27.499954224 -36.499954224 --15.500000000 -27.499954224 -36.499954224 --14.500000000 -27.499954224 -36.499954224 --13.500000000 -27.499954224 -36.499954224 --12.500000000 -27.499954224 -36.499954224 --11.500000000 -27.499954224 -36.499954224 --10.500000000 -27.499954224 -36.499954224 --9.500000000 -27.499954224 -36.499954224 --8.500000000 -27.499954224 -36.499954224 --7.500000000 -27.499954224 -36.499954224 --6.500000000 -27.499954224 -36.499954224 --5.500000000 -27.499954224 -36.499954224 --4.500000000 -27.499954224 -36.499954224 --3.500000000 -27.499954224 -36.499954224 --2.500000000 -27.499954224 -36.499954224 --1.500000000 -27.499954224 -36.499954224 --0.500000000 -27.499954224 -36.499954224 -0.500000000 -27.499954224 -36.499954224 -1.500000000 -27.499954224 -36.499954224 -2.500000000 -27.499954224 -36.499954224 -3.500000000 -27.499954224 -36.499954224 -4.500000000 -27.499954224 -36.499954224 -5.500000000 -27.499954224 -36.499954224 -6.500000000 -27.499954224 -36.499954224 -7.500000000 -27.499954224 -36.499954224 -8.500000000 -27.499954224 -36.499954224 -9.500000000 -27.499954224 -36.499954224 -10.500000000 -27.499954224 -36.499954224 -11.500000000 -27.499954224 -36.499954224 -12.500000000 -27.499954224 -36.499954224 -13.500000000 -27.499954224 -36.499954224 -14.500000000 -27.499954224 -36.499954224 -15.500000000 -27.499954224 -36.499954224 -16.500000000 -27.499954224 -36.499954224 -17.500000000 -27.499954224 -36.499954224 -18.500000000 -27.499954224 -36.499954224 -19.500000000 -27.499954224 -36.499954224 -20.500000000 -27.499954224 -36.499954224 -21.500000000 -27.499954224 -36.499954224 -22.500000000 -27.499954224 -36.499954224 -23.500000000 -27.499954224 -36.499954224 -24.500000000 -27.499954224 -36.499954224 -25.499996185 -27.499954224 -36.499950409 -26.499954224 -27.499954224 -36.499908447 -27.499591827 -27.499954224 -36.499542236 -28.497470856 -27.499954224 -36.497406006 -29.488399506 -27.499938965 -36.488258362 -30.458948135 -27.499858856 -36.458606720 -31.384298325 -27.499618530 -36.383720398 -32.232864380 -27.499225616 -36.232307434 -32.980327606 -27.498950958 -35.980201721 --33.980972290 -26.499826431 -35.980957031 --33.233165741 -26.499874115 -36.233070374 --32.384403229 -26.499948502 -36.384307861 --31.458978653 -26.499988556 -36.458930969 --30.488407135 -26.499996185 -36.488391876 --29.497472763 -26.499996185 -36.497467041 --28.499593735 -26.499996185 -36.499588013 --27.499954224 -26.499996185 -36.499950409 --26.499996185 -26.499996185 -36.499992371 --25.500000000 -26.499996185 -36.499996185 --24.500000000 -26.499996185 -36.499996185 --23.500000000 -26.499996185 -36.499996185 --22.500000000 -26.499996185 -36.499996185 --21.500000000 -26.499996185 
-36.499996185 --20.500000000 -26.499996185 -36.499996185 --19.500000000 -26.499996185 -36.499996185 --18.500000000 -26.499996185 -36.499996185 --17.500000000 -26.499996185 -36.499996185 --16.500000000 -26.499996185 -36.499996185 --15.500000000 -26.499996185 -36.499996185 --14.500000000 -26.499996185 -36.499996185 --13.500000000 -26.499996185 -36.499996185 --12.500000000 -26.499996185 -36.499996185 --11.500000000 -26.499996185 -36.499996185 --10.500000000 -26.499996185 -36.499996185 --9.500000000 -26.499996185 -36.499996185 --8.500000000 -26.499996185 -36.499996185 --7.500000000 -26.499996185 -36.499996185 --6.500000000 -26.499996185 -36.499996185 --5.500000000 -26.499996185 -36.499996185 --4.500000000 -26.499996185 -36.499996185 --3.500000000 -26.499996185 -36.499996185 --2.500000000 -26.499996185 -36.499996185 --1.500000000 -26.499996185 -36.499996185 --0.500000000 -26.499996185 -36.499996185 -0.500000000 -26.499996185 -36.499996185 -1.500000000 -26.499996185 -36.499996185 -2.500000000 -26.499996185 -36.499996185 -3.500000000 -26.499996185 -36.499996185 -4.500000000 -26.499996185 -36.499996185 -5.500000000 -26.499996185 -36.499996185 -6.500000000 -26.499996185 -36.499996185 -7.500000000 -26.499996185 -36.499996185 -8.500000000 -26.499996185 -36.499996185 -9.500000000 -26.499996185 -36.499996185 -10.500000000 -26.499996185 -36.499996185 -11.500000000 -26.499996185 -36.499996185 -12.500000000 -26.499996185 -36.499996185 -13.500000000 -26.499996185 -36.499996185 -14.500000000 -26.499996185 -36.499996185 -15.500000000 -26.499996185 -36.499996185 -16.500000000 -26.499996185 -36.499996185 -17.500000000 -26.499996185 -36.499996185 -18.500000000 -26.499996185 -36.499996185 -19.500000000 -26.499996185 -36.499996185 -20.500000000 -26.499996185 -36.499996185 -21.500000000 -26.499996185 -36.499996185 -22.500000000 -26.499996185 -36.499996185 -23.500000000 -26.499996185 -36.499996185 -24.500000000 -26.499996185 -36.499996185 -25.499996185 -26.499996185 -36.499992371 -26.499954224 -26.499996185 -36.499950409 -27.499591827 -26.499996185 -36.499588013 -28.497470856 -26.499996185 -36.497467041 -29.488407135 -26.499996185 -36.488391876 -30.458978653 -26.499988556 -36.458930969 -31.384403229 -26.499948502 -36.384311676 -32.233165741 -26.499874115 -36.233074188 -32.980976105 -26.499826431 -35.980960846 --33.981086731 -25.499979019 -35.981086731 --33.233215332 -25.499986649 -36.233203888 --32.384422302 -25.499996185 -36.384407043 --31.458978653 -25.500000000 -36.458972931 --30.488407135 -25.500000000 -36.488403320 --29.497472763 -25.500000000 -36.497467041 --28.499593735 -25.500000000 -36.499591827 --27.499954224 -25.500000000 -36.499954224 --26.499996185 -25.500000000 -36.499996185 --25.500000000 -25.500000000 -36.500000000 --24.500000000 -25.500000000 -36.500000000 --23.500000000 -25.500000000 -36.500000000 --22.500000000 -25.500000000 -36.500000000 --21.500000000 -25.500000000 -36.500000000 --20.500000000 -25.500000000 -36.500000000 --19.500000000 -25.500000000 -36.500000000 --18.500000000 -25.500000000 -36.500000000 --17.500000000 -25.500000000 -36.500000000 --16.500000000 -25.500000000 -36.500000000 --15.500000000 -25.500000000 -36.500000000 --14.500000000 -25.500000000 -36.500000000 --13.500000000 -25.500000000 -36.500000000 --12.500000000 -25.500000000 -36.500000000 --11.500000000 -25.500000000 -36.500000000 --10.500000000 -25.500000000 -36.500000000 --9.500000000 -25.500000000 -36.500000000 --8.500000000 -25.500000000 -36.500000000 --7.500000000 -25.500000000 -36.500000000 --6.500000000 -25.500000000 
-36.500000000 --5.500000000 -25.500000000 -36.500000000 --4.500000000 -25.500000000 -36.500000000 --3.500000000 -25.500000000 -36.500000000 --2.500000000 -25.500000000 -36.500000000 --1.500000000 -25.500000000 -36.500000000 --0.500000000 -25.500000000 -36.500000000 -0.500000000 -25.500000000 -36.500000000 -1.500000000 -25.500000000 -36.500000000 -2.500000000 -25.500000000 -36.500000000 -3.500000000 -25.500000000 -36.500000000 -4.500000000 -25.500000000 -36.500000000 -5.500000000 -25.500000000 -36.500000000 -6.500000000 -25.500000000 -36.500000000 -7.500000000 -25.500000000 -36.500000000 -8.500000000 -25.500000000 -36.500000000 -9.500000000 -25.500000000 -36.500000000 -10.500000000 -25.500000000 -36.500000000 -11.500000000 -25.500000000 -36.500000000 -12.500000000 -25.500000000 -36.500000000 -13.500000000 -25.500000000 -36.500000000 -14.500000000 -25.500000000 -36.500000000 -15.500000000 -25.500000000 -36.500000000 -16.500000000 -25.500000000 -36.500000000 -17.500000000 -25.500000000 -36.500000000 -18.500000000 -25.500000000 -36.500000000 -19.500000000 -25.500000000 -36.500000000 -20.500000000 -25.500000000 -36.500000000 -21.500000000 -25.500000000 -36.500000000 -22.500000000 -25.500000000 -36.500000000 -23.500000000 -25.500000000 -36.500000000 -24.500000000 -25.500000000 -36.500000000 -25.499996185 -25.500000000 -36.499996185 -26.499954224 -25.500000000 -36.499954224 -27.499591827 -25.500000000 -36.499591827 -28.497470856 -25.500000000 -36.497474670 -29.488407135 -25.500000000 -36.488403320 -30.458978653 -25.500000000 -36.458976746 -31.384418488 -25.499996185 -36.384407043 -32.233215332 -25.499986649 -36.233207703 -32.981090546 -25.499975204 -35.981090546 --33.981101990 -24.499998093 -35.981101990 --33.233222961 -24.499998093 -36.233222961 --32.384422302 -24.500000000 -36.384418488 --31.458978653 -24.500000000 -36.458976746 --30.488407135 -24.500000000 -36.488403320 --29.497472763 -24.500000000 -36.497467041 --28.499593735 -24.500000000 -36.499591827 --27.499954224 -24.500000000 -36.499954224 --26.499996185 -24.500000000 -36.499996185 --25.500000000 -24.500000000 -36.500000000 --24.500000000 -24.500000000 -36.500000000 --23.500000000 -24.500000000 -36.500000000 --22.500000000 -24.500000000 -36.500000000 --21.500000000 -24.500000000 -36.500000000 --20.500000000 -24.500000000 -36.500000000 --19.500000000 -24.500000000 -36.500000000 --18.500000000 -24.500000000 -36.500000000 --17.500000000 -24.500000000 -36.500000000 --16.500000000 -24.500000000 -36.500000000 --15.500000000 -24.500000000 -36.500000000 --14.500000000 -24.500000000 -36.500000000 --13.500000000 -24.500000000 -36.500000000 --12.500000000 -24.500000000 -36.500000000 --11.500000000 -24.500000000 -36.500000000 --10.500000000 -24.500000000 -36.500000000 --9.500000000 -24.500000000 -36.500000000 --8.500000000 -24.500000000 -36.500000000 --7.500000000 -24.500000000 -36.500000000 --6.500000000 -24.500000000 -36.500000000 --5.500000000 -24.500000000 -36.500000000 --4.500000000 -24.500000000 -36.500000000 --3.500000000 -24.500000000 -36.500000000 --2.500000000 -24.500000000 -36.500000000 --1.500000000 -24.500000000 -36.500000000 --0.500000000 -24.500000000 -36.500000000 -0.500000000 -24.500000000 -36.500000000 -1.500000000 -24.500000000 -36.500000000 -2.500000000 -24.500000000 -36.500000000 -3.500000000 -24.500000000 -36.500000000 -4.500000000 -24.500000000 -36.500000000 -5.500000000 -24.500000000 -36.500000000 -6.500000000 -24.500000000 -36.500000000 -7.500000000 -24.500000000 -36.500000000 -8.500000000 -24.500000000 -36.500000000 
-9.500000000 -24.500000000 -36.500000000 -10.500000000 -24.500000000 -36.500000000 -11.500000000 -24.500000000 -36.500000000 -12.500000000 -24.500000000 -36.500000000 -13.500000000 -24.500000000 -36.500000000 -14.500000000 -24.500000000 -36.500000000 -15.500000000 -24.500000000 -36.500000000 -16.500000000 -24.500000000 -36.500000000 -17.500000000 -24.500000000 -36.500000000 -18.500000000 -24.500000000 -36.500000000 -19.500000000 -24.500000000 -36.500000000 -20.500000000 -24.500000000 -36.500000000 -21.500000000 -24.500000000 -36.500000000 -22.500000000 -24.500000000 -36.500000000 -23.500000000 -24.500000000 -36.500000000 -24.500000000 -24.500000000 -36.500000000 -25.499996185 -24.500000000 -36.499996185 -26.499954224 -24.500000000 -36.499954224 -27.499591827 -24.500000000 -36.499591827 -28.497470856 -24.500000000 -36.497474670 -29.488407135 -24.500000000 -36.488403320 -30.458978653 -24.500000000 -36.458980560 -31.384418488 -24.500000000 -36.384422302 -32.233222961 -24.499998093 -36.233222961 -32.981101990 -24.499998093 -35.981101990 --33.981101990 -23.500000000 -35.981101990 --33.233222961 -23.500000000 -36.233222961 --32.384422302 -23.500000000 -36.384418488 --31.458978653 -23.500000000 -36.458976746 --30.488407135 -23.500000000 -36.488403320 --29.497472763 -23.500000000 -36.497467041 --28.499593735 -23.500000000 -36.499591827 --27.499954224 -23.500000000 -36.499954224 --26.499996185 -23.500000000 -36.499996185 --25.500000000 -23.500000000 -36.500000000 --24.500000000 -23.500000000 -36.500000000 --23.500000000 -23.500000000 -36.500000000 --22.500000000 -23.500000000 -36.500000000 --21.500000000 -23.500000000 -36.500000000 --20.500000000 -23.500000000 -36.500000000 --19.500000000 -23.500000000 -36.500000000 --18.500000000 -23.500000000 -36.500000000 --17.500000000 -23.500000000 -36.500000000 --16.500000000 -23.500000000 -36.500000000 --15.500000000 -23.500000000 -36.500000000 --14.500000000 -23.500000000 -36.500000000 --13.500000000 -23.500000000 -36.500000000 --12.500000000 -23.500000000 -36.500000000 --11.500000000 -23.500000000 -36.500000000 --10.500000000 -23.500000000 -36.500000000 --9.500000000 -23.500000000 -36.500000000 --8.500000000 -23.500000000 -36.500000000 --7.500000000 -23.500000000 -36.500000000 --6.500000000 -23.500000000 -36.500000000 --5.500000000 -23.500000000 -36.500000000 --4.500000000 -23.500000000 -36.500000000 --3.500000000 -23.500000000 -36.500000000 --2.500000000 -23.500000000 -36.500000000 --1.500000000 -23.500000000 -36.500000000 --0.500000000 -23.500000000 -36.500000000 -0.500000000 -23.500000000 -36.500000000 -1.500000000 -23.500000000 -36.500000000 -2.500000000 -23.500000000 -36.500000000 -3.500000000 -23.500000000 -36.500000000 -4.500000000 -23.500000000 -36.500000000 -5.500000000 -23.500000000 -36.500000000 -6.500000000 -23.500000000 -36.500000000 -7.500000000 -23.500000000 -36.500000000 -8.500000000 -23.500000000 -36.500000000 -9.500000000 -23.500000000 -36.500000000 -10.500000000 -23.500000000 -36.500000000 -11.500000000 -23.500000000 -36.500000000 -12.500000000 -23.500000000 -36.500000000 -13.500000000 -23.500000000 -36.500000000 -14.500000000 -23.500000000 -36.500000000 -15.500000000 -23.500000000 -36.500000000 -16.500000000 -23.500000000 -36.500000000 -17.500000000 -23.500000000 -36.500000000 -18.500000000 -23.500000000 -36.500000000 -19.500000000 -23.500000000 -36.500000000 -20.500000000 -23.500000000 -36.500000000 -21.500000000 -23.500000000 -36.500000000 -22.500000000 -23.500000000 -36.500000000 -23.500000000 -23.500000000 -36.500000000 
-24.500000000 -23.500000000 -36.500000000 -25.499996185 -23.500000000 -36.499996185 -26.499954224 -23.500000000 -36.499954224 -27.499591827 -23.500000000 -36.499591827 -28.497470856 -23.500000000 -36.497474670 -29.488407135 -23.500000000 -36.488403320 -30.458978653 -23.500000000 -36.458980560 -31.384418488 -23.500000000 -36.384422302 -32.233222961 -23.500000000 -36.233226776 -32.981101990 -23.500000000 -35.981109619 --33.981101990 -22.500000000 -35.981101990 --33.233222961 -22.500000000 -36.233222961 --32.384422302 -22.500000000 -36.384418488 --31.458978653 -22.500000000 -36.458976746 --30.488407135 -22.500000000 -36.488403320 --29.497472763 -22.500000000 -36.497467041 --28.499593735 -22.500000000 -36.499591827 --27.499954224 -22.500000000 -36.499954224 --26.499996185 -22.500000000 -36.499996185 --25.500000000 -22.500000000 -36.500000000 --24.500000000 -22.500000000 -36.500000000 --23.500000000 -22.500000000 -36.500000000 --22.500000000 -22.500000000 -36.500000000 --21.500000000 -22.500000000 -36.500000000 --20.500000000 -22.500000000 -36.500000000 --19.500000000 -22.500000000 -36.500000000 --18.500000000 -22.500000000 -36.500000000 --17.500000000 -22.500000000 -36.500000000 --16.500000000 -22.500000000 -36.500000000 --15.500000000 -22.500000000 -36.500000000 --14.500000000 -22.500000000 -36.500000000 --13.500000000 -22.500000000 -36.500000000 --12.500000000 -22.500000000 -36.500000000 --11.500000000 -22.500000000 -36.500000000 --10.500000000 -22.500000000 -36.500000000 --9.500000000 -22.500000000 -36.500000000 --8.500000000 -22.500000000 -36.500000000 --7.500000000 -22.500000000 -36.500000000 --6.500000000 -22.500000000 -36.500000000 --5.500000000 -22.500000000 -36.500000000 --4.500000000 -22.500000000 -36.500000000 --3.500000000 -22.500000000 -36.500000000 --2.500000000 -22.500000000 -36.500000000 --1.500000000 -22.500000000 -36.500000000 --0.500000000 -22.500000000 -36.500000000 -0.500000000 -22.500000000 -36.500000000 -1.500000000 -22.500000000 -36.500000000 -2.500000000 -22.500000000 -36.500000000 -3.500000000 -22.500000000 -36.500000000 -4.500000000 -22.500000000 -36.500000000 -5.500000000 -22.500000000 -36.500000000 -6.500000000 -22.500000000 -36.500000000 -7.500000000 -22.500000000 -36.500000000 -8.500000000 -22.500000000 -36.500000000 -9.500000000 -22.500000000 -36.500000000 -10.500000000 -22.500000000 -36.500000000 -11.500000000 -22.500000000 -36.500000000 -12.500000000 -22.500000000 -36.500000000 -13.500000000 -22.500000000 -36.500000000 -14.500000000 -22.500000000 -36.500000000 -15.500000000 -22.500000000 -36.500000000 -16.500000000 -22.500000000 -36.500000000 -17.500000000 -22.500000000 -36.500000000 -18.500000000 -22.500000000 -36.500000000 -19.500000000 -22.500000000 -36.500000000 -20.500000000 -22.500000000 -36.500000000 -21.500000000 -22.500000000 -36.500000000 -22.500000000 -22.500000000 -36.500000000 -23.500000000 -22.500000000 -36.500000000 -24.500000000 -22.500000000 -36.500000000 -25.499996185 -22.500000000 -36.499996185 -26.499954224 -22.500000000 -36.499954224 -27.499591827 -22.500000000 -36.499591827 -28.497470856 -22.500000000 -36.497474670 -29.488407135 -22.500000000 -36.488403320 -30.458978653 -22.500000000 -36.458980560 -31.384418488 -22.500000000 -36.384422302 -32.233222961 -22.500000000 -36.233226776 -32.981101990 -22.500000000 -35.981109619 --33.981101990 -21.500000000 -35.981101990 --33.233222961 -21.500000000 -36.233222961 --32.384422302 -21.500000000 -36.384418488 --31.458978653 -21.500000000 -36.458976746 --30.488407135 -21.500000000 -36.488403320 
--29.497472763 -21.500000000 -36.497467041 --28.499593735 -21.500000000 -36.499591827 --27.499954224 -21.500000000 -36.499954224 --26.499996185 -21.500000000 -36.499996185 --25.500000000 -21.500000000 -36.500000000 --24.500000000 -21.500000000 -36.500000000 --23.500000000 -21.500000000 -36.500000000 --22.500000000 -21.500000000 -36.500000000 --21.500000000 -21.500000000 -36.500000000 --20.500000000 -21.500000000 -36.500000000 --19.500000000 -21.500000000 -36.500000000 --18.500000000 -21.500000000 -36.500000000 --17.500000000 -21.500000000 -36.500000000 --16.500000000 -21.500000000 -36.500000000 --15.500000000 -21.500000000 -36.500000000 --14.500000000 -21.500000000 -36.500000000 --13.500000000 -21.500000000 -36.500000000 --12.500000000 -21.500000000 -36.500000000 --11.500000000 -21.500000000 -36.500000000 --10.500000000 -21.500000000 -36.500000000 --9.500000000 -21.500000000 -36.500000000 --8.500000000 -21.500000000 -36.500000000 --7.500000000 -21.500000000 -36.500000000 --6.500000000 -21.500000000 -36.500000000 --5.500000000 -21.500000000 -36.500000000 --4.500000000 -21.500000000 -36.500000000 --3.500000000 -21.500000000 -36.500000000 --2.500000000 -21.500000000 -36.500000000 --1.500000000 -21.500000000 -36.500000000 --0.500000000 -21.500000000 -36.500000000 -0.500000000 -21.500000000 -36.500000000 -1.500000000 -21.500000000 -36.500000000 -2.500000000 -21.500000000 -36.500000000 -3.500000000 -21.500000000 -36.500000000 -4.500000000 -21.500000000 -36.500000000 -5.500000000 -21.500000000 -36.500000000 -6.500000000 -21.500000000 -36.500000000 -7.500000000 -21.500000000 -36.500000000 -8.500000000 -21.500000000 -36.500000000 -9.500000000 -21.500000000 -36.500000000 -10.500000000 -21.500000000 -36.500000000 -11.500000000 -21.500000000 -36.500000000 -12.500000000 -21.500000000 -36.500000000 -13.500000000 -21.500000000 -36.500000000 -14.500000000 -21.500000000 -36.500000000 -15.500000000 -21.500000000 -36.500000000 -16.500000000 -21.500000000 -36.500000000 -17.500000000 -21.500000000 -36.500000000 -18.500000000 -21.500000000 -36.500000000 -19.500000000 -21.500000000 -36.500000000 -20.500000000 -21.500000000 -36.500000000 -21.500000000 -21.500000000 -36.500000000 -22.500000000 -21.500000000 -36.500000000 -23.500000000 -21.500000000 -36.500000000 -24.500000000 -21.500000000 -36.500000000 -25.499996185 -21.500000000 -36.499996185 -26.499954224 -21.500000000 -36.499954224 -27.499591827 -21.500000000 -36.499591827 -28.497470856 -21.500000000 -36.497474670 -29.488407135 -21.500000000 -36.488403320 -30.458978653 -21.500000000 -36.458980560 -31.384418488 -21.500000000 -36.384422302 -32.233222961 -21.500000000 -36.233226776 -32.981101990 -21.500000000 -35.981109619 --33.981101990 -20.500000000 -35.981101990 --33.233222961 -20.500000000 -36.233222961 --32.384422302 -20.500000000 -36.384418488 --31.458978653 -20.500000000 -36.458976746 --30.488407135 -20.500000000 -36.488403320 --29.497472763 -20.500000000 -36.497467041 --28.499593735 -20.500000000 -36.499591827 --27.499954224 -20.500000000 -36.499954224 --26.499996185 -20.500000000 -36.499996185 --25.500000000 -20.500000000 -36.500000000 --24.500000000 -20.500000000 -36.500000000 --23.500000000 -20.500000000 -36.500000000 --22.500000000 -20.500000000 -36.500000000 --21.500000000 -20.500000000 -36.500000000 --20.500000000 -20.500000000 -36.500000000 --19.500000000 -20.500000000 -36.500000000 --18.500000000 -20.500000000 -36.500000000 --17.500000000 -20.500000000 -36.500000000 --16.500000000 -20.500000000 -36.500000000 --15.500000000 -20.500000000 
-36.500000000 --14.500000000 -20.500000000 -36.500000000 --13.500000000 -20.500000000 -36.500000000 --12.500000000 -20.500000000 -36.500000000 --11.500000000 -20.500000000 -36.500000000 --10.500000000 -20.500000000 -36.500000000 --9.500000000 -20.500000000 -36.500000000 --8.500000000 -20.500000000 -36.500000000 --7.500000000 -20.500000000 -36.500000000 --6.500000000 -20.500000000 -36.500000000 --5.500000000 -20.500000000 -36.500000000 --4.500000000 -20.500000000 -36.500000000 --3.500000000 -20.500000000 -36.500000000 --2.500000000 -20.500000000 -36.500000000 --1.500000000 -20.500000000 -36.500000000 --0.500000000 -20.500000000 -36.500000000 -0.500000000 -20.500000000 -36.500000000 -1.500000000 -20.500000000 -36.500000000 -2.500000000 -20.500000000 -36.500000000 -3.500000000 -20.500000000 -36.500000000 -4.500000000 -20.500000000 -36.500000000 -5.500000000 -20.500000000 -36.500000000 -6.500000000 -20.500000000 -36.500000000 -7.500000000 -20.500000000 -36.500000000 -8.500000000 -20.500000000 -36.500000000 -9.500000000 -20.500000000 -36.500000000 -10.500000000 -20.500000000 -36.500000000 -11.500000000 -20.500000000 -36.500000000 -12.500000000 -20.500000000 -36.500000000 -13.500000000 -20.500000000 -36.500000000 -14.500000000 -20.500000000 -36.500000000 -15.500000000 -20.500000000 -36.500000000 -16.500000000 -20.500000000 -36.500000000 -17.500000000 -20.500000000 -36.500000000 -18.500000000 -20.500000000 -36.500000000 -19.500000000 -20.500000000 -36.500000000 -20.500000000 -20.500000000 -36.500000000 -21.500000000 -20.500000000 -36.500000000 -22.500000000 -20.500000000 -36.500000000 -23.500000000 -20.500000000 -36.500000000 -24.500000000 -20.500000000 -36.500000000 -25.499996185 -20.500000000 -36.499996185 -26.499954224 -20.500000000 -36.499954224 -27.499591827 -20.500000000 -36.499591827 -28.497470856 -20.500000000 -36.497474670 -29.488407135 -20.500000000 -36.488403320 -30.458978653 -20.500000000 -36.458980560 -31.384418488 -20.500000000 -36.384422302 -32.233222961 -20.500000000 -36.233226776 -32.981101990 -20.500000000 -35.981109619 --33.981101990 -19.500000000 -35.981101990 --33.233222961 -19.500000000 -36.233222961 --32.384422302 -19.500000000 -36.384418488 --31.458978653 -19.500000000 -36.458976746 --30.488407135 -19.500000000 -36.488403320 --29.497472763 -19.500000000 -36.497467041 --28.499593735 -19.500000000 -36.499591827 --27.499954224 -19.500000000 -36.499954224 --26.499996185 -19.500000000 -36.499996185 --25.500000000 -19.500000000 -36.500000000 --24.500000000 -19.500000000 -36.500000000 --23.500000000 -19.500000000 -36.500000000 --22.500000000 -19.500000000 -36.500000000 --21.500000000 -19.500000000 -36.500000000 --20.500000000 -19.500000000 -36.500000000 --19.500000000 -19.500000000 -36.500000000 --18.500000000 -19.500000000 -36.500000000 --17.500000000 -19.500000000 -36.500000000 --16.500000000 -19.500000000 -36.500000000 --15.500000000 -19.500000000 -36.500000000 --14.500000000 -19.500000000 -36.500000000 --13.500000000 -19.500000000 -36.500000000 --12.500000000 -19.500000000 -36.500000000 --11.500000000 -19.500000000 -36.500000000 --10.500000000 -19.500000000 -36.500000000 --9.500000000 -19.500000000 -36.500000000 --8.500000000 -19.500000000 -36.500000000 --7.500000000 -19.500000000 -36.500000000 --6.500000000 -19.500000000 -36.500000000 --5.500000000 -19.500000000 -36.500000000 --4.500000000 -19.500000000 -36.500000000 --3.500000000 -19.500000000 -36.500000000 --2.500000000 -19.500000000 -36.500000000 --1.500000000 -19.500000000 -36.500000000 --0.500000000 -19.500000000 
[data block elided: several thousand diff lines of floating-point coordinate triples ("x y z", e.g. "-36.500000000 -0.500000000 -19.500000000") from the patched data file, concatenated here with their original line breaks lost; no prose or code content]
-36.500000000 --9.500000000 5.500000000 -36.500000000 --8.500000000 5.500000000 -36.500000000 --7.500000000 5.500000000 -36.500000000 --6.500000000 5.500000000 -36.500000000 --5.500000000 5.500000000 -36.500000000 --4.500000000 5.500000000 -36.500000000 --3.500000000 5.500000000 -36.500000000 --2.500000000 5.500000000 -36.500000000 --1.500000000 5.500000000 -36.500000000 --0.500000000 5.500000000 -36.500000000 -0.500000000 5.500000000 -36.500000000 -1.500000000 5.500000000 -36.500000000 -2.500000000 5.500000000 -36.500000000 -3.500000000 5.500000000 -36.500000000 -4.500000000 5.500000000 -36.500000000 -5.500000000 5.500000000 -36.500000000 -6.500000000 5.500000000 -36.500000000 -7.500000000 5.500000000 -36.500000000 -8.500000000 5.500000000 -36.500000000 -9.500000000 5.500000000 -36.500000000 -10.500000000 5.500000000 -36.500000000 -11.500000000 5.500000000 -36.500000000 -12.500000000 5.500000000 -36.500000000 -13.500000000 5.500000000 -36.500000000 -14.500000000 5.500000000 -36.500000000 -15.500000000 5.500000000 -36.500000000 -16.500000000 5.500000000 -36.500000000 -17.500000000 5.500000000 -36.500000000 -18.500000000 5.500000000 -36.500000000 -19.500000000 5.500000000 -36.500000000 -20.500000000 5.500000000 -36.500000000 -21.500000000 5.500000000 -36.500000000 -22.500000000 5.500000000 -36.500000000 -23.500000000 5.500000000 -36.500000000 -24.500000000 5.500000000 -36.500000000 -25.499996185 5.500000000 -36.499996185 -26.499954224 5.500000000 -36.499954224 -27.499591827 5.500000000 -36.499591827 -28.497470856 5.500000000 -36.497474670 -29.488407135 5.500000000 -36.488403320 -30.458978653 5.500000000 -36.458980560 -31.384418488 5.500000000 -36.384422302 -32.233222961 5.500000000 -36.233226776 -32.981101990 5.500000000 -35.981109619 --33.981101990 6.500000000 -35.981101990 --33.233222961 6.500000000 -36.233222961 --32.384422302 6.500000000 -36.384418488 --31.458978653 6.500000000 -36.458976746 --30.488407135 6.500000000 -36.488403320 --29.497472763 6.500000000 -36.497467041 --28.499593735 6.500000000 -36.499591827 --27.499954224 6.500000000 -36.499954224 --26.499996185 6.500000000 -36.499996185 --25.500000000 6.500000000 -36.500000000 --24.500000000 6.500000000 -36.500000000 --23.500000000 6.500000000 -36.500000000 --22.500000000 6.500000000 -36.500000000 --21.500000000 6.500000000 -36.500000000 --20.500000000 6.500000000 -36.500000000 --19.500000000 6.500000000 -36.500000000 --18.500000000 6.500000000 -36.500000000 --17.500000000 6.500000000 -36.500000000 --16.500000000 6.500000000 -36.500000000 --15.500000000 6.500000000 -36.500000000 --14.500000000 6.500000000 -36.500000000 --13.500000000 6.500000000 -36.500000000 --12.500000000 6.500000000 -36.500000000 --11.500000000 6.500000000 -36.500000000 --10.500000000 6.500000000 -36.500000000 --9.500000000 6.500000000 -36.500000000 --8.500000000 6.500000000 -36.500000000 --7.500000000 6.500000000 -36.500000000 --6.500000000 6.500000000 -36.500000000 --5.500000000 6.500000000 -36.500000000 --4.500000000 6.500000000 -36.500000000 --3.500000000 6.500000000 -36.500000000 --2.500000000 6.500000000 -36.500000000 --1.500000000 6.500000000 -36.500000000 --0.500000000 6.500000000 -36.500000000 -0.500000000 6.500000000 -36.500000000 -1.500000000 6.500000000 -36.500000000 -2.500000000 6.500000000 -36.500000000 -3.500000000 6.500000000 -36.500000000 -4.500000000 6.500000000 -36.500000000 -5.500000000 6.500000000 -36.500000000 -6.500000000 6.500000000 -36.500000000 -7.500000000 6.500000000 -36.500000000 -8.500000000 6.500000000 -36.500000000 -9.500000000 
6.500000000 -36.500000000 -10.500000000 6.500000000 -36.500000000 -11.500000000 6.500000000 -36.500000000 -12.500000000 6.500000000 -36.500000000 -13.500000000 6.500000000 -36.500000000 -14.500000000 6.500000000 -36.500000000 -15.500000000 6.500000000 -36.500000000 -16.500000000 6.500000000 -36.500000000 -17.500000000 6.500000000 -36.500000000 -18.500000000 6.500000000 -36.500000000 -19.500000000 6.500000000 -36.500000000 -20.500000000 6.500000000 -36.500000000 -21.500000000 6.500000000 -36.500000000 -22.500000000 6.500000000 -36.500000000 -23.500000000 6.500000000 -36.500000000 -24.500000000 6.500000000 -36.500000000 -25.499996185 6.500000000 -36.499996185 -26.499954224 6.500000000 -36.499954224 -27.499591827 6.500000000 -36.499591827 -28.497470856 6.500000000 -36.497474670 -29.488407135 6.500000000 -36.488403320 -30.458978653 6.500000000 -36.458980560 -31.384418488 6.500000000 -36.384422302 -32.233222961 6.500000000 -36.233226776 -32.981101990 6.500000000 -35.981109619 --33.981101990 7.500000000 -35.981101990 --33.233222961 7.500000000 -36.233222961 --32.384422302 7.500000000 -36.384418488 --31.458978653 7.500000000 -36.458976746 --30.488407135 7.500000000 -36.488403320 --29.497472763 7.500000000 -36.497467041 --28.499593735 7.500000000 -36.499591827 --27.499954224 7.500000000 -36.499954224 --26.499996185 7.500000000 -36.499996185 --25.500000000 7.500000000 -36.500000000 --24.500000000 7.500000000 -36.500000000 --23.500000000 7.500000000 -36.500000000 --22.500000000 7.500000000 -36.500000000 --21.500000000 7.500000000 -36.500000000 --20.500000000 7.500000000 -36.500000000 --19.500000000 7.500000000 -36.500000000 --18.500000000 7.500000000 -36.500000000 --17.500000000 7.500000000 -36.500000000 --16.500000000 7.500000000 -36.500000000 --15.500000000 7.500000000 -36.500000000 --14.500000000 7.500000000 -36.500000000 --13.500000000 7.500000000 -36.500000000 --12.500000000 7.500000000 -36.500000000 --11.500000000 7.500000000 -36.500000000 --10.500000000 7.500000000 -36.500000000 --9.500000000 7.500000000 -36.500000000 --8.500000000 7.500000000 -36.500000000 --7.500000000 7.500000000 -36.500000000 --6.500000000 7.500000000 -36.500000000 --5.500000000 7.500000000 -36.500000000 --4.500000000 7.500000000 -36.500000000 --3.500000000 7.500000000 -36.500000000 --2.500000000 7.500000000 -36.500000000 --1.500000000 7.500000000 -36.500000000 --0.500000000 7.500000000 -36.500000000 -0.500000000 7.500000000 -36.500000000 -1.500000000 7.500000000 -36.500000000 -2.500000000 7.500000000 -36.500000000 -3.500000000 7.500000000 -36.500000000 -4.500000000 7.500000000 -36.500000000 -5.500000000 7.500000000 -36.500000000 -6.500000000 7.500000000 -36.500000000 -7.500000000 7.500000000 -36.500000000 -8.500000000 7.500000000 -36.500000000 -9.500000000 7.500000000 -36.500000000 -10.500000000 7.500000000 -36.500000000 -11.500000000 7.500000000 -36.500000000 -12.500000000 7.500000000 -36.500000000 -13.500000000 7.500000000 -36.500000000 -14.500000000 7.500000000 -36.500000000 -15.500000000 7.500000000 -36.500000000 -16.500000000 7.500000000 -36.500000000 -17.500000000 7.500000000 -36.500000000 -18.500000000 7.500000000 -36.500000000 -19.500000000 7.500000000 -36.500000000 -20.500000000 7.500000000 -36.500000000 -21.500000000 7.500000000 -36.500000000 -22.500000000 7.500000000 -36.500000000 -23.500000000 7.500000000 -36.500000000 -24.500000000 7.500000000 -36.500000000 -25.499996185 7.500000000 -36.499996185 -26.499954224 7.500000000 -36.499954224 -27.499591827 7.500000000 -36.499591827 -28.497470856 7.500000000 
-36.497474670 -29.488407135 7.500000000 -36.488403320 -30.458978653 7.500000000 -36.458980560 -31.384418488 7.500000000 -36.384422302 -32.233222961 7.500000000 -36.233226776 -32.981101990 7.500000000 -35.981109619 --33.981101990 8.500000000 -35.981101990 --33.233222961 8.500000000 -36.233222961 --32.384422302 8.500000000 -36.384418488 --31.458978653 8.500000000 -36.458976746 --30.488407135 8.500000000 -36.488403320 --29.497472763 8.500000000 -36.497467041 --28.499593735 8.500000000 -36.499591827 --27.499954224 8.500000000 -36.499954224 --26.499996185 8.500000000 -36.499996185 --25.500000000 8.500000000 -36.500000000 --24.500000000 8.500000000 -36.500000000 --23.500000000 8.500000000 -36.500000000 --22.500000000 8.500000000 -36.500000000 --21.500000000 8.500000000 -36.500000000 --20.500000000 8.500000000 -36.500000000 --19.500000000 8.500000000 -36.500000000 --18.500000000 8.500000000 -36.500000000 --17.500000000 8.500000000 -36.500000000 --16.500000000 8.500000000 -36.500000000 --15.500000000 8.500000000 -36.500000000 --14.500000000 8.500000000 -36.500000000 --13.500000000 8.500000000 -36.500000000 --12.500000000 8.500000000 -36.500000000 --11.500000000 8.500000000 -36.500000000 --10.500000000 8.500000000 -36.500000000 --9.500000000 8.500000000 -36.500000000 --8.500000000 8.500000000 -36.500000000 --7.500000000 8.500000000 -36.500000000 --6.500000000 8.500000000 -36.500000000 --5.500000000 8.500000000 -36.500000000 --4.500000000 8.500000000 -36.500000000 --3.500000000 8.500000000 -36.500000000 --2.500000000 8.500000000 -36.500000000 --1.500000000 8.500000000 -36.500000000 --0.500000000 8.500000000 -36.500000000 -0.500000000 8.500000000 -36.500000000 -1.500000000 8.500000000 -36.500000000 -2.500000000 8.500000000 -36.500000000 -3.500000000 8.500000000 -36.500000000 -4.500000000 8.500000000 -36.500000000 -5.500000000 8.500000000 -36.500000000 -6.500000000 8.500000000 -36.500000000 -7.500000000 8.500000000 -36.500000000 -8.500000000 8.500000000 -36.500000000 -9.500000000 8.500000000 -36.500000000 -10.500000000 8.500000000 -36.500000000 -11.500000000 8.500000000 -36.500000000 -12.500000000 8.500000000 -36.500000000 -13.500000000 8.500000000 -36.500000000 -14.500000000 8.500000000 -36.500000000 -15.500000000 8.500000000 -36.500000000 -16.500000000 8.500000000 -36.500000000 -17.500000000 8.500000000 -36.500000000 -18.500000000 8.500000000 -36.500000000 -19.500000000 8.500000000 -36.500000000 -20.500000000 8.500000000 -36.500000000 -21.500000000 8.500000000 -36.500000000 -22.500000000 8.500000000 -36.500000000 -23.500000000 8.500000000 -36.500000000 -24.500000000 8.500000000 -36.500000000 -25.499996185 8.500000000 -36.499996185 -26.499954224 8.500000000 -36.499954224 -27.499591827 8.500000000 -36.499591827 -28.497470856 8.500000000 -36.497474670 -29.488407135 8.500000000 -36.488403320 -30.458978653 8.500000000 -36.458980560 -31.384418488 8.500000000 -36.384422302 -32.233222961 8.500000000 -36.233226776 -32.981101990 8.500000000 -35.981109619 --33.981101990 9.500000000 -35.981101990 --33.233222961 9.500000000 -36.233222961 --32.384422302 9.500000000 -36.384418488 --31.458978653 9.500000000 -36.458976746 --30.488407135 9.500000000 -36.488403320 --29.497472763 9.500000000 -36.497467041 --28.499593735 9.500000000 -36.499591827 --27.499954224 9.500000000 -36.499954224 --26.499996185 9.500000000 -36.499996185 --25.500000000 9.500000000 -36.500000000 --24.500000000 9.500000000 -36.500000000 --23.500000000 9.500000000 -36.500000000 --22.500000000 9.500000000 -36.500000000 --21.500000000 9.500000000 
-36.500000000 --20.500000000 9.500000000 -36.500000000 --19.500000000 9.500000000 -36.500000000 --18.500000000 9.500000000 -36.500000000 --17.500000000 9.500000000 -36.500000000 --16.500000000 9.500000000 -36.500000000 --15.500000000 9.500000000 -36.500000000 --14.500000000 9.500000000 -36.500000000 --13.500000000 9.500000000 -36.500000000 --12.500000000 9.500000000 -36.500000000 --11.500000000 9.500000000 -36.500000000 --10.500000000 9.500000000 -36.500000000 --9.500000000 9.500000000 -36.500000000 --8.500000000 9.500000000 -36.500000000 --7.500000000 9.500000000 -36.500000000 --6.500000000 9.500000000 -36.500000000 --5.500000000 9.500000000 -36.500000000 --4.500000000 9.500000000 -36.500000000 --3.500000000 9.500000000 -36.500000000 --2.500000000 9.500000000 -36.500000000 --1.500000000 9.500000000 -36.500000000 --0.500000000 9.500000000 -36.500000000 -0.500000000 9.500000000 -36.500000000 -1.500000000 9.500000000 -36.500000000 -2.500000000 9.500000000 -36.500000000 -3.500000000 9.500000000 -36.500000000 -4.500000000 9.500000000 -36.500000000 -5.500000000 9.500000000 -36.500000000 -6.500000000 9.500000000 -36.500000000 -7.500000000 9.500000000 -36.500000000 -8.500000000 9.500000000 -36.500000000 -9.500000000 9.500000000 -36.500000000 -10.500000000 9.500000000 -36.500000000 -11.500000000 9.500000000 -36.500000000 -12.500000000 9.500000000 -36.500000000 -13.500000000 9.500000000 -36.500000000 -14.500000000 9.500000000 -36.500000000 -15.500000000 9.500000000 -36.500000000 -16.500000000 9.500000000 -36.500000000 -17.500000000 9.500000000 -36.500000000 -18.500000000 9.500000000 -36.500000000 -19.500000000 9.500000000 -36.500000000 -20.500000000 9.500000000 -36.500000000 -21.500000000 9.500000000 -36.500000000 -22.500000000 9.500000000 -36.500000000 -23.500000000 9.500000000 -36.500000000 -24.500000000 9.500000000 -36.500000000 -25.499996185 9.500000000 -36.499996185 -26.499954224 9.500000000 -36.499954224 -27.499591827 9.500000000 -36.499591827 -28.497470856 9.500000000 -36.497474670 -29.488407135 9.500000000 -36.488403320 -30.458978653 9.500000000 -36.458980560 -31.384418488 9.500000000 -36.384422302 -32.233222961 9.500000000 -36.233226776 -32.981101990 9.500000000 -35.981109619 --33.981101990 10.500000000 -35.981101990 --33.233222961 10.500000000 -36.233222961 --32.384422302 10.500000000 -36.384418488 --31.458978653 10.500000000 -36.458976746 --30.488407135 10.500000000 -36.488403320 --29.497472763 10.500000000 -36.497467041 --28.499593735 10.500000000 -36.499591827 --27.499954224 10.500000000 -36.499954224 --26.499996185 10.500000000 -36.499996185 --25.500000000 10.500000000 -36.500000000 --24.500000000 10.500000000 -36.500000000 --23.500000000 10.500000000 -36.500000000 --22.500000000 10.500000000 -36.500000000 --21.500000000 10.500000000 -36.500000000 --20.500000000 10.500000000 -36.500000000 --19.500000000 10.500000000 -36.500000000 --18.500000000 10.500000000 -36.500000000 --17.500000000 10.500000000 -36.500000000 --16.500000000 10.500000000 -36.500000000 --15.500000000 10.500000000 -36.500000000 --14.500000000 10.500000000 -36.500000000 --13.500000000 10.500000000 -36.500000000 --12.500000000 10.500000000 -36.500000000 --11.500000000 10.500000000 -36.500000000 --10.500000000 10.500000000 -36.500000000 --9.500000000 10.500000000 -36.500000000 --8.500000000 10.500000000 -36.500000000 --7.500000000 10.500000000 -36.500000000 --6.500000000 10.500000000 -36.500000000 --5.500000000 10.500000000 -36.500000000 --4.500000000 10.500000000 -36.500000000 --3.500000000 10.500000000 -36.500000000 
--2.500000000 10.500000000 -36.500000000 --1.500000000 10.500000000 -36.500000000 --0.500000000 10.500000000 -36.500000000 -0.500000000 10.500000000 -36.500000000 -1.500000000 10.500000000 -36.500000000 -2.500000000 10.500000000 -36.500000000 -3.500000000 10.500000000 -36.500000000 -4.500000000 10.500000000 -36.500000000 -5.500000000 10.500000000 -36.500000000 -6.500000000 10.500000000 -36.500000000 -7.500000000 10.500000000 -36.500000000 -8.500000000 10.500000000 -36.500000000 -9.500000000 10.500000000 -36.500000000 -10.500000000 10.500000000 -36.500000000 -11.500000000 10.500000000 -36.500000000 -12.500000000 10.500000000 -36.500000000 -13.500000000 10.500000000 -36.500000000 -14.500000000 10.500000000 -36.500000000 -15.500000000 10.500000000 -36.500000000 -16.500000000 10.500000000 -36.500000000 -17.500000000 10.500000000 -36.500000000 -18.500000000 10.500000000 -36.500000000 -19.500000000 10.500000000 -36.500000000 -20.500000000 10.500000000 -36.500000000 -21.500000000 10.500000000 -36.500000000 -22.500000000 10.500000000 -36.500000000 -23.500000000 10.500000000 -36.500000000 -24.500000000 10.500000000 -36.500000000 -25.499996185 10.500000000 -36.499996185 -26.499954224 10.500000000 -36.499954224 -27.499591827 10.500000000 -36.499591827 -28.497470856 10.500000000 -36.497474670 -29.488407135 10.500000000 -36.488403320 -30.458978653 10.500000000 -36.458980560 -31.384418488 10.500000000 -36.384422302 -32.233222961 10.500000000 -36.233226776 -32.981101990 10.500000000 -35.981109619 --33.981101990 11.500000000 -35.981101990 --33.233222961 11.500000000 -36.233222961 --32.384422302 11.500000000 -36.384418488 --31.458978653 11.500000000 -36.458976746 --30.488407135 11.500000000 -36.488403320 --29.497472763 11.500000000 -36.497467041 --28.499593735 11.500000000 -36.499591827 --27.499954224 11.500000000 -36.499954224 --26.499996185 11.500000000 -36.499996185 --25.500000000 11.500000000 -36.500000000 --24.500000000 11.500000000 -36.500000000 --23.500000000 11.500000000 -36.500000000 --22.500000000 11.500000000 -36.500000000 --21.500000000 11.500000000 -36.500000000 --20.500000000 11.500000000 -36.500000000 --19.500000000 11.500000000 -36.500000000 --18.500000000 11.500000000 -36.500000000 --17.500000000 11.500000000 -36.500000000 --16.500000000 11.500000000 -36.500000000 --15.500000000 11.500000000 -36.500000000 --14.500000000 11.500000000 -36.500000000 --13.500000000 11.500000000 -36.500000000 --12.500000000 11.500000000 -36.500000000 --11.500000000 11.500000000 -36.500000000 --10.500000000 11.500000000 -36.500000000 --9.500000000 11.500000000 -36.500000000 --8.500000000 11.500000000 -36.500000000 --7.500000000 11.500000000 -36.500000000 --6.500000000 11.500000000 -36.500000000 --5.500000000 11.500000000 -36.500000000 --4.500000000 11.500000000 -36.500000000 --3.500000000 11.500000000 -36.500000000 --2.500000000 11.500000000 -36.500000000 --1.500000000 11.500000000 -36.500000000 --0.500000000 11.500000000 -36.500000000 -0.500000000 11.500000000 -36.500000000 -1.500000000 11.500000000 -36.500000000 -2.500000000 11.500000000 -36.500000000 -3.500000000 11.500000000 -36.500000000 -4.500000000 11.500000000 -36.500000000 -5.500000000 11.500000000 -36.500000000 -6.500000000 11.500000000 -36.500000000 -7.500000000 11.500000000 -36.500000000 -8.500000000 11.500000000 -36.500000000 -9.500000000 11.500000000 -36.500000000 -10.500000000 11.500000000 -36.500000000 -11.500000000 11.500000000 -36.500000000 -12.500000000 11.500000000 -36.500000000 -13.500000000 11.500000000 -36.500000000 -14.500000000 
11.500000000 -36.500000000 -15.500000000 11.500000000 -36.500000000 -16.500000000 11.500000000 -36.500000000 -17.500000000 11.500000000 -36.500000000 -18.500000000 11.500000000 -36.500000000 -19.500000000 11.500000000 -36.500000000 -20.500000000 11.500000000 -36.500000000 -21.500000000 11.500000000 -36.500000000 -22.500000000 11.500000000 -36.500000000 -23.500000000 11.500000000 -36.500000000 -24.500000000 11.500000000 -36.500000000 -25.499996185 11.500000000 -36.499996185 -26.499954224 11.500000000 -36.499954224 -27.499591827 11.500000000 -36.499591827 -28.497470856 11.500000000 -36.497474670 -29.488407135 11.500000000 -36.488403320 -30.458978653 11.500000000 -36.458980560 -31.384418488 11.500000000 -36.384422302 -32.233222961 11.500000000 -36.233226776 -32.981101990 11.500000000 -35.981109619 --33.981101990 12.500000000 -35.981101990 --33.233222961 12.500000000 -36.233222961 --32.384422302 12.500000000 -36.384418488 --31.458978653 12.500000000 -36.458976746 --30.488407135 12.500000000 -36.488403320 --29.497472763 12.500000000 -36.497467041 --28.499593735 12.500000000 -36.499591827 --27.499954224 12.500000000 -36.499954224 --26.499996185 12.500000000 -36.499996185 --25.500000000 12.500000000 -36.500000000 --24.500000000 12.500000000 -36.500000000 --23.500000000 12.500000000 -36.500000000 --22.500000000 12.500000000 -36.500000000 --21.500000000 12.500000000 -36.500000000 --20.500000000 12.500000000 -36.500000000 --19.500000000 12.500000000 -36.500000000 --18.500000000 12.500000000 -36.500000000 --17.500000000 12.500000000 -36.500000000 --16.500000000 12.500000000 -36.500000000 --15.500000000 12.500000000 -36.500000000 --14.500000000 12.500000000 -36.500000000 --13.500000000 12.500000000 -36.500000000 --12.500000000 12.500000000 -36.500000000 --11.500000000 12.500000000 -36.500000000 --10.500000000 12.500000000 -36.500000000 --9.500000000 12.500000000 -36.500000000 --8.500000000 12.500000000 -36.500000000 --7.500000000 12.500000000 -36.500000000 --6.500000000 12.500000000 -36.500000000 --5.500000000 12.500000000 -36.500000000 --4.500000000 12.500000000 -36.500000000 --3.500000000 12.500000000 -36.500000000 --2.500000000 12.500000000 -36.500000000 --1.500000000 12.500000000 -36.500000000 --0.500000000 12.500000000 -36.500000000 -0.500000000 12.500000000 -36.500000000 -1.500000000 12.500000000 -36.500000000 -2.500000000 12.500000000 -36.500000000 -3.500000000 12.500000000 -36.500000000 -4.500000000 12.500000000 -36.500000000 -5.500000000 12.500000000 -36.500000000 -6.500000000 12.500000000 -36.500000000 -7.500000000 12.500000000 -36.500000000 -8.500000000 12.500000000 -36.500000000 -9.500000000 12.500000000 -36.500000000 -10.500000000 12.500000000 -36.500000000 -11.500000000 12.500000000 -36.500000000 -12.500000000 12.500000000 -36.500000000 -13.500000000 12.500000000 -36.500000000 -14.500000000 12.500000000 -36.500000000 -15.500000000 12.500000000 -36.500000000 -16.500000000 12.500000000 -36.500000000 -17.500000000 12.500000000 -36.500000000 -18.500000000 12.500000000 -36.500000000 -19.500000000 12.500000000 -36.500000000 -20.500000000 12.500000000 -36.500000000 -21.500000000 12.500000000 -36.500000000 -22.500000000 12.500000000 -36.500000000 -23.500000000 12.500000000 -36.500000000 -24.500000000 12.500000000 -36.500000000 -25.499996185 12.500000000 -36.499996185 -26.499954224 12.500000000 -36.499954224 -27.499591827 12.500000000 -36.499591827 -28.497470856 12.500000000 -36.497474670 -29.488407135 12.500000000 -36.488403320 -30.458978653 12.500000000 -36.458980560 -31.384418488 12.500000000 
-36.384422302 -32.233222961 12.500000000 -36.233226776 -32.981101990 12.500000000 -35.981109619 --33.981101990 13.500000000 -35.981101990 --33.233222961 13.500000000 -36.233222961 --32.384422302 13.500000000 -36.384418488 --31.458978653 13.500000000 -36.458976746 --30.488407135 13.500000000 -36.488403320 --29.497472763 13.500000000 -36.497467041 --28.499593735 13.500000000 -36.499591827 --27.499954224 13.500000000 -36.499954224 --26.499996185 13.500000000 -36.499996185 --25.500000000 13.500000000 -36.500000000 --24.500000000 13.500000000 -36.500000000 --23.500000000 13.500000000 -36.500000000 --22.500000000 13.500000000 -36.500000000 --21.500000000 13.500000000 -36.500000000 --20.500000000 13.500000000 -36.500000000 --19.500000000 13.500000000 -36.500000000 --18.500000000 13.500000000 -36.500000000 --17.500000000 13.500000000 -36.500000000 --16.500000000 13.500000000 -36.500000000 --15.500000000 13.500000000 -36.500000000 --14.500000000 13.500000000 -36.500000000 --13.500000000 13.500000000 -36.500000000 --12.500000000 13.500000000 -36.500000000 --11.500000000 13.500000000 -36.500000000 --10.500000000 13.500000000 -36.500000000 --9.500000000 13.500000000 -36.500000000 --8.500000000 13.500000000 -36.500000000 --7.500000000 13.500000000 -36.500000000 --6.500000000 13.500000000 -36.500000000 --5.500000000 13.500000000 -36.500000000 --4.500000000 13.500000000 -36.500000000 --3.500000000 13.500000000 -36.500000000 --2.500000000 13.500000000 -36.500000000 --1.500000000 13.500000000 -36.500000000 --0.500000000 13.500000000 -36.500000000 -0.500000000 13.500000000 -36.500000000 -1.500000000 13.500000000 -36.500000000 -2.500000000 13.500000000 -36.500000000 -3.500000000 13.500000000 -36.500000000 -4.500000000 13.500000000 -36.500000000 -5.500000000 13.500000000 -36.500000000 -6.500000000 13.500000000 -36.500000000 -7.500000000 13.500000000 -36.500000000 -8.500000000 13.500000000 -36.500000000 -9.500000000 13.500000000 -36.500000000 -10.500000000 13.500000000 -36.500000000 -11.500000000 13.500000000 -36.500000000 -12.500000000 13.500000000 -36.500000000 -13.500000000 13.500000000 -36.500000000 -14.500000000 13.500000000 -36.500000000 -15.500000000 13.500000000 -36.500000000 -16.500000000 13.500000000 -36.500000000 -17.500000000 13.500000000 -36.500000000 -18.500000000 13.500000000 -36.500000000 -19.500000000 13.500000000 -36.500000000 -20.500000000 13.500000000 -36.500000000 -21.500000000 13.500000000 -36.500000000 -22.500000000 13.500000000 -36.500000000 -23.500000000 13.500000000 -36.500000000 -24.500000000 13.500000000 -36.500000000 -25.499996185 13.500000000 -36.499996185 -26.499954224 13.500000000 -36.499954224 -27.499591827 13.500000000 -36.499591827 -28.497470856 13.500000000 -36.497474670 -29.488407135 13.500000000 -36.488403320 -30.458978653 13.500000000 -36.458980560 -31.384418488 13.500000000 -36.384422302 -32.233222961 13.500000000 -36.233226776 -32.981101990 13.500000000 -35.981109619 --33.981101990 14.500000000 -35.981101990 --33.233222961 14.500000000 -36.233222961 --32.384422302 14.500000000 -36.384418488 --31.458978653 14.500000000 -36.458976746 --30.488407135 14.500000000 -36.488403320 --29.497472763 14.500000000 -36.497467041 --28.499593735 14.500000000 -36.499591827 --27.499954224 14.500000000 -36.499954224 --26.499996185 14.500000000 -36.499996185 --25.500000000 14.500000000 -36.500000000 --24.500000000 14.500000000 -36.500000000 --23.500000000 14.500000000 -36.500000000 --22.500000000 14.500000000 -36.500000000 --21.500000000 14.500000000 -36.500000000 --20.500000000 
14.500000000 -36.500000000 --19.500000000 14.500000000 -36.500000000 --18.500000000 14.500000000 -36.500000000 --17.500000000 14.500000000 -36.500000000 --16.500000000 14.500000000 -36.500000000 --15.500000000 14.500000000 -36.500000000 --14.500000000 14.500000000 -36.500000000 --13.500000000 14.500000000 -36.500000000 --12.500000000 14.500000000 -36.500000000 --11.500000000 14.500000000 -36.500000000 --10.500000000 14.500000000 -36.500000000 --9.500000000 14.500000000 -36.500000000 --8.500000000 14.500000000 -36.500000000 --7.500000000 14.500000000 -36.500000000 --6.500000000 14.500000000 -36.500000000 --5.500000000 14.500000000 -36.500000000 --4.500000000 14.500000000 -36.500000000 --3.500000000 14.500000000 -36.500000000 --2.500000000 14.500000000 -36.500000000 --1.500000000 14.500000000 -36.500000000 --0.500000000 14.500000000 -36.500000000 -0.500000000 14.500000000 -36.500000000 -1.500000000 14.500000000 -36.500000000 -2.500000000 14.500000000 -36.500000000 -3.500000000 14.500000000 -36.500000000 -4.500000000 14.500000000 -36.500000000 -5.500000000 14.500000000 -36.500000000 -6.500000000 14.500000000 -36.500000000 -7.500000000 14.500000000 -36.500000000 -8.500000000 14.500000000 -36.500000000 -9.500000000 14.500000000 -36.500000000 -10.500000000 14.500000000 -36.500000000 -11.500000000 14.500000000 -36.500000000 -12.500000000 14.500000000 -36.500000000 -13.500000000 14.500000000 -36.500000000 -14.500000000 14.500000000 -36.500000000 -15.500000000 14.500000000 -36.500000000 -16.500000000 14.500000000 -36.500000000 -17.500000000 14.500000000 -36.500000000 -18.500000000 14.500000000 -36.500000000 -19.500000000 14.500000000 -36.500000000 -20.500000000 14.500000000 -36.500000000 -21.500000000 14.500000000 -36.500000000 -22.500000000 14.500000000 -36.500000000 -23.500000000 14.500000000 -36.500000000 -24.500000000 14.500000000 -36.500000000 -25.499996185 14.500000000 -36.499996185 -26.499954224 14.500000000 -36.499954224 -27.499591827 14.500000000 -36.499591827 -28.497470856 14.500000000 -36.497474670 -29.488407135 14.500000000 -36.488403320 -30.458978653 14.500000000 -36.458980560 -31.384418488 14.500000000 -36.384422302 -32.233222961 14.500000000 -36.233226776 -32.981101990 14.500000000 -35.981109619 --33.981101990 15.500000000 -35.981101990 --33.233222961 15.500000000 -36.233222961 --32.384422302 15.500000000 -36.384418488 --31.458978653 15.500000000 -36.458976746 --30.488407135 15.500000000 -36.488403320 --29.497472763 15.500000000 -36.497467041 --28.499593735 15.500000000 -36.499591827 --27.499954224 15.500000000 -36.499954224 --26.499996185 15.500000000 -36.499996185 --25.500000000 15.500000000 -36.500000000 --24.500000000 15.500000000 -36.500000000 --23.500000000 15.500000000 -36.500000000 --22.500000000 15.500000000 -36.500000000 --21.500000000 15.500000000 -36.500000000 --20.500000000 15.500000000 -36.500000000 --19.500000000 15.500000000 -36.500000000 --18.500000000 15.500000000 -36.500000000 --17.500000000 15.500000000 -36.500000000 --16.500000000 15.500000000 -36.500000000 --15.500000000 15.500000000 -36.500000000 --14.500000000 15.500000000 -36.500000000 --13.500000000 15.500000000 -36.500000000 --12.500000000 15.500000000 -36.500000000 --11.500000000 15.500000000 -36.500000000 --10.500000000 15.500000000 -36.500000000 --9.500000000 15.500000000 -36.500000000 --8.500000000 15.500000000 -36.500000000 --7.500000000 15.500000000 -36.500000000 --6.500000000 15.500000000 -36.500000000 --5.500000000 15.500000000 -36.500000000 --4.500000000 15.500000000 -36.500000000 --3.500000000 
15.500000000 -36.500000000 --2.500000000 15.500000000 -36.500000000 --1.500000000 15.500000000 -36.500000000 --0.500000000 15.500000000 -36.500000000 -0.500000000 15.500000000 -36.500000000 -1.500000000 15.500000000 -36.500000000 -2.500000000 15.500000000 -36.500000000 -3.500000000 15.500000000 -36.500000000 -4.500000000 15.500000000 -36.500000000 -5.500000000 15.500000000 -36.500000000 -6.500000000 15.500000000 -36.500000000 -7.500000000 15.500000000 -36.500000000 -8.500000000 15.500000000 -36.500000000 -9.500000000 15.500000000 -36.500000000 -10.500000000 15.500000000 -36.500000000 -11.500000000 15.500000000 -36.500000000 -12.500000000 15.500000000 -36.500000000 -13.500000000 15.500000000 -36.500000000 -14.500000000 15.500000000 -36.500000000 -15.500000000 15.500000000 -36.500000000 -16.500000000 15.500000000 -36.500000000 -17.500000000 15.500000000 -36.500000000 -18.500000000 15.500000000 -36.500000000 -19.500000000 15.500000000 -36.500000000 -20.500000000 15.500000000 -36.500000000 -21.500000000 15.500000000 -36.500000000 -22.500000000 15.500000000 -36.500000000 -23.500000000 15.500000000 -36.500000000 -24.500000000 15.500000000 -36.500000000 -25.499996185 15.500000000 -36.499996185 -26.499954224 15.500000000 -36.499954224 -27.499591827 15.500000000 -36.499591827 -28.497470856 15.500000000 -36.497474670 -29.488407135 15.500000000 -36.488403320 -30.458978653 15.500000000 -36.458980560 -31.384418488 15.500000000 -36.384422302 -32.233222961 15.500000000 -36.233226776 -32.981101990 15.500000000 -35.981109619 --33.981101990 16.500000000 -35.981101990 --33.233222961 16.500000000 -36.233222961 --32.384422302 16.500000000 -36.384418488 --31.458978653 16.500000000 -36.458976746 --30.488407135 16.500000000 -36.488403320 --29.497472763 16.500000000 -36.497467041 --28.499593735 16.500000000 -36.499591827 --27.499954224 16.500000000 -36.499954224 --26.499996185 16.500000000 -36.499996185 --25.500000000 16.500000000 -36.500000000 --24.500000000 16.500000000 -36.500000000 --23.500000000 16.500000000 -36.500000000 --22.500000000 16.500000000 -36.500000000 --21.500000000 16.500000000 -36.500000000 --20.500000000 16.500000000 -36.500000000 --19.500000000 16.500000000 -36.500000000 --18.500000000 16.500000000 -36.500000000 --17.500000000 16.500000000 -36.500000000 --16.500000000 16.500000000 -36.500000000 --15.500000000 16.500000000 -36.500000000 --14.500000000 16.500000000 -36.500000000 --13.500000000 16.500000000 -36.500000000 --12.500000000 16.500000000 -36.500000000 --11.500000000 16.500000000 -36.500000000 --10.500000000 16.500000000 -36.500000000 --9.500000000 16.500000000 -36.500000000 --8.500000000 16.500000000 -36.500000000 --7.500000000 16.500000000 -36.500000000 --6.500000000 16.500000000 -36.500000000 --5.500000000 16.500000000 -36.500000000 --4.500000000 16.500000000 -36.500000000 --3.500000000 16.500000000 -36.500000000 --2.500000000 16.500000000 -36.500000000 --1.500000000 16.500000000 -36.500000000 --0.500000000 16.500000000 -36.500000000 -0.500000000 16.500000000 -36.500000000 -1.500000000 16.500000000 -36.500000000 -2.500000000 16.500000000 -36.500000000 -3.500000000 16.500000000 -36.500000000 -4.500000000 16.500000000 -36.500000000 -5.500000000 16.500000000 -36.500000000 -6.500000000 16.500000000 -36.500000000 -7.500000000 16.500000000 -36.500000000 -8.500000000 16.500000000 -36.500000000 -9.500000000 16.500000000 -36.500000000 -10.500000000 16.500000000 -36.500000000 -11.500000000 16.500000000 -36.500000000 -12.500000000 16.500000000 -36.500000000 -13.500000000 16.500000000 
-36.500000000 -14.500000000 16.500000000 -36.500000000 -15.500000000 16.500000000 -36.500000000 -16.500000000 16.500000000 -36.500000000 -17.500000000 16.500000000 -36.500000000 -18.500000000 16.500000000 -36.500000000 -19.500000000 16.500000000 -36.500000000 -20.500000000 16.500000000 -36.500000000 -21.500000000 16.500000000 -36.500000000 -22.500000000 16.500000000 -36.500000000 -23.500000000 16.500000000 -36.500000000 -24.500000000 16.500000000 -36.500000000 -25.499996185 16.500000000 -36.499996185 -26.499954224 16.500000000 -36.499954224 -27.499591827 16.500000000 -36.499591827 -28.497470856 16.500000000 -36.497474670 -29.488407135 16.500000000 -36.488403320 -30.458978653 16.500000000 -36.458980560 -31.384418488 16.500000000 -36.384422302 -32.233222961 16.500000000 -36.233226776 -32.981101990 16.500000000 -35.981109619 --33.981101990 17.500000000 -35.981101990 --33.233222961 17.500000000 -36.233222961 --32.384422302 17.500000000 -36.384418488 --31.458978653 17.500000000 -36.458976746 --30.488407135 17.500000000 -36.488403320 --29.497472763 17.500000000 -36.497467041 --28.499593735 17.500000000 -36.499591827 --27.499954224 17.500000000 -36.499954224 --26.499996185 17.500000000 -36.499996185 --25.500000000 17.500000000 -36.500000000 --24.500000000 17.500000000 -36.500000000 --23.500000000 17.500000000 -36.500000000 --22.500000000 17.500000000 -36.500000000 --21.500000000 17.500000000 -36.500000000 --20.500000000 17.500000000 -36.500000000 --19.500000000 17.500000000 -36.500000000 --18.500000000 17.500000000 -36.500000000 --17.500000000 17.500000000 -36.500000000 --16.500000000 17.500000000 -36.500000000 --15.500000000 17.500000000 -36.500000000 --14.500000000 17.500000000 -36.500000000 --13.500000000 17.500000000 -36.500000000 --12.500000000 17.500000000 -36.500000000 --11.500000000 17.500000000 -36.500000000 --10.500000000 17.500000000 -36.500000000 --9.500000000 17.500000000 -36.500000000 --8.500000000 17.500000000 -36.500000000 --7.500000000 17.500000000 -36.500000000 --6.500000000 17.500000000 -36.500000000 --5.500000000 17.500000000 -36.500000000 --4.500000000 17.500000000 -36.500000000 --3.500000000 17.500000000 -36.500000000 --2.500000000 17.500000000 -36.500000000 --1.500000000 17.500000000 -36.500000000 --0.500000000 17.500000000 -36.500000000 -0.500000000 17.500000000 -36.500000000 -1.500000000 17.500000000 -36.500000000 -2.500000000 17.500000000 -36.500000000 -3.500000000 17.500000000 -36.500000000 -4.500000000 17.500000000 -36.500000000 -5.500000000 17.500000000 -36.500000000 -6.500000000 17.500000000 -36.500000000 -7.500000000 17.500000000 -36.500000000 -8.500000000 17.500000000 -36.500000000 -9.500000000 17.500000000 -36.500000000 -10.500000000 17.500000000 -36.500000000 -11.500000000 17.500000000 -36.500000000 -12.500000000 17.500000000 -36.500000000 -13.500000000 17.500000000 -36.500000000 -14.500000000 17.500000000 -36.500000000 -15.500000000 17.500000000 -36.500000000 -16.500000000 17.500000000 -36.500000000 -17.500000000 17.500000000 -36.500000000 -18.500000000 17.500000000 -36.500000000 -19.500000000 17.500000000 -36.500000000 -20.500000000 17.500000000 -36.500000000 -21.500000000 17.500000000 -36.500000000 -22.500000000 17.500000000 -36.500000000 -23.500000000 17.500000000 -36.500000000 -24.500000000 17.500000000 -36.500000000 -25.499996185 17.500000000 -36.499996185 -26.499954224 17.500000000 -36.499954224 -27.499591827 17.500000000 -36.499591827 -28.497470856 17.500000000 -36.497474670 -29.488407135 17.500000000 -36.488403320 -30.458978653 17.500000000 -36.458980560 
-31.384418488 17.500000000 -36.384422302 -32.233222961 17.500000000 -36.233226776 -32.981101990 17.500000000 -35.981109619 --33.981101990 18.500000000 -35.981101990 --33.233222961 18.500000000 -36.233222961 --32.384422302 18.500000000 -36.384418488 --31.458978653 18.500000000 -36.458976746 --30.488407135 18.500000000 -36.488403320 --29.497472763 18.500000000 -36.497467041 --28.499593735 18.500000000 -36.499591827 --27.499954224 18.500000000 -36.499954224 --26.499996185 18.500000000 -36.499996185 --25.500000000 18.500000000 -36.500000000 --24.500000000 18.500000000 -36.500000000 --23.500000000 18.500000000 -36.500000000 --22.500000000 18.500000000 -36.500000000 --21.500000000 18.500000000 -36.500000000 --20.500000000 18.500000000 -36.500000000 --19.500000000 18.500000000 -36.500000000 --18.500000000 18.500000000 -36.500000000 --17.500000000 18.500000000 -36.500000000 --16.500000000 18.500000000 -36.500000000 --15.500000000 18.500000000 -36.500000000 --14.500000000 18.500000000 -36.500000000 --13.500000000 18.500000000 -36.500000000 --12.500000000 18.500000000 -36.500000000 --11.500000000 18.500000000 -36.500000000 --10.500000000 18.500000000 -36.500000000 --9.500000000 18.500000000 -36.500000000 --8.500000000 18.500000000 -36.500000000 --7.500000000 18.500000000 -36.500000000 --6.500000000 18.500000000 -36.500000000 --5.500000000 18.500000000 -36.500000000 --4.500000000 18.500000000 -36.500000000 --3.500000000 18.500000000 -36.500000000 --2.500000000 18.500000000 -36.500000000 --1.500000000 18.500000000 -36.500000000 --0.500000000 18.500000000 -36.500000000 -0.500000000 18.500000000 -36.500000000 -1.500000000 18.500000000 -36.500000000 -2.500000000 18.500000000 -36.500000000 -3.500000000 18.500000000 -36.500000000 -4.500000000 18.500000000 -36.500000000 -5.500000000 18.500000000 -36.500000000 -6.500000000 18.500000000 -36.500000000 -7.500000000 18.500000000 -36.500000000 -8.500000000 18.500000000 -36.500000000 -9.500000000 18.500000000 -36.500000000 -10.500000000 18.500000000 -36.500000000 -11.500000000 18.500000000 -36.500000000 -12.500000000 18.500000000 -36.500000000 -13.500000000 18.500000000 -36.500000000 -14.500000000 18.500000000 -36.500000000 -15.500000000 18.500000000 -36.500000000 -16.500000000 18.500000000 -36.500000000 -17.500000000 18.500000000 -36.500000000 -18.500000000 18.500000000 -36.500000000 -19.500000000 18.500000000 -36.500000000 -20.500000000 18.500000000 -36.500000000 -21.500000000 18.500000000 -36.500000000 -22.500000000 18.500000000 -36.500000000 -23.500000000 18.500000000 -36.500000000 -24.500000000 18.500000000 -36.500000000 -25.499996185 18.500000000 -36.499996185 -26.499954224 18.500000000 -36.499954224 -27.499591827 18.500000000 -36.499591827 -28.497470856 18.500000000 -36.497474670 -29.488407135 18.500000000 -36.488403320 -30.458978653 18.500000000 -36.458980560 -31.384418488 18.500000000 -36.384422302 -32.233222961 18.500000000 -36.233226776 -32.981101990 18.500000000 -35.981109619 --33.981101990 19.500000000 -35.981101990 --33.233222961 19.500000000 -36.233222961 --32.384422302 19.500000000 -36.384418488 --31.458978653 19.500000000 -36.458976746 --30.488407135 19.500000000 -36.488403320 --29.497472763 19.500000000 -36.497467041 --28.499593735 19.500000000 -36.499591827 --27.499954224 19.500000000 -36.499954224 --26.499996185 19.500000000 -36.499996185 --25.500000000 19.500000000 -36.500000000 --24.500000000 19.500000000 -36.500000000 --23.500000000 19.500000000 -36.500000000 --22.500000000 19.500000000 -36.500000000 --21.500000000 19.500000000 -36.500000000 
--20.500000000 19.500000000 -36.500000000 --19.500000000 19.500000000 -36.500000000 --18.500000000 19.500000000 -36.500000000 --17.500000000 19.500000000 -36.500000000 --16.500000000 19.500000000 -36.500000000 --15.500000000 19.500000000 -36.500000000 --14.500000000 19.500000000 -36.500000000 --13.500000000 19.500000000 -36.500000000 --12.500000000 19.500000000 -36.500000000 --11.500000000 19.500000000 -36.500000000 --10.500000000 19.500000000 -36.500000000 --9.500000000 19.500000000 -36.500000000 --8.500000000 19.500000000 -36.500000000 --7.500000000 19.500000000 -36.500000000 --6.500000000 19.500000000 -36.500000000 --5.500000000 19.500000000 -36.500000000 --4.500000000 19.500000000 -36.500000000 --3.500000000 19.500000000 -36.500000000 --2.500000000 19.500000000 -36.500000000 --1.500000000 19.500000000 -36.500000000 --0.500000000 19.500000000 -36.500000000 -0.500000000 19.500000000 -36.500000000 -1.500000000 19.500000000 -36.500000000 -2.500000000 19.500000000 -36.500000000 -3.500000000 19.500000000 -36.500000000 -4.500000000 19.500000000 -36.500000000 -5.500000000 19.500000000 -36.500000000 -6.500000000 19.500000000 -36.500000000 -7.500000000 19.500000000 -36.500000000 -8.500000000 19.500000000 -36.500000000 -9.500000000 19.500000000 -36.500000000 -10.500000000 19.500000000 -36.500000000 -11.500000000 19.500000000 -36.500000000 -12.500000000 19.500000000 -36.500000000 -13.500000000 19.500000000 -36.500000000 -14.500000000 19.500000000 -36.500000000 -15.500000000 19.500000000 -36.500000000 -16.500000000 19.500000000 -36.500000000 -17.500000000 19.500000000 -36.500000000 -18.500000000 19.500000000 -36.500000000 -19.500000000 19.500000000 -36.500000000 -20.500000000 19.500000000 -36.500000000 -21.500000000 19.500000000 -36.500000000 -22.500000000 19.500000000 -36.500000000 -23.500000000 19.500000000 -36.500000000 -24.500000000 19.500000000 -36.500000000 -25.499996185 19.500000000 -36.499996185 -26.499954224 19.500000000 -36.499954224 -27.499591827 19.500000000 -36.499591827 -28.497470856 19.500000000 -36.497474670 -29.488407135 19.500000000 -36.488403320 -30.458978653 19.500000000 -36.458980560 -31.384418488 19.500000000 -36.384422302 -32.233222961 19.500000000 -36.233226776 -32.981101990 19.500000000 -35.981109619 --33.981101990 20.500000000 -35.981101990 --33.233222961 20.500000000 -36.233222961 --32.384422302 20.500000000 -36.384418488 --31.458978653 20.500000000 -36.458976746 --30.488407135 20.500000000 -36.488403320 --29.497472763 20.500000000 -36.497467041 --28.499593735 20.500000000 -36.499591827 --27.499954224 20.500000000 -36.499954224 --26.499996185 20.500000000 -36.499996185 --25.500000000 20.500000000 -36.500000000 --24.500000000 20.500000000 -36.500000000 --23.500000000 20.500000000 -36.500000000 --22.500000000 20.500000000 -36.500000000 --21.500000000 20.500000000 -36.500000000 --20.500000000 20.500000000 -36.500000000 --19.500000000 20.500000000 -36.500000000 --18.500000000 20.500000000 -36.500000000 --17.500000000 20.500000000 -36.500000000 --16.500000000 20.500000000 -36.500000000 --15.500000000 20.500000000 -36.500000000 --14.500000000 20.500000000 -36.500000000 --13.500000000 20.500000000 -36.500000000 --12.500000000 20.500000000 -36.500000000 --11.500000000 20.500000000 -36.500000000 --10.500000000 20.500000000 -36.500000000 --9.500000000 20.500000000 -36.500000000 --8.500000000 20.500000000 -36.500000000 --7.500000000 20.500000000 -36.500000000 --6.500000000 20.500000000 -36.500000000 --5.500000000 20.500000000 -36.500000000 --4.500000000 20.500000000 -36.500000000 
--3.500000000 20.500000000 -36.500000000 --2.500000000 20.500000000 -36.500000000 --1.500000000 20.500000000 -36.500000000 --0.500000000 20.500000000 -36.500000000 -0.500000000 20.500000000 -36.500000000 -1.500000000 20.500000000 -36.500000000 -2.500000000 20.500000000 -36.500000000 -3.500000000 20.500000000 -36.500000000 -4.500000000 20.500000000 -36.500000000 -5.500000000 20.500000000 -36.500000000 -6.500000000 20.500000000 -36.500000000 -7.500000000 20.500000000 -36.500000000 -8.500000000 20.500000000 -36.500000000 -9.500000000 20.500000000 -36.500000000 -10.500000000 20.500000000 -36.500000000 -11.500000000 20.500000000 -36.500000000 -12.500000000 20.500000000 -36.500000000 -13.500000000 20.500000000 -36.500000000 -14.500000000 20.500000000 -36.500000000 -15.500000000 20.500000000 -36.500000000 -16.500000000 20.500000000 -36.500000000 -17.500000000 20.500000000 -36.500000000 -18.500000000 20.500000000 -36.500000000 -19.500000000 20.500000000 -36.500000000 -20.500000000 20.500000000 -36.500000000 -21.500000000 20.500000000 -36.500000000 -22.500000000 20.500000000 -36.500000000 -23.500000000 20.500000000 -36.500000000 -24.500000000 20.500000000 -36.500000000 -25.499996185 20.500000000 -36.499996185 -26.499954224 20.500000000 -36.499954224 -27.499591827 20.500000000 -36.499591827 -28.497470856 20.500000000 -36.497474670 -29.488407135 20.500000000 -36.488403320 -30.458978653 20.500000000 -36.458980560 -31.384418488 20.500000000 -36.384422302 -32.233222961 20.500000000 -36.233226776 -32.981101990 20.500000000 -35.981109619 --33.981101990 21.500000000 -35.981101990 --33.233222961 21.500000000 -36.233222961 --32.384422302 21.500000000 -36.384418488 --31.458978653 21.500000000 -36.458976746 --30.488407135 21.500000000 -36.488403320 --29.497472763 21.500000000 -36.497467041 --28.499593735 21.500000000 -36.499591827 --27.499954224 21.500000000 -36.499954224 --26.499996185 21.500000000 -36.499996185 --25.500000000 21.500000000 -36.500000000 --24.500000000 21.500000000 -36.500000000 --23.500000000 21.500000000 -36.500000000 --22.500000000 21.500000000 -36.500000000 --21.500000000 21.500000000 -36.500000000 --20.500000000 21.500000000 -36.500000000 --19.500000000 21.500000000 -36.500000000 --18.500000000 21.500000000 -36.500000000 --17.500000000 21.500000000 -36.500000000 --16.500000000 21.500000000 -36.500000000 --15.500000000 21.500000000 -36.500000000 --14.500000000 21.500000000 -36.500000000 --13.500000000 21.500000000 -36.500000000 --12.500000000 21.500000000 -36.500000000 --11.500000000 21.500000000 -36.500000000 --10.500000000 21.500000000 -36.500000000 --9.500000000 21.500000000 -36.500000000 --8.500000000 21.500000000 -36.500000000 --7.500000000 21.500000000 -36.500000000 --6.500000000 21.500000000 -36.500000000 --5.500000000 21.500000000 -36.500000000 --4.500000000 21.500000000 -36.500000000 --3.500000000 21.500000000 -36.500000000 --2.500000000 21.500000000 -36.500000000 --1.500000000 21.500000000 -36.500000000 --0.500000000 21.500000000 -36.500000000 -0.500000000 21.500000000 -36.500000000 -1.500000000 21.500000000 -36.500000000 -2.500000000 21.500000000 -36.500000000 -3.500000000 21.500000000 -36.500000000 -4.500000000 21.500000000 -36.500000000 -5.500000000 21.500000000 -36.500000000 -6.500000000 21.500000000 -36.500000000 -7.500000000 21.500000000 -36.500000000 -8.500000000 21.500000000 -36.500000000 -9.500000000 21.500000000 -36.500000000 -10.500000000 21.500000000 -36.500000000 -11.500000000 21.500000000 -36.500000000 -12.500000000 21.500000000 -36.500000000 -13.500000000 
21.500000000 -36.500000000 -14.500000000 21.500000000 -36.500000000 -15.500000000 21.500000000 -36.500000000 -16.500000000 21.500000000 -36.500000000 -17.500000000 21.500000000 -36.500000000 -18.500000000 21.500000000 -36.500000000 -19.500000000 21.500000000 -36.500000000 -20.500000000 21.500000000 -36.500000000 -21.500000000 21.500000000 -36.500000000 -22.500000000 21.500000000 -36.500000000 -23.500000000 21.500000000 -36.500000000 -24.500000000 21.500000000 -36.500000000 -25.499996185 21.500000000 -36.499996185 -26.499954224 21.500000000 -36.499954224 -27.499591827 21.500000000 -36.499591827 -28.497470856 21.500000000 -36.497474670 -29.488407135 21.500000000 -36.488403320 -30.458978653 21.500000000 -36.458980560 -31.384418488 21.500000000 -36.384422302 -32.233222961 21.500000000 -36.233226776 -32.981101990 21.500000000 -35.981109619 --33.981101990 22.500000000 -35.981101990 --33.233222961 22.500000000 -36.233222961 --32.384422302 22.500000000 -36.384418488 --31.458978653 22.500000000 -36.458976746 --30.488407135 22.500000000 -36.488403320 --29.497472763 22.500000000 -36.497467041 --28.499593735 22.500000000 -36.499591827 --27.499954224 22.500000000 -36.499954224 --26.499996185 22.500000000 -36.499996185 --25.500000000 22.500000000 -36.500000000 --24.500000000 22.500000000 -36.500000000 --23.500000000 22.500000000 -36.500000000 --22.500000000 22.500000000 -36.500000000 --21.500000000 22.500000000 -36.500000000 --20.500000000 22.500000000 -36.500000000 --19.500000000 22.500000000 -36.500000000 --18.500000000 22.500000000 -36.500000000 --17.500000000 22.500000000 -36.500000000 --16.500000000 22.500000000 -36.500000000 --15.500000000 22.500000000 -36.500000000 --14.500000000 22.500000000 -36.500000000 --13.500000000 22.500000000 -36.500000000 --12.500000000 22.500000000 -36.500000000 --11.500000000 22.500000000 -36.500000000 --10.500000000 22.500000000 -36.500000000 --9.500000000 22.500000000 -36.500000000 --8.500000000 22.500000000 -36.500000000 --7.500000000 22.500000000 -36.500000000 --6.500000000 22.500000000 -36.500000000 --5.500000000 22.500000000 -36.500000000 --4.500000000 22.500000000 -36.500000000 --3.500000000 22.500000000 -36.500000000 --2.500000000 22.500000000 -36.500000000 --1.500000000 22.500000000 -36.500000000 --0.500000000 22.500000000 -36.500000000 -0.500000000 22.500000000 -36.500000000 -1.500000000 22.500000000 -36.500000000 -2.500000000 22.500000000 -36.500000000 -3.500000000 22.500000000 -36.500000000 -4.500000000 22.500000000 -36.500000000 -5.500000000 22.500000000 -36.500000000 -6.500000000 22.500000000 -36.500000000 -7.500000000 22.500000000 -36.500000000 -8.500000000 22.500000000 -36.500000000 -9.500000000 22.500000000 -36.500000000 -10.500000000 22.500000000 -36.500000000 -11.500000000 22.500000000 -36.500000000 -12.500000000 22.500000000 -36.500000000 -13.500000000 22.500000000 -36.500000000 -14.500000000 22.500000000 -36.500000000 -15.500000000 22.500000000 -36.500000000 -16.500000000 22.500000000 -36.500000000 -17.500000000 22.500000000 -36.500000000 -18.500000000 22.500000000 -36.500000000 -19.500000000 22.500000000 -36.500000000 -20.500000000 22.500000000 -36.500000000 -21.500000000 22.500000000 -36.500000000 -22.500000000 22.500000000 -36.500000000 -23.500000000 22.500000000 -36.500000000 -24.500000000 22.500000000 -36.500000000 -25.499996185 22.500000000 -36.499996185 -26.499954224 22.500000000 -36.499954224 -27.499591827 22.500000000 -36.499591827 -28.497470856 22.500000000 -36.497474670 -29.488407135 22.500000000 -36.488403320 -30.458978653 22.500000000 
[... raw data hunk: floating-point x/y/z coordinate triples from a bundled data file, omitted ...]
--35.428531647 31.380737305 -34.260543823 -34.428535461 31.380739212 -34.260547638 -34.797355652 31.035345078 -33.840930939 --35.057430267 32.316951752 -34.231872559 --34.565628052 32.672851562 -34.565631866 -33.565628052 32.672851562 -34.565628052 -34.057430267 32.316951752 -34.231868744 --34.842590332 32.980762482 -33.980762482 --34.297924042 33.297924042 -34.297931671 --33.672851562 33.565631866 -34.565631866 -32.672851562 33.565631866 -34.565628052 -33.297924042 33.297924042 -34.297924042 -33.842590332 32.980762482 -33.980758667 --33.980758667 33.842590332 -33.980762482 --33.316947937 34.057430267 -34.231876373 --32.380737305 34.428535461 -34.260547638 --31.410831451 34.518615723 -34.477191925 --30.459177017 34.577739716 -34.567749023 --29.485630035 34.607208252 -34.604709625 --28.496028900 34.618598938 -34.617931366 --27.499160767 34.621982574 -34.621803284 --26.499868393 34.622734070 -34.622692108 --25.499986649 34.622856140 -34.622844696 --24.500000000 34.622871399 -34.622863770 --23.500000000 34.622871399 -34.622871399 --22.500000000 34.622871399 -34.622871399 --21.500000000 34.622871399 -34.622871399 --20.500000000 34.622871399 -34.622871399 --19.500000000 34.622871399 -34.622871399 --18.500000000 34.622871399 -34.622871399 --17.500000000 34.622871399 -34.622871399 --16.500000000 34.622871399 -34.622871399 --15.500000000 34.622871399 -34.622871399 --14.500000000 34.622871399 -34.622871399 --13.500000000 34.622871399 -34.622871399 --12.500000000 34.622871399 -34.622871399 --11.500000000 34.622871399 -34.622871399 --10.500000000 34.622871399 -34.622871399 --9.500000000 34.622871399 -34.622871399 --8.500000000 34.622871399 -34.622871399 --7.500000000 34.622871399 -34.622871399 --6.500000000 34.622871399 -34.622871399 --5.500000000 34.622871399 -34.622871399 --4.500000000 34.622871399 -34.622871399 --3.500000000 34.622871399 -34.622871399 --2.500000000 34.622871399 -34.622871399 --1.500000000 34.622871399 -34.622871399 --0.500000000 34.622871399 -34.622871399 -0.500000000 34.622871399 -34.622871399 -1.500000000 34.622871399 -34.622871399 -2.500000000 34.622871399 -34.622871399 -3.500000000 34.622871399 -34.622871399 -4.500000000 34.622871399 -34.622871399 -5.500000000 34.622871399 -34.622871399 -6.500000000 34.622871399 -34.622871399 -7.500000000 34.622871399 -34.622871399 -8.500000000 34.622871399 -34.622871399 -9.500000000 34.622871399 -34.622871399 -10.500000000 34.622871399 -34.622871399 -11.500000000 34.622871399 -34.622871399 -12.500000000 34.622871399 -34.622871399 -13.500000000 34.622871399 -34.622871399 -14.500000000 34.622871399 -34.622871399 -15.500000000 34.622871399 -34.622871399 -16.500000000 34.622871399 -34.622871399 -17.500000000 34.622871399 -34.622871399 -18.500000000 34.622871399 -34.622871399 -19.500000000 34.622871399 -34.622871399 -20.500000000 34.622871399 -34.622871399 -21.500000000 34.622871399 -34.622871399 -22.500000000 34.622871399 -34.622871399 -23.500000000 34.622871399 -34.622871399 -24.499986649 34.622856140 -34.622844696 -25.499868393 34.622734070 -34.622692108 -26.499156952 34.621982574 -34.621803284 -27.496026993 34.618602753 -34.617927551 -28.485631943 34.607208252 -34.604709625 -29.459178925 34.577739716 -34.567752838 -30.410833359 34.518615723 -34.477191925 -31.380737305 34.428535461 -34.260547638 -32.316947937 34.057430267 -34.231868744 -32.980758667 33.842590332 -33.980758667 --32.035343170 34.797355652 -33.840934753 --31.336603165 34.879734039 -33.903381348 --30.442840576 34.937091827 -33.946807861 --29.481937408 34.965763092 -33.968864441 
--28.495172501 34.976860046 -33.977611542 --27.498952866 34.980201721 -33.980331421 --26.499826431 34.980957031 -33.980972290 --25.499979019 34.981086731 -33.981086731 --24.499998093 34.981101990 -33.981098175 --23.500000000 34.981101990 -33.981101990 --22.500000000 34.981101990 -33.981101990 --21.500000000 34.981101990 -33.981101990 --20.500000000 34.981101990 -33.981101990 --19.500000000 34.981101990 -33.981101990 --18.500000000 34.981101990 -33.981101990 --17.500000000 34.981101990 -33.981101990 --16.500000000 34.981101990 -33.981101990 --15.500000000 34.981101990 -33.981101990 --14.500000000 34.981101990 -33.981101990 --13.500000000 34.981101990 -33.981101990 --12.500000000 34.981101990 -33.981101990 --11.500000000 34.981101990 -33.981101990 --10.500000000 34.981101990 -33.981101990 --9.500000000 34.981101990 -33.981101990 --8.500000000 34.981101990 -33.981101990 --7.500000000 34.981101990 -33.981101990 --6.500000000 34.981101990 -33.981101990 --5.500000000 34.981101990 -33.981101990 --4.500000000 34.981101990 -33.981101990 --3.500000000 34.981101990 -33.981101990 --2.500000000 34.981101990 -33.981101990 --1.500000000 34.981101990 -33.981101990 --0.500000000 34.981101990 -33.981101990 -0.500000000 34.981101990 -33.981101990 -1.500000000 34.981101990 -33.981101990 -2.500000000 34.981101990 -33.981101990 -3.500000000 34.981101990 -33.981101990 -4.500000000 34.981101990 -33.981101990 -5.500000000 34.981101990 -33.981101990 -6.500000000 34.981101990 -33.981101990 -7.500000000 34.981101990 -33.981101990 -8.500000000 34.981101990 -33.981101990 -9.500000000 34.981101990 -33.981101990 -10.500000000 34.981101990 -33.981101990 -11.500000000 34.981101990 -33.981101990 -12.500000000 34.981101990 -33.981101990 -13.500000000 34.981101990 -33.981101990 -14.500000000 34.981101990 -33.981101990 -15.500000000 34.981101990 -33.981101990 -16.500000000 34.981101990 -33.981101990 -17.500000000 34.981101990 -33.981101990 -18.500000000 34.981101990 -33.981101990 -19.500000000 34.981101990 -33.981101990 -20.500000000 34.981101990 -33.981101990 -21.500000000 34.981101990 -33.981101990 -22.500000000 34.981101990 -33.981101990 -23.499998093 34.981101990 -33.981101990 -24.499979019 34.981086731 -33.981086731 -25.499822617 34.980957031 -33.980972290 -26.498952866 34.980201721 -33.980331421 -27.495172501 34.976863861 -33.977619171 -28.481937408 34.965763092 -33.968864441 -29.442840576 34.937091827 -33.946811676 -30.336603165 34.879737854 -33.903381348 -31.035345078 34.797355652 -33.840934753 --33.030693054 -35.846317291 -33.030693054 --32.334243774 -35.954723358 -33.254272461 --31.405673981 -36.112342834 -33.216842651 --30.460596085 -36.185966492 -33.220710754 --29.486715317 -36.217514038 -33.227874756 --28.496374130 -36.228954315 -33.231601715 --27.499221802 -36.232299805 -33.232868195 --26.499872208 -36.233074188 -33.233169556 --25.499986649 -36.233207703 -33.233219147 --24.499998093 -36.233222961 -33.233222961 --23.500000000 -36.233222961 -33.233222961 --22.500000000 -36.233222961 -33.233222961 --21.500000000 -36.233222961 -33.233222961 --20.500000000 -36.233222961 -33.233222961 --19.500000000 -36.233222961 -33.233222961 --18.500000000 -36.233222961 -33.233222961 --17.500000000 -36.233222961 -33.233222961 --16.500000000 -36.233222961 -33.233222961 --15.500000000 -36.233222961 -33.233222961 --14.500000000 -36.233222961 -33.233222961 --13.500000000 -36.233222961 -33.233222961 --12.500000000 -36.233222961 -33.233222961 --11.500000000 -36.233222961 -33.233222961 --10.500000000 -36.233222961 -33.233222961 
--9.500000000 -36.233222961 -33.233222961 --8.500000000 -36.233222961 -33.233222961 --7.500000000 -36.233222961 -33.233222961 --6.500000000 -36.233222961 -33.233222961 --5.500000000 -36.233222961 -33.233222961 --4.500000000 -36.233222961 -33.233222961 --3.500000000 -36.233222961 -33.233222961 --2.500000000 -36.233222961 -33.233222961 --1.500000000 -36.233222961 -33.233222961 --0.500000000 -36.233222961 -33.233222961 -0.500000000 -36.233222961 -33.233222961 -1.500000000 -36.233222961 -33.233222961 -2.500000000 -36.233222961 -33.233222961 -3.500000000 -36.233222961 -33.233222961 -4.500000000 -36.233222961 -33.233222961 -5.500000000 -36.233222961 -33.233222961 -6.500000000 -36.233222961 -33.233222961 -7.500000000 -36.233222961 -33.233222961 -8.500000000 -36.233222961 -33.233222961 -9.500000000 -36.233222961 -33.233222961 -10.500000000 -36.233222961 -33.233222961 -11.500000000 -36.233222961 -33.233222961 -12.500000000 -36.233222961 -33.233222961 -13.500000000 -36.233222961 -33.233222961 -14.500000000 -36.233222961 -33.233222961 -15.500000000 -36.233222961 -33.233222961 -16.500000000 -36.233222961 -33.233222961 -17.500000000 -36.233222961 -33.233222961 -18.500000000 -36.233222961 -33.233222961 -19.500000000 -36.233222961 -33.233222961 -20.500000000 -36.233222961 -33.233222961 -21.500000000 -36.233222961 -33.233222961 -22.500000000 -36.233222961 -33.233222961 -23.500000000 -36.233222961 -33.233222961 -24.499984741 -36.233207703 -33.233219147 -25.499872208 -36.233074188 -33.233173370 -26.499225616 -36.232299805 -33.232868195 -27.496377945 -36.228958130 -33.231605530 -28.486719131 -36.217514038 -33.227878571 -29.460596085 -36.185966492 -33.220718384 -30.405673981 -36.112342834 -33.216842651 -31.334243774 -35.954723358 -33.254276276 -32.030693054 -35.846313477 -33.030693054 --34.772163391 -34.772159576 -33.006500244 --34.231872559 -35.057430267 -33.316947937 --33.424438477 -35.498546600 -33.424442291 --32.736049652 -35.670326233 -33.678356171 -31.736053467 -35.670326233 -33.678352356 -32.424442291 -35.498538971 -33.424442291 -33.231872559 -35.057430267 -33.316951752 -33.772163391 -34.772159576 -33.006500244 --35.057430267 -34.231872559 -33.316947937 --34.565628052 -34.565628052 -33.672851562 -33.565631866 -34.565628052 -33.672847748 -34.057430267 -34.231868744 -33.316947937 --35.846313477 -33.030693054 -33.030693054 --35.498542786 -33.424438477 -33.424438477 -34.498546600 -33.424438477 -33.424438477 -34.846317291 -33.030689240 -33.030693054 --35.954723358 -32.334243774 -33.254276276 --35.670326233 -32.736049652 -33.678352356 -34.670322418 -32.736049652 -33.678352356 -34.954723358 -32.334243774 -33.254272461 --36.112342834 -31.405673981 -33.216842651 -35.112342834 -31.405673981 -33.216842651 --36.185966492 -30.460596085 -33.220722198 -35.185966492 -30.460596085 -33.220710754 --36.217510223 -29.486719131 -33.227870941 -35.217514038 -29.486715317 -33.227874756 --36.228954315 -28.496377945 -33.231601715 -35.228954315 -28.496374130 -33.231601715 --36.232299805 -27.499221802 -33.232868195 -35.232299805 -27.499221802 -33.232868195 --36.233074188 -26.499872208 -33.233165741 -35.233074188 -26.499872208 -33.233169556 --36.233207703 -25.499984741 -33.233219147 -35.233207703 -25.499986649 -33.233219147 --36.233222961 -24.500000000 -33.233222961 -35.233222961 -24.499998093 -33.233222961 --36.233222961 -23.500000000 -33.233222961 -35.233222961 -23.500000000 -33.233222961 --36.233222961 -22.500000000 -33.233222961 -35.233222961 -22.500000000 -33.233222961 --36.233222961 -21.500000000 -33.233222961 -35.233222961 
-21.500000000 -33.233222961 --36.233222961 -20.500000000 -33.233222961 -35.233222961 -20.500000000 -33.233222961 --36.233222961 -19.500000000 -33.233222961 -35.233222961 -19.500000000 -33.233222961 --36.233222961 -18.500000000 -33.233222961 -35.233222961 -18.500000000 -33.233222961 --36.233222961 -17.500000000 -33.233222961 -35.233222961 -17.500000000 -33.233222961 --36.233222961 -16.500000000 -33.233222961 -35.233222961 -16.500000000 -33.233222961 --36.233222961 -15.500000000 -33.233222961 -35.233222961 -15.500000000 -33.233222961 --36.233222961 -14.500000000 -33.233222961 -35.233222961 -14.500000000 -33.233222961 --36.233222961 -13.500000000 -33.233222961 -35.233222961 -13.500000000 -33.233222961 --36.233222961 -12.500000000 -33.233222961 -35.233222961 -12.500000000 -33.233222961 --36.233222961 -11.500000000 -33.233222961 -35.233222961 -11.500000000 -33.233222961 --36.233222961 -10.500000000 -33.233222961 -35.233222961 -10.500000000 -33.233222961 --36.233222961 -9.500000000 -33.233222961 -35.233222961 -9.500000000 -33.233222961 --36.233222961 -8.500000000 -33.233222961 -35.233222961 -8.500000000 -33.233222961 --36.233222961 -7.500000000 -33.233222961 -35.233222961 -7.500000000 -33.233222961 --36.233222961 -6.500000000 -33.233222961 -35.233222961 -6.500000000 -33.233222961 --36.233222961 -5.500000000 -33.233222961 -35.233222961 -5.500000000 -33.233222961 --36.233222961 -4.500000000 -33.233222961 -35.233222961 -4.500000000 -33.233222961 --36.233222961 -3.500000000 -33.233222961 -35.233222961 -3.500000000 -33.233222961 --36.233222961 -2.500000000 -33.233222961 -35.233222961 -2.500000000 -33.233222961 --36.233222961 -1.500000000 -33.233222961 -35.233222961 -1.500000000 -33.233222961 --36.233222961 -0.500000000 -33.233222961 -35.233222961 -0.500000000 -33.233222961 --36.233222961 0.500000000 -33.233222961 -35.233222961 0.500000000 -33.233222961 --36.233222961 1.500000000 -33.233222961 -35.233222961 1.500000000 -33.233222961 --36.233222961 2.500000000 -33.233222961 -35.233222961 2.500000000 -33.233222961 --36.233222961 3.500000000 -33.233222961 -35.233222961 3.500000000 -33.233222961 --36.233222961 4.500000000 -33.233222961 -35.233222961 4.500000000 -33.233222961 --36.233222961 5.500000000 -33.233222961 -35.233222961 5.500000000 -33.233222961 --36.233222961 6.500000000 -33.233222961 -35.233222961 6.500000000 -33.233222961 --36.233222961 7.500000000 -33.233222961 -35.233222961 7.500000000 -33.233222961 --36.233222961 8.500000000 -33.233222961 -35.233222961 8.500000000 -33.233222961 --36.233222961 9.500000000 -33.233222961 -35.233222961 9.500000000 -33.233222961 --36.233222961 10.500000000 -33.233222961 -35.233222961 10.500000000 -33.233222961 --36.233222961 11.500000000 -33.233222961 -35.233222961 11.500000000 -33.233222961 --36.233222961 12.500000000 -33.233222961 -35.233222961 12.500000000 -33.233222961 --36.233222961 13.500000000 -33.233222961 -35.233222961 13.500000000 -33.233222961 --36.233222961 14.500000000 -33.233222961 -35.233222961 14.500000000 -33.233222961 --36.233222961 15.500000000 -33.233222961 -35.233222961 15.500000000 -33.233222961 --36.233222961 16.500000000 -33.233222961 -35.233222961 16.500000000 -33.233222961 --36.233222961 17.500000000 -33.233222961 -35.233222961 17.500000000 -33.233222961 --36.233222961 18.500000000 -33.233222961 -35.233222961 18.500000000 -33.233222961 --36.233222961 19.500000000 -33.233222961 -35.233222961 19.500000000 -33.233222961 --36.233222961 20.500000000 -33.233222961 -35.233222961 20.500000000 -33.233222961 --36.233222961 21.500000000 
-33.233222961 -35.233222961 21.500000000 -33.233222961 --36.233222961 22.500000000 -33.233222961 -35.233222961 22.500000000 -33.233222961 --36.233222961 23.500000000 -33.233222961 -35.233222961 23.500000000 -33.233222961 --36.233207703 24.499984741 -33.233219147 -35.233207703 24.499984741 -33.233219147 --36.233074188 25.499872208 -33.233173370 -35.233074188 25.499872208 -33.233173370 --36.232299805 26.499225616 -33.232868195 -35.232299805 26.499225616 -33.232868195 --36.228958130 27.496377945 -33.231605530 -35.228958130 27.496377945 -33.231605530 --36.217517853 28.486719131 -33.227874756 -35.217514038 28.486719131 -33.227878571 --36.185966492 29.460596085 -33.220714569 -35.185966492 29.460596085 -33.220718384 --36.112346649 30.405673981 -33.216838837 -35.112342834 30.405673981 -33.216842651 --35.954723358 31.334243774 -33.254272461 --35.670326233 31.736053467 -33.678352356 -34.670326233 31.736053467 -33.678352356 -34.954723358 31.334243774 -33.254276276 --35.846317291 32.030693054 -33.030689240 --35.498538971 32.424442291 -33.424442291 -34.498538971 32.424442291 -33.424442291 -34.846313477 32.030693054 -33.030693054 --35.057430267 33.231872559 -33.316947937 --34.565628052 33.565628052 -33.672851562 -33.565624237 33.565631866 -33.672851562 -34.057430267 33.231872559 -33.316947937 --34.772159576 33.772163391 -33.006500244 --34.231872559 34.057430267 -33.316947937 --33.424438477 34.498542786 -33.424438477 --32.736049652 34.670326233 -33.678352356 -31.736051559 34.670326233 -33.678356171 -32.424438477 34.498546600 -33.424442291 -33.231868744 34.057430267 -33.316951752 -33.772159576 33.772163391 -33.006500244 --33.030693054 34.846313477 -33.030693054 --32.334243774 34.954723358 -33.254276276 --31.405670166 35.112342834 -33.216838837 --30.460596085 35.185966492 -33.220722198 --29.486719131 35.217517853 -33.227870941 --28.496377945 35.228958130 -33.231597900 --27.499225616 35.232299805 -33.232860565 --26.499874115 35.233074188 -33.233165741 --25.499984741 35.233203888 -33.233219147 --24.500000000 35.233222961 -33.233222961 --23.500000000 35.233222961 -33.233222961 --22.500000000 35.233222961 -33.233222961 --21.500000000 35.233222961 -33.233222961 --20.500000000 35.233222961 -33.233222961 --19.500000000 35.233222961 -33.233222961 --18.500000000 35.233222961 -33.233222961 --17.500000000 35.233222961 -33.233222961 --16.500000000 35.233222961 -33.233222961 --15.500000000 35.233222961 -33.233222961 --14.500000000 35.233222961 -33.233222961 --13.500000000 35.233222961 -33.233222961 --12.500000000 35.233222961 -33.233222961 --11.500000000 35.233222961 -33.233222961 --10.500000000 35.233222961 -33.233222961 --9.500000000 35.233222961 -33.233222961 --8.500000000 35.233222961 -33.233222961 --7.500000000 35.233222961 -33.233222961 --6.500000000 35.233222961 -33.233222961 --5.500000000 35.233222961 -33.233222961 --4.500000000 35.233222961 -33.233222961 --3.500000000 35.233222961 -33.233222961 --2.500000000 35.233222961 -33.233222961 --1.500000000 35.233222961 -33.233222961 --0.500000000 35.233222961 -33.233222961 -0.500000000 35.233222961 -33.233222961 -1.500000000 35.233222961 -33.233222961 -2.500000000 35.233222961 -33.233222961 -3.500000000 35.233222961 -33.233222961 -4.500000000 35.233222961 -33.233222961 -5.500000000 35.233222961 -33.233222961 -6.500000000 35.233222961 -33.233222961 -7.500000000 35.233222961 -33.233222961 -8.500000000 35.233222961 -33.233222961 -9.500000000 35.233222961 -33.233222961 -10.500000000 35.233222961 -33.233222961 -11.500000000 35.233222961 -33.233222961 -12.500000000 
35.233222961 -33.233222961 -13.500000000 35.233222961 -33.233222961 -14.500000000 35.233222961 -33.233222961 -15.500000000 35.233222961 -33.233222961 -16.500000000 35.233222961 -33.233222961 -17.500000000 35.233222961 -33.233222961 -18.500000000 35.233222961 -33.233222961 -19.500000000 35.233222961 -33.233222961 -20.500000000 35.233222961 -33.233222961 -21.500000000 35.233222961 -33.233222961 -22.500000000 35.233222961 -33.233222961 -23.500000000 35.233222961 -33.233222961 -24.499984741 35.233203888 -33.233215332 -25.499872208 35.233074188 -33.233165741 -26.499225616 35.232299805 -33.232860565 -27.496377945 35.228958130 -33.231597900 -28.486719131 35.217517853 -33.227867126 -29.460596085 35.185966492 -33.220714569 -30.405673981 35.112346649 -33.216838837 -31.334243774 34.954723358 -33.254272461 -32.030693054 34.846317291 -33.030693054 --33.840934753 -35.797355652 -32.035343170 --33.254276276 -35.954723358 -32.334243774 --32.371025085 -36.154674530 -32.371025085 --31.438955307 -36.280395508 -32.375358582 --30.476007462 -36.344284058 -32.379222870 --29.492319107 -36.371490479 -32.382308960 --28.498050690 -36.381023407 -32.383811951 --27.499622345 -36.383720398 -32.384300232 --26.499948502 -36.384311676 -32.384403229 --25.499996185 -36.384407043 -32.384422302 --24.500000000 -36.384418488 -32.384422302 --23.500000000 -36.384418488 -32.384422302 --22.500000000 -36.384418488 -32.384422302 --21.500000000 -36.384418488 -32.384422302 --20.500000000 -36.384418488 -32.384422302 --19.500000000 -36.384418488 -32.384422302 --18.500000000 -36.384418488 -32.384422302 --17.500000000 -36.384418488 -32.384422302 --16.500000000 -36.384418488 -32.384422302 --15.500000000 -36.384418488 -32.384422302 --14.500000000 -36.384418488 -32.384422302 --13.500000000 -36.384418488 -32.384422302 --12.500000000 -36.384418488 -32.384422302 --11.500000000 -36.384418488 -32.384422302 --10.500000000 -36.384418488 -32.384422302 --9.500000000 -36.384418488 -32.384422302 --8.500000000 -36.384418488 -32.384422302 --7.500000000 -36.384418488 -32.384422302 --6.500000000 -36.384418488 -32.384422302 --5.500000000 -36.384418488 -32.384422302 --4.500000000 -36.384418488 -32.384422302 --3.500000000 -36.384418488 -32.384422302 --2.500000000 -36.384418488 -32.384422302 --1.500000000 -36.384418488 -32.384422302 --0.500000000 -36.384418488 -32.384422302 -0.500000000 -36.384418488 -32.384422302 -1.500000000 -36.384418488 -32.384422302 -2.500000000 -36.384418488 -32.384422302 -3.500000000 -36.384418488 -32.384422302 -4.500000000 -36.384418488 -32.384422302 -5.500000000 -36.384418488 -32.384422302 -6.500000000 -36.384418488 -32.384422302 -7.500000000 -36.384418488 -32.384422302 -8.500000000 -36.384418488 -32.384422302 -9.500000000 -36.384418488 -32.384422302 -10.500000000 -36.384418488 -32.384422302 -11.500000000 -36.384418488 -32.384422302 -12.500000000 -36.384418488 -32.384422302 -13.500000000 -36.384418488 -32.384422302 -14.500000000 -36.384418488 -32.384422302 -15.500000000 -36.384418488 -32.384422302 -16.500000000 -36.384418488 -32.384422302 -17.500000000 -36.384418488 -32.384422302 -18.500000000 -36.384418488 -32.384422302 -19.500000000 -36.384418488 -32.384422302 -20.500000000 -36.384418488 -32.384422302 -21.500000000 -36.384418488 -32.384422302 -22.500000000 -36.384418488 -32.384422302 -23.500000000 -36.384418488 -32.384422302 -24.499996185 -36.384407043 -32.384422302 -25.499948502 -36.384307861 -32.384407043 -26.499618530 -36.383720398 -32.384300232 -27.498050690 -36.381023407 -32.383811951 -28.492321014 -36.371490479 -32.382316589 
-29.476007462 -36.344280243 -32.379222870 -30.438953400 -36.280391693 -32.375358582 -31.371026993 -36.154674530 -32.371025085 -32.254276276 -35.954723358 -32.334247589 -32.840934753 -35.797351837 -32.035343170 --34.912067413 -34.912067413 -32.313598633 --34.260543823 -35.428531647 -32.380737305 --33.678352356 -35.670322418 -32.736049652 -32.678356171 -35.670322418 -32.736053467 -33.260543823 -35.428531647 -32.380737305 -33.912067413 -34.912059784 -32.313602448 --35.797355652 -33.840934753 -32.035343170 --35.428531647 -34.260547638 -32.380737305 -34.428535461 -34.260540009 -32.380737305 -34.797355652 -33.840934753 -32.035343170 --35.954719543 -33.254276276 -32.334243774 --35.670322418 -33.678352356 -32.736053467 -34.670322418 -33.678352356 -32.736053467 -34.954723358 -33.254276276 -32.334243774 --36.154674530 -32.371025085 -32.371025085 -35.154674530 -32.371025085 -32.371025085 --36.280395508 -31.438953400 -32.375358582 -35.280395508 -31.438953400 -32.375358582 --36.344284058 -30.476003647 -32.379226685 -35.344284058 -30.476007462 -32.379222870 --36.371490479 -29.492319107 -32.382312775 -35.371490479 -29.492319107 -32.382308960 --36.381023407 -28.498050690 -32.383808136 -35.381023407 -28.498050690 -32.383811951 --36.383724213 -27.499622345 -32.384296417 -35.383720398 -27.499622345 -32.384300232 --36.384311676 -26.499948502 -32.384403229 -35.384311676 -26.499948502 -32.384403229 --36.384407043 -25.499996185 -32.384418488 -35.384407043 -25.499996185 -32.384422302 --36.384422302 -24.500000000 -32.384422302 -35.384418488 -24.500000000 -32.384422302 --36.384422302 -23.500000000 -32.384422302 -35.384418488 -23.500000000 -32.384422302 --36.384422302 -22.500000000 -32.384422302 -35.384418488 -22.500000000 -32.384422302 --36.384422302 -21.500000000 -32.384422302 -35.384418488 -21.500000000 -32.384422302 --36.384422302 -20.500000000 -32.384422302 -35.384418488 -20.500000000 -32.384422302 --36.384422302 -19.500000000 -32.384422302 -35.384418488 -19.500000000 -32.384422302 --36.384422302 -18.500000000 -32.384422302 -35.384418488 -18.500000000 -32.384422302 --36.384422302 -17.500000000 -32.384422302 -35.384418488 -17.500000000 -32.384422302 --36.384422302 -16.500000000 -32.384422302 -35.384418488 -16.500000000 -32.384422302 --36.384422302 -15.500000000 -32.384422302 -35.384418488 -15.500000000 -32.384422302 --36.384422302 -14.500000000 -32.384422302 -35.384418488 -14.500000000 -32.384422302 --36.384422302 -13.500000000 -32.384422302 -35.384418488 -13.500000000 -32.384422302 --36.384422302 -12.500000000 -32.384422302 -35.384418488 -12.500000000 -32.384422302 --36.384422302 -11.500000000 -32.384422302 -35.384418488 -11.500000000 -32.384422302 --36.384422302 -10.500000000 -32.384422302 -35.384418488 -10.500000000 -32.384422302 --36.384422302 -9.500000000 -32.384422302 -35.384418488 -9.500000000 -32.384422302 --36.384422302 -8.500000000 -32.384422302 -35.384418488 -8.500000000 -32.384422302 --36.384422302 -7.500000000 -32.384422302 -35.384418488 -7.500000000 -32.384422302 --36.384422302 -6.500000000 -32.384422302 -35.384418488 -6.500000000 -32.384422302 --36.384422302 -5.500000000 -32.384422302 -35.384418488 -5.500000000 -32.384422302 --36.384422302 -4.500000000 -32.384422302 -35.384418488 -4.500000000 -32.384422302 --36.384422302 -3.500000000 -32.384422302 -35.384418488 -3.500000000 -32.384422302 --36.384422302 -2.500000000 -32.384422302 -35.384418488 -2.500000000 -32.384422302 --36.384422302 -1.500000000 -32.384422302 -35.384418488 -1.500000000 -32.384422302 --36.384422302 -0.500000000 -32.384422302 
-35.384418488 -0.500000000 -32.384422302 --36.384422302 0.500000000 -32.384422302 -35.384418488 0.500000000 -32.384422302 --36.384422302 1.500000000 -32.384422302 -35.384418488 1.500000000 -32.384422302 --36.384422302 2.500000000 -32.384422302 -35.384418488 2.500000000 -32.384422302 --36.384422302 3.500000000 -32.384422302 -35.384418488 3.500000000 -32.384422302 --36.384422302 4.500000000 -32.384422302 -35.384418488 4.500000000 -32.384422302 --36.384422302 5.500000000 -32.384422302 -35.384418488 5.500000000 -32.384422302 --36.384422302 6.500000000 -32.384422302 -35.384418488 6.500000000 -32.384422302 --36.384422302 7.500000000 -32.384422302 -35.384418488 7.500000000 -32.384422302 --36.384422302 8.500000000 -32.384422302 -35.384418488 8.500000000 -32.384422302 --36.384422302 9.500000000 -32.384422302 -35.384418488 9.500000000 -32.384422302 --36.384422302 10.500000000 -32.384422302 -35.384418488 10.500000000 -32.384422302 --36.384422302 11.500000000 -32.384422302 -35.384418488 11.500000000 -32.384422302 --36.384422302 12.500000000 -32.384422302 -35.384418488 12.500000000 -32.384422302 --36.384422302 13.500000000 -32.384422302 -35.384418488 13.500000000 -32.384422302 --36.384422302 14.500000000 -32.384422302 -35.384418488 14.500000000 -32.384422302 --36.384422302 15.500000000 -32.384422302 -35.384418488 15.500000000 -32.384422302 --36.384422302 16.500000000 -32.384422302 -35.384418488 16.500000000 -32.384422302 --36.384422302 17.500000000 -32.384422302 -35.384418488 17.500000000 -32.384422302 --36.384422302 18.500000000 -32.384422302 -35.384418488 18.500000000 -32.384422302 --36.384422302 19.500000000 -32.384422302 -35.384418488 19.500000000 -32.384422302 --36.384422302 20.500000000 -32.384422302 -35.384418488 20.500000000 -32.384422302 --36.384422302 21.500000000 -32.384422302 -35.384418488 21.500000000 -32.384422302 --36.384422302 22.500000000 -32.384422302 -35.384418488 22.500000000 -32.384422302 --36.384422302 23.500000000 -32.384422302 -35.384418488 23.500000000 -32.384422302 --36.384407043 24.499996185 -32.384418488 -35.384407043 24.499996185 -32.384422302 --36.384307861 25.499948502 -32.384403229 -35.384307861 25.499948502 -32.384407043 --36.383720398 26.499622345 -32.384300232 -35.383720398 26.499618530 -32.384300232 --36.381023407 27.498050690 -32.383811951 -35.381023407 27.498050690 -32.383811951 --36.371490479 28.492321014 -32.382312775 -35.371490479 28.492321014 -32.382316589 --36.344287872 29.476007462 -32.379222870 -35.344280243 29.476007462 -32.379222870 --36.280395508 30.438953400 -32.375358582 -35.280391693 30.438953400 -32.375358582 --36.154674530 31.371026993 -32.371025085 -35.154674530 31.371026993 -32.371025085 --35.954723358 32.254276276 -32.334243774 --35.670322418 32.678356171 -32.736049652 -34.670322418 32.678356171 -32.736053467 -34.954723358 32.254276276 -32.334247589 --35.797355652 32.840934753 -32.035339355 --35.428535461 33.260543823 -32.380737305 -34.428531647 33.260543823 -32.380737305 -34.797351837 32.840934753 -32.035343170 --34.912067413 33.912067413 -32.313598633 --34.260547638 34.428531647 -32.380737305 --33.678352356 34.670322418 -32.736053467 -32.678352356 34.670326233 -32.736053467 -33.260543823 34.428535461 -32.380737305 -33.912059784 33.912059784 -32.313602448 --33.840934753 34.797355652 -32.035343170 --33.254276276 34.954719543 -32.334243774 --32.371025085 35.154674530 -32.371025085 --31.438953400 35.280395508 -32.375358582 --30.476003647 35.344284058 -32.379226685 --29.492319107 35.371490479 -32.382312775 --28.498050690 35.381023407 -32.383808136 
--27.499622345 35.383724213 -32.384296417 --26.499948502 35.384307861 -32.384403229 --25.499996185 35.384407043 -32.384418488 --24.500000000 35.384422302 -32.384422302 --23.500000000 35.384422302 -32.384422302 --22.500000000 35.384422302 -32.384422302 --21.500000000 35.384422302 -32.384422302 --20.500000000 35.384422302 -32.384422302 --19.500000000 35.384422302 -32.384422302 --18.500000000 35.384422302 -32.384422302 --17.500000000 35.384422302 -32.384422302 --16.500000000 35.384422302 -32.384422302 --15.500000000 35.384422302 -32.384422302 --14.500000000 35.384422302 -32.384422302 --13.500000000 35.384422302 -32.384422302 --12.500000000 35.384422302 -32.384422302 --11.500000000 35.384422302 -32.384422302 --10.500000000 35.384422302 -32.384422302 --9.500000000 35.384422302 -32.384422302 --8.500000000 35.384422302 -32.384422302 --7.500000000 35.384422302 -32.384422302 --6.500000000 35.384422302 -32.384422302 --5.500000000 35.384422302 -32.384422302 --4.500000000 35.384422302 -32.384422302 --3.500000000 35.384422302 -32.384422302 --2.500000000 35.384422302 -32.384422302 --1.500000000 35.384422302 -32.384422302 --0.500000000 35.384422302 -32.384422302 -0.500000000 35.384422302 -32.384422302 -1.500000000 35.384422302 -32.384422302 -2.500000000 35.384422302 -32.384422302 -3.500000000 35.384422302 -32.384422302 -4.500000000 35.384422302 -32.384422302 -5.500000000 35.384422302 -32.384422302 -6.500000000 35.384422302 -32.384422302 -7.500000000 35.384422302 -32.384422302 -8.500000000 35.384422302 -32.384422302 -9.500000000 35.384422302 -32.384422302 -10.500000000 35.384422302 -32.384422302 -11.500000000 35.384422302 -32.384422302 -12.500000000 35.384422302 -32.384422302 -13.500000000 35.384422302 -32.384422302 -14.500000000 35.384422302 -32.384422302 -15.500000000 35.384422302 -32.384422302 -16.500000000 35.384422302 -32.384422302 -17.500000000 35.384422302 -32.384422302 -18.500000000 35.384422302 -32.384422302 -19.500000000 35.384422302 -32.384422302 -20.500000000 35.384422302 -32.384422302 -21.500000000 35.384422302 -32.384422302 -22.500000000 35.384422302 -32.384422302 -23.500000000 35.384422302 -32.384422302 -24.499996185 35.384407043 -32.384418488 -25.499948502 35.384307861 -32.384403229 -26.499622345 35.383720398 -32.384296417 -27.498050690 35.381023407 -32.383808136 -28.492321014 35.371490479 -32.382312775 -29.476007462 35.344287872 -32.379222870 -30.438953400 35.280395508 -32.375358582 -31.371026993 35.154674530 -32.371025085 -32.254276276 34.954723358 -32.334243774 -32.840934753 34.797355652 -32.035346985 --33.903377533 -35.879737854 -31.336599350 --33.216838837 -36.112346649 -31.405673981 --32.375358582 -36.280395508 -31.438953400 --31.451217651 -36.380989075 -31.451221466 --30.483785629 -36.430477142 -31.456085205 --29.495611191 -36.450424194 -31.458078384 --28.499073029 -36.456939697 -31.458770752 --27.499858856 -36.458606720 -31.458948135 --26.499988556 -36.458930969 -31.458978653 --25.500000000 -36.458976746 -31.458980560 --24.500000000 -36.458976746 -31.458980560 --23.500000000 -36.458976746 -31.458980560 --22.500000000 -36.458976746 -31.458980560 --21.500000000 -36.458976746 -31.458980560 --20.500000000 -36.458976746 -31.458980560 --19.500000000 -36.458976746 -31.458980560 --18.500000000 -36.458976746 -31.458980560 --17.500000000 -36.458976746 -31.458980560 --16.500000000 -36.458976746 -31.458980560 --15.500000000 -36.458976746 -31.458980560 --14.500000000 -36.458976746 -31.458980560 --13.500000000 -36.458976746 -31.458980560 --12.500000000 -36.458976746 -31.458980560 --11.500000000 
-36.458976746 -31.458980560 --10.500000000 -36.458976746 -31.458980560 --9.500000000 -36.458976746 -31.458980560 --8.500000000 -36.458976746 -31.458980560 --7.500000000 -36.458976746 -31.458980560 --6.500000000 -36.458976746 -31.458980560 --5.500000000 -36.458976746 -31.458980560 --4.500000000 -36.458976746 -31.458980560 --3.500000000 -36.458976746 -31.458980560 --2.500000000 -36.458976746 -31.458980560 --1.500000000 -36.458976746 -31.458980560 --0.500000000 -36.458976746 -31.458980560 -0.500000000 -36.458976746 -31.458980560 -1.500000000 -36.458976746 -31.458980560 -2.500000000 -36.458976746 -31.458980560 -3.500000000 -36.458976746 -31.458980560 -4.500000000 -36.458976746 -31.458980560 -5.500000000 -36.458976746 -31.458980560 -6.500000000 -36.458976746 -31.458980560 -7.500000000 -36.458976746 -31.458980560 -8.500000000 -36.458976746 -31.458980560 -9.500000000 -36.458976746 -31.458980560 -10.500000000 -36.458976746 -31.458980560 -11.500000000 -36.458976746 -31.458980560 -12.500000000 -36.458976746 -31.458980560 -13.500000000 -36.458976746 -31.458980560 -14.500000000 -36.458976746 -31.458980560 -15.500000000 -36.458976746 -31.458980560 -16.500000000 -36.458976746 -31.458980560 -17.500000000 -36.458976746 -31.458980560 -18.500000000 -36.458976746 -31.458980560 -19.500000000 -36.458976746 -31.458980560 -20.500000000 -36.458976746 -31.458980560 -21.500000000 -36.458976746 -31.458980560 -22.500000000 -36.458976746 -31.458980560 -23.500000000 -36.458976746 -31.458980560 -24.500000000 -36.458976746 -31.458980560 -25.499988556 -36.458930969 -31.458978653 -26.499858856 -36.458606720 -31.458948135 -27.499073029 -36.456939697 -31.458770752 -28.495611191 -36.450424194 -31.458080292 -29.483785629 -36.430473328 -31.456085205 -30.451217651 -36.380989075 -31.451217651 -31.375360489 -36.280395508 -31.438953400 -32.216842651 -36.112346649 -31.405673981 -32.903377533 -35.879737854 -31.336599350 --35.040893555 -35.040893555 -31.413045883 --34.477191925 -35.518615723 -31.410831451 -33.477191925 -35.518611908 -31.410831451 -34.040893555 -35.040893555 -31.413045883 --35.879737854 -33.903373718 -31.336599350 --35.518615723 -34.477191925 -31.410831451 -34.518611908 -34.477191925 -31.410831451 -34.879737854 -33.903381348 -31.336599350 --36.112342834 -33.216842651 -31.405673981 -35.112346649 -33.216842651 -31.405673981 --36.280395508 -32.375358582 -31.438953400 -35.280395508 -32.375358582 -31.438955307 --36.380989075 -31.451213837 -31.451221466 -35.380989075 -31.451217651 -31.451221466 --36.430477142 -30.483785629 -31.456089020 -35.430477142 -30.483785629 -31.456085205 --36.450424194 -29.495611191 -31.458080292 -35.450424194 -29.495611191 -31.458078384 --36.456943512 -28.499073029 -31.458770752 -35.456939697 -28.499073029 -31.458770752 --36.458606720 -27.499858856 -31.458948135 -35.458606720 -27.499858856 -31.458948135 --36.458934784 -26.499988556 -31.458974838 -35.458930969 -26.499988556 -31.458978653 --36.458976746 -25.500000000 -31.458978653 -35.458976746 -25.500000000 -31.458980560 --36.458984375 -24.500000000 -31.458978653 -35.458976746 -24.500000000 -31.458980560 --36.458984375 -23.500000000 -31.458978653 -35.458976746 -23.500000000 -31.458980560 --36.458984375 -22.500000000 -31.458978653 -35.458976746 -22.500000000 -31.458980560 --36.458984375 -21.500000000 -31.458978653 -35.458976746 -21.500000000 -31.458980560 --36.458984375 -20.500000000 -31.458978653 -35.458976746 -20.500000000 -31.458980560 --36.458984375 -19.500000000 -31.458978653 -35.458976746 -19.500000000 -31.458980560 --36.458984375 -18.500000000 
-31.458978653 -35.458976746 -18.500000000 -31.458980560 --36.458984375 -17.500000000 -31.458978653 -35.458976746 -17.500000000 -31.458980560 --36.458984375 -16.500000000 -31.458978653 -35.458976746 -16.500000000 -31.458980560 --36.458984375 -15.500000000 -31.458978653 -35.458976746 -15.500000000 -31.458980560 --36.458984375 -14.500000000 -31.458978653 -35.458976746 -14.500000000 -31.458980560 --36.458984375 -13.500000000 -31.458978653 -35.458976746 -13.500000000 -31.458980560 --36.458984375 -12.500000000 -31.458978653 -35.458976746 -12.500000000 -31.458980560 --36.458984375 -11.500000000 -31.458978653 -35.458976746 -11.500000000 -31.458980560 --36.458984375 -10.500000000 -31.458978653 -35.458976746 -10.500000000 -31.458980560 --36.458984375 -9.500000000 -31.458978653 -35.458976746 -9.500000000 -31.458980560 --36.458984375 -8.500000000 -31.458978653 -35.458976746 -8.500000000 -31.458980560 --36.458984375 -7.500000000 -31.458978653 -35.458976746 -7.500000000 -31.458980560 --36.458984375 -6.500000000 -31.458978653 -35.458976746 -6.500000000 -31.458980560 --36.458984375 -5.500000000 -31.458978653 -35.458976746 -5.500000000 -31.458980560 --36.458984375 -4.500000000 -31.458978653 -35.458976746 -4.500000000 -31.458980560 --36.458984375 -3.500000000 -31.458978653 -35.458976746 -3.500000000 -31.458980560 --36.458984375 -2.500000000 -31.458978653 -35.458976746 -2.500000000 -31.458980560 --36.458984375 -1.500000000 -31.458978653 -35.458976746 -1.500000000 -31.458980560 --36.458984375 -0.500000000 -31.458978653 -35.458976746 -0.500000000 -31.458980560 --36.458984375 0.500000000 -31.458978653 -35.458976746 0.500000000 -31.458980560 --36.458984375 1.500000000 -31.458978653 -35.458976746 1.500000000 -31.458980560 --36.458984375 2.500000000 -31.458978653 -35.458976746 2.500000000 -31.458980560 --36.458984375 3.500000000 -31.458978653 -35.458976746 3.500000000 -31.458980560 --36.458984375 4.500000000 -31.458978653 -35.458976746 4.500000000 -31.458980560 --36.458984375 5.500000000 -31.458978653 -35.458976746 5.500000000 -31.458980560 --36.458984375 6.500000000 -31.458978653 -35.458976746 6.500000000 -31.458980560 --36.458984375 7.500000000 -31.458978653 -35.458976746 7.500000000 -31.458980560 --36.458984375 8.500000000 -31.458978653 -35.458976746 8.500000000 -31.458980560 --36.458984375 9.500000000 -31.458978653 -35.458976746 9.500000000 -31.458980560 --36.458984375 10.500000000 -31.458978653 -35.458976746 10.500000000 -31.458980560 --36.458984375 11.500000000 -31.458978653 -35.458976746 11.500000000 -31.458980560 --36.458984375 12.500000000 -31.458978653 -35.458976746 12.500000000 -31.458980560 --36.458984375 13.500000000 -31.458978653 -35.458976746 13.500000000 -31.458980560 --36.458984375 14.500000000 -31.458978653 -35.458976746 14.500000000 -31.458980560 --36.458984375 15.500000000 -31.458978653 -35.458976746 15.500000000 -31.458980560 --36.458984375 16.500000000 -31.458978653 -35.458976746 16.500000000 -31.458980560 --36.458984375 17.500000000 -31.458978653 -35.458976746 17.500000000 -31.458980560 --36.458984375 18.500000000 -31.458978653 -35.458976746 18.500000000 -31.458980560 --36.458984375 19.500000000 -31.458978653 -35.458976746 19.500000000 -31.458980560 --36.458984375 20.500000000 -31.458978653 -35.458976746 20.500000000 -31.458980560 --36.458984375 21.500000000 -31.458978653 -35.458976746 21.500000000 -31.458980560 --36.458984375 22.500000000 -31.458978653 -35.458976746 22.500000000 -31.458980560 --36.458984375 23.500000000 -31.458978653 -35.458976746 23.500000000 -31.458980560 --36.458976746 
24.500000000 -31.458978653 -35.458976746 24.500000000 -31.458980560 --36.458934784 25.499988556 -31.458974838 -35.458930969 25.499988556 -31.458978653 --36.458606720 26.499858856 -31.458948135 -35.458606720 26.499858856 -31.458948135 --36.456943512 27.499073029 -31.458770752 -35.456939697 27.499073029 -31.458770752 --36.450424194 28.495611191 -31.458080292 -35.450424194 28.495611191 -31.458080292 --36.430473328 29.483785629 -31.456085205 -35.430473328 29.483785629 -31.456085205 --36.380989075 30.451217651 -31.451217651 -35.380989075 30.451217651 -31.451217651 --36.280395508 31.375360489 -31.438953400 -35.280395508 31.375360489 -31.438953400 --36.112342834 32.216842651 -31.405673981 -35.112346649 32.216842651 -31.405673981 --35.879737854 32.903377533 -31.336603165 --35.518615723 33.477191925 -31.410833359 -34.518611908 33.477191925 -31.410831451 -34.879737854 32.903377533 -31.336599350 --35.040893555 34.040893555 -31.413049698 --34.477191925 34.518615723 -31.410831451 -33.477191925 34.518611908 -31.410831451 -34.040893555 34.040893555 -31.413045883 --33.903373718 34.879737854 -31.336599350 --33.216842651 35.112342834 -31.405673981 --32.375358582 35.280395508 -31.438953400 --31.451213837 35.380989075 -31.451221466 --30.483785629 35.430477142 -31.456089020 --29.495611191 35.450424194 -31.458080292 --28.499073029 35.456943512 -31.458766937 --27.499858856 35.458606720 -31.458948135 --26.499988556 35.458934784 -31.458974838 --25.500000000 35.458976746 -31.458978653 --24.500000000 35.458984375 -31.458978653 --23.500000000 35.458984375 -31.458978653 --22.500000000 35.458984375 -31.458978653 --21.500000000 35.458984375 -31.458978653 --20.500000000 35.458984375 -31.458978653 --19.500000000 35.458984375 -31.458978653 --18.500000000 35.458984375 -31.458978653 --17.500000000 35.458984375 -31.458978653 --16.500000000 35.458984375 -31.458978653 --15.500000000 35.458984375 -31.458978653 --14.500000000 35.458984375 -31.458978653 --13.500000000 35.458984375 -31.458978653 --12.500000000 35.458984375 -31.458978653 --11.500000000 35.458984375 -31.458978653 --10.500000000 35.458984375 -31.458978653 --9.500000000 35.458984375 -31.458978653 --8.500000000 35.458984375 -31.458978653 --7.500000000 35.458984375 -31.458978653 --6.500000000 35.458984375 -31.458978653 --5.500000000 35.458984375 -31.458978653 --4.500000000 35.458984375 -31.458978653 --3.500000000 35.458984375 -31.458978653 --2.500000000 35.458984375 -31.458978653 --1.500000000 35.458984375 -31.458978653 --0.500000000 35.458984375 -31.458978653 -0.500000000 35.458984375 -31.458978653 -1.500000000 35.458984375 -31.458978653 -2.500000000 35.458984375 -31.458978653 -3.500000000 35.458984375 -31.458978653 -4.500000000 35.458984375 -31.458978653 -5.500000000 35.458984375 -31.458978653 -6.500000000 35.458984375 -31.458978653 -7.500000000 35.458984375 -31.458978653 -8.500000000 35.458984375 -31.458978653 -9.500000000 35.458984375 -31.458978653 -10.500000000 35.458984375 -31.458978653 -11.500000000 35.458984375 -31.458978653 -12.500000000 35.458984375 -31.458978653 -13.500000000 35.458984375 -31.458978653 -14.500000000 35.458984375 -31.458978653 -15.500000000 35.458984375 -31.458978653 -16.500000000 35.458984375 -31.458978653 -17.500000000 35.458984375 -31.458978653 -18.500000000 35.458984375 -31.458978653 -19.500000000 35.458984375 -31.458978653 -20.500000000 35.458984375 -31.458978653 -21.500000000 35.458984375 -31.458978653 -22.500000000 35.458984375 -31.458978653 -23.500000000 35.458984375 -31.458978653 -24.500000000 35.458976746 -31.458978653 -25.499988556 
35.458934784 -31.458974838 -26.499858856 35.458606720 -31.458948135 -27.499073029 35.456943512 -31.458770752 -28.495611191 35.450424194 -31.458078384 -29.483785629 35.430473328 -31.456085205 -30.451217651 35.380989075 -31.451217651 -31.375356674 35.280395508 -31.438953400 -32.216842651 35.112342834 -31.405673981 -32.903377533 34.879737854 -31.336599350 --33.946811676 -35.937088013 -30.442840576 --33.220714569 -36.185966492 -30.460596085 --32.379222870 -36.344284058 -30.476007462 --31.456081390 -36.430477142 -30.483789444 --30.486965179 -36.469234467 -30.486968994 --29.496957779 -36.483337402 -30.488048553 --28.499475479 -36.487373352 -30.488346100 --27.499938965 -36.488258362 -30.488399506 --26.499996185 -36.488391876 -30.488403320 --25.500000000 -36.488403320 -30.488403320 --24.500000000 -36.488403320 -30.488403320 --23.500000000 -36.488403320 -30.488403320 --22.500000000 -36.488403320 -30.488403320 --21.500000000 -36.488403320 -30.488403320 --20.500000000 -36.488403320 -30.488403320 --19.500000000 -36.488403320 -30.488403320 --18.500000000 -36.488403320 -30.488403320 --17.500000000 -36.488403320 -30.488403320 --16.500000000 -36.488403320 -30.488403320 --15.500000000 -36.488403320 -30.488403320 --14.500000000 -36.488403320 -30.488403320 --13.500000000 -36.488403320 -30.488403320 --12.500000000 -36.488403320 -30.488403320 --11.500000000 -36.488403320 -30.488403320 --10.500000000 -36.488403320 -30.488403320 --9.500000000 -36.488403320 -30.488403320 --8.500000000 -36.488403320 -30.488403320 --7.500000000 -36.488403320 -30.488403320 --6.500000000 -36.488403320 -30.488403320 --5.500000000 -36.488403320 -30.488403320 --4.500000000 -36.488403320 -30.488403320 --3.500000000 -36.488403320 -30.488403320 --2.500000000 -36.488403320 -30.488403320 --1.500000000 -36.488403320 -30.488403320 --0.500000000 -36.488403320 -30.488403320 -0.500000000 -36.488403320 -30.488403320 -1.500000000 -36.488403320 -30.488403320 -2.500000000 -36.488403320 -30.488403320 -3.500000000 -36.488403320 -30.488403320 -4.500000000 -36.488403320 -30.488403320 -5.500000000 -36.488403320 -30.488403320 -6.500000000 -36.488403320 -30.488403320 -7.500000000 -36.488403320 -30.488403320 -8.500000000 -36.488403320 -30.488403320 -9.500000000 -36.488403320 -30.488403320 -10.500000000 -36.488403320 -30.488403320 -11.500000000 -36.488403320 -30.488403320 -12.500000000 -36.488403320 -30.488403320 -13.500000000 -36.488403320 -30.488403320 -14.500000000 -36.488403320 -30.488403320 -15.500000000 -36.488403320 -30.488403320 -16.500000000 -36.488403320 -30.488403320 -17.500000000 -36.488403320 -30.488403320 -18.500000000 -36.488403320 -30.488403320 -19.500000000 -36.488403320 -30.488403320 -20.500000000 -36.488403320 -30.488403320 -21.500000000 -36.488403320 -30.488403320 -22.500000000 -36.488403320 -30.488403320 -23.500000000 -36.488403320 -30.488403320 -24.500000000 -36.488403320 -30.488403320 -25.499996185 -36.488391876 -30.488403320 -26.499938965 -36.488258362 -30.488399506 -27.499475479 -36.487373352 -30.488346100 -28.496959686 -36.483337402 -30.488048553 -29.486968994 -36.469234467 -30.486968994 -30.456085205 -36.430473328 -30.483785629 -31.379222870 -36.344284058 -30.476003647 -32.220714569 -36.185966492 -30.460596085 -32.946811676 -35.937088013 -30.442840576 --35.115646362 -35.115638733 -30.463253021 --34.567749023 -35.577739716 -30.459178925 -33.567749023 -35.577739716 -30.459178925 -34.115646362 -35.115642548 -30.463253021 --35.937091827 -33.946807861 -30.442840576 --35.577739716 -34.567741394 -30.459177017 -34.577739716 -34.567749023 
-30.459177017 -34.937091827 -33.946811676 -30.442840576 --36.185966492 -33.220714569 -30.460592270 -35.185966492 -33.220714569 -30.460596085 --36.344284058 -32.379222870 -30.476007462 -35.344284058 -32.379222870 -30.476007462 --36.430477142 -31.456085205 -30.483789444 -35.430477142 -31.456081390 -30.483789444 --36.469234467 -30.486968994 -30.486968994 -35.469234467 -30.486965179 -30.486968994 --36.483337402 -29.496957779 -30.488048553 -35.483337402 -29.496957779 -30.488048553 --36.487373352 -28.499475479 -30.488342285 -35.487373352 -28.499475479 -30.488346100 --36.488258362 -27.499938965 -30.488397598 -35.488258362 -27.499938965 -30.488399506 --36.488391876 -26.499996185 -30.488403320 -35.488391876 -26.499996185 -30.488403320 --36.488410950 -25.500000000 -30.488403320 -35.488403320 -25.500000000 -30.488403320 --36.488410950 -24.500000000 -30.488403320 -35.488403320 -24.500000000 -30.488403320 --36.488410950 -23.500000000 -30.488403320 -35.488403320 -23.500000000 -30.488403320 --36.488410950 -22.500000000 -30.488403320 -35.488403320 -22.500000000 -30.488403320 --36.488410950 -21.500000000 -30.488403320 -35.488403320 -21.500000000 -30.488403320 --36.488410950 -20.500000000 -30.488403320 -35.488403320 -20.500000000 -30.488403320 --36.488410950 -19.500000000 -30.488403320 -35.488403320 -19.500000000 -30.488403320 --36.488410950 -18.500000000 -30.488403320 -35.488403320 -18.500000000 -30.488403320 --36.488410950 -17.500000000 -30.488403320 -35.488403320 -17.500000000 -30.488403320 --36.488410950 -16.500000000 -30.488403320 -35.488403320 -16.500000000 -30.488403320 --36.488410950 -15.500000000 -30.488403320 -35.488403320 -15.500000000 -30.488403320 --36.488410950 -14.500000000 -30.488403320 -35.488403320 -14.500000000 -30.488403320 --36.488410950 -13.500000000 -30.488403320 -35.488403320 -13.500000000 -30.488403320 --36.488410950 -12.500000000 -30.488403320 -35.488403320 -12.500000000 -30.488403320 --36.488410950 -11.500000000 -30.488403320 -35.488403320 -11.500000000 -30.488403320 --36.488410950 -10.500000000 -30.488403320 -35.488403320 -10.500000000 -30.488403320 --36.488410950 -9.500000000 -30.488403320 -35.488403320 -9.500000000 -30.488403320 --36.488410950 -8.500000000 -30.488403320 -35.488403320 -8.500000000 -30.488403320 --36.488410950 -7.500000000 -30.488403320 -35.488403320 -7.500000000 -30.488403320 --36.488410950 -6.500000000 -30.488403320 -35.488403320 -6.500000000 -30.488403320 --36.488410950 -5.500000000 -30.488403320 -35.488403320 -5.500000000 -30.488403320 --36.488410950 -4.500000000 -30.488403320 -35.488403320 -4.500000000 -30.488403320 --36.488410950 -3.500000000 -30.488403320 -35.488403320 -3.500000000 -30.488403320 --36.488410950 -2.500000000 -30.488403320 -35.488403320 -2.500000000 -30.488403320 --36.488410950 -1.500000000 -30.488403320 -35.488403320 -1.500000000 -30.488403320 --36.488410950 -0.500000000 -30.488403320 -35.488403320 -0.500000000 -30.488403320 --36.488410950 0.500000000 -30.488403320 -35.488403320 0.500000000 -30.488403320 --36.488410950 1.500000000 -30.488403320 -35.488403320 1.500000000 -30.488403320 --36.488410950 2.500000000 -30.488403320 -35.488403320 2.500000000 -30.488403320 --36.488410950 3.500000000 -30.488403320 -35.488403320 3.500000000 -30.488403320 --36.488410950 4.500000000 -30.488403320 -35.488403320 4.500000000 -30.488403320 --36.488410950 5.500000000 -30.488403320 -35.488403320 5.500000000 -30.488403320 --36.488410950 6.500000000 -30.488403320 -35.488403320 6.500000000 -30.488403320 --36.488410950 7.500000000 -30.488403320 -35.488403320 
[numeric data block: x y z coordinate triplets (roughly ±25.5 to ±36.5) from a data file carried in this patch]
--33.981101990 34.981101990 -24.499998093 --33.233222961 35.233222961 -24.499998093 --32.384422302 35.384418488 -24.500000000 --31.458978653 35.458976746 -24.500000000 --30.488407135 35.488403320 -24.500000000 --29.497472763 35.497467041 -24.500000000 --28.499593735 35.499591827 -24.500000000 --27.499954224 35.499954224 -24.500000000 --26.499996185 35.499996185 -24.500000000 --25.500000000 35.500000000 -24.500000000 --24.500000000 35.500000000 -24.500000000 --23.500000000 35.500000000 -24.500000000 --22.500000000 35.500000000 -24.500000000 --21.500000000 35.500000000 -24.500000000 --20.500000000 35.500000000 -24.500000000 --19.500000000 35.500000000 -24.500000000 --18.500000000 35.500000000 -24.500000000 --17.500000000 35.500000000 -24.500000000 --16.500000000 35.500000000 -24.500000000 --15.500000000 35.500000000 -24.500000000 --14.500000000 35.500000000 -24.500000000 --13.500000000 35.500000000 -24.500000000 --12.500000000 35.500000000 -24.500000000 --11.500000000 35.500000000 -24.500000000 --10.500000000 35.500000000 -24.500000000 --9.500000000 35.500000000 -24.500000000 --8.500000000 35.500000000 -24.500000000 --7.500000000 35.500000000 -24.500000000 --6.500000000 35.500000000 -24.500000000 --5.500000000 35.500000000 -24.500000000 --4.500000000 35.500000000 -24.500000000 --3.500000000 35.500000000 -24.500000000 --2.500000000 35.500000000 -24.500000000 --1.500000000 35.500000000 -24.500000000 --0.500000000 35.500000000 -24.500000000 -0.500000000 35.500000000 -24.500000000 -1.500000000 35.500000000 -24.500000000 -2.500000000 35.500000000 -24.500000000 -3.500000000 35.500000000 -24.500000000 -4.500000000 35.500000000 -24.500000000 -5.500000000 35.500000000 -24.500000000 -6.500000000 35.500000000 -24.500000000 -7.500000000 35.500000000 -24.500000000 -8.500000000 35.500000000 -24.500000000 -9.500000000 35.500000000 -24.500000000 -10.500000000 35.500000000 -24.500000000 -11.500000000 35.500000000 -24.500000000 -12.500000000 35.500000000 -24.500000000 -13.500000000 35.500000000 -24.500000000 -14.500000000 35.500000000 -24.500000000 -15.500000000 35.500000000 -24.500000000 -16.500000000 35.500000000 -24.500000000 -17.500000000 35.500000000 -24.500000000 -18.500000000 35.500000000 -24.500000000 -19.500000000 35.500000000 -24.500000000 -20.500000000 35.500000000 -24.500000000 -21.500000000 35.500000000 -24.500000000 -22.500000000 35.500000000 -24.500000000 -23.500000000 35.500000000 -24.500000000 -24.500000000 35.500000000 -24.500000000 -25.499996185 35.499996185 -24.500000000 -26.499954224 35.499954224 -24.500000000 -27.499591827 35.499591827 -24.500000000 -28.497470856 35.497474670 -24.500000000 -29.488407135 35.488403320 -24.500000000 -30.458978653 35.458980560 -24.500000000 -31.384418488 35.384418488 -24.500000000 -32.233222961 35.233222961 -24.499998093 -32.981101990 34.981101990 -24.499998093 --33.981101990 -35.981101990 -23.500000000 --33.233226776 -36.233222961 -23.500000000 --32.384422302 -36.384418488 -23.500000000 --31.458978653 -36.458980560 -23.500000000 --30.488407135 -36.488403320 -23.500000000 --29.497472763 -36.497474670 -23.500000000 --28.499593735 -36.499591827 -23.500000000 --27.499954224 -36.499954224 -23.500000000 --26.499996185 -36.499996185 -23.500000000 --25.500000000 -36.500000000 -23.500000000 --24.500000000 -36.500000000 -23.500000000 --23.500000000 -36.500000000 -23.500000000 --22.500000000 -36.500000000 -23.500000000 --21.500000000 -36.500000000 -23.500000000 --20.500000000 -36.500000000 -23.500000000 --19.500000000 -36.500000000 -23.500000000 --18.500000000 
-36.500000000 -23.500000000 --17.500000000 -36.500000000 -23.500000000 --16.500000000 -36.500000000 -23.500000000 --15.500000000 -36.500000000 -23.500000000 --14.500000000 -36.500000000 -23.500000000 --13.500000000 -36.500000000 -23.500000000 --12.500000000 -36.500000000 -23.500000000 --11.500000000 -36.500000000 -23.500000000 --10.500000000 -36.500000000 -23.500000000 --9.500000000 -36.500000000 -23.500000000 --8.500000000 -36.500000000 -23.500000000 --7.500000000 -36.500000000 -23.500000000 --6.500000000 -36.500000000 -23.500000000 --5.500000000 -36.500000000 -23.500000000 --4.500000000 -36.500000000 -23.500000000 --3.500000000 -36.500000000 -23.500000000 --2.500000000 -36.500000000 -23.500000000 --1.500000000 -36.500000000 -23.500000000 --0.500000000 -36.500000000 -23.500000000 -0.500000000 -36.500000000 -23.500000000 -1.500000000 -36.500000000 -23.500000000 -2.500000000 -36.500000000 -23.500000000 -3.500000000 -36.500000000 -23.500000000 -4.500000000 -36.500000000 -23.500000000 -5.500000000 -36.500000000 -23.500000000 -6.500000000 -36.500000000 -23.500000000 -7.500000000 -36.500000000 -23.500000000 -8.500000000 -36.500000000 -23.500000000 -9.500000000 -36.500000000 -23.500000000 -10.500000000 -36.500000000 -23.500000000 -11.500000000 -36.500000000 -23.500000000 -12.500000000 -36.500000000 -23.500000000 -13.500000000 -36.500000000 -23.500000000 -14.500000000 -36.500000000 -23.500000000 -15.500000000 -36.500000000 -23.500000000 -16.500000000 -36.500000000 -23.500000000 -17.500000000 -36.500000000 -23.500000000 -18.500000000 -36.500000000 -23.500000000 -19.500000000 -36.500000000 -23.500000000 -20.500000000 -36.500000000 -23.500000000 -21.500000000 -36.500000000 -23.500000000 -22.500000000 -36.500000000 -23.500000000 -23.500000000 -36.500000000 -23.500000000 -24.500000000 -36.500000000 -23.500000000 -25.499996185 -36.499996185 -23.500000000 -26.499954224 -36.499954224 -23.500000000 -27.499591827 -36.499591827 -23.500000000 -28.497470856 -36.497467041 -23.500000000 -29.488407135 -36.488403320 -23.500000000 -30.458978653 -36.458980560 -23.500000000 -31.384418488 -36.384422302 -23.500000000 -32.233222961 -36.233222961 -23.500000000 -32.981101990 -35.981101990 -23.500000000 --35.167964935 -35.167964935 -23.500000000 --34.622871399 -35.622871399 -23.500000000 -33.622871399 -35.622871399 -23.500000000 -34.167964935 -35.167964935 -23.500000000 --35.981101990 -33.981101990 -23.500000000 --35.622871399 -34.622871399 -23.500000000 -34.622871399 -34.622871399 -23.500000000 -34.981101990 -33.981101990 -23.500000000 --36.233222961 -33.233222961 -23.500000000 -35.233222961 -33.233226776 -23.500000000 --36.384418488 -32.384422302 -23.500000000 -35.384418488 -32.384422302 -23.500000000 --36.458976746 -31.458978653 -23.500000000 -35.458980560 -31.458978653 -23.500000000 --36.488403320 -30.488407135 -23.500000000 -35.488403320 -30.488407135 -23.500000000 --36.497467041 -29.497472763 -23.500000000 -35.497474670 -29.497472763 -23.500000000 --36.499591827 -28.499593735 -23.500000000 -35.499591827 -28.499593735 -23.500000000 --36.499954224 -27.499954224 -23.500000000 -35.499954224 -27.499954224 -23.500000000 --36.499996185 -26.499996185 -23.500000000 -35.499996185 -26.499996185 -23.500000000 --36.500000000 -25.500000000 -23.500000000 -35.500000000 -25.500000000 -23.500000000 --36.500000000 -24.500000000 -23.500000000 -35.500000000 -24.500000000 -23.500000000 --36.500000000 -23.500000000 -23.500000000 -35.500000000 -23.500000000 -23.500000000 --36.500000000 -22.500000000 -23.500000000 -35.500000000 
-22.500000000 -23.500000000 --36.500000000 -21.500000000 -23.500000000 -35.500000000 -21.500000000 -23.500000000 --36.500000000 -20.500000000 -23.500000000 -35.500000000 -20.500000000 -23.500000000 --36.500000000 -19.500000000 -23.500000000 -35.500000000 -19.500000000 -23.500000000 --36.500000000 -18.500000000 -23.500000000 -35.500000000 -18.500000000 -23.500000000 --36.500000000 -17.500000000 -23.500000000 -35.500000000 -17.500000000 -23.500000000 --36.500000000 -16.500000000 -23.500000000 -35.500000000 -16.500000000 -23.500000000 --36.500000000 -15.500000000 -23.500000000 -35.500000000 -15.500000000 -23.500000000 --36.500000000 -14.500000000 -23.500000000 -35.500000000 -14.500000000 -23.500000000 --36.500000000 -13.500000000 -23.500000000 -35.500000000 -13.500000000 -23.500000000 --36.500000000 -12.500000000 -23.500000000 -35.500000000 -12.500000000 -23.500000000 --36.500000000 -11.500000000 -23.500000000 -35.500000000 -11.500000000 -23.500000000 --36.500000000 -10.500000000 -23.500000000 -35.500000000 -10.500000000 -23.500000000 --36.500000000 -9.500000000 -23.500000000 -35.500000000 -9.500000000 -23.500000000 --36.500000000 -8.500000000 -23.500000000 -35.500000000 -8.500000000 -23.500000000 --36.500000000 -7.500000000 -23.500000000 -35.500000000 -7.500000000 -23.500000000 --36.500000000 -6.500000000 -23.500000000 -35.500000000 -6.500000000 -23.500000000 --36.500000000 -5.500000000 -23.500000000 -35.500000000 -5.500000000 -23.500000000 --36.500000000 -4.500000000 -23.500000000 -35.500000000 -4.500000000 -23.500000000 --36.500000000 -3.500000000 -23.500000000 -35.500000000 -3.500000000 -23.500000000 --36.500000000 -2.500000000 -23.500000000 -35.500000000 -2.500000000 -23.500000000 --36.500000000 -1.500000000 -23.500000000 -35.500000000 -1.500000000 -23.500000000 --36.500000000 -0.500000000 -23.500000000 -35.500000000 -0.500000000 -23.500000000 --36.500000000 0.500000000 -23.500000000 -35.500000000 0.500000000 -23.500000000 --36.500000000 1.500000000 -23.500000000 -35.500000000 1.500000000 -23.500000000 --36.500000000 2.500000000 -23.500000000 -35.500000000 2.500000000 -23.500000000 --36.500000000 3.500000000 -23.500000000 -35.500000000 3.500000000 -23.500000000 --36.500000000 4.500000000 -23.500000000 -35.500000000 4.500000000 -23.500000000 --36.500000000 5.500000000 -23.500000000 -35.500000000 5.500000000 -23.500000000 --36.500000000 6.500000000 -23.500000000 -35.500000000 6.500000000 -23.500000000 --36.500000000 7.500000000 -23.500000000 -35.500000000 7.500000000 -23.500000000 --36.500000000 8.500000000 -23.500000000 -35.500000000 8.500000000 -23.500000000 --36.500000000 9.500000000 -23.500000000 -35.500000000 9.500000000 -23.500000000 --36.500000000 10.500000000 -23.500000000 -35.500000000 10.500000000 -23.500000000 --36.500000000 11.500000000 -23.500000000 -35.500000000 11.500000000 -23.500000000 --36.500000000 12.500000000 -23.500000000 -35.500000000 12.500000000 -23.500000000 --36.500000000 13.500000000 -23.500000000 -35.500000000 13.500000000 -23.500000000 --36.500000000 14.500000000 -23.500000000 -35.500000000 14.500000000 -23.500000000 --36.500000000 15.500000000 -23.500000000 -35.500000000 15.500000000 -23.500000000 --36.500000000 16.500000000 -23.500000000 -35.500000000 16.500000000 -23.500000000 --36.500000000 17.500000000 -23.500000000 -35.500000000 17.500000000 -23.500000000 --36.500000000 18.500000000 -23.500000000 -35.500000000 18.500000000 -23.500000000 --36.500000000 19.500000000 -23.500000000 -35.500000000 19.500000000 -23.500000000 --36.500000000 20.500000000 
-23.500000000 -35.500000000 20.500000000 -23.500000000 --36.500000000 21.500000000 -23.500000000 -35.500000000 21.500000000 -23.500000000 --36.500000000 22.500000000 -23.500000000 -35.500000000 22.500000000 -23.500000000 --36.500000000 23.500000000 -23.500000000 -35.500000000 23.500000000 -23.500000000 --36.500000000 24.500000000 -23.500000000 -35.500000000 24.500000000 -23.500000000 --36.499996185 25.499996185 -23.500000000 -35.499996185 25.499996185 -23.500000000 --36.499954224 26.499954224 -23.500000000 -35.499954224 26.499954224 -23.500000000 --36.499591827 27.499591827 -23.500000000 -35.499591827 27.499591827 -23.500000000 --36.497474670 28.497470856 -23.500000000 -35.497467041 28.497470856 -23.500000000 --36.488403320 29.488407135 -23.500000000 -35.488403320 29.488407135 -23.500000000 --36.458980560 30.458978653 -23.500000000 -35.458980560 30.458978653 -23.500000000 --36.384422302 31.384418488 -23.500000000 -35.384422302 31.384418488 -23.500000000 --36.233222961 32.233222961 -23.500000000 -35.233222961 32.233222961 -23.500000000 --35.981101990 32.981101990 -23.500000000 --35.622871399 33.622871399 -23.500000000 -34.622871399 33.622871399 -23.500000000 -34.981101990 32.981101990 -23.500000000 --35.167964935 34.167964935 -23.500000000 --34.622871399 34.622871399 -23.500000000 -33.622871399 34.622871399 -23.500000000 -34.167964935 34.167964935 -23.500000000 --33.981101990 34.981101990 -23.500000000 --33.233222961 35.233222961 -23.500000000 --32.384422302 35.384418488 -23.500000000 --31.458978653 35.458976746 -23.500000000 --30.488407135 35.488403320 -23.500000000 --29.497472763 35.497467041 -23.500000000 --28.499593735 35.499591827 -23.500000000 --27.499954224 35.499954224 -23.500000000 --26.499996185 35.499996185 -23.500000000 --25.500000000 35.500000000 -23.500000000 --24.500000000 35.500000000 -23.500000000 --23.500000000 35.500000000 -23.500000000 --22.500000000 35.500000000 -23.500000000 --21.500000000 35.500000000 -23.500000000 --20.500000000 35.500000000 -23.500000000 --19.500000000 35.500000000 -23.500000000 --18.500000000 35.500000000 -23.500000000 --17.500000000 35.500000000 -23.500000000 --16.500000000 35.500000000 -23.500000000 --15.500000000 35.500000000 -23.500000000 --14.500000000 35.500000000 -23.500000000 --13.500000000 35.500000000 -23.500000000 --12.500000000 35.500000000 -23.500000000 --11.500000000 35.500000000 -23.500000000 --10.500000000 35.500000000 -23.500000000 --9.500000000 35.500000000 -23.500000000 --8.500000000 35.500000000 -23.500000000 --7.500000000 35.500000000 -23.500000000 --6.500000000 35.500000000 -23.500000000 --5.500000000 35.500000000 -23.500000000 --4.500000000 35.500000000 -23.500000000 --3.500000000 35.500000000 -23.500000000 --2.500000000 35.500000000 -23.500000000 --1.500000000 35.500000000 -23.500000000 --0.500000000 35.500000000 -23.500000000 -0.500000000 35.500000000 -23.500000000 -1.500000000 35.500000000 -23.500000000 -2.500000000 35.500000000 -23.500000000 -3.500000000 35.500000000 -23.500000000 -4.500000000 35.500000000 -23.500000000 -5.500000000 35.500000000 -23.500000000 -6.500000000 35.500000000 -23.500000000 -7.500000000 35.500000000 -23.500000000 -8.500000000 35.500000000 -23.500000000 -9.500000000 35.500000000 -23.500000000 -10.500000000 35.500000000 -23.500000000 -11.500000000 35.500000000 -23.500000000 -12.500000000 35.500000000 -23.500000000 -13.500000000 35.500000000 -23.500000000 -14.500000000 35.500000000 -23.500000000 -15.500000000 35.500000000 -23.500000000 -16.500000000 35.500000000 -23.500000000 -17.500000000 
35.500000000 -23.500000000 -18.500000000 35.500000000 -23.500000000 -19.500000000 35.500000000 -23.500000000 -20.500000000 35.500000000 -23.500000000 -21.500000000 35.500000000 -23.500000000 -22.500000000 35.500000000 -23.500000000 -23.500000000 35.500000000 -23.500000000 -24.500000000 35.500000000 -23.500000000 -25.499996185 35.499996185 -23.500000000 -26.499954224 35.499954224 -23.500000000 -27.499591827 35.499591827 -23.500000000 -28.497470856 35.497474670 -23.500000000 -29.488407135 35.488403320 -23.500000000 -30.458978653 35.458980560 -23.500000000 -31.384418488 35.384422302 -23.500000000 -32.233222961 35.233222961 -23.500000000 -32.981101990 34.981101990 -23.500000000 --33.981101990 -35.981101990 -22.500000000 --33.233226776 -36.233222961 -22.500000000 --32.384422302 -36.384418488 -22.500000000 --31.458978653 -36.458980560 -22.500000000 --30.488407135 -36.488403320 -22.500000000 --29.497472763 -36.497474670 -22.500000000 --28.499593735 -36.499591827 -22.500000000 --27.499954224 -36.499954224 -22.500000000 --26.499996185 -36.499996185 -22.500000000 --25.500000000 -36.500000000 -22.500000000 --24.500000000 -36.500000000 -22.500000000 --23.500000000 -36.500000000 -22.500000000 --22.500000000 -36.500000000 -22.500000000 --21.500000000 -36.500000000 -22.500000000 --20.500000000 -36.500000000 -22.500000000 --19.500000000 -36.500000000 -22.500000000 --18.500000000 -36.500000000 -22.500000000 --17.500000000 -36.500000000 -22.500000000 --16.500000000 -36.500000000 -22.500000000 --15.500000000 -36.500000000 -22.500000000 --14.500000000 -36.500000000 -22.500000000 --13.500000000 -36.500000000 -22.500000000 --12.500000000 -36.500000000 -22.500000000 --11.500000000 -36.500000000 -22.500000000 --10.500000000 -36.500000000 -22.500000000 --9.500000000 -36.500000000 -22.500000000 --8.500000000 -36.500000000 -22.500000000 --7.500000000 -36.500000000 -22.500000000 --6.500000000 -36.500000000 -22.500000000 --5.500000000 -36.500000000 -22.500000000 --4.500000000 -36.500000000 -22.500000000 --3.500000000 -36.500000000 -22.500000000 --2.500000000 -36.500000000 -22.500000000 --1.500000000 -36.500000000 -22.500000000 --0.500000000 -36.500000000 -22.500000000 -0.500000000 -36.500000000 -22.500000000 -1.500000000 -36.500000000 -22.500000000 -2.500000000 -36.500000000 -22.500000000 -3.500000000 -36.500000000 -22.500000000 -4.500000000 -36.500000000 -22.500000000 -5.500000000 -36.500000000 -22.500000000 -6.500000000 -36.500000000 -22.500000000 -7.500000000 -36.500000000 -22.500000000 -8.500000000 -36.500000000 -22.500000000 -9.500000000 -36.500000000 -22.500000000 -10.500000000 -36.500000000 -22.500000000 -11.500000000 -36.500000000 -22.500000000 -12.500000000 -36.500000000 -22.500000000 -13.500000000 -36.500000000 -22.500000000 -14.500000000 -36.500000000 -22.500000000 -15.500000000 -36.500000000 -22.500000000 -16.500000000 -36.500000000 -22.500000000 -17.500000000 -36.500000000 -22.500000000 -18.500000000 -36.500000000 -22.500000000 -19.500000000 -36.500000000 -22.500000000 -20.500000000 -36.500000000 -22.500000000 -21.500000000 -36.500000000 -22.500000000 -22.500000000 -36.500000000 -22.500000000 -23.500000000 -36.500000000 -22.500000000 -24.500000000 -36.500000000 -22.500000000 -25.499996185 -36.499996185 -22.500000000 -26.499954224 -36.499954224 -22.500000000 -27.499591827 -36.499591827 -22.500000000 -28.497470856 -36.497467041 -22.500000000 -29.488407135 -36.488403320 -22.500000000 -30.458978653 -36.458980560 -22.500000000 -31.384418488 -36.384422302 -22.500000000 -32.233222961 -36.233222961 -22.500000000 
-32.981101990 -35.981101990 -22.500000000 --35.167964935 -35.167964935 -22.500000000 --34.622871399 -35.622871399 -22.500000000 -33.622871399 -35.622871399 -22.500000000 -34.167964935 -35.167964935 -22.500000000 --35.981101990 -33.981101990 -22.500000000 --35.622871399 -34.622871399 -22.500000000 -34.622871399 -34.622871399 -22.500000000 -34.981101990 -33.981101990 -22.500000000 --36.233222961 -33.233222961 -22.500000000 -35.233222961 -33.233226776 -22.500000000 --36.384418488 -32.384422302 -22.500000000 -35.384418488 -32.384422302 -22.500000000 --36.458976746 -31.458978653 -22.500000000 -35.458980560 -31.458978653 -22.500000000 --36.488403320 -30.488407135 -22.500000000 -35.488403320 -30.488407135 -22.500000000 --36.497467041 -29.497472763 -22.500000000 -35.497474670 -29.497472763 -22.500000000 --36.499591827 -28.499593735 -22.500000000 -35.499591827 -28.499593735 -22.500000000 --36.499954224 -27.499954224 -22.500000000 -35.499954224 -27.499954224 -22.500000000 --36.499996185 -26.499996185 -22.500000000 -35.499996185 -26.499996185 -22.500000000 --36.500000000 -25.500000000 -22.500000000 -35.500000000 -25.500000000 -22.500000000 --36.500000000 -24.500000000 -22.500000000 -35.500000000 -24.500000000 -22.500000000 --36.500000000 -23.500000000 -22.500000000 -35.500000000 -23.500000000 -22.500000000 --36.500000000 -22.500000000 -22.500000000 -35.500000000 -22.500000000 -22.500000000 --36.500000000 -21.500000000 -22.500000000 -35.500000000 -21.500000000 -22.500000000 --36.500000000 -20.500000000 -22.500000000 -35.500000000 -20.500000000 -22.500000000 --36.500000000 -19.500000000 -22.500000000 -35.500000000 -19.500000000 -22.500000000 --36.500000000 -18.500000000 -22.500000000 -35.500000000 -18.500000000 -22.500000000 --36.500000000 -17.500000000 -22.500000000 -35.500000000 -17.500000000 -22.500000000 --36.500000000 -16.500000000 -22.500000000 -35.500000000 -16.500000000 -22.500000000 --36.500000000 -15.500000000 -22.500000000 -35.500000000 -15.500000000 -22.500000000 --36.500000000 -14.500000000 -22.500000000 -35.500000000 -14.500000000 -22.500000000 --36.500000000 -13.500000000 -22.500000000 -35.500000000 -13.500000000 -22.500000000 --36.500000000 -12.500000000 -22.500000000 -35.500000000 -12.500000000 -22.500000000 --36.500000000 -11.500000000 -22.500000000 -35.500000000 -11.500000000 -22.500000000 --36.500000000 -10.500000000 -22.500000000 -35.500000000 -10.500000000 -22.500000000 --36.500000000 -9.500000000 -22.500000000 -35.500000000 -9.500000000 -22.500000000 --36.500000000 -8.500000000 -22.500000000 -35.500000000 -8.500000000 -22.500000000 --36.500000000 -7.500000000 -22.500000000 -35.500000000 -7.500000000 -22.500000000 --36.500000000 -6.500000000 -22.500000000 -35.500000000 -6.500000000 -22.500000000 --36.500000000 -5.500000000 -22.500000000 -35.500000000 -5.500000000 -22.500000000 --36.500000000 -4.500000000 -22.500000000 -35.500000000 -4.500000000 -22.500000000 --36.500000000 -3.500000000 -22.500000000 -35.500000000 -3.500000000 -22.500000000 --36.500000000 -2.500000000 -22.500000000 -35.500000000 -2.500000000 -22.500000000 --36.500000000 -1.500000000 -22.500000000 -35.500000000 -1.500000000 -22.500000000 --36.500000000 -0.500000000 -22.500000000 -35.500000000 -0.500000000 -22.500000000 --36.500000000 0.500000000 -22.500000000 -35.500000000 0.500000000 -22.500000000 --36.500000000 1.500000000 -22.500000000 -35.500000000 1.500000000 -22.500000000 --36.500000000 2.500000000 -22.500000000 -35.500000000 2.500000000 -22.500000000 --36.500000000 3.500000000 -22.500000000 -35.500000000 
3.500000000 -22.500000000 --36.500000000 4.500000000 -22.500000000 -35.500000000 4.500000000 -22.500000000 --36.500000000 5.500000000 -22.500000000 -35.500000000 5.500000000 -22.500000000 --36.500000000 6.500000000 -22.500000000 -35.500000000 6.500000000 -22.500000000 --36.500000000 7.500000000 -22.500000000 -35.500000000 7.500000000 -22.500000000 --36.500000000 8.500000000 -22.500000000 -35.500000000 8.500000000 -22.500000000 --36.500000000 9.500000000 -22.500000000 -35.500000000 9.500000000 -22.500000000 --36.500000000 10.500000000 -22.500000000 -35.500000000 10.500000000 -22.500000000 --36.500000000 11.500000000 -22.500000000 -35.500000000 11.500000000 -22.500000000 --36.500000000 12.500000000 -22.500000000 -35.500000000 12.500000000 -22.500000000 --36.500000000 13.500000000 -22.500000000 -35.500000000 13.500000000 -22.500000000 --36.500000000 14.500000000 -22.500000000 -35.500000000 14.500000000 -22.500000000 --36.500000000 15.500000000 -22.500000000 -35.500000000 15.500000000 -22.500000000 --36.500000000 16.500000000 -22.500000000 -35.500000000 16.500000000 -22.500000000 --36.500000000 17.500000000 -22.500000000 -35.500000000 17.500000000 -22.500000000 --36.500000000 18.500000000 -22.500000000 -35.500000000 18.500000000 -22.500000000 --36.500000000 19.500000000 -22.500000000 -35.500000000 19.500000000 -22.500000000 --36.500000000 20.500000000 -22.500000000 -35.500000000 20.500000000 -22.500000000 --36.500000000 21.500000000 -22.500000000 -35.500000000 21.500000000 -22.500000000 --36.500000000 22.500000000 -22.500000000 -35.500000000 22.500000000 -22.500000000 --36.500000000 23.500000000 -22.500000000 -35.500000000 23.500000000 -22.500000000 --36.500000000 24.500000000 -22.500000000 -35.500000000 24.500000000 -22.500000000 --36.499996185 25.499996185 -22.500000000 -35.499996185 25.499996185 -22.500000000 --36.499954224 26.499954224 -22.500000000 -35.499954224 26.499954224 -22.500000000 --36.499591827 27.499591827 -22.500000000 -35.499591827 27.499591827 -22.500000000 --36.497474670 28.497470856 -22.500000000 -35.497467041 28.497470856 -22.500000000 --36.488403320 29.488407135 -22.500000000 -35.488403320 29.488407135 -22.500000000 --36.458980560 30.458978653 -22.500000000 -35.458980560 30.458978653 -22.500000000 --36.384422302 31.384418488 -22.500000000 -35.384422302 31.384418488 -22.500000000 --36.233222961 32.233222961 -22.500000000 -35.233222961 32.233222961 -22.500000000 --35.981101990 32.981101990 -22.500000000 --35.622871399 33.622871399 -22.500000000 -34.622871399 33.622871399 -22.500000000 -34.981101990 32.981101990 -22.500000000 --35.167964935 34.167964935 -22.500000000 --34.622871399 34.622871399 -22.500000000 -33.622871399 34.622871399 -22.500000000 -34.167964935 34.167964935 -22.500000000 --33.981101990 34.981101990 -22.500000000 --33.233222961 35.233222961 -22.500000000 --32.384422302 35.384418488 -22.500000000 --31.458978653 35.458976746 -22.500000000 --30.488407135 35.488403320 -22.500000000 --29.497472763 35.497467041 -22.500000000 --28.499593735 35.499591827 -22.500000000 --27.499954224 35.499954224 -22.500000000 --26.499996185 35.499996185 -22.500000000 --25.500000000 35.500000000 -22.500000000 --24.500000000 35.500000000 -22.500000000 --23.500000000 35.500000000 -22.500000000 --22.500000000 35.500000000 -22.500000000 --21.500000000 35.500000000 -22.500000000 --20.500000000 35.500000000 -22.500000000 --19.500000000 35.500000000 -22.500000000 --18.500000000 35.500000000 -22.500000000 --17.500000000 35.500000000 -22.500000000 --16.500000000 35.500000000 -22.500000000 
--15.500000000 35.500000000 -22.500000000 --14.500000000 35.500000000 -22.500000000 --13.500000000 35.500000000 -22.500000000 --12.500000000 35.500000000 -22.500000000 --11.500000000 35.500000000 -22.500000000 --10.500000000 35.500000000 -22.500000000 --9.500000000 35.500000000 -22.500000000 --8.500000000 35.500000000 -22.500000000 --7.500000000 35.500000000 -22.500000000 --6.500000000 35.500000000 -22.500000000 --5.500000000 35.500000000 -22.500000000 --4.500000000 35.500000000 -22.500000000 --3.500000000 35.500000000 -22.500000000 --2.500000000 35.500000000 -22.500000000 --1.500000000 35.500000000 -22.500000000 --0.500000000 35.500000000 -22.500000000 -0.500000000 35.500000000 -22.500000000 -1.500000000 35.500000000 -22.500000000 -2.500000000 35.500000000 -22.500000000 -3.500000000 35.500000000 -22.500000000 -4.500000000 35.500000000 -22.500000000 -5.500000000 35.500000000 -22.500000000 -6.500000000 35.500000000 -22.500000000 -7.500000000 35.500000000 -22.500000000 -8.500000000 35.500000000 -22.500000000 -9.500000000 35.500000000 -22.500000000 -10.500000000 35.500000000 -22.500000000 -11.500000000 35.500000000 -22.500000000 -12.500000000 35.500000000 -22.500000000 -13.500000000 35.500000000 -22.500000000 -14.500000000 35.500000000 -22.500000000 -15.500000000 35.500000000 -22.500000000 -16.500000000 35.500000000 -22.500000000 -17.500000000 35.500000000 -22.500000000 -18.500000000 35.500000000 -22.500000000 -19.500000000 35.500000000 -22.500000000 -20.500000000 35.500000000 -22.500000000 -21.500000000 35.500000000 -22.500000000 -22.500000000 35.500000000 -22.500000000 -23.500000000 35.500000000 -22.500000000 -24.500000000 35.500000000 -22.500000000 -25.499996185 35.499996185 -22.500000000 -26.499954224 35.499954224 -22.500000000 -27.499591827 35.499591827 -22.500000000 -28.497470856 35.497474670 -22.500000000 -29.488407135 35.488403320 -22.500000000 -30.458978653 35.458980560 -22.500000000 -31.384418488 35.384422302 -22.500000000 -32.233222961 35.233222961 -22.500000000 -32.981101990 34.981101990 -22.500000000 --33.981101990 -35.981101990 -21.500000000 --33.233226776 -36.233222961 -21.500000000 --32.384422302 -36.384418488 -21.500000000 --31.458978653 -36.458980560 -21.500000000 --30.488407135 -36.488403320 -21.500000000 --29.497472763 -36.497474670 -21.500000000 --28.499593735 -36.499591827 -21.500000000 --27.499954224 -36.499954224 -21.500000000 --26.499996185 -36.499996185 -21.500000000 --25.500000000 -36.500000000 -21.500000000 --24.500000000 -36.500000000 -21.500000000 --23.500000000 -36.500000000 -21.500000000 --22.500000000 -36.500000000 -21.500000000 --21.500000000 -36.500000000 -21.500000000 --20.500000000 -36.500000000 -21.500000000 --19.500000000 -36.500000000 -21.500000000 --18.500000000 -36.500000000 -21.500000000 --17.500000000 -36.500000000 -21.500000000 --16.500000000 -36.500000000 -21.500000000 --15.500000000 -36.500000000 -21.500000000 --14.500000000 -36.500000000 -21.500000000 --13.500000000 -36.500000000 -21.500000000 --12.500000000 -36.500000000 -21.500000000 --11.500000000 -36.500000000 -21.500000000 --10.500000000 -36.500000000 -21.500000000 --9.500000000 -36.500000000 -21.500000000 --8.500000000 -36.500000000 -21.500000000 --7.500000000 -36.500000000 -21.500000000 --6.500000000 -36.500000000 -21.500000000 --5.500000000 -36.500000000 -21.500000000 --4.500000000 -36.500000000 -21.500000000 --3.500000000 -36.500000000 -21.500000000 --2.500000000 -36.500000000 -21.500000000 --1.500000000 -36.500000000 -21.500000000 --0.500000000 -36.500000000 -21.500000000 -0.500000000 
-36.500000000 -21.500000000 -1.500000000 -36.500000000 -21.500000000 -2.500000000 -36.500000000 -21.500000000 -3.500000000 -36.500000000 -21.500000000 -4.500000000 -36.500000000 -21.500000000 -5.500000000 -36.500000000 -21.500000000 -6.500000000 -36.500000000 -21.500000000 -7.500000000 -36.500000000 -21.500000000 -8.500000000 -36.500000000 -21.500000000 -9.500000000 -36.500000000 -21.500000000 -10.500000000 -36.500000000 -21.500000000 -11.500000000 -36.500000000 -21.500000000 -12.500000000 -36.500000000 -21.500000000 -13.500000000 -36.500000000 -21.500000000 -14.500000000 -36.500000000 -21.500000000 -15.500000000 -36.500000000 -21.500000000 -16.500000000 -36.500000000 -21.500000000 -17.500000000 -36.500000000 -21.500000000 -18.500000000 -36.500000000 -21.500000000 -19.500000000 -36.500000000 -21.500000000 -20.500000000 -36.500000000 -21.500000000 -21.500000000 -36.500000000 -21.500000000 -22.500000000 -36.500000000 -21.500000000 -23.500000000 -36.500000000 -21.500000000 -24.500000000 -36.500000000 -21.500000000 -25.499996185 -36.499996185 -21.500000000 -26.499954224 -36.499954224 -21.500000000 -27.499591827 -36.499591827 -21.500000000 -28.497470856 -36.497467041 -21.500000000 -29.488407135 -36.488403320 -21.500000000 -30.458978653 -36.458980560 -21.500000000 -31.384418488 -36.384422302 -21.500000000 -32.233222961 -36.233222961 -21.500000000 -32.981101990 -35.981101990 -21.500000000 --35.167964935 -35.167964935 -21.500000000 --34.622871399 -35.622871399 -21.500000000 -33.622871399 -35.622871399 -21.500000000 -34.167964935 -35.167964935 -21.500000000 --35.981101990 -33.981101990 -21.500000000 --35.622871399 -34.622871399 -21.500000000 -34.622871399 -34.622871399 -21.500000000 -34.981101990 -33.981101990 -21.500000000 --36.233222961 -33.233222961 -21.500000000 -35.233222961 -33.233226776 -21.500000000 --36.384418488 -32.384422302 -21.500000000 -35.384418488 -32.384422302 -21.500000000 --36.458976746 -31.458978653 -21.500000000 -35.458980560 -31.458978653 -21.500000000 --36.488403320 -30.488407135 -21.500000000 -35.488403320 -30.488407135 -21.500000000 --36.497467041 -29.497472763 -21.500000000 -35.497474670 -29.497472763 -21.500000000 --36.499591827 -28.499593735 -21.500000000 -35.499591827 -28.499593735 -21.500000000 --36.499954224 -27.499954224 -21.500000000 -35.499954224 -27.499954224 -21.500000000 --36.499996185 -26.499996185 -21.500000000 -35.499996185 -26.499996185 -21.500000000 --36.500000000 -25.500000000 -21.500000000 -35.500000000 -25.500000000 -21.500000000 --36.500000000 -24.500000000 -21.500000000 -35.500000000 -24.500000000 -21.500000000 --36.500000000 -23.500000000 -21.500000000 -35.500000000 -23.500000000 -21.500000000 --36.500000000 -22.500000000 -21.500000000 -35.500000000 -22.500000000 -21.500000000 --36.500000000 -21.500000000 -21.500000000 -35.500000000 -21.500000000 -21.500000000 --36.500000000 -20.500000000 -21.500000000 -35.500000000 -20.500000000 -21.500000000 --36.500000000 -19.500000000 -21.500000000 -35.500000000 -19.500000000 -21.500000000 --36.500000000 -18.500000000 -21.500000000 -35.500000000 -18.500000000 -21.500000000 --36.500000000 -17.500000000 -21.500000000 -35.500000000 -17.500000000 -21.500000000 --36.500000000 -16.500000000 -21.500000000 -35.500000000 -16.500000000 -21.500000000 --36.500000000 -15.500000000 -21.500000000 -35.500000000 -15.500000000 -21.500000000 --36.500000000 -14.500000000 -21.500000000 -35.500000000 -14.500000000 -21.500000000 --36.500000000 -13.500000000 -21.500000000 -35.500000000 -13.500000000 -21.500000000 --36.500000000 
-12.500000000 -21.500000000 -35.500000000 -12.500000000 -21.500000000 --36.500000000 -11.500000000 -21.500000000 -35.500000000 -11.500000000 -21.500000000 --36.500000000 -10.500000000 -21.500000000 -35.500000000 -10.500000000 -21.500000000 --36.500000000 -9.500000000 -21.500000000 -35.500000000 -9.500000000 -21.500000000 --36.500000000 -8.500000000 -21.500000000 -35.500000000 -8.500000000 -21.500000000 --36.500000000 -7.500000000 -21.500000000 -35.500000000 -7.500000000 -21.500000000 --36.500000000 -6.500000000 -21.500000000 -35.500000000 -6.500000000 -21.500000000 --36.500000000 -5.500000000 -21.500000000 -35.500000000 -5.500000000 -21.500000000 --36.500000000 -4.500000000 -21.500000000 -35.500000000 -4.500000000 -21.500000000 --36.500000000 -3.500000000 -21.500000000 -35.500000000 -3.500000000 -21.500000000 --36.500000000 -2.500000000 -21.500000000 -35.500000000 -2.500000000 -21.500000000 --36.500000000 -1.500000000 -21.500000000 -35.500000000 -1.500000000 -21.500000000 --36.500000000 -0.500000000 -21.500000000 -35.500000000 -0.500000000 -21.500000000 --36.500000000 0.500000000 -21.500000000 -35.500000000 0.500000000 -21.500000000 --36.500000000 1.500000000 -21.500000000 -35.500000000 1.500000000 -21.500000000 --36.500000000 2.500000000 -21.500000000 -35.500000000 2.500000000 -21.500000000 --36.500000000 3.500000000 -21.500000000 -35.500000000 3.500000000 -21.500000000 --36.500000000 4.500000000 -21.500000000 -35.500000000 4.500000000 -21.500000000 --36.500000000 5.500000000 -21.500000000 -35.500000000 5.500000000 -21.500000000 --36.500000000 6.500000000 -21.500000000 -35.500000000 6.500000000 -21.500000000 --36.500000000 7.500000000 -21.500000000 -35.500000000 7.500000000 -21.500000000 --36.500000000 8.500000000 -21.500000000 -35.500000000 8.500000000 -21.500000000 --36.500000000 9.500000000 -21.500000000 -35.500000000 9.500000000 -21.500000000 --36.500000000 10.500000000 -21.500000000 -35.500000000 10.500000000 -21.500000000 --36.500000000 11.500000000 -21.500000000 -35.500000000 11.500000000 -21.500000000 --36.500000000 12.500000000 -21.500000000 -35.500000000 12.500000000 -21.500000000 --36.500000000 13.500000000 -21.500000000 -35.500000000 13.500000000 -21.500000000 --36.500000000 14.500000000 -21.500000000 -35.500000000 14.500000000 -21.500000000 --36.500000000 15.500000000 -21.500000000 -35.500000000 15.500000000 -21.500000000 --36.500000000 16.500000000 -21.500000000 -35.500000000 16.500000000 -21.500000000 --36.500000000 17.500000000 -21.500000000 -35.500000000 17.500000000 -21.500000000 --36.500000000 18.500000000 -21.500000000 -35.500000000 18.500000000 -21.500000000 --36.500000000 19.500000000 -21.500000000 -35.500000000 19.500000000 -21.500000000 --36.500000000 20.500000000 -21.500000000 -35.500000000 20.500000000 -21.500000000 --36.500000000 21.500000000 -21.500000000 -35.500000000 21.500000000 -21.500000000 --36.500000000 22.500000000 -21.500000000 -35.500000000 22.500000000 -21.500000000 --36.500000000 23.500000000 -21.500000000 -35.500000000 23.500000000 -21.500000000 --36.500000000 24.500000000 -21.500000000 -35.500000000 24.500000000 -21.500000000 --36.499996185 25.499996185 -21.500000000 -35.499996185 25.499996185 -21.500000000 --36.499954224 26.499954224 -21.500000000 -35.499954224 26.499954224 -21.500000000 --36.499591827 27.499591827 -21.500000000 -35.499591827 27.499591827 -21.500000000 --36.497474670 28.497470856 -21.500000000 -35.497467041 28.497470856 -21.500000000 --36.488403320 29.488407135 -21.500000000 -35.488403320 29.488407135 -21.500000000 --36.458980560 
30.458978653 -21.500000000 -35.458980560 30.458978653 -21.500000000 --36.384422302 31.384418488 -21.500000000 -35.384422302 31.384418488 -21.500000000 --36.233222961 32.233222961 -21.500000000 -35.233222961 32.233222961 -21.500000000 --35.981101990 32.981101990 -21.500000000 --35.622871399 33.622871399 -21.500000000 -34.622871399 33.622871399 -21.500000000 -34.981101990 32.981101990 -21.500000000 --35.167964935 34.167964935 -21.500000000 --34.622871399 34.622871399 -21.500000000 -33.622871399 34.622871399 -21.500000000 -34.167964935 34.167964935 -21.500000000 --33.981101990 34.981101990 -21.500000000 --33.233222961 35.233222961 -21.500000000 --32.384422302 35.384418488 -21.500000000 --31.458978653 35.458976746 -21.500000000 --30.488407135 35.488403320 -21.500000000 --29.497472763 35.497467041 -21.500000000 --28.499593735 35.499591827 -21.500000000 --27.499954224 35.499954224 -21.500000000 --26.499996185 35.499996185 -21.500000000 --25.500000000 35.500000000 -21.500000000 --24.500000000 35.500000000 -21.500000000 --23.500000000 35.500000000 -21.500000000 --22.500000000 35.500000000 -21.500000000 --21.500000000 35.500000000 -21.500000000 --20.500000000 35.500000000 -21.500000000 --19.500000000 35.500000000 -21.500000000 --18.500000000 35.500000000 -21.500000000 --17.500000000 35.500000000 -21.500000000 --16.500000000 35.500000000 -21.500000000 --15.500000000 35.500000000 -21.500000000 --14.500000000 35.500000000 -21.500000000 --13.500000000 35.500000000 -21.500000000 --12.500000000 35.500000000 -21.500000000 --11.500000000 35.500000000 -21.500000000 --10.500000000 35.500000000 -21.500000000 --9.500000000 35.500000000 -21.500000000 --8.500000000 35.500000000 -21.500000000 --7.500000000 35.500000000 -21.500000000 --6.500000000 35.500000000 -21.500000000 --5.500000000 35.500000000 -21.500000000 --4.500000000 35.500000000 -21.500000000 --3.500000000 35.500000000 -21.500000000 --2.500000000 35.500000000 -21.500000000 --1.500000000 35.500000000 -21.500000000 --0.500000000 35.500000000 -21.500000000 -0.500000000 35.500000000 -21.500000000 -1.500000000 35.500000000 -21.500000000 -2.500000000 35.500000000 -21.500000000 -3.500000000 35.500000000 -21.500000000 -4.500000000 35.500000000 -21.500000000 -5.500000000 35.500000000 -21.500000000 -6.500000000 35.500000000 -21.500000000 -7.500000000 35.500000000 -21.500000000 -8.500000000 35.500000000 -21.500000000 -9.500000000 35.500000000 -21.500000000 -10.500000000 35.500000000 -21.500000000 -11.500000000 35.500000000 -21.500000000 -12.500000000 35.500000000 -21.500000000 -13.500000000 35.500000000 -21.500000000 -14.500000000 35.500000000 -21.500000000 -15.500000000 35.500000000 -21.500000000 -16.500000000 35.500000000 -21.500000000 -17.500000000 35.500000000 -21.500000000 -18.500000000 35.500000000 -21.500000000 -19.500000000 35.500000000 -21.500000000 -20.500000000 35.500000000 -21.500000000 -21.500000000 35.500000000 -21.500000000 -22.500000000 35.500000000 -21.500000000 -23.500000000 35.500000000 -21.500000000 -24.500000000 35.500000000 -21.500000000 -25.499996185 35.499996185 -21.500000000 -26.499954224 35.499954224 -21.500000000 -27.499591827 35.499591827 -21.500000000 -28.497470856 35.497474670 -21.500000000 -29.488407135 35.488403320 -21.500000000 -30.458978653 35.458980560 -21.500000000 -31.384418488 35.384422302 -21.500000000 -32.233222961 35.233222961 -21.500000000 -32.981101990 34.981101990 -21.500000000 --33.981101990 -35.981101990 -20.500000000 --33.233226776 -36.233222961 -20.500000000 --32.384422302 -36.384418488 -20.500000000 --31.458978653 
-36.458980560 -20.500000000 --30.488407135 -36.488403320 -20.500000000 --29.497472763 -36.497474670 -20.500000000 --28.499593735 -36.499591827 -20.500000000 --27.499954224 -36.499954224 -20.500000000 --26.499996185 -36.499996185 -20.500000000 --25.500000000 -36.500000000 -20.500000000 --24.500000000 -36.500000000 -20.500000000 --23.500000000 -36.500000000 -20.500000000 --22.500000000 -36.500000000 -20.500000000 --21.500000000 -36.500000000 -20.500000000 --20.500000000 -36.500000000 -20.500000000 --19.500000000 -36.500000000 -20.500000000 --18.500000000 -36.500000000 -20.500000000 --17.500000000 -36.500000000 -20.500000000 --16.500000000 -36.500000000 -20.500000000 --15.500000000 -36.500000000 -20.500000000 --14.500000000 -36.500000000 -20.500000000 --13.500000000 -36.500000000 -20.500000000 --12.500000000 -36.500000000 -20.500000000 --11.500000000 -36.500000000 -20.500000000 --10.500000000 -36.500000000 -20.500000000 --9.500000000 -36.500000000 -20.500000000 --8.500000000 -36.500000000 -20.500000000 --7.500000000 -36.500000000 -20.500000000 --6.500000000 -36.500000000 -20.500000000 --5.500000000 -36.500000000 -20.500000000 --4.500000000 -36.500000000 -20.500000000 --3.500000000 -36.500000000 -20.500000000 --2.500000000 -36.500000000 -20.500000000 --1.500000000 -36.500000000 -20.500000000 --0.500000000 -36.500000000 -20.500000000 -0.500000000 -36.500000000 -20.500000000 -1.500000000 -36.500000000 -20.500000000 -2.500000000 -36.500000000 -20.500000000 -3.500000000 -36.500000000 -20.500000000 -4.500000000 -36.500000000 -20.500000000 -5.500000000 -36.500000000 -20.500000000 -6.500000000 -36.500000000 -20.500000000 -7.500000000 -36.500000000 -20.500000000 -8.500000000 -36.500000000 -20.500000000 -9.500000000 -36.500000000 -20.500000000 -10.500000000 -36.500000000 -20.500000000 -11.500000000 -36.500000000 -20.500000000 -12.500000000 -36.500000000 -20.500000000 -13.500000000 -36.500000000 -20.500000000 -14.500000000 -36.500000000 -20.500000000 -15.500000000 -36.500000000 -20.500000000 -16.500000000 -36.500000000 -20.500000000 -17.500000000 -36.500000000 -20.500000000 -18.500000000 -36.500000000 -20.500000000 -19.500000000 -36.500000000 -20.500000000 -20.500000000 -36.500000000 -20.500000000 -21.500000000 -36.500000000 -20.500000000 -22.500000000 -36.500000000 -20.500000000 -23.500000000 -36.500000000 -20.500000000 -24.500000000 -36.500000000 -20.500000000 -25.499996185 -36.499996185 -20.500000000 -26.499954224 -36.499954224 -20.500000000 -27.499591827 -36.499591827 -20.500000000 -28.497470856 -36.497467041 -20.500000000 -29.488407135 -36.488403320 -20.500000000 -30.458978653 -36.458980560 -20.500000000 -31.384418488 -36.384422302 -20.500000000 -32.233222961 -36.233222961 -20.500000000 -32.981101990 -35.981101990 -20.500000000 --35.167964935 -35.167964935 -20.500000000 --34.622871399 -35.622871399 -20.500000000 -33.622871399 -35.622871399 -20.500000000 -34.167964935 -35.167964935 -20.500000000 --35.981101990 -33.981101990 -20.500000000 --35.622871399 -34.622871399 -20.500000000 -34.622871399 -34.622871399 -20.500000000 -34.981101990 -33.981101990 -20.500000000 --36.233222961 -33.233222961 -20.500000000 -35.233222961 -33.233226776 -20.500000000 --36.384418488 -32.384422302 -20.500000000 -35.384418488 -32.384422302 -20.500000000 --36.458976746 -31.458978653 -20.500000000 -35.458980560 -31.458978653 -20.500000000 --36.488403320 -30.488407135 -20.500000000 -35.488403320 -30.488407135 -20.500000000 --36.497467041 -29.497472763 -20.500000000 -35.497474670 -29.497472763 -20.500000000 --36.499591827 
-28.499593735 -20.500000000 -35.499591827 -28.499593735 -20.500000000 --36.499954224 -27.499954224 -20.500000000 -35.499954224 -27.499954224 -20.500000000 --36.499996185 -26.499996185 -20.500000000 -35.499996185 -26.499996185 -20.500000000 --36.500000000 -25.500000000 -20.500000000 -35.500000000 -25.500000000 -20.500000000 --36.500000000 -24.500000000 -20.500000000 -35.500000000 -24.500000000 -20.500000000 --36.500000000 -23.500000000 -20.500000000 -35.500000000 -23.500000000 -20.500000000 --36.500000000 -22.500000000 -20.500000000 -35.500000000 -22.500000000 -20.500000000 --36.500000000 -21.500000000 -20.500000000 -35.500000000 -21.500000000 -20.500000000 --36.500000000 -20.500000000 -20.500000000 -35.500000000 -20.500000000 -20.500000000 --36.500000000 -19.500000000 -20.500000000 -35.500000000 -19.500000000 -20.500000000 --36.500000000 -18.500000000 -20.500000000 -35.500000000 -18.500000000 -20.500000000 --36.500000000 -17.500000000 -20.500000000 -35.500000000 -17.500000000 -20.500000000 --36.500000000 -16.500000000 -20.500000000 -35.500000000 -16.500000000 -20.500000000 --36.500000000 -15.500000000 -20.500000000 -35.500000000 -15.500000000 -20.500000000 --36.500000000 -14.500000000 -20.500000000 -35.500000000 -14.500000000 -20.500000000 --36.500000000 -13.500000000 -20.500000000 -35.500000000 -13.500000000 -20.500000000 --36.500000000 -12.500000000 -20.500000000 -35.500000000 -12.500000000 -20.500000000 --36.500000000 -11.500000000 -20.500000000 -35.500000000 -11.500000000 -20.500000000 --36.500000000 -10.500000000 -20.500000000 -35.500000000 -10.500000000 -20.500000000 --36.500000000 -9.500000000 -20.500000000 -35.500000000 -9.500000000 -20.500000000 --36.500000000 -8.500000000 -20.500000000 -35.500000000 -8.500000000 -20.500000000 --36.500000000 -7.500000000 -20.500000000 -35.500000000 -7.500000000 -20.500000000 --36.500000000 -6.500000000 -20.500000000 -35.500000000 -6.500000000 -20.500000000 --36.500000000 -5.500000000 -20.500000000 -35.500000000 -5.500000000 -20.500000000 --36.500000000 -4.500000000 -20.500000000 -35.500000000 -4.500000000 -20.500000000 --36.500000000 -3.500000000 -20.500000000 -35.500000000 -3.500000000 -20.500000000 --36.500000000 -2.500000000 -20.500000000 -35.500000000 -2.500000000 -20.500000000 --36.500000000 -1.500000000 -20.500000000 -35.500000000 -1.500000000 -20.500000000 --36.500000000 -0.500000000 -20.500000000 -35.500000000 -0.500000000 -20.500000000 --36.500000000 0.500000000 -20.500000000 -35.500000000 0.500000000 -20.500000000 --36.500000000 1.500000000 -20.500000000 -35.500000000 1.500000000 -20.500000000 --36.500000000 2.500000000 -20.500000000 -35.500000000 2.500000000 -20.500000000 --36.500000000 3.500000000 -20.500000000 -35.500000000 3.500000000 -20.500000000 --36.500000000 4.500000000 -20.500000000 -35.500000000 4.500000000 -20.500000000 --36.500000000 5.500000000 -20.500000000 -35.500000000 5.500000000 -20.500000000 --36.500000000 6.500000000 -20.500000000 -35.500000000 6.500000000 -20.500000000 --36.500000000 7.500000000 -20.500000000 -35.500000000 7.500000000 -20.500000000 --36.500000000 8.500000000 -20.500000000 -35.500000000 8.500000000 -20.500000000 --36.500000000 9.500000000 -20.500000000 -35.500000000 9.500000000 -20.500000000 --36.500000000 10.500000000 -20.500000000 -35.500000000 10.500000000 -20.500000000 --36.500000000 11.500000000 -20.500000000 -35.500000000 11.500000000 -20.500000000 --36.500000000 12.500000000 -20.500000000 -35.500000000 12.500000000 -20.500000000 --36.500000000 13.500000000 -20.500000000 -35.500000000 
[diff data omitted: removed lines of a whitespace-separated coordinate table ("-x y z" rows, half-voxel positions at 9-decimal precision) from a bundled test data file; the wrapped values here span slices z = -20.5 through -14.5 and carry no further recoverable content]
--22.500000000 35.500000000 -14.500000000 --21.500000000 35.500000000 -14.500000000 --20.500000000 35.500000000 -14.500000000 --19.500000000 35.500000000 -14.500000000 --18.500000000 35.500000000 -14.500000000 --17.500000000 35.500000000 -14.500000000 --16.500000000 35.500000000 -14.500000000 --15.500000000 35.500000000 -14.500000000 --14.500000000 35.500000000 -14.500000000 --13.500000000 35.500000000 -14.500000000 --12.500000000 35.500000000 -14.500000000 --11.500000000 35.500000000 -14.500000000 --10.500000000 35.500000000 -14.500000000 --9.500000000 35.500000000 -14.500000000 --8.500000000 35.500000000 -14.500000000 --7.500000000 35.500000000 -14.500000000 --6.500000000 35.500000000 -14.500000000 --5.500000000 35.500000000 -14.500000000 --4.500000000 35.500000000 -14.500000000 --3.500000000 35.500000000 -14.500000000 --2.500000000 35.500000000 -14.500000000 --1.500000000 35.500000000 -14.500000000 --0.500000000 35.500000000 -14.500000000 -0.500000000 35.500000000 -14.500000000 -1.500000000 35.500000000 -14.500000000 -2.500000000 35.500000000 -14.500000000 -3.500000000 35.500000000 -14.500000000 -4.500000000 35.500000000 -14.500000000 -5.500000000 35.500000000 -14.500000000 -6.500000000 35.500000000 -14.500000000 -7.500000000 35.500000000 -14.500000000 -8.500000000 35.500000000 -14.500000000 -9.500000000 35.500000000 -14.500000000 -10.500000000 35.500000000 -14.500000000 -11.500000000 35.500000000 -14.500000000 -12.500000000 35.500000000 -14.500000000 -13.500000000 35.500000000 -14.500000000 -14.500000000 35.500000000 -14.500000000 -15.500000000 35.500000000 -14.500000000 -16.500000000 35.500000000 -14.500000000 -17.500000000 35.500000000 -14.500000000 -18.500000000 35.500000000 -14.500000000 -19.500000000 35.500000000 -14.500000000 -20.500000000 35.500000000 -14.500000000 -21.500000000 35.500000000 -14.500000000 -22.500000000 35.500000000 -14.500000000 -23.500000000 35.500000000 -14.500000000 -24.500000000 35.500000000 -14.500000000 -25.499996185 35.499996185 -14.500000000 -26.499954224 35.499954224 -14.500000000 -27.499591827 35.499591827 -14.500000000 -28.497470856 35.497474670 -14.500000000 -29.488407135 35.488403320 -14.500000000 -30.458978653 35.458980560 -14.500000000 -31.384418488 35.384422302 -14.500000000 -32.233222961 35.233222961 -14.500000000 -32.981101990 34.981101990 -14.500000000 --33.981101990 -35.981101990 -13.500000000 --33.233226776 -36.233222961 -13.500000000 --32.384422302 -36.384418488 -13.500000000 --31.458978653 -36.458980560 -13.500000000 --30.488407135 -36.488403320 -13.500000000 --29.497472763 -36.497474670 -13.500000000 --28.499593735 -36.499591827 -13.500000000 --27.499954224 -36.499954224 -13.500000000 --26.499996185 -36.499996185 -13.500000000 --25.500000000 -36.500000000 -13.500000000 --24.500000000 -36.500000000 -13.500000000 --23.500000000 -36.500000000 -13.500000000 --22.500000000 -36.500000000 -13.500000000 --21.500000000 -36.500000000 -13.500000000 --20.500000000 -36.500000000 -13.500000000 --19.500000000 -36.500000000 -13.500000000 --18.500000000 -36.500000000 -13.500000000 --17.500000000 -36.500000000 -13.500000000 --16.500000000 -36.500000000 -13.500000000 --15.500000000 -36.500000000 -13.500000000 --14.500000000 -36.500000000 -13.500000000 --13.500000000 -36.500000000 -13.500000000 --12.500000000 -36.500000000 -13.500000000 --11.500000000 -36.500000000 -13.500000000 --10.500000000 -36.500000000 -13.500000000 --9.500000000 -36.500000000 -13.500000000 --8.500000000 -36.500000000 -13.500000000 --7.500000000 -36.500000000 -13.500000000 --6.500000000 
-36.500000000 -13.500000000 --5.500000000 -36.500000000 -13.500000000 --4.500000000 -36.500000000 -13.500000000 --3.500000000 -36.500000000 -13.500000000 --2.500000000 -36.500000000 -13.500000000 --1.500000000 -36.500000000 -13.500000000 --0.500000000 -36.500000000 -13.500000000 -0.500000000 -36.500000000 -13.500000000 -1.500000000 -36.500000000 -13.500000000 -2.500000000 -36.500000000 -13.500000000 -3.500000000 -36.500000000 -13.500000000 -4.500000000 -36.500000000 -13.500000000 -5.500000000 -36.500000000 -13.500000000 -6.500000000 -36.500000000 -13.500000000 -7.500000000 -36.500000000 -13.500000000 -8.500000000 -36.500000000 -13.500000000 -9.500000000 -36.500000000 -13.500000000 -10.500000000 -36.500000000 -13.500000000 -11.500000000 -36.500000000 -13.500000000 -12.500000000 -36.500000000 -13.500000000 -13.500000000 -36.500000000 -13.500000000 -14.500000000 -36.500000000 -13.500000000 -15.500000000 -36.500000000 -13.500000000 -16.500000000 -36.500000000 -13.500000000 -17.500000000 -36.500000000 -13.500000000 -18.500000000 -36.500000000 -13.500000000 -19.500000000 -36.500000000 -13.500000000 -20.500000000 -36.500000000 -13.500000000 -21.500000000 -36.500000000 -13.500000000 -22.500000000 -36.500000000 -13.500000000 -23.500000000 -36.500000000 -13.500000000 -24.500000000 -36.500000000 -13.500000000 -25.499996185 -36.499996185 -13.500000000 -26.499954224 -36.499954224 -13.500000000 -27.499591827 -36.499591827 -13.500000000 -28.497470856 -36.497467041 -13.500000000 -29.488407135 -36.488403320 -13.500000000 -30.458978653 -36.458980560 -13.500000000 -31.384418488 -36.384422302 -13.500000000 -32.233222961 -36.233222961 -13.500000000 -32.981101990 -35.981101990 -13.500000000 --35.167964935 -35.167964935 -13.500000000 --34.622871399 -35.622871399 -13.500000000 -33.622871399 -35.622871399 -13.500000000 -34.167964935 -35.167964935 -13.500000000 --35.981101990 -33.981101990 -13.500000000 --35.622871399 -34.622871399 -13.500000000 -34.622871399 -34.622871399 -13.500000000 -34.981101990 -33.981101990 -13.500000000 --36.233222961 -33.233222961 -13.500000000 -35.233222961 -33.233226776 -13.500000000 --36.384418488 -32.384422302 -13.500000000 -35.384418488 -32.384422302 -13.500000000 --36.458976746 -31.458978653 -13.500000000 -35.458980560 -31.458978653 -13.500000000 --36.488403320 -30.488407135 -13.500000000 -35.488403320 -30.488407135 -13.500000000 --36.497467041 -29.497472763 -13.500000000 -35.497474670 -29.497472763 -13.500000000 --36.499591827 -28.499593735 -13.500000000 -35.499591827 -28.499593735 -13.500000000 --36.499954224 -27.499954224 -13.500000000 -35.499954224 -27.499954224 -13.500000000 --36.499996185 -26.499996185 -13.500000000 -35.499996185 -26.499996185 -13.500000000 --36.500000000 -25.500000000 -13.500000000 -35.500000000 -25.500000000 -13.500000000 --36.500000000 -24.500000000 -13.500000000 -35.500000000 -24.500000000 -13.500000000 --36.500000000 -23.500000000 -13.500000000 -35.500000000 -23.500000000 -13.500000000 --36.500000000 -22.500000000 -13.500000000 -35.500000000 -22.500000000 -13.500000000 --36.500000000 -21.500000000 -13.500000000 -35.500000000 -21.500000000 -13.500000000 --36.500000000 -20.500000000 -13.500000000 -35.500000000 -20.500000000 -13.500000000 --36.500000000 -19.500000000 -13.500000000 -35.500000000 -19.500000000 -13.500000000 --36.500000000 -18.500000000 -13.500000000 -35.500000000 -18.500000000 -13.500000000 --36.500000000 -17.500000000 -13.500000000 -35.500000000 -17.500000000 -13.500000000 --36.500000000 -16.500000000 -13.500000000 -35.500000000 -16.500000000 
-13.500000000 --36.500000000 -15.500000000 -13.500000000 -35.500000000 -15.500000000 -13.500000000 --36.500000000 -14.500000000 -13.500000000 -35.500000000 -14.500000000 -13.500000000 --36.500000000 -13.500000000 -13.500000000 -35.500000000 -13.500000000 -13.500000000 --36.500000000 -12.500000000 -13.500000000 -35.500000000 -12.500000000 -13.500000000 --36.500000000 -11.500000000 -13.500000000 -35.500000000 -11.500000000 -13.500000000 --36.500000000 -10.500000000 -13.500000000 -35.500000000 -10.500000000 -13.500000000 --36.500000000 -9.500000000 -13.500000000 -35.500000000 -9.500000000 -13.500000000 --36.500000000 -8.500000000 -13.500000000 -35.500000000 -8.500000000 -13.500000000 --36.500000000 -7.500000000 -13.500000000 -35.500000000 -7.500000000 -13.500000000 --36.500000000 -6.500000000 -13.500000000 -35.500000000 -6.500000000 -13.500000000 --36.500000000 -5.500000000 -13.500000000 -35.500000000 -5.500000000 -13.500000000 --36.500000000 -4.500000000 -13.500000000 -35.500000000 -4.500000000 -13.500000000 --36.500000000 -3.500000000 -13.500000000 -35.500000000 -3.500000000 -13.500000000 --36.500000000 -2.500000000 -13.500000000 -35.500000000 -2.500000000 -13.500000000 --36.500000000 -1.500000000 -13.500000000 -35.500000000 -1.500000000 -13.500000000 --36.500000000 -0.500000000 -13.500000000 -35.500000000 -0.500000000 -13.500000000 --36.500000000 0.500000000 -13.500000000 -35.500000000 0.500000000 -13.500000000 --36.500000000 1.500000000 -13.500000000 -35.500000000 1.500000000 -13.500000000 --36.500000000 2.500000000 -13.500000000 -35.500000000 2.500000000 -13.500000000 --36.500000000 3.500000000 -13.500000000 -35.500000000 3.500000000 -13.500000000 --36.500000000 4.500000000 -13.500000000 -35.500000000 4.500000000 -13.500000000 --36.500000000 5.500000000 -13.500000000 -35.500000000 5.500000000 -13.500000000 --36.500000000 6.500000000 -13.500000000 -35.500000000 6.500000000 -13.500000000 --36.500000000 7.500000000 -13.500000000 -35.500000000 7.500000000 -13.500000000 --36.500000000 8.500000000 -13.500000000 -35.500000000 8.500000000 -13.500000000 --36.500000000 9.500000000 -13.500000000 -35.500000000 9.500000000 -13.500000000 --36.500000000 10.500000000 -13.500000000 -35.500000000 10.500000000 -13.500000000 --36.500000000 11.500000000 -13.500000000 -35.500000000 11.500000000 -13.500000000 --36.500000000 12.500000000 -13.500000000 -35.500000000 12.500000000 -13.500000000 --36.500000000 13.500000000 -13.500000000 -35.500000000 13.500000000 -13.500000000 --36.500000000 14.500000000 -13.500000000 -35.500000000 14.500000000 -13.500000000 --36.500000000 15.500000000 -13.500000000 -35.500000000 15.500000000 -13.500000000 --36.500000000 16.500000000 -13.500000000 -35.500000000 16.500000000 -13.500000000 --36.500000000 17.500000000 -13.500000000 -35.500000000 17.500000000 -13.500000000 --36.500000000 18.500000000 -13.500000000 -35.500000000 18.500000000 -13.500000000 --36.500000000 19.500000000 -13.500000000 -35.500000000 19.500000000 -13.500000000 --36.500000000 20.500000000 -13.500000000 -35.500000000 20.500000000 -13.500000000 --36.500000000 21.500000000 -13.500000000 -35.500000000 21.500000000 -13.500000000 --36.500000000 22.500000000 -13.500000000 -35.500000000 22.500000000 -13.500000000 --36.500000000 23.500000000 -13.500000000 -35.500000000 23.500000000 -13.500000000 --36.500000000 24.500000000 -13.500000000 -35.500000000 24.500000000 -13.500000000 --36.499996185 25.499996185 -13.500000000 -35.499996185 25.499996185 -13.500000000 --36.499954224 26.499954224 -13.500000000 -35.499954224 
26.499954224 -13.500000000 --36.499591827 27.499591827 -13.500000000 -35.499591827 27.499591827 -13.500000000 --36.497474670 28.497470856 -13.500000000 -35.497467041 28.497470856 -13.500000000 --36.488403320 29.488407135 -13.500000000 -35.488403320 29.488407135 -13.500000000 --36.458980560 30.458978653 -13.500000000 -35.458980560 30.458978653 -13.500000000 --36.384422302 31.384418488 -13.500000000 -35.384422302 31.384418488 -13.500000000 --36.233222961 32.233222961 -13.500000000 -35.233222961 32.233222961 -13.500000000 --35.981101990 32.981101990 -13.500000000 --35.622871399 33.622871399 -13.500000000 -34.622871399 33.622871399 -13.500000000 -34.981101990 32.981101990 -13.500000000 --35.167964935 34.167964935 -13.500000000 --34.622871399 34.622871399 -13.500000000 -33.622871399 34.622871399 -13.500000000 -34.167964935 34.167964935 -13.500000000 --33.981101990 34.981101990 -13.500000000 --33.233222961 35.233222961 -13.500000000 --32.384422302 35.384418488 -13.500000000 --31.458978653 35.458976746 -13.500000000 --30.488407135 35.488403320 -13.500000000 --29.497472763 35.497467041 -13.500000000 --28.499593735 35.499591827 -13.500000000 --27.499954224 35.499954224 -13.500000000 --26.499996185 35.499996185 -13.500000000 --25.500000000 35.500000000 -13.500000000 --24.500000000 35.500000000 -13.500000000 --23.500000000 35.500000000 -13.500000000 --22.500000000 35.500000000 -13.500000000 --21.500000000 35.500000000 -13.500000000 --20.500000000 35.500000000 -13.500000000 --19.500000000 35.500000000 -13.500000000 --18.500000000 35.500000000 -13.500000000 --17.500000000 35.500000000 -13.500000000 --16.500000000 35.500000000 -13.500000000 --15.500000000 35.500000000 -13.500000000 --14.500000000 35.500000000 -13.500000000 --13.500000000 35.500000000 -13.500000000 --12.500000000 35.500000000 -13.500000000 --11.500000000 35.500000000 -13.500000000 --10.500000000 35.500000000 -13.500000000 --9.500000000 35.500000000 -13.500000000 --8.500000000 35.500000000 -13.500000000 --7.500000000 35.500000000 -13.500000000 --6.500000000 35.500000000 -13.500000000 --5.500000000 35.500000000 -13.500000000 --4.500000000 35.500000000 -13.500000000 --3.500000000 35.500000000 -13.500000000 --2.500000000 35.500000000 -13.500000000 --1.500000000 35.500000000 -13.500000000 --0.500000000 35.500000000 -13.500000000 -0.500000000 35.500000000 -13.500000000 -1.500000000 35.500000000 -13.500000000 -2.500000000 35.500000000 -13.500000000 -3.500000000 35.500000000 -13.500000000 -4.500000000 35.500000000 -13.500000000 -5.500000000 35.500000000 -13.500000000 -6.500000000 35.500000000 -13.500000000 -7.500000000 35.500000000 -13.500000000 -8.500000000 35.500000000 -13.500000000 -9.500000000 35.500000000 -13.500000000 -10.500000000 35.500000000 -13.500000000 -11.500000000 35.500000000 -13.500000000 -12.500000000 35.500000000 -13.500000000 -13.500000000 35.500000000 -13.500000000 -14.500000000 35.500000000 -13.500000000 -15.500000000 35.500000000 -13.500000000 -16.500000000 35.500000000 -13.500000000 -17.500000000 35.500000000 -13.500000000 -18.500000000 35.500000000 -13.500000000 -19.500000000 35.500000000 -13.500000000 -20.500000000 35.500000000 -13.500000000 -21.500000000 35.500000000 -13.500000000 -22.500000000 35.500000000 -13.500000000 -23.500000000 35.500000000 -13.500000000 -24.500000000 35.500000000 -13.500000000 -25.499996185 35.499996185 -13.500000000 -26.499954224 35.499954224 -13.500000000 -27.499591827 35.499591827 -13.500000000 -28.497470856 35.497474670 -13.500000000 -29.488407135 35.488403320 -13.500000000 -30.458978653 
35.458980560 -13.500000000 -31.384418488 35.384422302 -13.500000000 -32.233222961 35.233222961 -13.500000000 -32.981101990 34.981101990 -13.500000000 --33.981101990 -35.981101990 -12.500000000 --33.233226776 -36.233222961 -12.500000000 --32.384422302 -36.384418488 -12.500000000 --31.458978653 -36.458980560 -12.500000000 --30.488407135 -36.488403320 -12.500000000 --29.497472763 -36.497474670 -12.500000000 --28.499593735 -36.499591827 -12.500000000 --27.499954224 -36.499954224 -12.500000000 --26.499996185 -36.499996185 -12.500000000 --25.500000000 -36.500000000 -12.500000000 --24.500000000 -36.500000000 -12.500000000 --23.500000000 -36.500000000 -12.500000000 --22.500000000 -36.500000000 -12.500000000 --21.500000000 -36.500000000 -12.500000000 --20.500000000 -36.500000000 -12.500000000 --19.500000000 -36.500000000 -12.500000000 --18.500000000 -36.500000000 -12.500000000 --17.500000000 -36.500000000 -12.500000000 --16.500000000 -36.500000000 -12.500000000 --15.500000000 -36.500000000 -12.500000000 --14.500000000 -36.500000000 -12.500000000 --13.500000000 -36.500000000 -12.500000000 --12.500000000 -36.500000000 -12.500000000 --11.500000000 -36.500000000 -12.500000000 --10.500000000 -36.500000000 -12.500000000 --9.500000000 -36.500000000 -12.500000000 --8.500000000 -36.500000000 -12.500000000 --7.500000000 -36.500000000 -12.500000000 --6.500000000 -36.500000000 -12.500000000 --5.500000000 -36.500000000 -12.500000000 --4.500000000 -36.500000000 -12.500000000 --3.500000000 -36.500000000 -12.500000000 --2.500000000 -36.500000000 -12.500000000 --1.500000000 -36.500000000 -12.500000000 --0.500000000 -36.500000000 -12.500000000 -0.500000000 -36.500000000 -12.500000000 -1.500000000 -36.500000000 -12.500000000 -2.500000000 -36.500000000 -12.500000000 -3.500000000 -36.500000000 -12.500000000 -4.500000000 -36.500000000 -12.500000000 -5.500000000 -36.500000000 -12.500000000 -6.500000000 -36.500000000 -12.500000000 -7.500000000 -36.500000000 -12.500000000 -8.500000000 -36.500000000 -12.500000000 -9.500000000 -36.500000000 -12.500000000 -10.500000000 -36.500000000 -12.500000000 -11.500000000 -36.500000000 -12.500000000 -12.500000000 -36.500000000 -12.500000000 -13.500000000 -36.500000000 -12.500000000 -14.500000000 -36.500000000 -12.500000000 -15.500000000 -36.500000000 -12.500000000 -16.500000000 -36.500000000 -12.500000000 -17.500000000 -36.500000000 -12.500000000 -18.500000000 -36.500000000 -12.500000000 -19.500000000 -36.500000000 -12.500000000 -20.500000000 -36.500000000 -12.500000000 -21.500000000 -36.500000000 -12.500000000 -22.500000000 -36.500000000 -12.500000000 -23.500000000 -36.500000000 -12.500000000 -24.500000000 -36.500000000 -12.500000000 -25.499996185 -36.499996185 -12.500000000 -26.499954224 -36.499954224 -12.500000000 -27.499591827 -36.499591827 -12.500000000 -28.497470856 -36.497467041 -12.500000000 -29.488407135 -36.488403320 -12.500000000 -30.458978653 -36.458980560 -12.500000000 -31.384418488 -36.384422302 -12.500000000 -32.233222961 -36.233222961 -12.500000000 -32.981101990 -35.981101990 -12.500000000 --35.167964935 -35.167964935 -12.500000000 --34.622871399 -35.622871399 -12.500000000 -33.622871399 -35.622871399 -12.500000000 -34.167964935 -35.167964935 -12.500000000 --35.981101990 -33.981101990 -12.500000000 --35.622871399 -34.622871399 -12.500000000 -34.622871399 -34.622871399 -12.500000000 -34.981101990 -33.981101990 -12.500000000 --36.233222961 -33.233222961 -12.500000000 -35.233222961 -33.233226776 -12.500000000 --36.384418488 -32.384422302 -12.500000000 -35.384418488 
-32.384422302 -12.500000000 --36.458976746 -31.458978653 -12.500000000 -35.458980560 -31.458978653 -12.500000000 --36.488403320 -30.488407135 -12.500000000 -35.488403320 -30.488407135 -12.500000000 --36.497467041 -29.497472763 -12.500000000 -35.497474670 -29.497472763 -12.500000000 --36.499591827 -28.499593735 -12.500000000 -35.499591827 -28.499593735 -12.500000000 --36.499954224 -27.499954224 -12.500000000 -35.499954224 -27.499954224 -12.500000000 --36.499996185 -26.499996185 -12.500000000 -35.499996185 -26.499996185 -12.500000000 --36.500000000 -25.500000000 -12.500000000 -35.500000000 -25.500000000 -12.500000000 --36.500000000 -24.500000000 -12.500000000 -35.500000000 -24.500000000 -12.500000000 --36.500000000 -23.500000000 -12.500000000 -35.500000000 -23.500000000 -12.500000000 --36.500000000 -22.500000000 -12.500000000 -35.500000000 -22.500000000 -12.500000000 --36.500000000 -21.500000000 -12.500000000 -35.500000000 -21.500000000 -12.500000000 --36.500000000 -20.500000000 -12.500000000 -35.500000000 -20.500000000 -12.500000000 --36.500000000 -19.500000000 -12.500000000 -35.500000000 -19.500000000 -12.500000000 --36.500000000 -18.500000000 -12.500000000 -35.500000000 -18.500000000 -12.500000000 --36.500000000 -17.500000000 -12.500000000 -35.500000000 -17.500000000 -12.500000000 --36.500000000 -16.500000000 -12.500000000 -35.500000000 -16.500000000 -12.500000000 --36.500000000 -15.500000000 -12.500000000 -35.500000000 -15.500000000 -12.500000000 --36.500000000 -14.500000000 -12.500000000 -35.500000000 -14.500000000 -12.500000000 --36.500000000 -13.500000000 -12.500000000 -35.500000000 -13.500000000 -12.500000000 --36.500000000 -12.500000000 -12.500000000 -35.500000000 -12.500000000 -12.500000000 --36.500000000 -11.500000000 -12.500000000 -35.500000000 -11.500000000 -12.500000000 --36.500000000 -10.500000000 -12.500000000 -35.500000000 -10.500000000 -12.500000000 --36.500000000 -9.500000000 -12.500000000 -35.500000000 -9.500000000 -12.500000000 --36.500000000 -8.500000000 -12.500000000 -35.500000000 -8.500000000 -12.500000000 --36.500000000 -7.500000000 -12.500000000 -35.500000000 -7.500000000 -12.500000000 --36.500000000 -6.500000000 -12.500000000 -35.500000000 -6.500000000 -12.500000000 --36.500000000 -5.500000000 -12.500000000 -35.500000000 -5.500000000 -12.500000000 --36.500000000 -4.500000000 -12.500000000 -35.500000000 -4.500000000 -12.500000000 --36.500000000 -3.500000000 -12.500000000 -35.500000000 -3.500000000 -12.500000000 --36.500000000 -2.500000000 -12.500000000 -35.500000000 -2.500000000 -12.500000000 --36.500000000 -1.500000000 -12.500000000 -35.500000000 -1.500000000 -12.500000000 --36.500000000 -0.500000000 -12.500000000 -35.500000000 -0.500000000 -12.500000000 --36.500000000 0.500000000 -12.500000000 -35.500000000 0.500000000 -12.500000000 --36.500000000 1.500000000 -12.500000000 -35.500000000 1.500000000 -12.500000000 --36.500000000 2.500000000 -12.500000000 -35.500000000 2.500000000 -12.500000000 --36.500000000 3.500000000 -12.500000000 -35.500000000 3.500000000 -12.500000000 --36.500000000 4.500000000 -12.500000000 -35.500000000 4.500000000 -12.500000000 --36.500000000 5.500000000 -12.500000000 -35.500000000 5.500000000 -12.500000000 --36.500000000 6.500000000 -12.500000000 -35.500000000 6.500000000 -12.500000000 --36.500000000 7.500000000 -12.500000000 -35.500000000 7.500000000 -12.500000000 --36.500000000 8.500000000 -12.500000000 -35.500000000 8.500000000 -12.500000000 --36.500000000 9.500000000 -12.500000000 -35.500000000 9.500000000 -12.500000000 --36.500000000 
10.500000000 -12.500000000 -35.500000000 10.500000000 -12.500000000 --36.500000000 11.500000000 -12.500000000 -35.500000000 11.500000000 -12.500000000 --36.500000000 12.500000000 -12.500000000 -35.500000000 12.500000000 -12.500000000 --36.500000000 13.500000000 -12.500000000 -35.500000000 13.500000000 -12.500000000 --36.500000000 14.500000000 -12.500000000 -35.500000000 14.500000000 -12.500000000 --36.500000000 15.500000000 -12.500000000 -35.500000000 15.500000000 -12.500000000 --36.500000000 16.500000000 -12.500000000 -35.500000000 16.500000000 -12.500000000 --36.500000000 17.500000000 -12.500000000 -35.500000000 17.500000000 -12.500000000 --36.500000000 18.500000000 -12.500000000 -35.500000000 18.500000000 -12.500000000 --36.500000000 19.500000000 -12.500000000 -35.500000000 19.500000000 -12.500000000 --36.500000000 20.500000000 -12.500000000 -35.500000000 20.500000000 -12.500000000 --36.500000000 21.500000000 -12.500000000 -35.500000000 21.500000000 -12.500000000 --36.500000000 22.500000000 -12.500000000 -35.500000000 22.500000000 -12.500000000 --36.500000000 23.500000000 -12.500000000 -35.500000000 23.500000000 -12.500000000 --36.500000000 24.500000000 -12.500000000 -35.500000000 24.500000000 -12.500000000 --36.499996185 25.499996185 -12.500000000 -35.499996185 25.499996185 -12.500000000 --36.499954224 26.499954224 -12.500000000 -35.499954224 26.499954224 -12.500000000 --36.499591827 27.499591827 -12.500000000 -35.499591827 27.499591827 -12.500000000 --36.497474670 28.497470856 -12.500000000 -35.497467041 28.497470856 -12.500000000 --36.488403320 29.488407135 -12.500000000 -35.488403320 29.488407135 -12.500000000 --36.458980560 30.458978653 -12.500000000 -35.458980560 30.458978653 -12.500000000 --36.384422302 31.384418488 -12.500000000 -35.384422302 31.384418488 -12.500000000 --36.233222961 32.233222961 -12.500000000 -35.233222961 32.233222961 -12.500000000 --35.981101990 32.981101990 -12.500000000 --35.622871399 33.622871399 -12.500000000 -34.622871399 33.622871399 -12.500000000 -34.981101990 32.981101990 -12.500000000 --35.167964935 34.167964935 -12.500000000 --34.622871399 34.622871399 -12.500000000 -33.622871399 34.622871399 -12.500000000 -34.167964935 34.167964935 -12.500000000 --33.981101990 34.981101990 -12.500000000 --33.233222961 35.233222961 -12.500000000 --32.384422302 35.384418488 -12.500000000 --31.458978653 35.458976746 -12.500000000 --30.488407135 35.488403320 -12.500000000 --29.497472763 35.497467041 -12.500000000 --28.499593735 35.499591827 -12.500000000 --27.499954224 35.499954224 -12.500000000 --26.499996185 35.499996185 -12.500000000 --25.500000000 35.500000000 -12.500000000 --24.500000000 35.500000000 -12.500000000 --23.500000000 35.500000000 -12.500000000 --22.500000000 35.500000000 -12.500000000 --21.500000000 35.500000000 -12.500000000 --20.500000000 35.500000000 -12.500000000 --19.500000000 35.500000000 -12.500000000 --18.500000000 35.500000000 -12.500000000 --17.500000000 35.500000000 -12.500000000 --16.500000000 35.500000000 -12.500000000 --15.500000000 35.500000000 -12.500000000 --14.500000000 35.500000000 -12.500000000 --13.500000000 35.500000000 -12.500000000 --12.500000000 35.500000000 -12.500000000 --11.500000000 35.500000000 -12.500000000 --10.500000000 35.500000000 -12.500000000 --9.500000000 35.500000000 -12.500000000 --8.500000000 35.500000000 -12.500000000 --7.500000000 35.500000000 -12.500000000 --6.500000000 35.500000000 -12.500000000 --5.500000000 35.500000000 -12.500000000 --4.500000000 35.500000000 -12.500000000 --3.500000000 35.500000000 
-12.500000000 --2.500000000 35.500000000 -12.500000000 --1.500000000 35.500000000 -12.500000000 --0.500000000 35.500000000 -12.500000000 -0.500000000 35.500000000 -12.500000000 -1.500000000 35.500000000 -12.500000000 -2.500000000 35.500000000 -12.500000000 -3.500000000 35.500000000 -12.500000000 -4.500000000 35.500000000 -12.500000000 -5.500000000 35.500000000 -12.500000000 -6.500000000 35.500000000 -12.500000000 -7.500000000 35.500000000 -12.500000000 -8.500000000 35.500000000 -12.500000000 -9.500000000 35.500000000 -12.500000000 -10.500000000 35.500000000 -12.500000000 -11.500000000 35.500000000 -12.500000000 -12.500000000 35.500000000 -12.500000000 -13.500000000 35.500000000 -12.500000000 -14.500000000 35.500000000 -12.500000000 -15.500000000 35.500000000 -12.500000000 -16.500000000 35.500000000 -12.500000000 -17.500000000 35.500000000 -12.500000000 -18.500000000 35.500000000 -12.500000000 -19.500000000 35.500000000 -12.500000000 -20.500000000 35.500000000 -12.500000000 -21.500000000 35.500000000 -12.500000000 -22.500000000 35.500000000 -12.500000000 -23.500000000 35.500000000 -12.500000000 -24.500000000 35.500000000 -12.500000000 -25.499996185 35.499996185 -12.500000000 -26.499954224 35.499954224 -12.500000000 -27.499591827 35.499591827 -12.500000000 -28.497470856 35.497474670 -12.500000000 -29.488407135 35.488403320 -12.500000000 -30.458978653 35.458980560 -12.500000000 -31.384418488 35.384422302 -12.500000000 -32.233222961 35.233222961 -12.500000000 -32.981101990 34.981101990 -12.500000000 --33.981101990 -35.981101990 -11.500000000 --33.233226776 -36.233222961 -11.500000000 --32.384422302 -36.384418488 -11.500000000 --31.458978653 -36.458980560 -11.500000000 --30.488407135 -36.488403320 -11.500000000 --29.497472763 -36.497474670 -11.500000000 --28.499593735 -36.499591827 -11.500000000 --27.499954224 -36.499954224 -11.500000000 --26.499996185 -36.499996185 -11.500000000 --25.500000000 -36.500000000 -11.500000000 --24.500000000 -36.500000000 -11.500000000 --23.500000000 -36.500000000 -11.500000000 --22.500000000 -36.500000000 -11.500000000 --21.500000000 -36.500000000 -11.500000000 --20.500000000 -36.500000000 -11.500000000 --19.500000000 -36.500000000 -11.500000000 --18.500000000 -36.500000000 -11.500000000 --17.500000000 -36.500000000 -11.500000000 --16.500000000 -36.500000000 -11.500000000 --15.500000000 -36.500000000 -11.500000000 --14.500000000 -36.500000000 -11.500000000 --13.500000000 -36.500000000 -11.500000000 --12.500000000 -36.500000000 -11.500000000 --11.500000000 -36.500000000 -11.500000000 --10.500000000 -36.500000000 -11.500000000 --9.500000000 -36.500000000 -11.500000000 --8.500000000 -36.500000000 -11.500000000 --7.500000000 -36.500000000 -11.500000000 --6.500000000 -36.500000000 -11.500000000 --5.500000000 -36.500000000 -11.500000000 --4.500000000 -36.500000000 -11.500000000 --3.500000000 -36.500000000 -11.500000000 --2.500000000 -36.500000000 -11.500000000 --1.500000000 -36.500000000 -11.500000000 --0.500000000 -36.500000000 -11.500000000 -0.500000000 -36.500000000 -11.500000000 -1.500000000 -36.500000000 -11.500000000 -2.500000000 -36.500000000 -11.500000000 -3.500000000 -36.500000000 -11.500000000 -4.500000000 -36.500000000 -11.500000000 -5.500000000 -36.500000000 -11.500000000 -6.500000000 -36.500000000 -11.500000000 -7.500000000 -36.500000000 -11.500000000 -8.500000000 -36.500000000 -11.500000000 -9.500000000 -36.500000000 -11.500000000 -10.500000000 -36.500000000 -11.500000000 -11.500000000 -36.500000000 -11.500000000 -12.500000000 -36.500000000 -11.500000000 
-13.500000000 -36.500000000 -11.500000000 -14.500000000 -36.500000000 -11.500000000 -15.500000000 -36.500000000 -11.500000000 -16.500000000 -36.500000000 -11.500000000 -17.500000000 -36.500000000 -11.500000000 -18.500000000 -36.500000000 -11.500000000 -19.500000000 -36.500000000 -11.500000000 -20.500000000 -36.500000000 -11.500000000 -21.500000000 -36.500000000 -11.500000000 -22.500000000 -36.500000000 -11.500000000 -23.500000000 -36.500000000 -11.500000000 -24.500000000 -36.500000000 -11.500000000 -25.499996185 -36.499996185 -11.500000000 -26.499954224 -36.499954224 -11.500000000 -27.499591827 -36.499591827 -11.500000000 -28.497470856 -36.497467041 -11.500000000 -29.488407135 -36.488403320 -11.500000000 -30.458978653 -36.458980560 -11.500000000 -31.384418488 -36.384422302 -11.500000000 -32.233222961 -36.233222961 -11.500000000 -32.981101990 -35.981101990 -11.500000000 --35.167964935 -35.167964935 -11.500000000 --34.622871399 -35.622871399 -11.500000000 -33.622871399 -35.622871399 -11.500000000 -34.167964935 -35.167964935 -11.500000000 --35.981101990 -33.981101990 -11.500000000 --35.622871399 -34.622871399 -11.500000000 -34.622871399 -34.622871399 -11.500000000 -34.981101990 -33.981101990 -11.500000000 --36.233222961 -33.233222961 -11.500000000 -35.233222961 -33.233226776 -11.500000000 --36.384418488 -32.384422302 -11.500000000 -35.384418488 -32.384422302 -11.500000000 --36.458976746 -31.458978653 -11.500000000 -35.458980560 -31.458978653 -11.500000000 --36.488403320 -30.488407135 -11.500000000 -35.488403320 -30.488407135 -11.500000000 --36.497467041 -29.497472763 -11.500000000 -35.497474670 -29.497472763 -11.500000000 --36.499591827 -28.499593735 -11.500000000 -35.499591827 -28.499593735 -11.500000000 --36.499954224 -27.499954224 -11.500000000 -35.499954224 -27.499954224 -11.500000000 --36.499996185 -26.499996185 -11.500000000 -35.499996185 -26.499996185 -11.500000000 --36.500000000 -25.500000000 -11.500000000 -35.500000000 -25.500000000 -11.500000000 --36.500000000 -24.500000000 -11.500000000 -35.500000000 -24.500000000 -11.500000000 --36.500000000 -23.500000000 -11.500000000 -35.500000000 -23.500000000 -11.500000000 --36.500000000 -22.500000000 -11.500000000 -35.500000000 -22.500000000 -11.500000000 --36.500000000 -21.500000000 -11.500000000 -35.500000000 -21.500000000 -11.500000000 --36.500000000 -20.500000000 -11.500000000 -35.500000000 -20.500000000 -11.500000000 --36.500000000 -19.500000000 -11.500000000 -35.500000000 -19.500000000 -11.500000000 --36.500000000 -18.500000000 -11.500000000 -35.500000000 -18.500000000 -11.500000000 --36.500000000 -17.500000000 -11.500000000 -35.500000000 -17.500000000 -11.500000000 --36.500000000 -16.500000000 -11.500000000 -35.500000000 -16.500000000 -11.500000000 --36.500000000 -15.500000000 -11.500000000 -35.500000000 -15.500000000 -11.500000000 --36.500000000 -14.500000000 -11.500000000 -35.500000000 -14.500000000 -11.500000000 --36.500000000 -13.500000000 -11.500000000 -35.500000000 -13.500000000 -11.500000000 --36.500000000 -12.500000000 -11.500000000 -35.500000000 -12.500000000 -11.500000000 --36.500000000 -11.500000000 -11.500000000 -35.500000000 -11.500000000 -11.500000000 --36.500000000 -10.500000000 -11.500000000 -35.500000000 -10.500000000 -11.500000000 --36.500000000 -9.500000000 -11.500000000 -35.500000000 -9.500000000 -11.500000000 --36.500000000 -8.500000000 -11.500000000 -35.500000000 -8.500000000 -11.500000000 --36.500000000 -7.500000000 -11.500000000 -35.500000000 -7.500000000 -11.500000000 --36.500000000 -6.500000000 -11.500000000 
-35.500000000 -6.500000000 -11.500000000 --36.500000000 -5.500000000 -11.500000000 -35.500000000 -5.500000000 -11.500000000 --36.500000000 -4.500000000 -11.500000000 -35.500000000 -4.500000000 -11.500000000 --36.500000000 -3.500000000 -11.500000000 -35.500000000 -3.500000000 -11.500000000 --36.500000000 -2.500000000 -11.500000000 -35.500000000 -2.500000000 -11.500000000 --36.500000000 -1.500000000 -11.500000000 -35.500000000 -1.500000000 -11.500000000 --36.500000000 -0.500000000 -11.500000000 -35.500000000 -0.500000000 -11.500000000 --36.500000000 0.500000000 -11.500000000 -35.500000000 0.500000000 -11.500000000 --36.500000000 1.500000000 -11.500000000 -35.500000000 1.500000000 -11.500000000 --36.500000000 2.500000000 -11.500000000 -35.500000000 2.500000000 -11.500000000 --36.500000000 3.500000000 -11.500000000 -35.500000000 3.500000000 -11.500000000 --36.500000000 4.500000000 -11.500000000 -35.500000000 4.500000000 -11.500000000 --36.500000000 5.500000000 -11.500000000 -35.500000000 5.500000000 -11.500000000 --36.500000000 6.500000000 -11.500000000 -35.500000000 6.500000000 -11.500000000 --36.500000000 7.500000000 -11.500000000 -35.500000000 7.500000000 -11.500000000 --36.500000000 8.500000000 -11.500000000 -35.500000000 8.500000000 -11.500000000 --36.500000000 9.500000000 -11.500000000 -35.500000000 9.500000000 -11.500000000 --36.500000000 10.500000000 -11.500000000 -35.500000000 10.500000000 -11.500000000 --36.500000000 11.500000000 -11.500000000 -35.500000000 11.500000000 -11.500000000 --36.500000000 12.500000000 -11.500000000 -35.500000000 12.500000000 -11.500000000 --36.500000000 13.500000000 -11.500000000 -35.500000000 13.500000000 -11.500000000 --36.500000000 14.500000000 -11.500000000 -35.500000000 14.500000000 -11.500000000 --36.500000000 15.500000000 -11.500000000 -35.500000000 15.500000000 -11.500000000 --36.500000000 16.500000000 -11.500000000 -35.500000000 16.500000000 -11.500000000 --36.500000000 17.500000000 -11.500000000 -35.500000000 17.500000000 -11.500000000 --36.500000000 18.500000000 -11.500000000 -35.500000000 18.500000000 -11.500000000 --36.500000000 19.500000000 -11.500000000 -35.500000000 19.500000000 -11.500000000 --36.500000000 20.500000000 -11.500000000 -35.500000000 20.500000000 -11.500000000 --36.500000000 21.500000000 -11.500000000 -35.500000000 21.500000000 -11.500000000 --36.500000000 22.500000000 -11.500000000 -35.500000000 22.500000000 -11.500000000 --36.500000000 23.500000000 -11.500000000 -35.500000000 23.500000000 -11.500000000 --36.500000000 24.500000000 -11.500000000 -35.500000000 24.500000000 -11.500000000 --36.499996185 25.499996185 -11.500000000 -35.499996185 25.499996185 -11.500000000 --36.499954224 26.499954224 -11.500000000 -35.499954224 26.499954224 -11.500000000 --36.499591827 27.499591827 -11.500000000 -35.499591827 27.499591827 -11.500000000 --36.497474670 28.497470856 -11.500000000 -35.497467041 28.497470856 -11.500000000 --36.488403320 29.488407135 -11.500000000 -35.488403320 29.488407135 -11.500000000 --36.458980560 30.458978653 -11.500000000 -35.458980560 30.458978653 -11.500000000 --36.384422302 31.384418488 -11.500000000 -35.384422302 31.384418488 -11.500000000 --36.233222961 32.233222961 -11.500000000 -35.233222961 32.233222961 -11.500000000 --35.981101990 32.981101990 -11.500000000 --35.622871399 33.622871399 -11.500000000 -34.622871399 33.622871399 -11.500000000 -34.981101990 32.981101990 -11.500000000 --35.167964935 34.167964935 -11.500000000 --34.622871399 34.622871399 -11.500000000 -33.622871399 34.622871399 -11.500000000 
-34.167964935 34.167964935 -11.500000000 --33.981101990 34.981101990 -11.500000000 --33.233222961 35.233222961 -11.500000000 --32.384422302 35.384418488 -11.500000000 --31.458978653 35.458976746 -11.500000000 --30.488407135 35.488403320 -11.500000000 --29.497472763 35.497467041 -11.500000000 --28.499593735 35.499591827 -11.500000000 --27.499954224 35.499954224 -11.500000000 --26.499996185 35.499996185 -11.500000000 --25.500000000 35.500000000 -11.500000000 --24.500000000 35.500000000 -11.500000000 --23.500000000 35.500000000 -11.500000000 --22.500000000 35.500000000 -11.500000000 --21.500000000 35.500000000 -11.500000000 --20.500000000 35.500000000 -11.500000000 --19.500000000 35.500000000 -11.500000000 --18.500000000 35.500000000 -11.500000000 --17.500000000 35.500000000 -11.500000000 --16.500000000 35.500000000 -11.500000000 --15.500000000 35.500000000 -11.500000000 --14.500000000 35.500000000 -11.500000000 --13.500000000 35.500000000 -11.500000000 --12.500000000 35.500000000 -11.500000000 --11.500000000 35.500000000 -11.500000000 --10.500000000 35.500000000 -11.500000000 --9.500000000 35.500000000 -11.500000000 --8.500000000 35.500000000 -11.500000000 --7.500000000 35.500000000 -11.500000000 --6.500000000 35.500000000 -11.500000000 --5.500000000 35.500000000 -11.500000000 --4.500000000 35.500000000 -11.500000000 --3.500000000 35.500000000 -11.500000000 --2.500000000 35.500000000 -11.500000000 --1.500000000 35.500000000 -11.500000000 --0.500000000 35.500000000 -11.500000000 -0.500000000 35.500000000 -11.500000000 -1.500000000 35.500000000 -11.500000000 -2.500000000 35.500000000 -11.500000000 -3.500000000 35.500000000 -11.500000000 -4.500000000 35.500000000 -11.500000000 -5.500000000 35.500000000 -11.500000000 -6.500000000 35.500000000 -11.500000000 -7.500000000 35.500000000 -11.500000000 -8.500000000 35.500000000 -11.500000000 -9.500000000 35.500000000 -11.500000000 -10.500000000 35.500000000 -11.500000000 -11.500000000 35.500000000 -11.500000000 -12.500000000 35.500000000 -11.500000000 -13.500000000 35.500000000 -11.500000000 -14.500000000 35.500000000 -11.500000000 -15.500000000 35.500000000 -11.500000000 -16.500000000 35.500000000 -11.500000000 -17.500000000 35.500000000 -11.500000000 -18.500000000 35.500000000 -11.500000000 -19.500000000 35.500000000 -11.500000000 -20.500000000 35.500000000 -11.500000000 -21.500000000 35.500000000 -11.500000000 -22.500000000 35.500000000 -11.500000000 -23.500000000 35.500000000 -11.500000000 -24.500000000 35.500000000 -11.500000000 -25.499996185 35.499996185 -11.500000000 -26.499954224 35.499954224 -11.500000000 -27.499591827 35.499591827 -11.500000000 -28.497470856 35.497474670 -11.500000000 -29.488407135 35.488403320 -11.500000000 -30.458978653 35.458980560 -11.500000000 -31.384418488 35.384422302 -11.500000000 -32.233222961 35.233222961 -11.500000000 -32.981101990 34.981101990 -11.500000000 --33.981101990 -35.981101990 -10.500000000 --33.233226776 -36.233222961 -10.500000000 --32.384422302 -36.384418488 -10.500000000 --31.458978653 -36.458980560 -10.500000000 --30.488407135 -36.488403320 -10.500000000 --29.497472763 -36.497474670 -10.500000000 --28.499593735 -36.499591827 -10.500000000 --27.499954224 -36.499954224 -10.500000000 --26.499996185 -36.499996185 -10.500000000 --25.500000000 -36.500000000 -10.500000000 --24.500000000 -36.500000000 -10.500000000 --23.500000000 -36.500000000 -10.500000000 --22.500000000 -36.500000000 -10.500000000 --21.500000000 -36.500000000 -10.500000000 --20.500000000 -36.500000000 -10.500000000 --19.500000000 
-36.500000000 -10.500000000 --18.500000000 -36.500000000 -10.500000000 --17.500000000 -36.500000000 -10.500000000 --16.500000000 -36.500000000 -10.500000000 --15.500000000 -36.500000000 -10.500000000 --14.500000000 -36.500000000 -10.500000000 --13.500000000 -36.500000000 -10.500000000 --12.500000000 -36.500000000 -10.500000000 --11.500000000 -36.500000000 -10.500000000 --10.500000000 -36.500000000 -10.500000000 --9.500000000 -36.500000000 -10.500000000 --8.500000000 -36.500000000 -10.500000000 --7.500000000 -36.500000000 -10.500000000 --6.500000000 -36.500000000 -10.500000000 --5.500000000 -36.500000000 -10.500000000 --4.500000000 -36.500000000 -10.500000000 --3.500000000 -36.500000000 -10.500000000 --2.500000000 -36.500000000 -10.500000000 --1.500000000 -36.500000000 -10.500000000 --0.500000000 -36.500000000 -10.500000000 -0.500000000 -36.500000000 -10.500000000 -1.500000000 -36.500000000 -10.500000000 -2.500000000 -36.500000000 -10.500000000 -3.500000000 -36.500000000 -10.500000000 -4.500000000 -36.500000000 -10.500000000 -5.500000000 -36.500000000 -10.500000000 -6.500000000 -36.500000000 -10.500000000 -7.500000000 -36.500000000 -10.500000000 -8.500000000 -36.500000000 -10.500000000 -9.500000000 -36.500000000 -10.500000000 -10.500000000 -36.500000000 -10.500000000 -11.500000000 -36.500000000 -10.500000000 -12.500000000 -36.500000000 -10.500000000 -13.500000000 -36.500000000 -10.500000000 -14.500000000 -36.500000000 -10.500000000 -15.500000000 -36.500000000 -10.500000000 -16.500000000 -36.500000000 -10.500000000 -17.500000000 -36.500000000 -10.500000000 -18.500000000 -36.500000000 -10.500000000 -19.500000000 -36.500000000 -10.500000000 -20.500000000 -36.500000000 -10.500000000 -21.500000000 -36.500000000 -10.500000000 -22.500000000 -36.500000000 -10.500000000 -23.500000000 -36.500000000 -10.500000000 -24.500000000 -36.500000000 -10.500000000 -25.499996185 -36.499996185 -10.500000000 -26.499954224 -36.499954224 -10.500000000 -27.499591827 -36.499591827 -10.500000000 -28.497470856 -36.497467041 -10.500000000 -29.488407135 -36.488403320 -10.500000000 -30.458978653 -36.458980560 -10.500000000 -31.384418488 -36.384422302 -10.500000000 -32.233222961 -36.233222961 -10.500000000 -32.981101990 -35.981101990 -10.500000000 --35.167964935 -35.167964935 -10.500000000 --34.622871399 -35.622871399 -10.500000000 -33.622871399 -35.622871399 -10.500000000 -34.167964935 -35.167964935 -10.500000000 --35.981101990 -33.981101990 -10.500000000 --35.622871399 -34.622871399 -10.500000000 -34.622871399 -34.622871399 -10.500000000 -34.981101990 -33.981101990 -10.500000000 --36.233222961 -33.233222961 -10.500000000 -35.233222961 -33.233226776 -10.500000000 --36.384418488 -32.384422302 -10.500000000 -35.384418488 -32.384422302 -10.500000000 --36.458976746 -31.458978653 -10.500000000 -35.458980560 -31.458978653 -10.500000000 --36.488403320 -30.488407135 -10.500000000 -35.488403320 -30.488407135 -10.500000000 --36.497467041 -29.497472763 -10.500000000 -35.497474670 -29.497472763 -10.500000000 --36.499591827 -28.499593735 -10.500000000 -35.499591827 -28.499593735 -10.500000000 --36.499954224 -27.499954224 -10.500000000 -35.499954224 -27.499954224 -10.500000000 --36.499996185 -26.499996185 -10.500000000 -35.499996185 -26.499996185 -10.500000000 --36.500000000 -25.500000000 -10.500000000 -35.500000000 -25.500000000 -10.500000000 --36.500000000 -24.500000000 -10.500000000 -35.500000000 -24.500000000 -10.500000000 --36.500000000 -23.500000000 -10.500000000 -35.500000000 -23.500000000 -10.500000000 --36.500000000 
-22.500000000 -10.500000000 -35.500000000 -22.500000000 -10.500000000 --36.500000000 -21.500000000 -10.500000000 -35.500000000 -21.500000000 -10.500000000 --36.500000000 -20.500000000 -10.500000000 -35.500000000 -20.500000000 -10.500000000 --36.500000000 -19.500000000 -10.500000000 -35.500000000 -19.500000000 -10.500000000 --36.500000000 -18.500000000 -10.500000000 -35.500000000 -18.500000000 -10.500000000 --36.500000000 -17.500000000 -10.500000000 -35.500000000 -17.500000000 -10.500000000 --36.500000000 -16.500000000 -10.500000000 -35.500000000 -16.500000000 -10.500000000 --36.500000000 -15.500000000 -10.500000000 -35.500000000 -15.500000000 -10.500000000 --36.500000000 -14.500000000 -10.500000000 -35.500000000 -14.500000000 -10.500000000 --36.500000000 -13.500000000 -10.500000000 -35.500000000 -13.500000000 -10.500000000 --36.500000000 -12.500000000 -10.500000000 -35.500000000 -12.500000000 -10.500000000 --36.500000000 -11.500000000 -10.500000000 -35.500000000 -11.500000000 -10.500000000 --36.500000000 -10.500000000 -10.500000000 -35.500000000 -10.500000000 -10.500000000 --36.500000000 -9.500000000 -10.500000000 -35.500000000 -9.500000000 -10.500000000 --36.500000000 -8.500000000 -10.500000000 -35.500000000 -8.500000000 -10.500000000 --36.500000000 -7.500000000 -10.500000000 -35.500000000 -7.500000000 -10.500000000 --36.500000000 -6.500000000 -10.500000000 -35.500000000 -6.500000000 -10.500000000 --36.500000000 -5.500000000 -10.500000000 -35.500000000 -5.500000000 -10.500000000 --36.500000000 -4.500000000 -10.500000000 -35.500000000 -4.500000000 -10.500000000 --36.500000000 -3.500000000 -10.500000000 -35.500000000 -3.500000000 -10.500000000 --36.500000000 -2.500000000 -10.500000000 -35.500000000 -2.500000000 -10.500000000 --36.500000000 -1.500000000 -10.500000000 -35.500000000 -1.500000000 -10.500000000 --36.500000000 -0.500000000 -10.500000000 -35.500000000 -0.500000000 -10.500000000 --36.500000000 0.500000000 -10.500000000 -35.500000000 0.500000000 -10.500000000 --36.500000000 1.500000000 -10.500000000 -35.500000000 1.500000000 -10.500000000 --36.500000000 2.500000000 -10.500000000 -35.500000000 2.500000000 -10.500000000 --36.500000000 3.500000000 -10.500000000 -35.500000000 3.500000000 -10.500000000 --36.500000000 4.500000000 -10.500000000 -35.500000000 4.500000000 -10.500000000 --36.500000000 5.500000000 -10.500000000 -35.500000000 5.500000000 -10.500000000 --36.500000000 6.500000000 -10.500000000 -35.500000000 6.500000000 -10.500000000 --36.500000000 7.500000000 -10.500000000 -35.500000000 7.500000000 -10.500000000 --36.500000000 8.500000000 -10.500000000 -35.500000000 8.500000000 -10.500000000 --36.500000000 9.500000000 -10.500000000 -35.500000000 9.500000000 -10.500000000 --36.500000000 10.500000000 -10.500000000 -35.500000000 10.500000000 -10.500000000 --36.500000000 11.500000000 -10.500000000 -35.500000000 11.500000000 -10.500000000 --36.500000000 12.500000000 -10.500000000 -35.500000000 12.500000000 -10.500000000 --36.500000000 13.500000000 -10.500000000 -35.500000000 13.500000000 -10.500000000 --36.500000000 14.500000000 -10.500000000 -35.500000000 14.500000000 -10.500000000 --36.500000000 15.500000000 -10.500000000 -35.500000000 15.500000000 -10.500000000 --36.500000000 16.500000000 -10.500000000 -35.500000000 16.500000000 -10.500000000 --36.500000000 17.500000000 -10.500000000 -35.500000000 17.500000000 -10.500000000 --36.500000000 18.500000000 -10.500000000 -35.500000000 18.500000000 -10.500000000 --36.500000000 19.500000000 -10.500000000 -35.500000000 19.500000000 
-10.500000000 --36.500000000 20.500000000 -10.500000000 -35.500000000 20.500000000 -10.500000000 --36.500000000 21.500000000 -10.500000000 -35.500000000 21.500000000 -10.500000000 --36.500000000 22.500000000 -10.500000000 -35.500000000 22.500000000 -10.500000000 --36.500000000 23.500000000 -10.500000000 -35.500000000 23.500000000 -10.500000000 --36.500000000 24.500000000 -10.500000000 -35.500000000 24.500000000 -10.500000000 --36.499996185 25.499996185 -10.500000000 -35.499996185 25.499996185 -10.500000000 --36.499954224 26.499954224 -10.500000000 -35.499954224 26.499954224 -10.500000000 --36.499591827 27.499591827 -10.500000000 -35.499591827 27.499591827 -10.500000000 --36.497474670 28.497470856 -10.500000000 -35.497467041 28.497470856 -10.500000000 --36.488403320 29.488407135 -10.500000000 -35.488403320 29.488407135 -10.500000000 --36.458980560 30.458978653 -10.500000000 -35.458980560 30.458978653 -10.500000000 --36.384422302 31.384418488 -10.500000000 -35.384422302 31.384418488 -10.500000000 --36.233222961 32.233222961 -10.500000000 -35.233222961 32.233222961 -10.500000000 --35.981101990 32.981101990 -10.500000000 --35.622871399 33.622871399 -10.500000000 -34.622871399 33.622871399 -10.500000000 -34.981101990 32.981101990 -10.500000000 --35.167964935 34.167964935 -10.500000000 --34.622871399 34.622871399 -10.500000000 -33.622871399 34.622871399 -10.500000000 -34.167964935 34.167964935 -10.500000000 --33.981101990 34.981101990 -10.500000000 --33.233222961 35.233222961 -10.500000000 --32.384422302 35.384418488 -10.500000000 --31.458978653 35.458976746 -10.500000000 --30.488407135 35.488403320 -10.500000000 --29.497472763 35.497467041 -10.500000000 --28.499593735 35.499591827 -10.500000000 --27.499954224 35.499954224 -10.500000000 --26.499996185 35.499996185 -10.500000000 --25.500000000 35.500000000 -10.500000000 --24.500000000 35.500000000 -10.500000000 --23.500000000 35.500000000 -10.500000000 --22.500000000 35.500000000 -10.500000000 --21.500000000 35.500000000 -10.500000000 --20.500000000 35.500000000 -10.500000000 --19.500000000 35.500000000 -10.500000000 --18.500000000 35.500000000 -10.500000000 --17.500000000 35.500000000 -10.500000000 --16.500000000 35.500000000 -10.500000000 --15.500000000 35.500000000 -10.500000000 --14.500000000 35.500000000 -10.500000000 --13.500000000 35.500000000 -10.500000000 --12.500000000 35.500000000 -10.500000000 --11.500000000 35.500000000 -10.500000000 --10.500000000 35.500000000 -10.500000000 --9.500000000 35.500000000 -10.500000000 --8.500000000 35.500000000 -10.500000000 --7.500000000 35.500000000 -10.500000000 --6.500000000 35.500000000 -10.500000000 --5.500000000 35.500000000 -10.500000000 --4.500000000 35.500000000 -10.500000000 --3.500000000 35.500000000 -10.500000000 --2.500000000 35.500000000 -10.500000000 --1.500000000 35.500000000 -10.500000000 --0.500000000 35.500000000 -10.500000000 -0.500000000 35.500000000 -10.500000000 -1.500000000 35.500000000 -10.500000000 -2.500000000 35.500000000 -10.500000000 -3.500000000 35.500000000 -10.500000000 -4.500000000 35.500000000 -10.500000000 -5.500000000 35.500000000 -10.500000000 -6.500000000 35.500000000 -10.500000000 -7.500000000 35.500000000 -10.500000000 -8.500000000 35.500000000 -10.500000000 -9.500000000 35.500000000 -10.500000000 -10.500000000 35.500000000 -10.500000000 -11.500000000 35.500000000 -10.500000000 -12.500000000 35.500000000 -10.500000000 -13.500000000 35.500000000 -10.500000000 -14.500000000 35.500000000 -10.500000000 -15.500000000 35.500000000 -10.500000000 -16.500000000 
35.500000000 -10.500000000 -17.500000000 35.500000000 -10.500000000 -18.500000000 35.500000000 -10.500000000 -19.500000000 35.500000000 -10.500000000 -20.500000000 35.500000000 -10.500000000 -21.500000000 35.500000000 -10.500000000 -22.500000000 35.500000000 -10.500000000 -23.500000000 35.500000000 -10.500000000 -24.500000000 35.500000000 -10.500000000 -25.499996185 35.499996185 -10.500000000 -26.499954224 35.499954224 -10.500000000 -27.499591827 35.499591827 -10.500000000 -28.497470856 35.497474670 -10.500000000 -29.488407135 35.488403320 -10.500000000 -30.458978653 35.458980560 -10.500000000 -31.384418488 35.384422302 -10.500000000 -32.233222961 35.233222961 -10.500000000 -32.981101990 34.981101990 -10.500000000 --33.981101990 -35.981101990 -9.500000000 --33.233226776 -36.233222961 -9.500000000 --32.384422302 -36.384418488 -9.500000000 --31.458978653 -36.458980560 -9.500000000 --30.488407135 -36.488403320 -9.500000000 --29.497472763 -36.497474670 -9.500000000 --28.499593735 -36.499591827 -9.500000000 --27.499954224 -36.499954224 -9.500000000 --26.499996185 -36.499996185 -9.500000000 --25.500000000 -36.500000000 -9.500000000 --24.500000000 -36.500000000 -9.500000000 --23.500000000 -36.500000000 -9.500000000 --22.500000000 -36.500000000 -9.500000000 --21.500000000 -36.500000000 -9.500000000 --20.500000000 -36.500000000 -9.500000000 --19.500000000 -36.500000000 -9.500000000 --18.500000000 -36.500000000 -9.500000000 --17.500000000 -36.500000000 -9.500000000 --16.500000000 -36.500000000 -9.500000000 --15.500000000 -36.500000000 -9.500000000 --14.500000000 -36.500000000 -9.500000000 --13.500000000 -36.500000000 -9.500000000 --12.500000000 -36.500000000 -9.500000000 --11.500000000 -36.500000000 -9.500000000 --10.500000000 -36.500000000 -9.500000000 --9.500000000 -36.500000000 -9.500000000 --8.500000000 -36.500000000 -9.500000000 --7.500000000 -36.500000000 -9.500000000 --6.500000000 -36.500000000 -9.500000000 --5.500000000 -36.500000000 -9.500000000 --4.500000000 -36.500000000 -9.500000000 --3.500000000 -36.500000000 -9.500000000 --2.500000000 -36.500000000 -9.500000000 --1.500000000 -36.500000000 -9.500000000 --0.500000000 -36.500000000 -9.500000000 -0.500000000 -36.500000000 -9.500000000 -1.500000000 -36.500000000 -9.500000000 -2.500000000 -36.500000000 -9.500000000 -3.500000000 -36.500000000 -9.500000000 -4.500000000 -36.500000000 -9.500000000 -5.500000000 -36.500000000 -9.500000000 -6.500000000 -36.500000000 -9.500000000 -7.500000000 -36.500000000 -9.500000000 -8.500000000 -36.500000000 -9.500000000 -9.500000000 -36.500000000 -9.500000000 -10.500000000 -36.500000000 -9.500000000 -11.500000000 -36.500000000 -9.500000000 -12.500000000 -36.500000000 -9.500000000 -13.500000000 -36.500000000 -9.500000000 -14.500000000 -36.500000000 -9.500000000 -15.500000000 -36.500000000 -9.500000000 -16.500000000 -36.500000000 -9.500000000 -17.500000000 -36.500000000 -9.500000000 -18.500000000 -36.500000000 -9.500000000 -19.500000000 -36.500000000 -9.500000000 -20.500000000 -36.500000000 -9.500000000 -21.500000000 -36.500000000 -9.500000000 -22.500000000 -36.500000000 -9.500000000 -23.500000000 -36.500000000 -9.500000000 -24.500000000 -36.500000000 -9.500000000 -25.499996185 -36.499996185 -9.500000000 -26.499954224 -36.499954224 -9.500000000 -27.499591827 -36.499591827 -9.500000000 -28.497470856 -36.497467041 -9.500000000 -29.488407135 -36.488403320 -9.500000000 -30.458978653 -36.458980560 -9.500000000 -31.384418488 -36.384422302 -9.500000000 -32.233222961 -36.233222961 -9.500000000 -32.981101990 -35.981101990 
-9.500000000 --35.167964935 -35.167964935 -9.500000000 --34.622871399 -35.622871399 -9.500000000 -33.622871399 -35.622871399 -9.500000000 -34.167964935 -35.167964935 -9.500000000 --35.981101990 -33.981101990 -9.500000000 --35.622871399 -34.622871399 -9.500000000 -34.622871399 -34.622871399 -9.500000000 -34.981101990 -33.981101990 -9.500000000 --36.233222961 -33.233222961 -9.500000000 -35.233222961 -33.233226776 -9.500000000 --36.384418488 -32.384422302 -9.500000000 -35.384418488 -32.384422302 -9.500000000 --36.458976746 -31.458978653 -9.500000000 -35.458980560 -31.458978653 -9.500000000 --36.488403320 -30.488407135 -9.500000000 -35.488403320 -30.488407135 -9.500000000 --36.497467041 -29.497472763 -9.500000000 -35.497474670 -29.497472763 -9.500000000 --36.499591827 -28.499593735 -9.500000000 -35.499591827 -28.499593735 -9.500000000 --36.499954224 -27.499954224 -9.500000000 -35.499954224 -27.499954224 -9.500000000 --36.499996185 -26.499996185 -9.500000000 -35.499996185 -26.499996185 -9.500000000 --36.500000000 -25.500000000 -9.500000000 -35.500000000 -25.500000000 -9.500000000 --36.500000000 -24.500000000 -9.500000000 -35.500000000 -24.500000000 -9.500000000 --36.500000000 -23.500000000 -9.500000000 -35.500000000 -23.500000000 -9.500000000 --36.500000000 -22.500000000 -9.500000000 -35.500000000 -22.500000000 -9.500000000 --36.500000000 -21.500000000 -9.500000000 -35.500000000 -21.500000000 -9.500000000 --36.500000000 -20.500000000 -9.500000000 -35.500000000 -20.500000000 -9.500000000 --36.500000000 -19.500000000 -9.500000000 -35.500000000 -19.500000000 -9.500000000 --36.500000000 -18.500000000 -9.500000000 -35.500000000 -18.500000000 -9.500000000 --36.500000000 -17.500000000 -9.500000000 -35.500000000 -17.500000000 -9.500000000 --36.500000000 -16.500000000 -9.500000000 -35.500000000 -16.500000000 -9.500000000 --36.500000000 -15.500000000 -9.500000000 -35.500000000 -15.500000000 -9.500000000 --36.500000000 -14.500000000 -9.500000000 -35.500000000 -14.500000000 -9.500000000 --36.500000000 -13.500000000 -9.500000000 -35.500000000 -13.500000000 -9.500000000 --36.500000000 -12.500000000 -9.500000000 -35.500000000 -12.500000000 -9.500000000 --36.500000000 -11.500000000 -9.500000000 -35.500000000 -11.500000000 -9.500000000 --36.500000000 -10.500000000 -9.500000000 -35.500000000 -10.500000000 -9.500000000 --36.500000000 -9.500000000 -9.500000000 -35.500000000 -9.500000000 -9.500000000 --36.500000000 -8.500000000 -9.500000000 -35.500000000 -8.500000000 -9.500000000 --36.500000000 -7.500000000 -9.500000000 -35.500000000 -7.500000000 -9.500000000 --36.500000000 -6.500000000 -9.500000000 -35.500000000 -6.500000000 -9.500000000 --36.500000000 -5.500000000 -9.500000000 -35.500000000 -5.500000000 -9.500000000 --36.500000000 -4.500000000 -9.500000000 -35.500000000 -4.500000000 -9.500000000 --36.500000000 -3.500000000 -9.500000000 -35.500000000 -3.500000000 -9.500000000 --36.500000000 -2.500000000 -9.500000000 -35.500000000 -2.500000000 -9.500000000 --36.500000000 -1.500000000 -9.500000000 -35.500000000 -1.500000000 -9.500000000 --36.500000000 -0.500000000 -9.500000000 -35.500000000 -0.500000000 -9.500000000 --36.500000000 0.500000000 -9.500000000 -35.500000000 0.500000000 -9.500000000 --36.500000000 1.500000000 -9.500000000 -35.500000000 1.500000000 -9.500000000 --36.500000000 2.500000000 -9.500000000 -35.500000000 2.500000000 -9.500000000 --36.500000000 3.500000000 -9.500000000 -35.500000000 3.500000000 -9.500000000 --36.500000000 4.500000000 -9.500000000 -35.500000000 4.500000000 -9.500000000 
--36.500000000 5.500000000 -9.500000000 -35.500000000 5.500000000 -9.500000000 --36.500000000 6.500000000 -9.500000000 -35.500000000 6.500000000 -9.500000000 --36.500000000 7.500000000 -9.500000000 -35.500000000 7.500000000 -9.500000000 --36.500000000 8.500000000 -9.500000000 -35.500000000 8.500000000 -9.500000000 --36.500000000 9.500000000 -9.500000000 -35.500000000 9.500000000 -9.500000000 --36.500000000 10.500000000 -9.500000000 -35.500000000 10.500000000 -9.500000000 --36.500000000 11.500000000 -9.500000000 -35.500000000 11.500000000 -9.500000000 --36.500000000 12.500000000 -9.500000000 -35.500000000 12.500000000 -9.500000000 --36.500000000 13.500000000 -9.500000000 -35.500000000 13.500000000 -9.500000000 --36.500000000 14.500000000 -9.500000000 -35.500000000 14.500000000 -9.500000000 --36.500000000 15.500000000 -9.500000000 -35.500000000 15.500000000 -9.500000000 --36.500000000 16.500000000 -9.500000000 -35.500000000 16.500000000 -9.500000000 --36.500000000 17.500000000 -9.500000000 -35.500000000 17.500000000 -9.500000000 --36.500000000 18.500000000 -9.500000000 -35.500000000 18.500000000 -9.500000000 --36.500000000 19.500000000 -9.500000000 -35.500000000 19.500000000 -9.500000000 --36.500000000 20.500000000 -9.500000000 -35.500000000 20.500000000 -9.500000000 --36.500000000 21.500000000 -9.500000000 -35.500000000 21.500000000 -9.500000000 --36.500000000 22.500000000 -9.500000000 -35.500000000 22.500000000 -9.500000000 --36.500000000 23.500000000 -9.500000000 -35.500000000 23.500000000 -9.500000000 --36.500000000 24.500000000 -9.500000000 -35.500000000 24.500000000 -9.500000000 --36.499996185 25.499996185 -9.500000000 -35.499996185 25.499996185 -9.500000000 --36.499954224 26.499954224 -9.500000000 -35.499954224 26.499954224 -9.500000000 --36.499591827 27.499591827 -9.500000000 -35.499591827 27.499591827 -9.500000000 --36.497474670 28.497470856 -9.500000000 -35.497467041 28.497470856 -9.500000000 --36.488403320 29.488407135 -9.500000000 -35.488403320 29.488407135 -9.500000000 --36.458980560 30.458978653 -9.500000000 -35.458980560 30.458978653 -9.500000000 --36.384422302 31.384418488 -9.500000000 -35.384422302 31.384418488 -9.500000000 --36.233222961 32.233222961 -9.500000000 -35.233222961 32.233222961 -9.500000000 --35.981101990 32.981101990 -9.500000000 --35.622871399 33.622871399 -9.500000000 -34.622871399 33.622871399 -9.500000000 -34.981101990 32.981101990 -9.500000000 --35.167964935 34.167964935 -9.500000000 --34.622871399 34.622871399 -9.500000000 -33.622871399 34.622871399 -9.500000000 -34.167964935 34.167964935 -9.500000000 --33.981101990 34.981101990 -9.500000000 --33.233222961 35.233222961 -9.500000000 --32.384422302 35.384418488 -9.500000000 --31.458978653 35.458976746 -9.500000000 --30.488407135 35.488403320 -9.500000000 --29.497472763 35.497467041 -9.500000000 --28.499593735 35.499591827 -9.500000000 --27.499954224 35.499954224 -9.500000000 --26.499996185 35.499996185 -9.500000000 --25.500000000 35.500000000 -9.500000000 --24.500000000 35.500000000 -9.500000000 --23.500000000 35.500000000 -9.500000000 --22.500000000 35.500000000 -9.500000000 --21.500000000 35.500000000 -9.500000000 --20.500000000 35.500000000 -9.500000000 --19.500000000 35.500000000 -9.500000000 --18.500000000 35.500000000 -9.500000000 --17.500000000 35.500000000 -9.500000000 --16.500000000 35.500000000 -9.500000000 --15.500000000 35.500000000 -9.500000000 --14.500000000 35.500000000 -9.500000000 --13.500000000 35.500000000 -9.500000000 --12.500000000 35.500000000 -9.500000000 --11.500000000 35.500000000 
-9.500000000 --10.500000000 35.500000000 -9.500000000 --9.500000000 35.500000000 -9.500000000 --8.500000000 35.500000000 -9.500000000 --7.500000000 35.500000000 -9.500000000 --6.500000000 35.500000000 -9.500000000 --5.500000000 35.500000000 -9.500000000 --4.500000000 35.500000000 -9.500000000 --3.500000000 35.500000000 -9.500000000 --2.500000000 35.500000000 -9.500000000 --1.500000000 35.500000000 -9.500000000 --0.500000000 35.500000000 -9.500000000 -0.500000000 35.500000000 -9.500000000 -1.500000000 35.500000000 -9.500000000 -2.500000000 35.500000000 -9.500000000 -3.500000000 35.500000000 -9.500000000 -4.500000000 35.500000000 -9.500000000 -5.500000000 35.500000000 -9.500000000 -6.500000000 35.500000000 -9.500000000 -7.500000000 35.500000000 -9.500000000 -8.500000000 35.500000000 -9.500000000 -9.500000000 35.500000000 -9.500000000 -10.500000000 35.500000000 -9.500000000 -11.500000000 35.500000000 -9.500000000 -12.500000000 35.500000000 -9.500000000 -13.500000000 35.500000000 -9.500000000 -14.500000000 35.500000000 -9.500000000 -15.500000000 35.500000000 -9.500000000 -16.500000000 35.500000000 -9.500000000 -17.500000000 35.500000000 -9.500000000 -18.500000000 35.500000000 -9.500000000 -19.500000000 35.500000000 -9.500000000 -20.500000000 35.500000000 -9.500000000 -21.500000000 35.500000000 -9.500000000 -22.500000000 35.500000000 -9.500000000 -23.500000000 35.500000000 -9.500000000 -24.500000000 35.500000000 -9.500000000 -25.499996185 35.499996185 -9.500000000 -26.499954224 35.499954224 -9.500000000 -27.499591827 35.499591827 -9.500000000 -28.497470856 35.497474670 -9.500000000 -29.488407135 35.488403320 -9.500000000 -30.458978653 35.458980560 -9.500000000 -31.384418488 35.384422302 -9.500000000 -32.233222961 35.233222961 -9.500000000 -32.981101990 34.981101990 -9.500000000 --33.981101990 -35.981101990 -8.500000000 --33.233226776 -36.233222961 -8.500000000 --32.384422302 -36.384418488 -8.500000000 --31.458978653 -36.458980560 -8.500000000 --30.488407135 -36.488403320 -8.500000000 --29.497472763 -36.497474670 -8.500000000 --28.499593735 -36.499591827 -8.500000000 --27.499954224 -36.499954224 -8.500000000 --26.499996185 -36.499996185 -8.500000000 --25.500000000 -36.500000000 -8.500000000 --24.500000000 -36.500000000 -8.500000000 --23.500000000 -36.500000000 -8.500000000 --22.500000000 -36.500000000 -8.500000000 --21.500000000 -36.500000000 -8.500000000 --20.500000000 -36.500000000 -8.500000000 --19.500000000 -36.500000000 -8.500000000 --18.500000000 -36.500000000 -8.500000000 --17.500000000 -36.500000000 -8.500000000 --16.500000000 -36.500000000 -8.500000000 --15.500000000 -36.500000000 -8.500000000 --14.500000000 -36.500000000 -8.500000000 --13.500000000 -36.500000000 -8.500000000 --12.500000000 -36.500000000 -8.500000000 --11.500000000 -36.500000000 -8.500000000 --10.500000000 -36.500000000 -8.500000000 --9.500000000 -36.500000000 -8.500000000 --8.500000000 -36.500000000 -8.500000000 --7.500000000 -36.500000000 -8.500000000 --6.500000000 -36.500000000 -8.500000000 --5.500000000 -36.500000000 -8.500000000 --4.500000000 -36.500000000 -8.500000000 --3.500000000 -36.500000000 -8.500000000 --2.500000000 -36.500000000 -8.500000000 --1.500000000 -36.500000000 -8.500000000 --0.500000000 -36.500000000 -8.500000000 -0.500000000 -36.500000000 -8.500000000 -1.500000000 -36.500000000 -8.500000000 -2.500000000 -36.500000000 -8.500000000 -3.500000000 -36.500000000 -8.500000000 -4.500000000 -36.500000000 -8.500000000 -5.500000000 -36.500000000 -8.500000000 -6.500000000 -36.500000000 -8.500000000 
-7.500000000 -36.500000000 -8.500000000 -8.500000000 -36.500000000 -8.500000000 -9.500000000 -36.500000000 -8.500000000 -10.500000000 -36.500000000 -8.500000000 -11.500000000 -36.500000000 -8.500000000 -12.500000000 -36.500000000 -8.500000000 -13.500000000 -36.500000000 -8.500000000 -14.500000000 -36.500000000 -8.500000000 -15.500000000 -36.500000000 -8.500000000 -16.500000000 -36.500000000 -8.500000000 -17.500000000 -36.500000000 -8.500000000 -18.500000000 -36.500000000 -8.500000000 -19.500000000 -36.500000000 -8.500000000 -20.500000000 -36.500000000 -8.500000000 -21.500000000 -36.500000000 -8.500000000 -22.500000000 -36.500000000 -8.500000000 -23.500000000 -36.500000000 -8.500000000 -24.500000000 -36.500000000 -8.500000000 -25.499996185 -36.499996185 -8.500000000 -26.499954224 -36.499954224 -8.500000000 -27.499591827 -36.499591827 -8.500000000 -28.497470856 -36.497467041 -8.500000000 -29.488407135 -36.488403320 -8.500000000 -30.458978653 -36.458980560 -8.500000000 -31.384418488 -36.384422302 -8.500000000 -32.233222961 -36.233222961 -8.500000000 -32.981101990 -35.981101990 -8.500000000 --35.167964935 -35.167964935 -8.500000000 --34.622871399 -35.622871399 -8.500000000 -33.622871399 -35.622871399 -8.500000000 -34.167964935 -35.167964935 -8.500000000 --35.981101990 -33.981101990 -8.500000000 --35.622871399 -34.622871399 -8.500000000 -34.622871399 -34.622871399 -8.500000000 -34.981101990 -33.981101990 -8.500000000 --36.233222961 -33.233222961 -8.500000000 -35.233222961 -33.233226776 -8.500000000 --36.384418488 -32.384422302 -8.500000000 -35.384418488 -32.384422302 -8.500000000 --36.458976746 -31.458978653 -8.500000000 -35.458980560 -31.458978653 -8.500000000 --36.488403320 -30.488407135 -8.500000000 -35.488403320 -30.488407135 -8.500000000 --36.497467041 -29.497472763 -8.500000000 -35.497474670 -29.497472763 -8.500000000 --36.499591827 -28.499593735 -8.500000000 -35.499591827 -28.499593735 -8.500000000 --36.499954224 -27.499954224 -8.500000000 -35.499954224 -27.499954224 -8.500000000 --36.499996185 -26.499996185 -8.500000000 -35.499996185 -26.499996185 -8.500000000 --36.500000000 -25.500000000 -8.500000000 -35.500000000 -25.500000000 -8.500000000 --36.500000000 -24.500000000 -8.500000000 -35.500000000 -24.500000000 -8.500000000 --36.500000000 -23.500000000 -8.500000000 -35.500000000 -23.500000000 -8.500000000 --36.500000000 -22.500000000 -8.500000000 -35.500000000 -22.500000000 -8.500000000 --36.500000000 -21.500000000 -8.500000000 -35.500000000 -21.500000000 -8.500000000 --36.500000000 -20.500000000 -8.500000000 -35.500000000 -20.500000000 -8.500000000 --36.500000000 -19.500000000 -8.500000000 -35.500000000 -19.500000000 -8.500000000 --36.500000000 -18.500000000 -8.500000000 -35.500000000 -18.500000000 -8.500000000 --36.500000000 -17.500000000 -8.500000000 -35.500000000 -17.500000000 -8.500000000 --36.500000000 -16.500000000 -8.500000000 -35.500000000 -16.500000000 -8.500000000 --36.500000000 -15.500000000 -8.500000000 -35.500000000 -15.500000000 -8.500000000 --36.500000000 -14.500000000 -8.500000000 -35.500000000 -14.500000000 -8.500000000 --36.500000000 -13.500000000 -8.500000000 -35.500000000 -13.500000000 -8.500000000 --36.500000000 -12.500000000 -8.500000000 -35.500000000 -12.500000000 -8.500000000 --36.500000000 -11.500000000 -8.500000000 -35.500000000 -11.500000000 -8.500000000 --36.500000000 -10.500000000 -8.500000000 -35.500000000 -10.500000000 -8.500000000 --36.500000000 -9.500000000 -8.500000000 -35.500000000 -9.500000000 -8.500000000 --36.500000000 -8.500000000 -8.500000000 
-35.500000000 -8.500000000 -8.500000000 --36.500000000 -7.500000000 -8.500000000 -35.500000000 -7.500000000 -8.500000000 --36.500000000 -6.500000000 -8.500000000 -35.500000000 -6.500000000 -8.500000000 --36.500000000 -5.500000000 -8.500000000 -35.500000000 -5.500000000 -8.500000000 --36.500000000 -4.500000000 -8.500000000 -35.500000000 -4.500000000 -8.500000000 --36.500000000 -3.500000000 -8.500000000 -35.500000000 -3.500000000 -8.500000000 --36.500000000 -2.500000000 -8.500000000 -35.500000000 -2.500000000 -8.500000000 --36.500000000 -1.500000000 -8.500000000 -35.500000000 -1.500000000 -8.500000000 --36.500000000 -0.500000000 -8.500000000 -35.500000000 -0.500000000 -8.500000000 --36.500000000 0.500000000 -8.500000000 -35.500000000 0.500000000 -8.500000000 --36.500000000 1.500000000 -8.500000000 -35.500000000 1.500000000 -8.500000000 --36.500000000 2.500000000 -8.500000000 -35.500000000 2.500000000 -8.500000000 --36.500000000 3.500000000 -8.500000000 -35.500000000 3.500000000 -8.500000000 --36.500000000 4.500000000 -8.500000000 -35.500000000 4.500000000 -8.500000000 --36.500000000 5.500000000 -8.500000000 -35.500000000 5.500000000 -8.500000000 --36.500000000 6.500000000 -8.500000000 -35.500000000 6.500000000 -8.500000000 --36.500000000 7.500000000 -8.500000000 -35.500000000 7.500000000 -8.500000000 --36.500000000 8.500000000 -8.500000000 -35.500000000 8.500000000 -8.500000000 --36.500000000 9.500000000 -8.500000000 -35.500000000 9.500000000 -8.500000000 --36.500000000 10.500000000 -8.500000000 -35.500000000 10.500000000 -8.500000000 --36.500000000 11.500000000 -8.500000000 -35.500000000 11.500000000 -8.500000000 --36.500000000 12.500000000 -8.500000000 -35.500000000 12.500000000 -8.500000000 --36.500000000 13.500000000 -8.500000000 -35.500000000 13.500000000 -8.500000000 --36.500000000 14.500000000 -8.500000000 -35.500000000 14.500000000 -8.500000000 --36.500000000 15.500000000 -8.500000000 -35.500000000 15.500000000 -8.500000000 --36.500000000 16.500000000 -8.500000000 -35.500000000 16.500000000 -8.500000000 --36.500000000 17.500000000 -8.500000000 -35.500000000 17.500000000 -8.500000000 --36.500000000 18.500000000 -8.500000000 -35.500000000 18.500000000 -8.500000000 --36.500000000 19.500000000 -8.500000000 -35.500000000 19.500000000 -8.500000000 --36.500000000 20.500000000 -8.500000000 -35.500000000 20.500000000 -8.500000000 --36.500000000 21.500000000 -8.500000000 -35.500000000 21.500000000 -8.500000000 --36.500000000 22.500000000 -8.500000000 -35.500000000 22.500000000 -8.500000000 --36.500000000 23.500000000 -8.500000000 -35.500000000 23.500000000 -8.500000000 --36.500000000 24.500000000 -8.500000000 -35.500000000 24.500000000 -8.500000000 --36.499996185 25.499996185 -8.500000000 -35.499996185 25.499996185 -8.500000000 --36.499954224 26.499954224 -8.500000000 -35.499954224 26.499954224 -8.500000000 --36.499591827 27.499591827 -8.500000000 -35.499591827 27.499591827 -8.500000000 --36.497474670 28.497470856 -8.500000000 -35.497467041 28.497470856 -8.500000000 --36.488403320 29.488407135 -8.500000000 -35.488403320 29.488407135 -8.500000000 --36.458980560 30.458978653 -8.500000000 -35.458980560 30.458978653 -8.500000000 --36.384422302 31.384418488 -8.500000000 -35.384422302 31.384418488 -8.500000000 --36.233222961 32.233222961 -8.500000000 -35.233222961 32.233222961 -8.500000000 --35.981101990 32.981101990 -8.500000000 --35.622871399 33.622871399 -8.500000000 -34.622871399 33.622871399 -8.500000000 -34.981101990 32.981101990 -8.500000000 --35.167964935 34.167964935 -8.500000000 
--34.622871399 34.622871399 -8.500000000 -33.622871399 34.622871399 -8.500000000 -34.167964935 34.167964935 -8.500000000 --33.981101990 34.981101990 -8.500000000 --33.233222961 35.233222961 -8.500000000 --32.384422302 35.384418488 -8.500000000 --31.458978653 35.458976746 -8.500000000 --30.488407135 35.488403320 -8.500000000 --29.497472763 35.497467041 -8.500000000 --28.499593735 35.499591827 -8.500000000 --27.499954224 35.499954224 -8.500000000 --26.499996185 35.499996185 -8.500000000 --25.500000000 35.500000000 -8.500000000 --24.500000000 35.500000000 -8.500000000 --23.500000000 35.500000000 -8.500000000 --22.500000000 35.500000000 -8.500000000 --21.500000000 35.500000000 -8.500000000 --20.500000000 35.500000000 -8.500000000 --19.500000000 35.500000000 -8.500000000 --18.500000000 35.500000000 -8.500000000 --17.500000000 35.500000000 -8.500000000 --16.500000000 35.500000000 -8.500000000 --15.500000000 35.500000000 -8.500000000 --14.500000000 35.500000000 -8.500000000 --13.500000000 35.500000000 -8.500000000 --12.500000000 35.500000000 -8.500000000 --11.500000000 35.500000000 -8.500000000 --10.500000000 35.500000000 -8.500000000 --9.500000000 35.500000000 -8.500000000 --8.500000000 35.500000000 -8.500000000 --7.500000000 35.500000000 -8.500000000 --6.500000000 35.500000000 -8.500000000 --5.500000000 35.500000000 -8.500000000 --4.500000000 35.500000000 -8.500000000 --3.500000000 35.500000000 -8.500000000 --2.500000000 35.500000000 -8.500000000 --1.500000000 35.500000000 -8.500000000 --0.500000000 35.500000000 -8.500000000 -0.500000000 35.500000000 -8.500000000 -1.500000000 35.500000000 -8.500000000 -2.500000000 35.500000000 -8.500000000 -3.500000000 35.500000000 -8.500000000 -4.500000000 35.500000000 -8.500000000 -5.500000000 35.500000000 -8.500000000 -6.500000000 35.500000000 -8.500000000 -7.500000000 35.500000000 -8.500000000 -8.500000000 35.500000000 -8.500000000 -9.500000000 35.500000000 -8.500000000 -10.500000000 35.500000000 -8.500000000 -11.500000000 35.500000000 -8.500000000 -12.500000000 35.500000000 -8.500000000 -13.500000000 35.500000000 -8.500000000 -14.500000000 35.500000000 -8.500000000 -15.500000000 35.500000000 -8.500000000 -16.500000000 35.500000000 -8.500000000 -17.500000000 35.500000000 -8.500000000 -18.500000000 35.500000000 -8.500000000 -19.500000000 35.500000000 -8.500000000 -20.500000000 35.500000000 -8.500000000 -21.500000000 35.500000000 -8.500000000 -22.500000000 35.500000000 -8.500000000 -23.500000000 35.500000000 -8.500000000 -24.500000000 35.500000000 -8.500000000 -25.499996185 35.499996185 -8.500000000 -26.499954224 35.499954224 -8.500000000 -27.499591827 35.499591827 -8.500000000 -28.497470856 35.497474670 -8.500000000 -29.488407135 35.488403320 -8.500000000 -30.458978653 35.458980560 -8.500000000 -31.384418488 35.384422302 -8.500000000 -32.233222961 35.233222961 -8.500000000 -32.981101990 34.981101990 -8.500000000 --33.981101990 -35.981101990 -7.500000000 --33.233226776 -36.233222961 -7.500000000 --32.384422302 -36.384418488 -7.500000000 --31.458978653 -36.458980560 -7.500000000 --30.488407135 -36.488403320 -7.500000000 --29.497472763 -36.497474670 -7.500000000 --28.499593735 -36.499591827 -7.500000000 --27.499954224 -36.499954224 -7.500000000 --26.499996185 -36.499996185 -7.500000000 --25.500000000 -36.500000000 -7.500000000 --24.500000000 -36.500000000 -7.500000000 --23.500000000 -36.500000000 -7.500000000 --22.500000000 -36.500000000 -7.500000000 --21.500000000 -36.500000000 -7.500000000 --20.500000000 -36.500000000 -7.500000000 --19.500000000 -36.500000000 
-7.500000000 --18.500000000 -36.500000000 -7.500000000 --17.500000000 -36.500000000 -7.500000000 --16.500000000 -36.500000000 -7.500000000 --15.500000000 -36.500000000 -7.500000000 --14.500000000 -36.500000000 -7.500000000 --13.500000000 -36.500000000 -7.500000000 --12.500000000 -36.500000000 -7.500000000 --11.500000000 -36.500000000 -7.500000000 --10.500000000 -36.500000000 -7.500000000 --9.500000000 -36.500000000 -7.500000000 --8.500000000 -36.500000000 -7.500000000 --7.500000000 -36.500000000 -7.500000000 --6.500000000 -36.500000000 -7.500000000 --5.500000000 -36.500000000 -7.500000000 --4.500000000 -36.500000000 -7.500000000 --3.500000000 -36.500000000 -7.500000000 --2.500000000 -36.500000000 -7.500000000 --1.500000000 -36.500000000 -7.500000000 --0.500000000 -36.500000000 -7.500000000 -0.500000000 -36.500000000 -7.500000000 -1.500000000 -36.500000000 -7.500000000 -2.500000000 -36.500000000 -7.500000000 -3.500000000 -36.500000000 -7.500000000 -4.500000000 -36.500000000 -7.500000000 -5.500000000 -36.500000000 -7.500000000 -6.500000000 -36.500000000 -7.500000000 -7.500000000 -36.500000000 -7.500000000 -8.500000000 -36.500000000 -7.500000000 -9.500000000 -36.500000000 -7.500000000 -10.500000000 -36.500000000 -7.500000000 -11.500000000 -36.500000000 -7.500000000 -12.500000000 -36.500000000 -7.500000000 -13.500000000 -36.500000000 -7.500000000 -14.500000000 -36.500000000 -7.500000000 -15.500000000 -36.500000000 -7.500000000 -16.500000000 -36.500000000 -7.500000000 -17.500000000 -36.500000000 -7.500000000 -18.500000000 -36.500000000 -7.500000000 -19.500000000 -36.500000000 -7.500000000 -20.500000000 -36.500000000 -7.500000000 -21.500000000 -36.500000000 -7.500000000 -22.500000000 -36.500000000 -7.500000000 -23.500000000 -36.500000000 -7.500000000 -24.500000000 -36.500000000 -7.500000000 -25.499996185 -36.499996185 -7.500000000 -26.499954224 -36.499954224 -7.500000000 -27.499591827 -36.499591827 -7.500000000 -28.497470856 -36.497467041 -7.500000000 -29.488407135 -36.488403320 -7.500000000 -30.458978653 -36.458980560 -7.500000000 -31.384418488 -36.384422302 -7.500000000 -32.233222961 -36.233222961 -7.500000000 -32.981101990 -35.981101990 -7.500000000 --35.167964935 -35.167964935 -7.500000000 --34.622871399 -35.622871399 -7.500000000 -33.622871399 -35.622871399 -7.500000000 -34.167964935 -35.167964935 -7.500000000 --35.981101990 -33.981101990 -7.500000000 --35.622871399 -34.622871399 -7.500000000 -34.622871399 -34.622871399 -7.500000000 -34.981101990 -33.981101990 -7.500000000 --36.233222961 -33.233222961 -7.500000000 -35.233222961 -33.233226776 -7.500000000 --36.384418488 -32.384422302 -7.500000000 -35.384418488 -32.384422302 -7.500000000 --36.458976746 -31.458978653 -7.500000000 -35.458980560 -31.458978653 -7.500000000 --36.488403320 -30.488407135 -7.500000000 -35.488403320 -30.488407135 -7.500000000 --36.497467041 -29.497472763 -7.500000000 -35.497474670 -29.497472763 -7.500000000 --36.499591827 -28.499593735 -7.500000000 -35.499591827 -28.499593735 -7.500000000 --36.499954224 -27.499954224 -7.500000000 -35.499954224 -27.499954224 -7.500000000 --36.499996185 -26.499996185 -7.500000000 -35.499996185 -26.499996185 -7.500000000 --36.500000000 -25.500000000 -7.500000000 -35.500000000 -25.500000000 -7.500000000 --36.500000000 -24.500000000 -7.500000000 -35.500000000 -24.500000000 -7.500000000 --36.500000000 -23.500000000 -7.500000000 -35.500000000 -23.500000000 -7.500000000 --36.500000000 -22.500000000 -7.500000000 -35.500000000 -22.500000000 -7.500000000 --36.500000000 -21.500000000 -7.500000000 
-35.500000000 -21.500000000 -7.500000000 --36.500000000 -20.500000000 -7.500000000 -35.500000000 -20.500000000 -7.500000000 --36.500000000 -19.500000000 -7.500000000 -35.500000000 -19.500000000 -7.500000000 --36.500000000 -18.500000000 -7.500000000 -35.500000000 -18.500000000 -7.500000000 --36.500000000 -17.500000000 -7.500000000 -35.500000000 -17.500000000 -7.500000000 --36.500000000 -16.500000000 -7.500000000 -35.500000000 -16.500000000 -7.500000000 --36.500000000 -15.500000000 -7.500000000 -35.500000000 -15.500000000 -7.500000000 --36.500000000 -14.500000000 -7.500000000 -35.500000000 -14.500000000 -7.500000000 --36.500000000 -13.500000000 -7.500000000 -35.500000000 -13.500000000 -7.500000000 --36.500000000 -12.500000000 -7.500000000 -35.500000000 -12.500000000 -7.500000000 --36.500000000 -11.500000000 -7.500000000 -35.500000000 -11.500000000 -7.500000000 --36.500000000 -10.500000000 -7.500000000 -35.500000000 -10.500000000 -7.500000000 --36.500000000 -9.500000000 -7.500000000 -35.500000000 -9.500000000 -7.500000000 --36.500000000 -8.500000000 -7.500000000 -35.500000000 -8.500000000 -7.500000000 --36.500000000 -7.500000000 -7.500000000 -35.500000000 -7.500000000 -7.500000000 --36.500000000 -6.500000000 -7.500000000 -35.500000000 -6.500000000 -7.500000000 --36.500000000 -5.500000000 -7.500000000 -35.500000000 -5.500000000 -7.500000000 --36.500000000 -4.500000000 -7.500000000 -35.500000000 -4.500000000 -7.500000000 --36.500000000 -3.500000000 -7.500000000 -35.500000000 -3.500000000 -7.500000000 --36.500000000 -2.500000000 -7.500000000 -35.500000000 -2.500000000 -7.500000000 --36.500000000 -1.500000000 -7.500000000 -35.500000000 -1.500000000 -7.500000000 --36.500000000 -0.500000000 -7.500000000 -35.500000000 -0.500000000 -7.500000000 --36.500000000 0.500000000 -7.500000000 -35.500000000 0.500000000 -7.500000000 --36.500000000 1.500000000 -7.500000000 -35.500000000 1.500000000 -7.500000000 --36.500000000 2.500000000 -7.500000000 -35.500000000 2.500000000 -7.500000000 --36.500000000 3.500000000 -7.500000000 -35.500000000 3.500000000 -7.500000000 --36.500000000 4.500000000 -7.500000000 -35.500000000 4.500000000 -7.500000000 --36.500000000 5.500000000 -7.500000000 -35.500000000 5.500000000 -7.500000000 --36.500000000 6.500000000 -7.500000000 -35.500000000 6.500000000 -7.500000000 --36.500000000 7.500000000 -7.500000000 -35.500000000 7.500000000 -7.500000000 --36.500000000 8.500000000 -7.500000000 -35.500000000 8.500000000 -7.500000000 --36.500000000 9.500000000 -7.500000000 -35.500000000 9.500000000 -7.500000000 --36.500000000 10.500000000 -7.500000000 -35.500000000 10.500000000 -7.500000000 --36.500000000 11.500000000 -7.500000000 -35.500000000 11.500000000 -7.500000000 --36.500000000 12.500000000 -7.500000000 -35.500000000 12.500000000 -7.500000000 --36.500000000 13.500000000 -7.500000000 -35.500000000 13.500000000 -7.500000000 --36.500000000 14.500000000 -7.500000000 -35.500000000 14.500000000 -7.500000000 --36.500000000 15.500000000 -7.500000000 -35.500000000 15.500000000 -7.500000000 --36.500000000 16.500000000 -7.500000000 -35.500000000 16.500000000 -7.500000000 --36.500000000 17.500000000 -7.500000000 -35.500000000 17.500000000 -7.500000000 --36.500000000 18.500000000 -7.500000000 -35.500000000 18.500000000 -7.500000000 --36.500000000 19.500000000 -7.500000000 -35.500000000 19.500000000 -7.500000000 --36.500000000 20.500000000 -7.500000000 -35.500000000 20.500000000 -7.500000000 --36.500000000 21.500000000 -7.500000000 -35.500000000 21.500000000 -7.500000000 --36.500000000 22.500000000 
-7.500000000 -35.500000000 22.500000000 -7.500000000 --36.500000000 23.500000000 -7.500000000 -35.500000000 23.500000000 -7.500000000 --36.500000000 24.500000000 -7.500000000 -35.500000000 24.500000000 -7.500000000 --36.499996185 25.499996185 -7.500000000 -35.499996185 25.499996185 -7.500000000 --36.499954224 26.499954224 -7.500000000 -35.499954224 26.499954224 -7.500000000 --36.499591827 27.499591827 -7.500000000 -35.499591827 27.499591827 -7.500000000 --36.497474670 28.497470856 -7.500000000 -35.497467041 28.497470856 -7.500000000 --36.488403320 29.488407135 -7.500000000 -35.488403320 29.488407135 -7.500000000 --36.458980560 30.458978653 -7.500000000 -35.458980560 30.458978653 -7.500000000 --36.384422302 31.384418488 -7.500000000 -35.384422302 31.384418488 -7.500000000 --36.233222961 32.233222961 -7.500000000 -35.233222961 32.233222961 -7.500000000 --35.981101990 32.981101990 -7.500000000 --35.622871399 33.622871399 -7.500000000 -34.622871399 33.622871399 -7.500000000 -34.981101990 32.981101990 -7.500000000 --35.167964935 34.167964935 -7.500000000 --34.622871399 34.622871399 -7.500000000 -33.622871399 34.622871399 -7.500000000 -34.167964935 34.167964935 -7.500000000 --33.981101990 34.981101990 -7.500000000 --33.233222961 35.233222961 -7.500000000 --32.384422302 35.384418488 -7.500000000 --31.458978653 35.458976746 -7.500000000 --30.488407135 35.488403320 -7.500000000 --29.497472763 35.497467041 -7.500000000 --28.499593735 35.499591827 -7.500000000 --27.499954224 35.499954224 -7.500000000 --26.499996185 35.499996185 -7.500000000 --25.500000000 35.500000000 -7.500000000 --24.500000000 35.500000000 -7.500000000 --23.500000000 35.500000000 -7.500000000 --22.500000000 35.500000000 -7.500000000 --21.500000000 35.500000000 -7.500000000 --20.500000000 35.500000000 -7.500000000 --19.500000000 35.500000000 -7.500000000 --18.500000000 35.500000000 -7.500000000 --17.500000000 35.500000000 -7.500000000 --16.500000000 35.500000000 -7.500000000 --15.500000000 35.500000000 -7.500000000 --14.500000000 35.500000000 -7.500000000 --13.500000000 35.500000000 -7.500000000 --12.500000000 35.500000000 -7.500000000 --11.500000000 35.500000000 -7.500000000 --10.500000000 35.500000000 -7.500000000 --9.500000000 35.500000000 -7.500000000 --8.500000000 35.500000000 -7.500000000 --7.500000000 35.500000000 -7.500000000 --6.500000000 35.500000000 -7.500000000 --5.500000000 35.500000000 -7.500000000 --4.500000000 35.500000000 -7.500000000 --3.500000000 35.500000000 -7.500000000 --2.500000000 35.500000000 -7.500000000 --1.500000000 35.500000000 -7.500000000 --0.500000000 35.500000000 -7.500000000 -0.500000000 35.500000000 -7.500000000 -1.500000000 35.500000000 -7.500000000 -2.500000000 35.500000000 -7.500000000 -3.500000000 35.500000000 -7.500000000 -4.500000000 35.500000000 -7.500000000 -5.500000000 35.500000000 -7.500000000 -6.500000000 35.500000000 -7.500000000 -7.500000000 35.500000000 -7.500000000 -8.500000000 35.500000000 -7.500000000 -9.500000000 35.500000000 -7.500000000 -10.500000000 35.500000000 -7.500000000 -11.500000000 35.500000000 -7.500000000 -12.500000000 35.500000000 -7.500000000 -13.500000000 35.500000000 -7.500000000 -14.500000000 35.500000000 -7.500000000 -15.500000000 35.500000000 -7.500000000 -16.500000000 35.500000000 -7.500000000 -17.500000000 35.500000000 -7.500000000 -18.500000000 35.500000000 -7.500000000 -19.500000000 35.500000000 -7.500000000 -20.500000000 35.500000000 -7.500000000 -21.500000000 35.500000000 -7.500000000 -22.500000000 35.500000000 -7.500000000 -23.500000000 35.500000000 
-7.500000000 -24.500000000 35.500000000 -7.500000000 -25.499996185 35.499996185 -7.500000000 -26.499954224 35.499954224 -7.500000000 -27.499591827 35.499591827 -7.500000000 -28.497470856 35.497474670 -7.500000000 -29.488407135 35.488403320 -7.500000000 -30.458978653 35.458980560 -7.500000000 -31.384418488 35.384422302 -7.500000000 -32.233222961 35.233222961 -7.500000000 -32.981101990 34.981101990 -7.500000000 --33.981101990 -35.981101990 -6.500000000 --33.233226776 -36.233222961 -6.500000000 --32.384422302 -36.384418488 -6.500000000 --31.458978653 -36.458980560 -6.500000000 --30.488407135 -36.488403320 -6.500000000 --29.497472763 -36.497474670 -6.500000000 --28.499593735 -36.499591827 -6.500000000 --27.499954224 -36.499954224 -6.500000000 --26.499996185 -36.499996185 -6.500000000 --25.500000000 -36.500000000 -6.500000000 --24.500000000 -36.500000000 -6.500000000 --23.500000000 -36.500000000 -6.500000000 --22.500000000 -36.500000000 -6.500000000 --21.500000000 -36.500000000 -6.500000000 --20.500000000 -36.500000000 -6.500000000 --19.500000000 -36.500000000 -6.500000000 --18.500000000 -36.500000000 -6.500000000 --17.500000000 -36.500000000 -6.500000000 --16.500000000 -36.500000000 -6.500000000 --15.500000000 -36.500000000 -6.500000000 --14.500000000 -36.500000000 -6.500000000 --13.500000000 -36.500000000 -6.500000000 --12.500000000 -36.500000000 -6.500000000 --11.500000000 -36.500000000 -6.500000000 --10.500000000 -36.500000000 -6.500000000 --9.500000000 -36.500000000 -6.500000000 --8.500000000 -36.500000000 -6.500000000 --7.500000000 -36.500000000 -6.500000000 --6.500000000 -36.500000000 -6.500000000 --5.500000000 -36.500000000 -6.500000000 --4.500000000 -36.500000000 -6.500000000 --3.500000000 -36.500000000 -6.500000000 --2.500000000 -36.500000000 -6.500000000 --1.500000000 -36.500000000 -6.500000000 --0.500000000 -36.500000000 -6.500000000 -0.500000000 -36.500000000 -6.500000000 -1.500000000 -36.500000000 -6.500000000 -2.500000000 -36.500000000 -6.500000000 -3.500000000 -36.500000000 -6.500000000 -4.500000000 -36.500000000 -6.500000000 -5.500000000 -36.500000000 -6.500000000 -6.500000000 -36.500000000 -6.500000000 -7.500000000 -36.500000000 -6.500000000 -8.500000000 -36.500000000 -6.500000000 -9.500000000 -36.500000000 -6.500000000 -10.500000000 -36.500000000 -6.500000000 -11.500000000 -36.500000000 -6.500000000 -12.500000000 -36.500000000 -6.500000000 -13.500000000 -36.500000000 -6.500000000 -14.500000000 -36.500000000 -6.500000000 -15.500000000 -36.500000000 -6.500000000 -16.500000000 -36.500000000 -6.500000000 -17.500000000 -36.500000000 -6.500000000 -18.500000000 -36.500000000 -6.500000000 -19.500000000 -36.500000000 -6.500000000 -20.500000000 -36.500000000 -6.500000000 -21.500000000 -36.500000000 -6.500000000 -22.500000000 -36.500000000 -6.500000000 -23.500000000 -36.500000000 -6.500000000 -24.500000000 -36.500000000 -6.500000000 -25.499996185 -36.499996185 -6.500000000 -26.499954224 -36.499954224 -6.500000000 -27.499591827 -36.499591827 -6.500000000 -28.497470856 -36.497467041 -6.500000000 -29.488407135 -36.488403320 -6.500000000 -30.458978653 -36.458980560 -6.500000000 -31.384418488 -36.384422302 -6.500000000 -32.233222961 -36.233222961 -6.500000000 -32.981101990 -35.981101990 -6.500000000 --35.167964935 -35.167964935 -6.500000000 --34.622871399 -35.622871399 -6.500000000 -33.622871399 -35.622871399 -6.500000000 -34.167964935 -35.167964935 -6.500000000 --35.981101990 -33.981101990 -6.500000000 --35.622871399 -34.622871399 -6.500000000 -34.622871399 -34.622871399 -6.500000000 
-34.981101990 -33.981101990 -6.500000000 --36.233222961 -33.233222961 -6.500000000 -35.233222961 -33.233226776 -6.500000000 --36.384418488 -32.384422302 -6.500000000 -35.384418488 -32.384422302 -6.500000000 --36.458976746 -31.458978653 -6.500000000 -35.458980560 -31.458978653 -6.500000000 --36.488403320 -30.488407135 -6.500000000 -35.488403320 -30.488407135 -6.500000000 --36.497467041 -29.497472763 -6.500000000 -35.497474670 -29.497472763 -6.500000000 --36.499591827 -28.499593735 -6.500000000 -35.499591827 -28.499593735 -6.500000000 --36.499954224 -27.499954224 -6.500000000 -35.499954224 -27.499954224 -6.500000000 --36.499996185 -26.499996185 -6.500000000 -35.499996185 -26.499996185 -6.500000000 --36.500000000 -25.500000000 -6.500000000 -35.500000000 -25.500000000 -6.500000000 --36.500000000 -24.500000000 -6.500000000 -35.500000000 -24.500000000 -6.500000000 --36.500000000 -23.500000000 -6.500000000 -35.500000000 -23.500000000 -6.500000000 --36.500000000 -22.500000000 -6.500000000 -35.500000000 -22.500000000 -6.500000000 --36.500000000 -21.500000000 -6.500000000 -35.500000000 -21.500000000 -6.500000000 --36.500000000 -20.500000000 -6.500000000 -35.500000000 -20.500000000 -6.500000000 --36.500000000 -19.500000000 -6.500000000 -35.500000000 -19.500000000 -6.500000000 --36.500000000 -18.500000000 -6.500000000 -35.500000000 -18.500000000 -6.500000000 --36.500000000 -17.500000000 -6.500000000 -35.500000000 -17.500000000 -6.500000000 --36.500000000 -16.500000000 -6.500000000 -35.500000000 -16.500000000 -6.500000000 --36.500000000 -15.500000000 -6.500000000 -35.500000000 -15.500000000 -6.500000000 --36.500000000 -14.500000000 -6.500000000 -35.500000000 -14.500000000 -6.500000000 --36.500000000 -13.500000000 -6.500000000 -35.500000000 -13.500000000 -6.500000000 --36.500000000 -12.500000000 -6.500000000 -35.500000000 -12.500000000 -6.500000000 --36.500000000 -11.500000000 -6.500000000 -35.500000000 -11.500000000 -6.500000000 --36.500000000 -10.500000000 -6.500000000 -35.500000000 -10.500000000 -6.500000000 --36.500000000 -9.500000000 -6.500000000 -35.500000000 -9.500000000 -6.500000000 --36.500000000 -8.500000000 -6.500000000 -35.500000000 -8.500000000 -6.500000000 --36.500000000 -7.500000000 -6.500000000 -35.500000000 -7.500000000 -6.500000000 --36.500000000 -6.500000000 -6.500000000 -35.500000000 -6.500000000 -6.500000000 --36.500000000 -5.500000000 -6.500000000 -35.500000000 -5.500000000 -6.500000000 --36.500000000 -4.500000000 -6.500000000 -35.500000000 -4.500000000 -6.500000000 --36.500000000 -3.500000000 -6.500000000 -35.500000000 -3.500000000 -6.500000000 --36.500000000 -2.500000000 -6.500000000 -35.500000000 -2.500000000 -6.500000000 --36.500000000 -1.500000000 -6.500000000 -35.500000000 -1.500000000 -6.500000000 --36.500000000 -0.500000000 -6.500000000 -35.500000000 -0.500000000 -6.500000000 --36.500000000 0.500000000 -6.500000000 -35.500000000 0.500000000 -6.500000000 --36.500000000 1.500000000 -6.500000000 -35.500000000 1.500000000 -6.500000000 --36.500000000 2.500000000 -6.500000000 -35.500000000 2.500000000 -6.500000000 --36.500000000 3.500000000 -6.500000000 -35.500000000 3.500000000 -6.500000000 --36.500000000 4.500000000 -6.500000000 -35.500000000 4.500000000 -6.500000000 --36.500000000 5.500000000 -6.500000000 -35.500000000 5.500000000 -6.500000000 --36.500000000 6.500000000 -6.500000000 -35.500000000 6.500000000 -6.500000000 --36.500000000 7.500000000 -6.500000000 -35.500000000 7.500000000 -6.500000000 --36.500000000 8.500000000 -6.500000000 -35.500000000 8.500000000 -6.500000000 
--36.500000000 9.500000000 -6.500000000 -35.500000000 9.500000000 -6.500000000 --36.500000000 10.500000000 -6.500000000 -35.500000000 10.500000000 -6.500000000 --36.500000000 11.500000000 -6.500000000 -35.500000000 11.500000000 -6.500000000 --36.500000000 12.500000000 -6.500000000 -35.500000000 12.500000000 -6.500000000 --36.500000000 13.500000000 -6.500000000 -35.500000000 13.500000000 -6.500000000 --36.500000000 14.500000000 -6.500000000 -35.500000000 14.500000000 -6.500000000 --36.500000000 15.500000000 -6.500000000 -35.500000000 15.500000000 -6.500000000 --36.500000000 16.500000000 -6.500000000 -35.500000000 16.500000000 -6.500000000 --36.500000000 17.500000000 -6.500000000 -35.500000000 17.500000000 -6.500000000 --36.500000000 18.500000000 -6.500000000 -35.500000000 18.500000000 -6.500000000 --36.500000000 19.500000000 -6.500000000 -35.500000000 19.500000000 -6.500000000 --36.500000000 20.500000000 -6.500000000 -35.500000000 20.500000000 -6.500000000 --36.500000000 21.500000000 -6.500000000 -35.500000000 21.500000000 -6.500000000 --36.500000000 22.500000000 -6.500000000 -35.500000000 22.500000000 -6.500000000 --36.500000000 23.500000000 -6.500000000 -35.500000000 23.500000000 -6.500000000 --36.500000000 24.500000000 -6.500000000 -35.500000000 24.500000000 -6.500000000 --36.499996185 25.499996185 -6.500000000 -35.499996185 25.499996185 -6.500000000 --36.499954224 26.499954224 -6.500000000 -35.499954224 26.499954224 -6.500000000 --36.499591827 27.499591827 -6.500000000 -35.499591827 27.499591827 -6.500000000 --36.497474670 28.497470856 -6.500000000 -35.497467041 28.497470856 -6.500000000 --36.488403320 29.488407135 -6.500000000 -35.488403320 29.488407135 -6.500000000 --36.458980560 30.458978653 -6.500000000 -35.458980560 30.458978653 -6.500000000 --36.384422302 31.384418488 -6.500000000 -35.384422302 31.384418488 -6.500000000 --36.233222961 32.233222961 -6.500000000 -35.233222961 32.233222961 -6.500000000 --35.981101990 32.981101990 -6.500000000 --35.622871399 33.622871399 -6.500000000 -34.622871399 33.622871399 -6.500000000 -34.981101990 32.981101990 -6.500000000 --35.167964935 34.167964935 -6.500000000 --34.622871399 34.622871399 -6.500000000 -33.622871399 34.622871399 -6.500000000 -34.167964935 34.167964935 -6.500000000 --33.981101990 34.981101990 -6.500000000 --33.233222961 35.233222961 -6.500000000 --32.384422302 35.384418488 -6.500000000 --31.458978653 35.458976746 -6.500000000 --30.488407135 35.488403320 -6.500000000 --29.497472763 35.497467041 -6.500000000 --28.499593735 35.499591827 -6.500000000 --27.499954224 35.499954224 -6.500000000 --26.499996185 35.499996185 -6.500000000 --25.500000000 35.500000000 -6.500000000 --24.500000000 35.500000000 -6.500000000 --23.500000000 35.500000000 -6.500000000 --22.500000000 35.500000000 -6.500000000 --21.500000000 35.500000000 -6.500000000 --20.500000000 35.500000000 -6.500000000 --19.500000000 35.500000000 -6.500000000 --18.500000000 35.500000000 -6.500000000 --17.500000000 35.500000000 -6.500000000 --16.500000000 35.500000000 -6.500000000 --15.500000000 35.500000000 -6.500000000 --14.500000000 35.500000000 -6.500000000 --13.500000000 35.500000000 -6.500000000 --12.500000000 35.500000000 -6.500000000 --11.500000000 35.500000000 -6.500000000 --10.500000000 35.500000000 -6.500000000 --9.500000000 35.500000000 -6.500000000 --8.500000000 35.500000000 -6.500000000 --7.500000000 35.500000000 -6.500000000 --6.500000000 35.500000000 -6.500000000 --5.500000000 35.500000000 -6.500000000 --4.500000000 35.500000000 -6.500000000 --3.500000000 
35.500000000 -6.500000000 --2.500000000 35.500000000 -6.500000000 --1.500000000 35.500000000 -6.500000000 --0.500000000 35.500000000 -6.500000000 -0.500000000 35.500000000 -6.500000000 -1.500000000 35.500000000 -6.500000000 -2.500000000 35.500000000 -6.500000000 -3.500000000 35.500000000 -6.500000000 -4.500000000 35.500000000 -6.500000000 -5.500000000 35.500000000 -6.500000000 -6.500000000 35.500000000 -6.500000000 -7.500000000 35.500000000 -6.500000000 -8.500000000 35.500000000 -6.500000000 -9.500000000 35.500000000 -6.500000000 -10.500000000 35.500000000 -6.500000000 -11.500000000 35.500000000 -6.500000000 -12.500000000 35.500000000 -6.500000000 -13.500000000 35.500000000 -6.500000000 -14.500000000 35.500000000 -6.500000000 -15.500000000 35.500000000 -6.500000000 -16.500000000 35.500000000 -6.500000000 -17.500000000 35.500000000 -6.500000000 -18.500000000 35.500000000 -6.500000000 -19.500000000 35.500000000 -6.500000000 -20.500000000 35.500000000 -6.500000000 -21.500000000 35.500000000 -6.500000000 -22.500000000 35.500000000 -6.500000000 -23.500000000 35.500000000 -6.500000000 -24.500000000 35.500000000 -6.500000000 -25.499996185 35.499996185 -6.500000000 -26.499954224 35.499954224 -6.500000000 -27.499591827 35.499591827 -6.500000000 -28.497470856 35.497474670 -6.500000000 -29.488407135 35.488403320 -6.500000000 -30.458978653 35.458980560 -6.500000000 -31.384418488 35.384422302 -6.500000000 -32.233222961 35.233222961 -6.500000000 -32.981101990 34.981101990 -6.500000000 --33.981101990 -35.981101990 -5.500000000 --33.233226776 -36.233222961 -5.500000000 --32.384422302 -36.384418488 -5.500000000 --31.458978653 -36.458980560 -5.500000000 --30.488407135 -36.488403320 -5.500000000 --29.497472763 -36.497474670 -5.500000000 --28.499593735 -36.499591827 -5.500000000 --27.499954224 -36.499954224 -5.500000000 --26.499996185 -36.499996185 -5.500000000 --25.500000000 -36.500000000 -5.500000000 --24.500000000 -36.500000000 -5.500000000 --23.500000000 -36.500000000 -5.500000000 --22.500000000 -36.500000000 -5.500000000 --21.500000000 -36.500000000 -5.500000000 --20.500000000 -36.500000000 -5.500000000 --19.500000000 -36.500000000 -5.500000000 --18.500000000 -36.500000000 -5.500000000 --17.500000000 -36.500000000 -5.500000000 --16.500000000 -36.500000000 -5.500000000 --15.500000000 -36.500000000 -5.500000000 --14.500000000 -36.500000000 -5.500000000 --13.500000000 -36.500000000 -5.500000000 --12.500000000 -36.500000000 -5.500000000 --11.500000000 -36.500000000 -5.500000000 --10.500000000 -36.500000000 -5.500000000 --9.500000000 -36.500000000 -5.500000000 --8.500000000 -36.500000000 -5.500000000 --7.500000000 -36.500000000 -5.500000000 --6.500000000 -36.500000000 -5.500000000 --5.500000000 -36.500000000 -5.500000000 --4.500000000 -36.500000000 -5.500000000 --3.500000000 -36.500000000 -5.500000000 --2.500000000 -36.500000000 -5.500000000 --1.500000000 -36.500000000 -5.500000000 --0.500000000 -36.500000000 -5.500000000 -0.500000000 -36.500000000 -5.500000000 -1.500000000 -36.500000000 -5.500000000 -2.500000000 -36.500000000 -5.500000000 -3.500000000 -36.500000000 -5.500000000 -4.500000000 -36.500000000 -5.500000000 -5.500000000 -36.500000000 -5.500000000 -6.500000000 -36.500000000 -5.500000000 -7.500000000 -36.500000000 -5.500000000 -8.500000000 -36.500000000 -5.500000000 -9.500000000 -36.500000000 -5.500000000 -10.500000000 -36.500000000 -5.500000000 -11.500000000 -36.500000000 -5.500000000 -12.500000000 -36.500000000 -5.500000000 -13.500000000 -36.500000000 -5.500000000 -14.500000000 -36.500000000 
-5.500000000 -15.500000000 -36.500000000 -5.500000000 -16.500000000 -36.500000000 -5.500000000 -17.500000000 -36.500000000 -5.500000000 -18.500000000 -36.500000000 -5.500000000 -19.500000000 -36.500000000 -5.500000000 -20.500000000 -36.500000000 -5.500000000 -21.500000000 -36.500000000 -5.500000000 -22.500000000 -36.500000000 -5.500000000 -23.500000000 -36.500000000 -5.500000000 -24.500000000 -36.500000000 -5.500000000 -25.499996185 -36.499996185 -5.500000000 -26.499954224 -36.499954224 -5.500000000 -27.499591827 -36.499591827 -5.500000000 -28.497470856 -36.497467041 -5.500000000 -29.488407135 -36.488403320 -5.500000000 -30.458978653 -36.458980560 -5.500000000 -31.384418488 -36.384422302 -5.500000000 -32.233222961 -36.233222961 -5.500000000 -32.981101990 -35.981101990 -5.500000000 --35.167964935 -35.167964935 -5.500000000 --34.622871399 -35.622871399 -5.500000000 -33.622871399 -35.622871399 -5.500000000 -34.167964935 -35.167964935 -5.500000000 --35.981101990 -33.981101990 -5.500000000 --35.622871399 -34.622871399 -5.500000000 -34.622871399 -34.622871399 -5.500000000 -34.981101990 -33.981101990 -5.500000000 --36.233222961 -33.233222961 -5.500000000 -35.233222961 -33.233226776 -5.500000000 --36.384418488 -32.384422302 -5.500000000 -35.384418488 -32.384422302 -5.500000000 --36.458976746 -31.458978653 -5.500000000 -35.458980560 -31.458978653 -5.500000000 --36.488403320 -30.488407135 -5.500000000 -35.488403320 -30.488407135 -5.500000000 --36.497467041 -29.497472763 -5.500000000 -35.497474670 -29.497472763 -5.500000000 --36.499591827 -28.499593735 -5.500000000 -35.499591827 -28.499593735 -5.500000000 --36.499954224 -27.499954224 -5.500000000 -35.499954224 -27.499954224 -5.500000000 --36.499996185 -26.499996185 -5.500000000 -35.499996185 -26.499996185 -5.500000000 --36.500000000 -25.500000000 -5.500000000 -35.500000000 -25.500000000 -5.500000000 --36.500000000 -24.500000000 -5.500000000 -35.500000000 -24.500000000 -5.500000000 --36.500000000 -23.500000000 -5.500000000 -35.500000000 -23.500000000 -5.500000000 --36.500000000 -22.500000000 -5.500000000 -35.500000000 -22.500000000 -5.500000000 --36.500000000 -21.500000000 -5.500000000 -35.500000000 -21.500000000 -5.500000000 --36.500000000 -20.500000000 -5.500000000 -35.500000000 -20.500000000 -5.500000000 --36.500000000 -19.500000000 -5.500000000 -35.500000000 -19.500000000 -5.500000000 --36.500000000 -18.500000000 -5.500000000 -35.500000000 -18.500000000 -5.500000000 --36.500000000 -17.500000000 -5.500000000 -35.500000000 -17.500000000 -5.500000000 --36.500000000 -16.500000000 -5.500000000 -35.500000000 -16.500000000 -5.500000000 --36.500000000 -15.500000000 -5.500000000 -35.500000000 -15.500000000 -5.500000000 --36.500000000 -14.500000000 -5.500000000 -35.500000000 -14.500000000 -5.500000000 --36.500000000 -13.500000000 -5.500000000 -35.500000000 -13.500000000 -5.500000000 --36.500000000 -12.500000000 -5.500000000 -35.500000000 -12.500000000 -5.500000000 --36.500000000 -11.500000000 -5.500000000 -35.500000000 -11.500000000 -5.500000000 --36.500000000 -10.500000000 -5.500000000 -35.500000000 -10.500000000 -5.500000000 --36.500000000 -9.500000000 -5.500000000 -35.500000000 -9.500000000 -5.500000000 --36.500000000 -8.500000000 -5.500000000 -35.500000000 -8.500000000 -5.500000000 --36.500000000 -7.500000000 -5.500000000 -35.500000000 -7.500000000 -5.500000000 --36.500000000 -6.500000000 -5.500000000 -35.500000000 -6.500000000 -5.500000000 --36.500000000 -5.500000000 -5.500000000 -35.500000000 -5.500000000 -5.500000000 --36.500000000 -4.500000000 
-5.500000000 -35.500000000 -4.500000000 -5.500000000 --36.500000000 -3.500000000 -5.500000000 -35.500000000 -3.500000000 -5.500000000 --36.500000000 -2.500000000 -5.500000000 -35.500000000 -2.500000000 -5.500000000 --36.500000000 -1.500000000 -5.500000000 -35.500000000 -1.500000000 -5.500000000 --36.500000000 -0.500000000 -5.500000000 -35.500000000 -0.500000000 -5.500000000 --36.500000000 0.500000000 -5.500000000 -35.500000000 0.500000000 -5.500000000 --36.500000000 1.500000000 -5.500000000 -35.500000000 1.500000000 -5.500000000 --36.500000000 2.500000000 -5.500000000 -35.500000000 2.500000000 -5.500000000 --36.500000000 3.500000000 -5.500000000 -35.500000000 3.500000000 -5.500000000 --36.500000000 4.500000000 -5.500000000 -35.500000000 4.500000000 -5.500000000 --36.500000000 5.500000000 -5.500000000 -35.500000000 5.500000000 -5.500000000 --36.500000000 6.500000000 -5.500000000 -35.500000000 6.500000000 -5.500000000 --36.500000000 7.500000000 -5.500000000 -35.500000000 7.500000000 -5.500000000 --36.500000000 8.500000000 -5.500000000 -35.500000000 8.500000000 -5.500000000 --36.500000000 9.500000000 -5.500000000 -35.500000000 9.500000000 -5.500000000 --36.500000000 10.500000000 -5.500000000 -35.500000000 10.500000000 -5.500000000 --36.500000000 11.500000000 -5.500000000 -35.500000000 11.500000000 -5.500000000 --36.500000000 12.500000000 -5.500000000 -35.500000000 12.500000000 -5.500000000 --36.500000000 13.500000000 -5.500000000 -35.500000000 13.500000000 -5.500000000 --36.500000000 14.500000000 -5.500000000 -35.500000000 14.500000000 -5.500000000 --36.500000000 15.500000000 -5.500000000 -35.500000000 15.500000000 -5.500000000 --36.500000000 16.500000000 -5.500000000 -35.500000000 16.500000000 -5.500000000 --36.500000000 17.500000000 -5.500000000 -35.500000000 17.500000000 -5.500000000 --36.500000000 18.500000000 -5.500000000 -35.500000000 18.500000000 -5.500000000 --36.500000000 19.500000000 -5.500000000 -35.500000000 19.500000000 -5.500000000 --36.500000000 20.500000000 -5.500000000 -35.500000000 20.500000000 -5.500000000 --36.500000000 21.500000000 -5.500000000 -35.500000000 21.500000000 -5.500000000 --36.500000000 22.500000000 -5.500000000 -35.500000000 22.500000000 -5.500000000 --36.500000000 23.500000000 -5.500000000 -35.500000000 23.500000000 -5.500000000 --36.500000000 24.500000000 -5.500000000 -35.500000000 24.500000000 -5.500000000 --36.499996185 25.499996185 -5.500000000 -35.499996185 25.499996185 -5.500000000 --36.499954224 26.499954224 -5.500000000 -35.499954224 26.499954224 -5.500000000 --36.499591827 27.499591827 -5.500000000 -35.499591827 27.499591827 -5.500000000 --36.497474670 28.497470856 -5.500000000 -35.497467041 28.497470856 -5.500000000 --36.488403320 29.488407135 -5.500000000 -35.488403320 29.488407135 -5.500000000 --36.458980560 30.458978653 -5.500000000 -35.458980560 30.458978653 -5.500000000 --36.384422302 31.384418488 -5.500000000 -35.384422302 31.384418488 -5.500000000 --36.233222961 32.233222961 -5.500000000 -35.233222961 32.233222961 -5.500000000 --35.981101990 32.981101990 -5.500000000 --35.622871399 33.622871399 -5.500000000 -34.622871399 33.622871399 -5.500000000 -34.981101990 32.981101990 -5.500000000 --35.167964935 34.167964935 -5.500000000 --34.622871399 34.622871399 -5.500000000 -33.622871399 34.622871399 -5.500000000 -34.167964935 34.167964935 -5.500000000 --33.981101990 34.981101990 -5.500000000 --33.233222961 35.233222961 -5.500000000 --32.384422302 35.384418488 -5.500000000 --31.458978653 35.458976746 -5.500000000 --30.488407135 35.488403320 
-5.500000000 --29.497472763 35.497467041 -5.500000000 --28.499593735 35.499591827 -5.500000000 --27.499954224 35.499954224 -5.500000000 --26.499996185 35.499996185 -5.500000000 --25.500000000 35.500000000 -5.500000000 --24.500000000 35.500000000 -5.500000000 --23.500000000 35.500000000 -5.500000000 --22.500000000 35.500000000 -5.500000000 --21.500000000 35.500000000 -5.500000000 --20.500000000 35.500000000 -5.500000000 --19.500000000 35.500000000 -5.500000000 --18.500000000 35.500000000 -5.500000000 --17.500000000 35.500000000 -5.500000000 --16.500000000 35.500000000 -5.500000000 --15.500000000 35.500000000 -5.500000000 --14.500000000 35.500000000 -5.500000000 --13.500000000 35.500000000 -5.500000000 --12.500000000 35.500000000 -5.500000000 --11.500000000 35.500000000 -5.500000000 --10.500000000 35.500000000 -5.500000000 --9.500000000 35.500000000 -5.500000000 --8.500000000 35.500000000 -5.500000000 --7.500000000 35.500000000 -5.500000000 --6.500000000 35.500000000 -5.500000000 --5.500000000 35.500000000 -5.500000000 --4.500000000 35.500000000 -5.500000000 --3.500000000 35.500000000 -5.500000000 --2.500000000 35.500000000 -5.500000000 --1.500000000 35.500000000 -5.500000000 --0.500000000 35.500000000 -5.500000000 -0.500000000 35.500000000 -5.500000000 -1.500000000 35.500000000 -5.500000000 -2.500000000 35.500000000 -5.500000000 -3.500000000 35.500000000 -5.500000000 -4.500000000 35.500000000 -5.500000000 -5.500000000 35.500000000 -5.500000000 -6.500000000 35.500000000 -5.500000000 -7.500000000 35.500000000 -5.500000000 -8.500000000 35.500000000 -5.500000000 -9.500000000 35.500000000 -5.500000000 -10.500000000 35.500000000 -5.500000000 -11.500000000 35.500000000 -5.500000000 -12.500000000 35.500000000 -5.500000000 -13.500000000 35.500000000 -5.500000000 -14.500000000 35.500000000 -5.500000000 -15.500000000 35.500000000 -5.500000000 -16.500000000 35.500000000 -5.500000000 -17.500000000 35.500000000 -5.500000000 -18.500000000 35.500000000 -5.500000000 -19.500000000 35.500000000 -5.500000000 -20.500000000 35.500000000 -5.500000000 -21.500000000 35.500000000 -5.500000000 -22.500000000 35.500000000 -5.500000000 -23.500000000 35.500000000 -5.500000000 -24.500000000 35.500000000 -5.500000000 -25.499996185 35.499996185 -5.500000000 -26.499954224 35.499954224 -5.500000000 -27.499591827 35.499591827 -5.500000000 -28.497470856 35.497474670 -5.500000000 -29.488407135 35.488403320 -5.500000000 -30.458978653 35.458980560 -5.500000000 -31.384418488 35.384422302 -5.500000000 -32.233222961 35.233222961 -5.500000000 -32.981101990 34.981101990 -5.500000000 --33.981101990 -35.981101990 -4.500000000 --33.233226776 -36.233222961 -4.500000000 --32.384422302 -36.384418488 -4.500000000 --31.458978653 -36.458980560 -4.500000000 --30.488407135 -36.488403320 -4.500000000 --29.497472763 -36.497474670 -4.500000000 --28.499593735 -36.499591827 -4.500000000 --27.499954224 -36.499954224 -4.500000000 --26.499996185 -36.499996185 -4.500000000 --25.500000000 -36.500000000 -4.500000000 --24.500000000 -36.500000000 -4.500000000 --23.500000000 -36.500000000 -4.500000000 --22.500000000 -36.500000000 -4.500000000 --21.500000000 -36.500000000 -4.500000000 --20.500000000 -36.500000000 -4.500000000 --19.500000000 -36.500000000 -4.500000000 --18.500000000 -36.500000000 -4.500000000 --17.500000000 -36.500000000 -4.500000000 --16.500000000 -36.500000000 -4.500000000 --15.500000000 -36.500000000 -4.500000000 --14.500000000 -36.500000000 -4.500000000 --13.500000000 -36.500000000 -4.500000000 --12.500000000 -36.500000000 -4.500000000 
--11.500000000 -36.500000000 -4.500000000 --10.500000000 -36.500000000 -4.500000000 --9.500000000 -36.500000000 -4.500000000 --8.500000000 -36.500000000 -4.500000000 --7.500000000 -36.500000000 -4.500000000 --6.500000000 -36.500000000 -4.500000000 --5.500000000 -36.500000000 -4.500000000 --4.500000000 -36.500000000 -4.500000000 --3.500000000 -36.500000000 -4.500000000 --2.500000000 -36.500000000 -4.500000000 --1.500000000 -36.500000000 -4.500000000 --0.500000000 -36.500000000 -4.500000000 -0.500000000 -36.500000000 -4.500000000 -1.500000000 -36.500000000 -4.500000000 -2.500000000 -36.500000000 -4.500000000 -3.500000000 -36.500000000 -4.500000000 -4.500000000 -36.500000000 -4.500000000 -5.500000000 -36.500000000 -4.500000000 -6.500000000 -36.500000000 -4.500000000 -7.500000000 -36.500000000 -4.500000000 -8.500000000 -36.500000000 -4.500000000 -9.500000000 -36.500000000 -4.500000000 -10.500000000 -36.500000000 -4.500000000 -11.500000000 -36.500000000 -4.500000000 -12.500000000 -36.500000000 -4.500000000 -13.500000000 -36.500000000 -4.500000000 -14.500000000 -36.500000000 -4.500000000 -15.500000000 -36.500000000 -4.500000000 -16.500000000 -36.500000000 -4.500000000 -17.500000000 -36.500000000 -4.500000000 -18.500000000 -36.500000000 -4.500000000 -19.500000000 -36.500000000 -4.500000000 -20.500000000 -36.500000000 -4.500000000 -21.500000000 -36.500000000 -4.500000000 -22.500000000 -36.500000000 -4.500000000 -23.500000000 -36.500000000 -4.500000000 -24.500000000 -36.500000000 -4.500000000 -25.499996185 -36.499996185 -4.500000000 -26.499954224 -36.499954224 -4.500000000 -27.499591827 -36.499591827 -4.500000000 -28.497470856 -36.497467041 -4.500000000 -29.488407135 -36.488403320 -4.500000000 -30.458978653 -36.458980560 -4.500000000 -31.384418488 -36.384422302 -4.500000000 -32.233222961 -36.233222961 -4.500000000 -32.981101990 -35.981101990 -4.500000000 --35.167964935 -35.167964935 -4.500000000 --34.622871399 -35.622871399 -4.500000000 -33.622871399 -35.622871399 -4.500000000 -34.167964935 -35.167964935 -4.500000000 --35.981101990 -33.981101990 -4.500000000 --35.622871399 -34.622871399 -4.500000000 -34.622871399 -34.622871399 -4.500000000 -34.981101990 -33.981101990 -4.500000000 --36.233222961 -33.233222961 -4.500000000 -35.233222961 -33.233226776 -4.500000000 --36.384418488 -32.384422302 -4.500000000 -35.384418488 -32.384422302 -4.500000000 --36.458976746 -31.458978653 -4.500000000 -35.458980560 -31.458978653 -4.500000000 --36.488403320 -30.488407135 -4.500000000 -35.488403320 -30.488407135 -4.500000000 --36.497467041 -29.497472763 -4.500000000 -35.497474670 -29.497472763 -4.500000000 --36.499591827 -28.499593735 -4.500000000 -35.499591827 -28.499593735 -4.500000000 --36.499954224 -27.499954224 -4.500000000 -35.499954224 -27.499954224 -4.500000000 --36.499996185 -26.499996185 -4.500000000 -35.499996185 -26.499996185 -4.500000000 --36.500000000 -25.500000000 -4.500000000 -35.500000000 -25.500000000 -4.500000000 --36.500000000 -24.500000000 -4.500000000 -35.500000000 -24.500000000 -4.500000000 --36.500000000 -23.500000000 -4.500000000 -35.500000000 -23.500000000 -4.500000000 --36.500000000 -22.500000000 -4.500000000 -35.500000000 -22.500000000 -4.500000000 --36.500000000 -21.500000000 -4.500000000 -35.500000000 -21.500000000 -4.500000000 --36.500000000 -20.500000000 -4.500000000 -35.500000000 -20.500000000 -4.500000000 --36.500000000 -19.500000000 -4.500000000 -35.500000000 -19.500000000 -4.500000000 --36.500000000 -18.500000000 -4.500000000 -35.500000000 -18.500000000 -4.500000000 --36.500000000 
-17.500000000 -4.500000000 -35.500000000 -17.500000000 -4.500000000 --36.500000000 -16.500000000 -4.500000000 -35.500000000 -16.500000000 -4.500000000 --36.500000000 -15.500000000 -4.500000000 -35.500000000 -15.500000000 -4.500000000 --36.500000000 -14.500000000 -4.500000000 -35.500000000 -14.500000000 -4.500000000 --36.500000000 -13.500000000 -4.500000000 -35.500000000 -13.500000000 -4.500000000 --36.500000000 -12.500000000 -4.500000000 -35.500000000 -12.500000000 -4.500000000 --36.500000000 -11.500000000 -4.500000000 -35.500000000 -11.500000000 -4.500000000 --36.500000000 -10.500000000 -4.500000000 -35.500000000 -10.500000000 -4.500000000 --36.500000000 -9.500000000 -4.500000000 -35.500000000 -9.500000000 -4.500000000 --36.500000000 -8.500000000 -4.500000000 -35.500000000 -8.500000000 -4.500000000 --36.500000000 -7.500000000 -4.500000000 -35.500000000 -7.500000000 -4.500000000 --36.500000000 -6.500000000 -4.500000000 -35.500000000 -6.500000000 -4.500000000 --36.500000000 -5.500000000 -4.500000000 -35.500000000 -5.500000000 -4.500000000 --36.500000000 -4.500000000 -4.500000000 -35.500000000 -4.500000000 -4.500000000 --36.500000000 -3.500000000 -4.500000000 -35.500000000 -3.500000000 -4.500000000 --36.500000000 -2.500000000 -4.500000000 -35.500000000 -2.500000000 -4.500000000 --36.500000000 -1.500000000 -4.500000000 -35.500000000 -1.500000000 -4.500000000 --36.500000000 -0.500000000 -4.500000000 -35.500000000 -0.500000000 -4.500000000 --36.500000000 0.500000000 -4.500000000 -35.500000000 0.500000000 -4.500000000 --36.500000000 1.500000000 -4.500000000 -35.500000000 1.500000000 -4.500000000 --36.500000000 2.500000000 -4.500000000 -35.500000000 2.500000000 -4.500000000 --36.500000000 3.500000000 -4.500000000 -35.500000000 3.500000000 -4.500000000 --36.500000000 4.500000000 -4.500000000 -35.500000000 4.500000000 -4.500000000 --36.500000000 5.500000000 -4.500000000 -35.500000000 5.500000000 -4.500000000 --36.500000000 6.500000000 -4.500000000 -35.500000000 6.500000000 -4.500000000 --36.500000000 7.500000000 -4.500000000 -35.500000000 7.500000000 -4.500000000 --36.500000000 8.500000000 -4.500000000 -35.500000000 8.500000000 -4.500000000 --36.500000000 9.500000000 -4.500000000 -35.500000000 9.500000000 -4.500000000 --36.500000000 10.500000000 -4.500000000 -35.500000000 10.500000000 -4.500000000 --36.500000000 11.500000000 -4.500000000 -35.500000000 11.500000000 -4.500000000 --36.500000000 12.500000000 -4.500000000 -35.500000000 12.500000000 -4.500000000 --36.500000000 13.500000000 -4.500000000 -35.500000000 13.500000000 -4.500000000 --36.500000000 14.500000000 -4.500000000 -35.500000000 14.500000000 -4.500000000 --36.500000000 15.500000000 -4.500000000 -35.500000000 15.500000000 -4.500000000 --36.500000000 16.500000000 -4.500000000 -35.500000000 16.500000000 -4.500000000 --36.500000000 17.500000000 -4.500000000 -35.500000000 17.500000000 -4.500000000 --36.500000000 18.500000000 -4.500000000 -35.500000000 18.500000000 -4.500000000 --36.500000000 19.500000000 -4.500000000 -35.500000000 19.500000000 -4.500000000 --36.500000000 20.500000000 -4.500000000 -35.500000000 20.500000000 -4.500000000 --36.500000000 21.500000000 -4.500000000 -35.500000000 21.500000000 -4.500000000 --36.500000000 22.500000000 -4.500000000 -35.500000000 22.500000000 -4.500000000 --36.500000000 23.500000000 -4.500000000 -35.500000000 23.500000000 -4.500000000 --36.500000000 24.500000000 -4.500000000 -35.500000000 24.500000000 -4.500000000 --36.499996185 25.499996185 -4.500000000 -35.499996185 25.499996185 -4.500000000 
--36.499954224 26.499954224 -4.500000000 -35.499954224 26.499954224 -4.500000000 --36.499591827 27.499591827 -4.500000000 -35.499591827 27.499591827 -4.500000000 --36.497474670 28.497470856 -4.500000000 -35.497467041 28.497470856 -4.500000000 --36.488403320 29.488407135 -4.500000000 -35.488403320 29.488407135 -4.500000000 --36.458980560 30.458978653 -4.500000000 -35.458980560 30.458978653 -4.500000000 --36.384422302 31.384418488 -4.500000000 -35.384422302 31.384418488 -4.500000000 --36.233222961 32.233222961 -4.500000000 -35.233222961 32.233222961 -4.500000000 --35.981101990 32.981101990 -4.500000000 --35.622871399 33.622871399 -4.500000000 -34.622871399 33.622871399 -4.500000000 -34.981101990 32.981101990 -4.500000000 --35.167964935 34.167964935 -4.500000000 --34.622871399 34.622871399 -4.500000000 -33.622871399 34.622871399 -4.500000000 -34.167964935 34.167964935 -4.500000000 --33.981101990 34.981101990 -4.500000000 --33.233222961 35.233222961 -4.500000000 --32.384422302 35.384418488 -4.500000000 --31.458978653 35.458976746 -4.500000000 --30.488407135 35.488403320 -4.500000000 --29.497472763 35.497467041 -4.500000000 --28.499593735 35.499591827 -4.500000000 --27.499954224 35.499954224 -4.500000000 --26.499996185 35.499996185 -4.500000000 --25.500000000 35.500000000 -4.500000000 --24.500000000 35.500000000 -4.500000000 --23.500000000 35.500000000 -4.500000000 --22.500000000 35.500000000 -4.500000000 --21.500000000 35.500000000 -4.500000000 --20.500000000 35.500000000 -4.500000000 --19.500000000 35.500000000 -4.500000000 --18.500000000 35.500000000 -4.500000000 --17.500000000 35.500000000 -4.500000000 --16.500000000 35.500000000 -4.500000000 --15.500000000 35.500000000 -4.500000000 --14.500000000 35.500000000 -4.500000000 --13.500000000 35.500000000 -4.500000000 --12.500000000 35.500000000 -4.500000000 --11.500000000 35.500000000 -4.500000000 --10.500000000 35.500000000 -4.500000000 --9.500000000 35.500000000 -4.500000000 --8.500000000 35.500000000 -4.500000000 --7.500000000 35.500000000 -4.500000000 --6.500000000 35.500000000 -4.500000000 --5.500000000 35.500000000 -4.500000000 --4.500000000 35.500000000 -4.500000000 --3.500000000 35.500000000 -4.500000000 --2.500000000 35.500000000 -4.500000000 --1.500000000 35.500000000 -4.500000000 --0.500000000 35.500000000 -4.500000000 -0.500000000 35.500000000 -4.500000000 -1.500000000 35.500000000 -4.500000000 -2.500000000 35.500000000 -4.500000000 -3.500000000 35.500000000 -4.500000000 -4.500000000 35.500000000 -4.500000000 -5.500000000 35.500000000 -4.500000000 -6.500000000 35.500000000 -4.500000000 -7.500000000 35.500000000 -4.500000000 -8.500000000 35.500000000 -4.500000000 -9.500000000 35.500000000 -4.500000000 -10.500000000 35.500000000 -4.500000000 -11.500000000 35.500000000 -4.500000000 -12.500000000 35.500000000 -4.500000000 -13.500000000 35.500000000 -4.500000000 -14.500000000 35.500000000 -4.500000000 -15.500000000 35.500000000 -4.500000000 -16.500000000 35.500000000 -4.500000000 -17.500000000 35.500000000 -4.500000000 -18.500000000 35.500000000 -4.500000000 -19.500000000 35.500000000 -4.500000000 -20.500000000 35.500000000 -4.500000000 -21.500000000 35.500000000 -4.500000000 -22.500000000 35.500000000 -4.500000000 -23.500000000 35.500000000 -4.500000000 -24.500000000 35.500000000 -4.500000000 -25.499996185 35.499996185 -4.500000000 -26.499954224 35.499954224 -4.500000000 -27.499591827 35.499591827 -4.500000000 -28.497470856 35.497474670 -4.500000000 -29.488407135 35.488403320 -4.500000000 -30.458978653 35.458980560 -4.500000000 
-31.384418488 35.384422302 -4.500000000 -32.233222961 35.233222961 -4.500000000 -32.981101990 34.981101990 -4.500000000 --33.981101990 -35.981101990 -3.500000000 --33.233226776 -36.233222961 -3.500000000 --32.384422302 -36.384418488 -3.500000000 --31.458978653 -36.458980560 -3.500000000 --30.488407135 -36.488403320 -3.500000000 --29.497472763 -36.497474670 -3.500000000 --28.499593735 -36.499591827 -3.500000000 --27.499954224 -36.499954224 -3.500000000 --26.499996185 -36.499996185 -3.500000000 --25.500000000 -36.500000000 -3.500000000 --24.500000000 -36.500000000 -3.500000000 --23.500000000 -36.500000000 -3.500000000 --22.500000000 -36.500000000 -3.500000000 --21.500000000 -36.500000000 -3.500000000 --20.500000000 -36.500000000 -3.500000000 --19.500000000 -36.500000000 -3.500000000 --18.500000000 -36.500000000 -3.500000000 --17.500000000 -36.500000000 -3.500000000 --16.500000000 -36.500000000 -3.500000000 --15.500000000 -36.500000000 -3.500000000 --14.500000000 -36.500000000 -3.500000000 --13.500000000 -36.500000000 -3.500000000 --12.500000000 -36.500000000 -3.500000000 --11.500000000 -36.500000000 -3.500000000 --10.500000000 -36.500000000 -3.500000000 --9.500000000 -36.500000000 -3.500000000 --8.500000000 -36.500000000 -3.500000000 --7.500000000 -36.500000000 -3.500000000 --6.500000000 -36.500000000 -3.500000000 --5.500000000 -36.500000000 -3.500000000 --4.500000000 -36.500000000 -3.500000000 --3.500000000 -36.500000000 -3.500000000 --2.500000000 -36.500000000 -3.500000000 --1.500000000 -36.500000000 -3.500000000 --0.500000000 -36.500000000 -3.500000000 -0.500000000 -36.500000000 -3.500000000 -1.500000000 -36.500000000 -3.500000000 -2.500000000 -36.500000000 -3.500000000 -3.500000000 -36.500000000 -3.500000000 -4.500000000 -36.500000000 -3.500000000 -5.500000000 -36.500000000 -3.500000000 -6.500000000 -36.500000000 -3.500000000 -7.500000000 -36.500000000 -3.500000000 -8.500000000 -36.500000000 -3.500000000 -9.500000000 -36.500000000 -3.500000000 -10.500000000 -36.500000000 -3.500000000 -11.500000000 -36.500000000 -3.500000000 -12.500000000 -36.500000000 -3.500000000 -13.500000000 -36.500000000 -3.500000000 -14.500000000 -36.500000000 -3.500000000 -15.500000000 -36.500000000 -3.500000000 -16.500000000 -36.500000000 -3.500000000 -17.500000000 -36.500000000 -3.500000000 -18.500000000 -36.500000000 -3.500000000 -19.500000000 -36.500000000 -3.500000000 -20.500000000 -36.500000000 -3.500000000 -21.500000000 -36.500000000 -3.500000000 -22.500000000 -36.500000000 -3.500000000 -23.500000000 -36.500000000 -3.500000000 -24.500000000 -36.500000000 -3.500000000 -25.499996185 -36.499996185 -3.500000000 -26.499954224 -36.499954224 -3.500000000 -27.499591827 -36.499591827 -3.500000000 -28.497470856 -36.497467041 -3.500000000 -29.488407135 -36.488403320 -3.500000000 -30.458978653 -36.458980560 -3.500000000 -31.384418488 -36.384422302 -3.500000000 -32.233222961 -36.233222961 -3.500000000 -32.981101990 -35.981101990 -3.500000000 --35.167964935 -35.167964935 -3.500000000 --34.622871399 -35.622871399 -3.500000000 -33.622871399 -35.622871399 -3.500000000 -34.167964935 -35.167964935 -3.500000000 --35.981101990 -33.981101990 -3.500000000 --35.622871399 -34.622871399 -3.500000000 -34.622871399 -34.622871399 -3.500000000 -34.981101990 -33.981101990 -3.500000000 --36.233222961 -33.233222961 -3.500000000 -35.233222961 -33.233226776 -3.500000000 --36.384418488 -32.384422302 -3.500000000 -35.384418488 -32.384422302 -3.500000000 --36.458976746 -31.458978653 -3.500000000 -35.458980560 -31.458978653 -3.500000000 
--36.488403320 -30.488407135 -3.500000000 -35.488403320 -30.488407135 -3.500000000 --36.497467041 -29.497472763 -3.500000000 -35.497474670 -29.497472763 -3.500000000 --36.499591827 -28.499593735 -3.500000000 -35.499591827 -28.499593735 -3.500000000 --36.499954224 -27.499954224 -3.500000000 -35.499954224 -27.499954224 -3.500000000 --36.499996185 -26.499996185 -3.500000000 -35.499996185 -26.499996185 -3.500000000 --36.500000000 -25.500000000 -3.500000000 -35.500000000 -25.500000000 -3.500000000 --36.500000000 -24.500000000 -3.500000000 -35.500000000 -24.500000000 -3.500000000 --36.500000000 -23.500000000 -3.500000000 -35.500000000 -23.500000000 -3.500000000 --36.500000000 -22.500000000 -3.500000000 -35.500000000 -22.500000000 -3.500000000 --36.500000000 -21.500000000 -3.500000000 -35.500000000 -21.500000000 -3.500000000 --36.500000000 -20.500000000 -3.500000000 -35.500000000 -20.500000000 -3.500000000 --36.500000000 -19.500000000 -3.500000000 -35.500000000 -19.500000000 -3.500000000 --36.500000000 -18.500000000 -3.500000000 -35.500000000 -18.500000000 -3.500000000 --36.500000000 -17.500000000 -3.500000000 -35.500000000 -17.500000000 -3.500000000 --36.500000000 -16.500000000 -3.500000000 -35.500000000 -16.500000000 -3.500000000 --36.500000000 -15.500000000 -3.500000000 -35.500000000 -15.500000000 -3.500000000 --36.500000000 -14.500000000 -3.500000000 -35.500000000 -14.500000000 -3.500000000 --36.500000000 -13.500000000 -3.500000000 -35.500000000 -13.500000000 -3.500000000 --36.500000000 -12.500000000 -3.500000000 -35.500000000 -12.500000000 -3.500000000 --36.500000000 -11.500000000 -3.500000000 -35.500000000 -11.500000000 -3.500000000 --36.500000000 -10.500000000 -3.500000000 -35.500000000 -10.500000000 -3.500000000 --36.500000000 -9.500000000 -3.500000000 -35.500000000 -9.500000000 -3.500000000 --36.500000000 -8.500000000 -3.500000000 -35.500000000 -8.500000000 -3.500000000 --36.500000000 -7.500000000 -3.500000000 -35.500000000 -7.500000000 -3.500000000 --36.500000000 -6.500000000 -3.500000000 -35.500000000 -6.500000000 -3.500000000 --36.500000000 -5.500000000 -3.500000000 -35.500000000 -5.500000000 -3.500000000 --36.500000000 -4.500000000 -3.500000000 -35.500000000 -4.500000000 -3.500000000 --36.500000000 -3.500000000 -3.500000000 -35.500000000 -3.500000000 -3.500000000 --36.500000000 -2.500000000 -3.500000000 -35.500000000 -2.500000000 -3.500000000 --36.500000000 -1.500000000 -3.500000000 -35.500000000 -1.500000000 -3.500000000 --36.500000000 -0.500000000 -3.500000000 -35.500000000 -0.500000000 -3.500000000 --36.500000000 0.500000000 -3.500000000 -35.500000000 0.500000000 -3.500000000 --36.500000000 1.500000000 -3.500000000 -35.500000000 1.500000000 -3.500000000 --36.500000000 2.500000000 -3.500000000 -35.500000000 2.500000000 -3.500000000 --36.500000000 3.500000000 -3.500000000 -35.500000000 3.500000000 -3.500000000 --36.500000000 4.500000000 -3.500000000 -35.500000000 4.500000000 -3.500000000 --36.500000000 5.500000000 -3.500000000 -35.500000000 5.500000000 -3.500000000 --36.500000000 6.500000000 -3.500000000 -35.500000000 6.500000000 -3.500000000 --36.500000000 7.500000000 -3.500000000 -35.500000000 7.500000000 -3.500000000 --36.500000000 8.500000000 -3.500000000 -35.500000000 8.500000000 -3.500000000 --36.500000000 9.500000000 -3.500000000 -35.500000000 9.500000000 -3.500000000 --36.500000000 10.500000000 -3.500000000 -35.500000000 10.500000000 -3.500000000 --36.500000000 11.500000000 -3.500000000 -35.500000000 11.500000000 -3.500000000 --36.500000000 12.500000000 -3.500000000 
-35.500000000 12.500000000 -3.500000000 --36.500000000 13.500000000 -3.500000000 -35.500000000 13.500000000 -3.500000000 --36.500000000 14.500000000 -3.500000000 -35.500000000 14.500000000 -3.500000000 --36.500000000 15.500000000 -3.500000000 -35.500000000 15.500000000 -3.500000000 --36.500000000 16.500000000 -3.500000000 -35.500000000 16.500000000 -3.500000000 --36.500000000 17.500000000 -3.500000000 -35.500000000 17.500000000 -3.500000000 --36.500000000 18.500000000 -3.500000000 -35.500000000 18.500000000 -3.500000000 --36.500000000 19.500000000 -3.500000000 -35.500000000 19.500000000 -3.500000000 --36.500000000 20.500000000 -3.500000000 -35.500000000 20.500000000 -3.500000000 --36.500000000 21.500000000 -3.500000000 -35.500000000 21.500000000 -3.500000000 --36.500000000 22.500000000 -3.500000000 -35.500000000 22.500000000 -3.500000000 --36.500000000 23.500000000 -3.500000000 -35.500000000 23.500000000 -3.500000000 --36.500000000 24.500000000 -3.500000000 -35.500000000 24.500000000 -3.500000000 --36.499996185 25.499996185 -3.500000000 -35.499996185 25.499996185 -3.500000000 --36.499954224 26.499954224 -3.500000000 -35.499954224 26.499954224 -3.500000000 --36.499591827 27.499591827 -3.500000000 -35.499591827 27.499591827 -3.500000000 --36.497474670 28.497470856 -3.500000000 -35.497467041 28.497470856 -3.500000000 --36.488403320 29.488407135 -3.500000000 -35.488403320 29.488407135 -3.500000000 --36.458980560 30.458978653 -3.500000000 -35.458980560 30.458978653 -3.500000000 --36.384422302 31.384418488 -3.500000000 -35.384422302 31.384418488 -3.500000000 --36.233222961 32.233222961 -3.500000000 -35.233222961 32.233222961 -3.500000000 --35.981101990 32.981101990 -3.500000000 --35.622871399 33.622871399 -3.500000000 -34.622871399 33.622871399 -3.500000000 -34.981101990 32.981101990 -3.500000000 --35.167964935 34.167964935 -3.500000000 --34.622871399 34.622871399 -3.500000000 -33.622871399 34.622871399 -3.500000000 -34.167964935 34.167964935 -3.500000000 --33.981101990 34.981101990 -3.500000000 --33.233222961 35.233222961 -3.500000000 --32.384422302 35.384418488 -3.500000000 --31.458978653 35.458976746 -3.500000000 --30.488407135 35.488403320 -3.500000000 --29.497472763 35.497467041 -3.500000000 --28.499593735 35.499591827 -3.500000000 --27.499954224 35.499954224 -3.500000000 --26.499996185 35.499996185 -3.500000000 --25.500000000 35.500000000 -3.500000000 --24.500000000 35.500000000 -3.500000000 --23.500000000 35.500000000 -3.500000000 --22.500000000 35.500000000 -3.500000000 --21.500000000 35.500000000 -3.500000000 --20.500000000 35.500000000 -3.500000000 --19.500000000 35.500000000 -3.500000000 --18.500000000 35.500000000 -3.500000000 --17.500000000 35.500000000 -3.500000000 --16.500000000 35.500000000 -3.500000000 --15.500000000 35.500000000 -3.500000000 --14.500000000 35.500000000 -3.500000000 --13.500000000 35.500000000 -3.500000000 --12.500000000 35.500000000 -3.500000000 --11.500000000 35.500000000 -3.500000000 --10.500000000 35.500000000 -3.500000000 --9.500000000 35.500000000 -3.500000000 --8.500000000 35.500000000 -3.500000000 --7.500000000 35.500000000 -3.500000000 --6.500000000 35.500000000 -3.500000000 --5.500000000 35.500000000 -3.500000000 --4.500000000 35.500000000 -3.500000000 --3.500000000 35.500000000 -3.500000000 --2.500000000 35.500000000 -3.500000000 --1.500000000 35.500000000 -3.500000000 --0.500000000 35.500000000 -3.500000000 -0.500000000 35.500000000 -3.500000000 -1.500000000 35.500000000 -3.500000000 -2.500000000 35.500000000 -3.500000000 -3.500000000 35.500000000 
-3.500000000 -4.500000000 35.500000000 -3.500000000 -5.500000000 35.500000000 -3.500000000 -6.500000000 35.500000000 -3.500000000 -7.500000000 35.500000000 -3.500000000 -8.500000000 35.500000000 -3.500000000 -9.500000000 35.500000000 -3.500000000 -10.500000000 35.500000000 -3.500000000 -11.500000000 35.500000000 -3.500000000 -12.500000000 35.500000000 -3.500000000 -13.500000000 35.500000000 -3.500000000 -14.500000000 35.500000000 -3.500000000 -15.500000000 35.500000000 -3.500000000 -16.500000000 35.500000000 -3.500000000 -17.500000000 35.500000000 -3.500000000 -18.500000000 35.500000000 -3.500000000 -19.500000000 35.500000000 -3.500000000 -20.500000000 35.500000000 -3.500000000 -21.500000000 35.500000000 -3.500000000 -22.500000000 35.500000000 -3.500000000 -23.500000000 35.500000000 -3.500000000 -24.500000000 35.500000000 -3.500000000 -25.499996185 35.499996185 -3.500000000 -26.499954224 35.499954224 -3.500000000 -27.499591827 35.499591827 -3.500000000 -28.497470856 35.497474670 -3.500000000 -29.488407135 35.488403320 -3.500000000 -30.458978653 35.458980560 -3.500000000 -31.384418488 35.384422302 -3.500000000 -32.233222961 35.233222961 -3.500000000 -32.981101990 34.981101990 -3.500000000 --33.981101990 -35.981101990 -2.500000000 --33.233226776 -36.233222961 -2.500000000 --32.384422302 -36.384418488 -2.500000000 --31.458978653 -36.458980560 -2.500000000 --30.488407135 -36.488403320 -2.500000000 --29.497472763 -36.497474670 -2.500000000 --28.499593735 -36.499591827 -2.500000000 --27.499954224 -36.499954224 -2.500000000 --26.499996185 -36.499996185 -2.500000000 --25.500000000 -36.500000000 -2.500000000 --24.500000000 -36.500000000 -2.500000000 --23.500000000 -36.500000000 -2.500000000 --22.500000000 -36.500000000 -2.500000000 --21.500000000 -36.500000000 -2.500000000 --20.500000000 -36.500000000 -2.500000000 --19.500000000 -36.500000000 -2.500000000 --18.500000000 -36.500000000 -2.500000000 --17.500000000 -36.500000000 -2.500000000 --16.500000000 -36.500000000 -2.500000000 --15.500000000 -36.500000000 -2.500000000 --14.500000000 -36.500000000 -2.500000000 --13.500000000 -36.500000000 -2.500000000 --12.500000000 -36.500000000 -2.500000000 --11.500000000 -36.500000000 -2.500000000 --10.500000000 -36.500000000 -2.500000000 --9.500000000 -36.500000000 -2.500000000 --8.500000000 -36.500000000 -2.500000000 --7.500000000 -36.500000000 -2.500000000 --6.500000000 -36.500000000 -2.500000000 --5.500000000 -36.500000000 -2.500000000 --4.500000000 -36.500000000 -2.500000000 --3.500000000 -36.500000000 -2.500000000 --2.500000000 -36.500000000 -2.500000000 --1.500000000 -36.500000000 -2.500000000 --0.500000000 -36.500000000 -2.500000000 -0.500000000 -36.500000000 -2.500000000 -1.500000000 -36.500000000 -2.500000000 -2.500000000 -36.500000000 -2.500000000 -3.500000000 -36.500000000 -2.500000000 -4.500000000 -36.500000000 -2.500000000 -5.500000000 -36.500000000 -2.500000000 -6.500000000 -36.500000000 -2.500000000 -7.500000000 -36.500000000 -2.500000000 -8.500000000 -36.500000000 -2.500000000 -9.500000000 -36.500000000 -2.500000000 -10.500000000 -36.500000000 -2.500000000 -11.500000000 -36.500000000 -2.500000000 -12.500000000 -36.500000000 -2.500000000 -13.500000000 -36.500000000 -2.500000000 -14.500000000 -36.500000000 -2.500000000 -15.500000000 -36.500000000 -2.500000000 -16.500000000 -36.500000000 -2.500000000 -17.500000000 -36.500000000 -2.500000000 -18.500000000 -36.500000000 -2.500000000 -19.500000000 -36.500000000 -2.500000000 -20.500000000 -36.500000000 -2.500000000 -21.500000000 -36.500000000 
-2.500000000 -22.500000000 -36.500000000 -2.500000000 -23.500000000 -36.500000000 -2.500000000 -24.500000000 -36.500000000 -2.500000000 -25.499996185 -36.499996185 -2.500000000 -26.499954224 -36.499954224 -2.500000000 -27.499591827 -36.499591827 -2.500000000 -28.497470856 -36.497467041 -2.500000000 -29.488407135 -36.488403320 -2.500000000 -30.458978653 -36.458980560 -2.500000000 -31.384418488 -36.384422302 -2.500000000 -32.233222961 -36.233222961 -2.500000000 -32.981101990 -35.981101990 -2.500000000 --35.167964935 -35.167964935 -2.500000000 --34.622871399 -35.622871399 -2.500000000 -33.622871399 -35.622871399 -2.500000000 -34.167964935 -35.167964935 -2.500000000 --35.981101990 -33.981101990 -2.500000000 --35.622871399 -34.622871399 -2.500000000 -34.622871399 -34.622871399 -2.500000000 -34.981101990 -33.981101990 -2.500000000 --36.233222961 -33.233222961 -2.500000000 -35.233222961 -33.233226776 -2.500000000 --36.384418488 -32.384422302 -2.500000000 -35.384418488 -32.384422302 -2.500000000 --36.458976746 -31.458978653 -2.500000000 -35.458980560 -31.458978653 -2.500000000 --36.488403320 -30.488407135 -2.500000000 -35.488403320 -30.488407135 -2.500000000 --36.497467041 -29.497472763 -2.500000000 -35.497474670 -29.497472763 -2.500000000 --36.499591827 -28.499593735 -2.500000000 -35.499591827 -28.499593735 -2.500000000 --36.499954224 -27.499954224 -2.500000000 -35.499954224 -27.499954224 -2.500000000 --36.499996185 -26.499996185 -2.500000000 -35.499996185 -26.499996185 -2.500000000 --36.500000000 -25.500000000 -2.500000000 -35.500000000 -25.500000000 -2.500000000 --36.500000000 -24.500000000 -2.500000000 -35.500000000 -24.500000000 -2.500000000 --36.500000000 -23.500000000 -2.500000000 -35.500000000 -23.500000000 -2.500000000 --36.500000000 -22.500000000 -2.500000000 -35.500000000 -22.500000000 -2.500000000 --36.500000000 -21.500000000 -2.500000000 -35.500000000 -21.500000000 -2.500000000 --36.500000000 -20.500000000 -2.500000000 -35.500000000 -20.500000000 -2.500000000 --36.500000000 -19.500000000 -2.500000000 -35.500000000 -19.500000000 -2.500000000 --36.500000000 -18.500000000 -2.500000000 -35.500000000 -18.500000000 -2.500000000 --36.500000000 -17.500000000 -2.500000000 -35.500000000 -17.500000000 -2.500000000 --36.500000000 -16.500000000 -2.500000000 -35.500000000 -16.500000000 -2.500000000 --36.500000000 -15.500000000 -2.500000000 -35.500000000 -15.500000000 -2.500000000 --36.500000000 -14.500000000 -2.500000000 -35.500000000 -14.500000000 -2.500000000 --36.500000000 -13.500000000 -2.500000000 -35.500000000 -13.500000000 -2.500000000 --36.500000000 -12.500000000 -2.500000000 -35.500000000 -12.500000000 -2.500000000 --36.500000000 -11.500000000 -2.500000000 -35.500000000 -11.500000000 -2.500000000 --36.500000000 -10.500000000 -2.500000000 -35.500000000 -10.500000000 -2.500000000 --36.500000000 -9.500000000 -2.500000000 -35.500000000 -9.500000000 -2.500000000 --36.500000000 -8.500000000 -2.500000000 -35.500000000 -8.500000000 -2.500000000 --36.500000000 -7.500000000 -2.500000000 -35.500000000 -7.500000000 -2.500000000 --36.500000000 -6.500000000 -2.500000000 -35.500000000 -6.500000000 -2.500000000 --36.500000000 -5.500000000 -2.500000000 -35.500000000 -5.500000000 -2.500000000 --36.500000000 -4.500000000 -2.500000000 -35.500000000 -4.500000000 -2.500000000 --36.500000000 -3.500000000 -2.500000000 -35.500000000 -3.500000000 -2.500000000 --36.500000000 -2.500000000 -2.500000000 -35.500000000 -2.500000000 -2.500000000 --36.500000000 -1.500000000 -2.500000000 -35.500000000 -1.500000000 
-2.500000000 --36.500000000 -0.500000000 -2.500000000 -35.500000000 -0.500000000 -2.500000000 --36.500000000 0.500000000 -2.500000000 -35.500000000 0.500000000 -2.500000000 --36.500000000 1.500000000 -2.500000000 -35.500000000 1.500000000 -2.500000000 --36.500000000 2.500000000 -2.500000000 -35.500000000 2.500000000 -2.500000000 --36.500000000 3.500000000 -2.500000000 -35.500000000 3.500000000 -2.500000000 --36.500000000 4.500000000 -2.500000000 -35.500000000 4.500000000 -2.500000000 --36.500000000 5.500000000 -2.500000000 -35.500000000 5.500000000 -2.500000000 --36.500000000 6.500000000 -2.500000000 -35.500000000 6.500000000 -2.500000000 --36.500000000 7.500000000 -2.500000000 -35.500000000 7.500000000 -2.500000000 --36.500000000 8.500000000 -2.500000000 -35.500000000 8.500000000 -2.500000000 --36.500000000 9.500000000 -2.500000000 -35.500000000 9.500000000 -2.500000000 --36.500000000 10.500000000 -2.500000000 -35.500000000 10.500000000 -2.500000000 --36.500000000 11.500000000 -2.500000000 -35.500000000 11.500000000 -2.500000000 --36.500000000 12.500000000 -2.500000000 -35.500000000 12.500000000 -2.500000000 --36.500000000 13.500000000 -2.500000000 -35.500000000 13.500000000 -2.500000000 --36.500000000 14.500000000 -2.500000000 -35.500000000 14.500000000 -2.500000000 --36.500000000 15.500000000 -2.500000000 -35.500000000 15.500000000 -2.500000000 --36.500000000 16.500000000 -2.500000000 -35.500000000 16.500000000 -2.500000000 --36.500000000 17.500000000 -2.500000000 -35.500000000 17.500000000 -2.500000000 --36.500000000 18.500000000 -2.500000000 -35.500000000 18.500000000 -2.500000000 --36.500000000 19.500000000 -2.500000000 -35.500000000 19.500000000 -2.500000000 --36.500000000 20.500000000 -2.500000000 -35.500000000 20.500000000 -2.500000000 --36.500000000 21.500000000 -2.500000000 -35.500000000 21.500000000 -2.500000000 --36.500000000 22.500000000 -2.500000000 -35.500000000 22.500000000 -2.500000000 --36.500000000 23.500000000 -2.500000000 -35.500000000 23.500000000 -2.500000000 --36.500000000 24.500000000 -2.500000000 -35.500000000 24.500000000 -2.500000000 --36.499996185 25.499996185 -2.500000000 -35.499996185 25.499996185 -2.500000000 --36.499954224 26.499954224 -2.500000000 -35.499954224 26.499954224 -2.500000000 --36.499591827 27.499591827 -2.500000000 -35.499591827 27.499591827 -2.500000000 --36.497474670 28.497470856 -2.500000000 -35.497467041 28.497470856 -2.500000000 --36.488403320 29.488407135 -2.500000000 -35.488403320 29.488407135 -2.500000000 --36.458980560 30.458978653 -2.500000000 -35.458980560 30.458978653 -2.500000000 --36.384422302 31.384418488 -2.500000000 -35.384422302 31.384418488 -2.500000000 --36.233222961 32.233222961 -2.500000000 -35.233222961 32.233222961 -2.500000000 --35.981101990 32.981101990 -2.500000000 --35.622871399 33.622871399 -2.500000000 -34.622871399 33.622871399 -2.500000000 -34.981101990 32.981101990 -2.500000000 --35.167964935 34.167964935 -2.500000000 --34.622871399 34.622871399 -2.500000000 -33.622871399 34.622871399 -2.500000000 -34.167964935 34.167964935 -2.500000000 --33.981101990 34.981101990 -2.500000000 --33.233222961 35.233222961 -2.500000000 --32.384422302 35.384418488 -2.500000000 --31.458978653 35.458976746 -2.500000000 --30.488407135 35.488403320 -2.500000000 --29.497472763 35.497467041 -2.500000000 --28.499593735 35.499591827 -2.500000000 --27.499954224 35.499954224 -2.500000000 --26.499996185 35.499996185 -2.500000000 --25.500000000 35.500000000 -2.500000000 --24.500000000 35.500000000 -2.500000000 --23.500000000 35.500000000 
-2.500000000 --22.500000000 35.500000000 -2.500000000 --21.500000000 35.500000000 -2.500000000 --20.500000000 35.500000000 -2.500000000 --19.500000000 35.500000000 -2.500000000 --18.500000000 35.500000000 -2.500000000 --17.500000000 35.500000000 -2.500000000 --16.500000000 35.500000000 -2.500000000 --15.500000000 35.500000000 -2.500000000 --14.500000000 35.500000000 -2.500000000 --13.500000000 35.500000000 -2.500000000 --12.500000000 35.500000000 -2.500000000 --11.500000000 35.500000000 -2.500000000 --10.500000000 35.500000000 -2.500000000 --9.500000000 35.500000000 -2.500000000 --8.500000000 35.500000000 -2.500000000 --7.500000000 35.500000000 -2.500000000 --6.500000000 35.500000000 -2.500000000 --5.500000000 35.500000000 -2.500000000 --4.500000000 35.500000000 -2.500000000 --3.500000000 35.500000000 -2.500000000 --2.500000000 35.500000000 -2.500000000 --1.500000000 35.500000000 -2.500000000 --0.500000000 35.500000000 -2.500000000 -0.500000000 35.500000000 -2.500000000 -1.500000000 35.500000000 -2.500000000 -2.500000000 35.500000000 -2.500000000 -3.500000000 35.500000000 -2.500000000 -4.500000000 35.500000000 -2.500000000 -5.500000000 35.500000000 -2.500000000 -6.500000000 35.500000000 -2.500000000 -7.500000000 35.500000000 -2.500000000 -8.500000000 35.500000000 -2.500000000 -9.500000000 35.500000000 -2.500000000 -10.500000000 35.500000000 -2.500000000 -11.500000000 35.500000000 -2.500000000 -12.500000000 35.500000000 -2.500000000 -13.500000000 35.500000000 -2.500000000 -14.500000000 35.500000000 -2.500000000 -15.500000000 35.500000000 -2.500000000 -16.500000000 35.500000000 -2.500000000 -17.500000000 35.500000000 -2.500000000 -18.500000000 35.500000000 -2.500000000 -19.500000000 35.500000000 -2.500000000 -20.500000000 35.500000000 -2.500000000 -21.500000000 35.500000000 -2.500000000 -22.500000000 35.500000000 -2.500000000 -23.500000000 35.500000000 -2.500000000 -24.500000000 35.500000000 -2.500000000 -25.499996185 35.499996185 -2.500000000 -26.499954224 35.499954224 -2.500000000 -27.499591827 35.499591827 -2.500000000 -28.497470856 35.497474670 -2.500000000 -29.488407135 35.488403320 -2.500000000 -30.458978653 35.458980560 -2.500000000 -31.384418488 35.384422302 -2.500000000 -32.233222961 35.233222961 -2.500000000 -32.981101990 34.981101990 -2.500000000 --33.981101990 -35.981101990 -1.500000000 --33.233226776 -36.233222961 -1.500000000 --32.384422302 -36.384418488 -1.500000000 --31.458978653 -36.458980560 -1.500000000 --30.488407135 -36.488403320 -1.500000000 --29.497472763 -36.497474670 -1.500000000 --28.499593735 -36.499591827 -1.500000000 --27.499954224 -36.499954224 -1.500000000 --26.499996185 -36.499996185 -1.500000000 --25.500000000 -36.500000000 -1.500000000 --24.500000000 -36.500000000 -1.500000000 --23.500000000 -36.500000000 -1.500000000 --22.500000000 -36.500000000 -1.500000000 --21.500000000 -36.500000000 -1.500000000 --20.500000000 -36.500000000 -1.500000000 --19.500000000 -36.500000000 -1.500000000 --18.500000000 -36.500000000 -1.500000000 --17.500000000 -36.500000000 -1.500000000 --16.500000000 -36.500000000 -1.500000000 --15.500000000 -36.500000000 -1.500000000 --14.500000000 -36.500000000 -1.500000000 --13.500000000 -36.500000000 -1.500000000 --12.500000000 -36.500000000 -1.500000000 --11.500000000 -36.500000000 -1.500000000 --10.500000000 -36.500000000 -1.500000000 --9.500000000 -36.500000000 -1.500000000 --8.500000000 -36.500000000 -1.500000000 --7.500000000 -36.500000000 -1.500000000 --6.500000000 -36.500000000 -1.500000000 --5.500000000 -36.500000000 -1.500000000 
--4.500000000 -36.500000000 -1.500000000 --3.500000000 -36.500000000 -1.500000000 --2.500000000 -36.500000000 -1.500000000 --1.500000000 -36.500000000 -1.500000000 --0.500000000 -36.500000000 -1.500000000 -0.500000000 -36.500000000 -1.500000000 -1.500000000 -36.500000000 -1.500000000 -2.500000000 -36.500000000 -1.500000000 -3.500000000 -36.500000000 -1.500000000 -4.500000000 -36.500000000 -1.500000000 -5.500000000 -36.500000000 -1.500000000 -6.500000000 -36.500000000 -1.500000000 -7.500000000 -36.500000000 -1.500000000 -8.500000000 -36.500000000 -1.500000000 -9.500000000 -36.500000000 -1.500000000 -10.500000000 -36.500000000 -1.500000000 -11.500000000 -36.500000000 -1.500000000 -12.500000000 -36.500000000 -1.500000000 -13.500000000 -36.500000000 -1.500000000 -14.500000000 -36.500000000 -1.500000000 -15.500000000 -36.500000000 -1.500000000 -16.500000000 -36.500000000 -1.500000000 -17.500000000 -36.500000000 -1.500000000 -18.500000000 -36.500000000 -1.500000000 -19.500000000 -36.500000000 -1.500000000 -20.500000000 -36.500000000 -1.500000000 -21.500000000 -36.500000000 -1.500000000 -22.500000000 -36.500000000 -1.500000000 -23.500000000 -36.500000000 -1.500000000 -24.500000000 -36.500000000 -1.500000000 -25.499996185 -36.499996185 -1.500000000 -26.499954224 -36.499954224 -1.500000000 -27.499591827 -36.499591827 -1.500000000 -28.497470856 -36.497467041 -1.500000000 -29.488407135 -36.488403320 -1.500000000 -30.458978653 -36.458980560 -1.500000000 -31.384418488 -36.384422302 -1.500000000 -32.233222961 -36.233222961 -1.500000000 -32.981101990 -35.981101990 -1.500000000 --35.167964935 -35.167964935 -1.500000000 --34.622871399 -35.622871399 -1.500000000 -33.622871399 -35.622871399 -1.500000000 -34.167964935 -35.167964935 -1.500000000 --35.981101990 -33.981101990 -1.500000000 --35.622871399 -34.622871399 -1.500000000 -34.622871399 -34.622871399 -1.500000000 -34.981101990 -33.981101990 -1.500000000 --36.233222961 -33.233222961 -1.500000000 -35.233222961 -33.233226776 -1.500000000 --36.384418488 -32.384422302 -1.500000000 -35.384418488 -32.384422302 -1.500000000 --36.458976746 -31.458978653 -1.500000000 -35.458980560 -31.458978653 -1.500000000 --36.488403320 -30.488407135 -1.500000000 -35.488403320 -30.488407135 -1.500000000 --36.497467041 -29.497472763 -1.500000000 -35.497474670 -29.497472763 -1.500000000 --36.499591827 -28.499593735 -1.500000000 -35.499591827 -28.499593735 -1.500000000 --36.499954224 -27.499954224 -1.500000000 -35.499954224 -27.499954224 -1.500000000 --36.499996185 -26.499996185 -1.500000000 -35.499996185 -26.499996185 -1.500000000 --36.500000000 -25.500000000 -1.500000000 -35.500000000 -25.500000000 -1.500000000 --36.500000000 -24.500000000 -1.500000000 -35.500000000 -24.500000000 -1.500000000 --36.500000000 -23.500000000 -1.500000000 -35.500000000 -23.500000000 -1.500000000 --36.500000000 -22.500000000 -1.500000000 -35.500000000 -22.500000000 -1.500000000 --36.500000000 -21.500000000 -1.500000000 -35.500000000 -21.500000000 -1.500000000 --36.500000000 -20.500000000 -1.500000000 -35.500000000 -20.500000000 -1.500000000 --36.500000000 -19.500000000 -1.500000000 -35.500000000 -19.500000000 -1.500000000 --36.500000000 -18.500000000 -1.500000000 -35.500000000 -18.500000000 -1.500000000 --36.500000000 -17.500000000 -1.500000000 -35.500000000 -17.500000000 -1.500000000 --36.500000000 -16.500000000 -1.500000000 -35.500000000 -16.500000000 -1.500000000 --36.500000000 -15.500000000 -1.500000000 -35.500000000 -15.500000000 -1.500000000 --36.500000000 -14.500000000 -1.500000000 -35.500000000 
-14.500000000 -1.500000000 --36.500000000 -13.500000000 -1.500000000 -35.500000000 -13.500000000 -1.500000000 --36.500000000 -12.500000000 -1.500000000 -35.500000000 -12.500000000 -1.500000000 --36.500000000 -11.500000000 -1.500000000 -35.500000000 -11.500000000 -1.500000000 --36.500000000 -10.500000000 -1.500000000 -35.500000000 -10.500000000 -1.500000000 --36.500000000 -9.500000000 -1.500000000 -35.500000000 -9.500000000 -1.500000000 --36.500000000 -8.500000000 -1.500000000 -35.500000000 -8.500000000 -1.500000000 --36.500000000 -7.500000000 -1.500000000 -35.500000000 -7.500000000 -1.500000000 --36.500000000 -6.500000000 -1.500000000 -35.500000000 -6.500000000 -1.500000000 --36.500000000 -5.500000000 -1.500000000 -35.500000000 -5.500000000 -1.500000000 --36.500000000 -4.500000000 -1.500000000 -35.500000000 -4.500000000 -1.500000000 --36.500000000 -3.500000000 -1.500000000 -35.500000000 -3.500000000 -1.500000000 --36.500000000 -2.500000000 -1.500000000 -35.500000000 -2.500000000 -1.500000000 --36.500000000 -1.500000000 -1.500000000 -35.500000000 -1.500000000 -1.500000000 --36.500000000 -0.500000000 -1.500000000 -35.500000000 -0.500000000 -1.500000000 --36.500000000 0.500000000 -1.500000000 -35.500000000 0.500000000 -1.500000000 --36.500000000 1.500000000 -1.500000000 -35.500000000 1.500000000 -1.500000000 --36.500000000 2.500000000 -1.500000000 -35.500000000 2.500000000 -1.500000000 --36.500000000 3.500000000 -1.500000000 -35.500000000 3.500000000 -1.500000000 --36.500000000 4.500000000 -1.500000000 -35.500000000 4.500000000 -1.500000000 --36.500000000 5.500000000 -1.500000000 -35.500000000 5.500000000 -1.500000000 --36.500000000 6.500000000 -1.500000000 -35.500000000 6.500000000 -1.500000000 --36.500000000 7.500000000 -1.500000000 -35.500000000 7.500000000 -1.500000000 --36.500000000 8.500000000 -1.500000000 -35.500000000 8.500000000 -1.500000000 --36.500000000 9.500000000 -1.500000000 -35.500000000 9.500000000 -1.500000000 --36.500000000 10.500000000 -1.500000000 -35.500000000 10.500000000 -1.500000000 --36.500000000 11.500000000 -1.500000000 -35.500000000 11.500000000 -1.500000000 --36.500000000 12.500000000 -1.500000000 -35.500000000 12.500000000 -1.500000000 --36.500000000 13.500000000 -1.500000000 -35.500000000 13.500000000 -1.500000000 --36.500000000 14.500000000 -1.500000000 -35.500000000 14.500000000 -1.500000000 --36.500000000 15.500000000 -1.500000000 -35.500000000 15.500000000 -1.500000000 --36.500000000 16.500000000 -1.500000000 -35.500000000 16.500000000 -1.500000000 --36.500000000 17.500000000 -1.500000000 -35.500000000 17.500000000 -1.500000000 --36.500000000 18.500000000 -1.500000000 -35.500000000 18.500000000 -1.500000000 --36.500000000 19.500000000 -1.500000000 -35.500000000 19.500000000 -1.500000000 --36.500000000 20.500000000 -1.500000000 -35.500000000 20.500000000 -1.500000000 --36.500000000 21.500000000 -1.500000000 -35.500000000 21.500000000 -1.500000000 --36.500000000 22.500000000 -1.500000000 -35.500000000 22.500000000 -1.500000000 --36.500000000 23.500000000 -1.500000000 -35.500000000 23.500000000 -1.500000000 --36.500000000 24.500000000 -1.500000000 -35.500000000 24.500000000 -1.500000000 --36.499996185 25.499996185 -1.500000000 -35.499996185 25.499996185 -1.500000000 --36.499954224 26.499954224 -1.500000000 -35.499954224 26.499954224 -1.500000000 --36.499591827 27.499591827 -1.500000000 -35.499591827 27.499591827 -1.500000000 --36.497474670 28.497470856 -1.500000000 -35.497467041 28.497470856 -1.500000000 --36.488403320 29.488407135 -1.500000000 -35.488403320 
29.488407135 -1.500000000 --36.458980560 30.458978653 -1.500000000 -35.458980560 30.458978653 -1.500000000 --36.384422302 31.384418488 -1.500000000 -35.384422302 31.384418488 -1.500000000 --36.233222961 32.233222961 -1.500000000 -35.233222961 32.233222961 -1.500000000 --35.981101990 32.981101990 -1.500000000 --35.622871399 33.622871399 -1.500000000 -34.622871399 33.622871399 -1.500000000 -34.981101990 32.981101990 -1.500000000 --35.167964935 34.167964935 -1.500000000 --34.622871399 34.622871399 -1.500000000 -33.622871399 34.622871399 -1.500000000 -34.167964935 34.167964935 -1.500000000 --33.981101990 34.981101990 -1.500000000 --33.233222961 35.233222961 -1.500000000 --32.384422302 35.384418488 -1.500000000 --31.458978653 35.458976746 -1.500000000 --30.488407135 35.488403320 -1.500000000 --29.497472763 35.497467041 -1.500000000 --28.499593735 35.499591827 -1.500000000 --27.499954224 35.499954224 -1.500000000 --26.499996185 35.499996185 -1.500000000 --25.500000000 35.500000000 -1.500000000 --24.500000000 35.500000000 -1.500000000 --23.500000000 35.500000000 -1.500000000 --22.500000000 35.500000000 -1.500000000 --21.500000000 35.500000000 -1.500000000 --20.500000000 35.500000000 -1.500000000 --19.500000000 35.500000000 -1.500000000 --18.500000000 35.500000000 -1.500000000 --17.500000000 35.500000000 -1.500000000 --16.500000000 35.500000000 -1.500000000 --15.500000000 35.500000000 -1.500000000 --14.500000000 35.500000000 -1.500000000 --13.500000000 35.500000000 -1.500000000 --12.500000000 35.500000000 -1.500000000 --11.500000000 35.500000000 -1.500000000 --10.500000000 35.500000000 -1.500000000 --9.500000000 35.500000000 -1.500000000 --8.500000000 35.500000000 -1.500000000 --7.500000000 35.500000000 -1.500000000 --6.500000000 35.500000000 -1.500000000 --5.500000000 35.500000000 -1.500000000 --4.500000000 35.500000000 -1.500000000 --3.500000000 35.500000000 -1.500000000 --2.500000000 35.500000000 -1.500000000 --1.500000000 35.500000000 -1.500000000 --0.500000000 35.500000000 -1.500000000 -0.500000000 35.500000000 -1.500000000 -1.500000000 35.500000000 -1.500000000 -2.500000000 35.500000000 -1.500000000 -3.500000000 35.500000000 -1.500000000 -4.500000000 35.500000000 -1.500000000 -5.500000000 35.500000000 -1.500000000 -6.500000000 35.500000000 -1.500000000 -7.500000000 35.500000000 -1.500000000 -8.500000000 35.500000000 -1.500000000 -9.500000000 35.500000000 -1.500000000 -10.500000000 35.500000000 -1.500000000 -11.500000000 35.500000000 -1.500000000 -12.500000000 35.500000000 -1.500000000 -13.500000000 35.500000000 -1.500000000 -14.500000000 35.500000000 -1.500000000 -15.500000000 35.500000000 -1.500000000 -16.500000000 35.500000000 -1.500000000 -17.500000000 35.500000000 -1.500000000 -18.500000000 35.500000000 -1.500000000 -19.500000000 35.500000000 -1.500000000 -20.500000000 35.500000000 -1.500000000 -21.500000000 35.500000000 -1.500000000 -22.500000000 35.500000000 -1.500000000 -23.500000000 35.500000000 -1.500000000 -24.500000000 35.500000000 -1.500000000 -25.499996185 35.499996185 -1.500000000 -26.499954224 35.499954224 -1.500000000 -27.499591827 35.499591827 -1.500000000 -28.497470856 35.497474670 -1.500000000 -29.488407135 35.488403320 -1.500000000 -30.458978653 35.458980560 -1.500000000 -31.384418488 35.384422302 -1.500000000 -32.233222961 35.233222961 -1.500000000 -32.981101990 34.981101990 -1.500000000 --33.981101990 -35.981101990 -0.500000000 --33.233226776 -36.233222961 -0.500000000 --32.384422302 -36.384418488 -0.500000000 --31.458978653 -36.458980560 -0.500000000 --30.488407135 
-36.488403320 -0.500000000 --29.497472763 -36.497474670 -0.500000000 --28.499593735 -36.499591827 -0.500000000 --27.499954224 -36.499954224 -0.500000000 --26.499996185 -36.499996185 -0.500000000 --25.500000000 -36.500000000 -0.500000000 --24.500000000 -36.500000000 -0.500000000 --23.500000000 -36.500000000 -0.500000000 --22.500000000 -36.500000000 -0.500000000 --21.500000000 -36.500000000 -0.500000000 --20.500000000 -36.500000000 -0.500000000 --19.500000000 -36.500000000 -0.500000000 --18.500000000 -36.500000000 -0.500000000 --17.500000000 -36.500000000 -0.500000000 --16.500000000 -36.500000000 -0.500000000 --15.500000000 -36.500000000 -0.500000000 --14.500000000 -36.500000000 -0.500000000 --13.500000000 -36.500000000 -0.500000000 --12.500000000 -36.500000000 -0.500000000 --11.500000000 -36.500000000 -0.500000000 --10.500000000 -36.500000000 -0.500000000 --9.500000000 -36.500000000 -0.500000000 --8.500000000 -36.500000000 -0.500000000 --7.500000000 -36.500000000 -0.500000000 --6.500000000 -36.500000000 -0.500000000 --5.500000000 -36.500000000 -0.500000000 --4.500000000 -36.500000000 -0.500000000 --3.500000000 -36.500000000 -0.500000000 --2.500000000 -36.500000000 -0.500000000 --1.500000000 -36.500000000 -0.500000000 --0.500000000 -36.500000000 -0.500000000 -0.500000000 -36.500000000 -0.500000000 -1.500000000 -36.500000000 -0.500000000 -2.500000000 -36.500000000 -0.500000000 -3.500000000 -36.500000000 -0.500000000 -4.500000000 -36.500000000 -0.500000000 -5.500000000 -36.500000000 -0.500000000 -6.500000000 -36.500000000 -0.500000000 -7.500000000 -36.500000000 -0.500000000 -8.500000000 -36.500000000 -0.500000000 -9.500000000 -36.500000000 -0.500000000 -10.500000000 -36.500000000 -0.500000000 -11.500000000 -36.500000000 -0.500000000 -12.500000000 -36.500000000 -0.500000000 -13.500000000 -36.500000000 -0.500000000 -14.500000000 -36.500000000 -0.500000000 -15.500000000 -36.500000000 -0.500000000 -16.500000000 -36.500000000 -0.500000000 -17.500000000 -36.500000000 -0.500000000 -18.500000000 -36.500000000 -0.500000000 -19.500000000 -36.500000000 -0.500000000 -20.500000000 -36.500000000 -0.500000000 -21.500000000 -36.500000000 -0.500000000 -22.500000000 -36.500000000 -0.500000000 -23.500000000 -36.500000000 -0.500000000 -24.500000000 -36.500000000 -0.500000000 -25.499996185 -36.499996185 -0.500000000 -26.499954224 -36.499954224 -0.500000000 -27.499591827 -36.499591827 -0.500000000 -28.497470856 -36.497467041 -0.500000000 -29.488407135 -36.488403320 -0.500000000 -30.458978653 -36.458980560 -0.500000000 -31.384418488 -36.384422302 -0.500000000 -32.233222961 -36.233222961 -0.500000000 -32.981101990 -35.981101990 -0.500000000 --35.167964935 -35.167964935 -0.500000000 --34.622871399 -35.622871399 -0.500000000 -33.622871399 -35.622871399 -0.500000000 -34.167964935 -35.167964935 -0.500000000 --35.981101990 -33.981101990 -0.500000000 --35.622871399 -34.622871399 -0.500000000 -34.622871399 -34.622871399 -0.500000000 -34.981101990 -33.981101990 -0.500000000 --36.233222961 -33.233222961 -0.500000000 -35.233222961 -33.233226776 -0.500000000 --36.384418488 -32.384422302 -0.500000000 -35.384418488 -32.384422302 -0.500000000 --36.458976746 -31.458978653 -0.500000000 -35.458980560 -31.458978653 -0.500000000 --36.488403320 -30.488407135 -0.500000000 -35.488403320 -30.488407135 -0.500000000 --36.497467041 -29.497472763 -0.500000000 -35.497474670 -29.497472763 -0.500000000 --36.499591827 -28.499593735 -0.500000000 -35.499591827 -28.499593735 -0.500000000 --36.499954224 -27.499954224 -0.500000000 -35.499954224 
-27.499954224 -0.500000000 --36.499996185 -26.499996185 -0.500000000 -35.499996185 -26.499996185 -0.500000000 --36.500000000 -25.500000000 -0.500000000 -35.500000000 -25.500000000 -0.500000000 --36.500000000 -24.500000000 -0.500000000 -35.500000000 -24.500000000 -0.500000000 --36.500000000 -23.500000000 -0.500000000 -35.500000000 -23.500000000 -0.500000000 --36.500000000 -22.500000000 -0.500000000 -35.500000000 -22.500000000 -0.500000000 --36.500000000 -21.500000000 -0.500000000 -35.500000000 -21.500000000 -0.500000000 --36.500000000 -20.500000000 -0.500000000 -35.500000000 -20.500000000 -0.500000000 --36.500000000 -19.500000000 -0.500000000 -35.500000000 -19.500000000 -0.500000000 --36.500000000 -18.500000000 -0.500000000 -35.500000000 -18.500000000 -0.500000000 --36.500000000 -17.500000000 -0.500000000 -35.500000000 -17.500000000 -0.500000000 --36.500000000 -16.500000000 -0.500000000 -35.500000000 -16.500000000 -0.500000000 --36.500000000 -15.500000000 -0.500000000 -35.500000000 -15.500000000 -0.500000000 --36.500000000 -14.500000000 -0.500000000 -35.500000000 -14.500000000 -0.500000000 --36.500000000 -13.500000000 -0.500000000 -35.500000000 -13.500000000 -0.500000000 --36.500000000 -12.500000000 -0.500000000 -35.500000000 -12.500000000 -0.500000000 --36.500000000 -11.500000000 -0.500000000 -35.500000000 -11.500000000 -0.500000000 --36.500000000 -10.500000000 -0.500000000 -35.500000000 -10.500000000 -0.500000000 --36.500000000 -9.500000000 -0.500000000 -35.500000000 -9.500000000 -0.500000000 --36.500000000 -8.500000000 -0.500000000 -35.500000000 -8.500000000 -0.500000000 --36.500000000 -7.500000000 -0.500000000 -35.500000000 -7.500000000 -0.500000000 --36.500000000 -6.500000000 -0.500000000 -35.500000000 -6.500000000 -0.500000000 --36.500000000 -5.500000000 -0.500000000 -35.500000000 -5.500000000 -0.500000000 --36.500000000 -4.500000000 -0.500000000 -35.500000000 -4.500000000 -0.500000000 --36.500000000 -3.500000000 -0.500000000 -35.500000000 -3.500000000 -0.500000000 --36.500000000 -2.500000000 -0.500000000 -35.500000000 -2.500000000 -0.500000000 --36.500000000 -1.500000000 -0.500000000 -35.500000000 -1.500000000 -0.500000000 --36.500000000 -0.500000000 -0.500000000 -35.500000000 -0.500000000 -0.500000000 --36.500000000 0.500000000 -0.500000000 -35.500000000 0.500000000 -0.500000000 --36.500000000 1.500000000 -0.500000000 -35.500000000 1.500000000 -0.500000000 --36.500000000 2.500000000 -0.500000000 -35.500000000 2.500000000 -0.500000000 --36.500000000 3.500000000 -0.500000000 -35.500000000 3.500000000 -0.500000000 --36.500000000 4.500000000 -0.500000000 -35.500000000 4.500000000 -0.500000000 --36.500000000 5.500000000 -0.500000000 -35.500000000 5.500000000 -0.500000000 --36.500000000 6.500000000 -0.500000000 -35.500000000 6.500000000 -0.500000000 --36.500000000 7.500000000 -0.500000000 -35.500000000 7.500000000 -0.500000000 --36.500000000 8.500000000 -0.500000000 -35.500000000 8.500000000 -0.500000000 --36.500000000 9.500000000 -0.500000000 -35.500000000 9.500000000 -0.500000000 --36.500000000 10.500000000 -0.500000000 -35.500000000 10.500000000 -0.500000000 --36.500000000 11.500000000 -0.500000000 -35.500000000 11.500000000 -0.500000000 --36.500000000 12.500000000 -0.500000000 -35.500000000 12.500000000 -0.500000000 --36.500000000 13.500000000 -0.500000000 -35.500000000 13.500000000 -0.500000000 --36.500000000 14.500000000 -0.500000000 -35.500000000 14.500000000 -0.500000000 --36.500000000 15.500000000 -0.500000000 -35.500000000 15.500000000 -0.500000000 --36.500000000 16.500000000 
-0.500000000 -35.500000000 16.500000000 -0.500000000 --36.500000000 17.500000000 -0.500000000 -35.500000000 17.500000000 -0.500000000 --36.500000000 18.500000000 -0.500000000 -35.500000000 18.500000000 -0.500000000 --36.500000000 19.500000000 -0.500000000 -35.500000000 19.500000000 -0.500000000 --36.500000000 20.500000000 -0.500000000 -35.500000000 20.500000000 -0.500000000 --36.500000000 21.500000000 -0.500000000 -35.500000000 21.500000000 -0.500000000 --36.500000000 22.500000000 -0.500000000 -35.500000000 22.500000000 -0.500000000 --36.500000000 23.500000000 -0.500000000 -35.500000000 23.500000000 -0.500000000 --36.500000000 24.500000000 -0.500000000 -35.500000000 24.500000000 -0.500000000 --36.499996185 25.499996185 -0.500000000 -35.499996185 25.499996185 -0.500000000 --36.499954224 26.499954224 -0.500000000 -35.499954224 26.499954224 -0.500000000 --36.499591827 27.499591827 -0.500000000 -35.499591827 27.499591827 -0.500000000 --36.497474670 28.497470856 -0.500000000 -35.497467041 28.497470856 -0.500000000 --36.488403320 29.488407135 -0.500000000 -35.488403320 29.488407135 -0.500000000 --36.458980560 30.458978653 -0.500000000 -35.458980560 30.458978653 -0.500000000 --36.384422302 31.384418488 -0.500000000 -35.384422302 31.384418488 -0.500000000 --36.233222961 32.233222961 -0.500000000 -35.233222961 32.233222961 -0.500000000 --35.981101990 32.981101990 -0.500000000 --35.622871399 33.622871399 -0.500000000 -34.622871399 33.622871399 -0.500000000 -34.981101990 32.981101990 -0.500000000 --35.167964935 34.167964935 -0.500000000 --34.622871399 34.622871399 -0.500000000 -33.622871399 34.622871399 -0.500000000 -34.167964935 34.167964935 -0.500000000 --33.981101990 34.981101990 -0.500000000 --33.233222961 35.233222961 -0.500000000 --32.384422302 35.384418488 -0.500000000 --31.458978653 35.458976746 -0.500000000 --30.488407135 35.488403320 -0.500000000 --29.497472763 35.497467041 -0.500000000 --28.499593735 35.499591827 -0.500000000 --27.499954224 35.499954224 -0.500000000 --26.499996185 35.499996185 -0.500000000 --25.500000000 35.500000000 -0.500000000 --24.500000000 35.500000000 -0.500000000 --23.500000000 35.500000000 -0.500000000 --22.500000000 35.500000000 -0.500000000 --21.500000000 35.500000000 -0.500000000 --20.500000000 35.500000000 -0.500000000 --19.500000000 35.500000000 -0.500000000 --18.500000000 35.500000000 -0.500000000 --17.500000000 35.500000000 -0.500000000 --16.500000000 35.500000000 -0.500000000 --15.500000000 35.500000000 -0.500000000 --14.500000000 35.500000000 -0.500000000 --13.500000000 35.500000000 -0.500000000 --12.500000000 35.500000000 -0.500000000 --11.500000000 35.500000000 -0.500000000 --10.500000000 35.500000000 -0.500000000 --9.500000000 35.500000000 -0.500000000 --8.500000000 35.500000000 -0.500000000 --7.500000000 35.500000000 -0.500000000 --6.500000000 35.500000000 -0.500000000 --5.500000000 35.500000000 -0.500000000 --4.500000000 35.500000000 -0.500000000 --3.500000000 35.500000000 -0.500000000 --2.500000000 35.500000000 -0.500000000 --1.500000000 35.500000000 -0.500000000 --0.500000000 35.500000000 -0.500000000 -0.500000000 35.500000000 -0.500000000 -1.500000000 35.500000000 -0.500000000 -2.500000000 35.500000000 -0.500000000 -3.500000000 35.500000000 -0.500000000 -4.500000000 35.500000000 -0.500000000 -5.500000000 35.500000000 -0.500000000 -6.500000000 35.500000000 -0.500000000 -7.500000000 35.500000000 -0.500000000 -8.500000000 35.500000000 -0.500000000 -9.500000000 35.500000000 -0.500000000 -10.500000000 35.500000000 -0.500000000 -11.500000000 35.500000000 
-[data hunk: the deleted lines are whitespace-separated x y z vertex coordinate triples (values roughly in the -36.5 to 35.5 range), repeated for successive z-slices (0.5, 1.5, 2.5, ... 6.5); the full numeric listing is not reproduced here]
6.500000000 --36.500000000 -10.500000000 6.500000000 -35.500000000 -10.500000000 6.500000000 --36.500000000 -9.500000000 6.500000000 -35.500000000 -9.500000000 6.500000000 --36.500000000 -8.500000000 6.500000000 -35.500000000 -8.500000000 6.500000000 --36.500000000 -7.500000000 6.500000000 -35.500000000 -7.500000000 6.500000000 --36.500000000 -6.500000000 6.500000000 -35.500000000 -6.500000000 6.500000000 --36.500000000 -5.500000000 6.500000000 -35.500000000 -5.500000000 6.500000000 --36.500000000 -4.500000000 6.500000000 -35.500000000 -4.500000000 6.500000000 --36.500000000 -3.500000000 6.500000000 -35.500000000 -3.500000000 6.500000000 --36.500000000 -2.500000000 6.500000000 -35.500000000 -2.500000000 6.500000000 --36.500000000 -1.500000000 6.500000000 -35.500000000 -1.500000000 6.500000000 --36.500000000 -0.500000000 6.500000000 -35.500000000 -0.500000000 6.500000000 --36.500000000 0.500000000 6.500000000 -35.500000000 0.500000000 6.500000000 --36.500000000 1.500000000 6.500000000 -35.500000000 1.500000000 6.500000000 --36.500000000 2.500000000 6.500000000 -35.500000000 2.500000000 6.500000000 --36.500000000 3.500000000 6.500000000 -35.500000000 3.500000000 6.500000000 --36.500000000 4.500000000 6.500000000 -35.500000000 4.500000000 6.500000000 --36.500000000 5.500000000 6.500000000 -35.500000000 5.500000000 6.500000000 --36.500000000 6.500000000 6.500000000 -35.500000000 6.500000000 6.500000000 --36.500000000 7.500000000 6.500000000 -35.500000000 7.500000000 6.500000000 --36.500000000 8.500000000 6.500000000 -35.500000000 8.500000000 6.500000000 --36.500000000 9.500000000 6.500000000 -35.500000000 9.500000000 6.500000000 --36.500000000 10.500000000 6.500000000 -35.500000000 10.500000000 6.500000000 --36.500000000 11.500000000 6.500000000 -35.500000000 11.500000000 6.500000000 --36.500000000 12.500000000 6.500000000 -35.500000000 12.500000000 6.500000000 --36.500000000 13.500000000 6.500000000 -35.500000000 13.500000000 6.500000000 --36.500000000 14.500000000 6.500000000 -35.500000000 14.500000000 6.500000000 --36.500000000 15.500000000 6.500000000 -35.500000000 15.500000000 6.500000000 --36.500000000 16.500000000 6.500000000 -35.500000000 16.500000000 6.500000000 --36.500000000 17.500000000 6.500000000 -35.500000000 17.500000000 6.500000000 --36.500000000 18.500000000 6.500000000 -35.500000000 18.500000000 6.500000000 --36.500000000 19.500000000 6.500000000 -35.500000000 19.500000000 6.500000000 --36.500000000 20.500000000 6.500000000 -35.500000000 20.500000000 6.500000000 --36.500000000 21.500000000 6.500000000 -35.500000000 21.500000000 6.500000000 --36.500000000 22.500000000 6.500000000 -35.500000000 22.500000000 6.500000000 --36.500000000 23.500000000 6.500000000 -35.500000000 23.500000000 6.500000000 --36.500000000 24.500000000 6.500000000 -35.500000000 24.500000000 6.500000000 --36.499996185 25.499996185 6.500000000 -35.499996185 25.499996185 6.500000000 --36.499954224 26.499954224 6.500000000 -35.499954224 26.499954224 6.500000000 --36.499591827 27.499591827 6.500000000 -35.499591827 27.499591827 6.500000000 --36.497474670 28.497470856 6.500000000 -35.497467041 28.497470856 6.500000000 --36.488403320 29.488407135 6.500000000 -35.488403320 29.488407135 6.500000000 --36.458980560 30.458978653 6.500000000 -35.458980560 30.458978653 6.500000000 --36.384422302 31.384418488 6.500000000 -35.384422302 31.384418488 6.500000000 --36.233222961 32.233222961 6.500000000 -35.233222961 32.233222961 6.500000000 --35.981101990 32.981101990 6.500000000 --35.622871399 33.622871399 6.500000000 
-34.622871399 33.622871399 6.500000000 -34.981101990 32.981101990 6.500000000 --35.167964935 34.167964935 6.500000000 --34.622871399 34.622871399 6.500000000 -33.622871399 34.622871399 6.500000000 -34.167964935 34.167964935 6.500000000 --33.981101990 34.981101990 6.500000000 --33.233222961 35.233222961 6.500000000 --32.384422302 35.384418488 6.500000000 --31.458978653 35.458976746 6.500000000 --30.488407135 35.488403320 6.500000000 --29.497472763 35.497467041 6.500000000 --28.499593735 35.499591827 6.500000000 --27.499954224 35.499954224 6.500000000 --26.499996185 35.499996185 6.500000000 --25.500000000 35.500000000 6.500000000 --24.500000000 35.500000000 6.500000000 --23.500000000 35.500000000 6.500000000 --22.500000000 35.500000000 6.500000000 --21.500000000 35.500000000 6.500000000 --20.500000000 35.500000000 6.500000000 --19.500000000 35.500000000 6.500000000 --18.500000000 35.500000000 6.500000000 --17.500000000 35.500000000 6.500000000 --16.500000000 35.500000000 6.500000000 --15.500000000 35.500000000 6.500000000 --14.500000000 35.500000000 6.500000000 --13.500000000 35.500000000 6.500000000 --12.500000000 35.500000000 6.500000000 --11.500000000 35.500000000 6.500000000 --10.500000000 35.500000000 6.500000000 --9.500000000 35.500000000 6.500000000 --8.500000000 35.500000000 6.500000000 --7.500000000 35.500000000 6.500000000 --6.500000000 35.500000000 6.500000000 --5.500000000 35.500000000 6.500000000 --4.500000000 35.500000000 6.500000000 --3.500000000 35.500000000 6.500000000 --2.500000000 35.500000000 6.500000000 --1.500000000 35.500000000 6.500000000 --0.500000000 35.500000000 6.500000000 -0.500000000 35.500000000 6.500000000 -1.500000000 35.500000000 6.500000000 -2.500000000 35.500000000 6.500000000 -3.500000000 35.500000000 6.500000000 -4.500000000 35.500000000 6.500000000 -5.500000000 35.500000000 6.500000000 -6.500000000 35.500000000 6.500000000 -7.500000000 35.500000000 6.500000000 -8.500000000 35.500000000 6.500000000 -9.500000000 35.500000000 6.500000000 -10.500000000 35.500000000 6.500000000 -11.500000000 35.500000000 6.500000000 -12.500000000 35.500000000 6.500000000 -13.500000000 35.500000000 6.500000000 -14.500000000 35.500000000 6.500000000 -15.500000000 35.500000000 6.500000000 -16.500000000 35.500000000 6.500000000 -17.500000000 35.500000000 6.500000000 -18.500000000 35.500000000 6.500000000 -19.500000000 35.500000000 6.500000000 -20.500000000 35.500000000 6.500000000 -21.500000000 35.500000000 6.500000000 -22.500000000 35.500000000 6.500000000 -23.500000000 35.500000000 6.500000000 -24.500000000 35.500000000 6.500000000 -25.499996185 35.499996185 6.500000000 -26.499954224 35.499954224 6.500000000 -27.499591827 35.499591827 6.500000000 -28.497470856 35.497474670 6.500000000 -29.488407135 35.488403320 6.500000000 -30.458978653 35.458980560 6.500000000 -31.384418488 35.384422302 6.500000000 -32.233222961 35.233222961 6.500000000 -32.981101990 34.981101990 6.500000000 --33.981101990 -35.981101990 7.500000000 --33.233226776 -36.233222961 7.500000000 --32.384422302 -36.384418488 7.500000000 --31.458978653 -36.458980560 7.500000000 --30.488407135 -36.488403320 7.500000000 --29.497472763 -36.497474670 7.500000000 --28.499593735 -36.499591827 7.500000000 --27.499954224 -36.499954224 7.500000000 --26.499996185 -36.499996185 7.500000000 --25.500000000 -36.500000000 7.500000000 --24.500000000 -36.500000000 7.500000000 --23.500000000 -36.500000000 7.500000000 --22.500000000 -36.500000000 7.500000000 --21.500000000 -36.500000000 7.500000000 --20.500000000 -36.500000000 
7.500000000 --19.500000000 -36.500000000 7.500000000 --18.500000000 -36.500000000 7.500000000 --17.500000000 -36.500000000 7.500000000 --16.500000000 -36.500000000 7.500000000 --15.500000000 -36.500000000 7.500000000 --14.500000000 -36.500000000 7.500000000 --13.500000000 -36.500000000 7.500000000 --12.500000000 -36.500000000 7.500000000 --11.500000000 -36.500000000 7.500000000 --10.500000000 -36.500000000 7.500000000 --9.500000000 -36.500000000 7.500000000 --8.500000000 -36.500000000 7.500000000 --7.500000000 -36.500000000 7.500000000 --6.500000000 -36.500000000 7.500000000 --5.500000000 -36.500000000 7.500000000 --4.500000000 -36.500000000 7.500000000 --3.500000000 -36.500000000 7.500000000 --2.500000000 -36.500000000 7.500000000 --1.500000000 -36.500000000 7.500000000 --0.500000000 -36.500000000 7.500000000 -0.500000000 -36.500000000 7.500000000 -1.500000000 -36.500000000 7.500000000 -2.500000000 -36.500000000 7.500000000 -3.500000000 -36.500000000 7.500000000 -4.500000000 -36.500000000 7.500000000 -5.500000000 -36.500000000 7.500000000 -6.500000000 -36.500000000 7.500000000 -7.500000000 -36.500000000 7.500000000 -8.500000000 -36.500000000 7.500000000 -9.500000000 -36.500000000 7.500000000 -10.500000000 -36.500000000 7.500000000 -11.500000000 -36.500000000 7.500000000 -12.500000000 -36.500000000 7.500000000 -13.500000000 -36.500000000 7.500000000 -14.500000000 -36.500000000 7.500000000 -15.500000000 -36.500000000 7.500000000 -16.500000000 -36.500000000 7.500000000 -17.500000000 -36.500000000 7.500000000 -18.500000000 -36.500000000 7.500000000 -19.500000000 -36.500000000 7.500000000 -20.500000000 -36.500000000 7.500000000 -21.500000000 -36.500000000 7.500000000 -22.500000000 -36.500000000 7.500000000 -23.500000000 -36.500000000 7.500000000 -24.500000000 -36.500000000 7.500000000 -25.499996185 -36.499996185 7.500000000 -26.499954224 -36.499954224 7.500000000 -27.499591827 -36.499591827 7.500000000 -28.497470856 -36.497467041 7.500000000 -29.488407135 -36.488403320 7.500000000 -30.458978653 -36.458980560 7.500000000 -31.384418488 -36.384422302 7.500000000 -32.233222961 -36.233222961 7.500000000 -32.981101990 -35.981101990 7.500000000 --35.167964935 -35.167964935 7.500000000 --34.622871399 -35.622871399 7.500000000 -33.622871399 -35.622871399 7.500000000 -34.167964935 -35.167964935 7.500000000 --35.981101990 -33.981101990 7.500000000 --35.622871399 -34.622871399 7.500000000 -34.622871399 -34.622871399 7.500000000 -34.981101990 -33.981101990 7.500000000 --36.233222961 -33.233222961 7.500000000 -35.233222961 -33.233226776 7.500000000 --36.384418488 -32.384422302 7.500000000 -35.384418488 -32.384422302 7.500000000 --36.458976746 -31.458978653 7.500000000 -35.458980560 -31.458978653 7.500000000 --36.488403320 -30.488407135 7.500000000 -35.488403320 -30.488407135 7.500000000 --36.497467041 -29.497472763 7.500000000 -35.497474670 -29.497472763 7.500000000 --36.499591827 -28.499593735 7.500000000 -35.499591827 -28.499593735 7.500000000 --36.499954224 -27.499954224 7.500000000 -35.499954224 -27.499954224 7.500000000 --36.499996185 -26.499996185 7.500000000 -35.499996185 -26.499996185 7.500000000 --36.500000000 -25.500000000 7.500000000 -35.500000000 -25.500000000 7.500000000 --36.500000000 -24.500000000 7.500000000 -35.500000000 -24.500000000 7.500000000 --36.500000000 -23.500000000 7.500000000 -35.500000000 -23.500000000 7.500000000 --36.500000000 -22.500000000 7.500000000 -35.500000000 -22.500000000 7.500000000 --36.500000000 -21.500000000 7.500000000 -35.500000000 -21.500000000 7.500000000 
--36.500000000 -20.500000000 7.500000000 -35.500000000 -20.500000000 7.500000000 --36.500000000 -19.500000000 7.500000000 -35.500000000 -19.500000000 7.500000000 --36.500000000 -18.500000000 7.500000000 -35.500000000 -18.500000000 7.500000000 --36.500000000 -17.500000000 7.500000000 -35.500000000 -17.500000000 7.500000000 --36.500000000 -16.500000000 7.500000000 -35.500000000 -16.500000000 7.500000000 --36.500000000 -15.500000000 7.500000000 -35.500000000 -15.500000000 7.500000000 --36.500000000 -14.500000000 7.500000000 -35.500000000 -14.500000000 7.500000000 --36.500000000 -13.500000000 7.500000000 -35.500000000 -13.500000000 7.500000000 --36.500000000 -12.500000000 7.500000000 -35.500000000 -12.500000000 7.500000000 --36.500000000 -11.500000000 7.500000000 -35.500000000 -11.500000000 7.500000000 --36.500000000 -10.500000000 7.500000000 -35.500000000 -10.500000000 7.500000000 --36.500000000 -9.500000000 7.500000000 -35.500000000 -9.500000000 7.500000000 --36.500000000 -8.500000000 7.500000000 -35.500000000 -8.500000000 7.500000000 --36.500000000 -7.500000000 7.500000000 -35.500000000 -7.500000000 7.500000000 --36.500000000 -6.500000000 7.500000000 -35.500000000 -6.500000000 7.500000000 --36.500000000 -5.500000000 7.500000000 -35.500000000 -5.500000000 7.500000000 --36.500000000 -4.500000000 7.500000000 -35.500000000 -4.500000000 7.500000000 --36.500000000 -3.500000000 7.500000000 -35.500000000 -3.500000000 7.500000000 --36.500000000 -2.500000000 7.500000000 -35.500000000 -2.500000000 7.500000000 --36.500000000 -1.500000000 7.500000000 -35.500000000 -1.500000000 7.500000000 --36.500000000 -0.500000000 7.500000000 -35.500000000 -0.500000000 7.500000000 --36.500000000 0.500000000 7.500000000 -35.500000000 0.500000000 7.500000000 --36.500000000 1.500000000 7.500000000 -35.500000000 1.500000000 7.500000000 --36.500000000 2.500000000 7.500000000 -35.500000000 2.500000000 7.500000000 --36.500000000 3.500000000 7.500000000 -35.500000000 3.500000000 7.500000000 --36.500000000 4.500000000 7.500000000 -35.500000000 4.500000000 7.500000000 --36.500000000 5.500000000 7.500000000 -35.500000000 5.500000000 7.500000000 --36.500000000 6.500000000 7.500000000 -35.500000000 6.500000000 7.500000000 --36.500000000 7.500000000 7.500000000 -35.500000000 7.500000000 7.500000000 --36.500000000 8.500000000 7.500000000 -35.500000000 8.500000000 7.500000000 --36.500000000 9.500000000 7.500000000 -35.500000000 9.500000000 7.500000000 --36.500000000 10.500000000 7.500000000 -35.500000000 10.500000000 7.500000000 --36.500000000 11.500000000 7.500000000 -35.500000000 11.500000000 7.500000000 --36.500000000 12.500000000 7.500000000 -35.500000000 12.500000000 7.500000000 --36.500000000 13.500000000 7.500000000 -35.500000000 13.500000000 7.500000000 --36.500000000 14.500000000 7.500000000 -35.500000000 14.500000000 7.500000000 --36.500000000 15.500000000 7.500000000 -35.500000000 15.500000000 7.500000000 --36.500000000 16.500000000 7.500000000 -35.500000000 16.500000000 7.500000000 --36.500000000 17.500000000 7.500000000 -35.500000000 17.500000000 7.500000000 --36.500000000 18.500000000 7.500000000 -35.500000000 18.500000000 7.500000000 --36.500000000 19.500000000 7.500000000 -35.500000000 19.500000000 7.500000000 --36.500000000 20.500000000 7.500000000 -35.500000000 20.500000000 7.500000000 --36.500000000 21.500000000 7.500000000 -35.500000000 21.500000000 7.500000000 --36.500000000 22.500000000 7.500000000 -35.500000000 22.500000000 7.500000000 --36.500000000 23.500000000 7.500000000 -35.500000000 23.500000000 
7.500000000 --36.500000000 24.500000000 7.500000000 -35.500000000 24.500000000 7.500000000 --36.499996185 25.499996185 7.500000000 -35.499996185 25.499996185 7.500000000 --36.499954224 26.499954224 7.500000000 -35.499954224 26.499954224 7.500000000 --36.499591827 27.499591827 7.500000000 -35.499591827 27.499591827 7.500000000 --36.497474670 28.497470856 7.500000000 -35.497467041 28.497470856 7.500000000 --36.488403320 29.488407135 7.500000000 -35.488403320 29.488407135 7.500000000 --36.458980560 30.458978653 7.500000000 -35.458980560 30.458978653 7.500000000 --36.384422302 31.384418488 7.500000000 -35.384422302 31.384418488 7.500000000 --36.233222961 32.233222961 7.500000000 -35.233222961 32.233222961 7.500000000 --35.981101990 32.981101990 7.500000000 --35.622871399 33.622871399 7.500000000 -34.622871399 33.622871399 7.500000000 -34.981101990 32.981101990 7.500000000 --35.167964935 34.167964935 7.500000000 --34.622871399 34.622871399 7.500000000 -33.622871399 34.622871399 7.500000000 -34.167964935 34.167964935 7.500000000 --33.981101990 34.981101990 7.500000000 --33.233222961 35.233222961 7.500000000 --32.384422302 35.384418488 7.500000000 --31.458978653 35.458976746 7.500000000 --30.488407135 35.488403320 7.500000000 --29.497472763 35.497467041 7.500000000 --28.499593735 35.499591827 7.500000000 --27.499954224 35.499954224 7.500000000 --26.499996185 35.499996185 7.500000000 --25.500000000 35.500000000 7.500000000 --24.500000000 35.500000000 7.500000000 --23.500000000 35.500000000 7.500000000 --22.500000000 35.500000000 7.500000000 --21.500000000 35.500000000 7.500000000 --20.500000000 35.500000000 7.500000000 --19.500000000 35.500000000 7.500000000 --18.500000000 35.500000000 7.500000000 --17.500000000 35.500000000 7.500000000 --16.500000000 35.500000000 7.500000000 --15.500000000 35.500000000 7.500000000 --14.500000000 35.500000000 7.500000000 --13.500000000 35.500000000 7.500000000 --12.500000000 35.500000000 7.500000000 --11.500000000 35.500000000 7.500000000 --10.500000000 35.500000000 7.500000000 --9.500000000 35.500000000 7.500000000 --8.500000000 35.500000000 7.500000000 --7.500000000 35.500000000 7.500000000 --6.500000000 35.500000000 7.500000000 --5.500000000 35.500000000 7.500000000 --4.500000000 35.500000000 7.500000000 --3.500000000 35.500000000 7.500000000 --2.500000000 35.500000000 7.500000000 --1.500000000 35.500000000 7.500000000 --0.500000000 35.500000000 7.500000000 -0.500000000 35.500000000 7.500000000 -1.500000000 35.500000000 7.500000000 -2.500000000 35.500000000 7.500000000 -3.500000000 35.500000000 7.500000000 -4.500000000 35.500000000 7.500000000 -5.500000000 35.500000000 7.500000000 -6.500000000 35.500000000 7.500000000 -7.500000000 35.500000000 7.500000000 -8.500000000 35.500000000 7.500000000 -9.500000000 35.500000000 7.500000000 -10.500000000 35.500000000 7.500000000 -11.500000000 35.500000000 7.500000000 -12.500000000 35.500000000 7.500000000 -13.500000000 35.500000000 7.500000000 -14.500000000 35.500000000 7.500000000 -15.500000000 35.500000000 7.500000000 -16.500000000 35.500000000 7.500000000 -17.500000000 35.500000000 7.500000000 -18.500000000 35.500000000 7.500000000 -19.500000000 35.500000000 7.500000000 -20.500000000 35.500000000 7.500000000 -21.500000000 35.500000000 7.500000000 -22.500000000 35.500000000 7.500000000 -23.500000000 35.500000000 7.500000000 -24.500000000 35.500000000 7.500000000 -25.499996185 35.499996185 7.500000000 -26.499954224 35.499954224 7.500000000 -27.499591827 35.499591827 7.500000000 -28.497470856 35.497474670 7.500000000 
-29.488407135 35.488403320 7.500000000 -30.458978653 35.458980560 7.500000000 -31.384418488 35.384422302 7.500000000 -32.233222961 35.233222961 7.500000000 -32.981101990 34.981101990 7.500000000 --33.981101990 -35.981101990 8.500000000 --33.233226776 -36.233222961 8.500000000 --32.384422302 -36.384418488 8.500000000 --31.458978653 -36.458980560 8.500000000 --30.488407135 -36.488403320 8.500000000 --29.497472763 -36.497474670 8.500000000 --28.499593735 -36.499591827 8.500000000 --27.499954224 -36.499954224 8.500000000 --26.499996185 -36.499996185 8.500000000 --25.500000000 -36.500000000 8.500000000 --24.500000000 -36.500000000 8.500000000 --23.500000000 -36.500000000 8.500000000 --22.500000000 -36.500000000 8.500000000 --21.500000000 -36.500000000 8.500000000 --20.500000000 -36.500000000 8.500000000 --19.500000000 -36.500000000 8.500000000 --18.500000000 -36.500000000 8.500000000 --17.500000000 -36.500000000 8.500000000 --16.500000000 -36.500000000 8.500000000 --15.500000000 -36.500000000 8.500000000 --14.500000000 -36.500000000 8.500000000 --13.500000000 -36.500000000 8.500000000 --12.500000000 -36.500000000 8.500000000 --11.500000000 -36.500000000 8.500000000 --10.500000000 -36.500000000 8.500000000 --9.500000000 -36.500000000 8.500000000 --8.500000000 -36.500000000 8.500000000 --7.500000000 -36.500000000 8.500000000 --6.500000000 -36.500000000 8.500000000 --5.500000000 -36.500000000 8.500000000 --4.500000000 -36.500000000 8.500000000 --3.500000000 -36.500000000 8.500000000 --2.500000000 -36.500000000 8.500000000 --1.500000000 -36.500000000 8.500000000 --0.500000000 -36.500000000 8.500000000 -0.500000000 -36.500000000 8.500000000 -1.500000000 -36.500000000 8.500000000 -2.500000000 -36.500000000 8.500000000 -3.500000000 -36.500000000 8.500000000 -4.500000000 -36.500000000 8.500000000 -5.500000000 -36.500000000 8.500000000 -6.500000000 -36.500000000 8.500000000 -7.500000000 -36.500000000 8.500000000 -8.500000000 -36.500000000 8.500000000 -9.500000000 -36.500000000 8.500000000 -10.500000000 -36.500000000 8.500000000 -11.500000000 -36.500000000 8.500000000 -12.500000000 -36.500000000 8.500000000 -13.500000000 -36.500000000 8.500000000 -14.500000000 -36.500000000 8.500000000 -15.500000000 -36.500000000 8.500000000 -16.500000000 -36.500000000 8.500000000 -17.500000000 -36.500000000 8.500000000 -18.500000000 -36.500000000 8.500000000 -19.500000000 -36.500000000 8.500000000 -20.500000000 -36.500000000 8.500000000 -21.500000000 -36.500000000 8.500000000 -22.500000000 -36.500000000 8.500000000 -23.500000000 -36.500000000 8.500000000 -24.500000000 -36.500000000 8.500000000 -25.499996185 -36.499996185 8.500000000 -26.499954224 -36.499954224 8.500000000 -27.499591827 -36.499591827 8.500000000 -28.497470856 -36.497467041 8.500000000 -29.488407135 -36.488403320 8.500000000 -30.458978653 -36.458980560 8.500000000 -31.384418488 -36.384422302 8.500000000 -32.233222961 -36.233222961 8.500000000 -32.981101990 -35.981101990 8.500000000 --35.167964935 -35.167964935 8.500000000 --34.622871399 -35.622871399 8.500000000 -33.622871399 -35.622871399 8.500000000 -34.167964935 -35.167964935 8.500000000 --35.981101990 -33.981101990 8.500000000 --35.622871399 -34.622871399 8.500000000 -34.622871399 -34.622871399 8.500000000 -34.981101990 -33.981101990 8.500000000 --36.233222961 -33.233222961 8.500000000 -35.233222961 -33.233226776 8.500000000 --36.384418488 -32.384422302 8.500000000 -35.384418488 -32.384422302 8.500000000 --36.458976746 -31.458978653 8.500000000 -35.458980560 -31.458978653 8.500000000 --36.488403320 
-30.488407135 8.500000000 -35.488403320 -30.488407135 8.500000000 --36.497467041 -29.497472763 8.500000000 -35.497474670 -29.497472763 8.500000000 --36.499591827 -28.499593735 8.500000000 -35.499591827 -28.499593735 8.500000000 --36.499954224 -27.499954224 8.500000000 -35.499954224 -27.499954224 8.500000000 --36.499996185 -26.499996185 8.500000000 -35.499996185 -26.499996185 8.500000000 --36.500000000 -25.500000000 8.500000000 -35.500000000 -25.500000000 8.500000000 --36.500000000 -24.500000000 8.500000000 -35.500000000 -24.500000000 8.500000000 --36.500000000 -23.500000000 8.500000000 -35.500000000 -23.500000000 8.500000000 --36.500000000 -22.500000000 8.500000000 -35.500000000 -22.500000000 8.500000000 --36.500000000 -21.500000000 8.500000000 -35.500000000 -21.500000000 8.500000000 --36.500000000 -20.500000000 8.500000000 -35.500000000 -20.500000000 8.500000000 --36.500000000 -19.500000000 8.500000000 -35.500000000 -19.500000000 8.500000000 --36.500000000 -18.500000000 8.500000000 -35.500000000 -18.500000000 8.500000000 --36.500000000 -17.500000000 8.500000000 -35.500000000 -17.500000000 8.500000000 --36.500000000 -16.500000000 8.500000000 -35.500000000 -16.500000000 8.500000000 --36.500000000 -15.500000000 8.500000000 -35.500000000 -15.500000000 8.500000000 --36.500000000 -14.500000000 8.500000000 -35.500000000 -14.500000000 8.500000000 --36.500000000 -13.500000000 8.500000000 -35.500000000 -13.500000000 8.500000000 --36.500000000 -12.500000000 8.500000000 -35.500000000 -12.500000000 8.500000000 --36.500000000 -11.500000000 8.500000000 -35.500000000 -11.500000000 8.500000000 --36.500000000 -10.500000000 8.500000000 -35.500000000 -10.500000000 8.500000000 --36.500000000 -9.500000000 8.500000000 -35.500000000 -9.500000000 8.500000000 --36.500000000 -8.500000000 8.500000000 -35.500000000 -8.500000000 8.500000000 --36.500000000 -7.500000000 8.500000000 -35.500000000 -7.500000000 8.500000000 --36.500000000 -6.500000000 8.500000000 -35.500000000 -6.500000000 8.500000000 --36.500000000 -5.500000000 8.500000000 -35.500000000 -5.500000000 8.500000000 --36.500000000 -4.500000000 8.500000000 -35.500000000 -4.500000000 8.500000000 --36.500000000 -3.500000000 8.500000000 -35.500000000 -3.500000000 8.500000000 --36.500000000 -2.500000000 8.500000000 -35.500000000 -2.500000000 8.500000000 --36.500000000 -1.500000000 8.500000000 -35.500000000 -1.500000000 8.500000000 --36.500000000 -0.500000000 8.500000000 -35.500000000 -0.500000000 8.500000000 --36.500000000 0.500000000 8.500000000 -35.500000000 0.500000000 8.500000000 --36.500000000 1.500000000 8.500000000 -35.500000000 1.500000000 8.500000000 --36.500000000 2.500000000 8.500000000 -35.500000000 2.500000000 8.500000000 --36.500000000 3.500000000 8.500000000 -35.500000000 3.500000000 8.500000000 --36.500000000 4.500000000 8.500000000 -35.500000000 4.500000000 8.500000000 --36.500000000 5.500000000 8.500000000 -35.500000000 5.500000000 8.500000000 --36.500000000 6.500000000 8.500000000 -35.500000000 6.500000000 8.500000000 --36.500000000 7.500000000 8.500000000 -35.500000000 7.500000000 8.500000000 --36.500000000 8.500000000 8.500000000 -35.500000000 8.500000000 8.500000000 --36.500000000 9.500000000 8.500000000 -35.500000000 9.500000000 8.500000000 --36.500000000 10.500000000 8.500000000 -35.500000000 10.500000000 8.500000000 --36.500000000 11.500000000 8.500000000 -35.500000000 11.500000000 8.500000000 --36.500000000 12.500000000 8.500000000 -35.500000000 12.500000000 8.500000000 --36.500000000 13.500000000 8.500000000 -35.500000000 13.500000000 
8.500000000 --36.500000000 14.500000000 8.500000000 -35.500000000 14.500000000 8.500000000 --36.500000000 15.500000000 8.500000000 -35.500000000 15.500000000 8.500000000 --36.500000000 16.500000000 8.500000000 -35.500000000 16.500000000 8.500000000 --36.500000000 17.500000000 8.500000000 -35.500000000 17.500000000 8.500000000 --36.500000000 18.500000000 8.500000000 -35.500000000 18.500000000 8.500000000 --36.500000000 19.500000000 8.500000000 -35.500000000 19.500000000 8.500000000 --36.500000000 20.500000000 8.500000000 -35.500000000 20.500000000 8.500000000 --36.500000000 21.500000000 8.500000000 -35.500000000 21.500000000 8.500000000 --36.500000000 22.500000000 8.500000000 -35.500000000 22.500000000 8.500000000 --36.500000000 23.500000000 8.500000000 -35.500000000 23.500000000 8.500000000 --36.500000000 24.500000000 8.500000000 -35.500000000 24.500000000 8.500000000 --36.499996185 25.499996185 8.500000000 -35.499996185 25.499996185 8.500000000 --36.499954224 26.499954224 8.500000000 -35.499954224 26.499954224 8.500000000 --36.499591827 27.499591827 8.500000000 -35.499591827 27.499591827 8.500000000 --36.497474670 28.497470856 8.500000000 -35.497467041 28.497470856 8.500000000 --36.488403320 29.488407135 8.500000000 -35.488403320 29.488407135 8.500000000 --36.458980560 30.458978653 8.500000000 -35.458980560 30.458978653 8.500000000 --36.384422302 31.384418488 8.500000000 -35.384422302 31.384418488 8.500000000 --36.233222961 32.233222961 8.500000000 -35.233222961 32.233222961 8.500000000 --35.981101990 32.981101990 8.500000000 --35.622871399 33.622871399 8.500000000 -34.622871399 33.622871399 8.500000000 -34.981101990 32.981101990 8.500000000 --35.167964935 34.167964935 8.500000000 --34.622871399 34.622871399 8.500000000 -33.622871399 34.622871399 8.500000000 -34.167964935 34.167964935 8.500000000 --33.981101990 34.981101990 8.500000000 --33.233222961 35.233222961 8.500000000 --32.384422302 35.384418488 8.500000000 --31.458978653 35.458976746 8.500000000 --30.488407135 35.488403320 8.500000000 --29.497472763 35.497467041 8.500000000 --28.499593735 35.499591827 8.500000000 --27.499954224 35.499954224 8.500000000 --26.499996185 35.499996185 8.500000000 --25.500000000 35.500000000 8.500000000 --24.500000000 35.500000000 8.500000000 --23.500000000 35.500000000 8.500000000 --22.500000000 35.500000000 8.500000000 --21.500000000 35.500000000 8.500000000 --20.500000000 35.500000000 8.500000000 --19.500000000 35.500000000 8.500000000 --18.500000000 35.500000000 8.500000000 --17.500000000 35.500000000 8.500000000 --16.500000000 35.500000000 8.500000000 --15.500000000 35.500000000 8.500000000 --14.500000000 35.500000000 8.500000000 --13.500000000 35.500000000 8.500000000 --12.500000000 35.500000000 8.500000000 --11.500000000 35.500000000 8.500000000 --10.500000000 35.500000000 8.500000000 --9.500000000 35.500000000 8.500000000 --8.500000000 35.500000000 8.500000000 --7.500000000 35.500000000 8.500000000 --6.500000000 35.500000000 8.500000000 --5.500000000 35.500000000 8.500000000 --4.500000000 35.500000000 8.500000000 --3.500000000 35.500000000 8.500000000 --2.500000000 35.500000000 8.500000000 --1.500000000 35.500000000 8.500000000 --0.500000000 35.500000000 8.500000000 -0.500000000 35.500000000 8.500000000 -1.500000000 35.500000000 8.500000000 -2.500000000 35.500000000 8.500000000 -3.500000000 35.500000000 8.500000000 -4.500000000 35.500000000 8.500000000 -5.500000000 35.500000000 8.500000000 -6.500000000 35.500000000 8.500000000 -7.500000000 35.500000000 8.500000000 -8.500000000 35.500000000 
8.500000000 -9.500000000 35.500000000 8.500000000 -10.500000000 35.500000000 8.500000000 -11.500000000 35.500000000 8.500000000 -12.500000000 35.500000000 8.500000000 -13.500000000 35.500000000 8.500000000 -14.500000000 35.500000000 8.500000000 -15.500000000 35.500000000 8.500000000 -16.500000000 35.500000000 8.500000000 -17.500000000 35.500000000 8.500000000 -18.500000000 35.500000000 8.500000000 -19.500000000 35.500000000 8.500000000 -20.500000000 35.500000000 8.500000000 -21.500000000 35.500000000 8.500000000 -22.500000000 35.500000000 8.500000000 -23.500000000 35.500000000 8.500000000 -24.500000000 35.500000000 8.500000000 -25.499996185 35.499996185 8.500000000 -26.499954224 35.499954224 8.500000000 -27.499591827 35.499591827 8.500000000 -28.497470856 35.497474670 8.500000000 -29.488407135 35.488403320 8.500000000 -30.458978653 35.458980560 8.500000000 -31.384418488 35.384422302 8.500000000 -32.233222961 35.233222961 8.500000000 -32.981101990 34.981101990 8.500000000 --33.981101990 -35.981101990 9.500000000 --33.233226776 -36.233222961 9.500000000 --32.384422302 -36.384418488 9.500000000 --31.458978653 -36.458980560 9.500000000 --30.488407135 -36.488403320 9.500000000 --29.497472763 -36.497474670 9.500000000 --28.499593735 -36.499591827 9.500000000 --27.499954224 -36.499954224 9.500000000 --26.499996185 -36.499996185 9.500000000 --25.500000000 -36.500000000 9.500000000 --24.500000000 -36.500000000 9.500000000 --23.500000000 -36.500000000 9.500000000 --22.500000000 -36.500000000 9.500000000 --21.500000000 -36.500000000 9.500000000 --20.500000000 -36.500000000 9.500000000 --19.500000000 -36.500000000 9.500000000 --18.500000000 -36.500000000 9.500000000 --17.500000000 -36.500000000 9.500000000 --16.500000000 -36.500000000 9.500000000 --15.500000000 -36.500000000 9.500000000 --14.500000000 -36.500000000 9.500000000 --13.500000000 -36.500000000 9.500000000 --12.500000000 -36.500000000 9.500000000 --11.500000000 -36.500000000 9.500000000 --10.500000000 -36.500000000 9.500000000 --9.500000000 -36.500000000 9.500000000 --8.500000000 -36.500000000 9.500000000 --7.500000000 -36.500000000 9.500000000 --6.500000000 -36.500000000 9.500000000 --5.500000000 -36.500000000 9.500000000 --4.500000000 -36.500000000 9.500000000 --3.500000000 -36.500000000 9.500000000 --2.500000000 -36.500000000 9.500000000 --1.500000000 -36.500000000 9.500000000 --0.500000000 -36.500000000 9.500000000 -0.500000000 -36.500000000 9.500000000 -1.500000000 -36.500000000 9.500000000 -2.500000000 -36.500000000 9.500000000 -3.500000000 -36.500000000 9.500000000 -4.500000000 -36.500000000 9.500000000 -5.500000000 -36.500000000 9.500000000 -6.500000000 -36.500000000 9.500000000 -7.500000000 -36.500000000 9.500000000 -8.500000000 -36.500000000 9.500000000 -9.500000000 -36.500000000 9.500000000 -10.500000000 -36.500000000 9.500000000 -11.500000000 -36.500000000 9.500000000 -12.500000000 -36.500000000 9.500000000 -13.500000000 -36.500000000 9.500000000 -14.500000000 -36.500000000 9.500000000 -15.500000000 -36.500000000 9.500000000 -16.500000000 -36.500000000 9.500000000 -17.500000000 -36.500000000 9.500000000 -18.500000000 -36.500000000 9.500000000 -19.500000000 -36.500000000 9.500000000 -20.500000000 -36.500000000 9.500000000 -21.500000000 -36.500000000 9.500000000 -22.500000000 -36.500000000 9.500000000 -23.500000000 -36.500000000 9.500000000 -24.500000000 -36.500000000 9.500000000 -25.499996185 -36.499996185 9.500000000 -26.499954224 -36.499954224 9.500000000 -27.499591827 -36.499591827 9.500000000 -28.497470856 -36.497467041 
9.500000000 -29.488407135 -36.488403320 9.500000000 -30.458978653 -36.458980560 9.500000000 -31.384418488 -36.384422302 9.500000000 -32.233222961 -36.233222961 9.500000000 -32.981101990 -35.981101990 9.500000000 --35.167964935 -35.167964935 9.500000000 --34.622871399 -35.622871399 9.500000000 -33.622871399 -35.622871399 9.500000000 -34.167964935 -35.167964935 9.500000000 --35.981101990 -33.981101990 9.500000000 --35.622871399 -34.622871399 9.500000000 -34.622871399 -34.622871399 9.500000000 -34.981101990 -33.981101990 9.500000000 --36.233222961 -33.233222961 9.500000000 -35.233222961 -33.233226776 9.500000000 --36.384418488 -32.384422302 9.500000000 -35.384418488 -32.384422302 9.500000000 --36.458976746 -31.458978653 9.500000000 -35.458980560 -31.458978653 9.500000000 --36.488403320 -30.488407135 9.500000000 -35.488403320 -30.488407135 9.500000000 --36.497467041 -29.497472763 9.500000000 -35.497474670 -29.497472763 9.500000000 --36.499591827 -28.499593735 9.500000000 -35.499591827 -28.499593735 9.500000000 --36.499954224 -27.499954224 9.500000000 -35.499954224 -27.499954224 9.500000000 --36.499996185 -26.499996185 9.500000000 -35.499996185 -26.499996185 9.500000000 --36.500000000 -25.500000000 9.500000000 -35.500000000 -25.500000000 9.500000000 --36.500000000 -24.500000000 9.500000000 -35.500000000 -24.500000000 9.500000000 --36.500000000 -23.500000000 9.500000000 -35.500000000 -23.500000000 9.500000000 --36.500000000 -22.500000000 9.500000000 -35.500000000 -22.500000000 9.500000000 --36.500000000 -21.500000000 9.500000000 -35.500000000 -21.500000000 9.500000000 --36.500000000 -20.500000000 9.500000000 -35.500000000 -20.500000000 9.500000000 --36.500000000 -19.500000000 9.500000000 -35.500000000 -19.500000000 9.500000000 --36.500000000 -18.500000000 9.500000000 -35.500000000 -18.500000000 9.500000000 --36.500000000 -17.500000000 9.500000000 -35.500000000 -17.500000000 9.500000000 --36.500000000 -16.500000000 9.500000000 -35.500000000 -16.500000000 9.500000000 --36.500000000 -15.500000000 9.500000000 -35.500000000 -15.500000000 9.500000000 --36.500000000 -14.500000000 9.500000000 -35.500000000 -14.500000000 9.500000000 --36.500000000 -13.500000000 9.500000000 -35.500000000 -13.500000000 9.500000000 --36.500000000 -12.500000000 9.500000000 -35.500000000 -12.500000000 9.500000000 --36.500000000 -11.500000000 9.500000000 -35.500000000 -11.500000000 9.500000000 --36.500000000 -10.500000000 9.500000000 -35.500000000 -10.500000000 9.500000000 --36.500000000 -9.500000000 9.500000000 -35.500000000 -9.500000000 9.500000000 --36.500000000 -8.500000000 9.500000000 -35.500000000 -8.500000000 9.500000000 --36.500000000 -7.500000000 9.500000000 -35.500000000 -7.500000000 9.500000000 --36.500000000 -6.500000000 9.500000000 -35.500000000 -6.500000000 9.500000000 --36.500000000 -5.500000000 9.500000000 -35.500000000 -5.500000000 9.500000000 --36.500000000 -4.500000000 9.500000000 -35.500000000 -4.500000000 9.500000000 --36.500000000 -3.500000000 9.500000000 -35.500000000 -3.500000000 9.500000000 --36.500000000 -2.500000000 9.500000000 -35.500000000 -2.500000000 9.500000000 --36.500000000 -1.500000000 9.500000000 -35.500000000 -1.500000000 9.500000000 --36.500000000 -0.500000000 9.500000000 -35.500000000 -0.500000000 9.500000000 --36.500000000 0.500000000 9.500000000 -35.500000000 0.500000000 9.500000000 --36.500000000 1.500000000 9.500000000 -35.500000000 1.500000000 9.500000000 --36.500000000 2.500000000 9.500000000 -35.500000000 2.500000000 9.500000000 --36.500000000 3.500000000 9.500000000 -35.500000000 
3.500000000 9.500000000 --36.500000000 4.500000000 9.500000000 -35.500000000 4.500000000 9.500000000 --36.500000000 5.500000000 9.500000000 -35.500000000 5.500000000 9.500000000 --36.500000000 6.500000000 9.500000000 -35.500000000 6.500000000 9.500000000 --36.500000000 7.500000000 9.500000000 -35.500000000 7.500000000 9.500000000 --36.500000000 8.500000000 9.500000000 -35.500000000 8.500000000 9.500000000 --36.500000000 9.500000000 9.500000000 -35.500000000 9.500000000 9.500000000 --36.500000000 10.500000000 9.500000000 -35.500000000 10.500000000 9.500000000 --36.500000000 11.500000000 9.500000000 -35.500000000 11.500000000 9.500000000 --36.500000000 12.500000000 9.500000000 -35.500000000 12.500000000 9.500000000 --36.500000000 13.500000000 9.500000000 -35.500000000 13.500000000 9.500000000 --36.500000000 14.500000000 9.500000000 -35.500000000 14.500000000 9.500000000 --36.500000000 15.500000000 9.500000000 -35.500000000 15.500000000 9.500000000 --36.500000000 16.500000000 9.500000000 -35.500000000 16.500000000 9.500000000 --36.500000000 17.500000000 9.500000000 -35.500000000 17.500000000 9.500000000 --36.500000000 18.500000000 9.500000000 -35.500000000 18.500000000 9.500000000 --36.500000000 19.500000000 9.500000000 -35.500000000 19.500000000 9.500000000 --36.500000000 20.500000000 9.500000000 -35.500000000 20.500000000 9.500000000 --36.500000000 21.500000000 9.500000000 -35.500000000 21.500000000 9.500000000 --36.500000000 22.500000000 9.500000000 -35.500000000 22.500000000 9.500000000 --36.500000000 23.500000000 9.500000000 -35.500000000 23.500000000 9.500000000 --36.500000000 24.500000000 9.500000000 -35.500000000 24.500000000 9.500000000 --36.499996185 25.499996185 9.500000000 -35.499996185 25.499996185 9.500000000 --36.499954224 26.499954224 9.500000000 -35.499954224 26.499954224 9.500000000 --36.499591827 27.499591827 9.500000000 -35.499591827 27.499591827 9.500000000 --36.497474670 28.497470856 9.500000000 -35.497467041 28.497470856 9.500000000 --36.488403320 29.488407135 9.500000000 -35.488403320 29.488407135 9.500000000 --36.458980560 30.458978653 9.500000000 -35.458980560 30.458978653 9.500000000 --36.384422302 31.384418488 9.500000000 -35.384422302 31.384418488 9.500000000 --36.233222961 32.233222961 9.500000000 -35.233222961 32.233222961 9.500000000 --35.981101990 32.981101990 9.500000000 --35.622871399 33.622871399 9.500000000 -34.622871399 33.622871399 9.500000000 -34.981101990 32.981101990 9.500000000 --35.167964935 34.167964935 9.500000000 --34.622871399 34.622871399 9.500000000 -33.622871399 34.622871399 9.500000000 -34.167964935 34.167964935 9.500000000 --33.981101990 34.981101990 9.500000000 --33.233222961 35.233222961 9.500000000 --32.384422302 35.384418488 9.500000000 --31.458978653 35.458976746 9.500000000 --30.488407135 35.488403320 9.500000000 --29.497472763 35.497467041 9.500000000 --28.499593735 35.499591827 9.500000000 --27.499954224 35.499954224 9.500000000 --26.499996185 35.499996185 9.500000000 --25.500000000 35.500000000 9.500000000 --24.500000000 35.500000000 9.500000000 --23.500000000 35.500000000 9.500000000 --22.500000000 35.500000000 9.500000000 --21.500000000 35.500000000 9.500000000 --20.500000000 35.500000000 9.500000000 --19.500000000 35.500000000 9.500000000 --18.500000000 35.500000000 9.500000000 --17.500000000 35.500000000 9.500000000 --16.500000000 35.500000000 9.500000000 --15.500000000 35.500000000 9.500000000 --14.500000000 35.500000000 9.500000000 --13.500000000 35.500000000 9.500000000 --12.500000000 35.500000000 9.500000000 --11.500000000 
35.500000000 9.500000000 --10.500000000 35.500000000 9.500000000 --9.500000000 35.500000000 9.500000000 --8.500000000 35.500000000 9.500000000 --7.500000000 35.500000000 9.500000000 --6.500000000 35.500000000 9.500000000 --5.500000000 35.500000000 9.500000000 --4.500000000 35.500000000 9.500000000 --3.500000000 35.500000000 9.500000000 --2.500000000 35.500000000 9.500000000 --1.500000000 35.500000000 9.500000000 --0.500000000 35.500000000 9.500000000 -0.500000000 35.500000000 9.500000000 -1.500000000 35.500000000 9.500000000 -2.500000000 35.500000000 9.500000000 -3.500000000 35.500000000 9.500000000 -4.500000000 35.500000000 9.500000000 -5.500000000 35.500000000 9.500000000 -6.500000000 35.500000000 9.500000000 -7.500000000 35.500000000 9.500000000 -8.500000000 35.500000000 9.500000000 -9.500000000 35.500000000 9.500000000 -10.500000000 35.500000000 9.500000000 -11.500000000 35.500000000 9.500000000 -12.500000000 35.500000000 9.500000000 -13.500000000 35.500000000 9.500000000 -14.500000000 35.500000000 9.500000000 -15.500000000 35.500000000 9.500000000 -16.500000000 35.500000000 9.500000000 -17.500000000 35.500000000 9.500000000 -18.500000000 35.500000000 9.500000000 -19.500000000 35.500000000 9.500000000 -20.500000000 35.500000000 9.500000000 -21.500000000 35.500000000 9.500000000 -22.500000000 35.500000000 9.500000000 -23.500000000 35.500000000 9.500000000 -24.500000000 35.500000000 9.500000000 -25.499996185 35.499996185 9.500000000 -26.499954224 35.499954224 9.500000000 -27.499591827 35.499591827 9.500000000 -28.497470856 35.497474670 9.500000000 -29.488407135 35.488403320 9.500000000 -30.458978653 35.458980560 9.500000000 -31.384418488 35.384422302 9.500000000 -32.233222961 35.233222961 9.500000000 -32.981101990 34.981101990 9.500000000 --33.981101990 -35.981101990 10.500000000 --33.233226776 -36.233222961 10.500000000 --32.384422302 -36.384418488 10.500000000 --31.458978653 -36.458980560 10.500000000 --30.488407135 -36.488403320 10.500000000 --29.497472763 -36.497474670 10.500000000 --28.499593735 -36.499591827 10.500000000 --27.499954224 -36.499954224 10.500000000 --26.499996185 -36.499996185 10.500000000 --25.500000000 -36.500000000 10.500000000 --24.500000000 -36.500000000 10.500000000 --23.500000000 -36.500000000 10.500000000 --22.500000000 -36.500000000 10.500000000 --21.500000000 -36.500000000 10.500000000 --20.500000000 -36.500000000 10.500000000 --19.500000000 -36.500000000 10.500000000 --18.500000000 -36.500000000 10.500000000 --17.500000000 -36.500000000 10.500000000 --16.500000000 -36.500000000 10.500000000 --15.500000000 -36.500000000 10.500000000 --14.500000000 -36.500000000 10.500000000 --13.500000000 -36.500000000 10.500000000 --12.500000000 -36.500000000 10.500000000 --11.500000000 -36.500000000 10.500000000 --10.500000000 -36.500000000 10.500000000 --9.500000000 -36.500000000 10.500000000 --8.500000000 -36.500000000 10.500000000 --7.500000000 -36.500000000 10.500000000 --6.500000000 -36.500000000 10.500000000 --5.500000000 -36.500000000 10.500000000 --4.500000000 -36.500000000 10.500000000 --3.500000000 -36.500000000 10.500000000 --2.500000000 -36.500000000 10.500000000 --1.500000000 -36.500000000 10.500000000 --0.500000000 -36.500000000 10.500000000 -0.500000000 -36.500000000 10.500000000 -1.500000000 -36.500000000 10.500000000 -2.500000000 -36.500000000 10.500000000 -3.500000000 -36.500000000 10.500000000 -4.500000000 -36.500000000 10.500000000 -5.500000000 -36.500000000 10.500000000 -6.500000000 -36.500000000 10.500000000 -7.500000000 -36.500000000 10.500000000 
-8.500000000 -36.500000000 10.500000000 -9.500000000 -36.500000000 10.500000000 -10.500000000 -36.500000000 10.500000000 -11.500000000 -36.500000000 10.500000000 -12.500000000 -36.500000000 10.500000000 -13.500000000 -36.500000000 10.500000000 -14.500000000 -36.500000000 10.500000000 -15.500000000 -36.500000000 10.500000000 -16.500000000 -36.500000000 10.500000000 -17.500000000 -36.500000000 10.500000000 -18.500000000 -36.500000000 10.500000000 -19.500000000 -36.500000000 10.500000000 -20.500000000 -36.500000000 10.500000000 -21.500000000 -36.500000000 10.500000000 -22.500000000 -36.500000000 10.500000000 -23.500000000 -36.500000000 10.500000000 -24.500000000 -36.500000000 10.500000000 -25.499996185 -36.499996185 10.500000000 -26.499954224 -36.499954224 10.500000000 -27.499591827 -36.499591827 10.500000000 -28.497470856 -36.497467041 10.500000000 -29.488407135 -36.488403320 10.500000000 -30.458978653 -36.458980560 10.500000000 -31.384418488 -36.384422302 10.500000000 -32.233222961 -36.233222961 10.500000000 -32.981101990 -35.981101990 10.500000000 --35.167964935 -35.167964935 10.500000000 --34.622871399 -35.622871399 10.500000000 -33.622871399 -35.622871399 10.500000000 -34.167964935 -35.167964935 10.500000000 --35.981101990 -33.981101990 10.500000000 --35.622871399 -34.622871399 10.500000000 -34.622871399 -34.622871399 10.500000000 -34.981101990 -33.981101990 10.500000000 --36.233222961 -33.233222961 10.500000000 -35.233222961 -33.233226776 10.500000000 --36.384418488 -32.384422302 10.500000000 -35.384418488 -32.384422302 10.500000000 --36.458976746 -31.458978653 10.500000000 -35.458980560 -31.458978653 10.500000000 --36.488403320 -30.488407135 10.500000000 -35.488403320 -30.488407135 10.500000000 --36.497467041 -29.497472763 10.500000000 -35.497474670 -29.497472763 10.500000000 --36.499591827 -28.499593735 10.500000000 -35.499591827 -28.499593735 10.500000000 --36.499954224 -27.499954224 10.500000000 -35.499954224 -27.499954224 10.500000000 --36.499996185 -26.499996185 10.500000000 -35.499996185 -26.499996185 10.500000000 --36.500000000 -25.500000000 10.500000000 -35.500000000 -25.500000000 10.500000000 --36.500000000 -24.500000000 10.500000000 -35.500000000 -24.500000000 10.500000000 --36.500000000 -23.500000000 10.500000000 -35.500000000 -23.500000000 10.500000000 --36.500000000 -22.500000000 10.500000000 -35.500000000 -22.500000000 10.500000000 --36.500000000 -21.500000000 10.500000000 -35.500000000 -21.500000000 10.500000000 --36.500000000 -20.500000000 10.500000000 -35.500000000 -20.500000000 10.500000000 --36.500000000 -19.500000000 10.500000000 -35.500000000 -19.500000000 10.500000000 --36.500000000 -18.500000000 10.500000000 -35.500000000 -18.500000000 10.500000000 --36.500000000 -17.500000000 10.500000000 -35.500000000 -17.500000000 10.500000000 --36.500000000 -16.500000000 10.500000000 -35.500000000 -16.500000000 10.500000000 --36.500000000 -15.500000000 10.500000000 -35.500000000 -15.500000000 10.500000000 --36.500000000 -14.500000000 10.500000000 -35.500000000 -14.500000000 10.500000000 --36.500000000 -13.500000000 10.500000000 -35.500000000 -13.500000000 10.500000000 --36.500000000 -12.500000000 10.500000000 -35.500000000 -12.500000000 10.500000000 --36.500000000 -11.500000000 10.500000000 -35.500000000 -11.500000000 10.500000000 --36.500000000 -10.500000000 10.500000000 -35.500000000 -10.500000000 10.500000000 --36.500000000 -9.500000000 10.500000000 -35.500000000 -9.500000000 10.500000000 --36.500000000 -8.500000000 10.500000000 -35.500000000 -8.500000000 10.500000000 
--36.500000000 -7.500000000 10.500000000 -35.500000000 -7.500000000 10.500000000 --36.500000000 -6.500000000 10.500000000 -35.500000000 -6.500000000 10.500000000 --36.500000000 -5.500000000 10.500000000 -35.500000000 -5.500000000 10.500000000 --36.500000000 -4.500000000 10.500000000 -35.500000000 -4.500000000 10.500000000 --36.500000000 -3.500000000 10.500000000 -35.500000000 -3.500000000 10.500000000 --36.500000000 -2.500000000 10.500000000 -35.500000000 -2.500000000 10.500000000 --36.500000000 -1.500000000 10.500000000 -35.500000000 -1.500000000 10.500000000 --36.500000000 -0.500000000 10.500000000 -35.500000000 -0.500000000 10.500000000 --36.500000000 0.500000000 10.500000000 -35.500000000 0.500000000 10.500000000 --36.500000000 1.500000000 10.500000000 -35.500000000 1.500000000 10.500000000 --36.500000000 2.500000000 10.500000000 -35.500000000 2.500000000 10.500000000 --36.500000000 3.500000000 10.500000000 -35.500000000 3.500000000 10.500000000 --36.500000000 4.500000000 10.500000000 -35.500000000 4.500000000 10.500000000 --36.500000000 5.500000000 10.500000000 -35.500000000 5.500000000 10.500000000 --36.500000000 6.500000000 10.500000000 -35.500000000 6.500000000 10.500000000 --36.500000000 7.500000000 10.500000000 -35.500000000 7.500000000 10.500000000 --36.500000000 8.500000000 10.500000000 -35.500000000 8.500000000 10.500000000 --36.500000000 9.500000000 10.500000000 -35.500000000 9.500000000 10.500000000 --36.500000000 10.500000000 10.500000000 -35.500000000 10.500000000 10.500000000 --36.500000000 11.500000000 10.500000000 -35.500000000 11.500000000 10.500000000 --36.500000000 12.500000000 10.500000000 -35.500000000 12.500000000 10.500000000 --36.500000000 13.500000000 10.500000000 -35.500000000 13.500000000 10.500000000 --36.500000000 14.500000000 10.500000000 -35.500000000 14.500000000 10.500000000 --36.500000000 15.500000000 10.500000000 -35.500000000 15.500000000 10.500000000 --36.500000000 16.500000000 10.500000000 -35.500000000 16.500000000 10.500000000 --36.500000000 17.500000000 10.500000000 -35.500000000 17.500000000 10.500000000 --36.500000000 18.500000000 10.500000000 -35.500000000 18.500000000 10.500000000 --36.500000000 19.500000000 10.500000000 -35.500000000 19.500000000 10.500000000 --36.500000000 20.500000000 10.500000000 -35.500000000 20.500000000 10.500000000 --36.500000000 21.500000000 10.500000000 -35.500000000 21.500000000 10.500000000 --36.500000000 22.500000000 10.500000000 -35.500000000 22.500000000 10.500000000 --36.500000000 23.500000000 10.500000000 -35.500000000 23.500000000 10.500000000 --36.500000000 24.500000000 10.500000000 -35.500000000 24.500000000 10.500000000 --36.499996185 25.499996185 10.500000000 -35.499996185 25.499996185 10.500000000 --36.499954224 26.499954224 10.500000000 -35.499954224 26.499954224 10.500000000 --36.499591827 27.499591827 10.500000000 -35.499591827 27.499591827 10.500000000 --36.497474670 28.497470856 10.500000000 -35.497467041 28.497470856 10.500000000 --36.488403320 29.488407135 10.500000000 -35.488403320 29.488407135 10.500000000 --36.458980560 30.458978653 10.500000000 -35.458980560 30.458978653 10.500000000 --36.384422302 31.384418488 10.500000000 -35.384422302 31.384418488 10.500000000 --36.233222961 32.233222961 10.500000000 -35.233222961 32.233222961 10.500000000 --35.981101990 32.981101990 10.500000000 --35.622871399 33.622871399 10.500000000 -34.622871399 33.622871399 10.500000000 -34.981101990 32.981101990 10.500000000 --35.167964935 34.167964935 10.500000000 --34.622871399 34.622871399 10.500000000 
-33.622871399 34.622871399 10.500000000 -34.167964935 34.167964935 10.500000000 --33.981101990 34.981101990 10.500000000 --33.233222961 35.233222961 10.500000000 --32.384422302 35.384418488 10.500000000 --31.458978653 35.458976746 10.500000000 --30.488407135 35.488403320 10.500000000 --29.497472763 35.497467041 10.500000000 --28.499593735 35.499591827 10.500000000 --27.499954224 35.499954224 10.500000000 --26.499996185 35.499996185 10.500000000 --25.500000000 35.500000000 10.500000000 --24.500000000 35.500000000 10.500000000 --23.500000000 35.500000000 10.500000000 --22.500000000 35.500000000 10.500000000 --21.500000000 35.500000000 10.500000000 --20.500000000 35.500000000 10.500000000 --19.500000000 35.500000000 10.500000000 --18.500000000 35.500000000 10.500000000 --17.500000000 35.500000000 10.500000000 --16.500000000 35.500000000 10.500000000 --15.500000000 35.500000000 10.500000000 --14.500000000 35.500000000 10.500000000 --13.500000000 35.500000000 10.500000000 --12.500000000 35.500000000 10.500000000 --11.500000000 35.500000000 10.500000000 --10.500000000 35.500000000 10.500000000 --9.500000000 35.500000000 10.500000000 --8.500000000 35.500000000 10.500000000 --7.500000000 35.500000000 10.500000000 --6.500000000 35.500000000 10.500000000 --5.500000000 35.500000000 10.500000000 --4.500000000 35.500000000 10.500000000 --3.500000000 35.500000000 10.500000000 --2.500000000 35.500000000 10.500000000 --1.500000000 35.500000000 10.500000000 --0.500000000 35.500000000 10.500000000 -0.500000000 35.500000000 10.500000000 -1.500000000 35.500000000 10.500000000 -2.500000000 35.500000000 10.500000000 -3.500000000 35.500000000 10.500000000 -4.500000000 35.500000000 10.500000000 -5.500000000 35.500000000 10.500000000 -6.500000000 35.500000000 10.500000000 -7.500000000 35.500000000 10.500000000 -8.500000000 35.500000000 10.500000000 -9.500000000 35.500000000 10.500000000 -10.500000000 35.500000000 10.500000000 -11.500000000 35.500000000 10.500000000 -12.500000000 35.500000000 10.500000000 -13.500000000 35.500000000 10.500000000 -14.500000000 35.500000000 10.500000000 -15.500000000 35.500000000 10.500000000 -16.500000000 35.500000000 10.500000000 -17.500000000 35.500000000 10.500000000 -18.500000000 35.500000000 10.500000000 -19.500000000 35.500000000 10.500000000 -20.500000000 35.500000000 10.500000000 -21.500000000 35.500000000 10.500000000 -22.500000000 35.500000000 10.500000000 -23.500000000 35.500000000 10.500000000 -24.500000000 35.500000000 10.500000000 -25.499996185 35.499996185 10.500000000 -26.499954224 35.499954224 10.500000000 -27.499591827 35.499591827 10.500000000 -28.497470856 35.497474670 10.500000000 -29.488407135 35.488403320 10.500000000 -30.458978653 35.458980560 10.500000000 -31.384418488 35.384422302 10.500000000 -32.233222961 35.233222961 10.500000000 -32.981101990 34.981101990 10.500000000 --33.981101990 -35.981101990 11.500000000 --33.233226776 -36.233222961 11.500000000 --32.384422302 -36.384418488 11.500000000 --31.458978653 -36.458980560 11.500000000 --30.488407135 -36.488403320 11.500000000 --29.497472763 -36.497474670 11.500000000 --28.499593735 -36.499591827 11.500000000 --27.499954224 -36.499954224 11.500000000 --26.499996185 -36.499996185 11.500000000 --25.500000000 -36.500000000 11.500000000 --24.500000000 -36.500000000 11.500000000 --23.500000000 -36.500000000 11.500000000 --22.500000000 -36.500000000 11.500000000 --21.500000000 -36.500000000 11.500000000 --20.500000000 -36.500000000 11.500000000 --19.500000000 -36.500000000 11.500000000 --18.500000000 
-36.500000000 11.500000000 --17.500000000 -36.500000000 11.500000000 --16.500000000 -36.500000000 11.500000000 --15.500000000 -36.500000000 11.500000000 --14.500000000 -36.500000000 11.500000000 --13.500000000 -36.500000000 11.500000000 --12.500000000 -36.500000000 11.500000000 --11.500000000 -36.500000000 11.500000000 --10.500000000 -36.500000000 11.500000000 --9.500000000 -36.500000000 11.500000000 --8.500000000 -36.500000000 11.500000000 --7.500000000 -36.500000000 11.500000000 --6.500000000 -36.500000000 11.500000000 --5.500000000 -36.500000000 11.500000000 --4.500000000 -36.500000000 11.500000000 --3.500000000 -36.500000000 11.500000000 --2.500000000 -36.500000000 11.500000000 --1.500000000 -36.500000000 11.500000000 --0.500000000 -36.500000000 11.500000000 -0.500000000 -36.500000000 11.500000000 -1.500000000 -36.500000000 11.500000000 -2.500000000 -36.500000000 11.500000000 -3.500000000 -36.500000000 11.500000000 -4.500000000 -36.500000000 11.500000000 -5.500000000 -36.500000000 11.500000000 -6.500000000 -36.500000000 11.500000000 -7.500000000 -36.500000000 11.500000000 -8.500000000 -36.500000000 11.500000000 -9.500000000 -36.500000000 11.500000000 -10.500000000 -36.500000000 11.500000000 -11.500000000 -36.500000000 11.500000000 -12.500000000 -36.500000000 11.500000000 -13.500000000 -36.500000000 11.500000000 -14.500000000 -36.500000000 11.500000000 -15.500000000 -36.500000000 11.500000000 -16.500000000 -36.500000000 11.500000000 -17.500000000 -36.500000000 11.500000000 -18.500000000 -36.500000000 11.500000000 -19.500000000 -36.500000000 11.500000000 -20.500000000 -36.500000000 11.500000000 -21.500000000 -36.500000000 11.500000000 -22.500000000 -36.500000000 11.500000000 -23.500000000 -36.500000000 11.500000000 -24.500000000 -36.500000000 11.500000000 -25.499996185 -36.499996185 11.500000000 -26.499954224 -36.499954224 11.500000000 -27.499591827 -36.499591827 11.500000000 -28.497470856 -36.497467041 11.500000000 -29.488407135 -36.488403320 11.500000000 -30.458978653 -36.458980560 11.500000000 -31.384418488 -36.384422302 11.500000000 -32.233222961 -36.233222961 11.500000000 -32.981101990 -35.981101990 11.500000000 --35.167964935 -35.167964935 11.500000000 --34.622871399 -35.622871399 11.500000000 -33.622871399 -35.622871399 11.500000000 -34.167964935 -35.167964935 11.500000000 --35.981101990 -33.981101990 11.500000000 --35.622871399 -34.622871399 11.500000000 -34.622871399 -34.622871399 11.500000000 -34.981101990 -33.981101990 11.500000000 --36.233222961 -33.233222961 11.500000000 -35.233222961 -33.233226776 11.500000000 --36.384418488 -32.384422302 11.500000000 -35.384418488 -32.384422302 11.500000000 --36.458976746 -31.458978653 11.500000000 -35.458980560 -31.458978653 11.500000000 --36.488403320 -30.488407135 11.500000000 -35.488403320 -30.488407135 11.500000000 --36.497467041 -29.497472763 11.500000000 -35.497474670 -29.497472763 11.500000000 --36.499591827 -28.499593735 11.500000000 -35.499591827 -28.499593735 11.500000000 --36.499954224 -27.499954224 11.500000000 -35.499954224 -27.499954224 11.500000000 --36.499996185 -26.499996185 11.500000000 -35.499996185 -26.499996185 11.500000000 --36.500000000 -25.500000000 11.500000000 -35.500000000 -25.500000000 11.500000000 --36.500000000 -24.500000000 11.500000000 -35.500000000 -24.500000000 11.500000000 --36.500000000 -23.500000000 11.500000000 -35.500000000 -23.500000000 11.500000000 --36.500000000 -22.500000000 11.500000000 -35.500000000 -22.500000000 11.500000000 --36.500000000 -21.500000000 11.500000000 -35.500000000 -21.500000000 
11.500000000 --36.500000000 -20.500000000 11.500000000 -35.500000000 -20.500000000 11.500000000 --36.500000000 -19.500000000 11.500000000 -35.500000000 -19.500000000 11.500000000 --36.500000000 -18.500000000 11.500000000 -35.500000000 -18.500000000 11.500000000 --36.500000000 -17.500000000 11.500000000 -35.500000000 -17.500000000 11.500000000 --36.500000000 -16.500000000 11.500000000 -35.500000000 -16.500000000 11.500000000 --36.500000000 -15.500000000 11.500000000 -35.500000000 -15.500000000 11.500000000 --36.500000000 -14.500000000 11.500000000 -35.500000000 -14.500000000 11.500000000 --36.500000000 -13.500000000 11.500000000 -35.500000000 -13.500000000 11.500000000 --36.500000000 -12.500000000 11.500000000 -35.500000000 -12.500000000 11.500000000 --36.500000000 -11.500000000 11.500000000 -35.500000000 -11.500000000 11.500000000 --36.500000000 -10.500000000 11.500000000 -35.500000000 -10.500000000 11.500000000 --36.500000000 -9.500000000 11.500000000 -35.500000000 -9.500000000 11.500000000 --36.500000000 -8.500000000 11.500000000 -35.500000000 -8.500000000 11.500000000 --36.500000000 -7.500000000 11.500000000 -35.500000000 -7.500000000 11.500000000 --36.500000000 -6.500000000 11.500000000 -35.500000000 -6.500000000 11.500000000 --36.500000000 -5.500000000 11.500000000 -35.500000000 -5.500000000 11.500000000 --36.500000000 -4.500000000 11.500000000 -35.500000000 -4.500000000 11.500000000 --36.500000000 -3.500000000 11.500000000 -35.500000000 -3.500000000 11.500000000 --36.500000000 -2.500000000 11.500000000 -35.500000000 -2.500000000 11.500000000 --36.500000000 -1.500000000 11.500000000 -35.500000000 -1.500000000 11.500000000 --36.500000000 -0.500000000 11.500000000 -35.500000000 -0.500000000 11.500000000 --36.500000000 0.500000000 11.500000000 -35.500000000 0.500000000 11.500000000 --36.500000000 1.500000000 11.500000000 -35.500000000 1.500000000 11.500000000 --36.500000000 2.500000000 11.500000000 -35.500000000 2.500000000 11.500000000 --36.500000000 3.500000000 11.500000000 -35.500000000 3.500000000 11.500000000 --36.500000000 4.500000000 11.500000000 -35.500000000 4.500000000 11.500000000 --36.500000000 5.500000000 11.500000000 -35.500000000 5.500000000 11.500000000 --36.500000000 6.500000000 11.500000000 -35.500000000 6.500000000 11.500000000 --36.500000000 7.500000000 11.500000000 -35.500000000 7.500000000 11.500000000 --36.500000000 8.500000000 11.500000000 -35.500000000 8.500000000 11.500000000 --36.500000000 9.500000000 11.500000000 -35.500000000 9.500000000 11.500000000 --36.500000000 10.500000000 11.500000000 -35.500000000 10.500000000 11.500000000 --36.500000000 11.500000000 11.500000000 -35.500000000 11.500000000 11.500000000 --36.500000000 12.500000000 11.500000000 -35.500000000 12.500000000 11.500000000 --36.500000000 13.500000000 11.500000000 -35.500000000 13.500000000 11.500000000 --36.500000000 14.500000000 11.500000000 -35.500000000 14.500000000 11.500000000 --36.500000000 15.500000000 11.500000000 -35.500000000 15.500000000 11.500000000 --36.500000000 16.500000000 11.500000000 -35.500000000 16.500000000 11.500000000 --36.500000000 17.500000000 11.500000000 -35.500000000 17.500000000 11.500000000 --36.500000000 18.500000000 11.500000000 -35.500000000 18.500000000 11.500000000 --36.500000000 19.500000000 11.500000000 -35.500000000 19.500000000 11.500000000 --36.500000000 20.500000000 11.500000000 -35.500000000 20.500000000 11.500000000 --36.500000000 21.500000000 11.500000000 -35.500000000 21.500000000 11.500000000 --36.500000000 22.500000000 11.500000000 -35.500000000 
22.500000000 11.500000000 --36.500000000 23.500000000 11.500000000 -35.500000000 23.500000000 11.500000000 --36.500000000 24.500000000 11.500000000 -35.500000000 24.500000000 11.500000000 --36.499996185 25.499996185 11.500000000 -35.499996185 25.499996185 11.500000000 --36.499954224 26.499954224 11.500000000 -35.499954224 26.499954224 11.500000000 --36.499591827 27.499591827 11.500000000 -35.499591827 27.499591827 11.500000000 --36.497474670 28.497470856 11.500000000 -35.497467041 28.497470856 11.500000000 --36.488403320 29.488407135 11.500000000 -35.488403320 29.488407135 11.500000000 --36.458980560 30.458978653 11.500000000 -35.458980560 30.458978653 11.500000000 --36.384422302 31.384418488 11.500000000 -35.384422302 31.384418488 11.500000000 --36.233222961 32.233222961 11.500000000 -35.233222961 32.233222961 11.500000000 --35.981101990 32.981101990 11.500000000 --35.622871399 33.622871399 11.500000000 -34.622871399 33.622871399 11.500000000 -34.981101990 32.981101990 11.500000000 --35.167964935 34.167964935 11.500000000 --34.622871399 34.622871399 11.500000000 -33.622871399 34.622871399 11.500000000 -34.167964935 34.167964935 11.500000000 --33.981101990 34.981101990 11.500000000 --33.233222961 35.233222961 11.500000000 --32.384422302 35.384418488 11.500000000 --31.458978653 35.458976746 11.500000000 --30.488407135 35.488403320 11.500000000 --29.497472763 35.497467041 11.500000000 --28.499593735 35.499591827 11.500000000 --27.499954224 35.499954224 11.500000000 --26.499996185 35.499996185 11.500000000 --25.500000000 35.500000000 11.500000000 --24.500000000 35.500000000 11.500000000 --23.500000000 35.500000000 11.500000000 --22.500000000 35.500000000 11.500000000 --21.500000000 35.500000000 11.500000000 --20.500000000 35.500000000 11.500000000 --19.500000000 35.500000000 11.500000000 --18.500000000 35.500000000 11.500000000 --17.500000000 35.500000000 11.500000000 --16.500000000 35.500000000 11.500000000 --15.500000000 35.500000000 11.500000000 --14.500000000 35.500000000 11.500000000 --13.500000000 35.500000000 11.500000000 --12.500000000 35.500000000 11.500000000 --11.500000000 35.500000000 11.500000000 --10.500000000 35.500000000 11.500000000 --9.500000000 35.500000000 11.500000000 --8.500000000 35.500000000 11.500000000 --7.500000000 35.500000000 11.500000000 --6.500000000 35.500000000 11.500000000 --5.500000000 35.500000000 11.500000000 --4.500000000 35.500000000 11.500000000 --3.500000000 35.500000000 11.500000000 --2.500000000 35.500000000 11.500000000 --1.500000000 35.500000000 11.500000000 --0.500000000 35.500000000 11.500000000 -0.500000000 35.500000000 11.500000000 -1.500000000 35.500000000 11.500000000 -2.500000000 35.500000000 11.500000000 -3.500000000 35.500000000 11.500000000 -4.500000000 35.500000000 11.500000000 -5.500000000 35.500000000 11.500000000 -6.500000000 35.500000000 11.500000000 -7.500000000 35.500000000 11.500000000 -8.500000000 35.500000000 11.500000000 -9.500000000 35.500000000 11.500000000 -10.500000000 35.500000000 11.500000000 -11.500000000 35.500000000 11.500000000 -12.500000000 35.500000000 11.500000000 -13.500000000 35.500000000 11.500000000 -14.500000000 35.500000000 11.500000000 -15.500000000 35.500000000 11.500000000 -16.500000000 35.500000000 11.500000000 -17.500000000 35.500000000 11.500000000 -18.500000000 35.500000000 11.500000000 -19.500000000 35.500000000 11.500000000 -20.500000000 35.500000000 11.500000000 -21.500000000 35.500000000 11.500000000 -22.500000000 35.500000000 11.500000000 -23.500000000 35.500000000 11.500000000 -24.500000000 
35.500000000 11.500000000 -25.499996185 35.499996185 11.500000000 -26.499954224 35.499954224 11.500000000 -27.499591827 35.499591827 11.500000000 -28.497470856 35.497474670 11.500000000 -29.488407135 35.488403320 11.500000000 -30.458978653 35.458980560 11.500000000 -31.384418488 35.384422302 11.500000000 -32.233222961 35.233222961 11.500000000 -32.981101990 34.981101990 11.500000000 --33.981101990 -35.981101990 12.500000000 --33.233226776 -36.233222961 12.500000000 --32.384422302 -36.384418488 12.500000000 --31.458978653 -36.458980560 12.500000000 --30.488407135 -36.488403320 12.500000000 --29.497472763 -36.497474670 12.500000000 --28.499593735 -36.499591827 12.500000000 --27.499954224 -36.499954224 12.500000000 --26.499996185 -36.499996185 12.500000000 --25.500000000 -36.500000000 12.500000000 --24.500000000 -36.500000000 12.500000000 --23.500000000 -36.500000000 12.500000000 --22.500000000 -36.500000000 12.500000000 --21.500000000 -36.500000000 12.500000000 --20.500000000 -36.500000000 12.500000000 --19.500000000 -36.500000000 12.500000000 --18.500000000 -36.500000000 12.500000000 --17.500000000 -36.500000000 12.500000000 --16.500000000 -36.500000000 12.500000000 --15.500000000 -36.500000000 12.500000000 --14.500000000 -36.500000000 12.500000000 --13.500000000 -36.500000000 12.500000000 --12.500000000 -36.500000000 12.500000000 --11.500000000 -36.500000000 12.500000000 --10.500000000 -36.500000000 12.500000000 --9.500000000 -36.500000000 12.500000000 --8.500000000 -36.500000000 12.500000000 --7.500000000 -36.500000000 12.500000000 --6.500000000 -36.500000000 12.500000000 --5.500000000 -36.500000000 12.500000000 --4.500000000 -36.500000000 12.500000000 --3.500000000 -36.500000000 12.500000000 --2.500000000 -36.500000000 12.500000000 --1.500000000 -36.500000000 12.500000000 --0.500000000 -36.500000000 12.500000000 -0.500000000 -36.500000000 12.500000000 -1.500000000 -36.500000000 12.500000000 -2.500000000 -36.500000000 12.500000000 -3.500000000 -36.500000000 12.500000000 -4.500000000 -36.500000000 12.500000000 -5.500000000 -36.500000000 12.500000000 -6.500000000 -36.500000000 12.500000000 -7.500000000 -36.500000000 12.500000000 -8.500000000 -36.500000000 12.500000000 -9.500000000 -36.500000000 12.500000000 -10.500000000 -36.500000000 12.500000000 -11.500000000 -36.500000000 12.500000000 -12.500000000 -36.500000000 12.500000000 -13.500000000 -36.500000000 12.500000000 -14.500000000 -36.500000000 12.500000000 -15.500000000 -36.500000000 12.500000000 -16.500000000 -36.500000000 12.500000000 -17.500000000 -36.500000000 12.500000000 -18.500000000 -36.500000000 12.500000000 -19.500000000 -36.500000000 12.500000000 -20.500000000 -36.500000000 12.500000000 -21.500000000 -36.500000000 12.500000000 -22.500000000 -36.500000000 12.500000000 -23.500000000 -36.500000000 12.500000000 -24.500000000 -36.500000000 12.500000000 -25.499996185 -36.499996185 12.500000000 -26.499954224 -36.499954224 12.500000000 -27.499591827 -36.499591827 12.500000000 -28.497470856 -36.497467041 12.500000000 -29.488407135 -36.488403320 12.500000000 -30.458978653 -36.458980560 12.500000000 -31.384418488 -36.384422302 12.500000000 -32.233222961 -36.233222961 12.500000000 -32.981101990 -35.981101990 12.500000000 --35.167964935 -35.167964935 12.500000000 --34.622871399 -35.622871399 12.500000000 -33.622871399 -35.622871399 12.500000000 -34.167964935 -35.167964935 12.500000000 --35.981101990 -33.981101990 12.500000000 --35.622871399 -34.622871399 12.500000000 -34.622871399 -34.622871399 12.500000000 -34.981101990 -33.981101990 
12.500000000 --36.233222961 -33.233222961 12.500000000 -35.233222961 -33.233226776 12.500000000 --36.384418488 -32.384422302 12.500000000 -35.384418488 -32.384422302 12.500000000 --36.458976746 -31.458978653 12.500000000 -35.458980560 -31.458978653 12.500000000 --36.488403320 -30.488407135 12.500000000 -35.488403320 -30.488407135 12.500000000 --36.497467041 -29.497472763 12.500000000 -35.497474670 -29.497472763 12.500000000 --36.499591827 -28.499593735 12.500000000 -35.499591827 -28.499593735 12.500000000 --36.499954224 -27.499954224 12.500000000 -35.499954224 -27.499954224 12.500000000 --36.499996185 -26.499996185 12.500000000 -35.499996185 -26.499996185 12.500000000 --36.500000000 -25.500000000 12.500000000 -35.500000000 -25.500000000 12.500000000 --36.500000000 -24.500000000 12.500000000 -35.500000000 -24.500000000 12.500000000 --36.500000000 -23.500000000 12.500000000 -35.500000000 -23.500000000 12.500000000 --36.500000000 -22.500000000 12.500000000 -35.500000000 -22.500000000 12.500000000 --36.500000000 -21.500000000 12.500000000 -35.500000000 -21.500000000 12.500000000 --36.500000000 -20.500000000 12.500000000 -35.500000000 -20.500000000 12.500000000 --36.500000000 -19.500000000 12.500000000 -35.500000000 -19.500000000 12.500000000 --36.500000000 -18.500000000 12.500000000 -35.500000000 -18.500000000 12.500000000 --36.500000000 -17.500000000 12.500000000 -35.500000000 -17.500000000 12.500000000 --36.500000000 -16.500000000 12.500000000 -35.500000000 -16.500000000 12.500000000 --36.500000000 -15.500000000 12.500000000 -35.500000000 -15.500000000 12.500000000 --36.500000000 -14.500000000 12.500000000 -35.500000000 -14.500000000 12.500000000 --36.500000000 -13.500000000 12.500000000 -35.500000000 -13.500000000 12.500000000 --36.500000000 -12.500000000 12.500000000 -35.500000000 -12.500000000 12.500000000 --36.500000000 -11.500000000 12.500000000 -35.500000000 -11.500000000 12.500000000 --36.500000000 -10.500000000 12.500000000 -35.500000000 -10.500000000 12.500000000 --36.500000000 -9.500000000 12.500000000 -35.500000000 -9.500000000 12.500000000 --36.500000000 -8.500000000 12.500000000 -35.500000000 -8.500000000 12.500000000 --36.500000000 -7.500000000 12.500000000 -35.500000000 -7.500000000 12.500000000 --36.500000000 -6.500000000 12.500000000 -35.500000000 -6.500000000 12.500000000 --36.500000000 -5.500000000 12.500000000 -35.500000000 -5.500000000 12.500000000 --36.500000000 -4.500000000 12.500000000 -35.500000000 -4.500000000 12.500000000 --36.500000000 -3.500000000 12.500000000 -35.500000000 -3.500000000 12.500000000 --36.500000000 -2.500000000 12.500000000 -35.500000000 -2.500000000 12.500000000 --36.500000000 -1.500000000 12.500000000 -35.500000000 -1.500000000 12.500000000 --36.500000000 -0.500000000 12.500000000 -35.500000000 -0.500000000 12.500000000 --36.500000000 0.500000000 12.500000000 -35.500000000 0.500000000 12.500000000 --36.500000000 1.500000000 12.500000000 -35.500000000 1.500000000 12.500000000 --36.500000000 2.500000000 12.500000000 -35.500000000 2.500000000 12.500000000 --36.500000000 3.500000000 12.500000000 -35.500000000 3.500000000 12.500000000 --36.500000000 4.500000000 12.500000000 -35.500000000 4.500000000 12.500000000 --36.500000000 5.500000000 12.500000000 -35.500000000 5.500000000 12.500000000 --36.500000000 6.500000000 12.500000000 -35.500000000 6.500000000 12.500000000 --36.500000000 7.500000000 12.500000000 -35.500000000 7.500000000 12.500000000 --36.500000000 8.500000000 12.500000000 -35.500000000 8.500000000 12.500000000 --36.500000000 9.500000000 
12.500000000 -35.500000000 9.500000000 12.500000000 --36.500000000 10.500000000 12.500000000 -35.500000000 10.500000000 12.500000000 --36.500000000 11.500000000 12.500000000 -35.500000000 11.500000000 12.500000000 --36.500000000 12.500000000 12.500000000 -35.500000000 12.500000000 12.500000000 --36.500000000 13.500000000 12.500000000 -35.500000000 13.500000000 12.500000000 --36.500000000 14.500000000 12.500000000 -35.500000000 14.500000000 12.500000000 --36.500000000 15.500000000 12.500000000 -35.500000000 15.500000000 12.500000000 --36.500000000 16.500000000 12.500000000 -35.500000000 16.500000000 12.500000000 --36.500000000 17.500000000 12.500000000 -35.500000000 17.500000000 12.500000000 --36.500000000 18.500000000 12.500000000 -35.500000000 18.500000000 12.500000000 --36.500000000 19.500000000 12.500000000 -35.500000000 19.500000000 12.500000000 --36.500000000 20.500000000 12.500000000 -35.500000000 20.500000000 12.500000000 --36.500000000 21.500000000 12.500000000 -35.500000000 21.500000000 12.500000000 --36.500000000 22.500000000 12.500000000 -35.500000000 22.500000000 12.500000000 --36.500000000 23.500000000 12.500000000 -35.500000000 23.500000000 12.500000000 --36.500000000 24.500000000 12.500000000 -35.500000000 24.500000000 12.500000000 --36.499996185 25.499996185 12.500000000 -35.499996185 25.499996185 12.500000000 --36.499954224 26.499954224 12.500000000 -35.499954224 26.499954224 12.500000000 --36.499591827 27.499591827 12.500000000 -35.499591827 27.499591827 12.500000000 --36.497474670 28.497470856 12.500000000 -35.497467041 28.497470856 12.500000000 --36.488403320 29.488407135 12.500000000 -35.488403320 29.488407135 12.500000000 --36.458980560 30.458978653 12.500000000 -35.458980560 30.458978653 12.500000000 --36.384422302 31.384418488 12.500000000 -35.384422302 31.384418488 12.500000000 --36.233222961 32.233222961 12.500000000 -35.233222961 32.233222961 12.500000000 --35.981101990 32.981101990 12.500000000 --35.622871399 33.622871399 12.500000000 -34.622871399 33.622871399 12.500000000 -34.981101990 32.981101990 12.500000000 --35.167964935 34.167964935 12.500000000 --34.622871399 34.622871399 12.500000000 -33.622871399 34.622871399 12.500000000 -34.167964935 34.167964935 12.500000000 --33.981101990 34.981101990 12.500000000 --33.233222961 35.233222961 12.500000000 --32.384422302 35.384418488 12.500000000 --31.458978653 35.458976746 12.500000000 --30.488407135 35.488403320 12.500000000 --29.497472763 35.497467041 12.500000000 --28.499593735 35.499591827 12.500000000 --27.499954224 35.499954224 12.500000000 --26.499996185 35.499996185 12.500000000 --25.500000000 35.500000000 12.500000000 --24.500000000 35.500000000 12.500000000 --23.500000000 35.500000000 12.500000000 --22.500000000 35.500000000 12.500000000 --21.500000000 35.500000000 12.500000000 --20.500000000 35.500000000 12.500000000 --19.500000000 35.500000000 12.500000000 --18.500000000 35.500000000 12.500000000 --17.500000000 35.500000000 12.500000000 --16.500000000 35.500000000 12.500000000 --15.500000000 35.500000000 12.500000000 --14.500000000 35.500000000 12.500000000 --13.500000000 35.500000000 12.500000000 --12.500000000 35.500000000 12.500000000 --11.500000000 35.500000000 12.500000000 --10.500000000 35.500000000 12.500000000 --9.500000000 35.500000000 12.500000000 --8.500000000 35.500000000 12.500000000 --7.500000000 35.500000000 12.500000000 --6.500000000 35.500000000 12.500000000 --5.500000000 35.500000000 12.500000000 --4.500000000 35.500000000 12.500000000 --3.500000000 35.500000000 12.500000000 
--2.500000000 35.500000000 12.500000000 --1.500000000 35.500000000 12.500000000 --0.500000000 35.500000000 12.500000000 -0.500000000 35.500000000 12.500000000 -1.500000000 35.500000000 12.500000000 -2.500000000 35.500000000 12.500000000 -3.500000000 35.500000000 12.500000000 -4.500000000 35.500000000 12.500000000 -5.500000000 35.500000000 12.500000000 -6.500000000 35.500000000 12.500000000 -7.500000000 35.500000000 12.500000000 -8.500000000 35.500000000 12.500000000 -9.500000000 35.500000000 12.500000000 -10.500000000 35.500000000 12.500000000 -11.500000000 35.500000000 12.500000000 -12.500000000 35.500000000 12.500000000 -13.500000000 35.500000000 12.500000000 -14.500000000 35.500000000 12.500000000 -15.500000000 35.500000000 12.500000000 -16.500000000 35.500000000 12.500000000 -17.500000000 35.500000000 12.500000000 -18.500000000 35.500000000 12.500000000 -19.500000000 35.500000000 12.500000000 -20.500000000 35.500000000 12.500000000 -21.500000000 35.500000000 12.500000000 -22.500000000 35.500000000 12.500000000 -23.500000000 35.500000000 12.500000000 -24.500000000 35.500000000 12.500000000 -25.499996185 35.499996185 12.500000000 -26.499954224 35.499954224 12.500000000 -27.499591827 35.499591827 12.500000000 -28.497470856 35.497474670 12.500000000 -29.488407135 35.488403320 12.500000000 -30.458978653 35.458980560 12.500000000 -31.384418488 35.384422302 12.500000000 -32.233222961 35.233222961 12.500000000 -32.981101990 34.981101990 12.500000000 --33.981101990 -35.981101990 13.500000000 --33.233226776 -36.233222961 13.500000000 --32.384422302 -36.384418488 13.500000000 --31.458978653 -36.458980560 13.500000000 --30.488407135 -36.488403320 13.500000000 --29.497472763 -36.497474670 13.500000000 --28.499593735 -36.499591827 13.500000000 --27.499954224 -36.499954224 13.500000000 --26.499996185 -36.499996185 13.500000000 --25.500000000 -36.500000000 13.500000000 --24.500000000 -36.500000000 13.500000000 --23.500000000 -36.500000000 13.500000000 --22.500000000 -36.500000000 13.500000000 --21.500000000 -36.500000000 13.500000000 --20.500000000 -36.500000000 13.500000000 --19.500000000 -36.500000000 13.500000000 --18.500000000 -36.500000000 13.500000000 --17.500000000 -36.500000000 13.500000000 --16.500000000 -36.500000000 13.500000000 --15.500000000 -36.500000000 13.500000000 --14.500000000 -36.500000000 13.500000000 --13.500000000 -36.500000000 13.500000000 --12.500000000 -36.500000000 13.500000000 --11.500000000 -36.500000000 13.500000000 --10.500000000 -36.500000000 13.500000000 --9.500000000 -36.500000000 13.500000000 --8.500000000 -36.500000000 13.500000000 --7.500000000 -36.500000000 13.500000000 --6.500000000 -36.500000000 13.500000000 --5.500000000 -36.500000000 13.500000000 --4.500000000 -36.500000000 13.500000000 --3.500000000 -36.500000000 13.500000000 --2.500000000 -36.500000000 13.500000000 --1.500000000 -36.500000000 13.500000000 --0.500000000 -36.500000000 13.500000000 -0.500000000 -36.500000000 13.500000000 -1.500000000 -36.500000000 13.500000000 -2.500000000 -36.500000000 13.500000000 -3.500000000 -36.500000000 13.500000000 -4.500000000 -36.500000000 13.500000000 -5.500000000 -36.500000000 13.500000000 -6.500000000 -36.500000000 13.500000000 -7.500000000 -36.500000000 13.500000000 -8.500000000 -36.500000000 13.500000000 -9.500000000 -36.500000000 13.500000000 -10.500000000 -36.500000000 13.500000000 -11.500000000 -36.500000000 13.500000000 -12.500000000 -36.500000000 13.500000000 -13.500000000 -36.500000000 13.500000000 -14.500000000 -36.500000000 13.500000000 -15.500000000 
-36.500000000 13.500000000 -16.500000000 -36.500000000 13.500000000 -17.500000000 -36.500000000 13.500000000 -18.500000000 -36.500000000 13.500000000 -19.500000000 -36.500000000 13.500000000 -20.500000000 -36.500000000 13.500000000 -21.500000000 -36.500000000 13.500000000 -22.500000000 -36.500000000 13.500000000 -23.500000000 -36.500000000 13.500000000 -24.500000000 -36.500000000 13.500000000 -25.499996185 -36.499996185 13.500000000 -26.499954224 -36.499954224 13.500000000 -27.499591827 -36.499591827 13.500000000 -28.497470856 -36.497467041 13.500000000 -29.488407135 -36.488403320 13.500000000 -30.458978653 -36.458980560 13.500000000 -31.384418488 -36.384422302 13.500000000 -32.233222961 -36.233222961 13.500000000 -32.981101990 -35.981101990 13.500000000 --35.167964935 -35.167964935 13.500000000 --34.622871399 -35.622871399 13.500000000 -33.622871399 -35.622871399 13.500000000 -34.167964935 -35.167964935 13.500000000 --35.981101990 -33.981101990 13.500000000 --35.622871399 -34.622871399 13.500000000 -34.622871399 -34.622871399 13.500000000 -34.981101990 -33.981101990 13.500000000 --36.233222961 -33.233222961 13.500000000 -35.233222961 -33.233226776 13.500000000 --36.384418488 -32.384422302 13.500000000 -35.384418488 -32.384422302 13.500000000 --36.458976746 -31.458978653 13.500000000 -35.458980560 -31.458978653 13.500000000 --36.488403320 -30.488407135 13.500000000 -35.488403320 -30.488407135 13.500000000 --36.497467041 -29.497472763 13.500000000 -35.497474670 -29.497472763 13.500000000 --36.499591827 -28.499593735 13.500000000 -35.499591827 -28.499593735 13.500000000 --36.499954224 -27.499954224 13.500000000 -35.499954224 -27.499954224 13.500000000 --36.499996185 -26.499996185 13.500000000 -35.499996185 -26.499996185 13.500000000 --36.500000000 -25.500000000 13.500000000 -35.500000000 -25.500000000 13.500000000 --36.500000000 -24.500000000 13.500000000 -35.500000000 -24.500000000 13.500000000 --36.500000000 -23.500000000 13.500000000 -35.500000000 -23.500000000 13.500000000 --36.500000000 -22.500000000 13.500000000 -35.500000000 -22.500000000 13.500000000 --36.500000000 -21.500000000 13.500000000 -35.500000000 -21.500000000 13.500000000 --36.500000000 -20.500000000 13.500000000 -35.500000000 -20.500000000 13.500000000 --36.500000000 -19.500000000 13.500000000 -35.500000000 -19.500000000 13.500000000 --36.500000000 -18.500000000 13.500000000 -35.500000000 -18.500000000 13.500000000 --36.500000000 -17.500000000 13.500000000 -35.500000000 -17.500000000 13.500000000 --36.500000000 -16.500000000 13.500000000 -35.500000000 -16.500000000 13.500000000 --36.500000000 -15.500000000 13.500000000 -35.500000000 -15.500000000 13.500000000 --36.500000000 -14.500000000 13.500000000 -35.500000000 -14.500000000 13.500000000 --36.500000000 -13.500000000 13.500000000 -35.500000000 -13.500000000 13.500000000 --36.500000000 -12.500000000 13.500000000 -35.500000000 -12.500000000 13.500000000 --36.500000000 -11.500000000 13.500000000 -35.500000000 -11.500000000 13.500000000 --36.500000000 -10.500000000 13.500000000 -35.500000000 -10.500000000 13.500000000 --36.500000000 -9.500000000 13.500000000 -35.500000000 -9.500000000 13.500000000 --36.500000000 -8.500000000 13.500000000 -35.500000000 -8.500000000 13.500000000 --36.500000000 -7.500000000 13.500000000 -35.500000000 -7.500000000 13.500000000 --36.500000000 -6.500000000 13.500000000 -35.500000000 -6.500000000 13.500000000 --36.500000000 -5.500000000 13.500000000 -35.500000000 -5.500000000 13.500000000 --36.500000000 -4.500000000 13.500000000 -35.500000000 
-4.500000000 13.500000000 --36.500000000 -3.500000000 13.500000000 -35.500000000 -3.500000000 13.500000000 --36.500000000 -2.500000000 13.500000000 -35.500000000 -2.500000000 13.500000000 --36.500000000 -1.500000000 13.500000000 -35.500000000 -1.500000000 13.500000000 --36.500000000 -0.500000000 13.500000000 -35.500000000 -0.500000000 13.500000000 --36.500000000 0.500000000 13.500000000 -35.500000000 0.500000000 13.500000000 --36.500000000 1.500000000 13.500000000 -35.500000000 1.500000000 13.500000000 --36.500000000 2.500000000 13.500000000 -35.500000000 2.500000000 13.500000000 --36.500000000 3.500000000 13.500000000 -35.500000000 3.500000000 13.500000000 --36.500000000 4.500000000 13.500000000 -35.500000000 4.500000000 13.500000000 --36.500000000 5.500000000 13.500000000 -35.500000000 5.500000000 13.500000000 --36.500000000 6.500000000 13.500000000 -35.500000000 6.500000000 13.500000000 --36.500000000 7.500000000 13.500000000 -35.500000000 7.500000000 13.500000000 --36.500000000 8.500000000 13.500000000 -35.500000000 8.500000000 13.500000000 --36.500000000 9.500000000 13.500000000 -35.500000000 9.500000000 13.500000000 --36.500000000 10.500000000 13.500000000 -35.500000000 10.500000000 13.500000000 --36.500000000 11.500000000 13.500000000 -35.500000000 11.500000000 13.500000000 --36.500000000 12.500000000 13.500000000 -35.500000000 12.500000000 13.500000000 --36.500000000 13.500000000 13.500000000 -35.500000000 13.500000000 13.500000000 --36.500000000 14.500000000 13.500000000 -35.500000000 14.500000000 13.500000000 --36.500000000 15.500000000 13.500000000 -35.500000000 15.500000000 13.500000000 --36.500000000 16.500000000 13.500000000 -35.500000000 16.500000000 13.500000000 --36.500000000 17.500000000 13.500000000 -35.500000000 17.500000000 13.500000000 --36.500000000 18.500000000 13.500000000 -35.500000000 18.500000000 13.500000000 --36.500000000 19.500000000 13.500000000 -35.500000000 19.500000000 13.500000000 --36.500000000 20.500000000 13.500000000 -35.500000000 20.500000000 13.500000000 --36.500000000 21.500000000 13.500000000 -35.500000000 21.500000000 13.500000000 --36.500000000 22.500000000 13.500000000 -35.500000000 22.500000000 13.500000000 --36.500000000 23.500000000 13.500000000 -35.500000000 23.500000000 13.500000000 --36.500000000 24.500000000 13.500000000 -35.500000000 24.500000000 13.500000000 --36.499996185 25.499996185 13.500000000 -35.499996185 25.499996185 13.500000000 --36.499954224 26.499954224 13.500000000 -35.499954224 26.499954224 13.500000000 --36.499591827 27.499591827 13.500000000 -35.499591827 27.499591827 13.500000000 --36.497474670 28.497470856 13.500000000 -35.497467041 28.497470856 13.500000000 --36.488403320 29.488407135 13.500000000 -35.488403320 29.488407135 13.500000000 --36.458980560 30.458978653 13.500000000 -35.458980560 30.458978653 13.500000000 --36.384422302 31.384418488 13.500000000 -35.384422302 31.384418488 13.500000000 --36.233222961 32.233222961 13.500000000 -35.233222961 32.233222961 13.500000000 --35.981101990 32.981101990 13.500000000 --35.622871399 33.622871399 13.500000000 -34.622871399 33.622871399 13.500000000 -34.981101990 32.981101990 13.500000000 --35.167964935 34.167964935 13.500000000 --34.622871399 34.622871399 13.500000000 -33.622871399 34.622871399 13.500000000 -34.167964935 34.167964935 13.500000000 --33.981101990 34.981101990 13.500000000 --33.233222961 35.233222961 13.500000000 --32.384422302 35.384418488 13.500000000 --31.458978653 35.458976746 13.500000000 --30.488407135 35.488403320 13.500000000 --29.497472763 
35.497467041 13.500000000 --28.499593735 35.499591827 13.500000000 --27.499954224 35.499954224 13.500000000 --26.499996185 35.499996185 13.500000000 --25.500000000 35.500000000 13.500000000 --24.500000000 35.500000000 13.500000000 --23.500000000 35.500000000 13.500000000 --22.500000000 35.500000000 13.500000000 --21.500000000 35.500000000 13.500000000 --20.500000000 35.500000000 13.500000000 --19.500000000 35.500000000 13.500000000 --18.500000000 35.500000000 13.500000000 --17.500000000 35.500000000 13.500000000 --16.500000000 35.500000000 13.500000000 --15.500000000 35.500000000 13.500000000 --14.500000000 35.500000000 13.500000000 --13.500000000 35.500000000 13.500000000 --12.500000000 35.500000000 13.500000000 --11.500000000 35.500000000 13.500000000 --10.500000000 35.500000000 13.500000000 --9.500000000 35.500000000 13.500000000 --8.500000000 35.500000000 13.500000000 --7.500000000 35.500000000 13.500000000 --6.500000000 35.500000000 13.500000000 --5.500000000 35.500000000 13.500000000 --4.500000000 35.500000000 13.500000000 --3.500000000 35.500000000 13.500000000 --2.500000000 35.500000000 13.500000000 --1.500000000 35.500000000 13.500000000 --0.500000000 35.500000000 13.500000000 -0.500000000 35.500000000 13.500000000 -1.500000000 35.500000000 13.500000000 -2.500000000 35.500000000 13.500000000 -3.500000000 35.500000000 13.500000000 -4.500000000 35.500000000 13.500000000 -5.500000000 35.500000000 13.500000000 -6.500000000 35.500000000 13.500000000 -7.500000000 35.500000000 13.500000000 -8.500000000 35.500000000 13.500000000 -9.500000000 35.500000000 13.500000000 -10.500000000 35.500000000 13.500000000 -11.500000000 35.500000000 13.500000000 -12.500000000 35.500000000 13.500000000 -13.500000000 35.500000000 13.500000000 -14.500000000 35.500000000 13.500000000 -15.500000000 35.500000000 13.500000000 -16.500000000 35.500000000 13.500000000 -17.500000000 35.500000000 13.500000000 -18.500000000 35.500000000 13.500000000 -19.500000000 35.500000000 13.500000000 -20.500000000 35.500000000 13.500000000 -21.500000000 35.500000000 13.500000000 -22.500000000 35.500000000 13.500000000 -23.500000000 35.500000000 13.500000000 -24.500000000 35.500000000 13.500000000 -25.499996185 35.499996185 13.500000000 -26.499954224 35.499954224 13.500000000 -27.499591827 35.499591827 13.500000000 -28.497470856 35.497474670 13.500000000 -29.488407135 35.488403320 13.500000000 -30.458978653 35.458980560 13.500000000 -31.384418488 35.384422302 13.500000000 -32.233222961 35.233222961 13.500000000 -32.981101990 34.981101990 13.500000000 --33.981101990 -35.981101990 14.500000000 --33.233226776 -36.233222961 14.500000000 --32.384422302 -36.384418488 14.500000000 --31.458978653 -36.458980560 14.500000000 --30.488407135 -36.488403320 14.500000000 --29.497472763 -36.497474670 14.500000000 --28.499593735 -36.499591827 14.500000000 --27.499954224 -36.499954224 14.500000000 --26.499996185 -36.499996185 14.500000000 --25.500000000 -36.500000000 14.500000000 --24.500000000 -36.500000000 14.500000000 --23.500000000 -36.500000000 14.500000000 --22.500000000 -36.500000000 14.500000000 --21.500000000 -36.500000000 14.500000000 --20.500000000 -36.500000000 14.500000000 --19.500000000 -36.500000000 14.500000000 --18.500000000 -36.500000000 14.500000000 --17.500000000 -36.500000000 14.500000000 --16.500000000 -36.500000000 14.500000000 --15.500000000 -36.500000000 14.500000000 --14.500000000 -36.500000000 14.500000000 --13.500000000 -36.500000000 14.500000000 --12.500000000 -36.500000000 14.500000000 --11.500000000 -36.500000000 
14.500000000 --10.500000000 -36.500000000 14.500000000 --9.500000000 -36.500000000 14.500000000 --8.500000000 -36.500000000 14.500000000 --7.500000000 -36.500000000 14.500000000 --6.500000000 -36.500000000 14.500000000 --5.500000000 -36.500000000 14.500000000 --4.500000000 -36.500000000 14.500000000 --3.500000000 -36.500000000 14.500000000 --2.500000000 -36.500000000 14.500000000 --1.500000000 -36.500000000 14.500000000 --0.500000000 -36.500000000 14.500000000 -0.500000000 -36.500000000 14.500000000 -1.500000000 -36.500000000 14.500000000 -2.500000000 -36.500000000 14.500000000 -3.500000000 -36.500000000 14.500000000 -4.500000000 -36.500000000 14.500000000 -5.500000000 -36.500000000 14.500000000 -6.500000000 -36.500000000 14.500000000 -7.500000000 -36.500000000 14.500000000 -8.500000000 -36.500000000 14.500000000 -9.500000000 -36.500000000 14.500000000 -10.500000000 -36.500000000 14.500000000 -11.500000000 -36.500000000 14.500000000 -12.500000000 -36.500000000 14.500000000 -13.500000000 -36.500000000 14.500000000 -14.500000000 -36.500000000 14.500000000 -15.500000000 -36.500000000 14.500000000 -16.500000000 -36.500000000 14.500000000 -17.500000000 -36.500000000 14.500000000 -18.500000000 -36.500000000 14.500000000 -19.500000000 -36.500000000 14.500000000 -20.500000000 -36.500000000 14.500000000 -21.500000000 -36.500000000 14.500000000 -22.500000000 -36.500000000 14.500000000 -23.500000000 -36.500000000 14.500000000 -24.500000000 -36.500000000 14.500000000 -25.499996185 -36.499996185 14.500000000 -26.499954224 -36.499954224 14.500000000 -27.499591827 -36.499591827 14.500000000 -28.497470856 -36.497467041 14.500000000 -29.488407135 -36.488403320 14.500000000 -30.458978653 -36.458980560 14.500000000 -31.384418488 -36.384422302 14.500000000 -32.233222961 -36.233222961 14.500000000 -32.981101990 -35.981101990 14.500000000 --35.167964935 -35.167964935 14.500000000 --34.622871399 -35.622871399 14.500000000 -33.622871399 -35.622871399 14.500000000 -34.167964935 -35.167964935 14.500000000 --35.981101990 -33.981101990 14.500000000 --35.622871399 -34.622871399 14.500000000 -34.622871399 -34.622871399 14.500000000 -34.981101990 -33.981101990 14.500000000 --36.233222961 -33.233222961 14.500000000 -35.233222961 -33.233226776 14.500000000 --36.384418488 -32.384422302 14.500000000 -35.384418488 -32.384422302 14.500000000 --36.458976746 -31.458978653 14.500000000 -35.458980560 -31.458978653 14.500000000 --36.488403320 -30.488407135 14.500000000 -35.488403320 -30.488407135 14.500000000 --36.497467041 -29.497472763 14.500000000 -35.497474670 -29.497472763 14.500000000 --36.499591827 -28.499593735 14.500000000 -35.499591827 -28.499593735 14.500000000 --36.499954224 -27.499954224 14.500000000 -35.499954224 -27.499954224 14.500000000 --36.499996185 -26.499996185 14.500000000 -35.499996185 -26.499996185 14.500000000 --36.500000000 -25.500000000 14.500000000 -35.500000000 -25.500000000 14.500000000 --36.500000000 -24.500000000 14.500000000 -35.500000000 -24.500000000 14.500000000 --36.500000000 -23.500000000 14.500000000 -35.500000000 -23.500000000 14.500000000 --36.500000000 -22.500000000 14.500000000 -35.500000000 -22.500000000 14.500000000 --36.500000000 -21.500000000 14.500000000 -35.500000000 -21.500000000 14.500000000 --36.500000000 -20.500000000 14.500000000 -35.500000000 -20.500000000 14.500000000 --36.500000000 -19.500000000 14.500000000 -35.500000000 -19.500000000 14.500000000 --36.500000000 -18.500000000 14.500000000 -35.500000000 -18.500000000 14.500000000 --36.500000000 -17.500000000 14.500000000 
-35.500000000 -17.500000000 14.500000000 --36.500000000 -16.500000000 14.500000000 -35.500000000 -16.500000000 14.500000000 --36.500000000 -15.500000000 14.500000000 -35.500000000 -15.500000000 14.500000000 --36.500000000 -14.500000000 14.500000000 -35.500000000 -14.500000000 14.500000000 --36.500000000 -13.500000000 14.500000000 -35.500000000 -13.500000000 14.500000000 --36.500000000 -12.500000000 14.500000000 -35.500000000 -12.500000000 14.500000000 --36.500000000 -11.500000000 14.500000000 -35.500000000 -11.500000000 14.500000000 --36.500000000 -10.500000000 14.500000000 -35.500000000 -10.500000000 14.500000000 --36.500000000 -9.500000000 14.500000000 -35.500000000 -9.500000000 14.500000000 --36.500000000 -8.500000000 14.500000000 -35.500000000 -8.500000000 14.500000000 --36.500000000 -7.500000000 14.500000000 -35.500000000 -7.500000000 14.500000000 --36.500000000 -6.500000000 14.500000000 -35.500000000 -6.500000000 14.500000000 --36.500000000 -5.500000000 14.500000000 -35.500000000 -5.500000000 14.500000000 --36.500000000 -4.500000000 14.500000000 -35.500000000 -4.500000000 14.500000000 --36.500000000 -3.500000000 14.500000000 -35.500000000 -3.500000000 14.500000000 --36.500000000 -2.500000000 14.500000000 -35.500000000 -2.500000000 14.500000000 --36.500000000 -1.500000000 14.500000000 -35.500000000 -1.500000000 14.500000000 --36.500000000 -0.500000000 14.500000000 -35.500000000 -0.500000000 14.500000000 --36.500000000 0.500000000 14.500000000 -35.500000000 0.500000000 14.500000000 --36.500000000 1.500000000 14.500000000 -35.500000000 1.500000000 14.500000000 --36.500000000 2.500000000 14.500000000 -35.500000000 2.500000000 14.500000000 --36.500000000 3.500000000 14.500000000 -35.500000000 3.500000000 14.500000000 --36.500000000 4.500000000 14.500000000 -35.500000000 4.500000000 14.500000000 --36.500000000 5.500000000 14.500000000 -35.500000000 5.500000000 14.500000000 --36.500000000 6.500000000 14.500000000 -35.500000000 6.500000000 14.500000000 --36.500000000 7.500000000 14.500000000 -35.500000000 7.500000000 14.500000000 --36.500000000 8.500000000 14.500000000 -35.500000000 8.500000000 14.500000000 --36.500000000 9.500000000 14.500000000 -35.500000000 9.500000000 14.500000000 --36.500000000 10.500000000 14.500000000 -35.500000000 10.500000000 14.500000000 --36.500000000 11.500000000 14.500000000 -35.500000000 11.500000000 14.500000000 --36.500000000 12.500000000 14.500000000 -35.500000000 12.500000000 14.500000000 --36.500000000 13.500000000 14.500000000 -35.500000000 13.500000000 14.500000000 --36.500000000 14.500000000 14.500000000 -35.500000000 14.500000000 14.500000000 --36.500000000 15.500000000 14.500000000 -35.500000000 15.500000000 14.500000000 --36.500000000 16.500000000 14.500000000 -35.500000000 16.500000000 14.500000000 --36.500000000 17.500000000 14.500000000 -35.500000000 17.500000000 14.500000000 --36.500000000 18.500000000 14.500000000 -35.500000000 18.500000000 14.500000000 --36.500000000 19.500000000 14.500000000 -35.500000000 19.500000000 14.500000000 --36.500000000 20.500000000 14.500000000 -35.500000000 20.500000000 14.500000000 --36.500000000 21.500000000 14.500000000 -35.500000000 21.500000000 14.500000000 --36.500000000 22.500000000 14.500000000 -35.500000000 22.500000000 14.500000000 --36.500000000 23.500000000 14.500000000 -35.500000000 23.500000000 14.500000000 --36.500000000 24.500000000 14.500000000 -35.500000000 24.500000000 14.500000000 --36.499996185 25.499996185 14.500000000 -35.499996185 25.499996185 14.500000000 --36.499954224 26.499954224 
14.500000000 -35.499954224 26.499954224 14.500000000 --36.499591827 27.499591827 14.500000000 -35.499591827 27.499591827 14.500000000 --36.497474670 28.497470856 14.500000000 -35.497467041 28.497470856 14.500000000 --36.488403320 29.488407135 14.500000000 -35.488403320 29.488407135 14.500000000 --36.458980560 30.458978653 14.500000000 -35.458980560 30.458978653 14.500000000 --36.384422302 31.384418488 14.500000000 -35.384422302 31.384418488 14.500000000 --36.233222961 32.233222961 14.500000000 -35.233222961 32.233222961 14.500000000 --35.981101990 32.981101990 14.500000000 --35.622871399 33.622871399 14.500000000 -34.622871399 33.622871399 14.500000000 -34.981101990 32.981101990 14.500000000 --35.167964935 34.167964935 14.500000000 --34.622871399 34.622871399 14.500000000 -33.622871399 34.622871399 14.500000000 -34.167964935 34.167964935 14.500000000 --33.981101990 34.981101990 14.500000000 --33.233222961 35.233222961 14.500000000 --32.384422302 35.384418488 14.500000000 --31.458978653 35.458976746 14.500000000 --30.488407135 35.488403320 14.500000000 --29.497472763 35.497467041 14.500000000 --28.499593735 35.499591827 14.500000000 --27.499954224 35.499954224 14.500000000 --26.499996185 35.499996185 14.500000000 --25.500000000 35.500000000 14.500000000 --24.500000000 35.500000000 14.500000000 --23.500000000 35.500000000 14.500000000 --22.500000000 35.500000000 14.500000000 --21.500000000 35.500000000 14.500000000 --20.500000000 35.500000000 14.500000000 --19.500000000 35.500000000 14.500000000 --18.500000000 35.500000000 14.500000000 --17.500000000 35.500000000 14.500000000 --16.500000000 35.500000000 14.500000000 --15.500000000 35.500000000 14.500000000 --14.500000000 35.500000000 14.500000000 --13.500000000 35.500000000 14.500000000 --12.500000000 35.500000000 14.500000000 --11.500000000 35.500000000 14.500000000 --10.500000000 35.500000000 14.500000000 --9.500000000 35.500000000 14.500000000 --8.500000000 35.500000000 14.500000000 --7.500000000 35.500000000 14.500000000 --6.500000000 35.500000000 14.500000000 --5.500000000 35.500000000 14.500000000 --4.500000000 35.500000000 14.500000000 --3.500000000 35.500000000 14.500000000 --2.500000000 35.500000000 14.500000000 --1.500000000 35.500000000 14.500000000 --0.500000000 35.500000000 14.500000000 -0.500000000 35.500000000 14.500000000 -1.500000000 35.500000000 14.500000000 -2.500000000 35.500000000 14.500000000 -3.500000000 35.500000000 14.500000000 -4.500000000 35.500000000 14.500000000 -5.500000000 35.500000000 14.500000000 -6.500000000 35.500000000 14.500000000 -7.500000000 35.500000000 14.500000000 -8.500000000 35.500000000 14.500000000 -9.500000000 35.500000000 14.500000000 -10.500000000 35.500000000 14.500000000 -11.500000000 35.500000000 14.500000000 -12.500000000 35.500000000 14.500000000 -13.500000000 35.500000000 14.500000000 -14.500000000 35.500000000 14.500000000 -15.500000000 35.500000000 14.500000000 -16.500000000 35.500000000 14.500000000 -17.500000000 35.500000000 14.500000000 -18.500000000 35.500000000 14.500000000 -19.500000000 35.500000000 14.500000000 -20.500000000 35.500000000 14.500000000 -21.500000000 35.500000000 14.500000000 -22.500000000 35.500000000 14.500000000 -23.500000000 35.500000000 14.500000000 -24.500000000 35.500000000 14.500000000 -25.499996185 35.499996185 14.500000000 -26.499954224 35.499954224 14.500000000 -27.499591827 35.499591827 14.500000000 -28.497470856 35.497474670 14.500000000 -29.488407135 35.488403320 14.500000000 -30.458978653 35.458980560 14.500000000 -31.384418488 35.384422302 
14.500000000 -32.233222961 35.233222961 14.500000000 -32.981101990 34.981101990 14.500000000 --33.981101990 -35.981101990 15.500000000 --33.233226776 -36.233222961 15.500000000 --32.384422302 -36.384418488 15.500000000 --31.458978653 -36.458980560 15.500000000 --30.488407135 -36.488403320 15.500000000 --29.497472763 -36.497474670 15.500000000 --28.499593735 -36.499591827 15.500000000 --27.499954224 -36.499954224 15.500000000 --26.499996185 -36.499996185 15.500000000 --25.500000000 -36.500000000 15.500000000 --24.500000000 -36.500000000 15.500000000 --23.500000000 -36.500000000 15.500000000 --22.500000000 -36.500000000 15.500000000 --21.500000000 -36.500000000 15.500000000 --20.500000000 -36.500000000 15.500000000 --19.500000000 -36.500000000 15.500000000 --18.500000000 -36.500000000 15.500000000 --17.500000000 -36.500000000 15.500000000 --16.500000000 -36.500000000 15.500000000 --15.500000000 -36.500000000 15.500000000 --14.500000000 -36.500000000 15.500000000 --13.500000000 -36.500000000 15.500000000 --12.500000000 -36.500000000 15.500000000 --11.500000000 -36.500000000 15.500000000 --10.500000000 -36.500000000 15.500000000 --9.500000000 -36.500000000 15.500000000 --8.500000000 -36.500000000 15.500000000 --7.500000000 -36.500000000 15.500000000 --6.500000000 -36.500000000 15.500000000 --5.500000000 -36.500000000 15.500000000 --4.500000000 -36.500000000 15.500000000 --3.500000000 -36.500000000 15.500000000 --2.500000000 -36.500000000 15.500000000 --1.500000000 -36.500000000 15.500000000 --0.500000000 -36.500000000 15.500000000 -0.500000000 -36.500000000 15.500000000 -1.500000000 -36.500000000 15.500000000 -2.500000000 -36.500000000 15.500000000 -3.500000000 -36.500000000 15.500000000 -4.500000000 -36.500000000 15.500000000 -5.500000000 -36.500000000 15.500000000 -6.500000000 -36.500000000 15.500000000 -7.500000000 -36.500000000 15.500000000 -8.500000000 -36.500000000 15.500000000 -9.500000000 -36.500000000 15.500000000 -10.500000000 -36.500000000 15.500000000 -11.500000000 -36.500000000 15.500000000 -12.500000000 -36.500000000 15.500000000 -13.500000000 -36.500000000 15.500000000 -14.500000000 -36.500000000 15.500000000 -15.500000000 -36.500000000 15.500000000 -16.500000000 -36.500000000 15.500000000 -17.500000000 -36.500000000 15.500000000 -18.500000000 -36.500000000 15.500000000 -19.500000000 -36.500000000 15.500000000 -20.500000000 -36.500000000 15.500000000 -21.500000000 -36.500000000 15.500000000 -22.500000000 -36.500000000 15.500000000 -23.500000000 -36.500000000 15.500000000 -24.500000000 -36.500000000 15.500000000 -25.499996185 -36.499996185 15.500000000 -26.499954224 -36.499954224 15.500000000 -27.499591827 -36.499591827 15.500000000 -28.497470856 -36.497467041 15.500000000 -29.488407135 -36.488403320 15.500000000 -30.458978653 -36.458980560 15.500000000 -31.384418488 -36.384422302 15.500000000 -32.233222961 -36.233222961 15.500000000 -32.981101990 -35.981101990 15.500000000 --35.167964935 -35.167964935 15.500000000 --34.622871399 -35.622871399 15.500000000 -33.622871399 -35.622871399 15.500000000 -34.167964935 -35.167964935 15.500000000 --35.981101990 -33.981101990 15.500000000 --35.622871399 -34.622871399 15.500000000 -34.622871399 -34.622871399 15.500000000 -34.981101990 -33.981101990 15.500000000 --36.233222961 -33.233222961 15.500000000 -35.233222961 -33.233226776 15.500000000 --36.384418488 -32.384422302 15.500000000 -35.384418488 -32.384422302 15.500000000 --36.458976746 -31.458978653 15.500000000 -35.458980560 -31.458978653 15.500000000 --36.488403320 -30.488407135 
15.500000000 -35.488403320 -30.488407135 15.500000000 --36.497467041 -29.497472763 15.500000000 -35.497474670 -29.497472763 15.500000000 --36.499591827 -28.499593735 15.500000000 -35.499591827 -28.499593735 15.500000000 --36.499954224 -27.499954224 15.500000000 -35.499954224 -27.499954224 15.500000000 --36.499996185 -26.499996185 15.500000000 -35.499996185 -26.499996185 15.500000000 --36.500000000 -25.500000000 15.500000000 -35.500000000 -25.500000000 15.500000000 --36.500000000 -24.500000000 15.500000000 -35.500000000 -24.500000000 15.500000000 --36.500000000 -23.500000000 15.500000000 -35.500000000 -23.500000000 15.500000000 --36.500000000 -22.500000000 15.500000000 -35.500000000 -22.500000000 15.500000000 --36.500000000 -21.500000000 15.500000000 -35.500000000 -21.500000000 15.500000000 --36.500000000 -20.500000000 15.500000000 -35.500000000 -20.500000000 15.500000000 --36.500000000 -19.500000000 15.500000000 -35.500000000 -19.500000000 15.500000000 --36.500000000 -18.500000000 15.500000000 -35.500000000 -18.500000000 15.500000000 --36.500000000 -17.500000000 15.500000000 -35.500000000 -17.500000000 15.500000000 --36.500000000 -16.500000000 15.500000000 -35.500000000 -16.500000000 15.500000000 --36.500000000 -15.500000000 15.500000000 -35.500000000 -15.500000000 15.500000000 --36.500000000 -14.500000000 15.500000000 -35.500000000 -14.500000000 15.500000000 --36.500000000 -13.500000000 15.500000000 -35.500000000 -13.500000000 15.500000000 --36.500000000 -12.500000000 15.500000000 -35.500000000 -12.500000000 15.500000000 --36.500000000 -11.500000000 15.500000000 -35.500000000 -11.500000000 15.500000000 --36.500000000 -10.500000000 15.500000000 -35.500000000 -10.500000000 15.500000000 --36.500000000 -9.500000000 15.500000000 -35.500000000 -9.500000000 15.500000000 --36.500000000 -8.500000000 15.500000000 -35.500000000 -8.500000000 15.500000000 --36.500000000 -7.500000000 15.500000000 -35.500000000 -7.500000000 15.500000000 --36.500000000 -6.500000000 15.500000000 -35.500000000 -6.500000000 15.500000000 --36.500000000 -5.500000000 15.500000000 -35.500000000 -5.500000000 15.500000000 --36.500000000 -4.500000000 15.500000000 -35.500000000 -4.500000000 15.500000000 --36.500000000 -3.500000000 15.500000000 -35.500000000 -3.500000000 15.500000000 --36.500000000 -2.500000000 15.500000000 -35.500000000 -2.500000000 15.500000000 --36.500000000 -1.500000000 15.500000000 -35.500000000 -1.500000000 15.500000000 --36.500000000 -0.500000000 15.500000000 -35.500000000 -0.500000000 15.500000000 --36.500000000 0.500000000 15.500000000 -35.500000000 0.500000000 15.500000000 --36.500000000 1.500000000 15.500000000 -35.500000000 1.500000000 15.500000000 --36.500000000 2.500000000 15.500000000 -35.500000000 2.500000000 15.500000000 --36.500000000 3.500000000 15.500000000 -35.500000000 3.500000000 15.500000000 --36.500000000 4.500000000 15.500000000 -35.500000000 4.500000000 15.500000000 --36.500000000 5.500000000 15.500000000 -35.500000000 5.500000000 15.500000000 --36.500000000 6.500000000 15.500000000 -35.500000000 6.500000000 15.500000000 --36.500000000 7.500000000 15.500000000 -35.500000000 7.500000000 15.500000000 --36.500000000 8.500000000 15.500000000 -35.500000000 8.500000000 15.500000000 --36.500000000 9.500000000 15.500000000 -35.500000000 9.500000000 15.500000000 --36.500000000 10.500000000 15.500000000 -35.500000000 10.500000000 15.500000000 --36.500000000 11.500000000 15.500000000 -35.500000000 11.500000000 15.500000000 --36.500000000 12.500000000 15.500000000 -35.500000000 12.500000000 
[diff data hunk: coordinate triples (x y z), one record per line in the original patch, covering z-slices 15.5 through 21.5 with x/y values ranging from about -36.5 to 35.5; the raw numeric rows are not reproduced here]
35.500000000 21.500000000 -20.500000000 35.500000000 21.500000000 -21.500000000 35.500000000 21.500000000 -22.500000000 35.500000000 21.500000000 -23.500000000 35.500000000 21.500000000 -24.500000000 35.500000000 21.500000000 -25.499996185 35.499996185 21.500000000 -26.499954224 35.499954224 21.500000000 -27.499591827 35.499591827 21.500000000 -28.497470856 35.497474670 21.500000000 -29.488407135 35.488403320 21.500000000 -30.458978653 35.458980560 21.500000000 -31.384418488 35.384422302 21.500000000 -32.233222961 35.233222961 21.500000000 -32.981101990 34.981101990 21.500000000 --33.981101990 -35.981101990 22.500000000 --33.233226776 -36.233222961 22.500000000 --32.384422302 -36.384418488 22.500000000 --31.458978653 -36.458980560 22.500000000 --30.488407135 -36.488403320 22.500000000 --29.497472763 -36.497474670 22.500000000 --28.499593735 -36.499591827 22.500000000 --27.499954224 -36.499954224 22.500000000 --26.499996185 -36.499996185 22.500000000 --25.500000000 -36.500000000 22.500000000 --24.500000000 -36.500000000 22.500000000 --23.500000000 -36.500000000 22.500000000 --22.500000000 -36.500000000 22.500000000 --21.500000000 -36.500000000 22.500000000 --20.500000000 -36.500000000 22.500000000 --19.500000000 -36.500000000 22.500000000 --18.500000000 -36.500000000 22.500000000 --17.500000000 -36.500000000 22.500000000 --16.500000000 -36.500000000 22.500000000 --15.500000000 -36.500000000 22.500000000 --14.500000000 -36.500000000 22.500000000 --13.500000000 -36.500000000 22.500000000 --12.500000000 -36.500000000 22.500000000 --11.500000000 -36.500000000 22.500000000 --10.500000000 -36.500000000 22.500000000 --9.500000000 -36.500000000 22.500000000 --8.500000000 -36.500000000 22.500000000 --7.500000000 -36.500000000 22.500000000 --6.500000000 -36.500000000 22.500000000 --5.500000000 -36.500000000 22.500000000 --4.500000000 -36.500000000 22.500000000 --3.500000000 -36.500000000 22.500000000 --2.500000000 -36.500000000 22.500000000 --1.500000000 -36.500000000 22.500000000 --0.500000000 -36.500000000 22.500000000 -0.500000000 -36.500000000 22.500000000 -1.500000000 -36.500000000 22.500000000 -2.500000000 -36.500000000 22.500000000 -3.500000000 -36.500000000 22.500000000 -4.500000000 -36.500000000 22.500000000 -5.500000000 -36.500000000 22.500000000 -6.500000000 -36.500000000 22.500000000 -7.500000000 -36.500000000 22.500000000 -8.500000000 -36.500000000 22.500000000 -9.500000000 -36.500000000 22.500000000 -10.500000000 -36.500000000 22.500000000 -11.500000000 -36.500000000 22.500000000 -12.500000000 -36.500000000 22.500000000 -13.500000000 -36.500000000 22.500000000 -14.500000000 -36.500000000 22.500000000 -15.500000000 -36.500000000 22.500000000 -16.500000000 -36.500000000 22.500000000 -17.500000000 -36.500000000 22.500000000 -18.500000000 -36.500000000 22.500000000 -19.500000000 -36.500000000 22.500000000 -20.500000000 -36.500000000 22.500000000 -21.500000000 -36.500000000 22.500000000 -22.500000000 -36.500000000 22.500000000 -23.500000000 -36.500000000 22.500000000 -24.500000000 -36.500000000 22.500000000 -25.499996185 -36.499996185 22.500000000 -26.499954224 -36.499954224 22.500000000 -27.499591827 -36.499591827 22.500000000 -28.497470856 -36.497467041 22.500000000 -29.488407135 -36.488403320 22.500000000 -30.458978653 -36.458980560 22.500000000 -31.384418488 -36.384422302 22.500000000 -32.233222961 -36.233222961 22.500000000 -32.981101990 -35.981101990 22.500000000 --35.167964935 -35.167964935 22.500000000 --34.622871399 -35.622871399 22.500000000 -33.622871399 -35.622871399 22.500000000 
-34.167964935 -35.167964935 22.500000000 --35.981101990 -33.981101990 22.500000000 --35.622871399 -34.622871399 22.500000000 -34.622871399 -34.622871399 22.500000000 -34.981101990 -33.981101990 22.500000000 --36.233222961 -33.233222961 22.500000000 -35.233222961 -33.233226776 22.500000000 --36.384418488 -32.384422302 22.500000000 -35.384418488 -32.384422302 22.500000000 --36.458976746 -31.458978653 22.500000000 -35.458980560 -31.458978653 22.500000000 --36.488403320 -30.488407135 22.500000000 -35.488403320 -30.488407135 22.500000000 --36.497467041 -29.497472763 22.500000000 -35.497474670 -29.497472763 22.500000000 --36.499591827 -28.499593735 22.500000000 -35.499591827 -28.499593735 22.500000000 --36.499954224 -27.499954224 22.500000000 -35.499954224 -27.499954224 22.500000000 --36.499996185 -26.499996185 22.500000000 -35.499996185 -26.499996185 22.500000000 --36.500000000 -25.500000000 22.500000000 -35.500000000 -25.500000000 22.500000000 --36.500000000 -24.500000000 22.500000000 -35.500000000 -24.500000000 22.500000000 --36.500000000 -23.500000000 22.500000000 -35.500000000 -23.500000000 22.500000000 --36.500000000 -22.500000000 22.500000000 -35.500000000 -22.500000000 22.500000000 --36.500000000 -21.500000000 22.500000000 -35.500000000 -21.500000000 22.500000000 --36.500000000 -20.500000000 22.500000000 -35.500000000 -20.500000000 22.500000000 --36.500000000 -19.500000000 22.500000000 -35.500000000 -19.500000000 22.500000000 --36.500000000 -18.500000000 22.500000000 -35.500000000 -18.500000000 22.500000000 --36.500000000 -17.500000000 22.500000000 -35.500000000 -17.500000000 22.500000000 --36.500000000 -16.500000000 22.500000000 -35.500000000 -16.500000000 22.500000000 --36.500000000 -15.500000000 22.500000000 -35.500000000 -15.500000000 22.500000000 --36.500000000 -14.500000000 22.500000000 -35.500000000 -14.500000000 22.500000000 --36.500000000 -13.500000000 22.500000000 -35.500000000 -13.500000000 22.500000000 --36.500000000 -12.500000000 22.500000000 -35.500000000 -12.500000000 22.500000000 --36.500000000 -11.500000000 22.500000000 -35.500000000 -11.500000000 22.500000000 --36.500000000 -10.500000000 22.500000000 -35.500000000 -10.500000000 22.500000000 --36.500000000 -9.500000000 22.500000000 -35.500000000 -9.500000000 22.500000000 --36.500000000 -8.500000000 22.500000000 -35.500000000 -8.500000000 22.500000000 --36.500000000 -7.500000000 22.500000000 -35.500000000 -7.500000000 22.500000000 --36.500000000 -6.500000000 22.500000000 -35.500000000 -6.500000000 22.500000000 --36.500000000 -5.500000000 22.500000000 -35.500000000 -5.500000000 22.500000000 --36.500000000 -4.500000000 22.500000000 -35.500000000 -4.500000000 22.500000000 --36.500000000 -3.500000000 22.500000000 -35.500000000 -3.500000000 22.500000000 --36.500000000 -2.500000000 22.500000000 -35.500000000 -2.500000000 22.500000000 --36.500000000 -1.500000000 22.500000000 -35.500000000 -1.500000000 22.500000000 --36.500000000 -0.500000000 22.500000000 -35.500000000 -0.500000000 22.500000000 --36.500000000 0.500000000 22.500000000 -35.500000000 0.500000000 22.500000000 --36.500000000 1.500000000 22.500000000 -35.500000000 1.500000000 22.500000000 --36.500000000 2.500000000 22.500000000 -35.500000000 2.500000000 22.500000000 --36.500000000 3.500000000 22.500000000 -35.500000000 3.500000000 22.500000000 --36.500000000 4.500000000 22.500000000 -35.500000000 4.500000000 22.500000000 --36.500000000 5.500000000 22.500000000 -35.500000000 5.500000000 22.500000000 --36.500000000 6.500000000 22.500000000 -35.500000000 6.500000000 
22.500000000 --36.500000000 7.500000000 22.500000000 -35.500000000 7.500000000 22.500000000 --36.500000000 8.500000000 22.500000000 -35.500000000 8.500000000 22.500000000 --36.500000000 9.500000000 22.500000000 -35.500000000 9.500000000 22.500000000 --36.500000000 10.500000000 22.500000000 -35.500000000 10.500000000 22.500000000 --36.500000000 11.500000000 22.500000000 -35.500000000 11.500000000 22.500000000 --36.500000000 12.500000000 22.500000000 -35.500000000 12.500000000 22.500000000 --36.500000000 13.500000000 22.500000000 -35.500000000 13.500000000 22.500000000 --36.500000000 14.500000000 22.500000000 -35.500000000 14.500000000 22.500000000 --36.500000000 15.500000000 22.500000000 -35.500000000 15.500000000 22.500000000 --36.500000000 16.500000000 22.500000000 -35.500000000 16.500000000 22.500000000 --36.500000000 17.500000000 22.500000000 -35.500000000 17.500000000 22.500000000 --36.500000000 18.500000000 22.500000000 -35.500000000 18.500000000 22.500000000 --36.500000000 19.500000000 22.500000000 -35.500000000 19.500000000 22.500000000 --36.500000000 20.500000000 22.500000000 -35.500000000 20.500000000 22.500000000 --36.500000000 21.500000000 22.500000000 -35.500000000 21.500000000 22.500000000 --36.500000000 22.500000000 22.500000000 -35.500000000 22.500000000 22.500000000 --36.500000000 23.500000000 22.500000000 -35.500000000 23.500000000 22.500000000 --36.500000000 24.500000000 22.500000000 -35.500000000 24.500000000 22.500000000 --36.499996185 25.499996185 22.500000000 -35.499996185 25.499996185 22.500000000 --36.499954224 26.499954224 22.500000000 -35.499954224 26.499954224 22.500000000 --36.499591827 27.499591827 22.500000000 -35.499591827 27.499591827 22.500000000 --36.497474670 28.497470856 22.500000000 -35.497467041 28.497470856 22.500000000 --36.488403320 29.488407135 22.500000000 -35.488403320 29.488407135 22.500000000 --36.458980560 30.458978653 22.500000000 -35.458980560 30.458978653 22.500000000 --36.384422302 31.384418488 22.500000000 -35.384422302 31.384418488 22.500000000 --36.233222961 32.233222961 22.500000000 -35.233222961 32.233222961 22.500000000 --35.981101990 32.981101990 22.500000000 --35.622871399 33.622871399 22.500000000 -34.622871399 33.622871399 22.500000000 -34.981101990 32.981101990 22.500000000 --35.167964935 34.167964935 22.500000000 --34.622871399 34.622871399 22.500000000 -33.622871399 34.622871399 22.500000000 -34.167964935 34.167964935 22.500000000 --33.981101990 34.981101990 22.500000000 --33.233222961 35.233222961 22.500000000 --32.384422302 35.384418488 22.500000000 --31.458978653 35.458976746 22.500000000 --30.488407135 35.488403320 22.500000000 --29.497472763 35.497467041 22.500000000 --28.499593735 35.499591827 22.500000000 --27.499954224 35.499954224 22.500000000 --26.499996185 35.499996185 22.500000000 --25.500000000 35.500000000 22.500000000 --24.500000000 35.500000000 22.500000000 --23.500000000 35.500000000 22.500000000 --22.500000000 35.500000000 22.500000000 --21.500000000 35.500000000 22.500000000 --20.500000000 35.500000000 22.500000000 --19.500000000 35.500000000 22.500000000 --18.500000000 35.500000000 22.500000000 --17.500000000 35.500000000 22.500000000 --16.500000000 35.500000000 22.500000000 --15.500000000 35.500000000 22.500000000 --14.500000000 35.500000000 22.500000000 --13.500000000 35.500000000 22.500000000 --12.500000000 35.500000000 22.500000000 --11.500000000 35.500000000 22.500000000 --10.500000000 35.500000000 22.500000000 --9.500000000 35.500000000 22.500000000 --8.500000000 35.500000000 22.500000000 
--7.500000000 35.500000000 22.500000000 --6.500000000 35.500000000 22.500000000 --5.500000000 35.500000000 22.500000000 --4.500000000 35.500000000 22.500000000 --3.500000000 35.500000000 22.500000000 --2.500000000 35.500000000 22.500000000 --1.500000000 35.500000000 22.500000000 --0.500000000 35.500000000 22.500000000 -0.500000000 35.500000000 22.500000000 -1.500000000 35.500000000 22.500000000 -2.500000000 35.500000000 22.500000000 -3.500000000 35.500000000 22.500000000 -4.500000000 35.500000000 22.500000000 -5.500000000 35.500000000 22.500000000 -6.500000000 35.500000000 22.500000000 -7.500000000 35.500000000 22.500000000 -8.500000000 35.500000000 22.500000000 -9.500000000 35.500000000 22.500000000 -10.500000000 35.500000000 22.500000000 -11.500000000 35.500000000 22.500000000 -12.500000000 35.500000000 22.500000000 -13.500000000 35.500000000 22.500000000 -14.500000000 35.500000000 22.500000000 -15.500000000 35.500000000 22.500000000 -16.500000000 35.500000000 22.500000000 -17.500000000 35.500000000 22.500000000 -18.500000000 35.500000000 22.500000000 -19.500000000 35.500000000 22.500000000 -20.500000000 35.500000000 22.500000000 -21.500000000 35.500000000 22.500000000 -22.500000000 35.500000000 22.500000000 -23.500000000 35.500000000 22.500000000 -24.500000000 35.500000000 22.500000000 -25.499996185 35.499996185 22.500000000 -26.499954224 35.499954224 22.500000000 -27.499591827 35.499591827 22.500000000 -28.497470856 35.497474670 22.500000000 -29.488407135 35.488403320 22.500000000 -30.458978653 35.458980560 22.500000000 -31.384418488 35.384422302 22.500000000 -32.233222961 35.233222961 22.500000000 -32.981101990 34.981101990 22.500000000 --33.981101990 -35.981101990 23.499998093 --33.233226776 -36.233222961 23.500000000 --32.384422302 -36.384418488 23.500000000 --31.458978653 -36.458980560 23.500000000 --30.488407135 -36.488403320 23.500000000 --29.497472763 -36.497474670 23.500000000 --28.499593735 -36.499591827 23.500000000 --27.499954224 -36.499954224 23.500000000 --26.499996185 -36.499996185 23.500000000 --25.500000000 -36.500000000 23.500000000 --24.500000000 -36.500000000 23.500000000 --23.500000000 -36.500000000 23.500000000 --22.500000000 -36.500000000 23.500000000 --21.500000000 -36.500000000 23.500000000 --20.500000000 -36.500000000 23.500000000 --19.500000000 -36.500000000 23.500000000 --18.500000000 -36.500000000 23.500000000 --17.500000000 -36.500000000 23.500000000 --16.500000000 -36.500000000 23.500000000 --15.500000000 -36.500000000 23.500000000 --14.500000000 -36.500000000 23.500000000 --13.500000000 -36.500000000 23.500000000 --12.500000000 -36.500000000 23.500000000 --11.500000000 -36.500000000 23.500000000 --10.500000000 -36.500000000 23.500000000 --9.500000000 -36.500000000 23.500000000 --8.500000000 -36.500000000 23.500000000 --7.500000000 -36.500000000 23.500000000 --6.500000000 -36.500000000 23.500000000 --5.500000000 -36.500000000 23.500000000 --4.500000000 -36.500000000 23.500000000 --3.500000000 -36.500000000 23.500000000 --2.500000000 -36.500000000 23.500000000 --1.500000000 -36.500000000 23.500000000 --0.500000000 -36.500000000 23.500000000 -0.500000000 -36.500000000 23.500000000 -1.500000000 -36.500000000 23.500000000 -2.500000000 -36.500000000 23.500000000 -3.500000000 -36.500000000 23.500000000 -4.500000000 -36.500000000 23.500000000 -5.500000000 -36.500000000 23.500000000 -6.500000000 -36.500000000 23.500000000 -7.500000000 -36.500000000 23.500000000 -8.500000000 -36.500000000 23.500000000 -9.500000000 -36.500000000 23.500000000 -10.500000000 
-36.500000000 23.500000000 -11.500000000 -36.500000000 23.500000000 -12.500000000 -36.500000000 23.500000000 -13.500000000 -36.500000000 23.500000000 -14.500000000 -36.500000000 23.500000000 -15.500000000 -36.500000000 23.500000000 -16.500000000 -36.500000000 23.500000000 -17.500000000 -36.500000000 23.500000000 -18.500000000 -36.500000000 23.500000000 -19.500000000 -36.500000000 23.500000000 -20.500000000 -36.500000000 23.500000000 -21.500000000 -36.500000000 23.500000000 -22.500000000 -36.500000000 23.500000000 -23.500000000 -36.500000000 23.500000000 -24.500000000 -36.500000000 23.500000000 -25.499996185 -36.499996185 23.500000000 -26.499954224 -36.499954224 23.500000000 -27.499591827 -36.499591827 23.500000000 -28.497470856 -36.497467041 23.500000000 -29.488407135 -36.488403320 23.500000000 -30.458978653 -36.458980560 23.500000000 -31.384418488 -36.384422302 23.500000000 -32.233222961 -36.233222961 23.500000000 -32.981101990 -35.981101990 23.499998093 --35.167964935 -35.167964935 23.500000000 --34.622867584 -35.622867584 23.500000000 -33.622871399 -35.622871399 23.500000000 -34.167961121 -35.167961121 23.500000000 --35.981101990 -33.981101990 23.499998093 --35.622867584 -34.622867584 23.500000000 -34.622867584 -34.622867584 23.500000000 -34.981101990 -33.981101990 23.499998093 --36.233222961 -33.233222961 23.500000000 -35.233222961 -33.233226776 23.500000000 --36.384418488 -32.384422302 23.500000000 -35.384418488 -32.384422302 23.500000000 --36.458976746 -31.458978653 23.500000000 -35.458980560 -31.458978653 23.500000000 --36.488403320 -30.488407135 23.500000000 -35.488403320 -30.488407135 23.500000000 --36.497467041 -29.497472763 23.500000000 -35.497474670 -29.497472763 23.500000000 --36.499591827 -28.499593735 23.500000000 -35.499591827 -28.499593735 23.500000000 --36.499954224 -27.499954224 23.500000000 -35.499954224 -27.499954224 23.500000000 --36.499996185 -26.499996185 23.500000000 -35.499996185 -26.499996185 23.500000000 --36.500000000 -25.500000000 23.500000000 -35.500000000 -25.500000000 23.500000000 --36.500000000 -24.500000000 23.500000000 -35.500000000 -24.500000000 23.500000000 --36.500000000 -23.500000000 23.500000000 -35.500000000 -23.500000000 23.500000000 --36.500000000 -22.500000000 23.500000000 -35.500000000 -22.500000000 23.500000000 --36.500000000 -21.500000000 23.500000000 -35.500000000 -21.500000000 23.500000000 --36.500000000 -20.500000000 23.500000000 -35.500000000 -20.500000000 23.500000000 --36.500000000 -19.500000000 23.500000000 -35.500000000 -19.500000000 23.500000000 --36.500000000 -18.500000000 23.500000000 -35.500000000 -18.500000000 23.500000000 --36.500000000 -17.500000000 23.500000000 -35.500000000 -17.500000000 23.500000000 --36.500000000 -16.500000000 23.500000000 -35.500000000 -16.500000000 23.500000000 --36.500000000 -15.500000000 23.500000000 -35.500000000 -15.500000000 23.500000000 --36.500000000 -14.500000000 23.500000000 -35.500000000 -14.500000000 23.500000000 --36.500000000 -13.500000000 23.500000000 -35.500000000 -13.500000000 23.500000000 --36.500000000 -12.500000000 23.500000000 -35.500000000 -12.500000000 23.500000000 --36.500000000 -11.500000000 23.500000000 -35.500000000 -11.500000000 23.500000000 --36.500000000 -10.500000000 23.500000000 -35.500000000 -10.500000000 23.500000000 --36.500000000 -9.500000000 23.500000000 -35.500000000 -9.500000000 23.500000000 --36.500000000 -8.500000000 23.500000000 -35.500000000 -8.500000000 23.500000000 --36.500000000 -7.500000000 23.500000000 -35.500000000 -7.500000000 23.500000000 --36.500000000 
-6.500000000 23.500000000 -35.500000000 -6.500000000 23.500000000 --36.500000000 -5.500000000 23.500000000 -35.500000000 -5.500000000 23.500000000 --36.500000000 -4.500000000 23.500000000 -35.500000000 -4.500000000 23.500000000 --36.500000000 -3.500000000 23.500000000 -35.500000000 -3.500000000 23.500000000 --36.500000000 -2.500000000 23.500000000 -35.500000000 -2.500000000 23.500000000 --36.500000000 -1.500000000 23.500000000 -35.500000000 -1.500000000 23.500000000 --36.500000000 -0.500000000 23.500000000 -35.500000000 -0.500000000 23.500000000 --36.500000000 0.500000000 23.500000000 -35.500000000 0.500000000 23.500000000 --36.500000000 1.500000000 23.500000000 -35.500000000 1.500000000 23.500000000 --36.500000000 2.500000000 23.500000000 -35.500000000 2.500000000 23.500000000 --36.500000000 3.500000000 23.500000000 -35.500000000 3.500000000 23.500000000 --36.500000000 4.500000000 23.500000000 -35.500000000 4.500000000 23.500000000 --36.500000000 5.500000000 23.500000000 -35.500000000 5.500000000 23.500000000 --36.500000000 6.500000000 23.500000000 -35.500000000 6.500000000 23.500000000 --36.500000000 7.500000000 23.500000000 -35.500000000 7.500000000 23.500000000 --36.500000000 8.500000000 23.500000000 -35.500000000 8.500000000 23.500000000 --36.500000000 9.500000000 23.500000000 -35.500000000 9.500000000 23.500000000 --36.500000000 10.500000000 23.500000000 -35.500000000 10.500000000 23.500000000 --36.500000000 11.500000000 23.500000000 -35.500000000 11.500000000 23.500000000 --36.500000000 12.500000000 23.500000000 -35.500000000 12.500000000 23.500000000 --36.500000000 13.500000000 23.500000000 -35.500000000 13.500000000 23.500000000 --36.500000000 14.500000000 23.500000000 -35.500000000 14.500000000 23.500000000 --36.500000000 15.500000000 23.500000000 -35.500000000 15.500000000 23.500000000 --36.500000000 16.500000000 23.500000000 -35.500000000 16.500000000 23.500000000 --36.500000000 17.500000000 23.500000000 -35.500000000 17.500000000 23.500000000 --36.500000000 18.500000000 23.500000000 -35.500000000 18.500000000 23.500000000 --36.500000000 19.500000000 23.500000000 -35.500000000 19.500000000 23.500000000 --36.500000000 20.500000000 23.500000000 -35.500000000 20.500000000 23.500000000 --36.500000000 21.500000000 23.500000000 -35.500000000 21.500000000 23.500000000 --36.500000000 22.500000000 23.500000000 -35.500000000 22.500000000 23.500000000 --36.500000000 23.500000000 23.500000000 -35.500000000 23.500000000 23.500000000 --36.500000000 24.500000000 23.500000000 -35.500000000 24.500000000 23.500000000 --36.499996185 25.499996185 23.500000000 -35.499996185 25.499996185 23.500000000 --36.499954224 26.499954224 23.500000000 -35.499954224 26.499954224 23.500000000 --36.499591827 27.499591827 23.500000000 -35.499591827 27.499591827 23.500000000 --36.497474670 28.497470856 23.500000000 -35.497467041 28.497470856 23.500000000 --36.488403320 29.488407135 23.500000000 -35.488403320 29.488407135 23.500000000 --36.458980560 30.458978653 23.500000000 -35.458980560 30.458978653 23.500000000 --36.384422302 31.384418488 23.500000000 -35.384422302 31.384418488 23.500000000 --36.233222961 32.233222961 23.500000000 -35.233222961 32.233222961 23.500000000 --35.981101990 32.981101990 23.499998093 --35.622867584 33.622867584 23.500000000 -34.622871399 33.622867584 23.500000000 -34.981101990 32.981101990 23.499998093 --35.167964935 34.167964935 23.500000000 --34.622867584 34.622867584 23.500000000 -33.622867584 34.622867584 23.500000000 -34.167961121 34.167961121 23.500000000 --33.981101990 
34.981101990 23.499998093 --33.233222961 35.233222961 23.500000000 --32.384422302 35.384418488 23.500000000 --31.458978653 35.458976746 23.500000000 --30.488407135 35.488403320 23.500000000 --29.497472763 35.497467041 23.500000000 --28.499593735 35.499591827 23.500000000 --27.499954224 35.499954224 23.500000000 --26.499996185 35.499996185 23.500000000 --25.500000000 35.500000000 23.500000000 --24.500000000 35.500000000 23.500000000 --23.500000000 35.500000000 23.500000000 --22.500000000 35.500000000 23.500000000 --21.500000000 35.500000000 23.500000000 --20.500000000 35.500000000 23.500000000 --19.500000000 35.500000000 23.500000000 --18.500000000 35.500000000 23.500000000 --17.500000000 35.500000000 23.500000000 --16.500000000 35.500000000 23.500000000 --15.500000000 35.500000000 23.500000000 --14.500000000 35.500000000 23.500000000 --13.500000000 35.500000000 23.500000000 --12.500000000 35.500000000 23.500000000 --11.500000000 35.500000000 23.500000000 --10.500000000 35.500000000 23.500000000 --9.500000000 35.500000000 23.500000000 --8.500000000 35.500000000 23.500000000 --7.500000000 35.500000000 23.500000000 --6.500000000 35.500000000 23.500000000 --5.500000000 35.500000000 23.500000000 --4.500000000 35.500000000 23.500000000 --3.500000000 35.500000000 23.500000000 --2.500000000 35.500000000 23.500000000 --1.500000000 35.500000000 23.500000000 --0.500000000 35.500000000 23.500000000 -0.500000000 35.500000000 23.500000000 -1.500000000 35.500000000 23.500000000 -2.500000000 35.500000000 23.500000000 -3.500000000 35.500000000 23.500000000 -4.500000000 35.500000000 23.500000000 -5.500000000 35.500000000 23.500000000 -6.500000000 35.500000000 23.500000000 -7.500000000 35.500000000 23.500000000 -8.500000000 35.500000000 23.500000000 -9.500000000 35.500000000 23.500000000 -10.500000000 35.500000000 23.500000000 -11.500000000 35.500000000 23.500000000 -12.500000000 35.500000000 23.500000000 -13.500000000 35.500000000 23.500000000 -14.500000000 35.500000000 23.500000000 -15.500000000 35.500000000 23.500000000 -16.500000000 35.500000000 23.500000000 -17.500000000 35.500000000 23.500000000 -18.500000000 35.500000000 23.500000000 -19.500000000 35.500000000 23.500000000 -20.500000000 35.500000000 23.500000000 -21.500000000 35.500000000 23.500000000 -22.500000000 35.500000000 23.500000000 -23.500000000 35.500000000 23.500000000 -24.500000000 35.500000000 23.500000000 -25.499996185 35.499996185 23.500000000 -26.499954224 35.499954224 23.500000000 -27.499591827 35.499591827 23.500000000 -28.497470856 35.497474670 23.500000000 -29.488407135 35.488403320 23.500000000 -30.458978653 35.458980560 23.500000000 -31.384418488 35.384422302 23.500000000 -32.233222961 35.233222961 23.500000000 -32.981101990 34.981101990 23.499998093 --33.981086731 -35.981086731 24.499979019 --33.233219147 -36.233203888 24.499984741 --32.384422302 -36.384407043 24.499996185 --31.458978653 -36.458972931 24.500000000 --30.488407135 -36.488403320 24.500000000 --29.497472763 -36.497474670 24.500000000 --28.499593735 -36.499591827 24.500000000 --27.499954224 -36.499954224 24.500000000 --26.499996185 -36.499996185 24.500000000 --25.500000000 -36.500000000 24.500000000 --24.500000000 -36.500000000 24.500000000 --23.500000000 -36.500000000 24.500000000 --22.500000000 -36.500000000 24.500000000 --21.500000000 -36.500000000 24.500000000 --20.500000000 -36.500000000 24.500000000 --19.500000000 -36.500000000 24.500000000 --18.500000000 -36.500000000 24.500000000 --17.500000000 -36.500000000 24.500000000 --16.500000000 -36.500000000 
24.500000000 --15.500000000 -36.500000000 24.500000000 --14.500000000 -36.500000000 24.500000000 --13.500000000 -36.500000000 24.500000000 --12.500000000 -36.500000000 24.500000000 --11.500000000 -36.500000000 24.500000000 --10.500000000 -36.500000000 24.500000000 --9.500000000 -36.500000000 24.500000000 --8.500000000 -36.500000000 24.500000000 --7.500000000 -36.500000000 24.500000000 --6.500000000 -36.500000000 24.500000000 --5.500000000 -36.500000000 24.500000000 --4.500000000 -36.500000000 24.500000000 --3.500000000 -36.500000000 24.500000000 --2.500000000 -36.500000000 24.500000000 --1.500000000 -36.500000000 24.500000000 --0.500000000 -36.500000000 24.500000000 -0.500000000 -36.500000000 24.500000000 -1.500000000 -36.500000000 24.500000000 -2.500000000 -36.500000000 24.500000000 -3.500000000 -36.500000000 24.500000000 -4.500000000 -36.500000000 24.500000000 -5.500000000 -36.500000000 24.500000000 -6.500000000 -36.500000000 24.500000000 -7.500000000 -36.500000000 24.500000000 -8.500000000 -36.500000000 24.500000000 -9.500000000 -36.500000000 24.500000000 -10.500000000 -36.500000000 24.500000000 -11.500000000 -36.500000000 24.500000000 -12.500000000 -36.500000000 24.500000000 -13.500000000 -36.500000000 24.500000000 -14.500000000 -36.500000000 24.500000000 -15.500000000 -36.500000000 24.500000000 -16.500000000 -36.500000000 24.500000000 -17.500000000 -36.500000000 24.500000000 -18.500000000 -36.500000000 24.500000000 -19.500000000 -36.500000000 24.500000000 -20.500000000 -36.500000000 24.500000000 -21.500000000 -36.500000000 24.500000000 -22.500000000 -36.500000000 24.500000000 -23.500000000 -36.500000000 24.500000000 -24.500000000 -36.500000000 24.500000000 -25.499996185 -36.499996185 24.500000000 -26.499954224 -36.499954224 24.500000000 -27.499591827 -36.499591827 24.500000000 -28.497470856 -36.497467041 24.500000000 -29.488407135 -36.488403320 24.500000000 -30.458978653 -36.458976746 24.500000000 -31.384418488 -36.384407043 24.499996185 -32.233219147 -36.233207703 24.499988556 -32.981086731 -35.981086731 24.499979019 --35.167949677 -35.167949677 24.499992371 --34.622844696 -35.622856140 24.499986649 -33.622844696 -35.622856140 24.499986649 -34.167949677 -35.167949677 24.499992371 --35.981086731 -33.981086731 24.499979019 --35.622856140 -34.622844696 24.499988556 -34.622856140 -34.622844696 24.499988556 -34.981086731 -33.981086731 24.499979019 --36.233207703 -33.233219147 24.499984741 -35.233203888 -33.233219147 24.499984741 --36.384407043 -32.384422302 24.499996185 -35.384407043 -32.384422302 24.499996185 --36.458972931 -31.458978653 24.500000000 -35.458972931 -31.458978653 24.500000000 --36.488403320 -30.488407135 24.500000000 -35.488403320 -30.488407135 24.500000000 --36.497467041 -29.497472763 24.500000000 -35.497474670 -29.497472763 24.500000000 --36.499591827 -28.499593735 24.500000000 -35.499591827 -28.499593735 24.500000000 --36.499954224 -27.499954224 24.500000000 -35.499954224 -27.499954224 24.500000000 --36.499996185 -26.499996185 24.500000000 -35.499996185 -26.499996185 24.500000000 --36.500000000 -25.500000000 24.500000000 -35.500000000 -25.500000000 24.500000000 --36.500000000 -24.500000000 24.500000000 -35.500000000 -24.500000000 24.500000000 --36.500000000 -23.500000000 24.500000000 -35.500000000 -23.500000000 24.500000000 --36.500000000 -22.500000000 24.500000000 -35.500000000 -22.500000000 24.500000000 --36.500000000 -21.500000000 24.500000000 -35.500000000 -21.500000000 24.500000000 --36.500000000 -20.500000000 24.500000000 -35.500000000 -20.500000000 24.500000000 
--36.500000000 -19.500000000 24.500000000 -35.500000000 -19.500000000 24.500000000 --36.500000000 -18.500000000 24.500000000 -35.500000000 -18.500000000 24.500000000 --36.500000000 -17.500000000 24.500000000 -35.500000000 -17.500000000 24.500000000 --36.500000000 -16.500000000 24.500000000 -35.500000000 -16.500000000 24.500000000 --36.500000000 -15.500000000 24.500000000 -35.500000000 -15.500000000 24.500000000 --36.500000000 -14.500000000 24.500000000 -35.500000000 -14.500000000 24.500000000 --36.500000000 -13.500000000 24.500000000 -35.500000000 -13.500000000 24.500000000 --36.500000000 -12.500000000 24.500000000 -35.500000000 -12.500000000 24.500000000 --36.500000000 -11.500000000 24.500000000 -35.500000000 -11.500000000 24.500000000 --36.500000000 -10.500000000 24.500000000 -35.500000000 -10.500000000 24.500000000 --36.500000000 -9.500000000 24.500000000 -35.500000000 -9.500000000 24.500000000 --36.500000000 -8.500000000 24.500000000 -35.500000000 -8.500000000 24.500000000 --36.500000000 -7.500000000 24.500000000 -35.500000000 -7.500000000 24.500000000 --36.500000000 -6.500000000 24.500000000 -35.500000000 -6.500000000 24.500000000 --36.500000000 -5.500000000 24.500000000 -35.500000000 -5.500000000 24.500000000 --36.500000000 -4.500000000 24.500000000 -35.500000000 -4.500000000 24.500000000 --36.500000000 -3.500000000 24.500000000 -35.500000000 -3.500000000 24.500000000 --36.500000000 -2.500000000 24.500000000 -35.500000000 -2.500000000 24.500000000 --36.500000000 -1.500000000 24.500000000 -35.500000000 -1.500000000 24.500000000 --36.500000000 -0.500000000 24.500000000 -35.500000000 -0.500000000 24.500000000 --36.500000000 0.500000000 24.500000000 -35.500000000 0.500000000 24.500000000 --36.500000000 1.500000000 24.500000000 -35.500000000 1.500000000 24.500000000 --36.500000000 2.500000000 24.500000000 -35.500000000 2.500000000 24.500000000 --36.500000000 3.500000000 24.500000000 -35.500000000 3.500000000 24.500000000 --36.500000000 4.500000000 24.500000000 -35.500000000 4.500000000 24.500000000 --36.500000000 5.500000000 24.500000000 -35.500000000 5.500000000 24.500000000 --36.500000000 6.500000000 24.500000000 -35.500000000 6.500000000 24.500000000 --36.500000000 7.500000000 24.500000000 -35.500000000 7.500000000 24.500000000 --36.500000000 8.500000000 24.500000000 -35.500000000 8.500000000 24.500000000 --36.500000000 9.500000000 24.500000000 -35.500000000 9.500000000 24.500000000 --36.500000000 10.500000000 24.500000000 -35.500000000 10.500000000 24.500000000 --36.500000000 11.500000000 24.500000000 -35.500000000 11.500000000 24.500000000 --36.500000000 12.500000000 24.500000000 -35.500000000 12.500000000 24.500000000 --36.500000000 13.500000000 24.500000000 -35.500000000 13.500000000 24.500000000 --36.500000000 14.500000000 24.500000000 -35.500000000 14.500000000 24.500000000 --36.500000000 15.500000000 24.500000000 -35.500000000 15.500000000 24.500000000 --36.500000000 16.500000000 24.500000000 -35.500000000 16.500000000 24.500000000 --36.500000000 17.500000000 24.500000000 -35.500000000 17.500000000 24.500000000 --36.500000000 18.500000000 24.500000000 -35.500000000 18.500000000 24.500000000 --36.500000000 19.500000000 24.500000000 -35.500000000 19.500000000 24.500000000 --36.500000000 20.500000000 24.500000000 -35.500000000 20.500000000 24.500000000 --36.500000000 21.500000000 24.500000000 -35.500000000 21.500000000 24.500000000 --36.500000000 22.500000000 24.500000000 -35.500000000 22.500000000 24.500000000 --36.500000000 23.500000000 24.500000000 -35.500000000 23.500000000 
24.500000000 --36.500000000 24.500000000 24.500000000 -35.500000000 24.500000000 24.500000000 --36.499996185 25.499996185 24.500000000 -35.499996185 25.499996185 24.500000000 --36.499954224 26.499954224 24.500000000 -35.499954224 26.499954224 24.500000000 --36.499591827 27.499591827 24.500000000 -35.499591827 27.499591827 24.500000000 --36.497474670 28.497470856 24.500000000 -35.497467041 28.497470856 24.500000000 --36.488403320 29.488407135 24.500000000 -35.488403320 29.488407135 24.500000000 --36.458976746 30.458978653 24.500000000 -35.458976746 30.458978653 24.500000000 --36.384407043 31.384418488 24.499996185 -35.384407043 31.384418488 24.499996185 --36.233207703 32.233219147 24.499988556 -35.233207703 32.233219147 24.499988556 --35.981086731 32.981086731 24.499979019 --35.622856140 33.622844696 24.499986649 -34.622856140 33.622844696 24.499986649 -34.981086731 32.981086731 24.499979019 --35.167949677 34.167949677 24.499992371 --34.622844696 34.622856140 24.499988556 -33.622844696 34.622856140 24.499988556 -34.167949677 34.167949677 24.499992371 --33.981086731 34.981086731 24.499979019 --33.233219147 35.233207703 24.499984741 --32.384422302 35.384407043 24.499996185 --31.458978653 35.458972931 24.500000000 --30.488407135 35.488403320 24.500000000 --29.497472763 35.497467041 24.500000000 --28.499593735 35.499591827 24.500000000 --27.499954224 35.499954224 24.500000000 --26.499996185 35.499996185 24.500000000 --25.500000000 35.500000000 24.500000000 --24.500000000 35.500000000 24.500000000 --23.500000000 35.500000000 24.500000000 --22.500000000 35.500000000 24.500000000 --21.500000000 35.500000000 24.500000000 --20.500000000 35.500000000 24.500000000 --19.500000000 35.500000000 24.500000000 --18.500000000 35.500000000 24.500000000 --17.500000000 35.500000000 24.500000000 --16.500000000 35.500000000 24.500000000 --15.500000000 35.500000000 24.500000000 --14.500000000 35.500000000 24.500000000 --13.500000000 35.500000000 24.500000000 --12.500000000 35.500000000 24.500000000 --11.500000000 35.500000000 24.500000000 --10.500000000 35.500000000 24.500000000 --9.500000000 35.500000000 24.500000000 --8.500000000 35.500000000 24.500000000 --7.500000000 35.500000000 24.500000000 --6.500000000 35.500000000 24.500000000 --5.500000000 35.500000000 24.500000000 --4.500000000 35.500000000 24.500000000 --3.500000000 35.500000000 24.500000000 --2.500000000 35.500000000 24.500000000 --1.500000000 35.500000000 24.500000000 --0.500000000 35.500000000 24.500000000 -0.500000000 35.500000000 24.500000000 -1.500000000 35.500000000 24.500000000 -2.500000000 35.500000000 24.500000000 -3.500000000 35.500000000 24.500000000 -4.500000000 35.500000000 24.500000000 -5.500000000 35.500000000 24.500000000 -6.500000000 35.500000000 24.500000000 -7.500000000 35.500000000 24.500000000 -8.500000000 35.500000000 24.500000000 -9.500000000 35.500000000 24.500000000 -10.500000000 35.500000000 24.500000000 -11.500000000 35.500000000 24.500000000 -12.500000000 35.500000000 24.500000000 -13.500000000 35.500000000 24.500000000 -14.500000000 35.500000000 24.500000000 -15.500000000 35.500000000 24.500000000 -16.500000000 35.500000000 24.500000000 -17.500000000 35.500000000 24.500000000 -18.500000000 35.500000000 24.500000000 -19.500000000 35.500000000 24.500000000 -20.500000000 35.500000000 24.500000000 -21.500000000 35.500000000 24.500000000 -22.500000000 35.500000000 24.500000000 -23.500000000 35.500000000 24.500000000 -24.500000000 35.500000000 24.500000000 -25.499996185 35.499996185 24.500000000 -26.499954224 35.499954224 
24.500000000 -27.499591827 35.499591827 24.500000000 -28.497470856 35.497474670 24.500000000 -29.488407135 35.488403320 24.500000000 -30.458978653 35.458976746 24.500000000 -31.384418488 35.384407043 24.499996185 -32.233219147 35.233207703 24.499988556 -32.981086731 34.981086731 24.499979019 --33.980976105 -35.980957031 25.499824524 --33.233169556 -36.233074188 25.499874115 --32.384407043 -36.384307861 25.499948502 --31.458978653 -36.458930969 25.499988556 --30.488407135 -36.488388062 25.499996185 --29.497472763 -36.497470856 25.499996185 --28.499593735 -36.499588013 25.499996185 --27.499954224 -36.499950409 25.499996185 --26.499996185 -36.499992371 25.499996185 --25.500000000 -36.499996185 25.499996185 --24.500000000 -36.499996185 25.499996185 --23.500000000 -36.499996185 25.499996185 --22.500000000 -36.499996185 25.499996185 --21.500000000 -36.499996185 25.499996185 --20.500000000 -36.499996185 25.499996185 --19.500000000 -36.499996185 25.499996185 --18.500000000 -36.499996185 25.499996185 --17.500000000 -36.499996185 25.499996185 --16.500000000 -36.499996185 25.499996185 --15.500000000 -36.499996185 25.499996185 --14.500000000 -36.499996185 25.499996185 --13.500000000 -36.499996185 25.499996185 --12.500000000 -36.499996185 25.499996185 --11.500000000 -36.499996185 25.499996185 --10.500000000 -36.499996185 25.499996185 --9.500000000 -36.499996185 25.499996185 --8.500000000 -36.499996185 25.499996185 --7.500000000 -36.499996185 25.499996185 --6.500000000 -36.499996185 25.499996185 --5.500000000 -36.499996185 25.499996185 --4.500000000 -36.499996185 25.499996185 --3.500000000 -36.499996185 25.499996185 --2.500000000 -36.499996185 25.499996185 --1.500000000 -36.499996185 25.499996185 --0.500000000 -36.499996185 25.499996185 -0.500000000 -36.499996185 25.499996185 -1.500000000 -36.499996185 25.499996185 -2.500000000 -36.499996185 25.499996185 -3.500000000 -36.499996185 25.499996185 -4.500000000 -36.499996185 25.499996185 -5.500000000 -36.499996185 25.499996185 -6.500000000 -36.499996185 25.499996185 -7.500000000 -36.499996185 25.499996185 -8.500000000 -36.499996185 25.499996185 -9.500000000 -36.499996185 25.499996185 -10.500000000 -36.499996185 25.499996185 -11.500000000 -36.499996185 25.499996185 -12.500000000 -36.499996185 25.499996185 -13.500000000 -36.499996185 25.499996185 -14.500000000 -36.499996185 25.499996185 -15.500000000 -36.499996185 25.499996185 -16.500000000 -36.499996185 25.499996185 -17.500000000 -36.499996185 25.499996185 -18.500000000 -36.499996185 25.499996185 -19.500000000 -36.499996185 25.499996185 -20.500000000 -36.499996185 25.499996185 -21.500000000 -36.499996185 25.499996185 -22.500000000 -36.499996185 25.499996185 -23.500000000 -36.499996185 25.499996185 -24.500000000 -36.499996185 25.499996185 -25.499996185 -36.499992371 25.499996185 -26.499954224 -36.499950409 25.499996185 -27.499591827 -36.499588013 25.499996185 -28.497470856 -36.497467041 25.499996185 -29.488407135 -36.488391876 25.499996185 -30.458974838 -36.458934784 25.499988556 -31.384403229 -36.384307861 25.499948502 -32.233165741 -36.233070374 25.499874115 -32.980976105 -35.980957031 25.499826431 --35.167804718 -35.167808533 25.499902725 --34.622692108 -35.622734070 25.499866486 -33.622692108 -35.622734070 25.499866486 -34.167808533 -35.167804718 25.499902725 --35.980957031 -33.980976105 25.499824524 --35.622734070 -34.622692108 25.499866486 -34.622734070 -34.622692108 25.499868393 -34.980957031 -33.980976105 25.499824524 --36.233074188 -33.233169556 25.499874115 -35.233074188 -33.233165741 25.499874115 
--36.384307861 -32.384407043 25.499948502 -35.384307861 -32.384407043 25.499948502 --36.458930969 -31.458978653 25.499988556 -35.458930969 -31.458978653 25.499988556 --36.488388062 -30.488407135 25.499996185 -35.488388062 -30.488407135 25.499996185 --36.497467041 -29.497472763 25.499996185 -35.497470856 -29.497472763 25.499996185 --36.499584198 -28.499593735 25.499996185 -35.499588013 -28.499593735 25.499996185 --36.499950409 -27.499954224 25.499996185 -35.499950409 -27.499954224 25.499996185 --36.499992371 -26.499996185 25.499996185 -35.499992371 -26.499996185 25.499996185 --36.499996185 -25.500000000 25.499996185 -35.499996185 -25.500000000 25.499996185 --36.499996185 -24.500000000 25.499996185 -35.499996185 -24.500000000 25.499996185 --36.499996185 -23.500000000 25.499996185 -35.499996185 -23.500000000 25.499996185 --36.499996185 -22.500000000 25.499996185 -35.499996185 -22.500000000 25.499996185 --36.499996185 -21.500000000 25.499996185 -35.499996185 -21.500000000 25.499996185 --36.499996185 -20.500000000 25.499996185 -35.499996185 -20.500000000 25.499996185 --36.499996185 -19.500000000 25.499996185 -35.499996185 -19.500000000 25.499996185 --36.499996185 -18.500000000 25.499996185 -35.499996185 -18.500000000 25.499996185 --36.499996185 -17.500000000 25.499996185 -35.499996185 -17.500000000 25.499996185 --36.499996185 -16.500000000 25.499996185 -35.499996185 -16.500000000 25.499996185 --36.499996185 -15.500000000 25.499996185 -35.499996185 -15.500000000 25.499996185 --36.499996185 -14.500000000 25.499996185 -35.499996185 -14.500000000 25.499996185 --36.499996185 -13.500000000 25.499996185 -35.499996185 -13.500000000 25.499996185 --36.499996185 -12.500000000 25.499996185 -35.499996185 -12.500000000 25.499996185 --36.499996185 -11.500000000 25.499996185 -35.499996185 -11.500000000 25.499996185 --36.499996185 -10.500000000 25.499996185 -35.499996185 -10.500000000 25.499996185 --36.499996185 -9.500000000 25.499996185 -35.499996185 -9.500000000 25.499996185 --36.499996185 -8.500000000 25.499996185 -35.499996185 -8.500000000 25.499996185 --36.499996185 -7.500000000 25.499996185 -35.499996185 -7.500000000 25.499996185 --36.499996185 -6.500000000 25.499996185 -35.499996185 -6.500000000 25.499996185 --36.499996185 -5.500000000 25.499996185 -35.499996185 -5.500000000 25.499996185 --36.499996185 -4.500000000 25.499996185 -35.499996185 -4.500000000 25.499996185 --36.499996185 -3.500000000 25.499996185 -35.499996185 -3.500000000 25.499996185 --36.499996185 -2.500000000 25.499996185 -35.499996185 -2.500000000 25.499996185 --36.499996185 -1.500000000 25.499996185 -35.499996185 -1.500000000 25.499996185 --36.499996185 -0.500000000 25.499996185 -35.499996185 -0.500000000 25.499996185 --36.499996185 0.500000000 25.499996185 -35.499996185 0.500000000 25.499996185 --36.499996185 1.500000000 25.499996185 -35.499996185 1.500000000 25.499996185 --36.499996185 2.500000000 25.499996185 -35.499996185 2.500000000 25.499996185 --36.499996185 3.500000000 25.499996185 -35.499996185 3.500000000 25.499996185 --36.499996185 4.500000000 25.499996185 -35.499996185 4.500000000 25.499996185 --36.499996185 5.500000000 25.499996185 -35.499996185 5.500000000 25.499996185 --36.499996185 6.500000000 25.499996185 -35.499996185 6.500000000 25.499996185 --36.499996185 7.500000000 25.499996185 -35.499996185 7.500000000 25.499996185 --36.499996185 8.500000000 25.499996185 -35.499996185 8.500000000 25.499996185 --36.499996185 9.500000000 25.499996185 -35.499996185 9.500000000 25.499996185 --36.499996185 10.500000000 25.499996185 
-35.499996185 10.500000000 25.499996185 --36.499996185 11.500000000 25.499996185 -35.499996185 11.500000000 25.499996185 --36.499996185 12.500000000 25.499996185 -35.499996185 12.500000000 25.499996185 --36.499996185 13.500000000 25.499996185 -35.499996185 13.500000000 25.499996185 --36.499996185 14.500000000 25.499996185 -35.499996185 14.500000000 25.499996185 --36.499996185 15.500000000 25.499996185 -35.499996185 15.500000000 25.499996185 --36.499996185 16.500000000 25.499996185 -35.499996185 16.500000000 25.499996185 --36.499996185 17.500000000 25.499996185 -35.499996185 17.500000000 25.499996185 --36.499996185 18.500000000 25.499996185 -35.499996185 18.500000000 25.499996185 --36.499996185 19.500000000 25.499996185 -35.499996185 19.500000000 25.499996185 --36.499996185 20.500000000 25.499996185 -35.499996185 20.500000000 25.499996185 --36.499996185 21.500000000 25.499996185 -35.499996185 21.500000000 25.499996185 --36.499996185 22.500000000 25.499996185 -35.499996185 22.500000000 25.499996185 --36.499996185 23.500000000 25.499996185 -35.499996185 23.500000000 25.499996185 --36.499996185 24.500000000 25.499996185 -35.499996185 24.500000000 25.499996185 --36.499992371 25.499996185 25.499996185 -35.499992371 25.499996185 25.499996185 --36.499950409 26.499954224 25.499996185 -35.499950409 26.499954224 25.499996185 --36.499588013 27.499591827 25.499996185 -35.499588013 27.499591827 25.499996185 --36.497467041 28.497470856 25.499996185 -35.497467041 28.497470856 25.499996185 --36.488388062 29.488407135 25.499996185 -35.488391876 29.488407135 25.499996185 --36.458934784 30.458974838 25.499988556 -35.458934784 30.458974838 25.499988556 --36.384307861 31.384399414 25.499948502 -35.384307861 31.384403229 25.499948502 --36.233074188 32.233165741 25.499874115 -35.233070374 32.233165741 25.499874115 --35.980957031 32.980976105 25.499824524 --35.622734070 33.622692108 25.499866486 -34.622734070 33.622692108 25.499866486 -34.980957031 32.980976105 25.499826431 --35.167808533 34.167804718 25.499902725 --34.622692108 34.622734070 25.499866486 -33.622692108 34.622734070 25.499868393 -34.167804718 34.167808533 25.499902725 --33.980976105 34.980957031 25.499824524 --33.233169556 35.233074188 25.499874115 --32.384407043 35.384307861 25.499948502 --31.458978653 35.458930969 25.499988556 --30.488407135 35.488388062 25.499996185 --29.497472763 35.497467041 25.499996185 --28.499593735 35.499584198 25.499996185 --27.499954224 35.499950409 25.499996185 --26.499996185 35.499992371 25.499996185 --25.500000000 35.499996185 25.499996185 --24.500000000 35.499996185 25.499996185 --23.500000000 35.499996185 25.499996185 --22.500000000 35.499996185 25.499996185 --21.500000000 35.499996185 25.499996185 --20.500000000 35.499996185 25.499996185 --19.500000000 35.499996185 25.499996185 --18.500000000 35.499996185 25.499996185 --17.500000000 35.499996185 25.499996185 --16.500000000 35.499996185 25.499996185 --15.500000000 35.499996185 25.499996185 --14.500000000 35.499996185 25.499996185 --13.500000000 35.499996185 25.499996185 --12.500000000 35.499996185 25.499996185 --11.500000000 35.499996185 25.499996185 --10.500000000 35.499996185 25.499996185 --9.500000000 35.499996185 25.499996185 --8.500000000 35.499996185 25.499996185 --7.500000000 35.499996185 25.499996185 --6.500000000 35.499996185 25.499996185 --5.500000000 35.499996185 25.499996185 --4.500000000 35.499996185 25.499996185 --3.500000000 35.499996185 25.499996185 --2.500000000 35.499996185 25.499996185 --1.500000000 35.499996185 25.499996185 --0.500000000 
35.499996185 25.499996185 -0.500000000 35.499996185 25.499996185 -1.500000000 35.499996185 25.499996185 -2.500000000 35.499996185 25.499996185 -3.500000000 35.499996185 25.499996185 -4.500000000 35.499996185 25.499996185 -5.500000000 35.499996185 25.499996185 -6.500000000 35.499996185 25.499996185 -7.500000000 35.499996185 25.499996185 -8.500000000 35.499996185 25.499996185 -9.500000000 35.499996185 25.499996185 -10.500000000 35.499996185 25.499996185 -11.500000000 35.499996185 25.499996185 -12.500000000 35.499996185 25.499996185 -13.500000000 35.499996185 25.499996185 -14.500000000 35.499996185 25.499996185 -15.500000000 35.499996185 25.499996185 -16.500000000 35.499996185 25.499996185 -17.500000000 35.499996185 25.499996185 -18.500000000 35.499996185 25.499996185 -19.500000000 35.499996185 25.499996185 -20.500000000 35.499996185 25.499996185 -21.500000000 35.499996185 25.499996185 -22.500000000 35.499996185 25.499996185 -23.500000000 35.499996185 25.499996185 -24.500000000 35.499996185 25.499996185 -25.499996185 35.499992371 25.499996185 -26.499954224 35.499950409 25.499996185 -27.499591827 35.499588013 25.499996185 -28.497470856 35.497467041 25.499996185 -29.488407135 35.488388062 25.499996185 -30.458974838 35.458934784 25.499988556 -31.384403229 35.384307861 25.499948502 -32.233165741 35.233074188 25.499874115 -32.980968475 34.980957031 25.499824524 --33.980335236 -35.980194092 26.498950958 --33.232864380 -36.232299805 26.499225616 --32.384296417 -36.383720398 26.499622345 --31.458948135 -36.458606720 26.499858856 --30.488397598 -36.488258362 26.499938965 --29.497472763 -36.497406006 26.499954224 --28.499593735 -36.499549866 26.499954224 --27.499954224 -36.499908447 26.499954224 --26.499996185 -36.499950409 26.499954224 --25.500000000 -36.499954224 26.499954224 --24.500000000 -36.499954224 26.499954224 --23.500000000 -36.499954224 26.499954224 --22.500000000 -36.499954224 26.499954224 --21.500000000 -36.499954224 26.499954224 --20.500000000 -36.499954224 26.499954224 --19.500000000 -36.499954224 26.499954224 --18.500000000 -36.499954224 26.499954224 --17.500000000 -36.499954224 26.499954224 --16.500000000 -36.499954224 26.499954224 --15.500000000 -36.499954224 26.499954224 --14.500000000 -36.499954224 26.499954224 --13.500000000 -36.499954224 26.499954224 --12.500000000 -36.499954224 26.499954224 --11.500000000 -36.499954224 26.499954224 --10.500000000 -36.499954224 26.499954224 --9.500000000 -36.499954224 26.499954224 --8.500000000 -36.499954224 26.499954224 --7.500000000 -36.499954224 26.499954224 --6.500000000 -36.499954224 26.499954224 --5.500000000 -36.499954224 26.499954224 --4.500000000 -36.499954224 26.499954224 --3.500000000 -36.499954224 26.499954224 --2.500000000 -36.499954224 26.499954224 --1.500000000 -36.499954224 26.499954224 --0.500000000 -36.499954224 26.499954224 -0.500000000 -36.499954224 26.499954224 -1.500000000 -36.499954224 26.499954224 -2.500000000 -36.499954224 26.499954224 -3.500000000 -36.499954224 26.499954224 -4.500000000 -36.499954224 26.499954224 -5.500000000 -36.499954224 26.499954224 -6.500000000 -36.499954224 26.499954224 -7.500000000 -36.499954224 26.499954224 -8.500000000 -36.499954224 26.499954224 -9.500000000 -36.499954224 26.499954224 -10.500000000 -36.499954224 26.499954224 -11.500000000 -36.499954224 26.499954224 -12.500000000 -36.499954224 26.499954224 -13.500000000 -36.499954224 26.499954224 -14.500000000 -36.499954224 26.499954224 -15.500000000 -36.499954224 26.499954224 -16.500000000 -36.499954224 26.499954224 -17.500000000 -36.499954224 
26.499954224 -18.500000000 -36.499954224 26.499954224 -19.500000000 -36.499954224 26.499954224 -20.500000000 -36.499954224 26.499954224 -21.500000000 -36.499954224 26.499954224 -22.500000000 -36.499954224 26.499954224 -23.500000000 -36.499954224 26.499954224 -24.500000000 -36.499954224 26.499954224 -25.499996185 -36.499950409 26.499954224 -26.499954224 -36.499908447 26.499954224 -27.499591827 -36.499542236 26.499954224 -28.497470856 -36.497409821 26.499954224 -29.488397598 -36.488258362 26.499938965 -30.458948135 -36.458606720 26.499858856 -31.384296417 -36.383720398 26.499622345 -32.232860565 -36.232303619 26.499225616 -32.980335236 -35.980201721 26.498950958 --35.166954041 -35.166954041 26.499298096 --34.621799469 -35.621978760 26.499156952 -33.621799469 -35.621978760 26.499156952 -34.166961670 -35.166957855 26.499298096 --35.980201721 -33.980327606 26.498950958 --35.621978760 -34.621799469 26.499160767 -34.621982574 -34.621799469 26.499156952 -34.980201721 -33.980331421 26.498950958 --36.232303619 -33.232860565 26.499225616 -35.232303619 -33.232864380 26.499225616 --36.383720398 -32.384296417 26.499618530 -35.383720398 -32.384296417 26.499622345 --36.458606720 -31.458948135 26.499858856 -35.458606720 -31.458948135 26.499858856 --36.488258362 -30.488397598 26.499938965 -35.488258362 -30.488397598 26.499938965 --36.497406006 -29.497472763 26.499954224 -35.497406006 -29.497472763 26.499954224 --36.499542236 -28.499593735 26.499954224 -35.499549866 -28.499593735 26.499954224 --36.499908447 -27.499954224 26.499954224 -35.499908447 -27.499954224 26.499954224 --36.499950409 -26.499996185 26.499954224 -35.499950409 -26.499996185 26.499954224 --36.499950409 -25.500000000 26.499954224 -35.499954224 -25.500000000 26.499954224 --36.499950409 -24.500000000 26.499954224 -35.499954224 -24.500000000 26.499954224 --36.499950409 -23.500000000 26.499954224 -35.499954224 -23.500000000 26.499954224 --36.499950409 -22.500000000 26.499954224 -35.499954224 -22.500000000 26.499954224 --36.499950409 -21.500000000 26.499954224 -35.499954224 -21.500000000 26.499954224 --36.499950409 -20.500000000 26.499954224 -35.499954224 -20.500000000 26.499954224 --36.499950409 -19.500000000 26.499954224 -35.499954224 -19.500000000 26.499954224 --36.499950409 -18.500000000 26.499954224 -35.499954224 -18.500000000 26.499954224 --36.499950409 -17.500000000 26.499954224 -35.499954224 -17.500000000 26.499954224 --36.499950409 -16.500000000 26.499954224 -35.499954224 -16.500000000 26.499954224 --36.499950409 -15.500000000 26.499954224 -35.499954224 -15.500000000 26.499954224 --36.499950409 -14.500000000 26.499954224 -35.499954224 -14.500000000 26.499954224 --36.499950409 -13.500000000 26.499954224 -35.499954224 -13.500000000 26.499954224 --36.499950409 -12.500000000 26.499954224 -35.499954224 -12.500000000 26.499954224 --36.499950409 -11.500000000 26.499954224 -35.499954224 -11.500000000 26.499954224 --36.499950409 -10.500000000 26.499954224 -35.499954224 -10.500000000 26.499954224 --36.499950409 -9.500000000 26.499954224 -35.499954224 -9.500000000 26.499954224 --36.499950409 -8.500000000 26.499954224 -35.499954224 -8.500000000 26.499954224 --36.499950409 -7.500000000 26.499954224 -35.499954224 -7.500000000 26.499954224 --36.499950409 -6.500000000 26.499954224 -35.499954224 -6.500000000 26.499954224 --36.499950409 -5.500000000 26.499954224 -35.499954224 -5.500000000 26.499954224 --36.499950409 -4.500000000 26.499954224 -35.499954224 -4.500000000 26.499954224 --36.499950409 -3.500000000 26.499954224 -35.499954224 -3.500000000 
26.499954224 --36.499950409 -2.500000000 26.499954224 -35.499954224 -2.500000000 26.499954224 --36.499950409 -1.500000000 26.499954224 -35.499954224 -1.500000000 26.499954224 --36.499950409 -0.500000000 26.499954224 -35.499954224 -0.500000000 26.499954224 --36.499950409 0.500000000 26.499954224 -35.499954224 0.500000000 26.499954224 --36.499950409 1.500000000 26.499954224 -35.499954224 1.500000000 26.499954224 --36.499950409 2.500000000 26.499954224 -35.499954224 2.500000000 26.499954224 --36.499950409 3.500000000 26.499954224 -35.499954224 3.500000000 26.499954224 --36.499950409 4.500000000 26.499954224 -35.499954224 4.500000000 26.499954224 --36.499950409 5.500000000 26.499954224 -35.499954224 5.500000000 26.499954224 --36.499950409 6.500000000 26.499954224 -35.499954224 6.500000000 26.499954224 --36.499950409 7.500000000 26.499954224 -35.499954224 7.500000000 26.499954224 --36.499950409 8.500000000 26.499954224 -35.499954224 8.500000000 26.499954224 --36.499950409 9.500000000 26.499954224 -35.499954224 9.500000000 26.499954224 --36.499950409 10.500000000 26.499954224 -35.499954224 10.500000000 26.499954224 --36.499950409 11.500000000 26.499954224 -35.499954224 11.500000000 26.499954224 --36.499950409 12.500000000 26.499954224 -35.499954224 12.500000000 26.499954224 --36.499950409 13.500000000 26.499954224 -35.499954224 13.500000000 26.499954224 --36.499950409 14.500000000 26.499954224 -35.499954224 14.500000000 26.499954224 --36.499950409 15.500000000 26.499954224 -35.499954224 15.500000000 26.499954224 --36.499950409 16.500000000 26.499954224 -35.499954224 16.500000000 26.499954224 --36.499950409 17.500000000 26.499954224 -35.499954224 17.500000000 26.499954224 --36.499950409 18.500000000 26.499954224 -35.499954224 18.500000000 26.499954224 --36.499950409 19.500000000 26.499954224 -35.499954224 19.500000000 26.499954224 --36.499950409 20.500000000 26.499954224 -35.499954224 20.500000000 26.499954224 --36.499950409 21.500000000 26.499954224 -35.499954224 21.500000000 26.499954224 --36.499950409 22.500000000 26.499954224 -35.499954224 22.500000000 26.499954224 --36.499950409 23.500000000 26.499954224 -35.499954224 23.500000000 26.499954224 --36.499950409 24.500000000 26.499954224 -35.499954224 24.500000000 26.499954224 --36.499950409 25.499996185 26.499954224 -35.499950409 25.499996185 26.499954224 --36.499908447 26.499954224 26.499954224 -35.499908447 26.499954224 26.499954224 --36.499542236 27.499591827 26.499954224 -35.499542236 27.499591827 26.499954224 --36.497406006 28.497470856 26.499954224 -35.497409821 28.497470856 26.499954224 --36.488258362 29.488397598 26.499938965 -35.488258362 29.488397598 26.499938965 --36.458606720 30.458948135 26.499858856 -35.458606720 30.458948135 26.499858856 --36.383720398 31.384296417 26.499622345 -35.383720398 31.384296417 26.499622345 --36.232299805 32.232860565 26.499225616 -35.232303619 32.232860565 26.499225616 --35.980194092 32.980335236 26.498950958 --35.621978760 33.621799469 26.499156952 -34.621978760 33.621799469 26.499156952 -34.980201721 32.980335236 26.498950958 --35.166954041 34.166954041 26.499298096 --34.621799469 34.621978760 26.499160767 -33.621799469 34.621982574 26.499160767 -34.166954041 34.166961670 26.499298096 --33.980327606 34.980201721 26.498950958 --33.232860565 35.232303619 26.499225616 --32.384296417 35.383720398 26.499618530 --31.458948135 35.458606720 26.499858856 --30.488397598 35.488258362 26.499938965 --29.497472763 35.497406006 26.499954224 --28.499593735 35.499542236 26.499954224 --27.499954224 35.499908447 
26.499954224 --26.499996185 35.499950409 26.499954224 --25.500000000 35.499950409 26.499954224 --24.500000000 35.499950409 26.499954224 --23.500000000 35.499950409 26.499954224 --22.500000000 35.499950409 26.499954224 --21.500000000 35.499950409 26.499954224 --20.500000000 35.499950409 26.499954224 --19.500000000 35.499950409 26.499954224 --18.500000000 35.499950409 26.499954224 --17.500000000 35.499950409 26.499954224 --16.500000000 35.499950409 26.499954224 --15.500000000 35.499950409 26.499954224 --14.500000000 35.499950409 26.499954224 --13.500000000 35.499950409 26.499954224 --12.500000000 35.499950409 26.499954224 --11.500000000 35.499950409 26.499954224 --10.500000000 35.499950409 26.499954224 --9.500000000 35.499950409 26.499954224 --8.500000000 35.499950409 26.499954224 --7.500000000 35.499950409 26.499954224 --6.500000000 35.499950409 26.499954224 --5.500000000 35.499950409 26.499954224 --4.500000000 35.499950409 26.499954224 --3.500000000 35.499950409 26.499954224 --2.500000000 35.499950409 26.499954224 --1.500000000 35.499950409 26.499954224 --0.500000000 35.499950409 26.499954224 -0.500000000 35.499950409 26.499954224 -1.500000000 35.499950409 26.499954224 -2.500000000 35.499950409 26.499954224 -3.500000000 35.499950409 26.499954224 -4.500000000 35.499950409 26.499954224 -5.500000000 35.499950409 26.499954224 -6.500000000 35.499950409 26.499954224 -7.500000000 35.499950409 26.499954224 -8.500000000 35.499950409 26.499954224 -9.500000000 35.499950409 26.499954224 -10.500000000 35.499950409 26.499954224 -11.500000000 35.499950409 26.499954224 -12.500000000 35.499950409 26.499954224 -13.500000000 35.499950409 26.499954224 -14.500000000 35.499950409 26.499954224 -15.500000000 35.499950409 26.499954224 -16.500000000 35.499950409 26.499954224 -17.500000000 35.499950409 26.499954224 -18.500000000 35.499950409 26.499954224 -19.500000000 35.499950409 26.499954224 -20.500000000 35.499950409 26.499954224 -21.500000000 35.499950409 26.499954224 -22.500000000 35.499950409 26.499954224 -23.500000000 35.499950409 26.499954224 -24.500000000 35.499950409 26.499954224 -25.499996185 35.499950409 26.499954224 -26.499954224 35.499908447 26.499954224 -27.499591827 35.499542236 26.499954224 -28.497470856 35.497406006 26.499954224 -29.488397598 35.488258362 26.499938965 -30.458948135 35.458606720 26.499858856 -31.384296417 35.383720398 26.499622345 -32.232860565 35.232303619 26.499225616 -32.980327606 34.980201721 26.498950958 --33.977619171 -35.976856232 27.495168686 --33.231597900 -36.228954315 27.496377945 --32.383811951 -36.381027222 27.498052597 --31.458766937 -36.456939697 27.499073029 --30.488346100 -36.487373352 27.499475479 --29.497461319 -36.496910095 27.499576569 --28.499593735 -36.499160767 27.499591827 --27.499954224 -36.499542236 27.499591827 --26.499996185 -36.499591827 27.499591827 --25.500000000 -36.499591827 27.499591827 --24.500000000 -36.499591827 27.499591827 --23.500000000 -36.499591827 27.499591827 --22.500000000 -36.499591827 27.499591827 --21.500000000 -36.499591827 27.499591827 --20.500000000 -36.499591827 27.499591827 --19.500000000 -36.499591827 27.499591827 --18.500000000 -36.499591827 27.499591827 --17.500000000 -36.499591827 27.499591827 --16.500000000 -36.499591827 27.499591827 --15.500000000 -36.499591827 27.499591827 --14.500000000 -36.499591827 27.499591827 --13.500000000 -36.499591827 27.499591827 --12.500000000 -36.499591827 27.499591827 --11.500000000 -36.499591827 27.499591827 --10.500000000 -36.499591827 27.499591827 --9.500000000 -36.499591827 27.499591827 
--8.500000000 -36.499591827 27.499591827 --7.500000000 -36.499591827 27.499591827 --6.500000000 -36.499591827 27.499591827 --5.500000000 -36.499591827 27.499591827 --4.500000000 -36.499591827 27.499591827 --3.500000000 -36.499591827 27.499591827 --2.500000000 -36.499591827 27.499591827 --1.500000000 -36.499591827 27.499591827 --0.500000000 -36.499591827 27.499591827 -0.500000000 -36.499591827 27.499591827 -1.500000000 -36.499591827 27.499591827 -2.500000000 -36.499591827 27.499591827 -3.500000000 -36.499591827 27.499591827 -4.500000000 -36.499591827 27.499591827 -5.500000000 -36.499591827 27.499591827 -6.500000000 -36.499591827 27.499591827 -7.500000000 -36.499591827 27.499591827 -8.500000000 -36.499591827 27.499591827 -9.500000000 -36.499591827 27.499591827 -10.500000000 -36.499591827 27.499591827 -11.500000000 -36.499591827 27.499591827 -12.500000000 -36.499591827 27.499591827 -13.500000000 -36.499591827 27.499591827 -14.500000000 -36.499591827 27.499591827 -15.500000000 -36.499591827 27.499591827 -16.500000000 -36.499591827 27.499591827 -17.500000000 -36.499591827 27.499591827 -18.500000000 -36.499591827 27.499591827 -19.500000000 -36.499591827 27.499591827 -20.500000000 -36.499591827 27.499591827 -21.500000000 -36.499591827 27.499591827 -22.500000000 -36.499591827 27.499591827 -23.500000000 -36.499591827 27.499591827 -24.500000000 -36.499591827 27.499591827 -25.499996185 -36.499591827 27.499591827 -26.499954224 -36.499546051 27.499591827 -27.499591827 -36.499164581 27.499591827 -28.497457504 -36.496910095 27.499576569 -29.488346100 -36.487373352 27.499475479 -30.458766937 -36.456939697 27.499073029 -31.383810043 -36.381027222 27.498052597 -32.231597900 -36.228958130 27.496377945 -32.977615356 -35.976860046 27.495168686 --35.163158417 -35.163158417 27.496557236 --34.617927551 -35.618598938 27.496026993 -33.617927551 -35.618602753 27.496023178 -34.163158417 -35.163158417 27.496557236 --35.976860046 -33.977615356 27.495168686 --35.618602753 -34.617927551 27.496026993 -34.618602753 -34.617927551 27.496026993 -34.976860046 -33.977619171 27.495168686 --36.228958130 -33.231597900 27.496377945 -35.228958130 -33.231597900 27.496377945 --36.381027222 -32.383811951 27.498052597 -35.381027222 -32.383811951 27.498052597 --36.456939697 -31.458766937 27.499073029 -35.456939697 -31.458766937 27.499073029 --36.487373352 -30.488346100 27.499475479 -35.487373352 -30.488346100 27.499475479 --36.496910095 -29.497461319 27.499576569 -35.496910095 -29.497461319 27.499576569 --36.499160767 -28.499593735 27.499591827 -35.499160767 -28.499593735 27.499591827 --36.499542236 -27.499954224 27.499591827 -35.499542236 -27.499954224 27.499591827 --36.499584198 -26.499996185 27.499591827 -35.499591827 -26.499996185 27.499591827 --36.499591827 -25.500000000 27.499591827 -35.499591827 -25.500000000 27.499591827 --36.499591827 -24.500000000 27.499591827 -35.499591827 -24.500000000 27.499591827 --36.499591827 -23.500000000 27.499591827 -35.499591827 -23.500000000 27.499591827 --36.499591827 -22.500000000 27.499591827 -35.499591827 -22.500000000 27.499591827 --36.499591827 -21.500000000 27.499591827 -35.499591827 -21.500000000 27.499591827 --36.499591827 -20.500000000 27.499591827 -35.499591827 -20.500000000 27.499591827 --36.499591827 -19.500000000 27.499591827 -35.499591827 -19.500000000 27.499591827 --36.499591827 -18.500000000 27.499591827 -35.499591827 -18.500000000 27.499591827 --36.499591827 -17.500000000 27.499591827 -35.499591827 -17.500000000 27.499591827 --36.499591827 -16.500000000 27.499591827 -35.499591827 
-16.500000000 27.499591827 --36.499591827 -15.500000000 27.499591827 -35.499591827 -15.500000000 27.499591827 --36.499591827 -14.500000000 27.499591827 -35.499591827 -14.500000000 27.499591827 --36.499591827 -13.500000000 27.499591827 -35.499591827 -13.500000000 27.499591827 --36.499591827 -12.500000000 27.499591827 -35.499591827 -12.500000000 27.499591827 --36.499591827 -11.500000000 27.499591827 -35.499591827 -11.500000000 27.499591827 --36.499591827 -10.500000000 27.499591827 -35.499591827 -10.500000000 27.499591827 --36.499591827 -9.500000000 27.499591827 -35.499591827 -9.500000000 27.499591827 --36.499591827 -8.500000000 27.499591827 -35.499591827 -8.500000000 27.499591827 --36.499591827 -7.500000000 27.499591827 -35.499591827 -7.500000000 27.499591827 --36.499591827 -6.500000000 27.499591827 -35.499591827 -6.500000000 27.499591827 --36.499591827 -5.500000000 27.499591827 -35.499591827 -5.500000000 27.499591827 --36.499591827 -4.500000000 27.499591827 -35.499591827 -4.500000000 27.499591827 --36.499591827 -3.500000000 27.499591827 -35.499591827 -3.500000000 27.499591827 --36.499591827 -2.500000000 27.499591827 -35.499591827 -2.500000000 27.499591827 --36.499591827 -1.500000000 27.499591827 -35.499591827 -1.500000000 27.499591827 --36.499591827 -0.500000000 27.499591827 -35.499591827 -0.500000000 27.499591827 --36.499591827 0.500000000 27.499591827 -35.499591827 0.500000000 27.499591827 --36.499591827 1.500000000 27.499591827 -35.499591827 1.500000000 27.499591827 --36.499591827 2.500000000 27.499591827 -35.499591827 2.500000000 27.499591827 --36.499591827 3.500000000 27.499591827 -35.499591827 3.500000000 27.499591827 --36.499591827 4.500000000 27.499591827 -35.499591827 4.500000000 27.499591827 --36.499591827 5.500000000 27.499591827 -35.499591827 5.500000000 27.499591827 --36.499591827 6.500000000 27.499591827 -35.499591827 6.500000000 27.499591827 --36.499591827 7.500000000 27.499591827 -35.499591827 7.500000000 27.499591827 --36.499591827 8.500000000 27.499591827 -35.499591827 8.500000000 27.499591827 --36.499591827 9.500000000 27.499591827 -35.499591827 9.500000000 27.499591827 --36.499591827 10.500000000 27.499591827 -35.499591827 10.500000000 27.499591827 --36.499591827 11.500000000 27.499591827 -35.499591827 11.500000000 27.499591827 --36.499591827 12.500000000 27.499591827 -35.499591827 12.500000000 27.499591827 --36.499591827 13.500000000 27.499591827 -35.499591827 13.500000000 27.499591827 --36.499591827 14.500000000 27.499591827 -35.499591827 14.500000000 27.499591827 --36.499591827 15.500000000 27.499591827 -35.499591827 15.500000000 27.499591827 --36.499591827 16.500000000 27.499591827 -35.499591827 16.500000000 27.499591827 --36.499591827 17.500000000 27.499591827 -35.499591827 17.500000000 27.499591827 --36.499591827 18.500000000 27.499591827 -35.499591827 18.500000000 27.499591827 --36.499591827 19.500000000 27.499591827 -35.499591827 19.500000000 27.499591827 --36.499591827 20.500000000 27.499591827 -35.499591827 20.500000000 27.499591827 --36.499591827 21.500000000 27.499591827 -35.499591827 21.500000000 27.499591827 --36.499591827 22.500000000 27.499591827 -35.499591827 22.500000000 27.499591827 --36.499591827 23.500000000 27.499591827 -35.499591827 23.500000000 27.499591827 --36.499591827 24.500000000 27.499591827 -35.499591827 24.500000000 27.499591827 --36.499584198 25.499996185 27.499591827 -35.499591827 25.499996185 27.499591827 --36.499542236 26.499954224 27.499591827 -35.499546051 26.499954224 27.499591827 --36.499160767 27.499591827 27.499591827 
-35.499164581 27.499591827 27.499591827 --36.496910095 28.497457504 27.499576569 -35.496910095 28.497457504 27.499576569 --36.487373352 29.488346100 27.499475479 -35.487373352 29.488346100 27.499475479 --36.456939697 30.458766937 27.499073029 -35.456939697 30.458766937 27.499073029 --36.381027222 31.383810043 27.498052597 -35.381027222 31.383810043 27.498052597 --36.228950500 32.231597900 27.496377945 -35.228958130 32.231597900 27.496377945 --35.976856232 32.977611542 27.495168686 --35.618598938 33.617927551 27.496026993 -34.618602753 33.617927551 27.496023178 -34.976860046 32.977611542 27.495168686 --35.163158417 34.163158417 27.496557236 --34.617927551 34.618602753 27.496026993 -33.617927551 34.618602753 27.496026993 -34.163158417 34.163158417 27.496557236 --33.977615356 34.976860046 27.495168686 --33.231597900 35.228958130 27.496377945 --32.383811951 35.381027222 27.498052597 --31.458766937 35.456939697 27.499073029 --30.488346100 35.487373352 27.499475479 --29.497461319 35.496910095 27.499576569 --28.499593735 35.499160767 27.499591827 --27.499954224 35.499542236 27.499591827 --26.499996185 35.499584198 27.499591827 --25.500000000 35.499591827 27.499591827 --24.500000000 35.499591827 27.499591827 --23.500000000 35.499591827 27.499591827 --22.500000000 35.499591827 27.499591827 --21.500000000 35.499591827 27.499591827 --20.500000000 35.499591827 27.499591827 --19.500000000 35.499591827 27.499591827 --18.500000000 35.499591827 27.499591827 --17.500000000 35.499591827 27.499591827 --16.500000000 35.499591827 27.499591827 --15.500000000 35.499591827 27.499591827 --14.500000000 35.499591827 27.499591827 --13.500000000 35.499591827 27.499591827 --12.500000000 35.499591827 27.499591827 --11.500000000 35.499591827 27.499591827 --10.500000000 35.499591827 27.499591827 --9.500000000 35.499591827 27.499591827 --8.500000000 35.499591827 27.499591827 --7.500000000 35.499591827 27.499591827 --6.500000000 35.499591827 27.499591827 --5.500000000 35.499591827 27.499591827 --4.500000000 35.499591827 27.499591827 --3.500000000 35.499591827 27.499591827 --2.500000000 35.499591827 27.499591827 --1.500000000 35.499591827 27.499591827 --0.500000000 35.499591827 27.499591827 -0.500000000 35.499591827 27.499591827 -1.500000000 35.499591827 27.499591827 -2.500000000 35.499591827 27.499591827 -3.500000000 35.499591827 27.499591827 -4.500000000 35.499591827 27.499591827 -5.500000000 35.499591827 27.499591827 -6.500000000 35.499591827 27.499591827 -7.500000000 35.499591827 27.499591827 -8.500000000 35.499591827 27.499591827 -9.500000000 35.499591827 27.499591827 -10.500000000 35.499591827 27.499591827 -11.500000000 35.499591827 27.499591827 -12.500000000 35.499591827 27.499591827 -13.500000000 35.499591827 27.499591827 -14.500000000 35.499591827 27.499591827 -15.500000000 35.499591827 27.499591827 -16.500000000 35.499591827 27.499591827 -17.500000000 35.499591827 27.499591827 -18.500000000 35.499591827 27.499591827 -19.500000000 35.499591827 27.499591827 -20.500000000 35.499591827 27.499591827 -21.500000000 35.499591827 27.499591827 -22.500000000 35.499591827 27.499591827 -23.500000000 35.499591827 27.499591827 -24.500000000 35.499591827 27.499591827 -25.499996185 35.499584198 27.499591827 -26.499954224 35.499542236 27.499591827 -27.499591827 35.499160767 27.499591827 -28.497457504 35.496910095 27.499576569 -29.488346100 35.487373352 27.499475479 -30.458766937 35.456939697 27.499073029 -31.383810043 35.381027222 27.498052597 -32.231597900 35.228958130 27.496377945 -32.977615356 34.976860046 27.495168686 
--33.968864441 -35.965759277 28.481937408 --33.227870941 -36.217510223 28.486719131 --32.382308960 -36.371490479 28.492321014 --31.458078384 -36.450428009 28.495611191 --30.488048553 -36.483341217 28.496957779 --29.497375488 -36.494178772 28.497371674 --28.499578476 -36.496910095 28.497461319 --27.499954224 -36.497402191 28.497470856 --26.499996185 -36.497467041 28.497470856 --25.500000000 -36.497470856 28.497470856 --24.500000000 -36.497470856 28.497470856 --23.500000000 -36.497470856 28.497470856 --22.500000000 -36.497470856 28.497470856 --21.500000000 -36.497470856 28.497470856 --20.500000000 -36.497470856 28.497470856 --19.500000000 -36.497470856 28.497470856 --18.500000000 -36.497470856 28.497470856 --17.500000000 -36.497470856 28.497470856 --16.500000000 -36.497470856 28.497470856 --15.500000000 -36.497470856 28.497470856 --14.500000000 -36.497470856 28.497470856 --13.500000000 -36.497470856 28.497470856 --12.500000000 -36.497470856 28.497470856 --11.500000000 -36.497470856 28.497470856 --10.500000000 -36.497470856 28.497470856 --9.500000000 -36.497470856 28.497470856 --8.500000000 -36.497470856 28.497470856 --7.500000000 -36.497470856 28.497470856 --6.500000000 -36.497470856 28.497470856 --5.500000000 -36.497470856 28.497470856 --4.500000000 -36.497470856 28.497470856 --3.500000000 -36.497470856 28.497470856 --2.500000000 -36.497470856 28.497470856 --1.500000000 -36.497470856 28.497470856 --0.500000000 -36.497470856 28.497470856 -0.500000000 -36.497470856 28.497470856 -1.500000000 -36.497470856 28.497470856 -2.500000000 -36.497470856 28.497470856 -3.500000000 -36.497470856 28.497470856 -4.500000000 -36.497470856 28.497470856 -5.500000000 -36.497470856 28.497470856 -6.500000000 -36.497470856 28.497470856 -7.500000000 -36.497470856 28.497470856 -8.500000000 -36.497470856 28.497470856 -9.500000000 -36.497470856 28.497470856 -10.500000000 -36.497470856 28.497470856 -11.500000000 -36.497470856 28.497470856 -12.500000000 -36.497470856 28.497470856 -13.500000000 -36.497470856 28.497470856 -14.500000000 -36.497470856 28.497470856 -15.500000000 -36.497470856 28.497470856 -16.500000000 -36.497470856 28.497470856 -17.500000000 -36.497470856 28.497470856 -18.500000000 -36.497470856 28.497470856 -19.500000000 -36.497470856 28.497470856 -20.500000000 -36.497470856 28.497470856 -21.500000000 -36.497470856 28.497470856 -22.500000000 -36.497470856 28.497470856 -23.500000000 -36.497470856 28.497470856 -24.500000000 -36.497470856 28.497470856 -25.499996185 -36.497467041 28.497470856 -26.499954224 -36.497406006 28.497470856 -27.499576569 -36.496910095 28.497457504 -28.497371674 -36.494174957 28.497375488 -29.488048553 -36.483337402 28.496957779 -30.458078384 -36.450428009 28.495611191 -31.382312775 -36.371490479 28.492321014 -32.227874756 -36.217517853 28.486719131 -32.968864441 -35.965766907 28.481933594 --35.150249481 -35.150249481 28.487289429 --34.604705811 -35.607204437 28.485630035 -33.604705811 -35.607208252 28.485630035 -34.150249481 -35.150249481 28.487289429 --35.965766907 -33.968864441 28.481937408 --35.607204437 -34.604705811 28.485631943 -34.607204437 -34.604705811 28.485631943 -34.965766907 -33.968868256 28.481937408 --36.217517853 -33.227874756 28.486719131 -35.217517853 -33.227878571 28.486719131 --36.371490479 -32.382312775 28.492321014 -35.371490479 -32.382308960 28.492321014 --36.450428009 -31.458078384 28.495611191 -35.450428009 -31.458078384 28.495611191 --36.483337402 -30.488048553 28.496957779 -35.483341217 -30.488048553 28.496957779 --36.494178772 -29.497375488 28.497371674 
-35.494178772 -29.497375488 28.497371674 --36.496910095 -28.499578476 28.497461319 -35.496910095 -28.499578476 28.497461319 --36.497406006 -27.499954224 28.497470856 -35.497402191 -27.499954224 28.497470856 --36.497467041 -26.499996185 28.497470856 -35.497467041 -26.499996185 28.497470856 --36.497467041 -25.500000000 28.497470856 -35.497470856 -25.500000000 28.497470856 --36.497467041 -24.500000000 28.497470856 -35.497470856 -24.500000000 28.497470856 --36.497467041 -23.500000000 28.497470856 -35.497470856 -23.500000000 28.497470856 --36.497467041 -22.500000000 28.497470856 -35.497470856 -22.500000000 28.497470856 --36.497467041 -21.500000000 28.497470856 -35.497470856 -21.500000000 28.497470856 --36.497467041 -20.500000000 28.497470856 -35.497470856 -20.500000000 28.497470856 --36.497467041 -19.500000000 28.497470856 -35.497470856 -19.500000000 28.497470856 --36.497467041 -18.500000000 28.497470856 -35.497470856 -18.500000000 28.497470856 --36.497467041 -17.500000000 28.497470856 -35.497470856 -17.500000000 28.497470856 --36.497467041 -16.500000000 28.497470856 -35.497470856 -16.500000000 28.497470856 --36.497467041 -15.500000000 28.497470856 -35.497470856 -15.500000000 28.497470856 --36.497467041 -14.500000000 28.497470856 -35.497470856 -14.500000000 28.497470856 --36.497467041 -13.500000000 28.497470856 -35.497470856 -13.500000000 28.497470856 --36.497467041 -12.500000000 28.497470856 -35.497470856 -12.500000000 28.497470856 --36.497467041 -11.500000000 28.497470856 -35.497470856 -11.500000000 28.497470856 --36.497467041 -10.500000000 28.497470856 -35.497470856 -10.500000000 28.497470856 --36.497467041 -9.500000000 28.497470856 -35.497470856 -9.500000000 28.497470856 --36.497467041 -8.500000000 28.497470856 -35.497470856 -8.500000000 28.497470856 --36.497467041 -7.500000000 28.497470856 -35.497470856 -7.500000000 28.497470856 --36.497467041 -6.500000000 28.497470856 -35.497470856 -6.500000000 28.497470856 --36.497467041 -5.500000000 28.497470856 -35.497470856 -5.500000000 28.497470856 --36.497467041 -4.500000000 28.497470856 -35.497470856 -4.500000000 28.497470856 --36.497467041 -3.500000000 28.497470856 -35.497470856 -3.500000000 28.497470856 --36.497467041 -2.500000000 28.497470856 -35.497470856 -2.500000000 28.497470856 --36.497467041 -1.500000000 28.497470856 -35.497470856 -1.500000000 28.497470856 --36.497467041 -0.500000000 28.497470856 -35.497470856 -0.500000000 28.497470856 --36.497467041 0.500000000 28.497470856 -35.497470856 0.500000000 28.497470856 --36.497467041 1.500000000 28.497470856 -35.497470856 1.500000000 28.497470856 --36.497467041 2.500000000 28.497470856 -35.497470856 2.500000000 28.497470856 --36.497467041 3.500000000 28.497470856 -35.497470856 3.500000000 28.497470856 --36.497467041 4.500000000 28.497470856 -35.497470856 4.500000000 28.497470856 --36.497467041 5.500000000 28.497470856 -35.497470856 5.500000000 28.497470856 --36.497467041 6.500000000 28.497470856 -35.497470856 6.500000000 28.497470856 --36.497467041 7.500000000 28.497470856 -35.497470856 7.500000000 28.497470856 --36.497467041 8.500000000 28.497470856 -35.497470856 8.500000000 28.497470856 --36.497467041 9.500000000 28.497470856 -35.497470856 9.500000000 28.497470856 --36.497467041 10.500000000 28.497470856 -35.497470856 10.500000000 28.497470856 --36.497467041 11.500000000 28.497470856 -35.497470856 11.500000000 28.497470856 --36.497467041 12.500000000 28.497470856 -35.497470856 12.500000000 28.497470856 --36.497467041 13.500000000 28.497470856 -35.497470856 13.500000000 28.497470856 
--36.497467041 14.500000000 28.497470856 -35.497470856 14.500000000 28.497470856 --36.497467041 15.500000000 28.497470856 -35.497470856 15.500000000 28.497470856 --36.497467041 16.500000000 28.497470856 -35.497470856 16.500000000 28.497470856 --36.497467041 17.500000000 28.497470856 -35.497470856 17.500000000 28.497470856 --36.497467041 18.500000000 28.497470856 -35.497470856 18.500000000 28.497470856 --36.497467041 19.500000000 28.497470856 -35.497470856 19.500000000 28.497470856 --36.497467041 20.500000000 28.497470856 -35.497470856 20.500000000 28.497470856 --36.497467041 21.500000000 28.497470856 -35.497470856 21.500000000 28.497470856 --36.497467041 22.500000000 28.497470856 -35.497470856 22.500000000 28.497470856 --36.497467041 23.500000000 28.497470856 -35.497470856 23.500000000 28.497470856 --36.497467041 24.500000000 28.497470856 -35.497470856 24.500000000 28.497470856 --36.497467041 25.499996185 28.497470856 -35.497467041 25.499996185 28.497470856 --36.497406006 26.499954224 28.497470856 -35.497406006 26.499954224 28.497470856 --36.496910095 27.499576569 28.497457504 -35.496910095 27.499576569 28.497457504 --36.494178772 28.497375488 28.497371674 -35.494174957 28.497371674 28.497375488 --36.483337402 29.488048553 28.496957779 -35.483337402 29.488048553 28.496957779 --36.450428009 30.458078384 28.495609283 -35.450428009 30.458078384 28.495611191 --36.371490479 31.382312775 28.492321014 -35.371490479 31.382312775 28.492321014 --36.217510223 32.227870941 28.486719131 -35.217510223 32.227870941 28.486719131 --35.965759277 32.968864441 28.481937408 --35.607204437 33.604705811 28.485630035 -34.607204437 33.604705811 28.485630035 -34.965759277 32.968864441 28.481937408 --35.150249481 34.150249481 28.487289429 --34.604705811 34.607204437 28.485631943 -33.604705811 34.607204437 28.485631943 -34.150249481 34.150249481 28.487293243 --33.968864441 34.965766907 28.481937408 --33.227874756 35.217517853 28.486719131 --32.382312775 35.371490479 28.492321014 --31.458078384 35.450428009 28.495611191 --30.488048553 35.483337402 28.496957779 --29.497375488 35.494178772 28.497371674 --28.499578476 35.496910095 28.497461319 --27.499954224 35.497406006 28.497470856 --26.499996185 35.497467041 28.497470856 --25.500000000 35.497467041 28.497470856 --24.500000000 35.497467041 28.497470856 --23.500000000 35.497467041 28.497470856 --22.500000000 35.497467041 28.497470856 --21.500000000 35.497467041 28.497470856 --20.500000000 35.497467041 28.497470856 --19.500000000 35.497467041 28.497470856 --18.500000000 35.497467041 28.497470856 --17.500000000 35.497467041 28.497470856 --16.500000000 35.497467041 28.497470856 --15.500000000 35.497467041 28.497470856 --14.500000000 35.497467041 28.497470856 --13.500000000 35.497467041 28.497470856 --12.500000000 35.497467041 28.497470856 --11.500000000 35.497467041 28.497470856 --10.500000000 35.497467041 28.497470856 --9.500000000 35.497467041 28.497470856 --8.500000000 35.497467041 28.497470856 --7.500000000 35.497467041 28.497470856 --6.500000000 35.497467041 28.497470856 --5.500000000 35.497467041 28.497470856 --4.500000000 35.497467041 28.497470856 --3.500000000 35.497467041 28.497470856 --2.500000000 35.497467041 28.497470856 --1.500000000 35.497467041 28.497470856 --0.500000000 35.497467041 28.497470856 -0.500000000 35.497467041 28.497470856 -1.500000000 35.497467041 28.497470856 -2.500000000 35.497467041 28.497470856 -3.500000000 35.497467041 28.497470856 -4.500000000 35.497467041 28.497470856 -5.500000000 35.497467041 28.497470856 -6.500000000 35.497467041 
28.497470856 -7.500000000 35.497467041 28.497470856 -8.500000000 35.497467041 28.497470856 -9.500000000 35.497467041 28.497470856 -10.500000000 35.497467041 28.497470856 -11.500000000 35.497467041 28.497470856 -12.500000000 35.497467041 28.497470856 -13.500000000 35.497467041 28.497470856 -14.500000000 35.497467041 28.497470856 -15.500000000 35.497467041 28.497470856 -16.500000000 35.497467041 28.497470856 -17.500000000 35.497467041 28.497470856 -18.500000000 35.497467041 28.497470856 -19.500000000 35.497467041 28.497470856 -20.500000000 35.497467041 28.497470856 -21.500000000 35.497467041 28.497470856 -22.500000000 35.497467041 28.497470856 -23.500000000 35.497467041 28.497470856 -24.500000000 35.497467041 28.497470856 -25.499996185 35.497467041 28.497470856 -26.499954224 35.497406006 28.497470856 -27.499576569 35.496910095 28.497457504 -28.497375488 35.494178772 28.497371674 -29.488048553 35.483337402 28.496957779 -30.458078384 35.450428009 28.495609283 -31.382308960 35.371490479 28.492321014 -32.227870941 35.217517853 28.486719131 -32.968864441 34.965766907 28.481937408 --33.946807861 -35.937091827 29.442840576 --33.220714569 -36.185966492 29.460592270 --32.379219055 -36.344280243 29.476003647 --31.456085205 -36.430480957 29.483789444 --30.486968994 -36.469238281 29.486968994 --29.496959686 -36.483337402 29.488048553 --28.499475479 -36.487373352 29.488346100 --27.499938965 -36.488258362 29.488399506 --26.499996185 -36.488391876 29.488407135 --25.500000000 -36.488403320 29.488407135 --24.500000000 -36.488407135 29.488407135 --23.500000000 -36.488407135 29.488407135 --22.500000000 -36.488407135 29.488407135 --21.500000000 -36.488407135 29.488407135 --20.500000000 -36.488407135 29.488407135 --19.500000000 -36.488407135 29.488407135 --18.500000000 -36.488407135 29.488407135 --17.500000000 -36.488407135 29.488407135 --16.500000000 -36.488407135 29.488407135 --15.500000000 -36.488407135 29.488407135 --14.500000000 -36.488407135 29.488407135 --13.500000000 -36.488407135 29.488407135 --12.500000000 -36.488407135 29.488407135 --11.500000000 -36.488407135 29.488407135 --10.500000000 -36.488407135 29.488407135 --9.500000000 -36.488407135 29.488407135 --8.500000000 -36.488407135 29.488407135 --7.500000000 -36.488407135 29.488407135 --6.500000000 -36.488407135 29.488407135 --5.500000000 -36.488407135 29.488407135 --4.500000000 -36.488407135 29.488407135 --3.500000000 -36.488407135 29.488407135 --2.500000000 -36.488407135 29.488407135 --1.500000000 -36.488407135 29.488407135 --0.500000000 -36.488407135 29.488407135 -0.500000000 -36.488407135 29.488407135 -1.500000000 -36.488407135 29.488407135 -2.500000000 -36.488407135 29.488407135 -3.500000000 -36.488407135 29.488407135 -4.500000000 -36.488407135 29.488407135 -5.500000000 -36.488407135 29.488407135 -6.500000000 -36.488407135 29.488407135 -7.500000000 -36.488407135 29.488407135 -8.500000000 -36.488407135 29.488407135 -9.500000000 -36.488407135 29.488407135 -10.500000000 -36.488407135 29.488407135 -11.500000000 -36.488407135 29.488407135 -12.500000000 -36.488407135 29.488407135 -13.500000000 -36.488407135 29.488407135 -14.500000000 -36.488407135 29.488407135 -15.500000000 -36.488407135 29.488407135 -16.500000000 -36.488407135 29.488407135 -17.500000000 -36.488407135 29.488407135 -18.500000000 -36.488407135 29.488407135 -19.500000000 -36.488407135 29.488407135 -20.500000000 -36.488407135 29.488407135 -21.500000000 -36.488407135 29.488407135 -22.500000000 -36.488407135 29.488407135 -23.500000000 -36.488407135 29.488407135 -24.500000000 -36.488407135 
29.488407135 -25.499996185 -36.488391876 29.488407135 -26.499938965 -36.488258362 29.488399506 -27.499475479 -36.487373352 29.488346100 -28.496959686 -36.483337402 29.488048553 -29.486968994 -36.469238281 29.486965179 -30.456085205 -36.430480957 29.483789444 -31.379222870 -36.344280243 29.476003647 -32.220714569 -36.185966492 29.460596085 -32.946807861 -35.937091827 29.442840576 --35.115642548 -35.115642548 29.463253021 --34.567741394 -35.577739716 29.459178925 -33.567741394 -35.577739716 29.459178925 -34.115642548 -35.115642548 29.463253021 --35.937091827 -33.946807861 29.442840576 --35.577739716 -34.567741394 29.459178925 -34.577739716 -34.567749023 29.459177017 -34.937091827 -33.946807861 29.442840576 --36.185966492 -33.220714569 29.460592270 -35.185966492 -33.220718384 29.460592270 --36.344280243 -32.379222870 29.476003647 -35.344280243 -32.379222870 29.476003647 --36.430480957 -31.456085205 29.483789444 -35.430480957 -31.456085205 29.483789444 --36.469230652 -30.486968994 29.486965179 -35.469238281 -30.486968994 29.486968994 --36.483337402 -29.496959686 29.488052368 -35.483337402 -29.496959686 29.488048553 --36.487373352 -28.499475479 29.488346100 -35.487373352 -28.499475479 29.488346100 --36.488258362 -27.499938965 29.488399506 -35.488258362 -27.499938965 29.488399506 --36.488391876 -26.499996185 29.488407135 -35.488391876 -26.499996185 29.488407135 --36.488403320 -25.500000000 29.488407135 -35.488403320 -25.500000000 29.488407135 --36.488403320 -24.500000000 29.488407135 -35.488407135 -24.500000000 29.488407135 --36.488403320 -23.500000000 29.488407135 -35.488407135 -23.500000000 29.488407135 --36.488403320 -22.500000000 29.488407135 -35.488407135 -22.500000000 29.488407135 --36.488403320 -21.500000000 29.488407135 -35.488407135 -21.500000000 29.488407135 --36.488403320 -20.500000000 29.488407135 -35.488407135 -20.500000000 29.488407135 --36.488403320 -19.500000000 29.488407135 -35.488407135 -19.500000000 29.488407135 --36.488403320 -18.500000000 29.488407135 -35.488407135 -18.500000000 29.488407135 --36.488403320 -17.500000000 29.488407135 -35.488407135 -17.500000000 29.488407135 --36.488403320 -16.500000000 29.488407135 -35.488407135 -16.500000000 29.488407135 --36.488403320 -15.500000000 29.488407135 -35.488407135 -15.500000000 29.488407135 --36.488403320 -14.500000000 29.488407135 -35.488407135 -14.500000000 29.488407135 --36.488403320 -13.500000000 29.488407135 -35.488407135 -13.500000000 29.488407135 --36.488403320 -12.500000000 29.488407135 -35.488407135 -12.500000000 29.488407135 --36.488403320 -11.500000000 29.488407135 -35.488407135 -11.500000000 29.488407135 --36.488403320 -10.500000000 29.488407135 -35.488407135 -10.500000000 29.488407135 --36.488403320 -9.500000000 29.488407135 -35.488407135 -9.500000000 29.488407135 --36.488403320 -8.500000000 29.488407135 -35.488407135 -8.500000000 29.488407135 --36.488403320 -7.500000000 29.488407135 -35.488407135 -7.500000000 29.488407135 --36.488403320 -6.500000000 29.488407135 -35.488407135 -6.500000000 29.488407135 --36.488403320 -5.500000000 29.488407135 -35.488407135 -5.500000000 29.488407135 --36.488403320 -4.500000000 29.488407135 -35.488407135 -4.500000000 29.488407135 --36.488403320 -3.500000000 29.488407135 -35.488407135 -3.500000000 29.488407135 --36.488403320 -2.500000000 29.488407135 -35.488407135 -2.500000000 29.488407135 --36.488403320 -1.500000000 29.488407135 -35.488407135 -1.500000000 29.488407135 --36.488403320 -0.500000000 29.488407135 -35.488407135 -0.500000000 29.488407135 --36.488403320 0.500000000 
29.488407135 -35.488407135 0.500000000 29.488407135 --36.488403320 1.500000000 29.488407135 -35.488407135 1.500000000 29.488407135 --36.488403320 2.500000000 29.488407135 -35.488407135 2.500000000 29.488407135 --36.488403320 3.500000000 29.488407135 -35.488407135 3.500000000 29.488407135 --36.488403320 4.500000000 29.488407135 -35.488407135 4.500000000 29.488407135 --36.488403320 5.500000000 29.488407135 -35.488407135 5.500000000 29.488407135 --36.488403320 6.500000000 29.488407135 -35.488407135 6.500000000 29.488407135 --36.488403320 7.500000000 29.488407135 -35.488407135 7.500000000 29.488407135 --36.488403320 8.500000000 29.488407135 -35.488407135 8.500000000 29.488407135 --36.488403320 9.500000000 29.488407135 -35.488407135 9.500000000 29.488407135 --36.488403320 10.500000000 29.488407135 -35.488407135 10.500000000 29.488407135 --36.488403320 11.500000000 29.488407135 -35.488407135 11.500000000 29.488407135 --36.488403320 12.500000000 29.488407135 -35.488407135 12.500000000 29.488407135 --36.488403320 13.500000000 29.488407135 -35.488407135 13.500000000 29.488407135 --36.488403320 14.500000000 29.488407135 -35.488407135 14.500000000 29.488407135 --36.488403320 15.500000000 29.488407135 -35.488407135 15.500000000 29.488407135 --36.488403320 16.500000000 29.488407135 -35.488407135 16.500000000 29.488407135 --36.488403320 17.500000000 29.488407135 -35.488407135 17.500000000 29.488407135 --36.488403320 18.500000000 29.488407135 -35.488407135 18.500000000 29.488407135 --36.488403320 19.500000000 29.488407135 -35.488407135 19.500000000 29.488407135 --36.488403320 20.500000000 29.488407135 -35.488407135 20.500000000 29.488407135 --36.488403320 21.500000000 29.488407135 -35.488407135 21.500000000 29.488407135 --36.488403320 22.500000000 29.488407135 -35.488407135 22.500000000 29.488407135 --36.488403320 23.500000000 29.488407135 -35.488407135 23.500000000 29.488407135 --36.488403320 24.500000000 29.488407135 -35.488407135 24.500000000 29.488407135 --36.488391876 25.499996185 29.488407135 -35.488391876 25.499996185 29.488407135 --36.488258362 26.499938965 29.488399506 -35.488258362 26.499938965 29.488399506 --36.487377167 27.499475479 29.488348007 -35.487373352 27.499475479 29.488346100 --36.483337402 28.496957779 29.488052368 -35.483337402 28.496959686 29.488048553 --36.469238281 29.486965179 29.486968994 -35.469238281 29.486968994 29.486965179 --36.430473328 30.456085205 29.483785629 -35.430480957 30.456085205 29.483789444 --36.344280243 31.379220963 29.476003647 -35.344280243 31.379222870 29.476003647 --36.185966492 32.220710754 29.460596085 -35.185966492 32.220714569 29.460596085 --35.937091827 32.946807861 29.442840576 --35.577739716 33.567741394 29.459178925 -34.577739716 33.567741394 29.459178925 -34.937091827 32.946807861 29.442840576 --35.115642548 34.115642548 29.463253021 --34.567741394 34.577739716 29.459178925 -33.567741394 34.577739716 29.459178925 -34.115642548 34.115642548 29.463256836 --33.946807861 34.937091827 29.442840576 --33.220714569 35.185966492 29.460592270 --32.379222870 35.344280243 29.476003647 --31.456085205 35.430480957 29.483789444 --30.486968994 35.469230652 29.486965179 --29.496959686 35.483337402 29.488052368 --28.499475479 35.487373352 29.488346100 --27.499938965 35.488258362 29.488399506 --26.499996185 35.488391876 29.488407135 --25.500000000 35.488403320 29.488407135 --24.500000000 35.488403320 29.488407135 --23.500000000 35.488403320 29.488407135 --22.500000000 35.488403320 29.488407135 --21.500000000 35.488403320 29.488407135 --20.500000000 35.488403320 
29.488407135 --19.500000000 35.488403320 29.488407135 --18.500000000 35.488403320 29.488407135 --17.500000000 35.488403320 29.488407135 --16.500000000 35.488403320 29.488407135 --15.500000000 35.488403320 29.488407135 --14.500000000 35.488403320 29.488407135 --13.500000000 35.488403320 29.488407135 --12.500000000 35.488403320 29.488407135 --11.500000000 35.488403320 29.488407135 --10.500000000 35.488403320 29.488407135 --9.500000000 35.488403320 29.488407135 --8.500000000 35.488403320 29.488407135 --7.500000000 35.488403320 29.488407135 --6.500000000 35.488403320 29.488407135 --5.500000000 35.488403320 29.488407135 --4.500000000 35.488403320 29.488407135 --3.500000000 35.488403320 29.488407135 --2.500000000 35.488403320 29.488407135 --1.500000000 35.488403320 29.488407135 --0.500000000 35.488403320 29.488407135 -0.500000000 35.488403320 29.488407135 -1.500000000 35.488403320 29.488407135 -2.500000000 35.488403320 29.488407135 -3.500000000 35.488403320 29.488407135 -4.500000000 35.488403320 29.488407135 -5.500000000 35.488403320 29.488407135 -6.500000000 35.488403320 29.488407135 -7.500000000 35.488403320 29.488407135 -8.500000000 35.488403320 29.488407135 -9.500000000 35.488403320 29.488407135 -10.500000000 35.488403320 29.488407135 -11.500000000 35.488403320 29.488407135 -12.500000000 35.488403320 29.488407135 -13.500000000 35.488403320 29.488407135 -14.500000000 35.488403320 29.488407135 -15.500000000 35.488403320 29.488407135 -16.500000000 35.488403320 29.488407135 -17.500000000 35.488403320 29.488407135 -18.500000000 35.488403320 29.488407135 -19.500000000 35.488403320 29.488407135 -20.500000000 35.488403320 29.488407135 -21.500000000 35.488403320 29.488407135 -22.500000000 35.488403320 29.488407135 -23.500000000 35.488403320 29.488407135 -24.500000000 35.488403320 29.488407135 -25.499996185 35.488391876 29.488407135 -26.499938965 35.488258362 29.488399506 -27.499475479 35.487377167 29.488348007 -28.496957779 35.483337402 29.488052368 -29.486965179 35.469238281 29.486968994 -30.456085205 35.430473328 29.483785629 -31.379220963 35.344280243 29.476003647 -32.220710754 35.185966492 29.460596085 -32.946807861 34.937091827 29.442840576 --33.903377533 -35.879737854 30.336599350 --33.216838837 -36.112342834 30.405670166 --32.375358582 -36.280395508 30.438953400 --31.451217651 -36.380989075 30.451217651 --30.483789444 -36.430473328 30.456085205 --29.495611191 -36.450424194 30.458078384 --28.499073029 -36.456939697 30.458770752 --27.499858856 -36.458606720 30.458950043 --26.499988556 -36.458930969 30.458980560 --25.500000000 -36.458976746 30.458980560 --24.500000000 -36.458976746 30.458980560 --23.500000000 -36.458976746 30.458980560 --22.500000000 -36.458976746 30.458980560 --21.500000000 -36.458976746 30.458980560 --20.500000000 -36.458976746 30.458980560 --19.500000000 -36.458976746 30.458980560 --18.500000000 -36.458976746 30.458980560 --17.500000000 -36.458976746 30.458980560 --16.500000000 -36.458976746 30.458980560 --15.500000000 -36.458976746 30.458980560 --14.500000000 -36.458976746 30.458980560 --13.500000000 -36.458976746 30.458980560 --12.500000000 -36.458976746 30.458980560 --11.500000000 -36.458976746 30.458980560 --10.500000000 -36.458976746 30.458980560 --9.500000000 -36.458976746 30.458980560 --8.500000000 -36.458976746 30.458980560 --7.500000000 -36.458976746 30.458980560 --6.500000000 -36.458976746 30.458980560 --5.500000000 -36.458976746 30.458980560 --4.500000000 -36.458976746 30.458980560 --3.500000000 -36.458976746 30.458980560 --2.500000000 -36.458976746 30.458980560 
--1.500000000 -36.458976746 30.458980560 --0.500000000 -36.458976746 30.458980560 -0.500000000 -36.458976746 30.458980560 -1.500000000 -36.458976746 30.458980560 -2.500000000 -36.458976746 30.458980560 -3.500000000 -36.458976746 30.458980560 -4.500000000 -36.458976746 30.458980560 -5.500000000 -36.458976746 30.458980560 -6.500000000 -36.458976746 30.458980560 -7.500000000 -36.458976746 30.458980560 -8.500000000 -36.458976746 30.458980560 -9.500000000 -36.458976746 30.458980560 -10.500000000 -36.458976746 30.458980560 -11.500000000 -36.458976746 30.458980560 -12.500000000 -36.458976746 30.458980560 -13.500000000 -36.458976746 30.458980560 -14.500000000 -36.458976746 30.458980560 -15.500000000 -36.458976746 30.458980560 -16.500000000 -36.458976746 30.458980560 -17.500000000 -36.458976746 30.458980560 -18.500000000 -36.458976746 30.458980560 -19.500000000 -36.458976746 30.458980560 -20.500000000 -36.458976746 30.458980560 -21.500000000 -36.458976746 30.458980560 -22.500000000 -36.458976746 30.458980560 -23.500000000 -36.458976746 30.458980560 -24.500000000 -36.458972931 30.458980560 -25.499988556 -36.458930969 30.458978653 -26.499858856 -36.458606720 30.458948135 -27.499073029 -36.456939697 30.458770752 -28.495611191 -36.450424194 30.458080292 -29.483789444 -36.430473328 30.456085205 -30.451217651 -36.380989075 30.451217651 -31.375356674 -36.280395508 30.438949585 -32.216838837 -36.112342834 30.405673981 -32.903377533 -35.879737854 30.336599350 --35.040893555 -35.040893555 30.413049698 --34.477191925 -35.518615723 30.410833359 -33.477191925 -35.518619537 30.410833359 -34.040893555 -35.040897369 30.413049698 --35.879737854 -33.903377533 30.336599350 --35.518615723 -34.477191925 30.410833359 -34.518619537 -34.477191925 30.410833359 -34.879737854 -33.903381348 30.336603165 --36.112342834 -33.216838837 30.405670166 -35.112342834 -33.216838837 30.405670166 --36.280395508 -32.375358582 30.438953400 -35.280395508 -32.375358582 30.438953400 --36.380989075 -31.451217651 30.451217651 -35.380989075 -31.451217651 30.451217651 --36.430473328 -30.483789444 30.456085205 -35.430473328 -30.483789444 30.456085205 --36.450424194 -29.495611191 30.458078384 -35.450424194 -29.495611191 30.458078384 --36.456939697 -28.499073029 30.458770752 -35.456939697 -28.499073029 30.458770752 --36.458606720 -27.499858856 30.458950043 -35.458606720 -27.499858856 30.458950043 --36.458930969 -26.499988556 30.458980560 -35.458930969 -26.499988556 30.458980560 --36.458976746 -25.500000000 30.458980560 -35.458976746 -25.500000000 30.458980560 --36.458980560 -24.500000000 30.458980560 -35.458976746 -24.500000000 30.458980560 --36.458980560 -23.500000000 30.458980560 -35.458976746 -23.500000000 30.458980560 --36.458980560 -22.500000000 30.458980560 -35.458976746 -22.500000000 30.458980560 --36.458980560 -21.500000000 30.458980560 -35.458976746 -21.500000000 30.458980560 --36.458980560 -20.500000000 30.458980560 -35.458976746 -20.500000000 30.458980560 --36.458980560 -19.500000000 30.458980560 -35.458976746 -19.500000000 30.458980560 --36.458980560 -18.500000000 30.458980560 -35.458976746 -18.500000000 30.458980560 --36.458980560 -17.500000000 30.458980560 -35.458976746 -17.500000000 30.458980560 --36.458980560 -16.500000000 30.458980560 -35.458976746 -16.500000000 30.458980560 --36.458980560 -15.500000000 30.458980560 -35.458976746 -15.500000000 30.458980560 --36.458980560 -14.500000000 30.458980560 -35.458976746 -14.500000000 30.458980560 --36.458980560 -13.500000000 30.458980560 -35.458976746 -13.500000000 30.458980560 
--36.458980560 -12.500000000 30.458980560 -35.458976746 -12.500000000 30.458980560 --36.458980560 -11.500000000 30.458980560 -35.458976746 -11.500000000 30.458980560 --36.458980560 -10.500000000 30.458980560 -35.458976746 -10.500000000 30.458980560 --36.458980560 -9.500000000 30.458980560 -35.458976746 -9.500000000 30.458980560 --36.458980560 -8.500000000 30.458980560 -35.458976746 -8.500000000 30.458980560 --36.458980560 -7.500000000 30.458980560 -35.458976746 -7.500000000 30.458980560 --36.458980560 -6.500000000 30.458980560 -35.458976746 -6.500000000 30.458980560 --36.458980560 -5.500000000 30.458980560 -35.458976746 -5.500000000 30.458980560 --36.458980560 -4.500000000 30.458980560 -35.458976746 -4.500000000 30.458980560 --36.458980560 -3.500000000 30.458980560 -35.458976746 -3.500000000 30.458980560 --36.458980560 -2.500000000 30.458980560 -35.458976746 -2.500000000 30.458980560 --36.458980560 -1.500000000 30.458980560 -35.458976746 -1.500000000 30.458980560 --36.458980560 -0.500000000 30.458980560 -35.458976746 -0.500000000 30.458980560 --36.458980560 0.500000000 30.458980560 -35.458976746 0.500000000 30.458980560 --36.458980560 1.500000000 30.458980560 -35.458976746 1.500000000 30.458980560 --36.458980560 2.500000000 30.458980560 -35.458976746 2.500000000 30.458980560 --36.458980560 3.500000000 30.458980560 -35.458976746 3.500000000 30.458980560 --36.458980560 4.500000000 30.458980560 -35.458976746 4.500000000 30.458980560 --36.458980560 5.500000000 30.458980560 -35.458976746 5.500000000 30.458980560 --36.458980560 6.500000000 30.458980560 -35.458976746 6.500000000 30.458980560 --36.458980560 7.500000000 30.458980560 -35.458976746 7.500000000 30.458980560 --36.458980560 8.500000000 30.458980560 -35.458976746 8.500000000 30.458980560 --36.458980560 9.500000000 30.458980560 -35.458976746 9.500000000 30.458980560 --36.458980560 10.500000000 30.458980560 -35.458976746 10.500000000 30.458980560 --36.458980560 11.500000000 30.458980560 -35.458976746 11.500000000 30.458980560 --36.458980560 12.500000000 30.458980560 -35.458976746 12.500000000 30.458980560 --36.458980560 13.500000000 30.458980560 -35.458976746 13.500000000 30.458980560 --36.458980560 14.500000000 30.458980560 -35.458976746 14.500000000 30.458980560 --36.458980560 15.500000000 30.458980560 -35.458976746 15.500000000 30.458980560 --36.458980560 16.500000000 30.458980560 -35.458976746 16.500000000 30.458980560 --36.458980560 17.500000000 30.458980560 -35.458976746 17.500000000 30.458980560 --36.458980560 18.500000000 30.458980560 -35.458976746 18.500000000 30.458980560 --36.458980560 19.500000000 30.458980560 -35.458976746 19.500000000 30.458980560 --36.458980560 20.500000000 30.458980560 -35.458976746 20.500000000 30.458980560 --36.458980560 21.500000000 30.458980560 -35.458976746 21.500000000 30.458980560 --36.458980560 22.500000000 30.458980560 -35.458976746 22.500000000 30.458980560 --36.458980560 23.500000000 30.458980560 -35.458976746 23.500000000 30.458980560 --36.458976746 24.500000000 30.458980560 -35.458972931 24.500000000 30.458980560 --36.458934784 25.499988556 30.458978653 -35.458930969 25.499988556 30.458978653 --36.458606720 26.499858856 30.458948135 -35.458606720 26.499858856 30.458948135 --36.456939697 27.499073029 30.458770752 -35.456939697 27.499073029 30.458770752 --36.450428009 28.495611191 30.458080292 -35.450424194 28.495611191 30.458080292 --36.430473328 29.483789444 30.456085205 -35.430473328 29.483789444 30.456085205 --36.380989075 30.451217651 30.451217651 -35.380989075 30.451217651 30.451217651 
--36.280395508 31.375356674 30.438953400 -35.280395508 31.375356674 30.438949585 --36.112342834 32.216835022 30.405673981 -35.112342834 32.216835022 30.405673981 --35.879737854 32.903377533 30.336599350 --35.518615723 33.477191925 30.410833359 -34.518615723 33.477191925 30.410833359 -34.879737854 32.903377533 30.336599350 --35.040893555 34.040893555 30.413049698 --34.477191925 34.518615723 30.410833359 -33.477191925 34.518615723 30.410833359 -34.040893555 34.040893555 30.413049698 --33.903377533 34.879737854 30.336599350 --33.216838837 35.112342834 30.405670166 --32.375358582 35.280395508 30.438953400 --31.451217651 35.380989075 30.451217651 --30.483789444 35.430473328 30.456085205 --29.495611191 35.450424194 30.458078384 --28.499073029 35.456939697 30.458770752 --27.499858856 35.458606720 30.458950043 --26.499988556 35.458930969 30.458980560 --25.500000000 35.458976746 30.458980560 --24.500000000 35.458980560 30.458980560 --23.500000000 35.458980560 30.458980560 --22.500000000 35.458980560 30.458980560 --21.500000000 35.458980560 30.458980560 --20.500000000 35.458980560 30.458980560 --19.500000000 35.458980560 30.458980560 --18.500000000 35.458980560 30.458980560 --17.500000000 35.458980560 30.458980560 --16.500000000 35.458980560 30.458980560 --15.500000000 35.458980560 30.458980560 --14.500000000 35.458980560 30.458980560 --13.500000000 35.458980560 30.458980560 --12.500000000 35.458980560 30.458980560 --11.500000000 35.458980560 30.458980560 --10.500000000 35.458980560 30.458980560 --9.500000000 35.458980560 30.458980560 --8.500000000 35.458980560 30.458980560 --7.500000000 35.458980560 30.458980560 --6.500000000 35.458980560 30.458980560 --5.500000000 35.458980560 30.458980560 --4.500000000 35.458980560 30.458980560 --3.500000000 35.458980560 30.458980560 --2.500000000 35.458980560 30.458980560 --1.500000000 35.458980560 30.458980560 --0.500000000 35.458980560 30.458980560 -0.500000000 35.458980560 30.458980560 -1.500000000 35.458980560 30.458980560 -2.500000000 35.458980560 30.458980560 -3.500000000 35.458980560 30.458980560 -4.500000000 35.458980560 30.458980560 -5.500000000 35.458980560 30.458980560 -6.500000000 35.458980560 30.458980560 -7.500000000 35.458980560 30.458980560 -8.500000000 35.458980560 30.458980560 -9.500000000 35.458980560 30.458980560 -10.500000000 35.458980560 30.458980560 -11.500000000 35.458980560 30.458980560 -12.500000000 35.458980560 30.458980560 -13.500000000 35.458980560 30.458980560 -14.500000000 35.458980560 30.458980560 -15.500000000 35.458980560 30.458980560 -16.500000000 35.458980560 30.458980560 -17.500000000 35.458980560 30.458980560 -18.500000000 35.458980560 30.458980560 -19.500000000 35.458980560 30.458980560 -20.500000000 35.458980560 30.458980560 -21.500000000 35.458980560 30.458980560 -22.500000000 35.458980560 30.458980560 -23.500000000 35.458980560 30.458980560 -24.500000000 35.458976746 30.458980560 -25.499988556 35.458934784 30.458978653 -26.499858856 35.458606720 30.458948135 -27.499073029 35.456939697 30.458770752 -28.495611191 35.450428009 30.458080292 -29.483789444 35.430473328 30.456085205 -30.451217651 35.380989075 30.451217651 -31.375360489 35.280395508 30.438953400 -32.216842651 35.112342834 30.405673981 -32.903377533 34.879737854 30.336603165 --33.840934753 -35.797355652 31.035345078 --33.254272461 -35.954723358 31.334243774 --32.371025085 -36.154674530 31.371026993 --31.438953400 -36.280391693 31.375360489 --30.476007462 -36.344280243 31.379222870 --29.492319107 -36.371486664 31.382312775 --28.498052597 -36.381027222 31.383813858 
--27.499622345 -36.383720398 31.384298325 --26.499948502 -36.384304047 31.384403229 --25.499996185 -36.384407043 31.384418488 --24.500000000 -36.384418488 31.384418488 --23.500000000 -36.384418488 31.384418488 --22.500000000 -36.384418488 31.384418488 --21.500000000 -36.384418488 31.384418488 --20.500000000 -36.384418488 31.384418488 --19.500000000 -36.384418488 31.384418488 --18.500000000 -36.384418488 31.384418488 --17.500000000 -36.384418488 31.384418488 --16.500000000 -36.384418488 31.384418488 --15.500000000 -36.384418488 31.384418488 --14.500000000 -36.384418488 31.384418488 --13.500000000 -36.384418488 31.384418488 --12.500000000 -36.384418488 31.384418488 --11.500000000 -36.384418488 31.384418488 --10.500000000 -36.384418488 31.384418488 --9.500000000 -36.384418488 31.384418488 --8.500000000 -36.384418488 31.384418488 --7.500000000 -36.384418488 31.384418488 --6.500000000 -36.384418488 31.384418488 --5.500000000 -36.384418488 31.384418488 --4.500000000 -36.384418488 31.384418488 --3.500000000 -36.384418488 31.384418488 --2.500000000 -36.384418488 31.384418488 --1.500000000 -36.384418488 31.384418488 --0.500000000 -36.384418488 31.384418488 -0.500000000 -36.384418488 31.384418488 -1.500000000 -36.384418488 31.384418488 -2.500000000 -36.384418488 31.384418488 -3.500000000 -36.384418488 31.384418488 -4.500000000 -36.384418488 31.384418488 -5.500000000 -36.384418488 31.384418488 -6.500000000 -36.384418488 31.384418488 -7.500000000 -36.384418488 31.384418488 -8.500000000 -36.384418488 31.384418488 -9.500000000 -36.384418488 31.384418488 -10.500000000 -36.384418488 31.384418488 -11.500000000 -36.384418488 31.384418488 -12.500000000 -36.384418488 31.384418488 -13.500000000 -36.384418488 31.384418488 -14.500000000 -36.384418488 31.384418488 -15.500000000 -36.384418488 31.384418488 -16.500000000 -36.384418488 31.384418488 -17.500000000 -36.384418488 31.384418488 -18.500000000 -36.384418488 31.384418488 -19.500000000 -36.384418488 31.384418488 -20.500000000 -36.384418488 31.384418488 -21.500000000 -36.384418488 31.384418488 -22.500000000 -36.384418488 31.384418488 -23.500000000 -36.384410858 31.384418488 -24.499996185 -36.384407043 31.384418488 -25.499948502 -36.384307861 31.384405136 -26.499618530 -36.383720398 31.384296417 -27.498050690 -36.381027222 31.383813858 -28.492321014 -36.371486664 31.382312775 -29.476007462 -36.344284058 31.379222870 -30.438953400 -36.280395508 31.375360489 -31.371026993 -36.154670715 31.371026993 -32.254276276 -35.954719543 31.334241867 -32.840930939 -35.797355652 31.035345078 --34.912059784 -34.912059784 31.313602448 --34.260543823 -35.428531647 31.380739212 --33.678352356 -35.670326233 31.736053467 -32.678352356 -35.670326233 31.736053467 -33.260547638 -35.428535461 31.380739212 -33.912063599 -34.912063599 31.313602448 --35.797355652 -33.840930939 31.035345078 --35.428535461 -34.260543823 31.380739212 -34.428535461 -34.260543823 31.380739212 -34.797355652 -33.840930939 31.035345078 --35.954723358 -33.254272461 31.334243774 --35.670322418 -33.678352356 31.736053467 -34.670326233 -33.678352356 31.736053467 -34.954723358 -33.254272461 31.334243774 --36.154674530 -32.371025085 31.371026993 -35.154674530 -32.371025085 31.371026993 --36.280391693 -31.438953400 31.375360489 -35.280395508 -31.438953400 31.375360489 --36.344280243 -30.476007462 31.379222870 -35.344280243 -30.476007462 31.379222870 --36.371486664 -29.492321014 31.382312775 -35.371486664 -29.492319107 31.382312775 --36.381027222 -28.498050690 31.383813858 -35.381027222 -28.498052597 31.383813858 
[surface test-data diff: raw vertex coordinate data (x y z triplets), collapsed by extraction; block continues]
-32.384300232 35.383720398 --26.499948502 -32.384403229 35.384311676 --25.499996185 -32.384422302 35.384407043 --24.500000000 -32.384422302 35.384418488 --23.500000000 -32.384422302 35.384418488 --22.500000000 -32.384422302 35.384418488 --21.500000000 -32.384422302 35.384418488 --20.500000000 -32.384422302 35.384418488 --19.500000000 -32.384422302 35.384418488 --18.500000000 -32.384422302 35.384418488 --17.500000000 -32.384422302 35.384418488 --16.500000000 -32.384422302 35.384418488 --15.500000000 -32.384422302 35.384418488 --14.500000000 -32.384422302 35.384418488 --13.500000000 -32.384422302 35.384418488 --12.500000000 -32.384422302 35.384418488 --11.500000000 -32.384422302 35.384418488 --10.500000000 -32.384422302 35.384418488 --9.500000000 -32.384422302 35.384418488 --8.500000000 -32.384422302 35.384418488 --7.500000000 -32.384422302 35.384418488 --6.500000000 -32.384422302 35.384418488 --5.500000000 -32.384422302 35.384418488 --4.500000000 -32.384422302 35.384418488 --3.500000000 -32.384422302 35.384418488 --2.500000000 -32.384422302 35.384418488 --1.500000000 -32.384422302 35.384418488 --0.500000000 -32.384422302 35.384418488 -0.500000000 -32.384422302 35.384418488 -1.500000000 -32.384422302 35.384418488 -2.500000000 -32.384422302 35.384418488 -3.500000000 -32.384422302 35.384418488 -4.500000000 -32.384422302 35.384418488 -5.500000000 -32.384422302 35.384418488 -6.500000000 -32.384422302 35.384418488 -7.500000000 -32.384422302 35.384418488 -8.500000000 -32.384422302 35.384418488 -9.500000000 -32.384422302 35.384418488 -10.500000000 -32.384422302 35.384418488 -11.500000000 -32.384422302 35.384418488 -12.500000000 -32.384422302 35.384418488 -13.500000000 -32.384422302 35.384418488 -14.500000000 -32.384422302 35.384418488 -15.500000000 -32.384422302 35.384418488 -16.500000000 -32.384422302 35.384418488 -17.500000000 -32.384422302 35.384418488 -18.500000000 -32.384422302 35.384418488 -19.500000000 -32.384422302 35.384418488 -20.500000000 -32.384422302 35.384418488 -21.500000000 -32.384422302 35.384418488 -22.500000000 -32.384422302 35.384418488 -23.500000000 -32.384422302 35.384418488 -24.499996185 -32.384418488 35.384407043 -25.499948502 -32.384407043 35.384307861 -26.499622345 -32.384300232 35.383720398 -27.498052597 -32.383811951 35.381023407 -28.492321014 -32.382316589 35.371486664 -29.476007462 -32.379222870 35.344280243 -30.438953400 -32.375358582 35.280395508 -31.371026993 -32.371025085 35.154674530 -32.254272461 -32.334243774 34.954723358 -32.840930939 -32.035346985 34.797355652 --33.903373718 -31.336603165 34.879734039 --33.216838837 -31.405673981 35.112346649 --32.375358582 -31.438955307 35.280395508 --31.451217651 -31.451221466 35.380989075 --30.483785629 -31.456085205 35.430477142 --29.495611191 -31.458078384 35.450424194 --28.499073029 -31.458770752 35.456939697 --27.499858856 -31.458948135 35.458606720 --26.499988556 -31.458978653 35.458930969 --25.500000000 -31.458980560 35.458976746 --24.500000000 -31.458980560 35.458976746 --23.500000000 -31.458980560 35.458976746 --22.500000000 -31.458980560 35.458976746 --21.500000000 -31.458980560 35.458976746 --20.500000000 -31.458980560 35.458976746 --19.500000000 -31.458980560 35.458976746 --18.500000000 -31.458980560 35.458976746 --17.500000000 -31.458980560 35.458976746 --16.500000000 -31.458980560 35.458976746 --15.500000000 -31.458980560 35.458976746 --14.500000000 -31.458980560 35.458976746 --13.500000000 -31.458980560 35.458976746 --12.500000000 -31.458980560 35.458976746 --11.500000000 -31.458980560 35.458976746 
--10.500000000 -31.458980560 35.458976746 --9.500000000 -31.458980560 35.458976746 --8.500000000 -31.458980560 35.458976746 --7.500000000 -31.458980560 35.458976746 --6.500000000 -31.458980560 35.458976746 --5.500000000 -31.458980560 35.458976746 --4.500000000 -31.458980560 35.458976746 --3.500000000 -31.458980560 35.458976746 --2.500000000 -31.458980560 35.458976746 --1.500000000 -31.458980560 35.458976746 --0.500000000 -31.458980560 35.458976746 -0.500000000 -31.458980560 35.458976746 -1.500000000 -31.458980560 35.458976746 -2.500000000 -31.458980560 35.458976746 -3.500000000 -31.458980560 35.458976746 -4.500000000 -31.458980560 35.458976746 -5.500000000 -31.458980560 35.458976746 -6.500000000 -31.458980560 35.458976746 -7.500000000 -31.458980560 35.458976746 -8.500000000 -31.458980560 35.458976746 -9.500000000 -31.458980560 35.458976746 -10.500000000 -31.458980560 35.458976746 -11.500000000 -31.458980560 35.458976746 -12.500000000 -31.458980560 35.458976746 -13.500000000 -31.458980560 35.458976746 -14.500000000 -31.458980560 35.458976746 -15.500000000 -31.458980560 35.458976746 -16.500000000 -31.458980560 35.458976746 -17.500000000 -31.458980560 35.458976746 -18.500000000 -31.458980560 35.458976746 -19.500000000 -31.458980560 35.458976746 -20.500000000 -31.458980560 35.458976746 -21.500000000 -31.458980560 35.458976746 -22.500000000 -31.458980560 35.458976746 -23.500000000 -31.458980560 35.458976746 -24.500000000 -31.458980560 35.458976746 -25.499988556 -31.458978653 35.458930969 -26.499858856 -31.458948135 35.458606720 -27.499073029 -31.458770752 35.456935883 -28.495611191 -31.458078384 35.450424194 -29.483785629 -31.456085205 35.430473328 -30.451217651 -31.451217651 35.380989075 -31.375356674 -31.438953400 35.280395508 -32.216842651 -31.405675888 35.112346649 -32.903373718 -31.336603165 34.879734039 --33.946807861 -30.442844391 34.937091827 --33.220714569 -30.460596085 35.185966492 --32.379222870 -30.476007462 35.344284058 --31.456081390 -30.483789444 35.430477142 --30.486965179 -30.486968994 35.469234467 --29.496957779 -30.488048553 35.483337402 --28.499475479 -30.488346100 35.487373352 --27.499938965 -30.488399506 35.488258362 --26.499996185 -30.488403320 35.488391876 --25.500000000 -30.488403320 35.488403320 --24.500000000 -30.488403320 35.488403320 --23.500000000 -30.488403320 35.488403320 --22.500000000 -30.488403320 35.488403320 --21.500000000 -30.488403320 35.488403320 --20.500000000 -30.488403320 35.488403320 --19.500000000 -30.488403320 35.488403320 --18.500000000 -30.488403320 35.488403320 --17.500000000 -30.488403320 35.488403320 --16.500000000 -30.488403320 35.488403320 --15.500000000 -30.488403320 35.488403320 --14.500000000 -30.488403320 35.488403320 --13.500000000 -30.488403320 35.488403320 --12.500000000 -30.488403320 35.488403320 --11.500000000 -30.488403320 35.488403320 --10.500000000 -30.488403320 35.488403320 --9.500000000 -30.488403320 35.488403320 --8.500000000 -30.488403320 35.488403320 --7.500000000 -30.488403320 35.488403320 --6.500000000 -30.488403320 35.488403320 --5.500000000 -30.488403320 35.488403320 --4.500000000 -30.488403320 35.488403320 --3.500000000 -30.488403320 35.488403320 --2.500000000 -30.488403320 35.488403320 --1.500000000 -30.488403320 35.488403320 --0.500000000 -30.488403320 35.488403320 -0.500000000 -30.488403320 35.488403320 -1.500000000 -30.488403320 35.488403320 -2.500000000 -30.488403320 35.488403320 -3.500000000 -30.488403320 35.488403320 -4.500000000 -30.488403320 35.488403320 -5.500000000 -30.488403320 35.488403320 -6.500000000 
-30.488403320 35.488403320 -7.500000000 -30.488403320 35.488403320 -8.500000000 -30.488403320 35.488403320 -9.500000000 -30.488403320 35.488403320 -10.500000000 -30.488403320 35.488403320 -11.500000000 -30.488403320 35.488403320 -12.500000000 -30.488403320 35.488403320 -13.500000000 -30.488403320 35.488403320 -14.500000000 -30.488403320 35.488403320 -15.500000000 -30.488403320 35.488403320 -16.500000000 -30.488403320 35.488403320 -17.500000000 -30.488403320 35.488403320 -18.500000000 -30.488403320 35.488403320 -19.500000000 -30.488403320 35.488403320 -20.500000000 -30.488403320 35.488403320 -21.500000000 -30.488403320 35.488403320 -22.500000000 -30.488403320 35.488403320 -23.500000000 -30.488403320 35.488403320 -24.500000000 -30.488403320 35.488403320 -25.499996185 -30.488403320 35.488391876 -26.499938965 -30.488399506 35.488258362 -27.499475479 -30.488346100 35.487373352 -28.496959686 -30.488048553 35.483337402 -29.486968994 -30.486968994 35.469234467 -30.456085205 -30.483785629 35.430473328 -31.379222870 -30.476007462 35.344284058 -32.220714569 -30.460596085 35.185966492 -32.946807861 -30.442840576 34.937091827 --33.968864441 -29.481937408 34.965763092 --33.227874756 -29.486719131 35.217510223 --32.382308960 -29.492319107 35.371490479 --31.458078384 -29.495611191 35.450428009 --30.488048553 -29.496957779 35.483337402 --29.497375488 -29.497371674 35.494174957 --28.499578476 -29.497461319 35.496910095 --27.499954224 -29.497470856 35.497409821 --26.499996185 -29.497470856 35.497467041 --25.500000000 -29.497470856 35.497474670 --24.500000000 -29.497470856 35.497474670 --23.500000000 -29.497470856 35.497474670 --22.500000000 -29.497470856 35.497474670 --21.500000000 -29.497470856 35.497474670 --20.500000000 -29.497470856 35.497474670 --19.500000000 -29.497470856 35.497474670 --18.500000000 -29.497470856 35.497474670 --17.500000000 -29.497470856 35.497474670 --16.500000000 -29.497470856 35.497474670 --15.500000000 -29.497470856 35.497474670 --14.500000000 -29.497470856 35.497474670 --13.500000000 -29.497470856 35.497474670 --12.500000000 -29.497470856 35.497474670 --11.500000000 -29.497470856 35.497474670 --10.500000000 -29.497470856 35.497474670 --9.500000000 -29.497470856 35.497474670 --8.500000000 -29.497470856 35.497474670 --7.500000000 -29.497470856 35.497474670 --6.500000000 -29.497470856 35.497474670 --5.500000000 -29.497470856 35.497474670 --4.500000000 -29.497470856 35.497474670 --3.500000000 -29.497470856 35.497474670 --2.500000000 -29.497470856 35.497474670 --1.500000000 -29.497470856 35.497474670 --0.500000000 -29.497470856 35.497474670 -0.500000000 -29.497470856 35.497474670 -1.500000000 -29.497470856 35.497474670 -2.500000000 -29.497470856 35.497474670 -3.500000000 -29.497470856 35.497474670 -4.500000000 -29.497470856 35.497474670 -5.500000000 -29.497470856 35.497474670 -6.500000000 -29.497470856 35.497474670 -7.500000000 -29.497470856 35.497474670 -8.500000000 -29.497470856 35.497474670 -9.500000000 -29.497470856 35.497474670 -10.500000000 -29.497470856 35.497474670 -11.500000000 -29.497470856 35.497474670 -12.500000000 -29.497470856 35.497474670 -13.500000000 -29.497470856 35.497474670 -14.500000000 -29.497470856 35.497474670 -15.500000000 -29.497470856 35.497474670 -16.500000000 -29.497470856 35.497474670 -17.500000000 -29.497470856 35.497474670 -18.500000000 -29.497470856 35.497474670 -19.500000000 -29.497470856 35.497474670 -20.500000000 -29.497470856 35.497474670 -21.500000000 -29.497470856 35.497474670 -22.500000000 -29.497470856 35.497474670 -23.500000000 -29.497470856 
35.497474670 -24.500000000 -29.497470856 35.497474670 -25.499996185 -29.497470856 35.497470856 -26.499954224 -29.497470856 35.497406006 -27.499576569 -29.497461319 35.496910095 -28.497375488 -29.497375488 35.494174957 -29.488048553 -29.496957779 35.483337402 -30.458078384 -29.495611191 35.450428009 -31.382312775 -29.492321014 35.371490479 -32.227874756 -29.486719131 35.217510223 -32.968864441 -29.481937408 34.965763092 --33.977611542 -28.495172501 34.976860046 --33.231597900 -28.496379852 35.228954315 --32.383808136 -28.498052597 35.381023407 --31.458766937 -28.499073029 35.456939697 --30.488346100 -28.499475479 35.487373352 --29.497461319 -28.499576569 35.496910095 --28.499593735 -28.499591827 35.499164581 --27.499954224 -28.499591827 35.499546051 --26.499996185 -28.499591827 35.499588013 --25.500000000 -28.499591827 35.499591827 --24.500000000 -28.499591827 35.499591827 --23.500000000 -28.499591827 35.499591827 --22.500000000 -28.499591827 35.499591827 --21.500000000 -28.499591827 35.499591827 --20.500000000 -28.499591827 35.499591827 --19.500000000 -28.499591827 35.499591827 --18.500000000 -28.499591827 35.499591827 --17.500000000 -28.499591827 35.499591827 --16.500000000 -28.499591827 35.499591827 --15.500000000 -28.499591827 35.499591827 --14.500000000 -28.499591827 35.499591827 --13.500000000 -28.499591827 35.499591827 --12.500000000 -28.499591827 35.499591827 --11.500000000 -28.499591827 35.499591827 --10.500000000 -28.499591827 35.499591827 --9.500000000 -28.499591827 35.499591827 --8.500000000 -28.499591827 35.499591827 --7.500000000 -28.499591827 35.499591827 --6.500000000 -28.499591827 35.499591827 --5.500000000 -28.499591827 35.499591827 --4.500000000 -28.499591827 35.499591827 --3.500000000 -28.499591827 35.499591827 --2.500000000 -28.499591827 35.499591827 --1.500000000 -28.499591827 35.499591827 --0.500000000 -28.499591827 35.499591827 -0.500000000 -28.499591827 35.499591827 -1.500000000 -28.499591827 35.499591827 -2.500000000 -28.499591827 35.499591827 -3.500000000 -28.499591827 35.499591827 -4.500000000 -28.499591827 35.499591827 -5.500000000 -28.499591827 35.499591827 -6.500000000 -28.499591827 35.499591827 -7.500000000 -28.499591827 35.499591827 -8.500000000 -28.499591827 35.499591827 -9.500000000 -28.499591827 35.499591827 -10.500000000 -28.499591827 35.499591827 -11.500000000 -28.499591827 35.499591827 -12.500000000 -28.499591827 35.499591827 -13.500000000 -28.499591827 35.499591827 -14.500000000 -28.499591827 35.499591827 -15.500000000 -28.499591827 35.499591827 -16.500000000 -28.499591827 35.499591827 -17.500000000 -28.499591827 35.499591827 -18.500000000 -28.499591827 35.499591827 -19.500000000 -28.499591827 35.499591827 -20.500000000 -28.499591827 35.499591827 -21.500000000 -28.499591827 35.499591827 -22.500000000 -28.499591827 35.499591827 -23.500000000 -28.499591827 35.499591827 -24.500000000 -28.499591827 35.499591827 -25.499996185 -28.499591827 35.499588013 -26.499954224 -28.499591827 35.499542236 -27.499591827 -28.499591827 35.499160767 -28.497461319 -28.499576569 35.496910095 -29.488346100 -28.499475479 35.487377167 -30.458766937 -28.499073029 35.456939697 -31.383810043 -28.498052597 35.381023407 -32.231597900 -28.496377945 35.228954315 -32.977611542 -28.495172501 34.976860046 --33.980331421 -27.498952866 34.980201721 --33.232864380 -27.499225616 35.232307434 --32.384296417 -27.499622345 35.383720398 --31.458948135 -27.499858856 35.458606720 --30.488399506 -27.499938965 35.488258362 --29.497472763 -27.499954224 35.497406006 --28.499593735 -27.499954224 
35.499549866 --27.499954224 -27.499954224 35.499908447 --26.499996185 -27.499954224 35.499950409 --25.500000000 -27.499954224 35.499954224 --24.500000000 -27.499954224 35.499954224 --23.500000000 -27.499954224 35.499954224 --22.500000000 -27.499954224 35.499954224 --21.500000000 -27.499954224 35.499954224 --20.500000000 -27.499954224 35.499954224 --19.500000000 -27.499954224 35.499954224 --18.500000000 -27.499954224 35.499954224 --17.500000000 -27.499954224 35.499954224 --16.500000000 -27.499954224 35.499954224 --15.500000000 -27.499954224 35.499954224 --14.500000000 -27.499954224 35.499954224 --13.500000000 -27.499954224 35.499954224 --12.500000000 -27.499954224 35.499954224 --11.500000000 -27.499954224 35.499954224 --10.500000000 -27.499954224 35.499954224 --9.500000000 -27.499954224 35.499954224 --8.500000000 -27.499954224 35.499954224 --7.500000000 -27.499954224 35.499954224 --6.500000000 -27.499954224 35.499954224 --5.500000000 -27.499954224 35.499954224 --4.500000000 -27.499954224 35.499954224 --3.500000000 -27.499954224 35.499954224 --2.500000000 -27.499954224 35.499954224 --1.500000000 -27.499954224 35.499954224 --0.500000000 -27.499954224 35.499954224 -0.500000000 -27.499954224 35.499954224 -1.500000000 -27.499954224 35.499954224 -2.500000000 -27.499954224 35.499954224 -3.500000000 -27.499954224 35.499954224 -4.500000000 -27.499954224 35.499954224 -5.500000000 -27.499954224 35.499954224 -6.500000000 -27.499954224 35.499954224 -7.500000000 -27.499954224 35.499954224 -8.500000000 -27.499954224 35.499954224 -9.500000000 -27.499954224 35.499954224 -10.500000000 -27.499954224 35.499954224 -11.500000000 -27.499954224 35.499954224 -12.500000000 -27.499954224 35.499954224 -13.500000000 -27.499954224 35.499954224 -14.500000000 -27.499954224 35.499954224 -15.500000000 -27.499954224 35.499954224 -16.500000000 -27.499954224 35.499954224 -17.500000000 -27.499954224 35.499954224 -18.500000000 -27.499954224 35.499954224 -19.500000000 -27.499954224 35.499954224 -20.500000000 -27.499954224 35.499954224 -21.500000000 -27.499954224 35.499954224 -22.500000000 -27.499954224 35.499954224 -23.500000000 -27.499954224 35.499954224 -24.500000000 -27.499954224 35.499954224 -25.499996185 -27.499954224 35.499950409 -26.499954224 -27.499954224 35.499908447 -27.499591827 -27.499954224 35.499546051 -28.497470856 -27.499954224 35.497413635 -29.488399506 -27.499938965 35.488258362 -30.458948135 -27.499858856 35.458606720 -31.384298325 -27.499618530 35.383720398 -32.232860565 -27.499225616 35.232307434 -32.980335236 -27.498952866 34.980201721 --33.980972290 -26.499826431 34.980957031 --33.233165741 -26.499874115 35.233070374 --32.384403229 -26.499948502 35.384307861 --31.458978653 -26.499988556 35.458930969 --30.488407135 -26.499996185 35.488384247 --29.497472763 -26.499996185 35.497467041 --28.499593735 -26.499996185 35.499591827 --27.499954224 -26.499996185 35.499950409 --26.499996185 -26.499996185 35.499992371 --25.500000000 -26.499996185 35.499996185 --24.500000000 -26.499996185 35.499996185 --23.500000000 -26.499996185 35.499996185 --22.500000000 -26.499996185 35.499996185 --21.500000000 -26.499996185 35.499996185 --20.500000000 -26.499996185 35.499996185 --19.500000000 -26.499996185 35.499996185 --18.500000000 -26.499996185 35.499996185 --17.500000000 -26.499996185 35.499996185 --16.500000000 -26.499996185 35.499996185 --15.500000000 -26.499996185 35.499996185 --14.500000000 -26.499996185 35.499996185 --13.500000000 -26.499996185 35.499996185 --12.500000000 -26.499996185 35.499996185 --11.500000000 
-26.499996185 35.499996185 --10.500000000 -26.499996185 35.499996185 --9.500000000 -26.499996185 35.499996185 --8.500000000 -26.499996185 35.499996185 --7.500000000 -26.499996185 35.499996185 --6.500000000 -26.499996185 35.499996185 --5.500000000 -26.499996185 35.499996185 --4.500000000 -26.499996185 35.499996185 --3.500000000 -26.499996185 35.499996185 --2.500000000 -26.499996185 35.499996185 --1.500000000 -26.499996185 35.499996185 --0.500000000 -26.499996185 35.499996185 -0.500000000 -26.499996185 35.499996185 -1.500000000 -26.499996185 35.499996185 -2.500000000 -26.499996185 35.499996185 -3.500000000 -26.499996185 35.499996185 -4.500000000 -26.499996185 35.499996185 -5.500000000 -26.499996185 35.499996185 -6.500000000 -26.499996185 35.499996185 -7.500000000 -26.499996185 35.499996185 -8.500000000 -26.499996185 35.499996185 -9.500000000 -26.499996185 35.499996185 -10.500000000 -26.499996185 35.499996185 -11.500000000 -26.499996185 35.499996185 -12.500000000 -26.499996185 35.499996185 -13.500000000 -26.499996185 35.499996185 -14.500000000 -26.499996185 35.499996185 -15.500000000 -26.499996185 35.499996185 -16.500000000 -26.499996185 35.499996185 -17.500000000 -26.499996185 35.499996185 -18.500000000 -26.499996185 35.499996185 -19.500000000 -26.499996185 35.499996185 -20.500000000 -26.499996185 35.499996185 -21.500000000 -26.499996185 35.499996185 -22.500000000 -26.499996185 35.499996185 -23.500000000 -26.499996185 35.499996185 -24.500000000 -26.499996185 35.499996185 -25.499996185 -26.499996185 35.499992371 -26.499954224 -26.499996185 35.499950409 -27.499591827 -26.499996185 35.499591827 -28.497470856 -26.499996185 35.497467041 -29.488407135 -26.499996185 35.488391876 -30.458978653 -26.499988556 35.458934784 -31.384403229 -26.499948502 35.384307861 -32.233165741 -26.499872208 35.233070374 -32.980972290 -26.499826431 34.980957031 --33.981090546 -25.499979019 34.981086731 --33.233222961 -25.499984741 35.233203888 --32.384422302 -25.499996185 35.384407043 --31.458978653 -25.500000000 35.458976746 --30.488407135 -25.500000000 35.488403320 --29.497472763 -25.500000000 35.497474670 --28.499593735 -25.500000000 35.499591827 --27.499954224 -25.500000000 35.499954224 --26.499996185 -25.500000000 35.499996185 --25.500000000 -25.500000000 35.500000000 --24.500000000 -25.500000000 35.500000000 --23.500000000 -25.500000000 35.500000000 --22.500000000 -25.500000000 35.500000000 --21.500000000 -25.500000000 35.500000000 --20.500000000 -25.500000000 35.500000000 --19.500000000 -25.500000000 35.500000000 --18.500000000 -25.500000000 35.500000000 --17.500000000 -25.500000000 35.500000000 --16.500000000 -25.500000000 35.500000000 --15.500000000 -25.500000000 35.500000000 --14.500000000 -25.500000000 35.500000000 --13.500000000 -25.500000000 35.500000000 --12.500000000 -25.500000000 35.500000000 --11.500000000 -25.500000000 35.500000000 --10.500000000 -25.500000000 35.500000000 --9.500000000 -25.500000000 35.500000000 --8.500000000 -25.500000000 35.500000000 --7.500000000 -25.500000000 35.500000000 --6.500000000 -25.500000000 35.500000000 --5.500000000 -25.500000000 35.500000000 --4.500000000 -25.500000000 35.500000000 --3.500000000 -25.500000000 35.500000000 --2.500000000 -25.500000000 35.500000000 --1.500000000 -25.500000000 35.500000000 --0.500000000 -25.500000000 35.500000000 -0.500000000 -25.500000000 35.500000000 -1.500000000 -25.500000000 35.500000000 -2.500000000 -25.500000000 35.500000000 -3.500000000 -25.500000000 35.500000000 -4.500000000 -25.500000000 35.500000000 -5.500000000 -25.500000000 
35.500000000 -6.500000000 -25.500000000 35.500000000 -7.500000000 -25.500000000 35.500000000 -8.500000000 -25.500000000 35.500000000 -9.500000000 -25.500000000 35.500000000 -10.500000000 -25.500000000 35.500000000 -11.500000000 -25.500000000 35.500000000 -12.500000000 -25.500000000 35.500000000 -13.500000000 -25.500000000 35.500000000 -14.500000000 -25.500000000 35.500000000 -15.500000000 -25.500000000 35.500000000 -16.500000000 -25.500000000 35.500000000 -17.500000000 -25.500000000 35.500000000 -18.500000000 -25.500000000 35.500000000 -19.500000000 -25.500000000 35.500000000 -20.500000000 -25.500000000 35.500000000 -21.500000000 -25.500000000 35.500000000 -22.500000000 -25.500000000 35.500000000 -23.500000000 -25.500000000 35.500000000 -24.500000000 -25.500000000 35.500000000 -25.499996185 -25.500000000 35.499996185 -26.499954224 -25.500000000 35.499954224 -27.499591827 -25.500000000 35.499591827 -28.497470856 -25.500000000 35.497467041 -29.488407135 -25.500000000 35.488403320 -30.458978653 -25.500000000 35.458976746 -31.384418488 -25.499996185 35.384407043 -32.233215332 -25.499986649 35.233203888 -32.981086731 -25.499977112 34.981086731 --33.981101990 -24.499998093 34.981101990 --33.233226776 -24.500000000 35.233222961 --32.384422302 -24.500000000 35.384418488 --31.458978653 -24.500000000 35.458980560 --30.488407135 -24.500000000 35.488403320 --29.497472763 -24.500000000 35.497474670 --28.499593735 -24.500000000 35.499591827 --27.499954224 -24.500000000 35.499954224 --26.499996185 -24.500000000 35.499996185 --25.500000000 -24.500000000 35.500000000 --24.500000000 -24.500000000 35.500000000 --23.500000000 -24.500000000 35.500000000 --22.500000000 -24.500000000 35.500000000 --21.500000000 -24.500000000 35.500000000 --20.500000000 -24.500000000 35.500000000 --19.500000000 -24.500000000 35.500000000 --18.500000000 -24.500000000 35.500000000 --17.500000000 -24.500000000 35.500000000 --16.500000000 -24.500000000 35.500000000 --15.500000000 -24.500000000 35.500000000 --14.500000000 -24.500000000 35.500000000 --13.500000000 -24.500000000 35.500000000 --12.500000000 -24.500000000 35.500000000 --11.500000000 -24.500000000 35.500000000 --10.500000000 -24.500000000 35.500000000 --9.500000000 -24.500000000 35.500000000 --8.500000000 -24.500000000 35.500000000 --7.500000000 -24.500000000 35.500000000 --6.500000000 -24.500000000 35.500000000 --5.500000000 -24.500000000 35.500000000 --4.500000000 -24.500000000 35.500000000 --3.500000000 -24.500000000 35.500000000 --2.500000000 -24.500000000 35.500000000 --1.500000000 -24.500000000 35.500000000 --0.500000000 -24.500000000 35.500000000 -0.500000000 -24.500000000 35.500000000 -1.500000000 -24.500000000 35.500000000 -2.500000000 -24.500000000 35.500000000 -3.500000000 -24.500000000 35.500000000 -4.500000000 -24.500000000 35.500000000 -5.500000000 -24.500000000 35.500000000 -6.500000000 -24.500000000 35.500000000 -7.500000000 -24.500000000 35.500000000 -8.500000000 -24.500000000 35.500000000 -9.500000000 -24.500000000 35.500000000 -10.500000000 -24.500000000 35.500000000 -11.500000000 -24.500000000 35.500000000 -12.500000000 -24.500000000 35.500000000 -13.500000000 -24.500000000 35.500000000 -14.500000000 -24.500000000 35.500000000 -15.500000000 -24.500000000 35.500000000 -16.500000000 -24.500000000 35.500000000 -17.500000000 -24.500000000 35.500000000 -18.500000000 -24.500000000 35.500000000 -19.500000000 -24.500000000 35.500000000 -20.500000000 -24.500000000 35.500000000 -21.500000000 -24.500000000 35.500000000 -22.500000000 -24.500000000 35.500000000 
-23.500000000 -24.500000000 35.500000000 -24.500000000 -24.500000000 35.500000000 -25.499996185 -24.500000000 35.499996185 -26.499954224 -24.500000000 35.499954224 -27.499591827 -24.500000000 35.499591827 -28.497470856 -24.500000000 35.497467041 -29.488407135 -24.500000000 35.488403320 -30.458978653 -24.500000000 35.458980560 -31.384418488 -24.500000000 35.384418488 -32.233222961 -24.499998093 35.233222961 -32.981101990 -24.499998093 34.981101990 --33.981101990 -23.500000000 34.981101990 --33.233226776 -23.500000000 35.233222961 --32.384422302 -23.500000000 35.384418488 --31.458978653 -23.500000000 35.458980560 --30.488407135 -23.500000000 35.488403320 --29.497472763 -23.500000000 35.497474670 --28.499593735 -23.500000000 35.499591827 --27.499954224 -23.500000000 35.499954224 --26.499996185 -23.500000000 35.499996185 --25.500000000 -23.500000000 35.500000000 --24.500000000 -23.500000000 35.500000000 --23.500000000 -23.500000000 35.500000000 --22.500000000 -23.500000000 35.500000000 --21.500000000 -23.500000000 35.500000000 --20.500000000 -23.500000000 35.500000000 --19.500000000 -23.500000000 35.500000000 --18.500000000 -23.500000000 35.500000000 --17.500000000 -23.500000000 35.500000000 --16.500000000 -23.500000000 35.500000000 --15.500000000 -23.500000000 35.500000000 --14.500000000 -23.500000000 35.500000000 --13.500000000 -23.500000000 35.500000000 --12.500000000 -23.500000000 35.500000000 --11.500000000 -23.500000000 35.500000000 --10.500000000 -23.500000000 35.500000000 --9.500000000 -23.500000000 35.500000000 --8.500000000 -23.500000000 35.500000000 --7.500000000 -23.500000000 35.500000000 --6.500000000 -23.500000000 35.500000000 --5.500000000 -23.500000000 35.500000000 --4.500000000 -23.500000000 35.500000000 --3.500000000 -23.500000000 35.500000000 --2.500000000 -23.500000000 35.500000000 --1.500000000 -23.500000000 35.500000000 --0.500000000 -23.500000000 35.500000000 -0.500000000 -23.500000000 35.500000000 -1.500000000 -23.500000000 35.500000000 -2.500000000 -23.500000000 35.500000000 -3.500000000 -23.500000000 35.500000000 -4.500000000 -23.500000000 35.500000000 -5.500000000 -23.500000000 35.500000000 -6.500000000 -23.500000000 35.500000000 -7.500000000 -23.500000000 35.500000000 -8.500000000 -23.500000000 35.500000000 -9.500000000 -23.500000000 35.500000000 -10.500000000 -23.500000000 35.500000000 -11.500000000 -23.500000000 35.500000000 -12.500000000 -23.500000000 35.500000000 -13.500000000 -23.500000000 35.500000000 -14.500000000 -23.500000000 35.500000000 -15.500000000 -23.500000000 35.500000000 -16.500000000 -23.500000000 35.500000000 -17.500000000 -23.500000000 35.500000000 -18.500000000 -23.500000000 35.500000000 -19.500000000 -23.500000000 35.500000000 -20.500000000 -23.500000000 35.500000000 -21.500000000 -23.500000000 35.500000000 -22.500000000 -23.500000000 35.500000000 -23.500000000 -23.500000000 35.500000000 -24.500000000 -23.500000000 35.500000000 -25.499996185 -23.500000000 35.499996185 -26.499954224 -23.500000000 35.499954224 -27.499591827 -23.500000000 35.499591827 -28.497470856 -23.500000000 35.497467041 -29.488407135 -23.500000000 35.488403320 -30.458978653 -23.500000000 35.458980560 -31.384418488 -23.500000000 35.384422302 -32.233222961 -23.500000000 35.233222961 -32.981101990 -23.500000000 34.981101990 --33.981101990 -22.500000000 34.981101990 --33.233226776 -22.500000000 35.233222961 --32.384422302 -22.500000000 35.384418488 --31.458978653 -22.500000000 35.458980560 --30.488407135 -22.500000000 35.488403320 --29.497472763 -22.500000000 35.497474670 
--28.499593735 -22.500000000 35.499591827 --27.499954224 -22.500000000 35.499954224 --26.499996185 -22.500000000 35.499996185 --25.500000000 -22.500000000 35.500000000 --24.500000000 -22.500000000 35.500000000 --23.500000000 -22.500000000 35.500000000 --22.500000000 -22.500000000 35.500000000 --21.500000000 -22.500000000 35.500000000 --20.500000000 -22.500000000 35.500000000 --19.500000000 -22.500000000 35.500000000 --18.500000000 -22.500000000 35.500000000 --17.500000000 -22.500000000 35.500000000 --16.500000000 -22.500000000 35.500000000 --15.500000000 -22.500000000 35.500000000 --14.500000000 -22.500000000 35.500000000 --13.500000000 -22.500000000 35.500000000 --12.500000000 -22.500000000 35.500000000 --11.500000000 -22.500000000 35.500000000 --10.500000000 -22.500000000 35.500000000 --9.500000000 -22.500000000 35.500000000 --8.500000000 -22.500000000 35.500000000 --7.500000000 -22.500000000 35.500000000 --6.500000000 -22.500000000 35.500000000 --5.500000000 -22.500000000 35.500000000 --4.500000000 -22.500000000 35.500000000 --3.500000000 -22.500000000 35.500000000 --2.500000000 -22.500000000 35.500000000 --1.500000000 -22.500000000 35.500000000 --0.500000000 -22.500000000 35.500000000 -0.500000000 -22.500000000 35.500000000 -1.500000000 -22.500000000 35.500000000 -2.500000000 -22.500000000 35.500000000 -3.500000000 -22.500000000 35.500000000 -4.500000000 -22.500000000 35.500000000 -5.500000000 -22.500000000 35.500000000 -6.500000000 -22.500000000 35.500000000 -7.500000000 -22.500000000 35.500000000 -8.500000000 -22.500000000 35.500000000 -9.500000000 -22.500000000 35.500000000 -10.500000000 -22.500000000 35.500000000 -11.500000000 -22.500000000 35.500000000 -12.500000000 -22.500000000 35.500000000 -13.500000000 -22.500000000 35.500000000 -14.500000000 -22.500000000 35.500000000 -15.500000000 -22.500000000 35.500000000 -16.500000000 -22.500000000 35.500000000 -17.500000000 -22.500000000 35.500000000 -18.500000000 -22.500000000 35.500000000 -19.500000000 -22.500000000 35.500000000 -20.500000000 -22.500000000 35.500000000 -21.500000000 -22.500000000 35.500000000 -22.500000000 -22.500000000 35.500000000 -23.500000000 -22.500000000 35.500000000 -24.500000000 -22.500000000 35.500000000 -25.499996185 -22.500000000 35.499996185 -26.499954224 -22.500000000 35.499954224 -27.499591827 -22.500000000 35.499591827 -28.497470856 -22.500000000 35.497467041 -29.488407135 -22.500000000 35.488403320 -30.458978653 -22.500000000 35.458980560 -31.384418488 -22.500000000 35.384422302 -32.233222961 -22.500000000 35.233222961 -32.981101990 -22.500000000 34.981101990 --33.981101990 -21.500000000 34.981101990 --33.233226776 -21.500000000 35.233222961 --32.384422302 -21.500000000 35.384418488 --31.458978653 -21.500000000 35.458980560 --30.488407135 -21.500000000 35.488403320 --29.497472763 -21.500000000 35.497474670 --28.499593735 -21.500000000 35.499591827 --27.499954224 -21.500000000 35.499954224 --26.499996185 -21.500000000 35.499996185 --25.500000000 -21.500000000 35.500000000 --24.500000000 -21.500000000 35.500000000 --23.500000000 -21.500000000 35.500000000 --22.500000000 -21.500000000 35.500000000 --21.500000000 -21.500000000 35.500000000 --20.500000000 -21.500000000 35.500000000 --19.500000000 -21.500000000 35.500000000 --18.500000000 -21.500000000 35.500000000 --17.500000000 -21.500000000 35.500000000 --16.500000000 -21.500000000 35.500000000 --15.500000000 -21.500000000 35.500000000 --14.500000000 -21.500000000 35.500000000 --13.500000000 -21.500000000 35.500000000 --12.500000000 -21.500000000 
35.500000000 --11.500000000 -21.500000000 35.500000000 --10.500000000 -21.500000000 35.500000000 --9.500000000 -21.500000000 35.500000000 --8.500000000 -21.500000000 35.500000000 --7.500000000 -21.500000000 35.500000000 --6.500000000 -21.500000000 35.500000000 --5.500000000 -21.500000000 35.500000000 --4.500000000 -21.500000000 35.500000000 --3.500000000 -21.500000000 35.500000000 --2.500000000 -21.500000000 35.500000000 --1.500000000 -21.500000000 35.500000000 --0.500000000 -21.500000000 35.500000000 -0.500000000 -21.500000000 35.500000000 -1.500000000 -21.500000000 35.500000000 -2.500000000 -21.500000000 35.500000000 -3.500000000 -21.500000000 35.500000000 -4.500000000 -21.500000000 35.500000000 -5.500000000 -21.500000000 35.500000000 -6.500000000 -21.500000000 35.500000000 -7.500000000 -21.500000000 35.500000000 -8.500000000 -21.500000000 35.500000000 -9.500000000 -21.500000000 35.500000000 -10.500000000 -21.500000000 35.500000000 -11.500000000 -21.500000000 35.500000000 -12.500000000 -21.500000000 35.500000000 -13.500000000 -21.500000000 35.500000000 -14.500000000 -21.500000000 35.500000000 -15.500000000 -21.500000000 35.500000000 -16.500000000 -21.500000000 35.500000000 -17.500000000 -21.500000000 35.500000000 -18.500000000 -21.500000000 35.500000000 -19.500000000 -21.500000000 35.500000000 -20.500000000 -21.500000000 35.500000000 -21.500000000 -21.500000000 35.500000000 -22.500000000 -21.500000000 35.500000000 -23.500000000 -21.500000000 35.500000000 -24.500000000 -21.500000000 35.500000000 -25.499996185 -21.500000000 35.499996185 -26.499954224 -21.500000000 35.499954224 -27.499591827 -21.500000000 35.499591827 -28.497470856 -21.500000000 35.497467041 -29.488407135 -21.500000000 35.488403320 -30.458978653 -21.500000000 35.458980560 -31.384418488 -21.500000000 35.384422302 -32.233222961 -21.500000000 35.233222961 -32.981101990 -21.500000000 34.981101990 --33.981101990 -20.500000000 34.981101990 --33.233226776 -20.500000000 35.233222961 --32.384422302 -20.500000000 35.384418488 --31.458978653 -20.500000000 35.458980560 --30.488407135 -20.500000000 35.488403320 --29.497472763 -20.500000000 35.497474670 --28.499593735 -20.500000000 35.499591827 --27.499954224 -20.500000000 35.499954224 --26.499996185 -20.500000000 35.499996185 --25.500000000 -20.500000000 35.500000000 --24.500000000 -20.500000000 35.500000000 --23.500000000 -20.500000000 35.500000000 --22.500000000 -20.500000000 35.500000000 --21.500000000 -20.500000000 35.500000000 --20.500000000 -20.500000000 35.500000000 --19.500000000 -20.500000000 35.500000000 --18.500000000 -20.500000000 35.500000000 --17.500000000 -20.500000000 35.500000000 --16.500000000 -20.500000000 35.500000000 --15.500000000 -20.500000000 35.500000000 --14.500000000 -20.500000000 35.500000000 --13.500000000 -20.500000000 35.500000000 --12.500000000 -20.500000000 35.500000000 --11.500000000 -20.500000000 35.500000000 --10.500000000 -20.500000000 35.500000000 --9.500000000 -20.500000000 35.500000000 --8.500000000 -20.500000000 35.500000000 --7.500000000 -20.500000000 35.500000000 --6.500000000 -20.500000000 35.500000000 --5.500000000 -20.500000000 35.500000000 --4.500000000 -20.500000000 35.500000000 --3.500000000 -20.500000000 35.500000000 --2.500000000 -20.500000000 35.500000000 --1.500000000 -20.500000000 35.500000000 --0.500000000 -20.500000000 35.500000000 -0.500000000 -20.500000000 35.500000000 -1.500000000 -20.500000000 35.500000000 -2.500000000 -20.500000000 35.500000000 -3.500000000 -20.500000000 35.500000000 -4.500000000 -20.500000000 35.500000000 
-5.500000000 -20.500000000 35.500000000 -6.500000000 -20.500000000 35.500000000 -7.500000000 -20.500000000 35.500000000 -8.500000000 -20.500000000 35.500000000 -9.500000000 -20.500000000 35.500000000 -10.500000000 -20.500000000 35.500000000 -11.500000000 -20.500000000 35.500000000 -12.500000000 -20.500000000 35.500000000 -13.500000000 -20.500000000 35.500000000 -14.500000000 -20.500000000 35.500000000 -15.500000000 -20.500000000 35.500000000 -16.500000000 -20.500000000 35.500000000 -17.500000000 -20.500000000 35.500000000 -18.500000000 -20.500000000 35.500000000 -19.500000000 -20.500000000 35.500000000 -20.500000000 -20.500000000 35.500000000 -21.500000000 -20.500000000 35.500000000 -22.500000000 -20.500000000 35.500000000 -23.500000000 -20.500000000 35.500000000 -24.500000000 -20.500000000 35.500000000 -25.499996185 -20.500000000 35.499996185 -26.499954224 -20.500000000 35.499954224 -27.499591827 -20.500000000 35.499591827 -28.497470856 -20.500000000 35.497467041 -29.488407135 -20.500000000 35.488403320 -30.458978653 -20.500000000 35.458980560 -31.384418488 -20.500000000 35.384422302 -32.233222961 -20.500000000 35.233222961 -32.981101990 -20.500000000 34.981101990 --33.981101990 -19.500000000 34.981101990 --33.233226776 -19.500000000 35.233222961 --32.384422302 -19.500000000 35.384418488 --31.458978653 -19.500000000 35.458980560 --30.488407135 -19.500000000 35.488403320 --29.497472763 -19.500000000 35.497474670 --28.499593735 -19.500000000 35.499591827 --27.499954224 -19.500000000 35.499954224 --26.499996185 -19.500000000 35.499996185 --25.500000000 -19.500000000 35.500000000 --24.500000000 -19.500000000 35.500000000 --23.500000000 -19.500000000 35.500000000 --22.500000000 -19.500000000 35.500000000 --21.500000000 -19.500000000 35.500000000 --20.500000000 -19.500000000 35.500000000 --19.500000000 -19.500000000 35.500000000 --18.500000000 -19.500000000 35.500000000 --17.500000000 -19.500000000 35.500000000 --16.500000000 -19.500000000 35.500000000 --15.500000000 -19.500000000 35.500000000 --14.500000000 -19.500000000 35.500000000 --13.500000000 -19.500000000 35.500000000 --12.500000000 -19.500000000 35.500000000 --11.500000000 -19.500000000 35.500000000 --10.500000000 -19.500000000 35.500000000 --9.500000000 -19.500000000 35.500000000 --8.500000000 -19.500000000 35.500000000 --7.500000000 -19.500000000 35.500000000 --6.500000000 -19.500000000 35.500000000 --5.500000000 -19.500000000 35.500000000 --4.500000000 -19.500000000 35.500000000 --3.500000000 -19.500000000 35.500000000 --2.500000000 -19.500000000 35.500000000 --1.500000000 -19.500000000 35.500000000 --0.500000000 -19.500000000 35.500000000 -0.500000000 -19.500000000 35.500000000 -1.500000000 -19.500000000 35.500000000 -2.500000000 -19.500000000 35.500000000 -3.500000000 -19.500000000 35.500000000 -4.500000000 -19.500000000 35.500000000 -5.500000000 -19.500000000 35.500000000 -6.500000000 -19.500000000 35.500000000 -7.500000000 -19.500000000 35.500000000 -8.500000000 -19.500000000 35.500000000 -9.500000000 -19.500000000 35.500000000 -10.500000000 -19.500000000 35.500000000 -11.500000000 -19.500000000 35.500000000 -12.500000000 -19.500000000 35.500000000 -13.500000000 -19.500000000 35.500000000 -14.500000000 -19.500000000 35.500000000 -15.500000000 -19.500000000 35.500000000 -16.500000000 -19.500000000 35.500000000 -17.500000000 -19.500000000 35.500000000 -18.500000000 -19.500000000 35.500000000 -19.500000000 -19.500000000 35.500000000 -20.500000000 -19.500000000 35.500000000 -21.500000000 -19.500000000 35.500000000 -22.500000000 
-19.500000000 35.500000000 -23.500000000 -19.500000000 35.500000000 -24.500000000 -19.500000000 35.500000000 -25.499996185 -19.500000000 35.499996185 -26.499954224 -19.500000000 35.499954224 -27.499591827 -19.500000000 35.499591827 -28.497470856 -19.500000000 35.497467041 -29.488407135 -19.500000000 35.488403320 -30.458978653 -19.500000000 35.458980560 -31.384418488 -19.500000000 35.384422302 -32.233222961 -19.500000000 35.233222961 -32.981101990 -19.500000000 34.981101990 --33.981101990 -18.500000000 34.981101990 --33.233226776 -18.500000000 35.233222961 --32.384422302 -18.500000000 35.384418488 --31.458978653 -18.500000000 35.458980560 --30.488407135 -18.500000000 35.488403320 --29.497472763 -18.500000000 35.497474670 --28.499593735 -18.500000000 35.499591827 --27.499954224 -18.500000000 35.499954224 --26.499996185 -18.500000000 35.499996185 --25.500000000 -18.500000000 35.500000000 --24.500000000 -18.500000000 35.500000000 --23.500000000 -18.500000000 35.500000000 --22.500000000 -18.500000000 35.500000000 --21.500000000 -18.500000000 35.500000000 --20.500000000 -18.500000000 35.500000000 --19.500000000 -18.500000000 35.500000000 --18.500000000 -18.500000000 35.500000000 --17.500000000 -18.500000000 35.500000000 --16.500000000 -18.500000000 35.500000000 --15.500000000 -18.500000000 35.500000000 --14.500000000 -18.500000000 35.500000000 --13.500000000 -18.500000000 35.500000000 --12.500000000 -18.500000000 35.500000000 --11.500000000 -18.500000000 35.500000000 --10.500000000 -18.500000000 35.500000000 --9.500000000 -18.500000000 35.500000000 --8.500000000 -18.500000000 35.500000000 --7.500000000 -18.500000000 35.500000000 --6.500000000 -18.500000000 35.500000000 --5.500000000 -18.500000000 35.500000000 --4.500000000 -18.500000000 35.500000000 --3.500000000 -18.500000000 35.500000000 --2.500000000 -18.500000000 35.500000000 --1.500000000 -18.500000000 35.500000000 --0.500000000 -18.500000000 35.500000000 -0.500000000 -18.500000000 35.500000000 -1.500000000 -18.500000000 35.500000000 -2.500000000 -18.500000000 35.500000000 -3.500000000 -18.500000000 35.500000000 -4.500000000 -18.500000000 35.500000000 -5.500000000 -18.500000000 35.500000000 -6.500000000 -18.500000000 35.500000000 -7.500000000 -18.500000000 35.500000000 -8.500000000 -18.500000000 35.500000000 -9.500000000 -18.500000000 35.500000000 -10.500000000 -18.500000000 35.500000000 -11.500000000 -18.500000000 35.500000000 -12.500000000 -18.500000000 35.500000000 -13.500000000 -18.500000000 35.500000000 -14.500000000 -18.500000000 35.500000000 -15.500000000 -18.500000000 35.500000000 -16.500000000 -18.500000000 35.500000000 -17.500000000 -18.500000000 35.500000000 -18.500000000 -18.500000000 35.500000000 -19.500000000 -18.500000000 35.500000000 -20.500000000 -18.500000000 35.500000000 -21.500000000 -18.500000000 35.500000000 -22.500000000 -18.500000000 35.500000000 -23.500000000 -18.500000000 35.500000000 -24.500000000 -18.500000000 35.500000000 -25.499996185 -18.500000000 35.499996185 -26.499954224 -18.500000000 35.499954224 -27.499591827 -18.500000000 35.499591827 -28.497470856 -18.500000000 35.497467041 -29.488407135 -18.500000000 35.488403320 -30.458978653 -18.500000000 35.458980560 -31.384418488 -18.500000000 35.384422302 -32.233222961 -18.500000000 35.233222961 -32.981101990 -18.500000000 34.981101990 --33.981101990 -17.500000000 34.981101990 --33.233226776 -17.500000000 35.233222961 --32.384422302 -17.500000000 35.384418488 --31.458978653 -17.500000000 35.458980560 --30.488407135 -17.500000000 35.488403320 --29.497472763 
-17.500000000 35.497474670 --28.499593735 -17.500000000 35.499591827 --27.499954224 -17.500000000 35.499954224 --26.499996185 -17.500000000 35.499996185 --25.500000000 -17.500000000 35.500000000 --24.500000000 -17.500000000 35.500000000 --23.500000000 -17.500000000 35.500000000 --22.500000000 -17.500000000 35.500000000 --21.500000000 -17.500000000 35.500000000 --20.500000000 -17.500000000 35.500000000 --19.500000000 -17.500000000 35.500000000 --18.500000000 -17.500000000 35.500000000 --17.500000000 -17.500000000 35.500000000 --16.500000000 -17.500000000 35.500000000 --15.500000000 -17.500000000 35.500000000 --14.500000000 -17.500000000 35.500000000 --13.500000000 -17.500000000 35.500000000 --12.500000000 -17.500000000 35.500000000 --11.500000000 -17.500000000 35.500000000 --10.500000000 -17.500000000 35.500000000 --9.500000000 -17.500000000 35.500000000 --8.500000000 -17.500000000 35.500000000 --7.500000000 -17.500000000 35.500000000 --6.500000000 -17.500000000 35.500000000 --5.500000000 -17.500000000 35.500000000 --4.500000000 -17.500000000 35.500000000 --3.500000000 -17.500000000 35.500000000 --2.500000000 -17.500000000 35.500000000 --1.500000000 -17.500000000 35.500000000 --0.500000000 -17.500000000 35.500000000 -0.500000000 -17.500000000 35.500000000 -1.500000000 -17.500000000 35.500000000 -2.500000000 -17.500000000 35.500000000 -3.500000000 -17.500000000 35.500000000 -4.500000000 -17.500000000 35.500000000 -5.500000000 -17.500000000 35.500000000 -6.500000000 -17.500000000 35.500000000 -7.500000000 -17.500000000 35.500000000 -8.500000000 -17.500000000 35.500000000 -9.500000000 -17.500000000 35.500000000 -10.500000000 -17.500000000 35.500000000 -11.500000000 -17.500000000 35.500000000 -12.500000000 -17.500000000 35.500000000 -13.500000000 -17.500000000 35.500000000 -14.500000000 -17.500000000 35.500000000 -15.500000000 -17.500000000 35.500000000 -16.500000000 -17.500000000 35.500000000 -17.500000000 -17.500000000 35.500000000 -18.500000000 -17.500000000 35.500000000 -19.500000000 -17.500000000 35.500000000 -20.500000000 -17.500000000 35.500000000 -21.500000000 -17.500000000 35.500000000 -22.500000000 -17.500000000 35.500000000 -23.500000000 -17.500000000 35.500000000 -24.500000000 -17.500000000 35.500000000 -25.499996185 -17.500000000 35.499996185 -26.499954224 -17.500000000 35.499954224 -27.499591827 -17.500000000 35.499591827 -28.497470856 -17.500000000 35.497467041 -29.488407135 -17.500000000 35.488403320 -30.458978653 -17.500000000 35.458980560 -31.384418488 -17.500000000 35.384422302 -32.233222961 -17.500000000 35.233222961 -32.981101990 -17.500000000 34.981101990 --33.981101990 -16.500000000 34.981101990 --33.233226776 -16.500000000 35.233222961 --32.384422302 -16.500000000 35.384418488 --31.458978653 -16.500000000 35.458980560 --30.488407135 -16.500000000 35.488403320 --29.497472763 -16.500000000 35.497474670 --28.499593735 -16.500000000 35.499591827 --27.499954224 -16.500000000 35.499954224 --26.499996185 -16.500000000 35.499996185 --25.500000000 -16.500000000 35.500000000 --24.500000000 -16.500000000 35.500000000 --23.500000000 -16.500000000 35.500000000 --22.500000000 -16.500000000 35.500000000 --21.500000000 -16.500000000 35.500000000 --20.500000000 -16.500000000 35.500000000 --19.500000000 -16.500000000 35.500000000 --18.500000000 -16.500000000 35.500000000 --17.500000000 -16.500000000 35.500000000 --16.500000000 -16.500000000 35.500000000 --15.500000000 -16.500000000 35.500000000 --14.500000000 -16.500000000 35.500000000 --13.500000000 -16.500000000 35.500000000 
[Large block of numeric data omitted: thousands of flattened x/y/z coordinate triples (x roughly -33.98 to 32.98, y from -16.5 through +10.5, z ≈ 34.98-35.5 at 1.0 spacing), apparently the contents of a coordinate/grid data file carried along in this patch. The values add no readable information here and are elided for brevity; the full data remains in the referenced file within the patch itself.]
35.500000000 --16.500000000 10.500000000 35.500000000 --15.500000000 10.500000000 35.500000000 --14.500000000 10.500000000 35.500000000 --13.500000000 10.500000000 35.500000000 --12.500000000 10.500000000 35.500000000 --11.500000000 10.500000000 35.500000000 --10.500000000 10.500000000 35.500000000 --9.500000000 10.500000000 35.500000000 --8.500000000 10.500000000 35.500000000 --7.500000000 10.500000000 35.500000000 --6.500000000 10.500000000 35.500000000 --5.500000000 10.500000000 35.500000000 --4.500000000 10.500000000 35.500000000 --3.500000000 10.500000000 35.500000000 --2.500000000 10.500000000 35.500000000 --1.500000000 10.500000000 35.500000000 --0.500000000 10.500000000 35.500000000 -0.500000000 10.500000000 35.500000000 -1.500000000 10.500000000 35.500000000 -2.500000000 10.500000000 35.500000000 -3.500000000 10.500000000 35.500000000 -4.500000000 10.500000000 35.500000000 -5.500000000 10.500000000 35.500000000 -6.500000000 10.500000000 35.500000000 -7.500000000 10.500000000 35.500000000 -8.500000000 10.500000000 35.500000000 -9.500000000 10.500000000 35.500000000 -10.500000000 10.500000000 35.500000000 -11.500000000 10.500000000 35.500000000 -12.500000000 10.500000000 35.500000000 -13.500000000 10.500000000 35.500000000 -14.500000000 10.500000000 35.500000000 -15.500000000 10.500000000 35.500000000 -16.500000000 10.500000000 35.500000000 -17.500000000 10.500000000 35.500000000 -18.500000000 10.500000000 35.500000000 -19.500000000 10.500000000 35.500000000 -20.500000000 10.500000000 35.500000000 -21.500000000 10.500000000 35.500000000 -22.500000000 10.500000000 35.500000000 -23.500000000 10.500000000 35.500000000 -24.500000000 10.500000000 35.500000000 -25.499996185 10.500000000 35.499996185 -26.499954224 10.500000000 35.499954224 -27.499591827 10.500000000 35.499591827 -28.497470856 10.500000000 35.497467041 -29.488407135 10.500000000 35.488403320 -30.458978653 10.500000000 35.458980560 -31.384418488 10.500000000 35.384422302 -32.233222961 10.500000000 35.233222961 -32.981101990 10.500000000 34.981101990 --33.981101990 11.500000000 34.981101990 --33.233226776 11.500000000 35.233222961 --32.384422302 11.500000000 35.384418488 --31.458978653 11.500000000 35.458980560 --30.488407135 11.500000000 35.488403320 --29.497472763 11.500000000 35.497474670 --28.499593735 11.500000000 35.499591827 --27.499954224 11.500000000 35.499954224 --26.499996185 11.500000000 35.499996185 --25.500000000 11.500000000 35.500000000 --24.500000000 11.500000000 35.500000000 --23.500000000 11.500000000 35.500000000 --22.500000000 11.500000000 35.500000000 --21.500000000 11.500000000 35.500000000 --20.500000000 11.500000000 35.500000000 --19.500000000 11.500000000 35.500000000 --18.500000000 11.500000000 35.500000000 --17.500000000 11.500000000 35.500000000 --16.500000000 11.500000000 35.500000000 --15.500000000 11.500000000 35.500000000 --14.500000000 11.500000000 35.500000000 --13.500000000 11.500000000 35.500000000 --12.500000000 11.500000000 35.500000000 --11.500000000 11.500000000 35.500000000 --10.500000000 11.500000000 35.500000000 --9.500000000 11.500000000 35.500000000 --8.500000000 11.500000000 35.500000000 --7.500000000 11.500000000 35.500000000 --6.500000000 11.500000000 35.500000000 --5.500000000 11.500000000 35.500000000 --4.500000000 11.500000000 35.500000000 --3.500000000 11.500000000 35.500000000 --2.500000000 11.500000000 35.500000000 --1.500000000 11.500000000 35.500000000 --0.500000000 11.500000000 35.500000000 -0.500000000 11.500000000 35.500000000 -1.500000000 11.500000000 35.500000000 
-2.500000000 11.500000000 35.500000000 -3.500000000 11.500000000 35.500000000 -4.500000000 11.500000000 35.500000000 -5.500000000 11.500000000 35.500000000 -6.500000000 11.500000000 35.500000000 -7.500000000 11.500000000 35.500000000 -8.500000000 11.500000000 35.500000000 -9.500000000 11.500000000 35.500000000 -10.500000000 11.500000000 35.500000000 -11.500000000 11.500000000 35.500000000 -12.500000000 11.500000000 35.500000000 -13.500000000 11.500000000 35.500000000 -14.500000000 11.500000000 35.500000000 -15.500000000 11.500000000 35.500000000 -16.500000000 11.500000000 35.500000000 -17.500000000 11.500000000 35.500000000 -18.500000000 11.500000000 35.500000000 -19.500000000 11.500000000 35.500000000 -20.500000000 11.500000000 35.500000000 -21.500000000 11.500000000 35.500000000 -22.500000000 11.500000000 35.500000000 -23.500000000 11.500000000 35.500000000 -24.500000000 11.500000000 35.500000000 -25.499996185 11.500000000 35.499996185 -26.499954224 11.500000000 35.499954224 -27.499591827 11.500000000 35.499591827 -28.497470856 11.500000000 35.497467041 -29.488407135 11.500000000 35.488403320 -30.458978653 11.500000000 35.458980560 -31.384418488 11.500000000 35.384422302 -32.233222961 11.500000000 35.233222961 -32.981101990 11.500000000 34.981101990 --33.981101990 12.500000000 34.981101990 --33.233226776 12.500000000 35.233222961 --32.384422302 12.500000000 35.384418488 --31.458978653 12.500000000 35.458980560 --30.488407135 12.500000000 35.488403320 --29.497472763 12.500000000 35.497474670 --28.499593735 12.500000000 35.499591827 --27.499954224 12.500000000 35.499954224 --26.499996185 12.500000000 35.499996185 --25.500000000 12.500000000 35.500000000 --24.500000000 12.500000000 35.500000000 --23.500000000 12.500000000 35.500000000 --22.500000000 12.500000000 35.500000000 --21.500000000 12.500000000 35.500000000 --20.500000000 12.500000000 35.500000000 --19.500000000 12.500000000 35.500000000 --18.500000000 12.500000000 35.500000000 --17.500000000 12.500000000 35.500000000 --16.500000000 12.500000000 35.500000000 --15.500000000 12.500000000 35.500000000 --14.500000000 12.500000000 35.500000000 --13.500000000 12.500000000 35.500000000 --12.500000000 12.500000000 35.500000000 --11.500000000 12.500000000 35.500000000 --10.500000000 12.500000000 35.500000000 --9.500000000 12.500000000 35.500000000 --8.500000000 12.500000000 35.500000000 --7.500000000 12.500000000 35.500000000 --6.500000000 12.500000000 35.500000000 --5.500000000 12.500000000 35.500000000 --4.500000000 12.500000000 35.500000000 --3.500000000 12.500000000 35.500000000 --2.500000000 12.500000000 35.500000000 --1.500000000 12.500000000 35.500000000 --0.500000000 12.500000000 35.500000000 -0.500000000 12.500000000 35.500000000 -1.500000000 12.500000000 35.500000000 -2.500000000 12.500000000 35.500000000 -3.500000000 12.500000000 35.500000000 -4.500000000 12.500000000 35.500000000 -5.500000000 12.500000000 35.500000000 -6.500000000 12.500000000 35.500000000 -7.500000000 12.500000000 35.500000000 -8.500000000 12.500000000 35.500000000 -9.500000000 12.500000000 35.500000000 -10.500000000 12.500000000 35.500000000 -11.500000000 12.500000000 35.500000000 -12.500000000 12.500000000 35.500000000 -13.500000000 12.500000000 35.500000000 -14.500000000 12.500000000 35.500000000 -15.500000000 12.500000000 35.500000000 -16.500000000 12.500000000 35.500000000 -17.500000000 12.500000000 35.500000000 -18.500000000 12.500000000 35.500000000 -19.500000000 12.500000000 35.500000000 -20.500000000 12.500000000 35.500000000 -21.500000000 12.500000000 
35.500000000 -22.500000000 12.500000000 35.500000000 -23.500000000 12.500000000 35.500000000 -24.500000000 12.500000000 35.500000000 -25.499996185 12.500000000 35.499996185 -26.499954224 12.500000000 35.499954224 -27.499591827 12.500000000 35.499591827 -28.497470856 12.500000000 35.497467041 -29.488407135 12.500000000 35.488403320 -30.458978653 12.500000000 35.458980560 -31.384418488 12.500000000 35.384422302 -32.233222961 12.500000000 35.233222961 -32.981101990 12.500000000 34.981101990 --33.981101990 13.500000000 34.981101990 --33.233226776 13.500000000 35.233222961 --32.384422302 13.500000000 35.384418488 --31.458978653 13.500000000 35.458980560 --30.488407135 13.500000000 35.488403320 --29.497472763 13.500000000 35.497474670 --28.499593735 13.500000000 35.499591827 --27.499954224 13.500000000 35.499954224 --26.499996185 13.500000000 35.499996185 --25.500000000 13.500000000 35.500000000 --24.500000000 13.500000000 35.500000000 --23.500000000 13.500000000 35.500000000 --22.500000000 13.500000000 35.500000000 --21.500000000 13.500000000 35.500000000 --20.500000000 13.500000000 35.500000000 --19.500000000 13.500000000 35.500000000 --18.500000000 13.500000000 35.500000000 --17.500000000 13.500000000 35.500000000 --16.500000000 13.500000000 35.500000000 --15.500000000 13.500000000 35.500000000 --14.500000000 13.500000000 35.500000000 --13.500000000 13.500000000 35.500000000 --12.500000000 13.500000000 35.500000000 --11.500000000 13.500000000 35.500000000 --10.500000000 13.500000000 35.500000000 --9.500000000 13.500000000 35.500000000 --8.500000000 13.500000000 35.500000000 --7.500000000 13.500000000 35.500000000 --6.500000000 13.500000000 35.500000000 --5.500000000 13.500000000 35.500000000 --4.500000000 13.500000000 35.500000000 --3.500000000 13.500000000 35.500000000 --2.500000000 13.500000000 35.500000000 --1.500000000 13.500000000 35.500000000 --0.500000000 13.500000000 35.500000000 -0.500000000 13.500000000 35.500000000 -1.500000000 13.500000000 35.500000000 -2.500000000 13.500000000 35.500000000 -3.500000000 13.500000000 35.500000000 -4.500000000 13.500000000 35.500000000 -5.500000000 13.500000000 35.500000000 -6.500000000 13.500000000 35.500000000 -7.500000000 13.500000000 35.500000000 -8.500000000 13.500000000 35.500000000 -9.500000000 13.500000000 35.500000000 -10.500000000 13.500000000 35.500000000 -11.500000000 13.500000000 35.500000000 -12.500000000 13.500000000 35.500000000 -13.500000000 13.500000000 35.500000000 -14.500000000 13.500000000 35.500000000 -15.500000000 13.500000000 35.500000000 -16.500000000 13.500000000 35.500000000 -17.500000000 13.500000000 35.500000000 -18.500000000 13.500000000 35.500000000 -19.500000000 13.500000000 35.500000000 -20.500000000 13.500000000 35.500000000 -21.500000000 13.500000000 35.500000000 -22.500000000 13.500000000 35.500000000 -23.500000000 13.500000000 35.500000000 -24.500000000 13.500000000 35.500000000 -25.499996185 13.500000000 35.499996185 -26.499954224 13.500000000 35.499954224 -27.499591827 13.500000000 35.499591827 -28.497470856 13.500000000 35.497467041 -29.488407135 13.500000000 35.488403320 -30.458978653 13.500000000 35.458980560 -31.384418488 13.500000000 35.384422302 -32.233222961 13.500000000 35.233222961 -32.981101990 13.500000000 34.981101990 --33.981101990 14.500000000 34.981101990 --33.233226776 14.500000000 35.233222961 --32.384422302 14.500000000 35.384418488 --31.458978653 14.500000000 35.458980560 --30.488407135 14.500000000 35.488403320 --29.497472763 14.500000000 35.497474670 --28.499593735 14.500000000 35.499591827 
--27.499954224 14.500000000 35.499954224 --26.499996185 14.500000000 35.499996185 --25.500000000 14.500000000 35.500000000 --24.500000000 14.500000000 35.500000000 --23.500000000 14.500000000 35.500000000 --22.500000000 14.500000000 35.500000000 --21.500000000 14.500000000 35.500000000 --20.500000000 14.500000000 35.500000000 --19.500000000 14.500000000 35.500000000 --18.500000000 14.500000000 35.500000000 --17.500000000 14.500000000 35.500000000 --16.500000000 14.500000000 35.500000000 --15.500000000 14.500000000 35.500000000 --14.500000000 14.500000000 35.500000000 --13.500000000 14.500000000 35.500000000 --12.500000000 14.500000000 35.500000000 --11.500000000 14.500000000 35.500000000 --10.500000000 14.500000000 35.500000000 --9.500000000 14.500000000 35.500000000 --8.500000000 14.500000000 35.500000000 --7.500000000 14.500000000 35.500000000 --6.500000000 14.500000000 35.500000000 --5.500000000 14.500000000 35.500000000 --4.500000000 14.500000000 35.500000000 --3.500000000 14.500000000 35.500000000 --2.500000000 14.500000000 35.500000000 --1.500000000 14.500000000 35.500000000 --0.500000000 14.500000000 35.500000000 -0.500000000 14.500000000 35.500000000 -1.500000000 14.500000000 35.500000000 -2.500000000 14.500000000 35.500000000 -3.500000000 14.500000000 35.500000000 -4.500000000 14.500000000 35.500000000 -5.500000000 14.500000000 35.500000000 -6.500000000 14.500000000 35.500000000 -7.500000000 14.500000000 35.500000000 -8.500000000 14.500000000 35.500000000 -9.500000000 14.500000000 35.500000000 -10.500000000 14.500000000 35.500000000 -11.500000000 14.500000000 35.500000000 -12.500000000 14.500000000 35.500000000 -13.500000000 14.500000000 35.500000000 -14.500000000 14.500000000 35.500000000 -15.500000000 14.500000000 35.500000000 -16.500000000 14.500000000 35.500000000 -17.500000000 14.500000000 35.500000000 -18.500000000 14.500000000 35.500000000 -19.500000000 14.500000000 35.500000000 -20.500000000 14.500000000 35.500000000 -21.500000000 14.500000000 35.500000000 -22.500000000 14.500000000 35.500000000 -23.500000000 14.500000000 35.500000000 -24.500000000 14.500000000 35.500000000 -25.499996185 14.500000000 35.499996185 -26.499954224 14.500000000 35.499954224 -27.499591827 14.500000000 35.499591827 -28.497470856 14.500000000 35.497467041 -29.488407135 14.500000000 35.488403320 -30.458978653 14.500000000 35.458980560 -31.384418488 14.500000000 35.384422302 -32.233222961 14.500000000 35.233222961 -32.981101990 14.500000000 34.981101990 --33.981101990 15.500000000 34.981101990 --33.233226776 15.500000000 35.233222961 --32.384422302 15.500000000 35.384418488 --31.458978653 15.500000000 35.458980560 --30.488407135 15.500000000 35.488403320 --29.497472763 15.500000000 35.497474670 --28.499593735 15.500000000 35.499591827 --27.499954224 15.500000000 35.499954224 --26.499996185 15.500000000 35.499996185 --25.500000000 15.500000000 35.500000000 --24.500000000 15.500000000 35.500000000 --23.500000000 15.500000000 35.500000000 --22.500000000 15.500000000 35.500000000 --21.500000000 15.500000000 35.500000000 --20.500000000 15.500000000 35.500000000 --19.500000000 15.500000000 35.500000000 --18.500000000 15.500000000 35.500000000 --17.500000000 15.500000000 35.500000000 --16.500000000 15.500000000 35.500000000 --15.500000000 15.500000000 35.500000000 --14.500000000 15.500000000 35.500000000 --13.500000000 15.500000000 35.500000000 --12.500000000 15.500000000 35.500000000 --11.500000000 15.500000000 35.500000000 --10.500000000 15.500000000 35.500000000 --9.500000000 15.500000000 35.500000000 
--8.500000000 15.500000000 35.500000000 --7.500000000 15.500000000 35.500000000 --6.500000000 15.500000000 35.500000000 --5.500000000 15.500000000 35.500000000 --4.500000000 15.500000000 35.500000000 --3.500000000 15.500000000 35.500000000 --2.500000000 15.500000000 35.500000000 --1.500000000 15.500000000 35.500000000 --0.500000000 15.500000000 35.500000000 -0.500000000 15.500000000 35.500000000 -1.500000000 15.500000000 35.500000000 -2.500000000 15.500000000 35.500000000 -3.500000000 15.500000000 35.500000000 -4.500000000 15.500000000 35.500000000 -5.500000000 15.500000000 35.500000000 -6.500000000 15.500000000 35.500000000 -7.500000000 15.500000000 35.500000000 -8.500000000 15.500000000 35.500000000 -9.500000000 15.500000000 35.500000000 -10.500000000 15.500000000 35.500000000 -11.500000000 15.500000000 35.500000000 -12.500000000 15.500000000 35.500000000 -13.500000000 15.500000000 35.500000000 -14.500000000 15.500000000 35.500000000 -15.500000000 15.500000000 35.500000000 -16.500000000 15.500000000 35.500000000 -17.500000000 15.500000000 35.500000000 -18.500000000 15.500000000 35.500000000 -19.500000000 15.500000000 35.500000000 -20.500000000 15.500000000 35.500000000 -21.500000000 15.500000000 35.500000000 -22.500000000 15.500000000 35.500000000 -23.500000000 15.500000000 35.500000000 -24.500000000 15.500000000 35.500000000 -25.499996185 15.500000000 35.499996185 -26.499954224 15.500000000 35.499954224 -27.499591827 15.500000000 35.499591827 -28.497470856 15.500000000 35.497467041 -29.488407135 15.500000000 35.488403320 -30.458978653 15.500000000 35.458980560 -31.384418488 15.500000000 35.384422302 -32.233222961 15.500000000 35.233222961 -32.981101990 15.500000000 34.981101990 --33.981101990 16.500000000 34.981101990 --33.233226776 16.500000000 35.233222961 --32.384422302 16.500000000 35.384418488 --31.458978653 16.500000000 35.458980560 --30.488407135 16.500000000 35.488403320 --29.497472763 16.500000000 35.497474670 --28.499593735 16.500000000 35.499591827 --27.499954224 16.500000000 35.499954224 --26.499996185 16.500000000 35.499996185 --25.500000000 16.500000000 35.500000000 --24.500000000 16.500000000 35.500000000 --23.500000000 16.500000000 35.500000000 --22.500000000 16.500000000 35.500000000 --21.500000000 16.500000000 35.500000000 --20.500000000 16.500000000 35.500000000 --19.500000000 16.500000000 35.500000000 --18.500000000 16.500000000 35.500000000 --17.500000000 16.500000000 35.500000000 --16.500000000 16.500000000 35.500000000 --15.500000000 16.500000000 35.500000000 --14.500000000 16.500000000 35.500000000 --13.500000000 16.500000000 35.500000000 --12.500000000 16.500000000 35.500000000 --11.500000000 16.500000000 35.500000000 --10.500000000 16.500000000 35.500000000 --9.500000000 16.500000000 35.500000000 --8.500000000 16.500000000 35.500000000 --7.500000000 16.500000000 35.500000000 --6.500000000 16.500000000 35.500000000 --5.500000000 16.500000000 35.500000000 --4.500000000 16.500000000 35.500000000 --3.500000000 16.500000000 35.500000000 --2.500000000 16.500000000 35.500000000 --1.500000000 16.500000000 35.500000000 --0.500000000 16.500000000 35.500000000 -0.500000000 16.500000000 35.500000000 -1.500000000 16.500000000 35.500000000 -2.500000000 16.500000000 35.500000000 -3.500000000 16.500000000 35.500000000 -4.500000000 16.500000000 35.500000000 -5.500000000 16.500000000 35.500000000 -6.500000000 16.500000000 35.500000000 -7.500000000 16.500000000 35.500000000 -8.500000000 16.500000000 35.500000000 -9.500000000 16.500000000 35.500000000 -10.500000000 16.500000000 
35.500000000 -11.500000000 16.500000000 35.500000000 -12.500000000 16.500000000 35.500000000 -13.500000000 16.500000000 35.500000000 -14.500000000 16.500000000 35.500000000 -15.500000000 16.500000000 35.500000000 -16.500000000 16.500000000 35.500000000 -17.500000000 16.500000000 35.500000000 -18.500000000 16.500000000 35.500000000 -19.500000000 16.500000000 35.500000000 -20.500000000 16.500000000 35.500000000 -21.500000000 16.500000000 35.500000000 -22.500000000 16.500000000 35.500000000 -23.500000000 16.500000000 35.500000000 -24.500000000 16.500000000 35.500000000 -25.499996185 16.500000000 35.499996185 -26.499954224 16.500000000 35.499954224 -27.499591827 16.500000000 35.499591827 -28.497470856 16.500000000 35.497467041 -29.488407135 16.500000000 35.488403320 -30.458978653 16.500000000 35.458980560 -31.384418488 16.500000000 35.384422302 -32.233222961 16.500000000 35.233222961 -32.981101990 16.500000000 34.981101990 --33.981101990 17.500000000 34.981101990 --33.233226776 17.500000000 35.233222961 --32.384422302 17.500000000 35.384418488 --31.458978653 17.500000000 35.458980560 --30.488407135 17.500000000 35.488403320 --29.497472763 17.500000000 35.497474670 --28.499593735 17.500000000 35.499591827 --27.499954224 17.500000000 35.499954224 --26.499996185 17.500000000 35.499996185 --25.500000000 17.500000000 35.500000000 --24.500000000 17.500000000 35.500000000 --23.500000000 17.500000000 35.500000000 --22.500000000 17.500000000 35.500000000 --21.500000000 17.500000000 35.500000000 --20.500000000 17.500000000 35.500000000 --19.500000000 17.500000000 35.500000000 --18.500000000 17.500000000 35.500000000 --17.500000000 17.500000000 35.500000000 --16.500000000 17.500000000 35.500000000 --15.500000000 17.500000000 35.500000000 --14.500000000 17.500000000 35.500000000 --13.500000000 17.500000000 35.500000000 --12.500000000 17.500000000 35.500000000 --11.500000000 17.500000000 35.500000000 --10.500000000 17.500000000 35.500000000 --9.500000000 17.500000000 35.500000000 --8.500000000 17.500000000 35.500000000 --7.500000000 17.500000000 35.500000000 --6.500000000 17.500000000 35.500000000 --5.500000000 17.500000000 35.500000000 --4.500000000 17.500000000 35.500000000 --3.500000000 17.500000000 35.500000000 --2.500000000 17.500000000 35.500000000 --1.500000000 17.500000000 35.500000000 --0.500000000 17.500000000 35.500000000 -0.500000000 17.500000000 35.500000000 -1.500000000 17.500000000 35.500000000 -2.500000000 17.500000000 35.500000000 -3.500000000 17.500000000 35.500000000 -4.500000000 17.500000000 35.500000000 -5.500000000 17.500000000 35.500000000 -6.500000000 17.500000000 35.500000000 -7.500000000 17.500000000 35.500000000 -8.500000000 17.500000000 35.500000000 -9.500000000 17.500000000 35.500000000 -10.500000000 17.500000000 35.500000000 -11.500000000 17.500000000 35.500000000 -12.500000000 17.500000000 35.500000000 -13.500000000 17.500000000 35.500000000 -14.500000000 17.500000000 35.500000000 -15.500000000 17.500000000 35.500000000 -16.500000000 17.500000000 35.500000000 -17.500000000 17.500000000 35.500000000 -18.500000000 17.500000000 35.500000000 -19.500000000 17.500000000 35.500000000 -20.500000000 17.500000000 35.500000000 -21.500000000 17.500000000 35.500000000 -22.500000000 17.500000000 35.500000000 -23.500000000 17.500000000 35.500000000 -24.500000000 17.500000000 35.500000000 -25.499996185 17.500000000 35.499996185 -26.499954224 17.500000000 35.499954224 -27.499591827 17.500000000 35.499591827 -28.497470856 17.500000000 35.497467041 -29.488407135 17.500000000 35.488403320 
-30.458978653 17.500000000 35.458980560 -31.384418488 17.500000000 35.384422302 -32.233222961 17.500000000 35.233222961 -32.981101990 17.500000000 34.981101990 --33.981101990 18.500000000 34.981101990 --33.233226776 18.500000000 35.233222961 --32.384422302 18.500000000 35.384418488 --31.458978653 18.500000000 35.458980560 --30.488407135 18.500000000 35.488403320 --29.497472763 18.500000000 35.497474670 --28.499593735 18.500000000 35.499591827 --27.499954224 18.500000000 35.499954224 --26.499996185 18.500000000 35.499996185 --25.500000000 18.500000000 35.500000000 --24.500000000 18.500000000 35.500000000 --23.500000000 18.500000000 35.500000000 --22.500000000 18.500000000 35.500000000 --21.500000000 18.500000000 35.500000000 --20.500000000 18.500000000 35.500000000 --19.500000000 18.500000000 35.500000000 --18.500000000 18.500000000 35.500000000 --17.500000000 18.500000000 35.500000000 --16.500000000 18.500000000 35.500000000 --15.500000000 18.500000000 35.500000000 --14.500000000 18.500000000 35.500000000 --13.500000000 18.500000000 35.500000000 --12.500000000 18.500000000 35.500000000 --11.500000000 18.500000000 35.500000000 --10.500000000 18.500000000 35.500000000 --9.500000000 18.500000000 35.500000000 --8.500000000 18.500000000 35.500000000 --7.500000000 18.500000000 35.500000000 --6.500000000 18.500000000 35.500000000 --5.500000000 18.500000000 35.500000000 --4.500000000 18.500000000 35.500000000 --3.500000000 18.500000000 35.500000000 --2.500000000 18.500000000 35.500000000 --1.500000000 18.500000000 35.500000000 --0.500000000 18.500000000 35.500000000 -0.500000000 18.500000000 35.500000000 -1.500000000 18.500000000 35.500000000 -2.500000000 18.500000000 35.500000000 -3.500000000 18.500000000 35.500000000 -4.500000000 18.500000000 35.500000000 -5.500000000 18.500000000 35.500000000 -6.500000000 18.500000000 35.500000000 -7.500000000 18.500000000 35.500000000 -8.500000000 18.500000000 35.500000000 -9.500000000 18.500000000 35.500000000 -10.500000000 18.500000000 35.500000000 -11.500000000 18.500000000 35.500000000 -12.500000000 18.500000000 35.500000000 -13.500000000 18.500000000 35.500000000 -14.500000000 18.500000000 35.500000000 -15.500000000 18.500000000 35.500000000 -16.500000000 18.500000000 35.500000000 -17.500000000 18.500000000 35.500000000 -18.500000000 18.500000000 35.500000000 -19.500000000 18.500000000 35.500000000 -20.500000000 18.500000000 35.500000000 -21.500000000 18.500000000 35.500000000 -22.500000000 18.500000000 35.500000000 -23.500000000 18.500000000 35.500000000 -24.500000000 18.500000000 35.500000000 -25.499996185 18.500000000 35.499996185 -26.499954224 18.500000000 35.499954224 -27.499591827 18.500000000 35.499591827 -28.497470856 18.500000000 35.497467041 -29.488407135 18.500000000 35.488403320 -30.458978653 18.500000000 35.458980560 -31.384418488 18.500000000 35.384422302 -32.233222961 18.500000000 35.233222961 -32.981101990 18.500000000 34.981101990 --33.981101990 19.500000000 34.981101990 --33.233226776 19.500000000 35.233222961 --32.384422302 19.500000000 35.384418488 --31.458978653 19.500000000 35.458980560 --30.488407135 19.500000000 35.488403320 --29.497472763 19.500000000 35.497474670 --28.499593735 19.500000000 35.499591827 --27.499954224 19.500000000 35.499954224 --26.499996185 19.500000000 35.499996185 --25.500000000 19.500000000 35.500000000 --24.500000000 19.500000000 35.500000000 --23.500000000 19.500000000 35.500000000 --22.500000000 19.500000000 35.500000000 --21.500000000 19.500000000 35.500000000 --20.500000000 19.500000000 35.500000000 
--19.500000000 19.500000000 35.500000000 --18.500000000 19.500000000 35.500000000 --17.500000000 19.500000000 35.500000000 --16.500000000 19.500000000 35.500000000 --15.500000000 19.500000000 35.500000000 --14.500000000 19.500000000 35.500000000 --13.500000000 19.500000000 35.500000000 --12.500000000 19.500000000 35.500000000 --11.500000000 19.500000000 35.500000000 --10.500000000 19.500000000 35.500000000 --9.500000000 19.500000000 35.500000000 --8.500000000 19.500000000 35.500000000 --7.500000000 19.500000000 35.500000000 --6.500000000 19.500000000 35.500000000 --5.500000000 19.500000000 35.500000000 --4.500000000 19.500000000 35.500000000 --3.500000000 19.500000000 35.500000000 --2.500000000 19.500000000 35.500000000 --1.500000000 19.500000000 35.500000000 --0.500000000 19.500000000 35.500000000 -0.500000000 19.500000000 35.500000000 -1.500000000 19.500000000 35.500000000 -2.500000000 19.500000000 35.500000000 -3.500000000 19.500000000 35.500000000 -4.500000000 19.500000000 35.500000000 -5.500000000 19.500000000 35.500000000 -6.500000000 19.500000000 35.500000000 -7.500000000 19.500000000 35.500000000 -8.500000000 19.500000000 35.500000000 -9.500000000 19.500000000 35.500000000 -10.500000000 19.500000000 35.500000000 -11.500000000 19.500000000 35.500000000 -12.500000000 19.500000000 35.500000000 -13.500000000 19.500000000 35.500000000 -14.500000000 19.500000000 35.500000000 -15.500000000 19.500000000 35.500000000 -16.500000000 19.500000000 35.500000000 -17.500000000 19.500000000 35.500000000 -18.500000000 19.500000000 35.500000000 -19.500000000 19.500000000 35.500000000 -20.500000000 19.500000000 35.500000000 -21.500000000 19.500000000 35.500000000 -22.500000000 19.500000000 35.500000000 -23.500000000 19.500000000 35.500000000 -24.500000000 19.500000000 35.500000000 -25.499996185 19.500000000 35.499996185 -26.499954224 19.500000000 35.499954224 -27.499591827 19.500000000 35.499591827 -28.497470856 19.500000000 35.497467041 -29.488407135 19.500000000 35.488403320 -30.458978653 19.500000000 35.458980560 -31.384418488 19.500000000 35.384422302 -32.233222961 19.500000000 35.233222961 -32.981101990 19.500000000 34.981101990 --33.981101990 20.500000000 34.981101990 --33.233226776 20.500000000 35.233222961 --32.384422302 20.500000000 35.384418488 --31.458978653 20.500000000 35.458980560 --30.488407135 20.500000000 35.488403320 --29.497472763 20.500000000 35.497474670 --28.499593735 20.500000000 35.499591827 --27.499954224 20.500000000 35.499954224 --26.499996185 20.500000000 35.499996185 --25.500000000 20.500000000 35.500000000 --24.500000000 20.500000000 35.500000000 --23.500000000 20.500000000 35.500000000 --22.500000000 20.500000000 35.500000000 --21.500000000 20.500000000 35.500000000 --20.500000000 20.500000000 35.500000000 --19.500000000 20.500000000 35.500000000 --18.500000000 20.500000000 35.500000000 --17.500000000 20.500000000 35.500000000 --16.500000000 20.500000000 35.500000000 --15.500000000 20.500000000 35.500000000 --14.500000000 20.500000000 35.500000000 --13.500000000 20.500000000 35.500000000 --12.500000000 20.500000000 35.500000000 --11.500000000 20.500000000 35.500000000 --10.500000000 20.500000000 35.500000000 --9.500000000 20.500000000 35.500000000 --8.500000000 20.500000000 35.500000000 --7.500000000 20.500000000 35.500000000 --6.500000000 20.500000000 35.500000000 --5.500000000 20.500000000 35.500000000 --4.500000000 20.500000000 35.500000000 --3.500000000 20.500000000 35.500000000 --2.500000000 20.500000000 35.500000000 --1.500000000 20.500000000 35.500000000 
--0.500000000 20.500000000 35.500000000 -0.500000000 20.500000000 35.500000000 -1.500000000 20.500000000 35.500000000 -2.500000000 20.500000000 35.500000000 -3.500000000 20.500000000 35.500000000 -4.500000000 20.500000000 35.500000000 -5.500000000 20.500000000 35.500000000 -6.500000000 20.500000000 35.500000000 -7.500000000 20.500000000 35.500000000 -8.500000000 20.500000000 35.500000000 -9.500000000 20.500000000 35.500000000 -10.500000000 20.500000000 35.500000000 -11.500000000 20.500000000 35.500000000 -12.500000000 20.500000000 35.500000000 -13.500000000 20.500000000 35.500000000 -14.500000000 20.500000000 35.500000000 -15.500000000 20.500000000 35.500000000 -16.500000000 20.500000000 35.500000000 -17.500000000 20.500000000 35.500000000 -18.500000000 20.500000000 35.500000000 -19.500000000 20.500000000 35.500000000 -20.500000000 20.500000000 35.500000000 -21.500000000 20.500000000 35.500000000 -22.500000000 20.500000000 35.500000000 -23.500000000 20.500000000 35.500000000 -24.500000000 20.500000000 35.500000000 -25.499996185 20.500000000 35.499996185 -26.499954224 20.500000000 35.499954224 -27.499591827 20.500000000 35.499591827 -28.497470856 20.500000000 35.497467041 -29.488407135 20.500000000 35.488403320 -30.458978653 20.500000000 35.458980560 -31.384418488 20.500000000 35.384422302 -32.233222961 20.500000000 35.233222961 -32.981101990 20.500000000 34.981101990 --33.981101990 21.500000000 34.981101990 --33.233226776 21.500000000 35.233222961 --32.384422302 21.500000000 35.384418488 --31.458978653 21.500000000 35.458980560 --30.488407135 21.500000000 35.488403320 --29.497472763 21.500000000 35.497474670 --28.499593735 21.500000000 35.499591827 --27.499954224 21.500000000 35.499954224 --26.499996185 21.500000000 35.499996185 --25.500000000 21.500000000 35.500000000 --24.500000000 21.500000000 35.500000000 --23.500000000 21.500000000 35.500000000 --22.500000000 21.500000000 35.500000000 --21.500000000 21.500000000 35.500000000 --20.500000000 21.500000000 35.500000000 --19.500000000 21.500000000 35.500000000 --18.500000000 21.500000000 35.500000000 --17.500000000 21.500000000 35.500000000 --16.500000000 21.500000000 35.500000000 --15.500000000 21.500000000 35.500000000 --14.500000000 21.500000000 35.500000000 --13.500000000 21.500000000 35.500000000 --12.500000000 21.500000000 35.500000000 --11.500000000 21.500000000 35.500000000 --10.500000000 21.500000000 35.500000000 --9.500000000 21.500000000 35.500000000 --8.500000000 21.500000000 35.500000000 --7.500000000 21.500000000 35.500000000 --6.500000000 21.500000000 35.500000000 --5.500000000 21.500000000 35.500000000 --4.500000000 21.500000000 35.500000000 --3.500000000 21.500000000 35.500000000 --2.500000000 21.500000000 35.500000000 --1.500000000 21.500000000 35.500000000 --0.500000000 21.500000000 35.500000000 -0.500000000 21.500000000 35.500000000 -1.500000000 21.500000000 35.500000000 -2.500000000 21.500000000 35.500000000 -3.500000000 21.500000000 35.500000000 -4.500000000 21.500000000 35.500000000 -5.500000000 21.500000000 35.500000000 -6.500000000 21.500000000 35.500000000 -7.500000000 21.500000000 35.500000000 -8.500000000 21.500000000 35.500000000 -9.500000000 21.500000000 35.500000000 -10.500000000 21.500000000 35.500000000 -11.500000000 21.500000000 35.500000000 -12.500000000 21.500000000 35.500000000 -13.500000000 21.500000000 35.500000000 -14.500000000 21.500000000 35.500000000 -15.500000000 21.500000000 35.500000000 -16.500000000 21.500000000 35.500000000 -17.500000000 21.500000000 35.500000000 -18.500000000 21.500000000 
35.500000000 -19.500000000 21.500000000 35.500000000 -20.500000000 21.500000000 35.500000000 -21.500000000 21.500000000 35.500000000 -22.500000000 21.500000000 35.500000000 -23.500000000 21.500000000 35.500000000 -24.500000000 21.500000000 35.500000000 -25.499996185 21.500000000 35.499996185 -26.499954224 21.500000000 35.499954224 -27.499591827 21.500000000 35.499591827 -28.497470856 21.500000000 35.497467041 -29.488407135 21.500000000 35.488403320 -30.458978653 21.500000000 35.458980560 -31.384418488 21.500000000 35.384422302 -32.233222961 21.500000000 35.233222961 -32.981101990 21.500000000 34.981101990 --33.981101990 22.500000000 34.981101990 --33.233226776 22.500000000 35.233222961 --32.384422302 22.500000000 35.384418488 --31.458978653 22.500000000 35.458980560 --30.488407135 22.500000000 35.488403320 --29.497472763 22.500000000 35.497474670 --28.499593735 22.500000000 35.499591827 --27.499954224 22.500000000 35.499954224 --26.499996185 22.500000000 35.499996185 --25.500000000 22.500000000 35.500000000 --24.500000000 22.500000000 35.500000000 --23.500000000 22.500000000 35.500000000 --22.500000000 22.500000000 35.500000000 --21.500000000 22.500000000 35.500000000 --20.500000000 22.500000000 35.500000000 --19.500000000 22.500000000 35.500000000 --18.500000000 22.500000000 35.500000000 --17.500000000 22.500000000 35.500000000 --16.500000000 22.500000000 35.500000000 --15.500000000 22.500000000 35.500000000 --14.500000000 22.500000000 35.500000000 --13.500000000 22.500000000 35.500000000 --12.500000000 22.500000000 35.500000000 --11.500000000 22.500000000 35.500000000 --10.500000000 22.500000000 35.500000000 --9.500000000 22.500000000 35.500000000 --8.500000000 22.500000000 35.500000000 --7.500000000 22.500000000 35.500000000 --6.500000000 22.500000000 35.500000000 --5.500000000 22.500000000 35.500000000 --4.500000000 22.500000000 35.500000000 --3.500000000 22.500000000 35.500000000 --2.500000000 22.500000000 35.500000000 --1.500000000 22.500000000 35.500000000 --0.500000000 22.500000000 35.500000000 -0.500000000 22.500000000 35.500000000 -1.500000000 22.500000000 35.500000000 -2.500000000 22.500000000 35.500000000 -3.500000000 22.500000000 35.500000000 -4.500000000 22.500000000 35.500000000 -5.500000000 22.500000000 35.500000000 -6.500000000 22.500000000 35.500000000 -7.500000000 22.500000000 35.500000000 -8.500000000 22.500000000 35.500000000 -9.500000000 22.500000000 35.500000000 -10.500000000 22.500000000 35.500000000 -11.500000000 22.500000000 35.500000000 -12.500000000 22.500000000 35.500000000 -13.500000000 22.500000000 35.500000000 -14.500000000 22.500000000 35.500000000 -15.500000000 22.500000000 35.500000000 -16.500000000 22.500000000 35.500000000 -17.500000000 22.500000000 35.500000000 -18.500000000 22.500000000 35.500000000 -19.500000000 22.500000000 35.500000000 -20.500000000 22.500000000 35.500000000 -21.500000000 22.500000000 35.500000000 -22.500000000 22.500000000 35.500000000 -23.500000000 22.500000000 35.500000000 -24.500000000 22.500000000 35.500000000 -25.499996185 22.500000000 35.499996185 -26.499954224 22.500000000 35.499954224 -27.499591827 22.500000000 35.499591827 -28.497470856 22.500000000 35.497467041 -29.488407135 22.500000000 35.488403320 -30.458978653 22.500000000 35.458980560 -31.384418488 22.500000000 35.384422302 -32.233222961 22.500000000 35.233222961 -32.981101990 22.500000000 34.981101990 --33.981101990 23.499998093 34.981101990 --33.233226776 23.500000000 35.233222961 --32.384422302 23.500000000 35.384418488 --31.458978653 23.500000000 35.458980560 
--30.488407135 23.500000000 35.488403320 --29.497472763 23.500000000 35.497474670 --28.499593735 23.500000000 35.499591827 --27.499954224 23.500000000 35.499954224 --26.499996185 23.500000000 35.499996185 --25.500000000 23.500000000 35.500000000 --24.500000000 23.500000000 35.500000000 --23.500000000 23.500000000 35.500000000 --22.500000000 23.500000000 35.500000000 --21.500000000 23.500000000 35.500000000 --20.500000000 23.500000000 35.500000000 --19.500000000 23.500000000 35.500000000 --18.500000000 23.500000000 35.500000000 --17.500000000 23.500000000 35.500000000 --16.500000000 23.500000000 35.500000000 --15.500000000 23.500000000 35.500000000 --14.500000000 23.500000000 35.500000000 --13.500000000 23.500000000 35.500000000 --12.500000000 23.500000000 35.500000000 --11.500000000 23.500000000 35.500000000 --10.500000000 23.500000000 35.500000000 --9.500000000 23.500000000 35.500000000 --8.500000000 23.500000000 35.500000000 --7.500000000 23.500000000 35.500000000 --6.500000000 23.500000000 35.500000000 --5.500000000 23.500000000 35.500000000 --4.500000000 23.500000000 35.500000000 --3.500000000 23.500000000 35.500000000 --2.500000000 23.500000000 35.500000000 --1.500000000 23.500000000 35.500000000 --0.500000000 23.500000000 35.500000000 -0.500000000 23.500000000 35.500000000 -1.500000000 23.500000000 35.500000000 -2.500000000 23.500000000 35.500000000 -3.500000000 23.500000000 35.500000000 -4.500000000 23.500000000 35.500000000 -5.500000000 23.500000000 35.500000000 -6.500000000 23.500000000 35.500000000 -7.500000000 23.500000000 35.500000000 -8.500000000 23.500000000 35.500000000 -9.500000000 23.500000000 35.500000000 -10.500000000 23.500000000 35.500000000 -11.500000000 23.500000000 35.500000000 -12.500000000 23.500000000 35.500000000 -13.500000000 23.500000000 35.500000000 -14.500000000 23.500000000 35.500000000 -15.500000000 23.500000000 35.500000000 -16.500000000 23.500000000 35.500000000 -17.500000000 23.500000000 35.500000000 -18.500000000 23.500000000 35.500000000 -19.500000000 23.500000000 35.500000000 -20.500000000 23.500000000 35.500000000 -21.500000000 23.500000000 35.500000000 -22.500000000 23.500000000 35.500000000 -23.500000000 23.500000000 35.500000000 -24.500000000 23.500000000 35.500000000 -25.499996185 23.500000000 35.499996185 -26.499954224 23.500000000 35.499954224 -27.499591827 23.500000000 35.499591827 -28.497470856 23.500000000 35.497467041 -29.488407135 23.500000000 35.488403320 -30.458978653 23.500000000 35.458980560 -31.384418488 23.500000000 35.384422302 -32.233222961 23.500000000 35.233222961 -32.981101990 23.499998093 34.981101990 --33.981086731 24.499979019 34.981086731 --33.233219147 24.499984741 35.233203888 --32.384422302 24.499996185 35.384407043 --31.458978653 24.500000000 35.458972931 --30.488407135 24.500000000 35.488403320 --29.497472763 24.500000000 35.497474670 --28.499593735 24.500000000 35.499591827 --27.499954224 24.500000000 35.499954224 --26.499996185 24.500000000 35.499996185 --25.500000000 24.500000000 35.500000000 --24.500000000 24.500000000 35.500000000 --23.500000000 24.500000000 35.500000000 --22.500000000 24.500000000 35.500000000 --21.500000000 24.500000000 35.500000000 --20.500000000 24.500000000 35.500000000 --19.500000000 24.500000000 35.500000000 --18.500000000 24.500000000 35.500000000 --17.500000000 24.500000000 35.500000000 --16.500000000 24.500000000 35.500000000 --15.500000000 24.500000000 35.500000000 --14.500000000 24.500000000 35.500000000 --13.500000000 24.500000000 35.500000000 --12.500000000 24.500000000 35.500000000 
--11.500000000 24.500000000 35.500000000 --10.500000000 24.500000000 35.500000000 --9.500000000 24.500000000 35.500000000 --8.500000000 24.500000000 35.500000000 --7.500000000 24.500000000 35.500000000 --6.500000000 24.500000000 35.500000000 --5.500000000 24.500000000 35.500000000 --4.500000000 24.500000000 35.500000000 --3.500000000 24.500000000 35.500000000 --2.500000000 24.500000000 35.500000000 --1.500000000 24.500000000 35.500000000 --0.500000000 24.500000000 35.500000000 -0.500000000 24.500000000 35.500000000 -1.500000000 24.500000000 35.500000000 -2.500000000 24.500000000 35.500000000 -3.500000000 24.500000000 35.500000000 -4.500000000 24.500000000 35.500000000 -5.500000000 24.500000000 35.500000000 -6.500000000 24.500000000 35.500000000 -7.500000000 24.500000000 35.500000000 -8.500000000 24.500000000 35.500000000 -9.500000000 24.500000000 35.500000000 -10.500000000 24.500000000 35.500000000 -11.500000000 24.500000000 35.500000000 -12.500000000 24.500000000 35.500000000 -13.500000000 24.500000000 35.500000000 -14.500000000 24.500000000 35.500000000 -15.500000000 24.500000000 35.500000000 -16.500000000 24.500000000 35.500000000 -17.500000000 24.500000000 35.500000000 -18.500000000 24.500000000 35.500000000 -19.500000000 24.500000000 35.500000000 -20.500000000 24.500000000 35.500000000 -21.500000000 24.500000000 35.500000000 -22.500000000 24.500000000 35.500000000 -23.500000000 24.500000000 35.500000000 -24.500000000 24.500000000 35.500000000 -25.499996185 24.500000000 35.499996185 -26.499954224 24.500000000 35.499954224 -27.499591827 24.500000000 35.499591827 -28.497470856 24.500000000 35.497467041 -29.488407135 24.500000000 35.488403320 -30.458978653 24.500000000 35.458976746 -31.384418488 24.499996185 35.384407043 -32.233219147 24.499988556 35.233207703 -32.981086731 24.499979019 34.981086731 --33.980972290 25.499826431 34.980957031 --33.233169556 25.499874115 35.233074188 --32.384407043 25.499950409 35.384307861 --31.458978653 25.499988556 35.458930969 --30.488407135 25.499996185 35.488388062 --29.497472763 25.499996185 35.497470856 --28.499593735 25.499996185 35.499588013 --27.499954224 25.499996185 35.499950409 --26.499996185 25.499996185 35.499992371 --25.500000000 25.499996185 35.499996185 --24.500000000 25.499996185 35.499996185 --23.500000000 25.499996185 35.499996185 --22.500000000 25.499996185 35.499996185 --21.500000000 25.499996185 35.499996185 --20.500000000 25.499996185 35.499996185 --19.500000000 25.499996185 35.499996185 --18.500000000 25.499996185 35.499996185 --17.500000000 25.499996185 35.499996185 --16.500000000 25.499996185 35.499996185 --15.500000000 25.499996185 35.499996185 --14.500000000 25.499996185 35.499996185 --13.500000000 25.499996185 35.499996185 --12.500000000 25.499996185 35.499996185 --11.500000000 25.499996185 35.499996185 --10.500000000 25.499996185 35.499996185 --9.500000000 25.499996185 35.499996185 --8.500000000 25.499996185 35.499996185 --7.500000000 25.499996185 35.499996185 --6.500000000 25.499996185 35.499996185 --5.500000000 25.499996185 35.499996185 --4.500000000 25.499996185 35.499996185 --3.500000000 25.499996185 35.499996185 --2.500000000 25.499996185 35.499996185 --1.500000000 25.499996185 35.499996185 --0.500000000 25.499996185 35.499996185 -0.500000000 25.499996185 35.499996185 -1.500000000 25.499996185 35.499996185 -2.500000000 25.499996185 35.499996185 -3.500000000 25.499996185 35.499996185 -4.500000000 25.499996185 35.499996185 -5.500000000 25.499996185 35.499996185 -6.500000000 25.499996185 35.499996185 -7.500000000 
25.499996185 35.499996185 -8.500000000 25.499996185 35.499996185 -9.500000000 25.499996185 35.499996185 -10.500000000 25.499996185 35.499996185 -11.500000000 25.499996185 35.499996185 -12.500000000 25.499996185 35.499996185 -13.500000000 25.499996185 35.499996185 -14.500000000 25.499996185 35.499996185 -15.500000000 25.499996185 35.499996185 -16.500000000 25.499996185 35.499996185 -17.500000000 25.499996185 35.499996185 -18.500000000 25.499996185 35.499996185 -19.500000000 25.499996185 35.499996185 -20.500000000 25.499996185 35.499996185 -21.500000000 25.499996185 35.499996185 -22.500000000 25.499996185 35.499996185 -23.500000000 25.499996185 35.499996185 -24.500000000 25.499996185 35.499996185 -25.499996185 25.499996185 35.499992371 -26.499954224 25.499996185 35.499950409 -27.499591827 25.499996185 35.499588013 -28.497470856 25.499996185 35.497467041 -29.488407135 25.499996185 35.488391876 -30.458974838 25.499988556 35.458934784 -31.384403229 25.499950409 35.384307861 -32.233165741 25.499874115 35.233070374 -32.980972290 25.499826431 34.980957031 --33.980331421 26.498952866 34.980201721 --33.232864380 26.499227524 35.232303619 --32.384296417 26.499622345 35.383720398 --31.458948135 26.499858856 35.458606720 --30.488397598 26.499938965 35.488258362 --29.497472763 26.499954224 35.497406006 --28.499593735 26.499954224 35.499549866 --27.499954224 26.499954224 35.499908447 --26.499996185 26.499954224 35.499950409 --25.500000000 26.499954224 35.499954224 --24.500000000 26.499954224 35.499954224 --23.500000000 26.499954224 35.499954224 --22.500000000 26.499954224 35.499954224 --21.500000000 26.499954224 35.499954224 --20.500000000 26.499954224 35.499954224 --19.500000000 26.499954224 35.499954224 --18.500000000 26.499954224 35.499954224 --17.500000000 26.499954224 35.499954224 --16.500000000 26.499954224 35.499954224 --15.500000000 26.499954224 35.499954224 --14.500000000 26.499954224 35.499954224 --13.500000000 26.499954224 35.499954224 --12.500000000 26.499954224 35.499954224 --11.500000000 26.499954224 35.499954224 --10.500000000 26.499954224 35.499954224 --9.500000000 26.499954224 35.499954224 --8.500000000 26.499954224 35.499954224 --7.500000000 26.499954224 35.499954224 --6.500000000 26.499954224 35.499954224 --5.500000000 26.499954224 35.499954224 --4.500000000 26.499954224 35.499954224 --3.500000000 26.499954224 35.499954224 --2.500000000 26.499954224 35.499954224 --1.500000000 26.499954224 35.499954224 --0.500000000 26.499954224 35.499954224 -0.500000000 26.499954224 35.499954224 -1.500000000 26.499954224 35.499954224 -2.500000000 26.499954224 35.499954224 -3.500000000 26.499954224 35.499954224 -4.500000000 26.499954224 35.499954224 -5.500000000 26.499954224 35.499954224 -6.500000000 26.499954224 35.499954224 -7.500000000 26.499954224 35.499954224 -8.500000000 26.499954224 35.499954224 -9.500000000 26.499954224 35.499954224 -10.500000000 26.499954224 35.499954224 -11.500000000 26.499954224 35.499954224 -12.500000000 26.499954224 35.499954224 -13.500000000 26.499954224 35.499954224 -14.500000000 26.499954224 35.499954224 -15.500000000 26.499954224 35.499954224 -16.500000000 26.499954224 35.499954224 -17.500000000 26.499954224 35.499954224 -18.500000000 26.499954224 35.499954224 -19.500000000 26.499954224 35.499954224 -20.500000000 26.499954224 35.499954224 -21.500000000 26.499954224 35.499954224 -22.500000000 26.499954224 35.499954224 -23.500000000 26.499954224 35.499954224 -24.500000000 26.499954224 35.499954224 -25.499996185 26.499954224 35.499950409 -26.499954224 26.499954224 
[... VTK legacy PolyData payload omitted: several thousand diff lines (each prefixed with '-') of point coordinate triples, followed by the cell section header "POLYGONS 62064 248256" and the triangle connectivity entries ...]
1623 1624 1693 -3 1623 1693 1692 -3 1624 1625 1694 -3 1624 1694 1693 -3 1625 1626 1695 -3 1625 1695 1694 -3 1626 1627 1696 -3 1626 1696 1695 -3 1627 1628 1697 -3 1627 1697 1696 -3 1628 1629 1698 -3 1628 1698 1697 -3 1629 1630 1699 -3 1629 1699 1698 -3 1630 1631 1700 -3 1630 1700 1699 -3 1631 1632 1701 -3 1631 1701 1700 -3 1632 1633 1702 -3 1632 1702 1701 -3 1633 1634 1703 -3 1633 1703 1702 -3 1634 1635 1704 -3 1634 1704 1703 -3 1635 1636 1705 -3 1635 1705 1704 -3 1636 1637 1706 -3 1636 1706 1705 -3 1637 1638 1707 -3 1637 1707 1706 -3 1638 1639 1707 -3 1708 1707 1639 -3 1639 1640 1708 -3 1709 1708 1640 -3 1640 1641 1709 -3 1710 1709 1641 -3 1641 1642 1710 -3 1711 1710 1642 -3 1642 1643 1711 -3 1712 1711 1643 -3 1643 1644 1712 -3 1713 1712 1644 -3 1644 1645 1713 -3 1714 1713 1645 -3 1645 1646 1714 -3 1715 1714 1646 -3 1646 1647 1715 -3 1716 1715 1647 -3 1647 1648 1716 -3 1717 1716 1648 -3 1648 1649 1717 -3 1718 1717 1649 -3 1649 4981 1718 -3 4985 1718 4981 -3 1650 1651 1719 -3 1720 1719 1651 -3 1650 1719 4984 -3 4988 4984 1719 -3 1651 1652 1720 -3 1721 1720 1652 -3 1652 1653 1721 -3 1722 1721 1653 -3 1653 1654 1722 -3 1723 1722 1654 -3 1654 1655 1723 -3 1724 1723 1655 -3 1655 1656 1724 -3 1725 1724 1656 -3 1656 1657 1725 -3 1726 1725 1657 -3 1657 1658 1726 -3 1727 1726 1658 -3 1658 1659 1727 -3 1728 1727 1659 -3 1659 1660 1728 -3 1729 1728 1660 -3 1660 1661 1729 -3 1730 1729 1661 -3 1661 1662 1730 -3 1731 1730 1662 -3 1662 1663 1731 -3 1732 1731 1663 -3 1663 1664 1733 -3 1663 1733 1732 -3 1664 1665 1734 -3 1664 1734 1733 -3 1665 1666 1735 -3 1665 1735 1734 -3 1666 1667 1736 -3 1666 1736 1735 -3 1667 1668 1737 -3 1667 1737 1736 -3 1668 1669 1738 -3 1668 1738 1737 -3 1669 1670 1739 -3 1669 1739 1738 -3 1670 1671 1740 -3 1670 1740 1739 -3 1671 1672 1741 -3 1671 1741 1740 -3 1672 1673 1742 -3 1672 1742 1741 -3 1673 1674 1743 -3 1673 1743 1742 -3 1674 1675 1744 -3 1674 1744 1743 -3 1675 1676 1745 -3 1675 1745 1744 -3 1676 1677 1746 -3 1676 1746 1745 -3 1677 1678 1747 -3 1677 1747 1746 -3 1678 1679 1748 -3 1678 1748 1747 -3 1679 1680 1749 -3 1679 1749 1748 -3 1680 1681 1750 -3 1680 1750 1749 -3 1681 1682 1751 -3 1681 1751 1750 -3 1682 1683 1752 -3 1682 1752 1751 -3 1683 1684 1753 -3 1683 1753 1752 -3 1684 1685 1754 -3 1684 1754 1753 -3 1685 1686 1755 -3 1685 1755 1754 -3 1686 1687 1756 -3 1686 1756 1755 -3 1687 1688 1757 -3 1687 1757 1756 -3 1688 1689 1757 -3 1758 1757 1689 -3 1689 1690 1758 -3 1759 1758 1690 -3 1690 1691 1759 -3 1760 1759 1691 -3 1691 1692 1760 -3 1761 1760 1692 -3 1692 1693 1761 -3 1762 1761 1693 -3 1693 1694 1762 -3 1763 1762 1694 -3 1694 1695 1763 -3 1764 1763 1695 -3 1695 1696 1764 -3 1765 1764 1696 -3 1696 1697 1765 -3 1766 1765 1697 -3 1697 1698 1766 -3 1767 1766 1698 -3 1698 1699 1767 -3 1768 1767 1699 -3 1699 1700 1768 -3 1769 1768 1700 -3 1700 1701 1769 -3 1770 1769 1701 -3 1701 1702 1770 -3 1771 1770 1702 -3 1702 1703 1771 -3 1772 1771 1703 -3 1703 1704 1772 -3 1773 1772 1704 -3 1704 1705 1773 -3 1774 1773 1705 -3 1705 1706 1774 -3 1775 1774 1706 -3 1706 1707 1775 -3 1776 1775 1707 -3 1707 1708 1776 -3 1777 1776 1708 -3 1708 1709 1777 -3 1778 1777 1709 -3 1709 1710 1778 -3 1779 1778 1710 -3 1710 1711 1779 -3 1780 1779 1711 -3 1711 1712 1780 -3 1781 1780 1712 -3 1712 1713 1781 -3 1782 1781 1713 -3 1713 1714 1783 -3 1713 1783 1782 -3 1714 1715 1784 -3 1714 1784 1783 -3 1715 1716 1785 -3 1715 1785 1784 -3 1716 1717 1786 -3 1716 1786 1785 -3 1717 1718 1787 -3 1717 1787 1786 -3 1718 4985 4989 -3 1718 4989 1787 -3 1719 1720 1789 -3 1719 1789 1788 -3 1719 1788 4988 -3 4992 
4988 1788 -3 1720 1721 1790 -3 1720 1790 1789 -3 1721 1722 1791 -3 1721 1791 1790 -3 1722 1723 1792 -3 1722 1792 1791 -3 1723 1724 1793 -3 1723 1793 1792 -3 1724 1725 1794 -3 1724 1794 1793 -3 1725 1726 1795 -3 1725 1795 1794 -3 1726 1727 1796 -3 1726 1796 1795 -3 1727 1728 1797 -3 1727 1797 1796 -3 1728 1729 1798 -3 1728 1798 1797 -3 1729 1730 1799 -3 1729 1799 1798 -3 1730 1731 1800 -3 1730 1800 1799 -3 1731 1732 1801 -3 1731 1801 1800 -3 1732 1733 1802 -3 1732 1802 1801 -3 1733 1734 1803 -3 1733 1803 1802 -3 1734 1735 1804 -3 1734 1804 1803 -3 1735 1736 1805 -3 1735 1805 1804 -3 1736 1737 1806 -3 1736 1806 1805 -3 1737 1738 1807 -3 1737 1807 1806 -3 1738 1739 1808 -3 1738 1808 1807 -3 1739 1740 1808 -3 1809 1808 1740 -3 1740 1741 1809 -3 1810 1809 1741 -3 1741 1742 1810 -3 1811 1810 1742 -3 1742 1743 1811 -3 1812 1811 1743 -3 1743 1744 1812 -3 1813 1812 1744 -3 1744 1745 1813 -3 1814 1813 1745 -3 1745 1746 1814 -3 1815 1814 1746 -3 1746 1747 1815 -3 1816 1815 1747 -3 1747 1748 1816 -3 1817 1816 1748 -3 1748 1749 1817 -3 1818 1817 1749 -3 1749 1750 1818 -3 1819 1818 1750 -3 1750 1751 1819 -3 1820 1819 1751 -3 1751 1752 1820 -3 1821 1820 1752 -3 1752 1753 1821 -3 1822 1821 1753 -3 1753 1754 1822 -3 1823 1822 1754 -3 1754 1755 1823 -3 1824 1823 1755 -3 1755 1756 1824 -3 1825 1824 1756 -3 1756 1757 1825 -3 1826 1825 1757 -3 1757 1758 1826 -3 1827 1826 1758 -3 1758 1759 1827 -3 1828 1827 1759 -3 1759 1760 1828 -3 1829 1828 1760 -3 1760 1761 1829 -3 1830 1829 1761 -3 1761 1762 1830 -3 1831 1830 1762 -3 1762 1763 1831 -3 1832 1831 1763 -3 1763 1764 1832 -3 1833 1832 1764 -3 1764 1765 1833 -3 1834 1833 1765 -3 1765 1766 1835 -3 1765 1835 1834 -3 1766 1767 1836 -3 1766 1836 1835 -3 1767 1768 1837 -3 1767 1837 1836 -3 1768 1769 1838 -3 1768 1838 1837 -3 1769 1770 1839 -3 1769 1839 1838 -3 1770 1771 1840 -3 1770 1840 1839 -3 1771 1772 1841 -3 1771 1841 1840 -3 1772 1773 1842 -3 1772 1842 1841 -3 1773 1774 1843 -3 1773 1843 1842 -3 1774 1775 1844 -3 1774 1844 1843 -3 1775 1776 1845 -3 1775 1845 1844 -3 1776 1777 1846 -3 1776 1846 1845 -3 1777 1778 1847 -3 1777 1847 1846 -3 1778 1779 1848 -3 1778 1848 1847 -3 1779 1780 1849 -3 1779 1849 1848 -3 1780 1781 1850 -3 1780 1850 1849 -3 1781 1782 1851 -3 1781 1851 1850 -3 1782 1783 1852 -3 1782 1852 1851 -3 1783 1784 1853 -3 1783 1853 1852 -3 1784 1785 1854 -3 1784 1854 1853 -3 1785 1786 1855 -3 1785 1855 1854 -3 1786 1787 1856 -3 1786 1856 1855 -3 1787 4989 1856 -3 4993 1856 4989 -3 1788 1789 1858 -3 1788 1858 1857 -3 1788 1857 4996 -3 1788 4996 4992 -3 1789 1790 1859 -3 1789 1859 1858 -3 1790 1791 1860 -3 1790 1860 1859 -3 1791 1792 1860 -3 1861 1860 1792 -3 1792 1793 1861 -3 1862 1861 1793 -3 1793 1794 1862 -3 1863 1862 1794 -3 1794 1795 1863 -3 1864 1863 1795 -3 1795 1796 1864 -3 1865 1864 1796 -3 1796 1797 1865 -3 1866 1865 1797 -3 1797 1798 1866 -3 1867 1866 1798 -3 1798 1799 1867 -3 1868 1867 1799 -3 1799 1800 1868 -3 1869 1868 1800 -3 1800 1801 1869 -3 1870 1869 1801 -3 1801 1802 1870 -3 1871 1870 1802 -3 1802 1803 1871 -3 1872 1871 1803 -3 1803 1804 1872 -3 1873 1872 1804 -3 1804 1805 1873 -3 1874 1873 1805 -3 1805 1806 1874 -3 1875 1874 1806 -3 1806 1807 1875 -3 1876 1875 1807 -3 1807 1808 1876 -3 1877 1876 1808 -3 1808 1809 1877 -3 1878 1877 1809 -3 1809 1810 1878 -3 1879 1878 1810 -3 1810 1811 1879 -3 1880 1879 1811 -3 1811 1812 1880 -3 1881 1880 1812 -3 1812 1813 1881 -3 1882 1881 1813 -3 1813 1814 1882 -3 1883 1882 1814 -3 1814 1815 1883 -3 1884 1883 1815 -3 1815 1816 1884 -3 1885 1884 1816 -3 1816 1817 1885 -3 1886 1885 1817 -3 1817 1818 
1887 -3 1817 1887 1886 -3 1818 1819 1888 -3 1818 1888 1887 -3 1819 1820 1889 -3 1819 1889 1888 -3 1820 1821 1890 -3 1820 1890 1889 -3 1821 1822 1891 -3 1821 1891 1890 -3 1822 1823 1892 -3 1822 1892 1891 -3 1823 1824 1893 -3 1823 1893 1892 -3 1824 1825 1894 -3 1824 1894 1893 -3 1825 1826 1895 -3 1825 1895 1894 -3 1826 1827 1896 -3 1826 1896 1895 -3 1827 1828 1897 -3 1827 1897 1896 -3 1828 1829 1898 -3 1828 1898 1897 -3 1829 1830 1899 -3 1829 1899 1898 -3 1830 1831 1900 -3 1830 1900 1899 -3 1831 1832 1901 -3 1831 1901 1900 -3 1832 1833 1902 -3 1832 1902 1901 -3 1833 1834 1903 -3 1833 1903 1902 -3 1834 1835 1904 -3 1834 1904 1903 -3 1835 1836 1905 -3 1835 1905 1904 -3 1836 1837 1906 -3 1836 1906 1905 -3 1837 1838 1907 -3 1837 1907 1906 -3 1838 1839 1908 -3 1838 1908 1907 -3 1839 1840 1909 -3 1839 1909 1908 -3 1840 1841 1910 -3 1840 1910 1909 -3 1841 1842 1911 -3 1841 1911 1910 -3 1842 1843 1912 -3 1842 1912 1911 -3 1843 1844 1912 -3 1913 1912 1844 -3 1844 1845 1913 -3 1914 1913 1845 -3 1845 1846 1914 -3 1915 1914 1846 -3 1846 1847 1915 -3 1916 1915 1847 -3 1847 1848 1916 -3 1917 1916 1848 -3 1848 1849 1917 -3 1918 1917 1849 -3 1849 1850 1918 -3 1919 1918 1850 -3 1850 1851 1919 -3 1920 1919 1851 -3 1851 1852 1920 -3 1921 1920 1852 -3 1852 1853 1921 -3 1922 1921 1853 -3 1853 1854 1922 -3 1923 1922 1854 -3 1854 1855 1923 -3 1924 1923 1855 -3 1855 1856 1924 -3 1925 1924 1856 -3 1856 4993 1925 -3 4997 1925 4993 -3 1857 1858 1926 -3 1927 1926 1858 -3 1857 1926 4996 -3 5000 4996 1926 -3 1858 1859 1927 -3 1928 1927 1859 -3 1859 1860 1928 -3 1929 1928 1860 -3 1860 1861 1929 -3 1930 1929 1861 -3 1861 1862 1930 -3 1931 1930 1862 -3 1862 1863 1931 -3 1932 1931 1863 -3 1863 1864 1932 -3 1933 1932 1864 -3 1864 1865 1933 -3 1934 1933 1865 -3 1865 1866 1934 -3 1935 1934 1866 -3 1866 1867 1935 -3 1936 1935 1867 -3 1867 1868 1936 -3 1937 1936 1868 -3 1868 1869 1937 -3 1938 1937 1869 -3 1869 1870 1938 -3 1939 1938 1870 -3 1870 1871 1940 -3 1870 1940 1939 -3 1871 1872 1941 -3 1871 1941 1940 -3 1872 1873 1942 -3 1872 1942 1941 -3 1873 1874 1943 -3 1873 1943 1942 -3 1874 1875 1944 -3 1874 1944 1943 -3 1875 1876 1945 -3 1875 1945 1944 -3 1876 1877 1946 -3 1876 1946 1945 -3 1877 1878 1947 -3 1877 1947 1946 -3 1878 1879 1948 -3 1878 1948 1947 -3 1879 1880 1949 -3 1879 1949 1948 -3 1880 1881 1950 -3 1880 1950 1949 -3 1881 1882 1951 -3 1881 1951 1950 -3 1882 1883 1952 -3 1882 1952 1951 -3 1883 1884 1953 -3 1883 1953 1952 -3 1884 1885 1954 -3 1884 1954 1953 -3 1885 1886 1955 -3 1885 1955 1954 -3 1886 1887 1956 -3 1886 1956 1955 -3 1887 1888 1957 -3 1887 1957 1956 -3 1888 1889 1958 -3 1888 1958 1957 -3 1889 1890 1959 -3 1889 1959 1958 -3 1890 1891 1960 -3 1890 1960 1959 -3 1891 1892 1961 -3 1891 1961 1960 -3 1892 1893 1962 -3 1892 1962 1961 -3 1893 1894 1963 -3 1893 1963 1962 -3 1894 1895 1964 -3 1894 1964 1963 -3 1895 1896 1965 -3 1895 1965 1964 -3 1896 1897 1965 -3 1966 1965 1897 -3 1897 1898 1966 -3 1967 1966 1898 -3 1898 1899 1967 -3 1968 1967 1899 -3 1899 1900 1968 -3 1969 1968 1900 -3 1900 1901 1969 -3 1970 1969 1901 -3 1901 1902 1970 -3 1971 1970 1902 -3 1902 1903 1971 -3 1972 1971 1903 -3 1903 1904 1972 -3 1973 1972 1904 -3 1904 1905 1973 -3 1974 1973 1905 -3 1905 1906 1974 -3 1975 1974 1906 -3 1906 1907 1975 -3 1976 1975 1907 -3 1907 1908 1976 -3 1977 1976 1908 -3 1908 1909 1977 -3 1978 1977 1909 -3 1909 1910 1978 -3 1979 1978 1910 -3 1910 1911 1979 -3 1980 1979 1911 -3 1911 1912 1980 -3 1981 1980 1912 -3 1912 1913 1981 -3 1982 1981 1913 -3 1913 1914 1982 -3 1983 1982 1914 -3 1914 1915 1983 -3 1984 1983 1915 -3 
1915 1916 1984 -3 1985 1984 1916 -3 1916 1917 1985 -3 1986 1985 1917 -3 1917 1918 1986 -3 1987 1986 1918 -3 1918 1919 1987 -3 1988 1987 1919 -3 1919 1920 1988 -3 1989 1988 1920 -3 1920 1921 1989 -3 1990 1989 1921 -3 1921 1922 1990 -3 1991 1990 1922 -3 1922 1923 1991 -3 1992 1991 1923 -3 1923 1924 1993 -3 1923 1993 1992 -3 1924 1925 1994 -3 1924 1994 1993 -3 1925 4997 5001 -3 1925 5001 1994 -3 1926 1927 1996 -3 1926 1996 1995 -3 1926 1995 5000 -3 5004 5000 1995 -3 1927 1928 1997 -3 1927 1997 1996 -3 1928 1929 1998 -3 1928 1998 1997 -3 1929 1930 1999 -3 1929 1999 1998 -3 1930 1931 2000 -3 1930 2000 1999 -3 1931 1932 2001 -3 1931 2001 2000 -3 1932 1933 2002 -3 1932 2002 2001 -3 1933 1934 2003 -3 1933 2003 2002 -3 1934 1935 2004 -3 1934 2004 2003 -3 1935 1936 2005 -3 1935 2005 2004 -3 1936 1937 2006 -3 1936 2006 2005 -3 1937 1938 2007 -3 1937 2007 2006 -3 1938 1939 2008 -3 1938 2008 2007 -3 1939 1940 2009 -3 1939 2009 2008 -3 1940 1941 2010 -3 1940 2010 2009 -3 1941 1942 2011 -3 1941 2011 2010 -3 1942 1943 2012 -3 1942 2012 2011 -3 1943 1944 2013 -3 1943 2013 2012 -3 1944 1945 2014 -3 1944 2014 2013 -3 1945 1946 2015 -3 1945 2015 2014 -3 1946 1947 2016 -3 1946 2016 2015 -3 1947 1948 2017 -3 1947 2017 2016 -3 1948 1949 2018 -3 1948 2018 2017 -3 1949 1950 2019 -3 1949 2019 2018 -3 1950 1951 2019 -3 2020 2019 1951 -3 1951 1952 2020 -3 2021 2020 1952 -3 1952 1953 2021 -3 2022 2021 1953 -3 1953 1954 2022 -3 2023 2022 1954 -3 1954 1955 2023 -3 2024 2023 1955 -3 1955 1956 2024 -3 2025 2024 1956 -3 1956 1957 2025 -3 2026 2025 1957 -3 1957 1958 2026 -3 2027 2026 1958 -3 1958 1959 2027 -3 2028 2027 1959 -3 1959 1960 2028 -3 2029 2028 1960 -3 1960 1961 2029 -3 2030 2029 1961 -3 1961 1962 2030 -3 2031 2030 1962 -3 1962 1963 2031 -3 2032 2031 1963 -3 1963 1964 2032 -3 2033 2032 1964 -3 1964 1965 2033 -3 2034 2033 1965 -3 1965 1966 2034 -3 2035 2034 1966 -3 1966 1967 2035 -3 2036 2035 1967 -3 1967 1968 2036 -3 2037 2036 1968 -3 1968 1969 2037 -3 2038 2037 1969 -3 1969 1970 2038 -3 2039 2038 1970 -3 1970 1971 2039 -3 2040 2039 1971 -3 1971 1972 2040 -3 2041 2040 1972 -3 1972 1973 2041 -3 2042 2041 1973 -3 1973 1974 2042 -3 2043 2042 1974 -3 1974 1975 2043 -3 2044 2043 1975 -3 1975 1976 2044 -3 2045 2044 1976 -3 1976 1977 2045 -3 2046 2045 1977 -3 1977 1978 2046 -3 2047 2046 1978 -3 1978 1979 2048 -3 1978 2048 2047 -3 1979 1980 2049 -3 1979 2049 2048 -3 1980 1981 2050 -3 1980 2050 2049 -3 1981 1982 2051 -3 1981 2051 2050 -3 1982 1983 2052 -3 1982 2052 2051 -3 1983 1984 2053 -3 1983 2053 2052 -3 1984 1985 2054 -3 1984 2054 2053 -3 1985 1986 2055 -3 1985 2055 2054 -3 1986 1987 2056 -3 1986 2056 2055 -3 1987 1988 2057 -3 1987 2057 2056 -3 1988 1989 2058 -3 1988 2058 2057 -3 1989 1990 2059 -3 1989 2059 2058 -3 1990 1991 2060 -3 1990 2060 2059 -3 1991 1992 2061 -3 1991 2061 2060 -3 1992 1993 2062 -3 1992 2062 2061 -3 1993 1994 2063 -3 1993 2063 2062 -3 1994 5001 2063 -3 5005 2063 5001 -3 1995 1996 2065 -3 1995 2065 2064 -3 1995 2064 5008 -3 1995 5008 5004 -3 1996 1997 2066 -3 1996 2066 2065 -3 1997 1998 2067 -3 1997 2067 2066 -3 1998 1999 2068 -3 1998 2068 2067 -3 1999 2000 2069 -3 1999 2069 2068 -3 2000 2001 2070 -3 2000 2070 2069 -3 2001 2002 2071 -3 2001 2071 2070 -3 2002 2003 2072 -3 2002 2072 2071 -3 2003 2004 2073 -3 2003 2073 2072 -3 2004 2005 2074 -3 2004 2074 2073 -3 2005 2006 2074 -3 2075 2074 2006 -3 2006 2007 2075 -3 2076 2075 2007 -3 2007 2008 2076 -3 2077 2076 2008 -3 2008 2009 2077 -3 2078 2077 2009 -3 2009 2010 2078 -3 2079 2078 2010 -3 2010 2011 2079 -3 2080 2079 2011 -3 2011 2012 2080 -3 2081 
2080 2012 -3 2012 2013 2081 -3 2082 2081 2013 -3 2013 2014 2082 -3 2083 2082 2014 -3 2014 2015 2083 -3 2084 2083 2015 -3 2015 2016 2084 -3 2085 2084 2016 -3 2016 2017 2085 -3 2086 2085 2017 -3 2017 2018 2086 -3 2087 2086 2018 -3 2018 2019 2087 -3 2088 2087 2019 -3 2019 2020 2088 -3 2089 2088 2020 -3 2020 2021 2089 -3 2090 2089 2021 -3 2021 2022 2090 -3 2091 2090 2022 -3 2022 2023 2091 -3 2092 2091 2023 -3 2023 2024 2092 -3 2093 2092 2024 -3 2024 2025 2093 -3 2094 2093 2025 -3 2025 2026 2094 -3 2095 2094 2026 -3 2026 2027 2095 -3 2096 2095 2027 -3 2027 2028 2096 -3 2097 2096 2028 -3 2028 2029 2097 -3 2098 2097 2029 -3 2029 2030 2098 -3 2099 2098 2030 -3 2030 2031 2099 -3 2100 2099 2031 -3 2031 2032 2100 -3 2101 2100 2032 -3 2032 2033 2101 -3 2102 2101 2033 -3 2033 2034 2103 -3 2033 2103 2102 -3 2034 2035 2104 -3 2034 2104 2103 -3 2035 2036 2105 -3 2035 2105 2104 -3 2036 2037 2106 -3 2036 2106 2105 -3 2037 2038 2107 -3 2037 2107 2106 -3 2038 2039 2108 -3 2038 2108 2107 -3 2039 2040 2109 -3 2039 2109 2108 -3 2040 2041 2110 -3 2040 2110 2109 -3 2041 2042 2111 -3 2041 2111 2110 -3 2042 2043 2112 -3 2042 2112 2111 -3 2043 2044 2113 -3 2043 2113 2112 -3 2044 2045 2114 -3 2044 2114 2113 -3 2045 2046 2115 -3 2045 2115 2114 -3 2046 2047 2116 -3 2046 2116 2115 -3 2047 2048 2117 -3 2047 2117 2116 -3 2048 2049 2118 -3 2048 2118 2117 -3 2049 2050 2119 -3 2049 2119 2118 -3 2050 2051 2120 -3 2050 2120 2119 -3 2051 2052 2121 -3 2051 2121 2120 -3 2052 2053 2122 -3 2052 2122 2121 -3 2053 2054 2123 -3 2053 2123 2122 -3 2054 2055 2124 -3 2054 2124 2123 -3 2055 2056 2125 -3 2055 2125 2124 -3 2056 2057 2126 -3 2056 2126 2125 -3 2057 2058 2127 -3 2057 2127 2126 -3 2058 2059 2128 -3 2058 2128 2127 -3 2059 2060 2129 -3 2059 2129 2128 -3 2060 2061 2130 -3 2060 2130 2129 -3 2061 2062 2130 -3 2131 2130 2062 -3 2062 2063 2131 -3 2132 2131 2063 -3 2063 5005 5009 -3 2063 5009 2132 -3 2064 2065 2133 -3 2134 2133 2065 -3 2064 2133 5012 -3 2064 5012 5008 -3 2065 2066 2134 -3 2135 2134 2066 -3 2066 2067 2135 -3 2136 2135 2067 -3 2067 2068 2136 -3 2137 2136 2068 -3 2068 2069 2137 -3 2138 2137 2069 -3 2069 2070 2138 -3 2139 2138 2070 -3 2070 2071 2139 -3 2140 2139 2071 -3 2071 2072 2140 -3 2141 2140 2072 -3 2072 2073 2141 -3 2142 2141 2073 -3 2073 2074 2142 -3 2143 2142 2074 -3 2074 2075 2143 -3 2144 2143 2075 -3 2075 2076 2144 -3 2145 2144 2076 -3 2076 2077 2145 -3 2146 2145 2077 -3 2077 2078 2146 -3 2147 2146 2078 -3 2078 2079 2147 -3 2148 2147 2079 -3 2079 2080 2148 -3 2149 2148 2080 -3 2080 2081 2149 -3 2150 2149 2081 -3 2081 2082 2150 -3 2151 2150 2082 -3 2082 2083 2151 -3 2152 2151 2083 -3 2083 2084 2152 -3 2153 2152 2084 -3 2084 2085 2153 -3 2154 2153 2085 -3 2085 2086 2154 -3 2155 2154 2086 -3 2086 2087 2155 -3 2156 2155 2087 -3 2087 2088 2156 -3 2157 2156 2088 -3 2088 2089 2157 -3 2158 2157 2089 -3 2089 2090 2159 -3 2089 2159 2158 -3 2090 2091 2160 -3 2090 2160 2159 -3 2091 2092 2161 -3 2091 2161 2160 -3 2092 2093 2162 -3 2092 2162 2161 -3 2093 2094 2163 -3 2093 2163 2162 -3 2094 2095 2164 -3 2094 2164 2163 -3 2095 2096 2165 -3 2095 2165 2164 -3 2096 2097 2166 -3 2096 2166 2165 -3 2097 2098 2167 -3 2097 2167 2166 -3 2098 2099 2168 -3 2098 2168 2167 -3 2099 2100 2169 -3 2099 2169 2168 -3 2100 2101 2170 -3 2100 2170 2169 -3 2101 2102 2171 -3 2101 2171 2170 -3 2102 2103 2172 -3 2102 2172 2171 -3 2103 2104 2173 -3 2103 2173 2172 -3 2104 2105 2174 -3 2104 2174 2173 -3 2105 2106 2175 -3 2105 2175 2174 -3 2106 2107 2176 -3 2106 2176 2175 -3 2107 2108 2177 -3 2107 2177 2176 -3 2108 2109 2178 -3 2108 2178 2177 -3 2109 2110 
2179 -3 2109 2179 2178 -3 2110 2111 2180 -3 2110 2180 2179 -3 2111 2112 2181 -3 2111 2181 2180 -3 2112 2113 2182 -3 2112 2182 2181 -3 2113 2114 2183 -3 2113 2183 2182 -3 2114 2115 2184 -3 2114 2184 2183 -3 2115 2116 2185 -3 2115 2185 2184 -3 2116 2117 2186 -3 2116 2186 2185 -3 2117 2118 2186 -3 2187 2186 2118 -3 2118 2119 2187 -3 2188 2187 2119 -3 2119 2120 2188 -3 2189 2188 2120 -3 2120 2121 2189 -3 2190 2189 2121 -3 2121 2122 2190 -3 2191 2190 2122 -3 2122 2123 2191 -3 2192 2191 2123 -3 2123 2124 2192 -3 2193 2192 2124 -3 2124 2125 2193 -3 2194 2193 2125 -3 2125 2126 2194 -3 2195 2194 2126 -3 2126 2127 2195 -3 2196 2195 2127 -3 2127 2128 2196 -3 2197 2196 2128 -3 2128 2129 2197 -3 2198 2197 2129 -3 2129 2130 2198 -3 2199 2198 2130 -3 2130 2131 2199 -3 2200 2199 2131 -3 2131 2132 2200 -3 2201 2200 2132 -3 2132 5009 2201 -3 5013 2201 5009 -3 2133 2134 2202 -3 2203 2202 2134 -3 2133 2202 5016 -3 2133 5016 5012 -3 2134 2135 2203 -3 2204 2203 2135 -3 2135 2136 2204 -3 2205 2204 2136 -3 2136 2137 2205 -3 2206 2205 2137 -3 2137 2138 2206 -3 2207 2206 2138 -3 2138 2139 2207 -3 2208 2207 2139 -3 2139 2140 2208 -3 2209 2208 2140 -3 2140 2141 2209 -3 2210 2209 2141 -3 2141 2142 2210 -3 2211 2210 2142 -3 2142 2143 2211 -3 2212 2211 2143 -3 2143 2144 2212 -3 2213 2212 2144 -3 2144 2145 2213 -3 2214 2213 2145 -3 2145 2146 2215 -3 2145 2215 2214 -3 2146 2147 2216 -3 2146 2216 2215 -3 2147 2148 2217 -3 2147 2217 2216 -3 2148 2149 2218 -3 2148 2218 2217 -3 2149 2150 2219 -3 2149 2219 2218 -3 2150 2151 2220 -3 2150 2220 2219 -3 2151 2152 2221 -3 2151 2221 2220 -3 2152 2153 2222 -3 2152 2222 2221 -3 2153 2154 2223 -3 2153 2223 2222 -3 2154 2155 2224 -3 2154 2224 2223 -3 2155 2156 2225 -3 2155 2225 2224 -3 2156 2157 2226 -3 2156 2226 2225 -3 2157 2158 2227 -3 2157 2227 2226 -3 2158 2159 2228 -3 2158 2228 2227 -3 2159 2160 2229 -3 2159 2229 2228 -3 2160 2161 2230 -3 2160 2230 2229 -3 2161 2162 2231 -3 2161 2231 2230 -3 2162 2163 2232 -3 2162 2232 2231 -3 2163 2164 2233 -3 2163 2233 2232 -3 2164 2165 2234 -3 2164 2234 2233 -3 2165 2166 2235 -3 2165 2235 2234 -3 2166 2167 2236 -3 2166 2236 2235 -3 2167 2168 2237 -3 2167 2237 2236 -3 2168 2169 2238 -3 2168 2238 2237 -3 2169 2170 2239 -3 2169 2239 2238 -3 2170 2171 2240 -3 2170 2240 2239 -3 2171 2172 2241 -3 2171 2241 2240 -3 2172 2173 2242 -3 2172 2242 2241 -3 2173 2174 2243 -3 2173 2243 2242 -3 2174 2175 2243 -3 2244 2243 2175 -3 2175 2176 2244 -3 2245 2244 2176 -3 2176 2177 2245 -3 2246 2245 2177 -3 2177 2178 2246 -3 2247 2246 2178 -3 2178 2179 2247 -3 2248 2247 2179 -3 2179 2180 2248 -3 2249 2248 2180 -3 2180 2181 2249 -3 2250 2249 2181 -3 2181 2182 2250 -3 2251 2250 2182 -3 2182 2183 2251 -3 2252 2251 2183 -3 2183 2184 2252 -3 2253 2252 2184 -3 2184 2185 2253 -3 2254 2253 2185 -3 2185 2186 2254 -3 2255 2254 2186 -3 2186 2187 2255 -3 2256 2255 2187 -3 2187 2188 2256 -3 2257 2256 2188 -3 2188 2189 2257 -3 2258 2257 2189 -3 2189 2190 2258 -3 2259 2258 2190 -3 2190 2191 2259 -3 2260 2259 2191 -3 2191 2192 2260 -3 2261 2260 2192 -3 2192 2193 2261 -3 2262 2261 2193 -3 2193 2194 2262 -3 2263 2262 2194 -3 2194 2195 2263 -3 2264 2263 2195 -3 2195 2196 2264 -3 2265 2264 2196 -3 2196 2197 2265 -3 2266 2265 2197 -3 2197 2198 2266 -3 2267 2266 2198 -3 2198 2199 2267 -3 2268 2267 2199 -3 2199 2200 2268 -3 2269 2268 2200 -3 2200 2201 2269 -3 2270 2269 2201 -3 2201 5013 5017 -3 2201 5017 2270 -3 2202 2203 2271 -3 2272 2271 2203 -3 2202 2271 5016 -3 5020 5016 2271 -3 2203 2204 2273 -3 2203 2273 2272 -3 2204 2205 2274 -3 2204 2274 2273 -3 2205 2206 2275 -3 2205 2275 2274 -3 
2206 2207 2276 -3 2206 2276 2275 -3 2207 2208 2277 -3 2207 2277 2276 -3 2208 2209 2278 -3 2208 2278 2277 -3 2209 2210 2279 -3 2209 2279 2278 -3 2210 2211 2280 -3 2210 2280 2279 -3 2211 2212 2281 -3 2211 2281 2280 -3 2212 2213 2282 -3 2212 2282 2281 -3 2213 2214 2283 -3 2213 2283 2282 -3 2214 2215 2284 -3 2214 2284 2283 -3 2215 2216 2285 -3 2215 2285 2284 -3 2216 2217 2286 -3 2216 2286 2285 -3 2217 2218 2287 -3 2217 2287 2286 -3 2218 2219 2288 -3 2218 2288 2287 -3 2219 2220 2289 -3 2219 2289 2288 -3 2220 2221 2290 -3 2220 2290 2289 -3 2221 2222 2291 -3 2221 2291 2290 -3 2222 2223 2292 -3 2222 2292 2291 -3 2223 2224 2293 -3 2223 2293 2292 -3 2224 2225 2294 -3 2224 2294 2293 -3 2225 2226 2295 -3 2225 2295 2294 -3 2226 2227 2296 -3 2226 2296 2295 -3 2227 2228 2297 -3 2227 2297 2296 -3 2228 2229 2298 -3 2228 2298 2297 -3 2229 2230 2299 -3 2229 2299 2298 -3 2230 2231 2300 -3 2230 2300 2299 -3 2231 2232 2301 -3 2231 2301 2300 -3 2232 2233 2301 -3 2302 2301 2233 -3 2233 2234 2302 -3 2303 2302 2234 -3 2234 2235 2303 -3 2304 2303 2235 -3 2235 2236 2304 -3 2305 2304 2236 -3 2236 2237 2305 -3 2306 2305 2237 -3 2237 2238 2306 -3 2307 2306 2238 -3 2238 2239 2307 -3 2308 2307 2239 -3 2239 2240 2308 -3 2309 2308 2240 -3 2240 2241 2309 -3 2310 2309 2241 -3 2241 2242 2310 -3 2311 2310 2242 -3 2242 2243 2311 -3 2312 2311 2243 -3 2243 2244 2312 -3 2313 2312 2244 -3 2244 2245 2313 -3 2314 2313 2245 -3 2245 2246 2314 -3 2315 2314 2246 -3 2246 2247 2315 -3 2316 2315 2247 -3 2247 2248 2316 -3 2317 2316 2248 -3 2248 2249 2317 -3 2318 2317 2249 -3 2249 2250 2318 -3 2319 2318 2250 -3 2250 2251 2319 -3 2320 2319 2251 -3 2251 2252 2320 -3 2321 2320 2252 -3 2252 2253 2321 -3 2322 2321 2253 -3 2253 2254 2322 -3 2323 2322 2254 -3 2254 2255 2323 -3 2324 2323 2255 -3 2255 2256 2324 -3 2325 2324 2256 -3 2256 2257 2325 -3 2326 2325 2257 -3 2257 2258 2326 -3 2327 2326 2258 -3 2258 2259 2327 -3 2328 2327 2259 -3 2259 2260 2328 -3 2329 2328 2260 -3 2260 2261 2329 -3 2330 2329 2261 -3 2261 2262 2331 -3 2261 2331 2330 -3 2262 2263 2332 -3 2262 2332 2331 -3 2263 2264 2333 -3 2263 2333 2332 -3 2264 2265 2334 -3 2264 2334 2333 -3 2265 2266 2335 -3 2265 2335 2334 -3 2266 2267 2336 -3 2266 2336 2335 -3 2267 2268 2337 -3 2267 2337 2336 -3 2268 2269 2338 -3 2268 2338 2337 -3 2269 2270 2339 -3 2269 2339 2338 -3 2270 5017 2339 -3 5021 2339 5017 -3 2271 2272 2341 -3 2271 2341 2340 -3 2271 2340 5020 -3 5024 5020 2340 -3 2272 2273 2342 -3 2272 2342 2341 -3 2273 2274 2343 -3 2273 2343 2342 -3 2274 2275 2344 -3 2274 2344 2343 -3 2275 2276 2345 -3 2275 2345 2344 -3 2276 2277 2346 -3 2276 2346 2345 -3 2277 2278 2347 -3 2277 2347 2346 -3 2278 2279 2348 -3 2278 2348 2347 -3 2279 2280 2349 -3 2279 2349 2348 -3 2280 2281 2350 -3 2280 2350 2349 -3 2281 2282 2351 -3 2281 2351 2350 -3 2282 2283 2352 -3 2282 2352 2351 -3 2283 2284 2353 -3 2283 2353 2352 -3 2284 2285 2354 -3 2284 2354 2353 -3 2285 2286 2355 -3 2285 2355 2354 -3 2286 2287 2356 -3 2286 2356 2355 -3 2287 2288 2357 -3 2287 2357 2356 -3 2288 2289 2358 -3 2288 2358 2357 -3 2289 2290 2359 -3 2289 2359 2358 -3 2290 2291 2359 -3 2360 2359 2291 -3 2291 2292 2360 -3 2361 2360 2292 -3 2292 2293 2361 -3 2362 2361 2293 -3 2293 2294 2362 -3 2363 2362 2294 -3 2294 2295 2363 -3 2364 2363 2295 -3 2295 2296 2364 -3 2365 2364 2296 -3 2296 2297 2365 -3 2366 2365 2297 -3 2297 2298 2366 -3 2367 2366 2298 -3 2298 2299 2367 -3 2368 2367 2299 -3 2299 2300 2368 -3 2369 2368 2300 -3 2300 2301 2369 -3 2370 2369 2301 -3 2301 2302 2370 -3 2371 2370 2302 -3 2302 2303 2371 -3 2372 2371 2303 -3 2303 2304 2372 -3 2373 
2372 2304 -3 2304 2305 2373 -3 2374 2373 2305 -3 2305 2306 2374 -3 2375 2374 2306 -3 2306 2307 2375 -3 2376 2375 2307 -3 2307 2308 2376 -3 2377 2376 2308 -3 2308 2309 2377 -3 2378 2377 2309 -3 2309 2310 2378 -3 2379 2378 2310 -3 2310 2311 2379 -3 2380 2379 2311 -3 2311 2312 2380 -3 2381 2380 2312 -3 2312 2313 2381 -3 2382 2381 2313 -3 2313 2314 2382 -3 2383 2382 2314 -3 2314 2315 2383 -3 2384 2383 2315 -3 2315 2316 2384 -3 2385 2384 2316 -3 2316 2317 2385 -3 2386 2385 2317 -3 2317 2318 2386 -3 2387 2386 2318 -3 2318 2319 2387 -3 2388 2387 2319 -3 2319 2320 2388 -3 2389 2388 2320 -3 2320 2321 2390 -3 2320 2390 2389 -3 2321 2322 2391 -3 2321 2391 2390 -3 2322 2323 2392 -3 2322 2392 2391 -3 2323 2324 2393 -3 2323 2393 2392 -3 2324 2325 2394 -3 2324 2394 2393 -3 2325 2326 2395 -3 2325 2395 2394 -3 2326 2327 2396 -3 2326 2396 2395 -3 2327 2328 2397 -3 2327 2397 2396 -3 2328 2329 2398 -3 2328 2398 2397 -3 2329 2330 2399 -3 2329 2399 2398 -3 2330 2331 2400 -3 2330 2400 2399 -3 2331 2332 2401 -3 2331 2401 2400 -3 2332 2333 2402 -3 2332 2402 2401 -3 2333 2334 2403 -3 2333 2403 2402 -3 2334 2335 2404 -3 2334 2404 2403 -3 2335 2336 2405 -3 2335 2405 2404 -3 2336 2337 2406 -3 2336 2406 2405 -3 2337 2338 2407 -3 2337 2407 2406 -3 2338 2339 2408 -3 2338 2408 2407 -3 2339 5021 5025 -3 2339 5025 2408 -3 2340 2341 2410 -3 2340 2410 2409 -3 2340 2409 5028 -3 2340 5028 5024 -3 2341 2342 2411 -3 2341 2411 2410 -3 2342 2343 2412 -3 2342 2412 2411 -3 2343 2344 2413 -3 2343 2413 2412 -3 2344 2345 2414 -3 2344 2414 2413 -3 2345 2346 2415 -3 2345 2415 2414 -3 2346 2347 2416 -3 2346 2416 2415 -3 2347 2348 2417 -3 2347 2417 2416 -3 2348 2349 2418 -3 2348 2418 2417 -3 2349 2350 2419 -3 2349 2419 2418 -3 2350 2351 2419 -3 2420 2419 2351 -3 2351 2352 2420 -3 2421 2420 2352 -3 2352 2353 2421 -3 2422 2421 2353 -3 2353 2354 2422 -3 2423 2422 2354 -3 2354 2355 2423 -3 2424 2423 2355 -3 2355 2356 2424 -3 2425 2424 2356 -3 2356 2357 2425 -3 2426 2425 2357 -3 2357 2358 2426 -3 2427 2426 2358 -3 2358 2359 2427 -3 2428 2427 2359 -3 2359 2360 2428 -3 2429 2428 2360 -3 2360 2361 2429 -3 2430 2429 2361 -3 2361 2362 2430 -3 2431 2430 2362 -3 2362 2363 2431 -3 2432 2431 2363 -3 2363 2364 2432 -3 2433 2432 2364 -3 2364 2365 2433 -3 2434 2433 2365 -3 2365 2366 2434 -3 2435 2434 2366 -3 2366 2367 2435 -3 2436 2435 2367 -3 2367 2368 2436 -3 2437 2436 2368 -3 2368 2369 2437 -3 2438 2437 2369 -3 2369 2370 2438 -3 2439 2438 2370 -3 2370 2371 2439 -3 2440 2439 2371 -3 2371 2372 2440 -3 2441 2440 2372 -3 2372 2373 2441 -3 2442 2441 2373 -3 2373 2374 2442 -3 2443 2442 2374 -3 2374 2375 2443 -3 2444 2443 2375 -3 2375 2376 2444 -3 2445 2444 2376 -3 2376 2377 2445 -3 2446 2445 2377 -3 2377 2378 2446 -3 2447 2446 2378 -3 2378 2379 2447 -3 2448 2447 2379 -3 2379 2380 2449 -3 2379 2449 2448 -3 2380 2381 2450 -3 2380 2450 2449 -3 2381 2382 2451 -3 2381 2451 2450 -3 2382 2383 2452 -3 2382 2452 2451 -3 2383 2384 2453 -3 2383 2453 2452 -3 2384 2385 2454 -3 2384 2454 2453 -3 2385 2386 2455 -3 2385 2455 2454 -3 2386 2387 2456 -3 2386 2456 2455 -3 2387 2388 2457 -3 2387 2457 2456 -3 2388 2389 2458 -3 2388 2458 2457 -3 2389 2390 2459 -3 2389 2459 2458 -3 2390 2391 2460 -3 2390 2460 2459 -3 2391 2392 2461 -3 2391 2461 2460 -3 2392 2393 2462 -3 2392 2462 2461 -3 2393 2394 2463 -3 2393 2463 2462 -3 2394 2395 2464 -3 2394 2464 2463 -3 2395 2396 2465 -3 2395 2465 2464 -3 2396 2397 2466 -3 2396 2466 2465 -3 2397 2398 2467 -3 2397 2467 2466 -3 2398 2399 2468 -3 2398 2468 2467 -3 2399 2400 2469 -3 2399 2469 2468 -3 2400 2401 2470 -3 2400 2470 2469 -3 2401 2402 
2471 -3 2401 2471 2470 -3 2402 2403 2472 -3 2402 2472 2471 -3 2403 2404 2473 -3 2403 2473 2472 -3 2404 2405 2474 -3 2404 2474 2473 -3 2405 2406 2475 -3 2405 2475 2474 -3 2406 2407 2476 -3 2406 2476 2475 -3 2407 2408 2477 -3 2407 2477 2476 -3 2408 5025 2477 -3 5029 2477 5025 -3 2409 2410 2479 -3 2409 2479 2478 -3 2409 2478 5032 -3 2409 5032 5028 -3 2410 2411 2479 -3 2480 2479 2411 -3 2411 2412 2480 -3 2481 2480 2412 -3 2412 2413 2481 -3 2482 2481 2413 -3 2413 2414 2482 -3 2483 2482 2414 -3 2414 2415 2483 -3 2484 2483 2415 -3 2415 2416 2484 -3 2485 2484 2416 -3 2416 2417 2485 -3 2486 2485 2417 -3 2417 2418 2486 -3 2487 2486 2418 -3 2418 2419 2487 -3 2488 2487 2419 -3 2419 2420 2488 -3 2489 2488 2420 -3 2420 2421 2489 -3 2490 2489 2421 -3 2421 2422 2490 -3 2491 2490 2422 -3 2422 2423 2491 -3 2492 2491 2423 -3 2423 2424 2492 -3 2493 2492 2424 -3 2424 2425 2493 -3 2494 2493 2425 -3 2425 2426 2494 -3 2495 2494 2426 -3 2426 2427 2495 -3 2496 2495 2427 -3 2427 2428 2496 -3 2497 2496 2428 -3 2428 2429 2497 -3 2498 2497 2429 -3 2429 2430 2498 -3 2499 2498 2430 -3 2430 2431 2499 -3 2500 2499 2431 -3 2431 2432 2500 -3 2501 2500 2432 -3 2432 2433 2501 -3 2502 2501 2433 -3 2433 2434 2502 -3 2503 2502 2434 -3 2434 2435 2503 -3 2504 2503 2435 -3 2435 2436 2504 -3 2505 2504 2436 -3 2436 2437 2505 -3 2506 2505 2437 -3 2437 2438 2506 -3 2507 2506 2438 -3 2438 2439 2507 -3 2508 2507 2439 -3 2439 2440 2508 -3 2509 2508 2440 -3 2440 2441 2510 -3 2440 2510 2509 -3 2441 2442 2511 -3 2441 2511 2510 -3 2442 2443 2512 -3 2442 2512 2511 -3 2443 2444 2513 -3 2443 2513 2512 -3 2444 2445 2514 -3 2444 2514 2513 -3 2445 2446 2515 -3 2445 2515 2514 -3 2446 2447 2516 -3 2446 2516 2515 -3 2447 2448 2517 -3 2447 2517 2516 -3 2448 2449 2518 -3 2448 2518 2517 -3 2449 2450 2519 -3 2449 2519 2518 -3 2450 2451 2520 -3 2450 2520 2519 -3 2451 2452 2521 -3 2451 2521 2520 -3 2452 2453 2522 -3 2452 2522 2521 -3 2453 2454 2523 -3 2453 2523 2522 -3 2454 2455 2524 -3 2454 2524 2523 -3 2455 2456 2525 -3 2455 2525 2524 -3 2456 2457 2526 -3 2456 2526 2525 -3 2457 2458 2527 -3 2457 2527 2526 -3 2458 2459 2528 -3 2458 2528 2527 -3 2459 2460 2529 -3 2459 2529 2528 -3 2460 2461 2530 -3 2460 2530 2529 -3 2461 2462 2531 -3 2461 2531 2530 -3 2462 2463 2532 -3 2462 2532 2531 -3 2463 2464 2533 -3 2463 2533 2532 -3 2464 2465 2534 -3 2464 2534 2533 -3 2465 2466 2535 -3 2465 2535 2534 -3 2466 2467 2536 -3 2466 2536 2535 -3 2467 2468 2537 -3 2467 2537 2536 -3 2468 2469 2538 -3 2468 2538 2537 -3 2469 2470 2539 -3 2469 2539 2538 -3 2470 2471 2539 -3 2540 2539 2471 -3 2471 2472 2540 -3 2541 2540 2472 -3 2472 2473 2541 -3 2542 2541 2473 -3 2473 2474 2542 -3 2543 2542 2474 -3 2474 2475 2543 -3 2544 2543 2475 -3 2475 2476 2544 -3 2545 2544 2476 -3 2476 2477 2545 -3 2546 2545 2477 -3 2477 5029 5033 -3 2477 5033 2546 -3 2478 2479 2547 -3 2548 2547 2479 -3 2478 2547 5036 -3 2478 5036 5032 -3 2479 2480 2548 -3 2549 2548 2480 -3 2480 2481 2549 -3 2550 2549 2481 -3 2481 2482 2550 -3 2551 2550 2482 -3 2482 2483 2551 -3 2552 2551 2483 -3 2483 2484 2552 -3 2553 2552 2484 -3 2484 2485 2553 -3 2554 2553 2485 -3 2485 2486 2554 -3 2555 2554 2486 -3 2486 2487 2555 -3 2556 2555 2487 -3 2487 2488 2556 -3 2557 2556 2488 -3 2488 2489 2557 -3 2558 2557 2489 -3 2489 2490 2558 -3 2559 2558 2490 -3 2490 2491 2559 -3 2560 2559 2491 -3 2491 2492 2560 -3 2561 2560 2492 -3 2492 2493 2561 -3 2562 2561 2493 -3 2493 2494 2562 -3 2563 2562 2494 -3 2494 2495 2563 -3 2564 2563 2495 -3 2495 2496 2564 -3 2565 2564 2496 -3 2496 2497 2565 -3 2566 2565 2497 -3 2497 2498 2566 -3 2567 2566 2498 -3 
2498 2499 2567 -3 2568 2567 2499 -3 2499 2500 2568 -3 2569 2568 2500 -3 2500 2501 2569 -3 2570 2569 2501 -3 2501 2502 2571 -3 2501 2571 2570 -3 2502 2503 2572 -3 2502 2572 2571 -3 2503 2504 2573 -3 2503 2573 2572 -3 2504 2505 2574 -3 2504 2574 2573 -3 2505 2506 2575 -3 2505 2575 2574 -3 2506 2507 2576 -3 2506 2576 2575 -3 2507 2508 2577 -3 2507 2577 2576 -3 2508 2509 2578 -3 2508 2578 2577 -3 2509 2510 2579 -3 2509 2579 2578 -3 2510 2511 2580 -3 2510 2580 2579 -3 2511 2512 2581 -3 2511 2581 2580 -3 2512 2513 2582 -3 2512 2582 2581 -3 2513 2514 2583 -3 2513 2583 2582 -3 2514 2515 2584 -3 2514 2584 2583 -3 2515 2516 2585 -3 2515 2585 2584 -3 2516 2517 2586 -3 2516 2586 2585 -3 2517 2518 2587 -3 2517 2587 2586 -3 2518 2519 2588 -3 2518 2588 2587 -3 2519 2520 2589 -3 2519 2589 2588 -3 2520 2521 2590 -3 2520 2590 2589 -3 2521 2522 2591 -3 2521 2591 2590 -3 2522 2523 2592 -3 2522 2592 2591 -3 2523 2524 2593 -3 2523 2593 2592 -3 2524 2525 2594 -3 2524 2594 2593 -3 2525 2526 2595 -3 2525 2595 2594 -3 2526 2527 2596 -3 2526 2596 2595 -3 2527 2528 2597 -3 2527 2597 2596 -3 2528 2529 2598 -3 2528 2598 2597 -3 2529 2530 2599 -3 2529 2599 2598 -3 2530 2531 2600 -3 2530 2600 2599 -3 2531 2532 2601 -3 2531 2601 2600 -3 2532 2533 2601 -3 2602 2601 2533 -3 2533 2534 2602 -3 2603 2602 2534 -3 2534 2535 2603 -3 2604 2603 2535 -3 2535 2536 2604 -3 2605 2604 2536 -3 2536 2537 2605 -3 2606 2605 2537 -3 2537 2538 2606 -3 2607 2606 2538 -3 2538 2539 2607 -3 2608 2607 2539 -3 2539 2540 2608 -3 2609 2608 2540 -3 2540 2541 2609 -3 2610 2609 2541 -3 2541 2542 2610 -3 2611 2610 2542 -3 2542 2543 2611 -3 2612 2611 2543 -3 2543 2544 2612 -3 2613 2612 2544 -3 2544 2545 2613 -3 2614 2613 2545 -3 2545 2546 2614 -3 2615 2614 2546 -3 2546 5033 2615 -3 5037 2615 5033 -3 2547 2548 2616 -3 2617 2616 2548 -3 2547 2616 5040 -3 2547 5040 5036 -3 2548 2549 2617 -3 2618 2617 2549 -3 2549 2550 2618 -3 2619 2618 2550 -3 2550 2551 2619 -3 2620 2619 2551 -3 2551 2552 2620 -3 2621 2620 2552 -3 2552 2553 2621 -3 2622 2621 2553 -3 2553 2554 2622 -3 2623 2622 2554 -3 2554 2555 2623 -3 2624 2623 2555 -3 2555 2556 2624 -3 2625 2624 2556 -3 2556 2557 2625 -3 2626 2625 2557 -3 2557 2558 2626 -3 2627 2626 2558 -3 2558 2559 2627 -3 2628 2627 2559 -3 2559 2560 2628 -3 2629 2628 2560 -3 2560 2561 2629 -3 2630 2629 2561 -3 2561 2562 2630 -3 2631 2630 2562 -3 2562 2563 2631 -3 2632 2631 2563 -3 2563 2564 2633 -3 2563 2633 2632 -3 2564 2565 2634 -3 2564 2634 2633 -3 2565 2566 2635 -3 2565 2635 2634 -3 2566 2567 2636 -3 2566 2636 2635 -3 2567 2568 2637 -3 2567 2637 2636 -3 2568 2569 2638 -3 2568 2638 2637 -3 2569 2570 2639 -3 2569 2639 2638 -3 2570 2571 2640 -3 2570 2640 2639 -3 2571 2572 2641 -3 2571 2641 2640 -3 2572 2573 2642 -3 2572 2642 2641 -3 2573 2574 2643 -3 2573 2643 2642 -3 2574 2575 2644 -3 2574 2644 2643 -3 2575 2576 2645 -3 2575 2645 2644 -3 2576 2577 2646 -3 2576 2646 2645 -3 2577 2578 2647 -3 2577 2647 2646 -3 2578 2579 2648 -3 2578 2648 2647 -3 2579 2580 2649 -3 2579 2649 2648 -3 2580 2581 2650 -3 2580 2650 2649 -3 2581 2582 2651 -3 2581 2651 2650 -3 2582 2583 2652 -3 2582 2652 2651 -3 2583 2584 2653 -3 2583 2653 2652 -3 2584 2585 2654 -3 2584 2654 2653 -3 2585 2586 2655 -3 2585 2655 2654 -3 2586 2587 2656 -3 2586 2656 2655 -3 2587 2588 2657 -3 2587 2657 2656 -3 2588 2589 2658 -3 2588 2658 2657 -3 2589 2590 2659 -3 2589 2659 2658 -3 2590 2591 2660 -3 2590 2660 2659 -3 2591 2592 2661 -3 2591 2661 2660 -3 2592 2593 2662 -3 2592 2662 2661 -3 2593 2594 2663 -3 2593 2663 2662 -3 2594 2595 2663 -3 2664 2663 2595 -3 2595 2596 2664 -3 2665 
2664 2596 -3 2596 2597 2665 -3 2666 2665 2597 -3 2597 2598 2666 -3 2667 2666 2598 -3 2598 2599 2667 -3 2668 2667 2599 -3 2599 2600 2668 -3 2669 2668 2600 -3 2600 2601 2669 -3 2670 2669 2601 -3 2601 2602 2670 -3 2671 2670 2602 -3 2602 2603 2671 -3 2672 2671 2603 -3 2603 2604 2672 -3 2673 2672 2604 -3 2604 2605 2673 -3 2674 2673 2605 -3 2605 2606 2674 -3 2675 2674 2606 -3 2606 2607 2675 -3 2676 2675 2607 -3 2607 2608 2676 -3 2677 2676 2608 -3 2608 2609 2677 -3 2678 2677 2609 -3 2609 2610 2678 -3 2679 2678 2610 -3 2610 2611 2679 -3 2680 2679 2611 -3 2611 2612 2680 -3 2681 2680 2612 -3 2612 2613 2681 -3 2682 2681 2613 -3 2613 2614 2682 -3 2683 2682 2614 -3 2614 2615 2683 -3 2684 2683 2615 -3 2615 5037 5041 -3 2615 5041 2684 -3 2616 2617 2685 -3 2686 2685 2617 -3 2616 2685 5044 -3 2616 5044 5040 -3 2617 2618 2686 -3 2687 2686 2618 -3 2618 2619 2687 -3 2688 2687 2619 -3 2619 2620 2688 -3 2689 2688 2620 -3 2620 2621 2689 -3 2690 2689 2621 -3 2621 2622 2690 -3 2691 2690 2622 -3 2622 2623 2691 -3 2692 2691 2623 -3 2623 2624 2692 -3 2693 2692 2624 -3 2624 2625 2693 -3 2694 2693 2625 -3 2625 2626 2694 -3 2695 2694 2626 -3 2626 2627 2696 -3 2626 2696 2695 -3 2627 2628 2697 -3 2627 2697 2696 -3 2628 2629 2698 -3 2628 2698 2697 -3 2629 2630 2699 -3 2629 2699 2698 -3 2630 2631 2700 -3 2630 2700 2699 -3 2631 2632 2701 -3 2631 2701 2700 -3 2632 2633 2702 -3 2632 2702 2701 -3 2633 2634 2703 -3 2633 2703 2702 -3 2634 2635 2704 -3 2634 2704 2703 -3 2635 2636 2705 -3 2635 2705 2704 -3 2636 2637 2706 -3 2636 2706 2705 -3 2637 2638 2707 -3 2637 2707 2706 -3 2638 2639 2708 -3 2638 2708 2707 -3 2639 2640 2709 -3 2639 2709 2708 -3 2640 2641 2710 -3 2640 2710 2709 -3 2641 2642 2711 -3 2641 2711 2710 -3 2642 2643 2712 -3 2642 2712 2711 -3 2643 2644 2713 -3 2643 2713 2712 -3 2644 2645 2714 -3 2644 2714 2713 -3 2645 2646 2715 -3 2645 2715 2714 -3 2646 2647 2716 -3 2646 2716 2715 -3 2647 2648 2717 -3 2647 2717 2716 -3 2648 2649 2718 -3 2648 2718 2717 -3 2649 2650 2719 -3 2649 2719 2718 -3 2650 2651 2720 -3 2650 2720 2719 -3 2651 2652 2721 -3 2651 2721 2720 -3 2652 2653 2722 -3 2652 2722 2721 -3 2653 2654 2723 -3 2653 2723 2722 -3 2654 2655 2724 -3 2654 2724 2723 -3 2655 2656 2725 -3 2655 2725 2724 -3 2656 2657 2726 -3 2656 2726 2725 -3 2657 2658 2726 -3 2727 2726 2658 -3 2658 2659 2727 -3 2728 2727 2659 -3 2659 2660 2728 -3 2729 2728 2660 -3 2660 2661 2729 -3 2730 2729 2661 -3 2661 2662 2730 -3 2731 2730 2662 -3 2662 2663 2731 -3 2732 2731 2663 -3 2663 2664 2732 -3 2733 2732 2664 -3 2664 2665 2733 -3 2734 2733 2665 -3 2665 2666 2734 -3 2735 2734 2666 -3 2666 2667 2735 -3 2736 2735 2667 -3 2667 2668 2736 -3 2737 2736 2668 -3 2668 2669 2737 -3 2738 2737 2669 -3 2669 2670 2738 -3 2739 2738 2670 -3 2670 2671 2739 -3 2740 2739 2671 -3 2671 2672 2740 -3 2741 2740 2672 -3 2672 2673 2741 -3 2742 2741 2673 -3 2673 2674 2742 -3 2743 2742 2674 -3 2674 2675 2743 -3 2744 2743 2675 -3 2675 2676 2744 -3 2745 2744 2676 -3 2676 2677 2745 -3 2746 2745 2677 -3 2677 2678 2746 -3 2747 2746 2678 -3 2678 2679 2747 -3 2748 2747 2679 -3 2679 2680 2748 -3 2749 2748 2680 -3 2680 2681 2749 -3 2750 2749 2681 -3 2681 2682 2750 -3 2751 2750 2682 -3 2682 2683 2751 -3 2752 2751 2683 -3 2683 2684 2752 -3 2753 2752 2684 -3 2684 5041 2753 -3 5045 2753 5041 -3 2685 2686 2754 -3 2755 2754 2686 -3 2685 2754 5044 -3 5048 5044 2754 -3 2686 2687 2755 -3 2756 2755 2687 -3 2687 2688 2756 -3 2757 2756 2688 -3 2688 2689 2757 -3 2758 2757 2689 -3 2689 2690 2759 -3 2689 2759 2758 -3 2690 2691 2760 -3 2690 2760 2759 -3 2691 2692 2761 -3 2691 2761 2760 -3 2692 2693 
2762 -3 2692 2762 2761 -3 2693 2694 2763 -3 2693 2763 2762 -3 2694 2695 2764 -3 2694 2764 2763 -3 2695 2696 2765 -3 2695 2765 2764 -3 2696 2697 2766 -3 2696 2766 2765 -3 2697 2698 2767 -3 2697 2767 2766 -3 2698 2699 2768 -3 2698 2768 2767 -3 2699 2700 2769 -3 2699 2769 2768 -3 2700 2701 2770 -3 2700 2770 2769 -3 2701 2702 2771 -3 2701 2771 2770 -3 2702 2703 2772 -3 2702 2772 2771 -3 2703 2704 2773 -3 2703 2773 2772 -3 2704 2705 2774 -3 2704 2774 2773 -3 2705 2706 2775 -3 2705 2775 2774 -3 2706 2707 2776 -3 2706 2776 2775 -3 2707 2708 2777 -3 2707 2777 2776 -3 2708 2709 2778 -3 2708 2778 2777 -3 2709 2710 2779 -3 2709 2779 2778 -3 2710 2711 2780 -3 2710 2780 2779 -3 2711 2712 2781 -3 2711 2781 2780 -3 2712 2713 2782 -3 2712 2782 2781 -3 2713 2714 2783 -3 2713 2783 2782 -3 2714 2715 2784 -3 2714 2784 2783 -3 2715 2716 2785 -3 2715 2785 2784 -3 2716 2717 2786 -3 2716 2786 2785 -3 2717 2718 2787 -3 2717 2787 2786 -3 2718 2719 2788 -3 2718 2788 2787 -3 2719 2720 2789 -3 2719 2789 2788 -3 2720 2721 2790 -3 2720 2790 2789 -3 2721 2722 2790 -3 2791 2790 2722 -3 2722 2723 2791 -3 2792 2791 2723 -3 2723 2724 2792 -3 2793 2792 2724 -3 2724 2725 2793 -3 2794 2793 2725 -3 2725 2726 2794 -3 2795 2794 2726 -3 2726 2727 2795 -3 2796 2795 2727 -3 2727 2728 2796 -3 2797 2796 2728 -3 2728 2729 2797 -3 2798 2797 2729 -3 2729 2730 2798 -3 2799 2798 2730 -3 2730 2731 2799 -3 2800 2799 2731 -3 2731 2732 2800 -3 2801 2800 2732 -3 2732 2733 2801 -3 2802 2801 2733 -3 2733 2734 2802 -3 2803 2802 2734 -3 2734 2735 2803 -3 2804 2803 2735 -3 2735 2736 2804 -3 2805 2804 2736 -3 2736 2737 2805 -3 2806 2805 2737 -3 2737 2738 2806 -3 2807 2806 2738 -3 2738 2739 2807 -3 2808 2807 2739 -3 2739 2740 2808 -3 2809 2808 2740 -3 2740 2741 2809 -3 2810 2809 2741 -3 2741 2742 2810 -3 2811 2810 2742 -3 2742 2743 2811 -3 2812 2811 2743 -3 2743 2744 2812 -3 2813 2812 2744 -3 2744 2745 2813 -3 2814 2813 2745 -3 2745 2746 2814 -3 2815 2814 2746 -3 2746 2747 2815 -3 2816 2815 2747 -3 2747 2748 2816 -3 2817 2816 2748 -3 2748 2749 2817 -3 2818 2817 2749 -3 2749 2750 2818 -3 2819 2818 2750 -3 2750 2751 2819 -3 2820 2819 2751 -3 2751 2752 2820 -3 2821 2820 2752 -3 2752 2753 2821 -3 2822 2821 2753 -3 2753 5045 5049 -3 2753 5049 2822 -3 2754 2755 2824 -3 2754 2824 2823 -3 2754 2823 5048 -3 5052 5048 2823 -3 2755 2756 2825 -3 2755 2825 2824 -3 2756 2757 2826 -3 2756 2826 2825 -3 2757 2758 2827 -3 2757 2827 2826 -3 2758 2759 2828 -3 2758 2828 2827 -3 2759 2760 2829 -3 2759 2829 2828 -3 2760 2761 2830 -3 2760 2830 2829 -3 2761 2762 2831 -3 2761 2831 2830 -3 2762 2763 2832 -3 2762 2832 2831 -3 2763 2764 2833 -3 2763 2833 2832 -3 2764 2765 2834 -3 2764 2834 2833 -3 2765 2766 2835 -3 2765 2835 2834 -3 2766 2767 2836 -3 2766 2836 2835 -3 2767 2768 2837 -3 2767 2837 2836 -3 2768 2769 2838 -3 2768 2838 2837 -3 2769 2770 2839 -3 2769 2839 2838 -3 2770 2771 2840 -3 2770 2840 2839 -3 2771 2772 2841 -3 2771 2841 2840 -3 2772 2773 2842 -3 2772 2842 2841 -3 2773 2774 2843 -3 2773 2843 2842 -3 2774 2775 2844 -3 2774 2844 2843 -3 2775 2776 2845 -3 2775 2845 2844 -3 2776 2777 2846 -3 2776 2846 2845 -3 2777 2778 2847 -3 2777 2847 2846 -3 2778 2779 2848 -3 2778 2848 2847 -3 2779 2780 2849 -3 2779 2849 2848 -3 2780 2781 2850 -3 2780 2850 2849 -3 2781 2782 2851 -3 2781 2851 2850 -3 2782 2783 2852 -3 2782 2852 2851 -3 2783 2784 2853 -3 2783 2853 2852 -3 2784 2785 2854 -3 2784 2854 2853 -3 2785 2786 2855 -3 2785 2855 2854 -3 2786 2787 2855 -3 2856 2855 2787 -3 2787 2788 2856 -3 2857 2856 2788 -3 2788 2789 2857 -3 2858 2857 2789 -3 2789 2790 2858 -3 2859 2858 2790 -3 
2790 2791 2859 -3 2860 2859 2791 -3 2791 2792 2860 -3 2861 2860 2792 -3 2792 2793 2861 -3 2862 2861 2793 -3 2793 2794 2862 -3 2863 2862 2794 -3 2794 2795 2863 -3 2864 2863 2795 -3 2795 2796 2864 -3 2865 2864 2796 -3 2796 2797 2865 -3 2866 2865 2797 -3 2797 2798 2866 -3 2867 2866 2798 -3 2798 2799 2867 -3 2868 2867 2799 -3 2799 2800 2868 -3 2869 2868 2800 -3 2800 2801 2869 -3 2870 2869 2801 -3 2801 2802 2870 -3 2871 2870 2802 -3 2802 2803 2871 -3 2872 2871 2803 -3 2803 2804 2872 -3 2873 2872 2804 -3 2804 2805 2873 -3 2874 2873 2805 -3 2805 2806 2874 -3 2875 2874 2806 -3 2806 2807 2875 -3 2876 2875 2807 -3 2807 2808 2876 -3 2877 2876 2808 -3 2808 2809 2877 -3 2878 2877 2809 -3 2809 2810 2878 -3 2879 2878 2810 -3 2810 2811 2879 -3 2880 2879 2811 -3 2811 2812 2880 -3 2881 2880 2812 -3 2812 2813 2881 -3 2882 2881 2813 -3 2813 2814 2882 -3 2883 2882 2814 -3 2814 2815 2883 -3 2884 2883 2815 -3 2815 2816 2884 -3 2885 2884 2816 -3 2816 2817 2885 -3 2886 2885 2817 -3 2817 2818 2886 -3 2887 2886 2818 -3 2818 2819 2888 -3 2818 2888 2887 -3 2819 2820 2889 -3 2819 2889 2888 -3 2820 2821 2890 -3 2820 2890 2889 -3 2821 2822 2891 -3 2821 2891 2890 -3 2822 5049 2891 -3 5053 2891 5049 -3 2823 2824 2893 -3 2823 2893 2892 -3 2823 2892 5052 -3 5056 5052 2892 -3 2824 2825 2894 -3 2824 2894 2893 -3 2825 2826 2895 -3 2825 2895 2894 -3 2826 2827 2896 -3 2826 2896 2895 -3 2827 2828 2897 -3 2827 2897 2896 -3 2828 2829 2898 -3 2828 2898 2897 -3 2829 2830 2899 -3 2829 2899 2898 -3 2830 2831 2900 -3 2830 2900 2899 -3 2831 2832 2901 -3 2831 2901 2900 -3 2832 2833 2902 -3 2832 2902 2901 -3 2833 2834 2903 -3 2833 2903 2902 -3 2834 2835 2904 -3 2834 2904 2903 -3 2835 2836 2905 -3 2835 2905 2904 -3 2836 2837 2906 -3 2836 2906 2905 -3 2837 2838 2907 -3 2837 2907 2906 -3 2838 2839 2908 -3 2838 2908 2907 -3 2839 2840 2909 -3 2839 2909 2908 -3 2840 2841 2910 -3 2840 2910 2909 -3 2841 2842 2911 -3 2841 2911 2910 -3 2842 2843 2912 -3 2842 2912 2911 -3 2843 2844 2913 -3 2843 2913 2912 -3 2844 2845 2914 -3 2844 2914 2913 -3 2845 2846 2915 -3 2845 2915 2914 -3 2846 2847 2916 -3 2846 2916 2915 -3 2847 2848 2917 -3 2847 2917 2916 -3 2848 2849 2918 -3 2848 2918 2917 -3 2849 2850 2919 -3 2849 2919 2918 -3 2850 2851 2920 -3 2850 2920 2919 -3 2851 2852 2920 -3 2921 2920 2852 -3 2852 2853 2921 -3 2922 2921 2853 -3 2853 2854 2922 -3 2923 2922 2854 -3 2854 2855 2923 -3 2924 2923 2855 -3 2855 2856 2924 -3 2925 2924 2856 -3 2856 2857 2925 -3 2926 2925 2857 -3 2857 2858 2926 -3 2927 2926 2858 -3 2858 2859 2927 -3 2928 2927 2859 -3 2859 2860 2928 -3 2929 2928 2860 -3 2860 2861 2929 -3 2930 2929 2861 -3 2861 2862 2930 -3 2931 2930 2862 -3 2862 2863 2931 -3 2932 2931 2863 -3 2863 2864 2932 -3 2933 2932 2864 -3 2864 2865 2933 -3 2934 2933 2865 -3 2865 2866 2934 -3 2935 2934 2866 -3 2866 2867 2935 -3 2936 2935 2867 -3 2867 2868 2936 -3 2937 2936 2868 -3 2868 2869 2937 -3 2938 2937 2869 -3 2869 2870 2938 -3 2939 2938 2870 -3 2870 2871 2939 -3 2940 2939 2871 -3 2871 2872 2940 -3 2941 2940 2872 -3 2872 2873 2941 -3 2942 2941 2873 -3 2873 2874 2942 -3 2943 2942 2874 -3 2874 2875 2943 -3 2944 2943 2875 -3 2875 2876 2944 -3 2945 2944 2876 -3 2876 2877 2945 -3 2946 2945 2877 -3 2877 2878 2946 -3 2947 2946 2878 -3 2878 2879 2947 -3 2948 2947 2879 -3 2879 2880 2948 -3 2949 2948 2880 -3 2880 2881 2949 -3 2950 2949 2881 -3 2881 2882 2950 -3 2951 2950 2882 -3 2882 2883 2951 -3 2952 2951 2883 -3 2883 2884 2952 -3 2953 2952 2884 -3 2884 2885 2954 -3 2884 2954 2953 -3 2885 2886 2955 -3 2885 2955 2954 -3 2886 2887 2956 -3 2886 2956 2955 -3 2887 2888 2957 -3 2887 
2957 2956 -3 2888 2889 2958 -3 2888 2958 2957 -3 2889 2890 2959 -3 2889 2959 2958 -3 2890 2891 2960 -3 2890 2960 2959 -3 2891 5053 5057 -3 2891 5057 2960 -3 2892 2893 2962 -3 2892 2962 2961 -3 2892 2961 5056 -3 5060 5056 2961 -3 2893 2894 2963 -3 2893 2963 2962 -3 2894 2895 2964 -3 2894 2964 2963 -3 2895 2896 2965 -3 2895 2965 2964 -3 2896 2897 2966 -3 2896 2966 2965 -3 2897 2898 2967 -3 2897 2967 2966 -3 2898 2899 2968 -3 2898 2968 2967 -3 2899 2900 2969 -3 2899 2969 2968 -3 2900 2901 2970 -3 2900 2970 2969 -3 2901 2902 2971 -3 2901 2971 2970 -3 2902 2903 2972 -3 2902 2972 2971 -3 2903 2904 2973 -3 2903 2973 2972 -3 2904 2905 2974 -3 2904 2974 2973 -3 2905 2906 2975 -3 2905 2975 2974 -3 2906 2907 2976 -3 2906 2976 2975 -3 2907 2908 2977 -3 2907 2977 2976 -3 2908 2909 2978 -3 2908 2978 2977 -3 2909 2910 2979 -3 2909 2979 2978 -3 2910 2911 2980 -3 2910 2980 2979 -3 2911 2912 2981 -3 2911 2981 2980 -3 2912 2913 2982 -3 2912 2982 2981 -3 2913 2914 2983 -3 2913 2983 2982 -3 2914 2915 2984 -3 2914 2984 2983 -3 2915 2916 2985 -3 2915 2985 2984 -3 2916 2917 2986 -3 2916 2986 2985 -3 2917 2918 2986 -3 2987 2986 2918 -3 2918 2919 2987 -3 2988 2987 2919 -3 2919 2920 2988 -3 2989 2988 2920 -3 2920 2921 2989 -3 2990 2989 2921 -3 2921 2922 2990 -3 2991 2990 2922 -3 2922 2923 2991 -3 2992 2991 2923 -3 2923 2924 2992 -3 2993 2992 2924 -3 2924 2925 2993 -3 2994 2993 2925 -3 2925 2926 2994 -3 2995 2994 2926 -3 2926 2927 2995 -3 2996 2995 2927 -3 2927 2928 2996 -3 2997 2996 2928 -3 2928 2929 2997 -3 2998 2997 2929 -3 2929 2930 2998 -3 2999 2998 2930 -3 2930 2931 2999 -3 3000 2999 2931 -3 2931 2932 3000 -3 3001 3000 2932 -3 2932 2933 3001 -3 3002 3001 2933 -3 2933 2934 3002 -3 3003 3002 2934 -3 2934 2935 3003 -3 3004 3003 2935 -3 2935 2936 3004 -3 3005 3004 2936 -3 2936 2937 3005 -3 3006 3005 2937 -3 2937 2938 3006 -3 3007 3006 2938 -3 2938 2939 3007 -3 3008 3007 2939 -3 2939 2940 3008 -3 3009 3008 2940 -3 2940 2941 3009 -3 3010 3009 2941 -3 2941 2942 3010 -3 3011 3010 2942 -3 2942 2943 3011 -3 3012 3011 2943 -3 2943 2944 3012 -3 3013 3012 2944 -3 2944 2945 3013 -3 3014 3013 2945 -3 2945 2946 3014 -3 3015 3014 2946 -3 2946 2947 3015 -3 3016 3015 2947 -3 2947 2948 3016 -3 3017 3016 2948 -3 2948 2949 3017 -3 3018 3017 2949 -3 2949 2950 3018 -3 3019 3018 2950 -3 2950 2951 3019 -3 3020 3019 2951 -3 2951 2952 3021 -3 2951 3021 3020 -3 2952 2953 3022 -3 2952 3022 3021 -3 2953 2954 3023 -3 2953 3023 3022 -3 2954 2955 3024 -3 2954 3024 3023 -3 2955 2956 3025 -3 2955 3025 3024 -3 2956 2957 3026 -3 2956 3026 3025 -3 2957 2958 3027 -3 2957 3027 3026 -3 2958 2959 3028 -3 2958 3028 3027 -3 2959 2960 3029 -3 2959 3029 3028 -3 2960 5057 3029 -3 5061 3029 5057 -3 2961 2962 3031 -3 2961 3031 3030 -3 2961 3030 5060 -3 5064 5060 3030 -3 2962 2963 3032 -3 2962 3032 3031 -3 2963 2964 3033 -3 2963 3033 3032 -3 2964 2965 3034 -3 2964 3034 3033 -3 2965 2966 3035 -3 2965 3035 3034 -3 2966 2967 3036 -3 2966 3036 3035 -3 2967 2968 3037 -3 2967 3037 3036 -3 2968 2969 3038 -3 2968 3038 3037 -3 2969 2970 3039 -3 2969 3039 3038 -3 2970 2971 3040 -3 2970 3040 3039 -3 2971 2972 3041 -3 2971 3041 3040 -3 2972 2973 3042 -3 2972 3042 3041 -3 2973 2974 3043 -3 2973 3043 3042 -3 2974 2975 3044 -3 2974 3044 3043 -3 2975 2976 3045 -3 2975 3045 3044 -3 2976 2977 3046 -3 2976 3046 3045 -3 2977 2978 3047 -3 2977 3047 3046 -3 2978 2979 3048 -3 2978 3048 3047 -3 2979 2980 3049 -3 2979 3049 3048 -3 2980 2981 3050 -3 2980 3050 3049 -3 2981 2982 3051 -3 2981 3051 3050 -3 2982 2983 3052 -3 2982 3052 3051 -3 2983 2984 3053 -3 2983 3053 3052 -3 2984 2985 
3053 -3 3054 3053 2985 -3 2985 2986 3054 -3 3055 3054 2986 -3 2986 2987 3055 -3 3056 3055 2987 -3 2987 2988 3056 -3 3057 3056 2988 -3 2988 2989 3057 -3 3058 3057 2989 -3 2989 2990 3058 -3 3059 3058 2990 -3 2990 2991 3059 -3 3060 3059 2991 -3 2991 2992 3060 -3 3061 3060 2992 -3 2992 2993 3061 -3 3062 3061 2993 -3 2993 2994 3062 -3 3063 3062 2994 -3 2994 2995 3063 -3 3064 3063 2995 -3 2995 2996 3064 -3 3065 3064 2996 -3 2996 2997 3065 -3 3066 3065 2997 -3 2997 2998 3066 -3 3067 3066 2998 -3 2998 2999 3067 -3 3068 3067 2999 -3 2999 3000 3068 -3 3069 3068 3000 -3 3000 3001 3069 -3 3070 3069 3001 -3 3001 3002 3070 -3 3071 3070 3002 -3 3002 3003 3071 -3 3072 3071 3003 -3 3003 3004 3072 -3 3073 3072 3004 -3 3004 3005 3073 -3 3074 3073 3005 -3 3005 3006 3074 -3 3075 3074 3006 -3 3006 3007 3075 -3 3076 3075 3007 -3 3007 3008 3076 -3 3077 3076 3008 -3 3008 3009 3077 -3 3078 3077 3009 -3 3009 3010 3078 -3 3079 3078 3010 -3 3010 3011 3079 -3 3080 3079 3011 -3 3011 3012 3080 -3 3081 3080 3012 -3 3012 3013 3081 -3 3082 3081 3013 -3 3013 3014 3082 -3 3083 3082 3014 -3 3014 3015 3083 -3 3084 3083 3015 -3 3015 3016 3084 -3 3085 3084 3016 -3 3016 3017 3085 -3 3086 3085 3017 -3 3017 3018 3086 -3 3087 3086 3018 -3 3018 3019 3088 -3 3018 3088 3087 -3 3019 3020 3089 -3 3019 3089 3088 -3 3020 3021 3090 -3 3020 3090 3089 -3 3021 3022 3091 -3 3021 3091 3090 -3 3022 3023 3092 -3 3022 3092 3091 -3 3023 3024 3093 -3 3023 3093 3092 -3 3024 3025 3094 -3 3024 3094 3093 -3 3025 3026 3095 -3 3025 3095 3094 -3 3026 3027 3096 -3 3026 3096 3095 -3 3027 3028 3097 -3 3027 3097 3096 -3 3028 3029 3098 -3 3028 3098 3097 -3 3029 5061 5065 -3 3029 5065 3098 -3 3030 3031 3100 -3 3030 3100 3099 -3 3030 3099 5064 -3 5068 5064 3099 -3 3031 3032 3101 -3 3031 3101 3100 -3 3032 3033 3102 -3 3032 3102 3101 -3 3033 3034 3103 -3 3033 3103 3102 -3 3034 3035 3104 -3 3034 3104 3103 -3 3035 3036 3105 -3 3035 3105 3104 -3 3036 3037 3106 -3 3036 3106 3105 -3 3037 3038 3107 -3 3037 3107 3106 -3 3038 3039 3108 -3 3038 3108 3107 -3 3039 3040 3109 -3 3039 3109 3108 -3 3040 3041 3110 -3 3040 3110 3109 -3 3041 3042 3111 -3 3041 3111 3110 -3 3042 3043 3112 -3 3042 3112 3111 -3 3043 3044 3113 -3 3043 3113 3112 -3 3044 3045 3114 -3 3044 3114 3113 -3 3045 3046 3115 -3 3045 3115 3114 -3 3046 3047 3116 -3 3046 3116 3115 -3 3047 3048 3117 -3 3047 3117 3116 -3 3048 3049 3118 -3 3048 3118 3117 -3 3049 3050 3119 -3 3049 3119 3118 -3 3050 3051 3120 -3 3050 3120 3119 -3 3051 3052 3121 -3 3051 3121 3120 -3 3052 3053 3121 -3 3122 3121 3053 -3 3053 3054 3122 -3 3123 3122 3054 -3 3054 3055 3123 -3 3124 3123 3055 -3 3055 3056 3124 -3 3125 3124 3056 -3 3056 3057 3125 -3 3126 3125 3057 -3 3057 3058 3126 -3 3127 3126 3058 -3 3058 3059 3127 -3 3128 3127 3059 -3 3059 3060 3128 -3 3129 3128 3060 -3 3060 3061 3129 -3 3130 3129 3061 -3 3061 3062 3130 -3 3131 3130 3062 -3 3062 3063 3131 -3 3132 3131 3063 -3 3063 3064 3132 -3 3133 3132 3064 -3 3064 3065 3133 -3 3134 3133 3065 -3 3065 3066 3134 -3 3135 3134 3066 -3 3066 3067 3135 -3 3136 3135 3067 -3 3067 3068 3136 -3 3137 3136 3068 -3 3068 3069 3137 -3 3138 3137 3069 -3 3069 3070 3138 -3 3139 3138 3070 -3 3070 3071 3139 -3 3140 3139 3071 -3 3071 3072 3140 -3 3141 3140 3072 -3 3072 3073 3141 -3 3142 3141 3073 -3 3073 3074 3142 -3 3143 3142 3074 -3 3074 3075 3143 -3 3144 3143 3075 -3 3075 3076 3144 -3 3145 3144 3076 -3 3076 3077 3145 -3 3146 3145 3077 -3 3077 3078 3146 -3 3147 3146 3078 -3 3078 3079 3147 -3 3148 3147 3079 -3 3079 3080 3148 -3 3149 3148 3080 -3 3080 3081 3149 -3 3150 3149 3081 -3 3081 3082 3150 -3 3151 3150 3082 -3 
3082 3083 3151 -3 3152 3151 3083 -3 3083 3084 3152 -3 3153 3152 3084 -3 3084 3085 3153 -3 3154 3153 3085 -3 3085 3086 3154 -3 3155 3154 3086 -3 3086 3087 3156 -3 3086 3156 3155 -3 3087 3088 3157 -3 3087 3157 3156 -3 3088 3089 3158 -3 3088 3158 3157 -3 3089 3090 3159 -3 3089 3159 3158 -3 3090 3091 3160 -3 3090 3160 3159 -3 3091 3092 3161 -3 3091 3161 3160 -3 3092 3093 3162 -3 3092 3162 3161 -3 3093 3094 3163 -3 3093 3163 3162 -3 3094 3095 3164 -3 3094 3164 3163 -3 3095 3096 3165 -3 3095 3165 3164 -3 3096 3097 3166 -3 3096 3166 3165 -3 3097 3098 3167 -3 3097 3167 3166 -3 3098 5065 3167 -3 5069 3167 5065 -3 3099 3100 3169 -3 3099 3169 3168 -3 3099 3168 5068 -3 5072 5068 3168 -3 3100 3101 3170 -3 3100 3170 3169 -3 3101 3102 3171 -3 3101 3171 3170 -3 3102 3103 3172 -3 3102 3172 3171 -3 3103 3104 3173 -3 3103 3173 3172 -3 3104 3105 3174 -3 3104 3174 3173 -3 3105 3106 3175 -3 3105 3175 3174 -3 3106 3107 3176 -3 3106 3176 3175 -3 3107 3108 3177 -3 3107 3177 3176 -3 3108 3109 3178 -3 3108 3178 3177 -3 3109 3110 3179 -3 3109 3179 3178 -3 3110 3111 3180 -3 3110 3180 3179 -3 3111 3112 3181 -3 3111 3181 3180 -3 3112 3113 3182 -3 3112 3182 3181 -3 3113 3114 3183 -3 3113 3183 3182 -3 3114 3115 3184 -3 3114 3184 3183 -3 3115 3116 3185 -3 3115 3185 3184 -3 3116 3117 3186 -3 3116 3186 3185 -3 3117 3118 3187 -3 3117 3187 3186 -3 3118 3119 3188 -3 3118 3188 3187 -3 3119 3120 3189 -3 3119 3189 3188 -3 3120 3121 3189 -3 3190 3189 3121 -3 3121 3122 3190 -3 3191 3190 3122 -3 3122 3123 3191 -3 3192 3191 3123 -3 3123 3124 3192 -3 3193 3192 3124 -3 3124 3125 3193 -3 3194 3193 3125 -3 3125 3126 3194 -3 3195 3194 3126 -3 3126 3127 3195 -3 3196 3195 3127 -3 3127 3128 3196 -3 3197 3196 3128 -3 3128 3129 3197 -3 3198 3197 3129 -3 3129 3130 3198 -3 3199 3198 3130 -3 3130 3131 3199 -3 3200 3199 3131 -3 3131 3132 3200 -3 3201 3200 3132 -3 3132 3133 3201 -3 3202 3201 3133 -3 3133 3134 3202 -3 3203 3202 3134 -3 3134 3135 3203 -3 3204 3203 3135 -3 3135 3136 3204 -3 3205 3204 3136 -3 3136 3137 3205 -3 3206 3205 3137 -3 3137 3138 3206 -3 3207 3206 3138 -3 3138 3139 3207 -3 3208 3207 3139 -3 3139 3140 3208 -3 3209 3208 3140 -3 3140 3141 3209 -3 3210 3209 3141 -3 3141 3142 3210 -3 3211 3210 3142 -3 3142 3143 3211 -3 3212 3211 3143 -3 3143 3144 3212 -3 3213 3212 3144 -3 3144 3145 3213 -3 3214 3213 3145 -3 3145 3146 3214 -3 3215 3214 3146 -3 3146 3147 3215 -3 3216 3215 3147 -3 3147 3148 3216 -3 3217 3216 3148 -3 3148 3149 3217 -3 3218 3217 3149 -3 3149 3150 3218 -3 3219 3218 3150 -3 3150 3151 3219 -3 3220 3219 3151 -3 3151 3152 3220 -3 3221 3220 3152 -3 3152 3153 3221 -3 3222 3221 3153 -3 3153 3154 3222 -3 3223 3222 3154 -3 3154 3155 3223 -3 3224 3223 3155 -3 3155 3156 3225 -3 3155 3225 3224 -3 3156 3157 3226 -3 3156 3226 3225 -3 3157 3158 3227 -3 3157 3227 3226 -3 3158 3159 3228 -3 3158 3228 3227 -3 3159 3160 3229 -3 3159 3229 3228 -3 3160 3161 3230 -3 3160 3230 3229 -3 3161 3162 3231 -3 3161 3231 3230 -3 3162 3163 3232 -3 3162 3232 3231 -3 3163 3164 3233 -3 3163 3233 3232 -3 3164 3165 3234 -3 3164 3234 3233 -3 3165 3166 3235 -3 3165 3235 3234 -3 3166 3167 3236 -3 3166 3236 3235 -3 3167 5069 5073 -3 3167 5073 3236 -3 3168 3169 3238 -3 3168 3238 3237 -3 3168 3237 5072 -3 5076 5072 3237 -3 3169 3170 3239 -3 3169 3239 3238 -3 3170 3171 3240 -3 3170 3240 3239 -3 3171 3172 3241 -3 3171 3241 3240 -3 3172 3173 3242 -3 3172 3242 3241 -3 3173 3174 3243 -3 3173 3243 3242 -3 3174 3175 3244 -3 3174 3244 3243 -3 3175 3176 3245 -3 3175 3245 3244 -3 3176 3177 3246 -3 3176 3246 3245 -3 3177 3178 3247 -3 3177 3247 3246 -3 3178 3179 3248 -3 3178 
3248 3247 -3 3179 3180 3249 -3 3179 3249 3248 -3 3180 3181 3250 -3 3180 3250 3249 -3 3181 3182 3251 -3 3181 3251 3250 -3 3182 3183 3252 -3 3182 3252 3251 -3 3183 3184 3253 -3 3183 3253 3252 -3 3184 3185 3254 -3 3184 3254 3253 -3 3185 3186 3255 -3 3185 3255 3254 -3 3186 3187 3256 -3 3186 3256 3255 -3 3187 3188 3257 -3 3187 3257 3256 -3 3188 3189 3258 -3 3188 3258 3257 -3 3189 3190 3258 -3 3259 3258 3190 -3 3190 3191 3259 -3 3260 3259 3191 -3 3191 3192 3260 -3 3261 3260 3192 -3 3192 3193 3261 -3 3262 3261 3193 -3 3193 3194 3262 -3 3263 3262 3194 -3 3194 3195 3263 -3 3264 3263 3195 -3 3195 3196 3264 -3 3265 3264 3196 -3 3196 3197 3265 -3 3266 3265 3197 -3 3197 3198 3266 -3 3267 3266 3198 -3 3198 3199 3267 -3 3268 3267 3199 -3 3199 3200 3268 -3 3269 3268 3200 -3 3200 3201 3269 -3 3270 3269 3201 -3 3201 3202 3270 -3 3271 3270 3202 -3 3202 3203 3271 -3 3272 3271 3203 -3 3203 3204 3272 -3 3273 3272 3204 -3 3204 3205 3273 -3 3274 3273 3205 -3 3205 3206 3274 -3 3275 3274 3206 -3 3206 3207 3275 -3 3276 3275 3207 -3 3207 3208 3276 -3 3277 3276 3208 -3 3208 3209 3277 -3 3278 3277 3209 -3 3209 3210 3278 -3 3279 3278 3210 -3 3210 3211 3279 -3 3280 3279 3211 -3 3211 3212 3280 -3 3281 3280 3212 -3 3212 3213 3281 -3 3282 3281 3213 -3 3213 3214 3282 -3 3283 3282 3214 -3 3214 3215 3283 -3 3284 3283 3215 -3 3215 3216 3284 -3 3285 3284 3216 -3 3216 3217 3285 -3 3286 3285 3217 -3 3217 3218 3286 -3 3287 3286 3218 -3 3218 3219 3287 -3 3288 3287 3219 -3 3219 3220 3288 -3 3289 3288 3220 -3 3220 3221 3289 -3 3290 3289 3221 -3 3221 3222 3290 -3 3291 3290 3222 -3 3222 3223 3291 -3 3292 3291 3223 -3 3223 3224 3292 -3 3293 3292 3224 -3 3224 3225 3294 -3 3224 3294 3293 -3 3225 3226 3295 -3 3225 3295 3294 -3 3226 3227 3296 -3 3226 3296 3295 -3 3227 3228 3297 -3 3227 3297 3296 -3 3228 3229 3298 -3 3228 3298 3297 -3 3229 3230 3299 -3 3229 3299 3298 -3 3230 3231 3300 -3 3230 3300 3299 -3 3231 3232 3301 -3 3231 3301 3300 -3 3232 3233 3302 -3 3232 3302 3301 -3 3233 3234 3303 -3 3233 3303 3302 -3 3234 3235 3304 -3 3234 3304 3303 -3 3235 3236 3305 -3 3235 3305 3304 -3 3236 5073 3305 -3 5077 3305 5073 -3 3237 3238 3307 -3 3237 3307 3306 -3 3237 3306 5076 -3 5080 5076 3306 -3 3238 3239 3308 -3 3238 3308 3307 -3 3239 3240 3309 -3 3239 3309 3308 -3 3240 3241 3310 -3 3240 3310 3309 -3 3241 3242 3311 -3 3241 3311 3310 -3 3242 3243 3312 -3 3242 3312 3311 -3 3243 3244 3313 -3 3243 3313 3312 -3 3244 3245 3314 -3 3244 3314 3313 -3 3245 3246 3315 -3 3245 3315 3314 -3 3246 3247 3316 -3 3246 3316 3315 -3 3247 3248 3317 -3 3247 3317 3316 -3 3248 3249 3318 -3 3248 3318 3317 -3 3249 3250 3319 -3 3249 3319 3318 -3 3250 3251 3320 -3 3250 3320 3319 -3 3251 3252 3321 -3 3251 3321 3320 -3 3252 3253 3322 -3 3252 3322 3321 -3 3253 3254 3323 -3 3253 3323 3322 -3 3254 3255 3324 -3 3254 3324 3323 -3 3255 3256 3325 -3 3255 3325 3324 -3 3256 3257 3326 -3 3256 3326 3325 -3 3257 3258 3327 -3 3257 3327 3326 -3 3258 3259 3328 -3 3258 3328 3327 -3 3259 3260 3328 -3 3329 3328 3260 -3 3260 3261 3329 -3 3330 3329 3261 -3 3261 3262 3330 -3 3331 3330 3262 -3 3262 3263 3331 -3 3332 3331 3263 -3 3263 3264 3332 -3 3333 3332 3264 -3 3264 3265 3333 -3 3334 3333 3265 -3 3265 3266 3334 -3 3335 3334 3266 -3 3266 3267 3335 -3 3336 3335 3267 -3 3267 3268 3336 -3 3337 3336 3268 -3 3268 3269 3337 -3 3338 3337 3269 -3 3269 3270 3338 -3 3339 3338 3270 -3 3270 3271 3339 -3 3340 3339 3271 -3 3271 3272 3340 -3 3341 3340 3272 -3 3272 3273 3341 -3 3342 3341 3273 -3 3273 3274 3342 -3 3343 3342 3274 -3 3274 3275 3343 -3 3344 3343 3275 -3 3275 3276 3344 -3 3345 3344 3276 -3 3276 3277 
3345 -3 3346 3345 3277 -3 3277 3278 3346 -3 3347 3346 3278 -3 3278 3279 3347 -3 3348 3347 3279 -3 3279 3280 3348 -3 3349 3348 3280 -3 3280 3281 3349 -3 3350 3349 3281 -3 3281 3282 3350 -3 3351 3350 3282 -3 3282 3283 3351 -3 3352 3351 3283 -3 3283 3284 3352 -3 3353 3352 3284 -3 3284 3285 3353 -3 3354 3353 3285 -3 3285 3286 3354 -3 3355 3354 3286 -3 3286 3287 3355 -3 3356 3355 3287 -3 3287 3288 3356 -3 3357 3356 3288 -3 3288 3289 3357 -3 3358 3357 3289 -3 3289 3290 3358 -3 3359 3358 3290 -3 3290 3291 3359 -3 3360 3359 3291 -3 3291 3292 3360 -3 3361 3360 3292 -3 3292 3293 3361 -3 3362 3361 3293 -3 3293 3294 3362 -3 3363 3362 3294 -3 3294 3295 3364 -3 3294 3364 3363 -3 3295 3296 3365 -3 3295 3365 3364 -3 3296 3297 3366 -3 3296 3366 3365 -3 3297 3298 3367 -3 3297 3367 3366 -3 3298 3299 3368 -3 3298 3368 3367 -3 3299 3300 3369 -3 3299 3369 3368 -3 3300 3301 3370 -3 3300 3370 3369 -3 3301 3302 3371 -3 3301 3371 3370 -3 3302 3303 3372 -3 3302 3372 3371 -3 3303 3304 3373 -3 3303 3373 3372 -3 3304 3305 3374 -3 3304 3374 3373 -3 3305 5077 5081 -3 3305 5081 3374 -3 3306 3307 3376 -3 3306 3376 3375 -3 3306 3375 5080 -3 5084 5080 3375 -3 3307 3308 3377 -3 3307 3377 3376 -3 3308 3309 3378 -3 3308 3378 3377 -3 3309 3310 3379 -3 3309 3379 3378 -3 3310 3311 3380 -3 3310 3380 3379 -3 3311 3312 3381 -3 3311 3381 3380 -3 3312 3313 3382 -3 3312 3382 3381 -3 3313 3314 3383 -3 3313 3383 3382 -3 3314 3315 3384 -3 3314 3384 3383 -3 3315 3316 3385 -3 3315 3385 3384 -3 3316 3317 3386 -3 3316 3386 3385 -3 3317 3318 3387 -3 3317 3387 3386 -3 3318 3319 3388 -3 3318 3388 3387 -3 3319 3320 3389 -3 3319 3389 3388 -3 3320 3321 3390 -3 3320 3390 3389 -3 3321 3322 3391 -3 3321 3391 3390 -3 3322 3323 3392 -3 3322 3392 3391 -3 3323 3324 3393 -3 3323 3393 3392 -3 3324 3325 3394 -3 3324 3394 3393 -3 3325 3326 3395 -3 3325 3395 3394 -3 3326 3327 3396 -3 3326 3396 3395 -3 3327 3328 3397 -3 3327 3397 3396 -3 3328 3329 3398 -3 3328 3398 3397 -3 3329 3330 3399 -3 3329 3399 3398 -3 3330 3331 3399 -3 3400 3399 3331 -3 3331 3332 3400 -3 3401 3400 3332 -3 3332 3333 3401 -3 3402 3401 3333 -3 3333 3334 3402 -3 3403 3402 3334 -3 3334 3335 3403 -3 3404 3403 3335 -3 3335 3336 3404 -3 3405 3404 3336 -3 3336 3337 3405 -3 3406 3405 3337 -3 3337 3338 3406 -3 3407 3406 3338 -3 3338 3339 3407 -3 3408 3407 3339 -3 3339 3340 3408 -3 3409 3408 3340 -3 3340 3341 3409 -3 3410 3409 3341 -3 3341 3342 3410 -3 3411 3410 3342 -3 3342 3343 3411 -3 3412 3411 3343 -3 3343 3344 3412 -3 3413 3412 3344 -3 3344 3345 3413 -3 3414 3413 3345 -3 3345 3346 3414 -3 3415 3414 3346 -3 3346 3347 3415 -3 3416 3415 3347 -3 3347 3348 3416 -3 3417 3416 3348 -3 3348 3349 3417 -3 3418 3417 3349 -3 3349 3350 3418 -3 3419 3418 3350 -3 3350 3351 3419 -3 3420 3419 3351 -3 3351 3352 3420 -3 3421 3420 3352 -3 3352 3353 3421 -3 3422 3421 3353 -3 3353 3354 3422 -3 3423 3422 3354 -3 3354 3355 3423 -3 3424 3423 3355 -3 3355 3356 3424 -3 3425 3424 3356 -3 3356 3357 3425 -3 3426 3425 3357 -3 3357 3358 3426 -3 3427 3426 3358 -3 3358 3359 3427 -3 3428 3427 3359 -3 3359 3360 3428 -3 3429 3428 3360 -3 3360 3361 3429 -3 3430 3429 3361 -3 3361 3362 3430 -3 3431 3430 3362 -3 3362 3363 3431 -3 3432 3431 3363 -3 3363 3364 3432 -3 3433 3432 3364 -3 3364 3365 3433 -3 3434 3433 3365 -3 3365 3366 3434 -3 3435 3434 3366 -3 3366 3367 3436 -3 3366 3436 3435 -3 3367 3368 3437 -3 3367 3437 3436 -3 3368 3369 3438 -3 3368 3438 3437 -3 3369 3370 3439 -3 3369 3439 3438 -3 3370 3371 3440 -3 3370 3440 3439 -3 3371 3372 3441 -3 3371 3441 3440 -3 3372 3373 3442 -3 3372 3442 3441 -3 3373 3374 3443 -3 3373 3443 3442 -3 
3374 5081 5085 -3 3374 5085 3443 -3 3375 3376 3445 -3 3375 3445 3444 -3 3375 3444 5084 -3 5088 5084 3444 -3 3376 3377 3446 -3 3376 3446 3445 -3 3377 3378 3447 -3 3377 3447 3446 -3 3378 3379 3448 -3 3378 3448 3447 -3 3379 3380 3449 -3 3379 3449 3448 -3 3380 3381 3450 -3 3380 3450 3449 -3 3381 3382 3451 -3 3381 3451 3450 -3 3382 3383 3452 -3 3382 3452 3451 -3 3383 3384 3453 -3 3383 3453 3452 -3 3384 3385 3454 -3 3384 3454 3453 -3 3385 3386 3455 -3 3385 3455 3454 -3 3386 3387 3456 -3 3386 3456 3455 -3 3387 3388 3457 -3 3387 3457 3456 -3 3388 3389 3458 -3 3388 3458 3457 -3 3389 3390 3459 -3 3389 3459 3458 -3 3390 3391 3460 -3 3390 3460 3459 -3 3391 3392 3461 -3 3391 3461 3460 -3 3392 3393 3462 -3 3392 3462 3461 -3 3393 3394 3463 -3 3393 3463 3462 -3 3394 3395 3464 -3 3394 3464 3463 -3 3395 3396 3465 -3 3395 3465 3464 -3 3396 3397 3466 -3 3396 3466 3465 -3 3397 3398 3467 -3 3397 3467 3466 -3 3398 3399 3468 -3 3398 3468 3467 -3 3399 3400 3469 -3 3399 3469 3468 -3 3400 3401 3470 -3 3400 3470 3469 -3 3401 3402 3470 -3 3471 3470 3402 -3 3402 3403 3471 -3 3472 3471 3403 -3 3403 3404 3472 -3 3473 3472 3404 -3 3404 3405 3473 -3 3474 3473 3405 -3 3405 3406 3474 -3 3475 3474 3406 -3 3406 3407 3475 -3 3476 3475 3407 -3 3407 3408 3476 -3 3477 3476 3408 -3 3408 3409 3477 -3 3478 3477 3409 -3 3409 3410 3478 -3 3479 3478 3410 -3 3410 3411 3479 -3 3480 3479 3411 -3 3411 3412 3480 -3 3481 3480 3412 -3 3412 3413 3481 -3 3482 3481 3413 -3 3413 3414 3482 -3 3483 3482 3414 -3 3414 3415 3483 -3 3484 3483 3415 -3 3415 3416 3484 -3 3485 3484 3416 -3 3416 3417 3485 -3 3486 3485 3417 -3 3417 3418 3486 -3 3487 3486 3418 -3 3418 3419 3487 -3 3488 3487 3419 -3 3419 3420 3488 -3 3489 3488 3420 -3 3420 3421 3489 -3 3490 3489 3421 -3 3421 3422 3490 -3 3491 3490 3422 -3 3422 3423 3491 -3 3492 3491 3423 -3 3423 3424 3492 -3 3493 3492 3424 -3 3424 3425 3493 -3 3494 3493 3425 -3 3425 3426 3494 -3 3495 3494 3426 -3 3426 3427 3495 -3 3496 3495 3427 -3 3427 3428 3496 -3 3497 3496 3428 -3 3428 3429 3497 -3 3498 3497 3429 -3 3429 3430 3498 -3 3499 3498 3430 -3 3430 3431 3499 -3 3500 3499 3431 -3 3431 3432 3500 -3 3501 3500 3432 -3 3432 3433 3501 -3 3502 3501 3433 -3 3433 3434 3502 -3 3503 3502 3434 -3 3434 3435 3503 -3 3504 3503 3435 -3 3435 3436 3504 -3 3505 3504 3436 -3 3436 3437 3505 -3 3506 3505 3437 -3 3437 3438 3507 -3 3437 3507 3506 -3 3438 3439 3508 -3 3438 3508 3507 -3 3439 3440 3509 -3 3439 3509 3508 -3 3440 3441 3510 -3 3440 3510 3509 -3 3441 3442 3511 -3 3441 3511 3510 -3 3442 3443 3512 -3 3442 3512 3511 -3 3443 5085 3512 -3 5089 3512 5085 -3 3444 3445 3514 -3 3444 3514 3513 -3 3444 3513 5088 -3 5092 5088 3513 -3 3445 3446 3515 -3 3445 3515 3514 -3 3446 3447 3516 -3 3446 3516 3515 -3 3447 3448 3517 -3 3447 3517 3516 -3 3448 3449 3518 -3 3448 3518 3517 -3 3449 3450 3519 -3 3449 3519 3518 -3 3450 3451 3520 -3 3450 3520 3519 -3 3451 3452 3521 -3 3451 3521 3520 -3 3452 3453 3522 -3 3452 3522 3521 -3 3453 3454 3523 -3 3453 3523 3522 -3 3454 3455 3524 -3 3454 3524 3523 -3 3455 3456 3525 -3 3455 3525 3524 -3 3456 3457 3526 -3 3456 3526 3525 -3 3457 3458 3527 -3 3457 3527 3526 -3 3458 3459 3528 -3 3458 3528 3527 -3 3459 3460 3529 -3 3459 3529 3528 -3 3460 3461 3530 -3 3460 3530 3529 -3 3461 3462 3531 -3 3461 3531 3530 -3 3462 3463 3532 -3 3462 3532 3531 -3 3463 3464 3533 -3 3463 3533 3532 -3 3464 3465 3534 -3 3464 3534 3533 -3 3465 3466 3535 -3 3465 3535 3534 -3 3466 3467 3536 -3 3466 3536 3535 -3 3467 3468 3537 -3 3467 3537 3536 -3 3468 3469 3538 -3 3468 3538 3537 -3 3469 3470 3539 -3 3469 3539 3538 -3 3470 3471 3540 -3 3470 
3540 3539 -3 3471 3472 3541 -3 3471 3541 3540 -3 3472 3473 3542 -3 3472 3542 3541 -3 3473 3474 3542 -3 3543 3542 3474 -3 3474 3475 3543 -3 3544 3543 3475 -3 3475 3476 3544 -3 3545 3544 3476 -3 3476 3477 3545 -3 3546 3545 3477 -3 3477 3478 3546 -3 3547 3546 3478 -3 3478 3479 3547 -3 3548 3547 3479 -3 3479 3480 3548 -3 3549 3548 3480 -3 3480 3481 3549 -3 3550 3549 3481 -3 3481 3482 3550 -3 3551 3550 3482 -3 3482 3483 3551 -3 3552 3551 3483 -3 3483 3484 3552 -3 3553 3552 3484 -3 3484 3485 3553 -3 3554 3553 3485 -3 3485 3486 3554 -3 3555 3554 3486 -3 3486 3487 3555 -3 3556 3555 3487 -3 3487 3488 3556 -3 3557 3556 3488 -3 3488 3489 3557 -3 3558 3557 3489 -3 3489 3490 3558 -3 3559 3558 3490 -3 3490 3491 3559 -3 3560 3559 3491 -3 3491 3492 3560 -3 3561 3560 3492 -3 3492 3493 3561 -3 3562 3561 3493 -3 3493 3494 3562 -3 3563 3562 3494 -3 3494 3495 3563 -3 3564 3563 3495 -3 3495 3496 3564 -3 3565 3564 3496 -3 3496 3497 3565 -3 3566 3565 3497 -3 3497 3498 3566 -3 3567 3566 3498 -3 3498 3499 3567 -3 3568 3567 3499 -3 3499 3500 3568 -3 3569 3568 3500 -3 3500 3501 3569 -3 3570 3569 3501 -3 3501 3502 3570 -3 3571 3570 3502 -3 3502 3503 3571 -3 3572 3571 3503 -3 3503 3504 3572 -3 3573 3572 3504 -3 3504 3505 3573 -3 3574 3573 3505 -3 3505 3506 3574 -3 3575 3574 3506 -3 3506 3507 3575 -3 3576 3575 3507 -3 3507 3508 3576 -3 3577 3576 3508 -3 3508 3509 3577 -3 3578 3577 3509 -3 3509 3510 3578 -3 3579 3578 3510 -3 3510 3511 3580 -3 3510 3580 3579 -3 3511 3512 3581 -3 3511 3581 3580 -3 3512 5089 5093 -3 3512 5093 3581 -3 3513 3514 3583 -3 3513 3583 3582 -3 3513 3582 5092 -3 5096 5092 3582 -3 3514 3515 3584 -3 3514 3584 3583 -3 3515 3516 3585 -3 3515 3585 3584 -3 3516 3517 3586 -3 3516 3586 3585 -3 3517 3518 3587 -3 3517 3587 3586 -3 3518 3519 3588 -3 3518 3588 3587 -3 3519 3520 3589 -3 3519 3589 3588 -3 3520 3521 3590 -3 3520 3590 3589 -3 3521 3522 3591 -3 3521 3591 3590 -3 3522 3523 3592 -3 3522 3592 3591 -3 3523 3524 3593 -3 3523 3593 3592 -3 3524 3525 3594 -3 3524 3594 3593 -3 3525 3526 3595 -3 3525 3595 3594 -3 3526 3527 3596 -3 3526 3596 3595 -3 3527 3528 3597 -3 3527 3597 3596 -3 3528 3529 3598 -3 3528 3598 3597 -3 3529 3530 3599 -3 3529 3599 3598 -3 3530 3531 3600 -3 3530 3600 3599 -3 3531 3532 3601 -3 3531 3601 3600 -3 3532 3533 3602 -3 3532 3602 3601 -3 3533 3534 3603 -3 3533 3603 3602 -3 3534 3535 3604 -3 3534 3604 3603 -3 3535 3536 3605 -3 3535 3605 3604 -3 3536 3537 3606 -3 3536 3606 3605 -3 3537 3538 3607 -3 3537 3607 3606 -3 3538 3539 3608 -3 3538 3608 3607 -3 3539 3540 3609 -3 3539 3609 3608 -3 3540 3541 3610 -3 3540 3610 3609 -3 3541 3542 3611 -3 3541 3611 3610 -3 3542 3543 3612 -3 3542 3612 3611 -3 3543 3544 3613 -3 3543 3613 3612 -3 3544 3545 3614 -3 3544 3614 3613 -3 3545 3546 3615 -3 3545 3615 3614 -3 3546 3547 3615 -3 3616 3615 3547 -3 3547 3548 3616 -3 3617 3616 3548 -3 3548 3549 3617 -3 3618 3617 3549 -3 3549 3550 3618 -3 3619 3618 3550 -3 3550 3551 3619 -3 3620 3619 3551 -3 3551 3552 3620 -3 3621 3620 3552 -3 3552 3553 3621 -3 3622 3621 3553 -3 3553 3554 3622 -3 3623 3622 3554 -3 3554 3555 3623 -3 3624 3623 3555 -3 3555 3556 3624 -3 3625 3624 3556 -3 3556 3557 3625 -3 3626 3625 3557 -3 3557 3558 3626 -3 3627 3626 3558 -3 3558 3559 3627 -3 3628 3627 3559 -3 3559 3560 3628 -3 3629 3628 3560 -3 3560 3561 3629 -3 3630 3629 3561 -3 3561 3562 3630 -3 3631 3630 3562 -3 3562 3563 3631 -3 3632 3631 3563 -3 3563 3564 3632 -3 3633 3632 3564 -3 3564 3565 3633 -3 3634 3633 3565 -3 3565 3566 3634 -3 3635 3634 3566 -3 3566 3567 3635 -3 3636 3635 3567 -3 3567 3568 3636 -3 3637 3636 3568 -3 3568 3569 
3637 -3 3638 3637 3569 -3 3569 3570 3638 -3 3639 3638 3570 -3 3570 3571 3639 -3 3640 3639 3571 -3 3571 3572 3640 -3 3641 3640 3572 -3 3572 3573 3641 -3 3642 3641 3573 -3 3573 3574 3642 -3 3643 3642 3574 -3 3574 3575 3643 -3 3644 3643 3575 -3 3575 3576 3644 -3 3645 3644 3576 -3 3576 3577 3645 -3 3646 3645 3577 -3 3577 3578 3646 -3 3647 3646 3578 -3 3578 3579 3647 -3 3648 3647 3579 -3 3579 3580 3648 -3 3649 3648 3580 -3 3580 3581 3649 -3 3650 3649 3581 -3 3581 5093 3650 -3 5097 3650 5093 -3 3582 3583 3651 -3 3652 3651 3583 -3 3582 3651 5096 -3 5100 5096 3651 -3 3583 3584 3653 -3 3583 3653 3652 -3 3584 3585 3654 -3 3584 3654 3653 -3 3585 3586 3655 -3 3585 3655 3654 -3 3586 3587 3656 -3 3586 3656 3655 -3 3587 3588 3657 -3 3587 3657 3656 -3 3588 3589 3658 -3 3588 3658 3657 -3 3589 3590 3659 -3 3589 3659 3658 -3 3590 3591 3660 -3 3590 3660 3659 -3 3591 3592 3661 -3 3591 3661 3660 -3 3592 3593 3662 -3 3592 3662 3661 -3 3593 3594 3663 -3 3593 3663 3662 -3 3594 3595 3664 -3 3594 3664 3663 -3 3595 3596 3665 -3 3595 3665 3664 -3 3596 3597 3666 -3 3596 3666 3665 -3 3597 3598 3667 -3 3597 3667 3666 -3 3598 3599 3668 -3 3598 3668 3667 -3 3599 3600 3669 -3 3599 3669 3668 -3 3600 3601 3670 -3 3600 3670 3669 -3 3601 3602 3671 -3 3601 3671 3670 -3 3602 3603 3672 -3 3602 3672 3671 -3 3603 3604 3673 -3 3603 3673 3672 -3 3604 3605 3674 -3 3604 3674 3673 -3 3605 3606 3675 -3 3605 3675 3674 -3 3606 3607 3676 -3 3606 3676 3675 -3 3607 3608 3677 -3 3607 3677 3676 -3 3608 3609 3678 -3 3608 3678 3677 -3 3609 3610 3679 -3 3609 3679 3678 -3 3610 3611 3680 -3 3610 3680 3679 -3 3611 3612 3681 -3 3611 3681 3680 -3 3612 3613 3682 -3 3612 3682 3681 -3 3613 3614 3683 -3 3613 3683 3682 -3 3614 3615 3684 -3 3614 3684 3683 -3 3615 3616 3685 -3 3615 3685 3684 -3 3616 3617 3686 -3 3616 3686 3685 -3 3617 3618 3687 -3 3617 3687 3686 -3 3618 3619 3688 -3 3618 3688 3687 -3 3619 3620 3689 -3 3619 3689 3688 -3 3620 3621 3689 -3 3690 3689 3621 -3 3621 3622 3690 -3 3691 3690 3622 -3 3622 3623 3691 -3 3692 3691 3623 -3 3623 3624 3692 -3 3693 3692 3624 -3 3624 3625 3693 -3 3694 3693 3625 -3 3625 3626 3694 -3 3695 3694 3626 -3 3626 3627 3695 -3 3696 3695 3627 -3 3627 3628 3696 -3 3697 3696 3628 -3 3628 3629 3697 -3 3698 3697 3629 -3 3629 3630 3698 -3 3699 3698 3630 -3 3630 3631 3699 -3 3700 3699 3631 -3 3631 3632 3700 -3 3701 3700 3632 -3 3632 3633 3701 -3 3702 3701 3633 -3 3633 3634 3702 -3 3703 3702 3634 -3 3634 3635 3703 -3 3704 3703 3635 -3 3635 3636 3704 -3 3705 3704 3636 -3 3636 3637 3705 -3 3706 3705 3637 -3 3637 3638 3706 -3 3707 3706 3638 -3 3638 3639 3707 -3 3708 3707 3639 -3 3639 3640 3708 -3 3709 3708 3640 -3 3640 3641 3709 -3 3710 3709 3641 -3 3641 3642 3710 -3 3711 3710 3642 -3 3642 3643 3711 -3 3712 3711 3643 -3 3643 3644 3712 -3 3713 3712 3644 -3 3644 3645 3713 -3 3714 3713 3645 -3 3645 3646 3714 -3 3715 3714 3646 -3 3646 3647 3715 -3 3716 3715 3647 -3 3647 3648 3716 -3 3717 3716 3648 -3 3648 3649 3717 -3 3718 3717 3649 -3 3649 3650 3718 -3 3719 3718 3650 -3 3650 5097 5101 -3 3650 5101 3719 -3 3651 3652 3720 -3 3721 3720 3652 -3 3651 3720 5104 -3 3651 5104 5100 -3 3652 3653 3721 -3 3722 3721 3653 -3 3653 3654 3722 -3 3723 3722 3654 -3 3654 3655 3723 -3 3724 3723 3655 -3 3655 3656 3724 -3 3725 3724 3656 -3 3656 3657 3725 -3 3726 3725 3657 -3 3657 3658 3727 -3 3657 3727 3726 -3 3658 3659 3728 -3 3658 3728 3727 -3 3659 3660 3729 -3 3659 3729 3728 -3 3660 3661 3730 -3 3660 3730 3729 -3 3661 3662 3731 -3 3661 3731 3730 -3 3662 3663 3732 -3 3662 3732 3731 -3 3663 3664 3733 -3 3663 3733 3732 -3 3664 3665 3734 -3 3664 3734 3733 -3 
3665 3666 3735 -3 3665 3735 3734 -3 3666 3667 3736 -3 3666 3736 3735 -3 3667 3668 3737 -3 3667 3737 3736 -3 3668 3669 3738 -3 3668 3738 3737 -3 3669 3670 3739 -3 3669 3739 3738 -3 3670 3671 3740 -3 3670 3740 3739 -3 3671 3672 3741 -3 3671 3741 3740 -3 3672 3673 3742 -3 3672 3742 3741 -3 3673 3674 3743 -3 3673 3743 3742 -3 3674 3675 3744 -3 3674 3744 3743 -3 3675 3676 3745 -3 3675 3745 3744 -3 3676 3677 3746 -3 3676 3746 3745 -3 3677 3678 3747 -3 3677 3747 3746 -3 3678 3679 3748 -3 3678 3748 3747 -3 3679 3680 3749 -3 3679 3749 3748 -3 3680 3681 3750 -3 3680 3750 3749 -3 3681 3682 3751 -3 3681 3751 3750 -3 3682 3683 3752 -3 3682 3752 3751 -3 3683 3684 3753 -3 3683 3753 3752 -3 3684 3685 3754 -3 3684 3754 3753 -3 3685 3686 3755 -3 3685 3755 3754 -3 3686 3687 3756 -3 3686 3756 3755 -3 3687 3688 3757 -3 3687 3757 3756 -3 3688 3689 3758 -3 3688 3758 3757 -3 3689 3690 3759 -3 3689 3759 3758 -3 3690 3691 3760 -3 3690 3760 3759 -3 3691 3692 3761 -3 3691 3761 3760 -3 3692 3693 3762 -3 3692 3762 3761 -3 3693 3694 3763 -3 3693 3763 3762 -3 3694 3695 3764 -3 3694 3764 3763 -3 3695 3696 3764 -3 3765 3764 3696 -3 3696 3697 3765 -3 3766 3765 3697 -3 3697 3698 3766 -3 3767 3766 3698 -3 3698 3699 3767 -3 3768 3767 3699 -3 3699 3700 3768 -3 3769 3768 3700 -3 3700 3701 3769 -3 3770 3769 3701 -3 3701 3702 3770 -3 3771 3770 3702 -3 3702 3703 3771 -3 3772 3771 3703 -3 3703 3704 3772 -3 3773 3772 3704 -3 3704 3705 3773 -3 3774 3773 3705 -3 3705 3706 3774 -3 3775 3774 3706 -3 3706 3707 3775 -3 3776 3775 3707 -3 3707 3708 3776 -3 3777 3776 3708 -3 3708 3709 3777 -3 3778 3777 3709 -3 3709 3710 3778 -3 3779 3778 3710 -3 3710 3711 3779 -3 3780 3779 3711 -3 3711 3712 3780 -3 3781 3780 3712 -3 3712 3713 3781 -3 3782 3781 3713 -3 3713 3714 3782 -3 3783 3782 3714 -3 3714 3715 3783 -3 3784 3783 3715 -3 3715 3716 3784 -3 3785 3784 3716 -3 3716 3717 3785 -3 3786 3785 3717 -3 3717 3718 3786 -3 3787 3786 3718 -3 3718 3719 3787 -3 3788 3787 3719 -3 3719 5101 3788 -3 5105 3788 5101 -3 3720 3721 3789 -3 3790 3789 3721 -3 3720 3789 5108 -3 3720 5108 5104 -3 3721 3722 3790 -3 3791 3790 3722 -3 3722 3723 3791 -3 3792 3791 3723 -3 3723 3724 3792 -3 3793 3792 3724 -3 3724 3725 3793 -3 3794 3793 3725 -3 3725 3726 3794 -3 3795 3794 3726 -3 3726 3727 3795 -3 3796 3795 3727 -3 3727 3728 3796 -3 3797 3796 3728 -3 3728 3729 3797 -3 3798 3797 3729 -3 3729 3730 3798 -3 3799 3798 3730 -3 3730 3731 3799 -3 3800 3799 3731 -3 3731 3732 3800 -3 3801 3800 3732 -3 3732 3733 3802 -3 3732 3802 3801 -3 3733 3734 3803 -3 3733 3803 3802 -3 3734 3735 3804 -3 3734 3804 3803 -3 3735 3736 3805 -3 3735 3805 3804 -3 3736 3737 3806 -3 3736 3806 3805 -3 3737 3738 3807 -3 3737 3807 3806 -3 3738 3739 3808 -3 3738 3808 3807 -3 3739 3740 3809 -3 3739 3809 3808 -3 3740 3741 3810 -3 3740 3810 3809 -3 3741 3742 3811 -3 3741 3811 3810 -3 3742 3743 3812 -3 3742 3812 3811 -3 3743 3744 3813 -3 3743 3813 3812 -3 3744 3745 3814 -3 3744 3814 3813 -3 3745 3746 3815 -3 3745 3815 3814 -3 3746 3747 3816 -3 3746 3816 3815 -3 3747 3748 3817 -3 3747 3817 3816 -3 3748 3749 3818 -3 3748 3818 3817 -3 3749 3750 3819 -3 3749 3819 3818 -3 3750 3751 3820 -3 3750 3820 3819 -3 3751 3752 3821 -3 3751 3821 3820 -3 3752 3753 3822 -3 3752 3822 3821 -3 3753 3754 3823 -3 3753 3823 3822 -3 3754 3755 3824 -3 3754 3824 3823 -3 3755 3756 3825 -3 3755 3825 3824 -3 3756 3757 3826 -3 3756 3826 3825 -3 3757 3758 3827 -3 3757 3827 3826 -3 3758 3759 3828 -3 3758 3828 3827 -3 3759 3760 3829 -3 3759 3829 3828 -3 3760 3761 3830 -3 3760 3830 3829 -3 3761 3762 3831 -3 3761 3831 3830 -3 3762 3763 3832 -3 3762 
3832 3831 -3 3763 3764 3833 -3 3763 3833 3832 -3 3764 3765 3834 -3 3764 3834 3833 -3 3765 3766 3835 -3 3765 3835 3834 -3 3766 3767 3836 -3 3766 3836 3835 -3 3767 3768 3837 -3 3767 3837 3836 -3 3768 3769 3838 -3 3768 3838 3837 -3 3769 3770 3839 -3 3769 3839 3838 -3 3770 3771 3839 -3 3840 3839 3771 -3 3771 3772 3840 -3 3841 3840 3772 -3 3772 3773 3841 -3 3842 3841 3773 -3 3773 3774 3842 -3 3843 3842 3774 -3 3774 3775 3843 -3 3844 3843 3775 -3 3775 3776 3844 -3 3845 3844 3776 -3 3776 3777 3845 -3 3846 3845 3777 -3 3777 3778 3846 -3 3847 3846 3778 -3 3778 3779 3847 -3 3848 3847 3779 -3 3779 3780 3848 -3 3849 3848 3780 -3 3780 3781 3849 -3 3850 3849 3781 -3 3781 3782 3850 -3 3851 3850 3782 -3 3782 3783 3851 -3 3852 3851 3783 -3 3783 3784 3852 -3 3853 3852 3784 -3 3784 3785 3853 -3 3854 3853 3785 -3 3785 3786 3854 -3 3855 3854 3786 -3 3786 3787 3855 -3 3856 3855 3787 -3 3787 3788 3856 -3 3857 3856 3788 -3 3788 5105 5109 -3 3788 5109 3857 -3 3789 3790 3858 -3 3859 3858 3790 -3 3789 3858 5112 -3 3789 5112 5108 -3 3790 3791 3859 -3 3860 3859 3791 -3 3791 3792 3860 -3 3861 3860 3792 -3 3792 3793 3861 -3 3862 3861 3793 -3 3793 3794 3862 -3 3863 3862 3794 -3 3794 3795 3863 -3 3864 3863 3795 -3 3795 3796 3864 -3 3865 3864 3796 -3 3796 3797 3865 -3 3866 3865 3797 -3 3797 3798 3866 -3 3867 3866 3798 -3 3798 3799 3867 -3 3868 3867 3799 -3 3799 3800 3868 -3 3869 3868 3800 -3 3800 3801 3869 -3 3870 3869 3801 -3 3801 3802 3870 -3 3871 3870 3802 -3 3802 3803 3871 -3 3872 3871 3803 -3 3803 3804 3872 -3 3873 3872 3804 -3 3804 3805 3873 -3 3874 3873 3805 -3 3805 3806 3874 -3 3875 3874 3806 -3 3806 3807 3875 -3 3876 3875 3807 -3 3807 3808 3876 -3 3877 3876 3808 -3 3808 3809 3878 -3 3808 3878 3877 -3 3809 3810 3879 -3 3809 3879 3878 -3 3810 3811 3880 -3 3810 3880 3879 -3 3811 3812 3881 -3 3811 3881 3880 -3 3812 3813 3882 -3 3812 3882 3881 -3 3813 3814 3883 -3 3813 3883 3882 -3 3814 3815 3884 -3 3814 3884 3883 -3 3815 3816 3885 -3 3815 3885 3884 -3 3816 3817 3886 -3 3816 3886 3885 -3 3817 3818 3887 -3 3817 3887 3886 -3 3818 3819 3888 -3 3818 3888 3887 -3 3819 3820 3889 -3 3819 3889 3888 -3 3820 3821 3890 -3 3820 3890 3889 -3 3821 3822 3891 -3 3821 3891 3890 -3 3822 3823 3892 -3 3822 3892 3891 -3 3823 3824 3893 -3 3823 3893 3892 -3 3824 3825 3894 -3 3824 3894 3893 -3 3825 3826 3895 -3 3825 3895 3894 -3 3826 3827 3896 -3 3826 3896 3895 -3 3827 3828 3897 -3 3827 3897 3896 -3 3828 3829 3898 -3 3828 3898 3897 -3 3829 3830 3899 -3 3829 3899 3898 -3 3830 3831 3900 -3 3830 3900 3899 -3 3831 3832 3901 -3 3831 3901 3900 -3 3832 3833 3902 -3 3832 3902 3901 -3 3833 3834 3903 -3 3833 3903 3902 -3 3834 3835 3904 -3 3834 3904 3903 -3 3835 3836 3905 -3 3835 3905 3904 -3 3836 3837 3906 -3 3836 3906 3905 -3 3837 3838 3907 -3 3837 3907 3906 -3 3838 3839 3908 -3 3838 3908 3907 -3 3839 3840 3909 -3 3839 3909 3908 -3 3840 3841 3910 -3 3840 3910 3909 -3 3841 3842 3911 -3 3841 3911 3910 -3 3842 3843 3912 -3 3842 3912 3911 -3 3843 3844 3913 -3 3843 3913 3912 -3 3844 3845 3914 -3 3844 3914 3913 -3 3845 3846 3915 -3 3845 3915 3914 -3 3846 3847 3915 -3 3916 3915 3847 -3 3847 3848 3916 -3 3917 3916 3848 -3 3848 3849 3917 -3 3918 3917 3849 -3 3849 3850 3918 -3 3919 3918 3850 -3 3850 3851 3919 -3 3920 3919 3851 -3 3851 3852 3920 -3 3921 3920 3852 -3 3852 3853 3921 -3 3922 3921 3853 -3 3853 3854 3922 -3 3923 3922 3854 -3 3854 3855 3923 -3 3924 3923 3855 -3 3855 3856 3924 -3 3925 3924 3856 -3 3856 3857 3925 -3 3926 3925 3857 -3 3857 5109 5113 -3 3857 5113 3926 -3 3858 3859 3927 -3 3928 3927 3859 -3 3858 3927 5116 -3 3858 5116 5112 -3 3859 3860 
3928 -3 3929 3928 3860 -3 3860 3861 3929 -3 3930 3929 3861 -3 3861 3862 3930 -3 3931 3930 3862 -3 3862 3863 3931 -3 3932 3931 3863 -3 3863 3864 3932 -3 3933 3932 3864 -3 3864 3865 3933 -3 3934 3933 3865 -3 3865 3866 3934 -3 3935 3934 3866 -3 3866 3867 3935 -3 3936 3935 3867 -3 3867 3868 3936 -3 3937 3936 3868 -3 3868 3869 3937 -3 3938 3937 3869 -3 3869 3870 3938 -3 3939 3938 3870 -3 3870 3871 3939 -3 3940 3939 3871 -3 3871 3872 3940 -3 3941 3940 3872 -3 3872 3873 3941 -3 3942 3941 3873 -3 3873 3874 3942 -3 3943 3942 3874 -3 3874 3875 3943 -3 3944 3943 3875 -3 3875 3876 3944 -3 3945 3944 3876 -3 3876 3877 3945 -3 3946 3945 3877 -3 3877 3878 3946 -3 3947 3946 3878 -3 3878 3879 3947 -3 3948 3947 3879 -3 3879 3880 3948 -3 3949 3948 3880 -3 3880 3881 3949 -3 3950 3949 3881 -3 3881 3882 3950 -3 3951 3950 3882 -3 3882 3883 3951 -3 3952 3951 3883 -3 3883 3884 3952 -3 3953 3952 3884 -3 3884 3885 3954 -3 3884 3954 3953 -3 3885 3886 3955 -3 3885 3955 3954 -3 3886 3887 3956 -3 3886 3956 3955 -3 3887 3888 3957 -3 3887 3957 3956 -3 3888 3889 3958 -3 3888 3958 3957 -3 3889 3890 3959 -3 3889 3959 3958 -3 3890 3891 3960 -3 3890 3960 3959 -3 3891 3892 3961 -3 3891 3961 3960 -3 3892 3893 3962 -3 3892 3962 3961 -3 3893 3894 3963 -3 3893 3963 3962 -3 3894 3895 3964 -3 3894 3964 3963 -3 3895 3896 3965 -3 3895 3965 3964 -3 3896 3897 3966 -3 3896 3966 3965 -3 3897 3898 3967 -3 3897 3967 3966 -3 3898 3899 3968 -3 3898 3968 3967 -3 3899 3900 3969 -3 3899 3969 3968 -3 3900 3901 3970 -3 3900 3970 3969 -3 3901 3902 3971 -3 3901 3971 3970 -3 3902 3903 3972 -3 3902 3972 3971 -3 3903 3904 3973 -3 3903 3973 3972 -3 3904 3905 3974 -3 3904 3974 3973 -3 3905 3906 3975 -3 3905 3975 3974 -3 3906 3907 3976 -3 3906 3976 3975 -3 3907 3908 3977 -3 3907 3977 3976 -3 3908 3909 3978 -3 3908 3978 3977 -3 3909 3910 3979 -3 3909 3979 3978 -3 3910 3911 3980 -3 3910 3980 3979 -3 3911 3912 3981 -3 3911 3981 3980 -3 3912 3913 3982 -3 3912 3982 3981 -3 3913 3914 3983 -3 3913 3983 3982 -3 3914 3915 3984 -3 3914 3984 3983 -3 3915 3916 3985 -3 3915 3985 3984 -3 3916 3917 3986 -3 3916 3986 3985 -3 3917 3918 3987 -3 3917 3987 3986 -3 3918 3919 3988 -3 3918 3988 3987 -3 3919 3920 3989 -3 3919 3989 3988 -3 3920 3921 3990 -3 3920 3990 3989 -3 3921 3922 3991 -3 3921 3991 3990 -3 3922 3923 3991 -3 3992 3991 3923 -3 3923 3924 3992 -3 3993 3992 3924 -3 3924 3925 3993 -3 3994 3993 3925 -3 3925 3926 3994 -3 3995 3994 3926 -3 3926 5113 3995 -3 5117 3995 5113 -3 3927 3928 3996 -3 3997 3996 3928 -3 3927 3996 5120 -3 3927 5120 5116 -3 3928 3929 3997 -3 3998 3997 3929 -3 3929 3930 3998 -3 3999 3998 3930 -3 3930 3931 3999 -3 4000 3999 3931 -3 3931 3932 4000 -3 4001 4000 3932 -3 3932 3933 4001 -3 4002 4001 3933 -3 3933 3934 4002 -3 4003 4002 3934 -3 3934 3935 4003 -3 4004 4003 3935 -3 3935 3936 4004 -3 4005 4004 3936 -3 3936 3937 4005 -3 4006 4005 3937 -3 3937 3938 4006 -3 4007 4006 3938 -3 3938 3939 4007 -3 4008 4007 3939 -3 3939 3940 4008 -3 4009 4008 3940 -3 3940 3941 4009 -3 4010 4009 3941 -3 3941 3942 4010 -3 4011 4010 3942 -3 3942 3943 4011 -3 4012 4011 3943 -3 3943 3944 4012 -3 4013 4012 3944 -3 3944 3945 4013 -3 4014 4013 3945 -3 3945 3946 4014 -3 4015 4014 3946 -3 3946 3947 4015 -3 4016 4015 3947 -3 3947 3948 4016 -3 4017 4016 3948 -3 3948 3949 4017 -3 4018 4017 3949 -3 3949 3950 4018 -3 4019 4018 3950 -3 3950 3951 4019 -3 4020 4019 3951 -3 3951 3952 4020 -3 4021 4020 3952 -3 3952 3953 4021 -3 4022 4021 3953 -3 3953 3954 4022 -3 4023 4022 3954 -3 3954 3955 4023 -3 4024 4023 3955 -3 3955 3956 4024 -3 4025 4024 3956 -3 3956 3957 4025 -3 4026 4025 3957 -3 
3957 3958 4026 -3 4027 4026 3958 -3 3958 3959 4027 -3 4028 4027 3959 -3 3959 3960 4028 -3 4029 4028 3960 -3 3960 3961 4029 -3 4030 4029 3961 -3 3961 3962 4031 -3 3961 4031 4030 -3 3962 3963 4032 -3 3962 4032 4031 -3 3963 3964 4033 -3 3963 4033 4032 -3 3964 3965 4034 -3 3964 4034 4033 -3 3965 3966 4035 -3 3965 4035 4034 -3 3966 3967 4036 -3 3966 4036 4035 -3 3967 3968 4037 -3 3967 4037 4036 -3 3968 3969 4038 -3 3968 4038 4037 -3 3969 3970 4039 -3 3969 4039 4038 -3 3970 3971 4040 -3 3970 4040 4039 -3 3971 3972 4041 -3 3971 4041 4040 -3 3972 3973 4042 -3 3972 4042 4041 -3 3973 3974 4043 -3 3973 4043 4042 -3 3974 3975 4044 -3 3974 4044 4043 -3 3975 3976 4045 -3 3975 4045 4044 -3 3976 3977 4046 -3 3976 4046 4045 -3 3977 3978 4047 -3 3977 4047 4046 -3 3978 3979 4048 -3 3978 4048 4047 -3 3979 3980 4049 -3 3979 4049 4048 -3 3980 3981 4050 -3 3980 4050 4049 -3 3981 3982 4051 -3 3981 4051 4050 -3 3982 3983 4052 -3 3982 4052 4051 -3 3983 3984 4053 -3 3983 4053 4052 -3 3984 3985 4054 -3 3984 4054 4053 -3 3985 3986 4055 -3 3985 4055 4054 -3 3986 3987 4056 -3 3986 4056 4055 -3 3987 3988 4057 -3 3987 4057 4056 -3 3988 3989 4058 -3 3988 4058 4057 -3 3989 3990 4059 -3 3989 4059 4058 -3 3990 3991 4060 -3 3990 4060 4059 -3 3991 3992 4061 -3 3991 4061 4060 -3 3992 3993 4062 -3 3992 4062 4061 -3 3993 3994 4063 -3 3993 4063 4062 -3 3994 3995 4064 -3 3994 4064 4063 -3 3995 5117 5121 -3 3995 5121 4064 -3 3996 3997 4066 -3 3996 4066 4065 -3 3996 4065 5120 -3 5124 5120 4065 -3 3997 3998 4067 -3 3997 4067 4066 -3 3998 3999 4068 -3 3998 4068 4067 -3 3999 4000 4069 -3 3999 4069 4068 -3 4000 4001 4069 -3 4070 4069 4001 -3 4001 4002 4070 -3 4071 4070 4002 -3 4002 4003 4071 -3 4072 4071 4003 -3 4003 4004 4072 -3 4073 4072 4004 -3 4004 4005 4073 -3 4074 4073 4005 -3 4005 4006 4074 -3 4075 4074 4006 -3 4006 4007 4075 -3 4076 4075 4007 -3 4007 4008 4076 -3 4077 4076 4008 -3 4008 4009 4077 -3 4078 4077 4009 -3 4009 4010 4078 -3 4079 4078 4010 -3 4010 4011 4079 -3 4080 4079 4011 -3 4011 4012 4080 -3 4081 4080 4012 -3 4012 4013 4081 -3 4082 4081 4013 -3 4013 4014 4082 -3 4083 4082 4014 -3 4014 4015 4083 -3 4084 4083 4015 -3 4015 4016 4084 -3 4085 4084 4016 -3 4016 4017 4085 -3 4086 4085 4017 -3 4017 4018 4086 -3 4087 4086 4018 -3 4018 4019 4087 -3 4088 4087 4019 -3 4019 4020 4088 -3 4089 4088 4020 -3 4020 4021 4089 -3 4090 4089 4021 -3 4021 4022 4090 -3 4091 4090 4022 -3 4022 4023 4091 -3 4092 4091 4023 -3 4023 4024 4092 -3 4093 4092 4024 -3 4024 4025 4093 -3 4094 4093 4025 -3 4025 4026 4094 -3 4095 4094 4026 -3 4026 4027 4095 -3 4096 4095 4027 -3 4027 4028 4096 -3 4097 4096 4028 -3 4028 4029 4097 -3 4098 4097 4029 -3 4029 4030 4098 -3 4099 4098 4030 -3 4030 4031 4099 -3 4100 4099 4031 -3 4031 4032 4100 -3 4101 4100 4032 -3 4032 4033 4101 -3 4102 4101 4033 -3 4033 4034 4102 -3 4103 4102 4034 -3 4034 4035 4103 -3 4104 4103 4035 -3 4035 4036 4104 -3 4105 4104 4036 -3 4036 4037 4105 -3 4106 4105 4037 -3 4037 4038 4106 -3 4107 4106 4038 -3 4038 4039 4107 -3 4108 4107 4039 -3 4039 4040 4109 -3 4039 4109 4108 -3 4040 4041 4110 -3 4040 4110 4109 -3 4041 4042 4111 -3 4041 4111 4110 -3 4042 4043 4112 -3 4042 4112 4111 -3 4043 4044 4113 -3 4043 4113 4112 -3 4044 4045 4114 -3 4044 4114 4113 -3 4045 4046 4115 -3 4045 4115 4114 -3 4046 4047 4116 -3 4046 4116 4115 -3 4047 4048 4117 -3 4047 4117 4116 -3 4048 4049 4118 -3 4048 4118 4117 -3 4049 4050 4119 -3 4049 4119 4118 -3 4050 4051 4120 -3 4050 4120 4119 -3 4051 4052 4121 -3 4051 4121 4120 -3 4052 4053 4122 -3 4052 4122 4121 -3 4053 4054 4123 -3 4053 4123 4122 -3 4054 4055 4124 -3 4054 
4124 4123 -3 4055 4056 4125 -3 4055 4125 4124 -3 4056 4057 4126 -3 4056 4126 4125 -3 4057 4058 4127 -3 4057 4127 4126 -3 4058 4059 4128 -3 4058 4128 4127 -3 4059 4060 4129 -3 4059 4129 4128 -3 4060 4061 4130 -3 4060 4130 4129 -3 4061 4062 4131 -3 4061 4131 4130 -3 4062 4063 4132 -3 4062 4132 4131 -3 4063 4064 4133 -3 4063 4133 4132 -3 4064 5121 4133 -3 5125 4133 5121 -3 4065 4066 4135 -3 4065 4135 4134 -3 4065 4134 5124 -3 5128 5124 4134 -3 4066 4067 4136 -3 4066 4136 4135 -3 4067 4068 4137 -3 4067 4137 4136 -3 4068 4069 4138 -3 4068 4138 4137 -3 4069 4070 4139 -3 4069 4139 4138 -3 4070 4071 4140 -3 4070 4140 4139 -3 4071 4072 4141 -3 4071 4141 4140 -3 4072 4073 4142 -3 4072 4142 4141 -3 4073 4074 4143 -3 4073 4143 4142 -3 4074 4075 4144 -3 4074 4144 4143 -3 4075 4076 4145 -3 4075 4145 4144 -3 4076 4077 4146 -3 4076 4146 4145 -3 4077 4078 4147 -3 4077 4147 4146 -3 4078 4079 4147 -3 4148 4147 4079 -3 4079 4080 4148 -3 4149 4148 4080 -3 4080 4081 4149 -3 4150 4149 4081 -3 4081 4082 4150 -3 4151 4150 4082 -3 4082 4083 4151 -3 4152 4151 4083 -3 4083 4084 4152 -3 4153 4152 4084 -3 4084 4085 4153 -3 4154 4153 4085 -3 4085 4086 4154 -3 4155 4154 4086 -3 4086 4087 4155 -3 4156 4155 4087 -3 4087 4088 4156 -3 4157 4156 4088 -3 4088 4089 4157 -3 4158 4157 4089 -3 4089 4090 4158 -3 4159 4158 4090 -3 4090 4091 4159 -3 4160 4159 4091 -3 4091 4092 4160 -3 4161 4160 4092 -3 4092 4093 4161 -3 4162 4161 4093 -3 4093 4094 4162 -3 4163 4162 4094 -3 4094 4095 4163 -3 4164 4163 4095 -3 4095 4096 4164 -3 4165 4164 4096 -3 4096 4097 4165 -3 4166 4165 4097 -3 4097 4098 4166 -3 4167 4166 4098 -3 4098 4099 4167 -3 4168 4167 4099 -3 4099 4100 4168 -3 4169 4168 4100 -3 4100 4101 4169 -3 4170 4169 4101 -3 4101 4102 4170 -3 4171 4170 4102 -3 4102 4103 4171 -3 4172 4171 4103 -3 4103 4104 4172 -3 4173 4172 4104 -3 4104 4105 4173 -3 4174 4173 4105 -3 4105 4106 4174 -3 4175 4174 4106 -3 4106 4107 4175 -3 4176 4175 4107 -3 4107 4108 4176 -3 4177 4176 4108 -3 4108 4109 4177 -3 4178 4177 4109 -3 4109 4110 4178 -3 4179 4178 4110 -3 4110 4111 4179 -3 4180 4179 4111 -3 4111 4112 4180 -3 4181 4180 4112 -3 4112 4113 4181 -3 4182 4181 4113 -3 4113 4114 4182 -3 4183 4182 4114 -3 4114 4115 4183 -3 4184 4183 4115 -3 4115 4116 4184 -3 4185 4184 4116 -3 4116 4117 4185 -3 4186 4185 4117 -3 4117 4118 4186 -3 4187 4186 4118 -3 4118 4119 4188 -3 4118 4188 4187 -3 4119 4120 4189 -3 4119 4189 4188 -3 4120 4121 4190 -3 4120 4190 4189 -3 4121 4122 4191 -3 4121 4191 4190 -3 4122 4123 4192 -3 4122 4192 4191 -3 4123 4124 4193 -3 4123 4193 4192 -3 4124 4125 4194 -3 4124 4194 4193 -3 4125 4126 4195 -3 4125 4195 4194 -3 4126 4127 4196 -3 4126 4196 4195 -3 4127 4128 4197 -3 4127 4197 4196 -3 4128 4129 4198 -3 4128 4198 4197 -3 4129 4130 4199 -3 4129 4199 4198 -3 4130 4131 4200 -3 4130 4200 4199 -3 4131 4132 4201 -3 4131 4201 4200 -3 4132 4133 4202 -3 4132 4202 4201 -3 4133 5125 5129 -3 4133 5129 4202 -3 4134 4135 4204 -3 4134 4204 4203 -3 4134 4203 5128 -3 5132 5128 4203 -3 4135 4136 4205 -3 4135 4205 4204 -3 4136 4137 4206 -3 4136 4206 4205 -3 4137 4138 4207 -3 4137 4207 4206 -3 4138 4139 4208 -3 4138 4208 4207 -3 4139 4140 4209 -3 4139 4209 4208 -3 4140 4141 4210 -3 4140 4210 4209 -3 4141 4142 4211 -3 4141 4211 4210 -3 4142 4143 4212 -3 4142 4212 4211 -3 4143 4144 4213 -3 4143 4213 4212 -3 4144 4145 4214 -3 4144 4214 4213 -3 4145 4146 4215 -3 4145 4215 4214 -3 4146 4147 4216 -3 4146 4216 4215 -3 4147 4148 4217 -3 4147 4217 4216 -3 4148 4149 4218 -3 4148 4218 4217 -3 4149 4150 4219 -3 4149 4219 4218 -3 4150 4151 4220 -3 4150 4220 4219 -3 4151 4152 
4221 -3 4151 4221 4220 -3 4152 4153 4222 -3 4152 4222 4221 -3 4153 4154 4223 -3 4153 4223 4222 -3 4154 4155 4224 -3 4154 4224 4223 -3 4155 4156 4225 -3 4155 4225 4224 -3 4156 4157 4226 -3 4156 4226 4225 -3 4157 4158 4226 -3 4227 4226 4158 -3 4158 4159 4227 -3 4228 4227 4159 -3 4159 4160 4228 -3 4229 4228 4160 -3 4160 4161 4229 -3 4230 4229 4161 -3 4161 4162 4230 -3 4231 4230 4162 -3 4162 4163 4231 -3 4232 4231 4163 -3 4163 4164 4232 -3 4233 4232 4164 -3 4164 4165 4233 -3 4234 4233 4165 -3 4165 4166 4234 -3 4235 4234 4166 -3 4166 4167 4235 -3 4236 4235 4167 -3 4167 4168 4236 -3 4237 4236 4168 -3 4168 4169 4237 -3 4238 4237 4169 -3 4169 4170 4238 -3 4239 4238 4170 -3 4170 4171 4239 -3 4240 4239 4171 -3 4171 4172 4240 -3 4241 4240 4172 -3 4172 4173 4241 -3 4242 4241 4173 -3 4173 4174 4242 -3 4243 4242 4174 -3 4174 4175 4243 -3 4244 4243 4175 -3 4175 4176 4244 -3 4245 4244 4176 -3 4176 4177 4245 -3 4246 4245 4177 -3 4177 4178 4246 -3 4247 4246 4178 -3 4178 4179 4247 -3 4248 4247 4179 -3 4179 4180 4248 -3 4249 4248 4180 -3 4180 4181 4249 -3 4250 4249 4181 -3 4181 4182 4250 -3 4251 4250 4182 -3 4182 4183 4251 -3 4252 4251 4183 -3 4183 4184 4252 -3 4253 4252 4184 -3 4184 4185 4253 -3 4254 4253 4185 -3 4185 4186 4254 -3 4255 4254 4186 -3 4186 4187 4255 -3 4256 4255 4187 -3 4187 4188 4256 -3 4257 4256 4188 -3 4188 4189 4257 -3 4258 4257 4189 -3 4189 4190 4258 -3 4259 4258 4190 -3 4190 4191 4259 -3 4260 4259 4191 -3 4191 4192 4260 -3 4261 4260 4192 -3 4192 4193 4261 -3 4262 4261 4193 -3 4193 4194 4262 -3 4263 4262 4194 -3 4194 4195 4263 -3 4264 4263 4195 -3 4195 4196 4264 -3 4265 4264 4196 -3 4196 4197 4265 -3 4266 4265 4197 -3 4197 4198 4267 -3 4197 4267 4266 -3 4198 4199 4268 -3 4198 4268 4267 -3 4199 4200 4269 -3 4199 4269 4268 -3 4200 4201 4270 -3 4200 4270 4269 -3 4201 4202 4271 -3 4201 4271 4270 -3 4202 5129 5133 -3 4202 5133 4271 -3 4203 4204 4273 -3 4203 4273 4272 -3 4203 4272 5132 -3 5136 5132 4272 -3 4204 4205 4274 -3 4204 4274 4273 -3 4205 4206 4275 -3 4205 4275 4274 -3 4206 4207 4276 -3 4206 4276 4275 -3 4207 4208 4277 -3 4207 4277 4276 -3 4208 4209 4278 -3 4208 4278 4277 -3 4209 4210 4279 -3 4209 4279 4278 -3 4210 4211 4280 -3 4210 4280 4279 -3 4211 4212 4281 -3 4211 4281 4280 -3 4212 4213 4282 -3 4212 4282 4281 -3 4213 4214 4283 -3 4213 4283 4282 -3 4214 4215 4284 -3 4214 4284 4283 -3 4215 4216 4285 -3 4215 4285 4284 -3 4216 4217 4286 -3 4216 4286 4285 -3 4217 4218 4287 -3 4217 4287 4286 -3 4218 4219 4288 -3 4218 4288 4287 -3 4219 4220 4289 -3 4219 4289 4288 -3 4220 4221 4290 -3 4220 4290 4289 -3 4221 4222 4291 -3 4221 4291 4290 -3 4222 4223 4292 -3 4222 4292 4291 -3 4223 4224 4293 -3 4223 4293 4292 -3 4224 4225 4294 -3 4224 4294 4293 -3 4225 4226 4295 -3 4225 4295 4294 -3 4226 4227 4296 -3 4226 4296 4295 -3 4227 4228 4297 -3 4227 4297 4296 -3 4228 4229 4298 -3 4228 4298 4297 -3 4229 4230 4299 -3 4229 4299 4298 -3 4230 4231 4300 -3 4230 4300 4299 -3 4231 4232 4301 -3 4231 4301 4300 -3 4232 4233 4302 -3 4232 4302 4301 -3 4233 4234 4303 -3 4233 4303 4302 -3 4234 4235 4304 -3 4234 4304 4303 -3 4235 4236 4305 -3 4235 4305 4304 -3 4236 4237 4306 -3 4236 4306 4305 -3 4237 4238 4306 -3 4307 4306 4238 -3 4238 4239 4307 -3 4308 4307 4239 -3 4239 4240 4308 -3 4309 4308 4240 -3 4240 4241 4309 -3 4310 4309 4241 -3 4241 4242 4310 -3 4311 4310 4242 -3 4242 4243 4311 -3 4312 4311 4243 -3 4243 4244 4312 -3 4313 4312 4244 -3 4244 4245 4313 -3 4314 4313 4245 -3 4245 4246 4314 -3 4315 4314 4246 -3 4246 4247 4315 -3 4316 4315 4247 -3 4247 4248 4316 -3 4317 4316 4248 -3 4248 4249 4317 -3 4318 4317 4249 -3 
4249 4250 4318 -3 4319 4318 4250 -3 4250 4251 4319 -3 4320 4319 4251 -3 4251 4252 4320 -3 4321 4320 4252 -3 4252 4253 4321 -3 4322 4321 4253 -3 4253 4254 4322 -3 4323 4322 4254 -3 4254 4255 4323 -3 4324 4323 4255 -3 4255 4256 4324 -3 4325 4324 4256 -3 4256 4257 4325 -3 4326 4325 4257 -3 4257 4258 4326 -3 4327 4326 4258 -3 4258 4259 4327 -3 4328 4327 4259 -3 4259 4260 4328 -3 4329 4328 4260 -3 4260 4261 4329 -3 4330 4329 4261 -3 4261 4262 4330 -3 4331 4330 4262 -3 4262 4263 4331 -3 4332 4331 4263 -3 4263 4264 4332 -3 4333 4332 4264 -3 4264 4265 4333 -3 4334 4333 4265 -3 4265 4266 4334 -3 4335 4334 4266 -3 4266 4267 4335 -3 4336 4335 4267 -3 4267 4268 4336 -3 4337 4336 4268 -3 4268 4269 4337 -3 4338 4337 4269 -3 4269 4270 4338 -3 4339 4338 4270 -3 4270 4271 4339 -3 4340 4339 4271 -3 4271 5133 4340 -3 5137 4340 5133 -3 4272 4273 4341 -3 4342 4341 4273 -3 4272 4341 5140 -3 4272 5140 5136 -3 4273 4274 4342 -3 4343 4342 4274 -3 4274 4275 4343 -3 4344 4343 4275 -3 4275 4276 4344 -3 4345 4344 4276 -3 4276 4277 4345 -3 4346 4345 4277 -3 4277 4278 4347 -3 4277 4347 4346 -3 4278 4279 4348 -3 4278 4348 4347 -3 4279 4280 4349 -3 4279 4349 4348 -3 4280 4281 4350 -3 4280 4350 4349 -3 4281 4282 4351 -3 4281 4351 4350 -3 4282 4283 4352 -3 4282 4352 4351 -3 4283 4284 4353 -3 4283 4353 4352 -3 4284 4285 4354 -3 4284 4354 4353 -3 4285 4286 4355 -3 4285 4355 4354 -3 4286 4287 4356 -3 4286 4356 4355 -3 4287 4288 4357 -3 4287 4357 4356 -3 4288 4289 4358 -3 4288 4358 4357 -3 4289 4290 4359 -3 4289 4359 4358 -3 4290 4291 4360 -3 4290 4360 4359 -3 4291 4292 4361 -3 4291 4361 4360 -3 4292 4293 4362 -3 4292 4362 4361 -3 4293 4294 4363 -3 4293 4363 4362 -3 4294 4295 4364 -3 4294 4364 4363 -3 4295 4296 4365 -3 4295 4365 4364 -3 4296 4297 4366 -3 4296 4366 4365 -3 4297 4298 4367 -3 4297 4367 4366 -3 4298 4299 4368 -3 4298 4368 4367 -3 4299 4300 4369 -3 4299 4369 4368 -3 4300 4301 4370 -3 4300 4370 4369 -3 4301 4302 4371 -3 4301 4371 4370 -3 4302 4303 4372 -3 4302 4372 4371 -3 4303 4304 4373 -3 4303 4373 4372 -3 4304 4305 4374 -3 4304 4374 4373 -3 4305 4306 4375 -3 4305 4375 4374 -3 4306 4307 4376 -3 4306 4376 4375 -3 4307 4308 4377 -3 4307 4377 4376 -3 4308 4309 4378 -3 4308 4378 4377 -3 4309 4310 4379 -3 4309 4379 4378 -3 4310 4311 4380 -3 4310 4380 4379 -3 4311 4312 4381 -3 4311 4381 4380 -3 4312 4313 4382 -3 4312 4382 4381 -3 4313 4314 4383 -3 4313 4383 4382 -3 4314 4315 4384 -3 4314 4384 4383 -3 4315 4316 4385 -3 4315 4385 4384 -3 4316 4317 4386 -3 4316 4386 4385 -3 4317 4318 4386 -3 4387 4386 4318 -3 4318 4319 4387 -3 4388 4387 4319 -3 4319 4320 4388 -3 4389 4388 4320 -3 4320 4321 4389 -3 4390 4389 4321 -3 4321 4322 4390 -3 4391 4390 4322 -3 4322 4323 4391 -3 4392 4391 4323 -3 4323 4324 4392 -3 4393 4392 4324 -3 4324 4325 4393 -3 4394 4393 4325 -3 4325 4326 4394 -3 4395 4394 4326 -3 4326 4327 4395 -3 4396 4395 4327 -3 4327 4328 4396 -3 4397 4396 4328 -3 4328 4329 4397 -3 4398 4397 4329 -3 4329 4330 4398 -3 4399 4398 4330 -3 4330 4331 4399 -3 4400 4399 4331 -3 4331 4332 4400 -3 4401 4400 4332 -3 4332 4333 4401 -3 4402 4401 4333 -3 4333 4334 4402 -3 4403 4402 4334 -3 4334 4335 4403 -3 4404 4403 4335 -3 4335 4336 4404 -3 4405 4404 4336 -3 4336 4337 4405 -3 4406 4405 4337 -3 4337 4338 4406 -3 4407 4406 4338 -3 4338 4339 4407 -3 4408 4407 4339 -3 4339 4340 4408 -3 4409 4408 4340 -3 4340 5137 5141 -3 4340 5141 4409 -3 4341 4342 4410 -3 4411 4410 4342 -3 4341 4410 5144 -3 4341 5144 5140 -3 4342 4343 4411 -3 4412 4411 4343 -3 4343 4344 4412 -3 4413 4412 4344 -3 4344 4345 4413 -3 4414 4413 4345 -3 4345 4346 4414 -3 4415 
4414 4346 -3 4346 4347 4415 -3 4416 4415 4347 -3 4347 4348 4416 -3 4417 4416 4348 -3 4348 4349 4417 -3 4418 4417 4349 -3 4349 4350 4418 -3 4419 4418 4350 -3 4350 4351 4419 -3 4420 4419 4351 -3 4351 4352 4420 -3 4421 4420 4352 -3 4352 4353 4421 -3 4422 4421 4353 -3 4353 4354 4422 -3 4423 4422 4354 -3 4354 4355 4423 -3 4424 4423 4355 -3 4355 4356 4424 -3 4425 4424 4356 -3 4356 4357 4425 -3 4426 4425 4357 -3 4357 4358 4426 -3 4427 4426 4358 -3 4358 4359 4428 -3 4358 4428 4427 -3 4359 4360 4429 -3 4359 4429 4428 -3 4360 4361 4430 -3 4360 4430 4429 -3 4361 4362 4431 -3 4361 4431 4430 -3 4362 4363 4432 -3 4362 4432 4431 -3 4363 4364 4433 -3 4363 4433 4432 -3 4364 4365 4434 -3 4364 4434 4433 -3 4365 4366 4435 -3 4365 4435 4434 -3 4366 4367 4436 -3 4366 4436 4435 -3 4367 4368 4437 -3 4367 4437 4436 -3 4368 4369 4438 -3 4368 4438 4437 -3 4369 4370 4439 -3 4369 4439 4438 -3 4370 4371 4440 -3 4370 4440 4439 -3 4371 4372 4441 -3 4371 4441 4440 -3 4372 4373 4442 -3 4372 4442 4441 -3 4373 4374 4443 -3 4373 4443 4442 -3 4374 4375 4444 -3 4374 4444 4443 -3 4375 4376 4445 -3 4375 4445 4444 -3 4376 4377 4446 -3 4376 4446 4445 -3 4377 4378 4447 -3 4377 4447 4446 -3 4378 4379 4448 -3 4378 4448 4447 -3 4379 4380 4449 -3 4379 4449 4448 -3 4380 4381 4450 -3 4380 4450 4449 -3 4381 4382 4451 -3 4381 4451 4450 -3 4382 4383 4452 -3 4382 4452 4451 -3 4383 4384 4453 -3 4383 4453 4452 -3 4384 4385 4454 -3 4384 4454 4453 -3 4385 4386 4455 -3 4385 4455 4454 -3 4386 4387 4456 -3 4386 4456 4455 -3 4387 4388 4457 -3 4387 4457 4456 -3 4388 4389 4458 -3 4388 4458 4457 -3 4389 4390 4459 -3 4389 4459 4458 -3 4390 4391 4460 -3 4390 4460 4459 -3 4391 4392 4461 -3 4391 4461 4460 -3 4392 4393 4462 -3 4392 4462 4461 -3 4393 4394 4463 -3 4393 4463 4462 -3 4394 4395 4464 -3 4394 4464 4463 -3 4395 4396 4465 -3 4395 4465 4464 -3 4396 4397 4466 -3 4396 4466 4465 -3 4397 4398 4467 -3 4397 4467 4466 -3 4398 4399 4468 -3 4398 4468 4467 -3 4399 4400 4468 -3 4469 4468 4400 -3 4400 4401 4469 -3 4470 4469 4401 -3 4401 4402 4470 -3 4471 4470 4402 -3 4402 4403 4471 -3 4472 4471 4403 -3 4403 4404 4472 -3 4473 4472 4404 -3 4404 4405 4473 -3 4474 4473 4405 -3 4405 4406 4474 -3 4475 4474 4406 -3 4406 4407 4475 -3 4476 4475 4407 -3 4407 4408 4476 -3 4477 4476 4408 -3 4408 4409 4477 -3 4478 4477 4409 -3 4409 5141 4478 -3 5145 4478 5141 -3 4410 4411 4479 -3 4480 4479 4411 -3 4410 4479 5148 -3 4410 5148 5144 -3 4411 4412 4480 -3 4481 4480 4412 -3 4412 4413 4481 -3 4482 4481 4413 -3 4413 4414 4482 -3 4483 4482 4414 -3 4414 4415 4483 -3 4484 4483 4415 -3 4415 4416 4484 -3 4485 4484 4416 -3 4416 4417 4485 -3 4486 4485 4417 -3 4417 4418 4486 -3 4487 4486 4418 -3 4418 4419 4487 -3 4488 4487 4419 -3 4419 4420 4488 -3 4489 4488 4420 -3 4420 4421 4489 -3 4490 4489 4421 -3 4421 4422 4490 -3 4491 4490 4422 -3 4422 4423 4491 -3 4492 4491 4423 -3 4423 4424 4492 -3 4493 4492 4424 -3 4424 4425 4493 -3 4494 4493 4425 -3 4425 4426 4494 -3 4495 4494 4426 -3 4426 4427 4495 -3 4496 4495 4427 -3 4427 4428 4496 -3 4497 4496 4428 -3 4428 4429 4497 -3 4498 4497 4429 -3 4429 4430 4498 -3 4499 4498 4430 -3 4430 4431 4499 -3 4500 4499 4431 -3 4431 4432 4500 -3 4501 4500 4432 -3 4432 4433 4501 -3 4502 4501 4433 -3 4433 4434 4502 -3 4503 4502 4434 -3 4434 4435 4503 -3 4504 4503 4435 -3 4435 4436 4504 -3 4505 4504 4436 -3 4436 4437 4505 -3 4506 4505 4437 -3 4437 4438 4506 -3 4507 4506 4438 -3 4438 4439 4507 -3 4508 4507 4439 -3 4439 4440 4508 -3 4509 4508 4440 -3 4440 4441 4510 -3 4440 4510 4509 -3 4441 4442 4511 -3 4441 4511 4510 -3 4442 4443 4512 -3 4442 4512 4511 -3 4443 4444 
4513 -3 4443 4513 4512 -3 4444 4445 4514 -3 4444 4514 4513 -3 4445 4446 4515 -3 4445 4515 4514 -3 4446 4447 4516 -3 4446 4516 4515 -3 4447 4448 4517 -3 4447 4517 4516 -3 4448 4449 4518 -3 4448 4518 4517 -3 4449 4450 4519 -3 4449 4519 4518 -3 4450 4451 4520 -3 4450 4520 4519 -3 4451 4452 4521 -3 4451 4521 4520 -3 4452 4453 4522 -3 4452 4522 4521 -3 4453 4454 4523 -3 4453 4523 4522 -3 4454 4455 4524 -3 4454 4524 4523 -3 4455 4456 4525 -3 4455 4525 4524 -3 4456 4457 4526 -3 4456 4526 4525 -3 4457 4458 4527 -3 4457 4527 4526 -3 4458 4459 4528 -3 4458 4528 4527 -3 4459 4460 4529 -3 4459 4529 4528 -3 4460 4461 4530 -3 4460 4530 4529 -3 4461 4462 4531 -3 4461 4531 4530 -3 4462 4463 4532 -3 4462 4532 4531 -3 4463 4464 4533 -3 4463 4533 4532 -3 4464 4465 4534 -3 4464 4534 4533 -3 4465 4466 4535 -3 4465 4535 4534 -3 4466 4467 4536 -3 4466 4536 4535 -3 4467 4468 4537 -3 4467 4537 4536 -3 4468 4469 4538 -3 4468 4538 4537 -3 4469 4470 4539 -3 4469 4539 4538 -3 4470 4471 4540 -3 4470 4540 4539 -3 4471 4472 4541 -3 4471 4541 4540 -3 4472 4473 4542 -3 4472 4542 4541 -3 4473 4474 4543 -3 4473 4543 4542 -3 4474 4475 4544 -3 4474 4544 4543 -3 4475 4476 4545 -3 4475 4545 4544 -3 4476 4477 4546 -3 4476 4546 4545 -3 4477 4478 4547 -3 4477 4547 4546 -3 4478 5145 4547 -3 5149 4547 5145 -3 4479 4480 4549 -3 4479 4549 4548 -3 4479 4548 5148 -3 5152 5148 4548 -3 4480 4481 4550 -3 4480 4550 4549 -3 4481 4482 4550 -3 4551 4550 4482 -3 4482 4483 4551 -3 4552 4551 4483 -3 4483 4484 4552 -3 4553 4552 4484 -3 4484 4485 4553 -3 4554 4553 4485 -3 4485 4486 4554 -3 4555 4554 4486 -3 4486 4487 4555 -3 4556 4555 4487 -3 4487 4488 4556 -3 4557 4556 4488 -3 4488 4489 4557 -3 4558 4557 4489 -3 4489 4490 4558 -3 4559 4558 4490 -3 4490 4491 4559 -3 4560 4559 4491 -3 4491 4492 4560 -3 4561 4560 4492 -3 4492 4493 4561 -3 4562 4561 4493 -3 4493 4494 4562 -3 4563 4562 4494 -3 4494 4495 4563 -3 4564 4563 4495 -3 4495 4496 4564 -3 4565 4564 4496 -3 4496 4497 4565 -3 4566 4565 4497 -3 4497 4498 4566 -3 4567 4566 4498 -3 4498 4499 4567 -3 4568 4567 4499 -3 4499 4500 4568 -3 4569 4568 4500 -3 4500 4501 4569 -3 4570 4569 4501 -3 4501 4502 4570 -3 4571 4570 4502 -3 4502 4503 4571 -3 4572 4571 4503 -3 4503 4504 4572 -3 4573 4572 4504 -3 4504 4505 4573 -3 4574 4573 4505 -3 4505 4506 4574 -3 4575 4574 4506 -3 4506 4507 4575 -3 4576 4575 4507 -3 4507 4508 4576 -3 4577 4576 4508 -3 4508 4509 4577 -3 4578 4577 4509 -3 4509 4510 4578 -3 4579 4578 4510 -3 4510 4511 4579 -3 4580 4579 4511 -3 4511 4512 4580 -3 4581 4580 4512 -3 4512 4513 4581 -3 4582 4581 4513 -3 4513 4514 4582 -3 4583 4582 4514 -3 4514 4515 4583 -3 4584 4583 4515 -3 4515 4516 4584 -3 4585 4584 4516 -3 4516 4517 4585 -3 4586 4585 4517 -3 4517 4518 4586 -3 4587 4586 4518 -3 4518 4519 4587 -3 4588 4587 4519 -3 4519 4520 4588 -3 4589 4588 4520 -3 4520 4521 4589 -3 4590 4589 4521 -3 4521 4522 4590 -3 4591 4590 4522 -3 4522 4523 4592 -3 4522 4592 4591 -3 4523 4524 4593 -3 4523 4593 4592 -3 4524 4525 4594 -3 4524 4594 4593 -3 4525 4526 4595 -3 4525 4595 4594 -3 4526 4527 4596 -3 4526 4596 4595 -3 4527 4528 4597 -3 4527 4597 4596 -3 4528 4529 4598 -3 4528 4598 4597 -3 4529 4530 4599 -3 4529 4599 4598 -3 4530 4531 4600 -3 4530 4600 4599 -3 4531 4532 4601 -3 4531 4601 4600 -3 4532 4533 4602 -3 4532 4602 4601 -3 4533 4534 4603 -3 4533 4603 4602 -3 4534 4535 4604 -3 4534 4604 4603 -3 4535 4536 4605 -3 4535 4605 4604 -3 4536 4537 4606 -3 4536 4606 4605 -3 4537 4538 4607 -3 4537 4607 4606 -3 4538 4539 4608 -3 4538 4608 4607 -3 4539 4540 4609 -3 4539 4609 4608 -3 4540 4541 4610 -3 4540 4610 4609 -3 
4541 4542 4611 -3 4541 4611 4610 -3 4542 4543 4612 -3 4542 4612 4611 -3 4543 4544 4613 -3 4543 4613 4612 -3 4544 4545 4614 -3 4544 4614 4613 -3 4545 4546 4615 -3 4545 4615 4614 -3 4546 4547 4616 -3 4546 4616 4615 -3 4547 5149 5155 -3 4547 5155 4616 -3 4548 4549 5153 -3 4548 5153 5152 -3 4549 4550 4618 -3 4549 4618 4617 -3 4549 4617 5153 -3 5159 5153 4617 -3 4550 4551 4619 -3 4550 4619 4618 -3 4551 4552 4620 -3 4551 4620 4619 -3 4552 4553 4621 -3 4552 4621 4620 -3 4553 4554 4622 -3 4553 4622 4621 -3 4554 4555 4623 -3 4554 4623 4622 -3 4555 4556 4624 -3 4555 4624 4623 -3 4556 4557 4625 -3 4556 4625 4624 -3 4557 4558 4626 -3 4557 4626 4625 -3 4558 4559 4627 -3 4558 4627 4626 -3 4559 4560 4628 -3 4559 4628 4627 -3 4560 4561 4629 -3 4560 4629 4628 -3 4561 4562 4630 -3 4561 4630 4629 -3 4562 4563 4631 -3 4562 4631 4630 -3 4563 4564 4632 -3 4563 4632 4631 -3 4564 4565 4632 -3 4633 4632 4565 -3 4565 4566 4633 -3 4634 4633 4566 -3 4566 4567 4634 -3 4635 4634 4567 -3 4567 4568 4635 -3 4636 4635 4568 -3 4568 4569 4636 -3 4637 4636 4569 -3 4569 4570 4637 -3 4638 4637 4570 -3 4570 4571 4638 -3 4639 4638 4571 -3 4571 4572 4639 -3 4640 4639 4572 -3 4572 4573 4640 -3 4641 4640 4573 -3 4573 4574 4641 -3 4642 4641 4574 -3 4574 4575 4642 -3 4643 4642 4575 -3 4575 4576 4643 -3 4644 4643 4576 -3 4576 4577 4644 -3 4645 4644 4577 -3 4577 4578 4645 -3 4646 4645 4578 -3 4578 4579 4646 -3 4647 4646 4579 -3 4579 4580 4647 -3 4648 4647 4580 -3 4580 4581 4648 -3 4649 4648 4581 -3 4581 4582 4649 -3 4650 4649 4582 -3 4582 4583 4650 -3 4651 4650 4583 -3 4583 4584 4651 -3 4652 4651 4584 -3 4584 4585 4652 -3 4653 4652 4585 -3 4585 4586 4653 -3 4654 4653 4586 -3 4586 4587 4654 -3 4655 4654 4587 -3 4587 4588 4655 -3 4656 4655 4588 -3 4588 4589 4656 -3 4657 4656 4589 -3 4589 4590 4657 -3 4658 4657 4590 -3 4590 4591 4658 -3 4659 4658 4591 -3 4591 4592 4659 -3 4660 4659 4592 -3 4592 4593 4660 -3 4661 4660 4593 -3 4593 4594 4661 -3 4662 4661 4594 -3 4594 4595 4662 -3 4663 4662 4595 -3 4595 4596 4663 -3 4664 4663 4596 -3 4596 4597 4664 -3 4665 4664 4597 -3 4597 4598 4665 -3 4666 4665 4598 -3 4598 4599 4666 -3 4667 4666 4599 -3 4599 4600 4667 -3 4668 4667 4600 -3 4600 4601 4668 -3 4669 4668 4601 -3 4601 4602 4669 -3 4670 4669 4602 -3 4602 4603 4670 -3 4671 4670 4603 -3 4603 4604 4671 -3 4672 4671 4604 -3 4604 4605 4672 -3 4673 4672 4605 -3 4605 4606 4674 -3 4605 4674 4673 -3 4606 4607 4675 -3 4606 4675 4674 -3 4607 4608 4676 -3 4607 4676 4675 -3 4608 4609 4677 -3 4608 4677 4676 -3 4609 4610 4678 -3 4609 4678 4677 -3 4610 4611 4679 -3 4610 4679 4678 -3 4611 4612 4680 -3 4611 4680 4679 -3 4612 4613 4681 -3 4612 4681 4680 -3 4613 4614 4682 -3 4613 4682 4681 -3 4614 4615 4683 -3 4614 4683 4682 -3 4615 4616 5155 -3 4615 5155 5154 -3 4615 5154 4683 -3 5162 4683 5154 -3 4617 4618 5160 -3 4617 5160 5159 -3 4618 4619 4685 -3 4618 4685 4684 -3 4618 4684 5160 -3 5167 5160 4684 -3 4619 4620 4686 -3 4619 4686 4685 -3 4620 4621 4687 -3 4620 4687 4686 -3 4621 4622 4688 -3 4621 4688 4687 -3 4622 4623 4689 -3 4622 4689 4688 -3 4623 4624 4690 -3 4623 4690 4689 -3 4624 4625 4691 -3 4624 4691 4690 -3 4625 4626 4692 -3 4625 4692 4691 -3 4626 4627 4693 -3 4626 4693 4692 -3 4627 4628 4694 -3 4627 4694 4693 -3 4628 4629 4695 -3 4628 4695 4694 -3 4629 4630 4696 -3 4629 4696 4695 -3 4630 4631 4697 -3 4630 4697 4696 -3 4631 4632 4698 -3 4631 4698 4697 -3 4632 4633 4699 -3 4632 4699 4698 -3 4633 4634 4700 -3 4633 4700 4699 -3 4634 4635 4701 -3 4634 4701 4700 -3 4635 4636 4702 -3 4635 4702 4701 -3 4636 4637 4703 -3 4636 4703 4702 -3 4637 4638 4704 -3 4637 
4704 4703 -3 4638 4639 4705 -3 4638 4705 4704 -3 4639 4640 4706 -3 4639 4706 4705 -3 4640 4641 4707 -3 4640 4707 4706 -3 4641 4642 4708 -3 4641 4708 4707 -3 4642 4643 4709 -3 4642 4709 4708 -3 4643 4644 4710 -3 4643 4710 4709 -3 4644 4645 4711 -3 4644 4711 4710 -3 4645 4646 4712 -3 4645 4712 4711 -3 4646 4647 4713 -3 4646 4713 4712 -3 4647 4648 4713 -3 4714 4713 4648 -3 4648 4649 4714 -3 4715 4714 4649 -3 4649 4650 4715 -3 4716 4715 4650 -3 4650 4651 4716 -3 4717 4716 4651 -3 4651 4652 4717 -3 4718 4717 4652 -3 4652 4653 4718 -3 4719 4718 4653 -3 4653 4654 4719 -3 4720 4719 4654 -3 4654 4655 4720 -3 4721 4720 4655 -3 4655 4656 4721 -3 4722 4721 4656 -3 4656 4657 4722 -3 4723 4722 4657 -3 4657 4658 4723 -3 4724 4723 4658 -3 4658 4659 4724 -3 4725 4724 4659 -3 4659 4660 4725 -3 4726 4725 4660 -3 4660 4661 4726 -3 4727 4726 4661 -3 4661 4662 4727 -3 4728 4727 4662 -3 4662 4663 4728 -3 4729 4728 4663 -3 4663 4664 4729 -3 4730 4729 4664 -3 4664 4665 4730 -3 4731 4730 4665 -3 4665 4666 4731 -3 4732 4731 4666 -3 4666 4667 4732 -3 4733 4732 4667 -3 4667 4668 4733 -3 4734 4733 4668 -3 4668 4669 4734 -3 4735 4734 4669 -3 4669 4670 4735 -3 4736 4735 4670 -3 4670 4671 4736 -3 4737 4736 4671 -3 4671 4672 4737 -3 4738 4737 4672 -3 4672 4673 4738 -3 4739 4738 4673 -3 4673 4674 4739 -3 4740 4739 4674 -3 4674 4675 4740 -3 4741 4740 4675 -3 4675 4676 4741 -3 4742 4741 4676 -3 4676 4677 4742 -3 4743 4742 4677 -3 4677 4678 4743 -3 4744 4743 4678 -3 4678 4679 4744 -3 4745 4744 4679 -3 4679 4680 4745 -3 4746 4745 4680 -3 4680 4681 4746 -3 4747 4746 4681 -3 4681 4682 4747 -3 4748 4747 4682 -3 4682 4683 5161 -3 5162 5161 4683 -3 4682 5161 5231 -3 4682 5231 4748 -3 4684 4685 5167 -3 5168 5167 4685 -3 4685 4686 5168 -3 5169 5168 4686 -3 4686 4687 5169 -3 5170 5169 4687 -3 4687 4688 5170 -3 5171 5170 4688 -3 4688 4689 5171 -3 5172 5171 4689 -3 4689 4690 5173 -3 4689 5173 5172 -3 4690 4691 5174 -3 4690 5174 5173 -3 4691 4692 5175 -3 4691 5175 5174 -3 4692 4693 5176 -3 4692 5176 5175 -3 4693 4694 5177 -3 4693 5177 5176 -3 4694 4695 5178 -3 4694 5178 5177 -3 4695 4696 5179 -3 4695 5179 5178 -3 4696 4697 5180 -3 4696 5180 5179 -3 4697 4698 5181 -3 4697 5181 5180 -3 4698 4699 5182 -3 4698 5182 5181 -3 4699 4700 5183 -3 4699 5183 5182 -3 4700 4701 5184 -3 4700 5184 5183 -3 4701 4702 5185 -3 4701 5185 5184 -3 4702 4703 5186 -3 4702 5186 5185 -3 4703 4704 5187 -3 4703 5187 5186 -3 4704 4705 5188 -3 4704 5188 5187 -3 4705 4706 5189 -3 4705 5189 5188 -3 4706 4707 5190 -3 4706 5190 5189 -3 4707 4708 5191 -3 4707 5191 5190 -3 4708 4709 5192 -3 4708 5192 5191 -3 4709 4710 5193 -3 4709 5193 5192 -3 4710 4711 5194 -3 4710 5194 5193 -3 4711 4712 5195 -3 4711 5195 5194 -3 4712 4713 5196 -3 4712 5196 5195 -3 4713 4714 5197 -3 4713 5197 5196 -3 4714 4715 5198 -3 4714 5198 5197 -3 4715 4716 5199 -3 4715 5199 5198 -3 4716 4717 5200 -3 4716 5200 5199 -3 4717 4718 5201 -3 4717 5201 5200 -3 4718 4719 5202 -3 4718 5202 5201 -3 4719 4720 5203 -3 4719 5203 5202 -3 4720 4721 5204 -3 4720 5204 5203 -3 4721 4722 5205 -3 4721 5205 5204 -3 4722 4723 5206 -3 4722 5206 5205 -3 4723 4724 5207 -3 4723 5207 5206 -3 4724 4725 5208 -3 4724 5208 5207 -3 4725 4726 5209 -3 4725 5209 5208 -3 4726 4727 5210 -3 4726 5210 5209 -3 4727 4728 5211 -3 4727 5211 5210 -3 4728 4729 5212 -3 4728 5212 5211 -3 4729 4730 5213 -3 4729 5213 5212 -3 4730 4731 5214 -3 4730 5214 5213 -3 4731 4732 5214 -3 5215 5214 4732 -3 4732 4733 5215 -3 5216 5215 4733 -3 4733 4734 5216 -3 5217 5216 4734 -3 4734 4735 5217 -3 5218 5217 4735 -3 4735 4736 5218 -3 5219 5218 4736 -3 4736 4737 
5219 -3 5220 5219 4737 -3 4737 4738 5220 -3 5221 5220 4738 -3 4738 4739 5221 -3 5222 5221 4739 -3 4739 4740 5222 -3 5223 5222 4740 -3 4740 4741 5223 -3 5224 5223 4741 -3 4741 4742 5224 -3 5225 5224 4742 -3 4742 4743 5225 -3 5226 5225 4743 -3 4743 4744 5226 -3 5227 5226 4744 -3 4744 4745 5227 -3 5228 5227 4745 -3 4745 4746 5228 -3 5229 5228 4746 -3 4746 4747 5229 -3 5230 5229 4747 -3 4747 4748 5230 -3 5231 5230 4748 -3 4749 4750 4817 -3 4818 4817 4750 -3 4749 5367 4750 -3 5368 4750 5367 -3 4749 4817 5437 -3 4749 5437 5367 -3 4750 4751 4818 -3 4819 4818 4751 -3 4750 5368 4751 -3 5369 4751 5368 -3 4751 4752 4819 -3 4820 4819 4752 -3 4751 5369 4752 -3 5370 4752 5369 -3 4752 4753 4820 -3 4821 4820 4753 -3 4752 5370 4753 -3 5371 4753 5370 -3 4753 4754 4821 -3 4822 4821 4754 -3 4753 5371 4754 -3 5372 4754 5371 -3 4754 4755 4822 -3 4823 4822 4755 -3 4754 5372 4755 -3 5373 4755 5372 -3 4755 4756 4823 -3 4824 4823 4756 -3 4755 5373 4756 -3 5374 4756 5373 -3 4756 4757 4824 -3 4825 4824 4757 -3 4756 5374 4757 -3 5375 4757 5374 -3 4757 4758 4825 -3 4826 4825 4758 -3 4757 5375 4758 -3 5376 4758 5375 -3 4758 4759 4826 -3 4827 4826 4759 -3 4758 5376 4759 -3 5377 4759 5376 -3 4759 4760 4827 -3 4828 4827 4760 -3 4759 5377 4760 -3 5378 4760 5377 -3 4760 4761 4828 -3 4829 4828 4761 -3 4760 5378 4761 -3 5379 4761 5378 -3 4761 4762 4829 -3 4830 4829 4762 -3 4761 5379 4762 -3 5380 4762 5379 -3 4762 4763 4830 -3 4831 4830 4763 -3 4762 5380 4763 -3 5381 4763 5380 -3 4763 4764 4831 -3 4832 4831 4764 -3 4763 5381 4764 -3 5382 4764 5381 -3 4764 4765 4832 -3 4833 4832 4765 -3 4764 5382 4765 -3 5383 4765 5382 -3 4765 4766 4833 -3 4834 4833 4766 -3 4765 5383 4766 -3 5384 4766 5383 -3 4766 4767 4834 -3 4835 4834 4767 -3 4766 5384 4767 -3 5385 4767 5384 -3 4767 4768 4835 -3 4836 4835 4768 -3 4767 5385 4768 -3 5386 4768 5385 -3 4768 4769 4836 -3 4837 4836 4769 -3 4768 5386 4769 -3 5387 4769 5386 -3 4769 4770 4837 -3 4838 4837 4770 -3 4769 5387 5388 -3 4769 5388 4770 -3 4770 4771 4838 -3 4839 4838 4771 -3 4770 5388 5389 -3 4770 5389 4771 -3 4771 4772 4839 -3 4840 4839 4772 -3 4771 5389 5390 -3 4771 5390 4772 -3 4772 4773 4840 -3 4841 4840 4773 -3 4772 5390 5391 -3 4772 5391 4773 -3 4773 4774 4841 -3 4842 4841 4774 -3 4773 5391 5392 -3 4773 5392 4774 -3 4774 4775 4843 -3 4774 4843 4842 -3 4774 5392 5393 -3 4774 5393 4775 -3 4775 4776 4844 -3 4775 4844 4843 -3 4775 5393 5394 -3 4775 5394 4776 -3 4776 4777 4845 -3 4776 4845 4844 -3 4776 5394 5395 -3 4776 5395 4777 -3 4777 4778 4846 -3 4777 4846 4845 -3 4777 5395 5396 -3 4777 5396 4778 -3 4778 4779 4847 -3 4778 4847 4846 -3 4778 5396 5397 -3 4778 5397 4779 -3 4779 4780 4848 -3 4779 4848 4847 -3 4779 5397 5398 -3 4779 5398 4780 -3 4780 4781 4849 -3 4780 4849 4848 -3 4780 5398 5399 -3 4780 5399 4781 -3 4781 4782 4850 -3 4781 4850 4849 -3 4781 5399 5400 -3 4781 5400 4782 -3 4782 4783 4851 -3 4782 4851 4850 -3 4782 5400 5401 -3 4782 5401 4783 -3 4783 4784 4852 -3 4783 4852 4851 -3 4783 5401 5402 -3 4783 5402 4784 -3 4784 4785 4853 -3 4784 4853 4852 -3 4784 5402 5403 -3 4784 5403 4785 -3 4785 4786 4854 -3 4785 4854 4853 -3 4785 5403 5404 -3 4785 5404 4786 -3 4786 4787 4855 -3 4786 4855 4854 -3 4786 5404 5405 -3 4786 5405 4787 -3 4787 4788 4856 -3 4787 4856 4855 -3 4787 5405 5406 -3 4787 5406 4788 -3 4788 4789 4857 -3 4788 4857 4856 -3 4788 5406 5407 -3 4788 5407 4789 -3 4789 4790 4858 -3 4789 4858 4857 -3 4789 5407 5408 -3 4789 5408 4790 -3 4790 4791 4859 -3 4790 4859 4858 -3 4790 5408 5409 -3 4790 5409 4791 -3 4791 4792 4860 -3 4791 4860 4859 -3 4791 5409 5410 -3 4791 5410 4792 -3 
4792 4793 4861 -3 4792 4861 4860 -3 4792 5410 5411 -3 4792 5411 4793 -3 4793 4794 4862 -3 4793 4862 4861 -3 4793 5411 5412 -3 4793 5412 4794 -3 4794 4795 4863 -3 4794 4863 4862 -3 4794 5412 5413 -3 4794 5413 4795 -3 4795 4796 4864 -3 4795 4864 4863 -3 4795 5413 5414 -3 4795 5414 4796 -3 4796 4797 4865 -3 4796 4865 4864 -3 4796 5414 5415 -3 4796 5415 4797 -3 4797 4798 4866 -3 4797 4866 4865 -3 4797 5415 5416 -3 4797 5416 4798 -3 4798 4799 4867 -3 4798 4867 4866 -3 4798 5416 5417 -3 4798 5417 4799 -3 4799 4800 4868 -3 4799 4868 4867 -3 4799 5417 5418 -3 4799 5418 4800 -3 4800 4801 4869 -3 4800 4869 4868 -3 4800 5418 5419 -3 4800 5419 4801 -3 4801 4802 4870 -3 4801 4870 4869 -3 4801 5419 5420 -3 4801 5420 4802 -3 4802 4803 4871 -3 4802 4871 4870 -3 4802 5420 5421 -3 4802 5421 4803 -3 4803 4804 4872 -3 4803 4872 4871 -3 4803 5421 5422 -3 4803 5422 4804 -3 4804 4805 4873 -3 4804 4873 4872 -3 4804 5422 5423 -3 4804 5423 4805 -3 4805 4806 4874 -3 4805 4874 4873 -3 4805 5423 5424 -3 4805 5424 4806 -3 4806 4807 4875 -3 4806 4875 4874 -3 4806 5424 5425 -3 4806 5425 4807 -3 4807 4808 4876 -3 4807 4876 4875 -3 4807 5425 5426 -3 4807 5426 4808 -3 4808 4809 4877 -3 4808 4877 4876 -3 4808 5426 5427 -3 4808 5427 4809 -3 4809 4810 4878 -3 4809 4878 4877 -3 4809 5427 5428 -3 4809 5428 4810 -3 4810 4811 4879 -3 4810 4879 4878 -3 4810 5428 5429 -3 4810 5429 4811 -3 4811 4812 4880 -3 4811 4880 4879 -3 4811 5429 5430 -3 4811 5430 4812 -3 4812 4813 4881 -3 4812 4881 4880 -3 4812 5430 5431 -3 4812 5431 4813 -3 4813 4814 4882 -3 4813 4882 4881 -3 4813 5431 5432 -3 4813 5432 4814 -3 4814 4815 4883 -3 4814 4883 4882 -3 4814 5432 4815 -3 5433 4815 5432 -3 4815 5433 4883 -3 5438 4883 5433 -3 4816 4817 4886 -3 4887 4886 4817 -3 4816 5436 4817 -3 5437 4817 5436 -3 4816 4886 5436 -3 5442 5436 4886 -3 4817 4818 4887 -3 4888 4887 4818 -3 4882 4883 4890 -3 4882 4890 4889 -3 4883 4884 4891 -3 4883 4891 4890 -3 4883 5438 4884 -3 5439 4884 5438 -3 4884 5439 4891 -3 5443 4891 5439 -3 4885 4886 4894 -3 4885 4894 4893 -3 4885 5441 4886 -3 5442 4886 5441 -3 4885 4893 5446 -3 4885 5446 5441 -3 4886 4887 4895 -3 4886 4895 4894 -3 4890 4891 4897 -3 4890 4897 4896 -3 4891 4892 4898 -3 4891 4898 4897 -3 4891 5443 4892 -3 5444 4892 5443 -3 4892 5444 4898 -3 5447 4898 5444 -3 4893 4894 4900 -3 4893 4900 4899 -3 4893 4899 5450 -3 4893 5450 5446 -3 4897 4898 4902 -3 4897 4902 4901 -3 4898 5447 5451 -3 4898 5451 4902 -3 4899 4900 4904 -3 4899 4904 4903 -3 4899 4903 5454 -3 4899 5454 5450 -3 4901 4902 4906 -3 4901 4906 4905 -3 4902 5451 5455 -3 4902 5455 4906 -3 4903 4904 4907 -3 4908 4907 4904 -3 4903 4907 5454 -3 5458 5454 4907 -3 4905 4906 4909 -3 4910 4909 4906 -3 4906 5455 5459 -3 4906 5459 4910 -3 4907 4908 4911 -3 4912 4911 4908 -3 4907 4911 5458 -3 5462 5458 4911 -3 4909 4910 4913 -3 4914 4913 4910 -3 4910 5459 5463 -3 4910 5463 4914 -3 4911 4912 4915 -3 4916 4915 4912 -3 4911 4915 5462 -3 5466 5462 4915 -3 4913 4914 4917 -3 4918 4917 4914 -3 4914 5463 5467 -3 4914 5467 4918 -3 4915 4916 4919 -3 4920 4919 4916 -3 4915 4919 5466 -3 5470 5466 4919 -3 4917 4918 4921 -3 4922 4921 4918 -3 4918 5467 5471 -3 4918 5471 4922 -3 4919 4920 4923 -3 4924 4923 4920 -3 4919 4923 5470 -3 5474 5470 4923 -3 4921 4922 4925 -3 4926 4925 4922 -3 4922 5471 5475 -3 4922 5475 4926 -3 4923 4924 4927 -3 4928 4927 4924 -3 4923 4927 5474 -3 5478 5474 4927 -3 4925 4926 4929 -3 4930 4929 4926 -3 4926 5475 5479 -3 4926 5479 4930 -3 4927 4928 4931 -3 4932 4931 4928 -3 4927 4931 5478 -3 5482 5478 4931 -3 4929 4930 4933 -3 4934 4933 4930 -3 4930 5479 5483 -3 4930 
5483 4934 -3 4931 4932 4935 -3 4936 4935 4932 -3 4931 4935 5482 -3 5486 5482 4935 -3 4933 4934 4937 -3 4938 4937 4934 -3 4934 5483 5487 -3 4934 5487 4938 -3 4935 4936 4939 -3 4940 4939 4936 -3 4935 4939 5486 -3 5490 5486 4939 -3 4937 4938 4941 -3 4942 4941 4938 -3 4938 5487 5491 -3 4938 5491 4942 -3 4939 4940 4943 -3 4944 4943 4940 -3 4939 4943 5490 -3 5494 5490 4943 -3 4941 4942 4945 -3 4946 4945 4942 -3 4942 5491 4946 -3 5495 4946 5491 -3 4943 4944 4947 -3 4948 4947 4944 -3 4943 4947 5494 -3 5498 5494 4947 -3 4945 4946 4950 -3 4945 4950 4949 -3 4946 5495 4950 -3 5499 4950 5495 -3 4947 4948 4952 -3 4947 4952 4951 -3 4947 4951 5502 -3 4947 5502 5498 -3 4949 4950 4954 -3 4949 4954 4953 -3 4950 5499 4954 -3 5503 4954 5499 -3 4951 4952 4956 -3 4951 4956 4955 -3 4951 4955 5506 -3 4951 5506 5502 -3 4953 4954 4958 -3 4953 4958 4957 -3 4954 5503 4958 -3 5507 4958 5503 -3 4955 4956 4960 -3 4955 4960 4959 -3 4955 4959 5510 -3 4955 5510 5506 -3 4957 4958 4962 -3 4957 4962 4961 -3 4958 5507 4962 -3 5511 4962 5507 -3 4959 4960 4964 -3 4959 4964 4963 -3 4959 4963 5514 -3 4959 5514 5510 -3 4961 4962 4966 -3 4961 4966 4965 -3 4962 5511 4966 -3 5515 4966 5511 -3 4963 4964 4968 -3 4963 4968 4967 -3 4963 4967 5518 -3 4963 5518 5514 -3 4965 4966 4970 -3 4965 4970 4969 -3 4966 5515 4970 -3 5519 4970 5515 -3 4967 4968 4972 -3 4967 4972 4971 -3 4967 4971 5522 -3 4967 5522 5518 -3 4969 4970 4974 -3 4969 4974 4973 -3 4970 5519 4974 -3 5523 4974 5519 -3 4971 4972 4976 -3 4971 4976 4975 -3 4971 4975 5526 -3 4971 5526 5522 -3 4973 4974 4978 -3 4973 4978 4977 -3 4974 5523 4978 -3 5527 4978 5523 -3 4975 4976 4980 -3 4975 4980 4979 -3 4975 4979 5530 -3 4975 5530 5526 -3 4977 4978 4982 -3 4977 4982 4981 -3 4978 5527 4982 -3 5531 4982 5527 -3 4979 4980 4984 -3 4979 4984 4983 -3 4979 4983 5534 -3 4979 5534 5530 -3 4981 4982 4986 -3 4981 4986 4985 -3 4982 5531 4986 -3 5535 4986 5531 -3 4983 4984 4988 -3 4983 4988 4987 -3 4983 4987 5538 -3 4983 5538 5534 -3 4985 4986 4990 -3 4985 4990 4989 -3 4986 5535 5539 -3 4986 5539 4990 -3 4987 4988 4992 -3 4987 4992 4991 -3 4987 4991 5538 -3 5542 5538 4991 -3 4989 4990 4993 -3 4994 4993 4990 -3 4990 5539 5543 -3 4990 5543 4994 -3 4991 4992 4995 -3 4996 4995 4992 -3 4991 4995 5542 -3 5546 5542 4995 -3 4993 4994 4997 -3 4998 4997 4994 -3 4994 5543 5547 -3 4994 5547 4998 -3 4995 4996 4999 -3 5000 4999 4996 -3 4995 4999 5546 -3 5550 5546 4999 -3 4997 4998 5001 -3 5002 5001 4998 -3 4998 5547 5551 -3 4998 5551 5002 -3 4999 5000 5003 -3 5004 5003 5000 -3 4999 5003 5550 -3 5554 5550 5003 -3 5001 5002 5005 -3 5006 5005 5002 -3 5002 5551 5555 -3 5002 5555 5006 -3 5003 5004 5007 -3 5008 5007 5004 -3 5003 5007 5554 -3 5558 5554 5007 -3 5005 5006 5009 -3 5010 5009 5006 -3 5006 5555 5559 -3 5006 5559 5010 -3 5007 5008 5011 -3 5012 5011 5008 -3 5007 5011 5558 -3 5562 5558 5011 -3 5009 5010 5013 -3 5014 5013 5010 -3 5010 5559 5563 -3 5010 5563 5014 -3 5011 5012 5015 -3 5016 5015 5012 -3 5011 5015 5562 -3 5566 5562 5015 -3 5013 5014 5017 -3 5018 5017 5014 -3 5014 5563 5567 -3 5014 5567 5018 -3 5015 5016 5019 -3 5020 5019 5016 -3 5015 5019 5566 -3 5570 5566 5019 -3 5017 5018 5021 -3 5022 5021 5018 -3 5018 5567 5571 -3 5018 5571 5022 -3 5019 5020 5023 -3 5024 5023 5020 -3 5019 5023 5570 -3 5574 5570 5023 -3 5021 5022 5025 -3 5026 5025 5022 -3 5022 5571 5575 -3 5022 5575 5026 -3 5023 5024 5027 -3 5028 5027 5024 -3 5023 5027 5574 -3 5578 5574 5027 -3 5025 5026 5029 -3 5030 5029 5026 -3 5026 5575 5579 -3 5026 5579 5030 -3 5027 5028 5031 -3 5032 5031 5028 -3 5027 5031 5578 -3 5582 5578 5031 -3 5029 5030 
5033 -3 5034 5033 5030 -3 5030 5579 5583 -3 5030 5583 5034 -3 5031 5032 5035 -3 5036 5035 5032 -3 5031 5035 5586 -3 5031 5586 5582 -3 5033 5034 5038 -3 5033 5038 5037 -3 5034 5583 5038 -3 5587 5038 5583 -3 5035 5036 5040 -3 5035 5040 5039 -3 5035 5039 5590 -3 5035 5590 5586 -3 5037 5038 5042 -3 5037 5042 5041 -3 5038 5587 5042 -3 5591 5042 5587 -3 5039 5040 5044 -3 5039 5044 5043 -3 5039 5043 5594 -3 5039 5594 5590 -3 5041 5042 5046 -3 5041 5046 5045 -3 5042 5591 5046 -3 5595 5046 5591 -3 5043 5044 5048 -3 5043 5048 5047 -3 5043 5047 5598 -3 5043 5598 5594 -3 5045 5046 5050 -3 5045 5050 5049 -3 5046 5595 5050 -3 5599 5050 5595 -3 5047 5048 5052 -3 5047 5052 5051 -3 5047 5051 5602 -3 5047 5602 5598 -3 5049 5050 5054 -3 5049 5054 5053 -3 5050 5599 5054 -3 5603 5054 5599 -3 5051 5052 5056 -3 5051 5056 5055 -3 5051 5055 5606 -3 5051 5606 5602 -3 5053 5054 5058 -3 5053 5058 5057 -3 5054 5603 5058 -3 5607 5058 5603 -3 5055 5056 5060 -3 5055 5060 5059 -3 5055 5059 5610 -3 5055 5610 5606 -3 5057 5058 5062 -3 5057 5062 5061 -3 5058 5607 5062 -3 5611 5062 5607 -3 5059 5060 5064 -3 5059 5064 5063 -3 5059 5063 5614 -3 5059 5614 5610 -3 5061 5062 5066 -3 5061 5066 5065 -3 5062 5611 5066 -3 5615 5066 5611 -3 5063 5064 5068 -3 5063 5068 5067 -3 5063 5067 5618 -3 5063 5618 5614 -3 5065 5066 5070 -3 5065 5070 5069 -3 5066 5615 5070 -3 5619 5070 5615 -3 5067 5068 5072 -3 5067 5072 5071 -3 5067 5071 5622 -3 5067 5622 5618 -3 5069 5070 5074 -3 5069 5074 5073 -3 5070 5619 5074 -3 5623 5074 5619 -3 5071 5072 5076 -3 5071 5076 5075 -3 5071 5075 5626 -3 5071 5626 5622 -3 5073 5074 5078 -3 5073 5078 5077 -3 5074 5623 5078 -3 5627 5078 5623 -3 5075 5076 5080 -3 5075 5080 5079 -3 5075 5079 5626 -3 5630 5626 5079 -3 5077 5078 5081 -3 5082 5081 5078 -3 5078 5627 5631 -3 5078 5631 5082 -3 5079 5080 5083 -3 5084 5083 5080 -3 5079 5083 5630 -3 5634 5630 5083 -3 5081 5082 5085 -3 5086 5085 5082 -3 5082 5631 5635 -3 5082 5635 5086 -3 5083 5084 5087 -3 5088 5087 5084 -3 5083 5087 5634 -3 5638 5634 5087 -3 5085 5086 5089 -3 5090 5089 5086 -3 5086 5635 5639 -3 5086 5639 5090 -3 5087 5088 5091 -3 5092 5091 5088 -3 5087 5091 5638 -3 5642 5638 5091 -3 5089 5090 5093 -3 5094 5093 5090 -3 5090 5639 5643 -3 5090 5643 5094 -3 5091 5092 5095 -3 5096 5095 5092 -3 5091 5095 5642 -3 5646 5642 5095 -3 5093 5094 5097 -3 5098 5097 5094 -3 5094 5643 5647 -3 5094 5647 5098 -3 5095 5096 5099 -3 5100 5099 5096 -3 5095 5099 5646 -3 5650 5646 5099 -3 5097 5098 5101 -3 5102 5101 5098 -3 5098 5647 5651 -3 5098 5651 5102 -3 5099 5100 5103 -3 5104 5103 5100 -3 5099 5103 5650 -3 5654 5650 5103 -3 5101 5102 5105 -3 5106 5105 5102 -3 5102 5651 5655 -3 5102 5655 5106 -3 5103 5104 5107 -3 5108 5107 5104 -3 5103 5107 5654 -3 5658 5654 5107 -3 5105 5106 5109 -3 5110 5109 5106 -3 5106 5655 5659 -3 5106 5659 5110 -3 5107 5108 5111 -3 5112 5111 5108 -3 5107 5111 5658 -3 5662 5658 5111 -3 5109 5110 5113 -3 5114 5113 5110 -3 5110 5659 5663 -3 5110 5663 5114 -3 5111 5112 5115 -3 5116 5115 5112 -3 5111 5115 5662 -3 5666 5662 5115 -3 5113 5114 5117 -3 5118 5117 5114 -3 5114 5663 5667 -3 5114 5667 5118 -3 5115 5116 5119 -3 5120 5119 5116 -3 5115 5119 5666 -3 5670 5666 5119 -3 5117 5118 5121 -3 5122 5121 5118 -3 5118 5667 5671 -3 5118 5671 5122 -3 5119 5120 5123 -3 5124 5123 5120 -3 5119 5123 5674 -3 5119 5674 5670 -3 5121 5122 5126 -3 5121 5126 5125 -3 5122 5671 5126 -3 5675 5126 5671 -3 5123 5124 5128 -3 5123 5128 5127 -3 5123 5127 5678 -3 5123 5678 5674 -3 5125 5126 5130 -3 5125 5130 5129 -3 5126 5675 5130 -3 5679 5130 5675 -3 5127 5128 5132 -3 5127 5132 5131 -3 
5127 5131 5682 -3 5127 5682 5678 -3 5129 5130 5134 -3 5129 5134 5133 -3 5130 5679 5134 -3 5683 5134 5679 -3 5131 5132 5136 -3 5131 5136 5135 -3 5131 5135 5686 -3 5131 5686 5682 -3 5133 5134 5138 -3 5133 5138 5137 -3 5134 5683 5138 -3 5687 5138 5683 -3 5135 5136 5140 -3 5135 5140 5139 -3 5135 5139 5690 -3 5135 5690 5686 -3 5137 5138 5142 -3 5137 5142 5141 -3 5138 5687 5142 -3 5691 5142 5687 -3 5139 5140 5144 -3 5139 5144 5143 -3 5139 5143 5694 -3 5139 5694 5690 -3 5141 5142 5146 -3 5141 5146 5145 -3 5142 5691 5146 -3 5695 5146 5691 -3 5143 5144 5148 -3 5143 5148 5147 -3 5143 5147 5698 -3 5143 5698 5694 -3 5145 5146 5150 -3 5145 5150 5149 -3 5146 5695 5150 -3 5699 5150 5695 -3 5147 5148 5152 -3 5147 5152 5151 -3 5147 5151 5702 -3 5147 5702 5698 -3 5149 5150 5156 -3 5149 5156 5155 -3 5150 5699 5156 -3 5703 5156 5699 -3 5151 5152 5158 -3 5151 5158 5157 -3 5151 5157 5705 -3 5151 5705 5702 -3 5152 5153 5159 -3 5152 5159 5158 -3 5154 5155 5163 -3 5154 5163 5162 -3 5155 5156 5164 -3 5155 5164 5163 -3 5156 5703 5164 -3 5708 5164 5703 -3 5157 5158 5706 -3 5157 5706 5705 -3 5158 5159 5166 -3 5158 5166 5165 -3 5158 5165 5710 -3 5158 5710 5706 -3 5159 5160 5167 -3 5159 5167 5166 -3 5161 5162 5232 -3 5161 5232 5231 -3 5162 5163 5233 -3 5162 5233 5232 -3 5163 5164 5708 -3 5163 5708 5707 -3 5163 5707 5233 -3 5713 5233 5707 -3 5165 5166 5710 -3 5711 5710 5166 -3 5166 5167 5234 -3 5235 5234 5167 -3 5166 5234 5711 -3 5716 5711 5234 -3 5167 5168 5235 -3 5236 5235 5168 -3 5168 5169 5236 -3 5237 5236 5169 -3 5169 5170 5237 -3 5238 5237 5170 -3 5170 5171 5238 -3 5239 5238 5171 -3 5171 5172 5239 -3 5240 5239 5172 -3 5172 5173 5240 -3 5241 5240 5173 -3 5173 5174 5241 -3 5242 5241 5174 -3 5174 5175 5242 -3 5243 5242 5175 -3 5175 5176 5243 -3 5244 5243 5176 -3 5176 5177 5244 -3 5245 5244 5177 -3 5177 5178 5245 -3 5246 5245 5178 -3 5178 5179 5246 -3 5247 5246 5179 -3 5179 5180 5247 -3 5248 5247 5180 -3 5180 5181 5248 -3 5249 5248 5181 -3 5181 5182 5249 -3 5250 5249 5182 -3 5182 5183 5250 -3 5251 5250 5183 -3 5183 5184 5251 -3 5252 5251 5184 -3 5184 5185 5252 -3 5253 5252 5185 -3 5185 5186 5253 -3 5254 5253 5186 -3 5186 5187 5254 -3 5255 5254 5187 -3 5187 5188 5255 -3 5256 5255 5188 -3 5188 5189 5256 -3 5257 5256 5189 -3 5189 5190 5257 -3 5258 5257 5190 -3 5190 5191 5258 -3 5259 5258 5191 -3 5191 5192 5259 -3 5260 5259 5192 -3 5192 5193 5260 -3 5261 5260 5193 -3 5193 5194 5261 -3 5262 5261 5194 -3 5194 5195 5262 -3 5263 5262 5195 -3 5195 5196 5263 -3 5264 5263 5196 -3 5196 5197 5264 -3 5265 5264 5197 -3 5197 5198 5265 -3 5266 5265 5198 -3 5198 5199 5266 -3 5267 5266 5199 -3 5199 5200 5267 -3 5268 5267 5200 -3 5200 5201 5268 -3 5269 5268 5201 -3 5201 5202 5269 -3 5270 5269 5202 -3 5202 5203 5270 -3 5271 5270 5203 -3 5203 5204 5271 -3 5272 5271 5204 -3 5204 5205 5272 -3 5273 5272 5205 -3 5205 5206 5273 -3 5274 5273 5206 -3 5206 5207 5274 -3 5275 5274 5207 -3 5207 5208 5275 -3 5276 5275 5208 -3 5208 5209 5276 -3 5277 5276 5209 -3 5209 5210 5278 -3 5209 5278 5277 -3 5210 5211 5279 -3 5210 5279 5278 -3 5211 5212 5280 -3 5211 5280 5279 -3 5212 5213 5281 -3 5212 5281 5280 -3 5213 5214 5282 -3 5213 5282 5281 -3 5214 5215 5283 -3 5214 5283 5282 -3 5215 5216 5284 -3 5215 5284 5283 -3 5216 5217 5285 -3 5216 5285 5284 -3 5217 5218 5286 -3 5217 5286 5285 -3 5218 5219 5287 -3 5218 5287 5286 -3 5219 5220 5288 -3 5219 5288 5287 -3 5220 5221 5289 -3 5220 5289 5288 -3 5221 5222 5290 -3 5221 5290 5289 -3 5222 5223 5291 -3 5222 5291 5290 -3 5223 5224 5292 -3 5223 5292 5291 -3 5224 5225 5293 -3 5224 5293 5292 -3 5225 5226 5294 -3 5225 
5294 5293 -3 5226 5227 5295 -3 5226 5295 5294 -3 5227 5228 5296 -3 5227 5296 5295 -3 5228 5229 5297 -3 5228 5297 5296 -3 5229 5230 5298 -3 5229 5298 5297 -3 5230 5231 5299 -3 5230 5299 5298 -3 5231 5232 5300 -3 5231 5300 5299 -3 5232 5233 5713 -3 5232 5713 5712 -3 5232 5712 5782 -3 5232 5782 5300 -3 5234 5235 5717 -3 5234 5717 5716 -3 5235 5236 5718 -3 5235 5718 5717 -3 5236 5237 5719 -3 5236 5719 5718 -3 5237 5238 5720 -3 5237 5720 5719 -3 5238 5239 5721 -3 5238 5721 5720 -3 5239 5240 5722 -3 5239 5722 5721 -3 5240 5241 5723 -3 5240 5723 5722 -3 5241 5242 5724 -3 5241 5724 5723 -3 5242 5243 5725 -3 5242 5725 5724 -3 5243 5244 5726 -3 5243 5726 5725 -3 5244 5245 5727 -3 5244 5727 5726 -3 5245 5246 5728 -3 5245 5728 5727 -3 5246 5247 5729 -3 5246 5729 5728 -3 5247 5248 5730 -3 5247 5730 5729 -3 5248 5249 5731 -3 5248 5731 5730 -3 5249 5250 5732 -3 5249 5732 5731 -3 5250 5251 5733 -3 5250 5733 5732 -3 5251 5252 5734 -3 5251 5734 5733 -3 5252 5253 5735 -3 5252 5735 5734 -3 5253 5254 5735 -3 5736 5735 5254 -3 5254 5255 5736 -3 5737 5736 5255 -3 5255 5256 5737 -3 5738 5737 5256 -3 5256 5257 5738 -3 5739 5738 5257 -3 5257 5258 5739 -3 5740 5739 5258 -3 5258 5259 5740 -3 5741 5740 5259 -3 5259 5260 5741 -3 5742 5741 5260 -3 5260 5261 5742 -3 5743 5742 5261 -3 5261 5262 5743 -3 5744 5743 5262 -3 5262 5263 5744 -3 5745 5744 5263 -3 5263 5264 5745 -3 5746 5745 5264 -3 5264 5265 5746 -3 5747 5746 5265 -3 5265 5266 5747 -3 5748 5747 5266 -3 5266 5267 5748 -3 5749 5748 5267 -3 5267 5268 5749 -3 5750 5749 5268 -3 5268 5269 5750 -3 5751 5750 5269 -3 5269 5270 5751 -3 5752 5751 5270 -3 5270 5271 5752 -3 5753 5752 5271 -3 5271 5272 5753 -3 5754 5753 5272 -3 5272 5273 5754 -3 5755 5754 5273 -3 5273 5274 5755 -3 5756 5755 5274 -3 5274 5275 5756 -3 5757 5756 5275 -3 5275 5276 5757 -3 5758 5757 5276 -3 5276 5277 5758 -3 5759 5758 5277 -3 5277 5278 5759 -3 5760 5759 5278 -3 5278 5279 5760 -3 5761 5760 5279 -3 5279 5280 5761 -3 5762 5761 5280 -3 5280 5281 5762 -3 5763 5762 5281 -3 5281 5282 5763 -3 5764 5763 5282 -3 5282 5283 5764 -3 5765 5764 5283 -3 5283 5284 5765 -3 5766 5765 5284 -3 5284 5285 5766 -3 5767 5766 5285 -3 5285 5286 5767 -3 5768 5767 5286 -3 5286 5287 5768 -3 5769 5768 5287 -3 5287 5288 5769 -3 5770 5769 5288 -3 5288 5289 5770 -3 5771 5770 5289 -3 5289 5290 5771 -3 5772 5771 5290 -3 5290 5291 5772 -3 5773 5772 5291 -3 5291 5292 5773 -3 5774 5773 5292 -3 5292 5293 5774 -3 5775 5774 5293 -3 5293 5294 5775 -3 5776 5775 5294 -3 5294 5295 5776 -3 5777 5776 5295 -3 5295 5296 5777 -3 5778 5777 5296 -3 5296 5297 5778 -3 5779 5778 5297 -3 5297 5298 5779 -3 5780 5779 5298 -3 5298 5299 5781 -3 5298 5781 5780 -3 5299 5300 5782 -3 5299 5782 5781 -3 5301 5302 5369 -3 5301 5369 5368 -3 5301 5850 5851 -3 5301 5851 5302 -3 5301 5368 5919 -3 5301 5919 5850 -3 5302 5303 5370 -3 5302 5370 5369 -3 5302 5851 5852 -3 5302 5852 5303 -3 5303 5304 5371 -3 5303 5371 5370 -3 5303 5852 5853 -3 5303 5853 5304 -3 5304 5305 5372 -3 5304 5372 5371 -3 5304 5853 5305 -3 5854 5305 5853 -3 5305 5306 5373 -3 5305 5373 5372 -3 5305 5854 5306 -3 5855 5306 5854 -3 5306 5307 5374 -3 5306 5374 5373 -3 5306 5855 5307 -3 5856 5307 5855 -3 5307 5308 5375 -3 5307 5375 5374 -3 5307 5856 5308 -3 5857 5308 5856 -3 5308 5309 5376 -3 5308 5376 5375 -3 5308 5857 5309 -3 5858 5309 5857 -3 5309 5310 5377 -3 5309 5377 5376 -3 5309 5858 5310 -3 5859 5310 5858 -3 5310 5311 5378 -3 5310 5378 5377 -3 5310 5859 5311 -3 5860 5311 5859 -3 5311 5312 5379 -3 5311 5379 5378 -3 5311 5860 5312 -3 5861 5312 5860 -3 5312 5313 5380 -3 5312 5380 5379 -3 5312 5861 
5313 -3 5862 5313 5861 -3 5313 5314 5381 -3 5313 5381 5380 -3 5313 5862 5314 -3 5863 5314 5862 -3 5314 5315 5382 -3 5314 5382 5381 -3 5314 5863 5315 -3 5864 5315 5863 -3 5315 5316 5383 -3 5315 5383 5382 -3 5315 5864 5316 -3 5865 5316 5864 -3 5316 5317 5384 -3 5316 5384 5383 -3 5316 5865 5317 -3 5866 5317 5865 -3 5317 5318 5385 -3 5317 5385 5384 -3 5317 5866 5318 -3 5867 5318 5866 -3 5318 5319 5386 -3 5318 5386 5385 -3 5318 5867 5319 -3 5868 5319 5867 -3 5319 5320 5387 -3 5319 5387 5386 -3 5319 5868 5320 -3 5869 5320 5868 -3 5320 5321 5388 -3 5320 5388 5387 -3 5320 5869 5321 -3 5870 5321 5869 -3 5321 5322 5389 -3 5321 5389 5388 -3 5321 5870 5322 -3 5871 5322 5870 -3 5322 5323 5390 -3 5322 5390 5389 -3 5322 5871 5323 -3 5872 5323 5871 -3 5323 5324 5391 -3 5323 5391 5390 -3 5323 5872 5324 -3 5873 5324 5872 -3 5324 5325 5392 -3 5324 5392 5391 -3 5324 5873 5325 -3 5874 5325 5873 -3 5325 5326 5393 -3 5325 5393 5392 -3 5325 5874 5326 -3 5875 5326 5874 -3 5326 5327 5394 -3 5326 5394 5393 -3 5326 5875 5327 -3 5876 5327 5875 -3 5327 5328 5395 -3 5327 5395 5394 -3 5327 5876 5328 -3 5877 5328 5876 -3 5328 5329 5396 -3 5328 5396 5395 -3 5328 5877 5329 -3 5878 5329 5877 -3 5329 5330 5397 -3 5329 5397 5396 -3 5329 5878 5330 -3 5879 5330 5878 -3 5330 5331 5398 -3 5330 5398 5397 -3 5330 5879 5331 -3 5880 5331 5879 -3 5331 5332 5399 -3 5331 5399 5398 -3 5331 5880 5332 -3 5881 5332 5880 -3 5332 5333 5400 -3 5332 5400 5399 -3 5332 5881 5333 -3 5882 5333 5881 -3 5333 5334 5401 -3 5333 5401 5400 -3 5333 5882 5334 -3 5883 5334 5882 -3 5334 5335 5402 -3 5334 5402 5401 -3 5334 5883 5335 -3 5884 5335 5883 -3 5335 5336 5403 -3 5335 5403 5402 -3 5335 5884 5336 -3 5885 5336 5884 -3 5336 5337 5404 -3 5336 5404 5403 -3 5336 5885 5337 -3 5886 5337 5885 -3 5337 5338 5405 -3 5337 5405 5404 -3 5337 5886 5338 -3 5887 5338 5886 -3 5338 5339 5406 -3 5338 5406 5405 -3 5338 5887 5339 -3 5888 5339 5887 -3 5339 5340 5407 -3 5339 5407 5406 -3 5339 5888 5340 -3 5889 5340 5888 -3 5340 5341 5408 -3 5340 5408 5407 -3 5340 5889 5341 -3 5890 5341 5889 -3 5341 5342 5409 -3 5341 5409 5408 -3 5341 5890 5342 -3 5891 5342 5890 -3 5342 5343 5410 -3 5342 5410 5409 -3 5342 5891 5343 -3 5892 5343 5891 -3 5343 5344 5410 -3 5411 5410 5344 -3 5343 5892 5344 -3 5893 5344 5892 -3 5344 5345 5411 -3 5412 5411 5345 -3 5344 5893 5345 -3 5894 5345 5893 -3 5345 5346 5412 -3 5413 5412 5346 -3 5345 5894 5346 -3 5895 5346 5894 -3 5346 5347 5413 -3 5414 5413 5347 -3 5346 5895 5347 -3 5896 5347 5895 -3 5347 5348 5414 -3 5415 5414 5348 -3 5347 5896 5348 -3 5897 5348 5896 -3 5348 5349 5415 -3 5416 5415 5349 -3 5348 5897 5349 -3 5898 5349 5897 -3 5349 5350 5416 -3 5417 5416 5350 -3 5349 5898 5350 -3 5899 5350 5898 -3 5350 5351 5417 -3 5418 5417 5351 -3 5350 5899 5900 -3 5350 5900 5351 -3 5351 5352 5418 -3 5419 5418 5352 -3 5351 5900 5901 -3 5351 5901 5352 -3 5352 5353 5419 -3 5420 5419 5353 -3 5352 5901 5902 -3 5352 5902 5353 -3 5353 5354 5420 -3 5421 5420 5354 -3 5353 5902 5903 -3 5353 5903 5354 -3 5354 5355 5421 -3 5422 5421 5355 -3 5354 5903 5904 -3 5354 5904 5355 -3 5355 5356 5422 -3 5423 5422 5356 -3 5355 5904 5905 -3 5355 5905 5356 -3 5356 5357 5423 -3 5424 5423 5357 -3 5356 5905 5906 -3 5356 5906 5357 -3 5357 5358 5424 -3 5425 5424 5358 -3 5357 5906 5907 -3 5357 5907 5358 -3 5358 5359 5425 -3 5426 5425 5359 -3 5358 5907 5908 -3 5358 5908 5359 -3 5359 5360 5426 -3 5427 5426 5360 -3 5359 5908 5909 -3 5359 5909 5360 -3 5360 5361 5427 -3 5428 5427 5361 -3 5360 5909 5910 -3 5360 5910 5361 -3 5361 5362 5428 -3 5429 5428 5362 -3 5361 5910 5911 -3 5361 5911 5362 -3 
5362 5363 5429 -3 5430 5429 5363 -3 5362 5911 5912 -3 5362 5912 5363 -3 5363 5364 5430 -3 5431 5430 5364 -3 5363 5912 5913 -3 5363 5913 5364 -3 5364 5365 5431 -3 5432 5431 5365 -3 5364 5913 5914 -3 5364 5914 5365 -3 5365 5914 5920 -3 5365 5920 5432 -3 5366 5367 5436 -3 5437 5436 5367 -3 5366 5917 5918 -3 5366 5918 5367 -3 5366 5436 5925 -3 5366 5925 5917 -3 5367 5918 5919 -3 5367 5919 5368 -3 5432 5920 5433 -3 5921 5433 5920 -3 5433 5434 5438 -3 5439 5438 5434 -3 5433 5921 5434 -3 5922 5434 5921 -3 5434 5922 5439 -3 5926 5439 5922 -3 5435 5436 5441 -3 5442 5441 5436 -3 5435 5924 5436 -3 5925 5436 5924 -3 5435 5441 5924 -3 5929 5924 5441 -3 5439 5440 5443 -3 5444 5443 5440 -3 5439 5926 5440 -3 5927 5440 5926 -3 5440 5927 5444 -3 5930 5444 5927 -3 5441 5446 5929 -3 5933 5929 5446 -3 5444 5930 5447 -3 5934 5447 5930 -3 5445 5446 5449 -3 5450 5449 5446 -3 5445 5932 5446 -3 5933 5446 5932 -3 5445 5449 5932 -3 5936 5932 5449 -3 5447 5448 5451 -3 5452 5451 5448 -3 5447 5934 5448 -3 5935 5448 5934 -3 5448 5935 5452 -3 5937 5452 5935 -3 5449 5450 5453 -3 5454 5453 5450 -3 5449 5453 5936 -3 5938 5936 5453 -3 5451 5452 5455 -3 5456 5455 5452 -3 5452 5937 5456 -3 5939 5456 5937 -3 5453 5454 5457 -3 5458 5457 5454 -3 5453 5457 5938 -3 5940 5938 5457 -3 5455 5456 5459 -3 5460 5459 5456 -3 5456 5939 5460 -3 5941 5460 5939 -3 5457 5458 5461 -3 5462 5461 5458 -3 5457 5461 5940 -3 5942 5940 5461 -3 5459 5460 5463 -3 5464 5463 5460 -3 5460 5941 5464 -3 5943 5464 5941 -3 5461 5462 5465 -3 5466 5465 5462 -3 5461 5465 5942 -3 5944 5942 5465 -3 5463 5464 5467 -3 5468 5467 5464 -3 5464 5943 5468 -3 5945 5468 5943 -3 5465 5466 5469 -3 5470 5469 5466 -3 5465 5469 5944 -3 5946 5944 5469 -3 5467 5468 5471 -3 5472 5471 5468 -3 5468 5945 5472 -3 5947 5472 5945 -3 5469 5470 5473 -3 5474 5473 5470 -3 5469 5473 5946 -3 5948 5946 5473 -3 5471 5472 5475 -3 5476 5475 5472 -3 5472 5947 5476 -3 5949 5476 5947 -3 5473 5474 5477 -3 5478 5477 5474 -3 5473 5477 5948 -3 5950 5948 5477 -3 5475 5476 5479 -3 5480 5479 5476 -3 5476 5949 5480 -3 5951 5480 5949 -3 5477 5478 5481 -3 5482 5481 5478 -3 5477 5481 5952 -3 5477 5952 5950 -3 5479 5480 5484 -3 5479 5484 5483 -3 5480 5951 5484 -3 5953 5484 5951 -3 5481 5482 5486 -3 5481 5486 5485 -3 5481 5485 5954 -3 5481 5954 5952 -3 5483 5484 5488 -3 5483 5488 5487 -3 5484 5953 5488 -3 5955 5488 5953 -3 5485 5486 5490 -3 5485 5490 5489 -3 5485 5489 5956 -3 5485 5956 5954 -3 5487 5488 5492 -3 5487 5492 5491 -3 5488 5955 5492 -3 5957 5492 5955 -3 5489 5490 5494 -3 5489 5494 5493 -3 5489 5493 5958 -3 5489 5958 5956 -3 5491 5492 5496 -3 5491 5496 5495 -3 5492 5957 5959 -3 5492 5959 5496 -3 5493 5494 5498 -3 5493 5498 5497 -3 5493 5497 5960 -3 5493 5960 5958 -3 5495 5496 5500 -3 5495 5500 5499 -3 5496 5959 5961 -3 5496 5961 5500 -3 5497 5498 5502 -3 5497 5502 5501 -3 5497 5501 5962 -3 5497 5962 5960 -3 5499 5500 5504 -3 5499 5504 5503 -3 5500 5961 5963 -3 5500 5963 5504 -3 5501 5502 5506 -3 5501 5506 5505 -3 5501 5505 5964 -3 5501 5964 5962 -3 5503 5504 5508 -3 5503 5508 5507 -3 5504 5963 5965 -3 5504 5965 5508 -3 5505 5506 5510 -3 5505 5510 5509 -3 5505 5509 5966 -3 5505 5966 5964 -3 5507 5508 5512 -3 5507 5512 5511 -3 5508 5965 5967 -3 5508 5967 5512 -3 5509 5510 5514 -3 5509 5514 5513 -3 5509 5513 5968 -3 5509 5968 5966 -3 5511 5512 5516 -3 5511 5516 5515 -3 5512 5967 5969 -3 5512 5969 5516 -3 5513 5514 5518 -3 5513 5518 5517 -3 5513 5517 5970 -3 5513 5970 5968 -3 5515 5516 5520 -3 5515 5520 5519 -3 5516 5969 5971 -3 5516 5971 5520 -3 5517 5518 5522 -3 5517 5522 5521 -3 5517 5521 5972 -3 5517 
5972 5970 -3 5519 5520 5524 -3 5519 5524 5523 -3 5520 5971 5973 -3 5520 5973 5524 -3 5521 5522 5526 -3 5521 5526 5525 -3 5521 5525 5974 -3 5521 5974 5972 -3 5523 5524 5528 -3 5523 5528 5527 -3 5524 5973 5975 -3 5524 5975 5528 -3 5525 5526 5529 -3 5530 5529 5526 -3 5525 5529 5974 -3 5976 5974 5529 -3 5527 5528 5531 -3 5532 5531 5528 -3 5528 5975 5977 -3 5528 5977 5532 -3 5529 5530 5533 -3 5534 5533 5530 -3 5529 5533 5976 -3 5978 5976 5533 -3 5531 5532 5535 -3 5536 5535 5532 -3 5532 5977 5979 -3 5532 5979 5536 -3 5533 5534 5537 -3 5538 5537 5534 -3 5533 5537 5978 -3 5980 5978 5537 -3 5535 5536 5539 -3 5540 5539 5536 -3 5536 5979 5981 -3 5536 5981 5540 -3 5537 5538 5541 -3 5542 5541 5538 -3 5537 5541 5980 -3 5982 5980 5541 -3 5539 5540 5543 -3 5544 5543 5540 -3 5540 5981 5983 -3 5540 5983 5544 -3 5541 5542 5545 -3 5546 5545 5542 -3 5541 5545 5982 -3 5984 5982 5545 -3 5543 5544 5547 -3 5548 5547 5544 -3 5544 5983 5985 -3 5544 5985 5548 -3 5545 5546 5549 -3 5550 5549 5546 -3 5545 5549 5984 -3 5986 5984 5549 -3 5547 5548 5551 -3 5552 5551 5548 -3 5548 5985 5987 -3 5548 5987 5552 -3 5549 5550 5553 -3 5554 5553 5550 -3 5549 5553 5986 -3 5988 5986 5553 -3 5551 5552 5555 -3 5556 5555 5552 -3 5552 5987 5989 -3 5552 5989 5556 -3 5553 5554 5557 -3 5558 5557 5554 -3 5553 5557 5988 -3 5990 5988 5557 -3 5555 5556 5559 -3 5560 5559 5556 -3 5556 5989 5560 -3 5991 5560 5989 -3 5557 5558 5561 -3 5562 5561 5558 -3 5557 5561 5990 -3 5992 5990 5561 -3 5559 5560 5563 -3 5564 5563 5560 -3 5560 5991 5564 -3 5993 5564 5991 -3 5561 5562 5565 -3 5566 5565 5562 -3 5561 5565 5992 -3 5994 5992 5565 -3 5563 5564 5567 -3 5568 5567 5564 -3 5564 5993 5568 -3 5995 5568 5993 -3 5565 5566 5569 -3 5570 5569 5566 -3 5565 5569 5994 -3 5996 5994 5569 -3 5567 5568 5571 -3 5572 5571 5568 -3 5568 5995 5572 -3 5997 5572 5995 -3 5569 5570 5573 -3 5574 5573 5570 -3 5569 5573 5998 -3 5569 5998 5996 -3 5571 5572 5576 -3 5571 5576 5575 -3 5572 5997 5576 -3 5999 5576 5997 -3 5573 5574 5578 -3 5573 5578 5577 -3 5573 5577 6000 -3 5573 6000 5998 -3 5575 5576 5580 -3 5575 5580 5579 -3 5576 5999 5580 -3 6001 5580 5999 -3 5577 5578 5582 -3 5577 5582 5581 -3 5577 5581 6002 -3 5577 6002 6000 -3 5579 5580 5584 -3 5579 5584 5583 -3 5580 6001 5584 -3 6003 5584 6001 -3 5581 5582 5586 -3 5581 5586 5585 -3 5581 5585 6004 -3 5581 6004 6002 -3 5583 5584 5588 -3 5583 5588 5587 -3 5584 6003 5588 -3 6005 5588 6003 -3 5585 5586 5590 -3 5585 5590 5589 -3 5585 5589 6006 -3 5585 6006 6004 -3 5587 5588 5592 -3 5587 5592 5591 -3 5588 6005 5592 -3 6007 5592 6005 -3 5589 5590 5594 -3 5589 5594 5593 -3 5589 5593 6008 -3 5589 6008 6006 -3 5591 5592 5596 -3 5591 5596 5595 -3 5592 6007 5596 -3 6009 5596 6007 -3 5593 5594 5598 -3 5593 5598 5597 -3 5593 5597 6010 -3 5593 6010 6008 -3 5595 5596 5600 -3 5595 5600 5599 -3 5596 6009 5600 -3 6011 5600 6009 -3 5597 5598 5602 -3 5597 5602 5601 -3 5597 5601 6012 -3 5597 6012 6010 -3 5599 5600 5604 -3 5599 5604 5603 -3 5600 6011 5604 -3 6013 5604 6011 -3 5601 5602 5606 -3 5601 5606 5605 -3 5601 5605 6014 -3 5601 6014 6012 -3 5603 5604 5608 -3 5603 5608 5607 -3 5604 6013 5608 -3 6015 5608 6013 -3 5605 5606 5610 -3 5605 5610 5609 -3 5605 5609 6016 -3 5605 6016 6014 -3 5607 5608 5612 -3 5607 5612 5611 -3 5608 6015 5612 -3 6017 5612 6015 -3 5609 5610 5614 -3 5609 5614 5613 -3 5609 5613 6018 -3 5609 6018 6016 -3 5611 5612 5616 -3 5611 5616 5615 -3 5612 6017 5616 -3 6019 5616 6017 -3 5613 5614 5618 -3 5613 5618 5617 -3 5613 5617 6020 -3 5613 6020 6018 -3 5615 5616 5620 -3 5615 5620 5619 -3 5616 6019 5620 -3 6021 5620 6019 -3 5617 5618 
5621 -3 5622 5621 5618 -3 5617 5621 6020 -3 6022 6020 5621 -3 5619 5620 5623 -3 5624 5623 5620 -3 5620 6021 6023 -3 5620 6023 5624 -3 5621 5622 5625 -3 5626 5625 5622 -3 5621 5625 6022 -3 6024 6022 5625 -3 5623 5624 5627 -3 5628 5627 5624 -3 5624 6023 6025 -3 5624 6025 5628 -3 5625 5626 5629 -3 5630 5629 5626 -3 5625 5629 6024 -3 6026 6024 5629 -3 5627 5628 5631 -3 5632 5631 5628 -3 5628 6025 6027 -3 5628 6027 5632 -3 5629 5630 5633 -3 5634 5633 5630 -3 5629 5633 6026 -3 6028 6026 5633 -3 5631 5632 5635 -3 5636 5635 5632 -3 5632 6027 6029 -3 5632 6029 5636 -3 5633 5634 5637 -3 5638 5637 5634 -3 5633 5637 6028 -3 6030 6028 5637 -3 5635 5636 5639 -3 5640 5639 5636 -3 5636 6029 6031 -3 5636 6031 5640 -3 5637 5638 5641 -3 5642 5641 5638 -3 5637 5641 6030 -3 6032 6030 5641 -3 5639 5640 5643 -3 5644 5643 5640 -3 5640 6031 6033 -3 5640 6033 5644 -3 5641 5642 5645 -3 5646 5645 5642 -3 5641 5645 6032 -3 6034 6032 5645 -3 5643 5644 5647 -3 5648 5647 5644 -3 5644 6033 6035 -3 5644 6035 5648 -3 5645 5646 5649 -3 5650 5649 5646 -3 5645 5649 6034 -3 6036 6034 5649 -3 5647 5648 5651 -3 5652 5651 5648 -3 5648 6035 6037 -3 5648 6037 5652 -3 5649 5650 5653 -3 5654 5653 5650 -3 5649 5653 6036 -3 6038 6036 5653 -3 5651 5652 5655 -3 5656 5655 5652 -3 5652 6037 6039 -3 5652 6039 5656 -3 5653 5654 5657 -3 5658 5657 5654 -3 5653 5657 6038 -3 6040 6038 5657 -3 5655 5656 5659 -3 5660 5659 5656 -3 5656 6039 6041 -3 5656 6041 5660 -3 5657 5658 5661 -3 5662 5661 5658 -3 5657 5661 6040 -3 6042 6040 5661 -3 5659 5660 5663 -3 5664 5663 5660 -3 5660 6041 6043 -3 5660 6043 5664 -3 5661 5662 5665 -3 5666 5665 5662 -3 5661 5665 6044 -3 5661 6044 6042 -3 5663 5664 5668 -3 5663 5668 5667 -3 5664 6043 6045 -3 5664 6045 5668 -3 5665 5666 5670 -3 5665 5670 5669 -3 5665 5669 6046 -3 5665 6046 6044 -3 5667 5668 5672 -3 5667 5672 5671 -3 5668 6045 6047 -3 5668 6047 5672 -3 5669 5670 5674 -3 5669 5674 5673 -3 5669 5673 6048 -3 5669 6048 6046 -3 5671 5672 5676 -3 5671 5676 5675 -3 5672 6047 6049 -3 5672 6049 5676 -3 5673 5674 5678 -3 5673 5678 5677 -3 5673 5677 6050 -3 5673 6050 6048 -3 5675 5676 5680 -3 5675 5680 5679 -3 5676 6049 6051 -3 5676 6051 5680 -3 5677 5678 5682 -3 5677 5682 5681 -3 5677 5681 6052 -3 5677 6052 6050 -3 5679 5680 5684 -3 5679 5684 5683 -3 5680 6051 6053 -3 5680 6053 5684 -3 5681 5682 5686 -3 5681 5686 5685 -3 5681 5685 6054 -3 5681 6054 6052 -3 5683 5684 5688 -3 5683 5688 5687 -3 5684 6053 6055 -3 5684 6055 5688 -3 5685 5686 5690 -3 5685 5690 5689 -3 5685 5689 6056 -3 5685 6056 6054 -3 5687 5688 5692 -3 5687 5692 5691 -3 5688 6055 5692 -3 6057 5692 6055 -3 5689 5690 5694 -3 5689 5694 5693 -3 5689 5693 6058 -3 5689 6058 6056 -3 5691 5692 5696 -3 5691 5696 5695 -3 5692 6057 5696 -3 6059 5696 6057 -3 5693 5694 5698 -3 5693 5698 5697 -3 5693 5697 6060 -3 5693 6060 6058 -3 5695 5696 5700 -3 5695 5700 5699 -3 5696 6059 5700 -3 6061 5700 6059 -3 5697 5698 5702 -3 5697 5702 5701 -3 5697 5701 6062 -3 5697 6062 6060 -3 5699 5700 5704 -3 5699 5704 5703 -3 5700 6061 5704 -3 6065 5704 6061 -3 5701 5702 6063 -3 5701 6063 6062 -3 5702 5705 6067 -3 5702 6067 6063 -3 5703 5704 6065 -3 5703 6065 6064 -3 5703 6064 5708 -3 6068 5708 6064 -3 5705 5706 5710 -3 5705 5710 5709 -3 5705 5709 6070 -3 5705 6070 6067 -3 5707 5708 5714 -3 5707 5714 5713 -3 5708 6068 5714 -3 6073 5714 6068 -3 5709 5710 6070 -3 6071 6070 5710 -3 5710 5711 5715 -3 5716 5715 5711 -3 5710 5715 6071 -3 6075 6071 5715 -3 5712 5713 5782 -3 5783 5782 5713 -3 5713 5714 6072 -3 6073 6072 5714 -3 5713 6072 5783 -3 6080 5783 6072 -3 5715 5716 6075 -3 6076 6075 5716 -3 
5716 5717 6076 -3 6077 6076 5717 -3 5717 5718 5784 -3 5785 5784 5718 -3 5717 5784 6083 -3 5717 6083 6077 -3 5718 5719 5785 -3 5786 5785 5719 -3 5719 5720 5786 -3 5787 5786 5720 -3 5720 5721 5787 -3 5788 5787 5721 -3 5721 5722 5788 -3 5789 5788 5722 -3 5722 5723 5789 -3 5790 5789 5723 -3 5723 5724 5790 -3 5791 5790 5724 -3 5724 5725 5791 -3 5792 5791 5725 -3 5725 5726 5792 -3 5793 5792 5726 -3 5726 5727 5793 -3 5794 5793 5727 -3 5727 5728 5794 -3 5795 5794 5728 -3 5728 5729 5795 -3 5796 5795 5729 -3 5729 5730 5796 -3 5797 5796 5730 -3 5730 5731 5797 -3 5798 5797 5731 -3 5731 5732 5798 -3 5799 5798 5732 -3 5732 5733 5799 -3 5800 5799 5733 -3 5733 5734 5800 -3 5801 5800 5734 -3 5734 5735 5801 -3 5802 5801 5735 -3 5735 5736 5802 -3 5803 5802 5736 -3 5736 5737 5803 -3 5804 5803 5737 -3 5737 5738 5804 -3 5805 5804 5738 -3 5738 5739 5805 -3 5806 5805 5739 -3 5739 5740 5806 -3 5807 5806 5740 -3 5740 5741 5807 -3 5808 5807 5741 -3 5741 5742 5808 -3 5809 5808 5742 -3 5742 5743 5809 -3 5810 5809 5743 -3 5743 5744 5810 -3 5811 5810 5744 -3 5744 5745 5811 -3 5812 5811 5745 -3 5745 5746 5812 -3 5813 5812 5746 -3 5746 5747 5813 -3 5814 5813 5747 -3 5747 5748 5814 -3 5815 5814 5748 -3 5748 5749 5815 -3 5816 5815 5749 -3 5749 5750 5816 -3 5817 5816 5750 -3 5750 5751 5817 -3 5818 5817 5751 -3 5751 5752 5818 -3 5819 5818 5752 -3 5752 5753 5819 -3 5820 5819 5753 -3 5753 5754 5820 -3 5821 5820 5754 -3 5754 5755 5821 -3 5822 5821 5755 -3 5755 5756 5823 -3 5755 5823 5822 -3 5756 5757 5824 -3 5756 5824 5823 -3 5757 5758 5825 -3 5757 5825 5824 -3 5758 5759 5826 -3 5758 5826 5825 -3 5759 5760 5827 -3 5759 5827 5826 -3 5760 5761 5828 -3 5760 5828 5827 -3 5761 5762 5829 -3 5761 5829 5828 -3 5762 5763 5830 -3 5762 5830 5829 -3 5763 5764 5831 -3 5763 5831 5830 -3 5764 5765 5832 -3 5764 5832 5831 -3 5765 5766 5833 -3 5765 5833 5832 -3 5766 5767 5834 -3 5766 5834 5833 -3 5767 5768 5835 -3 5767 5835 5834 -3 5768 5769 5836 -3 5768 5836 5835 -3 5769 5770 5837 -3 5769 5837 5836 -3 5770 5771 5838 -3 5770 5838 5837 -3 5771 5772 5839 -3 5771 5839 5838 -3 5772 5773 5840 -3 5772 5840 5839 -3 5773 5774 5841 -3 5773 5841 5840 -3 5774 5775 5842 -3 5774 5842 5841 -3 5775 5776 5843 -3 5775 5843 5842 -3 5776 5777 5844 -3 5776 5844 5843 -3 5777 5778 5845 -3 5777 5845 5844 -3 5778 5779 5846 -3 5778 5846 5845 -3 5779 5780 5847 -3 5779 5847 5846 -3 5780 5781 5848 -3 5780 5848 5847 -3 5781 5782 6079 -3 5781 6079 6078 -3 5781 6078 6147 -3 5781 6147 5848 -3 5782 5783 6080 -3 5782 6080 6079 -3 5784 5785 6084 -3 5784 6084 6083 -3 5785 5786 6085 -3 5785 6085 6084 -3 5786 5787 6086 -3 5786 6086 6085 -3 5787 5788 6087 -3 5787 6087 6086 -3 5788 5789 6088 -3 5788 6088 6087 -3 5789 5790 6089 -3 5789 6089 6088 -3 5790 5791 6090 -3 5790 6090 6089 -3 5791 5792 6091 -3 5791 6091 6090 -3 5792 5793 6092 -3 5792 6092 6091 -3 5793 5794 6093 -3 5793 6093 6092 -3 5794 5795 6094 -3 5794 6094 6093 -3 5795 5796 6095 -3 5795 6095 6094 -3 5796 5797 6096 -3 5796 6096 6095 -3 5797 5798 6097 -3 5797 6097 6096 -3 5798 5799 6098 -3 5798 6098 6097 -3 5799 5800 6099 -3 5799 6099 6098 -3 5800 5801 6100 -3 5800 6100 6099 -3 5801 5802 6101 -3 5801 6101 6100 -3 5802 5803 6101 -3 6102 6101 5803 -3 5803 5804 6102 -3 6103 6102 5804 -3 5804 5805 6103 -3 6104 6103 5805 -3 5805 5806 6104 -3 6105 6104 5806 -3 5806 5807 6105 -3 6106 6105 5807 -3 5807 5808 6106 -3 6107 6106 5808 -3 5808 5809 6107 -3 6108 6107 5809 -3 5809 5810 6108 -3 6109 6108 5810 -3 5810 5811 6109 -3 6110 6109 5811 -3 5811 5812 6110 -3 6111 6110 5812 -3 5812 5813 6111 -3 6112 6111 5813 -3 5813 5814 6112 -3 6113 
6112 5814 -3 5814 5815 6113 -3 6114 6113 5815 -3 5815 5816 6114 -3 6115 6114 5816 -3 5816 5817 6115 -3 6116 6115 5817 -3 5817 5818 6116 -3 6117 6116 5818 -3 5818 5819 6117 -3 6118 6117 5819 -3 5819 5820 6118 -3 6119 6118 5820 -3 5820 5821 6119 -3 6120 6119 5821 -3 5821 5822 6120 -3 6121 6120 5822 -3 5822 5823 6121 -3 6122 6121 5823 -3 5823 5824 6122 -3 6123 6122 5824 -3 5824 5825 6123 -3 6124 6123 5825 -3 5825 5826 6124 -3 6125 6124 5826 -3 5826 5827 6125 -3 6126 6125 5827 -3 5827 5828 6126 -3 6127 6126 5828 -3 5828 5829 6127 -3 6128 6127 5829 -3 5829 5830 6128 -3 6129 6128 5830 -3 5830 5831 6129 -3 6130 6129 5831 -3 5831 5832 6130 -3 6131 6130 5832 -3 5832 5833 6131 -3 6132 6131 5833 -3 5833 5834 6132 -3 6133 6132 5834 -3 5834 5835 6133 -3 6134 6133 5835 -3 5835 5836 6134 -3 6135 6134 5836 -3 5836 5837 6135 -3 6136 6135 5837 -3 5837 5838 6136 -3 6137 6136 5838 -3 5838 5839 6137 -3 6138 6137 5839 -3 5839 5840 6138 -3 6139 6138 5840 -3 5840 5841 6139 -3 6140 6139 5841 -3 5841 5842 6140 -3 6141 6140 5842 -3 5842 5843 6141 -3 6142 6141 5843 -3 5843 5844 6142 -3 6143 6142 5844 -3 5844 5845 6143 -3 6144 6143 5845 -3 5845 5846 6144 -3 6145 6144 5846 -3 5846 5847 6145 -3 6146 6145 5847 -3 5847 5848 6146 -3 6147 6146 5848 -3 5849 5850 5919 -3 5849 5919 5918 -3 5849 6150 5850 -3 6151 5850 6150 -3 5849 5918 6220 -3 5849 6220 6150 -3 5850 6151 5851 -3 6152 5851 6151 -3 5851 6152 5852 -3 6153 5852 6152 -3 5852 6153 5853 -3 6154 5853 6153 -3 5853 6154 5854 -3 6155 5854 6154 -3 5854 6155 5855 -3 6156 5855 6155 -3 5855 6156 5856 -3 6157 5856 6156 -3 5856 6157 5857 -3 6158 5857 6157 -3 5857 6158 5858 -3 6159 5858 6158 -3 5858 6159 5859 -3 6160 5859 6159 -3 5859 6160 5860 -3 6161 5860 6160 -3 5860 6161 5861 -3 6162 5861 6161 -3 5861 6162 5862 -3 6163 5862 6162 -3 5862 6163 5863 -3 6164 5863 6163 -3 5863 6164 5864 -3 6165 5864 6164 -3 5864 6165 5865 -3 6166 5865 6165 -3 5865 6166 5866 -3 6167 5866 6166 -3 5866 6167 5867 -3 6168 5867 6167 -3 5867 6168 6169 -3 5867 6169 5868 -3 5868 6169 6170 -3 5868 6170 5869 -3 5869 6170 6171 -3 5869 6171 5870 -3 5870 6171 6172 -3 5870 6172 5871 -3 5871 6172 6173 -3 5871 6173 5872 -3 5872 6173 6174 -3 5872 6174 5873 -3 5873 6174 6175 -3 5873 6175 5874 -3 5874 6175 6176 -3 5874 6176 5875 -3 5875 6176 6177 -3 5875 6177 5876 -3 5876 6177 6178 -3 5876 6178 5877 -3 5877 6178 6179 -3 5877 6179 5878 -3 5878 6179 6180 -3 5878 6180 5879 -3 5879 6180 6181 -3 5879 6181 5880 -3 5880 6181 6182 -3 5880 6182 5881 -3 5881 6182 6183 -3 5881 6183 5882 -3 5882 6183 6184 -3 5882 6184 5883 -3 5883 6184 6185 -3 5883 6185 5884 -3 5884 6185 6186 -3 5884 6186 5885 -3 5885 6186 6187 -3 5885 6187 5886 -3 5886 6187 6188 -3 5886 6188 5887 -3 5887 6188 6189 -3 5887 6189 5888 -3 5888 6189 6190 -3 5888 6190 5889 -3 5889 6190 6191 -3 5889 6191 5890 -3 5890 6191 6192 -3 5890 6192 5891 -3 5891 6192 6193 -3 5891 6193 5892 -3 5892 6193 6194 -3 5892 6194 5893 -3 5893 6194 6195 -3 5893 6195 5894 -3 5894 6195 6196 -3 5894 6196 5895 -3 5895 6196 6197 -3 5895 6197 5896 -3 5896 6197 6198 -3 5896 6198 5897 -3 5897 6198 6199 -3 5897 6199 5898 -3 5898 6199 6200 -3 5898 6200 5899 -3 5899 6200 6201 -3 5899 6201 5900 -3 5900 6201 6202 -3 5900 6202 5901 -3 5901 6202 6203 -3 5901 6203 5902 -3 5902 6203 6204 -3 5902 6204 5903 -3 5903 6204 6205 -3 5903 6205 5904 -3 5904 6205 6206 -3 5904 6206 5905 -3 5905 6206 6207 -3 5905 6207 5906 -3 5906 6207 6208 -3 5906 6208 5907 -3 5907 6208 6209 -3 5907 6209 5908 -3 5908 6209 6210 -3 5908 6210 5909 -3 5909 6210 6211 -3 5909 6211 5910 -3 5910 6211 6212 -3 5910 6212 5911 -3 5911 6212 
6213 -3 5911 6213 5912 -3 5912 6213 6214 -3 5912 6214 5913 -3 5913 6214 6215 -3 5913 6215 5914 -3 5914 5915 5920 -3 5921 5920 5915 -3 5914 6215 6216 -3 5914 6216 5915 -3 5915 6216 5921 -3 6221 5921 6216 -3 5916 5917 5924 -3 5925 5924 5917 -3 5916 6218 5917 -3 6219 5917 6218 -3 5916 5924 6218 -3 6225 6218 5924 -3 5917 6219 5918 -3 6220 5918 6219 -3 5921 6221 5922 -3 6222 5922 6221 -3 5922 5923 5926 -3 5927 5926 5923 -3 5922 6222 5923 -3 6223 5923 6222 -3 5923 6223 5927 -3 6226 5927 6223 -3 5924 5929 6225 -3 6229 6225 5929 -3 5927 6226 5930 -3 6230 5930 6226 -3 5928 5929 5932 -3 5933 5932 5929 -3 5928 6228 5929 -3 6229 5929 6228 -3 5928 5932 6228 -3 6232 6228 5932 -3 5930 5931 5934 -3 5935 5934 5931 -3 5930 6230 5931 -3 6231 5931 6230 -3 5931 6231 5935 -3 6233 5935 6231 -3 5932 5936 6232 -3 6234 6232 5936 -3 5935 6233 5937 -3 6235 5937 6233 -3 5936 5938 6234 -3 6236 6234 5938 -3 5937 6235 5939 -3 6237 5939 6235 -3 5938 5940 6236 -3 6238 6236 5940 -3 5939 6237 5941 -3 6239 5941 6237 -3 5940 5942 6238 -3 6240 6238 5942 -3 5941 6239 5943 -3 6241 5943 6239 -3 5942 5944 6240 -3 6242 6240 5944 -3 5943 6241 5945 -3 6243 5945 6241 -3 5944 5946 6244 -3 5944 6244 6242 -3 5945 6243 5947 -3 6245 5947 6243 -3 5946 5948 6246 -3 5946 6246 6244 -3 5947 6245 5949 -3 6247 5949 6245 -3 5948 5950 6248 -3 5948 6248 6246 -3 5949 6247 5951 -3 6249 5951 6247 -3 5950 5952 6250 -3 5950 6250 6248 -3 5951 6249 5953 -3 6251 5953 6249 -3 5952 5954 6252 -3 5952 6252 6250 -3 5953 6251 5955 -3 6253 5955 6251 -3 5954 5956 6254 -3 5954 6254 6252 -3 5955 6253 5957 -3 6255 5957 6253 -3 5956 5958 6256 -3 5956 6256 6254 -3 5957 6255 5959 -3 6257 5959 6255 -3 5958 5960 6258 -3 5958 6258 6256 -3 5959 6257 5961 -3 6259 5961 6257 -3 5960 5962 6260 -3 5960 6260 6258 -3 5961 6259 5963 -3 6261 5963 6259 -3 5962 5964 6262 -3 5962 6262 6260 -3 5963 6261 5965 -3 6263 5965 6261 -3 5964 5966 6264 -3 5964 6264 6262 -3 5965 6263 6265 -3 5965 6265 5967 -3 5966 5968 6266 -3 5966 6266 6264 -3 5967 6265 6267 -3 5967 6267 5969 -3 5968 5970 6268 -3 5968 6268 6266 -3 5969 6267 6269 -3 5969 6269 5971 -3 5970 5972 6270 -3 5970 6270 6268 -3 5971 6269 6271 -3 5971 6271 5973 -3 5972 5974 6272 -3 5972 6272 6270 -3 5973 6271 6273 -3 5973 6273 5975 -3 5974 5976 6274 -3 5974 6274 6272 -3 5975 6273 6275 -3 5975 6275 5977 -3 5976 5978 6276 -3 5976 6276 6274 -3 5977 6275 6277 -3 5977 6277 5979 -3 5978 5980 6278 -3 5978 6278 6276 -3 5979 6277 6279 -3 5979 6279 5981 -3 5980 5982 6280 -3 5980 6280 6278 -3 5981 6279 6281 -3 5981 6281 5983 -3 5982 5984 6282 -3 5982 6282 6280 -3 5983 6281 6283 -3 5983 6283 5985 -3 5984 5986 6284 -3 5984 6284 6282 -3 5985 6283 6285 -3 5985 6285 5987 -3 5986 5988 6286 -3 5986 6286 6284 -3 5987 6285 6287 -3 5987 6287 5989 -3 5988 5990 6288 -3 5988 6288 6286 -3 5989 6287 6289 -3 5989 6289 5991 -3 5990 5992 6290 -3 5990 6290 6288 -3 5991 6289 6291 -3 5991 6291 5993 -3 5992 5994 6290 -3 6292 6290 5994 -3 5993 6291 6293 -3 5993 6293 5995 -3 5994 5996 6292 -3 6294 6292 5996 -3 5995 6293 6295 -3 5995 6295 5997 -3 5996 5998 6294 -3 6296 6294 5998 -3 5997 6295 6297 -3 5997 6297 5999 -3 5998 6000 6296 -3 6298 6296 6000 -3 5999 6297 6299 -3 5999 6299 6001 -3 6000 6002 6298 -3 6300 6298 6002 -3 6001 6299 6301 -3 6001 6301 6003 -3 6002 6004 6300 -3 6302 6300 6004 -3 6003 6301 6303 -3 6003 6303 6005 -3 6004 6006 6302 -3 6304 6302 6006 -3 6005 6303 6305 -3 6005 6305 6007 -3 6006 6008 6304 -3 6306 6304 6008 -3 6007 6305 6307 -3 6007 6307 6009 -3 6008 6010 6306 -3 6308 6306 6010 -3 6009 6307 6309 -3 6009 6309 6011 -3 6010 6012 6308 -3 6310 6308 6012 -3 
6011 6309 6311 -3 6011 6311 6013 -3 6012 6014 6310 -3 6312 6310 6014 -3 6013 6311 6015 -3 6313 6015 6311 -3 6014 6016 6312 -3 6314 6312 6016 -3 6015 6313 6017 -3 6315 6017 6313 -3 6016 6018 6314 -3 6316 6314 6018 -3 6017 6315 6019 -3 6317 6019 6315 -3 6018 6020 6316 -3 6318 6316 6020 -3 6019 6317 6021 -3 6319 6021 6317 -3 6020 6022 6318 -3 6320 6318 6022 -3 6021 6319 6023 -3 6321 6023 6319 -3 6022 6024 6320 -3 6322 6320 6024 -3 6023 6321 6025 -3 6323 6025 6321 -3 6024 6026 6322 -3 6324 6322 6026 -3 6025 6323 6027 -3 6325 6027 6323 -3 6026 6028 6324 -3 6326 6324 6028 -3 6027 6325 6029 -3 6327 6029 6325 -3 6028 6030 6326 -3 6328 6326 6030 -3 6029 6327 6031 -3 6329 6031 6327 -3 6030 6032 6328 -3 6330 6328 6032 -3 6031 6329 6033 -3 6331 6033 6329 -3 6032 6034 6330 -3 6332 6330 6034 -3 6033 6331 6035 -3 6333 6035 6331 -3 6034 6036 6332 -3 6334 6332 6036 -3 6035 6333 6037 -3 6335 6037 6333 -3 6036 6038 6334 -3 6336 6334 6038 -3 6037 6335 6039 -3 6337 6039 6335 -3 6038 6040 6338 -3 6038 6338 6336 -3 6039 6337 6041 -3 6339 6041 6337 -3 6040 6042 6340 -3 6040 6340 6338 -3 6041 6339 6043 -3 6341 6043 6339 -3 6042 6044 6342 -3 6042 6342 6340 -3 6043 6341 6045 -3 6343 6045 6341 -3 6044 6046 6344 -3 6044 6344 6342 -3 6045 6343 6047 -3 6345 6047 6343 -3 6046 6048 6346 -3 6046 6346 6344 -3 6047 6345 6049 -3 6347 6049 6345 -3 6048 6050 6348 -3 6048 6348 6346 -3 6049 6347 6051 -3 6349 6051 6347 -3 6050 6052 6350 -3 6050 6350 6348 -3 6051 6349 6053 -3 6351 6053 6349 -3 6052 6054 6352 -3 6052 6352 6350 -3 6053 6351 6055 -3 6353 6055 6351 -3 6054 6056 6354 -3 6054 6354 6352 -3 6055 6353 6057 -3 6355 6057 6353 -3 6056 6058 6356 -3 6056 6356 6354 -3 6057 6355 6059 -3 6357 6059 6355 -3 6058 6060 6358 -3 6058 6358 6356 -3 6059 6357 6061 -3 6359 6061 6357 -3 6060 6062 6360 -3 6060 6360 6358 -3 6061 6359 6065 -3 6361 6065 6359 -3 6062 6063 6067 -3 6062 6067 6066 -3 6062 6066 6362 -3 6062 6362 6360 -3 6064 6065 6069 -3 6064 6069 6068 -3 6065 6361 6365 -3 6065 6365 6069 -3 6066 6067 6363 -3 6066 6363 6362 -3 6067 6070 6367 -3 6067 6367 6363 -3 6068 6069 6365 -3 6068 6365 6364 -3 6068 6364 6368 -3 6068 6368 6073 -3 6070 6071 6075 -3 6070 6075 6074 -3 6070 6074 6370 -3 6070 6370 6367 -3 6072 6073 6081 -3 6072 6081 6080 -3 6073 6368 6375 -3 6073 6375 6081 -3 6074 6075 6371 -3 6074 6371 6370 -3 6075 6076 6372 -3 6075 6372 6371 -3 6076 6077 6083 -3 6076 6083 6082 -3 6076 6082 6377 -3 6076 6377 6372 -3 6078 6079 6148 -3 6078 6148 6147 -3 6079 6080 6374 -3 6079 6374 6373 -3 6079 6373 6443 -3 6079 6443 6148 -3 6080 6081 6375 -3 6080 6375 6374 -3 6082 6083 6378 -3 6082 6378 6377 -3 6083 6084 6379 -3 6083 6379 6378 -3 6084 6085 6380 -3 6084 6380 6379 -3 6085 6086 6381 -3 6085 6381 6380 -3 6086 6087 6382 -3 6086 6382 6381 -3 6087 6088 6382 -3 6383 6382 6088 -3 6088 6089 6383 -3 6384 6383 6089 -3 6089 6090 6384 -3 6385 6384 6090 -3 6090 6091 6385 -3 6386 6385 6091 -3 6091 6092 6386 -3 6387 6386 6092 -3 6092 6093 6387 -3 6388 6387 6093 -3 6093 6094 6388 -3 6389 6388 6094 -3 6094 6095 6389 -3 6390 6389 6095 -3 6095 6096 6390 -3 6391 6390 6096 -3 6096 6097 6391 -3 6392 6391 6097 -3 6097 6098 6392 -3 6393 6392 6098 -3 6098 6099 6393 -3 6394 6393 6099 -3 6099 6100 6394 -3 6395 6394 6100 -3 6100 6101 6395 -3 6396 6395 6101 -3 6101 6102 6396 -3 6397 6396 6102 -3 6102 6103 6397 -3 6398 6397 6103 -3 6103 6104 6398 -3 6399 6398 6104 -3 6104 6105 6399 -3 6400 6399 6105 -3 6105 6106 6400 -3 6401 6400 6106 -3 6106 6107 6401 -3 6402 6401 6107 -3 6107 6108 6402 -3 6403 6402 6108 -3 6108 6109 6403 -3 6404 6403 6109 -3 6109 6110 6404 -3 6405 
6404 6110 -3 6110 6111 6405 -3 6406 6405 6111 -3 6111 6112 6406 -3 6407 6406 6112 -3 6112 6113 6407 -3 6408 6407 6113 -3 6113 6114 6408 -3 6409 6408 6114 -3 6114 6115 6409 -3 6410 6409 6115 -3 6115 6116 6410 -3 6411 6410 6116 -3 6116 6117 6411 -3 6412 6411 6117 -3 6117 6118 6412 -3 6413 6412 6118 -3 6118 6119 6413 -3 6414 6413 6119 -3 6119 6120 6414 -3 6415 6414 6120 -3 6120 6121 6415 -3 6416 6415 6121 -3 6121 6122 6416 -3 6417 6416 6122 -3 6122 6123 6417 -3 6418 6417 6123 -3 6123 6124 6418 -3 6419 6418 6124 -3 6124 6125 6419 -3 6420 6419 6125 -3 6125 6126 6420 -3 6421 6420 6126 -3 6126 6127 6421 -3 6422 6421 6127 -3 6127 6128 6422 -3 6423 6422 6128 -3 6128 6129 6423 -3 6424 6423 6129 -3 6129 6130 6424 -3 6425 6424 6130 -3 6130 6131 6425 -3 6426 6425 6131 -3 6131 6132 6426 -3 6427 6426 6132 -3 6132 6133 6427 -3 6428 6427 6133 -3 6133 6134 6428 -3 6429 6428 6134 -3 6134 6135 6429 -3 6430 6429 6135 -3 6135 6136 6431 -3 6135 6431 6430 -3 6136 6137 6432 -3 6136 6432 6431 -3 6137 6138 6433 -3 6137 6433 6432 -3 6138 6139 6434 -3 6138 6434 6433 -3 6139 6140 6435 -3 6139 6435 6434 -3 6140 6141 6436 -3 6140 6436 6435 -3 6141 6142 6437 -3 6141 6437 6436 -3 6142 6143 6438 -3 6142 6438 6437 -3 6143 6144 6439 -3 6143 6439 6438 -3 6144 6145 6440 -3 6144 6440 6439 -3 6145 6146 6441 -3 6145 6441 6440 -3 6146 6147 6442 -3 6146 6442 6441 -3 6147 6148 6443 -3 6147 6443 6442 -3 6149 6150 6220 -3 6149 6220 6219 -3 6149 6445 6150 -3 6446 6150 6445 -3 6149 6219 6445 -3 6515 6445 6219 -3 6150 6446 6151 -3 6447 6151 6446 -3 6151 6447 6152 -3 6448 6152 6447 -3 6152 6448 6153 -3 6449 6153 6448 -3 6153 6449 6154 -3 6450 6154 6449 -3 6154 6450 6155 -3 6451 6155 6450 -3 6155 6451 6156 -3 6452 6156 6451 -3 6156 6452 6157 -3 6453 6157 6452 -3 6157 6453 6158 -3 6454 6158 6453 -3 6158 6454 6159 -3 6455 6159 6454 -3 6159 6455 6160 -3 6456 6160 6455 -3 6160 6456 6457 -3 6160 6457 6161 -3 6161 6457 6458 -3 6161 6458 6162 -3 6162 6458 6459 -3 6162 6459 6163 -3 6163 6459 6460 -3 6163 6460 6164 -3 6164 6460 6461 -3 6164 6461 6165 -3 6165 6461 6462 -3 6165 6462 6166 -3 6166 6462 6463 -3 6166 6463 6167 -3 6167 6463 6464 -3 6167 6464 6168 -3 6168 6464 6465 -3 6168 6465 6169 -3 6169 6465 6466 -3 6169 6466 6170 -3 6170 6466 6467 -3 6170 6467 6171 -3 6171 6467 6468 -3 6171 6468 6172 -3 6172 6468 6469 -3 6172 6469 6173 -3 6173 6469 6470 -3 6173 6470 6174 -3 6174 6470 6471 -3 6174 6471 6175 -3 6175 6471 6472 -3 6175 6472 6176 -3 6176 6472 6473 -3 6176 6473 6177 -3 6177 6473 6474 -3 6177 6474 6178 -3 6178 6474 6475 -3 6178 6475 6179 -3 6179 6475 6476 -3 6179 6476 6180 -3 6180 6476 6477 -3 6180 6477 6181 -3 6181 6477 6478 -3 6181 6478 6182 -3 6182 6478 6479 -3 6182 6479 6183 -3 6183 6479 6480 -3 6183 6480 6184 -3 6184 6480 6481 -3 6184 6481 6185 -3 6185 6481 6482 -3 6185 6482 6186 -3 6186 6482 6483 -3 6186 6483 6187 -3 6187 6483 6484 -3 6187 6484 6188 -3 6188 6484 6485 -3 6188 6485 6189 -3 6189 6485 6486 -3 6189 6486 6190 -3 6190 6486 6487 -3 6190 6487 6191 -3 6191 6487 6488 -3 6191 6488 6192 -3 6192 6488 6489 -3 6192 6489 6193 -3 6193 6489 6490 -3 6193 6490 6194 -3 6194 6490 6491 -3 6194 6491 6195 -3 6195 6491 6492 -3 6195 6492 6196 -3 6196 6492 6493 -3 6196 6493 6197 -3 6197 6493 6494 -3 6197 6494 6198 -3 6198 6494 6495 -3 6198 6495 6199 -3 6199 6495 6496 -3 6199 6496 6200 -3 6200 6496 6497 -3 6200 6497 6201 -3 6201 6497 6498 -3 6201 6498 6202 -3 6202 6498 6499 -3 6202 6499 6203 -3 6203 6499 6500 -3 6203 6500 6204 -3 6204 6500 6501 -3 6204 6501 6205 -3 6205 6501 6502 -3 6205 6502 6206 -3 6206 6502 6503 -3 6206 6503 6207 -3 6207 6503 
6504 -3 6207 6504 6208 -3 6208 6504 6505 -3 6208 6505 6209 -3 6209 6505 6210 -3 6506 6210 6505 -3 6210 6506 6211 -3 6507 6211 6506 -3 6211 6507 6212 -3 6508 6212 6507 -3 6212 6508 6213 -3 6509 6213 6508 -3 6213 6509 6214 -3 6510 6214 6509 -3 6214 6510 6215 -3 6511 6215 6510 -3 6215 6511 6216 -3 6512 6216 6511 -3 6216 6217 6221 -3 6222 6221 6217 -3 6216 6512 6217 -3 6513 6217 6512 -3 6217 6513 6222 -3 6516 6222 6513 -3 6218 6514 6219 -3 6515 6219 6514 -3 6218 6225 6514 -3 6519 6514 6225 -3 6222 6516 6223 -3 6517 6223 6516 -3 6223 6517 6226 -3 6520 6226 6517 -3 6224 6225 6228 -3 6229 6228 6225 -3 6224 6518 6225 -3 6519 6225 6518 -3 6224 6228 6518 -3 6522 6518 6228 -3 6226 6227 6230 -3 6231 6230 6227 -3 6226 6520 6227 -3 6521 6227 6520 -3 6227 6521 6231 -3 6523 6231 6521 -3 6228 6232 6522 -3 6524 6522 6232 -3 6231 6523 6233 -3 6525 6233 6523 -3 6232 6234 6526 -3 6232 6526 6524 -3 6233 6525 6235 -3 6527 6235 6525 -3 6234 6236 6528 -3 6234 6528 6526 -3 6235 6527 6237 -3 6529 6237 6527 -3 6236 6238 6530 -3 6236 6530 6528 -3 6237 6529 6239 -3 6531 6239 6529 -3 6238 6240 6532 -3 6238 6532 6530 -3 6239 6531 6241 -3 6533 6241 6531 -3 6240 6242 6534 -3 6240 6534 6532 -3 6241 6533 6243 -3 6535 6243 6533 -3 6242 6244 6536 -3 6242 6536 6534 -3 6243 6535 6245 -3 6537 6245 6535 -3 6244 6246 6538 -3 6244 6538 6536 -3 6245 6537 6247 -3 6539 6247 6537 -3 6246 6248 6540 -3 6246 6540 6538 -3 6247 6539 6249 -3 6541 6249 6539 -3 6248 6250 6542 -3 6248 6542 6540 -3 6249 6541 6251 -3 6543 6251 6541 -3 6250 6252 6544 -3 6250 6544 6542 -3 6251 6543 6253 -3 6545 6253 6543 -3 6252 6254 6546 -3 6252 6546 6544 -3 6253 6545 6255 -3 6547 6255 6545 -3 6254 6256 6548 -3 6254 6548 6546 -3 6255 6547 6257 -3 6549 6257 6547 -3 6256 6258 6550 -3 6256 6550 6548 -3 6257 6549 6259 -3 6551 6259 6549 -3 6258 6260 6552 -3 6258 6552 6550 -3 6259 6551 6261 -3 6553 6261 6551 -3 6260 6262 6554 -3 6260 6554 6552 -3 6261 6553 6555 -3 6261 6555 6263 -3 6262 6264 6556 -3 6262 6556 6554 -3 6263 6555 6557 -3 6263 6557 6265 -3 6264 6266 6558 -3 6264 6558 6556 -3 6265 6557 6559 -3 6265 6559 6267 -3 6266 6268 6560 -3 6266 6560 6558 -3 6267 6559 6561 -3 6267 6561 6269 -3 6268 6270 6562 -3 6268 6562 6560 -3 6269 6561 6563 -3 6269 6563 6271 -3 6270 6272 6564 -3 6270 6564 6562 -3 6271 6563 6565 -3 6271 6565 6273 -3 6272 6274 6566 -3 6272 6566 6564 -3 6273 6565 6567 -3 6273 6567 6275 -3 6274 6276 6568 -3 6274 6568 6566 -3 6275 6567 6569 -3 6275 6569 6277 -3 6276 6278 6570 -3 6276 6570 6568 -3 6277 6569 6571 -3 6277 6571 6279 -3 6278 6280 6572 -3 6278 6572 6570 -3 6279 6571 6573 -3 6279 6573 6281 -3 6280 6282 6572 -3 6574 6572 6282 -3 6281 6573 6575 -3 6281 6575 6283 -3 6282 6284 6574 -3 6576 6574 6284 -3 6283 6575 6577 -3 6283 6577 6285 -3 6284 6286 6576 -3 6578 6576 6286 -3 6285 6577 6579 -3 6285 6579 6287 -3 6286 6288 6578 -3 6580 6578 6288 -3 6287 6579 6581 -3 6287 6581 6289 -3 6288 6290 6580 -3 6582 6580 6290 -3 6289 6581 6583 -3 6289 6583 6291 -3 6290 6292 6582 -3 6584 6582 6292 -3 6291 6583 6585 -3 6291 6585 6293 -3 6292 6294 6584 -3 6586 6584 6294 -3 6293 6585 6587 -3 6293 6587 6295 -3 6294 6296 6586 -3 6588 6586 6296 -3 6295 6587 6589 -3 6295 6589 6297 -3 6296 6298 6588 -3 6590 6588 6298 -3 6297 6589 6591 -3 6297 6591 6299 -3 6298 6300 6590 -3 6592 6590 6300 -3 6299 6591 6593 -3 6299 6593 6301 -3 6300 6302 6592 -3 6594 6592 6302 -3 6301 6593 6595 -3 6301 6595 6303 -3 6302 6304 6594 -3 6596 6594 6304 -3 6303 6595 6597 -3 6303 6597 6305 -3 6304 6306 6596 -3 6598 6596 6306 -3 6305 6597 6599 -3 6305 6599 6307 -3 6306 6308 6598 -3 6600 6598 6308 -3 
6307 6599 6601 -3 6307 6601 6309 -3 6308 6310 6600 -3 6602 6600 6310 -3 6309 6601 6603 -3 6309 6603 6311 -3 6310 6312 6602 -3 6604 6602 6312 -3 6311 6603 6313 -3 6605 6313 6603 -3 6312 6314 6604 -3 6606 6604 6314 -3 6313 6605 6315 -3 6607 6315 6605 -3 6314 6316 6606 -3 6608 6606 6316 -3 6315 6607 6317 -3 6609 6317 6607 -3 6316 6318 6608 -3 6610 6608 6318 -3 6317 6609 6319 -3 6611 6319 6609 -3 6318 6320 6610 -3 6612 6610 6320 -3 6319 6611 6321 -3 6613 6321 6611 -3 6320 6322 6612 -3 6614 6612 6322 -3 6321 6613 6323 -3 6615 6323 6613 -3 6322 6324 6614 -3 6616 6614 6324 -3 6323 6615 6325 -3 6617 6325 6615 -3 6324 6326 6616 -3 6618 6616 6326 -3 6325 6617 6327 -3 6619 6327 6617 -3 6326 6328 6618 -3 6620 6618 6328 -3 6327 6619 6329 -3 6621 6329 6619 -3 6328 6330 6620 -3 6622 6620 6330 -3 6329 6621 6331 -3 6623 6331 6621 -3 6330 6332 6624 -3 6330 6624 6622 -3 6331 6623 6333 -3 6625 6333 6623 -3 6332 6334 6626 -3 6332 6626 6624 -3 6333 6625 6335 -3 6627 6335 6625 -3 6334 6336 6628 -3 6334 6628 6626 -3 6335 6627 6337 -3 6629 6337 6627 -3 6336 6338 6630 -3 6336 6630 6628 -3 6337 6629 6339 -3 6631 6339 6629 -3 6338 6340 6632 -3 6338 6632 6630 -3 6339 6631 6341 -3 6633 6341 6631 -3 6340 6342 6634 -3 6340 6634 6632 -3 6341 6633 6343 -3 6635 6343 6633 -3 6342 6344 6636 -3 6342 6636 6634 -3 6343 6635 6345 -3 6637 6345 6635 -3 6344 6346 6638 -3 6344 6638 6636 -3 6345 6637 6347 -3 6639 6347 6637 -3 6346 6348 6640 -3 6346 6640 6638 -3 6347 6639 6349 -3 6641 6349 6639 -3 6348 6350 6642 -3 6348 6642 6640 -3 6349 6641 6351 -3 6643 6351 6641 -3 6350 6352 6644 -3 6350 6644 6642 -3 6351 6643 6353 -3 6645 6353 6643 -3 6352 6354 6646 -3 6352 6646 6644 -3 6353 6645 6355 -3 6647 6355 6645 -3 6354 6356 6648 -3 6354 6648 6646 -3 6355 6647 6357 -3 6649 6357 6647 -3 6356 6358 6650 -3 6356 6650 6648 -3 6357 6649 6359 -3 6651 6359 6649 -3 6358 6360 6652 -3 6358 6652 6650 -3 6359 6651 6361 -3 6653 6361 6651 -3 6360 6362 6654 -3 6360 6654 6652 -3 6361 6653 6655 -3 6361 6655 6365 -3 6362 6363 6367 -3 6362 6367 6366 -3 6362 6366 6656 -3 6362 6656 6654 -3 6364 6365 6369 -3 6364 6369 6368 -3 6365 6655 6659 -3 6365 6659 6369 -3 6366 6367 6657 -3 6366 6657 6656 -3 6367 6370 6660 -3 6367 6660 6657 -3 6368 6369 6659 -3 6368 6659 6658 -3 6368 6658 6663 -3 6368 6663 6375 -3 6370 6371 6661 -3 6370 6661 6660 -3 6371 6372 6377 -3 6371 6377 6376 -3 6371 6376 6664 -3 6371 6664 6661 -3 6373 6374 6444 -3 6373 6444 6443 -3 6374 6375 6663 -3 6374 6663 6662 -3 6374 6662 6732 -3 6374 6732 6444 -3 6376 6377 6665 -3 6376 6665 6664 -3 6377 6378 6666 -3 6377 6666 6665 -3 6378 6379 6666 -3 6667 6666 6379 -3 6379 6380 6667 -3 6668 6667 6380 -3 6380 6381 6668 -3 6669 6668 6381 -3 6381 6382 6669 -3 6670 6669 6382 -3 6382 6383 6670 -3 6671 6670 6383 -3 6383 6384 6671 -3 6672 6671 6384 -3 6384 6385 6672 -3 6673 6672 6385 -3 6385 6386 6673 -3 6674 6673 6386 -3 6386 6387 6674 -3 6675 6674 6387 -3 6387 6388 6675 -3 6676 6675 6388 -3 6388 6389 6676 -3 6677 6676 6389 -3 6389 6390 6677 -3 6678 6677 6390 -3 6390 6391 6678 -3 6679 6678 6391 -3 6391 6392 6679 -3 6680 6679 6392 -3 6392 6393 6680 -3 6681 6680 6393 -3 6393 6394 6681 -3 6682 6681 6394 -3 6394 6395 6682 -3 6683 6682 6395 -3 6395 6396 6683 -3 6684 6683 6396 -3 6396 6397 6684 -3 6685 6684 6397 -3 6397 6398 6685 -3 6686 6685 6398 -3 6398 6399 6686 -3 6687 6686 6399 -3 6399 6400 6687 -3 6688 6687 6400 -3 6400 6401 6688 -3 6689 6688 6401 -3 6401 6402 6689 -3 6690 6689 6402 -3 6402 6403 6690 -3 6691 6690 6403 -3 6403 6404 6691 -3 6692 6691 6404 -3 6404 6405 6692 -3 6693 6692 6405 -3 6405 6406 6693 -3 6694 
6693 6406 -3 6406 6407 6694 -3 6695 6694 6407 -3 6407 6408 6695 -3 6696 6695 6408 -3 6408 6409 6696 -3 6697 6696 6409 -3 6409 6410 6697 -3 6698 6697 6410 -3 6410 6411 6698 -3 6699 6698 6411 -3 6411 6412 6699 -3 6700 6699 6412 -3 6412 6413 6700 -3 6701 6700 6413 -3 6413 6414 6701 -3 6702 6701 6414 -3 6414 6415 6702 -3 6703 6702 6415 -3 6415 6416 6703 -3 6704 6703 6416 -3 6416 6417 6704 -3 6705 6704 6417 -3 6417 6418 6705 -3 6706 6705 6418 -3 6418 6419 6706 -3 6707 6706 6419 -3 6419 6420 6707 -3 6708 6707 6420 -3 6420 6421 6708 -3 6709 6708 6421 -3 6421 6422 6709 -3 6710 6709 6422 -3 6422 6423 6710 -3 6711 6710 6423 -3 6423 6424 6711 -3 6712 6711 6424 -3 6424 6425 6712 -3 6713 6712 6425 -3 6425 6426 6713 -3 6714 6713 6426 -3 6426 6427 6714 -3 6715 6714 6427 -3 6427 6428 6715 -3 6716 6715 6428 -3 6428 6429 6717 -3 6428 6717 6716 -3 6429 6430 6718 -3 6429 6718 6717 -3 6430 6431 6719 -3 6430 6719 6718 -3 6431 6432 6720 -3 6431 6720 6719 -3 6432 6433 6721 -3 6432 6721 6720 -3 6433 6434 6722 -3 6433 6722 6721 -3 6434 6435 6723 -3 6434 6723 6722 -3 6435 6436 6724 -3 6435 6724 6723 -3 6436 6437 6725 -3 6436 6725 6724 -3 6437 6438 6726 -3 6437 6726 6725 -3 6438 6439 6727 -3 6438 6727 6726 -3 6439 6440 6728 -3 6439 6728 6727 -3 6440 6441 6729 -3 6440 6729 6728 -3 6441 6442 6730 -3 6441 6730 6729 -3 6442 6443 6731 -3 6442 6731 6730 -3 6443 6444 6732 -3 6443 6732 6731 -3 6445 6733 6446 -3 6734 6446 6733 -3 6445 6515 6733 -3 6803 6733 6515 -3 6446 6734 6447 -3 6735 6447 6734 -3 6447 6735 6448 -3 6736 6448 6735 -3 6448 6736 6449 -3 6737 6449 6736 -3 6449 6737 6450 -3 6738 6450 6737 -3 6450 6738 6451 -3 6739 6451 6738 -3 6451 6739 6452 -3 6740 6452 6739 -3 6452 6740 6453 -3 6741 6453 6740 -3 6453 6741 6454 -3 6742 6454 6741 -3 6454 6742 6455 -3 6743 6455 6742 -3 6455 6743 6456 -3 6744 6456 6743 -3 6456 6744 6457 -3 6745 6457 6744 -3 6457 6745 6458 -3 6746 6458 6745 -3 6458 6746 6459 -3 6747 6459 6746 -3 6459 6747 6460 -3 6748 6460 6747 -3 6460 6748 6461 -3 6749 6461 6748 -3 6461 6749 6462 -3 6750 6462 6749 -3 6462 6750 6751 -3 6462 6751 6463 -3 6463 6751 6752 -3 6463 6752 6464 -3 6464 6752 6753 -3 6464 6753 6465 -3 6465 6753 6754 -3 6465 6754 6466 -3 6466 6754 6755 -3 6466 6755 6467 -3 6467 6755 6756 -3 6467 6756 6468 -3 6468 6756 6757 -3 6468 6757 6469 -3 6469 6757 6758 -3 6469 6758 6470 -3 6470 6758 6759 -3 6470 6759 6471 -3 6471 6759 6760 -3 6471 6760 6472 -3 6472 6760 6761 -3 6472 6761 6473 -3 6473 6761 6762 -3 6473 6762 6474 -3 6474 6762 6763 -3 6474 6763 6475 -3 6475 6763 6764 -3 6475 6764 6476 -3 6476 6764 6765 -3 6476 6765 6477 -3 6477 6765 6766 -3 6477 6766 6478 -3 6478 6766 6767 -3 6478 6767 6479 -3 6479 6767 6768 -3 6479 6768 6480 -3 6480 6768 6769 -3 6480 6769 6481 -3 6481 6769 6770 -3 6481 6770 6482 -3 6482 6770 6771 -3 6482 6771 6483 -3 6483 6771 6772 -3 6483 6772 6484 -3 6484 6772 6773 -3 6484 6773 6485 -3 6485 6773 6774 -3 6485 6774 6486 -3 6486 6774 6775 -3 6486 6775 6487 -3 6487 6775 6776 -3 6487 6776 6488 -3 6488 6776 6777 -3 6488 6777 6489 -3 6489 6777 6778 -3 6489 6778 6490 -3 6490 6778 6779 -3 6490 6779 6491 -3 6491 6779 6780 -3 6491 6780 6492 -3 6492 6780 6781 -3 6492 6781 6493 -3 6493 6781 6782 -3 6493 6782 6494 -3 6494 6782 6783 -3 6494 6783 6495 -3 6495 6783 6784 -3 6495 6784 6496 -3 6496 6784 6785 -3 6496 6785 6497 -3 6497 6785 6786 -3 6497 6786 6498 -3 6498 6786 6787 -3 6498 6787 6499 -3 6499 6787 6788 -3 6499 6788 6500 -3 6500 6788 6789 -3 6500 6789 6501 -3 6501 6789 6790 -3 6501 6790 6502 -3 6502 6790 6791 -3 6502 6791 6503 -3 6503 6791 6792 -3 6503 6792 6504 -3 6504 6792 
6793 -3 6504 6793 6505 -3 6505 6793 6794 -3 6505 6794 6506 -3 6506 6794 6795 -3 6506 6795 6507 -3 6507 6795 6796 -3 6507 6796 6508 -3 6508 6796 6797 -3 6508 6797 6509 -3 6509 6797 6798 -3 6509 6798 6510 -3 6510 6798 6799 -3 6510 6799 6511 -3 6511 6799 6800 -3 6511 6800 6512 -3 6512 6800 6513 -3 6801 6513 6800 -3 6513 6801 6516 -3 6804 6516 6801 -3 6514 6802 6515 -3 6803 6515 6802 -3 6514 6519 6802 -3 6807 6802 6519 -3 6516 6804 6517 -3 6805 6517 6804 -3 6517 6805 6520 -3 6808 6520 6805 -3 6518 6806 6519 -3 6807 6519 6806 -3 6518 6522 6806 -3 6810 6806 6522 -3 6520 6808 6521 -3 6809 6521 6808 -3 6521 6809 6523 -3 6811 6523 6809 -3 6522 6524 6810 -3 6812 6810 6524 -3 6523 6811 6525 -3 6813 6525 6811 -3 6524 6526 6812 -3 6814 6812 6526 -3 6525 6813 6527 -3 6815 6527 6813 -3 6526 6528 6816 -3 6526 6816 6814 -3 6527 6815 6529 -3 6817 6529 6815 -3 6528 6530 6818 -3 6528 6818 6816 -3 6529 6817 6531 -3 6819 6531 6817 -3 6530 6532 6820 -3 6530 6820 6818 -3 6531 6819 6533 -3 6821 6533 6819 -3 6532 6534 6822 -3 6532 6822 6820 -3 6533 6821 6535 -3 6823 6535 6821 -3 6534 6536 6824 -3 6534 6824 6822 -3 6535 6823 6537 -3 6825 6537 6823 -3 6536 6538 6826 -3 6536 6826 6824 -3 6537 6825 6539 -3 6827 6539 6825 -3 6538 6540 6828 -3 6538 6828 6826 -3 6539 6827 6541 -3 6829 6541 6827 -3 6540 6542 6830 -3 6540 6830 6828 -3 6541 6829 6543 -3 6831 6543 6829 -3 6542 6544 6832 -3 6542 6832 6830 -3 6543 6831 6545 -3 6833 6545 6831 -3 6544 6546 6834 -3 6544 6834 6832 -3 6545 6833 6547 -3 6835 6547 6833 -3 6546 6548 6836 -3 6546 6836 6834 -3 6547 6835 6549 -3 6837 6549 6835 -3 6548 6550 6838 -3 6548 6838 6836 -3 6549 6837 6551 -3 6839 6551 6837 -3 6550 6552 6840 -3 6550 6840 6838 -3 6551 6839 6553 -3 6841 6553 6839 -3 6552 6554 6842 -3 6552 6842 6840 -3 6553 6841 6555 -3 6843 6555 6841 -3 6554 6556 6844 -3 6554 6844 6842 -3 6555 6843 6557 -3 6845 6557 6843 -3 6556 6558 6846 -3 6556 6846 6844 -3 6557 6845 6559 -3 6847 6559 6845 -3 6558 6560 6848 -3 6558 6848 6846 -3 6559 6847 6561 -3 6849 6561 6847 -3 6560 6562 6850 -3 6560 6850 6848 -3 6561 6849 6563 -3 6851 6563 6849 -3 6562 6564 6852 -3 6562 6852 6850 -3 6563 6851 6853 -3 6563 6853 6565 -3 6564 6566 6854 -3 6564 6854 6852 -3 6565 6853 6855 -3 6565 6855 6567 -3 6566 6568 6856 -3 6566 6856 6854 -3 6567 6855 6857 -3 6567 6857 6569 -3 6568 6570 6858 -3 6568 6858 6856 -3 6569 6857 6859 -3 6569 6859 6571 -3 6570 6572 6860 -3 6570 6860 6858 -3 6571 6859 6861 -3 6571 6861 6573 -3 6572 6574 6862 -3 6572 6862 6860 -3 6573 6861 6863 -3 6573 6863 6575 -3 6574 6576 6864 -3 6574 6864 6862 -3 6575 6863 6865 -3 6575 6865 6577 -3 6576 6578 6864 -3 6866 6864 6578 -3 6577 6865 6867 -3 6577 6867 6579 -3 6578 6580 6866 -3 6868 6866 6580 -3 6579 6867 6869 -3 6579 6869 6581 -3 6580 6582 6868 -3 6870 6868 6582 -3 6581 6869 6871 -3 6581 6871 6583 -3 6582 6584 6870 -3 6872 6870 6584 -3 6583 6871 6873 -3 6583 6873 6585 -3 6584 6586 6872 -3 6874 6872 6586 -3 6585 6873 6875 -3 6585 6875 6587 -3 6586 6588 6874 -3 6876 6874 6588 -3 6587 6875 6877 -3 6587 6877 6589 -3 6588 6590 6876 -3 6878 6876 6590 -3 6589 6877 6879 -3 6589 6879 6591 -3 6590 6592 6878 -3 6880 6878 6592 -3 6591 6879 6881 -3 6591 6881 6593 -3 6592 6594 6880 -3 6882 6880 6594 -3 6593 6881 6883 -3 6593 6883 6595 -3 6594 6596 6882 -3 6884 6882 6596 -3 6595 6883 6885 -3 6595 6885 6597 -3 6596 6598 6884 -3 6886 6884 6598 -3 6597 6885 6887 -3 6597 6887 6599 -3 6598 6600 6886 -3 6888 6886 6600 -3 6599 6887 6889 -3 6599 6889 6601 -3 6600 6602 6888 -3 6890 6888 6602 -3 6601 6889 6891 -3 6601 6891 6603 -3 6602 6604 6890 -3 6892 6890 6604 -3 
6603 6891 6893 -3 6603 6893 6605 -3 6604 6606 6892 -3 6894 6892 6606 -3 6605 6893 6895 -3 6605 6895 6607 -3 6606 6608 6894 -3 6896 6894 6608 -3 6607 6895 6897 -3 6607 6897 6609 -3 6608 6610 6896 -3 6898 6896 6610 -3 6609 6897 6899 -3 6609 6899 6611 -3 6610 6612 6898 -3 6900 6898 6612 -3 6611 6899 6901 -3 6611 6901 6613 -3 6612 6614 6900 -3 6902 6900 6614 -3 6613 6901 6615 -3 6903 6615 6901 -3 6614 6616 6902 -3 6904 6902 6616 -3 6615 6903 6617 -3 6905 6617 6903 -3 6616 6618 6904 -3 6906 6904 6618 -3 6617 6905 6619 -3 6907 6619 6905 -3 6618 6620 6906 -3 6908 6906 6620 -3 6619 6907 6621 -3 6909 6621 6907 -3 6620 6622 6908 -3 6910 6908 6622 -3 6621 6909 6623 -3 6911 6623 6909 -3 6622 6624 6910 -3 6912 6910 6624 -3 6623 6911 6625 -3 6913 6625 6911 -3 6624 6626 6912 -3 6914 6912 6626 -3 6625 6913 6627 -3 6915 6627 6913 -3 6626 6628 6916 -3 6626 6916 6914 -3 6627 6915 6629 -3 6917 6629 6915 -3 6628 6630 6918 -3 6628 6918 6916 -3 6629 6917 6631 -3 6919 6631 6917 -3 6630 6632 6920 -3 6630 6920 6918 -3 6631 6919 6633 -3 6921 6633 6919 -3 6632 6634 6922 -3 6632 6922 6920 -3 6633 6921 6635 -3 6923 6635 6921 -3 6634 6636 6924 -3 6634 6924 6922 -3 6635 6923 6637 -3 6925 6637 6923 -3 6636 6638 6926 -3 6636 6926 6924 -3 6637 6925 6639 -3 6927 6639 6925 -3 6638 6640 6928 -3 6638 6928 6926 -3 6639 6927 6641 -3 6929 6641 6927 -3 6640 6642 6930 -3 6640 6930 6928 -3 6641 6929 6643 -3 6931 6643 6929 -3 6642 6644 6932 -3 6642 6932 6930 -3 6643 6931 6645 -3 6933 6645 6931 -3 6644 6646 6934 -3 6644 6934 6932 -3 6645 6933 6647 -3 6935 6647 6933 -3 6646 6648 6936 -3 6646 6936 6934 -3 6647 6935 6649 -3 6937 6649 6935 -3 6648 6650 6938 -3 6648 6938 6936 -3 6649 6937 6651 -3 6939 6651 6937 -3 6650 6652 6940 -3 6650 6940 6938 -3 6651 6939 6653 -3 6941 6653 6939 -3 6652 6654 6942 -3 6652 6942 6940 -3 6653 6941 6655 -3 6943 6655 6941 -3 6654 6656 6944 -3 6654 6944 6942 -3 6655 6943 6659 -3 6947 6659 6943 -3 6656 6657 6945 -3 6656 6945 6944 -3 6657 6660 6948 -3 6657 6948 6945 -3 6658 6659 6947 -3 6658 6947 6946 -3 6658 6946 6663 -3 6951 6663 6946 -3 6660 6661 6949 -3 6660 6949 6948 -3 6661 6664 6952 -3 6661 6952 6949 -3 6662 6663 6951 -3 6662 6951 6950 -3 6662 6950 6732 -3 7020 6732 6950 -3 6664 6665 6953 -3 6664 6953 6952 -3 6665 6666 6954 -3 6665 6954 6953 -3 6666 6667 6955 -3 6666 6955 6954 -3 6667 6668 6956 -3 6667 6956 6955 -3 6668 6669 6957 -3 6668 6957 6956 -3 6669 6670 6958 -3 6669 6958 6957 -3 6670 6671 6959 -3 6670 6959 6958 -3 6671 6672 6960 -3 6671 6960 6959 -3 6672 6673 6961 -3 6672 6961 6960 -3 6673 6674 6962 -3 6673 6962 6961 -3 6674 6675 6963 -3 6674 6963 6962 -3 6675 6676 6964 -3 6675 6964 6963 -3 6676 6677 6965 -3 6676 6965 6964 -3 6677 6678 6965 -3 6966 6965 6678 -3 6678 6679 6966 -3 6967 6966 6679 -3 6679 6680 6967 -3 6968 6967 6680 -3 6680 6681 6968 -3 6969 6968 6681 -3 6681 6682 6969 -3 6970 6969 6682 -3 6682 6683 6970 -3 6971 6970 6683 -3 6683 6684 6971 -3 6972 6971 6684 -3 6684 6685 6972 -3 6973 6972 6685 -3 6685 6686 6973 -3 6974 6973 6686 -3 6686 6687 6974 -3 6975 6974 6687 -3 6687 6688 6975 -3 6976 6975 6688 -3 6688 6689 6976 -3 6977 6976 6689 -3 6689 6690 6977 -3 6978 6977 6690 -3 6690 6691 6978 -3 6979 6978 6691 -3 6691 6692 6979 -3 6980 6979 6692 -3 6692 6693 6980 -3 6981 6980 6693 -3 6693 6694 6981 -3 6982 6981 6694 -3 6694 6695 6982 -3 6983 6982 6695 -3 6695 6696 6983 -3 6984 6983 6696 -3 6696 6697 6984 -3 6985 6984 6697 -3 6697 6698 6985 -3 6986 6985 6698 -3 6698 6699 6986 -3 6987 6986 6699 -3 6699 6700 6987 -3 6988 6987 6700 -3 6700 6701 6988 -3 6989 6988 6701 -3 6701 6702 6989 -3 6990 
6989 6702 -3 6702 6703 6990 -3 6991 6990 6703 -3 6703 6704 6991 -3 6992 6991 6704 -3 6704 6705 6992 -3 6993 6992 6705 -3 6705 6706 6993 -3 6994 6993 6706 -3 6706 6707 6994 -3 6995 6994 6707 -3 6707 6708 6995 -3 6996 6995 6708 -3 6708 6709 6996 -3 6997 6996 6709 -3 6709 6710 6997 -3 6998 6997 6710 -3 6710 6711 6998 -3 6999 6998 6711 -3 6711 6712 6999 -3 7000 6999 6712 -3 6712 6713 7000 -3 7001 7000 6713 -3 6713 6714 7001 -3 7002 7001 6714 -3 6714 6715 7002 -3 7003 7002 6715 -3 6715 6716 7003 -3 7004 7003 6716 -3 6716 6717 7004 -3 7005 7004 6717 -3 6717 6718 7005 -3 7006 7005 6718 -3 6718 6719 7006 -3 7007 7006 6719 -3 6719 6720 7007 -3 7008 7007 6720 -3 6720 6721 7008 -3 7009 7008 6721 -3 6721 6722 7009 -3 7010 7009 6722 -3 6722 6723 7010 -3 7011 7010 6723 -3 6723 6724 7011 -3 7012 7011 6724 -3 6724 6725 7012 -3 7013 7012 6725 -3 6725 6726 7013 -3 7014 7013 6726 -3 6726 6727 7014 -3 7015 7014 6727 -3 6727 6728 7016 -3 6727 7016 7015 -3 6728 6729 7017 -3 6728 7017 7016 -3 6729 6730 7018 -3 6729 7018 7017 -3 6730 6731 7019 -3 6730 7019 7018 -3 6731 6732 7020 -3 6731 7020 7019 -3 6733 7021 6734 -3 7022 6734 7021 -3 6733 6803 7091 -3 6733 7091 7021 -3 6734 7022 6735 -3 7023 6735 7022 -3 6735 7023 6736 -3 7024 6736 7023 -3 6736 7024 6737 -3 7025 6737 7024 -3 6737 7025 6738 -3 7026 6738 7025 -3 6738 7026 6739 -3 7027 6739 7026 -3 6739 7027 6740 -3 7028 6740 7027 -3 6740 7028 6741 -3 7029 6741 7028 -3 6741 7029 6742 -3 7030 6742 7029 -3 6742 7030 6743 -3 7031 6743 7030 -3 6743 7031 6744 -3 7032 6744 7031 -3 6744 7032 6745 -3 7033 6745 7032 -3 6745 7033 6746 -3 7034 6746 7033 -3 6746 7034 6747 -3 7035 6747 7034 -3 6747 7035 6748 -3 7036 6748 7035 -3 6748 7036 6749 -3 7037 6749 7036 -3 6749 7037 6750 -3 7038 6750 7037 -3 6750 7038 6751 -3 7039 6751 7038 -3 6751 7039 6752 -3 7040 6752 7039 -3 6752 7040 6753 -3 7041 6753 7040 -3 6753 7041 6754 -3 7042 6754 7041 -3 6754 7042 6755 -3 7043 6755 7042 -3 6755 7043 6756 -3 7044 6756 7043 -3 6756 7044 6757 -3 7045 6757 7044 -3 6757 7045 6758 -3 7046 6758 7045 -3 6758 7046 6759 -3 7047 6759 7046 -3 6759 7047 6760 -3 7048 6760 7047 -3 6760 7048 6761 -3 7049 6761 7048 -3 6761 7049 6762 -3 7050 6762 7049 -3 6762 7050 6763 -3 7051 6763 7050 -3 6763 7051 6764 -3 7052 6764 7051 -3 6764 7052 6765 -3 7053 6765 7052 -3 6765 7053 6766 -3 7054 6766 7053 -3 6766 7054 7055 -3 6766 7055 6767 -3 6767 7055 7056 -3 6767 7056 6768 -3 6768 7056 7057 -3 6768 7057 6769 -3 6769 7057 7058 -3 6769 7058 6770 -3 6770 7058 7059 -3 6770 7059 6771 -3 6771 7059 7060 -3 6771 7060 6772 -3 6772 7060 7061 -3 6772 7061 6773 -3 6773 7061 7062 -3 6773 7062 6774 -3 6774 7062 7063 -3 6774 7063 6775 -3 6775 7063 7064 -3 6775 7064 6776 -3 6776 7064 7065 -3 6776 7065 6777 -3 6777 7065 7066 -3 6777 7066 6778 -3 6778 7066 7067 -3 6778 7067 6779 -3 6779 7067 7068 -3 6779 7068 6780 -3 6780 7068 7069 -3 6780 7069 6781 -3 6781 7069 7070 -3 6781 7070 6782 -3 6782 7070 7071 -3 6782 7071 6783 -3 6783 7071 7072 -3 6783 7072 6784 -3 6784 7072 7073 -3 6784 7073 6785 -3 6785 7073 7074 -3 6785 7074 6786 -3 6786 7074 7075 -3 6786 7075 6787 -3 6787 7075 7076 -3 6787 7076 6788 -3 6788 7076 7077 -3 6788 7077 6789 -3 6789 7077 7078 -3 6789 7078 6790 -3 6790 7078 7079 -3 6790 7079 6791 -3 6791 7079 7080 -3 6791 7080 6792 -3 6792 7080 7081 -3 6792 7081 6793 -3 6793 7081 7082 -3 6793 7082 6794 -3 6794 7082 7083 -3 6794 7083 6795 -3 6795 7083 7084 -3 6795 7084 6796 -3 6796 7084 7085 -3 6796 7085 6797 -3 6797 7085 7086 -3 6797 7086 6798 -3 6798 7086 7087 -3 6798 7087 6799 -3 6799 7087 7088 -3 6799 7088 6800 -3 6800 7088 
7089 -3 6800 7089 6801 -3 6801 7089 7092 -3 6801 7092 6804 -3 6802 7090 7091 -3 6802 7091 6803 -3 6802 6807 7090 -3 7095 7090 6807 -3 6804 7092 7093 -3 6804 7093 6805 -3 6805 7093 7096 -3 6805 7096 6808 -3 6806 7094 7095 -3 6806 7095 6807 -3 6806 6810 7094 -3 7098 7094 6810 -3 6808 7096 7097 -3 6808 7097 6809 -3 6809 7097 7099 -3 6809 7099 6811 -3 6810 6812 7098 -3 7100 7098 6812 -3 6811 7099 7101 -3 6811 7101 6813 -3 6812 6814 7100 -3 7102 7100 6814 -3 6813 7101 7103 -3 6813 7103 6815 -3 6814 6816 7102 -3 7104 7102 6816 -3 6815 7103 7105 -3 6815 7105 6817 -3 6816 6818 7104 -3 7106 7104 6818 -3 6817 7105 6819 -3 7107 6819 7105 -3 6818 6820 7106 -3 7108 7106 6820 -3 6819 7107 6821 -3 7109 6821 7107 -3 6820 6822 7108 -3 7110 7108 6822 -3 6821 7109 6823 -3 7111 6823 7109 -3 6822 6824 7110 -3 7112 7110 6824 -3 6823 7111 6825 -3 7113 6825 7111 -3 6824 6826 7112 -3 7114 7112 6826 -3 6825 7113 6827 -3 7115 6827 7113 -3 6826 6828 7114 -3 7116 7114 6828 -3 6827 7115 6829 -3 7117 6829 7115 -3 6828 6830 7118 -3 6828 7118 7116 -3 6829 7117 6831 -3 7119 6831 7117 -3 6830 6832 7120 -3 6830 7120 7118 -3 6831 7119 6833 -3 7121 6833 7119 -3 6832 6834 7122 -3 6832 7122 7120 -3 6833 7121 6835 -3 7123 6835 7121 -3 6834 6836 7124 -3 6834 7124 7122 -3 6835 7123 6837 -3 7125 6837 7123 -3 6836 6838 7126 -3 6836 7126 7124 -3 6837 7125 6839 -3 7127 6839 7125 -3 6838 6840 7128 -3 6838 7128 7126 -3 6839 7127 6841 -3 7129 6841 7127 -3 6840 6842 7130 -3 6840 7130 7128 -3 6841 7129 6843 -3 7131 6843 7129 -3 6842 6844 7132 -3 6842 7132 7130 -3 6843 7131 6845 -3 7133 6845 7131 -3 6844 6846 7134 -3 6844 7134 7132 -3 6845 7133 6847 -3 7135 6847 7133 -3 6846 6848 7136 -3 6846 7136 7134 -3 6847 7135 6849 -3 7137 6849 7135 -3 6848 6850 7138 -3 6848 7138 7136 -3 6849 7137 6851 -3 7139 6851 7137 -3 6850 6852 7140 -3 6850 7140 7138 -3 6851 7139 6853 -3 7141 6853 7139 -3 6852 6854 7142 -3 6852 7142 7140 -3 6853 7141 6855 -3 7143 6855 7141 -3 6854 6856 7144 -3 6854 7144 7142 -3 6855 7143 6857 -3 7145 6857 7143 -3 6856 6858 7146 -3 6856 7146 7144 -3 6857 7145 6859 -3 7147 6859 7145 -3 6858 6860 7148 -3 6858 7148 7146 -3 6859 7147 6861 -3 7149 6861 7147 -3 6860 6862 7150 -3 6860 7150 7148 -3 6861 7149 6863 -3 7151 6863 7149 -3 6862 6864 7152 -3 6862 7152 7150 -3 6863 7151 6865 -3 7153 6865 7151 -3 6864 6866 7154 -3 6864 7154 7152 -3 6865 7153 6867 -3 7155 6867 7153 -3 6866 6868 7156 -3 6866 7156 7154 -3 6867 7155 6869 -3 7157 6869 7155 -3 6868 6870 7158 -3 6868 7158 7156 -3 6869 7157 7159 -3 6869 7159 6871 -3 6870 6872 7160 -3 6870 7160 7158 -3 6871 7159 7161 -3 6871 7161 6873 -3 6872 6874 7162 -3 6872 7162 7160 -3 6873 7161 7163 -3 6873 7163 6875 -3 6874 6876 7164 -3 6874 7164 7162 -3 6875 7163 7165 -3 6875 7165 6877 -3 6876 6878 7166 -3 6876 7166 7164 -3 6877 7165 7167 -3 6877 7167 6879 -3 6878 6880 7168 -3 6878 7168 7166 -3 6879 7167 7169 -3 6879 7169 6881 -3 6880 6882 7168 -3 7170 7168 6882 -3 6881 7169 7171 -3 6881 7171 6883 -3 6882 6884 7170 -3 7172 7170 6884 -3 6883 7171 7173 -3 6883 7173 6885 -3 6884 6886 7172 -3 7174 7172 6886 -3 6885 7173 7175 -3 6885 7175 6887 -3 6886 6888 7174 -3 7176 7174 6888 -3 6887 7175 7177 -3 6887 7177 6889 -3 6888 6890 7176 -3 7178 7176 6890 -3 6889 7177 7179 -3 6889 7179 6891 -3 6890 6892 7178 -3 7180 7178 6892 -3 6891 7179 7181 -3 6891 7181 6893 -3 6892 6894 7180 -3 7182 7180 6894 -3 6893 7181 7183 -3 6893 7183 6895 -3 6894 6896 7182 -3 7184 7182 6896 -3 6895 7183 7185 -3 6895 7185 6897 -3 6896 6898 7184 -3 7186 7184 6898 -3 6897 7185 7187 -3 6897 7187 6899 -3 6898 6900 7186 -3 7188 7186 6900 -3 
6899 7187 7189 -3 6899 7189 6901 -3 6900 6902 7188 -3 7190 7188 6902 -3 6901 7189 7191 -3 6901 7191 6903 -3 6902 6904 7190 -3 7192 7190 6904 -3 6903 7191 7193 -3 6903 7193 6905 -3 6904 6906 7192 -3 7194 7192 6906 -3 6905 7193 7195 -3 6905 7195 6907 -3 6906 6908 7194 -3 7196 7194 6908 -3 6907 7195 7197 -3 6907 7197 6909 -3 6908 6910 7196 -3 7198 7196 6910 -3 6909 7197 7199 -3 6909 7199 6911 -3 6910 6912 7198 -3 7200 7198 6912 -3 6911 7199 7201 -3 6911 7201 6913 -3 6912 6914 7200 -3 7202 7200 6914 -3 6913 7201 7203 -3 6913 7203 6915 -3 6914 6916 7202 -3 7204 7202 6916 -3 6915 7203 7205 -3 6915 7205 6917 -3 6916 6918 7204 -3 7206 7204 6918 -3 6917 7205 7207 -3 6917 7207 6919 -3 6918 6920 7206 -3 7208 7206 6920 -3 6919 7207 7209 -3 6919 7209 6921 -3 6920 6922 7208 -3 7210 7208 6922 -3 6921 7209 6923 -3 7211 6923 7209 -3 6922 6924 7210 -3 7212 7210 6924 -3 6923 7211 6925 -3 7213 6925 7211 -3 6924 6926 7212 -3 7214 7212 6926 -3 6925 7213 6927 -3 7215 6927 7213 -3 6926 6928 7214 -3 7216 7214 6928 -3 6927 7215 6929 -3 7217 6929 7215 -3 6928 6930 7216 -3 7218 7216 6930 -3 6929 7217 6931 -3 7219 6931 7217 -3 6930 6932 7220 -3 6930 7220 7218 -3 6931 7219 6933 -3 7221 6933 7219 -3 6932 6934 7222 -3 6932 7222 7220 -3 6933 7221 6935 -3 7223 6935 7221 -3 6934 6936 7224 -3 6934 7224 7222 -3 6935 7223 6937 -3 7225 6937 7223 -3 6936 6938 7226 -3 6936 7226 7224 -3 6937 7225 6939 -3 7227 6939 7225 -3 6938 6940 7228 -3 6938 7228 7226 -3 6939 7227 6941 -3 7229 6941 7227 -3 6940 6942 7230 -3 6940 7230 7228 -3 6941 7229 6943 -3 7231 6943 7229 -3 6942 6944 7232 -3 6942 7232 7230 -3 6943 7231 6947 -3 7235 6947 7231 -3 6944 6945 7233 -3 6944 7233 7232 -3 6945 6948 7236 -3 6945 7236 7233 -3 6946 6947 7235 -3 6946 7235 7234 -3 6946 7234 6951 -3 7239 6951 7234 -3 6948 6949 7237 -3 6948 7237 7236 -3 6949 6952 7240 -3 6949 7240 7237 -3 6950 6951 7239 -3 6950 7239 7238 -3 6950 7238 7020 -3 7308 7020 7238 -3 6952 6953 7241 -3 6952 7241 7240 -3 6953 6954 7242 -3 6953 7242 7241 -3 6954 6955 7243 -3 6954 7243 7242 -3 6955 6956 7244 -3 6955 7244 7243 -3 6956 6957 7245 -3 6956 7245 7244 -3 6957 6958 7246 -3 6957 7246 7245 -3 6958 6959 7247 -3 6958 7247 7246 -3 6959 6960 7248 -3 6959 7248 7247 -3 6960 6961 7249 -3 6960 7249 7248 -3 6961 6962 7250 -3 6961 7250 7249 -3 6962 6963 7251 -3 6962 7251 7250 -3 6963 6964 7252 -3 6963 7252 7251 -3 6964 6965 7253 -3 6964 7253 7252 -3 6965 6966 7254 -3 6965 7254 7253 -3 6966 6967 7255 -3 6966 7255 7254 -3 6967 6968 7256 -3 6967 7256 7255 -3 6968 6969 7257 -3 6968 7257 7256 -3 6969 6970 7258 -3 6969 7258 7257 -3 6970 6971 7259 -3 6970 7259 7258 -3 6971 6972 7260 -3 6971 7260 7259 -3 6972 6973 7261 -3 6972 7261 7260 -3 6973 6974 7262 -3 6973 7262 7261 -3 6974 6975 7263 -3 6974 7263 7262 -3 6975 6976 7264 -3 6975 7264 7263 -3 6976 6977 7265 -3 6976 7265 7264 -3 6977 6978 7266 -3 6977 7266 7265 -3 6978 6979 7267 -3 6978 7267 7266 -3 6979 6980 7268 -3 6979 7268 7267 -3 6980 6981 7269 -3 6980 7269 7268 -3 6981 6982 7270 -3 6981 7270 7269 -3 6982 6983 7270 -3 7271 7270 6983 -3 6983 6984 7271 -3 7272 7271 6984 -3 6984 6985 7272 -3 7273 7272 6985 -3 6985 6986 7273 -3 7274 7273 6986 -3 6986 6987 7274 -3 7275 7274 6987 -3 6987 6988 7275 -3 7276 7275 6988 -3 6988 6989 7276 -3 7277 7276 6989 -3 6989 6990 7277 -3 7278 7277 6990 -3 6990 6991 7278 -3 7279 7278 6991 -3 6991 6992 7279 -3 7280 7279 6992 -3 6992 6993 7280 -3 7281 7280 6993 -3 6993 6994 7281 -3 7282 7281 6994 -3 6994 6995 7282 -3 7283 7282 6995 -3 6995 6996 7283 -3 7284 7283 6996 -3 6996 6997 7284 -3 7285 7284 6997 -3 6997 6998 7285 -3 7286 
7285 6998 -3 6998 6999 7286 -3 7287 7286 6999 -3 6999 7000 7287 -3 7288 7287 7000 -3 7000 7001 7288 -3 7289 7288 7001 -3 7001 7002 7289 -3 7290 7289 7002 -3 7002 7003 7290 -3 7291 7290 7003 -3 7003 7004 7291 -3 7292 7291 7004 -3 7004 7005 7292 -3 7293 7292 7005 -3 7005 7006 7293 -3 7294 7293 7006 -3 7006 7007 7294 -3 7295 7294 7007 -3 7007 7008 7295 -3 7296 7295 7008 -3 7008 7009 7296 -3 7297 7296 7009 -3 7009 7010 7297 -3 7298 7297 7010 -3 7010 7011 7298 -3 7299 7298 7011 -3 7011 7012 7299 -3 7300 7299 7012 -3 7012 7013 7300 -3 7301 7300 7013 -3 7013 7014 7301 -3 7302 7301 7014 -3 7014 7015 7302 -3 7303 7302 7015 -3 7015 7016 7303 -3 7304 7303 7016 -3 7016 7017 7304 -3 7305 7304 7017 -3 7017 7018 7305 -3 7306 7305 7018 -3 7018 7019 7306 -3 7307 7306 7019 -3 7019 7020 7307 -3 7308 7307 7020 -3 7021 7309 7310 -3 7021 7310 7022 -3 7021 7091 7379 -3 7021 7379 7309 -3 7022 7310 7311 -3 7022 7311 7023 -3 7023 7311 7312 -3 7023 7312 7024 -3 7024 7312 7313 -3 7024 7313 7025 -3 7025 7313 7026 -3 7314 7026 7313 -3 7026 7314 7027 -3 7315 7027 7314 -3 7027 7315 7028 -3 7316 7028 7315 -3 7028 7316 7029 -3 7317 7029 7316 -3 7029 7317 7030 -3 7318 7030 7317 -3 7030 7318 7031 -3 7319 7031 7318 -3 7031 7319 7032 -3 7320 7032 7319 -3 7032 7320 7033 -3 7321 7033 7320 -3 7033 7321 7034 -3 7322 7034 7321 -3 7034 7322 7035 -3 7323 7035 7322 -3 7035 7323 7036 -3 7324 7036 7323 -3 7036 7324 7037 -3 7325 7037 7324 -3 7037 7325 7038 -3 7326 7038 7325 -3 7038 7326 7039 -3 7327 7039 7326 -3 7039 7327 7040 -3 7328 7040 7327 -3 7040 7328 7041 -3 7329 7041 7328 -3 7041 7329 7042 -3 7330 7042 7329 -3 7042 7330 7043 -3 7331 7043 7330 -3 7043 7331 7044 -3 7332 7044 7331 -3 7044 7332 7045 -3 7333 7045 7332 -3 7045 7333 7046 -3 7334 7046 7333 -3 7046 7334 7047 -3 7335 7047 7334 -3 7047 7335 7048 -3 7336 7048 7335 -3 7048 7336 7049 -3 7337 7049 7336 -3 7049 7337 7050 -3 7338 7050 7337 -3 7050 7338 7051 -3 7339 7051 7338 -3 7051 7339 7052 -3 7340 7052 7339 -3 7052 7340 7053 -3 7341 7053 7340 -3 7053 7341 7054 -3 7342 7054 7341 -3 7054 7342 7055 -3 7343 7055 7342 -3 7055 7343 7056 -3 7344 7056 7343 -3 7056 7344 7057 -3 7345 7057 7344 -3 7057 7345 7058 -3 7346 7058 7345 -3 7058 7346 7059 -3 7347 7059 7346 -3 7059 7347 7060 -3 7348 7060 7347 -3 7060 7348 7061 -3 7349 7061 7348 -3 7061 7349 7062 -3 7350 7062 7349 -3 7062 7350 7063 -3 7351 7063 7350 -3 7063 7351 7064 -3 7352 7064 7351 -3 7064 7352 7065 -3 7353 7065 7352 -3 7065 7353 7066 -3 7354 7066 7353 -3 7066 7354 7067 -3 7355 7067 7354 -3 7067 7355 7068 -3 7356 7068 7355 -3 7068 7356 7069 -3 7357 7069 7356 -3 7069 7357 7070 -3 7358 7070 7357 -3 7070 7358 7071 -3 7359 7071 7358 -3 7071 7359 7072 -3 7360 7072 7359 -3 7072 7360 7073 -3 7361 7073 7360 -3 7073 7361 7074 -3 7362 7074 7361 -3 7074 7362 7075 -3 7363 7075 7362 -3 7075 7363 7076 -3 7364 7076 7363 -3 7076 7364 7077 -3 7365 7077 7364 -3 7077 7365 7366 -3 7077 7366 7078 -3 7078 7366 7367 -3 7078 7367 7079 -3 7079 7367 7368 -3 7079 7368 7080 -3 7080 7368 7369 -3 7080 7369 7081 -3 7081 7369 7370 -3 7081 7370 7082 -3 7082 7370 7371 -3 7082 7371 7083 -3 7083 7371 7372 -3 7083 7372 7084 -3 7084 7372 7373 -3 7084 7373 7085 -3 7085 7373 7374 -3 7085 7374 7086 -3 7086 7374 7375 -3 7086 7375 7087 -3 7087 7375 7376 -3 7087 7376 7088 -3 7088 7376 7377 -3 7088 7377 7089 -3 7089 7377 7380 -3 7089 7380 7092 -3 7090 7378 7379 -3 7090 7379 7091 -3 7090 7095 7378 -3 7383 7378 7095 -3 7092 7380 7381 -3 7092 7381 7093 -3 7093 7381 7384 -3 7093 7384 7096 -3 7094 7382 7383 -3 7094 7383 7095 -3 7094 7098 7382 -3 7386 7382 7098 -3 7096 7384 
7385 -3 7096 7385 7097 -3 7097 7385 7387 -3 7097 7387 7099 -3 7098 7100 7386 -3 7388 7386 7100 -3 7099 7387 7389 -3 7099 7389 7101 -3 7100 7102 7388 -3 7390 7388 7102 -3 7101 7389 7391 -3 7101 7391 7103 -3 7102 7104 7390 -3 7392 7390 7104 -3 7103 7391 7393 -3 7103 7393 7105 -3 7104 7106 7392 -3 7394 7392 7106 -3 7105 7393 7395 -3 7105 7395 7107 -3 7106 7108 7394 -3 7396 7394 7108 -3 7107 7395 7397 -3 7107 7397 7109 -3 7108 7110 7396 -3 7398 7396 7110 -3 7109 7397 7399 -3 7109 7399 7111 -3 7110 7112 7398 -3 7400 7398 7112 -3 7111 7399 7401 -3 7111 7401 7113 -3 7112 7114 7400 -3 7402 7400 7114 -3 7113 7401 7403 -3 7113 7403 7115 -3 7114 7116 7402 -3 7404 7402 7116 -3 7115 7403 7405 -3 7115 7405 7117 -3 7116 7118 7404 -3 7406 7404 7118 -3 7117 7405 7407 -3 7117 7407 7119 -3 7118 7120 7406 -3 7408 7406 7120 -3 7119 7407 7409 -3 7119 7409 7121 -3 7120 7122 7408 -3 7410 7408 7122 -3 7121 7409 7411 -3 7121 7411 7123 -3 7122 7124 7410 -3 7412 7410 7124 -3 7123 7411 7413 -3 7123 7413 7125 -3 7124 7126 7412 -3 7414 7412 7126 -3 7125 7413 7415 -3 7125 7415 7127 -3 7126 7128 7414 -3 7416 7414 7128 -3 7127 7415 7417 -3 7127 7417 7129 -3 7128 7130 7416 -3 7418 7416 7130 -3 7129 7417 7131 -3 7419 7131 7417 -3 7130 7132 7418 -3 7420 7418 7132 -3 7131 7419 7133 -3 7421 7133 7419 -3 7132 7134 7420 -3 7422 7420 7134 -3 7133 7421 7135 -3 7423 7135 7421 -3 7134 7136 7422 -3 7424 7422 7136 -3 7135 7423 7137 -3 7425 7137 7423 -3 7136 7138 7426 -3 7136 7426 7424 -3 7137 7425 7139 -3 7427 7139 7425 -3 7138 7140 7428 -3 7138 7428 7426 -3 7139 7427 7141 -3 7429 7141 7427 -3 7140 7142 7430 -3 7140 7430 7428 -3 7141 7429 7143 -3 7431 7143 7429 -3 7142 7144 7432 -3 7142 7432 7430 -3 7143 7431 7145 -3 7433 7145 7431 -3 7144 7146 7434 -3 7144 7434 7432 -3 7145 7433 7147 -3 7435 7147 7433 -3 7146 7148 7436 -3 7146 7436 7434 -3 7147 7435 7149 -3 7437 7149 7435 -3 7148 7150 7438 -3 7148 7438 7436 -3 7149 7437 7151 -3 7439 7151 7437 -3 7150 7152 7440 -3 7150 7440 7438 -3 7151 7439 7153 -3 7441 7153 7439 -3 7152 7154 7442 -3 7152 7442 7440 -3 7153 7441 7155 -3 7443 7155 7441 -3 7154 7156 7444 -3 7154 7444 7442 -3 7155 7443 7157 -3 7445 7157 7443 -3 7156 7158 7446 -3 7156 7446 7444 -3 7157 7445 7159 -3 7447 7159 7445 -3 7158 7160 7448 -3 7158 7448 7446 -3 7159 7447 7161 -3 7449 7161 7447 -3 7160 7162 7450 -3 7160 7450 7448 -3 7161 7449 7163 -3 7451 7163 7449 -3 7162 7164 7452 -3 7162 7452 7450 -3 7163 7451 7165 -3 7453 7165 7451 -3 7164 7166 7454 -3 7164 7454 7452 -3 7165 7453 7167 -3 7455 7167 7453 -3 7166 7168 7456 -3 7166 7456 7454 -3 7167 7455 7169 -3 7457 7169 7455 -3 7168 7170 7458 -3 7168 7458 7456 -3 7169 7457 7171 -3 7459 7171 7457 -3 7170 7172 7460 -3 7170 7460 7458 -3 7171 7459 7173 -3 7461 7173 7459 -3 7172 7174 7462 -3 7172 7462 7460 -3 7173 7461 7175 -3 7463 7175 7461 -3 7174 7176 7464 -3 7174 7464 7462 -3 7175 7463 7177 -3 7465 7177 7463 -3 7176 7178 7466 -3 7176 7466 7464 -3 7177 7465 7179 -3 7467 7179 7465 -3 7178 7180 7468 -3 7178 7468 7466 -3 7179 7467 7181 -3 7469 7181 7467 -3 7180 7182 7470 -3 7180 7470 7468 -3 7181 7469 7183 -3 7471 7183 7469 -3 7182 7184 7472 -3 7182 7472 7470 -3 7183 7471 7473 -3 7183 7473 7185 -3 7184 7186 7474 -3 7184 7474 7472 -3 7185 7473 7475 -3 7185 7475 7187 -3 7186 7188 7476 -3 7186 7476 7474 -3 7187 7475 7477 -3 7187 7477 7189 -3 7188 7190 7478 -3 7188 7478 7476 -3 7189 7477 7479 -3 7189 7479 7191 -3 7190 7192 7478 -3 7480 7478 7192 -3 7191 7479 7481 -3 7191 7481 7193 -3 7192 7194 7480 -3 7482 7480 7194 -3 7193 7481 7483 -3 7193 7483 7195 -3 7194 7196 7482 -3 7484 7482 7196 -3 
7195 7483 7485 -3 7195 7485 7197 -3 7196 7198 7484 -3 7486 7484 7198 -3 7197 7485 7487 -3 7197 7487 7199 -3 7198 7200 7486 -3 7488 7486 7200 -3 7199 7487 7489 -3 7199 7489 7201 -3 7200 7202 7488 -3 7490 7488 7202 -3 7201 7489 7491 -3 7201 7491 7203 -3 7202 7204 7490 -3 7492 7490 7204 -3 7203 7491 7493 -3 7203 7493 7205 -3 7204 7206 7492 -3 7494 7492 7206 -3 7205 7493 7495 -3 7205 7495 7207 -3 7206 7208 7494 -3 7496 7494 7208 -3 7207 7495 7497 -3 7207 7497 7209 -3 7208 7210 7496 -3 7498 7496 7210 -3 7209 7497 7499 -3 7209 7499 7211 -3 7210 7212 7498 -3 7500 7498 7212 -3 7211 7499 7501 -3 7211 7501 7213 -3 7212 7214 7500 -3 7502 7500 7214 -3 7213 7501 7503 -3 7213 7503 7215 -3 7214 7216 7502 -3 7504 7502 7216 -3 7215 7503 7505 -3 7215 7505 7217 -3 7216 7218 7504 -3 7506 7504 7218 -3 7217 7505 7507 -3 7217 7507 7219 -3 7218 7220 7506 -3 7508 7506 7220 -3 7219 7507 7509 -3 7219 7509 7221 -3 7220 7222 7508 -3 7510 7508 7222 -3 7221 7509 7511 -3 7221 7511 7223 -3 7222 7224 7510 -3 7512 7510 7224 -3 7223 7511 7513 -3 7223 7513 7225 -3 7224 7226 7512 -3 7514 7512 7226 -3 7225 7513 7515 -3 7225 7515 7227 -3 7226 7228 7514 -3 7516 7514 7228 -3 7227 7515 7517 -3 7227 7517 7229 -3 7228 7230 7516 -3 7518 7516 7230 -3 7229 7517 7519 -3 7229 7519 7231 -3 7230 7232 7518 -3 7520 7518 7232 -3 7231 7519 7523 -3 7231 7523 7235 -3 7232 7233 7520 -3 7521 7520 7233 -3 7233 7236 7521 -3 7524 7521 7236 -3 7234 7235 7522 -3 7523 7522 7235 -3 7234 7522 7527 -3 7234 7527 7239 -3 7236 7237 7524 -3 7525 7524 7237 -3 7237 7240 7525 -3 7528 7525 7240 -3 7238 7239 7526 -3 7527 7526 7239 -3 7238 7526 7308 -3 7596 7308 7526 -3 7240 7241 7528 -3 7529 7528 7241 -3 7241 7242 7530 -3 7241 7530 7529 -3 7242 7243 7531 -3 7242 7531 7530 -3 7243 7244 7532 -3 7243 7532 7531 -3 7244 7245 7533 -3 7244 7533 7532 -3 7245 7246 7534 -3 7245 7534 7533 -3 7246 7247 7535 -3 7246 7535 7534 -3 7247 7248 7536 -3 7247 7536 7535 -3 7248 7249 7537 -3 7248 7537 7536 -3 7249 7250 7538 -3 7249 7538 7537 -3 7250 7251 7539 -3 7250 7539 7538 -3 7251 7252 7540 -3 7251 7540 7539 -3 7252 7253 7541 -3 7252 7541 7540 -3 7253 7254 7542 -3 7253 7542 7541 -3 7254 7255 7543 -3 7254 7543 7542 -3 7255 7256 7544 -3 7255 7544 7543 -3 7256 7257 7545 -3 7256 7545 7544 -3 7257 7258 7546 -3 7257 7546 7545 -3 7258 7259 7547 -3 7258 7547 7546 -3 7259 7260 7548 -3 7259 7548 7547 -3 7260 7261 7549 -3 7260 7549 7548 -3 7261 7262 7550 -3 7261 7550 7549 -3 7262 7263 7551 -3 7262 7551 7550 -3 7263 7264 7552 -3 7263 7552 7551 -3 7264 7265 7553 -3 7264 7553 7552 -3 7265 7266 7554 -3 7265 7554 7553 -3 7266 7267 7555 -3 7266 7555 7554 -3 7267 7268 7556 -3 7267 7556 7555 -3 7268 7269 7557 -3 7268 7557 7556 -3 7269 7270 7558 -3 7269 7558 7557 -3 7270 7271 7559 -3 7270 7559 7558 -3 7271 7272 7560 -3 7271 7560 7559 -3 7272 7273 7561 -3 7272 7561 7560 -3 7273 7274 7562 -3 7273 7562 7561 -3 7274 7275 7563 -3 7274 7563 7562 -3 7275 7276 7564 -3 7275 7564 7563 -3 7276 7277 7565 -3 7276 7565 7564 -3 7277 7278 7566 -3 7277 7566 7565 -3 7278 7279 7567 -3 7278 7567 7566 -3 7279 7280 7568 -3 7279 7568 7567 -3 7280 7281 7569 -3 7280 7569 7568 -3 7281 7282 7570 -3 7281 7570 7569 -3 7282 7283 7571 -3 7282 7571 7570 -3 7283 7284 7572 -3 7283 7572 7571 -3 7284 7285 7573 -3 7284 7573 7572 -3 7285 7286 7574 -3 7285 7574 7573 -3 7286 7287 7575 -3 7286 7575 7574 -3 7287 7288 7576 -3 7287 7576 7575 -3 7288 7289 7577 -3 7288 7577 7576 -3 7289 7290 7578 -3 7289 7578 7577 -3 7290 7291 7579 -3 7290 7579 7578 -3 7291 7292 7580 -3 7291 7580 7579 -3 7292 7293 7581 -3 7292 7581 7580 -3 7293 7294 7582 -3 7293 
7582 7581 -3 7294 7295 7582 -3 7583 7582 7295 -3 7295 7296 7583 -3 7584 7583 7296 -3 7296 7297 7584 -3 7585 7584 7297 -3 7297 7298 7585 -3 7586 7585 7298 -3 7298 7299 7586 -3 7587 7586 7299 -3 7299 7300 7587 -3 7588 7587 7300 -3 7300 7301 7588 -3 7589 7588 7301 -3 7301 7302 7589 -3 7590 7589 7302 -3 7302 7303 7590 -3 7591 7590 7303 -3 7303 7304 7591 -3 7592 7591 7304 -3 7304 7305 7592 -3 7593 7592 7305 -3 7305 7306 7593 -3 7594 7593 7306 -3 7306 7307 7594 -3 7595 7594 7307 -3 7307 7308 7595 -3 7596 7595 7308 -3 7309 7597 7598 -3 7309 7598 7310 -3 7309 7379 7667 -3 7309 7667 7597 -3 7310 7598 7599 -3 7310 7599 7311 -3 7311 7599 7600 -3 7311 7600 7312 -3 7312 7600 7601 -3 7312 7601 7313 -3 7313 7601 7602 -3 7313 7602 7314 -3 7314 7602 7603 -3 7314 7603 7315 -3 7315 7603 7604 -3 7315 7604 7316 -3 7316 7604 7605 -3 7316 7605 7317 -3 7317 7605 7606 -3 7317 7606 7318 -3 7318 7606 7607 -3 7318 7607 7319 -3 7319 7607 7608 -3 7319 7608 7320 -3 7320 7608 7609 -3 7320 7609 7321 -3 7321 7609 7610 -3 7321 7610 7322 -3 7322 7610 7611 -3 7322 7611 7323 -3 7323 7611 7612 -3 7323 7612 7324 -3 7324 7612 7613 -3 7324 7613 7325 -3 7325 7613 7614 -3 7325 7614 7326 -3 7326 7614 7615 -3 7326 7615 7327 -3 7327 7615 7616 -3 7327 7616 7328 -3 7328 7616 7617 -3 7328 7617 7329 -3 7329 7617 7618 -3 7329 7618 7330 -3 7330 7618 7619 -3 7330 7619 7331 -3 7331 7619 7620 -3 7331 7620 7332 -3 7332 7620 7621 -3 7332 7621 7333 -3 7333 7621 7622 -3 7333 7622 7334 -3 7334 7622 7623 -3 7334 7623 7335 -3 7335 7623 7624 -3 7335 7624 7336 -3 7336 7624 7625 -3 7336 7625 7337 -3 7337 7625 7626 -3 7337 7626 7338 -3 7338 7626 7627 -3 7338 7627 7339 -3 7339 7627 7628 -3 7339 7628 7340 -3 7340 7628 7629 -3 7340 7629 7341 -3 7341 7629 7342 -3 7630 7342 7629 -3 7342 7630 7343 -3 7631 7343 7630 -3 7343 7631 7344 -3 7632 7344 7631 -3 7344 7632 7345 -3 7633 7345 7632 -3 7345 7633 7346 -3 7634 7346 7633 -3 7346 7634 7347 -3 7635 7347 7634 -3 7347 7635 7348 -3 7636 7348 7635 -3 7348 7636 7349 -3 7637 7349 7636 -3 7349 7637 7350 -3 7638 7350 7637 -3 7350 7638 7351 -3 7639 7351 7638 -3 7351 7639 7352 -3 7640 7352 7639 -3 7352 7640 7353 -3 7641 7353 7640 -3 7353 7641 7354 -3 7642 7354 7641 -3 7354 7642 7355 -3 7643 7355 7642 -3 7355 7643 7356 -3 7644 7356 7643 -3 7356 7644 7357 -3 7645 7357 7644 -3 7357 7645 7358 -3 7646 7358 7645 -3 7358 7646 7359 -3 7647 7359 7646 -3 7359 7647 7360 -3 7648 7360 7647 -3 7360 7648 7361 -3 7649 7361 7648 -3 7361 7649 7362 -3 7650 7362 7649 -3 7362 7650 7363 -3 7651 7363 7650 -3 7363 7651 7364 -3 7652 7364 7651 -3 7364 7652 7365 -3 7653 7365 7652 -3 7365 7653 7366 -3 7654 7366 7653 -3 7366 7654 7367 -3 7655 7367 7654 -3 7367 7655 7368 -3 7656 7368 7655 -3 7368 7656 7369 -3 7657 7369 7656 -3 7369 7657 7370 -3 7658 7370 7657 -3 7370 7658 7371 -3 7659 7371 7658 -3 7371 7659 7372 -3 7660 7372 7659 -3 7372 7660 7373 -3 7661 7373 7660 -3 7373 7661 7374 -3 7662 7374 7661 -3 7374 7662 7375 -3 7663 7375 7662 -3 7375 7663 7376 -3 7664 7376 7663 -3 7376 7664 7377 -3 7665 7377 7664 -3 7377 7665 7380 -3 7668 7380 7665 -3 7378 7666 7379 -3 7667 7379 7666 -3 7378 7383 7671 -3 7378 7671 7666 -3 7380 7668 7381 -3 7669 7381 7668 -3 7381 7669 7384 -3 7672 7384 7669 -3 7382 7670 7383 -3 7671 7383 7670 -3 7382 7386 7674 -3 7382 7674 7670 -3 7384 7672 7385 -3 7673 7385 7672 -3 7385 7673 7387 -3 7675 7387 7673 -3 7386 7388 7676 -3 7386 7676 7674 -3 7387 7675 7389 -3 7677 7389 7675 -3 7388 7390 7678 -3 7388 7678 7676 -3 7389 7677 7391 -3 7679 7391 7677 -3 7390 7392 7680 -3 7390 7680 7678 -3 7391 7679 7393 -3 7681 7393 7679 -3 7392 7394 
7682 -3 7392 7682 7680 -3 7393 7681 7395 -3 7683 7395 7681 -3 7394 7396 7684 -3 7394 7684 7682 -3 7395 7683 7685 -3 7395 7685 7397 -3 7396 7398 7686 -3 7396 7686 7684 -3 7397 7685 7687 -3 7397 7687 7399 -3 7398 7400 7688 -3 7398 7688 7686 -3 7399 7687 7689 -3 7399 7689 7401 -3 7400 7402 7688 -3 7690 7688 7402 -3 7401 7689 7691 -3 7401 7691 7403 -3 7402 7404 7690 -3 7692 7690 7404 -3 7403 7691 7693 -3 7403 7693 7405 -3 7404 7406 7692 -3 7694 7692 7406 -3 7405 7693 7695 -3 7405 7695 7407 -3 7406 7408 7694 -3 7696 7694 7408 -3 7407 7695 7697 -3 7407 7697 7409 -3 7408 7410 7696 -3 7698 7696 7410 -3 7409 7697 7699 -3 7409 7699 7411 -3 7410 7412 7698 -3 7700 7698 7412 -3 7411 7699 7701 -3 7411 7701 7413 -3 7412 7414 7700 -3 7702 7700 7414 -3 7413 7701 7703 -3 7413 7703 7415 -3 7414 7416 7702 -3 7704 7702 7416 -3 7415 7703 7705 -3 7415 7705 7417 -3 7416 7418 7704 -3 7706 7704 7418 -3 7417 7705 7707 -3 7417 7707 7419 -3 7418 7420 7706 -3 7708 7706 7420 -3 7419 7707 7709 -3 7419 7709 7421 -3 7420 7422 7708 -3 7710 7708 7422 -3 7421 7709 7711 -3 7421 7711 7423 -3 7422 7424 7710 -3 7712 7710 7424 -3 7423 7711 7713 -3 7423 7713 7425 -3 7424 7426 7712 -3 7714 7712 7426 -3 7425 7713 7715 -3 7425 7715 7427 -3 7426 7428 7714 -3 7716 7714 7428 -3 7427 7715 7717 -3 7427 7717 7429 -3 7428 7430 7716 -3 7718 7716 7430 -3 7429 7717 7719 -3 7429 7719 7431 -3 7430 7432 7718 -3 7720 7718 7432 -3 7431 7719 7721 -3 7431 7721 7433 -3 7432 7434 7720 -3 7722 7720 7434 -3 7433 7721 7723 -3 7433 7723 7435 -3 7434 7436 7722 -3 7724 7722 7436 -3 7435 7723 7725 -3 7435 7725 7437 -3 7436 7438 7724 -3 7726 7724 7438 -3 7437 7725 7727 -3 7437 7727 7439 -3 7438 7440 7726 -3 7728 7726 7440 -3 7439 7727 7729 -3 7439 7729 7441 -3 7440 7442 7728 -3 7730 7728 7442 -3 7441 7729 7731 -3 7441 7731 7443 -3 7442 7444 7730 -3 7732 7730 7444 -3 7443 7731 7733 -3 7443 7733 7445 -3 7444 7446 7732 -3 7734 7732 7446 -3 7445 7733 7735 -3 7445 7735 7447 -3 7446 7448 7734 -3 7736 7734 7448 -3 7447 7735 7737 -3 7447 7737 7449 -3 7448 7450 7736 -3 7738 7736 7450 -3 7449 7737 7451 -3 7739 7451 7737 -3 7450 7452 7738 -3 7740 7738 7452 -3 7451 7739 7453 -3 7741 7453 7739 -3 7452 7454 7742 -3 7452 7742 7740 -3 7453 7741 7455 -3 7743 7455 7741 -3 7454 7456 7744 -3 7454 7744 7742 -3 7455 7743 7457 -3 7745 7457 7743 -3 7456 7458 7746 -3 7456 7746 7744 -3 7457 7745 7459 -3 7747 7459 7745 -3 7458 7460 7748 -3 7458 7748 7746 -3 7459 7747 7461 -3 7749 7461 7747 -3 7460 7462 7750 -3 7460 7750 7748 -3 7461 7749 7463 -3 7751 7463 7749 -3 7462 7464 7752 -3 7462 7752 7750 -3 7463 7751 7465 -3 7753 7465 7751 -3 7464 7466 7754 -3 7464 7754 7752 -3 7465 7753 7467 -3 7755 7467 7753 -3 7466 7468 7756 -3 7466 7756 7754 -3 7467 7755 7469 -3 7757 7469 7755 -3 7468 7470 7758 -3 7468 7758 7756 -3 7469 7757 7471 -3 7759 7471 7757 -3 7470 7472 7760 -3 7470 7760 7758 -3 7471 7759 7473 -3 7761 7473 7759 -3 7472 7474 7762 -3 7472 7762 7760 -3 7473 7761 7475 -3 7763 7475 7761 -3 7474 7476 7764 -3 7474 7764 7762 -3 7475 7763 7477 -3 7765 7477 7763 -3 7476 7478 7766 -3 7476 7766 7764 -3 7477 7765 7479 -3 7767 7479 7765 -3 7478 7480 7768 -3 7478 7768 7766 -3 7479 7767 7481 -3 7769 7481 7767 -3 7480 7482 7770 -3 7480 7770 7768 -3 7481 7769 7483 -3 7771 7483 7769 -3 7482 7484 7772 -3 7482 7772 7770 -3 7483 7771 7485 -3 7773 7485 7771 -3 7484 7486 7774 -3 7484 7774 7772 -3 7485 7773 7487 -3 7775 7487 7773 -3 7486 7488 7776 -3 7486 7776 7774 -3 7487 7775 7489 -3 7777 7489 7775 -3 7488 7490 7778 -3 7488 7778 7776 -3 7489 7777 7491 -3 7779 7491 7777 -3 7490 7492 7780 -3 7490 7780 7778 -3 
7491 7779 7493 -3 7781 7493 7779 -3 7492 7494 7782 -3 7492 7782 7780 -3 7493 7781 7495 -3 7783 7495 7781 -3 7494 7496 7784 -3 7494 7784 7782 -3 7495 7783 7497 -3 7785 7497 7783 -3 7496 7498 7786 -3 7496 7786 7784 -3 7497 7785 7499 -3 7787 7499 7785 -3 7498 7500 7788 -3 7498 7788 7786 -3 7499 7787 7501 -3 7789 7501 7787 -3 7500 7502 7790 -3 7500 7790 7788 -3 7501 7789 7503 -3 7791 7503 7789 -3 7502 7504 7792 -3 7502 7792 7790 -3 7503 7791 7793 -3 7503 7793 7505 -3 7504 7506 7794 -3 7504 7794 7792 -3 7505 7793 7795 -3 7505 7795 7507 -3 7506 7508 7794 -3 7796 7794 7508 -3 7507 7795 7797 -3 7507 7797 7509 -3 7508 7510 7796 -3 7798 7796 7510 -3 7509 7797 7799 -3 7509 7799 7511 -3 7510 7512 7798 -3 7800 7798 7512 -3 7511 7799 7801 -3 7511 7801 7513 -3 7512 7514 7800 -3 7802 7800 7514 -3 7513 7801 7803 -3 7513 7803 7515 -3 7514 7516 7802 -3 7804 7802 7516 -3 7515 7803 7805 -3 7515 7805 7517 -3 7516 7518 7804 -3 7806 7804 7518 -3 7517 7805 7807 -3 7517 7807 7519 -3 7518 7520 7806 -3 7808 7806 7520 -3 7519 7807 7811 -3 7519 7811 7523 -3 7520 7521 7808 -3 7809 7808 7521 -3 7521 7524 7809 -3 7812 7809 7524 -3 7522 7523 7810 -3 7811 7810 7523 -3 7522 7810 7815 -3 7522 7815 7527 -3 7524 7525 7812 -3 7813 7812 7525 -3 7525 7528 7813 -3 7816 7813 7528 -3 7526 7527 7814 -3 7815 7814 7527 -3 7526 7814 7884 -3 7526 7884 7596 -3 7528 7529 7816 -3 7817 7816 7529 -3 7529 7530 7817 -3 7818 7817 7530 -3 7530 7531 7818 -3 7819 7818 7531 -3 7531 7532 7819 -3 7820 7819 7532 -3 7532 7533 7820 -3 7821 7820 7533 -3 7533 7534 7821 -3 7822 7821 7534 -3 7534 7535 7822 -3 7823 7822 7535 -3 7535 7536 7823 -3 7824 7823 7536 -3 7536 7537 7824 -3 7825 7824 7537 -3 7537 7538 7825 -3 7826 7825 7538 -3 7538 7539 7826 -3 7827 7826 7539 -3 7539 7540 7827 -3 7828 7827 7540 -3 7540 7541 7828 -3 7829 7828 7541 -3 7541 7542 7829 -3 7830 7829 7542 -3 7542 7543 7830 -3 7831 7830 7543 -3 7543 7544 7831 -3 7832 7831 7544 -3 7544 7545 7832 -3 7833 7832 7545 -3 7545 7546 7833 -3 7834 7833 7546 -3 7546 7547 7834 -3 7835 7834 7547 -3 7547 7548 7835 -3 7836 7835 7548 -3 7548 7549 7836 -3 7837 7836 7549 -3 7549 7550 7837 -3 7838 7837 7550 -3 7550 7551 7838 -3 7839 7838 7551 -3 7551 7552 7839 -3 7840 7839 7552 -3 7552 7553 7840 -3 7841 7840 7553 -3 7553 7554 7841 -3 7842 7841 7554 -3 7554 7555 7842 -3 7843 7842 7555 -3 7555 7556 7843 -3 7844 7843 7556 -3 7556 7557 7844 -3 7845 7844 7557 -3 7557 7558 7845 -3 7846 7845 7558 -3 7558 7559 7846 -3 7847 7846 7559 -3 7559 7560 7848 -3 7559 7848 7847 -3 7560 7561 7849 -3 7560 7849 7848 -3 7561 7562 7850 -3 7561 7850 7849 -3 7562 7563 7851 -3 7562 7851 7850 -3 7563 7564 7852 -3 7563 7852 7851 -3 7564 7565 7853 -3 7564 7853 7852 -3 7565 7566 7854 -3 7565 7854 7853 -3 7566 7567 7855 -3 7566 7855 7854 -3 7567 7568 7856 -3 7567 7856 7855 -3 7568 7569 7857 -3 7568 7857 7856 -3 7569 7570 7858 -3 7569 7858 7857 -3 7570 7571 7859 -3 7570 7859 7858 -3 7571 7572 7860 -3 7571 7860 7859 -3 7572 7573 7861 -3 7572 7861 7860 -3 7573 7574 7862 -3 7573 7862 7861 -3 7574 7575 7863 -3 7574 7863 7862 -3 7575 7576 7864 -3 7575 7864 7863 -3 7576 7577 7865 -3 7576 7865 7864 -3 7577 7578 7866 -3 7577 7866 7865 -3 7578 7579 7867 -3 7578 7867 7866 -3 7579 7580 7868 -3 7579 7868 7867 -3 7580 7581 7869 -3 7580 7869 7868 -3 7581 7582 7870 -3 7581 7870 7869 -3 7582 7583 7871 -3 7582 7871 7870 -3 7583 7584 7872 -3 7583 7872 7871 -3 7584 7585 7873 -3 7584 7873 7872 -3 7585 7586 7874 -3 7585 7874 7873 -3 7586 7587 7875 -3 7586 7875 7874 -3 7587 7588 7876 -3 7587 7876 7875 -3 7588 7589 7877 -3 7588 7877 7876 -3 7589 7590 7878 -3 7589 
7878 7877 -3 7590 7591 7879 -3 7590 7879 7878 -3 7591 7592 7880 -3 7591 7880 7879 -3 7592 7593 7881 -3 7592 7881 7880 -3 7593 7594 7882 -3 7593 7882 7881 -3 7594 7595 7883 -3 7594 7883 7882 -3 7595 7596 7884 -3 7595 7884 7883 -3 7597 7885 7598 -3 7886 7598 7885 -3 7597 7667 7885 -3 7955 7885 7667 -3 7598 7886 7599 -3 7887 7599 7886 -3 7599 7887 7600 -3 7888 7600 7887 -3 7600 7888 7601 -3 7889 7601 7888 -3 7601 7889 7602 -3 7890 7602 7889 -3 7602 7890 7603 -3 7891 7603 7890 -3 7603 7891 7604 -3 7892 7604 7891 -3 7604 7892 7605 -3 7893 7605 7892 -3 7605 7893 7606 -3 7894 7606 7893 -3 7606 7894 7607 -3 7895 7607 7894 -3 7607 7895 7608 -3 7896 7608 7895 -3 7608 7896 7609 -3 7897 7609 7896 -3 7609 7897 7610 -3 7898 7610 7897 -3 7610 7898 7899 -3 7610 7899 7611 -3 7611 7899 7900 -3 7611 7900 7612 -3 7612 7900 7901 -3 7612 7901 7613 -3 7613 7901 7902 -3 7613 7902 7614 -3 7614 7902 7903 -3 7614 7903 7615 -3 7615 7903 7904 -3 7615 7904 7616 -3 7616 7904 7905 -3 7616 7905 7617 -3 7617 7905 7906 -3 7617 7906 7618 -3 7618 7906 7907 -3 7618 7907 7619 -3 7619 7907 7908 -3 7619 7908 7620 -3 7620 7908 7909 -3 7620 7909 7621 -3 7621 7909 7910 -3 7621 7910 7622 -3 7622 7910 7911 -3 7622 7911 7623 -3 7623 7911 7912 -3 7623 7912 7624 -3 7624 7912 7913 -3 7624 7913 7625 -3 7625 7913 7914 -3 7625 7914 7626 -3 7626 7914 7915 -3 7626 7915 7627 -3 7627 7915 7916 -3 7627 7916 7628 -3 7628 7916 7917 -3 7628 7917 7629 -3 7629 7917 7918 -3 7629 7918 7630 -3 7630 7918 7919 -3 7630 7919 7631 -3 7631 7919 7920 -3 7631 7920 7632 -3 7632 7920 7921 -3 7632 7921 7633 -3 7633 7921 7922 -3 7633 7922 7634 -3 7634 7922 7923 -3 7634 7923 7635 -3 7635 7923 7924 -3 7635 7924 7636 -3 7636 7924 7925 -3 7636 7925 7637 -3 7637 7925 7926 -3 7637 7926 7638 -3 7638 7926 7927 -3 7638 7927 7639 -3 7639 7927 7928 -3 7639 7928 7640 -3 7640 7928 7929 -3 7640 7929 7641 -3 7641 7929 7930 -3 7641 7930 7642 -3 7642 7930 7931 -3 7642 7931 7643 -3 7643 7931 7932 -3 7643 7932 7644 -3 7644 7932 7933 -3 7644 7933 7645 -3 7645 7933 7934 -3 7645 7934 7646 -3 7646 7934 7935 -3 7646 7935 7647 -3 7647 7935 7936 -3 7647 7936 7648 -3 7648 7936 7937 -3 7648 7937 7649 -3 7649 7937 7938 -3 7649 7938 7650 -3 7650 7938 7939 -3 7650 7939 7651 -3 7651 7939 7940 -3 7651 7940 7652 -3 7652 7940 7941 -3 7652 7941 7653 -3 7653 7941 7942 -3 7653 7942 7654 -3 7654 7942 7943 -3 7654 7943 7655 -3 7655 7943 7944 -3 7655 7944 7656 -3 7656 7944 7945 -3 7656 7945 7657 -3 7657 7945 7946 -3 7657 7946 7658 -3 7658 7946 7947 -3 7658 7947 7659 -3 7659 7947 7948 -3 7659 7948 7660 -3 7660 7948 7949 -3 7660 7949 7661 -3 7661 7949 7950 -3 7661 7950 7662 -3 7662 7950 7951 -3 7662 7951 7663 -3 7663 7951 7952 -3 7663 7952 7664 -3 7664 7952 7665 -3 7953 7665 7952 -3 7665 7953 7668 -3 7956 7668 7953 -3 7666 7954 7667 -3 7955 7667 7954 -3 7666 7671 7959 -3 7666 7959 7954 -3 7668 7956 7669 -3 7957 7669 7956 -3 7669 7957 7672 -3 7960 7672 7957 -3 7670 7958 7671 -3 7959 7671 7958 -3 7670 7674 7962 -3 7670 7962 7958 -3 7672 7960 7673 -3 7961 7673 7960 -3 7673 7961 7675 -3 7963 7675 7961 -3 7674 7676 7964 -3 7674 7964 7962 -3 7675 7963 7677 -3 7965 7677 7963 -3 7676 7678 7966 -3 7676 7966 7964 -3 7677 7965 7679 -3 7967 7679 7965 -3 7678 7680 7968 -3 7678 7968 7966 -3 7679 7967 7681 -3 7969 7681 7967 -3 7680 7682 7970 -3 7680 7970 7968 -3 7681 7969 7683 -3 7971 7683 7969 -3 7682 7684 7972 -3 7682 7972 7970 -3 7683 7971 7685 -3 7973 7685 7971 -3 7684 7686 7974 -3 7684 7974 7972 -3 7685 7973 7687 -3 7975 7687 7973 -3 7686 7688 7976 -3 7686 7976 7974 -3 7687 7975 7689 -3 7977 7689 7975 -3 7688 7690 
7978 -3 7688 7978 7976 -3 7689 7977 7691 -3 7979 7691 7977 -3 7690 7692 7980 -3 7690 7980 7978 -3 7691 7979 7693 -3 7981 7693 7979 -3 7692 7694 7982 -3 7692 7982 7980 -3 7693 7981 7695 -3 7983 7695 7981 -3 7694 7696 7984 -3 7694 7984 7982 -3 7695 7983 7697 -3 7985 7697 7983 -3 7696 7698 7986 -3 7696 7986 7984 -3 7697 7985 7699 -3 7987 7699 7985 -3 7698 7700 7988 -3 7698 7988 7986 -3 7699 7987 7701 -3 7989 7701 7987 -3 7700 7702 7990 -3 7700 7990 7988 -3 7701 7989 7703 -3 7991 7703 7989 -3 7702 7704 7992 -3 7702 7992 7990 -3 7703 7991 7705 -3 7993 7705 7991 -3 7704 7706 7994 -3 7704 7994 7992 -3 7705 7993 7707 -3 7995 7707 7993 -3 7706 7708 7996 -3 7706 7996 7994 -3 7707 7995 7709 -3 7997 7709 7995 -3 7708 7710 7998 -3 7708 7998 7996 -3 7709 7997 7711 -3 7999 7711 7997 -3 7710 7712 8000 -3 7710 8000 7998 -3 7711 7999 7713 -3 8001 7713 7999 -3 7712 7714 8002 -3 7712 8002 8000 -3 7713 8001 7715 -3 8003 7715 8001 -3 7714 7716 8004 -3 7714 8004 8002 -3 7715 8003 7717 -3 8005 7717 8003 -3 7716 7718 8006 -3 7716 8006 8004 -3 7717 8005 7719 -3 8007 7719 8005 -3 7718 7720 8008 -3 7718 8008 8006 -3 7719 8007 8009 -3 7719 8009 7721 -3 7720 7722 8008 -3 8010 8008 7722 -3 7721 8009 8011 -3 7721 8011 7723 -3 7722 7724 8010 -3 8012 8010 7724 -3 7723 8011 8013 -3 7723 8013 7725 -3 7724 7726 8012 -3 8014 8012 7726 -3 7725 8013 8015 -3 7725 8015 7727 -3 7726 7728 8014 -3 8016 8014 7728 -3 7727 8015 8017 -3 7727 8017 7729 -3 7728 7730 8016 -3 8018 8016 7730 -3 7729 8017 8019 -3 7729 8019 7731 -3 7730 7732 8018 -3 8020 8018 7732 -3 7731 8019 8021 -3 7731 8021 7733 -3 7732 7734 8020 -3 8022 8020 7734 -3 7733 8021 8023 -3 7733 8023 7735 -3 7734 7736 8022 -3 8024 8022 7736 -3 7735 8023 8025 -3 7735 8025 7737 -3 7736 7738 8024 -3 8026 8024 7738 -3 7737 8025 8027 -3 7737 8027 7739 -3 7738 7740 8026 -3 8028 8026 7740 -3 7739 8027 8029 -3 7739 8029 7741 -3 7740 7742 8028 -3 8030 8028 7742 -3 7741 8029 8031 -3 7741 8031 7743 -3 7742 7744 8030 -3 8032 8030 7744 -3 7743 8031 8033 -3 7743 8033 7745 -3 7744 7746 8032 -3 8034 8032 7746 -3 7745 8033 8035 -3 7745 8035 7747 -3 7746 7748 8034 -3 8036 8034 7748 -3 7747 8035 8037 -3 7747 8037 7749 -3 7748 7750 8036 -3 8038 8036 7750 -3 7749 8037 8039 -3 7749 8039 7751 -3 7750 7752 8038 -3 8040 8038 7752 -3 7751 8039 8041 -3 7751 8041 7753 -3 7752 7754 8040 -3 8042 8040 7754 -3 7753 8041 8043 -3 7753 8043 7755 -3 7754 7756 8042 -3 8044 8042 7756 -3 7755 8043 8045 -3 7755 8045 7757 -3 7756 7758 8044 -3 8046 8044 7758 -3 7757 8045 8047 -3 7757 8047 7759 -3 7758 7760 8046 -3 8048 8046 7760 -3 7759 8047 8049 -3 7759 8049 7761 -3 7760 7762 8048 -3 8050 8048 7762 -3 7761 8049 8051 -3 7761 8051 7763 -3 7762 7764 8050 -3 8052 8050 7764 -3 7763 8051 8053 -3 7763 8053 7765 -3 7764 7766 8052 -3 8054 8052 7766 -3 7765 8053 8055 -3 7765 8055 7767 -3 7766 7768 8054 -3 8056 8054 7768 -3 7767 8055 8057 -3 7767 8057 7769 -3 7768 7770 8056 -3 8058 8056 7770 -3 7769 8057 8059 -3 7769 8059 7771 -3 7770 7772 8058 -3 8060 8058 7772 -3 7771 8059 8061 -3 7771 8061 7773 -3 7772 7774 8060 -3 8062 8060 7774 -3 7773 8061 8063 -3 7773 8063 7775 -3 7774 7776 8064 -3 7774 8064 8062 -3 7775 8063 7777 -3 8065 7777 8063 -3 7776 7778 8066 -3 7776 8066 8064 -3 7777 8065 7779 -3 8067 7779 8065 -3 7778 7780 8068 -3 7778 8068 8066 -3 7779 8067 7781 -3 8069 7781 8067 -3 7780 7782 8070 -3 7780 8070 8068 -3 7781 8069 7783 -3 8071 7783 8069 -3 7782 7784 8072 -3 7782 8072 8070 -3 7783 8071 7785 -3 8073 7785 8071 -3 7784 7786 8074 -3 7784 8074 8072 -3 7785 8073 7787 -3 8075 7787 8073 -3 7786 7788 8076 -3 7786 8076 8074 -3 
7787 8075 7789 -3 8077 7789 8075 -3 7788 7790 8078 -3 7788 8078 8076 -3 7789 8077 7791 -3 8079 7791 8077 -3 7790 7792 8080 -3 7790 8080 8078 -3 7791 8079 7793 -3 8081 7793 8079 -3 7792 7794 8082 -3 7792 8082 8080 -3 7793 8081 7795 -3 8083 7795 8081 -3 7794 7796 8084 -3 7794 8084 8082 -3 7795 8083 7797 -3 8085 7797 8083 -3 7796 7798 8086 -3 7796 8086 8084 -3 7797 8085 7799 -3 8087 7799 8085 -3 7798 7800 8088 -3 7798 8088 8086 -3 7799 8087 7801 -3 8089 7801 8087 -3 7800 7802 8090 -3 7800 8090 8088 -3 7801 8089 7803 -3 8091 7803 8089 -3 7802 7804 8092 -3 7802 8092 8090 -3 7803 8091 7805 -3 8093 7805 8091 -3 7804 7806 8094 -3 7804 8094 8092 -3 7805 8093 7807 -3 8095 7807 8093 -3 7806 7808 8096 -3 7806 8096 8094 -3 7807 8095 7811 -3 8099 7811 8095 -3 7808 7809 8097 -3 7808 8097 8096 -3 7809 7812 8100 -3 7809 8100 8097 -3 7810 7811 8099 -3 7810 8099 8098 -3 7810 8098 7815 -3 8103 7815 8098 -3 7812 7813 8101 -3 7812 8101 8100 -3 7813 7816 8104 -3 7813 8104 8101 -3 7814 7815 8103 -3 7814 8103 8102 -3 7814 8102 7884 -3 8172 7884 8102 -3 7816 7817 8105 -3 7816 8105 8104 -3 7817 7818 8106 -3 7817 8106 8105 -3 7818 7819 8107 -3 7818 8107 8106 -3 7819 7820 8108 -3 7819 8108 8107 -3 7820 7821 8109 -3 7820 8109 8108 -3 7821 7822 8110 -3 7821 8110 8109 -3 7822 7823 8111 -3 7822 8111 8110 -3 7823 7824 8112 -3 7823 8112 8111 -3 7824 7825 8113 -3 7824 8113 8112 -3 7825 7826 8114 -3 7825 8114 8113 -3 7826 7827 8115 -3 7826 8115 8114 -3 7827 7828 8116 -3 7827 8116 8115 -3 7828 7829 8117 -3 7828 8117 8116 -3 7829 7830 8117 -3 8118 8117 7830 -3 7830 7831 8118 -3 8119 8118 7831 -3 7831 7832 8119 -3 8120 8119 7832 -3 7832 7833 8120 -3 8121 8120 7833 -3 7833 7834 8121 -3 8122 8121 7834 -3 7834 7835 8122 -3 8123 8122 7835 -3 7835 7836 8123 -3 8124 8123 7836 -3 7836 7837 8124 -3 8125 8124 7837 -3 7837 7838 8125 -3 8126 8125 7838 -3 7838 7839 8126 -3 8127 8126 7839 -3 7839 7840 8127 -3 8128 8127 7840 -3 7840 7841 8128 -3 8129 8128 7841 -3 7841 7842 8129 -3 8130 8129 7842 -3 7842 7843 8130 -3 8131 8130 7843 -3 7843 7844 8131 -3 8132 8131 7844 -3 7844 7845 8132 -3 8133 8132 7845 -3 7845 7846 8133 -3 8134 8133 7846 -3 7846 7847 8134 -3 8135 8134 7847 -3 7847 7848 8135 -3 8136 8135 7848 -3 7848 7849 8136 -3 8137 8136 7849 -3 7849 7850 8137 -3 8138 8137 7850 -3 7850 7851 8138 -3 8139 8138 7851 -3 7851 7852 8139 -3 8140 8139 7852 -3 7852 7853 8140 -3 8141 8140 7853 -3 7853 7854 8141 -3 8142 8141 7854 -3 7854 7855 8142 -3 8143 8142 7855 -3 7855 7856 8143 -3 8144 8143 7856 -3 7856 7857 8144 -3 8145 8144 7857 -3 7857 7858 8145 -3 8146 8145 7858 -3 7858 7859 8146 -3 8147 8146 7859 -3 7859 7860 8147 -3 8148 8147 7860 -3 7860 7861 8148 -3 8149 8148 7861 -3 7861 7862 8149 -3 8150 8149 7862 -3 7862 7863 8150 -3 8151 8150 7863 -3 7863 7864 8151 -3 8152 8151 7864 -3 7864 7865 8152 -3 8153 8152 7865 -3 7865 7866 8153 -3 8154 8153 7866 -3 7866 7867 8154 -3 8155 8154 7867 -3 7867 7868 8155 -3 8156 8155 7868 -3 7868 7869 8156 -3 8157 8156 7869 -3 7869 7870 8157 -3 8158 8157 7870 -3 7870 7871 8158 -3 8159 8158 7871 -3 7871 7872 8159 -3 8160 8159 7872 -3 7872 7873 8160 -3 8161 8160 7873 -3 7873 7874 8161 -3 8162 8161 7874 -3 7874 7875 8162 -3 8163 8162 7875 -3 7875 7876 8163 -3 8164 8163 7876 -3 7876 7877 8164 -3 8165 8164 7877 -3 7877 7878 8165 -3 8166 8165 7878 -3 7878 7879 8166 -3 8167 8166 7879 -3 7879 7880 8167 -3 8168 8167 7880 -3 7880 7881 8168 -3 8169 8168 7881 -3 7881 7882 8169 -3 8170 8169 7882 -3 7882 7883 8170 -3 8171 8170 7883 -3 7883 7884 8171 -3 8172 8171 7884 -3 7885 8173 7886 -3 8174 7886 8173 -3 7885 7955 8243 -3 7885 
8243 8173 -3 7886 8174 7887 -3 8175 7887 8174 -3 7887 8175 7888 -3 8176 7888 8175 -3 7888 8176 7889 -3 8177 7889 8176 -3 7889 8177 7890 -3 8178 7890 8177 -3 7890 8178 7891 -3 8179 7891 8178 -3 7891 8179 7892 -3 8180 7892 8179 -3 7892 8180 7893 -3 8181 7893 8180 -3 7893 8181 7894 -3 8182 7894 8181 -3 7894 8182 7895 -3 8183 7895 8182 -3 7895 8183 7896 -3 8184 7896 8183 -3 7896 8184 7897 -3 8185 7897 8184 -3 7897 8185 7898 -3 8186 7898 8185 -3 7898 8186 7899 -3 8187 7899 8186 -3 7899 8187 7900 -3 8188 7900 8187 -3 7900 8188 7901 -3 8189 7901 8188 -3 7901 8189 7902 -3 8190 7902 8189 -3 7902 8190 7903 -3 8191 7903 8190 -3 7903 8191 7904 -3 8192 7904 8191 -3 7904 8192 7905 -3 8193 7905 8192 -3 7905 8193 7906 -3 8194 7906 8193 -3 7906 8194 7907 -3 8195 7907 8194 -3 7907 8195 7908 -3 8196 7908 8195 -3 7908 8196 7909 -3 8197 7909 8196 -3 7909 8197 7910 -3 8198 7910 8197 -3 7910 8198 7911 -3 8199 7911 8198 -3 7911 8199 7912 -3 8200 7912 8199 -3 7912 8200 7913 -3 8201 7913 8200 -3 7913 8201 7914 -3 8202 7914 8201 -3 7914 8202 7915 -3 8203 7915 8202 -3 7915 8203 7916 -3 8204 7916 8203 -3 7916 8204 7917 -3 8205 7917 8204 -3 7917 8205 7918 -3 8206 7918 8205 -3 7918 8206 7919 -3 8207 7919 8206 -3 7919 8207 7920 -3 8208 7920 8207 -3 7920 8208 7921 -3 8209 7921 8208 -3 7921 8209 7922 -3 8210 7922 8209 -3 7922 8210 7923 -3 8211 7923 8210 -3 7923 8211 7924 -3 8212 7924 8211 -3 7924 8212 7925 -3 8213 7925 8212 -3 7925 8213 7926 -3 8214 7926 8213 -3 7926 8214 7927 -3 8215 7927 8214 -3 7927 8215 7928 -3 8216 7928 8215 -3 7928 8216 7929 -3 8217 7929 8216 -3 7929 8217 7930 -3 8218 7930 8217 -3 7930 8218 7931 -3 8219 7931 8218 -3 7931 8219 7932 -3 8220 7932 8219 -3 7932 8220 7933 -3 8221 7933 8220 -3 7933 8221 7934 -3 8222 7934 8221 -3 7934 8222 7935 -3 8223 7935 8222 -3 7935 8223 7936 -3 8224 7936 8223 -3 7936 8224 7937 -3 8225 7937 8224 -3 7937 8225 7938 -3 8226 7938 8225 -3 7938 8226 7939 -3 8227 7939 8226 -3 7939 8227 8228 -3 7939 8228 7940 -3 7940 8228 8229 -3 7940 8229 7941 -3 7941 8229 8230 -3 7941 8230 7942 -3 7942 8230 8231 -3 7942 8231 7943 -3 7943 8231 8232 -3 7943 8232 7944 -3 7944 8232 8233 -3 7944 8233 7945 -3 7945 8233 8234 -3 7945 8234 7946 -3 7946 8234 8235 -3 7946 8235 7947 -3 7947 8235 8236 -3 7947 8236 7948 -3 7948 8236 8237 -3 7948 8237 7949 -3 7949 8237 8238 -3 7949 8238 7950 -3 7950 8238 8239 -3 7950 8239 7951 -3 7951 8239 8240 -3 7951 8240 7952 -3 7952 8240 8241 -3 7952 8241 7953 -3 7953 8241 8244 -3 7953 8244 7956 -3 7954 8242 8243 -3 7954 8243 7955 -3 7954 7959 8242 -3 8247 8242 7959 -3 7956 8244 8245 -3 7956 8245 7957 -3 7957 8245 8248 -3 7957 8248 7960 -3 7958 8246 8247 -3 7958 8247 7959 -3 7958 7962 8246 -3 8250 8246 7962 -3 7960 8248 8249 -3 7960 8249 7961 -3 7961 8249 8251 -3 7961 8251 7963 -3 7962 7964 8250 -3 8252 8250 7964 -3 7963 8251 8253 -3 7963 8253 7965 -3 7964 7966 8252 -3 8254 8252 7966 -3 7965 8253 8255 -3 7965 8255 7967 -3 7966 7968 8254 -3 8256 8254 7968 -3 7967 8255 8257 -3 7967 8257 7969 -3 7968 7970 8256 -3 8258 8256 7970 -3 7969 8257 8259 -3 7969 8259 7971 -3 7970 7972 8258 -3 8260 8258 7972 -3 7971 8259 8261 -3 7971 8261 7973 -3 7972 7974 8260 -3 8262 8260 7974 -3 7973 8261 8263 -3 7973 8263 7975 -3 7974 7976 8262 -3 8264 8262 7976 -3 7975 8263 8265 -3 7975 8265 7977 -3 7976 7978 8264 -3 8266 8264 7978 -3 7977 8265 8267 -3 7977 8267 7979 -3 7978 7980 8266 -3 8268 8266 7980 -3 7979 8267 8269 -3 7979 8269 7981 -3 7980 7982 8268 -3 8270 8268 7982 -3 7981 8269 8271 -3 7981 8271 7983 -3 7982 7984 8270 -3 8272 8270 7984 -3 7983 8271 8273 -3 7983 8273 7985 -3 7984 7986 
[Data block omitted: a long run of polygon-mesh triangle-index records of the form "-3 v1 v2 v3" (diff lines from a large mesh data file); the original line breaks were lost in extraction, so only this placeholder is kept.]
10135 9848 -3 9848 9849 10136 -3 10137 10136 9849 -3 9849 9850 10137 -3 10138 10137 9850 -3 9850 9851 10138 -3 10139 10138 9851 -3 9851 9852 10140 -3 9851 10140 10139 -3 9852 9853 10141 -3 9852 10141 10140 -3 9853 9854 10142 -3 9853 10142 10141 -3 9854 9855 10143 -3 9854 10143 10142 -3 9855 9856 10144 -3 9855 10144 10143 -3 9856 9857 10145 -3 9856 10145 10144 -3 9857 9858 10146 -3 9857 10146 10145 -3 9858 9859 10147 -3 9858 10147 10146 -3 9859 9860 10148 -3 9859 10148 10147 -3 9860 9861 10149 -3 9860 10149 10148 -3 9861 9862 10150 -3 9861 10150 10149 -3 9862 9863 10151 -3 9862 10151 10150 -3 9863 9864 10152 -3 9863 10152 10151 -3 9864 9865 10153 -3 9864 10153 10152 -3 9865 9866 10154 -3 9865 10154 10153 -3 9866 9867 10155 -3 9866 10155 10154 -3 9867 9868 10156 -3 9867 10156 10155 -3 9868 9869 10157 -3 9868 10157 10156 -3 9869 9870 10158 -3 9869 10158 10157 -3 9870 9871 10159 -3 9870 10159 10158 -3 9871 9872 10160 -3 9871 10160 10159 -3 9872 9873 10161 -3 9872 10161 10160 -3 9873 9874 10162 -3 9873 10162 10161 -3 9874 9875 10163 -3 9874 10163 10162 -3 9875 9876 10164 -3 9875 10164 10163 -3 9876 9877 10165 -3 9876 10165 10164 -3 9877 9878 10166 -3 9877 10166 10165 -3 9878 9879 10167 -3 9878 10167 10166 -3 9879 9880 10168 -3 9879 10168 10167 -3 9880 9881 10169 -3 9880 10169 10168 -3 9881 9882 10170 -3 9881 10170 10169 -3 9882 9883 10171 -3 9882 10171 10170 -3 9883 9884 10172 -3 9883 10172 10171 -3 9884 9885 10173 -3 9884 10173 10172 -3 9885 9886 10174 -3 9885 10174 10173 -3 9886 9887 10175 -3 9886 10175 10174 -3 9887 9888 10176 -3 9887 10176 10175 -3 9888 9889 10177 -3 9888 10177 10176 -3 9889 9890 10178 -3 9889 10178 10177 -3 9890 9891 10179 -3 9890 10179 10178 -3 9891 9892 10180 -3 9891 10180 10179 -3 9892 9893 10181 -3 9892 10181 10180 -3 9893 9894 10182 -3 9893 10182 10181 -3 9894 9895 10183 -3 9894 10183 10182 -3 9895 9896 10184 -3 9895 10184 10183 -3 9896 9897 10185 -3 9896 10185 10184 -3 9897 9898 10186 -3 9897 10186 10185 -3 9898 9899 10187 -3 9898 10187 10186 -3 9899 9900 10188 -3 9899 10188 10187 -3 9901 10189 9902 -3 10190 9902 10189 -3 9901 9971 10189 -3 10259 10189 9971 -3 9902 10190 9903 -3 10191 9903 10190 -3 9903 10191 9904 -3 10192 9904 10191 -3 9904 10192 9905 -3 10193 9905 10192 -3 9905 10193 9906 -3 10194 9906 10193 -3 9906 10194 9907 -3 10195 9907 10194 -3 9907 10195 9908 -3 10196 9908 10195 -3 9908 10196 9909 -3 10197 9909 10196 -3 9909 10197 9910 -3 10198 9910 10197 -3 9910 10198 9911 -3 10199 9911 10198 -3 9911 10199 9912 -3 10200 9912 10199 -3 9912 10200 9913 -3 10201 9913 10200 -3 9913 10201 9914 -3 10202 9914 10201 -3 9914 10202 9915 -3 10203 9915 10202 -3 9915 10203 9916 -3 10204 9916 10203 -3 9916 10204 9917 -3 10205 9917 10204 -3 9917 10205 9918 -3 10206 9918 10205 -3 9918 10206 9919 -3 10207 9919 10206 -3 9919 10207 9920 -3 10208 9920 10207 -3 9920 10208 9921 -3 10209 9921 10208 -3 9921 10209 9922 -3 10210 9922 10209 -3 9922 10210 9923 -3 10211 9923 10210 -3 9923 10211 9924 -3 10212 9924 10211 -3 9924 10212 9925 -3 10213 9925 10212 -3 9925 10213 9926 -3 10214 9926 10213 -3 9926 10214 9927 -3 10215 9927 10214 -3 9927 10215 9928 -3 10216 9928 10215 -3 9928 10216 9929 -3 10217 9929 10216 -3 9929 10217 9930 -3 10218 9930 10217 -3 9930 10218 9931 -3 10219 9931 10218 -3 9931 10219 9932 -3 10220 9932 10219 -3 9932 10220 10221 -3 9932 10221 9933 -3 9933 10221 10222 -3 9933 10222 9934 -3 9934 10222 10223 -3 9934 10223 9935 -3 9935 10223 10224 -3 9935 10224 9936 -3 9936 10224 10225 -3 9936 10225 9937 -3 9937 10225 10226 -3 9937 10226 9938 -3 9938 10226 10227 -3 9938 10227 
9939 -3 9939 10227 10228 -3 9939 10228 9940 -3 9940 10228 10229 -3 9940 10229 9941 -3 9941 10229 10230 -3 9941 10230 9942 -3 9942 10230 10231 -3 9942 10231 9943 -3 9943 10231 10232 -3 9943 10232 9944 -3 9944 10232 10233 -3 9944 10233 9945 -3 9945 10233 10234 -3 9945 10234 9946 -3 9946 10234 10235 -3 9946 10235 9947 -3 9947 10235 10236 -3 9947 10236 9948 -3 9948 10236 10237 -3 9948 10237 9949 -3 9949 10237 10238 -3 9949 10238 9950 -3 9950 10238 10239 -3 9950 10239 9951 -3 9951 10239 10240 -3 9951 10240 9952 -3 9952 10240 10241 -3 9952 10241 9953 -3 9953 10241 10242 -3 9953 10242 9954 -3 9954 10242 10243 -3 9954 10243 9955 -3 9955 10243 10244 -3 9955 10244 9956 -3 9956 10244 10245 -3 9956 10245 9957 -3 9957 10245 10246 -3 9957 10246 9958 -3 9958 10246 10247 -3 9958 10247 9959 -3 9959 10247 10248 -3 9959 10248 9960 -3 9960 10248 10249 -3 9960 10249 9961 -3 9961 10249 10250 -3 9961 10250 9962 -3 9962 10250 10251 -3 9962 10251 9963 -3 9963 10251 10252 -3 9963 10252 9964 -3 9964 10252 10253 -3 9964 10253 9965 -3 9965 10253 10254 -3 9965 10254 9966 -3 9966 10254 10255 -3 9966 10255 9967 -3 9967 10255 10256 -3 9967 10256 9968 -3 9968 10256 10257 -3 9968 10257 9969 -3 9969 10257 10260 -3 9969 10260 9972 -3 9970 10258 10259 -3 9970 10259 9971 -3 9970 9975 10258 -3 10263 10258 9975 -3 9972 10260 10261 -3 9972 10261 9973 -3 9973 10261 10264 -3 9973 10264 9976 -3 9974 10262 10263 -3 9974 10263 9975 -3 9974 9978 10266 -3 9974 10266 10262 -3 9976 10264 10265 -3 9976 10265 9977 -3 9977 10265 10267 -3 9977 10267 9979 -3 9978 9980 10268 -3 9978 10268 10266 -3 9979 10267 10269 -3 9979 10269 9981 -3 9980 9982 10270 -3 9980 10270 10268 -3 9981 10269 10271 -3 9981 10271 9983 -3 9982 9984 10272 -3 9982 10272 10270 -3 9983 10271 10273 -3 9983 10273 9985 -3 9984 9986 10274 -3 9984 10274 10272 -3 9985 10273 10275 -3 9985 10275 9987 -3 9986 9988 10276 -3 9986 10276 10274 -3 9987 10275 10277 -3 9987 10277 9989 -3 9988 9990 10278 -3 9988 10278 10276 -3 9989 10277 10279 -3 9989 10279 9991 -3 9990 9992 10280 -3 9990 10280 10278 -3 9991 10279 10281 -3 9991 10281 9993 -3 9992 9994 10282 -3 9992 10282 10280 -3 9993 10281 10283 -3 9993 10283 9995 -3 9994 9996 10284 -3 9994 10284 10282 -3 9995 10283 9997 -3 10285 9997 10283 -3 9996 9998 10286 -3 9996 10286 10284 -3 9997 10285 9999 -3 10287 9999 10285 -3 9998 10000 10288 -3 9998 10288 10286 -3 9999 10287 10001 -3 10289 10001 10287 -3 10000 10002 10290 -3 10000 10290 10288 -3 10001 10289 10003 -3 10291 10003 10289 -3 10002 10004 10292 -3 10002 10292 10290 -3 10003 10291 10005 -3 10293 10005 10291 -3 10004 10006 10294 -3 10004 10294 10292 -3 10005 10293 10007 -3 10295 10007 10293 -3 10006 10008 10296 -3 10006 10296 10294 -3 10007 10295 10009 -3 10297 10009 10295 -3 10008 10010 10298 -3 10008 10298 10296 -3 10009 10297 10011 -3 10299 10011 10297 -3 10010 10012 10300 -3 10010 10300 10298 -3 10011 10299 10013 -3 10301 10013 10299 -3 10012 10014 10302 -3 10012 10302 10300 -3 10013 10301 10015 -3 10303 10015 10301 -3 10014 10016 10304 -3 10014 10304 10302 -3 10015 10303 10017 -3 10305 10017 10303 -3 10016 10018 10306 -3 10016 10306 10304 -3 10017 10305 10019 -3 10307 10019 10305 -3 10018 10020 10308 -3 10018 10308 10306 -3 10019 10307 10021 -3 10309 10021 10307 -3 10020 10022 10310 -3 10020 10310 10308 -3 10021 10309 10023 -3 10311 10023 10309 -3 10022 10024 10312 -3 10022 10312 10310 -3 10023 10311 10025 -3 10313 10025 10311 -3 10024 10026 10314 -3 10024 10314 10312 -3 10025 10313 10027 -3 10315 10027 10313 -3 10026 10028 10316 -3 10026 10316 10314 -3 10027 10315 10029 -3 10317 
10029 10315 -3 10028 10030 10318 -3 10028 10318 10316 -3 10029 10317 10031 -3 10319 10031 10317 -3 10030 10032 10320 -3 10030 10320 10318 -3 10031 10319 10033 -3 10321 10033 10319 -3 10032 10034 10322 -3 10032 10322 10320 -3 10033 10321 10035 -3 10323 10035 10321 -3 10034 10036 10324 -3 10034 10324 10322 -3 10035 10323 10037 -3 10325 10037 10323 -3 10036 10038 10324 -3 10326 10324 10038 -3 10037 10325 10039 -3 10327 10039 10325 -3 10038 10040 10326 -3 10328 10326 10040 -3 10039 10327 10041 -3 10329 10041 10327 -3 10040 10042 10328 -3 10330 10328 10042 -3 10041 10329 10043 -3 10331 10043 10329 -3 10042 10044 10330 -3 10332 10330 10044 -3 10043 10331 10045 -3 10333 10045 10331 -3 10044 10046 10332 -3 10334 10332 10046 -3 10045 10333 10047 -3 10335 10047 10333 -3 10046 10048 10334 -3 10336 10334 10048 -3 10047 10335 10049 -3 10337 10049 10335 -3 10048 10050 10336 -3 10338 10336 10050 -3 10049 10337 10051 -3 10339 10051 10337 -3 10050 10052 10338 -3 10340 10338 10052 -3 10051 10339 10053 -3 10341 10053 10339 -3 10052 10054 10340 -3 10342 10340 10054 -3 10053 10341 10055 -3 10343 10055 10341 -3 10054 10056 10342 -3 10344 10342 10056 -3 10055 10343 10057 -3 10345 10057 10343 -3 10056 10058 10344 -3 10346 10344 10058 -3 10057 10345 10347 -3 10057 10347 10059 -3 10058 10060 10346 -3 10348 10346 10060 -3 10059 10347 10349 -3 10059 10349 10061 -3 10060 10062 10348 -3 10350 10348 10062 -3 10061 10349 10351 -3 10061 10351 10063 -3 10062 10064 10350 -3 10352 10350 10064 -3 10063 10351 10353 -3 10063 10353 10065 -3 10064 10066 10352 -3 10354 10352 10066 -3 10065 10353 10355 -3 10065 10355 10067 -3 10066 10068 10354 -3 10356 10354 10068 -3 10067 10355 10357 -3 10067 10357 10069 -3 10068 10070 10356 -3 10358 10356 10070 -3 10069 10357 10359 -3 10069 10359 10071 -3 10070 10072 10358 -3 10360 10358 10072 -3 10071 10359 10361 -3 10071 10361 10073 -3 10072 10074 10360 -3 10362 10360 10074 -3 10073 10361 10363 -3 10073 10363 10075 -3 10074 10076 10362 -3 10364 10362 10076 -3 10075 10363 10365 -3 10075 10365 10077 -3 10076 10078 10364 -3 10366 10364 10078 -3 10077 10365 10367 -3 10077 10367 10079 -3 10078 10080 10366 -3 10368 10366 10080 -3 10079 10367 10369 -3 10079 10369 10081 -3 10080 10082 10368 -3 10370 10368 10082 -3 10081 10369 10371 -3 10081 10371 10083 -3 10082 10084 10370 -3 10372 10370 10084 -3 10083 10371 10373 -3 10083 10373 10085 -3 10084 10086 10372 -3 10374 10372 10086 -3 10085 10373 10375 -3 10085 10375 10087 -3 10086 10088 10374 -3 10376 10374 10088 -3 10087 10375 10377 -3 10087 10377 10089 -3 10088 10090 10376 -3 10378 10376 10090 -3 10089 10377 10379 -3 10089 10379 10091 -3 10090 10092 10378 -3 10380 10378 10092 -3 10091 10379 10381 -3 10091 10381 10093 -3 10092 10094 10380 -3 10382 10380 10094 -3 10093 10381 10383 -3 10093 10383 10095 -3 10094 10096 10382 -3 10384 10382 10096 -3 10095 10383 10385 -3 10095 10385 10097 -3 10096 10098 10384 -3 10386 10384 10098 -3 10097 10385 10387 -3 10097 10387 10099 -3 10098 10100 10388 -3 10098 10388 10386 -3 10099 10387 10389 -3 10099 10389 10101 -3 10100 10102 10390 -3 10100 10390 10388 -3 10101 10389 10391 -3 10101 10391 10103 -3 10102 10104 10392 -3 10102 10392 10390 -3 10103 10391 10393 -3 10103 10393 10105 -3 10104 10106 10394 -3 10104 10394 10392 -3 10105 10393 10395 -3 10105 10395 10107 -3 10106 10108 10396 -3 10106 10396 10394 -3 10107 10395 10397 -3 10107 10397 10109 -3 10108 10110 10398 -3 10108 10398 10396 -3 10109 10397 10399 -3 10109 10399 10111 -3 10110 10112 10400 -3 10110 10400 10398 -3 10111 10399 10403 -3 10111 10403 10115 -3 10112 10113 
10401 -3 10112 10401 10400 -3 10113 10116 10404 -3 10113 10404 10401 -3 10114 10115 10403 -3 10114 10403 10402 -3 10114 10402 10407 -3 10114 10407 10119 -3 10116 10117 10405 -3 10116 10405 10404 -3 10117 10120 10408 -3 10117 10408 10405 -3 10118 10119 10407 -3 10118 10407 10406 -3 10118 10406 10476 -3 10118 10476 10188 -3 10120 10121 10409 -3 10120 10409 10408 -3 10121 10122 10410 -3 10121 10410 10409 -3 10122 10123 10411 -3 10122 10411 10410 -3 10123 10124 10412 -3 10123 10412 10411 -3 10124 10125 10413 -3 10124 10413 10412 -3 10125 10126 10414 -3 10125 10414 10413 -3 10126 10127 10415 -3 10126 10415 10414 -3 10127 10128 10416 -3 10127 10416 10415 -3 10128 10129 10417 -3 10128 10417 10416 -3 10129 10130 10418 -3 10129 10418 10417 -3 10130 10131 10419 -3 10130 10419 10418 -3 10131 10132 10420 -3 10131 10420 10419 -3 10132 10133 10421 -3 10132 10421 10420 -3 10133 10134 10422 -3 10133 10422 10421 -3 10134 10135 10423 -3 10134 10423 10422 -3 10135 10136 10424 -3 10135 10424 10423 -3 10136 10137 10425 -3 10136 10425 10424 -3 10137 10138 10426 -3 10137 10426 10425 -3 10138 10139 10427 -3 10138 10427 10426 -3 10139 10140 10428 -3 10139 10428 10427 -3 10140 10141 10429 -3 10140 10429 10428 -3 10141 10142 10430 -3 10141 10430 10429 -3 10142 10143 10431 -3 10142 10431 10430 -3 10143 10144 10432 -3 10143 10432 10431 -3 10144 10145 10433 -3 10144 10433 10432 -3 10145 10146 10434 -3 10145 10434 10433 -3 10146 10147 10435 -3 10146 10435 10434 -3 10147 10148 10436 -3 10147 10436 10435 -3 10148 10149 10437 -3 10148 10437 10436 -3 10149 10150 10438 -3 10149 10438 10437 -3 10150 10151 10439 -3 10150 10439 10438 -3 10151 10152 10440 -3 10151 10440 10439 -3 10152 10153 10441 -3 10152 10441 10440 -3 10153 10154 10442 -3 10153 10442 10441 -3 10154 10155 10443 -3 10154 10443 10442 -3 10155 10156 10444 -3 10155 10444 10443 -3 10156 10157 10445 -3 10156 10445 10444 -3 10157 10158 10446 -3 10157 10446 10445 -3 10158 10159 10447 -3 10158 10447 10446 -3 10159 10160 10447 -3 10448 10447 10160 -3 10160 10161 10448 -3 10449 10448 10161 -3 10161 10162 10449 -3 10450 10449 10162 -3 10162 10163 10450 -3 10451 10450 10163 -3 10163 10164 10451 -3 10452 10451 10164 -3 10164 10165 10452 -3 10453 10452 10165 -3 10165 10166 10453 -3 10454 10453 10166 -3 10166 10167 10454 -3 10455 10454 10167 -3 10167 10168 10455 -3 10456 10455 10168 -3 10168 10169 10456 -3 10457 10456 10169 -3 10169 10170 10457 -3 10458 10457 10170 -3 10170 10171 10458 -3 10459 10458 10171 -3 10171 10172 10459 -3 10460 10459 10172 -3 10172 10173 10460 -3 10461 10460 10173 -3 10173 10174 10461 -3 10462 10461 10174 -3 10174 10175 10462 -3 10463 10462 10175 -3 10175 10176 10463 -3 10464 10463 10176 -3 10176 10177 10464 -3 10465 10464 10177 -3 10177 10178 10465 -3 10466 10465 10178 -3 10178 10179 10466 -3 10467 10466 10179 -3 10179 10180 10467 -3 10468 10467 10180 -3 10180 10181 10468 -3 10469 10468 10181 -3 10181 10182 10469 -3 10470 10469 10182 -3 10182 10183 10470 -3 10471 10470 10183 -3 10183 10184 10471 -3 10472 10471 10184 -3 10184 10185 10472 -3 10473 10472 10185 -3 10185 10186 10473 -3 10474 10473 10186 -3 10186 10187 10474 -3 10475 10474 10187 -3 10187 10188 10475 -3 10476 10475 10188 -3 10189 10477 10478 -3 10189 10478 10190 -3 10189 10259 10547 -3 10189 10547 10477 -3 10190 10478 10479 -3 10190 10479 10191 -3 10191 10479 10480 -3 10191 10480 10192 -3 10192 10480 10481 -3 10192 10481 10193 -3 10193 10481 10482 -3 10193 10482 10194 -3 10194 10482 10483 -3 10194 10483 10195 -3 10195 10483 10484 -3 10195 10484 10196 -3 10196 10484 10485 -3 10196 10485 10197 
-3 10197 10485 10486 -3 10197 10486 10198 -3 10198 10486 10487 -3 10198 10487 10199 -3 10199 10487 10488 -3 10199 10488 10200 -3 10200 10488 10489 -3 10200 10489 10201 -3 10201 10489 10490 -3 10201 10490 10202 -3 10202 10490 10491 -3 10202 10491 10203 -3 10203 10491 10492 -3 10203 10492 10204 -3 10204 10492 10493 -3 10204 10493 10205 -3 10205 10493 10494 -3 10205 10494 10206 -3 10206 10494 10495 -3 10206 10495 10207 -3 10207 10495 10496 -3 10207 10496 10208 -3 10208 10496 10497 -3 10208 10497 10209 -3 10209 10497 10498 -3 10209 10498 10210 -3 10210 10498 10499 -3 10210 10499 10211 -3 10211 10499 10500 -3 10211 10500 10212 -3 10212 10500 10501 -3 10212 10501 10213 -3 10213 10501 10502 -3 10213 10502 10214 -3 10214 10502 10503 -3 10214 10503 10215 -3 10215 10503 10504 -3 10215 10504 10216 -3 10216 10504 10505 -3 10216 10505 10217 -3 10217 10505 10506 -3 10217 10506 10218 -3 10218 10506 10507 -3 10218 10507 10219 -3 10219 10507 10508 -3 10219 10508 10220 -3 10220 10508 10509 -3 10220 10509 10221 -3 10221 10509 10510 -3 10221 10510 10222 -3 10222 10510 10511 -3 10222 10511 10223 -3 10223 10511 10512 -3 10223 10512 10224 -3 10224 10512 10513 -3 10224 10513 10225 -3 10225 10513 10514 -3 10225 10514 10226 -3 10226 10514 10515 -3 10226 10515 10227 -3 10227 10515 10516 -3 10227 10516 10228 -3 10228 10516 10517 -3 10228 10517 10229 -3 10229 10517 10518 -3 10229 10518 10230 -3 10230 10518 10519 -3 10230 10519 10231 -3 10231 10519 10520 -3 10231 10520 10232 -3 10232 10520 10521 -3 10232 10521 10233 -3 10233 10521 10522 -3 10233 10522 10234 -3 10234 10522 10523 -3 10234 10523 10235 -3 10235 10523 10524 -3 10235 10524 10236 -3 10236 10524 10525 -3 10236 10525 10237 -3 10237 10525 10526 -3 10237 10526 10238 -3 10238 10526 10527 -3 10238 10527 10239 -3 10239 10527 10528 -3 10239 10528 10240 -3 10240 10528 10529 -3 10240 10529 10241 -3 10241 10529 10530 -3 10241 10530 10242 -3 10242 10530 10531 -3 10242 10531 10243 -3 10243 10531 10532 -3 10243 10532 10244 -3 10244 10532 10245 -3 10533 10245 10532 -3 10245 10533 10246 -3 10534 10246 10533 -3 10246 10534 10247 -3 10535 10247 10534 -3 10247 10535 10248 -3 10536 10248 10535 -3 10248 10536 10249 -3 10537 10249 10536 -3 10249 10537 10250 -3 10538 10250 10537 -3 10250 10538 10251 -3 10539 10251 10538 -3 10251 10539 10252 -3 10540 10252 10539 -3 10252 10540 10253 -3 10541 10253 10540 -3 10253 10541 10254 -3 10542 10254 10541 -3 10254 10542 10255 -3 10543 10255 10542 -3 10255 10543 10256 -3 10544 10256 10543 -3 10256 10544 10257 -3 10545 10257 10544 -3 10257 10545 10260 -3 10548 10260 10545 -3 10258 10546 10259 -3 10547 10259 10546 -3 10258 10263 10551 -3 10258 10551 10546 -3 10260 10548 10261 -3 10549 10261 10548 -3 10261 10549 10264 -3 10552 10264 10549 -3 10262 10550 10263 -3 10551 10263 10550 -3 10262 10266 10554 -3 10262 10554 10550 -3 10264 10552 10265 -3 10553 10265 10552 -3 10265 10553 10267 -3 10555 10267 10553 -3 10266 10268 10556 -3 10266 10556 10554 -3 10267 10555 10269 -3 10557 10269 10555 -3 10268 10270 10558 -3 10268 10558 10556 -3 10269 10557 10271 -3 10559 10271 10557 -3 10270 10272 10560 -3 10270 10560 10558 -3 10271 10559 10273 -3 10561 10273 10559 -3 10272 10274 10562 -3 10272 10562 10560 -3 10273 10561 10275 -3 10563 10275 10561 -3 10274 10276 10564 -3 10274 10564 10562 -3 10275 10563 10277 -3 10565 10277 10563 -3 10276 10278 10566 -3 10276 10566 10564 -3 10277 10565 10279 -3 10567 10279 10565 -3 10278 10280 10568 -3 10278 10568 10566 -3 10279 10567 10281 -3 10569 10281 10567 -3 10280 10282 10570 -3 10280 10570 10568 -3 10281 10569 10283 -3 
10571 10283 10569 -3 10282 10284 10572 -3 10282 10572 10570 -3 10283 10571 10285 -3 10573 10285 10571 -3 10284 10286 10572 -3 10574 10572 10286 -3 10285 10573 10287 -3 10575 10287 10573 -3 10286 10288 10574 -3 10576 10574 10288 -3 10287 10575 10289 -3 10577 10289 10575 -3 10288 10290 10576 -3 10578 10576 10290 -3 10289 10577 10291 -3 10579 10291 10577 -3 10290 10292 10578 -3 10580 10578 10292 -3 10291 10579 10293 -3 10581 10293 10579 -3 10292 10294 10580 -3 10582 10580 10294 -3 10293 10581 10295 -3 10583 10295 10581 -3 10294 10296 10582 -3 10584 10582 10296 -3 10295 10583 10297 -3 10585 10297 10583 -3 10296 10298 10584 -3 10586 10584 10298 -3 10297 10585 10299 -3 10587 10299 10585 -3 10298 10300 10586 -3 10588 10586 10300 -3 10299 10587 10301 -3 10589 10301 10587 -3 10300 10302 10588 -3 10590 10588 10302 -3 10301 10589 10303 -3 10591 10303 10589 -3 10302 10304 10590 -3 10592 10590 10304 -3 10303 10591 10305 -3 10593 10305 10591 -3 10304 10306 10592 -3 10594 10592 10306 -3 10305 10593 10307 -3 10595 10307 10593 -3 10306 10308 10594 -3 10596 10594 10308 -3 10307 10595 10597 -3 10307 10597 10309 -3 10308 10310 10596 -3 10598 10596 10310 -3 10309 10597 10599 -3 10309 10599 10311 -3 10310 10312 10598 -3 10600 10598 10312 -3 10311 10599 10601 -3 10311 10601 10313 -3 10312 10314 10600 -3 10602 10600 10314 -3 10313 10601 10603 -3 10313 10603 10315 -3 10314 10316 10602 -3 10604 10602 10316 -3 10315 10603 10605 -3 10315 10605 10317 -3 10316 10318 10604 -3 10606 10604 10318 -3 10317 10605 10607 -3 10317 10607 10319 -3 10318 10320 10606 -3 10608 10606 10320 -3 10319 10607 10609 -3 10319 10609 10321 -3 10320 10322 10608 -3 10610 10608 10322 -3 10321 10609 10611 -3 10321 10611 10323 -3 10322 10324 10610 -3 10612 10610 10324 -3 10323 10611 10613 -3 10323 10613 10325 -3 10324 10326 10612 -3 10614 10612 10326 -3 10325 10613 10615 -3 10325 10615 10327 -3 10326 10328 10614 -3 10616 10614 10328 -3 10327 10615 10617 -3 10327 10617 10329 -3 10328 10330 10616 -3 10618 10616 10330 -3 10329 10617 10619 -3 10329 10619 10331 -3 10330 10332 10618 -3 10620 10618 10332 -3 10331 10619 10621 -3 10331 10621 10333 -3 10332 10334 10620 -3 10622 10620 10334 -3 10333 10621 10623 -3 10333 10623 10335 -3 10334 10336 10622 -3 10624 10622 10336 -3 10335 10623 10625 -3 10335 10625 10337 -3 10336 10338 10624 -3 10626 10624 10338 -3 10337 10625 10627 -3 10337 10627 10339 -3 10338 10340 10626 -3 10628 10626 10340 -3 10339 10627 10629 -3 10339 10629 10341 -3 10340 10342 10628 -3 10630 10628 10342 -3 10341 10629 10631 -3 10341 10631 10343 -3 10342 10344 10630 -3 10632 10630 10344 -3 10343 10631 10633 -3 10343 10633 10345 -3 10344 10346 10632 -3 10634 10632 10346 -3 10345 10633 10635 -3 10345 10635 10347 -3 10346 10348 10636 -3 10346 10636 10634 -3 10347 10635 10637 -3 10347 10637 10349 -3 10348 10350 10638 -3 10348 10638 10636 -3 10349 10637 10639 -3 10349 10639 10351 -3 10350 10352 10640 -3 10350 10640 10638 -3 10351 10639 10641 -3 10351 10641 10353 -3 10352 10354 10642 -3 10352 10642 10640 -3 10353 10641 10643 -3 10353 10643 10355 -3 10354 10356 10644 -3 10354 10644 10642 -3 10355 10643 10645 -3 10355 10645 10357 -3 10356 10358 10646 -3 10356 10646 10644 -3 10357 10645 10647 -3 10357 10647 10359 -3 10358 10360 10648 -3 10358 10648 10646 -3 10359 10647 10649 -3 10359 10649 10361 -3 10360 10362 10650 -3 10360 10650 10648 -3 10361 10649 10651 -3 10361 10651 10363 -3 10362 10364 10652 -3 10362 10652 10650 -3 10363 10651 10653 -3 10363 10653 10365 -3 10364 10366 10654 -3 10364 10654 10652 -3 10365 10653 10655 -3 10365 10655 10367 -3 10366 
10368 10656 -3 10366 10656 10654 -3 10367 10655 10657 -3 10367 10657 10369 -3 10368 10370 10658 -3 10368 10658 10656 -3 10369 10657 10659 -3 10369 10659 10371 -3 10370 10372 10660 -3 10370 10660 10658 -3 10371 10659 10373 -3 10661 10373 10659 -3 10372 10374 10662 -3 10372 10662 10660 -3 10373 10661 10375 -3 10663 10375 10661 -3 10374 10376 10664 -3 10374 10664 10662 -3 10375 10663 10377 -3 10665 10377 10663 -3 10376 10378 10666 -3 10376 10666 10664 -3 10377 10665 10379 -3 10667 10379 10665 -3 10378 10380 10668 -3 10378 10668 10666 -3 10379 10667 10381 -3 10669 10381 10667 -3 10380 10382 10670 -3 10380 10670 10668 -3 10381 10669 10383 -3 10671 10383 10669 -3 10382 10384 10672 -3 10382 10672 10670 -3 10383 10671 10385 -3 10673 10385 10671 -3 10384 10386 10674 -3 10384 10674 10672 -3 10385 10673 10387 -3 10675 10387 10673 -3 10386 10388 10676 -3 10386 10676 10674 -3 10387 10675 10389 -3 10677 10389 10675 -3 10388 10390 10678 -3 10388 10678 10676 -3 10389 10677 10391 -3 10679 10391 10677 -3 10390 10392 10680 -3 10390 10680 10678 -3 10391 10679 10393 -3 10681 10393 10679 -3 10392 10394 10682 -3 10392 10682 10680 -3 10393 10681 10395 -3 10683 10395 10681 -3 10394 10396 10684 -3 10394 10684 10682 -3 10395 10683 10397 -3 10685 10397 10683 -3 10396 10398 10686 -3 10396 10686 10684 -3 10397 10685 10399 -3 10687 10399 10685 -3 10398 10400 10688 -3 10398 10688 10686 -3 10399 10687 10403 -3 10691 10403 10687 -3 10400 10401 10689 -3 10400 10689 10688 -3 10401 10404 10692 -3 10401 10692 10689 -3 10402 10403 10691 -3 10402 10691 10690 -3 10402 10690 10407 -3 10695 10407 10690 -3 10404 10405 10693 -3 10404 10693 10692 -3 10405 10408 10696 -3 10405 10696 10693 -3 10406 10407 10695 -3 10406 10695 10694 -3 10406 10694 10476 -3 10764 10476 10694 -3 10408 10409 10697 -3 10408 10697 10696 -3 10409 10410 10697 -3 10698 10697 10410 -3 10410 10411 10698 -3 10699 10698 10411 -3 10411 10412 10699 -3 10700 10699 10412 -3 10412 10413 10700 -3 10701 10700 10413 -3 10413 10414 10701 -3 10702 10701 10414 -3 10414 10415 10702 -3 10703 10702 10415 -3 10415 10416 10703 -3 10704 10703 10416 -3 10416 10417 10704 -3 10705 10704 10417 -3 10417 10418 10705 -3 10706 10705 10418 -3 10418 10419 10706 -3 10707 10706 10419 -3 10419 10420 10707 -3 10708 10707 10420 -3 10420 10421 10708 -3 10709 10708 10421 -3 10421 10422 10709 -3 10710 10709 10422 -3 10422 10423 10710 -3 10711 10710 10423 -3 10423 10424 10711 -3 10712 10711 10424 -3 10424 10425 10712 -3 10713 10712 10425 -3 10425 10426 10713 -3 10714 10713 10426 -3 10426 10427 10714 -3 10715 10714 10427 -3 10427 10428 10715 -3 10716 10715 10428 -3 10428 10429 10716 -3 10717 10716 10429 -3 10429 10430 10717 -3 10718 10717 10430 -3 10430 10431 10718 -3 10719 10718 10431 -3 10431 10432 10719 -3 10720 10719 10432 -3 10432 10433 10720 -3 10721 10720 10433 -3 10433 10434 10721 -3 10722 10721 10434 -3 10434 10435 10722 -3 10723 10722 10435 -3 10435 10436 10723 -3 10724 10723 10436 -3 10436 10437 10724 -3 10725 10724 10437 -3 10437 10438 10725 -3 10726 10725 10438 -3 10438 10439 10726 -3 10727 10726 10439 -3 10439 10440 10727 -3 10728 10727 10440 -3 10440 10441 10728 -3 10729 10728 10441 -3 10441 10442 10729 -3 10730 10729 10442 -3 10442 10443 10730 -3 10731 10730 10443 -3 10443 10444 10731 -3 10732 10731 10444 -3 10444 10445 10732 -3 10733 10732 10445 -3 10445 10446 10733 -3 10734 10733 10446 -3 10446 10447 10734 -3 10735 10734 10447 -3 10447 10448 10735 -3 10736 10735 10448 -3 10448 10449 10736 -3 10737 10736 10449 -3 10449 10450 10737 -3 10738 10737 10450 -3 10450 10451 10738 -3 10739 10738 
10451 -3 10451 10452 10739 -3 10740 10739 10452 -3 10452 10453 10740 -3 10741 10740 10453 -3 10453 10454 10741 -3 10742 10741 10454 -3 10454 10455 10742 -3 10743 10742 10455 -3 10455 10456 10743 -3 10744 10743 10456 -3 10456 10457 10744 -3 10745 10744 10457 -3 10457 10458 10745 -3 10746 10745 10458 -3 10458 10459 10746 -3 10747 10746 10459 -3 10459 10460 10747 -3 10748 10747 10460 -3 10460 10461 10748 -3 10749 10748 10461 -3 10461 10462 10749 -3 10750 10749 10462 -3 10462 10463 10750 -3 10751 10750 10463 -3 10463 10464 10751 -3 10752 10751 10464 -3 10464 10465 10752 -3 10753 10752 10465 -3 10465 10466 10753 -3 10754 10753 10466 -3 10466 10467 10754 -3 10755 10754 10467 -3 10467 10468 10755 -3 10756 10755 10468 -3 10468 10469 10756 -3 10757 10756 10469 -3 10469 10470 10757 -3 10758 10757 10470 -3 10470 10471 10758 -3 10759 10758 10471 -3 10471 10472 10759 -3 10760 10759 10472 -3 10472 10473 10761 -3 10472 10761 10760 -3 10473 10474 10762 -3 10473 10762 10761 -3 10474 10475 10763 -3 10474 10763 10762 -3 10475 10476 10764 -3 10475 10764 10763 -3 10477 10765 10766 -3 10477 10766 10478 -3 10477 10547 10835 -3 10477 10835 10765 -3 10478 10766 10767 -3 10478 10767 10479 -3 10479 10767 10768 -3 10479 10768 10480 -3 10480 10768 10769 -3 10480 10769 10481 -3 10481 10769 10770 -3 10481 10770 10482 -3 10482 10770 10771 -3 10482 10771 10483 -3 10483 10771 10772 -3 10483 10772 10484 -3 10484 10772 10773 -3 10484 10773 10485 -3 10485 10773 10774 -3 10485 10774 10486 -3 10486 10774 10775 -3 10486 10775 10487 -3 10487 10775 10776 -3 10487 10776 10488 -3 10488 10776 10777 -3 10488 10777 10489 -3 10489 10777 10778 -3 10489 10778 10490 -3 10490 10778 10779 -3 10490 10779 10491 -3 10491 10779 10780 -3 10491 10780 10492 -3 10492 10780 10781 -3 10492 10781 10493 -3 10493 10781 10782 -3 10493 10782 10494 -3 10494 10782 10783 -3 10494 10783 10495 -3 10495 10783 10784 -3 10495 10784 10496 -3 10496 10784 10785 -3 10496 10785 10497 -3 10497 10785 10498 -3 10786 10498 10785 -3 10498 10786 10499 -3 10787 10499 10786 -3 10499 10787 10500 -3 10788 10500 10787 -3 10500 10788 10501 -3 10789 10501 10788 -3 10501 10789 10502 -3 10790 10502 10789 -3 10502 10790 10503 -3 10791 10503 10790 -3 10503 10791 10504 -3 10792 10504 10791 -3 10504 10792 10505 -3 10793 10505 10792 -3 10505 10793 10506 -3 10794 10506 10793 -3 10506 10794 10507 -3 10795 10507 10794 -3 10507 10795 10508 -3 10796 10508 10795 -3 10508 10796 10509 -3 10797 10509 10796 -3 10509 10797 10510 -3 10798 10510 10797 -3 10510 10798 10511 -3 10799 10511 10798 -3 10511 10799 10512 -3 10800 10512 10799 -3 10512 10800 10513 -3 10801 10513 10800 -3 10513 10801 10514 -3 10802 10514 10801 -3 10514 10802 10515 -3 10803 10515 10802 -3 10515 10803 10516 -3 10804 10516 10803 -3 10516 10804 10517 -3 10805 10517 10804 -3 10517 10805 10518 -3 10806 10518 10805 -3 10518 10806 10519 -3 10807 10519 10806 -3 10519 10807 10520 -3 10808 10520 10807 -3 10520 10808 10521 -3 10809 10521 10808 -3 10521 10809 10522 -3 10810 10522 10809 -3 10522 10810 10523 -3 10811 10523 10810 -3 10523 10811 10524 -3 10812 10524 10811 -3 10524 10812 10525 -3 10813 10525 10812 -3 10525 10813 10526 -3 10814 10526 10813 -3 10526 10814 10527 -3 10815 10527 10814 -3 10527 10815 10528 -3 10816 10528 10815 -3 10528 10816 10529 -3 10817 10529 10816 -3 10529 10817 10530 -3 10818 10530 10817 -3 10530 10818 10531 -3 10819 10531 10818 -3 10531 10819 10532 -3 10820 10532 10819 -3 10532 10820 10533 -3 10821 10533 10820 -3 10533 10821 10534 -3 10822 10534 10821 -3 10534 10822 10535 -3 10823 10535 10822 -3 10535 10823 10536 
-3 10824 10536 10823 -3 10536 10824 10537 -3 10825 10537 10824 -3 10537 10825 10538 -3 10826 10538 10825 -3 10538 10826 10539 -3 10827 10539 10826 -3 10539 10827 10540 -3 10828 10540 10827 -3 10540 10828 10541 -3 10829 10541 10828 -3 10541 10829 10542 -3 10830 10542 10829 -3 10542 10830 10543 -3 10831 10543 10830 -3 10543 10831 10544 -3 10832 10544 10831 -3 10544 10832 10545 -3 10833 10545 10832 -3 10545 10833 10548 -3 10836 10548 10833 -3 10546 10834 10547 -3 10835 10547 10834 -3 10546 10551 10834 -3 10839 10834 10551 -3 10548 10836 10549 -3 10837 10549 10836 -3 10549 10837 10552 -3 10840 10552 10837 -3 10550 10838 10551 -3 10839 10551 10838 -3 10550 10554 10838 -3 10842 10838 10554 -3 10552 10840 10553 -3 10841 10553 10840 -3 10553 10841 10555 -3 10843 10555 10841 -3 10554 10556 10842 -3 10844 10842 10556 -3 10555 10843 10557 -3 10845 10557 10843 -3 10556 10558 10844 -3 10846 10844 10558 -3 10557 10845 10559 -3 10847 10559 10845 -3 10558 10560 10846 -3 10848 10846 10560 -3 10559 10847 10561 -3 10849 10561 10847 -3 10560 10562 10848 -3 10850 10848 10562 -3 10561 10849 10851 -3 10561 10851 10563 -3 10562 10564 10850 -3 10852 10850 10564 -3 10563 10851 10853 -3 10563 10853 10565 -3 10564 10566 10852 -3 10854 10852 10566 -3 10565 10853 10855 -3 10565 10855 10567 -3 10566 10568 10854 -3 10856 10854 10568 -3 10567 10855 10857 -3 10567 10857 10569 -3 10568 10570 10856 -3 10858 10856 10570 -3 10569 10857 10859 -3 10569 10859 10571 -3 10570 10572 10858 -3 10860 10858 10572 -3 10571 10859 10861 -3 10571 10861 10573 -3 10572 10574 10860 -3 10862 10860 10574 -3 10573 10861 10863 -3 10573 10863 10575 -3 10574 10576 10862 -3 10864 10862 10576 -3 10575 10863 10865 -3 10575 10865 10577 -3 10576 10578 10864 -3 10866 10864 10578 -3 10577 10865 10867 -3 10577 10867 10579 -3 10578 10580 10866 -3 10868 10866 10580 -3 10579 10867 10869 -3 10579 10869 10581 -3 10580 10582 10868 -3 10870 10868 10582 -3 10581 10869 10871 -3 10581 10871 10583 -3 10582 10584 10870 -3 10872 10870 10584 -3 10583 10871 10873 -3 10583 10873 10585 -3 10584 10586 10872 -3 10874 10872 10586 -3 10585 10873 10875 -3 10585 10875 10587 -3 10586 10588 10874 -3 10876 10874 10588 -3 10587 10875 10877 -3 10587 10877 10589 -3 10588 10590 10876 -3 10878 10876 10590 -3 10589 10877 10879 -3 10589 10879 10591 -3 10590 10592 10878 -3 10880 10878 10592 -3 10591 10879 10881 -3 10591 10881 10593 -3 10592 10594 10880 -3 10882 10880 10594 -3 10593 10881 10883 -3 10593 10883 10595 -3 10594 10596 10882 -3 10884 10882 10596 -3 10595 10883 10885 -3 10595 10885 10597 -3 10596 10598 10884 -3 10886 10884 10598 -3 10597 10885 10887 -3 10597 10887 10599 -3 10598 10600 10888 -3 10598 10888 10886 -3 10599 10887 10889 -3 10599 10889 10601 -3 10600 10602 10890 -3 10600 10890 10888 -3 10601 10889 10891 -3 10601 10891 10603 -3 10602 10604 10892 -3 10602 10892 10890 -3 10603 10891 10893 -3 10603 10893 10605 -3 10604 10606 10894 -3 10604 10894 10892 -3 10605 10893 10895 -3 10605 10895 10607 -3 10606 10608 10896 -3 10606 10896 10894 -3 10607 10895 10897 -3 10607 10897 10609 -3 10608 10610 10898 -3 10608 10898 10896 -3 10609 10897 10899 -3 10609 10899 10611 -3 10610 10612 10900 -3 10610 10900 10898 -3 10611 10899 10901 -3 10611 10901 10613 -3 10612 10614 10902 -3 10612 10902 10900 -3 10613 10901 10903 -3 10613 10903 10615 -3 10614 10616 10904 -3 10614 10904 10902 -3 10615 10903 10905 -3 10615 10905 10617 -3 10616 10618 10906 -3 10616 10906 10904 -3 10617 10905 10907 -3 10617 10907 10619 -3 10618 10620 10908 -3 10618 10908 10906 -3 10619 10907 10909 -3 10619 10909 10621 -3 
10620 10622 10910 -3 10620 10910 10908 -3 10621 10909 10911 -3 10621 10911 10623 -3 10622 10624 10912 -3 10622 10912 10910 -3 10623 10911 10913 -3 10623 10913 10625 -3 10624 10626 10914 -3 10624 10914 10912 -3 10625 10913 10627 -3 10915 10627 10913 -3 10626 10628 10916 -3 10626 10916 10914 -3 10627 10915 10629 -3 10917 10629 10915 -3 10628 10630 10918 -3 10628 10918 10916 -3 10629 10917 10631 -3 10919 10631 10917 -3 10630 10632 10920 -3 10630 10920 10918 -3 10631 10919 10633 -3 10921 10633 10919 -3 10632 10634 10922 -3 10632 10922 10920 -3 10633 10921 10635 -3 10923 10635 10921 -3 10634 10636 10924 -3 10634 10924 10922 -3 10635 10923 10637 -3 10925 10637 10923 -3 10636 10638 10926 -3 10636 10926 10924 -3 10637 10925 10639 -3 10927 10639 10925 -3 10638 10640 10928 -3 10638 10928 10926 -3 10639 10927 10641 -3 10929 10641 10927 -3 10640 10642 10930 -3 10640 10930 10928 -3 10641 10929 10643 -3 10931 10643 10929 -3 10642 10644 10932 -3 10642 10932 10930 -3 10643 10931 10645 -3 10933 10645 10931 -3 10644 10646 10934 -3 10644 10934 10932 -3 10645 10933 10647 -3 10935 10647 10933 -3 10646 10648 10936 -3 10646 10936 10934 -3 10647 10935 10649 -3 10937 10649 10935 -3 10648 10650 10938 -3 10648 10938 10936 -3 10649 10937 10651 -3 10939 10651 10937 -3 10650 10652 10940 -3 10650 10940 10938 -3 10651 10939 10653 -3 10941 10653 10939 -3 10652 10654 10942 -3 10652 10942 10940 -3 10653 10941 10655 -3 10943 10655 10941 -3 10654 10656 10944 -3 10654 10944 10942 -3 10655 10943 10657 -3 10945 10657 10943 -3 10656 10658 10946 -3 10656 10946 10944 -3 10657 10945 10659 -3 10947 10659 10945 -3 10658 10660 10948 -3 10658 10948 10946 -3 10659 10947 10661 -3 10949 10661 10947 -3 10660 10662 10950 -3 10660 10950 10948 -3 10661 10949 10663 -3 10951 10663 10949 -3 10662 10664 10950 -3 10952 10950 10664 -3 10663 10951 10665 -3 10953 10665 10951 -3 10664 10666 10952 -3 10954 10952 10666 -3 10665 10953 10667 -3 10955 10667 10953 -3 10666 10668 10954 -3 10956 10954 10668 -3 10667 10955 10669 -3 10957 10669 10955 -3 10668 10670 10956 -3 10958 10956 10670 -3 10669 10957 10671 -3 10959 10671 10957 -3 10670 10672 10958 -3 10960 10958 10672 -3 10671 10959 10673 -3 10961 10673 10959 -3 10672 10674 10960 -3 10962 10960 10674 -3 10673 10961 10675 -3 10963 10675 10961 -3 10674 10676 10962 -3 10964 10962 10676 -3 10675 10963 10677 -3 10965 10677 10963 -3 10676 10678 10964 -3 10966 10964 10678 -3 10677 10965 10679 -3 10967 10679 10965 -3 10678 10680 10966 -3 10968 10966 10680 -3 10679 10967 10681 -3 10969 10681 10967 -3 10680 10682 10968 -3 10970 10968 10682 -3 10681 10969 10683 -3 10971 10683 10969 -3 10682 10684 10970 -3 10972 10970 10684 -3 10683 10971 10685 -3 10973 10685 10971 -3 10684 10686 10972 -3 10974 10972 10686 -3 10685 10973 10687 -3 10975 10687 10973 -3 10686 10688 10974 -3 10976 10974 10688 -3 10687 10975 10691 -3 10979 10691 10975 -3 10688 10689 10976 -3 10977 10976 10689 -3 10689 10692 10977 -3 10980 10977 10692 -3 10690 10691 10978 -3 10979 10978 10691 -3 10690 10978 10983 -3 10690 10983 10695 -3 10692 10693 10980 -3 10981 10980 10693 -3 10693 10696 10981 -3 10984 10981 10696 -3 10694 10695 10982 -3 10983 10982 10695 -3 10694 10982 11052 -3 10694 11052 10764 -3 10696 10697 10984 -3 10985 10984 10697 -3 10697 10698 10985 -3 10986 10985 10698 -3 10698 10699 10986 -3 10987 10986 10699 -3 10699 10700 10987 -3 10988 10987 10700 -3 10700 10701 10988 -3 10989 10988 10701 -3 10701 10702 10989 -3 10990 10989 10702 -3 10702 10703 10990 -3 10991 10990 10703 -3 10703 10704 10991 -3 10992 10991 10704 -3 10704 10705 10992 -3 10993 
10992 10705 -3 10705 10706 10993 -3 10994 10993 10706 -3 10706 10707 10994 -3 10995 10994 10707 -3 10707 10708 10995 -3 10996 10995 10708 -3 10708 10709 10996 -3 10997 10996 10709 -3 10709 10710 10997 -3 10998 10997 10710 -3 10710 10711 10998 -3 10999 10998 10711 -3 10711 10712 10999 -3 11000 10999 10712 -3 10712 10713 11000 -3 11001 11000 10713 -3 10713 10714 11001 -3 11002 11001 10714 -3 10714 10715 11002 -3 11003 11002 10715 -3 10715 10716 11003 -3 11004 11003 10716 -3 10716 10717 11004 -3 11005 11004 10717 -3 10717 10718 11005 -3 11006 11005 10718 -3 10718 10719 11006 -3 11007 11006 10719 -3 10719 10720 11007 -3 11008 11007 10720 -3 10720 10721 11008 -3 11009 11008 10721 -3 10721 10722 11009 -3 11010 11009 10722 -3 10722 10723 11010 -3 11011 11010 10723 -3 10723 10724 11011 -3 11012 11011 10724 -3 10724 10725 11012 -3 11013 11012 10725 -3 10725 10726 11014 -3 10725 11014 11013 -3 10726 10727 11015 -3 10726 11015 11014 -3 10727 10728 11016 -3 10727 11016 11015 -3 10728 10729 11017 -3 10728 11017 11016 -3 10729 10730 11018 -3 10729 11018 11017 -3 10730 10731 11019 -3 10730 11019 11018 -3 10731 10732 11020 -3 10731 11020 11019 -3 10732 10733 11021 -3 10732 11021 11020 -3 10733 10734 11022 -3 10733 11022 11021 -3 10734 10735 11023 -3 10734 11023 11022 -3 10735 10736 11024 -3 10735 11024 11023 -3 10736 10737 11025 -3 10736 11025 11024 -3 10737 10738 11026 -3 10737 11026 11025 -3 10738 10739 11027 -3 10738 11027 11026 -3 10739 10740 11028 -3 10739 11028 11027 -3 10740 10741 11029 -3 10740 11029 11028 -3 10741 10742 11030 -3 10741 11030 11029 -3 10742 10743 11031 -3 10742 11031 11030 -3 10743 10744 11032 -3 10743 11032 11031 -3 10744 10745 11033 -3 10744 11033 11032 -3 10745 10746 11034 -3 10745 11034 11033 -3 10746 10747 11035 -3 10746 11035 11034 -3 10747 10748 11036 -3 10747 11036 11035 -3 10748 10749 11037 -3 10748 11037 11036 -3 10749 10750 11038 -3 10749 11038 11037 -3 10750 10751 11039 -3 10750 11039 11038 -3 10751 10752 11040 -3 10751 11040 11039 -3 10752 10753 11041 -3 10752 11041 11040 -3 10753 10754 11042 -3 10753 11042 11041 -3 10754 10755 11043 -3 10754 11043 11042 -3 10755 10756 11044 -3 10755 11044 11043 -3 10756 10757 11045 -3 10756 11045 11044 -3 10757 10758 11046 -3 10757 11046 11045 -3 10758 10759 11047 -3 10758 11047 11046 -3 10759 10760 11048 -3 10759 11048 11047 -3 10760 10761 11049 -3 10760 11049 11048 -3 10761 10762 11050 -3 10761 11050 11049 -3 10762 10763 11051 -3 10762 11051 11050 -3 10763 10764 11052 -3 10763 11052 11051 -3 10765 11053 10766 -3 11054 10766 11053 -3 10765 10835 11053 -3 11123 11053 10835 -3 10766 11054 10767 -3 11055 10767 11054 -3 10767 11055 10768 -3 11056 10768 11055 -3 10768 11056 10769 -3 11057 10769 11056 -3 10769 11057 10770 -3 11058 10770 11057 -3 10770 11058 10771 -3 11059 10771 11058 -3 10771 11059 10772 -3 11060 10772 11059 -3 10772 11060 10773 -3 11061 10773 11060 -3 10773 11061 10774 -3 11062 10774 11061 -3 10774 11062 10775 -3 11063 10775 11062 -3 10775 11063 10776 -3 11064 10776 11063 -3 10776 11064 10777 -3 11065 10777 11064 -3 10777 11065 10778 -3 11066 10778 11065 -3 10778 11066 10779 -3 11067 10779 11066 -3 10779 11067 10780 -3 11068 10780 11067 -3 10780 11068 10781 -3 11069 10781 11068 -3 10781 11069 10782 -3 11070 10782 11069 -3 10782 11070 10783 -3 11071 10783 11070 -3 10783 11071 10784 -3 11072 10784 11071 -3 10784 11072 10785 -3 11073 10785 11072 -3 10785 11073 10786 -3 11074 10786 11073 -3 10786 11074 10787 -3 11075 10787 11074 -3 10787 11075 10788 -3 11076 10788 11075 -3 10788 11076 10789 -3 11077 10789 11076 -3 10789 11077 
10790 -3 11078 10790 11077 -3 10790 11078 10791 -3 11079 10791 11078 -3 10791 11079 10792 -3 11080 10792 11079 -3 10792 11080 10793 -3 11081 10793 11080 -3 10793 11081 10794 -3 11082 10794 11081 -3 10794 11082 10795 -3 11083 10795 11082 -3 10795 11083 10796 -3 11084 10796 11083 -3 10796 11084 10797 -3 11085 10797 11084 -3 10797 11085 10798 -3 11086 10798 11085 -3 10798 11086 10799 -3 11087 10799 11086 -3 10799 11087 10800 -3 11088 10800 11087 -3 10800 11088 10801 -3 11089 10801 11088 -3 10801 11089 10802 -3 11090 10802 11089 -3 10802 11090 10803 -3 11091 10803 11090 -3 10803 11091 10804 -3 11092 10804 11091 -3 10804 11092 10805 -3 11093 10805 11092 -3 10805 11093 10806 -3 11094 10806 11093 -3 10806 11094 10807 -3 11095 10807 11094 -3 10807 11095 10808 -3 11096 10808 11095 -3 10808 11096 10809 -3 11097 10809 11096 -3 10809 11097 10810 -3 11098 10810 11097 -3 10810 11098 10811 -3 11099 10811 11098 -3 10811 11099 10812 -3 11100 10812 11099 -3 10812 11100 10813 -3 11101 10813 11100 -3 10813 11101 10814 -3 11102 10814 11101 -3 10814 11102 10815 -3 11103 10815 11102 -3 10815 11103 10816 -3 11104 10816 11103 -3 10816 11104 10817 -3 11105 10817 11104 -3 10817 11105 11106 -3 10817 11106 10818 -3 10818 11106 11107 -3 10818 11107 10819 -3 10819 11107 11108 -3 10819 11108 10820 -3 10820 11108 11109 -3 10820 11109 10821 -3 10821 11109 11110 -3 10821 11110 10822 -3 10822 11110 11111 -3 10822 11111 10823 -3 10823 11111 11112 -3 10823 11112 10824 -3 10824 11112 11113 -3 10824 11113 10825 -3 10825 11113 11114 -3 10825 11114 10826 -3 10826 11114 11115 -3 10826 11115 10827 -3 10827 11115 11116 -3 10827 11116 10828 -3 10828 11116 11117 -3 10828 11117 10829 -3 10829 11117 11118 -3 10829 11118 10830 -3 10830 11118 11119 -3 10830 11119 10831 -3 10831 11119 11120 -3 10831 11120 10832 -3 10832 11120 11121 -3 10832 11121 10833 -3 10833 11121 11124 -3 10833 11124 10836 -3 10834 11122 11123 -3 10834 11123 10835 -3 10834 10839 11122 -3 11127 11122 10839 -3 10836 11124 11125 -3 10836 11125 10837 -3 10837 11125 11128 -3 10837 11128 10840 -3 10838 11126 11127 -3 10838 11127 10839 -3 10838 10842 11126 -3 11130 11126 10842 -3 10840 11128 11129 -3 10840 11129 10841 -3 10841 11129 11131 -3 10841 11131 10843 -3 10842 10844 11130 -3 11132 11130 10844 -3 10843 11131 11133 -3 10843 11133 10845 -3 10844 10846 11132 -3 11134 11132 10846 -3 10845 11133 11135 -3 10845 11135 10847 -3 10846 10848 11134 -3 11136 11134 10848 -3 10847 11135 11137 -3 10847 11137 10849 -3 10848 10850 11136 -3 11138 11136 10850 -3 10849 11137 11139 -3 10849 11139 10851 -3 10850 10852 11138 -3 11140 11138 10852 -3 10851 11139 11141 -3 10851 11141 10853 -3 10852 10854 11142 -3 10852 11142 11140 -3 10853 11141 11143 -3 10853 11143 10855 -3 10854 10856 11144 -3 10854 11144 11142 -3 10855 11143 11145 -3 10855 11145 10857 -3 10856 10858 11146 -3 10856 11146 11144 -3 10857 11145 11147 -3 10857 11147 10859 -3 10858 10860 11148 -3 10858 11148 11146 -3 10859 11147 11149 -3 10859 11149 10861 -3 10860 10862 11150 -3 10860 11150 11148 -3 10861 11149 11151 -3 10861 11151 10863 -3 10862 10864 11152 -3 10862 11152 11150 -3 10863 11151 11153 -3 10863 11153 10865 -3 10864 10866 11154 -3 10864 11154 11152 -3 10865 11153 11155 -3 10865 11155 10867 -3 10866 10868 11156 -3 10866 11156 11154 -3 10867 11155 11157 -3 10867 11157 10869 -3 10868 10870 11158 -3 10868 11158 11156 -3 10869 11157 11159 -3 10869 11159 10871 -3 10870 10872 11160 -3 10870 11160 11158 -3 10871 11159 11161 -3 10871 11161 10873 -3 10872 10874 11162 -3 10872 11162 11160 -3 10873 11161 11163 -3 10873 11163 10875 
-3 10874 10876 11164 -3 10874 11164 11162 -3 10875 11163 11165 -3 10875 11165 10877 -3 10876 10878 11166 -3 10876 11166 11164 -3 10877 11165 11167 -3 10877 11167 10879 -3 10878 10880 11168 -3 10878 11168 11166 -3 10879 11167 11169 -3 10879 11169 10881 -3 10880 10882 11170 -3 10880 11170 11168 -3 10881 11169 11171 -3 10881 11171 10883 -3 10882 10884 11172 -3 10882 11172 11170 -3 10883 11171 10885 -3 11173 10885 11171 -3 10884 10886 11174 -3 10884 11174 11172 -3 10885 11173 10887 -3 11175 10887 11173 -3 10886 10888 11176 -3 10886 11176 11174 -3 10887 11175 10889 -3 11177 10889 11175 -3 10888 10890 11178 -3 10888 11178 11176 -3 10889 11177 10891 -3 11179 10891 11177 -3 10890 10892 11180 -3 10890 11180 11178 -3 10891 11179 10893 -3 11181 10893 11179 -3 10892 10894 11182 -3 10892 11182 11180 -3 10893 11181 10895 -3 11183 10895 11181 -3 10894 10896 11184 -3 10894 11184 11182 -3 10895 11183 10897 -3 11185 10897 11183 -3 10896 10898 11186 -3 10896 11186 11184 -3 10897 11185 10899 -3 11187 10899 11185 -3 10898 10900 11188 -3 10898 11188 11186 -3 10899 11187 10901 -3 11189 10901 11187 -3 10900 10902 11190 -3 10900 11190 11188 -3 10901 11189 10903 -3 11191 10903 11189 -3 10902 10904 11192 -3 10902 11192 11190 -3 10903 11191 10905 -3 11193 10905 11191 -3 10904 10906 11194 -3 10904 11194 11192 -3 10905 11193 10907 -3 11195 10907 11193 -3 10906 10908 11196 -3 10906 11196 11194 -3 10907 11195 10909 -3 11197 10909 11195 -3 10908 10910 11198 -3 10908 11198 11196 -3 10909 11197 10911 -3 11199 10911 11197 -3 10910 10912 11200 -3 10910 11200 11198 -3 10911 11199 10913 -3 11201 10913 11199 -3 10912 10914 11202 -3 10912 11202 11200 -3 10913 11201 10915 -3 11203 10915 11201 -3 10914 10916 11204 -3 10914 11204 11202 -3 10915 11203 10917 -3 11205 10917 11203 -3 10916 10918 11206 -3 10916 11206 11204 -3 10917 11205 10919 -3 11207 10919 11205 -3 10918 10920 11206 -3 11208 11206 10920 -3 10919 11207 10921 -3 11209 10921 11207 -3 10920 10922 11208 -3 11210 11208 10922 -3 10921 11209 10923 -3 11211 10923 11209 -3 10922 10924 11210 -3 11212 11210 10924 -3 10923 11211 10925 -3 11213 10925 11211 -3 10924 10926 11212 -3 11214 11212 10926 -3 10925 11213 10927 -3 11215 10927 11213 -3 10926 10928 11214 -3 11216 11214 10928 -3 10927 11215 10929 -3 11217 10929 11215 -3 10928 10930 11216 -3 11218 11216 10930 -3 10929 11217 10931 -3 11219 10931 11217 -3 10930 10932 11218 -3 11220 11218 10932 -3 10931 11219 10933 -3 11221 10933 11219 -3 10932 10934 11220 -3 11222 11220 10934 -3 10933 11221 10935 -3 11223 10935 11221 -3 10934 10936 11222 -3 11224 11222 10936 -3 10935 11223 10937 -3 11225 10937 11223 -3 10936 10938 11224 -3 11226 11224 10938 -3 10937 11225 10939 -3 11227 10939 11225 -3 10938 10940 11226 -3 11228 11226 10940 -3 10939 11227 10941 -3 11229 10941 11227 -3 10940 10942 11228 -3 11230 11228 10942 -3 10941 11229 10943 -3 11231 10943 11229 -3 10942 10944 11230 -3 11232 11230 10944 -3 10943 11231 10945 -3 11233 10945 11231 -3 10944 10946 11232 -3 11234 11232 10946 -3 10945 11233 10947 -3 11235 10947 11233 -3 10946 10948 11234 -3 11236 11234 10948 -3 10947 11235 11237 -3 10947 11237 10949 -3 10948 10950 11236 -3 11238 11236 10950 -3 10949 11237 11239 -3 10949 11239 10951 -3 10950 10952 11238 -3 11240 11238 10952 -3 10951 11239 11241 -3 10951 11241 10953 -3 10952 10954 11240 -3 11242 11240 10954 -3 10953 11241 11243 -3 10953 11243 10955 -3 10954 10956 11242 -3 11244 11242 10956 -3 10955 11243 11245 -3 10955 11245 10957 -3 10956 10958 11244 -3 11246 11244 10958 -3 10957 11245 11247 -3 10957 11247 10959 -3 10958 10960 11246 -3 
11248 11246 10960 -3 10959 11247 11249 -3 10959 11249 10961 -3 10960 10962 11248 -3 11250 11248 10962 -3 10961 11249 11251 -3 10961 11251 10963 -3 10962 10964 11250 -3 11252 11250 10964 -3 10963 11251 11253 -3 10963 11253 10965 -3 10964 10966 11252 -3 11254 11252 10966 -3 10965 11253 11255 -3 10965 11255 10967 -3 10966 10968 11254 -3 11256 11254 10968 -3 10967 11255 11257 -3 10967 11257 10969 -3 10968 10970 11256 -3 11258 11256 10970 -3 10969 11257 11259 -3 10969 11259 10971 -3 10970 10972 11258 -3 11260 11258 10972 -3 10971 11259 11261 -3 10971 11261 10973 -3 10972 10974 11260 -3 11262 11260 10974 -3 10973 11261 11263 -3 10973 11263 10975 -3 10974 10976 11262 -3 11264 11262 10976 -3 10975 11263 11267 -3 10975 11267 10979 -3 10976 10977 11264 -3 11265 11264 10977 -3 10977 10980 11265 -3 11268 11265 10980 -3 10978 10979 11266 -3 11267 11266 10979 -3 10978 11266 11271 -3 10978 11271 10983 -3 10980 10981 11268 -3 11269 11268 10981 -3 10981 10984 11272 -3 10981 11272 11269 -3 10982 10983 11271 -3 10982 11271 11270 -3 10982 11270 11340 -3 10982 11340 11052 -3 10984 10985 11273 -3 10984 11273 11272 -3 10985 10986 11274 -3 10985 11274 11273 -3 10986 10987 11275 -3 10986 11275 11274 -3 10987 10988 11276 -3 10987 11276 11275 -3 10988 10989 11277 -3 10988 11277 11276 -3 10989 10990 11278 -3 10989 11278 11277 -3 10990 10991 11279 -3 10990 11279 11278 -3 10991 10992 11280 -3 10991 11280 11279 -3 10992 10993 11281 -3 10992 11281 11280 -3 10993 10994 11282 -3 10993 11282 11281 -3 10994 10995 11283 -3 10994 11283 11282 -3 10995 10996 11284 -3 10995 11284 11283 -3 10996 10997 11285 -3 10996 11285 11284 -3 10997 10998 11286 -3 10997 11286 11285 -3 10998 10999 11287 -3 10998 11287 11286 -3 10999 11000 11288 -3 10999 11288 11287 -3 11000 11001 11289 -3 11000 11289 11288 -3 11001 11002 11290 -3 11001 11290 11289 -3 11002 11003 11291 -3 11002 11291 11290 -3 11003 11004 11292 -3 11003 11292 11291 -3 11004 11005 11293 -3 11004 11293 11292 -3 11005 11006 11294 -3 11005 11294 11293 -3 11006 11007 11295 -3 11006 11295 11294 -3 11007 11008 11296 -3 11007 11296 11295 -3 11008 11009 11297 -3 11008 11297 11296 -3 11009 11010 11298 -3 11009 11298 11297 -3 11010 11011 11299 -3 11010 11299 11298 -3 11011 11012 11300 -3 11011 11300 11299 -3 11012 11013 11301 -3 11012 11301 11300 -3 11013 11014 11302 -3 11013 11302 11301 -3 11014 11015 11303 -3 11014 11303 11302 -3 11015 11016 11304 -3 11015 11304 11303 -3 11016 11017 11305 -3 11016 11305 11304 -3 11017 11018 11306 -3 11017 11306 11305 -3 11018 11019 11307 -3 11018 11307 11306 -3 11019 11020 11308 -3 11019 11308 11307 -3 11020 11021 11309 -3 11020 11309 11308 -3 11021 11022 11310 -3 11021 11310 11309 -3 11022 11023 11311 -3 11022 11311 11310 -3 11023 11024 11312 -3 11023 11312 11311 -3 11024 11025 11313 -3 11024 11313 11312 -3 11025 11026 11314 -3 11025 11314 11313 -3 11026 11027 11315 -3 11026 11315 11314 -3 11027 11028 11316 -3 11027 11316 11315 -3 11028 11029 11317 -3 11028 11317 11316 -3 11029 11030 11318 -3 11029 11318 11317 -3 11030 11031 11319 -3 11030 11319 11318 -3 11031 11032 11320 -3 11031 11320 11319 -3 11032 11033 11321 -3 11032 11321 11320 -3 11033 11034 11322 -3 11033 11322 11321 -3 11034 11035 11323 -3 11034 11323 11322 -3 11035 11036 11324 -3 11035 11324 11323 -3 11036 11037 11325 -3 11036 11325 11324 -3 11037 11038 11326 -3 11037 11326 11325 -3 11038 11039 11327 -3 11038 11327 11326 -3 11039 11040 11328 -3 11039 11328 11327 -3 11040 11041 11329 -3 11040 11329 11328 -3 11041 11042 11330 -3 11041 11330 11329 -3 11042 11043 11331 -3 11042 11331 11330 -3 11043 
11044 11332 -3 11043 11332 11331 -3 11044 11045 11333 -3 11044 11333 11332 -3 11045 11046 11334 -3 11045 11334 11333 -3 11046 11047 11334 -3 11335 11334 11047 -3 11047 11048 11335 -3 11336 11335 11048 -3 11048 11049 11336 -3 11337 11336 11049 -3 11049 11050 11337 -3 11338 11337 11050 -3 11050 11051 11338 -3 11339 11338 11051 -3 11051 11052 11339 -3 11340 11339 11052 -3 11053 11341 11054 -3 11342 11054 11341 -3 11053 11123 11341 -3 11411 11341 11123 -3 11054 11342 11055 -3 11343 11055 11342 -3 11055 11343 11056 -3 11344 11056 11343 -3 11056 11344 11057 -3 11345 11057 11344 -3 11057 11345 11058 -3 11346 11058 11345 -3 11058 11346 11059 -3 11347 11059 11346 -3 11059 11347 11060 -3 11348 11060 11347 -3 11060 11348 11061 -3 11349 11061 11348 -3 11061 11349 11062 -3 11350 11062 11349 -3 11062 11350 11063 -3 11351 11063 11350 -3 11063 11351 11064 -3 11352 11064 11351 -3 11064 11352 11065 -3 11353 11065 11352 -3 11065 11353 11066 -3 11354 11066 11353 -3 11066 11354 11067 -3 11355 11067 11354 -3 11067 11355 11068 -3 11356 11068 11355 -3 11068 11356 11069 -3 11357 11069 11356 -3 11069 11357 11070 -3 11358 11070 11357 -3 11070 11358 11071 -3 11359 11071 11358 -3 11071 11359 11072 -3 11360 11072 11359 -3 11072 11360 11073 -3 11361 11073 11360 -3 11073 11361 11074 -3 11362 11074 11361 -3 11074 11362 11075 -3 11363 11075 11362 -3 11075 11363 11076 -3 11364 11076 11363 -3 11076 11364 11365 -3 11076 11365 11077 -3 11077 11365 11366 -3 11077 11366 11078 -3 11078 11366 11367 -3 11078 11367 11079 -3 11079 11367 11368 -3 11079 11368 11080 -3 11080 11368 11369 -3 11080 11369 11081 -3 11081 11369 11370 -3 11081 11370 11082 -3 11082 11370 11371 -3 11082 11371 11083 -3 11083 11371 11372 -3 11083 11372 11084 -3 11084 11372 11373 -3 11084 11373 11085 -3 11085 11373 11374 -3 11085 11374 11086 -3 11086 11374 11375 -3 11086 11375 11087 -3 11087 11375 11376 -3 11087 11376 11088 -3 11088 11376 11377 -3 11088 11377 11089 -3 11089 11377 11378 -3 11089 11378 11090 -3 11090 11378 11379 -3 11090 11379 11091 -3 11091 11379 11380 -3 11091 11380 11092 -3 11092 11380 11381 -3 11092 11381 11093 -3 11093 11381 11382 -3 11093 11382 11094 -3 11094 11382 11383 -3 11094 11383 11095 -3 11095 11383 11384 -3 11095 11384 11096 -3 11096 11384 11385 -3 11096 11385 11097 -3 11097 11385 11386 -3 11097 11386 11098 -3 11098 11386 11387 -3 11098 11387 11099 -3 11099 11387 11388 -3 11099 11388 11100 -3 11100 11388 11389 -3 11100 11389 11101 -3 11101 11389 11390 -3 11101 11390 11102 -3 11102 11390 11391 -3 11102 11391 11103 -3 11103 11391 11392 -3 11103 11392 11104 -3 11104 11392 11393 -3 11104 11393 11105 -3 11105 11393 11394 -3 11105 11394 11106 -3 11106 11394 11395 -3 11106 11395 11107 -3 11107 11395 11396 -3 11107 11396 11108 -3 11108 11396 11397 -3 11108 11397 11109 -3 11109 11397 11398 -3 11109 11398 11110 -3 11110 11398 11399 -3 11110 11399 11111 -3 11111 11399 11400 -3 11111 11400 11112 -3 11112 11400 11401 -3 11112 11401 11113 -3 11113 11401 11402 -3 11113 11402 11114 -3 11114 11402 11403 -3 11114 11403 11115 -3 11115 11403 11404 -3 11115 11404 11116 -3 11116 11404 11405 -3 11116 11405 11117 -3 11117 11405 11406 -3 11117 11406 11118 -3 11118 11406 11407 -3 11118 11407 11119 -3 11119 11407 11408 -3 11119 11408 11120 -3 11120 11408 11409 -3 11120 11409 11121 -3 11121 11409 11412 -3 11121 11412 11124 -3 11122 11410 11411 -3 11122 11411 11123 -3 11122 11127 11415 -3 11122 11415 11410 -3 11124 11412 11413 -3 11124 11413 11125 -3 11125 11413 11416 -3 11125 11416 11128 -3 11126 11414 11415 -3 11126 11415 11127 -3 11126 11130 11418 -3 11126 11418 
11414 -3 11128 11416 11417 -3 11128 11417 11129 -3 11129 11417 11419 -3 11129 11419 11131 -3 11130 11132 11420 -3 11130 11420 11418 -3 11131 11419 11421 -3 11131 11421 11133 -3 11132 11134 11422 -3 11132 11422 11420 -3 11133 11421 11423 -3 11133 11423 11135 -3 11134 11136 11424 -3 11134 11424 11422 -3 11135 11423 11425 -3 11135 11425 11137 -3 11136 11138 11426 -3 11136 11426 11424 -3 11137 11425 11427 -3 11137 11427 11139 -3 11138 11140 11428 -3 11138 11428 11426 -3 11139 11427 11429 -3 11139 11429 11141 -3 11140 11142 11430 -3 11140 11430 11428 -3 11141 11429 11431 -3 11141 11431 11143 -3 11142 11144 11432 -3 11142 11432 11430 -3 11143 11431 11145 -3 11433 11145 11431 -3 11144 11146 11434 -3 11144 11434 11432 -3 11145 11433 11147 -3 11435 11147 11433 -3 11146 11148 11436 -3 11146 11436 11434 -3 11147 11435 11149 -3 11437 11149 11435 -3 11148 11150 11438 -3 11148 11438 11436 -3 11149 11437 11151 -3 11439 11151 11437 -3 11150 11152 11440 -3 11150 11440 11438 -3 11151 11439 11153 -3 11441 11153 11439 -3 11152 11154 11442 -3 11152 11442 11440 -3 11153 11441 11155 -3 11443 11155 11441 -3 11154 11156 11444 -3 11154 11444 11442 -3 11155 11443 11157 -3 11445 11157 11443 -3 11156 11158 11446 -3 11156 11446 11444 -3 11157 11445 11159 -3 11447 11159 11445 -3 11158 11160 11448 -3 11158 11448 11446 -3 11159 11447 11161 -3 11449 11161 11447 -3 11160 11162 11450 -3 11160 11450 11448 -3 11161 11449 11163 -3 11451 11163 11449 -3 11162 11164 11452 -3 11162 11452 11450 -3 11163 11451 11165 -3 11453 11165 11451 -3 11164 11166 11454 -3 11164 11454 11452 -3 11165 11453 11167 -3 11455 11167 11453 -3 11166 11168 11456 -3 11166 11456 11454 -3 11167 11455 11169 -3 11457 11169 11455 -3 11168 11170 11458 -3 11168 11458 11456 -3 11169 11457 11171 -3 11459 11171 11457 -3 11170 11172 11460 -3 11170 11460 11458 -3 11171 11459 11173 -3 11461 11173 11459 -3 11172 11174 11462 -3 11172 11462 11460 -3 11173 11461 11175 -3 11463 11175 11461 -3 11174 11176 11464 -3 11174 11464 11462 -3 11175 11463 11177 -3 11465 11177 11463 -3 11176 11178 11464 -3 11466 11464 11178 -3 11177 11465 11179 -3 11467 11179 11465 -3 11178 11180 11466 -3 11468 11466 11180 -3 11179 11467 11181 -3 11469 11181 11467 -3 11180 11182 11468 -3 11470 11468 11182 -3 11181 11469 11183 -3 11471 11183 11469 -3 11182 11184 11470 -3 11472 11470 11184 -3 11183 11471 11185 -3 11473 11185 11471 -3 11184 11186 11472 -3 11474 11472 11186 -3 11185 11473 11187 -3 11475 11187 11473 -3 11186 11188 11474 -3 11476 11474 11188 -3 11187 11475 11189 -3 11477 11189 11475 -3 11188 11190 11476 -3 11478 11476 11190 -3 11189 11477 11191 -3 11479 11191 11477 -3 11190 11192 11478 -3 11480 11478 11192 -3 11191 11479 11193 -3 11481 11193 11479 -3 11192 11194 11480 -3 11482 11480 11194 -3 11193 11481 11195 -3 11483 11195 11481 -3 11194 11196 11482 -3 11484 11482 11196 -3 11195 11483 11197 -3 11485 11197 11483 -3 11196 11198 11484 -3 11486 11484 11198 -3 11197 11485 11199 -3 11487 11199 11485 -3 11198 11200 11486 -3 11488 11486 11200 -3 11199 11487 11201 -3 11489 11201 11487 -3 11200 11202 11488 -3 11490 11488 11202 -3 11201 11489 11203 -3 11491 11203 11489 -3 11202 11204 11490 -3 11492 11490 11204 -3 11203 11491 11205 -3 11493 11205 11491 -3 11204 11206 11492 -3 11494 11492 11206 -3 11205 11493 11207 -3 11495 11207 11493 -3 11206 11208 11494 -3 11496 11494 11208 -3 11207 11495 11497 -3 11207 11497 11209 -3 11208 11210 11496 -3 11498 11496 11210 -3 11209 11497 11499 -3 11209 11499 11211 -3 11210 11212 11498 -3 11500 11498 11212 -3 11211 11499 11501 -3 11211 11501 11213 -3 11212 11214 11500 
-3 11502 11500 11214 -3 11213 11501 11503 -3 11213 11503 11215 -3 11214 11216 11502 -3 11504 11502 11216 -3 11215 11503 11505 -3 11215 11505 11217 -3 11216 11218 11504 -3 11506 11504 11218 -3 11217 11505 11507 -3 11217 11507 11219 -3 11218 11220 11506 -3 11508 11506 11220 -3 11219 11507 11509 -3 11219 11509 11221 -3 11220 11222 11508 -3 11510 11508 11222 -3 11221 11509 11511 -3 11221 11511 11223 -3 11222 11224 11510 -3 11512 11510 11224 -3 11223 11511 11513 -3 11223 11513 11225 -3 11224 11226 11512 -3 11514 11512 11226 -3 11225 11513 11515 -3 11225 11515 11227 -3 11226 11228 11514 -3 11516 11514 11228 -3 11227 11515 11517 -3 11227 11517 11229 -3 11228 11230 11516 -3 11518 11516 11230 -3 11229 11517 11519 -3 11229 11519 11231 -3 11230 11232 11518 -3 11520 11518 11232 -3 11231 11519 11521 -3 11231 11521 11233 -3 11232 11234 11520 -3 11522 11520 11234 -3 11233 11521 11523 -3 11233 11523 11235 -3 11234 11236 11522 -3 11524 11522 11236 -3 11235 11523 11525 -3 11235 11525 11237 -3 11236 11238 11524 -3 11526 11524 11238 -3 11237 11525 11527 -3 11237 11527 11239 -3 11238 11240 11526 -3 11528 11526 11240 -3 11239 11527 11529 -3 11239 11529 11241 -3 11240 11242 11528 -3 11530 11528 11242 -3 11241 11529 11531 -3 11241 11531 11243 -3 11242 11244 11532 -3 11242 11532 11530 -3 11243 11531 11533 -3 11243 11533 11245 -3 11244 11246 11534 -3 11244 11534 11532 -3 11245 11533 11535 -3 11245 11535 11247 -3 11246 11248 11536 -3 11246 11536 11534 -3 11247 11535 11537 -3 11247 11537 11249 -3 11248 11250 11538 -3 11248 11538 11536 -3 11249 11537 11539 -3 11249 11539 11251 -3 11250 11252 11540 -3 11250 11540 11538 -3 11251 11539 11541 -3 11251 11541 11253 -3 11252 11254 11542 -3 11252 11542 11540 -3 11253 11541 11543 -3 11253 11543 11255 -3 11254 11256 11544 -3 11254 11544 11542 -3 11255 11543 11545 -3 11255 11545 11257 -3 11256 11258 11546 -3 11256 11546 11544 -3 11257 11545 11547 -3 11257 11547 11259 -3 11258 11260 11548 -3 11258 11548 11546 -3 11259 11547 11549 -3 11259 11549 11261 -3 11260 11262 11550 -3 11260 11550 11548 -3 11261 11549 11551 -3 11261 11551 11263 -3 11262 11264 11552 -3 11262 11552 11550 -3 11263 11551 11555 -3 11263 11555 11267 -3 11264 11265 11553 -3 11264 11553 11552 -3 11265 11268 11556 -3 11265 11556 11553 -3 11266 11267 11555 -3 11266 11555 11554 -3 11266 11554 11559 -3 11266 11559 11271 -3 11268 11269 11557 -3 11268 11557 11556 -3 11269 11272 11560 -3 11269 11560 11557 -3 11270 11271 11559 -3 11270 11559 11558 -3 11270 11558 11628 -3 11270 11628 11340 -3 11272 11273 11561 -3 11272 11561 11560 -3 11273 11274 11562 -3 11273 11562 11561 -3 11274 11275 11563 -3 11274 11563 11562 -3 11275 11276 11564 -3 11275 11564 11563 -3 11276 11277 11565 -3 11276 11565 11564 -3 11277 11278 11566 -3 11277 11566 11565 -3 11278 11279 11567 -3 11278 11567 11566 -3 11279 11280 11568 -3 11279 11568 11567 -3 11280 11281 11569 -3 11280 11569 11568 -3 11281 11282 11570 -3 11281 11570 11569 -3 11282 11283 11571 -3 11282 11571 11570 -3 11283 11284 11572 -3 11283 11572 11571 -3 11284 11285 11573 -3 11284 11573 11572 -3 11285 11286 11574 -3 11285 11574 11573 -3 11286 11287 11575 -3 11286 11575 11574 -3 11287 11288 11576 -3 11287 11576 11575 -3 11288 11289 11577 -3 11288 11577 11576 -3 11289 11290 11578 -3 11289 11578 11577 -3 11290 11291 11579 -3 11290 11579 11578 -3 11291 11292 11580 -3 11291 11580 11579 -3 11292 11293 11581 -3 11292 11581 11580 -3 11293 11294 11582 -3 11293 11582 11581 -3 11294 11295 11583 -3 11294 11583 11582 -3 11295 11296 11584 -3 11295 11584 11583 -3 11296 11297 11585 -3 11296 11585 11584 -3 
11297 11298 11586 -3 11297 11586 11585 -3 11298 11299 11587 -3 11298 11587 11586 -3 11299 11300 11588 -3 11299 11588 11587 -3 11300 11301 11589 -3 11300 11589 11588 -3 11301 11302 11590 -3 11301 11590 11589 -3 11302 11303 11591 -3 11302 11591 11590 -3 11303 11304 11592 -3 11303 11592 11591 -3 11304 11305 11593 -3 11304 11593 11592 -3 11305 11306 11594 -3 11305 11594 11593 -3 11306 11307 11595 -3 11306 11595 11594 -3 11307 11308 11595 -3 11596 11595 11308 -3 11308 11309 11596 -3 11597 11596 11309 -3 11309 11310 11597 -3 11598 11597 11310 -3 11310 11311 11598 -3 11599 11598 11311 -3 11311 11312 11599 -3 11600 11599 11312 -3 11312 11313 11600 -3 11601 11600 11313 -3 11313 11314 11601 -3 11602 11601 11314 -3 11314 11315 11602 -3 11603 11602 11315 -3 11315 11316 11603 -3 11604 11603 11316 -3 11316 11317 11604 -3 11605 11604 11317 -3 11317 11318 11605 -3 11606 11605 11318 -3 11318 11319 11606 -3 11607 11606 11319 -3 11319 11320 11607 -3 11608 11607 11320 -3 11320 11321 11608 -3 11609 11608 11321 -3 11321 11322 11609 -3 11610 11609 11322 -3 11322 11323 11610 -3 11611 11610 11323 -3 11323 11324 11611 -3 11612 11611 11324 -3 11324 11325 11612 -3 11613 11612 11325 -3 11325 11326 11613 -3 11614 11613 11326 -3 11326 11327 11614 -3 11615 11614 11327 -3 11327 11328 11615 -3 11616 11615 11328 -3 11328 11329 11616 -3 11617 11616 11329 -3 11329 11330 11617 -3 11618 11617 11330 -3 11330 11331 11618 -3 11619 11618 11331 -3 11331 11332 11619 -3 11620 11619 11332 -3 11332 11333 11620 -3 11621 11620 11333 -3 11333 11334 11621 -3 11622 11621 11334 -3 11334 11335 11622 -3 11623 11622 11335 -3 11335 11336 11623 -3 11624 11623 11336 -3 11336 11337 11624 -3 11625 11624 11337 -3 11337 11338 11625 -3 11626 11625 11338 -3 11338 11339 11626 -3 11627 11626 11339 -3 11339 11340 11627 -3 11628 11627 11340 -3 11341 11629 11630 -3 11341 11630 11342 -3 11341 11411 11699 -3 11341 11699 11629 -3 11342 11630 11631 -3 11342 11631 11343 -3 11343 11631 11632 -3 11343 11632 11344 -3 11344 11632 11633 -3 11344 11633 11345 -3 11345 11633 11634 -3 11345 11634 11346 -3 11346 11634 11635 -3 11346 11635 11347 -3 11347 11635 11636 -3 11347 11636 11348 -3 11348 11636 11637 -3 11348 11637 11349 -3 11349 11637 11638 -3 11349 11638 11350 -3 11350 11638 11639 -3 11350 11639 11351 -3 11351 11639 11640 -3 11351 11640 11352 -3 11352 11640 11641 -3 11352 11641 11353 -3 11353 11641 11642 -3 11353 11642 11354 -3 11354 11642 11643 -3 11354 11643 11355 -3 11355 11643 11644 -3 11355 11644 11356 -3 11356 11644 11645 -3 11356 11645 11357 -3 11357 11645 11646 -3 11357 11646 11358 -3 11358 11646 11647 -3 11358 11647 11359 -3 11359 11647 11648 -3 11359 11648 11360 -3 11360 11648 11649 -3 11360 11649 11361 -3 11361 11649 11650 -3 11361 11650 11362 -3 11362 11650 11651 -3 11362 11651 11363 -3 11363 11651 11652 -3 11363 11652 11364 -3 11364 11652 11653 -3 11364 11653 11365 -3 11365 11653 11654 -3 11365 11654 11366 -3 11366 11654 11655 -3 11366 11655 11367 -3 11367 11655 11656 -3 11367 11656 11368 -3 11368 11656 11657 -3 11368 11657 11369 -3 11369 11657 11658 -3 11369 11658 11370 -3 11370 11658 11659 -3 11370 11659 11371 -3 11371 11659 11660 -3 11371 11660 11372 -3 11372 11660 11661 -3 11372 11661 11373 -3 11373 11661 11662 -3 11373 11662 11374 -3 11374 11662 11663 -3 11374 11663 11375 -3 11375 11663 11664 -3 11375 11664 11376 -3 11376 11664 11665 -3 11376 11665 11377 -3 11377 11665 11666 -3 11377 11666 11378 -3 11378 11666 11667 -3 11378 11667 11379 -3 11379 11667 11668 -3 11379 11668 11380 -3 11380 11668 11669 -3 11380 11669 11381 -3 11381 11669 11670 -3 11381 
11670 11382 -3 11382 11670 11671 -3 11382 11671 11383 -3 11383 11671 11672 -3 11383 11672 11384 -3 11384 11672 11673 -3 11384 11673 11385 -3 11385 11673 11674 -3 11385 11674 11386 -3 11386 11674 11675 -3 11386 11675 11387 -3 11387 11675 11676 -3 11387 11676 11388 -3 11388 11676 11677 -3 11388 11677 11389 -3 11389 11677 11678 -3 11389 11678 11390 -3 11390 11678 11679 -3 11390 11679 11391 -3 11391 11679 11680 -3 11391 11680 11392 -3 11392 11680 11681 -3 11392 11681 11393 -3 11393 11681 11682 -3 11393 11682 11394 -3 11394 11682 11683 -3 11394 11683 11395 -3 11395 11683 11684 -3 11395 11684 11396 -3 11396 11684 11685 -3 11396 11685 11397 -3 11397 11685 11686 -3 11397 11686 11398 -3 11398 11686 11687 -3 11398 11687 11399 -3 11399 11687 11688 -3 11399 11688 11400 -3 11400 11688 11689 -3 11400 11689 11401 -3 11401 11689 11690 -3 11401 11690 11402 -3 11402 11690 11691 -3 11402 11691 11403 -3 11403 11691 11692 -3 11403 11692 11404 -3 11404 11692 11693 -3 11404 11693 11405 -3 11405 11693 11406 -3 11694 11406 11693 -3 11406 11694 11407 -3 11695 11407 11694 -3 11407 11695 11408 -3 11696 11408 11695 -3 11408 11696 11409 -3 11697 11409 11696 -3 11409 11697 11412 -3 11700 11412 11697 -3 11410 11698 11411 -3 11699 11411 11698 -3 11410 11415 11703 -3 11410 11703 11698 -3 11412 11700 11413 -3 11701 11413 11700 -3 11413 11701 11416 -3 11704 11416 11701 -3 11414 11702 11415 -3 11703 11415 11702 -3 11414 11418 11706 -3 11414 11706 11702 -3 11416 11704 11417 -3 11705 11417 11704 -3 11417 11705 11419 -3 11707 11419 11705 -3 11418 11420 11708 -3 11418 11708 11706 -3 11419 11707 11421 -3 11709 11421 11707 -3 11420 11422 11710 -3 11420 11710 11708 -3 11421 11709 11423 -3 11711 11423 11709 -3 11422 11424 11712 -3 11422 11712 11710 -3 11423 11711 11425 -3 11713 11425 11711 -3 11424 11426 11714 -3 11424 11714 11712 -3 11425 11713 11427 -3 11715 11427 11713 -3 11426 11428 11716 -3 11426 11716 11714 -3 11427 11715 11429 -3 11717 11429 11715 -3 11428 11430 11718 -3 11428 11718 11716 -3 11429 11717 11431 -3 11719 11431 11717 -3 11430 11432 11720 -3 11430 11720 11718 -3 11431 11719 11433 -3 11721 11433 11719 -3 11432 11434 11722 -3 11432 11722 11720 -3 11433 11721 11435 -3 11723 11435 11721 -3 11434 11436 11724 -3 11434 11724 11722 -3 11435 11723 11437 -3 11725 11437 11723 -3 11436 11438 11726 -3 11436 11726 11724 -3 11437 11725 11439 -3 11727 11439 11725 -3 11438 11440 11726 -3 11728 11726 11440 -3 11439 11727 11441 -3 11729 11441 11727 -3 11440 11442 11728 -3 11730 11728 11442 -3 11441 11729 11443 -3 11731 11443 11729 -3 11442 11444 11730 -3 11732 11730 11444 -3 11443 11731 11445 -3 11733 11445 11731 -3 11444 11446 11732 -3 11734 11732 11446 -3 11445 11733 11447 -3 11735 11447 11733 -3 11446 11448 11734 -3 11736 11734 11448 -3 11447 11735 11449 -3 11737 11449 11735 -3 11448 11450 11736 -3 11738 11736 11450 -3 11449 11737 11451 -3 11739 11451 11737 -3 11450 11452 11738 -3 11740 11738 11452 -3 11451 11739 11453 -3 11741 11453 11739 -3 11452 11454 11740 -3 11742 11740 11454 -3 11453 11741 11455 -3 11743 11455 11741 -3 11454 11456 11742 -3 11744 11742 11456 -3 11455 11743 11457 -3 11745 11457 11743 -3 11456 11458 11744 -3 11746 11744 11458 -3 11457 11745 11459 -3 11747 11459 11745 -3 11458 11460 11746 -3 11748 11746 11460 -3 11459 11747 11461 -3 11749 11461 11747 -3 11460 11462 11748 -3 11750 11748 11462 -3 11461 11749 11463 -3 11751 11463 11749 -3 11462 11464 11750 -3 11752 11750 11464 -3 11463 11751 11465 -3 11753 11465 11751 -3 11464 11466 11752 -3 11754 11752 11466 -3 11465 11753 11467 -3 11755 11467 11753 -3 11466 11468 
11754 -3 11756 11754 11468 -3 11467 11755 11469 -3 11757 11469 11755 -3 11468 11470 11756 -3 11758 11756 11470 -3 11469 11757 11471 -3 11759 11471 11757 -3 11470 11472 11758 -3 11760 11758 11472 -3 11471 11759 11761 -3 11471 11761 11473 -3 11472 11474 11760 -3 11762 11760 11474 -3 11473 11761 11763 -3 11473 11763 11475 -3 11474 11476 11762 -3 11764 11762 11476 -3 11475 11763 11765 -3 11475 11765 11477 -3 11476 11478 11764 -3 11766 11764 11478 -3 11477 11765 11767 -3 11477 11767 11479 -3 11478 11480 11766 -3 11768 11766 11480 -3 11479 11767 11769 -3 11479 11769 11481 -3 11480 11482 11768 -3 11770 11768 11482 -3 11481 11769 11771 -3 11481 11771 11483 -3 11482 11484 11770 -3 11772 11770 11484 -3 11483 11771 11773 -3 11483 11773 11485 -3 11484 11486 11772 -3 11774 11772 11486 -3 11485 11773 11775 -3 11485 11775 11487 -3 11486 11488 11774 -3 11776 11774 11488 -3 11487 11775 11777 -3 11487 11777 11489 -3 11488 11490 11776 -3 11778 11776 11490 -3 11489 11777 11779 -3 11489 11779 11491 -3 11490 11492 11778 -3 11780 11778 11492 -3 11491 11779 11781 -3 11491 11781 11493 -3 11492 11494 11780 -3 11782 11780 11494 -3 11493 11781 11783 -3 11493 11783 11495 -3 11494 11496 11782 -3 11784 11782 11496 -3 11495 11783 11785 -3 11495 11785 11497 -3 11496 11498 11784 -3 11786 11784 11498 -3 11497 11785 11787 -3 11497 11787 11499 -3 11498 11500 11786 -3 11788 11786 11500 -3 11499 11787 11789 -3 11499 11789 11501 -3 11500 11502 11788 -3 11790 11788 11502 -3 11501 11789 11791 -3 11501 11791 11503 -3 11502 11504 11790 -3 11792 11790 11504 -3 11503 11791 11793 -3 11503 11793 11505 -3 11504 11506 11794 -3 11504 11794 11792 -3 11505 11793 11795 -3 11505 11795 11507 -3 11506 11508 11796 -3 11506 11796 11794 -3 11507 11795 11797 -3 11507 11797 11509 -3 11508 11510 11798 -3 11508 11798 11796 -3 11509 11797 11799 -3 11509 11799 11511 -3 11510 11512 11800 -3 11510 11800 11798 -3 11511 11799 11801 -3 11511 11801 11513 -3 11512 11514 11802 -3 11512 11802 11800 -3 11513 11801 11803 -3 11513 11803 11515 -3 11514 11516 11804 -3 11514 11804 11802 -3 11515 11803 11805 -3 11515 11805 11517 -3 11516 11518 11806 -3 11516 11806 11804 -3 11517 11805 11807 -3 11517 11807 11519 -3 11518 11520 11808 -3 11518 11808 11806 -3 11519 11807 11809 -3 11519 11809 11521 -3 11520 11522 11810 -3 11520 11810 11808 -3 11521 11809 11811 -3 11521 11811 11523 -3 11522 11524 11812 -3 11522 11812 11810 -3 11523 11811 11813 -3 11523 11813 11525 -3 11524 11526 11814 -3 11524 11814 11812 -3 11525 11813 11815 -3 11525 11815 11527 -3 11526 11528 11816 -3 11526 11816 11814 -3 11527 11815 11817 -3 11527 11817 11529 -3 11528 11530 11818 -3 11528 11818 11816 -3 11529 11817 11819 -3 11529 11819 11531 -3 11530 11532 11820 -3 11530 11820 11818 -3 11531 11819 11821 -3 11531 11821 11533 -3 11532 11534 11822 -3 11532 11822 11820 -3 11533 11821 11823 -3 11533 11823 11535 -3 11534 11536 11824 -3 11534 11824 11822 -3 11535 11823 11825 -3 11535 11825 11537 -3 11536 11538 11826 -3 11536 11826 11824 -3 11537 11825 11827 -3 11537 11827 11539 -3 11538 11540 11828 -3 11538 11828 11826 -3 11539 11827 11541 -3 11829 11541 11827 -3 11540 11542 11830 -3 11540 11830 11828 -3 11541 11829 11543 -3 11831 11543 11829 -3 11542 11544 11832 -3 11542 11832 11830 -3 11543 11831 11545 -3 11833 11545 11831 -3 11544 11546 11834 -3 11544 11834 11832 -3 11545 11833 11547 -3 11835 11547 11833 -3 11546 11548 11836 -3 11546 11836 11834 -3 11547 11835 11549 -3 11837 11549 11835 -3 11548 11550 11838 -3 11548 11838 11836 -3 11549 11837 11551 -3 11839 11551 11837 -3 11550 11552 11840 -3 11550 11840 11838 
-3 11551 11839 11555 -3 11843 11555 11839 -3 11552 11553 11841 -3 11552 11841 11840 -3 11553 11556 11844 -3 11553 11844 11841 -3 11554 11555 11843 -3 11554 11843 11842 -3 11554 11842 11559 -3 11847 11559 11842 -3 11556 11557 11845 -3 11556 11845 11844 -3 11557 11560 11848 -3 11557 11848 11845 -3 11558 11559 11847 -3 11558 11847 11846 -3 11558 11846 11628 -3 11916 11628 11846 -3 11560 11561 11849 -3 11560 11849 11848 -3 11561 11562 11850 -3 11561 11850 11849 -3 11562 11563 11851 -3 11562 11851 11850 -3 11563 11564 11852 -3 11563 11852 11851 -3 11564 11565 11853 -3 11564 11853 11852 -3 11565 11566 11854 -3 11565 11854 11853 -3 11566 11567 11855 -3 11566 11855 11854 -3 11567 11568 11856 -3 11567 11856 11855 -3 11568 11569 11857 -3 11568 11857 11856 -3 11569 11570 11858 -3 11569 11858 11857 -3 11570 11571 11858 -3 11859 11858 11571 -3 11571 11572 11859 -3 11860 11859 11572 -3 11572 11573 11860 -3 11861 11860 11573 -3 11573 11574 11861 -3 11862 11861 11574 -3 11574 11575 11862 -3 11863 11862 11575 -3 11575 11576 11863 -3 11864 11863 11576 -3 11576 11577 11864 -3 11865 11864 11577 -3 11577 11578 11865 -3 11866 11865 11578 -3 11578 11579 11866 -3 11867 11866 11579 -3 11579 11580 11867 -3 11868 11867 11580 -3 11580 11581 11868 -3 11869 11868 11581 -3 11581 11582 11869 -3 11870 11869 11582 -3 11582 11583 11870 -3 11871 11870 11583 -3 11583 11584 11871 -3 11872 11871 11584 -3 11584 11585 11872 -3 11873 11872 11585 -3 11585 11586 11873 -3 11874 11873 11586 -3 11586 11587 11874 -3 11875 11874 11587 -3 11587 11588 11875 -3 11876 11875 11588 -3 11588 11589 11876 -3 11877 11876 11589 -3 11589 11590 11877 -3 11878 11877 11590 -3 11590 11591 11878 -3 11879 11878 11591 -3 11591 11592 11879 -3 11880 11879 11592 -3 11592 11593 11880 -3 11881 11880 11593 -3 11593 11594 11881 -3 11882 11881 11594 -3 11594 11595 11882 -3 11883 11882 11595 -3 11595 11596 11883 -3 11884 11883 11596 -3 11596 11597 11884 -3 11885 11884 11597 -3 11597 11598 11885 -3 11886 11885 11598 -3 11598 11599 11886 -3 11887 11886 11599 -3 11599 11600 11887 -3 11888 11887 11600 -3 11600 11601 11888 -3 11889 11888 11601 -3 11601 11602 11889 -3 11890 11889 11602 -3 11602 11603 11890 -3 11891 11890 11603 -3 11603 11604 11891 -3 11892 11891 11604 -3 11604 11605 11892 -3 11893 11892 11605 -3 11605 11606 11893 -3 11894 11893 11606 -3 11606 11607 11894 -3 11895 11894 11607 -3 11607 11608 11895 -3 11896 11895 11608 -3 11608 11609 11896 -3 11897 11896 11609 -3 11609 11610 11897 -3 11898 11897 11610 -3 11610 11611 11898 -3 11899 11898 11611 -3 11611 11612 11899 -3 11900 11899 11612 -3 11612 11613 11900 -3 11901 11900 11613 -3 11613 11614 11901 -3 11902 11901 11614 -3 11614 11615 11902 -3 11903 11902 11615 -3 11615 11616 11903 -3 11904 11903 11616 -3 11616 11617 11904 -3 11905 11904 11617 -3 11617 11618 11905 -3 11906 11905 11618 -3 11618 11619 11906 -3 11907 11906 11619 -3 11619 11620 11907 -3 11908 11907 11620 -3 11620 11621 11908 -3 11909 11908 11621 -3 11621 11622 11909 -3 11910 11909 11622 -3 11622 11623 11910 -3 11911 11910 11623 -3 11623 11624 11911 -3 11912 11911 11624 -3 11624 11625 11912 -3 11913 11912 11625 -3 11625 11626 11913 -3 11914 11913 11626 -3 11626 11627 11914 -3 11915 11914 11627 -3 11627 11628 11915 -3 11916 11915 11628 -3 11629 11917 11918 -3 11629 11918 11630 -3 11629 11699 11987 -3 11629 11987 11917 -3 11630 11918 11919 -3 11630 11919 11631 -3 11631 11919 11920 -3 11631 11920 11632 -3 11632 11920 11921 -3 11632 11921 11633 -3 11633 11921 11922 -3 11633 11922 11634 -3 11634 11922 11923 -3 11634 11923 11635 -3 11635 11923 11924 -3 
11635 11924 11636 -3 11636 11924 11925 -3 11636 11925 11637 -3 11637 11925 11926 -3 11637 11926 11638 -3 11638 11926 11927 -3 11638 11927 11639 -3 11639 11927 11928 -3 11639 11928 11640 -3 11640 11928 11929 -3 11640 11929 11641 -3 11641 11929 11930 -3 11641 11930 11642 -3 11642 11930 11931 -3 11642 11931 11643 -3 11643 11931 11932 -3 11643 11932 11644 -3 11644 11932 11933 -3 11644 11933 11645 -3 11645 11933 11934 -3 11645 11934 11646 -3 11646 11934 11935 -3 11646 11935 11647 -3 11647 11935 11936 -3 11647 11936 11648 -3 11648 11936 11937 -3 11648 11937 11649 -3 11649 11937 11938 -3 11649 11938 11650 -3 11650 11938 11939 -3 11650 11939 11651 -3 11651 11939 11940 -3 11651 11940 11652 -3 11652 11940 11941 -3 11652 11941 11653 -3 11653 11941 11942 -3 11653 11942 11654 -3 11654 11942 11943 -3 11654 11943 11655 -3 11655 11943 11944 -3 11655 11944 11656 -3 11656 11944 11945 -3 11656 11945 11657 -3 11657 11945 11946 -3 11657 11946 11658 -3 11658 11946 11947 -3 11658 11947 11659 -3 11659 11947 11948 -3 11659 11948 11660 -3 11660 11948 11949 -3 11660 11949 11661 -3 11661 11949 11950 -3 11661 11950 11662 -3 11662 11950 11951 -3 11662 11951 11663 -3 11663 11951 11952 -3 11663 11952 11664 -3 11664 11952 11953 -3 11664 11953 11665 -3 11665 11953 11954 -3 11665 11954 11666 -3 11666 11954 11955 -3 11666 11955 11667 -3 11667 11955 11956 -3 11667 11956 11668 -3 11668 11956 11957 -3 11668 11957 11669 -3 11669 11957 11958 -3 11669 11958 11670 -3 11670 11958 11959 -3 11670 11959 11671 -3 11671 11959 11672 -3 11960 11672 11959 -3 11672 11960 11673 -3 11961 11673 11960 -3 11673 11961 11674 -3 11962 11674 11961 -3 11674 11962 11675 -3 11963 11675 11962 -3 11675 11963 11676 -3 11964 11676 11963 -3 11676 11964 11677 -3 11965 11677 11964 -3 11677 11965 11678 -3 11966 11678 11965 -3 11678 11966 11679 -3 11967 11679 11966 -3 11679 11967 11680 -3 11968 11680 11967 -3 11680 11968 11681 -3 11969 11681 11968 -3 11681 11969 11682 -3 11970 11682 11969 -3 11682 11970 11683 -3 11971 11683 11970 -3 11683 11971 11684 -3 11972 11684 11971 -3 11684 11972 11685 -3 11973 11685 11972 -3 11685 11973 11686 -3 11974 11686 11973 -3 11686 11974 11687 -3 11975 11687 11974 -3 11687 11975 11688 -3 11976 11688 11975 -3 11688 11976 11689 -3 11977 11689 11976 -3 11689 11977 11690 -3 11978 11690 11977 -3 11690 11978 11691 -3 11979 11691 11978 -3 11691 11979 11692 -3 11980 11692 11979 -3 11692 11980 11693 -3 11981 11693 11980 -3 11693 11981 11694 -3 11982 11694 11981 -3 11694 11982 11695 -3 11983 11695 11982 -3 11695 11983 11696 -3 11984 11696 11983 -3 11696 11984 11697 -3 11985 11697 11984 -3 11697 11985 11700 -3 11988 11700 11985 -3 11698 11986 11699 -3 11987 11699 11986 -3 11698 11703 11991 -3 11698 11991 11986 -3 11700 11988 11701 -3 11989 11701 11988 -3 11701 11989 11704 -3 11992 11704 11989 -3 11702 11990 11703 -3 11991 11703 11990 -3 11702 11706 11990 -3 11994 11990 11706 -3 11704 11992 11705 -3 11993 11705 11992 -3 11705 11993 11707 -3 11995 11707 11993 -3 11706 11708 11994 -3 11996 11994 11708 -3 11707 11995 11709 -3 11997 11709 11995 -3 11708 11710 11996 -3 11998 11996 11710 -3 11709 11997 11711 -3 11999 11711 11997 -3 11710 11712 11998 -3 12000 11998 11712 -3 11711 11999 11713 -3 12001 11713 11999 -3 11712 11714 12000 -3 12002 12000 11714 -3 11713 12001 11715 -3 12003 11715 12001 -3 11714 11716 12002 -3 12004 12002 11716 -3 11715 12003 11717 -3 12005 11717 12003 -3 11716 11718 12004 -3 12006 12004 11718 -3 11717 12005 11719 -3 12007 11719 12005 -3 11718 11720 12006 -3 12008 12006 11720 -3 11719 12007 11721 -3 12009 11721 12007 -3 11720 
11722 12008 -3 12010 12008 11722 -3 11721 12009 11723 -3 12011 11723 12009 -3 11722 11724 12010 -3 12012 12010 11724 -3 11723 12011 11725 -3 12013 11725 12011 -3 11724 11726 12012 -3 12014 12012 11726 -3 11725 12013 11727 -3 12015 11727 12013 -3 11726 11728 12014 -3 12016 12014 11728 -3 11727 12015 11729 -3 12017 11729 12015 -3 11728 11730 12016 -3 12018 12016 11730 -3 11729 12017 11731 -3 12019 11731 12017 -3 11730 11732 12018 -3 12020 12018 11732 -3 11731 12019 11733 -3 12021 11733 12019 -3 11732 11734 12020 -3 12022 12020 11734 -3 11733 12021 11735 -3 12023 11735 12021 -3 11734 11736 12022 -3 12024 12022 11736 -3 11735 12023 11737 -3 12025 11737 12023 -3 11736 11738 12024 -3 12026 12024 11738 -3 11737 12025 11739 -3 12027 11739 12025 -3 11738 11740 12026 -3 12028 12026 11740 -3 11739 12027 12029 -3 11739 12029 11741 -3 11740 11742 12028 -3 12030 12028 11742 -3 11741 12029 12031 -3 11741 12031 11743 -3 11742 11744 12030 -3 12032 12030 11744 -3 11743 12031 12033 -3 11743 12033 11745 -3 11744 11746 12032 -3 12034 12032 11746 -3 11745 12033 12035 -3 11745 12035 11747 -3 11746 11748 12034 -3 12036 12034 11748 -3 11747 12035 12037 -3 11747 12037 11749 -3 11748 11750 12036 -3 12038 12036 11750 -3 11749 12037 12039 -3 11749 12039 11751 -3 11750 11752 12038 -3 12040 12038 11752 -3 11751 12039 12041 -3 11751 12041 11753 -3 11752 11754 12040 -3 12042 12040 11754 -3 11753 12041 12043 -3 11753 12043 11755 -3 11754 11756 12042 -3 12044 12042 11756 -3 11755 12043 12045 -3 11755 12045 11757 -3 11756 11758 12044 -3 12046 12044 11758 -3 11757 12045 12047 -3 11757 12047 11759 -3 11758 11760 12046 -3 12048 12046 11760 -3 11759 12047 12049 -3 11759 12049 11761 -3 11760 11762 12048 -3 12050 12048 11762 -3 11761 12049 12051 -3 11761 12051 11763 -3 11762 11764 12050 -3 12052 12050 11764 -3 11763 12051 12053 -3 11763 12053 11765 -3 11764 11766 12052 -3 12054 12052 11766 -3 11765 12053 12055 -3 11765 12055 11767 -3 11766 11768 12054 -3 12056 12054 11768 -3 11767 12055 12057 -3 11767 12057 11769 -3 11768 11770 12056 -3 12058 12056 11770 -3 11769 12057 12059 -3 11769 12059 11771 -3 11770 11772 12060 -3 11770 12060 12058 -3 11771 12059 12061 -3 11771 12061 11773 -3 11772 11774 12062 -3 11772 12062 12060 -3 11773 12061 12063 -3 11773 12063 11775 -3 11774 11776 12064 -3 11774 12064 12062 -3 11775 12063 12065 -3 11775 12065 11777 -3 11776 11778 12066 -3 11776 12066 12064 -3 11777 12065 12067 -3 11777 12067 11779 -3 11778 11780 12068 -3 11778 12068 12066 -3 11779 12067 12069 -3 11779 12069 11781 -3 11780 11782 12070 -3 11780 12070 12068 -3 11781 12069 12071 -3 11781 12071 11783 -3 11782 11784 12072 -3 11782 12072 12070 -3 11783 12071 12073 -3 11783 12073 11785 -3 11784 11786 12074 -3 11784 12074 12072 -3 11785 12073 12075 -3 11785 12075 11787 -3 11786 11788 12076 -3 11786 12076 12074 -3 11787 12075 12077 -3 11787 12077 11789 -3 11788 11790 12078 -3 11788 12078 12076 -3 11789 12077 12079 -3 11789 12079 11791 -3 11790 11792 12080 -3 11790 12080 12078 -3 11791 12079 12081 -3 11791 12081 11793 -3 11792 11794 12082 -3 11792 12082 12080 -3 11793 12081 12083 -3 11793 12083 11795 -3 11794 11796 12084 -3 11794 12084 12082 -3 11795 12083 12085 -3 11795 12085 11797 -3 11796 11798 12086 -3 11796 12086 12084 -3 11797 12085 12087 -3 11797 12087 11799 -3 11798 11800 12088 -3 11798 12088 12086 -3 11799 12087 12089 -3 11799 12089 11801 -3 11800 11802 12090 -3 11800 12090 12088 -3 11801 12089 12091 -3 11801 12091 11803 -3 11802 11804 12092 -3 11802 12092 12090 -3 11803 12091 12093 -3 11803 12093 11805 -3 11804 11806 12094 -3 11804 12094 
12092 -3 11805 12093 12095 -3 11805 12095 11807 -3 11806 11808 12096 -3 11806 12096 12094 -3 11807 12095 11809 -3 12097 11809 12095 -3 11808 11810 12098 -3 11808 12098 12096 -3 11809 12097 11811 -3 12099 11811 12097 -3 11810 11812 12100 -3 11810 12100 12098 -3 11811 12099 11813 -3 12101 11813 12099 -3 11812 11814 12102 -3 11812 12102 12100 -3 11813 12101 11815 -3 12103 11815 12101 -3 11814 11816 12104 -3 11814 12104 12102 -3 11815 12103 11817 -3 12105 11817 12103 -3 11816 11818 12106 -3 11816 12106 12104 -3 11817 12105 11819 -3 12107 11819 12105 -3 11818 11820 12108 -3 11818 12108 12106 -3 11819 12107 11821 -3 12109 11821 12107 -3 11820 11822 12110 -3 11820 12110 12108 -3 11821 12109 11823 -3 12111 11823 12109 -3 11822 11824 12112 -3 11822 12112 12110 -3 11823 12111 11825 -3 12113 11825 12111 -3 11824 11826 12114 -3 11824 12114 12112 -3 11825 12113 11827 -3 12115 11827 12113 -3 11826 11828 12116 -3 11826 12116 12114 -3 11827 12115 11829 -3 12117 11829 12115 -3 11828 11830 12118 -3 11828 12118 12116 -3 11829 12117 11831 -3 12119 11831 12117 -3 11830 11832 12120 -3 11830 12120 12118 -3 11831 12119 11833 -3 12121 11833 12119 -3 11832 11834 12122 -3 11832 12122 12120 -3 11833 12121 11835 -3 12123 11835 12121 -3 11834 11836 12124 -3 11834 12124 12122 -3 11835 12123 11837 -3 12125 11837 12123 -3 11836 11838 12124 -3 12126 12124 11838 -3 11837 12125 11839 -3 12127 11839 12125 -3 11838 11840 12126 -3 12128 12126 11840 -3 11839 12127 11843 -3 12131 11843 12127 -3 11840 11841 12128 -3 12129 12128 11841 -3 11841 11844 12129 -3 12132 12129 11844 -3 11842 11843 12130 -3 12131 12130 11843 -3 11842 12130 11847 -3 12135 11847 12130 -3 11844 11845 12132 -3 12133 12132 11845 -3 11845 11848 12133 -3 12136 12133 11848 -3 11846 11847 12134 -3 12135 12134 11847 -3 11846 12134 11916 -3 12204 11916 12134 -3 11848 11849 12136 -3 12137 12136 11849 -3 11849 11850 12137 -3 12138 12137 11850 -3 11850 11851 12138 -3 12139 12138 11851 -3 11851 11852 12139 -3 12140 12139 11852 -3 11852 11853 12140 -3 12141 12140 11853 -3 11853 11854 12141 -3 12142 12141 11854 -3 11854 11855 12142 -3 12143 12142 11855 -3 11855 11856 12143 -3 12144 12143 11856 -3 11856 11857 12144 -3 12145 12144 11857 -3 11857 11858 12145 -3 12146 12145 11858 -3 11858 11859 12146 -3 12147 12146 11859 -3 11859 11860 12147 -3 12148 12147 11860 -3 11860 11861 12148 -3 12149 12148 11861 -3 11861 11862 12149 -3 12150 12149 11862 -3 11862 11863 12150 -3 12151 12150 11863 -3 11863 11864 12151 -3 12152 12151 11864 -3 11864 11865 12152 -3 12153 12152 11865 -3 11865 11866 12153 -3 12154 12153 11866 -3 11866 11867 12154 -3 12155 12154 11867 -3 11867 11868 12155 -3 12156 12155 11868 -3 11868 11869 12156 -3 12157 12156 11869 -3 11869 11870 12157 -3 12158 12157 11870 -3 11870 11871 12158 -3 12159 12158 11871 -3 11871 11872 12159 -3 12160 12159 11872 -3 11872 11873 12160 -3 12161 12160 11873 -3 11873 11874 12161 -3 12162 12161 11874 -3 11874 11875 12162 -3 12163 12162 11875 -3 11875 11876 12163 -3 12164 12163 11876 -3 11876 11877 12164 -3 12165 12164 11877 -3 11877 11878 12165 -3 12166 12165 11878 -3 11878 11879 12166 -3 12167 12166 11879 -3 11879 11880 12167 -3 12168 12167 11880 -3 11880 11881 12168 -3 12169 12168 11881 -3 11881 11882 12169 -3 12170 12169 11882 -3 11882 11883 12170 -3 12171 12170 11883 -3 11883 11884 12171 -3 12172 12171 11884 -3 11884 11885 12172 -3 12173 12172 11885 -3 11885 11886 12173 -3 12174 12173 11886 -3 11886 11887 12174 -3 12175 12174 11887 -3 11887 11888 12175 -3 12176 12175 11888 -3 11888 11889 12176 -3 12177 12176 11889 -3 11889 11890 12177 
-3 12178 12177 11890 -3 11890 11891 12178 -3 12179 12178 11891 -3 11891 11892 12179 -3 12180 12179 11892 -3 11892 11893 12180 -3 12181 12180 11893 -3 11893 11894 12181 -3 12182 12181 11894 -3 11894 11895 12182 -3 12183 12182 11895 -3 11895 11896 12183 -3 12184 12183 11896 -3 11896 11897 12184 -3 12185 12184 11897 -3 11897 11898 12185 -3 12186 12185 11898 -3 11898 11899 12186 -3 12187 12186 11899 -3 11899 11900 12187 -3 12188 12187 11900 -3 11900 11901 12188 -3 12189 12188 11901 -3 11901 11902 12189 -3 12190 12189 11902 -3 11902 11903 12190 -3 12191 12190 11903 -3 11903 11904 12192 -3 11903 12192 12191 -3 11904 11905 12193 -3 11904 12193 12192 -3 11905 11906 12194 -3 11905 12194 12193 -3 11906 11907 12195 -3 11906 12195 12194 -3 11907 11908 12196 -3 11907 12196 12195 -3 11908 11909 12197 -3 11908 12197 12196 -3 11909 11910 12198 -3 11909 12198 12197 -3 11910 11911 12199 -3 11910 12199 12198 -3 11911 11912 12200 -3 11911 12200 12199 -3 11912 11913 12201 -3 11912 12201 12200 -3 11913 11914 12202 -3 11913 12202 12201 -3 11914 11915 12203 -3 11914 12203 12202 -3 11915 11916 12204 -3 11915 12204 12203 -3 11917 12205 12206 -3 11917 12206 11918 -3 11917 11987 12275 -3 11917 12275 12205 -3 11918 12206 12207 -3 11918 12207 11919 -3 11919 12207 12208 -3 11919 12208 11920 -3 11920 12208 12209 -3 11920 12209 11921 -3 11921 12209 12210 -3 11921 12210 11922 -3 11922 12210 12211 -3 11922 12211 11923 -3 11923 12211 12212 -3 11923 12212 11924 -3 11924 12212 12213 -3 11924 12213 11925 -3 11925 12213 12214 -3 11925 12214 11926 -3 11926 12214 12215 -3 11926 12215 11927 -3 11927 12215 12216 -3 11927 12216 11928 -3 11928 12216 12217 -3 11928 12217 11929 -3 11929 12217 12218 -3 11929 12218 11930 -3 11930 12218 12219 -3 11930 12219 11931 -3 11931 12219 12220 -3 11931 12220 11932 -3 11932 12220 12221 -3 11932 12221 11933 -3 11933 12221 12222 -3 11933 12222 11934 -3 11934 12222 12223 -3 11934 12223 11935 -3 11935 12223 12224 -3 11935 12224 11936 -3 11936 12224 12225 -3 11936 12225 11937 -3 11937 12225 12226 -3 11937 12226 11938 -3 11938 12226 12227 -3 11938 12227 11939 -3 11939 12227 12228 -3 11939 12228 11940 -3 11940 12228 12229 -3 11940 12229 11941 -3 11941 12229 11942 -3 12230 11942 12229 -3 11942 12230 11943 -3 12231 11943 12230 -3 11943 12231 11944 -3 12232 11944 12231 -3 11944 12232 11945 -3 12233 11945 12232 -3 11945 12233 11946 -3 12234 11946 12233 -3 11946 12234 11947 -3 12235 11947 12234 -3 11947 12235 11948 -3 12236 11948 12235 -3 11948 12236 11949 -3 12237 11949 12236 -3 11949 12237 11950 -3 12238 11950 12237 -3 11950 12238 11951 -3 12239 11951 12238 -3 11951 12239 11952 -3 12240 11952 12239 -3 11952 12240 11953 -3 12241 11953 12240 -3 11953 12241 11954 -3 12242 11954 12241 -3 11954 12242 11955 -3 12243 11955 12242 -3 11955 12243 11956 -3 12244 11956 12243 -3 11956 12244 11957 -3 12245 11957 12244 -3 11957 12245 11958 -3 12246 11958 12245 -3 11958 12246 11959 -3 12247 11959 12246 -3 11959 12247 11960 -3 12248 11960 12247 -3 11960 12248 11961 -3 12249 11961 12248 -3 11961 12249 11962 -3 12250 11962 12249 -3 11962 12250 11963 -3 12251 11963 12250 -3 11963 12251 11964 -3 12252 11964 12251 -3 11964 12252 11965 -3 12253 11965 12252 -3 11965 12253 11966 -3 12254 11966 12253 -3 11966 12254 11967 -3 12255 11967 12254 -3 11967 12255 11968 -3 12256 11968 12255 -3 11968 12256 11969 -3 12257 11969 12256 -3 11969 12257 11970 -3 12258 11970 12257 -3 11970 12258 11971 -3 12259 11971 12258 -3 11971 12259 11972 -3 12260 11972 12259 -3 11972 12260 11973 -3 12261 11973 12260 -3 11973 12261 11974 -3 12262 11974 12261 -3 
11974 12262 11975 -3 12263 11975 12262 -3 11975 12263 11976 -3 12264 11976 12263 -3 11976 12264 11977 -3 12265 11977 12264 -3 11977 12265 11978 -3 12266 11978 12265 -3 11978 12266 11979 -3 12267 11979 12266 -3 11979 12267 11980 -3 12268 11980 12267 -3 11980 12268 11981 -3 12269 11981 12268 -3 11981 12269 11982 -3 12270 11982 12269 -3 11982 12270 11983 -3 12271 11983 12270 -3 11983 12271 11984 -3 12272 11984 12271 -3 11984 12272 11985 -3 12273 11985 12272 -3 11985 12273 11988 -3 12276 11988 12273 -3 11986 12274 11987 -3 12275 11987 12274 -3 11986 11991 12274 -3 12279 12274 11991 -3 11988 12276 11989 -3 12277 11989 12276 -3 11989 12277 11992 -3 12280 11992 12277 -3 11990 12278 11991 -3 12279 11991 12278 -3 11990 11994 12278 -3 12282 12278 11994 -3 11992 12280 11993 -3 12281 11993 12280 -3 11993 12281 11995 -3 12283 11995 12281 -3 11994 11996 12282 -3 12284 12282 11996 -3 11995 12283 11997 -3 12285 11997 12283 -3 11996 11998 12284 -3 12286 12284 11998 -3 11997 12285 11999 -3 12287 11999 12285 -3 11998 12000 12286 -3 12288 12286 12000 -3 11999 12287 12001 -3 12289 12001 12287 -3 12000 12002 12288 -3 12290 12288 12002 -3 12001 12289 12003 -3 12291 12003 12289 -3 12002 12004 12290 -3 12292 12290 12004 -3 12003 12291 12005 -3 12293 12005 12291 -3 12004 12006 12292 -3 12294 12292 12006 -3 12005 12293 12007 -3 12295 12007 12293 -3 12006 12008 12294 -3 12296 12294 12008 -3 12007 12295 12009 -3 12297 12009 12295 -3 12008 12010 12296 -3 12298 12296 12010 -3 12009 12297 12299 -3 12009 12299 12011 -3 12010 12012 12298 -3 12300 12298 12012 -3 12011 12299 12301 -3 12011 12301 12013 -3 12012 12014 12300 -3 12302 12300 12014 -3 12013 12301 12303 -3 12013 12303 12015 -3 12014 12016 12302 -3 12304 12302 12016 -3 12015 12303 12305 -3 12015 12305 12017 -3 12016 12018 12304 -3 12306 12304 12018 -3 12017 12305 12307 -3 12017 12307 12019 -3 12018 12020 12306 -3 12308 12306 12020 -3 12019 12307 12309 -3 12019 12309 12021 -3 12020 12022 12308 -3 12310 12308 12022 -3 12021 12309 12311 -3 12021 12311 12023 -3 12022 12024 12310 -3 12312 12310 12024 -3 12023 12311 12313 -3 12023 12313 12025 -3 12024 12026 12312 -3 12314 12312 12026 -3 12025 12313 12315 -3 12025 12315 12027 -3 12026 12028 12314 -3 12316 12314 12028 -3 12027 12315 12317 -3 12027 12317 12029 -3 12028 12030 12316 -3 12318 12316 12030 -3 12029 12317 12319 -3 12029 12319 12031 -3 12030 12032 12318 -3 12320 12318 12032 -3 12031 12319 12321 -3 12031 12321 12033 -3 12032 12034 12320 -3 12322 12320 12034 -3 12033 12321 12323 -3 12033 12323 12035 -3 12034 12036 12322 -3 12324 12322 12036 -3 12035 12323 12325 -3 12035 12325 12037 -3 12036 12038 12324 -3 12326 12324 12038 -3 12037 12325 12327 -3 12037 12327 12039 -3 12038 12040 12328 -3 12038 12328 12326 -3 12039 12327 12329 -3 12039 12329 12041 -3 12040 12042 12330 -3 12040 12330 12328 -3 12041 12329 12331 -3 12041 12331 12043 -3 12042 12044 12332 -3 12042 12332 12330 -3 12043 12331 12333 -3 12043 12333 12045 -3 12044 12046 12334 -3 12044 12334 12332 -3 12045 12333 12335 -3 12045 12335 12047 -3 12046 12048 12336 -3 12046 12336 12334 -3 12047 12335 12337 -3 12047 12337 12049 -3 12048 12050 12338 -3 12048 12338 12336 -3 12049 12337 12339 -3 12049 12339 12051 -3 12050 12052 12340 -3 12050 12340 12338 -3 12051 12339 12341 -3 12051 12341 12053 -3 12052 12054 12342 -3 12052 12342 12340 -3 12053 12341 12343 -3 12053 12343 12055 -3 12054 12056 12344 -3 12054 12344 12342 -3 12055 12343 12345 -3 12055 12345 12057 -3 12056 12058 12346 -3 12056 12346 12344 -3 12057 12345 12347 -3 12057 12347 12059 -3 12058 12060 12348 -3 12058 
12348 12346 -3 12059 12347 12349 -3 12059 12349 12061 -3 12060 12062 12350 -3 12060 12350 12348 -3 12061 12349 12351 -3 12061 12351 12063 -3 12062 12064 12352 -3 12062 12352 12350 -3 12063 12351 12353 -3 12063 12353 12065 -3 12064 12066 12354 -3 12064 12354 12352 -3 12065 12353 12355 -3 12065 12355 12067 -3 12066 12068 12356 -3 12066 12356 12354 -3 12067 12355 12357 -3 12067 12357 12069 -3 12068 12070 12358 -3 12068 12358 12356 -3 12069 12357 12359 -3 12069 12359 12071 -3 12070 12072 12360 -3 12070 12360 12358 -3 12071 12359 12361 -3 12071 12361 12073 -3 12072 12074 12362 -3 12072 12362 12360 -3 12073 12361 12363 -3 12073 12363 12075 -3 12074 12076 12364 -3 12074 12364 12362 -3 12075 12363 12365 -3 12075 12365 12077 -3 12076 12078 12366 -3 12076 12366 12364 -3 12077 12365 12079 -3 12367 12079 12365 -3 12078 12080 12368 -3 12078 12368 12366 -3 12079 12367 12081 -3 12369 12081 12367 -3 12080 12082 12370 -3 12080 12370 12368 -3 12081 12369 12083 -3 12371 12083 12369 -3 12082 12084 12372 -3 12082 12372 12370 -3 12083 12371 12085 -3 12373 12085 12371 -3 12084 12086 12374 -3 12084 12374 12372 -3 12085 12373 12087 -3 12375 12087 12373 -3 12086 12088 12376 -3 12086 12376 12374 -3 12087 12375 12089 -3 12377 12089 12375 -3 12088 12090 12378 -3 12088 12378 12376 -3 12089 12377 12091 -3 12379 12091 12377 -3 12090 12092 12380 -3 12090 12380 12378 -3 12091 12379 12093 -3 12381 12093 12379 -3 12092 12094 12382 -3 12092 12382 12380 -3 12093 12381 12095 -3 12383 12095 12381 -3 12094 12096 12384 -3 12094 12384 12382 -3 12095 12383 12097 -3 12385 12097 12383 -3 12096 12098 12386 -3 12096 12386 12384 -3 12097 12385 12099 -3 12387 12099 12385 -3 12098 12100 12388 -3 12098 12388 12386 -3 12099 12387 12101 -3 12389 12101 12387 -3 12100 12102 12390 -3 12100 12390 12388 -3 12101 12389 12103 -3 12391 12103 12389 -3 12102 12104 12392 -3 12102 12392 12390 -3 12103 12391 12105 -3 12393 12105 12391 -3 12104 12106 12394 -3 12104 12394 12392 -3 12105 12393 12107 -3 12395 12107 12393 -3 12106 12108 12394 -3 12396 12394 12108 -3 12107 12395 12109 -3 12397 12109 12395 -3 12108 12110 12396 -3 12398 12396 12110 -3 12109 12397 12111 -3 12399 12111 12397 -3 12110 12112 12398 -3 12400 12398 12112 -3 12111 12399 12113 -3 12401 12113 12399 -3 12112 12114 12400 -3 12402 12400 12114 -3 12113 12401 12115 -3 12403 12115 12401 -3 12114 12116 12402 -3 12404 12402 12116 -3 12115 12403 12117 -3 12405 12117 12403 -3 12116 12118 12404 -3 12406 12404 12118 -3 12117 12405 12119 -3 12407 12119 12405 -3 12118 12120 12406 -3 12408 12406 12120 -3 12119 12407 12121 -3 12409 12121 12407 -3 12120 12122 12408 -3 12410 12408 12122 -3 12121 12409 12123 -3 12411 12123 12409 -3 12122 12124 12410 -3 12412 12410 12124 -3 12123 12411 12125 -3 12413 12125 12411 -3 12124 12126 12412 -3 12414 12412 12126 -3 12125 12413 12127 -3 12415 12127 12413 -3 12126 12128 12414 -3 12416 12414 12128 -3 12127 12415 12131 -3 12419 12131 12415 -3 12128 12129 12416 -3 12417 12416 12129 -3 12129 12132 12417 -3 12420 12417 12132 -3 12130 12131 12418 -3 12419 12418 12131 -3 12130 12418 12135 -3 12423 12135 12418 -3 12132 12133 12420 -3 12421 12420 12133 -3 12133 12136 12421 -3 12424 12421 12136 -3 12134 12135 12422 -3 12423 12422 12135 -3 12134 12422 12204 -3 12492 12204 12422 -3 12136 12137 12424 -3 12425 12424 12137 -3 12137 12138 12425 -3 12426 12425 12138 -3 12138 12139 12426 -3 12427 12426 12139 -3 12139 12140 12427 -3 12428 12427 12140 -3 12140 12141 12428 -3 12429 12428 12141 -3 12141 12142 12429 -3 12430 12429 12142 -3 12142 12143 12430 -3 12431 12430 12143 -3 12143 12144 
12431 -3 12432 12431 12144 -3 12144 12145 12432 -3 12433 12432 12145 -3 12145 12146 12433 -3 12434 12433 12146 -3 12146 12147 12434 -3 12435 12434 12147 -3 12147 12148 12435 -3 12436 12435 12148 -3 12148 12149 12436 -3 12437 12436 12149 -3 12149 12150 12437 -3 12438 12437 12150 -3 12150 12151 12438 -3 12439 12438 12151 -3 12151 12152 12439 -3 12440 12439 12152 -3 12152 12153 12440 -3 12441 12440 12153 -3 12153 12154 12441 -3 12442 12441 12154 -3 12154 12155 12442 -3 12443 12442 12155 -3 12155 12156 12443 -3 12444 12443 12156 -3 12156 12157 12444 -3 12445 12444 12157 -3 12157 12158 12445 -3 12446 12445 12158 -3 12158 12159 12446 -3 12447 12446 12159 -3 12159 12160 12447 -3 12448 12447 12160 -3 12160 12161 12448 -3 12449 12448 12161 -3 12161 12162 12449 -3 12450 12449 12162 -3 12162 12163 12450 -3 12451 12450 12163 -3 12163 12164 12451 -3 12452 12451 12164 -3 12164 12165 12452 -3 12453 12452 12165 -3 12165 12166 12453 -3 12454 12453 12166 -3 12166 12167 12454 -3 12455 12454 12167 -3 12167 12168 12455 -3 12456 12455 12168 -3 12168 12169 12456 -3 12457 12456 12169 -3 12169 12170 12457 -3 12458 12457 12170 -3 12170 12171 12458 -3 12459 12458 12171 -3 12171 12172 12459 -3 12460 12459 12172 -3 12172 12173 12460 -3 12461 12460 12173 -3 12173 12174 12462 -3 12173 12462 12461 -3 12174 12175 12463 -3 12174 12463 12462 -3 12175 12176 12464 -3 12175 12464 12463 -3 12176 12177 12465 -3 12176 12465 12464 -3 12177 12178 12466 -3 12177 12466 12465 -3 12178 12179 12467 -3 12178 12467 12466 -3 12179 12180 12468 -3 12179 12468 12467 -3 12180 12181 12469 -3 12180 12469 12468 -3 12181 12182 12470 -3 12181 12470 12469 -3 12182 12183 12471 -3 12182 12471 12470 -3 12183 12184 12472 -3 12183 12472 12471 -3 12184 12185 12473 -3 12184 12473 12472 -3 12185 12186 12474 -3 12185 12474 12473 -3 12186 12187 12475 -3 12186 12475 12474 -3 12187 12188 12476 -3 12187 12476 12475 -3 12188 12189 12477 -3 12188 12477 12476 -3 12189 12190 12478 -3 12189 12478 12477 -3 12190 12191 12479 -3 12190 12479 12478 -3 12191 12192 12480 -3 12191 12480 12479 -3 12192 12193 12481 -3 12192 12481 12480 -3 12193 12194 12482 -3 12193 12482 12481 -3 12194 12195 12483 -3 12194 12483 12482 -3 12195 12196 12484 -3 12195 12484 12483 -3 12196 12197 12485 -3 12196 12485 12484 -3 12197 12198 12486 -3 12197 12486 12485 -3 12198 12199 12487 -3 12198 12487 12486 -3 12199 12200 12488 -3 12199 12488 12487 -3 12200 12201 12489 -3 12200 12489 12488 -3 12201 12202 12490 -3 12201 12490 12489 -3 12202 12203 12491 -3 12202 12491 12490 -3 12203 12204 12492 -3 12203 12492 12491 -3 12205 12493 12494 -3 12205 12494 12206 -3 12205 12275 12493 -3 12563 12493 12275 -3 12206 12494 12495 -3 12206 12495 12207 -3 12207 12495 12496 -3 12207 12496 12208 -3 12208 12496 12497 -3 12208 12497 12209 -3 12209 12497 12498 -3 12209 12498 12210 -3 12210 12498 12499 -3 12210 12499 12211 -3 12211 12499 12500 -3 12211 12500 12212 -3 12212 12500 12501 -3 12212 12501 12213 -3 12213 12501 12214 -3 12502 12214 12501 -3 12214 12502 12215 -3 12503 12215 12502 -3 12215 12503 12216 -3 12504 12216 12503 -3 12216 12504 12217 -3 12505 12217 12504 -3 12217 12505 12218 -3 12506 12218 12505 -3 12218 12506 12219 -3 12507 12219 12506 -3 12219 12507 12220 -3 12508 12220 12507 -3 12220 12508 12221 -3 12509 12221 12508 -3 12221 12509 12222 -3 12510 12222 12509 -3 12222 12510 12223 -3 12511 12223 12510 -3 12223 12511 12224 -3 12512 12224 12511 -3 12224 12512 12225 -3 12513 12225 12512 -3 12225 12513 12226 -3 12514 12226 12513 -3 12226 12514 12227 -3 12515 12227 12514 -3 12227 12515 12228 -3 12516 12228 12515 
-3 12228 12516 12229 -3 12517 12229 12516 -3 12229 12517 12230 -3 12518 12230 12517 -3 12230 12518 12231 -3 12519 12231 12518 -3 12231 12519 12232 -3 12520 12232 12519 -3 12232 12520 12233 -3 12521 12233 12520 -3 12233 12521 12234 -3 12522 12234 12521 -3 12234 12522 12235 -3 12523 12235 12522 -3 12235 12523 12236 -3 12524 12236 12523 -3 12236 12524 12237 -3 12525 12237 12524 -3 12237 12525 12238 -3 12526 12238 12525 -3 12238 12526 12239 -3 12527 12239 12526 -3 12239 12527 12240 -3 12528 12240 12527 -3 12240 12528 12241 -3 12529 12241 12528 -3 12241 12529 12242 -3 12530 12242 12529 -3 12242 12530 12243 -3 12531 12243 12530 -3 12243 12531 12244 -3 12532 12244 12531 -3 12244 12532 12245 -3 12533 12245 12532 -3 12245 12533 12246 -3 12534 12246 12533 -3 12246 12534 12247 -3 12535 12247 12534 -3 12247 12535 12248 -3 12536 12248 12535 -3 12248 12536 12249 -3 12537 12249 12536 -3 12249 12537 12250 -3 12538 12250 12537 -3 12250 12538 12251 -3 12539 12251 12538 -3 12251 12539 12252 -3 12540 12252 12539 -3 12252 12540 12253 -3 12541 12253 12540 -3 12253 12541 12254 -3 12542 12254 12541 -3 12254 12542 12255 -3 12543 12255 12542 -3 12255 12543 12256 -3 12544 12256 12543 -3 12256 12544 12257 -3 12545 12257 12544 -3 12257 12545 12258 -3 12546 12258 12545 -3 12258 12546 12259 -3 12547 12259 12546 -3 12259 12547 12260 -3 12548 12260 12547 -3 12260 12548 12261 -3 12549 12261 12548 -3 12261 12549 12262 -3 12550 12262 12549 -3 12262 12550 12263 -3 12551 12263 12550 -3 12263 12551 12264 -3 12552 12264 12551 -3 12264 12552 12265 -3 12553 12265 12552 -3 12265 12553 12266 -3 12554 12266 12553 -3 12266 12554 12267 -3 12555 12267 12554 -3 12267 12555 12268 -3 12556 12268 12555 -3 12268 12556 12269 -3 12557 12269 12556 -3 12269 12557 12270 -3 12558 12270 12557 -3 12270 12558 12271 -3 12559 12271 12558 -3 12271 12559 12272 -3 12560 12272 12559 -3 12272 12560 12273 -3 12561 12273 12560 -3 12273 12561 12276 -3 12564 12276 12561 -3 12274 12562 12275 -3 12563 12275 12562 -3 12274 12279 12562 -3 12567 12562 12279 -3 12276 12564 12277 -3 12565 12277 12564 -3 12277 12565 12280 -3 12568 12280 12565 -3 12278 12566 12279 -3 12567 12279 12566 -3 12278 12282 12566 -3 12570 12566 12282 -3 12280 12568 12281 -3 12569 12281 12568 -3 12281 12569 12283 -3 12571 12283 12569 -3 12282 12284 12570 -3 12572 12570 12284 -3 12283 12571 12573 -3 12283 12573 12285 -3 12284 12286 12572 -3 12574 12572 12286 -3 12285 12573 12575 -3 12285 12575 12287 -3 12286 12288 12574 -3 12576 12574 12288 -3 12287 12575 12577 -3 12287 12577 12289 -3 12288 12290 12576 -3 12578 12576 12290 -3 12289 12577 12579 -3 12289 12579 12291 -3 12290 12292 12578 -3 12580 12578 12292 -3 12291 12579 12581 -3 12291 12581 12293 -3 12292 12294 12580 -3 12582 12580 12294 -3 12293 12581 12583 -3 12293 12583 12295 -3 12294 12296 12582 -3 12584 12582 12296 -3 12295 12583 12585 -3 12295 12585 12297 -3 12296 12298 12584 -3 12586 12584 12298 -3 12297 12585 12587 -3 12297 12587 12299 -3 12298 12300 12586 -3 12588 12586 12300 -3 12299 12587 12589 -3 12299 12589 12301 -3 12300 12302 12588 -3 12590 12588 12302 -3 12301 12589 12591 -3 12301 12591 12303 -3 12302 12304 12590 -3 12592 12590 12304 -3 12303 12591 12593 -3 12303 12593 12305 -3 12304 12306 12592 -3 12594 12592 12306 -3 12305 12593 12595 -3 12305 12595 12307 -3 12306 12308 12594 -3 12596 12594 12308 -3 12307 12595 12597 -3 12307 12597 12309 -3 12308 12310 12596 -3 12598 12596 12310 -3 12309 12597 12599 -3 12309 12599 12311 -3 12310 12312 12600 -3 12310 12600 12598 -3 12311 12599 12601 -3 12311 12601 12313 -3 12312 12314 12602 -3 
12312 12602 12600 -3 12313 12601 12603 -3 12313 12603 12315 -3 12314 12316 12604 -3 12314 12604 12602 -3 12315 12603 12605 -3 12315 12605 12317 -3 12316 12318 12606 -3 12316 12606 12604 -3 12317 12605 12607 -3 12317 12607 12319 -3 12318 12320 12608 -3 12318 12608 12606 -3 12319 12607 12609 -3 12319 12609 12321 -3 12320 12322 12610 -3 12320 12610 12608 -3 12321 12609 12611 -3 12321 12611 12323 -3 12322 12324 12612 -3 12322 12612 12610 -3 12323 12611 12613 -3 12323 12613 12325 -3 12324 12326 12614 -3 12324 12614 12612 -3 12325 12613 12615 -3 12325 12615 12327 -3 12326 12328 12616 -3 12326 12616 12614 -3 12327 12615 12617 -3 12327 12617 12329 -3 12328 12330 12618 -3 12328 12618 12616 -3 12329 12617 12619 -3 12329 12619 12331 -3 12330 12332 12620 -3 12330 12620 12618 -3 12331 12619 12621 -3 12331 12621 12333 -3 12332 12334 12622 -3 12332 12622 12620 -3 12333 12621 12623 -3 12333 12623 12335 -3 12334 12336 12624 -3 12334 12624 12622 -3 12335 12623 12625 -3 12335 12625 12337 -3 12336 12338 12626 -3 12336 12626 12624 -3 12337 12625 12627 -3 12337 12627 12339 -3 12338 12340 12628 -3 12338 12628 12626 -3 12339 12627 12629 -3 12339 12629 12341 -3 12340 12342 12630 -3 12340 12630 12628 -3 12341 12629 12631 -3 12341 12631 12343 -3 12342 12344 12632 -3 12342 12632 12630 -3 12343 12631 12633 -3 12343 12633 12345 -3 12344 12346 12634 -3 12344 12634 12632 -3 12345 12633 12635 -3 12345 12635 12347 -3 12346 12348 12636 -3 12346 12636 12634 -3 12347 12635 12637 -3 12347 12637 12349 -3 12348 12350 12638 -3 12348 12638 12636 -3 12349 12637 12639 -3 12349 12639 12351 -3 12350 12352 12640 -3 12350 12640 12638 -3 12351 12639 12353 -3 12641 12353 12639 -3 12352 12354 12642 -3 12352 12642 12640 -3 12353 12641 12355 -3 12643 12355 12641 -3 12354 12356 12644 -3 12354 12644 12642 -3 12355 12643 12357 -3 12645 12357 12643 -3 12356 12358 12646 -3 12356 12646 12644 -3 12357 12645 12359 -3 12647 12359 12645 -3 12358 12360 12648 -3 12358 12648 12646 -3 12359 12647 12361 -3 12649 12361 12647 -3 12360 12362 12650 -3 12360 12650 12648 -3 12361 12649 12363 -3 12651 12363 12649 -3 12362 12364 12652 -3 12362 12652 12650 -3 12363 12651 12365 -3 12653 12365 12651 -3 12364 12366 12654 -3 12364 12654 12652 -3 12365 12653 12367 -3 12655 12367 12653 -3 12366 12368 12656 -3 12366 12656 12654 -3 12367 12655 12369 -3 12657 12369 12655 -3 12368 12370 12658 -3 12368 12658 12656 -3 12369 12657 12371 -3 12659 12371 12657 -3 12370 12372 12660 -3 12370 12660 12658 -3 12371 12659 12373 -3 12661 12373 12659 -3 12372 12374 12662 -3 12372 12662 12660 -3 12373 12661 12375 -3 12663 12375 12661 -3 12374 12376 12664 -3 12374 12664 12662 -3 12375 12663 12377 -3 12665 12377 12663 -3 12376 12378 12666 -3 12376 12666 12664 -3 12377 12665 12379 -3 12667 12379 12665 -3 12378 12380 12666 -3 12668 12666 12380 -3 12379 12667 12381 -3 12669 12381 12667 -3 12380 12382 12668 -3 12670 12668 12382 -3 12381 12669 12383 -3 12671 12383 12669 -3 12382 12384 12670 -3 12672 12670 12384 -3 12383 12671 12385 -3 12673 12385 12671 -3 12384 12386 12672 -3 12674 12672 12386 -3 12385 12673 12387 -3 12675 12387 12673 -3 12386 12388 12674 -3 12676 12674 12388 -3 12387 12675 12389 -3 12677 12389 12675 -3 12388 12390 12676 -3 12678 12676 12390 -3 12389 12677 12391 -3 12679 12391 12677 -3 12390 12392 12678 -3 12680 12678 12392 -3 12391 12679 12393 -3 12681 12393 12679 -3 12392 12394 12680 -3 12682 12680 12394 -3 12393 12681 12395 -3 12683 12395 12681 -3 12394 12396 12682 -3 12684 12682 12396 -3 12395 12683 12397 -3 12685 12397 12683 -3 12396 12398 12684 -3 12686 12684 12398 -3 12397 
12685 12399 -3 12687 12399 12685 -3 12398 12400 12686 -3 12688 12686 12400 -3 12399 12687 12401 -3 12689 12401 12687 -3 12400 12402 12688 -3 12690 12688 12402 -3 12401 12689 12403 -3 12691 12403 12689 -3 12402 12404 12690 -3 12692 12690 12404 -3 12403 12691 12405 -3 12693 12405 12691 -3 12404 12406 12692 -3 12694 12692 12406 -3 12405 12693 12407 -3 12695 12407 12693 -3 12406 12408 12694 -3 12696 12694 12408 -3 12407 12695 12409 -3 12697 12409 12695 -3 12408 12410 12696 -3 12698 12696 12410 -3 12409 12697 12411 -3 12699 12411 12697 -3 12410 12412 12698 -3 12700 12698 12412 -3 12411 12699 12413 -3 12701 12413 12699 -3 12412 12414 12700 -3 12702 12700 12414 -3 12413 12701 12415 -3 12703 12415 12701 -3 12414 12416 12702 -3 12704 12702 12416 -3 12415 12703 12419 -3 12707 12419 12703 -3 12416 12417 12704 -3 12705 12704 12417 -3 12417 12420 12705 -3 12708 12705 12420 -3 12418 12419 12706 -3 12707 12706 12419 -3 12418 12706 12423 -3 12711 12423 12706 -3 12420 12421 12708 -3 12709 12708 12421 -3 12421 12424 12709 -3 12712 12709 12424 -3 12422 12423 12710 -3 12711 12710 12423 -3 12422 12710 12780 -3 12422 12780 12492 -3 12424 12425 12712 -3 12713 12712 12425 -3 12425 12426 12713 -3 12714 12713 12426 -3 12426 12427 12714 -3 12715 12714 12427 -3 12427 12428 12715 -3 12716 12715 12428 -3 12428 12429 12716 -3 12717 12716 12429 -3 12429 12430 12717 -3 12718 12717 12430 -3 12430 12431 12718 -3 12719 12718 12431 -3 12431 12432 12719 -3 12720 12719 12432 -3 12432 12433 12720 -3 12721 12720 12433 -3 12433 12434 12721 -3 12722 12721 12434 -3 12434 12435 12722 -3 12723 12722 12435 -3 12435 12436 12723 -3 12724 12723 12436 -3 12436 12437 12724 -3 12725 12724 12437 -3 12437 12438 12725 -3 12726 12725 12438 -3 12438 12439 12726 -3 12727 12726 12439 -3 12439 12440 12727 -3 12728 12727 12440 -3 12440 12441 12728 -3 12729 12728 12441 -3 12441 12442 12729 -3 12730 12729 12442 -3 12442 12443 12730 -3 12731 12730 12443 -3 12443 12444 12731 -3 12732 12731 12444 -3 12444 12445 12732 -3 12733 12732 12445 -3 12445 12446 12733 -3 12734 12733 12446 -3 12446 12447 12734 -3 12735 12734 12447 -3 12447 12448 12736 -3 12447 12736 12735 -3 12448 12449 12737 -3 12448 12737 12736 -3 12449 12450 12738 -3 12449 12738 12737 -3 12450 12451 12739 -3 12450 12739 12738 -3 12451 12452 12740 -3 12451 12740 12739 -3 12452 12453 12741 -3 12452 12741 12740 -3 12453 12454 12742 -3 12453 12742 12741 -3 12454 12455 12743 -3 12454 12743 12742 -3 12455 12456 12744 -3 12455 12744 12743 -3 12456 12457 12745 -3 12456 12745 12744 -3 12457 12458 12746 -3 12457 12746 12745 -3 12458 12459 12747 -3 12458 12747 12746 -3 12459 12460 12748 -3 12459 12748 12747 -3 12460 12461 12749 -3 12460 12749 12748 -3 12461 12462 12750 -3 12461 12750 12749 -3 12462 12463 12751 -3 12462 12751 12750 -3 12463 12464 12752 -3 12463 12752 12751 -3 12464 12465 12753 -3 12464 12753 12752 -3 12465 12466 12754 -3 12465 12754 12753 -3 12466 12467 12755 -3 12466 12755 12754 -3 12467 12468 12756 -3 12467 12756 12755 -3 12468 12469 12757 -3 12468 12757 12756 -3 12469 12470 12758 -3 12469 12758 12757 -3 12470 12471 12759 -3 12470 12759 12758 -3 12471 12472 12760 -3 12471 12760 12759 -3 12472 12473 12761 -3 12472 12761 12760 -3 12473 12474 12762 -3 12473 12762 12761 -3 12474 12475 12763 -3 12474 12763 12762 -3 12475 12476 12764 -3 12475 12764 12763 -3 12476 12477 12765 -3 12476 12765 12764 -3 12477 12478 12766 -3 12477 12766 12765 -3 12478 12479 12767 -3 12478 12767 12766 -3 12479 12480 12768 -3 12479 12768 12767 -3 12480 12481 12769 -3 12480 12769 12768 -3 12481 12482 12770 -3 12481 12770 
12769 -3 12482 12483 12771 -3 12482 12771 12770 -3 12483 12484 12772 -3 12483 12772 12771 -3 12484 12485 12773 -3 12484 12773 12772 -3 12485 12486 12774 -3 12485 12774 12773 -3 12486 12487 12775 -3 12486 12775 12774 -3 12487 12488 12776 -3 12487 12776 12775 -3 12488 12489 12777 -3 12488 12777 12776 -3 12489 12490 12778 -3 12489 12778 12777 -3 12490 12491 12779 -3 12490 12779 12778 -3 12491 12492 12780 -3 12491 12780 12779 -3 12493 12781 12494 -3 12782 12494 12781 -3 12493 12563 12781 -3 12851 12781 12563 -3 12494 12782 12495 -3 12783 12495 12782 -3 12495 12783 12496 -3 12784 12496 12783 -3 12496 12784 12497 -3 12785 12497 12784 -3 12497 12785 12498 -3 12786 12498 12785 -3 12498 12786 12499 -3 12787 12499 12786 -3 12499 12787 12500 -3 12788 12500 12787 -3 12500 12788 12501 -3 12789 12501 12788 -3 12501 12789 12502 -3 12790 12502 12789 -3 12502 12790 12503 -3 12791 12503 12790 -3 12503 12791 12504 -3 12792 12504 12791 -3 12504 12792 12505 -3 12793 12505 12792 -3 12505 12793 12506 -3 12794 12506 12793 -3 12506 12794 12507 -3 12795 12507 12794 -3 12507 12795 12508 -3 12796 12508 12795 -3 12508 12796 12509 -3 12797 12509 12796 -3 12509 12797 12510 -3 12798 12510 12797 -3 12510 12798 12511 -3 12799 12511 12798 -3 12511 12799 12512 -3 12800 12512 12799 -3 12512 12800 12513 -3 12801 12513 12800 -3 12513 12801 12514 -3 12802 12514 12801 -3 12514 12802 12515 -3 12803 12515 12802 -3 12515 12803 12516 -3 12804 12516 12803 -3 12516 12804 12517 -3 12805 12517 12804 -3 12517 12805 12518 -3 12806 12518 12805 -3 12518 12806 12519 -3 12807 12519 12806 -3 12519 12807 12520 -3 12808 12520 12807 -3 12520 12808 12521 -3 12809 12521 12808 -3 12521 12809 12522 -3 12810 12522 12809 -3 12522 12810 12523 -3 12811 12523 12810 -3 12523 12811 12524 -3 12812 12524 12811 -3 12524 12812 12525 -3 12813 12525 12812 -3 12525 12813 12526 -3 12814 12526 12813 -3 12526 12814 12527 -3 12815 12527 12814 -3 12527 12815 12528 -3 12816 12528 12815 -3 12528 12816 12529 -3 12817 12529 12816 -3 12529 12817 12530 -3 12818 12530 12817 -3 12530 12818 12531 -3 12819 12531 12818 -3 12531 12819 12532 -3 12820 12532 12819 -3 12532 12820 12533 -3 12821 12533 12820 -3 12533 12821 12534 -3 12822 12534 12821 -3 12534 12822 12535 -3 12823 12535 12822 -3 12535 12823 12536 -3 12824 12536 12823 -3 12536 12824 12537 -3 12825 12537 12824 -3 12537 12825 12538 -3 12826 12538 12825 -3 12538 12826 12539 -3 12827 12539 12826 -3 12539 12827 12540 -3 12828 12540 12827 -3 12540 12828 12541 -3 12829 12541 12828 -3 12541 12829 12542 -3 12830 12542 12829 -3 12542 12830 12543 -3 12831 12543 12830 -3 12543 12831 12544 -3 12832 12544 12831 -3 12544 12832 12545 -3 12833 12545 12832 -3 12545 12833 12546 -3 12834 12546 12833 -3 12546 12834 12547 -3 12835 12547 12834 -3 12547 12835 12548 -3 12836 12548 12835 -3 12548 12836 12549 -3 12837 12549 12836 -3 12549 12837 12550 -3 12838 12550 12837 -3 12550 12838 12551 -3 12839 12551 12838 -3 12551 12839 12552 -3 12840 12552 12839 -3 12552 12840 12553 -3 12841 12553 12840 -3 12553 12841 12554 -3 12842 12554 12841 -3 12554 12842 12555 -3 12843 12555 12842 -3 12555 12843 12556 -3 12844 12556 12843 -3 12556 12844 12557 -3 12845 12557 12844 -3 12557 12845 12558 -3 12846 12558 12845 -3 12558 12846 12847 -3 12558 12847 12559 -3 12559 12847 12848 -3 12559 12848 12560 -3 12560 12848 12849 -3 12560 12849 12561 -3 12561 12849 12852 -3 12561 12852 12564 -3 12562 12850 12851 -3 12562 12851 12563 -3 12562 12567 12850 -3 12855 12850 12567 -3 12564 12852 12853 -3 12564 12853 12565 -3 12565 12853 12856 -3 12565 12856 12568 -3 12566 12854 12855 
-3 12566 12855 12567 -3 12566 12570 12854 -3 12858 12854 12570 -3 12568 12856 12857 -3 12568 12857 12569 -3 12569 12857 12859 -3 12569 12859 12571 -3 12570 12572 12858 -3 12860 12858 12572 -3 12571 12859 12861 -3 12571 12861 12573 -3 12572 12574 12860 -3 12862 12860 12574 -3 12573 12861 12863 -3 12573 12863 12575 -3 12574 12576 12862 -3 12864 12862 12576 -3 12575 12863 12865 -3 12575 12865 12577 -3 12576 12578 12864 -3 12866 12864 12578 -3 12577 12865 12867 -3 12577 12867 12579 -3 12578 12580 12866 -3 12868 12866 12580 -3 12579 12867 12869 -3 12579 12869 12581 -3 12580 12582 12868 -3 12870 12868 12582 -3 12581 12869 12871 -3 12581 12871 12583 -3 12582 12584 12870 -3 12872 12870 12584 -3 12583 12871 12873 -3 12583 12873 12585 -3 12584 12586 12874 -3 12584 12874 12872 -3 12585 12873 12875 -3 12585 12875 12587 -3 12586 12588 12876 -3 12586 12876 12874 -3 12587 12875 12877 -3 12587 12877 12589 -3 12588 12590 12878 -3 12588 12878 12876 -3 12589 12877 12879 -3 12589 12879 12591 -3 12590 12592 12880 -3 12590 12880 12878 -3 12591 12879 12881 -3 12591 12881 12593 -3 12592 12594 12882 -3 12592 12882 12880 -3 12593 12881 12883 -3 12593 12883 12595 -3 12594 12596 12884 -3 12594 12884 12882 -3 12595 12883 12885 -3 12595 12885 12597 -3 12596 12598 12886 -3 12596 12886 12884 -3 12597 12885 12887 -3 12597 12887 12599 -3 12598 12600 12888 -3 12598 12888 12886 -3 12599 12887 12889 -3 12599 12889 12601 -3 12600 12602 12890 -3 12600 12890 12888 -3 12601 12889 12891 -3 12601 12891 12603 -3 12602 12604 12892 -3 12602 12892 12890 -3 12603 12891 12893 -3 12603 12893 12605 -3 12604 12606 12894 -3 12604 12894 12892 -3 12605 12893 12895 -3 12605 12895 12607 -3 12606 12608 12896 -3 12606 12896 12894 -3 12607 12895 12897 -3 12607 12897 12609 -3 12608 12610 12898 -3 12608 12898 12896 -3 12609 12897 12899 -3 12609 12899 12611 -3 12610 12612 12900 -3 12610 12900 12898 -3 12611 12899 12901 -3 12611 12901 12613 -3 12612 12614 12902 -3 12612 12902 12900 -3 12613 12901 12903 -3 12613 12903 12615 -3 12614 12616 12904 -3 12614 12904 12902 -3 12615 12903 12905 -3 12615 12905 12617 -3 12616 12618 12906 -3 12616 12906 12904 -3 12617 12905 12907 -3 12617 12907 12619 -3 12618 12620 12908 -3 12618 12908 12906 -3 12619 12907 12909 -3 12619 12909 12621 -3 12620 12622 12910 -3 12620 12910 12908 -3 12621 12909 12911 -3 12621 12911 12623 -3 12622 12624 12912 -3 12622 12912 12910 -3 12623 12911 12913 -3 12623 12913 12625 -3 12624 12626 12914 -3 12624 12914 12912 -3 12625 12913 12915 -3 12625 12915 12627 -3 12626 12628 12916 -3 12626 12916 12914 -3 12627 12915 12629 -3 12917 12629 12915 -3 12628 12630 12918 -3 12628 12918 12916 -3 12629 12917 12631 -3 12919 12631 12917 -3 12630 12632 12920 -3 12630 12920 12918 -3 12631 12919 12633 -3 12921 12633 12919 -3 12632 12634 12922 -3 12632 12922 12920 -3 12633 12921 12635 -3 12923 12635 12921 -3 12634 12636 12924 -3 12634 12924 12922 -3 12635 12923 12637 -3 12925 12637 12923 -3 12636 12638 12926 -3 12636 12926 12924 -3 12637 12925 12639 -3 12927 12639 12925 -3 12638 12640 12928 -3 12638 12928 12926 -3 12639 12927 12641 -3 12929 12641 12927 -3 12640 12642 12930 -3 12640 12930 12928 -3 12641 12929 12643 -3 12931 12643 12929 -3 12642 12644 12932 -3 12642 12932 12930 -3 12643 12931 12645 -3 12933 12645 12931 -3 12644 12646 12934 -3 12644 12934 12932 -3 12645 12933 12647 -3 12935 12647 12933 -3 12646 12648 12936 -3 12646 12936 12934 -3 12647 12935 12649 -3 12937 12649 12935 -3 12648 12650 12938 -3 12648 12938 12936 -3 12649 12937 12651 -3 12939 12651 12937 -3 12650 12652 12940 -3 12650 12940 12938 -3 
12651 12939 12653 -3 12941 12653 12939 -3 12652 12654 12942 -3 12652 12942 12940 -3 12653 12941 12655 -3 12943 12655 12941 -3 12654 12656 12942 -3 12944 12942 12656 -3 12655 12943 12657 -3 12945 12657 12943 -3 12656 12658 12944 -3 12946 12944 12658 -3 12657 12945 12659 -3 12947 12659 12945 -3 12658 12660 12946 -3 12948 12946 12660 -3 12659 12947 12661 -3 12949 12661 12947 -3 12660 12662 12948 -3 12950 12948 12662 -3 12661 12949 12663 -3 12951 12663 12949 -3 12662 12664 12950 -3 12952 12950 12664 -3 12663 12951 12665 -3 12953 12665 12951 -3 12664 12666 12952 -3 12954 12952 12666 -3 12665 12953 12667 -3 12955 12667 12953 -3 12666 12668 12954 -3 12956 12954 12668 -3 12667 12955 12669 -3 12957 12669 12955 -3 12668 12670 12956 -3 12958 12956 12670 -3 12669 12957 12671 -3 12959 12671 12957 -3 12670 12672 12958 -3 12960 12958 12672 -3 12671 12959 12673 -3 12961 12673 12959 -3 12672 12674 12960 -3 12962 12960 12674 -3 12673 12961 12675 -3 12963 12675 12961 -3 12674 12676 12962 -3 12964 12962 12676 -3 12675 12963 12677 -3 12965 12677 12963 -3 12676 12678 12964 -3 12966 12964 12678 -3 12677 12965 12679 -3 12967 12679 12965 -3 12678 12680 12966 -3 12968 12966 12680 -3 12679 12967 12681 -3 12969 12681 12967 -3 12680 12682 12968 -3 12970 12968 12682 -3 12681 12969 12683 -3 12971 12683 12969 -3 12682 12684 12970 -3 12972 12970 12684 -3 12683 12971 12685 -3 12973 12685 12971 -3 12684 12686 12972 -3 12974 12972 12686 -3 12685 12973 12687 -3 12975 12687 12973 -3 12686 12688 12974 -3 12976 12974 12688 -3 12687 12975 12689 -3 12977 12689 12975 -3 12688 12690 12976 -3 12978 12976 12690 -3 12689 12977 12691 -3 12979 12691 12977 -3 12690 12692 12978 -3 12980 12978 12692 -3 12691 12979 12693 -3 12981 12693 12979 -3 12692 12694 12980 -3 12982 12980 12694 -3 12693 12981 12695 -3 12983 12695 12981 -3 12694 12696 12982 -3 12984 12982 12696 -3 12695 12983 12697 -3 12985 12697 12983 -3 12696 12698 12984 -3 12986 12984 12698 -3 12697 12985 12987 -3 12697 12987 12699 -3 12698 12700 12986 -3 12988 12986 12700 -3 12699 12987 12989 -3 12699 12989 12701 -3 12700 12702 12988 -3 12990 12988 12702 -3 12701 12989 12991 -3 12701 12991 12703 -3 12702 12704 12990 -3 12992 12990 12704 -3 12703 12991 12995 -3 12703 12995 12707 -3 12704 12705 12992 -3 12993 12992 12705 -3 12705 12708 12993 -3 12996 12993 12708 -3 12706 12707 12994 -3 12995 12994 12707 -3 12706 12994 12999 -3 12706 12999 12711 -3 12708 12709 12996 -3 12997 12996 12709 -3 12709 12712 12997 -3 13000 12997 12712 -3 12710 12711 12998 -3 12999 12998 12711 -3 12710 12998 13068 -3 12710 13068 12780 -3 12712 12713 13000 -3 13001 13000 12713 -3 12713 12714 13001 -3 13002 13001 12714 -3 12714 12715 13002 -3 13003 13002 12715 -3 12715 12716 13003 -3 13004 13003 12716 -3 12716 12717 13004 -3 13005 13004 12717 -3 12717 12718 13005 -3 13006 13005 12718 -3 12718 12719 13006 -3 13007 13006 12719 -3 12719 12720 13007 -3 13008 13007 12720 -3 12720 12721 13008 -3 13009 13008 12721 -3 12721 12722 13009 -3 13010 13009 12722 -3 12722 12723 13010 -3 13011 13010 12723 -3 12723 12724 13012 -3 12723 13012 13011 -3 12724 12725 13013 -3 12724 13013 13012 -3 12725 12726 13014 -3 12725 13014 13013 -3 12726 12727 13015 -3 12726 13015 13014 -3 12727 12728 13016 -3 12727 13016 13015 -3 12728 12729 13017 -3 12728 13017 13016 -3 12729 12730 13018 -3 12729 13018 13017 -3 12730 12731 13019 -3 12730 13019 13018 -3 12731 12732 13020 -3 12731 13020 13019 -3 12732 12733 13021 -3 12732 13021 13020 -3 12733 12734 13022 -3 12733 13022 13021 -3 12734 12735 13023 -3 12734 13023 13022 -3 12735 12736 13024 -3 12735 
13024 13023 -3 12736 12737 13025 -3 12736 13025 13024 -3 12737 12738 13026 -3 12737 13026 13025 -3 12738 12739 13027 -3 12738 13027 13026 -3 12739 12740 13028 -3 12739 13028 13027 -3 12740 12741 13029 -3 12740 13029 13028 -3 12741 12742 13030 -3 12741 13030 13029 -3 12742 12743 13031 -3 12742 13031 13030 -3 12743 12744 13032 -3 12743 13032 13031 -3 12744 12745 13033 -3 12744 13033 13032 -3 12745 12746 13034 -3 12745 13034 13033 -3 12746 12747 13035 -3 12746 13035 13034 -3 12747 12748 13036 -3 12747 13036 13035 -3 12748 12749 13037 -3 12748 13037 13036 -3 12749 12750 13038 -3 12749 13038 13037 -3 12750 12751 13039 -3 12750 13039 13038 -3 12751 12752 13040 -3 12751 13040 13039 -3 12752 12753 13041 -3 12752 13041 13040 -3 12753 12754 13042 -3 12753 13042 13041 -3 12754 12755 13043 -3 12754 13043 13042 -3 12755 12756 13044 -3 12755 13044 13043 -3 12756 12757 13045 -3 12756 13045 13044 -3 12757 12758 13046 -3 12757 13046 13045 -3 12758 12759 13047 -3 12758 13047 13046 -3 12759 12760 13048 -3 12759 13048 13047 -3 12760 12761 13049 -3 12760 13049 13048 -3 12761 12762 13050 -3 12761 13050 13049 -3 12762 12763 13051 -3 12762 13051 13050 -3 12763 12764 13052 -3 12763 13052 13051 -3 12764 12765 13053 -3 12764 13053 13052 -3 12765 12766 13054 -3 12765 13054 13053 -3 12766 12767 13055 -3 12766 13055 13054 -3 12767 12768 13056 -3 12767 13056 13055 -3 12768 12769 13057 -3 12768 13057 13056 -3 12769 12770 13058 -3 12769 13058 13057 -3 12770 12771 13059 -3 12770 13059 13058 -3 12771 12772 13060 -3 12771 13060 13059 -3 12772 12773 13061 -3 12772 13061 13060 -3 12773 12774 13062 -3 12773 13062 13061 -3 12774 12775 13063 -3 12774 13063 13062 -3 12775 12776 13064 -3 12775 13064 13063 -3 12776 12777 13065 -3 12776 13065 13064 -3 12777 12778 13066 -3 12777 13066 13065 -3 12778 12779 13067 -3 12778 13067 13066 -3 12779 12780 13068 -3 12779 13068 13067 -3 12781 13069 12782 -3 13070 12782 13069 -3 12781 12851 13069 -3 13139 13069 12851 -3 12782 13070 12783 -3 13071 12783 13070 -3 12783 13071 12784 -3 13072 12784 13071 -3 12784 13072 12785 -3 13073 12785 13072 -3 12785 13073 12786 -3 13074 12786 13073 -3 12786 13074 12787 -3 13075 12787 13074 -3 12787 13075 12788 -3 13076 12788 13075 -3 12788 13076 12789 -3 13077 12789 13076 -3 12789 13077 12790 -3 13078 12790 13077 -3 12790 13078 12791 -3 13079 12791 13078 -3 12791 13079 12792 -3 13080 12792 13079 -3 12792 13080 12793 -3 13081 12793 13080 -3 12793 13081 12794 -3 13082 12794 13081 -3 12794 13082 12795 -3 13083 12795 13082 -3 12795 13083 12796 -3 13084 12796 13083 -3 12796 13084 12797 -3 13085 12797 13084 -3 12797 13085 12798 -3 13086 12798 13085 -3 12798 13086 12799 -3 13087 12799 13086 -3 12799 13087 12800 -3 13088 12800 13087 -3 12800 13088 12801 -3 13089 12801 13088 -3 12801 13089 12802 -3 13090 12802 13089 -3 12802 13090 12803 -3 13091 12803 13090 -3 12803 13091 12804 -3 13092 12804 13091 -3 12804 13092 12805 -3 13093 12805 13092 -3 12805 13093 12806 -3 13094 12806 13093 -3 12806 13094 12807 -3 13095 12807 13094 -3 12807 13095 12808 -3 13096 12808 13095 -3 12808 13096 12809 -3 13097 12809 13096 -3 12809 13097 12810 -3 13098 12810 13097 -3 12810 13098 12811 -3 13099 12811 13098 -3 12811 13099 12812 -3 13100 12812 13099 -3 12812 13100 12813 -3 13101 12813 13100 -3 12813 13101 12814 -3 13102 12814 13101 -3 12814 13102 12815 -3 13103 12815 13102 -3 12815 13103 12816 -3 13104 12816 13103 -3 12816 13104 12817 -3 13105 12817 13104 -3 12817 13105 12818 -3 13106 12818 13105 -3 12818 13106 12819 -3 13107 12819 13106 -3 12819 13107 12820 -3 13108 12820 13107 -3 12820 13108 
12821 -3 13109 12821 13108 -3 12821 13109 12822 -3 13110 12822 13109 -3 12822 13110 12823 -3 13111 12823 13110 -3 12823 13111 12824 -3 13112 12824 13111 -3 12824 13112 12825 -3 13113 12825 13112 -3 12825 13113 12826 -3 13114 12826 13113 -3 12826 13114 12827 -3 13115 12827 13114 -3 12827 13115 12828 -3 13116 12828 13115 -3 12828 13116 12829 -3 13117 12829 13116 -3 12829 13117 12830 -3 13118 12830 13117 -3 12830 13118 12831 -3 13119 12831 13118 -3 12831 13119 12832 -3 13120 12832 13119 -3 12832 13120 12833 -3 13121 12833 13120 -3 12833 13121 12834 -3 13122 12834 13121 -3 12834 13122 12835 -3 13123 12835 13122 -3 12835 13123 12836 -3 13124 12836 13123 -3 12836 13124 12837 -3 13125 12837 13124 -3 12837 13125 13126 -3 12837 13126 12838 -3 12838 13126 13127 -3 12838 13127 12839 -3 12839 13127 13128 -3 12839 13128 12840 -3 12840 13128 13129 -3 12840 13129 12841 -3 12841 13129 13130 -3 12841 13130 12842 -3 12842 13130 13131 -3 12842 13131 12843 -3 12843 13131 13132 -3 12843 13132 12844 -3 12844 13132 13133 -3 12844 13133 12845 -3 12845 13133 13134 -3 12845 13134 12846 -3 12846 13134 13135 -3 12846 13135 12847 -3 12847 13135 13136 -3 12847 13136 12848 -3 12848 13136 13137 -3 12848 13137 12849 -3 12849 13137 13140 -3 12849 13140 12852 -3 12850 13138 13139 -3 12850 13139 12851 -3 12850 12855 13138 -3 13143 13138 12855 -3 12852 13140 13141 -3 12852 13141 12853 -3 12853 13141 13144 -3 12853 13144 12856 -3 12854 13142 13143 -3 12854 13143 12855 -3 12854 12858 13142 -3 13146 13142 12858 -3 12856 13144 13145 -3 12856 13145 12857 -3 12857 13145 13147 -3 12857 13147 12859 -3 12858 12860 13146 -3 13148 13146 12860 -3 12859 13147 13149 -3 12859 13149 12861 -3 12860 12862 13148 -3 13150 13148 12862 -3 12861 13149 13151 -3 12861 13151 12863 -3 12862 12864 13152 -3 12862 13152 13150 -3 12863 13151 13153 -3 12863 13153 12865 -3 12864 12866 13154 -3 12864 13154 13152 -3 12865 13153 13155 -3 12865 13155 12867 -3 12866 12868 13156 -3 12866 13156 13154 -3 12867 13155 13157 -3 12867 13157 12869 -3 12868 12870 13158 -3 12868 13158 13156 -3 12869 13157 13159 -3 12869 13159 12871 -3 12870 12872 13160 -3 12870 13160 13158 -3 12871 13159 13161 -3 12871 13161 12873 -3 12872 12874 13162 -3 12872 13162 13160 -3 12873 13161 13163 -3 12873 13163 12875 -3 12874 12876 13164 -3 12874 13164 13162 -3 12875 13163 13165 -3 12875 13165 12877 -3 12876 12878 13166 -3 12876 13166 13164 -3 12877 13165 13167 -3 12877 13167 12879 -3 12878 12880 13168 -3 12878 13168 13166 -3 12879 13167 13169 -3 12879 13169 12881 -3 12880 12882 13170 -3 12880 13170 13168 -3 12881 13169 13171 -3 12881 13171 12883 -3 12882 12884 13172 -3 12882 13172 13170 -3 12883 13171 13173 -3 12883 13173 12885 -3 12884 12886 13174 -3 12884 13174 13172 -3 12885 13173 13175 -3 12885 13175 12887 -3 12886 12888 13176 -3 12886 13176 13174 -3 12887 13175 13177 -3 12887 13177 12889 -3 12888 12890 13178 -3 12888 13178 13176 -3 12889 13177 13179 -3 12889 13179 12891 -3 12890 12892 13180 -3 12890 13180 13178 -3 12891 13179 13181 -3 12891 13181 12893 -3 12892 12894 13182 -3 12892 13182 13180 -3 12893 13181 13183 -3 12893 13183 12895 -3 12894 12896 13184 -3 12894 13184 13182 -3 12895 13183 13185 -3 12895 13185 12897 -3 12896 12898 13186 -3 12896 13186 13184 -3 12897 13185 13187 -3 12897 13187 12899 -3 12898 12900 13188 -3 12898 13188 13186 -3 12899 13187 13189 -3 12899 13189 12901 -3 12900 12902 13190 -3 12900 13190 13188 -3 12901 13189 13191 -3 12901 13191 12903 -3 12902 12904 13192 -3 12902 13192 13190 -3 12903 13191 13193 -3 12903 13193 12905 -3 12904 12906 13194 -3 12904 13194 13192 
-3 12905 13193 13195 -3 12905 13195 12907 -3 12906 12908 13196 -3 12906 13196 13194 -3 12907 13195 12909 -3 13197 12909 13195 -3 12908 12910 13198 -3 12908 13198 13196 -3 12909 13197 12911 -3 13199 12911 13197 -3 12910 12912 13200 -3 12910 13200 13198 -3 12911 13199 12913 -3 13201 12913 13199 -3 12912 12914 13202 -3 12912 13202 13200 -3 12913 13201 12915 -3 13203 12915 13201 -3 12914 12916 13204 -3 12914 13204 13202 -3 12915 13203 12917 -3 13205 12917 13203 -3 12916 12918 13206 -3 12916 13206 13204 -3 12917 13205 12919 -3 13207 12919 13205 -3 12918 12920 13208 -3 12918 13208 13206 -3 12919 13207 12921 -3 13209 12921 13207 -3 12920 12922 13210 -3 12920 13210 13208 -3 12921 13209 12923 -3 13211 12923 13209 -3 12922 12924 13212 -3 12922 13212 13210 -3 12923 13211 12925 -3 13213 12925 13211 -3 12924 12926 13214 -3 12924 13214 13212 -3 12925 13213 12927 -3 13215 12927 13213 -3 12926 12928 13216 -3 12926 13216 13214 -3 12927 13215 12929 -3 13217 12929 13215 -3 12928 12930 13218 -3 12928 13218 13216 -3 12929 13217 12931 -3 13219 12931 13217 -3 12930 12932 13220 -3 12930 13220 13218 -3 12931 13219 12933 -3 13221 12933 13219 -3 12932 12934 13220 -3 13222 13220 12934 -3 12933 13221 12935 -3 13223 12935 13221 -3 12934 12936 13222 -3 13224 13222 12936 -3 12935 13223 12937 -3 13225 12937 13223 -3 12936 12938 13224 -3 13226 13224 12938 -3 12937 13225 12939 -3 13227 12939 13225 -3 12938 12940 13226 -3 13228 13226 12940 -3 12939 13227 12941 -3 13229 12941 13227 -3 12940 12942 13228 -3 13230 13228 12942 -3 12941 13229 12943 -3 13231 12943 13229 -3 12942 12944 13230 -3 13232 13230 12944 -3 12943 13231 12945 -3 13233 12945 13231 -3 12944 12946 13232 -3 13234 13232 12946 -3 12945 13233 12947 -3 13235 12947 13233 -3 12946 12948 13234 -3 13236 13234 12948 -3 12947 13235 12949 -3 13237 12949 13235 -3 12948 12950 13236 -3 13238 13236 12950 -3 12949 13237 12951 -3 13239 12951 13237 -3 12950 12952 13238 -3 13240 13238 12952 -3 12951 13239 12953 -3 13241 12953 13239 -3 12952 12954 13240 -3 13242 13240 12954 -3 12953 13241 12955 -3 13243 12955 13241 -3 12954 12956 13242 -3 13244 13242 12956 -3 12955 13243 12957 -3 13245 12957 13243 -3 12956 12958 13244 -3 13246 13244 12958 -3 12957 13245 12959 -3 13247 12959 13245 -3 12958 12960 13246 -3 13248 13246 12960 -3 12959 13247 12961 -3 13249 12961 13247 -3 12960 12962 13248 -3 13250 13248 12962 -3 12961 13249 12963 -3 13251 12963 13249 -3 12962 12964 13250 -3 13252 13250 12964 -3 12963 13251 12965 -3 13253 12965 13251 -3 12964 12966 13252 -3 13254 13252 12966 -3 12965 13253 12967 -3 13255 12967 13253 -3 12966 12968 13254 -3 13256 13254 12968 -3 12967 13255 12969 -3 13257 12969 13255 -3 12968 12970 13256 -3 13258 13256 12970 -3 12969 13257 12971 -3 13259 12971 13257 -3 12970 12972 13258 -3 13260 13258 12972 -3 12971 13259 12973 -3 13261 12973 13259 -3 12972 12974 13260 -3 13262 13260 12974 -3 12973 13261 12975 -3 13263 12975 13261 -3 12974 12976 13262 -3 13264 13262 12976 -3 12975 13263 12977 -3 13265 12977 13263 -3 12976 12978 13264 -3 13266 13264 12978 -3 12977 13265 12979 -3 13267 12979 13265 -3 12978 12980 13266 -3 13268 13266 12980 -3 12979 13267 13269 -3 12979 13269 12981 -3 12980 12982 13268 -3 13270 13268 12982 -3 12981 13269 13271 -3 12981 13271 12983 -3 12982 12984 13270 -3 13272 13270 12984 -3 12983 13271 13273 -3 12983 13273 12985 -3 12984 12986 13272 -3 13274 13272 12986 -3 12985 13273 13275 -3 12985 13275 12987 -3 12986 12988 13274 -3 13276 13274 12988 -3 12987 13275 13277 -3 12987 13277 12989 -3 12988 12990 13276 -3 13278 13276 12990 -3 12989 13277 13279 -3 
12989 13279 12991 -3 12990 12992 13278 -3 13280 13278 12992 -3 12991 13279 13283 -3 12991 13283 12995 -3 12992 12993 13280 -3 13281 13280 12993 -3 12993 12996 13281 -3 13284 13281 12996 -3 12994 12995 13282 -3 13283 13282 12995 -3 12994 13282 13287 -3 12994 13287 12999 -3 12996 12997 13284 -3 13285 13284 12997 -3 12997 13000 13285 -3 13288 13285 13000 -3 12998 12999 13286 -3 13287 13286 12999 -3 12998 13286 13356 -3 12998 13356 13068 -3 13000 13001 13288 -3 13289 13288 13001 -3 13001 13002 13289 -3 13290 13289 13002 -3 13002 13003 13291 -3 13002 13291 13290 -3 13003 13004 13292 -3 13003 13292 13291 -3 13004 13005 13293 -3 13004 13293 13292 -3 13005 13006 13294 -3 13005 13294 13293 -3 13006 13007 13295 -3 13006 13295 13294 -3 13007 13008 13296 -3 13007 13296 13295 -3 13008 13009 13297 -3 13008 13297 13296 -3 13009 13010 13298 -3 13009 13298 13297 -3 13010 13011 13299 -3 13010 13299 13298 -3 13011 13012 13300 -3 13011 13300 13299 -3 13012 13013 13301 -3 13012 13301 13300 -3 13013 13014 13302 -3 13013 13302 13301 -3 13014 13015 13303 -3 13014 13303 13302 -3 13015 13016 13304 -3 13015 13304 13303 -3 13016 13017 13305 -3 13016 13305 13304 -3 13017 13018 13306 -3 13017 13306 13305 -3 13018 13019 13307 -3 13018 13307 13306 -3 13019 13020 13308 -3 13019 13308 13307 -3 13020 13021 13309 -3 13020 13309 13308 -3 13021 13022 13310 -3 13021 13310 13309 -3 13022 13023 13311 -3 13022 13311 13310 -3 13023 13024 13312 -3 13023 13312 13311 -3 13024 13025 13313 -3 13024 13313 13312 -3 13025 13026 13314 -3 13025 13314 13313 -3 13026 13027 13315 -3 13026 13315 13314 -3 13027 13028 13316 -3 13027 13316 13315 -3 13028 13029 13317 -3 13028 13317 13316 -3 13029 13030 13318 -3 13029 13318 13317 -3 13030 13031 13319 -3 13030 13319 13318 -3 13031 13032 13320 -3 13031 13320 13319 -3 13032 13033 13321 -3 13032 13321 13320 -3 13033 13034 13322 -3 13033 13322 13321 -3 13034 13035 13323 -3 13034 13323 13322 -3 13035 13036 13324 -3 13035 13324 13323 -3 13036 13037 13325 -3 13036 13325 13324 -3 13037 13038 13326 -3 13037 13326 13325 -3 13038 13039 13327 -3 13038 13327 13326 -3 13039 13040 13328 -3 13039 13328 13327 -3 13040 13041 13329 -3 13040 13329 13328 -3 13041 13042 13330 -3 13041 13330 13329 -3 13042 13043 13331 -3 13042 13331 13330 -3 13043 13044 13332 -3 13043 13332 13331 -3 13044 13045 13333 -3 13044 13333 13332 -3 13045 13046 13334 -3 13045 13334 13333 -3 13046 13047 13335 -3 13046 13335 13334 -3 13047 13048 13336 -3 13047 13336 13335 -3 13048 13049 13337 -3 13048 13337 13336 -3 13049 13050 13338 -3 13049 13338 13337 -3 13050 13051 13339 -3 13050 13339 13338 -3 13051 13052 13340 -3 13051 13340 13339 -3 13052 13053 13341 -3 13052 13341 13340 -3 13053 13054 13342 -3 13053 13342 13341 -3 13054 13055 13343 -3 13054 13343 13342 -3 13055 13056 13344 -3 13055 13344 13343 -3 13056 13057 13345 -3 13056 13345 13344 -3 13057 13058 13346 -3 13057 13346 13345 -3 13058 13059 13347 -3 13058 13347 13346 -3 13059 13060 13348 -3 13059 13348 13347 -3 13060 13061 13349 -3 13060 13349 13348 -3 13061 13062 13350 -3 13061 13350 13349 -3 13062 13063 13351 -3 13062 13351 13350 -3 13063 13064 13352 -3 13063 13352 13351 -3 13064 13065 13353 -3 13064 13353 13352 -3 13065 13066 13354 -3 13065 13354 13353 -3 13066 13067 13355 -3 13066 13355 13354 -3 13067 13068 13356 -3 13067 13356 13355 -3 13069 13357 13070 -3 13358 13070 13357 -3 13069 13139 13357 -3 13427 13357 13139 -3 13070 13358 13071 -3 13359 13071 13358 -3 13071 13359 13072 -3 13360 13072 13359 -3 13072 13360 13073 -3 13361 13073 13360 -3 13073 13361 13074 -3 13362 13074 13361 -3 13074 
13362 13075 -3 13363 13075 13362 -3 13075 13363 13076 -3 13364 13076 13363 -3 13076 13364 13077 -3 13365 13077 13364 -3 13077 13365 13078 -3 13366 13078 13365 -3 13078 13366 13079 -3 13367 13079 13366 -3 13079 13367 13080 -3 13368 13080 13367 -3 13080 13368 13081 -3 13369 13081 13368 -3 13081 13369 13082 -3 13370 13082 13369 -3 13082 13370 13083 -3 13371 13083 13370 -3 13083 13371 13084 -3 13372 13084 13371 -3 13084 13372 13085 -3 13373 13085 13372 -3 13085 13373 13086 -3 13374 13086 13373 -3 13086 13374 13087 -3 13375 13087 13374 -3 13087 13375 13088 -3 13376 13088 13375 -3 13088 13376 13089 -3 13377 13089 13376 -3 13089 13377 13090 -3 13378 13090 13377 -3 13090 13378 13091 -3 13379 13091 13378 -3 13091 13379 13092 -3 13380 13092 13379 -3 13092 13380 13093 -3 13381 13093 13380 -3 13093 13381 13094 -3 13382 13094 13381 -3 13094 13382 13095 -3 13383 13095 13382 -3 13095 13383 13096 -3 13384 13096 13383 -3 13096 13384 13097 -3 13385 13097 13384 -3 13097 13385 13098 -3 13386 13098 13385 -3 13098 13386 13099 -3 13387 13099 13386 -3 13099 13387 13100 -3 13388 13100 13387 -3 13100 13388 13101 -3 13389 13101 13388 -3 13101 13389 13102 -3 13390 13102 13389 -3 13102 13390 13103 -3 13391 13103 13390 -3 13103 13391 13104 -3 13392 13104 13391 -3 13104 13392 13105 -3 13393 13105 13392 -3 13105 13393 13106 -3 13394 13106 13393 -3 13106 13394 13107 -3 13395 13107 13394 -3 13107 13395 13108 -3 13396 13108 13395 -3 13108 13396 13109 -3 13397 13109 13396 -3 13109 13397 13110 -3 13398 13110 13397 -3 13110 13398 13111 -3 13399 13111 13398 -3 13111 13399 13112 -3 13400 13112 13399 -3 13112 13400 13113 -3 13401 13113 13400 -3 13113 13401 13114 -3 13402 13114 13401 -3 13114 13402 13115 -3 13403 13115 13402 -3 13115 13403 13116 -3 13404 13116 13403 -3 13116 13404 13117 -3 13405 13117 13404 -3 13117 13405 13118 -3 13406 13118 13405 -3 13118 13406 13119 -3 13407 13119 13406 -3 13119 13407 13408 -3 13119 13408 13120 -3 13120 13408 13409 -3 13120 13409 13121 -3 13121 13409 13410 -3 13121 13410 13122 -3 13122 13410 13411 -3 13122 13411 13123 -3 13123 13411 13412 -3 13123 13412 13124 -3 13124 13412 13413 -3 13124 13413 13125 -3 13125 13413 13414 -3 13125 13414 13126 -3 13126 13414 13415 -3 13126 13415 13127 -3 13127 13415 13416 -3 13127 13416 13128 -3 13128 13416 13417 -3 13128 13417 13129 -3 13129 13417 13418 -3 13129 13418 13130 -3 13130 13418 13419 -3 13130 13419 13131 -3 13131 13419 13420 -3 13131 13420 13132 -3 13132 13420 13421 -3 13132 13421 13133 -3 13133 13421 13422 -3 13133 13422 13134 -3 13134 13422 13423 -3 13134 13423 13135 -3 13135 13423 13424 -3 13135 13424 13136 -3 13136 13424 13425 -3 13136 13425 13137 -3 13137 13425 13428 -3 13137 13428 13140 -3 13138 13426 13427 -3 13138 13427 13139 -3 13138 13143 13426 -3 13431 13426 13143 -3 13140 13428 13429 -3 13140 13429 13141 -3 13141 13429 13432 -3 13141 13432 13144 -3 13142 13430 13431 -3 13142 13431 13143 -3 13142 13146 13434 -3 13142 13434 13430 -3 13144 13432 13433 -3 13144 13433 13145 -3 13145 13433 13435 -3 13145 13435 13147 -3 13146 13148 13436 -3 13146 13436 13434 -3 13147 13435 13437 -3 13147 13437 13149 -3 13148 13150 13438 -3 13148 13438 13436 -3 13149 13437 13439 -3 13149 13439 13151 -3 13150 13152 13440 -3 13150 13440 13438 -3 13151 13439 13441 -3 13151 13441 13153 -3 13152 13154 13442 -3 13152 13442 13440 -3 13153 13441 13443 -3 13153 13443 13155 -3 13154 13156 13444 -3 13154 13444 13442 -3 13155 13443 13445 -3 13155 13445 13157 -3 13156 13158 13446 -3 13156 13446 13444 -3 13157 13445 13447 -3 13157 13447 13159 -3 13158 13160 13448 -3 13158 13448 
13446 -3 13159 13447 13449 -3 13159 13449 13161 -3 13160 13162 13450 -3 13160 13450 13448 -3 13161 13449 13451 -3 13161 13451 13163 -3 13162 13164 13452 -3 13162 13452 13450 -3 13163 13451 13453 -3 13163 13453 13165 -3 13164 13166 13454 -3 13164 13454 13452 -3 13165 13453 13455 -3 13165 13455 13167 -3 13166 13168 13456 -3 13166 13456 13454 -3 13167 13455 13457 -3 13167 13457 13169 -3 13168 13170 13458 -3 13168 13458 13456 -3 13169 13457 13459 -3 13169 13459 13171 -3 13170 13172 13460 -3 13170 13460 13458 -3 13171 13459 13461 -3 13171 13461 13173 -3 13172 13174 13462 -3 13172 13462 13460 -3 13173 13461 13463 -3 13173 13463 13175 -3 13174 13176 13464 -3 13174 13464 13462 -3 13175 13463 13465 -3 13175 13465 13177 -3 13176 13178 13466 -3 13176 13466 13464 -3 13177 13465 13467 -3 13177 13467 13179 -3 13178 13180 13468 -3 13178 13468 13466 -3 13179 13467 13469 -3 13179 13469 13181 -3 13180 13182 13470 -3 13180 13470 13468 -3 13181 13469 13471 -3 13181 13471 13183 -3 13182 13184 13472 -3 13182 13472 13470 -3 13183 13471 13473 -3 13183 13473 13185 -3 13184 13186 13474 -3 13184 13474 13472 -3 13185 13473 13475 -3 13185 13475 13187 -3 13186 13188 13476 -3 13186 13476 13474 -3 13187 13475 13477 -3 13187 13477 13189 -3 13188 13190 13478 -3 13188 13478 13476 -3 13189 13477 13479 -3 13189 13479 13191 -3 13190 13192 13480 -3 13190 13480 13478 -3 13191 13479 13193 -3 13481 13193 13479 -3 13192 13194 13482 -3 13192 13482 13480 -3 13193 13481 13195 -3 13483 13195 13481 -3 13194 13196 13484 -3 13194 13484 13482 -3 13195 13483 13197 -3 13485 13197 13483 -3 13196 13198 13486 -3 13196 13486 13484 -3 13197 13485 13199 -3 13487 13199 13485 -3 13198 13200 13488 -3 13198 13488 13486 -3 13199 13487 13201 -3 13489 13201 13487 -3 13200 13202 13490 -3 13200 13490 13488 -3 13201 13489 13203 -3 13491 13203 13489 -3 13202 13204 13492 -3 13202 13492 13490 -3 13203 13491 13205 -3 13493 13205 13491 -3 13204 13206 13494 -3 13204 13494 13492 -3 13205 13493 13207 -3 13495 13207 13493 -3 13206 13208 13496 -3 13206 13496 13494 -3 13207 13495 13209 -3 13497 13209 13495 -3 13208 13210 13498 -3 13208 13498 13496 -3 13209 13497 13211 -3 13499 13211 13497 -3 13210 13212 13500 -3 13210 13500 13498 -3 13211 13499 13213 -3 13501 13213 13499 -3 13212 13214 13502 -3 13212 13502 13500 -3 13213 13501 13215 -3 13503 13215 13501 -3 13214 13216 13502 -3 13504 13502 13216 -3 13215 13503 13217 -3 13505 13217 13503 -3 13216 13218 13504 -3 13506 13504 13218 -3 13217 13505 13219 -3 13507 13219 13505 -3 13218 13220 13506 -3 13508 13506 13220 -3 13219 13507 13221 -3 13509 13221 13507 -3 13220 13222 13508 -3 13510 13508 13222 -3 13221 13509 13223 -3 13511 13223 13509 -3 13222 13224 13510 -3 13512 13510 13224 -3 13223 13511 13225 -3 13513 13225 13511 -3 13224 13226 13512 -3 13514 13512 13226 -3 13225 13513 13227 -3 13515 13227 13513 -3 13226 13228 13514 -3 13516 13514 13228 -3 13227 13515 13229 -3 13517 13229 13515 -3 13228 13230 13516 -3 13518 13516 13230 -3 13229 13517 13231 -3 13519 13231 13517 -3 13230 13232 13518 -3 13520 13518 13232 -3 13231 13519 13233 -3 13521 13233 13519 -3 13232 13234 13520 -3 13522 13520 13234 -3 13233 13521 13235 -3 13523 13235 13521 -3 13234 13236 13522 -3 13524 13522 13236 -3 13235 13523 13237 -3 13525 13237 13523 -3 13236 13238 13524 -3 13526 13524 13238 -3 13237 13525 13239 -3 13527 13239 13525 -3 13238 13240 13526 -3 13528 13526 13240 -3 13239 13527 13241 -3 13529 13241 13527 -3 13240 13242 13528 -3 13530 13528 13242 -3 13241 13529 13243 -3 13531 13243 13529 -3 13242 13244 13530 -3 13532 13530 13244 -3 13243 13531 13245 
-3 13533 13245 13531 -3 13244 13246 13532 -3 13534 13532 13246 -3 13245 13533 13247 -3 13535 13247 13533 -3 13246 13248 13534 -3 13536 13534 13248 -3 13247 13535 13249 -3 13537 13249 13535 -3 13248 13250 13536 -3 13538 13536 13250 -3 13249 13537 13251 -3 13539 13251 13537 -3 13250 13252 13538 -3 13540 13538 13252 -3 13251 13539 13253 -3 13541 13253 13539 -3 13252 13254 13540 -3 13542 13540 13254 -3 13253 13541 13255 -3 13543 13255 13541 -3 13254 13256 13542 -3 13544 13542 13256 -3 13255 13543 13257 -3 13545 13257 13543 -3 13256 13258 13544 -3 13546 13544 13258 -3 13257 13545 13259 -3 13547 13259 13545 -3 13258 13260 13546 -3 13548 13546 13260 -3 13259 13547 13261 -3 13549 13261 13547 -3 13260 13262 13548 -3 13550 13548 13262 -3 13261 13549 13263 -3 13551 13263 13549 -3 13262 13264 13550 -3 13552 13550 13264 -3 13263 13551 13553 -3 13263 13553 13265 -3 13264 13266 13552 -3 13554 13552 13266 -3 13265 13553 13555 -3 13265 13555 13267 -3 13266 13268 13554 -3 13556 13554 13268 -3 13267 13555 13557 -3 13267 13557 13269 -3 13268 13270 13556 -3 13558 13556 13270 -3 13269 13557 13559 -3 13269 13559 13271 -3 13270 13272 13558 -3 13560 13558 13272 -3 13271 13559 13561 -3 13271 13561 13273 -3 13272 13274 13560 -3 13562 13560 13274 -3 13273 13561 13563 -3 13273 13563 13275 -3 13274 13276 13562 -3 13564 13562 13276 -3 13275 13563 13565 -3 13275 13565 13277 -3 13276 13278 13564 -3 13566 13564 13278 -3 13277 13565 13567 -3 13277 13567 13279 -3 13278 13280 13566 -3 13568 13566 13280 -3 13279 13567 13571 -3 13279 13571 13283 -3 13280 13281 13568 -3 13569 13568 13281 -3 13281 13284 13569 -3 13572 13569 13284 -3 13282 13283 13570 -3 13571 13570 13283 -3 13282 13570 13575 -3 13282 13575 13287 -3 13284 13285 13573 -3 13284 13573 13572 -3 13285 13288 13576 -3 13285 13576 13573 -3 13286 13287 13575 -3 13286 13575 13574 -3 13286 13574 13644 -3 13286 13644 13356 -3 13288 13289 13577 -3 13288 13577 13576 -3 13289 13290 13578 -3 13289 13578 13577 -3 13290 13291 13579 -3 13290 13579 13578 -3 13291 13292 13580 -3 13291 13580 13579 -3 13292 13293 13581 -3 13292 13581 13580 -3 13293 13294 13582 -3 13293 13582 13581 -3 13294 13295 13583 -3 13294 13583 13582 -3 13295 13296 13584 -3 13295 13584 13583 -3 13296 13297 13585 -3 13296 13585 13584 -3 13297 13298 13586 -3 13297 13586 13585 -3 13298 13299 13587 -3 13298 13587 13586 -3 13299 13300 13588 -3 13299 13588 13587 -3 13300 13301 13589 -3 13300 13589 13588 -3 13301 13302 13590 -3 13301 13590 13589 -3 13302 13303 13591 -3 13302 13591 13590 -3 13303 13304 13592 -3 13303 13592 13591 -3 13304 13305 13593 -3 13304 13593 13592 -3 13305 13306 13594 -3 13305 13594 13593 -3 13306 13307 13595 -3 13306 13595 13594 -3 13307 13308 13596 -3 13307 13596 13595 -3 13308 13309 13597 -3 13308 13597 13596 -3 13309 13310 13598 -3 13309 13598 13597 -3 13310 13311 13599 -3 13310 13599 13598 -3 13311 13312 13600 -3 13311 13600 13599 -3 13312 13313 13601 -3 13312 13601 13600 -3 13313 13314 13602 -3 13313 13602 13601 -3 13314 13315 13603 -3 13314 13603 13602 -3 13315 13316 13604 -3 13315 13604 13603 -3 13316 13317 13605 -3 13316 13605 13604 -3 13317 13318 13606 -3 13317 13606 13605 -3 13318 13319 13607 -3 13318 13607 13606 -3 13319 13320 13608 -3 13319 13608 13607 -3 13320 13321 13609 -3 13320 13609 13608 -3 13321 13322 13610 -3 13321 13610 13609 -3 13322 13323 13611 -3 13322 13611 13610 -3 13323 13324 13612 -3 13323 13612 13611 -3 13324 13325 13613 -3 13324 13613 13612 -3 13325 13326 13614 -3 13325 13614 13613 -3 13326 13327 13615 -3 13326 13615 13614 -3 13327 13328 13616 -3 13327 13616 13615 -3 
13328 13329 13617 -3 13328 13617 13616 -3 13329 13330 13618 -3 13329 13618 13617 -3 13330 13331 13619 -3 13330 13619 13618 -3 13331 13332 13620 -3 13331 13620 13619 -3 13332 13333 13621 -3 13332 13621 13620 -3 13333 13334 13622 -3 13333 13622 13621 -3 13334 13335 13623 -3 13334 13623 13622 -3 13335 13336 13624 -3 13335 13624 13623 -3 13336 13337 13625 -3 13336 13625 13624 -3 13337 13338 13626 -3 13337 13626 13625 -3 13338 13339 13627 -3 13338 13627 13626 -3 13339 13340 13628 -3 13339 13628 13627 -3 13340 13341 13629 -3 13340 13629 13628 -3 13341 13342 13630 -3 13341 13630 13629 -3 13342 13343 13631 -3 13342 13631 13630 -3 13343 13344 13632 -3 13343 13632 13631 -3 13344 13345 13633 -3 13344 13633 13632 -3 13345 13346 13634 -3 13345 13634 13633 -3 13346 13347 13635 -3 13346 13635 13634 -3 13347 13348 13636 -3 13347 13636 13635 -3 13348 13349 13637 -3 13348 13637 13636 -3 13349 13350 13638 -3 13349 13638 13637 -3 13350 13351 13639 -3 13350 13639 13638 -3 13351 13352 13640 -3 13351 13640 13639 -3 13352 13353 13641 -3 13352 13641 13640 -3 13353 13354 13642 -3 13353 13642 13641 -3 13354 13355 13643 -3 13354 13643 13642 -3 13355 13356 13643 -3 13644 13643 13356 -3 13357 13645 13358 -3 13646 13358 13645 -3 13357 13427 13645 -3 13715 13645 13427 -3 13358 13646 13359 -3 13647 13359 13646 -3 13359 13647 13360 -3 13648 13360 13647 -3 13360 13648 13361 -3 13649 13361 13648 -3 13361 13649 13362 -3 13650 13362 13649 -3 13362 13650 13363 -3 13651 13363 13650 -3 13363 13651 13364 -3 13652 13364 13651 -3 13364 13652 13365 -3 13653 13365 13652 -3 13365 13653 13366 -3 13654 13366 13653 -3 13366 13654 13367 -3 13655 13367 13654 -3 13367 13655 13368 -3 13656 13368 13655 -3 13368 13656 13369 -3 13657 13369 13656 -3 13369 13657 13370 -3 13658 13370 13657 -3 13370 13658 13371 -3 13659 13371 13658 -3 13371 13659 13372 -3 13660 13372 13659 -3 13372 13660 13373 -3 13661 13373 13660 -3 13373 13661 13374 -3 13662 13374 13661 -3 13374 13662 13375 -3 13663 13375 13662 -3 13375 13663 13376 -3 13664 13376 13663 -3 13376 13664 13377 -3 13665 13377 13664 -3 13377 13665 13378 -3 13666 13378 13665 -3 13378 13666 13379 -3 13667 13379 13666 -3 13379 13667 13380 -3 13668 13380 13667 -3 13380 13668 13381 -3 13669 13381 13668 -3 13381 13669 13382 -3 13670 13382 13669 -3 13382 13670 13383 -3 13671 13383 13670 -3 13383 13671 13384 -3 13672 13384 13671 -3 13384 13672 13385 -3 13673 13385 13672 -3 13385 13673 13386 -3 13674 13386 13673 -3 13386 13674 13387 -3 13675 13387 13674 -3 13387 13675 13388 -3 13676 13388 13675 -3 13388 13676 13389 -3 13677 13389 13676 -3 13389 13677 13390 -3 13678 13390 13677 -3 13390 13678 13391 -3 13679 13391 13678 -3 13391 13679 13392 -3 13680 13392 13679 -3 13392 13680 13393 -3 13681 13393 13680 -3 13393 13681 13394 -3 13682 13394 13681 -3 13394 13682 13395 -3 13683 13395 13682 -3 13395 13683 13396 -3 13684 13396 13683 -3 13396 13684 13397 -3 13685 13397 13684 -3 13397 13685 13398 -3 13686 13398 13685 -3 13398 13686 13399 -3 13687 13399 13686 -3 13399 13687 13400 -3 13688 13400 13687 -3 13400 13688 13401 -3 13689 13401 13688 -3 13401 13689 13402 -3 13690 13402 13689 -3 13402 13690 13403 -3 13691 13403 13690 -3 13403 13691 13404 -3 13692 13404 13691 -3 13404 13692 13405 -3 13693 13405 13692 -3 13405 13693 13694 -3 13405 13694 13406 -3 13406 13694 13695 -3 13406 13695 13407 -3 13407 13695 13696 -3 13407 13696 13408 -3 13408 13696 13697 -3 13408 13697 13409 -3 13409 13697 13698 -3 13409 13698 13410 -3 13410 13698 13699 -3 13410 13699 13411 -3 13411 13699 13700 -3 13411 13700 13412 -3 13412 13700 13701 -3 13412 
13701 13413 -3 13413 13701 13702 -3 13413 13702 13414 -3 13414 13702 13703 -3 13414 13703 13415 -3 13415 13703 13704 -3 13415 13704 13416 -3 13416 13704 13705 -3 13416 13705 13417 -3 13417 13705 13706 -3 13417 13706 13418 -3 13418 13706 13707 -3 13418 13707 13419 -3 13419 13707 13708 -3 13419 13708 13420 -3 13420 13708 13709 -3 13420 13709 13421 -3 13421 13709 13710 -3 13421 13710 13422 -3 13422 13710 13711 -3 13422 13711 13423 -3 13423 13711 13712 -3 13423 13712 13424 -3 13424 13712 13713 -3 13424 13713 13425 -3 13425 13713 13716 -3 13425 13716 13428 -3 13426 13714 13715 -3 13426 13715 13427 -3 13426 13431 13719 -3 13426 13719 13714 -3 13428 13716 13717 -3 13428 13717 13429 -3 13429 13717 13720 -3 13429 13720 13432 -3 13430 13718 13719 -3 13430 13719 13431 -3 13430 13434 13722 -3 13430 13722 13718 -3 13432 13720 13721 -3 13432 13721 13433 -3 13433 13721 13723 -3 13433 13723 13435 -3 13434 13436 13724 -3 13434 13724 13722 -3 13435 13723 13725 -3 13435 13725 13437 -3 13436 13438 13726 -3 13436 13726 13724 -3 13437 13725 13727 -3 13437 13727 13439 -3 13438 13440 13728 -3 13438 13728 13726 -3 13439 13727 13729 -3 13439 13729 13441 -3 13440 13442 13730 -3 13440 13730 13728 -3 13441 13729 13731 -3 13441 13731 13443 -3 13442 13444 13732 -3 13442 13732 13730 -3 13443 13731 13733 -3 13443 13733 13445 -3 13444 13446 13734 -3 13444 13734 13732 -3 13445 13733 13735 -3 13445 13735 13447 -3 13446 13448 13736 -3 13446 13736 13734 -3 13447 13735 13737 -3 13447 13737 13449 -3 13448 13450 13738 -3 13448 13738 13736 -3 13449 13737 13739 -3 13449 13739 13451 -3 13450 13452 13740 -3 13450 13740 13738 -3 13451 13739 13741 -3 13451 13741 13453 -3 13452 13454 13742 -3 13452 13742 13740 -3 13453 13741 13743 -3 13453 13743 13455 -3 13454 13456 13744 -3 13454 13744 13742 -3 13455 13743 13745 -3 13455 13745 13457 -3 13456 13458 13746 -3 13456 13746 13744 -3 13457 13745 13747 -3 13457 13747 13459 -3 13458 13460 13748 -3 13458 13748 13746 -3 13459 13747 13749 -3 13459 13749 13461 -3 13460 13462 13750 -3 13460 13750 13748 -3 13461 13749 13751 -3 13461 13751 13463 -3 13462 13464 13752 -3 13462 13752 13750 -3 13463 13751 13753 -3 13463 13753 13465 -3 13464 13466 13754 -3 13464 13754 13752 -3 13465 13753 13755 -3 13465 13755 13467 -3 13466 13468 13756 -3 13466 13756 13754 -3 13467 13755 13757 -3 13467 13757 13469 -3 13468 13470 13758 -3 13468 13758 13756 -3 13469 13757 13759 -3 13469 13759 13471 -3 13470 13472 13760 -3 13470 13760 13758 -3 13471 13759 13761 -3 13471 13761 13473 -3 13472 13474 13762 -3 13472 13762 13760 -3 13473 13761 13763 -3 13473 13763 13475 -3 13474 13476 13764 -3 13474 13764 13762 -3 13475 13763 13765 -3 13475 13765 13477 -3 13476 13478 13766 -3 13476 13766 13764 -3 13477 13765 13479 -3 13767 13479 13765 -3 13478 13480 13768 -3 13478 13768 13766 -3 13479 13767 13481 -3 13769 13481 13767 -3 13480 13482 13770 -3 13480 13770 13768 -3 13481 13769 13483 -3 13771 13483 13769 -3 13482 13484 13772 -3 13482 13772 13770 -3 13483 13771 13485 -3 13773 13485 13771 -3 13484 13486 13774 -3 13484 13774 13772 -3 13485 13773 13487 -3 13775 13487 13773 -3 13486 13488 13776 -3 13486 13776 13774 -3 13487 13775 13489 -3 13777 13489 13775 -3 13488 13490 13778 -3 13488 13778 13776 -3 13489 13777 13491 -3 13779 13491 13777 -3 13490 13492 13780 -3 13490 13780 13778 -3 13491 13779 13493 -3 13781 13493 13779 -3 13492 13494 13782 -3 13492 13782 13780 -3 13493 13781 13495 -3 13783 13495 13781 -3 13494 13496 13784 -3 13494 13784 13782 -3 13495 13783 13497 -3 13785 13497 13783 -3 13496 13498 13786 -3 13496 13786 13784 -3 13497 13785 
13499 -3 13787 13499 13785 -3 13498 13500 13786 -3 13788 13786 13500 -3 13499 13787 13501 -3 13789 13501 13787 -3 13500 13502 13788 -3 13790 13788 13502 -3 13501 13789 13503 -3 13791 13503 13789 -3 13502 13504 13790 -3 13792 13790 13504 -3 13503 13791 13505 -3 13793 13505 13791 -3 13504 13506 13792 -3 13794 13792 13506 -3 13505 13793 13507 -3 13795 13507 13793 -3 13506 13508 13794 -3 13796 13794 13508 -3 13507 13795 13509 -3 13797 13509 13795 -3 13508 13510 13796 -3 13798 13796 13510 -3 13509 13797 13511 -3 13799 13511 13797 -3 13510 13512 13798 -3 13800 13798 13512 -3 13511 13799 13513 -3 13801 13513 13799 -3 13512 13514 13800 -3 13802 13800 13514 -3 13513 13801 13515 -3 13803 13515 13801 -3 13514 13516 13802 -3 13804 13802 13516 -3 13515 13803 13517 -3 13805 13517 13803 -3 13516 13518 13804 -3 13806 13804 13518 -3 13517 13805 13519 -3 13807 13519 13805 -3 13518 13520 13806 -3 13808 13806 13520 -3 13519 13807 13521 -3 13809 13521 13807 -3 13520 13522 13808 -3 13810 13808 13522 -3 13521 13809 13523 -3 13811 13523 13809 -3 13522 13524 13810 -3 13812 13810 13524 -3 13523 13811 13525 -3 13813 13525 13811 -3 13524 13526 13812 -3 13814 13812 13526 -3 13525 13813 13527 -3 13815 13527 13813 -3 13526 13528 13814 -3 13816 13814 13528 -3 13527 13815 13529 -3 13817 13529 13815 -3 13528 13530 13816 -3 13818 13816 13530 -3 13529 13817 13531 -3 13819 13531 13817 -3 13530 13532 13818 -3 13820 13818 13532 -3 13531 13819 13533 -3 13821 13533 13819 -3 13532 13534 13820 -3 13822 13820 13534 -3 13533 13821 13535 -3 13823 13535 13821 -3 13534 13536 13822 -3 13824 13822 13536 -3 13535 13823 13537 -3 13825 13537 13823 -3 13536 13538 13824 -3 13826 13824 13538 -3 13537 13825 13539 -3 13827 13539 13825 -3 13538 13540 13826 -3 13828 13826 13540 -3 13539 13827 13541 -3 13829 13541 13827 -3 13540 13542 13828 -3 13830 13828 13542 -3 13541 13829 13543 -3 13831 13543 13829 -3 13542 13544 13830 -3 13832 13830 13544 -3 13543 13831 13545 -3 13833 13545 13831 -3 13544 13546 13832 -3 13834 13832 13546 -3 13545 13833 13547 -3 13835 13547 13833 -3 13546 13548 13834 -3 13836 13834 13548 -3 13547 13835 13549 -3 13837 13549 13835 -3 13548 13550 13836 -3 13838 13836 13550 -3 13549 13837 13839 -3 13549 13839 13551 -3 13550 13552 13838 -3 13840 13838 13552 -3 13551 13839 13841 -3 13551 13841 13553 -3 13552 13554 13840 -3 13842 13840 13554 -3 13553 13841 13843 -3 13553 13843 13555 -3 13554 13556 13842 -3 13844 13842 13556 -3 13555 13843 13845 -3 13555 13845 13557 -3 13556 13558 13844 -3 13846 13844 13558 -3 13557 13845 13847 -3 13557 13847 13559 -3 13558 13560 13846 -3 13848 13846 13560 -3 13559 13847 13849 -3 13559 13849 13561 -3 13560 13562 13848 -3 13850 13848 13562 -3 13561 13849 13851 -3 13561 13851 13563 -3 13562 13564 13850 -3 13852 13850 13564 -3 13563 13851 13853 -3 13563 13853 13565 -3 13564 13566 13852 -3 13854 13852 13566 -3 13565 13853 13855 -3 13565 13855 13567 -3 13566 13568 13854 -3 13856 13854 13568 -3 13567 13855 13859 -3 13567 13859 13571 -3 13568 13569 13856 -3 13857 13856 13569 -3 13569 13572 13860 -3 13569 13860 13857 -3 13570 13571 13859 -3 13570 13859 13858 -3 13570 13858 13863 -3 13570 13863 13575 -3 13572 13573 13861 -3 13572 13861 13860 -3 13573 13576 13864 -3 13573 13864 13861 -3 13574 13575 13863 -3 13574 13863 13862 -3 13574 13862 13932 -3 13574 13932 13644 -3 13576 13577 13865 -3 13576 13865 13864 -3 13577 13578 13866 -3 13577 13866 13865 -3 13578 13579 13867 -3 13578 13867 13866 -3 13579 13580 13868 -3 13579 13868 13867 -3 13580 13581 13869 -3 13580 13869 13868 -3 13581 13582 13870 -3 13581 13870 13869 
-3 13582 13583 13871 -3 13582 13871 13870 -3 13583 13584 13872 -3 13583 13872 13871 -3 13584 13585 13873 -3 13584 13873 13872 -3 13585 13586 13874 -3 13585 13874 13873 -3 13586 13587 13875 -3 13586 13875 13874 -3 13587 13588 13876 -3 13587 13876 13875 -3 13588 13589 13877 -3 13588 13877 13876 -3 13589 13590 13878 -3 13589 13878 13877 -3 13590 13591 13879 -3 13590 13879 13878 -3 13591 13592 13880 -3 13591 13880 13879 -3 13592 13593 13881 -3 13592 13881 13880 -3 13593 13594 13882 -3 13593 13882 13881 -3 13594 13595 13883 -3 13594 13883 13882 -3 13595 13596 13884 -3 13595 13884 13883 -3 13596 13597 13885 -3 13596 13885 13884 -3 13597 13598 13886 -3 13597 13886 13885 -3 13598 13599 13887 -3 13598 13887 13886 -3 13599 13600 13888 -3 13599 13888 13887 -3 13600 13601 13889 -3 13600 13889 13888 -3 13601 13602 13890 -3 13601 13890 13889 -3 13602 13603 13891 -3 13602 13891 13890 -3 13603 13604 13892 -3 13603 13892 13891 -3 13604 13605 13893 -3 13604 13893 13892 -3 13605 13606 13894 -3 13605 13894 13893 -3 13606 13607 13895 -3 13606 13895 13894 -3 13607 13608 13896 -3 13607 13896 13895 -3 13608 13609 13897 -3 13608 13897 13896 -3 13609 13610 13898 -3 13609 13898 13897 -3 13610 13611 13899 -3 13610 13899 13898 -3 13611 13612 13900 -3 13611 13900 13899 -3 13612 13613 13901 -3 13612 13901 13900 -3 13613 13614 13902 -3 13613 13902 13901 -3 13614 13615 13903 -3 13614 13903 13902 -3 13615 13616 13904 -3 13615 13904 13903 -3 13616 13617 13905 -3 13616 13905 13904 -3 13617 13618 13906 -3 13617 13906 13905 -3 13618 13619 13907 -3 13618 13907 13906 -3 13619 13620 13908 -3 13619 13908 13907 -3 13620 13621 13909 -3 13620 13909 13908 -3 13621 13622 13910 -3 13621 13910 13909 -3 13622 13623 13911 -3 13622 13911 13910 -3 13623 13624 13912 -3 13623 13912 13911 -3 13624 13625 13913 -3 13624 13913 13912 -3 13625 13626 13914 -3 13625 13914 13913 -3 13626 13627 13915 -3 13626 13915 13914 -3 13627 13628 13916 -3 13627 13916 13915 -3 13628 13629 13917 -3 13628 13917 13916 -3 13629 13630 13918 -3 13629 13918 13917 -3 13630 13631 13919 -3 13630 13919 13918 -3 13631 13632 13920 -3 13631 13920 13919 -3 13632 13633 13921 -3 13632 13921 13920 -3 13633 13634 13922 -3 13633 13922 13921 -3 13634 13635 13923 -3 13634 13923 13922 -3 13635 13636 13924 -3 13635 13924 13923 -3 13636 13637 13925 -3 13636 13925 13924 -3 13637 13638 13926 -3 13637 13926 13925 -3 13638 13639 13927 -3 13638 13927 13926 -3 13639 13640 13928 -3 13639 13928 13927 -3 13640 13641 13929 -3 13640 13929 13928 -3 13641 13642 13929 -3 13930 13929 13642 -3 13642 13643 13930 -3 13931 13930 13643 -3 13643 13644 13931 -3 13932 13931 13644 -3 13645 13933 13646 -3 13934 13646 13933 -3 13645 13715 13933 -3 14003 13933 13715 -3 13646 13934 13647 -3 13935 13647 13934 -3 13647 13935 13648 -3 13936 13648 13935 -3 13648 13936 13649 -3 13937 13649 13936 -3 13649 13937 13650 -3 13938 13650 13937 -3 13650 13938 13651 -3 13939 13651 13938 -3 13651 13939 13652 -3 13940 13652 13939 -3 13652 13940 13653 -3 13941 13653 13940 -3 13653 13941 13654 -3 13942 13654 13941 -3 13654 13942 13655 -3 13943 13655 13942 -3 13655 13943 13656 -3 13944 13656 13943 -3 13656 13944 13657 -3 13945 13657 13944 -3 13657 13945 13658 -3 13946 13658 13945 -3 13658 13946 13659 -3 13947 13659 13946 -3 13659 13947 13660 -3 13948 13660 13947 -3 13660 13948 13661 -3 13949 13661 13948 -3 13661 13949 13662 -3 13950 13662 13949 -3 13662 13950 13663 -3 13951 13663 13950 -3 13663 13951 13664 -3 13952 13664 13951 -3 13664 13952 13665 -3 13953 13665 13952 -3 13665 13953 13666 -3 13954 13666 13953 -3 13666 13954 13667 -3 
13955 13667 13954 -3 13667 13955 13668 -3 13956 13668 13955 -3 13668 13956 13669 -3 13957 13669 13956 -3 13669 13957 13670 -3 13958 13670 13957 -3 13670 13958 13671 -3 13959 13671 13958 -3 13671 13959 13672 -3 13960 13672 13959 -3 13672 13960 13673 -3 13961 13673 13960 -3 13673 13961 13674 -3 13962 13674 13961 -3 13674 13962 13675 -3 13963 13675 13962 -3 13675 13963 13676 -3 13964 13676 13963 -3 13676 13964 13677 -3 13965 13677 13964 -3 13677 13965 13678 -3 13966 13678 13965 -3 13678 13966 13679 -3 13967 13679 13966 -3 13679 13967 13680 -3 13968 13680 13967 -3 13680 13968 13681 -3 13969 13681 13968 -3 13681 13969 13682 -3 13970 13682 13969 -3 13682 13970 13683 -3 13971 13683 13970 -3 13683 13971 13684 -3 13972 13684 13971 -3 13684 13972 13685 -3 13973 13685 13972 -3 13685 13973 13686 -3 13974 13686 13973 -3 13686 13974 13687 -3 13975 13687 13974 -3 13687 13975 13688 -3 13976 13688 13975 -3 13688 13976 13689 -3 13977 13689 13976 -3 13689 13977 13690 -3 13978 13690 13977 -3 13690 13978 13691 -3 13979 13691 13978 -3 13691 13979 13692 -3 13980 13692 13979 -3 13692 13980 13693 -3 13981 13693 13980 -3 13693 13981 13982 -3 13693 13982 13694 -3 13694 13982 13983 -3 13694 13983 13695 -3 13695 13983 13984 -3 13695 13984 13696 -3 13696 13984 13985 -3 13696 13985 13697 -3 13697 13985 13986 -3 13697 13986 13698 -3 13698 13986 13987 -3 13698 13987 13699 -3 13699 13987 13988 -3 13699 13988 13700 -3 13700 13988 13989 -3 13700 13989 13701 -3 13701 13989 13990 -3 13701 13990 13702 -3 13702 13990 13991 -3 13702 13991 13703 -3 13703 13991 13992 -3 13703 13992 13704 -3 13704 13992 13993 -3 13704 13993 13705 -3 13705 13993 13994 -3 13705 13994 13706 -3 13706 13994 13995 -3 13706 13995 13707 -3 13707 13995 13996 -3 13707 13996 13708 -3 13708 13996 13997 -3 13708 13997 13709 -3 13709 13997 13998 -3 13709 13998 13710 -3 13710 13998 13999 -3 13710 13999 13711 -3 13711 13999 14000 -3 13711 14000 13712 -3 13712 14000 14001 -3 13712 14001 13713 -3 13713 14001 14004 -3 13713 14004 13716 -3 13714 14002 14003 -3 13714 14003 13715 -3 13714 13719 14007 -3 13714 14007 14002 -3 13716 14004 14005 -3 13716 14005 13717 -3 13717 14005 14008 -3 13717 14008 13720 -3 13718 14006 14007 -3 13718 14007 13719 -3 13718 13722 14010 -3 13718 14010 14006 -3 13720 14008 14009 -3 13720 14009 13721 -3 13721 14009 14011 -3 13721 14011 13723 -3 13722 13724 14012 -3 13722 14012 14010 -3 13723 14011 14013 -3 13723 14013 13725 -3 13724 13726 14014 -3 13724 14014 14012 -3 13725 14013 14015 -3 13725 14015 13727 -3 13726 13728 14016 -3 13726 14016 14014 -3 13727 14015 14017 -3 13727 14017 13729 -3 13728 13730 14018 -3 13728 14018 14016 -3 13729 14017 14019 -3 13729 14019 13731 -3 13730 13732 14020 -3 13730 14020 14018 -3 13731 14019 14021 -3 13731 14021 13733 -3 13732 13734 14022 -3 13732 14022 14020 -3 13733 14021 14023 -3 13733 14023 13735 -3 13734 13736 14024 -3 13734 14024 14022 -3 13735 14023 14025 -3 13735 14025 13737 -3 13736 13738 14026 -3 13736 14026 14024 -3 13737 14025 14027 -3 13737 14027 13739 -3 13738 13740 14028 -3 13738 14028 14026 -3 13739 14027 14029 -3 13739 14029 13741 -3 13740 13742 14030 -3 13740 14030 14028 -3 13741 14029 14031 -3 13741 14031 13743 -3 13742 13744 14032 -3 13742 14032 14030 -3 13743 14031 14033 -3 13743 14033 13745 -3 13744 13746 14034 -3 13744 14034 14032 -3 13745 14033 14035 -3 13745 14035 13747 -3 13746 13748 14036 -3 13746 14036 14034 -3 13747 14035 14037 -3 13747 14037 13749 -3 13748 13750 14038 -3 13748 14038 14036 -3 13749 14037 14039 -3 13749 14039 13751 -3 13750 13752 14040 -3 13750 14040 14038 -3 13751 
14039 14041 -3 13751 14041 13753 -3 13752 13754 14042 -3 13752 14042 14040 -3 13753 14041 14043 -3 13753 14043 13755 -3 13754 13756 14044 -3 13754 14044 14042 -3 13755 14043 14045 -3 13755 14045 13757 -3 13756 13758 14046 -3 13756 14046 14044 -3 13757 14045 14047 -3 13757 14047 13759 -3 13758 13760 14048 -3 13758 14048 14046 -3 13759 14047 14049 -3 13759 14049 13761 -3 13760 13762 14050 -3 13760 14050 14048 -3 13761 14049 14051 -3 13761 14051 13763 -3 13762 13764 14052 -3 13762 14052 14050 -3 13763 14051 14053 -3 13763 14053 13765 -3 13764 13766 14054 -3 13764 14054 14052 -3 13765 14053 14055 -3 13765 14055 13767 -3 13766 13768 14056 -3 13766 14056 14054 -3 13767 14055 13769 -3 14057 13769 14055 -3 13768 13770 14058 -3 13768 14058 14056 -3 13769 14057 13771 -3 14059 13771 14057 -3 13770 13772 14060 -3 13770 14060 14058 -3 13771 14059 13773 -3 14061 13773 14059 -3 13772 13774 14062 -3 13772 14062 14060 -3 13773 14061 13775 -3 14063 13775 14061 -3 13774 13776 14064 -3 13774 14064 14062 -3 13775 14063 13777 -3 14065 13777 14063 -3 13776 13778 14066 -3 13776 14066 14064 -3 13777 14065 13779 -3 14067 13779 14065 -3 13778 13780 14068 -3 13778 14068 14066 -3 13779 14067 13781 -3 14069 13781 14067 -3 13780 13782 14070 -3 13780 14070 14068 -3 13781 14069 13783 -3 14071 13783 14069 -3 13782 13784 14072 -3 13782 14072 14070 -3 13783 14071 13785 -3 14073 13785 14071 -3 13784 13786 14074 -3 13784 14074 14072 -3 13785 14073 13787 -3 14075 13787 14073 -3 13786 13788 14074 -3 14076 14074 13788 -3 13787 14075 13789 -3 14077 13789 14075 -3 13788 13790 14076 -3 14078 14076 13790 -3 13789 14077 13791 -3 14079 13791 14077 -3 13790 13792 14078 -3 14080 14078 13792 -3 13791 14079 13793 -3 14081 13793 14079 -3 13792 13794 14080 -3 14082 14080 13794 -3 13793 14081 13795 -3 14083 13795 14081 -3 13794 13796 14082 -3 14084 14082 13796 -3 13795 14083 13797 -3 14085 13797 14083 -3 13796 13798 14084 -3 14086 14084 13798 -3 13797 14085 13799 -3 14087 13799 14085 -3 13798 13800 14086 -3 14088 14086 13800 -3 13799 14087 13801 -3 14089 13801 14087 -3 13800 13802 14088 -3 14090 14088 13802 -3 13801 14089 13803 -3 14091 13803 14089 -3 13802 13804 14090 -3 14092 14090 13804 -3 13803 14091 13805 -3 14093 13805 14091 -3 13804 13806 14092 -3 14094 14092 13806 -3 13805 14093 13807 -3 14095 13807 14093 -3 13806 13808 14094 -3 14096 14094 13808 -3 13807 14095 13809 -3 14097 13809 14095 -3 13808 13810 14096 -3 14098 14096 13810 -3 13809 14097 13811 -3 14099 13811 14097 -3 13810 13812 14098 -3 14100 14098 13812 -3 13811 14099 13813 -3 14101 13813 14099 -3 13812 13814 14100 -3 14102 14100 13814 -3 13813 14101 13815 -3 14103 13815 14101 -3 13814 13816 14102 -3 14104 14102 13816 -3 13815 14103 13817 -3 14105 13817 14103 -3 13816 13818 14104 -3 14106 14104 13818 -3 13817 14105 13819 -3 14107 13819 14105 -3 13818 13820 14106 -3 14108 14106 13820 -3 13819 14107 13821 -3 14109 13821 14107 -3 13820 13822 14108 -3 14110 14108 13822 -3 13821 14109 13823 -3 14111 13823 14109 -3 13822 13824 14110 -3 14112 14110 13824 -3 13823 14111 13825 -3 14113 13825 14111 -3 13824 13826 14112 -3 14114 14112 13826 -3 13825 14113 13827 -3 14115 13827 14113 -3 13826 13828 14114 -3 14116 14114 13828 -3 13827 14115 13829 -3 14117 13829 14115 -3 13828 13830 14116 -3 14118 14116 13830 -3 13829 14117 13831 -3 14119 13831 14117 -3 13830 13832 14118 -3 14120 14118 13832 -3 13831 14119 13833 -3 14121 13833 14119 -3 13832 13834 14120 -3 14122 14120 13834 -3 13833 14121 13835 -3 14123 13835 14121 -3 13834 13836 14122 -3 14124 14122 13836 -3 13835 14123 13837 -3 14125 13837 
14123 -3 13836 13838 14124 -3 14126 14124 13838 -3 13837 14125 13839 -3 14127 13839 14125 -3 13838 13840 14126 -3 14128 14126 13840 -3 13839 14127 14129 -3 13839 14129 13841 -3 13840 13842 14128 -3 14130 14128 13842 -3 13841 14129 14131 -3 13841 14131 13843 -3 13842 13844 14130 -3 14132 14130 13844 -3 13843 14131 14133 -3 13843 14133 13845 -3 13844 13846 14132 -3 14134 14132 13846 -3 13845 14133 14135 -3 13845 14135 13847 -3 13846 13848 14134 -3 14136 14134 13848 -3 13847 14135 14137 -3 13847 14137 13849 -3 13848 13850 14136 -3 14138 14136 13850 -3 13849 14137 14139 -3 13849 14139 13851 -3 13850 13852 14138 -3 14140 14138 13852 -3 13851 14139 14141 -3 13851 14141 13853 -3 13852 13854 14140 -3 14142 14140 13854 -3 13853 14141 14143 -3 13853 14143 13855 -3 13854 13856 14142 -3 14144 14142 13856 -3 13855 14143 14147 -3 13855 14147 13859 -3 13856 13857 14144 -3 14145 14144 13857 -3 13857 13860 14148 -3 13857 14148 14145 -3 13858 13859 14147 -3 13858 14147 14146 -3 13858 14146 14151 -3 13858 14151 13863 -3 13860 13861 14149 -3 13860 14149 14148 -3 13861 13864 14152 -3 13861 14152 14149 -3 13862 13863 14151 -3 13862 14151 14150 -3 13862 14150 14220 -3 13862 14220 13932 -3 13864 13865 14153 -3 13864 14153 14152 -3 13865 13866 14154 -3 13865 14154 14153 -3 13866 13867 14155 -3 13866 14155 14154 -3 13867 13868 14156 -3 13867 14156 14155 -3 13868 13869 14157 -3 13868 14157 14156 -3 13869 13870 14158 -3 13869 14158 14157 -3 13870 13871 14159 -3 13870 14159 14158 -3 13871 13872 14160 -3 13871 14160 14159 -3 13872 13873 14161 -3 13872 14161 14160 -3 13873 13874 14162 -3 13873 14162 14161 -3 13874 13875 14163 -3 13874 14163 14162 -3 13875 13876 14164 -3 13875 14164 14163 -3 13876 13877 14165 -3 13876 14165 14164 -3 13877 13878 14166 -3 13877 14166 14165 -3 13878 13879 14167 -3 13878 14167 14166 -3 13879 13880 14168 -3 13879 14168 14167 -3 13880 13881 14169 -3 13880 14169 14168 -3 13881 13882 14170 -3 13881 14170 14169 -3 13882 13883 14171 -3 13882 14171 14170 -3 13883 13884 14172 -3 13883 14172 14171 -3 13884 13885 14173 -3 13884 14173 14172 -3 13885 13886 14174 -3 13885 14174 14173 -3 13886 13887 14175 -3 13886 14175 14174 -3 13887 13888 14176 -3 13887 14176 14175 -3 13888 13889 14177 -3 13888 14177 14176 -3 13889 13890 14178 -3 13889 14178 14177 -3 13890 13891 14179 -3 13890 14179 14178 -3 13891 13892 14180 -3 13891 14180 14179 -3 13892 13893 14181 -3 13892 14181 14180 -3 13893 13894 14182 -3 13893 14182 14181 -3 13894 13895 14183 -3 13894 14183 14182 -3 13895 13896 14184 -3 13895 14184 14183 -3 13896 13897 14185 -3 13896 14185 14184 -3 13897 13898 14186 -3 13897 14186 14185 -3 13898 13899 14187 -3 13898 14187 14186 -3 13899 13900 14188 -3 13899 14188 14187 -3 13900 13901 14189 -3 13900 14189 14188 -3 13901 13902 14190 -3 13901 14190 14189 -3 13902 13903 14191 -3 13902 14191 14190 -3 13903 13904 14192 -3 13903 14192 14191 -3 13904 13905 14193 -3 13904 14193 14192 -3 13905 13906 14194 -3 13905 14194 14193 -3 13906 13907 14195 -3 13906 14195 14194 -3 13907 13908 14196 -3 13907 14196 14195 -3 13908 13909 14197 -3 13908 14197 14196 -3 13909 13910 14198 -3 13909 14198 14197 -3 13910 13911 14199 -3 13910 14199 14198 -3 13911 13912 14200 -3 13911 14200 14199 -3 13912 13913 14201 -3 13912 14201 14200 -3 13913 13914 14202 -3 13913 14202 14201 -3 13914 13915 14203 -3 13914 14203 14202 -3 13915 13916 14204 -3 13915 14204 14203 -3 13916 13917 14205 -3 13916 14205 14204 -3 13917 13918 14206 -3 13917 14206 14205 -3 13918 13919 14207 -3 13918 14207 14206 -3 13919 13920 14208 -3 13919 14208 14207 -3 13920 13921 14209 
-3 13920 14209 14208 -3 13921 13922 14210 -3 13921 14210 14209 -3 13922 13923 14211 -3 13922 14211 14210 -3 13923 13924 14212 -3 13923 14212 14211 -3 13924 13925 14213 -3 13924 14213 14212 -3 13925 13926 14214 -3 13925 14214 14213 -3 13926 13927 14215 -3 13926 14215 14214 -3 13927 13928 14216 -3 13927 14216 14215 -3 13928 13929 14217 -3 13928 14217 14216 -3 13929 13930 14218 -3 13929 14218 14217 -3 13930 13931 14218 -3 14219 14218 13931 -3 13931 13932 14219 -3 14220 14219 13932 -3 13933 14221 13934 -3 14222 13934 14221 -3 13933 14003 14221 -3 14291 14221 14003 -3 13934 14222 13935 -3 14223 13935 14222 -3 13935 14223 13936 -3 14224 13936 14223 -3 13936 14224 13937 -3 14225 13937 14224 -3 13937 14225 13938 -3 14226 13938 14225 -3 13938 14226 13939 -3 14227 13939 14226 -3 13939 14227 13940 -3 14228 13940 14227 -3 13940 14228 13941 -3 14229 13941 14228 -3 13941 14229 13942 -3 14230 13942 14229 -3 13942 14230 13943 -3 14231 13943 14230 -3 13943 14231 13944 -3 14232 13944 14231 -3 13944 14232 13945 -3 14233 13945 14232 -3 13945 14233 13946 -3 14234 13946 14233 -3 13946 14234 13947 -3 14235 13947 14234 -3 13947 14235 13948 -3 14236 13948 14235 -3 13948 14236 13949 -3 14237 13949 14236 -3 13949 14237 13950 -3 14238 13950 14237 -3 13950 14238 13951 -3 14239 13951 14238 -3 13951 14239 13952 -3 14240 13952 14239 -3 13952 14240 13953 -3 14241 13953 14240 -3 13953 14241 13954 -3 14242 13954 14241 -3 13954 14242 13955 -3 14243 13955 14242 -3 13955 14243 13956 -3 14244 13956 14243 -3 13956 14244 13957 -3 14245 13957 14244 -3 13957 14245 13958 -3 14246 13958 14245 -3 13958 14246 13959 -3 14247 13959 14246 -3 13959 14247 13960 -3 14248 13960 14247 -3 13960 14248 13961 -3 14249 13961 14248 -3 13961 14249 13962 -3 14250 13962 14249 -3 13962 14250 13963 -3 14251 13963 14250 -3 13963 14251 13964 -3 14252 13964 14251 -3 13964 14252 13965 -3 14253 13965 14252 -3 13965 14253 13966 -3 14254 13966 14253 -3 13966 14254 13967 -3 14255 13967 14254 -3 13967 14255 13968 -3 14256 13968 14255 -3 13968 14256 13969 -3 14257 13969 14256 -3 13969 14257 13970 -3 14258 13970 14257 -3 13970 14258 13971 -3 14259 13971 14258 -3 13971 14259 13972 -3 14260 13972 14259 -3 13972 14260 13973 -3 14261 13973 14260 -3 13973 14261 13974 -3 14262 13974 14261 -3 13974 14262 13975 -3 14263 13975 14262 -3 13975 14263 13976 -3 14264 13976 14263 -3 13976 14264 13977 -3 14265 13977 14264 -3 13977 14265 13978 -3 14266 13978 14265 -3 13978 14266 13979 -3 14267 13979 14266 -3 13979 14267 13980 -3 14268 13980 14267 -3 13980 14268 13981 -3 14269 13981 14268 -3 13981 14269 13982 -3 14270 13982 14269 -3 13982 14270 13983 -3 14271 13983 14270 -3 13983 14271 13984 -3 14272 13984 14271 -3 13984 14272 14273 -3 13984 14273 13985 -3 13985 14273 14274 -3 13985 14274 13986 -3 13986 14274 14275 -3 13986 14275 13987 -3 13987 14275 14276 -3 13987 14276 13988 -3 13988 14276 14277 -3 13988 14277 13989 -3 13989 14277 14278 -3 13989 14278 13990 -3 13990 14278 14279 -3 13990 14279 13991 -3 13991 14279 14280 -3 13991 14280 13992 -3 13992 14280 14281 -3 13992 14281 13993 -3 13993 14281 14282 -3 13993 14282 13994 -3 13994 14282 14283 -3 13994 14283 13995 -3 13995 14283 14284 -3 13995 14284 13996 -3 13996 14284 14285 -3 13996 14285 13997 -3 13997 14285 14286 -3 13997 14286 13998 -3 13998 14286 14287 -3 13998 14287 13999 -3 13999 14287 14288 -3 13999 14288 14000 -3 14000 14288 14289 -3 14000 14289 14001 -3 14001 14289 14292 -3 14001 14292 14004 -3 14002 14290 14291 -3 14002 14291 14003 -3 14002 14007 14295 -3 14002 14295 14290 -3 14004 14292 14293 -3 14004 14293 14005 -3 
[Mesh data hunk: several thousand flattened triangle face-index records of the form "-3 i j k" (vertex indices roughly 14005–15901), apparently removed diff lines from a surface-mesh data file touched by this patch series; the raw index listing carries no readable content and is condensed to this placeholder.]
-3 15613 15614 15901 -3 15902 15901 15614 -3 15614 15615 15902 -3 15903 15902 15615 -3 15615 15616 15903 -3 15904 15903 15616 -3 15616 15617 15904 -3 15905 15904 15617 -3 15617 15618 15905 -3 15906 15905 15618 -3 15618 15619 15906 -3 15907 15906 15619 -3 15619 15620 15907 -3 15908 15907 15620 -3 15620 15621 15908 -3 15909 15908 15621 -3 15621 15622 15909 -3 15910 15909 15622 -3 15622 15623 15910 -3 15911 15910 15623 -3 15623 15624 15911 -3 15912 15911 15624 -3 15624 15625 15912 -3 15913 15912 15625 -3 15625 15626 15913 -3 15914 15913 15626 -3 15626 15627 15914 -3 15915 15914 15627 -3 15627 15628 15915 -3 15916 15915 15628 -3 15628 15629 15916 -3 15917 15916 15629 -3 15629 15630 15917 -3 15918 15917 15630 -3 15630 15631 15918 -3 15919 15918 15631 -3 15631 15632 15919 -3 15920 15919 15632 -3 15632 15633 15920 -3 15921 15920 15633 -3 15633 15634 15921 -3 15922 15921 15634 -3 15634 15635 15922 -3 15923 15922 15635 -3 15635 15636 15923 -3 15924 15923 15636 -3 15636 15637 15924 -3 15925 15924 15637 -3 15637 15638 15925 -3 15926 15925 15638 -3 15638 15639 15926 -3 15927 15926 15639 -3 15639 15640 15927 -3 15928 15927 15640 -3 15640 15641 15928 -3 15929 15928 15641 -3 15641 15642 15929 -3 15930 15929 15642 -3 15642 15643 15930 -3 15931 15930 15643 -3 15643 15644 15931 -3 15932 15931 15644 -3 15644 15645 15932 -3 15933 15932 15645 -3 15645 15646 15933 -3 15934 15933 15646 -3 15646 15647 15934 -3 15935 15934 15647 -3 15647 15648 15935 -3 15936 15935 15648 -3 15648 15649 15936 -3 15937 15936 15649 -3 15649 15650 15937 -3 15938 15937 15650 -3 15650 15651 15938 -3 15939 15938 15651 -3 15651 15652 15939 -3 15940 15939 15652 -3 15652 15653 15941 -3 15652 15941 15940 -3 15653 15654 15942 -3 15653 15942 15941 -3 15654 15655 15943 -3 15654 15943 15942 -3 15655 15656 15944 -3 15655 15944 15943 -3 15656 15657 15945 -3 15656 15945 15944 -3 15657 15658 15946 -3 15657 15946 15945 -3 15658 15659 15947 -3 15658 15947 15946 -3 15659 15660 15948 -3 15659 15948 15947 -3 15661 15949 15950 -3 15661 15950 15662 -3 15661 15731 16019 -3 15661 16019 15949 -3 15662 15950 15951 -3 15662 15951 15663 -3 15663 15951 15952 -3 15663 15952 15664 -3 15664 15952 15953 -3 15664 15953 15665 -3 15665 15953 15954 -3 15665 15954 15666 -3 15666 15954 15955 -3 15666 15955 15667 -3 15667 15955 15956 -3 15667 15956 15668 -3 15668 15956 15957 -3 15668 15957 15669 -3 15669 15957 15958 -3 15669 15958 15670 -3 15670 15958 15959 -3 15670 15959 15671 -3 15671 15959 15960 -3 15671 15960 15672 -3 15672 15960 15961 -3 15672 15961 15673 -3 15673 15961 15962 -3 15673 15962 15674 -3 15674 15962 15963 -3 15674 15963 15675 -3 15675 15963 15964 -3 15675 15964 15676 -3 15676 15964 15965 -3 15676 15965 15677 -3 15677 15965 15966 -3 15677 15966 15678 -3 15678 15966 15967 -3 15678 15967 15679 -3 15679 15967 15968 -3 15679 15968 15680 -3 15680 15968 15969 -3 15680 15969 15681 -3 15681 15969 15970 -3 15681 15970 15682 -3 15682 15970 15971 -3 15682 15971 15683 -3 15683 15971 15972 -3 15683 15972 15684 -3 15684 15972 15973 -3 15684 15973 15685 -3 15685 15973 15974 -3 15685 15974 15686 -3 15686 15974 15975 -3 15686 15975 15687 -3 15687 15975 15976 -3 15687 15976 15688 -3 15688 15976 15977 -3 15688 15977 15689 -3 15689 15977 15978 -3 15689 15978 15690 -3 15690 15978 15979 -3 15690 15979 15691 -3 15691 15979 15980 -3 15691 15980 15692 -3 15692 15980 15981 -3 15692 15981 15693 -3 15693 15981 15982 -3 15693 15982 15694 -3 15694 15982 15983 -3 15694 15983 15695 -3 15695 15983 15984 -3 15695 15984 15696 -3 15696 15984 15985 -3 15696 15985 15697 -3 15697 15985 15986 -3 
15697 15986 15698 -3 15698 15986 15987 -3 15698 15987 15699 -3 15699 15987 15988 -3 15699 15988 15700 -3 15700 15988 15989 -3 15700 15989 15701 -3 15701 15989 15990 -3 15701 15990 15702 -3 15702 15990 15991 -3 15702 15991 15703 -3 15703 15991 15992 -3 15703 15992 15704 -3 15704 15992 15993 -3 15704 15993 15705 -3 15705 15993 15994 -3 15705 15994 15706 -3 15706 15994 15995 -3 15706 15995 15707 -3 15707 15995 15996 -3 15707 15996 15708 -3 15708 15996 15997 -3 15708 15997 15709 -3 15709 15997 15998 -3 15709 15998 15710 -3 15710 15998 15999 -3 15710 15999 15711 -3 15711 15999 16000 -3 15711 16000 15712 -3 15712 16000 16001 -3 15712 16001 15713 -3 15713 16001 16002 -3 15713 16002 15714 -3 15714 16002 16003 -3 15714 16003 15715 -3 15715 16003 16004 -3 15715 16004 15716 -3 15716 16004 16005 -3 15716 16005 15717 -3 15717 16005 16006 -3 15717 16006 15718 -3 15718 16006 15719 -3 16007 15719 16006 -3 15719 16007 15720 -3 16008 15720 16007 -3 15720 16008 15721 -3 16009 15721 16008 -3 15721 16009 15722 -3 16010 15722 16009 -3 15722 16010 15723 -3 16011 15723 16010 -3 15723 16011 15724 -3 16012 15724 16011 -3 15724 16012 15725 -3 16013 15725 16012 -3 15725 16013 15726 -3 16014 15726 16013 -3 15726 16014 15727 -3 16015 15727 16014 -3 15727 16015 15728 -3 16016 15728 16015 -3 15728 16016 15729 -3 16017 15729 16016 -3 15729 16017 15732 -3 16020 15732 16017 -3 15730 16018 15731 -3 16019 15731 16018 -3 15730 15735 16018 -3 16023 16018 15735 -3 15732 16020 15733 -3 16021 15733 16020 -3 15733 16021 15736 -3 16024 15736 16021 -3 15734 16022 15735 -3 16023 15735 16022 -3 15734 15738 16022 -3 16026 16022 15738 -3 15736 16024 15737 -3 16025 15737 16024 -3 15737 16025 15739 -3 16027 15739 16025 -3 15738 15740 16026 -3 16028 16026 15740 -3 15739 16027 15741 -3 16029 15741 16027 -3 15740 15742 16028 -3 16030 16028 15742 -3 15741 16029 15743 -3 16031 15743 16029 -3 15742 15744 16030 -3 16032 16030 15744 -3 15743 16031 15745 -3 16033 15745 16031 -3 15744 15746 16032 -3 16034 16032 15746 -3 15745 16033 15747 -3 16035 15747 16033 -3 15746 15748 16034 -3 16036 16034 15748 -3 15747 16035 15749 -3 16037 15749 16035 -3 15748 15750 16036 -3 16038 16036 15750 -3 15749 16037 15751 -3 16039 15751 16037 -3 15750 15752 16038 -3 16040 16038 15752 -3 15751 16039 15753 -3 16041 15753 16039 -3 15752 15754 16040 -3 16042 16040 15754 -3 15753 16041 15755 -3 16043 15755 16041 -3 15754 15756 16042 -3 16044 16042 15756 -3 15755 16043 15757 -3 16045 15757 16043 -3 15756 15758 16044 -3 16046 16044 15758 -3 15757 16045 15759 -3 16047 15759 16045 -3 15758 15760 16046 -3 16048 16046 15760 -3 15759 16047 15761 -3 16049 15761 16047 -3 15760 15762 16048 -3 16050 16048 15762 -3 15761 16049 15763 -3 16051 15763 16049 -3 15762 15764 16050 -3 16052 16050 15764 -3 15763 16051 15765 -3 16053 15765 16051 -3 15764 15766 16052 -3 16054 16052 15766 -3 15765 16053 15767 -3 16055 15767 16053 -3 15766 15768 16054 -3 16056 16054 15768 -3 15767 16055 15769 -3 16057 15769 16055 -3 15768 15770 16056 -3 16058 16056 15770 -3 15769 16057 15771 -3 16059 15771 16057 -3 15770 15772 16058 -3 16060 16058 15772 -3 15771 16059 15773 -3 16061 15773 16059 -3 15772 15774 16060 -3 16062 16060 15774 -3 15773 16061 15775 -3 16063 15775 16061 -3 15774 15776 16062 -3 16064 16062 15776 -3 15775 16063 15777 -3 16065 15777 16063 -3 15776 15778 16064 -3 16066 16064 15778 -3 15777 16065 15779 -3 16067 15779 16065 -3 15778 15780 16066 -3 16068 16066 15780 -3 15779 16067 15781 -3 16069 15781 16067 -3 15780 15782 16068 -3 16070 16068 15782 -3 15781 16069 15783 -3 16071 15783 16069 -3 15782 
15784 16070 -3 16072 16070 15784 -3 15783 16071 15785 -3 16073 15785 16071 -3 15784 15786 16072 -3 16074 16072 15786 -3 15785 16073 15787 -3 16075 15787 16073 -3 15786 15788 16074 -3 16076 16074 15788 -3 15787 16075 15789 -3 16077 15789 16075 -3 15788 15790 16076 -3 16078 16076 15790 -3 15789 16077 15791 -3 16079 15791 16077 -3 15790 15792 16078 -3 16080 16078 15792 -3 15791 16079 15793 -3 16081 15793 16079 -3 15792 15794 16080 -3 16082 16080 15794 -3 15793 16081 15795 -3 16083 15795 16081 -3 15794 15796 16082 -3 16084 16082 15796 -3 15795 16083 15797 -3 16085 15797 16083 -3 15796 15798 16084 -3 16086 16084 15798 -3 15797 16085 16087 -3 15797 16087 15799 -3 15798 15800 16086 -3 16088 16086 15800 -3 15799 16087 16089 -3 15799 16089 15801 -3 15800 15802 16088 -3 16090 16088 15802 -3 15801 16089 16091 -3 15801 16091 15803 -3 15802 15804 16090 -3 16092 16090 15804 -3 15803 16091 16093 -3 15803 16093 15805 -3 15804 15806 16092 -3 16094 16092 15806 -3 15805 16093 16095 -3 15805 16095 15807 -3 15806 15808 16096 -3 15806 16096 16094 -3 15807 16095 16097 -3 15807 16097 15809 -3 15808 15810 16098 -3 15808 16098 16096 -3 15809 16097 16099 -3 15809 16099 15811 -3 15810 15812 16100 -3 15810 16100 16098 -3 15811 16099 16101 -3 15811 16101 15813 -3 15812 15814 16102 -3 15812 16102 16100 -3 15813 16101 16103 -3 15813 16103 15815 -3 15814 15816 16104 -3 15814 16104 16102 -3 15815 16103 16105 -3 15815 16105 15817 -3 15816 15818 16106 -3 15816 16106 16104 -3 15817 16105 16107 -3 15817 16107 15819 -3 15818 15820 16108 -3 15818 16108 16106 -3 15819 16107 16109 -3 15819 16109 15821 -3 15820 15822 16110 -3 15820 16110 16108 -3 15821 16109 16111 -3 15821 16111 15823 -3 15822 15824 16112 -3 15822 16112 16110 -3 15823 16111 16113 -3 15823 16113 15825 -3 15824 15826 16114 -3 15824 16114 16112 -3 15825 16113 16115 -3 15825 16115 15827 -3 15826 15828 16116 -3 15826 16116 16114 -3 15827 16115 16117 -3 15827 16117 15829 -3 15828 15830 16118 -3 15828 16118 16116 -3 15829 16117 16119 -3 15829 16119 15831 -3 15830 15832 16120 -3 15830 16120 16118 -3 15831 16119 16121 -3 15831 16121 15833 -3 15832 15834 16122 -3 15832 16122 16120 -3 15833 16121 16123 -3 15833 16123 15835 -3 15834 15836 16124 -3 15834 16124 16122 -3 15835 16123 16125 -3 15835 16125 15837 -3 15836 15838 16126 -3 15836 16126 16124 -3 15837 16125 16127 -3 15837 16127 15839 -3 15838 15840 16128 -3 15838 16128 16126 -3 15839 16127 16129 -3 15839 16129 15841 -3 15840 15842 16130 -3 15840 16130 16128 -3 15841 16129 16131 -3 15841 16131 15843 -3 15842 15844 16132 -3 15842 16132 16130 -3 15843 16131 16133 -3 15843 16133 15845 -3 15844 15846 16134 -3 15844 16134 16132 -3 15845 16133 16135 -3 15845 16135 15847 -3 15846 15848 16136 -3 15846 16136 16134 -3 15847 16135 16137 -3 15847 16137 15849 -3 15848 15850 16138 -3 15848 16138 16136 -3 15849 16137 16139 -3 15849 16139 15851 -3 15850 15852 16140 -3 15850 16140 16138 -3 15851 16139 16141 -3 15851 16141 15853 -3 15852 15854 16142 -3 15852 16142 16140 -3 15853 16141 16143 -3 15853 16143 15855 -3 15854 15856 16144 -3 15854 16144 16142 -3 15855 16143 16145 -3 15855 16145 15857 -3 15856 15858 16146 -3 15856 16146 16144 -3 15857 16145 16147 -3 15857 16147 15859 -3 15858 15860 16148 -3 15858 16148 16146 -3 15859 16147 16149 -3 15859 16149 15861 -3 15860 15862 16150 -3 15860 16150 16148 -3 15861 16149 16151 -3 15861 16151 15863 -3 15862 15864 16152 -3 15862 16152 16150 -3 15863 16151 16153 -3 15863 16153 15865 -3 15864 15866 16154 -3 15864 16154 16152 -3 15865 16153 16155 -3 15865 16155 15867 -3 15866 15868 16156 -3 15866 16156 
16154 -3 15867 16155 16157 -3 15867 16157 15869 -3 15868 15870 16158 -3 15868 16158 16156 -3 15869 16157 16159 -3 15869 16159 15871 -3 15870 15872 16160 -3 15870 16160 16158 -3 15871 16159 16163 -3 15871 16163 15875 -3 15872 15873 16161 -3 15872 16161 16160 -3 15873 15876 16164 -3 15873 16164 16161 -3 15874 15875 16163 -3 15874 16163 16162 -3 15874 16162 15879 -3 16167 15879 16162 -3 15876 15877 16165 -3 15876 16165 16164 -3 15877 15880 16168 -3 15877 16168 16165 -3 15878 15879 16167 -3 15878 16167 16166 -3 15878 16166 15948 -3 16236 15948 16166 -3 15880 15881 16169 -3 15880 16169 16168 -3 15881 15882 16170 -3 15881 16170 16169 -3 15882 15883 16171 -3 15882 16171 16170 -3 15883 15884 16171 -3 16172 16171 15884 -3 15884 15885 16172 -3 16173 16172 15885 -3 15885 15886 16173 -3 16174 16173 15886 -3 15886 15887 16174 -3 16175 16174 15887 -3 15887 15888 16175 -3 16176 16175 15888 -3 15888 15889 16176 -3 16177 16176 15889 -3 15889 15890 16177 -3 16178 16177 15890 -3 15890 15891 16178 -3 16179 16178 15891 -3 15891 15892 16179 -3 16180 16179 15892 -3 15892 15893 16180 -3 16181 16180 15893 -3 15893 15894 16181 -3 16182 16181 15894 -3 15894 15895 16182 -3 16183 16182 15895 -3 15895 15896 16183 -3 16184 16183 15896 -3 15896 15897 16184 -3 16185 16184 15897 -3 15897 15898 16185 -3 16186 16185 15898 -3 15898 15899 16186 -3 16187 16186 15899 -3 15899 15900 16187 -3 16188 16187 15900 -3 15900 15901 16188 -3 16189 16188 15901 -3 15901 15902 16189 -3 16190 16189 15902 -3 15902 15903 16190 -3 16191 16190 15903 -3 15903 15904 16191 -3 16192 16191 15904 -3 15904 15905 16192 -3 16193 16192 15905 -3 15905 15906 16193 -3 16194 16193 15906 -3 15906 15907 16194 -3 16195 16194 15907 -3 15907 15908 16195 -3 16196 16195 15908 -3 15908 15909 16196 -3 16197 16196 15909 -3 15909 15910 16197 -3 16198 16197 15910 -3 15910 15911 16198 -3 16199 16198 15911 -3 15911 15912 16199 -3 16200 16199 15912 -3 15912 15913 16200 -3 16201 16200 15913 -3 15913 15914 16201 -3 16202 16201 15914 -3 15914 15915 16202 -3 16203 16202 15915 -3 15915 15916 16203 -3 16204 16203 15916 -3 15916 15917 16204 -3 16205 16204 15917 -3 15917 15918 16205 -3 16206 16205 15918 -3 15918 15919 16206 -3 16207 16206 15919 -3 15919 15920 16207 -3 16208 16207 15920 -3 15920 15921 16208 -3 16209 16208 15921 -3 15921 15922 16209 -3 16210 16209 15922 -3 15922 15923 16210 -3 16211 16210 15923 -3 15923 15924 16211 -3 16212 16211 15924 -3 15924 15925 16212 -3 16213 16212 15925 -3 15925 15926 16213 -3 16214 16213 15926 -3 15926 15927 16214 -3 16215 16214 15927 -3 15927 15928 16215 -3 16216 16215 15928 -3 15928 15929 16216 -3 16217 16216 15929 -3 15929 15930 16217 -3 16218 16217 15930 -3 15930 15931 16218 -3 16219 16218 15931 -3 15931 15932 16219 -3 16220 16219 15932 -3 15932 15933 16220 -3 16221 16220 15933 -3 15933 15934 16221 -3 16222 16221 15934 -3 15934 15935 16222 -3 16223 16222 15935 -3 15935 15936 16223 -3 16224 16223 15936 -3 15936 15937 16224 -3 16225 16224 15937 -3 15937 15938 16225 -3 16226 16225 15938 -3 15938 15939 16226 -3 16227 16226 15939 -3 15939 15940 16227 -3 16228 16227 15940 -3 15940 15941 16228 -3 16229 16228 15941 -3 15941 15942 16229 -3 16230 16229 15942 -3 15942 15943 16230 -3 16231 16230 15943 -3 15943 15944 16231 -3 16232 16231 15944 -3 15944 15945 16232 -3 16233 16232 15945 -3 15945 15946 16233 -3 16234 16233 15946 -3 15946 15947 16234 -3 16235 16234 15947 -3 15947 15948 16235 -3 16236 16235 15948 -3 15949 16237 15950 -3 16238 15950 16237 -3 15949 16019 16307 -3 15949 16307 16237 -3 15950 16238 15951 -3 16239 15951 16238 -3 15951 16239 15952 
-3 16240 15952 16239 -3 15952 16240 16241 -3 15952 16241 15953 -3 15953 16241 16242 -3 15953 16242 15954 -3 15954 16242 16243 -3 15954 16243 15955 -3 15955 16243 16244 -3 15955 16244 15956 -3 15956 16244 16245 -3 15956 16245 15957 -3 15957 16245 16246 -3 15957 16246 15958 -3 15958 16246 16247 -3 15958 16247 15959 -3 15959 16247 16248 -3 15959 16248 15960 -3 15960 16248 16249 -3 15960 16249 15961 -3 15961 16249 16250 -3 15961 16250 15962 -3 15962 16250 16251 -3 15962 16251 15963 -3 15963 16251 16252 -3 15963 16252 15964 -3 15964 16252 16253 -3 15964 16253 15965 -3 15965 16253 16254 -3 15965 16254 15966 -3 15966 16254 16255 -3 15966 16255 15967 -3 15967 16255 16256 -3 15967 16256 15968 -3 15968 16256 16257 -3 15968 16257 15969 -3 15969 16257 16258 -3 15969 16258 15970 -3 15970 16258 16259 -3 15970 16259 15971 -3 15971 16259 16260 -3 15971 16260 15972 -3 15972 16260 16261 -3 15972 16261 15973 -3 15973 16261 16262 -3 15973 16262 15974 -3 15974 16262 16263 -3 15974 16263 15975 -3 15975 16263 16264 -3 15975 16264 15976 -3 15976 16264 16265 -3 15976 16265 15977 -3 15977 16265 16266 -3 15977 16266 15978 -3 15978 16266 16267 -3 15978 16267 15979 -3 15979 16267 16268 -3 15979 16268 15980 -3 15980 16268 16269 -3 15980 16269 15981 -3 15981 16269 16270 -3 15981 16270 15982 -3 15982 16270 16271 -3 15982 16271 15983 -3 15983 16271 16272 -3 15983 16272 15984 -3 15984 16272 16273 -3 15984 16273 15985 -3 15985 16273 16274 -3 15985 16274 15986 -3 15986 16274 16275 -3 15986 16275 15987 -3 15987 16275 16276 -3 15987 16276 15988 -3 15988 16276 16277 -3 15988 16277 15989 -3 15989 16277 16278 -3 15989 16278 15990 -3 15990 16278 16279 -3 15990 16279 15991 -3 15991 16279 16280 -3 15991 16280 15992 -3 15992 16280 16281 -3 15992 16281 15993 -3 15993 16281 16282 -3 15993 16282 15994 -3 15994 16282 16283 -3 15994 16283 15995 -3 15995 16283 16284 -3 15995 16284 15996 -3 15996 16284 16285 -3 15996 16285 15997 -3 15997 16285 16286 -3 15997 16286 15998 -3 15998 16286 16287 -3 15998 16287 15999 -3 15999 16287 16288 -3 15999 16288 16000 -3 16000 16288 16289 -3 16000 16289 16001 -3 16001 16289 16290 -3 16001 16290 16002 -3 16002 16290 16291 -3 16002 16291 16003 -3 16003 16291 16292 -3 16003 16292 16004 -3 16004 16292 16293 -3 16004 16293 16005 -3 16005 16293 16294 -3 16005 16294 16006 -3 16006 16294 16295 -3 16006 16295 16007 -3 16007 16295 16296 -3 16007 16296 16008 -3 16008 16296 16297 -3 16008 16297 16009 -3 16009 16297 16298 -3 16009 16298 16010 -3 16010 16298 16299 -3 16010 16299 16011 -3 16011 16299 16300 -3 16011 16300 16012 -3 16012 16300 16301 -3 16012 16301 16013 -3 16013 16301 16302 -3 16013 16302 16014 -3 16014 16302 16303 -3 16014 16303 16015 -3 16015 16303 16304 -3 16015 16304 16016 -3 16016 16304 16305 -3 16016 16305 16017 -3 16017 16305 16308 -3 16017 16308 16020 -3 16018 16306 16307 -3 16018 16307 16019 -3 16018 16023 16311 -3 16018 16311 16306 -3 16020 16308 16309 -3 16020 16309 16021 -3 16021 16309 16312 -3 16021 16312 16024 -3 16022 16310 16311 -3 16022 16311 16023 -3 16022 16026 16314 -3 16022 16314 16310 -3 16024 16312 16313 -3 16024 16313 16025 -3 16025 16313 16315 -3 16025 16315 16027 -3 16026 16028 16316 -3 16026 16316 16314 -3 16027 16315 16317 -3 16027 16317 16029 -3 16028 16030 16318 -3 16028 16318 16316 -3 16029 16317 16319 -3 16029 16319 16031 -3 16030 16032 16320 -3 16030 16320 16318 -3 16031 16319 16033 -3 16321 16033 16319 -3 16032 16034 16322 -3 16032 16322 16320 -3 16033 16321 16035 -3 16323 16035 16321 -3 16034 16036 16324 -3 16034 16324 16322 -3 16035 16323 16037 -3 16325 16037 16323 -3 
16036 16038 16326 -3 16036 16326 16324 -3 16037 16325 16039 -3 16327 16039 16325 -3 16038 16040 16326 -3 16328 16326 16040 -3 16039 16327 16041 -3 16329 16041 16327 -3 16040 16042 16328 -3 16330 16328 16042 -3 16041 16329 16043 -3 16331 16043 16329 -3 16042 16044 16330 -3 16332 16330 16044 -3 16043 16331 16045 -3 16333 16045 16331 -3 16044 16046 16332 -3 16334 16332 16046 -3 16045 16333 16047 -3 16335 16047 16333 -3 16046 16048 16334 -3 16336 16334 16048 -3 16047 16335 16049 -3 16337 16049 16335 -3 16048 16050 16336 -3 16338 16336 16050 -3 16049 16337 16051 -3 16339 16051 16337 -3 16050 16052 16338 -3 16340 16338 16052 -3 16051 16339 16053 -3 16341 16053 16339 -3 16052 16054 16340 -3 16342 16340 16054 -3 16053 16341 16055 -3 16343 16055 16341 -3 16054 16056 16342 -3 16344 16342 16056 -3 16055 16343 16057 -3 16345 16057 16343 -3 16056 16058 16344 -3 16346 16344 16058 -3 16057 16345 16059 -3 16347 16059 16345 -3 16058 16060 16346 -3 16348 16346 16060 -3 16059 16347 16061 -3 16349 16061 16347 -3 16060 16062 16348 -3 16350 16348 16062 -3 16061 16349 16063 -3 16351 16063 16349 -3 16062 16064 16350 -3 16352 16350 16064 -3 16063 16351 16065 -3 16353 16065 16351 -3 16064 16066 16352 -3 16354 16352 16066 -3 16065 16353 16067 -3 16355 16067 16353 -3 16066 16068 16354 -3 16356 16354 16068 -3 16067 16355 16069 -3 16357 16069 16355 -3 16068 16070 16356 -3 16358 16356 16070 -3 16069 16357 16071 -3 16359 16071 16357 -3 16070 16072 16358 -3 16360 16358 16072 -3 16071 16359 16073 -3 16361 16073 16359 -3 16072 16074 16360 -3 16362 16360 16074 -3 16073 16361 16075 -3 16363 16075 16361 -3 16074 16076 16362 -3 16364 16362 16076 -3 16075 16363 16077 -3 16365 16077 16363 -3 16076 16078 16364 -3 16366 16364 16078 -3 16077 16365 16079 -3 16367 16079 16365 -3 16078 16080 16366 -3 16368 16366 16080 -3 16079 16367 16081 -3 16369 16081 16367 -3 16080 16082 16368 -3 16370 16368 16082 -3 16081 16369 16083 -3 16371 16083 16369 -3 16082 16084 16370 -3 16372 16370 16084 -3 16083 16371 16085 -3 16373 16085 16371 -3 16084 16086 16372 -3 16374 16372 16086 -3 16085 16373 16087 -3 16375 16087 16373 -3 16086 16088 16374 -3 16376 16374 16088 -3 16087 16375 16089 -3 16377 16089 16375 -3 16088 16090 16376 -3 16378 16376 16090 -3 16089 16377 16091 -3 16379 16091 16377 -3 16090 16092 16378 -3 16380 16378 16092 -3 16091 16379 16093 -3 16381 16093 16379 -3 16092 16094 16380 -3 16382 16380 16094 -3 16093 16381 16095 -3 16383 16095 16381 -3 16094 16096 16382 -3 16384 16382 16096 -3 16095 16383 16097 -3 16385 16097 16383 -3 16096 16098 16384 -3 16386 16384 16098 -3 16097 16385 16099 -3 16387 16099 16385 -3 16098 16100 16386 -3 16388 16386 16100 -3 16099 16387 16101 -3 16389 16101 16387 -3 16100 16102 16388 -3 16390 16388 16102 -3 16101 16389 16103 -3 16391 16103 16389 -3 16102 16104 16390 -3 16392 16390 16104 -3 16103 16391 16105 -3 16393 16105 16391 -3 16104 16106 16392 -3 16394 16392 16106 -3 16105 16393 16107 -3 16395 16107 16393 -3 16106 16108 16394 -3 16396 16394 16108 -3 16107 16395 16109 -3 16397 16109 16395 -3 16108 16110 16396 -3 16398 16396 16110 -3 16109 16397 16399 -3 16109 16399 16111 -3 16110 16112 16398 -3 16400 16398 16112 -3 16111 16399 16401 -3 16111 16401 16113 -3 16112 16114 16400 -3 16402 16400 16114 -3 16113 16401 16403 -3 16113 16403 16115 -3 16114 16116 16402 -3 16404 16402 16116 -3 16115 16403 16405 -3 16115 16405 16117 -3 16116 16118 16406 -3 16116 16406 16404 -3 16117 16405 16407 -3 16117 16407 16119 -3 16118 16120 16408 -3 16118 16408 16406 -3 16119 16407 16409 -3 16119 16409 16121 -3 16120 16122 16410 -3 16120 
16410 16408 -3 16121 16409 16411 -3 16121 16411 16123 -3 16122 16124 16412 -3 16122 16412 16410 -3 16123 16411 16413 -3 16123 16413 16125 -3 16124 16126 16414 -3 16124 16414 16412 -3 16125 16413 16415 -3 16125 16415 16127 -3 16126 16128 16416 -3 16126 16416 16414 -3 16127 16415 16417 -3 16127 16417 16129 -3 16128 16130 16418 -3 16128 16418 16416 -3 16129 16417 16419 -3 16129 16419 16131 -3 16130 16132 16420 -3 16130 16420 16418 -3 16131 16419 16421 -3 16131 16421 16133 -3 16132 16134 16422 -3 16132 16422 16420 -3 16133 16421 16423 -3 16133 16423 16135 -3 16134 16136 16424 -3 16134 16424 16422 -3 16135 16423 16425 -3 16135 16425 16137 -3 16136 16138 16426 -3 16136 16426 16424 -3 16137 16425 16427 -3 16137 16427 16139 -3 16138 16140 16428 -3 16138 16428 16426 -3 16139 16427 16429 -3 16139 16429 16141 -3 16140 16142 16430 -3 16140 16430 16428 -3 16141 16429 16431 -3 16141 16431 16143 -3 16142 16144 16432 -3 16142 16432 16430 -3 16143 16431 16433 -3 16143 16433 16145 -3 16144 16146 16434 -3 16144 16434 16432 -3 16145 16433 16435 -3 16145 16435 16147 -3 16146 16148 16436 -3 16146 16436 16434 -3 16147 16435 16437 -3 16147 16437 16149 -3 16148 16150 16438 -3 16148 16438 16436 -3 16149 16437 16439 -3 16149 16439 16151 -3 16150 16152 16440 -3 16150 16440 16438 -3 16151 16439 16441 -3 16151 16441 16153 -3 16152 16154 16442 -3 16152 16442 16440 -3 16153 16441 16443 -3 16153 16443 16155 -3 16154 16156 16444 -3 16154 16444 16442 -3 16155 16443 16445 -3 16155 16445 16157 -3 16156 16158 16446 -3 16156 16446 16444 -3 16157 16445 16447 -3 16157 16447 16159 -3 16158 16160 16448 -3 16158 16448 16446 -3 16159 16447 16451 -3 16159 16451 16163 -3 16160 16161 16449 -3 16160 16449 16448 -3 16161 16164 16452 -3 16161 16452 16449 -3 16162 16163 16451 -3 16162 16451 16450 -3 16162 16450 16455 -3 16162 16455 16167 -3 16164 16165 16453 -3 16164 16453 16452 -3 16165 16168 16456 -3 16165 16456 16453 -3 16166 16167 16455 -3 16166 16455 16454 -3 16166 16454 16524 -3 16166 16524 16236 -3 16168 16169 16457 -3 16168 16457 16456 -3 16169 16170 16458 -3 16169 16458 16457 -3 16170 16171 16459 -3 16170 16459 16458 -3 16171 16172 16460 -3 16171 16460 16459 -3 16172 16173 16461 -3 16172 16461 16460 -3 16173 16174 16462 -3 16173 16462 16461 -3 16174 16175 16463 -3 16174 16463 16462 -3 16175 16176 16464 -3 16175 16464 16463 -3 16176 16177 16465 -3 16176 16465 16464 -3 16177 16178 16466 -3 16177 16466 16465 -3 16178 16179 16467 -3 16178 16467 16466 -3 16179 16180 16468 -3 16179 16468 16467 -3 16180 16181 16469 -3 16180 16469 16468 -3 16181 16182 16470 -3 16181 16470 16469 -3 16182 16183 16471 -3 16182 16471 16470 -3 16183 16184 16472 -3 16183 16472 16471 -3 16184 16185 16473 -3 16184 16473 16472 -3 16185 16186 16474 -3 16185 16474 16473 -3 16186 16187 16475 -3 16186 16475 16474 -3 16187 16188 16476 -3 16187 16476 16475 -3 16188 16189 16477 -3 16188 16477 16476 -3 16189 16190 16478 -3 16189 16478 16477 -3 16190 16191 16479 -3 16190 16479 16478 -3 16191 16192 16480 -3 16191 16480 16479 -3 16192 16193 16481 -3 16192 16481 16480 -3 16193 16194 16482 -3 16193 16482 16481 -3 16194 16195 16483 -3 16194 16483 16482 -3 16195 16196 16483 -3 16484 16483 16196 -3 16196 16197 16484 -3 16485 16484 16197 -3 16197 16198 16485 -3 16486 16485 16198 -3 16198 16199 16486 -3 16487 16486 16199 -3 16199 16200 16487 -3 16488 16487 16200 -3 16200 16201 16488 -3 16489 16488 16201 -3 16201 16202 16489 -3 16490 16489 16202 -3 16202 16203 16490 -3 16491 16490 16203 -3 16203 16204 16491 -3 16492 16491 16204 -3 16204 16205 16492 -3 16493 16492 16205 -3 16205 16206 
16493 -3 16494 16493 16206 -3 16206 16207 16494 -3 16495 16494 16207 -3 16207 16208 16495 -3 16496 16495 16208 -3 16208 16209 16496 -3 16497 16496 16209 -3 16209 16210 16497 -3 16498 16497 16210 -3 16210 16211 16498 -3 16499 16498 16211 -3 16211 16212 16499 -3 16500 16499 16212 -3 16212 16213 16500 -3 16501 16500 16213 -3 16213 16214 16501 -3 16502 16501 16214 -3 16214 16215 16502 -3 16503 16502 16215 -3 16215 16216 16503 -3 16504 16503 16216 -3 16216 16217 16504 -3 16505 16504 16217 -3 16217 16218 16505 -3 16506 16505 16218 -3 16218 16219 16506 -3 16507 16506 16219 -3 16219 16220 16507 -3 16508 16507 16220 -3 16220 16221 16508 -3 16509 16508 16221 -3 16221 16222 16509 -3 16510 16509 16222 -3 16222 16223 16510 -3 16511 16510 16223 -3 16223 16224 16511 -3 16512 16511 16224 -3 16224 16225 16512 -3 16513 16512 16225 -3 16225 16226 16513 -3 16514 16513 16226 -3 16226 16227 16514 -3 16515 16514 16227 -3 16227 16228 16515 -3 16516 16515 16228 -3 16228 16229 16516 -3 16517 16516 16229 -3 16229 16230 16517 -3 16518 16517 16230 -3 16230 16231 16518 -3 16519 16518 16231 -3 16231 16232 16519 -3 16520 16519 16232 -3 16232 16233 16520 -3 16521 16520 16233 -3 16233 16234 16521 -3 16522 16521 16234 -3 16234 16235 16522 -3 16523 16522 16235 -3 16235 16236 16523 -3 16524 16523 16236 -3 16237 16525 16238 -3 16526 16238 16525 -3 16237 16307 16595 -3 16237 16595 16525 -3 16238 16526 16239 -3 16527 16239 16526 -3 16239 16527 16240 -3 16528 16240 16527 -3 16240 16528 16241 -3 16529 16241 16528 -3 16241 16529 16242 -3 16530 16242 16529 -3 16242 16530 16243 -3 16531 16243 16530 -3 16243 16531 16244 -3 16532 16244 16531 -3 16244 16532 16245 -3 16533 16245 16532 -3 16245 16533 16246 -3 16534 16246 16533 -3 16246 16534 16247 -3 16535 16247 16534 -3 16247 16535 16248 -3 16536 16248 16535 -3 16248 16536 16249 -3 16537 16249 16536 -3 16249 16537 16250 -3 16538 16250 16537 -3 16250 16538 16251 -3 16539 16251 16538 -3 16251 16539 16252 -3 16540 16252 16539 -3 16252 16540 16253 -3 16541 16253 16540 -3 16253 16541 16254 -3 16542 16254 16541 -3 16254 16542 16255 -3 16543 16255 16542 -3 16255 16543 16256 -3 16544 16256 16543 -3 16256 16544 16257 -3 16545 16257 16544 -3 16257 16545 16258 -3 16546 16258 16545 -3 16258 16546 16259 -3 16547 16259 16546 -3 16259 16547 16260 -3 16548 16260 16547 -3 16260 16548 16261 -3 16549 16261 16548 -3 16261 16549 16262 -3 16550 16262 16549 -3 16262 16550 16263 -3 16551 16263 16550 -3 16263 16551 16264 -3 16552 16264 16551 -3 16264 16552 16265 -3 16553 16265 16552 -3 16265 16553 16266 -3 16554 16266 16553 -3 16266 16554 16555 -3 16266 16555 16267 -3 16267 16555 16556 -3 16267 16556 16268 -3 16268 16556 16557 -3 16268 16557 16269 -3 16269 16557 16558 -3 16269 16558 16270 -3 16270 16558 16559 -3 16270 16559 16271 -3 16271 16559 16560 -3 16271 16560 16272 -3 16272 16560 16561 -3 16272 16561 16273 -3 16273 16561 16562 -3 16273 16562 16274 -3 16274 16562 16563 -3 16274 16563 16275 -3 16275 16563 16564 -3 16275 16564 16276 -3 16276 16564 16565 -3 16276 16565 16277 -3 16277 16565 16566 -3 16277 16566 16278 -3 16278 16566 16567 -3 16278 16567 16279 -3 16279 16567 16568 -3 16279 16568 16280 -3 16280 16568 16569 -3 16280 16569 16281 -3 16281 16569 16570 -3 16281 16570 16282 -3 16282 16570 16571 -3 16282 16571 16283 -3 16283 16571 16572 -3 16283 16572 16284 -3 16284 16572 16573 -3 16284 16573 16285 -3 16285 16573 16574 -3 16285 16574 16286 -3 16286 16574 16575 -3 16286 16575 16287 -3 16287 16575 16576 -3 16287 16576 16288 -3 16288 16576 16577 -3 16288 16577 16289 -3 16289 16577 16578 -3 16289 16578 16290 
-3 16290 16578 16579 -3 16290 16579 16291 -3 16291 16579 16580 -3 16291 16580 16292 -3 16292 16580 16581 -3 16292 16581 16293 -3 16293 16581 16582 -3 16293 16582 16294 -3 16294 16582 16583 -3 16294 16583 16295 -3 16295 16583 16584 -3 16295 16584 16296 -3 16296 16584 16585 -3 16296 16585 16297 -3 16297 16585 16586 -3 16297 16586 16298 -3 16298 16586 16587 -3 16298 16587 16299 -3 16299 16587 16588 -3 16299 16588 16300 -3 16300 16588 16589 -3 16300 16589 16301 -3 16301 16589 16590 -3 16301 16590 16302 -3 16302 16590 16591 -3 16302 16591 16303 -3 16303 16591 16592 -3 16303 16592 16304 -3 16304 16592 16593 -3 16304 16593 16305 -3 16305 16593 16596 -3 16305 16596 16308 -3 16306 16594 16595 -3 16306 16595 16307 -3 16306 16311 16599 -3 16306 16599 16594 -3 16308 16596 16597 -3 16308 16597 16309 -3 16309 16597 16600 -3 16309 16600 16312 -3 16310 16598 16599 -3 16310 16599 16311 -3 16310 16314 16602 -3 16310 16602 16598 -3 16312 16600 16601 -3 16312 16601 16313 -3 16313 16601 16603 -3 16313 16603 16315 -3 16314 16316 16604 -3 16314 16604 16602 -3 16315 16603 16605 -3 16315 16605 16317 -3 16316 16318 16606 -3 16316 16606 16604 -3 16317 16605 16607 -3 16317 16607 16319 -3 16318 16320 16608 -3 16318 16608 16606 -3 16319 16607 16609 -3 16319 16609 16321 -3 16320 16322 16610 -3 16320 16610 16608 -3 16321 16609 16611 -3 16321 16611 16323 -3 16322 16324 16612 -3 16322 16612 16610 -3 16323 16611 16613 -3 16323 16613 16325 -3 16324 16326 16614 -3 16324 16614 16612 -3 16325 16613 16615 -3 16325 16615 16327 -3 16326 16328 16616 -3 16326 16616 16614 -3 16327 16615 16617 -3 16327 16617 16329 -3 16328 16330 16618 -3 16328 16618 16616 -3 16329 16617 16619 -3 16329 16619 16331 -3 16330 16332 16620 -3 16330 16620 16618 -3 16331 16619 16621 -3 16331 16621 16333 -3 16332 16334 16622 -3 16332 16622 16620 -3 16333 16621 16623 -3 16333 16623 16335 -3 16334 16336 16624 -3 16334 16624 16622 -3 16335 16623 16625 -3 16335 16625 16337 -3 16336 16338 16626 -3 16336 16626 16624 -3 16337 16625 16627 -3 16337 16627 16339 -3 16338 16340 16628 -3 16338 16628 16626 -3 16339 16627 16629 -3 16339 16629 16341 -3 16340 16342 16630 -3 16340 16630 16628 -3 16341 16629 16631 -3 16341 16631 16343 -3 16342 16344 16632 -3 16342 16632 16630 -3 16343 16631 16633 -3 16343 16633 16345 -3 16344 16346 16634 -3 16344 16634 16632 -3 16345 16633 16347 -3 16635 16347 16633 -3 16346 16348 16636 -3 16346 16636 16634 -3 16347 16635 16349 -3 16637 16349 16635 -3 16348 16350 16638 -3 16348 16638 16636 -3 16349 16637 16351 -3 16639 16351 16637 -3 16350 16352 16640 -3 16350 16640 16638 -3 16351 16639 16353 -3 16641 16353 16639 -3 16352 16354 16640 -3 16642 16640 16354 -3 16353 16641 16355 -3 16643 16355 16641 -3 16354 16356 16642 -3 16644 16642 16356 -3 16355 16643 16357 -3 16645 16357 16643 -3 16356 16358 16644 -3 16646 16644 16358 -3 16357 16645 16359 -3 16647 16359 16645 -3 16358 16360 16646 -3 16648 16646 16360 -3 16359 16647 16361 -3 16649 16361 16647 -3 16360 16362 16648 -3 16650 16648 16362 -3 16361 16649 16363 -3 16651 16363 16649 -3 16362 16364 16650 -3 16652 16650 16364 -3 16363 16651 16365 -3 16653 16365 16651 -3 16364 16366 16652 -3 16654 16652 16366 -3 16365 16653 16367 -3 16655 16367 16653 -3 16366 16368 16654 -3 16656 16654 16368 -3 16367 16655 16369 -3 16657 16369 16655 -3 16368 16370 16656 -3 16658 16656 16370 -3 16369 16657 16371 -3 16659 16371 16657 -3 16370 16372 16658 -3 16660 16658 16372 -3 16371 16659 16373 -3 16661 16373 16659 -3 16372 16374 16660 -3 16662 16660 16374 -3 16373 16661 16375 -3 16663 16375 16661 -3 16374 16376 16662 -3 
16664 16662 16376 -3 16375 16663 16377 -3 16665 16377 16663 -3 16376 16378 16664 -3 16666 16664 16378 -3 16377 16665 16379 -3 16667 16379 16665 -3 16378 16380 16666 -3 16668 16666 16380 -3 16379 16667 16381 -3 16669 16381 16667 -3 16380 16382 16668 -3 16670 16668 16382 -3 16381 16669 16383 -3 16671 16383 16669 -3 16382 16384 16670 -3 16672 16670 16384 -3 16383 16671 16385 -3 16673 16385 16671 -3 16384 16386 16672 -3 16674 16672 16386 -3 16385 16673 16387 -3 16675 16387 16673 -3 16386 16388 16674 -3 16676 16674 16388 -3 16387 16675 16389 -3 16677 16389 16675 -3 16388 16390 16676 -3 16678 16676 16390 -3 16389 16677 16391 -3 16679 16391 16677 -3 16390 16392 16678 -3 16680 16678 16392 -3 16391 16679 16393 -3 16681 16393 16679 -3 16392 16394 16680 -3 16682 16680 16394 -3 16393 16681 16395 -3 16683 16395 16681 -3 16394 16396 16682 -3 16684 16682 16396 -3 16395 16683 16397 -3 16685 16397 16683 -3 16396 16398 16684 -3 16686 16684 16398 -3 16397 16685 16399 -3 16687 16399 16685 -3 16398 16400 16686 -3 16688 16686 16400 -3 16399 16687 16401 -3 16689 16401 16687 -3 16400 16402 16688 -3 16690 16688 16402 -3 16401 16689 16403 -3 16691 16403 16689 -3 16402 16404 16690 -3 16692 16690 16404 -3 16403 16691 16405 -3 16693 16405 16691 -3 16404 16406 16692 -3 16694 16692 16406 -3 16405 16693 16407 -3 16695 16407 16693 -3 16406 16408 16694 -3 16696 16694 16408 -3 16407 16695 16409 -3 16697 16409 16695 -3 16408 16410 16696 -3 16698 16696 16410 -3 16409 16697 16411 -3 16699 16411 16697 -3 16410 16412 16698 -3 16700 16698 16412 -3 16411 16699 16413 -3 16701 16413 16699 -3 16412 16414 16700 -3 16702 16700 16414 -3 16413 16701 16415 -3 16703 16415 16701 -3 16414 16416 16702 -3 16704 16702 16416 -3 16415 16703 16417 -3 16705 16417 16703 -3 16416 16418 16704 -3 16706 16704 16418 -3 16417 16705 16419 -3 16707 16419 16705 -3 16418 16420 16706 -3 16708 16706 16420 -3 16419 16707 16421 -3 16709 16421 16707 -3 16420 16422 16708 -3 16710 16708 16422 -3 16421 16709 16423 -3 16711 16423 16709 -3 16422 16424 16710 -3 16712 16710 16424 -3 16423 16711 16425 -3 16713 16425 16711 -3 16424 16426 16712 -3 16714 16712 16426 -3 16425 16713 16715 -3 16425 16715 16427 -3 16426 16428 16714 -3 16716 16714 16428 -3 16427 16715 16717 -3 16427 16717 16429 -3 16428 16430 16716 -3 16718 16716 16430 -3 16429 16717 16719 -3 16429 16719 16431 -3 16430 16432 16718 -3 16720 16718 16432 -3 16431 16719 16721 -3 16431 16721 16433 -3 16432 16434 16722 -3 16432 16722 16720 -3 16433 16721 16723 -3 16433 16723 16435 -3 16434 16436 16724 -3 16434 16724 16722 -3 16435 16723 16725 -3 16435 16725 16437 -3 16436 16438 16726 -3 16436 16726 16724 -3 16437 16725 16727 -3 16437 16727 16439 -3 16438 16440 16728 -3 16438 16728 16726 -3 16439 16727 16729 -3 16439 16729 16441 -3 16440 16442 16730 -3 16440 16730 16728 -3 16441 16729 16731 -3 16441 16731 16443 -3 16442 16444 16732 -3 16442 16732 16730 -3 16443 16731 16733 -3 16443 16733 16445 -3 16444 16446 16734 -3 16444 16734 16732 -3 16445 16733 16735 -3 16445 16735 16447 -3 16446 16448 16736 -3 16446 16736 16734 -3 16447 16735 16739 -3 16447 16739 16451 -3 16448 16449 16737 -3 16448 16737 16736 -3 16449 16452 16740 -3 16449 16740 16737 -3 16450 16451 16739 -3 16450 16739 16738 -3 16450 16738 16743 -3 16450 16743 16455 -3 16452 16453 16741 -3 16452 16741 16740 -3 16453 16456 16744 -3 16453 16744 16741 -3 16454 16455 16743 -3 16454 16743 16742 -3 16454 16742 16812 -3 16454 16812 16524 -3 16456 16457 16745 -3 16456 16745 16744 -3 16457 16458 16746 -3 16457 16746 16745 -3 16458 16459 16747 -3 16458 16747 16746 -3 16459 
16460 16748 -3 16459 16748 16747 -3 16460 16461 16749 -3 16460 16749 16748 -3 16461 16462 16750 -3 16461 16750 16749 -3 16462 16463 16751 -3 16462 16751 16750 -3 16463 16464 16752 -3 16463 16752 16751 -3 16464 16465 16753 -3 16464 16753 16752 -3 16465 16466 16754 -3 16465 16754 16753 -3 16466 16467 16755 -3 16466 16755 16754 -3 16467 16468 16756 -3 16467 16756 16755 -3 16468 16469 16757 -3 16468 16757 16756 -3 16469 16470 16758 -3 16469 16758 16757 -3 16470 16471 16759 -3 16470 16759 16758 -3 16471 16472 16760 -3 16471 16760 16759 -3 16472 16473 16761 -3 16472 16761 16760 -3 16473 16474 16762 -3 16473 16762 16761 -3 16474 16475 16763 -3 16474 16763 16762 -3 16475 16476 16764 -3 16475 16764 16763 -3 16476 16477 16765 -3 16476 16765 16764 -3 16477 16478 16766 -3 16477 16766 16765 -3 16478 16479 16767 -3 16478 16767 16766 -3 16479 16480 16768 -3 16479 16768 16767 -3 16480 16481 16769 -3 16480 16769 16768 -3 16481 16482 16770 -3 16481 16770 16769 -3 16482 16483 16771 -3 16482 16771 16770 -3 16483 16484 16772 -3 16483 16772 16771 -3 16484 16485 16773 -3 16484 16773 16772 -3 16485 16486 16774 -3 16485 16774 16773 -3 16486 16487 16775 -3 16486 16775 16774 -3 16487 16488 16776 -3 16487 16776 16775 -3 16488 16489 16777 -3 16488 16777 16776 -3 16489 16490 16778 -3 16489 16778 16777 -3 16490 16491 16779 -3 16490 16779 16778 -3 16491 16492 16780 -3 16491 16780 16779 -3 16492 16493 16781 -3 16492 16781 16780 -3 16493 16494 16782 -3 16493 16782 16781 -3 16494 16495 16783 -3 16494 16783 16782 -3 16495 16496 16784 -3 16495 16784 16783 -3 16496 16497 16785 -3 16496 16785 16784 -3 16497 16498 16786 -3 16497 16786 16785 -3 16498 16499 16787 -3 16498 16787 16786 -3 16499 16500 16788 -3 16499 16788 16787 -3 16500 16501 16789 -3 16500 16789 16788 -3 16501 16502 16790 -3 16501 16790 16789 -3 16502 16503 16791 -3 16502 16791 16790 -3 16503 16504 16792 -3 16503 16792 16791 -3 16504 16505 16793 -3 16504 16793 16792 -3 16505 16506 16794 -3 16505 16794 16793 -3 16506 16507 16795 -3 16506 16795 16794 -3 16507 16508 16796 -3 16507 16796 16795 -3 16508 16509 16797 -3 16508 16797 16796 -3 16509 16510 16798 -3 16509 16798 16797 -3 16510 16511 16798 -3 16799 16798 16511 -3 16511 16512 16799 -3 16800 16799 16512 -3 16512 16513 16800 -3 16801 16800 16513 -3 16513 16514 16801 -3 16802 16801 16514 -3 16514 16515 16802 -3 16803 16802 16515 -3 16515 16516 16803 -3 16804 16803 16516 -3 16516 16517 16804 -3 16805 16804 16517 -3 16517 16518 16805 -3 16806 16805 16518 -3 16518 16519 16806 -3 16807 16806 16519 -3 16519 16520 16807 -3 16808 16807 16520 -3 16520 16521 16808 -3 16809 16808 16521 -3 16521 16522 16809 -3 16810 16809 16522 -3 16522 16523 16810 -3 16811 16810 16523 -3 16523 16524 16811 -3 16812 16811 16524 -3 16525 16813 16526 -3 16814 16526 16813 -3 16525 16595 16813 -3 16883 16813 16595 -3 16526 16814 16527 -3 16815 16527 16814 -3 16527 16815 16528 -3 16816 16528 16815 -3 16528 16816 16529 -3 16817 16529 16816 -3 16529 16817 16530 -3 16818 16530 16817 -3 16530 16818 16531 -3 16819 16531 16818 -3 16531 16819 16532 -3 16820 16532 16819 -3 16532 16820 16533 -3 16821 16533 16820 -3 16533 16821 16534 -3 16822 16534 16821 -3 16534 16822 16535 -3 16823 16535 16822 -3 16535 16823 16536 -3 16824 16536 16823 -3 16536 16824 16537 -3 16825 16537 16824 -3 16537 16825 16538 -3 16826 16538 16825 -3 16538 16826 16539 -3 16827 16539 16826 -3 16539 16827 16540 -3 16828 16540 16827 -3 16540 16828 16541 -3 16829 16541 16828 -3 16541 16829 16542 -3 16830 16542 16829 -3 16542 16830 16543 -3 16831 16543 16830 -3 16543 16831 16544 -3 16832 16544 
16831 -3 16544 16832 16545 -3 16833 16545 16832 -3 16545 16833 16546 -3 16834 16546 16833 -3 16546 16834 16547 -3 16835 16547 16834 -3 16547 16835 16548 -3 16836 16548 16835 -3 16548 16836 16549 -3 16837 16549 16836 -3 16549 16837 16550 -3 16838 16550 16837 -3 16550 16838 16551 -3 16839 16551 16838 -3 16551 16839 16552 -3 16840 16552 16839 -3 16552 16840 16553 -3 16841 16553 16840 -3 16553 16841 16554 -3 16842 16554 16841 -3 16554 16842 16555 -3 16843 16555 16842 -3 16555 16843 16556 -3 16844 16556 16843 -3 16556 16844 16557 -3 16845 16557 16844 -3 16557 16845 16558 -3 16846 16558 16845 -3 16558 16846 16559 -3 16847 16559 16846 -3 16559 16847 16560 -3 16848 16560 16847 -3 16560 16848 16561 -3 16849 16561 16848 -3 16561 16849 16562 -3 16850 16562 16849 -3 16562 16850 16563 -3 16851 16563 16850 -3 16563 16851 16564 -3 16852 16564 16851 -3 16564 16852 16565 -3 16853 16565 16852 -3 16565 16853 16566 -3 16854 16566 16853 -3 16566 16854 16567 -3 16855 16567 16854 -3 16567 16855 16568 -3 16856 16568 16855 -3 16568 16856 16569 -3 16857 16569 16856 -3 16569 16857 16570 -3 16858 16570 16857 -3 16570 16858 16571 -3 16859 16571 16858 -3 16571 16859 16572 -3 16860 16572 16859 -3 16572 16860 16573 -3 16861 16573 16860 -3 16573 16861 16574 -3 16862 16574 16861 -3 16574 16862 16575 -3 16863 16575 16862 -3 16575 16863 16576 -3 16864 16576 16863 -3 16576 16864 16577 -3 16865 16577 16864 -3 16577 16865 16578 -3 16866 16578 16865 -3 16578 16866 16579 -3 16867 16579 16866 -3 16579 16867 16580 -3 16868 16580 16867 -3 16580 16868 16581 -3 16869 16581 16868 -3 16581 16869 16582 -3 16870 16582 16869 -3 16582 16870 16583 -3 16871 16583 16870 -3 16583 16871 16872 -3 16583 16872 16584 -3 16584 16872 16873 -3 16584 16873 16585 -3 16585 16873 16874 -3 16585 16874 16586 -3 16586 16874 16875 -3 16586 16875 16587 -3 16587 16875 16876 -3 16587 16876 16588 -3 16588 16876 16877 -3 16588 16877 16589 -3 16589 16877 16878 -3 16589 16878 16590 -3 16590 16878 16879 -3 16590 16879 16591 -3 16591 16879 16880 -3 16591 16880 16592 -3 16592 16880 16881 -3 16592 16881 16593 -3 16593 16881 16884 -3 16593 16884 16596 -3 16594 16882 16883 -3 16594 16883 16595 -3 16594 16599 16887 -3 16594 16887 16882 -3 16596 16884 16885 -3 16596 16885 16597 -3 16597 16885 16888 -3 16597 16888 16600 -3 16598 16886 16887 -3 16598 16887 16599 -3 16598 16602 16890 -3 16598 16890 16886 -3 16600 16888 16889 -3 16600 16889 16601 -3 16601 16889 16891 -3 16601 16891 16603 -3 16602 16604 16892 -3 16602 16892 16890 -3 16603 16891 16893 -3 16603 16893 16605 -3 16604 16606 16894 -3 16604 16894 16892 -3 16605 16893 16895 -3 16605 16895 16607 -3 16606 16608 16896 -3 16606 16896 16894 -3 16607 16895 16897 -3 16607 16897 16609 -3 16608 16610 16898 -3 16608 16898 16896 -3 16609 16897 16899 -3 16609 16899 16611 -3 16610 16612 16900 -3 16610 16900 16898 -3 16611 16899 16901 -3 16611 16901 16613 -3 16612 16614 16902 -3 16612 16902 16900 -3 16613 16901 16903 -3 16613 16903 16615 -3 16614 16616 16904 -3 16614 16904 16902 -3 16615 16903 16905 -3 16615 16905 16617 -3 16616 16618 16906 -3 16616 16906 16904 -3 16617 16905 16907 -3 16617 16907 16619 -3 16618 16620 16908 -3 16618 16908 16906 -3 16619 16907 16909 -3 16619 16909 16621 -3 16620 16622 16910 -3 16620 16910 16908 -3 16621 16909 16911 -3 16621 16911 16623 -3 16622 16624 16912 -3 16622 16912 16910 -3 16623 16911 16913 -3 16623 16913 16625 -3 16624 16626 16914 -3 16624 16914 16912 -3 16625 16913 16915 -3 16625 16915 16627 -3 16626 16628 16916 -3 16626 16916 16914 -3 16627 16915 16917 -3 16627 16917 16629 -3 16628 16630 16918 
-3 16628 16918 16916 -3 16629 16917 16919 -3 16629 16919 16631 -3 16630 16632 16920 -3 16630 16920 16918 -3 16631 16919 16921 -3 16631 16921 16633 -3 16632 16634 16922 -3 16632 16922 16920 -3 16633 16921 16923 -3 16633 16923 16635 -3 16634 16636 16924 -3 16634 16924 16922 -3 16635 16923 16925 -3 16635 16925 16637 -3 16636 16638 16926 -3 16636 16926 16924 -3 16637 16925 16927 -3 16637 16927 16639 -3 16638 16640 16928 -3 16638 16928 16926 -3 16639 16927 16929 -3 16639 16929 16641 -3 16640 16642 16930 -3 16640 16930 16928 -3 16641 16929 16931 -3 16641 16931 16643 -3 16642 16644 16932 -3 16642 16932 16930 -3 16643 16931 16933 -3 16643 16933 16645 -3 16644 16646 16934 -3 16644 16934 16932 -3 16645 16933 16935 -3 16645 16935 16647 -3 16646 16648 16936 -3 16646 16936 16934 -3 16647 16935 16937 -3 16647 16937 16649 -3 16648 16650 16938 -3 16648 16938 16936 -3 16649 16937 16939 -3 16649 16939 16651 -3 16650 16652 16940 -3 16650 16940 16938 -3 16651 16939 16941 -3 16651 16941 16653 -3 16652 16654 16942 -3 16652 16942 16940 -3 16653 16941 16943 -3 16653 16943 16655 -3 16654 16656 16944 -3 16654 16944 16942 -3 16655 16943 16945 -3 16655 16945 16657 -3 16656 16658 16946 -3 16656 16946 16944 -3 16657 16945 16947 -3 16657 16947 16659 -3 16658 16660 16948 -3 16658 16948 16946 -3 16659 16947 16949 -3 16659 16949 16661 -3 16660 16662 16950 -3 16660 16950 16948 -3 16661 16949 16951 -3 16661 16951 16663 -3 16662 16664 16952 -3 16662 16952 16950 -3 16663 16951 16665 -3 16953 16665 16951 -3 16664 16666 16954 -3 16664 16954 16952 -3 16665 16953 16667 -3 16955 16667 16953 -3 16666 16668 16956 -3 16666 16956 16954 -3 16667 16955 16669 -3 16957 16669 16955 -3 16668 16670 16956 -3 16958 16956 16670 -3 16669 16957 16671 -3 16959 16671 16957 -3 16670 16672 16958 -3 16960 16958 16672 -3 16671 16959 16673 -3 16961 16673 16959 -3 16672 16674 16960 -3 16962 16960 16674 -3 16673 16961 16675 -3 16963 16675 16961 -3 16674 16676 16962 -3 16964 16962 16676 -3 16675 16963 16677 -3 16965 16677 16963 -3 16676 16678 16964 -3 16966 16964 16678 -3 16677 16965 16679 -3 16967 16679 16965 -3 16678 16680 16966 -3 16968 16966 16680 -3 16679 16967 16681 -3 16969 16681 16967 -3 16680 16682 16968 -3 16970 16968 16682 -3 16681 16969 16683 -3 16971 16683 16969 -3 16682 16684 16970 -3 16972 16970 16684 -3 16683 16971 16685 -3 16973 16685 16971 -3 16684 16686 16972 -3 16974 16972 16686 -3 16685 16973 16687 -3 16975 16687 16973 -3 16686 16688 16974 -3 16976 16974 16688 -3 16687 16975 16689 -3 16977 16689 16975 -3 16688 16690 16976 -3 16978 16976 16690 -3 16689 16977 16691 -3 16979 16691 16977 -3 16690 16692 16978 -3 16980 16978 16692 -3 16691 16979 16693 -3 16981 16693 16979 -3 16692 16694 16980 -3 16982 16980 16694 -3 16693 16981 16695 -3 16983 16695 16981 -3 16694 16696 16982 -3 16984 16982 16696 -3 16695 16983 16697 -3 16985 16697 16983 -3 16696 16698 16984 -3 16986 16984 16698 -3 16697 16985 16699 -3 16987 16699 16985 -3 16698 16700 16986 -3 16988 16986 16700 -3 16699 16987 16701 -3 16989 16701 16987 -3 16700 16702 16988 -3 16990 16988 16702 -3 16701 16989 16703 -3 16991 16703 16989 -3 16702 16704 16990 -3 16992 16990 16704 -3 16703 16991 16705 -3 16993 16705 16991 -3 16704 16706 16992 -3 16994 16992 16706 -3 16705 16993 16707 -3 16995 16707 16993 -3 16706 16708 16994 -3 16996 16994 16708 -3 16707 16995 16709 -3 16997 16709 16995 -3 16708 16710 16996 -3 16998 16996 16710 -3 16709 16997 16711 -3 16999 16711 16997 -3 16710 16712 16998 -3 17000 16998 16712 -3 16711 16999 16713 -3 17001 16713 16999 -3 16712 16714 17000 -3 17002 17000 16714 -3 
16713 17001 16715 -3 17003 16715 17001 -3 16714 16716 17002 -3 17004 17002 16716 -3 16715 17003 16717 -3 17005 16717 17003 -3 16716 16718 17004 -3 17006 17004 16718 -3 16717 17005 16719 -3 17007 16719 17005 -3 16718 16720 17006 -3 17008 17006 16720 -3 16719 17007 16721 -3 17009 16721 17007 -3 16720 16722 17008 -3 17010 17008 16722 -3 16721 17009 16723 -3 17011 16723 17009 -3 16722 16724 17010 -3 17012 17010 16724 -3 16723 17011 16725 -3 17013 16725 17011 -3 16724 16726 17012 -3 17014 17012 16726 -3 16725 17013 16727 -3 17015 16727 17013 -3 16726 16728 17014 -3 17016 17014 16728 -3 16727 17015 16729 -3 17017 16729 17015 -3 16728 16730 17016 -3 17018 17016 16730 -3 16729 17017 16731 -3 17019 16731 17017 -3 16730 16732 17018 -3 17020 17018 16732 -3 16731 17019 16733 -3 17021 16733 17019 -3 16732 16734 17020 -3 17022 17020 16734 -3 16733 17021 16735 -3 17023 16735 17021 -3 16734 16736 17022 -3 17024 17022 16736 -3 16735 17023 16739 -3 17027 16739 17023 -3 16736 16737 17024 -3 17025 17024 16737 -3 16737 16740 17025 -3 17028 17025 16740 -3 16738 16739 17026 -3 17027 17026 16739 -3 16738 17026 16743 -3 17031 16743 17026 -3 16740 16741 17028 -3 17029 17028 16741 -3 16741 16744 17029 -3 17032 17029 16744 -3 16742 16743 17030 -3 17031 17030 16743 -3 16742 17030 16812 -3 17100 16812 17030 -3 16744 16745 17032 -3 17033 17032 16745 -3 16745 16746 17033 -3 17034 17033 16746 -3 16746 16747 17034 -3 17035 17034 16747 -3 16747 16748 17035 -3 17036 17035 16748 -3 16748 16749 17037 -3 16748 17037 17036 -3 16749 16750 17038 -3 16749 17038 17037 -3 16750 16751 17039 -3 16750 17039 17038 -3 16751 16752 17040 -3 16751 17040 17039 -3 16752 16753 17041 -3 16752 17041 17040 -3 16753 16754 17042 -3 16753 17042 17041 -3 16754 16755 17043 -3 16754 17043 17042 -3 16755 16756 17044 -3 16755 17044 17043 -3 16756 16757 17045 -3 16756 17045 17044 -3 16757 16758 17046 -3 16757 17046 17045 -3 16758 16759 17047 -3 16758 17047 17046 -3 16759 16760 17048 -3 16759 17048 17047 -3 16760 16761 17049 -3 16760 17049 17048 -3 16761 16762 17050 -3 16761 17050 17049 -3 16762 16763 17051 -3 16762 17051 17050 -3 16763 16764 17052 -3 16763 17052 17051 -3 16764 16765 17053 -3 16764 17053 17052 -3 16765 16766 17054 -3 16765 17054 17053 -3 16766 16767 17055 -3 16766 17055 17054 -3 16767 16768 17056 -3 16767 17056 17055 -3 16768 16769 17057 -3 16768 17057 17056 -3 16769 16770 17058 -3 16769 17058 17057 -3 16770 16771 17059 -3 16770 17059 17058 -3 16771 16772 17060 -3 16771 17060 17059 -3 16772 16773 17061 -3 16772 17061 17060 -3 16773 16774 17062 -3 16773 17062 17061 -3 16774 16775 17063 -3 16774 17063 17062 -3 16775 16776 17064 -3 16775 17064 17063 -3 16776 16777 17065 -3 16776 17065 17064 -3 16777 16778 17066 -3 16777 17066 17065 -3 16778 16779 17067 -3 16778 17067 17066 -3 16779 16780 17068 -3 16779 17068 17067 -3 16780 16781 17069 -3 16780 17069 17068 -3 16781 16782 17070 -3 16781 17070 17069 -3 16782 16783 17071 -3 16782 17071 17070 -3 16783 16784 17072 -3 16783 17072 17071 -3 16784 16785 17073 -3 16784 17073 17072 -3 16785 16786 17074 -3 16785 17074 17073 -3 16786 16787 17075 -3 16786 17075 17074 -3 16787 16788 17076 -3 16787 17076 17075 -3 16788 16789 17077 -3 16788 17077 17076 -3 16789 16790 17078 -3 16789 17078 17077 -3 16790 16791 17079 -3 16790 17079 17078 -3 16791 16792 17080 -3 16791 17080 17079 -3 16792 16793 17081 -3 16792 17081 17080 -3 16793 16794 17082 -3 16793 17082 17081 -3 16794 16795 17083 -3 16794 17083 17082 -3 16795 16796 17084 -3 16795 17084 17083 -3 16796 16797 17085 -3 16796 17085 17084 -3 16797 16798 17086 -3 16797 
17086 17085 -3 16798 16799 17087 -3 16798 17087 17086 -3 16799 16800 17088 -3 16799 17088 17087 -3 16800 16801 17089 -3 16800 17089 17088 -3 16801 16802 17090 -3 16801 17090 17089 -3 16802 16803 17091 -3 16802 17091 17090 -3 16803 16804 17092 -3 16803 17092 17091 -3 16804 16805 17093 -3 16804 17093 17092 -3 16805 16806 17094 -3 16805 17094 17093 -3 16806 16807 17095 -3 16806 17095 17094 -3 16807 16808 17096 -3 16807 17096 17095 -3 16808 16809 17097 -3 16808 17097 17096 -3 16809 16810 17098 -3 16809 17098 17097 -3 16810 16811 17099 -3 16810 17099 17098 -3 16811 16812 17100 -3 16811 17100 17099 -3 16813 17101 17102 -3 16813 17102 16814 -3 16813 16883 17101 -3 17171 17101 16883 -3 16814 17102 17103 -3 16814 17103 16815 -3 16815 17103 17104 -3 16815 17104 16816 -3 16816 17104 17105 -3 16816 17105 16817 -3 16817 17105 17106 -3 16817 17106 16818 -3 16818 17106 17107 -3 16818 17107 16819 -3 16819 17107 17108 -3 16819 17108 16820 -3 16820 17108 17109 -3 16820 17109 16821 -3 16821 17109 17110 -3 16821 17110 16822 -3 16822 17110 17111 -3 16822 17111 16823 -3 16823 17111 16824 -3 17112 16824 17111 -3 16824 17112 16825 -3 17113 16825 17112 -3 16825 17113 16826 -3 17114 16826 17113 -3 16826 17114 16827 -3 17115 16827 17114 -3 16827 17115 16828 -3 17116 16828 17115 -3 16828 17116 16829 -3 17117 16829 17116 -3 16829 17117 16830 -3 17118 16830 17117 -3 16830 17118 16831 -3 17119 16831 17118 -3 16831 17119 16832 -3 17120 16832 17119 -3 16832 17120 16833 -3 17121 16833 17120 -3 16833 17121 16834 -3 17122 16834 17121 -3 16834 17122 16835 -3 17123 16835 17122 -3 16835 17123 16836 -3 17124 16836 17123 -3 16836 17124 16837 -3 17125 16837 17124 -3 16837 17125 16838 -3 17126 16838 17125 -3 16838 17126 16839 -3 17127 16839 17126 -3 16839 17127 16840 -3 17128 16840 17127 -3 16840 17128 16841 -3 17129 16841 17128 -3 16841 17129 16842 -3 17130 16842 17129 -3 16842 17130 16843 -3 17131 16843 17130 -3 16843 17131 16844 -3 17132 16844 17131 -3 16844 17132 16845 -3 17133 16845 17132 -3 16845 17133 16846 -3 17134 16846 17133 -3 16846 17134 16847 -3 17135 16847 17134 -3 16847 17135 16848 -3 17136 16848 17135 -3 16848 17136 16849 -3 17137 16849 17136 -3 16849 17137 16850 -3 17138 16850 17137 -3 16850 17138 16851 -3 17139 16851 17138 -3 16851 17139 16852 -3 17140 16852 17139 -3 16852 17140 16853 -3 17141 16853 17140 -3 16853 17141 16854 -3 17142 16854 17141 -3 16854 17142 16855 -3 17143 16855 17142 -3 16855 17143 16856 -3 17144 16856 17143 -3 16856 17144 16857 -3 17145 16857 17144 -3 16857 17145 16858 -3 17146 16858 17145 -3 16858 17146 16859 -3 17147 16859 17146 -3 16859 17147 16860 -3 17148 16860 17147 -3 16860 17148 16861 -3 17149 16861 17148 -3 16861 17149 16862 -3 17150 16862 17149 -3 16862 17150 16863 -3 17151 16863 17150 -3 16863 17151 16864 -3 17152 16864 17151 -3 16864 17152 16865 -3 17153 16865 17152 -3 16865 17153 16866 -3 17154 16866 17153 -3 16866 17154 16867 -3 17155 16867 17154 -3 16867 17155 16868 -3 17156 16868 17155 -3 16868 17156 16869 -3 17157 16869 17156 -3 16869 17157 16870 -3 17158 16870 17157 -3 16870 17158 16871 -3 17159 16871 17158 -3 16871 17159 16872 -3 17160 16872 17159 -3 16872 17160 16873 -3 17161 16873 17160 -3 16873 17161 16874 -3 17162 16874 17161 -3 16874 17162 16875 -3 17163 16875 17162 -3 16875 17163 16876 -3 17164 16876 17163 -3 16876 17164 16877 -3 17165 16877 17164 -3 16877 17165 16878 -3 17166 16878 17165 -3 16878 17166 16879 -3 17167 16879 17166 -3 16879 17167 16880 -3 17168 16880 17167 -3 16880 17168 16881 -3 17169 16881 17168 -3 16881 17169 16884 -3 17172 16884 17169 -3 16882 17170 
16883 -3 17171 16883 17170 -3 16882 16887 17170 -3 17175 17170 16887 -3 16884 17172 16885 -3 17173 16885 17172 -3 16885 17173 16888 -3 17176 16888 17173 -3 16886 17174 16887 -3 17175 16887 17174 -3 16886 16890 17174 -3 17178 17174 16890 -3 16888 17176 16889 -3 17177 16889 17176 -3 16889 17177 16891 -3 17179 16891 17177 -3 16890 16892 17178 -3 17180 17178 16892 -3 16891 17179 16893 -3 17181 16893 17179 -3 16892 16894 17180 -3 17182 17180 16894 -3 16893 17181 16895 -3 17183 16895 17181 -3 16894 16896 17182 -3 17184 17182 16896 -3 16895 17183 16897 -3 17185 16897 17183 -3 16896 16898 17184 -3 17186 17184 16898 -3 16897 17185 16899 -3 17187 16899 17185 -3 16898 16900 17186 -3 17188 17186 16900 -3 16899 17187 16901 -3 17189 16901 17187 -3 16900 16902 17188 -3 17190 17188 16902 -3 16901 17189 16903 -3 17191 16903 17189 -3 16902 16904 17190 -3 17192 17190 16904 -3 16903 17191 17193 -3 16903 17193 16905 -3 16904 16906 17192 -3 17194 17192 16906 -3 16905 17193 17195 -3 16905 17195 16907 -3 16906 16908 17194 -3 17196 17194 16908 -3 16907 17195 17197 -3 16907 17197 16909 -3 16908 16910 17198 -3 16908 17198 17196 -3 16909 17197 17199 -3 16909 17199 16911 -3 16910 16912 17200 -3 16910 17200 17198 -3 16911 17199 17201 -3 16911 17201 16913 -3 16912 16914 17202 -3 16912 17202 17200 -3 16913 17201 17203 -3 16913 17203 16915 -3 16914 16916 17204 -3 16914 17204 17202 -3 16915 17203 17205 -3 16915 17205 16917 -3 16916 16918 17206 -3 16916 17206 17204 -3 16917 17205 17207 -3 16917 17207 16919 -3 16918 16920 17208 -3 16918 17208 17206 -3 16919 17207 17209 -3 16919 17209 16921 -3 16920 16922 17210 -3 16920 17210 17208 -3 16921 17209 17211 -3 16921 17211 16923 -3 16922 16924 17212 -3 16922 17212 17210 -3 16923 17211 17213 -3 16923 17213 16925 -3 16924 16926 17214 -3 16924 17214 17212 -3 16925 17213 17215 -3 16925 17215 16927 -3 16926 16928 17216 -3 16926 17216 17214 -3 16927 17215 17217 -3 16927 17217 16929 -3 16928 16930 17218 -3 16928 17218 17216 -3 16929 17217 17219 -3 16929 17219 16931 -3 16930 16932 17220 -3 16930 17220 17218 -3 16931 17219 17221 -3 16931 17221 16933 -3 16932 16934 17222 -3 16932 17222 17220 -3 16933 17221 17223 -3 16933 17223 16935 -3 16934 16936 17224 -3 16934 17224 17222 -3 16935 17223 17225 -3 16935 17225 16937 -3 16936 16938 17226 -3 16936 17226 17224 -3 16937 17225 17227 -3 16937 17227 16939 -3 16938 16940 17228 -3 16938 17228 17226 -3 16939 17227 17229 -3 16939 17229 16941 -3 16940 16942 17230 -3 16940 17230 17228 -3 16941 17229 17231 -3 16941 17231 16943 -3 16942 16944 17232 -3 16942 17232 17230 -3 16943 17231 17233 -3 16943 17233 16945 -3 16944 16946 17234 -3 16944 17234 17232 -3 16945 17233 17235 -3 16945 17235 16947 -3 16946 16948 17236 -3 16946 17236 17234 -3 16947 17235 17237 -3 16947 17237 16949 -3 16948 16950 17238 -3 16948 17238 17236 -3 16949 17237 17239 -3 16949 17239 16951 -3 16950 16952 17240 -3 16950 17240 17238 -3 16951 17239 17241 -3 16951 17241 16953 -3 16952 16954 17242 -3 16952 17242 17240 -3 16953 17241 17243 -3 16953 17243 16955 -3 16954 16956 17244 -3 16954 17244 17242 -3 16955 17243 17245 -3 16955 17245 16957 -3 16956 16958 17246 -3 16956 17246 17244 -3 16957 17245 17247 -3 16957 17247 16959 -3 16958 16960 17248 -3 16958 17248 17246 -3 16959 17247 17249 -3 16959 17249 16961 -3 16960 16962 17250 -3 16960 17250 17248 -3 16961 17249 17251 -3 16961 17251 16963 -3 16962 16964 17252 -3 16962 17252 17250 -3 16963 17251 17253 -3 16963 17253 16965 -3 16964 16966 17254 -3 16964 17254 17252 -3 16965 17253 17255 -3 16965 17255 16967 -3 16966 16968 17256 -3 16966 17256 17254 
-3 16967 17255 17257 -3 16967 17257 16969 -3 16968 16970 17258 -3 16968 17258 17256 -3 16969 17257 17259 -3 16969 17259 16971 -3 16970 16972 17260 -3 16970 17260 17258 -3 16971 17259 17261 -3 16971 17261 16973 -3 16972 16974 17262 -3 16972 17262 17260 -3 16973 17261 17263 -3 16973 17263 16975 -3 16974 16976 17264 -3 16974 17264 17262 -3 16975 17263 17265 -3 16975 17265 16977 -3 16976 16978 17266 -3 16976 17266 17264 -3 16977 17265 17267 -3 16977 17267 16979 -3 16978 16980 17268 -3 16978 17268 17266 -3 16979 17267 17269 -3 16979 17269 16981 -3 16980 16982 17270 -3 16980 17270 17268 -3 16981 17269 17271 -3 16981 17271 16983 -3 16982 16984 17272 -3 16982 17272 17270 -3 16983 17271 17273 -3 16983 17273 16985 -3 16984 16986 17274 -3 16984 17274 17272 -3 16985 17273 16987 -3 17275 16987 17273 -3 16986 16988 17276 -3 16986 17276 17274 -3 16987 17275 16989 -3 17277 16989 17275 -3 16988 16990 17276 -3 17278 17276 16990 -3 16989 17277 16991 -3 17279 16991 17277 -3 16990 16992 17278 -3 17280 17278 16992 -3 16991 17279 16993 -3 17281 16993 17279 -3 16992 16994 17280 -3 17282 17280 16994 -3 16993 17281 16995 -3 17283 16995 17281 -3 16994 16996 17282 -3 17284 17282 16996 -3 16995 17283 16997 -3 17285 16997 17283 -3 16996 16998 17284 -3 17286 17284 16998 -3 16997 17285 16999 -3 17287 16999 17285 -3 16998 17000 17286 -3 17288 17286 17000 -3 16999 17287 17001 -3 17289 17001 17287 -3 17000 17002 17288 -3 17290 17288 17002 -3 17001 17289 17003 -3 17291 17003 17289 -3 17002 17004 17290 -3 17292 17290 17004 -3 17003 17291 17005 -3 17293 17005 17291 -3 17004 17006 17292 -3 17294 17292 17006 -3 17005 17293 17007 -3 17295 17007 17293 -3 17006 17008 17294 -3 17296 17294 17008 -3 17007 17295 17009 -3 17297 17009 17295 -3 17008 17010 17296 -3 17298 17296 17010 -3 17009 17297 17011 -3 17299 17011 17297 -3 17010 17012 17298 -3 17300 17298 17012 -3 17011 17299 17013 -3 17301 17013 17299 -3 17012 17014 17300 -3 17302 17300 17014 -3 17013 17301 17015 -3 17303 17015 17301 -3 17014 17016 17302 -3 17304 17302 17016 -3 17015 17303 17017 -3 17305 17017 17303 -3 17016 17018 17304 -3 17306 17304 17018 -3 17017 17305 17019 -3 17307 17019 17305 -3 17018 17020 17306 -3 17308 17306 17020 -3 17019 17307 17021 -3 17309 17021 17307 -3 17020 17022 17308 -3 17310 17308 17022 -3 17021 17309 17023 -3 17311 17023 17309 -3 17022 17024 17310 -3 17312 17310 17024 -3 17023 17311 17027 -3 17315 17027 17311 -3 17024 17025 17312 -3 17313 17312 17025 -3 17025 17028 17313 -3 17316 17313 17028 -3 17026 17027 17314 -3 17315 17314 17027 -3 17026 17314 17031 -3 17319 17031 17314 -3 17028 17029 17316 -3 17317 17316 17029 -3 17029 17032 17317 -3 17320 17317 17032 -3 17030 17031 17318 -3 17319 17318 17031 -3 17030 17318 17100 -3 17388 17100 17318 -3 17032 17033 17320 -3 17321 17320 17033 -3 17033 17034 17321 -3 17322 17321 17034 -3 17034 17035 17322 -3 17323 17322 17035 -3 17035 17036 17323 -3 17324 17323 17036 -3 17036 17037 17324 -3 17325 17324 17037 -3 17037 17038 17325 -3 17326 17325 17038 -3 17038 17039 17326 -3 17327 17326 17039 -3 17039 17040 17327 -3 17328 17327 17040 -3 17040 17041 17328 -3 17329 17328 17041 -3 17041 17042 17329 -3 17330 17329 17042 -3 17042 17043 17330 -3 17331 17330 17043 -3 17043 17044 17331 -3 17332 17331 17044 -3 17044 17045 17332 -3 17333 17332 17045 -3 17045 17046 17333 -3 17334 17333 17046 -3 17046 17047 17334 -3 17335 17334 17047 -3 17047 17048 17335 -3 17336 17335 17048 -3 17048 17049 17336 -3 17337 17336 17049 -3 17049 17050 17337 -3 17338 17337 17050 -3 17050 17051 17338 -3 17339 17338 17051 -3 17051 17052 17339 -3 
17340 17339 17052 -3 17052 17053 17340 -3 17341 17340 17053 -3 17053 17054 17341 -3 17342 17341 17054 -3 17054 17055 17342 -3 17343 17342 17055 -3 17055 17056 17343 -3 17344 17343 17056 -3 17056 17057 17344 -3 17345 17344 17057 -3 17057 17058 17345 -3 17346 17345 17058 -3 17058 17059 17346 -3 17347 17346 17059 -3 17059 17060 17347 -3 17348 17347 17060 -3 17060 17061 17348 -3 17349 17348 17061 -3 17061 17062 17349 -3 17350 17349 17062 -3 17062 17063 17350 -3 17351 17350 17063 -3 17063 17064 17351 -3 17352 17351 17064 -3 17064 17065 17352 -3 17353 17352 17065 -3 17065 17066 17353 -3 17354 17353 17066 -3 17066 17067 17354 -3 17355 17354 17067 -3 17067 17068 17355 -3 17356 17355 17068 -3 17068 17069 17357 -3 17068 17357 17356 -3 17069 17070 17358 -3 17069 17358 17357 -3 17070 17071 17359 -3 17070 17359 17358 -3 17071 17072 17360 -3 17071 17360 17359 -3 17072 17073 17361 -3 17072 17361 17360 -3 17073 17074 17362 -3 17073 17362 17361 -3 17074 17075 17363 -3 17074 17363 17362 -3 17075 17076 17364 -3 17075 17364 17363 -3 17076 17077 17365 -3 17076 17365 17364 -3 17077 17078 17366 -3 17077 17366 17365 -3 17078 17079 17367 -3 17078 17367 17366 -3 17079 17080 17368 -3 17079 17368 17367 -3 17080 17081 17369 -3 17080 17369 17368 -3 17081 17082 17370 -3 17081 17370 17369 -3 17082 17083 17371 -3 17082 17371 17370 -3 17083 17084 17372 -3 17083 17372 17371 -3 17084 17085 17373 -3 17084 17373 17372 -3 17085 17086 17374 -3 17085 17374 17373 -3 17086 17087 17375 -3 17086 17375 17374 -3 17087 17088 17376 -3 17087 17376 17375 -3 17088 17089 17377 -3 17088 17377 17376 -3 17089 17090 17378 -3 17089 17378 17377 -3 17090 17091 17379 -3 17090 17379 17378 -3 17091 17092 17380 -3 17091 17380 17379 -3 17092 17093 17381 -3 17092 17381 17380 -3 17093 17094 17382 -3 17093 17382 17381 -3 17094 17095 17383 -3 17094 17383 17382 -3 17095 17096 17384 -3 17095 17384 17383 -3 17096 17097 17385 -3 17096 17385 17384 -3 17097 17098 17386 -3 17097 17386 17385 -3 17098 17099 17387 -3 17098 17387 17386 -3 17099 17100 17388 -3 17099 17388 17387 -3 17101 17389 17390 -3 17101 17390 17102 -3 17101 17171 17459 -3 17101 17459 17389 -3 17102 17390 17391 -3 17102 17391 17103 -3 17103 17391 17392 -3 17103 17392 17104 -3 17104 17392 17393 -3 17104 17393 17105 -3 17105 17393 17394 -3 17105 17394 17106 -3 17106 17394 17395 -3 17106 17395 17107 -3 17107 17395 17396 -3 17107 17396 17108 -3 17108 17396 17397 -3 17108 17397 17109 -3 17109 17397 17398 -3 17109 17398 17110 -3 17110 17398 17399 -3 17110 17399 17111 -3 17111 17399 17400 -3 17111 17400 17112 -3 17112 17400 17401 -3 17112 17401 17113 -3 17113 17401 17402 -3 17113 17402 17114 -3 17114 17402 17403 -3 17114 17403 17115 -3 17115 17403 17404 -3 17115 17404 17116 -3 17116 17404 17405 -3 17116 17405 17117 -3 17117 17405 17406 -3 17117 17406 17118 -3 17118 17406 17407 -3 17118 17407 17119 -3 17119 17407 17408 -3 17119 17408 17120 -3 17120 17408 17409 -3 17120 17409 17121 -3 17121 17409 17410 -3 17121 17410 17122 -3 17122 17410 17411 -3 17122 17411 17123 -3 17123 17411 17412 -3 17123 17412 17124 -3 17124 17412 17413 -3 17124 17413 17125 -3 17125 17413 17414 -3 17125 17414 17126 -3 17126 17414 17415 -3 17126 17415 17127 -3 17127 17415 17416 -3 17127 17416 17128 -3 17128 17416 17417 -3 17128 17417 17129 -3 17129 17417 17418 -3 17129 17418 17130 -3 17130 17418 17419 -3 17130 17419 17131 -3 17131 17419 17420 -3 17131 17420 17132 -3 17132 17420 17421 -3 17132 17421 17133 -3 17133 17421 17422 -3 17133 17422 17134 -3 17134 17422 17423 -3 17134 17423 17135 -3 17135 17423 17424 -3 17135 17424 17136 -3 17136 
17424 17425 -3 17136 17425 17137 -3 17137 17425 17426 -3 17137 17426 17138 -3 17138 17426 17427 -3 17138 17427 17139 -3 17139 17427 17428 -3 17139 17428 17140 -3 17140 17428 17429 -3 17140 17429 17141 -3 17141 17429 17430 -3 17141 17430 17142 -3 17142 17430 17431 -3 17142 17431 17143 -3 17143 17431 17432 -3 17143 17432 17144 -3 17144 17432 17433 -3 17144 17433 17145 -3 17145 17433 17434 -3 17145 17434 17146 -3 17146 17434 17147 -3 17435 17147 17434 -3 17147 17435 17148 -3 17436 17148 17435 -3 17148 17436 17149 -3 17437 17149 17436 -3 17149 17437 17150 -3 17438 17150 17437 -3 17150 17438 17151 -3 17439 17151 17438 -3 17151 17439 17152 -3 17440 17152 17439 -3 17152 17440 17153 -3 17441 17153 17440 -3 17153 17441 17154 -3 17442 17154 17441 -3 17154 17442 17155 -3 17443 17155 17442 -3 17155 17443 17156 -3 17444 17156 17443 -3 17156 17444 17157 -3 17445 17157 17444 -3 17157 17445 17158 -3 17446 17158 17445 -3 17158 17446 17159 -3 17447 17159 17446 -3 17159 17447 17160 -3 17448 17160 17447 -3 17160 17448 17161 -3 17449 17161 17448 -3 17161 17449 17162 -3 17450 17162 17449 -3 17162 17450 17163 -3 17451 17163 17450 -3 17163 17451 17164 -3 17452 17164 17451 -3 17164 17452 17165 -3 17453 17165 17452 -3 17165 17453 17166 -3 17454 17166 17453 -3 17166 17454 17167 -3 17455 17167 17454 -3 17167 17455 17168 -3 17456 17168 17455 -3 17168 17456 17169 -3 17457 17169 17456 -3 17169 17457 17172 -3 17460 17172 17457 -3 17170 17458 17171 -3 17459 17171 17458 -3 17170 17175 17458 -3 17463 17458 17175 -3 17172 17460 17173 -3 17461 17173 17460 -3 17173 17461 17176 -3 17464 17176 17461 -3 17174 17462 17175 -3 17463 17175 17462 -3 17174 17178 17462 -3 17466 17462 17178 -3 17176 17464 17177 -3 17465 17177 17464 -3 17177 17465 17179 -3 17467 17179 17465 -3 17178 17180 17466 -3 17468 17466 17180 -3 17179 17467 17181 -3 17469 17181 17467 -3 17180 17182 17468 -3 17470 17468 17182 -3 17181 17469 17183 -3 17471 17183 17469 -3 17182 17184 17470 -3 17472 17470 17184 -3 17183 17471 17185 -3 17473 17185 17471 -3 17184 17186 17472 -3 17474 17472 17186 -3 17185 17473 17187 -3 17475 17187 17473 -3 17186 17188 17474 -3 17476 17474 17188 -3 17187 17475 17189 -3 17477 17189 17475 -3 17188 17190 17476 -3 17478 17476 17190 -3 17189 17477 17191 -3 17479 17191 17477 -3 17190 17192 17478 -3 17480 17478 17192 -3 17191 17479 17193 -3 17481 17193 17479 -3 17192 17194 17480 -3 17482 17480 17194 -3 17193 17481 17195 -3 17483 17195 17481 -3 17194 17196 17482 -3 17484 17482 17196 -3 17195 17483 17197 -3 17485 17197 17483 -3 17196 17198 17484 -3 17486 17484 17198 -3 17197 17485 17199 -3 17487 17199 17485 -3 17198 17200 17486 -3 17488 17486 17200 -3 17199 17487 17201 -3 17489 17201 17487 -3 17200 17202 17488 -3 17490 17488 17202 -3 17201 17489 17203 -3 17491 17203 17489 -3 17202 17204 17490 -3 17492 17490 17204 -3 17203 17491 17205 -3 17493 17205 17491 -3 17204 17206 17492 -3 17494 17492 17206 -3 17205 17493 17207 -3 17495 17207 17493 -3 17206 17208 17494 -3 17496 17494 17208 -3 17207 17495 17209 -3 17497 17209 17495 -3 17208 17210 17496 -3 17498 17496 17210 -3 17209 17497 17211 -3 17499 17211 17497 -3 17210 17212 17498 -3 17500 17498 17212 -3 17211 17499 17213 -3 17501 17213 17499 -3 17212 17214 17500 -3 17502 17500 17214 -3 17213 17501 17215 -3 17503 17215 17501 -3 17214 17216 17502 -3 17504 17502 17216 -3 17215 17503 17217 -3 17505 17217 17503 -3 17216 17218 17504 -3 17506 17504 17218 -3 17217 17505 17219 -3 17507 17219 17505 -3 17218 17220 17506 -3 17508 17506 17220 -3 17219 17507 17221 -3 17509 17221 17507 -3 17220 17222 17508 -3 17510 17508 
17222 -3 17221 17509 17223 -3 17511 17223 17509 -3 17222 17224 17510 -3 17512 17510 17224 -3 17223 17511 17225 -3 17513 17225 17511 -3 17224 17226 17512 -3 17514 17512 17226 -3 17225 17513 17227 -3 17515 17227 17513 -3 17226 17228 17514 -3 17516 17514 17228 -3 17227 17515 17517 -3 17227 17517 17229 -3 17228 17230 17516 -3 17518 17516 17230 -3 17229 17517 17519 -3 17229 17519 17231 -3 17230 17232 17520 -3 17230 17520 17518 -3 17231 17519 17521 -3 17231 17521 17233 -3 17232 17234 17522 -3 17232 17522 17520 -3 17233 17521 17523 -3 17233 17523 17235 -3 17234 17236 17524 -3 17234 17524 17522 -3 17235 17523 17525 -3 17235 17525 17237 -3 17236 17238 17526 -3 17236 17526 17524 -3 17237 17525 17527 -3 17237 17527 17239 -3 17238 17240 17528 -3 17238 17528 17526 -3 17239 17527 17529 -3 17239 17529 17241 -3 17240 17242 17530 -3 17240 17530 17528 -3 17241 17529 17531 -3 17241 17531 17243 -3 17242 17244 17532 -3 17242 17532 17530 -3 17243 17531 17533 -3 17243 17533 17245 -3 17244 17246 17534 -3 17244 17534 17532 -3 17245 17533 17535 -3 17245 17535 17247 -3 17246 17248 17536 -3 17246 17536 17534 -3 17247 17535 17537 -3 17247 17537 17249 -3 17248 17250 17538 -3 17248 17538 17536 -3 17249 17537 17539 -3 17249 17539 17251 -3 17250 17252 17540 -3 17250 17540 17538 -3 17251 17539 17541 -3 17251 17541 17253 -3 17252 17254 17542 -3 17252 17542 17540 -3 17253 17541 17543 -3 17253 17543 17255 -3 17254 17256 17544 -3 17254 17544 17542 -3 17255 17543 17545 -3 17255 17545 17257 -3 17256 17258 17546 -3 17256 17546 17544 -3 17257 17545 17547 -3 17257 17547 17259 -3 17258 17260 17548 -3 17258 17548 17546 -3 17259 17547 17549 -3 17259 17549 17261 -3 17260 17262 17550 -3 17260 17550 17548 -3 17261 17549 17551 -3 17261 17551 17263 -3 17262 17264 17552 -3 17262 17552 17550 -3 17263 17551 17553 -3 17263 17553 17265 -3 17264 17266 17554 -3 17264 17554 17552 -3 17265 17553 17555 -3 17265 17555 17267 -3 17266 17268 17556 -3 17266 17556 17554 -3 17267 17555 17557 -3 17267 17557 17269 -3 17268 17270 17558 -3 17268 17558 17556 -3 17269 17557 17559 -3 17269 17559 17271 -3 17270 17272 17560 -3 17270 17560 17558 -3 17271 17559 17561 -3 17271 17561 17273 -3 17272 17274 17562 -3 17272 17562 17560 -3 17273 17561 17563 -3 17273 17563 17275 -3 17274 17276 17564 -3 17274 17564 17562 -3 17275 17563 17565 -3 17275 17565 17277 -3 17276 17278 17566 -3 17276 17566 17564 -3 17277 17565 17567 -3 17277 17567 17279 -3 17278 17280 17568 -3 17278 17568 17566 -3 17279 17567 17569 -3 17279 17569 17281 -3 17280 17282 17570 -3 17280 17570 17568 -3 17281 17569 17571 -3 17281 17571 17283 -3 17282 17284 17572 -3 17282 17572 17570 -3 17283 17571 17573 -3 17283 17573 17285 -3 17284 17286 17574 -3 17284 17574 17572 -3 17285 17573 17575 -3 17285 17575 17287 -3 17286 17288 17576 -3 17286 17576 17574 -3 17287 17575 17577 -3 17287 17577 17289 -3 17288 17290 17578 -3 17288 17578 17576 -3 17289 17577 17579 -3 17289 17579 17291 -3 17290 17292 17580 -3 17290 17580 17578 -3 17291 17579 17581 -3 17291 17581 17293 -3 17292 17294 17582 -3 17292 17582 17580 -3 17293 17581 17583 -3 17293 17583 17295 -3 17294 17296 17584 -3 17294 17584 17582 -3 17295 17583 17585 -3 17295 17585 17297 -3 17296 17298 17586 -3 17296 17586 17584 -3 17297 17585 17587 -3 17297 17587 17299 -3 17298 17300 17588 -3 17298 17588 17586 -3 17299 17587 17589 -3 17299 17589 17301 -3 17300 17302 17590 -3 17300 17590 17588 -3 17301 17589 17591 -3 17301 17591 17303 -3 17302 17304 17592 -3 17302 17592 17590 -3 17303 17591 17593 -3 17303 17593 17305 -3 17304 17306 17594 -3 17304 17594 17592 -3 17305 17593 17595 
-3 17305 17595 17307 -3 17306 17308 17596 -3 17306 17596 17594 -3 17307 17595 17597 -3 17307 17597 17309 -3 17308 17310 17598 -3 17308 17598 17596 -3 17309 17597 17311 -3 17599 17311 17597 -3 17310 17312 17598 -3 17600 17598 17312 -3 17311 17599 17315 -3 17603 17315 17599 -3 17312 17313 17600 -3 17601 17600 17313 -3 17313 17316 17601 -3 17604 17601 17316 -3 17314 17315 17602 -3 17603 17602 17315 -3 17314 17602 17319 -3 17607 17319 17602 -3 17316 17317 17604 -3 17605 17604 17317 -3 17317 17320 17605 -3 17608 17605 17320 -3 17318 17319 17606 -3 17607 17606 17319 -3 17318 17606 17388 -3 17676 17388 17606 -3 17320 17321 17608 -3 17609 17608 17321 -3 17321 17322 17609 -3 17610 17609 17322 -3 17322 17323 17610 -3 17611 17610 17323 -3 17323 17324 17611 -3 17612 17611 17324 -3 17324 17325 17612 -3 17613 17612 17325 -3 17325 17326 17613 -3 17614 17613 17326 -3 17326 17327 17614 -3 17615 17614 17327 -3 17327 17328 17615 -3 17616 17615 17328 -3 17328 17329 17616 -3 17617 17616 17329 -3 17329 17330 17617 -3 17618 17617 17330 -3 17330 17331 17618 -3 17619 17618 17331 -3 17331 17332 17619 -3 17620 17619 17332 -3 17332 17333 17620 -3 17621 17620 17333 -3 17333 17334 17621 -3 17622 17621 17334 -3 17334 17335 17622 -3 17623 17622 17335 -3 17335 17336 17623 -3 17624 17623 17336 -3 17336 17337 17624 -3 17625 17624 17337 -3 17337 17338 17625 -3 17626 17625 17338 -3 17338 17339 17626 -3 17627 17626 17339 -3 17339 17340 17627 -3 17628 17627 17340 -3 17340 17341 17628 -3 17629 17628 17341 -3 17341 17342 17629 -3 17630 17629 17342 -3 17342 17343 17630 -3 17631 17630 17343 -3 17343 17344 17631 -3 17632 17631 17344 -3 17344 17345 17632 -3 17633 17632 17345 -3 17345 17346 17633 -3 17634 17633 17346 -3 17346 17347 17634 -3 17635 17634 17347 -3 17347 17348 17635 -3 17636 17635 17348 -3 17348 17349 17636 -3 17637 17636 17349 -3 17349 17350 17637 -3 17638 17637 17350 -3 17350 17351 17638 -3 17639 17638 17351 -3 17351 17352 17639 -3 17640 17639 17352 -3 17352 17353 17640 -3 17641 17640 17353 -3 17353 17354 17641 -3 17642 17641 17354 -3 17354 17355 17642 -3 17643 17642 17355 -3 17355 17356 17643 -3 17644 17643 17356 -3 17356 17357 17644 -3 17645 17644 17357 -3 17357 17358 17645 -3 17646 17645 17358 -3 17358 17359 17646 -3 17647 17646 17359 -3 17359 17360 17647 -3 17648 17647 17360 -3 17360 17361 17648 -3 17649 17648 17361 -3 17361 17362 17649 -3 17650 17649 17362 -3 17362 17363 17650 -3 17651 17650 17363 -3 17363 17364 17651 -3 17652 17651 17364 -3 17364 17365 17652 -3 17653 17652 17365 -3 17365 17366 17653 -3 17654 17653 17366 -3 17366 17367 17654 -3 17655 17654 17367 -3 17367 17368 17655 -3 17656 17655 17368 -3 17368 17369 17656 -3 17657 17656 17369 -3 17369 17370 17657 -3 17658 17657 17370 -3 17370 17371 17658 -3 17659 17658 17371 -3 17371 17372 17659 -3 17660 17659 17372 -3 17372 17373 17660 -3 17661 17660 17373 -3 17373 17374 17661 -3 17662 17661 17374 -3 17374 17375 17662 -3 17663 17662 17375 -3 17375 17376 17663 -3 17664 17663 17376 -3 17376 17377 17664 -3 17665 17664 17377 -3 17377 17378 17665 -3 17666 17665 17378 -3 17378 17379 17666 -3 17667 17666 17379 -3 17379 17380 17667 -3 17668 17667 17380 -3 17380 17381 17668 -3 17669 17668 17381 -3 17381 17382 17669 -3 17670 17669 17382 -3 17382 17383 17670 -3 17671 17670 17383 -3 17383 17384 17671 -3 17672 17671 17384 -3 17384 17385 17672 -3 17673 17672 17385 -3 17385 17386 17673 -3 17674 17673 17386 -3 17386 17387 17674 -3 17675 17674 17387 -3 17387 17388 17675 -3 17676 17675 17388 -3 17389 17677 17678 -3 17389 17678 17390 -3 17389 17459 17747 -3 17389 17747 17677 -3 
17390 17678 17679 -3 17390 17679 17391 -3 17391 17679 17680 -3 17391 17680 17392 -3 17392 17680 17681 -3 17392 17681 17393 -3 17393 17681 17682 -3 17393 17682 17394 -3 17394 17682 17683 -3 17394 17683 17395 -3 17395 17683 17684 -3 17395 17684 17396 -3 17396 17684 17685 -3 17396 17685 17397 -3 17397 17685 17686 -3 17397 17686 17398 -3 17398 17686 17687 -3 17398 17687 17399 -3 17399 17687 17688 -3 17399 17688 17400 -3 17400 17688 17689 -3 17400 17689 17401 -3 17401 17689 17690 -3 17401 17690 17402 -3 17402 17690 17691 -3 17402 17691 17403 -3 17403 17691 17692 -3 17403 17692 17404 -3 17404 17692 17693 -3 17404 17693 17405 -3 17405 17693 17694 -3 17405 17694 17406 -3 17406 17694 17695 -3 17406 17695 17407 -3 17407 17695 17696 -3 17407 17696 17408 -3 17408 17696 17697 -3 17408 17697 17409 -3 17409 17697 17698 -3 17409 17698 17410 -3 17410 17698 17699 -3 17410 17699 17411 -3 17411 17699 17700 -3 17411 17700 17412 -3 17412 17700 17701 -3 17412 17701 17413 -3 17413 17701 17702 -3 17413 17702 17414 -3 17414 17702 17703 -3 17414 17703 17415 -3 17415 17703 17704 -3 17415 17704 17416 -3 17416 17704 17705 -3 17416 17705 17417 -3 17417 17705 17706 -3 17417 17706 17418 -3 17418 17706 17707 -3 17418 17707 17419 -3 17419 17707 17708 -3 17419 17708 17420 -3 17420 17708 17709 -3 17420 17709 17421 -3 17421 17709 17710 -3 17421 17710 17422 -3 17422 17710 17711 -3 17422 17711 17423 -3 17423 17711 17712 -3 17423 17712 17424 -3 17424 17712 17713 -3 17424 17713 17425 -3 17425 17713 17714 -3 17425 17714 17426 -3 17426 17714 17715 -3 17426 17715 17427 -3 17427 17715 17716 -3 17427 17716 17428 -3 17428 17716 17717 -3 17428 17717 17429 -3 17429 17717 17718 -3 17429 17718 17430 -3 17430 17718 17719 -3 17430 17719 17431 -3 17431 17719 17720 -3 17431 17720 17432 -3 17432 17720 17721 -3 17432 17721 17433 -3 17433 17721 17722 -3 17433 17722 17434 -3 17434 17722 17723 -3 17434 17723 17435 -3 17435 17723 17724 -3 17435 17724 17436 -3 17436 17724 17725 -3 17436 17725 17437 -3 17437 17725 17726 -3 17437 17726 17438 -3 17438 17726 17727 -3 17438 17727 17439 -3 17439 17727 17728 -3 17439 17728 17440 -3 17440 17728 17729 -3 17440 17729 17441 -3 17441 17729 17730 -3 17441 17730 17442 -3 17442 17730 17731 -3 17442 17731 17443 -3 17443 17731 17732 -3 17443 17732 17444 -3 17444 17732 17733 -3 17444 17733 17445 -3 17445 17733 17734 -3 17445 17734 17446 -3 17446 17734 17735 -3 17446 17735 17447 -3 17447 17735 17736 -3 17447 17736 17448 -3 17448 17736 17737 -3 17448 17737 17449 -3 17449 17737 17738 -3 17449 17738 17450 -3 17450 17738 17739 -3 17450 17739 17451 -3 17451 17739 17740 -3 17451 17740 17452 -3 17452 17740 17741 -3 17452 17741 17453 -3 17453 17741 17742 -3 17453 17742 17454 -3 17454 17742 17743 -3 17454 17743 17455 -3 17455 17743 17744 -3 17455 17744 17456 -3 17456 17744 17745 -3 17456 17745 17457 -3 17457 17745 17748 -3 17457 17748 17460 -3 17458 17746 17747 -3 17458 17747 17459 -3 17458 17463 17751 -3 17458 17751 17746 -3 17460 17748 17749 -3 17460 17749 17461 -3 17461 17749 17752 -3 17461 17752 17464 -3 17462 17750 17751 -3 17462 17751 17463 -3 17462 17466 17754 -3 17462 17754 17750 -3 17464 17752 17753 -3 17464 17753 17465 -3 17465 17753 17755 -3 17465 17755 17467 -3 17466 17468 17756 -3 17466 17756 17754 -3 17467 17755 17757 -3 17467 17757 17469 -3 17468 17470 17758 -3 17468 17758 17756 -3 17469 17757 17759 -3 17469 17759 17471 -3 17470 17472 17760 -3 17470 17760 17758 -3 17471 17759 17473 -3 17761 17473 17759 -3 17472 17474 17760 -3 17762 17760 17474 -3 17473 17761 17475 -3 17763 17475 17761 -3 17474 17476 17762 -3 17764 
17762 17476 -3 17475 17763 17477 -3 17765 17477 17763 -3 17476 17478 17764 -3 17766 17764 17478 -3 17477 17765 17479 -3 17767 17479 17765 -3 17478 17480 17766 -3 17768 17766 17480 -3 17479 17767 17481 -3 17769 17481 17767 -3 17480 17482 17768 -3 17770 17768 17482 -3 17481 17769 17483 -3 17771 17483 17769 -3 17482 17484 17770 -3 17772 17770 17484 -3 17483 17771 17485 -3 17773 17485 17771 -3 17484 17486 17772 -3 17774 17772 17486 -3 17485 17773 17487 -3 17775 17487 17773 -3 17486 17488 17774 -3 17776 17774 17488 -3 17487 17775 17489 -3 17777 17489 17775 -3 17488 17490 17776 -3 17778 17776 17490 -3 17489 17777 17491 -3 17779 17491 17777 -3 17490 17492 17778 -3 17780 17778 17492 -3 17491 17779 17493 -3 17781 17493 17779 -3 17492 17494 17780 -3 17782 17780 17494 -3 17493 17781 17495 -3 17783 17495 17781 -3 17494 17496 17782 -3 17784 17782 17496 -3 17495 17783 17497 -3 17785 17497 17783 -3 17496 17498 17784 -3 17786 17784 17498 -3 17497 17785 17499 -3 17787 17499 17785 -3 17498 17500 17786 -3 17788 17786 17500 -3 17499 17787 17501 -3 17789 17501 17787 -3 17500 17502 17788 -3 17790 17788 17502 -3 17501 17789 17503 -3 17791 17503 17789 -3 17502 17504 17790 -3 17792 17790 17504 -3 17503 17791 17505 -3 17793 17505 17791 -3 17504 17506 17792 -3 17794 17792 17506 -3 17505 17793 17507 -3 17795 17507 17793 -3 17506 17508 17794 -3 17796 17794 17508 -3 17507 17795 17509 -3 17797 17509 17795 -3 17508 17510 17796 -3 17798 17796 17510 -3 17509 17797 17511 -3 17799 17511 17797 -3 17510 17512 17798 -3 17800 17798 17512 -3 17511 17799 17513 -3 17801 17513 17799 -3 17512 17514 17800 -3 17802 17800 17514 -3 17513 17801 17515 -3 17803 17515 17801 -3 17514 17516 17802 -3 17804 17802 17516 -3 17515 17803 17517 -3 17805 17517 17803 -3 17516 17518 17804 -3 17806 17804 17518 -3 17517 17805 17519 -3 17807 17519 17805 -3 17518 17520 17806 -3 17808 17806 17520 -3 17519 17807 17521 -3 17809 17521 17807 -3 17520 17522 17808 -3 17810 17808 17522 -3 17521 17809 17523 -3 17811 17523 17809 -3 17522 17524 17810 -3 17812 17810 17524 -3 17523 17811 17525 -3 17813 17525 17811 -3 17524 17526 17812 -3 17814 17812 17526 -3 17525 17813 17527 -3 17815 17527 17813 -3 17526 17528 17814 -3 17816 17814 17528 -3 17527 17815 17529 -3 17817 17529 17815 -3 17528 17530 17816 -3 17818 17816 17530 -3 17529 17817 17531 -3 17819 17531 17817 -3 17530 17532 17818 -3 17820 17818 17532 -3 17531 17819 17533 -3 17821 17533 17819 -3 17532 17534 17820 -3 17822 17820 17534 -3 17533 17821 17535 -3 17823 17535 17821 -3 17534 17536 17822 -3 17824 17822 17536 -3 17535 17823 17537 -3 17825 17537 17823 -3 17536 17538 17824 -3 17826 17824 17538 -3 17537 17825 17539 -3 17827 17539 17825 -3 17538 17540 17826 -3 17828 17826 17540 -3 17539 17827 17541 -3 17829 17541 17827 -3 17540 17542 17828 -3 17830 17828 17542 -3 17541 17829 17543 -3 17831 17543 17829 -3 17542 17544 17830 -3 17832 17830 17544 -3 17543 17831 17545 -3 17833 17545 17831 -3 17544 17546 17832 -3 17834 17832 17546 -3 17545 17833 17547 -3 17835 17547 17833 -3 17546 17548 17834 -3 17836 17834 17548 -3 17547 17835 17549 -3 17837 17549 17835 -3 17548 17550 17836 -3 17838 17836 17550 -3 17549 17837 17551 -3 17839 17551 17837 -3 17550 17552 17838 -3 17840 17838 17552 -3 17551 17839 17553 -3 17841 17553 17839 -3 17552 17554 17840 -3 17842 17840 17554 -3 17553 17841 17843 -3 17553 17843 17555 -3 17554 17556 17844 -3 17554 17844 17842 -3 17555 17843 17845 -3 17555 17845 17557 -3 17556 17558 17846 -3 17556 17846 17844 -3 17557 17845 17847 -3 17557 17847 17559 -3 17558 17560 17848 -3 17558 17848 17846 -3 17559 17847 
17849 -3 17559 17849 17561 -3 17560 17562 17850 -3 17560 17850 17848 -3 17561 17849 17851 -3 17561 17851 17563 -3 17562 17564 17852 -3 17562 17852 17850 -3 17563 17851 17853 -3 17563 17853 17565 -3 17564 17566 17854 -3 17564 17854 17852 -3 17565 17853 17855 -3 17565 17855 17567 -3 17566 17568 17856 -3 17566 17856 17854 -3 17567 17855 17857 -3 17567 17857 17569 -3 17568 17570 17858 -3 17568 17858 17856 -3 17569 17857 17859 -3 17569 17859 17571 -3 17570 17572 17860 -3 17570 17860 17858 -3 17571 17859 17861 -3 17571 17861 17573 -3 17572 17574 17862 -3 17572 17862 17860 -3 17573 17861 17863 -3 17573 17863 17575 -3 17574 17576 17864 -3 17574 17864 17862 -3 17575 17863 17865 -3 17575 17865 17577 -3 17576 17578 17866 -3 17576 17866 17864 -3 17577 17865 17867 -3 17577 17867 17579 -3 17578 17580 17868 -3 17578 17868 17866 -3 17579 17867 17869 -3 17579 17869 17581 -3 17580 17582 17870 -3 17580 17870 17868 -3 17581 17869 17871 -3 17581 17871 17583 -3 17582 17584 17872 -3 17582 17872 17870 -3 17583 17871 17873 -3 17583 17873 17585 -3 17584 17586 17874 -3 17584 17874 17872 -3 17585 17873 17875 -3 17585 17875 17587 -3 17586 17588 17876 -3 17586 17876 17874 -3 17587 17875 17877 -3 17587 17877 17589 -3 17588 17590 17878 -3 17588 17878 17876 -3 17589 17877 17879 -3 17589 17879 17591 -3 17590 17592 17880 -3 17590 17880 17878 -3 17591 17879 17881 -3 17591 17881 17593 -3 17592 17594 17882 -3 17592 17882 17880 -3 17593 17881 17883 -3 17593 17883 17595 -3 17594 17596 17884 -3 17594 17884 17882 -3 17595 17883 17885 -3 17595 17885 17597 -3 17596 17598 17886 -3 17596 17886 17884 -3 17597 17885 17887 -3 17597 17887 17599 -3 17598 17600 17888 -3 17598 17888 17886 -3 17599 17887 17891 -3 17599 17891 17603 -3 17600 17601 17889 -3 17600 17889 17888 -3 17601 17604 17892 -3 17601 17892 17889 -3 17602 17603 17891 -3 17602 17891 17890 -3 17602 17890 17895 -3 17602 17895 17607 -3 17604 17605 17893 -3 17604 17893 17892 -3 17605 17608 17896 -3 17605 17896 17893 -3 17606 17607 17895 -3 17606 17895 17894 -3 17606 17894 17964 -3 17606 17964 17676 -3 17608 17609 17897 -3 17608 17897 17896 -3 17609 17610 17898 -3 17609 17898 17897 -3 17610 17611 17899 -3 17610 17899 17898 -3 17611 17612 17900 -3 17611 17900 17899 -3 17612 17613 17901 -3 17612 17901 17900 -3 17613 17614 17902 -3 17613 17902 17901 -3 17614 17615 17903 -3 17614 17903 17902 -3 17615 17616 17904 -3 17615 17904 17903 -3 17616 17617 17905 -3 17616 17905 17904 -3 17617 17618 17906 -3 17617 17906 17905 -3 17618 17619 17907 -3 17618 17907 17906 -3 17619 17620 17908 -3 17619 17908 17907 -3 17620 17621 17909 -3 17620 17909 17908 -3 17621 17622 17910 -3 17621 17910 17909 -3 17622 17623 17911 -3 17622 17911 17910 -3 17623 17624 17912 -3 17623 17912 17911 -3 17624 17625 17913 -3 17624 17913 17912 -3 17625 17626 17914 -3 17625 17914 17913 -3 17626 17627 17915 -3 17626 17915 17914 -3 17627 17628 17916 -3 17627 17916 17915 -3 17628 17629 17917 -3 17628 17917 17916 -3 17629 17630 17918 -3 17629 17918 17917 -3 17630 17631 17919 -3 17630 17919 17918 -3 17631 17632 17920 -3 17631 17920 17919 -3 17632 17633 17921 -3 17632 17921 17920 -3 17633 17634 17922 -3 17633 17922 17921 -3 17634 17635 17923 -3 17634 17923 17922 -3 17635 17636 17924 -3 17635 17924 17923 -3 17636 17637 17924 -3 17925 17924 17637 -3 17637 17638 17925 -3 17926 17925 17638 -3 17638 17639 17926 -3 17927 17926 17639 -3 17639 17640 17927 -3 17928 17927 17640 -3 17640 17641 17928 -3 17929 17928 17641 -3 17641 17642 17929 -3 17930 17929 17642 -3 17642 17643 17930 -3 17931 17930 17643 -3 17643 17644 17931 -3 17932 17931 17644 
-3 17644 17645 17932 -3 17933 17932 17645 -3 17645 17646 17933 -3 17934 17933 17646 -3 17646 17647 17934 -3 17935 17934 17647 -3 17647 17648 17935 -3 17936 17935 17648 -3 17648 17649 17936 -3 17937 17936 17649 -3 17649 17650 17937 -3 17938 17937 17650 -3 17650 17651 17938 -3 17939 17938 17651 -3 17651 17652 17939 -3 17940 17939 17652 -3 17652 17653 17940 -3 17941 17940 17653 -3 17653 17654 17941 -3 17942 17941 17654 -3 17654 17655 17942 -3 17943 17942 17655 -3 17655 17656 17943 -3 17944 17943 17656 -3 17656 17657 17944 -3 17945 17944 17657 -3 17657 17658 17945 -3 17946 17945 17658 -3 17658 17659 17946 -3 17947 17946 17659 -3 17659 17660 17947 -3 17948 17947 17660 -3 17660 17661 17948 -3 17949 17948 17661 -3 17661 17662 17949 -3 17950 17949 17662 -3 17662 17663 17950 -3 17951 17950 17663 -3 17663 17664 17951 -3 17952 17951 17664 -3 17664 17665 17952 -3 17953 17952 17665 -3 17665 17666 17953 -3 17954 17953 17666 -3 17666 17667 17954 -3 17955 17954 17667 -3 17667 17668 17955 -3 17956 17955 17668 -3 17668 17669 17956 -3 17957 17956 17669 -3 17669 17670 17957 -3 17958 17957 17670 -3 17670 17671 17958 -3 17959 17958 17671 -3 17671 17672 17959 -3 17960 17959 17672 -3 17672 17673 17960 -3 17961 17960 17673 -3 17673 17674 17961 -3 17962 17961 17674 -3 17674 17675 17962 -3 17963 17962 17675 -3 17675 17676 17963 -3 17964 17963 17676 -3 17677 17965 17678 -3 17966 17678 17965 -3 17677 17747 17965 -3 18035 17965 17747 -3 17678 17966 17679 -3 17967 17679 17966 -3 17679 17967 17680 -3 17968 17680 17967 -3 17680 17968 17681 -3 17969 17681 17968 -3 17681 17969 17682 -3 17970 17682 17969 -3 17682 17970 17683 -3 17971 17683 17970 -3 17683 17971 17684 -3 17972 17684 17971 -3 17684 17972 17685 -3 17973 17685 17972 -3 17685 17973 17686 -3 17974 17686 17973 -3 17686 17974 17687 -3 17975 17687 17974 -3 17687 17975 17688 -3 17976 17688 17975 -3 17688 17976 17689 -3 17977 17689 17976 -3 17689 17977 17690 -3 17978 17690 17977 -3 17690 17978 17691 -3 17979 17691 17978 -3 17691 17979 17692 -3 17980 17692 17979 -3 17692 17980 17693 -3 17981 17693 17980 -3 17693 17981 17694 -3 17982 17694 17981 -3 17694 17982 17695 -3 17983 17695 17982 -3 17695 17983 17696 -3 17984 17696 17983 -3 17696 17984 17697 -3 17985 17697 17984 -3 17697 17985 17698 -3 17986 17698 17985 -3 17698 17986 17699 -3 17987 17699 17986 -3 17699 17987 17700 -3 17988 17700 17987 -3 17700 17988 17701 -3 17989 17701 17988 -3 17701 17989 17702 -3 17990 17702 17989 -3 17702 17990 17703 -3 17991 17703 17990 -3 17703 17991 17704 -3 17992 17704 17991 -3 17704 17992 17705 -3 17993 17705 17992 -3 17705 17993 17706 -3 17994 17706 17993 -3 17706 17994 17707 -3 17995 17707 17994 -3 17707 17995 17708 -3 17996 17708 17995 -3 17708 17996 17709 -3 17997 17709 17996 -3 17709 17997 17710 -3 17998 17710 17997 -3 17710 17998 17711 -3 17999 17711 17998 -3 17711 17999 17712 -3 18000 17712 17999 -3 17712 18000 17713 -3 18001 17713 18000 -3 17713 18001 17714 -3 18002 17714 18001 -3 17714 18002 17715 -3 18003 17715 18002 -3 17715 18003 17716 -3 18004 17716 18003 -3 17716 18004 17717 -3 18005 17717 18004 -3 17717 18005 18006 -3 17717 18006 17718 -3 17718 18006 18007 -3 17718 18007 17719 -3 17719 18007 18008 -3 17719 18008 17720 -3 17720 18008 18009 -3 17720 18009 17721 -3 17721 18009 18010 -3 17721 18010 17722 -3 17722 18010 18011 -3 17722 18011 17723 -3 17723 18011 18012 -3 17723 18012 17724 -3 17724 18012 18013 -3 17724 18013 17725 -3 17725 18013 18014 -3 17725 18014 17726 -3 17726 18014 18015 -3 17726 18015 17727 -3 17727 18015 18016 -3 17727 18016 17728 -3 17728 18016 18017 -3 
17728 18017 17729 -3 17729 18017 18018 -3 17729 18018 17730 -3 17730 18018 18019 -3 17730 18019 17731 -3 17731 18019 18020 -3 17731 18020 17732 -3 17732 18020 18021 -3 17732 18021 17733 -3 17733 18021 18022 -3 17733 18022 17734 -3 17734 18022 18023 -3 17734 18023 17735 -3 17735 18023 18024 -3 17735 18024 17736 -3 17736 18024 18025 -3 17736 18025 17737 -3 17737 18025 18026 -3 17737 18026 17738 -3 17738 18026 18027 -3 17738 18027 17739 -3 17739 18027 18028 -3 17739 18028 17740 -3 17740 18028 18029 -3 17740 18029 17741 -3 17741 18029 18030 -3 17741 18030 17742 -3 17742 18030 18031 -3 17742 18031 17743 -3 17743 18031 18032 -3 17743 18032 17744 -3 17744 18032 18033 -3 17744 18033 17745 -3 17745 18033 18036 -3 17745 18036 17748 -3 17746 18034 18035 -3 17746 18035 17747 -3 17746 17751 18039 -3 17746 18039 18034 -3 17748 18036 18037 -3 17748 18037 17749 -3 17749 18037 18040 -3 17749 18040 17752 -3 17750 18038 18039 -3 17750 18039 17751 -3 17750 17754 18042 -3 17750 18042 18038 -3 17752 18040 18041 -3 17752 18041 17753 -3 17753 18041 18043 -3 17753 18043 17755 -3 17754 17756 18044 -3 17754 18044 18042 -3 17755 18043 18045 -3 17755 18045 17757 -3 17756 17758 18046 -3 17756 18046 18044 -3 17757 18045 18047 -3 17757 18047 17759 -3 17758 17760 18048 -3 17758 18048 18046 -3 17759 18047 18049 -3 17759 18049 17761 -3 17760 17762 18050 -3 17760 18050 18048 -3 17761 18049 18051 -3 17761 18051 17763 -3 17762 17764 18052 -3 17762 18052 18050 -3 17763 18051 18053 -3 17763 18053 17765 -3 17764 17766 18054 -3 17764 18054 18052 -3 17765 18053 18055 -3 17765 18055 17767 -3 17766 17768 18056 -3 17766 18056 18054 -3 17767 18055 18057 -3 17767 18057 17769 -3 17768 17770 18058 -3 17768 18058 18056 -3 17769 18057 18059 -3 17769 18059 17771 -3 17770 17772 18060 -3 17770 18060 18058 -3 17771 18059 18061 -3 17771 18061 17773 -3 17772 17774 18062 -3 17772 18062 18060 -3 17773 18061 18063 -3 17773 18063 17775 -3 17774 17776 18064 -3 17774 18064 18062 -3 17775 18063 18065 -3 17775 18065 17777 -3 17776 17778 18066 -3 17776 18066 18064 -3 17777 18065 18067 -3 17777 18067 17779 -3 17778 17780 18068 -3 17778 18068 18066 -3 17779 18067 18069 -3 17779 18069 17781 -3 17780 17782 18070 -3 17780 18070 18068 -3 17781 18069 18071 -3 17781 18071 17783 -3 17782 17784 18072 -3 17782 18072 18070 -3 17783 18071 18073 -3 17783 18073 17785 -3 17784 17786 18074 -3 17784 18074 18072 -3 17785 18073 18075 -3 17785 18075 17787 -3 17786 17788 18076 -3 17786 18076 18074 -3 17787 18075 18077 -3 17787 18077 17789 -3 17788 17790 18078 -3 17788 18078 18076 -3 17789 18077 18079 -3 17789 18079 17791 -3 17790 17792 18080 -3 17790 18080 18078 -3 17791 18079 18081 -3 17791 18081 17793 -3 17792 17794 18082 -3 17792 18082 18080 -3 17793 18081 18083 -3 17793 18083 17795 -3 17794 17796 18084 -3 17794 18084 18082 -3 17795 18083 18085 -3 17795 18085 17797 -3 17796 17798 18086 -3 17796 18086 18084 -3 17797 18085 18087 -3 17797 18087 17799 -3 17798 17800 18088 -3 17798 18088 18086 -3 17799 18087 17801 -3 18089 17801 18087 -3 17800 17802 18088 -3 18090 18088 17802 -3 17801 18089 17803 -3 18091 17803 18089 -3 17802 17804 18090 -3 18092 18090 17804 -3 17803 18091 17805 -3 18093 17805 18091 -3 17804 17806 18092 -3 18094 18092 17806 -3 17805 18093 17807 -3 18095 17807 18093 -3 17806 17808 18094 -3 18096 18094 17808 -3 17807 18095 17809 -3 18097 17809 18095 -3 17808 17810 18096 -3 18098 18096 17810 -3 17809 18097 17811 -3 18099 17811 18097 -3 17810 17812 18098 -3 18100 18098 17812 -3 17811 18099 17813 -3 18101 17813 18099 -3 17812 17814 18100 -3 18102 18100 17814 -3 17813 
18101 17815 -3 18103 17815 18101 -3 17814 17816 18102 -3 18104 18102 17816 -3 17815 18103 17817 -3 18105 17817 18103 -3 17816 17818 18104 -3 18106 18104 17818 -3 17817 18105 17819 -3 18107 17819 18105 -3 17818 17820 18106 -3 18108 18106 17820 -3 17819 18107 17821 -3 18109 17821 18107 -3 17820 17822 18108 -3 18110 18108 17822 -3 17821 18109 17823 -3 18111 17823 18109 -3 17822 17824 18110 -3 18112 18110 17824 -3 17823 18111 17825 -3 18113 17825 18111 -3 17824 17826 18112 -3 18114 18112 17826 -3 17825 18113 17827 -3 18115 17827 18113 -3 17826 17828 18114 -3 18116 18114 17828 -3 17827 18115 17829 -3 18117 17829 18115 -3 17828 17830 18116 -3 18118 18116 17830 -3 17829 18117 17831 -3 18119 17831 18117 -3 17830 17832 18118 -3 18120 18118 17832 -3 17831 18119 17833 -3 18121 17833 18119 -3 17832 17834 18120 -3 18122 18120 17834 -3 17833 18121 17835 -3 18123 17835 18121 -3 17834 17836 18122 -3 18124 18122 17836 -3 17835 18123 17837 -3 18125 17837 18123 -3 17836 17838 18124 -3 18126 18124 17838 -3 17837 18125 17839 -3 18127 17839 18125 -3 17838 17840 18126 -3 18128 18126 17840 -3 17839 18127 17841 -3 18129 17841 18127 -3 17840 17842 18128 -3 18130 18128 17842 -3 17841 18129 17843 -3 18131 17843 18129 -3 17842 17844 18130 -3 18132 18130 17844 -3 17843 18131 17845 -3 18133 17845 18131 -3 17844 17846 18132 -3 18134 18132 17846 -3 17845 18133 17847 -3 18135 17847 18133 -3 17846 17848 18134 -3 18136 18134 17848 -3 17847 18135 17849 -3 18137 17849 18135 -3 17848 17850 18136 -3 18138 18136 17850 -3 17849 18137 17851 -3 18139 17851 18137 -3 17850 17852 18138 -3 18140 18138 17852 -3 17851 18139 17853 -3 18141 17853 18139 -3 17852 17854 18140 -3 18142 18140 17854 -3 17853 18141 17855 -3 18143 17855 18141 -3 17854 17856 18142 -3 18144 18142 17856 -3 17855 18143 17857 -3 18145 17857 18143 -3 17856 17858 18144 -3 18146 18144 17858 -3 17857 18145 17859 -3 18147 17859 18145 -3 17858 17860 18146 -3 18148 18146 17860 -3 17859 18147 17861 -3 18149 17861 18147 -3 17860 17862 18148 -3 18150 18148 17862 -3 17861 18149 17863 -3 18151 17863 18149 -3 17862 17864 18150 -3 18152 18150 17864 -3 17863 18151 17865 -3 18153 17865 18151 -3 17864 17866 18152 -3 18154 18152 17866 -3 17865 18153 17867 -3 18155 17867 18153 -3 17866 17868 18154 -3 18156 18154 17868 -3 17867 18155 17869 -3 18157 17869 18155 -3 17868 17870 18156 -3 18158 18156 17870 -3 17869 18157 17871 -3 18159 17871 18157 -3 17870 17872 18158 -3 18160 18158 17872 -3 17871 18159 17873 -3 18161 17873 18159 -3 17872 17874 18160 -3 18162 18160 17874 -3 17873 18161 17875 -3 18163 17875 18161 -3 17874 17876 18162 -3 18164 18162 17876 -3 17875 18163 17877 -3 18165 17877 18163 -3 17876 17878 18164 -3 18166 18164 17878 -3 17877 18165 17879 -3 18167 17879 18165 -3 17878 17880 18166 -3 18168 18166 17880 -3 17879 18167 17881 -3 18169 17881 18167 -3 17880 17882 18168 -3 18170 18168 17882 -3 17881 18169 17883 -3 18171 17883 18169 -3 17882 17884 18172 -3 17882 18172 18170 -3 17883 18171 18173 -3 17883 18173 17885 -3 17884 17886 18174 -3 17884 18174 18172 -3 17885 18173 18175 -3 17885 18175 17887 -3 17886 17888 18176 -3 17886 18176 18174 -3 17887 18175 18179 -3 17887 18179 17891 -3 17888 17889 18177 -3 17888 18177 18176 -3 17889 17892 18180 -3 17889 18180 18177 -3 17890 17891 18179 -3 17890 18179 18178 -3 17890 18178 18183 -3 17890 18183 17895 -3 17892 17893 18181 -3 17892 18181 18180 -3 17893 17896 18184 -3 17893 18184 18181 -3 17894 17895 18183 -3 17894 18183 18182 -3 17894 18182 18252 -3 17894 18252 17964 -3 17896 17897 18185 -3 17896 18185 18184 -3 17897 17898 18186 -3 17897 18186 
18185 -3 17898 17899 18187 -3 17898 18187 18186 -3 17899 17900 18188 -3 17899 18188 18187 -3 17900 17901 18189 -3 17900 18189 18188 -3 17901 17902 18190 -3 17901 18190 18189 -3 17902 17903 18191 -3 17902 18191 18190 -3 17903 17904 18192 -3 17903 18192 18191 -3 17904 17905 18193 -3 17904 18193 18192 -3 17905 17906 18194 -3 17905 18194 18193 -3 17906 17907 18195 -3 17906 18195 18194 -3 17907 17908 18196 -3 17907 18196 18195 -3 17908 17909 18197 -3 17908 18197 18196 -3 17909 17910 18198 -3 17909 18198 18197 -3 17910 17911 18199 -3 17910 18199 18198 -3 17911 17912 18200 -3 17911 18200 18199 -3 17912 17913 18201 -3 17912 18201 18200 -3 17913 17914 18202 -3 17913 18202 18201 -3 17914 17915 18203 -3 17914 18203 18202 -3 17915 17916 18204 -3 17915 18204 18203 -3 17916 17917 18205 -3 17916 18205 18204 -3 17917 17918 18206 -3 17917 18206 18205 -3 17918 17919 18207 -3 17918 18207 18206 -3 17919 17920 18208 -3 17919 18208 18207 -3 17920 17921 18209 -3 17920 18209 18208 -3 17921 17922 18210 -3 17921 18210 18209 -3 17922 17923 18211 -3 17922 18211 18210 -3 17923 17924 18212 -3 17923 18212 18211 -3 17924 17925 18213 -3 17924 18213 18212 -3 17925 17926 18214 -3 17925 18214 18213 -3 17926 17927 18215 -3 17926 18215 18214 -3 17927 17928 18216 -3 17927 18216 18215 -3 17928 17929 18217 -3 17928 18217 18216 -3 17929 17930 18218 -3 17929 18218 18217 -3 17930 17931 18219 -3 17930 18219 18218 -3 17931 17932 18220 -3 17931 18220 18219 -3 17932 17933 18221 -3 17932 18221 18220 -3 17933 17934 18222 -3 17933 18222 18221 -3 17934 17935 18223 -3 17934 18223 18222 -3 17935 17936 18224 -3 17935 18224 18223 -3 17936 17937 18225 -3 17936 18225 18224 -3 17937 17938 18226 -3 17937 18226 18225 -3 17938 17939 18227 -3 17938 18227 18226 -3 17939 17940 18228 -3 17939 18228 18227 -3 17940 17941 18229 -3 17940 18229 18228 -3 17941 17942 18230 -3 17941 18230 18229 -3 17942 17943 18231 -3 17942 18231 18230 -3 17943 17944 18232 -3 17943 18232 18231 -3 17944 17945 18233 -3 17944 18233 18232 -3 17945 17946 18234 -3 17945 18234 18233 -3 17946 17947 18235 -3 17946 18235 18234 -3 17947 17948 18236 -3 17947 18236 18235 -3 17948 17949 18237 -3 17948 18237 18236 -3 17949 17950 18238 -3 17949 18238 18237 -3 17950 17951 18239 -3 17950 18239 18238 -3 17951 17952 18240 -3 17951 18240 18239 -3 17952 17953 18241 -3 17952 18241 18240 -3 17953 17954 18242 -3 17953 18242 18241 -3 17954 17955 18243 -3 17954 18243 18242 -3 17955 17956 18244 -3 17955 18244 18243 -3 17956 17957 18245 -3 17956 18245 18244 -3 17957 17958 18246 -3 17957 18246 18245 -3 17958 17959 18247 -3 17958 18247 18246 -3 17959 17960 18248 -3 17959 18248 18247 -3 17960 17961 18249 -3 17960 18249 18248 -3 17961 17962 18250 -3 17961 18250 18249 -3 17962 17963 18251 -3 17962 18251 18250 -3 17963 17964 18252 -3 17963 18252 18251 -3 17965 18253 17966 -3 18254 17966 18253 -3 17965 18035 18253 -3 18323 18253 18035 -3 17966 18254 17967 -3 18255 17967 18254 -3 17967 18255 17968 -3 18256 17968 18255 -3 17968 18256 17969 -3 18257 17969 18256 -3 17969 18257 17970 -3 18258 17970 18257 -3 17970 18258 17971 -3 18259 17971 18258 -3 17971 18259 17972 -3 18260 17972 18259 -3 17972 18260 17973 -3 18261 17973 18260 -3 17973 18261 17974 -3 18262 17974 18261 -3 17974 18262 17975 -3 18263 17975 18262 -3 17975 18263 17976 -3 18264 17976 18263 -3 17976 18264 17977 -3 18265 17977 18264 -3 17977 18265 17978 -3 18266 17978 18265 -3 17978 18266 17979 -3 18267 17979 18266 -3 17979 18267 17980 -3 18268 17980 18267 -3 17980 18268 17981 -3 18269 17981 18268 -3 17981 18269 17982 -3 18270 17982 18269 -3 17982 18270 17983 
-3 18271 17983 18270 -3 17983 18271 17984 -3 18272 17984 18271 -3 17984 18272 17985 -3 18273 17985 18272 -3 17985 18273 17986 -3 18274 17986 18273 -3 17986 18274 17987 -3 18275 17987 18274 -3 17987 18275 17988 -3 18276 17988 18275 -3 17988 18276 17989 -3 18277 17989 18276 -3 17989 18277 17990 -3 18278 17990 18277 -3 17990 18278 17991 -3 18279 17991 18278 -3 17991 18279 17992 -3 18280 17992 18279 -3 17992 18280 17993 -3 18281 17993 18280 -3 17993 18281 17994 -3 18282 17994 18281 -3 17994 18282 17995 -3 18283 17995 18282 -3 17995 18283 17996 -3 18284 17996 18283 -3 17996 18284 17997 -3 18285 17997 18284 -3 17997 18285 17998 -3 18286 17998 18285 -3 17998 18286 17999 -3 18287 17999 18286 -3 17999 18287 18000 -3 18288 18000 18287 -3 18000 18288 18001 -3 18289 18001 18288 -3 18001 18289 18002 -3 18290 18002 18289 -3 18002 18290 18003 -3 18291 18003 18290 -3 18003 18291 18004 -3 18292 18004 18291 -3 18004 18292 18005 -3 18293 18005 18292 -3 18005 18293 18006 -3 18294 18006 18293 -3 18006 18294 18007 -3 18295 18007 18294 -3 18007 18295 18008 -3 18296 18008 18295 -3 18008 18296 18009 -3 18297 18009 18296 -3 18009 18297 18010 -3 18298 18010 18297 -3 18010 18298 18011 -3 18299 18011 18298 -3 18011 18299 18012 -3 18300 18012 18299 -3 18012 18300 18013 -3 18301 18013 18300 -3 18013 18301 18014 -3 18302 18014 18301 -3 18014 18302 18015 -3 18303 18015 18302 -3 18015 18303 18016 -3 18304 18016 18303 -3 18016 18304 18017 -3 18305 18017 18304 -3 18017 18305 18018 -3 18306 18018 18305 -3 18018 18306 18019 -3 18307 18019 18306 -3 18019 18307 18020 -3 18308 18020 18307 -3 18020 18308 18021 -3 18309 18021 18308 -3 18021 18309 18022 -3 18310 18022 18309 -3 18022 18310 18023 -3 18311 18023 18310 -3 18023 18311 18024 -3 18312 18024 18311 -3 18024 18312 18025 -3 18313 18025 18312 -3 18025 18313 18026 -3 18314 18026 18313 -3 18026 18314 18027 -3 18315 18027 18314 -3 18027 18315 18028 -3 18316 18028 18315 -3 18028 18316 18029 -3 18317 18029 18316 -3 18029 18317 18030 -3 18318 18030 18317 -3 18030 18318 18031 -3 18319 18031 18318 -3 18031 18319 18032 -3 18320 18032 18319 -3 18032 18320 18033 -3 18321 18033 18320 -3 18033 18321 18036 -3 18324 18036 18321 -3 18034 18322 18035 -3 18323 18035 18322 -3 18034 18039 18322 -3 18327 18322 18039 -3 18036 18324 18037 -3 18325 18037 18324 -3 18037 18325 18040 -3 18328 18040 18325 -3 18038 18326 18039 -3 18327 18039 18326 -3 18038 18042 18326 -3 18330 18326 18042 -3 18040 18328 18041 -3 18329 18041 18328 -3 18041 18329 18043 -3 18331 18043 18329 -3 18042 18044 18330 -3 18332 18330 18044 -3 18043 18331 18045 -3 18333 18045 18331 -3 18044 18046 18332 -3 18334 18332 18046 -3 18045 18333 18047 -3 18335 18047 18333 -3 18046 18048 18336 -3 18046 18336 18334 -3 18047 18335 18049 -3 18337 18049 18335 -3 18048 18050 18338 -3 18048 18338 18336 -3 18049 18337 18339 -3 18049 18339 18051 -3 18050 18052 18340 -3 18050 18340 18338 -3 18051 18339 18341 -3 18051 18341 18053 -3 18052 18054 18342 -3 18052 18342 18340 -3 18053 18341 18343 -3 18053 18343 18055 -3 18054 18056 18344 -3 18054 18344 18342 -3 18055 18343 18345 -3 18055 18345 18057 -3 18056 18058 18346 -3 18056 18346 18344 -3 18057 18345 18347 -3 18057 18347 18059 -3 18058 18060 18348 -3 18058 18348 18346 -3 18059 18347 18349 -3 18059 18349 18061 -3 18060 18062 18350 -3 18060 18350 18348 -3 18061 18349 18351 -3 18061 18351 18063 -3 18062 18064 18352 -3 18062 18352 18350 -3 18063 18351 18353 -3 18063 18353 18065 -3 18064 18066 18354 -3 18064 18354 18352 -3 18065 18353 18355 -3 18065 18355 18067 -3 18066 18068 18356 -3 18066 18356 18354 -3 
18067 18355 18357 -3 18067 18357 18069 -3 18068 18070 18358 -3 18068 18358 18356 -3 18069 18357 18359 -3 18069 18359 18071 -3 18070 18072 18360 -3 18070 18360 18358 -3 18071 18359 18361 -3 18071 18361 18073 -3 18072 18074 18362 -3 18072 18362 18360 -3 18073 18361 18363 -3 18073 18363 18075 -3 18074 18076 18364 -3 18074 18364 18362 -3 18075 18363 18365 -3 18075 18365 18077 -3 18076 18078 18366 -3 18076 18366 18364 -3 18077 18365 18367 -3 18077 18367 18079 -3 18078 18080 18368 -3 18078 18368 18366 -3 18079 18367 18369 -3 18079 18369 18081 -3 18080 18082 18370 -3 18080 18370 18368 -3 18081 18369 18371 -3 18081 18371 18083 -3 18082 18084 18372 -3 18082 18372 18370 -3 18083 18371 18373 -3 18083 18373 18085 -3 18084 18086 18374 -3 18084 18374 18372 -3 18085 18373 18375 -3 18085 18375 18087 -3 18086 18088 18376 -3 18086 18376 18374 -3 18087 18375 18377 -3 18087 18377 18089 -3 18088 18090 18378 -3 18088 18378 18376 -3 18089 18377 18379 -3 18089 18379 18091 -3 18090 18092 18380 -3 18090 18380 18378 -3 18091 18379 18381 -3 18091 18381 18093 -3 18092 18094 18382 -3 18092 18382 18380 -3 18093 18381 18383 -3 18093 18383 18095 -3 18094 18096 18384 -3 18094 18384 18382 -3 18095 18383 18385 -3 18095 18385 18097 -3 18096 18098 18386 -3 18096 18386 18384 -3 18097 18385 18387 -3 18097 18387 18099 -3 18098 18100 18388 -3 18098 18388 18386 -3 18099 18387 18389 -3 18099 18389 18101 -3 18100 18102 18390 -3 18100 18390 18388 -3 18101 18389 18391 -3 18101 18391 18103 -3 18102 18104 18392 -3 18102 18392 18390 -3 18103 18391 18393 -3 18103 18393 18105 -3 18104 18106 18394 -3 18104 18394 18392 -3 18105 18393 18395 -3 18105 18395 18107 -3 18106 18108 18396 -3 18106 18396 18394 -3 18107 18395 18397 -3 18107 18397 18109 -3 18108 18110 18398 -3 18108 18398 18396 -3 18109 18397 18399 -3 18109 18399 18111 -3 18110 18112 18400 -3 18110 18400 18398 -3 18111 18399 18401 -3 18111 18401 18113 -3 18112 18114 18402 -3 18112 18402 18400 -3 18113 18401 18403 -3 18113 18403 18115 -3 18114 18116 18404 -3 18114 18404 18402 -3 18115 18403 18405 -3 18115 18405 18117 -3 18116 18118 18406 -3 18116 18406 18404 -3 18117 18405 18407 -3 18117 18407 18119 -3 18118 18120 18408 -3 18118 18408 18406 -3 18119 18407 18409 -3 18119 18409 18121 -3 18120 18122 18410 -3 18120 18410 18408 -3 18121 18409 18411 -3 18121 18411 18123 -3 18122 18124 18412 -3 18122 18412 18410 -3 18123 18411 18413 -3 18123 18413 18125 -3 18124 18126 18414 -3 18124 18414 18412 -3 18125 18413 18415 -3 18125 18415 18127 -3 18126 18128 18416 -3 18126 18416 18414 -3 18127 18415 18417 -3 18127 18417 18129 -3 18128 18130 18418 -3 18128 18418 18416 -3 18129 18417 18419 -3 18129 18419 18131 -3 18130 18132 18418 -3 18420 18418 18132 -3 18131 18419 18133 -3 18421 18133 18419 -3 18132 18134 18420 -3 18422 18420 18134 -3 18133 18421 18135 -3 18423 18135 18421 -3 18134 18136 18422 -3 18424 18422 18136 -3 18135 18423 18137 -3 18425 18137 18423 -3 18136 18138 18424 -3 18426 18424 18138 -3 18137 18425 18139 -3 18427 18139 18425 -3 18138 18140 18426 -3 18428 18426 18140 -3 18139 18427 18141 -3 18429 18141 18427 -3 18140 18142 18428 -3 18430 18428 18142 -3 18141 18429 18143 -3 18431 18143 18429 -3 18142 18144 18430 -3 18432 18430 18144 -3 18143 18431 18145 -3 18433 18145 18431 -3 18144 18146 18432 -3 18434 18432 18146 -3 18145 18433 18147 -3 18435 18147 18433 -3 18146 18148 18434 -3 18436 18434 18148 -3 18147 18435 18149 -3 18437 18149 18435 -3 18148 18150 18436 -3 18438 18436 18150 -3 18149 18437 18151 -3 18439 18151 18437 -3 18150 18152 18438 -3 18440 18438 18152 -3 18151 18439 18153 -3 18441 
18153 18439 -3 18152 18154 18440 -3 18442 18440 18154 -3 18153 18441 18155 -3 18443 18155 18441 -3 18154 18156 18442 -3 18444 18442 18156 -3 18155 18443 18157 -3 18445 18157 18443 -3 18156 18158 18444 -3 18446 18444 18158 -3 18157 18445 18159 -3 18447 18159 18445 -3 18158 18160 18446 -3 18448 18446 18160 -3 18159 18447 18161 -3 18449 18161 18447 -3 18160 18162 18448 -3 18450 18448 18162 -3 18161 18449 18163 -3 18451 18163 18449 -3 18162 18164 18450 -3 18452 18450 18164 -3 18163 18451 18165 -3 18453 18165 18451 -3 18164 18166 18452 -3 18454 18452 18166 -3 18165 18453 18167 -3 18455 18167 18453 -3 18166 18168 18454 -3 18456 18454 18168 -3 18167 18455 18169 -3 18457 18169 18455 -3 18168 18170 18456 -3 18458 18456 18170 -3 18169 18457 18171 -3 18459 18171 18457 -3 18170 18172 18458 -3 18460 18458 18172 -3 18171 18459 18173 -3 18461 18173 18459 -3 18172 18174 18460 -3 18462 18460 18174 -3 18173 18461 18175 -3 18463 18175 18461 -3 18174 18176 18462 -3 18464 18462 18176 -3 18175 18463 18179 -3 18467 18179 18463 -3 18176 18177 18464 -3 18465 18464 18177 -3 18177 18180 18465 -3 18468 18465 18180 -3 18178 18179 18466 -3 18467 18466 18179 -3 18178 18466 18183 -3 18471 18183 18466 -3 18180 18181 18468 -3 18469 18468 18181 -3 18181 18184 18469 -3 18472 18469 18184 -3 18182 18183 18470 -3 18471 18470 18183 -3 18182 18470 18252 -3 18540 18252 18470 -3 18184 18185 18472 -3 18473 18472 18185 -3 18185 18186 18473 -3 18474 18473 18186 -3 18186 18187 18474 -3 18475 18474 18187 -3 18187 18188 18475 -3 18476 18475 18188 -3 18188 18189 18476 -3 18477 18476 18189 -3 18189 18190 18477 -3 18478 18477 18190 -3 18190 18191 18478 -3 18479 18478 18191 -3 18191 18192 18479 -3 18480 18479 18192 -3 18192 18193 18480 -3 18481 18480 18193 -3 18193 18194 18481 -3 18482 18481 18194 -3 18194 18195 18482 -3 18483 18482 18195 -3 18195 18196 18483 -3 18484 18483 18196 -3 18196 18197 18484 -3 18485 18484 18197 -3 18197 18198 18485 -3 18486 18485 18198 -3 18198 18199 18486 -3 18487 18486 18199 -3 18199 18200 18487 -3 18488 18487 18200 -3 18200 18201 18488 -3 18489 18488 18201 -3 18201 18202 18489 -3 18490 18489 18202 -3 18202 18203 18490 -3 18491 18490 18203 -3 18203 18204 18491 -3 18492 18491 18204 -3 18204 18205 18492 -3 18493 18492 18205 -3 18205 18206 18493 -3 18494 18493 18206 -3 18206 18207 18494 -3 18495 18494 18207 -3 18207 18208 18495 -3 18496 18495 18208 -3 18208 18209 18496 -3 18497 18496 18209 -3 18209 18210 18497 -3 18498 18497 18210 -3 18210 18211 18498 -3 18499 18498 18211 -3 18211 18212 18499 -3 18500 18499 18212 -3 18212 18213 18500 -3 18501 18500 18213 -3 18213 18214 18502 -3 18213 18502 18501 -3 18214 18215 18503 -3 18214 18503 18502 -3 18215 18216 18504 -3 18215 18504 18503 -3 18216 18217 18505 -3 18216 18505 18504 -3 18217 18218 18506 -3 18217 18506 18505 -3 18218 18219 18507 -3 18218 18507 18506 -3 18219 18220 18508 -3 18219 18508 18507 -3 18220 18221 18509 -3 18220 18509 18508 -3 18221 18222 18510 -3 18221 18510 18509 -3 18222 18223 18511 -3 18222 18511 18510 -3 18223 18224 18512 -3 18223 18512 18511 -3 18224 18225 18513 -3 18224 18513 18512 -3 18225 18226 18514 -3 18225 18514 18513 -3 18226 18227 18515 -3 18226 18515 18514 -3 18227 18228 18516 -3 18227 18516 18515 -3 18228 18229 18517 -3 18228 18517 18516 -3 18229 18230 18518 -3 18229 18518 18517 -3 18230 18231 18519 -3 18230 18519 18518 -3 18231 18232 18520 -3 18231 18520 18519 -3 18232 18233 18521 -3 18232 18521 18520 -3 18233 18234 18522 -3 18233 18522 18521 -3 18234 18235 18523 -3 18234 18523 18522 -3 18235 18236 18524 -3 18235 18524 18523 -3 18236 18237 
18525 -3 18236 18525 18524 -3 18237 18238 18526 -3 18237 18526 18525 -3 18238 18239 18527 -3 18238 18527 18526 -3 18239 18240 18528 -3 18239 18528 18527 -3 18240 18241 18529 -3 18240 18529 18528 -3 18241 18242 18530 -3 18241 18530 18529 -3 18242 18243 18531 -3 18242 18531 18530 -3 18243 18244 18532 -3 18243 18532 18531 -3 18244 18245 18533 -3 18244 18533 18532 -3 18245 18246 18534 -3 18245 18534 18533 -3 18246 18247 18535 -3 18246 18535 18534 -3 18247 18248 18536 -3 18247 18536 18535 -3 18248 18249 18537 -3 18248 18537 18536 -3 18249 18250 18538 -3 18249 18538 18537 -3 18250 18251 18539 -3 18250 18539 18538 -3 18251 18252 18540 -3 18251 18540 18539 -3 18253 18541 18542 -3 18253 18542 18254 -3 18253 18323 18611 -3 18253 18611 18541 -3 18254 18542 18543 -3 18254 18543 18255 -3 18255 18543 18544 -3 18255 18544 18256 -3 18256 18544 18545 -3 18256 18545 18257 -3 18257 18545 18546 -3 18257 18546 18258 -3 18258 18546 18547 -3 18258 18547 18259 -3 18259 18547 18548 -3 18259 18548 18260 -3 18260 18548 18549 -3 18260 18549 18261 -3 18261 18549 18550 -3 18261 18550 18262 -3 18262 18550 18551 -3 18262 18551 18263 -3 18263 18551 18552 -3 18263 18552 18264 -3 18264 18552 18553 -3 18264 18553 18265 -3 18265 18553 18554 -3 18265 18554 18266 -3 18266 18554 18555 -3 18266 18555 18267 -3 18267 18555 18556 -3 18267 18556 18268 -3 18268 18556 18557 -3 18268 18557 18269 -3 18269 18557 18558 -3 18269 18558 18270 -3 18270 18558 18559 -3 18270 18559 18271 -3 18271 18559 18560 -3 18271 18560 18272 -3 18272 18560 18561 -3 18272 18561 18273 -3 18273 18561 18562 -3 18273 18562 18274 -3 18274 18562 18563 -3 18274 18563 18275 -3 18275 18563 18564 -3 18275 18564 18276 -3 18276 18564 18565 -3 18276 18565 18277 -3 18277 18565 18566 -3 18277 18566 18278 -3 18278 18566 18567 -3 18278 18567 18279 -3 18279 18567 18568 -3 18279 18568 18280 -3 18280 18568 18569 -3 18280 18569 18281 -3 18281 18569 18570 -3 18281 18570 18282 -3 18282 18570 18571 -3 18282 18571 18283 -3 18283 18571 18572 -3 18283 18572 18284 -3 18284 18572 18573 -3 18284 18573 18285 -3 18285 18573 18574 -3 18285 18574 18286 -3 18286 18574 18575 -3 18286 18575 18287 -3 18287 18575 18576 -3 18287 18576 18288 -3 18288 18576 18577 -3 18288 18577 18289 -3 18289 18577 18578 -3 18289 18578 18290 -3 18290 18578 18579 -3 18290 18579 18291 -3 18291 18579 18580 -3 18291 18580 18292 -3 18292 18580 18581 -3 18292 18581 18293 -3 18293 18581 18582 -3 18293 18582 18294 -3 18294 18582 18583 -3 18294 18583 18295 -3 18295 18583 18584 -3 18295 18584 18296 -3 18296 18584 18585 -3 18296 18585 18297 -3 18297 18585 18586 -3 18297 18586 18298 -3 18298 18586 18299 -3 18587 18299 18586 -3 18299 18587 18300 -3 18588 18300 18587 -3 18300 18588 18301 -3 18589 18301 18588 -3 18301 18589 18302 -3 18590 18302 18589 -3 18302 18590 18303 -3 18591 18303 18590 -3 18303 18591 18304 -3 18592 18304 18591 -3 18304 18592 18305 -3 18593 18305 18592 -3 18305 18593 18306 -3 18594 18306 18593 -3 18306 18594 18307 -3 18595 18307 18594 -3 18307 18595 18308 -3 18596 18308 18595 -3 18308 18596 18309 -3 18597 18309 18596 -3 18309 18597 18310 -3 18598 18310 18597 -3 18310 18598 18311 -3 18599 18311 18598 -3 18311 18599 18312 -3 18600 18312 18599 -3 18312 18600 18313 -3 18601 18313 18600 -3 18313 18601 18314 -3 18602 18314 18601 -3 18314 18602 18315 -3 18603 18315 18602 -3 18315 18603 18316 -3 18604 18316 18603 -3 18316 18604 18317 -3 18605 18317 18604 -3 18317 18605 18318 -3 18606 18318 18605 -3 18318 18606 18319 -3 18607 18319 18606 -3 18319 18607 18320 -3 18608 18320 18607 -3 18320 18608 18321 -3 18609 18321 18608 
-3 18321 18609 18324 -3 18612 18324 18609 -3 18322 18610 18323 -3 18611 18323 18610 -3 18322 18327 18610 -3 18615 18610 18327 -3 18324 18612 18325 -3 18613 18325 18612 -3 18325 18613 18328 -3 18616 18328 18613 -3 18326 18614 18327 -3 18615 18327 18614 -3 18326 18330 18614 -3 18618 18614 18330 -3 18328 18616 18329 -3 18617 18329 18616 -3 18329 18617 18331 -3 18619 18331 18617 -3 18330 18332 18618 -3 18620 18618 18332 -3 18331 18619 18333 -3 18621 18333 18619 -3 18332 18334 18620 -3 18622 18620 18334 -3 18333 18621 18335 -3 18623 18335 18621 -3 18334 18336 18622 -3 18624 18622 18336 -3 18335 18623 18337 -3 18625 18337 18623 -3 18336 18338 18624 -3 18626 18624 18338 -3 18337 18625 18339 -3 18627 18339 18625 -3 18338 18340 18626 -3 18628 18626 18340 -3 18339 18627 18341 -3 18629 18341 18627 -3 18340 18342 18628 -3 18630 18628 18342 -3 18341 18629 18343 -3 18631 18343 18629 -3 18342 18344 18630 -3 18632 18630 18344 -3 18343 18631 18345 -3 18633 18345 18631 -3 18344 18346 18632 -3 18634 18632 18346 -3 18345 18633 18347 -3 18635 18347 18633 -3 18346 18348 18634 -3 18636 18634 18348 -3 18347 18635 18349 -3 18637 18349 18635 -3 18348 18350 18636 -3 18638 18636 18350 -3 18349 18637 18351 -3 18639 18351 18637 -3 18350 18352 18638 -3 18640 18638 18352 -3 18351 18639 18353 -3 18641 18353 18639 -3 18352 18354 18640 -3 18642 18640 18354 -3 18353 18641 18355 -3 18643 18355 18641 -3 18354 18356 18642 -3 18644 18642 18356 -3 18355 18643 18357 -3 18645 18357 18643 -3 18356 18358 18644 -3 18646 18644 18358 -3 18357 18645 18359 -3 18647 18359 18645 -3 18358 18360 18646 -3 18648 18646 18360 -3 18359 18647 18361 -3 18649 18361 18647 -3 18360 18362 18648 -3 18650 18648 18362 -3 18361 18649 18363 -3 18651 18363 18649 -3 18362 18364 18650 -3 18652 18650 18364 -3 18363 18651 18365 -3 18653 18365 18651 -3 18364 18366 18652 -3 18654 18652 18366 -3 18365 18653 18367 -3 18655 18367 18653 -3 18366 18368 18654 -3 18656 18654 18368 -3 18367 18655 18369 -3 18657 18369 18655 -3 18368 18370 18656 -3 18658 18656 18370 -3 18369 18657 18371 -3 18659 18371 18657 -3 18370 18372 18658 -3 18660 18658 18372 -3 18371 18659 18373 -3 18661 18373 18659 -3 18372 18374 18660 -3 18662 18660 18374 -3 18373 18661 18375 -3 18663 18375 18661 -3 18374 18376 18662 -3 18664 18662 18376 -3 18375 18663 18377 -3 18665 18377 18663 -3 18376 18378 18664 -3 18666 18664 18378 -3 18377 18665 18379 -3 18667 18379 18665 -3 18378 18380 18666 -3 18668 18666 18380 -3 18379 18667 18381 -3 18669 18381 18667 -3 18380 18382 18670 -3 18380 18670 18668 -3 18381 18669 18383 -3 18671 18383 18669 -3 18382 18384 18672 -3 18382 18672 18670 -3 18383 18671 18673 -3 18383 18673 18385 -3 18384 18386 18674 -3 18384 18674 18672 -3 18385 18673 18675 -3 18385 18675 18387 -3 18386 18388 18676 -3 18386 18676 18674 -3 18387 18675 18677 -3 18387 18677 18389 -3 18388 18390 18678 -3 18388 18678 18676 -3 18389 18677 18679 -3 18389 18679 18391 -3 18390 18392 18680 -3 18390 18680 18678 -3 18391 18679 18681 -3 18391 18681 18393 -3 18392 18394 18682 -3 18392 18682 18680 -3 18393 18681 18683 -3 18393 18683 18395 -3 18394 18396 18684 -3 18394 18684 18682 -3 18395 18683 18685 -3 18395 18685 18397 -3 18396 18398 18686 -3 18396 18686 18684 -3 18397 18685 18687 -3 18397 18687 18399 -3 18398 18400 18688 -3 18398 18688 18686 -3 18399 18687 18689 -3 18399 18689 18401 -3 18400 18402 18690 -3 18400 18690 18688 -3 18401 18689 18691 -3 18401 18691 18403 -3 18402 18404 18692 -3 18402 18692 18690 -3 18403 18691 18693 -3 18403 18693 18405 -3 18404 18406 18694 -3 18404 18694 18692 -3 18405 18693 18695 -3 
18405 18695 18407 -3 18406 18408 18696 -3 18406 18696 18694 -3 18407 18695 18697 -3 18407 18697 18409 -3 18408 18410 18698 -3 18408 18698 18696 -3 18409 18697 18699 -3 18409 18699 18411 -3 18410 18412 18700 -3 18410 18700 18698 -3 18411 18699 18701 -3 18411 18701 18413 -3 18412 18414 18702 -3 18412 18702 18700 -3 18413 18701 18703 -3 18413 18703 18415 -3 18414 18416 18704 -3 18414 18704 18702 -3 18415 18703 18705 -3 18415 18705 18417 -3 18416 18418 18706 -3 18416 18706 18704 -3 18417 18705 18707 -3 18417 18707 18419 -3 18418 18420 18708 -3 18418 18708 18706 -3 18419 18707 18709 -3 18419 18709 18421 -3 18420 18422 18710 -3 18420 18710 18708 -3 18421 18709 18711 -3 18421 18711 18423 -3 18422 18424 18712 -3 18422 18712 18710 -3 18423 18711 18713 -3 18423 18713 18425 -3 18424 18426 18714 -3 18424 18714 18712 -3 18425 18713 18715 -3 18425 18715 18427 -3 18426 18428 18716 -3 18426 18716 18714 -3 18427 18715 18717 -3 18427 18717 18429 -3 18428 18430 18718 -3 18428 18718 18716 -3 18429 18717 18719 -3 18429 18719 18431 -3 18430 18432 18720 -3 18430 18720 18718 -3 18431 18719 18721 -3 18431 18721 18433 -3 18432 18434 18722 -3 18432 18722 18720 -3 18433 18721 18723 -3 18433 18723 18435 -3 18434 18436 18724 -3 18434 18724 18722 -3 18435 18723 18725 -3 18435 18725 18437 -3 18436 18438 18726 -3 18436 18726 18724 -3 18437 18725 18727 -3 18437 18727 18439 -3 18438 18440 18728 -3 18438 18728 18726 -3 18439 18727 18729 -3 18439 18729 18441 -3 18440 18442 18730 -3 18440 18730 18728 -3 18441 18729 18731 -3 18441 18731 18443 -3 18442 18444 18732 -3 18442 18732 18730 -3 18443 18731 18733 -3 18443 18733 18445 -3 18444 18446 18734 -3 18444 18734 18732 -3 18445 18733 18735 -3 18445 18735 18447 -3 18446 18448 18736 -3 18446 18736 18734 -3 18447 18735 18737 -3 18447 18737 18449 -3 18448 18450 18738 -3 18448 18738 18736 -3 18449 18737 18739 -3 18449 18739 18451 -3 18450 18452 18740 -3 18450 18740 18738 -3 18451 18739 18741 -3 18451 18741 18453 -3 18452 18454 18742 -3 18452 18742 18740 -3 18453 18741 18743 -3 18453 18743 18455 -3 18454 18456 18744 -3 18454 18744 18742 -3 18455 18743 18745 -3 18455 18745 18457 -3 18456 18458 18746 -3 18456 18746 18744 -3 18457 18745 18747 -3 18457 18747 18459 -3 18458 18460 18748 -3 18458 18748 18746 -3 18459 18747 18749 -3 18459 18749 18461 -3 18460 18462 18750 -3 18460 18750 18748 -3 18461 18749 18751 -3 18461 18751 18463 -3 18462 18464 18750 -3 18752 18750 18464 -3 18463 18751 18755 -3 18463 18755 18467 -3 18464 18465 18752 -3 18753 18752 18465 -3 18465 18468 18753 -3 18756 18753 18468 -3 18466 18467 18754 -3 18755 18754 18467 -3 18466 18754 18471 -3 18759 18471 18754 -3 18468 18469 18756 -3 18757 18756 18469 -3 18469 18472 18757 -3 18760 18757 18472 -3 18470 18471 18758 -3 18759 18758 18471 -3 18470 18758 18540 -3 18828 18540 18758 -3 18472 18473 18760 -3 18761 18760 18473 -3 18473 18474 18761 -3 18762 18761 18474 -3 18474 18475 18762 -3 18763 18762 18475 -3 18475 18476 18763 -3 18764 18763 18476 -3 18476 18477 18764 -3 18765 18764 18477 -3 18477 18478 18765 -3 18766 18765 18478 -3 18478 18479 18766 -3 18767 18766 18479 -3 18479 18480 18767 -3 18768 18767 18480 -3 18480 18481 18768 -3 18769 18768 18481 -3 18481 18482 18769 -3 18770 18769 18482 -3 18482 18483 18770 -3 18771 18770 18483 -3 18483 18484 18771 -3 18772 18771 18484 -3 18484 18485 18772 -3 18773 18772 18485 -3 18485 18486 18773 -3 18774 18773 18486 -3 18486 18487 18774 -3 18775 18774 18487 -3 18487 18488 18775 -3 18776 18775 18488 -3 18488 18489 18776 -3 18777 18776 18489 -3 18489 18490 18777 -3 18778 18777 18490 -3 18490 
18491 18778 -3 18779 18778 18491 -3 18491 18492 18779 -3 18780 18779 18492 -3 18492 18493 18780 -3 18781 18780 18493 -3 18493 18494 18781 -3 18782 18781 18494 -3 18494 18495 18782 -3 18783 18782 18495 -3 18495 18496 18783 -3 18784 18783 18496 -3 18496 18497 18784 -3 18785 18784 18497 -3 18497 18498 18785 -3 18786 18785 18498 -3 18498 18499 18786 -3 18787 18786 18499 -3 18499 18500 18787 -3 18788 18787 18500 -3 18500 18501 18788 -3 18789 18788 18501 -3 18501 18502 18789 -3 18790 18789 18502 -3 18502 18503 18790 -3 18791 18790 18503 -3 18503 18504 18791 -3 18792 18791 18504 -3 18504 18505 18792 -3 18793 18792 18505 -3 18505 18506 18793 -3 18794 18793 18506 -3 18506 18507 18794 -3 18795 18794 18507 -3 18507 18508 18795 -3 18796 18795 18508 -3 18508 18509 18796 -3 18797 18796 18509 -3 18509 18510 18797 -3 18798 18797 18510 -3 18510 18511 18798 -3 18799 18798 18511 -3 18511 18512 18799 -3 18800 18799 18512 -3 18512 18513 18800 -3 18801 18800 18513 -3 18513 18514 18801 -3 18802 18801 18514 -3 18514 18515 18802 -3 18803 18802 18515 -3 18515 18516 18803 -3 18804 18803 18516 -3 18516 18517 18804 -3 18805 18804 18517 -3 18517 18518 18805 -3 18806 18805 18518 -3 18518 18519 18806 -3 18807 18806 18519 -3 18519 18520 18807 -3 18808 18807 18520 -3 18520 18521 18808 -3 18809 18808 18521 -3 18521 18522 18809 -3 18810 18809 18522 -3 18522 18523 18810 -3 18811 18810 18523 -3 18523 18524 18811 -3 18812 18811 18524 -3 18524 18525 18812 -3 18813 18812 18525 -3 18525 18526 18813 -3 18814 18813 18526 -3 18526 18527 18814 -3 18815 18814 18527 -3 18527 18528 18815 -3 18816 18815 18528 -3 18528 18529 18816 -3 18817 18816 18529 -3 18529 18530 18817 -3 18818 18817 18530 -3 18530 18531 18818 -3 18819 18818 18531 -3 18531 18532 18819 -3 18820 18819 18532 -3 18532 18533 18820 -3 18821 18820 18533 -3 18533 18534 18821 -3 18822 18821 18534 -3 18534 18535 18822 -3 18823 18822 18535 -3 18535 18536 18823 -3 18824 18823 18536 -3 18536 18537 18824 -3 18825 18824 18537 -3 18537 18538 18825 -3 18826 18825 18538 -3 18538 18539 18826 -3 18827 18826 18539 -3 18539 18540 18827 -3 18828 18827 18540 -3 18541 18829 18542 -3 18830 18542 18829 -3 18541 18611 18899 -3 18541 18899 18829 -3 18542 18830 18543 -3 18831 18543 18830 -3 18543 18831 18544 -3 18832 18544 18831 -3 18544 18832 18545 -3 18833 18545 18832 -3 18545 18833 18546 -3 18834 18546 18833 -3 18546 18834 18547 -3 18835 18547 18834 -3 18547 18835 18548 -3 18836 18548 18835 -3 18548 18836 18549 -3 18837 18549 18836 -3 18549 18837 18550 -3 18838 18550 18837 -3 18550 18838 18839 -3 18550 18839 18551 -3 18551 18839 18840 -3 18551 18840 18552 -3 18552 18840 18841 -3 18552 18841 18553 -3 18553 18841 18842 -3 18553 18842 18554 -3 18554 18842 18843 -3 18554 18843 18555 -3 18555 18843 18844 -3 18555 18844 18556 -3 18556 18844 18845 -3 18556 18845 18557 -3 18557 18845 18846 -3 18557 18846 18558 -3 18558 18846 18847 -3 18558 18847 18559 -3 18559 18847 18848 -3 18559 18848 18560 -3 18560 18848 18849 -3 18560 18849 18561 -3 18561 18849 18850 -3 18561 18850 18562 -3 18562 18850 18851 -3 18562 18851 18563 -3 18563 18851 18852 -3 18563 18852 18564 -3 18564 18852 18853 -3 18564 18853 18565 -3 18565 18853 18854 -3 18565 18854 18566 -3 18566 18854 18855 -3 18566 18855 18567 -3 18567 18855 18856 -3 18567 18856 18568 -3 18568 18856 18857 -3 18568 18857 18569 -3 18569 18857 18858 -3 18569 18858 18570 -3 18570 18858 18859 -3 18570 18859 18571 -3 18571 18859 18860 -3 18571 18860 18572 -3 18572 18860 18861 -3 18572 18861 18573 -3 18573 18861 18862 -3 18573 18862 18574 -3 18574 18862 18863 -3 18574 18863 
18575 -3 18575 18863 18864 -3 18575 18864 18576 -3 18576 18864 18865 -3 18576 18865 18577 -3 18577 18865 18866 -3 18577 18866 18578 -3 18578 18866 18867 -3 18578 18867 18579 -3 18579 18867 18868 -3 18579 18868 18580 -3 18580 18868 18869 -3 18580 18869 18581 -3 18581 18869 18870 -3 18581 18870 18582 -3 18582 18870 18871 -3 18582 18871 18583 -3 18583 18871 18872 -3 18583 18872 18584 -3 18584 18872 18873 -3 18584 18873 18585 -3 18585 18873 18874 -3 18585 18874 18586 -3 18586 18874 18875 -3 18586 18875 18587 -3 18587 18875 18876 -3 18587 18876 18588 -3 18588 18876 18877 -3 18588 18877 18589 -3 18589 18877 18878 -3 18589 18878 18590 -3 18590 18878 18879 -3 18590 18879 18591 -3 18591 18879 18880 -3 18591 18880 18592 -3 18592 18880 18881 -3 18592 18881 18593 -3 18593 18881 18882 -3 18593 18882 18594 -3 18594 18882 18883 -3 18594 18883 18595 -3 18595 18883 18884 -3 18595 18884 18596 -3 18596 18884 18885 -3 18596 18885 18597 -3 18597 18885 18886 -3 18597 18886 18598 -3 18598 18886 18887 -3 18598 18887 18599 -3 18599 18887 18888 -3 18599 18888 18600 -3 18600 18888 18889 -3 18600 18889 18601 -3 18601 18889 18890 -3 18601 18890 18602 -3 18602 18890 18891 -3 18602 18891 18603 -3 18603 18891 18892 -3 18603 18892 18604 -3 18604 18892 18893 -3 18604 18893 18605 -3 18605 18893 18894 -3 18605 18894 18606 -3 18606 18894 18895 -3 18606 18895 18607 -3 18607 18895 18896 -3 18607 18896 18608 -3 18608 18896 18897 -3 18608 18897 18609 -3 18609 18897 18900 -3 18609 18900 18612 -3 18610 18898 18899 -3 18610 18899 18611 -3 18610 18615 18903 -3 18610 18903 18898 -3 18612 18900 18901 -3 18612 18901 18613 -3 18613 18901 18904 -3 18613 18904 18616 -3 18614 18902 18903 -3 18614 18903 18615 -3 18614 18618 18906 -3 18614 18906 18902 -3 18616 18904 18905 -3 18616 18905 18617 -3 18617 18905 18907 -3 18617 18907 18619 -3 18618 18620 18908 -3 18618 18908 18906 -3 18619 18907 18909 -3 18619 18909 18621 -3 18620 18622 18910 -3 18620 18910 18908 -3 18621 18909 18911 -3 18621 18911 18623 -3 18622 18624 18912 -3 18622 18912 18910 -3 18623 18911 18913 -3 18623 18913 18625 -3 18624 18626 18914 -3 18624 18914 18912 -3 18625 18913 18915 -3 18625 18915 18627 -3 18626 18628 18916 -3 18626 18916 18914 -3 18627 18915 18917 -3 18627 18917 18629 -3 18628 18630 18918 -3 18628 18918 18916 -3 18629 18917 18919 -3 18629 18919 18631 -3 18630 18632 18918 -3 18920 18918 18632 -3 18631 18919 18921 -3 18631 18921 18633 -3 18632 18634 18920 -3 18922 18920 18634 -3 18633 18921 18923 -3 18633 18923 18635 -3 18634 18636 18922 -3 18924 18922 18636 -3 18635 18923 18637 -3 18925 18637 18923 -3 18636 18638 18924 -3 18926 18924 18638 -3 18637 18925 18639 -3 18927 18639 18925 -3 18638 18640 18926 -3 18928 18926 18640 -3 18639 18927 18641 -3 18929 18641 18927 -3 18640 18642 18928 -3 18930 18928 18642 -3 18641 18929 18643 -3 18931 18643 18929 -3 18642 18644 18930 -3 18932 18930 18644 -3 18643 18931 18645 -3 18933 18645 18931 -3 18644 18646 18932 -3 18934 18932 18646 -3 18645 18933 18647 -3 18935 18647 18933 -3 18646 18648 18934 -3 18936 18934 18648 -3 18647 18935 18649 -3 18937 18649 18935 -3 18648 18650 18936 -3 18938 18936 18650 -3 18649 18937 18651 -3 18939 18651 18937 -3 18650 18652 18938 -3 18940 18938 18652 -3 18651 18939 18653 -3 18941 18653 18939 -3 18652 18654 18940 -3 18942 18940 18654 -3 18653 18941 18655 -3 18943 18655 18941 -3 18654 18656 18942 -3 18944 18942 18656 -3 18655 18943 18657 -3 18945 18657 18943 -3 18656 18658 18944 -3 18946 18944 18658 -3 18657 18945 18659 -3 18947 18659 18945 -3 18658 18660 18946 -3 18948 18946 18660 -3 18659 18947 18661 
-3 18949 18661 18947 -3 18660 18662 18948 -3 18950 18948 18662 -3 18661 18949 18663 -3 18951 18663 18949 -3 18662 18664 18950 -3 18952 18950 18664 -3 18663 18951 18665 -3 18953 18665 18951 -3 18664 18666 18952 -3 18954 18952 18666 -3 18665 18953 18667 -3 18955 18667 18953 -3 18666 18668 18954 -3 18956 18954 18668 -3 18667 18955 18669 -3 18957 18669 18955 -3 18668 18670 18956 -3 18958 18956 18670 -3 18669 18957 18671 -3 18959 18671 18957 -3 18670 18672 18958 -3 18960 18958 18672 -3 18671 18959 18673 -3 18961 18673 18959 -3 18672 18674 18960 -3 18962 18960 18674 -3 18673 18961 18675 -3 18963 18675 18961 -3 18674 18676 18962 -3 18964 18962 18676 -3 18675 18963 18677 -3 18965 18677 18963 -3 18676 18678 18964 -3 18966 18964 18678 -3 18677 18965 18679 -3 18967 18679 18965 -3 18678 18680 18966 -3 18968 18966 18680 -3 18679 18967 18681 -3 18969 18681 18967 -3 18680 18682 18968 -3 18970 18968 18682 -3 18681 18969 18683 -3 18971 18683 18969 -3 18682 18684 18970 -3 18972 18970 18684 -3 18683 18971 18685 -3 18973 18685 18971 -3 18684 18686 18972 -3 18974 18972 18686 -3 18685 18973 18687 -3 18975 18687 18973 -3 18686 18688 18974 -3 18976 18974 18688 -3 18687 18975 18689 -3 18977 18689 18975 -3 18688 18690 18976 -3 18978 18976 18690 -3 18689 18977 18691 -3 18979 18691 18977 -3 18690 18692 18978 -3 18980 18978 18692 -3 18691 18979 18693 -3 18981 18693 18979 -3 18692 18694 18980 -3 18982 18980 18694 -3 18693 18981 18695 -3 18983 18695 18981 -3 18694 18696 18982 -3 18984 18982 18696 -3 18695 18983 18697 -3 18985 18697 18983 -3 18696 18698 18984 -3 18986 18984 18698 -3 18697 18985 18699 -3 18987 18699 18985 -3 18698 18700 18986 -3 18988 18986 18700 -3 18699 18987 18701 -3 18989 18701 18987 -3 18700 18702 18988 -3 18990 18988 18702 -3 18701 18989 18703 -3 18991 18703 18989 -3 18702 18704 18990 -3 18992 18990 18704 -3 18703 18991 18705 -3 18993 18705 18991 -3 18704 18706 18992 -3 18994 18992 18706 -3 18705 18993 18707 -3 18995 18707 18993 -3 18706 18708 18994 -3 18996 18994 18708 -3 18707 18995 18709 -3 18997 18709 18995 -3 18708 18710 18996 -3 18998 18996 18710 -3 18709 18997 18711 -3 18999 18711 18997 -3 18710 18712 18998 -3 19000 18998 18712 -3 18711 18999 18713 -3 19001 18713 18999 -3 18712 18714 19000 -3 19002 19000 18714 -3 18713 19001 18715 -3 19003 18715 19001 -3 18714 18716 19004 -3 18714 19004 19002 -3 18715 19003 18717 -3 19005 18717 19003 -3 18716 18718 19006 -3 18716 19006 19004 -3 18717 19005 18719 -3 19007 18719 19005 -3 18718 18720 19008 -3 18718 19008 19006 -3 18719 19007 19009 -3 18719 19009 18721 -3 18720 18722 19010 -3 18720 19010 19008 -3 18721 19009 19011 -3 18721 19011 18723 -3 18722 18724 19012 -3 18722 19012 19010 -3 18723 19011 19013 -3 18723 19013 18725 -3 18724 18726 19014 -3 18724 19014 19012 -3 18725 19013 19015 -3 18725 19015 18727 -3 18726 18728 19016 -3 18726 19016 19014 -3 18727 19015 19017 -3 18727 19017 18729 -3 18728 18730 19018 -3 18728 19018 19016 -3 18729 19017 19019 -3 18729 19019 18731 -3 18730 18732 19020 -3 18730 19020 19018 -3 18731 19019 19021 -3 18731 19021 18733 -3 18732 18734 19022 -3 18732 19022 19020 -3 18733 19021 19023 -3 18733 19023 18735 -3 18734 18736 19024 -3 18734 19024 19022 -3 18735 19023 19025 -3 18735 19025 18737 -3 18736 18738 19026 -3 18736 19026 19024 -3 18737 19025 19027 -3 18737 19027 18739 -3 18738 18740 19028 -3 18738 19028 19026 -3 18739 19027 19029 -3 18739 19029 18741 -3 18740 18742 19030 -3 18740 19030 19028 -3 18741 19029 19031 -3 18741 19031 18743 -3 18742 18744 19032 -3 18742 19032 19030 -3 18743 19031 19033 -3 18743 19033 18745 -3 
18744 18746 19034 -3 18744 19034 19032 -3 18745 19033 19035 -3 18745 19035 18747 -3 18746 18748 19036 -3 18746 19036 19034 -3 18747 19035 19037 -3 18747 19037 18749 -3 18748 18750 19038 -3 18748 19038 19036 -3 18749 19037 19039 -3 18749 19039 18751 -3 18750 18752 19040 -3 18750 19040 19038 -3 18751 19039 19043 -3 18751 19043 18755 -3 18752 18753 19041 -3 18752 19041 19040 -3 18753 18756 19044 -3 18753 19044 19041 -3 18754 18755 19043 -3 18754 19043 19042 -3 18754 19042 19047 -3 18754 19047 18759 -3 18756 18757 19045 -3 18756 19045 19044 -3 18757 18760 19048 -3 18757 19048 19045 -3 18758 18759 19047 -3 18758 19047 19046 -3 18758 19046 19116 -3 18758 19116 18828 -3 18760 18761 19049 -3 18760 19049 19048 -3 18761 18762 19050 -3 18761 19050 19049 -3 18762 18763 19051 -3 18762 19051 19050 -3 18763 18764 19052 -3 18763 19052 19051 -3 18764 18765 19053 -3 18764 19053 19052 -3 18765 18766 19054 -3 18765 19054 19053 -3 18766 18767 19055 -3 18766 19055 19054 -3 18767 18768 19056 -3 18767 19056 19055 -3 18768 18769 19057 -3 18768 19057 19056 -3 18769 18770 19058 -3 18769 19058 19057 -3 18770 18771 19059 -3 18770 19059 19058 -3 18771 18772 19060 -3 18771 19060 19059 -3 18772 18773 19061 -3 18772 19061 19060 -3 18773 18774 19062 -3 18773 19062 19061 -3 18774 18775 19063 -3 18774 19063 19062 -3 18775 18776 19064 -3 18775 19064 19063 -3 18776 18777 19065 -3 18776 19065 19064 -3 18777 18778 19066 -3 18777 19066 19065 -3 18778 18779 19067 -3 18778 19067 19066 -3 18779 18780 19068 -3 18779 19068 19067 -3 18780 18781 19069 -3 18780 19069 19068 -3 18781 18782 19070 -3 18781 19070 19069 -3 18782 18783 19071 -3 18782 19071 19070 -3 18783 18784 19072 -3 18783 19072 19071 -3 18784 18785 19073 -3 18784 19073 19072 -3 18785 18786 19074 -3 18785 19074 19073 -3 18786 18787 19075 -3 18786 19075 19074 -3 18787 18788 19076 -3 18787 19076 19075 -3 18788 18789 19077 -3 18788 19077 19076 -3 18789 18790 19078 -3 18789 19078 19077 -3 18790 18791 19079 -3 18790 19079 19078 -3 18791 18792 19080 -3 18791 19080 19079 -3 18792 18793 19081 -3 18792 19081 19080 -3 18793 18794 19082 -3 18793 19082 19081 -3 18794 18795 19083 -3 18794 19083 19082 -3 18795 18796 19084 -3 18795 19084 19083 -3 18796 18797 19085 -3 18796 19085 19084 -3 18797 18798 19086 -3 18797 19086 19085 -3 18798 18799 19087 -3 18798 19087 19086 -3 18799 18800 19087 -3 19088 19087 18800 -3 18800 18801 19088 -3 19089 19088 18801 -3 18801 18802 19089 -3 19090 19089 18802 -3 18802 18803 19090 -3 19091 19090 18803 -3 18803 18804 19091 -3 19092 19091 18804 -3 18804 18805 19092 -3 19093 19092 18805 -3 18805 18806 19093 -3 19094 19093 18806 -3 18806 18807 19094 -3 19095 19094 18807 -3 18807 18808 19095 -3 19096 19095 18808 -3 18808 18809 19096 -3 19097 19096 18809 -3 18809 18810 19097 -3 19098 19097 18810 -3 18810 18811 19098 -3 19099 19098 18811 -3 18811 18812 19099 -3 19100 19099 18812 -3 18812 18813 19100 -3 19101 19100 18813 -3 18813 18814 19101 -3 19102 19101 18814 -3 18814 18815 19102 -3 19103 19102 18815 -3 18815 18816 19103 -3 19104 19103 18816 -3 18816 18817 19104 -3 19105 19104 18817 -3 18817 18818 19105 -3 19106 19105 18818 -3 18818 18819 19106 -3 19107 19106 18819 -3 18819 18820 19107 -3 19108 19107 18820 -3 18820 18821 19108 -3 19109 19108 18821 -3 18821 18822 19109 -3 19110 19109 18822 -3 18822 18823 19110 -3 19111 19110 18823 -3 18823 18824 19111 -3 19112 19111 18824 -3 18824 18825 19112 -3 19113 19112 18825 -3 18825 18826 19113 -3 19114 19113 18826 -3 18826 18827 19114 -3 19115 19114 18827 -3 18827 18828 19115 -3 19116 19115 18828 -3 18829 19117 18830 -3 19118 
18830 19117 -3 18829 18899 19117 -3 19187 19117 18899 -3 18830 19118 18831 -3 19119 18831 19118 -3 18831 19119 18832 -3 19120 18832 19119 -3 18832 19120 18833 -3 19121 18833 19120 -3 18833 19121 18834 -3 19122 18834 19121 -3 18834 19122 18835 -3 19123 18835 19122 -3 18835 19123 18836 -3 19124 18836 19123 -3 18836 19124 18837 -3 19125 18837 19124 -3 18837 19125 18838 -3 19126 18838 19125 -3 18838 19126 18839 -3 19127 18839 19126 -3 18839 19127 18840 -3 19128 18840 19127 -3 18840 19128 18841 -3 19129 18841 19128 -3 18841 19129 18842 -3 19130 18842 19129 -3 18842 19130 18843 -3 19131 18843 19130 -3 18843 19131 18844 -3 19132 18844 19131 -3 18844 19132 18845 -3 19133 18845 19132 -3 18845 19133 18846 -3 19134 18846 19133 -3 18846 19134 18847 -3 19135 18847 19134 -3 18847 19135 18848 -3 19136 18848 19135 -3 18848 19136 18849 -3 19137 18849 19136 -3 18849 19137 18850 -3 19138 18850 19137 -3 18850 19138 18851 -3 19139 18851 19138 -3 18851 19139 18852 -3 19140 18852 19139 -3 18852 19140 18853 -3 19141 18853 19140 -3 18853 19141 18854 -3 19142 18854 19141 -3 18854 19142 18855 -3 19143 18855 19142 -3 18855 19143 18856 -3 19144 18856 19143 -3 18856 19144 18857 -3 19145 18857 19144 -3 18857 19145 18858 -3 19146 18858 19145 -3 18858 19146 18859 -3 19147 18859 19146 -3 18859 19147 18860 -3 19148 18860 19147 -3 18860 19148 18861 -3 19149 18861 19148 -3 18861 19149 18862 -3 19150 18862 19149 -3 18862 19150 18863 -3 19151 18863 19150 -3 18863 19151 18864 -3 19152 18864 19151 -3 18864 19152 18865 -3 19153 18865 19152 -3 18865 19153 18866 -3 19154 18866 19153 -3 18866 19154 18867 -3 19155 18867 19154 -3 18867 19155 18868 -3 19156 18868 19155 -3 18868 19156 18869 -3 19157 18869 19156 -3 18869 19157 18870 -3 19158 18870 19157 -3 18870 19158 18871 -3 19159 18871 19158 -3 18871 19159 18872 -3 19160 18872 19159 -3 18872 19160 18873 -3 19161 18873 19160 -3 18873 19161 18874 -3 19162 18874 19161 -3 18874 19162 18875 -3 19163 18875 19162 -3 18875 19163 18876 -3 19164 18876 19163 -3 18876 19164 18877 -3 19165 18877 19164 -3 18877 19165 18878 -3 19166 18878 19165 -3 18878 19166 18879 -3 19167 18879 19166 -3 18879 19167 18880 -3 19168 18880 19167 -3 18880 19168 18881 -3 19169 18881 19168 -3 18881 19169 18882 -3 19170 18882 19169 -3 18882 19170 18883 -3 19171 18883 19170 -3 18883 19171 18884 -3 19172 18884 19171 -3 18884 19172 18885 -3 19173 18885 19172 -3 18885 19173 18886 -3 19174 18886 19173 -3 18886 19174 18887 -3 19175 18887 19174 -3 18887 19175 18888 -3 19176 18888 19175 -3 18888 19176 19177 -3 18888 19177 18889 -3 18889 19177 19178 -3 18889 19178 18890 -3 18890 19178 19179 -3 18890 19179 18891 -3 18891 19179 19180 -3 18891 19180 18892 -3 18892 19180 19181 -3 18892 19181 18893 -3 18893 19181 19182 -3 18893 19182 18894 -3 18894 19182 19183 -3 18894 19183 18895 -3 18895 19183 19184 -3 18895 19184 18896 -3 18896 19184 19185 -3 18896 19185 18897 -3 18897 19185 19188 -3 18897 19188 18900 -3 18898 19186 19187 -3 18898 19187 18899 -3 18898 18903 19191 -3 18898 19191 19186 -3 18900 19188 19189 -3 18900 19189 18901 -3 18901 19189 19192 -3 18901 19192 18904 -3 18902 19190 19191 -3 18902 19191 18903 -3 18902 18906 19194 -3 18902 19194 19190 -3 18904 19192 19193 -3 18904 19193 18905 -3 18905 19193 19195 -3 18905 19195 18907 -3 18906 18908 19196 -3 18906 19196 19194 -3 18907 19195 19197 -3 18907 19197 18909 -3 18908 18910 19198 -3 18908 19198 19196 -3 18909 19197 19199 -3 18909 19199 18911 -3 18910 18912 19200 -3 18910 19200 19198 -3 18911 19199 19201 -3 18911 19201 18913 -3 18912 18914 19202 -3 18912 19202 19200 -3 18913 19201 
19203 -3 18913 19203 18915 -3 18914 18916 19204 -3 18914 19204 19202 -3 18915 19203 19205 -3 18915 19205 18917 -3 18916 18918 19206 -3 18916 19206 19204 -3 18917 19205 19207 -3 18917 19207 18919 -3 18918 18920 19208 -3 18918 19208 19206 -3 18919 19207 19209 -3 18919 19209 18921 -3 18920 18922 19210 -3 18920 19210 19208 -3 18921 19209 19211 -3 18921 19211 18923 -3 18922 18924 19212 -3 18922 19212 19210 -3 18923 19211 19213 -3 18923 19213 18925 -3 18924 18926 19214 -3 18924 19214 19212 -3 18925 19213 19215 -3 18925 19215 18927 -3 18926 18928 19216 -3 18926 19216 19214 -3 18927 19215 19217 -3 18927 19217 18929 -3 18928 18930 19218 -3 18928 19218 19216 -3 18929 19217 19219 -3 18929 19219 18931 -3 18930 18932 19220 -3 18930 19220 19218 -3 18931 19219 19221 -3 18931 19221 18933 -3 18932 18934 19222 -3 18932 19222 19220 -3 18933 19221 19223 -3 18933 19223 18935 -3 18934 18936 19224 -3 18934 19224 19222 -3 18935 19223 19225 -3 18935 19225 18937 -3 18936 18938 19226 -3 18936 19226 19224 -3 18937 19225 19227 -3 18937 19227 18939 -3 18938 18940 19228 -3 18938 19228 19226 -3 18939 19227 19229 -3 18939 19229 18941 -3 18940 18942 19230 -3 18940 19230 19228 -3 18941 19229 19231 -3 18941 19231 18943 -3 18942 18944 19232 -3 18942 19232 19230 -3 18943 19231 19233 -3 18943 19233 18945 -3 18944 18946 19234 -3 18944 19234 19232 -3 18945 19233 19235 -3 18945 19235 18947 -3 18946 18948 19236 -3 18946 19236 19234 -3 18947 19235 19237 -3 18947 19237 18949 -3 18948 18950 19238 -3 18948 19238 19236 -3 18949 19237 19239 -3 18949 19239 18951 -3 18950 18952 19240 -3 18950 19240 19238 -3 18951 19239 19241 -3 18951 19241 18953 -3 18952 18954 19242 -3 18952 19242 19240 -3 18953 19241 19243 -3 18953 19243 18955 -3 18954 18956 19244 -3 18954 19244 19242 -3 18955 19243 19245 -3 18955 19245 18957 -3 18956 18958 19246 -3 18956 19246 19244 -3 18957 19245 19247 -3 18957 19247 18959 -3 18958 18960 19248 -3 18958 19248 19246 -3 18959 19247 19249 -3 18959 19249 18961 -3 18960 18962 19250 -3 18960 19250 19248 -3 18961 19249 19251 -3 18961 19251 18963 -3 18962 18964 19252 -3 18962 19252 19250 -3 18963 19251 19253 -3 18963 19253 18965 -3 18964 18966 19254 -3 18964 19254 19252 -3 18965 19253 19255 -3 18965 19255 18967 -3 18966 18968 19256 -3 18966 19256 19254 -3 18967 19255 19257 -3 18967 19257 18969 -3 18968 18970 19256 -3 19258 19256 18970 -3 18969 19257 19259 -3 18969 19259 18971 -3 18970 18972 19258 -3 19260 19258 18972 -3 18971 19259 19261 -3 18971 19261 18973 -3 18972 18974 19260 -3 19262 19260 18974 -3 18973 19261 18975 -3 19263 18975 19261 -3 18974 18976 19262 -3 19264 19262 18976 -3 18975 19263 18977 -3 19265 18977 19263 -3 18976 18978 19264 -3 19266 19264 18978 -3 18977 19265 18979 -3 19267 18979 19265 -3 18978 18980 19266 -3 19268 19266 18980 -3 18979 19267 18981 -3 19269 18981 19267 -3 18980 18982 19268 -3 19270 19268 18982 -3 18981 19269 18983 -3 19271 18983 19269 -3 18982 18984 19270 -3 19272 19270 18984 -3 18983 19271 18985 -3 19273 18985 19271 -3 18984 18986 19272 -3 19274 19272 18986 -3 18985 19273 18987 -3 19275 18987 19273 -3 18986 18988 19274 -3 19276 19274 18988 -3 18987 19275 18989 -3 19277 18989 19275 -3 18988 18990 19276 -3 19278 19276 18990 -3 18989 19277 18991 -3 19279 18991 19277 -3 18990 18992 19278 -3 19280 19278 18992 -3 18991 19279 18993 -3 19281 18993 19279 -3 18992 18994 19280 -3 19282 19280 18994 -3 18993 19281 18995 -3 19283 18995 19281 -3 18994 18996 19282 -3 19284 19282 18996 -3 18995 19283 18997 -3 19285 18997 19283 -3 18996 18998 19284 -3 19286 19284 18998 -3 18997 19285 18999 -3 19287 18999 19285 
-3 18998 19000 19286 -3 19288 19286 19000 -3 18999 19287 19001 -3 19289 19001 19287 -3 19000 19002 19288 -3 19290 19288 19002 -3 19001 19289 19003 -3 19291 19003 19289 -3 19002 19004 19290 -3 19292 19290 19004 -3 19003 19291 19005 -3 19293 19005 19291 -3 19004 19006 19292 -3 19294 19292 19006 -3 19005 19293 19007 -3 19295 19007 19293 -3 19006 19008 19294 -3 19296 19294 19008 -3 19007 19295 19009 -3 19297 19009 19295 -3 19008 19010 19296 -3 19298 19296 19010 -3 19009 19297 19011 -3 19299 19011 19297 -3 19010 19012 19298 -3 19300 19298 19012 -3 19011 19299 19013 -3 19301 19013 19299 -3 19012 19014 19300 -3 19302 19300 19014 -3 19013 19301 19015 -3 19303 19015 19301 -3 19014 19016 19302 -3 19304 19302 19016 -3 19015 19303 19017 -3 19305 19017 19303 -3 19016 19018 19304 -3 19306 19304 19018 -3 19017 19305 19019 -3 19307 19019 19305 -3 19018 19020 19306 -3 19308 19306 19020 -3 19019 19307 19021 -3 19309 19021 19307 -3 19020 19022 19308 -3 19310 19308 19022 -3 19021 19309 19023 -3 19311 19023 19309 -3 19022 19024 19310 -3 19312 19310 19024 -3 19023 19311 19025 -3 19313 19025 19311 -3 19024 19026 19312 -3 19314 19312 19026 -3 19025 19313 19027 -3 19315 19027 19313 -3 19026 19028 19314 -3 19316 19314 19028 -3 19027 19315 19029 -3 19317 19029 19315 -3 19028 19030 19316 -3 19318 19316 19030 -3 19029 19317 19031 -3 19319 19031 19317 -3 19030 19032 19318 -3 19320 19318 19032 -3 19031 19319 19033 -3 19321 19033 19319 -3 19032 19034 19320 -3 19322 19320 19034 -3 19033 19321 19035 -3 19323 19035 19321 -3 19034 19036 19322 -3 19324 19322 19036 -3 19035 19323 19037 -3 19325 19037 19323 -3 19036 19038 19324 -3 19326 19324 19038 -3 19037 19325 19039 -3 19327 19039 19325 -3 19038 19040 19326 -3 19328 19326 19040 -3 19039 19327 19043 -3 19331 19043 19327 -3 19040 19041 19328 -3 19329 19328 19041 -3 19041 19044 19329 -3 19332 19329 19044 -3 19042 19043 19330 -3 19331 19330 19043 -3 19042 19330 19047 -3 19335 19047 19330 -3 19044 19045 19332 -3 19333 19332 19045 -3 19045 19048 19333 -3 19336 19333 19048 -3 19046 19047 19334 -3 19335 19334 19047 -3 19046 19334 19116 -3 19404 19116 19334 -3 19048 19049 19336 -3 19337 19336 19049 -3 19049 19050 19337 -3 19338 19337 19050 -3 19050 19051 19338 -3 19339 19338 19051 -3 19051 19052 19339 -3 19340 19339 19052 -3 19052 19053 19340 -3 19341 19340 19053 -3 19053 19054 19342 -3 19053 19342 19341 -3 19054 19055 19343 -3 19054 19343 19342 -3 19055 19056 19344 -3 19055 19344 19343 -3 19056 19057 19345 -3 19056 19345 19344 -3 19057 19058 19346 -3 19057 19346 19345 -3 19058 19059 19347 -3 19058 19347 19346 -3 19059 19060 19348 -3 19059 19348 19347 -3 19060 19061 19349 -3 19060 19349 19348 -3 19061 19062 19350 -3 19061 19350 19349 -3 19062 19063 19351 -3 19062 19351 19350 -3 19063 19064 19352 -3 19063 19352 19351 -3 19064 19065 19353 -3 19064 19353 19352 -3 19065 19066 19354 -3 19065 19354 19353 -3 19066 19067 19355 -3 19066 19355 19354 -3 19067 19068 19356 -3 19067 19356 19355 -3 19068 19069 19357 -3 19068 19357 19356 -3 19069 19070 19358 -3 19069 19358 19357 -3 19070 19071 19359 -3 19070 19359 19358 -3 19071 19072 19360 -3 19071 19360 19359 -3 19072 19073 19361 -3 19072 19361 19360 -3 19073 19074 19362 -3 19073 19362 19361 -3 19074 19075 19363 -3 19074 19363 19362 -3 19075 19076 19364 -3 19075 19364 19363 -3 19076 19077 19365 -3 19076 19365 19364 -3 19077 19078 19366 -3 19077 19366 19365 -3 19078 19079 19367 -3 19078 19367 19366 -3 19079 19080 19368 -3 19079 19368 19367 -3 19080 19081 19369 -3 19080 19369 19368 -3 19081 19082 19370 -3 19081 19370 19369 -3 19082 19083 19371 -3 
19082 19371 19370 -3 19083 19084 19372 -3 19083 19372 19371 -3 19084 19085 19373 -3 19084 19373 19372 -3 19085 19086 19374 -3 19085 19374 19373 -3 19086 19087 19375 -3 19086 19375 19374 -3 19087 19088 19376 -3 19087 19376 19375 -3 19088 19089 19377 -3 19088 19377 19376 -3 19089 19090 19378 -3 19089 19378 19377 -3 19090 19091 19379 -3 19090 19379 19378 -3 19091 19092 19380 -3 19091 19380 19379 -3 19092 19093 19381 -3 19092 19381 19380 -3 19093 19094 19382 -3 19093 19382 19381 -3 19094 19095 19383 -3 19094 19383 19382 -3 19095 19096 19384 -3 19095 19384 19383 -3 19096 19097 19385 -3 19096 19385 19384 -3 19097 19098 19386 -3 19097 19386 19385 -3 19098 19099 19387 -3 19098 19387 19386 -3 19099 19100 19388 -3 19099 19388 19387 -3 19100 19101 19389 -3 19100 19389 19388 -3 19101 19102 19390 -3 19101 19390 19389 -3 19102 19103 19391 -3 19102 19391 19390 -3 19103 19104 19392 -3 19103 19392 19391 -3 19104 19105 19393 -3 19104 19393 19392 -3 19105 19106 19394 -3 19105 19394 19393 -3 19106 19107 19395 -3 19106 19395 19394 -3 19107 19108 19396 -3 19107 19396 19395 -3 19108 19109 19397 -3 19108 19397 19396 -3 19109 19110 19398 -3 19109 19398 19397 -3 19110 19111 19399 -3 19110 19399 19398 -3 19111 19112 19400 -3 19111 19400 19399 -3 19112 19113 19401 -3 19112 19401 19400 -3 19113 19114 19402 -3 19113 19402 19401 -3 19114 19115 19403 -3 19114 19403 19402 -3 19115 19116 19404 -3 19115 19404 19403 -3 19117 19405 19406 -3 19117 19406 19118 -3 19117 19187 19405 -3 19475 19405 19187 -3 19118 19406 19407 -3 19118 19407 19119 -3 19119 19407 19408 -3 19119 19408 19120 -3 19120 19408 19409 -3 19120 19409 19121 -3 19121 19409 19410 -3 19121 19410 19122 -3 19122 19410 19411 -3 19122 19411 19123 -3 19123 19411 19412 -3 19123 19412 19124 -3 19124 19412 19413 -3 19124 19413 19125 -3 19125 19413 19414 -3 19125 19414 19126 -3 19126 19414 19415 -3 19126 19415 19127 -3 19127 19415 19416 -3 19127 19416 19128 -3 19128 19416 19417 -3 19128 19417 19129 -3 19129 19417 19418 -3 19129 19418 19130 -3 19130 19418 19419 -3 19130 19419 19131 -3 19131 19419 19420 -3 19131 19420 19132 -3 19132 19420 19421 -3 19132 19421 19133 -3 19133 19421 19422 -3 19133 19422 19134 -3 19134 19422 19423 -3 19134 19423 19135 -3 19135 19423 19424 -3 19135 19424 19136 -3 19136 19424 19425 -3 19136 19425 19137 -3 19137 19425 19426 -3 19137 19426 19138 -3 19138 19426 19427 -3 19138 19427 19139 -3 19139 19427 19428 -3 19139 19428 19140 -3 19140 19428 19429 -3 19140 19429 19141 -3 19141 19429 19430 -3 19141 19430 19142 -3 19142 19430 19431 -3 19142 19431 19143 -3 19143 19431 19432 -3 19143 19432 19144 -3 19144 19432 19145 -3 19433 19145 19432 -3 19145 19433 19146 -3 19434 19146 19433 -3 19146 19434 19147 -3 19435 19147 19434 -3 19147 19435 19148 -3 19436 19148 19435 -3 19148 19436 19149 -3 19437 19149 19436 -3 19149 19437 19150 -3 19438 19150 19437 -3 19150 19438 19151 -3 19439 19151 19438 -3 19151 19439 19152 -3 19440 19152 19439 -3 19152 19440 19153 -3 19441 19153 19440 -3 19153 19441 19154 -3 19442 19154 19441 -3 19154 19442 19155 -3 19443 19155 19442 -3 19155 19443 19156 -3 19444 19156 19443 -3 19156 19444 19157 -3 19445 19157 19444 -3 19157 19445 19158 -3 19446 19158 19445 -3 19158 19446 19159 -3 19447 19159 19446 -3 19159 19447 19160 -3 19448 19160 19447 -3 19160 19448 19161 -3 19449 19161 19448 -3 19161 19449 19162 -3 19450 19162 19449 -3 19162 19450 19163 -3 19451 19163 19450 -3 19163 19451 19164 -3 19452 19164 19451 -3 19164 19452 19165 -3 19453 19165 19452 -3 19165 19453 19166 -3 19454 19166 19453 -3 19166 19454 19167 -3 19455 19167 19454 -3 19167 
19455 19168 -3 19456 19168 19455 -3 19168 19456 19169 -3 19457 19169 19456 -3 19169 19457 19170 -3 19458 19170 19457 -3 19170 19458 19171 -3 19459 19171 19458 -3 19171 19459 19172 -3 19460 19172 19459 -3 19172 19460 19173 -3 19461 19173 19460 -3 19173 19461 19174 -3 19462 19174 19461 -3 19174 19462 19175 -3 19463 19175 19462 -3 19175 19463 19176 -3 19464 19176 19463 -3 19176 19464 19177 -3 19465 19177 19464 -3 19177 19465 19178 -3 19466 19178 19465 -3 19178 19466 19179 -3 19467 19179 19466 -3 19179 19467 19180 -3 19468 19180 19467 -3 19180 19468 19181 -3 19469 19181 19468 -3 19181 19469 19182 -3 19470 19182 19469 -3 19182 19470 19183 -3 19471 19183 19470 -3 19183 19471 19184 -3 19472 19184 19471 -3 19184 19472 19185 -3 19473 19185 19472 -3 19185 19473 19188 -3 19476 19188 19473 -3 19186 19474 19187 -3 19475 19187 19474 -3 19186 19191 19474 -3 19479 19474 19191 -3 19188 19476 19189 -3 19477 19189 19476 -3 19189 19477 19192 -3 19480 19192 19477 -3 19190 19478 19191 -3 19479 19191 19478 -3 19190 19194 19478 -3 19482 19478 19194 -3 19192 19480 19193 -3 19481 19193 19480 -3 19193 19481 19195 -3 19483 19195 19481 -3 19194 19196 19482 -3 19484 19482 19196 -3 19195 19483 19197 -3 19485 19197 19483 -3 19196 19198 19484 -3 19486 19484 19198 -3 19197 19485 19199 -3 19487 19199 19485 -3 19198 19200 19486 -3 19488 19486 19200 -3 19199 19487 19201 -3 19489 19201 19487 -3 19200 19202 19488 -3 19490 19488 19202 -3 19201 19489 19203 -3 19491 19203 19489 -3 19202 19204 19490 -3 19492 19490 19204 -3 19203 19491 19205 -3 19493 19205 19491 -3 19204 19206 19492 -3 19494 19492 19206 -3 19205 19493 19207 -3 19495 19207 19493 -3 19206 19208 19494 -3 19496 19494 19208 -3 19207 19495 19209 -3 19497 19209 19495 -3 19208 19210 19496 -3 19498 19496 19210 -3 19209 19497 19211 -3 19499 19211 19497 -3 19210 19212 19498 -3 19500 19498 19212 -3 19211 19499 19213 -3 19501 19213 19499 -3 19212 19214 19500 -3 19502 19500 19214 -3 19213 19501 19215 -3 19503 19215 19501 -3 19214 19216 19502 -3 19504 19502 19216 -3 19215 19503 19217 -3 19505 19217 19503 -3 19216 19218 19504 -3 19506 19504 19218 -3 19217 19505 19219 -3 19507 19219 19505 -3 19218 19220 19506 -3 19508 19506 19220 -3 19219 19507 19221 -3 19509 19221 19507 -3 19220 19222 19508 -3 19510 19508 19222 -3 19221 19509 19223 -3 19511 19223 19509 -3 19222 19224 19510 -3 19512 19510 19224 -3 19223 19511 19225 -3 19513 19225 19511 -3 19224 19226 19514 -3 19224 19514 19512 -3 19225 19513 19227 -3 19515 19227 19513 -3 19226 19228 19516 -3 19226 19516 19514 -3 19227 19515 19229 -3 19517 19229 19515 -3 19228 19230 19518 -3 19228 19518 19516 -3 19229 19517 19519 -3 19229 19519 19231 -3 19230 19232 19520 -3 19230 19520 19518 -3 19231 19519 19521 -3 19231 19521 19233 -3 19232 19234 19522 -3 19232 19522 19520 -3 19233 19521 19523 -3 19233 19523 19235 -3 19234 19236 19524 -3 19234 19524 19522 -3 19235 19523 19525 -3 19235 19525 19237 -3 19236 19238 19526 -3 19236 19526 19524 -3 19237 19525 19527 -3 19237 19527 19239 -3 19238 19240 19528 -3 19238 19528 19526 -3 19239 19527 19529 -3 19239 19529 19241 -3 19240 19242 19530 -3 19240 19530 19528 -3 19241 19529 19531 -3 19241 19531 19243 -3 19242 19244 19532 -3 19242 19532 19530 -3 19243 19531 19533 -3 19243 19533 19245 -3 19244 19246 19534 -3 19244 19534 19532 -3 19245 19533 19535 -3 19245 19535 19247 -3 19246 19248 19536 -3 19246 19536 19534 -3 19247 19535 19537 -3 19247 19537 19249 -3 19248 19250 19538 -3 19248 19538 19536 -3 19249 19537 19539 -3 19249 19539 19251 -3 19250 19252 19540 -3 19250 19540 19538 -3 19251 19539 19541 -3 19251 19541 
19253 -3 19252 19254 19542 -3 19252 19542 19540 -3 19253 19541 19543 -3 19253 19543 19255 -3 19254 19256 19544 -3 19254 19544 19542 -3 19255 19543 19545 -3 19255 19545 19257 -3 19256 19258 19546 -3 19256 19546 19544 -3 19257 19545 19547 -3 19257 19547 19259 -3 19258 19260 19548 -3 19258 19548 19546 -3 19259 19547 19549 -3 19259 19549 19261 -3 19260 19262 19550 -3 19260 19550 19548 -3 19261 19549 19551 -3 19261 19551 19263 -3 19262 19264 19552 -3 19262 19552 19550 -3 19263 19551 19553 -3 19263 19553 19265 -3 19264 19266 19554 -3 19264 19554 19552 -3 19265 19553 19555 -3 19265 19555 19267 -3 19266 19268 19556 -3 19266 19556 19554 -3 19267 19555 19557 -3 19267 19557 19269 -3 19268 19270 19558 -3 19268 19558 19556 -3 19269 19557 19559 -3 19269 19559 19271 -3 19270 19272 19560 -3 19270 19560 19558 -3 19271 19559 19561 -3 19271 19561 19273 -3 19272 19274 19562 -3 19272 19562 19560 -3 19273 19561 19563 -3 19273 19563 19275 -3 19274 19276 19564 -3 19274 19564 19562 -3 19275 19563 19565 -3 19275 19565 19277 -3 19276 19278 19566 -3 19276 19566 19564 -3 19277 19565 19567 -3 19277 19567 19279 -3 19278 19280 19568 -3 19278 19568 19566 -3 19279 19567 19569 -3 19279 19569 19281 -3 19280 19282 19570 -3 19280 19570 19568 -3 19281 19569 19571 -3 19281 19571 19283 -3 19282 19284 19572 -3 19282 19572 19570 -3 19283 19571 19573 -3 19283 19573 19285 -3 19284 19286 19574 -3 19284 19574 19572 -3 19285 19573 19575 -3 19285 19575 19287 -3 19286 19288 19576 -3 19286 19576 19574 -3 19287 19575 19577 -3 19287 19577 19289 -3 19288 19290 19578 -3 19288 19578 19576 -3 19289 19577 19579 -3 19289 19579 19291 -3 19290 19292 19580 -3 19290 19580 19578 -3 19291 19579 19581 -3 19291 19581 19293 -3 19292 19294 19582 -3 19292 19582 19580 -3 19293 19581 19583 -3 19293 19583 19295 -3 19294 19296 19584 -3 19294 19584 19582 -3 19295 19583 19585 -3 19295 19585 19297 -3 19296 19298 19586 -3 19296 19586 19584 -3 19297 19585 19587 -3 19297 19587 19299 -3 19298 19300 19588 -3 19298 19588 19586 -3 19299 19587 19589 -3 19299 19589 19301 -3 19300 19302 19590 -3 19300 19590 19588 -3 19301 19589 19591 -3 19301 19591 19303 -3 19302 19304 19592 -3 19302 19592 19590 -3 19303 19591 19593 -3 19303 19593 19305 -3 19304 19306 19594 -3 19304 19594 19592 -3 19305 19593 19595 -3 19305 19595 19307 -3 19306 19308 19596 -3 19306 19596 19594 -3 19307 19595 19597 -3 19307 19597 19309 -3 19308 19310 19596 -3 19598 19596 19310 -3 19309 19597 19599 -3 19309 19599 19311 -3 19310 19312 19598 -3 19600 19598 19312 -3 19311 19599 19601 -3 19311 19601 19313 -3 19312 19314 19600 -3 19602 19600 19314 -3 19313 19601 19603 -3 19313 19603 19315 -3 19314 19316 19602 -3 19604 19602 19316 -3 19315 19603 19317 -3 19605 19317 19603 -3 19316 19318 19604 -3 19606 19604 19318 -3 19317 19605 19319 -3 19607 19319 19605 -3 19318 19320 19606 -3 19608 19606 19320 -3 19319 19607 19321 -3 19609 19321 19607 -3 19320 19322 19608 -3 19610 19608 19322 -3 19321 19609 19323 -3 19611 19323 19609 -3 19322 19324 19610 -3 19612 19610 19324 -3 19323 19611 19325 -3 19613 19325 19611 -3 19324 19326 19612 -3 19614 19612 19326 -3 19325 19613 19327 -3 19615 19327 19613 -3 19326 19328 19614 -3 19616 19614 19328 -3 19327 19615 19331 -3 19619 19331 19615 -3 19328 19329 19616 -3 19617 19616 19329 -3 19329 19332 19617 -3 19620 19617 19332 -3 19330 19331 19618 -3 19619 19618 19331 -3 19330 19618 19335 -3 19623 19335 19618 -3 19332 19333 19620 -3 19621 19620 19333 -3 19333 19336 19621 -3 19624 19621 19336 -3 19334 19335 19622 -3 19623 19622 19335 -3 19334 19622 19404 -3 19692 19404 19622 -3 19336 19337 19624 
-3 19625 19624 19337 -3 19337 19338 19625 -3 19626 19625 19338 -3 19338 19339 19626 -3 19627 19626 19339 -3 19339 19340 19627 -3 19628 19627 19340 -3 19340 19341 19628 -3 19629 19628 19341 -3 19341 19342 19629 -3 19630 19629 19342 -3 19342 19343 19630 -3 19631 19630 19343 -3 19343 19344 19631 -3 19632 19631 19344 -3 19344 19345 19632 -3 19633 19632 19345 -3 19345 19346 19633 -3 19634 19633 19346 -3 19346 19347 19634 -3 19635 19634 19347 -3 19347 19348 19635 -3 19636 19635 19348 -3 19348 19349 19636 -3 19637 19636 19349 -3 19349 19350 19637 -3 19638 19637 19350 -3 19350 19351 19638 -3 19639 19638 19351 -3 19351 19352 19639 -3 19640 19639 19352 -3 19352 19353 19640 -3 19641 19640 19353 -3 19353 19354 19641 -3 19642 19641 19354 -3 19354 19355 19642 -3 19643 19642 19355 -3 19355 19356 19643 -3 19644 19643 19356 -3 19356 19357 19644 -3 19645 19644 19357 -3 19357 19358 19645 -3 19646 19645 19358 -3 19358 19359 19646 -3 19647 19646 19359 -3 19359 19360 19647 -3 19648 19647 19360 -3 19360 19361 19648 -3 19649 19648 19361 -3 19361 19362 19649 -3 19650 19649 19362 -3 19362 19363 19650 -3 19651 19650 19363 -3 19363 19364 19651 -3 19652 19651 19364 -3 19364 19365 19652 -3 19653 19652 19365 -3 19365 19366 19653 -3 19654 19653 19366 -3 19366 19367 19654 -3 19655 19654 19367 -3 19367 19368 19655 -3 19656 19655 19368 -3 19368 19369 19656 -3 19657 19656 19369 -3 19369 19370 19657 -3 19658 19657 19370 -3 19370 19371 19658 -3 19659 19658 19371 -3 19371 19372 19659 -3 19660 19659 19372 -3 19372 19373 19660 -3 19661 19660 19373 -3 19373 19374 19661 -3 19662 19661 19374 -3 19374 19375 19662 -3 19663 19662 19375 -3 19375 19376 19663 -3 19664 19663 19376 -3 19376 19377 19664 -3 19665 19664 19377 -3 19377 19378 19665 -3 19666 19665 19378 -3 19378 19379 19666 -3 19667 19666 19379 -3 19379 19380 19667 -3 19668 19667 19380 -3 19380 19381 19668 -3 19669 19668 19381 -3 19381 19382 19669 -3 19670 19669 19382 -3 19382 19383 19670 -3 19671 19670 19383 -3 19383 19384 19671 -3 19672 19671 19384 -3 19384 19385 19672 -3 19673 19672 19385 -3 19385 19386 19673 -3 19674 19673 19386 -3 19386 19387 19674 -3 19675 19674 19387 -3 19387 19388 19675 -3 19676 19675 19388 -3 19388 19389 19676 -3 19677 19676 19389 -3 19389 19390 19677 -3 19678 19677 19390 -3 19390 19391 19678 -3 19679 19678 19391 -3 19391 19392 19679 -3 19680 19679 19392 -3 19392 19393 19680 -3 19681 19680 19393 -3 19393 19394 19681 -3 19682 19681 19394 -3 19394 19395 19683 -3 19394 19683 19682 -3 19395 19396 19684 -3 19395 19684 19683 -3 19396 19397 19685 -3 19396 19685 19684 -3 19397 19398 19686 -3 19397 19686 19685 -3 19398 19399 19687 -3 19398 19687 19686 -3 19399 19400 19688 -3 19399 19688 19687 -3 19400 19401 19689 -3 19400 19689 19688 -3 19401 19402 19690 -3 19401 19690 19689 -3 19402 19403 19691 -3 19402 19691 19690 -3 19403 19404 19692 -3 19403 19692 19691 -3 19405 19693 19694 -3 19405 19694 19406 -3 19405 19475 19763 -3 19405 19763 19693 -3 19406 19694 19695 -3 19406 19695 19407 -3 19407 19695 19696 -3 19407 19696 19408 -3 19408 19696 19697 -3 19408 19697 19409 -3 19409 19697 19698 -3 19409 19698 19410 -3 19410 19698 19699 -3 19410 19699 19411 -3 19411 19699 19700 -3 19411 19700 19412 -3 19412 19700 19701 -3 19412 19701 19413 -3 19413 19701 19702 -3 19413 19702 19414 -3 19414 19702 19703 -3 19414 19703 19415 -3 19415 19703 19704 -3 19415 19704 19416 -3 19416 19704 19705 -3 19416 19705 19417 -3 19417 19705 19706 -3 19417 19706 19418 -3 19418 19706 19707 -3 19418 19707 19419 -3 19419 19707 19708 -3 19419 19708 19420 -3 19420 19708 19709 -3 19420 19709 19421 -3 
19421 19709 19710 -3 19421 19710 19422 -3 19422 19710 19711 -3 19422 19711 19423 -3 19423 19711 19712 -3 19423 19712 19424 -3 19424 19712 19713 -3 19424 19713 19425 -3 19425 19713 19714 -3 19425 19714 19426 -3 19426 19714 19715 -3 19426 19715 19427 -3 19427 19715 19716 -3 19427 19716 19428 -3 19428 19716 19717 -3 19428 19717 19429 -3 19429 19717 19718 -3 19429 19718 19430 -3 19430 19718 19719 -3 19430 19719 19431 -3 19431 19719 19720 -3 19431 19720 19432 -3 19432 19720 19721 -3 19432 19721 19433 -3 19433 19721 19722 -3 19433 19722 19434 -3 19434 19722 19723 -3 19434 19723 19435 -3 19435 19723 19724 -3 19435 19724 19436 -3 19436 19724 19725 -3 19436 19725 19437 -3 19437 19725 19726 -3 19437 19726 19438 -3 19438 19726 19727 -3 19438 19727 19439 -3 19439 19727 19728 -3 19439 19728 19440 -3 19440 19728 19729 -3 19440 19729 19441 -3 19441 19729 19730 -3 19441 19730 19442 -3 19442 19730 19731 -3 19442 19731 19443 -3 19443 19731 19732 -3 19443 19732 19444 -3 19444 19732 19733 -3 19444 19733 19445 -3 19445 19733 19734 -3 19445 19734 19446 -3 19446 19734 19735 -3 19446 19735 19447 -3 19447 19735 19736 -3 19447 19736 19448 -3 19448 19736 19737 -3 19448 19737 19449 -3 19449 19737 19738 -3 19449 19738 19450 -3 19450 19738 19739 -3 19450 19739 19451 -3 19451 19739 19740 -3 19451 19740 19452 -3 19452 19740 19741 -3 19452 19741 19453 -3 19453 19741 19742 -3 19453 19742 19454 -3 19454 19742 19743 -3 19454 19743 19455 -3 19455 19743 19744 -3 19455 19744 19456 -3 19456 19744 19745 -3 19456 19745 19457 -3 19457 19745 19746 -3 19457 19746 19458 -3 19458 19746 19747 -3 19458 19747 19459 -3 19459 19747 19748 -3 19459 19748 19460 -3 19460 19748 19749 -3 19460 19749 19461 -3 19461 19749 19750 -3 19461 19750 19462 -3 19462 19750 19751 -3 19462 19751 19463 -3 19463 19751 19752 -3 19463 19752 19464 -3 19464 19752 19753 -3 19464 19753 19465 -3 19465 19753 19754 -3 19465 19754 19466 -3 19466 19754 19755 -3 19466 19755 19467 -3 19467 19755 19756 -3 19467 19756 19468 -3 19468 19756 19757 -3 19468 19757 19469 -3 19469 19757 19758 -3 19469 19758 19470 -3 19470 19758 19759 -3 19470 19759 19471 -3 19471 19759 19760 -3 19471 19760 19472 -3 19472 19760 19761 -3 19472 19761 19473 -3 19473 19761 19764 -3 19473 19764 19476 -3 19474 19762 19763 -3 19474 19763 19475 -3 19474 19479 19767 -3 19474 19767 19762 -3 19476 19764 19765 -3 19476 19765 19477 -3 19477 19765 19768 -3 19477 19768 19480 -3 19478 19766 19767 -3 19478 19767 19479 -3 19478 19482 19766 -3 19770 19766 19482 -3 19480 19768 19769 -3 19480 19769 19481 -3 19481 19769 19771 -3 19481 19771 19483 -3 19482 19484 19770 -3 19772 19770 19484 -3 19483 19771 19773 -3 19483 19773 19485 -3 19484 19486 19772 -3 19774 19772 19486 -3 19485 19773 19775 -3 19485 19775 19487 -3 19486 19488 19774 -3 19776 19774 19488 -3 19487 19775 19489 -3 19777 19489 19775 -3 19488 19490 19776 -3 19778 19776 19490 -3 19489 19777 19491 -3 19779 19491 19777 -3 19490 19492 19778 -3 19780 19778 19492 -3 19491 19779 19493 -3 19781 19493 19779 -3 19492 19494 19780 -3 19782 19780 19494 -3 19493 19781 19495 -3 19783 19495 19781 -3 19494 19496 19782 -3 19784 19782 19496 -3 19495 19783 19497 -3 19785 19497 19783 -3 19496 19498 19784 -3 19786 19784 19498 -3 19497 19785 19499 -3 19787 19499 19785 -3 19498 19500 19786 -3 19788 19786 19500 -3 19499 19787 19501 -3 19789 19501 19787 -3 19500 19502 19788 -3 19790 19788 19502 -3 19501 19789 19503 -3 19791 19503 19789 -3 19502 19504 19790 -3 19792 19790 19504 -3 19503 19791 19505 -3 19793 19505 19791 -3 19504 19506 19792 -3 19794 19792 19506 -3 19505 19793 19507 -3 19795 
19507 19793 -3 19506 19508 19794 -3 19796 19794 19508 -3 19507 19795 19509 -3 19797 19509 19795 -3 19508 19510 19796 -3 19798 19796 19510 -3 19509 19797 19511 -3 19799 19511 19797 -3 19510 19512 19798 -3 19800 19798 19512 -3 19511 19799 19513 -3 19801 19513 19799 -3 19512 19514 19800 -3 19802 19800 19514 -3 19513 19801 19515 -3 19803 19515 19801 -3 19514 19516 19802 -3 19804 19802 19516 -3 19515 19803 19517 -3 19805 19517 19803 -3 19516 19518 19804 -3 19806 19804 19518 -3 19517 19805 19519 -3 19807 19519 19805 -3 19518 19520 19806 -3 19808 19806 19520 -3 19519 19807 19521 -3 19809 19521 19807 -3 19520 19522 19808 -3 19810 19808 19522 -3 19521 19809 19523 -3 19811 19523 19809 -3 19522 19524 19810 -3 19812 19810 19524 -3 19523 19811 19525 -3 19813 19525 19811 -3 19524 19526 19812 -3 19814 19812 19526 -3 19525 19813 19527 -3 19815 19527 19813 -3 19526 19528 19814 -3 19816 19814 19528 -3 19527 19815 19529 -3 19817 19529 19815 -3 19528 19530 19816 -3 19818 19816 19530 -3 19529 19817 19531 -3 19819 19531 19817 -3 19530 19532 19818 -3 19820 19818 19532 -3 19531 19819 19533 -3 19821 19533 19819 -3 19532 19534 19820 -3 19822 19820 19534 -3 19533 19821 19535 -3 19823 19535 19821 -3 19534 19536 19822 -3 19824 19822 19536 -3 19535 19823 19537 -3 19825 19537 19823 -3 19536 19538 19824 -3 19826 19824 19538 -3 19537 19825 19539 -3 19827 19539 19825 -3 19538 19540 19826 -3 19828 19826 19540 -3 19539 19827 19541 -3 19829 19541 19827 -3 19540 19542 19828 -3 19830 19828 19542 -3 19541 19829 19543 -3 19831 19543 19829 -3 19542 19544 19830 -3 19832 19830 19544 -3 19543 19831 19545 -3 19833 19545 19831 -3 19544 19546 19832 -3 19834 19832 19546 -3 19545 19833 19547 -3 19835 19547 19833 -3 19546 19548 19834 -3 19836 19834 19548 -3 19547 19835 19549 -3 19837 19549 19835 -3 19548 19550 19836 -3 19838 19836 19550 -3 19549 19837 19551 -3 19839 19551 19837 -3 19550 19552 19838 -3 19840 19838 19552 -3 19551 19839 19553 -3 19841 19553 19839 -3 19552 19554 19840 -3 19842 19840 19554 -3 19553 19841 19555 -3 19843 19555 19841 -3 19554 19556 19842 -3 19844 19842 19556 -3 19555 19843 19557 -3 19845 19557 19843 -3 19556 19558 19844 -3 19846 19844 19558 -3 19557 19845 19559 -3 19847 19559 19845 -3 19558 19560 19846 -3 19848 19846 19560 -3 19559 19847 19561 -3 19849 19561 19847 -3 19560 19562 19848 -3 19850 19848 19562 -3 19561 19849 19563 -3 19851 19563 19849 -3 19562 19564 19850 -3 19852 19850 19564 -3 19563 19851 19565 -3 19853 19565 19851 -3 19564 19566 19852 -3 19854 19852 19566 -3 19565 19853 19567 -3 19855 19567 19853 -3 19566 19568 19856 -3 19566 19856 19854 -3 19567 19855 19569 -3 19857 19569 19855 -3 19568 19570 19858 -3 19568 19858 19856 -3 19569 19857 19571 -3 19859 19571 19857 -3 19570 19572 19860 -3 19570 19860 19858 -3 19571 19859 19573 -3 19861 19573 19859 -3 19572 19574 19862 -3 19572 19862 19860 -3 19573 19861 19575 -3 19863 19575 19861 -3 19574 19576 19864 -3 19574 19864 19862 -3 19575 19863 19865 -3 19575 19865 19577 -3 19576 19578 19866 -3 19576 19866 19864 -3 19577 19865 19867 -3 19577 19867 19579 -3 19578 19580 19868 -3 19578 19868 19866 -3 19579 19867 19869 -3 19579 19869 19581 -3 19580 19582 19870 -3 19580 19870 19868 -3 19581 19869 19871 -3 19581 19871 19583 -3 19582 19584 19872 -3 19582 19872 19870 -3 19583 19871 19873 -3 19583 19873 19585 -3 19584 19586 19874 -3 19584 19874 19872 -3 19585 19873 19875 -3 19585 19875 19587 -3 19586 19588 19876 -3 19586 19876 19874 -3 19587 19875 19877 -3 19587 19877 19589 -3 19588 19590 19878 -3 19588 19878 19876 -3 19589 19877 19879 -3 19589 19879 19591 -3 19590 19592 
19880 -3 19590 19880 19878 -3 19591 19879 19881 -3 19591 19881 19593 -3 19592 19594 19882 -3 19592 19882 19880 -3 19593 19881 19883 -3 19593 19883 19595 -3 19594 19596 19884 -3 19594 19884 19882 -3 19595 19883 19885 -3 19595 19885 19597 -3 19596 19598 19886 -3 19596 19886 19884 -3 19597 19885 19887 -3 19597 19887 19599 -3 19598 19600 19888 -3 19598 19888 19886 -3 19599 19887 19889 -3 19599 19889 19601 -3 19600 19602 19890 -3 19600 19890 19888 -3 19601 19889 19891 -3 19601 19891 19603 -3 19602 19604 19892 -3 19602 19892 19890 -3 19603 19891 19893 -3 19603 19893 19605 -3 19604 19606 19894 -3 19604 19894 19892 -3 19605 19893 19895 -3 19605 19895 19607 -3 19606 19608 19896 -3 19606 19896 19894 -3 19607 19895 19897 -3 19607 19897 19609 -3 19608 19610 19898 -3 19608 19898 19896 -3 19609 19897 19899 -3 19609 19899 19611 -3 19610 19612 19900 -3 19610 19900 19898 -3 19611 19899 19901 -3 19611 19901 19613 -3 19612 19614 19902 -3 19612 19902 19900 -3 19613 19901 19903 -3 19613 19903 19615 -3 19614 19616 19904 -3 19614 19904 19902 -3 19615 19903 19907 -3 19615 19907 19619 -3 19616 19617 19905 -3 19616 19905 19904 -3 19617 19620 19908 -3 19617 19908 19905 -3 19618 19619 19907 -3 19618 19907 19906 -3 19618 19906 19911 -3 19618 19911 19623 -3 19620 19621 19909 -3 19620 19909 19908 -3 19621 19624 19912 -3 19621 19912 19909 -3 19622 19623 19911 -3 19622 19911 19910 -3 19622 19910 19980 -3 19622 19980 19692 -3 19624 19625 19913 -3 19624 19913 19912 -3 19625 19626 19914 -3 19625 19914 19913 -3 19626 19627 19915 -3 19626 19915 19914 -3 19627 19628 19916 -3 19627 19916 19915 -3 19628 19629 19917 -3 19628 19917 19916 -3 19629 19630 19918 -3 19629 19918 19917 -3 19630 19631 19919 -3 19630 19919 19918 -3 19631 19632 19920 -3 19631 19920 19919 -3 19632 19633 19921 -3 19632 19921 19920 -3 19633 19634 19922 -3 19633 19922 19921 -3 19634 19635 19923 -3 19634 19923 19922 -3 19635 19636 19924 -3 19635 19924 19923 -3 19636 19637 19925 -3 19636 19925 19924 -3 19637 19638 19926 -3 19637 19926 19925 -3 19638 19639 19927 -3 19638 19927 19926 -3 19639 19640 19928 -3 19639 19928 19927 -3 19640 19641 19929 -3 19640 19929 19928 -3 19641 19642 19930 -3 19641 19930 19929 -3 19642 19643 19931 -3 19642 19931 19930 -3 19643 19644 19932 -3 19643 19932 19931 -3 19644 19645 19933 -3 19644 19933 19932 -3 19645 19646 19934 -3 19645 19934 19933 -3 19646 19647 19935 -3 19646 19935 19934 -3 19647 19648 19936 -3 19647 19936 19935 -3 19648 19649 19937 -3 19648 19937 19936 -3 19649 19650 19938 -3 19649 19938 19937 -3 19650 19651 19939 -3 19650 19939 19938 -3 19651 19652 19940 -3 19651 19940 19939 -3 19652 19653 19940 -3 19941 19940 19653 -3 19653 19654 19941 -3 19942 19941 19654 -3 19654 19655 19942 -3 19943 19942 19655 -3 19655 19656 19943 -3 19944 19943 19656 -3 19656 19657 19944 -3 19945 19944 19657 -3 19657 19658 19945 -3 19946 19945 19658 -3 19658 19659 19946 -3 19947 19946 19659 -3 19659 19660 19947 -3 19948 19947 19660 -3 19660 19661 19948 -3 19949 19948 19661 -3 19661 19662 19949 -3 19950 19949 19662 -3 19662 19663 19950 -3 19951 19950 19663 -3 19663 19664 19951 -3 19952 19951 19664 -3 19664 19665 19952 -3 19953 19952 19665 -3 19665 19666 19953 -3 19954 19953 19666 -3 19666 19667 19954 -3 19955 19954 19667 -3 19667 19668 19955 -3 19956 19955 19668 -3 19668 19669 19956 -3 19957 19956 19669 -3 19669 19670 19957 -3 19958 19957 19670 -3 19670 19671 19958 -3 19959 19958 19671 -3 19671 19672 19959 -3 19960 19959 19672 -3 19672 19673 19960 -3 19961 19960 19673 -3 19673 19674 19961 -3 19962 19961 19674 -3 19674 19675 19962 -3 19963 19962 19675 
-3 19675 19676 19963 -3 19964 19963 19676 -3 19676 19677 19964 -3 19965 19964 19677 -3 19677 19678 19965 -3 19966 19965 19678 -3 19678 19679 19966 -3 19967 19966 19679 -3 19679 19680 19967 -3 19968 19967 19680 -3 19680 19681 19968 -3 19969 19968 19681 -3 19681 19682 19969 -3 19970 19969 19682 -3 19682 19683 19970 -3 19971 19970 19683 -3 19683 19684 19971 -3 19972 19971 19684 -3 19684 19685 19972 -3 19973 19972 19685 -3 19685 19686 19973 -3 19974 19973 19686 -3 19686 19687 19974 -3 19975 19974 19687 -3 19687 19688 19975 -3 19976 19975 19688 -3 19688 19689 19976 -3 19977 19976 19689 -3 19689 19690 19977 -3 19978 19977 19690 -3 19690 19691 19978 -3 19979 19978 19691 -3 19691 19692 19979 -3 19980 19979 19692 -3 19693 19981 19694 -3 19982 19694 19981 -3 19693 19763 19981 -3 20051 19981 19763 -3 19694 19982 19695 -3 19983 19695 19982 -3 19695 19983 19696 -3 19984 19696 19983 -3 19696 19984 19697 -3 19985 19697 19984 -3 19697 19985 19698 -3 19986 19698 19985 -3 19698 19986 19699 -3 19987 19699 19986 -3 19699 19987 19700 -3 19988 19700 19987 -3 19700 19988 19701 -3 19989 19701 19988 -3 19701 19989 19702 -3 19990 19702 19989 -3 19702 19990 19703 -3 19991 19703 19990 -3 19703 19991 19704 -3 19992 19704 19991 -3 19704 19992 19705 -3 19993 19705 19992 -3 19705 19993 19706 -3 19994 19706 19993 -3 19706 19994 19707 -3 19995 19707 19994 -3 19707 19995 19708 -3 19996 19708 19995 -3 19708 19996 19709 -3 19997 19709 19996 -3 19709 19997 19710 -3 19998 19710 19997 -3 19710 19998 19711 -3 19999 19711 19998 -3 19711 19999 19712 -3 20000 19712 19999 -3 19712 20000 19713 -3 20001 19713 20000 -3 19713 20001 19714 -3 20002 19714 20001 -3 19714 20002 19715 -3 20003 19715 20002 -3 19715 20003 19716 -3 20004 19716 20003 -3 19716 20004 19717 -3 20005 19717 20004 -3 19717 20005 19718 -3 20006 19718 20005 -3 19718 20006 19719 -3 20007 19719 20006 -3 19719 20007 19720 -3 20008 19720 20007 -3 19720 20008 19721 -3 20009 19721 20008 -3 19721 20009 19722 -3 20010 19722 20009 -3 19722 20010 19723 -3 20011 19723 20010 -3 19723 20011 19724 -3 20012 19724 20011 -3 19724 20012 19725 -3 20013 19725 20012 -3 19725 20013 19726 -3 20014 19726 20013 -3 19726 20014 19727 -3 20015 19727 20014 -3 19727 20015 19728 -3 20016 19728 20015 -3 19728 20016 19729 -3 20017 19729 20016 -3 19729 20017 19730 -3 20018 19730 20017 -3 19730 20018 19731 -3 20019 19731 20018 -3 19731 20019 19732 -3 20020 19732 20019 -3 19732 20020 19733 -3 20021 19733 20020 -3 19733 20021 19734 -3 20022 19734 20021 -3 19734 20022 19735 -3 20023 19735 20022 -3 19735 20023 19736 -3 20024 19736 20023 -3 19736 20024 19737 -3 20025 19737 20024 -3 19737 20025 19738 -3 20026 19738 20025 -3 19738 20026 19739 -3 20027 19739 20026 -3 19739 20027 19740 -3 20028 19740 20027 -3 19740 20028 19741 -3 20029 19741 20028 -3 19741 20029 19742 -3 20030 19742 20029 -3 19742 20030 19743 -3 20031 19743 20030 -3 19743 20031 19744 -3 20032 19744 20031 -3 19744 20032 19745 -3 20033 19745 20032 -3 19745 20033 19746 -3 20034 19746 20033 -3 19746 20034 19747 -3 20035 19747 20034 -3 19747 20035 20036 -3 19747 20036 19748 -3 19748 20036 20037 -3 19748 20037 19749 -3 19749 20037 20038 -3 19749 20038 19750 -3 19750 20038 20039 -3 19750 20039 19751 -3 19751 20039 20040 -3 19751 20040 19752 -3 19752 20040 20041 -3 19752 20041 19753 -3 19753 20041 20042 -3 19753 20042 19754 -3 19754 20042 20043 -3 19754 20043 19755 -3 19755 20043 20044 -3 19755 20044 19756 -3 19756 20044 20045 -3 19756 20045 19757 -3 19757 20045 20046 -3 19757 20046 19758 -3 19758 20046 20047 -3 19758 20047 19759 -3 19759 20047 20048 -3 
[Mesh face-connectivity data block: consecutive records of the form "3 <i> <j> <k>" (one triangle per record, vertex indices roughly 19759–21656) from a polygonal surface-mesh data file carried in this patch; numeric payload omitted.]
-3 21367 21656 21655 -3 21368 21369 21657 -3 21368 21657 21656 -3 21369 21370 21658 -3 21369 21658 21657 -3 21370 21371 21659 -3 21370 21659 21658 -3 21371 21372 21660 -3 21371 21660 21659 -3 21372 21373 21661 -3 21372 21661 21660 -3 21373 21374 21662 -3 21373 21662 21661 -3 21374 21375 21663 -3 21374 21663 21662 -3 21375 21376 21664 -3 21375 21664 21663 -3 21376 21377 21665 -3 21376 21665 21664 -3 21377 21378 21666 -3 21377 21666 21665 -3 21378 21379 21667 -3 21378 21667 21666 -3 21379 21380 21668 -3 21379 21668 21667 -3 21380 21381 21669 -3 21380 21669 21668 -3 21381 21382 21670 -3 21381 21670 21669 -3 21382 21383 21671 -3 21382 21671 21670 -3 21383 21384 21672 -3 21383 21672 21671 -3 21384 21385 21673 -3 21384 21673 21672 -3 21385 21386 21674 -3 21385 21674 21673 -3 21386 21387 21675 -3 21386 21675 21674 -3 21387 21388 21676 -3 21387 21676 21675 -3 21388 21389 21677 -3 21388 21677 21676 -3 21389 21390 21678 -3 21389 21678 21677 -3 21390 21391 21679 -3 21390 21679 21678 -3 21391 21392 21680 -3 21391 21680 21679 -3 21392 21393 21681 -3 21392 21681 21680 -3 21393 21394 21682 -3 21393 21682 21681 -3 21394 21395 21683 -3 21394 21683 21682 -3 21395 21396 21684 -3 21395 21684 21683 -3 21396 21397 21685 -3 21396 21685 21684 -3 21397 21398 21686 -3 21397 21686 21685 -3 21398 21399 21687 -3 21398 21687 21686 -3 21399 21400 21688 -3 21399 21688 21687 -3 21400 21401 21689 -3 21400 21689 21688 -3 21401 21402 21690 -3 21401 21690 21689 -3 21402 21403 21691 -3 21402 21691 21690 -3 21403 21404 21692 -3 21403 21692 21691 -3 21404 21405 21693 -3 21404 21693 21692 -3 21405 21406 21694 -3 21405 21694 21693 -3 21406 21407 21695 -3 21406 21695 21694 -3 21407 21408 21696 -3 21407 21696 21695 -3 21408 21409 21697 -3 21408 21697 21696 -3 21409 21410 21698 -3 21409 21698 21697 -3 21410 21411 21699 -3 21410 21699 21698 -3 21411 21412 21700 -3 21411 21700 21699 -3 21412 21413 21701 -3 21412 21701 21700 -3 21413 21414 21702 -3 21413 21702 21701 -3 21414 21415 21703 -3 21414 21703 21702 -3 21415 21416 21704 -3 21415 21704 21703 -3 21416 21417 21704 -3 21705 21704 21417 -3 21417 21418 21705 -3 21706 21705 21418 -3 21418 21419 21706 -3 21707 21706 21419 -3 21419 21420 21707 -3 21708 21707 21420 -3 21421 21709 21710 -3 21421 21710 21422 -3 21421 21491 21709 -3 21779 21709 21491 -3 21422 21710 21711 -3 21422 21711 21423 -3 21423 21711 21712 -3 21423 21712 21424 -3 21424 21712 21713 -3 21424 21713 21425 -3 21425 21713 21714 -3 21425 21714 21426 -3 21426 21714 21715 -3 21426 21715 21427 -3 21427 21715 21716 -3 21427 21716 21428 -3 21428 21716 21717 -3 21428 21717 21429 -3 21429 21717 21718 -3 21429 21718 21430 -3 21430 21718 21719 -3 21430 21719 21431 -3 21431 21719 21432 -3 21720 21432 21719 -3 21432 21720 21433 -3 21721 21433 21720 -3 21433 21721 21434 -3 21722 21434 21721 -3 21434 21722 21435 -3 21723 21435 21722 -3 21435 21723 21436 -3 21724 21436 21723 -3 21436 21724 21437 -3 21725 21437 21724 -3 21437 21725 21438 -3 21726 21438 21725 -3 21438 21726 21439 -3 21727 21439 21726 -3 21439 21727 21440 -3 21728 21440 21727 -3 21440 21728 21441 -3 21729 21441 21728 -3 21441 21729 21442 -3 21730 21442 21729 -3 21442 21730 21443 -3 21731 21443 21730 -3 21443 21731 21444 -3 21732 21444 21731 -3 21444 21732 21445 -3 21733 21445 21732 -3 21445 21733 21446 -3 21734 21446 21733 -3 21446 21734 21447 -3 21735 21447 21734 -3 21447 21735 21448 -3 21736 21448 21735 -3 21448 21736 21449 -3 21737 21449 21736 -3 21449 21737 21450 -3 21738 21450 21737 -3 21450 21738 21451 -3 21739 21451 21738 -3 21451 21739 21452 -3 21740 21452 21739 -3 
21452 21740 21453 -3 21741 21453 21740 -3 21453 21741 21454 -3 21742 21454 21741 -3 21454 21742 21455 -3 21743 21455 21742 -3 21455 21743 21456 -3 21744 21456 21743 -3 21456 21744 21457 -3 21745 21457 21744 -3 21457 21745 21458 -3 21746 21458 21745 -3 21458 21746 21459 -3 21747 21459 21746 -3 21459 21747 21460 -3 21748 21460 21747 -3 21460 21748 21461 -3 21749 21461 21748 -3 21461 21749 21462 -3 21750 21462 21749 -3 21462 21750 21463 -3 21751 21463 21750 -3 21463 21751 21464 -3 21752 21464 21751 -3 21464 21752 21465 -3 21753 21465 21752 -3 21465 21753 21466 -3 21754 21466 21753 -3 21466 21754 21467 -3 21755 21467 21754 -3 21467 21755 21468 -3 21756 21468 21755 -3 21468 21756 21469 -3 21757 21469 21756 -3 21469 21757 21470 -3 21758 21470 21757 -3 21470 21758 21471 -3 21759 21471 21758 -3 21471 21759 21472 -3 21760 21472 21759 -3 21472 21760 21473 -3 21761 21473 21760 -3 21473 21761 21474 -3 21762 21474 21761 -3 21474 21762 21475 -3 21763 21475 21762 -3 21475 21763 21476 -3 21764 21476 21763 -3 21476 21764 21477 -3 21765 21477 21764 -3 21477 21765 21478 -3 21766 21478 21765 -3 21478 21766 21479 -3 21767 21479 21766 -3 21479 21767 21480 -3 21768 21480 21767 -3 21480 21768 21481 -3 21769 21481 21768 -3 21481 21769 21482 -3 21770 21482 21769 -3 21482 21770 21483 -3 21771 21483 21770 -3 21483 21771 21484 -3 21772 21484 21771 -3 21484 21772 21485 -3 21773 21485 21772 -3 21485 21773 21486 -3 21774 21486 21773 -3 21486 21774 21487 -3 21775 21487 21774 -3 21487 21775 21488 -3 21776 21488 21775 -3 21488 21776 21489 -3 21777 21489 21776 -3 21489 21777 21492 -3 21780 21492 21777 -3 21490 21778 21491 -3 21779 21491 21778 -3 21490 21495 21778 -3 21783 21778 21495 -3 21492 21780 21493 -3 21781 21493 21780 -3 21493 21781 21496 -3 21784 21496 21781 -3 21494 21782 21495 -3 21783 21495 21782 -3 21494 21498 21782 -3 21786 21782 21498 -3 21496 21784 21497 -3 21785 21497 21784 -3 21497 21785 21499 -3 21787 21499 21785 -3 21498 21500 21786 -3 21788 21786 21500 -3 21499 21787 21501 -3 21789 21501 21787 -3 21500 21502 21788 -3 21790 21788 21502 -3 21501 21789 21503 -3 21791 21503 21789 -3 21502 21504 21790 -3 21792 21790 21504 -3 21503 21791 21505 -3 21793 21505 21791 -3 21504 21506 21792 -3 21794 21792 21506 -3 21505 21793 21507 -3 21795 21507 21793 -3 21506 21508 21796 -3 21506 21796 21794 -3 21507 21795 21509 -3 21797 21509 21795 -3 21508 21510 21798 -3 21508 21798 21796 -3 21509 21797 21511 -3 21799 21511 21797 -3 21510 21512 21800 -3 21510 21800 21798 -3 21511 21799 21513 -3 21801 21513 21799 -3 21512 21514 21802 -3 21512 21802 21800 -3 21513 21801 21515 -3 21803 21515 21801 -3 21514 21516 21804 -3 21514 21804 21802 -3 21515 21803 21517 -3 21805 21517 21803 -3 21516 21518 21806 -3 21516 21806 21804 -3 21517 21805 21519 -3 21807 21519 21805 -3 21518 21520 21808 -3 21518 21808 21806 -3 21519 21807 21521 -3 21809 21521 21807 -3 21520 21522 21810 -3 21520 21810 21808 -3 21521 21809 21523 -3 21811 21523 21809 -3 21522 21524 21812 -3 21522 21812 21810 -3 21523 21811 21813 -3 21523 21813 21525 -3 21524 21526 21814 -3 21524 21814 21812 -3 21525 21813 21815 -3 21525 21815 21527 -3 21526 21528 21816 -3 21526 21816 21814 -3 21527 21815 21817 -3 21527 21817 21529 -3 21528 21530 21818 -3 21528 21818 21816 -3 21529 21817 21819 -3 21529 21819 21531 -3 21530 21532 21820 -3 21530 21820 21818 -3 21531 21819 21821 -3 21531 21821 21533 -3 21532 21534 21822 -3 21532 21822 21820 -3 21533 21821 21823 -3 21533 21823 21535 -3 21534 21536 21824 -3 21534 21824 21822 -3 21535 21823 21825 -3 21535 21825 21537 -3 21536 21538 21826 -3 21536 
21826 21824 -3 21537 21825 21827 -3 21537 21827 21539 -3 21538 21540 21828 -3 21538 21828 21826 -3 21539 21827 21829 -3 21539 21829 21541 -3 21540 21542 21830 -3 21540 21830 21828 -3 21541 21829 21831 -3 21541 21831 21543 -3 21542 21544 21832 -3 21542 21832 21830 -3 21543 21831 21833 -3 21543 21833 21545 -3 21544 21546 21834 -3 21544 21834 21832 -3 21545 21833 21835 -3 21545 21835 21547 -3 21546 21548 21836 -3 21546 21836 21834 -3 21547 21835 21837 -3 21547 21837 21549 -3 21548 21550 21838 -3 21548 21838 21836 -3 21549 21837 21839 -3 21549 21839 21551 -3 21550 21552 21840 -3 21550 21840 21838 -3 21551 21839 21841 -3 21551 21841 21553 -3 21552 21554 21842 -3 21552 21842 21840 -3 21553 21841 21843 -3 21553 21843 21555 -3 21554 21556 21844 -3 21554 21844 21842 -3 21555 21843 21845 -3 21555 21845 21557 -3 21556 21558 21846 -3 21556 21846 21844 -3 21557 21845 21847 -3 21557 21847 21559 -3 21558 21560 21848 -3 21558 21848 21846 -3 21559 21847 21849 -3 21559 21849 21561 -3 21560 21562 21850 -3 21560 21850 21848 -3 21561 21849 21851 -3 21561 21851 21563 -3 21562 21564 21852 -3 21562 21852 21850 -3 21563 21851 21853 -3 21563 21853 21565 -3 21564 21566 21854 -3 21564 21854 21852 -3 21565 21853 21855 -3 21565 21855 21567 -3 21566 21568 21856 -3 21566 21856 21854 -3 21567 21855 21857 -3 21567 21857 21569 -3 21568 21570 21858 -3 21568 21858 21856 -3 21569 21857 21859 -3 21569 21859 21571 -3 21570 21572 21860 -3 21570 21860 21858 -3 21571 21859 21861 -3 21571 21861 21573 -3 21572 21574 21862 -3 21572 21862 21860 -3 21573 21861 21863 -3 21573 21863 21575 -3 21574 21576 21864 -3 21574 21864 21862 -3 21575 21863 21865 -3 21575 21865 21577 -3 21576 21578 21866 -3 21576 21866 21864 -3 21577 21865 21867 -3 21577 21867 21579 -3 21578 21580 21868 -3 21578 21868 21866 -3 21579 21867 21869 -3 21579 21869 21581 -3 21580 21582 21870 -3 21580 21870 21868 -3 21581 21869 21871 -3 21581 21871 21583 -3 21582 21584 21872 -3 21582 21872 21870 -3 21583 21871 21873 -3 21583 21873 21585 -3 21584 21586 21874 -3 21584 21874 21872 -3 21585 21873 21875 -3 21585 21875 21587 -3 21586 21588 21876 -3 21586 21876 21874 -3 21587 21875 21877 -3 21587 21877 21589 -3 21588 21590 21878 -3 21588 21878 21876 -3 21589 21877 21879 -3 21589 21879 21591 -3 21590 21592 21880 -3 21590 21880 21878 -3 21591 21879 21881 -3 21591 21881 21593 -3 21592 21594 21882 -3 21592 21882 21880 -3 21593 21881 21883 -3 21593 21883 21595 -3 21594 21596 21884 -3 21594 21884 21882 -3 21595 21883 21885 -3 21595 21885 21597 -3 21596 21598 21884 -3 21886 21884 21598 -3 21597 21885 21887 -3 21597 21887 21599 -3 21598 21600 21886 -3 21888 21886 21600 -3 21599 21887 21889 -3 21599 21889 21601 -3 21600 21602 21888 -3 21890 21888 21602 -3 21601 21889 21891 -3 21601 21891 21603 -3 21602 21604 21890 -3 21892 21890 21604 -3 21603 21891 21893 -3 21603 21893 21605 -3 21604 21606 21892 -3 21894 21892 21606 -3 21605 21893 21895 -3 21605 21895 21607 -3 21606 21608 21894 -3 21896 21894 21608 -3 21607 21895 21897 -3 21607 21897 21609 -3 21608 21610 21896 -3 21898 21896 21610 -3 21609 21897 21899 -3 21609 21899 21611 -3 21610 21612 21898 -3 21900 21898 21612 -3 21611 21899 21901 -3 21611 21901 21613 -3 21612 21614 21900 -3 21902 21900 21614 -3 21613 21901 21615 -3 21903 21615 21901 -3 21614 21616 21902 -3 21904 21902 21616 -3 21615 21903 21617 -3 21905 21617 21903 -3 21616 21618 21904 -3 21906 21904 21618 -3 21617 21905 21619 -3 21907 21619 21905 -3 21618 21620 21906 -3 21908 21906 21620 -3 21619 21907 21621 -3 21909 21621 21907 -3 21620 21622 21908 -3 21910 21908 21622 -3 21621 21909 
21623 -3 21911 21623 21909 -3 21622 21624 21910 -3 21912 21910 21624 -3 21623 21911 21625 -3 21913 21625 21911 -3 21624 21626 21912 -3 21914 21912 21626 -3 21625 21913 21627 -3 21915 21627 21913 -3 21626 21628 21914 -3 21916 21914 21628 -3 21627 21915 21629 -3 21917 21629 21915 -3 21628 21630 21916 -3 21918 21916 21630 -3 21629 21917 21631 -3 21919 21631 21917 -3 21630 21632 21918 -3 21920 21918 21632 -3 21631 21919 21635 -3 21923 21635 21919 -3 21632 21633 21920 -3 21921 21920 21633 -3 21633 21636 21921 -3 21924 21921 21636 -3 21634 21635 21922 -3 21923 21922 21635 -3 21634 21922 21639 -3 21927 21639 21922 -3 21636 21637 21924 -3 21925 21924 21637 -3 21637 21640 21925 -3 21928 21925 21640 -3 21638 21639 21926 -3 21927 21926 21639 -3 21638 21926 21708 -3 21996 21708 21926 -3 21640 21641 21928 -3 21929 21928 21641 -3 21641 21642 21929 -3 21930 21929 21642 -3 21642 21643 21930 -3 21931 21930 21643 -3 21643 21644 21931 -3 21932 21931 21644 -3 21644 21645 21932 -3 21933 21932 21645 -3 21645 21646 21933 -3 21934 21933 21646 -3 21646 21647 21934 -3 21935 21934 21647 -3 21647 21648 21935 -3 21936 21935 21648 -3 21648 21649 21936 -3 21937 21936 21649 -3 21649 21650 21937 -3 21938 21937 21650 -3 21650 21651 21938 -3 21939 21938 21651 -3 21651 21652 21939 -3 21940 21939 21652 -3 21652 21653 21940 -3 21941 21940 21653 -3 21653 21654 21941 -3 21942 21941 21654 -3 21654 21655 21942 -3 21943 21942 21655 -3 21655 21656 21943 -3 21944 21943 21656 -3 21656 21657 21944 -3 21945 21944 21657 -3 21657 21658 21945 -3 21946 21945 21658 -3 21658 21659 21946 -3 21947 21946 21659 -3 21659 21660 21947 -3 21948 21947 21660 -3 21660 21661 21948 -3 21949 21948 21661 -3 21661 21662 21949 -3 21950 21949 21662 -3 21662 21663 21950 -3 21951 21950 21663 -3 21663 21664 21951 -3 21952 21951 21664 -3 21664 21665 21952 -3 21953 21952 21665 -3 21665 21666 21953 -3 21954 21953 21666 -3 21666 21667 21954 -3 21955 21954 21667 -3 21667 21668 21955 -3 21956 21955 21668 -3 21668 21669 21956 -3 21957 21956 21669 -3 21669 21670 21957 -3 21958 21957 21670 -3 21670 21671 21958 -3 21959 21958 21671 -3 21671 21672 21959 -3 21960 21959 21672 -3 21672 21673 21960 -3 21961 21960 21673 -3 21673 21674 21961 -3 21962 21961 21674 -3 21674 21675 21962 -3 21963 21962 21675 -3 21675 21676 21963 -3 21964 21963 21676 -3 21676 21677 21964 -3 21965 21964 21677 -3 21677 21678 21965 -3 21966 21965 21678 -3 21678 21679 21966 -3 21967 21966 21679 -3 21679 21680 21967 -3 21968 21967 21680 -3 21680 21681 21968 -3 21969 21968 21681 -3 21681 21682 21969 -3 21970 21969 21682 -3 21682 21683 21970 -3 21971 21970 21683 -3 21683 21684 21971 -3 21972 21971 21684 -3 21684 21685 21972 -3 21973 21972 21685 -3 21685 21686 21973 -3 21974 21973 21686 -3 21686 21687 21974 -3 21975 21974 21687 -3 21687 21688 21976 -3 21687 21976 21975 -3 21688 21689 21977 -3 21688 21977 21976 -3 21689 21690 21978 -3 21689 21978 21977 -3 21690 21691 21979 -3 21690 21979 21978 -3 21691 21692 21980 -3 21691 21980 21979 -3 21692 21693 21981 -3 21692 21981 21980 -3 21693 21694 21982 -3 21693 21982 21981 -3 21694 21695 21983 -3 21694 21983 21982 -3 21695 21696 21984 -3 21695 21984 21983 -3 21696 21697 21985 -3 21696 21985 21984 -3 21697 21698 21986 -3 21697 21986 21985 -3 21698 21699 21987 -3 21698 21987 21986 -3 21699 21700 21988 -3 21699 21988 21987 -3 21700 21701 21989 -3 21700 21989 21988 -3 21701 21702 21990 -3 21701 21990 21989 -3 21702 21703 21991 -3 21702 21991 21990 -3 21703 21704 21992 -3 21703 21992 21991 -3 21704 21705 21993 -3 21704 21993 21992 -3 21705 21706 21994 -3 21705 21994 21993 
-3 21706 21707 21995 -3 21706 21995 21994 -3 21707 21708 21996 -3 21707 21996 21995 -3 21709 21997 21998 -3 21709 21998 21710 -3 21709 21779 22067 -3 21709 22067 21997 -3 21710 21998 21999 -3 21710 21999 21711 -3 21711 21999 22000 -3 21711 22000 21712 -3 21712 22000 22001 -3 21712 22001 21713 -3 21713 22001 22002 -3 21713 22002 21714 -3 21714 22002 22003 -3 21714 22003 21715 -3 21715 22003 22004 -3 21715 22004 21716 -3 21716 22004 22005 -3 21716 22005 21717 -3 21717 22005 22006 -3 21717 22006 21718 -3 21718 22006 22007 -3 21718 22007 21719 -3 21719 22007 22008 -3 21719 22008 21720 -3 21720 22008 22009 -3 21720 22009 21721 -3 21721 22009 22010 -3 21721 22010 21722 -3 21722 22010 22011 -3 21722 22011 21723 -3 21723 22011 22012 -3 21723 22012 21724 -3 21724 22012 22013 -3 21724 22013 21725 -3 21725 22013 22014 -3 21725 22014 21726 -3 21726 22014 22015 -3 21726 22015 21727 -3 21727 22015 22016 -3 21727 22016 21728 -3 21728 22016 22017 -3 21728 22017 21729 -3 21729 22017 22018 -3 21729 22018 21730 -3 21730 22018 22019 -3 21730 22019 21731 -3 21731 22019 22020 -3 21731 22020 21732 -3 21732 22020 22021 -3 21732 22021 21733 -3 21733 22021 22022 -3 21733 22022 21734 -3 21734 22022 22023 -3 21734 22023 21735 -3 21735 22023 22024 -3 21735 22024 21736 -3 21736 22024 22025 -3 21736 22025 21737 -3 21737 22025 22026 -3 21737 22026 21738 -3 21738 22026 22027 -3 21738 22027 21739 -3 21739 22027 22028 -3 21739 22028 21740 -3 21740 22028 22029 -3 21740 22029 21741 -3 21741 22029 22030 -3 21741 22030 21742 -3 21742 22030 22031 -3 21742 22031 21743 -3 21743 22031 22032 -3 21743 22032 21744 -3 21744 22032 22033 -3 21744 22033 21745 -3 21745 22033 22034 -3 21745 22034 21746 -3 21746 22034 22035 -3 21746 22035 21747 -3 21747 22035 22036 -3 21747 22036 21748 -3 21748 22036 22037 -3 21748 22037 21749 -3 21749 22037 22038 -3 21749 22038 21750 -3 21750 22038 22039 -3 21750 22039 21751 -3 21751 22039 22040 -3 21751 22040 21752 -3 21752 22040 22041 -3 21752 22041 21753 -3 21753 22041 22042 -3 21753 22042 21754 -3 21754 22042 22043 -3 21754 22043 21755 -3 21755 22043 22044 -3 21755 22044 21756 -3 21756 22044 22045 -3 21756 22045 21757 -3 21757 22045 22046 -3 21757 22046 21758 -3 21758 22046 22047 -3 21758 22047 21759 -3 21759 22047 22048 -3 21759 22048 21760 -3 21760 22048 22049 -3 21760 22049 21761 -3 21761 22049 22050 -3 21761 22050 21762 -3 21762 22050 22051 -3 21762 22051 21763 -3 21763 22051 22052 -3 21763 22052 21764 -3 21764 22052 22053 -3 21764 22053 21765 -3 21765 22053 22054 -3 21765 22054 21766 -3 21766 22054 22055 -3 21766 22055 21767 -3 21767 22055 22056 -3 21767 22056 21768 -3 21768 22056 22057 -3 21768 22057 21769 -3 21769 22057 22058 -3 21769 22058 21770 -3 21770 22058 22059 -3 21770 22059 21771 -3 21771 22059 22060 -3 21771 22060 21772 -3 21772 22060 22061 -3 21772 22061 21773 -3 21773 22061 22062 -3 21773 22062 21774 -3 21774 22062 22063 -3 21774 22063 21775 -3 21775 22063 22064 -3 21775 22064 21776 -3 21776 22064 22065 -3 21776 22065 21777 -3 21777 22065 22068 -3 21777 22068 21780 -3 21778 22066 22067 -3 21778 22067 21779 -3 21778 21783 22066 -3 22071 22066 21783 -3 21780 22068 22069 -3 21780 22069 21781 -3 21781 22069 22072 -3 21781 22072 21784 -3 21782 22070 22071 -3 21782 22071 21783 -3 21782 21786 22070 -3 22074 22070 21786 -3 21784 22072 22073 -3 21784 22073 21785 -3 21785 22073 22075 -3 21785 22075 21787 -3 21786 21788 22074 -3 22076 22074 21788 -3 21787 22075 22077 -3 21787 22077 21789 -3 21788 21790 22076 -3 22078 22076 21790 -3 21789 22077 22079 -3 21789 22079 21791 -3 21790 21792 22078 -3 
22080 22078 21792 -3 21791 22079 22081 -3 21791 22081 21793 -3 21792 21794 22080 -3 22082 22080 21794 -3 21793 22081 22083 -3 21793 22083 21795 -3 21794 21796 22082 -3 22084 22082 21796 -3 21795 22083 21797 -3 22085 21797 22083 -3 21796 21798 22084 -3 22086 22084 21798 -3 21797 22085 21799 -3 22087 21799 22085 -3 21798 21800 22086 -3 22088 22086 21800 -3 21799 22087 21801 -3 22089 21801 22087 -3 21800 21802 22088 -3 22090 22088 21802 -3 21801 22089 21803 -3 22091 21803 22089 -3 21802 21804 22090 -3 22092 22090 21804 -3 21803 22091 21805 -3 22093 21805 22091 -3 21804 21806 22092 -3 22094 22092 21806 -3 21805 22093 21807 -3 22095 21807 22093 -3 21806 21808 22094 -3 22096 22094 21808 -3 21807 22095 21809 -3 22097 21809 22095 -3 21808 21810 22096 -3 22098 22096 21810 -3 21809 22097 21811 -3 22099 21811 22097 -3 21810 21812 22098 -3 22100 22098 21812 -3 21811 22099 21813 -3 22101 21813 22099 -3 21812 21814 22100 -3 22102 22100 21814 -3 21813 22101 21815 -3 22103 21815 22101 -3 21814 21816 22102 -3 22104 22102 21816 -3 21815 22103 21817 -3 22105 21817 22103 -3 21816 21818 22104 -3 22106 22104 21818 -3 21817 22105 21819 -3 22107 21819 22105 -3 21818 21820 22106 -3 22108 22106 21820 -3 21819 22107 21821 -3 22109 21821 22107 -3 21820 21822 22108 -3 22110 22108 21822 -3 21821 22109 21823 -3 22111 21823 22109 -3 21822 21824 22110 -3 22112 22110 21824 -3 21823 22111 21825 -3 22113 21825 22111 -3 21824 21826 22112 -3 22114 22112 21826 -3 21825 22113 21827 -3 22115 21827 22113 -3 21826 21828 22114 -3 22116 22114 21828 -3 21827 22115 21829 -3 22117 21829 22115 -3 21828 21830 22116 -3 22118 22116 21830 -3 21829 22117 21831 -3 22119 21831 22117 -3 21830 21832 22118 -3 22120 22118 21832 -3 21831 22119 21833 -3 22121 21833 22119 -3 21832 21834 22120 -3 22122 22120 21834 -3 21833 22121 21835 -3 22123 21835 22121 -3 21834 21836 22122 -3 22124 22122 21836 -3 21835 22123 21837 -3 22125 21837 22123 -3 21836 21838 22124 -3 22126 22124 21838 -3 21837 22125 21839 -3 22127 21839 22125 -3 21838 21840 22126 -3 22128 22126 21840 -3 21839 22127 21841 -3 22129 21841 22127 -3 21840 21842 22128 -3 22130 22128 21842 -3 21841 22129 21843 -3 22131 21843 22129 -3 21842 21844 22130 -3 22132 22130 21844 -3 21843 22131 21845 -3 22133 21845 22131 -3 21844 21846 22132 -3 22134 22132 21846 -3 21845 22133 21847 -3 22135 21847 22133 -3 21846 21848 22134 -3 22136 22134 21848 -3 21847 22135 21849 -3 22137 21849 22135 -3 21848 21850 22136 -3 22138 22136 21850 -3 21849 22137 21851 -3 22139 21851 22137 -3 21850 21852 22138 -3 22140 22138 21852 -3 21851 22139 21853 -3 22141 21853 22139 -3 21852 21854 22140 -3 22142 22140 21854 -3 21853 22141 21855 -3 22143 21855 22141 -3 21854 21856 22142 -3 22144 22142 21856 -3 21855 22143 21857 -3 22145 21857 22143 -3 21856 21858 22144 -3 22146 22144 21858 -3 21857 22145 21859 -3 22147 21859 22145 -3 21858 21860 22146 -3 22148 22146 21860 -3 21859 22147 21861 -3 22149 21861 22147 -3 21860 21862 22148 -3 22150 22148 21862 -3 21861 22149 21863 -3 22151 21863 22149 -3 21862 21864 22150 -3 22152 22150 21864 -3 21863 22151 21865 -3 22153 21865 22151 -3 21864 21866 22152 -3 22154 22152 21866 -3 21865 22153 21867 -3 22155 21867 22153 -3 21866 21868 22154 -3 22156 22154 21868 -3 21867 22155 21869 -3 22157 21869 22155 -3 21868 21870 22158 -3 21868 22158 22156 -3 21869 22157 21871 -3 22159 21871 22157 -3 21870 21872 22160 -3 21870 22160 22158 -3 21871 22159 21873 -3 22161 21873 22159 -3 21872 21874 22162 -3 21872 22162 22160 -3 21873 22161 21875 -3 22163 21875 22161 -3 21874 21876 22164 -3 21874 22164 22162 -3 21875 
22163 21877 -3 22165 21877 22163 -3 21876 21878 22166 -3 21876 22166 22164 -3 21877 22165 21879 -3 22167 21879 22165 -3 21878 21880 22168 -3 21878 22168 22166 -3 21879 22167 21881 -3 22169 21881 22167 -3 21880 21882 22170 -3 21880 22170 22168 -3 21881 22169 21883 -3 22171 21883 22169 -3 21882 21884 22172 -3 21882 22172 22170 -3 21883 22171 21885 -3 22173 21885 22171 -3 21884 21886 22174 -3 21884 22174 22172 -3 21885 22173 21887 -3 22175 21887 22173 -3 21886 21888 22176 -3 21886 22176 22174 -3 21887 22175 22177 -3 21887 22177 21889 -3 21888 21890 22178 -3 21888 22178 22176 -3 21889 22177 22179 -3 21889 22179 21891 -3 21890 21892 22180 -3 21890 22180 22178 -3 21891 22179 22181 -3 21891 22181 21893 -3 21892 21894 22182 -3 21892 22182 22180 -3 21893 22181 22183 -3 21893 22183 21895 -3 21894 21896 22184 -3 21894 22184 22182 -3 21895 22183 22185 -3 21895 22185 21897 -3 21896 21898 22186 -3 21896 22186 22184 -3 21897 22185 22187 -3 21897 22187 21899 -3 21898 21900 22188 -3 21898 22188 22186 -3 21899 22187 22189 -3 21899 22189 21901 -3 21900 21902 22190 -3 21900 22190 22188 -3 21901 22189 22191 -3 21901 22191 21903 -3 21902 21904 22192 -3 21902 22192 22190 -3 21903 22191 22193 -3 21903 22193 21905 -3 21904 21906 22194 -3 21904 22194 22192 -3 21905 22193 22195 -3 21905 22195 21907 -3 21906 21908 22196 -3 21906 22196 22194 -3 21907 22195 22197 -3 21907 22197 21909 -3 21908 21910 22198 -3 21908 22198 22196 -3 21909 22197 22199 -3 21909 22199 21911 -3 21910 21912 22200 -3 21910 22200 22198 -3 21911 22199 22201 -3 21911 22201 21913 -3 21912 21914 22202 -3 21912 22202 22200 -3 21913 22201 22203 -3 21913 22203 21915 -3 21914 21916 22204 -3 21914 22204 22202 -3 21915 22203 22205 -3 21915 22205 21917 -3 21916 21918 22206 -3 21916 22206 22204 -3 21917 22205 22207 -3 21917 22207 21919 -3 21918 21920 22208 -3 21918 22208 22206 -3 21919 22207 22211 -3 21919 22211 21923 -3 21920 21921 22209 -3 21920 22209 22208 -3 21921 21924 22212 -3 21921 22212 22209 -3 21922 21923 22211 -3 21922 22211 22210 -3 21922 22210 22215 -3 21922 22215 21927 -3 21924 21925 22213 -3 21924 22213 22212 -3 21925 21928 22216 -3 21925 22216 22213 -3 21926 21927 22215 -3 21926 22215 22214 -3 21926 22214 22284 -3 21926 22284 21996 -3 21928 21929 22217 -3 21928 22217 22216 -3 21929 21930 22218 -3 21929 22218 22217 -3 21930 21931 22219 -3 21930 22219 22218 -3 21931 21932 22220 -3 21931 22220 22219 -3 21932 21933 22221 -3 21932 22221 22220 -3 21933 21934 22222 -3 21933 22222 22221 -3 21934 21935 22223 -3 21934 22223 22222 -3 21935 21936 22224 -3 21935 22224 22223 -3 21936 21937 22225 -3 21936 22225 22224 -3 21937 21938 22226 -3 21937 22226 22225 -3 21938 21939 22227 -3 21938 22227 22226 -3 21939 21940 22228 -3 21939 22228 22227 -3 21940 21941 22229 -3 21940 22229 22228 -3 21941 21942 22230 -3 21941 22230 22229 -3 21942 21943 22231 -3 21942 22231 22230 -3 21943 21944 22232 -3 21943 22232 22231 -3 21944 21945 22233 -3 21944 22233 22232 -3 21945 21946 22234 -3 21945 22234 22233 -3 21946 21947 22235 -3 21946 22235 22234 -3 21947 21948 22236 -3 21947 22236 22235 -3 21948 21949 22237 -3 21948 22237 22236 -3 21949 21950 22238 -3 21949 22238 22237 -3 21950 21951 22239 -3 21950 22239 22238 -3 21951 21952 22240 -3 21951 22240 22239 -3 21952 21953 22241 -3 21952 22241 22240 -3 21953 21954 22242 -3 21953 22242 22241 -3 21954 21955 22243 -3 21954 22243 22242 -3 21955 21956 22244 -3 21955 22244 22243 -3 21956 21957 22245 -3 21956 22245 22244 -3 21957 21958 22246 -3 21957 22246 22245 -3 21958 21959 22247 -3 21958 22247 22246 -3 21959 21960 22248 -3 21959 22248 
22247 -3 21960 21961 22248 -3 22249 22248 21961 -3 21961 21962 22249 -3 22250 22249 21962 -3 21962 21963 22250 -3 22251 22250 21963 -3 21963 21964 22251 -3 22252 22251 21964 -3 21964 21965 22252 -3 22253 22252 21965 -3 21965 21966 22253 -3 22254 22253 21966 -3 21966 21967 22254 -3 22255 22254 21967 -3 21967 21968 22255 -3 22256 22255 21968 -3 21968 21969 22256 -3 22257 22256 21969 -3 21969 21970 22257 -3 22258 22257 21970 -3 21970 21971 22258 -3 22259 22258 21971 -3 21971 21972 22259 -3 22260 22259 21972 -3 21972 21973 22260 -3 22261 22260 21973 -3 21973 21974 22261 -3 22262 22261 21974 -3 21974 21975 22262 -3 22263 22262 21975 -3 21975 21976 22263 -3 22264 22263 21976 -3 21976 21977 22264 -3 22265 22264 21977 -3 21977 21978 22265 -3 22266 22265 21978 -3 21978 21979 22266 -3 22267 22266 21979 -3 21979 21980 22267 -3 22268 22267 21980 -3 21980 21981 22268 -3 22269 22268 21981 -3 21981 21982 22269 -3 22270 22269 21982 -3 21982 21983 22270 -3 22271 22270 21983 -3 21983 21984 22271 -3 22272 22271 21984 -3 21984 21985 22272 -3 22273 22272 21985 -3 21985 21986 22273 -3 22274 22273 21986 -3 21986 21987 22274 -3 22275 22274 21987 -3 21987 21988 22275 -3 22276 22275 21988 -3 21988 21989 22276 -3 22277 22276 21989 -3 21989 21990 22277 -3 22278 22277 21990 -3 21990 21991 22278 -3 22279 22278 21991 -3 21991 21992 22279 -3 22280 22279 21992 -3 21992 21993 22280 -3 22281 22280 21993 -3 21993 21994 22281 -3 22282 22281 21994 -3 21994 21995 22282 -3 22283 22282 21995 -3 21995 21996 22283 -3 22284 22283 21996 -3 21997 22285 21998 -3 22286 21998 22285 -3 21997 22067 22285 -3 22355 22285 22067 -3 21998 22286 21999 -3 22287 21999 22286 -3 21999 22287 22000 -3 22288 22000 22287 -3 22000 22288 22001 -3 22289 22001 22288 -3 22001 22289 22002 -3 22290 22002 22289 -3 22002 22290 22003 -3 22291 22003 22290 -3 22003 22291 22004 -3 22292 22004 22291 -3 22004 22292 22005 -3 22293 22005 22292 -3 22005 22293 22006 -3 22294 22006 22293 -3 22006 22294 22007 -3 22295 22007 22294 -3 22007 22295 22008 -3 22296 22008 22295 -3 22008 22296 22009 -3 22297 22009 22296 -3 22009 22297 22010 -3 22298 22010 22297 -3 22010 22298 22011 -3 22299 22011 22298 -3 22011 22299 22012 -3 22300 22012 22299 -3 22012 22300 22013 -3 22301 22013 22300 -3 22013 22301 22014 -3 22302 22014 22301 -3 22014 22302 22015 -3 22303 22015 22302 -3 22015 22303 22016 -3 22304 22016 22303 -3 22016 22304 22017 -3 22305 22017 22304 -3 22017 22305 22018 -3 22306 22018 22305 -3 22018 22306 22019 -3 22307 22019 22306 -3 22019 22307 22020 -3 22308 22020 22307 -3 22020 22308 22021 -3 22309 22021 22308 -3 22021 22309 22022 -3 22310 22022 22309 -3 22022 22310 22023 -3 22311 22023 22310 -3 22023 22311 22024 -3 22312 22024 22311 -3 22024 22312 22025 -3 22313 22025 22312 -3 22025 22313 22026 -3 22314 22026 22313 -3 22026 22314 22027 -3 22315 22027 22314 -3 22027 22315 22028 -3 22316 22028 22315 -3 22028 22316 22029 -3 22317 22029 22316 -3 22029 22317 22030 -3 22318 22030 22317 -3 22030 22318 22031 -3 22319 22031 22318 -3 22031 22319 22032 -3 22320 22032 22319 -3 22032 22320 22033 -3 22321 22033 22320 -3 22033 22321 22034 -3 22322 22034 22321 -3 22034 22322 22035 -3 22323 22035 22322 -3 22035 22323 22036 -3 22324 22036 22323 -3 22036 22324 22037 -3 22325 22037 22324 -3 22037 22325 22038 -3 22326 22038 22325 -3 22038 22326 22039 -3 22327 22039 22326 -3 22039 22327 22040 -3 22328 22040 22327 -3 22040 22328 22041 -3 22329 22041 22328 -3 22041 22329 22042 -3 22330 22042 22329 -3 22042 22330 22043 -3 22331 22043 22330 -3 22043 22331 22044 -3 22332 22044 22331 -3 22044 22332 22045 
-3 22333 22045 22332 -3 22045 22333 22046 -3 22334 22046 22333 -3 22046 22334 22047 -3 22335 22047 22334 -3 22047 22335 22048 -3 22336 22048 22335 -3 22048 22336 22049 -3 22337 22049 22336 -3 22049 22337 22050 -3 22338 22050 22337 -3 22050 22338 22051 -3 22339 22051 22338 -3 22051 22339 22052 -3 22340 22052 22339 -3 22052 22340 22053 -3 22341 22053 22340 -3 22053 22341 22054 -3 22342 22054 22341 -3 22054 22342 22055 -3 22343 22055 22342 -3 22055 22343 22056 -3 22344 22056 22343 -3 22056 22344 22057 -3 22345 22057 22344 -3 22057 22345 22058 -3 22346 22058 22345 -3 22058 22346 22059 -3 22347 22059 22346 -3 22059 22347 22060 -3 22348 22060 22347 -3 22060 22348 22061 -3 22349 22061 22348 -3 22061 22349 22062 -3 22350 22062 22349 -3 22062 22350 22063 -3 22351 22063 22350 -3 22063 22351 22064 -3 22352 22064 22351 -3 22064 22352 22065 -3 22353 22065 22352 -3 22065 22353 22068 -3 22356 22068 22353 -3 22066 22354 22067 -3 22355 22067 22354 -3 22066 22071 22359 -3 22066 22359 22354 -3 22068 22356 22069 -3 22357 22069 22356 -3 22069 22357 22360 -3 22069 22360 22072 -3 22070 22358 22359 -3 22070 22359 22071 -3 22070 22074 22362 -3 22070 22362 22358 -3 22072 22360 22361 -3 22072 22361 22073 -3 22073 22361 22363 -3 22073 22363 22075 -3 22074 22076 22364 -3 22074 22364 22362 -3 22075 22363 22365 -3 22075 22365 22077 -3 22076 22078 22366 -3 22076 22366 22364 -3 22077 22365 22367 -3 22077 22367 22079 -3 22078 22080 22368 -3 22078 22368 22366 -3 22079 22367 22369 -3 22079 22369 22081 -3 22080 22082 22370 -3 22080 22370 22368 -3 22081 22369 22371 -3 22081 22371 22083 -3 22082 22084 22372 -3 22082 22372 22370 -3 22083 22371 22373 -3 22083 22373 22085 -3 22084 22086 22374 -3 22084 22374 22372 -3 22085 22373 22375 -3 22085 22375 22087 -3 22086 22088 22376 -3 22086 22376 22374 -3 22087 22375 22377 -3 22087 22377 22089 -3 22088 22090 22378 -3 22088 22378 22376 -3 22089 22377 22379 -3 22089 22379 22091 -3 22090 22092 22380 -3 22090 22380 22378 -3 22091 22379 22381 -3 22091 22381 22093 -3 22092 22094 22382 -3 22092 22382 22380 -3 22093 22381 22383 -3 22093 22383 22095 -3 22094 22096 22384 -3 22094 22384 22382 -3 22095 22383 22385 -3 22095 22385 22097 -3 22096 22098 22386 -3 22096 22386 22384 -3 22097 22385 22387 -3 22097 22387 22099 -3 22098 22100 22388 -3 22098 22388 22386 -3 22099 22387 22389 -3 22099 22389 22101 -3 22100 22102 22390 -3 22100 22390 22388 -3 22101 22389 22391 -3 22101 22391 22103 -3 22102 22104 22392 -3 22102 22392 22390 -3 22103 22391 22393 -3 22103 22393 22105 -3 22104 22106 22394 -3 22104 22394 22392 -3 22105 22393 22395 -3 22105 22395 22107 -3 22106 22108 22396 -3 22106 22396 22394 -3 22107 22395 22397 -3 22107 22397 22109 -3 22108 22110 22398 -3 22108 22398 22396 -3 22109 22397 22399 -3 22109 22399 22111 -3 22110 22112 22400 -3 22110 22400 22398 -3 22111 22399 22401 -3 22111 22401 22113 -3 22112 22114 22402 -3 22112 22402 22400 -3 22113 22401 22403 -3 22113 22403 22115 -3 22114 22116 22404 -3 22114 22404 22402 -3 22115 22403 22405 -3 22115 22405 22117 -3 22116 22118 22406 -3 22116 22406 22404 -3 22117 22405 22407 -3 22117 22407 22119 -3 22118 22120 22408 -3 22118 22408 22406 -3 22119 22407 22409 -3 22119 22409 22121 -3 22120 22122 22410 -3 22120 22410 22408 -3 22121 22409 22411 -3 22121 22411 22123 -3 22122 22124 22412 -3 22122 22412 22410 -3 22123 22411 22413 -3 22123 22413 22125 -3 22124 22126 22414 -3 22124 22414 22412 -3 22125 22413 22415 -3 22125 22415 22127 -3 22126 22128 22416 -3 22126 22416 22414 -3 22127 22415 22417 -3 22127 22417 22129 -3 22128 22130 22418 -3 22128 22418 22416 -3 
22129 22417 22419 -3 22129 22419 22131 -3 22130 22132 22420 -3 22130 22420 22418 -3 22131 22419 22421 -3 22131 22421 22133 -3 22132 22134 22422 -3 22132 22422 22420 -3 22133 22421 22423 -3 22133 22423 22135 -3 22134 22136 22424 -3 22134 22424 22422 -3 22135 22423 22425 -3 22135 22425 22137 -3 22136 22138 22426 -3 22136 22426 22424 -3 22137 22425 22427 -3 22137 22427 22139 -3 22138 22140 22428 -3 22138 22428 22426 -3 22139 22427 22429 -3 22139 22429 22141 -3 22140 22142 22430 -3 22140 22430 22428 -3 22141 22429 22431 -3 22141 22431 22143 -3 22142 22144 22430 -3 22432 22430 22144 -3 22143 22431 22433 -3 22143 22433 22145 -3 22144 22146 22432 -3 22434 22432 22146 -3 22145 22433 22435 -3 22145 22435 22147 -3 22146 22148 22434 -3 22436 22434 22148 -3 22147 22435 22437 -3 22147 22437 22149 -3 22148 22150 22436 -3 22438 22436 22150 -3 22149 22437 22439 -3 22149 22439 22151 -3 22150 22152 22438 -3 22440 22438 22152 -3 22151 22439 22441 -3 22151 22441 22153 -3 22152 22154 22440 -3 22442 22440 22154 -3 22153 22441 22443 -3 22153 22443 22155 -3 22154 22156 22442 -3 22444 22442 22156 -3 22155 22443 22445 -3 22155 22445 22157 -3 22156 22158 22444 -3 22446 22444 22158 -3 22157 22445 22447 -3 22157 22447 22159 -3 22158 22160 22446 -3 22448 22446 22160 -3 22159 22447 22449 -3 22159 22449 22161 -3 22160 22162 22448 -3 22450 22448 22162 -3 22161 22449 22163 -3 22451 22163 22449 -3 22162 22164 22450 -3 22452 22450 22164 -3 22163 22451 22165 -3 22453 22165 22451 -3 22164 22166 22452 -3 22454 22452 22166 -3 22165 22453 22167 -3 22455 22167 22453 -3 22166 22168 22454 -3 22456 22454 22168 -3 22167 22455 22169 -3 22457 22169 22455 -3 22168 22170 22456 -3 22458 22456 22170 -3 22169 22457 22171 -3 22459 22171 22457 -3 22170 22172 22458 -3 22460 22458 22172 -3 22171 22459 22173 -3 22461 22173 22459 -3 22172 22174 22460 -3 22462 22460 22174 -3 22173 22461 22175 -3 22463 22175 22461 -3 22174 22176 22462 -3 22464 22462 22176 -3 22175 22463 22177 -3 22465 22177 22463 -3 22176 22178 22464 -3 22466 22464 22178 -3 22177 22465 22179 -3 22467 22179 22465 -3 22178 22180 22466 -3 22468 22466 22180 -3 22179 22467 22181 -3 22469 22181 22467 -3 22180 22182 22468 -3 22470 22468 22182 -3 22181 22469 22183 -3 22471 22183 22469 -3 22182 22184 22470 -3 22472 22470 22184 -3 22183 22471 22185 -3 22473 22185 22471 -3 22184 22186 22472 -3 22474 22472 22186 -3 22185 22473 22187 -3 22475 22187 22473 -3 22186 22188 22474 -3 22476 22474 22188 -3 22187 22475 22189 -3 22477 22189 22475 -3 22188 22190 22476 -3 22478 22476 22190 -3 22189 22477 22191 -3 22479 22191 22477 -3 22190 22192 22478 -3 22480 22478 22192 -3 22191 22479 22193 -3 22481 22193 22479 -3 22192 22194 22480 -3 22482 22480 22194 -3 22193 22481 22195 -3 22483 22195 22481 -3 22194 22196 22482 -3 22484 22482 22196 -3 22195 22483 22197 -3 22485 22197 22483 -3 22196 22198 22484 -3 22486 22484 22198 -3 22197 22485 22199 -3 22487 22199 22485 -3 22198 22200 22486 -3 22488 22486 22200 -3 22199 22487 22201 -3 22489 22201 22487 -3 22200 22202 22488 -3 22490 22488 22202 -3 22201 22489 22203 -3 22491 22203 22489 -3 22202 22204 22490 -3 22492 22490 22204 -3 22203 22491 22205 -3 22493 22205 22491 -3 22204 22206 22492 -3 22494 22492 22206 -3 22205 22493 22207 -3 22495 22207 22493 -3 22206 22208 22494 -3 22496 22494 22208 -3 22207 22495 22211 -3 22499 22211 22495 -3 22208 22209 22496 -3 22497 22496 22209 -3 22209 22212 22497 -3 22500 22497 22212 -3 22210 22211 22498 -3 22499 22498 22211 -3 22210 22498 22215 -3 22503 22215 22498 -3 22212 22213 22500 -3 22501 22500 22213 -3 22213 22216 22501 -3 22504 
22501 22216 -3 22214 22215 22502 -3 22503 22502 22215 -3 22214 22502 22284 -3 22572 22284 22502 -3 22216 22217 22504 -3 22505 22504 22217 -3 22217 22218 22505 -3 22506 22505 22218 -3 22218 22219 22506 -3 22507 22506 22219 -3 22219 22220 22507 -3 22508 22507 22220 -3 22220 22221 22508 -3 22509 22508 22221 -3 22221 22222 22509 -3 22510 22509 22222 -3 22222 22223 22510 -3 22511 22510 22223 -3 22223 22224 22511 -3 22512 22511 22224 -3 22224 22225 22512 -3 22513 22512 22225 -3 22225 22226 22513 -3 22514 22513 22226 -3 22226 22227 22514 -3 22515 22514 22227 -3 22227 22228 22515 -3 22516 22515 22228 -3 22228 22229 22516 -3 22517 22516 22229 -3 22229 22230 22517 -3 22518 22517 22230 -3 22230 22231 22518 -3 22519 22518 22231 -3 22231 22232 22519 -3 22520 22519 22232 -3 22232 22233 22520 -3 22521 22520 22233 -3 22233 22234 22521 -3 22522 22521 22234 -3 22234 22235 22523 -3 22234 22523 22522 -3 22235 22236 22524 -3 22235 22524 22523 -3 22236 22237 22525 -3 22236 22525 22524 -3 22237 22238 22526 -3 22237 22526 22525 -3 22238 22239 22527 -3 22238 22527 22526 -3 22239 22240 22528 -3 22239 22528 22527 -3 22240 22241 22529 -3 22240 22529 22528 -3 22241 22242 22530 -3 22241 22530 22529 -3 22242 22243 22531 -3 22242 22531 22530 -3 22243 22244 22532 -3 22243 22532 22531 -3 22244 22245 22533 -3 22244 22533 22532 -3 22245 22246 22534 -3 22245 22534 22533 -3 22246 22247 22535 -3 22246 22535 22534 -3 22247 22248 22536 -3 22247 22536 22535 -3 22248 22249 22537 -3 22248 22537 22536 -3 22249 22250 22538 -3 22249 22538 22537 -3 22250 22251 22539 -3 22250 22539 22538 -3 22251 22252 22540 -3 22251 22540 22539 -3 22252 22253 22541 -3 22252 22541 22540 -3 22253 22254 22542 -3 22253 22542 22541 -3 22254 22255 22543 -3 22254 22543 22542 -3 22255 22256 22544 -3 22255 22544 22543 -3 22256 22257 22545 -3 22256 22545 22544 -3 22257 22258 22546 -3 22257 22546 22545 -3 22258 22259 22547 -3 22258 22547 22546 -3 22259 22260 22548 -3 22259 22548 22547 -3 22260 22261 22549 -3 22260 22549 22548 -3 22261 22262 22550 -3 22261 22550 22549 -3 22262 22263 22551 -3 22262 22551 22550 -3 22263 22264 22552 -3 22263 22552 22551 -3 22264 22265 22553 -3 22264 22553 22552 -3 22265 22266 22554 -3 22265 22554 22553 -3 22266 22267 22555 -3 22266 22555 22554 -3 22267 22268 22556 -3 22267 22556 22555 -3 22268 22269 22557 -3 22268 22557 22556 -3 22269 22270 22558 -3 22269 22558 22557 -3 22270 22271 22559 -3 22270 22559 22558 -3 22271 22272 22560 -3 22271 22560 22559 -3 22272 22273 22561 -3 22272 22561 22560 -3 22273 22274 22562 -3 22273 22562 22561 -3 22274 22275 22563 -3 22274 22563 22562 -3 22275 22276 22564 -3 22275 22564 22563 -3 22276 22277 22565 -3 22276 22565 22564 -3 22277 22278 22566 -3 22277 22566 22565 -3 22278 22279 22567 -3 22278 22567 22566 -3 22279 22280 22568 -3 22279 22568 22567 -3 22280 22281 22569 -3 22280 22569 22568 -3 22281 22282 22570 -3 22281 22570 22569 -3 22282 22283 22571 -3 22282 22571 22570 -3 22283 22284 22572 -3 22283 22572 22571 -3 22285 22573 22574 -3 22285 22574 22286 -3 22285 22355 22643 -3 22285 22643 22573 -3 22286 22574 22575 -3 22286 22575 22287 -3 22287 22575 22576 -3 22287 22576 22288 -3 22288 22576 22577 -3 22288 22577 22289 -3 22289 22577 22578 -3 22289 22578 22290 -3 22290 22578 22579 -3 22290 22579 22291 -3 22291 22579 22580 -3 22291 22580 22292 -3 22292 22580 22581 -3 22292 22581 22293 -3 22293 22581 22582 -3 22293 22582 22294 -3 22294 22582 22583 -3 22294 22583 22295 -3 22295 22583 22584 -3 22295 22584 22296 -3 22296 22584 22585 -3 22296 22585 22297 -3 22297 22585 22586 -3 22297 22586 22298 -3 22298 22586 
22587 -3 22298 22587 22299 -3 22299 22587 22588 -3 22299 22588 22300 -3 22300 22588 22589 -3 22300 22589 22301 -3 22301 22589 22590 -3 22301 22590 22302 -3 22302 22590 22591 -3 22302 22591 22303 -3 22303 22591 22592 -3 22303 22592 22304 -3 22304 22592 22593 -3 22304 22593 22305 -3 22305 22593 22594 -3 22305 22594 22306 -3 22306 22594 22595 -3 22306 22595 22307 -3 22307 22595 22596 -3 22307 22596 22308 -3 22308 22596 22597 -3 22308 22597 22309 -3 22309 22597 22598 -3 22309 22598 22310 -3 22310 22598 22599 -3 22310 22599 22311 -3 22311 22599 22600 -3 22311 22600 22312 -3 22312 22600 22601 -3 22312 22601 22313 -3 22313 22601 22602 -3 22313 22602 22314 -3 22314 22602 22603 -3 22314 22603 22315 -3 22315 22603 22604 -3 22315 22604 22316 -3 22316 22604 22605 -3 22316 22605 22317 -3 22317 22605 22606 -3 22317 22606 22318 -3 22318 22606 22607 -3 22318 22607 22319 -3 22319 22607 22608 -3 22319 22608 22320 -3 22320 22608 22609 -3 22320 22609 22321 -3 22321 22609 22610 -3 22321 22610 22322 -3 22322 22610 22611 -3 22322 22611 22323 -3 22323 22611 22612 -3 22323 22612 22324 -3 22324 22612 22613 -3 22324 22613 22325 -3 22325 22613 22614 -3 22325 22614 22326 -3 22326 22614 22615 -3 22326 22615 22327 -3 22327 22615 22616 -3 22327 22616 22328 -3 22328 22616 22617 -3 22328 22617 22329 -3 22329 22617 22618 -3 22329 22618 22330 -3 22330 22618 22619 -3 22330 22619 22331 -3 22331 22619 22620 -3 22331 22620 22332 -3 22332 22620 22621 -3 22332 22621 22333 -3 22333 22621 22622 -3 22333 22622 22334 -3 22334 22622 22623 -3 22334 22623 22335 -3 22335 22623 22624 -3 22335 22624 22336 -3 22336 22624 22625 -3 22336 22625 22337 -3 22337 22625 22626 -3 22337 22626 22338 -3 22338 22626 22627 -3 22338 22627 22339 -3 22339 22627 22628 -3 22339 22628 22340 -3 22340 22628 22629 -3 22340 22629 22341 -3 22341 22629 22630 -3 22341 22630 22342 -3 22342 22630 22631 -3 22342 22631 22343 -3 22343 22631 22632 -3 22343 22632 22344 -3 22344 22632 22633 -3 22344 22633 22345 -3 22345 22633 22634 -3 22345 22634 22346 -3 22346 22634 22347 -3 22635 22347 22634 -3 22347 22635 22348 -3 22636 22348 22635 -3 22348 22636 22349 -3 22637 22349 22636 -3 22349 22637 22350 -3 22638 22350 22637 -3 22350 22638 22351 -3 22639 22351 22638 -3 22351 22639 22352 -3 22640 22352 22639 -3 22352 22640 22353 -3 22641 22353 22640 -3 22353 22641 22356 -3 22644 22356 22641 -3 22354 22642 22355 -3 22643 22355 22642 -3 22354 22359 22642 -3 22647 22642 22359 -3 22356 22644 22357 -3 22645 22357 22644 -3 22357 22645 22360 -3 22648 22360 22645 -3 22358 22646 22359 -3 22647 22359 22646 -3 22358 22362 22646 -3 22650 22646 22362 -3 22360 22648 22361 -3 22649 22361 22648 -3 22361 22649 22363 -3 22651 22363 22649 -3 22362 22364 22650 -3 22652 22650 22364 -3 22363 22651 22365 -3 22653 22365 22651 -3 22364 22366 22652 -3 22654 22652 22366 -3 22365 22653 22367 -3 22655 22367 22653 -3 22366 22368 22654 -3 22656 22654 22368 -3 22367 22655 22369 -3 22657 22369 22655 -3 22368 22370 22656 -3 22658 22656 22370 -3 22369 22657 22371 -3 22659 22371 22657 -3 22370 22372 22658 -3 22660 22658 22372 -3 22371 22659 22373 -3 22661 22373 22659 -3 22372 22374 22660 -3 22662 22660 22374 -3 22373 22661 22375 -3 22663 22375 22661 -3 22374 22376 22662 -3 22664 22662 22376 -3 22375 22663 22377 -3 22665 22377 22663 -3 22376 22378 22664 -3 22666 22664 22378 -3 22377 22665 22379 -3 22667 22379 22665 -3 22378 22380 22666 -3 22668 22666 22380 -3 22379 22667 22381 -3 22669 22381 22667 -3 22380 22382 22668 -3 22670 22668 22382 -3 22381 22669 22383 -3 22671 22383 22669 -3 22382 22384 22670 -3 22672 22670 22384 
-3 22383 22671 22385 -3 22673 22385 22671 -3 22384 22386 22672 -3 22674 22672 22386 -3 22385 22673 22387 -3 22675 22387 22673 -3 22386 22388 22674 -3 22676 22674 22388 -3 22387 22675 22389 -3 22677 22389 22675 -3 22388 22390 22676 -3 22678 22676 22390 -3 22389 22677 22391 -3 22679 22391 22677 -3 22390 22392 22678 -3 22680 22678 22392 -3 22391 22679 22393 -3 22681 22393 22679 -3 22392 22394 22680 -3 22682 22680 22394 -3 22393 22681 22395 -3 22683 22395 22681 -3 22394 22396 22682 -3 22684 22682 22396 -3 22395 22683 22397 -3 22685 22397 22683 -3 22396 22398 22684 -3 22686 22684 22398 -3 22397 22685 22399 -3 22687 22399 22685 -3 22398 22400 22686 -3 22688 22686 22400 -3 22399 22687 22401 -3 22689 22401 22687 -3 22400 22402 22688 -3 22690 22688 22402 -3 22401 22689 22403 -3 22691 22403 22689 -3 22402 22404 22690 -3 22692 22690 22404 -3 22403 22691 22405 -3 22693 22405 22691 -3 22404 22406 22692 -3 22694 22692 22406 -3 22405 22693 22407 -3 22695 22407 22693 -3 22406 22408 22694 -3 22696 22694 22408 -3 22407 22695 22409 -3 22697 22409 22695 -3 22408 22410 22696 -3 22698 22696 22410 -3 22409 22697 22411 -3 22699 22411 22697 -3 22410 22412 22698 -3 22700 22698 22412 -3 22411 22699 22413 -3 22701 22413 22699 -3 22412 22414 22700 -3 22702 22700 22414 -3 22413 22701 22415 -3 22703 22415 22701 -3 22414 22416 22702 -3 22704 22702 22416 -3 22415 22703 22417 -3 22705 22417 22703 -3 22416 22418 22704 -3 22706 22704 22418 -3 22417 22705 22419 -3 22707 22419 22705 -3 22418 22420 22708 -3 22418 22708 22706 -3 22419 22707 22421 -3 22709 22421 22707 -3 22420 22422 22710 -3 22420 22710 22708 -3 22421 22709 22423 -3 22711 22423 22709 -3 22422 22424 22712 -3 22422 22712 22710 -3 22423 22711 22425 -3 22713 22425 22711 -3 22424 22426 22714 -3 22424 22714 22712 -3 22425 22713 22427 -3 22715 22427 22713 -3 22426 22428 22716 -3 22426 22716 22714 -3 22427 22715 22429 -3 22717 22429 22715 -3 22428 22430 22718 -3 22428 22718 22716 -3 22429 22717 22431 -3 22719 22431 22717 -3 22430 22432 22720 -3 22430 22720 22718 -3 22431 22719 22433 -3 22721 22433 22719 -3 22432 22434 22722 -3 22432 22722 22720 -3 22433 22721 22435 -3 22723 22435 22721 -3 22434 22436 22724 -3 22434 22724 22722 -3 22435 22723 22437 -3 22725 22437 22723 -3 22436 22438 22726 -3 22436 22726 22724 -3 22437 22725 22439 -3 22727 22439 22725 -3 22438 22440 22728 -3 22438 22728 22726 -3 22439 22727 22729 -3 22439 22729 22441 -3 22440 22442 22730 -3 22440 22730 22728 -3 22441 22729 22731 -3 22441 22731 22443 -3 22442 22444 22732 -3 22442 22732 22730 -3 22443 22731 22733 -3 22443 22733 22445 -3 22444 22446 22734 -3 22444 22734 22732 -3 22445 22733 22735 -3 22445 22735 22447 -3 22446 22448 22736 -3 22446 22736 22734 -3 22447 22735 22737 -3 22447 22737 22449 -3 22448 22450 22738 -3 22448 22738 22736 -3 22449 22737 22739 -3 22449 22739 22451 -3 22450 22452 22740 -3 22450 22740 22738 -3 22451 22739 22741 -3 22451 22741 22453 -3 22452 22454 22742 -3 22452 22742 22740 -3 22453 22741 22743 -3 22453 22743 22455 -3 22454 22456 22744 -3 22454 22744 22742 -3 22455 22743 22745 -3 22455 22745 22457 -3 22456 22458 22746 -3 22456 22746 22744 -3 22457 22745 22747 -3 22457 22747 22459 -3 22458 22460 22748 -3 22458 22748 22746 -3 22459 22747 22749 -3 22459 22749 22461 -3 22460 22462 22750 -3 22460 22750 22748 -3 22461 22749 22751 -3 22461 22751 22463 -3 22462 22464 22752 -3 22462 22752 22750 -3 22463 22751 22753 -3 22463 22753 22465 -3 22464 22466 22754 -3 22464 22754 22752 -3 22465 22753 22755 -3 22465 22755 22467 -3 22466 22468 22756 -3 22466 22756 22754 -3 22467 22755 22757 -3 
22467 22757 22469 -3 22468 22470 22758 -3 22468 22758 22756 -3 22469 22757 22759 -3 22469 22759 22471 -3 22470 22472 22760 -3 22470 22760 22758 -3 22471 22759 22761 -3 22471 22761 22473 -3 22472 22474 22762 -3 22472 22762 22760 -3 22473 22761 22763 -3 22473 22763 22475 -3 22474 22476 22764 -3 22474 22764 22762 -3 22475 22763 22765 -3 22475 22765 22477 -3 22476 22478 22766 -3 22476 22766 22764 -3 22477 22765 22767 -3 22477 22767 22479 -3 22478 22480 22768 -3 22478 22768 22766 -3 22479 22767 22769 -3 22479 22769 22481 -3 22480 22482 22770 -3 22480 22770 22768 -3 22481 22769 22771 -3 22481 22771 22483 -3 22482 22484 22772 -3 22482 22772 22770 -3 22483 22771 22773 -3 22483 22773 22485 -3 22484 22486 22774 -3 22484 22774 22772 -3 22485 22773 22775 -3 22485 22775 22487 -3 22486 22488 22776 -3 22486 22776 22774 -3 22487 22775 22777 -3 22487 22777 22489 -3 22488 22490 22778 -3 22488 22778 22776 -3 22489 22777 22779 -3 22489 22779 22491 -3 22490 22492 22780 -3 22490 22780 22778 -3 22491 22779 22781 -3 22491 22781 22493 -3 22492 22494 22782 -3 22492 22782 22780 -3 22493 22781 22783 -3 22493 22783 22495 -3 22494 22496 22784 -3 22494 22784 22782 -3 22495 22783 22787 -3 22495 22787 22499 -3 22496 22497 22785 -3 22496 22785 22784 -3 22497 22500 22788 -3 22497 22788 22785 -3 22498 22499 22787 -3 22498 22787 22786 -3 22498 22786 22791 -3 22498 22791 22503 -3 22500 22501 22789 -3 22500 22789 22788 -3 22501 22504 22792 -3 22501 22792 22789 -3 22502 22503 22791 -3 22502 22791 22790 -3 22502 22790 22860 -3 22502 22860 22572 -3 22504 22505 22793 -3 22504 22793 22792 -3 22505 22506 22794 -3 22505 22794 22793 -3 22506 22507 22795 -3 22506 22795 22794 -3 22507 22508 22796 -3 22507 22796 22795 -3 22508 22509 22797 -3 22508 22797 22796 -3 22509 22510 22798 -3 22509 22798 22797 -3 22510 22511 22799 -3 22510 22799 22798 -3 22511 22512 22799 -3 22800 22799 22512 -3 22512 22513 22800 -3 22801 22800 22513 -3 22513 22514 22801 -3 22802 22801 22514 -3 22514 22515 22802 -3 22803 22802 22515 -3 22515 22516 22803 -3 22804 22803 22516 -3 22516 22517 22804 -3 22805 22804 22517 -3 22517 22518 22805 -3 22806 22805 22518 -3 22518 22519 22806 -3 22807 22806 22519 -3 22519 22520 22807 -3 22808 22807 22520 -3 22520 22521 22808 -3 22809 22808 22521 -3 22521 22522 22809 -3 22810 22809 22522 -3 22522 22523 22810 -3 22811 22810 22523 -3 22523 22524 22811 -3 22812 22811 22524 -3 22524 22525 22812 -3 22813 22812 22525 -3 22525 22526 22813 -3 22814 22813 22526 -3 22526 22527 22814 -3 22815 22814 22527 -3 22527 22528 22815 -3 22816 22815 22528 -3 22528 22529 22816 -3 22817 22816 22529 -3 22529 22530 22817 -3 22818 22817 22530 -3 22530 22531 22818 -3 22819 22818 22531 -3 22531 22532 22819 -3 22820 22819 22532 -3 22532 22533 22820 -3 22821 22820 22533 -3 22533 22534 22821 -3 22822 22821 22534 -3 22534 22535 22822 -3 22823 22822 22535 -3 22535 22536 22823 -3 22824 22823 22536 -3 22536 22537 22824 -3 22825 22824 22537 -3 22537 22538 22825 -3 22826 22825 22538 -3 22538 22539 22826 -3 22827 22826 22539 -3 22539 22540 22827 -3 22828 22827 22540 -3 22540 22541 22828 -3 22829 22828 22541 -3 22541 22542 22829 -3 22830 22829 22542 -3 22542 22543 22830 -3 22831 22830 22543 -3 22543 22544 22831 -3 22832 22831 22544 -3 22544 22545 22832 -3 22833 22832 22545 -3 22545 22546 22833 -3 22834 22833 22546 -3 22546 22547 22834 -3 22835 22834 22547 -3 22547 22548 22835 -3 22836 22835 22548 -3 22548 22549 22836 -3 22837 22836 22549 -3 22549 22550 22837 -3 22838 22837 22550 -3 22550 22551 22838 -3 22839 22838 22551 -3 22551 22552 22839 -3 22840 22839 22552 -3 22552 
22553 22840 -3 22841 22840 22553 -3 22553 22554 22841 -3 22842 22841 22554 -3 22554 22555 22842 -3 22843 22842 22555 -3 22555 22556 22843 -3 22844 22843 22556 -3 22556 22557 22844 -3 22845 22844 22557 -3 22557 22558 22845 -3 22846 22845 22558 -3 22558 22559 22846 -3 22847 22846 22559 -3 22559 22560 22847 -3 22848 22847 22560 -3 22560 22561 22848 -3 22849 22848 22561 -3 22561 22562 22849 -3 22850 22849 22562 -3 22562 22563 22850 -3 22851 22850 22563 -3 22563 22564 22851 -3 22852 22851 22564 -3 22564 22565 22852 -3 22853 22852 22565 -3 22565 22566 22853 -3 22854 22853 22566 -3 22566 22567 22854 -3 22855 22854 22567 -3 22567 22568 22855 -3 22856 22855 22568 -3 22568 22569 22856 -3 22857 22856 22569 -3 22569 22570 22857 -3 22858 22857 22570 -3 22570 22571 22858 -3 22859 22858 22571 -3 22571 22572 22859 -3 22860 22859 22572 -3 22573 22861 22574 -3 22862 22574 22861 -3 22573 22643 22931 -3 22573 22931 22861 -3 22574 22862 22575 -3 22863 22575 22862 -3 22575 22863 22576 -3 22864 22576 22863 -3 22576 22864 22577 -3 22865 22577 22864 -3 22577 22865 22578 -3 22866 22578 22865 -3 22578 22866 22579 -3 22867 22579 22866 -3 22579 22867 22580 -3 22868 22580 22867 -3 22580 22868 22581 -3 22869 22581 22868 -3 22581 22869 22582 -3 22870 22582 22869 -3 22582 22870 22583 -3 22871 22583 22870 -3 22583 22871 22584 -3 22872 22584 22871 -3 22584 22872 22585 -3 22873 22585 22872 -3 22585 22873 22586 -3 22874 22586 22873 -3 22586 22874 22587 -3 22875 22587 22874 -3 22587 22875 22588 -3 22876 22588 22875 -3 22588 22876 22589 -3 22877 22589 22876 -3 22589 22877 22590 -3 22878 22590 22877 -3 22590 22878 22591 -3 22879 22591 22878 -3 22591 22879 22592 -3 22880 22592 22879 -3 22592 22880 22593 -3 22881 22593 22880 -3 22593 22881 22594 -3 22882 22594 22881 -3 22594 22882 22595 -3 22883 22595 22882 -3 22595 22883 22596 -3 22884 22596 22883 -3 22596 22884 22597 -3 22885 22597 22884 -3 22597 22885 22598 -3 22886 22598 22885 -3 22598 22886 22599 -3 22887 22599 22886 -3 22599 22887 22600 -3 22888 22600 22887 -3 22600 22888 22601 -3 22889 22601 22888 -3 22601 22889 22602 -3 22890 22602 22889 -3 22602 22890 22603 -3 22891 22603 22890 -3 22603 22891 22604 -3 22892 22604 22891 -3 22604 22892 22605 -3 22893 22605 22892 -3 22605 22893 22606 -3 22894 22606 22893 -3 22606 22894 22607 -3 22895 22607 22894 -3 22607 22895 22608 -3 22896 22608 22895 -3 22608 22896 22609 -3 22897 22609 22896 -3 22609 22897 22610 -3 22898 22610 22897 -3 22610 22898 22611 -3 22899 22611 22898 -3 22611 22899 22612 -3 22900 22612 22899 -3 22612 22900 22613 -3 22901 22613 22900 -3 22613 22901 22614 -3 22902 22614 22901 -3 22614 22902 22615 -3 22903 22615 22902 -3 22615 22903 22616 -3 22904 22616 22903 -3 22616 22904 22617 -3 22905 22617 22904 -3 22617 22905 22618 -3 22906 22618 22905 -3 22618 22906 22619 -3 22907 22619 22906 -3 22619 22907 22620 -3 22908 22620 22907 -3 22620 22908 22621 -3 22909 22621 22908 -3 22621 22909 22622 -3 22910 22622 22909 -3 22622 22910 22623 -3 22911 22623 22910 -3 22623 22911 22912 -3 22623 22912 22624 -3 22624 22912 22913 -3 22624 22913 22625 -3 22625 22913 22914 -3 22625 22914 22626 -3 22626 22914 22915 -3 22626 22915 22627 -3 22627 22915 22916 -3 22627 22916 22628 -3 22628 22916 22917 -3 22628 22917 22629 -3 22629 22917 22918 -3 22629 22918 22630 -3 22630 22918 22919 -3 22630 22919 22631 -3 22631 22919 22920 -3 22631 22920 22632 -3 22632 22920 22921 -3 22632 22921 22633 -3 22633 22921 22922 -3 22633 22922 22634 -3 22634 22922 22923 -3 22634 22923 22635 -3 22635 22923 22924 -3 22635 22924 22636 -3 22636 22924 22925 -3 22636 22925 
22637 -3 22637 22925 22926 -3 22637 22926 22638 -3 22638 22926 22927 -3 22638 22927 22639 -3 22639 22927 22928 -3 22639 22928 22640 -3 22640 22928 22929 -3 22640 22929 22641 -3 22641 22929 22932 -3 22641 22932 22644 -3 22642 22930 22931 -3 22642 22931 22643 -3 22642 22647 22935 -3 22642 22935 22930 -3 22644 22932 22933 -3 22644 22933 22645 -3 22645 22933 22936 -3 22645 22936 22648 -3 22646 22934 22935 -3 22646 22935 22647 -3 22646 22650 22938 -3 22646 22938 22934 -3 22648 22936 22937 -3 22648 22937 22649 -3 22649 22937 22939 -3 22649 22939 22651 -3 22650 22652 22940 -3 22650 22940 22938 -3 22651 22939 22941 -3 22651 22941 22653 -3 22652 22654 22942 -3 22652 22942 22940 -3 22653 22941 22943 -3 22653 22943 22655 -3 22654 22656 22944 -3 22654 22944 22942 -3 22655 22943 22945 -3 22655 22945 22657 -3 22656 22658 22946 -3 22656 22946 22944 -3 22657 22945 22947 -3 22657 22947 22659 -3 22658 22660 22948 -3 22658 22948 22946 -3 22659 22947 22949 -3 22659 22949 22661 -3 22660 22662 22950 -3 22660 22950 22948 -3 22661 22949 22951 -3 22661 22951 22663 -3 22662 22664 22952 -3 22662 22952 22950 -3 22663 22951 22953 -3 22663 22953 22665 -3 22664 22666 22954 -3 22664 22954 22952 -3 22665 22953 22955 -3 22665 22955 22667 -3 22666 22668 22956 -3 22666 22956 22954 -3 22667 22955 22957 -3 22667 22957 22669 -3 22668 22670 22958 -3 22668 22958 22956 -3 22669 22957 22959 -3 22669 22959 22671 -3 22670 22672 22960 -3 22670 22960 22958 -3 22671 22959 22961 -3 22671 22961 22673 -3 22672 22674 22962 -3 22672 22962 22960 -3 22673 22961 22963 -3 22673 22963 22675 -3 22674 22676 22964 -3 22674 22964 22962 -3 22675 22963 22965 -3 22675 22965 22677 -3 22676 22678 22966 -3 22676 22966 22964 -3 22677 22965 22967 -3 22677 22967 22679 -3 22678 22680 22968 -3 22678 22968 22966 -3 22679 22967 22969 -3 22679 22969 22681 -3 22680 22682 22970 -3 22680 22970 22968 -3 22681 22969 22971 -3 22681 22971 22683 -3 22682 22684 22972 -3 22682 22972 22970 -3 22683 22971 22973 -3 22683 22973 22685 -3 22684 22686 22974 -3 22684 22974 22972 -3 22685 22973 22975 -3 22685 22975 22687 -3 22686 22688 22976 -3 22686 22976 22974 -3 22687 22975 22977 -3 22687 22977 22689 -3 22688 22690 22978 -3 22688 22978 22976 -3 22689 22977 22979 -3 22689 22979 22691 -3 22690 22692 22980 -3 22690 22980 22978 -3 22691 22979 22981 -3 22691 22981 22693 -3 22692 22694 22982 -3 22692 22982 22980 -3 22693 22981 22983 -3 22693 22983 22695 -3 22694 22696 22984 -3 22694 22984 22982 -3 22695 22983 22985 -3 22695 22985 22697 -3 22696 22698 22984 -3 22986 22984 22698 -3 22697 22985 22987 -3 22697 22987 22699 -3 22698 22700 22986 -3 22988 22986 22700 -3 22699 22987 22989 -3 22699 22989 22701 -3 22700 22702 22988 -3 22990 22988 22702 -3 22701 22989 22991 -3 22701 22991 22703 -3 22702 22704 22990 -3 22992 22990 22704 -3 22703 22991 22993 -3 22703 22993 22705 -3 22704 22706 22992 -3 22994 22992 22706 -3 22705 22993 22995 -3 22705 22995 22707 -3 22706 22708 22994 -3 22996 22994 22708 -3 22707 22995 22997 -3 22707 22997 22709 -3 22708 22710 22996 -3 22998 22996 22710 -3 22709 22997 22999 -3 22709 22999 22711 -3 22710 22712 22998 -3 23000 22998 22712 -3 22711 22999 23001 -3 22711 23001 22713 -3 22712 22714 23000 -3 23002 23000 22714 -3 22713 23001 23003 -3 22713 23003 22715 -3 22714 22716 23002 -3 23004 23002 22716 -3 22715 23003 23005 -3 22715 23005 22717 -3 22716 22718 23004 -3 23006 23004 22718 -3 22717 23005 22719 -3 23007 22719 23005 -3 22718 22720 23006 -3 23008 23006 22720 -3 22719 23007 22721 -3 23009 22721 23007 -3 22720 22722 23008 -3 23010 23008 22722 -3 22721 23009 22723 
-3 23011 22723 23009 -3 22722 22724 23010 -3 23012 23010 22724 -3 22723 23011 22725 -3 23013 22725 23011 -3 22724 22726 23012 -3 23014 23012 22726 -3 22725 23013 22727 -3 23015 22727 23013 -3 22726 22728 23014 -3 23016 23014 22728 -3 22727 23015 22729 -3 23017 22729 23015 -3 22728 22730 23016 -3 23018 23016 22730 -3 22729 23017 22731 -3 23019 22731 23017 -3 22730 22732 23018 -3 23020 23018 22732 -3 22731 23019 22733 -3 23021 22733 23019 -3 22732 22734 23020 -3 23022 23020 22734 -3 22733 23021 22735 -3 23023 22735 23021 -3 22734 22736 23022 -3 23024 23022 22736 -3 22735 23023 22737 -3 23025 22737 23023 -3 22736 22738 23024 -3 23026 23024 22738 -3 22737 23025 22739 -3 23027 22739 23025 -3 22738 22740 23026 -3 23028 23026 22740 -3 22739 23027 22741 -3 23029 22741 23027 -3 22740 22742 23028 -3 23030 23028 22742 -3 22741 23029 22743 -3 23031 22743 23029 -3 22742 22744 23030 -3 23032 23030 22744 -3 22743 23031 22745 -3 23033 22745 23031 -3 22744 22746 23032 -3 23034 23032 22746 -3 22745 23033 22747 -3 23035 22747 23033 -3 22746 22748 23034 -3 23036 23034 22748 -3 22747 23035 22749 -3 23037 22749 23035 -3 22748 22750 23036 -3 23038 23036 22750 -3 22749 23037 22751 -3 23039 22751 23037 -3 22750 22752 23038 -3 23040 23038 22752 -3 22751 23039 22753 -3 23041 22753 23039 -3 22752 22754 23040 -3 23042 23040 22754 -3 22753 23041 22755 -3 23043 22755 23041 -3 22754 22756 23042 -3 23044 23042 22756 -3 22755 23043 22757 -3 23045 22757 23043 -3 22756 22758 23044 -3 23046 23044 22758 -3 22757 23045 22759 -3 23047 22759 23045 -3 22758 22760 23046 -3 23048 23046 22760 -3 22759 23047 22761 -3 23049 22761 23047 -3 22760 22762 23048 -3 23050 23048 22762 -3 22761 23049 22763 -3 23051 22763 23049 -3 22762 22764 23050 -3 23052 23050 22764 -3 22763 23051 22765 -3 23053 22765 23051 -3 22764 22766 23052 -3 23054 23052 22766 -3 22765 23053 22767 -3 23055 22767 23053 -3 22766 22768 23054 -3 23056 23054 22768 -3 22767 23055 22769 -3 23057 22769 23055 -3 22768 22770 23056 -3 23058 23056 22770 -3 22769 23057 22771 -3 23059 22771 23057 -3 22770 22772 23058 -3 23060 23058 22772 -3 22771 23059 22773 -3 23061 22773 23059 -3 22772 22774 23060 -3 23062 23060 22774 -3 22773 23061 22775 -3 23063 22775 23061 -3 22774 22776 23062 -3 23064 23062 22776 -3 22775 23063 22777 -3 23065 22777 23063 -3 22776 22778 23064 -3 23066 23064 22778 -3 22777 23065 22779 -3 23067 22779 23065 -3 22778 22780 23066 -3 23068 23066 22780 -3 22779 23067 22781 -3 23069 22781 23067 -3 22780 22782 23068 -3 23070 23068 22782 -3 22781 23069 22783 -3 23071 22783 23069 -3 22782 22784 23070 -3 23072 23070 22784 -3 22783 23071 22787 -3 23075 22787 23071 -3 22784 22785 23072 -3 23073 23072 22785 -3 22785 22788 23073 -3 23076 23073 22788 -3 22786 22787 23074 -3 23075 23074 22787 -3 22786 23074 22791 -3 23079 22791 23074 -3 22788 22789 23077 -3 22788 23077 23076 -3 22789 22792 23080 -3 22789 23080 23077 -3 22790 22791 23079 -3 22790 23079 23078 -3 22790 23078 22860 -3 23148 22860 23078 -3 22792 22793 23081 -3 22792 23081 23080 -3 22793 22794 23082 -3 22793 23082 23081 -3 22794 22795 23083 -3 22794 23083 23082 -3 22795 22796 23084 -3 22795 23084 23083 -3 22796 22797 23085 -3 22796 23085 23084 -3 22797 22798 23086 -3 22797 23086 23085 -3 22798 22799 23087 -3 22798 23087 23086 -3 22799 22800 23088 -3 22799 23088 23087 -3 22800 22801 23089 -3 22800 23089 23088 -3 22801 22802 23090 -3 22801 23090 23089 -3 22802 22803 23091 -3 22802 23091 23090 -3 22803 22804 23092 -3 22803 23092 23091 -3 22804 22805 23093 -3 22804 23093 23092 -3 22805 22806 23094 -3 22805 23094 23093 -3 
22806 22807 23095 -3 22806 23095 23094 -3 22807 22808 23096 -3 22807 23096 23095 -3 22808 22809 23097 -3 22808 23097 23096 -3 22809 22810 23098 -3 22809 23098 23097 -3 22810 22811 23099 -3 22810 23099 23098 -3 22811 22812 23100 -3 22811 23100 23099 -3 22812 22813 23101 -3 22812 23101 23100 -3 22813 22814 23102 -3 22813 23102 23101 -3 22814 22815 23103 -3 22814 23103 23102 -3 22815 22816 23104 -3 22815 23104 23103 -3 22816 22817 23105 -3 22816 23105 23104 -3 22817 22818 23106 -3 22817 23106 23105 -3 22818 22819 23107 -3 22818 23107 23106 -3 22819 22820 23108 -3 22819 23108 23107 -3 22820 22821 23109 -3 22820 23109 23108 -3 22821 22822 23110 -3 22821 23110 23109 -3 22822 22823 23111 -3 22822 23111 23110 -3 22823 22824 23112 -3 22823 23112 23111 -3 22824 22825 23113 -3 22824 23113 23112 -3 22825 22826 23114 -3 22825 23114 23113 -3 22826 22827 23115 -3 22826 23115 23114 -3 22827 22828 23116 -3 22827 23116 23115 -3 22828 22829 23117 -3 22828 23117 23116 -3 22829 22830 23118 -3 22829 23118 23117 -3 22830 22831 23119 -3 22830 23119 23118 -3 22831 22832 23120 -3 22831 23120 23119 -3 22832 22833 23121 -3 22832 23121 23120 -3 22833 22834 23122 -3 22833 23122 23121 -3 22834 22835 23123 -3 22834 23123 23122 -3 22835 22836 23124 -3 22835 23124 23123 -3 22836 22837 23125 -3 22836 23125 23124 -3 22837 22838 23126 -3 22837 23126 23125 -3 22838 22839 23127 -3 22838 23127 23126 -3 22839 22840 23128 -3 22839 23128 23127 -3 22840 22841 23129 -3 22840 23129 23128 -3 22841 22842 23130 -3 22841 23130 23129 -3 22842 22843 23131 -3 22842 23131 23130 -3 22843 22844 23132 -3 22843 23132 23131 -3 22844 22845 23133 -3 22844 23133 23132 -3 22845 22846 23134 -3 22845 23134 23133 -3 22846 22847 23135 -3 22846 23135 23134 -3 22847 22848 23136 -3 22847 23136 23135 -3 22848 22849 23137 -3 22848 23137 23136 -3 22849 22850 23138 -3 22849 23138 23137 -3 22850 22851 23139 -3 22850 23139 23138 -3 22851 22852 23140 -3 22851 23140 23139 -3 22852 22853 23141 -3 22852 23141 23140 -3 22853 22854 23142 -3 22853 23142 23141 -3 22854 22855 23143 -3 22854 23143 23142 -3 22855 22856 23144 -3 22855 23144 23143 -3 22856 22857 23145 -3 22856 23145 23144 -3 22857 22858 23146 -3 22857 23146 23145 -3 22858 22859 23147 -3 22858 23147 23146 -3 22859 22860 23148 -3 22859 23148 23147 -3 22861 23149 23150 -3 22861 23150 22862 -3 22861 22931 23149 -3 23219 23149 22931 -3 22862 23150 23151 -3 22862 23151 22863 -3 22863 23151 23152 -3 22863 23152 22864 -3 22864 23152 23153 -3 22864 23153 22865 -3 22865 23153 23154 -3 22865 23154 22866 -3 22866 23154 23155 -3 22866 23155 22867 -3 22867 23155 23156 -3 22867 23156 22868 -3 22868 23156 23157 -3 22868 23157 22869 -3 22869 23157 23158 -3 22869 23158 22870 -3 22870 23158 23159 -3 22870 23159 22871 -3 22871 23159 23160 -3 22871 23160 22872 -3 22872 23160 23161 -3 22872 23161 22873 -3 22873 23161 23162 -3 22873 23162 22874 -3 22874 23162 23163 -3 22874 23163 22875 -3 22875 23163 23164 -3 22875 23164 22876 -3 22876 23164 23165 -3 22876 23165 22877 -3 22877 23165 23166 -3 22877 23166 22878 -3 22878 23166 23167 -3 22878 23167 22879 -3 22879 23167 23168 -3 22879 23168 22880 -3 22880 23168 23169 -3 22880 23169 22881 -3 22881 23169 23170 -3 22881 23170 22882 -3 22882 23170 23171 -3 22882 23171 22883 -3 22883 23171 23172 -3 22883 23172 22884 -3 22884 23172 23173 -3 22884 23173 22885 -3 22885 23173 23174 -3 22885 23174 22886 -3 22886 23174 23175 -3 22886 23175 22887 -3 22887 23175 23176 -3 22887 23176 22888 -3 22888 23176 23177 -3 22888 23177 22889 -3 22889 23177 23178 -3 22889 23178 22890 -3 22890 23178 23179 -3 22890 
23179 22891 -3 22891 23179 23180 -3 22891 23180 22892 -3 22892 23180 23181 -3 22892 23181 22893 -3 22893 23181 23182 -3 22893 23182 22894 -3 22894 23182 23183 -3 22894 23183 22895 -3 22895 23183 23184 -3 22895 23184 22896 -3 22896 23184 23185 -3 22896 23185 22897 -3 22897 23185 23186 -3 22897 23186 22898 -3 22898 23186 23187 -3 22898 23187 22899 -3 22899 23187 23188 -3 22899 23188 22900 -3 22900 23188 23189 -3 22900 23189 22901 -3 22901 23189 23190 -3 22901 23190 22902 -3 22902 23190 23191 -3 22902 23191 22903 -3 22903 23191 22904 -3 23192 22904 23191 -3 22904 23192 22905 -3 23193 22905 23192 -3 22905 23193 22906 -3 23194 22906 23193 -3 22906 23194 22907 -3 23195 22907 23194 -3 22907 23195 22908 -3 23196 22908 23195 -3 22908 23196 22909 -3 23197 22909 23196 -3 22909 23197 22910 -3 23198 22910 23197 -3 22910 23198 22911 -3 23199 22911 23198 -3 22911 23199 22912 -3 23200 22912 23199 -3 22912 23200 22913 -3 23201 22913 23200 -3 22913 23201 22914 -3 23202 22914 23201 -3 22914 23202 22915 -3 23203 22915 23202 -3 22915 23203 22916 -3 23204 22916 23203 -3 22916 23204 22917 -3 23205 22917 23204 -3 22917 23205 22918 -3 23206 22918 23205 -3 22918 23206 22919 -3 23207 22919 23206 -3 22919 23207 22920 -3 23208 22920 23207 -3 22920 23208 22921 -3 23209 22921 23208 -3 22921 23209 22922 -3 23210 22922 23209 -3 22922 23210 22923 -3 23211 22923 23210 -3 22923 23211 22924 -3 23212 22924 23211 -3 22924 23212 22925 -3 23213 22925 23212 -3 22925 23213 22926 -3 23214 22926 23213 -3 22926 23214 22927 -3 23215 22927 23214 -3 22927 23215 22928 -3 23216 22928 23215 -3 22928 23216 22929 -3 23217 22929 23216 -3 22929 23217 22932 -3 23220 22932 23217 -3 22930 23218 22931 -3 23219 22931 23218 -3 22930 22935 23218 -3 23223 23218 22935 -3 22932 23220 22933 -3 23221 22933 23220 -3 22933 23221 22936 -3 23224 22936 23221 -3 22934 23222 22935 -3 23223 22935 23222 -3 22934 22938 23222 -3 23226 23222 22938 -3 22936 23224 22937 -3 23225 22937 23224 -3 22937 23225 22939 -3 23227 22939 23225 -3 22938 22940 23226 -3 23228 23226 22940 -3 22939 23227 22941 -3 23229 22941 23227 -3 22940 22942 23228 -3 23230 23228 22942 -3 22941 23229 22943 -3 23231 22943 23229 -3 22942 22944 23230 -3 23232 23230 22944 -3 22943 23231 22945 -3 23233 22945 23231 -3 22944 22946 23232 -3 23234 23232 22946 -3 22945 23233 22947 -3 23235 22947 23233 -3 22946 22948 23234 -3 23236 23234 22948 -3 22947 23235 22949 -3 23237 22949 23235 -3 22948 22950 23236 -3 23238 23236 22950 -3 22949 23237 22951 -3 23239 22951 23237 -3 22950 22952 23238 -3 23240 23238 22952 -3 22951 23239 22953 -3 23241 22953 23239 -3 22952 22954 23240 -3 23242 23240 22954 -3 22953 23241 22955 -3 23243 22955 23241 -3 22954 22956 23242 -3 23244 23242 22956 -3 22955 23243 22957 -3 23245 22957 23243 -3 22956 22958 23244 -3 23246 23244 22958 -3 22957 23245 22959 -3 23247 22959 23245 -3 22958 22960 23246 -3 23248 23246 22960 -3 22959 23247 22961 -3 23249 22961 23247 -3 22960 22962 23248 -3 23250 23248 22962 -3 22961 23249 22963 -3 23251 22963 23249 -3 22962 22964 23250 -3 23252 23250 22964 -3 22963 23251 22965 -3 23253 22965 23251 -3 22964 22966 23252 -3 23254 23252 22966 -3 22965 23253 22967 -3 23255 22967 23253 -3 22966 22968 23254 -3 23256 23254 22968 -3 22967 23255 22969 -3 23257 22969 23255 -3 22968 22970 23256 -3 23258 23256 22970 -3 22969 23257 22971 -3 23259 22971 23257 -3 22970 22972 23258 -3 23260 23258 22972 -3 22971 23259 22973 -3 23261 22973 23259 -3 22972 22974 23260 -3 23262 23260 22974 -3 22973 23261 22975 -3 23263 22975 23261 -3 22974 22976 23264 -3 22974 23264 23262 -3 22975 23263 
22977 -3 23265 22977 23263 -3 22976 22978 23266 -3 22976 23266 23264 -3 22977 23265 22979 -3 23267 22979 23265 -3 22978 22980 23268 -3 22978 23268 23266 -3 22979 23267 22981 -3 23269 22981 23267 -3 22980 22982 23270 -3 22980 23270 23268 -3 22981 23269 22983 -3 23271 22983 23269 -3 22982 22984 23272 -3 22982 23272 23270 -3 22983 23271 22985 -3 23273 22985 23271 -3 22984 22986 23274 -3 22984 23274 23272 -3 22985 23273 22987 -3 23275 22987 23273 -3 22986 22988 23276 -3 22986 23276 23274 -3 22987 23275 22989 -3 23277 22989 23275 -3 22988 22990 23278 -3 22988 23278 23276 -3 22989 23277 22991 -3 23279 22991 23277 -3 22990 22992 23280 -3 22990 23280 23278 -3 22991 23279 22993 -3 23281 22993 23279 -3 22992 22994 23282 -3 22992 23282 23280 -3 22993 23281 22995 -3 23283 22995 23281 -3 22994 22996 23284 -3 22994 23284 23282 -3 22995 23283 22997 -3 23285 22997 23283 -3 22996 22998 23286 -3 22996 23286 23284 -3 22997 23285 23287 -3 22997 23287 22999 -3 22998 23000 23288 -3 22998 23288 23286 -3 22999 23287 23289 -3 22999 23289 23001 -3 23000 23002 23290 -3 23000 23290 23288 -3 23001 23289 23291 -3 23001 23291 23003 -3 23002 23004 23292 -3 23002 23292 23290 -3 23003 23291 23293 -3 23003 23293 23005 -3 23004 23006 23294 -3 23004 23294 23292 -3 23005 23293 23295 -3 23005 23295 23007 -3 23006 23008 23296 -3 23006 23296 23294 -3 23007 23295 23297 -3 23007 23297 23009 -3 23008 23010 23298 -3 23008 23298 23296 -3 23009 23297 23299 -3 23009 23299 23011 -3 23010 23012 23300 -3 23010 23300 23298 -3 23011 23299 23301 -3 23011 23301 23013 -3 23012 23014 23302 -3 23012 23302 23300 -3 23013 23301 23303 -3 23013 23303 23015 -3 23014 23016 23304 -3 23014 23304 23302 -3 23015 23303 23305 -3 23015 23305 23017 -3 23016 23018 23306 -3 23016 23306 23304 -3 23017 23305 23307 -3 23017 23307 23019 -3 23018 23020 23308 -3 23018 23308 23306 -3 23019 23307 23309 -3 23019 23309 23021 -3 23020 23022 23310 -3 23020 23310 23308 -3 23021 23309 23311 -3 23021 23311 23023 -3 23022 23024 23312 -3 23022 23312 23310 -3 23023 23311 23313 -3 23023 23313 23025 -3 23024 23026 23314 -3 23024 23314 23312 -3 23025 23313 23315 -3 23025 23315 23027 -3 23026 23028 23316 -3 23026 23316 23314 -3 23027 23315 23317 -3 23027 23317 23029 -3 23028 23030 23318 -3 23028 23318 23316 -3 23029 23317 23319 -3 23029 23319 23031 -3 23030 23032 23320 -3 23030 23320 23318 -3 23031 23319 23321 -3 23031 23321 23033 -3 23032 23034 23322 -3 23032 23322 23320 -3 23033 23321 23323 -3 23033 23323 23035 -3 23034 23036 23324 -3 23034 23324 23322 -3 23035 23323 23325 -3 23035 23325 23037 -3 23036 23038 23326 -3 23036 23326 23324 -3 23037 23325 23327 -3 23037 23327 23039 -3 23038 23040 23328 -3 23038 23328 23326 -3 23039 23327 23329 -3 23039 23329 23041 -3 23040 23042 23330 -3 23040 23330 23328 -3 23041 23329 23331 -3 23041 23331 23043 -3 23042 23044 23332 -3 23042 23332 23330 -3 23043 23331 23333 -3 23043 23333 23045 -3 23044 23046 23334 -3 23044 23334 23332 -3 23045 23333 23335 -3 23045 23335 23047 -3 23046 23048 23336 -3 23046 23336 23334 -3 23047 23335 23337 -3 23047 23337 23049 -3 23048 23050 23338 -3 23048 23338 23336 -3 23049 23337 23339 -3 23049 23339 23051 -3 23050 23052 23340 -3 23050 23340 23338 -3 23051 23339 23341 -3 23051 23341 23053 -3 23052 23054 23342 -3 23052 23342 23340 -3 23053 23341 23343 -3 23053 23343 23055 -3 23054 23056 23344 -3 23054 23344 23342 -3 23055 23343 23345 -3 23055 23345 23057 -3 23056 23058 23346 -3 23056 23346 23344 -3 23057 23345 23347 -3 23057 23347 23059 -3 23058 23060 23348 -3 23058 23348 23346 -3 23059 23347 23349 -3 23059 23349 23061 
-3 23060 23062 23350 -3 23060 23350 23348 -3 23061 23349 23351 -3 23061 23351 23063 -3 23062 23064 23352 -3 23062 23352 23350 -3 23063 23351 23353 -3 23063 23353 23065 -3 23064 23066 23354 -3 23064 23354 23352 -3 23065 23353 23355 -3 23065 23355 23067 -3 23066 23068 23356 -3 23066 23356 23354 -3 23067 23355 23357 -3 23067 23357 23069 -3 23068 23070 23356 -3 23358 23356 23070 -3 23069 23357 23359 -3 23069 23359 23071 -3 23070 23072 23358 -3 23360 23358 23072 -3 23071 23359 23363 -3 23071 23363 23075 -3 23072 23073 23360 -3 23361 23360 23073 -3 23073 23076 23361 -3 23364 23361 23076 -3 23074 23075 23362 -3 23363 23362 23075 -3 23074 23362 23367 -3 23074 23367 23079 -3 23076 23077 23364 -3 23365 23364 23077 -3 23077 23080 23365 -3 23368 23365 23080 -3 23078 23079 23366 -3 23367 23366 23079 -3 23078 23366 23436 -3 23078 23436 23148 -3 23080 23081 23368 -3 23369 23368 23081 -3 23081 23082 23369 -3 23370 23369 23082 -3 23082 23083 23370 -3 23371 23370 23083 -3 23083 23084 23371 -3 23372 23371 23084 -3 23084 23085 23372 -3 23373 23372 23085 -3 23085 23086 23373 -3 23374 23373 23086 -3 23086 23087 23374 -3 23375 23374 23087 -3 23087 23088 23375 -3 23376 23375 23088 -3 23088 23089 23376 -3 23377 23376 23089 -3 23089 23090 23377 -3 23378 23377 23090 -3 23090 23091 23378 -3 23379 23378 23091 -3 23091 23092 23379 -3 23380 23379 23092 -3 23092 23093 23380 -3 23381 23380 23093 -3 23093 23094 23381 -3 23382 23381 23094 -3 23094 23095 23382 -3 23383 23382 23095 -3 23095 23096 23383 -3 23384 23383 23096 -3 23096 23097 23384 -3 23385 23384 23097 -3 23097 23098 23385 -3 23386 23385 23098 -3 23098 23099 23386 -3 23387 23386 23099 -3 23099 23100 23387 -3 23388 23387 23100 -3 23100 23101 23388 -3 23389 23388 23101 -3 23101 23102 23389 -3 23390 23389 23102 -3 23102 23103 23390 -3 23391 23390 23103 -3 23103 23104 23391 -3 23392 23391 23104 -3 23104 23105 23392 -3 23393 23392 23105 -3 23105 23106 23393 -3 23394 23393 23106 -3 23106 23107 23394 -3 23395 23394 23107 -3 23107 23108 23395 -3 23396 23395 23108 -3 23108 23109 23396 -3 23397 23396 23109 -3 23109 23110 23397 -3 23398 23397 23110 -3 23110 23111 23398 -3 23399 23398 23111 -3 23111 23112 23399 -3 23400 23399 23112 -3 23112 23113 23400 -3 23401 23400 23113 -3 23113 23114 23401 -3 23402 23401 23114 -3 23114 23115 23402 -3 23403 23402 23115 -3 23115 23116 23403 -3 23404 23403 23116 -3 23116 23117 23404 -3 23405 23404 23117 -3 23117 23118 23405 -3 23406 23405 23118 -3 23118 23119 23406 -3 23407 23406 23119 -3 23119 23120 23407 -3 23408 23407 23120 -3 23120 23121 23408 -3 23409 23408 23121 -3 23121 23122 23409 -3 23410 23409 23122 -3 23122 23123 23410 -3 23411 23410 23123 -3 23123 23124 23411 -3 23412 23411 23124 -3 23124 23125 23412 -3 23413 23412 23125 -3 23125 23126 23413 -3 23414 23413 23126 -3 23126 23127 23414 -3 23415 23414 23127 -3 23127 23128 23415 -3 23416 23415 23128 -3 23128 23129 23416 -3 23417 23416 23129 -3 23129 23130 23417 -3 23418 23417 23130 -3 23130 23131 23418 -3 23419 23418 23131 -3 23131 23132 23419 -3 23420 23419 23132 -3 23132 23133 23420 -3 23421 23420 23133 -3 23133 23134 23421 -3 23422 23421 23134 -3 23134 23135 23422 -3 23423 23422 23135 -3 23135 23136 23423 -3 23424 23423 23136 -3 23136 23137 23424 -3 23425 23424 23137 -3 23137 23138 23425 -3 23426 23425 23138 -3 23138 23139 23426 -3 23427 23426 23139 -3 23139 23140 23427 -3 23428 23427 23140 -3 23140 23141 23428 -3 23429 23428 23141 -3 23141 23142 23429 -3 23430 23429 23142 -3 23142 23143 23430 -3 23431 23430 23143 -3 23143 23144 23431 -3 23432 23431 23144 -3 23144 23145 23432 -3 
23433 23432 23145 -3 23145 23146 23433 -3 23434 23433 23146 -3 23146 23147 23434 -3 23435 23434 23147 -3 23147 23148 23435 -3 23436 23435 23148 -3 23149 23437 23150 -3 23438 23150 23437 -3 23149 23219 23507 -3 23149 23507 23437 -3 23150 23438 23151 -3 23439 23151 23438 -3 23151 23439 23152 -3 23440 23152 23439 -3 23152 23440 23153 -3 23441 23153 23440 -3 23153 23441 23154 -3 23442 23154 23441 -3 23154 23442 23155 -3 23443 23155 23442 -3 23155 23443 23156 -3 23444 23156 23443 -3 23156 23444 23157 -3 23445 23157 23444 -3 23157 23445 23158 -3 23446 23158 23445 -3 23158 23446 23159 -3 23447 23159 23446 -3 23159 23447 23160 -3 23448 23160 23447 -3 23160 23448 23161 -3 23449 23161 23448 -3 23161 23449 23162 -3 23450 23162 23449 -3 23162 23450 23163 -3 23451 23163 23450 -3 23163 23451 23164 -3 23452 23164 23451 -3 23164 23452 23165 -3 23453 23165 23452 -3 23165 23453 23166 -3 23454 23166 23453 -3 23166 23454 23167 -3 23455 23167 23454 -3 23167 23455 23168 -3 23456 23168 23455 -3 23168 23456 23169 -3 23457 23169 23456 -3 23169 23457 23170 -3 23458 23170 23457 -3 23170 23458 23171 -3 23459 23171 23458 -3 23171 23459 23172 -3 23460 23172 23459 -3 23172 23460 23173 -3 23461 23173 23460 -3 23173 23461 23174 -3 23462 23174 23461 -3 23174 23462 23175 -3 23463 23175 23462 -3 23175 23463 23176 -3 23464 23176 23463 -3 23176 23464 23177 -3 23465 23177 23464 -3 23177 23465 23178 -3 23466 23178 23465 -3 23178 23466 23179 -3 23467 23179 23466 -3 23179 23467 23180 -3 23468 23180 23467 -3 23180 23468 23181 -3 23469 23181 23468 -3 23181 23469 23182 -3 23470 23182 23469 -3 23182 23470 23183 -3 23471 23183 23470 -3 23183 23471 23184 -3 23472 23184 23471 -3 23184 23472 23473 -3 23184 23473 23185 -3 23185 23473 23474 -3 23185 23474 23186 -3 23186 23474 23475 -3 23186 23475 23187 -3 23187 23475 23476 -3 23187 23476 23188 -3 23188 23476 23477 -3 23188 23477 23189 -3 23189 23477 23478 -3 23189 23478 23190 -3 23190 23478 23479 -3 23190 23479 23191 -3 23191 23479 23480 -3 23191 23480 23192 -3 23192 23480 23481 -3 23192 23481 23193 -3 23193 23481 23482 -3 23193 23482 23194 -3 23194 23482 23483 -3 23194 23483 23195 -3 23195 23483 23484 -3 23195 23484 23196 -3 23196 23484 23485 -3 23196 23485 23197 -3 23197 23485 23486 -3 23197 23486 23198 -3 23198 23486 23487 -3 23198 23487 23199 -3 23199 23487 23488 -3 23199 23488 23200 -3 23200 23488 23489 -3 23200 23489 23201 -3 23201 23489 23490 -3 23201 23490 23202 -3 23202 23490 23491 -3 23202 23491 23203 -3 23203 23491 23492 -3 23203 23492 23204 -3 23204 23492 23493 -3 23204 23493 23205 -3 23205 23493 23494 -3 23205 23494 23206 -3 23206 23494 23495 -3 23206 23495 23207 -3 23207 23495 23496 -3 23207 23496 23208 -3 23208 23496 23497 -3 23208 23497 23209 -3 23209 23497 23498 -3 23209 23498 23210 -3 23210 23498 23499 -3 23210 23499 23211 -3 23211 23499 23500 -3 23211 23500 23212 -3 23212 23500 23501 -3 23212 23501 23213 -3 23213 23501 23502 -3 23213 23502 23214 -3 23214 23502 23503 -3 23214 23503 23215 -3 23215 23503 23504 -3 23215 23504 23216 -3 23216 23504 23505 -3 23216 23505 23217 -3 23217 23505 23508 -3 23217 23508 23220 -3 23218 23506 23507 -3 23218 23507 23219 -3 23218 23223 23511 -3 23218 23511 23506 -3 23220 23508 23509 -3 23220 23509 23221 -3 23221 23509 23512 -3 23221 23512 23224 -3 23222 23510 23511 -3 23222 23511 23223 -3 23222 23226 23514 -3 23222 23514 23510 -3 23224 23512 23513 -3 23224 23513 23225 -3 23225 23513 23515 -3 23225 23515 23227 -3 23226 23228 23516 -3 23226 23516 23514 -3 23227 23515 23517 -3 23227 23517 23229 -3 23228 23230 23518 -3 23228 23518 23516 -3 23229 
23517 23519 -3 23229 23519 23231 -3 23230 23232 23520 -3 23230 23520 23518 -3 23231 23519 23521 -3 23231 23521 23233 -3 23232 23234 23522 -3 23232 23522 23520 -3 23233 23521 23523 -3 23233 23523 23235 -3 23234 23236 23524 -3 23234 23524 23522 -3 23235 23523 23525 -3 23235 23525 23237 -3 23236 23238 23526 -3 23236 23526 23524 -3 23237 23525 23527 -3 23237 23527 23239 -3 23238 23240 23528 -3 23238 23528 23526 -3 23239 23527 23529 -3 23239 23529 23241 -3 23240 23242 23530 -3 23240 23530 23528 -3 23241 23529 23531 -3 23241 23531 23243 -3 23242 23244 23532 -3 23242 23532 23530 -3 23243 23531 23533 -3 23243 23533 23245 -3 23244 23246 23534 -3 23244 23534 23532 -3 23245 23533 23535 -3 23245 23535 23247 -3 23246 23248 23536 -3 23246 23536 23534 -3 23247 23535 23537 -3 23247 23537 23249 -3 23248 23250 23538 -3 23248 23538 23536 -3 23249 23537 23539 -3 23249 23539 23251 -3 23250 23252 23540 -3 23250 23540 23538 -3 23251 23539 23541 -3 23251 23541 23253 -3 23252 23254 23542 -3 23252 23542 23540 -3 23253 23541 23543 -3 23253 23543 23255 -3 23254 23256 23544 -3 23254 23544 23542 -3 23255 23543 23545 -3 23255 23545 23257 -3 23256 23258 23544 -3 23546 23544 23258 -3 23257 23545 23547 -3 23257 23547 23259 -3 23258 23260 23546 -3 23548 23546 23260 -3 23259 23547 23549 -3 23259 23549 23261 -3 23260 23262 23548 -3 23550 23548 23262 -3 23261 23549 23551 -3 23261 23551 23263 -3 23262 23264 23550 -3 23552 23550 23264 -3 23263 23551 23553 -3 23263 23553 23265 -3 23264 23266 23552 -3 23554 23552 23266 -3 23265 23553 23555 -3 23265 23555 23267 -3 23266 23268 23554 -3 23556 23554 23268 -3 23267 23555 23557 -3 23267 23557 23269 -3 23268 23270 23556 -3 23558 23556 23270 -3 23269 23557 23559 -3 23269 23559 23271 -3 23270 23272 23558 -3 23560 23558 23272 -3 23271 23559 23561 -3 23271 23561 23273 -3 23272 23274 23560 -3 23562 23560 23274 -3 23273 23561 23563 -3 23273 23563 23275 -3 23274 23276 23562 -3 23564 23562 23276 -3 23275 23563 23565 -3 23275 23565 23277 -3 23276 23278 23564 -3 23566 23564 23278 -3 23277 23565 23567 -3 23277 23567 23279 -3 23278 23280 23566 -3 23568 23566 23280 -3 23279 23567 23281 -3 23569 23281 23567 -3 23280 23282 23568 -3 23570 23568 23282 -3 23281 23569 23283 -3 23571 23283 23569 -3 23282 23284 23570 -3 23572 23570 23284 -3 23283 23571 23285 -3 23573 23285 23571 -3 23284 23286 23572 -3 23574 23572 23286 -3 23285 23573 23287 -3 23575 23287 23573 -3 23286 23288 23574 -3 23576 23574 23288 -3 23287 23575 23289 -3 23577 23289 23575 -3 23288 23290 23576 -3 23578 23576 23290 -3 23289 23577 23291 -3 23579 23291 23577 -3 23290 23292 23578 -3 23580 23578 23292 -3 23291 23579 23293 -3 23581 23293 23579 -3 23292 23294 23580 -3 23582 23580 23294 -3 23293 23581 23295 -3 23583 23295 23581 -3 23294 23296 23582 -3 23584 23582 23296 -3 23295 23583 23297 -3 23585 23297 23583 -3 23296 23298 23584 -3 23586 23584 23298 -3 23297 23585 23299 -3 23587 23299 23585 -3 23298 23300 23586 -3 23588 23586 23300 -3 23299 23587 23301 -3 23589 23301 23587 -3 23300 23302 23588 -3 23590 23588 23302 -3 23301 23589 23303 -3 23591 23303 23589 -3 23302 23304 23590 -3 23592 23590 23304 -3 23303 23591 23305 -3 23593 23305 23591 -3 23304 23306 23592 -3 23594 23592 23306 -3 23305 23593 23307 -3 23595 23307 23593 -3 23306 23308 23594 -3 23596 23594 23308 -3 23307 23595 23309 -3 23597 23309 23595 -3 23308 23310 23596 -3 23598 23596 23310 -3 23309 23597 23311 -3 23599 23311 23597 -3 23310 23312 23598 -3 23600 23598 23312 -3 23311 23599 23313 -3 23601 23313 23599 -3 23312 23314 23600 -3 23602 23600 23314 -3 23313 23601 23315 -3 23603 23315 
23601 -3 23314 23316 23602 -3 23604 23602 23316 -3 23315 23603 23317 -3 23605 23317 23603 -3 23316 23318 23604 -3 23606 23604 23318 -3 23317 23605 23319 -3 23607 23319 23605 -3 23318 23320 23606 -3 23608 23606 23320 -3 23319 23607 23321 -3 23609 23321 23607 -3 23320 23322 23608 -3 23610 23608 23322 -3 23321 23609 23323 -3 23611 23323 23609 -3 23322 23324 23610 -3 23612 23610 23324 -3 23323 23611 23325 -3 23613 23325 23611 -3 23324 23326 23612 -3 23614 23612 23326 -3 23325 23613 23327 -3 23615 23327 23613 -3 23326 23328 23614 -3 23616 23614 23328 -3 23327 23615 23329 -3 23617 23329 23615 -3 23328 23330 23616 -3 23618 23616 23330 -3 23329 23617 23331 -3 23619 23331 23617 -3 23330 23332 23618 -3 23620 23618 23332 -3 23331 23619 23333 -3 23621 23333 23619 -3 23332 23334 23620 -3 23622 23620 23334 -3 23333 23621 23335 -3 23623 23335 23621 -3 23334 23336 23622 -3 23624 23622 23336 -3 23335 23623 23337 -3 23625 23337 23623 -3 23336 23338 23624 -3 23626 23624 23338 -3 23337 23625 23339 -3 23627 23339 23625 -3 23338 23340 23626 -3 23628 23626 23340 -3 23339 23627 23341 -3 23629 23341 23627 -3 23340 23342 23628 -3 23630 23628 23342 -3 23341 23629 23343 -3 23631 23343 23629 -3 23342 23344 23630 -3 23632 23630 23344 -3 23343 23631 23345 -3 23633 23345 23631 -3 23344 23346 23632 -3 23634 23632 23346 -3 23345 23633 23347 -3 23635 23347 23633 -3 23346 23348 23634 -3 23636 23634 23348 -3 23347 23635 23349 -3 23637 23349 23635 -3 23348 23350 23636 -3 23638 23636 23350 -3 23349 23637 23351 -3 23639 23351 23637 -3 23350 23352 23640 -3 23350 23640 23638 -3 23351 23639 23353 -3 23641 23353 23639 -3 23352 23354 23642 -3 23352 23642 23640 -3 23353 23641 23355 -3 23643 23355 23641 -3 23354 23356 23644 -3 23354 23644 23642 -3 23355 23643 23357 -3 23645 23357 23643 -3 23356 23358 23646 -3 23356 23646 23644 -3 23357 23645 23359 -3 23647 23359 23645 -3 23358 23360 23648 -3 23358 23648 23646 -3 23359 23647 23363 -3 23651 23363 23647 -3 23360 23361 23649 -3 23360 23649 23648 -3 23361 23364 23652 -3 23361 23652 23649 -3 23362 23363 23651 -3 23362 23651 23650 -3 23362 23650 23367 -3 23655 23367 23650 -3 23364 23365 23653 -3 23364 23653 23652 -3 23365 23368 23656 -3 23365 23656 23653 -3 23366 23367 23655 -3 23366 23655 23654 -3 23366 23654 23436 -3 23724 23436 23654 -3 23368 23369 23657 -3 23368 23657 23656 -3 23369 23370 23658 -3 23369 23658 23657 -3 23370 23371 23659 -3 23370 23659 23658 -3 23371 23372 23660 -3 23371 23660 23659 -3 23372 23373 23661 -3 23372 23661 23660 -3 23373 23374 23662 -3 23373 23662 23661 -3 23374 23375 23663 -3 23374 23663 23662 -3 23375 23376 23664 -3 23375 23664 23663 -3 23376 23377 23665 -3 23376 23665 23664 -3 23377 23378 23666 -3 23377 23666 23665 -3 23378 23379 23667 -3 23378 23667 23666 -3 23379 23380 23668 -3 23379 23668 23667 -3 23380 23381 23669 -3 23380 23669 23668 -3 23381 23382 23670 -3 23381 23670 23669 -3 23382 23383 23671 -3 23382 23671 23670 -3 23383 23384 23672 -3 23383 23672 23671 -3 23384 23385 23673 -3 23384 23673 23672 -3 23385 23386 23674 -3 23385 23674 23673 -3 23386 23387 23675 -3 23386 23675 23674 -3 23387 23388 23676 -3 23387 23676 23675 -3 23388 23389 23677 -3 23388 23677 23676 -3 23389 23390 23678 -3 23389 23678 23677 -3 23390 23391 23679 -3 23390 23679 23678 -3 23391 23392 23680 -3 23391 23680 23679 -3 23392 23393 23681 -3 23392 23681 23680 -3 23393 23394 23682 -3 23393 23682 23681 -3 23394 23395 23683 -3 23394 23683 23682 -3 23395 23396 23684 -3 23395 23684 23683 -3 23396 23397 23685 -3 23396 23685 23684 -3 23397 23398 23686 -3 23397 23686 23685 -3 23398 23399 23687 
-3 23398 23687 23686 -3 23399 23400 23688 -3 23399 23688 23687 -3 23400 23401 23689 -3 23400 23689 23688 -3 23401 23402 23690 -3 23401 23690 23689 -3 23402 23403 23691 -3 23402 23691 23690 -3 23403 23404 23692 -3 23403 23692 23691 -3 23404 23405 23693 -3 23404 23693 23692 -3 23405 23406 23694 -3 23405 23694 23693 -3 23406 23407 23695 -3 23406 23695 23694 -3 23407 23408 23696 -3 23407 23696 23695 -3 23408 23409 23697 -3 23408 23697 23696 -3 23409 23410 23698 -3 23409 23698 23697 -3 23410 23411 23699 -3 23410 23699 23698 -3 23411 23412 23700 -3 23411 23700 23699 -3 23412 23413 23701 -3 23412 23701 23700 -3 23413 23414 23702 -3 23413 23702 23701 -3 23414 23415 23703 -3 23414 23703 23702 -3 23415 23416 23704 -3 23415 23704 23703 -3 23416 23417 23705 -3 23416 23705 23704 -3 23417 23418 23706 -3 23417 23706 23705 -3 23418 23419 23707 -3 23418 23707 23706 -3 23419 23420 23708 -3 23419 23708 23707 -3 23420 23421 23709 -3 23420 23709 23708 -3 23421 23422 23710 -3 23421 23710 23709 -3 23422 23423 23711 -3 23422 23711 23710 -3 23423 23424 23712 -3 23423 23712 23711 -3 23424 23425 23713 -3 23424 23713 23712 -3 23425 23426 23714 -3 23425 23714 23713 -3 23426 23427 23715 -3 23426 23715 23714 -3 23427 23428 23716 -3 23427 23716 23715 -3 23428 23429 23717 -3 23428 23717 23716 -3 23429 23430 23718 -3 23429 23718 23717 -3 23430 23431 23719 -3 23430 23719 23718 -3 23431 23432 23720 -3 23431 23720 23719 -3 23432 23433 23721 -3 23432 23721 23720 -3 23433 23434 23722 -3 23433 23722 23721 -3 23434 23435 23723 -3 23434 23723 23722 -3 23435 23436 23724 -3 23435 23724 23723 -3 23437 23725 23726 -3 23437 23726 23438 -3 23437 23507 23725 -3 23795 23725 23507 -3 23438 23726 23727 -3 23438 23727 23439 -3 23439 23727 23728 -3 23439 23728 23440 -3 23440 23728 23729 -3 23440 23729 23441 -3 23441 23729 23730 -3 23441 23730 23442 -3 23442 23730 23731 -3 23442 23731 23443 -3 23443 23731 23732 -3 23443 23732 23444 -3 23444 23732 23733 -3 23444 23733 23445 -3 23445 23733 23734 -3 23445 23734 23446 -3 23446 23734 23735 -3 23446 23735 23447 -3 23447 23735 23736 -3 23447 23736 23448 -3 23448 23736 23737 -3 23448 23737 23449 -3 23449 23737 23738 -3 23449 23738 23450 -3 23450 23738 23739 -3 23450 23739 23451 -3 23451 23739 23740 -3 23451 23740 23452 -3 23452 23740 23741 -3 23452 23741 23453 -3 23453 23741 23742 -3 23453 23742 23454 -3 23454 23742 23743 -3 23454 23743 23455 -3 23455 23743 23744 -3 23455 23744 23456 -3 23456 23744 23745 -3 23456 23745 23457 -3 23457 23745 23746 -3 23457 23746 23458 -3 23458 23746 23747 -3 23458 23747 23459 -3 23459 23747 23748 -3 23459 23748 23460 -3 23460 23748 23749 -3 23460 23749 23461 -3 23461 23749 23750 -3 23461 23750 23462 -3 23462 23750 23751 -3 23462 23751 23463 -3 23463 23751 23752 -3 23463 23752 23464 -3 23464 23752 23753 -3 23464 23753 23465 -3 23465 23753 23754 -3 23465 23754 23466 -3 23466 23754 23755 -3 23466 23755 23467 -3 23467 23755 23468 -3 23756 23468 23755 -3 23468 23756 23469 -3 23757 23469 23756 -3 23469 23757 23470 -3 23758 23470 23757 -3 23470 23758 23471 -3 23759 23471 23758 -3 23471 23759 23472 -3 23760 23472 23759 -3 23472 23760 23473 -3 23761 23473 23760 -3 23473 23761 23474 -3 23762 23474 23761 -3 23474 23762 23475 -3 23763 23475 23762 -3 23475 23763 23476 -3 23764 23476 23763 -3 23476 23764 23477 -3 23765 23477 23764 -3 23477 23765 23478 -3 23766 23478 23765 -3 23478 23766 23479 -3 23767 23479 23766 -3 23479 23767 23480 -3 23768 23480 23767 -3 23480 23768 23481 -3 23769 23481 23768 -3 23481 23769 23482 -3 23770 23482 23769 -3 23482 23770 23483 -3 23771 23483 23770 -3 
23483 23771 23484 -3 23772 23484 23771 -3 23484 23772 23485 -3 23773 23485 23772 -3 23485 23773 23486 -3 23774 23486 23773 -3 23486 23774 23487 -3 23775 23487 23774 -3 23487 23775 23488 -3 23776 23488 23775 -3 23488 23776 23489 -3 23777 23489 23776 -3 23489 23777 23490 -3 23778 23490 23777 -3 23490 23778 23491 -3 23779 23491 23778 -3 23491 23779 23492 -3 23780 23492 23779 -3 23492 23780 23493 -3 23781 23493 23780 -3 23493 23781 23494 -3 23782 23494 23781 -3 23494 23782 23495 -3 23783 23495 23782 -3 23495 23783 23496 -3 23784 23496 23783 -3 23496 23784 23497 -3 23785 23497 23784 -3 23497 23785 23498 -3 23786 23498 23785 -3 23498 23786 23499 -3 23787 23499 23786 -3 23499 23787 23500 -3 23788 23500 23787 -3 23500 23788 23501 -3 23789 23501 23788 -3 23501 23789 23502 -3 23790 23502 23789 -3 23502 23790 23503 -3 23791 23503 23790 -3 23503 23791 23504 -3 23792 23504 23791 -3 23504 23792 23505 -3 23793 23505 23792 -3 23505 23793 23508 -3 23796 23508 23793 -3 23506 23794 23507 -3 23795 23507 23794 -3 23506 23511 23794 -3 23799 23794 23511 -3 23508 23796 23509 -3 23797 23509 23796 -3 23509 23797 23512 -3 23800 23512 23797 -3 23510 23798 23511 -3 23799 23511 23798 -3 23510 23514 23798 -3 23802 23798 23514 -3 23512 23800 23513 -3 23801 23513 23800 -3 23513 23801 23515 -3 23803 23515 23801 -3 23514 23516 23802 -3 23804 23802 23516 -3 23515 23803 23517 -3 23805 23517 23803 -3 23516 23518 23804 -3 23806 23804 23518 -3 23517 23805 23519 -3 23807 23519 23805 -3 23518 23520 23806 -3 23808 23806 23520 -3 23519 23807 23521 -3 23809 23521 23807 -3 23520 23522 23808 -3 23810 23808 23522 -3 23521 23809 23523 -3 23811 23523 23809 -3 23522 23524 23810 -3 23812 23810 23524 -3 23523 23811 23525 -3 23813 23525 23811 -3 23524 23526 23812 -3 23814 23812 23526 -3 23525 23813 23527 -3 23815 23527 23813 -3 23526 23528 23814 -3 23816 23814 23528 -3 23527 23815 23529 -3 23817 23529 23815 -3 23528 23530 23816 -3 23818 23816 23530 -3 23529 23817 23531 -3 23819 23531 23817 -3 23530 23532 23818 -3 23820 23818 23532 -3 23531 23819 23533 -3 23821 23533 23819 -3 23532 23534 23820 -3 23822 23820 23534 -3 23533 23821 23535 -3 23823 23535 23821 -3 23534 23536 23822 -3 23824 23822 23536 -3 23535 23823 23537 -3 23825 23537 23823 -3 23536 23538 23824 -3 23826 23824 23538 -3 23537 23825 23539 -3 23827 23539 23825 -3 23538 23540 23828 -3 23538 23828 23826 -3 23539 23827 23541 -3 23829 23541 23827 -3 23540 23542 23830 -3 23540 23830 23828 -3 23541 23829 23543 -3 23831 23543 23829 -3 23542 23544 23832 -3 23542 23832 23830 -3 23543 23831 23545 -3 23833 23545 23831 -3 23544 23546 23834 -3 23544 23834 23832 -3 23545 23833 23547 -3 23835 23547 23833 -3 23546 23548 23836 -3 23546 23836 23834 -3 23547 23835 23549 -3 23837 23549 23835 -3 23548 23550 23838 -3 23548 23838 23836 -3 23549 23837 23551 -3 23839 23551 23837 -3 23550 23552 23840 -3 23550 23840 23838 -3 23551 23839 23553 -3 23841 23553 23839 -3 23552 23554 23842 -3 23552 23842 23840 -3 23553 23841 23555 -3 23843 23555 23841 -3 23554 23556 23844 -3 23554 23844 23842 -3 23555 23843 23557 -3 23845 23557 23843 -3 23556 23558 23846 -3 23556 23846 23844 -3 23557 23845 23559 -3 23847 23559 23845 -3 23558 23560 23848 -3 23558 23848 23846 -3 23559 23847 23561 -3 23849 23561 23847 -3 23560 23562 23850 -3 23560 23850 23848 -3 23561 23849 23563 -3 23851 23563 23849 -3 23562 23564 23852 -3 23562 23852 23850 -3 23563 23851 23853 -3 23563 23853 23565 -3 23564 23566 23854 -3 23564 23854 23852 -3 23565 23853 23855 -3 23565 23855 23567 -3 23566 23568 23856 -3 23566 23856 23854 -3 23567 23855 23857 -3 23567 
23857 23569 -3 23568 23570 23858 -3 23568 23858 23856 -3 23569 23857 23859 -3 23569 23859 23571 -3 23570 23572 23860 -3 23570 23860 23858 -3 23571 23859 23861 -3 23571 23861 23573 -3 23572 23574 23862 -3 23572 23862 23860 -3 23573 23861 23863 -3 23573 23863 23575 -3 23574 23576 23864 -3 23574 23864 23862 -3 23575 23863 23865 -3 23575 23865 23577 -3 23576 23578 23866 -3 23576 23866 23864 -3 23577 23865 23867 -3 23577 23867 23579 -3 23578 23580 23868 -3 23578 23868 23866 -3 23579 23867 23869 -3 23579 23869 23581 -3 23580 23582 23870 -3 23580 23870 23868 -3 23581 23869 23871 -3 23581 23871 23583 -3 23582 23584 23872 -3 23582 23872 23870 -3 23583 23871 23873 -3 23583 23873 23585 -3 23584 23586 23874 -3 23584 23874 23872 -3 23585 23873 23875 -3 23585 23875 23587 -3 23586 23588 23876 -3 23586 23876 23874 -3 23587 23875 23877 -3 23587 23877 23589 -3 23588 23590 23878 -3 23588 23878 23876 -3 23589 23877 23879 -3 23589 23879 23591 -3 23590 23592 23880 -3 23590 23880 23878 -3 23591 23879 23881 -3 23591 23881 23593 -3 23592 23594 23882 -3 23592 23882 23880 -3 23593 23881 23883 -3 23593 23883 23595 -3 23594 23596 23884 -3 23594 23884 23882 -3 23595 23883 23885 -3 23595 23885 23597 -3 23596 23598 23886 -3 23596 23886 23884 -3 23597 23885 23887 -3 23597 23887 23599 -3 23598 23600 23888 -3 23598 23888 23886 -3 23599 23887 23889 -3 23599 23889 23601 -3 23600 23602 23890 -3 23600 23890 23888 -3 23601 23889 23891 -3 23601 23891 23603 -3 23602 23604 23892 -3 23602 23892 23890 -3 23603 23891 23893 -3 23603 23893 23605 -3 23604 23606 23894 -3 23604 23894 23892 -3 23605 23893 23895 -3 23605 23895 23607 -3 23606 23608 23896 -3 23606 23896 23894 -3 23607 23895 23897 -3 23607 23897 23609 -3 23608 23610 23898 -3 23608 23898 23896 -3 23609 23897 23899 -3 23609 23899 23611 -3 23610 23612 23900 -3 23610 23900 23898 -3 23611 23899 23901 -3 23611 23901 23613 -3 23612 23614 23902 -3 23612 23902 23900 -3 23613 23901 23903 -3 23613 23903 23615 -3 23614 23616 23904 -3 23614 23904 23902 -3 23615 23903 23905 -3 23615 23905 23617 -3 23616 23618 23906 -3 23616 23906 23904 -3 23617 23905 23907 -3 23617 23907 23619 -3 23618 23620 23908 -3 23618 23908 23906 -3 23619 23907 23909 -3 23619 23909 23621 -3 23620 23622 23910 -3 23620 23910 23908 -3 23621 23909 23911 -3 23621 23911 23623 -3 23622 23624 23912 -3 23622 23912 23910 -3 23623 23911 23913 -3 23623 23913 23625 -3 23624 23626 23914 -3 23624 23914 23912 -3 23625 23913 23915 -3 23625 23915 23627 -3 23626 23628 23916 -3 23626 23916 23914 -3 23627 23915 23917 -3 23627 23917 23629 -3 23628 23630 23918 -3 23628 23918 23916 -3 23629 23917 23919 -3 23629 23919 23631 -3 23630 23632 23920 -3 23630 23920 23918 -3 23631 23919 23921 -3 23631 23921 23633 -3 23632 23634 23920 -3 23922 23920 23634 -3 23633 23921 23923 -3 23633 23923 23635 -3 23634 23636 23922 -3 23924 23922 23636 -3 23635 23923 23925 -3 23635 23925 23637 -3 23636 23638 23924 -3 23926 23924 23638 -3 23637 23925 23927 -3 23637 23927 23639 -3 23638 23640 23926 -3 23928 23926 23640 -3 23639 23927 23929 -3 23639 23929 23641 -3 23640 23642 23928 -3 23930 23928 23642 -3 23641 23929 23931 -3 23641 23931 23643 -3 23642 23644 23930 -3 23932 23930 23644 -3 23643 23931 23933 -3 23643 23933 23645 -3 23644 23646 23932 -3 23934 23932 23646 -3 23645 23933 23935 -3 23645 23935 23647 -3 23646 23648 23934 -3 23936 23934 23648 -3 23647 23935 23939 -3 23647 23939 23651 -3 23648 23649 23936 -3 23937 23936 23649 -3 23649 23652 23937 -3 23940 23937 23652 -3 23650 23651 23938 -3 23939 23938 23651 -3 23650 23938 23943 -3 23650 23943 23655 -3 23652 23653 
23940 -3 23941 23940 23653 -3 23653 23656 23941 -3 23944 23941 23656 -3 23654 23655 23942 -3 23943 23942 23655 -3 23654 23942 24012 -3 23654 24012 23724 -3 23656 23657 23944 -3 23945 23944 23657 -3 23657 23658 23945 -3 23946 23945 23658 -3 23658 23659 23946 -3 23947 23946 23659 -3 23659 23660 23947 -3 23948 23947 23660 -3 23660 23661 23948 -3 23949 23948 23661 -3 23661 23662 23949 -3 23950 23949 23662 -3 23662 23663 23950 -3 23951 23950 23663 -3 23663 23664 23951 -3 23952 23951 23664 -3 23664 23665 23952 -3 23953 23952 23665 -3 23665 23666 23953 -3 23954 23953 23666 -3 23666 23667 23954 -3 23955 23954 23667 -3 23667 23668 23955 -3 23956 23955 23668 -3 23668 23669 23956 -3 23957 23956 23669 -3 23669 23670 23957 -3 23958 23957 23670 -3 23670 23671 23958 -3 23959 23958 23671 -3 23671 23672 23959 -3 23960 23959 23672 -3 23672 23673 23960 -3 23961 23960 23673 -3 23673 23674 23961 -3 23962 23961 23674 -3 23674 23675 23962 -3 23963 23962 23675 -3 23675 23676 23963 -3 23964 23963 23676 -3 23676 23677 23964 -3 23965 23964 23677 -3 23677 23678 23965 -3 23966 23965 23678 -3 23678 23679 23966 -3 23967 23966 23679 -3 23679 23680 23967 -3 23968 23967 23680 -3 23680 23681 23968 -3 23969 23968 23681 -3 23681 23682 23969 -3 23970 23969 23682 -3 23682 23683 23970 -3 23971 23970 23683 -3 23683 23684 23971 -3 23972 23971 23684 -3 23684 23685 23972 -3 23973 23972 23685 -3 23685 23686 23973 -3 23974 23973 23686 -3 23686 23687 23974 -3 23975 23974 23687 -3 23687 23688 23975 -3 23976 23975 23688 -3 23688 23689 23976 -3 23977 23976 23689 -3 23689 23690 23977 -3 23978 23977 23690 -3 23690 23691 23978 -3 23979 23978 23691 -3 23691 23692 23979 -3 23980 23979 23692 -3 23692 23693 23980 -3 23981 23980 23693 -3 23693 23694 23981 -3 23982 23981 23694 -3 23694 23695 23982 -3 23983 23982 23695 -3 23695 23696 23983 -3 23984 23983 23696 -3 23696 23697 23984 -3 23985 23984 23697 -3 23697 23698 23985 -3 23986 23985 23698 -3 23698 23699 23986 -3 23987 23986 23699 -3 23699 23700 23987 -3 23988 23987 23700 -3 23700 23701 23988 -3 23989 23988 23701 -3 23701 23702 23989 -3 23990 23989 23702 -3 23702 23703 23990 -3 23991 23990 23703 -3 23703 23704 23991 -3 23992 23991 23704 -3 23704 23705 23992 -3 23993 23992 23705 -3 23705 23706 23993 -3 23994 23993 23706 -3 23706 23707 23994 -3 23995 23994 23707 -3 23707 23708 23995 -3 23996 23995 23708 -3 23708 23709 23996 -3 23997 23996 23709 -3 23709 23710 23997 -3 23998 23997 23710 -3 23710 23711 23998 -3 23999 23998 23711 -3 23711 23712 23999 -3 24000 23999 23712 -3 23712 23713 24000 -3 24001 24000 23713 -3 23713 23714 24001 -3 24002 24001 23714 -3 23714 23715 24002 -3 24003 24002 23715 -3 23715 23716 24003 -3 24004 24003 23716 -3 23716 23717 24004 -3 24005 24004 23717 -3 23717 23718 24005 -3 24006 24005 23718 -3 23718 23719 24006 -3 24007 24006 23719 -3 23719 23720 24007 -3 24008 24007 23720 -3 23720 23721 24008 -3 24009 24008 23721 -3 23721 23722 24009 -3 24010 24009 23722 -3 23722 23723 24010 -3 24011 24010 23723 -3 23723 23724 24011 -3 24012 24011 23724 -3 23725 24013 23726 -3 24014 23726 24013 -3 23725 23795 24083 -3 23725 24083 24013 -3 23726 24014 23727 -3 24015 23727 24014 -3 23727 24015 23728 -3 24016 23728 24015 -3 23728 24016 23729 -3 24017 23729 24016 -3 23729 24017 23730 -3 24018 23730 24017 -3 23730 24018 23731 -3 24019 23731 24018 -3 23731 24019 23732 -3 24020 23732 24019 -3 23732 24020 23733 -3 24021 23733 24020 -3 23733 24021 23734 -3 24022 23734 24021 -3 23734 24022 23735 -3 24023 23735 24022 -3 23735 24023 23736 -3 24024 23736 24023 -3 23736 24024 23737 -3 24025 23737 24024 
-3 23737 24025 23738 -3 24026 23738 24025 -3 23738 24026 23739 -3 24027 23739 24026 -3 23739 24027 23740 -3 24028 23740 24027 -3 23740 24028 23741 -3 24029 23741 24028 -3 23741 24029 23742 -3 24030 23742 24029 -3 23742 24030 23743 -3 24031 23743 24030 -3 23743 24031 23744 -3 24032 23744 24031 -3 23744 24032 23745 -3 24033 23745 24032 -3 23745 24033 23746 -3 24034 23746 24033 -3 23746 24034 23747 -3 24035 23747 24034 -3 23747 24035 23748 -3 24036 23748 24035 -3 23748 24036 23749 -3 24037 23749 24036 -3 23749 24037 23750 -3 24038 23750 24037 -3 23750 24038 23751 -3 24039 23751 24038 -3 23751 24039 23752 -3 24040 23752 24039 -3 23752 24040 24041 -3 23752 24041 23753 -3 23753 24041 24042 -3 23753 24042 23754 -3 23754 24042 24043 -3 23754 24043 23755 -3 23755 24043 24044 -3 23755 24044 23756 -3 23756 24044 24045 -3 23756 24045 23757 -3 23757 24045 24046 -3 23757 24046 23758 -3 23758 24046 24047 -3 23758 24047 23759 -3 23759 24047 24048 -3 23759 24048 23760 -3 23760 24048 24049 -3 23760 24049 23761 -3 23761 24049 24050 -3 23761 24050 23762 -3 23762 24050 24051 -3 23762 24051 23763 -3 23763 24051 24052 -3 23763 24052 23764 -3 23764 24052 24053 -3 23764 24053 23765 -3 23765 24053 24054 -3 23765 24054 23766 -3 23766 24054 24055 -3 23766 24055 23767 -3 23767 24055 24056 -3 23767 24056 23768 -3 23768 24056 24057 -3 23768 24057 23769 -3 23769 24057 24058 -3 23769 24058 23770 -3 23770 24058 24059 -3 23770 24059 23771 -3 23771 24059 24060 -3 23771 24060 23772 -3 23772 24060 24061 -3 23772 24061 23773 -3 23773 24061 24062 -3 23773 24062 23774 -3 23774 24062 24063 -3 23774 24063 23775 -3 23775 24063 24064 -3 23775 24064 23776 -3 23776 24064 24065 -3 23776 24065 23777 -3 23777 24065 24066 -3 23777 24066 23778 -3 23778 24066 24067 -3 23778 24067 23779 -3 23779 24067 24068 -3 23779 24068 23780 -3 23780 24068 24069 -3 23780 24069 23781 -3 23781 24069 24070 -3 23781 24070 23782 -3 23782 24070 24071 -3 23782 24071 23783 -3 23783 24071 24072 -3 23783 24072 23784 -3 23784 24072 24073 -3 23784 24073 23785 -3 23785 24073 24074 -3 23785 24074 23786 -3 23786 24074 24075 -3 23786 24075 23787 -3 23787 24075 24076 -3 23787 24076 23788 -3 23788 24076 24077 -3 23788 24077 23789 -3 23789 24077 24078 -3 23789 24078 23790 -3 23790 24078 24079 -3 23790 24079 23791 -3 23791 24079 24080 -3 23791 24080 23792 -3 23792 24080 24081 -3 23792 24081 23793 -3 23793 24081 24084 -3 23793 24084 23796 -3 23794 24082 24083 -3 23794 24083 23795 -3 23794 23799 24087 -3 23794 24087 24082 -3 23796 24084 24085 -3 23796 24085 23797 -3 23797 24085 24088 -3 23797 24088 23800 -3 23798 24086 24087 -3 23798 24087 23799 -3 23798 23802 24090 -3 23798 24090 24086 -3 23800 24088 24089 -3 23800 24089 23801 -3 23801 24089 24091 -3 23801 24091 23803 -3 23802 23804 24092 -3 23802 24092 24090 -3 23803 24091 24093 -3 23803 24093 23805 -3 23804 23806 24094 -3 23804 24094 24092 -3 23805 24093 24095 -3 23805 24095 23807 -3 23806 23808 24096 -3 23806 24096 24094 -3 23807 24095 24097 -3 23807 24097 23809 -3 23808 23810 24098 -3 23808 24098 24096 -3 23809 24097 24099 -3 23809 24099 23811 -3 23810 23812 24100 -3 23810 24100 24098 -3 23811 24099 24101 -3 23811 24101 23813 -3 23812 23814 24102 -3 23812 24102 24100 -3 23813 24101 24103 -3 23813 24103 23815 -3 23814 23816 24104 -3 23814 24104 24102 -3 23815 24103 24105 -3 23815 24105 23817 -3 23816 23818 24106 -3 23816 24106 24104 -3 23817 24105 24107 -3 23817 24107 23819 -3 23818 23820 24108 -3 23818 24108 24106 -3 23819 24107 24109 -3 23819 24109 23821 -3 23820 23822 24110 -3 23820 24110 24108 -3 23821 24109 24111 -3 
23821 24111 23823 -3 23822 23824 24110 -3 24112 24110 23824 -3 23823 24111 24113 -3 23823 24113 23825 -3 23824 23826 24112 -3 24114 24112 23826 -3 23825 24113 24115 -3 23825 24115 23827 -3 23826 23828 24114 -3 24116 24114 23828 -3 23827 24115 24117 -3 23827 24117 23829 -3 23828 23830 24116 -3 24118 24116 23830 -3 23829 24117 24119 -3 23829 24119 23831 -3 23830 23832 24118 -3 24120 24118 23832 -3 23831 24119 24121 -3 23831 24121 23833 -3 23832 23834 24120 -3 24122 24120 23834 -3 23833 24121 24123 -3 23833 24123 23835 -3 23834 23836 24122 -3 24124 24122 23836 -3 23835 24123 24125 -3 23835 24125 23837 -3 23836 23838 24124 -3 24126 24124 23838 -3 23837 24125 24127 -3 23837 24127 23839 -3 23838 23840 24126 -3 24128 24126 23840 -3 23839 24127 24129 -3 23839 24129 23841 -3 23840 23842 24128 -3 24130 24128 23842 -3 23841 24129 24131 -3 23841 24131 23843 -3 23842 23844 24130 -3 24132 24130 23844 -3 23843 24131 24133 -3 23843 24133 23845 -3 23844 23846 24132 -3 24134 24132 23846 -3 23845 24133 24135 -3 23845 24135 23847 -3 23846 23848 24134 -3 24136 24134 23848 -3 23847 24135 23849 -3 24137 23849 24135 -3 23848 23850 24136 -3 24138 24136 23850 -3 23849 24137 23851 -3 24139 23851 24137 -3 23850 23852 24138 -3 24140 24138 23852 -3 23851 24139 23853 -3 24141 23853 24139 -3 23852 23854 24140 -3 24142 24140 23854 -3 23853 24141 23855 -3 24143 23855 24141 -3 23854 23856 24142 -3 24144 24142 23856 -3 23855 24143 23857 -3 24145 23857 24143 -3 23856 23858 24144 -3 24146 24144 23858 -3 23857 24145 23859 -3 24147 23859 24145 -3 23858 23860 24146 -3 24148 24146 23860 -3 23859 24147 23861 -3 24149 23861 24147 -3 23860 23862 24148 -3 24150 24148 23862 -3 23861 24149 23863 -3 24151 23863 24149 -3 23862 23864 24150 -3 24152 24150 23864 -3 23863 24151 23865 -3 24153 23865 24151 -3 23864 23866 24152 -3 24154 24152 23866 -3 23865 24153 23867 -3 24155 23867 24153 -3 23866 23868 24154 -3 24156 24154 23868 -3 23867 24155 23869 -3 24157 23869 24155 -3 23868 23870 24156 -3 24158 24156 23870 -3 23869 24157 23871 -3 24159 23871 24157 -3 23870 23872 24158 -3 24160 24158 23872 -3 23871 24159 23873 -3 24161 23873 24159 -3 23872 23874 24160 -3 24162 24160 23874 -3 23873 24161 23875 -3 24163 23875 24161 -3 23874 23876 24162 -3 24164 24162 23876 -3 23875 24163 23877 -3 24165 23877 24163 -3 23876 23878 24164 -3 24166 24164 23878 -3 23877 24165 23879 -3 24167 23879 24165 -3 23878 23880 24166 -3 24168 24166 23880 -3 23879 24167 23881 -3 24169 23881 24167 -3 23880 23882 24168 -3 24170 24168 23882 -3 23881 24169 23883 -3 24171 23883 24169 -3 23882 23884 24170 -3 24172 24170 23884 -3 23883 24171 23885 -3 24173 23885 24171 -3 23884 23886 24172 -3 24174 24172 23886 -3 23885 24173 23887 -3 24175 23887 24173 -3 23886 23888 24174 -3 24176 24174 23888 -3 23887 24175 23889 -3 24177 23889 24175 -3 23888 23890 24176 -3 24178 24176 23890 -3 23889 24177 23891 -3 24179 23891 24177 -3 23890 23892 24178 -3 24180 24178 23892 -3 23891 24179 23893 -3 24181 23893 24179 -3 23892 23894 24180 -3 24182 24180 23894 -3 23893 24181 23895 -3 24183 23895 24181 -3 23894 23896 24182 -3 24184 24182 23896 -3 23895 24183 23897 -3 24185 23897 24183 -3 23896 23898 24184 -3 24186 24184 23898 -3 23897 24185 23899 -3 24187 23899 24185 -3 23898 23900 24186 -3 24188 24186 23900 -3 23899 24187 23901 -3 24189 23901 24187 -3 23900 23902 24188 -3 24190 24188 23902 -3 23901 24189 23903 -3 24191 23903 24189 -3 23902 23904 24190 -3 24192 24190 23904 -3 23903 24191 23905 -3 24193 23905 24191 -3 23904 23906 24192 -3 24194 24192 23906 -3 23905 24193 23907 -3 24195 23907 24193 -3 23906 
23908 24194 -3 24196 24194 23908 -3 23907 24195 23909 -3 24197 23909 24195 -3 23908 23910 24196 -3 24198 24196 23910 -3 23909 24197 23911 -3 24199 23911 24197 -3 23910 23912 24198 -3 24200 24198 23912 -3 23911 24199 23913 -3 24201 23913 24199 -3 23912 23914 24200 -3 24202 24200 23914 -3 23913 24201 23915 -3 24203 23915 24201 -3 23914 23916 24202 -3 24204 24202 23916 -3 23915 24203 23917 -3 24205 23917 24203 -3 23916 23918 24204 -3 24206 24204 23918 -3 23917 24205 23919 -3 24207 23919 24205 -3 23918 23920 24208 -3 23918 24208 24206 -3 23919 24207 23921 -3 24209 23921 24207 -3 23920 23922 24210 -3 23920 24210 24208 -3 23921 24209 23923 -3 24211 23923 24209 -3 23922 23924 24212 -3 23922 24212 24210 -3 23923 24211 23925 -3 24213 23925 24211 -3 23924 23926 24214 -3 23924 24214 24212 -3 23925 24213 23927 -3 24215 23927 24213 -3 23926 23928 24216 -3 23926 24216 24214 -3 23927 24215 23929 -3 24217 23929 24215 -3 23928 23930 24218 -3 23928 24218 24216 -3 23929 24217 23931 -3 24219 23931 24217 -3 23930 23932 24220 -3 23930 24220 24218 -3 23931 24219 23933 -3 24221 23933 24219 -3 23932 23934 24222 -3 23932 24222 24220 -3 23933 24221 23935 -3 24223 23935 24221 -3 23934 23936 24224 -3 23934 24224 24222 -3 23935 24223 23939 -3 24227 23939 24223 -3 23936 23937 24225 -3 23936 24225 24224 -3 23937 23940 24228 -3 23937 24228 24225 -3 23938 23939 24227 -3 23938 24227 24226 -3 23938 24226 23943 -3 24231 23943 24226 -3 23940 23941 24229 -3 23940 24229 24228 -3 23941 23944 24232 -3 23941 24232 24229 -3 23942 23943 24231 -3 23942 24231 24230 -3 23942 24230 24012 -3 24300 24012 24230 -3 23944 23945 24233 -3 23944 24233 24232 -3 23945 23946 24234 -3 23945 24234 24233 -3 23946 23947 24235 -3 23946 24235 24234 -3 23947 23948 24236 -3 23947 24236 24235 -3 23948 23949 24237 -3 23948 24237 24236 -3 23949 23950 24238 -3 23949 24238 24237 -3 23950 23951 24239 -3 23950 24239 24238 -3 23951 23952 24240 -3 23951 24240 24239 -3 23952 23953 24241 -3 23952 24241 24240 -3 23953 23954 24242 -3 23953 24242 24241 -3 23954 23955 24243 -3 23954 24243 24242 -3 23955 23956 24244 -3 23955 24244 24243 -3 23956 23957 24245 -3 23956 24245 24244 -3 23957 23958 24246 -3 23957 24246 24245 -3 23958 23959 24247 -3 23958 24247 24246 -3 23959 23960 24248 -3 23959 24248 24247 -3 23960 23961 24249 -3 23960 24249 24248 -3 23961 23962 24250 -3 23961 24250 24249 -3 23962 23963 24251 -3 23962 24251 24250 -3 23963 23964 24252 -3 23963 24252 24251 -3 23964 23965 24253 -3 23964 24253 24252 -3 23965 23966 24254 -3 23965 24254 24253 -3 23966 23967 24255 -3 23966 24255 24254 -3 23967 23968 24256 -3 23967 24256 24255 -3 23968 23969 24257 -3 23968 24257 24256 -3 23969 23970 24258 -3 23969 24258 24257 -3 23970 23971 24259 -3 23970 24259 24258 -3 23971 23972 24260 -3 23971 24260 24259 -3 23972 23973 24261 -3 23972 24261 24260 -3 23973 23974 24262 -3 23973 24262 24261 -3 23974 23975 24263 -3 23974 24263 24262 -3 23975 23976 24264 -3 23975 24264 24263 -3 23976 23977 24265 -3 23976 24265 24264 -3 23977 23978 24266 -3 23977 24266 24265 -3 23978 23979 24267 -3 23978 24267 24266 -3 23979 23980 24268 -3 23979 24268 24267 -3 23980 23981 24269 -3 23980 24269 24268 -3 23981 23982 24270 -3 23981 24270 24269 -3 23982 23983 24271 -3 23982 24271 24270 -3 23983 23984 24272 -3 23983 24272 24271 -3 23984 23985 24273 -3 23984 24273 24272 -3 23985 23986 24274 -3 23985 24274 24273 -3 23986 23987 24275 -3 23986 24275 24274 -3 23987 23988 24276 -3 23987 24276 24275 -3 23988 23989 24277 -3 23988 24277 24276 -3 23989 23990 24278 -3 23989 24278 24277 -3 23990 23991 24279 -3 23990 24279 
24278 -3 23991 23992 24280 -3 23991 24280 24279 -3 23992 23993 24281 -3 23992 24281 24280 -3 23993 23994 24282 -3 23993 24282 24281 -3 23994 23995 24283 -3 23994 24283 24282 -3 23995 23996 24284 -3 23995 24284 24283 -3 23996 23997 24285 -3 23996 24285 24284 -3 23997 23998 24286 -3 23997 24286 24285 -3 23998 23999 24287 -3 23998 24287 24286 -3 23999 24000 24288 -3 23999 24288 24287 -3 24000 24001 24289 -3 24000 24289 24288 -3 24001 24002 24290 -3 24001 24290 24289 -3 24002 24003 24291 -3 24002 24291 24290 -3 24003 24004 24292 -3 24003 24292 24291 -3 24004 24005 24293 -3 24004 24293 24292 -3 24005 24006 24294 -3 24005 24294 24293 -3 24006 24007 24295 -3 24006 24295 24294 -3 24007 24008 24296 -3 24007 24296 24295 -3 24008 24009 24297 -3 24008 24297 24296 -3 24009 24010 24298 -3 24009 24298 24297 -3 24010 24011 24299 -3 24010 24299 24298 -3 24011 24012 24300 -3 24011 24300 24299 -3 24013 24301 24302 -3 24013 24302 24014 -3 24013 24083 24301 -3 24371 24301 24083 -3 24014 24302 24303 -3 24014 24303 24015 -3 24015 24303 24304 -3 24015 24304 24016 -3 24016 24304 24305 -3 24016 24305 24017 -3 24017 24305 24306 -3 24017 24306 24018 -3 24018 24306 24307 -3 24018 24307 24019 -3 24019 24307 24308 -3 24019 24308 24020 -3 24020 24308 24309 -3 24020 24309 24021 -3 24021 24309 24310 -3 24021 24310 24022 -3 24022 24310 24311 -3 24022 24311 24023 -3 24023 24311 24312 -3 24023 24312 24024 -3 24024 24312 24313 -3 24024 24313 24025 -3 24025 24313 24314 -3 24025 24314 24026 -3 24026 24314 24315 -3 24026 24315 24027 -3 24027 24315 24316 -3 24027 24316 24028 -3 24028 24316 24317 -3 24028 24317 24029 -3 24029 24317 24318 -3 24029 24318 24030 -3 24030 24318 24319 -3 24030 24319 24031 -3 24031 24319 24320 -3 24031 24320 24032 -3 24032 24320 24321 -3 24032 24321 24033 -3 24033 24321 24322 -3 24033 24322 24034 -3 24034 24322 24323 -3 24034 24323 24035 -3 24035 24323 24324 -3 24035 24324 24036 -3 24036 24324 24325 -3 24036 24325 24037 -3 24037 24325 24326 -3 24037 24326 24038 -3 24038 24326 24327 -3 24038 24327 24039 -3 24039 24327 24040 -3 24328 24040 24327 -3 24040 24328 24041 -3 24329 24041 24328 -3 24041 24329 24042 -3 24330 24042 24329 -3 24042 24330 24043 -3 24331 24043 24330 -3 24043 24331 24044 -3 24332 24044 24331 -3 24044 24332 24045 -3 24333 24045 24332 -3 24045 24333 24046 -3 24334 24046 24333 -3 24046 24334 24047 -3 24335 24047 24334 -3 24047 24335 24048 -3 24336 24048 24335 -3 24048 24336 24049 -3 24337 24049 24336 -3 24049 24337 24050 -3 24338 24050 24337 -3 24050 24338 24051 -3 24339 24051 24338 -3 24051 24339 24052 -3 24340 24052 24339 -3 24052 24340 24053 -3 24341 24053 24340 -3 24053 24341 24054 -3 24342 24054 24341 -3 24054 24342 24055 -3 24343 24055 24342 -3 24055 24343 24056 -3 24344 24056 24343 -3 24056 24344 24057 -3 24345 24057 24344 -3 24057 24345 24058 -3 24346 24058 24345 -3 24058 24346 24059 -3 24347 24059 24346 -3 24059 24347 24060 -3 24348 24060 24347 -3 24060 24348 24061 -3 24349 24061 24348 -3 24061 24349 24062 -3 24350 24062 24349 -3 24062 24350 24063 -3 24351 24063 24350 -3 24063 24351 24064 -3 24352 24064 24351 -3 24064 24352 24065 -3 24353 24065 24352 -3 24065 24353 24066 -3 24354 24066 24353 -3 24066 24354 24067 -3 24355 24067 24354 -3 24067 24355 24068 -3 24356 24068 24355 -3 24068 24356 24069 -3 24357 24069 24356 -3 24069 24357 24070 -3 24358 24070 24357 -3 24070 24358 24071 -3 24359 24071 24358 -3 24071 24359 24072 -3 24360 24072 24359 -3 24072 24360 24073 -3 24361 24073 24360 -3 24073 24361 24074 -3 24362 24074 24361 -3 24074 24362 24075 -3 24363 24075 24362 -3 24075 24363 24076 
-3 24364 24076 24363 -3 24076 24364 24077 -3 24365 24077 24364 -3 24077 24365 24078 -3 24366 24078 24365 -3 24078 24366 24079 -3 24367 24079 24366 -3 24079 24367 24080 -3 24368 24080 24367 -3 24080 24368 24081 -3 24369 24081 24368 -3 24081 24369 24084 -3 24372 24084 24369 -3 24082 24370 24083 -3 24371 24083 24370 -3 24082 24087 24370 -3 24375 24370 24087 -3 24084 24372 24085 -3 24373 24085 24372 -3 24085 24373 24088 -3 24376 24088 24373 -3 24086 24374 24087 -3 24375 24087 24374 -3 24086 24090 24374 -3 24378 24374 24090 -3 24088 24376 24089 -3 24377 24089 24376 -3 24089 24377 24091 -3 24379 24091 24377 -3 24090 24092 24378 -3 24380 24378 24092 -3 24091 24379 24093 -3 24381 24093 24379 -3 24092 24094 24380 -3 24382 24380 24094 -3 24093 24381 24095 -3 24383 24095 24381 -3 24094 24096 24382 -3 24384 24382 24096 -3 24095 24383 24097 -3 24385 24097 24383 -3 24096 24098 24384 -3 24386 24384 24098 -3 24097 24385 24099 -3 24387 24099 24385 -3 24098 24100 24386 -3 24388 24386 24100 -3 24099 24387 24101 -3 24389 24101 24387 -3 24100 24102 24388 -3 24390 24388 24102 -3 24101 24389 24103 -3 24391 24103 24389 -3 24102 24104 24390 -3 24392 24390 24104 -3 24103 24391 24105 -3 24393 24105 24391 -3 24104 24106 24392 -3 24394 24392 24106 -3 24105 24393 24107 -3 24395 24107 24393 -3 24106 24108 24394 -3 24396 24394 24108 -3 24107 24395 24109 -3 24397 24109 24395 -3 24108 24110 24398 -3 24108 24398 24396 -3 24109 24397 24111 -3 24399 24111 24397 -3 24110 24112 24400 -3 24110 24400 24398 -3 24111 24399 24113 -3 24401 24113 24399 -3 24112 24114 24402 -3 24112 24402 24400 -3 24113 24401 24115 -3 24403 24115 24401 -3 24114 24116 24404 -3 24114 24404 24402 -3 24115 24403 24117 -3 24405 24117 24403 -3 24116 24118 24406 -3 24116 24406 24404 -3 24117 24405 24119 -3 24407 24119 24405 -3 24118 24120 24408 -3 24118 24408 24406 -3 24119 24407 24121 -3 24409 24121 24407 -3 24120 24122 24410 -3 24120 24410 24408 -3 24121 24409 24123 -3 24411 24123 24409 -3 24122 24124 24412 -3 24122 24412 24410 -3 24123 24411 24125 -3 24413 24125 24411 -3 24124 24126 24414 -3 24124 24414 24412 -3 24125 24413 24127 -3 24415 24127 24413 -3 24126 24128 24416 -3 24126 24416 24414 -3 24127 24415 24129 -3 24417 24129 24415 -3 24128 24130 24418 -3 24128 24418 24416 -3 24129 24417 24131 -3 24419 24131 24417 -3 24130 24132 24420 -3 24130 24420 24418 -3 24131 24419 24133 -3 24421 24133 24419 -3 24132 24134 24422 -3 24132 24422 24420 -3 24133 24421 24135 -3 24423 24135 24421 -3 24134 24136 24424 -3 24134 24424 24422 -3 24135 24423 24425 -3 24135 24425 24137 -3 24136 24138 24426 -3 24136 24426 24424 -3 24137 24425 24427 -3 24137 24427 24139 -3 24138 24140 24428 -3 24138 24428 24426 -3 24139 24427 24429 -3 24139 24429 24141 -3 24140 24142 24430 -3 24140 24430 24428 -3 24141 24429 24431 -3 24141 24431 24143 -3 24142 24144 24432 -3 24142 24432 24430 -3 24143 24431 24433 -3 24143 24433 24145 -3 24144 24146 24434 -3 24144 24434 24432 -3 24145 24433 24435 -3 24145 24435 24147 -3 24146 24148 24436 -3 24146 24436 24434 -3 24147 24435 24437 -3 24147 24437 24149 -3 24148 24150 24438 -3 24148 24438 24436 -3 24149 24437 24439 -3 24149 24439 24151 -3 24150 24152 24440 -3 24150 24440 24438 -3 24151 24439 24441 -3 24151 24441 24153 -3 24152 24154 24442 -3 24152 24442 24440 -3 24153 24441 24443 -3 24153 24443 24155 -3 24154 24156 24444 -3 24154 24444 24442 -3 24155 24443 24445 -3 24155 24445 24157 -3 24156 24158 24446 -3 24156 24446 24444 -3 24157 24445 24447 -3 24157 24447 24159 -3 24158 24160 24448 -3 24158 24448 24446 -3 24159 24447 24449 -3 24159 24449 24161 -3 
24160 24162 24450 -3 24160 24450 24448 -3 24161 24449 24451 -3 24161 24451 24163 -3 24162 24164 24452 -3 24162 24452 24450 -3 24163 24451 24453 -3 24163 24453 24165 -3 24164 24166 24454 -3 24164 24454 24452 -3 24165 24453 24455 -3 24165 24455 24167 -3 24166 24168 24456 -3 24166 24456 24454 -3 24167 24455 24457 -3 24167 24457 24169 -3 24168 24170 24458 -3 24168 24458 24456 -3 24169 24457 24459 -3 24169 24459 24171 -3 24170 24172 24460 -3 24170 24460 24458 -3 24171 24459 24461 -3 24171 24461 24173 -3 24172 24174 24462 -3 24172 24462 24460 -3 24173 24461 24463 -3 24173 24463 24175 -3 24174 24176 24464 -3 24174 24464 24462 -3 24175 24463 24465 -3 24175 24465 24177 -3 24176 24178 24466 -3 24176 24466 24464 -3 24177 24465 24467 -3 24177 24467 24179 -3 24178 24180 24468 -3 24178 24468 24466 -3 24179 24467 24469 -3 24179 24469 24181 -3 24180 24182 24470 -3 24180 24470 24468 -3 24181 24469 24471 -3 24181 24471 24183 -3 24182 24184 24472 -3 24182 24472 24470 -3 24183 24471 24473 -3 24183 24473 24185 -3 24184 24186 24474 -3 24184 24474 24472 -3 24185 24473 24475 -3 24185 24475 24187 -3 24186 24188 24476 -3 24186 24476 24474 -3 24187 24475 24477 -3 24187 24477 24189 -3 24188 24190 24478 -3 24188 24478 24476 -3 24189 24477 24479 -3 24189 24479 24191 -3 24190 24192 24480 -3 24190 24480 24478 -3 24191 24479 24481 -3 24191 24481 24193 -3 24192 24194 24482 -3 24192 24482 24480 -3 24193 24481 24483 -3 24193 24483 24195 -3 24194 24196 24484 -3 24194 24484 24482 -3 24195 24483 24485 -3 24195 24485 24197 -3 24196 24198 24486 -3 24196 24486 24484 -3 24197 24485 24487 -3 24197 24487 24199 -3 24198 24200 24488 -3 24198 24488 24486 -3 24199 24487 24489 -3 24199 24489 24201 -3 24200 24202 24490 -3 24200 24490 24488 -3 24201 24489 24491 -3 24201 24491 24203 -3 24202 24204 24492 -3 24202 24492 24490 -3 24203 24491 24493 -3 24203 24493 24205 -3 24204 24206 24492 -3 24494 24492 24206 -3 24205 24493 24495 -3 24205 24495 24207 -3 24206 24208 24494 -3 24496 24494 24208 -3 24207 24495 24497 -3 24207 24497 24209 -3 24208 24210 24496 -3 24498 24496 24210 -3 24209 24497 24499 -3 24209 24499 24211 -3 24210 24212 24498 -3 24500 24498 24212 -3 24211 24499 24501 -3 24211 24501 24213 -3 24212 24214 24500 -3 24502 24500 24214 -3 24213 24501 24503 -3 24213 24503 24215 -3 24214 24216 24502 -3 24504 24502 24216 -3 24215 24503 24505 -3 24215 24505 24217 -3 24216 24218 24504 -3 24506 24504 24218 -3 24217 24505 24507 -3 24217 24507 24219 -3 24218 24220 24506 -3 24508 24506 24220 -3 24219 24507 24509 -3 24219 24509 24221 -3 24220 24222 24508 -3 24510 24508 24222 -3 24221 24509 24511 -3 24221 24511 24223 -3 24222 24224 24510 -3 24512 24510 24224 -3 24223 24511 24515 -3 24223 24515 24227 -3 24224 24225 24512 -3 24513 24512 24225 -3 24225 24228 24513 -3 24516 24513 24228 -3 24226 24227 24514 -3 24515 24514 24227 -3 24226 24514 24519 -3 24226 24519 24231 -3 24228 24229 24516 -3 24517 24516 24229 -3 24229 24232 24517 -3 24520 24517 24232 -3 24230 24231 24518 -3 24519 24518 24231 -3 24230 24518 24300 -3 24588 24300 24518 -3 24232 24233 24520 -3 24521 24520 24233 -3 24233 24234 24521 -3 24522 24521 24234 -3 24234 24235 24522 -3 24523 24522 24235 -3 24235 24236 24523 -3 24524 24523 24236 -3 24236 24237 24524 -3 24525 24524 24237 -3 24237 24238 24525 -3 24526 24525 24238 -3 24238 24239 24526 -3 24527 24526 24239 -3 24239 24240 24527 -3 24528 24527 24240 -3 24240 24241 24528 -3 24529 24528 24241 -3 24241 24242 24529 -3 24530 24529 24242 -3 24242 24243 24530 -3 24531 24530 24243 -3 24243 24244 24531 -3 24532 24531 24244 -3 24244 24245 24532 -3 24533 
24532 24245 -3 24245 24246 24533 -3 24534 24533 24246 -3 24246 24247 24534 -3 24535 24534 24247 -3 24247 24248 24535 -3 24536 24535 24248 -3 24248 24249 24536 -3 24537 24536 24249 -3 24249 24250 24537 -3 24538 24537 24250 -3 24250 24251 24538 -3 24539 24538 24251 -3 24251 24252 24539 -3 24540 24539 24252 -3 24252 24253 24540 -3 24541 24540 24253 -3 24253 24254 24541 -3 24542 24541 24254 -3 24254 24255 24542 -3 24543 24542 24255 -3 24255 24256 24543 -3 24544 24543 24256 -3 24256 24257 24544 -3 24545 24544 24257 -3 24257 24258 24545 -3 24546 24545 24258 -3 24258 24259 24546 -3 24547 24546 24259 -3 24259 24260 24547 -3 24548 24547 24260 -3 24260 24261 24548 -3 24549 24548 24261 -3 24261 24262 24549 -3 24550 24549 24262 -3 24262 24263 24550 -3 24551 24550 24263 -3 24263 24264 24551 -3 24552 24551 24264 -3 24264 24265 24552 -3 24553 24552 24265 -3 24265 24266 24553 -3 24554 24553 24266 -3 24266 24267 24554 -3 24555 24554 24267 -3 24267 24268 24555 -3 24556 24555 24268 -3 24268 24269 24556 -3 24557 24556 24269 -3 24269 24270 24557 -3 24558 24557 24270 -3 24270 24271 24558 -3 24559 24558 24271 -3 24271 24272 24559 -3 24560 24559 24272 -3 24272 24273 24560 -3 24561 24560 24273 -3 24273 24274 24561 -3 24562 24561 24274 -3 24274 24275 24562 -3 24563 24562 24275 -3 24275 24276 24563 -3 24564 24563 24276 -3 24276 24277 24564 -3 24565 24564 24277 -3 24277 24278 24565 -3 24566 24565 24278 -3 24278 24279 24566 -3 24567 24566 24279 -3 24279 24280 24567 -3 24568 24567 24280 -3 24280 24281 24568 -3 24569 24568 24281 -3 24281 24282 24569 -3 24570 24569 24282 -3 24282 24283 24570 -3 24571 24570 24283 -3 24283 24284 24571 -3 24572 24571 24284 -3 24284 24285 24572 -3 24573 24572 24285 -3 24285 24286 24573 -3 24574 24573 24286 -3 24286 24287 24574 -3 24575 24574 24287 -3 24287 24288 24575 -3 24576 24575 24288 -3 24288 24289 24576 -3 24577 24576 24289 -3 24289 24290 24577 -3 24578 24577 24290 -3 24290 24291 24578 -3 24579 24578 24291 -3 24291 24292 24579 -3 24580 24579 24292 -3 24292 24293 24580 -3 24581 24580 24293 -3 24293 24294 24581 -3 24582 24581 24294 -3 24294 24295 24582 -3 24583 24582 24295 -3 24295 24296 24583 -3 24584 24583 24296 -3 24296 24297 24584 -3 24585 24584 24297 -3 24297 24298 24585 -3 24586 24585 24298 -3 24298 24299 24586 -3 24587 24586 24299 -3 24299 24300 24588 -3 24299 24588 24587 -3 24301 24589 24302 -3 24590 24302 24589 -3 24301 24371 24659 -3 24301 24659 24589 -3 24302 24590 24303 -3 24591 24303 24590 -3 24303 24591 24304 -3 24592 24304 24591 -3 24304 24592 24305 -3 24593 24305 24592 -3 24305 24593 24306 -3 24594 24306 24593 -3 24306 24594 24307 -3 24595 24307 24594 -3 24307 24595 24308 -3 24596 24308 24595 -3 24308 24596 24309 -3 24597 24309 24596 -3 24309 24597 24310 -3 24598 24310 24597 -3 24310 24598 24311 -3 24599 24311 24598 -3 24311 24599 24312 -3 24600 24312 24599 -3 24312 24600 24313 -3 24601 24313 24600 -3 24313 24601 24314 -3 24602 24314 24601 -3 24314 24602 24315 -3 24603 24315 24602 -3 24315 24603 24316 -3 24604 24316 24603 -3 24316 24604 24317 -3 24605 24317 24604 -3 24317 24605 24318 -3 24606 24318 24605 -3 24318 24606 24319 -3 24607 24319 24606 -3 24319 24607 24320 -3 24608 24320 24607 -3 24320 24608 24321 -3 24609 24321 24608 -3 24321 24609 24322 -3 24610 24322 24609 -3 24322 24610 24323 -3 24611 24323 24610 -3 24323 24611 24324 -3 24612 24324 24611 -3 24324 24612 24325 -3 24613 24325 24612 -3 24325 24613 24326 -3 24614 24326 24613 -3 24326 24614 24327 -3 24615 24327 24614 -3 24327 24615 24616 -3 24327 24616 24328 -3 24328 24616 24617 -3 24328 24617 24329 -3 24329 24617 
24618 -3 24329 24618 24330 -3 24330 24618 24619 -3 24330 24619 24331 -3 24331 24619 24620 -3 24331 24620 24332 -3 24332 24620 24621 -3 24332 24621 24333 -3 24333 24621 24622 -3 24333 24622 24334 -3 24334 24622 24623 -3 24334 24623 24335 -3 24335 24623 24624 -3 24335 24624 24336 -3 24336 24624 24625 -3 24336 24625 24337 -3 24337 24625 24626 -3 24337 24626 24338 -3 24338 24626 24627 -3 24338 24627 24339 -3 24339 24627 24628 -3 24339 24628 24340 -3 24340 24628 24629 -3 24340 24629 24341 -3 24341 24629 24630 -3 24341 24630 24342 -3 24342 24630 24631 -3 24342 24631 24343 -3 24343 24631 24632 -3 24343 24632 24344 -3 24344 24632 24633 -3 24344 24633 24345 -3 24345 24633 24634 -3 24345 24634 24346 -3 24346 24634 24635 -3 24346 24635 24347 -3 24347 24635 24636 -3 24347 24636 24348 -3 24348 24636 24637 -3 24348 24637 24349 -3 24349 24637 24638 -3 24349 24638 24350 -3 24350 24638 24639 -3 24350 24639 24351 -3 24351 24639 24640 -3 24351 24640 24352 -3 24352 24640 24641 -3 24352 24641 24353 -3 24353 24641 24642 -3 24353 24642 24354 -3 24354 24642 24643 -3 24354 24643 24355 -3 24355 24643 24644 -3 24355 24644 24356 -3 24356 24644 24645 -3 24356 24645 24357 -3 24357 24645 24646 -3 24357 24646 24358 -3 24358 24646 24647 -3 24358 24647 24359 -3 24359 24647 24648 -3 24359 24648 24360 -3 24360 24648 24649 -3 24360 24649 24361 -3 24361 24649 24650 -3 24361 24650 24362 -3 24362 24650 24651 -3 24362 24651 24363 -3 24363 24651 24652 -3 24363 24652 24364 -3 24364 24652 24653 -3 24364 24653 24365 -3 24365 24653 24654 -3 24365 24654 24366 -3 24366 24654 24655 -3 24366 24655 24367 -3 24367 24655 24656 -3 24367 24656 24368 -3 24368 24656 24657 -3 24368 24657 24369 -3 24369 24657 24662 -3 24369 24662 24372 -3 24370 24658 24659 -3 24370 24659 24371 -3 24370 24375 24665 -3 24370 24665 24658 -3 24372 24662 24663 -3 24372 24663 24373 -3 24373 24663 24666 -3 24373 24666 24376 -3 24374 24664 24665 -3 24374 24665 24375 -3 24374 24378 24668 -3 24374 24668 24664 -3 24376 24666 24667 -3 24376 24667 24377 -3 24377 24667 24671 -3 24377 24671 24379 -3 24378 24380 24672 -3 24378 24672 24668 -3 24379 24671 24673 -3 24379 24673 24381 -3 24380 24382 24674 -3 24380 24674 24672 -3 24381 24673 24675 -3 24381 24675 24383 -3 24382 24384 24676 -3 24382 24676 24674 -3 24383 24675 24677 -3 24383 24677 24385 -3 24384 24386 24678 -3 24384 24678 24676 -3 24385 24677 24679 -3 24385 24679 24387 -3 24386 24388 24680 -3 24386 24680 24678 -3 24387 24679 24681 -3 24387 24681 24389 -3 24388 24390 24682 -3 24388 24682 24680 -3 24389 24681 24683 -3 24389 24683 24391 -3 24390 24392 24684 -3 24390 24684 24682 -3 24391 24683 24685 -3 24391 24685 24393 -3 24392 24394 24686 -3 24392 24686 24684 -3 24393 24685 24687 -3 24393 24687 24395 -3 24394 24396 24688 -3 24394 24688 24686 -3 24395 24687 24689 -3 24395 24689 24397 -3 24396 24398 24688 -3 24690 24688 24398 -3 24397 24689 24691 -3 24397 24691 24399 -3 24398 24400 24690 -3 24692 24690 24400 -3 24399 24691 24693 -3 24399 24693 24401 -3 24400 24402 24692 -3 24694 24692 24402 -3 24401 24693 24695 -3 24401 24695 24403 -3 24402 24404 24694 -3 24696 24694 24404 -3 24403 24695 24697 -3 24403 24697 24405 -3 24404 24406 24696 -3 24698 24696 24406 -3 24405 24697 24699 -3 24405 24699 24407 -3 24406 24408 24698 -3 24700 24698 24408 -3 24407 24699 24701 -3 24407 24701 24409 -3 24408 24410 24700 -3 24702 24700 24410 -3 24409 24701 24703 -3 24409 24703 24411 -3 24410 24412 24702 -3 24704 24702 24412 -3 24411 24703 24705 -3 24411 24705 24413 -3 24412 24414 24704 -3 24706 24704 24414 -3 24413 24705 24707 -3 24413 24707 24415 
-3 24414 24416 24706 -3 24708 24706 24416 -3 24415 24707 24709 -3 24415 24709 24417 -3 24416 24418 24708 -3 24710 24708 24418 -3 24417 24709 24711 -3 24417 24711 24419 -3 24418 24420 24710 -3 24712 24710 24420 -3 24419 24711 24713 -3 24419 24713 24421 -3 24420 24422 24712 -3 24714 24712 24422 -3 24421 24713 24423 -3 24715 24423 24713 -3 24422 24424 24714 -3 24716 24714 24424 -3 24423 24715 24425 -3 24717 24425 24715 -3 24424 24426 24716 -3 24718 24716 24426 -3 24425 24717 24427 -3 24719 24427 24717 -3 24426 24428 24718 -3 24720 24718 24428 -3 24427 24719 24429 -3 24721 24429 24719 -3 24428 24430 24720 -3 24722 24720 24430 -3 24429 24721 24431 -3 24723 24431 24721 -3 24430 24432 24722 -3 24724 24722 24432 -3 24431 24723 24433 -3 24725 24433 24723 -3 24432 24434 24724 -3 24726 24724 24434 -3 24433 24725 24435 -3 24727 24435 24725 -3 24434 24436 24726 -3 24728 24726 24436 -3 24435 24727 24437 -3 24729 24437 24727 -3 24436 24438 24728 -3 24730 24728 24438 -3 24437 24729 24439 -3 24731 24439 24729 -3 24438 24440 24730 -3 24732 24730 24440 -3 24439 24731 24441 -3 24733 24441 24731 -3 24440 24442 24732 -3 24734 24732 24442 -3 24441 24733 24443 -3 24735 24443 24733 -3 24442 24444 24734 -3 24736 24734 24444 -3 24443 24735 24445 -3 24737 24445 24735 -3 24444 24446 24736 -3 24738 24736 24446 -3 24445 24737 24447 -3 24739 24447 24737 -3 24446 24448 24738 -3 24740 24738 24448 -3 24447 24739 24449 -3 24741 24449 24739 -3 24448 24450 24740 -3 24742 24740 24450 -3 24449 24741 24451 -3 24743 24451 24741 -3 24450 24452 24742 -3 24744 24742 24452 -3 24451 24743 24453 -3 24745 24453 24743 -3 24452 24454 24744 -3 24746 24744 24454 -3 24453 24745 24455 -3 24747 24455 24745 -3 24454 24456 24746 -3 24748 24746 24456 -3 24455 24747 24457 -3 24749 24457 24747 -3 24456 24458 24748 -3 24750 24748 24458 -3 24457 24749 24459 -3 24751 24459 24749 -3 24458 24460 24750 -3 24752 24750 24460 -3 24459 24751 24461 -3 24753 24461 24751 -3 24460 24462 24752 -3 24754 24752 24462 -3 24461 24753 24463 -3 24755 24463 24753 -3 24462 24464 24754 -3 24756 24754 24464 -3 24463 24755 24465 -3 24757 24465 24755 -3 24464 24466 24756 -3 24758 24756 24466 -3 24465 24757 24467 -3 24759 24467 24757 -3 24466 24468 24758 -3 24760 24758 24468 -3 24467 24759 24469 -3 24761 24469 24759 -3 24468 24470 24760 -3 24762 24760 24470 -3 24469 24761 24471 -3 24763 24471 24761 -3 24470 24472 24762 -3 24764 24762 24472 -3 24471 24763 24473 -3 24765 24473 24763 -3 24472 24474 24764 -3 24766 24764 24474 -3 24473 24765 24475 -3 24767 24475 24765 -3 24474 24476 24766 -3 24768 24766 24476 -3 24475 24767 24477 -3 24769 24477 24767 -3 24476 24478 24768 -3 24770 24768 24478 -3 24477 24769 24479 -3 24771 24479 24769 -3 24478 24480 24770 -3 24772 24770 24480 -3 24479 24771 24481 -3 24773 24481 24771 -3 24480 24482 24772 -3 24774 24772 24482 -3 24481 24773 24483 -3 24775 24483 24773 -3 24482 24484 24774 -3 24776 24774 24484 -3 24483 24775 24485 -3 24777 24485 24775 -3 24484 24486 24776 -3 24778 24776 24486 -3 24485 24777 24487 -3 24779 24487 24777 -3 24486 24488 24778 -3 24780 24778 24488 -3 24487 24779 24489 -3 24781 24489 24779 -3 24488 24490 24780 -3 24782 24780 24490 -3 24489 24781 24491 -3 24783 24491 24781 -3 24490 24492 24782 -3 24784 24782 24492 -3 24491 24783 24493 -3 24785 24493 24783 -3 24492 24494 24786 -3 24492 24786 24784 -3 24493 24785 24495 -3 24787 24495 24785 -3 24494 24496 24788 -3 24494 24788 24786 -3 24495 24787 24497 -3 24789 24497 24787 -3 24496 24498 24790 -3 24496 24790 24788 -3 24497 24789 24499 -3 24791 24499 24789 -3 24498 24500 24792 -3 
24498 24792 24790 -3 24499 24791 24501 -3 24793 24501 24791 -3 24500 24502 24794 -3 24500 24794 24792 -3 24501 24793 24503 -3 24795 24503 24793 -3 24502 24504 24796 -3 24502 24796 24794 -3 24503 24795 24505 -3 24797 24505 24795 -3 24504 24506 24798 -3 24504 24798 24796 -3 24505 24797 24507 -3 24799 24507 24797 -3 24506 24508 24800 -3 24506 24800 24798 -3 24507 24799 24509 -3 24801 24509 24799 -3 24508 24510 24802 -3 24508 24802 24800 -3 24509 24801 24511 -3 24805 24511 24801 -3 24510 24512 24806 -3 24510 24806 24802 -3 24511 24805 24515 -3 24809 24515 24805 -3 24512 24513 24807 -3 24512 24807 24806 -3 24513 24516 24810 -3 24513 24810 24807 -3 24514 24515 24809 -3 24514 24809 24808 -3 24514 24808 24519 -3 24815 24519 24808 -3 24516 24517 24811 -3 24516 24811 24810 -3 24517 24520 24816 -3 24517 24816 24811 -3 24518 24519 24815 -3 24518 24815 24814 -3 24518 24814 24884 -3 24518 24884 24588 -3 24520 24521 24817 -3 24520 24817 24816 -3 24521 24522 24818 -3 24521 24818 24817 -3 24522 24523 24819 -3 24522 24819 24818 -3 24523 24524 24820 -3 24523 24820 24819 -3 24524 24525 24821 -3 24524 24821 24820 -3 24525 24526 24822 -3 24525 24822 24821 -3 24526 24527 24823 -3 24526 24823 24822 -3 24527 24528 24824 -3 24527 24824 24823 -3 24528 24529 24825 -3 24528 24825 24824 -3 24529 24530 24826 -3 24529 24826 24825 -3 24530 24531 24827 -3 24530 24827 24826 -3 24531 24532 24828 -3 24531 24828 24827 -3 24532 24533 24829 -3 24532 24829 24828 -3 24533 24534 24830 -3 24533 24830 24829 -3 24534 24535 24831 -3 24534 24831 24830 -3 24535 24536 24832 -3 24535 24832 24831 -3 24536 24537 24833 -3 24536 24833 24832 -3 24537 24538 24834 -3 24537 24834 24833 -3 24538 24539 24835 -3 24538 24835 24834 -3 24539 24540 24836 -3 24539 24836 24835 -3 24540 24541 24837 -3 24540 24837 24836 -3 24541 24542 24838 -3 24541 24838 24837 -3 24542 24543 24839 -3 24542 24839 24838 -3 24543 24544 24840 -3 24543 24840 24839 -3 24544 24545 24841 -3 24544 24841 24840 -3 24545 24546 24842 -3 24545 24842 24841 -3 24546 24547 24843 -3 24546 24843 24842 -3 24547 24548 24844 -3 24547 24844 24843 -3 24548 24549 24845 -3 24548 24845 24844 -3 24549 24550 24846 -3 24549 24846 24845 -3 24550 24551 24847 -3 24550 24847 24846 -3 24551 24552 24848 -3 24551 24848 24847 -3 24552 24553 24849 -3 24552 24849 24848 -3 24553 24554 24850 -3 24553 24850 24849 -3 24554 24555 24851 -3 24554 24851 24850 -3 24555 24556 24852 -3 24555 24852 24851 -3 24556 24557 24853 -3 24556 24853 24852 -3 24557 24558 24854 -3 24557 24854 24853 -3 24558 24559 24855 -3 24558 24855 24854 -3 24559 24560 24856 -3 24559 24856 24855 -3 24560 24561 24857 -3 24560 24857 24856 -3 24561 24562 24858 -3 24561 24858 24857 -3 24562 24563 24859 -3 24562 24859 24858 -3 24563 24564 24860 -3 24563 24860 24859 -3 24564 24565 24861 -3 24564 24861 24860 -3 24565 24566 24862 -3 24565 24862 24861 -3 24566 24567 24863 -3 24566 24863 24862 -3 24567 24568 24864 -3 24567 24864 24863 -3 24568 24569 24865 -3 24568 24865 24864 -3 24569 24570 24866 -3 24569 24866 24865 -3 24570 24571 24867 -3 24570 24867 24866 -3 24571 24572 24868 -3 24571 24868 24867 -3 24572 24573 24869 -3 24572 24869 24868 -3 24573 24574 24870 -3 24573 24870 24869 -3 24574 24575 24871 -3 24574 24871 24870 -3 24575 24576 24872 -3 24575 24872 24871 -3 24576 24577 24873 -3 24576 24873 24872 -3 24577 24578 24874 -3 24577 24874 24873 -3 24578 24579 24875 -3 24578 24875 24874 -3 24579 24580 24876 -3 24579 24876 24875 -3 24580 24581 24877 -3 24580 24877 24876 -3 24581 24582 24878 -3 24581 24878 24877 -3 24582 24583 24879 -3 24582 24879 24878 -3 24583 
24584 24880 -3 24583 24880 24879 -3 24584 24585 24881 -3 24584 24881 24880 -3 24585 24586 24882 -3 24585 24882 24881 -3 24586 24587 24883 -3 24586 24883 24882 -3 24587 24588 24884 -3 24587 24884 24883 -3 24589 24659 24590 -3 24660 24590 24659 -3 24590 24885 24886 -3 24590 24886 24591 -3 24590 24660 24885 -3 24954 24885 24660 -3 24591 24886 24887 -3 24591 24887 24592 -3 24592 24887 24888 -3 24592 24888 24593 -3 24593 24888 24889 -3 24593 24889 24594 -3 24594 24889 24890 -3 24594 24890 24595 -3 24595 24890 24891 -3 24595 24891 24596 -3 24596 24891 24892 -3 24596 24892 24597 -3 24597 24892 24893 -3 24597 24893 24598 -3 24598 24893 24894 -3 24598 24894 24599 -3 24599 24894 24895 -3 24599 24895 24600 -3 24600 24895 24896 -3 24600 24896 24601 -3 24601 24896 24897 -3 24601 24897 24602 -3 24602 24897 24898 -3 24602 24898 24603 -3 24603 24898 24899 -3 24603 24899 24604 -3 24604 24899 24900 -3 24604 24900 24605 -3 24605 24900 24901 -3 24605 24901 24606 -3 24606 24901 24902 -3 24606 24902 24607 -3 24607 24902 24903 -3 24607 24903 24608 -3 24608 24903 24904 -3 24608 24904 24609 -3 24609 24904 24905 -3 24609 24905 24610 -3 24610 24905 24906 -3 24610 24906 24611 -3 24611 24906 24907 -3 24611 24907 24612 -3 24612 24907 24613 -3 24908 24613 24907 -3 24613 24908 24614 -3 24909 24614 24908 -3 24614 24909 24615 -3 24910 24615 24909 -3 24615 24910 24616 -3 24911 24616 24910 -3 24616 24911 24617 -3 24912 24617 24911 -3 24617 24912 24618 -3 24913 24618 24912 -3 24618 24913 24619 -3 24914 24619 24913 -3 24619 24914 24620 -3 24915 24620 24914 -3 24620 24915 24621 -3 24916 24621 24915 -3 24621 24916 24622 -3 24917 24622 24916 -3 24622 24917 24623 -3 24918 24623 24917 -3 24623 24918 24624 -3 24919 24624 24918 -3 24624 24919 24625 -3 24920 24625 24919 -3 24625 24920 24626 -3 24921 24626 24920 -3 24626 24921 24627 -3 24922 24627 24921 -3 24627 24922 24628 -3 24923 24628 24922 -3 24628 24923 24629 -3 24924 24629 24923 -3 24629 24924 24630 -3 24925 24630 24924 -3 24630 24925 24631 -3 24926 24631 24925 -3 24631 24926 24632 -3 24927 24632 24926 -3 24632 24927 24633 -3 24928 24633 24927 -3 24633 24928 24634 -3 24929 24634 24928 -3 24634 24929 24635 -3 24930 24635 24929 -3 24635 24930 24636 -3 24931 24636 24930 -3 24636 24931 24637 -3 24932 24637 24931 -3 24637 24932 24638 -3 24933 24638 24932 -3 24638 24933 24639 -3 24934 24639 24933 -3 24639 24934 24640 -3 24935 24640 24934 -3 24640 24935 24641 -3 24936 24641 24935 -3 24641 24936 24642 -3 24937 24642 24936 -3 24642 24937 24643 -3 24938 24643 24937 -3 24643 24938 24644 -3 24939 24644 24938 -3 24644 24939 24645 -3 24940 24645 24939 -3 24645 24940 24646 -3 24941 24646 24940 -3 24646 24941 24647 -3 24942 24647 24941 -3 24647 24942 24648 -3 24943 24648 24942 -3 24648 24943 24649 -3 24944 24649 24943 -3 24649 24944 24650 -3 24945 24650 24944 -3 24650 24945 24651 -3 24946 24651 24945 -3 24651 24946 24652 -3 24947 24652 24946 -3 24652 24947 24653 -3 24948 24653 24947 -3 24653 24948 24654 -3 24949 24654 24948 -3 24654 24949 24655 -3 24950 24655 24949 -3 24655 24950 24656 -3 24951 24656 24950 -3 24656 24661 24657 -3 24662 24657 24661 -3 24656 24951 24661 -3 24957 24661 24951 -3 24658 24952 24659 -3 24953 24659 24952 -3 24658 24665 24952 -3 24960 24952 24665 -3 24659 24953 24660 -3 24954 24660 24953 -3 24661 24957 24662 -3 24958 24662 24957 -3 24662 24958 24663 -3 24959 24663 24958 -3 24663 24959 24666 -3 24963 24666 24959 -3 24664 24668 24665 -3 24669 24665 24668 -3 24665 24669 24960 -3 24965 24960 24669 -3 24666 24670 24667 -3 24671 24667 24670 -3 24666 24963 24670 -3 24966 24670 
24963 -3 24668 24964 24669 -3 24965 24669 24964 -3 24668 24672 24964 -3 24968 24964 24672 -3 24670 24966 24671 -3 24967 24671 24966 -3 24671 24967 24673 -3 24971 24673 24967 -3 24672 24674 24968 -3 24972 24968 24674 -3 24673 24971 24675 -3 24973 24675 24971 -3 24674 24676 24972 -3 24974 24972 24676 -3 24675 24973 24677 -3 24975 24677 24973 -3 24676 24678 24974 -3 24976 24974 24678 -3 24677 24975 24679 -3 24977 24679 24975 -3 24678 24680 24976 -3 24978 24976 24680 -3 24679 24977 24681 -3 24979 24681 24977 -3 24680 24682 24978 -3 24980 24978 24682 -3 24681 24979 24683 -3 24981 24683 24979 -3 24682 24684 24980 -3 24982 24980 24684 -3 24683 24981 24685 -3 24983 24685 24981 -3 24684 24686 24984 -3 24684 24984 24982 -3 24685 24983 24687 -3 24985 24687 24983 -3 24686 24688 24986 -3 24686 24986 24984 -3 24687 24985 24689 -3 24987 24689 24985 -3 24688 24690 24988 -3 24688 24988 24986 -3 24689 24987 24691 -3 24989 24691 24987 -3 24690 24692 24990 -3 24690 24990 24988 -3 24691 24989 24693 -3 24991 24693 24989 -3 24692 24694 24992 -3 24692 24992 24990 -3 24693 24991 24695 -3 24993 24695 24991 -3 24694 24696 24994 -3 24694 24994 24992 -3 24695 24993 24697 -3 24995 24697 24993 -3 24696 24698 24996 -3 24696 24996 24994 -3 24697 24995 24699 -3 24997 24699 24995 -3 24698 24700 24998 -3 24698 24998 24996 -3 24699 24997 24701 -3 24999 24701 24997 -3 24700 24702 25000 -3 24700 25000 24998 -3 24701 24999 24703 -3 25001 24703 24999 -3 24702 24704 25002 -3 24702 25002 25000 -3 24703 25001 24705 -3 25003 24705 25001 -3 24704 24706 25004 -3 24704 25004 25002 -3 24705 25003 24707 -3 25005 24707 25003 -3 24706 24708 25006 -3 24706 25006 25004 -3 24707 25005 24709 -3 25007 24709 25005 -3 24708 24710 25008 -3 24708 25008 25006 -3 24709 25007 25009 -3 24709 25009 24711 -3 24710 24712 25010 -3 24710 25010 25008 -3 24711 25009 25011 -3 24711 25011 24713 -3 24712 24714 25012 -3 24712 25012 25010 -3 24713 25011 25013 -3 24713 25013 24715 -3 24714 24716 25014 -3 24714 25014 25012 -3 24715 25013 25015 -3 24715 25015 24717 -3 24716 24718 25016 -3 24716 25016 25014 -3 24717 25015 25017 -3 24717 25017 24719 -3 24718 24720 25018 -3 24718 25018 25016 -3 24719 25017 25019 -3 24719 25019 24721 -3 24720 24722 25020 -3 24720 25020 25018 -3 24721 25019 25021 -3 24721 25021 24723 -3 24722 24724 25022 -3 24722 25022 25020 -3 24723 25021 25023 -3 24723 25023 24725 -3 24724 24726 25024 -3 24724 25024 25022 -3 24725 25023 25025 -3 24725 25025 24727 -3 24726 24728 25026 -3 24726 25026 25024 -3 24727 25025 25027 -3 24727 25027 24729 -3 24728 24730 25028 -3 24728 25028 25026 -3 24729 25027 25029 -3 24729 25029 24731 -3 24730 24732 25030 -3 24730 25030 25028 -3 24731 25029 25031 -3 24731 25031 24733 -3 24732 24734 25032 -3 24732 25032 25030 -3 24733 25031 25033 -3 24733 25033 24735 -3 24734 24736 25034 -3 24734 25034 25032 -3 24735 25033 25035 -3 24735 25035 24737 -3 24736 24738 25036 -3 24736 25036 25034 -3 24737 25035 25037 -3 24737 25037 24739 -3 24738 24740 25038 -3 24738 25038 25036 -3 24739 25037 25039 -3 24739 25039 24741 -3 24740 24742 25040 -3 24740 25040 25038 -3 24741 25039 25041 -3 24741 25041 24743 -3 24742 24744 25042 -3 24742 25042 25040 -3 24743 25041 25043 -3 24743 25043 24745 -3 24744 24746 25044 -3 24744 25044 25042 -3 24745 25043 25045 -3 24745 25045 24747 -3 24746 24748 25046 -3 24746 25046 25044 -3 24747 25045 25047 -3 24747 25047 24749 -3 24748 24750 25048 -3 24748 25048 25046 -3 24749 25047 25049 -3 24749 25049 24751 -3 24750 24752 25050 -3 24750 25050 25048 -3 24751 25049 25051 -3 24751 25051 24753 -3 24752 24754 25052 
-3 24752 25052 25050 -3 24753 25051 25053 -3 24753 25053 24755 -3 24754 24756 25054 -3 24754 25054 25052 -3 24755 25053 25055 -3 24755 25055 24757 -3 24756 24758 25056 -3 24756 25056 25054 -3 24757 25055 25057 -3 24757 25057 24759 -3 24758 24760 25058 -3 24758 25058 25056 -3 24759 25057 25059 -3 24759 25059 24761 -3 24760 24762 25060 -3 24760 25060 25058 -3 24761 25059 25061 -3 24761 25061 24763 -3 24762 24764 25062 -3 24762 25062 25060 -3 24763 25061 25063 -3 24763 25063 24765 -3 24764 24766 25064 -3 24764 25064 25062 -3 24765 25063 25065 -3 24765 25065 24767 -3 24766 24768 25066 -3 24766 25066 25064 -3 24767 25065 25067 -3 24767 25067 24769 -3 24768 24770 25068 -3 24768 25068 25066 -3 24769 25067 25069 -3 24769 25069 24771 -3 24770 24772 25070 -3 24770 25070 25068 -3 24771 25069 25071 -3 24771 25071 24773 -3 24772 24774 25072 -3 24772 25072 25070 -3 24773 25071 25073 -3 24773 25073 24775 -3 24774 24776 25074 -3 24774 25074 25072 -3 24775 25073 25075 -3 24775 25075 24777 -3 24776 24778 25076 -3 24776 25076 25074 -3 24777 25075 25077 -3 24777 25077 24779 -3 24778 24780 25078 -3 24778 25078 25076 -3 24779 25077 25079 -3 24779 25079 24781 -3 24780 24782 25080 -3 24780 25080 25078 -3 24781 25079 25081 -3 24781 25081 24783 -3 24782 24784 25080 -3 25082 25080 24784 -3 24783 25081 25083 -3 24783 25083 24785 -3 24784 24786 25082 -3 25084 25082 24786 -3 24785 25083 25085 -3 24785 25085 24787 -3 24786 24788 25084 -3 25086 25084 24788 -3 24787 25085 25087 -3 24787 25087 24789 -3 24788 24790 25086 -3 25088 25086 24790 -3 24789 25087 25089 -3 24789 25089 24791 -3 24790 24792 25088 -3 25090 25088 24792 -3 24791 25089 25091 -3 24791 25091 24793 -3 24792 24794 25090 -3 25092 25090 24794 -3 24793 25091 25093 -3 24793 25093 24795 -3 24794 24796 25092 -3 25094 25092 24796 -3 24795 25093 25095 -3 24795 25095 24797 -3 24796 24798 25094 -3 25096 25094 24798 -3 24797 25095 25097 -3 24797 25097 24799 -3 24798 24800 25096 -3 25098 25096 24800 -3 24799 25097 25101 -3 24799 25101 24801 -3 24800 24802 25098 -3 25102 25098 24802 -3 24801 25101 25105 -3 24801 25105 24805 -3 24802 24806 24803 -3 24807 24803 24806 -3 24802 24803 25102 -3 25103 25102 24803 -3 24803 24807 25103 -3 25106 25103 24807 -3 24804 24808 24805 -3 24809 24805 24808 -3 24804 24805 25104 -3 25105 25104 24805 -3 24804 25104 24808 -3 25109 24808 25104 -3 24807 24810 25106 -3 25110 25106 24810 -3 24808 25109 24815 -3 25117 24815 25109 -3 24810 24811 25110 -3 25111 25110 24811 -3 24811 24816 24812 -3 24817 24812 24816 -3 24811 24812 25111 -3 25112 25111 24812 -3 24812 24817 25112 -3 25118 25112 24817 -3 24813 24883 24814 -3 24884 24814 24883 -3 24813 24814 25115 -3 25116 25115 24814 -3 24813 25115 24883 -3 25184 24883 25115 -3 24814 24815 25116 -3 25117 25116 24815 -3 24817 24818 25118 -3 25119 25118 24818 -3 24818 24819 25119 -3 25120 25119 24819 -3 24819 24820 25120 -3 25121 25120 24820 -3 24820 24821 25121 -3 25122 25121 24821 -3 24821 24822 25122 -3 25123 25122 24822 -3 24822 24823 25123 -3 25124 25123 24823 -3 24823 24824 25124 -3 25125 25124 24824 -3 24824 24825 25125 -3 25126 25125 24825 -3 24825 24826 25126 -3 25127 25126 24826 -3 24826 24827 25127 -3 25128 25127 24827 -3 24827 24828 25128 -3 25129 25128 24828 -3 24828 24829 25129 -3 25130 25129 24829 -3 24829 24830 25130 -3 25131 25130 24830 -3 24830 24831 25131 -3 25132 25131 24831 -3 24831 24832 25132 -3 25133 25132 24832 -3 24832 24833 25133 -3 25134 25133 24833 -3 24833 24834 25134 -3 25135 25134 24834 -3 24834 24835 25135 -3 25136 25135 24835 -3 24835 24836 25136 -3 25137 25136 24836 -3 
24836 24837 25137 -3 25138 25137 24837 -3 24837 24838 25138 -3 25139 25138 24838 -3 24838 24839 25139 -3 25140 25139 24839 -3 24839 24840 25140 -3 25141 25140 24840 -3 24840 24841 25141 -3 25142 25141 24841 -3 24841 24842 25142 -3 25143 25142 24842 -3 24842 24843 25143 -3 25144 25143 24843 -3 24843 24844 25144 -3 25145 25144 24844 -3 24844 24845 25145 -3 25146 25145 24845 -3 24845 24846 25146 -3 25147 25146 24846 -3 24846 24847 25147 -3 25148 25147 24847 -3 24847 24848 25148 -3 25149 25148 24848 -3 24848 24849 25149 -3 25150 25149 24849 -3 24849 24850 25150 -3 25151 25150 24850 -3 24850 24851 25151 -3 25152 25151 24851 -3 24851 24852 25152 -3 25153 25152 24852 -3 24852 24853 25153 -3 25154 25153 24853 -3 24853 24854 25154 -3 25155 25154 24854 -3 24854 24855 25155 -3 25156 25155 24855 -3 24855 24856 25156 -3 25157 25156 24856 -3 24856 24857 25157 -3 25158 25157 24857 -3 24857 24858 25158 -3 25159 25158 24858 -3 24858 24859 25159 -3 25160 25159 24859 -3 24859 24860 25160 -3 25161 25160 24860 -3 24860 24861 25161 -3 25162 25161 24861 -3 24861 24862 25162 -3 25163 25162 24862 -3 24862 24863 25163 -3 25164 25163 24863 -3 24863 24864 25164 -3 25165 25164 24864 -3 24864 24865 25165 -3 25166 25165 24865 -3 24865 24866 25166 -3 25167 25166 24866 -3 24866 24867 25167 -3 25168 25167 24867 -3 24867 24868 25168 -3 25169 25168 24868 -3 24868 24869 25169 -3 25170 25169 24869 -3 24869 24870 25170 -3 25171 25170 24870 -3 24870 24871 25171 -3 25172 25171 24871 -3 24871 24872 25172 -3 25173 25172 24872 -3 24872 24873 25173 -3 25174 25173 24873 -3 24873 24874 25174 -3 25175 25174 24874 -3 24874 24875 25175 -3 25176 25175 24875 -3 24875 24876 25176 -3 25177 25176 24876 -3 24876 24877 25177 -3 25178 25177 24877 -3 24877 24878 25178 -3 25179 25178 24878 -3 24878 24879 25179 -3 25180 25179 24879 -3 24879 24880 25181 -3 24879 25181 25180 -3 24880 24881 25182 -3 24880 25182 25181 -3 24881 24882 25183 -3 24881 25183 25182 -3 24882 24883 25184 -3 24882 25184 25183 -3 24885 24954 24955 -3 24885 24955 24886 -3 24886 25185 24887 -3 25186 24887 25185 -3 24886 24955 25252 -3 24886 25252 25185 -3 24887 25186 24888 -3 25187 24888 25186 -3 24888 25187 24889 -3 25188 24889 25187 -3 24889 25188 24890 -3 25189 24890 25188 -3 24890 25189 24891 -3 25190 24891 25189 -3 24891 25190 24892 -3 25191 24892 25190 -3 24892 25191 24893 -3 25192 24893 25191 -3 24893 25192 24894 -3 25193 24894 25192 -3 24894 25193 24895 -3 25194 24895 25193 -3 24895 25194 24896 -3 25195 24896 25194 -3 24896 25195 24897 -3 25196 24897 25195 -3 24897 25196 24898 -3 25197 24898 25196 -3 24898 25197 24899 -3 25198 24899 25197 -3 24899 25198 24900 -3 25199 24900 25198 -3 24900 25199 24901 -3 25200 24901 25199 -3 24901 25200 24902 -3 25201 24902 25200 -3 24902 25201 25202 -3 24902 25202 24903 -3 24903 25202 25203 -3 24903 25203 24904 -3 24904 25203 25204 -3 24904 25204 24905 -3 24905 25204 25205 -3 24905 25205 24906 -3 24906 25205 25206 -3 24906 25206 24907 -3 24907 25206 25207 -3 24907 25207 24908 -3 24908 25207 25208 -3 24908 25208 24909 -3 24909 25208 25209 -3 24909 25209 24910 -3 24910 25209 25210 -3 24910 25210 24911 -3 24911 25210 25211 -3 24911 25211 24912 -3 24912 25211 25212 -3 24912 25212 24913 -3 24913 25212 25213 -3 24913 25213 24914 -3 24914 25213 25214 -3 24914 25214 24915 -3 24915 25214 25215 -3 24915 25215 24916 -3 24916 25215 25216 -3 24916 25216 24917 -3 24917 25216 25217 -3 24917 25217 24918 -3 24918 25217 25218 -3 24918 25218 24919 -3 24919 25218 25219 -3 24919 25219 24920 -3 24920 25219 25220 -3 24920 25220 24921 -3 24921 25220 25221 -3 24921 
25221 24922 -3 24922 25221 25222 -3 24922 25222 24923 -3 24923 25222 25223 -3 24923 25223 24924 -3 24924 25223 25224 -3 24924 25224 24925 -3 24925 25224 25225 -3 24925 25225 24926 -3 24926 25225 25226 -3 24926 25226 24927 -3 24927 25226 25227 -3 24927 25227 24928 -3 24928 25227 25228 -3 24928 25228 24929 -3 24929 25228 25229 -3 24929 25229 24930 -3 24930 25229 25230 -3 24930 25230 24931 -3 24931 25230 25231 -3 24931 25231 24932 -3 24932 25231 25232 -3 24932 25232 24933 -3 24933 25232 25233 -3 24933 25233 24934 -3 24934 25233 25234 -3 24934 25234 24935 -3 24935 25234 25235 -3 24935 25235 24936 -3 24936 25235 25236 -3 24936 25236 24937 -3 24937 25236 25237 -3 24937 25237 24938 -3 24938 25237 25238 -3 24938 25238 24939 -3 24939 25238 25239 -3 24939 25239 24940 -3 24940 25239 25240 -3 24940 25240 24941 -3 24941 25240 25241 -3 24941 25241 24942 -3 24942 25241 25242 -3 24942 25242 24943 -3 24943 25242 25243 -3 24943 25243 24944 -3 24944 25243 25244 -3 24944 25244 24945 -3 24945 25244 25245 -3 24945 25245 24946 -3 24946 25245 25246 -3 24946 25246 24947 -3 24947 25246 25247 -3 24947 25247 24948 -3 24948 25247 25248 -3 24948 25248 24949 -3 24949 25248 25249 -3 24949 25249 24950 -3 24950 24956 24957 -3 24950 24957 24951 -3 24950 25249 25316 -3 24950 25316 24956 -3 24952 24960 24961 -3 24952 24961 24953 -3 24953 25250 25251 -3 24953 25251 24954 -3 24953 24961 25320 -3 24953 25320 25250 -3 24954 25251 25252 -3 24954 25252 24955 -3 24956 25316 25317 -3 24956 25317 24957 -3 24957 25317 25318 -3 24957 25318 24958 -3 24958 24962 24963 -3 24958 24963 24959 -3 24958 25318 25323 -3 24958 25323 24962 -3 24960 25319 25320 -3 24960 25320 24961 -3 24960 24965 25325 -3 24960 25325 25319 -3 24962 25323 25324 -3 24962 25324 24963 -3 24963 25324 25328 -3 24963 25328 24966 -3 24964 24968 24969 -3 24964 24969 24965 -3 24965 24969 25330 -3 24965 25330 25325 -3 24966 24970 24971 -3 24966 24971 24967 -3 24966 25328 24970 -3 25331 24970 25328 -3 24968 25329 24969 -3 25330 24969 25329 -3 24968 24972 25333 -3 24968 25333 25329 -3 24970 25331 24971 -3 25332 24971 25331 -3 24971 25332 24973 -3 25336 24973 25332 -3 24972 24974 25337 -3 24972 25337 25333 -3 24973 25336 24975 -3 25340 24975 25336 -3 24974 24976 25341 -3 24974 25341 25337 -3 24975 25340 24977 -3 25344 24977 25340 -3 24976 24978 25341 -3 25345 25341 24978 -3 24977 25344 24979 -3 25348 24979 25344 -3 24978 24980 25345 -3 25349 25345 24980 -3 24979 25348 24981 -3 25352 24981 25348 -3 24980 24982 25349 -3 25353 25349 24982 -3 24981 25352 24983 -3 25356 24983 25352 -3 24982 24984 25353 -3 25357 25353 24984 -3 24983 25356 24985 -3 25360 24985 25356 -3 24984 24986 25357 -3 25361 25357 24986 -3 24985 25360 24987 -3 25364 24987 25360 -3 24986 24988 25361 -3 25365 25361 24988 -3 24987 25364 24989 -3 25368 24989 25364 -3 24988 24990 25365 -3 25369 25365 24990 -3 24989 25368 24991 -3 25372 24991 25368 -3 24990 24992 25369 -3 25373 25369 24992 -3 24991 25372 24993 -3 25376 24993 25372 -3 24992 24994 25373 -3 25377 25373 24994 -3 24993 25376 24995 -3 25380 24995 25376 -3 24994 24996 25377 -3 25381 25377 24996 -3 24995 25380 24997 -3 25384 24997 25380 -3 24996 24998 25381 -3 25385 25381 24998 -3 24997 25384 24999 -3 25388 24999 25384 -3 24998 25000 25385 -3 25389 25385 25000 -3 24999 25388 25001 -3 25392 25001 25388 -3 25000 25002 25389 -3 25393 25389 25002 -3 25001 25392 25003 -3 25396 25003 25392 -3 25002 25004 25393 -3 25397 25393 25004 -3 25003 25396 25005 -3 25400 25005 25396 -3 25004 25006 25397 -3 25401 25397 25006 -3 25005 25400 25007 -3 25404 25007 25400 -3 25006 25008 
25401 -3 25405 25401 25008 -3 25007 25404 25009 -3 25408 25009 25404 -3 25008 25010 25405 -3 25409 25405 25010 -3 25009 25408 25011 -3 25412 25011 25408 -3 25010 25012 25409 -3 25413 25409 25012 -3 25011 25412 25013 -3 25416 25013 25412 -3 25012 25014 25413 -3 25417 25413 25014 -3 25013 25416 25015 -3 25420 25015 25416 -3 25014 25016 25417 -3 25421 25417 25016 -3 25015 25420 25017 -3 25424 25017 25420 -3 25016 25018 25421 -3 25425 25421 25018 -3 25017 25424 25019 -3 25428 25019 25424 -3 25018 25020 25425 -3 25429 25425 25020 -3 25019 25428 25021 -3 25432 25021 25428 -3 25020 25022 25429 -3 25433 25429 25022 -3 25021 25432 25023 -3 25436 25023 25432 -3 25022 25024 25433 -3 25437 25433 25024 -3 25023 25436 25025 -3 25440 25025 25436 -3 25024 25026 25437 -3 25441 25437 25026 -3 25025 25440 25027 -3 25444 25027 25440 -3 25026 25028 25441 -3 25445 25441 25028 -3 25027 25444 25029 -3 25448 25029 25444 -3 25028 25030 25445 -3 25449 25445 25030 -3 25029 25448 25452 -3 25029 25452 25031 -3 25030 25032 25449 -3 25453 25449 25032 -3 25031 25452 25456 -3 25031 25456 25033 -3 25032 25034 25453 -3 25457 25453 25034 -3 25033 25456 25460 -3 25033 25460 25035 -3 25034 25036 25457 -3 25461 25457 25036 -3 25035 25460 25464 -3 25035 25464 25037 -3 25036 25038 25461 -3 25465 25461 25038 -3 25037 25464 25468 -3 25037 25468 25039 -3 25038 25040 25465 -3 25469 25465 25040 -3 25039 25468 25472 -3 25039 25472 25041 -3 25040 25042 25469 -3 25473 25469 25042 -3 25041 25472 25476 -3 25041 25476 25043 -3 25042 25044 25473 -3 25477 25473 25044 -3 25043 25476 25480 -3 25043 25480 25045 -3 25044 25046 25477 -3 25481 25477 25046 -3 25045 25480 25484 -3 25045 25484 25047 -3 25046 25048 25481 -3 25485 25481 25048 -3 25047 25484 25488 -3 25047 25488 25049 -3 25048 25050 25485 -3 25489 25485 25050 -3 25049 25488 25492 -3 25049 25492 25051 -3 25050 25052 25489 -3 25493 25489 25052 -3 25051 25492 25496 -3 25051 25496 25053 -3 25052 25054 25493 -3 25497 25493 25054 -3 25053 25496 25500 -3 25053 25500 25055 -3 25054 25056 25497 -3 25501 25497 25056 -3 25055 25500 25504 -3 25055 25504 25057 -3 25056 25058 25501 -3 25505 25501 25058 -3 25057 25504 25508 -3 25057 25508 25059 -3 25058 25060 25505 -3 25509 25505 25060 -3 25059 25508 25512 -3 25059 25512 25061 -3 25060 25062 25509 -3 25513 25509 25062 -3 25061 25512 25516 -3 25061 25516 25063 -3 25062 25064 25513 -3 25517 25513 25064 -3 25063 25516 25520 -3 25063 25520 25065 -3 25064 25066 25517 -3 25521 25517 25066 -3 25065 25520 25524 -3 25065 25524 25067 -3 25066 25068 25521 -3 25525 25521 25068 -3 25067 25524 25528 -3 25067 25528 25069 -3 25068 25070 25525 -3 25529 25525 25070 -3 25069 25528 25532 -3 25069 25532 25071 -3 25070 25072 25529 -3 25533 25529 25072 -3 25071 25532 25536 -3 25071 25536 25073 -3 25072 25074 25537 -3 25072 25537 25533 -3 25073 25536 25540 -3 25073 25540 25075 -3 25074 25076 25541 -3 25074 25541 25537 -3 25075 25540 25544 -3 25075 25544 25077 -3 25076 25078 25545 -3 25076 25545 25541 -3 25077 25544 25548 -3 25077 25548 25079 -3 25078 25080 25549 -3 25078 25549 25545 -3 25079 25548 25552 -3 25079 25552 25081 -3 25080 25082 25553 -3 25080 25553 25549 -3 25081 25552 25556 -3 25081 25556 25083 -3 25082 25084 25557 -3 25082 25557 25553 -3 25083 25556 25560 -3 25083 25560 25085 -3 25084 25086 25561 -3 25084 25561 25557 -3 25085 25560 25564 -3 25085 25564 25087 -3 25086 25088 25565 -3 25086 25565 25561 -3 25087 25564 25568 -3 25087 25568 25089 -3 25088 25090 25569 -3 25088 25569 25565 -3 25089 25568 25572 -3 25089 25572 25091 -3 25090 25092 25573 -3 25090 25573 25569 
-3 25091 25572 25093 -3 25576 25093 25572 -3 25092 25094 25577 -3 25092 25577 25573 -3 25093 25576 25095 -3 25580 25095 25576 -3 25094 25096 25581 -3 25094 25581 25577 -3 25095 25580 25097 -3 25584 25097 25580 -3 25096 25098 25585 -3 25096 25585 25581 -3 25097 25584 25101 -3 25588 25101 25584 -3 25098 25102 25103 -3 25098 25103 25099 -3 25098 25099 25586 -3 25098 25586 25585 -3 25099 25103 25589 -3 25099 25589 25586 -3 25100 25104 25105 -3 25100 25105 25101 -3 25100 25101 25588 -3 25100 25588 25587 -3 25100 25587 25104 -3 25592 25104 25587 -3 25103 25106 25593 -3 25103 25593 25589 -3 25104 25592 25109 -3 25598 25109 25592 -3 25106 25110 25111 -3 25106 25111 25107 -3 25106 25107 25594 -3 25106 25594 25593 -3 25107 25111 25599 -3 25107 25599 25594 -3 25108 25116 25117 -3 25108 25117 25109 -3 25108 25109 25598 -3 25108 25598 25597 -3 25108 25597 25116 -3 25667 25116 25597 -3 25111 25112 25600 -3 25111 25600 25599 -3 25112 25118 25119 -3 25112 25119 25113 -3 25112 25113 25601 -3 25112 25601 25600 -3 25113 25119 25668 -3 25113 25668 25601 -3 25114 25183 25184 -3 25114 25184 25115 -3 25114 25115 25666 -3 25114 25666 25665 -3 25114 25665 25183 -3 25732 25183 25665 -3 25115 25116 25667 -3 25115 25667 25666 -3 25119 25120 25669 -3 25119 25669 25668 -3 25120 25121 25670 -3 25120 25670 25669 -3 25121 25122 25671 -3 25121 25671 25670 -3 25122 25123 25672 -3 25122 25672 25671 -3 25123 25124 25673 -3 25123 25673 25672 -3 25124 25125 25674 -3 25124 25674 25673 -3 25125 25126 25675 -3 25125 25675 25674 -3 25126 25127 25676 -3 25126 25676 25675 -3 25127 25128 25677 -3 25127 25677 25676 -3 25128 25129 25678 -3 25128 25678 25677 -3 25129 25130 25679 -3 25129 25679 25678 -3 25130 25131 25680 -3 25130 25680 25679 -3 25131 25132 25681 -3 25131 25681 25680 -3 25132 25133 25682 -3 25132 25682 25681 -3 25133 25134 25683 -3 25133 25683 25682 -3 25134 25135 25684 -3 25134 25684 25683 -3 25135 25136 25685 -3 25135 25685 25684 -3 25136 25137 25686 -3 25136 25686 25685 -3 25137 25138 25687 -3 25137 25687 25686 -3 25138 25139 25688 -3 25138 25688 25687 -3 25139 25140 25689 -3 25139 25689 25688 -3 25140 25141 25690 -3 25140 25690 25689 -3 25141 25142 25691 -3 25141 25691 25690 -3 25142 25143 25692 -3 25142 25692 25691 -3 25143 25144 25693 -3 25143 25693 25692 -3 25144 25145 25694 -3 25144 25694 25693 -3 25145 25146 25695 -3 25145 25695 25694 -3 25146 25147 25696 -3 25146 25696 25695 -3 25147 25148 25697 -3 25147 25697 25696 -3 25148 25149 25698 -3 25148 25698 25697 -3 25149 25150 25699 -3 25149 25699 25698 -3 25150 25151 25700 -3 25150 25700 25699 -3 25151 25152 25701 -3 25151 25701 25700 -3 25152 25153 25702 -3 25152 25702 25701 -3 25153 25154 25703 -3 25153 25703 25702 -3 25154 25155 25704 -3 25154 25704 25703 -3 25155 25156 25705 -3 25155 25705 25704 -3 25156 25157 25706 -3 25156 25706 25705 -3 25157 25158 25707 -3 25157 25707 25706 -3 25158 25159 25708 -3 25158 25708 25707 -3 25159 25160 25709 -3 25159 25709 25708 -3 25160 25161 25710 -3 25160 25710 25709 -3 25161 25162 25711 -3 25161 25711 25710 -3 25162 25163 25712 -3 25162 25712 25711 -3 25163 25164 25713 -3 25163 25713 25712 -3 25164 25165 25714 -3 25164 25714 25713 -3 25165 25166 25715 -3 25165 25715 25714 -3 25166 25167 25716 -3 25166 25716 25715 -3 25167 25168 25717 -3 25167 25717 25716 -3 25168 25169 25718 -3 25168 25718 25717 -3 25169 25170 25719 -3 25169 25719 25718 -3 25170 25171 25720 -3 25170 25720 25719 -3 25171 25172 25720 -3 25721 25720 25172 -3 25172 25173 25721 -3 25722 25721 25173 -3 25173 25174 25722 -3 25723 25722 25174 -3 25174 25175 25723 -3 
25724 25723 25175 -3 25175 25176 25724 -3 25725 25724 25176 -3 25176 25177 25725 -3 25726 25725 25177 -3 25177 25178 25726 -3 25727 25726 25178 -3 25178 25179 25727 -3 25728 25727 25179 -3 25179 25180 25728 -3 25729 25728 25180 -3 25180 25181 25729 -3 25730 25729 25181 -3 25181 25182 25730 -3 25731 25730 25182 -3 25182 25183 25731 -3 25732 25731 25183 -3 25185 25252 25186 -3 25253 25186 25252 -3 25186 25253 25187 -3 25254 25187 25253 -3 25187 25254 25188 -3 25255 25188 25254 -3 25188 25255 25189 -3 25256 25189 25255 -3 25189 25256 25190 -3 25257 25190 25256 -3 25190 25257 25191 -3 25258 25191 25257 -3 25191 25258 25192 -3 25259 25192 25258 -3 25192 25259 25193 -3 25260 25193 25259 -3 25193 25260 25194 -3 25261 25194 25260 -3 25194 25261 25195 -3 25262 25195 25261 -3 25195 25262 25196 -3 25263 25196 25262 -3 25196 25263 25197 -3 25264 25197 25263 -3 25197 25264 25198 -3 25265 25198 25264 -3 25198 25265 25199 -3 25266 25199 25265 -3 25199 25266 25200 -3 25267 25200 25266 -3 25200 25267 25201 -3 25268 25201 25267 -3 25201 25268 25202 -3 25269 25202 25268 -3 25202 25269 25203 -3 25270 25203 25269 -3 25203 25270 25204 -3 25271 25204 25270 -3 25204 25271 25205 -3 25272 25205 25271 -3 25205 25272 25206 -3 25273 25206 25272 -3 25206 25273 25207 -3 25274 25207 25273 -3 25207 25274 25208 -3 25275 25208 25274 -3 25208 25275 25209 -3 25276 25209 25275 -3 25209 25276 25210 -3 25277 25210 25276 -3 25210 25277 25211 -3 25278 25211 25277 -3 25211 25278 25212 -3 25279 25212 25278 -3 25212 25279 25213 -3 25280 25213 25279 -3 25213 25280 25214 -3 25281 25214 25280 -3 25214 25281 25215 -3 25282 25215 25281 -3 25215 25282 25216 -3 25283 25216 25282 -3 25216 25283 25217 -3 25284 25217 25283 -3 25217 25284 25218 -3 25285 25218 25284 -3 25218 25285 25219 -3 25286 25219 25285 -3 25219 25286 25220 -3 25287 25220 25286 -3 25220 25287 25221 -3 25288 25221 25287 -3 25221 25288 25222 -3 25289 25222 25288 -3 25222 25289 25223 -3 25290 25223 25289 -3 25223 25290 25224 -3 25291 25224 25290 -3 25224 25291 25225 -3 25292 25225 25291 -3 25225 25292 25226 -3 25293 25226 25292 -3 25226 25293 25227 -3 25294 25227 25293 -3 25227 25294 25228 -3 25295 25228 25294 -3 25228 25295 25229 -3 25296 25229 25295 -3 25229 25296 25230 -3 25297 25230 25296 -3 25230 25297 25298 -3 25230 25298 25231 -3 25231 25298 25299 -3 25231 25299 25232 -3 25232 25299 25300 -3 25232 25300 25233 -3 25233 25300 25301 -3 25233 25301 25234 -3 25234 25301 25302 -3 25234 25302 25235 -3 25235 25302 25303 -3 25235 25303 25236 -3 25236 25303 25304 -3 25236 25304 25237 -3 25237 25304 25305 -3 25237 25305 25238 -3 25238 25305 25306 -3 25238 25306 25239 -3 25239 25306 25307 -3 25239 25307 25240 -3 25240 25307 25308 -3 25240 25308 25241 -3 25241 25308 25309 -3 25241 25309 25242 -3 25242 25309 25310 -3 25242 25310 25243 -3 25243 25310 25311 -3 25243 25311 25244 -3 25244 25311 25312 -3 25244 25312 25245 -3 25245 25312 25313 -3 25245 25313 25246 -3 25246 25313 25314 -3 25246 25314 25247 -3 25247 25314 25315 -3 25247 25315 25248 -3 25248 25315 25316 -3 25248 25316 25249 -3 25250 25320 25321 -3 25250 25321 25251 -3 25251 25733 25734 -3 25251 25734 25252 -3 25251 25321 25801 -3 25251 25801 25733 -3 25252 25734 25735 -3 25252 25735 25253 -3 25253 25735 25736 -3 25253 25736 25254 -3 25254 25736 25737 -3 25254 25737 25255 -3 25255 25737 25738 -3 25255 25738 25256 -3 25256 25738 25739 -3 25256 25739 25257 -3 25257 25739 25740 -3 25257 25740 25258 -3 25258 25740 25741 -3 25258 25741 25259 -3 25259 25741 25742 -3 25259 25742 25260 -3 25260 25742 25743 -3 25260 25743 25261 -3 25261 
25743 25744 -3 25261 25744 25262 -3 25262 25744 25745 -3 25262 25745 25263 -3 25263 25745 25746 -3 25263 25746 25264 -3 25264 25746 25747 -3 25264 25747 25265 -3 25265 25747 25748 -3 25265 25748 25266 -3 25266 25748 25749 -3 25266 25749 25267 -3 25267 25749 25750 -3 25267 25750 25268 -3 25268 25750 25751 -3 25268 25751 25269 -3 25269 25751 25752 -3 25269 25752 25270 -3 25270 25752 25753 -3 25270 25753 25271 -3 25271 25753 25754 -3 25271 25754 25272 -3 25272 25754 25755 -3 25272 25755 25273 -3 25273 25755 25756 -3 25273 25756 25274 -3 25274 25756 25757 -3 25274 25757 25275 -3 25275 25757 25758 -3 25275 25758 25276 -3 25276 25758 25759 -3 25276 25759 25277 -3 25277 25759 25760 -3 25277 25760 25278 -3 25278 25760 25761 -3 25278 25761 25279 -3 25279 25761 25762 -3 25279 25762 25280 -3 25280 25762 25763 -3 25280 25763 25281 -3 25281 25763 25764 -3 25281 25764 25282 -3 25282 25764 25765 -3 25282 25765 25283 -3 25283 25765 25766 -3 25283 25766 25284 -3 25284 25766 25767 -3 25284 25767 25285 -3 25285 25767 25768 -3 25285 25768 25286 -3 25286 25768 25287 -3 25769 25287 25768 -3 25287 25769 25288 -3 25770 25288 25769 -3 25288 25770 25289 -3 25771 25289 25770 -3 25289 25771 25290 -3 25772 25290 25771 -3 25290 25772 25291 -3 25773 25291 25772 -3 25291 25773 25292 -3 25774 25292 25773 -3 25292 25774 25293 -3 25775 25293 25774 -3 25293 25775 25294 -3 25776 25294 25775 -3 25294 25776 25295 -3 25777 25295 25776 -3 25295 25777 25296 -3 25778 25296 25777 -3 25296 25778 25297 -3 25779 25297 25778 -3 25297 25779 25298 -3 25780 25298 25779 -3 25298 25780 25299 -3 25781 25299 25780 -3 25299 25781 25300 -3 25782 25300 25781 -3 25300 25782 25301 -3 25783 25301 25782 -3 25301 25783 25302 -3 25784 25302 25783 -3 25302 25784 25303 -3 25785 25303 25784 -3 25303 25785 25304 -3 25786 25304 25785 -3 25304 25786 25305 -3 25787 25305 25786 -3 25305 25787 25306 -3 25788 25306 25787 -3 25306 25788 25307 -3 25789 25307 25788 -3 25307 25789 25308 -3 25790 25308 25789 -3 25308 25790 25309 -3 25791 25309 25790 -3 25309 25791 25310 -3 25792 25310 25791 -3 25310 25792 25311 -3 25793 25311 25792 -3 25311 25793 25312 -3 25794 25312 25793 -3 25312 25794 25313 -3 25795 25313 25794 -3 25313 25795 25314 -3 25796 25314 25795 -3 25314 25796 25315 -3 25797 25315 25796 -3 25315 25797 25316 -3 25798 25316 25797 -3 25316 25798 25317 -3 25799 25317 25798 -3 25317 25322 25323 -3 25317 25323 25318 -3 25317 25799 25322 -3 25867 25322 25799 -3 25319 25325 25326 -3 25319 25326 25320 -3 25320 25800 25321 -3 25801 25321 25800 -3 25320 25326 25870 -3 25320 25870 25800 -3 25322 25867 25323 -3 25868 25323 25867 -3 25323 25327 25328 -3 25323 25328 25324 -3 25323 25868 25327 -3 25875 25327 25868 -3 25325 25869 25326 -3 25870 25326 25869 -3 25325 25330 25877 -3 25325 25877 25869 -3 25327 25875 25328 -3 25876 25328 25875 -3 25328 25876 25331 -3 25882 25331 25876 -3 25329 25333 25334 -3 25329 25334 25330 -3 25330 25334 25883 -3 25330 25883 25877 -3 25331 25335 25336 -3 25331 25336 25332 -3 25331 25882 25335 -3 25886 25335 25882 -3 25333 25337 25338 -3 25333 25338 25334 -3 25334 25338 25887 -3 25334 25887 25883 -3 25335 25339 25340 -3 25335 25340 25336 -3 25335 25886 25339 -3 25890 25339 25886 -3 25337 25341 25342 -3 25337 25342 25338 -3 25338 25342 25891 -3 25338 25891 25887 -3 25339 25343 25344 -3 25339 25344 25340 -3 25339 25890 25343 -3 25894 25343 25890 -3 25341 25345 25346 -3 25341 25346 25342 -3 25342 25346 25895 -3 25342 25895 25891 -3 25343 25347 25348 -3 25343 25348 25344 -3 25343 25894 25347 -3 25898 25347 25894 -3 25345 25349 25350 -3 25345 25350 
25346 -3 25346 25350 25899 -3 25346 25899 25895 -3 25347 25351 25352 -3 25347 25352 25348 -3 25347 25898 25902 -3 25347 25902 25351 -3 25349 25353 25354 -3 25349 25354 25350 -3 25350 25354 25903 -3 25350 25903 25899 -3 25351 25355 25356 -3 25351 25356 25352 -3 25351 25902 25906 -3 25351 25906 25355 -3 25353 25357 25358 -3 25353 25358 25354 -3 25354 25358 25907 -3 25354 25907 25903 -3 25355 25359 25360 -3 25355 25360 25356 -3 25355 25906 25910 -3 25355 25910 25359 -3 25357 25361 25362 -3 25357 25362 25358 -3 25358 25362 25911 -3 25358 25911 25907 -3 25359 25363 25364 -3 25359 25364 25360 -3 25359 25910 25914 -3 25359 25914 25363 -3 25361 25365 25366 -3 25361 25366 25362 -3 25362 25366 25915 -3 25362 25915 25911 -3 25363 25367 25368 -3 25363 25368 25364 -3 25363 25914 25918 -3 25363 25918 25367 -3 25365 25369 25366 -3 25370 25366 25369 -3 25366 25370 25915 -3 25919 25915 25370 -3 25367 25371 25368 -3 25372 25368 25371 -3 25367 25918 25922 -3 25367 25922 25371 -3 25369 25373 25370 -3 25374 25370 25373 -3 25370 25374 25919 -3 25923 25919 25374 -3 25371 25375 25372 -3 25376 25372 25375 -3 25371 25922 25926 -3 25371 25926 25375 -3 25373 25377 25374 -3 25378 25374 25377 -3 25374 25378 25923 -3 25927 25923 25378 -3 25375 25379 25376 -3 25380 25376 25379 -3 25375 25926 25930 -3 25375 25930 25379 -3 25377 25381 25378 -3 25382 25378 25381 -3 25378 25382 25927 -3 25931 25927 25382 -3 25379 25383 25380 -3 25384 25380 25383 -3 25379 25930 25934 -3 25379 25934 25383 -3 25381 25385 25382 -3 25386 25382 25385 -3 25382 25386 25931 -3 25935 25931 25386 -3 25383 25387 25384 -3 25388 25384 25387 -3 25383 25934 25938 -3 25383 25938 25387 -3 25385 25389 25386 -3 25390 25386 25389 -3 25386 25390 25935 -3 25939 25935 25390 -3 25387 25391 25388 -3 25392 25388 25391 -3 25387 25938 25942 -3 25387 25942 25391 -3 25389 25393 25390 -3 25394 25390 25393 -3 25390 25394 25939 -3 25943 25939 25394 -3 25391 25395 25392 -3 25396 25392 25395 -3 25391 25942 25946 -3 25391 25946 25395 -3 25393 25397 25394 -3 25398 25394 25397 -3 25394 25398 25943 -3 25947 25943 25398 -3 25395 25399 25396 -3 25400 25396 25399 -3 25395 25946 25950 -3 25395 25950 25399 -3 25397 25401 25398 -3 25402 25398 25401 -3 25398 25402 25947 -3 25951 25947 25402 -3 25399 25403 25400 -3 25404 25400 25403 -3 25399 25950 25954 -3 25399 25954 25403 -3 25401 25405 25402 -3 25406 25402 25405 -3 25402 25406 25951 -3 25955 25951 25406 -3 25403 25407 25404 -3 25408 25404 25407 -3 25403 25954 25958 -3 25403 25958 25407 -3 25405 25409 25406 -3 25410 25406 25409 -3 25406 25410 25955 -3 25959 25955 25410 -3 25407 25411 25408 -3 25412 25408 25411 -3 25407 25958 25962 -3 25407 25962 25411 -3 25409 25413 25410 -3 25414 25410 25413 -3 25410 25414 25959 -3 25963 25959 25414 -3 25411 25415 25412 -3 25416 25412 25415 -3 25411 25962 25966 -3 25411 25966 25415 -3 25413 25417 25414 -3 25418 25414 25417 -3 25414 25418 25963 -3 25967 25963 25418 -3 25415 25419 25416 -3 25420 25416 25419 -3 25415 25966 25970 -3 25415 25970 25419 -3 25417 25421 25418 -3 25422 25418 25421 -3 25418 25422 25967 -3 25971 25967 25422 -3 25419 25423 25420 -3 25424 25420 25423 -3 25419 25970 25974 -3 25419 25974 25423 -3 25421 25425 25422 -3 25426 25422 25425 -3 25422 25426 25971 -3 25975 25971 25426 -3 25423 25427 25424 -3 25428 25424 25427 -3 25423 25974 25978 -3 25423 25978 25427 -3 25425 25429 25426 -3 25430 25426 25429 -3 25426 25430 25975 -3 25979 25975 25430 -3 25427 25431 25428 -3 25432 25428 25431 -3 25427 25978 25982 -3 25427 25982 25431 -3 25429 25433 25430 -3 25434 25430 25433 -3 25430 25434 25979 
-3 25983 25979 25434 -3 25431 25435 25432 -3 25436 25432 25435 -3 25431 25982 25986 -3 25431 25986 25435 -3 25433 25437 25434 -3 25438 25434 25437 -3 25434 25438 25983 -3 25987 25983 25438 -3 25435 25439 25436 -3 25440 25436 25439 -3 25435 25986 25990 -3 25435 25990 25439 -3 25437 25441 25438 -3 25442 25438 25441 -3 25438 25442 25987 -3 25991 25987 25442 -3 25439 25443 25440 -3 25444 25440 25443 -3 25439 25990 25994 -3 25439 25994 25443 -3 25441 25445 25442 -3 25446 25442 25445 -3 25442 25446 25991 -3 25995 25991 25446 -3 25443 25447 25444 -3 25448 25444 25447 -3 25443 25994 25998 -3 25443 25998 25447 -3 25445 25449 25446 -3 25450 25446 25449 -3 25446 25450 25995 -3 25999 25995 25450 -3 25447 25451 25448 -3 25452 25448 25451 -3 25447 25998 25451 -3 26002 25451 25998 -3 25449 25453 25450 -3 25454 25450 25453 -3 25450 25454 25999 -3 26003 25999 25454 -3 25451 25455 25452 -3 25456 25452 25455 -3 25451 26002 25455 -3 26006 25455 26002 -3 25453 25457 25454 -3 25458 25454 25457 -3 25454 25458 26003 -3 26007 26003 25458 -3 25455 25459 25456 -3 25460 25456 25459 -3 25455 26006 25459 -3 26010 25459 26006 -3 25457 25461 25458 -3 25462 25458 25461 -3 25458 25462 26007 -3 26011 26007 25462 -3 25459 25463 25460 -3 25464 25460 25463 -3 25459 26010 25463 -3 26014 25463 26010 -3 25461 25465 25462 -3 25466 25462 25465 -3 25462 25466 26011 -3 26015 26011 25466 -3 25463 25467 25468 -3 25463 25468 25464 -3 25463 26014 25467 -3 26018 25467 26014 -3 25465 25469 25470 -3 25465 25470 25466 -3 25466 25470 26019 -3 25466 26019 26015 -3 25467 25471 25472 -3 25467 25472 25468 -3 25467 26018 25471 -3 26022 25471 26018 -3 25469 25473 25474 -3 25469 25474 25470 -3 25470 25474 26023 -3 25470 26023 26019 -3 25471 25475 25476 -3 25471 25476 25472 -3 25471 26022 25475 -3 26026 25475 26022 -3 25473 25477 25478 -3 25473 25478 25474 -3 25474 25478 26027 -3 25474 26027 26023 -3 25475 25479 25480 -3 25475 25480 25476 -3 25475 26026 25479 -3 26030 25479 26026 -3 25477 25481 25482 -3 25477 25482 25478 -3 25478 25482 26031 -3 25478 26031 26027 -3 25479 25483 25484 -3 25479 25484 25480 -3 25479 26030 25483 -3 26034 25483 26030 -3 25481 25485 25486 -3 25481 25486 25482 -3 25482 25486 26035 -3 25482 26035 26031 -3 25483 25487 25488 -3 25483 25488 25484 -3 25483 26034 25487 -3 26038 25487 26034 -3 25485 25489 25490 -3 25485 25490 25486 -3 25486 25490 26039 -3 25486 26039 26035 -3 25487 25491 25492 -3 25487 25492 25488 -3 25487 26038 25491 -3 26042 25491 26038 -3 25489 25493 25494 -3 25489 25494 25490 -3 25490 25494 26043 -3 25490 26043 26039 -3 25491 25495 25496 -3 25491 25496 25492 -3 25491 26042 25495 -3 26046 25495 26042 -3 25493 25497 25498 -3 25493 25498 25494 -3 25494 25498 26047 -3 25494 26047 26043 -3 25495 25499 25500 -3 25495 25500 25496 -3 25495 26046 25499 -3 26050 25499 26046 -3 25497 25501 25502 -3 25497 25502 25498 -3 25498 25502 26051 -3 25498 26051 26047 -3 25499 25503 25504 -3 25499 25504 25500 -3 25499 26050 25503 -3 26054 25503 26050 -3 25501 25505 25506 -3 25501 25506 25502 -3 25502 25506 26055 -3 25502 26055 26051 -3 25503 25507 25508 -3 25503 25508 25504 -3 25503 26054 25507 -3 26058 25507 26054 -3 25505 25509 25510 -3 25505 25510 25506 -3 25506 25510 26059 -3 25506 26059 26055 -3 25507 25511 25512 -3 25507 25512 25508 -3 25507 26058 25511 -3 26062 25511 26058 -3 25509 25513 25514 -3 25509 25514 25510 -3 25510 25514 26063 -3 25510 26063 26059 -3 25511 25515 25516 -3 25511 25516 25512 -3 25511 26062 25515 -3 26066 25515 26062 -3 25513 25517 25518 -3 25513 25518 25514 -3 25514 25518 26067 -3 25514 26067 26063 -3 
25515 25519 25520 -3 25515 25520 25516 -3 25515 26066 25519 -3 26070 25519 26066 -3 25517 25521 25522 -3 25517 25522 25518 -3 25518 25522 26071 -3 25518 26071 26067 -3 25519 25523 25524 -3 25519 25524 25520 -3 25519 26070 25523 -3 26074 25523 26070 -3 25521 25525 25526 -3 25521 25526 25522 -3 25522 25526 26075 -3 25522 26075 26071 -3 25523 25527 25528 -3 25523 25528 25524 -3 25523 26074 25527 -3 26078 25527 26074 -3 25525 25529 25530 -3 25525 25530 25526 -3 25526 25530 26079 -3 25526 26079 26075 -3 25527 25531 25532 -3 25527 25532 25528 -3 25527 26078 25531 -3 26082 25531 26078 -3 25529 25533 25534 -3 25529 25534 25530 -3 25530 25534 26083 -3 25530 26083 26079 -3 25531 25535 25536 -3 25531 25536 25532 -3 25531 26082 25535 -3 26086 25535 26082 -3 25533 25537 25538 -3 25533 25538 25534 -3 25534 25538 26087 -3 25534 26087 26083 -3 25535 25539 25540 -3 25535 25540 25536 -3 25535 26086 25539 -3 26090 25539 26086 -3 25537 25541 25542 -3 25537 25542 25538 -3 25538 25542 26091 -3 25538 26091 26087 -3 25539 25543 25544 -3 25539 25544 25540 -3 25539 26090 25543 -3 26094 25543 26090 -3 25541 25545 25546 -3 25541 25546 25542 -3 25542 25546 26095 -3 25542 26095 26091 -3 25543 25547 25548 -3 25543 25548 25544 -3 25543 26094 26098 -3 25543 26098 25547 -3 25545 25549 25550 -3 25545 25550 25546 -3 25546 25550 26099 -3 25546 26099 26095 -3 25547 25551 25552 -3 25547 25552 25548 -3 25547 26098 26102 -3 25547 26102 25551 -3 25549 25553 25554 -3 25549 25554 25550 -3 25550 25554 26103 -3 25550 26103 26099 -3 25551 25555 25556 -3 25551 25556 25552 -3 25551 26102 26106 -3 25551 26106 25555 -3 25553 25557 25558 -3 25553 25558 25554 -3 25554 25558 26107 -3 25554 26107 26103 -3 25555 25559 25560 -3 25555 25560 25556 -3 25555 26106 26110 -3 25555 26110 25559 -3 25557 25561 25562 -3 25557 25562 25558 -3 25558 25562 26111 -3 25558 26111 26107 -3 25559 25563 25564 -3 25559 25564 25560 -3 25559 26110 26114 -3 25559 26114 25563 -3 25561 25565 25562 -3 25566 25562 25565 -3 25562 25566 26111 -3 26115 26111 25566 -3 25563 25567 25564 -3 25568 25564 25567 -3 25563 26114 26118 -3 25563 26118 25567 -3 25565 25569 25566 -3 25570 25566 25569 -3 25566 25570 26115 -3 26119 26115 25570 -3 25567 25571 25568 -3 25572 25568 25571 -3 25567 26118 26122 -3 25567 26122 25571 -3 25569 25573 25570 -3 25574 25570 25573 -3 25570 25574 26119 -3 26123 26119 25574 -3 25571 25575 25572 -3 25576 25572 25575 -3 25571 26122 26126 -3 25571 26126 25575 -3 25573 25577 25574 -3 25578 25574 25577 -3 25574 25578 26123 -3 26127 26123 25578 -3 25575 25579 25576 -3 25580 25576 25579 -3 25575 26126 26130 -3 25575 26130 25579 -3 25577 25581 25578 -3 25582 25578 25581 -3 25578 25582 26127 -3 26131 26127 25582 -3 25579 25583 25580 -3 25584 25580 25583 -3 25579 26130 26134 -3 25579 26134 25583 -3 25581 25585 25582 -3 25586 25582 25585 -3 25582 25586 26131 -3 26135 26131 25586 -3 25583 25587 25584 -3 25588 25584 25587 -3 25583 26134 26140 -3 25583 26140 25587 -3 25586 25589 26135 -3 26141 26135 25589 -3 25587 26140 26148 -3 25587 26148 25592 -3 25589 25593 25590 -3 25594 25590 25593 -3 25589 25590 26141 -3 26142 26141 25590 -3 25590 25594 26142 -3 26149 26142 25594 -3 25591 25597 25592 -3 25598 25592 25597 -3 25591 25592 26147 -3 26148 26147 25592 -3 25591 26147 26217 -3 25591 26217 25597 -3 25594 25599 25595 -3 25600 25595 25599 -3 25594 25595 26149 -3 26150 26149 25595 -3 25595 25600 26150 -3 26218 26150 25600 -3 25596 25666 25597 -3 25667 25597 25666 -3 25596 25597 26216 -3 26217 26216 25597 -3 25596 26216 26284 -3 25596 26284 25666 -3 25600 25601 26218 -3 26219 
26218 25601 -3 25601 25668 25602 -3 25669 25602 25668 -3 25601 25602 26219 -3 26220 26219 25602 -3 25602 25669 25603 -3 25670 25603 25669 -3 25602 25603 26220 -3 26221 26220 25603 -3 25603 25670 25604 -3 25671 25604 25670 -3 25603 25604 26221 -3 26222 26221 25604 -3 25604 25671 25605 -3 25672 25605 25671 -3 25604 25605 26222 -3 26223 26222 25605 -3 25605 25672 25606 -3 25673 25606 25672 -3 25605 25606 26223 -3 26224 26223 25606 -3 25606 25673 25607 -3 25674 25607 25673 -3 25606 25607 26224 -3 26225 26224 25607 -3 25607 25674 25608 -3 25675 25608 25674 -3 25607 25608 26225 -3 26226 26225 25608 -3 25608 25675 25609 -3 25676 25609 25675 -3 25608 25609 26226 -3 26227 26226 25609 -3 25609 25676 25610 -3 25677 25610 25676 -3 25609 25610 26227 -3 26228 26227 25610 -3 25610 25677 25611 -3 25678 25611 25677 -3 25610 25611 26228 -3 26229 26228 25611 -3 25611 25678 25612 -3 25679 25612 25678 -3 25611 25612 26229 -3 26230 26229 25612 -3 25612 25679 25613 -3 25680 25613 25679 -3 25612 25613 26230 -3 26231 26230 25613 -3 25613 25680 25614 -3 25681 25614 25680 -3 25613 25614 26231 -3 26232 26231 25614 -3 25614 25681 25615 -3 25682 25615 25681 -3 25614 25615 26232 -3 26233 26232 25615 -3 25615 25682 25616 -3 25683 25616 25682 -3 25615 25616 26233 -3 26234 26233 25616 -3 25616 25683 25617 -3 25684 25617 25683 -3 25616 25617 26234 -3 26235 26234 25617 -3 25617 25684 25618 -3 25685 25618 25684 -3 25617 25618 26235 -3 26236 26235 25618 -3 25618 25685 25619 -3 25686 25619 25685 -3 25618 25619 26236 -3 26237 26236 25619 -3 25619 25686 25620 -3 25687 25620 25686 -3 25619 25620 26237 -3 26238 26237 25620 -3 25620 25687 25621 -3 25688 25621 25687 -3 25620 25621 26238 -3 26239 26238 25621 -3 25621 25688 25622 -3 25689 25622 25688 -3 25621 25622 26239 -3 26240 26239 25622 -3 25622 25689 25623 -3 25690 25623 25689 -3 25622 25623 26240 -3 26241 26240 25623 -3 25623 25690 25691 -3 25623 25691 25624 -3 25623 25624 26241 -3 26242 26241 25624 -3 25624 25691 25692 -3 25624 25692 25625 -3 25624 25625 26242 -3 26243 26242 25625 -3 25625 25692 25693 -3 25625 25693 25626 -3 25625 25626 26243 -3 26244 26243 25626 -3 25626 25693 25694 -3 25626 25694 25627 -3 25626 25627 26244 -3 26245 26244 25627 -3 25627 25694 25695 -3 25627 25695 25628 -3 25627 25628 26245 -3 26246 26245 25628 -3 25628 25695 25696 -3 25628 25696 25629 -3 25628 25629 26246 -3 26247 26246 25629 -3 25629 25696 25697 -3 25629 25697 25630 -3 25629 25630 26247 -3 26248 26247 25630 -3 25630 25697 25698 -3 25630 25698 25631 -3 25630 25631 26248 -3 26249 26248 25631 -3 25631 25698 25699 -3 25631 25699 25632 -3 25631 25632 26249 -3 26250 26249 25632 -3 25632 25699 25700 -3 25632 25700 25633 -3 25632 25633 26250 -3 26251 26250 25633 -3 25633 25700 25701 -3 25633 25701 25634 -3 25633 25634 26251 -3 26252 26251 25634 -3 25634 25701 25702 -3 25634 25702 25635 -3 25634 25635 26252 -3 26253 26252 25635 -3 25635 25702 25703 -3 25635 25703 25636 -3 25635 25636 26253 -3 26254 26253 25636 -3 25636 25703 25704 -3 25636 25704 25637 -3 25636 25637 26254 -3 26255 26254 25637 -3 25637 25704 25705 -3 25637 25705 25638 -3 25637 25638 26255 -3 26256 26255 25638 -3 25638 25705 25706 -3 25638 25706 25639 -3 25638 25639 26256 -3 26257 26256 25639 -3 25639 25706 25707 -3 25639 25707 25640 -3 25639 25640 26257 -3 26258 26257 25640 -3 25640 25707 25708 -3 25640 25708 25641 -3 25640 25641 26258 -3 26259 26258 25641 -3 25641 25708 25709 -3 25641 25709 25642 -3 25641 25642 26259 -3 26260 26259 25642 -3 25642 25709 25710 -3 25642 25710 25643 -3 25642 25643 26260 -3 26261 26260 25643 -3 25643 25710 
25711 -3 25643 25711 25644 -3 25643 25644 26261 -3 26262 26261 25644 -3 25644 25711 25712 -3 25644 25712 25645 -3 25644 25645 26262 -3 26263 26262 25645 -3 25645 25712 25713 -3 25645 25713 25646 -3 25645 25646 26263 -3 26264 26263 25646 -3 25646 25713 25714 -3 25646 25714 25647 -3 25646 25647 26264 -3 26265 26264 25647 -3 25647 25714 25715 -3 25647 25715 25648 -3 25647 25648 26265 -3 26266 26265 25648 -3 25648 25715 25716 -3 25648 25716 25649 -3 25648 25649 26266 -3 26267 26266 25649 -3 25649 25716 25717 -3 25649 25717 25650 -3 25649 25650 26267 -3 26268 26267 25650 -3 25650 25717 25718 -3 25650 25718 25651 -3 25650 25651 26268 -3 26269 26268 25651 -3 25651 25718 25719 -3 25651 25719 25652 -3 25651 25652 26269 -3 26270 26269 25652 -3 25652 25719 25720 -3 25652 25720 25653 -3 25652 25653 26270 -3 26271 26270 25653 -3 25653 25720 25721 -3 25653 25721 25654 -3 25653 25654 26271 -3 26272 26271 25654 -3 25654 25721 25722 -3 25654 25722 25655 -3 25654 25655 26272 -3 26273 26272 25655 -3 25655 25722 25723 -3 25655 25723 25656 -3 25655 25656 26273 -3 26274 26273 25656 -3 25656 25723 25724 -3 25656 25724 25657 -3 25656 25657 26274 -3 26275 26274 25657 -3 25657 25724 25725 -3 25657 25725 25658 -3 25657 25658 26275 -3 26276 26275 25658 -3 25658 25725 25726 -3 25658 25726 25659 -3 25658 25659 26276 -3 26277 26276 25659 -3 25659 25726 25727 -3 25659 25727 25660 -3 25659 25660 26277 -3 26278 26277 25660 -3 25660 25727 25728 -3 25660 25728 25661 -3 25660 25661 26278 -3 26279 26278 25661 -3 25661 25728 25729 -3 25661 25729 25662 -3 25661 25662 26280 -3 25661 26280 26279 -3 25662 25729 25730 -3 25662 25730 25663 -3 25662 25663 26281 -3 25662 26281 26280 -3 25663 25730 25731 -3 25663 25731 25664 -3 25663 25664 26282 -3 25663 26282 26281 -3 25664 25731 25732 -3 25664 25732 25665 -3 25664 25665 26283 -3 25664 26283 26282 -3 25665 25666 26284 -3 25665 26284 26283 -3 25733 25801 25734 -3 25802 25734 25801 -3 25734 25802 25735 -3 25803 25735 25802 -3 25735 25803 25736 -3 25804 25736 25803 -3 25736 25804 25737 -3 25805 25737 25804 -3 25737 25805 25738 -3 25806 25738 25805 -3 25738 25806 25739 -3 25807 25739 25806 -3 25739 25807 25740 -3 25808 25740 25807 -3 25740 25808 25741 -3 25809 25741 25808 -3 25741 25809 25742 -3 25810 25742 25809 -3 25742 25810 25743 -3 25811 25743 25810 -3 25743 25811 25744 -3 25812 25744 25811 -3 25744 25812 25745 -3 25813 25745 25812 -3 25745 25813 25746 -3 25814 25746 25813 -3 25746 25814 25747 -3 25815 25747 25814 -3 25747 25815 25748 -3 25816 25748 25815 -3 25748 25816 25749 -3 25817 25749 25816 -3 25749 25817 25750 -3 25818 25750 25817 -3 25750 25818 25751 -3 25819 25751 25818 -3 25751 25819 25752 -3 25820 25752 25819 -3 25752 25820 25753 -3 25821 25753 25820 -3 25753 25821 25754 -3 25822 25754 25821 -3 25754 25822 25755 -3 25823 25755 25822 -3 25755 25823 25756 -3 25824 25756 25823 -3 25756 25824 25757 -3 25825 25757 25824 -3 25757 25825 25758 -3 25826 25758 25825 -3 25758 25826 25759 -3 25827 25759 25826 -3 25759 25827 25760 -3 25828 25760 25827 -3 25760 25828 25761 -3 25829 25761 25828 -3 25761 25829 25762 -3 25830 25762 25829 -3 25762 25830 25763 -3 25831 25763 25830 -3 25763 25831 25764 -3 25832 25764 25831 -3 25764 25832 25765 -3 25833 25765 25832 -3 25765 25833 25766 -3 25834 25766 25833 -3 25766 25834 25767 -3 25835 25767 25834 -3 25767 25835 25768 -3 25836 25768 25835 -3 25768 25836 25769 -3 25837 25769 25836 -3 25769 25837 25770 -3 25838 25770 25837 -3 25770 25838 25771 -3 25839 25771 25838 -3 25771 25839 25772 -3 25840 25772 25839 -3 25772 25840 25773 -3 25841 25773 25840 
-3 25773 25841 25774 -3 25842 25774 25841 -3 25774 25842 25775 -3 25843 25775 25842 -3 25775 25843 25776 -3 25844 25776 25843 -3 25776 25844 25777 -3 25845 25777 25844 -3 25777 25845 25778 -3 25846 25778 25845 -3 25778 25846 25779 -3 25847 25779 25846 -3 25779 25847 25780 -3 25848 25780 25847 -3 25780 25848 25781 -3 25849 25781 25848 -3 25781 25849 25782 -3 25850 25782 25849 -3 25782 25850 25783 -3 25851 25783 25850 -3 25783 25851 25784 -3 25852 25784 25851 -3 25784 25852 25785 -3 25853 25785 25852 -3 25785 25853 25786 -3 25854 25786 25853 -3 25786 25854 25787 -3 25855 25787 25854 -3 25787 25855 25788 -3 25856 25788 25855 -3 25788 25856 25789 -3 25857 25789 25856 -3 25789 25857 25790 -3 25858 25790 25857 -3 25790 25858 25791 -3 25859 25791 25858 -3 25791 25859 25792 -3 25860 25792 25859 -3 25792 25860 25793 -3 25861 25793 25860 -3 25793 25861 25794 -3 25862 25794 25861 -3 25794 25862 25795 -3 25863 25795 25862 -3 25795 25863 25796 -3 25864 25796 25863 -3 25796 25864 25797 -3 25865 25797 25864 -3 25797 25865 25798 -3 25866 25798 25865 -3 25798 25866 25799 -3 25867 25799 25866 -3 25800 25870 25801 -3 25871 25801 25870 -3 25801 25871 25802 -3 25872 25802 25871 -3 25802 26285 26286 -3 25802 26286 25803 -3 25802 25872 26285 -3 26351 26285 25872 -3 25803 26286 26287 -3 25803 26287 25804 -3 25804 26287 26288 -3 25804 26288 25805 -3 25805 26288 26289 -3 25805 26289 25806 -3 25806 26289 26290 -3 25806 26290 25807 -3 25807 26290 26291 -3 25807 26291 25808 -3 25808 26291 26292 -3 25808 26292 25809 -3 25809 26292 26293 -3 25809 26293 25810 -3 25810 26293 26294 -3 25810 26294 25811 -3 25811 26294 26295 -3 25811 26295 25812 -3 25812 26295 26296 -3 25812 26296 25813 -3 25813 26296 26297 -3 25813 26297 25814 -3 25814 26297 26298 -3 25814 26298 25815 -3 25815 26298 26299 -3 25815 26299 25816 -3 25816 26299 26300 -3 25816 26300 25817 -3 25817 26300 26301 -3 25817 26301 25818 -3 25818 26301 26302 -3 25818 26302 25819 -3 25819 26302 26303 -3 25819 26303 25820 -3 25820 26303 26304 -3 25820 26304 25821 -3 25821 26304 26305 -3 25821 26305 25822 -3 25822 26305 26306 -3 25822 26306 25823 -3 25823 26306 26307 -3 25823 26307 25824 -3 25824 26307 26308 -3 25824 26308 25825 -3 25825 26308 26309 -3 25825 26309 25826 -3 25826 26309 26310 -3 25826 26310 25827 -3 25827 26310 26311 -3 25827 26311 25828 -3 25828 26311 26312 -3 25828 26312 25829 -3 25829 26312 26313 -3 25829 26313 25830 -3 25830 26313 26314 -3 25830 26314 25831 -3 25831 26314 26315 -3 25831 26315 25832 -3 25832 26315 26316 -3 25832 26316 25833 -3 25833 26316 26317 -3 25833 26317 25834 -3 25834 26317 26318 -3 25834 26318 25835 -3 25835 26318 26319 -3 25835 26319 25836 -3 25836 26319 26320 -3 25836 26320 25837 -3 25837 26320 26321 -3 25837 26321 25838 -3 25838 26321 26322 -3 25838 26322 25839 -3 25839 26322 26323 -3 25839 26323 25840 -3 25840 26323 26324 -3 25840 26324 25841 -3 25841 26324 26325 -3 25841 26325 25842 -3 25842 26325 26326 -3 25842 26326 25843 -3 25843 26326 26327 -3 25843 26327 25844 -3 25844 26327 26328 -3 25844 26328 25845 -3 25845 26328 26329 -3 25845 26329 25846 -3 25846 26329 26330 -3 25846 26330 25847 -3 25847 26330 26331 -3 25847 26331 25848 -3 25848 26331 26332 -3 25848 26332 25849 -3 25849 26332 26333 -3 25849 26333 25850 -3 25850 26333 26334 -3 25850 26334 25851 -3 25851 26334 26335 -3 25851 26335 25852 -3 25852 26335 26336 -3 25852 26336 25853 -3 25853 26336 26337 -3 25853 26337 25854 -3 25854 26337 26338 -3 25854 26338 25855 -3 25855 26338 26339 -3 25855 26339 25856 -3 25856 26339 26340 -3 25856 26340 25857 -3 25857 26340 26341 -3 
25857 26341 25858 -3 25858 26341 26342 -3 25858 26342 25859 -3 25859 26342 26343 -3 25859 26343 25860 -3 25860 26343 26344 -3 25860 26344 25861 -3 25861 26344 26345 -3 25861 26345 25862 -3 25862 26345 26346 -3 25862 26346 25863 -3 25863 26346 26347 -3 25863 26347 25864 -3 25864 26347 26348 -3 25864 26348 25865 -3 25865 26348 26349 -3 25865 26349 25866 -3 25866 25873 25874 -3 25866 25874 25867 -3 25866 26349 26415 -3 25866 26415 25873 -3 25867 25874 25875 -3 25867 25875 25868 -3 25869 25877 25878 -3 25869 25878 25870 -3 25870 25878 25879 -3 25870 25879 25871 -3 25871 26350 26351 -3 25871 26351 25872 -3 25871 25879 26418 -3 25871 26418 26350 -3 25873 26415 25874 -3 26416 25874 26415 -3 25874 25880 25881 -3 25874 25881 25875 -3 25874 26416 25880 -3 26484 25880 26416 -3 25875 25881 25882 -3 25875 25882 25876 -3 25877 25883 25884 -3 25877 25884 25878 -3 25878 26417 25879 -3 26418 25879 26417 -3 25878 25884 26486 -3 25878 26486 26417 -3 25880 26484 25881 -3 26485 25881 26484 -3 25881 25885 25886 -3 25881 25886 25882 -3 25881 26485 25885 -3 26554 25885 26485 -3 25883 25887 25888 -3 25883 25888 25884 -3 25884 25888 26555 -3 25884 26555 26486 -3 25885 25889 25890 -3 25885 25890 25886 -3 25885 26554 26623 -3 25885 26623 25889 -3 25887 25891 25892 -3 25887 25892 25888 -3 25888 25892 26624 -3 25888 26624 26555 -3 25889 25893 25894 -3 25889 25894 25890 -3 25889 26623 26692 -3 25889 26692 25893 -3 25891 25895 25896 -3 25891 25896 25892 -3 25892 25896 26693 -3 25892 26693 26624 -3 25893 25897 25898 -3 25893 25898 25894 -3 25893 26692 26761 -3 25893 26761 25897 -3 25895 25899 25900 -3 25895 25900 25896 -3 25896 25900 26762 -3 25896 26762 26693 -3 25897 25901 25902 -3 25897 25902 25898 -3 25897 26761 25901 -3 26830 25901 26761 -3 25899 25903 25904 -3 25899 25904 25900 -3 25900 25904 26831 -3 25900 26831 26762 -3 25901 25905 25906 -3 25901 25906 25902 -3 25901 26830 25905 -3 26899 25905 26830 -3 25903 25907 25908 -3 25903 25908 25904 -3 25904 25908 26900 -3 25904 26900 26831 -3 25905 25909 25910 -3 25905 25910 25906 -3 25905 26899 26968 -3 25905 26968 25909 -3 25907 25911 25912 -3 25907 25912 25908 -3 25908 25912 26969 -3 25908 26969 26900 -3 25909 25913 25914 -3 25909 25914 25910 -3 25909 26968 27037 -3 25909 27037 25913 -3 25911 25915 25916 -3 25911 25916 25912 -3 25912 25916 27038 -3 25912 27038 26969 -3 25913 25917 25918 -3 25913 25918 25914 -3 25913 27037 25917 -3 27106 25917 27037 -3 25915 25919 25920 -3 25915 25920 25916 -3 25916 25920 27107 -3 25916 27107 27038 -3 25917 25921 25922 -3 25917 25922 25918 -3 25917 27106 25921 -3 27175 25921 27106 -3 25919 25923 25924 -3 25919 25924 25920 -3 25920 25924 27176 -3 25920 27176 27107 -3 25921 25925 25926 -3 25921 25926 25922 -3 25921 27175 25925 -3 27244 25925 27175 -3 25923 25927 25928 -3 25923 25928 25924 -3 25924 25928 27245 -3 25924 27245 27176 -3 25925 25929 25930 -3 25925 25930 25926 -3 25925 27244 27313 -3 25925 27313 25929 -3 25927 25931 25932 -3 25927 25932 25928 -3 25928 25932 27314 -3 25928 27314 27245 -3 25929 25933 25934 -3 25929 25934 25930 -3 25929 27313 27382 -3 25929 27382 25933 -3 25931 25935 25936 -3 25931 25936 25932 -3 25932 25936 27383 -3 25932 27383 27314 -3 25933 25937 25938 -3 25933 25938 25934 -3 25933 27382 25937 -3 27451 25937 27382 -3 25935 25939 25940 -3 25935 25940 25936 -3 25936 25940 27452 -3 25936 27452 27383 -3 25937 25941 25942 -3 25937 25942 25938 -3 25937 27451 25941 -3 27520 25941 27451 -3 25939 25943 25944 -3 25939 25944 25940 -3 25940 25944 27521 -3 25940 27521 27452 -3 25941 25945 25946 -3 25941 25946 25942 -3 25941 
27520 25945 -3 27589 25945 27520 -3 25943 25947 25948 -3 25943 25948 25944 -3 25944 25948 27590 -3 25944 27590 27521 -3 25945 25949 25950 -3 25945 25950 25946 -3 25945 27589 27658 -3 25945 27658 25949 -3 25947 25951 25952 -3 25947 25952 25948 -3 25948 25952 27659 -3 25948 27659 27590 -3 25949 25953 25954 -3 25949 25954 25950 -3 25949 27658 27727 -3 25949 27727 25953 -3 25951 25955 25956 -3 25951 25956 25952 -3 25952 25956 27728 -3 25952 27728 27659 -3 25953 25957 25958 -3 25953 25958 25954 -3 25953 27727 25957 -3 27796 25957 27727 -3 25955 25959 25960 -3 25955 25960 25956 -3 25956 25960 27728 -3 27797 27728 25960 -3 25957 25961 25958 -3 25962 25958 25961 -3 25957 27796 25961 -3 27865 25961 27796 -3 25959 25963 25960 -3 25964 25960 25963 -3 25960 25964 27797 -3 27866 27797 25964 -3 25961 25965 25962 -3 25966 25962 25965 -3 25961 27865 25965 -3 27934 25965 27865 -3 25963 25967 25964 -3 25968 25964 25967 -3 25964 25968 27866 -3 27935 27866 25968 -3 25965 25969 25966 -3 25970 25966 25969 -3 25965 27934 28003 -3 25965 28003 25969 -3 25967 25971 25968 -3 25972 25968 25971 -3 25968 25972 27935 -3 28004 27935 25972 -3 25969 25973 25970 -3 25974 25970 25973 -3 25969 28003 28072 -3 25969 28072 25973 -3 25971 25975 25972 -3 25976 25972 25975 -3 25972 25976 28004 -3 28073 28004 25976 -3 25973 25977 25974 -3 25978 25974 25977 -3 25973 28072 25977 -3 28141 25977 28072 -3 25975 25979 25976 -3 25980 25976 25979 -3 25976 25980 28073 -3 28142 28073 25980 -3 25977 25981 25978 -3 25982 25978 25981 -3 25977 28141 25981 -3 28210 25981 28141 -3 25979 25983 25980 -3 25984 25980 25983 -3 25980 25984 28142 -3 28211 28142 25984 -3 25981 25985 25982 -3 25986 25982 25985 -3 25981 28210 25985 -3 28279 25985 28210 -3 25983 25987 25984 -3 25988 25984 25987 -3 25984 25988 28211 -3 28280 28211 25988 -3 25985 25989 25986 -3 25990 25986 25989 -3 25985 28279 28348 -3 25985 28348 25989 -3 25987 25991 25988 -3 25992 25988 25991 -3 25988 25992 28280 -3 28349 28280 25992 -3 25989 25993 25990 -3 25994 25990 25993 -3 25989 28348 28417 -3 25989 28417 25993 -3 25991 25995 25992 -3 25996 25992 25995 -3 25992 25996 28349 -3 28418 28349 25996 -3 25993 25997 25994 -3 25998 25994 25997 -3 25993 28417 25997 -3 28486 25997 28417 -3 25995 25999 25996 -3 26000 25996 25999 -3 25996 26000 28418 -3 28487 28418 26000 -3 25997 26001 25998 -3 26002 25998 26001 -3 25997 28486 26001 -3 28555 26001 28486 -3 25999 26003 26000 -3 26004 26000 26003 -3 26000 26004 28487 -3 28556 28487 26004 -3 26001 26005 26002 -3 26006 26002 26005 -3 26001 28555 26005 -3 28624 26005 28555 -3 26003 26007 26004 -3 26008 26004 26007 -3 26004 26008 28556 -3 28625 28556 26008 -3 26005 26009 26006 -3 26010 26006 26009 -3 26005 28624 28693 -3 26005 28693 26009 -3 26007 26011 26008 -3 26012 26008 26011 -3 26008 26012 28625 -3 28694 28625 26012 -3 26009 26013 26010 -3 26014 26010 26013 -3 26009 28693 28762 -3 26009 28762 26013 -3 26011 26015 26012 -3 26016 26012 26015 -3 26012 26016 28694 -3 28763 28694 26016 -3 26013 26017 26014 -3 26018 26014 26017 -3 26013 28762 26017 -3 28831 26017 28762 -3 26015 26019 26016 -3 26020 26016 26019 -3 26016 26020 28763 -3 28832 28763 26020 -3 26017 26021 26018 -3 26022 26018 26021 -3 26017 28831 26021 -3 28900 26021 28831 -3 26019 26023 26020 -3 26024 26020 26023 -3 26020 26024 28832 -3 28901 28832 26024 -3 26021 26025 26022 -3 26026 26022 26025 -3 26021 28900 26025 -3 28969 26025 28900 -3 26023 26027 26024 -3 26028 26024 26027 -3 26024 26028 28901 -3 28970 28901 26028 -3 26025 26029 26026 -3 26030 26026 26029 -3 26025 28969 29038 -3 26025 29038 
26029 -3 26027 26031 26028 -3 26032 26028 26031 -3 26028 26032 28970 -3 29039 28970 26032 -3 26029 26033 26030 -3 26034 26030 26033 -3 26029 29038 29107 -3 26029 29107 26033 -3 26031 26035 26032 -3 26036 26032 26035 -3 26032 26036 29039 -3 29108 29039 26036 -3 26033 26037 26034 -3 26038 26034 26037 -3 26033 29107 26037 -3 29176 26037 29107 -3 26035 26039 26036 -3 26040 26036 26039 -3 26036 26040 29108 -3 29177 29108 26040 -3 26037 26041 26038 -3 26042 26038 26041 -3 26037 29176 26041 -3 29245 26041 29176 -3 26039 26043 26040 -3 26044 26040 26043 -3 26040 26044 29177 -3 29246 29177 26044 -3 26041 26045 26042 -3 26046 26042 26045 -3 26041 29245 26045 -3 29314 26045 29245 -3 26043 26047 26044 -3 26048 26044 26047 -3 26044 26048 29246 -3 29315 29246 26048 -3 26045 26049 26046 -3 26050 26046 26049 -3 26045 29314 29383 -3 26045 29383 26049 -3 26047 26051 26048 -3 26052 26048 26051 -3 26048 26052 29315 -3 29384 29315 26052 -3 26049 26053 26050 -3 26054 26050 26053 -3 26049 29383 29452 -3 26049 29452 26053 -3 26051 26055 26052 -3 26056 26052 26055 -3 26052 26056 29384 -3 29453 29384 26056 -3 26053 26057 26054 -3 26058 26054 26057 -3 26053 29452 26057 -3 29521 26057 29452 -3 26055 26059 26060 -3 26055 26060 26056 -3 26056 26060 29522 -3 26056 29522 29453 -3 26057 26061 26062 -3 26057 26062 26058 -3 26057 29521 26061 -3 29590 26061 29521 -3 26059 26063 26064 -3 26059 26064 26060 -3 26060 26064 29591 -3 26060 29591 29522 -3 26061 26065 26066 -3 26061 26066 26062 -3 26061 29590 26065 -3 29659 26065 29590 -3 26063 26067 26068 -3 26063 26068 26064 -3 26064 26068 29660 -3 26064 29660 29591 -3 26065 26069 26070 -3 26065 26070 26066 -3 26065 29659 29728 -3 26065 29728 26069 -3 26067 26071 26072 -3 26067 26072 26068 -3 26068 26072 29729 -3 26068 29729 29660 -3 26069 26073 26074 -3 26069 26074 26070 -3 26069 29728 29797 -3 26069 29797 26073 -3 26071 26075 26076 -3 26071 26076 26072 -3 26072 26076 29798 -3 26072 29798 29729 -3 26073 26077 26078 -3 26073 26078 26074 -3 26073 29797 26077 -3 29866 26077 29797 -3 26075 26079 26080 -3 26075 26080 26076 -3 26076 26080 29867 -3 26076 29867 29798 -3 26077 26081 26082 -3 26077 26082 26078 -3 26077 29866 26081 -3 29935 26081 29866 -3 26079 26083 26084 -3 26079 26084 26080 -3 26080 26084 29936 -3 26080 29936 29867 -3 26081 26085 26086 -3 26081 26086 26082 -3 26081 29935 26085 -3 30004 26085 29935 -3 26083 26087 26088 -3 26083 26088 26084 -3 26084 26088 30005 -3 26084 30005 29936 -3 26085 26089 26090 -3 26085 26090 26086 -3 26085 30004 30073 -3 26085 30073 26089 -3 26087 26091 26092 -3 26087 26092 26088 -3 26088 26092 30074 -3 26088 30074 30005 -3 26089 26093 26094 -3 26089 26094 26090 -3 26089 30073 30142 -3 26089 30142 26093 -3 26091 26095 26096 -3 26091 26096 26092 -3 26092 26096 30143 -3 26092 30143 30074 -3 26093 26097 26098 -3 26093 26098 26094 -3 26093 30142 30211 -3 26093 30211 26097 -3 26095 26099 26100 -3 26095 26100 26096 -3 26096 26100 30212 -3 26096 30212 30143 -3 26097 26101 26102 -3 26097 26102 26098 -3 26097 30211 26101 -3 30280 26101 30211 -3 26099 26103 26104 -3 26099 26104 26100 -3 26100 26104 30281 -3 26100 30281 30212 -3 26101 26105 26106 -3 26101 26106 26102 -3 26101 30280 26105 -3 30349 26105 30280 -3 26103 26107 26108 -3 26103 26108 26104 -3 26104 26108 30350 -3 26104 30350 30281 -3 26105 26109 26110 -3 26105 26110 26106 -3 26105 30349 30418 -3 26105 30418 26109 -3 26107 26111 26112 -3 26107 26112 26108 -3 26108 26112 30419 -3 26108 30419 30350 -3 26109 26113 26114 -3 26109 26114 26110 -3 26109 30418 30487 -3 26109 30487 26113 -3 26111 26115 26116 
-3 26111 26116 26112 -3 26112 26116 30488 -3 26112 30488 30419 -3 26113 26117 26118 -3 26113 26118 26114 -3 26113 30487 30556 -3 26113 30556 26117 -3 26115 26119 26120 -3 26115 26120 26116 -3 26116 26120 30557 -3 26116 30557 30488 -3 26117 26121 26122 -3 26117 26122 26118 -3 26117 30556 26121 -3 30625 26121 30556 -3 26119 26123 26124 -3 26119 26124 26120 -3 26120 26124 30626 -3 26120 30626 30557 -3 26121 26125 26126 -3 26121 26126 26122 -3 26121 30625 26125 -3 30694 26125 30625 -3 26123 26127 26128 -3 26123 26128 26124 -3 26124 26128 30695 -3 26124 30695 30626 -3 26125 26129 26130 -3 26125 26130 26126 -3 26125 30694 30763 -3 26125 30763 26129 -3 26127 26131 26132 -3 26127 26132 26128 -3 26128 26132 30764 -3 26128 30764 30695 -3 26129 26133 26134 -3 26129 26134 26130 -3 26129 30763 30832 -3 26129 30832 26133 -3 26131 26135 26136 -3 26131 26136 26132 -3 26132 26136 30833 -3 26132 30833 30764 -3 26133 26139 26140 -3 26133 26140 26134 -3 26133 30832 30901 -3 26133 30901 26139 -3 26135 26141 26142 -3 26135 26142 26136 -3 26136 26142 26143 -3 26136 26143 26137 -3 26136 26137 30834 -3 26136 30834 30833 -3 26137 26143 30902 -3 26137 30902 30834 -3 26138 26146 26147 -3 26138 26147 26139 -3 26138 26139 30901 -3 26138 30901 30900 -3 26138 30900 26146 -3 30968 26146 30900 -3 26139 26147 26148 -3 26139 26148 26140 -3 26142 26149 26150 -3 26142 26150 26143 -3 26143 26150 26151 -3 26143 26151 26144 -3 26143 26144 30903 -3 26143 30903 30902 -3 26144 26151 30969 -3 26144 30969 30903 -3 26145 26215 26146 -3 26216 26146 26215 -3 26145 26146 30968 -3 26145 30968 30967 -3 26145 30967 26215 -3 31033 26215 30967 -3 26146 26216 26147 -3 26217 26147 26216 -3 26150 26218 26151 -3 26219 26151 26218 -3 26151 26219 26152 -3 26220 26152 26219 -3 26151 26152 30970 -3 26151 30970 30969 -3 26152 26220 26153 -3 26221 26153 26220 -3 26152 26153 30971 -3 26152 30971 30970 -3 26153 26221 26154 -3 26222 26154 26221 -3 26153 26154 30972 -3 26153 30972 30971 -3 26154 26222 26155 -3 26223 26155 26222 -3 26154 26155 30973 -3 26154 30973 30972 -3 26155 26223 26156 -3 26224 26156 26223 -3 26155 26156 30974 -3 26155 30974 30973 -3 26156 26224 26157 -3 26225 26157 26224 -3 26156 26157 30975 -3 26156 30975 30974 -3 26157 26225 26158 -3 26226 26158 26225 -3 26157 26158 30975 -3 30976 30975 26158 -3 26158 26226 26159 -3 26227 26159 26226 -3 26158 26159 30976 -3 30977 30976 26159 -3 26159 26227 26160 -3 26228 26160 26227 -3 26159 26160 30977 -3 30978 30977 26160 -3 26160 26228 26161 -3 26229 26161 26228 -3 26160 26161 30978 -3 30979 30978 26161 -3 26161 26229 26162 -3 26230 26162 26229 -3 26161 26162 30979 -3 30980 30979 26162 -3 26162 26230 26163 -3 26231 26163 26230 -3 26162 26163 30980 -3 30981 30980 26163 -3 26163 26231 26164 -3 26232 26164 26231 -3 26163 26164 30981 -3 30982 30981 26164 -3 26164 26232 26165 -3 26233 26165 26232 -3 26164 26165 30982 -3 30983 30982 26165 -3 26165 26233 26166 -3 26234 26166 26233 -3 26165 26166 30983 -3 30984 30983 26166 -3 26166 26234 26167 -3 26235 26167 26234 -3 26166 26167 30984 -3 30985 30984 26167 -3 26167 26235 26168 -3 26236 26168 26235 -3 26167 26168 30985 -3 30986 30985 26168 -3 26168 26236 26169 -3 26237 26169 26236 -3 26168 26169 30986 -3 30987 30986 26169 -3 26169 26237 26170 -3 26238 26170 26237 -3 26169 26170 30987 -3 30988 30987 26170 -3 26170 26238 26171 -3 26239 26171 26238 -3 26170 26171 30988 -3 30989 30988 26171 -3 26171 26239 26172 -3 26240 26172 26239 -3 26171 26172 30989 -3 30990 30989 26172 -3 26172 26240 26173 -3 26241 26173 26240 -3 26172 26173 30990 -3 30991 30990 26173 -3 
26173 26241 26174 -3 26242 26174 26241 -3 26173 26174 30991 -3 30992 30991 26174 -3 26174 26242 26175 -3 26243 26175 26242 -3 26174 26175 30992 -3 30993 30992 26175 -3 26175 26243 26176 -3 26244 26176 26243 -3 26175 26176 30993 -3 30994 30993 26176 -3 26176 26244 26177 -3 26245 26177 26244 -3 26176 26177 30994 -3 30995 30994 26177 -3 26177 26245 26178 -3 26246 26178 26245 -3 26177 26178 30995 -3 30996 30995 26178 -3 26178 26246 26179 -3 26247 26179 26246 -3 26178 26179 30996 -3 30997 30996 26179 -3 26179 26247 26180 -3 26248 26180 26247 -3 26179 26180 30997 -3 30998 30997 26180 -3 26180 26248 26181 -3 26249 26181 26248 -3 26180 26181 30998 -3 30999 30998 26181 -3 26181 26249 26182 -3 26250 26182 26249 -3 26181 26182 30999 -3 31000 30999 26182 -3 26182 26250 26183 -3 26251 26183 26250 -3 26182 26183 31000 -3 31001 31000 26183 -3 26183 26251 26184 -3 26252 26184 26251 -3 26183 26184 31001 -3 31002 31001 26184 -3 26184 26252 26185 -3 26253 26185 26252 -3 26184 26185 31002 -3 31003 31002 26185 -3 26185 26253 26186 -3 26254 26186 26253 -3 26185 26186 31003 -3 31004 31003 26186 -3 26186 26254 26187 -3 26255 26187 26254 -3 26186 26187 31004 -3 31005 31004 26187 -3 26187 26255 26188 -3 26256 26188 26255 -3 26187 26188 31005 -3 31006 31005 26188 -3 26188 26256 26189 -3 26257 26189 26256 -3 26188 26189 31006 -3 31007 31006 26189 -3 26189 26257 26190 -3 26258 26190 26257 -3 26189 26190 31007 -3 31008 31007 26190 -3 26190 26258 26191 -3 26259 26191 26258 -3 26190 26191 31008 -3 31009 31008 26191 -3 26191 26259 26192 -3 26260 26192 26259 -3 26191 26192 31009 -3 31010 31009 26192 -3 26192 26260 26193 -3 26261 26193 26260 -3 26192 26193 31010 -3 31011 31010 26193 -3 26193 26261 26194 -3 26262 26194 26261 -3 26193 26194 31011 -3 31012 31011 26194 -3 26194 26262 26195 -3 26263 26195 26262 -3 26194 26195 31012 -3 31013 31012 26195 -3 26195 26263 26196 -3 26264 26196 26263 -3 26195 26196 31013 -3 31014 31013 26196 -3 26196 26264 26197 -3 26265 26197 26264 -3 26196 26197 31014 -3 31015 31014 26197 -3 26197 26265 26198 -3 26266 26198 26265 -3 26197 26198 31015 -3 31016 31015 26198 -3 26198 26266 26199 -3 26267 26199 26266 -3 26198 26199 31016 -3 31017 31016 26199 -3 26199 26267 26200 -3 26268 26200 26267 -3 26199 26200 31017 -3 31018 31017 26200 -3 26200 26268 26201 -3 26269 26201 26268 -3 26200 26201 31018 -3 31019 31018 26201 -3 26201 26269 26202 -3 26270 26202 26269 -3 26201 26202 31019 -3 31020 31019 26202 -3 26202 26270 26203 -3 26271 26203 26270 -3 26202 26203 31020 -3 31021 31020 26203 -3 26203 26271 26204 -3 26272 26204 26271 -3 26203 26204 31021 -3 31022 31021 26204 -3 26204 26272 26205 -3 26273 26205 26272 -3 26204 26205 31022 -3 31023 31022 26205 -3 26205 26273 26206 -3 26274 26206 26273 -3 26205 26206 31023 -3 31024 31023 26206 -3 26206 26274 26207 -3 26275 26207 26274 -3 26206 26207 31024 -3 31025 31024 26207 -3 26207 26275 26208 -3 26276 26208 26275 -3 26207 26208 31025 -3 31026 31025 26208 -3 26208 26276 26209 -3 26277 26209 26276 -3 26208 26209 31026 -3 31027 31026 26209 -3 26209 26277 26210 -3 26278 26210 26277 -3 26209 26210 31027 -3 31028 31027 26210 -3 26210 26278 26211 -3 26279 26211 26278 -3 26210 26211 31028 -3 31029 31028 26211 -3 26211 26279 26212 -3 26280 26212 26279 -3 26211 26212 31029 -3 31030 31029 26212 -3 26212 26280 26213 -3 26281 26213 26280 -3 26212 26213 31030 -3 31031 31030 26213 -3 26213 26281 26214 -3 26282 26214 26281 -3 26213 26214 31031 -3 31032 31031 26214 -3 26214 26282 26215 -3 26283 26215 26282 -3 26214 26215 31032 -3 31033 31032 26215 -3 26215 26283 26216 -3 26284 
26216 26283 -3 26285 26351 26352 -3 26285 26352 26286 -3 26286 26352 26353 -3 26286 26353 26287 -3 26287 26353 26354 -3 26287 26354 26288 -3 26288 26354 26355 -3 26288 26355 26289 -3 26289 26355 26356 -3 26289 26356 26290 -3 26290 26356 26357 -3 26290 26357 26291 -3 26291 26357 26358 -3 26291 26358 26292 -3 26292 26358 26359 -3 26292 26359 26293 -3 26293 26359 26360 -3 26293 26360 26294 -3 26294 26360 26361 -3 26294 26361 26295 -3 26295 26361 26362 -3 26295 26362 26296 -3 26296 26362 26363 -3 26296 26363 26297 -3 26297 26363 26364 -3 26297 26364 26298 -3 26298 26364 26365 -3 26298 26365 26299 -3 26299 26365 26366 -3 26299 26366 26300 -3 26300 26366 26367 -3 26300 26367 26301 -3 26301 26367 26368 -3 26301 26368 26302 -3 26302 26368 26369 -3 26302 26369 26303 -3 26303 26369 26370 -3 26303 26370 26304 -3 26304 26370 26371 -3 26304 26371 26305 -3 26305 26371 26372 -3 26305 26372 26306 -3 26306 26372 26373 -3 26306 26373 26307 -3 26307 26373 26374 -3 26307 26374 26308 -3 26308 26374 26375 -3 26308 26375 26309 -3 26309 26375 26376 -3 26309 26376 26310 -3 26310 26376 26377 -3 26310 26377 26311 -3 26311 26377 26378 -3 26311 26378 26312 -3 26312 26378 26379 -3 26312 26379 26313 -3 26313 26379 26380 -3 26313 26380 26314 -3 26314 26380 26381 -3 26314 26381 26315 -3 26315 26381 26382 -3 26315 26382 26316 -3 26316 26382 26383 -3 26316 26383 26317 -3 26317 26383 26384 -3 26317 26384 26318 -3 26318 26384 26385 -3 26318 26385 26319 -3 26319 26385 26320 -3 26386 26320 26385 -3 26320 26386 26321 -3 26387 26321 26386 -3 26321 26387 26322 -3 26388 26322 26387 -3 26322 26388 26323 -3 26389 26323 26388 -3 26323 26389 26324 -3 26390 26324 26389 -3 26324 26390 26325 -3 26391 26325 26390 -3 26325 26391 26326 -3 26392 26326 26391 -3 26326 26392 26327 -3 26393 26327 26392 -3 26327 26393 26328 -3 26394 26328 26393 -3 26328 26394 26329 -3 26395 26329 26394 -3 26329 26395 26330 -3 26396 26330 26395 -3 26330 26396 26331 -3 26397 26331 26396 -3 26331 26397 26332 -3 26398 26332 26397 -3 26332 26398 26333 -3 26399 26333 26398 -3 26333 26399 26334 -3 26400 26334 26399 -3 26334 26400 26335 -3 26401 26335 26400 -3 26335 26401 26336 -3 26402 26336 26401 -3 26336 26402 26337 -3 26403 26337 26402 -3 26337 26403 26338 -3 26404 26338 26403 -3 26338 26404 26339 -3 26405 26339 26404 -3 26339 26405 26340 -3 26406 26340 26405 -3 26340 26406 26341 -3 26407 26341 26406 -3 26341 26407 26342 -3 26408 26342 26407 -3 26342 26408 26343 -3 26409 26343 26408 -3 26343 26409 26344 -3 26410 26344 26409 -3 26344 26410 26345 -3 26411 26345 26410 -3 26345 26411 26346 -3 26412 26346 26411 -3 26346 26412 26347 -3 26413 26347 26412 -3 26347 26413 26348 -3 26414 26348 26413 -3 26348 26414 26349 -3 26415 26349 26414 -3 26350 26418 26351 -3 26419 26351 26418 -3 26351 26419 26352 -3 26420 26352 26419 -3 26352 26420 26353 -3 26421 26353 26420 -3 26353 26421 26354 -3 26422 26354 26421 -3 26354 26422 26355 -3 26423 26355 26422 -3 26355 26423 26356 -3 26424 26356 26423 -3 26356 26424 26357 -3 26425 26357 26424 -3 26357 26425 26358 -3 26426 26358 26425 -3 26358 26426 26359 -3 26427 26359 26426 -3 26359 26427 26360 -3 26428 26360 26427 -3 26360 26428 26361 -3 26429 26361 26428 -3 26361 26429 26362 -3 26430 26362 26429 -3 26362 26430 26363 -3 26431 26363 26430 -3 26363 26431 26364 -3 26432 26364 26431 -3 26364 26432 26365 -3 26433 26365 26432 -3 26365 26433 26366 -3 26434 26366 26433 -3 26366 26434 26367 -3 26435 26367 26434 -3 26367 26435 26368 -3 26436 26368 26435 -3 26368 26436 26369 -3 26437 26369 26436 -3 26369 26437 26370 -3 26438 26370 26437 -3 26370 26438 
26371 -3 26439 26371 26438 -3 26371 26439 26372 -3 26440 26372 26439 -3 26372 26440 26373 -3 26441 26373 26440 -3 26373 26441 26374 -3 26442 26374 26441 -3 26374 26442 26375 -3 26443 26375 26442 -3 26375 26443 26376 -3 26444 26376 26443 -3 26376 26444 26377 -3 26445 26377 26444 -3 26377 26445 26378 -3 26446 26378 26445 -3 26378 26446 26379 -3 26447 26379 26446 -3 26379 26447 26380 -3 26448 26380 26447 -3 26380 26448 26381 -3 26449 26381 26448 -3 26381 26449 26382 -3 26450 26382 26449 -3 26382 26450 26383 -3 26451 26383 26450 -3 26383 26451 26384 -3 26452 26384 26451 -3 26384 26452 26385 -3 26453 26385 26452 -3 26385 26453 26386 -3 26454 26386 26453 -3 26386 26454 26387 -3 26455 26387 26454 -3 26387 26455 26388 -3 26456 26388 26455 -3 26388 26456 26389 -3 26457 26389 26456 -3 26389 26457 26390 -3 26458 26390 26457 -3 26390 26458 26391 -3 26459 26391 26458 -3 26391 26459 26392 -3 26460 26392 26459 -3 26392 26460 26393 -3 26461 26393 26460 -3 26393 26461 26394 -3 26462 26394 26461 -3 26394 26462 26395 -3 26463 26395 26462 -3 26395 26463 26396 -3 26464 26396 26463 -3 26396 26464 26397 -3 26465 26397 26464 -3 26397 26465 26398 -3 26466 26398 26465 -3 26398 26466 26399 -3 26467 26399 26466 -3 26399 26467 26400 -3 26468 26400 26467 -3 26400 26468 26401 -3 26469 26401 26468 -3 26401 26469 26402 -3 26470 26402 26469 -3 26402 26470 26403 -3 26471 26403 26470 -3 26403 26471 26404 -3 26472 26404 26471 -3 26404 26472 26405 -3 26473 26405 26472 -3 26405 26473 26406 -3 26474 26406 26473 -3 26406 26474 26407 -3 26475 26407 26474 -3 26407 26475 26408 -3 26476 26408 26475 -3 26408 26476 26409 -3 26477 26409 26476 -3 26409 26477 26410 -3 26478 26410 26477 -3 26410 26478 26411 -3 26479 26411 26478 -3 26411 26479 26412 -3 26480 26412 26479 -3 26412 26480 26413 -3 26481 26413 26480 -3 26413 26481 26414 -3 26482 26414 26481 -3 26414 26482 26415 -3 26483 26415 26482 -3 26415 26483 26416 -3 26484 26416 26483 -3 26417 26486 26487 -3 26417 26487 26418 -3 26418 26487 26488 -3 26418 26488 26419 -3 26419 26488 26489 -3 26419 26489 26420 -3 26420 26489 26490 -3 26420 26490 26421 -3 26421 26490 26491 -3 26421 26491 26422 -3 26422 26491 26492 -3 26422 26492 26423 -3 26423 26492 26493 -3 26423 26493 26424 -3 26424 26493 26494 -3 26424 26494 26425 -3 26425 26494 26495 -3 26425 26495 26426 -3 26426 26495 26496 -3 26426 26496 26427 -3 26427 26496 26497 -3 26427 26497 26428 -3 26428 26497 26498 -3 26428 26498 26429 -3 26429 26498 26499 -3 26429 26499 26430 -3 26430 26499 26500 -3 26430 26500 26431 -3 26431 26500 26501 -3 26431 26501 26432 -3 26432 26501 26502 -3 26432 26502 26433 -3 26433 26502 26503 -3 26433 26503 26434 -3 26434 26503 26504 -3 26434 26504 26435 -3 26435 26504 26505 -3 26435 26505 26436 -3 26436 26505 26506 -3 26436 26506 26437 -3 26437 26506 26507 -3 26437 26507 26438 -3 26438 26507 26508 -3 26438 26508 26439 -3 26439 26508 26509 -3 26439 26509 26440 -3 26440 26509 26510 -3 26440 26510 26441 -3 26441 26510 26511 -3 26441 26511 26442 -3 26442 26511 26512 -3 26442 26512 26443 -3 26443 26512 26513 -3 26443 26513 26444 -3 26444 26513 26514 -3 26444 26514 26445 -3 26445 26514 26515 -3 26445 26515 26446 -3 26446 26515 26516 -3 26446 26516 26447 -3 26447 26516 26517 -3 26447 26517 26448 -3 26448 26517 26518 -3 26448 26518 26449 -3 26449 26518 26519 -3 26449 26519 26450 -3 26450 26519 26520 -3 26450 26520 26451 -3 26451 26520 26521 -3 26451 26521 26452 -3 26452 26521 26522 -3 26452 26522 26453 -3 26453 26522 26523 -3 26453 26523 26454 -3 26454 26523 26524 -3 26454 26524 26455 -3 26455 26524 26525 -3 26455 26525 26456 
-3 26456 26525 26526 -3 26456 26526 26457 -3 26457 26526 26527 -3 26457 26527 26458 -3 26458 26527 26528 -3 26458 26528 26459 -3 26459 26528 26529 -3 26459 26529 26460 -3 26460 26529 26530 -3 26460 26530 26461 -3 26461 26530 26531 -3 26461 26531 26462 -3 26462 26531 26532 -3 26462 26532 26463 -3 26463 26532 26533 -3 26463 26533 26464 -3 26464 26533 26534 -3 26464 26534 26465 -3 26465 26534 26535 -3 26465 26535 26466 -3 26466 26535 26536 -3 26466 26536 26467 -3 26467 26536 26537 -3 26467 26537 26468 -3 26468 26537 26538 -3 26468 26538 26469 -3 26469 26538 26539 -3 26469 26539 26470 -3 26470 26539 26540 -3 26470 26540 26471 -3 26471 26540 26541 -3 26471 26541 26472 -3 26472 26541 26542 -3 26472 26542 26473 -3 26473 26542 26543 -3 26473 26543 26474 -3 26474 26543 26544 -3 26474 26544 26475 -3 26475 26544 26545 -3 26475 26545 26476 -3 26476 26545 26546 -3 26476 26546 26477 -3 26477 26546 26547 -3 26477 26547 26478 -3 26478 26547 26548 -3 26478 26548 26479 -3 26479 26548 26549 -3 26479 26549 26480 -3 26480 26549 26550 -3 26480 26550 26481 -3 26481 26550 26551 -3 26481 26551 26482 -3 26482 26551 26552 -3 26482 26552 26483 -3 26483 26552 26553 -3 26483 26553 26484 -3 26484 26553 26554 -3 26484 26554 26485 -3 26486 26555 26556 -3 26486 26556 26487 -3 26487 26556 26557 -3 26487 26557 26488 -3 26488 26557 26558 -3 26488 26558 26489 -3 26489 26558 26559 -3 26489 26559 26490 -3 26490 26559 26560 -3 26490 26560 26491 -3 26491 26560 26561 -3 26491 26561 26492 -3 26492 26561 26562 -3 26492 26562 26493 -3 26493 26562 26563 -3 26493 26563 26494 -3 26494 26563 26564 -3 26494 26564 26495 -3 26495 26564 26565 -3 26495 26565 26496 -3 26496 26565 26566 -3 26496 26566 26497 -3 26497 26566 26567 -3 26497 26567 26498 -3 26498 26567 26568 -3 26498 26568 26499 -3 26499 26568 26569 -3 26499 26569 26500 -3 26500 26569 26570 -3 26500 26570 26501 -3 26501 26570 26571 -3 26501 26571 26502 -3 26502 26571 26572 -3 26502 26572 26503 -3 26503 26572 26573 -3 26503 26573 26504 -3 26504 26573 26574 -3 26504 26574 26505 -3 26505 26574 26575 -3 26505 26575 26506 -3 26506 26575 26576 -3 26506 26576 26507 -3 26507 26576 26577 -3 26507 26577 26508 -3 26508 26577 26578 -3 26508 26578 26509 -3 26509 26578 26579 -3 26509 26579 26510 -3 26510 26579 26580 -3 26510 26580 26511 -3 26511 26580 26581 -3 26511 26581 26512 -3 26512 26581 26582 -3 26512 26582 26513 -3 26513 26582 26583 -3 26513 26583 26514 -3 26514 26583 26584 -3 26514 26584 26515 -3 26515 26584 26585 -3 26515 26585 26516 -3 26516 26585 26586 -3 26516 26586 26517 -3 26517 26586 26518 -3 26587 26518 26586 -3 26518 26587 26519 -3 26588 26519 26587 -3 26519 26588 26520 -3 26589 26520 26588 -3 26520 26589 26521 -3 26590 26521 26589 -3 26521 26590 26522 -3 26591 26522 26590 -3 26522 26591 26523 -3 26592 26523 26591 -3 26523 26592 26524 -3 26593 26524 26592 -3 26524 26593 26525 -3 26594 26525 26593 -3 26525 26594 26526 -3 26595 26526 26594 -3 26526 26595 26527 -3 26596 26527 26595 -3 26527 26596 26528 -3 26597 26528 26596 -3 26528 26597 26529 -3 26598 26529 26597 -3 26529 26598 26530 -3 26599 26530 26598 -3 26530 26599 26531 -3 26600 26531 26599 -3 26531 26600 26532 -3 26601 26532 26600 -3 26532 26601 26533 -3 26602 26533 26601 -3 26533 26602 26534 -3 26603 26534 26602 -3 26534 26603 26535 -3 26604 26535 26603 -3 26535 26604 26536 -3 26605 26536 26604 -3 26536 26605 26537 -3 26606 26537 26605 -3 26537 26606 26538 -3 26607 26538 26606 -3 26538 26607 26539 -3 26608 26539 26607 -3 26539 26608 26540 -3 26609 26540 26608 -3 26540 26609 26541 -3 26610 26541 26609 -3 26541 26610 26542 -3 
26611 26542 26610 -3 26542 26611 26543 -3 26612 26543 26611 -3 26543 26612 26544 -3 26613 26544 26612 -3 26544 26613 26545 -3 26614 26545 26613 -3 26545 26614 26546 -3 26615 26546 26614 -3 26546 26615 26547 -3 26616 26547 26615 -3 26547 26616 26548 -3 26617 26548 26616 -3 26548 26617 26549 -3 26618 26549 26617 -3 26549 26618 26550 -3 26619 26550 26618 -3 26550 26619 26551 -3 26620 26551 26619 -3 26551 26620 26552 -3 26621 26552 26620 -3 26552 26621 26553 -3 26622 26553 26621 -3 26553 26622 26554 -3 26623 26554 26622 -3 26555 26624 26556 -3 26625 26556 26624 -3 26556 26625 26557 -3 26626 26557 26625 -3 26557 26626 26558 -3 26627 26558 26626 -3 26558 26627 26559 -3 26628 26559 26627 -3 26559 26628 26560 -3 26629 26560 26628 -3 26560 26629 26561 -3 26630 26561 26629 -3 26561 26630 26562 -3 26631 26562 26630 -3 26562 26631 26563 -3 26632 26563 26631 -3 26563 26632 26564 -3 26633 26564 26632 -3 26564 26633 26565 -3 26634 26565 26633 -3 26565 26634 26566 -3 26635 26566 26634 -3 26566 26635 26567 -3 26636 26567 26635 -3 26567 26636 26568 -3 26637 26568 26636 -3 26568 26637 26569 -3 26638 26569 26637 -3 26569 26638 26570 -3 26639 26570 26638 -3 26570 26639 26571 -3 26640 26571 26639 -3 26571 26640 26572 -3 26641 26572 26640 -3 26572 26641 26573 -3 26642 26573 26641 -3 26573 26642 26574 -3 26643 26574 26642 -3 26574 26643 26575 -3 26644 26575 26643 -3 26575 26644 26576 -3 26645 26576 26644 -3 26576 26645 26577 -3 26646 26577 26645 -3 26577 26646 26578 -3 26647 26578 26646 -3 26578 26647 26579 -3 26648 26579 26647 -3 26579 26648 26580 -3 26649 26580 26648 -3 26580 26649 26581 -3 26650 26581 26649 -3 26581 26650 26582 -3 26651 26582 26650 -3 26582 26651 26583 -3 26652 26583 26651 -3 26583 26652 26584 -3 26653 26584 26652 -3 26584 26653 26585 -3 26654 26585 26653 -3 26585 26654 26586 -3 26655 26586 26654 -3 26586 26655 26587 -3 26656 26587 26655 -3 26587 26656 26588 -3 26657 26588 26656 -3 26588 26657 26589 -3 26658 26589 26657 -3 26589 26658 26590 -3 26659 26590 26658 -3 26590 26659 26591 -3 26660 26591 26659 -3 26591 26660 26592 -3 26661 26592 26660 -3 26592 26661 26593 -3 26662 26593 26661 -3 26593 26662 26594 -3 26663 26594 26662 -3 26594 26663 26595 -3 26664 26595 26663 -3 26595 26664 26596 -3 26665 26596 26664 -3 26596 26665 26597 -3 26666 26597 26665 -3 26597 26666 26598 -3 26667 26598 26666 -3 26598 26667 26599 -3 26668 26599 26667 -3 26599 26668 26600 -3 26669 26600 26668 -3 26600 26669 26601 -3 26670 26601 26669 -3 26601 26670 26602 -3 26671 26602 26670 -3 26602 26671 26603 -3 26672 26603 26671 -3 26603 26672 26604 -3 26673 26604 26672 -3 26604 26673 26605 -3 26674 26605 26673 -3 26605 26674 26606 -3 26675 26606 26674 -3 26606 26675 26607 -3 26676 26607 26675 -3 26607 26676 26608 -3 26677 26608 26676 -3 26608 26677 26609 -3 26678 26609 26677 -3 26609 26678 26610 -3 26679 26610 26678 -3 26610 26679 26611 -3 26680 26611 26679 -3 26611 26680 26612 -3 26681 26612 26680 -3 26612 26681 26613 -3 26682 26613 26681 -3 26613 26682 26614 -3 26683 26614 26682 -3 26614 26683 26615 -3 26684 26615 26683 -3 26615 26684 26616 -3 26685 26616 26684 -3 26616 26685 26617 -3 26686 26617 26685 -3 26617 26686 26618 -3 26687 26618 26686 -3 26618 26687 26688 -3 26618 26688 26619 -3 26619 26688 26689 -3 26619 26689 26620 -3 26620 26689 26690 -3 26620 26690 26621 -3 26621 26690 26691 -3 26621 26691 26622 -3 26622 26691 26692 -3 26622 26692 26623 -3 26624 26693 26694 -3 26624 26694 26625 -3 26625 26694 26695 -3 26625 26695 26626 -3 26626 26695 26696 -3 26626 26696 26627 -3 26627 26696 26697 -3 26627 26697 26628 -3 26628 
26697 26698 -3 26628 26698 26629 -3 26629 26698 26699 -3 26629 26699 26630 -3 26630 26699 26700 -3 26630 26700 26631 -3 26631 26700 26701 -3 26631 26701 26632 -3 26632 26701 26702 -3 26632 26702 26633 -3 26633 26702 26703 -3 26633 26703 26634 -3 26634 26703 26704 -3 26634 26704 26635 -3 26635 26704 26705 -3 26635 26705 26636 -3 26636 26705 26706 -3 26636 26706 26637 -3 26637 26706 26707 -3 26637 26707 26638 -3 26638 26707 26708 -3 26638 26708 26639 -3 26639 26708 26709 -3 26639 26709 26640 -3 26640 26709 26710 -3 26640 26710 26641 -3 26641 26710 26711 -3 26641 26711 26642 -3 26642 26711 26712 -3 26642 26712 26643 -3 26643 26712 26713 -3 26643 26713 26644 -3 26644 26713 26714 -3 26644 26714 26645 -3 26645 26714 26715 -3 26645 26715 26646 -3 26646 26715 26716 -3 26646 26716 26647 -3 26647 26716 26717 -3 26647 26717 26648 -3 26648 26717 26718 -3 26648 26718 26649 -3 26649 26718 26719 -3 26649 26719 26650 -3 26650 26719 26720 -3 26650 26720 26651 -3 26651 26720 26721 -3 26651 26721 26652 -3 26652 26721 26722 -3 26652 26722 26653 -3 26653 26722 26723 -3 26653 26723 26654 -3 26654 26723 26724 -3 26654 26724 26655 -3 26655 26724 26725 -3 26655 26725 26656 -3 26656 26725 26726 -3 26656 26726 26657 -3 26657 26726 26727 -3 26657 26727 26658 -3 26658 26727 26728 -3 26658 26728 26659 -3 26659 26728 26729 -3 26659 26729 26660 -3 26660 26729 26730 -3 26660 26730 26661 -3 26661 26730 26731 -3 26661 26731 26662 -3 26662 26731 26732 -3 26662 26732 26663 -3 26663 26732 26733 -3 26663 26733 26664 -3 26664 26733 26734 -3 26664 26734 26665 -3 26665 26734 26735 -3 26665 26735 26666 -3 26666 26735 26736 -3 26666 26736 26667 -3 26667 26736 26737 -3 26667 26737 26668 -3 26668 26737 26738 -3 26668 26738 26669 -3 26669 26738 26739 -3 26669 26739 26670 -3 26670 26739 26740 -3 26670 26740 26671 -3 26671 26740 26741 -3 26671 26741 26672 -3 26672 26741 26742 -3 26672 26742 26673 -3 26673 26742 26743 -3 26673 26743 26674 -3 26674 26743 26744 -3 26674 26744 26675 -3 26675 26744 26745 -3 26675 26745 26676 -3 26676 26745 26746 -3 26676 26746 26677 -3 26677 26746 26747 -3 26677 26747 26678 -3 26678 26747 26748 -3 26678 26748 26679 -3 26679 26748 26749 -3 26679 26749 26680 -3 26680 26749 26750 -3 26680 26750 26681 -3 26681 26750 26751 -3 26681 26751 26682 -3 26682 26751 26752 -3 26682 26752 26683 -3 26683 26752 26753 -3 26683 26753 26684 -3 26684 26753 26754 -3 26684 26754 26685 -3 26685 26754 26755 -3 26685 26755 26686 -3 26686 26755 26756 -3 26686 26756 26687 -3 26687 26756 26757 -3 26687 26757 26688 -3 26688 26757 26758 -3 26688 26758 26689 -3 26689 26758 26759 -3 26689 26759 26690 -3 26690 26759 26760 -3 26690 26760 26691 -3 26691 26760 26761 -3 26691 26761 26692 -3 26693 26762 26763 -3 26693 26763 26694 -3 26694 26763 26764 -3 26694 26764 26695 -3 26695 26764 26765 -3 26695 26765 26696 -3 26696 26765 26766 -3 26696 26766 26697 -3 26697 26766 26767 -3 26697 26767 26698 -3 26698 26767 26768 -3 26698 26768 26699 -3 26699 26768 26769 -3 26699 26769 26700 -3 26700 26769 26770 -3 26700 26770 26701 -3 26701 26770 26771 -3 26701 26771 26702 -3 26702 26771 26772 -3 26702 26772 26703 -3 26703 26772 26773 -3 26703 26773 26704 -3 26704 26773 26774 -3 26704 26774 26705 -3 26705 26774 26775 -3 26705 26775 26706 -3 26706 26775 26776 -3 26706 26776 26707 -3 26707 26776 26777 -3 26707 26777 26708 -3 26708 26777 26778 -3 26708 26778 26709 -3 26709 26778 26779 -3 26709 26779 26710 -3 26710 26779 26780 -3 26710 26780 26711 -3 26711 26780 26781 -3 26711 26781 26712 -3 26712 26781 26782 -3 26712 26782 26713 -3 26713 26782 26783 -3 26713 26783 
26714 -3 26714 26783 26784 -3 26714 26784 26715 -3 26715 26784 26785 -3 26715 26785 26716 -3 26716 26785 26786 -3 26716 26786 26717 -3 26717 26786 26787 -3 26717 26787 26718 -3 26718 26787 26719 -3 26788 26719 26787 -3 26719 26788 26720 -3 26789 26720 26788 -3 26720 26789 26721 -3 26790 26721 26789 -3 26721 26790 26722 -3 26791 26722 26790 -3 26722 26791 26723 -3 26792 26723 26791 -3 26723 26792 26724 -3 26793 26724 26792 -3 26724 26793 26725 -3 26794 26725 26793 -3 26725 26794 26726 -3 26795 26726 26794 -3 26726 26795 26727 -3 26796 26727 26795 -3 26727 26796 26728 -3 26797 26728 26796 -3 26728 26797 26729 -3 26798 26729 26797 -3 26729 26798 26730 -3 26799 26730 26798 -3 26730 26799 26731 -3 26800 26731 26799 -3 26731 26800 26732 -3 26801 26732 26800 -3 26732 26801 26733 -3 26802 26733 26801 -3 26733 26802 26734 -3 26803 26734 26802 -3 26734 26803 26735 -3 26804 26735 26803 -3 26735 26804 26736 -3 26805 26736 26804 -3 26736 26805 26737 -3 26806 26737 26805 -3 26737 26806 26738 -3 26807 26738 26806 -3 26738 26807 26739 -3 26808 26739 26807 -3 26739 26808 26740 -3 26809 26740 26808 -3 26740 26809 26741 -3 26810 26741 26809 -3 26741 26810 26742 -3 26811 26742 26810 -3 26742 26811 26743 -3 26812 26743 26811 -3 26743 26812 26744 -3 26813 26744 26812 -3 26744 26813 26745 -3 26814 26745 26813 -3 26745 26814 26746 -3 26815 26746 26814 -3 26746 26815 26747 -3 26816 26747 26815 -3 26747 26816 26748 -3 26817 26748 26816 -3 26748 26817 26749 -3 26818 26749 26817 -3 26749 26818 26750 -3 26819 26750 26818 -3 26750 26819 26751 -3 26820 26751 26819 -3 26751 26820 26752 -3 26821 26752 26820 -3 26752 26821 26753 -3 26822 26753 26821 -3 26753 26822 26754 -3 26823 26754 26822 -3 26754 26823 26755 -3 26824 26755 26823 -3 26755 26824 26756 -3 26825 26756 26824 -3 26756 26825 26757 -3 26826 26757 26825 -3 26757 26826 26758 -3 26827 26758 26826 -3 26758 26827 26759 -3 26828 26759 26827 -3 26759 26828 26760 -3 26829 26760 26828 -3 26760 26829 26761 -3 26830 26761 26829 -3 26762 26831 26763 -3 26832 26763 26831 -3 26763 26832 26764 -3 26833 26764 26832 -3 26764 26833 26765 -3 26834 26765 26833 -3 26765 26834 26766 -3 26835 26766 26834 -3 26766 26835 26767 -3 26836 26767 26835 -3 26767 26836 26768 -3 26837 26768 26836 -3 26768 26837 26769 -3 26838 26769 26837 -3 26769 26838 26770 -3 26839 26770 26838 -3 26770 26839 26771 -3 26840 26771 26839 -3 26771 26840 26772 -3 26841 26772 26840 -3 26772 26841 26773 -3 26842 26773 26841 -3 26773 26842 26774 -3 26843 26774 26842 -3 26774 26843 26775 -3 26844 26775 26843 -3 26775 26844 26776 -3 26845 26776 26844 -3 26776 26845 26777 -3 26846 26777 26845 -3 26777 26846 26778 -3 26847 26778 26846 -3 26778 26847 26779 -3 26848 26779 26847 -3 26779 26848 26780 -3 26849 26780 26848 -3 26780 26849 26781 -3 26850 26781 26849 -3 26781 26850 26782 -3 26851 26782 26850 -3 26782 26851 26783 -3 26852 26783 26851 -3 26783 26852 26784 -3 26853 26784 26852 -3 26784 26853 26785 -3 26854 26785 26853 -3 26785 26854 26786 -3 26855 26786 26854 -3 26786 26855 26787 -3 26856 26787 26855 -3 26787 26856 26788 -3 26857 26788 26856 -3 26788 26857 26789 -3 26858 26789 26857 -3 26789 26858 26790 -3 26859 26790 26858 -3 26790 26859 26791 -3 26860 26791 26859 -3 26791 26860 26792 -3 26861 26792 26860 -3 26792 26861 26793 -3 26862 26793 26861 -3 26793 26862 26794 -3 26863 26794 26862 -3 26794 26863 26795 -3 26864 26795 26863 -3 26795 26864 26796 -3 26865 26796 26864 -3 26796 26865 26797 -3 26866 26797 26865 -3 26797 26866 26798 -3 26867 26798 26866 -3 26798 26867 26799 -3 26868 26799 26867 -3 26799 26868 26800 
-3 26869 26800 26868 -3 26800 26869 26801 -3 26870 26801 26869 -3 26801 26870 26802 -3 26871 26802 26870 -3 26802 26871 26803 -3 26872 26803 26871 -3 26803 26872 26804 -3 26873 26804 26872 -3 26804 26873 26805 -3 26874 26805 26873 -3 26805 26874 26806 -3 26875 26806 26874 -3 26806 26875 26807 -3 26876 26807 26875 -3 26807 26876 26808 -3 26877 26808 26876 -3 26808 26877 26809 -3 26878 26809 26877 -3 26809 26878 26810 -3 26879 26810 26878 -3 26810 26879 26811 -3 26880 26811 26879 -3 26811 26880 26812 -3 26881 26812 26880 -3 26812 26881 26813 -3 26882 26813 26881 -3 26813 26882 26814 -3 26883 26814 26882 -3 26814 26883 26815 -3 26884 26815 26883 -3 26815 26884 26816 -3 26885 26816 26884 -3 26816 26885 26817 -3 26886 26817 26885 -3 26817 26886 26818 -3 26887 26818 26886 -3 26818 26887 26819 -3 26888 26819 26887 -3 26819 26888 26889 -3 26819 26889 26820 -3 26820 26889 26890 -3 26820 26890 26821 -3 26821 26890 26891 -3 26821 26891 26822 -3 26822 26891 26892 -3 26822 26892 26823 -3 26823 26892 26893 -3 26823 26893 26824 -3 26824 26893 26894 -3 26824 26894 26825 -3 26825 26894 26895 -3 26825 26895 26826 -3 26826 26895 26896 -3 26826 26896 26827 -3 26827 26896 26897 -3 26827 26897 26828 -3 26828 26897 26898 -3 26828 26898 26829 -3 26829 26898 26899 -3 26829 26899 26830 -3 26831 26900 26901 -3 26831 26901 26832 -3 26832 26901 26902 -3 26832 26902 26833 -3 26833 26902 26903 -3 26833 26903 26834 -3 26834 26903 26904 -3 26834 26904 26835 -3 26835 26904 26905 -3 26835 26905 26836 -3 26836 26905 26906 -3 26836 26906 26837 -3 26837 26906 26907 -3 26837 26907 26838 -3 26838 26907 26908 -3 26838 26908 26839 -3 26839 26908 26909 -3 26839 26909 26840 -3 26840 26909 26910 -3 26840 26910 26841 -3 26841 26910 26911 -3 26841 26911 26842 -3 26842 26911 26912 -3 26842 26912 26843 -3 26843 26912 26913 -3 26843 26913 26844 -3 26844 26913 26914 -3 26844 26914 26845 -3 26845 26914 26915 -3 26845 26915 26846 -3 26846 26915 26916 -3 26846 26916 26847 -3 26847 26916 26917 -3 26847 26917 26848 -3 26848 26917 26918 -3 26848 26918 26849 -3 26849 26918 26919 -3 26849 26919 26850 -3 26850 26919 26920 -3 26850 26920 26851 -3 26851 26920 26921 -3 26851 26921 26852 -3 26852 26921 26922 -3 26852 26922 26853 -3 26853 26922 26923 -3 26853 26923 26854 -3 26854 26923 26924 -3 26854 26924 26855 -3 26855 26924 26925 -3 26855 26925 26856 -3 26856 26925 26926 -3 26856 26926 26857 -3 26857 26926 26927 -3 26857 26927 26858 -3 26858 26927 26928 -3 26858 26928 26859 -3 26859 26928 26929 -3 26859 26929 26860 -3 26860 26929 26930 -3 26860 26930 26861 -3 26861 26930 26931 -3 26861 26931 26862 -3 26862 26931 26932 -3 26862 26932 26863 -3 26863 26932 26933 -3 26863 26933 26864 -3 26864 26933 26934 -3 26864 26934 26865 -3 26865 26934 26935 -3 26865 26935 26866 -3 26866 26935 26936 -3 26866 26936 26867 -3 26867 26936 26937 -3 26867 26937 26868 -3 26868 26937 26938 -3 26868 26938 26869 -3 26869 26938 26939 -3 26869 26939 26870 -3 26870 26939 26940 -3 26870 26940 26871 -3 26871 26940 26941 -3 26871 26941 26872 -3 26872 26941 26942 -3 26872 26942 26873 -3 26873 26942 26943 -3 26873 26943 26874 -3 26874 26943 26944 -3 26874 26944 26875 -3 26875 26944 26945 -3 26875 26945 26876 -3 26876 26945 26946 -3 26876 26946 26877 -3 26877 26946 26947 -3 26877 26947 26878 -3 26878 26947 26948 -3 26878 26948 26879 -3 26879 26948 26949 -3 26879 26949 26880 -3 26880 26949 26950 -3 26880 26950 26881 -3 26881 26950 26951 -3 26881 26951 26882 -3 26882 26951 26952 -3 26882 26952 26883 -3 26883 26952 26953 -3 26883 26953 26884 -3 26884 26953 26954 -3 26884 26954 26885 -3 
26885 26954 26955 -3 26885 26955 26886 -3 26886 26955 26956 -3 26886 26956 26887 -3 26887 26956 26957 -3 26887 26957 26888 -3 26888 26957 26958 -3 26888 26958 26889 -3 26889 26958 26959 -3 26889 26959 26890 -3 26890 26959 26960 -3 26890 26960 26891 -3 26891 26960 26961 -3 26891 26961 26892 -3 26892 26961 26962 -3 26892 26962 26893 -3 26893 26962 26963 -3 26893 26963 26894 -3 26894 26963 26964 -3 26894 26964 26895 -3 26895 26964 26965 -3 26895 26965 26896 -3 26896 26965 26966 -3 26896 26966 26897 -3 26897 26966 26967 -3 26897 26967 26898 -3 26898 26967 26968 -3 26898 26968 26899 -3 26900 26969 26970 -3 26900 26970 26901 -3 26901 26970 26971 -3 26901 26971 26902 -3 26902 26971 26972 -3 26902 26972 26903 -3 26903 26972 26973 -3 26903 26973 26904 -3 26904 26973 26974 -3 26904 26974 26905 -3 26905 26974 26975 -3 26905 26975 26906 -3 26906 26975 26976 -3 26906 26976 26907 -3 26907 26976 26977 -3 26907 26977 26908 -3 26908 26977 26978 -3 26908 26978 26909 -3 26909 26978 26979 -3 26909 26979 26910 -3 26910 26979 26980 -3 26910 26980 26911 -3 26911 26980 26981 -3 26911 26981 26912 -3 26912 26981 26982 -3 26912 26982 26913 -3 26913 26982 26983 -3 26913 26983 26914 -3 26914 26983 26984 -3 26914 26984 26915 -3 26915 26984 26985 -3 26915 26985 26916 -3 26916 26985 26986 -3 26916 26986 26917 -3 26917 26986 26987 -3 26917 26987 26918 -3 26918 26987 26988 -3 26918 26988 26919 -3 26919 26988 26989 -3 26919 26989 26920 -3 26920 26989 26921 -3 26990 26921 26989 -3 26921 26990 26922 -3 26991 26922 26990 -3 26922 26991 26923 -3 26992 26923 26991 -3 26923 26992 26924 -3 26993 26924 26992 -3 26924 26993 26925 -3 26994 26925 26993 -3 26925 26994 26926 -3 26995 26926 26994 -3 26926 26995 26927 -3 26996 26927 26995 -3 26927 26996 26928 -3 26997 26928 26996 -3 26928 26997 26929 -3 26998 26929 26997 -3 26929 26998 26930 -3 26999 26930 26998 -3 26930 26999 26931 -3 27000 26931 26999 -3 26931 27000 26932 -3 27001 26932 27000 -3 26932 27001 26933 -3 27002 26933 27001 -3 26933 27002 26934 -3 27003 26934 27002 -3 26934 27003 26935 -3 27004 26935 27003 -3 26935 27004 26936 -3 27005 26936 27004 -3 26936 27005 26937 -3 27006 26937 27005 -3 26937 27006 26938 -3 27007 26938 27006 -3 26938 27007 26939 -3 27008 26939 27007 -3 26939 27008 26940 -3 27009 26940 27008 -3 26940 27009 26941 -3 27010 26941 27009 -3 26941 27010 26942 -3 27011 26942 27010 -3 26942 27011 26943 -3 27012 26943 27011 -3 26943 27012 26944 -3 27013 26944 27012 -3 26944 27013 26945 -3 27014 26945 27013 -3 26945 27014 26946 -3 27015 26946 27014 -3 26946 27015 26947 -3 27016 26947 27015 -3 26947 27016 26948 -3 27017 26948 27016 -3 26948 27017 26949 -3 27018 26949 27017 -3 26949 27018 26950 -3 27019 26950 27018 -3 26950 27019 26951 -3 27020 26951 27019 -3 26951 27020 26952 -3 27021 26952 27020 -3 26952 27021 26953 -3 27022 26953 27021 -3 26953 27022 26954 -3 27023 26954 27022 -3 26954 27023 26955 -3 27024 26955 27023 -3 26955 27024 26956 -3 27025 26956 27024 -3 26956 27025 26957 -3 27026 26957 27025 -3 26957 27026 26958 -3 27027 26958 27026 -3 26958 27027 26959 -3 27028 26959 27027 -3 26959 27028 26960 -3 27029 26960 27028 -3 26960 27029 26961 -3 27030 26961 27029 -3 26961 27030 26962 -3 27031 26962 27030 -3 26962 27031 26963 -3 27032 26963 27031 -3 26963 27032 26964 -3 27033 26964 27032 -3 26964 27033 26965 -3 27034 26965 27033 -3 26965 27034 26966 -3 27035 26966 27034 -3 26966 27035 26967 -3 27036 26967 27035 -3 26967 27036 26968 -3 27037 26968 27036 -3 26969 27038 26970 -3 27039 26970 27038 -3 26970 27039 26971 -3 27040 26971 27039 -3 26971 27040 26972 -3 27041 
[Surface-mesh triangle connectivity data (vertex indices roughly 26972-28757) from a data file in this patch; the raw index listing is omitted here.]
28757 28758 -3 28688 28758 28689 -3 28689 28758 28759 -3 28689 28759 28690 -3 28690 28759 28760 -3 28690 28760 28691 -3 28691 28760 28761 -3 28691 28761 28692 -3 28692 28761 28762 -3 28692 28762 28693 -3 28694 28763 28764 -3 28694 28764 28695 -3 28695 28764 28765 -3 28695 28765 28696 -3 28696 28765 28766 -3 28696 28766 28697 -3 28697 28766 28767 -3 28697 28767 28698 -3 28698 28767 28768 -3 28698 28768 28699 -3 28699 28768 28769 -3 28699 28769 28700 -3 28700 28769 28770 -3 28700 28770 28701 -3 28701 28770 28771 -3 28701 28771 28702 -3 28702 28771 28772 -3 28702 28772 28703 -3 28703 28772 28773 -3 28703 28773 28704 -3 28704 28773 28774 -3 28704 28774 28705 -3 28705 28774 28775 -3 28705 28775 28706 -3 28706 28775 28776 -3 28706 28776 28707 -3 28707 28776 28777 -3 28707 28777 28708 -3 28708 28777 28778 -3 28708 28778 28709 -3 28709 28778 28779 -3 28709 28779 28710 -3 28710 28779 28780 -3 28710 28780 28711 -3 28711 28780 28781 -3 28711 28781 28712 -3 28712 28781 28782 -3 28712 28782 28713 -3 28713 28782 28783 -3 28713 28783 28714 -3 28714 28783 28784 -3 28714 28784 28715 -3 28715 28784 28785 -3 28715 28785 28716 -3 28716 28785 28786 -3 28716 28786 28717 -3 28717 28786 28787 -3 28717 28787 28718 -3 28718 28787 28788 -3 28718 28788 28719 -3 28719 28788 28789 -3 28719 28789 28720 -3 28720 28789 28790 -3 28720 28790 28721 -3 28721 28790 28791 -3 28721 28791 28722 -3 28722 28791 28792 -3 28722 28792 28723 -3 28723 28792 28793 -3 28723 28793 28724 -3 28724 28793 28794 -3 28724 28794 28725 -3 28725 28794 28795 -3 28725 28795 28726 -3 28726 28795 28796 -3 28726 28796 28727 -3 28727 28796 28797 -3 28727 28797 28728 -3 28728 28797 28798 -3 28728 28798 28729 -3 28729 28798 28799 -3 28729 28799 28730 -3 28730 28799 28800 -3 28730 28800 28731 -3 28731 28800 28801 -3 28731 28801 28732 -3 28732 28801 28802 -3 28732 28802 28733 -3 28733 28802 28803 -3 28733 28803 28734 -3 28734 28803 28804 -3 28734 28804 28735 -3 28735 28804 28805 -3 28735 28805 28736 -3 28736 28805 28806 -3 28736 28806 28737 -3 28737 28806 28807 -3 28737 28807 28738 -3 28738 28807 28808 -3 28738 28808 28739 -3 28739 28808 28809 -3 28739 28809 28740 -3 28740 28809 28810 -3 28740 28810 28741 -3 28741 28810 28811 -3 28741 28811 28742 -3 28742 28811 28812 -3 28742 28812 28743 -3 28743 28812 28813 -3 28743 28813 28744 -3 28744 28813 28814 -3 28744 28814 28745 -3 28745 28814 28815 -3 28745 28815 28746 -3 28746 28815 28816 -3 28746 28816 28747 -3 28747 28816 28817 -3 28747 28817 28748 -3 28748 28817 28818 -3 28748 28818 28749 -3 28749 28818 28819 -3 28749 28819 28750 -3 28750 28819 28820 -3 28750 28820 28751 -3 28751 28820 28821 -3 28751 28821 28752 -3 28752 28821 28822 -3 28752 28822 28753 -3 28753 28822 28823 -3 28753 28823 28754 -3 28754 28823 28824 -3 28754 28824 28755 -3 28755 28824 28825 -3 28755 28825 28756 -3 28756 28825 28826 -3 28756 28826 28757 -3 28757 28826 28827 -3 28757 28827 28758 -3 28758 28827 28828 -3 28758 28828 28759 -3 28759 28828 28829 -3 28759 28829 28760 -3 28760 28829 28830 -3 28760 28830 28761 -3 28761 28830 28831 -3 28761 28831 28762 -3 28763 28832 28833 -3 28763 28833 28764 -3 28764 28833 28834 -3 28764 28834 28765 -3 28765 28834 28835 -3 28765 28835 28766 -3 28766 28835 28836 -3 28766 28836 28767 -3 28767 28836 28837 -3 28767 28837 28768 -3 28768 28837 28838 -3 28768 28838 28769 -3 28769 28838 28839 -3 28769 28839 28770 -3 28770 28839 28771 -3 28840 28771 28839 -3 28771 28840 28772 -3 28841 28772 28840 -3 28772 28841 28773 -3 28842 28773 28841 -3 28773 28842 28774 -3 28843 28774 28842 -3 28774 28843 28775 -3 28844 28775 
28843 -3 28775 28844 28776 -3 28845 28776 28844 -3 28776 28845 28777 -3 28846 28777 28845 -3 28777 28846 28778 -3 28847 28778 28846 -3 28778 28847 28779 -3 28848 28779 28847 -3 28779 28848 28780 -3 28849 28780 28848 -3 28780 28849 28781 -3 28850 28781 28849 -3 28781 28850 28782 -3 28851 28782 28850 -3 28782 28851 28783 -3 28852 28783 28851 -3 28783 28852 28784 -3 28853 28784 28852 -3 28784 28853 28785 -3 28854 28785 28853 -3 28785 28854 28786 -3 28855 28786 28854 -3 28786 28855 28787 -3 28856 28787 28855 -3 28787 28856 28788 -3 28857 28788 28856 -3 28788 28857 28789 -3 28858 28789 28857 -3 28789 28858 28790 -3 28859 28790 28858 -3 28790 28859 28791 -3 28860 28791 28859 -3 28791 28860 28792 -3 28861 28792 28860 -3 28792 28861 28793 -3 28862 28793 28861 -3 28793 28862 28794 -3 28863 28794 28862 -3 28794 28863 28795 -3 28864 28795 28863 -3 28795 28864 28796 -3 28865 28796 28864 -3 28796 28865 28797 -3 28866 28797 28865 -3 28797 28866 28798 -3 28867 28798 28866 -3 28798 28867 28799 -3 28868 28799 28867 -3 28799 28868 28800 -3 28869 28800 28868 -3 28800 28869 28801 -3 28870 28801 28869 -3 28801 28870 28802 -3 28871 28802 28870 -3 28802 28871 28803 -3 28872 28803 28871 -3 28803 28872 28804 -3 28873 28804 28872 -3 28804 28873 28805 -3 28874 28805 28873 -3 28805 28874 28806 -3 28875 28806 28874 -3 28806 28875 28807 -3 28876 28807 28875 -3 28807 28876 28808 -3 28877 28808 28876 -3 28808 28877 28809 -3 28878 28809 28877 -3 28809 28878 28810 -3 28879 28810 28878 -3 28810 28879 28811 -3 28880 28811 28879 -3 28811 28880 28812 -3 28881 28812 28880 -3 28812 28881 28813 -3 28882 28813 28881 -3 28813 28882 28814 -3 28883 28814 28882 -3 28814 28883 28815 -3 28884 28815 28883 -3 28815 28884 28816 -3 28885 28816 28884 -3 28816 28885 28817 -3 28886 28817 28885 -3 28817 28886 28818 -3 28887 28818 28886 -3 28818 28887 28819 -3 28888 28819 28887 -3 28819 28888 28820 -3 28889 28820 28888 -3 28820 28889 28821 -3 28890 28821 28889 -3 28821 28890 28822 -3 28891 28822 28890 -3 28822 28891 28823 -3 28892 28823 28891 -3 28823 28892 28824 -3 28893 28824 28892 -3 28824 28893 28825 -3 28894 28825 28893 -3 28825 28894 28826 -3 28895 28826 28894 -3 28826 28895 28827 -3 28896 28827 28895 -3 28827 28896 28828 -3 28897 28828 28896 -3 28828 28897 28829 -3 28898 28829 28897 -3 28829 28898 28830 -3 28899 28830 28898 -3 28830 28899 28831 -3 28900 28831 28899 -3 28832 28901 28833 -3 28902 28833 28901 -3 28833 28902 28834 -3 28903 28834 28902 -3 28834 28903 28835 -3 28904 28835 28903 -3 28835 28904 28836 -3 28905 28836 28904 -3 28836 28905 28837 -3 28906 28837 28905 -3 28837 28906 28838 -3 28907 28838 28906 -3 28838 28907 28839 -3 28908 28839 28907 -3 28839 28908 28840 -3 28909 28840 28908 -3 28840 28909 28841 -3 28910 28841 28909 -3 28841 28910 28842 -3 28911 28842 28910 -3 28842 28911 28843 -3 28912 28843 28911 -3 28843 28912 28844 -3 28913 28844 28912 -3 28844 28913 28845 -3 28914 28845 28913 -3 28845 28914 28846 -3 28915 28846 28914 -3 28846 28915 28847 -3 28916 28847 28915 -3 28847 28916 28848 -3 28917 28848 28916 -3 28848 28917 28849 -3 28918 28849 28917 -3 28849 28918 28850 -3 28919 28850 28918 -3 28850 28919 28851 -3 28920 28851 28919 -3 28851 28920 28852 -3 28921 28852 28920 -3 28852 28921 28853 -3 28922 28853 28921 -3 28853 28922 28854 -3 28923 28854 28922 -3 28854 28923 28855 -3 28924 28855 28923 -3 28855 28924 28856 -3 28925 28856 28924 -3 28856 28925 28857 -3 28926 28857 28925 -3 28857 28926 28858 -3 28927 28858 28926 -3 28858 28927 28859 -3 28928 28859 28927 -3 28859 28928 28860 -3 28929 28860 28928 -3 28860 28929 28861 
-3 28930 28861 28929 -3 28861 28930 28862 -3 28931 28862 28930 -3 28862 28931 28863 -3 28932 28863 28931 -3 28863 28932 28864 -3 28933 28864 28932 -3 28864 28933 28865 -3 28934 28865 28933 -3 28865 28934 28866 -3 28935 28866 28934 -3 28866 28935 28867 -3 28936 28867 28935 -3 28867 28936 28868 -3 28937 28868 28936 -3 28868 28937 28869 -3 28938 28869 28937 -3 28869 28938 28870 -3 28939 28870 28938 -3 28870 28939 28871 -3 28940 28871 28939 -3 28871 28940 28872 -3 28941 28872 28940 -3 28872 28941 28873 -3 28942 28873 28941 -3 28873 28942 28874 -3 28943 28874 28942 -3 28874 28943 28944 -3 28874 28944 28875 -3 28875 28944 28945 -3 28875 28945 28876 -3 28876 28945 28946 -3 28876 28946 28877 -3 28877 28946 28947 -3 28877 28947 28878 -3 28878 28947 28948 -3 28878 28948 28879 -3 28879 28948 28949 -3 28879 28949 28880 -3 28880 28949 28950 -3 28880 28950 28881 -3 28881 28950 28951 -3 28881 28951 28882 -3 28882 28951 28952 -3 28882 28952 28883 -3 28883 28952 28953 -3 28883 28953 28884 -3 28884 28953 28954 -3 28884 28954 28885 -3 28885 28954 28955 -3 28885 28955 28886 -3 28886 28955 28956 -3 28886 28956 28887 -3 28887 28956 28957 -3 28887 28957 28888 -3 28888 28957 28958 -3 28888 28958 28889 -3 28889 28958 28959 -3 28889 28959 28890 -3 28890 28959 28960 -3 28890 28960 28891 -3 28891 28960 28961 -3 28891 28961 28892 -3 28892 28961 28962 -3 28892 28962 28893 -3 28893 28962 28963 -3 28893 28963 28894 -3 28894 28963 28964 -3 28894 28964 28895 -3 28895 28964 28965 -3 28895 28965 28896 -3 28896 28965 28966 -3 28896 28966 28897 -3 28897 28966 28967 -3 28897 28967 28898 -3 28898 28967 28968 -3 28898 28968 28899 -3 28899 28968 28969 -3 28899 28969 28900 -3 28901 28970 28971 -3 28901 28971 28902 -3 28902 28971 28972 -3 28902 28972 28903 -3 28903 28972 28973 -3 28903 28973 28904 -3 28904 28973 28974 -3 28904 28974 28905 -3 28905 28974 28975 -3 28905 28975 28906 -3 28906 28975 28976 -3 28906 28976 28907 -3 28907 28976 28977 -3 28907 28977 28908 -3 28908 28977 28978 -3 28908 28978 28909 -3 28909 28978 28979 -3 28909 28979 28910 -3 28910 28979 28980 -3 28910 28980 28911 -3 28911 28980 28981 -3 28911 28981 28912 -3 28912 28981 28982 -3 28912 28982 28913 -3 28913 28982 28983 -3 28913 28983 28914 -3 28914 28983 28984 -3 28914 28984 28915 -3 28915 28984 28985 -3 28915 28985 28916 -3 28916 28985 28986 -3 28916 28986 28917 -3 28917 28986 28987 -3 28917 28987 28918 -3 28918 28987 28988 -3 28918 28988 28919 -3 28919 28988 28989 -3 28919 28989 28920 -3 28920 28989 28990 -3 28920 28990 28921 -3 28921 28990 28991 -3 28921 28991 28922 -3 28922 28991 28992 -3 28922 28992 28923 -3 28923 28992 28993 -3 28923 28993 28924 -3 28924 28993 28994 -3 28924 28994 28925 -3 28925 28994 28995 -3 28925 28995 28926 -3 28926 28995 28996 -3 28926 28996 28927 -3 28927 28996 28997 -3 28927 28997 28928 -3 28928 28997 28998 -3 28928 28998 28929 -3 28929 28998 28999 -3 28929 28999 28930 -3 28930 28999 29000 -3 28930 29000 28931 -3 28931 29000 29001 -3 28931 29001 28932 -3 28932 29001 29002 -3 28932 29002 28933 -3 28933 29002 29003 -3 28933 29003 28934 -3 28934 29003 29004 -3 28934 29004 28935 -3 28935 29004 29005 -3 28935 29005 28936 -3 28936 29005 29006 -3 28936 29006 28937 -3 28937 29006 29007 -3 28937 29007 28938 -3 28938 29007 29008 -3 28938 29008 28939 -3 28939 29008 29009 -3 28939 29009 28940 -3 28940 29009 29010 -3 28940 29010 28941 -3 28941 29010 29011 -3 28941 29011 28942 -3 28942 29011 29012 -3 28942 29012 28943 -3 28943 29012 29013 -3 28943 29013 28944 -3 28944 29013 29014 -3 28944 29014 28945 -3 28945 29014 29015 -3 28945 29015 28946 -3 
28946 29015 29016 -3 28946 29016 28947 -3 28947 29016 29017 -3 28947 29017 28948 -3 28948 29017 29018 -3 28948 29018 28949 -3 28949 29018 29019 -3 28949 29019 28950 -3 28950 29019 29020 -3 28950 29020 28951 -3 28951 29020 29021 -3 28951 29021 28952 -3 28952 29021 29022 -3 28952 29022 28953 -3 28953 29022 29023 -3 28953 29023 28954 -3 28954 29023 29024 -3 28954 29024 28955 -3 28955 29024 29025 -3 28955 29025 28956 -3 28956 29025 29026 -3 28956 29026 28957 -3 28957 29026 29027 -3 28957 29027 28958 -3 28958 29027 29028 -3 28958 29028 28959 -3 28959 29028 29029 -3 28959 29029 28960 -3 28960 29029 29030 -3 28960 29030 28961 -3 28961 29030 29031 -3 28961 29031 28962 -3 28962 29031 29032 -3 28962 29032 28963 -3 28963 29032 29033 -3 28963 29033 28964 -3 28964 29033 29034 -3 28964 29034 28965 -3 28965 29034 29035 -3 28965 29035 28966 -3 28966 29035 29036 -3 28966 29036 28967 -3 28967 29036 29037 -3 28967 29037 28968 -3 28968 29037 29038 -3 28968 29038 28969 -3 28970 29039 29040 -3 28970 29040 28971 -3 28971 29040 29041 -3 28971 29041 28972 -3 28972 29041 29042 -3 28972 29042 28973 -3 28973 29042 29043 -3 28973 29043 28974 -3 28974 29043 29044 -3 28974 29044 28975 -3 28975 29044 29045 -3 28975 29045 28976 -3 28976 29045 29046 -3 28976 29046 28977 -3 28977 29046 29047 -3 28977 29047 28978 -3 28978 29047 29048 -3 28978 29048 28979 -3 28979 29048 28980 -3 29049 28980 29048 -3 28980 29049 28981 -3 29050 28981 29049 -3 28981 29050 28982 -3 29051 28982 29050 -3 28982 29051 28983 -3 29052 28983 29051 -3 28983 29052 28984 -3 29053 28984 29052 -3 28984 29053 28985 -3 29054 28985 29053 -3 28985 29054 28986 -3 29055 28986 29054 -3 28986 29055 28987 -3 29056 28987 29055 -3 28987 29056 28988 -3 29057 28988 29056 -3 28988 29057 28989 -3 29058 28989 29057 -3 28989 29058 28990 -3 29059 28990 29058 -3 28990 29059 28991 -3 29060 28991 29059 -3 28991 29060 28992 -3 29061 28992 29060 -3 28992 29061 28993 -3 29062 28993 29061 -3 28993 29062 28994 -3 29063 28994 29062 -3 28994 29063 28995 -3 29064 28995 29063 -3 28995 29064 28996 -3 29065 28996 29064 -3 28996 29065 28997 -3 29066 28997 29065 -3 28997 29066 28998 -3 29067 28998 29066 -3 28998 29067 28999 -3 29068 28999 29067 -3 28999 29068 29000 -3 29069 29000 29068 -3 29000 29069 29001 -3 29070 29001 29069 -3 29001 29070 29002 -3 29071 29002 29070 -3 29002 29071 29003 -3 29072 29003 29071 -3 29003 29072 29004 -3 29073 29004 29072 -3 29004 29073 29005 -3 29074 29005 29073 -3 29005 29074 29006 -3 29075 29006 29074 -3 29006 29075 29007 -3 29076 29007 29075 -3 29007 29076 29008 -3 29077 29008 29076 -3 29008 29077 29009 -3 29078 29009 29077 -3 29009 29078 29010 -3 29079 29010 29078 -3 29010 29079 29011 -3 29080 29011 29079 -3 29011 29080 29012 -3 29081 29012 29080 -3 29012 29081 29013 -3 29082 29013 29081 -3 29013 29082 29014 -3 29083 29014 29082 -3 29014 29083 29015 -3 29084 29015 29083 -3 29015 29084 29016 -3 29085 29016 29084 -3 29016 29085 29017 -3 29086 29017 29085 -3 29017 29086 29018 -3 29087 29018 29086 -3 29018 29087 29019 -3 29088 29019 29087 -3 29019 29088 29020 -3 29089 29020 29088 -3 29020 29089 29021 -3 29090 29021 29089 -3 29021 29090 29022 -3 29091 29022 29090 -3 29022 29091 29023 -3 29092 29023 29091 -3 29023 29092 29024 -3 29093 29024 29092 -3 29024 29093 29025 -3 29094 29025 29093 -3 29025 29094 29026 -3 29095 29026 29094 -3 29026 29095 29027 -3 29096 29027 29095 -3 29027 29096 29028 -3 29097 29028 29096 -3 29028 29097 29029 -3 29098 29029 29097 -3 29029 29098 29030 -3 29099 29030 29098 -3 29030 29099 29031 -3 29100 29031 29099 -3 29031 29100 29032 -3 29101 
29032 29100 -3 29032 29101 29033 -3 29102 29033 29101 -3 29033 29102 29034 -3 29103 29034 29102 -3 29034 29103 29035 -3 29104 29035 29103 -3 29035 29104 29036 -3 29105 29036 29104 -3 29036 29105 29037 -3 29106 29037 29105 -3 29037 29106 29038 -3 29107 29038 29106 -3 29039 29108 29040 -3 29109 29040 29108 -3 29040 29109 29041 -3 29110 29041 29109 -3 29041 29110 29042 -3 29111 29042 29110 -3 29042 29111 29043 -3 29112 29043 29111 -3 29043 29112 29044 -3 29113 29044 29112 -3 29044 29113 29045 -3 29114 29045 29113 -3 29045 29114 29046 -3 29115 29046 29114 -3 29046 29115 29047 -3 29116 29047 29115 -3 29047 29116 29048 -3 29117 29048 29116 -3 29048 29117 29049 -3 29118 29049 29117 -3 29049 29118 29050 -3 29119 29050 29118 -3 29050 29119 29051 -3 29120 29051 29119 -3 29051 29120 29052 -3 29121 29052 29120 -3 29052 29121 29053 -3 29122 29053 29121 -3 29053 29122 29054 -3 29123 29054 29122 -3 29054 29123 29055 -3 29124 29055 29123 -3 29055 29124 29056 -3 29125 29056 29124 -3 29056 29125 29057 -3 29126 29057 29125 -3 29057 29126 29058 -3 29127 29058 29126 -3 29058 29127 29059 -3 29128 29059 29127 -3 29059 29128 29060 -3 29129 29060 29128 -3 29060 29129 29061 -3 29130 29061 29129 -3 29061 29130 29062 -3 29131 29062 29130 -3 29062 29131 29063 -3 29132 29063 29131 -3 29063 29132 29064 -3 29133 29064 29132 -3 29064 29133 29065 -3 29134 29065 29133 -3 29065 29134 29066 -3 29135 29066 29134 -3 29066 29135 29067 -3 29136 29067 29135 -3 29067 29136 29068 -3 29137 29068 29136 -3 29068 29137 29069 -3 29138 29069 29137 -3 29069 29138 29070 -3 29139 29070 29138 -3 29070 29139 29071 -3 29140 29071 29139 -3 29071 29140 29072 -3 29141 29072 29140 -3 29072 29141 29073 -3 29142 29073 29141 -3 29073 29142 29074 -3 29143 29074 29142 -3 29074 29143 29075 -3 29144 29075 29143 -3 29075 29144 29076 -3 29145 29076 29144 -3 29076 29145 29077 -3 29146 29077 29145 -3 29077 29146 29078 -3 29147 29078 29146 -3 29078 29147 29079 -3 29148 29079 29147 -3 29079 29148 29080 -3 29149 29080 29148 -3 29080 29149 29081 -3 29150 29081 29149 -3 29081 29150 29082 -3 29151 29082 29150 -3 29082 29151 29083 -3 29152 29083 29151 -3 29083 29152 29084 -3 29153 29084 29152 -3 29084 29153 29154 -3 29084 29154 29085 -3 29085 29154 29155 -3 29085 29155 29086 -3 29086 29155 29156 -3 29086 29156 29087 -3 29087 29156 29157 -3 29087 29157 29088 -3 29088 29157 29158 -3 29088 29158 29089 -3 29089 29158 29159 -3 29089 29159 29090 -3 29090 29159 29160 -3 29090 29160 29091 -3 29091 29160 29161 -3 29091 29161 29092 -3 29092 29161 29162 -3 29092 29162 29093 -3 29093 29162 29163 -3 29093 29163 29094 -3 29094 29163 29164 -3 29094 29164 29095 -3 29095 29164 29165 -3 29095 29165 29096 -3 29096 29165 29166 -3 29096 29166 29097 -3 29097 29166 29167 -3 29097 29167 29098 -3 29098 29167 29168 -3 29098 29168 29099 -3 29099 29168 29169 -3 29099 29169 29100 -3 29100 29169 29170 -3 29100 29170 29101 -3 29101 29170 29171 -3 29101 29171 29102 -3 29102 29171 29172 -3 29102 29172 29103 -3 29103 29172 29173 -3 29103 29173 29104 -3 29104 29173 29174 -3 29104 29174 29105 -3 29105 29174 29175 -3 29105 29175 29106 -3 29106 29175 29176 -3 29106 29176 29107 -3 29108 29177 29178 -3 29108 29178 29109 -3 29109 29178 29179 -3 29109 29179 29110 -3 29110 29179 29180 -3 29110 29180 29111 -3 29111 29180 29181 -3 29111 29181 29112 -3 29112 29181 29182 -3 29112 29182 29113 -3 29113 29182 29183 -3 29113 29183 29114 -3 29114 29183 29184 -3 29114 29184 29115 -3 29115 29184 29185 -3 29115 29185 29116 -3 29116 29185 29186 -3 29116 29186 29117 -3 29117 29186 29187 -3 29117 29187 29118 -3 29118 29187 
29188 -3 29118 29188 29119 -3 29119 29188 29189 -3 29119 29189 29120 -3 29120 29189 29190 -3 29120 29190 29121 -3 29121 29190 29191 -3 29121 29191 29122 -3 29122 29191 29192 -3 29122 29192 29123 -3 29123 29192 29193 -3 29123 29193 29124 -3 29124 29193 29194 -3 29124 29194 29125 -3 29125 29194 29195 -3 29125 29195 29126 -3 29126 29195 29196 -3 29126 29196 29127 -3 29127 29196 29197 -3 29127 29197 29128 -3 29128 29197 29198 -3 29128 29198 29129 -3 29129 29198 29199 -3 29129 29199 29130 -3 29130 29199 29200 -3 29130 29200 29131 -3 29131 29200 29201 -3 29131 29201 29132 -3 29132 29201 29202 -3 29132 29202 29133 -3 29133 29202 29203 -3 29133 29203 29134 -3 29134 29203 29204 -3 29134 29204 29135 -3 29135 29204 29205 -3 29135 29205 29136 -3 29136 29205 29206 -3 29136 29206 29137 -3 29137 29206 29207 -3 29137 29207 29138 -3 29138 29207 29208 -3 29138 29208 29139 -3 29139 29208 29209 -3 29139 29209 29140 -3 29140 29209 29210 -3 29140 29210 29141 -3 29141 29210 29211 -3 29141 29211 29142 -3 29142 29211 29212 -3 29142 29212 29143 -3 29143 29212 29213 -3 29143 29213 29144 -3 29144 29213 29214 -3 29144 29214 29145 -3 29145 29214 29215 -3 29145 29215 29146 -3 29146 29215 29216 -3 29146 29216 29147 -3 29147 29216 29217 -3 29147 29217 29148 -3 29148 29217 29218 -3 29148 29218 29149 -3 29149 29218 29219 -3 29149 29219 29150 -3 29150 29219 29220 -3 29150 29220 29151 -3 29151 29220 29221 -3 29151 29221 29152 -3 29152 29221 29222 -3 29152 29222 29153 -3 29153 29222 29223 -3 29153 29223 29154 -3 29154 29223 29224 -3 29154 29224 29155 -3 29155 29224 29225 -3 29155 29225 29156 -3 29156 29225 29226 -3 29156 29226 29157 -3 29157 29226 29227 -3 29157 29227 29158 -3 29158 29227 29228 -3 29158 29228 29159 -3 29159 29228 29229 -3 29159 29229 29160 -3 29160 29229 29230 -3 29160 29230 29161 -3 29161 29230 29231 -3 29161 29231 29162 -3 29162 29231 29232 -3 29162 29232 29163 -3 29163 29232 29233 -3 29163 29233 29164 -3 29164 29233 29234 -3 29164 29234 29165 -3 29165 29234 29235 -3 29165 29235 29166 -3 29166 29235 29236 -3 29166 29236 29167 -3 29167 29236 29237 -3 29167 29237 29168 -3 29168 29237 29238 -3 29168 29238 29169 -3 29169 29238 29239 -3 29169 29239 29170 -3 29170 29239 29240 -3 29170 29240 29171 -3 29171 29240 29241 -3 29171 29241 29172 -3 29172 29241 29242 -3 29172 29242 29173 -3 29173 29242 29243 -3 29173 29243 29174 -3 29174 29243 29244 -3 29174 29244 29175 -3 29175 29244 29245 -3 29175 29245 29176 -3 29177 29246 29247 -3 29177 29247 29178 -3 29178 29247 29248 -3 29178 29248 29179 -3 29179 29248 29249 -3 29179 29249 29180 -3 29180 29249 29250 -3 29180 29250 29181 -3 29181 29250 29251 -3 29181 29251 29182 -3 29182 29251 29252 -3 29182 29252 29183 -3 29183 29252 29253 -3 29183 29253 29184 -3 29184 29253 29254 -3 29184 29254 29185 -3 29185 29254 29255 -3 29185 29255 29186 -3 29186 29255 29256 -3 29186 29256 29187 -3 29187 29256 29257 -3 29187 29257 29188 -3 29188 29257 29258 -3 29188 29258 29189 -3 29189 29258 29190 -3 29259 29190 29258 -3 29190 29259 29191 -3 29260 29191 29259 -3 29191 29260 29192 -3 29261 29192 29260 -3 29192 29261 29193 -3 29262 29193 29261 -3 29193 29262 29194 -3 29263 29194 29262 -3 29194 29263 29195 -3 29264 29195 29263 -3 29195 29264 29196 -3 29265 29196 29264 -3 29196 29265 29197 -3 29266 29197 29265 -3 29197 29266 29198 -3 29267 29198 29266 -3 29198 29267 29199 -3 29268 29199 29267 -3 29199 29268 29200 -3 29269 29200 29268 -3 29200 29269 29201 -3 29270 29201 29269 -3 29201 29270 29202 -3 29271 29202 29270 -3 29202 29271 29203 -3 29272 29203 29271 -3 29203 29272 29204 -3 29273 29204 29272 
-3 29204 29273 29205 -3 29274 29205 29273 -3 29205 29274 29206 -3 29275 29206 29274 -3 29206 29275 29207 -3 29276 29207 29275 -3 29207 29276 29208 -3 29277 29208 29276 -3 29208 29277 29209 -3 29278 29209 29277 -3 29209 29278 29210 -3 29279 29210 29278 -3 29210 29279 29211 -3 29280 29211 29279 -3 29211 29280 29212 -3 29281 29212 29280 -3 29212 29281 29213 -3 29282 29213 29281 -3 29213 29282 29214 -3 29283 29214 29282 -3 29214 29283 29215 -3 29284 29215 29283 -3 29215 29284 29216 -3 29285 29216 29284 -3 29216 29285 29217 -3 29286 29217 29285 -3 29217 29286 29218 -3 29287 29218 29286 -3 29218 29287 29219 -3 29288 29219 29287 -3 29219 29288 29220 -3 29289 29220 29288 -3 29220 29289 29221 -3 29290 29221 29289 -3 29221 29290 29222 -3 29291 29222 29290 -3 29222 29291 29223 -3 29292 29223 29291 -3 29223 29292 29224 -3 29293 29224 29292 -3 29224 29293 29225 -3 29294 29225 29293 -3 29225 29294 29226 -3 29295 29226 29294 -3 29226 29295 29227 -3 29296 29227 29295 -3 29227 29296 29228 -3 29297 29228 29296 -3 29228 29297 29229 -3 29298 29229 29297 -3 29229 29298 29230 -3 29299 29230 29298 -3 29230 29299 29231 -3 29300 29231 29299 -3 29231 29300 29232 -3 29301 29232 29300 -3 29232 29301 29233 -3 29302 29233 29301 -3 29233 29302 29234 -3 29303 29234 29302 -3 29234 29303 29235 -3 29304 29235 29303 -3 29235 29304 29236 -3 29305 29236 29304 -3 29236 29305 29237 -3 29306 29237 29305 -3 29237 29306 29238 -3 29307 29238 29306 -3 29238 29307 29239 -3 29308 29239 29307 -3 29239 29308 29240 -3 29309 29240 29308 -3 29240 29309 29241 -3 29310 29241 29309 -3 29241 29310 29242 -3 29311 29242 29310 -3 29242 29311 29243 -3 29312 29243 29311 -3 29243 29312 29244 -3 29313 29244 29312 -3 29244 29313 29245 -3 29314 29245 29313 -3 29246 29315 29247 -3 29316 29247 29315 -3 29247 29316 29248 -3 29317 29248 29316 -3 29248 29317 29249 -3 29318 29249 29317 -3 29249 29318 29250 -3 29319 29250 29318 -3 29250 29319 29251 -3 29320 29251 29319 -3 29251 29320 29252 -3 29321 29252 29320 -3 29252 29321 29253 -3 29322 29253 29321 -3 29253 29322 29254 -3 29323 29254 29322 -3 29254 29323 29255 -3 29324 29255 29323 -3 29255 29324 29256 -3 29325 29256 29324 -3 29256 29325 29257 -3 29326 29257 29325 -3 29257 29326 29258 -3 29327 29258 29326 -3 29258 29327 29259 -3 29328 29259 29327 -3 29259 29328 29260 -3 29329 29260 29328 -3 29260 29329 29261 -3 29330 29261 29329 -3 29261 29330 29262 -3 29331 29262 29330 -3 29262 29331 29263 -3 29332 29263 29331 -3 29263 29332 29264 -3 29333 29264 29332 -3 29264 29333 29265 -3 29334 29265 29333 -3 29265 29334 29266 -3 29335 29266 29334 -3 29266 29335 29267 -3 29336 29267 29335 -3 29267 29336 29268 -3 29337 29268 29336 -3 29268 29337 29269 -3 29338 29269 29337 -3 29269 29338 29270 -3 29339 29270 29338 -3 29270 29339 29271 -3 29340 29271 29339 -3 29271 29340 29272 -3 29341 29272 29340 -3 29272 29341 29273 -3 29342 29273 29341 -3 29273 29342 29274 -3 29343 29274 29342 -3 29274 29343 29275 -3 29344 29275 29343 -3 29275 29344 29276 -3 29345 29276 29344 -3 29276 29345 29277 -3 29346 29277 29345 -3 29277 29346 29278 -3 29347 29278 29346 -3 29278 29347 29279 -3 29348 29279 29347 -3 29279 29348 29280 -3 29349 29280 29348 -3 29280 29349 29281 -3 29350 29281 29349 -3 29281 29350 29282 -3 29351 29282 29350 -3 29282 29351 29283 -3 29352 29283 29351 -3 29283 29352 29284 -3 29353 29284 29352 -3 29284 29353 29285 -3 29354 29285 29353 -3 29285 29354 29286 -3 29355 29286 29354 -3 29286 29355 29287 -3 29356 29287 29355 -3 29287 29356 29288 -3 29357 29288 29356 -3 29288 29357 29289 -3 29358 29289 29357 -3 29289 29358 29290 -3 
29359 29290 29358 -3 29290 29359 29291 -3 29360 29291 29359 -3 29291 29360 29292 -3 29361 29292 29360 -3 29292 29361 29293 -3 29362 29293 29361 -3 29293 29362 29294 -3 29363 29294 29362 -3 29294 29363 29295 -3 29364 29295 29363 -3 29295 29364 29365 -3 29295 29365 29296 -3 29296 29365 29366 -3 29296 29366 29297 -3 29297 29366 29367 -3 29297 29367 29298 -3 29298 29367 29368 -3 29298 29368 29299 -3 29299 29368 29369 -3 29299 29369 29300 -3 29300 29369 29370 -3 29300 29370 29301 -3 29301 29370 29371 -3 29301 29371 29302 -3 29302 29371 29372 -3 29302 29372 29303 -3 29303 29372 29373 -3 29303 29373 29304 -3 29304 29373 29374 -3 29304 29374 29305 -3 29305 29374 29375 -3 29305 29375 29306 -3 29306 29375 29376 -3 29306 29376 29307 -3 29307 29376 29377 -3 29307 29377 29308 -3 29308 29377 29378 -3 29308 29378 29309 -3 29309 29378 29379 -3 29309 29379 29310 -3 29310 29379 29380 -3 29310 29380 29311 -3 29311 29380 29381 -3 29311 29381 29312 -3 29312 29381 29382 -3 29312 29382 29313 -3 29313 29382 29383 -3 29313 29383 29314 -3 29315 29384 29385 -3 29315 29385 29316 -3 29316 29385 29386 -3 29316 29386 29317 -3 29317 29386 29387 -3 29317 29387 29318 -3 29318 29387 29388 -3 29318 29388 29319 -3 29319 29388 29389 -3 29319 29389 29320 -3 29320 29389 29390 -3 29320 29390 29321 -3 29321 29390 29391 -3 29321 29391 29322 -3 29322 29391 29392 -3 29322 29392 29323 -3 29323 29392 29393 -3 29323 29393 29324 -3 29324 29393 29394 -3 29324 29394 29325 -3 29325 29394 29395 -3 29325 29395 29326 -3 29326 29395 29396 -3 29326 29396 29327 -3 29327 29396 29397 -3 29327 29397 29328 -3 29328 29397 29398 -3 29328 29398 29329 -3 29329 29398 29399 -3 29329 29399 29330 -3 29330 29399 29400 -3 29330 29400 29331 -3 29331 29400 29401 -3 29331 29401 29332 -3 29332 29401 29402 -3 29332 29402 29333 -3 29333 29402 29403 -3 29333 29403 29334 -3 29334 29403 29404 -3 29334 29404 29335 -3 29335 29404 29405 -3 29335 29405 29336 -3 29336 29405 29406 -3 29336 29406 29337 -3 29337 29406 29407 -3 29337 29407 29338 -3 29338 29407 29408 -3 29338 29408 29339 -3 29339 29408 29409 -3 29339 29409 29340 -3 29340 29409 29410 -3 29340 29410 29341 -3 29341 29410 29411 -3 29341 29411 29342 -3 29342 29411 29412 -3 29342 29412 29343 -3 29343 29412 29413 -3 29343 29413 29344 -3 29344 29413 29414 -3 29344 29414 29345 -3 29345 29414 29415 -3 29345 29415 29346 -3 29346 29415 29416 -3 29346 29416 29347 -3 29347 29416 29417 -3 29347 29417 29348 -3 29348 29417 29418 -3 29348 29418 29349 -3 29349 29418 29419 -3 29349 29419 29350 -3 29350 29419 29420 -3 29350 29420 29351 -3 29351 29420 29421 -3 29351 29421 29352 -3 29352 29421 29422 -3 29352 29422 29353 -3 29353 29422 29423 -3 29353 29423 29354 -3 29354 29423 29424 -3 29354 29424 29355 -3 29355 29424 29425 -3 29355 29425 29356 -3 29356 29425 29426 -3 29356 29426 29357 -3 29357 29426 29427 -3 29357 29427 29358 -3 29358 29427 29428 -3 29358 29428 29359 -3 29359 29428 29429 -3 29359 29429 29360 -3 29360 29429 29430 -3 29360 29430 29361 -3 29361 29430 29431 -3 29361 29431 29362 -3 29362 29431 29432 -3 29362 29432 29363 -3 29363 29432 29433 -3 29363 29433 29364 -3 29364 29433 29434 -3 29364 29434 29365 -3 29365 29434 29435 -3 29365 29435 29366 -3 29366 29435 29436 -3 29366 29436 29367 -3 29367 29436 29437 -3 29367 29437 29368 -3 29368 29437 29438 -3 29368 29438 29369 -3 29369 29438 29439 -3 29369 29439 29370 -3 29370 29439 29440 -3 29370 29440 29371 -3 29371 29440 29441 -3 29371 29441 29372 -3 29372 29441 29442 -3 29372 29442 29373 -3 29373 29442 29443 -3 29373 29443 29374 -3 29374 29443 29444 -3 29374 29444 29375 -3 29375 
29444 29445 -3 29375 29445 29376 -3 29376 29445 29446 -3 29376 29446 29377 -3 29377 29446 29447 -3 29377 29447 29378 -3 29378 29447 29448 -3 29378 29448 29379 -3 29379 29448 29449 -3 29379 29449 29380 -3 29380 29449 29450 -3 29380 29450 29381 -3 29381 29450 29451 -3 29381 29451 29382 -3 29382 29451 29452 -3 29382 29452 29383 -3 29384 29453 29454 -3 29384 29454 29385 -3 29385 29454 29455 -3 29385 29455 29386 -3 29386 29455 29456 -3 29386 29456 29387 -3 29387 29456 29457 -3 29387 29457 29388 -3 29388 29457 29458 -3 29388 29458 29389 -3 29389 29458 29459 -3 29389 29459 29390 -3 29390 29459 29460 -3 29390 29460 29391 -3 29391 29460 29461 -3 29391 29461 29392 -3 29392 29461 29462 -3 29392 29462 29393 -3 29393 29462 29463 -3 29393 29463 29394 -3 29394 29463 29464 -3 29394 29464 29395 -3 29395 29464 29465 -3 29395 29465 29396 -3 29396 29465 29466 -3 29396 29466 29397 -3 29397 29466 29467 -3 29397 29467 29398 -3 29398 29467 29468 -3 29398 29468 29399 -3 29399 29468 29469 -3 29399 29469 29400 -3 29400 29469 29401 -3 29470 29401 29469 -3 29401 29470 29402 -3 29471 29402 29470 -3 29402 29471 29403 -3 29472 29403 29471 -3 29403 29472 29404 -3 29473 29404 29472 -3 29404 29473 29405 -3 29474 29405 29473 -3 29405 29474 29406 -3 29475 29406 29474 -3 29406 29475 29407 -3 29476 29407 29475 -3 29407 29476 29408 -3 29477 29408 29476 -3 29408 29477 29409 -3 29478 29409 29477 -3 29409 29478 29410 -3 29479 29410 29478 -3 29410 29479 29411 -3 29480 29411 29479 -3 29411 29480 29412 -3 29481 29412 29480 -3 29412 29481 29413 -3 29482 29413 29481 -3 29413 29482 29414 -3 29483 29414 29482 -3 29414 29483 29415 -3 29484 29415 29483 -3 29415 29484 29416 -3 29485 29416 29484 -3 29416 29485 29417 -3 29486 29417 29485 -3 29417 29486 29418 -3 29487 29418 29486 -3 29418 29487 29419 -3 29488 29419 29487 -3 29419 29488 29420 -3 29489 29420 29488 -3 29420 29489 29421 -3 29490 29421 29489 -3 29421 29490 29422 -3 29491 29422 29490 -3 29422 29491 29423 -3 29492 29423 29491 -3 29423 29492 29424 -3 29493 29424 29492 -3 29424 29493 29425 -3 29494 29425 29493 -3 29425 29494 29426 -3 29495 29426 29494 -3 29426 29495 29427 -3 29496 29427 29495 -3 29427 29496 29428 -3 29497 29428 29496 -3 29428 29497 29429 -3 29498 29429 29497 -3 29429 29498 29430 -3 29499 29430 29498 -3 29430 29499 29431 -3 29500 29431 29499 -3 29431 29500 29432 -3 29501 29432 29500 -3 29432 29501 29433 -3 29502 29433 29501 -3 29433 29502 29434 -3 29503 29434 29502 -3 29434 29503 29435 -3 29504 29435 29503 -3 29435 29504 29436 -3 29505 29436 29504 -3 29436 29505 29437 -3 29506 29437 29505 -3 29437 29506 29438 -3 29507 29438 29506 -3 29438 29507 29439 -3 29508 29439 29507 -3 29439 29508 29440 -3 29509 29440 29508 -3 29440 29509 29441 -3 29510 29441 29509 -3 29441 29510 29442 -3 29511 29442 29510 -3 29442 29511 29443 -3 29512 29443 29511 -3 29443 29512 29444 -3 29513 29444 29512 -3 29444 29513 29445 -3 29514 29445 29513 -3 29445 29514 29446 -3 29515 29446 29514 -3 29446 29515 29447 -3 29516 29447 29515 -3 29447 29516 29448 -3 29517 29448 29516 -3 29448 29517 29449 -3 29518 29449 29517 -3 29449 29518 29450 -3 29519 29450 29518 -3 29450 29519 29451 -3 29520 29451 29519 -3 29451 29520 29452 -3 29521 29452 29520 -3 29453 29522 29454 -3 29523 29454 29522 -3 29454 29523 29455 -3 29524 29455 29523 -3 29455 29524 29456 -3 29525 29456 29524 -3 29456 29525 29457 -3 29526 29457 29525 -3 29457 29526 29458 -3 29527 29458 29526 -3 29458 29527 29459 -3 29528 29459 29527 -3 29459 29528 29460 -3 29529 29460 29528 -3 29460 29529 29461 -3 29530 29461 29529 -3 29461 29530 29462 -3 29531 29462 
29530 -3 29462 29531 29463 -3 29532 29463 29531 -3 29463 29532 29464 -3 29533 29464 29532 -3 29464 29533 29465 -3 29534 29465 29533 -3 29465 29534 29466 -3 29535 29466 29534 -3 29466 29535 29467 -3 29536 29467 29535 -3 29467 29536 29468 -3 29537 29468 29536 -3 29468 29537 29469 -3 29538 29469 29537 -3 29469 29538 29470 -3 29539 29470 29538 -3 29470 29539 29471 -3 29540 29471 29539 -3 29471 29540 29472 -3 29541 29472 29540 -3 29472 29541 29473 -3 29542 29473 29541 -3 29473 29542 29474 -3 29543 29474 29542 -3 29474 29543 29475 -3 29544 29475 29543 -3 29475 29544 29476 -3 29545 29476 29544 -3 29476 29545 29477 -3 29546 29477 29545 -3 29477 29546 29478 -3 29547 29478 29546 -3 29478 29547 29479 -3 29548 29479 29547 -3 29479 29548 29480 -3 29549 29480 29548 -3 29480 29549 29481 -3 29550 29481 29549 -3 29481 29550 29482 -3 29551 29482 29550 -3 29482 29551 29483 -3 29552 29483 29551 -3 29483 29552 29484 -3 29553 29484 29552 -3 29484 29553 29485 -3 29554 29485 29553 -3 29485 29554 29486 -3 29555 29486 29554 -3 29486 29555 29487 -3 29556 29487 29555 -3 29487 29556 29488 -3 29557 29488 29556 -3 29488 29557 29489 -3 29558 29489 29557 -3 29489 29558 29490 -3 29559 29490 29558 -3 29490 29559 29491 -3 29560 29491 29559 -3 29491 29560 29492 -3 29561 29492 29560 -3 29492 29561 29493 -3 29562 29493 29561 -3 29493 29562 29494 -3 29563 29494 29562 -3 29494 29563 29495 -3 29564 29495 29563 -3 29495 29564 29496 -3 29565 29496 29564 -3 29496 29565 29497 -3 29566 29497 29565 -3 29497 29566 29498 -3 29567 29498 29566 -3 29498 29567 29499 -3 29568 29499 29567 -3 29499 29568 29500 -3 29569 29500 29568 -3 29500 29569 29501 -3 29570 29501 29569 -3 29501 29570 29502 -3 29571 29502 29570 -3 29502 29571 29503 -3 29572 29503 29571 -3 29503 29572 29504 -3 29573 29504 29572 -3 29504 29573 29505 -3 29574 29505 29573 -3 29505 29574 29506 -3 29575 29506 29574 -3 29506 29575 29576 -3 29506 29576 29507 -3 29507 29576 29577 -3 29507 29577 29508 -3 29508 29577 29578 -3 29508 29578 29509 -3 29509 29578 29579 -3 29509 29579 29510 -3 29510 29579 29580 -3 29510 29580 29511 -3 29511 29580 29581 -3 29511 29581 29512 -3 29512 29581 29582 -3 29512 29582 29513 -3 29513 29582 29583 -3 29513 29583 29514 -3 29514 29583 29584 -3 29514 29584 29515 -3 29515 29584 29585 -3 29515 29585 29516 -3 29516 29585 29586 -3 29516 29586 29517 -3 29517 29586 29587 -3 29517 29587 29518 -3 29518 29587 29588 -3 29518 29588 29519 -3 29519 29588 29589 -3 29519 29589 29520 -3 29520 29589 29590 -3 29520 29590 29521 -3 29522 29591 29592 -3 29522 29592 29523 -3 29523 29592 29593 -3 29523 29593 29524 -3 29524 29593 29594 -3 29524 29594 29525 -3 29525 29594 29595 -3 29525 29595 29526 -3 29526 29595 29596 -3 29526 29596 29527 -3 29527 29596 29597 -3 29527 29597 29528 -3 29528 29597 29598 -3 29528 29598 29529 -3 29529 29598 29599 -3 29529 29599 29530 -3 29530 29599 29600 -3 29530 29600 29531 -3 29531 29600 29601 -3 29531 29601 29532 -3 29532 29601 29602 -3 29532 29602 29533 -3 29533 29602 29603 -3 29533 29603 29534 -3 29534 29603 29604 -3 29534 29604 29535 -3 29535 29604 29605 -3 29535 29605 29536 -3 29536 29605 29606 -3 29536 29606 29537 -3 29537 29606 29607 -3 29537 29607 29538 -3 29538 29607 29608 -3 29538 29608 29539 -3 29539 29608 29609 -3 29539 29609 29540 -3 29540 29609 29610 -3 29540 29610 29541 -3 29541 29610 29611 -3 29541 29611 29542 -3 29542 29611 29612 -3 29542 29612 29543 -3 29543 29612 29613 -3 29543 29613 29544 -3 29544 29613 29614 -3 29544 29614 29545 -3 29545 29614 29615 -3 29545 29615 29546 -3 29546 29615 29616 -3 29546 29616 29547 -3 29547 29616 29617 
-3 29547 29617 29548 -3 29548 29617 29618 -3 29548 29618 29549 -3 29549 29618 29619 -3 29549 29619 29550 -3 29550 29619 29620 -3 29550 29620 29551 -3 29551 29620 29621 -3 29551 29621 29552 -3 29552 29621 29622 -3 29552 29622 29553 -3 29553 29622 29623 -3 29553 29623 29554 -3 29554 29623 29624 -3 29554 29624 29555 -3 29555 29624 29625 -3 29555 29625 29556 -3 29556 29625 29626 -3 29556 29626 29557 -3 29557 29626 29627 -3 29557 29627 29558 -3 29558 29627 29628 -3 29558 29628 29559 -3 29559 29628 29629 -3 29559 29629 29560 -3 29560 29629 29630 -3 29560 29630 29561 -3 29561 29630 29631 -3 29561 29631 29562 -3 29562 29631 29632 -3 29562 29632 29563 -3 29563 29632 29633 -3 29563 29633 29564 -3 29564 29633 29634 -3 29564 29634 29565 -3 29565 29634 29635 -3 29565 29635 29566 -3 29566 29635 29636 -3 29566 29636 29567 -3 29567 29636 29637 -3 29567 29637 29568 -3 29568 29637 29638 -3 29568 29638 29569 -3 29569 29638 29639 -3 29569 29639 29570 -3 29570 29639 29640 -3 29570 29640 29571 -3 29571 29640 29641 -3 29571 29641 29572 -3 29572 29641 29642 -3 29572 29642 29573 -3 29573 29642 29643 -3 29573 29643 29574 -3 29574 29643 29644 -3 29574 29644 29575 -3 29575 29644 29645 -3 29575 29645 29576 -3 29576 29645 29646 -3 29576 29646 29577 -3 29577 29646 29647 -3 29577 29647 29578 -3 29578 29647 29648 -3 29578 29648 29579 -3 29579 29648 29649 -3 29579 29649 29580 -3 29580 29649 29650 -3 29580 29650 29581 -3 29581 29650 29651 -3 29581 29651 29582 -3 29582 29651 29652 -3 29582 29652 29583 -3 29583 29652 29653 -3 29583 29653 29584 -3 29584 29653 29654 -3 29584 29654 29585 -3 29585 29654 29655 -3 29585 29655 29586 -3 29586 29655 29656 -3 29586 29656 29587 -3 29587 29656 29657 -3 29587 29657 29588 -3 29588 29657 29658 -3 29588 29658 29589 -3 29589 29658 29659 -3 29589 29659 29590 -3 29591 29660 29661 -3 29591 29661 29592 -3 29592 29661 29662 -3 29592 29662 29593 -3 29593 29662 29663 -3 29593 29663 29594 -3 29594 29663 29664 -3 29594 29664 29595 -3 29595 29664 29665 -3 29595 29665 29596 -3 29596 29665 29666 -3 29596 29666 29597 -3 29597 29666 29667 -3 29597 29667 29598 -3 29598 29667 29668 -3 29598 29668 29599 -3 29599 29668 29669 -3 29599 29669 29600 -3 29600 29669 29670 -3 29600 29670 29601 -3 29601 29670 29671 -3 29601 29671 29602 -3 29602 29671 29672 -3 29602 29672 29603 -3 29603 29672 29673 -3 29603 29673 29604 -3 29604 29673 29674 -3 29604 29674 29605 -3 29605 29674 29675 -3 29605 29675 29606 -3 29606 29675 29676 -3 29606 29676 29607 -3 29607 29676 29677 -3 29607 29677 29608 -3 29608 29677 29678 -3 29608 29678 29609 -3 29609 29678 29679 -3 29609 29679 29610 -3 29610 29679 29680 -3 29610 29680 29611 -3 29611 29680 29681 -3 29611 29681 29612 -3 29612 29681 29613 -3 29682 29613 29681 -3 29613 29682 29614 -3 29683 29614 29682 -3 29614 29683 29615 -3 29684 29615 29683 -3 29615 29684 29616 -3 29685 29616 29684 -3 29616 29685 29617 -3 29686 29617 29685 -3 29617 29686 29618 -3 29687 29618 29686 -3 29618 29687 29619 -3 29688 29619 29687 -3 29619 29688 29620 -3 29689 29620 29688 -3 29620 29689 29621 -3 29690 29621 29689 -3 29621 29690 29622 -3 29691 29622 29690 -3 29622 29691 29623 -3 29692 29623 29691 -3 29623 29692 29624 -3 29693 29624 29692 -3 29624 29693 29625 -3 29694 29625 29693 -3 29625 29694 29626 -3 29695 29626 29694 -3 29626 29695 29627 -3 29696 29627 29695 -3 29627 29696 29628 -3 29697 29628 29696 -3 29628 29697 29629 -3 29698 29629 29697 -3 29629 29698 29630 -3 29699 29630 29698 -3 29630 29699 29631 -3 29700 29631 29699 -3 29631 29700 29632 -3 29701 29632 29700 -3 29632 29701 29633 -3 29702 29633 29701 -3 
29633 29702 29634 -3 29703 29634 29702 -3 29634 29703 29635 -3 29704 29635 29703 -3 29635 29704 29636 -3 29705 29636 29704 -3 29636 29705 29637 -3 29706 29637 29705 -3 29637 29706 29638 -3 29707 29638 29706 -3 29638 29707 29639 -3 29708 29639 29707 -3 29639 29708 29640 -3 29709 29640 29708 -3 29640 29709 29641 -3 29710 29641 29709 -3 29641 29710 29642 -3 29711 29642 29710 -3 29642 29711 29643 -3 29712 29643 29711 -3 29643 29712 29644 -3 29713 29644 29712 -3 29644 29713 29645 -3 29714 29645 29713 -3 29645 29714 29646 -3 29715 29646 29714 -3 29646 29715 29647 -3 29716 29647 29715 -3 29647 29716 29648 -3 29717 29648 29716 -3 29648 29717 29649 -3 29718 29649 29717 -3 29649 29718 29650 -3 29719 29650 29718 -3 29650 29719 29651 -3 29720 29651 29719 -3 29651 29720 29652 -3 29721 29652 29720 -3 29652 29721 29653 -3 29722 29653 29721 -3 29653 29722 29654 -3 29723 29654 29722 -3 29654 29723 29655 -3 29724 29655 29723 -3 29655 29724 29656 -3 29725 29656 29724 -3 29656 29725 29657 -3 29726 29657 29725 -3 29657 29726 29658 -3 29727 29658 29726 -3 29658 29727 29659 -3 29728 29659 29727 -3 29660 29729 29661 -3 29730 29661 29729 -3 29661 29730 29662 -3 29731 29662 29730 -3 29662 29731 29663 -3 29732 29663 29731 -3 29663 29732 29664 -3 29733 29664 29732 -3 29664 29733 29665 -3 29734 29665 29733 -3 29665 29734 29666 -3 29735 29666 29734 -3 29666 29735 29667 -3 29736 29667 29735 -3 29667 29736 29668 -3 29737 29668 29736 -3 29668 29737 29669 -3 29738 29669 29737 -3 29669 29738 29670 -3 29739 29670 29738 -3 29670 29739 29671 -3 29740 29671 29739 -3 29671 29740 29672 -3 29741 29672 29740 -3 29672 29741 29673 -3 29742 29673 29741 -3 29673 29742 29674 -3 29743 29674 29742 -3 29674 29743 29675 -3 29744 29675 29743 -3 29675 29744 29676 -3 29745 29676 29744 -3 29676 29745 29677 -3 29746 29677 29745 -3 29677 29746 29678 -3 29747 29678 29746 -3 29678 29747 29679 -3 29748 29679 29747 -3 29679 29748 29680 -3 29749 29680 29748 -3 29680 29749 29681 -3 29750 29681 29749 -3 29681 29750 29682 -3 29751 29682 29750 -3 29682 29751 29683 -3 29752 29683 29751 -3 29683 29752 29684 -3 29753 29684 29752 -3 29684 29753 29685 -3 29754 29685 29753 -3 29685 29754 29686 -3 29755 29686 29754 -3 29686 29755 29687 -3 29756 29687 29755 -3 29687 29756 29688 -3 29757 29688 29756 -3 29688 29757 29689 -3 29758 29689 29757 -3 29689 29758 29690 -3 29759 29690 29758 -3 29690 29759 29691 -3 29760 29691 29759 -3 29691 29760 29692 -3 29761 29692 29760 -3 29692 29761 29693 -3 29762 29693 29761 -3 29693 29762 29694 -3 29763 29694 29762 -3 29694 29763 29695 -3 29764 29695 29763 -3 29695 29764 29696 -3 29765 29696 29764 -3 29696 29765 29697 -3 29766 29697 29765 -3 29697 29766 29698 -3 29767 29698 29766 -3 29698 29767 29699 -3 29768 29699 29767 -3 29699 29768 29700 -3 29769 29700 29768 -3 29700 29769 29701 -3 29770 29701 29769 -3 29701 29770 29702 -3 29771 29702 29770 -3 29702 29771 29703 -3 29772 29703 29771 -3 29703 29772 29704 -3 29773 29704 29772 -3 29704 29773 29705 -3 29774 29705 29773 -3 29705 29774 29706 -3 29775 29706 29774 -3 29706 29775 29707 -3 29776 29707 29775 -3 29707 29776 29708 -3 29777 29708 29776 -3 29708 29777 29709 -3 29778 29709 29777 -3 29709 29778 29710 -3 29779 29710 29778 -3 29710 29779 29711 -3 29780 29711 29779 -3 29711 29780 29712 -3 29781 29712 29780 -3 29712 29781 29713 -3 29782 29713 29781 -3 29713 29782 29714 -3 29783 29714 29782 -3 29714 29783 29715 -3 29784 29715 29783 -3 29715 29784 29716 -3 29785 29716 29784 -3 29716 29785 29717 -3 29786 29717 29785 -3 29717 29786 29718 -3 29787 29718 29786 -3 29718 29787 29788 -3 29718 
29788 29719 -3 29719 29788 29789 -3 29719 29789 29720 -3 29720 29789 29790 -3 29720 29790 29721 -3 29721 29790 29791 -3 29721 29791 29722 -3 29722 29791 29792 -3 29722 29792 29723 -3 29723 29792 29793 -3 29723 29793 29724 -3 29724 29793 29794 -3 29724 29794 29725 -3 29725 29794 29795 -3 29725 29795 29726 -3 29726 29795 29796 -3 29726 29796 29727 -3 29727 29796 29797 -3 29727 29797 29728 -3 29729 29798 29799 -3 29729 29799 29730 -3 29730 29799 29800 -3 29730 29800 29731 -3 29731 29800 29801 -3 29731 29801 29732 -3 29732 29801 29802 -3 29732 29802 29733 -3 29733 29802 29803 -3 29733 29803 29734 -3 29734 29803 29804 -3 29734 29804 29735 -3 29735 29804 29805 -3 29735 29805 29736 -3 29736 29805 29806 -3 29736 29806 29737 -3 29737 29806 29807 -3 29737 29807 29738 -3 29738 29807 29808 -3 29738 29808 29739 -3 29739 29808 29809 -3 29739 29809 29740 -3 29740 29809 29810 -3 29740 29810 29741 -3 29741 29810 29811 -3 29741 29811 29742 -3 29742 29811 29812 -3 29742 29812 29743 -3 29743 29812 29813 -3 29743 29813 29744 -3 29744 29813 29814 -3 29744 29814 29745 -3 29745 29814 29815 -3 29745 29815 29746 -3 29746 29815 29816 -3 29746 29816 29747 -3 29747 29816 29817 -3 29747 29817 29748 -3 29748 29817 29818 -3 29748 29818 29749 -3 29749 29818 29819 -3 29749 29819 29750 -3 29750 29819 29820 -3 29750 29820 29751 -3 29751 29820 29821 -3 29751 29821 29752 -3 29752 29821 29822 -3 29752 29822 29753 -3 29753 29822 29823 -3 29753 29823 29754 -3 29754 29823 29824 -3 29754 29824 29755 -3 29755 29824 29825 -3 29755 29825 29756 -3 29756 29825 29826 -3 29756 29826 29757 -3 29757 29826 29827 -3 29757 29827 29758 -3 29758 29827 29828 -3 29758 29828 29759 -3 29759 29828 29829 -3 29759 29829 29760 -3 29760 29829 29830 -3 29760 29830 29761 -3 29761 29830 29831 -3 29761 29831 29762 -3 29762 29831 29832 -3 29762 29832 29763 -3 29763 29832 29833 -3 29763 29833 29764 -3 29764 29833 29834 -3 29764 29834 29765 -3 29765 29834 29835 -3 29765 29835 29766 -3 29766 29835 29836 -3 29766 29836 29767 -3 29767 29836 29837 -3 29767 29837 29768 -3 29768 29837 29838 -3 29768 29838 29769 -3 29769 29838 29839 -3 29769 29839 29770 -3 29770 29839 29840 -3 29770 29840 29771 -3 29771 29840 29841 -3 29771 29841 29772 -3 29772 29841 29842 -3 29772 29842 29773 -3 29773 29842 29843 -3 29773 29843 29774 -3 29774 29843 29844 -3 29774 29844 29775 -3 29775 29844 29845 -3 29775 29845 29776 -3 29776 29845 29846 -3 29776 29846 29777 -3 29777 29846 29847 -3 29777 29847 29778 -3 29778 29847 29848 -3 29778 29848 29779 -3 29779 29848 29849 -3 29779 29849 29780 -3 29780 29849 29850 -3 29780 29850 29781 -3 29781 29850 29851 -3 29781 29851 29782 -3 29782 29851 29852 -3 29782 29852 29783 -3 29783 29852 29853 -3 29783 29853 29784 -3 29784 29853 29854 -3 29784 29854 29785 -3 29785 29854 29855 -3 29785 29855 29786 -3 29786 29855 29856 -3 29786 29856 29787 -3 29787 29856 29857 -3 29787 29857 29788 -3 29788 29857 29858 -3 29788 29858 29789 -3 29789 29858 29859 -3 29789 29859 29790 -3 29790 29859 29860 -3 29790 29860 29791 -3 29791 29860 29861 -3 29791 29861 29792 -3 29792 29861 29862 -3 29792 29862 29793 -3 29793 29862 29863 -3 29793 29863 29794 -3 29794 29863 29864 -3 29794 29864 29795 -3 29795 29864 29865 -3 29795 29865 29796 -3 29796 29865 29866 -3 29796 29866 29797 -3 29798 29867 29868 -3 29798 29868 29799 -3 29799 29868 29869 -3 29799 29869 29800 -3 29800 29869 29870 -3 29800 29870 29801 -3 29801 29870 29871 -3 29801 29871 29802 -3 29802 29871 29872 -3 29802 29872 29803 -3 29803 29872 29873 -3 29803 29873 29804 -3 29804 29873 29874 -3 29804 29874 29805 -3 29805 29874 
29875 -3 29805 29875 29806 -3 29806 29875 29876 -3 29806 29876 29807 -3 29807 29876 29877 -3 29807 29877 29808 -3 29808 29877 29878 -3 29808 29878 29809 -3 29809 29878 29879 -3 29809 29879 29810 -3 29810 29879 29880 -3 29810 29880 29811 -3 29811 29880 29881 -3 29811 29881 29812 -3 29812 29881 29882 -3 29812 29882 29813 -3 29813 29882 29883 -3 29813 29883 29814 -3 29814 29883 29884 -3 29814 29884 29815 -3 29815 29884 29885 -3 29815 29885 29816 -3 29816 29885 29886 -3 29816 29886 29817 -3 29817 29886 29887 -3 29817 29887 29818 -3 29818 29887 29888 -3 29818 29888 29819 -3 29819 29888 29889 -3 29819 29889 29820 -3 29820 29889 29890 -3 29820 29890 29821 -3 29821 29890 29891 -3 29821 29891 29822 -3 29822 29891 29892 -3 29822 29892 29823 -3 29823 29892 29893 -3 29823 29893 29824 -3 29824 29893 29825 -3 29894 29825 29893 -3 29825 29894 29826 -3 29895 29826 29894 -3 29826 29895 29827 -3 29896 29827 29895 -3 29827 29896 29828 -3 29897 29828 29896 -3 29828 29897 29829 -3 29898 29829 29897 -3 29829 29898 29830 -3 29899 29830 29898 -3 29830 29899 29831 -3 29900 29831 29899 -3 29831 29900 29832 -3 29901 29832 29900 -3 29832 29901 29833 -3 29902 29833 29901 -3 29833 29902 29834 -3 29903 29834 29902 -3 29834 29903 29835 -3 29904 29835 29903 -3 29835 29904 29836 -3 29905 29836 29904 -3 29836 29905 29837 -3 29906 29837 29905 -3 29837 29906 29838 -3 29907 29838 29906 -3 29838 29907 29839 -3 29908 29839 29907 -3 29839 29908 29840 -3 29909 29840 29908 -3 29840 29909 29841 -3 29910 29841 29909 -3 29841 29910 29842 -3 29911 29842 29910 -3 29842 29911 29843 -3 29912 29843 29911 -3 29843 29912 29844 -3 29913 29844 29912 -3 29844 29913 29845 -3 29914 29845 29913 -3 29845 29914 29846 -3 29915 29846 29914 -3 29846 29915 29847 -3 29916 29847 29915 -3 29847 29916 29848 -3 29917 29848 29916 -3 29848 29917 29849 -3 29918 29849 29917 -3 29849 29918 29850 -3 29919 29850 29918 -3 29850 29919 29851 -3 29920 29851 29919 -3 29851 29920 29852 -3 29921 29852 29920 -3 29852 29921 29853 -3 29922 29853 29921 -3 29853 29922 29854 -3 29923 29854 29922 -3 29854 29923 29855 -3 29924 29855 29923 -3 29855 29924 29856 -3 29925 29856 29924 -3 29856 29925 29857 -3 29926 29857 29925 -3 29857 29926 29858 -3 29927 29858 29926 -3 29858 29927 29859 -3 29928 29859 29927 -3 29859 29928 29860 -3 29929 29860 29928 -3 29860 29929 29861 -3 29930 29861 29929 -3 29861 29930 29862 -3 29931 29862 29930 -3 29862 29931 29863 -3 29932 29863 29931 -3 29863 29932 29864 -3 29933 29864 29932 -3 29864 29933 29865 -3 29934 29865 29933 -3 29865 29934 29866 -3 29935 29866 29934 -3 29867 29936 29868 -3 29937 29868 29936 -3 29868 29937 29869 -3 29938 29869 29937 -3 29869 29938 29870 -3 29939 29870 29938 -3 29870 29939 29871 -3 29940 29871 29939 -3 29871 29940 29872 -3 29941 29872 29940 -3 29872 29941 29873 -3 29942 29873 29941 -3 29873 29942 29874 -3 29943 29874 29942 -3 29874 29943 29875 -3 29944 29875 29943 -3 29875 29944 29876 -3 29945 29876 29944 -3 29876 29945 29877 -3 29946 29877 29945 -3 29877 29946 29878 -3 29947 29878 29946 -3 29878 29947 29879 -3 29948 29879 29947 -3 29879 29948 29880 -3 29949 29880 29948 -3 29880 29949 29881 -3 29950 29881 29949 -3 29881 29950 29882 -3 29951 29882 29950 -3 29882 29951 29883 -3 29952 29883 29951 -3 29883 29952 29884 -3 29953 29884 29952 -3 29884 29953 29885 -3 29954 29885 29953 -3 29885 29954 29886 -3 29955 29886 29954 -3 29886 29955 29887 -3 29956 29887 29955 -3 29887 29956 29888 -3 29957 29888 29956 -3 29888 29957 29889 -3 29958 29889 29957 -3 29889 29958 29890 -3 29959 29890 29958 -3 29890 29959 29891 -3 29960 29891 29959 
-3 29891 29960 29892 -3 29961 29892 29960 -3 29892 29961 29893 -3 29962 29893 29961 -3 29893 29962 29894 -3 29963 29894 29962 -3 29894 29963 29895 -3 29964 29895 29963 -3 29895 29964 29896 -3 29965 29896 29964 -3 29896 29965 29897 -3 29966 29897 29965 -3 29897 29966 29898 -3 29967 29898 29966 -3 29898 29967 29899 -3 29968 29899 29967 -3 29899 29968 29900 -3 29969 29900 29968 -3 29900 29969 29901 -3 29970 29901 29969 -3 29901 29970 29902 -3 29971 29902 29970 -3 29902 29971 29903 -3 29972 29903 29971 -3 29903 29972 29904 -3 29973 29904 29972 -3 29904 29973 29905 -3 29974 29905 29973 -3 29905 29974 29906 -3 29975 29906 29974 -3 29906 29975 29907 -3 29976 29907 29975 -3 29907 29976 29908 -3 29977 29908 29976 -3 29908 29977 29909 -3 29978 29909 29977 -3 29909 29978 29910 -3 29979 29910 29978 -3 29910 29979 29911 -3 29980 29911 29979 -3 29911 29980 29912 -3 29981 29912 29980 -3 29912 29981 29913 -3 29982 29913 29981 -3 29913 29982 29914 -3 29983 29914 29982 -3 29914 29983 29915 -3 29984 29915 29983 -3 29915 29984 29916 -3 29985 29916 29984 -3 29916 29985 29917 -3 29986 29917 29985 -3 29917 29986 29918 -3 29987 29918 29986 -3 29918 29987 29919 -3 29988 29919 29987 -3 29919 29988 29920 -3 29989 29920 29988 -3 29920 29989 29921 -3 29990 29921 29989 -3 29921 29990 29922 -3 29991 29922 29990 -3 29922 29991 29923 -3 29992 29923 29991 -3 29923 29992 29924 -3 29993 29924 29992 -3 29924 29993 29925 -3 29994 29925 29993 -3 29925 29994 29926 -3 29995 29926 29994 -3 29926 29995 29927 -3 29996 29927 29995 -3 29927 29996 29928 -3 29997 29928 29996 -3 29928 29997 29929 -3 29998 29929 29997 -3 29929 29998 29930 -3 29999 29930 29998 -3 29930 29999 29931 -3 30000 29931 29999 -3 29931 30000 30001 -3 29931 30001 29932 -3 29932 30001 30002 -3 29932 30002 29933 -3 29933 30002 30003 -3 29933 30003 29934 -3 29934 30003 30004 -3 29934 30004 29935 -3 29936 30005 30006 -3 29936 30006 29937 -3 29937 30006 30007 -3 29937 30007 29938 -3 29938 30007 30008 -3 29938 30008 29939 -3 29939 30008 30009 -3 29939 30009 29940 -3 29940 30009 30010 -3 29940 30010 29941 -3 29941 30010 30011 -3 29941 30011 29942 -3 29942 30011 30012 -3 29942 30012 29943 -3 29943 30012 30013 -3 29943 30013 29944 -3 29944 30013 30014 -3 29944 30014 29945 -3 29945 30014 30015 -3 29945 30015 29946 -3 29946 30015 30016 -3 29946 30016 29947 -3 29947 30016 30017 -3 29947 30017 29948 -3 29948 30017 30018 -3 29948 30018 29949 -3 29949 30018 30019 -3 29949 30019 29950 -3 29950 30019 30020 -3 29950 30020 29951 -3 29951 30020 30021 -3 29951 30021 29952 -3 29952 30021 30022 -3 29952 30022 29953 -3 29953 30022 30023 -3 29953 30023 29954 -3 29954 30023 30024 -3 29954 30024 29955 -3 29955 30024 30025 -3 29955 30025 29956 -3 29956 30025 30026 -3 29956 30026 29957 -3 29957 30026 30027 -3 29957 30027 29958 -3 29958 30027 30028 -3 29958 30028 29959 -3 29959 30028 30029 -3 29959 30029 29960 -3 29960 30029 30030 -3 29960 30030 29961 -3 29961 30030 30031 -3 29961 30031 29962 -3 29962 30031 30032 -3 29962 30032 29963 -3 29963 30032 30033 -3 29963 30033 29964 -3 29964 30033 30034 -3 29964 30034 29965 -3 29965 30034 30035 -3 29965 30035 29966 -3 29966 30035 30036 -3 29966 30036 29967 -3 29967 30036 30037 -3 29967 30037 29968 -3 29968 30037 30038 -3 29968 30038 29969 -3 29969 30038 30039 -3 29969 30039 29970 -3 29970 30039 30040 -3 29970 30040 29971 -3 29971 30040 30041 -3 29971 30041 29972 -3 29972 30041 30042 -3 29972 30042 29973 -3 29973 30042 30043 -3 29973 30043 29974 -3 29974 30043 30044 -3 29974 30044 29975 -3 29975 30044 30045 -3 29975 30045 29976 -3 29976 30045 30046 -3 
29976 30046 29977 -3 29977 30046 30047 -3 29977 30047 29978 -3 29978 30047 30048 -3 29978 30048 29979 -3 29979 30048 30049 -3 29979 30049 29980 -3 29980 30049 30050 -3 29980 30050 29981 -3 29981 30050 30051 -3 29981 30051 29982 -3 29982 30051 30052 -3 29982 30052 29983 -3 29983 30052 30053 -3 29983 30053 29984 -3 29984 30053 30054 -3 29984 30054 29985 -3 29985 30054 30055 -3 29985 30055 29986 -3 29986 30055 30056 -3 29986 30056 29987 -3 29987 30056 30057 -3 29987 30057 29988 -3 29988 30057 30058 -3 29988 30058 29989 -3 29989 30058 30059 -3 29989 30059 29990 -3 29990 30059 30060 -3 29990 30060 29991 -3 29991 30060 30061 -3 29991 30061 29992 -3 29992 30061 30062 -3 29992 30062 29993 -3 29993 30062 30063 -3 29993 30063 29994 -3 29994 30063 30064 -3 29994 30064 29995 -3 29995 30064 30065 -3 29995 30065 29996 -3 29996 30065 30066 -3 29996 30066 29997 -3 29997 30066 30067 -3 29997 30067 29998 -3 29998 30067 30068 -3 29998 30068 29999 -3 29999 30068 30069 -3 29999 30069 30000 -3 30000 30069 30070 -3 30000 30070 30001 -3 30001 30070 30071 -3 30001 30071 30002 -3 30002 30071 30072 -3 30002 30072 30003 -3 30003 30072 30073 -3 30003 30073 30004 -3 30005 30074 30075 -3 30005 30075 30006 -3 30006 30075 30076 -3 30006 30076 30007 -3 30007 30076 30077 -3 30007 30077 30008 -3 30008 30077 30078 -3 30008 30078 30009 -3 30009 30078 30079 -3 30009 30079 30010 -3 30010 30079 30080 -3 30010 30080 30011 -3 30011 30080 30081 -3 30011 30081 30012 -3 30012 30081 30082 -3 30012 30082 30013 -3 30013 30082 30083 -3 30013 30083 30014 -3 30014 30083 30084 -3 30014 30084 30015 -3 30015 30084 30085 -3 30015 30085 30016 -3 30016 30085 30086 -3 30016 30086 30017 -3 30017 30086 30087 -3 30017 30087 30018 -3 30018 30087 30088 -3 30018 30088 30019 -3 30019 30088 30089 -3 30019 30089 30020 -3 30020 30089 30090 -3 30020 30090 30021 -3 30021 30090 30091 -3 30021 30091 30022 -3 30022 30091 30092 -3 30022 30092 30023 -3 30023 30092 30093 -3 30023 30093 30024 -3 30024 30093 30094 -3 30024 30094 30025 -3 30025 30094 30095 -3 30025 30095 30026 -3 30026 30095 30096 -3 30026 30096 30027 -3 30027 30096 30097 -3 30027 30097 30028 -3 30028 30097 30098 -3 30028 30098 30029 -3 30029 30098 30099 -3 30029 30099 30030 -3 30030 30099 30100 -3 30030 30100 30031 -3 30031 30100 30101 -3 30031 30101 30032 -3 30032 30101 30102 -3 30032 30102 30033 -3 30033 30102 30103 -3 30033 30103 30034 -3 30034 30103 30104 -3 30034 30104 30035 -3 30035 30104 30105 -3 30035 30105 30036 -3 30036 30105 30106 -3 30036 30106 30037 -3 30037 30106 30038 -3 30107 30038 30106 -3 30038 30107 30039 -3 30108 30039 30107 -3 30039 30108 30040 -3 30109 30040 30108 -3 30040 30109 30041 -3 30110 30041 30109 -3 30041 30110 30042 -3 30111 30042 30110 -3 30042 30111 30043 -3 30112 30043 30111 -3 30043 30112 30044 -3 30113 30044 30112 -3 30044 30113 30045 -3 30114 30045 30113 -3 30045 30114 30046 -3 30115 30046 30114 -3 30046 30115 30047 -3 30116 30047 30115 -3 30047 30116 30048 -3 30117 30048 30116 -3 30048 30117 30049 -3 30118 30049 30117 -3 30049 30118 30050 -3 30119 30050 30118 -3 30050 30119 30051 -3 30120 30051 30119 -3 30051 30120 30052 -3 30121 30052 30120 -3 30052 30121 30053 -3 30122 30053 30121 -3 30053 30122 30054 -3 30123 30054 30122 -3 30054 30123 30055 -3 30124 30055 30123 -3 30055 30124 30056 -3 30125 30056 30124 -3 30056 30125 30057 -3 30126 30057 30125 -3 30057 30126 30058 -3 30127 30058 30126 -3 30058 30127 30059 -3 30128 30059 30127 -3 30059 30128 30060 -3 30129 30060 30128 -3 30060 30129 30061 -3 30130 30061 30129 -3 30061 30130 30062 -3 30131 30062 30130 -3 30062 
30131 30063 -3 30132 30063 30131 -3 30063 30132 30064 -3 30133 30064 30132 -3 30064 30133 30065 -3 30134 30065 30133 -3 30065 30134 30066 -3 30135 30066 30134 -3 30066 30135 30067 -3 30136 30067 30135 -3 30067 30136 30068 -3 30137 30068 30136 -3 30068 30137 30069 -3 30138 30069 30137 -3 30069 30138 30070 -3 30139 30070 30138 -3 30070 30139 30071 -3 30140 30071 30139 -3 30071 30140 30072 -3 30141 30072 30140 -3 30072 30141 30073 -3 30142 30073 30141 -3 30074 30143 30075 -3 30144 30075 30143 -3 30075 30144 30076 -3 30145 30076 30144 -3 30076 30145 30077 -3 30146 30077 30145 -3 30077 30146 30078 -3 30147 30078 30146 -3 30078 30147 30079 -3 30148 30079 30147 -3 30079 30148 30080 -3 30149 30080 30148 -3 30080 30149 30081 -3 30150 30081 30149 -3 30081 30150 30082 -3 30151 30082 30150 -3 30082 30151 30083 -3 30152 30083 30151 -3 30083 30152 30084 -3 30153 30084 30152 -3 30084 30153 30085 -3 30154 30085 30153 -3 30085 30154 30086 -3 30155 30086 30154 -3 30086 30155 30087 -3 30156 30087 30155 -3 30087 30156 30088 -3 30157 30088 30156 -3 30088 30157 30089 -3 30158 30089 30157 -3 30089 30158 30090 -3 30159 30090 30158 -3 30090 30159 30091 -3 30160 30091 30159 -3 30091 30160 30092 -3 30161 30092 30160 -3 30092 30161 30093 -3 30162 30093 30161 -3 30093 30162 30094 -3 30163 30094 30162 -3 30094 30163 30095 -3 30164 30095 30163 -3 30095 30164 30096 -3 30165 30096 30164 -3 30096 30165 30097 -3 30166 30097 30165 -3 30097 30166 30098 -3 30167 30098 30166 -3 30098 30167 30099 -3 30168 30099 30167 -3 30099 30168 30100 -3 30169 30100 30168 -3 30100 30169 30101 -3 30170 30101 30169 -3 30101 30170 30102 -3 30171 30102 30170 -3 30102 30171 30103 -3 30172 30103 30171 -3 30103 30172 30104 -3 30173 30104 30172 -3 30104 30173 30105 -3 30174 30105 30173 -3 30105 30174 30106 -3 30175 30106 30174 -3 30106 30175 30107 -3 30176 30107 30175 -3 30107 30176 30108 -3 30177 30108 30176 -3 30108 30177 30109 -3 30178 30109 30177 -3 30109 30178 30110 -3 30179 30110 30178 -3 30110 30179 30111 -3 30180 30111 30179 -3 30111 30180 30112 -3 30181 30112 30180 -3 30112 30181 30113 -3 30182 30113 30181 -3 30113 30182 30114 -3 30183 30114 30182 -3 30114 30183 30115 -3 30184 30115 30183 -3 30115 30184 30116 -3 30185 30116 30184 -3 30116 30185 30117 -3 30186 30117 30185 -3 30117 30186 30118 -3 30187 30118 30186 -3 30118 30187 30119 -3 30188 30119 30187 -3 30119 30188 30120 -3 30189 30120 30188 -3 30120 30189 30121 -3 30190 30121 30189 -3 30121 30190 30122 -3 30191 30122 30190 -3 30122 30191 30123 -3 30192 30123 30191 -3 30123 30192 30124 -3 30193 30124 30192 -3 30124 30193 30125 -3 30194 30125 30193 -3 30125 30194 30126 -3 30195 30126 30194 -3 30126 30195 30127 -3 30196 30127 30195 -3 30127 30196 30128 -3 30197 30128 30196 -3 30128 30197 30129 -3 30198 30129 30197 -3 30129 30198 30130 -3 30199 30130 30198 -3 30130 30199 30131 -3 30200 30131 30199 -3 30131 30200 30132 -3 30201 30132 30200 -3 30132 30201 30133 -3 30202 30133 30201 -3 30133 30202 30134 -3 30203 30134 30202 -3 30134 30203 30135 -3 30204 30135 30203 -3 30135 30204 30136 -3 30205 30136 30204 -3 30136 30205 30137 -3 30206 30137 30205 -3 30137 30206 30138 -3 30207 30138 30206 -3 30138 30207 30139 -3 30208 30139 30207 -3 30139 30208 30140 -3 30209 30140 30208 -3 30140 30209 30141 -3 30210 30141 30209 -3 30141 30210 30142 -3 30211 30142 30210 -3 30143 30212 30144 -3 30213 30144 30212 -3 30144 30213 30214 -3 30144 30214 30145 -3 30145 30214 30215 -3 30145 30215 30146 -3 30146 30215 30216 -3 30146 30216 30147 -3 30147 30216 30217 -3 30147 30217 30148 -3 30148 30217 30218 -3 30148 30218 
30149 -3 30149 30218 30219 -3 30149 30219 30150 -3 30150 30219 30220 -3 30150 30220 30151 -3 30151 30220 30221 -3 30151 30221 30152 -3 30152 30221 30222 -3 30152 30222 30153 -3 30153 30222 30223 -3 30153 30223 30154 -3 30154 30223 30224 -3 30154 30224 30155 -3 30155 30224 30225 -3 30155 30225 30156 -3 30156 30225 30226 -3 30156 30226 30157 -3 30157 30226 30227 -3 30157 30227 30158 -3 30158 30227 30228 -3 30158 30228 30159 -3 30159 30228 30229 -3 30159 30229 30160 -3 30160 30229 30230 -3 30160 30230 30161 -3 30161 30230 30231 -3 30161 30231 30162 -3 30162 30231 30232 -3 30162 30232 30163 -3 30163 30232 30233 -3 30163 30233 30164 -3 30164 30233 30234 -3 30164 30234 30165 -3 30165 30234 30235 -3 30165 30235 30166 -3 30166 30235 30236 -3 30166 30236 30167 -3 30167 30236 30237 -3 30167 30237 30168 -3 30168 30237 30238 -3 30168 30238 30169 -3 30169 30238 30239 -3 30169 30239 30170 -3 30170 30239 30240 -3 30170 30240 30171 -3 30171 30240 30241 -3 30171 30241 30172 -3 30172 30241 30242 -3 30172 30242 30173 -3 30173 30242 30243 -3 30173 30243 30174 -3 30174 30243 30244 -3 30174 30244 30175 -3 30175 30244 30245 -3 30175 30245 30176 -3 30176 30245 30246 -3 30176 30246 30177 -3 30177 30246 30247 -3 30177 30247 30178 -3 30178 30247 30248 -3 30178 30248 30179 -3 30179 30248 30249 -3 30179 30249 30180 -3 30180 30249 30250 -3 30180 30250 30181 -3 30181 30250 30251 -3 30181 30251 30182 -3 30182 30251 30252 -3 30182 30252 30183 -3 30183 30252 30253 -3 30183 30253 30184 -3 30184 30253 30254 -3 30184 30254 30185 -3 30185 30254 30255 -3 30185 30255 30186 -3 30186 30255 30256 -3 30186 30256 30187 -3 30187 30256 30257 -3 30187 30257 30188 -3 30188 30257 30258 -3 30188 30258 30189 -3 30189 30258 30259 -3 30189 30259 30190 -3 30190 30259 30260 -3 30190 30260 30191 -3 30191 30260 30261 -3 30191 30261 30192 -3 30192 30261 30262 -3 30192 30262 30193 -3 30193 30262 30263 -3 30193 30263 30194 -3 30194 30263 30264 -3 30194 30264 30195 -3 30195 30264 30265 -3 30195 30265 30196 -3 30196 30265 30266 -3 30196 30266 30197 -3 30197 30266 30267 -3 30197 30267 30198 -3 30198 30267 30268 -3 30198 30268 30199 -3 30199 30268 30269 -3 30199 30269 30200 -3 30200 30269 30270 -3 30200 30270 30201 -3 30201 30270 30271 -3 30201 30271 30202 -3 30202 30271 30272 -3 30202 30272 30203 -3 30203 30272 30273 -3 30203 30273 30204 -3 30204 30273 30274 -3 30204 30274 30205 -3 30205 30274 30275 -3 30205 30275 30206 -3 30206 30275 30276 -3 30206 30276 30207 -3 30207 30276 30277 -3 30207 30277 30208 -3 30208 30277 30278 -3 30208 30278 30209 -3 30209 30278 30279 -3 30209 30279 30210 -3 30210 30279 30280 -3 30210 30280 30211 -3 30212 30281 30282 -3 30212 30282 30213 -3 30213 30282 30283 -3 30213 30283 30214 -3 30214 30283 30284 -3 30214 30284 30215 -3 30215 30284 30285 -3 30215 30285 30216 -3 30216 30285 30286 -3 30216 30286 30217 -3 30217 30286 30287 -3 30217 30287 30218 -3 30218 30287 30288 -3 30218 30288 30219 -3 30219 30288 30289 -3 30219 30289 30220 -3 30220 30289 30290 -3 30220 30290 30221 -3 30221 30290 30291 -3 30221 30291 30222 -3 30222 30291 30292 -3 30222 30292 30223 -3 30223 30292 30293 -3 30223 30293 30224 -3 30224 30293 30294 -3 30224 30294 30225 -3 30225 30294 30295 -3 30225 30295 30226 -3 30226 30295 30296 -3 30226 30296 30227 -3 30227 30296 30297 -3 30227 30297 30228 -3 30228 30297 30298 -3 30228 30298 30229 -3 30229 30298 30299 -3 30229 30299 30230 -3 30230 30299 30300 -3 30230 30300 30231 -3 30231 30300 30301 -3 30231 30301 30232 -3 30232 30301 30302 -3 30232 30302 30233 -3 30233 30302 30303 -3 30233 30303 30234 -3 30234 30303 30304 
-3 30234 30304 30235 -3 30235 30304 30305 -3 30235 30305 30236 -3 30236 30305 30306 -3 30236 30306 30237 -3 30237 30306 30307 -3 30237 30307 30238 -3 30238 30307 30308 -3 30238 30308 30239 -3 30239 30308 30309 -3 30239 30309 30240 -3 30240 30309 30310 -3 30240 30310 30241 -3 30241 30310 30311 -3 30241 30311 30242 -3 30242 30311 30312 -3 30242 30312 30243 -3 30243 30312 30313 -3 30243 30313 30244 -3 30244 30313 30314 -3 30244 30314 30245 -3 30245 30314 30315 -3 30245 30315 30246 -3 30246 30315 30316 -3 30246 30316 30247 -3 30247 30316 30317 -3 30247 30317 30248 -3 30248 30317 30318 -3 30248 30318 30249 -3 30249 30318 30319 -3 30249 30319 30250 -3 30250 30319 30320 -3 30250 30320 30251 -3 30251 30320 30252 -3 30321 30252 30320 -3 30252 30321 30253 -3 30322 30253 30321 -3 30253 30322 30254 -3 30323 30254 30322 -3 30254 30323 30255 -3 30324 30255 30323 -3 30255 30324 30256 -3 30325 30256 30324 -3 30256 30325 30257 -3 30326 30257 30325 -3 30257 30326 30258 -3 30327 30258 30326 -3 30258 30327 30259 -3 30328 30259 30327 -3 30259 30328 30260 -3 30329 30260 30328 -3 30260 30329 30261 -3 30330 30261 30329 -3 30261 30330 30262 -3 30331 30262 30330 -3 30262 30331 30263 -3 30332 30263 30331 -3 30263 30332 30264 -3 30333 30264 30332 -3 30264 30333 30265 -3 30334 30265 30333 -3 30265 30334 30266 -3 30335 30266 30334 -3 30266 30335 30267 -3 30336 30267 30335 -3 30267 30336 30268 -3 30337 30268 30336 -3 30268 30337 30269 -3 30338 30269 30337 -3 30269 30338 30270 -3 30339 30270 30338 -3 30270 30339 30271 -3 30340 30271 30339 -3 30271 30340 30272 -3 30341 30272 30340 -3 30272 30341 30273 -3 30342 30273 30341 -3 30273 30342 30274 -3 30343 30274 30342 -3 30274 30343 30275 -3 30344 30275 30343 -3 30275 30344 30276 -3 30345 30276 30344 -3 30276 30345 30277 -3 30346 30277 30345 -3 30277 30346 30278 -3 30347 30278 30346 -3 30278 30347 30279 -3 30348 30279 30347 -3 30279 30348 30280 -3 30349 30280 30348 -3 30281 30350 30282 -3 30351 30282 30350 -3 30282 30351 30283 -3 30352 30283 30351 -3 30283 30352 30284 -3 30353 30284 30352 -3 30284 30353 30285 -3 30354 30285 30353 -3 30285 30354 30286 -3 30355 30286 30354 -3 30286 30355 30287 -3 30356 30287 30355 -3 30287 30356 30288 -3 30357 30288 30356 -3 30288 30357 30289 -3 30358 30289 30357 -3 30289 30358 30290 -3 30359 30290 30358 -3 30290 30359 30291 -3 30360 30291 30359 -3 30291 30360 30292 -3 30361 30292 30360 -3 30292 30361 30293 -3 30362 30293 30361 -3 30293 30362 30294 -3 30363 30294 30362 -3 30294 30363 30295 -3 30364 30295 30363 -3 30295 30364 30296 -3 30365 30296 30364 -3 30296 30365 30297 -3 30366 30297 30365 -3 30297 30366 30298 -3 30367 30298 30366 -3 30298 30367 30299 -3 30368 30299 30367 -3 30299 30368 30300 -3 30369 30300 30368 -3 30300 30369 30301 -3 30370 30301 30369 -3 30301 30370 30302 -3 30371 30302 30370 -3 30302 30371 30303 -3 30372 30303 30371 -3 30303 30372 30304 -3 30373 30304 30372 -3 30304 30373 30305 -3 30374 30305 30373 -3 30305 30374 30306 -3 30375 30306 30374 -3 30306 30375 30307 -3 30376 30307 30375 -3 30307 30376 30308 -3 30377 30308 30376 -3 30308 30377 30309 -3 30378 30309 30377 -3 30309 30378 30310 -3 30379 30310 30378 -3 30310 30379 30311 -3 30380 30311 30379 -3 30311 30380 30312 -3 30381 30312 30380 -3 30312 30381 30313 -3 30382 30313 30381 -3 30313 30382 30314 -3 30383 30314 30382 -3 30314 30383 30315 -3 30384 30315 30383 -3 30315 30384 30316 -3 30385 30316 30384 -3 30316 30385 30317 -3 30386 30317 30385 -3 30317 30386 30318 -3 30387 30318 30386 -3 30318 30387 30319 -3 30388 30319 30387 -3 30319 30388 30320 -3 30389 30320 30388 -3 
30320 30389 30321 -3 30390 30321 30389 -3 30321 30390 30322 -3 30391 30322 30390 -3 30322 30391 30323 -3 30392 30323 30391 -3 30323 30392 30324 -3 30393 30324 30392 -3 30324 30393 30325 -3 30394 30325 30393 -3 30325 30394 30326 -3 30395 30326 30394 -3 30326 30395 30327 -3 30396 30327 30395 -3 30327 30396 30328 -3 30397 30328 30396 -3 30328 30397 30329 -3 30398 30329 30397 -3 30329 30398 30330 -3 30399 30330 30398 -3 30330 30399 30331 -3 30400 30331 30399 -3 30331 30400 30332 -3 30401 30332 30400 -3 30332 30401 30333 -3 30402 30333 30401 -3 30333 30402 30334 -3 30403 30334 30402 -3 30334 30403 30335 -3 30404 30335 30403 -3 30335 30404 30336 -3 30405 30336 30404 -3 30336 30405 30337 -3 30406 30337 30405 -3 30337 30406 30338 -3 30407 30338 30406 -3 30338 30407 30339 -3 30408 30339 30407 -3 30339 30408 30340 -3 30409 30340 30408 -3 30340 30409 30341 -3 30410 30341 30409 -3 30341 30410 30342 -3 30411 30342 30410 -3 30342 30411 30343 -3 30412 30343 30411 -3 30343 30412 30344 -3 30413 30344 30412 -3 30344 30413 30345 -3 30414 30345 30413 -3 30345 30414 30346 -3 30415 30346 30414 -3 30346 30415 30347 -3 30416 30347 30415 -3 30347 30416 30348 -3 30417 30348 30416 -3 30348 30417 30349 -3 30418 30349 30417 -3 30350 30419 30351 -3 30420 30351 30419 -3 30351 30420 30352 -3 30421 30352 30420 -3 30352 30421 30353 -3 30422 30353 30421 -3 30353 30422 30354 -3 30423 30354 30422 -3 30354 30423 30355 -3 30424 30355 30423 -3 30355 30424 30356 -3 30425 30356 30424 -3 30356 30425 30357 -3 30426 30357 30425 -3 30357 30426 30358 -3 30427 30358 30426 -3 30358 30427 30428 -3 30358 30428 30359 -3 30359 30428 30429 -3 30359 30429 30360 -3 30360 30429 30430 -3 30360 30430 30361 -3 30361 30430 30431 -3 30361 30431 30362 -3 30362 30431 30432 -3 30362 30432 30363 -3 30363 30432 30433 -3 30363 30433 30364 -3 30364 30433 30434 -3 30364 30434 30365 -3 30365 30434 30435 -3 30365 30435 30366 -3 30366 30435 30436 -3 30366 30436 30367 -3 30367 30436 30437 -3 30367 30437 30368 -3 30368 30437 30438 -3 30368 30438 30369 -3 30369 30438 30439 -3 30369 30439 30370 -3 30370 30439 30440 -3 30370 30440 30371 -3 30371 30440 30441 -3 30371 30441 30372 -3 30372 30441 30442 -3 30372 30442 30373 -3 30373 30442 30443 -3 30373 30443 30374 -3 30374 30443 30444 -3 30374 30444 30375 -3 30375 30444 30445 -3 30375 30445 30376 -3 30376 30445 30446 -3 30376 30446 30377 -3 30377 30446 30447 -3 30377 30447 30378 -3 30378 30447 30448 -3 30378 30448 30379 -3 30379 30448 30449 -3 30379 30449 30380 -3 30380 30449 30450 -3 30380 30450 30381 -3 30381 30450 30451 -3 30381 30451 30382 -3 30382 30451 30452 -3 30382 30452 30383 -3 30383 30452 30453 -3 30383 30453 30384 -3 30384 30453 30454 -3 30384 30454 30385 -3 30385 30454 30455 -3 30385 30455 30386 -3 30386 30455 30456 -3 30386 30456 30387 -3 30387 30456 30457 -3 30387 30457 30388 -3 30388 30457 30458 -3 30388 30458 30389 -3 30389 30458 30459 -3 30389 30459 30390 -3 30390 30459 30460 -3 30390 30460 30391 -3 30391 30460 30461 -3 30391 30461 30392 -3 30392 30461 30462 -3 30392 30462 30393 -3 30393 30462 30463 -3 30393 30463 30394 -3 30394 30463 30464 -3 30394 30464 30395 -3 30395 30464 30465 -3 30395 30465 30396 -3 30396 30465 30466 -3 30396 30466 30397 -3 30397 30466 30467 -3 30397 30467 30398 -3 30398 30467 30468 -3 30398 30468 30399 -3 30399 30468 30469 -3 30399 30469 30400 -3 30400 30469 30470 -3 30400 30470 30401 -3 30401 30470 30471 -3 30401 30471 30402 -3 30402 30471 30472 -3 30402 30472 30403 -3 30403 30472 30473 -3 30403 30473 30404 -3 30404 30473 30474 -3 30404 30474 30405 -3 30405 30474 30475 -3 30405 
30475 30406 -3 30406 30475 30476 -3 30406 30476 30407 -3 30407 30476 30477 -3 30407 30477 30408 -3 30408 30477 30478 -3 30408 30478 30409 -3 30409 30478 30479 -3 30409 30479 30410 -3 30410 30479 30480 -3 30410 30480 30411 -3 30411 30480 30481 -3 30411 30481 30412 -3 30412 30481 30482 -3 30412 30482 30413 -3 30413 30482 30483 -3 30413 30483 30414 -3 30414 30483 30484 -3 30414 30484 30415 -3 30415 30484 30485 -3 30415 30485 30416 -3 30416 30485 30486 -3 30416 30486 30417 -3 30417 30486 30487 -3 30417 30487 30418 -3 30419 30488 30489 -3 30419 30489 30420 -3 30420 30489 30490 -3 30420 30490 30421 -3 30421 30490 30491 -3 30421 30491 30422 -3 30422 30491 30492 -3 30422 30492 30423 -3 30423 30492 30493 -3 30423 30493 30424 -3 30424 30493 30494 -3 30424 30494 30425 -3 30425 30494 30495 -3 30425 30495 30426 -3 30426 30495 30496 -3 30426 30496 30427 -3 30427 30496 30497 -3 30427 30497 30428 -3 30428 30497 30498 -3 30428 30498 30429 -3 30429 30498 30499 -3 30429 30499 30430 -3 30430 30499 30500 -3 30430 30500 30431 -3 30431 30500 30501 -3 30431 30501 30432 -3 30432 30501 30502 -3 30432 30502 30433 -3 30433 30502 30503 -3 30433 30503 30434 -3 30434 30503 30504 -3 30434 30504 30435 -3 30435 30504 30505 -3 30435 30505 30436 -3 30436 30505 30506 -3 30436 30506 30437 -3 30437 30506 30507 -3 30437 30507 30438 -3 30438 30507 30508 -3 30438 30508 30439 -3 30439 30508 30509 -3 30439 30509 30440 -3 30440 30509 30510 -3 30440 30510 30441 -3 30441 30510 30511 -3 30441 30511 30442 -3 30442 30511 30512 -3 30442 30512 30443 -3 30443 30512 30513 -3 30443 30513 30444 -3 30444 30513 30514 -3 30444 30514 30445 -3 30445 30514 30515 -3 30445 30515 30446 -3 30446 30515 30516 -3 30446 30516 30447 -3 30447 30516 30517 -3 30447 30517 30448 -3 30448 30517 30518 -3 30448 30518 30449 -3 30449 30518 30519 -3 30449 30519 30450 -3 30450 30519 30520 -3 30450 30520 30451 -3 30451 30520 30521 -3 30451 30521 30452 -3 30452 30521 30522 -3 30452 30522 30453 -3 30453 30522 30523 -3 30453 30523 30454 -3 30454 30523 30524 -3 30454 30524 30455 -3 30455 30524 30525 -3 30455 30525 30456 -3 30456 30525 30526 -3 30456 30526 30457 -3 30457 30526 30527 -3 30457 30527 30458 -3 30458 30527 30528 -3 30458 30528 30459 -3 30459 30528 30529 -3 30459 30529 30460 -3 30460 30529 30530 -3 30460 30530 30461 -3 30461 30530 30531 -3 30461 30531 30462 -3 30462 30531 30532 -3 30462 30532 30463 -3 30463 30532 30533 -3 30463 30533 30464 -3 30464 30533 30534 -3 30464 30534 30465 -3 30465 30534 30535 -3 30465 30535 30466 -3 30466 30535 30467 -3 30536 30467 30535 -3 30467 30536 30468 -3 30537 30468 30536 -3 30468 30537 30469 -3 30538 30469 30537 -3 30469 30538 30470 -3 30539 30470 30538 -3 30470 30539 30471 -3 30540 30471 30539 -3 30471 30540 30472 -3 30541 30472 30540 -3 30472 30541 30473 -3 30542 30473 30541 -3 30473 30542 30474 -3 30543 30474 30542 -3 30474 30543 30475 -3 30544 30475 30543 -3 30475 30544 30476 -3 30545 30476 30544 -3 30476 30545 30477 -3 30546 30477 30545 -3 30477 30546 30478 -3 30547 30478 30546 -3 30478 30547 30479 -3 30548 30479 30547 -3 30479 30548 30480 -3 30549 30480 30548 -3 30480 30549 30481 -3 30550 30481 30549 -3 30481 30550 30482 -3 30551 30482 30550 -3 30482 30551 30483 -3 30552 30483 30551 -3 30483 30552 30484 -3 30553 30484 30552 -3 30484 30553 30485 -3 30554 30485 30553 -3 30485 30554 30486 -3 30555 30486 30554 -3 30486 30555 30487 -3 30556 30487 30555 -3 30488 30557 30489 -3 30558 30489 30557 -3 30489 30558 30490 -3 30559 30490 30558 -3 30490 30559 30491 -3 30560 30491 30559 -3 30491 30560 30492 -3 30561 30492 30560 -3 30492 30561 
30493 -3 30562 30493 30561 -3 30493 30562 30494 -3 30563 30494 30562 -3 30494 30563 30495 -3 30564 30495 30563 -3 30495 30564 30496 -3 30565 30496 30564 -3 30496 30565 30497 -3 30566 30497 30565 -3 30497 30566 30498 -3 30567 30498 30566 -3 30498 30567 30499 -3 30568 30499 30567 -3 30499 30568 30500 -3 30569 30500 30568 -3 30500 30569 30501 -3 30570 30501 30569 -3 30501 30570 30502 -3 30571 30502 30570 -3 30502 30571 30503 -3 30572 30503 30571 -3 30503 30572 30504 -3 30573 30504 30572 -3 30504 30573 30505 -3 30574 30505 30573 -3 30505 30574 30506 -3 30575 30506 30574 -3 30506 30575 30507 -3 30576 30507 30575 -3 30507 30576 30508 -3 30577 30508 30576 -3 30508 30577 30509 -3 30578 30509 30577 -3 30509 30578 30510 -3 30579 30510 30578 -3 30510 30579 30511 -3 30580 30511 30579 -3 30511 30580 30512 -3 30581 30512 30580 -3 30512 30581 30513 -3 30582 30513 30581 -3 30513 30582 30514 -3 30583 30514 30582 -3 30514 30583 30515 -3 30584 30515 30583 -3 30515 30584 30516 -3 30585 30516 30584 -3 30516 30585 30517 -3 30586 30517 30585 -3 30517 30586 30518 -3 30587 30518 30586 -3 30518 30587 30519 -3 30588 30519 30587 -3 30519 30588 30520 -3 30589 30520 30588 -3 30520 30589 30521 -3 30590 30521 30589 -3 30521 30590 30522 -3 30591 30522 30590 -3 30522 30591 30523 -3 30592 30523 30591 -3 30523 30592 30524 -3 30593 30524 30592 -3 30524 30593 30525 -3 30594 30525 30593 -3 30525 30594 30526 -3 30595 30526 30594 -3 30526 30595 30527 -3 30596 30527 30595 -3 30527 30596 30528 -3 30597 30528 30596 -3 30528 30597 30529 -3 30598 30529 30597 -3 30529 30598 30530 -3 30599 30530 30598 -3 30530 30599 30531 -3 30600 30531 30599 -3 30531 30600 30532 -3 30601 30532 30600 -3 30532 30601 30533 -3 30602 30533 30601 -3 30533 30602 30534 -3 30603 30534 30602 -3 30534 30603 30535 -3 30604 30535 30603 -3 30535 30604 30536 -3 30605 30536 30604 -3 30536 30605 30537 -3 30606 30537 30605 -3 30537 30606 30538 -3 30607 30538 30606 -3 30538 30607 30539 -3 30608 30539 30607 -3 30539 30608 30540 -3 30609 30540 30608 -3 30540 30609 30541 -3 30610 30541 30609 -3 30541 30610 30542 -3 30611 30542 30610 -3 30542 30611 30543 -3 30612 30543 30611 -3 30543 30612 30544 -3 30613 30544 30612 -3 30544 30613 30545 -3 30614 30545 30613 -3 30545 30614 30546 -3 30615 30546 30614 -3 30546 30615 30547 -3 30616 30547 30615 -3 30547 30616 30548 -3 30617 30548 30616 -3 30548 30617 30549 -3 30618 30549 30617 -3 30549 30618 30550 -3 30619 30550 30618 -3 30550 30619 30551 -3 30620 30551 30619 -3 30551 30620 30552 -3 30621 30552 30620 -3 30552 30621 30553 -3 30622 30553 30621 -3 30553 30622 30554 -3 30623 30554 30622 -3 30554 30623 30555 -3 30624 30555 30623 -3 30555 30624 30556 -3 30625 30556 30624 -3 30557 30626 30558 -3 30627 30558 30626 -3 30558 30627 30559 -3 30628 30559 30627 -3 30559 30628 30560 -3 30629 30560 30628 -3 30560 30629 30561 -3 30630 30561 30629 -3 30561 30630 30562 -3 30631 30562 30630 -3 30562 30631 30563 -3 30632 30563 30631 -3 30563 30632 30564 -3 30633 30564 30632 -3 30564 30633 30565 -3 30634 30565 30633 -3 30565 30634 30566 -3 30635 30566 30634 -3 30566 30635 30567 -3 30636 30567 30635 -3 30567 30636 30568 -3 30637 30568 30636 -3 30568 30637 30569 -3 30638 30569 30637 -3 30569 30638 30570 -3 30639 30570 30638 -3 30570 30639 30571 -3 30640 30571 30639 -3 30571 30640 30572 -3 30641 30572 30640 -3 30572 30641 30573 -3 30642 30573 30641 -3 30573 30642 30643 -3 30573 30643 30574 -3 30574 30643 30644 -3 30574 30644 30575 -3 30575 30644 30645 -3 30575 30645 30576 -3 30576 30645 30646 -3 30576 30646 30577 -3 30577 30646 30647 -3 30577 30647 30578 
-3 30578 30647 30648 -3 30578 30648 30579 -3 30579 30648 30649 -3 30579 30649 30580 -3 30580 30649 30650 -3 30580 30650 30581 -3 30581 30650 30651 -3 30581 30651 30582 -3 30582 30651 30652 -3 30582 30652 30583 -3 30583 30652 30653 -3 30583 30653 30584 -3 30584 30653 30654 -3 30584 30654 30585 -3 30585 30654 30655 -3 30585 30655 30586 -3 30586 30655 30656 -3 30586 30656 30587 -3 30587 30656 30657 -3 30587 30657 30588 -3 30588 30657 30658 -3 30588 30658 30589 -3 30589 30658 30659 -3 30589 30659 30590 -3 30590 30659 30660 -3 30590 30660 30591 -3 30591 30660 30661 -3 30591 30661 30592 -3 30592 30661 30662 -3 30592 30662 30593 -3 30593 30662 30663 -3 30593 30663 30594 -3 30594 30663 30664 -3 30594 30664 30595 -3 30595 30664 30665 -3 30595 30665 30596 -3 30596 30665 30666 -3 30596 30666 30597 -3 30597 30666 30667 -3 30597 30667 30598 -3 30598 30667 30668 -3 30598 30668 30599 -3 30599 30668 30669 -3 30599 30669 30600 -3 30600 30669 30670 -3 30600 30670 30601 -3 30601 30670 30671 -3 30601 30671 30602 -3 30602 30671 30672 -3 30602 30672 30603 -3 30603 30672 30673 -3 30603 30673 30604 -3 30604 30673 30674 -3 30604 30674 30605 -3 30605 30674 30675 -3 30605 30675 30606 -3 30606 30675 30676 -3 30606 30676 30607 -3 30607 30676 30677 -3 30607 30677 30608 -3 30608 30677 30678 -3 30608 30678 30609 -3 30609 30678 30679 -3 30609 30679 30610 -3 30610 30679 30680 -3 30610 30680 30611 -3 30611 30680 30681 -3 30611 30681 30612 -3 30612 30681 30682 -3 30612 30682 30613 -3 30613 30682 30683 -3 30613 30683 30614 -3 30614 30683 30684 -3 30614 30684 30615 -3 30615 30684 30685 -3 30615 30685 30616 -3 30616 30685 30686 -3 30616 30686 30617 -3 30617 30686 30687 -3 30617 30687 30618 -3 30618 30687 30688 -3 30618 30688 30619 -3 30619 30688 30689 -3 30619 30689 30620 -3 30620 30689 30690 -3 30620 30690 30621 -3 30621 30690 30691 -3 30621 30691 30622 -3 30622 30691 30692 -3 30622 30692 30623 -3 30623 30692 30693 -3 30623 30693 30624 -3 30624 30693 30694 -3 30624 30694 30625 -3 30626 30695 30696 -3 30626 30696 30627 -3 30627 30696 30697 -3 30627 30697 30628 -3 30628 30697 30698 -3 30628 30698 30629 -3 30629 30698 30699 -3 30629 30699 30630 -3 30630 30699 30700 -3 30630 30700 30631 -3 30631 30700 30701 -3 30631 30701 30632 -3 30632 30701 30702 -3 30632 30702 30633 -3 30633 30702 30703 -3 30633 30703 30634 -3 30634 30703 30704 -3 30634 30704 30635 -3 30635 30704 30705 -3 30635 30705 30636 -3 30636 30705 30706 -3 30636 30706 30637 -3 30637 30706 30707 -3 30637 30707 30638 -3 30638 30707 30708 -3 30638 30708 30639 -3 30639 30708 30709 -3 30639 30709 30640 -3 30640 30709 30710 -3 30640 30710 30641 -3 30641 30710 30711 -3 30641 30711 30642 -3 30642 30711 30712 -3 30642 30712 30643 -3 30643 30712 30713 -3 30643 30713 30644 -3 30644 30713 30714 -3 30644 30714 30645 -3 30645 30714 30715 -3 30645 30715 30646 -3 30646 30715 30716 -3 30646 30716 30647 -3 30647 30716 30717 -3 30647 30717 30648 -3 30648 30717 30718 -3 30648 30718 30649 -3 30649 30718 30719 -3 30649 30719 30650 -3 30650 30719 30720 -3 30650 30720 30651 -3 30651 30720 30721 -3 30651 30721 30652 -3 30652 30721 30722 -3 30652 30722 30653 -3 30653 30722 30723 -3 30653 30723 30654 -3 30654 30723 30724 -3 30654 30724 30655 -3 30655 30724 30725 -3 30655 30725 30656 -3 30656 30725 30726 -3 30656 30726 30657 -3 30657 30726 30727 -3 30657 30727 30658 -3 30658 30727 30728 -3 30658 30728 30659 -3 30659 30728 30729 -3 30659 30729 30660 -3 30660 30729 30730 -3 30660 30730 30661 -3 30661 30730 30731 -3 30661 30731 30662 -3 30662 30731 30732 -3 30662 30732 30663 -3 30663 30732 30733 -3 
30663 30733 30664 -3 30664 30733 30734 -3 30664 30734 30665 -3 30665 30734 30735 -3 30665 30735 30666 -3 30666 30735 30736 -3 30666 30736 30667 -3 30667 30736 30737 -3 30667 30737 30668 -3 30668 30737 30738 -3 30668 30738 30669 -3 30669 30738 30739 -3 30669 30739 30670 -3 30670 30739 30740 -3 30670 30740 30671 -3 30671 30740 30741 -3 30671 30741 30672 -3 30672 30741 30742 -3 30672 30742 30673 -3 30673 30742 30743 -3 30673 30743 30674 -3 30674 30743 30744 -3 30674 30744 30675 -3 30675 30744 30745 -3 30675 30745 30676 -3 30676 30745 30746 -3 30676 30746 30677 -3 30677 30746 30747 -3 30677 30747 30678 -3 30678 30747 30748 -3 30678 30748 30679 -3 30679 30748 30749 -3 30679 30749 30680 -3 30680 30749 30750 -3 30680 30750 30681 -3 30681 30750 30682 -3 30751 30682 30750 -3 30682 30751 30683 -3 30752 30683 30751 -3 30683 30752 30684 -3 30753 30684 30752 -3 30684 30753 30685 -3 30754 30685 30753 -3 30685 30754 30686 -3 30755 30686 30754 -3 30686 30755 30687 -3 30756 30687 30755 -3 30687 30756 30688 -3 30757 30688 30756 -3 30688 30757 30689 -3 30758 30689 30757 -3 30689 30758 30690 -3 30759 30690 30758 -3 30690 30759 30691 -3 30760 30691 30759 -3 30691 30760 30692 -3 30761 30692 30760 -3 30692 30761 30693 -3 30762 30693 30761 -3 30693 30762 30694 -3 30763 30694 30762 -3 30695 30764 30696 -3 30765 30696 30764 -3 30696 30765 30697 -3 30766 30697 30765 -3 30697 30766 30698 -3 30767 30698 30766 -3 30698 30767 30699 -3 30768 30699 30767 -3 30699 30768 30700 -3 30769 30700 30768 -3 30700 30769 30701 -3 30770 30701 30769 -3 30701 30770 30702 -3 30771 30702 30770 -3 30702 30771 30703 -3 30772 30703 30771 -3 30703 30772 30704 -3 30773 30704 30772 -3 30704 30773 30705 -3 30774 30705 30773 -3 30705 30774 30706 -3 30775 30706 30774 -3 30706 30775 30707 -3 30776 30707 30775 -3 30707 30776 30708 -3 30777 30708 30776 -3 30708 30777 30709 -3 30778 30709 30777 -3 30709 30778 30710 -3 30779 30710 30778 -3 30710 30779 30711 -3 30780 30711 30779 -3 30711 30780 30712 -3 30781 30712 30780 -3 30712 30781 30713 -3 30782 30713 30781 -3 30713 30782 30714 -3 30783 30714 30782 -3 30714 30783 30715 -3 30784 30715 30783 -3 30715 30784 30716 -3 30785 30716 30784 -3 30716 30785 30717 -3 30786 30717 30785 -3 30717 30786 30718 -3 30787 30718 30786 -3 30718 30787 30719 -3 30788 30719 30787 -3 30719 30788 30720 -3 30789 30720 30788 -3 30720 30789 30721 -3 30790 30721 30789 -3 30721 30790 30722 -3 30791 30722 30790 -3 30722 30791 30723 -3 30792 30723 30791 -3 30723 30792 30724 -3 30793 30724 30792 -3 30724 30793 30725 -3 30794 30725 30793 -3 30725 30794 30726 -3 30795 30726 30794 -3 30726 30795 30727 -3 30796 30727 30795 -3 30727 30796 30728 -3 30797 30728 30796 -3 30728 30797 30729 -3 30798 30729 30797 -3 30729 30798 30730 -3 30799 30730 30798 -3 30730 30799 30731 -3 30800 30731 30799 -3 30731 30800 30732 -3 30801 30732 30800 -3 30732 30801 30733 -3 30802 30733 30801 -3 30733 30802 30734 -3 30803 30734 30802 -3 30734 30803 30735 -3 30804 30735 30803 -3 30735 30804 30736 -3 30805 30736 30804 -3 30736 30805 30737 -3 30806 30737 30805 -3 30737 30806 30738 -3 30807 30738 30806 -3 30738 30807 30739 -3 30808 30739 30807 -3 30739 30808 30740 -3 30809 30740 30808 -3 30740 30809 30741 -3 30810 30741 30809 -3 30741 30810 30742 -3 30811 30742 30810 -3 30742 30811 30743 -3 30812 30743 30811 -3 30743 30812 30744 -3 30813 30744 30812 -3 30744 30813 30745 -3 30814 30745 30813 -3 30745 30814 30746 -3 30815 30746 30814 -3 30746 30815 30747 -3 30816 30747 30815 -3 30747 30816 30748 -3 30817 30748 30816 -3 30748 30817 30749 -3 30818 30749 30817 -3 30749 
30818 30750 -3 30819 30750 30818 -3 30750 30819 30751 -3 30820 30751 30819 -3 30751 30820 30752 -3 30821 30752 30820 -3 30752 30821 30753 -3 30822 30753 30821 -3 30753 30822 30754 -3 30823 30754 30822 -3 30754 30823 30755 -3 30824 30755 30823 -3 30755 30824 30756 -3 30825 30756 30824 -3 30756 30825 30757 -3 30826 30757 30825 -3 30757 30826 30758 -3 30827 30758 30826 -3 30758 30827 30759 -3 30828 30759 30827 -3 30759 30828 30760 -3 30829 30760 30828 -3 30760 30829 30761 -3 30830 30761 30829 -3 30761 30830 30762 -3 30831 30762 30830 -3 30762 30831 30763 -3 30832 30763 30831 -3 30764 30833 30765 -3 30834 30765 30833 -3 30765 30834 30766 -3 30835 30766 30834 -3 30766 30835 30767 -3 30836 30767 30835 -3 30767 30836 30768 -3 30837 30768 30836 -3 30768 30837 30769 -3 30838 30769 30837 -3 30769 30838 30770 -3 30839 30770 30838 -3 30770 30839 30771 -3 30840 30771 30839 -3 30771 30840 30772 -3 30841 30772 30840 -3 30772 30841 30773 -3 30842 30773 30841 -3 30773 30842 30774 -3 30843 30774 30842 -3 30774 30843 30775 -3 30844 30775 30843 -3 30775 30844 30776 -3 30845 30776 30844 -3 30776 30845 30777 -3 30846 30777 30845 -3 30777 30846 30778 -3 30847 30778 30846 -3 30778 30847 30779 -3 30848 30779 30847 -3 30779 30848 30780 -3 30849 30780 30848 -3 30780 30849 30781 -3 30850 30781 30849 -3 30781 30850 30782 -3 30851 30782 30850 -3 30782 30851 30783 -3 30852 30783 30851 -3 30783 30852 30784 -3 30853 30784 30852 -3 30784 30853 30785 -3 30854 30785 30853 -3 30785 30854 30786 -3 30855 30786 30854 -3 30786 30855 30787 -3 30856 30787 30855 -3 30787 30856 30788 -3 30857 30788 30856 -3 30788 30857 30789 -3 30858 30789 30857 -3 30789 30858 30859 -3 30789 30859 30790 -3 30790 30859 30860 -3 30790 30860 30791 -3 30791 30860 30861 -3 30791 30861 30792 -3 30792 30861 30862 -3 30792 30862 30793 -3 30793 30862 30863 -3 30793 30863 30794 -3 30794 30863 30864 -3 30794 30864 30795 -3 30795 30864 30865 -3 30795 30865 30796 -3 30796 30865 30866 -3 30796 30866 30797 -3 30797 30866 30867 -3 30797 30867 30798 -3 30798 30867 30868 -3 30798 30868 30799 -3 30799 30868 30869 -3 30799 30869 30800 -3 30800 30869 30870 -3 30800 30870 30801 -3 30801 30870 30871 -3 30801 30871 30802 -3 30802 30871 30872 -3 30802 30872 30803 -3 30803 30872 30873 -3 30803 30873 30804 -3 30804 30873 30874 -3 30804 30874 30805 -3 30805 30874 30875 -3 30805 30875 30806 -3 30806 30875 30876 -3 30806 30876 30807 -3 30807 30876 30877 -3 30807 30877 30808 -3 30808 30877 30878 -3 30808 30878 30809 -3 30809 30878 30879 -3 30809 30879 30810 -3 30810 30879 30880 -3 30810 30880 30811 -3 30811 30880 30881 -3 30811 30881 30812 -3 30812 30881 30882 -3 30812 30882 30813 -3 30813 30882 30883 -3 30813 30883 30814 -3 30814 30883 30884 -3 30814 30884 30815 -3 30815 30884 30885 -3 30815 30885 30816 -3 30816 30885 30886 -3 30816 30886 30817 -3 30817 30886 30887 -3 30817 30887 30818 -3 30818 30887 30888 -3 30818 30888 30819 -3 30819 30888 30889 -3 30819 30889 30820 -3 30820 30889 30890 -3 30820 30890 30821 -3 30821 30890 30891 -3 30821 30891 30822 -3 30822 30891 30892 -3 30822 30892 30823 -3 30823 30892 30893 -3 30823 30893 30824 -3 30824 30893 30894 -3 30824 30894 30825 -3 30825 30894 30895 -3 30825 30895 30826 -3 30826 30895 30896 -3 30826 30896 30827 -3 30827 30896 30897 -3 30827 30897 30828 -3 30828 30897 30898 -3 30828 30898 30829 -3 30829 30898 30899 -3 30829 30899 30830 -3 30830 30899 30900 -3 30830 30900 30831 -3 30831 30900 30901 -3 30831 30901 30832 -3 30834 30902 30903 -3 30834 30903 30835 -3 30835 30903 30904 -3 30835 30904 30836 -3 30836 30904 30905 -3 30836 30905 
30837 -3 30837 30905 30906 -3 30837 30906 30838 -3 30838 30906 30907 -3 30838 30907 30839 -3 30839 30907 30908 -3 30839 30908 30840 -3 30840 30908 30909 -3 30840 30909 30841 -3 30841 30909 30910 -3 30841 30910 30842 -3 30842 30910 30911 -3 30842 30911 30843 -3 30843 30911 30912 -3 30843 30912 30844 -3 30844 30912 30913 -3 30844 30913 30845 -3 30845 30913 30914 -3 30845 30914 30846 -3 30846 30914 30915 -3 30846 30915 30847 -3 30847 30915 30916 -3 30847 30916 30848 -3 30848 30916 30917 -3 30848 30917 30849 -3 30849 30917 30918 -3 30849 30918 30850 -3 30850 30918 30919 -3 30850 30919 30851 -3 30851 30919 30920 -3 30851 30920 30852 -3 30852 30920 30921 -3 30852 30921 30853 -3 30853 30921 30922 -3 30853 30922 30854 -3 30854 30922 30923 -3 30854 30923 30855 -3 30855 30923 30924 -3 30855 30924 30856 -3 30856 30924 30925 -3 30856 30925 30857 -3 30857 30925 30926 -3 30857 30926 30858 -3 30858 30926 30927 -3 30858 30927 30859 -3 30859 30927 30928 -3 30859 30928 30860 -3 30860 30928 30929 -3 30860 30929 30861 -3 30861 30929 30930 -3 30861 30930 30862 -3 30862 30930 30931 -3 30862 30931 30863 -3 30863 30931 30932 -3 30863 30932 30864 -3 30864 30932 30933 -3 30864 30933 30865 -3 30865 30933 30934 -3 30865 30934 30866 -3 30866 30934 30935 -3 30866 30935 30867 -3 30867 30935 30936 -3 30867 30936 30868 -3 30868 30936 30937 -3 30868 30937 30869 -3 30869 30937 30938 -3 30869 30938 30870 -3 30870 30938 30939 -3 30870 30939 30871 -3 30871 30939 30940 -3 30871 30940 30872 -3 30872 30940 30941 -3 30872 30941 30873 -3 30873 30941 30942 -3 30873 30942 30874 -3 30874 30942 30943 -3 30874 30943 30875 -3 30875 30943 30944 -3 30875 30944 30876 -3 30876 30944 30945 -3 30876 30945 30877 -3 30877 30945 30946 -3 30877 30946 30878 -3 30878 30946 30947 -3 30878 30947 30879 -3 30879 30947 30948 -3 30879 30948 30880 -3 30880 30948 30949 -3 30880 30949 30881 -3 30881 30949 30950 -3 30881 30950 30882 -3 30882 30950 30951 -3 30882 30951 30883 -3 30883 30951 30952 -3 30883 30952 30884 -3 30884 30952 30953 -3 30884 30953 30885 -3 30885 30953 30954 -3 30885 30954 30886 -3 30886 30954 30955 -3 30886 30955 30887 -3 30887 30955 30956 -3 30887 30956 30888 -3 30888 30956 30957 -3 30888 30957 30889 -3 30889 30957 30958 -3 30889 30958 30890 -3 30890 30958 30959 -3 30890 30959 30891 -3 30891 30959 30960 -3 30891 30960 30892 -3 30892 30960 30961 -3 30892 30961 30893 -3 30893 30961 30962 -3 30893 30962 30894 -3 30894 30962 30963 -3 30894 30963 30895 -3 30895 30963 30964 -3 30895 30964 30896 -3 30896 30964 30965 -3 30896 30965 30897 -3 30897 30965 30966 -3 30897 30966 30898 -3 30898 30966 30899 -3 30967 30899 30966 -3 30899 30967 30900 -3 30968 30900 30967 -3 30903 30969 30904 -3 30970 30904 30969 -3 30904 30970 30905 -3 30971 30905 30970 -3 30905 30971 30906 -3 30972 30906 30971 -3 30906 30972 30907 -3 30973 30907 30972 -3 30907 30973 30908 -3 30974 30908 30973 -3 30908 30974 30909 -3 30975 30909 30974 -3 30909 30975 30910 -3 30976 30910 30975 -3 30910 30976 30911 -3 30977 30911 30976 -3 30911 30977 30912 -3 30978 30912 30977 -3 30912 30978 30913 -3 30979 30913 30978 -3 30913 30979 30914 -3 30980 30914 30979 -3 30914 30980 30915 -3 30981 30915 30980 -3 30915 30981 30916 -3 30982 30916 30981 -3 30916 30982 30917 -3 30983 30917 30982 -3 30917 30983 30918 -3 30984 30918 30983 -3 30918 30984 30919 -3 30985 30919 30984 -3 30919 30985 30920 -3 30986 30920 30985 -3 30920 30986 30921 -3 30987 30921 30986 -3 30921 30987 30922 -3 30988 30922 30987 -3 30922 30988 30923 -3 30989 30923 30988 -3 30923 30989 30924 -3 30990 30924 30989 -3 30924 30990 30925 
-3 30991 30925 30990 -3 30925 30991 30926 -3 30992 30926 30991 -3 30926 30992 30927 -3 30993 30927 30992 -3 30927 30993 30928 -3 30994 30928 30993 -3 30928 30994 30929 -3 30995 30929 30994 -3 30929 30995 30930 -3 30996 30930 30995 -3 30930 30996 30931 -3 30997 30931 30996 -3 30931 30997 30932 -3 30998 30932 30997 -3 30932 30998 30933 -3 30999 30933 30998 -3 30933 30999 30934 -3 31000 30934 30999 -3 30934 31000 30935 -3 31001 30935 31000 -3 30935 31001 30936 -3 31002 30936 31001 -3 30936 31002 30937 -3 31003 30937 31002 -3 30937 31003 30938 -3 31004 30938 31003 -3 30938 31004 30939 -3 31005 30939 31004 -3 30939 31005 30940 -3 31006 30940 31005 -3 30940 31006 30941 -3 31007 30941 31006 -3 30941 31007 30942 -3 31008 30942 31007 -3 30942 31008 30943 -3 31009 30943 31008 -3 30943 31009 30944 -3 31010 30944 31009 -3 30944 31010 30945 -3 31011 30945 31010 -3 30945 31011 30946 -3 31012 30946 31011 -3 30946 31012 30947 -3 31013 30947 31012 -3 30947 31013 30948 -3 31014 30948 31013 -3 30948 31014 30949 -3 31015 30949 31014 -3 30949 31015 30950 -3 31016 30950 31015 -3 30950 31016 30951 -3 31017 30951 31016 -3 30951 31017 30952 -3 31018 30952 31017 -3 30952 31018 30953 -3 31019 30953 31018 -3 30953 31019 30954 -3 31020 30954 31019 -3 30954 31020 30955 -3 31021 30955 31020 -3 30955 31021 30956 -3 31022 30956 31021 -3 30956 31022 30957 -3 31023 30957 31022 -3 30957 31023 30958 -3 31024 30958 31023 -3 30958 31024 30959 -3 31025 30959 31024 -3 30959 31025 30960 -3 31026 30960 31025 -3 30960 31026 30961 -3 31027 30961 31026 -3 30961 31027 30962 -3 31028 30962 31027 -3 30962 31028 30963 -3 31029 30963 31028 -3 30963 31029 30964 -3 31030 30964 31029 -3 30964 31030 30965 -3 31031 30965 31030 -3 30965 31031 30966 -3 31032 30966 31031 -3 30966 31032 30967 -3 31033 30967 31032 From 8bf5af28bc1f442b6e73bf5da802b08e57be445e Mon Sep 17 00:00:00 2001 From: Rinat Mukhometzianov <25082858+rmukh@users.noreply.github.com> Date: Sat, 21 Aug 2021 05:49:37 -0400 Subject: [PATCH 1038/1665] coord for mrconvert mrconvert crashes if -coord is float --- nipype/interfaces/mrtrix3/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index f999db7001..77c3047f20 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -560,7 +560,7 @@ class MRConvertInputSpec(MRTrix3BaseInputSpec): desc="output image", ) coord = traits.List( - traits.Float, + traits.Int, sep=" ", argstr="-coord %s", desc="extract data at the specified coordinates", From 204ba9896a295503d45c386826263dff04602bb3 Mon Sep 17 00:00:00 2001 From: mnoergaard Date: Tue, 31 Aug 2021 11:52:12 +0200 Subject: [PATCH 1039/1665] Update mri_glmfit to include pharmacokinetic models --- nipype/interfaces/freesurfer/model.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/freesurfer/model.py b/nipype/interfaces/freesurfer/model.py index 81758b6ac3..b12f1e8688 100644 --- a/nipype/interfaces/freesurfer/model.py +++ b/nipype/interfaces/freesurfer/model.py @@ -401,6 +401,21 @@ class GLMFitInputSpec(FSTraitedSpec): synth = traits.Bool(argstr="--synth", desc="replace input with gaussian") resynth_test = traits.Int(argstr="--resynthtest %d", desc="test GLM by resynthsis") profile = traits.Int(argstr="--profile %d", desc="niters : test speed") + mrtm1 = InputMultiPath( + traits.Tuple(File(exists=True), File(exists=True)), + argstr="--mrtm1 %s %s...", + desc="Reference time activity curve and frame 
times", + ) + mrtm2 = InputMultiPath( + traits.Tuple(File(exists=True), File(exists=True), File(exists=True)), + argstr="--mrtm2 %s %s %s...", + desc="Reference time activity curve, frame times and k2p", + ) + logan = InputMultiPath( + traits.Tuple(File(exists=True), File(exists=True), File(exists=True)), + argstr="--logan %s %s %s...", + desc="Reference time activity curve, frame times and tstar", + ) force_perm = traits.Bool( argstr="--perm-force", desc="force perumtation test, even when design matrix is not orthog", @@ -423,7 +438,10 @@ class GLMFitInputSpec(FSTraitedSpec): sim_done_file = File( argstr="--sim-done %s", desc="create file when simulation finished" ) - + nii_gz = traits.Bool( + argstr='--nii.gz', + desc='save outputs as nii.gz', + ) class GLMFitOutputSpec(TraitedSpec): @@ -444,7 +462,7 @@ class GLMFitOutputSpec(TraitedSpec): frame_eigenvectors = File(desc="matrix of frame eigenvectors from residual PCA") singular_values = File(desc="matrix singular values from residual PCA") svd_stats_file = File(desc="text file summarizing the residual PCA") - + class GLMFit(FSCommand): """Use FreeSurfer's mri_glmfit to specify and estimate a general linear model. From dbbe6dcfb10dee3d36d29990106db913f0b9cfa7 Mon Sep 17 00:00:00 2001 From: mnoergaard Date: Wed, 8 Sep 2021 16:10:07 +0200 Subject: [PATCH 1040/1665] Add petsurfer module --- nipype/interfaces/freesurfer/petsurfer.py | 286 ++++++++++++++++++++++ 1 file changed, 286 insertions(+) create mode 100644 nipype/interfaces/freesurfer/petsurfer.py diff --git a/nipype/interfaces/freesurfer/petsurfer.py b/nipype/interfaces/freesurfer/petsurfer.py new file mode 100644 index 0000000000..6dc87433c0 --- /dev/null +++ b/nipype/interfaces/freesurfer/petsurfer.py @@ -0,0 +1,286 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Provides interfaces to various commands for running PET analyses provided by FreeSurfer +""" + +import os +import os.path as op +from glob import glob +import shutil +import sys + +import numpy as np +from nibabel import load + +from ... 
import logging, LooseVersion +from ...utils.filemanip import fname_presuffix, check_depends +from ..io import FreeSurferSource +from ..base import ( + TraitedSpec, + File, + traits, + Directory, + InputMultiPath, + OutputMultiPath, + CommandLine, + CommandLineInputSpec, + isdefined, +) +from .base import FSCommand, FSTraitedSpec, FSTraitedSpecOpenMP, FSCommandOpenMP, Info +from .utils import copy2subjdir + +__docformat__ = "restructuredtext" +iflogger = logging.getLogger("nipype.interface") + +# Keeping this to avoid breaking external programs that depend on it, but +# this should not be used internally +FSVersion = Info.looseversion().vstring + +class GTMSegInputSpec(FSTraitedSpec): + + subject_id = traits.String( + argstr="--s %s", + desc="subject id", + mandatory=True + ) + + xcerseg = traits.Bool( + argstr="--xcerseg", + desc="run xcerebralseg on this subject to create apas+head.mgz" + ) + + out_file = File( + argstr="--o %s", + desc="output volume relative to subject/mri (default is gtmseg.mgz)" + ) + + usf = traits.Int( + argstr="--usf %i", + desc="upsampling factor (default is 2)" + ) + + subsegwm = traits.Bool( + argstr="--subsegwm", + desc="subsegment WM into lobes (default)" + ) + + keep_hypo = traits.Bool( + argstr="--keep-hypo", + desc="do not relabel hypointensities as WM when subsegmenting WM" + ) + + keep_cc = traits.Bool( + argstr="--keep-cc", + desc="do not relabel corpus callosum as WM" + ) + + dmax = traits.Float( + argstr="--dmax %f", + desc="distance threshold to use when subsegmenting WM (default is 5)" + ) + + ctx_annot = traits.Tuple( + traits.String, + traits.Int, + traits.Int, + argstr="--ctx-annot %s %i %i", + desc="annot lhbase rhbase : annotation to use for cortical segmentation (default is aparc 1000 2000)" + ) + + wm_annot = traits.Tuple( + traits.String, + traits.Int, + traits.Int, + argstr="--wm-annot %s %i %i", + desc="annot lhbase rhbase : annotation to use for WM segmentation (with --subsegwm, default is lobes 3200 4200)" + ) + + output_usf = traits.Int( + argstr="--output-usf %i", + desc="set output USF different than USF, mostly for debugging" + ) + + head = traits.String( + argstr="--head %s", + desc="use headseg instead of apas+head.mgz" + ) + + subseg_cblum_wm = traits.Bool( + argstr="--subseg-cblum-wm", + desc="subsegment cerebellum WM into core and gyri" + ) + + no_pons = traits.Bool( + argstr="--no-pons", + desc="do not add pons segmentation when doing ---xcerseg" + ) + + no_vermis = traits.Bool( + argstr="--no-vermis", + desc="do not add vermis segmentation when doing ---xcerseg" + ) + + ctab = File( + exists=True, + argstr="--ctab %s", + desc="colortable" + ) + no_seg_stats = traits.Bool( + argstr="--no-seg-stats", + desc="do not compute segmentation stats" + ) + + +class GTMSegOutputSpec(TraitedSpec): + out_file = File(exists=True, desc="GTM segmentation") + + +class GTMSeg(FSCommand): + """create an anatomical segmentation for the geometric transfer matrix (GTM). 
+ + Examples + -------- + >>> gtmseg = GTMSeg() + >>> gtmseg.inputs.out_file = 'gtmseg.nii' + >>> gtmseg.inputs.subject_id = 'subjec_id' + >>> gtmseg.cmdline == 'gtmseg --o gtmseg.nii --s subject_id' + + """ + + _cmd = "gtmseg" + input_spec = GTMSegInputSpec + output_spec = GTMSegOutputSpec + + def _format_arg(self, name, spec, value): + return super(GTMSeg, self)._format_arg(name, spec, value) + +class GTMPVCInputSpec(FSTraitedSpec): + + in_file = File( + exists=True, + argstr="--i %s", + mandatory=True, + copyfile=False, + desc="input volume - source data to pvc", + ) + + frame = + + psf = + + seg = + + reg = + + regheader = + + reg_identity = + + output_dir = traits.Str(argstr="--o %s", desc="save outputs to dir", genfile=True) + + mask = + + auto_mask = + + no_reduce_fov = + + reduce_fox_eqodd = + + contrast = InputMultiPath( + File(exists=True), argstr="--C %s...", desc="contrast file" + ) + + default_seg_merge = + + merge_hypos = + + merge_cblum_wm_gyri = + + tt_reduce = + + replace = + + replace_file = + + rescale = + + no_rescale = + + scale_refval = + + ctab = + + ctab_default = + + tt_update = + + lat = + + no_tfe = + + segpvfres = + + rbv = + + rbv_res = + + mg = + + mg_ref_cerebral_wm = + + mg_ref_lobes_wm = + + mgx = + + km_ref = + + km_hb = + + ss = + + X = + + y = + + beta = + + X0 = + + save_input = + + save_eres = + + save_yhat = + + save_yhat_with_noise = + + save_yhat_full_fov = + + save_yhat0 = + + synth = + + synth_only = + + synth_save = + + save_text = + + + +class GTMPVCOutputSpec(TraitedSpec): + +class GTMPVC(FSCommand): + +class MRTMInputSpec(FSTraitedSpec): + +class MRTMOutputSpec(TraitedSpec): + +class MRTM(FSCommand): + +class MRTM2InputSpec(FSTraitedSpec): + +class MRTM2OutputSpec(TraitedSpec): + +class MRTM2(FSCommand): \ No newline at end of file From 0d0689392bbd589a3d06fd47f081aa0347d8fa68 Mon Sep 17 00:00:00 2001 From: mnoergaard Date: Wed, 8 Sep 2021 16:30:24 +0200 Subject: [PATCH 1041/1665] Add arguments to petsurfer.py --- nipype/interfaces/freesurfer/petsurfer.py | 65 +++++++++++++++++------ 1 file changed, 50 insertions(+), 15 deletions(-) diff --git a/nipype/interfaces/freesurfer/petsurfer.py b/nipype/interfaces/freesurfer/petsurfer.py index 6dc87433c0..42998163f9 100644 --- a/nipype/interfaces/freesurfer/petsurfer.py +++ b/nipype/interfaces/freesurfer/petsurfer.py @@ -165,17 +165,39 @@ class GTMPVCInputSpec(FSTraitedSpec): desc="input volume - source data to pvc", ) - frame = + frame = traits.Int( + argstr="--frame %i", + desc="only process 0-based frame F from inputvol" + ) - psf = + psf = traits.Float( + argstr="--psf %f", + desc="scanner PSF FWHM in mm" + ) - seg = + segmentation_file = File( + exists=True, + argstr="--seg %s", + xor=_xor_inputs, + mandatory=True, + desc="anatomical segmentation to define regions for GTM", + ) - reg = + reg_file = File( + exists=True, + xor=_reg_xor, + argstr="--reg %s", + mandatory=True, + desc="LTA registration file that maps PET to anatomical", + ) - regheader = + regheader = traits.Bool( + argstr="--regheader", desc="assume input and seg share scanner space" + ) - reg_identity = + reg_identity = traits.Bool( + argstr="--regheader", desc="assume that input is in anatomical space" + ) output_dir = traits.Str(argstr="--o %s", desc="save outputs to dir", genfile=True) @@ -183,17 +205,23 @@ class GTMPVCInputSpec(FSTraitedSpec): auto_mask = - no_reduce_fov = + no_reduce_fov = traits.Bool( + argstr="--no_reduce_fov", desc="do not reduce FoV to encompass mask" + ) - reduce_fox_eqodd = + reduce_fox_eqodd = 
traits.Bool( + argstr="--reduce_fox_eqodd", desc="reduce FoV to encompass mask but force nc=nr and ns to be odd" + ) contrast = InputMultiPath( File(exists=True), argstr="--C %s...", desc="contrast file" ) - default_seg_merge = + default_seg_merge = traits.Bool( + argstr="--default_seg_merge", desc="default schema for merging ROIs" + ) - merge_hypos = + merge_hypos = merge_cblum_wm_gyri = @@ -251,13 +279,21 @@ class GTMPVCInputSpec(FSTraitedSpec): save_eres = - save_yhat = + save_yhat = traits.Bool( + argstr="--save-yhat", desc="save signal estimate (yhat)" + ) - save_yhat_with_noise = + save_yhat_with_noise = traits.Bool( + argstr="--save-yhat-with-noise", desc="save signal estimate (yhat) with noise" + ) - save_yhat_full_fov = + save_yhat_full_fov = traits.Bool( + argstr="--save_yhat_full_fov", desc="save signal estimate (yhat)" + ) - save_yhat0 = + save_yhat0 = traits.Bool( + argstr="--save_yhat0", desc="save signal estimate (yhat)" + ) synth = @@ -267,7 +303,6 @@ class GTMPVCInputSpec(FSTraitedSpec): save_text = - class GTMPVCOutputSpec(TraitedSpec): From 4df2bf6ed605727eb3b0300847cee946adb3aa4c Mon Sep 17 00:00:00 2001 From: mnoergaard Date: Wed, 8 Sep 2021 17:02:05 +0200 Subject: [PATCH 1042/1665] Update petsurfer.py --- nipype/interfaces/freesurfer/petsurfer.py | 42 ++++++++++++++++++++--- 1 file changed, 37 insertions(+), 5 deletions(-) diff --git a/nipype/interfaces/freesurfer/petsurfer.py b/nipype/interfaces/freesurfer/petsurfer.py index 42998163f9..ea5c3db119 100644 --- a/nipype/interfaces/freesurfer/petsurfer.py +++ b/nipype/interfaces/freesurfer/petsurfer.py @@ -174,7 +174,7 @@ class GTMPVCInputSpec(FSTraitedSpec): argstr="--psf %f", desc="scanner PSF FWHM in mm" ) - + _xor_inputs = ("segmentation_file", "annot", "surf_label") segmentation_file = File( exists=True, argstr="--seg %s", @@ -182,7 +182,10 @@ class GTMPVCInputSpec(FSTraitedSpec): mandatory=True, desc="anatomical segmentation to define regions for GTM", ) - + _reg_xor = ( + "reg_file", + "lta_file" + ) reg_file = File( exists=True, xor=_reg_xor, @@ -201,9 +204,16 @@ class GTMPVCInputSpec(FSTraitedSpec): output_dir = traits.Str(argstr="--o %s", desc="save outputs to dir", genfile=True) - mask = + mask_file = File( + exists=True, argstr="--mask %s", desc="ignore areas outside of the mask (in input vol space)" + ) - auto_mask = + auto_mask = traits.Tuple( + traits.Float, + traits.Float, + argstr="--auto-mask %f %f", + desc="FWHM thresh : automatically compute mask" + ) no_reduce_fov = traits.Bool( argstr="--no_reduce_fov", desc="do not reduce FoV to encompass mask" @@ -307,6 +317,22 @@ class GTMPVCInputSpec(FSTraitedSpec): class GTMPVCOutputSpec(TraitedSpec): class GTMPVC(FSCommand): + """create an anatomical segmentation for the geometric transfer matrix (GTM). 
+ + Examples + -------- + >>> gtmpvc = GTMPVC() + >>> gtmpvc.inputs.out_file = '' + >>> gtmpvc.cmdline == 'mri_gtmpvc ' + + """ + + _cmd = "mri_gtmpvc" + input_spec = GTMPVCInputSpec + output_spec = GTMPVCOutputSpec + + def _format_arg(self, name, spec, value): + return super(GTMPVC, self)._format_arg(name, spec, value) class MRTMInputSpec(FSTraitedSpec): @@ -318,4 +344,10 @@ class MRTM2InputSpec(FSTraitedSpec): class MRTM2OutputSpec(TraitedSpec): -class MRTM2(FSCommand): \ No newline at end of file +class MRTM2(FSCommand): + +class LoganRefInputSpec(FSTraitedSpec): + +class LoganRefOutputSpec(TraitedSpec): + +class LoganRef(FSCommand): \ No newline at end of file From 552d6b9033c76b90baa044396970ac0373c98827 Mon Sep 17 00:00:00 2001 From: mnoergaard Date: Wed, 8 Sep 2021 18:24:38 +0200 Subject: [PATCH 1043/1665] Update petsurfer.py --- nipype/interfaces/freesurfer/petsurfer.py | 73 +++++++++++++++++------ 1 file changed, 56 insertions(+), 17 deletions(-) diff --git a/nipype/interfaces/freesurfer/petsurfer.py b/nipype/interfaces/freesurfer/petsurfer.py index ea5c3db119..e90ec2c71d 100644 --- a/nipype/interfaces/freesurfer/petsurfer.py +++ b/nipype/interfaces/freesurfer/petsurfer.py @@ -216,11 +216,11 @@ class GTMPVCInputSpec(FSTraitedSpec): ) no_reduce_fov = traits.Bool( - argstr="--no_reduce_fov", desc="do not reduce FoV to encompass mask" + argstr="--no-reduce-fov", desc="do not reduce FoV to encompass mask" ) reduce_fox_eqodd = traits.Bool( - argstr="--reduce_fox_eqodd", desc="reduce FoV to encompass mask but force nc=nr and ns to be odd" + argstr="--reduce-fox-eqodd", desc="reduce FoV to encompass mask but force nc=nr and ns to be odd" ) contrast = InputMultiPath( @@ -228,40 +228,79 @@ class GTMPVCInputSpec(FSTraitedSpec): ) default_seg_merge = traits.Bool( - argstr="--default_seg_merge", desc="default schema for merging ROIs" + argstr="--default-seg-merge", desc="default schema for merging ROIs" ) - merge_hypos = + merge_hypos = traits.Bool( + argstr="--merge-hypos", desc="merge left and right hypointensites into to ROI" + ) - merge_cblum_wm_gyri = + merge_cblum_wm_gyri = traits.Bool( + argstr="--merge-cblum-wm-gyri", desc="cerebellum WM gyri back into cerebellum WM" + ) - tt_reduce = + tt_reduce = traits.Bool( + argstr="--tt-reduce", desc="reduce segmentation to that of a tissue type" + ) - replace = + replace = traits.Tuple( + traits.Int, + traits.Int, + argstr="--replace %i %i", + desc="Id1 Id2 : replace seg Id1 with seg Id2" replace_file = rescale = - no_rescale = + no_rescale = traits.Bool( + argstr="--no-rescale", desc="do not global rescale such that mean of reference region is scaleref" + ) - scale_refval = + scale_refval = traits.Float( + argstr="--scale-refval %f", + desc="refval : scale such that mean in reference region is refval" + ) - ctab = + _ctab_inputs = ("color_table_file", "default_color_table", "gca_color_table") + color_table_file = File( + exists=True, + argstr="--ctab %s", + xor=_ctab_inputs, + desc="color table file with seg id names", + ) - ctab_default = + default_color_table = traits.Bool( + argstr="--ctab-default", + xor=_ctab_inputs, + desc="use $FREESURFER_HOME/FreeSurferColorLUT.txt", + ) - tt_update = + tt_update = traits.Bool( + argstr="--tt-update", desc="changes tissue type of VentralDC, BrainStem, and Pons to be SubcortGM" + ) - lat = + lat = traits.Bool( + argstr="--lat", desc="lateralize tissue types" + ) - no_tfe = + no_tfe = traits.Bool( + argstr="--no-tfe", desc="do not correction for tissue fraction effect (with --psf 0 turns off PVC 
entirely)" + ) - segpvfres = + segpvfres = traits.Float( + argstr="--segpvfres %f", + desc="set the tissue fraction resolution parameter (def is 0.5)" + ) - rbv = + rbv = traits.Bool( + argstr="--rbv", desc="perform RBV PVC" + ) - rbv_res = + rbv_res = traits.Float( + argstr="--rbv-res %f", + desc="voxsize : set RBV voxel resolution (good for when standard res takes too much memory)" + ) mg = From 7fc4887e1c5df52b21fd9641130db99384038ba4 Mon Sep 17 00:00:00 2001 From: mnoergaard Date: Wed, 8 Sep 2021 21:47:13 +0200 Subject: [PATCH 1044/1665] Update petsurfer.py --- nipype/interfaces/freesurfer/petsurfer.py | 82 ++++++++++++++++------- 1 file changed, 56 insertions(+), 26 deletions(-) diff --git a/nipype/interfaces/freesurfer/petsurfer.py b/nipype/interfaces/freesurfer/petsurfer.py index e90ec2c71d..02205d5aa6 100644 --- a/nipype/interfaces/freesurfer/petsurfer.py +++ b/nipype/interfaces/freesurfer/petsurfer.py @@ -248,10 +248,11 @@ class GTMPVCInputSpec(FSTraitedSpec): traits.Int, argstr="--replace %i %i", desc="Id1 Id2 : replace seg Id1 with seg Id2" + ) - replace_file = - - rescale = + rescale = traits.List( + argstr="--rescale %s...", desc="Id1 : specify reference region(s) used to rescale (default is pons)" + ) no_rescale = traits.Bool( argstr="--no-rescale", desc="do not global rescale such that mean of reference region is scaleref" @@ -302,37 +303,75 @@ class GTMPVCInputSpec(FSTraitedSpec): desc="voxsize : set RBV voxel resolution (good for when standard res takes too much memory)" ) - mg = + mg = traits.List( + argstr="--id %s...", desc="Manually specify segmentation ids" + ) - mg_ref_cerebral_wm = + mg_ref_cerebral_wm = traits.Bool( + argstr="--mg-ref-cerebral-wm", desc=" set MG RefIds to 2 and 41" + ) - mg_ref_lobes_wm = + mg_ref_lobes_wm = traits.Bool( + argstr="--mg-ref-lobes-wm", desc="set MG RefIds to those for lobes when using wm subseg" + ) - mgx = + mgx = traits.Float( + argstr="--mgx %f", + desc="gmxthresh : GLM-based Mueller-Gaertner PVC, gmxthresh is min gm pvf bet 0 and 1" + ) - km_ref = + km_ref = traits.List( + argstr="--km-ref %s...", desc="RefId1 RefId2 ... : compute reference TAC for KM as mean of given RefIds" + ) - km_hb = + km_hb = traits.List( + argstr="--km-hb %s...", desc="RefId1 RefId2 ... : compute HiBinding TAC for KM as mean of given RefIds" + ) - ss = + ss = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + argstr="--ss %f %f %f", + desc="bpc scale dcf : steady-state analysis spec blood plasma concentration, unit scale and decay correction factor. You must also spec --km-ref. 
Turns off rescaling" + ) - X = + X = traits.Bool( + argstr="--X", desc="save X matrix in matlab4 format as X.mat (it will be big)" + ) - y = + y = traits.Bool( + argstr="--y", desc="save y matrix in matlab4 format as y.mat" + ) - beta = + beta = traits.Bool( + argstr="--beta", desc="save beta matrix in matlab4 format as beta.mat" + ) - X0 = + X0 = traits.Bool( + argstr="--X0", desc="save X0 matrix in matlab4 format as X0.mat (it will be big)" + ) - save_input = + save_input = traits.Bool( + argstr="--save-input", desc="saves rescaled input as input.rescaled.nii.gz" + ) - save_eres = + save_eres = traits.Bool( + argstr="--save-eres", desc="saves residual error" + ) save_yhat = traits.Bool( argstr="--save-yhat", desc="save signal estimate (yhat)" ) - save_yhat_with_noise = traits.Bool( + save_yhat_with_noise = traits.Tuple( + traits.Int, + traits.Int, + argstr="--ss %i %i", + desc="seed nreps : saves yhat with noise, seed < 0 for TOD" + ) + + traits.Bool( argstr="--save-yhat-with-noise", desc="save signal estimate (yhat) with noise" ) @@ -344,15 +383,6 @@ class GTMPVCInputSpec(FSTraitedSpec): argstr="--save_yhat0", desc="save signal estimate (yhat)" ) - synth = - - synth_only = - - synth_save = - - save_text = - - class GTMPVCOutputSpec(TraitedSpec): class GTMPVC(FSCommand): From bf37e28dcb5201de7fa052b5701d6947fe7f65ce Mon Sep 17 00:00:00 2001 From: mnoergaard Date: Wed, 8 Sep 2021 21:50:37 +0200 Subject: [PATCH 1045/1665] Update petsurfer.py --- nipype/interfaces/freesurfer/petsurfer.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/nipype/interfaces/freesurfer/petsurfer.py b/nipype/interfaces/freesurfer/petsurfer.py index 02205d5aa6..f92d46c873 100644 --- a/nipype/interfaces/freesurfer/petsurfer.py +++ b/nipype/interfaces/freesurfer/petsurfer.py @@ -384,6 +384,7 @@ class GTMPVCInputSpec(FSTraitedSpec): ) class GTMPVCOutputSpec(TraitedSpec): + out_file = File(exists=True, desc="PVC correction") class GTMPVC(FSCommand): """create an anatomical segmentation for the geometric transfer matrix (GTM). 
@@ -403,20 +404,20 @@ class GTMPVC(FSCommand): def _format_arg(self, name, spec, value): return super(GTMPVC, self)._format_arg(name, spec, value) -class MRTMInputSpec(FSTraitedSpec): +#class MRTMInputSpec(FSTraitedSpec): -class MRTMOutputSpec(TraitedSpec): +#class MRTMOutputSpec(TraitedSpec): -class MRTM(FSCommand): +#class MRTM(FSCommand): -class MRTM2InputSpec(FSTraitedSpec): +#class MRTM2InputSpec(FSTraitedSpec): -class MRTM2OutputSpec(TraitedSpec): +#class MRTM2OutputSpec(TraitedSpec): -class MRTM2(FSCommand): +#class MRTM2(FSCommand): -class LoganRefInputSpec(FSTraitedSpec): +#class LoganRefInputSpec(FSTraitedSpec): -class LoganRefOutputSpec(TraitedSpec): +#class LoganRefOutputSpec(TraitedSpec): -class LoganRef(FSCommand): \ No newline at end of file +#class LoganRef(FSCommand): \ No newline at end of file From f6e58edb1907f2d8702e2eda29c5bf4a8905d1b8 Mon Sep 17 00:00:00 2001 From: mnoergaard Date: Wed, 8 Sep 2021 21:57:58 +0200 Subject: [PATCH 1046/1665] Update __init__.py --- nipype/interfaces/freesurfer/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nipype/interfaces/freesurfer/__init__.py b/nipype/interfaces/freesurfer/__init__.py index 705cf895e4..4efa90039a 100644 --- a/nipype/interfaces/freesurfer/__init__.py +++ b/nipype/interfaces/freesurfer/__init__.py @@ -93,3 +93,7 @@ Paint, MRICoreg, ) +from .petsurfer import ( + GTMSeg, + GTMPVC, +) From 8c9b00e310b1f13c5dab89f1d1124796d81fa0fe Mon Sep 17 00:00:00 2001 From: mnoergaard Date: Wed, 8 Sep 2021 22:17:36 +0200 Subject: [PATCH 1047/1665] Update petsurfer.py --- nipype/interfaces/freesurfer/petsurfer.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/nipype/interfaces/freesurfer/petsurfer.py b/nipype/interfaces/freesurfer/petsurfer.py index f92d46c873..b710a1d597 100644 --- a/nipype/interfaces/freesurfer/petsurfer.py +++ b/nipype/interfaces/freesurfer/petsurfer.py @@ -174,13 +174,9 @@ class GTMPVCInputSpec(FSTraitedSpec): argstr="--psf %f", desc="scanner PSF FWHM in mm" ) - _xor_inputs = ("segmentation_file", "annot", "surf_label") - segmentation_file = File( - exists=True, - argstr="--seg %s", - xor=_xor_inputs, - mandatory=True, - desc="anatomical segmentation to define regions for GTM", + + segmentation = File( + argstr="-seg %s", exists=True, desc="segfile : anatomical segmentation to define regions for GTM" ) _reg_xor = ( "reg_file", From 32ce6ecb4c9b6681e4e89ed385f6bd4fa81dcfd8 Mon Sep 17 00:00:00 2001 From: mnoergaard Date: Wed, 8 Sep 2021 22:18:36 +0200 Subject: [PATCH 1048/1665] Update petsurfer.py --- nipype/interfaces/freesurfer/petsurfer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/freesurfer/petsurfer.py b/nipype/interfaces/freesurfer/petsurfer.py index b710a1d597..2edebc11d2 100644 --- a/nipype/interfaces/freesurfer/petsurfer.py +++ b/nipype/interfaces/freesurfer/petsurfer.py @@ -176,7 +176,7 @@ class GTMPVCInputSpec(FSTraitedSpec): ) segmentation = File( - argstr="-seg %s", exists=True, desc="segfile : anatomical segmentation to define regions for GTM" + argstr="-seg %s", exists=True, mandatory=True, desc="segfile : anatomical segmentation to define regions for GTM" ) _reg_xor = ( "reg_file", From 566c4c6cd9e86c2cdc2523406cfea8e739388fc9 Mon Sep 17 00:00:00 2001 From: mnoergaard Date: Thu, 9 Sep 2021 12:55:41 +0200 Subject: [PATCH 1049/1665] Update petsurfer.py --- nipype/interfaces/freesurfer/petsurfer.py | 90 ++++++++++++++++++++--- 1 file changed, 81 insertions(+), 9 deletions(-) diff --git 
a/nipype/interfaces/freesurfer/petsurfer.py b/nipype/interfaces/freesurfer/petsurfer.py index 2edebc11d2..fdceb6074b 100644 --- a/nipype/interfaces/freesurfer/petsurfer.py +++ b/nipype/interfaces/freesurfer/petsurfer.py @@ -4,6 +4,7 @@ """Provides interfaces to various commands for running PET analyses provided by FreeSurfer """ +from nipype.interfaces.freesurfer.model import GLMFitInputSpec import os import os.path as op from glob import glob @@ -30,6 +31,8 @@ from .base import FSCommand, FSTraitedSpec, FSTraitedSpecOpenMP, FSCommandOpenMP, Info from .utils import copy2subjdir +from .model import GLMFitInputSpec, GLMFitInputSpec, GLMFit + __docformat__ = "restructuredtext" iflogger = logging.getLogger("nipype.interface") @@ -400,20 +403,89 @@ class GTMPVC(FSCommand): def _format_arg(self, name, spec, value): return super(GTMPVC, self)._format_arg(name, spec, value) -#class MRTMInputSpec(FSTraitedSpec): +class MRTMInputSpec(GLMFitInputSpec): + + mrtm1 = InputMultiPath( + traits.Tuple(File(exists=True, mandatory=True), File(exists=True, mandatory=True)), + argstr="--mrtm1 %s %s...", + desc="RefTac TimeSec : perform MRTM1 kinetic modeling", + ) + +class MRTMOutputSpec(GLMFitInputSpec): + + k2p = File(desc="estimate of k2p parameter") + +class MRTM(GLMFit): + """Perform MRTM1 kinetic modeling. + Examples + -------- + >>> mrtm = MRTM() + >>> mrtm.inputs.in_file = 'tac.nii' + >>> gtmseg.inputs.mrtm = ('ref_tac.dat', 'timing.dat') + >>> mrtm.inputs.glmdir = 'mrtm' + >>> mrtm.cmdline == 'mri_glmfit --glmdir mrtm --y tac.nii --mrtm1 ref_tac.dat timing.dat' + """ + + _cmd = "mri_glmfit" + input_spec = MRTMInputSpec + output_spec = MRTMOutputSpec -#class MRTMOutputSpec(TraitedSpec): + def _list_outputs(self): + outputs = self.output_spec().get() + return outputs -#class MRTM(FSCommand): +class MRTM2InputSpec(GLMFitInputSpec): + + mrtm2 = InputMultiPath( + traits.Tuple(File(exists=True), File(exists=True), traits.Float), + mandatory=True, + argstr="--mrtm2 %s %s %f...", + desc="RefTac TimeSec k2prime : perform MRTM2 kinetic modeling", + ) + + _ext_xor = ['nii', 'nii_gz'] + nii = traits.Bool( + argstr='--nii', + desc='save outputs as nii', + xor=_ext_xor + ) + nii_gz = traits.Bool( + argstr='--nii.gz', + desc='save outputs as nii.gz', + xor=_ext_xor + ) + +class MRTM2OutputSpec(GLMFitInputSpec): + bp = File(desc="BP estimates") -#class MRTM2InputSpec(FSTraitedSpec): +class MRTM2(GLMFit): + """Perform MRTM2 kinetic modeling. 
+ Examples + -------- + >>> mrtm = MRTM() + >>> mrtm.inputs.in_file = 'tac.nii' + >>> gtmseg.inputs.mrtm = ('ref_tac.dat', 'timing.dat', 'k2prime.dat') + >>> mrtm.inputs.glmdir = 'mrtm2' + >>> mrtm2.cmdline == 'mri_glmfit --glmdir mrtm2 --y tac.nii --mrtm2 ref_tac.dat timing.dat k2prime.dat' + """ -#class MRTM2OutputSpec(TraitedSpec): + _cmd = "mri_glmfit" + input_spec = MRTM2InputSpec + output_spec = MRTM2OutputSpec -#class MRTM2(FSCommand): + def _list_outputs(self): + outputs = self.output_spec().get() + if isdefined(self.inputs.nii_gz): + ext = '.nii.gz' + if isdefined(self.inputs.nii): + ext = '.nii' + else: + ext = '.mgh' + outputs['bp'] = join(self.inputs.glm_dir, 'bp' + ext) + return outputs -#class LoganRefInputSpec(FSTraitedSpec): +class LoganRefInputSpec(FSTraitedSpec): -#class LoganRefOutputSpec(TraitedSpec): +class LoganRefOutputSpec(TraitedSpec): -#class LoganRef(FSCommand): \ No newline at end of file +class LoganRef(FSCommand): \ No newline at end of file From 06d22796138658081dfa50aa317a0ef096e141dc Mon Sep 17 00:00:00 2001 From: mnoergaard Date: Thu, 9 Sep 2021 13:00:40 +0200 Subject: [PATCH 1050/1665] Update petsurfer.py --- nipype/interfaces/freesurfer/petsurfer.py | 41 ++++++++++++++++++++--- 1 file changed, 37 insertions(+), 4 deletions(-) diff --git a/nipype/interfaces/freesurfer/petsurfer.py b/nipype/interfaces/freesurfer/petsurfer.py index fdceb6074b..3c81d8aa61 100644 --- a/nipype/interfaces/freesurfer/petsurfer.py +++ b/nipype/interfaces/freesurfer/petsurfer.py @@ -481,11 +481,44 @@ def _list_outputs(self): ext = '.nii' else: ext = '.mgh' - outputs['bp'] = join(self.inputs.glm_dir, 'bp' + ext) + outputs['bp'] = os.join(self.inputs.glm_dir, 'bp' + ext) return outputs -class LoganRefInputSpec(FSTraitedSpec): +class LoganRefInputSpec(GLMFitInputSpec): + logan = InputMultiPath( + traits.Tuple(File(exists=True), File(exists=True), traits.Float), + mandatory=True, + argstr="--logan %s %s %f...", + desc="RefTac TimeSec tstar : perform Logan kinetic modeling", + ) -class LoganRefOutputSpec(TraitedSpec): + _ext_xor = ['nii', 'nii_gz'] + nii = traits.Bool( + argstr='--nii', + desc='save outputs as nii', + xor=_ext_xor + ) + nii_gz = traits.Bool( + argstr='--nii.gz', + desc='save outputs as nii.gz', + xor=_ext_xor + ) + +class LoganRefOutputSpec(GLMFitInputSpec): + vd = File(desc="BP estimates") + +class LoganRef(GLMFit): + _cmd = "mri_glmfit" + input_spec = LoganRefInputSpec + output_spec = LoganRefOutputSpec -class LoganRef(FSCommand): \ No newline at end of file + def _list_outputs(self): + outputs = self.output_spec().get() + if isdefined(self.inputs.nii_gz): + ext = '.nii.gz' + if isdefined(self.inputs.nii): + ext = '.nii' + else: + ext = '.mgh' + outputs['vd'] = os.join(self.inputs.glm_dir, 'vd' + ext) + return outputs \ No newline at end of file From 0fa255e1cd2d1f25f750036b3581bfc527540e9f Mon Sep 17 00:00:00 2001 From: 0rC0 Date: Sun, 12 Sep 2021 19:40:08 +0200 Subject: [PATCH 1051/1665] CAT12SANLMDenoising_1st_commit --- nipype/interfaces/cat12/__init__.py | 5 +- nipype/interfaces/cat12/preprocess.py | 136 ++++++++++++++++++++++++++ 2 files changed, 140 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/cat12/__init__.py b/nipype/interfaces/cat12/__init__.py index 99498dc922..3d83da434d 100644 --- a/nipype/interfaces/cat12/__init__.py +++ b/nipype/interfaces/cat12/__init__.py @@ -1,4 +1,7 @@ -from .preprocess import CAT12Segment +from .preprocess import ( + CAT12Segment, + CAT12SANLMDenoising +) from .surface import ( ExtractAdditionalSurfaceParameters, 
ExtractROIBasedSurfaceMeasures, diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index 5a73f42443..9d164ff1a6 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -593,6 +593,142 @@ def _list_outputs(self): return outputs +class CAT12SANLMDenoisingInputSpec(SPMCommandInputSpec): + + in_files = InputMultiPath( + ImageFileSPM(exists=True), + field="data", + desc="Images for filtering.", + mandatory=True, + copyfile=False, + ) + + spm_type = traits.Enum( + 16, + 0, + 2, + 512, + field='spm_type', + usedefault=True, + desc='Data type of the output images. 0 = same, 2 = uint8, 512 = uint16, 16 = single (32 bit)' + + ) + + intlim = traits.Int( + field='intlim', + default_value=100, + usedefault=True, + ) + + filename_prefix = traits.Str( + field='prefix', + default_value='sanlm_', + usedefault=True, + desc='Filename prefix. Specify the string to be prepended to the filenames of the filtered image file(s). Default prefix is "samlm_".', + ) + + filename_suffix= traits.Str( + field='suffix', + default_value='', + usedefault=True, + desc='Filename suffix. Specify the string to be appended to the filenames of the filtered image file(s). Default suffix is "".' + ) + + addnoise = traits.Float(default_value=0.5, + usedefault=True, + field='addnoise', + desc='Strength of additional noise in noise-free regions. Add minimal amount of noise in regions without any noise to avoid image segmentation problems. This parameter defines the strength of additional noise as percentage of the average signal intensity.') + + rician = traits.Enum( + 0, + 1, + field='rician', + usedefault=True, + desc='''Rician noise + MRIs can have Gaussian or Rician distributed noise with uniform or nonuniform variance across the image. If SNR is high enough + (>3) noise can be well approximated by Gaussian noise in the foreground. However, for SENSE reconstruction or DTI data a Rician + distribution is expected. Please note that the Rician noise estimation is sensitive for large signals in the neighbourhood and can lead to + artefacts, e.g. cortex can be affected by very high values in the scalp or in blood vessels.''') + + replaceNANandINF = traits.Enum( + 1, + 0, + field='replaceNANandINF', + usedefault=True, + desc='Replace NAN by 0, -INF by the minimum and INF by the maximum of the image.' + ) + + NCstr = traits.Enum( + '-Inf', + 2, + 4, + field='nlmfilter.optimized.NCstr', + usedefault=True, + desc='''Strength of Noise Corrections + Strength of the (sub-resolution) spatial adaptive non local means (SANLM) noise correction. Please note that the filter strength is + automatically estimated. Change this parameter only for specific conditions. The "light" option applies half of the filter strength of the + adaptive "medium" cases, whereas the "strong" option uses the full filter strength, force sub-resolution filtering and applies an + additional iteration. Sub-resolution filtering is only used in case of high image resolution below 0.8 mm or in case of the "strong" + option. light = 2, medium = -Inf, strong = 4''' + ) + + +class CAT12SANLMDenoisingOutputSpec(TraitedSpec): + + out_file = File(desc='out file') + + +class CAT12SANLMDenoising(SPMCommand): + """ + Spatially adaptive non-local means (SANLM) denoising filter + + This function applies an spatial adaptive (sub-resolution) non-local means denoising filter + to the data. This filter will remove noise while preserving edges. 
The filter strength is + automatically estimated based on the standard deviation of the noise. + + This filter is internally used in the segmentation procedure anyway. Thus, it is not + necessary (and not recommended) to apply the filter before segmentation. + + + Example: + ======= + from nipype.interfaces import cat12 + c = cat12.CAT12SANLMDenoising() + c.inputs.in_files='sub-test_FLAIR.nii' + c.run() + """ + + input_spec = CAT12SANLMDenoisingInputSpec + output_spec = CAT12SANLMDenoisingOutputSpec + + def __init__(self, **inputs): + _local_version = SPMCommand().version + if _local_version and "12." in _local_version: + self._jobtype = "tools" + self._jobname = "cat.tools.sanlm" + + SPMCommand.__init__(self, **inputs) + + def _format_arg(self, opt, spec, val): + """Convert input to appropriate format for spm""" + if opt == "in_files": + if isinstance(val, list): + return scans_for_fnames(val) + else: + return scans_for_fname(val) + + return super(CAT12SANLMDenoising, self)._format_arg(opt, spec, val) + + def _list_outputs(self): + outputs = self._outputs().get() + pth, base, ext = split_filename(self.inputs.in_files[0]) + outputs['out_file'] = os.path.join(os.getcwd(), self.inputs.filename_prefix + + base + + self.inputs.filename_suffix + + ext) + return outputs + + class Cell2Str(Cell): def __str__(self): """Convert input to appropriate format for cat12""" From 76491e75de20be90985ec0b4fd760f482052c532 Mon Sep 17 00:00:00 2001 From: 0rC0 Date: Sun, 12 Sep 2021 20:25:46 +0200 Subject: [PATCH 1052/1665] example --- nipype/interfaces/cat12/preprocess.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index 9d164ff1a6..62fe60d8fe 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -690,12 +690,12 @@ class CAT12SANLMDenoising(SPMCommand): necessary (and not recommended) to apply the filter before segmentation. 
- Example: - ======= - from nipype.interfaces import cat12 - c = cat12.CAT12SANLMDenoising() - c.inputs.in_files='sub-test_FLAIR.nii' - c.run() + Examples + -------- + >>> from nipype.interfaces import cat12 + >>> c = cat12.CAT12SANLMDenoising() + >>> c.inputs.in_files='sub-test_FLAIR.nii' + >>> c.run() """ input_spec = CAT12SANLMDenoisingInputSpec From af6b6270d813f647beaa7e6f95a1ac5e0414c466 Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Wed, 22 Sep 2021 10:10:16 +0200 Subject: [PATCH 1053/1665] ENH: ``verbose`` input should not be hashed in ``ants.Registration`` --- nipype/interfaces/ants/registration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/ants/registration.py b/nipype/interfaces/ants/registration.py index 049d5c5882..7a150a81d5 100644 --- a/nipype/interfaces/ants/registration.py +++ b/nipype/interfaces/ants/registration.py @@ -584,7 +584,7 @@ class RegistrationInputSpec(ANTSCommandInputSpec): desc="The Lower quantile to clip image ranges", ) - verbose = traits.Bool(argstr="-v", default_value=False, usedefault=True) + verbose = traits.Bool(argstr="-v", default_value=False, usedefault=True, nohash=True) class RegistrationOutputSpec(TraitedSpec): From 91f975339b996915bb7e07f9a7f069a912e62dcb Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Thu, 23 Sep 2021 12:46:26 +0200 Subject: [PATCH 1054/1665] fix: update unittest oracle and autotest metadata --- nipype/interfaces/ants/tests/test_auto_Registration.py | 1 + nipype/interfaces/base/tests/test_core.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/ants/tests/test_auto_Registration.py b/nipype/interfaces/ants/tests/test_auto_Registration.py index 7bc7c04a15..507e0effe2 100644 --- a/nipype/interfaces/ants/tests/test_auto_Registration.py +++ b/nipype/interfaces/ants/tests/test_auto_Registration.py @@ -154,6 +154,7 @@ def test_Registration_inputs(): ), verbose=dict( argstr="-v", + nohash=True, usedefault=True, ), winsorize_lower_quantile=dict( diff --git a/nipype/interfaces/base/tests/test_core.py b/nipype/interfaces/base/tests/test_core.py index 6b587554fa..cdfef51193 100644 --- a/nipype/interfaces/base/tests/test_core.py +++ b/nipype/interfaces/base/tests/test_core.py @@ -157,7 +157,7 @@ def __init__(self, **inputs): assert {} == check_dict(data_dict, tsthash2.inputs.get_traitsfree()) _, hashvalue = tsthash.inputs.get_hashval(hash_method="timestamp") - assert "8562a5623562a871115eb14822ee8d02" == hashvalue + assert hashvalue == "e35bf07fea8049cc02de9235f85e8903" class MinVerInputSpec(nib.TraitedSpec): From f69b3fb09560616822737764bb07272cd587e4a0 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Thu, 23 Sep 2021 08:56:28 -0400 Subject: [PATCH 1055/1665] STY: black --- nipype/interfaces/ants/registration.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/ants/registration.py b/nipype/interfaces/ants/registration.py index 7a150a81d5..87c2848be3 100644 --- a/nipype/interfaces/ants/registration.py +++ b/nipype/interfaces/ants/registration.py @@ -584,7 +584,9 @@ class RegistrationInputSpec(ANTSCommandInputSpec): desc="The Lower quantile to clip image ranges", ) - verbose = traits.Bool(argstr="-v", default_value=False, usedefault=True, nohash=True) + verbose = traits.Bool( + argstr="-v", default_value=False, usedefault=True, nohash=True + ) class RegistrationOutputSpec(TraitedSpec): From 7244d9c9c1b599e4adb1e70515f5f3c92cd8e4f4 Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Tue, 22 Jun 2021 18:03:22 +0200 Subject: [PATCH 1056/1665] ENH: Simplify interface execution of ``Node`` --- nipype/interfaces/base/core.py | 4 +- nipype/interfaces/base/support.py | 11 ++++ nipype/pipeline/engine/nodes.py | 95 ++++++++++++------------------- 3 files changed, 48 insertions(+), 62 deletions(-) diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index e329b9e257..65b35cadce 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -723,6 +723,7 @@ def _run_interface(self, runtime, correct_return_codes=(0,)): runtime.stderr = None runtime.cmdline = self.cmdline runtime.environ.update(out_environ) + runtime.success_codes = correct_return_codes # which $cmd executable_name = shlex.split(self._cmd_prefix + self.cmd)[0] @@ -742,9 +743,6 @@ def _run_interface(self, runtime, correct_return_codes=(0,)): else "" ) runtime = run_command(runtime, output=self.terminal_output) - if runtime.returncode is None or runtime.returncode not in correct_return_codes: - self.raise_exception(runtime) - return runtime def _format_arg(self, name, trait_spec, value): diff --git a/nipype/interfaces/base/support.py b/nipype/interfaces/base/support.py index 58b384d75d..f00ecefed0 100644 --- a/nipype/interfaces/base/support.py +++ b/nipype/interfaces/base/support.py @@ -110,6 +110,17 @@ def __exit__(self, exc_type, exc_value, exc_tb): if self._ignore_exc: return True + _exitcode = ( + getattr(self._runtime, "returncode", None) + if getattr(self._runtime, "cmdline", None) + else 0 + ) + _success_codes = getattr(self._runtime, "success_codes", (0,)) + if _exitcode not in _success_codes: + self._runtime.traceback = ( + f"RuntimeError: subprocess exited with code {self._runtime.returncode}." + ) + @property def runtime(self): return self._runtime diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index b458b3f820..6f856b887a 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -9,6 +9,7 @@ import os import os.path as op +from pathlib import Path import shutil import socket from copy import deepcopy @@ -30,7 +31,6 @@ load_json, emptydirs, savepkl, - indirectory, silentrm, ) @@ -98,7 +98,7 @@ def __init__( run_without_submitting=False, n_procs=None, mem_gb=0.20, - **kwargs + **kwargs, ): """ Parameters @@ -697,72 +697,44 @@ def _run_command(self, execute, copyfiles=True): ) return result - outdir = self.output_dir() - # Run command: either execute is true or load_results failed. 
- result = InterfaceResult( - interface=self._interface.__class__, - runtime=Bunch( - cwd=outdir, - returncode=1, - environ=dict(os.environ), - hostname=socket.gethostname(), - ), - inputs=self._interface.inputs.get_traitsfree(), - ) - + outdir = Path(self.output_dir()) if copyfiles: self._originputs = deepcopy(self._interface.inputs) self._copyfiles_to_wd(execute=execute) - message = '[Node] Running "{}" ("{}.{}")'.format( - self.name, self._interface.__module__, self._interface.__class__.__name__ + # Run command: either execute is true or load_results failed. + logger.info( + f'[Node] Executing "{self.name}" <{self._interface.__module__}' + f".{self._interface.__class__.__name__}>" + ) + # Invoke core run method of the interface ignoring exceptions + result = self._interface.run(cwd=outdir, ignore_exception=True) + logger.info( + f'[Node] Finished "{self.name}", elapsed time {result.runtime.duration}s.' ) + if issubclass(self._interface.__class__, CommandLine): - try: - with indirectory(outdir): - cmd = self._interface.cmdline - except Exception as msg: - result.runtime.stderr = "{}\n\n{}".format( - getattr(result.runtime, "stderr", ""), msg - ) - _save_resultfile( - result, - outdir, - self.name, - rebase=str2bool(self.config["execution"]["use_relative_paths"]), - ) - raise - cmdfile = op.join(outdir, "command.txt") - with open(cmdfile, "wt") as fd: - print(cmd + "\n", file=fd) - message += ", a CommandLine Interface with command:\n{}".format(cmd) - logger.info(message) - try: - result = self._interface.run(cwd=outdir) - except Exception as msg: - result.runtime.stderr = "%s\n\n%s".format( - getattr(result.runtime, "stderr", ""), msg - ) - _save_resultfile( - result, + # Write out command line as it happened + (outdir / "command.txt").write_text(f"{result.runtime.cmdline}\n") + + exc_tb = getattr(result, "traceback", None) + + if not exc_tb: + # Clean working directory if no errors + dirs2keep = None + if isinstance(self, MapNode): + dirs2keep = [op.join(outdir, "mapflow")] + + result.outputs = clean_working_directory( + result.outputs, outdir, - self.name, - rebase=str2bool(self.config["execution"]["use_relative_paths"]), + self._interface.inputs, + self.needed_outputs, + self.config, + dirs2keep=dirs2keep, ) - raise - - dirs2keep = None - if isinstance(self, MapNode): - dirs2keep = [op.join(outdir, "mapflow")] - result.outputs = clean_working_directory( - result.outputs, - outdir, - self._interface.inputs, - self.needed_outputs, - self.config, - dirs2keep=dirs2keep, - ) + # Store results file under all circumstances _save_resultfile( result, outdir, @@ -770,6 +742,11 @@ def _run_command(self, execute, copyfiles=True): rebase=str2bool(self.config["execution"]["use_relative_paths"]), ) + if exc_tb: + raise RuntimeError( + f"Exception raised while executing Node {self.name}.\n\n{result.runtime.traceback}" + ) + return result def _copyfiles_to_wd(self, execute=True, linksonly=False): From 58c90c4424cd1c7fc8991bb8920a0dbe5078d71f Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Thu, 24 Jun 2021 17:07:27 +0200 Subject: [PATCH 1057/1665] enh: better error handling --- .../interfaces/utility/tests/test_wrappers.py | 2 +- nipype/pipeline/engine/nodes.py | 12 ++++++++---- nipype/pipeline/engine/tests/test_utils.py | 2 +- nipype/pipeline/plugins/base.py | 12 +++++++++++- nipype/pipeline/plugins/linear.py | 17 ++++++++++------- nipype/pipeline/plugins/tests/test_sgelike.py | 9 ++++++--- nipype/pipeline/plugins/tools.py | 3 --- 7 files changed, 37 insertions(+), 20 deletions(-) diff --git 
a/nipype/interfaces/utility/tests/test_wrappers.py b/nipype/interfaces/utility/tests/test_wrappers.py index 76413e5760..fda81b2f5b 100644 --- a/nipype/interfaces/utility/tests/test_wrappers.py +++ b/nipype/interfaces/utility/tests/test_wrappers.py @@ -73,7 +73,7 @@ def should_fail(tmp): def test_should_fail(tmpdir): - with pytest.raises(NameError): + with pytest.raises(pe.nodes.NodeExecutionError): should_fail(tmpdir) diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index 6f856b887a..378f88957c 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -64,6 +64,10 @@ logger = logging.getLogger("nipype.workflow") +class NodeExecutionError(RuntimeError): + """A nipype-specific name for exceptions when executing a Node.""" + + class Node(EngineBase): """ Wraps interface objects for use in pipeline @@ -582,7 +586,7 @@ def _get_inputs(self): logger.critical("%s", e) if outputs is None: - raise RuntimeError( + raise NodeExecutionError( """\ Error populating the inputs of node "%s": the results file of the source node \ (%s) does not contain any outputs.""" @@ -717,7 +721,7 @@ def _run_command(self, execute, copyfiles=True): # Write out command line as it happened (outdir / "command.txt").write_text(f"{result.runtime.cmdline}\n") - exc_tb = getattr(result, "traceback", None) + exc_tb = getattr(result.runtime, "traceback", None) if not exc_tb: # Clean working directory if no errors @@ -743,7 +747,7 @@ def _run_command(self, execute, copyfiles=True): ) if exc_tb: - raise RuntimeError( + raise NodeExecutionError( f"Exception raised while executing Node {self.name}.\n\n{result.runtime.traceback}" ) @@ -1267,7 +1271,7 @@ def _collate_results(self, nodes): if code is not None: msg += ["Subnode %d failed" % i] msg += ["Error: %s" % str(code)] - raise Exception( + raise NodeExecutionError( "Subnodes of node: %s failed:\n%s" % (self.name, "\n".join(msg)) ) diff --git a/nipype/pipeline/engine/tests/test_utils.py b/nipype/pipeline/engine/tests/test_utils.py index 0705f0ad53..07b01bd3ba 100644 --- a/nipype/pipeline/engine/tests/test_utils.py +++ b/nipype/pipeline/engine/tests/test_utils.py @@ -183,7 +183,7 @@ def test_mapnode_crash(tmpdir): node.config = deepcopy(config._sections) node.config["execution"]["stop_on_first_crash"] = True node.base_dir = tmpdir.strpath - with pytest.raises(TypeError): + with pytest.raises(pe.nodes.NodeExecutionError): node.run() os.chdir(cwd) diff --git a/nipype/pipeline/plugins/base.py b/nipype/pipeline/plugins/base.py index a33a40aab6..a68ac6957f 100644 --- a/nipype/pipeline/plugins/base.py +++ b/nipype/pipeline/plugins/base.py @@ -123,6 +123,7 @@ def run(self, graph, config, updatehash=False): self.mapnodesubids = {} # setup polling - TODO: change to threaded model notrun = [] + errors = [] old_progress_stats = None old_presub_stats = None @@ -155,14 +156,16 @@ def run(self, graph, config, updatehash=False): taskid, jobid = self.pending_tasks.pop() try: result = self._get_result(taskid) - except Exception: + except Exception as exc: notrun.append(self._clean_queue(jobid, graph)) + errors.append(exc) else: if result: if result["traceback"]: notrun.append( self._clean_queue(jobid, graph, result=result) ) + errors.append("".join(result["traceback"])) else: self._task_finished_cb(jobid) self._remove_node_dirs() @@ -194,6 +197,13 @@ def run(self, graph, config, updatehash=False): # close any open resources self._postrun_check() + if errors: + # If one or more nodes failed, re-rise first of them + if isinstance(errors[0], 
str): + raise RuntimeError(errors[0]) + + raise errors[0] + def _get_result(self, taskid): raise NotImplementedError diff --git a/nipype/pipeline/plugins/linear.py b/nipype/pipeline/plugins/linear.py index 8471a187d3..196e40cbd2 100644 --- a/nipype/pipeline/plugins/linear.py +++ b/nipype/pipeline/plugins/linear.py @@ -34,6 +34,8 @@ def run(self, graph, config, updatehash=False): old_wd = os.getcwd() notrun = [] donotrun = [] + stop_on_first_crash = str2bool(config["execution"]["stop_on_first_crash"]) + errors = [] nodes, _ = topological_sort(graph) for node in nodes: endstatus = "end" @@ -43,13 +45,11 @@ def run(self, graph, config, updatehash=False): if self._status_callback: self._status_callback(node, "start") node.run(updatehash=updatehash) - except: + except Exception as exc: endstatus = "exception" # bare except, but i really don't know where a # node might fail crashfile = report_crash(node) - if str2bool(config["execution"]["stop_on_first_crash"]): - raise # remove dependencies from queue subnodes = [s for s in dfs_preorder(graph, node)] notrun.append( @@ -57,13 +57,16 @@ def run(self, graph, config, updatehash=False): ) donotrun.extend(subnodes) # Delay raising the crash until we cleaned the house - if str2bool(config["execution"]["stop_on_first_crash"]): - os.chdir(old_wd) # Return wherever we were before - report_nodes_not_run(notrun) # report before raising - raise + errors.append(exc) + + if stop_on_first_crash: + break finally: if self._status_callback: self._status_callback(node, endstatus) os.chdir(old_wd) # Return wherever we were before report_nodes_not_run(notrun) + if errors: + # Re-raise exception of first failed node + raise errors[0] diff --git a/nipype/pipeline/plugins/tests/test_sgelike.py b/nipype/pipeline/plugins/tests/test_sgelike.py index 9c7cdc1412..140150c9b0 100644 --- a/nipype/pipeline/plugins/tests/test_sgelike.py +++ b/nipype/pipeline/plugins/tests/test_sgelike.py @@ -29,7 +29,10 @@ def test_crashfile_creation(tmp_path): sgelike_plugin = SGELikeBatchManagerBase("") with pytest.raises(RuntimeError) as e: assert pipe.run(plugin=sgelike_plugin) - assert str(e.value) == "Workflow did not execute cleanly. Check log for details" + assert str(e.value) == "Workflow did not execute cleanly. Check log for details" - crashfiles = tmp_path.glob("crash*crasher*.pklz") - assert len(list(crashfiles)) == 1 + crashfiles = ( + list(tmp_path.glob("crash*crasher*.pklz")) + + list(tmp_path.glob("crash*crasher*.txt")) + ) + assert len(crashfiles) == 1 diff --git a/nipype/pipeline/plugins/tools.py b/nipype/pipeline/plugins/tools.py index b816e61463..86fdf67ac6 100644 --- a/nipype/pipeline/plugins/tools.py +++ b/nipype/pipeline/plugins/tools.py @@ -93,9 +93,6 @@ def report_nodes_not_run(notrun): for subnode in info["dependents"]: logger.debug(subnode._id) logger.info("***********************************") - raise RuntimeError( - ("Workflow did not execute cleanly. 
" "Check log for details") - ) def create_pyscript(node, updatehash=False, store_exception=True): From 135ce497a18adbe0811441c2b720910ec549aa6f Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Thu, 24 Jun 2021 17:43:22 +0200 Subject: [PATCH 1058/1665] sty: run black --- nipype/pipeline/plugins/tests/test_sgelike.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nipype/pipeline/plugins/tests/test_sgelike.py b/nipype/pipeline/plugins/tests/test_sgelike.py index 140150c9b0..65c49b9ba8 100644 --- a/nipype/pipeline/plugins/tests/test_sgelike.py +++ b/nipype/pipeline/plugins/tests/test_sgelike.py @@ -31,8 +31,7 @@ def test_crashfile_creation(tmp_path): assert pipe.run(plugin=sgelike_plugin) assert str(e.value) == "Workflow did not execute cleanly. Check log for details" - crashfiles = ( - list(tmp_path.glob("crash*crasher*.pklz")) - + list(tmp_path.glob("crash*crasher*.txt")) + crashfiles = list(tmp_path.glob("crash*crasher*.pklz")) + list( + tmp_path.glob("crash*crasher*.txt") ) assert len(crashfiles) == 1 From 21c0b2621504730bb104bd2613fbd758e66555d5 Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Fri, 25 Jun 2021 11:05:56 +0200 Subject: [PATCH 1059/1665] fix: remove duplicate log entries --- nipype/pipeline/engine/nodes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index 378f88957c..1c1f89c102 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -443,7 +443,8 @@ def run(self, updatehash=False): ) # Check hash, check whether run should be enforced - logger.info('[Node] Setting-up "%s" in "%s".', self.fullname, outdir) + if not isinstance(self, MapNode): + logger.info(f'[Node] Setting-up "{self.fullname}" in "{outdir}".') cached, updated = self.is_cached() # If the node is cached, check on pklz files and finish @@ -534,7 +535,6 @@ def run(self, updatehash=False): # Tear-up after success shutil.move(hashfile_unfinished, hashfile_unfinished.replace("_unfinished", "")) write_node_report(self, result=result, is_mapnode=isinstance(self, MapNode)) - logger.info('[Node] Finished "%s".', self.fullname) return result def _get_hashval(self): From c208e7b261201a2695b95262c0853d55b1a60dd5 Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Fri, 25 Jun 2021 11:07:12 +0200 Subject: [PATCH 1060/1665] fix: robust matlab installation check --- nipype/interfaces/matlab.py | 24 +++++------------------- nipype/interfaces/tests/test_matlab.py | 2 +- 2 files changed, 6 insertions(+), 20 deletions(-) diff --git a/nipype/interfaces/matlab.py b/nipype/interfaces/matlab.py index 543b1e7a55..03e34b0b43 100644 --- a/nipype/interfaces/matlab.py +++ b/nipype/interfaces/matlab.py @@ -17,25 +17,11 @@ def get_matlab_command(): - if "NIPYPE_NO_MATLAB" in os.environ: - return None - - try: - matlab_cmd = os.environ["MATLABCMD"] - except: - matlab_cmd = "matlab" - - try: - res = CommandLine( - command="which", - args=matlab_cmd, - resource_monitor=False, - terminal_output="allatonce", - ).run() - matlab_path = res.runtime.stdout.strip() - except Exception: - return None - return matlab_cmd + """Determine whether Matlab is installed and can be executed.""" + if "NIPYPE_NO_MATLAB" not in os.environ: + from nipype.utils.filemanip import which + + return which(os.getenv("MATLABCMD", "matlab")) no_matlab = get_matlab_command() is None diff --git a/nipype/interfaces/tests/test_matlab.py b/nipype/interfaces/tests/test_matlab.py index 64f1de846f..21679a78e2 100644 --- 
a/nipype/interfaces/tests/test_matlab.py +++ b/nipype/interfaces/tests/test_matlab.py @@ -103,7 +103,7 @@ def test_run_interface(tmpdir): # bypasses ubuntu dash issue mc = mlab.MatlabCommand(script="foo;", paths=[tmpdir.strpath], mfile=True) assert not os.path.exists(default_script_file), "scriptfile should not exist 4." - with pytest.raises(RuntimeError): + with pytest.raises(OSError): mc.run() assert os.path.exists(default_script_file), "scriptfile should exist 4." if os.path.exists(default_script_file): # cleanup From a08638e2bb727d69b6814a87a15474fcefbdeeec Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Mon, 2 Aug 2021 17:43:03 +0200 Subject: [PATCH 1061/1665] Update nipype/pipeline/engine/nodes.py Co-authored-by: Chris Markiewicz --- nipype/pipeline/engine/nodes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index 1c1f89c102..bda77b1956 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -719,7 +719,7 @@ def _run_command(self, execute, copyfiles=True): if issubclass(self._interface.__class__, CommandLine): # Write out command line as it happened - (outdir / "command.txt").write_text(f"{result.runtime.cmdline}\n") + Path.write_text(outdir / "command.txt", f"{result.runtime.cmdline}\n") exc_tb = getattr(result.runtime, "traceback", None) From bbcd4ff1cc20564b92cc0e615f8443abbf95b28d Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Mon, 2 Aug 2021 17:45:34 +0200 Subject: [PATCH 1062/1665] Update nipype/interfaces/base/support.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/base/support.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/nipype/interfaces/base/support.py b/nipype/interfaces/base/support.py index f00ecefed0..8a90942feb 100644 --- a/nipype/interfaces/base/support.py +++ b/nipype/interfaces/base/support.py @@ -110,16 +110,10 @@ def __exit__(self, exc_type, exc_value, exc_tb): if self._ignore_exc: return True - _exitcode = ( - getattr(self._runtime, "returncode", None) - if getattr(self._runtime, "cmdline", None) - else 0 - ) - _success_codes = getattr(self._runtime, "success_codes", (0,)) - if _exitcode not in _success_codes: - self._runtime.traceback = ( - f"RuntimeError: subprocess exited with code {self._runtime.returncode}." - ) + if hasattr(self._runtime, "cmdline"): + retcode = self._runtime.returncode + if retcode not in self._runtime.success_codes: + self._runtime.traceback = f"RuntimeError: subprocess exited with code {retcode}." @property def runtime(self): From 235f5800b1c349cc269ed52e1a444ddd6cf2d461 Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Thu, 23 Sep 2021 09:28:04 -0400 Subject: [PATCH 1063/1665] MNT: Add user name and email to Docker to appease git/annex/datalad --- docker/generate_dockerfiles.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docker/generate_dockerfiles.sh b/docker/generate_dockerfiles.sh index e8af4d0042..a2982bb003 100755 --- a/docker/generate_dockerfiles.sh +++ b/docker/generate_dockerfiles.sh @@ -89,6 +89,8 @@ function generate_main_dockerfile() { OMP_NUM_THREADS=1 \ --arg PYTHON_VERSION_MAJOR=3 PYTHON_VERSION_MINOR=8 BUILD_DATE VCS_REF VERSION \ --user neuro \ + --run 'git config --global user.name nipybot + && git config --global user.email "nipybot@gmail.com"' \ --workdir /home/neuro \ --miniconda create_env=neuro \ conda_install='python=${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR} From 5d39ee0701029375d7136e364a9bf15a89e35383 Mon Sep 17 00:00:00 2001 From: Alejandro de la Vega Date: Mon, 27 Sep 2021 10:42:14 -0500 Subject: [PATCH 1064/1665] Extension not extensions, after pybids v0.9 --- nipype/interfaces/io.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index bfa9353b98..142139548c 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -2935,12 +2935,12 @@ def __init__(self, infields=None, **kwargs): "bold": { "datatype": "func", "suffix": "bold", - "extensions": ["nii", ".nii.gz"], + "extension": ["nii", ".nii.gz"], }, "T1w": { "datatype": "anat", "suffix": "T1w", - "extensions": ["nii", ".nii.gz"], + "extension": ["nii", ".nii.gz"], }, } From 1833a12f7b497a82bf4f1ff1c26a3351cc3c0b0e Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Tue, 28 Sep 2021 09:23:27 +0200 Subject: [PATCH 1065/1665] Apply suggestions from code review Co-authored-by: Chris Markiewicz --- nipype/pipeline/plugins/base.py | 10 +++++++--- nipype/pipeline/plugins/tests/test_sgelike.py | 1 - 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/nipype/pipeline/plugins/base.py b/nipype/pipeline/plugins/base.py index a68ac6957f..89eb9ee275 100644 --- a/nipype/pipeline/plugins/base.py +++ b/nipype/pipeline/plugins/base.py @@ -199,10 +199,14 @@ def run(self, graph, config, updatehash=False): if errors: # If one or more nodes failed, re-rise first of them - if isinstance(errors[0], str): - raise RuntimeError(errors[0]) + error, cause = errors[0], None + if isinstance(error, str): + error = RuntimeError(error) - raise errors[0] + if len(errors) > 1: + error, cause = RuntimeError(f"{len(errors)} raised. Re-raising first."), error + + raise error from cause def _get_result(self, taskid): raise NotImplementedError diff --git a/nipype/pipeline/plugins/tests/test_sgelike.py b/nipype/pipeline/plugins/tests/test_sgelike.py index 65c49b9ba8..4c5807e262 100644 --- a/nipype/pipeline/plugins/tests/test_sgelike.py +++ b/nipype/pipeline/plugins/tests/test_sgelike.py @@ -29,7 +29,6 @@ def test_crashfile_creation(tmp_path): sgelike_plugin = SGELikeBatchManagerBase("") with pytest.raises(RuntimeError) as e: assert pipe.run(plugin=sgelike_plugin) - assert str(e.value) == "Workflow did not execute cleanly. 
Check log for details" crashfiles = list(tmp_path.glob("crash*crasher*.pklz")) + list( tmp_path.glob("crash*crasher*.txt") From bed65aadb7503d62b7f174f7ed643f35f162e6d3 Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Wed, 29 Sep 2021 10:08:01 +0200 Subject: [PATCH 1066/1665] Update nipype/pipeline/plugins/linear.py Co-authored-by: Chris Markiewicz --- nipype/pipeline/plugins/linear.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/nipype/pipeline/plugins/linear.py b/nipype/pipeline/plugins/linear.py index 196e40cbd2..ed62c06319 100644 --- a/nipype/pipeline/plugins/linear.py +++ b/nipype/pipeline/plugins/linear.py @@ -68,5 +68,12 @@ def run(self, graph, config, updatehash=False): os.chdir(old_wd) # Return wherever we were before report_nodes_not_run(notrun) if errors: - # Re-raise exception of first failed node - raise errors[0] + # If one or more nodes failed, re-rise first of them + error, cause = errors[0], None + if isinstance(error, str): + error = RuntimeError(error) + + if len(errors) > 1: + error, cause = RuntimeError(f"{len(errors)} raised. Re-raising first."), error + + raise error from cause From 5f280da629bb7b5dce908633d2deea85b55dd67b Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Wed, 29 Sep 2021 16:53:54 +0200 Subject: [PATCH 1067/1665] sty: run black on affected files --- nipype/interfaces/base/support.py | 4 +++- nipype/pipeline/plugins/base.py | 7 +++++-- nipype/pipeline/plugins/linear.py | 5 ++++- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/nipype/interfaces/base/support.py b/nipype/interfaces/base/support.py index 8a90942feb..175438b6d5 100644 --- a/nipype/interfaces/base/support.py +++ b/nipype/interfaces/base/support.py @@ -113,7 +113,9 @@ def __exit__(self, exc_type, exc_value, exc_tb): if hasattr(self._runtime, "cmdline"): retcode = self._runtime.returncode if retcode not in self._runtime.success_codes: - self._runtime.traceback = f"RuntimeError: subprocess exited with code {retcode}." + self._runtime.traceback = ( + f"RuntimeError: subprocess exited with code {retcode}." + ) @property def runtime(self): diff --git a/nipype/pipeline/plugins/base.py b/nipype/pipeline/plugins/base.py index 89eb9ee275..dbcf415b4e 100644 --- a/nipype/pipeline/plugins/base.py +++ b/nipype/pipeline/plugins/base.py @@ -147,7 +147,7 @@ def run(self, graph, config, updatehash=False): "Progress: %d jobs, %d/%d/%d " "(done/running/ready), %d/%d " "(pending_tasks/waiting).", - *progress_stats + *progress_stats, ) old_progress_stats = progress_stats toappend = [] @@ -204,7 +204,10 @@ def run(self, graph, config, updatehash=False): error = RuntimeError(error) if len(errors) > 1: - error, cause = RuntimeError(f"{len(errors)} raised. Re-raising first."), error + error, cause = ( + RuntimeError(f"{len(errors)} raised. Re-raising first."), + error, + ) raise error from cause diff --git a/nipype/pipeline/plugins/linear.py b/nipype/pipeline/plugins/linear.py index ed62c06319..8449e34111 100644 --- a/nipype/pipeline/plugins/linear.py +++ b/nipype/pipeline/plugins/linear.py @@ -74,6 +74,9 @@ def run(self, graph, config, updatehash=False): error = RuntimeError(error) if len(errors) > 1: - error, cause = RuntimeError(f"{len(errors)} raised. Re-raising first."), error + error, cause = ( + RuntimeError(f"{len(errors)} raised. 
Re-raising first."), + error, + ) raise error from cause From 43628090fa7cbd917c336fce65164f81100dadf0 Mon Sep 17 00:00:00 2001 From: o_nu Date: Thu, 30 Sep 2021 14:09:26 +0200 Subject: [PATCH 1068/1665] Removed exists=True from MathsOutput --- nipype/interfaces/fsl/maths.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/fsl/maths.py b/nipype/interfaces/fsl/maths.py index 9e05b4d102..f3276024b7 100644 --- a/nipype/interfaces/fsl/maths.py +++ b/nipype/interfaces/fsl/maths.py @@ -41,7 +41,7 @@ class MathsInput(FSLCommandInputSpec): class MathsOutput(TraitedSpec): - out_file = File(exists=True, desc="image written after calculations") + out_file = File(desc="image written after calculations") class MathsCommand(FSLCommand): From 0e16755a04ec8b8d340361bdf5a9b75924f96d5f Mon Sep 17 00:00:00 2001 From: mnoergaard Date: Mon, 4 Oct 2021 11:11:37 +0200 Subject: [PATCH 1069/1665] minor updates to petsurfer.py CHANGES: 1. '-seg' changed to '--seg' in the input specs for GTMPVC 2. removed xor from registration flag --- nipype/interfaces/freesurfer/petsurfer.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/nipype/interfaces/freesurfer/petsurfer.py b/nipype/interfaces/freesurfer/petsurfer.py index 3c81d8aa61..b148ceba09 100644 --- a/nipype/interfaces/freesurfer/petsurfer.py +++ b/nipype/interfaces/freesurfer/petsurfer.py @@ -179,15 +179,11 @@ class GTMPVCInputSpec(FSTraitedSpec): ) segmentation = File( - argstr="-seg %s", exists=True, mandatory=True, desc="segfile : anatomical segmentation to define regions for GTM" - ) - _reg_xor = ( - "reg_file", - "lta_file" + argstr="--seg %s", exists=True, mandatory=True, desc="segfile : anatomical segmentation to define regions for GTM" ) + reg_file = File( exists=True, - xor=_reg_xor, argstr="--reg %s", mandatory=True, desc="LTA registration file that maps PET to anatomical", From 4374348cf1c664e75ef3976b69be28bc4f9ab0cb Mon Sep 17 00:00:00 2001 From: mnoergaard Date: Mon, 4 Oct 2021 12:23:34 +0200 Subject: [PATCH 1070/1665] Update docstring for mri_gtmpvc --- nipype/interfaces/freesurfer/petsurfer.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/nipype/interfaces/freesurfer/petsurfer.py b/nipype/interfaces/freesurfer/petsurfer.py index b148ceba09..749426aa3e 100644 --- a/nipype/interfaces/freesurfer/petsurfer.py +++ b/nipype/interfaces/freesurfer/petsurfer.py @@ -387,9 +387,20 @@ class GTMPVC(FSCommand): Examples -------- >>> gtmpvc = GTMPVC() - >>> gtmpvc.inputs.out_file = '' - >>> gtmpvc.cmdline == 'mri_gtmpvc ' - + >>> gtmpvc.inputs.in_file = 'sub-01_ses-baseline_pet.nii.gz' + >>> gtmpvc.inputs.segmentation = 'gtmseg.mgz' + >>> gtmpvc.inputs.reg_file = 'sub-01_ses-baseline_pet_mean_reg.lta' + >>> gtmpvc.inputs.output_dir = 'pvc' + >>> gtmpvc.inputs.psf = 4 + >>> gtmpvc.inputs.default_seg_merge = True + >>> gtmpvc.inputs.auto_mask = (1, 0.1) + >>> gtmpvc.inputs.km_ref = ['8 47'] + >>> gtmpvc.inputs.km_hb = ['11 12 50 51'] + >>> gtmpvc.inputs.no_rescale = True + >>> gtmpvc.inputs.save_input = True + >>> gtmpvc.cmdline == 'mri_gtmpvc --auto-mask 1.000000 0.100000 --default-seg-merge + --i sub-01_ses-baseline_pet.nii.gz --km-hb 11 12 50 51 --km-ref 8 47 --no-rescale + --o pvc --psf 4.000000 --reg sub-01_ses-baseline_pet_mean_reg.lta --save-input --seg gtmseg.mgz' """ _cmd = "mri_gtmpvc" From c6900411ec08344f5f17f8cba5ab99448dbf0a41 Mon Sep 17 00:00:00 2001 From: 0rC0 Date: Tue, 5 Oct 2021 20:55:57 +0200 Subject: [PATCH 1071/1665] Updated .zenodo.json --- 
.zenodo.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.zenodo.json b/.zenodo.json index 83764c5417..e88f5e55ee 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -835,6 +835,11 @@ "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", "orcid": "0000-0002-5312-6729" + }, + { + "affiliation": "Charite Universitatsmedizin Berlin, Germany", + "name": "Dell\'Orco, Andrea", + "orcid": "0000-0002-3964-8360" } ], "keywords": [ From 15469d5948ff827ec2b5c27732e472a1dad113f9 Mon Sep 17 00:00:00 2001 From: 0rC0 Date: Tue, 5 Oct 2021 21:00:26 +0200 Subject: [PATCH 1072/1665] Updated .zenodo.json --- .zenodo.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zenodo.json b/.zenodo.json index e88f5e55ee..0b2d0f65fb 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -837,7 +837,7 @@ "orcid": "0000-0002-5312-6729" }, { - "affiliation": "Charite Universitatsmedizin Berlin, Germany", + "affiliation": "Charitè Universitätsmedizin Berlin, Germany", "name": "Dell\'Orco, Andrea", "orcid": "0000-0002-3964-8360" } From 57160c9dd93c46939f4a138db00b47793cafcea8 Mon Sep 17 00:00:00 2001 From: orco Date: Tue, 5 Oct 2021 21:13:46 +0200 Subject: [PATCH 1073/1665] Update nipype/interfaces/cat12/preprocess.py default suffix removed Co-authored-by: Chris Markiewicz --- nipype/interfaces/cat12/preprocess.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index 62fe60d8fe..e963a3fa0d 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -624,14 +624,14 @@ class CAT12SANLMDenoisingInputSpec(SPMCommandInputSpec): field='prefix', default_value='sanlm_', usedefault=True, - desc='Filename prefix. Specify the string to be prepended to the filenames of the filtered image file(s). Default prefix is "samlm_".', + desc='Filename prefix. Specify the string to be prepended to the filenames of the filtered image file(s).', ) filename_suffix= traits.Str( field='suffix', default_value='', usedefault=True, - desc='Filename suffix. Specify the string to be appended to the filenames of the filtered image file(s). Default suffix is "".' + desc='Filename suffix. Specify the string to be appended to the filenames of the filtered image file(s).' ) addnoise = traits.Float(default_value=0.5, From 94f6bf98801f4d2759118c29973f44d79198c1b8 Mon Sep 17 00:00:00 2001 From: orco Date: Tue, 5 Oct 2021 21:34:22 +0200 Subject: [PATCH 1074/1665] Update nipype/interfaces/cat12/preprocess.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/cat12/preprocess.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index e963a3fa0d..4ff3f6e539 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -650,9 +650,8 @@ class CAT12SANLMDenoisingInputSpec(SPMCommandInputSpec): distribution is expected. Please note that the Rician noise estimation is sensitive for large signals in the neighbourhood and can lead to artefacts, e.g. cortex can be affected by very high values in the scalp or in blood vessels.''') - replaceNANandINF = traits.Enum( - 1, - 0, + replaceNANandINF = traits.Bool( + True field='replaceNANandINF', usedefault=True, desc='Replace NAN by 0, -INF by the minimum and INF by the maximum of the image.' 
From 5766e9d6d17dac95675583a2b05537b28fa18529 Mon Sep 17 00:00:00 2001 From: 0rC0 Date: Tue, 5 Oct 2021 21:39:24 +0200 Subject: [PATCH 1075/1665] rician as bool --- nipype/interfaces/cat12/preprocess.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index 4ff3f6e539..44b0ae2f64 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -627,7 +627,7 @@ class CAT12SANLMDenoisingInputSpec(SPMCommandInputSpec): desc='Filename prefix. Specify the string to be prepended to the filenames of the filtered image file(s).', ) - filename_suffix= traits.Str( + filename_suffix = traits.Str( field='suffix', default_value='', usedefault=True, @@ -639,9 +639,8 @@ class CAT12SANLMDenoisingInputSpec(SPMCommandInputSpec): field='addnoise', desc='Strength of additional noise in noise-free regions. Add minimal amount of noise in regions without any noise to avoid image segmentation problems. This parameter defines the strength of additional noise as percentage of the average signal intensity.') - rician = traits.Enum( - 0, - 1, + rician = traits.Bool( + True, field='rician', usedefault=True, desc='''Rician noise @@ -651,7 +650,7 @@ class CAT12SANLMDenoisingInputSpec(SPMCommandInputSpec): artefacts, e.g. cortex can be affected by very high values in the scalp or in blood vessels.''') replaceNANandINF = traits.Bool( - True + True, field='replaceNANandINF', usedefault=True, desc='Replace NAN by 0, -INF by the minimum and INF by the maximum of the image.' From 0b730027752516e2d8c3a3563d6460cf29f1e9b2 Mon Sep 17 00:00:00 2001 From: orco Date: Tue, 5 Oct 2021 21:39:52 +0200 Subject: [PATCH 1076/1665] Update nipype/interfaces/cat12/preprocess.py example filename Co-authored-by: Chris Markiewicz --- nipype/interfaces/cat12/preprocess.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index 4ff3f6e539..842bc98b11 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -693,7 +693,7 @@ class CAT12SANLMDenoising(SPMCommand): -------- >>> from nipype.interfaces import cat12 >>> c = cat12.CAT12SANLMDenoising() - >>> c.inputs.in_files='sub-test_FLAIR.nii' + >>> c.inputs.in_files='anatomical.nii' >>> c.run() """ From d737a2087f42110fee164b0e0a4e4ae514ec6f7c Mon Sep 17 00:00:00 2001 From: orco Date: Tue, 5 Oct 2021 21:41:03 +0200 Subject: [PATCH 1077/1665] Update nipype/interfaces/cat12/preprocess.py use fname_presuffix for output file Co-authored-by: Chris Markiewicz --- nipype/interfaces/cat12/preprocess.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index 842bc98b11..a6c43d67b4 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -720,11 +720,10 @@ def _format_arg(self, opt, spec, val): def _list_outputs(self): outputs = self._outputs().get() - pth, base, ext = split_filename(self.inputs.in_files[0]) - outputs['out_file'] = os.path.join(os.getcwd(), self.inputs.filename_prefix + - base + - self.inputs.filename_suffix + - ext) + outputs['out_file'] = fname_presuffix(self.inputs.in_files[0], + newpath=os.getcwd(), + prefix=self.inputs.filename_prefix, + suffix=self.inputs.filename_suffix) return outputs From 1dedc71e6ec057f30c0ff7a0152cb8802a115b86 Mon Sep 17 00:00:00 2001 From: 0rC0 Date: Tue, 5 
Oct 2021 21:53:42 +0200 Subject: [PATCH 1078/1665] style --- nipype/interfaces/cat12/preprocess.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index 7de78f40f7..39627692cd 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -634,10 +634,11 @@ class CAT12SANLMDenoisingInputSpec(SPMCommandInputSpec): desc='Filename suffix. Specify the string to be appended to the filenames of the filtered image file(s).' ) - addnoise = traits.Float(default_value=0.5, - usedefault=True, - field='addnoise', - desc='Strength of additional noise in noise-free regions. Add minimal amount of noise in regions without any noise to avoid image segmentation problems. This parameter defines the strength of additional noise as percentage of the average signal intensity.') + addnoise = traits.Float( + default_value=0.5, + usedefault=True, + field='addnoise', + desc='Strength of additional noise in noise-free regions. Add minimal amount of noise in regions without any noise to avoid image segmentation problems. This parameter defines the strength of additional noise as percentage of the average signal intensity.') rician = traits.Bool( True, @@ -649,14 +650,14 @@ class CAT12SANLMDenoisingInputSpec(SPMCommandInputSpec): distribution is expected. Please note that the Rician noise estimation is sensitive for large signals in the neighbourhood and can lead to artefacts, e.g. cortex can be affected by very high values in the scalp or in blood vessels.''') - replaceNANandINF = traits.Bool( + replace_nan_and_inf = traits.Bool( True, field='replaceNANandINF', usedefault=True, desc='Replace NAN by 0, -INF by the minimum and INF by the maximum of the image.' ) - NCstr = traits.Enum( + noisecorr_strength = traits.Enum( '-Inf', 2, 4, From aecdacde0775b8baa7bbff87d8ac42616f51050c Mon Sep 17 00:00:00 2001 From: 0rC0 Date: Tue, 5 Oct 2021 21:53:56 +0200 Subject: [PATCH 1079/1665] non documented variable --- nipype/interfaces/cat12/preprocess.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index 39627692cd..fd7f1f165e 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -614,12 +614,6 @@ class CAT12SANLMDenoisingInputSpec(SPMCommandInputSpec): ) - intlim = traits.Int( - field='intlim', - default_value=100, - usedefault=True, - ) - filename_prefix = traits.Str( field='prefix', default_value='sanlm_', From 6d7ba1d5e746abaad053d9b4eae47a9e7264332e Mon Sep 17 00:00:00 2001 From: 0rC0 Date: Tue, 5 Oct 2021 22:24:21 +0200 Subject: [PATCH 1080/1665] run_black --- nipype/interfaces/cat12/preprocess.py | 83 +++++++++++++++++---------- 1 file changed, 52 insertions(+), 31 deletions(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index fd7f1f165e..482ad3c852 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -608,67 +608,79 @@ class CAT12SANLMDenoisingInputSpec(SPMCommandInputSpec): 0, 2, 512, - field='spm_type', + field="spm_type", usedefault=True, - desc='Data type of the output images. 0 = same, 2 = uint8, 512 = uint16, 16 = single (32 bit)' + desc="Data type of the output images. 
0 = same, 2 = uint8, 512 = uint16, 16 = single (32 bit)", + ) + intlim = traits.Int( + field="intlim", + default_value=100, + usedefault=True, + desc="intensity limitation (default = 100)", ) filename_prefix = traits.Str( - field='prefix', - default_value='sanlm_', + field="prefix", + default_value="sanlm_", usedefault=True, - desc='Filename prefix. Specify the string to be prepended to the filenames of the filtered image file(s).', + desc="Filename prefix. Specify the string to be prepended to the filenames of the filtered image file(s).", ) filename_suffix = traits.Str( - field='suffix', - default_value='', + field="suffix", + default_value="", usedefault=True, - desc='Filename suffix. Specify the string to be appended to the filenames of the filtered image file(s).' + desc="Filename suffix. Specify the string to be appended to the filenames of the filtered image file(s).", ) addnoise = traits.Float( default_value=0.5, usedefault=True, - field='addnoise', - desc='Strength of additional noise in noise-free regions. Add minimal amount of noise in regions without any noise to avoid image segmentation problems. This parameter defines the strength of additional noise as percentage of the average signal intensity.') + field="addnoise", + desc="""Strength of additional noise in noise-free regions. + Add minimal amount of noise in regions without any noise to avoid image segmentation problems. + This parameter defines the strength of additional noise as percentage of the average signal intensity.""", + ) rician = traits.Bool( True, - field='rician', + field="rician", usedefault=True, - desc='''Rician noise - MRIs can have Gaussian or Rician distributed noise with uniform or nonuniform variance across the image. If SNR is high enough - (>3) noise can be well approximated by Gaussian noise in the foreground. However, for SENSE reconstruction or DTI data a Rician - distribution is expected. Please note that the Rician noise estimation is sensitive for large signals in the neighbourhood and can lead to - artefacts, e.g. cortex can be affected by very high values in the scalp or in blood vessels.''') + desc="""Rician noise + MRIs can have Gaussian or Rician distributed noise with uniform or nonuniform variance across the image. + If SNR is high enough (>3) noise can be well approximated by Gaussian noise in the foreground. However, for + SENSE reconstruction or DTI data a Rician distribution is expected. Please note that the Rician noise estimation + is sensitive for large signals in the neighbourhood and can lead to artefacts, e.g. cortex can be affected by + very high values in the scalp or in blood vessels.""", + ) replace_nan_and_inf = traits.Bool( True, - field='replaceNANandINF', + field="replaceNANandINF", usedefault=True, - desc='Replace NAN by 0, -INF by the minimum and INF by the maximum of the image.' + desc="Replace NAN by 0, -INF by the minimum and INF by the maximum of the image.", ) noisecorr_strength = traits.Enum( - '-Inf', + "-Inf", 2, 4, - field='nlmfilter.optimized.NCstr', + field="nlmfilter.optimized.NCstr", usedefault=True, - desc='''Strength of Noise Corrections - Strength of the (sub-resolution) spatial adaptive non local means (SANLM) noise correction. Please note that the filter strength is - automatically estimated. Change this parameter only for specific conditions. 
The "light" option applies half of the filter strength of the - adaptive "medium" cases, whereas the "strong" option uses the full filter strength, force sub-resolution filtering and applies an - additional iteration. Sub-resolution filtering is only used in case of high image resolution below 0.8 mm or in case of the "strong" - option. light = 2, medium = -Inf, strong = 4''' + desc="""Strength of Noise Corrections + Strength of the (sub-resolution) spatial adaptive non local means (SANLM) noise correction. Please note + that the filter strength is automatically estimated. Change this parameter only for specific conditions. The + "light" option applies half of the filter strength of the adaptive "medium" cases, whereas the "strong" + option uses the full filter strength, force sub-resolution filtering and applies an additional iteration. + Sub-resolution filtering is only used in case of high image resolution below 0.8 mm or in case of the + "strong" option. light = 2, medium = -Inf, strong = 4""", ) class CAT12SANLMDenoisingOutputSpec(TraitedSpec): - out_file = File(desc='out file') + out_file = File(desc="out file") class CAT12SANLMDenoising(SPMCommand): @@ -682,6 +694,13 @@ class CAT12SANLMDenoising(SPMCommand): This filter is internally used in the segmentation procedure anyway. Thus, it is not necessary (and not recommended) to apply the filter before segmentation. + ______________________________________________________________________ + Christian Gaser, Robert Dahnke + Structural Brain Mapping Group (http://www.neuro.uni-jena.de) + Departments of Neurology and Psychiatry + Jena University Hospital + ______________________________________________________________________ + Examples -------- @@ -714,10 +733,12 @@ def _format_arg(self, opt, spec, val): def _list_outputs(self): outputs = self._outputs().get() - outputs['out_file'] = fname_presuffix(self.inputs.in_files[0], - newpath=os.getcwd(), - prefix=self.inputs.filename_prefix, - suffix=self.inputs.filename_suffix) + outputs["out_file"] = fname_presuffix( + self.inputs.in_files[0], + newpath=os.getcwd(), + prefix=self.inputs.filename_prefix, + suffix=self.inputs.filename_suffix, + ) return outputs From 58974f66f515f135d033dce5a99020a5221964d9 Mon Sep 17 00:00:00 2001 From: 0rC0 Date: Tue, 5 Oct 2021 22:44:49 +0200 Subject: [PATCH 1081/1665] fix --- nipype/interfaces/cat12/preprocess.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index 482ad3c852..34cbe91491 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -638,8 +638,8 @@ class CAT12SANLMDenoisingInputSpec(SPMCommandInputSpec): default_value=0.5, usedefault=True, field="addnoise", - desc="""Strength of additional noise in noise-free regions. - Add minimal amount of noise in regions without any noise to avoid image segmentation problems. + desc="""Strength of additional noise in noise-free regions. + Add minimal amount of noise in regions without any noise to avoid image segmentation problems. This parameter defines the strength of additional noise as percentage of the average signal intensity.""", ) @@ -648,10 +648,10 @@ class CAT12SANLMDenoisingInputSpec(SPMCommandInputSpec): field="rician", usedefault=True, desc="""Rician noise - MRIs can have Gaussian or Rician distributed noise with uniform or nonuniform variance across the image. 
- If SNR is high enough (>3) noise can be well approximated by Gaussian noise in the foreground. However, for - SENSE reconstruction or DTI data a Rician distribution is expected. Please note that the Rician noise estimation - is sensitive for large signals in the neighbourhood and can lead to artefacts, e.g. cortex can be affected by + MRIs can have Gaussian or Rician distributed noise with uniform or nonuniform variance across the image. + If SNR is high enough (>3) noise can be well approximated by Gaussian noise in the foreground. However, for + SENSE reconstruction or DTI data a Rician distribution is expected. Please note that the Rician noise estimation + is sensitive for large signals in the neighbourhood and can lead to artefacts, e.g. cortex can be affected by very high values in the scalp or in blood vessels.""", ) @@ -669,11 +669,11 @@ class CAT12SANLMDenoisingInputSpec(SPMCommandInputSpec): field="nlmfilter.optimized.NCstr", usedefault=True, desc="""Strength of Noise Corrections - Strength of the (sub-resolution) spatial adaptive non local means (SANLM) noise correction. Please note - that the filter strength is automatically estimated. Change this parameter only for specific conditions. The - "light" option applies half of the filter strength of the adaptive "medium" cases, whereas the "strong" + Strength of the (sub-resolution) spatial adaptive non local means (SANLM) noise correction. Please note + that the filter strength is automatically estimated. Change this parameter only for specific conditions. The + "light" option applies half of the filter strength of the adaptive "medium" cases, whereas the "strong" option uses the full filter strength, force sub-resolution filtering and applies an additional iteration. - Sub-resolution filtering is only used in case of high image resolution below 0.8 mm or in case of the + Sub-resolution filtering is only used in case of high image resolution below 0.8 mm or in case of the "strong" option. light = 2, medium = -Inf, strong = 4""", ) From 70f193f9c7a83ddc1c3cac42a482feff515ad1aa Mon Sep 17 00:00:00 2001 From: 0rC0 Date: Tue, 5 Oct 2021 23:00:46 +0200 Subject: [PATCH 1082/1665] black __init__.py --- nipype/interfaces/cat12/__init__.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/nipype/interfaces/cat12/__init__.py b/nipype/interfaces/cat12/__init__.py index 3d83da434d..40059b23e1 100644 --- a/nipype/interfaces/cat12/__init__.py +++ b/nipype/interfaces/cat12/__init__.py @@ -1,7 +1,4 @@ -from .preprocess import ( - CAT12Segment, - CAT12SANLMDenoising -) +from .preprocess import CAT12Segment, CAT12SANLMDenoising from .surface import ( ExtractAdditionalSurfaceParameters, ExtractROIBasedSurfaceMeasures, From 3e3ba8733d615cff0256532df85f48b766b988ad Mon Sep 17 00:00:00 2001 From: 0rC0 Date: Tue, 5 Oct 2021 23:14:09 +0200 Subject: [PATCH 1083/1665] example_skip --- nipype/interfaces/cat12/preprocess.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index 34cbe91491..33ec9ff205 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -693,7 +693,6 @@ class CAT12SANLMDenoising(SPMCommand): This filter is internally used in the segmentation procedure anyway. Thus, it is not necessary (and not recommended) to apply the filter before segmentation. 
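For illustration only, a minimal sketch of how the Boolean ``rician`` flag and the ``noisecorr_strength`` option described above might be set on this interface; the input filename is a placeholder:

    >>> from nipype.interfaces import cat12
    >>> denoise = cat12.CAT12SANLMDenoising()
    >>> denoise.inputs.in_files = 'anatomical.nii'  # placeholder input volume
    >>> denoise.inputs.rician = True                # assume Rician rather than Gaussian noise
    >>> denoise.inputs.noisecorr_strength = 4       # "strong" filtering
    >>> denoise.run()  # doctest: +SKIP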
- ______________________________________________________________________ Christian Gaser, Robert Dahnke Structural Brain Mapping Group (http://www.neuro.uni-jena.de) @@ -701,13 +700,12 @@ class CAT12SANLMDenoising(SPMCommand): Jena University Hospital ______________________________________________________________________ - Examples -------- >>> from nipype.interfaces import cat12 >>> c = cat12.CAT12SANLMDenoising() - >>> c.inputs.in_files='anatomical.nii' - >>> c.run() + >>> c.inputs.in_files = 'anatomical.nii' + >>> c.run() # doctest: +SKIP """ input_spec = CAT12SANLMDenoisingInputSpec From 2c56ae7403ffac289b0198797ef853ce906d0b2b Mon Sep 17 00:00:00 2001 From: mnoergaard Date: Wed, 6 Oct 2021 18:26:50 +0200 Subject: [PATCH 1084/1665] Add output specs for mri_gtmpvc --- nipype/interfaces/freesurfer/petsurfer.py | 36 +++++++++++++++++++---- 1 file changed, 31 insertions(+), 5 deletions(-) diff --git a/nipype/interfaces/freesurfer/petsurfer.py b/nipype/interfaces/freesurfer/petsurfer.py index 749426aa3e..eb077f337d 100644 --- a/nipype/interfaces/freesurfer/petsurfer.py +++ b/nipype/interfaces/freesurfer/petsurfer.py @@ -146,7 +146,7 @@ class GTMSeg(FSCommand): -------- >>> gtmseg = GTMSeg() >>> gtmseg.inputs.out_file = 'gtmseg.nii' - >>> gtmseg.inputs.subject_id = 'subjec_id' + >>> gtmseg.inputs.subject_id = 'subject_id' >>> gtmseg.cmdline == 'gtmseg --o gtmseg.nii --s subject_id' """ @@ -197,7 +197,7 @@ class GTMPVCInputSpec(FSTraitedSpec): argstr="--regheader", desc="assume that input is in anatomical space" ) - output_dir = traits.Str(argstr="--o %s", desc="save outputs to dir", genfile=True) + pvc_dir = traits.Str(argstr="--o %s", desc="save outputs to dir", genfile=True) mask_file = File( exists=True, argstr="--mask %s", desc="ignore areas outside of the mask (in input vol space)" @@ -379,7 +379,14 @@ class GTMPVCInputSpec(FSTraitedSpec): ) class GTMPVCOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="PVC correction") + + pvc_dir = Directory(exists=True, desc="output directory") + ref_file = File(exists=True, desc="Reference TAC in .dat") + hb_nifti = File(exists=True, desc="High-binding TAC in nifti") + hb_dat = File(exists=True, desc="High-binding TAC in .dat") + nopvc_file = File(exists=True, desc="TACs for all regions with no PVC") + gtm_file = File(exists=True, desc="TACs for all regions with GTM PVC") + gtm_stats = File(exists=True, desc="Statistics for the GTM PVC") class GTMPVC(FSCommand): """create an anatomical segmentation for the geometric transfer matrix (GTM). 
@@ -399,14 +406,33 @@ class GTMPVC(FSCommand): >>> gtmpvc.inputs.no_rescale = True >>> gtmpvc.inputs.save_input = True >>> gtmpvc.cmdline == 'mri_gtmpvc --auto-mask 1.000000 0.100000 --default-seg-merge - --i sub-01_ses-baseline_pet.nii.gz --km-hb 11 12 50 51 --km-ref 8 47 --no-rescale - --o pvc --psf 4.000000 --reg sub-01_ses-baseline_pet_mean_reg.lta --save-input --seg gtmseg.mgz' + --i sub-01_ses-baseline_pet.nii.gz --km-hb 11 12 50 51 --km-ref 8 47 --no-rescale + --o pvc --psf 4.000000 --reg sub-01_ses-baseline_pet_mean_reg.lta --save-input --seg gtmseg.mgz' """ _cmd = "mri_gtmpvc" input_spec = GTMPVCInputSpec output_spec = GTMPVCOutputSpec + def _list_outputs(self): + outputs = self.output_spec().get() + # Get the top-level output directory + if not isdefined(self.inputs.pvc_dir): + pvcdir = os.getcwd() + else: + pvcdir = os.path.abspath(self.inputs.pvc_dir) + outputs["pvc_dir"] = pvcdir + + # Assign the output files that always get created + outputs["ref_file"] = os.path.join(pvcdir, "km.ref.tac.dat") + outputs["hb_nifti"] = os.path.join(pvcdir, "km.hb.tac.nii.gz") + outputs["hb_dat"] = os.path.join(pvcdir, "km.hb.tac.dat") + outputs["nopvc_file"] = os.path.join(pvcdir, "nopvc.nii.gz") + outputs["gtm_file"] = os.path.join(pvcdir, "gtm.nii.gz") + outputs["gtm_stats"] = os.path.join(pvcdir, "gtm.stats.dat") + + return outputs + def _format_arg(self, name, spec, value): return super(GTMPVC, self)._format_arg(name, spec, value) From f046d55932ed8e8fd499bb0df123352d45d3834f Mon Sep 17 00:00:00 2001 From: mnoergaard Date: Wed, 6 Oct 2021 22:43:09 +0200 Subject: [PATCH 1085/1665] Update logan function to correct output (bp) --- nipype/interfaces/freesurfer/petsurfer.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nipype/interfaces/freesurfer/petsurfer.py b/nipype/interfaces/freesurfer/petsurfer.py index eb077f337d..d049ce19a2 100644 --- a/nipype/interfaces/freesurfer/petsurfer.py +++ b/nipype/interfaces/freesurfer/petsurfer.py @@ -379,7 +379,7 @@ class GTMPVCInputSpec(FSTraitedSpec): ) class GTMPVCOutputSpec(TraitedSpec): - + pvc_dir = Directory(exists=True, desc="output directory") ref_file = File(exists=True, desc="Reference TAC in .dat") hb_nifti = File(exists=True, desc="High-binding TAC in nifti") @@ -387,6 +387,7 @@ class GTMPVCOutputSpec(TraitedSpec): nopvc_file = File(exists=True, desc="TACs for all regions with no PVC") gtm_file = File(exists=True, desc="TACs for all regions with GTM PVC") gtm_stats = File(exists=True, desc="Statistics for the GTM PVC") + class GTMPVC(FSCommand): """create an anatomical segmentation for the geometric transfer matrix (GTM). 
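As a rough sketch of what the output spec added above implies: the PVC outputs are expected to resolve to fixed filenames inside ``pvc_dir`` (paths below are illustrative):

    >>> gtmpvc = GTMPVC()
    >>> gtmpvc.inputs.in_file = 'sub-01_ses-baseline_pet.nii.gz'
    >>> gtmpvc.inputs.segmentation = 'gtmseg.mgz'
    >>> gtmpvc.inputs.reg_file = 'sub-01_ses-baseline_pet_mean_reg.lta'
    >>> gtmpvc.inputs.pvc_dir = 'pvc'
    >>> res = gtmpvc.run()  # doctest: +SKIP
    >>> # e.g. res.outputs.gtm_file  -> <cwd>/pvc/gtm.nii.gz
    >>> #      res.outputs.gtm_stats -> <cwd>/pvc/gtm.stats.dat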
@@ -538,7 +539,7 @@ class LoganRefInputSpec(GLMFitInputSpec): ) class LoganRefOutputSpec(GLMFitInputSpec): - vd = File(desc="BP estimates") + bp = File(desc="BP estimates") class LoganRef(GLMFit): _cmd = "mri_glmfit" @@ -553,5 +554,5 @@ def _list_outputs(self): ext = '.nii' else: ext = '.mgh' - outputs['vd'] = os.join(self.inputs.glm_dir, 'vd' + ext) + outputs['bp'] = os.join(self.inputs.glm_dir, 'bp' + ext) return outputs \ No newline at end of file From cd0afae497e6b9cb05ebcfbb62848d6ca909eb5e Mon Sep 17 00:00:00 2001 From: mnoergaard Date: Mon, 11 Oct 2021 17:05:13 +0200 Subject: [PATCH 1086/1665] Update petsurfer.py --- nipype/interfaces/freesurfer/petsurfer.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nipype/interfaces/freesurfer/petsurfer.py b/nipype/interfaces/freesurfer/petsurfer.py index d049ce19a2..8b8ecd8f13 100644 --- a/nipype/interfaces/freesurfer/petsurfer.py +++ b/nipype/interfaces/freesurfer/petsurfer.py @@ -455,7 +455,7 @@ class MRTM(GLMFit): -------- >>> mrtm = MRTM() >>> mrtm.inputs.in_file = 'tac.nii' - >>> gtmseg.inputs.mrtm = ('ref_tac.dat', 'timing.dat') + >>> mrtm.inputs.mrtm = ('ref_tac.dat', 'timing.dat') >>> mrtm.inputs.glmdir = 'mrtm' >>> mrtm.cmdline == 'mri_glmfit --glmdir mrtm --y tac.nii --mrtm1 ref_tac.dat timing.dat' """ @@ -496,10 +496,10 @@ class MRTM2(GLMFit): """Perform MRTM2 kinetic modeling. Examples -------- - >>> mrtm = MRTM() - >>> mrtm.inputs.in_file = 'tac.nii' - >>> gtmseg.inputs.mrtm = ('ref_tac.dat', 'timing.dat', 'k2prime.dat') - >>> mrtm.inputs.glmdir = 'mrtm2' + >>> mrtm2 = MRTM2() + >>> mrtm2.inputs.in_file = 'tac.nii' + >>> mrtm2.inputs.mrtm = ('ref_tac.dat', 'timing.dat', 'k2prime.dat') + >>> mrtm2.inputs.glmdir = 'mrtm2' >>> mrtm2.cmdline == 'mri_glmfit --glmdir mrtm2 --y tac.nii --mrtm2 ref_tac.dat timing.dat k2prime.dat' """ From d52ac5c3f939b17c0c58845e974fa2e86fbfa924 Mon Sep 17 00:00:00 2001 From: mnoergaard Date: Mon, 11 Oct 2021 17:21:13 +0200 Subject: [PATCH 1087/1665] Update petsurfer.py --- nipype/interfaces/freesurfer/petsurfer.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nipype/interfaces/freesurfer/petsurfer.py b/nipype/interfaces/freesurfer/petsurfer.py index 8b8ecd8f13..50824d62f8 100644 --- a/nipype/interfaces/freesurfer/petsurfer.py +++ b/nipype/interfaces/freesurfer/petsurfer.py @@ -455,7 +455,7 @@ class MRTM(GLMFit): -------- >>> mrtm = MRTM() >>> mrtm.inputs.in_file = 'tac.nii' - >>> mrtm.inputs.mrtm = ('ref_tac.dat', 'timing.dat') + >>> mrtm.inputs.mrtm1 = [('ref_tac.dat', 'timing.dat')] >>> mrtm.inputs.glmdir = 'mrtm' >>> mrtm.cmdline == 'mri_glmfit --glmdir mrtm --y tac.nii --mrtm1 ref_tac.dat timing.dat' """ @@ -498,9 +498,9 @@ class MRTM2(GLMFit): -------- >>> mrtm2 = MRTM2() >>> mrtm2.inputs.in_file = 'tac.nii' - >>> mrtm2.inputs.mrtm = ('ref_tac.dat', 'timing.dat', 'k2prime.dat') + >>> mrtm2.inputs.mrtm2 = [('ref_tac.dat', 'timing.dat', 0.07872)] >>> mrtm2.inputs.glmdir = 'mrtm2' - >>> mrtm2.cmdline == 'mri_glmfit --glmdir mrtm2 --y tac.nii --mrtm2 ref_tac.dat timing.dat k2prime.dat' + >>> mrtm2.cmdline == 'mri_glmfit --glmdir mrtm2 --y tac.nii --mrtm2 ref_tac.dat timing.dat 0.07872' """ _cmd = "mri_glmfit" From 0755948bd5b9406e9b049f05c43f30c6f1d884de Mon Sep 17 00:00:00 2001 From: avneet14027 Date: Tue, 12 Oct 2021 08:30:11 +0100 Subject: [PATCH 1088/1665] update petsurfer.py --- nipype/interfaces/freesurfer/petsurfer.py | 28 ++++++++++++++--------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git 
a/nipype/interfaces/freesurfer/petsurfer.py b/nipype/interfaces/freesurfer/petsurfer.py index 50824d62f8..c668df276a 100644 --- a/nipype/interfaces/freesurfer/petsurfer.py +++ b/nipype/interfaces/freesurfer/petsurfer.py @@ -136,7 +136,7 @@ class GTMSegInputSpec(FSTraitedSpec): class GTMSegOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="GTM segmentation") + gtm_file = File(exists=True, desc="GTM segmentation") class GTMSeg(FSCommand): @@ -155,6 +155,11 @@ class GTMSeg(FSCommand): input_spec = GTMSegInputSpec output_spec = GTMSegOutputSpec + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['gtm_file'] = os.path.join(self.inputs.subjects_dir,self.inputs.subject_id,'mri','gtmseg.mgz') + return outputs + def _format_arg(self, name, spec, value): return super(GTMSeg, self)._format_arg(name, spec, value) @@ -398,7 +403,7 @@ class GTMPVC(FSCommand): >>> gtmpvc.inputs.in_file = 'sub-01_ses-baseline_pet.nii.gz' >>> gtmpvc.inputs.segmentation = 'gtmseg.mgz' >>> gtmpvc.inputs.reg_file = 'sub-01_ses-baseline_pet_mean_reg.lta' - >>> gtmpvc.inputs.output_dir = 'pvc' + >>> gtmpvc.inputs.pvc_dir = 'pvc' >>> gtmpvc.inputs.psf = 4 >>> gtmpvc.inputs.default_seg_merge = True >>> gtmpvc.inputs.auto_mask = (1, 0.1) @@ -440,7 +445,8 @@ def _format_arg(self, name, spec, value): class MRTMInputSpec(GLMFitInputSpec): mrtm1 = InputMultiPath( - traits.Tuple(File(exists=True, mandatory=True), File(exists=True, mandatory=True)), + (File(exists=True, mandatory=True), + File(exists=True, mandatory=True)), argstr="--mrtm1 %s %s...", desc="RefTac TimeSec : perform MRTM1 kinetic modeling", ) @@ -455,7 +461,7 @@ class MRTM(GLMFit): -------- >>> mrtm = MRTM() >>> mrtm.inputs.in_file = 'tac.nii' - >>> mrtm.inputs.mrtm1 = [('ref_tac.dat', 'timing.dat')] + >>> gtmseg.inputs.mrtm = [('ref_tac.dat', 'timing.dat')] >>> mrtm.inputs.glmdir = 'mrtm' >>> mrtm.cmdline == 'mri_glmfit --glmdir mrtm --y tac.nii --mrtm1 ref_tac.dat timing.dat' """ @@ -496,11 +502,11 @@ class MRTM2(GLMFit): """Perform MRTM2 kinetic modeling. 
Examples -------- - >>> mrtm2 = MRTM2() - >>> mrtm2.inputs.in_file = 'tac.nii' - >>> mrtm2.inputs.mrtm2 = [('ref_tac.dat', 'timing.dat', 0.07872)] - >>> mrtm2.inputs.glmdir = 'mrtm2' - >>> mrtm2.cmdline == 'mri_glmfit --glmdir mrtm2 --y tac.nii --mrtm2 ref_tac.dat timing.dat 0.07872' + >>> mrtm = MRTM() + >>> mrtm.inputs.in_file = 'tac.nii' + >>> gtmseg.inputs.mrtm = [('ref_tac.dat', 'timing.dat', 'k2prime.dat')] + >>> mrtm.inputs.glmdir = 'mrtm2' + >>> mrtm2.cmdline == 'mri_glmfit --glmdir mrtm2 --y tac.nii --mrtm2 ref_tac.dat timing.dat k2prime.dat' """ _cmd = "mri_glmfit" @@ -515,7 +521,7 @@ def _list_outputs(self): ext = '.nii' else: ext = '.mgh' - outputs['bp'] = os.join(self.inputs.glm_dir, 'bp' + ext) + outputs['bp'] = os.path.join(self.inputs.glm_dir, 'bp', ext) return outputs class LoganRefInputSpec(GLMFitInputSpec): @@ -554,5 +560,5 @@ def _list_outputs(self): ext = '.nii' else: ext = '.mgh' - outputs['bp'] = os.join(self.inputs.glm_dir, 'bp' + ext) + outputs['bp'] = os.path.join(self.inputs.glm_dir, 'bp' + ext) return outputs \ No newline at end of file From 03656b310e53da1494ea723c31e77070b6946f87 Mon Sep 17 00:00:00 2001 From: avneet14027 Date: Tue, 12 Oct 2021 09:11:47 +0100 Subject: [PATCH 1089/1665] update petsurfer.py --- nipype/interfaces/freesurfer/petsurfer.py | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/nipype/interfaces/freesurfer/petsurfer.py b/nipype/interfaces/freesurfer/petsurfer.py index c668df276a..55adc7efd5 100644 --- a/nipype/interfaces/freesurfer/petsurfer.py +++ b/nipype/interfaces/freesurfer/petsurfer.py @@ -53,7 +53,7 @@ class GTMSegInputSpec(FSTraitedSpec): desc="run xcerebralseg on this subject to create apas+head.mgz" ) - out_file = File( + gtm_file = File( argstr="--o %s", desc="output volume relative to subject/mri (default is gtmseg.mgz)" ) @@ -445,8 +445,7 @@ def _format_arg(self, name, spec, value): class MRTMInputSpec(GLMFitInputSpec): mrtm1 = InputMultiPath( - (File(exists=True, mandatory=True), - File(exists=True, mandatory=True)), + traits.Tuple(File(exists=True, mandatory=True), File(exists=True, mandatory=True)), argstr="--mrtm1 %s %s...", desc="RefTac TimeSec : perform MRTM1 kinetic modeling", ) @@ -461,7 +460,7 @@ class MRTM(GLMFit): -------- >>> mrtm = MRTM() >>> mrtm.inputs.in_file = 'tac.nii' - >>> gtmseg.inputs.mrtm = [('ref_tac.dat', 'timing.dat')] + >>> mrtm.inputs.mrtm1 = [('ref_tac.dat', 'timing.dat')] >>> mrtm.inputs.glmdir = 'mrtm' >>> mrtm.cmdline == 'mri_glmfit --glmdir mrtm --y tac.nii --mrtm1 ref_tac.dat timing.dat' """ @@ -502,11 +501,11 @@ class MRTM2(GLMFit): """Perform MRTM2 kinetic modeling. 
Examples -------- - >>> mrtm = MRTM() - >>> mrtm.inputs.in_file = 'tac.nii' - >>> gtmseg.inputs.mrtm = [('ref_tac.dat', 'timing.dat', 'k2prime.dat')] - >>> mrtm.inputs.glmdir = 'mrtm2' - >>> mrtm2.cmdline == 'mri_glmfit --glmdir mrtm2 --y tac.nii --mrtm2 ref_tac.dat timing.dat k2prime.dat' + >>> mrtm2 = MRTM2() + >>> mrtm2.inputs.in_file = 'tac.nii' + >>> mrtm2.inputs.mrtm2 = [('ref_tac.dat', 'timing.dat', 0.07872)] + >>> mrtm2.inputs.glmdir = 'mrtm2' + >>> mrtm2.cmdline == 'mri_glmfit --glmdir mrtm2 --y tac.nii --mrtm2 ref_tac.dat timing.dat 0.07872' """ _cmd = "mri_glmfit" @@ -521,7 +520,7 @@ def _list_outputs(self): ext = '.nii' else: ext = '.mgh' - outputs['bp'] = os.path.join(self.inputs.glm_dir, 'bp', ext) + outputs['bp'] = os.join(self.inputs.glm_dir, 'bp', ext) return outputs class LoganRefInputSpec(GLMFitInputSpec): @@ -560,5 +559,5 @@ def _list_outputs(self): ext = '.nii' else: ext = '.mgh' - outputs['bp'] = os.path.join(self.inputs.glm_dir, 'bp' + ext) + outputs['bp'] = os.join(self.inputs.glm_dir, 'bp', ext) return outputs \ No newline at end of file From 14aaca18f891e45134f87fdde1c0adda1762fe27 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 13 Oct 2021 13:24:44 -0400 Subject: [PATCH 1090/1665] CI: Update CircleCI machine image (#3391) * CI: Acknowledge images are built with 3.8 * CI: Try ubuntu-2004 machine --- .circleci/config.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 26369371b1..0afcb62533 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,5 +1,5 @@ _machine_kwds: &machine_kwds - image: circleci/classic:201808-01 + image: ubuntu-2004:202107-02 _store_artifacts_kwds: &store_artifacts_kwds path: /home/circleci/work/tests @@ -43,15 +43,15 @@ _get_base_image: &get_base_image exit 1 fi -_build_main_image_py36: &build_main_image_py36 - name: Build main image (py36) +_build_main_image_py38: &build_main_image_py38 + name: Build main image (py38) no_output_timeout: 60m command: | tools/retry_cmd.sh -n 5 -s 15 \ docker build \ --rm=false \ --tag nipype/nipype:latest \ - --tag nipype/nipype:py36 \ + --tag nipype/nipype:py38 \ $(test -z "${CIRCLE_TAG}" || echo --tag nipype/nipype:"${CIRCLE_TAG}") \ --build-arg BUILD_DATE="$(date -u +"%Y-%m-%dT%H:%M:%SZ")" \ --build-arg VCS_REF="$(git rev-parse --short HEAD)" \ @@ -146,7 +146,7 @@ jobs: - run: *generate_dockerfiles - run: *modify_nipype_version - run: *get_base_image - - run: *build_main_image_py36 + - run: *build_main_image_py38 - run: *_get_codecov - run: *_download_test_data - run: *prepare_working_directory @@ -165,8 +165,8 @@ jobs: if [ "$CIRCLE_BRANCH" = "master" -a -z "$CIRCLE_PULL_REQUEST" ]; then docker save nipype/nipype:base \ nipype/nipype:latest \ - nipype/nipype:py36 | gzip -1 > /tmp/docker/nipype-base-latest-py36.tar.gz \ - && du -h /tmp/docker/nipype-base-latest-py36.tar.gz + nipype/nipype:py38 | gzip -1 > /tmp/docker/nipype-base-latest-py38.tar.gz \ + && du -h /tmp/docker/nipype-base-latest-py38.tar.gz fi - persist_to_workspace: root: /tmp @@ -184,7 +184,7 @@ jobs: name: Load saved Docker images. 
no_output_timeout: 60m command: | - docker load < /tmp/docker/nipype-base-latest-py36.tar.gz + docker load < /tmp/docker/nipype-base-latest-py38.tar.gz - run: name: Push to DockerHub no_output_timeout: 120m @@ -192,7 +192,7 @@ jobs: echo "$DOCKER_PASS" | docker login -u "$DOCKER_USER" --password-stdin docker push nipype/nipype:base docker push nipype/nipype:latest - docker push nipype/nipype:py36 + docker push nipype/nipype:py38 test -z "${CIRCLE_TAG}" || docker push nipype/nipype:"${CIRCLE_TAG}" - run: name: Move pruned Dockerfile to /tmp/docker/cache directory From 3cd5e5256a5dd4bba0efd38d68c64258049d49c8 Mon Sep 17 00:00:00 2001 From: Koen Helwegen Date: Wed, 13 Oct 2021 19:26:25 +0200 Subject: [PATCH 1091/1665] [DOC] Fix typo in README (#3386) Missing `>` results in link being rendered incorrectly --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 70e64061bd..b51c2bf401 100644 --- a/README.rst +++ b/README.rst @@ -82,7 +82,7 @@ Python 2 Statement Python 2.7 reaches its end-of-life in January 2020, which means it will *no longer be maintained* by Python developers. `Many projects -`__ are removing support in advance of this deadline, which will make it increasingly untenable to try to support Python 2, even if we wanted to. From 9e127cfa7b58596da0633b4dcb7bdb1977c635c7 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 13 Oct 2021 13:42:17 -0400 Subject: [PATCH 1092/1665] CI: Run tests on latest image --- .circleci/test_pytest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/test_pytest.sh b/.circleci/test_pytest.sh index b93208f395..832a52a02b 100644 --- a/.circleci/test_pytest.sh +++ b/.circleci/test_pytest.sh @@ -1,3 +1,3 @@ #!/bin/bash -docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work -e CI_SKIP_TEST=1 -e NIPYPE_RESOURCE_MONITOR=1 "${DOCKER_IMAGE}:py36" /usr/bin/run_pytests.sh +docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work -e CI_SKIP_TEST=1 -e NIPYPE_RESOURCE_MONITOR=1 "${DOCKER_IMAGE}:py38" /usr/bin/run_pytests.sh From 4e1080155e3c2942c9a4550e35880031534c3ca3 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 13 Oct 2021 13:49:28 -0400 Subject: [PATCH 1093/1665] DOC: Update testing instructions [skip ci] --- doc/devel/testing_nipype.rst | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/doc/devel/testing_nipype.rst b/doc/devel/testing_nipype.rst index 5713f6727b..0c01531f0a 100644 --- a/doc/devel/testing_nipype.rst +++ b/doc/devel/testing_nipype.rst @@ -90,16 +90,15 @@ Testing Nipype using Docker Nipype is tested inside Docker containers and users can use nipype images to test local versions. First, install the `Docker Engine `_. Nipype has one base docker image called nipype/nipype:base, that contains several useful tools -(FreeSurfer, AFNI, FSL, ANTs, etc.), and additional test images -for specific Python versions: py27 for Python 2.7 and py36 for Python 3.6. 
+(FreeSurfer, AFNI, FSL, ANTs, etc.), and an additional test image for Python 3.8: -Users can pull the nipype image for Python 3.6 as follows:: +Users can pull the nipype image for Python 3.8 as follows:: - docker pull nipype/nipype:py36 + docker pull nipype/nipype:py38 In order to test a local version of nipype you can run test within container as follows:: - docker run -it -v $PWD:/src/nipype --rm nipype/nipype:py36 py.test -v --doctest-modules /src/nipype/nipype + docker run -it -v $PWD:/src/nipype --rm nipype/nipype:py38 py.test -v --doctest-modules /src/nipype/nipype Additional comments From 4d915a80512aa503b53a986ef140aaa406fb9b66 Mon Sep 17 00:00:00 2001 From: Lea Waller Date: Wed, 13 Oct 2021 20:20:05 +0200 Subject: [PATCH 1094/1665] [FIX] Also allow `errno.EBUSY` during `emptydirs` on NFS (#3357) * Also allow `errno.EBUSY` during `emptydirs` on NFS - Can occur if a file is still open somewhere, so NFS will rename it to a hidden file in the same directory - When `shutil` tries to delete that hidden file, we get an `OSError` with `errno.EBUSY` * Ignore `.nfs` placeholder files when catching the error - I forgot that `os.listdir` also lists hidden files in the previous commit * Add unit test for `emptydirs` on NFS - With mock for NFS silly-rename (yes, it's really called that) behavior * Run `black` * Update nipype/utils/tests/test_filemanip.py Handle mock test case when no `dir_fd` is passed --- nipype/utils/filemanip.py | 11 ++++++++--- nipype/utils/tests/test_filemanip.py | 24 ++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 3 deletions(-) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index f02efa163f..03786ec935 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -780,8 +780,13 @@ def emptydirs(path, noexist_ok=False): try: shutil.rmtree(path) except OSError as ex: - elcont = os.listdir(path) - if ex.errno == errno.ENOTEMPTY and not elcont: + elcont = [ + Path(root) / file + for root, _, files in os.walk(path) + for file in files + if not file.startswith(".nfs") + ] + if ex.errno in [errno.ENOTEMPTY, errno.EBUSY] and not elcont: fmlogger.warning( "An exception was raised trying to remove old %s, but the path" " seems empty. Is it an NFS mount?. 
Passing the exception.", @@ -793,7 +798,7 @@ def emptydirs(path, noexist_ok=False): else: raise ex - os.makedirs(path) + os.makedirs(path, exist_ok=True) def silentrm(filename): diff --git a/nipype/utils/tests/test_filemanip.py b/nipype/utils/tests/test_filemanip.py index 299029a8d2..f02ad4164e 100644 --- a/nipype/utils/tests/test_filemanip.py +++ b/nipype/utils/tests/test_filemanip.py @@ -31,6 +31,7 @@ savepkl, path_resolve, write_rst_list, + emptydirs, ) @@ -670,3 +671,26 @@ def test_write_rst_list(tmp_path, items, expected): else: with pytest.raises(expected): write_rst_list(items) + + +def nfs_unlink(pathlike, *, dir_fd=None): + if dir_fd is None: + path = Path(pathlike) + deleted = path.with_name(".nfs00000000") + path.rename(deleted) + else: + os.rename(pathlike, ".nfs1111111111", src_dir_fd=dir_fd, dst_dir_fd=dir_fd) + + +def test_emptydirs_dangling_nfs(tmp_path): + busyfile = tmp_path / "base" / "subdir" / "busyfile" + busyfile.parent.mkdir(parents=True) + busyfile.touch() + + with mock.patch("os.unlink") as mocked: + mocked.side_effect = nfs_unlink + emptydirs(tmp_path / "base") + + assert Path.exists(tmp_path / "base") + assert not busyfile.exists() + assert busyfile.parent.exists() # Couldn't remove From 922db8e512143f8caf57fe02c35579e0eadc6a20 Mon Sep 17 00:00:00 2001 From: Alexandre Routier Date: Wed, 13 Oct 2021 20:23:30 +0200 Subject: [PATCH 1095/1665] ENH: Add new flags to MRtrix/preprocess.py (DWI2Tensor, MRtransform) (#3365) * Add -mask flag to DWI2Tensor * Add -linear flag to MRTransform * Add tests from make check-before-commit --- nipype/interfaces/mrtrix/preprocess.py | 18 ++++++++++++++++++ .../mrtrix/tests/test_auto_DWI2Tensor.py | 4 ++++ .../mrtrix/tests/test_auto_MRTransform.py | 5 +++++ 3 files changed, 27 insertions(+) diff --git a/nipype/interfaces/mrtrix/preprocess.py b/nipype/interfaces/mrtrix/preprocess.py index 48f4bde719..495f3af703 100644 --- a/nipype/interfaces/mrtrix/preprocess.py +++ b/nipype/interfaces/mrtrix/preprocess.py @@ -217,6 +217,13 @@ class DWI2TensorInputSpec(CommandLineInputSpec): "specified when computing the tensor." ), ) + mask = File( + exists=True, + argstr="-mask %s", + desc=( + "Only perform computation within the specified binary brain mask image." + ), + ) quiet = traits.Bool( argstr="-quiet", position=1, @@ -865,6 +872,17 @@ class MRTransformInputSpec(CommandLineInputSpec): position=1, desc="Invert the specified transform before using it", ) + linear_transform = File( + exists=True, + argstr="-linear %s", + position=1, + desc=( + "Specify a linear transform to apply, in the form of a 3x4 or 4x4 ascii file. " + "Note the standard reverse convention is used, " + "where the transform maps points in the template image to the moving image. " + "Note that the reverse convention is still assumed even if no -template image is supplied." 
+ ), + ) replace_transform = traits.Bool( argstr="-replace", position=1, diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py b/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py index 25bae449e3..72a2820cc1 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py @@ -35,6 +35,10 @@ def test_DWI2Tensor_inputs(): mandatory=True, position=-2, ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), out_filename=dict( argstr="%s", extensions=None, diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py index c1e91da1c1..f619525575 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py @@ -28,6 +28,11 @@ def test_MRTransform_inputs(): argstr="-inverse", position=1, ), + linear_transform=dict( + argstr="-linear %s", + extensions=None, + position=1, + ), out_filename=dict( argstr="%s", extensions=None, From d8dbc6f7b6a5385535e2fa53b7c6af7aa1370f46 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 13 Oct 2021 14:26:48 -0400 Subject: [PATCH 1096/1665] STY: black --- nipype/interfaces/mrtrix/preprocess.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/nipype/interfaces/mrtrix/preprocess.py b/nipype/interfaces/mrtrix/preprocess.py index 495f3af703..c79f9016e9 100644 --- a/nipype/interfaces/mrtrix/preprocess.py +++ b/nipype/interfaces/mrtrix/preprocess.py @@ -220,9 +220,7 @@ class DWI2TensorInputSpec(CommandLineInputSpec): mask = File( exists=True, argstr="-mask %s", - desc=( - "Only perform computation within the specified binary brain mask image." - ), + desc="Only perform computation within the specified binary brain mask image.", ) quiet = traits.Bool( argstr="-quiet", From ab6c616cb8749c21ab541204cffea45b1119fcd5 Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Wed, 13 Oct 2021 21:19:11 +0200 Subject: [PATCH 1097/1665] fix: ``antsRegistration`` allows the ``restrict_deformation`` to be float (#3387) --- nipype/interfaces/ants/registration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/ants/registration.py b/nipype/interfaces/ants/registration.py index 87c2848be3..b9bd38f2df 100644 --- a/nipype/interfaces/ants/registration.py +++ b/nipype/interfaces/ants/registration.py @@ -526,7 +526,7 @@ class RegistrationInputSpec(ANTSCommandInputSpec): ) ) restrict_deformation = traits.List( - traits.List(traits.Enum(0, 1)), + traits.List(traits.Range(low=0.0, high=1.0)), desc=( "This option allows the user to restrict the optimization of " "the displacement field, translation, rigid or affine transform " From c0d450e16cbc3895f672cea4ec86ac4c4416426d Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Wed, 13 Oct 2021 21:41:15 +0200 Subject: [PATCH 1098/1665] ENH: Stop printing false positive differences when logging cached nodes (#3376) * enh: cast lists into tuples when printing inputs diffs * fix: correctly deal with dictionaries, insert ellipsis for very long diffs * sty: fix some style errors * Apply suggestions from code review Co-authored-by: Chris Markiewicz * enh: apply comments from review Co-authored-by: Chris Markiewicz * TEST: Thoroughly test dict_diff() * FIX: Correct call of indent; drop compatibility shim Co-authored-by: Chris Markiewicz Co-authored-by: Christopher J. 
Markiewicz --- nipype/utils/misc.py | 63 ++++++++++++++++++--------------- nipype/utils/tests/test_misc.py | 48 ++++++++++++++++++++++++- 2 files changed, 81 insertions(+), 30 deletions(-) diff --git a/nipype/utils/misc.py b/nipype/utils/misc.py index 8ec6ee5342..6b106da952 100644 --- a/nipype/utils/misc.py +++ b/nipype/utils/misc.py @@ -13,16 +13,7 @@ import numpy as np -try: - from textwrap import indent as textwrap_indent -except ImportError: - - def textwrap_indent(text, prefix): - """A textwrap.indent replacement for Python < 3.3""" - if not prefix: - return text - splittext = text.splitlines(True) - return prefix + prefix.join(splittext) +import textwrap def human_order_sorted(l): @@ -296,12 +287,16 @@ def dict_diff(dold, dnew, indent=0): typical use -- log difference for hashed_inputs """ - # First check inputs, since they usually are lists of tuples - # and dicts are required. - if isinstance(dnew, list): - dnew = dict(dnew) - if isinstance(dold, list): - dold = dict(dold) + try: + dnew, dold = dict(dnew), dict(dold) + except Exception: + return textwrap.indent( + f"""\ +Diff between nipype inputs failed: +* Cached inputs: {dold} +* New inputs: {dnew}""", + " " * indent, + ) # Compare against hashed_inputs # Keys: should rarely differ @@ -321,26 +316,36 @@ def dict_diff(dold, dnew, indent=0): diffkeys = len(diff) + def _shorten(value): + if isinstance(value, str) and len(value) > 50: + return f"{value[:10]}...{value[-10:]}" + if isinstance(value, (tuple, list)) and len(value) > 10: + return tuple(list(value[:2]) + ["..."] + list(value[-2:])) + return value + + def _uniformize(val): + if isinstance(val, dict): + return {k: _uniformize(v) for k, v in val.items()} + if isinstance(val, (list, tuple)): + return tuple(_uniformize(el) for el in val) + return val + # Values in common keys would differ quite often, # so we need to join the messages together for k in new_keys.intersection(old_keys): - try: - new, old = dnew[k], dold[k] - same = new == old - if not same: - # Since JSON does not discriminate between lists and - # tuples, we might need to cast them into the same type - # as the last resort. And lets try to be more generic - same = old.__class__(new) == old - except Exception: - same = False - if not same: - diff += [" * %s: %r != %r" % (k, dnew[k], dold[k])] + # Reading from JSON produces lists, but internally we typically + # use tuples. At this point these dictionary values can be + # immutable (and therefore the preference for tuple). 
+ new = _uniformize(dnew[k]) + old = _uniformize(dold[k]) + + if new != old: + diff += [" * %s: %r != %r" % (k, _shorten(new), _shorten(old))] if len(diff) > diffkeys: diff.insert(diffkeys, "Some dictionary entries had differing values:") - return textwrap_indent("\n".join(diff), " " * indent) + return textwrap.indent("\n".join(diff), " " * indent) def rgetcwd(error=True): diff --git a/nipype/utils/tests/test_misc.py b/nipype/utils/tests/test_misc.py index ad25c6ba14..13ae3740d6 100644 --- a/nipype/utils/tests/test_misc.py +++ b/nipype/utils/tests/test_misc.py @@ -6,7 +6,13 @@ import pytest -from nipype.utils.misc import container_to_string, str2bool, flatten, unflatten +from nipype.utils.misc import ( + container_to_string, + str2bool, + flatten, + unflatten, + dict_diff, +) def test_cont_to_str(): @@ -95,3 +101,43 @@ def test_rgetcwd(monkeypatch, tmpdir): monkeypatch.delenv("PWD") with pytest.raises(OSError): rgetcwd(error=False) + + +def test_dict_diff(): + abtuple = [("a", "b")] + abdict = dict(abtuple) + + # Unchanged + assert dict_diff(abdict, abdict) == "" + assert dict_diff(abdict, abtuple) == "" + assert dict_diff(abtuple, abdict) == "" + assert dict_diff(abtuple, abtuple) == "" + + # Changed keys + diff = dict_diff({"a": "b"}, {"b": "a"}) + assert "Dictionaries had differing keys" in diff + assert "keys not previously seen: {'b'}" in diff + assert "keys not presently seen: {'a'}" in diff + + # Trigger recursive uniformization + complicated_val1 = [{"a": ["b"], "c": ("d", "e")}] + complicated_val2 = [{"a": ["x"], "c": ("d", "e")}] + uniformized_val1 = ({"a": ("b",), "c": ("d", "e")},) + uniformized_val2 = ({"a": ("x",), "c": ("d", "e")},) + + diff = dict_diff({"a": complicated_val1}, {"a": complicated_val2}) + assert "Some dictionary entries had differing values:" in diff + assert "a: {!r} != {!r}".format(uniformized_val2, uniformized_val1) in diff + + # Trigger shortening + diff = dict_diff({"a": "b" * 60}, {"a": "c" * 70}) + assert "Some dictionary entries had differing values:" in diff + assert "a: 'cccccccccc...cccccccccc' != 'bbbbbbbbbb...bbbbbbbbbb'" in diff + + # Fail the dict conversion + diff = dict_diff({}, "not a dict") + assert diff == ( + "Diff between nipype inputs failed:\n" + "* Cached inputs: {}\n" + "* New inputs: not a dict" + ) From 8f7c0bf2ec9c819844a2736a9ae2f6eef19a8e7f Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Wed, 13 Oct 2021 16:08:23 -0400 Subject: [PATCH 1099/1665] ENH: Add expected steps for FreeSurfer 7 recon-all (#3389) * ENH: Copy FS6 steps for FS7 * ENH: Update expected outputs for FS7 * STY: black --- nipype/interfaces/freesurfer/preprocess.py | 121 ++++++++++++++++++++- 1 file changed, 120 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/freesurfer/preprocess.py b/nipype/interfaces/freesurfer/preprocess.py index cf4e97c9c9..2b62d155e3 100644 --- a/nipype/interfaces/freesurfer/preprocess.py +++ b/nipype/interfaces/freesurfer/preprocess.py @@ -1081,6 +1081,7 @@ class ReconAll(CommandLine): # # [0] https://surfer.nmr.mgh.harvard.edu/fswiki/ReconAllTableStableV5.3 # [1] https://surfer.nmr.mgh.harvard.edu/fswiki/ReconAllTableStableV6.0 + # [2] https://surfer.nmr.mgh.harvard.edu/fswiki/ReconAllTableStableV6.0#ReconAllTableStable7.1.1 _autorecon1_steps = [ ("motioncor", ["mri/rawavg.mgz", "mri/orig.mgz"], []), ( @@ -1210,7 +1211,7 @@ class ReconAll(CommandLine): ("wmparc", ["mri/wmparc.mgz", "stats/wmparc.stats"], []), ("balabels", ["label/BA.ctab", "label/BA.thresh.ctab"], []), ] - else: + elif Info.looseversion() < 
LooseVersion("7.0.0"): _autorecon2_volonly_steps = [ ("gcareg", ["mri/transforms/talairach.lta"], []), ("canorm", ["mri/norm.mgz"], []), @@ -1322,6 +1323,124 @@ class ReconAll(CommandLine): [], ), ] + else: + _autorecon2_volonly_steps = [ + ("gcareg", ["mri/transforms/talairach.lta"], []), + ("canorm", ["mri/norm.mgz"], []), + ("careg", ["mri/transforms/talairach.m3z"], []), + ( + "calabel", + [ + "mri/aseg.auto_noCCseg.mgz", + "mri/aseg.auto.mgz", + "mri/aseg.presurf.mgz", + ], + [], + ), + ("normalization2", ["mri/brain.mgz"], []), + ("maskbfs", ["mri/brain.finalsurfs.mgz"], []), + ( + "segmentation", + ["mri/wm.seg.mgz", "mri/wm.asegedit.mgz", "mri/wm.mgz"], + [], + ), + ( + "fill", + [ + "mri/filled.mgz", + # 'scripts/ponscc.cut.log', + ], + [], + ), + ] + _autorecon2_lh_steps = [ + ("tessellate", ["surf/lh.orig.nofix"], []), + ("smooth1", ["surf/lh.smoothwm.nofix"], []), + ("inflate1", ["surf/lh.inflated.nofix"], []), + ("qsphere", ["surf/lh.qsphere.nofix"], []), + ("fix", ["surf/lh.inflated", "surf/lh.orig"], []), + ( + "white", + [ + "surf/lh.white.preaparc", + "surf/lh.curv", + "surf/lh.area", + "label/lh.cortex.label", + ], + [], + ), + ("smooth2", ["surf/lh.smoothwm"], []), + ("inflate2", ["surf/lh.inflated", "surf/lh.sulc"], []), + ( + "curvHK", + [ + "surf/lh.white.H", + "surf/lh.white.K", + "surf/lh.inflated.H", + "surf/lh.inflated.K", + ], + [], + ), + ("curvstats", ["stats/lh.curv.stats"], []), + ] + _autorecon3_lh_steps = [ + ("sphere", ["surf/lh.sphere"], []), + ("surfreg", ["surf/lh.sphere.reg"], []), + ("jacobian_white", ["surf/lh.jacobian_white"], []), + ("avgcurv", ["surf/lh.avg_curv"], []), + ("cortparc", ["label/lh.aparc.annot"], []), + ( + "pial", + [ + "surf/lh.pial", + "surf/lh.curv.pial", + "surf/lh.area.pial", + "surf/lh.thickness", + "surf/lh.white", + ], + [], + ), + ("parcstats", ["stats/lh.aparc.stats"], []), + ("cortparc2", ["label/lh.aparc.a2009s.annot"], []), + ("parcstats2", ["stats/lh.aparc.a2009s.stats"], []), + ("cortparc3", ["label/lh.aparc.DKTatlas.annot"], []), + ("parcstats3", ["stats/lh.aparc.DKTatlas.stats"], []), + ("pctsurfcon", ["surf/lh.w-g.pct.mgh", "stats/lh.w-g.pct.stats"], []), + ] + _autorecon3_added_steps = [ + ( + "cortribbon", + ["mri/lh.ribbon.mgz", "mri/rh.ribbon.mgz", "mri/ribbon.mgz"], + [], + ), + ("hyporelabel", ["mri/aseg.presurf.hypos.mgz"], []), + ( + "aparc2aseg", + [ + "mri/aparc+aseg.mgz", + "mri/aparc.a2009s+aseg.mgz", + "mri/aparc.DKTatlas+aseg.mgz", + ], + [], + ), + ("apas2aseg", ["mri/aseg.mgz"], ["mri/aparc+aseg.mgz"]), + ("segstats", ["stats/aseg.stats"], []), + ("wmparc", ["mri/wmparc.mgz", "stats/wmparc.stats"], []), + # Note that this is a very incomplete list; however the ctab + # files are last to be touched, so this should be reasonable + ( + "balabels", + [ + "label/BA_exvivo.ctab", + "label/BA_exvivo.thresh.ctab", + "label/lh.entorhinal_exvivo.label", + "label/rh.entorhinal_exvivo.label", + "label/lh.perirhinal_exvivo.label", + "label/rh.perirhinal_exvivo.label", + ], + [], + ), + ] # Fill out autorecon2 steps _autorecon2_rh_steps = [ From 329e9be229f8ef98bb245ca646927b647b7a1f95 Mon Sep 17 00:00:00 2001 From: orco Date: Sun, 17 Oct 2021 20:47:22 +0200 Subject: [PATCH 1100/1665] Update nipype/interfaces/cat12/preprocess.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/cat12/preprocess.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index 33ec9ff205..998202019a 100644 --- 
a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -604,13 +604,13 @@ class CAT12SANLMDenoisingInputSpec(SPMCommandInputSpec): ) spm_type = traits.Enum( - 16, - 0, - 2, - 512, + "float32", + "uint16", + "uint8" + "same", field="spm_type", usedefault=True, - desc="Data type of the output images. 0 = same, 2 = uint8, 512 = uint16, 16 = single (32 bit)", + desc="Data type of the output images. 'same' matches the input image type.", ) intlim = traits.Int( From 1809526eac171a89588485a711b49d83d8e18347 Mon Sep 17 00:00:00 2001 From: orco Date: Sun, 17 Oct 2021 20:47:32 +0200 Subject: [PATCH 1101/1665] Update nipype/interfaces/cat12/preprocess.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/cat12/preprocess.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index 998202019a..09ad16b6d9 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -726,7 +726,9 @@ def _format_arg(self, opt, spec, val): return scans_for_fnames(val) else: return scans_for_fname(val) - + if opt == "spm_type": + type_map = {"same": 0, "uint8": 2, "uint16": 512, "float32": 16} + val = type_map[val] return super(CAT12SANLMDenoising, self)._format_arg(opt, spec, val) def _list_outputs(self): From c27823960108245887d9fec7a61654bf16d182cf Mon Sep 17 00:00:00 2001 From: 0rC0 Date: Sun, 17 Oct 2021 20:48:39 +0200 Subject: [PATCH 1102/1665] make_specs --- .../tests/test_auto_CAT12SANLMDenoising.py | 72 +++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 nipype/interfaces/cat12/tests/test_auto_CAT12SANLMDenoising.py diff --git a/nipype/interfaces/cat12/tests/test_auto_CAT12SANLMDenoising.py b/nipype/interfaces/cat12/tests/test_auto_CAT12SANLMDenoising.py new file mode 100644 index 0000000000..43c0d5e4ea --- /dev/null +++ b/nipype/interfaces/cat12/tests/test_auto_CAT12SANLMDenoising.py @@ -0,0 +1,72 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from ..preprocess import CAT12SANLMDenoising + + +def test_CAT12SANLMDenoising_inputs(): + input_map = dict( + addnoise=dict( + field="addnoise", + usedefault=True, + ), + filename_prefix=dict( + field="prefix", + usedefault=True, + ), + filename_suffix=dict( + field="suffix", + usedefault=True, + ), + in_files=dict( + copyfile=False, + field="data", + mandatory=True, + ), + intlim=dict( + field="intlim", + usedefault=True, + ), + matlab_cmd=dict(), + mfile=dict( + usedefault=True, + ), + noisecorr_strength=dict( + field="nlmfilter.optimized.NCstr", + usedefault=True, + ), + paths=dict(), + replace_nan_and_inf=dict( + field="replaceNANandINF", + usedefault=True, + ), + rician=dict( + field="rician", + usedefault=True, + ), + spm_type=dict( + field="spm_type", + usedefault=True, + ), + use_mcr=dict(), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + ) + inputs = CAT12SANLMDenoising.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_CAT12SANLMDenoising_outputs(): + output_map = dict( + out_file=dict( + extensions=None, + ), + ) + outputs = CAT12SANLMDenoising.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value From 5df9ce8dab261e41ff5cb3c1ae30be60281c0abf Mon Sep 17 00:00:00 2001 From: Fabio Bernardoni Date: Mon, 18 Oct 
2021 18:05:37 +0200 Subject: [PATCH 1103/1665] merged my changes from 2019 into the new version of nipype --- nipype/interfaces/spm/preprocess.py | 118 ++++++++++++++++++++++++++-- 1 file changed, 110 insertions(+), 8 deletions(-) diff --git a/nipype/interfaces/spm/preprocess.py b/nipype/interfaces/spm/preprocess.py index 5dc2a8fa3e..b3c78787a2 100644 --- a/nipype/interfaces/spm/preprocess.py +++ b/nipype/interfaces/spm/preprocess.py @@ -46,14 +46,14 @@ class FieldMapInputSpec(SPMCommandInputSpec): desc="one of: calculatevdm, applyvdm", ) phase_file = File( - mandatory=True, + #mandatory=True, exists=True, copyfile=False, field="subj.data.presubphasemag.phase", desc="presubstracted phase file", ) magnitude_file = File( - mandatory=True, + #mandatory=True, exists=True, copyfile=False, field="subj.data.presubphasemag.magnitude", @@ -62,7 +62,7 @@ class FieldMapInputSpec(SPMCommandInputSpec): echo_times = traits.Tuple( traits.Float, traits.Float, - mandatory=True, + #mandatory=True, field="subj.defaults.defaultsval.et", desc="short and long echo times", ) @@ -169,7 +169,7 @@ class FieldMapInputSpec(SPMCommandInputSpec): epi_file = File( copyfile=False, exists=True, - mandatory=True, + #mandatory=True, field="subj.session.epi", desc="EPI to unwarp", ) @@ -195,10 +195,61 @@ class FieldMapInputSpec(SPMCommandInputSpec): desc="match anatomical image to EPI", ) + in_files = InputMultiObject( + traits.Either(ImageFileSPM(exists=True), + traits.List(ImageFileSPM(exists=True))), + field='data.scans',mandatory=True, + copyfile=True, + desc='list of filenames to apply the vdm to') + vdmfile = File( + field='data.vdmfile', + desc='Voxel displacement map to use',mandatory=True, + copyfile=True) + distortion_direction = traits.Int( + 2, field='roptions.pedir', desc='phase encode direction input data have been acquired with', + usedefault=True) + write_which = traits.ListInt( + [2, 1], + field='roptions.which', + minlen=2, + maxlen=2, + usedefault=True, + desc='determines which images to apply vdm to') + interpolation = traits.Int( + 4, field='roptions.rinterp', desc='phase encode direction input data have been acquired with', + usedefault=True) + reslice_interp = traits.Range( + low=0, + high=7, + field='roptions.rinterp', + desc='degree of b-spline used for interpolation') + write_wrap = traits.List( + traits.Int(), + minlen=3, + maxlen=3, + field='roptions.wrap', + desc=('Check if interpolation should wrap in [x,y,z]')) + write_mask = traits.Bool( + field='roptions.mask', desc='True/False mask time series images') + out_prefix = traits.String( + 'u', + field='roptions.prefix', + usedefault=True, + desc='fieldmap corrected output prefix') + class FieldMapOutputSpec(TraitedSpec): vdm = File(exists=True, desc="voxel difference map") + out_files = OutputMultiPath( + traits.Either(traits.List(File(exists=True)), File(exists=True)), + desc=('If jobtype is applyvdm, ' + 'these will be the fieldmap corrected files.' + ' Otherwise, they will be copies ' + 'of in_files that have had their ' + 'headers rewritten.')) + mean_image = File(exists=True, desc='Mean image') + class FieldMap(SPMCommand): """Use the fieldmap toolbox from spm to calculate the voxel displacement map (VDM). 
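A minimal sketch of how the newly added ``applyvdm`` pathway might be driven with the traits defined above; both filenames are placeholders, and a real voxel displacement map would come from a prior ``calculatevdm`` run:

    >>> from nipype.interfaces.spm import FieldMap
    >>> applyvdm = FieldMap()
    >>> applyvdm.inputs.jobtype = 'applyvdm'
    >>> applyvdm.inputs.in_files = 'epi_run1.nii'      # placeholder EPI series
    >>> applyvdm.inputs.vdmfile = 'vdm5_sc_phase.nii'  # placeholder voxel displacement map
    >>> applyvdm.run()  # doctest: +SKIP
    >>> # corrected volumes are written with the 'u' prefix (out_files); a 'meanu' mean_image
    >>> # is produced when write_which requests it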
@@ -231,25 +282,76 @@ class FieldMap(SPMCommand): def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm""" - if opt in ["phase_file", "magnitude_file", "anat_file", "epi_file"]: + + if ((self.inputs.jobtype == "calculatevdm") and (opt in ['phase_file', 'magnitude_file', 'anat_file', 'epi_file'])): return scans_for_fname(ensure_list(val)) + if ((self.inputs.jobtype == "applyvdm") and (opt =='in_files')): + return scans_for_fnames(ensure_list(val)) + if ((self.inputs.jobtype == "applyvdm") and (opt =='vdmfile')): + return scans_for_fname(ensure_list(val)) return super(FieldMap, self)._format_arg(opt, spec, val) + def _parse_inputs(self): """validate spm fieldmap options if set to None ignore""" - einputs = super(FieldMap, self)._parse_inputs() - return [{self.inputs.jobtype: einputs[0]}] + + if self.inputs.jobtype == "applyvdm": + einputs = (super(FieldMap, self) + ._parse_inputs(skip=('jobtype','phase_file', 'magnitude_file', + 'echo_times', 'blip_direction', + 'total_readout_time','maskbrain', + 'epifm','jacobian_modulation', + 'method','unwarp_fwhm','pad','ws', + 'template','mask_fwhm','nerode','ndilate', + 'thresh','reg','epi_file','matchvdm', + 'sessname','writeunwarped', + 'anat_file','matchanat'))) + + else: + einputs = (super(FieldMap, self) + ._parse_inputs(skip=('jobtype','in_files', 'vdmfile'))) + jobtype = self.inputs.jobtype + + return [{'%s' % (jobtype): einputs[0]}] + def _list_outputs(self): outputs = self._outputs().get() jobtype = self.inputs.jobtype + resliced_all = self.inputs.write_which[0] > 0 + resliced_mean = self.inputs.write_which[1] > 0 if jobtype == "calculatevdm": - outputs["vdm"] = fname_presuffix(self.inputs.phase_file, prefix="vdm5_sc") + outputs['vdm'] = fname_presuffix(self.inputs.phase_file, prefix='vdm5_sc') + elif jobtype == "applyvdm": + if resliced_mean: + if isinstance(self.inputs.in_files[0], list): + first_image = self.inputs.in_files[0][0] + else: + first_image = self.inputs.in_files[0] + outputs['mean_image'] = fname_presuffix( + first_image, prefix='meanu') + + if resliced_all: + outputs['out_files'] = [] + for idx, imgf in enumerate(ensure_list(self.inputs.in_files)): + appliedvdm_run = [] + if isinstance(imgf, list): + for i, inner_imgf in enumerate(ensure_list(imgf)): + newfile = fname_presuffix(inner_imgf, + prefix=self.inputs.out_prefix) + appliedvdm_run.append(newfile) + else: + appliedvdm_run = fname_presuffix(imgf, + prefix=self.inputs.out_prefix) + outputs['out_files'].append(appliedvdm_run) + return outputs + return outputs + class SliceTimingInputSpec(SPMCommandInputSpec): in_files = InputMultiPath( traits.Either( From cdb064d6caa90d00537520c97679bde6fc2c2646 Mon Sep 17 00:00:00 2001 From: 0rC0 Date: Mon, 18 Oct 2021 19:51:43 +0200 Subject: [PATCH 1104/1665] black --- nipype/interfaces/cat12/preprocess.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index 09ad16b6d9..b6b6943fef 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -606,8 +606,7 @@ class CAT12SANLMDenoisingInputSpec(SPMCommandInputSpec): spm_type = traits.Enum( "float32", "uint16", - "uint8" - "same", + "uint8" "same", field="spm_type", usedefault=True, desc="Data type of the output images. 
'same' matches the input image type.", From 2dfea7acc5283a0821a97a5eaed22babd6da0e30 Mon Sep 17 00:00:00 2001 From: orco Date: Mon, 18 Oct 2021 20:06:10 +0200 Subject: [PATCH 1105/1665] Update nipype/interfaces/cat12/preprocess.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/cat12/preprocess.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/cat12/preprocess.py b/nipype/interfaces/cat12/preprocess.py index b6b6943fef..69fe16b752 100644 --- a/nipype/interfaces/cat12/preprocess.py +++ b/nipype/interfaces/cat12/preprocess.py @@ -606,7 +606,8 @@ class CAT12SANLMDenoisingInputSpec(SPMCommandInputSpec): spm_type = traits.Enum( "float32", "uint16", - "uint8" "same", + "uint8", + "same", field="spm_type", usedefault=True, desc="Data type of the output images. 'same' matches the input image type.", From 93d6c3b351b7d9b0c30fe5e55cb3567d8f0b139a Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 18 Oct 2021 14:55:13 -0400 Subject: [PATCH 1106/1665] FIX: Make ants.LaplacianThickness output_image a string, not file Closes gh-3390 --- nipype/interfaces/ants/segmentation.py | 2 +- nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py index 33c92fc005..5f8a76e302 100644 --- a/nipype/interfaces/ants/segmentation.py +++ b/nipype/interfaces/ants/segmentation.py @@ -268,7 +268,7 @@ class LaplacianThicknessInputSpec(ANTSCommandInputSpec): desc="gray matter segmentation image", position=2, ) - output_image = File( + output_image = traits.Str( desc="name of output file", argstr="%s", position=3, diff --git a/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py b/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py index 74c9ef076d..f6a8ffde25 100644 --- a/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py +++ b/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py @@ -36,7 +36,6 @@ def test_LaplacianThickness_inputs(): ), output_image=dict( argstr="%s", - extensions=None, hash_files=False, keep_extension=True, name_source=["input_wm"], From d1a7d3c92eae85034079315beb3a4454bd784691 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Fri, 15 Oct 2021 11:23:45 -0400 Subject: [PATCH 1107/1665] MNT: Update update_changes to handle squashed merges --- tools/update_changes.sh | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/tools/update_changes.sh b/tools/update_changes.sh index 387ec3442f..b5f5f8c0a4 100755 --- a/tools/update_changes.sh +++ b/tools/update_changes.sh @@ -23,15 +23,30 @@ UPCOMING=$? 
HEADER="$1 ($(date '+%B %d, %Y'))" echo $HEADER >> newchanges echo $( printf "%${#HEADER}s" | tr " " "=" ) >> newchanges +echo >> newchanges if [[ "x$2" != "x" ]]; then - echo "(\`Full changelog \`__)" >> newchanges + echo "(\`Full changelog \`__)" >> newchanges + echo >> newchanges fi # Search for PRs since previous release -git log --grep="Merge pull request" `git describe --tags --abbrev=0`..HEAD --pretty='format: * %b %s' | sed 's+Merge pull request \#\([^\d]*\)\ from\ .*+(https://github.com/nipy/nipype/pull/\1)+' >> newchanges -echo "" >> newchanges -echo "" >> newchanges +MERGE_COMMITS=$( git log --grep="Merge pull request\|(#.*)$" `git describe --tags --abbrev=0`..HEAD --pretty='format:%h' ) +for COMMIT in ${MERGE_COMMITS//\n}; do + SUB=$( git log -n 1 --pretty="format:%s" $COMMIT ) + if ( echo $SUB | grep "^Merge pull request" ); then + # Merge commit + PR=$( echo $SUB | sed -e "s/Merge pull request \#\([0-9]*\).*/\1/" ) + TITLE=$( git log -n 1 --pretty="format:%b" $COMMIT ) + else + # Squashed merge + PR=$( echo $SUB | sed -e "s/.*(\#\([0-9]*\))$/\1/" ) + TITLE=$( echo $SUB | sed -e "s/\(.*\)(\#[0-9]*)$/\1/" ) + fi + echo " * $TITLE (https://github.com/nipy/nipype/pull/$PR)" >> newchanges +done +echo >> newchanges +echo >> newchanges # Append old CHANGES if [[ "$UPCOMING" == "0" ]]; then @@ -43,4 +58,3 @@ fi # Replace old CHANGES with new file mv newchanges $CHANGES - From ece23f1ad59f1360155cfb006630efc09ac7a599 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Fri, 15 Oct 2021 13:16:45 -0400 Subject: [PATCH 1108/1665] DOC: Update previous versions --- doc/interfaces.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/interfaces.rst b/doc/interfaces.rst index 107eb6519b..a7523a9cc9 100644 --- a/doc/interfaces.rst +++ b/doc/interfaces.rst @@ -8,7 +8,7 @@ Interfaces and Workflows :Release: |version| :Date: |today| -Previous versions: `1.6.0 `_ `1.5.1 `_ +Previous versions: `1.6.1 `_ `1.6.0 `_ Workflows --------- From 107dfa965c8078c44d1f04a384c1a654ad26bdad Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Fri, 15 Oct 2021 13:16:57 -0400 Subject: [PATCH 1109/1665] MNT: Bump version to 1.7.0 --- nipype/info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/info.py b/nipype/info.py index 4f065939ed..c92f48df5d 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -5,7 +5,7 @@ # nipype version information # Remove -dev for release -__version__ = "1.6.1" +__version__ = "1.7.0" def get_nipype_gitversion(): From a309d90bc96219016c5c73610bea96394a1c667d Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Mon, 18 Oct 2021 15:18:32 -0400 Subject: [PATCH 1110/1665] DOC: Update changelog --- doc/changelog/1.X.X-changelog.rst | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/doc/changelog/1.X.X-changelog.rst b/doc/changelog/1.X.X-changelog.rst index 6d8cd64652..10011c6830 100644 --- a/doc/changelog/1.X.X-changelog.rst +++ b/doc/changelog/1.X.X-changelog.rst @@ -1,3 +1,26 @@ +1.7.0 (October 18, 2021) +======================== + +(`Full changelog `__) + + * FIX: Make ants.LaplacianThickness output_image a string, not file (https://github.com/nipy/nipype/pull/3393) + * FIX: coord for mrconvert (https://github.com/nipy/nipype/pull/3369) + * FIX: ``antsRegistration`` allows the ``restrict_deformation`` to be float (https://github.com/nipy/nipype/pull/3387) + * FIX: Also allow `errno.EBUSY` during `emptydirs` on NFS (https://github.com/nipy/nipype/pull/3357) + * FIX: Removed exists=True from MathsOutput (https://github.com/nipy/nipype/pull/3385) + * FIX: Extension not extensions, after pybids v0.9 (https://github.com/nipy/nipype/pull/3380) + * ENH: Add CAT12 SANLM denoising filter (https://github.com/nipy/nipype/pull/3374) + * ENH: Add expected steps for FreeSurfer 7 recon-all (https://github.com/nipy/nipype/pull/3389) + * ENH: Stop printing false positive differences when logging cached nodes (https://github.com/nipy/nipype/pull/3376) + * ENH: Add new flags to MRtrix/preprocess.py (DWI2Tensor, MRtransform) (https://github.com/nipy/nipype/pull/3365) + * ENH: ``verbose`` input should not be hashed in ``ants.Registration`` (https://github.com/nipy/nipype/pull/3377) + * REF: Clean-up the BaseInterface ``run()`` function using context (https://github.com/nipy/nipype/pull/3347) + * DOC: Fix typo in README (https://github.com/nipy/nipype/pull/3386) + * STY: Make private member name consistent with the rest of them (https://github.com/nipy/nipype/pull/3346) + * MNT: Simplify interface execution and better error handling of ``Node`` (https://github.com/nipy/nipype/pull/3349) + * MNT: Add user name and email to Docker to appease git/annex/datalad (https://github.com/nipy/nipype/pull/3378) + * CI: Update CircleCI machine image (https://github.com/nipy/nipype/pull/3391) + 1.6.1 (June 16, 2021) ===================== From 7d85071ee17565894ce12ff2bff1aa6e592a3757 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 18 Oct 2021 15:21:10 -0400 Subject: [PATCH 1111/1665] MNT: Update mailmap --- .mailmap | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.mailmap b/.mailmap index a233bff1e0..0509365e65 100644 --- a/.mailmap +++ b/.mailmap @@ -10,6 +10,7 @@ Alexander Schaefer Alexander Schaefer Alexandre M. Savio Alexandre M. Savio +Andrea Dell'Orco Andrew Floren Andrey Chetverikov Andrey Chetverikov @@ -144,6 +145,7 @@ Michael Waskom Miguel Molina-Romero Murat Bilgel Nat Lee +Ole Numssen Oliver Contier Olivia Stanley Oscar Esteban From dbe25eb0d23cffa5141bc1b3c4909c7888526b7a Mon Sep 17 00:00:00 2001 From: "Christopher J. 
Markiewicz" Date: Mon, 18 Oct 2021 15:23:29 -0400 Subject: [PATCH 1112/1665] MNT: Add Zenodo authors, update ordering --- .zenodo.json | 43 +++++++++++++++++++++++++++++-------------- 1 file changed, 29 insertions(+), 14 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index 0b2d0f65fb..99cba0602a 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -148,15 +148,15 @@ "name": "Visconti di Oleggio Castello, Matteo", "orcid": "0000-0001-7931-5272" }, - { - "affiliation": "Shattuck Lab, UCLA Brain Mapping Center", - "name": "Wong, Jason" - }, { "affiliation": "University of Texas at Austin", "name": "De La Vega, Alejandro", "orcid": "0000-0001-9062-3778" }, + { + "affiliation": "Shattuck Lab, UCLA Brain Mapping Center", + "name": "Wong, Jason" + }, { "affiliation": "MIT", "name": "Kaczmarzyk, Jakub", @@ -217,6 +217,11 @@ "name": "de Hollander, Gilles", "orcid": "0000-0003-1988-5091" }, + { + "affiliation": "Charit\u00e8 Universit\u00e4tsmedizin Berlin, Germany", + "name": "Dell'Orco, Andrea", + "orcid": "0000-0002-3964-8360" + }, { "affiliation": "University College London", "name": "Eshaghi, Arman", @@ -251,6 +256,11 @@ { "name": "Mordom, David" }, + { + "affiliation": "Charite Universitatsmedizin Berlin, Germany", + "name": "Waller, Lea", + "orcid": "0000-0002-3239-6957" + }, { "affiliation": "CIBIT, UC", "name": "Machado, F\u00e1tima", @@ -261,11 +271,6 @@ "name": "Guillon, Je\u0301re\u0301my", "orcid": "0000-0002-2672-7510" }, - { - "affiliation": "Charite Universitatsmedizin Berlin, Germany", - "name": "Waller, Lea", - "orcid": "0000-0002-3239-6957" - }, { "affiliation": "Indiana University, IN, USA", "name": "Koudoro, Serge" @@ -726,6 +731,11 @@ "affiliation": "Universidad de Guadalajara", "name": "Gonz\u00e1lez Orozco, Abel A." }, + { + "affiliation": "ARAMIS Lab", + "name": "Routier, Alexandre", + "orcid": "0000-0003-1603-8049" + }, { "name": "Marina, Ana" }, @@ -794,9 +804,19 @@ "name": "Pannetier, Nicolas", "orcid": "0000-0002-0744-5155" }, + { + "affiliation": "Max Planck Institute for Human Cognitive and Brain Sciences", + "name": "Numssen, Ole", + "orcid": "0000-0001-7164-2682" + }, { "name": "Khanuja, Ranjeet" }, + { + "affiliation": "University of Waterloo", + "name": "Mukhometzianov, Rinat", + "orcid": "0000-0003-1274-4827" + }, { "name": "Urchs, Sebastian" }, @@ -835,11 +855,6 @@ "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", "orcid": "0000-0002-5312-6729" - }, - { - "affiliation": "Charitè Universitätsmedizin Berlin, Germany", - "name": "Dell\'Orco, Andrea", - "orcid": "0000-0002-3964-8360" } ], "keywords": [ From a57cdc28ca28861b4fa32ede55b99fe85d0b5ea8 Mon Sep 17 00:00:00 2001 From: Fabio Bernardoni Date: Tue, 19 Oct 2021 15:28:14 +0200 Subject: [PATCH 1113/1665] run the make check-before-commit and edited the .zenodo file --- .zenodo.json | 5 + Makefile | 2 +- nipype/algorithms/tests/test_auto_ACompCor.py | 72 +--- .../tests/test_auto_ActivationCount.py | 21 +- .../tests/test_auto_AddCSVColumn.py | 16 +- .../algorithms/tests/test_auto_AddCSVRow.py | 14 +- nipype/algorithms/tests/test_auto_AddNoise.py | 33 +- .../tests/test_auto_ArtifactDetect.py | 70 +--- .../tests/test_auto_CalculateMedian.py | 10 +- .../test_auto_CalculateNormalizedMoments.py | 13 +- .../tests/test_auto_ComputeDVARS.py | 78 +--- .../tests/test_auto_ComputeMeshWarp.py | 36 +- .../algorithms/tests/test_auto_CreateNifti.py | 16 +- nipype/algorithms/tests/test_auto_Distance.py | 25 +- .../tests/test_auto_FramewiseDisplacement.py | 47 +-- .../tests/test_auto_FuzzyOverlap.py | 28 +- 
nipype/algorithms/tests/test_auto_Gunzip.py | 13 +- nipype/algorithms/tests/test_auto_ICC.py | 21 +- .../algorithms/tests/test_auto_Matlab2CSV.py | 13 +- .../tests/test_auto_MergeCSVFiles.py | 19 +- .../algorithms/tests/test_auto_MergeROIs.py | 12 +- .../tests/test_auto_MeshWarpMaths.py | 33 +- .../tests/test_auto_ModifyAffine.py | 11 +- .../tests/test_auto_NonSteadyStateDetector.py | 11 +- .../test_auto_NormalizeProbabilityMapSet.py | 11 +- .../algorithms/tests/test_auto_P2PDistance.py | 36 +- .../algorithms/tests/test_auto_PickAtlas.py | 27 +- .../algorithms/tests/test_auto_Similarity.py | 26 +- .../tests/test_auto_SimpleThreshold.py | 13 +- .../tests/test_auto_SpecifyModel.py | 48 +-- .../tests/test_auto_SpecifySPMModel.py | 56 +-- .../tests/test_auto_SpecifySparseModel.py | 74 +--- .../algorithms/tests/test_auto_SplitROIs.py | 15 +- .../tests/test_auto_StimulusCorrelation.py | 21 +- nipype/algorithms/tests/test_auto_TCompCor.py | 76 +--- .../algorithms/tests/test_auto_WarpPoints.py | 21 +- .../afni/tests/test_auto_ABoverlap.py | 50 +-- .../afni/tests/test_auto_AFNICommand.py | 14 +- .../afni/tests/test_auto_AFNICommandBase.py | 10 +- .../afni/tests/test_auto_AFNIPythonCommand.py | 14 +- .../afni/tests/test_auto_AFNItoNIFTI.py | 44 +- .../afni/tests/test_auto_AlignEpiAnatPy.py | 103 ++--- .../afni/tests/test_auto_Allineate.py | 227 +++-------- .../afni/tests/test_auto_AutoTLRC.py | 29 +- .../afni/tests/test_auto_AutoTcorrelate.py | 48 +-- .../afni/tests/test_auto_Autobox.py | 31 +- .../afni/tests/test_auto_Automask.py | 41 +- .../afni/tests/test_auto_Axialize.py | 49 +-- .../afni/tests/test_auto_Bandpass.py | 91 +---- .../afni/tests/test_auto_BlurInMask.py | 52 +-- .../afni/tests/test_auto_BlurToFWHM.py | 48 +-- .../afni/tests/test_auto_BrickStat.py | 55 +-- .../interfaces/afni/tests/test_auto_Bucket.py | 32 +- .../interfaces/afni/tests/test_auto_Calc.py | 62 +-- nipype/interfaces/afni/tests/test_auto_Cat.py | 50 +-- .../afni/tests/test_auto_CatMatvec.py | 41 +- .../afni/tests/test_auto_CenterMass.py | 48 +-- .../afni/tests/test_auto_ClipLevel.py | 38 +- .../afni/tests/test_auto_ConvertDset.py | 38 +- .../interfaces/afni/tests/test_auto_Copy.py | 30 +- .../afni/tests/test_auto_Deconvolve.py | 224 +++------- .../afni/tests/test_auto_DegreeCentrality.py | 58 +-- .../afni/tests/test_auto_Despike.py | 26 +- .../afni/tests/test_auto_Detrend.py | 26 +- nipype/interfaces/afni/tests/test_auto_Dot.py | 80 +--- nipype/interfaces/afni/tests/test_auto_ECM.py | 79 +--- .../interfaces/afni/tests/test_auto_Edge3.py | 52 +-- .../interfaces/afni/tests/test_auto_Eval.py | 62 +-- .../interfaces/afni/tests/test_auto_FWHMx.py | 79 +--- nipype/interfaces/afni/tests/test_auto_Fim.py | 35 +- .../afni/tests/test_auto_Fourier.py | 40 +- .../interfaces/afni/tests/test_auto_GCOR.py | 27 +- .../interfaces/afni/tests/test_auto_Hist.py | 44 +- .../interfaces/afni/tests/test_auto_LFCD.py | 47 +-- .../afni/tests/test_auto_LocalBistat.py | 60 +-- .../afni/tests/test_auto_Localstat.py | 72 +--- .../afni/tests/test_auto_MaskTool.py | 69 +--- .../afni/tests/test_auto_Maskave.py | 37 +- .../interfaces/afni/tests/test_auto_Means.py | 69 +--- .../interfaces/afni/tests/test_auto_Merge.py | 36 +- .../afni/tests/test_auto_NetCorr.py | 91 +---- .../interfaces/afni/tests/test_auto_Notes.py | 53 +-- .../afni/tests/test_auto_NwarpAdjust.py | 29 +- .../afni/tests/test_auto_NwarpApply.py | 57 +-- .../afni/tests/test_auto_NwarpCat.py | 47 +-- .../afni/tests/test_auto_OneDToolPy.py | 65 +-- .../afni/tests/test_auto_OutlierCount.py | 69 
+--- .../afni/tests/test_auto_QualityIndex.py | 59 +-- .../interfaces/afni/tests/test_auto_Qwarp.py | 241 +++-------- .../afni/tests/test_auto_QwarpPlusMinus.py | 241 +++-------- .../afni/tests/test_auto_ROIStats.py | 75 +--- .../interfaces/afni/tests/test_auto_ReHo.py | 56 +-- .../interfaces/afni/tests/test_auto_Refit.py | 88 +--- .../afni/tests/test_auto_Remlfit.py | 240 +++-------- .../afni/tests/test_auto_Resample.py | 37 +- .../afni/tests/test_auto_Retroicor.py | 58 +-- .../afni/tests/test_auto_SVMTest.py | 60 +-- .../afni/tests/test_auto_SVMTrain.py | 73 +--- nipype/interfaces/afni/tests/test_auto_Seg.py | 57 +-- .../afni/tests/test_auto_SkullStrip.py | 20 +- .../afni/tests/test_auto_Synthesize.py | 53 +-- .../interfaces/afni/tests/test_auto_TCat.py | 36 +- .../afni/tests/test_auto_TCatSubBrick.py | 38 +- .../afni/tests/test_auto_TCorr1D.py | 49 +-- .../afni/tests/test_auto_TCorrMap.py | 141 ++----- .../afni/tests/test_auto_TCorrelate.py | 40 +- .../interfaces/afni/tests/test_auto_TNorm.py | 50 +-- .../afni/tests/test_auto_TProject.py | 85 +--- .../interfaces/afni/tests/test_auto_TShift.py | 73 +--- .../afni/tests/test_auto_TSmooth.py | 63 +-- .../interfaces/afni/tests/test_auto_TStat.py | 35 +- .../interfaces/afni/tests/test_auto_To3D.py | 46 +-- .../interfaces/afni/tests/test_auto_Undump.py | 59 +-- .../afni/tests/test_auto_Unifize.py | 66 +-- .../interfaces/afni/tests/test_auto_Volreg.py | 67 +-- .../interfaces/afni/tests/test_auto_Warp.py | 78 +--- .../interfaces/afni/tests/test_auto_ZCutUp.py | 30 +- .../interfaces/afni/tests/test_auto_Zcat.py | 45 +-- .../afni/tests/test_auto_Zeropad.py | 87 +--- nipype/interfaces/ants/tests/test_auto_AI.py | 87 +--- .../interfaces/ants/tests/test_auto_ANTS.py | 134 ++---- .../ants/tests/test_auto_ANTSCommand.py | 14 +- .../ants/tests/test_auto_AffineInitializer.py | 71 +--- .../ants/tests/test_auto_ApplyTransforms.py | 72 +--- .../test_auto_ApplyTransformsToPoints.py | 35 +- .../ants/tests/test_auto_Atropos.py | 99 +---- .../tests/test_auto_AverageAffineTransform.py | 37 +- .../ants/tests/test_auto_AverageImages.py | 44 +- .../ants/tests/test_auto_BrainExtraction.py | 141 ++----- .../tests/test_auto_ComposeMultiTransform.py | 38 +- .../tests/test_auto_CompositeTransformUtil.py | 50 +-- .../test_auto_ConvertScalarImageToRGB.py | 83 +--- .../ants/tests/test_auto_CorticalThickness.py | 160 ++------ ...est_auto_CreateJacobianDeterminantImage.py | 50 +-- .../ants/tests/test_auto_CreateTiledMosaic.py | 70 +--- .../ants/tests/test_auto_DenoiseImage.py | 51 +-- .../ants/tests/test_auto_GenWarpFields.py | 85 +--- .../ants/tests/test_auto_ImageMath.py | 48 +-- .../ants/tests/test_auto_JointFusion.py | 104 ++--- .../ants/tests/test_auto_KellyKapowski.py | 76 +--- .../ants/tests/test_auto_LabelGeometry.py | 44 +- .../tests/test_auto_LaplacianThickness.py | 61 +-- .../tests/test_auto_MeasureImageSimilarity.py | 68 +--- .../ants/tests/test_auto_MultiplyImages.py | 44 +- .../tests/test_auto_N4BiasFieldCorrection.py | 88 +--- .../ants/tests/test_auto_Registration.py | 187 ++------- .../tests/test_auto_RegistrationSynQuick.py | 78 +--- .../tests/test_auto_ResampleImageBySpacing.py | 56 +-- .../ants/tests/test_auto_ThresholdImage.py | 73 +--- .../test_auto_WarpImageMultiTransform.py | 67 +-- ..._auto_WarpTimeSeriesImageMultiTransform.py | 67 +-- .../ants/tests/test_auto_antsIntroduction.py | 85 +--- .../tests/test_auto_buildtemplateparallel.py | 76 +--- .../base/tests/test_auto_CommandLine.py | 10 +- .../base/tests/test_auto_MpiCommandLine.py | 13 +- 
.../tests/test_auto_SEMLikeCommandLine.py | 10 +- .../base/tests/test_auto_StdOutCommandLine.py | 16 +- .../brainsuite/tests/test_auto_BDP.py | 180 +++------ .../brainsuite/tests/test_auto_Bfc.py | 127 ++---- .../brainsuite/tests/test_auto_Bse.py | 112 ++--- .../brainsuite/tests/test_auto_Cerebro.py | 107 ++--- .../brainsuite/tests/test_auto_Cortex.py | 60 +-- .../brainsuite/tests/test_auto_Dewisp.py | 43 +- .../brainsuite/tests/test_auto_Dfs.py | 78 +--- .../brainsuite/tests/test_auto_Hemisplit.py | 74 +--- .../brainsuite/tests/test_auto_Pialmesh.py | 99 +---- .../brainsuite/tests/test_auto_Pvc.py | 56 +-- .../brainsuite/tests/test_auto_SVReg.py | 99 ++--- .../brainsuite/tests/test_auto_Scrubmask.py | 49 +-- .../brainsuite/tests/test_auto_Skullfinder.py | 73 +--- .../brainsuite/tests/test_auto_Tca.py | 49 +-- .../tests/test_auto_ThicknessPVC.py | 14 +- .../camino/tests/test_auto_AnalyzeHeader.py | 133 ++---- .../tests/test_auto_ComputeEigensystem.py | 47 +-- .../test_auto_ComputeFractionalAnisotropy.py | 47 +-- .../tests/test_auto_ComputeMeanDiffusivity.py | 47 +-- .../tests/test_auto_ComputeTensorTrace.py | 47 +-- .../camino/tests/test_auto_Conmat.py | 51 +-- .../camino/tests/test_auto_DT2NIfTI.py | 36 +- .../camino/tests/test_auto_DTIFit.py | 46 +-- .../camino/tests/test_auto_DTLUTGen.py | 76 +--- .../camino/tests/test_auto_DTMetric.py | 47 +-- .../camino/tests/test_auto_FSL2Scheme.py | 69 +--- .../camino/tests/test_auto_Image2Voxel.py | 35 +- .../camino/tests/test_auto_ImageStats.py | 38 +- .../camino/tests/test_auto_LinRecon.py | 56 +-- .../interfaces/camino/tests/test_auto_MESD.py | 67 +-- .../camino/tests/test_auto_ModelFit.py | 92 +---- .../camino/tests/test_auto_NIfTIDT2Camino.py | 56 +-- .../camino/tests/test_auto_PicoPDFs.py | 60 +-- .../camino/tests/test_auto_ProcStreamlines.py | 173 ++------ .../camino/tests/test_auto_QBallMX.py | 53 +-- .../camino/tests/test_auto_SFLUTGen.py | 64 +-- .../camino/tests/test_auto_SFPICOCalibData.py | 86 +--- .../camino/tests/test_auto_SFPeaks.py | 87 +--- .../camino/tests/test_auto_Shredder.py | 47 +-- .../camino/tests/test_auto_Track.py | 113 ++---- .../camino/tests/test_auto_TrackBallStick.py | 113 ++---- .../camino/tests/test_auto_TrackBayesDirac.py | 153 ++----- .../tests/test_auto_TrackBedpostxDeter.py | 123 ++---- .../tests/test_auto_TrackBedpostxProba.py | 128 ++---- .../camino/tests/test_auto_TrackBootstrap.py | 138 ++----- .../camino/tests/test_auto_TrackDT.py | 113 ++---- .../camino/tests/test_auto_TrackPICo.py | 122 ++---- .../camino/tests/test_auto_TractShredder.py | 47 +-- .../camino/tests/test_auto_VtkStreamlines.py | 70 +--- .../tests/test_auto_Camino2Trackvis.py | 60 +-- .../tests/test_auto_Trackvis2Camino.py | 35 +- .../tests/test_auto_CAT12SANLMDenoising.py | 61 +-- .../cat12/tests/test_auto_CAT12Segment.py | 276 +++---------- ...auto_ExtractAdditionalSurfaceParameters.py | 45 +-- ...est_auto_ExtractROIBasedSurfaceMeasures.py | 40 +- .../cmtk/tests/test_auto_AverageNetworks.py | 28 +- .../cmtk/tests/test_auto_CFFConverter.py | 19 +- .../cmtk/tests/test_auto_CreateMatrix.py | 114 ++---- .../cmtk/tests/test_auto_CreateNodes.py | 21 +- .../cmtk/tests/test_auto_MergeCNetworks.py | 14 +- .../tests/test_auto_NetworkBasedStatistic.py | 44 +- .../cmtk/tests/test_auto_NetworkXMetrics.py | 76 +--- .../cmtk/tests/test_auto_Parcellate.py | 49 +-- .../interfaces/cmtk/tests/test_auto_ROIGen.py | 37 +- .../tests/test_auto_DTIRecon.py | 102 ++--- .../tests/test_auto_DTITracker.py | 113 ++---- .../tests/test_auto_HARDIMat.py | 61 +-- 
.../tests/test_auto_ODFRecon.py | 98 +---- .../tests/test_auto_ODFTracker.py | 128 ++---- .../tests/test_auto_SplineFilter.py | 35 +- .../tests/test_auto_TrackMerge.py | 28 +- .../dipy/tests/test_auto_APMQball.py | 29 +- nipype/interfaces/dipy/tests/test_auto_CSD.py | 48 +-- nipype/interfaces/dipy/tests/test_auto_DTI.py | 47 +-- .../dipy/tests/test_auto_Denoise.py | 36 +- .../tests/test_auto_DipyDiffusionInterface.py | 19 +- .../tests/test_auto_EstimateResponseSH.py | 63 +-- .../dipy/tests/test_auto_RESTORE.py | 55 +-- .../dipy/tests/test_auto_Resample.py | 16 +- .../tests/test_auto_SimulateMultiTensor.py | 93 +---- .../tests/test_auto_StreamlineTractography.py | 71 +--- .../dipy/tests/test_auto_TensorMode.py | 29 +- .../dipy/tests/test_auto_TrackDensityMap.py | 24 +- .../dtitk/tests/test_auto_AffScalarVol.py | 47 +-- .../tests/test_auto_AffSymTensor3DVol.py | 52 +-- .../dtitk/tests/test_auto_Affine.py | 52 +-- .../dtitk/tests/test_auto_AffineTask.py | 52 +-- .../dtitk/tests/test_auto_BinThresh.py | 50 +-- .../dtitk/tests/test_auto_BinThreshTask.py | 50 +-- .../dtitk/tests/test_auto_CommandLineDtitk.py | 10 +- .../dtitk/tests/test_auto_ComposeXfm.py | 33 +- .../dtitk/tests/test_auto_ComposeXfmTask.py | 33 +- .../dtitk/tests/test_auto_Diffeo.py | 56 +-- .../dtitk/tests/test_auto_DiffeoScalarVol.py | 51 +-- .../tests/test_auto_DiffeoSymTensor3DVol.py | 61 +-- .../dtitk/tests/test_auto_DiffeoTask.py | 56 +-- .../interfaces/dtitk/tests/test_auto_Rigid.py | 52 +-- .../dtitk/tests/test_auto_RigidTask.py | 52 +-- .../dtitk/tests/test_auto_SVAdjustVoxSp.py | 35 +- .../tests/test_auto_SVAdjustVoxSpTask.py | 35 +- .../dtitk/tests/test_auto_SVResample.py | 40 +- .../dtitk/tests/test_auto_SVResampleTask.py | 40 +- .../tests/test_auto_TVAdjustOriginTask.py | 35 +- .../dtitk/tests/test_auto_TVAdjustVoxSp.py | 35 +- .../tests/test_auto_TVAdjustVoxSpTask.py | 35 +- .../dtitk/tests/test_auto_TVResample.py | 44 +- .../dtitk/tests/test_auto_TVResampleTask.py | 44 +- .../dtitk/tests/test_auto_TVtool.py | 31 +- .../dtitk/tests/test_auto_TVtoolTask.py | 31 +- .../dtitk/tests/test_auto_affScalarVolTask.py | 47 +-- .../tests/test_auto_affSymTensor3DVolTask.py | 52 +-- .../tests/test_auto_diffeoScalarVolTask.py | 51 +-- .../test_auto_diffeoSymTensor3DVolTask.py | 61 +-- .../elastix/tests/test_auto_AnalyzeWarp.py | 60 +-- .../elastix/tests/test_auto_ApplyWarp.py | 39 +- .../elastix/tests/test_auto_EditTransform.py | 32 +- .../elastix/tests/test_auto_PointsWarp.py | 39 +- .../elastix/tests/test_auto_Registration.py | 57 +-- .../tests/test_auto_AddXFormToHeader.py | 44 +- .../freesurfer/tests/test_auto_Aparc2Aseg.py | 107 +---- .../freesurfer/tests/test_auto_Apas2Aseg.py | 28 +- .../freesurfer/tests/test_auto_ApplyMask.py | 64 +-- .../tests/test_auto_ApplyVolTransform.py | 61 +-- .../freesurfer/tests/test_auto_Binarize.py | 130 ++---- .../freesurfer/tests/test_auto_CALabel.py | 79 +--- .../freesurfer/tests/test_auto_CANormalize.py | 52 +-- .../freesurfer/tests/test_auto_CARegister.py | 70 +--- .../test_auto_CheckTalairachAlignment.py | 27 +- .../freesurfer/tests/test_auto_Concatenate.py | 88 +--- .../tests/test_auto_ConcatenateLTA.py | 53 +-- .../freesurfer/tests/test_auto_Contrast.py | 62 +-- .../freesurfer/tests/test_auto_Curvature.py | 44 +- .../tests/test_auto_CurvatureStats.py | 59 +-- .../tests/test_auto_DICOMConvert.py | 37 +- .../freesurfer/tests/test_auto_EMRegister.py | 47 +-- .../tests/test_auto_EditWMwithAseg.py | 47 +-- .../freesurfer/tests/test_auto_EulerNumber.py | 21 +- 
.../tests/test_auto_ExtractMainComponent.py | 22 +- .../freesurfer/tests/test_auto_FSCommand.py | 9 +- .../tests/test_auto_FSCommandOpenMP.py | 9 +- .../tests/test_auto_FSScriptCommand.py | 9 +- .../freesurfer/tests/test_auto_FitMSParams.py | 33 +- .../freesurfer/tests/test_auto_FixTopology.py | 69 +--- .../tests/test_auto_FuseSegmentations.py | 47 +-- .../freesurfer/tests/test_auto_GLMFit.py | 280 +++---------- .../freesurfer/tests/test_auto_ImageInfo.py | 19 +- .../freesurfer/tests/test_auto_Jacobian.py | 29 +- .../freesurfer/tests/test_auto_LTAConvert.py | 72 +--- .../freesurfer/tests/test_auto_Label2Annot.py | 54 +-- .../freesurfer/tests/test_auto_Label2Label.py | 62 +-- .../freesurfer/tests/test_auto_Label2Vol.py | 75 +--- .../tests/test_auto_MNIBiasCorrection.py | 56 +-- .../freesurfer/tests/test_auto_MPRtoMNI305.py | 33 +- .../freesurfer/tests/test_auto_MRIConvert.py | 359 ++++------------ .../freesurfer/tests/test_auto_MRICoreg.py | 138 ++----- .../freesurfer/tests/test_auto_MRIFill.py | 47 +-- .../tests/test_auto_MRIMarchingCubes.py | 41 +- .../freesurfer/tests/test_auto_MRIPretess.py | 48 +-- .../freesurfer/tests/test_auto_MRISPreproc.py | 83 +--- .../tests/test_auto_MRISPreprocReconAll.py | 93 +---- .../tests/test_auto_MRITessellate.py | 43 +- .../freesurfer/tests/test_auto_MRIsCALabel.py | 71 +--- .../freesurfer/tests/test_auto_MRIsCalc.py | 51 +-- .../freesurfer/tests/test_auto_MRIsCombine.py | 27 +- .../freesurfer/tests/test_auto_MRIsConvert.py | 101 +---- .../freesurfer/tests/test_auto_MRIsExpand.py | 74 +--- .../freesurfer/tests/test_auto_MRIsInflate.py | 34 +- .../freesurfer/tests/test_auto_MS_LDA.py | 63 +-- .../tests/test_auto_MakeAverageSubject.py | 25 +- .../tests/test_auto_MakeSurfaces.py | 125 ++---- .../freesurfer/tests/test_auto_Normalize.py | 40 +- .../tests/test_auto_OneSampleTTest.py | 280 +++---------- .../freesurfer/tests/test_auto_Paint.py | 33 +- .../tests/test_auto_ParcellationStats.py | 130 ++---- .../tests/test_auto_ParseDICOMDir.py | 34 +- .../freesurfer/tests/test_auto_ReconAll.py | 382 ++++-------------- .../freesurfer/tests/test_auto_Register.py | 51 +-- .../tests/test_auto_RegisterAVItoTalairach.py | 45 +-- .../tests/test_auto_RelabelHypointensities.py | 41 +- .../tests/test_auto_RemoveIntersection.py | 21 +- .../freesurfer/tests/test_auto_RemoveNeck.py | 40 +- .../freesurfer/tests/test_auto_Resample.py | 34 +- .../tests/test_auto_RobustRegister.py | 169 ++------ .../tests/test_auto_RobustTemplate.py | 73 +--- .../tests/test_auto_SampleToSurface.py | 148 ++----- .../freesurfer/tests/test_auto_SegStats.py | 158 ++------ .../tests/test_auto_SegStatsReconAll.py | 214 +++------- .../freesurfer/tests/test_auto_SegmentCC.py | 40 +- .../freesurfer/tests/test_auto_SegmentWM.py | 29 +- .../freesurfer/tests/test_auto_Smooth.py | 58 +-- .../tests/test_auto_SmoothTessellation.py | 80 +--- .../freesurfer/tests/test_auto_Sphere.py | 34 +- .../tests/test_auto_SphericalAverage.py | 71 +--- .../tests/test_auto_Surface2VolTransform.py | 53 +-- .../tests/test_auto_SurfaceSmooth.py | 56 +-- .../tests/test_auto_SurfaceSnapshots.py | 140 ++----- .../tests/test_auto_SurfaceTransform.py | 57 +-- .../tests/test_auto_SynthesizeFLASH.py | 58 +-- .../tests/test_auto_TalairachAVI.py | 37 +- .../freesurfer/tests/test_auto_TalairachQC.py | 23 +- .../freesurfer/tests/test_auto_Tkregister2.py | 97 +---- .../tests/test_auto_UnpackSDICOMDir.py | 44 +- .../freesurfer/tests/test_auto_VolumeMask.py | 83 +--- .../tests/test_auto_WatershedSkullStrip.py | 44 +- 
.../fsl/tests/test_auto_AR1Image.py | 49 +-- .../fsl/tests/test_auto_AccuracyTester.py | 36 +- .../fsl/tests/test_auto_ApplyMask.py | 50 +-- .../fsl/tests/test_auto_ApplyTOPUP.py | 48 +-- .../fsl/tests/test_auto_ApplyWarp.py | 77 +--- .../fsl/tests/test_auto_ApplyXFM.py | 227 +++-------- .../interfaces/fsl/tests/test_auto_AvScale.py | 25 +- .../interfaces/fsl/tests/test_auto_B0Calc.py | 83 +--- .../fsl/tests/test_auto_BEDPOSTX5.py | 141 ++----- nipype/interfaces/fsl/tests/test_auto_BET.py | 111 ++--- .../fsl/tests/test_auto_BinaryMaths.py | 54 +-- .../fsl/tests/test_auto_ChangeDataType.py | 44 +- .../fsl/tests/test_auto_Classifier.py | 37 +- .../interfaces/fsl/tests/test_auto_Cleaner.py | 56 +-- .../interfaces/fsl/tests/test_auto_Cluster.py | 162 ++------ .../interfaces/fsl/tests/test_auto_Complex.py | 75 +--- .../fsl/tests/test_auto_ContrastMgr.py | 54 +-- .../fsl/tests/test_auto_ConvertWarp.py | 89 +--- .../fsl/tests/test_auto_ConvertXFM.py | 28 +- .../fsl/tests/test_auto_CopyGeom.py | 27 +- .../interfaces/fsl/tests/test_auto_DTIFit.py | 137 ++----- .../fsl/tests/test_auto_DilateImage.py | 67 +-- .../fsl/tests/test_auto_DistanceMap.py | 41 +- .../fsl/tests/test_auto_DualRegression.py | 63 +-- .../fsl/tests/test_auto_EPIDeWarp.py | 92 +---- nipype/interfaces/fsl/tests/test_auto_Eddy.py | 282 +++---------- .../fsl/tests/test_auto_EddyCorrect.py | 29 +- .../fsl/tests/test_auto_EddyQuad.py | 82 +--- .../interfaces/fsl/tests/test_auto_EpiReg.py | 132 ++---- .../fsl/tests/test_auto_ErodeImage.py | 67 +-- .../fsl/tests/test_auto_ExtractROI.py | 70 +--- nipype/interfaces/fsl/tests/test_auto_FAST.py | 116 ++---- nipype/interfaces/fsl/tests/test_auto_FEAT.py | 20 +- .../fsl/tests/test_auto_FEATModel.py | 42 +- .../fsl/tests/test_auto_FEATRegister.py | 19 +- .../interfaces/fsl/tests/test_auto_FIRST.py | 55 +-- .../interfaces/fsl/tests/test_auto_FLAMEO.py | 96 +---- .../interfaces/fsl/tests/test_auto_FLIRT.py | 226 +++-------- .../interfaces/fsl/tests/test_auto_FNIRT.py | 224 +++------- .../fsl/tests/test_auto_FSLCommand.py | 9 +- .../fsl/tests/test_auto_FSLXCommand.py | 135 ++----- .../interfaces/fsl/tests/test_auto_FUGUE.py | 156 ++----- .../fsl/tests/test_auto_FeatureExtractor.py | 23 +- .../fsl/tests/test_auto_FilterRegressor.py | 58 +-- .../fsl/tests/test_auto_FindTheBiggest.py | 28 +- nipype/interfaces/fsl/tests/test_auto_GLM.py | 112 ++--- .../fsl/tests/test_auto_ICA_AROMA.py | 71 +--- .../fsl/tests/test_auto_ImageMaths.py | 49 +-- .../fsl/tests/test_auto_ImageMeants.py | 63 +-- .../fsl/tests/test_auto_ImageStats.py | 42 +- .../interfaces/fsl/tests/test_auto_InvWarp.py | 57 +-- .../fsl/tests/test_auto_IsotropicSmooth.py | 57 +-- .../interfaces/fsl/tests/test_auto_L2Model.py | 18 +- .../fsl/tests/test_auto_Level1Design.py | 25 +- .../interfaces/fsl/tests/test_auto_MCFLIRT.py | 115 ++---- .../interfaces/fsl/tests/test_auto_MELODIC.py | 219 +++------- .../fsl/tests/test_auto_MakeDyadicVectors.py | 49 +-- .../fsl/tests/test_auto_MathsCommand.py | 43 +- .../fsl/tests/test_auto_MaxImage.py | 49 +-- .../fsl/tests/test_auto_MaxnImage.py | 49 +-- .../fsl/tests/test_auto_MeanImage.py | 49 +-- .../fsl/tests/test_auto_MedianImage.py | 49 +-- .../interfaces/fsl/tests/test_auto_Merge.py | 32 +- .../fsl/tests/test_auto_MinImage.py | 49 +-- .../fsl/tests/test_auto_MotionOutliers.py | 48 +-- .../fsl/tests/test_auto_MultiImageMaths.py | 53 +-- .../tests/test_auto_MultipleRegressDesign.py | 24 +- .../interfaces/fsl/tests/test_auto_Overlay.py | 74 +--- .../interfaces/fsl/tests/test_auto_PRELUDE.py | 77 +--- 
.../fsl/tests/test_auto_PercentileImage.py | 54 +-- .../fsl/tests/test_auto_PlotMotionParams.py | 41 +- .../fsl/tests/test_auto_PlotTimeSeries.py | 88 +--- .../fsl/tests/test_auto_PowerSpectrum.py | 28 +- .../fsl/tests/test_auto_PrepareFieldmap.py | 54 +-- .../fsl/tests/test_auto_ProbTrackX.py | 178 ++------ .../fsl/tests/test_auto_ProbTrackX2.py | 259 +++--------- .../fsl/tests/test_auto_ProjThresh.py | 25 +- .../fsl/tests/test_auto_Randomise.py | 129 ++---- .../fsl/tests/test_auto_Reorient2Std.py | 28 +- .../fsl/tests/test_auto_RobustFOV.py | 27 +- nipype/interfaces/fsl/tests/test_auto_SMM.py | 26 +- .../interfaces/fsl/tests/test_auto_SUSAN.py | 58 +-- .../interfaces/fsl/tests/test_auto_SigLoss.py | 40 +- .../interfaces/fsl/tests/test_auto_Slice.py | 24 +- .../fsl/tests/test_auto_SliceTimer.py | 57 +-- .../interfaces/fsl/tests/test_auto_Slicer.py | 87 +--- .../interfaces/fsl/tests/test_auto_Smooth.py | 22 +- .../fsl/tests/test_auto_SmoothEstimate.py | 39 +- .../fsl/tests/test_auto_SpatialFilter.py | 67 +-- .../interfaces/fsl/tests/test_auto_Split.py | 31 +- .../fsl/tests/test_auto_StdImage.py | 49 +-- .../fsl/tests/test_auto_SwapDimensions.py | 34 +- .../interfaces/fsl/tests/test_auto_TOPUP.py | 124 ++---- .../fsl/tests/test_auto_TemporalFilter.py | 55 +-- .../fsl/tests/test_auto_Threshold.py | 57 +-- .../fsl/tests/test_auto_TractSkeleton.py | 58 +-- .../fsl/tests/test_auto_Training.py | 31 +- .../fsl/tests/test_auto_TrainingSetCreator.py | 16 +- .../fsl/tests/test_auto_UnaryMaths.py | 49 +-- .../interfaces/fsl/tests/test_auto_VecReg.py | 68 +--- .../fsl/tests/test_auto_WarpPoints.py | 56 +-- .../fsl/tests/test_auto_WarpPointsFromStd.py | 56 +-- .../fsl/tests/test_auto_WarpPointsToStd.py | 61 +-- .../fsl/tests/test_auto_WarpUtils.py | 54 +-- .../fsl/tests/test_auto_XFibres5.py | 140 ++----- .../minc/tests/test_auto_Average.py | 99 +---- .../interfaces/minc/tests/test_auto_BBox.py | 55 +-- .../interfaces/minc/tests/test_auto_Beast.py | 106 +---- .../minc/tests/test_auto_BestLinReg.py | 39 +- .../minc/tests/test_auto_BigAverage.py | 46 +-- .../interfaces/minc/tests/test_auto_Blob.py | 38 +- .../interfaces/minc/tests/test_auto_Blur.py | 81 +--- .../interfaces/minc/tests/test_auto_Calc.py | 86 +--- .../minc/tests/test_auto_Convert.py | 43 +- .../interfaces/minc/tests/test_auto_Copy.py | 32 +- .../interfaces/minc/tests/test_auto_Dump.py | 64 +-- .../minc/tests/test_auto_Extract.py | 75 +--- .../minc/tests/test_auto_Gennlxfm.py | 38 +- .../interfaces/minc/tests/test_auto_Math.py | 182 +++------ .../interfaces/minc/tests/test_auto_NlpFit.py | 64 +-- .../interfaces/minc/tests/test_auto_Norm.py | 74 +--- nipype/interfaces/minc/tests/test_auto_Pik.py | 112 ++--- .../minc/tests/test_auto_Resample.py | 126 ++---- .../minc/tests/test_auto_Reshape.py | 35 +- .../interfaces/minc/tests/test_auto_ToEcat.py | 54 +-- .../interfaces/minc/tests/test_auto_ToRaw.py | 53 +-- .../minc/tests/test_auto_VolSymm.py | 66 +-- .../minc/tests/test_auto_Volcentre.py | 43 +- .../interfaces/minc/tests/test_auto_Voliso.py | 43 +- .../interfaces/minc/tests/test_auto_Volpad.py | 51 +-- .../interfaces/minc/tests/test_auto_XfmAvg.py | 55 +-- .../minc/tests/test_auto_XfmConcat.py | 32 +- .../minc/tests/test_auto_XfmInvert.py | 39 +- .../test_auto_JistBrainMgdmSegmentation.py | 132 ++---- ...est_auto_JistBrainMp2rageDuraEstimation.py | 55 +-- ...est_auto_JistBrainMp2rageSkullStripping.py | 86 +--- .../test_auto_JistBrainPartialVolumeFilter.py | 50 +-- ...est_auto_JistCortexSurfaceMeshInflation.py | 72 +--- 
.../test_auto_JistIntensityMp2rageMasking.py | 89 +--- .../test_auto_JistLaminarProfileCalculator.py | 51 +-- .../test_auto_JistLaminarProfileGeometry.py | 58 +-- .../test_auto_JistLaminarProfileSampling.py | 58 +-- .../test_auto_JistLaminarROIAveraging.py | 56 +-- ...test_auto_JistLaminarVolumetricLayering.py | 99 ++--- ...test_auto_MedicAlgorithmImageCalculator.py | 51 +-- .../test_auto_MedicAlgorithmLesionToads.py | 197 +++------ .../test_auto_MedicAlgorithmMipavReorient.py | 69 +--- .../mipav/tests/test_auto_MedicAlgorithmN3.py | 80 +--- .../test_auto_MedicAlgorithmSPECTRE2010.py | 245 +++-------- ...uto_MedicAlgorithmThresholdToBinaryMask.py | 48 +-- .../mipav/tests/test_auto_RandomVol.py | 73 +--- .../mne/tests/test_auto_WatershedBEM.py | 64 +-- ..._auto_ConstrainedSphericalDeconvolution.py | 85 +--- .../test_auto_DWI2SphericalHarmonicsImage.py | 43 +- .../mrtrix/tests/test_auto_DWI2Tensor.py | 54 +-- ...est_auto_DiffusionTensorStreamlineTrack.py | 108 ++--- .../tests/test_auto_Directions2Amplitude.py | 48 +-- .../mrtrix/tests/test_auto_Erode.py | 48 +-- .../tests/test_auto_EstimateResponseForSH.py | 59 +-- .../mrtrix/tests/test_auto_FSL2MRTrix.py | 33 +- .../mrtrix/tests/test_auto_FilterTracks.py | 53 +-- .../mrtrix/tests/test_auto_FindShPeaks.py | 59 +-- .../tests/test_auto_GenerateDirections.py | 41 +- .../test_auto_GenerateWhiteMatterMask.py | 43 +- .../mrtrix/tests/test_auto_MRConvert.py | 83 +--- .../mrtrix/tests/test_auto_MRMultiply.py | 38 +- .../mrtrix/tests/test_auto_MRTransform.py | 77 +--- .../mrtrix/tests/test_auto_MRTrix2TrackVis.py | 29 +- .../mrtrix/tests/test_auto_MRTrixInfo.py | 16 +- .../mrtrix/tests/test_auto_MRTrixViewer.py | 25 +- .../mrtrix/tests/test_auto_MedianFilter3D.py | 39 +- ...cSphericallyDeconvolutedStreamlineTrack.py | 107 ++--- ..._SphericallyDeconvolutedStreamlineTrack.py | 103 +---- .../mrtrix/tests/test_auto_StreamlineTrack.py | 103 +---- .../test_auto_Tensor2ApparentDiffusion.py | 39 +- .../test_auto_Tensor2FractionalAnisotropy.py | 39 +- .../mrtrix/tests/test_auto_Tensor2Vector.py | 39 +- .../mrtrix/tests/test_auto_Threshold.py | 57 +-- .../mrtrix/tests/test_auto_Tracks2Prob.py | 62 +-- .../mrtrix3/tests/test_auto_ACTPrepareFSL.py | 28 +- .../mrtrix3/tests/test_auto_BrainMask.py | 57 +-- .../tests/test_auto_BuildConnectome.py | 77 +--- .../mrtrix3/tests/test_auto_ComputeTDI.py | 103 +---- ..._auto_ConstrainedSphericalDeconvolution.py | 132 ++---- .../mrtrix3/tests/test_auto_DWIBiasCorrect.py | 78 +--- .../mrtrix3/tests/test_auto_DWIDenoise.py | 64 +-- .../mrtrix3/tests/test_auto_DWIExtract.py | 75 +--- .../mrtrix3/tests/test_auto_DWIPreproc.py | 119 ++---- .../mrtrix3/tests/test_auto_EstimateFOD.py | 135 ++----- .../mrtrix3/tests/test_auto_FitTensor.py | 77 +--- .../mrtrix3/tests/test_auto_Generate5tt.py | 64 +-- .../mrtrix3/tests/test_auto_LabelConfig.py | 64 +-- .../mrtrix3/tests/test_auto_LabelConvert.py | 51 +-- .../mrtrix3/tests/test_auto_MRConvert.py | 77 +--- .../mrtrix3/tests/test_auto_MRDeGibbs.py | 74 +--- .../mrtrix3/tests/test_auto_MRMath.py | 68 +--- .../mrtrix3/tests/test_auto_MRResize.py | 68 +--- .../mrtrix3/tests/test_auto_MRTrix3Base.py | 10 +- .../mrtrix3/tests/test_auto_Mesh2PVE.py | 40 +- .../tests/test_auto_ReplaceFSwithFIRST.py | 41 +- .../mrtrix3/tests/test_auto_ResponseSD.py | 101 +---- .../mrtrix3/tests/test_auto_SH2Amp.py | 33 +- .../mrtrix3/tests/test_auto_SHConv.py | 29 +- .../mrtrix3/tests/test_auto_TCK2VTK.py | 44 +- .../mrtrix3/tests/test_auto_TensorMetrics.py | 67 +-- .../mrtrix3/tests/test_auto_Tractography.py | 
212 +++------- .../niftyfit/tests/test_auto_DwiTool.py | 75 +--- .../niftyfit/tests/test_auto_FitAsl.py | 166 ++------ .../niftyfit/tests/test_auto_FitDwi.py | 179 ++------ .../niftyfit/tests/test_auto_FitQt1.py | 164 ++------ .../tests/test_auto_NiftyFitCommand.py | 10 +- .../tests/test_auto_NiftyRegCommand.py | 14 +- .../niftyreg/tests/test_auto_RegAladin.py | 125 ++---- .../niftyreg/tests/test_auto_RegAverage.py | 32 +- .../niftyreg/tests/test_auto_RegF3D.py | 230 +++-------- .../niftyreg/tests/test_auto_RegJacobian.py | 37 +- .../niftyreg/tests/test_auto_RegMeasure.py | 37 +- .../niftyreg/tests/test_auto_RegResample.py | 67 +-- .../niftyreg/tests/test_auto_RegTools.py | 88 +--- .../niftyreg/tests/test_auto_RegTransform.py | 49 +-- .../niftyseg/tests/test_auto_BinaryMaths.py | 33 +- .../tests/test_auto_BinaryMathsInteger.py | 39 +- .../niftyseg/tests/test_auto_BinaryStats.py | 42 +- .../niftyseg/tests/test_auto_CalcTopNCC.py | 43 +- .../interfaces/niftyseg/tests/test_auto_EM.py | 79 +--- .../niftyseg/tests/test_auto_FillLesions.py | 78 +--- .../niftyseg/tests/test_auto_LabelFusion.py | 88 +--- .../niftyseg/tests/test_auto_MathsCommand.py | 27 +- .../niftyseg/tests/test_auto_Merge.py | 37 +- .../tests/test_auto_NiftySegCommand.py | 10 +- .../niftyseg/tests/test_auto_PatchMatch.py | 54 +-- .../niftyseg/tests/test_auto_StatsCommand.py | 31 +- .../niftyseg/tests/test_auto_TupleMaths.py | 43 +- .../niftyseg/tests/test_auto_UnaryMaths.py | 33 +- .../niftyseg/tests/test_auto_UnaryStats.py | 37 +- .../nipy/tests/test_auto_ComputeMask.py | 15 +- .../nipy/tests/test_auto_EstimateContrast.py | 44 +- .../interfaces/nipy/tests/test_auto_FitGLM.py | 56 +-- .../nipy/tests/test_auto_Similarity.py | 26 +- .../tests/test_auto_SpaceTimeRealigner.py | 18 +- .../interfaces/nipy/tests/test_auto_Trim.py | 27 +- .../tests/test_auto_CoherenceAnalyzer.py | 45 +-- ...t_auto_BRAINSPosteriorToContinuousClass.py | 55 +-- .../brains/tests/test_auto_BRAINSTalairach.py | 69 +--- .../tests/test_auto_BRAINSTalairachMask.py | 43 +- .../tests/test_auto_GenerateEdgeMapImage.py | 54 +-- .../tests/test_auto_GeneratePurePlugMask.py | 33 +- .../test_auto_HistogramMatchingFilter.py | 58 +-- .../brains/tests/test_auto_SimilarityIndex.py | 28 +- .../diffusion/tests/test_auto_DWIConvert.py | 106 ++--- .../tests/test_auto_compareTractInclusion.py | 43 +- .../diffusion/tests/test_auto_dtiaverage.py | 32 +- .../diffusion/tests/test_auto_dtiestim.py | 105 ++--- .../diffusion/tests/test_auto_dtiprocess.py | 202 +++------ .../tests/test_auto_extractNrrdVectorIndex.py | 37 +- .../tests/test_auto_gtractAnisotropyMap.py | 33 +- .../tests/test_auto_gtractAverageBvalues.py | 37 +- .../tests/test_auto_gtractClipAnisotropy.py | 37 +- .../tests/test_auto_gtractCoRegAnatomy.py | 116 ++---- .../tests/test_auto_gtractConcatDwi.py | 32 +- .../test_auto_gtractCopyImageOrientation.py | 34 +- .../tests/test_auto_gtractCoregBvalues.py | 84 +--- .../tests/test_auto_gtractCostFastMarching.py | 59 +-- .../tests/test_auto_gtractCreateGuideFiber.py | 37 +- .../test_auto_gtractFastMarchingTracking.py | 74 +--- .../tests/test_auto_gtractFiberTracking.py | 129 ++---- .../tests/test_auto_gtractImageConformity.py | 34 +- .../test_auto_gtractInvertBSplineTransform.py | 39 +- ...test_auto_gtractInvertDisplacementField.py | 38 +- .../test_auto_gtractInvertRigidTransform.py | 29 +- .../test_auto_gtractResampleAnisotropy.py | 39 +- .../tests/test_auto_gtractResampleB0.py | 47 +-- .../test_auto_gtractResampleCodeImage.py | 43 +- 
.../test_auto_gtractResampleDWIInPlace.py | 59 +-- .../tests/test_auto_gtractResampleFibers.py | 39 +- .../diffusion/tests/test_auto_gtractTensor.py | 68 +--- ...auto_gtractTransformToDisplacementField.py | 32 +- .../diffusion/tests/test_auto_maxcurvature.py | 33 +- .../tests/test_auto_UKFTractography.py | 152 ++----- .../tests/test_auto_fiberprocess.py | 78 +--- .../tests/test_auto_fiberstats.py | 18 +- .../tests/test_auto_fibertrack.py | 70 +--- .../filtering/tests/test_auto_CannyEdge.py | 37 +- ...to_CannySegmentationLevelSetImageFilter.py | 56 +-- .../filtering/tests/test_auto_DilateImage.py | 34 +- .../filtering/tests/test_auto_DilateMask.py | 38 +- .../filtering/tests/test_auto_DistanceMaps.py | 34 +- .../test_auto_DumpBinaryTrainingVectors.py | 19 +- .../filtering/tests/test_auto_ErodeImage.py | 34 +- .../tests/test_auto_FlippedDifference.py | 30 +- .../test_auto_GenerateBrainClippedImage.py | 34 +- .../test_auto_GenerateSummedGradientImage.py | 38 +- .../tests/test_auto_GenerateTestImage.py | 37 +- ...GradientAnisotropicDiffusionImageFilter.py | 37 +- .../tests/test_auto_HammerAttributeCreator.py | 36 +- .../tests/test_auto_NeighborhoodMean.py | 34 +- .../tests/test_auto_NeighborhoodMedian.py | 34 +- .../tests/test_auto_STAPLEAnalysis.py | 28 +- .../test_auto_TextureFromNoiseImageFilter.py | 29 +- .../tests/test_auto_TextureMeasureFilter.py | 38 +- .../tests/test_auto_UnbiasedNonLocalMeans.py | 50 +-- .../legacy/tests/test_auto_scalartransform.py | 48 +-- .../tests/test_auto_BRAINSDemonWarp.py | 191 +++------ .../registration/tests/test_auto_BRAINSFit.py | 321 ++++----------- .../tests/test_auto_BRAINSResample.py | 65 +-- .../tests/test_auto_BRAINSResize.py | 33 +- .../test_auto_BRAINSTransformFromFiducials.py | 46 +-- .../tests/test_auto_VBRAINSDemonWarp.py | 194 +++------ .../segmentation/tests/test_auto_BRAINSABC.py | 169 ++------ .../test_auto_BRAINSConstellationDetector.py | 217 +++------- ...BRAINSCreateLabelMapFromProbabilityMaps.py | 49 +-- .../segmentation/tests/test_auto_BRAINSCut.py | 79 +--- .../tests/test_auto_BRAINSMultiSTAPLE.py | 48 +-- .../tests/test_auto_BRAINSROIAuto.py | 63 +-- ...t_auto_BinaryMaskEditorBasedOnLandmarks.py | 42 +- .../segmentation/tests/test_auto_ESLR.py | 53 +-- .../semtools/tests/test_auto_DWICompare.py | 19 +- .../tests/test_auto_DWISimpleCompare.py | 23 +- ...o_GenerateCsfClippedFromClassifiedImage.py | 25 +- .../tests/test_auto_BRAINSAlignMSP.py | 66 +-- .../tests/test_auto_BRAINSClipInferior.py | 37 +- .../test_auto_BRAINSConstellationModeler.py | 68 +--- .../tests/test_auto_BRAINSEyeDetector.py | 33 +- ...est_auto_BRAINSInitializedControlPoints.py | 43 +- .../test_auto_BRAINSLandmarkInitializer.py | 29 +- .../test_auto_BRAINSLinearModelerEPCA.py | 18 +- .../tests/test_auto_BRAINSLmkTransform.py | 47 +-- .../utilities/tests/test_auto_BRAINSMush.py | 94 +---- .../tests/test_auto_BRAINSSnapShotWriter.py | 42 +- .../tests/test_auto_BRAINSTransformConvert.py | 44 +- ...st_auto_BRAINSTrimForegroundInDirection.py | 49 +-- .../tests/test_auto_CleanUpOverlapLabels.py | 20 +- .../tests/test_auto_FindCenterOfBrain.py | 103 ++--- ...auto_GenerateLabelMapFromProbabilityMap.py | 28 +- .../tests/test_auto_ImageRegionPlotter.py | 49 +-- .../tests/test_auto_JointHistogram.py | 33 +- .../tests/test_auto_ShuffleVectorsModule.py | 25 +- .../utilities/tests/test_auto_fcsv_to_hdf5.py | 41 +- .../tests/test_auto_insertMidACPCpoint.py | 25 +- ...test_auto_landmarksConstellationAligner.py | 23 +- ...test_auto_landmarksConstellationWeights.py | 35 +- 
.../diffusion/tests/test_auto_DTIexport.py | 28 +- .../diffusion/tests/test_auto_DTIimport.py | 32 +- .../test_auto_DWIJointRicianLMMSEFilter.py | 46 +-- .../tests/test_auto_DWIRicianLMMSEFilter.py | 70 +--- .../tests/test_auto_DWIToDTIEstimation.py | 50 +-- ..._auto_DiffusionTensorScalarMeasurements.py | 32 +- ...est_auto_DiffusionWeightedVolumeMasking.py | 45 +-- .../tests/test_auto_ResampleDTIVolume.py | 131 ++---- .../test_auto_TractographyLabelMapSeeding.py | 89 +--- .../tests/test_auto_AddScalarVolumes.py | 38 +- .../tests/test_auto_CastScalarVolume.py | 32 +- .../tests/test_auto_CheckerBoardFilter.py | 39 +- ...test_auto_CurvatureAnisotropicDiffusion.py | 40 +- .../tests/test_auto_ExtractSkeleton.py | 44 +- .../test_auto_GaussianBlurImageFilter.py | 32 +- .../test_auto_GradientAnisotropicDiffusion.py | 40 +- .../test_auto_GrayscaleFillHoleImageFilter.py | 28 +- ...test_auto_GrayscaleGrindPeakImageFilter.py | 28 +- .../tests/test_auto_HistogramMatching.py | 46 +-- .../tests/test_auto_ImageLabelCombine.py | 38 +- .../tests/test_auto_MaskScalarVolume.py | 42 +- .../tests/test_auto_MedianImageFilter.py | 33 +- .../tests/test_auto_MultiplyScalarVolumes.py | 38 +- .../test_auto_N4ITKBiasFieldCorrection.py | 72 +--- ...test_auto_ResampleScalarVectorDWIVolume.py | 123 ++---- .../tests/test_auto_SubtractScalarVolumes.py | 38 +- .../tests/test_auto_ThresholdScalarVolume.py | 48 +-- ...auto_VotingBinaryHoleFillingImageFilter.py | 45 +-- ...est_auto_DWIUnbiasedNonLocalMeansFilter.py | 51 +-- .../tests/test_auto_AffineRegistration.py | 66 +-- ...test_auto_BSplineDeformableRegistration.py | 79 +--- .../test_auto_BSplineToDeformationField.py | 30 +- .../test_auto_ExpertAutomatedRegistration.py | 136 ++----- .../tests/test_auto_LinearRegistration.py | 72 +--- ..._auto_MultiResolutionAffineRegistration.py | 67 +-- .../test_auto_OtsuThresholdImageFilter.py | 40 +- .../test_auto_OtsuThresholdSegmentation.py | 44 +- .../tests/test_auto_ResampleScalarVolume.py | 37 +- .../tests/test_auto_RigidRegistration.py | 76 +--- .../test_auto_IntensityDifferenceMetric.py | 55 +-- ..._auto_PETStandardUptakeValueComputation.py | 59 +-- .../tests/test_auto_ACPCTransform.py | 32 +- .../tests/test_auto_BRAINSDemonWarp.py | 191 +++------ .../registration/tests/test_auto_BRAINSFit.py | 293 ++++---------- .../tests/test_auto_BRAINSResample.py | 65 +-- .../tests/test_auto_FiducialRegistration.py | 40 +- .../tests/test_auto_VBRAINSDemonWarp.py | 194 +++------ .../tests/test_auto_BRAINSROIAuto.py | 54 +-- .../tests/test_auto_EMSegmentCommandLine.py | 106 ++--- .../test_auto_RobustStatisticsSegmenter.py | 54 +-- ...st_auto_SimpleRegionGrowingSegmentation.py | 56 +-- .../tests/test_auto_DicomToNrrdConverter.py | 42 +- ...test_auto_EMSegmentTransformToNewFormat.py | 29 +- .../tests/test_auto_GrayscaleModelMaker.py | 52 +-- .../tests/test_auto_LabelMapSmoothing.py | 44 +- .../slicer/tests/test_auto_MergeModels.py | 34 +- .../slicer/tests/test_auto_ModelMaker.py | 90 +---- .../slicer/tests/test_auto_ModelToLabelMap.py | 38 +- .../tests/test_auto_OrientScalarVolume.py | 32 +- .../tests/test_auto_ProbeVolumeWithModel.py | 34 +- .../tests/test_auto_SlicerCommandLine.py | 10 +- nipype/interfaces/spm/preprocess.py | 2 +- .../spm/tests/test_auto_Analyze2nii.py | 27 +- .../spm/tests/test_auto_ApplyDeformations.py | 28 +- .../test_auto_ApplyInverseDeformation.py | 39 +- .../spm/tests/test_auto_ApplyTransform.py | 31 +- .../spm/tests/test_auto_CalcCoregAffine.py | 37 +- .../spm/tests/test_auto_Coregister.py | 62 +-- 
.../spm/tests/test_auto_CreateWarped.py | 35 +- .../interfaces/spm/tests/test_auto_DARTEL.py | 36 +- .../spm/tests/test_auto_DARTELNorm2MNI.py | 39 +- .../spm/tests/test_auto_DicomImport.py | 38 +- .../spm/tests/test_auto_EstimateContrast.py | 41 +- .../spm/tests/test_auto_EstimateModel.py | 43 +- .../spm/tests/test_auto_FactorialDesign.py | 46 +-- .../spm/tests/test_auto_FieldMap.py | 142 ++----- .../spm/tests/test_auto_Level1Design.py | 72 +--- .../tests/test_auto_MultiChannelNewSegment.py | 33 +- .../test_auto_MultipleRegressionDesign.py | 60 +-- .../spm/tests/test_auto_NewSegment.py | 39 +- .../spm/tests/test_auto_Normalize.py | 84 +--- .../spm/tests/test_auto_Normalize12.py | 63 +-- .../tests/test_auto_OneSampleTTestDesign.py | 51 +-- .../spm/tests/test_auto_PairedTTestDesign.py | 59 +-- .../interfaces/spm/tests/test_auto_Realign.py | 76 +--- .../spm/tests/test_auto_RealignUnwarp.py | 121 ++---- .../interfaces/spm/tests/test_auto_Reslice.py | 33 +- .../spm/tests/test_auto_ResliceToReference.py | 35 +- .../spm/tests/test_auto_SPMCommand.py | 9 +- .../interfaces/spm/tests/test_auto_Segment.py | 126 ++---- .../spm/tests/test_auto_SliceTiming.py | 49 +-- .../interfaces/spm/tests/test_auto_Smooth.py | 36 +- .../spm/tests/test_auto_Threshold.py | 61 +-- .../tests/test_auto_ThresholdStatistics.py | 33 +- .../tests/test_auto_TwoSampleTTestDesign.py | 64 +-- .../spm/tests/test_auto_VBMSegment.py | 150 ++----- .../tests/test_auto_BIDSDataGrabber.py | 13 +- nipype/interfaces/tests/test_auto_Bru2.py | 42 +- nipype/interfaces/tests/test_auto_C3d.py | 66 +-- .../tests/test_auto_C3dAffineTool.py | 44 +- nipype/interfaces/tests/test_auto_CopyMeta.py | 16 +- .../interfaces/tests/test_auto_DataFinder.py | 12 +- .../interfaces/tests/test_auto_DataGrabber.py | 16 +- nipype/interfaces/tests/test_auto_DataSink.py | 16 +- nipype/interfaces/tests/test_auto_Dcm2nii.py | 97 +---- nipype/interfaces/tests/test_auto_Dcm2niix.py | 102 +---- nipype/interfaces/tests/test_auto_DcmStack.py | 18 +- .../interfaces/tests/test_auto_ExportFile.py | 20 +- .../tests/test_auto_FreeSurferSource.py | 186 ++------- .../tests/test_auto_GroupAndStack.py | 16 +- .../tests/test_auto_JSONFileGrabber.py | 7 +- .../tests/test_auto_JSONFileSink.py | 18 +- .../interfaces/tests/test_auto_LookupMeta.py | 8 +- .../tests/test_auto_MatlabCommand.py | 59 +-- .../interfaces/tests/test_auto_MergeNifti.py | 14 +- nipype/interfaces/tests/test_auto_MeshFix.py | 127 ++---- .../interfaces/tests/test_auto_MySQLSink.py | 14 +- nipype/interfaces/tests/test_auto_PETPVC.py | 79 +--- .../interfaces/tests/test_auto_Quickshear.py | 34 +- nipype/interfaces/tests/test_auto_RCommand.py | 24 +- nipype/interfaces/tests/test_auto_Reorient.py | 17 +- nipype/interfaces/tests/test_auto_Rescale.py | 20 +- .../tests/test_auto_S3DataGrabber.py | 28 +- .../interfaces/tests/test_auto_SQLiteSink.py | 9 +- .../tests/test_auto_SSHDataGrabber.py | 36 +- .../interfaces/tests/test_auto_SelectFiles.py | 12 +- .../tests/test_auto_SignalExtraction.py | 36 +- .../tests/test_auto_SlicerCommandLine.py | 9 +- .../interfaces/tests/test_auto_SplitNifti.py | 13 +- nipype/interfaces/tests/test_auto_XNATSink.py | 40 +- .../interfaces/tests/test_auto_XNATSource.py | 20 +- .../utility/tests/test_auto_AssertEqual.py | 10 +- .../utility/tests/test_auto_CSVReader.py | 8 +- .../utility/tests/test_auto_Function.py | 6 +- .../utility/tests/test_auto_Merge.py | 16 +- .../utility/tests/test_auto_Rename.py | 19 +- .../utility/tests/test_auto_Select.py | 13 +- .../utility/tests/test_auto_Split.py | 12 
+- .../vista/tests/test_auto_Vnifti2Image.py | 28 +- .../vista/tests/test_auto_VtoMat.py | 22 +- .../workbench/tests/test_auto_CiftiSmooth.py | 85 +--- .../tests/test_auto_MetricResample.py | 85 +--- .../workbench/tests/test_auto_WBCommand.py | 10 +- 845 files changed, 11093 insertions(+), 41133 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index 0b2d0f65fb..e152cb308f 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -313,6 +313,11 @@ "name": "Geisler, Daniel", "orcid": "0000-0003-2076-5329" }, + { + "affiliation": "Division of Psychological and Social Medicine and Developmental Neuroscience, Faculty of Medicine, Technische Universit\u00e4t Dresden, Dresden, Germany", + "name": "Bernardoni, Fabio", + "orcid": "0000-0002-5112-405X" + }, { "name": "Salvatore, John" }, diff --git a/Makefile b/Makefile index 03c1152053..75bc40924f 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ # Files are then pushed to sourceforge using rsync with a command like this: # rsync -e ssh nipype-0.1-py2.5.egg cburns,nipy@frs.sourceforge.net:/home/frs/project/n/ni/nipy/nipype/nipype-0.1/ -PYTHON ?= python +PYTHON ?= python3 .PHONY: zipdoc sdist egg upload_to_pypi trailing-spaces clean-pyc clean-so clean-build clean-ctags clean in inplace test-code test-coverage test html specs check-before-commit check gen-base-dockerfile gen-main-dockerfile gen-dockerfiles diff --git a/nipype/algorithms/tests/test_auto_ACompCor.py b/nipype/algorithms/tests/test_auto_ACompCor.py index 814aa71704..2ad9bf459f 100644 --- a/nipype/algorithms/tests/test_auto_ACompCor.py +++ b/nipype/algorithms/tests/test_auto_ACompCor.py @@ -4,56 +4,24 @@ def test_ACompCor_inputs(): input_map = dict( - components_file=dict( - usedefault=True, - ), - failure_mode=dict( - usedefault=True, - ), + components_file=dict(usedefault=True), + failure_mode=dict(usedefault=True), header_prefix=dict(), - high_pass_cutoff=dict( - usedefault=True, - ), - ignore_initial_volumes=dict( - usedefault=True, - ), + high_pass_cutoff=dict(usedefault=True), + ignore_initial_volumes=dict(usedefault=True), mask_files=dict(), - mask_index=dict( - requires=["mask_files"], - xor=["merge_method"], - ), + mask_index=dict(requires=["mask_files"], xor=["merge_method"]), mask_names=dict(), - merge_method=dict( - requires=["mask_files"], - xor=["mask_index"], - ), - num_components=dict( - xor=["variance_threshold"], - ), - pre_filter=dict( - usedefault=True, - ), - realigned_file=dict( - extensions=None, - mandatory=True, - ), - regress_poly_degree=dict( - usedefault=True, - ), + merge_method=dict(requires=["mask_files"], xor=["mask_index"]), + num_components=dict(xor=["variance_threshold"]), + pre_filter=dict(usedefault=True), + realigned_file=dict(extensions=None, mandatory=True), + regress_poly_degree=dict(usedefault=True), repetition_time=dict(), - save_metadata=dict( - usedefault=True, - ), - save_pre_filter=dict( - usedefault=True, - ), - use_regress_poly=dict( - deprecated="0.15.0", - new_name="pre_filter", - ), - variance_threshold=dict( - xor=["num_components"], - ), + save_metadata=dict(usedefault=True), + save_pre_filter=dict(usedefault=True), + use_regress_poly=dict(deprecated="0.15.0", new_name="pre_filter"), + variance_threshold=dict(xor=["num_components"]), ) inputs = ACompCor.input_spec() @@ -64,15 +32,9 @@ def test_ACompCor_inputs(): def test_ACompCor_outputs(): output_map = dict( - components_file=dict( - extensions=None, - ), - metadata_file=dict( - extensions=None, - ), - pre_filter_file=dict( - extensions=None, - ), + 
components_file=dict(extensions=None), + metadata_file=dict(extensions=None), + pre_filter_file=dict(extensions=None), ) outputs = ACompCor.output_spec() diff --git a/nipype/algorithms/tests/test_auto_ActivationCount.py b/nipype/algorithms/tests/test_auto_ActivationCount.py index 7df84ee122..6492d4d9d7 100644 --- a/nipype/algorithms/tests/test_auto_ActivationCount.py +++ b/nipype/algorithms/tests/test_auto_ActivationCount.py @@ -3,14 +3,7 @@ def test_ActivationCount_inputs(): - input_map = dict( - in_files=dict( - mandatory=True, - ), - threshold=dict( - mandatory=True, - ), - ) + input_map = dict(in_files=dict(mandatory=True), threshold=dict(mandatory=True)) inputs = ActivationCount.input_spec() for key, metadata in list(input_map.items()): @@ -20,15 +13,9 @@ def test_ActivationCount_inputs(): def test_ActivationCount_outputs(): output_map = dict( - acm_neg=dict( - extensions=None, - ), - acm_pos=dict( - extensions=None, - ), - out_file=dict( - extensions=None, - ), + acm_neg=dict(extensions=None), + acm_pos=dict(extensions=None), + out_file=dict(extensions=None), ) outputs = ActivationCount.output_spec() diff --git a/nipype/algorithms/tests/test_auto_AddCSVColumn.py b/nipype/algorithms/tests/test_auto_AddCSVColumn.py index b76fd46457..1afe128aa4 100644 --- a/nipype/algorithms/tests/test_auto_AddCSVColumn.py +++ b/nipype/algorithms/tests/test_auto_AddCSVColumn.py @@ -6,14 +6,8 @@ def test_AddCSVColumn_inputs(): input_map = dict( extra_column_heading=dict(), extra_field=dict(), - in_file=dict( - extensions=None, - mandatory=True, - ), - out_file=dict( - extensions=None, - usedefault=True, - ), + in_file=dict(extensions=None, mandatory=True), + out_file=dict(extensions=None, usedefault=True), ) inputs = AddCSVColumn.input_spec() @@ -23,11 +17,7 @@ def test_AddCSVColumn_inputs(): def test_AddCSVColumn_outputs(): - output_map = dict( - csv_file=dict( - extensions=None, - ), - ) + output_map = dict(csv_file=dict(extensions=None)) outputs = AddCSVColumn.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_AddCSVRow.py b/nipype/algorithms/tests/test_auto_AddCSVRow.py index 78976f418d..5806842dc3 100644 --- a/nipype/algorithms/tests/test_auto_AddCSVRow.py +++ b/nipype/algorithms/tests/test_auto_AddCSVRow.py @@ -4,13 +4,7 @@ def test_AddCSVRow_inputs(): input_map = dict( - _outputs=dict( - usedefault=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), + _outputs=dict(usedefault=True), in_file=dict(extensions=None, mandatory=True) ) inputs = AddCSVRow.input_spec() @@ -20,11 +14,7 @@ def test_AddCSVRow_inputs(): def test_AddCSVRow_outputs(): - output_map = dict( - csv_file=dict( - extensions=None, - ), - ) + output_map = dict(csv_file=dict(extensions=None)) outputs = AddCSVRow.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_AddNoise.py b/nipype/algorithms/tests/test_auto_AddNoise.py index 5cf92e33f7..5a44d2eaa7 100644 --- a/nipype/algorithms/tests/test_auto_AddNoise.py +++ b/nipype/algorithms/tests/test_auto_AddNoise.py @@ -4,27 +4,12 @@ def test_AddNoise_inputs(): input_map = dict( - bg_dist=dict( - mandatory=True, - usedefault=True, - ), - dist=dict( - mandatory=True, - usedefault=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), - in_mask=dict( - extensions=None, - ), - out_file=dict( - extensions=None, - ), - snr=dict( - usedefault=True, - ), + bg_dist=dict(mandatory=True, usedefault=True), + dist=dict(mandatory=True, usedefault=True), + 
in_file=dict(extensions=None, mandatory=True), + in_mask=dict(extensions=None), + out_file=dict(extensions=None), + snr=dict(usedefault=True), ) inputs = AddNoise.input_spec() @@ -34,11 +19,7 @@ def test_AddNoise_inputs(): def test_AddNoise_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = AddNoise.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_ArtifactDetect.py b/nipype/algorithms/tests/test_auto_ArtifactDetect.py index 51010aea3a..ed7ff3808c 100644 --- a/nipype/algorithms/tests/test_auto_ArtifactDetect.py +++ b/nipype/algorithms/tests/test_auto_ArtifactDetect.py @@ -4,61 +4,25 @@ def test_ArtifactDetect_inputs(): input_map = dict( - bound_by_brainmask=dict( - usedefault=True, - ), - global_threshold=dict( - usedefault=True, - ), - intersect_mask=dict( - usedefault=True, - ), - mask_file=dict( - extensions=None, - ), + bound_by_brainmask=dict(usedefault=True), + global_threshold=dict(usedefault=True), + intersect_mask=dict(usedefault=True), + mask_file=dict(extensions=None), mask_threshold=dict(), - mask_type=dict( - mandatory=True, - ), + mask_type=dict(mandatory=True), norm_threshold=dict( - mandatory=True, - xor=["rotation_threshold", "translation_threshold"], - ), - parameter_source=dict( - mandatory=True, - ), - plot_type=dict( - usedefault=True, - ), - realigned_files=dict( - mandatory=True, - ), - realignment_parameters=dict( - mandatory=True, - ), - rotation_threshold=dict( - mandatory=True, - xor=["norm_threshold"], - ), - save_plot=dict( - usedefault=True, - ), - translation_threshold=dict( - mandatory=True, - xor=["norm_threshold"], - ), - use_differences=dict( - maxlen=2, - minlen=2, - usedefault=True, - ), - use_norm=dict( - requires=["norm_threshold"], - usedefault=True, - ), - zintensity_threshold=dict( - mandatory=True, - ), + mandatory=True, xor=["rotation_threshold", "translation_threshold"] + ), + parameter_source=dict(mandatory=True), + plot_type=dict(usedefault=True), + realigned_files=dict(mandatory=True), + realignment_parameters=dict(mandatory=True), + rotation_threshold=dict(mandatory=True, xor=["norm_threshold"]), + save_plot=dict(usedefault=True), + translation_threshold=dict(mandatory=True, xor=["norm_threshold"]), + use_differences=dict(maxlen=2, minlen=2, usedefault=True), + use_norm=dict(requires=["norm_threshold"], usedefault=True), + zintensity_threshold=dict(mandatory=True), ) inputs = ArtifactDetect.input_spec() diff --git a/nipype/algorithms/tests/test_auto_CalculateMedian.py b/nipype/algorithms/tests/test_auto_CalculateMedian.py index ddc8b9814d..2b3e42314e 100644 --- a/nipype/algorithms/tests/test_auto_CalculateMedian.py +++ b/nipype/algorithms/tests/test_auto_CalculateMedian.py @@ -4,11 +4,7 @@ def test_CalculateMedian_inputs(): input_map = dict( - in_files=dict(), - median_file=dict(), - median_per_file=dict( - usedefault=True, - ), + in_files=dict(), median_file=dict(), median_per_file=dict(usedefault=True) ) inputs = CalculateMedian.input_spec() @@ -18,9 +14,7 @@ def test_CalculateMedian_inputs(): def test_CalculateMedian_outputs(): - output_map = dict( - median_files=dict(), - ) + output_map = dict(median_files=dict()) outputs = CalculateMedian.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py b/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py index a67f959176..ee43585d0b 100644 --- 
a/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py +++ b/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py @@ -4,13 +4,8 @@ def test_CalculateNormalizedMoments_inputs(): input_map = dict( - moment=dict( - mandatory=True, - ), - timeseries_file=dict( - extensions=None, - mandatory=True, - ), + moment=dict(mandatory=True), + timeseries_file=dict(extensions=None, mandatory=True), ) inputs = CalculateNormalizedMoments.input_spec() @@ -20,9 +15,7 @@ def test_CalculateNormalizedMoments_inputs(): def test_CalculateNormalizedMoments_outputs(): - output_map = dict( - moments=dict(), - ) + output_map = dict(moments=dict()) outputs = CalculateNormalizedMoments.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_ComputeDVARS.py b/nipype/algorithms/tests/test_auto_ComputeDVARS.py index 5fe2d241b9..cb47ffd14e 100644 --- a/nipype/algorithms/tests/test_auto_ComputeDVARS.py +++ b/nipype/algorithms/tests/test_auto_ComputeDVARS.py @@ -4,44 +4,18 @@ def test_ComputeDVARS_inputs(): input_map = dict( - figdpi=dict( - usedefault=True, - ), - figformat=dict( - usedefault=True, - ), - figsize=dict( - usedefault=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), - in_mask=dict( - extensions=None, - mandatory=True, - ), - intensity_normalization=dict( - usedefault=True, - ), - remove_zerovariance=dict( - usedefault=True, - ), - save_all=dict( - usedefault=True, - ), - save_nstd=dict( - usedefault=True, - ), - save_plot=dict( - usedefault=True, - ), - save_std=dict( - usedefault=True, - ), - save_vxstd=dict( - usedefault=True, - ), + figdpi=dict(usedefault=True), + figformat=dict(usedefault=True), + figsize=dict(usedefault=True), + in_file=dict(extensions=None, mandatory=True), + in_mask=dict(extensions=None, mandatory=True), + intensity_normalization=dict(usedefault=True), + remove_zerovariance=dict(usedefault=True), + save_all=dict(usedefault=True), + save_nstd=dict(usedefault=True), + save_plot=dict(usedefault=True), + save_std=dict(usedefault=True), + save_vxstd=dict(usedefault=True), series_tr=dict(), ) inputs = ComputeDVARS.input_spec() @@ -56,27 +30,13 @@ def test_ComputeDVARS_outputs(): avg_nstd=dict(), avg_std=dict(), avg_vxstd=dict(), - fig_nstd=dict( - extensions=None, - ), - fig_std=dict( - extensions=None, - ), - fig_vxstd=dict( - extensions=None, - ), - out_all=dict( - extensions=None, - ), - out_nstd=dict( - extensions=None, - ), - out_std=dict( - extensions=None, - ), - out_vxstd=dict( - extensions=None, - ), + fig_nstd=dict(extensions=None), + fig_std=dict(extensions=None), + fig_vxstd=dict(extensions=None), + out_all=dict(extensions=None), + out_nstd=dict(extensions=None), + out_std=dict(extensions=None), + out_vxstd=dict(extensions=None), ) outputs = ComputeDVARS.output_spec() diff --git a/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py b/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py index 639f03770c..c2157e04cc 100644 --- a/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py +++ b/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py @@ -4,28 +4,12 @@ def test_ComputeMeshWarp_inputs(): input_map = dict( - metric=dict( - usedefault=True, - ), - out_file=dict( - extensions=None, - usedefault=True, - ), - out_warp=dict( - extensions=None, - usedefault=True, - ), - surface1=dict( - extensions=None, - mandatory=True, - ), - surface2=dict( - extensions=None, - mandatory=True, - ), - weighting=dict( - usedefault=True, - ), + metric=dict(usedefault=True), + out_file=dict(extensions=None, 
usedefault=True), + out_warp=dict(extensions=None, usedefault=True), + surface1=dict(extensions=None, mandatory=True), + surface2=dict(extensions=None, mandatory=True), + weighting=dict(usedefault=True), ) inputs = ComputeMeshWarp.input_spec() @@ -36,13 +20,7 @@ def test_ComputeMeshWarp_inputs(): def test_ComputeMeshWarp_outputs(): output_map = dict( - distance=dict(), - out_file=dict( - extensions=None, - ), - out_warp=dict( - extensions=None, - ), + distance=dict(), out_file=dict(extensions=None), out_warp=dict(extensions=None) ) outputs = ComputeMeshWarp.output_spec() diff --git a/nipype/algorithms/tests/test_auto_CreateNifti.py b/nipype/algorithms/tests/test_auto_CreateNifti.py index f5c5c4a2f5..bfa3a5a0ab 100644 --- a/nipype/algorithms/tests/test_auto_CreateNifti.py +++ b/nipype/algorithms/tests/test_auto_CreateNifti.py @@ -5,14 +5,8 @@ def test_CreateNifti_inputs(): input_map = dict( affine=dict(), - data_file=dict( - extensions=None, - mandatory=True, - ), - header_file=dict( - extensions=None, - mandatory=True, - ), + data_file=dict(extensions=None, mandatory=True), + header_file=dict(extensions=None, mandatory=True), ) inputs = CreateNifti.input_spec() @@ -22,11 +16,7 @@ def test_CreateNifti_inputs(): def test_CreateNifti_outputs(): - output_map = dict( - nifti_file=dict( - extensions=None, - ), - ) + output_map = dict(nifti_file=dict(extensions=None)) outputs = CreateNifti.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_Distance.py b/nipype/algorithms/tests/test_auto_Distance.py index 46e48342c4..9a7e696ff7 100644 --- a/nipype/algorithms/tests/test_auto_Distance.py +++ b/nipype/algorithms/tests/test_auto_Distance.py @@ -4,20 +4,10 @@ def test_Distance_inputs(): input_map = dict( - mask_volume=dict( - extensions=None, - ), - method=dict( - usedefault=True, - ), - volume1=dict( - extensions=None, - mandatory=True, - ), - volume2=dict( - extensions=None, - mandatory=True, - ), + mask_volume=dict(extensions=None), + method=dict(usedefault=True), + volume1=dict(extensions=None, mandatory=True), + volume2=dict(extensions=None, mandatory=True), ) inputs = Distance.input_spec() @@ -28,12 +18,7 @@ def test_Distance_inputs(): def test_Distance_outputs(): output_map = dict( - distance=dict(), - histogram=dict( - extensions=None, - ), - point1=dict(), - point2=dict(), + distance=dict(), histogram=dict(extensions=None), point1=dict(), point2=dict() ) outputs = Distance.output_spec() diff --git a/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py b/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py index 1308b4d97d..2307b28f43 100644 --- a/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py +++ b/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py @@ -4,36 +4,15 @@ def test_FramewiseDisplacement_inputs(): input_map = dict( - figdpi=dict( - usedefault=True, - ), - figsize=dict( - usedefault=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), - normalize=dict( - usedefault=True, - ), - out_figure=dict( - extensions=None, - usedefault=True, - ), - out_file=dict( - extensions=None, - usedefault=True, - ), - parameter_source=dict( - mandatory=True, - ), - radius=dict( - usedefault=True, - ), - save_plot=dict( - usedefault=True, - ), + figdpi=dict(usedefault=True), + figsize=dict(usedefault=True), + in_file=dict(extensions=None, mandatory=True), + normalize=dict(usedefault=True), + out_figure=dict(extensions=None, usedefault=True), + out_file=dict(extensions=None, usedefault=True), + 
parameter_source=dict(mandatory=True), + radius=dict(usedefault=True), + save_plot=dict(usedefault=True), series_tr=dict(), ) inputs = FramewiseDisplacement.input_spec() @@ -46,12 +25,8 @@ def test_FramewiseDisplacement_inputs(): def test_FramewiseDisplacement_outputs(): output_map = dict( fd_average=dict(), - out_figure=dict( - extensions=None, - ), - out_file=dict( - extensions=None, - ), + out_figure=dict(extensions=None), + out_file=dict(extensions=None), ) outputs = FramewiseDisplacement.output_spec() diff --git a/nipype/algorithms/tests/test_auto_FuzzyOverlap.py b/nipype/algorithms/tests/test_auto_FuzzyOverlap.py index e8a7fe5ef1..9ae90df356 100644 --- a/nipype/algorithms/tests/test_auto_FuzzyOverlap.py +++ b/nipype/algorithms/tests/test_auto_FuzzyOverlap.py @@ -4,22 +4,11 @@ def test_FuzzyOverlap_inputs(): input_map = dict( - in_mask=dict( - extensions=None, - ), - in_ref=dict( - mandatory=True, - ), - in_tst=dict( - mandatory=True, - ), - out_file=dict( - extensions=None, - usedefault=True, - ), - weighting=dict( - usedefault=True, - ), + in_mask=dict(extensions=None), + in_ref=dict(mandatory=True), + in_tst=dict(mandatory=True), + out_file=dict(extensions=None, usedefault=True), + weighting=dict(usedefault=True), ) inputs = FuzzyOverlap.input_spec() @@ -29,12 +18,7 @@ def test_FuzzyOverlap_inputs(): def test_FuzzyOverlap_outputs(): - output_map = dict( - class_fdi=dict(), - class_fji=dict(), - dice=dict(), - jaccard=dict(), - ) + output_map = dict(class_fdi=dict(), class_fji=dict(), dice=dict(), jaccard=dict()) outputs = FuzzyOverlap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_Gunzip.py b/nipype/algorithms/tests/test_auto_Gunzip.py index 7629feb820..d238a7c4a9 100644 --- a/nipype/algorithms/tests/test_auto_Gunzip.py +++ b/nipype/algorithms/tests/test_auto_Gunzip.py @@ -3,12 +3,7 @@ def test_Gunzip_inputs(): - input_map = dict( - in_file=dict( - extensions=None, - mandatory=True, - ), - ) + input_map = dict(in_file=dict(extensions=None, mandatory=True)) inputs = Gunzip.input_spec() for key, metadata in list(input_map.items()): @@ -17,11 +12,7 @@ def test_Gunzip_inputs(): def test_Gunzip_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Gunzip.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_ICC.py b/nipype/algorithms/tests/test_auto_ICC.py index 4a2389202c..a5ac327e80 100644 --- a/nipype/algorithms/tests/test_auto_ICC.py +++ b/nipype/algorithms/tests/test_auto_ICC.py @@ -4,13 +4,8 @@ def test_ICC_inputs(): input_map = dict( - mask=dict( - extensions=None, - mandatory=True, - ), - subjects_sessions=dict( - mandatory=True, - ), + mask=dict(extensions=None, mandatory=True), + subjects_sessions=dict(mandatory=True), ) inputs = ICC.input_spec() @@ -21,15 +16,9 @@ def test_ICC_inputs(): def test_ICC_outputs(): output_map = dict( - icc_map=dict( - extensions=None, - ), - session_var_map=dict( - extensions=None, - ), - subject_var_map=dict( - extensions=None, - ), + icc_map=dict(extensions=None), + session_var_map=dict(extensions=None), + subject_var_map=dict(extensions=None), ) outputs = ICC.output_spec() diff --git a/nipype/algorithms/tests/test_auto_Matlab2CSV.py b/nipype/algorithms/tests/test_auto_Matlab2CSV.py index 42acbd514a..d16bf2806d 100644 --- a/nipype/algorithms/tests/test_auto_Matlab2CSV.py +++ b/nipype/algorithms/tests/test_auto_Matlab2CSV.py @@ -4,13 +4,8 @@ def 
test_Matlab2CSV_inputs(): input_map = dict( - in_file=dict( - extensions=None, - mandatory=True, - ), - reshape_matrix=dict( - usedefault=True, - ), + in_file=dict(extensions=None, mandatory=True), + reshape_matrix=dict(usedefault=True), ) inputs = Matlab2CSV.input_spec() @@ -20,9 +15,7 @@ def test_Matlab2CSV_inputs(): def test_Matlab2CSV_outputs(): - output_map = dict( - csv_files=dict(), - ) + output_map = dict(csv_files=dict()) outputs = Matlab2CSV.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_MergeCSVFiles.py b/nipype/algorithms/tests/test_auto_MergeCSVFiles.py index bb7e9ed65a..097f6b2828 100644 --- a/nipype/algorithms/tests/test_auto_MergeCSVFiles.py +++ b/nipype/algorithms/tests/test_auto_MergeCSVFiles.py @@ -7,16 +7,9 @@ def test_MergeCSVFiles_inputs(): column_headings=dict(), extra_column_heading=dict(), extra_field=dict(), - in_files=dict( - mandatory=True, - ), - out_file=dict( - extensions=None, - usedefault=True, - ), - row_heading_title=dict( - usedefault=True, - ), + in_files=dict(mandatory=True), + out_file=dict(extensions=None, usedefault=True), + row_heading_title=dict(usedefault=True), row_headings=dict(), ) inputs = MergeCSVFiles.input_spec() @@ -27,11 +20,7 @@ def test_MergeCSVFiles_inputs(): def test_MergeCSVFiles_outputs(): - output_map = dict( - csv_file=dict( - extensions=None, - ), - ) + output_map = dict(csv_file=dict(extensions=None)) outputs = MergeCSVFiles.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_MergeROIs.py b/nipype/algorithms/tests/test_auto_MergeROIs.py index c43a33b686..a7f36008be 100644 --- a/nipype/algorithms/tests/test_auto_MergeROIs.py +++ b/nipype/algorithms/tests/test_auto_MergeROIs.py @@ -4,11 +4,7 @@ def test_MergeROIs_inputs(): input_map = dict( - in_files=dict(), - in_index=dict(), - in_reference=dict( - extensions=None, - ), + in_files=dict(), in_index=dict(), in_reference=dict(extensions=None) ) inputs = MergeROIs.input_spec() @@ -18,11 +14,7 @@ def test_MergeROIs_inputs(): def test_MergeROIs_outputs(): - output_map = dict( - merged_file=dict( - extensions=None, - ), - ) + output_map = dict(merged_file=dict(extensions=None)) outputs = MergeROIs.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_MeshWarpMaths.py b/nipype/algorithms/tests/test_auto_MeshWarpMaths.py index a4295b8f46..50a9e14a25 100644 --- a/nipype/algorithms/tests/test_auto_MeshWarpMaths.py +++ b/nipype/algorithms/tests/test_auto_MeshWarpMaths.py @@ -5,25 +5,11 @@ def test_MeshWarpMaths_inputs(): input_map = dict( float_trait=dict(), - in_surf=dict( - extensions=None, - mandatory=True, - ), - operation=dict( - usedefault=True, - ), - operator=dict( - mandatory=True, - usedefault=True, - ), - out_file=dict( - extensions=None, - usedefault=True, - ), - out_warp=dict( - extensions=None, - usedefault=True, - ), + in_surf=dict(extensions=None, mandatory=True), + operation=dict(usedefault=True), + operator=dict(mandatory=True, usedefault=True), + out_file=dict(extensions=None, usedefault=True), + out_warp=dict(extensions=None, usedefault=True), ) inputs = MeshWarpMaths.input_spec() @@ -33,14 +19,7 @@ def test_MeshWarpMaths_inputs(): def test_MeshWarpMaths_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - out_warp=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None), out_warp=dict(extensions=None)) outputs = MeshWarpMaths.output_spec() for key, 
metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_ModifyAffine.py b/nipype/algorithms/tests/test_auto_ModifyAffine.py index 6592e28690..ddef881629 100644 --- a/nipype/algorithms/tests/test_auto_ModifyAffine.py +++ b/nipype/algorithms/tests/test_auto_ModifyAffine.py @@ -4,12 +4,7 @@ def test_ModifyAffine_inputs(): input_map = dict( - transformation_matrix=dict( - usedefault=True, - ), - volumes=dict( - mandatory=True, - ), + transformation_matrix=dict(usedefault=True), volumes=dict(mandatory=True) ) inputs = ModifyAffine.input_spec() @@ -19,9 +14,7 @@ def test_ModifyAffine_inputs(): def test_ModifyAffine_outputs(): - output_map = dict( - transformed_volumes=dict(), - ) + output_map = dict(transformed_volumes=dict()) outputs = ModifyAffine.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py b/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py index 9e14e00595..1217a82dbf 100644 --- a/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py +++ b/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py @@ -3,12 +3,7 @@ def test_NonSteadyStateDetector_inputs(): - input_map = dict( - in_file=dict( - extensions=None, - mandatory=True, - ), - ) + input_map = dict(in_file=dict(extensions=None, mandatory=True)) inputs = NonSteadyStateDetector.input_spec() for key, metadata in list(input_map.items()): @@ -17,9 +12,7 @@ def test_NonSteadyStateDetector_inputs(): def test_NonSteadyStateDetector_outputs(): - output_map = dict( - n_volumes_to_discard=dict(), - ) + output_map = dict(n_volumes_to_discard=dict()) outputs = NonSteadyStateDetector.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py b/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py index be18979a85..ea852e286c 100644 --- a/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py +++ b/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py @@ -3,12 +3,7 @@ def test_NormalizeProbabilityMapSet_inputs(): - input_map = dict( - in_files=dict(), - in_mask=dict( - extensions=None, - ), - ) + input_map = dict(in_files=dict(), in_mask=dict(extensions=None)) inputs = NormalizeProbabilityMapSet.input_spec() for key, metadata in list(input_map.items()): @@ -17,9 +12,7 @@ def test_NormalizeProbabilityMapSet_inputs(): def test_NormalizeProbabilityMapSet_outputs(): - output_map = dict( - out_files=dict(), - ) + output_map = dict(out_files=dict()) outputs = NormalizeProbabilityMapSet.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_P2PDistance.py b/nipype/algorithms/tests/test_auto_P2PDistance.py index a5623353ec..75e9a76a1b 100644 --- a/nipype/algorithms/tests/test_auto_P2PDistance.py +++ b/nipype/algorithms/tests/test_auto_P2PDistance.py @@ -4,28 +4,12 @@ def test_P2PDistance_inputs(): input_map = dict( - metric=dict( - usedefault=True, - ), - out_file=dict( - extensions=None, - usedefault=True, - ), - out_warp=dict( - extensions=None, - usedefault=True, - ), - surface1=dict( - extensions=None, - mandatory=True, - ), - surface2=dict( - extensions=None, - mandatory=True, - ), - weighting=dict( - usedefault=True, - ), + metric=dict(usedefault=True), + out_file=dict(extensions=None, usedefault=True), + out_warp=dict(extensions=None, usedefault=True), + surface1=dict(extensions=None, mandatory=True), + surface2=dict(extensions=None, mandatory=True), + 
weighting=dict(usedefault=True), ) inputs = P2PDistance.input_spec() @@ -36,13 +20,7 @@ def test_P2PDistance_inputs(): def test_P2PDistance_outputs(): output_map = dict( - distance=dict(), - out_file=dict( - extensions=None, - ), - out_warp=dict( - extensions=None, - ), + distance=dict(), out_file=dict(extensions=None), out_warp=dict(extensions=None) ) outputs = P2PDistance.output_spec() diff --git a/nipype/algorithms/tests/test_auto_PickAtlas.py b/nipype/algorithms/tests/test_auto_PickAtlas.py index 2a29ca8d23..9369818ba0 100644 --- a/nipype/algorithms/tests/test_auto_PickAtlas.py +++ b/nipype/algorithms/tests/test_auto_PickAtlas.py @@ -4,22 +4,11 @@ def test_PickAtlas_inputs(): input_map = dict( - atlas=dict( - extensions=None, - mandatory=True, - ), - dilation_size=dict( - usedefault=True, - ), - hemi=dict( - usedefault=True, - ), - labels=dict( - mandatory=True, - ), - output_file=dict( - extensions=None, - ), + atlas=dict(extensions=None, mandatory=True), + dilation_size=dict(usedefault=True), + hemi=dict(usedefault=True), + labels=dict(mandatory=True), + output_file=dict(extensions=None), ) inputs = PickAtlas.input_spec() @@ -29,11 +18,7 @@ def test_PickAtlas_inputs(): def test_PickAtlas_outputs(): - output_map = dict( - mask_file=dict( - extensions=None, - ), - ) + output_map = dict(mask_file=dict(extensions=None)) outputs = PickAtlas.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_Similarity.py b/nipype/algorithms/tests/test_auto_Similarity.py index a5e5f583d5..e0287b04d3 100644 --- a/nipype/algorithms/tests/test_auto_Similarity.py +++ b/nipype/algorithms/tests/test_auto_Similarity.py @@ -4,23 +4,11 @@ def test_Similarity_inputs(): input_map = dict( - mask1=dict( - extensions=None, - ), - mask2=dict( - extensions=None, - ), - metric=dict( - usedefault=True, - ), - volume1=dict( - extensions=None, - mandatory=True, - ), - volume2=dict( - extensions=None, - mandatory=True, - ), + mask1=dict(extensions=None), + mask2=dict(extensions=None), + metric=dict(usedefault=True), + volume1=dict(extensions=None, mandatory=True), + volume2=dict(extensions=None, mandatory=True), ) inputs = Similarity.input_spec() @@ -30,9 +18,7 @@ def test_Similarity_inputs(): def test_Similarity_outputs(): - output_map = dict( - similarity=dict(), - ) + output_map = dict(similarity=dict()) outputs = Similarity.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_SimpleThreshold.py b/nipype/algorithms/tests/test_auto_SimpleThreshold.py index ab7141f0de..f6b47ae8c7 100644 --- a/nipype/algorithms/tests/test_auto_SimpleThreshold.py +++ b/nipype/algorithms/tests/test_auto_SimpleThreshold.py @@ -3,14 +3,7 @@ def test_SimpleThreshold_inputs(): - input_map = dict( - threshold=dict( - mandatory=True, - ), - volumes=dict( - mandatory=True, - ), - ) + input_map = dict(threshold=dict(mandatory=True), volumes=dict(mandatory=True)) inputs = SimpleThreshold.input_spec() for key, metadata in list(input_map.items()): @@ -19,9 +12,7 @@ def test_SimpleThreshold_inputs(): def test_SimpleThreshold_outputs(): - output_map = dict( - thresholded_volumes=dict(), - ) + output_map = dict(thresholded_volumes=dict()) outputs = SimpleThreshold.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_SpecifyModel.py b/nipype/algorithms/tests/test_auto_SpecifyModel.py index 15d9e4994e..4d7dd90eae 100644 --- a/nipype/algorithms/tests/test_auto_SpecifyModel.py +++ 
b/nipype/algorithms/tests/test_auto_SpecifyModel.py @@ -5,43 +5,23 @@ def test_SpecifyModel_inputs(): input_map = dict( bids_amplitude_column=dict(), - bids_condition_column=dict( - usedefault=True, - ), + bids_condition_column=dict(usedefault=True), bids_event_file=dict( - mandatory=True, - xor=["subject_info", "event_files", "bids_event_file"], + mandatory=True, xor=["subject_info", "event_files", "bids_event_file"] ), event_files=dict( - mandatory=True, - xor=["subject_info", "event_files", "bids_event_file"], - ), - functional_runs=dict( - copyfile=False, - mandatory=True, - ), - high_pass_filter_cutoff=dict( - mandatory=True, - ), - input_units=dict( - mandatory=True, - ), - outlier_files=dict( - copyfile=False, - ), - parameter_source=dict( - usedefault=True, - ), - realignment_parameters=dict( - copyfile=False, - ), + mandatory=True, xor=["subject_info", "event_files", "bids_event_file"] + ), + functional_runs=dict(copyfile=False, mandatory=True), + high_pass_filter_cutoff=dict(mandatory=True), + input_units=dict(mandatory=True), + outlier_files=dict(copyfile=False), + parameter_source=dict(usedefault=True), + realignment_parameters=dict(copyfile=False), subject_info=dict( - mandatory=True, - xor=["subject_info", "event_files", "bids_event_file"], - ), - time_repetition=dict( - mandatory=True, + mandatory=True, xor=["subject_info", "event_files", "bids_event_file"] ), + time_repetition=dict(mandatory=True), ) inputs = SpecifyModel.input_spec() @@ -51,9 +31,7 @@ def test_SpecifyModel_inputs(): def test_SpecifyModel_outputs(): - output_map = dict( - session_info=dict(), - ) + output_map = dict(session_info=dict()) outputs = SpecifyModel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_SpecifySPMModel.py b/nipype/algorithms/tests/test_auto_SpecifySPMModel.py index 64bb206359..1a08610f5e 100644 --- a/nipype/algorithms/tests/test_auto_SpecifySPMModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifySPMModel.py @@ -5,49 +5,25 @@ def test_SpecifySPMModel_inputs(): input_map = dict( bids_amplitude_column=dict(), - bids_condition_column=dict( - usedefault=True, - ), + bids_condition_column=dict(usedefault=True), bids_event_file=dict( - mandatory=True, - xor=["subject_info", "event_files", "bids_event_file"], - ), - concatenate_runs=dict( - usedefault=True, + mandatory=True, xor=["subject_info", "event_files", "bids_event_file"] ), + concatenate_runs=dict(usedefault=True), event_files=dict( - mandatory=True, - xor=["subject_info", "event_files", "bids_event_file"], - ), - functional_runs=dict( - copyfile=False, - mandatory=True, - ), - high_pass_filter_cutoff=dict( - mandatory=True, - ), - input_units=dict( - mandatory=True, - ), - outlier_files=dict( - copyfile=False, - ), - output_units=dict( - usedefault=True, - ), - parameter_source=dict( - usedefault=True, - ), - realignment_parameters=dict( - copyfile=False, - ), + mandatory=True, xor=["subject_info", "event_files", "bids_event_file"] + ), + functional_runs=dict(copyfile=False, mandatory=True), + high_pass_filter_cutoff=dict(mandatory=True), + input_units=dict(mandatory=True), + outlier_files=dict(copyfile=False), + output_units=dict(usedefault=True), + parameter_source=dict(usedefault=True), + realignment_parameters=dict(copyfile=False), subject_info=dict( - mandatory=True, - xor=["subject_info", "event_files", "bids_event_file"], - ), - time_repetition=dict( - mandatory=True, + mandatory=True, xor=["subject_info", "event_files", "bids_event_file"] ), + 
time_repetition=dict(mandatory=True), ) inputs = SpecifySPMModel.input_spec() @@ -57,9 +33,7 @@ def test_SpecifySPMModel_inputs(): def test_SpecifySPMModel_outputs(): - output_map = dict( - session_info=dict(), - ) + output_map = dict(session_info=dict()) outputs = SpecifySPMModel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_SpecifySparseModel.py b/nipype/algorithms/tests/test_auto_SpecifySparseModel.py index cac4ce5770..7e91677144 100644 --- a/nipype/algorithms/tests/test_auto_SpecifySparseModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifySparseModel.py @@ -5,63 +5,31 @@ def test_SpecifySparseModel_inputs(): input_map = dict( bids_amplitude_column=dict(), - bids_condition_column=dict( - usedefault=True, - ), + bids_condition_column=dict(usedefault=True), bids_event_file=dict( - mandatory=True, - xor=["subject_info", "event_files", "bids_event_file"], + mandatory=True, xor=["subject_info", "event_files", "bids_event_file"] ), event_files=dict( - mandatory=True, - xor=["subject_info", "event_files", "bids_event_file"], - ), - functional_runs=dict( - copyfile=False, - mandatory=True, - ), - high_pass_filter_cutoff=dict( - mandatory=True, - ), - input_units=dict( - mandatory=True, + mandatory=True, xor=["subject_info", "event_files", "bids_event_file"] ), + functional_runs=dict(copyfile=False, mandatory=True), + high_pass_filter_cutoff=dict(mandatory=True), + input_units=dict(mandatory=True), model_hrf=dict(), - outlier_files=dict( - copyfile=False, - ), - parameter_source=dict( - usedefault=True, - ), - realignment_parameters=dict( - copyfile=False, - ), + outlier_files=dict(copyfile=False), + parameter_source=dict(usedefault=True), + realignment_parameters=dict(copyfile=False), save_plot=dict(), - scale_regressors=dict( - usedefault=True, - ), - scan_onset=dict( - usedefault=True, - ), - stimuli_as_impulses=dict( - usedefault=True, - ), + scale_regressors=dict(usedefault=True), + scan_onset=dict(usedefault=True), + stimuli_as_impulses=dict(usedefault=True), subject_info=dict( - mandatory=True, - xor=["subject_info", "event_files", "bids_event_file"], - ), - time_acquisition=dict( - mandatory=True, - ), - time_repetition=dict( - mandatory=True, - ), - use_temporal_deriv=dict( - requires=["model_hrf"], - ), - volumes_in_cluster=dict( - usedefault=True, + mandatory=True, xor=["subject_info", "event_files", "bids_event_file"] ), + time_acquisition=dict(mandatory=True), + time_repetition=dict(mandatory=True), + use_temporal_deriv=dict(requires=["model_hrf"]), + volumes_in_cluster=dict(usedefault=True), ) inputs = SpecifySparseModel.input_spec() @@ -73,12 +41,8 @@ def test_SpecifySparseModel_inputs(): def test_SpecifySparseModel_outputs(): output_map = dict( session_info=dict(), - sparse_png_file=dict( - extensions=None, - ), - sparse_svg_file=dict( - extensions=None, - ), + sparse_png_file=dict(extensions=None), + sparse_svg_file=dict(extensions=None), ) outputs = SpecifySparseModel.output_spec() diff --git a/nipype/algorithms/tests/test_auto_SplitROIs.py b/nipype/algorithms/tests/test_auto_SplitROIs.py index c9eec86058..3760e32cf4 100644 --- a/nipype/algorithms/tests/test_auto_SplitROIs.py +++ b/nipype/algorithms/tests/test_auto_SplitROIs.py @@ -4,13 +4,8 @@ def test_SplitROIs_inputs(): input_map = dict( - in_file=dict( - extensions=None, - mandatory=True, - ), - in_mask=dict( - extensions=None, - ), + in_file=dict(extensions=None, mandatory=True), + in_mask=dict(extensions=None), roi_size=dict(), ) inputs = 
SplitROIs.input_spec() @@ -21,11 +16,7 @@ def test_SplitROIs_inputs(): def test_SplitROIs_outputs(): - output_map = dict( - out_files=dict(), - out_index=dict(), - out_masks=dict(), - ) + output_map = dict(out_files=dict(), out_index=dict(), out_masks=dict()) outputs = SplitROIs.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_StimulusCorrelation.py b/nipype/algorithms/tests/test_auto_StimulusCorrelation.py index 19cec418c4..5bcffb6576 100644 --- a/nipype/algorithms/tests/test_auto_StimulusCorrelation.py +++ b/nipype/algorithms/tests/test_auto_StimulusCorrelation.py @@ -4,19 +4,10 @@ def test_StimulusCorrelation_inputs(): input_map = dict( - concatenated_design=dict( - mandatory=True, - ), - intensity_values=dict( - mandatory=True, - ), - realignment_parameters=dict( - mandatory=True, - ), - spm_mat_file=dict( - extensions=None, - mandatory=True, - ), + concatenated_design=dict(mandatory=True), + intensity_values=dict(mandatory=True), + realignment_parameters=dict(mandatory=True), + spm_mat_file=dict(extensions=None, mandatory=True), ) inputs = StimulusCorrelation.input_spec() @@ -26,9 +17,7 @@ def test_StimulusCorrelation_inputs(): def test_StimulusCorrelation_outputs(): - output_map = dict( - stimcorr_files=dict(), - ) + output_map = dict(stimcorr_files=dict()) outputs = StimulusCorrelation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_TCompCor.py b/nipype/algorithms/tests/test_auto_TCompCor.py index 0f802cc92e..d4df6380ab 100644 --- a/nipype/algorithms/tests/test_auto_TCompCor.py +++ b/nipype/algorithms/tests/test_auto_TCompCor.py @@ -4,59 +4,25 @@ def test_TCompCor_inputs(): input_map = dict( - components_file=dict( - usedefault=True, - ), - failure_mode=dict( - usedefault=True, - ), + components_file=dict(usedefault=True), + failure_mode=dict(usedefault=True), header_prefix=dict(), - high_pass_cutoff=dict( - usedefault=True, - ), - ignore_initial_volumes=dict( - usedefault=True, - ), + high_pass_cutoff=dict(usedefault=True), + ignore_initial_volumes=dict(usedefault=True), mask_files=dict(), - mask_index=dict( - requires=["mask_files"], - xor=["merge_method"], - ), + mask_index=dict(requires=["mask_files"], xor=["merge_method"]), mask_names=dict(), - merge_method=dict( - requires=["mask_files"], - xor=["mask_index"], - ), - num_components=dict( - xor=["variance_threshold"], - ), - percentile_threshold=dict( - usedefault=True, - ), - pre_filter=dict( - usedefault=True, - ), - realigned_file=dict( - extensions=None, - mandatory=True, - ), - regress_poly_degree=dict( - usedefault=True, - ), + merge_method=dict(requires=["mask_files"], xor=["mask_index"]), + num_components=dict(xor=["variance_threshold"]), + percentile_threshold=dict(usedefault=True), + pre_filter=dict(usedefault=True), + realigned_file=dict(extensions=None, mandatory=True), + regress_poly_degree=dict(usedefault=True), repetition_time=dict(), - save_metadata=dict( - usedefault=True, - ), - save_pre_filter=dict( - usedefault=True, - ), - use_regress_poly=dict( - deprecated="0.15.0", - new_name="pre_filter", - ), - variance_threshold=dict( - xor=["num_components"], - ), + save_metadata=dict(usedefault=True), + save_pre_filter=dict(usedefault=True), + use_regress_poly=dict(deprecated="0.15.0", new_name="pre_filter"), + variance_threshold=dict(xor=["num_components"]), ) inputs = TCompCor.input_spec() @@ -67,16 +33,10 @@ def test_TCompCor_inputs(): def test_TCompCor_outputs(): output_map = dict( - 
components_file=dict( - extensions=None, - ), + components_file=dict(extensions=None), high_variance_masks=dict(), - metadata_file=dict( - extensions=None, - ), - pre_filter_file=dict( - extensions=None, - ), + metadata_file=dict(extensions=None), + pre_filter_file=dict(extensions=None), ) outputs = TCompCor.output_spec() diff --git a/nipype/algorithms/tests/test_auto_WarpPoints.py b/nipype/algorithms/tests/test_auto_WarpPoints.py index fc72866d83..f8b99dfe9a 100644 --- a/nipype/algorithms/tests/test_auto_WarpPoints.py +++ b/nipype/algorithms/tests/test_auto_WarpPoints.py @@ -4,10 +4,7 @@ def test_WarpPoints_inputs(): input_map = dict( - interp=dict( - mandatory=True, - usedefault=True, - ), + interp=dict(mandatory=True, usedefault=True), out_points=dict( extensions=None, keep_extension=True, @@ -15,14 +12,8 @@ def test_WarpPoints_inputs(): name_template="%s_warped", output_name="out_points", ), - points=dict( - extensions=None, - mandatory=True, - ), - warp=dict( - extensions=None, - mandatory=True, - ), + points=dict(extensions=None, mandatory=True), + warp=dict(extensions=None, mandatory=True), ) inputs = WarpPoints.input_spec() @@ -32,11 +23,7 @@ def test_WarpPoints_inputs(): def test_WarpPoints_outputs(): - output_map = dict( - out_points=dict( - extensions=None, - ), - ) + output_map = dict(out_points=dict(extensions=None)) outputs = WarpPoints.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_ABoverlap.py b/nipype/interfaces/afni/tests/test_auto_ABoverlap.py index 501f5331b7..63e7c79717 100644 --- a/nipype/interfaces/afni/tests/test_auto_ABoverlap.py +++ b/nipype/interfaces/afni/tests/test_auto_ABoverlap.py @@ -4,46 +4,20 @@ def test_ABoverlap_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), in_file_a=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-3, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-3 ), in_file_b=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-2, - ), - no_automask=dict( - argstr="-no_automask", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_file=dict( - argstr=" |& tee %s", - extensions=None, - position=-1, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-2 ), + no_automask=dict(argstr="-no_automask"), + num_threads=dict(nohash=True, usedefault=True), + out_file=dict(argstr=" |& tee %s", extensions=None, position=-1), outputtype=dict(), - quiet=dict( - argstr="-quiet", - ), - verb=dict( - argstr="-verb", - ), + quiet=dict(argstr="-quiet"), + verb=dict(argstr="-verb"), ) inputs = ABoverlap.input_spec() @@ -53,11 +27,7 @@ def test_ABoverlap_inputs(): def test_ABoverlap_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = ABoverlap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_AFNICommand.py b/nipype/interfaces/afni/tests/test_auto_AFNICommand.py index 941667f49f..b9e43c583a 100644 --- a/nipype/interfaces/afni/tests/test_auto_AFNICommand.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNICommand.py @@ -4,17 +4,9 @@ def test_AFNICommand_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - 
num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, diff --git a/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py b/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py index de23f6c05b..47e6d2186f 100644 --- a/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py @@ -3,15 +3,7 @@ def test_AFNICommandBase_inputs(): - input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - ) + input_map = dict(args=dict(argstr="%s"), environ=dict(nohash=True, usedefault=True)) inputs = AFNICommandBase.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py b/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py index fd4682947b..c4609dc9d9 100644 --- a/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py @@ -4,17 +4,9 @@ def test_AFNIPythonCommand_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, diff --git a/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py b/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py index 6983e839fb..ae95b3b575 100644 --- a/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py @@ -4,35 +4,15 @@ def test_AFNItoNIFTI_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - denote=dict( - argstr="-denote", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + denote=dict(argstr="-denote"), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - newid=dict( - argstr="-newid", - xor=["oldid"], - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - oldid=dict( - argstr="-oldid", - xor=["newid"], + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 ), + newid=dict(argstr="-newid", xor=["oldid"]), + num_threads=dict(nohash=True, usedefault=True), + oldid=dict(argstr="-oldid", xor=["newid"]), out_file=dict( argstr="-prefix %s", extensions=None, @@ -41,9 +21,7 @@ def test_AFNItoNIFTI_inputs(): name_template="%s.nii", ), outputtype=dict(), - pure=dict( - argstr="-pure", - ), + pure=dict(argstr="-pure"), ) inputs = AFNItoNIFTI.input_spec() @@ -53,11 +31,7 @@ def test_AFNItoNIFTI_inputs(): def test_AFNItoNIFTI_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = AFNItoNIFTI.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py b/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py index a3b376f55b..e5d49ecec2 100644 --- a/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py +++ b/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py @@ -4,57 +4,20 @@ def test_AlignEpiAnatPy_inputs(): input_map = dict( - anat=dict( - argstr="-anat %s", 
- copyfile=False, - extensions=None, - mandatory=True, - ), - anat2epi=dict( - argstr="-anat2epi", - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - epi2anat=dict( - argstr="-epi2anat", - ), - epi_base=dict( - argstr="-epi_base %s", - mandatory=True, - ), - epi_strip=dict( - argstr="-epi_strip %s", - ), - in_file=dict( - argstr="-epi %s", - copyfile=False, - extensions=None, - mandatory=True, - ), + anat=dict(argstr="-anat %s", copyfile=False, extensions=None, mandatory=True), + anat2epi=dict(argstr="-anat2epi"), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + epi2anat=dict(argstr="-epi2anat"), + epi_base=dict(argstr="-epi_base %s", mandatory=True), + epi_strip=dict(argstr="-epi_strip %s"), + in_file=dict(argstr="-epi %s", copyfile=False, extensions=None, mandatory=True), outputtype=dict(), - py27_path=dict( - usedefault=True, - ), - save_skullstrip=dict( - argstr="-save_skullstrip", - ), - suffix=dict( - argstr="-suffix %s", - usedefault=True, - ), - tshift=dict( - argstr="-tshift %s", - usedefault=True, - ), - volreg=dict( - argstr="-volreg %s", - usedefault=True, - ), + py27_path=dict(usedefault=True), + save_skullstrip=dict(argstr="-save_skullstrip"), + suffix=dict(argstr="-suffix %s", usedefault=True), + tshift=dict(argstr="-tshift %s", usedefault=True), + volreg=dict(argstr="-volreg %s", usedefault=True), ) inputs = AlignEpiAnatPy.input_spec() @@ -65,36 +28,16 @@ def test_AlignEpiAnatPy_inputs(): def test_AlignEpiAnatPy_outputs(): output_map = dict( - anat_al_mat=dict( - extensions=None, - ), - anat_al_orig=dict( - extensions=None, - ), - epi_al_mat=dict( - extensions=None, - ), - epi_al_orig=dict( - extensions=None, - ), - epi_al_tlrc_mat=dict( - extensions=None, - ), - epi_reg_al_mat=dict( - extensions=None, - ), - epi_tlrc_al=dict( - extensions=None, - ), - epi_vr_al_mat=dict( - extensions=None, - ), - epi_vr_motion=dict( - extensions=None, - ), - skullstrip=dict( - extensions=None, - ), + anat_al_mat=dict(extensions=None), + anat_al_orig=dict(extensions=None), + epi_al_mat=dict(extensions=None), + epi_al_orig=dict(extensions=None), + epi_al_tlrc_mat=dict(extensions=None), + epi_reg_al_mat=dict(extensions=None), + epi_tlrc_al=dict(extensions=None), + epi_vr_al_mat=dict(extensions=None), + epi_vr_motion=dict(extensions=None), + skullstrip=dict(extensions=None), ) outputs = AlignEpiAnatPy.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Allineate.py b/nipype/interfaces/afni/tests/test_auto_Allineate.py index afe6c3f24d..39f5e9b1bd 100644 --- a/nipype/interfaces/afni/tests/test_auto_Allineate.py +++ b/nipype/interfaces/afni/tests/test_auto_Allineate.py @@ -10,48 +10,20 @@ def test_Allineate_inputs(): position=-1, xor=["out_file", "out_matrix", "out_param_file", "out_weight_file"], ), - args=dict( - argstr="%s", - ), - autobox=dict( - argstr="-autobox", - ), - automask=dict( - argstr="-automask+%d", - ), - autoweight=dict( - argstr="-autoweight%s", - ), - center_of_mass=dict( - argstr="-cmass%s", - ), - check=dict( - argstr="-check %s", - ), - convergence=dict( - argstr="-conv %f", - ), - cost=dict( - argstr="-cost %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - epi=dict( - argstr="-EPI", - ), - final_interpolation=dict( - argstr="-final %s", - ), - fine_blur=dict( - argstr="-fineblur %f", - ), + args=dict(argstr="%s"), + autobox=dict(argstr="-autobox"), + automask=dict(argstr="-automask+%d"), + autoweight=dict(argstr="-autoweight%s"), + center_of_mass=dict(argstr="-cmass%s"), + 
check=dict(argstr="-check %s"), + convergence=dict(argstr="-conv %f"), + cost=dict(argstr="-cost %s"), + environ=dict(nohash=True, usedefault=True), + epi=dict(argstr="-EPI"), + final_interpolation=dict(argstr="-final %s"), + fine_blur=dict(argstr="-fineblur %f"), in_file=dict( - argstr="-source %s", - copyfile=False, - extensions=None, - mandatory=True, + argstr="-source %s", copyfile=False, extensions=None, mandatory=True ), in_matrix=dict( argstr="-1Dmatrix_apply %s", @@ -60,57 +32,23 @@ def test_Allineate_inputs(): xor=["out_matrix"], ), in_param_file=dict( - argstr="-1Dparam_apply %s", - extensions=None, - xor=["out_param_file"], - ), - interpolation=dict( - argstr="-interp %s", - ), - master=dict( - argstr="-master %s", - extensions=None, - ), - maxrot=dict( - argstr="-maxrot %f", - ), - maxscl=dict( - argstr="-maxscl %f", - ), - maxshf=dict( - argstr="-maxshf %f", - ), - maxshr=dict( - argstr="-maxshr %f", - ), - newgrid=dict( - argstr="-newgrid %f", - ), - nmatch=dict( - argstr="-nmatch %d", - ), - no_pad=dict( - argstr="-nopad", - ), - nomask=dict( - argstr="-nomask", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - nwarp=dict( - argstr="-nwarp %s", - ), - nwarp_fixdep=dict( - argstr="-nwarp_fixdep%s...", - ), - nwarp_fixmot=dict( - argstr="-nwarp_fixmot%s...", - ), - one_pass=dict( - argstr="-onepass", - ), + argstr="-1Dparam_apply %s", extensions=None, xor=["out_param_file"] + ), + interpolation=dict(argstr="-interp %s"), + master=dict(argstr="-master %s", extensions=None), + maxrot=dict(argstr="-maxrot %f"), + maxscl=dict(argstr="-maxscl %f"), + maxshf=dict(argstr="-maxshf %f"), + maxshr=dict(argstr="-maxshr %f"), + newgrid=dict(argstr="-newgrid %f"), + nmatch=dict(argstr="-nmatch %d"), + no_pad=dict(argstr="-nopad"), + nomask=dict(argstr="-nomask"), + num_threads=dict(nohash=True, usedefault=True), + nwarp=dict(argstr="-nwarp %s"), + nwarp_fixdep=dict(argstr="-nwarp_fixdep%s..."), + nwarp_fixmot=dict(argstr="-nwarp_fixmot%s..."), + one_pass=dict(argstr="-onepass"), out_file=dict( argstr="-prefix %s", extensions=None, @@ -120,80 +58,35 @@ def test_Allineate_inputs(): xor=["allcostx"], ), out_matrix=dict( - argstr="-1Dmatrix_save %s", - extensions=None, - xor=["in_matrix", "allcostx"], + argstr="-1Dmatrix_save %s", extensions=None, xor=["in_matrix", "allcostx"] ), out_param_file=dict( argstr="-1Dparam_save %s", extensions=None, xor=["in_param_file", "allcostx"], ), - out_weight_file=dict( - argstr="-wtprefix %s", - extensions=None, - xor=["allcostx"], - ), + out_weight_file=dict(argstr="-wtprefix %s", extensions=None, xor=["allcostx"]), outputtype=dict(), - overwrite=dict( - argstr="-overwrite", - ), - quiet=dict( - argstr="-quiet", - ), - reference=dict( - argstr="-base %s", - extensions=None, - ), - replacebase=dict( - argstr="-replacebase", - ), - replacemeth=dict( - argstr="-replacemeth %s", - ), - source_automask=dict( - argstr="-source_automask+%d", - ), - source_mask=dict( - argstr="-source_mask %s", - extensions=None, - ), - two_best=dict( - argstr="-twobest %d", - ), - two_blur=dict( - argstr="-twoblur %f", - ), - two_first=dict( - argstr="-twofirst", - ), - two_pass=dict( - argstr="-twopass", - ), - usetemp=dict( - argstr="-usetemp", - ), - verbose=dict( - argstr="-verb", - ), - warp_type=dict( - argstr="-warp %s", - ), - warpfreeze=dict( - argstr="-warpfreeze", - ), - weight=dict( - argstr="-weight %s", - ), + overwrite=dict(argstr="-overwrite"), + quiet=dict(argstr="-quiet"), + reference=dict(argstr="-base %s", extensions=None), + 
replacebase=dict(argstr="-replacebase"), + replacemeth=dict(argstr="-replacemeth %s"), + source_automask=dict(argstr="-source_automask+%d"), + source_mask=dict(argstr="-source_mask %s", extensions=None), + two_best=dict(argstr="-twobest %d"), + two_blur=dict(argstr="-twoblur %f"), + two_first=dict(argstr="-twofirst"), + two_pass=dict(argstr="-twopass"), + usetemp=dict(argstr="-usetemp"), + verbose=dict(argstr="-verb"), + warp_type=dict(argstr="-warp %s"), + warpfreeze=dict(argstr="-warpfreeze"), + weight=dict(argstr="-weight %s"), weight_file=dict( - argstr="-weight %s", - deprecated="1.0.0", - extensions=None, - new_name="weight", - ), - zclip=dict( - argstr="-zclip", + argstr="-weight %s", deprecated="1.0.0", extensions=None, new_name="weight" ), + zclip=dict(argstr="-zclip"), ) inputs = Allineate.input_spec() @@ -204,21 +97,11 @@ def test_Allineate_inputs(): def test_Allineate_outputs(): output_map = dict( - allcostx=dict( - extensions=None, - ), - out_file=dict( - extensions=None, - ), - out_matrix=dict( - extensions=None, - ), - out_param_file=dict( - extensions=None, - ), - out_weight_file=dict( - extensions=None, - ), + allcostx=dict(extensions=None), + out_file=dict(extensions=None), + out_matrix=dict(extensions=None), + out_param_file=dict(extensions=None), + out_weight_file=dict(extensions=None), ) outputs = Allineate.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py b/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py index eebfc73b6b..10d73d8011 100644 --- a/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py +++ b/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py @@ -4,26 +4,13 @@ def test_AutoTLRC_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - base=dict( - argstr="-base %s", - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + base=dict(argstr="-base %s", mandatory=True), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="-input %s", - copyfile=False, - extensions=None, - mandatory=True, - ), - no_ss=dict( - argstr="-no_ss", + argstr="-input %s", copyfile=False, extensions=None, mandatory=True ), + no_ss=dict(argstr="-no_ss"), outputtype=dict(), ) inputs = AutoTLRC.input_spec() @@ -34,11 +21,7 @@ def test_AutoTLRC_inputs(): def test_AutoTLRC_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = AutoTLRC.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py b/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py index 14c59cba0c..1d07a0002a 100644 --- a/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py +++ b/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py @@ -4,40 +4,18 @@ def test_AutoTcorrelate_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - eta2=dict( - argstr="-eta2", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + eta2=dict(argstr="-eta2"), in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - mask=dict( - argstr="-mask %s", - extensions=None, - ), - mask_only_targets=dict( - argstr="-mask_only_targets", - xor=["mask_source"], + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 ), + mask=dict(argstr="-mask %s", extensions=None), + mask_only_targets=dict(argstr="-mask_only_targets", xor=["mask_source"]), 
mask_source=dict( - argstr="-mask_source %s", - extensions=None, - xor=["mask_only_targets"], - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="-mask_source %s", extensions=None, xor=["mask_only_targets"] ), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -45,9 +23,7 @@ def test_AutoTcorrelate_inputs(): name_template="%s_similarity_matrix.1D", ), outputtype=dict(), - polort=dict( - argstr="-polort %d", - ), + polort=dict(argstr="-polort %d"), ) inputs = AutoTcorrelate.input_spec() @@ -57,11 +33,7 @@ def test_AutoTcorrelate_inputs(): def test_AutoTcorrelate_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = AutoTcorrelate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Autobox.py b/nipype/interfaces/afni/tests/test_auto_Autobox.py index 8a13b14742..eda0062a6b 100644 --- a/nipype/interfaces/afni/tests/test_auto_Autobox.py +++ b/nipype/interfaces/afni/tests/test_auto_Autobox.py @@ -4,26 +4,13 @@ def test_Autobox_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="-input %s", - copyfile=False, - extensions=None, - mandatory=True, - ), - no_clustering=dict( - argstr="-noclust", - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="-input %s", copyfile=False, extensions=None, mandatory=True ), + no_clustering=dict(argstr="-noclust"), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -31,9 +18,7 @@ def test_Autobox_inputs(): name_template="%s_autobox", ), outputtype=dict(), - padding=dict( - argstr="-npad %d", - ), + padding=dict(argstr="-npad %d"), ) inputs = Autobox.input_spec() @@ -44,9 +29,7 @@ def test_Autobox_inputs(): def test_Autobox_outputs(): output_map = dict( - out_file=dict( - extensions=None, - ), + out_file=dict(extensions=None), x_max=dict(), x_min=dict(), y_max=dict(), diff --git a/nipype/interfaces/afni/tests/test_auto_Automask.py b/nipype/interfaces/afni/tests/test_auto_Automask.py index 1c2a3c4ee9..5c21392ddf 100644 --- a/nipype/interfaces/afni/tests/test_auto_Automask.py +++ b/nipype/interfaces/afni/tests/test_auto_Automask.py @@ -4,39 +4,21 @@ def test_Automask_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), brain_file=dict( argstr="-apply_prefix %s", extensions=None, name_source="in_file", name_template="%s_masked", ), - clfrac=dict( - argstr="-clfrac %s", - ), - dilate=dict( - argstr="-dilate %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - erode=dict( - argstr="-erode %s", - ), + clfrac=dict(argstr="-clfrac %s"), + dilate=dict(argstr="-dilate %s"), + environ=dict(nohash=True, usedefault=True), + erode=dict(argstr="-erode %s"), in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 ), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -53,14 +35,7 @@ def test_Automask_inputs(): def test_Automask_outputs(): - output_map = dict( - brain_file=dict( - extensions=None, - ), - out_file=dict( - extensions=None, - ), - ) + output_map = 
dict(brain_file=dict(extensions=None), out_file=dict(extensions=None)) outputs = Automask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Axialize.py b/nipype/interfaces/afni/tests/test_auto_Axialize.py index bac640d601..0145b89cd1 100644 --- a/nipype/interfaces/afni/tests/test_auto_Axialize.py +++ b/nipype/interfaces/afni/tests/test_auto_Axialize.py @@ -4,35 +4,15 @@ def test_Axialize_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - axial=dict( - argstr="-axial", - xor=["coronal", "sagittal"], - ), - coronal=dict( - argstr="-coronal", - xor=["sagittal", "axial"], - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + axial=dict(argstr="-axial", xor=["coronal", "sagittal"]), + coronal=dict(argstr="-coronal", xor=["sagittal", "axial"]), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-2, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - orientation=dict( - argstr="-orient %s", + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-2 ), + num_threads=dict(nohash=True, usedefault=True), + orientation=dict(argstr="-orient %s"), out_file=dict( argstr="-prefix %s", extensions=None, @@ -40,13 +20,8 @@ def test_Axialize_inputs(): name_template="%s_axialize", ), outputtype=dict(), - sagittal=dict( - argstr="-sagittal", - xor=["coronal", "axial"], - ), - verb=dict( - argstr="-verb", - ), + sagittal=dict(argstr="-sagittal", xor=["coronal", "axial"]), + verb=dict(argstr="-verb"), ) inputs = Axialize.input_spec() @@ -56,11 +31,7 @@ def test_Axialize_inputs(): def test_Axialize_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Axialize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Bandpass.py b/nipype/interfaces/afni/tests/test_auto_Bandpass.py index 8ae9966240..d361c4ed2c 100644 --- a/nipype/interfaces/afni/tests/test_auto_Bandpass.py +++ b/nipype/interfaces/afni/tests/test_auto_Bandpass.py @@ -4,70 +4,25 @@ def test_Bandpass_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - automask=dict( - argstr="-automask", - ), - blur=dict( - argstr="-blur %f", - ), - despike=dict( - argstr="-despike", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - highpass=dict( - argstr="%f", - mandatory=True, - position=-3, - ), + args=dict(argstr="%s"), + automask=dict(argstr="-automask"), + blur=dict(argstr="-blur %f"), + despike=dict(argstr="-despike"), + environ=dict(nohash=True, usedefault=True), + highpass=dict(argstr="%f", mandatory=True, position=-3), in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - localPV=dict( - argstr="-localPV %f", - ), - lowpass=dict( - argstr="%f", - mandatory=True, - position=-2, - ), - mask=dict( - argstr="-mask %s", - extensions=None, - position=2, - ), - nfft=dict( - argstr="-nfft %d", - ), - no_detrend=dict( - argstr="-nodetrend", - ), - normalize=dict( - argstr="-norm", - ), - notrans=dict( - argstr="-notrans", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - orthogonalize_dset=dict( - argstr="-dsort %s", - extensions=None, - ), - orthogonalize_file=dict( - argstr="-ort %s", - ), + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 + ), + localPV=dict(argstr="-localPV %f"), + 
lowpass=dict(argstr="%f", mandatory=True, position=-2), + mask=dict(argstr="-mask %s", extensions=None, position=2), + nfft=dict(argstr="-nfft %d"), + no_detrend=dict(argstr="-nodetrend"), + normalize=dict(argstr="-norm"), + notrans=dict(argstr="-notrans"), + num_threads=dict(nohash=True, usedefault=True), + orthogonalize_dset=dict(argstr="-dsort %s", extensions=None), + orthogonalize_file=dict(argstr="-ort %s"), out_file=dict( argstr="-prefix %s", extensions=None, @@ -76,9 +31,7 @@ def test_Bandpass_inputs(): position=1, ), outputtype=dict(), - tr=dict( - argstr="-dt %f", - ), + tr=dict(argstr="-dt %f"), ) inputs = Bandpass.input_spec() @@ -88,11 +41,7 @@ def test_Bandpass_inputs(): def test_Bandpass_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Bandpass.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_BlurInMask.py b/nipype/interfaces/afni/tests/test_auto_BlurInMask.py index 91114611dc..9e66102557 100644 --- a/nipype/interfaces/afni/tests/test_auto_BlurInMask.py +++ b/nipype/interfaces/afni/tests/test_auto_BlurInMask.py @@ -4,23 +4,11 @@ def test_BlurInMask_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - automask=dict( - argstr="-automask", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - float_out=dict( - argstr="-float", - ), - fwhm=dict( - argstr="-FWHM %f", - mandatory=True, - ), + args=dict(argstr="%s"), + automask=dict(argstr="-automask"), + environ=dict(nohash=True, usedefault=True), + float_out=dict(argstr="-float"), + fwhm=dict(argstr="-FWHM %f", mandatory=True), in_file=dict( argstr="-input %s", copyfile=False, @@ -28,22 +16,10 @@ def test_BlurInMask_inputs(): mandatory=True, position=1, ), - mask=dict( - argstr="-mask %s", - extensions=None, - ), - multimask=dict( - argstr="-Mmask %s", - extensions=None, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - options=dict( - argstr="%s", - position=2, - ), + mask=dict(argstr="-mask %s", extensions=None), + multimask=dict(argstr="-Mmask %s", extensions=None), + num_threads=dict(nohash=True, usedefault=True), + options=dict(argstr="%s", position=2), out_file=dict( argstr="-prefix %s", extensions=None, @@ -52,9 +28,7 @@ def test_BlurInMask_inputs(): position=-1, ), outputtype=dict(), - preserve=dict( - argstr="-preserve", - ), + preserve=dict(argstr="-preserve"), ) inputs = BlurInMask.input_spec() @@ -64,11 +38,7 @@ def test_BlurInMask_inputs(): def test_BlurInMask_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = BlurInMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py b/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py index f164ae815e..0af245f3dc 100644 --- a/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py +++ b/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py @@ -4,39 +4,15 @@ def test_BlurToFWHM_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - automask=dict( - argstr="-automask", - ), - blurmaster=dict( - argstr="-blurmaster %s", - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fwhm=dict( - argstr="-FWHM %f", - ), - fwhmxy=dict( - argstr="-FWHMxy %f", - ), - in_file=dict( - argstr="-input %s", - extensions=None, - mandatory=True, - ), - mask=dict( - argstr="-mask %s", - extensions=None, - ), - 
num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + automask=dict(argstr="-automask"), + blurmaster=dict(argstr="-blurmaster %s", extensions=None), + environ=dict(nohash=True, usedefault=True), + fwhm=dict(argstr="-FWHM %f"), + fwhmxy=dict(argstr="-FWHMxy %f"), + in_file=dict(argstr="-input %s", extensions=None, mandatory=True), + mask=dict(argstr="-mask %s", extensions=None), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -53,11 +29,7 @@ def test_BlurToFWHM_inputs(): def test_BlurToFWHM_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = BlurToFWHM.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_BrickStat.py b/nipype/interfaces/afni/tests/test_auto_BrickStat.py index a366953a5b..4d1d10c5d0 100644 --- a/nipype/interfaces/afni/tests/test_auto_BrickStat.py +++ b/nipype/interfaces/afni/tests/test_auto_BrickStat.py @@ -4,46 +4,17 @@ def test_BrickStat_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - ), - mask=dict( - argstr="-mask %s", - extensions=None, - position=2, - ), - max=dict( - argstr="-max", - ), - mean=dict( - argstr="-mean", - ), - min=dict( - argstr="-min", - position=1, - ), - percentile=dict( - argstr="-percentile %.3f %.3f %.3f", - ), - slow=dict( - argstr="-slow", - ), - sum=dict( - argstr="-sum", - ), - var=dict( - argstr="-var", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), + mask=dict(argstr="-mask %s", extensions=None, position=2), + max=dict(argstr="-max"), + mean=dict(argstr="-mean"), + min=dict(argstr="-min", position=1), + percentile=dict(argstr="-percentile %.3f %.3f %.3f"), + slow=dict(argstr="-slow"), + sum=dict(argstr="-sum"), + var=dict(argstr="-var"), ) inputs = BrickStat.input_spec() @@ -53,9 +24,7 @@ def test_BrickStat_inputs(): def test_BrickStat_outputs(): - output_map = dict( - min_val=dict(), - ) + output_map = dict(min_val=dict()) outputs = BrickStat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Bucket.py b/nipype/interfaces/afni/tests/test_auto_Bucket.py index 34dbd18bc2..9694294ada 100644 --- a/nipype/interfaces/afni/tests/test_auto_Bucket.py +++ b/nipype/interfaces/afni/tests/test_auto_Bucket.py @@ -4,27 +4,11 @@ def test_Bucket_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - mandatory=True, - position=-1, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_file=dict( - argstr="-prefix %s", - extensions=None, - name_template="buck", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", mandatory=True, position=-1), + num_threads=dict(nohash=True, usedefault=True), + out_file=dict(argstr="-prefix %s", extensions=None, name_template="buck"), outputtype=dict(), ) inputs = Bucket.input_spec() @@ -35,11 +19,7 @@ def test_Bucket_inputs(): def test_Bucket_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Bucket.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Calc.py b/nipype/interfaces/afni/tests/test_auto_Calc.py index dc50380317..0b4947262d 100644 --- a/nipype/interfaces/afni/tests/test_auto_Calc.py +++ b/nipype/interfaces/afni/tests/test_auto_Calc.py @@ -4,42 +4,14 @@ def test_Calc_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - expr=dict( - argstr='-expr "%s"', - mandatory=True, - position=3, - ), - in_file_a=dict( - argstr="-a %s", - extensions=None, - mandatory=True, - position=0, - ), - in_file_b=dict( - argstr="-b %s", - extensions=None, - position=1, - ), - in_file_c=dict( - argstr="-c %s", - extensions=None, - position=2, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - other=dict( - argstr="", - extensions=None, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + expr=dict(argstr='-expr "%s"', mandatory=True, position=3), + in_file_a=dict(argstr="-a %s", extensions=None, mandatory=True, position=0), + in_file_b=dict(argstr="-b %s", extensions=None, position=1), + in_file_c=dict(argstr="-c %s", extensions=None, position=2), + num_threads=dict(nohash=True, usedefault=True), + other=dict(argstr="", extensions=None), out_file=dict( argstr="-prefix %s", extensions=None, @@ -47,16 +19,10 @@ def test_Calc_inputs(): name_template="%s_calc", ), outputtype=dict(), - overwrite=dict( - argstr="-overwrite", - ), + overwrite=dict(argstr="-overwrite"), single_idx=dict(), - start_idx=dict( - requires=["stop_idx"], - ), - stop_idx=dict( - requires=["start_idx"], - ), + start_idx=dict(requires=["stop_idx"]), + stop_idx=dict(requires=["start_idx"]), ) inputs = Calc.input_spec() @@ -66,11 +32,7 @@ def test_Calc_inputs(): def test_Calc_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Calc.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Cat.py b/nipype/interfaces/afni/tests/test_auto_Cat.py index e5c76b34b1..d6b8b796c8 100644 --- a/nipype/interfaces/afni/tests/test_auto_Cat.py +++ b/nipype/interfaces/afni/tests/test_auto_Cat.py @@ -4,41 +4,21 @@ def test_Cat_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr="%s", - mandatory=True, - position=-2, - ), - keepfree=dict( - argstr="-nonfixed", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - omitconst=dict( - argstr="-nonconst", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_files=dict(argstr="%s", mandatory=True, position=-2), + keepfree=dict(argstr="-nonfixed"), + num_threads=dict(nohash=True, usedefault=True), + omitconst=dict(argstr="-nonconst"), out_cint=dict( - xor=["out_format", "out_nice", "out_double", "out_fint", "out_int"], + xor=["out_format", "out_nice", "out_double", "out_fint", "out_int"] ), out_double=dict( argstr="-d", xor=["out_format", "out_nice", "out_int", "out_fint", "out_cint"], ), out_file=dict( - argstr="> %s", - extensions=None, - mandatory=True, - position=-1, - usedefault=True, + argstr="> %s", extensions=None, mandatory=True, position=-1, usedefault=True ), out_fint=dict( argstr="-f", @@ -57,12 +37,8 @@ def test_Cat_inputs(): xor=["out_format", "out_int", "out_double", "out_fint", "out_cint"], ), outputtype=dict(), - sel=dict( - argstr="-sel %s", - ), - stack=dict( - argstr="-stack", - ), + 
sel=dict(argstr="-sel %s"), + stack=dict(argstr="-stack"), ) inputs = Cat.input_spec() @@ -72,11 +48,7 @@ def test_Cat_inputs(): def test_Cat_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Cat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_CatMatvec.py b/nipype/interfaces/afni/tests/test_auto_CatMatvec.py index 6b6c2630f6..83edf1e484 100644 --- a/nipype/interfaces/afni/tests/test_auto_CatMatvec.py +++ b/nipype/interfaces/afni/tests/test_auto_CatMatvec.py @@ -4,34 +4,13 @@ def test_CatMatvec_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fourxfour=dict( - argstr="-4x4", - xor=["matrix", "oneline"], - ), - in_file=dict( - argstr="%s", - mandatory=True, - position=-2, - ), - matrix=dict( - argstr="-MATRIX", - xor=["oneline", "fourxfour"], - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - oneline=dict( - argstr="-ONELINE", - xor=["matrix", "fourxfour"], - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + fourxfour=dict(argstr="-4x4", xor=["matrix", "oneline"]), + in_file=dict(argstr="%s", mandatory=True, position=-2), + matrix=dict(argstr="-MATRIX", xor=["oneline", "fourxfour"]), + num_threads=dict(nohash=True, usedefault=True), + oneline=dict(argstr="-ONELINE", xor=["matrix", "fourxfour"]), out_file=dict( argstr=" > %s", extensions=None, @@ -51,11 +30,7 @@ def test_CatMatvec_inputs(): def test_CatMatvec_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = CatMatvec.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_CenterMass.py b/nipype/interfaces/afni/tests/test_auto_CenterMass.py index 7ec95938b4..bd293db0d4 100644 --- a/nipype/interfaces/afni/tests/test_auto_CenterMass.py +++ b/nipype/interfaces/afni/tests/test_auto_CenterMass.py @@ -4,15 +4,9 @@ def test_CenterMass_inputs(): input_map = dict( - all_rois=dict( - argstr="-all_rois", - ), - args=dict( - argstr="%s", - ), - automask=dict( - argstr="-automask", - ), + all_rois=dict(argstr="-all_rois"), + args=dict(argstr="%s"), + automask=dict(argstr="-automask"), cm_file=dict( argstr="> %s", extensions=None, @@ -22,30 +16,14 @@ def test_CenterMass_inputs(): name_template="%s_cm.out", position=-1, ), - environ=dict( - nohash=True, - usedefault=True, - ), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="%s", - copyfile=True, - extensions=None, - mandatory=True, - position=-2, - ), - local_ijk=dict( - argstr="-local_ijk", - ), - mask_file=dict( - argstr="-mask %s", - extensions=None, - ), - roi_vals=dict( - argstr="-roi_vals %s", - ), - set_cm=dict( - argstr="-set %f %f %f", + argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-2 ), + local_ijk=dict(argstr="-local_ijk"), + mask_file=dict(argstr="-mask %s", extensions=None), + roi_vals=dict(argstr="-roi_vals %s"), + set_cm=dict(argstr="-set %f %f %f"), ) inputs = CenterMass.input_spec() @@ -56,13 +34,7 @@ def test_CenterMass_inputs(): def test_CenterMass_outputs(): output_map = dict( - cm=dict(), - cm_file=dict( - extensions=None, - ), - out_file=dict( - extensions=None, - ), + cm=dict(), cm_file=dict(extensions=None), out_file=dict(extensions=None) ) outputs = CenterMass.output_spec() diff --git 
a/nipype/interfaces/afni/tests/test_auto_ClipLevel.py b/nipype/interfaces/afni/tests/test_auto_ClipLevel.py index 7a324fe7d4..5c7a318fc6 100644 --- a/nipype/interfaces/afni/tests/test_auto_ClipLevel.py +++ b/nipype/interfaces/afni/tests/test_auto_ClipLevel.py @@ -4,34 +4,12 @@ def test_ClipLevel_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - doall=dict( - argstr="-doall", - position=3, - xor="grad", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - grad=dict( - argstr="-grad %s", - extensions=None, - position=3, - xor="doall", - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - ), - mfrac=dict( - argstr="-mfrac %s", - position=2, - ), + args=dict(argstr="%s"), + doall=dict(argstr="-doall", position=3, xor="grad"), + environ=dict(nohash=True, usedefault=True), + grad=dict(argstr="-grad %s", extensions=None, position=3, xor="doall"), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), + mfrac=dict(argstr="-mfrac %s", position=2), ) inputs = ClipLevel.input_spec() @@ -41,9 +19,7 @@ def test_ClipLevel_inputs(): def test_ClipLevel_outputs(): - output_map = dict( - clip_val=dict(), - ) + output_map = dict(clip_val=dict()) outputs = ClipLevel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_ConvertDset.py b/nipype/interfaces/afni/tests/test_auto_ConvertDset.py index 226eac97b5..51e0dbb300 100644 --- a/nipype/interfaces/afni/tests/test_auto_ConvertDset.py +++ b/nipype/interfaces/afni/tests/test_auto_ConvertDset.py @@ -4,34 +4,14 @@ def test_ConvertDset_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-input %s", - extensions=None, - mandatory=True, - position=-2, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-input %s", extensions=None, mandatory=True, position=-2), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( - argstr="-prefix %s", - extensions=None, - mandatory=True, - position=-1, - ), - out_type=dict( - argstr="-o_%s", - mandatory=True, - position=0, + argstr="-prefix %s", extensions=None, mandatory=True, position=-1 ), + out_type=dict(argstr="-o_%s", mandatory=True, position=0), outputtype=dict(), ) inputs = ConvertDset.input_spec() @@ -42,11 +22,7 @@ def test_ConvertDset_inputs(): def test_ConvertDset_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = ConvertDset.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Copy.py b/nipype/interfaces/afni/tests/test_auto_Copy.py index e96592b184..c5d0c3736a 100644 --- a/nipype/interfaces/afni/tests/test_auto_Copy.py +++ b/nipype/interfaces/afni/tests/test_auto_Copy.py @@ -4,24 +4,12 @@ def test_Copy_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-2, - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-2 ), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="%s", extensions=None, @@ -30,9 +18,7 @@ def 
test_Copy_inputs(): position=-1, ), outputtype=dict(), - verbose=dict( - argstr="-verb", - ), + verbose=dict(argstr="-verb"), ) inputs = Copy.input_spec() @@ -42,11 +28,7 @@ def test_Copy_inputs(): def test_Copy_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Copy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Deconvolve.py b/nipype/interfaces/afni/tests/test_auto_Deconvolve.py index c4195807eb..3137a455d4 100644 --- a/nipype/interfaces/afni/tests/test_auto_Deconvolve.py +++ b/nipype/interfaces/afni/tests/test_auto_Deconvolve.py @@ -4,169 +4,55 @@ def test_Deconvolve_inputs(): input_map = dict( - STATmask=dict( - argstr="-STATmask %s", - extensions=None, - ), - TR_1D=dict( - argstr="-TR_1D %f", - ), - allzero_OK=dict( - argstr="-allzero_OK", - ), - args=dict( - argstr="%s", - ), - automask=dict( - argstr="-automask", - ), - cbucket=dict( - argstr="-cbucket %s", - ), - censor=dict( - argstr="-censor %s", - extensions=None, - ), - dmbase=dict( - argstr="-dmbase", - ), - dname=dict( - argstr="-D%s=%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - force_TR=dict( - argstr="-force_TR %f", - position=0, - ), - fout=dict( - argstr="-fout", - ), - global_times=dict( - argstr="-global_times", - xor=["local_times"], - ), - glt_label=dict( - argstr="-glt_label %d %s...", - position=-1, - requires=["gltsym"], - ), - gltsym=dict( - argstr="-gltsym 'SYM: %s'...", - position=-2, - ), - goforit=dict( - argstr="-GOFORIT %i", - ), - in_files=dict( - argstr="-input %s", - copyfile=False, - position=1, - sep=" ", - ), - input1D=dict( - argstr="-input1D %s", - extensions=None, - ), - legendre=dict( - argstr="-legendre", - ), - local_times=dict( - argstr="-local_times", - xor=["global_times"], - ), - mask=dict( - argstr="-mask %s", - extensions=None, - ), - noblock=dict( - argstr="-noblock", - ), - nocond=dict( - argstr="-nocond", - ), - nodmbase=dict( - argstr="-nodmbase", - ), - nofdr=dict( - argstr="-noFDR", - ), - nolegendre=dict( - argstr="-nolegendre", - ), - nosvd=dict( - argstr="-nosvd", - ), - num_glt=dict( - argstr="-num_glt %d", - position=-3, - ), - num_stimts=dict( - argstr="-num_stimts %d", - position=-6, - ), - num_threads=dict( - argstr="-jobs %d", - nohash=True, - ), - ortvec=dict( - argstr="-ortvec %s %s", - ), - out_file=dict( - argstr="-bucket %s", - extensions=None, - ), + STATmask=dict(argstr="-STATmask %s", extensions=None), + TR_1D=dict(argstr="-TR_1D %f"), + allzero_OK=dict(argstr="-allzero_OK"), + args=dict(argstr="%s"), + automask=dict(argstr="-automask"), + cbucket=dict(argstr="-cbucket %s"), + censor=dict(argstr="-censor %s", extensions=None), + dmbase=dict(argstr="-dmbase"), + dname=dict(argstr="-D%s=%s"), + environ=dict(nohash=True, usedefault=True), + force_TR=dict(argstr="-force_TR %f", position=0), + fout=dict(argstr="-fout"), + global_times=dict(argstr="-global_times", xor=["local_times"]), + glt_label=dict(argstr="-glt_label %d %s...", position=-1, requires=["gltsym"]), + gltsym=dict(argstr="-gltsym 'SYM: %s'...", position=-2), + goforit=dict(argstr="-GOFORIT %i"), + in_files=dict(argstr="-input %s", copyfile=False, position=1, sep=" "), + input1D=dict(argstr="-input1D %s", extensions=None), + legendre=dict(argstr="-legendre"), + local_times=dict(argstr="-local_times", xor=["global_times"]), + mask=dict(argstr="-mask %s", extensions=None), + noblock=dict(argstr="-noblock"), + nocond=dict(argstr="-nocond"), 
+ nodmbase=dict(argstr="-nodmbase"), + nofdr=dict(argstr="-noFDR"), + nolegendre=dict(argstr="-nolegendre"), + nosvd=dict(argstr="-nosvd"), + num_glt=dict(argstr="-num_glt %d", position=-3), + num_stimts=dict(argstr="-num_stimts %d", position=-6), + num_threads=dict(argstr="-jobs %d", nohash=True), + ortvec=dict(argstr="-ortvec %s %s"), + out_file=dict(argstr="-bucket %s", extensions=None), outputtype=dict(), - polort=dict( - argstr="-polort %d", - ), - rmsmin=dict( - argstr="-rmsmin %f", - ), - rout=dict( - argstr="-rout", - ), - sat=dict( - argstr="-sat", - xor=["trans"], - ), - singvals=dict( - argstr="-singvals", - ), + polort=dict(argstr="-polort %d"), + rmsmin=dict(argstr="-rmsmin %f"), + rout=dict(argstr="-rout"), + sat=dict(argstr="-sat", xor=["trans"]), + singvals=dict(argstr="-singvals"), stim_label=dict( - argstr="-stim_label %d %s...", - position=-4, - requires=["stim_times"], - ), - stim_times=dict( - argstr="-stim_times %d %s '%s'...", - position=-5, - ), - stim_times_subtract=dict( - argstr="-stim_times_subtract %f", - ), - svd=dict( - argstr="-svd", - ), - tout=dict( - argstr="-tout", - ), - trans=dict( - argstr="-trans", - xor=["sat"], - ), - vout=dict( - argstr="-vout", - ), - x1D=dict( - argstr="-x1D %s", - extensions=None, - ), - x1D_stop=dict( - argstr="-x1D_stop", - ), + argstr="-stim_label %d %s...", position=-4, requires=["stim_times"] + ), + stim_times=dict(argstr="-stim_times %d %s '%s'...", position=-5), + stim_times_subtract=dict(argstr="-stim_times_subtract %f"), + svd=dict(argstr="-svd"), + tout=dict(argstr="-tout"), + trans=dict(argstr="-trans", xor=["sat"]), + vout=dict(argstr="-vout"), + x1D=dict(argstr="-x1D %s", extensions=None), + x1D_stop=dict(argstr="-x1D_stop"), ) inputs = Deconvolve.input_spec() @@ -177,18 +63,10 @@ def test_Deconvolve_inputs(): def test_Deconvolve_outputs(): output_map = dict( - cbucket=dict( - extensions=None, - ), - out_file=dict( - extensions=None, - ), - reml_script=dict( - extensions=None, - ), - x1D=dict( - extensions=None, - ), + cbucket=dict(extensions=None), + out_file=dict(extensions=None), + reml_script=dict(extensions=None), + x1D=dict(extensions=None), ) outputs = Deconvolve.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py b/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py index afbc5a7d4f..da68d04da2 100644 --- a/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py +++ b/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py @@ -4,37 +4,16 @@ def test_DegreeCentrality_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - autoclip=dict( - argstr="-autoclip", - ), - automask=dict( - argstr="-automask", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + autoclip=dict(argstr="-autoclip"), + automask=dict(argstr="-automask"), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - mask=dict( - argstr="-mask %s", - extensions=None, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - oned_file=dict( - argstr="-out1D %s", + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 ), + mask=dict(argstr="-mask %s", extensions=None), + num_threads=dict(nohash=True, usedefault=True), + oned_file=dict(argstr="-out1D %s"), out_file=dict( argstr="-prefix %s", extensions=None, @@ -42,15 +21,9 @@ def test_DegreeCentrality_inputs(): name_template="%s_afni", ), outputtype=dict(), - polort=dict( - argstr="-polort 
%d", - ), - sparsity=dict( - argstr="-sparsity %f", - ), - thresh=dict( - argstr="-thresh %f", - ), + polort=dict(argstr="-polort %d"), + sparsity=dict(argstr="-sparsity %f"), + thresh=dict(argstr="-thresh %f"), ) inputs = DegreeCentrality.input_spec() @@ -60,14 +33,7 @@ def test_DegreeCentrality_inputs(): def test_DegreeCentrality_outputs(): - output_map = dict( - oned_file=dict( - extensions=None, - ), - out_file=dict( - extensions=None, - ), - ) + output_map = dict(oned_file=dict(extensions=None), out_file=dict(extensions=None)) outputs = DegreeCentrality.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Despike.py b/nipype/interfaces/afni/tests/test_auto_Despike.py index 8835dd7e07..025a88ef52 100644 --- a/nipype/interfaces/afni/tests/test_auto_Despike.py +++ b/nipype/interfaces/afni/tests/test_auto_Despike.py @@ -4,24 +4,12 @@ def test_Despike_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 ), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -38,11 +26,7 @@ def test_Despike_inputs(): def test_Despike_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Despike.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Detrend.py b/nipype/interfaces/afni/tests/test_auto_Detrend.py index 5edbdd74ee..c662529685 100644 --- a/nipype/interfaces/afni/tests/test_auto_Detrend.py +++ b/nipype/interfaces/afni/tests/test_auto_Detrend.py @@ -4,24 +4,12 @@ def test_Detrend_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 ), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -38,11 +26,7 @@ def test_Detrend_inputs(): def test_Detrend_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Detrend.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Dot.py b/nipype/interfaces/afni/tests/test_auto_Dot.py index 9cf8083ab9..dd780b6815 100644 --- a/nipype/interfaces/afni/tests/test_auto_Dot.py +++ b/nipype/interfaces/afni/tests/test_auto_Dot.py @@ -4,64 +4,24 @@ def test_Dot_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - demean=dict( - argstr="-demean", - ), - docoef=dict( - argstr="-docoef", - ), - docor=dict( - argstr="-docor", - ), - dodice=dict( - argstr="-dodice", - ), - dodot=dict( - argstr="-dodot", - ), - doeta2=dict( - argstr="-doeta2", - ), - dosums=dict( - argstr="-dosums", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - full=dict( - argstr="-full", - ), - in_files=dict( - 
argstr="%s ...", - position=-2, - ), - mask=dict( - argstr="-mask %s", - extensions=None, - ), - mrange=dict( - argstr="-mrange %s %s", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_file=dict( - argstr=" |& tee %s", - extensions=None, - position=-1, - ), + args=dict(argstr="%s"), + demean=dict(argstr="-demean"), + docoef=dict(argstr="-docoef"), + docor=dict(argstr="-docor"), + dodice=dict(argstr="-dodice"), + dodot=dict(argstr="-dodot"), + doeta2=dict(argstr="-doeta2"), + dosums=dict(argstr="-dosums"), + environ=dict(nohash=True, usedefault=True), + full=dict(argstr="-full"), + in_files=dict(argstr="%s ...", position=-2), + mask=dict(argstr="-mask %s", extensions=None), + mrange=dict(argstr="-mrange %s %s"), + num_threads=dict(nohash=True, usedefault=True), + out_file=dict(argstr=" |& tee %s", extensions=None, position=-1), outputtype=dict(), - show_labels=dict( - argstr="-show_labels", - ), - upper=dict( - argstr="-upper", - ), + show_labels=dict(argstr="-show_labels"), + upper=dict(argstr="-upper"), ) inputs = Dot.input_spec() @@ -71,11 +31,7 @@ def test_Dot_inputs(): def test_Dot_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Dot.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_ECM.py b/nipype/interfaces/afni/tests/test_auto_ECM.py index 030aaffe6a..12364c46b4 100644 --- a/nipype/interfaces/afni/tests/test_auto_ECM.py +++ b/nipype/interfaces/afni/tests/test_auto_ECM.py @@ -4,49 +4,20 @@ def test_ECM_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - autoclip=dict( - argstr="-autoclip", - ), - automask=dict( - argstr="-automask", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - eps=dict( - argstr="-eps %f", - ), - fecm=dict( - argstr="-fecm", - ), - full=dict( - argstr="-full", - ), + args=dict(argstr="%s"), + autoclip=dict(argstr="-autoclip"), + automask=dict(argstr="-automask"), + environ=dict(nohash=True, usedefault=True), + eps=dict(argstr="-eps %f"), + fecm=dict(argstr="-fecm"), + full=dict(argstr="-full"), in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - mask=dict( - argstr="-mask %s", - extensions=None, - ), - max_iter=dict( - argstr="-max_iter %d", - ), - memory=dict( - argstr="-memory %f", - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 ), + mask=dict(argstr="-mask %s", extensions=None), + max_iter=dict(argstr="-max_iter %d"), + memory=dict(argstr="-memory %f"), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -54,21 +25,11 @@ def test_ECM_inputs(): name_template="%s_afni", ), outputtype=dict(), - polort=dict( - argstr="-polort %d", - ), - scale=dict( - argstr="-scale %f", - ), - shift=dict( - argstr="-shift %f", - ), - sparsity=dict( - argstr="-sparsity %f", - ), - thresh=dict( - argstr="-thresh %f", - ), + polort=dict(argstr="-polort %d"), + scale=dict(argstr="-scale %f"), + shift=dict(argstr="-shift %f"), + sparsity=dict(argstr="-sparsity %f"), + thresh=dict(argstr="-thresh %f"), ) inputs = ECM.input_spec() @@ -78,11 +39,7 @@ def test_ECM_inputs(): def test_ECM_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = ECM.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/afni/tests/test_auto_Edge3.py b/nipype/interfaces/afni/tests/test_auto_Edge3.py index 45b49fd243..966a24320a 100644 --- a/nipype/interfaces/afni/tests/test_auto_Edge3.py +++ b/nipype/interfaces/afni/tests/test_auto_Edge3.py @@ -4,24 +4,11 @@ def test_Edge3_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - datum=dict( - argstr="-datum %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fscale=dict( - argstr="-fscale", - xor=["gscale", "nscale", "scale_floats"], - ), - gscale=dict( - argstr="-gscale", - xor=["fscale", "nscale", "scale_floats"], - ), + args=dict(argstr="%s"), + datum=dict(argstr="-datum %s"), + environ=dict(nohash=True, usedefault=True), + fscale=dict(argstr="-fscale", xor=["gscale", "nscale", "scale_floats"]), + gscale=dict(argstr="-gscale", xor=["fscale", "nscale", "scale_floats"]), in_file=dict( argstr="-input %s", copyfile=False, @@ -29,27 +16,14 @@ def test_Edge3_inputs(): mandatory=True, position=0, ), - nscale=dict( - argstr="-nscale", - xor=["fscale", "gscale", "scale_floats"], - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_file=dict( - argstr="-prefix %s", - extensions=None, - position=-1, - ), + nscale=dict(argstr="-nscale", xor=["fscale", "gscale", "scale_floats"]), + num_threads=dict(nohash=True, usedefault=True), + out_file=dict(argstr="-prefix %s", extensions=None, position=-1), outputtype=dict(), scale_floats=dict( - argstr="-scale_floats %f", - xor=["fscale", "gscale", "nscale"], - ), - verbose=dict( - argstr="-verbose", + argstr="-scale_floats %f", xor=["fscale", "gscale", "nscale"] ), + verbose=dict(argstr="-verbose"), ) inputs = Edge3.input_spec() @@ -59,11 +33,7 @@ def test_Edge3_inputs(): def test_Edge3_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Edge3.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Eval.py b/nipype/interfaces/afni/tests/test_auto_Eval.py index 748bf05dfd..29ca4f1433 100644 --- a/nipype/interfaces/afni/tests/test_auto_Eval.py +++ b/nipype/interfaces/afni/tests/test_auto_Eval.py @@ -4,45 +4,15 @@ def test_Eval_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - expr=dict( - argstr='-expr "%s"', - mandatory=True, - position=3, - ), - in_file_a=dict( - argstr="-a %s", - extensions=None, - mandatory=True, - position=0, - ), - in_file_b=dict( - argstr="-b %s", - extensions=None, - position=1, - ), - in_file_c=dict( - argstr="-c %s", - extensions=None, - position=2, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - other=dict( - argstr="", - extensions=None, - ), - out1D=dict( - argstr="-1D", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + expr=dict(argstr='-expr "%s"', mandatory=True, position=3), + in_file_a=dict(argstr="-a %s", extensions=None, mandatory=True, position=0), + in_file_b=dict(argstr="-b %s", extensions=None, position=1), + in_file_c=dict(argstr="-c %s", extensions=None, position=2), + num_threads=dict(nohash=True, usedefault=True), + other=dict(argstr="", extensions=None), + out1D=dict(argstr="-1D"), out_file=dict( argstr="-prefix %s", extensions=None, @@ -51,12 +21,8 @@ def test_Eval_inputs(): ), outputtype=dict(), single_idx=dict(), - start_idx=dict( - requires=["stop_idx"], - ), - stop_idx=dict( - requires=["start_idx"], - ), + start_idx=dict(requires=["stop_idx"]), + 
stop_idx=dict(requires=["start_idx"]), ) inputs = Eval.input_spec() @@ -66,11 +32,7 @@ def test_Eval_inputs(): def test_Eval_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Eval.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_FWHMx.py b/nipype/interfaces/afni/tests/test_auto_FWHMx.py index 06151c569a..f1126d0e1d 100644 --- a/nipype/interfaces/afni/tests/test_auto_FWHMx.py +++ b/nipype/interfaces/afni/tests/test_auto_FWHMx.py @@ -4,53 +4,18 @@ def test_FWHMx_inputs(): input_map = dict( - acf=dict( - argstr="-acf", - usedefault=True, - ), - args=dict( - argstr="%s", - ), - arith=dict( - argstr="-arith", - xor=["geom"], - ), - automask=dict( - argstr="-automask", - usedefault=True, - ), - combine=dict( - argstr="-combine", - ), - compat=dict( - argstr="-compat", - ), - demed=dict( - argstr="-demed", - xor=["detrend"], - ), - detrend=dict( - argstr="-detrend", - usedefault=True, - xor=["demed"], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - geom=dict( - argstr="-geom", - xor=["arith"], - ), - in_file=dict( - argstr="-input %s", - extensions=None, - mandatory=True, - ), - mask=dict( - argstr="-mask %s", - extensions=None, - ), + acf=dict(argstr="-acf", usedefault=True), + args=dict(argstr="%s"), + arith=dict(argstr="-arith", xor=["geom"]), + automask=dict(argstr="-automask", usedefault=True), + combine=dict(argstr="-combine"), + compat=dict(argstr="-compat"), + demed=dict(argstr="-demed", xor=["detrend"]), + detrend=dict(argstr="-detrend", usedefault=True, xor=["demed"]), + environ=dict(nohash=True, usedefault=True), + geom=dict(argstr="-geom", xor=["arith"]), + in_file=dict(argstr="-input %s", extensions=None, mandatory=True), + mask=dict(argstr="-mask %s", extensions=None), out_detrend=dict( argstr="-detprefix %s", extensions=None, @@ -73,9 +38,7 @@ def test_FWHMx_inputs(): name_source="in_file", name_template="%s_subbricks.out", ), - unif=dict( - argstr="-unif", - ), + unif=dict(argstr="-unif"), ) inputs = FWHMx.input_spec() @@ -88,18 +51,10 @@ def test_FWHMx_outputs(): output_map = dict( acf_param=dict(), fwhm=dict(), - out_acf=dict( - extensions=None, - ), - out_detrend=dict( - extensions=None, - ), - out_file=dict( - extensions=None, - ), - out_subbricks=dict( - extensions=None, - ), + out_acf=dict(extensions=None), + out_detrend=dict(extensions=None), + out_file=dict(extensions=None), + out_subbricks=dict(extensions=None), ) outputs = FWHMx.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Fim.py b/nipype/interfaces/afni/tests/test_auto_Fim.py index aea43391bc..8be7f6e8f7 100644 --- a/nipype/interfaces/afni/tests/test_auto_Fim.py +++ b/nipype/interfaces/afni/tests/test_auto_Fim.py @@ -4,22 +4,11 @@ def test_Fim_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fim_thr=dict( - argstr="-fim_thr %f", - position=3, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + fim_thr=dict(argstr="-fim_thr %f", position=3), ideal_file=dict( - argstr="-ideal_file %s", - extensions=None, - mandatory=True, - position=2, + argstr="-ideal_file %s", extensions=None, mandatory=True, position=2 ), in_file=dict( argstr="-input %s", @@ -28,14 +17,8 @@ def test_Fim_inputs(): mandatory=True, position=1, ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out=dict( - argstr="-out %s", - position=4, - ), + 
num_threads=dict(nohash=True, usedefault=True), + out=dict(argstr="-out %s", position=4), out_file=dict( argstr="-bucket %s", extensions=None, @@ -52,11 +35,7 @@ def test_Fim_inputs(): def test_Fim_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Fim.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Fourier.py b/nipype/interfaces/afni/tests/test_auto_Fourier.py index 97764a2b9b..2604f95de5 100644 --- a/nipype/interfaces/afni/tests/test_auto_Fourier.py +++ b/nipype/interfaces/afni/tests/test_auto_Fourier.py @@ -4,32 +4,14 @@ def test_Fourier_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - highpass=dict( - argstr="-highpass %f", - mandatory=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + highpass=dict(argstr="-highpass %f", mandatory=True), in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - lowpass=dict( - argstr="-lowpass %f", - mandatory=True, - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 ), + lowpass=dict(argstr="-lowpass %f", mandatory=True), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -37,9 +19,7 @@ def test_Fourier_inputs(): name_template="%s_fourier", ), outputtype=dict(), - retrend=dict( - argstr="-retrend", - ), + retrend=dict(argstr="-retrend"), ) inputs = Fourier.input_spec() @@ -49,11 +29,7 @@ def test_Fourier_inputs(): def test_Fourier_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Fourier.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_GCOR.py b/nipype/interfaces/afni/tests/test_auto_GCOR.py index 2e5f0f372d..c3521629c3 100644 --- a/nipype/interfaces/afni/tests/test_auto_GCOR.py +++ b/nipype/interfaces/afni/tests/test_auto_GCOR.py @@ -4,13 +4,8 @@ def test_GCOR_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), in_file=dict( argstr="-input %s", copyfile=False, @@ -18,17 +13,9 @@ def test_GCOR_inputs(): mandatory=True, position=-1, ), - mask=dict( - argstr="-mask %s", - copyfile=False, - extensions=None, - ), - nfirst=dict( - argstr="-nfirst %d", - ), - no_demean=dict( - argstr="-no_demean", - ), + mask=dict(argstr="-mask %s", copyfile=False, extensions=None), + nfirst=dict(argstr="-nfirst %d"), + no_demean=dict(argstr="-no_demean"), ) inputs = GCOR.input_spec() @@ -38,9 +25,7 @@ def test_GCOR_inputs(): def test_GCOR_outputs(): - output_map = dict( - out=dict(), - ) + output_map = dict(out=dict()) outputs = GCOR.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Hist.py b/nipype/interfaces/afni/tests/test_auto_Hist.py index 2263f3632b..117b04f705 100644 --- a/nipype/interfaces/afni/tests/test_auto_Hist.py +++ b/nipype/interfaces/afni/tests/test_auto_Hist.py @@ -4,16 +4,9 @@ def test_Hist_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bin_width=dict( - argstr="-binwidth %f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + 
args=dict(argstr="%s"), + bin_width=dict(argstr="-binwidth %f"), + environ=dict(nohash=True, usedefault=True), in_file=dict( argstr="-input %s", copyfile=False, @@ -21,19 +14,10 @@ def test_Hist_inputs(): mandatory=True, position=1, ), - mask=dict( - argstr="-mask %s", - extensions=None, - ), - max_value=dict( - argstr="-max %f", - ), - min_value=dict( - argstr="-min %f", - ), - nbin=dict( - argstr="-nbin %d", - ), + mask=dict(argstr="-mask %s", extensions=None), + max_value=dict(argstr="-max %f"), + min_value=dict(argstr="-min %f"), + nbin=dict(argstr="-nbin %d"), out_file=dict( argstr="-prefix %s", extensions=None, @@ -49,10 +33,7 @@ def test_Hist_inputs(): name_template="%s_hist.out", position=-1, ), - showhist=dict( - argstr="-showhist", - usedefault=True, - ), + showhist=dict(argstr="-showhist", usedefault=True), ) inputs = Hist.input_spec() @@ -62,14 +43,7 @@ def test_Hist_inputs(): def test_Hist_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - out_show=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None), out_show=dict(extensions=None)) outputs = Hist.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_LFCD.py b/nipype/interfaces/afni/tests/test_auto_LFCD.py index bd4b76baee..fff85c330c 100644 --- a/nipype/interfaces/afni/tests/test_auto_LFCD.py +++ b/nipype/interfaces/afni/tests/test_auto_LFCD.py @@ -4,34 +4,15 @@ def test_LFCD_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - autoclip=dict( - argstr="-autoclip", - ), - automask=dict( - argstr="-automask", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + autoclip=dict(argstr="-autoclip"), + automask=dict(argstr="-automask"), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - mask=dict( - argstr="-mask %s", - extensions=None, - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 ), + mask=dict(argstr="-mask %s", extensions=None), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -39,12 +20,8 @@ def test_LFCD_inputs(): name_template="%s_afni", ), outputtype=dict(), - polort=dict( - argstr="-polort %d", - ), - thresh=dict( - argstr="-thresh %f", - ), + polort=dict(argstr="-polort %d"), + thresh=dict(argstr="-thresh %f"), ) inputs = LFCD.input_spec() @@ -54,11 +31,7 @@ def test_LFCD_inputs(): def test_LFCD_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = LFCD.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_LocalBistat.py b/nipype/interfaces/afni/tests/test_auto_LocalBistat.py index 2ffe29dda0..87cb3b6dbd 100644 --- a/nipype/interfaces/afni/tests/test_auto_LocalBistat.py +++ b/nipype/interfaces/afni/tests/test_auto_LocalBistat.py @@ -4,41 +4,14 @@ def test_LocalBistat_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - automask=dict( - argstr="-automask", - xor=["weight_file"], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file1=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - in_file2=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - ), - mask_file=dict( - argstr="-mask %s", - extensions=None, - ), 
- neighborhood=dict( - argstr="-nbhd '%s(%s)'", - mandatory=True, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + automask=dict(argstr="-automask", xor=["weight_file"]), + environ=dict(nohash=True, usedefault=True), + in_file1=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + in_file2=dict(argstr="%s", extensions=None, mandatory=True, position=-1), + mask_file=dict(argstr="-mask %s", extensions=None), + neighborhood=dict(argstr="-nbhd '%s(%s)'", mandatory=True), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -48,15 +21,8 @@ def test_LocalBistat_inputs(): position=0, ), outputtype=dict(), - stat=dict( - argstr="-stat %s...", - mandatory=True, - ), - weight_file=dict( - argstr="-weight %s", - extensions=None, - xor=["automask"], - ), + stat=dict(argstr="-stat %s...", mandatory=True), + weight_file=dict(argstr="-weight %s", extensions=None, xor=["automask"]), ) inputs = LocalBistat.input_spec() @@ -66,11 +32,7 @@ def test_LocalBistat_inputs(): def test_LocalBistat_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = LocalBistat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Localstat.py b/nipype/interfaces/afni/tests/test_auto_Localstat.py index 54c99b434d..1c9a102170 100644 --- a/nipype/interfaces/afni/tests/test_auto_Localstat.py +++ b/nipype/interfaces/afni/tests/test_auto_Localstat.py @@ -4,41 +4,15 @@ def test_Localstat_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - automask=dict( - argstr="-automask", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - grid_rmode=dict( - argstr="-grid_rmode %s", - requires=["reduce_restore_grid"], - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - ), - mask_file=dict( - argstr="-mask %s", - extensions=None, - ), - neighborhood=dict( - argstr="-nbhd '%s(%s)'", - mandatory=True, - ), - nonmask=dict( - argstr="-use_nonmask", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + automask=dict(argstr="-automask"), + environ=dict(nohash=True, usedefault=True), + grid_rmode=dict(argstr="-grid_rmode %s", requires=["reduce_restore_grid"]), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), + mask_file=dict(argstr="-mask %s", extensions=None), + neighborhood=dict(argstr="-nbhd '%s(%s)'", mandatory=True), + nonmask=dict(argstr="-use_nonmask"), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -48,28 +22,18 @@ def test_Localstat_inputs(): position=0, ), outputtype=dict(), - overwrite=dict( - argstr="-overwrite", - ), - quiet=dict( - argstr="-quiet", - ), + overwrite=dict(argstr="-overwrite"), + quiet=dict(argstr="-quiet"), reduce_grid=dict( - argstr="-reduce_grid %s", - xor=["reduce_restore_grid", "reduce_max_vox"], + argstr="-reduce_grid %s", xor=["reduce_restore_grid", "reduce_max_vox"] ), reduce_max_vox=dict( - argstr="-reduce_max_vox %s", - xor=["reduce_restore_grid", "reduce_grid"], + argstr="-reduce_max_vox %s", xor=["reduce_restore_grid", "reduce_grid"] ), reduce_restore_grid=dict( - argstr="-reduce_restore_grid %s", - xor=["reduce_max_vox", "reduce_grid"], - ), - stat=dict( - argstr="-stat %s...", - mandatory=True, + argstr="-reduce_restore_grid %s", xor=["reduce_max_vox", "reduce_grid"] ), + 
stat=dict(argstr="-stat %s...", mandatory=True), ) inputs = Localstat.input_spec() @@ -79,11 +43,7 @@ def test_Localstat_inputs(): def test_Localstat_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Localstat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_MaskTool.py b/nipype/interfaces/afni/tests/test_auto_MaskTool.py index a0520df606..c86f534deb 100644 --- a/nipype/interfaces/afni/tests/test_auto_MaskTool.py +++ b/nipype/interfaces/afni/tests/test_auto_MaskTool.py @@ -4,49 +4,18 @@ def test_MaskTool_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - count=dict( - argstr="-count", - position=2, - ), - datum=dict( - argstr="-datum %s", - ), - dilate_inputs=dict( - argstr="-dilate_inputs %s", - ), - dilate_results=dict( - argstr="-dilate_results %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fill_dirs=dict( - argstr="-fill_dirs %s", - requires=["fill_holes"], - ), - fill_holes=dict( - argstr="-fill_holes", - ), - frac=dict( - argstr="-frac %s", - ), - in_file=dict( - argstr="-input %s", - copyfile=False, - mandatory=True, - position=-1, - ), - inter=dict( - argstr="-inter", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + count=dict(argstr="-count", position=2), + datum=dict(argstr="-datum %s"), + dilate_inputs=dict(argstr="-dilate_inputs %s"), + dilate_results=dict(argstr="-dilate_results %s"), + environ=dict(nohash=True, usedefault=True), + fill_dirs=dict(argstr="-fill_dirs %s", requires=["fill_holes"]), + fill_holes=dict(argstr="-fill_holes"), + frac=dict(argstr="-frac %s"), + in_file=dict(argstr="-input %s", copyfile=False, mandatory=True, position=-1), + inter=dict(argstr="-inter"), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -54,12 +23,8 @@ def test_MaskTool_inputs(): name_template="%s_mask", ), outputtype=dict(), - union=dict( - argstr="-union", - ), - verbose=dict( - argstr="-verb %s", - ), + union=dict(argstr="-union"), + verbose=dict(argstr="-verb %s"), ) inputs = MaskTool.input_spec() @@ -69,11 +34,7 @@ def test_MaskTool_inputs(): def test_MaskTool_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MaskTool.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Maskave.py b/nipype/interfaces/afni/tests/test_auto_Maskave.py index ce7a07c95e..20869c822d 100644 --- a/nipype/interfaces/afni/tests/test_auto_Maskave.py +++ b/nipype/interfaces/afni/tests/test_auto_Maskave.py @@ -4,29 +4,13 @@ def test_Maskave_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-2, - ), - mask=dict( - argstr="-mask %s", - extensions=None, - position=1, - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-2 ), + mask=dict(argstr="-mask %s", extensions=None, position=1), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="> %s", extensions=None, @@ -36,10 +20,7 @@ def test_Maskave_inputs(): position=-1, ), outputtype=dict(), - quiet=dict( - 
argstr="-quiet", - position=2, - ), + quiet=dict(argstr="-quiet", position=2), ) inputs = Maskave.input_spec() @@ -49,11 +30,7 @@ def test_Maskave_inputs(): def test_Maskave_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Maskave.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Means.py b/nipype/interfaces/afni/tests/test_auto_Means.py index 3aa3ada375..d3b30d8d7b 100644 --- a/nipype/interfaces/afni/tests/test_auto_Means.py +++ b/nipype/interfaces/afni/tests/test_auto_Means.py @@ -4,43 +4,16 @@ def test_Means_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - count=dict( - argstr="-count", - ), - datum=dict( - argstr="-datum %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file_a=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - in_file_b=dict( - argstr="%s", - extensions=None, - position=-1, - ), - mask_inter=dict( - argstr="-mask_inter", - ), - mask_union=dict( - argstr="-mask_union", - ), - non_zero=dict( - argstr="-non_zero", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + count=dict(argstr="-count"), + datum=dict(argstr="-datum %s"), + environ=dict(nohash=True, usedefault=True), + in_file_a=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + in_file_b=dict(argstr="%s", extensions=None, position=-1), + mask_inter=dict(argstr="-mask_inter"), + mask_union=dict(argstr="-mask_union"), + non_zero=dict(argstr="-non_zero"), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -48,18 +21,10 @@ def test_Means_inputs(): name_template="%s_mean", ), outputtype=dict(), - scale=dict( - argstr="-%sscale", - ), - sqr=dict( - argstr="-sqr", - ), - std_dev=dict( - argstr="-stdev", - ), - summ=dict( - argstr="-sum", - ), + scale=dict(argstr="-%sscale"), + sqr=dict(argstr="-sqr"), + std_dev=dict(argstr="-stdev"), + summ=dict(argstr="-sum"), ) inputs = Means.input_spec() @@ -69,11 +34,7 @@ def test_Means_inputs(): def test_Means_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Means.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Merge.py b/nipype/interfaces/afni/tests/test_auto_Merge.py index ac974184ea..b1e228da45 100644 --- a/nipype/interfaces/afni/tests/test_auto_Merge.py +++ b/nipype/interfaces/afni/tests/test_auto_Merge.py @@ -4,30 +4,12 @@ def test_Merge_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - blurfwhm=dict( - argstr="-1blur_fwhm %d", - units="mm", - ), - doall=dict( - argstr="-doall", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr="%s", - copyfile=False, - mandatory=True, - position=-1, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + blurfwhm=dict(argstr="-1blur_fwhm %d", units="mm"), + doall=dict(argstr="-doall"), + environ=dict(nohash=True, usedefault=True), + in_files=dict(argstr="%s", copyfile=False, mandatory=True, position=-1), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -44,11 +26,7 @@ def test_Merge_inputs(): def test_Merge_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) 
outputs = Merge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_NetCorr.py b/nipype/interfaces/afni/tests/test_auto_NetCorr.py index e613dc13eb..ff6f2cd7c2 100644 --- a/nipype/interfaces/afni/tests/test_auto_NetCorr.py +++ b/nipype/interfaces/afni/tests/test_auto_NetCorr.py @@ -4,40 +4,15 @@ def test_NetCorr_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fish_z=dict( - argstr="-fish_z", - ), - ignore_LT=dict( - argstr="-ignore_LT", - ), - in_file=dict( - argstr="-inset %s", - extensions=None, - mandatory=True, - ), - in_rois=dict( - argstr="-in_rois %s", - extensions=None, - mandatory=True, - ), - mask=dict( - argstr="-mask %s", - extensions=None, - ), - nifti=dict( - argstr="-nifti", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + fish_z=dict(argstr="-fish_z"), + ignore_LT=dict(argstr="-ignore_LT"), + in_file=dict(argstr="-inset %s", extensions=None, mandatory=True), + in_rois=dict(argstr="-in_rois %s", extensions=None, mandatory=True), + mask=dict(argstr="-mask %s", extensions=None), + nifti=dict(argstr="-nifti"), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -45,38 +20,17 @@ def test_NetCorr_inputs(): name_template="%s_netcorr", position=1, ), - output_mask_nonnull=dict( - argstr="-output_mask_nonnull", - ), + output_mask_nonnull=dict(argstr="-output_mask_nonnull"), outputtype=dict(), - part_corr=dict( - argstr="-part_corr", - ), - push_thru_many_zeros=dict( - argstr="-push_thru_many_zeros", - ), - ts_indiv=dict( - argstr="-ts_indiv", - ), - ts_label=dict( - argstr="-ts_label", - ), - ts_out=dict( - argstr="-ts_out", - ), - ts_wb_Z=dict( - argstr="-ts_wb_Z", - ), - ts_wb_corr=dict( - argstr="-ts_wb_corr", - ), - ts_wb_strlabel=dict( - argstr="-ts_wb_strlabel", - ), - weight_ts=dict( - argstr="-weight_ts %s", - extensions=None, - ), + part_corr=dict(argstr="-part_corr"), + push_thru_many_zeros=dict(argstr="-push_thru_many_zeros"), + ts_indiv=dict(argstr="-ts_indiv"), + ts_label=dict(argstr="-ts_label"), + ts_out=dict(argstr="-ts_out"), + ts_wb_Z=dict(argstr="-ts_wb_Z"), + ts_wb_corr=dict(argstr="-ts_wb_corr"), + ts_wb_strlabel=dict(argstr="-ts_wb_strlabel"), + weight_ts=dict(argstr="-weight_ts %s", extensions=None), ) inputs = NetCorr.input_spec() @@ -86,12 +40,7 @@ def test_NetCorr_inputs(): def test_NetCorr_outputs(): - output_map = dict( - out_corr_maps=dict(), - out_corr_matrix=dict( - extensions=None, - ), - ) + output_map = dict(out_corr_maps=dict(), out_corr_matrix=dict(extensions=None)) outputs = NetCorr.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Notes.py b/nipype/interfaces/afni/tests/test_auto_Notes.py index c83a70f0f2..ba91f3410f 100644 --- a/nipype/interfaces/afni/tests/test_auto_Notes.py +++ b/nipype/interfaces/afni/tests/test_auto_Notes.py @@ -4,46 +4,19 @@ def test_Notes_inputs(): input_map = dict( - add=dict( - argstr='-a "%s"', - ), - add_history=dict( - argstr='-h "%s"', - xor=["rep_history"], - ), - args=dict( - argstr="%s", - ), - delete=dict( - argstr="-d %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + add=dict(argstr='-a "%s"'), + add_history=dict(argstr='-h "%s"', xor=["rep_history"]), + args=dict(argstr="%s"), + delete=dict(argstr="-d %d"), + environ=dict(nohash=True, usedefault=True), 
in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_file=dict( - argstr="%s", - extensions=None, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 ), + num_threads=dict(nohash=True, usedefault=True), + out_file=dict(argstr="%s", extensions=None), outputtype=dict(), - rep_history=dict( - argstr='-HH "%s"', - xor=["add_history"], - ), - ses=dict( - argstr="-ses", - ), + rep_history=dict(argstr='-HH "%s"', xor=["add_history"]), + ses=dict(argstr="-ses"), ) inputs = Notes.input_spec() @@ -53,11 +26,7 @@ def test_Notes_inputs(): def test_Notes_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Notes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py b/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py index 85fb2d3495..d58c26b5f0 100644 --- a/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py +++ b/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py @@ -4,20 +4,10 @@ def test_NwarpAdjust_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr="-source %s", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_files=dict(argstr="-source %s"), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -27,10 +17,7 @@ def test_NwarpAdjust_inputs(): requires=["in_files"], ), outputtype=dict(), - warps=dict( - argstr="-nwarp %s", - mandatory=True, - ), + warps=dict(argstr="-nwarp %s", mandatory=True), ) inputs = NwarpAdjust.input_spec() @@ -40,11 +27,7 @@ def test_NwarpAdjust_inputs(): def test_NwarpAdjust_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = NwarpAdjust.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpApply.py b/nipype/interfaces/afni/tests/test_auto_NwarpApply.py index c9ebd2853e..dd2cb1cdd8 100644 --- a/nipype/interfaces/afni/tests/test_auto_NwarpApply.py +++ b/nipype/interfaces/afni/tests/test_auto_NwarpApply.py @@ -4,52 +4,23 @@ def test_NwarpApply_inputs(): input_map = dict( - ainterp=dict( - argstr="-ainterp %s", - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-source %s", - mandatory=True, - ), - interp=dict( - argstr="-interp %s", - usedefault=True, - ), - inv_warp=dict( - argstr="-iwarp", - ), - master=dict( - argstr="-master %s", - extensions=None, - ), + ainterp=dict(argstr="-ainterp %s"), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-source %s", mandatory=True), + interp=dict(argstr="-interp %s", usedefault=True), + inv_warp=dict(argstr="-iwarp"), + master=dict(argstr="-master %s", extensions=None), out_file=dict( argstr="-prefix %s", extensions=None, name_source="in_file", name_template="%s_Nwarp", ), - quiet=dict( - argstr="-quiet", - xor=["verb"], - ), - short=dict( - argstr="-short", - ), - verb=dict( - argstr="-verb", - xor=["quiet"], - ), - warp=dict( - argstr="-nwarp %s", - mandatory=True, - ), + quiet=dict(argstr="-quiet", xor=["verb"]), + 
short=dict(argstr="-short"), + verb=dict(argstr="-verb", xor=["quiet"]), + warp=dict(argstr="-nwarp %s", mandatory=True), ) inputs = NwarpApply.input_spec() @@ -59,11 +30,7 @@ def test_NwarpApply_inputs(): def test_NwarpApply_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = NwarpApply.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpCat.py b/nipype/interfaces/afni/tests/test_auto_NwarpCat.py index b89aade9b0..ea0d386d03 100644 --- a/nipype/interfaces/afni/tests/test_auto_NwarpCat.py +++ b/nipype/interfaces/afni/tests/test_auto_NwarpCat.py @@ -4,32 +4,13 @@ def test_NwarpCat_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - expad=dict( - argstr="-expad %d", - ), - in_files=dict( - argstr="%s", - mandatory=True, - position=-1, - ), - interp=dict( - argstr="-interp %s", - usedefault=True, - ), - inv_warp=dict( - argstr="-iwarp", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + expad=dict(argstr="-expad %d"), + in_files=dict(argstr="%s", mandatory=True, position=-1), + interp=dict(argstr="-interp %s", usedefault=True), + inv_warp=dict(argstr="-iwarp"), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -37,12 +18,8 @@ def test_NwarpCat_inputs(): name_template="%s_NwarpCat", ), outputtype=dict(), - space=dict( - argstr="-space %s", - ), - verb=dict( - argstr="-verb", - ), + space=dict(argstr="-space %s"), + verb=dict(argstr="-verb"), ) inputs = NwarpCat.input_spec() @@ -52,11 +29,7 @@ def test_NwarpCat_inputs(): def test_NwarpCat_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = NwarpCat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py b/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py index bb47517e27..e81709db34 100644 --- a/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py +++ b/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py @@ -4,60 +4,29 @@ def test_OneDToolPy_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - censor_motion=dict( - argstr="-censor_motion %f %s", - ), - censor_prev_TR=dict( - argstr="-censor_prev_TR", - ), - demean=dict( - argstr="-demean", - ), - derivative=dict( - argstr="-derivative", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-infile %s", - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s"), + censor_motion=dict(argstr="-censor_motion %f %s"), + censor_prev_TR=dict(argstr="-censor_prev_TR"), + demean=dict(argstr="-demean"), + derivative=dict(argstr="-derivative"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-infile %s", extensions=None, mandatory=True), out_file=dict( - argstr="-write %s", - extensions=None, - xor=["show_cormat_warnings"], + argstr="-write %s", extensions=None, xor=["show_cormat_warnings"] ), outputtype=dict(), - py27_path=dict( - usedefault=True, - ), - set_nruns=dict( - argstr="-set_nruns %d", - ), - show_censor_count=dict( - argstr="-show_censor_count", - ), + py27_path=dict(usedefault=True), + set_nruns=dict(argstr="-set_nruns %d"), + show_censor_count=dict(argstr="-show_censor_count"), show_cormat_warnings=dict( 
argstr="-show_cormat_warnings |& tee %s", extensions=None, position=-1, xor=["out_file"], ), - show_indices_interest=dict( - argstr="-show_indices_interest", - ), - show_trs_run=dict( - argstr="-show_trs_run %d", - ), - show_trs_uncensored=dict( - argstr="-show_trs_uncensored %s", - ), + show_indices_interest=dict(argstr="-show_indices_interest"), + show_trs_run=dict(argstr="-show_trs_run %d"), + show_trs_uncensored=dict(argstr="-show_trs_uncensored %s"), ) inputs = OneDToolPy.input_spec() @@ -67,11 +36,7 @@ def test_OneDToolPy_inputs(): def test_OneDToolPy_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = OneDToolPy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_OutlierCount.py b/nipype/interfaces/afni/tests/test_auto_OutlierCount.py index 511c1ca8f2..525c94f7bc 100644 --- a/nipype/interfaces/afni/tests/test_auto_OutlierCount.py +++ b/nipype/interfaces/afni/tests/test_auto_OutlierCount.py @@ -4,46 +4,15 @@ def test_OutlierCount_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - autoclip=dict( - argstr="-autoclip", - usedefault=True, - xor=["mask"], - ), - automask=dict( - argstr="-automask", - usedefault=True, - xor=["mask"], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fraction=dict( - argstr="-fraction", - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - interval=dict( - argstr="-range", - usedefault=True, - ), - legendre=dict( - argstr="-legendre", - usedefault=True, - ), - mask=dict( - argstr="-mask %s", - extensions=None, - xor=["autoclip", "automask"], - ), + args=dict(argstr="%s"), + autoclip=dict(argstr="-autoclip", usedefault=True, xor=["mask"]), + automask=dict(argstr="-automask", usedefault=True, xor=["mask"]), + environ=dict(nohash=True, usedefault=True), + fraction=dict(argstr="-fraction", usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + interval=dict(argstr="-range", usedefault=True), + legendre=dict(argstr="-legendre", usedefault=True), + mask=dict(argstr="-mask %s", extensions=None, xor=["autoclip", "automask"]), out_file=dict( extensions=None, keep_extension=False, @@ -58,16 +27,9 @@ def test_OutlierCount_inputs(): name_template="%s_outliers", output_name="out_outliers", ), - polort=dict( - argstr="-polort %d", - ), - qthr=dict( - argstr="-qthr %.5f", - usedefault=True, - ), - save_outliers=dict( - usedefault=True, - ), + polort=dict(argstr="-polort %d"), + qthr=dict(argstr="-qthr %.5f", usedefault=True), + save_outliers=dict(usedefault=True), ) inputs = OutlierCount.input_spec() @@ -78,12 +40,7 @@ def test_OutlierCount_inputs(): def test_OutlierCount_outputs(): output_map = dict( - out_file=dict( - extensions=None, - ), - out_outliers=dict( - extensions=None, - ), + out_file=dict(extensions=None), out_outliers=dict(extensions=None) ) outputs = OutlierCount.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_QualityIndex.py b/nipype/interfaces/afni/tests/test_auto_QualityIndex.py index c759be87a4..899c9bb292 100644 --- a/nipype/interfaces/afni/tests/test_auto_QualityIndex.py +++ b/nipype/interfaces/afni/tests/test_auto_QualityIndex.py @@ -4,41 +4,14 @@ def test_QualityIndex_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - autoclip=dict( - argstr="-autoclip", - usedefault=True, - xor=["mask"], - ), - automask=dict( - argstr="-automask", - usedefault=True, - 
xor=["mask"], - ), - clip=dict( - argstr="-clip %f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - interval=dict( - argstr="-range", - usedefault=True, - ), - mask=dict( - argstr="-mask %s", - extensions=None, - xor=["autoclip", "automask"], - ), + args=dict(argstr="%s"), + autoclip=dict(argstr="-autoclip", usedefault=True, xor=["mask"]), + automask=dict(argstr="-automask", usedefault=True, xor=["mask"]), + clip=dict(argstr="-clip %f"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + interval=dict(argstr="-range", usedefault=True), + mask=dict(argstr="-mask %s", extensions=None, xor=["autoclip", "automask"]), out_file=dict( argstr="> %s", extensions=None, @@ -47,14 +20,8 @@ def test_QualityIndex_inputs(): name_template="%s_tqual", position=-1, ), - quadrant=dict( - argstr="-quadrant", - usedefault=True, - ), - spearman=dict( - argstr="-spearman", - usedefault=True, - ), + quadrant=dict(argstr="-quadrant", usedefault=True), + spearman=dict(argstr="-spearman", usedefault=True), ) inputs = QualityIndex.input_spec() @@ -64,11 +31,7 @@ def test_QualityIndex_inputs(): def test_QualityIndex_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = QualityIndex.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Qwarp.py b/nipype/interfaces/afni/tests/test_auto_Qwarp.py index 181f7217dd..619c2f5edd 100644 --- a/nipype/interfaces/afni/tests/test_auto_Qwarp.py +++ b/nipype/interfaces/afni/tests/test_auto_Qwarp.py @@ -4,199 +4,74 @@ def test_Qwarp_inputs(): input_map = dict( - Qfinal=dict( - argstr="-Qfinal", - ), - Qonly=dict( - argstr="-Qonly", - ), - allineate=dict( - argstr="-allineate", - ), - allineate_opts=dict( - argstr="-allineate_opts %s", - requires=["allineate"], - ), - allsave=dict( - argstr="-allsave", - xor=["nopadWARP", "duplo", "plusminus"], - ), - args=dict( - argstr="%s", - ), - ballopt=dict( - argstr="-ballopt", - xor=["workhard", "boxopt"], - ), + Qfinal=dict(argstr="-Qfinal"), + Qonly=dict(argstr="-Qonly"), + allineate=dict(argstr="-allineate"), + allineate_opts=dict(argstr="-allineate_opts %s", requires=["allineate"]), + allsave=dict(argstr="-allsave", xor=["nopadWARP", "duplo", "plusminus"]), + args=dict(argstr="%s"), + ballopt=dict(argstr="-ballopt", xor=["workhard", "boxopt"]), base_file=dict( - argstr="-base %s", - copyfile=False, - extensions=None, - mandatory=True, - ), - baxopt=dict( - argstr="-boxopt", - xor=["workhard", "ballopt"], - ), - blur=dict( - argstr="-blur %s", + argstr="-base %s", copyfile=False, extensions=None, mandatory=True ), + baxopt=dict(argstr="-boxopt", xor=["workhard", "ballopt"]), + blur=dict(argstr="-blur %s"), duplo=dict( argstr="-duplo", xor=["gridlist", "maxlev", "inilev", "iniwarp", "plusminus", "allsave"], ), - emask=dict( - argstr="-emask %s", - copyfile=False, - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - expad=dict( - argstr="-expad %d", - xor=["nopadWARP"], - ), + emask=dict(argstr="-emask %s", copyfile=False, extensions=None), + environ=dict(nohash=True, usedefault=True), + expad=dict(argstr="-expad %d", xor=["nopadWARP"]), gridlist=dict( argstr="-gridlist %s", copyfile=False, extensions=None, xor=["duplo", "plusminus"], ), - hel=dict( - argstr="-hel", - xor=["nmi", "mi", "lpc", "lpa", 
"pear"], - ), + hel=dict(argstr="-hel", xor=["nmi", "mi", "lpc", "lpa", "pear"]), in_file=dict( - argstr="-source %s", - copyfile=False, - extensions=None, - mandatory=True, - ), - inilev=dict( - argstr="-inilev %d", - xor=["duplo"], - ), - iniwarp=dict( - argstr="-iniwarp %s", - xor=["duplo"], - ), - iwarp=dict( - argstr="-iwarp", - xor=["plusminus"], - ), - lpa=dict( - argstr="-lpa", - xor=["nmi", "mi", "lpc", "hel", "pear"], - ), - lpc=dict( - argstr="-lpc", - position=-2, - xor=["nmi", "mi", "hel", "lpa", "pear"], - ), - maxlev=dict( - argstr="-maxlev %d", - position=-1, - xor=["duplo"], - ), - mi=dict( - argstr="-mi", - xor=["mi", "hel", "lpc", "lpa", "pear"], - ), - minpatch=dict( - argstr="-minpatch %d", - ), - nmi=dict( - argstr="-nmi", - xor=["nmi", "hel", "lpc", "lpa", "pear"], - ), - noXdis=dict( - argstr="-noXdis", - ), - noYdis=dict( - argstr="-noYdis", - ), - noZdis=dict( - argstr="-noZdis", - ), - noneg=dict( - argstr="-noneg", - ), - nopad=dict( - argstr="-nopad", - ), - nopadWARP=dict( - argstr="-nopadWARP", - xor=["allsave", "expad"], - ), - nopenalty=dict( - argstr="-nopenalty", - ), - nowarp=dict( - argstr="-nowarp", - ), - noweight=dict( - argstr="-noweight", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + argstr="-source %s", copyfile=False, extensions=None, mandatory=True + ), + inilev=dict(argstr="-inilev %d", xor=["duplo"]), + iniwarp=dict(argstr="-iniwarp %s", xor=["duplo"]), + iwarp=dict(argstr="-iwarp", xor=["plusminus"]), + lpa=dict(argstr="-lpa", xor=["nmi", "mi", "lpc", "hel", "pear"]), + lpc=dict(argstr="-lpc", position=-2, xor=["nmi", "mi", "hel", "lpa", "pear"]), + maxlev=dict(argstr="-maxlev %d", position=-1, xor=["duplo"]), + mi=dict(argstr="-mi", xor=["mi", "hel", "lpc", "lpa", "pear"]), + minpatch=dict(argstr="-minpatch %d"), + nmi=dict(argstr="-nmi", xor=["nmi", "hel", "lpc", "lpa", "pear"]), + noXdis=dict(argstr="-noXdis"), + noYdis=dict(argstr="-noYdis"), + noZdis=dict(argstr="-noZdis"), + noneg=dict(argstr="-noneg"), + nopad=dict(argstr="-nopad"), + nopadWARP=dict(argstr="-nopadWARP", xor=["allsave", "expad"]), + nopenalty=dict(argstr="-nopenalty"), + nowarp=dict(argstr="-nowarp"), + noweight=dict(argstr="-noweight"), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, name_source=["in_file"], name_template="ppp_%s", ), - out_weight_file=dict( - argstr="-wtprefix %s", - extensions=None, - ), + out_weight_file=dict(argstr="-wtprefix %s", extensions=None), outputtype=dict(), - overwrite=dict( - argstr="-overwrite", - ), - pblur=dict( - argstr="-pblur %s", - ), - pear=dict( - argstr="-pear", - ), - penfac=dict( - argstr="-penfac %f", - ), - plusminus=dict( - argstr="-plusminus", - xor=["duplo", "allsave", "iwarp"], - ), - quiet=dict( - argstr="-quiet", - xor=["verb"], - ), - resample=dict( - argstr="-resample", - ), - verb=dict( - argstr="-verb", - xor=["quiet"], - ), - wball=dict( - argstr="-wball %s", - xor=["wmask"], - ), - weight=dict( - argstr="-weight %s", - extensions=None, - ), - wmask=dict( - argstr="-wpass %s %f", - xor=["wball"], - ), - workhard=dict( - argstr="-workhard", - xor=["boxopt", "ballopt"], - ), + overwrite=dict(argstr="-overwrite"), + pblur=dict(argstr="-pblur %s"), + pear=dict(argstr="-pear"), + penfac=dict(argstr="-penfac %f"), + plusminus=dict(argstr="-plusminus", xor=["duplo", "allsave", "iwarp"]), + quiet=dict(argstr="-quiet", xor=["verb"]), + resample=dict(argstr="-resample"), + verb=dict(argstr="-verb", xor=["quiet"]), + wball=dict(argstr="-wball %s", 
xor=["wmask"]), + weight=dict(argstr="-weight %s", extensions=None), + wmask=dict(argstr="-wpass %s %f", xor=["wball"]), + workhard=dict(argstr="-workhard", xor=["boxopt", "ballopt"]), ) inputs = Qwarp.input_spec() @@ -207,21 +82,11 @@ def test_Qwarp_inputs(): def test_Qwarp_outputs(): output_map = dict( - base_warp=dict( - extensions=None, - ), - source_warp=dict( - extensions=None, - ), - warped_base=dict( - extensions=None, - ), - warped_source=dict( - extensions=None, - ), - weights=dict( - extensions=None, - ), + base_warp=dict(extensions=None), + source_warp=dict(extensions=None), + warped_base=dict(extensions=None), + warped_source=dict(extensions=None), + weights=dict(extensions=None), ) outputs = Qwarp.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py b/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py index 4f386ab63b..9ef9bf1b4b 100644 --- a/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py +++ b/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py @@ -4,181 +4,71 @@ def test_QwarpPlusMinus_inputs(): input_map = dict( - Qfinal=dict( - argstr="-Qfinal", - ), - Qonly=dict( - argstr="-Qonly", - ), - allineate=dict( - argstr="-allineate", - ), - allineate_opts=dict( - argstr="-allineate_opts %s", - requires=["allineate"], - ), - allsave=dict( - argstr="-allsave", - xor=["nopadWARP", "duplo", "plusminus"], - ), - args=dict( - argstr="%s", - ), - ballopt=dict( - argstr="-ballopt", - xor=["workhard", "boxopt"], - ), + Qfinal=dict(argstr="-Qfinal"), + Qonly=dict(argstr="-Qonly"), + allineate=dict(argstr="-allineate"), + allineate_opts=dict(argstr="-allineate_opts %s", requires=["allineate"]), + allsave=dict(argstr="-allsave", xor=["nopadWARP", "duplo", "plusminus"]), + args=dict(argstr="%s"), + ballopt=dict(argstr="-ballopt", xor=["workhard", "boxopt"]), base_file=dict( - argstr="-base %s", - copyfile=False, - extensions=None, - mandatory=True, - ), - baxopt=dict( - argstr="-boxopt", - xor=["workhard", "ballopt"], - ), - blur=dict( - argstr="-blur %s", + argstr="-base %s", copyfile=False, extensions=None, mandatory=True ), + baxopt=dict(argstr="-boxopt", xor=["workhard", "ballopt"]), + blur=dict(argstr="-blur %s"), duplo=dict( argstr="-duplo", xor=["gridlist", "maxlev", "inilev", "iniwarp", "plusminus", "allsave"], ), - emask=dict( - argstr="-emask %s", - copyfile=False, - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - expad=dict( - argstr="-expad %d", - xor=["nopadWARP"], - ), + emask=dict(argstr="-emask %s", copyfile=False, extensions=None), + environ=dict(nohash=True, usedefault=True), + expad=dict(argstr="-expad %d", xor=["nopadWARP"]), gridlist=dict( argstr="-gridlist %s", copyfile=False, extensions=None, xor=["duplo", "plusminus"], ), - hel=dict( - argstr="-hel", - xor=["nmi", "mi", "lpc", "lpa", "pear"], - ), + hel=dict(argstr="-hel", xor=["nmi", "mi", "lpc", "lpa", "pear"]), in_file=dict( - argstr="-source %s", - copyfile=False, - extensions=None, - mandatory=True, - ), - inilev=dict( - argstr="-inilev %d", - xor=["duplo"], - ), - iniwarp=dict( - argstr="-iniwarp %s", - xor=["duplo"], - ), - iwarp=dict( - argstr="-iwarp", - xor=["plusminus"], - ), - lpa=dict( - argstr="-lpa", - xor=["nmi", "mi", "lpc", "hel", "pear"], - ), - lpc=dict( - argstr="-lpc", - position=-2, - xor=["nmi", "mi", "hel", "lpa", "pear"], - ), - maxlev=dict( - argstr="-maxlev %d", - position=-1, - xor=["duplo"], - ), - mi=dict( - argstr="-mi", - xor=["mi", "hel", "lpc", "lpa", "pear"], - ), - minpatch=dict( - argstr="-minpatch 
%d", - ), - nmi=dict( - argstr="-nmi", - xor=["nmi", "hel", "lpc", "lpa", "pear"], - ), - noXdis=dict( - argstr="-noXdis", - ), - noYdis=dict( - argstr="-noYdis", - ), - noZdis=dict( - argstr="-noZdis", - ), - noneg=dict( - argstr="-noneg", - ), - nopad=dict( - argstr="-nopad", - ), - nopadWARP=dict( - argstr="-nopadWARP", - xor=["allsave", "expad"], - ), - nopenalty=dict( - argstr="-nopenalty", - ), - nowarp=dict( - argstr="-nowarp", - ), - noweight=dict( - argstr="-noweight", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + argstr="-source %s", copyfile=False, extensions=None, mandatory=True + ), + inilev=dict(argstr="-inilev %d", xor=["duplo"]), + iniwarp=dict(argstr="-iniwarp %s", xor=["duplo"]), + iwarp=dict(argstr="-iwarp", xor=["plusminus"]), + lpa=dict(argstr="-lpa", xor=["nmi", "mi", "lpc", "hel", "pear"]), + lpc=dict(argstr="-lpc", position=-2, xor=["nmi", "mi", "hel", "lpa", "pear"]), + maxlev=dict(argstr="-maxlev %d", position=-1, xor=["duplo"]), + mi=dict(argstr="-mi", xor=["mi", "hel", "lpc", "lpa", "pear"]), + minpatch=dict(argstr="-minpatch %d"), + nmi=dict(argstr="-nmi", xor=["nmi", "hel", "lpc", "lpa", "pear"]), + noXdis=dict(argstr="-noXdis"), + noYdis=dict(argstr="-noYdis"), + noZdis=dict(argstr="-noZdis"), + noneg=dict(argstr="-noneg"), + nopad=dict(argstr="-nopad"), + nopadWARP=dict(argstr="-nopadWARP", xor=["allsave", "expad"]), + nopenalty=dict(argstr="-nopenalty"), + nowarp=dict(argstr="-nowarp"), + noweight=dict(argstr="-noweight"), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( - argstr="-prefix %s", - extensions=None, - position=0, - usedefault=True, - ), - out_weight_file=dict( - argstr="-wtprefix %s", - extensions=None, + argstr="-prefix %s", extensions=None, position=0, usedefault=True ), + out_weight_file=dict(argstr="-wtprefix %s", extensions=None), outputtype=dict(), - overwrite=dict( - argstr="-overwrite", - ), - pblur=dict( - argstr="-pblur %s", - ), - pear=dict( - argstr="-pear", - ), - penfac=dict( - argstr="-penfac %f", - ), + overwrite=dict(argstr="-overwrite"), + pblur=dict(argstr="-pblur %s"), + pear=dict(argstr="-pear"), + penfac=dict(argstr="-penfac %f"), plusminus=dict( argstr="-plusminus", position=1, usedefault=True, xor=["duplo", "allsave", "iwarp"], ), - quiet=dict( - argstr="-quiet", - xor=["verb"], - ), - resample=dict( - argstr="-resample", - ), + quiet=dict(argstr="-quiet", xor=["verb"]), + resample=dict(argstr="-resample"), source_file=dict( argstr="-source %s", copyfile=False, @@ -186,26 +76,11 @@ def test_QwarpPlusMinus_inputs(): extensions=None, new_name="in_file", ), - verb=dict( - argstr="-verb", - xor=["quiet"], - ), - wball=dict( - argstr="-wball %s", - xor=["wmask"], - ), - weight=dict( - argstr="-weight %s", - extensions=None, - ), - wmask=dict( - argstr="-wpass %s %f", - xor=["wball"], - ), - workhard=dict( - argstr="-workhard", - xor=["boxopt", "ballopt"], - ), + verb=dict(argstr="-verb", xor=["quiet"]), + wball=dict(argstr="-wball %s", xor=["wmask"]), + weight=dict(argstr="-weight %s", extensions=None), + wmask=dict(argstr="-wpass %s %f", xor=["wball"]), + workhard=dict(argstr="-workhard", xor=["boxopt", "ballopt"]), ) inputs = QwarpPlusMinus.input_spec() @@ -216,21 +91,11 @@ def test_QwarpPlusMinus_inputs(): def test_QwarpPlusMinus_outputs(): output_map = dict( - base_warp=dict( - extensions=None, - ), - source_warp=dict( - extensions=None, - ), - warped_base=dict( - extensions=None, - ), - warped_source=dict( - extensions=None, - ), - weights=dict( - extensions=None, - ), + 
base_warp=dict(extensions=None), + source_warp=dict(extensions=None), + warped_base=dict(extensions=None), + warped_source=dict(extensions=None), + weights=dict(extensions=None), ) outputs = QwarpPlusMinus.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_ROIStats.py b/nipype/interfaces/afni/tests/test_auto_ROIStats.py index 36e7546990..c2bea7fbbc 100644 --- a/nipype/interfaces/afni/tests/test_auto_ROIStats.py +++ b/nipype/interfaces/afni/tests/test_auto_ROIStats.py @@ -4,30 +4,12 @@ def test_ROIStats_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - debug=dict( - argstr="-debug", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - format1D=dict( - argstr="-1Dformat", - xor=["format1DR"], - ), - format1DR=dict( - argstr="-1DRformat", - xor=["format1D"], - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s"), + debug=dict(argstr="-debug"), + environ=dict(nohash=True, usedefault=True), + format1D=dict(argstr="-1Dformat", xor=["format1DR"]), + format1DR=dict(argstr="-1DRformat", xor=["format1D"]), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), mask=dict( argstr="-mask %s", deprecated="1.1.4", @@ -35,22 +17,11 @@ def test_ROIStats_inputs(): new_name="mask_file", position=3, ), - mask_f2short=dict( - argstr="-mask_f2short", - ), - mask_file=dict( - argstr="-mask %s", - extensions=None, - ), - nobriklab=dict( - argstr="-nobriklab", - ), - nomeanout=dict( - argstr="-nomeanout", - ), - num_roi=dict( - argstr="-numroi %s", - ), + mask_f2short=dict(argstr="-mask_f2short"), + mask_file=dict(argstr="-mask %s", extensions=None), + nobriklab=dict(argstr="-nobriklab"), + nomeanout=dict(argstr="-nomeanout"), + num_roi=dict(argstr="-numroi %s"), out_file=dict( argstr="> %s", extensions=None, @@ -59,20 +30,10 @@ def test_ROIStats_inputs(): name_template="%s_roistat.1D", position=-1, ), - quiet=dict( - argstr="-quiet", - ), - roisel=dict( - argstr="-roisel %s", - extensions=None, - ), - stat=dict( - argstr="%s...", - ), - zerofill=dict( - argstr="-zerofill %s", - requires=["num_roi"], - ), + quiet=dict(argstr="-quiet"), + roisel=dict(argstr="-roisel %s", extensions=None), + stat=dict(argstr="%s..."), + zerofill=dict(argstr="-zerofill %s", requires=["num_roi"]), ) inputs = ROIStats.input_spec() @@ -82,11 +43,7 @@ def test_ROIStats_inputs(): def test_ROIStats_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = ROIStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_ReHo.py b/nipype/interfaces/afni/tests/test_auto_ReHo.py index f9a1993ce1..695770ca1b 100644 --- a/nipype/interfaces/afni/tests/test_auto_ReHo.py +++ b/nipype/interfaces/afni/tests/test_auto_ReHo.py @@ -4,38 +4,16 @@ def test_ReHo_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - chi_sq=dict( - argstr="-chi_sq", - ), + args=dict(argstr="%s"), + chi_sq=dict(argstr="-chi_sq"), ellipsoid=dict( - argstr="-neigh_X %s -neigh_Y %s -neigh_Z %s", - xor=["sphere", "neighborhood"], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-inset %s", - extensions=None, - mandatory=True, - position=1, - ), - label_set=dict( - argstr="-in_rois %s", - extensions=None, - ), - mask_file=dict( - argstr="-mask %s", - extensions=None, - ), - neighborhood=dict( - argstr="-nneigh %s", - xor=["sphere", "ellipsoid"], + argstr="-neigh_X %s -neigh_Y %s 
-neigh_Z %s", xor=["sphere", "neighborhood"] ), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-inset %s", extensions=None, mandatory=True, position=1), + label_set=dict(argstr="-in_rois %s", extensions=None), + mask_file=dict(argstr="-mask %s", extensions=None), + neighborhood=dict(argstr="-nneigh %s", xor=["sphere", "ellipsoid"]), out_file=dict( argstr="-prefix %s", extensions=None, @@ -44,13 +22,8 @@ def test_ReHo_inputs(): name_template="%s_reho", position=0, ), - overwrite=dict( - argstr="-overwrite", - ), - sphere=dict( - argstr="-neigh_RAD %s", - xor=["neighborhood", "ellipsoid"], - ), + overwrite=dict(argstr="-overwrite"), + sphere=dict(argstr="-neigh_RAD %s", xor=["neighborhood", "ellipsoid"]), ) inputs = ReHo.input_spec() @@ -60,14 +33,7 @@ def test_ReHo_inputs(): def test_ReHo_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - out_vals=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None), out_vals=dict(extensions=None)) outputs = ReHo.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Refit.py b/nipype/interfaces/afni/tests/test_auto_Refit.py index 205f7e0190..d04433e85e 100644 --- a/nipype/interfaces/afni/tests/test_auto_Refit.py +++ b/nipype/interfaces/afni/tests/test_auto_Refit.py @@ -4,69 +4,27 @@ def test_Refit_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - atrcopy=dict( - argstr="-atrcopy %s %s", - ), - atrfloat=dict( - argstr="-atrfloat %s %s", - ), - atrint=dict( - argstr="-atrint %s %s", - ), - atrstring=dict( - argstr="-atrstring %s %s", - ), - deoblique=dict( - argstr="-deoblique", - ), - duporigin_file=dict( - argstr="-duporigin %s", - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + atrcopy=dict(argstr="-atrcopy %s %s"), + atrfloat=dict(argstr="-atrfloat %s %s"), + atrint=dict(argstr="-atrint %s %s"), + atrstring=dict(argstr="-atrstring %s %s"), + deoblique=dict(argstr="-deoblique"), + duporigin_file=dict(argstr="-duporigin %s", extensions=None), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="%s", - copyfile=True, - extensions=None, - mandatory=True, - position=-1, - ), - nosaveatr=dict( - argstr="-nosaveatr", - ), - saveatr=dict( - argstr="-saveatr", - ), - space=dict( - argstr="-space %s", - ), - xdel=dict( - argstr="-xdel %f", - ), - xorigin=dict( - argstr="-xorigin %s", - ), - xyzscale=dict( - argstr="-xyzscale %f", - ), - ydel=dict( - argstr="-ydel %f", - ), - yorigin=dict( - argstr="-yorigin %s", - ), - zdel=dict( - argstr="-zdel %f", - ), - zorigin=dict( - argstr="-zorigin %s", - ), + argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-1 + ), + nosaveatr=dict(argstr="-nosaveatr"), + saveatr=dict(argstr="-saveatr"), + space=dict(argstr="-space %s"), + xdel=dict(argstr="-xdel %f"), + xorigin=dict(argstr="-xorigin %s"), + xyzscale=dict(argstr="-xyzscale %f"), + ydel=dict(argstr="-ydel %f"), + yorigin=dict(argstr="-yorigin %s"), + zdel=dict(argstr="-zdel %f"), + zorigin=dict(argstr="-zorigin %s"), ) inputs = Refit.input_spec() @@ -76,11 +34,7 @@ def test_Refit_inputs(): def test_Refit_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Refit.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Remlfit.py b/nipype/interfaces/afni/tests/test_auto_Remlfit.py index 
cfffeeb40e..4a1fcfad00 100644 --- a/nipype/interfaces/afni/tests/test_auto_Remlfit.py +++ b/nipype/interfaces/afni/tests/test_auto_Remlfit.py @@ -4,156 +4,46 @@ def test_Remlfit_inputs(): input_map = dict( - STATmask=dict( - argstr="-STATmask %s", - extensions=None, - ), - addbase=dict( - argstr="-addbase %s", - copyfile=False, - sep=" ", - ), - args=dict( - argstr="%s", - ), - automask=dict( - argstr="-automask", - usedefault=True, - ), - dsort=dict( - argstr="-dsort %s", - copyfile=False, - extensions=None, - ), - dsort_nods=dict( - argstr="-dsort_nods", - requires=["dsort"], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - errts_file=dict( - argstr="-Rerrts %s", - extensions=None, - ), - fitts_file=dict( - argstr="-Rfitts %s", - extensions=None, - ), - fout=dict( - argstr="-fout", - ), - glt_file=dict( - argstr="-Rglt %s", - extensions=None, - ), - gltsym=dict( - argstr='-gltsym "%s" %s...', - ), - goforit=dict( - argstr="-GOFORIT", - ), - in_files=dict( - argstr='-input "%s"', - copyfile=False, - mandatory=True, - sep=" ", - ), - mask=dict( - argstr="-mask %s", - extensions=None, - ), - matim=dict( - argstr="-matim %s", - extensions=None, - xor=["matrix"], - ), - matrix=dict( - argstr="-matrix %s", - extensions=None, - mandatory=True, - ), - nobout=dict( - argstr="-nobout", - ), - nodmbase=dict( - argstr="-nodmbase", - requires=["addbase", "dsort"], - ), - nofdr=dict( - argstr="-noFDR", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - obeta=dict( - argstr="-Obeta %s", - extensions=None, - ), - obuck=dict( - argstr="-Obuck %s", - extensions=None, - ), - oerrts=dict( - argstr="-Oerrts %s", - extensions=None, - ), - ofitts=dict( - argstr="-Ofitts %s", - extensions=None, - ), - oglt=dict( - argstr="-Oglt %s", - extensions=None, - ), - out_file=dict( - argstr="-Rbuck %s", - extensions=None, - ), + STATmask=dict(argstr="-STATmask %s", extensions=None), + addbase=dict(argstr="-addbase %s", copyfile=False, sep=" "), + args=dict(argstr="%s"), + automask=dict(argstr="-automask", usedefault=True), + dsort=dict(argstr="-dsort %s", copyfile=False, extensions=None), + dsort_nods=dict(argstr="-dsort_nods", requires=["dsort"]), + environ=dict(nohash=True, usedefault=True), + errts_file=dict(argstr="-Rerrts %s", extensions=None), + fitts_file=dict(argstr="-Rfitts %s", extensions=None), + fout=dict(argstr="-fout"), + glt_file=dict(argstr="-Rglt %s", extensions=None), + gltsym=dict(argstr='-gltsym "%s" %s...'), + goforit=dict(argstr="-GOFORIT"), + in_files=dict(argstr='-input "%s"', copyfile=False, mandatory=True, sep=" "), + mask=dict(argstr="-mask %s", extensions=None), + matim=dict(argstr="-matim %s", extensions=None, xor=["matrix"]), + matrix=dict(argstr="-matrix %s", extensions=None, mandatory=True), + nobout=dict(argstr="-nobout"), + nodmbase=dict(argstr="-nodmbase", requires=["addbase", "dsort"]), + nofdr=dict(argstr="-noFDR"), + num_threads=dict(nohash=True, usedefault=True), + obeta=dict(argstr="-Obeta %s", extensions=None), + obuck=dict(argstr="-Obuck %s", extensions=None), + oerrts=dict(argstr="-Oerrts %s", extensions=None), + ofitts=dict(argstr="-Ofitts %s", extensions=None), + oglt=dict(argstr="-Oglt %s", extensions=None), + out_file=dict(argstr="-Rbuck %s", extensions=None), outputtype=dict(), - ovar=dict( - argstr="-Ovar %s", - extensions=None, - ), - polort=dict( - argstr="-polort %d", - xor=["matrix"], - ), - quiet=dict( - argstr="-quiet", - ), - rbeta_file=dict( - argstr="-Rbeta %s", - extensions=None, - ), - rout=dict( - argstr="-rout", - ), - 
slibase=dict( - argstr="-slibase %s", - ), - slibase_sm=dict( - argstr="-slibase_sm %s", - ), - tout=dict( - argstr="-tout", - ), - usetemp=dict( - argstr="-usetemp", - ), - var_file=dict( - argstr="-Rvar %s", - extensions=None, - ), - verb=dict( - argstr="-verb", - ), - wherr_file=dict( - argstr="-Rwherr %s", - extensions=None, - ), + ovar=dict(argstr="-Ovar %s", extensions=None), + polort=dict(argstr="-polort %d", xor=["matrix"]), + quiet=dict(argstr="-quiet"), + rbeta_file=dict(argstr="-Rbeta %s", extensions=None), + rout=dict(argstr="-rout"), + slibase=dict(argstr="-slibase %s"), + slibase_sm=dict(argstr="-slibase_sm %s"), + tout=dict(argstr="-tout"), + usetemp=dict(argstr="-usetemp"), + var_file=dict(argstr="-Rvar %s", extensions=None), + verb=dict(argstr="-verb"), + wherr_file=dict(argstr="-Rwherr %s", extensions=None), ) inputs = Remlfit.input_spec() @@ -164,45 +54,19 @@ def test_Remlfit_inputs(): def test_Remlfit_outputs(): output_map = dict( - errts_file=dict( - extensions=None, - ), - fitts_file=dict( - extensions=None, - ), - glt_file=dict( - extensions=None, - ), - obeta=dict( - extensions=None, - ), - obuck=dict( - extensions=None, - ), - oerrts=dict( - extensions=None, - ), - ofitts=dict( - extensions=None, - ), - oglt=dict( - extensions=None, - ), - out_file=dict( - extensions=None, - ), - ovar=dict( - extensions=None, - ), - rbeta_file=dict( - extensions=None, - ), - var_file=dict( - extensions=None, - ), - wherr_file=dict( - extensions=None, - ), + errts_file=dict(extensions=None), + fitts_file=dict(extensions=None), + glt_file=dict(extensions=None), + obeta=dict(extensions=None), + obuck=dict(extensions=None), + oerrts=dict(extensions=None), + ofitts=dict(extensions=None), + oglt=dict(extensions=None), + out_file=dict(extensions=None), + ovar=dict(extensions=None), + rbeta_file=dict(extensions=None), + var_file=dict(extensions=None), + wherr_file=dict(extensions=None), ) outputs = Remlfit.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Resample.py b/nipype/interfaces/afni/tests/test_auto_Resample.py index 792c03aa9f..3aef4f5b9b 100644 --- a/nipype/interfaces/afni/tests/test_auto_Resample.py +++ b/nipype/interfaces/afni/tests/test_auto_Resample.py @@ -4,13 +4,8 @@ def test_Resample_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), in_file=dict( argstr="-inset %s", copyfile=False, @@ -18,17 +13,9 @@ def test_Resample_inputs(): mandatory=True, position=-1, ), - master=dict( - argstr="-master %s", - extensions=None, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - orientation=dict( - argstr="-orient %s", - ), + master=dict(argstr="-master %s", extensions=None), + num_threads=dict(nohash=True, usedefault=True), + orientation=dict(argstr="-orient %s"), out_file=dict( argstr="-prefix %s", extensions=None, @@ -36,12 +23,8 @@ def test_Resample_inputs(): name_template="%s_resample", ), outputtype=dict(), - resample_mode=dict( - argstr="-rmode %s", - ), - voxel_size=dict( - argstr="-dxyz %f %f %f", - ), + resample_mode=dict(argstr="-rmode %s"), + voxel_size=dict(argstr="-dxyz %f %f %f"), ) inputs = Resample.input_spec() @@ -51,11 +34,7 @@ def test_Resample_inputs(): def test_Resample_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Resample.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/afni/tests/test_auto_Retroicor.py b/nipype/interfaces/afni/tests/test_auto_Retroicor.py index 03039a291f..309212634d 100644 --- a/nipype/interfaces/afni/tests/test_auto_Retroicor.py +++ b/nipype/interfaces/afni/tests/test_auto_Retroicor.py @@ -4,39 +4,17 @@ def test_Retroicor_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - card=dict( - argstr="-card %s", - extensions=None, - position=-2, - ), + args=dict(argstr="%s"), + card=dict(argstr="-card %s", extensions=None, position=-2), cardphase=dict( - argstr="-cardphase %s", - extensions=None, - hash_files=False, - position=-6, - ), - environ=dict( - nohash=True, - usedefault=True, + argstr="-cardphase %s", extensions=None, hash_files=False, position=-6 ), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - order=dict( - argstr="-order %s", - position=-5, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 ), + num_threads=dict(nohash=True, usedefault=True), + order=dict(argstr="-order %s", position=-5), out_file=dict( argstr="-prefix %s", extensions=None, @@ -45,21 +23,11 @@ def test_Retroicor_inputs(): position=1, ), outputtype=dict(), - resp=dict( - argstr="-resp %s", - extensions=None, - position=-3, - ), + resp=dict(argstr="-resp %s", extensions=None, position=-3), respphase=dict( - argstr="-respphase %s", - extensions=None, - hash_files=False, - position=-7, - ), - threshold=dict( - argstr="-threshold %d", - position=-4, + argstr="-respphase %s", extensions=None, hash_files=False, position=-7 ), + threshold=dict(argstr="-threshold %d", position=-4), ) inputs = Retroicor.input_spec() @@ -69,11 +37,7 @@ def test_Retroicor_inputs(): def test_Retroicor_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Retroicor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_SVMTest.py b/nipype/interfaces/afni/tests/test_auto_SVMTest.py index 665a4a6156..5f9f3d930f 100644 --- a/nipype/interfaces/afni/tests/test_auto_SVMTest.py +++ b/nipype/interfaces/afni/tests/test_auto_SVMTest.py @@ -4,51 +4,21 @@ def test_SVMTest_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - classout=dict( - argstr="-classout", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-testvol %s", - extensions=None, - mandatory=True, - ), - model=dict( - argstr="-model %s", - mandatory=True, - ), - multiclass=dict( - argstr="-multiclass %s", - ), - nodetrend=dict( - argstr="-nodetrend", - ), - nopredcensord=dict( - argstr="-nopredcensord", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - options=dict( - argstr="%s", - ), + args=dict(argstr="%s"), + classout=dict(argstr="-classout"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-testvol %s", extensions=None, mandatory=True), + model=dict(argstr="-model %s", mandatory=True), + multiclass=dict(argstr="-multiclass %s"), + nodetrend=dict(argstr="-nodetrend"), + nopredcensord=dict(argstr="-nopredcensord"), + num_threads=dict(nohash=True, usedefault=True), + options=dict(argstr="%s"), out_file=dict( - argstr="-predictions %s", - extensions=None, - name_template="%s_predictions", + argstr="-predictions %s", extensions=None, name_template="%s_predictions" ), outputtype=dict(), - testlabels=dict( - 
argstr="-testlabels %s", - extensions=None, - ), + testlabels=dict(argstr="-testlabels %s", extensions=None), ) inputs = SVMTest.input_spec() @@ -58,11 +28,7 @@ def test_SVMTest_inputs(): def test_SVMTest_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = SVMTest.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_SVMTrain.py b/nipype/interfaces/afni/tests/test_auto_SVMTrain.py index f79bf1b9ac..d7dbaf8628 100644 --- a/nipype/interfaces/afni/tests/test_auto_SVMTrain.py +++ b/nipype/interfaces/afni/tests/test_auto_SVMTrain.py @@ -11,35 +11,15 @@ def test_SVMTrain_inputs(): name_template="%s_alphas", suffix="_alphas", ), - args=dict( - argstr="%s", - ), - censor=dict( - argstr="-censor %s", - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + censor=dict(argstr="-censor %s", extensions=None), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="-trainvol %s", - copyfile=False, - extensions=None, - mandatory=True, - ), - kernel=dict( - argstr="-kernel %s", - ), - mask=dict( - argstr="-mask %s", - copyfile=False, - extensions=None, - position=-1, - ), - max_iterations=dict( - argstr="-max_iterations %d", + argstr="-trainvol %s", copyfile=False, extensions=None, mandatory=True ), + kernel=dict(argstr="-kernel %s"), + mask=dict(argstr="-mask %s", copyfile=False, extensions=None, position=-1), + max_iterations=dict(argstr="-max_iterations %d"), model=dict( argstr="-model %s", extensions=None, @@ -47,16 +27,9 @@ def test_SVMTrain_inputs(): name_template="%s_model", suffix="_model", ), - nomodelmask=dict( - argstr="-nomodelmask", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - options=dict( - argstr="%s", - ), + nomodelmask=dict(argstr="-nomodelmask"), + num_threads=dict(nohash=True, usedefault=True), + options=dict(argstr="%s"), out_file=dict( argstr="-bucket %s", extensions=None, @@ -65,17 +38,9 @@ def test_SVMTrain_inputs(): suffix="_bucket", ), outputtype=dict(), - trainlabels=dict( - argstr="-trainlabels %s", - extensions=None, - ), - ttype=dict( - argstr="-type %s", - mandatory=True, - ), - w_out=dict( - argstr="-wout", - ), + trainlabels=dict(argstr="-trainlabels %s", extensions=None), + ttype=dict(argstr="-type %s", mandatory=True), + w_out=dict(argstr="-wout"), ) inputs = SVMTrain.input_spec() @@ -86,15 +51,9 @@ def test_SVMTrain_inputs(): def test_SVMTrain_outputs(): output_map = dict( - alphas=dict( - extensions=None, - ), - model=dict( - extensions=None, - ), - out_file=dict( - extensions=None, - ), + alphas=dict(extensions=None), + model=dict(extensions=None), + out_file=dict(extensions=None), ) outputs = SVMTrain.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Seg.py b/nipype/interfaces/afni/tests/test_auto_Seg.py index 38b256d1ea..18db24ac48 100644 --- a/nipype/interfaces/afni/tests/test_auto_Seg.py +++ b/nipype/interfaces/afni/tests/test_auto_Seg.py @@ -4,28 +4,13 @@ def test_Seg_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bias_classes=dict( - argstr="-bias_classes %s", - ), - bias_fwhm=dict( - argstr="-bias_fwhm %f", - ), - blur_meth=dict( - argstr="-blur_meth %s", - ), - bmrf=dict( - argstr="-bmrf %f", - ), - classes=dict( - argstr="-classes %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + bias_classes=dict(argstr="-bias_classes %s"), + 
bias_fwhm=dict(argstr="-bias_fwhm %f"), + blur_meth=dict(argstr="-blur_meth %s"), + bmrf=dict(argstr="-bmrf %f"), + classes=dict(argstr="-classes %s"), + environ=dict(nohash=True, usedefault=True), in_file=dict( argstr="-anat %s", copyfile=True, @@ -33,23 +18,11 @@ def test_Seg_inputs(): mandatory=True, position=-1, ), - main_N=dict( - argstr="-main_N %d", - ), - mask=dict( - argstr="-mask %s", - mandatory=True, - position=-2, - ), - mixfloor=dict( - argstr="-mixfloor %f", - ), - mixfrac=dict( - argstr="-mixfrac %s", - ), - prefix=dict( - argstr="-prefix %s", - ), + main_N=dict(argstr="-main_N %d"), + mask=dict(argstr="-mask %s", mandatory=True, position=-2), + mixfloor=dict(argstr="-mixfloor %f"), + mixfrac=dict(argstr="-mixfrac %s"), + prefix=dict(argstr="-prefix %s"), ) inputs = Seg.input_spec() @@ -59,11 +32,7 @@ def test_Seg_inputs(): def test_Seg_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Seg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_SkullStrip.py b/nipype/interfaces/afni/tests/test_auto_SkullStrip.py index 4f78254e47..23722f6995 100644 --- a/nipype/interfaces/afni/tests/test_auto_SkullStrip.py +++ b/nipype/interfaces/afni/tests/test_auto_SkullStrip.py @@ -4,13 +4,8 @@ def test_SkullStrip_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), in_file=dict( argstr="-input %s", copyfile=False, @@ -18,10 +13,7 @@ def test_SkullStrip_inputs(): mandatory=True, position=1, ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -38,11 +30,7 @@ def test_SkullStrip_inputs(): def test_SkullStrip_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = SkullStrip.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Synthesize.py b/nipype/interfaces/afni/tests/test_auto_Synthesize.py index 9f787edbfc..40acf54a6f 100644 --- a/nipype/interfaces/afni/tests/test_auto_Synthesize.py +++ b/nipype/interfaces/afni/tests/test_auto_Synthesize.py @@ -4,48 +4,21 @@ def test_Synthesize_inputs(): input_map = dict( - TR=dict( - argstr="-TR %f", - ), - args=dict( - argstr="%s", - ), + TR=dict(argstr="-TR %f"), + args=dict(argstr="%s"), cbucket=dict( - argstr="-cbucket %s", - copyfile=False, - extensions=None, - mandatory=True, - ), - cenfill=dict( - argstr="-cenfill %s", - ), - dry_run=dict( - argstr="-dry", - ), - environ=dict( - nohash=True, - usedefault=True, + argstr="-cbucket %s", copyfile=False, extensions=None, mandatory=True ), + cenfill=dict(argstr="-cenfill %s"), + dry_run=dict(argstr="-dry"), + environ=dict(nohash=True, usedefault=True), matrix=dict( - argstr="-matrix %s", - copyfile=False, - extensions=None, - mandatory=True, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_file=dict( - argstr="-prefix %s", - extensions=None, - name_template="syn", + argstr="-matrix %s", copyfile=False, extensions=None, mandatory=True ), + num_threads=dict(nohash=True, usedefault=True), + out_file=dict(argstr="-prefix %s", extensions=None, name_template="syn"), outputtype=dict(), - select=dict( - argstr="-select %s", - mandatory=True, - ), + 
select=dict(argstr="-select %s", mandatory=True), ) inputs = Synthesize.input_spec() @@ -55,11 +28,7 @@ def test_Synthesize_inputs(): def test_Synthesize_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Synthesize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TCat.py b/nipype/interfaces/afni/tests/test_auto_TCat.py index 595e91383e..d0b4595007 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCat.py +++ b/nipype/interfaces/afni/tests/test_auto_TCat.py @@ -4,23 +4,10 @@ def test_TCat_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr=" %s", - copyfile=False, - mandatory=True, - position=-1, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_files=dict(argstr=" %s", copyfile=False, mandatory=True, position=-1), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -28,13 +15,8 @@ def test_TCat_inputs(): name_template="%s_tcat", ), outputtype=dict(), - rlt=dict( - argstr="-rlt%s", - position=1, - ), - verbose=dict( - argstr="-verb", - ), + rlt=dict(argstr="-rlt%s", position=1), + verbose=dict(argstr="-verb"), ) inputs = TCat.input_spec() @@ -44,11 +26,7 @@ def test_TCat_inputs(): def test_TCat_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = TCat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py b/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py index 728d281d27..17be155fd1 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py +++ b/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py @@ -4,33 +4,13 @@ def test_TCatSubBrick_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr="%s%s ...", - copyfile=False, - mandatory=True, - position=-1, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_file=dict( - argstr="-prefix %s", - extensions=None, - genfile=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_files=dict(argstr="%s%s ...", copyfile=False, mandatory=True, position=-1), + num_threads=dict(nohash=True, usedefault=True), + out_file=dict(argstr="-prefix %s", extensions=None, genfile=True), outputtype=dict(), - rlt=dict( - argstr="-rlt%s", - position=1, - ), + rlt=dict(argstr="-rlt%s", position=1), ) inputs = TCatSubBrick.input_spec() @@ -40,11 +20,7 @@ def test_TCatSubBrick_inputs(): def test_TCatSubBrick_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = TCatSubBrick.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TCorr1D.py b/nipype/interfaces/afni/tests/test_auto_TCorr1D.py index 665a0dfc3d..d35de852ab 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCorr1D.py +++ b/nipype/interfaces/afni/tests/test_auto_TCorr1D.py @@ -4,22 +4,12 @@ def test_TCorr1D_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, 
usedefault=True), ktaub=dict( - argstr=" -ktaub", - position=1, - xor=["pearson", "spearman", "quadrant"], - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr=" -ktaub", position=1, xor=["pearson", "spearman", "quadrant"] ), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -29,33 +19,18 @@ def test_TCorr1D_inputs(): ), outputtype=dict(), pearson=dict( - argstr=" -pearson", - position=1, - xor=["spearman", "quadrant", "ktaub"], + argstr=" -pearson", position=1, xor=["spearman", "quadrant", "ktaub"] ), quadrant=dict( - argstr=" -quadrant", - position=1, - xor=["pearson", "spearman", "ktaub"], + argstr=" -quadrant", position=1, xor=["pearson", "spearman", "ktaub"] ), spearman=dict( - argstr=" -spearman", - position=1, - xor=["pearson", "quadrant", "ktaub"], + argstr=" -spearman", position=1, xor=["pearson", "quadrant", "ktaub"] ), xset=dict( - argstr=" %s", - copyfile=False, - extensions=None, - mandatory=True, - position=-2, - ), - y_1d=dict( - argstr=" %s", - extensions=None, - mandatory=True, - position=-1, + argstr=" %s", copyfile=False, extensions=None, mandatory=True, position=-2 ), + y_1d=dict(argstr=" %s", extensions=None, mandatory=True, position=-1), ) inputs = TCorr1D.input_spec() @@ -65,11 +40,7 @@ def test_TCorr1D_inputs(): def test_TCorr1D_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = TCorr1D.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TCorrMap.py b/nipype/interfaces/afni/tests/test_auto_TCorrMap.py index 8e6b1860ff..7cbd2287c3 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCorrMap.py +++ b/nipype/interfaces/afni/tests/test_auto_TCorrMap.py @@ -15,12 +15,8 @@ def test_TCorrMap_inputs(): "var_absolute_threshold_normalize", ), ), - args=dict( - argstr="%s", - ), - automask=dict( - argstr="-automask", - ), + args=dict(argstr="%s"), + automask=dict(argstr="-automask"), average_expr=dict( argstr="-Aexpr %s %s", extensions=None, @@ -35,54 +31,28 @@ def test_TCorrMap_inputs(): suffix="_cexpr", xor=("average_expr", "average_expr_nonzero", "sum_expr"), ), - bandpass=dict( - argstr="-bpass %f %f", - ), - blur_fwhm=dict( - argstr="-Gblur %f", - ), + bandpass=dict(argstr="-bpass %f %f"), + blur_fwhm=dict(argstr="-Gblur %f"), correlation_maps=dict( - argstr="-CorrMap %s", - extensions=None, - name_source="in_file", + argstr="-CorrMap %s", extensions=None, name_source="in_file" ), correlation_maps_masked=dict( - argstr="-CorrMask %s", - extensions=None, - name_source="in_file", - ), - environ=dict( - nohash=True, - usedefault=True, + argstr="-CorrMask %s", extensions=None, name_source="in_file" ), + environ=dict(nohash=True, usedefault=True), expr=dict(), histogram=dict( - argstr="-Hist %d %s", - extensions=None, - name_source="in_file", - suffix="_hist", + argstr="-Hist %d %s", extensions=None, name_source="in_file", suffix="_hist" ), histogram_bin_numbers=dict(), in_file=dict( - argstr="-input %s", - copyfile=False, - extensions=None, - mandatory=True, - ), - mask=dict( - argstr="-mask %s", - extensions=None, + argstr="-input %s", copyfile=False, extensions=None, mandatory=True ), + mask=dict(argstr="-mask %s", extensions=None), mean_file=dict( - argstr="-Mean %s", - extensions=None, - name_source="in_file", - suffix="_mean", - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="-Mean %s", extensions=None, name_source="in_file", 
suffix="_mean" ), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -91,33 +61,15 @@ def test_TCorrMap_inputs(): ), outputtype=dict(), pmean=dict( - argstr="-Pmean %s", - extensions=None, - name_source="in_file", - suffix="_pmean", - ), - polort=dict( - argstr="-polort %d", + argstr="-Pmean %s", extensions=None, name_source="in_file", suffix="_pmean" ), + polort=dict(argstr="-polort %d"), qmean=dict( - argstr="-Qmean %s", - extensions=None, - name_source="in_file", - suffix="_qmean", - ), - regress_out_timeseries=dict( - argstr="-ort %s", - extensions=None, - ), - seeds=dict( - argstr="-seed %s", - extensions=None, - xor="seeds_width", - ), - seeds_width=dict( - argstr="-Mseed %f", - xor="seeds", + argstr="-Qmean %s", extensions=None, name_source="in_file", suffix="_qmean" ), + regress_out_timeseries=dict(argstr="-ort %s", extensions=None), + seeds=dict(argstr="-seed %s", extensions=None, xor="seeds_width"), + seeds_width=dict(argstr="-Mseed %f", xor="seeds"), sum_expr=dict( argstr="-Sexpr %s %s", extensions=None, @@ -149,10 +101,7 @@ def test_TCorrMap_inputs(): ), ), zmean=dict( - argstr="-Zmean %s", - extensions=None, - name_source="in_file", - suffix="_zmean", + argstr="-Zmean %s", extensions=None, name_source="in_file", suffix="_zmean" ), ) inputs = TCorrMap.input_spec() @@ -164,45 +113,19 @@ def test_TCorrMap_inputs(): def test_TCorrMap_outputs(): output_map = dict( - absolute_threshold=dict( - extensions=None, - ), - average_expr=dict( - extensions=None, - ), - average_expr_nonzero=dict( - extensions=None, - ), - correlation_maps=dict( - extensions=None, - ), - correlation_maps_masked=dict( - extensions=None, - ), - histogram=dict( - extensions=None, - ), - mean_file=dict( - extensions=None, - ), - pmean=dict( - extensions=None, - ), - qmean=dict( - extensions=None, - ), - sum_expr=dict( - extensions=None, - ), - var_absolute_threshold=dict( - extensions=None, - ), - var_absolute_threshold_normalize=dict( - extensions=None, - ), - zmean=dict( - extensions=None, - ), + absolute_threshold=dict(extensions=None), + average_expr=dict(extensions=None), + average_expr_nonzero=dict(extensions=None), + correlation_maps=dict(extensions=None), + correlation_maps_masked=dict(extensions=None), + histogram=dict(extensions=None), + mean_file=dict(extensions=None), + pmean=dict(extensions=None), + qmean=dict(extensions=None), + sum_expr=dict(extensions=None), + var_absolute_threshold=dict(extensions=None), + var_absolute_threshold_normalize=dict(extensions=None), + zmean=dict(extensions=None), ) outputs = TCorrMap.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_TCorrelate.py b/nipype/interfaces/afni/tests/test_auto_TCorrelate.py index 1e85d44b68..ce808378c1 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCorrelate.py +++ b/nipype/interfaces/afni/tests/test_auto_TCorrelate.py @@ -4,17 +4,9 @@ def test_TCorrelate_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -22,25 +14,13 @@ def test_TCorrelate_inputs(): name_template="%s_tcorr", ), outputtype=dict(), - pearson=dict( - argstr="-pearson", - ), - polort=dict( - argstr="-polort %d", - ), + pearson=dict(argstr="-pearson"), + polort=dict(argstr="-polort %d"), xset=dict( - 
argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-2, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-2 ), yset=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-1, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 ), ) inputs = TCorrelate.input_spec() @@ -51,11 +31,7 @@ def test_TCorrelate_inputs(): def test_TCorrelate_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = TCorrelate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TNorm.py b/nipype/interfaces/afni/tests/test_auto_TNorm.py index 975345d92a..2bba6711f3 100644 --- a/nipype/interfaces/afni/tests/test_auto_TNorm.py +++ b/nipype/interfaces/afni/tests/test_auto_TNorm.py @@ -4,39 +4,17 @@ def test_TNorm_inputs(): input_map = dict( - L1fit=dict( - argstr="-L1fit", - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + L1fit=dict(argstr="-L1fit"), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - norm1=dict( - argstr="-norm1", - ), - norm2=dict( - argstr="-norm2", - ), - normR=dict( - argstr="-normR", - ), - normx=dict( - argstr="-normx", - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 ), + norm1=dict(argstr="-norm1"), + norm2=dict(argstr="-norm2"), + normR=dict(argstr="-normR"), + normx=dict(argstr="-normx"), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -44,9 +22,7 @@ def test_TNorm_inputs(): name_template="%s_tnorm", ), outputtype=dict(), - polort=dict( - argstr="-polort %s", - ), + polort=dict(argstr="-polort %s"), ) inputs = TNorm.input_spec() @@ -56,11 +32,7 @@ def test_TNorm_inputs(): def test_TNorm_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = TNorm.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TProject.py b/nipype/interfaces/afni/tests/test_auto_TProject.py index d8f9990817..3b71662ed9 100644 --- a/nipype/interfaces/afni/tests/test_auto_TProject.py +++ b/nipype/interfaces/afni/tests/test_auto_TProject.py @@ -4,43 +4,17 @@ def test_TProject_inputs(): input_map = dict( - TR=dict( - argstr="-TR %g", - ), - args=dict( - argstr="%s", - ), - automask=dict( - argstr="-automask", - xor=["mask"], - ), - bandpass=dict( - argstr="-bandpass %g %g", - ), - blur=dict( - argstr="-blur %g", - ), - cenmode=dict( - argstr="-cenmode %s", - ), - censor=dict( - argstr="-censor %s", - extensions=None, - ), - censortr=dict( - argstr="-CENSORTR %s", - ), - concat=dict( - argstr="-concat %s", - extensions=None, - ), - dsort=dict( - argstr="-dsort %s...", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + TR=dict(argstr="-TR %g"), + args=dict(argstr="%s"), + automask=dict(argstr="-automask", xor=["mask"]), + bandpass=dict(argstr="-bandpass %g %g"), + blur=dict(argstr="-blur %g"), + cenmode=dict(argstr="-cenmode %s"), + censor=dict(argstr="-censor %s", extensions=None), + censortr=dict(argstr="-CENSORTR %s"), + concat=dict(argstr="-concat %s", extensions=None), + dsort=dict(argstr="-dsort %s..."), + 
environ=dict(nohash=True, usedefault=True), in_file=dict( argstr="-input %s", copyfile=False, @@ -48,24 +22,11 @@ def test_TProject_inputs(): mandatory=True, position=1, ), - mask=dict( - argstr="-mask %s", - extensions=None, - ), - noblock=dict( - argstr="-noblock", - ), - norm=dict( - argstr="-norm", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - ort=dict( - argstr="-ort %s", - extensions=None, - ), + mask=dict(argstr="-mask %s", extensions=None), + noblock=dict(argstr="-noblock"), + norm=dict(argstr="-norm"), + num_threads=dict(nohash=True, usedefault=True), + ort=dict(argstr="-ort %s", extensions=None), out_file=dict( argstr="-prefix %s", extensions=None, @@ -74,12 +35,8 @@ def test_TProject_inputs(): position=-1, ), outputtype=dict(), - polort=dict( - argstr="-polort %d", - ), - stopband=dict( - argstr="-stopband %g %g", - ), + polort=dict(argstr="-polort %d"), + stopband=dict(argstr="-stopband %g %g"), ) inputs = TProject.input_spec() @@ -89,11 +46,7 @@ def test_TProject_inputs(): def test_TProject_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = TProject.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TShift.py b/nipype/interfaces/afni/tests/test_auto_TShift.py index 78af699c7c..1c3c838ec2 100644 --- a/nipype/interfaces/afni/tests/test_auto_TShift.py +++ b/nipype/interfaces/afni/tests/test_auto_TShift.py @@ -4,30 +4,14 @@ def test_TShift_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - ignore=dict( - argstr="-ignore %s", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + ignore=dict(argstr="-ignore %s"), in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - interp=dict( - argstr="-%s", - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 ), + interp=dict(argstr="-%s"), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -35,34 +19,14 @@ def test_TShift_inputs(): name_template="%s_tshift", ), outputtype=dict(), - rlt=dict( - argstr="-rlt", - ), - rltplus=dict( - argstr="-rlt+", - ), - slice_encoding_direction=dict( - usedefault=True, - ), - slice_timing=dict( - argstr="-tpattern @%s", - xor=["tpattern"], - ), - tpattern=dict( - argstr="-tpattern %s", - xor=["slice_timing"], - ), - tr=dict( - argstr="-TR %s", - ), - tslice=dict( - argstr="-slice %s", - xor=["tzero"], - ), - tzero=dict( - argstr="-tzero %s", - xor=["tslice"], - ), + rlt=dict(argstr="-rlt"), + rltplus=dict(argstr="-rlt+"), + slice_encoding_direction=dict(usedefault=True), + slice_timing=dict(argstr="-tpattern @%s", xor=["tpattern"]), + tpattern=dict(argstr="-tpattern %s", xor=["slice_timing"]), + tr=dict(argstr="-TR %s"), + tslice=dict(argstr="-slice %s", xor=["tzero"]), + tzero=dict(argstr="-tzero %s", xor=["tslice"]), ) inputs = TShift.input_spec() @@ -72,14 +36,7 @@ def test_TShift_inputs(): def test_TShift_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - timing_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None), timing_file=dict(extensions=None)) outputs = TShift.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TSmooth.py 
b/nipype/interfaces/afni/tests/test_auto_TSmooth.py index 1836d31c22..e891d3ba0a 100644 --- a/nipype/interfaces/afni/tests/test_auto_TSmooth.py +++ b/nipype/interfaces/afni/tests/test_auto_TSmooth.py @@ -4,52 +4,21 @@ def test_TSmooth_inputs(): input_map = dict( - adaptive=dict( - argstr="-adaptive %d", - ), - args=dict( - argstr="%s", - ), - blackman=dict( - argstr="-blackman %d", - ), - custom=dict( - argstr="-custom %s", - extensions=None, - ), - datum=dict( - argstr="-datum %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - hamming=dict( - argstr="-hamming %d", - ), + adaptive=dict(argstr="-adaptive %d"), + args=dict(argstr="%s"), + blackman=dict(argstr="-blackman %d"), + custom=dict(argstr="-custom %s", extensions=None), + datum=dict(argstr="-datum %s"), + environ=dict(nohash=True, usedefault=True), + hamming=dict(argstr="-hamming %d"), in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - lin=dict( - argstr="-lin", - ), - lin3=dict( - argstr="-3lin %d", - ), - med=dict( - argstr="-med", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - osf=dict( - argstr="-osf", + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 ), + lin=dict(argstr="-lin"), + lin3=dict(argstr="-3lin %d"), + med=dict(argstr="-med"), + num_threads=dict(nohash=True, usedefault=True), + osf=dict(argstr="-osf"), out_file=dict( argstr="-prefix %s", extensions=None, @@ -66,11 +35,7 @@ def test_TSmooth_inputs(): def test_TSmooth_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = TSmooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TStat.py b/nipype/interfaces/afni/tests/test_auto_TStat.py index b7499c5442..7ee0dbe53c 100644 --- a/nipype/interfaces/afni/tests/test_auto_TStat.py +++ b/nipype/interfaces/afni/tests/test_auto_TStat.py @@ -4,31 +4,14 @@ def test_TStat_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - mask=dict( - argstr="-mask %s", - extensions=None, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - options=dict( - argstr="%s", + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 ), + mask=dict(argstr="-mask %s", extensions=None), + num_threads=dict(nohash=True, usedefault=True), + options=dict(argstr="%s"), out_file=dict( argstr="-prefix %s", extensions=None, @@ -45,11 +28,7 @@ def test_TStat_inputs(): def test_TStat_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = TStat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_To3D.py b/nipype/interfaces/afni/tests/test_auto_To3D.py index 6121efbe57..02b5beda62 100644 --- a/nipype/interfaces/afni/tests/test_auto_To3D.py +++ b/nipype/interfaces/afni/tests/test_auto_To3D.py @@ -4,34 +4,14 @@ def test_To3D_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - assumemosaic=dict( - argstr="-assume_dicom_mosaic", - ), - datatype=dict( - argstr="-datum %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - filetype=dict( - argstr="-%s", - ), - funcparams=dict( - 
argstr="-time:zt %s alt+z2", - ), - in_folder=dict( - argstr="%s/*.dcm", - mandatory=True, - position=-1, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + assumemosaic=dict(argstr="-assume_dicom_mosaic"), + datatype=dict(argstr="-datum %s"), + environ=dict(nohash=True, usedefault=True), + filetype=dict(argstr="-%s"), + funcparams=dict(argstr="-time:zt %s alt+z2"), + in_folder=dict(argstr="%s/*.dcm", mandatory=True, position=-1), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -39,9 +19,7 @@ def test_To3D_inputs(): name_template="%s", ), outputtype=dict(), - skipoutliers=dict( - argstr="-skip_outliers", - ), + skipoutliers=dict(argstr="-skip_outliers"), ) inputs = To3D.input_spec() @@ -51,11 +29,7 @@ def test_To3D_inputs(): def test_To3D_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = To3D.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Undump.py b/nipype/interfaces/afni/tests/test_auto_Undump.py index 64c98cf25a..456f4cfab4 100644 --- a/nipype/interfaces/afni/tests/test_auto_Undump.py +++ b/nipype/interfaces/afni/tests/test_auto_Undump.py @@ -4,28 +4,13 @@ def test_Undump_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - coordinates_specification=dict( - argstr="-%s", - ), - datatype=dict( - argstr="-datum %s", - ), - default_value=dict( - argstr="-dval %f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fill_value=dict( - argstr="-fval %f", - ), - head_only=dict( - argstr="-head_only", - ), + args=dict(argstr="%s"), + coordinates_specification=dict(argstr="-%s"), + datatype=dict(argstr="-datum %s"), + default_value=dict(argstr="-dval %f"), + environ=dict(nohash=True, usedefault=True), + fill_value=dict(argstr="-fval %f"), + head_only=dict(argstr="-head_only"), in_file=dict( argstr="-master %s", copyfile=False, @@ -33,26 +18,12 @@ def test_Undump_inputs(): mandatory=True, position=-1, ), - mask_file=dict( - argstr="-mask %s", - extensions=None, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - orient=dict( - argstr="-orient %s", - ), - out_file=dict( - argstr="-prefix %s", - extensions=None, - name_source="in_file", - ), + mask_file=dict(argstr="-mask %s", extensions=None), + num_threads=dict(nohash=True, usedefault=True), + orient=dict(argstr="-orient %s"), + out_file=dict(argstr="-prefix %s", extensions=None, name_source="in_file"), outputtype=dict(), - srad=dict( - argstr="-srad %f", - ), + srad=dict(argstr="-srad %f"), ) inputs = Undump.input_spec() @@ -62,11 +33,7 @@ def test_Undump_inputs(): def test_Undump_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Undump.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Unifize.py b/nipype/interfaces/afni/tests/test_auto_Unifize.py index 15832152c2..b4cb930a0a 100644 --- a/nipype/interfaces/afni/tests/test_auto_Unifize.py +++ b/nipype/interfaces/afni/tests/test_auto_Unifize.py @@ -4,24 +4,11 @@ def test_Unifize_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - cl_frac=dict( - argstr="-clfrac %f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - epi=dict( - argstr="-EPI", - requires=["no_duplo", "t2"], - xor=["gm"], - ), - gm=dict( - argstr="-GM", - ), + 
args=dict(argstr="%s"), + cl_frac=dict(argstr="-clfrac %f"), + environ=dict(nohash=True, usedefault=True), + epi=dict(argstr="-EPI", requires=["no_duplo", "t2"], xor=["gm"]), + gm=dict(argstr="-GM"), in_file=dict( argstr="-input %s", copyfile=False, @@ -29,13 +16,8 @@ def test_Unifize_inputs(): mandatory=True, position=-1, ), - no_duplo=dict( - argstr="-noduplo", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + no_duplo=dict(argstr="-noduplo"), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -43,25 +25,12 @@ def test_Unifize_inputs(): name_template="%s_unifized", ), outputtype=dict(), - quiet=dict( - argstr="-quiet", - ), - rbt=dict( - argstr="-rbt %f %f %f", - ), - scale_file=dict( - argstr="-ssave %s", - extensions=None, - ), - t2=dict( - argstr="-T2", - ), - t2_up=dict( - argstr="-T2up %f", - ), - urad=dict( - argstr="-Urad %s", - ), + quiet=dict(argstr="-quiet"), + rbt=dict(argstr="-rbt %f %f %f"), + scale_file=dict(argstr="-ssave %s", extensions=None), + t2=dict(argstr="-T2"), + t2_up=dict(argstr="-T2up %f"), + urad=dict(argstr="-Urad %s"), ) inputs = Unifize.input_spec() @@ -71,14 +40,7 @@ def test_Unifize_inputs(): def test_Unifize_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - scale_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None), scale_file=dict(extensions=None)) outputs = Unifize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Volreg.py b/nipype/interfaces/afni/tests/test_auto_Volreg.py index 658b933391..56159816be 100644 --- a/nipype/interfaces/afni/tests/test_auto_Volreg.py +++ b/nipype/interfaces/afni/tests/test_auto_Volreg.py @@ -4,34 +4,15 @@ def test_Volreg_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - basefile=dict( - argstr="-base %s", - extensions=None, - position=-6, - ), - copyorigin=dict( - argstr="-twodup", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + basefile=dict(argstr="-base %s", extensions=None, position=-6), + copyorigin=dict(argstr="-twodup"), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - in_weight_volume=dict( - argstr="-weight '%s[%d]'", - ), - interp=dict( - argstr="-%s", + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 ), + in_weight_volume=dict(argstr="-weight '%s[%d]'"), + interp=dict(argstr="-%s"), md1d_file=dict( argstr="-maxdisp1D %s", extensions=None, @@ -40,10 +21,7 @@ def test_Volreg_inputs(): name_template="%s_md.1D", position=-4, ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + num_threads=dict(nohash=True, usedefault=True), oned_file=dict( argstr="-1Dfile %s", extensions=None, @@ -65,16 +43,9 @@ def test_Volreg_inputs(): name_template="%s_volreg", ), outputtype=dict(), - timeshift=dict( - argstr="-tshift 0", - ), - verbose=dict( - argstr="-verbose", - ), - zpad=dict( - argstr="-zpad %d", - position=-5, - ), + timeshift=dict(argstr="-tshift 0"), + verbose=dict(argstr="-verbose"), + zpad=dict(argstr="-zpad %d", position=-5), ) inputs = Volreg.input_spec() @@ -85,18 +56,10 @@ def test_Volreg_inputs(): def test_Volreg_outputs(): output_map = dict( - md1d_file=dict( - extensions=None, - ), - oned_file=dict( - extensions=None, - ), - oned_matrix_save=dict( - extensions=None, - ), - out_file=dict( - extensions=None, - ), + 
md1d_file=dict(extensions=None), + oned_file=dict(extensions=None), + oned_matrix_save=dict(extensions=None), + out_file=dict(extensions=None), ) outputs = Volreg.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Warp.py b/nipype/interfaces/afni/tests/test_auto_Warp.py index 9d694f8425..90abbd521c 100644 --- a/nipype/interfaces/afni/tests/test_auto_Warp.py +++ b/nipype/interfaces/afni/tests/test_auto_Warp.py @@ -4,48 +4,19 @@ def test_Warp_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - deoblique=dict( - argstr="-deoblique", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gridset=dict( - argstr="-gridset %s", - extensions=None, - ), + args=dict(argstr="%s"), + deoblique=dict(argstr="-deoblique"), + environ=dict(nohash=True, usedefault=True), + gridset=dict(argstr="-gridset %s", extensions=None), in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - interp=dict( - argstr="-%s", - ), - matparent=dict( - argstr="-matparent %s", - extensions=None, - ), - mni2tta=dict( - argstr="-mni2tta", - ), - newgrid=dict( - argstr="-newgrid %f", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - oblique_parent=dict( - argstr="-oblique_parent %s", - extensions=None, - ), + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 + ), + interp=dict(argstr="-%s"), + matparent=dict(argstr="-matparent %s", extensions=None), + mni2tta=dict(argstr="-mni2tta"), + newgrid=dict(argstr="-newgrid %f"), + num_threads=dict(nohash=True, usedefault=True), + oblique_parent=dict(argstr="-oblique_parent %s", extensions=None), out_file=dict( argstr="-prefix %s", extensions=None, @@ -54,18 +25,10 @@ def test_Warp_inputs(): name_template="%s_warp", ), outputtype=dict(), - save_warp=dict( - requires=["verbose"], - ), - tta2mni=dict( - argstr="-tta2mni", - ), - verbose=dict( - argstr="-verb", - ), - zpad=dict( - argstr="-zpad %d", - ), + save_warp=dict(requires=["verbose"]), + tta2mni=dict(argstr="-tta2mni"), + verbose=dict(argstr="-verb"), + zpad=dict(argstr="-zpad %d"), ) inputs = Warp.input_spec() @@ -75,14 +38,7 @@ def test_Warp_inputs(): def test_Warp_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - warp_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None), warp_file=dict(extensions=None)) outputs = Warp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_ZCutUp.py b/nipype/interfaces/afni/tests/test_auto_ZCutUp.py index 3c51d6dd1d..c5c3d25844 100644 --- a/nipype/interfaces/afni/tests/test_auto_ZCutUp.py +++ b/nipype/interfaces/afni/tests/test_auto_ZCutUp.py @@ -4,27 +4,13 @@ def test_ZCutUp_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-1, - ), - keep=dict( - argstr="-keep %s", - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 ), + keep=dict(argstr="-keep %s"), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -41,11 +27,7 @@ def test_ZCutUp_inputs(): def test_ZCutUp_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = 
ZCutUp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Zcat.py b/nipype/interfaces/afni/tests/test_auto_Zcat.py index e06f343591..2ce7ac7792 100644 --- a/nipype/interfaces/afni/tests/test_auto_Zcat.py +++ b/nipype/interfaces/afni/tests/test_auto_Zcat.py @@ -4,34 +4,13 @@ def test_Zcat_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - datum=dict( - argstr="-datum %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fscale=dict( - argstr="-fscale", - xor=["nscale"], - ), - in_files=dict( - argstr="%s", - copyfile=False, - mandatory=True, - position=-1, - ), - nscale=dict( - argstr="-nscale", - xor=["fscale"], - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + datum=dict(argstr="-datum %s"), + environ=dict(nohash=True, usedefault=True), + fscale=dict(argstr="-fscale", xor=["nscale"]), + in_files=dict(argstr="%s", copyfile=False, mandatory=True, position=-1), + nscale=dict(argstr="-nscale", xor=["fscale"]), + num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -39,9 +18,7 @@ def test_Zcat_inputs(): name_template="%s_zcat", ), outputtype=dict(), - verb=dict( - argstr="-verb", - ), + verb=dict(argstr="-verb"), ) inputs = Zcat.input_spec() @@ -51,11 +28,7 @@ def test_Zcat_inputs(): def test_Zcat_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Zcat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Zeropad.py b/nipype/interfaces/afni/tests/test_auto_Zeropad.py index 4d6742f21e..694e7f6228 100644 --- a/nipype/interfaces/afni/tests/test_auto_Zeropad.py +++ b/nipype/interfaces/afni/tests/test_auto_Zeropad.py @@ -4,79 +4,30 @@ def test_Zeropad_inputs(): input_map = dict( - A=dict( - argstr="-A %i", - xor=["master"], - ), - AP=dict( - argstr="-AP %i", - xor=["master"], - ), - I=dict( - argstr="-I %i", - xor=["master"], - ), - IS=dict( - argstr="-IS %i", - xor=["master"], - ), - L=dict( - argstr="-L %i", - xor=["master"], - ), - P=dict( - argstr="-P %i", - xor=["master"], - ), - R=dict( - argstr="-R %i", - xor=["master"], - ), - RL=dict( - argstr="-RL %i", - xor=["master"], - ), - S=dict( - argstr="-S %i", - xor=["master"], - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + A=dict(argstr="-A %i", xor=["master"]), + AP=dict(argstr="-AP %i", xor=["master"]), + I=dict(argstr="-I %i", xor=["master"]), + IS=dict(argstr="-IS %i", xor=["master"]), + L=dict(argstr="-L %i", xor=["master"]), + P=dict(argstr="-P %i", xor=["master"]), + R=dict(argstr="-R %i", xor=["master"]), + RL=dict(argstr="-RL %i", xor=["master"]), + S=dict(argstr="-S %i", xor=["master"]), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), in_files=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-1, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 ), master=dict( argstr="-master %s", extensions=None, xor=["I", "S", "A", "P", "L", "R", "z", "RL", "AP", "IS", "mm"], ), - mm=dict( - argstr="-mm", - xor=["master"], - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_file=dict( - argstr="-prefix %s", - extensions=None, - name_template="zeropad", - ), + mm=dict(argstr="-mm", xor=["master"]), + num_threads=dict(nohash=True, usedefault=True), + out_file=dict(argstr="-prefix 
%s", extensions=None, name_template="zeropad"), outputtype=dict(), - z=dict( - argstr="-z %i", - xor=["master"], - ), + z=dict(argstr="-z %i", xor=["master"]), ) inputs = Zeropad.input_spec() @@ -86,11 +37,7 @@ def test_Zeropad_inputs(): def test_Zeropad_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Zeropad.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_AI.py b/nipype/interfaces/ants/tests/test_auto_AI.py index bef56b7ee6..8dd876782e 100644 --- a/nipype/interfaces/ants/tests/test_auto_AI.py +++ b/nipype/interfaces/ants/tests/test_auto_AI.py @@ -4,71 +4,22 @@ def test_AI_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - convergence=dict( - argstr="-c [%d,%g,%d]", - usedefault=True, - ), - dimension=dict( - argstr="-d %d", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixed_image=dict( - extensions=None, - mandatory=True, - ), - fixed_image_mask=dict( - argstr="-x %s", - extensions=None, - ), - metric=dict( - argstr="-m %s", - mandatory=True, - ), - moving_image=dict( - extensions=None, - mandatory=True, - ), - moving_image_mask=dict( - extensions=None, - requires=["fixed_image_mask"], - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - output_transform=dict( - argstr="-o %s", - extensions=None, - usedefault=True, - ), - principal_axes=dict( - argstr="-p %d", - usedefault=True, - xor=["blobs"], - ), - search_factor=dict( - argstr="-s [%g,%g]", - usedefault=True, - ), - search_grid=dict( - argstr="-g %s", - min_ver="2.3.0", - ), - transform=dict( - argstr="-t %s[%g]", - usedefault=True, - ), - verbose=dict( - argstr="-v %d", - usedefault=True, - ), + args=dict(argstr="%s"), + convergence=dict(argstr="-c [%d,%g,%d]", usedefault=True), + dimension=dict(argstr="-d %d", usedefault=True), + environ=dict(nohash=True, usedefault=True), + fixed_image=dict(extensions=None, mandatory=True), + fixed_image_mask=dict(argstr="-x %s", extensions=None), + metric=dict(argstr="-m %s", mandatory=True), + moving_image=dict(extensions=None, mandatory=True), + moving_image_mask=dict(extensions=None, requires=["fixed_image_mask"]), + num_threads=dict(nohash=True, usedefault=True), + output_transform=dict(argstr="-o %s", extensions=None, usedefault=True), + principal_axes=dict(argstr="-p %d", usedefault=True, xor=["blobs"]), + search_factor=dict(argstr="-s [%g,%g]", usedefault=True), + search_grid=dict(argstr="-g %s", min_ver="2.3.0"), + transform=dict(argstr="-t %s[%g]", usedefault=True), + verbose=dict(argstr="-v %d", usedefault=True), ) inputs = AI.input_spec() @@ -78,11 +29,7 @@ def test_AI_inputs(): def test_AI_outputs(): - output_map = dict( - output_transform=dict( - extensions=None, - ), - ) + output_map = dict(output_transform=dict(extensions=None)) outputs = AI.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_ANTS.py b/nipype/interfaces/ants/tests/test_auto_ANTS.py index 17f456e0dd..28721815ed 100644 --- a/nipype/interfaces/ants/tests/test_auto_ANTS.py +++ b/nipype/interfaces/ants/tests/test_auto_ANTS.py @@ -4,97 +4,35 @@ def test_ANTS_inputs(): input_map = dict( - affine_gradient_descent_option=dict( - argstr="%s", - ), - args=dict( - argstr="%s", - ), - delta_time=dict( - requires=["number_of_time_steps"], - ), - dimension=dict( - argstr="%d", - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - 
fixed_image=dict( - mandatory=True, - ), - gradient_step_length=dict( - requires=["transformation_model"], - ), - metric=dict( - mandatory=True, - ), - metric_weight=dict( - mandatory=True, - requires=["metric"], - usedefault=True, - ), - mi_option=dict( - argstr="--MI-option %s", - sep="x", - ), - moving_image=dict( - argstr="%s", - mandatory=True, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + affine_gradient_descent_option=dict(argstr="%s"), + args=dict(argstr="%s"), + delta_time=dict(requires=["number_of_time_steps"]), + dimension=dict(argstr="%d", position=1), + environ=dict(nohash=True, usedefault=True), + fixed_image=dict(mandatory=True), + gradient_step_length=dict(requires=["transformation_model"]), + metric=dict(mandatory=True), + metric_weight=dict(mandatory=True, requires=["metric"], usedefault=True), + mi_option=dict(argstr="--MI-option %s", sep="x"), + moving_image=dict(argstr="%s", mandatory=True), + num_threads=dict(nohash=True, usedefault=True), number_of_affine_iterations=dict( - argstr="--number-of-affine-iterations %s", - sep="x", - ), - number_of_iterations=dict( - argstr="--number-of-iterations %s", - sep="x", - ), - number_of_time_steps=dict( - requires=["gradient_step_length"], + argstr="--number-of-affine-iterations %s", sep="x" ), + number_of_iterations=dict(argstr="--number-of-iterations %s", sep="x"), + number_of_time_steps=dict(requires=["gradient_step_length"]), output_transform_prefix=dict( - argstr="--output-naming %s", - mandatory=True, - usedefault=True, - ), - radius=dict( - mandatory=True, - requires=["metric"], - ), - regularization=dict( - argstr="%s", - ), - regularization_deformation_field_sigma=dict( - requires=["regularization"], - ), - regularization_gradient_field_sigma=dict( - requires=["regularization"], - ), - smoothing_sigmas=dict( - argstr="--gaussian-smoothing-sigmas %s", - sep="x", - ), - subsampling_factors=dict( - argstr="--subsampling-factors %s", - sep="x", - ), - symmetry_type=dict( - requires=["delta_time"], - ), - transformation_model=dict( - argstr="%s", - mandatory=True, - ), - use_histogram_matching=dict( - argstr="%s", - usedefault=True, - ), + argstr="--output-naming %s", mandatory=True, usedefault=True + ), + radius=dict(mandatory=True, requires=["metric"]), + regularization=dict(argstr="%s"), + regularization_deformation_field_sigma=dict(requires=["regularization"]), + regularization_gradient_field_sigma=dict(requires=["regularization"]), + smoothing_sigmas=dict(argstr="--gaussian-smoothing-sigmas %s", sep="x"), + subsampling_factors=dict(argstr="--subsampling-factors %s", sep="x"), + symmetry_type=dict(requires=["delta_time"]), + transformation_model=dict(argstr="%s", mandatory=True), + use_histogram_matching=dict(argstr="%s", usedefault=True), ) inputs = ANTS.input_spec() @@ -105,21 +43,11 @@ def test_ANTS_inputs(): def test_ANTS_outputs(): output_map = dict( - affine_transform=dict( - extensions=None, - ), - inverse_warp_transform=dict( - extensions=None, - ), - metaheader=dict( - extensions=None, - ), - metaheader_raw=dict( - extensions=None, - ), - warp_transform=dict( - extensions=None, - ), + affine_transform=dict(extensions=None), + inverse_warp_transform=dict(extensions=None), + metaheader=dict(extensions=None), + metaheader_raw=dict(extensions=None), + warp_transform=dict(extensions=None), ) outputs = ANTS.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py b/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py index 8907c4ab91..86d860293f 100644 --- 
a/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py +++ b/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py @@ -4,17 +4,9 @@ def test_ANTSCommand_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + num_threads=dict(nohash=True, usedefault=True), ) inputs = ANTSCommand.input_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py b/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py index 24ef0d655e..ad65c1887a 100644 --- a/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py +++ b/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py @@ -4,60 +4,17 @@ def test_AffineInitializer_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dimension=dict( - argstr="%s", - position=0, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixed_image=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=1, - ), - local_search=dict( - argstr="%d", - position=7, - usedefault=True, - ), - moving_image=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_file=dict( - argstr="%s", - extensions=None, - position=3, - usedefault=True, - ), - principal_axes=dict( - argstr="%d", - position=6, - usedefault=True, - ), - radian_fraction=dict( - argstr="%f", - position=5, - usedefault=True, - ), - search_factor=dict( - argstr="%f", - position=4, - usedefault=True, - ), + args=dict(argstr="%s"), + dimension=dict(argstr="%s", position=0, usedefault=True), + environ=dict(nohash=True, usedefault=True), + fixed_image=dict(argstr="%s", extensions=None, mandatory=True, position=1), + local_search=dict(argstr="%d", position=7, usedefault=True), + moving_image=dict(argstr="%s", extensions=None, mandatory=True, position=2), + num_threads=dict(nohash=True, usedefault=True), + out_file=dict(argstr="%s", extensions=None, position=3, usedefault=True), + principal_axes=dict(argstr="%d", position=6, usedefault=True), + radian_fraction=dict(argstr="%f", position=5, usedefault=True), + search_factor=dict(argstr="%f", position=4, usedefault=True), ) inputs = AffineInitializer.input_spec() @@ -67,11 +24,7 @@ def test_AffineInitializer_inputs(): def test_AffineInitializer_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = AffineInitializer.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py b/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py index a18a3b60b4..062bac034a 100644 --- a/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py +++ b/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py @@ -4,62 +4,24 @@ def test_ApplyTransforms_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - default_value=dict( - argstr="--default-value %g", - usedefault=True, - ), - dimension=dict( - argstr="--dimensionality %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - float=dict( - argstr="--float %d", - usedefault=True, - ), - input_image=dict( - argstr="--input %s", - extensions=None, - mandatory=True, - ), - input_image_type=dict( - argstr="--input-image-type %d", - ), - interpolation=dict( - argstr="%s", - usedefault=True, - ), + args=dict(argstr="%s"), 
+ default_value=dict(argstr="--default-value %g", usedefault=True), + dimension=dict(argstr="--dimensionality %d"), + environ=dict(nohash=True, usedefault=True), + float=dict(argstr="--float %d", usedefault=True), + input_image=dict(argstr="--input %s", extensions=None, mandatory=True), + input_image_type=dict(argstr="--input-image-type %d"), + interpolation=dict(argstr="%s", usedefault=True), interpolation_parameters=dict(), invert_transform_flags=dict(), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_postfix=dict( - usedefault=True, - ), - output_image=dict( - argstr="--output %s", - genfile=True, - hash_files=False, - ), - print_out_composite_warp_file=dict( - requires=["output_image"], - ), + num_threads=dict(nohash=True, usedefault=True), + out_postfix=dict(usedefault=True), + output_image=dict(argstr="--output %s", genfile=True, hash_files=False), + print_out_composite_warp_file=dict(requires=["output_image"]), reference_image=dict( - argstr="--reference-image %s", - extensions=None, - mandatory=True, - ), - transforms=dict( - argstr="%s", - mandatory=True, + argstr="--reference-image %s", extensions=None, mandatory=True ), + transforms=dict(argstr="%s", mandatory=True), ) inputs = ApplyTransforms.input_spec() @@ -69,11 +31,7 @@ def test_ApplyTransforms_inputs(): def test_ApplyTransforms_outputs(): - output_map = dict( - output_image=dict( - extensions=None, - ), - ) + output_map = dict(output_image=dict(extensions=None)) outputs = ApplyTransforms.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py b/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py index 472c22c8b2..5d1723ba73 100644 --- a/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py +++ b/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py @@ -4,36 +4,19 @@ def test_ApplyTransformsToPoints_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dimension=dict( - argstr="--dimensionality %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_file=dict( - argstr="--input %s", - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s"), + dimension=dict(argstr="--dimensionality %d"), + environ=dict(nohash=True, usedefault=True), + input_file=dict(argstr="--input %s", extensions=None, mandatory=True), invert_transform_flags=dict(), - num_threads=dict( - nohash=True, - usedefault=True, - ), + num_threads=dict(nohash=True, usedefault=True), output_file=dict( argstr="--output %s", hash_files=False, name_source=["input_file"], name_template="%s_transformed.csv", ), - transforms=dict( - argstr="%s", - mandatory=True, - ), + transforms=dict(argstr="%s", mandatory=True), ) inputs = ApplyTransformsToPoints.input_spec() @@ -43,11 +26,7 @@ def test_ApplyTransformsToPoints_inputs(): def test_ApplyTransformsToPoints_outputs(): - output_map = dict( - output_file=dict( - extensions=None, - ), - ) + output_map = dict(output_file=dict(extensions=None)) outputs = ApplyTransformsToPoints.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_Atropos.py b/nipype/interfaces/ants/tests/test_auto_Atropos.py index 9f02bb5151..b99146eafa 100644 --- a/nipype/interfaces/ants/tests/test_auto_Atropos.py +++ b/nipype/interfaces/ants/tests/test_auto_Atropos.py @@ -4,85 +4,35 @@ def test_Atropos_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - convergence_threshold=dict( - requires=["n_iterations"], - ), - 
dimension=dict( - argstr="--image-dimensionality %d", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - icm_use_synchronous_update=dict( - argstr="%s", - ), + args=dict(argstr="%s"), + convergence_threshold=dict(requires=["n_iterations"]), + dimension=dict(argstr="--image-dimensionality %d", usedefault=True), + environ=dict(nohash=True, usedefault=True), + icm_use_synchronous_update=dict(argstr="%s"), initialization=dict( - argstr="%s", - mandatory=True, - requires=["number_of_tissue_classes"], - ), - intensity_images=dict( - argstr="--intensity-image %s...", - mandatory=True, + argstr="%s", mandatory=True, requires=["number_of_tissue_classes"] ), + intensity_images=dict(argstr="--intensity-image %s...", mandatory=True), kmeans_init_centers=dict(), - likelihood_model=dict( - argstr="--likelihood-model %s", - ), - mask_image=dict( - argstr="--mask-image %s", - extensions=None, - mandatory=True, - ), - maximum_number_of_icm_terations=dict( - requires=["icm_use_synchronous_update"], - ), - mrf_radius=dict( - requires=["mrf_smoothing_factor"], - ), - mrf_smoothing_factor=dict( - argstr="%s", - ), - n_iterations=dict( - argstr="%s", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - number_of_tissue_classes=dict( - mandatory=True, - ), + likelihood_model=dict(argstr="--likelihood-model %s"), + mask_image=dict(argstr="--mask-image %s", extensions=None, mandatory=True), + maximum_number_of_icm_terations=dict(requires=["icm_use_synchronous_update"]), + mrf_radius=dict(requires=["mrf_smoothing_factor"]), + mrf_smoothing_factor=dict(argstr="%s"), + n_iterations=dict(argstr="%s"), + num_threads=dict(nohash=True, usedefault=True), + number_of_tissue_classes=dict(mandatory=True), out_classified_image_name=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - ), - output_posteriors_name_template=dict( - usedefault=True, - ), - posterior_formulation=dict( - argstr="%s", + argstr="%s", extensions=None, genfile=True, hash_files=False ), + output_posteriors_name_template=dict(usedefault=True), + posterior_formulation=dict(argstr="%s"), prior_image=dict(), - prior_probability_threshold=dict( - requires=["prior_weighting"], - ), + prior_probability_threshold=dict(requires=["prior_weighting"]), prior_weighting=dict(), save_posteriors=dict(), - use_mixture_model_proportions=dict( - requires=["posterior_formulation"], - ), - use_random_seed=dict( - argstr="--use-random-seed %d", - usedefault=True, - ), + use_mixture_model_proportions=dict(requires=["posterior_formulation"]), + use_random_seed=dict(argstr="--use-random-seed %d", usedefault=True), ) inputs = Atropos.input_spec() @@ -92,12 +42,7 @@ def test_Atropos_inputs(): def test_Atropos_outputs(): - output_map = dict( - classified_image=dict( - extensions=None, - ), - posteriors=dict(), - ) + output_map = dict(classified_image=dict(extensions=None), posteriors=dict()) outputs = Atropos.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py b/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py index 3da6956de2..46cd73b1f0 100644 --- a/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py @@ -4,33 +4,14 @@ def test_AverageAffineTransform_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dimension=dict( - argstr="%d", - mandatory=True, - position=0, - ), - environ=dict( - nohash=True, - usedefault=True, - 
), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + dimension=dict(argstr="%d", mandatory=True, position=0), + environ=dict(nohash=True, usedefault=True), + num_threads=dict(nohash=True, usedefault=True), output_affine_transform=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=1, - ), - transforms=dict( - argstr="%s", - mandatory=True, - position=3, + argstr="%s", extensions=None, mandatory=True, position=1 ), + transforms=dict(argstr="%s", mandatory=True, position=3), ) inputs = AverageAffineTransform.input_spec() @@ -40,11 +21,7 @@ def test_AverageAffineTransform_inputs(): def test_AverageAffineTransform_outputs(): - output_map = dict( - affine_transform=dict( - extensions=None, - ), - ) + output_map = dict(affine_transform=dict(extensions=None)) outputs = AverageAffineTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_AverageImages.py b/nipype/interfaces/ants/tests/test_auto_AverageImages.py index 8eb03ea7b4..94dedee9f9 100644 --- a/nipype/interfaces/ants/tests/test_auto_AverageImages.py +++ b/nipype/interfaces/ants/tests/test_auto_AverageImages.py @@ -4,38 +4,14 @@ def test_AverageImages_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dimension=dict( - argstr="%d", - mandatory=True, - position=0, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - images=dict( - argstr="%s", - mandatory=True, - position=3, - ), - normalize=dict( - argstr="%d", - mandatory=True, - position=2, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + dimension=dict(argstr="%d", mandatory=True, position=0), + environ=dict(nohash=True, usedefault=True), + images=dict(argstr="%s", mandatory=True, position=3), + normalize=dict(argstr="%d", mandatory=True, position=2), + num_threads=dict(nohash=True, usedefault=True), output_average_image=dict( - argstr="%s", - extensions=None, - hash_files=False, - position=1, - usedefault=True, + argstr="%s", extensions=None, hash_files=False, position=1, usedefault=True ), ) inputs = AverageImages.input_spec() @@ -46,11 +22,7 @@ def test_AverageImages_inputs(): def test_AverageImages_outputs(): - output_map = dict( - output_average_image=dict( - extensions=None, - ), - ) + output_map = dict(output_average_image=dict(extensions=None)) outputs = AverageImages.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py b/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py index 454a102f2d..e6a2e1d445 100644 --- a/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py +++ b/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py @@ -4,61 +4,22 @@ def test_BrainExtraction_inputs(): input_map = dict( - anatomical_image=dict( - argstr="-a %s", - extensions=None, - mandatory=True, - ), - args=dict( - argstr="%s", - ), + anatomical_image=dict(argstr="-a %s", extensions=None, mandatory=True), + args=dict(argstr="%s"), brain_probability_mask=dict( - argstr="-m %s", - copyfile=False, - extensions=None, - mandatory=True, - ), - brain_template=dict( - argstr="-e %s", - extensions=None, - mandatory=True, - ), - debug=dict( - argstr="-z 1", - ), - dimension=dict( - argstr="-d %d", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - extraction_registration_mask=dict( - argstr="-f %s", - extensions=None, - ), - image_suffix=dict( - argstr="-s %s", - usedefault=True, - ), - 
keep_temporary_files=dict( - argstr="-k %d", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_prefix=dict( - argstr="-o %s", - usedefault=True, - ), - use_floatingpoint_precision=dict( - argstr="-q %d", - ), - use_random_seeding=dict( - argstr="-u %d", - ), + argstr="-m %s", copyfile=False, extensions=None, mandatory=True + ), + brain_template=dict(argstr="-e %s", extensions=None, mandatory=True), + debug=dict(argstr="-z 1"), + dimension=dict(argstr="-d %d", usedefault=True), + environ=dict(nohash=True, usedefault=True), + extraction_registration_mask=dict(argstr="-f %s", extensions=None), + image_suffix=dict(argstr="-s %s", usedefault=True), + keep_temporary_files=dict(argstr="-k %d"), + num_threads=dict(nohash=True, usedefault=True), + out_prefix=dict(argstr="-o %s", usedefault=True), + use_floatingpoint_precision=dict(argstr="-q %d"), + use_random_seeding=dict(argstr="-u %d"), ) inputs = BrainExtraction.input_spec() @@ -69,60 +30,24 @@ def test_BrainExtraction_inputs(): def test_BrainExtraction_outputs(): output_map = dict( - BrainExtractionBrain=dict( - extensions=None, - ), - BrainExtractionCSF=dict( - extensions=None, - ), - BrainExtractionGM=dict( - extensions=None, - ), - BrainExtractionInitialAffine=dict( - extensions=None, - ), - BrainExtractionInitialAffineFixed=dict( - extensions=None, - ), - BrainExtractionInitialAffineMoving=dict( - extensions=None, - ), - BrainExtractionLaplacian=dict( - extensions=None, - ), - BrainExtractionMask=dict( - extensions=None, - ), - BrainExtractionPrior0GenericAffine=dict( - extensions=None, - ), - BrainExtractionPrior1InverseWarp=dict( - extensions=None, - ), - BrainExtractionPrior1Warp=dict( - extensions=None, - ), - BrainExtractionPriorWarped=dict( - extensions=None, - ), - BrainExtractionSegmentation=dict( - extensions=None, - ), - BrainExtractionTemplateLaplacian=dict( - extensions=None, - ), - BrainExtractionTmp=dict( - extensions=None, - ), - BrainExtractionWM=dict( - extensions=None, - ), - N4Corrected0=dict( - extensions=None, - ), - N4Truncated0=dict( - extensions=None, - ), + BrainExtractionBrain=dict(extensions=None), + BrainExtractionCSF=dict(extensions=None), + BrainExtractionGM=dict(extensions=None), + BrainExtractionInitialAffine=dict(extensions=None), + BrainExtractionInitialAffineFixed=dict(extensions=None), + BrainExtractionInitialAffineMoving=dict(extensions=None), + BrainExtractionLaplacian=dict(extensions=None), + BrainExtractionMask=dict(extensions=None), + BrainExtractionPrior0GenericAffine=dict(extensions=None), + BrainExtractionPrior1InverseWarp=dict(extensions=None), + BrainExtractionPrior1Warp=dict(extensions=None), + BrainExtractionPriorWarped=dict(extensions=None), + BrainExtractionSegmentation=dict(extensions=None), + BrainExtractionTemplateLaplacian=dict(extensions=None), + BrainExtractionTmp=dict(extensions=None), + BrainExtractionWM=dict(extensions=None), + N4Corrected0=dict(extensions=None), + N4Truncated0=dict(extensions=None), ) outputs = BrainExtraction.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py b/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py index 78afc21df2..741bf5e820 100644 --- a/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py @@ -4,22 +4,10 @@ def test_ComposeMultiTransform_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dimension=dict( - argstr="%d", - position=0, - usedefault=True, - ), - environ=dict( - nohash=True, 
- usedefault=True, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + dimension=dict(argstr="%d", position=0, usedefault=True), + environ=dict(nohash=True, usedefault=True), + num_threads=dict(nohash=True, usedefault=True), output_transform=dict( argstr="%s", extensions=None, @@ -28,16 +16,8 @@ def test_ComposeMultiTransform_inputs(): name_template="%s_composed", position=1, ), - reference_image=dict( - argstr="%s", - extensions=None, - position=2, - ), - transforms=dict( - argstr="%s", - mandatory=True, - position=3, - ), + reference_image=dict(argstr="%s", extensions=None, position=2), + transforms=dict(argstr="%s", mandatory=True, position=3), ) inputs = ComposeMultiTransform.input_spec() @@ -47,11 +27,7 @@ def test_ComposeMultiTransform_inputs(): def test_ComposeMultiTransform_outputs(): - output_map = dict( - output_transform=dict( - extensions=None, - ), - ) + output_map = dict(output_transform=dict(extensions=None)) outputs = ComposeMultiTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_CompositeTransformUtil.py b/nipype/interfaces/ants/tests/test_auto_CompositeTransformUtil.py index 54b428db2a..dd16a5e85b 100644 --- a/nipype/interfaces/ants/tests/test_auto_CompositeTransformUtil.py +++ b/nipype/interfaces/ants/tests/test_auto_CompositeTransformUtil.py @@ -4,37 +4,13 @@ def test_CompositeTransformUtil_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s...", - mandatory=True, - position=3, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_file=dict( - argstr="%s", - extensions=None, - position=2, - ), - output_prefix=dict( - argstr="%s", - position=4, - usedefault=True, - ), - process=dict( - argstr="--%s", - position=1, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s...", mandatory=True, position=3), + num_threads=dict(nohash=True, usedefault=True), + out_file=dict(argstr="%s", extensions=None, position=2), + output_prefix=dict(argstr="%s", position=4, usedefault=True), + process=dict(argstr="--%s", position=1, usedefault=True), ) inputs = CompositeTransformUtil.input_spec() @@ -45,15 +21,9 @@ def test_CompositeTransformUtil_inputs(): def test_CompositeTransformUtil_outputs(): output_map = dict( - affine_transform=dict( - extensions=None, - ), - displacement_field=dict( - extensions=None, - ), - out_file=dict( - extensions=None, - ), + affine_transform=dict(extensions=None), + displacement_field=dict(extensions=None), + out_file=dict(extensions=None), ) outputs = CompositeTransformUtil.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py b/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py index a49239ebae..419c090958 100644 --- a/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py +++ b/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py @@ -4,70 +4,19 @@ def test_ConvertScalarImageToRGB_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - colormap=dict( - argstr="%s", - mandatory=True, - position=4, - ), - custom_color_map_file=dict( - argstr="%s", - position=5, - usedefault=True, - ), - dimension=dict( - argstr="%d", - mandatory=True, - position=0, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_image=dict( - argstr="%s", - extensions=None, - 
mandatory=True, - position=1, - ), - mask_image=dict( - argstr="%s", - extensions=None, - position=3, - usedefault=True, - ), - maximum_RGB_output=dict( - argstr="%d", - position=9, - usedefault=True, - ), - maximum_input=dict( - argstr="%d", - mandatory=True, - position=7, - ), - minimum_RGB_output=dict( - argstr="%d", - position=8, - usedefault=True, - ), - minimum_input=dict( - argstr="%d", - mandatory=True, - position=6, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - output_image=dict( - argstr="%s", - position=2, - usedefault=True, - ), + args=dict(argstr="%s"), + colormap=dict(argstr="%s", mandatory=True, position=4), + custom_color_map_file=dict(argstr="%s", position=5, usedefault=True), + dimension=dict(argstr="%d", mandatory=True, position=0, usedefault=True), + environ=dict(nohash=True, usedefault=True), + input_image=dict(argstr="%s", extensions=None, mandatory=True, position=1), + mask_image=dict(argstr="%s", extensions=None, position=3, usedefault=True), + maximum_RGB_output=dict(argstr="%d", position=9, usedefault=True), + maximum_input=dict(argstr="%d", mandatory=True, position=7), + minimum_RGB_output=dict(argstr="%d", position=8, usedefault=True), + minimum_input=dict(argstr="%d", mandatory=True, position=6), + num_threads=dict(nohash=True, usedefault=True), + output_image=dict(argstr="%s", position=2, usedefault=True), ) inputs = ConvertScalarImageToRGB.input_spec() @@ -77,11 +26,7 @@ def test_ConvertScalarImageToRGB_inputs(): def test_ConvertScalarImageToRGB_outputs(): - output_map = dict( - output_image=dict( - extensions=None, - ), - ) + output_map = dict(output_image=dict(extensions=None)) outputs = ConvertScalarImageToRGB.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py b/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py index a42551788b..cbf89e085e 100644 --- a/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py +++ b/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py @@ -4,94 +4,32 @@ def test_CorticalThickness_inputs(): input_map = dict( - anatomical_image=dict( - argstr="-a %s", - extensions=None, - mandatory=True, - ), - args=dict( - argstr="%s", - ), - b_spline_smoothing=dict( - argstr="-v", - ), + anatomical_image=dict(argstr="-a %s", extensions=None, mandatory=True), + args=dict(argstr="%s"), + b_spline_smoothing=dict(argstr="-v"), brain_probability_mask=dict( - argstr="-m %s", - copyfile=False, - extensions=None, - mandatory=True, - ), - brain_template=dict( - argstr="-e %s", - extensions=None, - mandatory=True, - ), - cortical_label_image=dict( - extensions=None, - ), - debug=dict( - argstr="-z 1", - ), - dimension=dict( - argstr="-d %d", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - extraction_registration_mask=dict( - argstr="-f %s", - extensions=None, - ), - image_suffix=dict( - argstr="-s %s", - usedefault=True, - ), - keep_temporary_files=dict( - argstr="-k %d", - ), - label_propagation=dict( - argstr="-l %s", - ), - max_iterations=dict( - argstr="-i %d", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_prefix=dict( - argstr="-o %s", - usedefault=True, - ), - posterior_formulation=dict( - argstr="-b %s", - ), - prior_segmentation_weight=dict( - argstr="-w %f", - ), - quick_registration=dict( - argstr="-q 1", - ), - segmentation_iterations=dict( - argstr="-n %d", - ), - segmentation_priors=dict( - argstr="-p %s", - mandatory=True, - ), - t1_registration_template=dict( - 
argstr="-t %s", - extensions=None, - mandatory=True, - ), - use_floatingpoint_precision=dict( - argstr="-j %d", - ), - use_random_seeding=dict( - argstr="-u %d", - ), + argstr="-m %s", copyfile=False, extensions=None, mandatory=True + ), + brain_template=dict(argstr="-e %s", extensions=None, mandatory=True), + cortical_label_image=dict(extensions=None), + debug=dict(argstr="-z 1"), + dimension=dict(argstr="-d %d", usedefault=True), + environ=dict(nohash=True, usedefault=True), + extraction_registration_mask=dict(argstr="-f %s", extensions=None), + image_suffix=dict(argstr="-s %s", usedefault=True), + keep_temporary_files=dict(argstr="-k %d"), + label_propagation=dict(argstr="-l %s"), + max_iterations=dict(argstr="-i %d"), + num_threads=dict(nohash=True, usedefault=True), + out_prefix=dict(argstr="-o %s", usedefault=True), + posterior_formulation=dict(argstr="-b %s"), + prior_segmentation_weight=dict(argstr="-w %f"), + quick_registration=dict(argstr="-q 1"), + segmentation_iterations=dict(argstr="-n %d"), + segmentation_priors=dict(argstr="-p %s", mandatory=True), + t1_registration_template=dict(argstr="-t %s", extensions=None, mandatory=True), + use_floatingpoint_precision=dict(argstr="-j %d"), + use_random_seeding=dict(argstr="-u %d"), ) inputs = CorticalThickness.input_spec() @@ -102,43 +40,19 @@ def test_CorticalThickness_inputs(): def test_CorticalThickness_outputs(): output_map = dict( - BrainExtractionMask=dict( - extensions=None, - ), - BrainSegmentation=dict( - extensions=None, - ), - BrainSegmentationN4=dict( - extensions=None, - ), + BrainExtractionMask=dict(extensions=None), + BrainSegmentation=dict(extensions=None), + BrainSegmentationN4=dict(extensions=None), BrainSegmentationPosteriors=dict(), - BrainVolumes=dict( - extensions=None, - ), - CorticalThickness=dict( - extensions=None, - ), - CorticalThicknessNormedToTemplate=dict( - extensions=None, - ), - ExtractedBrainN4=dict( - extensions=None, - ), - SubjectToTemplate0GenericAffine=dict( - extensions=None, - ), - SubjectToTemplate1Warp=dict( - extensions=None, - ), - SubjectToTemplateLogJacobian=dict( - extensions=None, - ), - TemplateToSubject0Warp=dict( - extensions=None, - ), - TemplateToSubject1GenericAffine=dict( - extensions=None, - ), + BrainVolumes=dict(extensions=None), + CorticalThickness=dict(extensions=None), + CorticalThicknessNormedToTemplate=dict(extensions=None), + ExtractedBrainN4=dict(extensions=None), + SubjectToTemplate0GenericAffine=dict(extensions=None), + SubjectToTemplate1Warp=dict(extensions=None), + SubjectToTemplateLogJacobian=dict(extensions=None), + TemplateToSubject0Warp=dict(extensions=None), + TemplateToSubject1GenericAffine=dict(extensions=None), ) outputs = CorticalThickness.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py b/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py index 197ec2ad23..f9e5e893df 100644 --- a/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py +++ b/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py @@ -4,42 +4,14 @@ def test_CreateJacobianDeterminantImage_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - deformationField=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=1, - ), - doLogJacobian=dict( - argstr="%d", - position=3, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - imageDimension=dict( - argstr="%d", - mandatory=True, - position=0, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - 
outputImage=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - useGeometric=dict( - argstr="%d", - position=4, - ), + args=dict(argstr="%s"), + deformationField=dict(argstr="%s", extensions=None, mandatory=True, position=1), + doLogJacobian=dict(argstr="%d", position=3), + environ=dict(nohash=True, usedefault=True), + imageDimension=dict(argstr="%d", mandatory=True, position=0), + num_threads=dict(nohash=True, usedefault=True), + outputImage=dict(argstr="%s", extensions=None, mandatory=True, position=2), + useGeometric=dict(argstr="%d", position=4), ) inputs = CreateJacobianDeterminantImage.input_spec() @@ -49,11 +21,7 @@ def test_CreateJacobianDeterminantImage_inputs(): def test_CreateJacobianDeterminantImage_outputs(): - output_map = dict( - jacobian_image=dict( - extensions=None, - ), - ) + output_map = dict(jacobian_image=dict(extensions=None)) outputs = CreateJacobianDeterminantImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py b/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py index 6516b03ad7..31303a5a89 100644 --- a/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py +++ b/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py @@ -4,56 +4,20 @@ def test_CreateTiledMosaic_inputs(): input_map = dict( - alpha_value=dict( - argstr="-a %.2f", - ), - args=dict( - argstr="%s", - ), - direction=dict( - argstr="-d %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - flip_slice=dict( - argstr="-f %s", - ), - input_image=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - ), - mask_image=dict( - argstr="-x %s", - extensions=None, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - output_image=dict( - argstr="-o %s", - usedefault=True, - ), - pad_or_crop=dict( - argstr="-p %s", - ), - permute_axes=dict( - argstr="-g", - ), - rgb_image=dict( - argstr="-r %s", - extensions=None, - mandatory=True, - ), - slices=dict( - argstr="-s %s", - ), - tile_geometry=dict( - argstr="-t %s", - ), + alpha_value=dict(argstr="-a %.2f"), + args=dict(argstr="%s"), + direction=dict(argstr="-d %d"), + environ=dict(nohash=True, usedefault=True), + flip_slice=dict(argstr="-f %s"), + input_image=dict(argstr="-i %s", extensions=None, mandatory=True), + mask_image=dict(argstr="-x %s", extensions=None), + num_threads=dict(nohash=True, usedefault=True), + output_image=dict(argstr="-o %s", usedefault=True), + pad_or_crop=dict(argstr="-p %s"), + permute_axes=dict(argstr="-g"), + rgb_image=dict(argstr="-r %s", extensions=None, mandatory=True), + slices=dict(argstr="-s %s"), + tile_geometry=dict(argstr="-t %s"), ) inputs = CreateTiledMosaic.input_spec() @@ -63,11 +27,7 @@ def test_CreateTiledMosaic_inputs(): def test_CreateTiledMosaic_outputs(): - output_map = dict( - output_image=dict( - extensions=None, - ), - ) + output_map = dict(output_image=dict(extensions=None)) outputs = CreateTiledMosaic.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py b/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py index 7af4764633..f917fc2240 100644 --- a/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py +++ b/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py @@ -4,21 +4,10 @@ def test_DenoiseImage_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dimension=dict( - argstr="-d %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_image=dict( - 
argstr="-i %s", - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s"), + dimension=dict(argstr="-d %d"), + environ=dict(nohash=True, usedefault=True), + input_image=dict(argstr="-i %s", extensions=None, mandatory=True), noise_image=dict( extensions=None, hash_files=False, @@ -26,14 +15,8 @@ def test_DenoiseImage_inputs(): name_source=["input_image"], name_template="%s_noise", ), - noise_model=dict( - argstr="-n %s", - usedefault=True, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + noise_model=dict(argstr="-n %s", usedefault=True), + num_threads=dict(nohash=True, usedefault=True), output_image=dict( argstr="-o %s", extensions=None, @@ -42,18 +25,9 @@ def test_DenoiseImage_inputs(): name_source=["input_image"], name_template="%s_noise_corrected", ), - save_noise=dict( - mandatory=True, - usedefault=True, - xor=["noise_image"], - ), - shrink_factor=dict( - argstr="-s %s", - usedefault=True, - ), - verbose=dict( - argstr="-v", - ), + save_noise=dict(mandatory=True, usedefault=True, xor=["noise_image"]), + shrink_factor=dict(argstr="-s %s", usedefault=True), + verbose=dict(argstr="-v"), ) inputs = DenoiseImage.input_spec() @@ -64,12 +38,7 @@ def test_DenoiseImage_inputs(): def test_DenoiseImage_outputs(): output_map = dict( - noise_image=dict( - extensions=None, - ), - output_image=dict( - extensions=None, - ), + noise_image=dict(extensions=None), output_image=dict(extensions=None) ) outputs = DenoiseImage.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py b/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py index 6fde6f5b44..d320e48ecb 100644 --- a/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py +++ b/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py @@ -4,61 +4,24 @@ def test_GenWarpFields_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bias_field_correction=dict( - argstr="-n 1", - ), - dimension=dict( - argstr="-d %d", - position=1, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - force_proceed=dict( - argstr="-f 1", - ), + args=dict(argstr="%s"), + bias_field_correction=dict(argstr="-n 1"), + dimension=dict(argstr="-d %d", position=1, usedefault=True), + environ=dict(nohash=True, usedefault=True), + force_proceed=dict(argstr="-f 1"), input_image=dict( - argstr="-i %s", - copyfile=False, - extensions=None, - mandatory=True, - ), - inverse_warp_template_labels=dict( - argstr="-l", - ), - max_iterations=dict( - argstr="-m %s", - sep="x", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_prefix=dict( - argstr="-o %s", - usedefault=True, - ), - quality_check=dict( - argstr="-q 1", + argstr="-i %s", copyfile=False, extensions=None, mandatory=True ), + inverse_warp_template_labels=dict(argstr="-l"), + max_iterations=dict(argstr="-m %s", sep="x"), + num_threads=dict(nohash=True, usedefault=True), + out_prefix=dict(argstr="-o %s", usedefault=True), + quality_check=dict(argstr="-q 1"), reference_image=dict( - argstr="-r %s", - copyfile=True, - extensions=None, - mandatory=True, - ), - similarity_metric=dict( - argstr="-s %s", - ), - transformation_model=dict( - argstr="-t %s", - usedefault=True, + argstr="-r %s", copyfile=True, extensions=None, mandatory=True ), + similarity_metric=dict(argstr="-s %s"), + transformation_model=dict(argstr="-t %s", usedefault=True), ) inputs = GenWarpFields.input_spec() @@ -69,21 +32,11 @@ def test_GenWarpFields_inputs(): def test_GenWarpFields_outputs(): output_map = dict( - affine_transformation=dict( - 
extensions=None, - ), - input_file=dict( - extensions=None, - ), - inverse_warp_field=dict( - extensions=None, - ), - output_file=dict( - extensions=None, - ), - warp_field=dict( - extensions=None, - ), + affine_transformation=dict(extensions=None), + input_file=dict(extensions=None), + inverse_warp_field=dict(extensions=None), + output_file=dict(extensions=None), + warp_field=dict(extensions=None), ) outputs = GenWarpFields.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_ImageMath.py b/nipype/interfaces/ants/tests/test_auto_ImageMath.py index d71d4c476a..b19e64b1d2 100644 --- a/nipype/interfaces/ants/tests/test_auto_ImageMath.py +++ b/nipype/interfaces/ants/tests/test_auto_ImageMath.py @@ -4,40 +4,14 @@ def test_ImageMath_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - copy_header=dict( - usedefault=True, - ), - dimension=dict( - argstr="%d", - position=1, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - op1=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - op2=dict( - argstr="%s", - position=-1, - ), - operation=dict( - argstr="%s", - mandatory=True, - position=3, - ), + args=dict(argstr="%s"), + copy_header=dict(usedefault=True), + dimension=dict(argstr="%d", position=1, usedefault=True), + environ=dict(nohash=True, usedefault=True), + num_threads=dict(nohash=True, usedefault=True), + op1=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + op2=dict(argstr="%s", position=-1), + operation=dict(argstr="%s", mandatory=True, position=3), output_image=dict( argstr="%s", extensions=None, @@ -55,11 +29,7 @@ def test_ImageMath_inputs(): def test_ImageMath_outputs(): - output_map = dict( - output_image=dict( - extensions=None, - ), - ) + output_map = dict(output_image=dict(extensions=None)) outputs = ImageMath.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_JointFusion.py b/nipype/interfaces/ants/tests/test_auto_JointFusion.py index f234ceea7c..a3e6d61714 100644 --- a/nipype/interfaces/ants/tests/test_auto_JointFusion.py +++ b/nipype/interfaces/ants/tests/test_auto_JointFusion.py @@ -4,95 +4,39 @@ def test_JointFusion_inputs(): input_map = dict( - alpha=dict( - argstr="-a %s", - usedefault=True, - ), - args=dict( - argstr="%s", - ), - atlas_image=dict( - argstr="-g %s...", - mandatory=True, - ), - atlas_segmentation_image=dict( - argstr="-l %s...", - mandatory=True, - ), - beta=dict( - argstr="-b %s", - usedefault=True, - ), - constrain_nonnegative=dict( - argstr="-c", - usedefault=True, - ), - dimension=dict( - argstr="-d %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + alpha=dict(argstr="-a %s", usedefault=True), + args=dict(argstr="%s"), + atlas_image=dict(argstr="-g %s...", mandatory=True), + atlas_segmentation_image=dict(argstr="-l %s...", mandatory=True), + beta=dict(argstr="-b %s", usedefault=True), + constrain_nonnegative=dict(argstr="-c", usedefault=True), + dimension=dict(argstr="-d %d"), + environ=dict(nohash=True, usedefault=True), exclusion_image=dict(), - exclusion_image_label=dict( - argstr="-e %s", - requires=["exclusion_image"], - ), - mask_image=dict( - argstr="-x %s", - extensions=None, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + exclusion_image_label=dict(argstr="-e %s", requires=["exclusion_image"]), + mask_image=dict(argstr="-x %s", extensions=None), + num_threads=dict(nohash=True, usedefault=True), 
out_atlas_voting_weight_name_format=dict( requires=[ "out_label_fusion", "out_intensity_fusion_name_format", "out_label_post_prob_name_format", - ], - ), - out_intensity_fusion_name_format=dict( - argstr="", - ), - out_label_fusion=dict( - argstr="%s", - extensions=None, - hash_files=False, + ] ), + out_intensity_fusion_name_format=dict(argstr=""), + out_label_fusion=dict(argstr="%s", extensions=None, hash_files=False), out_label_post_prob_name_format=dict( - requires=["out_label_fusion", "out_intensity_fusion_name_format"], - ), - patch_metric=dict( - argstr="-m %s", - ), - patch_radius=dict( - argstr="-p %s", - maxlen=3, - minlen=3, - ), - retain_atlas_voting_images=dict( - argstr="-f", - usedefault=True, + requires=["out_label_fusion", "out_intensity_fusion_name_format"] ), + patch_metric=dict(argstr="-m %s"), + patch_radius=dict(argstr="-p %s", maxlen=3, minlen=3), + retain_atlas_voting_images=dict(argstr="-f", usedefault=True), retain_label_posterior_images=dict( - argstr="-r", - requires=["atlas_segmentation_image"], - usedefault=True, - ), - search_radius=dict( - argstr="-s %s", - usedefault=True, - ), - target_image=dict( - argstr="-t %s", - mandatory=True, - ), - verbose=dict( - argstr="-v", + argstr="-r", requires=["atlas_segmentation_image"], usedefault=True ), + search_radius=dict(argstr="-s %s", usedefault=True), + target_image=dict(argstr="-t %s", mandatory=True), + verbose=dict(argstr="-v"), ) inputs = JointFusion.input_spec() @@ -105,9 +49,7 @@ def test_JointFusion_outputs(): output_map = dict( out_atlas_voting_weight=dict(), out_intensity_fusion=dict(), - out_label_fusion=dict( - extensions=None, - ), + out_label_fusion=dict(extensions=None), out_label_post_prob=dict(), ) outputs = JointFusion.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py b/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py index 00c1ec53a9..d44c1f9a9c 100644 --- a/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py +++ b/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py @@ -4,13 +4,8 @@ def test_KellyKapowski_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - convergence=dict( - argstr='--convergence "%s"', - usedefault=True, - ), + args=dict(argstr="%s"), + convergence=dict(argstr='--convergence "%s"', usedefault=True), cortical_thickness=dict( argstr='--output "%s"', extensions=None, @@ -19,61 +14,35 @@ def test_KellyKapowski_inputs(): name_source=["segmentation_image"], name_template="%s_cortical_thickness", ), - dimension=dict( - argstr="--image-dimensionality %d", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gradient_step=dict( - argstr="--gradient-step %f", - usedefault=True, - ), - gray_matter_label=dict( - usedefault=True, - ), + dimension=dict(argstr="--image-dimensionality %d", usedefault=True), + environ=dict(nohash=True, usedefault=True), + gradient_step=dict(argstr="--gradient-step %f", usedefault=True), + gray_matter_label=dict(usedefault=True), gray_matter_prob_image=dict( - argstr='--gray-matter-probability-image "%s"', - extensions=None, + argstr='--gray-matter-probability-image "%s"', extensions=None ), max_invert_displacement_field_iters=dict( argstr="--maximum-number-of-invert-displacement-field-iterations %d", usedefault=True, ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + num_threads=dict(nohash=True, usedefault=True), number_integration_points=dict( - argstr="--number-of-integration-points %d", - usedefault=True, + argstr="--number-of-integration-points %d", 
usedefault=True ), segmentation_image=dict( - argstr='--segmentation-image "%s"', - extensions=None, - mandatory=True, - ), - smoothing_variance=dict( - argstr="--smoothing-variance %f", - usedefault=True, + argstr='--segmentation-image "%s"', extensions=None, mandatory=True ), + smoothing_variance=dict(argstr="--smoothing-variance %f", usedefault=True), smoothing_velocity_field=dict( - argstr="--smoothing-velocity-field-parameter %f", - usedefault=True, + argstr="--smoothing-velocity-field-parameter %f", usedefault=True ), thickness_prior_estimate=dict( - argstr="--thickness-prior-estimate %f", - usedefault=True, + argstr="--thickness-prior-estimate %f", usedefault=True ), thickness_prior_image=dict( - argstr='--thickness-prior-image "%s"', - extensions=None, - ), - use_bspline_smoothing=dict( - argstr="--use-bspline-smoothing 1", + argstr='--thickness-prior-image "%s"', extensions=None ), + use_bspline_smoothing=dict(argstr="--use-bspline-smoothing 1"), warped_white_matter=dict( extensions=None, hash_files=False, @@ -81,12 +50,9 @@ def test_KellyKapowski_inputs(): name_source=["segmentation_image"], name_template="%s_warped_white_matter", ), - white_matter_label=dict( - usedefault=True, - ), + white_matter_label=dict(usedefault=True), white_matter_prob_image=dict( - argstr='--white-matter-probability-image "%s"', - extensions=None, + argstr='--white-matter-probability-image "%s"', extensions=None ), ) inputs = KellyKapowski.input_spec() @@ -98,12 +64,8 @@ def test_KellyKapowski_inputs(): def test_KellyKapowski_outputs(): output_map = dict( - cortical_thickness=dict( - extensions=None, - ), - warped_white_matter=dict( - extensions=None, - ), + cortical_thickness=dict(extensions=None), + warped_white_matter=dict(extensions=None), ) outputs = KellyKapowski.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py b/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py index 306c7aa17e..5159775684 100644 --- a/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py +++ b/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py @@ -4,40 +4,16 @@ def test_LabelGeometry_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dimension=dict( - argstr="%d", - position=0, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + dimension=dict(argstr="%d", position=0, usedefault=True), + environ=dict(nohash=True, usedefault=True), intensity_image=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - usedefault=True, - ), - label_image=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=1, - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", extensions=None, mandatory=True, position=2, usedefault=True ), + label_image=dict(argstr="%s", extensions=None, mandatory=True, position=1), + num_threads=dict(nohash=True, usedefault=True), output_file=dict( - argstr="%s", - name_source=["label_image"], - name_template="%s.csv", - position=3, + argstr="%s", name_source=["label_image"], name_template="%s.csv", position=3 ), ) inputs = LabelGeometry.input_spec() @@ -48,11 +24,7 @@ def test_LabelGeometry_inputs(): def test_LabelGeometry_outputs(): - output_map = dict( - output_file=dict( - extensions=None, - ), - ) + output_map = dict(output_file=dict(extensions=None)) outputs = LabelGeometry.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py 
b/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py index f6a8ffde25..9dd1c79fc5 100644 --- a/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py +++ b/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py @@ -4,36 +4,16 @@ def test_LaplacianThickness_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dT=dict( - argstr="%s", - position=6, - requires=["prior_thickness"], - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + dT=dict(argstr="%s", position=6, requires=["prior_thickness"]), + environ=dict(nohash=True, usedefault=True), input_gm=dict( - argstr="%s", - copyfile=True, - extensions=None, - mandatory=True, - position=2, + argstr="%s", copyfile=True, extensions=None, mandatory=True, position=2 ), input_wm=dict( - argstr="%s", - copyfile=True, - extensions=None, - mandatory=True, - position=1, - ), - num_threads=dict( - nohash=True, - usedefault=True, + argstr="%s", copyfile=True, extensions=None, mandatory=True, position=1 ), + num_threads=dict(nohash=True, usedefault=True), output_image=dict( argstr="%s", hash_files=False, @@ -42,25 +22,10 @@ def test_LaplacianThickness_inputs(): name_template="%s_thickness", position=3, ), - prior_thickness=dict( - argstr="%s", - position=5, - requires=["smooth_param"], - ), - smooth_param=dict( - argstr="%s", - position=4, - ), - sulcus_prior=dict( - argstr="%s", - position=7, - requires=["dT"], - ), - tolerance=dict( - argstr="%s", - position=8, - requires=["sulcus_prior"], - ), + prior_thickness=dict(argstr="%s", position=5, requires=["smooth_param"]), + smooth_param=dict(argstr="%s", position=4), + sulcus_prior=dict(argstr="%s", position=7, requires=["dT"]), + tolerance=dict(argstr="%s", position=8, requires=["sulcus_prior"]), ) inputs = LaplacianThickness.input_spec() @@ -70,11 +35,7 @@ def test_LaplacianThickness_inputs(): def test_LaplacianThickness_outputs(): - output_map = dict( - output_image=dict( - extensions=None, - ), - ) + output_map = dict(output_image=dict(extensions=None)) outputs = LaplacianThickness.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py b/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py index 05279d8017..cf76c01234 100644 --- a/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py +++ b/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py @@ -4,57 +4,19 @@ def test_MeasureImageSimilarity_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dimension=dict( - argstr="--dimensionality %d", - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixed_image=dict( - extensions=None, - mandatory=True, - ), - fixed_image_mask=dict( - argstr="%s", - extensions=None, - ), - metric=dict( - argstr="%s", - mandatory=True, - ), - metric_weight=dict( - requires=["metric"], - usedefault=True, - ), - moving_image=dict( - extensions=None, - mandatory=True, - ), - moving_image_mask=dict( - extensions=None, - requires=["fixed_image_mask"], - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - radius_or_number_of_bins=dict( - mandatory=True, - requires=["metric"], - ), - sampling_percentage=dict( - mandatory=True, - requires=["metric"], - ), - sampling_strategy=dict( - requires=["metric"], - usedefault=True, - ), + args=dict(argstr="%s"), + dimension=dict(argstr="--dimensionality %d", position=1), + environ=dict(nohash=True, usedefault=True), + fixed_image=dict(extensions=None, mandatory=True), + 
fixed_image_mask=dict(argstr="%s", extensions=None), + metric=dict(argstr="%s", mandatory=True), + metric_weight=dict(requires=["metric"], usedefault=True), + moving_image=dict(extensions=None, mandatory=True), + moving_image_mask=dict(extensions=None, requires=["fixed_image_mask"]), + num_threads=dict(nohash=True, usedefault=True), + radius_or_number_of_bins=dict(mandatory=True, requires=["metric"]), + sampling_percentage=dict(mandatory=True, requires=["metric"]), + sampling_strategy=dict(requires=["metric"], usedefault=True), ) inputs = MeasureImageSimilarity.input_spec() @@ -64,9 +26,7 @@ def test_MeasureImageSimilarity_inputs(): def test_MeasureImageSimilarity_outputs(): - output_map = dict( - similarity=dict(), - ) + output_map = dict(similarity=dict()) outputs = MeasureImageSimilarity.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py b/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py index 89f8fa60ae..ad74271e6f 100644 --- a/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py +++ b/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py @@ -4,39 +4,15 @@ def test_MultiplyImages_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dimension=dict( - argstr="%d", - mandatory=True, - position=0, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - first_input=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=1, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + dimension=dict(argstr="%d", mandatory=True, position=0), + environ=dict(nohash=True, usedefault=True), + first_input=dict(argstr="%s", extensions=None, mandatory=True, position=1), + num_threads=dict(nohash=True, usedefault=True), output_product_image=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=3, - ), - second_input=dict( - argstr="%s", - mandatory=True, - position=2, + argstr="%s", extensions=None, mandatory=True, position=3 ), + second_input=dict(argstr="%s", mandatory=True, position=2), ) inputs = MultiplyImages.input_spec() @@ -46,11 +22,7 @@ def test_MultiplyImages_inputs(): def test_MultiplyImages_outputs(): - output_map = dict( - output_product_image=dict( - extensions=None, - ), - ) + output_map = dict(output_product_image=dict(extensions=None)) outputs = MultiplyImages.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py b/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py index 59775df2ea..912a71c137 100644 --- a/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py +++ b/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py @@ -4,53 +4,19 @@ def test_N4BiasFieldCorrection_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bias_image=dict( - extensions=None, - hash_files=False, - ), - bspline_fitting_distance=dict( - argstr="--bspline-fitting %s", - ), - bspline_order=dict( - requires=["bspline_fitting_distance"], - ), - convergence_threshold=dict( - requires=["n_iterations"], - ), - copy_header=dict( - mandatory=True, - usedefault=True, - ), - dimension=dict( - argstr="-d %d", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - histogram_sharpening=dict( - argstr="--histogram-sharpening [%g,%g,%d]", - ), - input_image=dict( - argstr="--input-image %s", - extensions=None, - mandatory=True, - ), - mask_image=dict( - argstr="--mask-image %s", - extensions=None, - ), - 
n_iterations=dict( - argstr="--convergence %s", - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + bias_image=dict(extensions=None, hash_files=False), + bspline_fitting_distance=dict(argstr="--bspline-fitting %s"), + bspline_order=dict(requires=["bspline_fitting_distance"]), + convergence_threshold=dict(requires=["n_iterations"]), + copy_header=dict(mandatory=True, usedefault=True), + dimension=dict(argstr="-d %d", usedefault=True), + environ=dict(nohash=True, usedefault=True), + histogram_sharpening=dict(argstr="--histogram-sharpening [%g,%g,%d]"), + input_image=dict(argstr="--input-image %s", extensions=None, mandatory=True), + mask_image=dict(argstr="--mask-image %s", extensions=None), + n_iterations=dict(argstr="--convergence %s"), + num_threads=dict(nohash=True, usedefault=True), output_image=dict( argstr="--output %s", hash_files=False, @@ -58,23 +24,10 @@ def test_N4BiasFieldCorrection_inputs(): name_source=["input_image"], name_template="%s_corrected", ), - rescale_intensities=dict( - argstr="-r", - min_ver="2.1.0", - usedefault=True, - ), - save_bias=dict( - mandatory=True, - usedefault=True, - xor=["bias_image"], - ), - shrink_factor=dict( - argstr="--shrink-factor %d", - ), - weight_image=dict( - argstr="--weight-image %s", - extensions=None, - ), + rescale_intensities=dict(argstr="-r", min_ver="2.1.0", usedefault=True), + save_bias=dict(mandatory=True, usedefault=True, xor=["bias_image"]), + shrink_factor=dict(argstr="--shrink-factor %d"), + weight_image=dict(argstr="--weight-image %s", extensions=None), ) inputs = N4BiasFieldCorrection.input_spec() @@ -85,12 +38,7 @@ def test_N4BiasFieldCorrection_inputs(): def test_N4BiasFieldCorrection_outputs(): output_map = dict( - bias_image=dict( - extensions=None, - ), - output_image=dict( - extensions=None, - ), + bias_image=dict(extensions=None), output_image=dict(extensions=None) ) outputs = N4BiasFieldCorrection.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_Registration.py b/nipype/interfaces/ants/tests/test_auto_Registration.py index 507e0effe2..a2d875c745 100644 --- a/nipype/interfaces/ants/tests/test_auto_Registration.py +++ b/nipype/interfaces/ants/tests/test_auto_Registration.py @@ -4,170 +4,81 @@ def test_Registration_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), collapse_output_transforms=dict( - argstr="--collapse-output-transforms %d", - usedefault=True, - ), - convergence_threshold=dict( - requires=["number_of_iterations"], - usedefault=True, + argstr="--collapse-output-transforms %d", usedefault=True ), + convergence_threshold=dict(requires=["number_of_iterations"], usedefault=True), convergence_window_size=dict( - requires=["convergence_threshold"], - usedefault=True, - ), - dimension=dict( - argstr="--dimensionality %d", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixed_image=dict( - mandatory=True, + requires=["convergence_threshold"], usedefault=True ), + dimension=dict(argstr="--dimensionality %d", usedefault=True), + environ=dict(nohash=True, usedefault=True), + fixed_image=dict(mandatory=True), fixed_image_mask=dict( - argstr="%s", - extensions=None, - max_ver="2.1.0", - xor=["fixed_image_masks"], - ), - fixed_image_masks=dict( - min_ver="2.2.0", - xor=["fixed_image_mask"], - ), - float=dict( - argstr="--float %d", + argstr="%s", extensions=None, max_ver="2.1.0", xor=["fixed_image_masks"] ), + fixed_image_masks=dict(min_ver="2.2.0", xor=["fixed_image_mask"]), + 
float=dict(argstr="--float %d"), initial_moving_transform=dict( - argstr="%s", - xor=["initial_moving_transform_com"], + argstr="%s", xor=["initial_moving_transform_com"] ), initial_moving_transform_com=dict( - argstr="%s", - xor=["initial_moving_transform"], + argstr="%s", xor=["initial_moving_transform"] ), initialize_transforms_per_stage=dict( - argstr="--initialize-transforms-per-stage %d", - usedefault=True, - ), - interpolation=dict( - argstr="%s", - usedefault=True, + argstr="--initialize-transforms-per-stage %d", usedefault=True ), + interpolation=dict(argstr="%s", usedefault=True), interpolation_parameters=dict(), invert_initial_moving_transform=dict( - requires=["initial_moving_transform"], - xor=["initial_moving_transform_com"], - ), - metric=dict( - mandatory=True, + requires=["initial_moving_transform"], xor=["initial_moving_transform_com"] ), + metric=dict(mandatory=True), metric_item_trait=dict(), metric_stage_trait=dict(), - metric_weight=dict( - mandatory=True, - requires=["metric"], - usedefault=True, - ), - metric_weight_item_trait=dict( - usedefault=True, - ), + metric_weight=dict(mandatory=True, requires=["metric"], usedefault=True), + metric_weight_item_trait=dict(usedefault=True), metric_weight_stage_trait=dict(), - moving_image=dict( - mandatory=True, - ), + moving_image=dict(mandatory=True), moving_image_mask=dict( extensions=None, max_ver="2.1.0", requires=["fixed_image_mask"], xor=["moving_image_masks"], ), - moving_image_masks=dict( - min_ver="2.2.0", - xor=["moving_image_mask"], - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), + moving_image_masks=dict(min_ver="2.2.0", xor=["moving_image_mask"]), + num_threads=dict(nohash=True, usedefault=True), number_of_iterations=dict(), output_inverse_warped_image=dict( - hash_files=False, - requires=["output_warped_image"], - ), - output_transform_prefix=dict( - argstr="%s", - usedefault=True, - ), - output_warped_image=dict( - hash_files=False, - ), - radius_bins_item_trait=dict( - usedefault=True, + hash_files=False, requires=["output_warped_image"] ), + output_transform_prefix=dict(argstr="%s", usedefault=True), + output_warped_image=dict(hash_files=False), + radius_bins_item_trait=dict(usedefault=True), radius_bins_stage_trait=dict(), - radius_or_number_of_bins=dict( - requires=["metric_weight"], - usedefault=True, - ), - restore_state=dict( - argstr="--restore-state %s", - extensions=None, - ), + radius_or_number_of_bins=dict(requires=["metric_weight"], usedefault=True), + restore_state=dict(argstr="--restore-state %s", extensions=None), restrict_deformation=dict(), - sampling_percentage=dict( - requires=["sampling_strategy"], - ), + sampling_percentage=dict(requires=["sampling_strategy"]), sampling_percentage_item_trait=dict(), sampling_percentage_stage_trait=dict(), - sampling_strategy=dict( - requires=["metric_weight"], - ), + sampling_strategy=dict(requires=["metric_weight"]), sampling_strategy_item_trait=dict(), sampling_strategy_stage_trait=dict(), - save_state=dict( - argstr="--save-state %s", - extensions=None, - ), - shrink_factors=dict( - mandatory=True, - ), - sigma_units=dict( - requires=["smoothing_sigmas"], - ), - smoothing_sigmas=dict( - mandatory=True, - ), + save_state=dict(argstr="--save-state %s", extensions=None), + shrink_factors=dict(mandatory=True), + sigma_units=dict(requires=["smoothing_sigmas"]), + smoothing_sigmas=dict(mandatory=True), transform_parameters=dict(), - transforms=dict( - argstr="%s", - mandatory=True, - ), + transforms=dict(argstr="%s", mandatory=True), 
use_estimate_learning_rate_once=dict(), - use_histogram_matching=dict( - usedefault=True, - ), - verbose=dict( - argstr="-v", - nohash=True, - usedefault=True, - ), - winsorize_lower_quantile=dict( - argstr="%s", - usedefault=True, - ), - winsorize_upper_quantile=dict( - argstr="%s", - usedefault=True, - ), + use_histogram_matching=dict(usedefault=True), + verbose=dict(argstr="-v", nohash=True, usedefault=True), + winsorize_lower_quantile=dict(argstr="%s", usedefault=True), + winsorize_upper_quantile=dict(argstr="%s", usedefault=True), write_composite_transform=dict( - argstr="--write-composite-transform %d", - usedefault=True, + argstr="--write-composite-transform %d", usedefault=True ), ) inputs = Registration.input_spec() @@ -179,29 +90,19 @@ def test_Registration_inputs(): def test_Registration_outputs(): output_map = dict( - composite_transform=dict( - extensions=None, - ), + composite_transform=dict(extensions=None), elapsed_time=dict(), forward_invert_flags=dict(), forward_transforms=dict(), - inverse_composite_transform=dict( - extensions=None, - ), - inverse_warped_image=dict( - extensions=None, - ), + inverse_composite_transform=dict(extensions=None), + inverse_warped_image=dict(extensions=None), metric_value=dict(), reverse_forward_invert_flags=dict(), reverse_forward_transforms=dict(), reverse_invert_flags=dict(), reverse_transforms=dict(), - save_state=dict( - extensions=None, - ), - warped_image=dict( - extensions=None, - ), + save_state=dict(extensions=None), + warped_image=dict(extensions=None), ) outputs = Registration.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py b/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py index c09f08d17a..8c73bc62e6 100644 --- a/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py +++ b/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py @@ -4,52 +4,18 @@ def test_RegistrationSynQuick_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dimension=dict( - argstr="-d %d", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixed_image=dict( - argstr="-f %s...", - mandatory=True, - ), - histogram_bins=dict( - argstr="-r %d", - usedefault=True, - ), - moving_image=dict( - argstr="-m %s...", - mandatory=True, - ), - num_threads=dict( - argstr="-n %d", - usedefault=True, - ), - output_prefix=dict( - argstr="-o %s", - usedefault=True, - ), - precision_type=dict( - argstr="-p %s", - usedefault=True, - ), - spline_distance=dict( - argstr="-s %d", - usedefault=True, - ), - transform_type=dict( - argstr="-t %s", - usedefault=True, - ), - use_histogram_matching=dict( - argstr="-j %d", - ), + args=dict(argstr="%s"), + dimension=dict(argstr="-d %d", usedefault=True), + environ=dict(nohash=True, usedefault=True), + fixed_image=dict(argstr="-f %s...", mandatory=True), + histogram_bins=dict(argstr="-r %d", usedefault=True), + moving_image=dict(argstr="-m %s...", mandatory=True), + num_threads=dict(argstr="-n %d", usedefault=True), + output_prefix=dict(argstr="-o %s", usedefault=True), + precision_type=dict(argstr="-p %s", usedefault=True), + spline_distance=dict(argstr="-s %d", usedefault=True), + transform_type=dict(argstr="-t %s", usedefault=True), + use_histogram_matching=dict(argstr="-j %d"), ) inputs = RegistrationSynQuick.input_spec() @@ -60,21 +26,11 @@ def test_RegistrationSynQuick_inputs(): def test_RegistrationSynQuick_outputs(): output_map = dict( - forward_warp_field=dict( - extensions=None, - ), - inverse_warp_field=dict( - 
extensions=None, - ), - inverse_warped_image=dict( - extensions=None, - ), - out_matrix=dict( - extensions=None, - ), - warped_image=dict( - extensions=None, - ), + forward_warp_field=dict(extensions=None), + inverse_warp_field=dict(extensions=None), + inverse_warped_image=dict(extensions=None), + out_matrix=dict(extensions=None), + warped_image=dict(extensions=None), ) outputs = RegistrationSynQuick.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py b/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py index 9bf5c6c6a6..25249aee91 100644 --- a/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py +++ b/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py @@ -4,47 +4,15 @@ def test_ResampleImageBySpacing_inputs(): input_map = dict( - addvox=dict( - argstr="%d", - position=6, - requires=["apply_smoothing"], - ), - apply_smoothing=dict( - argstr="%d", - position=5, - ), - args=dict( - argstr="%s", - ), - dimension=dict( - argstr="%d", - position=1, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_image=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - nn_interp=dict( - argstr="%d", - position=-1, - requires=["addvox"], - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_spacing=dict( - argstr="%s", - mandatory=True, - position=4, - ), + addvox=dict(argstr="%d", position=6, requires=["apply_smoothing"]), + apply_smoothing=dict(argstr="%d", position=5), + args=dict(argstr="%s"), + dimension=dict(argstr="%d", position=1, usedefault=True), + environ=dict(nohash=True, usedefault=True), + input_image=dict(argstr="%s", extensions=None, mandatory=True, position=2), + nn_interp=dict(argstr="%d", position=-1, requires=["addvox"]), + num_threads=dict(nohash=True, usedefault=True), + out_spacing=dict(argstr="%s", mandatory=True, position=4), output_image=dict( argstr="%s", extensions=None, @@ -62,11 +30,7 @@ def test_ResampleImageBySpacing_inputs(): def test_ResampleImageBySpacing_outputs(): - output_map = dict( - output_image=dict( - extensions=None, - ), - ) + output_map = dict(output_image=dict(extensions=None)) outputs = ResampleImageBySpacing.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_ThresholdImage.py b/nipype/interfaces/ants/tests/test_auto_ThresholdImage.py index c17f340b69..85788d4181 100644 --- a/nipype/interfaces/ants/tests/test_auto_ThresholdImage.py +++ b/nipype/interfaces/ants/tests/test_auto_ThresholdImage.py @@ -4,52 +4,21 @@ def test_ThresholdImage_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - copy_header=dict( - mandatory=True, - usedefault=True, - ), - dimension=dict( - argstr="%d", - position=1, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_image=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - input_mask=dict( - argstr="%s", - extensions=None, - requires=["num_thresholds"], - ), - inside_value=dict( - argstr="%f", - position=6, - requires=["th_low"], - ), + args=dict(argstr="%s"), + copy_header=dict(mandatory=True, usedefault=True), + dimension=dict(argstr="%d", position=1, usedefault=True), + environ=dict(nohash=True, usedefault=True), + input_image=dict(argstr="%s", extensions=None, mandatory=True, position=2), + input_mask=dict(argstr="%s", extensions=None, requires=["num_thresholds"]), + inside_value=dict(argstr="%f", position=6, requires=["th_low"]), mode=dict( 
argstr="%s", position=4, requires=["num_thresholds"], xor=["th_low", "th_high"], ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - num_thresholds=dict( - argstr="%d", - position=5, - ), + num_threads=dict(nohash=True, usedefault=True), + num_thresholds=dict(argstr="%d", position=5), output_image=dict( argstr="%s", extensions=None, @@ -58,21 +27,9 @@ def test_ThresholdImage_inputs(): name_template="%s_resampled", position=3, ), - outside_value=dict( - argstr="%f", - position=7, - requires=["th_low"], - ), - th_high=dict( - argstr="%f", - position=5, - xor=["mode"], - ), - th_low=dict( - argstr="%f", - position=4, - xor=["mode"], - ), + outside_value=dict(argstr="%f", position=7, requires=["th_low"]), + th_high=dict(argstr="%f", position=5, xor=["mode"]), + th_low=dict(argstr="%f", position=4, xor=["mode"]), ) inputs = ThresholdImage.input_spec() @@ -82,11 +39,7 @@ def test_ThresholdImage_inputs(): def test_ThresholdImage_outputs(): - output_map = dict( - output_image=dict( - extensions=None, - ), - ) + output_map = dict(output_image=dict(extensions=None)) outputs = ThresholdImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py b/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py index 66f45d0cd7..c4e59f3779 100644 --- a/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py @@ -4,34 +4,14 @@ def test_WarpImageMultiTransform_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dimension=dict( - argstr="%d", - position=1, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_image=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), + args=dict(argstr="%s"), + dimension=dict(argstr="%d", position=1, usedefault=True), + environ=dict(nohash=True, usedefault=True), + input_image=dict(argstr="%s", extensions=None, mandatory=True, position=2), invert_affine=dict(), - num_threads=dict( - nohash=True, - usedefault=True, - ), + num_threads=dict(nohash=True, usedefault=True), out_postfix=dict( - extensions=None, - hash_files=False, - usedefault=True, - xor=["output_image"], + extensions=None, hash_files=False, usedefault=True, xor=["output_image"] ), output_image=dict( argstr="%s", @@ -41,29 +21,12 @@ def test_WarpImageMultiTransform_inputs(): position=3, xor=["out_postfix"], ), - reference_image=dict( - argstr="-R %s", - extensions=None, - xor=["tightest_box"], - ), - reslice_by_header=dict( - argstr="--reslice-by-header", - ), - tightest_box=dict( - argstr="--tightest-bounding-box", - xor=["reference_image"], - ), - transformation_series=dict( - argstr="%s", - mandatory=True, - position=-1, - ), - use_bspline=dict( - argstr="--use-BSpline", - ), - use_nearest=dict( - argstr="--use-NN", - ), + reference_image=dict(argstr="-R %s", extensions=None, xor=["tightest_box"]), + reslice_by_header=dict(argstr="--reslice-by-header"), + tightest_box=dict(argstr="--tightest-bounding-box", xor=["reference_image"]), + transformation_series=dict(argstr="%s", mandatory=True, position=-1), + use_bspline=dict(argstr="--use-BSpline"), + use_nearest=dict(argstr="--use-NN"), ) inputs = WarpImageMultiTransform.input_spec() @@ -73,11 +36,7 @@ def test_WarpImageMultiTransform_inputs(): def test_WarpImageMultiTransform_outputs(): - output_map = dict( - output_image=dict( - extensions=None, - ), - ) + output_map = dict(output_image=dict(extensions=None)) 
outputs = WarpImageMultiTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py b/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py index 63d8d8365e..cd68fe57ae 100644 --- a/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py @@ -4,56 +4,19 @@ def test_WarpTimeSeriesImageMultiTransform_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dimension=dict( - argstr="%d", - position=1, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_image=dict( - argstr="%s", - copyfile=True, - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s"), + dimension=dict(argstr="%d", position=1, usedefault=True), + environ=dict(nohash=True, usedefault=True), + input_image=dict(argstr="%s", copyfile=True, extensions=None, mandatory=True), invert_affine=dict(), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_postfix=dict( - argstr="%s", - usedefault=True, - ), - reference_image=dict( - argstr="-R %s", - extensions=None, - xor=["tightest_box"], - ), - reslice_by_header=dict( - argstr="--reslice-by-header", - ), - tightest_box=dict( - argstr="--tightest-bounding-box", - xor=["reference_image"], - ), - transformation_series=dict( - argstr="%s", - copyfile=False, - mandatory=True, - ), - use_bspline=dict( - argstr="--use-Bspline", - ), - use_nearest=dict( - argstr="--use-NN", - ), + num_threads=dict(nohash=True, usedefault=True), + out_postfix=dict(argstr="%s", usedefault=True), + reference_image=dict(argstr="-R %s", extensions=None, xor=["tightest_box"]), + reslice_by_header=dict(argstr="--reslice-by-header"), + tightest_box=dict(argstr="--tightest-bounding-box", xor=["reference_image"]), + transformation_series=dict(argstr="%s", copyfile=False, mandatory=True), + use_bspline=dict(argstr="--use-Bspline"), + use_nearest=dict(argstr="--use-NN"), ) inputs = WarpTimeSeriesImageMultiTransform.input_spec() @@ -63,11 +26,7 @@ def test_WarpTimeSeriesImageMultiTransform_inputs(): def test_WarpTimeSeriesImageMultiTransform_outputs(): - output_map = dict( - output_image=dict( - extensions=None, - ), - ) + output_map = dict(output_image=dict(extensions=None)) outputs = WarpTimeSeriesImageMultiTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py b/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py index 0318db7624..befec00f52 100644 --- a/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py +++ b/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py @@ -4,61 +4,24 @@ def test_antsIntroduction_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bias_field_correction=dict( - argstr="-n 1", - ), - dimension=dict( - argstr="-d %d", - position=1, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - force_proceed=dict( - argstr="-f 1", - ), + args=dict(argstr="%s"), + bias_field_correction=dict(argstr="-n 1"), + dimension=dict(argstr="-d %d", position=1, usedefault=True), + environ=dict(nohash=True, usedefault=True), + force_proceed=dict(argstr="-f 1"), input_image=dict( - argstr="-i %s", - copyfile=False, - extensions=None, - mandatory=True, - ), - inverse_warp_template_labels=dict( - argstr="-l", - ), - max_iterations=dict( - argstr="-m %s", - sep="x", - ), - num_threads=dict( - 
nohash=True, - usedefault=True, - ), - out_prefix=dict( - argstr="-o %s", - usedefault=True, - ), - quality_check=dict( - argstr="-q 1", + argstr="-i %s", copyfile=False, extensions=None, mandatory=True ), + inverse_warp_template_labels=dict(argstr="-l"), + max_iterations=dict(argstr="-m %s", sep="x"), + num_threads=dict(nohash=True, usedefault=True), + out_prefix=dict(argstr="-o %s", usedefault=True), + quality_check=dict(argstr="-q 1"), reference_image=dict( - argstr="-r %s", - copyfile=True, - extensions=None, - mandatory=True, - ), - similarity_metric=dict( - argstr="-s %s", - ), - transformation_model=dict( - argstr="-t %s", - usedefault=True, + argstr="-r %s", copyfile=True, extensions=None, mandatory=True ), + similarity_metric=dict(argstr="-s %s"), + transformation_model=dict(argstr="-t %s", usedefault=True), ) inputs = antsIntroduction.input_spec() @@ -69,21 +32,11 @@ def test_antsIntroduction_inputs(): def test_antsIntroduction_outputs(): output_map = dict( - affine_transformation=dict( - extensions=None, - ), - input_file=dict( - extensions=None, - ), - inverse_warp_field=dict( - extensions=None, - ), - output_file=dict( - extensions=None, - ), - warp_field=dict( - extensions=None, - ), + affine_transformation=dict(extensions=None), + input_file=dict(extensions=None), + inverse_warp_field=dict(extensions=None), + output_file=dict(extensions=None), + warp_field=dict(extensions=None), ) outputs = antsIntroduction.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py b/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py index 2713c6af54..4732bab229 100644 --- a/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py +++ b/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py @@ -4,63 +4,21 @@ def test_buildtemplateparallel_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bias_field_correction=dict( - argstr="-n 1", - ), - dimension=dict( - argstr="-d %d", - position=1, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gradient_step_size=dict( - argstr="-g %f", - ), - in_files=dict( - argstr="%s", - mandatory=True, - position=-1, - ), - iteration_limit=dict( - argstr="-i %d", - usedefault=True, - ), - max_iterations=dict( - argstr="-m %s", - sep="x", - ), - num_cores=dict( - argstr="-j %d", - requires=["parallelization"], - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - out_prefix=dict( - argstr="-o %s", - usedefault=True, - ), - parallelization=dict( - argstr="-c %d", - usedefault=True, - ), - rigid_body_registration=dict( - argstr="-r 1", - ), - similarity_metric=dict( - argstr="-s %s", - ), - transformation_model=dict( - argstr="-t %s", - usedefault=True, - ), + args=dict(argstr="%s"), + bias_field_correction=dict(argstr="-n 1"), + dimension=dict(argstr="-d %d", position=1, usedefault=True), + environ=dict(nohash=True, usedefault=True), + gradient_step_size=dict(argstr="-g %f"), + in_files=dict(argstr="%s", mandatory=True, position=-1), + iteration_limit=dict(argstr="-i %d", usedefault=True), + max_iterations=dict(argstr="-m %s", sep="x"), + num_cores=dict(argstr="-j %d", requires=["parallelization"]), + num_threads=dict(nohash=True, usedefault=True), + out_prefix=dict(argstr="-o %s", usedefault=True), + parallelization=dict(argstr="-c %d", usedefault=True), + rigid_body_registration=dict(argstr="-r 1"), + similarity_metric=dict(argstr="-s %s"), + transformation_model=dict(argstr="-t %s", usedefault=True), use_first_as_target=dict(), ) inputs = 
buildtemplateparallel.input_spec() @@ -72,9 +30,7 @@ def test_buildtemplateparallel_inputs(): def test_buildtemplateparallel_outputs(): output_map = dict( - final_template_file=dict( - extensions=None, - ), + final_template_file=dict(extensions=None), subject_outfiles=dict(), template_files=dict(), ) diff --git a/nipype/interfaces/base/tests/test_auto_CommandLine.py b/nipype/interfaces/base/tests/test_auto_CommandLine.py index b03e4adfca..9a114f0d3c 100644 --- a/nipype/interfaces/base/tests/test_auto_CommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_CommandLine.py @@ -3,15 +3,7 @@ def test_CommandLine_inputs(): - input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - ) + input_map = dict(args=dict(argstr="%s"), environ=dict(nohash=True, usedefault=True)) inputs = CommandLine.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py b/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py index 908943c754..c8ec0bf3be 100644 --- a/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py @@ -4,17 +4,10 @@ def test_MpiCommandLine_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), n_procs=dict(), - use_mpi=dict( - usedefault=True, - ), + use_mpi=dict(usedefault=True), ) inputs = MpiCommandLine.input_spec() diff --git a/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py b/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py index 1197b2479c..51161ea121 100644 --- a/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py @@ -3,15 +3,7 @@ def test_SEMLikeCommandLine_inputs(): - input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - ) + input_map = dict(args=dict(argstr="%s"), environ=dict(nohash=True, usedefault=True)) inputs = SEMLikeCommandLine.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py b/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py index 39b80d487b..9a632f9b42 100644 --- a/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py @@ -4,19 +4,9 @@ def test_StdOutCommandLine_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), ) inputs = StdOutCommandLine.input_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_BDP.py b/nipype/interfaces/brainsuite/tests/test_auto_BDP.py index 0da29c372d..495ff8fb0b 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_BDP.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_BDP.py @@ -10,9 +10,7 @@ def test_BDP_inputs(): position=-1, xor=["bMatrixFile"], ), - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), bMatrixFile=dict( argstr="--bmat %s", extensions=None, @@ -20,9 +18,7 @@ def test_BDP_inputs(): position=-1, xor=["BVecBValPair"], ), - bValRatioThreshold=dict( - argstr="--bval-ratio-threshold %f", 
- ), + bValRatioThreshold=dict(argstr="--bval-ratio-threshold %f"), bfcFile=dict( argstr="%s", extensions=None, @@ -31,152 +27,66 @@ def test_BDP_inputs(): xor=["noStructuralRegistration"], ), customDiffusionLabel=dict( - argstr="--custom-diffusion-label %s", - extensions=None, - ), - customLabelXML=dict( - argstr="--custom-label-xml %s", - extensions=None, - ), - customT1Label=dict( - argstr="--custom-t1-label %s", - extensions=None, - ), - dataSinkDelay=dict( - argstr="%s", - ), - dcorrRegMeasure=dict( - argstr="--dcorr-reg-method %s", - ), - dcorrWeight=dict( - argstr="--dcorr-regularization-wt %f", - ), - dwiMask=dict( - argstr="--dwi-mask %s", - extensions=None, - ), - echoSpacing=dict( - argstr="--echo-spacing=%f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - estimateODF_3DShore=dict( - argstr="--3dshore --diffusion_time_ms %f", - ), - estimateODF_FRACT=dict( - argstr="--FRACT", - ), - estimateODF_FRT=dict( - argstr="--FRT", - ), - estimateTensors=dict( - argstr="--tensors", - ), + argstr="--custom-diffusion-label %s", extensions=None + ), + customLabelXML=dict(argstr="--custom-label-xml %s", extensions=None), + customT1Label=dict(argstr="--custom-t1-label %s", extensions=None), + dataSinkDelay=dict(argstr="%s"), + dcorrRegMeasure=dict(argstr="--dcorr-reg-method %s"), + dcorrWeight=dict(argstr="--dcorr-regularization-wt %f"), + dwiMask=dict(argstr="--dwi-mask %s", extensions=None), + echoSpacing=dict(argstr="--echo-spacing=%f"), + environ=dict(nohash=True, usedefault=True), + estimateODF_3DShore=dict(argstr="--3dshore --diffusion_time_ms %f"), + estimateODF_FRACT=dict(argstr="--FRACT"), + estimateODF_FRT=dict(argstr="--FRT"), + estimateTensors=dict(argstr="--tensors"), fieldmapCorrection=dict( - argstr="--fieldmap-correction %s", - extensions=None, - requires=["echoSpacing"], + argstr="--fieldmap-correction %s", extensions=None, requires=["echoSpacing"] ), fieldmapCorrectionMethod=dict( - argstr="--fieldmap-correction-method %s", - xor=["skipIntensityCorr"], - ), - fieldmapSmooth=dict( - argstr="--fieldmap-smooth3=%f", - ), - flagConfigFile=dict( - argstr="--flag-conf-file %s", - extensions=None, - ), - forcePartialROIStats=dict( - argstr="--force-partial-roi-stats", - ), - generateStats=dict( - argstr="--generate-stats", - ), - ignoreFieldmapFOV=dict( - argstr="--ignore-fieldmap-fov", - ), - ignoreMemory=dict( - argstr="--ignore-memory", - ), + argstr="--fieldmap-correction-method %s", xor=["skipIntensityCorr"] + ), + fieldmapSmooth=dict(argstr="--fieldmap-smooth3=%f"), + flagConfigFile=dict(argstr="--flag-conf-file %s", extensions=None), + forcePartialROIStats=dict(argstr="--force-partial-roi-stats"), + generateStats=dict(argstr="--generate-stats"), + ignoreFieldmapFOV=dict(argstr="--ignore-fieldmap-fov"), + ignoreMemory=dict(argstr="--ignore-memory"), inputDiffusionData=dict( - argstr="--nii %s", - extensions=None, - mandatory=True, - position=-2, - ), - lowMemory=dict( - argstr="--low-memory", + argstr="--nii %s", extensions=None, mandatory=True, position=-2 ), + lowMemory=dict(argstr="--low-memory"), noStructuralRegistration=dict( argstr="--no-structural-registration", mandatory=True, position=0, xor=["bfcFile"], ), - odfLambta=dict( - argstr="--odf-lambda ", - ), - onlyStats=dict( - argstr="--generate-only-stats", - ), - outPrefix=dict( - argstr="--output-fileprefix %s", - ), - outputDiffusionCoordinates=dict( - argstr="--output-diffusion-coordinate", - ), - outputSubdir=dict( - argstr="--output-subdir %s", - ), - phaseEncodingDirection=dict( - 
argstr="--dir=%s", - ), - rigidRegMeasure=dict( - argstr="--rigid-reg-measure %s", - ), - skipDistortionCorr=dict( - argstr="--no-distortion-correction", - ), + odfLambta=dict(argstr="--odf-lambda "), + onlyStats=dict(argstr="--generate-only-stats"), + outPrefix=dict(argstr="--output-fileprefix %s"), + outputDiffusionCoordinates=dict(argstr="--output-diffusion-coordinate"), + outputSubdir=dict(argstr="--output-subdir %s"), + phaseEncodingDirection=dict(argstr="--dir=%s"), + rigidRegMeasure=dict(argstr="--rigid-reg-measure %s"), + skipDistortionCorr=dict(argstr="--no-distortion-correction"), skipIntensityCorr=dict( - argstr="--no-intensity-correction", - xor=["fieldmapCorrectionMethod"], - ), - skipNonuniformityCorr=dict( - argstr="--no-nonuniformity-correction", - ), - t1Mask=dict( - argstr="--t1-mask %s", - extensions=None, - ), - threads=dict( - argstr="--threads=%d", - ), - transformDataOnly=dict( - argstr="--transform-data-only", + argstr="--no-intensity-correction", xor=["fieldmapCorrectionMethod"] ), + skipNonuniformityCorr=dict(argstr="--no-nonuniformity-correction"), + t1Mask=dict(argstr="--t1-mask %s", extensions=None), + threads=dict(argstr="--threads=%d"), + transformDataOnly=dict(argstr="--transform-data-only"), transformDiffusionSurface=dict( - argstr="--transform-diffusion-surface %s", - extensions=None, + argstr="--transform-diffusion-surface %s", extensions=None ), transformDiffusionVolume=dict( - argstr="--transform-diffusion-volume %s", - extensions=None, - ), - transformInterpolation=dict( - argstr="--transform-interpolation %s", - ), - transformT1Surface=dict( - argstr="--transform-t1-surface %s", - extensions=None, - ), - transformT1Volume=dict( - argstr="--transform-t1-volume %s", - extensions=None, + argstr="--transform-diffusion-volume %s", extensions=None ), + transformInterpolation=dict(argstr="--transform-interpolation %s"), + transformT1Surface=dict(argstr="--transform-t1-surface %s", extensions=None), + transformT1Volume=dict(argstr="--transform-t1-volume %s", extensions=None), ) inputs = BDP.input_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py b/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py index dbb1f3d839..d108db5840 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py @@ -4,96 +4,35 @@ def test_Bfc_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - biasEstimateConvergenceThreshold=dict( - argstr="--beps %f", - ), - biasEstimateSpacing=dict( - argstr="-s %d", - ), - biasFieldEstimatesOutputPrefix=dict( - argstr="--biasprefix %s", - ), - biasRange=dict( - argstr="%s", - ), - controlPointSpacing=dict( - argstr="-c %d", - ), - convergenceThreshold=dict( - argstr="--eps %f", - ), - correctWholeVolume=dict( - argstr="--extrapolate", - ), - correctedImagesOutputPrefix=dict( - argstr="--prefix %s", - ), - correctionScheduleFile=dict( - argstr="--schedule %s", - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - histogramRadius=dict( - argstr="-r %d", - ), - histogramType=dict( - argstr="%s", - ), - inputMRIFile=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - ), - inputMaskFile=dict( - argstr="-m %s", - extensions=None, - hash_files=False, - ), - intermediate_file_type=dict( - argstr="%s", - ), - iterativeMode=dict( - argstr="--iterate", - ), - maxBias=dict( - argstr="-U %f", - usedefault=True, - ), - minBias=dict( - argstr="-L %f", - usedefault=True, - ), - outputBiasField=dict( - argstr="--bias %s", - 
extensions=None, - hash_files=False, - ), + args=dict(argstr="%s"), + biasEstimateConvergenceThreshold=dict(argstr="--beps %f"), + biasEstimateSpacing=dict(argstr="-s %d"), + biasFieldEstimatesOutputPrefix=dict(argstr="--biasprefix %s"), + biasRange=dict(argstr="%s"), + controlPointSpacing=dict(argstr="-c %d"), + convergenceThreshold=dict(argstr="--eps %f"), + correctWholeVolume=dict(argstr="--extrapolate"), + correctedImagesOutputPrefix=dict(argstr="--prefix %s"), + correctionScheduleFile=dict(argstr="--schedule %s", extensions=None), + environ=dict(nohash=True, usedefault=True), + histogramRadius=dict(argstr="-r %d"), + histogramType=dict(argstr="%s"), + inputMRIFile=dict(argstr="-i %s", extensions=None, mandatory=True), + inputMaskFile=dict(argstr="-m %s", extensions=None, hash_files=False), + intermediate_file_type=dict(argstr="%s"), + iterativeMode=dict(argstr="--iterate"), + maxBias=dict(argstr="-U %f", usedefault=True), + minBias=dict(argstr="-L %f", usedefault=True), + outputBiasField=dict(argstr="--bias %s", extensions=None, hash_files=False), outputMRIVolume=dict( - argstr="-o %s", - extensions=None, - genfile=True, - hash_files=False, + argstr="-o %s", extensions=None, genfile=True, hash_files=False ), outputMaskedBiasField=dict( - argstr="--maskedbias %s", - extensions=None, - hash_files=False, - ), - splineLambda=dict( - argstr="-w %f", - ), - timer=dict( - argstr="--timer", - ), - verbosityLevel=dict( - argstr="-v %d", + argstr="--maskedbias %s", extensions=None, hash_files=False ), + splineLambda=dict(argstr="-w %f"), + timer=dict(argstr="--timer"), + verbosityLevel=dict(argstr="-v %d"), ) inputs = Bfc.input_spec() @@ -104,18 +43,10 @@ def test_Bfc_inputs(): def test_Bfc_outputs(): output_map = dict( - correctionScheduleFile=dict( - extensions=None, - ), - outputBiasField=dict( - extensions=None, - ), - outputMRIVolume=dict( - extensions=None, - ), - outputMaskedBiasField=dict( - extensions=None, - ), + correctionScheduleFile=dict(extensions=None), + outputBiasField=dict(extensions=None), + outputMRIVolume=dict(extensions=None), + outputMaskedBiasField=dict(extensions=None), ) outputs = Bfc.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Bse.py b/nipype/interfaces/brainsuite/tests/test_auto_Bse.py index 748defcc00..09641067ed 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Bse.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Bse.py @@ -4,84 +4,32 @@ def test_Bse_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - diffusionConstant=dict( - argstr="-d %f", - usedefault=True, - ), - diffusionIterations=dict( - argstr="-n %d", - usedefault=True, - ), - dilateFinalMask=dict( - argstr="-p", - usedefault=True, - ), - edgeDetectionConstant=dict( - argstr="-s %f", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputMRIFile=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - ), - noRotate=dict( - argstr="--norotate", - ), - outputCortexFile=dict( - argstr="--cortex %s", - extensions=None, - hash_files=False, - ), + args=dict(argstr="%s"), + diffusionConstant=dict(argstr="-d %f", usedefault=True), + diffusionIterations=dict(argstr="-n %d", usedefault=True), + dilateFinalMask=dict(argstr="-p", usedefault=True), + edgeDetectionConstant=dict(argstr="-s %f", usedefault=True), + environ=dict(nohash=True, usedefault=True), + inputMRIFile=dict(argstr="-i %s", extensions=None, mandatory=True), + noRotate=dict(argstr="--norotate"), + outputCortexFile=dict(argstr="--cortex %s", extensions=None, 
hash_files=False), outputDetailedBrainMask=dict( - argstr="--hires %s", - extensions=None, - hash_files=False, + argstr="--hires %s", extensions=None, hash_files=False ), outputDiffusionFilter=dict( - argstr="--adf %s", - extensions=None, - hash_files=False, - ), - outputEdgeMap=dict( - argstr="--edge %s", - extensions=None, - hash_files=False, + argstr="--adf %s", extensions=None, hash_files=False ), + outputEdgeMap=dict(argstr="--edge %s", extensions=None, hash_files=False), outputMRIVolume=dict( - argstr="-o %s", - extensions=None, - genfile=True, - hash_files=False, + argstr="-o %s", extensions=None, genfile=True, hash_files=False ), outputMaskFile=dict( - argstr="--mask %s", - extensions=None, - genfile=True, - hash_files=False, - ), - radius=dict( - argstr="-r %f", - usedefault=True, - ), - timer=dict( - argstr="--timer", - ), - trim=dict( - argstr="--trim", - usedefault=True, - ), - verbosityLevel=dict( - argstr="-v %f", - usedefault=True, + argstr="--mask %s", extensions=None, genfile=True, hash_files=False ), + radius=dict(argstr="-r %f", usedefault=True), + timer=dict(argstr="--timer"), + trim=dict(argstr="--trim", usedefault=True), + verbosityLevel=dict(argstr="-v %f", usedefault=True), ) inputs = Bse.input_spec() @@ -92,24 +40,12 @@ def test_Bse_inputs(): def test_Bse_outputs(): output_map = dict( - outputCortexFile=dict( - extensions=None, - ), - outputDetailedBrainMask=dict( - extensions=None, - ), - outputDiffusionFilter=dict( - extensions=None, - ), - outputEdgeMap=dict( - extensions=None, - ), - outputMRIVolume=dict( - extensions=None, - ), - outputMaskFile=dict( - extensions=None, - ), + outputCortexFile=dict(extensions=None), + outputDetailedBrainMask=dict(extensions=None), + outputDiffusionFilter=dict(extensions=None), + outputEdgeMap=dict(extensions=None), + outputMRIVolume=dict(extensions=None), + outputMaskFile=dict(extensions=None), ) outputs = Bse.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py b/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py index a924d7ce0f..d3932ce340 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py @@ -4,80 +4,29 @@ def test_Cerebro_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - costFunction=dict( - argstr="-c %d", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + costFunction=dict(argstr="-c %d", usedefault=True), + environ=dict(nohash=True, usedefault=True), inputAtlasLabelFile=dict( - argstr="--atlaslabels %s", - extensions=None, - mandatory=True, - ), - inputAtlasMRIFile=dict( - argstr="--atlas %s", - extensions=None, - mandatory=True, - ), - inputBrainMaskFile=dict( - argstr="-m %s", - extensions=None, - ), - inputMRIFile=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - ), - keepTempFiles=dict( - argstr="--keep", - ), - linearConvergence=dict( - argstr="--linconv %f", + argstr="--atlaslabels %s", extensions=None, mandatory=True ), + inputAtlasMRIFile=dict(argstr="--atlas %s", extensions=None, mandatory=True), + inputBrainMaskFile=dict(argstr="-m %s", extensions=None), + inputMRIFile=dict(argstr="-i %s", extensions=None, mandatory=True), + keepTempFiles=dict(argstr="--keep"), + linearConvergence=dict(argstr="--linconv %f"), outputAffineTransformFile=dict( - argstr="--air %s", - extensions=None, - genfile=True, - ), - outputCerebrumMaskFile=dict( - argstr="-o %s", - extensions=None, - genfile=True, - ), - outputLabelVolumeFile=dict( 
- argstr="-l %s", - extensions=None, - genfile=True, - ), - outputWarpTransformFile=dict( - argstr="--warp %s", - extensions=None, - genfile=True, - ), - tempDirectory=dict( - argstr="--tempdir %s", - ), - tempDirectoryBase=dict( - argstr="--tempdirbase %s", - ), - useCentroids=dict( - argstr="--centroids", - ), - verbosity=dict( - argstr="-v %d", - ), - warpConvergence=dict( - argstr="--warpconv %f", - ), - warpLabel=dict( - argstr="--warplevel %d", - ), + argstr="--air %s", extensions=None, genfile=True + ), + outputCerebrumMaskFile=dict(argstr="-o %s", extensions=None, genfile=True), + outputLabelVolumeFile=dict(argstr="-l %s", extensions=None, genfile=True), + outputWarpTransformFile=dict(argstr="--warp %s", extensions=None, genfile=True), + tempDirectory=dict(argstr="--tempdir %s"), + tempDirectoryBase=dict(argstr="--tempdirbase %s"), + useCentroids=dict(argstr="--centroids"), + verbosity=dict(argstr="-v %d"), + warpConvergence=dict(argstr="--warpconv %f"), + warpLabel=dict(argstr="--warplevel %d"), ) inputs = Cerebro.input_spec() @@ -88,18 +37,10 @@ def test_Cerebro_inputs(): def test_Cerebro_outputs(): output_map = dict( - outputAffineTransformFile=dict( - extensions=None, - ), - outputCerebrumMaskFile=dict( - extensions=None, - ), - outputLabelVolumeFile=dict( - extensions=None, - ), - outputWarpTransformFile=dict( - extensions=None, - ), + outputAffineTransformFile=dict(extensions=None), + outputCerebrumMaskFile=dict(extensions=None), + outputLabelVolumeFile=dict(extensions=None), + outputWarpTransformFile=dict(extensions=None), ) outputs = Cerebro.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py b/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py index 30287edf90..0935fddbc1 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py @@ -4,49 +4,17 @@ def test_Cortex_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - computeGCBoundary=dict( - argstr="-g", - ), - computeWGBoundary=dict( - argstr="-w", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - includeAllSubcorticalAreas=dict( - argstr="-a", - usedefault=True, - ), - inputHemisphereLabelFile=dict( - argstr="-h %s", - extensions=None, - mandatory=True, - ), - inputTissueFractionFile=dict( - argstr="-f %s", - extensions=None, - mandatory=True, - ), - outputCerebrumMask=dict( - argstr="-o %s", - extensions=None, - genfile=True, - ), - timer=dict( - argstr="--timer", - ), - tissueFractionThreshold=dict( - argstr="-p %f", - usedefault=True, - ), - verbosity=dict( - argstr="-v %d", - ), + args=dict(argstr="%s"), + computeGCBoundary=dict(argstr="-g"), + computeWGBoundary=dict(argstr="-w", usedefault=True), + environ=dict(nohash=True, usedefault=True), + includeAllSubcorticalAreas=dict(argstr="-a", usedefault=True), + inputHemisphereLabelFile=dict(argstr="-h %s", extensions=None, mandatory=True), + inputTissueFractionFile=dict(argstr="-f %s", extensions=None, mandatory=True), + outputCerebrumMask=dict(argstr="-o %s", extensions=None, genfile=True), + timer=dict(argstr="--timer"), + tissueFractionThreshold=dict(argstr="-p %f", usedefault=True), + verbosity=dict(argstr="-v %d"), ) inputs = Cortex.input_spec() @@ -56,11 +24,7 @@ def test_Cortex_inputs(): def test_Cortex_outputs(): - output_map = dict( - outputCerebrumMask=dict( - extensions=None, - ), - ) + output_map = dict(outputCerebrumMask=dict(extensions=None)) outputs = Cortex.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py b/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py index 4d986e22f3..b91f40790c 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py @@ -4,35 +4,14 @@ def test_Dewisp_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputMaskFile=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - ), - maximumIterations=dict( - argstr="-n %d", - ), - outputMaskFile=dict( - argstr="-o %s", - extensions=None, - genfile=True, - ), - sizeThreshold=dict( - argstr="-t %d", - ), - timer=dict( - argstr="--timer", - ), - verbosity=dict( - argstr="-v %d", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputMaskFile=dict(argstr="-i %s", extensions=None, mandatory=True), + maximumIterations=dict(argstr="-n %d"), + outputMaskFile=dict(argstr="-o %s", extensions=None, genfile=True), + sizeThreshold=dict(argstr="-t %d"), + timer=dict(argstr="--timer"), + verbosity=dict(argstr="-v %d"), ) inputs = Dewisp.input_spec() @@ -42,11 +21,7 @@ def test_Dewisp_inputs(): def test_Dewisp_outputs(): - output_map = dict( - outputMaskFile=dict( - extensions=None, - ), - ) + output_map = dict(outputMaskFile=dict(extensions=None)) outputs = Dewisp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py b/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py index 3122791cf5..ee3a473c21 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py @@ -4,70 +4,30 @@ def test_Dfs_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - curvatureWeighting=dict( - argstr="-w %f", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputShadingVolume=dict( - argstr="-c %s", - extensions=None, - ), - inputVolumeFile=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - ), - noNormalsFlag=dict( - argstr="--nonormals", - ), + args=dict(argstr="%s"), + curvatureWeighting=dict(argstr="-w %f", usedefault=True), + environ=dict(nohash=True, usedefault=True), + inputShadingVolume=dict(argstr="-c %s", extensions=None), + inputVolumeFile=dict(argstr="-i %s", extensions=None, mandatory=True), + noNormalsFlag=dict(argstr="--nonormals"), nonZeroTessellation=dict( - argstr="-nz", - xor=("nonZeroTessellation", "specialTessellation"), - ), - outputSurfaceFile=dict( - argstr="-o %s", - extensions=None, - genfile=True, - ), - postSmoothFlag=dict( - argstr="--postsmooth", - ), - scalingPercentile=dict( - argstr="-f %f", - ), - smoothingConstant=dict( - argstr="-a %f", - usedefault=True, - ), - smoothingIterations=dict( - argstr="-n %d", - usedefault=True, + argstr="-nz", xor=("nonZeroTessellation", "specialTessellation") ), + outputSurfaceFile=dict(argstr="-o %s", extensions=None, genfile=True), + postSmoothFlag=dict(argstr="--postsmooth"), + scalingPercentile=dict(argstr="-f %f"), + smoothingConstant=dict(argstr="-a %f", usedefault=True), + smoothingIterations=dict(argstr="-n %d", usedefault=True), specialTessellation=dict( argstr="%s", position=-1, requires=["tessellationThreshold"], xor=("nonZeroTessellation", "specialTessellation"), ), - tessellationThreshold=dict( - argstr="%f", - ), - timer=dict( - argstr="--timer", - ), - verbosity=dict( - argstr="-v %d", - ), - zeroPadFlag=dict( - argstr="-z", - ), + 
tessellationThreshold=dict(argstr="%f"), + timer=dict(argstr="--timer"), + verbosity=dict(argstr="-v %d"), + zeroPadFlag=dict(argstr="-z"), ) inputs = Dfs.input_spec() @@ -77,11 +37,7 @@ def test_Dfs_inputs(): def test_Dfs_outputs(): - output_map = dict( - outputSurfaceFile=dict( - extensions=None, - ), - ) + output_map = dict(outputSurfaceFile=dict(extensions=None)) outputs = Dfs.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py b/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py index 0696f11992..9ffcc8162e 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py @@ -4,53 +4,17 @@ def test_Hemisplit_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputHemisphereLabelFile=dict( - argstr="-l %s", - extensions=None, - mandatory=True, - ), - inputSurfaceFile=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - ), - outputLeftHemisphere=dict( - argstr="--left %s", - extensions=None, - genfile=True, - ), - outputLeftPialHemisphere=dict( - argstr="-pl %s", - extensions=None, - genfile=True, - ), - outputRightHemisphere=dict( - argstr="--right %s", - extensions=None, - genfile=True, - ), - outputRightPialHemisphere=dict( - argstr="-pr %s", - extensions=None, - genfile=True, - ), - pialSurfaceFile=dict( - argstr="-p %s", - extensions=None, - ), - timer=dict( - argstr="--timer", - ), - verbosity=dict( - argstr="-v %d", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputHemisphereLabelFile=dict(argstr="-l %s", extensions=None, mandatory=True), + inputSurfaceFile=dict(argstr="-i %s", extensions=None, mandatory=True), + outputLeftHemisphere=dict(argstr="--left %s", extensions=None, genfile=True), + outputLeftPialHemisphere=dict(argstr="-pl %s", extensions=None, genfile=True), + outputRightHemisphere=dict(argstr="--right %s", extensions=None, genfile=True), + outputRightPialHemisphere=dict(argstr="-pr %s", extensions=None, genfile=True), + pialSurfaceFile=dict(argstr="-p %s", extensions=None), + timer=dict(argstr="--timer"), + verbosity=dict(argstr="-v %d"), ) inputs = Hemisplit.input_spec() @@ -61,18 +25,10 @@ def test_Hemisplit_inputs(): def test_Hemisplit_outputs(): output_map = dict( - outputLeftHemisphere=dict( - extensions=None, - ), - outputLeftPialHemisphere=dict( - extensions=None, - ), - outputRightHemisphere=dict( - extensions=None, - ), - outputRightPialHemisphere=dict( - extensions=None, - ), + outputLeftHemisphere=dict(extensions=None), + outputLeftPialHemisphere=dict(extensions=None), + outputRightHemisphere=dict(extensions=None), + outputRightPialHemisphere=dict(extensions=None), ) outputs = Hemisplit.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py b/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py index f5ba0725df..202b4257a7 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py @@ -4,80 +4,25 @@ def test_Pialmesh_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - exportPrefix=dict( - argstr="--prefix %s", - ), - inputMaskFile=dict( - argstr="-m %s", - extensions=None, - mandatory=True, - ), - inputSurfaceFile=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - ), - inputTissueFractionFile=dict( - argstr="-f 
%s", - extensions=None, - mandatory=True, - ), - laplacianSmoothing=dict( - argstr="--smooth %f", - usedefault=True, - ), - maxThickness=dict( - argstr="--max %f", - usedefault=True, - ), - normalSmoother=dict( - argstr="--nc %f", - usedefault=True, - ), - numIterations=dict( - argstr="-n %d", - usedefault=True, - ), - outputInterval=dict( - argstr="--interval %d", - usedefault=True, - ), - outputSurfaceFile=dict( - argstr="-o %s", - extensions=None, - genfile=True, - ), - recomputeNormals=dict( - argstr="--norm", - ), - searchRadius=dict( - argstr="-r %f", - usedefault=True, - ), - stepSize=dict( - argstr="-s %f", - usedefault=True, - ), - tangentSmoother=dict( - argstr="--tc %f", - ), - timer=dict( - argstr="--timer", - ), - tissueThreshold=dict( - argstr="-t %f", - usedefault=True, - ), - verbosity=dict( - argstr="-v %d", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + exportPrefix=dict(argstr="--prefix %s"), + inputMaskFile=dict(argstr="-m %s", extensions=None, mandatory=True), + inputSurfaceFile=dict(argstr="-i %s", extensions=None, mandatory=True), + inputTissueFractionFile=dict(argstr="-f %s", extensions=None, mandatory=True), + laplacianSmoothing=dict(argstr="--smooth %f", usedefault=True), + maxThickness=dict(argstr="--max %f", usedefault=True), + normalSmoother=dict(argstr="--nc %f", usedefault=True), + numIterations=dict(argstr="-n %d", usedefault=True), + outputInterval=dict(argstr="--interval %d", usedefault=True), + outputSurfaceFile=dict(argstr="-o %s", extensions=None, genfile=True), + recomputeNormals=dict(argstr="--norm"), + searchRadius=dict(argstr="-r %f", usedefault=True), + stepSize=dict(argstr="-s %f", usedefault=True), + tangentSmoother=dict(argstr="--tc %f"), + timer=dict(argstr="--timer"), + tissueThreshold=dict(argstr="-t %f", usedefault=True), + verbosity=dict(argstr="-v %d"), ) inputs = Pialmesh.input_spec() @@ -87,11 +32,7 @@ def test_Pialmesh_inputs(): def test_Pialmesh_outputs(): - output_map = dict( - outputSurfaceFile=dict( - extensions=None, - ), - ) + output_map = dict(outputSurfaceFile=dict(extensions=None)) outputs = Pialmesh.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py b/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py index a6f52a26a7..818776e71c 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py @@ -4,44 +4,16 @@ def test_Pvc_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputMRIFile=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - ), - inputMaskFile=dict( - argstr="-m %s", - extensions=None, - ), - outputLabelFile=dict( - argstr="-o %s", - extensions=None, - genfile=True, - ), - outputTissueFractionFile=dict( - argstr="-f %s", - extensions=None, - genfile=True, - ), - spatialPrior=dict( - argstr="-l %f", - ), - threeClassFlag=dict( - argstr="-3", - ), - timer=dict( - argstr="--timer", - ), - verbosity=dict( - argstr="-v %d", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputMRIFile=dict(argstr="-i %s", extensions=None, mandatory=True), + inputMaskFile=dict(argstr="-m %s", extensions=None), + outputLabelFile=dict(argstr="-o %s", extensions=None, genfile=True), + outputTissueFractionFile=dict(argstr="-f %s", extensions=None, genfile=True), + spatialPrior=dict(argstr="-l %f"), + threeClassFlag=dict(argstr="-3"), + timer=dict(argstr="--timer"), + 
verbosity=dict(argstr="-v %d"), ) inputs = Pvc.input_spec() @@ -52,12 +24,8 @@ def test_Pvc_inputs(): def test_Pvc_outputs(): output_map = dict( - outputLabelFile=dict( - extensions=None, - ), - outputTissueFractionFile=dict( - extensions=None, - ), + outputLabelFile=dict(extensions=None), + outputTissueFractionFile=dict(extensions=None), ) outputs = Pvc.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py b/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py index 4c29c2bfda..e57f6864aa 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py @@ -4,82 +4,29 @@ def test_SVReg_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - atlasFilePrefix=dict( - argstr="'%s'", - position=1, - ), - curveMatchingInstructions=dict( - argstr="'-cur %s'", - ), - dataSinkDelay=dict( - argstr="%s", - ), - displayModuleName=dict( - argstr="'-m'", - ), - displayTimestamps=dict( - argstr="'-t'", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - iterations=dict( - argstr="'-H %d'", - ), - keepIntermediates=dict( - argstr="'-k'", - ), - pialSurfaceMaskDilation=dict( - argstr="'-D %d'", - ), - refineOutputs=dict( - argstr="'-r'", - ), - shortMessages=dict( - argstr="'-gui'", - ), - skipToIntensityReg=dict( - argstr="'-p'", - ), - skipToVolumeReg=dict( - argstr="'-s'", - ), - skipVolumetricProcessing=dict( - argstr="'-S'", - ), - subjectFilePrefix=dict( - argstr="'%s'", - mandatory=True, - position=0, - ), - useCerebrumMask=dict( - argstr="'-C'", - ), - useManualMaskFile=dict( - argstr="'-cbm'", - ), - useMultiThreading=dict( - argstr="'-P'", - ), - useSingleThreading=dict( - argstr="'-U'", - ), - verbosity0=dict( - argstr="'-v0'", - xor=("verbosity0", "verbosity1", "verbosity2"), - ), - verbosity1=dict( - argstr="'-v1'", - xor=("verbosity0", "verbosity1", "verbosity2"), - ), - verbosity2=dict( - argstr="'v2'", - xor=("verbosity0", "verbosity1", "verbosity2"), - ), + args=dict(argstr="%s"), + atlasFilePrefix=dict(argstr="'%s'", position=1), + curveMatchingInstructions=dict(argstr="'-cur %s'"), + dataSinkDelay=dict(argstr="%s"), + displayModuleName=dict(argstr="'-m'"), + displayTimestamps=dict(argstr="'-t'"), + environ=dict(nohash=True, usedefault=True), + iterations=dict(argstr="'-H %d'"), + keepIntermediates=dict(argstr="'-k'"), + pialSurfaceMaskDilation=dict(argstr="'-D %d'"), + refineOutputs=dict(argstr="'-r'"), + shortMessages=dict(argstr="'-gui'"), + skipToIntensityReg=dict(argstr="'-p'"), + skipToVolumeReg=dict(argstr="'-s'"), + skipVolumetricProcessing=dict(argstr="'-S'"), + subjectFilePrefix=dict(argstr="'%s'", mandatory=True, position=0), + useCerebrumMask=dict(argstr="'-C'"), + useManualMaskFile=dict(argstr="'-cbm'"), + useMultiThreading=dict(argstr="'-P'"), + useSingleThreading=dict(argstr="'-U'"), + verbosity0=dict(argstr="'-v0'", xor=("verbosity0", "verbosity1", "verbosity2")), + verbosity1=dict(argstr="'-v1'", xor=("verbosity0", "verbosity1", "verbosity2")), + verbosity2=dict(argstr="'v2'", xor=("verbosity0", "verbosity1", "verbosity2")), ) inputs = SVReg.input_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py b/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py index 97094db018..63f9696cd9 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py @@ -4,40 +4,15 @@ def test_Scrubmask_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - 
backgroundFillThreshold=dict( - argstr="-b %d", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - foregroundTrimThreshold=dict( - argstr="-f %d", - usedefault=True, - ), - inputMaskFile=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - ), - numberIterations=dict( - argstr="-n %d", - ), - outputMaskFile=dict( - argstr="-o %s", - extensions=None, - genfile=True, - ), - timer=dict( - argstr="--timer", - ), - verbosity=dict( - argstr="-v %d", - ), + args=dict(argstr="%s"), + backgroundFillThreshold=dict(argstr="-b %d", usedefault=True), + environ=dict(nohash=True, usedefault=True), + foregroundTrimThreshold=dict(argstr="-f %d", usedefault=True), + inputMaskFile=dict(argstr="-i %s", extensions=None, mandatory=True), + numberIterations=dict(argstr="-n %d"), + outputMaskFile=dict(argstr="-o %s", extensions=None, genfile=True), + timer=dict(argstr="--timer"), + verbosity=dict(argstr="-v %d"), ) inputs = Scrubmask.input_spec() @@ -47,11 +22,7 @@ def test_Scrubmask_inputs(): def test_Scrubmask_outputs(): - output_map = dict( - outputMaskFile=dict( - extensions=None, - ), - ) + output_map = dict(outputMaskFile=dict(extensions=None)) outputs = Scrubmask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py b/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py index 3120f00184..137b2959c1 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py @@ -4,58 +4,21 @@ def test_Skullfinder_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bgLabelValue=dict( - argstr="--bglabel %d", - ), - brainLabelValue=dict( - argstr="--brainlabel %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputMRIFile=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - ), - inputMaskFile=dict( - argstr="-m %s", - extensions=None, - mandatory=True, - ), - lowerThreshold=dict( - argstr="-l %d", - ), - outputLabelFile=dict( - argstr="-o %s", - extensions=None, - genfile=True, - ), - performFinalOpening=dict( - argstr="--finalOpening", - ), - scalpLabelValue=dict( - argstr="--scalplabel %d", - ), - skullLabelValue=dict( - argstr="--skulllabel %d", - ), - spaceLabelValue=dict( - argstr="--spacelabel %d", - ), - surfaceFilePrefix=dict( - argstr="-s %s", - ), - upperThreshold=dict( - argstr="-u %d", - ), - verbosity=dict( - argstr="-v %d", - ), + args=dict(argstr="%s"), + bgLabelValue=dict(argstr="--bglabel %d"), + brainLabelValue=dict(argstr="--brainlabel %d"), + environ=dict(nohash=True, usedefault=True), + inputMRIFile=dict(argstr="-i %s", extensions=None, mandatory=True), + inputMaskFile=dict(argstr="-m %s", extensions=None, mandatory=True), + lowerThreshold=dict(argstr="-l %d"), + outputLabelFile=dict(argstr="-o %s", extensions=None, genfile=True), + performFinalOpening=dict(argstr="--finalOpening"), + scalpLabelValue=dict(argstr="--scalplabel %d"), + skullLabelValue=dict(argstr="--skulllabel %d"), + spaceLabelValue=dict(argstr="--spacelabel %d"), + surfaceFilePrefix=dict(argstr="-s %s"), + upperThreshold=dict(argstr="-u %d"), + verbosity=dict(argstr="-v %d"), ) inputs = Skullfinder.input_spec() @@ -65,11 +28,7 @@ def test_Skullfinder_inputs(): def test_Skullfinder_outputs(): - output_map = dict( - outputLabelFile=dict( - extensions=None, - ), - ) + output_map = dict(outputLabelFile=dict(extensions=None)) outputs = Skullfinder.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Tca.py b/nipype/interfaces/brainsuite/tests/test_auto_Tca.py index eaba6a1d5f..253b2a0a2f 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Tca.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Tca.py @@ -4,40 +4,15 @@ def test_Tca_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - foregroundDelta=dict( - argstr="--delta %d", - usedefault=True, - ), - inputMaskFile=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - ), - maxCorrectionSize=dict( - argstr="-n %d", - ), - minCorrectionSize=dict( - argstr="-m %d", - usedefault=True, - ), - outputMaskFile=dict( - argstr="-o %s", - extensions=None, - genfile=True, - ), - timer=dict( - argstr="--timer", - ), - verbosity=dict( - argstr="-v %d", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + foregroundDelta=dict(argstr="--delta %d", usedefault=True), + inputMaskFile=dict(argstr="-i %s", extensions=None, mandatory=True), + maxCorrectionSize=dict(argstr="-n %d"), + minCorrectionSize=dict(argstr="-m %d", usedefault=True), + outputMaskFile=dict(argstr="-o %s", extensions=None, genfile=True), + timer=dict(argstr="--timer"), + verbosity=dict(argstr="-v %d"), ) inputs = Tca.input_spec() @@ -47,11 +22,7 @@ def test_Tca_inputs(): def test_Tca_outputs(): - output_map = dict( - outputMaskFile=dict( - extensions=None, - ), - ) + output_map = dict(outputMaskFile=dict(extensions=None)) outputs = Tca.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py b/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py index 8b043c63c7..983d571fec 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py @@ -4,17 +4,9 @@ def test_ThicknessPVC_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - subjectFilePrefix=dict( - argstr="%s", - mandatory=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + subjectFilePrefix=dict(argstr="%s", mandatory=True), ) inputs = ThicknessPVC.input_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py b/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py index 9b6110d30d..7bfc0b5200 100644 --- a/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py +++ b/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py @@ -4,109 +4,32 @@ def test_AnalyzeHeader_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - centre=dict( - argstr="-centre %s", - units="mm", - ), - data_dims=dict( - argstr="-datadims %s", - units="voxels", - ), - datatype=dict( - argstr="-datatype %s", - mandatory=True, - ), - description=dict( - argstr="-description %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - greylevels=dict( - argstr="-gl %s", - units="NA", - ), - in_file=dict( - argstr="< %s", - extensions=None, - mandatory=True, - position=1, - ), - initfromheader=dict( - argstr="-initfromheader %s", - extensions=None, - position=3, - ), - intelbyteorder=dict( - argstr="-intelbyteorder", - ), - networkbyteorder=dict( - argstr="-networkbyteorder", - ), - nimages=dict( - argstr="-nimages %d", - units="NA", - ), - offset=dict( - argstr="-offset %d", - units="NA", - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, 
- ), - picoseed=dict( - argstr="-picoseed %s", - units="mm", - ), - printbigendian=dict( - argstr="-printbigendian %s", - extensions=None, - position=3, - ), - printimagedims=dict( - argstr="-printimagedims %s", - extensions=None, - position=3, - ), + args=dict(argstr="%s"), + centre=dict(argstr="-centre %s", units="mm"), + data_dims=dict(argstr="-datadims %s", units="voxels"), + datatype=dict(argstr="-datatype %s", mandatory=True), + description=dict(argstr="-description %s"), + environ=dict(nohash=True, usedefault=True), + greylevels=dict(argstr="-gl %s", units="NA"), + in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=1), + initfromheader=dict(argstr="-initfromheader %s", extensions=None, position=3), + intelbyteorder=dict(argstr="-intelbyteorder"), + networkbyteorder=dict(argstr="-networkbyteorder"), + nimages=dict(argstr="-nimages %d", units="NA"), + offset=dict(argstr="-offset %d", units="NA"), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + picoseed=dict(argstr="-picoseed %s", units="mm"), + printbigendian=dict(argstr="-printbigendian %s", extensions=None, position=3), + printimagedims=dict(argstr="-printimagedims %s", extensions=None, position=3), printintelbyteorder=dict( - argstr="-printintelbyteorder %s", - extensions=None, - position=3, - ), - printprogargs=dict( - argstr="-printprogargs %s", - extensions=None, - position=3, - ), - readheader=dict( - argstr="-readheader %s", - extensions=None, - position=3, - ), - scaleinter=dict( - argstr="-scaleinter %d", - units="NA", - ), - scaleslope=dict( - argstr="-scaleslope %d", - units="NA", - ), - scheme_file=dict( - argstr="%s", - extensions=None, - position=2, - ), - voxel_dims=dict( - argstr="-voxeldims %s", - units="mm", - ), + argstr="-printintelbyteorder %s", extensions=None, position=3 + ), + printprogargs=dict(argstr="-printprogargs %s", extensions=None, position=3), + readheader=dict(argstr="-readheader %s", extensions=None, position=3), + scaleinter=dict(argstr="-scaleinter %d", units="NA"), + scaleslope=dict(argstr="-scaleslope %d", units="NA"), + scheme_file=dict(argstr="%s", extensions=None, position=2), + voxel_dims=dict(argstr="-voxeldims %s", units="mm"), ) inputs = AnalyzeHeader.input_spec() @@ -116,11 +39,7 @@ def test_AnalyzeHeader_inputs(): def test_AnalyzeHeader_outputs(): - output_map = dict( - header=dict( - extensions=None, - ), - ) + output_map = dict(header=dict(extensions=None)) outputs = AnalyzeHeader.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py b/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py index 6181cf7541..7745dbcac2 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py @@ -4,39 +4,14 @@ def test_ComputeEigensystem_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="< %s", - extensions=None, - mandatory=True, - position=1, - ), - inputdatatype=dict( - argstr="-inputdatatype %s", - usedefault=True, - ), - inputmodel=dict( - argstr="-inputmodel %s", - ), - maxcomponents=dict( - argstr="-maxcomponents %d", - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), - outputdatatype=dict( - argstr="-outputdatatype %s", - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="< 
%s", extensions=None, mandatory=True, position=1), + inputdatatype=dict(argstr="-inputdatatype %s", usedefault=True), + inputmodel=dict(argstr="-inputmodel %s"), + maxcomponents=dict(argstr="-maxcomponents %d"), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + outputdatatype=dict(argstr="-outputdatatype %s", usedefault=True), ) inputs = ComputeEigensystem.input_spec() @@ -46,11 +21,7 @@ def test_ComputeEigensystem_inputs(): def test_ComputeEigensystem_outputs(): - output_map = dict( - eigen=dict( - extensions=None, - ), - ) + output_map = dict(eigen=dict(extensions=None)) outputs = ComputeEigensystem.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py b/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py index 0a13ac4f64..90f477dd87 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py @@ -4,39 +4,14 @@ def test_ComputeFractionalAnisotropy_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="< %s", - extensions=None, - mandatory=True, - position=1, - ), - inputdatatype=dict( - argstr="-inputdatatype %s", - ), - inputmodel=dict( - argstr="-inputmodel %s", - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), - outputdatatype=dict( - argstr="-outputdatatype %s", - ), - scheme_file=dict( - argstr="%s", - extensions=None, - position=2, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=1), + inputdatatype=dict(argstr="-inputdatatype %s"), + inputmodel=dict(argstr="-inputmodel %s"), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + outputdatatype=dict(argstr="-outputdatatype %s"), + scheme_file=dict(argstr="%s", extensions=None, position=2), ) inputs = ComputeFractionalAnisotropy.input_spec() @@ -46,11 +21,7 @@ def test_ComputeFractionalAnisotropy_inputs(): def test_ComputeFractionalAnisotropy_outputs(): - output_map = dict( - fa=dict( - extensions=None, - ), - ) + output_map = dict(fa=dict(extensions=None)) outputs = ComputeFractionalAnisotropy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py b/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py index 822bd0306e..361e42521e 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py @@ -4,39 +4,14 @@ def test_ComputeMeanDiffusivity_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="< %s", - extensions=None, - mandatory=True, - position=1, - ), - inputdatatype=dict( - argstr="-inputdatatype %s", - ), - inputmodel=dict( - argstr="-inputmodel %s", - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), - outputdatatype=dict( - argstr="-outputdatatype %s", - ), - scheme_file=dict( - argstr="%s", - extensions=None, - position=2, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=1), + inputdatatype=dict(argstr="-inputdatatype %s"), 
+ inputmodel=dict(argstr="-inputmodel %s"), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + outputdatatype=dict(argstr="-outputdatatype %s"), + scheme_file=dict(argstr="%s", extensions=None, position=2), ) inputs = ComputeMeanDiffusivity.input_spec() @@ -46,11 +21,7 @@ def test_ComputeMeanDiffusivity_inputs(): def test_ComputeMeanDiffusivity_outputs(): - output_map = dict( - md=dict( - extensions=None, - ), - ) + output_map = dict(md=dict(extensions=None)) outputs = ComputeMeanDiffusivity.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py b/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py index 8a912685ae..4b081a950a 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py @@ -4,39 +4,14 @@ def test_ComputeTensorTrace_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="< %s", - extensions=None, - mandatory=True, - position=1, - ), - inputdatatype=dict( - argstr="-inputdatatype %s", - ), - inputmodel=dict( - argstr="-inputmodel %s", - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), - outputdatatype=dict( - argstr="-outputdatatype %s", - ), - scheme_file=dict( - argstr="%s", - extensions=None, - position=2, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=1), + inputdatatype=dict(argstr="-inputdatatype %s"), + inputmodel=dict(argstr="-inputmodel %s"), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + outputdatatype=dict(argstr="-outputdatatype %s"), + scheme_file=dict(argstr="%s", extensions=None, position=2), ) inputs = ComputeTensorTrace.input_spec() @@ -46,11 +21,7 @@ def test_ComputeTensorTrace_inputs(): def test_ComputeTensorTrace_outputs(): - output_map = dict( - trace=dict( - extensions=None, - ), - ) + output_map = dict(trace=dict(extensions=None)) outputs = ComputeTensorTrace.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_Conmat.py b/nipype/interfaces/camino/tests/test_auto_Conmat.py index a9ea16865d..fa85ce4dd5 100644 --- a/nipype/interfaces/camino/tests/test_auto_Conmat.py +++ b/nipype/interfaces/camino/tests/test_auto_Conmat.py @@ -4,42 +4,16 @@ def test_Conmat_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-inputfile %s", - extensions=None, - mandatory=True, - ), - output_root=dict( - argstr="-outputroot %s", - extensions=None, - genfile=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-inputfile %s", extensions=None, mandatory=True), + output_root=dict(argstr="-outputroot %s", extensions=None, genfile=True), scalar_file=dict( - argstr="-scalarfile %s", - extensions=None, - requires=["tract_stat"], - ), - target_file=dict( - argstr="-targetfile %s", - extensions=None, - mandatory=True, - ), - targetname_file=dict( - argstr="-targetnamefile %s", - extensions=None, - ), - tract_prop=dict( - argstr="-tractstat %s", - units="NA", - xor=["tract_stat"], + argstr="-scalarfile %s", extensions=None, requires=["tract_stat"] ), + target_file=dict(argstr="-targetfile %s", extensions=None, 
mandatory=True), + targetname_file=dict(argstr="-targetnamefile %s", extensions=None), + tract_prop=dict(argstr="-tractstat %s", units="NA", xor=["tract_stat"]), tract_stat=dict( argstr="-tractstat %s", requires=["scalar_file"], @@ -55,14 +29,7 @@ def test_Conmat_inputs(): def test_Conmat_outputs(): - output_map = dict( - conmat_sc=dict( - extensions=None, - ), - conmat_ts=dict( - extensions=None, - ), - ) + output_map = dict(conmat_sc=dict(extensions=None), conmat_ts=dict(extensions=None)) outputs = Conmat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py b/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py index b88fe01ba9..44576fba9f 100644 --- a/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py +++ b/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py @@ -4,30 +4,16 @@ def test_DT2NIfTI_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), header_file=dict( - argstr="-header %s", - extensions=None, - mandatory=True, - position=3, + argstr="-header %s", extensions=None, mandatory=True, position=3 ), in_file=dict( - argstr="-inputfile %s", - extensions=None, - mandatory=True, - position=1, + argstr="-inputfile %s", extensions=None, mandatory=True, position=1 ), output_root=dict( - argstr="-outputroot %s", - extensions=None, - genfile=True, - position=2, + argstr="-outputroot %s", extensions=None, genfile=True, position=2 ), ) inputs = DT2NIfTI.input_spec() @@ -39,15 +25,9 @@ def test_DT2NIfTI_inputs(): def test_DT2NIfTI_outputs(): output_map = dict( - dt=dict( - extensions=None, - ), - exitcode=dict( - extensions=None, - ), - lns0=dict( - extensions=None, - ), + dt=dict(extensions=None), + exitcode=dict(extensions=None), + lns0=dict(extensions=None), ) outputs = DT2NIfTI.output_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_DTIFit.py b/nipype/interfaces/camino/tests/test_auto_DTIFit.py index 757f870fe3..1bd82d0f95 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTIFit.py +++ b/nipype/interfaces/camino/tests/test_auto_DTIFit.py @@ -4,39 +4,13 @@ def test_DTIFit_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bgmask=dict( - argstr="-bgmask %s", - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=1, - ), - non_linear=dict( - argstr="-nonlinear", - position=3, - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), - scheme_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), + args=dict(argstr="%s"), + bgmask=dict(argstr="-bgmask %s", extensions=None), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1), + non_linear=dict(argstr="-nonlinear", position=3), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + scheme_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), ) inputs = DTIFit.input_spec() @@ -46,11 +20,7 @@ def test_DTIFit_inputs(): def test_DTIFit_outputs(): - output_map = dict( - tensor_fitted=dict( - extensions=None, - ), - ) + output_map = dict(tensor_fitted=dict(extensions=None)) outputs = DTIFit.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py 
b/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py index 0ee1ffea8f..20861e0e09 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py +++ b/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py @@ -4,64 +4,22 @@ def test_DTLUTGen_inputs(): input_map = dict( - acg=dict( - argstr="-acg", - ), - args=dict( - argstr="%s", - ), - bingham=dict( - argstr="-bingham", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - frange=dict( - argstr="-frange %s", - position=1, - units="NA", - ), - inversion=dict( - argstr="-inversion %d", - units="NA", - ), - lrange=dict( - argstr="-lrange %s", - position=1, - units="NA", - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), - samples=dict( - argstr="-samples %d", - units="NA", - ), + acg=dict(argstr="-acg"), + args=dict(argstr="%s"), + bingham=dict(argstr="-bingham"), + environ=dict(nohash=True, usedefault=True), + frange=dict(argstr="-frange %s", position=1, units="NA"), + inversion=dict(argstr="-inversion %d", units="NA"), + lrange=dict(argstr="-lrange %s", position=1, units="NA"), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + samples=dict(argstr="-samples %d", units="NA"), scheme_file=dict( - argstr="-schemefile %s", - extensions=None, - mandatory=True, - position=2, - ), - snr=dict( - argstr="-snr %f", - units="NA", - ), - step=dict( - argstr="-step %f", - units="NA", - ), - trace=dict( - argstr="-trace %G", - units="NA", - ), - watson=dict( - argstr="-watson", + argstr="-schemefile %s", extensions=None, mandatory=True, position=2 ), + snr=dict(argstr="-snr %f", units="NA"), + step=dict(argstr="-step %f", units="NA"), + trace=dict(argstr="-trace %G", units="NA"), + watson=dict(argstr="-watson"), ) inputs = DTLUTGen.input_spec() @@ -71,11 +29,7 @@ def test_DTLUTGen_inputs(): def test_DTLUTGen_outputs(): - output_map = dict( - dtLUT=dict( - extensions=None, - ), - ) + output_map = dict(dtLUT=dict(extensions=None)) outputs = DTLUTGen.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_DTMetric.py b/nipype/interfaces/camino/tests/test_auto_DTMetric.py index 11e971b28b..665dee649d 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTMetric.py +++ b/nipype/interfaces/camino/tests/test_auto_DTMetric.py @@ -4,39 +4,14 @@ def test_DTMetric_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - data_header=dict( - argstr="-header %s", - extensions=None, - ), - eigen_data=dict( - argstr="-inputfile %s", - extensions=None, - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputdatatype=dict( - argstr="-inputdatatype %s", - usedefault=True, - ), - metric=dict( - argstr="-stat %s", - mandatory=True, - ), - outputdatatype=dict( - argstr="-outputdatatype %s", - usedefault=True, - ), - outputfile=dict( - argstr="-outputfile %s", - extensions=None, - genfile=True, - ), + args=dict(argstr="%s"), + data_header=dict(argstr="-header %s", extensions=None), + eigen_data=dict(argstr="-inputfile %s", extensions=None, mandatory=True), + environ=dict(nohash=True, usedefault=True), + inputdatatype=dict(argstr="-inputdatatype %s", usedefault=True), + metric=dict(argstr="-stat %s", mandatory=True), + outputdatatype=dict(argstr="-outputdatatype %s", usedefault=True), + outputfile=dict(argstr="-outputfile %s", extensions=None, genfile=True), ) inputs = DTMetric.input_spec() @@ -46,11 +21,7 @@ def test_DTMetric_inputs(): def test_DTMetric_outputs(): - output_map = dict( - 
metric_stats=dict( - extensions=None, - ), - ) + output_map = dict(metric_stats=dict(extensions=None)) outputs = DTMetric.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py b/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py index 376fa1bf3e..01b481ceb4 100644 --- a/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py +++ b/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py @@ -4,58 +4,23 @@ def test_FSL2Scheme_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bscale=dict( - argstr="-bscale %d", - units="NA", - ), + args=dict(argstr="%s"), + bscale=dict(argstr="-bscale %d", units="NA"), bval_file=dict( - argstr="-bvalfile %s", - extensions=None, - mandatory=True, - position=2, + argstr="-bvalfile %s", extensions=None, mandatory=True, position=2 ), bvec_file=dict( - argstr="-bvecfile %s", - extensions=None, - mandatory=True, - position=1, - ), - diffusiontime=dict( - argstr="-diffusiontime %f", - units="NA", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - flipx=dict( - argstr="-flipx", - ), - flipy=dict( - argstr="-flipy", - ), - flipz=dict( - argstr="-flipz", - ), - interleave=dict( - argstr="-interleave", - ), - numscans=dict( - argstr="-numscans %d", - units="NA", - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), - usegradmod=dict( - argstr="-usegradmod", - ), + argstr="-bvecfile %s", extensions=None, mandatory=True, position=1 + ), + diffusiontime=dict(argstr="-diffusiontime %f", units="NA"), + environ=dict(nohash=True, usedefault=True), + flipx=dict(argstr="-flipx"), + flipy=dict(argstr="-flipy"), + flipz=dict(argstr="-flipz"), + interleave=dict(argstr="-interleave"), + numscans=dict(argstr="-numscans %d", units="NA"), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + usegradmod=dict(argstr="-usegradmod"), ) inputs = FSL2Scheme.input_spec() @@ -65,11 +30,7 @@ def test_FSL2Scheme_inputs(): def test_FSL2Scheme_outputs(): - output_map = dict( - scheme=dict( - extensions=None, - ), - ) + output_map = dict(scheme=dict(extensions=None)) outputs = FSL2Scheme.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py b/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py index ae49936d02..904b0cd097 100644 --- a/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py +++ b/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py @@ -4,30 +4,11 @@ def test_Image2Voxel_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-4dimage %s", - extensions=None, - mandatory=True, - position=1, - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), - out_type=dict( - argstr="-outputdatatype %s", - position=2, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-4dimage %s", extensions=None, mandatory=True, position=1), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + out_type=dict(argstr="-outputdatatype %s", position=2, usedefault=True), ) inputs = Image2Voxel.input_spec() @@ -37,11 +18,7 @@ def test_Image2Voxel_inputs(): def test_Image2Voxel_outputs(): - output_map = dict( - voxel_order=dict( - extensions=None, - ), - ) + output_map = dict(voxel_order=dict(extensions=None)) outputs = Image2Voxel.output_spec() for key, metadata 
in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ImageStats.py b/nipype/interfaces/camino/tests/test_auto_ImageStats.py index 4bc6aa941b..e1e56e167d 100644 --- a/nipype/interfaces/camino/tests/test_auto_ImageStats.py +++ b/nipype/interfaces/camino/tests/test_auto_ImageStats.py @@ -4,32 +4,12 @@ def test_ImageStats_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr="-images %s", - mandatory=True, - position=-1, - ), - out_type=dict( - argstr="-outputdatatype %s", - usedefault=True, - ), - output_root=dict( - argstr="-outputroot %s", - extensions=None, - mandatory=True, - ), - stat=dict( - argstr="-stat %s", - mandatory=True, - units="NA", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_files=dict(argstr="-images %s", mandatory=True, position=-1), + out_type=dict(argstr="-outputdatatype %s", usedefault=True), + output_root=dict(argstr="-outputroot %s", extensions=None, mandatory=True), + stat=dict(argstr="-stat %s", mandatory=True, units="NA"), ) inputs = ImageStats.input_spec() @@ -39,11 +19,7 @@ def test_ImageStats_inputs(): def test_ImageStats_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = ImageStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_LinRecon.py b/nipype/interfaces/camino/tests/test_auto_LinRecon.py index 1ed5bbbe6b..9b1ed7364e 100644 --- a/nipype/interfaces/camino/tests/test_auto_LinRecon.py +++ b/nipype/interfaces/camino/tests/test_auto_LinRecon.py @@ -4,47 +4,15 @@ def test_LinRecon_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bgmask=dict( - argstr="-bgmask %s", - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=1, - ), - log=dict( - argstr="-log", - ), - normalize=dict( - argstr="-normalize", - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), - qball_mat=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=3, - ), - scheme_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), + args=dict(argstr="%s"), + bgmask=dict(argstr="-bgmask %s", extensions=None), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1), + log=dict(argstr="-log"), + normalize=dict(argstr="-normalize"), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + qball_mat=dict(argstr="%s", extensions=None, mandatory=True, position=3), + scheme_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), ) inputs = LinRecon.input_spec() @@ -54,11 +22,7 @@ def test_LinRecon_inputs(): def test_LinRecon_outputs(): - output_map = dict( - recon_data=dict( - extensions=None, - ), - ) + output_map = dict(recon_data=dict(extensions=None)) outputs = LinRecon.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_MESD.py b/nipype/interfaces/camino/tests/test_auto_MESD.py index 189dd2e2d4..be3edd23cc 100644 --- a/nipype/interfaces/camino/tests/test_auto_MESD.py +++ b/nipype/interfaces/camino/tests/test_auto_MESD.py @@ -4,56 +4,19 @@ def test_MESD_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bgmask=dict( - argstr="-bgmask %s", - 
extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fastmesd=dict( - argstr="-fastmesd", - requires=["mepointset"], - ), + args=dict(argstr="%s"), + bgmask=dict(argstr="-bgmask %s", extensions=None), + environ=dict(nohash=True, usedefault=True), + fastmesd=dict(argstr="-fastmesd", requires=["mepointset"]), in_file=dict( - argstr="-inputfile %s", - extensions=None, - mandatory=True, - position=1, - ), - inputdatatype=dict( - argstr="-inputdatatype %s", - ), - inverter=dict( - argstr="-filter %s", - mandatory=True, - position=2, - ), - inverter_param=dict( - argstr="%f", - mandatory=True, - position=3, - units="NA", - ), - mepointset=dict( - argstr="-mepointset %d", - units="NA", - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), - scheme_file=dict( - argstr="-schemefile %s", - extensions=None, - mandatory=True, - ), + argstr="-inputfile %s", extensions=None, mandatory=True, position=1 + ), + inputdatatype=dict(argstr="-inputdatatype %s"), + inverter=dict(argstr="-filter %s", mandatory=True, position=2), + inverter_param=dict(argstr="%f", mandatory=True, position=3, units="NA"), + mepointset=dict(argstr="-mepointset %d", units="NA"), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + scheme_file=dict(argstr="-schemefile %s", extensions=None, mandatory=True), ) inputs = MESD.input_spec() @@ -63,11 +26,7 @@ def test_MESD_inputs(): def test_MESD_outputs(): - output_map = dict( - mesd_data=dict( - extensions=None, - ), - ) + output_map = dict(mesd_data=dict(extensions=None)) outputs = MESD.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ModelFit.py b/nipype/interfaces/camino/tests/test_auto_ModelFit.py index 82bd1a8400..4d445a03b8 100644 --- a/nipype/interfaces/camino/tests/test_auto_ModelFit.py +++ b/nipype/interfaces/camino/tests/test_auto_ModelFit.py @@ -4,74 +4,24 @@ def test_ModelFit_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bgmask=dict( - argstr="-bgmask %s", - extensions=None, - ), - bgthresh=dict( - argstr="-bgthresh %G", - ), - cfthresh=dict( - argstr="-csfthresh %G", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixedbvalue=dict( - argstr="-fixedbvalue %s", - ), - fixedmodq=dict( - argstr="-fixedmod %s", - ), - in_file=dict( - argstr="-inputfile %s", - extensions=None, - mandatory=True, - ), - inputdatatype=dict( - argstr="-inputdatatype %s", - ), - model=dict( - argstr="-model %s", - mandatory=True, - ), - noisemap=dict( - argstr="-noisemap %s", - extensions=None, - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), - outlier=dict( - argstr="-outliermap %s", - extensions=None, - ), - outputfile=dict( - argstr="-outputfile %s", - extensions=None, - ), - residualmap=dict( - argstr="-residualmap %s", - extensions=None, - ), - scheme_file=dict( - argstr="-schemefile %s", - extensions=None, - mandatory=True, - ), - sigma=dict( - argstr="-sigma %G", - ), - tau=dict( - argstr="-tau %G", - ), + args=dict(argstr="%s"), + bgmask=dict(argstr="-bgmask %s", extensions=None), + bgthresh=dict(argstr="-bgthresh %G"), + cfthresh=dict(argstr="-csfthresh %G"), + environ=dict(nohash=True, usedefault=True), + fixedbvalue=dict(argstr="-fixedbvalue %s"), + fixedmodq=dict(argstr="-fixedmod %s"), + in_file=dict(argstr="-inputfile %s", extensions=None, mandatory=True), + inputdatatype=dict(argstr="-inputdatatype %s"), + model=dict(argstr="-model %s", mandatory=True), + 
noisemap=dict(argstr="-noisemap %s", extensions=None), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + outlier=dict(argstr="-outliermap %s", extensions=None), + outputfile=dict(argstr="-outputfile %s", extensions=None), + residualmap=dict(argstr="-residualmap %s", extensions=None), + scheme_file=dict(argstr="-schemefile %s", extensions=None, mandatory=True), + sigma=dict(argstr="-sigma %G"), + tau=dict(argstr="-tau %G"), ) inputs = ModelFit.input_spec() @@ -81,11 +31,7 @@ def test_ModelFit_inputs(): def test_ModelFit_outputs(): - output_map = dict( - fitted_data=dict( - extensions=None, - ), - ) + output_map = dict(fitted_data=dict(extensions=None)) outputs = ModelFit.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py b/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py index 82b4276a0f..35efc7c84c 100644 --- a/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py +++ b/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py @@ -4,46 +4,18 @@ def test_NIfTIDT2Camino_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bgmask=dict( - argstr="-bgmask %s", - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + bgmask=dict(argstr="-bgmask %s", extensions=None), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="-inputfile %s", - extensions=None, - mandatory=True, - position=1, - ), - lns0_file=dict( - argstr="-lns0 %s", - extensions=None, - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), - s0_file=dict( - argstr="-s0 %s", - extensions=None, - ), - scaleinter=dict( - argstr="-scaleinter %s", - ), - scaleslope=dict( - argstr="-scaleslope %s", - ), - uppertriangular=dict( - argstr="-uppertriangular %s", - ), + argstr="-inputfile %s", extensions=None, mandatory=True, position=1 + ), + lns0_file=dict(argstr="-lns0 %s", extensions=None), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + s0_file=dict(argstr="-s0 %s", extensions=None), + scaleinter=dict(argstr="-scaleinter %s"), + scaleslope=dict(argstr="-scaleslope %s"), + uppertriangular=dict(argstr="-uppertriangular %s"), ) inputs = NIfTIDT2Camino.input_spec() @@ -53,11 +25,7 @@ def test_NIfTIDT2Camino_inputs(): def test_NIfTIDT2Camino_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = NIfTIDT2Camino.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py b/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py index 09f3a93cac..f2c29a01b7 100644 --- a/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py +++ b/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py @@ -4,50 +4,16 @@ def test_PicoPDFs_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - directmap=dict( - argstr="-directmap", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="< %s", - extensions=None, - mandatory=True, - position=1, - ), - inputmodel=dict( - argstr="-inputmodel %s", - position=2, - usedefault=True, - ), - luts=dict( - argstr="-luts %s", - mandatory=True, - ), - maxcomponents=dict( - argstr="-maxcomponents %d", - units="NA", - ), - numpds=dict( - argstr="-numpds %d", - units="NA", - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), - pdf=dict( - 
argstr="-pdf %s", - position=4, - usedefault=True, - ), + args=dict(argstr="%s"), + directmap=dict(argstr="-directmap"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=1), + inputmodel=dict(argstr="-inputmodel %s", position=2, usedefault=True), + luts=dict(argstr="-luts %s", mandatory=True), + maxcomponents=dict(argstr="-maxcomponents %d", units="NA"), + numpds=dict(argstr="-numpds %d", units="NA"), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + pdf=dict(argstr="-pdf %s", position=4, usedefault=True), ) inputs = PicoPDFs.input_spec() @@ -57,11 +23,7 @@ def test_PicoPDFs_inputs(): def test_PicoPDFs_outputs(): - output_map = dict( - pdfs=dict( - extensions=None, - ), - ) + output_map = dict(pdfs=dict(extensions=None)) outputs = PicoPDFs.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py b/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py index b1b9fda588..03fe10f87b 100644 --- a/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py +++ b/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py @@ -4,138 +4,44 @@ def test_ProcStreamlines_inputs(): input_map = dict( - allowmultitargets=dict( - argstr="-allowmultitargets", - ), - args=dict( - argstr="%s", - ), - datadims=dict( - argstr="-datadims %s", - units="voxels", - ), - directional=dict( - argstr="-directional %s", - units="NA", - ), - discardloops=dict( - argstr="-discardloops", - ), - endpointfile=dict( - argstr="-endpointfile %s", - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - exclusionfile=dict( - argstr="-exclusionfile %s", - extensions=None, - ), - gzip=dict( - argstr="-gzip", - ), + allowmultitargets=dict(argstr="-allowmultitargets"), + args=dict(argstr="%s"), + datadims=dict(argstr="-datadims %s", units="voxels"), + directional=dict(argstr="-directional %s", units="NA"), + discardloops=dict(argstr="-discardloops"), + endpointfile=dict(argstr="-endpointfile %s", extensions=None), + environ=dict(nohash=True, usedefault=True), + exclusionfile=dict(argstr="-exclusionfile %s", extensions=None), + gzip=dict(argstr="-gzip"), in_file=dict( - argstr="-inputfile %s", - extensions=None, - mandatory=True, - position=1, - ), - inputmodel=dict( - argstr="-inputmodel %s", - usedefault=True, - ), - iterations=dict( - argstr="-iterations %d", - units="NA", - ), - maxtractlength=dict( - argstr="-maxtractlength %d", - units="mm", - ), - maxtractpoints=dict( - argstr="-maxtractpoints %d", - units="NA", - ), - mintractlength=dict( - argstr="-mintractlength %d", - units="mm", - ), - mintractpoints=dict( - argstr="-mintractpoints %d", - units="NA", - ), - noresample=dict( - argstr="-noresample", - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), - outputacm=dict( - argstr="-outputacm", - requires=["outputroot", "seedfile"], - ), + argstr="-inputfile %s", extensions=None, mandatory=True, position=1 + ), + inputmodel=dict(argstr="-inputmodel %s", usedefault=True), + iterations=dict(argstr="-iterations %d", units="NA"), + maxtractlength=dict(argstr="-maxtractlength %d", units="mm"), + maxtractpoints=dict(argstr="-maxtractpoints %d", units="NA"), + mintractlength=dict(argstr="-mintractlength %d", units="mm"), + mintractpoints=dict(argstr="-mintractpoints %d", units="NA"), + noresample=dict(argstr="-noresample"), + out_file=dict(argstr="> %s", extensions=None, genfile=True, 
position=-1), + outputacm=dict(argstr="-outputacm", requires=["outputroot", "seedfile"]), outputcbs=dict( - argstr="-outputcbs", - requires=["outputroot", "targetfile", "seedfile"], - ), - outputcp=dict( - argstr="-outputcp", - requires=["outputroot", "seedfile"], - ), - outputroot=dict( - argstr="-outputroot %s", - extensions=None, - ), - outputsc=dict( - argstr="-outputsc", - requires=["outputroot", "seedfile"], - ), - outputtracts=dict( - argstr="-outputtracts", - ), - regionindex=dict( - argstr="-regionindex %d", - units="mm", - ), - resamplestepsize=dict( - argstr="-resamplestepsize %d", - units="NA", - ), - seedfile=dict( - argstr="-seedfile %s", - extensions=None, - ), - seedpointmm=dict( - argstr="-seedpointmm %s", - units="mm", - ), - seedpointvox=dict( - argstr="-seedpointvox %s", - units="voxels", - ), - targetfile=dict( - argstr="-targetfile %s", - extensions=None, - ), - truncateinexclusion=dict( - argstr="-truncateinexclusion", - ), - truncateloops=dict( - argstr="-truncateloops", - ), - voxeldims=dict( - argstr="-voxeldims %s", - units="mm", - ), - waypointfile=dict( - argstr="-waypointfile %s", - extensions=None, - ), + argstr="-outputcbs", requires=["outputroot", "targetfile", "seedfile"] + ), + outputcp=dict(argstr="-outputcp", requires=["outputroot", "seedfile"]), + outputroot=dict(argstr="-outputroot %s", extensions=None), + outputsc=dict(argstr="-outputsc", requires=["outputroot", "seedfile"]), + outputtracts=dict(argstr="-outputtracts"), + regionindex=dict(argstr="-regionindex %d", units="mm"), + resamplestepsize=dict(argstr="-resamplestepsize %d", units="NA"), + seedfile=dict(argstr="-seedfile %s", extensions=None), + seedpointmm=dict(argstr="-seedpointmm %s", units="mm"), + seedpointvox=dict(argstr="-seedpointvox %s", units="voxels"), + targetfile=dict(argstr="-targetfile %s", extensions=None), + truncateinexclusion=dict(argstr="-truncateinexclusion"), + truncateloops=dict(argstr="-truncateloops"), + voxeldims=dict(argstr="-voxeldims %s", units="mm"), + waypointfile=dict(argstr="-waypointfile %s", extensions=None), ) inputs = ProcStreamlines.input_spec() @@ -145,12 +51,7 @@ def test_ProcStreamlines_inputs(): def test_ProcStreamlines_outputs(): - output_map = dict( - outputroot_files=dict(), - proc=dict( - extensions=None, - ), - ) + output_map = dict(outputroot_files=dict(), proc=dict(extensions=None)) outputs = ProcStreamlines.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_QBallMX.py b/nipype/interfaces/camino/tests/test_auto_QBallMX.py index 30fda3a483..f55702e52b 100644 --- a/nipype/interfaces/camino/tests/test_auto_QBallMX.py +++ b/nipype/interfaces/camino/tests/test_auto_QBallMX.py @@ -4,44 +4,15 @@ def test_QBallMX_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - basistype=dict( - argstr="-basistype %s", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - order=dict( - argstr="-order %d", - units="NA", - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), - rbfpointset=dict( - argstr="-rbfpointset %d", - units="NA", - ), - rbfsigma=dict( - argstr="-rbfsigma %f", - units="NA", - ), - scheme_file=dict( - argstr="-schemefile %s", - extensions=None, - mandatory=True, - ), - smoothingsigma=dict( - argstr="-smoothingsigma %f", - units="NA", - ), + args=dict(argstr="%s"), + basistype=dict(argstr="-basistype %s", usedefault=True), + environ=dict(nohash=True, usedefault=True), + order=dict(argstr="-order %d", 
units="NA"), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + rbfpointset=dict(argstr="-rbfpointset %d", units="NA"), + rbfsigma=dict(argstr="-rbfsigma %f", units="NA"), + scheme_file=dict(argstr="-schemefile %s", extensions=None, mandatory=True), + smoothingsigma=dict(argstr="-smoothingsigma %f", units="NA"), ) inputs = QBallMX.input_spec() @@ -51,11 +22,7 @@ def test_QBallMX_inputs(): def test_QBallMX_outputs(): - output_map = dict( - qmat=dict( - extensions=None, - ), - ) + output_map = dict(qmat=dict(extensions=None)) outputs = QBallMX.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py b/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py index fc58b2f2e9..33d420dc21 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py +++ b/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py @@ -4,52 +4,17 @@ def test_SFLUTGen_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - binincsize=dict( - argstr="-binincsize %d", - units="NA", - ), - directmap=dict( - argstr="-directmap", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-inputfile %s", - extensions=None, - mandatory=True, - ), - info_file=dict( - argstr="-infofile %s", - extensions=None, - mandatory=True, - ), - minvectsperbin=dict( - argstr="-minvectsperbin %d", - units="NA", - ), - order=dict( - argstr="-order %d", - units="NA", - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), - outputstem=dict( - argstr="-outputstem %s", - usedefault=True, - ), - pdf=dict( - argstr="-pdf %s", - usedefault=True, - ), + args=dict(argstr="%s"), + binincsize=dict(argstr="-binincsize %d", units="NA"), + directmap=dict(argstr="-directmap"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-inputfile %s", extensions=None, mandatory=True), + info_file=dict(argstr="-infofile %s", extensions=None, mandatory=True), + minvectsperbin=dict(argstr="-minvectsperbin %d", units="NA"), + order=dict(argstr="-order %d", units="NA"), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + outputstem=dict(argstr="-outputstem %s", usedefault=True), + pdf=dict(argstr="-pdf %s", usedefault=True), ) inputs = SFLUTGen.input_spec() @@ -60,12 +25,7 @@ def test_SFLUTGen_inputs(): def test_SFLUTGen_outputs(): output_map = dict( - lut_one_fibre=dict( - extensions=None, - ), - lut_two_fibres=dict( - extensions=None, - ), + lut_one_fibre=dict(extensions=None), lut_two_fibres=dict(extensions=None) ) outputs = SFLUTGen.output_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py b/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py index 5c20399cbc..aa4f480d96 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py +++ b/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py @@ -4,13 +4,8 @@ def test_SFPICOCalibData_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), info_file=dict( argstr="-infooutputfile %s", extensions=None, @@ -18,61 +13,19 @@ def test_SFPICOCalibData_inputs(): hash_files=False, mandatory=True, ), - onedtfarange=dict( - argstr="-onedtfarange %s", - units="NA", - ), - onedtfastep=dict( - argstr="-onedtfastep %f", - units="NA", - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), - 
scheme_file=dict( - argstr="-schemefile %s", - extensions=None, - mandatory=True, - ), - seed=dict( - argstr="-seed %f", - units="NA", - ), - snr=dict( - argstr="-snr %f", - units="NA", - ), - trace=dict( - argstr="-trace %f", - units="NA", - ), - twodtanglerange=dict( - argstr="-twodtanglerange %s", - units="NA", - ), - twodtanglestep=dict( - argstr="-twodtanglestep %f", - units="NA", - ), - twodtfarange=dict( - argstr="-twodtfarange %s", - units="NA", - ), - twodtfastep=dict( - argstr="-twodtfastep %f", - units="NA", - ), - twodtmixmax=dict( - argstr="-twodtmixmax %f", - units="NA", - ), - twodtmixstep=dict( - argstr="-twodtmixstep %f", - units="NA", - ), + onedtfarange=dict(argstr="-onedtfarange %s", units="NA"), + onedtfastep=dict(argstr="-onedtfastep %f", units="NA"), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + scheme_file=dict(argstr="-schemefile %s", extensions=None, mandatory=True), + seed=dict(argstr="-seed %f", units="NA"), + snr=dict(argstr="-snr %f", units="NA"), + trace=dict(argstr="-trace %f", units="NA"), + twodtanglerange=dict(argstr="-twodtanglerange %s", units="NA"), + twodtanglestep=dict(argstr="-twodtanglestep %f", units="NA"), + twodtfarange=dict(argstr="-twodtfarange %s", units="NA"), + twodtfastep=dict(argstr="-twodtfastep %f", units="NA"), + twodtmixmax=dict(argstr="-twodtmixmax %f", units="NA"), + twodtmixstep=dict(argstr="-twodtmixstep %f", units="NA"), ) inputs = SFPICOCalibData.input_spec() @@ -82,14 +35,7 @@ def test_SFPICOCalibData_inputs(): def test_SFPICOCalibData_outputs(): - output_map = dict( - PICOCalib=dict( - extensions=None, - ), - calib_info=dict( - extensions=None, - ), - ) + output_map = dict(PICOCalib=dict(extensions=None), calib_info=dict(extensions=None)) outputs = SFPICOCalibData.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_SFPeaks.py b/nipype/interfaces/camino/tests/test_auto_SFPeaks.py index 775a9061e6..117a3bfa09 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFPeaks.py +++ b/nipype/interfaces/camino/tests/test_auto_SFPeaks.py @@ -4,71 +4,22 @@ def test_SFPeaks_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - density=dict( - argstr="-density %d", - units="NA", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-inputfile %s", - extensions=None, - mandatory=True, - ), - inputmodel=dict( - argstr="-inputmodel %s", - mandatory=True, - ), - mepointset=dict( - argstr="-mepointset %d", - units="NA", - ), - noconsistencycheck=dict( - argstr="-noconsistencycheck", - ), - numpds=dict( - argstr="-numpds %d", - units="NA", - ), - order=dict( - argstr="-order %d", - units="NA", - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), - pdthresh=dict( - argstr="-pdthresh %f", - units="NA", - ), - pointset=dict( - argstr="-pointset %d", - units="NA", - ), - rbfpointset=dict( - argstr="-rbfpointset %d", - units="NA", - ), - scheme_file=dict( - argstr="%s", - extensions=None, - ), - searchradius=dict( - argstr="-searchradius %f", - units="NA", - ), - stdsfrommean=dict( - argstr="-stdsfrommean %f", - units="NA", - ), + args=dict(argstr="%s"), + density=dict(argstr="-density %d", units="NA"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-inputfile %s", extensions=None, mandatory=True), + inputmodel=dict(argstr="-inputmodel %s", mandatory=True), + mepointset=dict(argstr="-mepointset %d", units="NA"), + 
noconsistencycheck=dict(argstr="-noconsistencycheck"), + numpds=dict(argstr="-numpds %d", units="NA"), + order=dict(argstr="-order %d", units="NA"), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + pdthresh=dict(argstr="-pdthresh %f", units="NA"), + pointset=dict(argstr="-pointset %d", units="NA"), + rbfpointset=dict(argstr="-rbfpointset %d", units="NA"), + scheme_file=dict(argstr="%s", extensions=None), + searchradius=dict(argstr="-searchradius %f", units="NA"), + stdsfrommean=dict(argstr="-stdsfrommean %f", units="NA"), ) inputs = SFPeaks.input_spec() @@ -78,11 +29,7 @@ def test_SFPeaks_inputs(): def test_SFPeaks_outputs(): - output_map = dict( - peaks=dict( - extensions=None, - ), - ) + output_map = dict(peaks=dict(extensions=None)) outputs = SFPeaks.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_Shredder.py b/nipype/interfaces/camino/tests/test_auto_Shredder.py index bf0f9dc9dc..3e55b5a39f 100644 --- a/nipype/interfaces/camino/tests/test_auto_Shredder.py +++ b/nipype/interfaces/camino/tests/test_auto_Shredder.py @@ -4,40 +4,13 @@ def test_Shredder_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - chunksize=dict( - argstr="%d", - position=2, - units="NA", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="< %s", - extensions=None, - mandatory=True, - position=-2, - ), - offset=dict( - argstr="%d", - position=1, - units="NA", - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), - space=dict( - argstr="%d", - position=3, - units="NA", - ), + args=dict(argstr="%s"), + chunksize=dict(argstr="%d", position=2, units="NA"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=-2), + offset=dict(argstr="%d", position=1, units="NA"), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + space=dict(argstr="%d", position=3, units="NA"), ) inputs = Shredder.input_spec() @@ -47,11 +20,7 @@ def test_Shredder_inputs(): def test_Shredder_outputs(): - output_map = dict( - shredded=dict( - extensions=None, - ), - ) + output_map = dict(shredded=dict(extensions=None)) outputs = Shredder.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_Track.py b/nipype/interfaces/camino/tests/test_auto_Track.py index 697b2d5275..5fba585144 100644 --- a/nipype/interfaces/camino/tests/test_auto_Track.py +++ b/nipype/interfaces/camino/tests/test_auto_Track.py @@ -4,91 +4,30 @@ def test_Track_inputs(): input_map = dict( - anisfile=dict( - argstr="-anisfile %s", - extensions=None, - ), - anisthresh=dict( - argstr="-anisthresh %f", - ), - args=dict( - argstr="%s", - ), - curveinterval=dict( - argstr="-curveinterval %f", - requires=["curvethresh"], - ), - curvethresh=dict( - argstr="-curvethresh %f", - ), - data_dims=dict( - argstr="-datadims %s", - units="voxels", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gzip=dict( - argstr="-gzip", - ), - in_file=dict( - argstr="-inputfile %s", - extensions=None, - position=1, - ), - inputdatatype=dict( - argstr="-inputdatatype %s", - ), - inputmodel=dict( - argstr="-inputmodel %s", - usedefault=True, - ), - interpolator=dict( - argstr="-interpolator %s", - ), - ipthresh=dict( - argstr="-ipthresh %f", - ), - maxcomponents=dict( - argstr="-maxcomponents %d", - units="NA", - ), - numpds=dict( - argstr="-numpds %d", - units="NA", - ), + 
anisfile=dict(argstr="-anisfile %s", extensions=None), + anisthresh=dict(argstr="-anisthresh %f"), + args=dict(argstr="%s"), + curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"]), + curvethresh=dict(argstr="-curvethresh %f"), + data_dims=dict(argstr="-datadims %s", units="voxels"), + environ=dict(nohash=True, usedefault=True), + gzip=dict(argstr="-gzip"), + in_file=dict(argstr="-inputfile %s", extensions=None, position=1), + inputdatatype=dict(argstr="-inputdatatype %s"), + inputmodel=dict(argstr="-inputmodel %s", usedefault=True), + interpolator=dict(argstr="-interpolator %s"), + ipthresh=dict(argstr="-ipthresh %f"), + maxcomponents=dict(argstr="-maxcomponents %d", units="NA"), + numpds=dict(argstr="-numpds %d", units="NA"), out_file=dict( - argstr="-outputfile %s", - extensions=None, - genfile=True, - position=-1, - ), - output_root=dict( - argstr="-outputroot %s", - extensions=None, - position=-1, - ), - outputtracts=dict( - argstr="-outputtracts %s", - ), - seed_file=dict( - argstr="-seedfile %s", - extensions=None, - position=2, - ), - stepsize=dict( - argstr="-stepsize %f", - requires=["tracker"], - ), - tracker=dict( - argstr="-tracker %s", - usedefault=True, - ), - voxel_dims=dict( - argstr="-voxeldims %s", - units="mm", - ), + argstr="-outputfile %s", extensions=None, genfile=True, position=-1 + ), + output_root=dict(argstr="-outputroot %s", extensions=None, position=-1), + outputtracts=dict(argstr="-outputtracts %s"), + seed_file=dict(argstr="-seedfile %s", extensions=None, position=2), + stepsize=dict(argstr="-stepsize %f", requires=["tracker"]), + tracker=dict(argstr="-tracker %s", usedefault=True), + voxel_dims=dict(argstr="-voxeldims %s", units="mm"), ) inputs = Track.input_spec() @@ -98,11 +37,7 @@ def test_Track_inputs(): def test_Track_outputs(): - output_map = dict( - tracked=dict( - extensions=None, - ), - ) + output_map = dict(tracked=dict(extensions=None)) outputs = Track.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py b/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py index a117d5d782..4e7379dbfc 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py @@ -4,91 +4,30 @@ def test_TrackBallStick_inputs(): input_map = dict( - anisfile=dict( - argstr="-anisfile %s", - extensions=None, - ), - anisthresh=dict( - argstr="-anisthresh %f", - ), - args=dict( - argstr="%s", - ), - curveinterval=dict( - argstr="-curveinterval %f", - requires=["curvethresh"], - ), - curvethresh=dict( - argstr="-curvethresh %f", - ), - data_dims=dict( - argstr="-datadims %s", - units="voxels", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gzip=dict( - argstr="-gzip", - ), - in_file=dict( - argstr="-inputfile %s", - extensions=None, - position=1, - ), - inputdatatype=dict( - argstr="-inputdatatype %s", - ), - inputmodel=dict( - argstr="-inputmodel %s", - usedefault=True, - ), - interpolator=dict( - argstr="-interpolator %s", - ), - ipthresh=dict( - argstr="-ipthresh %f", - ), - maxcomponents=dict( - argstr="-maxcomponents %d", - units="NA", - ), - numpds=dict( - argstr="-numpds %d", - units="NA", - ), + anisfile=dict(argstr="-anisfile %s", extensions=None), + anisthresh=dict(argstr="-anisthresh %f"), + args=dict(argstr="%s"), + curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"]), + curvethresh=dict(argstr="-curvethresh %f"), + data_dims=dict(argstr="-datadims %s", 
units="voxels"), + environ=dict(nohash=True, usedefault=True), + gzip=dict(argstr="-gzip"), + in_file=dict(argstr="-inputfile %s", extensions=None, position=1), + inputdatatype=dict(argstr="-inputdatatype %s"), + inputmodel=dict(argstr="-inputmodel %s", usedefault=True), + interpolator=dict(argstr="-interpolator %s"), + ipthresh=dict(argstr="-ipthresh %f"), + maxcomponents=dict(argstr="-maxcomponents %d", units="NA"), + numpds=dict(argstr="-numpds %d", units="NA"), out_file=dict( - argstr="-outputfile %s", - extensions=None, - genfile=True, - position=-1, - ), - output_root=dict( - argstr="-outputroot %s", - extensions=None, - position=-1, - ), - outputtracts=dict( - argstr="-outputtracts %s", - ), - seed_file=dict( - argstr="-seedfile %s", - extensions=None, - position=2, - ), - stepsize=dict( - argstr="-stepsize %f", - requires=["tracker"], - ), - tracker=dict( - argstr="-tracker %s", - usedefault=True, - ), - voxel_dims=dict( - argstr="-voxeldims %s", - units="mm", - ), + argstr="-outputfile %s", extensions=None, genfile=True, position=-1 + ), + output_root=dict(argstr="-outputroot %s", extensions=None, position=-1), + outputtracts=dict(argstr="-outputtracts %s"), + seed_file=dict(argstr="-seedfile %s", extensions=None, position=2), + stepsize=dict(argstr="-stepsize %f", requires=["tracker"]), + tracker=dict(argstr="-tracker %s", usedefault=True), + voxel_dims=dict(argstr="-voxeldims %s", units="mm"), ) inputs = TrackBallStick.input_spec() @@ -98,11 +37,7 @@ def test_TrackBallStick_inputs(): def test_TrackBallStick_outputs(): - output_map = dict( - tracked=dict( - extensions=None, - ), - ) + output_map = dict(tracked=dict(extensions=None)) outputs = TrackBallStick.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py b/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py index 56ca8ece97..dd78dcb8fc 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py @@ -4,122 +4,39 @@ def test_TrackBayesDirac_inputs(): input_map = dict( - anisfile=dict( - argstr="-anisfile %s", - extensions=None, - ), - anisthresh=dict( - argstr="-anisthresh %f", - ), - args=dict( - argstr="%s", - ), - curveinterval=dict( - argstr="-curveinterval %f", - requires=["curvethresh"], - ), - curvepriorg=dict( - argstr="-curvepriorg %G", - ), - curvepriork=dict( - argstr="-curvepriork %G", - ), - curvethresh=dict( - argstr="-curvethresh %f", - ), - data_dims=dict( - argstr="-datadims %s", - units="voxels", - ), - datamodel=dict( - argstr="-datamodel %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - extpriordatatype=dict( - argstr="-extpriordatatype %s", - ), - extpriorfile=dict( - argstr="-extpriorfile %s", - extensions=None, - ), - gzip=dict( - argstr="-gzip", - ), - in_file=dict( - argstr="-inputfile %s", - extensions=None, - position=1, - ), - inputdatatype=dict( - argstr="-inputdatatype %s", - ), - inputmodel=dict( - argstr="-inputmodel %s", - usedefault=True, - ), - interpolator=dict( - argstr="-interpolator %s", - ), - ipthresh=dict( - argstr="-ipthresh %f", - ), - iterations=dict( - argstr="-iterations %d", - units="NA", - ), - maxcomponents=dict( - argstr="-maxcomponents %d", - units="NA", - ), - numpds=dict( - argstr="-numpds %d", - units="NA", - ), + anisfile=dict(argstr="-anisfile %s", extensions=None), + anisthresh=dict(argstr="-anisthresh %f"), + args=dict(argstr="%s"), + curveinterval=dict(argstr="-curveinterval %f", 
requires=["curvethresh"]), + curvepriorg=dict(argstr="-curvepriorg %G"), + curvepriork=dict(argstr="-curvepriork %G"), + curvethresh=dict(argstr="-curvethresh %f"), + data_dims=dict(argstr="-datadims %s", units="voxels"), + datamodel=dict(argstr="-datamodel %s"), + environ=dict(nohash=True, usedefault=True), + extpriordatatype=dict(argstr="-extpriordatatype %s"), + extpriorfile=dict(argstr="-extpriorfile %s", extensions=None), + gzip=dict(argstr="-gzip"), + in_file=dict(argstr="-inputfile %s", extensions=None, position=1), + inputdatatype=dict(argstr="-inputdatatype %s"), + inputmodel=dict(argstr="-inputmodel %s", usedefault=True), + interpolator=dict(argstr="-interpolator %s"), + ipthresh=dict(argstr="-ipthresh %f"), + iterations=dict(argstr="-iterations %d", units="NA"), + maxcomponents=dict(argstr="-maxcomponents %d", units="NA"), + numpds=dict(argstr="-numpds %d", units="NA"), out_file=dict( - argstr="-outputfile %s", - extensions=None, - genfile=True, - position=-1, - ), - output_root=dict( - argstr="-outputroot %s", - extensions=None, - position=-1, - ), - outputtracts=dict( - argstr="-outputtracts %s", - ), - pdf=dict( - argstr="-pdf %s", - ), - pointset=dict( - argstr="-pointset %s", - ), - scheme_file=dict( - argstr="-schemefile %s", - extensions=None, - mandatory=True, - ), - seed_file=dict( - argstr="-seedfile %s", - extensions=None, - position=2, - ), - stepsize=dict( - argstr="-stepsize %f", - requires=["tracker"], - ), - tracker=dict( - argstr="-tracker %s", - usedefault=True, - ), - voxel_dims=dict( - argstr="-voxeldims %s", - units="mm", - ), + argstr="-outputfile %s", extensions=None, genfile=True, position=-1 + ), + output_root=dict(argstr="-outputroot %s", extensions=None, position=-1), + outputtracts=dict(argstr="-outputtracts %s"), + pdf=dict(argstr="-pdf %s"), + pointset=dict(argstr="-pointset %s"), + scheme_file=dict(argstr="-schemefile %s", extensions=None, mandatory=True), + seed_file=dict(argstr="-seedfile %s", extensions=None, position=2), + stepsize=dict(argstr="-stepsize %f", requires=["tracker"]), + tracker=dict(argstr="-tracker %s", usedefault=True), + voxel_dims=dict(argstr="-voxeldims %s", units="mm"), ) inputs = TrackBayesDirac.input_spec() @@ -129,11 +46,7 @@ def test_TrackBayesDirac_inputs(): def test_TrackBayesDirac_outputs(): - output_map = dict( - tracked=dict( - extensions=None, - ), - ) + output_map = dict(tracked=dict(extensions=None)) outputs = TrackBayesDirac.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py index dbd8f89478..21faf179cc 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py @@ -4,99 +4,32 @@ def test_TrackBedpostxDeter_inputs(): input_map = dict( - anisfile=dict( - argstr="-anisfile %s", - extensions=None, - ), - anisthresh=dict( - argstr="-anisthresh %f", - ), - args=dict( - argstr="%s", - ), - bedpostxdir=dict( - argstr="-bedpostxdir %s", - mandatory=True, - ), - curveinterval=dict( - argstr="-curveinterval %f", - requires=["curvethresh"], - ), - curvethresh=dict( - argstr="-curvethresh %f", - ), - data_dims=dict( - argstr="-datadims %s", - units="voxels", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gzip=dict( - argstr="-gzip", - ), - in_file=dict( - argstr="-inputfile %s", - extensions=None, - position=1, - ), - inputdatatype=dict( - argstr="-inputdatatype %s", - ), - 
inputmodel=dict( - argstr="-inputmodel %s", - usedefault=True, - ), - interpolator=dict( - argstr="-interpolator %s", - ), - ipthresh=dict( - argstr="-ipthresh %f", - ), - maxcomponents=dict( - argstr="-maxcomponents %d", - units="NA", - ), - min_vol_frac=dict( - argstr="-bedpostxminf %d", - units="NA", - ), - numpds=dict( - argstr="-numpds %d", - units="NA", - ), + anisfile=dict(argstr="-anisfile %s", extensions=None), + anisthresh=dict(argstr="-anisthresh %f"), + args=dict(argstr="%s"), + bedpostxdir=dict(argstr="-bedpostxdir %s", mandatory=True), + curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"]), + curvethresh=dict(argstr="-curvethresh %f"), + data_dims=dict(argstr="-datadims %s", units="voxels"), + environ=dict(nohash=True, usedefault=True), + gzip=dict(argstr="-gzip"), + in_file=dict(argstr="-inputfile %s", extensions=None, position=1), + inputdatatype=dict(argstr="-inputdatatype %s"), + inputmodel=dict(argstr="-inputmodel %s", usedefault=True), + interpolator=dict(argstr="-interpolator %s"), + ipthresh=dict(argstr="-ipthresh %f"), + maxcomponents=dict(argstr="-maxcomponents %d", units="NA"), + min_vol_frac=dict(argstr="-bedpostxminf %d", units="NA"), + numpds=dict(argstr="-numpds %d", units="NA"), out_file=dict( - argstr="-outputfile %s", - extensions=None, - genfile=True, - position=-1, - ), - output_root=dict( - argstr="-outputroot %s", - extensions=None, - position=-1, - ), - outputtracts=dict( - argstr="-outputtracts %s", - ), - seed_file=dict( - argstr="-seedfile %s", - extensions=None, - position=2, - ), - stepsize=dict( - argstr="-stepsize %f", - requires=["tracker"], - ), - tracker=dict( - argstr="-tracker %s", - usedefault=True, - ), - voxel_dims=dict( - argstr="-voxeldims %s", - units="mm", - ), + argstr="-outputfile %s", extensions=None, genfile=True, position=-1 + ), + output_root=dict(argstr="-outputroot %s", extensions=None, position=-1), + outputtracts=dict(argstr="-outputtracts %s"), + seed_file=dict(argstr="-seedfile %s", extensions=None, position=2), + stepsize=dict(argstr="-stepsize %f", requires=["tracker"]), + tracker=dict(argstr="-tracker %s", usedefault=True), + voxel_dims=dict(argstr="-voxeldims %s", units="mm"), ) inputs = TrackBedpostxDeter.input_spec() @@ -106,11 +39,7 @@ def test_TrackBedpostxDeter_inputs(): def test_TrackBedpostxDeter_outputs(): - output_map = dict( - tracked=dict( - extensions=None, - ), - ) + output_map = dict(tracked=dict(extensions=None)) outputs = TrackBedpostxDeter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py index 7d1baa0e43..be378e55ac 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py @@ -4,103 +4,33 @@ def test_TrackBedpostxProba_inputs(): input_map = dict( - anisfile=dict( - argstr="-anisfile %s", - extensions=None, - ), - anisthresh=dict( - argstr="-anisthresh %f", - ), - args=dict( - argstr="%s", - ), - bedpostxdir=dict( - argstr="-bedpostxdir %s", - mandatory=True, - ), - curveinterval=dict( - argstr="-curveinterval %f", - requires=["curvethresh"], - ), - curvethresh=dict( - argstr="-curvethresh %f", - ), - data_dims=dict( - argstr="-datadims %s", - units="voxels", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gzip=dict( - argstr="-gzip", - ), - in_file=dict( - argstr="-inputfile %s", - extensions=None, - position=1, - ), - 
inputdatatype=dict( - argstr="-inputdatatype %s", - ), - inputmodel=dict( - argstr="-inputmodel %s", - usedefault=True, - ), - interpolator=dict( - argstr="-interpolator %s", - ), - ipthresh=dict( - argstr="-ipthresh %f", - ), - iterations=dict( - argstr="-iterations %d", - units="NA", - ), - maxcomponents=dict( - argstr="-maxcomponents %d", - units="NA", - ), - min_vol_frac=dict( - argstr="-bedpostxminf %d", - units="NA", - ), - numpds=dict( - argstr="-numpds %d", - units="NA", - ), + anisfile=dict(argstr="-anisfile %s", extensions=None), + anisthresh=dict(argstr="-anisthresh %f"), + args=dict(argstr="%s"), + bedpostxdir=dict(argstr="-bedpostxdir %s", mandatory=True), + curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"]), + curvethresh=dict(argstr="-curvethresh %f"), + data_dims=dict(argstr="-datadims %s", units="voxels"), + environ=dict(nohash=True, usedefault=True), + gzip=dict(argstr="-gzip"), + in_file=dict(argstr="-inputfile %s", extensions=None, position=1), + inputdatatype=dict(argstr="-inputdatatype %s"), + inputmodel=dict(argstr="-inputmodel %s", usedefault=True), + interpolator=dict(argstr="-interpolator %s"), + ipthresh=dict(argstr="-ipthresh %f"), + iterations=dict(argstr="-iterations %d", units="NA"), + maxcomponents=dict(argstr="-maxcomponents %d", units="NA"), + min_vol_frac=dict(argstr="-bedpostxminf %d", units="NA"), + numpds=dict(argstr="-numpds %d", units="NA"), out_file=dict( - argstr="-outputfile %s", - extensions=None, - genfile=True, - position=-1, - ), - output_root=dict( - argstr="-outputroot %s", - extensions=None, - position=-1, - ), - outputtracts=dict( - argstr="-outputtracts %s", - ), - seed_file=dict( - argstr="-seedfile %s", - extensions=None, - position=2, - ), - stepsize=dict( - argstr="-stepsize %f", - requires=["tracker"], - ), - tracker=dict( - argstr="-tracker %s", - usedefault=True, - ), - voxel_dims=dict( - argstr="-voxeldims %s", - units="mm", - ), + argstr="-outputfile %s", extensions=None, genfile=True, position=-1 + ), + output_root=dict(argstr="-outputroot %s", extensions=None, position=-1), + outputtracts=dict(argstr="-outputtracts %s"), + seed_file=dict(argstr="-seedfile %s", extensions=None, position=2), + stepsize=dict(argstr="-stepsize %f", requires=["tracker"]), + tracker=dict(argstr="-tracker %s", usedefault=True), + voxel_dims=dict(argstr="-voxeldims %s", units="mm"), ) inputs = TrackBedpostxProba.input_spec() @@ -110,11 +40,7 @@ def test_TrackBedpostxProba_inputs(): def test_TrackBedpostxProba_outputs(): - output_map = dict( - tracked=dict( - extensions=None, - ), - ) + output_map = dict(tracked=dict(extensions=None)) outputs = TrackBedpostxProba.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py b/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py index 75cd2e3d11..6562af75c2 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py @@ -4,111 +4,35 @@ def test_TrackBootstrap_inputs(): input_map = dict( - anisfile=dict( - argstr="-anisfile %s", - extensions=None, - ), - anisthresh=dict( - argstr="-anisthresh %f", - ), - args=dict( - argstr="%s", - ), - bgmask=dict( - argstr="-bgmask %s", - extensions=None, - ), - bsdatafiles=dict( - argstr="-bsdatafile %s", - mandatory=True, - ), - curveinterval=dict( - argstr="-curveinterval %f", - requires=["curvethresh"], - ), - curvethresh=dict( - argstr="-curvethresh %f", - ), - data_dims=dict( - 
argstr="-datadims %s", - units="voxels", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gzip=dict( - argstr="-gzip", - ), - in_file=dict( - argstr="-inputfile %s", - extensions=None, - position=1, - ), - inputdatatype=dict( - argstr="-inputdatatype %s", - ), - inputmodel=dict( - argstr="-inputmodel %s", - usedefault=True, - ), - interpolator=dict( - argstr="-interpolator %s", - ), - inversion=dict( - argstr="-inversion %s", - ), - ipthresh=dict( - argstr="-ipthresh %f", - ), - iterations=dict( - argstr="-iterations %d", - units="NA", - ), - maxcomponents=dict( - argstr="-maxcomponents %d", - units="NA", - ), - numpds=dict( - argstr="-numpds %d", - units="NA", - ), + anisfile=dict(argstr="-anisfile %s", extensions=None), + anisthresh=dict(argstr="-anisthresh %f"), + args=dict(argstr="%s"), + bgmask=dict(argstr="-bgmask %s", extensions=None), + bsdatafiles=dict(argstr="-bsdatafile %s", mandatory=True), + curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"]), + curvethresh=dict(argstr="-curvethresh %f"), + data_dims=dict(argstr="-datadims %s", units="voxels"), + environ=dict(nohash=True, usedefault=True), + gzip=dict(argstr="-gzip"), + in_file=dict(argstr="-inputfile %s", extensions=None, position=1), + inputdatatype=dict(argstr="-inputdatatype %s"), + inputmodel=dict(argstr="-inputmodel %s", usedefault=True), + interpolator=dict(argstr="-interpolator %s"), + inversion=dict(argstr="-inversion %s"), + ipthresh=dict(argstr="-ipthresh %f"), + iterations=dict(argstr="-iterations %d", units="NA"), + maxcomponents=dict(argstr="-maxcomponents %d", units="NA"), + numpds=dict(argstr="-numpds %d", units="NA"), out_file=dict( - argstr="-outputfile %s", - extensions=None, - genfile=True, - position=-1, - ), - output_root=dict( - argstr="-outputroot %s", - extensions=None, - position=-1, - ), - outputtracts=dict( - argstr="-outputtracts %s", - ), - scheme_file=dict( - argstr="-schemefile %s", - extensions=None, - mandatory=True, - ), - seed_file=dict( - argstr="-seedfile %s", - extensions=None, - position=2, - ), - stepsize=dict( - argstr="-stepsize %f", - requires=["tracker"], - ), - tracker=dict( - argstr="-tracker %s", - usedefault=True, - ), - voxel_dims=dict( - argstr="-voxeldims %s", - units="mm", - ), + argstr="-outputfile %s", extensions=None, genfile=True, position=-1 + ), + output_root=dict(argstr="-outputroot %s", extensions=None, position=-1), + outputtracts=dict(argstr="-outputtracts %s"), + scheme_file=dict(argstr="-schemefile %s", extensions=None, mandatory=True), + seed_file=dict(argstr="-seedfile %s", extensions=None, position=2), + stepsize=dict(argstr="-stepsize %f", requires=["tracker"]), + tracker=dict(argstr="-tracker %s", usedefault=True), + voxel_dims=dict(argstr="-voxeldims %s", units="mm"), ) inputs = TrackBootstrap.input_spec() @@ -118,11 +42,7 @@ def test_TrackBootstrap_inputs(): def test_TrackBootstrap_outputs(): - output_map = dict( - tracked=dict( - extensions=None, - ), - ) + output_map = dict(tracked=dict(extensions=None)) outputs = TrackBootstrap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackDT.py b/nipype/interfaces/camino/tests/test_auto_TrackDT.py index c60ba7b5f5..2a84aadbd9 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackDT.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackDT.py @@ -4,91 +4,30 @@ def test_TrackDT_inputs(): input_map = dict( - anisfile=dict( - argstr="-anisfile %s", - extensions=None, - ), - anisthresh=dict( - argstr="-anisthresh 
%f", - ), - args=dict( - argstr="%s", - ), - curveinterval=dict( - argstr="-curveinterval %f", - requires=["curvethresh"], - ), - curvethresh=dict( - argstr="-curvethresh %f", - ), - data_dims=dict( - argstr="-datadims %s", - units="voxels", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gzip=dict( - argstr="-gzip", - ), - in_file=dict( - argstr="-inputfile %s", - extensions=None, - position=1, - ), - inputdatatype=dict( - argstr="-inputdatatype %s", - ), - inputmodel=dict( - argstr="-inputmodel %s", - usedefault=True, - ), - interpolator=dict( - argstr="-interpolator %s", - ), - ipthresh=dict( - argstr="-ipthresh %f", - ), - maxcomponents=dict( - argstr="-maxcomponents %d", - units="NA", - ), - numpds=dict( - argstr="-numpds %d", - units="NA", - ), + anisfile=dict(argstr="-anisfile %s", extensions=None), + anisthresh=dict(argstr="-anisthresh %f"), + args=dict(argstr="%s"), + curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"]), + curvethresh=dict(argstr="-curvethresh %f"), + data_dims=dict(argstr="-datadims %s", units="voxels"), + environ=dict(nohash=True, usedefault=True), + gzip=dict(argstr="-gzip"), + in_file=dict(argstr="-inputfile %s", extensions=None, position=1), + inputdatatype=dict(argstr="-inputdatatype %s"), + inputmodel=dict(argstr="-inputmodel %s", usedefault=True), + interpolator=dict(argstr="-interpolator %s"), + ipthresh=dict(argstr="-ipthresh %f"), + maxcomponents=dict(argstr="-maxcomponents %d", units="NA"), + numpds=dict(argstr="-numpds %d", units="NA"), out_file=dict( - argstr="-outputfile %s", - extensions=None, - genfile=True, - position=-1, - ), - output_root=dict( - argstr="-outputroot %s", - extensions=None, - position=-1, - ), - outputtracts=dict( - argstr="-outputtracts %s", - ), - seed_file=dict( - argstr="-seedfile %s", - extensions=None, - position=2, - ), - stepsize=dict( - argstr="-stepsize %f", - requires=["tracker"], - ), - tracker=dict( - argstr="-tracker %s", - usedefault=True, - ), - voxel_dims=dict( - argstr="-voxeldims %s", - units="mm", - ), + argstr="-outputfile %s", extensions=None, genfile=True, position=-1 + ), + output_root=dict(argstr="-outputroot %s", extensions=None, position=-1), + outputtracts=dict(argstr="-outputtracts %s"), + seed_file=dict(argstr="-seedfile %s", extensions=None, position=2), + stepsize=dict(argstr="-stepsize %f", requires=["tracker"]), + tracker=dict(argstr="-tracker %s", usedefault=True), + voxel_dims=dict(argstr="-voxeldims %s", units="mm"), ) inputs = TrackDT.input_spec() @@ -98,11 +37,7 @@ def test_TrackDT_inputs(): def test_TrackDT_outputs(): - output_map = dict( - tracked=dict( - extensions=None, - ), - ) + output_map = dict(tracked=dict(extensions=None)) outputs = TrackDT.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackPICo.py b/nipype/interfaces/camino/tests/test_auto_TrackPICo.py index 1d3647e151..6392df92c0 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackPICo.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackPICo.py @@ -4,98 +4,32 @@ def test_TrackPICo_inputs(): input_map = dict( - anisfile=dict( - argstr="-anisfile %s", - extensions=None, - ), - anisthresh=dict( - argstr="-anisthresh %f", - ), - args=dict( - argstr="%s", - ), - curveinterval=dict( - argstr="-curveinterval %f", - requires=["curvethresh"], - ), - curvethresh=dict( - argstr="-curvethresh %f", - ), - data_dims=dict( - argstr="-datadims %s", - units="voxels", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - 
gzip=dict( - argstr="-gzip", - ), - in_file=dict( - argstr="-inputfile %s", - extensions=None, - position=1, - ), - inputdatatype=dict( - argstr="-inputdatatype %s", - ), - inputmodel=dict( - argstr="-inputmodel %s", - usedefault=True, - ), - interpolator=dict( - argstr="-interpolator %s", - ), - ipthresh=dict( - argstr="-ipthresh %f", - ), - iterations=dict( - argstr="-iterations %d", - units="NA", - ), - maxcomponents=dict( - argstr="-maxcomponents %d", - units="NA", - ), - numpds=dict( - argstr="-numpds %d", - units="NA", - ), + anisfile=dict(argstr="-anisfile %s", extensions=None), + anisthresh=dict(argstr="-anisthresh %f"), + args=dict(argstr="%s"), + curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"]), + curvethresh=dict(argstr="-curvethresh %f"), + data_dims=dict(argstr="-datadims %s", units="voxels"), + environ=dict(nohash=True, usedefault=True), + gzip=dict(argstr="-gzip"), + in_file=dict(argstr="-inputfile %s", extensions=None, position=1), + inputdatatype=dict(argstr="-inputdatatype %s"), + inputmodel=dict(argstr="-inputmodel %s", usedefault=True), + interpolator=dict(argstr="-interpolator %s"), + ipthresh=dict(argstr="-ipthresh %f"), + iterations=dict(argstr="-iterations %d", units="NA"), + maxcomponents=dict(argstr="-maxcomponents %d", units="NA"), + numpds=dict(argstr="-numpds %d", units="NA"), out_file=dict( - argstr="-outputfile %s", - extensions=None, - genfile=True, - position=-1, - ), - output_root=dict( - argstr="-outputroot %s", - extensions=None, - position=-1, - ), - outputtracts=dict( - argstr="-outputtracts %s", - ), - pdf=dict( - argstr="-pdf %s", - ), - seed_file=dict( - argstr="-seedfile %s", - extensions=None, - position=2, - ), - stepsize=dict( - argstr="-stepsize %f", - requires=["tracker"], - ), - tracker=dict( - argstr="-tracker %s", - usedefault=True, - ), - voxel_dims=dict( - argstr="-voxeldims %s", - units="mm", - ), + argstr="-outputfile %s", extensions=None, genfile=True, position=-1 + ), + output_root=dict(argstr="-outputroot %s", extensions=None, position=-1), + outputtracts=dict(argstr="-outputtracts %s"), + pdf=dict(argstr="-pdf %s"), + seed_file=dict(argstr="-seedfile %s", extensions=None, position=2), + stepsize=dict(argstr="-stepsize %f", requires=["tracker"]), + tracker=dict(argstr="-tracker %s", usedefault=True), + voxel_dims=dict(argstr="-voxeldims %s", units="mm"), ) inputs = TrackPICo.input_spec() @@ -105,11 +39,7 @@ def test_TrackPICo_inputs(): def test_TrackPICo_outputs(): - output_map = dict( - tracked=dict( - extensions=None, - ), - ) + output_map = dict(tracked=dict(extensions=None)) outputs = TrackPICo.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TractShredder.py b/nipype/interfaces/camino/tests/test_auto_TractShredder.py index 07678c5d64..83146b1ca4 100644 --- a/nipype/interfaces/camino/tests/test_auto_TractShredder.py +++ b/nipype/interfaces/camino/tests/test_auto_TractShredder.py @@ -4,40 +4,13 @@ def test_TractShredder_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bunchsize=dict( - argstr="%d", - position=2, - units="NA", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="< %s", - extensions=None, - mandatory=True, - position=-2, - ), - offset=dict( - argstr="%d", - position=1, - units="NA", - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), - space=dict( - argstr="%d", - position=3, - units="NA", - ), + args=dict(argstr="%s"), + 
bunchsize=dict(argstr="%d", position=2, units="NA"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=-2), + offset=dict(argstr="%d", position=1, units="NA"), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + space=dict(argstr="%d", position=3, units="NA"), ) inputs = TractShredder.input_spec() @@ -47,11 +20,7 @@ def test_TractShredder_inputs(): def test_TractShredder_outputs(): - output_map = dict( - shredded=dict( - extensions=None, - ), - ) + output_map = dict(shredded=dict(extensions=None)) outputs = TractShredder.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py b/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py index 194f233cc1..d466743ba1 100644 --- a/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py +++ b/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py @@ -4,58 +4,18 @@ def test_VtkStreamlines_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - colourorient=dict( - argstr="-colourorient", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr=" < %s", - extensions=None, - mandatory=True, - position=-2, - ), - inputmodel=dict( - argstr="-inputmodel %s", - usedefault=True, - ), - interpolate=dict( - argstr="-interpolate", - ), - interpolatescalars=dict( - argstr="-interpolatescalars", - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), - scalar_file=dict( - argstr="-scalarfile %s", - extensions=None, - position=3, - ), - seed_file=dict( - argstr="-seedfile %s", - extensions=None, - position=1, - ), - target_file=dict( - argstr="-targetfile %s", - extensions=None, - position=2, - ), - voxeldims=dict( - argstr="-voxeldims %s", - position=4, - units="mm", - ), + args=dict(argstr="%s"), + colourorient=dict(argstr="-colourorient"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr=" < %s", extensions=None, mandatory=True, position=-2), + inputmodel=dict(argstr="-inputmodel %s", usedefault=True), + interpolate=dict(argstr="-interpolate"), + interpolatescalars=dict(argstr="-interpolatescalars"), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + scalar_file=dict(argstr="-scalarfile %s", extensions=None, position=3), + seed_file=dict(argstr="-seedfile %s", extensions=None, position=1), + target_file=dict(argstr="-targetfile %s", extensions=None, position=2), + voxeldims=dict(argstr="-voxeldims %s", position=4, units="mm"), ) inputs = VtkStreamlines.input_spec() @@ -65,11 +25,7 @@ def test_VtkStreamlines_inputs(): def test_VtkStreamlines_outputs(): - output_map = dict( - vtk=dict( - extensions=None, - ), - ) + output_map = dict(vtk=dict(extensions=None)) outputs = VtkStreamlines.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py b/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py index fb076c1107..1d4fe9d7ec 100644 --- a/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py +++ b/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py @@ -4,52 +4,16 @@ def test_Camino2Trackvis_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - data_dims=dict( - argstr="-d %s", - mandatory=True, - position=4, - sep=",", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-i %s", - extensions=None, 
- mandatory=True, - position=1, - ), - min_length=dict( - argstr="-l %d", - position=3, - units="mm", - ), - nifti_file=dict( - argstr="--nifti %s", - extensions=None, - position=7, - ), - out_file=dict( - argstr="-o %s", - extensions=None, - genfile=True, - position=2, - ), - voxel_dims=dict( - argstr="-x %s", - mandatory=True, - position=5, - sep=",", - ), + args=dict(argstr="%s"), + data_dims=dict(argstr="-d %s", mandatory=True, position=4, sep=","), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=1), + min_length=dict(argstr="-l %d", position=3, units="mm"), + nifti_file=dict(argstr="--nifti %s", extensions=None, position=7), + out_file=dict(argstr="-o %s", extensions=None, genfile=True, position=2), + voxel_dims=dict(argstr="-x %s", mandatory=True, position=5, sep=","), voxel_order=dict( - argstr="--voxel-order %s", - extensions=None, - mandatory=True, - position=6, + argstr="--voxel-order %s", extensions=None, mandatory=True, position=6 ), ) inputs = Camino2Trackvis.input_spec() @@ -60,11 +24,7 @@ def test_Camino2Trackvis_inputs(): def test_Camino2Trackvis_outputs(): - output_map = dict( - trackvis=dict( - extensions=None, - ), - ) + output_map = dict(trackvis=dict(extensions=None)) outputs = Camino2Trackvis.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py b/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py index ec7ed22d0c..e06d93c713 100644 --- a/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py +++ b/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py @@ -4,30 +4,11 @@ def test_Trackvis2Camino_inputs(): input_map = dict( - append_file=dict( - argstr="-a %s", - extensions=None, - position=2, - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - position=1, - ), - out_file=dict( - argstr="-o %s", - extensions=None, - genfile=True, - position=2, - ), + append_file=dict(argstr="-a %s", extensions=None, position=2), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=1), + out_file=dict(argstr="-o %s", extensions=None, genfile=True, position=2), ) inputs = Trackvis2Camino.input_spec() @@ -37,11 +18,7 @@ def test_Trackvis2Camino_inputs(): def test_Trackvis2Camino_outputs(): - output_map = dict( - camino=dict( - extensions=None, - ), - ) + output_map = dict(camino=dict(extensions=None)) outputs = Trackvis2Camino.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/cat12/tests/test_auto_CAT12SANLMDenoising.py b/nipype/interfaces/cat12/tests/test_auto_CAT12SANLMDenoising.py index 43c0d5e4ea..1ded9a16e7 100644 --- a/nipype/interfaces/cat12/tests/test_auto_CAT12SANLMDenoising.py +++ b/nipype/interfaces/cat12/tests/test_auto_CAT12SANLMDenoising.py @@ -4,53 +4,20 @@ def test_CAT12SANLMDenoising_inputs(): input_map = dict( - addnoise=dict( - field="addnoise", - usedefault=True, - ), - filename_prefix=dict( - field="prefix", - usedefault=True, - ), - filename_suffix=dict( - field="suffix", - usedefault=True, - ), - in_files=dict( - copyfile=False, - field="data", - mandatory=True, - ), - intlim=dict( - field="intlim", - usedefault=True, - ), + addnoise=dict(field="addnoise", usedefault=True), + filename_prefix=dict(field="prefix", 
usedefault=True), + filename_suffix=dict(field="suffix", usedefault=True), + in_files=dict(copyfile=False, field="data", mandatory=True), + intlim=dict(field="intlim", usedefault=True), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), - noisecorr_strength=dict( - field="nlmfilter.optimized.NCstr", - usedefault=True, - ), + mfile=dict(usedefault=True), + noisecorr_strength=dict(field="nlmfilter.optimized.NCstr", usedefault=True), paths=dict(), - replace_nan_and_inf=dict( - field="replaceNANandINF", - usedefault=True, - ), - rician=dict( - field="rician", - usedefault=True, - ), - spm_type=dict( - field="spm_type", - usedefault=True, - ), + replace_nan_and_inf=dict(field="replaceNANandINF", usedefault=True), + rician=dict(field="rician", usedefault=True), + spm_type=dict(field="spm_type", usedefault=True), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True), ) inputs = CAT12SANLMDenoising.input_spec() @@ -60,11 +27,7 @@ def test_CAT12SANLMDenoising_inputs(): def test_CAT12SANLMDenoising_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = CAT12SANLMDenoising.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/cat12/tests/test_auto_CAT12Segment.py b/nipype/interfaces/cat12/tests/test_auto_CAT12Segment.py index 979b3afa6b..5ebbd6af82 100644 --- a/nipype/interfaces/cat12/tests/test_auto_CAT12Segment.py +++ b/nipype/interfaces/cat12/tests/test_auto_CAT12Segment.py @@ -4,132 +4,44 @@ def test_CAT12Segment_inputs(): input_map = dict( - affine_preprocessing=dict( - field="extopts.APP", - usedefault=True, - ), - affine_regularization=dict( - field="opts.affreg", - usedefault=True, - ), - cobra=dict( - field="output.ROImenu.atlases.hammers", - usedefault=True, - ), - csf_output_dartel=dict( - field="output.CSF.dartel", - usedefault=True, - ), - csf_output_modulated=dict( - field="output.CSF.mod", - usedefault=True, - ), - csf_output_native=dict( - field="output.CSF.native", - usedefault=True, - ), - gm_output_dartel=dict( - field="output.GM.dartel", - usedefault=True, - ), - gm_output_modulated=dict( - field="output.GM.mod", - usedefault=True, - ), - gm_output_native=dict( - field="output.GM.native", - usedefault=True, - ), - hammers=dict( - field="output.ROImenu.atlases.cobra", - usedefault=True, - ), - ignore_errors=dict( - field="extopts.ignoreErrors", - usedefault=True, - ), - in_files=dict( - copyfile=False, - field="data", - mandatory=True, - ), - initial_segmentation=dict( - field="extopts.spm_kamap", - usedefault=True, - ), + affine_preprocessing=dict(field="extopts.APP", usedefault=True), + affine_regularization=dict(field="opts.affreg", usedefault=True), + cobra=dict(field="output.ROImenu.atlases.hammers", usedefault=True), + csf_output_dartel=dict(field="output.CSF.dartel", usedefault=True), + csf_output_modulated=dict(field="output.CSF.mod", usedefault=True), + csf_output_native=dict(field="output.CSF.native", usedefault=True), + gm_output_dartel=dict(field="output.GM.dartel", usedefault=True), + gm_output_modulated=dict(field="output.GM.mod", usedefault=True), + gm_output_native=dict(field="output.GM.native", usedefault=True), + hammers=dict(field="output.ROImenu.atlases.cobra", usedefault=True), + ignore_errors=dict(field="extopts.ignoreErrors", usedefault=True), + in_files=dict(copyfile=False, field="data", mandatory=True), + initial_segmentation=dict(field="extopts.spm_kamap", 
usedefault=True), internal_resampling_process=dict( - field="extopts.restypes.optimal", - maxlen=2, - minlen=2, - usedefault=True, - ), - jacobianwarped=dict( - field="output.jacobianwarped", - usedefault=True, - ), - label_dartel=dict( - field="output.label.dartel", - usedefault=True, - ), - label_native=dict( - field="output.label.native", - usedefault=True, - ), - label_warped=dict( - field="output.label.warped", - usedefault=True, - ), - las_dartel=dict( - field="output.las.dartel", - usedefault=True, - ), - las_native=dict( - field="output.las.native", - usedefault=True, - ), - las_warped=dict( - field="output.las.warped", - usedefault=True, - ), - local_adaptive_seg=dict( - field="extopts.LASstr", - usedefault=True, - ), - lpba40=dict( - field="output.ROImenu.atlases.lpba40", - usedefault=True, - ), + field="extopts.restypes.optimal", maxlen=2, minlen=2, usedefault=True + ), + jacobianwarped=dict(field="output.jacobianwarped", usedefault=True), + label_dartel=dict(field="output.label.dartel", usedefault=True), + label_native=dict(field="output.label.native", usedefault=True), + label_warped=dict(field="output.label.warped", usedefault=True), + las_dartel=dict(field="output.las.dartel", usedefault=True), + las_native=dict(field="output.las.native", usedefault=True), + las_warped=dict(field="output.las.warped", usedefault=True), + local_adaptive_seg=dict(field="extopts.LASstr", usedefault=True), + lpba40=dict(field="output.ROImenu.atlases.lpba40", usedefault=True), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), - n_jobs=dict( - field="nproc", - mandatory=True, - usedefault=True, - ), + mfile=dict(usedefault=True), + n_jobs=dict(field="nproc", mandatory=True, usedefault=True), neuromorphometrics=dict( - field="output.ROImenu.atlases.neuromorphometrics", - usedefault=True, - ), - output_labelnative=dict( - field="output.labelnative", - usedefault=True, + field="output.ROImenu.atlases.neuromorphometrics", usedefault=True ), + output_labelnative=dict(field="output.labelnative", usedefault=True), own_atlas=dict( - copyfile=False, - field="output.ROImenu.atlases.ownatlas", - mandatory=False, + copyfile=False, field="output.ROImenu.atlases.ownatlas", mandatory=False ), paths=dict(), - power_spm_inhomogeneity_correction=dict( - field="opts.biasacc", - usedefault=True, - ), - save_bias_corrected=dict( - field="output.bias.warped", - usedefault=True, - ), + power_spm_inhomogeneity_correction=dict(field="opts.biasacc", usedefault=True), + save_bias_corrected=dict(field="output.bias.warped", usedefault=True), shooting_tpm=dict( copyfile=False, extensions=[".hdr", ".img", ".img.gz", ".nii"], @@ -156,54 +68,18 @@ def test_CAT12Segment_inputs(): extensions=[".hdr", ".img", ".img.gz", ".nii"], mandatory=False, ), - skull_strip=dict( - field="extopts.gcutstr", - usedefault=True, - ), - surface_and_thickness_estimation=dict( - field="surface", - usedefault=True, - ), - surface_measures=dict( - field="output.surf_measures", - usedefault=True, - ), - tpm=dict( - copyfile=False, - field="tpm", - mandatory=False, - ), + skull_strip=dict(field="extopts.gcutstr", usedefault=True), + surface_and_thickness_estimation=dict(field="surface", usedefault=True), + surface_measures=dict(field="output.surf_measures", usedefault=True), + tpm=dict(copyfile=False, field="tpm", mandatory=False), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), - voxel_size=dict( - field="extopts.vox", - usedefault=True, - ), - warps=dict( - field="output.warps", - maxlen=2, - minlen=2, - 
usedefault=True, - ), - wm_hyper_intensity_correction=dict( - field="extopts.WMHC", - usedefault=True, - ), - wm_output_dartel=dict( - field="output.WM.dartel", - usedefault=True, - ), - wm_output_modulated=dict( - field="output.WM.mod", - usedefault=True, - ), - wm_output_native=dict( - field="output.WM.native", - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True), + voxel_size=dict(field="extopts.vox", usedefault=True), + warps=dict(field="output.warps", maxlen=2, minlen=2, usedefault=True), + wm_hyper_intensity_correction=dict(field="extopts.WMHC", usedefault=True), + wm_output_dartel=dict(field="output.WM.dartel", usedefault=True), + wm_output_modulated=dict(field="output.WM.mod", usedefault=True), + wm_output_native=dict(field="output.WM.native", usedefault=True), ) inputs = CAT12Segment.input_spec() @@ -214,61 +90,27 @@ def test_CAT12Segment_inputs(): def test_CAT12Segment_outputs(): output_map = dict( - bias_corrected_image=dict( - extensions=None, - ), - csf_dartel_image=dict( - extensions=None, - ), - csf_modulated_image=dict( - extensions=None, - ), - csf_native_image=dict( - extensions=None, - ), - gm_dartel_image=dict( - extensions=None, - ), - gm_modulated_image=dict( - extensions=None, - ), - gm_native_image=dict( - extensions=None, - ), + bias_corrected_image=dict(extensions=None), + csf_dartel_image=dict(extensions=None), + csf_modulated_image=dict(extensions=None), + csf_native_image=dict(extensions=None), + gm_dartel_image=dict(extensions=None), + gm_modulated_image=dict(extensions=None), + gm_native_image=dict(extensions=None), label_files=dict(), - label_roi=dict( - extensions=None, - ), - label_rois=dict( - extensions=None, - ), - lh_central_surface=dict( - extensions=None, - ), - lh_sphere_surface=dict( - extensions=None, - ), + label_roi=dict(extensions=None), + label_rois=dict(extensions=None), + lh_central_surface=dict(extensions=None), + lh_sphere_surface=dict(extensions=None), mri_images=dict(), - report=dict( - extensions=None, - ), + report=dict(extensions=None), report_files=dict(), - rh_central_surface=dict( - extensions=None, - ), - rh_sphere_surface=dict( - extensions=None, - ), + rh_central_surface=dict(extensions=None), + rh_sphere_surface=dict(extensions=None), surface_files=dict(), - wm_dartel_image=dict( - extensions=None, - ), - wm_modulated_image=dict( - extensions=None, - ), - wm_native_image=dict( - extensions=None, - ), + wm_dartel_image=dict(extensions=None), + wm_modulated_image=dict(extensions=None), + wm_native_image=dict(extensions=None), ) outputs = CAT12Segment.output_spec() diff --git a/nipype/interfaces/cat12/tests/test_auto_ExtractAdditionalSurfaceParameters.py b/nipype/interfaces/cat12/tests/test_auto_ExtractAdditionalSurfaceParameters.py index cde7f2057e..2be548cbd8 100644 --- a/nipype/interfaces/cat12/tests/test_auto_ExtractAdditionalSurfaceParameters.py +++ b/nipype/interfaces/cat12/tests/test_auto_ExtractAdditionalSurfaceParameters.py @@ -4,45 +4,18 @@ def test_ExtractAdditionalSurfaceParameters_inputs(): input_map = dict( - area=dict( - field="area", - usedefault=True, - ), - depth=dict( - field="SD", - usedefault=True, - ), - fractal_dimension=dict( - field="FD", - usedefault=True, - ), - gmv=dict( - field="gmv", - usedefault=True, - ), - gyrification=dict( - field="GI", - usedefault=True, - ), - left_central_surfaces=dict( - copyfile=False, - field="data_surf", - mandatory=True, - ), + area=dict(field="area", usedefault=True), + depth=dict(field="SD", usedefault=True), + fractal_dimension=dict(field="FD", 
usedefault=True), + gmv=dict(field="gmv", usedefault=True), + gyrification=dict(field="GI", usedefault=True), + left_central_surfaces=dict(copyfile=False, field="data_surf", mandatory=True), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), + mfile=dict(usedefault=True), paths=dict(), - surface_files=dict( - copyfile=False, - mandatory=False, - ), + surface_files=dict(copyfile=False, mandatory=False), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True), ) inputs = ExtractAdditionalSurfaceParameters.input_spec() diff --git a/nipype/interfaces/cat12/tests/test_auto_ExtractROIBasedSurfaceMeasures.py b/nipype/interfaces/cat12/tests/test_auto_ExtractROIBasedSurfaceMeasures.py index ffc18324aa..9c8d5f994a 100644 --- a/nipype/interfaces/cat12/tests/test_auto_ExtractROIBasedSurfaceMeasures.py +++ b/nipype/interfaces/cat12/tests/test_auto_ExtractROIBasedSurfaceMeasures.py @@ -4,38 +4,16 @@ def test_ExtractROIBasedSurfaceMeasures_inputs(): input_map = dict( - lh_roi_atlas=dict( - copyfile=False, - field="rdata", - mandatory=True, - ), - lh_surface_measure=dict( - copyfile=False, - field="cdata", - mandatory=True, - ), + lh_roi_atlas=dict(copyfile=False, field="rdata", mandatory=True), + lh_surface_measure=dict(copyfile=False, field="cdata", mandatory=True), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), + mfile=dict(usedefault=True), paths=dict(), - rh_roi_atlas=dict( - copyfile=False, - mandatory=False, - ), - rh_surface_measure=dict( - copyfile=False, - mandatory=False, - ), - surface_files=dict( - copyfile=False, - mandatory=False, - ), + rh_roi_atlas=dict(copyfile=False, mandatory=False), + rh_surface_measure=dict(copyfile=False, mandatory=False), + surface_files=dict(copyfile=False, mandatory=False), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True), ) inputs = ExtractROIBasedSurfaceMeasures.input_spec() @@ -45,9 +23,7 @@ def test_ExtractROIBasedSurfaceMeasures_inputs(): def test_ExtractROIBasedSurfaceMeasures_outputs(): - output_map = dict( - label_files=dict(), - ) + output_map = dict(label_files=dict()) outputs = ExtractROIBasedSurfaceMeasures.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py b/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py index e115acad83..6f78f0c2cb 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py +++ b/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py @@ -4,21 +4,11 @@ def test_AverageNetworks_inputs(): input_map = dict( - group_id=dict( - usedefault=True, - ), - in_files=dict( - mandatory=True, - ), - out_gexf_groupavg=dict( - extensions=None, - ), - out_gpickled_groupavg=dict( - extensions=None, - ), - resolution_network_file=dict( - extensions=None, - ), + group_id=dict(usedefault=True), + in_files=dict(mandatory=True), + out_gexf_groupavg=dict(extensions=None), + out_gpickled_groupavg=dict(extensions=None), + resolution_network_file=dict(extensions=None), ) inputs = AverageNetworks.input_spec() @@ -29,12 +19,8 @@ def test_AverageNetworks_inputs(): def test_AverageNetworks_outputs(): output_map = dict( - gexf_groupavg=dict( - extensions=None, - ), - gpickled_groupavg=dict( - extensions=None, - ), + gexf_groupavg=dict(extensions=None), + gpickled_groupavg=dict(extensions=None), matlab_groupavgs=dict(), ) outputs = AverageNetworks.output_spec() diff --git 
a/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py b/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py index ee7b0459ef..1482d8adb1 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py +++ b/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py @@ -6,9 +6,7 @@ def test_CFFConverter_inputs(): input_map = dict( creator=dict(), data_files=dict(), - description=dict( - usedefault=True, - ), + description=dict(usedefault=True), email=dict(), gifti_labels=dict(), gifti_surfaces=dict(), @@ -16,18 +14,13 @@ def test_CFFConverter_inputs(): graphml_networks=dict(), license=dict(), nifti_volumes=dict(), - out_file=dict( - extensions=None, - usedefault=True, - ), + out_file=dict(extensions=None, usedefault=True), publisher=dict(), references=dict(), relation=dict(), rights=dict(), script_files=dict(), - species=dict( - usedefault=True, - ), + species=dict(usedefault=True), timeseries_files=dict(), title=dict(), tract_files=dict(), @@ -40,11 +33,7 @@ def test_CFFConverter_inputs(): def test_CFFConverter_outputs(): - output_map = dict( - connectome_file=dict( - extensions=None, - ), - ) + output_map = dict(connectome_file=dict(extensions=None)) outputs = CFFConverter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py b/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py index a9466f91be..d426213f1d 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py +++ b/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py @@ -4,49 +4,17 @@ def test_CreateMatrix_inputs(): input_map = dict( - count_region_intersections=dict( - usedefault=True, - ), - out_endpoint_array_name=dict( - extensions=None, - genfile=True, - ), - out_fiber_length_std_matrix_mat_file=dict( - extensions=None, - genfile=True, - ), - out_intersection_matrix_mat_file=dict( - extensions=None, - genfile=True, - ), - out_matrix_file=dict( - extensions=None, - genfile=True, - ), - out_matrix_mat_file=dict( - extensions=None, - usedefault=True, - ), - out_mean_fiber_length_matrix_mat_file=dict( - extensions=None, - genfile=True, - ), - out_median_fiber_length_matrix_mat_file=dict( - extensions=None, - genfile=True, - ), - resolution_network_file=dict( - extensions=None, - mandatory=True, - ), - roi_file=dict( - extensions=None, - mandatory=True, - ), - tract_file=dict( - extensions=None, - mandatory=True, - ), + count_region_intersections=dict(usedefault=True), + out_endpoint_array_name=dict(extensions=None, genfile=True), + out_fiber_length_std_matrix_mat_file=dict(extensions=None, genfile=True), + out_intersection_matrix_mat_file=dict(extensions=None, genfile=True), + out_matrix_file=dict(extensions=None, genfile=True), + out_matrix_mat_file=dict(extensions=None, usedefault=True), + out_mean_fiber_length_matrix_mat_file=dict(extensions=None, genfile=True), + out_median_fiber_length_matrix_mat_file=dict(extensions=None, genfile=True), + resolution_network_file=dict(extensions=None, mandatory=True), + roi_file=dict(extensions=None, mandatory=True), + tract_file=dict(extensions=None, mandatory=True), ) inputs = CreateMatrix.input_spec() @@ -57,54 +25,24 @@ def test_CreateMatrix_inputs(): def test_CreateMatrix_outputs(): output_map = dict( - endpoint_file=dict( - extensions=None, - ), - endpoint_file_mm=dict( - extensions=None, - ), - fiber_label_file=dict( - extensions=None, - ), - fiber_labels_noorphans=dict( - extensions=None, - ), - fiber_length_file=dict( - extensions=None, - ), - fiber_length_std_matrix_mat_file=dict( - 
extensions=None, - ), + endpoint_file=dict(extensions=None), + endpoint_file_mm=dict(extensions=None), + fiber_label_file=dict(extensions=None), + fiber_labels_noorphans=dict(extensions=None), + fiber_length_file=dict(extensions=None), + fiber_length_std_matrix_mat_file=dict(extensions=None), filtered_tractographies=dict(), - filtered_tractography=dict( - extensions=None, - ), - filtered_tractography_by_intersections=dict( - extensions=None, - ), - intersection_matrix_file=dict( - extensions=None, - ), - intersection_matrix_mat_file=dict( - extensions=None, - ), + filtered_tractography=dict(extensions=None), + filtered_tractography_by_intersections=dict(extensions=None), + intersection_matrix_file=dict(extensions=None), + intersection_matrix_mat_file=dict(extensions=None), matlab_matrix_files=dict(), - matrix_file=dict( - extensions=None, - ), + matrix_file=dict(extensions=None), matrix_files=dict(), - matrix_mat_file=dict( - extensions=None, - ), - mean_fiber_length_matrix_mat_file=dict( - extensions=None, - ), - median_fiber_length_matrix_mat_file=dict( - extensions=None, - ), - stats_file=dict( - extensions=None, - ), + matrix_mat_file=dict(extensions=None), + mean_fiber_length_matrix_mat_file=dict(extensions=None), + median_fiber_length_matrix_mat_file=dict(extensions=None), + stats_file=dict(extensions=None), ) outputs = CreateMatrix.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py b/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py index f88950d758..71f3e9a395 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py +++ b/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py @@ -4,18 +4,9 @@ def test_CreateNodes_inputs(): input_map = dict( - out_filename=dict( - extensions=None, - usedefault=True, - ), - resolution_network_file=dict( - extensions=None, - mandatory=True, - ), - roi_file=dict( - extensions=None, - mandatory=True, - ), + out_filename=dict(extensions=None, usedefault=True), + resolution_network_file=dict(extensions=None, mandatory=True), + roi_file=dict(extensions=None, mandatory=True), ) inputs = CreateNodes.input_spec() @@ -25,11 +16,7 @@ def test_CreateNodes_inputs(): def test_CreateNodes_outputs(): - output_map = dict( - node_network=dict( - extensions=None, - ), - ) + output_map = dict(node_network=dict(extensions=None)) outputs = CreateNodes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py b/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py index 17f8990a08..f1d74aa5c4 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py +++ b/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py @@ -4,13 +4,7 @@ def test_MergeCNetworks_inputs(): input_map = dict( - in_files=dict( - mandatory=True, - ), - out_file=dict( - extensions=None, - usedefault=True, - ), + in_files=dict(mandatory=True), out_file=dict(extensions=None, usedefault=True) ) inputs = MergeCNetworks.input_spec() @@ -20,11 +14,7 @@ def test_MergeCNetworks_inputs(): def test_MergeCNetworks_outputs(): - output_map = dict( - connectome_file=dict( - extensions=None, - ), - ) + output_map = dict(connectome_file=dict(extensions=None)) outputs = MergeCNetworks.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py b/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py index 975e4741cd..af744db17a 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py +++ 
b/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py @@ -4,33 +4,15 @@ def test_NetworkBasedStatistic_inputs(): input_map = dict( - edge_key=dict( - usedefault=True, - ), - in_group1=dict( - mandatory=True, - ), - in_group2=dict( - mandatory=True, - ), - node_position_network=dict( - extensions=None, - ), - number_of_permutations=dict( - usedefault=True, - ), - out_nbs_network=dict( - extensions=None, - ), - out_nbs_pval_network=dict( - extensions=None, - ), - t_tail=dict( - usedefault=True, - ), - threshold=dict( - usedefault=True, - ), + edge_key=dict(usedefault=True), + in_group1=dict(mandatory=True), + in_group2=dict(mandatory=True), + node_position_network=dict(extensions=None), + number_of_permutations=dict(usedefault=True), + out_nbs_network=dict(extensions=None), + out_nbs_pval_network=dict(extensions=None), + t_tail=dict(usedefault=True), + threshold=dict(usedefault=True), ) inputs = NetworkBasedStatistic.input_spec() @@ -41,12 +23,8 @@ def test_NetworkBasedStatistic_inputs(): def test_NetworkBasedStatistic_outputs(): output_map = dict( - nbs_network=dict( - extensions=None, - ), - nbs_pval_network=dict( - extensions=None, - ), + nbs_network=dict(extensions=None), + nbs_pval_network=dict(extensions=None), network_files=dict(), ) outputs = NetworkBasedStatistic.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py b/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py index d171e6ab7a..6b920cda30 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py +++ b/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py @@ -4,44 +4,16 @@ def test_NetworkXMetrics_inputs(): input_map = dict( - compute_clique_related_measures=dict( - usedefault=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), - out_edge_metrics_matlab=dict( - extensions=None, - genfile=True, - ), - out_global_metrics_matlab=dict( - extensions=None, - genfile=True, - ), - out_k_core=dict( - extensions=None, - usedefault=True, - ), - out_k_crust=dict( - extensions=None, - usedefault=True, - ), - out_k_shell=dict( - extensions=None, - usedefault=True, - ), - out_node_metrics_matlab=dict( - extensions=None, - genfile=True, - ), - out_pickled_extra_measures=dict( - extensions=None, - usedefault=True, - ), - treat_as_weighted_graph=dict( - usedefault=True, - ), + compute_clique_related_measures=dict(usedefault=True), + in_file=dict(extensions=None, mandatory=True), + out_edge_metrics_matlab=dict(extensions=None, genfile=True), + out_global_metrics_matlab=dict(extensions=None, genfile=True), + out_k_core=dict(extensions=None, usedefault=True), + out_k_crust=dict(extensions=None, usedefault=True), + out_k_shell=dict(extensions=None, usedefault=True), + out_node_metrics_matlab=dict(extensions=None, genfile=True), + out_pickled_extra_measures=dict(extensions=None, usedefault=True), + treat_as_weighted_graph=dict(usedefault=True), ) inputs = NetworkXMetrics.input_spec() @@ -53,32 +25,18 @@ def test_NetworkXMetrics_inputs(): def test_NetworkXMetrics_outputs(): output_map = dict( edge_measure_networks=dict(), - edge_measures_matlab=dict( - extensions=None, - ), - global_measures_matlab=dict( - extensions=None, - ), + edge_measures_matlab=dict(extensions=None), + global_measures_matlab=dict(extensions=None), gpickled_network_files=dict(), - k_core=dict( - extensions=None, - ), - k_crust=dict( - extensions=None, - ), + k_core=dict(extensions=None), + k_crust=dict(extensions=None), k_networks=dict(), - k_shell=dict( - extensions=None, - ), + 
k_shell=dict(extensions=None), matlab_dict_measures=dict(), matlab_matrix_files=dict(), node_measure_networks=dict(), - node_measures_matlab=dict( - extensions=None, - ), - pickled_extra_measures=dict( - extensions=None, - ), + node_measures_matlab=dict(extensions=None), + pickled_extra_measures=dict(extensions=None), ) outputs = NetworkXMetrics.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py b/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py index 800b5b516b..5d056eced0 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py +++ b/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py @@ -4,20 +4,11 @@ def test_Parcellate_inputs(): input_map = dict( - dilation=dict( - usedefault=True, - ), + dilation=dict(usedefault=True), freesurfer_dir=dict(), - out_roi_file=dict( - extensions=None, - genfile=True, - ), - parcellation_name=dict( - usedefault=True, - ), - subject_id=dict( - mandatory=True, - ), + out_roi_file=dict(extensions=None, genfile=True), + parcellation_name=dict(usedefault=True), + subject_id=dict(mandatory=True), subjects_dir=dict(), ) inputs = Parcellate.input_spec() @@ -29,30 +20,14 @@ def test_Parcellate_inputs(): def test_Parcellate_outputs(): output_map = dict( - aseg_file=dict( - extensions=None, - ), - cc_unknown_file=dict( - extensions=None, - ), - dilated_roi_file_in_structural_space=dict( - extensions=None, - ), - ribbon_file=dict( - extensions=None, - ), - roi_file=dict( - extensions=None, - ), - roi_file_in_structural_space=dict( - extensions=None, - ), - roiv_file=dict( - extensions=None, - ), - white_matter_mask_file=dict( - extensions=None, - ), + aseg_file=dict(extensions=None), + cc_unknown_file=dict(extensions=None), + dilated_roi_file_in_structural_space=dict(extensions=None), + ribbon_file=dict(extensions=None), + roi_file=dict(extensions=None), + roi_file_in_structural_space=dict(extensions=None), + roiv_file=dict(extensions=None), + white_matter_mask_file=dict(extensions=None), ) outputs = Parcellate.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py b/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py index 54fd9e46e9..750a927f26 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py +++ b/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py @@ -4,28 +4,12 @@ def test_ROIGen_inputs(): input_map = dict( - LUT_file=dict( - extensions=None, - xor=["use_freesurfer_LUT"], - ), - aparc_aseg_file=dict( - extensions=None, - mandatory=True, - ), - freesurfer_dir=dict( - requires=["use_freesurfer_LUT"], - ), - out_dict_file=dict( - extensions=None, - genfile=True, - ), - out_roi_file=dict( - extensions=None, - genfile=True, - ), - use_freesurfer_LUT=dict( - xor=["LUT_file"], - ), + LUT_file=dict(extensions=None, xor=["use_freesurfer_LUT"]), + aparc_aseg_file=dict(extensions=None, mandatory=True), + freesurfer_dir=dict(requires=["use_freesurfer_LUT"]), + out_dict_file=dict(extensions=None, genfile=True), + out_roi_file=dict(extensions=None, genfile=True), + use_freesurfer_LUT=dict(xor=["LUT_file"]), ) inputs = ROIGen.input_spec() @@ -35,14 +19,7 @@ def test_ROIGen_inputs(): def test_ROIGen_outputs(): - output_map = dict( - dict_file=dict( - extensions=None, - ), - roi_file=dict( - extensions=None, - ), - ) + output_map = dict(dict_file=dict(extensions=None), roi_file=dict(extensions=None)) outputs = ROIGen.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py 
b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py index f7bf46f327..95019cbf6a 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py @@ -4,49 +4,17 @@ def test_DTIRecon_inputs(): input_map = dict( - DWI=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=1, - ), - args=dict( - argstr="%s", - ), - b0_threshold=dict( - argstr="-b0_th", - ), - bvals=dict( - extensions=None, - mandatory=True, - ), - bvecs=dict( - argstr="-gm %s", - extensions=None, - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - image_orientation_vectors=dict( - argstr="-iop %f", - ), - n_averages=dict( - argstr="-nex %s", - ), - oblique_correction=dict( - argstr="-oc", - ), - out_prefix=dict( - argstr="%s", - position=2, - usedefault=True, - ), - output_type=dict( - argstr="-ot %s", - usedefault=True, - ), + DWI=dict(argstr="%s", extensions=None, mandatory=True, position=1), + args=dict(argstr="%s"), + b0_threshold=dict(argstr="-b0_th"), + bvals=dict(extensions=None, mandatory=True), + bvecs=dict(argstr="-gm %s", extensions=None, mandatory=True), + environ=dict(nohash=True, usedefault=True), + image_orientation_vectors=dict(argstr="-iop %f"), + n_averages=dict(argstr="-nex %s"), + oblique_correction=dict(argstr="-oc"), + out_prefix=dict(argstr="%s", position=2, usedefault=True), + output_type=dict(argstr="-ot %s", usedefault=True), ) inputs = DTIRecon.input_spec() @@ -57,42 +25,18 @@ def test_DTIRecon_inputs(): def test_DTIRecon_outputs(): output_map = dict( - ADC=dict( - extensions=None, - ), - B0=dict( - extensions=None, - ), - FA=dict( - extensions=None, - ), - FA_color=dict( - extensions=None, - ), - L1=dict( - extensions=None, - ), - L2=dict( - extensions=None, - ), - L3=dict( - extensions=None, - ), - V1=dict( - extensions=None, - ), - V2=dict( - extensions=None, - ), - V3=dict( - extensions=None, - ), - exp=dict( - extensions=None, - ), - tensor=dict( - extensions=None, - ), + ADC=dict(extensions=None), + B0=dict(extensions=None), + FA=dict(extensions=None), + FA_color=dict(extensions=None), + L1=dict(extensions=None), + L2=dict(extensions=None), + L3=dict(extensions=None), + V1=dict(extensions=None), + V2=dict(extensions=None), + V3=dict(extensions=None), + exp=dict(extensions=None), + tensor=dict(extensions=None), ) outputs = DTIRecon.output_spec() diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py index e550bc4b27..872b9c6b2e 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py @@ -4,87 +4,29 @@ def test_DTITracker_inputs(): input_map = dict( - angle_threshold=dict( - argstr="-at %f", - ), - angle_threshold_weight=dict( - argstr="-atw %f", - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_data_prefix=dict( - argstr="%s", - position=0, - usedefault=True, - ), - input_type=dict( - argstr="-it %s", - ), - invert_x=dict( - argstr="-ix", - ), - invert_y=dict( - argstr="-iy", - ), - invert_z=dict( - argstr="-iz", - ), - mask1_file=dict( - argstr="-m %s", - extensions=None, - mandatory=True, - position=2, - ), - mask1_threshold=dict( - position=3, - ), - mask2_file=dict( - argstr="-m2 %s", - extensions=None, - position=4, - ), - mask2_threshold=dict( - position=5, - ), - output_file=dict( - argstr="%s", - extensions=None, - 
position=1, - usedefault=True, - ), - output_mask=dict( - argstr="-om %s", - extensions=None, - ), - primary_vector=dict( - argstr="-%s", - ), - random_seed=dict( - argstr="-rseed %d", - ), - step_length=dict( - argstr="-l %f", - ), - swap_xy=dict( - argstr="-sxy", - ), - swap_yz=dict( - argstr="-syz", - ), - swap_zx=dict( - argstr="-szx", - ), - tensor_file=dict( - extensions=None, - ), - tracking_method=dict( - argstr="-%s", - ), + angle_threshold=dict(argstr="-at %f"), + angle_threshold_weight=dict(argstr="-atw %f"), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + input_data_prefix=dict(argstr="%s", position=0, usedefault=True), + input_type=dict(argstr="-it %s"), + invert_x=dict(argstr="-ix"), + invert_y=dict(argstr="-iy"), + invert_z=dict(argstr="-iz"), + mask1_file=dict(argstr="-m %s", extensions=None, mandatory=True, position=2), + mask1_threshold=dict(position=3), + mask2_file=dict(argstr="-m2 %s", extensions=None, position=4), + mask2_threshold=dict(position=5), + output_file=dict(argstr="%s", extensions=None, position=1, usedefault=True), + output_mask=dict(argstr="-om %s", extensions=None), + primary_vector=dict(argstr="-%s"), + random_seed=dict(argstr="-rseed %d"), + step_length=dict(argstr="-l %f"), + swap_xy=dict(argstr="-sxy"), + swap_yz=dict(argstr="-syz"), + swap_zx=dict(argstr="-szx"), + tensor_file=dict(extensions=None), + tracking_method=dict(argstr="-%s"), ) inputs = DTITracker.input_spec() @@ -94,14 +36,7 @@ def test_DTITracker_inputs(): def test_DTITracker_outputs(): - output_map = dict( - mask_file=dict( - extensions=None, - ), - track_file=dict( - extensions=None, - ), - ) + output_map = dict(mask_file=dict(extensions=None), track_file=dict(extensions=None)) outputs = DTITracker.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py index a933495672..cf68542831 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py @@ -4,50 +4,17 @@ def test_HARDIMat_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bvals=dict( - extensions=None, - mandatory=True, - ), - bvecs=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - image_info=dict( - argstr="-info %s", - extensions=None, - ), - image_orientation_vectors=dict( - argstr="-iop %f", - ), - oblique_correction=dict( - argstr="-oc", - ), - odf_file=dict( - argstr="-odf %s", - extensions=None, - ), - order=dict( - argstr="-order %s", - ), - out_file=dict( - argstr="%s", - extensions=None, - position=2, - usedefault=True, - ), - reference_file=dict( - argstr="-ref %s", - extensions=None, - ), + args=dict(argstr="%s"), + bvals=dict(extensions=None, mandatory=True), + bvecs=dict(argstr="%s", extensions=None, mandatory=True, position=1), + environ=dict(nohash=True, usedefault=True), + image_info=dict(argstr="-info %s", extensions=None), + image_orientation_vectors=dict(argstr="-iop %f"), + oblique_correction=dict(argstr="-oc"), + odf_file=dict(argstr="-odf %s", extensions=None), + order=dict(argstr="-order %s"), + out_file=dict(argstr="%s", extensions=None, position=2, usedefault=True), + reference_file=dict(argstr="-ref %s", extensions=None), ) inputs = HARDIMat.input_spec() @@ -57,11 +24,7 @@ def test_HARDIMat_inputs(): def test_HARDIMat_outputs(): - output_map = dict( - 
out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = HARDIMat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py index b6a18aaf77..ccfc2e4170 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py @@ -4,68 +4,22 @@ def test_ODFRecon_inputs(): input_map = dict( - DWI=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=1, - ), - args=dict( - argstr="%s", - ), - dsi=dict( - argstr="-dsi", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - filter=dict( - argstr="-f", - ), - image_orientation_vectors=dict( - argstr="-iop %f", - ), - matrix=dict( - argstr="-mat %s", - extensions=None, - mandatory=True, - ), - n_b0=dict( - argstr="-b0 %s", - mandatory=True, - ), - n_directions=dict( - argstr="%s", - mandatory=True, - position=2, - ), - n_output_directions=dict( - argstr="%s", - mandatory=True, - position=3, - ), - oblique_correction=dict( - argstr="-oc", - ), - out_prefix=dict( - argstr="%s", - position=4, - usedefault=True, - ), - output_entropy=dict( - argstr="-oe", - ), - output_type=dict( - argstr="-ot %s", - usedefault=True, - ), - sharpness=dict( - argstr="-s %f", - ), - subtract_background=dict( - argstr="-bg", - ), + DWI=dict(argstr="%s", extensions=None, mandatory=True, position=1), + args=dict(argstr="%s"), + dsi=dict(argstr="-dsi"), + environ=dict(nohash=True, usedefault=True), + filter=dict(argstr="-f"), + image_orientation_vectors=dict(argstr="-iop %f"), + matrix=dict(argstr="-mat %s", extensions=None, mandatory=True), + n_b0=dict(argstr="-b0 %s", mandatory=True), + n_directions=dict(argstr="%s", mandatory=True, position=2), + n_output_directions=dict(argstr="%s", mandatory=True, position=3), + oblique_correction=dict(argstr="-oc"), + out_prefix=dict(argstr="%s", position=4, usedefault=True), + output_entropy=dict(argstr="-oe"), + output_type=dict(argstr="-ot %s", usedefault=True), + sharpness=dict(argstr="-s %f"), + subtract_background=dict(argstr="-bg"), ) inputs = ODFRecon.input_spec() @@ -76,21 +30,11 @@ def test_ODFRecon_inputs(): def test_ODFRecon_outputs(): output_map = dict( - B0=dict( - extensions=None, - ), - DWI=dict( - extensions=None, - ), - ODF=dict( - extensions=None, - ), - entropy=dict( - extensions=None, - ), - max=dict( - extensions=None, - ), + B0=dict(extensions=None), + DWI=dict(extensions=None), + ODF=dict(extensions=None), + entropy=dict(extensions=None), + max=dict(extensions=None), ) outputs = ODFRecon.output_spec() diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py index 2118745f3f..e335036ecc 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py @@ -4,101 +4,33 @@ def test_ODFTracker_inputs(): input_map = dict( - ODF=dict( - extensions=None, - mandatory=True, - ), - angle_threshold=dict( - argstr="-at %f", - ), - args=dict( - argstr="%s", - ), - disc=dict( - argstr="-disc", - ), - dsi=dict( - argstr="-dsi", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - image_orientation_vectors=dict( - argstr="-iop %f", - ), - input_data_prefix=dict( - argstr="%s", - position=0, - usedefault=True, - ), - input_output_type=dict( - argstr="-it %s", - 
usedefault=True, - ), - invert_x=dict( - argstr="-ix", - ), - invert_y=dict( - argstr="-iy", - ), - invert_z=dict( - argstr="-iz", - ), - limit=dict( - argstr="-limit %d", - ), - mask1_file=dict( - argstr="-m %s", - extensions=None, - mandatory=True, - position=2, - ), - mask1_threshold=dict( - position=3, - ), - mask2_file=dict( - argstr="-m2 %s", - extensions=None, - position=4, - ), - mask2_threshold=dict( - position=5, - ), - max=dict( - extensions=None, - mandatory=True, - ), - out_file=dict( - argstr="%s", - extensions=None, - position=1, - usedefault=True, - ), - random_seed=dict( - argstr="-rseed %s", - ), - runge_kutta2=dict( - argstr="-rk2", - ), - slice_order=dict( - argstr="-sorder %d", - ), - step_length=dict( - argstr="-l %f", - ), - swap_xy=dict( - argstr="-sxy", - ), - swap_yz=dict( - argstr="-syz", - ), - swap_zx=dict( - argstr="-szx", - ), - voxel_order=dict( - argstr="-vorder %s", - ), + ODF=dict(extensions=None, mandatory=True), + angle_threshold=dict(argstr="-at %f"), + args=dict(argstr="%s"), + disc=dict(argstr="-disc"), + dsi=dict(argstr="-dsi"), + environ=dict(nohash=True, usedefault=True), + image_orientation_vectors=dict(argstr="-iop %f"), + input_data_prefix=dict(argstr="%s", position=0, usedefault=True), + input_output_type=dict(argstr="-it %s", usedefault=True), + invert_x=dict(argstr="-ix"), + invert_y=dict(argstr="-iy"), + invert_z=dict(argstr="-iz"), + limit=dict(argstr="-limit %d"), + mask1_file=dict(argstr="-m %s", extensions=None, mandatory=True, position=2), + mask1_threshold=dict(position=3), + mask2_file=dict(argstr="-m2 %s", extensions=None, position=4), + mask2_threshold=dict(position=5), + max=dict(extensions=None, mandatory=True), + out_file=dict(argstr="%s", extensions=None, position=1, usedefault=True), + random_seed=dict(argstr="-rseed %s"), + runge_kutta2=dict(argstr="-rk2"), + slice_order=dict(argstr="-sorder %d"), + step_length=dict(argstr="-l %f"), + swap_xy=dict(argstr="-sxy"), + swap_yz=dict(argstr="-syz"), + swap_zx=dict(argstr="-szx"), + voxel_order=dict(argstr="-vorder %s"), ) inputs = ODFTracker.input_spec() @@ -108,11 +40,7 @@ def test_ODFTracker_inputs(): def test_ODFTracker_outputs(): - output_map = dict( - track_file=dict( - extensions=None, - ), - ) + output_map = dict(track_file=dict(extensions=None)) outputs = ODFTracker.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py index 65450952a4..60b35bbc16 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py @@ -4,30 +4,11 @@ def test_SplineFilter_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - output_file=dict( - argstr="%s", - extensions=None, - position=2, - usedefault=True, - ), - step_length=dict( - argstr="%f", - mandatory=True, - position=1, - ), - track_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=0, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + output_file=dict(argstr="%s", extensions=None, position=2, usedefault=True), + step_length=dict(argstr="%f", mandatory=True, position=1), + track_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), ) inputs = SplineFilter.input_spec() @@ -37,11 +18,7 @@ def test_SplineFilter_inputs(): def test_SplineFilter_outputs(): - output_map = 
dict( - smoothed_track_file=dict( - extensions=None, - ), - ) + output_map = dict(smoothed_track_file=dict(extensions=None)) outputs = SplineFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py index 7f668df568..c0e8039dd1 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py @@ -4,24 +4,10 @@ def test_TrackMerge_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - output_file=dict( - argstr="%s", - extensions=None, - position=-1, - usedefault=True, - ), - track_files=dict( - argstr="%s...", - mandatory=True, - position=0, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + output_file=dict(argstr="%s", extensions=None, position=-1, usedefault=True), + track_files=dict(argstr="%s...", mandatory=True, position=0), ) inputs = TrackMerge.input_spec() @@ -31,11 +17,7 @@ def test_TrackMerge_inputs(): def test_TrackMerge_outputs(): - output_map = dict( - track_file=dict( - extensions=None, - ), - ) + output_map = dict(track_file=dict(extensions=None)) outputs = TrackMerge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_APMQball.py b/nipype/interfaces/dipy/tests/test_auto_APMQball.py index 81ff397cb8..df94f72b25 100644 --- a/nipype/interfaces/dipy/tests/test_auto_APMQball.py +++ b/nipype/interfaces/dipy/tests/test_auto_APMQball.py @@ -4,24 +4,11 @@ def test_APMQball_inputs(): input_map = dict( - b0_thres=dict( - usedefault=True, - ), - in_bval=dict( - extensions=None, - mandatory=True, - ), - in_bvec=dict( - extensions=None, - mandatory=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), - mask_file=dict( - extensions=None, - ), + b0_thres=dict(usedefault=True), + in_bval=dict(extensions=None, mandatory=True), + in_bvec=dict(extensions=None, mandatory=True), + in_file=dict(extensions=None, mandatory=True), + mask_file=dict(extensions=None), out_prefix=dict(), ) inputs = APMQball.input_spec() @@ -32,11 +19,7 @@ def test_APMQball_inputs(): def test_APMQball_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = APMQball.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_CSD.py b/nipype/interfaces/dipy/tests/test_auto_CSD.py index 7cdab47f9b..96c5531679 100644 --- a/nipype/interfaces/dipy/tests/test_auto_CSD.py +++ b/nipype/interfaces/dipy/tests/test_auto_CSD.py @@ -4,37 +4,16 @@ def test_CSD_inputs(): input_map = dict( - b0_thres=dict( - usedefault=True, - ), - in_bval=dict( - extensions=None, - mandatory=True, - ), - in_bvec=dict( - extensions=None, - mandatory=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), - in_mask=dict( - extensions=None, - ), - out_fods=dict( - extensions=None, - ), + b0_thres=dict(usedefault=True), + in_bval=dict(extensions=None, mandatory=True), + in_bvec=dict(extensions=None, mandatory=True), + in_file=dict(extensions=None, mandatory=True), + in_mask=dict(extensions=None), + out_fods=dict(extensions=None), out_prefix=dict(), - response=dict( - extensions=None, - ), - save_fods=dict( - usedefault=True, - ), - sh_order=dict( - usedefault=True, - ), + response=dict(extensions=None), + 
save_fods=dict(usedefault=True), + sh_order=dict(usedefault=True), ) inputs = CSD.input_spec() @@ -44,14 +23,7 @@ def test_CSD_inputs(): def test_CSD_outputs(): - output_map = dict( - model=dict( - extensions=None, - ), - out_fods=dict( - extensions=None, - ), - ) + output_map = dict(model=dict(extensions=None), out_fods=dict(extensions=None)) outputs = CSD.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_DTI.py b/nipype/interfaces/dipy/tests/test_auto_DTI.py index 1cea142a36..fd47004082 100644 --- a/nipype/interfaces/dipy/tests/test_auto_DTI.py +++ b/nipype/interfaces/dipy/tests/test_auto_DTI.py @@ -4,24 +4,11 @@ def test_DTI_inputs(): input_map = dict( - b0_thres=dict( - usedefault=True, - ), - in_bval=dict( - extensions=None, - mandatory=True, - ), - in_bvec=dict( - extensions=None, - mandatory=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), - mask_file=dict( - extensions=None, - ), + b0_thres=dict(usedefault=True), + in_bval=dict(extensions=None, mandatory=True), + in_bvec=dict(extensions=None, mandatory=True), + in_file=dict(extensions=None, mandatory=True), + mask_file=dict(extensions=None), out_prefix=dict(), ) inputs = DTI.input_spec() @@ -33,24 +20,12 @@ def test_DTI_inputs(): def test_DTI_outputs(): output_map = dict( - ad_file=dict( - extensions=None, - ), - color_fa_file=dict( - extensions=None, - ), - fa_file=dict( - extensions=None, - ), - md_file=dict( - extensions=None, - ), - out_file=dict( - extensions=None, - ), - rd_file=dict( - extensions=None, - ), + ad_file=dict(extensions=None), + color_fa_file=dict(extensions=None), + fa_file=dict(extensions=None), + md_file=dict(extensions=None), + out_file=dict(extensions=None), + rd_file=dict(extensions=None), ) outputs = DTI.output_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_Denoise.py b/nipype/interfaces/dipy/tests/test_auto_Denoise.py index e85d2644c2..a0089f8d67 100644 --- a/nipype/interfaces/dipy/tests/test_auto_Denoise.py +++ b/nipype/interfaces/dipy/tests/test_auto_Denoise.py @@ -4,29 +4,13 @@ def test_Denoise_inputs(): input_map = dict( - block_radius=dict( - usedefault=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), - in_mask=dict( - extensions=None, - ), - noise_mask=dict( - extensions=None, - ), - noise_model=dict( - mandatory=True, - usedefault=True, - ), - patch_radius=dict( - usedefault=True, - ), - signal_mask=dict( - extensions=None, - ), + block_radius=dict(usedefault=True), + in_file=dict(extensions=None, mandatory=True), + in_mask=dict(extensions=None), + noise_mask=dict(extensions=None), + noise_model=dict(mandatory=True, usedefault=True), + patch_radius=dict(usedefault=True), + signal_mask=dict(extensions=None), snr=dict(), ) inputs = Denoise.input_spec() @@ -37,11 +21,7 @@ def test_Denoise_inputs(): def test_Denoise_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Denoise.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py b/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py index e292135ba0..cb94de4bda 100644 --- a/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py +++ b/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py @@ -4,21 +4,10 @@ def test_DipyDiffusionInterface_inputs(): input_map = dict( - b0_thres=dict( - usedefault=True, - ), - in_bval=dict( - extensions=None, 
- mandatory=True, - ), - in_bvec=dict( - extensions=None, - mandatory=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), + b0_thres=dict(usedefault=True), + in_bval=dict(extensions=None, mandatory=True), + in_bvec=dict(extensions=None, mandatory=True), + in_file=dict(extensions=None, mandatory=True), out_prefix=dict(), ) inputs = DipyDiffusionInterface.input_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py b/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py index 00c8c1ba0d..3031621e3e 100644 --- a/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py +++ b/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py @@ -4,49 +4,19 @@ def test_EstimateResponseSH_inputs(): input_map = dict( - auto=dict( - xor=["recursive"], - ), - b0_thres=dict( - usedefault=True, - ), - fa_thresh=dict( - usedefault=True, - ), - in_bval=dict( - extensions=None, - mandatory=True, - ), - in_bvec=dict( - extensions=None, - mandatory=True, - ), - in_evals=dict( - extensions=None, - mandatory=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), - in_mask=dict( - extensions=None, - ), - out_mask=dict( - extensions=None, - usedefault=True, - ), + auto=dict(xor=["recursive"]), + b0_thres=dict(usedefault=True), + fa_thresh=dict(usedefault=True), + in_bval=dict(extensions=None, mandatory=True), + in_bvec=dict(extensions=None, mandatory=True), + in_evals=dict(extensions=None, mandatory=True), + in_file=dict(extensions=None, mandatory=True), + in_mask=dict(extensions=None), + out_mask=dict(extensions=None, usedefault=True), out_prefix=dict(), - recursive=dict( - xor=["auto"], - ), - response=dict( - extensions=None, - usedefault=True, - ), - roi_radius=dict( - usedefault=True, - ), + recursive=dict(xor=["auto"]), + response=dict(extensions=None, usedefault=True), + roi_radius=dict(usedefault=True), ) inputs = EstimateResponseSH.input_spec() @@ -56,14 +26,7 @@ def test_EstimateResponseSH_inputs(): def test_EstimateResponseSH_outputs(): - output_map = dict( - out_mask=dict( - extensions=None, - ), - response=dict( - extensions=None, - ), - ) + output_map = dict(out_mask=dict(extensions=None), response=dict(extensions=None)) outputs = EstimateResponseSH.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_RESTORE.py b/nipype/interfaces/dipy/tests/test_auto_RESTORE.py index f25127f9c9..bf137fb14d 100644 --- a/nipype/interfaces/dipy/tests/test_auto_RESTORE.py +++ b/nipype/interfaces/dipy/tests/test_auto_RESTORE.py @@ -4,27 +4,12 @@ def test_RESTORE_inputs(): input_map = dict( - b0_thres=dict( - usedefault=True, - ), - in_bval=dict( - extensions=None, - mandatory=True, - ), - in_bvec=dict( - extensions=None, - mandatory=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), - in_mask=dict( - extensions=None, - ), - noise_mask=dict( - extensions=None, - ), + b0_thres=dict(usedefault=True), + in_bval=dict(extensions=None, mandatory=True), + in_bvec=dict(extensions=None, mandatory=True), + in_file=dict(extensions=None, mandatory=True), + in_mask=dict(extensions=None), + noise_mask=dict(extensions=None), out_prefix=dict(), ) inputs = RESTORE.input_spec() @@ -36,27 +21,13 @@ def test_RESTORE_inputs(): def test_RESTORE_outputs(): output_map = dict( - evals=dict( - extensions=None, - ), - evecs=dict( - extensions=None, - ), - fa=dict( - extensions=None, - ), - md=dict( - extensions=None, - ), - mode=dict( - extensions=None, - ), - rd=dict( - extensions=None, - ), - trace=dict( 
- extensions=None, - ), + evals=dict(extensions=None), + evecs=dict(extensions=None), + fa=dict(extensions=None), + md=dict(extensions=None), + mode=dict(extensions=None), + rd=dict(extensions=None), + trace=dict(extensions=None), ) outputs = RESTORE.output_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_Resample.py b/nipype/interfaces/dipy/tests/test_auto_Resample.py index 6c765b2fa9..fcde24b209 100644 --- a/nipype/interfaces/dipy/tests/test_auto_Resample.py +++ b/nipype/interfaces/dipy/tests/test_auto_Resample.py @@ -4,14 +4,8 @@ def test_Resample_inputs(): input_map = dict( - in_file=dict( - extensions=None, - mandatory=True, - ), - interp=dict( - mandatory=True, - usedefault=True, - ), + in_file=dict(extensions=None, mandatory=True), + interp=dict(mandatory=True, usedefault=True), vox_size=dict(), ) inputs = Resample.input_spec() @@ -22,11 +16,7 @@ def test_Resample_inputs(): def test_Resample_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Resample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py b/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py index 16fff2aeff..3e24a53485 100644 --- a/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py +++ b/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py @@ -4,65 +4,24 @@ def test_SimulateMultiTensor_inputs(): input_map = dict( - baseline=dict( - extensions=None, - mandatory=True, - ), - bvalues=dict( - usedefault=True, - ), - diff_iso=dict( - usedefault=True, - ), - diff_sf=dict( - usedefault=True, - ), - gradients=dict( - extensions=None, - ), - in_bval=dict( - extensions=None, - ), - in_bvec=dict( - extensions=None, - ), - in_dirs=dict( - mandatory=True, - ), - in_frac=dict( - mandatory=True, - ), - in_mask=dict( - extensions=None, - ), - in_vfms=dict( - mandatory=True, - ), - n_proc=dict( - usedefault=True, - ), - num_dirs=dict( - usedefault=True, - ), - out_bval=dict( - extensions=None, - usedefault=True, - ), - out_bvec=dict( - extensions=None, - usedefault=True, - ), - out_file=dict( - extensions=None, - usedefault=True, - ), - out_mask=dict( - extensions=None, - usedefault=True, - ), - snr=dict( - usedefault=True, - ), + baseline=dict(extensions=None, mandatory=True), + bvalues=dict(usedefault=True), + diff_iso=dict(usedefault=True), + diff_sf=dict(usedefault=True), + gradients=dict(extensions=None), + in_bval=dict(extensions=None), + in_bvec=dict(extensions=None), + in_dirs=dict(mandatory=True), + in_frac=dict(mandatory=True), + in_mask=dict(extensions=None), + in_vfms=dict(mandatory=True), + n_proc=dict(usedefault=True), + num_dirs=dict(usedefault=True), + out_bval=dict(extensions=None, usedefault=True), + out_bvec=dict(extensions=None, usedefault=True), + out_file=dict(extensions=None, usedefault=True), + out_mask=dict(extensions=None, usedefault=True), + snr=dict(usedefault=True), ) inputs = SimulateMultiTensor.input_spec() @@ -73,18 +32,10 @@ def test_SimulateMultiTensor_inputs(): def test_SimulateMultiTensor_outputs(): output_map = dict( - out_bval=dict( - extensions=None, - ), - out_bvec=dict( - extensions=None, - ), - out_file=dict( - extensions=None, - ), - out_mask=dict( - extensions=None, - ), + out_bval=dict(extensions=None), + out_bvec=dict(extensions=None), + out_file=dict(extensions=None), + out_mask=dict(extensions=None), ) outputs = SimulateMultiTensor.output_spec() diff --git 
a/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py b/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py index ad97523ce2..7f7b61c7df 100644 --- a/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py +++ b/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py @@ -4,50 +4,19 @@ def test_StreamlineTractography_inputs(): input_map = dict( - gfa_thresh=dict( - mandatory=True, - usedefault=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), - in_model=dict( - extensions=None, - ), - in_peaks=dict( - extensions=None, - ), - min_angle=dict( - mandatory=True, - usedefault=True, - ), - multiprocess=dict( - mandatory=True, - usedefault=True, - ), - num_seeds=dict( - mandatory=True, - usedefault=True, - ), + gfa_thresh=dict(mandatory=True, usedefault=True), + in_file=dict(extensions=None, mandatory=True), + in_model=dict(extensions=None), + in_peaks=dict(extensions=None), + min_angle=dict(mandatory=True, usedefault=True), + multiprocess=dict(mandatory=True, usedefault=True), + num_seeds=dict(mandatory=True, usedefault=True), out_prefix=dict(), - peak_threshold=dict( - mandatory=True, - usedefault=True, - ), - save_seeds=dict( - mandatory=True, - usedefault=True, - ), - seed_coord=dict( - extensions=None, - ), - seed_mask=dict( - extensions=None, - ), - tracking_mask=dict( - extensions=None, - ), + peak_threshold=dict(mandatory=True, usedefault=True), + save_seeds=dict(mandatory=True, usedefault=True), + seed_coord=dict(extensions=None), + seed_mask=dict(extensions=None), + tracking_mask=dict(extensions=None), ) inputs = StreamlineTractography.input_spec() @@ -58,18 +27,10 @@ def test_StreamlineTractography_inputs(): def test_StreamlineTractography_outputs(): output_map = dict( - gfa=dict( - extensions=None, - ), - odf_peaks=dict( - extensions=None, - ), - out_seeds=dict( - extensions=None, - ), - tracks=dict( - extensions=None, - ), + gfa=dict(extensions=None), + odf_peaks=dict(extensions=None), + out_seeds=dict(extensions=None), + tracks=dict(extensions=None), ) outputs = StreamlineTractography.output_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_TensorMode.py b/nipype/interfaces/dipy/tests/test_auto_TensorMode.py index 59b1b30e8b..8f2eee5294 100644 --- a/nipype/interfaces/dipy/tests/test_auto_TensorMode.py +++ b/nipype/interfaces/dipy/tests/test_auto_TensorMode.py @@ -4,24 +4,11 @@ def test_TensorMode_inputs(): input_map = dict( - b0_thres=dict( - usedefault=True, - ), - in_bval=dict( - extensions=None, - mandatory=True, - ), - in_bvec=dict( - extensions=None, - mandatory=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), - mask_file=dict( - extensions=None, - ), + b0_thres=dict(usedefault=True), + in_bval=dict(extensions=None, mandatory=True), + in_bvec=dict(extensions=None, mandatory=True), + in_file=dict(extensions=None, mandatory=True), + mask_file=dict(extensions=None), out_prefix=dict(), ) inputs = TensorMode.input_spec() @@ -32,11 +19,7 @@ def test_TensorMode_inputs(): def test_TensorMode_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = TensorMode.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py b/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py index 0c7855c507..70ee723c69 100644 --- a/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py +++ b/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py @@ -5,20 
+5,10 @@ def test_TrackDensityMap_inputs(): input_map = dict( data_dims=dict(), - in_file=dict( - extensions=None, - mandatory=True, - ), - out_filename=dict( - extensions=None, - usedefault=True, - ), - points_space=dict( - usedefault=True, - ), - reference=dict( - extensions=None, - ), + in_file=dict(extensions=None, mandatory=True), + out_filename=dict(extensions=None, usedefault=True), + points_space=dict(usedefault=True), + reference=dict(extensions=None), voxel_dims=dict(), ) inputs = TrackDensityMap.input_spec() @@ -29,11 +19,7 @@ def test_TrackDensityMap_inputs(): def test_TrackDensityMap_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = TrackDensityMap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py b/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py index 2988e44e8d..0d15b9a115 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py +++ b/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py @@ -4,30 +4,12 @@ def test_AffScalarVol_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - deformation=dict( - argstr="-deformation %g %g %g %g %g %g", - xor=["transform"], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - euler=dict( - argstr="-euler %g %g %g", - xor=["transform"], - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - ), - interpolation=dict( - argstr="-interp %s", - usedefault=True, - ), + args=dict(argstr="%s"), + deformation=dict(argstr="-deformation %g %g %g %g %g %g", xor=["transform"]), + environ=dict(nohash=True, usedefault=True), + euler=dict(argstr="-euler %g %g %g", xor=["transform"]), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True), + interpolation=dict(argstr="-interp %s", usedefault=True), out_file=dict( argstr="-out %s", extensions=None, @@ -35,20 +17,13 @@ def test_AffScalarVol_inputs(): name_source="in_file", name_template="%s_affxfmd", ), - target=dict( - argstr="-target %s", - extensions=None, - xor=["transform"], - ), + target=dict(argstr="-target %s", extensions=None, xor=["transform"]), transform=dict( argstr="-trans %s", extensions=None, xor=["target", "translation", "euler", "deformation"], ), - translation=dict( - argstr="-translation %g %g %g", - xor=["transform"], - ), + translation=dict(argstr="-translation %g %g %g", xor=["transform"]), ) inputs = AffScalarVol.input_spec() @@ -58,11 +33,7 @@ def test_AffScalarVol_inputs(): def test_AffScalarVol_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = AffScalarVol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py b/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py index d1ba18a8ac..96dfaa3584 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py +++ b/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py @@ -4,30 +4,12 @@ def test_AffSymTensor3DVol_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - deformation=dict( - argstr="-deformation %g %g %g %g %g %g", - xor=["transform"], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - euler=dict( - argstr="-euler %g %g %g", - xor=["transform"], - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - ), - interpolation=dict( - argstr="-interp %s", - 
usedefault=True, - ), + args=dict(argstr="%s"), + deformation=dict(argstr="-deformation %g %g %g %g %g %g", xor=["transform"]), + environ=dict(nohash=True, usedefault=True), + euler=dict(argstr="-euler %g %g %g", xor=["transform"]), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True), + interpolation=dict(argstr="-interp %s", usedefault=True), out_file=dict( argstr="-out %s", extensions=None, @@ -35,24 +17,14 @@ def test_AffSymTensor3DVol_inputs(): name_source="in_file", name_template="%s_affxfmd", ), - reorient=dict( - argstr="-reorient %s", - usedefault=True, - ), - target=dict( - argstr="-target %s", - extensions=None, - xor=["transform"], - ), + reorient=dict(argstr="-reorient %s", usedefault=True), + target=dict(argstr="-target %s", extensions=None, xor=["transform"]), transform=dict( argstr="-trans %s", extensions=None, xor=["target", "translation", "euler", "deformation"], ), - translation=dict( - argstr="-translation %g %g %g", - xor=["transform"], - ), + translation=dict(argstr="-translation %g %g %g", xor=["transform"]), ) inputs = AffSymTensor3DVol.input_spec() @@ -62,11 +34,7 @@ def test_AffSymTensor3DVol_inputs(): def test_AffSymTensor3DVol_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = AffSymTensor3DVol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_Affine.py b/nipype/interfaces/dtitk/tests/test_auto_Affine.py index 0ff24b788c..7ff9cc9e2b 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_Affine.py +++ b/nipype/interfaces/dtitk/tests/test_auto_Affine.py @@ -4,50 +4,21 @@ def test_Affine_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), fixed_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=0, - ), - ftol=dict( - argstr="%g", - mandatory=True, - position=4, - usedefault=True, - ), - initialize_xfm=dict( - argstr="%s", - copyfile=True, - extensions=None, - position=5, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0 ), + ftol=dict(argstr="%g", mandatory=True, position=4, usedefault=True), + initialize_xfm=dict(argstr="%s", copyfile=True, extensions=None, position=5), moving_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=1, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=1 ), sampling_xyz=dict( - argstr="%g %g %g", - mandatory=True, - position=3, - usedefault=True, + argstr="%g %g %g", mandatory=True, position=3, usedefault=True ), similarity_metric=dict( - argstr="%s", - mandatory=True, - position=2, - usedefault=True, + argstr="%s", mandatory=True, position=2, usedefault=True ), ) inputs = Affine.input_spec() @@ -59,12 +30,7 @@ def test_Affine_inputs(): def test_Affine_outputs(): output_map = dict( - out_file=dict( - extensions=None, - ), - out_file_xfm=dict( - extensions=None, - ), + out_file=dict(extensions=None), out_file_xfm=dict(extensions=None) ) outputs = Affine.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py b/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py index c680c08815..f332eb35b5 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py @@ -4,50 +4,21 @@ def test_AffineTask_inputs(): input_map = dict( - args=dict( 
- argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), fixed_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=0, - ), - ftol=dict( - argstr="%g", - mandatory=True, - position=4, - usedefault=True, - ), - initialize_xfm=dict( - argstr="%s", - copyfile=True, - extensions=None, - position=5, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0 ), + ftol=dict(argstr="%g", mandatory=True, position=4, usedefault=True), + initialize_xfm=dict(argstr="%s", copyfile=True, extensions=None, position=5), moving_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=1, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=1 ), sampling_xyz=dict( - argstr="%g %g %g", - mandatory=True, - position=3, - usedefault=True, + argstr="%g %g %g", mandatory=True, position=3, usedefault=True ), similarity_metric=dict( - argstr="%s", - mandatory=True, - position=2, - usedefault=True, + argstr="%s", mandatory=True, position=2, usedefault=True ), ) inputs = AffineTask.input_spec() @@ -59,12 +30,7 @@ def test_AffineTask_inputs(): def test_AffineTask_outputs(): output_map = dict( - out_file=dict( - extensions=None, - ), - out_file_xfm=dict( - extensions=None, - ), + out_file=dict(extensions=None), out_file_xfm=dict(extensions=None) ) outputs = AffineTask.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py b/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py index 994c8a2b8d..807c16ea73 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py +++ b/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py @@ -4,31 +4,11 @@ def test_BinThresh_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=0, - ), - inside_value=dict( - argstr="%g", - mandatory=True, - position=4, - usedefault=True, - ), - lower_bound=dict( - argstr="%g", - mandatory=True, - position=2, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), + inside_value=dict(argstr="%g", mandatory=True, position=4, usedefault=True), + lower_bound=dict(argstr="%g", mandatory=True, position=2, usedefault=True), out_file=dict( argstr="%s", extensions=None, @@ -37,18 +17,8 @@ def test_BinThresh_inputs(): name_template="%s_thrbin", position=1, ), - outside_value=dict( - argstr="%g", - mandatory=True, - position=5, - usedefault=True, - ), - upper_bound=dict( - argstr="%g", - mandatory=True, - position=3, - usedefault=True, - ), + outside_value=dict(argstr="%g", mandatory=True, position=5, usedefault=True), + upper_bound=dict(argstr="%g", mandatory=True, position=3, usedefault=True), ) inputs = BinThresh.input_spec() @@ -58,11 +28,7 @@ def test_BinThresh_inputs(): def test_BinThresh_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = BinThresh.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py b/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py index cccf7eed27..fd430797b4 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py @@ 
-4,31 +4,11 @@ def test_BinThreshTask_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=0, - ), - inside_value=dict( - argstr="%g", - mandatory=True, - position=4, - usedefault=True, - ), - lower_bound=dict( - argstr="%g", - mandatory=True, - position=2, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), + inside_value=dict(argstr="%g", mandatory=True, position=4, usedefault=True), + lower_bound=dict(argstr="%g", mandatory=True, position=2, usedefault=True), out_file=dict( argstr="%s", extensions=None, @@ -37,18 +17,8 @@ def test_BinThreshTask_inputs(): name_template="%s_thrbin", position=1, ), - outside_value=dict( - argstr="%g", - mandatory=True, - position=5, - usedefault=True, - ), - upper_bound=dict( - argstr="%g", - mandatory=True, - position=3, - usedefault=True, - ), + outside_value=dict(argstr="%g", mandatory=True, position=5, usedefault=True), + upper_bound=dict(argstr="%g", mandatory=True, position=3, usedefault=True), ) inputs = BinThreshTask.input_spec() @@ -58,11 +28,7 @@ def test_BinThreshTask_inputs(): def test_BinThreshTask_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = BinThreshTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_CommandLineDtitk.py b/nipype/interfaces/dtitk/tests/test_auto_CommandLineDtitk.py index 03044b6bc4..ca290894cc 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_CommandLineDtitk.py +++ b/nipype/interfaces/dtitk/tests/test_auto_CommandLineDtitk.py @@ -3,15 +3,7 @@ def test_CommandLineDtitk_inputs(): - input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - ) + input_map = dict(args=dict(argstr="%s"), environ=dict(nohash=True, usedefault=True)) inputs = CommandLineDtitk.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py index fa34fdcdb3..7351bb228d 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py +++ b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py @@ -4,28 +4,11 @@ def test_ComposeXfm_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_aff=dict( - argstr="-aff %s", - extensions=None, - mandatory=True, - ), - in_df=dict( - argstr="-df %s", - extensions=None, - mandatory=True, - ), - out_file=dict( - argstr="-out %s", - extensions=None, - genfile=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_aff=dict(argstr="-aff %s", extensions=None, mandatory=True), + in_df=dict(argstr="-df %s", extensions=None, mandatory=True), + out_file=dict(argstr="-out %s", extensions=None, genfile=True), ) inputs = ComposeXfm.input_spec() @@ -35,11 +18,7 @@ def test_ComposeXfm_inputs(): def test_ComposeXfm_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = ComposeXfm.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py 
index b4b2f7509e..b79316c6ba 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py @@ -4,28 +4,11 @@ def test_ComposeXfmTask_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_aff=dict( - argstr="-aff %s", - extensions=None, - mandatory=True, - ), - in_df=dict( - argstr="-df %s", - extensions=None, - mandatory=True, - ), - out_file=dict( - argstr="-out %s", - extensions=None, - genfile=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_aff=dict(argstr="-aff %s", extensions=None, mandatory=True), + in_df=dict(argstr="-df %s", extensions=None, mandatory=True), + out_file=dict(argstr="-out %s", extensions=None, genfile=True), ) inputs = ComposeXfmTask.input_spec() @@ -35,11 +18,7 @@ def test_ComposeXfmTask_inputs(): def test_ComposeXfmTask_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = ComposeXfmTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py b/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py index 89a1bdcfc8..dcb9115575 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py +++ b/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py @@ -4,47 +4,14 @@ def test_Diffeo_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixed_file=dict( - argstr="%s", - extensions=None, - position=0, - ), - ftol=dict( - argstr="%g", - mandatory=True, - position=5, - usedefault=True, - ), - legacy=dict( - argstr="%d", - mandatory=True, - position=3, - usedefault=True, - ), - mask_file=dict( - argstr="%s", - extensions=None, - position=2, - ), - moving_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - position=1, - ), - n_iters=dict( - argstr="%d", - mandatory=True, - position=4, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + fixed_file=dict(argstr="%s", extensions=None, position=0), + ftol=dict(argstr="%g", mandatory=True, position=5, usedefault=True), + legacy=dict(argstr="%d", mandatory=True, position=3, usedefault=True), + mask_file=dict(argstr="%s", extensions=None, position=2), + moving_file=dict(argstr="%s", copyfile=False, extensions=None, position=1), + n_iters=dict(argstr="%d", mandatory=True, position=4, usedefault=True), ) inputs = Diffeo.input_spec() @@ -55,12 +22,7 @@ def test_Diffeo_inputs(): def test_Diffeo_outputs(): output_map = dict( - out_file=dict( - extensions=None, - ), - out_file_xfm=dict( - extensions=None, - ), + out_file=dict(extensions=None), out_file_xfm=dict(extensions=None) ) outputs = Diffeo.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py b/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py index 39255fb5c1..97fdde0e5b 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py +++ b/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py @@ -4,25 +4,11 @@ def test_DiffeoScalarVol_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - flip=dict( - argstr="-flip %d %d %d", - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - ), - interpolation=dict( - argstr="-interp %s", - usedefault=True, - ), + args=dict(argstr="%s"), + 
environ=dict(nohash=True, usedefault=True), + flip=dict(argstr="-flip %d %d %d"), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True), + interpolation=dict(argstr="-interp %s", usedefault=True), out_file=dict( argstr="-out %s", extensions=None, @@ -30,23 +16,10 @@ def test_DiffeoScalarVol_inputs(): name_source="in_file", name_template="%s_diffeoxfmd", ), - resampling_type=dict( - argstr="-type %s", - ), - target=dict( - argstr="-target %s", - extensions=None, - xor=["voxel_size"], - ), - transform=dict( - argstr="-trans %s", - extensions=None, - mandatory=True, - ), - voxel_size=dict( - argstr="-vsize %g %g %g", - xor=["target"], - ), + resampling_type=dict(argstr="-type %s"), + target=dict(argstr="-target %s", extensions=None, xor=["voxel_size"]), + transform=dict(argstr="-trans %s", extensions=None, mandatory=True), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target"]), ) inputs = DiffeoScalarVol.input_spec() @@ -56,11 +29,7 @@ def test_DiffeoScalarVol_inputs(): def test_DiffeoScalarVol_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = DiffeoScalarVol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py b/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py index 123b741645..b50fdfd3be 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py +++ b/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py @@ -4,29 +4,12 @@ def test_DiffeoSymTensor3DVol_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - df=dict( - argstr="-df %s", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - flip=dict( - argstr="-flip %d %d %d", - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - ), - interpolation=dict( - argstr="-interp %s", - usedefault=True, - ), + args=dict(argstr="%s"), + df=dict(argstr="-df %s", usedefault=True), + environ=dict(nohash=True, usedefault=True), + flip=dict(argstr="-flip %d %d %d"), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True), + interpolation=dict(argstr="-interp %s", usedefault=True), out_file=dict( argstr="-out %s", extensions=None, @@ -34,27 +17,11 @@ def test_DiffeoSymTensor3DVol_inputs(): name_source="in_file", name_template="%s_diffeoxfmd", ), - reorient=dict( - argstr="-reorient %s", - usedefault=True, - ), - resampling_type=dict( - argstr="-type %s", - ), - target=dict( - argstr="-target %s", - extensions=None, - xor=["voxel_size"], - ), - transform=dict( - argstr="-trans %s", - extensions=None, - mandatory=True, - ), - voxel_size=dict( - argstr="-vsize %g %g %g", - xor=["target"], - ), + reorient=dict(argstr="-reorient %s", usedefault=True), + resampling_type=dict(argstr="-type %s"), + target=dict(argstr="-target %s", extensions=None, xor=["voxel_size"]), + transform=dict(argstr="-trans %s", extensions=None, mandatory=True), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target"]), ) inputs = DiffeoSymTensor3DVol.input_spec() @@ -64,11 +31,7 @@ def test_DiffeoSymTensor3DVol_inputs(): def test_DiffeoSymTensor3DVol_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = DiffeoSymTensor3DVol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py b/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py 
index f7914ab9cc..80be334b0a 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py @@ -4,47 +4,14 @@ def test_DiffeoTask_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixed_file=dict( - argstr="%s", - extensions=None, - position=0, - ), - ftol=dict( - argstr="%g", - mandatory=True, - position=5, - usedefault=True, - ), - legacy=dict( - argstr="%d", - mandatory=True, - position=3, - usedefault=True, - ), - mask_file=dict( - argstr="%s", - extensions=None, - position=2, - ), - moving_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - position=1, - ), - n_iters=dict( - argstr="%d", - mandatory=True, - position=4, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + fixed_file=dict(argstr="%s", extensions=None, position=0), + ftol=dict(argstr="%g", mandatory=True, position=5, usedefault=True), + legacy=dict(argstr="%d", mandatory=True, position=3, usedefault=True), + mask_file=dict(argstr="%s", extensions=None, position=2), + moving_file=dict(argstr="%s", copyfile=False, extensions=None, position=1), + n_iters=dict(argstr="%d", mandatory=True, position=4, usedefault=True), ) inputs = DiffeoTask.input_spec() @@ -55,12 +22,7 @@ def test_DiffeoTask_inputs(): def test_DiffeoTask_outputs(): output_map = dict( - out_file=dict( - extensions=None, - ), - out_file_xfm=dict( - extensions=None, - ), + out_file=dict(extensions=None), out_file_xfm=dict(extensions=None) ) outputs = DiffeoTask.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_Rigid.py b/nipype/interfaces/dtitk/tests/test_auto_Rigid.py index dbcc6f0fcf..066fb8e42a 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_Rigid.py +++ b/nipype/interfaces/dtitk/tests/test_auto_Rigid.py @@ -4,50 +4,21 @@ def test_Rigid_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), fixed_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=0, - ), - ftol=dict( - argstr="%g", - mandatory=True, - position=4, - usedefault=True, - ), - initialize_xfm=dict( - argstr="%s", - copyfile=True, - extensions=None, - position=5, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0 ), + ftol=dict(argstr="%g", mandatory=True, position=4, usedefault=True), + initialize_xfm=dict(argstr="%s", copyfile=True, extensions=None, position=5), moving_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=1, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=1 ), sampling_xyz=dict( - argstr="%g %g %g", - mandatory=True, - position=3, - usedefault=True, + argstr="%g %g %g", mandatory=True, position=3, usedefault=True ), similarity_metric=dict( - argstr="%s", - mandatory=True, - position=2, - usedefault=True, + argstr="%s", mandatory=True, position=2, usedefault=True ), ) inputs = Rigid.input_spec() @@ -59,12 +30,7 @@ def test_Rigid_inputs(): def test_Rigid_outputs(): output_map = dict( - out_file=dict( - extensions=None, - ), - out_file_xfm=dict( - extensions=None, - ), + out_file=dict(extensions=None), out_file_xfm=dict(extensions=None) ) outputs = Rigid.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py b/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py index 6c5236607c..d915ea9e71 
100644 --- a/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py @@ -4,50 +4,21 @@ def test_RigidTask_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), fixed_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=0, - ), - ftol=dict( - argstr="%g", - mandatory=True, - position=4, - usedefault=True, - ), - initialize_xfm=dict( - argstr="%s", - copyfile=True, - extensions=None, - position=5, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0 ), + ftol=dict(argstr="%g", mandatory=True, position=4, usedefault=True), + initialize_xfm=dict(argstr="%s", copyfile=True, extensions=None, position=5), moving_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=1, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=1 ), sampling_xyz=dict( - argstr="%g %g %g", - mandatory=True, - position=3, - usedefault=True, + argstr="%g %g %g", mandatory=True, position=3, usedefault=True ), similarity_metric=dict( - argstr="%s", - mandatory=True, - position=2, - usedefault=True, + argstr="%s", mandatory=True, position=2, usedefault=True ), ) inputs = RigidTask.input_spec() @@ -59,12 +30,7 @@ def test_RigidTask_inputs(): def test_RigidTask_outputs(): output_map = dict( - out_file=dict( - extensions=None, - ), - out_file_xfm=dict( - extensions=None, - ), + out_file=dict(extensions=None), out_file_xfm=dict(extensions=None) ) outputs = RigidTask.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py index 3d32a314bd..7a7e65d01c 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py +++ b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py @@ -4,22 +4,10 @@ def test_SVAdjustVoxSp_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - ), - origin=dict( - argstr="-origin %g %g %g", - xor=["target_file"], - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True), + origin=dict(argstr="-origin %g %g %g", xor=["target_file"]), out_file=dict( argstr="-out %s", extensions=None, @@ -28,14 +16,9 @@ def test_SVAdjustVoxSp_inputs(): name_template="%s_avs", ), target_file=dict( - argstr="-target %s", - extensions=None, - xor=["voxel_size", "origin"], - ), - voxel_size=dict( - argstr="-vsize %g %g %g", - xor=["target_file"], + argstr="-target %s", extensions=None, xor=["voxel_size", "origin"] ), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"]), ) inputs = SVAdjustVoxSp.input_spec() @@ -45,11 +28,7 @@ def test_SVAdjustVoxSp_inputs(): def test_SVAdjustVoxSp_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = SVAdjustVoxSp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py index cedc693a24..40cf1f5c48 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py @@ -4,22 +4,10 @@ 
def test_SVAdjustVoxSpTask_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - ), - origin=dict( - argstr="-origin %g %g %g", - xor=["target_file"], - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True), + origin=dict(argstr="-origin %g %g %g", xor=["target_file"]), out_file=dict( argstr="-out %s", extensions=None, @@ -28,14 +16,9 @@ def test_SVAdjustVoxSpTask_inputs(): name_template="%s_avs", ), target_file=dict( - argstr="-target %s", - extensions=None, - xor=["voxel_size", "origin"], - ), - voxel_size=dict( - argstr="-vsize %g %g %g", - xor=["target_file"], + argstr="-target %s", extensions=None, xor=["voxel_size", "origin"] ), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"]), ) inputs = SVAdjustVoxSpTask.input_spec() @@ -45,11 +28,7 @@ def test_SVAdjustVoxSpTask_inputs(): def test_SVAdjustVoxSpTask_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = SVAdjustVoxSpTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVResample.py b/nipype/interfaces/dtitk/tests/test_auto_SVResample.py index fc880cd3e7..295605706d 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_SVResample.py +++ b/nipype/interfaces/dtitk/tests/test_auto_SVResample.py @@ -4,29 +4,12 @@ def test_SVResample_inputs(): input_map = dict( - align=dict( - argstr="-align %s", - ), - args=dict( - argstr="%s", - ), - array_size=dict( - argstr="-size %d %d %d", - xor=["target_file"], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - ), - origin=dict( - argstr="-origin %g %g %g", - xor=["target_file"], - ), + align=dict(argstr="-align %s"), + args=dict(argstr="%s"), + array_size=dict(argstr="-size %d %d %d", xor=["target_file"]), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True), + origin=dict(argstr="-origin %g %g %g", xor=["target_file"]), out_file=dict( argstr="-out %s", extensions=None, @@ -39,10 +22,7 @@ def test_SVResample_inputs(): extensions=None, xor=["array_size", "voxel_size", "origin"], ), - voxel_size=dict( - argstr="-vsize %g %g %g", - xor=["target_file"], - ), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"]), ) inputs = SVResample.input_spec() @@ -52,11 +32,7 @@ def test_SVResample_inputs(): def test_SVResample_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = SVResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py b/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py index 88412e68ae..7012a9af30 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py @@ -4,29 +4,12 @@ def test_SVResampleTask_inputs(): input_map = dict( - align=dict( - argstr="-align %s", - ), - args=dict( - argstr="%s", - ), - array_size=dict( - argstr="-size %d %d %d", - xor=["target_file"], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - ), - origin=dict( - 
argstr="-origin %g %g %g", - xor=["target_file"], - ), + align=dict(argstr="-align %s"), + args=dict(argstr="%s"), + array_size=dict(argstr="-size %d %d %d", xor=["target_file"]), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True), + origin=dict(argstr="-origin %g %g %g", xor=["target_file"]), out_file=dict( argstr="-out %s", extensions=None, @@ -39,10 +22,7 @@ def test_SVResampleTask_inputs(): extensions=None, xor=["array_size", "voxel_size", "origin"], ), - voxel_size=dict( - argstr="-vsize %g %g %g", - xor=["target_file"], - ), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"]), ) inputs = SVResampleTask.input_spec() @@ -52,11 +32,7 @@ def test_SVResampleTask_inputs(): def test_SVResampleTask_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = SVResampleTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py index 333e05628d..97da04bc0e 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py @@ -4,22 +4,10 @@ def test_TVAdjustOriginTask_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - ), - origin=dict( - argstr="-origin %g %g %g", - xor=["target_file"], - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True), + origin=dict(argstr="-origin %g %g %g", xor=["target_file"]), out_file=dict( argstr="-out %s", extensions=None, @@ -28,14 +16,9 @@ def test_TVAdjustOriginTask_inputs(): name_template="%s_avs", ), target_file=dict( - argstr="-target %s", - extensions=None, - xor=["voxel_size", "origin"], - ), - voxel_size=dict( - argstr="-vsize %g %g %g", - xor=["target_file"], + argstr="-target %s", extensions=None, xor=["voxel_size", "origin"] ), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"]), ) inputs = TVAdjustOriginTask.input_spec() @@ -45,11 +28,7 @@ def test_TVAdjustOriginTask_inputs(): def test_TVAdjustOriginTask_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = TVAdjustOriginTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py index 9c6596042b..8c775b628e 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py @@ -4,22 +4,10 @@ def test_TVAdjustVoxSp_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - ), - origin=dict( - argstr="-origin %g %g %g", - xor=["target_file"], - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True), + origin=dict(argstr="-origin %g %g %g", xor=["target_file"]), out_file=dict( argstr="-out %s", extensions=None, @@ -28,14 +16,9 @@ def test_TVAdjustVoxSp_inputs(): name_template="%s_avs", ), target_file=dict( - 
argstr="-target %s", - extensions=None, - xor=["voxel_size", "origin"], - ), - voxel_size=dict( - argstr="-vsize %g %g %g", - xor=["target_file"], + argstr="-target %s", extensions=None, xor=["voxel_size", "origin"] ), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"]), ) inputs = TVAdjustVoxSp.input_spec() @@ -45,11 +28,7 @@ def test_TVAdjustVoxSp_inputs(): def test_TVAdjustVoxSp_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = TVAdjustVoxSp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py index f34a76ae7b..1d53c3949b 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py @@ -4,22 +4,10 @@ def test_TVAdjustVoxSpTask_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - ), - origin=dict( - argstr="-origin %g %g %g", - xor=["target_file"], - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True), + origin=dict(argstr="-origin %g %g %g", xor=["target_file"]), out_file=dict( argstr="-out %s", extensions=None, @@ -28,14 +16,9 @@ def test_TVAdjustVoxSpTask_inputs(): name_template="%s_avs", ), target_file=dict( - argstr="-target %s", - extensions=None, - xor=["voxel_size", "origin"], - ), - voxel_size=dict( - argstr="-vsize %g %g %g", - xor=["target_file"], + argstr="-target %s", extensions=None, xor=["voxel_size", "origin"] ), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"]), ) inputs = TVAdjustVoxSpTask.input_spec() @@ -45,11 +28,7 @@ def test_TVAdjustVoxSpTask_inputs(): def test_TVAdjustVoxSpTask_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = TVAdjustVoxSpTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVResample.py b/nipype/interfaces/dtitk/tests/test_auto_TVResample.py index 2ca99176f8..d5aa76f872 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVResample.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVResample.py @@ -4,32 +4,13 @@ def test_TVResample_inputs(): input_map = dict( - align=dict( - argstr="-align %s", - ), - args=dict( - argstr="%s", - ), - array_size=dict( - argstr="-size %d %d %d", - xor=["target_file"], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - ), - interpolation=dict( - argstr="-interp %s", - ), - origin=dict( - argstr="-origin %g %g %g", - xor=["target_file"], - ), + align=dict(argstr="-align %s"), + args=dict(argstr="%s"), + array_size=dict(argstr="-size %d %d %d", xor=["target_file"]), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True), + interpolation=dict(argstr="-interp %s"), + origin=dict(argstr="-origin %g %g %g", xor=["target_file"]), out_file=dict( argstr="-out %s", extensions=None, @@ -42,10 +23,7 @@ def test_TVResample_inputs(): extensions=None, xor=["array_size", "voxel_size", "origin"], ), - voxel_size=dict( - argstr="-vsize %g %g %g", - xor=["target_file"], - ), + 
voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"]), ) inputs = TVResample.input_spec() @@ -55,11 +33,7 @@ def test_TVResample_inputs(): def test_TVResample_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = TVResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py index d1f908fca8..1fdcf2dfd1 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py @@ -4,32 +4,13 @@ def test_TVResampleTask_inputs(): input_map = dict( - align=dict( - argstr="-align %s", - ), - args=dict( - argstr="%s", - ), - array_size=dict( - argstr="-size %d %d %d", - xor=["target_file"], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - ), - interpolation=dict( - argstr="-interp %s", - ), - origin=dict( - argstr="-origin %g %g %g", - xor=["target_file"], - ), + align=dict(argstr="-align %s"), + args=dict(argstr="%s"), + array_size=dict(argstr="-size %d %d %d", xor=["target_file"]), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True), + interpolation=dict(argstr="-interp %s"), + origin=dict(argstr="-origin %g %g %g", xor=["target_file"]), out_file=dict( argstr="-out %s", extensions=None, @@ -42,10 +23,7 @@ def test_TVResampleTask_inputs(): extensions=None, xor=["array_size", "voxel_size", "origin"], ), - voxel_size=dict( - argstr="-vsize %g %g %g", - xor=["target_file"], - ), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"]), ) inputs = TVResampleTask.input_spec() @@ -55,11 +33,7 @@ def test_TVResampleTask_inputs(): def test_TVResampleTask_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = TVResampleTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVtool.py b/nipype/interfaces/dtitk/tests/test_auto_TVtool.py index 11e2d05acc..e1f9f2a164 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVtool.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVtool.py @@ -4,26 +4,11 @@ def test_TVtool_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - ), - in_flag=dict( - argstr="-%s", - ), - out_file=dict( - argstr="-out %s", - extensions=None, - genfile=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True), + in_flag=dict(argstr="-%s"), + out_file=dict(argstr="-out %s", extensions=None, genfile=True), ) inputs = TVtool.input_spec() @@ -33,11 +18,7 @@ def test_TVtool_inputs(): def test_TVtool_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = TVtool.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py index 7af7bcb75b..b7df6423be 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py @@ -4,26 
+4,11 @@ def test_TVtoolTask_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - ), - in_flag=dict( - argstr="-%s", - ), - out_file=dict( - argstr="-out %s", - extensions=None, - genfile=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True), + in_flag=dict(argstr="-%s"), + out_file=dict(argstr="-out %s", extensions=None, genfile=True), ) inputs = TVtoolTask.input_spec() @@ -33,11 +18,7 @@ def test_TVtoolTask_inputs(): def test_TVtoolTask_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = TVtoolTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py index 8d3ebfad98..3c74c498ae 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py @@ -4,30 +4,12 @@ def test_affScalarVolTask_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - deformation=dict( - argstr="-deformation %g %g %g %g %g %g", - xor=["transform"], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - euler=dict( - argstr="-euler %g %g %g", - xor=["transform"], - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - ), - interpolation=dict( - argstr="-interp %s", - usedefault=True, - ), + args=dict(argstr="%s"), + deformation=dict(argstr="-deformation %g %g %g %g %g %g", xor=["transform"]), + environ=dict(nohash=True, usedefault=True), + euler=dict(argstr="-euler %g %g %g", xor=["transform"]), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True), + interpolation=dict(argstr="-interp %s", usedefault=True), out_file=dict( argstr="-out %s", extensions=None, @@ -35,20 +17,13 @@ def test_affScalarVolTask_inputs(): name_source="in_file", name_template="%s_affxfmd", ), - target=dict( - argstr="-target %s", - extensions=None, - xor=["transform"], - ), + target=dict(argstr="-target %s", extensions=None, xor=["transform"]), transform=dict( argstr="-trans %s", extensions=None, xor=["target", "translation", "euler", "deformation"], ), - translation=dict( - argstr="-translation %g %g %g", - xor=["transform"], - ), + translation=dict(argstr="-translation %g %g %g", xor=["transform"]), ) inputs = affScalarVolTask.input_spec() @@ -58,11 +33,7 @@ def test_affScalarVolTask_inputs(): def test_affScalarVolTask_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = affScalarVolTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py index fbfde68e86..dd49b3cd01 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py @@ -4,30 +4,12 @@ def test_affSymTensor3DVolTask_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - deformation=dict( - argstr="-deformation %g %g %g %g %g %g", - xor=["transform"], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - euler=dict( - argstr="-euler %g %g %g", - xor=["transform"], - ), - 
in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - ), - interpolation=dict( - argstr="-interp %s", - usedefault=True, - ), + args=dict(argstr="%s"), + deformation=dict(argstr="-deformation %g %g %g %g %g %g", xor=["transform"]), + environ=dict(nohash=True, usedefault=True), + euler=dict(argstr="-euler %g %g %g", xor=["transform"]), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True), + interpolation=dict(argstr="-interp %s", usedefault=True), out_file=dict( argstr="-out %s", extensions=None, @@ -35,24 +17,14 @@ def test_affSymTensor3DVolTask_inputs(): name_source="in_file", name_template="%s_affxfmd", ), - reorient=dict( - argstr="-reorient %s", - usedefault=True, - ), - target=dict( - argstr="-target %s", - extensions=None, - xor=["transform"], - ), + reorient=dict(argstr="-reorient %s", usedefault=True), + target=dict(argstr="-target %s", extensions=None, xor=["transform"]), transform=dict( argstr="-trans %s", extensions=None, xor=["target", "translation", "euler", "deformation"], ), - translation=dict( - argstr="-translation %g %g %g", - xor=["transform"], - ), + translation=dict(argstr="-translation %g %g %g", xor=["transform"]), ) inputs = affSymTensor3DVolTask.input_spec() @@ -62,11 +34,7 @@ def test_affSymTensor3DVolTask_inputs(): def test_affSymTensor3DVolTask_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = affSymTensor3DVolTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py index 4a7ffee6f4..8f44e64ba9 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py @@ -4,25 +4,11 @@ def test_diffeoScalarVolTask_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - flip=dict( - argstr="-flip %d %d %d", - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - ), - interpolation=dict( - argstr="-interp %s", - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + flip=dict(argstr="-flip %d %d %d"), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True), + interpolation=dict(argstr="-interp %s", usedefault=True), out_file=dict( argstr="-out %s", extensions=None, @@ -30,23 +16,10 @@ def test_diffeoScalarVolTask_inputs(): name_source="in_file", name_template="%s_diffeoxfmd", ), - resampling_type=dict( - argstr="-type %s", - ), - target=dict( - argstr="-target %s", - extensions=None, - xor=["voxel_size"], - ), - transform=dict( - argstr="-trans %s", - extensions=None, - mandatory=True, - ), - voxel_size=dict( - argstr="-vsize %g %g %g", - xor=["target"], - ), + resampling_type=dict(argstr="-type %s"), + target=dict(argstr="-target %s", extensions=None, xor=["voxel_size"]), + transform=dict(argstr="-trans %s", extensions=None, mandatory=True), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target"]), ) inputs = diffeoScalarVolTask.input_spec() @@ -56,11 +29,7 @@ def test_diffeoScalarVolTask_inputs(): def test_diffeoScalarVolTask_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = diffeoScalarVolTask.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py index 6724343e69..2b5f7e2e1a 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py @@ -4,29 +4,12 @@ def test_diffeoSymTensor3DVolTask_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - df=dict( - argstr="-df %s", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - flip=dict( - argstr="-flip %d %d %d", - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - ), - interpolation=dict( - argstr="-interp %s", - usedefault=True, - ), + args=dict(argstr="%s"), + df=dict(argstr="-df %s", usedefault=True), + environ=dict(nohash=True, usedefault=True), + flip=dict(argstr="-flip %d %d %d"), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True), + interpolation=dict(argstr="-interp %s", usedefault=True), out_file=dict( argstr="-out %s", extensions=None, @@ -34,27 +17,11 @@ def test_diffeoSymTensor3DVolTask_inputs(): name_source="in_file", name_template="%s_diffeoxfmd", ), - reorient=dict( - argstr="-reorient %s", - usedefault=True, - ), - resampling_type=dict( - argstr="-type %s", - ), - target=dict( - argstr="-target %s", - extensions=None, - xor=["voxel_size"], - ), - transform=dict( - argstr="-trans %s", - extensions=None, - mandatory=True, - ), - voxel_size=dict( - argstr="-vsize %g %g %g", - xor=["target"], - ), + reorient=dict(argstr="-reorient %s", usedefault=True), + resampling_type=dict(argstr="-type %s"), + target=dict(argstr="-target %s", extensions=None, xor=["voxel_size"]), + transform=dict(argstr="-trans %s", extensions=None, mandatory=True), + voxel_size=dict(argstr="-vsize %g %g %g", xor=["target"]), ) inputs = diffeoSymTensor3DVolTask.input_spec() @@ -64,11 +31,7 @@ def test_diffeoSymTensor3DVolTask_inputs(): def test_diffeoSymTensor3DVolTask_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = diffeoSymTensor3DVolTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py b/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py index b7895e1cf1..ed5ab33ead 100644 --- a/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py +++ b/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py @@ -4,45 +4,15 @@ def test_AnalyzeWarp_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - jac=dict( - argstr="-jac %s", - usedefault=True, - ), - jacmat=dict( - argstr="-jacmat %s", - usedefault=True, - ), - moving_image=dict( - argstr="-in %s", - extensions=None, - ), - num_threads=dict( - argstr="-threads %01d", - nohash=True, - usedefault=True, - ), - output_path=dict( - argstr="-out %s", - mandatory=True, - usedefault=True, - ), - points=dict( - argstr="-def %s", - position=0, - usedefault=True, - ), - transform_file=dict( - argstr="-tp %s", - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + jac=dict(argstr="-jac %s", usedefault=True), + jacmat=dict(argstr="-jacmat %s", usedefault=True), + moving_image=dict(argstr="-in %s", extensions=None), + num_threads=dict(argstr="-threads %01d", nohash=True, usedefault=True), + output_path=dict(argstr="-out %s", mandatory=True, usedefault=True), + 
points=dict(argstr="-def %s", position=0, usedefault=True), + transform_file=dict(argstr="-tp %s", extensions=None, mandatory=True), ) inputs = AnalyzeWarp.input_spec() @@ -53,15 +23,9 @@ def test_AnalyzeWarp_inputs(): def test_AnalyzeWarp_outputs(): output_map = dict( - disp_field=dict( - extensions=None, - ), - jacdet_map=dict( - extensions=None, - ), - jacmat_map=dict( - extensions=None, - ), + disp_field=dict(extensions=None), + jacdet_map=dict(extensions=None), + jacmat_map=dict(extensions=None), ) outputs = AnalyzeWarp.output_spec() diff --git a/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py b/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py index 0eebfb0665..aaab8a1031 100644 --- a/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py +++ b/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py @@ -4,33 +4,12 @@ def test_ApplyWarp_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - moving_image=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - ), - num_threads=dict( - argstr="-threads %01d", - nohash=True, - usedefault=True, - ), - output_path=dict( - argstr="-out %s", - mandatory=True, - usedefault=True, - ), - transform_file=dict( - argstr="-tp %s", - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + moving_image=dict(argstr="-in %s", extensions=None, mandatory=True), + num_threads=dict(argstr="-threads %01d", nohash=True, usedefault=True), + output_path=dict(argstr="-out %s", mandatory=True, usedefault=True), + transform_file=dict(argstr="-tp %s", extensions=None, mandatory=True), ) inputs = ApplyWarp.input_spec() @@ -40,11 +19,7 @@ def test_ApplyWarp_inputs(): def test_ApplyWarp_outputs(): - output_map = dict( - warped_file=dict( - extensions=None, - ), - ) + output_map = dict(warped_file=dict(extensions=None)) outputs = ApplyWarp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/elastix/tests/test_auto_EditTransform.py b/nipype/interfaces/elastix/tests/test_auto_EditTransform.py index a8617c4988..2f05e789ad 100644 --- a/nipype/interfaces/elastix/tests/test_auto_EditTransform.py +++ b/nipype/interfaces/elastix/tests/test_auto_EditTransform.py @@ -4,26 +4,12 @@ def test_EditTransform_inputs(): input_map = dict( - interpolation=dict( - argstr="FinalBSplineInterpolationOrder", - usedefault=True, - ), - output_file=dict( - extensions=None, - ), - output_format=dict( - argstr="ResultImageFormat", - ), - output_type=dict( - argstr="ResultImagePixelType", - ), - reference_image=dict( - extensions=None, - ), - transform_file=dict( - extensions=None, - mandatory=True, - ), + interpolation=dict(argstr="FinalBSplineInterpolationOrder", usedefault=True), + output_file=dict(extensions=None), + output_format=dict(argstr="ResultImageFormat"), + output_type=dict(argstr="ResultImagePixelType"), + reference_image=dict(extensions=None), + transform_file=dict(extensions=None, mandatory=True), ) inputs = EditTransform.input_spec() @@ -33,11 +19,7 @@ def test_EditTransform_inputs(): def test_EditTransform_outputs(): - output_map = dict( - output_file=dict( - extensions=None, - ), - ) + output_map = dict(output_file=dict(extensions=None)) outputs = EditTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py b/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py index bce2a3c662..5b44e81088 100644 --- 
a/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py +++ b/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py @@ -4,33 +4,12 @@ def test_PointsWarp_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - num_threads=dict( - argstr="-threads %01d", - nohash=True, - usedefault=True, - ), - output_path=dict( - argstr="-out %s", - mandatory=True, - usedefault=True, - ), - points_file=dict( - argstr="-def %s", - extensions=None, - mandatory=True, - ), - transform_file=dict( - argstr="-tp %s", - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + num_threads=dict(argstr="-threads %01d", nohash=True, usedefault=True), + output_path=dict(argstr="-out %s", mandatory=True, usedefault=True), + points_file=dict(argstr="-def %s", extensions=None, mandatory=True), + transform_file=dict(argstr="-tp %s", extensions=None, mandatory=True), ) inputs = PointsWarp.input_spec() @@ -40,11 +19,7 @@ def test_PointsWarp_inputs(): def test_PointsWarp_outputs(): - output_map = dict( - warped_file=dict( - extensions=None, - ), - ) + output_map = dict(warped_file=dict(extensions=None)) outputs = PointsWarp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/elastix/tests/test_auto_Registration.py b/nipype/interfaces/elastix/tests/test_auto_Registration.py index ae6d929950..0c4759d1e7 100644 --- a/nipype/interfaces/elastix/tests/test_auto_Registration.py +++ b/nipype/interfaces/elastix/tests/test_auto_Registration.py @@ -4,49 +4,16 @@ def test_Registration_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixed_image=dict( - argstr="-f %s", - extensions=None, - mandatory=True, - ), - fixed_mask=dict( - argstr="-fMask %s", - extensions=None, - ), - initial_transform=dict( - argstr="-t0 %s", - extensions=None, - ), - moving_image=dict( - argstr="-m %s", - extensions=None, - mandatory=True, - ), - moving_mask=dict( - argstr="-mMask %s", - extensions=None, - ), - num_threads=dict( - argstr="-threads %01d", - nohash=True, - usedefault=True, - ), - output_path=dict( - argstr="-out %s", - mandatory=True, - usedefault=True, - ), - parameters=dict( - argstr="-p %s...", - mandatory=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + fixed_image=dict(argstr="-f %s", extensions=None, mandatory=True), + fixed_mask=dict(argstr="-fMask %s", extensions=None), + initial_transform=dict(argstr="-t0 %s", extensions=None), + moving_image=dict(argstr="-m %s", extensions=None, mandatory=True), + moving_mask=dict(argstr="-mMask %s", extensions=None), + num_threads=dict(argstr="-threads %01d", nohash=True, usedefault=True), + output_path=dict(argstr="-out %s", mandatory=True, usedefault=True), + parameters=dict(argstr="-p %s...", mandatory=True), ) inputs = Registration.input_spec() @@ -58,9 +25,7 @@ def test_Registration_inputs(): def test_Registration_outputs(): output_map = dict( transform=dict(), - warped_file=dict( - extensions=None, - ), + warped_file=dict(extensions=None), warped_files=dict(), warped_files_flags=dict(), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py b/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py index 23618b2aa8..42aa882edf 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py @@ -4,38 +4,14 @@ def 
test_AddXFormToHeader_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - copy_name=dict( - argstr="-c", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - out_file=dict( - argstr="%s", - extensions=None, - position=-1, - usedefault=True, - ), + args=dict(argstr="%s"), + copy_name=dict(argstr="-c"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + out_file=dict(argstr="%s", extensions=None, position=-1, usedefault=True), subjects_dir=dict(), - transform=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), - verbose=dict( - argstr="-v", - ), + transform=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + verbose=dict(argstr="-v"), ) inputs = AddXFormToHeader.input_spec() @@ -45,11 +21,7 @@ def test_AddXFormToHeader_inputs(): def test_AddXFormToHeader_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = AddXFormToHeader.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py b/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py index bc65ee096c..b89ca9877c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py @@ -4,87 +4,29 @@ def test_Aparc2Aseg_inputs(): input_map = dict( - a2009s=dict( - argstr="--a2009s", - ), - args=dict( - argstr="%s", - ), - aseg=dict( - argstr="--aseg %s", - extensions=None, - ), + a2009s=dict(argstr="--a2009s"), + args=dict(argstr="%s"), + aseg=dict(argstr="--aseg %s", extensions=None), copy_inputs=dict(), - ctxseg=dict( - argstr="--ctxseg %s", - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - filled=dict( - extensions=None, - ), - hypo_wm=dict( - argstr="--hypo-as-wm", - ), - label_wm=dict( - argstr="--labelwm", - ), - lh_annotation=dict( - extensions=None, - mandatory=True, - ), - lh_pial=dict( - extensions=None, - mandatory=True, - ), - lh_ribbon=dict( - extensions=None, - mandatory=True, - ), - lh_white=dict( - extensions=None, - mandatory=True, - ), - out_file=dict( - argstr="--o %s", - extensions=None, - mandatory=True, - ), - rh_annotation=dict( - extensions=None, - mandatory=True, - ), - rh_pial=dict( - extensions=None, - mandatory=True, - ), - rh_ribbon=dict( - extensions=None, - mandatory=True, - ), - rh_white=dict( - extensions=None, - mandatory=True, - ), - ribbon=dict( - extensions=None, - mandatory=True, - ), - rip_unknown=dict( - argstr="--rip-unknown", - ), - subject_id=dict( - argstr="--s %s", - mandatory=True, - usedefault=True, - ), + ctxseg=dict(argstr="--ctxseg %s", extensions=None), + environ=dict(nohash=True, usedefault=True), + filled=dict(extensions=None), + hypo_wm=dict(argstr="--hypo-as-wm"), + label_wm=dict(argstr="--labelwm"), + lh_annotation=dict(extensions=None, mandatory=True), + lh_pial=dict(extensions=None, mandatory=True), + lh_ribbon=dict(extensions=None, mandatory=True), + lh_white=dict(extensions=None, mandatory=True), + out_file=dict(argstr="--o %s", extensions=None, mandatory=True), + rh_annotation=dict(extensions=None, mandatory=True), + rh_pial=dict(extensions=None, mandatory=True), + rh_ribbon=dict(extensions=None, mandatory=True), + rh_white=dict(extensions=None, mandatory=True), + ribbon=dict(extensions=None, 
mandatory=True), + rip_unknown=dict(argstr="--rip-unknown"), + subject_id=dict(argstr="--s %s", mandatory=True, usedefault=True), subjects_dir=dict(), - volmask=dict( - argstr="--volmask", - ), + volmask=dict(argstr="--volmask"), ) inputs = Aparc2Aseg.input_spec() @@ -94,12 +36,7 @@ def test_Aparc2Aseg_inputs(): def test_Aparc2Aseg_outputs(): - output_map = dict( - out_file=dict( - argstr="%s", - extensions=None, - ), - ) + output_map = dict(out_file=dict(argstr="%s", extensions=None)) outputs = Aparc2Aseg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py b/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py index e2738ac746..dd9ebb4ae9 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py @@ -4,23 +4,10 @@ def test_Apas2Aseg_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="--i %s", - extensions=None, - mandatory=True, - ), - out_file=dict( - argstr="--o %s", - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="--i %s", extensions=None, mandatory=True), + out_file=dict(argstr="--o %s", extensions=None, mandatory=True), subjects_dir=dict(), ) inputs = Apas2Aseg.input_spec() @@ -31,12 +18,7 @@ def test_Apas2Aseg_inputs(): def test_Apas2Aseg_outputs(): - output_map = dict( - out_file=dict( - argstr="%s", - extensions=None, - ), - ) + output_map = dict(out_file=dict(argstr="%s", extensions=None)) outputs = Apas2Aseg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py b/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py index b3a8e80806..952ee81323 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py @@ -4,34 +4,13 @@ def test_ApplyMask_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), - invert_xfm=dict( - argstr="-invert", - ), - keep_mask_deletion_edits=dict( - argstr="-keep_mask_deletion_edits", - ), - mask_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - mask_thresh=dict( - argstr="-T %.4f", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + invert_xfm=dict(argstr="-invert"), + keep_mask_deletion_edits=dict(argstr="-keep_mask_deletion_edits"), + mask_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + mask_thresh=dict(argstr="-T %.4f"), out_file=dict( argstr="%s", extensions=None, @@ -42,24 +21,11 @@ def test_ApplyMask_inputs(): position=-1, ), subjects_dir=dict(), - transfer=dict( - argstr="-transfer %d", - ), - use_abs=dict( - argstr="-abs", - ), - xfm_file=dict( - argstr="-xform %s", - extensions=None, - ), - xfm_source=dict( - argstr="-lta_src %s", - extensions=None, - ), - xfm_target=dict( - argstr="-lta_dst %s", - extensions=None, - ), + transfer=dict(argstr="-transfer %d"), + use_abs=dict(argstr="-abs"), + xfm_file=dict(argstr="-xform %s", extensions=None), + xfm_source=dict(argstr="-lta_src %s", extensions=None), + xfm_target=dict(argstr="-lta_dst %s", extensions=None), 
) inputs = ApplyMask.input_spec() @@ -69,11 +35,7 @@ def test_ApplyMask_inputs(): def test_ApplyMask_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = ApplyMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py b/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py index 2aa4ae59f8..02c09ab30f 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py @@ -4,13 +4,8 @@ def test_ApplyVolTransform_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), fs_target=dict( argstr="--fstarg", mandatory=True, @@ -32,16 +27,9 @@ def test_ApplyVolTransform_inputs(): "subject", ), ), - interp=dict( - argstr="--interp %s", - ), - inverse=dict( - argstr="--inv", - ), - invert_morph=dict( - argstr="--inv-morph", - requires=["m3z_file"], - ), + interp=dict(argstr="--interp %s"), + inverse=dict(argstr="--inv"), + invert_morph=dict(argstr="--inv-morph", requires=["m3z_file"]), lta_file=dict( argstr="--lta %s", extensions=None, @@ -72,10 +60,7 @@ def test_ApplyVolTransform_inputs(): "subject", ), ), - m3z_file=dict( - argstr="--m3z %s", - extensions=None, - ), + m3z_file=dict(argstr="--m3z %s", extensions=None), mni_152_reg=dict( argstr="--regheader", mandatory=True, @@ -90,13 +75,8 @@ def test_ApplyVolTransform_inputs(): "subject", ), ), - no_ded_m3z_path=dict( - argstr="--noDefM3zPath", - requires=["m3z_file"], - ), - no_resample=dict( - argstr="--no-resample", - ), + no_ded_m3z_path=dict(argstr="--noDefM3zPath", requires=["m3z_file"]), + no_resample=dict(argstr="--no-resample"), reg_file=dict( argstr="--reg %s", extensions=None, @@ -127,10 +107,7 @@ def test_ApplyVolTransform_inputs(): ), ), source_file=dict( - argstr="--mov %s", - copyfile=False, - extensions=None, - mandatory=True, + argstr="--mov %s", copyfile=False, extensions=None, mandatory=True ), subject=dict( argstr="--s %s", @@ -148,24 +125,16 @@ def test_ApplyVolTransform_inputs(): ), subjects_dir=dict(), tal=dict( - argstr="--tal", - mandatory=True, - xor=("target_file", "tal", "fs_target"), - ), - tal_resolution=dict( - argstr="--talres %.10f", + argstr="--tal", mandatory=True, xor=("target_file", "tal", "fs_target") ), + tal_resolution=dict(argstr="--talres %.10f"), target_file=dict( argstr="--targ %s", extensions=None, mandatory=True, xor=("target_file", "tal", "fs_target"), ), - transformed_file=dict( - argstr="--o %s", - extensions=None, - genfile=True, - ), + transformed_file=dict(argstr="--o %s", extensions=None, genfile=True), xfm_reg_file=dict( argstr="--xfm %s", extensions=None, @@ -190,11 +159,7 @@ def test_ApplyVolTransform_inputs(): def test_ApplyVolTransform_outputs(): - output_map = dict( - transformed_file=dict( - extensions=None, - ), - ) + output_map = dict(transformed_file=dict(extensions=None)) outputs = ApplyVolTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py b/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py index e7a95d1a3d..06c22a2d7b 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py @@ -4,102 +4,35 @@ def test_Binarize_inputs(): 
input_map = dict( - abs=dict( - argstr="--abs", - ), - args=dict( - argstr="%s", - ), - bin_col_num=dict( - argstr="--bincol", - ), - bin_val=dict( - argstr="--binval %d", - ), - bin_val_not=dict( - argstr="--binvalnot %d", - ), - binary_file=dict( - argstr="--o %s", - extensions=None, - genfile=True, - ), - count_file=dict( - argstr="--count %s", - ), - dilate=dict( - argstr="--dilate %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - erode=dict( - argstr="--erode %d", - ), - erode2d=dict( - argstr="--erode2d %d", - ), - frame_no=dict( - argstr="--frame %s", - ), - in_file=dict( - argstr="--i %s", - copyfile=False, - extensions=None, - mandatory=True, - ), - invert=dict( - argstr="--inv", - ), - mask_file=dict( - argstr="--mask maskvol", - extensions=None, - ), - mask_thresh=dict( - argstr="--mask-thresh %f", - ), - match=dict( - argstr="--match %d...", - ), - max=dict( - argstr="--max %f", - xor=["wm_ven_csf"], - ), - merge_file=dict( - argstr="--merge %s", - extensions=None, - ), - min=dict( - argstr="--min %f", - xor=["wm_ven_csf"], - ), - out_type=dict( - argstr="", - ), - rmax=dict( - argstr="--rmax %f", - ), - rmin=dict( - argstr="--rmin %f", - ), + abs=dict(argstr="--abs"), + args=dict(argstr="%s"), + bin_col_num=dict(argstr="--bincol"), + bin_val=dict(argstr="--binval %d"), + bin_val_not=dict(argstr="--binvalnot %d"), + binary_file=dict(argstr="--o %s", extensions=None, genfile=True), + count_file=dict(argstr="--count %s"), + dilate=dict(argstr="--dilate %d"), + environ=dict(nohash=True, usedefault=True), + erode=dict(argstr="--erode %d"), + erode2d=dict(argstr="--erode2d %d"), + frame_no=dict(argstr="--frame %s"), + in_file=dict(argstr="--i %s", copyfile=False, extensions=None, mandatory=True), + invert=dict(argstr="--inv"), + mask_file=dict(argstr="--mask maskvol", extensions=None), + mask_thresh=dict(argstr="--mask-thresh %f"), + match=dict(argstr="--match %d..."), + max=dict(argstr="--max %f", xor=["wm_ven_csf"]), + merge_file=dict(argstr="--merge %s", extensions=None), + min=dict(argstr="--min %f", xor=["wm_ven_csf"]), + out_type=dict(argstr=""), + rmax=dict(argstr="--rmax %f"), + rmin=dict(argstr="--rmin %f"), subjects_dir=dict(), - ventricles=dict( - argstr="--ventricles", - ), - wm=dict( - argstr="--wm", - ), - wm_ven_csf=dict( - argstr="--wm+vcsf", - xor=["min", "max"], - ), - zero_edges=dict( - argstr="--zero-edges", - ), - zero_slice_edge=dict( - argstr="--zero-slice-edges", - ), + ventricles=dict(argstr="--ventricles"), + wm=dict(argstr="--wm"), + wm_ven_csf=dict(argstr="--wm+vcsf", xor=["min", "max"]), + zero_edges=dict(argstr="--zero-edges"), + zero_slice_edge=dict(argstr="--zero-slice-edges"), ) inputs = Binarize.input_spec() @@ -110,12 +43,7 @@ def test_Binarize_inputs(): def test_Binarize_outputs(): output_map = dict( - binary_file=dict( - extensions=None, - ), - count_file=dict( - extensions=None, - ), + binary_file=dict(extensions=None), count_file=dict(extensions=None) ) outputs = Binarize.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py b/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py index d89e51841b..f03acba7d3 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py @@ -4,67 +4,22 @@ def test_CALabel_inputs(): input_map = dict( - align=dict( - argstr="-align", - ), - args=dict( - argstr="%s", - ), - aseg=dict( - argstr="-aseg %s", - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - 
argstr="%s", - extensions=None, - mandatory=True, - position=-4, - ), - in_vol=dict( - argstr="-r %s", - extensions=None, - ), - intensities=dict( - argstr="-r %s", - extensions=None, - ), - label=dict( - argstr="-l %s", - extensions=None, - ), - no_big_ventricles=dict( - argstr="-nobigventricles", - ), + align=dict(argstr="-align"), + args=dict(argstr="%s"), + aseg=dict(argstr="-aseg %s", extensions=None), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-4), + in_vol=dict(argstr="-r %s", extensions=None), + intensities=dict(argstr="-r %s", extensions=None), + label=dict(argstr="-l %s", extensions=None), + no_big_ventricles=dict(argstr="-nobigventricles"), num_threads=dict(), - out_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - ), - prior=dict( - argstr="-prior %.1f", - ), - relabel_unlikely=dict( - argstr="-relabel_unlikely %d %.1f", - ), + out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), + prior=dict(argstr="-prior %.1f"), + relabel_unlikely=dict(argstr="-relabel_unlikely %d %.1f"), subjects_dir=dict(), - template=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - transform=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), + template=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + transform=dict(argstr="%s", extensions=None, mandatory=True, position=-3), ) inputs = CALabel.input_spec() @@ -74,11 +29,7 @@ def test_CALabel_inputs(): def test_CALabel_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = CALabel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py b/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py index 58788da3a4..2b35b6b2fa 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py @@ -4,37 +4,13 @@ def test_CANormalize_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - atlas=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), - control_points=dict( - argstr="-c %s", - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-4, - ), - long_file=dict( - argstr="-long %s", - extensions=None, - ), - mask=dict( - argstr="-mask %s", - extensions=None, - ), + args=dict(argstr="%s"), + atlas=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + control_points=dict(argstr="-c %s", extensions=None), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-4), + long_file=dict(argstr="-long %s", extensions=None), + mask=dict(argstr="-mask %s", extensions=None), out_file=dict( argstr="%s", extensions=None, @@ -45,12 +21,7 @@ def test_CANormalize_inputs(): position=-1, ), subjects_dir=dict(), - transform=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), + transform=dict(argstr="%s", extensions=None, mandatory=True, position=-2), ) inputs = CANormalize.input_spec() @@ -61,12 +32,7 @@ def test_CANormalize_inputs(): def test_CANormalize_outputs(): output_map = dict( - control_points=dict( - extensions=None, - ), - out_file=dict( - extensions=None, - ), + control_points=dict(extensions=None), 
out_file=dict(extensions=None) ) outputs = CANormalize.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py b/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py index 3f410524f0..c0e7b76b5c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py @@ -4,59 +4,21 @@ def test_CARegister_inputs(): input_map = dict( - A=dict( - argstr="-A %d", - ), - align=dict( - argstr="-align-%s", - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), - invert_and_save=dict( - argstr="-invert-and-save", - position=-4, - ), - l_files=dict( - argstr="-l %s", - ), - levels=dict( - argstr="-levels %d", - ), - mask=dict( - argstr="-mask %s", - extensions=None, - ), - no_big_ventricles=dict( - argstr="-nobigventricles", - ), + A=dict(argstr="-A %d"), + align=dict(argstr="-align-%s"), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + invert_and_save=dict(argstr="-invert-and-save", position=-4), + l_files=dict(argstr="-l %s"), + levels=dict(argstr="-levels %d"), + mask=dict(argstr="-mask %s", extensions=None), + no_big_ventricles=dict(argstr="-nobigventricles"), num_threads=dict(), - out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - position=-1, - ), + out_file=dict(argstr="%s", extensions=None, genfile=True, position=-1), subjects_dir=dict(), - template=dict( - argstr="%s", - extensions=None, - position=-2, - ), - transform=dict( - argstr="-T %s", - extensions=None, - ), + template=dict(argstr="%s", extensions=None, position=-2), + transform=dict(argstr="-T %s", extensions=None), ) inputs = CARegister.input_spec() @@ -66,11 +28,7 @@ def test_CARegister_inputs(): def test_CARegister_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = CARegister.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py b/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py index d14e203079..338ceb29be 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py @@ -4,13 +4,8 @@ def test_CheckTalairachAlignment_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), in_file=dict( argstr="-xfm %s", extensions=None, @@ -18,17 +13,9 @@ def test_CheckTalairachAlignment_inputs(): position=-1, xor=["subject"], ), - subject=dict( - argstr="-subj %s", - mandatory=True, - position=-1, - xor=["in_file"], - ), + subject=dict(argstr="-subj %s", mandatory=True, position=-1, xor=["in_file"]), subjects_dir=dict(), - threshold=dict( - argstr="-T %.3f", - usedefault=True, - ), + threshold=dict(argstr="-T %.3f", usedefault=True), ) inputs = CheckTalairachAlignment.input_spec() @@ -38,11 +25,7 @@ def test_CheckTalairachAlignment_inputs(): def test_CheckTalairachAlignment_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = CheckTalairachAlignment.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py b/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py index 992f3e308c..c2a434cedf 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py @@ -4,70 +4,26 @@ def test_Concatenate_inputs(): input_map = dict( - add_val=dict( - argstr="--add %f", - ), - args=dict( - argstr="%s", - ), - combine=dict( - argstr="--combine", - ), - concatenated_file=dict( - argstr="--o %s", - extensions=None, - genfile=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gmean=dict( - argstr="--gmean %d", - ), - in_files=dict( - argstr="--i %s...", - mandatory=True, - ), - keep_dtype=dict( - argstr="--keep-datatype", - ), - mask_file=dict( - argstr="--mask %s", - extensions=None, - ), - max_bonfcor=dict( - argstr="--max-bonfcor", - ), - max_index=dict( - argstr="--max-index", - ), - mean_div_n=dict( - argstr="--mean-div-n", - ), - multiply_by=dict( - argstr="--mul %f", - ), - multiply_matrix_file=dict( - argstr="--mtx %s", - extensions=None, - ), - paired_stats=dict( - argstr="--paired-%s", - ), - sign=dict( - argstr="--%s", - ), - sort=dict( - argstr="--sort", - ), - stats=dict( - argstr="--%s", - ), + add_val=dict(argstr="--add %f"), + args=dict(argstr="%s"), + combine=dict(argstr="--combine"), + concatenated_file=dict(argstr="--o %s", extensions=None, genfile=True), + environ=dict(nohash=True, usedefault=True), + gmean=dict(argstr="--gmean %d"), + in_files=dict(argstr="--i %s...", mandatory=True), + keep_dtype=dict(argstr="--keep-datatype"), + mask_file=dict(argstr="--mask %s", extensions=None), + max_bonfcor=dict(argstr="--max-bonfcor"), + max_index=dict(argstr="--max-index"), + mean_div_n=dict(argstr="--mean-div-n"), + multiply_by=dict(argstr="--mul %f"), + multiply_matrix_file=dict(argstr="--mtx %s", extensions=None), + paired_stats=dict(argstr="--paired-%s"), + sign=dict(argstr="--%s"), + sort=dict(argstr="--sort"), + stats=dict(argstr="--%s"), subjects_dir=dict(), - vote=dict( - argstr="--vote", - ), + vote=dict(argstr="--vote"), ) inputs = Concatenate.input_spec() @@ -77,11 +33,7 @@ def test_Concatenate_inputs(): def test_Concatenate_outputs(): - output_map = dict( - concatenated_file=dict( - extensions=None, - ), - ) + output_map = dict(concatenated_file=dict(extensions=None)) outputs = Concatenate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py b/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py index dad221b734..848eb2eb00 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py @@ -4,33 +4,13 @@ def test_ConcatenateLTA_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_lta1=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), - in_lta2=dict( - argstr="%s", - mandatory=True, - position=-2, - ), - invert_1=dict( - argstr="-invert1", - ), - invert_2=dict( - argstr="-invert2", - ), - invert_out=dict( - argstr="-invertout", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_lta1=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + in_lta2=dict(argstr="%s", mandatory=True, position=-2), + invert_1=dict(argstr="-invert1"), + invert_2=dict(argstr="-invert2"), + 
invert_out=dict(argstr="-invertout"), out_file=dict( argstr="%s", extensions=None, @@ -40,12 +20,8 @@ def test_ConcatenateLTA_inputs(): name_template="%s_concat", position=-1, ), - out_type=dict( - argstr="-out_type %d", - ), - subject=dict( - argstr="-subject %s", - ), + out_type=dict(argstr="-out_type %d"), + subject=dict(argstr="-subject %s"), subjects_dir=dict(), tal_source_file=dict( argstr="-tal %s", @@ -54,10 +30,7 @@ def test_ConcatenateLTA_inputs(): requires=["tal_template_file"], ), tal_template_file=dict( - argstr="%s", - extensions=None, - position=-4, - requires=["tal_source_file"], + argstr="%s", extensions=None, position=-4, requires=["tal_source_file"] ), ) inputs = ConcatenateLTA.input_spec() @@ -68,11 +41,7 @@ def test_ConcatenateLTA_inputs(): def test_ConcatenateLTA_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = ConcatenateLTA.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py b/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py index 8409f26757..ed6055072f 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py @@ -4,48 +4,18 @@ def test_Contrast_inputs(): input_map = dict( - annotation=dict( - extensions=None, - mandatory=True, - ), - args=dict( - argstr="%s", - ), + annotation=dict(extensions=None, mandatory=True), + args=dict(argstr="%s"), copy_inputs=dict(), - cortex=dict( - extensions=None, - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - hemisphere=dict( - argstr="--%s-only", - mandatory=True, - ), - orig=dict( - extensions=None, - mandatory=True, - ), - rawavg=dict( - extensions=None, - mandatory=True, - ), - subject_id=dict( - argstr="--s %s", - mandatory=True, - usedefault=True, - ), + cortex=dict(extensions=None, mandatory=True), + environ=dict(nohash=True, usedefault=True), + hemisphere=dict(argstr="--%s-only", mandatory=True), + orig=dict(extensions=None, mandatory=True), + rawavg=dict(extensions=None, mandatory=True), + subject_id=dict(argstr="--s %s", mandatory=True, usedefault=True), subjects_dir=dict(), - thickness=dict( - extensions=None, - mandatory=True, - ), - white=dict( - extensions=None, - mandatory=True, - ), + thickness=dict(extensions=None, mandatory=True), + white=dict(extensions=None, mandatory=True), ) inputs = Contrast.input_spec() @@ -56,15 +26,9 @@ def test_Contrast_inputs(): def test_Contrast_outputs(): output_map = dict( - out_contrast=dict( - extensions=None, - ), - out_log=dict( - extensions=None, - ), - out_stats=dict( - extensions=None, - ), + out_contrast=dict(extensions=None), + out_log=dict(extensions=None), + out_stats=dict(extensions=None), ) outputs = Contrast.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py b/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py index c230edb8ba..50145374d5 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py @@ -4,37 +4,18 @@ def test_Curvature_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - averages=dict( - argstr="-a %d", - ), + args=dict(argstr="%s"), + averages=dict(argstr="-a %d"), copy_input=dict(), - distances=dict( - argstr="-distances %d %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + distances=dict(argstr="-distances %d %d"), + 
environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="%s", - copyfile=True, - extensions=None, - mandatory=True, - position=-2, - ), - n=dict( - argstr="-n", - ), - save=dict( - argstr="-w", + argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-2 ), + n=dict(argstr="-n"), + save=dict(argstr="-w"), subjects_dir=dict(), - threshold=dict( - argstr="-thresh %.3f", - ), + threshold=dict(argstr="-thresh %.3f"), ) inputs = Curvature.input_spec() @@ -44,14 +25,7 @@ def test_Curvature_inputs(): def test_Curvature_outputs(): - output_map = dict( - out_gauss=dict( - extensions=None, - ), - out_mean=dict( - extensions=None, - ), - ) + output_map = dict(out_gauss=dict(extensions=None), out_mean=dict(extensions=None)) outputs = Curvature.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py b/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py index 4e8e3d5bc2..b8dea1cb96 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py @@ -4,34 +4,13 @@ def test_CurvatureStats_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), copy_inputs=dict(), - curvfile1=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - curvfile2=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - hemisphere=dict( - argstr="%s", - mandatory=True, - position=-3, - ), - min_max=dict( - argstr="-m", - ), + curvfile1=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + curvfile2=dict(argstr="%s", extensions=None, mandatory=True, position=-1), + environ=dict(nohash=True, usedefault=True), + hemisphere=dict(argstr="%s", mandatory=True, position=-3), + min_max=dict(argstr="-m"), out_file=dict( argstr="-o %s", extensions=None, @@ -39,23 +18,11 @@ def test_CurvatureStats_inputs(): name_source=["hemisphere"], name_template="%s.curv.stats", ), - subject_id=dict( - argstr="%s", - mandatory=True, - position=-4, - usedefault=True, - ), + subject_id=dict(argstr="%s", mandatory=True, position=-4, usedefault=True), subjects_dir=dict(), - surface=dict( - argstr="-F %s", - extensions=None, - ), - values=dict( - argstr="-G", - ), - write=dict( - argstr="--writeCurvatureFiles", - ), + surface=dict(argstr="-F %s", extensions=None), + values=dict(argstr="-G"), + write=dict(argstr="--writeCurvatureFiles"), ) inputs = CurvatureStats.input_spec() @@ -65,11 +32,7 @@ def test_CurvatureStats_inputs(): def test_CurvatureStats_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = CurvatureStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py index bda2620fe1..49654f2af3 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py @@ -4,35 +4,16 @@ def test_DICOMConvert_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - base_output_dir=dict( - mandatory=True, - ), - dicom_dir=dict( - mandatory=True, - ), - dicom_info=dict( - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + base_output_dir=dict(mandatory=True), + 
dicom_dir=dict(mandatory=True), + dicom_info=dict(extensions=None), + environ=dict(nohash=True, usedefault=True), file_mapping=dict(), - ignore_single_slice=dict( - requires=["dicom_info"], - ), - out_type=dict( - usedefault=True, - ), - seq_list=dict( - requires=["dicom_info"], - ), - subject_dir_template=dict( - usedefault=True, - ), + ignore_single_slice=dict(requires=["dicom_info"]), + out_type=dict(usedefault=True), + seq_list=dict(requires=["dicom_info"]), + subject_dir_template=dict(usedefault=True), subject_id=dict(), subjects_dir=dict(), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py b/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py index 7bf1b895f1..84b757c3b0 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py @@ -4,26 +4,11 @@ def test_EMRegister_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), - mask=dict( - argstr="-mask %s", - extensions=None, - ), - nbrspacing=dict( - argstr="-uns %d", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + mask=dict(argstr="-mask %s", extensions=None), + nbrspacing=dict(argstr="-uns %d"), num_threads=dict(), out_file=dict( argstr="%s", @@ -34,20 +19,10 @@ def test_EMRegister_inputs(): name_template="%s_transform.lta", position=-1, ), - skull=dict( - argstr="-skull", - ), + skull=dict(argstr="-skull"), subjects_dir=dict(), - template=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - transform=dict( - argstr="-t %s", - extensions=None, - ), + template=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + transform=dict(argstr="-t %s", extensions=None), ) inputs = EMRegister.input_spec() @@ -57,11 +32,7 @@ def test_EMRegister_inputs(): def test_EMRegister_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = EMRegister.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py b/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py index 5c51ed848d..c9906d9a05 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py @@ -4,40 +4,13 @@ def test_EditWMwithAseg_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - brain_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-4, - ), - keep_in=dict( - argstr="-keep-in", - ), - out_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - ), - seg_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s"), + brain_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-4), + keep_in=dict(argstr="-keep-in"), + out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), + seg_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), 
subjects_dir=dict(), ) inputs = EditWMwithAseg.input_spec() @@ -48,11 +21,7 @@ def test_EditWMwithAseg_inputs(): def test_EditWMwithAseg_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = EditWMwithAseg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py b/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py index 9d05019824..b1168b6ad8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py @@ -4,19 +4,9 @@ def test_EulerNumber_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), subjects_dir=dict(), ) inputs = EulerNumber.input_spec() @@ -27,10 +17,7 @@ def test_EulerNumber_inputs(): def test_EulerNumber_outputs(): - output_map = dict( - defects=dict(), - euler=dict(), - ) + output_map = dict(defects=dict(), euler=dict()) outputs = EulerNumber.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py b/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py index 59997ad5b4..d7d682d9bc 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py @@ -4,19 +4,9 @@ def test_ExtractMainComponent_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1), out_file=dict( argstr="%s", extensions=None, @@ -33,11 +23,7 @@ def test_ExtractMainComponent_inputs(): def test_ExtractMainComponent_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = ExtractMainComponent.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py b/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py index aa53727cc8..6be464aa79 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py @@ -4,13 +4,8 @@ def test_FSCommand_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), subjects_dir=dict(), ) inputs = FSCommand.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py b/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py index f61b52c1ea..1405ee51e2 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py @@ -4,13 +4,8 @@ def test_FSCommandOpenMP_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + 
environ=dict(nohash=True, usedefault=True), num_threads=dict(), subjects_dir=dict(), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py b/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py index 03cf55eb69..e761c7e82d 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py @@ -4,13 +4,8 @@ def test_FSScriptCommand_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), subjects_dir=dict(), ) inputs = FSScriptCommand.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py b/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py index 7842c5333a..dc2af5fb8d 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py @@ -4,24 +4,11 @@ def test_FitMSParams_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), flip_list=dict(), - in_files=dict( - argstr="%s", - mandatory=True, - position=-2, - ), - out_dir=dict( - argstr="%s", - genfile=True, - position=-1, - ), + in_files=dict(argstr="%s", mandatory=True, position=-2), + out_dir=dict(argstr="%s", genfile=True, position=-1), subjects_dir=dict(), te_list=dict(), tr_list=dict(), @@ -36,15 +23,9 @@ def test_FitMSParams_inputs(): def test_FitMSParams_outputs(): output_map = dict( - pd_image=dict( - extensions=None, - ), - t1_image=dict( - extensions=None, - ), - t2star_image=dict( - extensions=None, - ), + pd_image=dict(extensions=None), + t1_image=dict(extensions=None), + t2star_image=dict(extensions=None), ) outputs = FitMSParams.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py b/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py index 0037c02270..063254527a 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py @@ -4,56 +4,19 @@ def test_FixTopology_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - copy_inputs=dict( - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - ga=dict( - argstr="-ga", - ), - hemisphere=dict( - argstr="%s", - mandatory=True, - position=-1, - ), - in_brain=dict( - extensions=None, - mandatory=True, - ), - in_inflated=dict( - extensions=None, - mandatory=True, - ), - in_orig=dict( - extensions=None, - mandatory=True, - ), - in_wm=dict( - extensions=None, - mandatory=True, - ), - mgz=dict( - argstr="-mgz", - ), - seed=dict( - argstr="-seed %d", - ), - sphere=dict( - argstr="-sphere %s", - extensions=None, - ), - subject_id=dict( - argstr="%s", - mandatory=True, - position=-2, - usedefault=True, - ), + args=dict(argstr="%s"), + copy_inputs=dict(mandatory=True), + environ=dict(nohash=True, usedefault=True), + ga=dict(argstr="-ga"), + hemisphere=dict(argstr="%s", mandatory=True, position=-1), + in_brain=dict(extensions=None, mandatory=True), + in_inflated=dict(extensions=None, mandatory=True), + in_orig=dict(extensions=None, mandatory=True), + in_wm=dict(extensions=None, mandatory=True), + mgz=dict(argstr="-mgz"), + seed=dict(argstr="-seed %d"), + sphere=dict(argstr="-sphere %s", extensions=None), + subject_id=dict(argstr="%s", mandatory=True, position=-2, 
usedefault=True), subjects_dir=dict(), ) inputs = FixTopology.input_spec() @@ -64,11 +27,7 @@ def test_FixTopology_inputs(): def test_FixTopology_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = FixTopology.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py b/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py index 12550be8b3..90a4e0ca3c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py @@ -4,40 +4,15 @@ def test_FuseSegmentations_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_norms=dict( - argstr="-n %s", - mandatory=True, - ), - in_segmentations=dict( - argstr="-a %s", - mandatory=True, - ), - in_segmentations_noCC=dict( - argstr="-c %s", - mandatory=True, - ), - out_file=dict( - extensions=None, - mandatory=True, - position=-1, - ), - subject_id=dict( - argstr="%s", - position=-3, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_norms=dict(argstr="-n %s", mandatory=True), + in_segmentations=dict(argstr="-a %s", mandatory=True), + in_segmentations_noCC=dict(argstr="-c %s", mandatory=True), + out_file=dict(extensions=None, mandatory=True, position=-1), + subject_id=dict(argstr="%s", position=-3), subjects_dir=dict(), - timepoints=dict( - argstr="%s", - mandatory=True, - position=-2, - ), + timepoints=dict(argstr="%s", mandatory=True, position=-2), ) inputs = FuseSegmentations.input_spec() @@ -47,11 +22,7 @@ def test_FuseSegmentations_inputs(): def test_FuseSegmentations_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = FuseSegmentations.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py b/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py index 21c41eb691..319efa2a8d 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py @@ -4,192 +4,68 @@ def test_GLMFit_inputs(): input_map = dict( - allow_ill_cond=dict( - argstr="--illcond", - ), - allow_repeated_subjects=dict( - argstr="--allowsubjrep", - ), - args=dict( - argstr="%s", - ), - calc_AR1=dict( - argstr="--tar1", - ), - check_opts=dict( - argstr="--checkopts", - ), - compute_log_y=dict( - argstr="--logy", - ), - contrast=dict( - argstr="--C %s...", - ), - cortex=dict( - argstr="--cortex", - xor=["label_file"], - ), - debug=dict( - argstr="--debug", - ), + allow_ill_cond=dict(argstr="--illcond"), + allow_repeated_subjects=dict(argstr="--allowsubjrep"), + args=dict(argstr="%s"), + calc_AR1=dict(argstr="--tar1"), + check_opts=dict(argstr="--checkopts"), + compute_log_y=dict(argstr="--logy"), + contrast=dict(argstr="--C %s..."), + cortex=dict(argstr="--cortex", xor=["label_file"]), + debug=dict(argstr="--debug"), design=dict( - argstr="--X %s", - extensions=None, - xor=("fsgd", "design", "one_sample"), - ), - diag=dict( - argstr="--diag %d", - ), - diag_cluster=dict( - argstr="--diag-cluster", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixed_fx_dof=dict( - argstr="--ffxdof %d", - xor=["fixed_fx_dof_file"], + argstr="--X %s", extensions=None, xor=("fsgd", "design", "one_sample") ), + 
diag=dict(argstr="--diag %d"), + diag_cluster=dict(argstr="--diag-cluster"), + environ=dict(nohash=True, usedefault=True), + fixed_fx_dof=dict(argstr="--ffxdof %d", xor=["fixed_fx_dof_file"]), fixed_fx_dof_file=dict( - argstr="--ffxdofdat %d", - extensions=None, - xor=["fixed_fx_dof"], - ), - fixed_fx_var=dict( - argstr="--yffxvar %s", - extensions=None, - ), - force_perm=dict( - argstr="--perm-force", - ), - fsgd=dict( - argstr="--fsgd %s %s", - xor=("fsgd", "design", "one_sample"), - ), - fwhm=dict( - argstr="--fwhm %f", - ), - glm_dir=dict( - argstr="--glmdir %s", - genfile=True, + argstr="--ffxdofdat %d", extensions=None, xor=["fixed_fx_dof"] ), + fixed_fx_var=dict(argstr="--yffxvar %s", extensions=None), + force_perm=dict(argstr="--perm-force"), + fsgd=dict(argstr="--fsgd %s %s", xor=("fsgd", "design", "one_sample")), + fwhm=dict(argstr="--fwhm %f"), + glm_dir=dict(argstr="--glmdir %s", genfile=True), hemi=dict(), - in_file=dict( - argstr="--y %s", - copyfile=False, - extensions=None, - mandatory=True, - ), - invert_mask=dict( - argstr="--mask-inv", - ), - label_file=dict( - argstr="--label %s", - extensions=None, - xor=["cortex"], - ), - mask_file=dict( - argstr="--mask %s", - extensions=None, - ), - no_contrast_ok=dict( - argstr="--no-contrasts-ok", - ), - no_est_fwhm=dict( - argstr="--no-est-fwhm", - ), - no_mask_smooth=dict( - argstr="--no-mask-smooth", - ), - no_prune=dict( - argstr="--no-prune", - xor=["prunethresh"], - ), + in_file=dict(argstr="--y %s", copyfile=False, extensions=None, mandatory=True), + invert_mask=dict(argstr="--mask-inv"), + label_file=dict(argstr="--label %s", extensions=None, xor=["cortex"]), + mask_file=dict(argstr="--mask %s", extensions=None), + no_contrast_ok=dict(argstr="--no-contrasts-ok"), + no_est_fwhm=dict(argstr="--no-est-fwhm"), + no_mask_smooth=dict(argstr="--no-mask-smooth"), + no_prune=dict(argstr="--no-prune", xor=["prunethresh"]), one_sample=dict( - argstr="--osgm", - xor=("one_sample", "fsgd", "design", "contrast"), - ), - pca=dict( - argstr="--pca", - ), - per_voxel_reg=dict( - argstr="--pvr %s...", - ), - profile=dict( - argstr="--profile %d", - ), - prune=dict( - argstr="--prune", - ), - prune_thresh=dict( - argstr="--prune_thr %f", - xor=["noprune"], - ), - resynth_test=dict( - argstr="--resynthtest %d", - ), - save_cond=dict( - argstr="--save-cond", - ), - save_estimate=dict( - argstr="--yhat-save", - ), - save_res_corr_mtx=dict( - argstr="--eres-scm", - ), - save_residual=dict( - argstr="--eres-save", - ), - seed=dict( - argstr="--seed %d", - ), - self_reg=dict( - argstr="--selfreg %d %d %d", - ), - sim_done_file=dict( - argstr="--sim-done %s", - extensions=None, - ), - sim_sign=dict( - argstr="--sim-sign %s", - ), - simulation=dict( - argstr="--sim %s %d %f %s", - ), + argstr="--osgm", xor=("one_sample", "fsgd", "design", "contrast") + ), + pca=dict(argstr="--pca"), + per_voxel_reg=dict(argstr="--pvr %s..."), + profile=dict(argstr="--profile %d"), + prune=dict(argstr="--prune"), + prune_thresh=dict(argstr="--prune_thr %f", xor=["noprune"]), + resynth_test=dict(argstr="--resynthtest %d"), + save_cond=dict(argstr="--save-cond"), + save_estimate=dict(argstr="--yhat-save"), + save_res_corr_mtx=dict(argstr="--eres-scm"), + save_residual=dict(argstr="--eres-save"), + seed=dict(argstr="--seed %d"), + self_reg=dict(argstr="--selfreg %d %d %d"), + sim_done_file=dict(argstr="--sim-done %s", extensions=None), + sim_sign=dict(argstr="--sim-sign %s"), + simulation=dict(argstr="--sim %s %d %f %s"), subject_id=dict(), subjects_dir=dict(), - 
surf=dict( - argstr="--surf %s %s %s", - requires=["subject_id", "hemi"], - ), - surf_geo=dict( - usedefault=True, - ), - synth=dict( - argstr="--synth", - ), - uniform=dict( - argstr="--uniform %f %f", - ), - var_fwhm=dict( - argstr="--var-fwhm %f", - ), - vox_dump=dict( - argstr="--voxdump %d %d %d", - ), - weight_file=dict( - extensions=None, - xor=["weighted_ls"], - ), - weight_inv=dict( - argstr="--w-inv", - xor=["weighted_ls"], - ), - weight_sqrt=dict( - argstr="--w-sqrt", - xor=["weighted_ls"], - ), + surf=dict(argstr="--surf %s %s %s", requires=["subject_id", "hemi"]), + surf_geo=dict(usedefault=True), + synth=dict(argstr="--synth"), + uniform=dict(argstr="--uniform %f %f"), + var_fwhm=dict(argstr="--var-fwhm %f"), + vox_dump=dict(argstr="--voxdump %d %d %d"), + weight_file=dict(extensions=None, xor=["weighted_ls"]), + weight_inv=dict(argstr="--w-inv", xor=["weighted_ls"]), + weight_sqrt=dict(argstr="--w-sqrt", xor=["weighted_ls"]), weighted_ls=dict( argstr="--wls %s", extensions=None, @@ -205,47 +81,23 @@ def test_GLMFit_inputs(): def test_GLMFit_outputs(): output_map = dict( - beta_file=dict( - extensions=None, - ), - dof_file=dict( - extensions=None, - ), - error_file=dict( - extensions=None, - ), - error_stddev_file=dict( - extensions=None, - ), - error_var_file=dict( - extensions=None, - ), - estimate_file=dict( - extensions=None, - ), - frame_eigenvectors=dict( - extensions=None, - ), + beta_file=dict(extensions=None), + dof_file=dict(extensions=None), + error_file=dict(extensions=None), + error_stddev_file=dict(extensions=None), + error_var_file=dict(extensions=None), + estimate_file=dict(extensions=None), + frame_eigenvectors=dict(extensions=None), ftest_file=dict(), - fwhm_file=dict( - extensions=None, - ), + fwhm_file=dict(extensions=None), gamma_file=dict(), gamma_var_file=dict(), glm_dir=dict(), - mask_file=dict( - extensions=None, - ), + mask_file=dict(extensions=None), sig_file=dict(), - singular_values=dict( - extensions=None, - ), - spatial_eigenvectors=dict( - extensions=None, - ), - svd_stats_file=dict( - extensions=None, - ), + singular_values=dict(extensions=None), + spatial_eigenvectors=dict(extensions=None), + svd_stats_file=dict(extensions=None), ) outputs = GLMFit.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py b/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py index aa6d5d302e..40d3c62268 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py @@ -4,18 +4,9 @@ def test_ImageInfo_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - position=1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, position=1), subjects_dir=dict(), ) inputs = ImageInfo.input_spec() @@ -35,9 +26,7 @@ def test_ImageInfo_outputs(): file_format=dict(), info=dict(), orientation=dict(), - out_file=dict( - extensions=None, - ), + out_file=dict(extensions=None), ph_enc_dir=dict(), vox_sizes=dict(), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py b/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py index f62c085839..a4c52778dd 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py @@ -4,25 +4,10 @@ def test_Jacobian_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - 
environ=dict( - nohash=True, - usedefault=True, - ), - in_mappedsurf=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - in_origsurf=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_mappedsurf=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + in_origsurf=dict(argstr="%s", extensions=None, mandatory=True, position=-3), out_file=dict( argstr="%s", extensions=None, @@ -42,11 +27,7 @@ def test_Jacobian_inputs(): def test_Jacobian_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Jacobian.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py index b70bd34c45..d7633157e8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py @@ -4,13 +4,8 @@ def test_LTAConvert_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), in_fsl=dict( argstr="--infsl %s", extensions=None, @@ -46,39 +41,16 @@ def test_LTAConvert_inputs(): mandatory=True, xor=("in_lta", "in_fsl", "in_mni", "in_reg", "in_niftyreg", "in_itk"), ), - invert=dict( - argstr="--invert", - ), - ltavox2vox=dict( - argstr="--ltavox2vox", - requires=["out_lta"], - ), - out_fsl=dict( - argstr="--outfsl %s", - ), - out_itk=dict( - argstr="--outitk %s", - ), - out_lta=dict( - argstr="--outlta %s", - ), - out_mni=dict( - argstr="--outmni %s", - ), - out_reg=dict( - argstr="--outreg %s", - ), - source_file=dict( - argstr="--src %s", - extensions=None, - ), - target_conform=dict( - argstr="--trgconform", - ), - target_file=dict( - argstr="--trg %s", - extensions=None, - ), + invert=dict(argstr="--invert"), + ltavox2vox=dict(argstr="--ltavox2vox", requires=["out_lta"]), + out_fsl=dict(argstr="--outfsl %s"), + out_itk=dict(argstr="--outitk %s"), + out_lta=dict(argstr="--outlta %s"), + out_mni=dict(argstr="--outmni %s"), + out_reg=dict(argstr="--outreg %s"), + source_file=dict(argstr="--src %s", extensions=None), + target_conform=dict(argstr="--trgconform"), + target_file=dict(argstr="--trg %s", extensions=None), ) inputs = LTAConvert.input_spec() @@ -89,21 +61,11 @@ def test_LTAConvert_inputs(): def test_LTAConvert_outputs(): output_map = dict( - out_fsl=dict( - extensions=None, - ), - out_itk=dict( - extensions=None, - ), - out_lta=dict( - extensions=None, - ), - out_mni=dict( - extensions=None, - ), - out_reg=dict( - extensions=None, - ), + out_fsl=dict(extensions=None), + out_itk=dict(extensions=None), + out_lta=dict(extensions=None), + out_mni=dict(extensions=None), + out_reg=dict(extensions=None), ) outputs = LTAConvert.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py b/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py index 7e1caf88cc..3400725272 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py @@ -4,46 +4,18 @@ def test_Label2Annot_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - color_table=dict( - argstr="--ctab %s", - extensions=None, - ), + args=dict(argstr="%s"), + color_table=dict(argstr="--ctab %s", 
extensions=None), copy_inputs=dict(), - environ=dict( - nohash=True, - usedefault=True, - ), - hemisphere=dict( - argstr="--hemi %s", - mandatory=True, - ), - in_labels=dict( - argstr="--l %s...", - mandatory=True, - ), - keep_max=dict( - argstr="--maxstatwinner", - ), - orig=dict( - extensions=None, - mandatory=True, - ), - out_annot=dict( - argstr="--a %s", - mandatory=True, - ), - subject_id=dict( - argstr="--s %s", - mandatory=True, - usedefault=True, - ), + environ=dict(nohash=True, usedefault=True), + hemisphere=dict(argstr="--hemi %s", mandatory=True), + in_labels=dict(argstr="--l %s...", mandatory=True), + keep_max=dict(argstr="--maxstatwinner"), + orig=dict(extensions=None, mandatory=True), + out_annot=dict(argstr="--a %s", mandatory=True), + subject_id=dict(argstr="--s %s", mandatory=True, usedefault=True), subjects_dir=dict(), - verbose_off=dict( - argstr="--noverbose", - ), + verbose_off=dict(argstr="--noverbose"), ) inputs = Label2Annot.input_spec() @@ -53,11 +25,7 @@ def test_Label2Annot_inputs(): def test_Label2Annot_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Label2Annot.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py b/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py index 34f99e1a24..80c62567c5 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py @@ -4,18 +4,10 @@ def test_Label2Label_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), copy_inputs=dict(), - environ=dict( - nohash=True, - usedefault=True, - ), - hemisphere=dict( - argstr="--hemi %s", - mandatory=True, - ), + environ=dict(nohash=True, usedefault=True), + hemisphere=dict(argstr="--hemi %s", mandatory=True), out_file=dict( argstr="--trglabel %s", extensions=None, @@ -24,41 +16,15 @@ def test_Label2Label_inputs(): name_source=["source_label"], name_template="%s_converted", ), - registration_method=dict( - argstr="--regmethod %s", - usedefault=True, - ), - source_label=dict( - argstr="--srclabel %s", - extensions=None, - mandatory=True, - ), - source_sphere_reg=dict( - extensions=None, - mandatory=True, - ), - source_subject=dict( - argstr="--srcsubject %s", - mandatory=True, - ), - source_white=dict( - extensions=None, - mandatory=True, - ), - sphere_reg=dict( - extensions=None, - mandatory=True, - ), - subject_id=dict( - argstr="--trgsubject %s", - mandatory=True, - usedefault=True, - ), + registration_method=dict(argstr="--regmethod %s", usedefault=True), + source_label=dict(argstr="--srclabel %s", extensions=None, mandatory=True), + source_sphere_reg=dict(extensions=None, mandatory=True), + source_subject=dict(argstr="--srcsubject %s", mandatory=True), + source_white=dict(extensions=None, mandatory=True), + sphere_reg=dict(extensions=None, mandatory=True), + subject_id=dict(argstr="--trgsubject %s", mandatory=True, usedefault=True), subjects_dir=dict(), - white=dict( - extensions=None, - mandatory=True, - ), + white=dict(extensions=None, mandatory=True), ) inputs = Label2Label.input_spec() @@ -68,11 +34,7 @@ def test_Label2Label_inputs(): def test_Label2Label_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Label2Label.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py b/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py index aa1b19f564..5722e24ab6 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py @@ -17,50 +17,23 @@ def test_Label2Vol_inputs(): mandatory=True, xor=("label_file", "annot_file", "seg_file", "aparc_aseg"), ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fill_thresh=dict( - argstr="--fillthresh %g", - ), - hemi=dict( - argstr="--hemi %s", - ), - identity=dict( - argstr="--identity", - xor=("reg_file", "reg_header", "identity"), - ), - invert_mtx=dict( - argstr="--invertmtx", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + fill_thresh=dict(argstr="--fillthresh %g"), + hemi=dict(argstr="--hemi %s"), + identity=dict(argstr="--identity", xor=("reg_file", "reg_header", "identity")), + invert_mtx=dict(argstr="--invertmtx"), label_file=dict( argstr="--label %s...", copyfile=False, mandatory=True, xor=("label_file", "annot_file", "seg_file", "aparc_aseg"), ), - label_hit_file=dict( - argstr="--hits %s", - extensions=None, - ), - label_voxel_volume=dict( - argstr="--labvoxvol %f", - ), - map_label_stat=dict( - argstr="--label-stat %s", - extensions=None, - ), - native_vox2ras=dict( - argstr="--native-vox2ras", - ), - proj=dict( - argstr="--proj %s %f %f %f", - requires=("subject_id", "hemi"), - ), + label_hit_file=dict(argstr="--hits %s", extensions=None), + label_voxel_volume=dict(argstr="--labvoxvol %f"), + map_label_stat=dict(argstr="--label-stat %s", extensions=None), + native_vox2ras=dict(argstr="--native-vox2ras"), + proj=dict(argstr="--proj %s %f %f %f", requires=("subject_id", "hemi")), reg_file=dict( argstr="--reg %s", extensions=None, @@ -78,23 +51,11 @@ def test_Label2Vol_inputs(): mandatory=True, xor=("label_file", "annot_file", "seg_file", "aparc_aseg"), ), - subject_id=dict( - argstr="--subject %s", - ), + subject_id=dict(argstr="--subject %s"), subjects_dir=dict(), - surface=dict( - argstr="--surf %s", - ), - template_file=dict( - argstr="--temp %s", - extensions=None, - mandatory=True, - ), - vol_label_file=dict( - argstr="--o %s", - extensions=None, - genfile=True, - ), + surface=dict(argstr="--surf %s"), + template_file=dict(argstr="--temp %s", extensions=None, mandatory=True), + vol_label_file=dict(argstr="--o %s", extensions=None, genfile=True), ) inputs = Label2Vol.input_spec() @@ -104,11 +65,7 @@ def test_Label2Vol_inputs(): def test_Label2Vol_outputs(): - output_map = dict( - vol_label_file=dict( - extensions=None, - ), - ) + output_map = dict(vol_label_file=dict(extensions=None)) outputs = Label2Vol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py b/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py index 3b3c2f0852..f9a86b5b67 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py @@ -4,32 +4,13 @@ def test_MNIBiasCorrection_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - distance=dict( - argstr="--distance %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="--i %s", - extensions=None, - mandatory=True, - ), - iterations=dict( - argstr="--n %d", - usedefault=True, - ), - mask=dict( - argstr="--mask %s", - extensions=None, - ), - no_rescale=dict( - 
argstr="--no-rescale", - ), + args=dict(argstr="%s"), + distance=dict(argstr="--distance %d"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="--i %s", extensions=None, mandatory=True), + iterations=dict(argstr="--n %d", usedefault=True), + mask=dict(argstr="--mask %s", extensions=None), + no_rescale=dict(argstr="--no-rescale"), out_file=dict( argstr="--o %s", extensions=None, @@ -38,20 +19,11 @@ def test_MNIBiasCorrection_inputs(): name_source=["in_file"], name_template="%s_output", ), - protocol_iterations=dict( - argstr="--proto-iters %d", - ), - shrink=dict( - argstr="--shrink %d", - ), - stop=dict( - argstr="--stop %f", - ), + protocol_iterations=dict(argstr="--proto-iters %d"), + shrink=dict(argstr="--shrink %d"), + stop=dict(argstr="--stop %f"), subjects_dir=dict(), - transform=dict( - argstr="--uchar %s", - extensions=None, - ), + transform=dict(argstr="--uchar %s", extensions=None), ) inputs = MNIBiasCorrection.input_spec() @@ -61,11 +33,7 @@ def test_MNIBiasCorrection_inputs(): def test_MNIBiasCorrection_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MNIBiasCorrection.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py b/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py index 4f21cc2f61..b2c62ba590 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py @@ -4,27 +4,12 @@ def test_MPRtoMNI305_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - usedefault=True, - ), - reference_dir=dict( - mandatory=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, usedefault=True), + reference_dir=dict(mandatory=True, usedefault=True), subjects_dir=dict(), - target=dict( - mandatory=True, - usedefault=True, - ), + target=dict(mandatory=True, usedefault=True), ) inputs = MPRtoMNI305.input_spec() @@ -35,13 +20,7 @@ def test_MPRtoMNI305_inputs(): def test_MPRtoMNI305_outputs(): output_map = dict( - log_file=dict( - extensions=None, - usedefault=True, - ), - out_file=dict( - extensions=None, - ), + log_file=dict(extensions=None, usedefault=True), out_file=dict(extensions=None) ) outputs = MPRtoMNI305.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py index 9e229078ef..19fb1874f1 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py @@ -5,276 +5,97 @@ def test_MRIConvert_inputs(): input_map = dict( apply_inv_transform=dict( - argstr="--apply_inverse_transform %s", - extensions=None, - ), - apply_transform=dict( - argstr="--apply_transform %s", - extensions=None, - ), - args=dict( - argstr="%s", - ), - ascii=dict( - argstr="--ascii", - ), - autoalign_matrix=dict( - argstr="--autoalign %s", - extensions=None, - ), - color_file=dict( - argstr="--color_file %s", - extensions=None, - ), - conform=dict( - argstr="--conform", - ), - conform_min=dict( - argstr="--conform_min", - ), - conform_size=dict( - argstr="--conform_size %s", - ), - crop_center=dict( - argstr="--crop %d %d %d", - ), - crop_gdf=dict( - argstr="--crop_gdf", - ), - 
crop_size=dict( - argstr="--cropsize %d %d %d", - ), - cut_ends=dict( - argstr="--cutends %d", - ), - cw256=dict( - argstr="--cw256", - ), - devolve_transform=dict( - argstr="--devolvexfm %s", - ), - drop_n=dict( - argstr="--ndrop %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fill_parcellation=dict( - argstr="--fill_parcellation", - ), - force_ras=dict( - argstr="--force_ras_good", - ), - frame=dict( - argstr="--frame %d", - ), - frame_subsample=dict( - argstr="--fsubsample %d %d %d", - ), - fwhm=dict( - argstr="--fwhm %f", - ), - in_center=dict( - argstr="--in_center %s", - ), + argstr="--apply_inverse_transform %s", extensions=None + ), + apply_transform=dict(argstr="--apply_transform %s", extensions=None), + args=dict(argstr="%s"), + ascii=dict(argstr="--ascii"), + autoalign_matrix=dict(argstr="--autoalign %s", extensions=None), + color_file=dict(argstr="--color_file %s", extensions=None), + conform=dict(argstr="--conform"), + conform_min=dict(argstr="--conform_min"), + conform_size=dict(argstr="--conform_size %s"), + crop_center=dict(argstr="--crop %d %d %d"), + crop_gdf=dict(argstr="--crop_gdf"), + crop_size=dict(argstr="--cropsize %d %d %d"), + cut_ends=dict(argstr="--cutends %d"), + cw256=dict(argstr="--cw256"), + devolve_transform=dict(argstr="--devolvexfm %s"), + drop_n=dict(argstr="--ndrop %d"), + environ=dict(nohash=True, usedefault=True), + fill_parcellation=dict(argstr="--fill_parcellation"), + force_ras=dict(argstr="--force_ras_good"), + frame=dict(argstr="--frame %d"), + frame_subsample=dict(argstr="--fsubsample %d %d %d"), + fwhm=dict(argstr="--fwhm %f"), + in_center=dict(argstr="--in_center %s"), in_file=dict( - argstr="--input_volume %s", - extensions=None, - mandatory=True, - position=-2, - ), - in_i_dir=dict( - argstr="--in_i_direction %f %f %f", - ), - in_i_size=dict( - argstr="--in_i_size %d", - ), - in_info=dict( - argstr="--in_info", - ), - in_j_dir=dict( - argstr="--in_j_direction %f %f %f", - ), - in_j_size=dict( - argstr="--in_j_size %d", - ), - in_k_dir=dict( - argstr="--in_k_direction %f %f %f", - ), - in_k_size=dict( - argstr="--in_k_size %d", - ), - in_like=dict( - argstr="--in_like %s", - extensions=None, - ), - in_matrix=dict( - argstr="--in_matrix", - ), - in_orientation=dict( - argstr="--in_orientation %s", - ), - in_scale=dict( - argstr="--scale %f", - ), - in_stats=dict( - argstr="--in_stats", - ), - in_type=dict( - argstr="--in_type %s", - ), - invert_contrast=dict( - argstr="--invert_contrast %f", - ), - midframe=dict( - argstr="--mid-frame", - ), - no_change=dict( - argstr="--nochange", - ), - no_scale=dict( - argstr="--no_scale 1", - ), - no_translate=dict( - argstr="--no_translate", - ), - no_write=dict( - argstr="--no_write", - ), - out_center=dict( - argstr="--out_center %f %f %f", - ), - out_datatype=dict( - argstr="--out_data_type %s", - ), + argstr="--input_volume %s", extensions=None, mandatory=True, position=-2 + ), + in_i_dir=dict(argstr="--in_i_direction %f %f %f"), + in_i_size=dict(argstr="--in_i_size %d"), + in_info=dict(argstr="--in_info"), + in_j_dir=dict(argstr="--in_j_direction %f %f %f"), + in_j_size=dict(argstr="--in_j_size %d"), + in_k_dir=dict(argstr="--in_k_direction %f %f %f"), + in_k_size=dict(argstr="--in_k_size %d"), + in_like=dict(argstr="--in_like %s", extensions=None), + in_matrix=dict(argstr="--in_matrix"), + in_orientation=dict(argstr="--in_orientation %s"), + in_scale=dict(argstr="--scale %f"), + in_stats=dict(argstr="--in_stats"), + in_type=dict(argstr="--in_type %s"), + 
invert_contrast=dict(argstr="--invert_contrast %f"), + midframe=dict(argstr="--mid-frame"), + no_change=dict(argstr="--nochange"), + no_scale=dict(argstr="--no_scale 1"), + no_translate=dict(argstr="--no_translate"), + no_write=dict(argstr="--no_write"), + out_center=dict(argstr="--out_center %f %f %f"), + out_datatype=dict(argstr="--out_data_type %s"), out_file=dict( - argstr="--output_volume %s", - extensions=None, - genfile=True, - position=-1, - ), - out_i_count=dict( - argstr="--out_i_count %d", - ), - out_i_dir=dict( - argstr="--out_i_direction %f %f %f", - ), - out_i_size=dict( - argstr="--out_i_size %d", - ), - out_info=dict( - argstr="--out_info", - ), - out_j_count=dict( - argstr="--out_j_count %d", - ), - out_j_dir=dict( - argstr="--out_j_direction %f %f %f", - ), - out_j_size=dict( - argstr="--out_j_size %d", - ), - out_k_count=dict( - argstr="--out_k_count %d", - ), - out_k_dir=dict( - argstr="--out_k_direction %f %f %f", - ), - out_k_size=dict( - argstr="--out_k_size %d", - ), - out_matrix=dict( - argstr="--out_matrix", - ), - out_orientation=dict( - argstr="--out_orientation %s", - ), - out_scale=dict( - argstr="--out-scale %d", - ), - out_stats=dict( - argstr="--out_stats", - ), - out_type=dict( - argstr="--out_type %s", - ), - parse_only=dict( - argstr="--parse_only", - ), - read_only=dict( - argstr="--read_only", - ), - reorder=dict( - argstr="--reorder %d %d %d", - ), - resample_type=dict( - argstr="--resample_type %s", - ), - reslice_like=dict( - argstr="--reslice_like %s", - extensions=None, - ), - sdcm_list=dict( - argstr="--sdcmlist %s", - extensions=None, - ), - skip_n=dict( - argstr="--nskip %d", - ), - slice_bias=dict( - argstr="--slice-bias %f", - ), - slice_crop=dict( - argstr="--slice-crop %d %d", - ), - slice_reverse=dict( - argstr="--slice-reverse", - ), - smooth_parcellation=dict( - argstr="--smooth_parcellation", - ), - sphinx=dict( - argstr="--sphinx", - ), - split=dict( - argstr="--split", - ), - status_file=dict( - argstr="--status %s", - extensions=None, - ), - subject_name=dict( - argstr="--subject_name %s", - ), + argstr="--output_volume %s", extensions=None, genfile=True, position=-1 + ), + out_i_count=dict(argstr="--out_i_count %d"), + out_i_dir=dict(argstr="--out_i_direction %f %f %f"), + out_i_size=dict(argstr="--out_i_size %d"), + out_info=dict(argstr="--out_info"), + out_j_count=dict(argstr="--out_j_count %d"), + out_j_dir=dict(argstr="--out_j_direction %f %f %f"), + out_j_size=dict(argstr="--out_j_size %d"), + out_k_count=dict(argstr="--out_k_count %d"), + out_k_dir=dict(argstr="--out_k_direction %f %f %f"), + out_k_size=dict(argstr="--out_k_size %d"), + out_matrix=dict(argstr="--out_matrix"), + out_orientation=dict(argstr="--out_orientation %s"), + out_scale=dict(argstr="--out-scale %d"), + out_stats=dict(argstr="--out_stats"), + out_type=dict(argstr="--out_type %s"), + parse_only=dict(argstr="--parse_only"), + read_only=dict(argstr="--read_only"), + reorder=dict(argstr="--reorder %d %d %d"), + resample_type=dict(argstr="--resample_type %s"), + reslice_like=dict(argstr="--reslice_like %s", extensions=None), + sdcm_list=dict(argstr="--sdcmlist %s", extensions=None), + skip_n=dict(argstr="--nskip %d"), + slice_bias=dict(argstr="--slice-bias %f"), + slice_crop=dict(argstr="--slice-crop %d %d"), + slice_reverse=dict(argstr="--slice-reverse"), + smooth_parcellation=dict(argstr="--smooth_parcellation"), + sphinx=dict(argstr="--sphinx"), + split=dict(argstr="--split"), + status_file=dict(argstr="--status %s", extensions=None), + 
subject_name=dict(argstr="--subject_name %s"), subjects_dir=dict(), - te=dict( - argstr="-te %d", - ), - template_info=dict( - argstr="--template_info", - ), - template_type=dict( - argstr="--template_type %s", - ), - ti=dict( - argstr="-ti %d", - ), - tr=dict( - argstr="-tr %d", - ), - unwarp_gradient=dict( - argstr="--unwarp_gradient_nonlinearity", - ), - vox_size=dict( - argstr="-voxsize %f %f %f", - ), - zero_ge_z_offset=dict( - argstr="--zero_ge_z_offset", - ), - zero_outlines=dict( - argstr="--zero_outlines", - ), + te=dict(argstr="-te %d"), + template_info=dict(argstr="--template_info"), + template_type=dict(argstr="--template_type %s"), + ti=dict(argstr="-ti %d"), + tr=dict(argstr="-tr %d"), + unwarp_gradient=dict(argstr="--unwarp_gradient_nonlinearity"), + vox_size=dict(argstr="-voxsize %f %f %f"), + zero_ge_z_offset=dict(argstr="--zero_ge_z_offset"), + zero_outlines=dict(argstr="--zero_outlines"), ) inputs = MRIConvert.input_spec() @@ -284,9 +105,7 @@ def test_MRIConvert_inputs(): def test_MRIConvert_outputs(): - output_map = dict( - out_file=dict(), - ) + output_map = dict(out_file=dict()) outputs = MRIConvert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py b/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py index 3d85129f3d..72b2fa7913 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py @@ -4,79 +4,29 @@ def test_MRICoreg_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - brute_force_limit=dict( - argstr="--bf-lim %g", - xor=["no_brute_force"], - ), - brute_force_samples=dict( - argstr="--bf-nsamp %d", - xor=["no_brute_force"], - ), - conform_reference=dict( - argstr="--conf-ref", - ), - dof=dict( - argstr="--dof %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - ftol=dict( - argstr="--ftol %e", - ), - initial_rotation=dict( - argstr="--rot %g %g %g", - ), - initial_scale=dict( - argstr="--scale %g %g %g", - ), - initial_shear=dict( - argstr="--shear %g %g %g", - ), - initial_translation=dict( - argstr="--trans %g %g %g", - ), - linmintol=dict( - argstr="--linmintol %e", - ), - max_iters=dict( - argstr="--nitersmax %d", - ), - no_brute_force=dict( - argstr="--no-bf", - ), - no_coord_dithering=dict( - argstr="--no-coord-dither", - ), - no_cras0=dict( - argstr="--no-cras0", - ), - no_intensity_dithering=dict( - argstr="--no-intensity-dither", - ), - no_smooth=dict( - argstr="--no-smooth", - ), - num_threads=dict( - argstr="--threads %d", - ), - out_lta_file=dict( - argstr="--lta %s", - usedefault=True, - ), - out_params_file=dict( - argstr="--params %s", - ), - out_reg_file=dict( - argstr="--regdat %s", - ), - ref_fwhm=dict( - argstr="--ref-fwhm", - ), + args=dict(argstr="%s"), + brute_force_limit=dict(argstr="--bf-lim %g", xor=["no_brute_force"]), + brute_force_samples=dict(argstr="--bf-nsamp %d", xor=["no_brute_force"]), + conform_reference=dict(argstr="--conf-ref"), + dof=dict(argstr="--dof %d"), + environ=dict(nohash=True, usedefault=True), + ftol=dict(argstr="--ftol %e"), + initial_rotation=dict(argstr="--rot %g %g %g"), + initial_scale=dict(argstr="--scale %g %g %g"), + initial_shear=dict(argstr="--shear %g %g %g"), + initial_translation=dict(argstr="--trans %g %g %g"), + linmintol=dict(argstr="--linmintol %e"), + max_iters=dict(argstr="--nitersmax %d"), + no_brute_force=dict(argstr="--no-bf"), + no_coord_dithering=dict(argstr="--no-coord-dither"), + 
no_cras0=dict(argstr="--no-cras0"), + no_intensity_dithering=dict(argstr="--no-intensity-dither"), + no_smooth=dict(argstr="--no-smooth"), + num_threads=dict(argstr="--threads %d"), + out_lta_file=dict(argstr="--lta %s", usedefault=True), + out_params_file=dict(argstr="--params %s"), + out_reg_file=dict(argstr="--regdat %s"), + ref_fwhm=dict(argstr="--ref-fwhm"), reference_file=dict( argstr="--ref %s", copyfile=False, @@ -84,28 +34,14 @@ def test_MRICoreg_inputs(): mandatory=True, xor=["subject_id"], ), - reference_mask=dict( - argstr="--ref-mask %s", - position=2, - ), - saturation_threshold=dict( - argstr="--sat %g", - ), - sep=dict( - argstr="--sep %s...", - ), + reference_mask=dict(argstr="--ref-mask %s", position=2), + saturation_threshold=dict(argstr="--sat %g"), + sep=dict(argstr="--sep %s..."), source_file=dict( - argstr="--mov %s", - copyfile=False, - extensions=None, - mandatory=True, - ), - source_mask=dict( - argstr="--mov-mask", - ), - source_oob=dict( - argstr="--mov-oob", + argstr="--mov %s", copyfile=False, extensions=None, mandatory=True ), + source_mask=dict(argstr="--mov-mask"), + source_oob=dict(argstr="--mov-oob"), subject_id=dict( argstr="--s %s", mandatory=True, @@ -113,9 +49,7 @@ def test_MRICoreg_inputs(): requires=["subjects_dir"], xor=["reference_file"], ), - subjects_dir=dict( - argstr="--sd %s", - ), + subjects_dir=dict(argstr="--sd %s"), ) inputs = MRICoreg.input_spec() @@ -126,15 +60,9 @@ def test_MRICoreg_inputs(): def test_MRICoreg_outputs(): output_map = dict( - out_lta_file=dict( - extensions=None, - ), - out_params_file=dict( - extensions=None, - ), - out_reg_file=dict( - extensions=None, - ), + out_lta_file=dict(extensions=None), + out_params_file=dict(extensions=None), + out_reg_file=dict(extensions=None), ) outputs = MRICoreg.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py index bf359364ba..c07c843181 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py @@ -4,38 +4,14 @@ def test_MRIFill_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - log_file=dict( - argstr="-a %s", - extensions=None, - ), - out_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - ), - segmentation=dict( - argstr="-segmentation %s", - extensions=None, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + log_file=dict(argstr="-a %s", extensions=None), + out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), + segmentation=dict(argstr="-segmentation %s", extensions=None), subjects_dir=dict(), - transform=dict( - argstr="-xform %s", - extensions=None, - ), + transform=dict(argstr="-xform %s", extensions=None), ) inputs = MRIFill.input_spec() @@ -45,14 +21,7 @@ def test_MRIFill_inputs(): def test_MRIFill_outputs(): - output_map = dict( - log_file=dict( - extensions=None, - ), - out_file=dict( - extensions=None, - ), - ) + output_map = dict(log_file=dict(extensions=None), out_file=dict(extensions=None)) outputs = MRIFill.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py 
b/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py index ccb2ab4388..0233931794 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py @@ -4,35 +4,12 @@ def test_MRIMarchingCubes_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - connectivity_value=dict( - argstr="%d", - position=-1, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=1, - ), - label_value=dict( - argstr="%d", - mandatory=True, - position=2, - ), - out_file=dict( - argstr="./%s", - extensions=None, - genfile=True, - position=-2, - ), + args=dict(argstr="%s"), + connectivity_value=dict(argstr="%d", position=-1, usedefault=True), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1), + label_value=dict(argstr="%d", mandatory=True, position=2), + out_file=dict(argstr="./%s", extensions=None, genfile=True, position=-2), subjects_dir=dict(), ) inputs = MRIMarchingCubes.input_spec() @@ -43,11 +20,7 @@ def test_MRIMarchingCubes_inputs(): def test_MRIMarchingCubes_outputs(): - output_map = dict( - surface=dict( - extensions=None, - ), - ) + output_map = dict(surface=dict(extensions=None)) outputs = MRIMarchingCubes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py index e6a239fbd5..2ebbbc120d 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py @@ -4,37 +4,13 @@ def test_MRIPretess_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_filled=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-4, - ), - in_norm=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - keep=dict( - argstr="-keep", - ), - label=dict( - argstr="%s", - mandatory=True, - position=-3, - usedefault=True, - ), - nocorners=dict( - argstr="-nocorners", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_filled=dict(argstr="%s", extensions=None, mandatory=True, position=-4), + in_norm=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + keep=dict(argstr="-keep"), + label=dict(argstr="%s", mandatory=True, position=-3, usedefault=True), + nocorners=dict(argstr="-nocorners"), out_file=dict( argstr="%s", extensions=None, @@ -44,9 +20,7 @@ def test_MRIPretess_inputs(): position=-1, ), subjects_dir=dict(), - test=dict( - argstr="-test", - ), + test=dict(argstr="-test"), ) inputs = MRIPretess.input_spec() @@ -56,11 +30,7 @@ def test_MRIPretess_inputs(): def test_MRIPretess_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MRIPretess.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py index 845e6c6c3c..d672071076 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py @@ -4,84 +4,43 @@ def test_MRISPreproc_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - 
usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), fsgd_file=dict( argstr="--fsgd %s", extensions=None, xor=("subjects", "fsgd_file", "subject_file"), ), - fwhm=dict( - argstr="--fwhm %f", - xor=["num_iters"], - ), - fwhm_source=dict( - argstr="--fwhm-src %f", - xor=["num_iters_source"], - ), - hemi=dict( - argstr="--hemi %s", - mandatory=True, - ), - num_iters=dict( - argstr="--niters %d", - xor=["fwhm"], - ), - num_iters_source=dict( - argstr="--niterssrc %d", - xor=["fwhm_source"], - ), - out_file=dict( - argstr="--out %s", - extensions=None, - genfile=True, - ), - proj_frac=dict( - argstr="--projfrac %s", - ), - smooth_cortex_only=dict( - argstr="--smooth-cortex-only", - ), - source_format=dict( - argstr="--srcfmt %s", - ), + fwhm=dict(argstr="--fwhm %f", xor=["num_iters"]), + fwhm_source=dict(argstr="--fwhm-src %f", xor=["num_iters_source"]), + hemi=dict(argstr="--hemi %s", mandatory=True), + num_iters=dict(argstr="--niters %d", xor=["fwhm"]), + num_iters_source=dict(argstr="--niterssrc %d", xor=["fwhm_source"]), + out_file=dict(argstr="--out %s", extensions=None, genfile=True), + proj_frac=dict(argstr="--projfrac %s"), + smooth_cortex_only=dict(argstr="--smooth-cortex-only"), + source_format=dict(argstr="--srcfmt %s"), subject_file=dict( argstr="--f %s", extensions=None, xor=("subjects", "fsgd_file", "subject_file"), ), subjects=dict( - argstr="--s %s...", - xor=("subjects", "fsgd_file", "subject_file"), + argstr="--s %s...", xor=("subjects", "fsgd_file", "subject_file") ), subjects_dir=dict(), surf_area=dict( - argstr="--area %s", - xor=("surf_measure", "surf_measure_file", "surf_area"), - ), - surf_dir=dict( - argstr="--surfdir %s", + argstr="--area %s", xor=("surf_measure", "surf_measure_file", "surf_area") ), + surf_dir=dict(argstr="--surfdir %s"), surf_measure=dict( - argstr="--meas %s", - xor=("surf_measure", "surf_measure_file", "surf_area"), + argstr="--meas %s", xor=("surf_measure", "surf_measure_file", "surf_area") ), surf_measure_file=dict( - argstr="--is %s...", - xor=("surf_measure", "surf_measure_file", "surf_area"), - ), - target=dict( - argstr="--target %s", - mandatory=True, - ), - vol_measure_file=dict( - argstr="--iv %s %s...", + argstr="--is %s...", xor=("surf_measure", "surf_measure_file", "surf_area") ), + target=dict(argstr="--target %s", mandatory=True), + vol_measure_file=dict(argstr="--iv %s %s..."), ) inputs = MRISPreproc.input_spec() @@ -91,11 +50,7 @@ def test_MRISPreproc_inputs(): def test_MRISPreproc_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MRISPreproc.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py index 5bdb0614e5..d3778f299d 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py @@ -4,61 +4,25 @@ def test_MRISPreprocReconAll_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), copy_inputs=dict(), - environ=dict( - nohash=True, - usedefault=True, - ), + environ=dict(nohash=True, usedefault=True), fsgd_file=dict( argstr="--fsgd %s", extensions=None, xor=("subjects", "fsgd_file", "subject_file"), ), - fwhm=dict( - argstr="--fwhm %f", - xor=["num_iters"], - ), - fwhm_source=dict( - argstr="--fwhm-src %f", - 
xor=["num_iters_source"], - ), - hemi=dict( - argstr="--hemi %s", - mandatory=True, - ), - lh_surfreg_target=dict( - extensions=None, - requires=["surfreg_files"], - ), - num_iters=dict( - argstr="--niters %d", - xor=["fwhm"], - ), - num_iters_source=dict( - argstr="--niterssrc %d", - xor=["fwhm_source"], - ), - out_file=dict( - argstr="--out %s", - extensions=None, - genfile=True, - ), - proj_frac=dict( - argstr="--projfrac %s", - ), - rh_surfreg_target=dict( - extensions=None, - requires=["surfreg_files"], - ), - smooth_cortex_only=dict( - argstr="--smooth-cortex-only", - ), - source_format=dict( - argstr="--srcfmt %s", - ), + fwhm=dict(argstr="--fwhm %f", xor=["num_iters"]), + fwhm_source=dict(argstr="--fwhm-src %f", xor=["num_iters_source"]), + hemi=dict(argstr="--hemi %s", mandatory=True), + lh_surfreg_target=dict(extensions=None, requires=["surfreg_files"]), + num_iters=dict(argstr="--niters %d", xor=["fwhm"]), + num_iters_source=dict(argstr="--niterssrc %d", xor=["fwhm_source"]), + out_file=dict(argstr="--out %s", extensions=None, genfile=True), + proj_frac=dict(argstr="--projfrac %s"), + rh_surfreg_target=dict(extensions=None, requires=["surfreg_files"]), + smooth_cortex_only=dict(argstr="--smooth-cortex-only"), + source_format=dict(argstr="--srcfmt %s"), subject_file=dict( argstr="--f %s", extensions=None, @@ -70,20 +34,15 @@ def test_MRISPreprocReconAll_inputs(): xor=("subjects", "fsgd_file", "subject_file", "subject_id"), ), subjects=dict( - argstr="--s %s...", - xor=("subjects", "fsgd_file", "subject_file"), + argstr="--s %s...", xor=("subjects", "fsgd_file", "subject_file") ), subjects_dir=dict(), surf_area=dict( - argstr="--area %s", - xor=("surf_measure", "surf_measure_file", "surf_area"), - ), - surf_dir=dict( - argstr="--surfdir %s", + argstr="--area %s", xor=("surf_measure", "surf_measure_file", "surf_area") ), + surf_dir=dict(argstr="--surfdir %s"), surf_measure=dict( - argstr="--meas %s", - xor=("surf_measure", "surf_measure_file", "surf_area"), + argstr="--meas %s", xor=("surf_measure", "surf_measure_file", "surf_area") ), surf_measure_file=dict( argstr="--meas %s", @@ -91,16 +50,10 @@ def test_MRISPreprocReconAll_inputs(): xor=("surf_measure", "surf_measure_file", "surf_area"), ), surfreg_files=dict( - argstr="--surfreg %s", - requires=["lh_surfreg_target", "rh_surfreg_target"], - ), - target=dict( - argstr="--target %s", - mandatory=True, - ), - vol_measure_file=dict( - argstr="--iv %s %s...", + argstr="--surfreg %s", requires=["lh_surfreg_target", "rh_surfreg_target"] ), + target=dict(argstr="--target %s", mandatory=True), + vol_measure_file=dict(argstr="--iv %s %s..."), ) inputs = MRISPreprocReconAll.input_spec() @@ -110,11 +63,7 @@ def test_MRISPreprocReconAll_inputs(): def test_MRISPreprocReconAll_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MRISPreprocReconAll.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py b/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py index 8aa7210d0e..0101492c42 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py @@ -4,37 +4,14 @@ def test_MRITessellate_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), - 
label_value=dict( - argstr="%d", - mandatory=True, - position=-2, - ), - out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - position=-1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + label_value=dict(argstr="%d", mandatory=True, position=-2), + out_file=dict(argstr="%s", extensions=None, genfile=True, position=-1), subjects_dir=dict(), - tesselate_all_voxels=dict( - argstr="-a", - ), - use_real_RAS_coordinates=dict( - argstr="-n", - ), + tesselate_all_voxels=dict(argstr="-a"), + use_real_RAS_coordinates=dict(argstr="-n"), ) inputs = MRITessellate.input_spec() @@ -44,11 +21,7 @@ def test_MRITessellate_inputs(): def test_MRITessellate_outputs(): - output_map = dict( - surface=dict( - extensions=None, - ), - ) + output_map = dict(surface=dict(extensions=None)) outputs = MRITessellate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py index e37cf0723a..906aea741e 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py @@ -4,43 +4,15 @@ def test_MRIsCALabel_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - aseg=dict( - argstr="-aseg %s", - extensions=None, - ), - canonsurf=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), - classifier=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s"), + aseg=dict(argstr="-aseg %s", extensions=None), + canonsurf=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + classifier=dict(argstr="%s", extensions=None, mandatory=True, position=-2), copy_inputs=dict(), - curv=dict( - extensions=None, - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - hemisphere=dict( - argstr="%s", - mandatory=True, - position=-4, - ), - label=dict( - argstr="-l %s", - extensions=None, - ), + curv=dict(extensions=None, mandatory=True), + environ=dict(nohash=True, usedefault=True), + hemisphere=dict(argstr="%s", mandatory=True, position=-4), + label=dict(argstr="-l %s", extensions=None), num_threads=dict(), out_file=dict( argstr="%s", @@ -51,24 +23,11 @@ def test_MRIsCALabel_inputs(): name_template="%s.aparc.annot", position=-1, ), - seed=dict( - argstr="-seed %d", - ), - smoothwm=dict( - extensions=None, - mandatory=True, - ), - subject_id=dict( - argstr="%s", - mandatory=True, - position=-5, - usedefault=True, - ), + seed=dict(argstr="-seed %d"), + smoothwm=dict(extensions=None, mandatory=True), + subject_id=dict(argstr="%s", mandatory=True, position=-5, usedefault=True), subjects_dir=dict(), - sulc=dict( - extensions=None, - mandatory=True, - ), + sulc=dict(extensions=None, mandatory=True), ) inputs = MRIsCALabel.input_spec() @@ -78,11 +37,7 @@ def test_MRIsCALabel_inputs(): def test_MRIsCALabel_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MRIsCALabel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py index 1ef9c95c46..6d0d91d620 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py @@ -4,45 +4,16 @@ 
def test_MRIsCalc_inputs(): input_map = dict( - action=dict( - argstr="%s", - mandatory=True, - position=-2, - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file1=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), + action=dict(argstr="%s", mandatory=True, position=-2), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file1=dict(argstr="%s", extensions=None, mandatory=True, position=-3), in_file2=dict( - argstr="%s", - extensions=None, - position=-1, - xor=["in_float", "in_int"], - ), - in_float=dict( - argstr="%f", - position=-1, - xor=["in_file2", "in_int"], - ), - in_int=dict( - argstr="%d", - position=-1, - xor=["in_file2", "in_float"], - ), - out_file=dict( - argstr="-o %s", - extensions=None, - mandatory=True, + argstr="%s", extensions=None, position=-1, xor=["in_float", "in_int"] ), + in_float=dict(argstr="%f", position=-1, xor=["in_file2", "in_int"]), + in_int=dict(argstr="%d", position=-1, xor=["in_file2", "in_float"]), + out_file=dict(argstr="-o %s", extensions=None, mandatory=True), subjects_dir=dict(), ) inputs = MRIsCalc.input_spec() @@ -53,11 +24,7 @@ def test_MRIsCalc_inputs(): def test_MRIsCalc_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MRIsCalc.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py index 01aef41a01..3b901a1a2f 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py @@ -4,24 +4,11 @@ def test_MRIsCombine_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr="--combinesurfs %s", - mandatory=True, - position=1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_files=dict(argstr="--combinesurfs %s", mandatory=True, position=1), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - mandatory=True, - position=-1, + argstr="%s", extensions=None, genfile=True, mandatory=True, position=-1 ), subjects_dir=dict(), ) @@ -33,11 +20,7 @@ def test_MRIsCombine_inputs(): def test_MRIsCombine_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MRIsCombine.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py index daf4462ff8..98c45ae030 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py @@ -4,48 +4,17 @@ def test_MRIsConvert_inputs(): input_map = dict( - annot_file=dict( - argstr="--annot %s", - extensions=None, - ), - args=dict( - argstr="%s", - ), - dataarray_num=dict( - argstr="--da_num %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - functional_file=dict( - argstr="-f %s", - extensions=None, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - label_file=dict( - argstr="--label %s", - extensions=None, - ), - labelstats_outfile=dict( - argstr="--labelstats %s", - extensions=None, - ), - normal=dict( - argstr="-n", - ), - origname=dict( - argstr="-o %s", 
- ), - out_datatype=dict( - mandatory=True, - xor=["out_file"], - ), + annot_file=dict(argstr="--annot %s", extensions=None), + args=dict(argstr="%s"), + dataarray_num=dict(argstr="--da_num %d"), + environ=dict(nohash=True, usedefault=True), + functional_file=dict(argstr="-f %s", extensions=None), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + label_file=dict(argstr="--label %s", extensions=None), + labelstats_outfile=dict(argstr="--labelstats %s", extensions=None), + normal=dict(argstr="-n"), + origname=dict(argstr="-o %s"), + out_datatype=dict(mandatory=True, xor=["out_file"]), out_file=dict( argstr="%s", extensions=None, @@ -54,39 +23,17 @@ def test_MRIsConvert_inputs(): position=-1, xor=["out_datatype"], ), - parcstats_file=dict( - argstr="--parcstats %s", - extensions=None, - ), - patch=dict( - argstr="-p", - ), - rescale=dict( - argstr="-r", - ), - scalarcurv_file=dict( - argstr="-c %s", - extensions=None, - ), - scale=dict( - argstr="-s %.3f", - ), + parcstats_file=dict(argstr="--parcstats %s", extensions=None), + patch=dict(argstr="-p"), + rescale=dict(argstr="-r"), + scalarcurv_file=dict(argstr="-c %s", extensions=None), + scale=dict(argstr="-s %.3f"), subjects_dir=dict(), - talairachxfm_subjid=dict( - argstr="-t %s", - ), - to_scanner=dict( - argstr="--to-scanner", - ), - to_tkr=dict( - argstr="--to-tkr", - ), - vertex=dict( - argstr="-v", - ), - xyz_ascii=dict( - argstr="-a", - ), + talairachxfm_subjid=dict(argstr="-t %s"), + to_scanner=dict(argstr="--to-scanner"), + to_tkr=dict(argstr="--to-tkr"), + vertex=dict(argstr="-v"), + xyz_ascii=dict(argstr="-a"), ) inputs = MRIsConvert.input_spec() @@ -96,11 +43,7 @@ def test_MRIsConvert_inputs(): def test_MRIsConvert_outputs(): - output_map = dict( - converted=dict( - extensions=None, - ), - ) + output_map = dict(converted=dict(extensions=None)) outputs = MRIsConvert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py index 05e34a29b5..fff96b681f 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py @@ -4,61 +4,23 @@ def test_MRIsExpand_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - distance=dict( - argstr="%g", - mandatory=True, - position=-2, - ), - dt=dict( - argstr="-T %g", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + distance=dict(argstr="%g", mandatory=True, position=-2), + dt=dict(argstr="-T %g"), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=-3, - ), - nsurfaces=dict( - argstr="-N %d", - ), - out_name=dict( - argstr="%s", - position=-1, - usedefault=True, - ), - pial=dict( - argstr="-pial %s", - copyfile=False, - ), - smooth_averages=dict( - argstr="-A %d", - ), - sphere=dict( - copyfile=False, - usedefault=True, - ), - spring=dict( - argstr="-S %g", - ), + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-3 + ), + nsurfaces=dict(argstr="-N %d"), + out_name=dict(argstr="%s", position=-1, usedefault=True), + pial=dict(argstr="-pial %s", copyfile=False), + smooth_averages=dict(argstr="-A %d"), + sphere=dict(copyfile=False, usedefault=True), + spring=dict(argstr="-S %g"), subjects_dir=dict(), - thickness=dict( - argstr="-thickness", - ), - thickness_name=dict( - argstr="-thickness_name %s", - 
copyfile=False, - ), - write_iterations=dict( - argstr="-W %d", - ), + thickness=dict(argstr="-thickness"), + thickness_name=dict(argstr="-thickness_name %s", copyfile=False), + write_iterations=dict(argstr="-W %d"), ) inputs = MRIsExpand.input_spec() @@ -68,11 +30,7 @@ def test_MRIsExpand_inputs(): def test_MRIsExpand_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MRIsExpand.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py index 9cc45189a0..2ef9a9043f 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py @@ -4,24 +4,12 @@ def test_MRIsInflate_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="%s", - copyfile=True, - extensions=None, - mandatory=True, - position=-2, - ), - no_save_sulc=dict( - argstr="-no-save-sulc", - xor=["out_sulc"], + argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-2 ), + no_save_sulc=dict(argstr="-no-save-sulc", xor=["out_sulc"]), out_file=dict( argstr="%s", extensions=None, @@ -31,10 +19,7 @@ def test_MRIsInflate_inputs(): name_template="%s.inflated", position=-1, ), - out_sulc=dict( - extensions=None, - xor=["no_save_sulc"], - ), + out_sulc=dict(extensions=None, xor=["no_save_sulc"]), subjects_dir=dict(), ) inputs = MRIsInflate.input_spec() @@ -45,14 +30,7 @@ def test_MRIsInflate_inputs(): def test_MRIsInflate_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - out_sulc=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None), out_sulc=dict(extensions=None)) outputs = MRIsInflate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py b/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py index 093dd3d9b8..fa6b161543 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py @@ -4,52 +4,18 @@ def test_MS_LDA_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - conform=dict( - argstr="-conform", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - images=dict( - argstr="%s", - copyfile=False, - mandatory=True, - position=-1, - ), - label_file=dict( - argstr="-label %s", - extensions=None, - ), - lda_labels=dict( - argstr="-lda %s", - mandatory=True, - sep=" ", - ), - mask_file=dict( - argstr="-mask %s", - extensions=None, - ), - shift=dict( - argstr="-shift %d", - ), + args=dict(argstr="%s"), + conform=dict(argstr="-conform"), + environ=dict(nohash=True, usedefault=True), + images=dict(argstr="%s", copyfile=False, mandatory=True, position=-1), + label_file=dict(argstr="-label %s", extensions=None), + lda_labels=dict(argstr="-lda %s", mandatory=True, sep=" "), + mask_file=dict(argstr="-mask %s", extensions=None), + shift=dict(argstr="-shift %d"), subjects_dir=dict(), - use_weights=dict( - argstr="-W", - ), - vol_synth_file=dict( - argstr="-synth %s", - extensions=None, - mandatory=True, - ), - weight_file=dict( - argstr="-weight %s", - extensions=None, - mandatory=True, - ), + use_weights=dict(argstr="-W"), + vol_synth_file=dict(argstr="-synth %s", 
extensions=None, mandatory=True), + weight_file=dict(argstr="-weight %s", extensions=None, mandatory=True), ) inputs = MS_LDA.input_spec() @@ -60,12 +26,7 @@ def test_MS_LDA_inputs(): def test_MS_LDA_outputs(): output_map = dict( - vol_synth_file=dict( - extensions=None, - ), - weight_file=dict( - extensions=None, - ), + vol_synth_file=dict(extensions=None), weight_file=dict(extensions=None) ) outputs = MS_LDA.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py b/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py index e3778911e6..bf21f14842 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py @@ -4,24 +4,11 @@ def test_MakeAverageSubject_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - out_name=dict( - argstr="--out %s", - extensions=None, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + out_name=dict(argstr="--out %s", extensions=None, usedefault=True), subjects_dir=dict(), - subjects_ids=dict( - argstr="--subjects %s", - mandatory=True, - sep=" ", - ), + subjects_ids=dict(argstr="--subjects %s", mandatory=True, sep=" "), ) inputs = MakeAverageSubject.input_spec() @@ -31,9 +18,7 @@ def test_MakeAverageSubject_inputs(): def test_MakeAverageSubject_outputs(): - output_map = dict( - average_subject_name=dict(), - ) + output_map = dict(average_subject_name=dict()) outputs = MakeAverageSubject.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py b/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py index 06316d071a..8926586954 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py @@ -4,88 +4,29 @@ def test_MakeSurfaces_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), copy_inputs=dict(), - environ=dict( - nohash=True, - usedefault=True, - ), - fix_mtl=dict( - argstr="-fix_mtl", - ), - hemisphere=dict( - argstr="%s", - mandatory=True, - position=-1, - ), - in_T1=dict( - argstr="-T1 %s", - extensions=None, - ), - in_aseg=dict( - argstr="-aseg %s", - extensions=None, - ), - in_filled=dict( - extensions=None, - mandatory=True, - ), - in_label=dict( - extensions=None, - xor=["noaparc"], - ), - in_orig=dict( - argstr="-orig %s", - extensions=None, - mandatory=True, - ), - in_white=dict( - extensions=None, - ), - in_wm=dict( - extensions=None, - mandatory=True, - ), - longitudinal=dict( - argstr="-long", - ), - maximum=dict( - argstr="-max %.1f", - ), - mgz=dict( - argstr="-mgz", - ), - no_white=dict( - argstr="-nowhite", - ), - noaparc=dict( - argstr="-noaparc", - xor=["in_label"], - ), - orig_pial=dict( - argstr="-orig_pial %s", - extensions=None, - requires=["in_label"], - ), - orig_white=dict( - argstr="-orig_white %s", - extensions=None, - ), - subject_id=dict( - argstr="%s", - mandatory=True, - position=-2, - usedefault=True, - ), + environ=dict(nohash=True, usedefault=True), + fix_mtl=dict(argstr="-fix_mtl"), + hemisphere=dict(argstr="%s", mandatory=True, position=-1), + in_T1=dict(argstr="-T1 %s", extensions=None), + in_aseg=dict(argstr="-aseg %s", extensions=None), + in_filled=dict(extensions=None, mandatory=True), + in_label=dict(extensions=None, xor=["noaparc"]), + in_orig=dict(argstr="-orig %s", 
extensions=None, mandatory=True), + in_white=dict(extensions=None), + in_wm=dict(extensions=None, mandatory=True), + longitudinal=dict(argstr="-long"), + maximum=dict(argstr="-max %.1f"), + mgz=dict(argstr="-mgz"), + no_white=dict(argstr="-nowhite"), + noaparc=dict(argstr="-noaparc", xor=["in_label"]), + orig_pial=dict(argstr="-orig_pial %s", extensions=None, requires=["in_label"]), + orig_white=dict(argstr="-orig_white %s", extensions=None), + subject_id=dict(argstr="%s", mandatory=True, position=-2, usedefault=True), subjects_dir=dict(), - white=dict( - argstr="-white %s", - ), - white_only=dict( - argstr="-whiteonly", - ), + white=dict(argstr="-white %s"), + white_only=dict(argstr="-whiteonly"), ) inputs = MakeSurfaces.input_spec() @@ -96,24 +37,12 @@ def test_MakeSurfaces_inputs(): def test_MakeSurfaces_outputs(): output_map = dict( - out_area=dict( - extensions=None, - ), - out_cortex=dict( - extensions=None, - ), - out_curv=dict( - extensions=None, - ), - out_pial=dict( - extensions=None, - ), - out_thickness=dict( - extensions=None, - ), - out_white=dict( - extensions=None, - ), + out_area=dict(extensions=None), + out_cortex=dict(extensions=None), + out_curv=dict(extensions=None), + out_pial=dict(extensions=None), + out_thickness=dict(extensions=None), + out_white=dict(extensions=None), ) outputs = MakeSurfaces.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py b/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py index 271f0bb328..a50beab155 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py @@ -4,26 +4,11 @@ def test_Normalize_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gradient=dict( - argstr="-g %d", - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - mask=dict( - argstr="-mask %s", - extensions=None, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + gradient=dict(argstr="-g %d"), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + mask=dict(argstr="-mask %s", extensions=None), out_file=dict( argstr="%s", extensions=None, @@ -33,14 +18,9 @@ def test_Normalize_inputs(): name_template="%s_norm", position=-1, ), - segmentation=dict( - argstr="-aseg %s", - extensions=None, - ), + segmentation=dict(argstr="-aseg %s", extensions=None), subjects_dir=dict(), - transform=dict( - extensions=None, - ), + transform=dict(extensions=None), ) inputs = Normalize.input_spec() @@ -50,11 +30,7 @@ def test_Normalize_inputs(): def test_Normalize_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Normalize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py b/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py index 533c0a17a9..b27728f6c0 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py @@ -4,192 +4,68 @@ def test_OneSampleTTest_inputs(): input_map = dict( - allow_ill_cond=dict( - argstr="--illcond", - ), - allow_repeated_subjects=dict( - argstr="--allowsubjrep", - ), - args=dict( - argstr="%s", - ), - calc_AR1=dict( - argstr="--tar1", - ), - check_opts=dict( - argstr="--checkopts", - ), - compute_log_y=dict( - argstr="--logy", - ), - 
contrast=dict( - argstr="--C %s...", - ), - cortex=dict( - argstr="--cortex", - xor=["label_file"], - ), - debug=dict( - argstr="--debug", - ), + allow_ill_cond=dict(argstr="--illcond"), + allow_repeated_subjects=dict(argstr="--allowsubjrep"), + args=dict(argstr="%s"), + calc_AR1=dict(argstr="--tar1"), + check_opts=dict(argstr="--checkopts"), + compute_log_y=dict(argstr="--logy"), + contrast=dict(argstr="--C %s..."), + cortex=dict(argstr="--cortex", xor=["label_file"]), + debug=dict(argstr="--debug"), design=dict( - argstr="--X %s", - extensions=None, - xor=("fsgd", "design", "one_sample"), - ), - diag=dict( - argstr="--diag %d", - ), - diag_cluster=dict( - argstr="--diag-cluster", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixed_fx_dof=dict( - argstr="--ffxdof %d", - xor=["fixed_fx_dof_file"], + argstr="--X %s", extensions=None, xor=("fsgd", "design", "one_sample") ), + diag=dict(argstr="--diag %d"), + diag_cluster=dict(argstr="--diag-cluster"), + environ=dict(nohash=True, usedefault=True), + fixed_fx_dof=dict(argstr="--ffxdof %d", xor=["fixed_fx_dof_file"]), fixed_fx_dof_file=dict( - argstr="--ffxdofdat %d", - extensions=None, - xor=["fixed_fx_dof"], - ), - fixed_fx_var=dict( - argstr="--yffxvar %s", - extensions=None, - ), - force_perm=dict( - argstr="--perm-force", - ), - fsgd=dict( - argstr="--fsgd %s %s", - xor=("fsgd", "design", "one_sample"), - ), - fwhm=dict( - argstr="--fwhm %f", - ), - glm_dir=dict( - argstr="--glmdir %s", - genfile=True, + argstr="--ffxdofdat %d", extensions=None, xor=["fixed_fx_dof"] ), + fixed_fx_var=dict(argstr="--yffxvar %s", extensions=None), + force_perm=dict(argstr="--perm-force"), + fsgd=dict(argstr="--fsgd %s %s", xor=("fsgd", "design", "one_sample")), + fwhm=dict(argstr="--fwhm %f"), + glm_dir=dict(argstr="--glmdir %s", genfile=True), hemi=dict(), - in_file=dict( - argstr="--y %s", - copyfile=False, - extensions=None, - mandatory=True, - ), - invert_mask=dict( - argstr="--mask-inv", - ), - label_file=dict( - argstr="--label %s", - extensions=None, - xor=["cortex"], - ), - mask_file=dict( - argstr="--mask %s", - extensions=None, - ), - no_contrast_ok=dict( - argstr="--no-contrasts-ok", - ), - no_est_fwhm=dict( - argstr="--no-est-fwhm", - ), - no_mask_smooth=dict( - argstr="--no-mask-smooth", - ), - no_prune=dict( - argstr="--no-prune", - xor=["prunethresh"], - ), + in_file=dict(argstr="--y %s", copyfile=False, extensions=None, mandatory=True), + invert_mask=dict(argstr="--mask-inv"), + label_file=dict(argstr="--label %s", extensions=None, xor=["cortex"]), + mask_file=dict(argstr="--mask %s", extensions=None), + no_contrast_ok=dict(argstr="--no-contrasts-ok"), + no_est_fwhm=dict(argstr="--no-est-fwhm"), + no_mask_smooth=dict(argstr="--no-mask-smooth"), + no_prune=dict(argstr="--no-prune", xor=["prunethresh"]), one_sample=dict( - argstr="--osgm", - xor=("one_sample", "fsgd", "design", "contrast"), - ), - pca=dict( - argstr="--pca", - ), - per_voxel_reg=dict( - argstr="--pvr %s...", - ), - profile=dict( - argstr="--profile %d", - ), - prune=dict( - argstr="--prune", - ), - prune_thresh=dict( - argstr="--prune_thr %f", - xor=["noprune"], - ), - resynth_test=dict( - argstr="--resynthtest %d", - ), - save_cond=dict( - argstr="--save-cond", - ), - save_estimate=dict( - argstr="--yhat-save", - ), - save_res_corr_mtx=dict( - argstr="--eres-scm", - ), - save_residual=dict( - argstr="--eres-save", - ), - seed=dict( - argstr="--seed %d", - ), - self_reg=dict( - argstr="--selfreg %d %d %d", - ), - sim_done_file=dict( - argstr="--sim-done %s", - 
extensions=None, - ), - sim_sign=dict( - argstr="--sim-sign %s", - ), - simulation=dict( - argstr="--sim %s %d %f %s", - ), + argstr="--osgm", xor=("one_sample", "fsgd", "design", "contrast") + ), + pca=dict(argstr="--pca"), + per_voxel_reg=dict(argstr="--pvr %s..."), + profile=dict(argstr="--profile %d"), + prune=dict(argstr="--prune"), + prune_thresh=dict(argstr="--prune_thr %f", xor=["noprune"]), + resynth_test=dict(argstr="--resynthtest %d"), + save_cond=dict(argstr="--save-cond"), + save_estimate=dict(argstr="--yhat-save"), + save_res_corr_mtx=dict(argstr="--eres-scm"), + save_residual=dict(argstr="--eres-save"), + seed=dict(argstr="--seed %d"), + self_reg=dict(argstr="--selfreg %d %d %d"), + sim_done_file=dict(argstr="--sim-done %s", extensions=None), + sim_sign=dict(argstr="--sim-sign %s"), + simulation=dict(argstr="--sim %s %d %f %s"), subject_id=dict(), subjects_dir=dict(), - surf=dict( - argstr="--surf %s %s %s", - requires=["subject_id", "hemi"], - ), - surf_geo=dict( - usedefault=True, - ), - synth=dict( - argstr="--synth", - ), - uniform=dict( - argstr="--uniform %f %f", - ), - var_fwhm=dict( - argstr="--var-fwhm %f", - ), - vox_dump=dict( - argstr="--voxdump %d %d %d", - ), - weight_file=dict( - extensions=None, - xor=["weighted_ls"], - ), - weight_inv=dict( - argstr="--w-inv", - xor=["weighted_ls"], - ), - weight_sqrt=dict( - argstr="--w-sqrt", - xor=["weighted_ls"], - ), + surf=dict(argstr="--surf %s %s %s", requires=["subject_id", "hemi"]), + surf_geo=dict(usedefault=True), + synth=dict(argstr="--synth"), + uniform=dict(argstr="--uniform %f %f"), + var_fwhm=dict(argstr="--var-fwhm %f"), + vox_dump=dict(argstr="--voxdump %d %d %d"), + weight_file=dict(extensions=None, xor=["weighted_ls"]), + weight_inv=dict(argstr="--w-inv", xor=["weighted_ls"]), + weight_sqrt=dict(argstr="--w-sqrt", xor=["weighted_ls"]), weighted_ls=dict( argstr="--wls %s", extensions=None, @@ -205,47 +81,23 @@ def test_OneSampleTTest_inputs(): def test_OneSampleTTest_outputs(): output_map = dict( - beta_file=dict( - extensions=None, - ), - dof_file=dict( - extensions=None, - ), - error_file=dict( - extensions=None, - ), - error_stddev_file=dict( - extensions=None, - ), - error_var_file=dict( - extensions=None, - ), - estimate_file=dict( - extensions=None, - ), - frame_eigenvectors=dict( - extensions=None, - ), + beta_file=dict(extensions=None), + dof_file=dict(extensions=None), + error_file=dict(extensions=None), + error_stddev_file=dict(extensions=None), + error_var_file=dict(extensions=None), + estimate_file=dict(extensions=None), + frame_eigenvectors=dict(extensions=None), ftest_file=dict(), - fwhm_file=dict( - extensions=None, - ), + fwhm_file=dict(extensions=None), gamma_file=dict(), gamma_var_file=dict(), glm_dir=dict(), - mask_file=dict( - extensions=None, - ), + mask_file=dict(extensions=None), sig_file=dict(), - singular_values=dict( - extensions=None, - ), - spatial_eigenvectors=dict( - extensions=None, - ), - svd_stats_file=dict( - extensions=None, - ), + singular_values=dict(extensions=None), + spatial_eigenvectors=dict(extensions=None), + svd_stats_file=dict(extensions=None), ) outputs = OneSampleTTest.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Paint.py b/nipype/interfaces/freesurfer/tests/test_auto_Paint.py index d95c4c9fa3..24463dad25 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Paint.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Paint.py @@ -4,22 +4,10 @@ def test_Paint_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - 
averages=dict( - argstr="-a %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_surf=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s"), + averages=dict(argstr="-a %d"), + environ=dict(nohash=True, usedefault=True), + in_surf=dict(argstr="%s", extensions=None, mandatory=True, position=-2), out_file=dict( argstr="%s", extensions=None, @@ -30,12 +18,7 @@ def test_Paint_inputs(): position=-1, ), subjects_dir=dict(), - template=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), + template=dict(argstr="%s", extensions=None, mandatory=True, position=-3), template_param=dict(), ) inputs = Paint.input_spec() @@ -46,11 +29,7 @@ def test_Paint_inputs(): def test_Paint_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Paint.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py b/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py index e1632020b5..74929dfd51 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py @@ -4,109 +4,36 @@ def test_ParcellationStats_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - aseg=dict( - extensions=None, - mandatory=True, - ), - brainmask=dict( - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s"), + aseg=dict(extensions=None, mandatory=True), + brainmask=dict(extensions=None, mandatory=True), copy_inputs=dict(), - cortex_label=dict( - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - hemisphere=dict( - argstr="%s", - mandatory=True, - position=-2, - ), - in_annotation=dict( - argstr="-a %s", - extensions=None, - xor=["in_label"], - ), - in_cortex=dict( - argstr="-cortex %s", - extensions=None, - ), + cortex_label=dict(extensions=None), + environ=dict(nohash=True, usedefault=True), + hemisphere=dict(argstr="%s", mandatory=True, position=-2), + in_annotation=dict(argstr="-a %s", extensions=None, xor=["in_label"]), + in_cortex=dict(argstr="-cortex %s", extensions=None), in_label=dict( - argstr="-l %s", - extensions=None, - xor=["in_annotatoin", "out_color"], - ), - lh_pial=dict( - extensions=None, - mandatory=True, - ), - lh_white=dict( - extensions=None, - mandatory=True, - ), - mgz=dict( - argstr="-mgz", - ), - out_color=dict( - argstr="-c %s", - extensions=None, - genfile=True, - xor=["in_label"], + argstr="-l %s", extensions=None, xor=["in_annotatoin", "out_color"] ), + lh_pial=dict(extensions=None, mandatory=True), + lh_white=dict(extensions=None, mandatory=True), + mgz=dict(argstr="-mgz"), + out_color=dict(argstr="-c %s", extensions=None, genfile=True, xor=["in_label"]), out_table=dict( - argstr="-f %s", - extensions=None, - genfile=True, - requires=["tabular_output"], - ), - rh_pial=dict( - extensions=None, - mandatory=True, - ), - rh_white=dict( - extensions=None, - mandatory=True, - ), - ribbon=dict( - extensions=None, - mandatory=True, - ), - subject_id=dict( - argstr="%s", - mandatory=True, - position=-3, - usedefault=True, + argstr="-f %s", extensions=None, genfile=True, requires=["tabular_output"] ), + rh_pial=dict(extensions=None, mandatory=True), + rh_white=dict(extensions=None, mandatory=True), + ribbon=dict(extensions=None, mandatory=True), + subject_id=dict(argstr="%s", mandatory=True, position=-3, usedefault=True), 
subjects_dir=dict(), - surface=dict( - argstr="%s", - position=-1, - ), - tabular_output=dict( - argstr="-b", - ), - th3=dict( - argstr="-th3", - requires=["cortex_label"], - ), - thickness=dict( - extensions=None, - mandatory=True, - ), - transform=dict( - extensions=None, - mandatory=True, - ), - wm=dict( - extensions=None, - mandatory=True, - ), + surface=dict(argstr="%s", position=-1), + tabular_output=dict(argstr="-b"), + th3=dict(argstr="-th3", requires=["cortex_label"]), + thickness=dict(extensions=None, mandatory=True), + transform=dict(extensions=None, mandatory=True), + wm=dict(extensions=None, mandatory=True), ) inputs = ParcellationStats.input_spec() @@ -116,14 +43,7 @@ def test_ParcellationStats_inputs(): def test_ParcellationStats_outputs(): - output_map = dict( - out_color=dict( - extensions=None, - ), - out_table=dict( - extensions=None, - ), - ) + output_map = dict(out_color=dict(extensions=None), out_table=dict(extensions=None)) outputs = ParcellationStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py b/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py index 3168ac64ec..0f3f4ef2be 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py @@ -4,29 +4,13 @@ def test_ParseDICOMDir_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dicom_dir=dict( - argstr="--d %s", - mandatory=True, - ), - dicom_info_file=dict( - argstr="--o %s", - extensions=None, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - sortbyrun=dict( - argstr="--sortbyrun", - ), + args=dict(argstr="%s"), + dicom_dir=dict(argstr="--d %s", mandatory=True), + dicom_info_file=dict(argstr="--o %s", extensions=None, usedefault=True), + environ=dict(nohash=True, usedefault=True), + sortbyrun=dict(argstr="--sortbyrun"), subjects_dir=dict(), - summarize=dict( - argstr="--summarize", - ), + summarize=dict(argstr="--summarize"), ) inputs = ParseDICOMDir.input_spec() @@ -36,11 +20,7 @@ def test_ParseDICOMDir_inputs(): def test_ParseDICOMDir_outputs(): - output_map = dict( - dicom_info_file=dict( - extensions=None, - ), - ) + output_map = dict(dicom_info_file=dict(extensions=None)) outputs = ParseDICOMDir.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py index aa270f30b3..d5cbe65c7f 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py @@ -4,168 +4,58 @@ def test_ReconAll_inputs(): input_map = dict( - FLAIR_file=dict( - argstr="-FLAIR %s", - extensions=None, - min_ver="5.3.0", - ), - T1_files=dict( - argstr="-i %s...", - ), - T2_file=dict( - argstr="-T2 %s", - extensions=None, - min_ver="5.3.0", - ), - args=dict( - argstr="%s", - ), - big_ventricles=dict( - argstr="-bigventricles", - ), - brainstem=dict( - argstr="-brainstem-structures", - ), - directive=dict( - argstr="-%s", - position=0, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - expert=dict( - argstr="-expert %s", - extensions=None, - ), - flags=dict( - argstr="%s", - ), - hemi=dict( - argstr="-hemi %s", - ), + FLAIR_file=dict(argstr="-FLAIR %s", extensions=None, min_ver="5.3.0"), + T1_files=dict(argstr="-i %s..."), + T2_file=dict(argstr="-T2 %s", extensions=None, min_ver="5.3.0"), + 
args=dict(argstr="%s"), + big_ventricles=dict(argstr="-bigventricles"), + brainstem=dict(argstr="-brainstem-structures"), + directive=dict(argstr="-%s", position=0, usedefault=True), + environ=dict(nohash=True, usedefault=True), + expert=dict(argstr="-expert %s", extensions=None), + flags=dict(argstr="%s"), + hemi=dict(argstr="-hemi %s"), hippocampal_subfields_T1=dict( - argstr="-hippocampal-subfields-T1", - min_ver="6.0.0", + argstr="-hippocampal-subfields-T1", min_ver="6.0.0" ), hippocampal_subfields_T2=dict( - argstr="-hippocampal-subfields-T2 %s %s", - min_ver="6.0.0", - ), - hires=dict( - argstr="-hires", - min_ver="6.0.0", - ), - mprage=dict( - argstr="-mprage", - ), - mri_aparc2aseg=dict( - xor=["expert"], - ), - mri_ca_label=dict( - xor=["expert"], - ), - mri_ca_normalize=dict( - xor=["expert"], - ), - mri_ca_register=dict( - xor=["expert"], - ), - mri_edit_wm_with_aseg=dict( - xor=["expert"], - ), - mri_em_register=dict( - xor=["expert"], - ), - mri_fill=dict( - xor=["expert"], - ), - mri_mask=dict( - xor=["expert"], - ), - mri_normalize=dict( - xor=["expert"], - ), - mri_pretess=dict( - xor=["expert"], - ), - mri_remove_neck=dict( - xor=["expert"], - ), - mri_segment=dict( - xor=["expert"], - ), - mri_segstats=dict( - xor=["expert"], - ), - mri_tessellate=dict( - xor=["expert"], - ), - mri_watershed=dict( - xor=["expert"], - ), - mris_anatomical_stats=dict( - xor=["expert"], - ), - mris_ca_label=dict( - xor=["expert"], - ), - mris_fix_topology=dict( - xor=["expert"], - ), - mris_inflate=dict( - xor=["expert"], - ), - mris_make_surfaces=dict( - xor=["expert"], - ), - mris_register=dict( - xor=["expert"], - ), - mris_smooth=dict( - xor=["expert"], - ), - mris_sphere=dict( - xor=["expert"], - ), - mris_surf2vol=dict( - xor=["expert"], - ), - mrisp_paint=dict( - xor=["expert"], - ), - openmp=dict( - argstr="-openmp %d", - ), - parallel=dict( - argstr="-parallel", - ), - subject_id=dict( - argstr="-subjid %s", - usedefault=True, - ), - subjects_dir=dict( - argstr="-sd %s", - genfile=True, - hash_files=False, - ), - talairach=dict( - xor=["expert"], - ), - use_FLAIR=dict( - argstr="-FLAIRpial", - min_ver="5.3.0", - xor=["use_T2"], - ), - use_T2=dict( - argstr="-T2pial", - min_ver="5.3.0", - xor=["use_FLAIR"], - ), - xopts=dict( - argstr="-xopts-%s", - ), + argstr="-hippocampal-subfields-T2 %s %s", min_ver="6.0.0" + ), + hires=dict(argstr="-hires", min_ver="6.0.0"), + mprage=dict(argstr="-mprage"), + mri_aparc2aseg=dict(xor=["expert"]), + mri_ca_label=dict(xor=["expert"]), + mri_ca_normalize=dict(xor=["expert"]), + mri_ca_register=dict(xor=["expert"]), + mri_edit_wm_with_aseg=dict(xor=["expert"]), + mri_em_register=dict(xor=["expert"]), + mri_fill=dict(xor=["expert"]), + mri_mask=dict(xor=["expert"]), + mri_normalize=dict(xor=["expert"]), + mri_pretess=dict(xor=["expert"]), + mri_remove_neck=dict(xor=["expert"]), + mri_segment=dict(xor=["expert"]), + mri_segstats=dict(xor=["expert"]), + mri_tessellate=dict(xor=["expert"]), + mri_watershed=dict(xor=["expert"]), + mris_anatomical_stats=dict(xor=["expert"]), + mris_ca_label=dict(xor=["expert"]), + mris_fix_topology=dict(xor=["expert"]), + mris_inflate=dict(xor=["expert"]), + mris_make_surfaces=dict(xor=["expert"]), + mris_register=dict(xor=["expert"]), + mris_smooth=dict(xor=["expert"]), + mris_sphere=dict(xor=["expert"]), + mris_surf2vol=dict(xor=["expert"]), + mrisp_paint=dict(xor=["expert"]), + openmp=dict(argstr="-openmp %d"), + parallel=dict(argstr="-parallel"), + subject_id=dict(argstr="-subjid %s", usedefault=True), + 
subjects_dir=dict(argstr="-sd %s", genfile=True, hash_files=False), + talairach=dict(xor=["expert"]), + use_FLAIR=dict(argstr="-FLAIRpial", min_ver="5.3.0", xor=["use_T2"]), + use_T2=dict(argstr="-T2pial", min_ver="5.3.0", xor=["use_FLAIR"]), + xopts=dict(argstr="-xopts-%s"), ) inputs = ReconAll.input_spec() @@ -176,145 +66,45 @@ def test_ReconAll_inputs(): def test_ReconAll_outputs(): output_map = dict( - BA_stats=dict( - altkey="BA", - loc="stats", - ), - T1=dict( - extensions=None, - loc="mri", - ), - annot=dict( - altkey="*annot", - loc="label", - ), - aparc_a2009s_stats=dict( - altkey="aparc.a2009s", - loc="stats", - ), - aparc_aseg=dict( - altkey="aparc*aseg", - loc="mri", - ), - aparc_stats=dict( - altkey="aparc", - loc="stats", - ), - area_pial=dict( - altkey="area.pial", - loc="surf", - ), - aseg=dict( - extensions=None, - loc="mri", - ), - aseg_stats=dict( - altkey="aseg", - loc="stats", - ), - avg_curv=dict( - loc="surf", - ), - brain=dict( - extensions=None, - loc="mri", - ), - brainmask=dict( - extensions=None, - loc="mri", - ), - curv=dict( - loc="surf", - ), - curv_pial=dict( - altkey="curv.pial", - loc="surf", - ), - curv_stats=dict( - altkey="curv", - loc="stats", - ), - entorhinal_exvivo_stats=dict( - altkey="entorhinal_exvivo", - loc="stats", - ), - filled=dict( - extensions=None, - loc="mri", - ), - graymid=dict( - altkey=["graymid", "midthickness"], - loc="surf", - ), - inflated=dict( - loc="surf", - ), - jacobian_white=dict( - loc="surf", - ), - label=dict( - altkey="*label", - loc="label", - ), - norm=dict( - extensions=None, - loc="mri", - ), - nu=dict( - extensions=None, - loc="mri", - ), - orig=dict( - extensions=None, - loc="mri", - ), - pial=dict( - loc="surf", - ), - rawavg=dict( - extensions=None, - loc="mri", - ), - ribbon=dict( - altkey="*ribbon", - loc="mri", - ), - smoothwm=dict( - loc="surf", - ), - sphere=dict( - loc="surf", - ), - sphere_reg=dict( - altkey="sphere.reg", - loc="surf", - ), + BA_stats=dict(altkey="BA", loc="stats"), + T1=dict(extensions=None, loc="mri"), + annot=dict(altkey="*annot", loc="label"), + aparc_a2009s_stats=dict(altkey="aparc.a2009s", loc="stats"), + aparc_aseg=dict(altkey="aparc*aseg", loc="mri"), + aparc_stats=dict(altkey="aparc", loc="stats"), + area_pial=dict(altkey="area.pial", loc="surf"), + aseg=dict(extensions=None, loc="mri"), + aseg_stats=dict(altkey="aseg", loc="stats"), + avg_curv=dict(loc="surf"), + brain=dict(extensions=None, loc="mri"), + brainmask=dict(extensions=None, loc="mri"), + curv=dict(loc="surf"), + curv_pial=dict(altkey="curv.pial", loc="surf"), + curv_stats=dict(altkey="curv", loc="stats"), + entorhinal_exvivo_stats=dict(altkey="entorhinal_exvivo", loc="stats"), + filled=dict(extensions=None, loc="mri"), + graymid=dict(altkey=["graymid", "midthickness"], loc="surf"), + inflated=dict(loc="surf"), + jacobian_white=dict(loc="surf"), + label=dict(altkey="*label", loc="label"), + norm=dict(extensions=None, loc="mri"), + nu=dict(extensions=None, loc="mri"), + orig=dict(extensions=None, loc="mri"), + pial=dict(loc="surf"), + rawavg=dict(extensions=None, loc="mri"), + ribbon=dict(altkey="*ribbon", loc="mri"), + smoothwm=dict(loc="surf"), + sphere=dict(loc="surf"), + sphere_reg=dict(altkey="sphere.reg", loc="surf"), subject_id=dict(), subjects_dir=dict(), - sulc=dict( - loc="surf", - ), - thickness=dict( - loc="surf", - ), - volume=dict( - loc="surf", - ), - white=dict( - loc="surf", - ), - wm=dict( - extensions=None, - loc="mri", - ), - wmparc=dict( - extensions=None, - loc="mri", - ), - wmparc_stats=dict( - 
altkey="wmparc", - loc="stats", - ), + sulc=dict(loc="surf"), + thickness=dict(loc="surf"), + volume=dict(loc="surf"), + white=dict(loc="surf"), + wm=dict(extensions=None, loc="mri"), + wmparc=dict(extensions=None, loc="mri"), + wmparc_stats=dict(altkey="wmparc", loc="stats"), ) outputs = ReconAll.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Register.py b/nipype/interfaces/freesurfer/tests/test_auto_Register.py index c10daabd58..4e1187f62d 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Register.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Register.py @@ -4,46 +4,17 @@ def test_Register_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - curv=dict( - argstr="-curv", - requires=["in_smoothwm"], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_smoothwm=dict( - copyfile=True, - extensions=None, - ), - in_sulc=dict( - copyfile=True, - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s"), + curv=dict(argstr="-curv", requires=["in_smoothwm"]), + environ=dict(nohash=True, usedefault=True), + in_smoothwm=dict(copyfile=True, extensions=None), + in_sulc=dict(copyfile=True, extensions=None, mandatory=True), in_surf=dict( - argstr="%s", - copyfile=True, - extensions=None, - mandatory=True, - position=-3, - ), - out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - position=-1, + argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-3 ), + out_file=dict(argstr="%s", extensions=None, genfile=True, position=-1), subjects_dir=dict(), - target=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), + target=dict(argstr="%s", extensions=None, mandatory=True, position=-2), ) inputs = Register.input_spec() @@ -53,11 +24,7 @@ def test_Register_inputs(): def test_Register_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Register.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py b/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py index f66ac1bda7..ef3763fe65 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py @@ -4,38 +4,13 @@ def test_RegisterAVItoTalairach_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=0, - ), - out_file=dict( - argstr="%s", - extensions=None, - position=3, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), + out_file=dict(argstr="%s", extensions=None, position=3, usedefault=True), subjects_dir=dict(), - target=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=1, - ), - vox2vox=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), + target=dict(argstr="%s", extensions=None, mandatory=True, position=1), + vox2vox=dict(argstr="%s", extensions=None, mandatory=True, position=2), ) inputs = RegisterAVItoTalairach.input_spec() @@ -46,13 +21,7 @@ def test_RegisterAVItoTalairach_inputs(): def test_RegisterAVItoTalairach_outputs(): output_map = dict( - log_file=dict( - extensions=None, - usedefault=True, - ), - out_file=dict( - extensions=None, - ), + 
log_file=dict(extensions=None, usedefault=True), out_file=dict(extensions=None) ) outputs = RegisterAVItoTalairach.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py b/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py index eeac74f722..5c48d3e406 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py @@ -4,24 +4,10 @@ def test_RelabelHypointensities_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - aseg=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - lh_white=dict( - copyfile=True, - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s"), + aseg=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + environ=dict(nohash=True, usedefault=True), + lh_white=dict(copyfile=True, extensions=None, mandatory=True), out_file=dict( argstr="%s", extensions=None, @@ -31,17 +17,9 @@ def test_RelabelHypointensities_inputs(): name_template="%s.hypos.mgz", position=-1, ), - rh_white=dict( - copyfile=True, - extensions=None, - mandatory=True, - ), + rh_white=dict(copyfile=True, extensions=None, mandatory=True), subjects_dir=dict(), - surf_directory=dict( - argstr="%s", - position=-2, - usedefault=True, - ), + surf_directory=dict(argstr="%s", position=-2, usedefault=True), ) inputs = RelabelHypointensities.input_spec() @@ -51,12 +29,7 @@ def test_RelabelHypointensities_inputs(): def test_RelabelHypointensities_outputs(): - output_map = dict( - out_file=dict( - argstr="%s", - extensions=None, - ), - ) + output_map = dict(out_file=dict(argstr="%s", extensions=None)) outputs = RelabelHypointensities.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py b/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py index 735ea7b84a..421e90e4fa 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py @@ -4,19 +4,10 @@ def test_RemoveIntersection_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="%s", - copyfile=True, - extensions=None, - mandatory=True, - position=-2, + argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-2 ), out_file=dict( argstr="%s", @@ -37,11 +28,7 @@ def test_RemoveIntersection_inputs(): def test_RemoveIntersection_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = RemoveIntersection.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py b/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py index 3d2ce30cbd..b8211ba7f4 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py @@ -4,19 +4,9 @@ def test_RemoveNeck_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-4, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + 
in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-4), out_file=dict( argstr="%s", extensions=None, @@ -26,22 +16,10 @@ def test_RemoveNeck_inputs(): name_template="%s_noneck", position=-1, ), - radius=dict( - argstr="-radius %d", - ), + radius=dict(argstr="-radius %d"), subjects_dir=dict(), - template=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - transform=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), + template=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + transform=dict(argstr="%s", extensions=None, mandatory=True, position=-3), ) inputs = RemoveNeck.input_spec() @@ -51,11 +29,7 @@ def test_RemoveNeck_inputs(): def test_RemoveNeck_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = RemoveNeck.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Resample.py b/nipype/interfaces/freesurfer/tests/test_auto_Resample.py index 280a8a4cc1..cd109ec2b3 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Resample.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Resample.py @@ -4,30 +4,12 @@ def test_Resample_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - position=-2, - ), - resampled_file=dict( - argstr="-o %s", - extensions=None, - genfile=True, - position=-1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=-2), + resampled_file=dict(argstr="-o %s", extensions=None, genfile=True, position=-1), subjects_dir=dict(), - voxel_size=dict( - argstr="-vs %.2f %.2f %.2f", - mandatory=True, - ), + voxel_size=dict(argstr="-vs %.2f %.2f %.2f", mandatory=True), ) inputs = Resample.input_spec() @@ -37,11 +19,7 @@ def test_Resample_inputs(): def test_Resample_outputs(): - output_map = dict( - resampled_file=dict( - extensions=None, - ), - ) + output_map = dict(resampled_file=dict(extensions=None)) outputs = Resample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py b/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py index 3f7e1b96a0..d64445faa9 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py @@ -4,113 +4,38 @@ def test_RobustRegister_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - auto_sens=dict( - argstr="--satit", - mandatory=True, - xor=["outlier_sens"], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - est_int_scale=dict( - argstr="--iscale", - ), - force_double=dict( - argstr="--doubleprec", - ), - force_float=dict( - argstr="--floattype", - ), - half_source=dict( - argstr="--halfmov %s", - ), - half_source_xfm=dict( - argstr="--halfmovlta %s", - ), - half_targ=dict( - argstr="--halfdst %s", - ), - half_targ_xfm=dict( - argstr="--halfdstlta %s", - ), - half_weights=dict( - argstr="--halfweights %s", - ), - high_iterations=dict( - argstr="--highit %d", - ), - in_xfm_file=dict( - argstr="--transform", - extensions=None, - ), - init_orient=dict( - argstr="--initorient", - ), - iteration_thresh=dict( - argstr="--epsit %.3f", - ), - least_squares=dict( - argstr="--leastsquares", - ), - 
mask_source=dict( - argstr="--maskmov %s", - extensions=None, - ), - mask_target=dict( - argstr="--maskdst %s", - extensions=None, - ), - max_iterations=dict( - argstr="--maxit %d", - ), - no_init=dict( - argstr="--noinit", - ), - no_multi=dict( - argstr="--nomulti", - ), - out_reg_file=dict( - argstr="--lta %s", - usedefault=True, - ), - outlier_limit=dict( - argstr="--wlimit %.3f", - ), - outlier_sens=dict( - argstr="--sat %.4f", - mandatory=True, - xor=["auto_sens"], - ), - registered_file=dict( - argstr="--warp %s", - ), - source_file=dict( - argstr="--mov %s", - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s"), + auto_sens=dict(argstr="--satit", mandatory=True, xor=["outlier_sens"]), + environ=dict(nohash=True, usedefault=True), + est_int_scale=dict(argstr="--iscale"), + force_double=dict(argstr="--doubleprec"), + force_float=dict(argstr="--floattype"), + half_source=dict(argstr="--halfmov %s"), + half_source_xfm=dict(argstr="--halfmovlta %s"), + half_targ=dict(argstr="--halfdst %s"), + half_targ_xfm=dict(argstr="--halfdstlta %s"), + half_weights=dict(argstr="--halfweights %s"), + high_iterations=dict(argstr="--highit %d"), + in_xfm_file=dict(argstr="--transform", extensions=None), + init_orient=dict(argstr="--initorient"), + iteration_thresh=dict(argstr="--epsit %.3f"), + least_squares=dict(argstr="--leastsquares"), + mask_source=dict(argstr="--maskmov %s", extensions=None), + mask_target=dict(argstr="--maskdst %s", extensions=None), + max_iterations=dict(argstr="--maxit %d"), + no_init=dict(argstr="--noinit"), + no_multi=dict(argstr="--nomulti"), + out_reg_file=dict(argstr="--lta %s", usedefault=True), + outlier_limit=dict(argstr="--wlimit %.3f"), + outlier_sens=dict(argstr="--sat %.4f", mandatory=True, xor=["auto_sens"]), + registered_file=dict(argstr="--warp %s"), + source_file=dict(argstr="--mov %s", extensions=None, mandatory=True), subjects_dir=dict(), - subsample_thresh=dict( - argstr="--subsample %d", - ), - target_file=dict( - argstr="--dst %s", - extensions=None, - mandatory=True, - ), - trans_only=dict( - argstr="--transonly", - ), - weights_file=dict( - argstr="--weights %s", - ), - write_vo2vox=dict( - argstr="--vox2vox", - ), + subsample_thresh=dict(argstr="--subsample %d"), + target_file=dict(argstr="--dst %s", extensions=None, mandatory=True), + trans_only=dict(argstr="--transonly"), + weights_file=dict(argstr="--weights %s"), + write_vo2vox=dict(argstr="--vox2vox"), ) inputs = RobustRegister.input_spec() @@ -121,30 +46,14 @@ def test_RobustRegister_inputs(): def test_RobustRegister_outputs(): output_map = dict( - half_source=dict( - extensions=None, - ), - half_source_xfm=dict( - extensions=None, - ), - half_targ=dict( - extensions=None, - ), - half_targ_xfm=dict( - extensions=None, - ), - half_weights=dict( - extensions=None, - ), - out_reg_file=dict( - extensions=None, - ), - registered_file=dict( - extensions=None, - ), - weights_file=dict( - extensions=None, - ), + half_source=dict(extensions=None), + half_source_xfm=dict(extensions=None), + half_targ=dict(extensions=None), + half_targ_xfm=dict(extensions=None), + half_weights=dict(extensions=None), + out_reg_file=dict(extensions=None), + registered_file=dict(extensions=None), + weights_file=dict(extensions=None), ) outputs = RobustRegister.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py b/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py index 8c180332db..53391af994 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py 
+++ b/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py @@ -4,65 +4,30 @@ def test_RobustTemplate_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), auto_detect_sensitivity=dict( - argstr="--satit", - mandatory=True, - xor=["outlier_sensitivity"], - ), - average_metric=dict( - argstr="--average %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixed_timepoint=dict( - argstr="--fixtp", - ), - in_files=dict( - argstr="--mov %s", - mandatory=True, - ), - in_intensity_scales=dict( - argstr="--iscalein %s", - ), - initial_timepoint=dict( - argstr="--inittp %d", - ), - initial_transforms=dict( - argstr="--ixforms %s", - ), - intensity_scaling=dict( - argstr="--iscale", - ), - no_iteration=dict( - argstr="--noit", - ), + argstr="--satit", mandatory=True, xor=["outlier_sensitivity"] + ), + average_metric=dict(argstr="--average %d"), + environ=dict(nohash=True, usedefault=True), + fixed_timepoint=dict(argstr="--fixtp"), + in_files=dict(argstr="--mov %s", mandatory=True), + in_intensity_scales=dict(argstr="--iscalein %s"), + initial_timepoint=dict(argstr="--inittp %d"), + initial_transforms=dict(argstr="--ixforms %s"), + intensity_scaling=dict(argstr="--iscale"), + no_iteration=dict(argstr="--noit"), num_threads=dict(), out_file=dict( - argstr="--template %s", - extensions=None, - mandatory=True, - usedefault=True, + argstr="--template %s", extensions=None, mandatory=True, usedefault=True ), outlier_sensitivity=dict( - argstr="--sat %.4f", - mandatory=True, - xor=["auto_detect_sensitivity"], - ), - scaled_intensity_outputs=dict( - argstr="--iscaleout %s", + argstr="--sat %.4f", mandatory=True, xor=["auto_detect_sensitivity"] ), + scaled_intensity_outputs=dict(argstr="--iscaleout %s"), subjects_dir=dict(), - subsample_threshold=dict( - argstr="--subsample %d", - ), - transform_outputs=dict( - argstr="--lta %s", - ), + subsample_threshold=dict(argstr="--subsample %d"), + transform_outputs=dict(argstr="--lta %s"), ) inputs = RobustTemplate.input_spec() @@ -73,9 +38,7 @@ def test_RobustTemplate_inputs(): def test_RobustTemplate_outputs(): output_map = dict( - out_file=dict( - extensions=None, - ), + out_file=dict(extensions=None), scaled_intensity_outputs=dict(), transform_outputs=dict(), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py b/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py index de9ffe2485..1dd20355b9 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py @@ -4,83 +4,31 @@ def test_SampleToSurface_inputs(): input_map = dict( - apply_rot=dict( - argstr="--rot %.3f %.3f %.3f", - ), - apply_trans=dict( - argstr="--trans %.3f %.3f %.3f", - ), - args=dict( - argstr="%s", - ), - cortex_mask=dict( - argstr="--cortex", - xor=["mask_label"], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fix_tk_reg=dict( - argstr="--fixtkreg", - ), - float2int_method=dict( - argstr="--float2int %s", - ), - frame=dict( - argstr="--frame %d", - ), - hemi=dict( - argstr="--hemi %s", - mandatory=True, - ), - hits_file=dict( - argstr="--srchit %s", - ), - hits_type=dict( - argstr="--srchit_type", - ), - ico_order=dict( - argstr="--icoorder %d", - requires=["target_subject"], - ), - interp_method=dict( - argstr="--interp %s", - ), - mask_label=dict( - argstr="--mask %s", - extensions=None, - xor=["cortex_mask"], - ), + apply_rot=dict(argstr="--rot %.3f %.3f %.3f"), + apply_trans=dict(argstr="--trans %.3f 
%.3f %.3f"), + args=dict(argstr="%s"), + cortex_mask=dict(argstr="--cortex", xor=["mask_label"]), + environ=dict(nohash=True, usedefault=True), + fix_tk_reg=dict(argstr="--fixtkreg"), + float2int_method=dict(argstr="--float2int %s"), + frame=dict(argstr="--frame %d"), + hemi=dict(argstr="--hemi %s", mandatory=True), + hits_file=dict(argstr="--srchit %s"), + hits_type=dict(argstr="--srchit_type"), + ico_order=dict(argstr="--icoorder %d", requires=["target_subject"]), + interp_method=dict(argstr="--interp %s"), + mask_label=dict(argstr="--mask %s", extensions=None, xor=["cortex_mask"]), mni152reg=dict( argstr="--mni152reg", mandatory=True, xor=["reg_file", "reg_header", "mni152reg"], ), - no_reshape=dict( - argstr="--noreshape", - xor=["reshape"], - ), - out_file=dict( - argstr="--o %s", - extensions=None, - genfile=True, - ), - out_type=dict( - argstr="--out_type %s", - ), - override_reg_subj=dict( - argstr="--srcsubject %s", - requires=["subject_id"], - ), - projection_stem=dict( - mandatory=True, - xor=["sampling_method"], - ), - reference_file=dict( - argstr="--ref %s", - extensions=None, - ), + no_reshape=dict(argstr="--noreshape", xor=["reshape"]), + out_file=dict(argstr="--o %s", extensions=None, genfile=True), + out_type=dict(argstr="--out_type %s"), + override_reg_subj=dict(argstr="--srcsubject %s", requires=["subject_id"]), + projection_stem=dict(mandatory=True, xor=["sampling_method"]), + reference_file=dict(argstr="--ref %s", extensions=None), reg_file=dict( argstr="--reg %s", extensions=None, @@ -93,13 +41,8 @@ def test_SampleToSurface_inputs(): requires=["subject_id"], xor=["reg_file", "reg_header", "mni152reg"], ), - reshape=dict( - argstr="--reshape", - xor=["no_reshape"], - ), - reshape_slices=dict( - argstr="--rf %d", - ), + reshape=dict(argstr="--reshape", xor=["no_reshape"]), + reshape_slices=dict(argstr="--rf %d"), sampling_method=dict( argstr="%s", mandatory=True, @@ -108,35 +51,16 @@ def test_SampleToSurface_inputs(): ), sampling_range=dict(), sampling_units=dict(), - scale_input=dict( - argstr="--scale %.3f", - ), - smooth_surf=dict( - argstr="--surf-fwhm %.3f", - ), - smooth_vol=dict( - argstr="--fwhm %.3f", - ), - source_file=dict( - argstr="--mov %s", - extensions=None, - mandatory=True, - ), + scale_input=dict(argstr="--scale %.3f"), + smooth_surf=dict(argstr="--surf-fwhm %.3f"), + smooth_vol=dict(argstr="--fwhm %.3f"), + source_file=dict(argstr="--mov %s", extensions=None, mandatory=True), subject_id=dict(), subjects_dir=dict(), - surf_reg=dict( - argstr="--surfreg %s", - requires=["target_subject"], - ), - surface=dict( - argstr="--surf %s", - ), - target_subject=dict( - argstr="--trgsubject %s", - ), - vox_file=dict( - argstr="--nvox %s", - ), + surf_reg=dict(argstr="--surfreg %s", requires=["target_subject"]), + surface=dict(argstr="--surf %s"), + target_subject=dict(argstr="--trgsubject %s"), + vox_file=dict(argstr="--nvox %s"), ) inputs = SampleToSurface.input_spec() @@ -147,15 +71,9 @@ def test_SampleToSurface_inputs(): def test_SampleToSurface_outputs(): output_map = dict( - hits_file=dict( - extensions=None, - ), - out_file=dict( - extensions=None, - ), - vox_file=dict( - extensions=None, - ), + hits_file=dict(extensions=None), + out_file=dict(extensions=None), + vox_file=dict(extensions=None), ) outputs = SampleToSurface.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py b/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py index dfb82e8b85..3db964e8a3 100644 --- 
a/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py @@ -9,146 +9,72 @@ def test_SegStats_inputs(): mandatory=True, xor=("segmentation_file", "annot", "surf_label"), ), - args=dict( - argstr="%s", - ), - avgwf_file=dict( - argstr="--avgwfvol %s", - ), - avgwf_txt_file=dict( - argstr="--avgwf %s", - ), - brain_vol=dict( - argstr="--%s", - ), - brainmask_file=dict( - argstr="--brainmask %s", - extensions=None, - ), - calc_power=dict( - argstr="--%s", - ), - calc_snr=dict( - argstr="--snr", - ), + args=dict(argstr="%s"), + avgwf_file=dict(argstr="--avgwfvol %s"), + avgwf_txt_file=dict(argstr="--avgwf %s"), + brain_vol=dict(argstr="--%s"), + brainmask_file=dict(argstr="--brainmask %s", extensions=None), + calc_power=dict(argstr="--%s"), + calc_snr=dict(argstr="--snr"), color_table_file=dict( argstr="--ctab %s", extensions=None, xor=("color_table_file", "default_color_table", "gca_color_table"), ), - cortex_vol_from_surf=dict( - argstr="--surf-ctx-vol", - ), + cortex_vol_from_surf=dict(argstr="--surf-ctx-vol"), default_color_table=dict( argstr="--ctab-default", xor=("color_table_file", "default_color_table", "gca_color_table"), ), - empty=dict( - argstr="--empty", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - etiv=dict( - argstr="--etiv", - ), + empty=dict(argstr="--empty"), + environ=dict(nohash=True, usedefault=True), + etiv=dict(argstr="--etiv"), etiv_only=dict(), - euler=dict( - argstr="--euler", - ), - exclude_ctx_gm_wm=dict( - argstr="--excl-ctxgmwm", - ), - exclude_id=dict( - argstr="--excludeid %d", - ), - frame=dict( - argstr="--frame %d", - ), + euler=dict(argstr="--euler"), + exclude_ctx_gm_wm=dict(argstr="--excl-ctxgmwm"), + exclude_id=dict(argstr="--excludeid %d"), + frame=dict(argstr="--frame %d"), gca_color_table=dict( argstr="--ctab-gca %s", extensions=None, xor=("color_table_file", "default_color_table", "gca_color_table"), ), - in_file=dict( - argstr="--i %s", - extensions=None, - ), - in_intensity=dict( - argstr="--in %s --in-intensity-name %s", - extensions=None, - ), + in_file=dict(argstr="--i %s", extensions=None), + in_intensity=dict(argstr="--in %s --in-intensity-name %s", extensions=None), intensity_units=dict( - argstr="--in-intensity-units %s", - requires=["in_intensity"], - ), - mask_erode=dict( - argstr="--maskerode %d", - ), - mask_file=dict( - argstr="--mask %s", - extensions=None, - ), - mask_frame=dict( - requires=["mask_file"], - ), - mask_invert=dict( - argstr="--maskinvert", + argstr="--in-intensity-units %s", requires=["in_intensity"] ), + mask_erode=dict(argstr="--maskerode %d"), + mask_file=dict(argstr="--mask %s", extensions=None), + mask_frame=dict(requires=["mask_file"]), + mask_invert=dict(argstr="--maskinvert"), mask_sign=dict(), - mask_thresh=dict( - argstr="--maskthresh %f", - ), - multiply=dict( - argstr="--mul %f", - ), - non_empty_only=dict( - argstr="--nonempty", - ), - partial_volume_file=dict( - argstr="--pv %s", - extensions=None, - ), - segment_id=dict( - argstr="--id %s...", - ), + mask_thresh=dict(argstr="--maskthresh %f"), + multiply=dict(argstr="--mul %f"), + non_empty_only=dict(argstr="--nonempty"), + partial_volume_file=dict(argstr="--pv %s", extensions=None), + segment_id=dict(argstr="--id %s..."), segmentation_file=dict( argstr="--seg %s", extensions=None, mandatory=True, xor=("segmentation_file", "annot", "surf_label"), ), - sf_avg_file=dict( - argstr="--sfavg %s", - ), - subcort_gm=dict( - argstr="--subcortgray", - ), + 
sf_avg_file=dict(argstr="--sfavg %s"), + subcort_gm=dict(argstr="--subcortgray"), subjects_dir=dict(), summary_file=dict( - argstr="--sum %s", - extensions=None, - genfile=True, - position=-1, - ), - supratent=dict( - argstr="--supratent", + argstr="--sum %s", extensions=None, genfile=True, position=-1 ), + supratent=dict(argstr="--supratent"), surf_label=dict( argstr="--slabel %s %s %s", mandatory=True, xor=("segmentation_file", "annot", "surf_label"), ), - total_gray=dict( - argstr="--totalgray", - ), - vox=dict( - argstr="--vox %s", - ), - wm_vol_from_surf=dict( - argstr="--surf-wm-vol", - ), + total_gray=dict(argstr="--totalgray"), + vox=dict(argstr="--vox %s"), + wm_vol_from_surf=dict(argstr="--surf-wm-vol"), ) inputs = SegStats.input_spec() @@ -159,18 +85,10 @@ def test_SegStats_inputs(): def test_SegStats_outputs(): output_map = dict( - avgwf_file=dict( - extensions=None, - ), - avgwf_txt_file=dict( - extensions=None, - ), - sf_avg_file=dict( - extensions=None, - ), - summary_file=dict( - extensions=None, - ), + avgwf_file=dict(extensions=None), + avgwf_txt_file=dict(extensions=None), + sf_avg_file=dict(extensions=None), + summary_file=dict(extensions=None), ) outputs = SegStats.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py index 0121dd7d9e..04e12b227d 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py @@ -9,190 +9,84 @@ def test_SegStatsReconAll_inputs(): mandatory=True, xor=("segmentation_file", "annot", "surf_label"), ), - args=dict( - argstr="%s", - ), - aseg=dict( - extensions=None, - ), - avgwf_file=dict( - argstr="--avgwfvol %s", - ), - avgwf_txt_file=dict( - argstr="--avgwf %s", - ), - brain_vol=dict( - argstr="--%s", - ), - brainmask_file=dict( - argstr="--brainmask %s", - extensions=None, - ), - calc_power=dict( - argstr="--%s", - ), - calc_snr=dict( - argstr="--snr", - ), + args=dict(argstr="%s"), + aseg=dict(extensions=None), + avgwf_file=dict(argstr="--avgwfvol %s"), + avgwf_txt_file=dict(argstr="--avgwf %s"), + brain_vol=dict(argstr="--%s"), + brainmask_file=dict(argstr="--brainmask %s", extensions=None), + calc_power=dict(argstr="--%s"), + calc_snr=dict(argstr="--snr"), color_table_file=dict( argstr="--ctab %s", extensions=None, xor=("color_table_file", "default_color_table", "gca_color_table"), ), copy_inputs=dict(), - cortex_vol_from_surf=dict( - argstr="--surf-ctx-vol", - ), + cortex_vol_from_surf=dict(argstr="--surf-ctx-vol"), default_color_table=dict( argstr="--ctab-default", xor=("color_table_file", "default_color_table", "gca_color_table"), ), - empty=dict( - argstr="--empty", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - etiv=dict( - argstr="--etiv", - ), + empty=dict(argstr="--empty"), + environ=dict(nohash=True, usedefault=True), + etiv=dict(argstr="--etiv"), etiv_only=dict(), - euler=dict( - argstr="--euler", - ), - exclude_ctx_gm_wm=dict( - argstr="--excl-ctxgmwm", - ), - exclude_id=dict( - argstr="--excludeid %d", - ), - frame=dict( - argstr="--frame %d", - ), + euler=dict(argstr="--euler"), + exclude_ctx_gm_wm=dict(argstr="--excl-ctxgmwm"), + exclude_id=dict(argstr="--excludeid %d"), + frame=dict(argstr="--frame %d"), gca_color_table=dict( argstr="--ctab-gca %s", extensions=None, xor=("color_table_file", "default_color_table", "gca_color_table"), ), - in_file=dict( - argstr="--i %s", - extensions=None, - ), - in_intensity=dict( - 
argstr="--in %s --in-intensity-name %s", - extensions=None, - ), + in_file=dict(argstr="--i %s", extensions=None), + in_intensity=dict(argstr="--in %s --in-intensity-name %s", extensions=None), intensity_units=dict( - argstr="--in-intensity-units %s", - requires=["in_intensity"], - ), - lh_orig_nofix=dict( - extensions=None, - mandatory=True, - ), - lh_pial=dict( - extensions=None, - mandatory=True, - ), - lh_white=dict( - extensions=None, - mandatory=True, - ), - mask_erode=dict( - argstr="--maskerode %d", - ), - mask_file=dict( - argstr="--mask %s", - extensions=None, - ), - mask_frame=dict( - requires=["mask_file"], - ), - mask_invert=dict( - argstr="--maskinvert", - ), + argstr="--in-intensity-units %s", requires=["in_intensity"] + ), + lh_orig_nofix=dict(extensions=None, mandatory=True), + lh_pial=dict(extensions=None, mandatory=True), + lh_white=dict(extensions=None, mandatory=True), + mask_erode=dict(argstr="--maskerode %d"), + mask_file=dict(argstr="--mask %s", extensions=None), + mask_frame=dict(requires=["mask_file"]), + mask_invert=dict(argstr="--maskinvert"), mask_sign=dict(), - mask_thresh=dict( - argstr="--maskthresh %f", - ), - multiply=dict( - argstr="--mul %f", - ), - non_empty_only=dict( - argstr="--nonempty", - ), - partial_volume_file=dict( - argstr="--pv %s", - extensions=None, - ), - presurf_seg=dict( - extensions=None, - ), - rh_orig_nofix=dict( - extensions=None, - mandatory=True, - ), - rh_pial=dict( - extensions=None, - mandatory=True, - ), - rh_white=dict( - extensions=None, - mandatory=True, - ), - ribbon=dict( - extensions=None, - mandatory=True, - ), - segment_id=dict( - argstr="--id %s...", - ), + mask_thresh=dict(argstr="--maskthresh %f"), + multiply=dict(argstr="--mul %f"), + non_empty_only=dict(argstr="--nonempty"), + partial_volume_file=dict(argstr="--pv %s", extensions=None), + presurf_seg=dict(extensions=None), + rh_orig_nofix=dict(extensions=None, mandatory=True), + rh_pial=dict(extensions=None, mandatory=True), + rh_white=dict(extensions=None, mandatory=True), + ribbon=dict(extensions=None, mandatory=True), + segment_id=dict(argstr="--id %s..."), segmentation_file=dict( argstr="--seg %s", extensions=None, mandatory=True, xor=("segmentation_file", "annot", "surf_label"), ), - sf_avg_file=dict( - argstr="--sfavg %s", - ), - subcort_gm=dict( - argstr="--subcortgray", - ), - subject_id=dict( - argstr="--subject %s", - mandatory=True, - usedefault=True, - ), + sf_avg_file=dict(argstr="--sfavg %s"), + subcort_gm=dict(argstr="--subcortgray"), + subject_id=dict(argstr="--subject %s", mandatory=True, usedefault=True), subjects_dir=dict(), summary_file=dict( - argstr="--sum %s", - extensions=None, - genfile=True, - position=-1, - ), - supratent=dict( - argstr="--supratent", + argstr="--sum %s", extensions=None, genfile=True, position=-1 ), + supratent=dict(argstr="--supratent"), surf_label=dict( argstr="--slabel %s %s %s", mandatory=True, xor=("segmentation_file", "annot", "surf_label"), ), - total_gray=dict( - argstr="--totalgray", - ), - transform=dict( - extensions=None, - mandatory=True, - ), - vox=dict( - argstr="--vox %s", - ), - wm_vol_from_surf=dict( - argstr="--surf-wm-vol", - ), + total_gray=dict(argstr="--totalgray"), + transform=dict(extensions=None, mandatory=True), + vox=dict(argstr="--vox %s"), + wm_vol_from_surf=dict(argstr="--surf-wm-vol"), ) inputs = SegStatsReconAll.input_spec() @@ -203,18 +97,10 @@ def test_SegStatsReconAll_inputs(): def test_SegStatsReconAll_outputs(): output_map = dict( - avgwf_file=dict( - extensions=None, - ), - 
avgwf_txt_file=dict( - extensions=None, - ), - sf_avg_file=dict( - extensions=None, - ), - summary_file=dict( - extensions=None, - ), + avgwf_file=dict(extensions=None), + avgwf_txt_file=dict(extensions=None), + sf_avg_file=dict(extensions=None), + summary_file=dict(extensions=None), ) outputs = SegStatsReconAll.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py b/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py index 7c16a1f476..5b0083cb96 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py @@ -4,23 +4,11 @@ def test_SegmentCC_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), copy_inputs=dict(), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-aseg %s", - extensions=None, - mandatory=True, - ), - in_norm=dict( - extensions=None, - mandatory=True, - ), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-aseg %s", extensions=None, mandatory=True), + in_norm=dict(extensions=None, mandatory=True), out_file=dict( argstr="-o %s", extensions=None, @@ -29,17 +17,8 @@ def test_SegmentCC_inputs(): name_source=["in_file"], name_template="%s.auto.mgz", ), - out_rotation=dict( - argstr="-lta %s", - extensions=None, - mandatory=True, - ), - subject_id=dict( - argstr="%s", - mandatory=True, - position=-1, - usedefault=True, - ), + out_rotation=dict(argstr="-lta %s", extensions=None, mandatory=True), + subject_id=dict(argstr="%s", mandatory=True, position=-1, usedefault=True), subjects_dir=dict(), ) inputs = SegmentCC.input_spec() @@ -51,12 +30,7 @@ def test_SegmentCC_inputs(): def test_SegmentCC_outputs(): output_map = dict( - out_file=dict( - extensions=None, - ), - out_rotation=dict( - extensions=None, - ), + out_file=dict(extensions=None), out_rotation=dict(extensions=None) ) outputs = SegmentCC.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py b/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py index 8aac066c26..f2bebb610c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py @@ -4,25 +4,10 @@ def test_SegmentWM_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - out_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), subjects_dir=dict(), ) inputs = SegmentWM.input_spec() @@ -33,11 +18,7 @@ def test_SegmentWM_inputs(): def test_SegmentWM_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = SegmentWM.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py b/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py index e20de1c795..d510fbac22 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py @@ -4,51 +4,19 @@ def test_Smooth_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), 
- in_file=dict( - argstr="--i %s", - extensions=None, - mandatory=True, - ), - num_iters=dict( - argstr="--niters %d", - mandatory=True, - xor=["surface_fwhm"], - ), - proj_frac=dict( - argstr="--projfrac %s", - xor=["proj_frac_avg"], - ), - proj_frac_avg=dict( - argstr="--projfrac-avg %.2f %.2f %.2f", - xor=["proj_frac"], - ), - reg_file=dict( - argstr="--reg %s", - extensions=None, - mandatory=True, - ), - smoothed_file=dict( - argstr="--o %s", - extensions=None, - genfile=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="--i %s", extensions=None, mandatory=True), + num_iters=dict(argstr="--niters %d", mandatory=True, xor=["surface_fwhm"]), + proj_frac=dict(argstr="--projfrac %s", xor=["proj_frac_avg"]), + proj_frac_avg=dict(argstr="--projfrac-avg %.2f %.2f %.2f", xor=["proj_frac"]), + reg_file=dict(argstr="--reg %s", extensions=None, mandatory=True), + smoothed_file=dict(argstr="--o %s", extensions=None, genfile=True), subjects_dir=dict(), surface_fwhm=dict( - argstr="--fwhm %f", - mandatory=True, - requires=["reg_file"], - xor=["num_iters"], - ), - vol_fwhm=dict( - argstr="--vol-fwhm %f", + argstr="--fwhm %f", mandatory=True, requires=["reg_file"], xor=["num_iters"] ), + vol_fwhm=dict(argstr="--vol-fwhm %f"), ) inputs = Smooth.input_spec() @@ -58,11 +26,7 @@ def test_Smooth_inputs(): def test_Smooth_outputs(): - output_map = dict( - smoothed_file=dict( - extensions=None, - ), - ) + output_map = dict(smoothed_file=dict(extensions=None)) outputs = Smooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py b/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py index 5f97cc281b..983296b4bd 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py @@ -4,65 +4,25 @@ def test_SmoothTessellation_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - curvature_averaging_iterations=dict( - argstr="-a %d", - ), - disable_estimates=dict( - argstr="-nw", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gaussian_curvature_norm_steps=dict( - argstr="%d", - ), - gaussian_curvature_smoothing_steps=dict( - argstr=" %d", - ), + args=dict(argstr="%s"), + curvature_averaging_iterations=dict(argstr="-a %d"), + disable_estimates=dict(argstr="-nw"), + environ=dict(nohash=True, usedefault=True), + gaussian_curvature_norm_steps=dict(argstr="%d"), + gaussian_curvature_smoothing_steps=dict(argstr=" %d"), in_file=dict( - argstr="%s", - copyfile=True, - extensions=None, - mandatory=True, - position=-2, - ), - normalize_area=dict( - argstr="-area", - ), - out_area_file=dict( - argstr="-b %s", - extensions=None, - ), - out_curvature_file=dict( - argstr="-c %s", - extensions=None, - ), - out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - position=-1, - ), - seed=dict( - argstr="-seed %d", - ), - smoothing_iterations=dict( - argstr="-n %d", - ), - snapshot_writing_iterations=dict( - argstr="-w %d", - ), + argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-2 + ), + normalize_area=dict(argstr="-area"), + out_area_file=dict(argstr="-b %s", extensions=None), + out_curvature_file=dict(argstr="-c %s", extensions=None), + out_file=dict(argstr="%s", extensions=None, genfile=True, position=-1), + seed=dict(argstr="-seed %d"), + smoothing_iterations=dict(argstr="-n %d"), + snapshot_writing_iterations=dict(argstr="-w %d"), 
subjects_dir=dict(), - use_gaussian_curvature_smoothing=dict( - argstr="-g", - ), - use_momentum=dict( - argstr="-m", - ), + use_gaussian_curvature_smoothing=dict(argstr="-g"), + use_momentum=dict(argstr="-m"), ) inputs = SmoothTessellation.input_spec() @@ -72,11 +32,7 @@ def test_SmoothTessellation_inputs(): def test_SmoothTessellation_outputs(): - output_map = dict( - surface=dict( - extensions=None, - ), - ) + output_map = dict(surface=dict(extensions=None)) outputs = SmoothTessellation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py b/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py index 84673e2951..35d47936ea 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py @@ -4,27 +4,13 @@ def test_Sphere_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="%s", - copyfile=True, - extensions=None, - mandatory=True, - position=-2, - ), - in_smoothwm=dict( - copyfile=True, - extensions=None, - ), - magic=dict( - argstr="-q", + argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-2 ), + in_smoothwm=dict(copyfile=True, extensions=None), + magic=dict(argstr="-q"), num_threads=dict(), out_file=dict( argstr="%s", @@ -34,9 +20,7 @@ def test_Sphere_inputs(): name_template="%s.sphere", position=-1, ), - seed=dict( - argstr="-seed %d", - ), + seed=dict(argstr="-seed %d"), subjects_dir=dict(), ) inputs = Sphere.input_spec() @@ -47,11 +31,7 @@ def test_Sphere_inputs(): def test_Sphere_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Sphere.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py b/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py index 73f5b3efd7..f9fdc41306 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py @@ -4,60 +4,19 @@ def test_SphericalAverage_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - erode=dict( - argstr="-erode %d", - ), - fname=dict( - argstr="%s", - mandatory=True, - position=-5, - ), - hemisphere=dict( - argstr="%s", - mandatory=True, - position=-4, - ), - in_average=dict( - argstr="%s", - genfile=True, - position=-2, - ), - in_orig=dict( - argstr="-orig %s", - extensions=None, - ), - in_surf=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), - out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - position=-1, - ), - subject_id=dict( - argstr="-o %s", - mandatory=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + erode=dict(argstr="-erode %d"), + fname=dict(argstr="%s", mandatory=True, position=-5), + hemisphere=dict(argstr="%s", mandatory=True, position=-4), + in_average=dict(argstr="%s", genfile=True, position=-2), + in_orig=dict(argstr="-orig %s", extensions=None), + in_surf=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + out_file=dict(argstr="%s", extensions=None, genfile=True, position=-1), + subject_id=dict(argstr="-o %s", mandatory=True), subjects_dir=dict(), - threshold=dict( - argstr="-t 
%.1f", - ), - which=dict( - argstr="%s", - mandatory=True, - position=-6, - ), + threshold=dict(argstr="-t %.1f"), + which=dict(argstr="%s", mandatory=True, position=-6), ) inputs = SphericalAverage.input_spec() @@ -67,11 +26,7 @@ def test_SphericalAverage_inputs(): def test_SphericalAverage_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = SphericalAverage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py b/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py index 01dc354710..999d67ff8e 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py @@ -4,29 +4,13 @@ def test_Surface2VolTransform_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - hemi=dict( - argstr="--hemi %s", - mandatory=True, - ), - mkmask=dict( - argstr="--mkmask", - xor=["source_file"], - ), - projfrac=dict( - argstr="--projfrac %s", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + hemi=dict(argstr="--hemi %s", mandatory=True), + mkmask=dict(argstr="--mkmask", xor=["source_file"]), + projfrac=dict(argstr="--projfrac %s"), reg_file=dict( - argstr="--volreg %s", - extensions=None, - mandatory=True, - xor=["subject_id"], + argstr="--volreg %s", extensions=None, mandatory=True, xor=["subject_id"] ), source_file=dict( argstr="--surfval %s", @@ -35,20 +19,10 @@ def test_Surface2VolTransform_inputs(): mandatory=True, xor=["mkmask"], ), - subject_id=dict( - argstr="--identity %s", - xor=["reg_file"], - ), - subjects_dir=dict( - argstr="--sd %s", - ), - surf_name=dict( - argstr="--surf %s", - ), - template_file=dict( - argstr="--template %s", - extensions=None, - ), + subject_id=dict(argstr="--identity %s", xor=["reg_file"]), + subjects_dir=dict(argstr="--sd %s"), + surf_name=dict(argstr="--surf %s"), + template_file=dict(argstr="--template %s", extensions=None), transformed_file=dict( argstr="--outvol %s", extensions=None, @@ -73,12 +47,7 @@ def test_Surface2VolTransform_inputs(): def test_Surface2VolTransform_outputs(): output_map = dict( - transformed_file=dict( - extensions=None, - ), - vertexvol_file=dict( - extensions=None, - ), + transformed_file=dict(extensions=None), vertexvol_file=dict(extensions=None) ) outputs = Surface2VolTransform.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py index 7876dfa1cc..1f373f07a0 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py @@ -4,46 +4,16 @@ def test_SurfaceSmooth_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - cortex=dict( - argstr="--cortex", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fwhm=dict( - argstr="--fwhm %.4f", - xor=["smooth_iters"], - ), - hemi=dict( - argstr="--hemi %s", - mandatory=True, - ), - in_file=dict( - argstr="--sval %s", - extensions=None, - mandatory=True, - ), - out_file=dict( - argstr="--tval %s", - extensions=None, - genfile=True, - ), - reshape=dict( - argstr="--reshape", - ), - smooth_iters=dict( - argstr="--smooth %d", - xor=["fwhm"], - ), - subject_id=dict( - argstr="--s %s", - mandatory=True, - ), + 
args=dict(argstr="%s"), + cortex=dict(argstr="--cortex", usedefault=True), + environ=dict(nohash=True, usedefault=True), + fwhm=dict(argstr="--fwhm %.4f", xor=["smooth_iters"]), + hemi=dict(argstr="--hemi %s", mandatory=True), + in_file=dict(argstr="--sval %s", extensions=None, mandatory=True), + out_file=dict(argstr="--tval %s", extensions=None, genfile=True), + reshape=dict(argstr="--reshape"), + smooth_iters=dict(argstr="--smooth %d", xor=["fwhm"]), + subject_id=dict(argstr="--s %s", mandatory=True), subjects_dir=dict(), ) inputs = SurfaceSmooth.input_spec() @@ -54,11 +24,7 @@ def test_SurfaceSmooth_inputs(): def test_SurfaceSmooth_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = SurfaceSmooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py index c778bcc959..8035c97669 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py @@ -4,127 +4,49 @@ def test_SurfaceSnapshots_inputs(): input_map = dict( - annot_file=dict( - argstr="-annotation %s", - extensions=None, - xor=["annot_name"], - ), - annot_name=dict( - argstr="-annotation %s", - xor=["annot_file"], - ), - args=dict( - argstr="%s", - ), - colortable=dict( - argstr="-colortable %s", - extensions=None, - ), - demean_overlay=dict( - argstr="-zm", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - hemi=dict( - argstr="%s", - mandatory=True, - position=2, - ), + annot_file=dict(argstr="-annotation %s", extensions=None, xor=["annot_name"]), + annot_name=dict(argstr="-annotation %s", xor=["annot_file"]), + args=dict(argstr="%s"), + colortable=dict(argstr="-colortable %s", extensions=None), + demean_overlay=dict(argstr="-zm"), + environ=dict(nohash=True, usedefault=True), + hemi=dict(argstr="%s", mandatory=True, position=2), identity_reg=dict( argstr="-overlay-reg-identity", xor=["overlay_reg", "identity_reg", "mni152_reg"], ), - invert_overlay=dict( - argstr="-invphaseflag 1", - ), - label_file=dict( - argstr="-label %s", - extensions=None, - xor=["label_name"], - ), - label_name=dict( - argstr="-label %s", - xor=["label_file"], - ), - label_outline=dict( - argstr="-label-outline", - ), - label_under=dict( - argstr="-labels-under", - ), + invert_overlay=dict(argstr="-invphaseflag 1"), + label_file=dict(argstr="-label %s", extensions=None, xor=["label_name"]), + label_name=dict(argstr="-label %s", xor=["label_file"]), + label_outline=dict(argstr="-label-outline"), + label_under=dict(argstr="-labels-under"), mni152_reg=dict( - argstr="-mni152reg", - xor=["overlay_reg", "identity_reg", "mni152_reg"], - ), - orig_suffix=dict( - argstr="-orig %s", - ), - overlay=dict( - argstr="-overlay %s", - extensions=None, - requires=["overlay_range"], - ), - overlay_range=dict( - argstr="%s", - ), - overlay_range_offset=dict( - argstr="-foffset %.3f", + argstr="-mni152reg", xor=["overlay_reg", "identity_reg", "mni152_reg"] ), + orig_suffix=dict(argstr="-orig %s"), + overlay=dict(argstr="-overlay %s", extensions=None, requires=["overlay_range"]), + overlay_range=dict(argstr="%s"), + overlay_range_offset=dict(argstr="-foffset %.3f"), overlay_reg=dict( argstr="-overlay-reg %s", extensions=None, xor=["overlay_reg", "identity_reg", "mni152_reg"], ), - patch_file=dict( - argstr="-patch %s", - extensions=None, - ), - 
reverse_overlay=dict( - argstr="-revphaseflag 1", - ), + patch_file=dict(argstr="-patch %s", extensions=None), + reverse_overlay=dict(argstr="-revphaseflag 1"), screenshot_stem=dict(), - show_color_scale=dict( - argstr="-colscalebarflag 1", - ), - show_color_text=dict( - argstr="-colscaletext 1", - ), - show_curv=dict( - argstr="-curv", - xor=["show_gray_curv"], - ), - show_gray_curv=dict( - argstr="-gray", - xor=["show_curv"], - ), + show_color_scale=dict(argstr="-colscalebarflag 1"), + show_color_text=dict(argstr="-colscaletext 1"), + show_curv=dict(argstr="-curv", xor=["show_gray_curv"]), + show_gray_curv=dict(argstr="-gray", xor=["show_curv"]), six_images=dict(), - sphere_suffix=dict( - argstr="-sphere %s", - ), - stem_template_args=dict( - requires=["screenshot_stem"], - ), - subject_id=dict( - argstr="%s", - mandatory=True, - position=1, - ), + sphere_suffix=dict(argstr="-sphere %s"), + stem_template_args=dict(requires=["screenshot_stem"]), + subject_id=dict(argstr="%s", mandatory=True, position=1), subjects_dir=dict(), - surface=dict( - argstr="%s", - mandatory=True, - position=3, - ), - tcl_script=dict( - argstr="%s", - extensions=None, - genfile=True, - ), - truncate_overlay=dict( - argstr="-truncphaseflag 1", - ), + surface=dict(argstr="%s", mandatory=True, position=3), + tcl_script=dict(argstr="%s", extensions=None, genfile=True), + truncate_overlay=dict(argstr="-truncphaseflag 1"), ) inputs = SurfaceSnapshots.input_spec() @@ -134,9 +56,7 @@ def test_SurfaceSnapshots_inputs(): def test_SurfaceSnapshots_outputs(): - output_map = dict( - snapshots=dict(), - ) + output_map = dict(snapshots=dict()) outputs = SurfaceSnapshots.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py index 88923befd4..3827bb161a 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py @@ -4,28 +4,12 @@ def test_SurfaceTransform_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - hemi=dict( - argstr="--hemi %s", - mandatory=True, - ), - out_file=dict( - argstr="--tval %s", - extensions=None, - genfile=True, - ), - reshape=dict( - argstr="--reshape", - ), - reshape_factor=dict( - argstr="--reshape-factor", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + hemi=dict(argstr="--hemi %s", mandatory=True), + out_file=dict(argstr="--tval %s", extensions=None, genfile=True), + reshape=dict(argstr="--reshape"), + reshape_factor=dict(argstr="--reshape-factor"), source_annot_file=dict( argstr="--sval-annot %s", extensions=None, @@ -38,25 +22,12 @@ def test_SurfaceTransform_inputs(): mandatory=True, xor=["source_annot_file"], ), - source_subject=dict( - argstr="--srcsubject %s", - mandatory=True, - ), - source_type=dict( - argstr="--sfmt %s", - requires=["source_file"], - ), + source_subject=dict(argstr="--srcsubject %s", mandatory=True), + source_type=dict(argstr="--sfmt %s", requires=["source_file"]), subjects_dir=dict(), - target_ico_order=dict( - argstr="--trgicoorder %d", - ), - target_subject=dict( - argstr="--trgsubject %s", - mandatory=True, - ), - target_type=dict( - argstr="--tfmt %s", - ), + target_ico_order=dict(argstr="--trgicoorder %d"), + target_subject=dict(argstr="--trgsubject %s", mandatory=True), + target_type=dict(argstr="--tfmt %s"), ) inputs = 
SurfaceTransform.input_spec() @@ -66,11 +37,7 @@ def test_SurfaceTransform_inputs(): def test_SurfaceTransform_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = SurfaceTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py b/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py index 7d52c994bc..c30574d897 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py @@ -4,50 +4,16 @@ def test_SynthesizeFLASH_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixed_weighting=dict( - argstr="-w", - position=1, - ), - flip_angle=dict( - argstr="%.2f", - mandatory=True, - position=3, - ), - out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - ), - pd_image=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=6, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + fixed_weighting=dict(argstr="-w", position=1), + flip_angle=dict(argstr="%.2f", mandatory=True, position=3), + out_file=dict(argstr="%s", extensions=None, genfile=True), + pd_image=dict(argstr="%s", extensions=None, mandatory=True, position=6), subjects_dir=dict(), - t1_image=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=5, - ), - te=dict( - argstr="%.3f", - mandatory=True, - position=4, - ), - tr=dict( - argstr="%.2f", - mandatory=True, - position=2, - ), + t1_image=dict(argstr="%s", extensions=None, mandatory=True, position=5), + te=dict(argstr="%.3f", mandatory=True, position=4), + tr=dict(argstr="%.2f", mandatory=True, position=2), ) inputs = SynthesizeFLASH.input_spec() @@ -57,11 +23,7 @@ def test_SynthesizeFLASH_inputs(): def test_SynthesizeFLASH_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = SynthesizeFLASH.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py b/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py index 384f44edd2..8266edd299 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py @@ -4,26 +4,11 @@ def test_TalairachAVI_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - atlas=dict( - argstr="--atlas %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="--i %s", - extensions=None, - mandatory=True, - ), - out_file=dict( - argstr="--xfm %s", - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s"), + atlas=dict(argstr="--atlas %s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="--i %s", extensions=None, mandatory=True), + out_file=dict(argstr="--xfm %s", extensions=None, mandatory=True), subjects_dir=dict(), ) inputs = TalairachAVI.input_spec() @@ -35,15 +20,9 @@ def test_TalairachAVI_inputs(): def test_TalairachAVI_outputs(): output_map = dict( - out_file=dict( - extensions=None, - ), - out_log=dict( - extensions=None, - ), - out_txt=dict( - extensions=None, - ), + out_file=dict(extensions=None), + out_log=dict(extensions=None), + out_txt=dict(extensions=None), ) outputs = TalairachAVI.output_spec() diff --git 
a/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py b/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py index c6536186aa..3384f9c27f 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py @@ -4,19 +4,9 @@ def test_TalairachQC_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - log_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=0, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + log_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), subjects_dir=dict(), ) inputs = TalairachQC.input_spec() @@ -27,12 +17,7 @@ def test_TalairachQC_inputs(): def test_TalairachQC_outputs(): - output_map = dict( - log_file=dict( - extensions=None, - usedefault=True, - ), - ) + output_map = dict(log_file=dict(extensions=None, usedefault=True)) outputs = TalairachQC.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py b/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py index 31cdedb679..4be76d5396 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py @@ -4,76 +4,27 @@ def test_Tkregister2_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fsl_in_matrix=dict( - argstr="--fsl %s", - extensions=None, - ), - fsl_out=dict( - argstr="--fslregout %s", - ), - fstal=dict( - argstr="--fstal", - xor=["target_image", "moving_image", "reg_file"], - ), - fstarg=dict( - argstr="--fstarg", - xor=["target_image"], - ), - invert_lta_in=dict( - requires=["lta_in"], - ), - invert_lta_out=dict( - argstr="--ltaout-inv", - requires=["lta_in"], - ), - lta_in=dict( - argstr="--lta %s", - extensions=None, - ), - lta_out=dict( - argstr="--ltaout %s", - ), - moving_image=dict( - argstr="--mov %s", - extensions=None, - mandatory=True, - ), - movscale=dict( - argstr="--movscale %f", - ), - noedit=dict( - argstr="--noedit", - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + fsl_in_matrix=dict(argstr="--fsl %s", extensions=None), + fsl_out=dict(argstr="--fslregout %s"), + fstal=dict(argstr="--fstal", xor=["target_image", "moving_image", "reg_file"]), + fstarg=dict(argstr="--fstarg", xor=["target_image"]), + invert_lta_in=dict(requires=["lta_in"]), + invert_lta_out=dict(argstr="--ltaout-inv", requires=["lta_in"]), + lta_in=dict(argstr="--lta %s", extensions=None), + lta_out=dict(argstr="--ltaout %s"), + moving_image=dict(argstr="--mov %s", extensions=None, mandatory=True), + movscale=dict(argstr="--movscale %f"), + noedit=dict(argstr="--noedit", usedefault=True), reg_file=dict( - argstr="--reg %s", - extensions=None, - mandatory=True, - usedefault=True, - ), - reg_header=dict( - argstr="--regheader", - ), - subject_id=dict( - argstr="--s %s", + argstr="--reg %s", extensions=None, mandatory=True, usedefault=True ), + reg_header=dict(argstr="--regheader"), + subject_id=dict(argstr="--s %s"), subjects_dir=dict(), - target_image=dict( - argstr="--targ %s", - extensions=None, - xor=["fstarg"], - ), - xfm=dict( - argstr="--xfm %s", - extensions=None, - ), + target_image=dict(argstr="--targ %s", extensions=None, xor=["fstarg"]), + xfm=dict(argstr="--xfm %s", extensions=None), ) inputs = Tkregister2.input_spec() @@ -84,15 +35,9 @@ 
def test_Tkregister2_inputs(): def test_Tkregister2_outputs(): output_map = dict( - fsl_file=dict( - extensions=None, - ), - lta_file=dict( - extensions=None, - ), - reg_file=dict( - extensions=None, - ), + fsl_file=dict(extensions=None), + lta_file=dict(extensions=None), + reg_file=dict(extensions=None), ) outputs = Tkregister2.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py b/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py index b6b27e67b8..b04e8c00ed 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py @@ -4,57 +4,33 @@ def test_UnpackSDICOMDir_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), config=dict( argstr="-cfg %s", extensions=None, mandatory=True, xor=("run_info", "config", "seq_config"), ), - dir_structure=dict( - argstr="-%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - log_file=dict( - argstr="-log %s", - extensions=None, - ), - no_info_dump=dict( - argstr="-noinfodump", - ), - no_unpack_err=dict( - argstr="-no-unpackerr", - ), - output_dir=dict( - argstr="-targ %s", - ), + dir_structure=dict(argstr="-%s"), + environ=dict(nohash=True, usedefault=True), + log_file=dict(argstr="-log %s", extensions=None), + no_info_dump=dict(argstr="-noinfodump"), + no_unpack_err=dict(argstr="-no-unpackerr"), + output_dir=dict(argstr="-targ %s"), run_info=dict( argstr="-run %d %s %s %s", mandatory=True, xor=("run_info", "config", "seq_config"), ), - scan_only=dict( - argstr="-scanonly %s", - extensions=None, - ), + scan_only=dict(argstr="-scanonly %s", extensions=None), seq_config=dict( argstr="-seqcfg %s", extensions=None, mandatory=True, xor=("run_info", "config", "seq_config"), ), - source_dir=dict( - argstr="-src %s", - mandatory=True, - ), - spm_zeropad=dict( - argstr="-nspmzeropad %d", - ), + source_dir=dict(argstr="-src %s", mandatory=True), + spm_zeropad=dict(argstr="-nspmzeropad %d"), subjects_dir=dict(), ) inputs = UnpackSDICOMDir.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py b/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py index 152f03eaa8..c3cf3788bc 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py @@ -4,64 +4,21 @@ def test_VolumeMask_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - aseg=dict( - extensions=None, - xor=["in_aseg"], - ), + args=dict(argstr="%s"), + aseg=dict(extensions=None, xor=["in_aseg"]), copy_inputs=dict(), - environ=dict( - nohash=True, - usedefault=True, - ), - in_aseg=dict( - argstr="--aseg_name %s", - extensions=None, - xor=["aseg"], - ), - left_ribbonlabel=dict( - argstr="--label_left_ribbon %d", - mandatory=True, - ), - left_whitelabel=dict( - argstr="--label_left_white %d", - mandatory=True, - ), - lh_pial=dict( - extensions=None, - mandatory=True, - ), - lh_white=dict( - extensions=None, - mandatory=True, - ), - rh_pial=dict( - extensions=None, - mandatory=True, - ), - rh_white=dict( - extensions=None, - mandatory=True, - ), - right_ribbonlabel=dict( - argstr="--label_right_ribbon %d", - mandatory=True, - ), - right_whitelabel=dict( - argstr="--label_right_white %d", - mandatory=True, - ), - save_ribbon=dict( - argstr="--save_ribbon", - ), - subject_id=dict( - argstr="%s", - mandatory=True, - position=-1, - usedefault=True, - ), + environ=dict(nohash=True, usedefault=True), + 
in_aseg=dict(argstr="--aseg_name %s", extensions=None, xor=["aseg"]), + left_ribbonlabel=dict(argstr="--label_left_ribbon %d", mandatory=True), + left_whitelabel=dict(argstr="--label_left_white %d", mandatory=True), + lh_pial=dict(extensions=None, mandatory=True), + lh_white=dict(extensions=None, mandatory=True), + rh_pial=dict(extensions=None, mandatory=True), + rh_white=dict(extensions=None, mandatory=True), + right_ribbonlabel=dict(argstr="--label_right_ribbon %d", mandatory=True), + right_whitelabel=dict(argstr="--label_right_white %d", mandatory=True), + save_ribbon=dict(argstr="--save_ribbon"), + subject_id=dict(argstr="%s", mandatory=True, position=-1, usedefault=True), subjects_dir=dict(), ) inputs = VolumeMask.input_spec() @@ -73,15 +30,9 @@ def test_VolumeMask_inputs(): def test_VolumeMask_outputs(): output_map = dict( - lh_ribbon=dict( - extensions=None, - ), - out_ribbon=dict( - extensions=None, - ), - rh_ribbon=dict( - extensions=None, - ), + lh_ribbon=dict(extensions=None), + out_ribbon=dict(extensions=None), + rh_ribbon=dict(extensions=None), ) outputs = VolumeMask.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py b/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py index 5e8609c4c6..656b26f869 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py @@ -4,40 +4,16 @@ def test_WatershedSkullStrip_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - brain_atlas=dict( - argstr="-brain_atlas %s", - extensions=None, - position=-4, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s"), + brain_atlas=dict(argstr="-brain_atlas %s", extensions=None, position=-4), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), out_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - usedefault=True, + argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True ), subjects_dir=dict(), - t1=dict( - argstr="-T1", - ), - transform=dict( - argstr="%s", - extensions=None, - position=-3, - ), + t1=dict(argstr="-T1"), + transform=dict(argstr="%s", extensions=None, position=-3), ) inputs = WatershedSkullStrip.input_spec() @@ -47,11 +23,7 @@ def test_WatershedSkullStrip_inputs(): def test_WatershedSkullStrip_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = WatershedSkullStrip.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_AR1Image.py b/nipype/interfaces/fsl/tests/test_auto_AR1Image.py index 0f4cfc2bcc..3f16d7235d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_AR1Image.py +++ b/nipype/interfaces/fsl/tests/test_auto_AR1Image.py @@ -4,43 +4,16 @@ def test_AR1Image_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dimension=dict( - argstr="-%sar1", - position=4, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr="-dt %s", - position=1, - ), - nan2zeros=dict( - argstr="-nan", - position=3, - ), + args=dict(argstr="%s"), + dimension=dict(argstr="-%sar1", position=4, usedefault=True), + 
environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + internal_datatype=dict(argstr="-dt %s", position=1), + nan2zeros=dict(argstr="-nan", position=3), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr="-odt %s", - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 ), + output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = AR1Image.input_spec() @@ -51,11 +24,7 @@ def test_AR1Image_inputs(): def test_AR1Image_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = AR1Image.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py b/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py index 359a076f2d..c84d3694ce 100644 --- a/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py +++ b/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py @@ -4,30 +4,11 @@ def test_AccuracyTester_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - mel_icas=dict( - argstr="%s", - copyfile=False, - mandatory=True, - position=3, - ), - output_directory=dict( - argstr="%s", - mandatory=True, - position=2, - ), - trained_wts_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + mel_icas=dict(argstr="%s", copyfile=False, mandatory=True, position=3), + output_directory=dict(argstr="%s", mandatory=True, position=2), + trained_wts_file=dict(argstr="%s", extensions=None, mandatory=True, position=1), ) inputs = AccuracyTester.input_spec() @@ -37,12 +18,7 @@ def test_AccuracyTester_inputs(): def test_AccuracyTester_outputs(): - output_map = dict( - output_directory=dict( - argstr="%s", - position=1, - ), - ) + output_map = dict(output_directory=dict(argstr="%s", position=1)) outputs = AccuracyTester.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py b/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py index f38990e572..afcb8e7267 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py @@ -4,44 +4,16 @@ def test_ApplyMask_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr="-dt %s", - position=1, - ), - mask_file=dict( - argstr="-mas %s", - extensions=None, - mandatory=True, - position=4, - ), - nan2zeros=dict( - argstr="-nan", - position=3, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + internal_datatype=dict(argstr="-dt %s", position=1), + mask_file=dict(argstr="-mas %s", extensions=None, mandatory=True, position=4), + nan2zeros=dict(argstr="-nan", position=3), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr="-odt %s", - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 ), + output_datatype=dict(argstr="-odt %s", position=-1), 
output_type=dict(), ) inputs = ApplyMask.input_spec() @@ -52,11 +24,7 @@ def test_ApplyMask_inputs(): def test_ApplyMask_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = ApplyMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py b/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py index 4c5bcc13a7..e346363525 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py @@ -4,30 +4,12 @@ def test_ApplyTOPUP_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - datatype=dict( - argstr="-d=%s", - ), - encoding_file=dict( - argstr="--datain=%s", - extensions=None, - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr="--imain=%s", - mandatory=True, - sep=",", - ), - in_index=dict( - argstr="--inindex=%s", - sep=",", - ), + args=dict(argstr="%s"), + datatype=dict(argstr="-d=%s"), + encoding_file=dict(argstr="--datain=%s", extensions=None, mandatory=True), + environ=dict(nohash=True, usedefault=True), + in_files=dict(argstr="--imain=%s", mandatory=True, sep=","), + in_index=dict(argstr="--inindex=%s", sep=","), in_topup_fieldcoef=dict( argstr="--topup=%s", copyfile=False, @@ -35,16 +17,10 @@ def test_ApplyTOPUP_inputs(): requires=["in_topup_movpar"], ), in_topup_movpar=dict( - copyfile=False, - extensions=None, - requires=["in_topup_fieldcoef"], - ), - interp=dict( - argstr="--interp=%s", - ), - method=dict( - argstr="--method=%s", + copyfile=False, extensions=None, requires=["in_topup_fieldcoef"] ), + interp=dict(argstr="--interp=%s"), + method=dict(argstr="--method=%s"), out_corrected=dict( argstr="--out=%s", extensions=None, @@ -61,11 +37,7 @@ def test_ApplyTOPUP_inputs(): def test_ApplyTOPUP_outputs(): - output_map = dict( - out_corrected=dict( - extensions=None, - ), - ) + output_map = dict(out_corrected=dict(extensions=None)) outputs = ApplyTOPUP.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py b/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py index 1eaf3eb2e7..ec6d5fe07a 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py @@ -4,38 +4,14 @@ def test_ApplyWarp_inputs(): input_map = dict( - abswarp=dict( - argstr="--abs", - xor=["relwarp"], - ), - args=dict( - argstr="%s", - ), - datatype=dict( - argstr="--datatype=%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - field_file=dict( - argstr="--warp=%s", - extensions=None, - ), - in_file=dict( - argstr="--in=%s", - extensions=None, - mandatory=True, - position=0, - ), - interp=dict( - argstr="--interp=%s", - position=-2, - ), - mask_file=dict( - argstr="--mask=%s", - extensions=None, - ), + abswarp=dict(argstr="--abs", xor=["relwarp"]), + args=dict(argstr="%s"), + datatype=dict(argstr="--datatype=%s"), + environ=dict(nohash=True, usedefault=True), + field_file=dict(argstr="--warp=%s", extensions=None), + in_file=dict(argstr="--in=%s", extensions=None, mandatory=True, position=0), + interp=dict(argstr="--interp=%s", position=-2), + mask_file=dict(argstr="--mask=%s", extensions=None), out_file=dict( argstr="--out=%s", extensions=None, @@ -44,31 +20,12 @@ def test_ApplyWarp_inputs(): position=2, ), output_type=dict(), - postmat=dict( - argstr="--postmat=%s", - extensions=None, - ), - premat=dict( - 
argstr="--premat=%s", - extensions=None, - ), - ref_file=dict( - argstr="--ref=%s", - extensions=None, - mandatory=True, - position=1, - ), - relwarp=dict( - argstr="--rel", - position=-1, - xor=["abswarp"], - ), - superlevel=dict( - argstr="--superlevel=%s", - ), - supersample=dict( - argstr="--super", - ), + postmat=dict(argstr="--postmat=%s", extensions=None), + premat=dict(argstr="--premat=%s", extensions=None), + ref_file=dict(argstr="--ref=%s", extensions=None, mandatory=True, position=1), + relwarp=dict(argstr="--rel", position=-1, xor=["abswarp"]), + superlevel=dict(argstr="--superlevel=%s"), + supersample=dict(argstr="--super"), ) inputs = ApplyWarp.input_spec() @@ -78,11 +35,7 @@ def test_ApplyWarp_inputs(): def test_ApplyWarp_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = ApplyWarp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py b/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py index 0fc914cdde..0a904031fd 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py @@ -4,111 +4,35 @@ def test_ApplyXFM_inputs(): input_map = dict( - angle_rep=dict( - argstr="-anglerep %s", - ), - apply_isoxfm=dict( - argstr="-applyisoxfm %f", - xor=["apply_xfm"], - ), - apply_xfm=dict( - argstr="-applyxfm", - usedefault=True, - ), - args=dict( - argstr="%s", - ), - bbrslope=dict( - argstr="-bbrslope %f", - min_ver="5.0.0", - ), - bbrtype=dict( - argstr="-bbrtype %s", - min_ver="5.0.0", - ), - bgvalue=dict( - argstr="-setbackground %f", - ), - bins=dict( - argstr="-bins %d", - ), - coarse_search=dict( - argstr="-coarsesearch %d", - units="degrees", - ), - cost=dict( - argstr="-cost %s", - ), - cost_func=dict( - argstr="-searchcost %s", - ), - datatype=dict( - argstr="-datatype %s", - ), - display_init=dict( - argstr="-displayinit", - ), - dof=dict( - argstr="-dof %d", - ), - echospacing=dict( - argstr="-echospacing %f", - min_ver="5.0.0", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fieldmap=dict( - argstr="-fieldmap %s", - extensions=None, - min_ver="5.0.0", - ), - fieldmapmask=dict( - argstr="-fieldmapmask %s", - extensions=None, - min_ver="5.0.0", - ), - fine_search=dict( - argstr="-finesearch %d", - units="degrees", - ), - force_scaling=dict( - argstr="-forcescaling", - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - position=0, - ), - in_matrix_file=dict( - argstr="-init %s", - extensions=None, - ), - in_weight=dict( - argstr="-inweight %s", - extensions=None, - ), - interp=dict( - argstr="-interp %s", - ), - min_sampling=dict( - argstr="-minsampling %f", - units="mm", - ), - no_clamp=dict( - argstr="-noclamp", - ), - no_resample=dict( - argstr="-noresample", - ), - no_resample_blur=dict( - argstr="-noresampblur", - ), - no_search=dict( - argstr="-nosearch", - ), + angle_rep=dict(argstr="-anglerep %s"), + apply_isoxfm=dict(argstr="-applyisoxfm %f", xor=["apply_xfm"]), + apply_xfm=dict(argstr="-applyxfm", usedefault=True), + args=dict(argstr="%s"), + bbrslope=dict(argstr="-bbrslope %f", min_ver="5.0.0"), + bbrtype=dict(argstr="-bbrtype %s", min_ver="5.0.0"), + bgvalue=dict(argstr="-setbackground %f"), + bins=dict(argstr="-bins %d"), + coarse_search=dict(argstr="-coarsesearch %d", units="degrees"), + cost=dict(argstr="-cost %s"), + cost_func=dict(argstr="-searchcost %s"), + datatype=dict(argstr="-datatype %s"), + 
display_init=dict(argstr="-displayinit"), + dof=dict(argstr="-dof %d"), + echospacing=dict(argstr="-echospacing %f", min_ver="5.0.0"), + environ=dict(nohash=True, usedefault=True), + fieldmap=dict(argstr="-fieldmap %s", extensions=None, min_ver="5.0.0"), + fieldmapmask=dict(argstr="-fieldmapmask %s", extensions=None, min_ver="5.0.0"), + fine_search=dict(argstr="-finesearch %d", units="degrees"), + force_scaling=dict(argstr="-forcescaling"), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=0), + in_matrix_file=dict(argstr="-init %s", extensions=None), + in_weight=dict(argstr="-inweight %s", extensions=None), + interp=dict(argstr="-interp %s"), + min_sampling=dict(argstr="-minsampling %f", units="mm"), + no_clamp=dict(argstr="-noclamp"), + no_resample=dict(argstr="-noresample"), + no_resample_blur=dict(argstr="-noresampblur"), + no_search=dict(argstr="-nosearch"), out_file=dict( argstr="-out %s", extensions=None, @@ -134,72 +58,23 @@ def test_ApplyXFM_inputs(): position=3, ), output_type=dict(), - padding_size=dict( - argstr="-paddingsize %d", - units="voxels", - ), - pedir=dict( - argstr="-pedir %d", - min_ver="5.0.0", - ), - ref_weight=dict( - argstr="-refweight %s", - extensions=None, - ), - reference=dict( - argstr="-ref %s", - extensions=None, - mandatory=True, - position=1, - ), - rigid2D=dict( - argstr="-2D", - ), + padding_size=dict(argstr="-paddingsize %d", units="voxels"), + pedir=dict(argstr="-pedir %d", min_ver="5.0.0"), + ref_weight=dict(argstr="-refweight %s", extensions=None), + reference=dict(argstr="-ref %s", extensions=None, mandatory=True, position=1), + rigid2D=dict(argstr="-2D"), save_log=dict(), - schedule=dict( - argstr="-schedule %s", - extensions=None, - ), - searchr_x=dict( - argstr="-searchrx %s", - units="degrees", - ), - searchr_y=dict( - argstr="-searchry %s", - units="degrees", - ), - searchr_z=dict( - argstr="-searchrz %s", - units="degrees", - ), - sinc_width=dict( - argstr="-sincwidth %d", - units="voxels", - ), - sinc_window=dict( - argstr="-sincwindow %s", - ), - uses_qform=dict( - argstr="-usesqform", - ), - verbose=dict( - argstr="-verbose %d", - ), - wm_seg=dict( - argstr="-wmseg %s", - extensions=None, - min_ver="5.0.0", - ), - wmcoords=dict( - argstr="-wmcoords %s", - extensions=None, - min_ver="5.0.0", - ), - wmnorms=dict( - argstr="-wmnorms %s", - extensions=None, - min_ver="5.0.0", - ), + schedule=dict(argstr="-schedule %s", extensions=None), + searchr_x=dict(argstr="-searchrx %s", units="degrees"), + searchr_y=dict(argstr="-searchry %s", units="degrees"), + searchr_z=dict(argstr="-searchrz %s", units="degrees"), + sinc_width=dict(argstr="-sincwidth %d", units="voxels"), + sinc_window=dict(argstr="-sincwindow %s"), + uses_qform=dict(argstr="-usesqform"), + verbose=dict(argstr="-verbose %d"), + wm_seg=dict(argstr="-wmseg %s", extensions=None, min_ver="5.0.0"), + wmcoords=dict(argstr="-wmcoords %s", extensions=None, min_ver="5.0.0"), + wmnorms=dict(argstr="-wmnorms %s", extensions=None, min_ver="5.0.0"), ) inputs = ApplyXFM.input_spec() @@ -210,15 +85,9 @@ def test_ApplyXFM_inputs(): def test_ApplyXFM_outputs(): output_map = dict( - out_file=dict( - extensions=None, - ), - out_log=dict( - extensions=None, - ), - out_matrix_file=dict( - extensions=None, - ), + out_file=dict(extensions=None), + out_log=dict(extensions=None), + out_matrix_file=dict(extensions=None), ) outputs = ApplyXFM.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_AvScale.py b/nipype/interfaces/fsl/tests/test_auto_AvScale.py index 
4748d6a9bc..32ed206773 100644 --- a/nipype/interfaces/fsl/tests/test_auto_AvScale.py +++ b/nipype/interfaces/fsl/tests/test_auto_AvScale.py @@ -4,26 +4,11 @@ def test_AvScale_inputs(): input_map = dict( - all_param=dict( - argstr="--allparams", - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - mat_file=dict( - argstr="%s", - extensions=None, - position=-2, - ), - ref_file=dict( - argstr="%s", - extensions=None, - position=-1, - ), + all_param=dict(argstr="--allparams"), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + mat_file=dict(argstr="%s", extensions=None, position=-2), + ref_file=dict(argstr="%s", extensions=None, position=-1), ) inputs = AvScale.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_B0Calc.py b/nipype/interfaces/fsl/tests/test_auto_B0Calc.py index 01727179ff..07a086d23a 100644 --- a/nipype/interfaces/fsl/tests/test_auto_B0Calc.py +++ b/nipype/interfaces/fsl/tests/test_auto_B0Calc.py @@ -4,39 +4,14 @@ def test_B0Calc_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - chi_air=dict( - argstr="--chi0=%e", - usedefault=True, - ), - compute_xyz=dict( - argstr="--xyz", - usedefault=True, - ), - delta=dict( - argstr="-d %e", - usedefault=True, - ), - directconv=dict( - argstr="--directconv", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - extendboundary=dict( - argstr="--extendboundary=%0.2f", - usedefault=True, - ), - in_file=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - position=0, - ), + args=dict(argstr="%s"), + chi_air=dict(argstr="--chi0=%e", usedefault=True), + compute_xyz=dict(argstr="--xyz", usedefault=True), + delta=dict(argstr="-d %e", usedefault=True), + directconv=dict(argstr="--directconv", usedefault=True), + environ=dict(nohash=True, usedefault=True), + extendboundary=dict(argstr="--extendboundary=%0.2f", usedefault=True), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=0), out_file=dict( argstr="-o %s", extensions=None, @@ -46,37 +21,15 @@ def test_B0Calc_inputs(): position=1, ), output_type=dict(), - x_b0=dict( - argstr="--b0x=%0.2f", - usedefault=True, - xor=["xyz_b0"], - ), - x_grad=dict( - argstr="--gx=%0.4f", - usedefault=True, - ), + x_b0=dict(argstr="--b0x=%0.2f", usedefault=True, xor=["xyz_b0"]), + x_grad=dict(argstr="--gx=%0.4f", usedefault=True), xyz_b0=dict( - argstr="--b0x=%0.2f --b0y=%0.2f --b0=%0.2f", - xor=["x_b0", "y_b0", "z_b0"], - ), - y_b0=dict( - argstr="--b0y=%0.2f", - usedefault=True, - xor=["xyz_b0"], - ), - y_grad=dict( - argstr="--gy=%0.4f", - usedefault=True, - ), - z_b0=dict( - argstr="--b0=%0.2f", - usedefault=True, - xor=["xyz_b0"], - ), - z_grad=dict( - argstr="--gz=%0.4f", - usedefault=True, + argstr="--b0x=%0.2f --b0y=%0.2f --b0=%0.2f", xor=["x_b0", "y_b0", "z_b0"] ), + y_b0=dict(argstr="--b0y=%0.2f", usedefault=True, xor=["xyz_b0"]), + y_grad=dict(argstr="--gy=%0.4f", usedefault=True), + z_b0=dict(argstr="--b0=%0.2f", usedefault=True, xor=["xyz_b0"]), + z_grad=dict(argstr="--gz=%0.4f", usedefault=True), ) inputs = B0Calc.input_spec() @@ -86,11 +39,7 @@ def test_B0Calc_inputs(): def test_B0Calc_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = B0Calc.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py b/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py index 2424307165..7b7b0680f1 100644 --- 
a/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py +++ b/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py @@ -4,114 +4,37 @@ def test_BEDPOSTX5_inputs(): input_map = dict( - all_ard=dict( - argstr="--allard", - xor=("no_ard", "all_ard"), - ), - args=dict( - argstr="%s", - ), - burn_in=dict( - argstr="-b %d", - usedefault=True, - ), - burn_in_no_ard=dict( - argstr="--burnin_noard=%d", - usedefault=True, - ), - bvals=dict( - extensions=None, - mandatory=True, - ), - bvecs=dict( - extensions=None, - mandatory=True, - ), - cnlinear=dict( - argstr="--cnonlinear", - xor=("no_spat", "non_linear", "cnlinear"), - ), - dwi=dict( - extensions=None, - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - f0_ard=dict( - argstr="--f0 --ardf0", - xor=["f0_noard", "f0_ard", "all_ard"], - ), - f0_noard=dict( - argstr="--f0", - xor=["f0_noard", "f0_ard"], - ), - force_dir=dict( - argstr="--forcedir", - usedefault=True, - ), - fudge=dict( - argstr="-w %d", - ), - grad_dev=dict( - extensions=None, - ), - gradnonlin=dict( - argstr="-g", - ), - logdir=dict( - argstr="--logdir=%s", - ), - mask=dict( - extensions=None, - mandatory=True, - ), - model=dict( - argstr="-model %d", - ), - n_fibres=dict( - argstr="-n %d", - mandatory=True, - usedefault=True, - ), - n_jumps=dict( - argstr="-j %d", - usedefault=True, - ), - no_ard=dict( - argstr="--noard", - xor=("no_ard", "all_ard"), - ), - no_spat=dict( - argstr="--nospat", - xor=("no_spat", "non_linear", "cnlinear"), - ), + all_ard=dict(argstr="--allard", xor=("no_ard", "all_ard")), + args=dict(argstr="%s"), + burn_in=dict(argstr="-b %d", usedefault=True), + burn_in_no_ard=dict(argstr="--burnin_noard=%d", usedefault=True), + bvals=dict(extensions=None, mandatory=True), + bvecs=dict(extensions=None, mandatory=True), + cnlinear=dict(argstr="--cnonlinear", xor=("no_spat", "non_linear", "cnlinear")), + dwi=dict(extensions=None, mandatory=True), + environ=dict(nohash=True, usedefault=True), + f0_ard=dict(argstr="--f0 --ardf0", xor=["f0_noard", "f0_ard", "all_ard"]), + f0_noard=dict(argstr="--f0", xor=["f0_noard", "f0_ard"]), + force_dir=dict(argstr="--forcedir", usedefault=True), + fudge=dict(argstr="-w %d"), + grad_dev=dict(extensions=None), + gradnonlin=dict(argstr="-g"), + logdir=dict(argstr="--logdir=%s"), + mask=dict(extensions=None, mandatory=True), + model=dict(argstr="-model %d"), + n_fibres=dict(argstr="-n %d", mandatory=True, usedefault=True), + n_jumps=dict(argstr="-j %d", usedefault=True), + no_ard=dict(argstr="--noard", xor=("no_ard", "all_ard")), + no_spat=dict(argstr="--nospat", xor=("no_spat", "non_linear", "cnlinear")), non_linear=dict( - argstr="--nonlinear", - xor=("no_spat", "non_linear", "cnlinear"), - ), - out_dir=dict( - argstr="%s", - mandatory=True, - position=1, - usedefault=True, + argstr="--nonlinear", xor=("no_spat", "non_linear", "cnlinear") ), + out_dir=dict(argstr="%s", mandatory=True, position=1, usedefault=True), output_type=dict(), - rician=dict( - argstr="--rician", - ), - sample_every=dict( - argstr="-s %d", - usedefault=True, - ), - seed=dict( - argstr="--seed=%d", - ), - update_proposal_every=dict( - argstr="--updateproposalevery=%d", - usedefault=True, - ), + rician=dict(argstr="--rician"), + sample_every=dict(argstr="-s %d", usedefault=True), + seed=dict(argstr="--seed=%d"), + update_proposal_every=dict(argstr="--updateproposalevery=%d", usedefault=True), use_gpu=dict(), ) inputs = BEDPOSTX5.input_spec() @@ -125,12 +48,8 @@ def test_BEDPOSTX5_outputs(): output_map = dict( dyads=dict(), 
dyads_dispersion=dict(), - mean_S0samples=dict( - extensions=None, - ), - mean_dsamples=dict( - extensions=None, - ), + mean_S0samples=dict(extensions=None), + mean_dsamples=dict(extensions=None), mean_fsamples=dict(), mean_phsamples=dict(), mean_thsamples=dict(), diff --git a/nipype/interfaces/fsl/tests/test_auto_BET.py b/nipype/interfaces/fsl/tests/test_auto_BET.py index 82757a10a6..d64186249d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_BET.py +++ b/nipype/interfaces/fsl/tests/test_auto_BET.py @@ -4,20 +4,10 @@ def test_BET_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - center=dict( - argstr="-c %s", - units="voxels", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - frac=dict( - argstr="-f %.2f", - ), + args=dict(argstr="%s"), + center=dict(argstr="-c %s", units="voxels"), + environ=dict(nohash=True, usedefault=True), + frac=dict(argstr="-f %.2f"), functional=dict( argstr="-F", xor=( @@ -31,31 +21,15 @@ def test_BET_inputs(): ), ), in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=0, - ), - mask=dict( - argstr="-m", - ), - mesh=dict( - argstr="-e", - ), - no_output=dict( - argstr="-n", + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0 ), + mask=dict(argstr="-m"), + mesh=dict(argstr="-e"), + no_output=dict(argstr="-n"), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=1, - ), - outline=dict( - argstr="-o", + argstr="%s", extensions=None, genfile=True, hash_files=False, position=1 ), + outline=dict(argstr="-o"), output_type=dict(), padding=dict( argstr="-Z", @@ -69,10 +43,7 @@ def test_BET_inputs(): "t2_guided", ), ), - radius=dict( - argstr="-r %d", - units="mm", - ), + radius=dict(argstr="-r %d", units="mm"), reduce_bias=dict( argstr="-B", xor=( @@ -109,9 +80,7 @@ def test_BET_inputs(): "t2_guided", ), ), - skull=dict( - argstr="-s", - ), + skull=dict(argstr="-s"), surfaces=dict( argstr="-A", xor=( @@ -137,12 +106,8 @@ def test_BET_inputs(): "t2_guided", ), ), - threshold=dict( - argstr="-t", - ), - vertical_gradient=dict( - argstr="-g %.2f", - ), + threshold=dict(argstr="-t"), + vertical_gradient=dict(argstr="-g %.2f"), ) inputs = BET.input_spec() @@ -153,42 +118,18 @@ def test_BET_inputs(): def test_BET_outputs(): output_map = dict( - inskull_mask_file=dict( - extensions=None, - ), - inskull_mesh_file=dict( - extensions=None, - ), - mask_file=dict( - extensions=None, - ), - meshfile=dict( - extensions=None, - ), - out_file=dict( - extensions=None, - ), - outline_file=dict( - extensions=None, - ), - outskin_mask_file=dict( - extensions=None, - ), - outskin_mesh_file=dict( - extensions=None, - ), - outskull_mask_file=dict( - extensions=None, - ), - outskull_mesh_file=dict( - extensions=None, - ), - skull_file=dict( - extensions=None, - ), - skull_mask_file=dict( - extensions=None, - ), + inskull_mask_file=dict(extensions=None), + inskull_mesh_file=dict(extensions=None), + mask_file=dict(extensions=None), + meshfile=dict(extensions=None), + out_file=dict(extensions=None), + outline_file=dict(extensions=None), + outskin_mask_file=dict(extensions=None), + outskin_mesh_file=dict(extensions=None), + outskull_mask_file=dict(extensions=None), + outskull_mesh_file=dict(extensions=None), + skull_file=dict(extensions=None), + skull_mask_file=dict(extensions=None), ) outputs = BET.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py b/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py index 405cd592f6..b0febff205 100644 
--- a/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py @@ -4,27 +4,11 @@ def test_BinaryMaths_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr="-dt %s", - position=1, - ), - nan2zeros=dict( - argstr="-nan", - position=3, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + internal_datatype=dict(argstr="-dt %s", position=1), + nan2zeros=dict(argstr="-nan", position=3), operand_file=dict( argstr="%s", extensions=None, @@ -33,27 +17,13 @@ def test_BinaryMaths_inputs(): xor=["operand_value"], ), operand_value=dict( - argstr="%.8f", - mandatory=True, - position=5, - xor=["operand_file"], - ), - operation=dict( - argstr="-%s", - mandatory=True, - position=4, + argstr="%.8f", mandatory=True, position=5, xor=["operand_file"] ), + operation=dict(argstr="-%s", mandatory=True, position=4), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr="-odt %s", - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 ), + output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = BinaryMaths.input_spec() @@ -64,11 +34,7 @@ def test_BinaryMaths_inputs(): def test_BinaryMaths_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = BinaryMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py b/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py index 61f27be950..8ef6b770cc 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py +++ b/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py @@ -4,39 +4,15 @@ def test_ChangeDataType_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr="-dt %s", - position=1, - ), - nan2zeros=dict( - argstr="-nan", - position=3, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + internal_datatype=dict(argstr="-dt %s", position=1), + nan2zeros=dict(argstr="-nan", position=3), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr="-odt %s", - mandatory=True, - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 ), + output_datatype=dict(argstr="-odt %s", mandatory=True, position=-1), output_type=dict(), ) inputs = ChangeDataType.input_spec() @@ -47,11 +23,7 @@ def test_ChangeDataType_inputs(): def test_ChangeDataType_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = ChangeDataType.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Classifier.py b/nipype/interfaces/fsl/tests/test_auto_Classifier.py index 44fde8ae1a..3cb4616892 100644 --- 
a/nipype/interfaces/fsl/tests/test_auto_Classifier.py +++ b/nipype/interfaces/fsl/tests/test_auto_Classifier.py @@ -4,32 +4,13 @@ def test_Classifier_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - artifacts_list_file=dict( - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - mel_ica=dict( - argstr="%s", - copyfile=False, - position=1, - ), - thresh=dict( - argstr="%d", - mandatory=True, - position=-1, - ), + args=dict(argstr="%s"), + artifacts_list_file=dict(extensions=None), + environ=dict(nohash=True, usedefault=True), + mel_ica=dict(argstr="%s", copyfile=False, position=1), + thresh=dict(argstr="%d", mandatory=True, position=-1), trained_wts_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=2, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=2 ), ) inputs = Classifier.input_spec() @@ -40,11 +21,7 @@ def test_Classifier_inputs(): def test_Classifier_outputs(): - output_map = dict( - artifacts_list_file=dict( - extensions=None, - ), - ) + output_map = dict(artifacts_list_file=dict(extensions=None)) outputs = Classifier.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Cleaner.py b/nipype/interfaces/fsl/tests/test_auto_Cleaner.py index 55e6851603..d23dd308a7 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Cleaner.py +++ b/nipype/interfaces/fsl/tests/test_auto_Cleaner.py @@ -4,47 +4,17 @@ def test_Cleaner_inputs(): input_map = dict( - aggressive=dict( - argstr="-A", - position=3, - ), - args=dict( - argstr="%s", - ), + aggressive=dict(argstr="-A", position=3), + args=dict(argstr="%s"), artifacts_list_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=1, - ), - cleanup_motion=dict( - argstr="-m", - position=2, - ), - confound_file=dict( - argstr="-x %s", - extensions=None, - position=4, - ), - confound_file_1=dict( - argstr="-x %s", - extensions=None, - position=5, - ), - confound_file_2=dict( - argstr="-x %s", - extensions=None, - position=6, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - highpass=dict( - argstr="-m -h %f", - position=2, - usedefault=True, - ), + argstr="%s", extensions=None, mandatory=True, position=1 + ), + cleanup_motion=dict(argstr="-m", position=2), + confound_file=dict(argstr="-x %s", extensions=None, position=4), + confound_file_1=dict(argstr="-x %s", extensions=None, position=5), + confound_file_2=dict(argstr="-x %s", extensions=None, position=6), + environ=dict(nohash=True, usedefault=True), + highpass=dict(argstr="-m -h %f", position=2, usedefault=True), ) inputs = Cleaner.input_spec() @@ -54,11 +24,7 @@ def test_Cleaner_inputs(): def test_Cleaner_outputs(): - output_map = dict( - cleaned_functional_file=dict( - extensions=None, - ), - ) + output_map = dict(cleaned_functional_file=dict(extensions=None)) outputs = Cleaner.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Cluster.py b/nipype/interfaces/fsl/tests/test_auto_Cluster.py index ba4dfc8ae3..3f1dda4725 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Cluster.py +++ b/nipype/interfaces/fsl/tests/test_auto_Cluster.py @@ -4,110 +4,34 @@ def test_Cluster_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - connectivity=dict( - argstr="--connectivity=%d", - ), - cope_file=dict( - argstr="--cope=%s", - extensions=None, - ), - dlh=dict( - argstr="--dlh=%.10f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - 
find_min=dict( - argstr="--min", - usedefault=True, - ), - fractional=dict( - argstr="--fractional", - usedefault=True, - ), - in_file=dict( - argstr="--in=%s", - extensions=None, - mandatory=True, - ), - minclustersize=dict( - argstr="--minclustersize", - usedefault=True, - ), - no_table=dict( - argstr="--no_table", - usedefault=True, - ), - num_maxima=dict( - argstr="--num=%d", - ), - out_index_file=dict( - argstr="--oindex=%s", - hash_files=False, - ), - out_localmax_txt_file=dict( - argstr="--olmax=%s", - hash_files=False, - ), - out_localmax_vol_file=dict( - argstr="--olmaxim=%s", - hash_files=False, - ), - out_max_file=dict( - argstr="--omax=%s", - hash_files=False, - ), - out_mean_file=dict( - argstr="--omean=%s", - hash_files=False, - ), - out_pval_file=dict( - argstr="--opvals=%s", - hash_files=False, - ), - out_size_file=dict( - argstr="--osize=%s", - hash_files=False, - ), - out_threshold_file=dict( - argstr="--othresh=%s", - hash_files=False, - ), + args=dict(argstr="%s"), + connectivity=dict(argstr="--connectivity=%d"), + cope_file=dict(argstr="--cope=%s", extensions=None), + dlh=dict(argstr="--dlh=%.10f"), + environ=dict(nohash=True, usedefault=True), + find_min=dict(argstr="--min", usedefault=True), + fractional=dict(argstr="--fractional", usedefault=True), + in_file=dict(argstr="--in=%s", extensions=None, mandatory=True), + minclustersize=dict(argstr="--minclustersize", usedefault=True), + no_table=dict(argstr="--no_table", usedefault=True), + num_maxima=dict(argstr="--num=%d"), + out_index_file=dict(argstr="--oindex=%s", hash_files=False), + out_localmax_txt_file=dict(argstr="--olmax=%s", hash_files=False), + out_localmax_vol_file=dict(argstr="--olmaxim=%s", hash_files=False), + out_max_file=dict(argstr="--omax=%s", hash_files=False), + out_mean_file=dict(argstr="--omean=%s", hash_files=False), + out_pval_file=dict(argstr="--opvals=%s", hash_files=False), + out_size_file=dict(argstr="--osize=%s", hash_files=False), + out_threshold_file=dict(argstr="--othresh=%s", hash_files=False), output_type=dict(), - peak_distance=dict( - argstr="--peakdist=%.10f", - ), - pthreshold=dict( - argstr="--pthresh=%.10f", - requires=["dlh", "volume"], - ), - std_space_file=dict( - argstr="--stdvol=%s", - extensions=None, - ), - threshold=dict( - argstr="--thresh=%.10f", - mandatory=True, - ), - use_mm=dict( - argstr="--mm", - usedefault=True, - ), - volume=dict( - argstr="--volume=%d", - ), - warpfield_file=dict( - argstr="--warpvol=%s", - extensions=None, - ), - xfm_file=dict( - argstr="--xfm=%s", - extensions=None, - ), + peak_distance=dict(argstr="--peakdist=%.10f"), + pthreshold=dict(argstr="--pthresh=%.10f", requires=["dlh", "volume"]), + std_space_file=dict(argstr="--stdvol=%s", extensions=None), + threshold=dict(argstr="--thresh=%.10f", mandatory=True), + use_mm=dict(argstr="--mm", usedefault=True), + volume=dict(argstr="--volume=%d"), + warpfield_file=dict(argstr="--warpvol=%s", extensions=None), + xfm_file=dict(argstr="--xfm=%s", extensions=None), ) inputs = Cluster.input_spec() @@ -118,30 +42,14 @@ def test_Cluster_inputs(): def test_Cluster_outputs(): output_map = dict( - index_file=dict( - extensions=None, - ), - localmax_txt_file=dict( - extensions=None, - ), - localmax_vol_file=dict( - extensions=None, - ), - max_file=dict( - extensions=None, - ), - mean_file=dict( - extensions=None, - ), - pval_file=dict( - extensions=None, - ), - size_file=dict( - extensions=None, - ), - threshold_file=dict( - extensions=None, - ), + index_file=dict(extensions=None), + 
localmax_txt_file=dict(extensions=None), + localmax_vol_file=dict(extensions=None), + max_file=dict(extensions=None), + mean_file=dict(extensions=None), + pval_file=dict(extensions=None), + size_file=dict(extensions=None), + threshold_file=dict(extensions=None), ) outputs = Cluster.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Complex.py b/nipype/interfaces/fsl/tests/test_auto_Complex.py index 80d6f20fe6..b20e878bb9 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Complex.py +++ b/nipype/interfaces/fsl/tests/test_auto_Complex.py @@ -4,9 +4,7 @@ def test_Complex_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), complex_cartesian=dict( argstr="-complex", position=1, @@ -19,16 +17,8 @@ def test_Complex_inputs(): "complex_merge", ], ), - complex_in_file=dict( - argstr="%s", - extensions=None, - position=2, - ), - complex_in_file2=dict( - argstr="%s", - extensions=None, - position=3, - ), + complex_in_file=dict(argstr="%s", extensions=None, position=2), + complex_in_file2=dict(argstr="%s", extensions=None, position=3), complex_merge=dict( argstr="-complexmerge", position=1, @@ -82,19 +72,9 @@ def test_Complex_inputs(): "complex_merge", ], ), - end_vol=dict( - argstr="%d", - position=-1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - imaginary_in_file=dict( - argstr="%s", - extensions=None, - position=3, - ), + end_vol=dict(argstr="%d", position=-1), + environ=dict(nohash=True, usedefault=True), + imaginary_in_file=dict(argstr="%s", extensions=None, position=3), imaginary_out_file=dict( argstr="%s", extensions=None, @@ -111,11 +91,7 @@ def test_Complex_inputs(): "complex_merge", ], ), - magnitude_in_file=dict( - argstr="%s", - extensions=None, - position=2, - ), + magnitude_in_file=dict(argstr="%s", extensions=None, position=2), magnitude_out_file=dict( argstr="%s", extensions=None, @@ -133,11 +109,7 @@ def test_Complex_inputs(): ], ), output_type=dict(), - phase_in_file=dict( - argstr="%s", - extensions=None, - position=3, - ), + phase_in_file=dict(argstr="%s", extensions=None, position=3), phase_out_file=dict( argstr="%s", extensions=None, @@ -166,11 +138,7 @@ def test_Complex_inputs(): "complex_merge", ], ), - real_in_file=dict( - argstr="%s", - extensions=None, - position=2, - ), + real_in_file=dict(argstr="%s", extensions=None, position=2), real_out_file=dict( argstr="%s", extensions=None, @@ -199,10 +167,7 @@ def test_Complex_inputs(): "complex_merge", ], ), - start_vol=dict( - argstr="%d", - position=-2, - ), + start_vol=dict(argstr="%d", position=-2), ) inputs = Complex.input_spec() @@ -213,21 +178,11 @@ def test_Complex_inputs(): def test_Complex_outputs(): output_map = dict( - complex_out_file=dict( - extensions=None, - ), - imaginary_out_file=dict( - extensions=None, - ), - magnitude_out_file=dict( - extensions=None, - ), - phase_out_file=dict( - extensions=None, - ), - real_out_file=dict( - extensions=None, - ), + complex_out_file=dict(extensions=None), + imaginary_out_file=dict(extensions=None), + magnitude_out_file=dict(extensions=None), + phase_out_file=dict(extensions=None), + real_out_file=dict(extensions=None), ) outputs = Complex.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py b/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py index 35e5bd43a7..966a030b42 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py +++ b/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py @@ -4,53 +4,19 @@ def test_ContrastMgr_inputs(): input_map = dict( - args=dict( - 
argstr="%s", - ), - contrast_num=dict( - argstr="-cope", - ), - corrections=dict( - copyfile=False, - extensions=None, - mandatory=True, - ), - dof_file=dict( - argstr="", - copyfile=False, - extensions=None, - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fcon_file=dict( - argstr="-f %s", - extensions=None, - ), + args=dict(argstr="%s"), + contrast_num=dict(argstr="-cope"), + corrections=dict(copyfile=False, extensions=None, mandatory=True), + dof_file=dict(argstr="", copyfile=False, extensions=None, mandatory=True), + environ=dict(nohash=True, usedefault=True), + fcon_file=dict(argstr="-f %s", extensions=None), output_type=dict(), - param_estimates=dict( - argstr="", - copyfile=False, - mandatory=True, - ), + param_estimates=dict(argstr="", copyfile=False, mandatory=True), sigmasquareds=dict( - argstr="", - copyfile=False, - extensions=None, - mandatory=True, - position=-2, - ), - suffix=dict( - argstr="-suffix %s", - ), - tcon_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, + argstr="", copyfile=False, extensions=None, mandatory=True, position=-2 ), + suffix=dict(argstr="-suffix %s"), + tcon_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), ) inputs = ContrastMgr.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py b/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py index 7ae7f7471b..a7538b87bc 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py @@ -4,34 +4,14 @@ def test_ConvertWarp_inputs(): input_map = dict( - abswarp=dict( - argstr="--abs", - xor=["relwarp"], - ), - args=dict( - argstr="%s", - ), - cons_jacobian=dict( - argstr="--constrainj", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - jacobian_max=dict( - argstr="--jmax=%f", - ), - jacobian_min=dict( - argstr="--jmin=%f", - ), - midmat=dict( - argstr="--midmat=%s", - extensions=None, - ), - out_abswarp=dict( - argstr="--absout", - xor=["out_relwarp"], - ), + abswarp=dict(argstr="--abs", xor=["relwarp"]), + args=dict(argstr="%s"), + cons_jacobian=dict(argstr="--constrainj"), + environ=dict(nohash=True, usedefault=True), + jacobian_max=dict(argstr="--jmax=%f"), + jacobian_min=dict(argstr="--jmin=%f"), + midmat=dict(argstr="--midmat=%s", extensions=None), + out_abswarp=dict(argstr="--absout", xor=["out_relwarp"]), out_file=dict( argstr="--out=%s", extensions=None, @@ -40,45 +20,16 @@ def test_ConvertWarp_inputs(): output_name="out_file", position=-1, ), - out_relwarp=dict( - argstr="--relout", - xor=["out_abswarp"], - ), + out_relwarp=dict(argstr="--relout", xor=["out_abswarp"]), output_type=dict(), - postmat=dict( - argstr="--postmat=%s", - extensions=None, - ), - premat=dict( - argstr="--premat=%s", - extensions=None, - ), - reference=dict( - argstr="--ref=%s", - extensions=None, - mandatory=True, - position=1, - ), - relwarp=dict( - argstr="--rel", - xor=["abswarp"], - ), - shift_direction=dict( - argstr="--shiftdir=%s", - requires=["shift_in_file"], - ), - shift_in_file=dict( - argstr="--shiftmap=%s", - extensions=None, - ), - warp1=dict( - argstr="--warp1=%s", - extensions=None, - ), - warp2=dict( - argstr="--warp2=%s", - extensions=None, - ), + postmat=dict(argstr="--postmat=%s", extensions=None), + premat=dict(argstr="--premat=%s", extensions=None), + reference=dict(argstr="--ref=%s", extensions=None, mandatory=True, position=1), + relwarp=dict(argstr="--rel", xor=["abswarp"]), + shift_direction=dict(argstr="--shiftdir=%s", 
requires=["shift_in_file"]), + shift_in_file=dict(argstr="--shiftmap=%s", extensions=None), + warp1=dict(argstr="--warp1=%s", extensions=None), + warp2=dict(argstr="--warp2=%s", extensions=None), ) inputs = ConvertWarp.input_spec() @@ -88,11 +39,7 @@ def test_ConvertWarp_inputs(): def test_ConvertWarp_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = ConvertWarp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py b/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py index 60e1d7553b..dff16762b9 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py +++ b/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py @@ -4,36 +4,22 @@ def test_ConvertXFM_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), concat_xfm=dict( argstr="-concat", position=-3, requires=["in_file2"], xor=["invert_xfm", "concat_xfm", "fix_scale_skew"], ), - environ=dict( - nohash=True, - usedefault=True, - ), + environ=dict(nohash=True, usedefault=True), fix_scale_skew=dict( argstr="-fixscaleskew", position=-3, requires=["in_file2"], xor=["invert_xfm", "concat_xfm", "fix_scale_skew"], ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - ), - in_file2=dict( - argstr="%s", - extensions=None, - position=-2, - ), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), + in_file2=dict(argstr="%s", extensions=None, position=-2), invert_xfm=dict( argstr="-inverse", position=-3, @@ -56,11 +42,7 @@ def test_ConvertXFM_inputs(): def test_ConvertXFM_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = ConvertXFM.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py b/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py index 6f15d47bfc..783b0d0752 100644 --- a/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py +++ b/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py @@ -4,9 +4,7 @@ def test_CopyGeom_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), dest_file=dict( argstr="%s", copyfile=True, @@ -17,20 +15,9 @@ def test_CopyGeom_inputs(): output_name="out_file", position=1, ), - environ=dict( - nohash=True, - usedefault=True, - ), - ignore_dims=dict( - argstr="-d", - position="-1", - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=0, - ), + environ=dict(nohash=True, usedefault=True), + ignore_dims=dict(argstr="-d", position="-1"), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), output_type=dict(), ) inputs = CopyGeom.input_spec() @@ -41,11 +28,7 @@ def test_CopyGeom_inputs(): def test_CopyGeom_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = CopyGeom.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_DTIFit.py b/nipype/interfaces/fsl/tests/test_auto_DTIFit.py index 546ffa6848..745fdaeba6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DTIFit.py +++ b/nipype/interfaces/fsl/tests/test_auto_DTIFit.py @@ -4,78 +4,25 @@ def test_DTIFit_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - base_name=dict( - argstr="-o %s", - position=1, - usedefault=True, - ), - bvals=dict( - 
argstr="-b %s", - extensions=None, - mandatory=True, - position=4, - ), - bvecs=dict( - argstr="-r %s", - extensions=None, - mandatory=True, - position=3, - ), - cni=dict( - argstr="--cni=%s", - extensions=None, - ), - dwi=dict( - argstr="-k %s", - extensions=None, - mandatory=True, - position=0, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gradnonlin=dict( - argstr="--gradnonlin=%s", - extensions=None, - ), - little_bit=dict( - argstr="--littlebit", - ), - mask=dict( - argstr="-m %s", - extensions=None, - mandatory=True, - position=2, - ), - max_x=dict( - argstr="-X %d", - ), - max_y=dict( - argstr="-Y %d", - ), - max_z=dict( - argstr="-Z %d", - ), - min_x=dict( - argstr="-x %d", - ), - min_y=dict( - argstr="-y %d", - ), - min_z=dict( - argstr="-z %d", - ), + args=dict(argstr="%s"), + base_name=dict(argstr="-o %s", position=1, usedefault=True), + bvals=dict(argstr="-b %s", extensions=None, mandatory=True, position=4), + bvecs=dict(argstr="-r %s", extensions=None, mandatory=True, position=3), + cni=dict(argstr="--cni=%s", extensions=None), + dwi=dict(argstr="-k %s", extensions=None, mandatory=True, position=0), + environ=dict(nohash=True, usedefault=True), + gradnonlin=dict(argstr="--gradnonlin=%s", extensions=None), + little_bit=dict(argstr="--littlebit"), + mask=dict(argstr="-m %s", extensions=None, mandatory=True, position=2), + max_x=dict(argstr="-X %d"), + max_y=dict(argstr="-Y %d"), + max_z=dict(argstr="-Z %d"), + min_x=dict(argstr="-x %d"), + min_y=dict(argstr="-y %d"), + min_z=dict(argstr="-z %d"), output_type=dict(), - save_tensor=dict( - argstr="--save_tensor", - ), - sse=dict( - argstr="--sse", - ), + save_tensor=dict(argstr="--save_tensor"), + sse=dict(argstr="--sse"), ) inputs = DTIFit.input_spec() @@ -86,42 +33,18 @@ def test_DTIFit_inputs(): def test_DTIFit_outputs(): output_map = dict( - FA=dict( - extensions=None, - ), - L1=dict( - extensions=None, - ), - L2=dict( - extensions=None, - ), - L3=dict( - extensions=None, - ), - MD=dict( - extensions=None, - ), - MO=dict( - extensions=None, - ), - S0=dict( - extensions=None, - ), - V1=dict( - extensions=None, - ), - V2=dict( - extensions=None, - ), - V3=dict( - extensions=None, - ), - sse=dict( - extensions=None, - ), - tensor=dict( - extensions=None, - ), + FA=dict(extensions=None), + L1=dict(extensions=None), + L2=dict(extensions=None), + L3=dict(extensions=None), + MD=dict(extensions=None), + MO=dict(extensions=None), + S0=dict(extensions=None), + V1=dict(extensions=None), + V2=dict(extensions=None), + V3=dict(extensions=None), + sse=dict(extensions=None), + tensor=dict(extensions=None), ) outputs = DTIFit.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_DilateImage.py b/nipype/interfaces/fsl/tests/test_auto_DilateImage.py index ac204e5a38..867e5b2466 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DilateImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_DilateImage.py @@ -4,58 +4,19 @@ def test_DilateImage_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr="-dt %s", - position=1, - ), - kernel_file=dict( - argstr="%s", - extensions=None, - position=5, - xor=["kernel_size"], - ), - kernel_shape=dict( - argstr="-kernel %s", - position=4, - ), - kernel_size=dict( - argstr="%.4f", - position=5, - xor=["kernel_file"], - ), - nan2zeros=dict( - argstr="-nan", - position=3, - ), - operation=dict( - 
argstr="-dil%s", - mandatory=True, - position=6, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + internal_datatype=dict(argstr="-dt %s", position=1), + kernel_file=dict(argstr="%s", extensions=None, position=5, xor=["kernel_size"]), + kernel_shape=dict(argstr="-kernel %s", position=4), + kernel_size=dict(argstr="%.4f", position=5, xor=["kernel_file"]), + nan2zeros=dict(argstr="-nan", position=3), + operation=dict(argstr="-dil%s", mandatory=True, position=6), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr="-odt %s", - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 ), + output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = DilateImage.input_spec() @@ -66,11 +27,7 @@ def test_DilateImage_inputs(): def test_DilateImage_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = DilateImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py b/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py index 74ea024917..db9891a2f0 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py +++ b/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py @@ -4,35 +4,15 @@ def test_DistanceMap_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), distance_map=dict( - argstr="--out=%s", - extensions=None, - genfile=True, - hash_files=False, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="--in=%s", - extensions=None, - mandatory=True, - ), - invert_input=dict( - argstr="--invert", - ), - local_max_file=dict( - argstr="--localmax=%s", - hash_files=False, - ), - mask_file=dict( - argstr="--mask=%s", - extensions=None, + argstr="--out=%s", extensions=None, genfile=True, hash_files=False ), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="--in=%s", extensions=None, mandatory=True), + invert_input=dict(argstr="--invert"), + local_max_file=dict(argstr="--localmax=%s", hash_files=False), + mask_file=dict(argstr="--mask=%s", extensions=None), output_type=dict(), ) inputs = DistanceMap.input_spec() @@ -44,12 +24,7 @@ def test_DistanceMap_inputs(): def test_DistanceMap_outputs(): output_map = dict( - distance_map=dict( - extensions=None, - ), - local_max_file=dict( - extensions=None, - ), + distance_map=dict(extensions=None), local_max_file=dict(extensions=None) ) outputs = DistanceMap.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_DualRegression.py b/nipype/interfaces/fsl/tests/test_auto_DualRegression.py index 4b3d8f6851..8b71777e8b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DualRegression.py +++ b/nipype/interfaces/fsl/tests/test_auto_DualRegression.py @@ -4,55 +4,16 @@ def test_DualRegression_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - con_file=dict( - argstr="%s", - extensions=None, - position=4, - ), - des_norm=dict( - argstr="%i", - position=2, - usedefault=True, - ), - design_file=dict( - argstr="%s", - extensions=None, - position=3, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - group_IC_maps_4D=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=1, - ), - in_files=dict( - argstr="%s", - mandatory=True, - position=-1, - sep=" 
", - ), - n_perm=dict( - argstr="%i", - mandatory=True, - position=5, - ), - one_sample_group_mean=dict( - argstr="-1", - position=3, - ), - out_dir=dict( - argstr="%s", - genfile=True, - position=6, - usedefault=True, - ), + args=dict(argstr="%s"), + con_file=dict(argstr="%s", extensions=None, position=4), + des_norm=dict(argstr="%i", position=2, usedefault=True), + design_file=dict(argstr="%s", extensions=None, position=3), + environ=dict(nohash=True, usedefault=True), + group_IC_maps_4D=dict(argstr="%s", extensions=None, mandatory=True, position=1), + in_files=dict(argstr="%s", mandatory=True, position=-1, sep=" "), + n_perm=dict(argstr="%i", mandatory=True, position=5), + one_sample_group_mean=dict(argstr="-1", position=3), + out_dir=dict(argstr="%s", genfile=True, position=6, usedefault=True), output_type=dict(), ) inputs = DualRegression.input_spec() @@ -63,9 +24,7 @@ def test_DualRegression_inputs(): def test_DualRegression_outputs(): - output_map = dict( - out_dir=dict(), - ) + output_map = dict(out_dir=dict()) outputs = DualRegression.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py b/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py index 0462fa9cbe..a398476793 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py @@ -4,68 +4,22 @@ def test_EPIDeWarp_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - cleanup=dict( - argstr="--cleanup", - ), - dph_file=dict( - argstr="--dph %s", - extensions=None, - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - epi_file=dict( - argstr="--epi %s", - extensions=None, - ), - epidw=dict( - argstr="--epidw %s", - genfile=False, - ), - esp=dict( - argstr="--esp %s", - usedefault=True, - ), - exf_file=dict( - argstr="--exf %s", - extensions=None, - ), - exfdw=dict( - argstr="--exfdw %s", - genfile=True, - ), - mag_file=dict( - argstr="--mag %s", - extensions=None, - mandatory=True, - position=0, - ), - nocleanup=dict( - argstr="--nocleanup", - usedefault=True, - ), + args=dict(argstr="%s"), + cleanup=dict(argstr="--cleanup"), + dph_file=dict(argstr="--dph %s", extensions=None, mandatory=True), + environ=dict(nohash=True, usedefault=True), + epi_file=dict(argstr="--epi %s", extensions=None), + epidw=dict(argstr="--epidw %s", genfile=False), + esp=dict(argstr="--esp %s", usedefault=True), + exf_file=dict(argstr="--exf %s", extensions=None), + exfdw=dict(argstr="--exfdw %s", genfile=True), + mag_file=dict(argstr="--mag %s", extensions=None, mandatory=True, position=0), + nocleanup=dict(argstr="--nocleanup", usedefault=True), output_type=dict(), - sigma=dict( - argstr="--sigma %s", - usedefault=True, - ), - tediff=dict( - argstr="--tediff %s", - usedefault=True, - ), - tmpdir=dict( - argstr="--tmpdir %s", - genfile=True, - ), - vsm=dict( - argstr="--vsm %s", - genfile=True, - ), + sigma=dict(argstr="--sigma %s", usedefault=True), + tediff=dict(argstr="--tediff %s", usedefault=True), + tmpdir=dict(argstr="--tmpdir %s", genfile=True), + vsm=dict(argstr="--vsm %s", genfile=True), ) inputs = EPIDeWarp.input_spec() @@ -76,18 +30,10 @@ def test_EPIDeWarp_inputs(): def test_EPIDeWarp_outputs(): output_map = dict( - exf_mask=dict( - extensions=None, - ), - exfdw=dict( - extensions=None, - ), - unwarped_file=dict( - extensions=None, - ), - vsm_file=dict( - extensions=None, - ), + exf_mask=dict(extensions=None), + exfdw=dict(extensions=None), + unwarped_file=dict(extensions=None), + 
vsm_file=dict(extensions=None), ) outputs = EPIDeWarp.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Eddy.py b/nipype/interfaces/fsl/tests/test_auto_Eddy.py index 0005085474..ae2f013f47 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Eddy.py +++ b/nipype/interfaces/fsl/tests/test_auto_Eddy.py @@ -4,99 +4,33 @@ def test_Eddy_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - cnr_maps=dict( - argstr="--cnr_maps", - min_ver="5.0.10", - ), - dont_peas=dict( - argstr="--dont_peas", - ), - dont_sep_offs_move=dict( - argstr="--dont_sep_offs_move", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + cnr_maps=dict(argstr="--cnr_maps", min_ver="5.0.10"), + dont_peas=dict(argstr="--dont_peas"), + dont_sep_offs_move=dict(argstr="--dont_sep_offs_move"), + environ=dict(nohash=True, usedefault=True), estimate_move_by_susceptibility=dict( - argstr="--estimate_move_by_susceptibility", - min_ver="6.0.1", - ), - fep=dict( - argstr="--fep", - ), - field=dict( - argstr="--field=%s", - extensions=None, - ), - field_mat=dict( - argstr="--field_mat=%s", - extensions=None, - ), - flm=dict( - argstr="--flm=%s", - usedefault=True, - ), - fudge_factor=dict( - argstr="--ff=%s", - usedefault=True, - ), - fwhm=dict( - argstr="--fwhm=%s", - ), - in_acqp=dict( - argstr="--acqp=%s", - extensions=None, - mandatory=True, - ), - in_bval=dict( - argstr="--bvals=%s", - extensions=None, - mandatory=True, - ), - in_bvec=dict( - argstr="--bvecs=%s", - extensions=None, - mandatory=True, - ), - in_file=dict( - argstr="--imain=%s", - extensions=None, - mandatory=True, - ), - in_index=dict( - argstr="--index=%s", - extensions=None, - mandatory=True, - ), - in_mask=dict( - argstr="--mask=%s", - extensions=None, - mandatory=True, - ), + argstr="--estimate_move_by_susceptibility", min_ver="6.0.1" + ), + fep=dict(argstr="--fep"), + field=dict(argstr="--field=%s", extensions=None), + field_mat=dict(argstr="--field_mat=%s", extensions=None), + flm=dict(argstr="--flm=%s", usedefault=True), + fudge_factor=dict(argstr="--ff=%s", usedefault=True), + fwhm=dict(argstr="--fwhm=%s"), + in_acqp=dict(argstr="--acqp=%s", extensions=None, mandatory=True), + in_bval=dict(argstr="--bvals=%s", extensions=None, mandatory=True), + in_bvec=dict(argstr="--bvecs=%s", extensions=None, mandatory=True), + in_file=dict(argstr="--imain=%s", extensions=None, mandatory=True), + in_index=dict(argstr="--index=%s", extensions=None, mandatory=True), + in_mask=dict(argstr="--mask=%s", extensions=None, mandatory=True), in_topup_fieldcoef=dict( - argstr="--topup=%s", - extensions=None, - requires=["in_topup_movpar"], - ), - in_topup_movpar=dict( - extensions=None, - requires=["in_topup_fieldcoef"], - ), - initrand=dict( - argstr="--initrand", - min_ver="5.0.10", - ), - interp=dict( - argstr="--interp=%s", - usedefault=True, - ), - is_shelled=dict( - argstr="--data_is_shelled", + argstr="--topup=%s", extensions=None, requires=["in_topup_movpar"] ), + in_topup_movpar=dict(extensions=None, requires=["in_topup_fieldcoef"]), + initrand=dict(argstr="--initrand", min_ver="5.0.10"), + interp=dict(argstr="--interp=%s", usedefault=True), + is_shelled=dict(argstr="--data_is_shelled"), json=dict( argstr="--json=%s", min_ver="6.0.1", @@ -118,102 +52,38 @@ def test_Eddy_inputs(): min_ver="6.0.1", requires=["estimate_move_by_susceptibility"], ), - method=dict( - argstr="--resamp=%s", - usedefault=True, - ), - mporder=dict( - argstr="--mporder=%s", - min_ver="5.0.11", - requires=["use_cuda"], - ), - multiband_factor=dict( 
- argstr="--mb=%s", - min_ver="5.0.10", - ), + method=dict(argstr="--resamp=%s", usedefault=True), + mporder=dict(argstr="--mporder=%s", min_ver="5.0.11", requires=["use_cuda"]), + multiband_factor=dict(argstr="--mb=%s", min_ver="5.0.10"), multiband_offset=dict( - argstr="--mb_offs=%d", - min_ver="5.0.10", - requires=["multiband_factor"], - ), - niter=dict( - argstr="--niter=%s", - usedefault=True, - ), - num_threads=dict( - nohash=True, - usedefault=True, - ), - nvoxhp=dict( - argstr="--nvoxhp=%s", - usedefault=True, - ), - out_base=dict( - argstr="--out=%s", - usedefault=True, - ), - outlier_nstd=dict( - argstr="--ol_nstd", - min_ver="5.0.10", - requires=["repol"], - ), - outlier_nvox=dict( - argstr="--ol_nvox", - min_ver="5.0.10", - requires=["repol"], - ), - outlier_pos=dict( - argstr="--ol_pos", - min_ver="5.0.10", - requires=["repol"], - ), - outlier_sqr=dict( - argstr="--ol_sqr", - min_ver="5.0.10", - requires=["repol"], - ), - outlier_type=dict( - argstr="--ol_type", - min_ver="5.0.10", - requires=["repol"], - ), + argstr="--mb_offs=%d", min_ver="5.0.10", requires=["multiband_factor"] + ), + niter=dict(argstr="--niter=%s", usedefault=True), + num_threads=dict(nohash=True, usedefault=True), + nvoxhp=dict(argstr="--nvoxhp=%s", usedefault=True), + out_base=dict(argstr="--out=%s", usedefault=True), + outlier_nstd=dict(argstr="--ol_nstd", min_ver="5.0.10", requires=["repol"]), + outlier_nvox=dict(argstr="--ol_nvox", min_ver="5.0.10", requires=["repol"]), + outlier_pos=dict(argstr="--ol_pos", min_ver="5.0.10", requires=["repol"]), + outlier_sqr=dict(argstr="--ol_sqr", min_ver="5.0.10", requires=["repol"]), + outlier_type=dict(argstr="--ol_type", min_ver="5.0.10", requires=["repol"]), output_type=dict(), - repol=dict( - argstr="--repol", - ), - residuals=dict( - argstr="--residuals", - min_ver="5.0.10", - ), - session=dict( - argstr="--session=%s", - extensions=None, - ), + repol=dict(argstr="--repol"), + residuals=dict(argstr="--residuals", min_ver="5.0.10"), + session=dict(argstr="--session=%s", extensions=None), slice2vol_interp=dict( - argstr="--s2v_interp=%s", - min_ver="5.0.11", - requires=["mporder"], + argstr="--s2v_interp=%s", min_ver="5.0.11", requires=["mporder"] ), slice2vol_lambda=dict( - argstr="--s2v_lambda=%d", - min_ver="5.0.11", - requires=["mporder"], + argstr="--s2v_lambda=%d", min_ver="5.0.11", requires=["mporder"] ), slice2vol_niter=dict( - argstr="--s2v_niter=%d", - min_ver="5.0.11", - requires=["mporder"], + argstr="--s2v_niter=%d", min_ver="5.0.11", requires=["mporder"] ), slice_order=dict( - argstr="--slspec=%s", - min_ver="5.0.11", - requires=["mporder"], - xor=["json"], - ), - slm=dict( - argstr="--slm=%s", - usedefault=True, + argstr="--slspec=%s", min_ver="5.0.11", requires=["mporder"], xor=["json"] ), + slm=dict(argstr="--slm=%s", usedefault=True), use_cuda=dict(), ) inputs = Eddy.input_spec() @@ -225,51 +95,21 @@ def test_Eddy_inputs(): def test_Eddy_outputs(): output_map = dict( - out_cnr_maps=dict( - extensions=None, - ), - out_corrected=dict( - extensions=None, - ), - out_movement_over_time=dict( - extensions=None, - ), - out_movement_rms=dict( - extensions=None, - ), - out_outlier_free=dict( - extensions=None, - ), - out_outlier_map=dict( - extensions=None, - ), - out_outlier_n_sqr_stdev_map=dict( - extensions=None, - ), - out_outlier_n_stdev_map=dict( - extensions=None, - ), - out_outlier_report=dict( - extensions=None, - ), - out_parameter=dict( - extensions=None, - ), - out_residuals=dict( - extensions=None, - ), - out_restricted_movement_rms=dict( - 
extensions=None, - ), - out_rotated_bvecs=dict( - extensions=None, - ), - out_shell_alignment_parameters=dict( - extensions=None, - ), - out_shell_pe_translation_parameters=dict( - extensions=None, - ), + out_cnr_maps=dict(extensions=None), + out_corrected=dict(extensions=None), + out_movement_over_time=dict(extensions=None), + out_movement_rms=dict(extensions=None), + out_outlier_free=dict(extensions=None), + out_outlier_map=dict(extensions=None), + out_outlier_n_sqr_stdev_map=dict(extensions=None), + out_outlier_n_stdev_map=dict(extensions=None), + out_outlier_report=dict(extensions=None), + out_parameter=dict(extensions=None), + out_residuals=dict(extensions=None), + out_restricted_movement_rms=dict(extensions=None), + out_rotated_bvecs=dict(extensions=None), + out_shell_alignment_parameters=dict(extensions=None), + out_shell_pe_translation_parameters=dict(extensions=None), ) outputs = Eddy.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py b/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py index e2ce1c0a3b..4d63c36b51 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py +++ b/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py @@ -4,19 +4,9 @@ def test_EddyCorrect_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=0, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), out_file=dict( argstr="%s", extensions=None, @@ -26,12 +16,7 @@ def test_EddyCorrect_inputs(): position=1, ), output_type=dict(), - ref_num=dict( - argstr="%d", - mandatory=True, - position=2, - usedefault=True, - ), + ref_num=dict(argstr="%d", mandatory=True, position=2, usedefault=True), ) inputs = EddyCorrect.input_spec() @@ -41,11 +26,7 @@ def test_EddyCorrect_inputs(): def test_EddyCorrect_outputs(): - output_map = dict( - eddy_corrected=dict( - extensions=None, - ), - ) + output_map = dict(eddy_corrected=dict(extensions=None)) outputs = EddyCorrect.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py b/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py index 3d9756a4be..3b96a2ad6e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py +++ b/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py @@ -4,59 +4,21 @@ def test_EddyQuad_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - base_name=dict( - argstr="%s", - position=0, - usedefault=True, - ), - bval_file=dict( - argstr="--bvals %s", - extensions=None, - mandatory=True, - ), - bvec_file=dict( - argstr="--bvecs %s", - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - field=dict( - argstr="--field %s", - extensions=None, - ), - idx_file=dict( - argstr="--eddyIdx %s", - extensions=None, - mandatory=True, - ), - mask_file=dict( - argstr="--mask %s", - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s"), + base_name=dict(argstr="%s", position=0, usedefault=True), + bval_file=dict(argstr="--bvals %s", extensions=None, mandatory=True), + bvec_file=dict(argstr="--bvecs %s", extensions=None), + environ=dict(nohash=True, usedefault=True), + field=dict(argstr="--field %s", extensions=None), + idx_file=dict(argstr="--eddyIdx %s", extensions=None, mandatory=True), + mask_file=dict(argstr="--mask %s", extensions=None, mandatory=True), output_dir=dict( - 
argstr="--output-dir %s", - name_source=["base_name"], - name_template="%s.qc", + argstr="--output-dir %s", name_source=["base_name"], name_template="%s.qc" ), output_type=dict(), - param_file=dict( - argstr="--eddyParams %s", - extensions=None, - mandatory=True, - ), - slice_spec=dict( - argstr="--slspec %s", - extensions=None, - ), - verbose=dict( - argstr="--verbose", - ), + param_file=dict(argstr="--eddyParams %s", extensions=None, mandatory=True), + slice_spec=dict(argstr="--slspec %s", extensions=None), + verbose=dict(argstr="--verbose"), ) inputs = EddyQuad.input_spec() @@ -69,22 +31,12 @@ def test_EddyQuad_outputs(): output_map = dict( avg_b0_pe_png=dict(), avg_b_png=dict(), - clean_volumes=dict( - extensions=None, - ), + clean_volumes=dict(extensions=None), cnr_png=dict(), - qc_json=dict( - extensions=None, - ), - qc_pdf=dict( - extensions=None, - ), - residuals=dict( - extensions=None, - ), - vdm_png=dict( - extensions=None, - ), + qc_json=dict(extensions=None), + qc_pdf=dict(extensions=None), + residuals=dict(extensions=None), + vdm_png=dict(extensions=None), ) outputs = EddyQuad.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_EpiReg.py b/nipype/interfaces/fsl/tests/test_auto_EpiReg.py index 242c2e6040..2341c9229d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EpiReg.py +++ b/nipype/interfaces/fsl/tests/test_auto_EpiReg.py @@ -4,70 +4,24 @@ def test_EpiReg_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - echospacing=dict( - argstr="--echospacing=%f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - epi=dict( - argstr="--epi=%s", - extensions=None, - mandatory=True, - position=-4, - ), - fmap=dict( - argstr="--fmap=%s", - extensions=None, - ), - fmapmag=dict( - argstr="--fmapmag=%s", - extensions=None, - ), - fmapmagbrain=dict( - argstr="--fmapmagbrain=%s", - extensions=None, - ), - no_clean=dict( - argstr="--noclean", - usedefault=True, - ), - no_fmapreg=dict( - argstr="--nofmapreg", - ), - out_base=dict( - argstr="--out=%s", - position=-1, - usedefault=True, - ), + args=dict(argstr="%s"), + echospacing=dict(argstr="--echospacing=%f"), + environ=dict(nohash=True, usedefault=True), + epi=dict(argstr="--epi=%s", extensions=None, mandatory=True, position=-4), + fmap=dict(argstr="--fmap=%s", extensions=None), + fmapmag=dict(argstr="--fmapmag=%s", extensions=None), + fmapmagbrain=dict(argstr="--fmapmagbrain=%s", extensions=None), + no_clean=dict(argstr="--noclean", usedefault=True), + no_fmapreg=dict(argstr="--nofmapreg"), + out_base=dict(argstr="--out=%s", position=-1, usedefault=True), output_type=dict(), - pedir=dict( - argstr="--pedir=%s", - ), + pedir=dict(argstr="--pedir=%s"), t1_brain=dict( - argstr="--t1brain=%s", - extensions=None, - mandatory=True, - position=-2, - ), - t1_head=dict( - argstr="--t1=%s", - extensions=None, - mandatory=True, - position=-3, - ), - weight_image=dict( - argstr="--weight=%s", - extensions=None, - ), - wmseg=dict( - argstr="--wmseg=%s", - extensions=None, + argstr="--t1brain=%s", extensions=None, mandatory=True, position=-2 ), + t1_head=dict(argstr="--t1=%s", extensions=None, mandatory=True, position=-3), + weight_image=dict(argstr="--weight=%s", extensions=None), + wmseg=dict(argstr="--wmseg=%s", extensions=None), ) inputs = EpiReg.input_spec() @@ -78,48 +32,20 @@ def test_EpiReg_inputs(): def test_EpiReg_outputs(): output_map = dict( - epi2str_inv=dict( - extensions=None, - ), - epi2str_mat=dict( - extensions=None, - ), - fmap2epi_mat=dict( - extensions=None, - ), - fmap2str_mat=dict( - 
extensions=None, - ), - fmap_epi=dict( - extensions=None, - ), - fmap_str=dict( - extensions=None, - ), - fmapmag_str=dict( - extensions=None, - ), - fullwarp=dict( - extensions=None, - ), - out_1vol=dict( - extensions=None, - ), - out_file=dict( - extensions=None, - ), - seg=dict( - extensions=None, - ), - shiftmap=dict( - extensions=None, - ), - wmedge=dict( - extensions=None, - ), - wmseg=dict( - extensions=None, - ), + epi2str_inv=dict(extensions=None), + epi2str_mat=dict(extensions=None), + fmap2epi_mat=dict(extensions=None), + fmap2str_mat=dict(extensions=None), + fmap_epi=dict(extensions=None), + fmap_str=dict(extensions=None), + fmapmag_str=dict(extensions=None), + fullwarp=dict(extensions=None), + out_1vol=dict(extensions=None), + out_file=dict(extensions=None), + seg=dict(extensions=None), + shiftmap=dict(extensions=None), + wmedge=dict(extensions=None), + wmseg=dict(extensions=None), ) outputs = EpiReg.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py b/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py index 1aad31cd16..a4f066c297 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py @@ -4,58 +4,19 @@ def test_ErodeImage_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr="-dt %s", - position=1, - ), - kernel_file=dict( - argstr="%s", - extensions=None, - position=5, - xor=["kernel_size"], - ), - kernel_shape=dict( - argstr="-kernel %s", - position=4, - ), - kernel_size=dict( - argstr="%.4f", - position=5, - xor=["kernel_file"], - ), - minimum_filter=dict( - argstr="%s", - position=6, - usedefault=True, - ), - nan2zeros=dict( - argstr="-nan", - position=3, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + internal_datatype=dict(argstr="-dt %s", position=1), + kernel_file=dict(argstr="%s", extensions=None, position=5, xor=["kernel_size"]), + kernel_shape=dict(argstr="-kernel %s", position=4), + kernel_size=dict(argstr="%.4f", position=5, xor=["kernel_file"]), + minimum_filter=dict(argstr="%s", position=6, usedefault=True), + nan2zeros=dict(argstr="-nan", position=3), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr="-odt %s", - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 ), + output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = ErodeImage.input_spec() @@ -66,11 +27,7 @@ def test_ErodeImage_inputs(): def test_ErodeImage_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = ErodeImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py b/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py index bd6acb137c..cfb7523757 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py +++ b/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py @@ -4,9 +4,7 @@ def test_ExtractROI_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), crop_list=dict( argstr="%s", position=2, @@ -21,56 +19,20 @@ def test_ExtractROI_inputs(): "t_size", ], ), - environ=dict( - 
nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=0, - ), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), output_type=dict(), roi_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=1, - ), - t_min=dict( - argstr="%d", - position=8, - ), - t_size=dict( - argstr="%d", - position=9, - ), - x_min=dict( - argstr="%d", - position=2, - ), - x_size=dict( - argstr="%d", - position=3, - ), - y_min=dict( - argstr="%d", - position=4, - ), - y_size=dict( - argstr="%d", - position=5, - ), - z_min=dict( - argstr="%d", - position=6, - ), - z_size=dict( - argstr="%d", - position=7, - ), + argstr="%s", extensions=None, genfile=True, hash_files=False, position=1 + ), + t_min=dict(argstr="%d", position=8), + t_size=dict(argstr="%d", position=9), + x_min=dict(argstr="%d", position=2), + x_size=dict(argstr="%d", position=3), + y_min=dict(argstr="%d", position=4), + y_size=dict(argstr="%d", position=5), + z_min=dict(argstr="%d", position=6), + z_size=dict(argstr="%d", position=7), ) inputs = ExtractROI.input_spec() @@ -80,11 +42,7 @@ def test_ExtractROI_inputs(): def test_ExtractROI_outputs(): - output_map = dict( - roi_file=dict( - extensions=None, - ), - ) + output_map = dict(roi_file=dict(extensions=None)) outputs = ExtractROI.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FAST.py b/nipype/interfaces/fsl/tests/test_auto_FAST.py index e775d97b35..0be8b88bf6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FAST.py +++ b/nipype/interfaces/fsl/tests/test_auto_FAST.py @@ -4,87 +4,31 @@ def test_FAST_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bias_iters=dict( - argstr="-I %d", - ), - bias_lowpass=dict( - argstr="-l %d", - units="mm", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - hyper=dict( - argstr="-H %.2f", - ), - img_type=dict( - argstr="-t %d", - ), - in_files=dict( - argstr="%s", - copyfile=False, - mandatory=True, - position=-1, - ), - init_seg_smooth=dict( - argstr="-f %.3f", - ), - init_transform=dict( - argstr="-a %s", - extensions=None, - ), - iters_afterbias=dict( - argstr="-O %d", - ), - manual_seg=dict( - argstr="-s %s", - extensions=None, - ), - mixel_smooth=dict( - argstr="-R %.2f", - ), - no_bias=dict( - argstr="-N", - ), - no_pve=dict( - argstr="--nopve", - ), - number_classes=dict( - argstr="-n %d", - ), - other_priors=dict( - argstr="-A %s", - ), - out_basename=dict( - argstr="-o %s", - extensions=None, - ), - output_biascorrected=dict( - argstr="-B", - ), - output_biasfield=dict( - argstr="-b", - ), + args=dict(argstr="%s"), + bias_iters=dict(argstr="-I %d"), + bias_lowpass=dict(argstr="-l %d", units="mm"), + environ=dict(nohash=True, usedefault=True), + hyper=dict(argstr="-H %.2f"), + img_type=dict(argstr="-t %d"), + in_files=dict(argstr="%s", copyfile=False, mandatory=True, position=-1), + init_seg_smooth=dict(argstr="-f %.3f"), + init_transform=dict(argstr="-a %s", extensions=None), + iters_afterbias=dict(argstr="-O %d"), + manual_seg=dict(argstr="-s %s", extensions=None), + mixel_smooth=dict(argstr="-R %.2f"), + no_bias=dict(argstr="-N"), + no_pve=dict(argstr="--nopve"), + number_classes=dict(argstr="-n %d"), + other_priors=dict(argstr="-A %s"), + out_basename=dict(argstr="-o %s", extensions=None), + output_biascorrected=dict(argstr="-B"), + output_biasfield=dict(argstr="-b"), output_type=dict(), - 
probability_maps=dict( - argstr="-p", - ), - segment_iters=dict( - argstr="-W %d", - ), - segments=dict( - argstr="-g", - ), - use_priors=dict( - argstr="-P", - ), - verbose=dict( - argstr="-v", - ), + probability_maps=dict(argstr="-p"), + segment_iters=dict(argstr="-W %d"), + segments=dict(argstr="-g"), + use_priors=dict(argstr="-P"), + verbose=dict(argstr="-v"), ) inputs = FAST.input_spec() @@ -96,19 +40,13 @@ def test_FAST_inputs(): def test_FAST_outputs(): output_map = dict( bias_field=dict(), - mixeltype=dict( - extensions=None, - ), + mixeltype=dict(extensions=None), partial_volume_files=dict(), - partial_volume_map=dict( - extensions=None, - ), + partial_volume_map=dict(extensions=None), probability_maps=dict(), restored_image=dict(), tissue_class_files=dict(), - tissue_class_map=dict( - extensions=None, - ), + tissue_class_map=dict(extensions=None), ) outputs = FAST.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FEAT.py b/nipype/interfaces/fsl/tests/test_auto_FEAT.py index b363dd290f..4521b9d55c 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FEAT.py +++ b/nipype/interfaces/fsl/tests/test_auto_FEAT.py @@ -4,19 +4,9 @@ def test_FEAT_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fsf_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=0, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + fsf_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), output_type=dict(), ) inputs = FEAT.input_spec() @@ -27,9 +17,7 @@ def test_FEAT_inputs(): def test_FEAT_outputs(): - output_map = dict( - feat_dir=dict(), - ) + output_map = dict(feat_dir=dict()) outputs = FEAT.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FEATModel.py b/nipype/interfaces/fsl/tests/test_auto_FEATModel.py index 0e6c2f9e33..304cd029e0 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FEATModel.py +++ b/nipype/interfaces/fsl/tests/test_auto_FEATModel.py @@ -4,25 +4,11 @@ def test_FEATModel_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - ev_files=dict( - argstr="%s", - copyfile=False, - mandatory=True, - position=1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + ev_files=dict(argstr="%s", copyfile=False, mandatory=True, position=1), fsf_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=0, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0 ), output_type=dict(), ) @@ -35,21 +21,11 @@ def test_FEATModel_inputs(): def test_FEATModel_outputs(): output_map = dict( - con_file=dict( - extensions=None, - ), - design_cov=dict( - extensions=None, - ), - design_file=dict( - extensions=None, - ), - design_image=dict( - extensions=None, - ), - fcon_file=dict( - extensions=None, - ), + con_file=dict(extensions=None), + design_cov=dict(extensions=None), + design_file=dict(extensions=None), + design_image=dict(extensions=None), + fcon_file=dict(extensions=None), ) outputs = FEATModel.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py b/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py index fe09c468ec..bb8637703c 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py +++ b/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py @@ -4,16 +4,9 @@ def test_FEATRegister_inputs(): input_map = dict( - feat_dirs=dict( - 
mandatory=True, - ), - reg_dof=dict( - usedefault=True, - ), - reg_image=dict( - extensions=None, - mandatory=True, - ), + feat_dirs=dict(mandatory=True), + reg_dof=dict(usedefault=True), + reg_image=dict(extensions=None, mandatory=True), ) inputs = FEATRegister.input_spec() @@ -23,11 +16,7 @@ def test_FEATRegister_inputs(): def test_FEATRegister_outputs(): - output_map = dict( - fsf_file=dict( - extensions=None, - ), - ) + output_map = dict(fsf_file=dict(extensions=None)) outputs = FEATRegister.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FIRST.py b/nipype/interfaces/fsl/tests/test_auto_FIRST.py index 42ba79e799..d3ff1c9c26 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FIRST.py +++ b/nipype/interfaces/fsl/tests/test_auto_FIRST.py @@ -4,48 +4,22 @@ def test_FIRST_inputs(): input_map = dict( - affine_file=dict( - argstr="-a %s", - extensions=None, - position=6, - ), - args=dict( - argstr="%s", - ), - brain_extracted=dict( - argstr="-b", - position=2, - ), - environ=dict( - nohash=True, - usedefault=True, - ), + affine_file=dict(argstr="-a %s", extensions=None, position=6), + args=dict(argstr="%s"), + brain_extracted=dict(argstr="-b", position=2), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="-i %s", - copyfile=False, - extensions=None, - mandatory=True, - position=-2, - ), - list_of_specific_structures=dict( - argstr="-s %s", - position=5, - sep=",", + argstr="-i %s", copyfile=False, extensions=None, mandatory=True, position=-2 ), + list_of_specific_structures=dict(argstr="-s %s", position=5, sep=","), method=dict( argstr="-m %s", position=4, usedefault=True, xor=["method_as_numerical_threshold"], ), - method_as_numerical_threshold=dict( - argstr="-m %.4f", - position=4, - ), - no_cleanup=dict( - argstr="-d", - position=3, - ), + method_as_numerical_threshold=dict(argstr="-m %.4f", position=4), + no_cleanup=dict(argstr="-d", position=3), out_file=dict( argstr="-o %s", extensions=None, @@ -55,10 +29,7 @@ def test_FIRST_inputs(): usedefault=True, ), output_type=dict(), - verbose=dict( - argstr="-v", - position=1, - ), + verbose=dict(argstr="-v", position=1), ) inputs = FIRST.input_spec() @@ -70,12 +41,8 @@ def test_FIRST_inputs(): def test_FIRST_outputs(): output_map = dict( bvars=dict(), - original_segmentations=dict( - extensions=None, - ), - segmentation_file=dict( - extensions=None, - ), + original_segmentations=dict(extensions=None), + segmentation_file=dict(extensions=None), vtk_surfaces=dict(), ) outputs = FIRST.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py b/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py index f25b225d6e..465f322dc2 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py +++ b/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py @@ -4,83 +4,29 @@ def test_FLAMEO_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - burnin=dict( - argstr="--burnin=%d", - ), - cope_file=dict( - argstr="--copefile=%s", - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s"), + burnin=dict(argstr="--burnin=%d"), + cope_file=dict(argstr="--copefile=%s", extensions=None, mandatory=True), cov_split_file=dict( - argstr="--covsplitfile=%s", - extensions=None, - mandatory=True, - ), - design_file=dict( - argstr="--designfile=%s", - extensions=None, - mandatory=True, - ), - dof_var_cope_file=dict( - argstr="--dofvarcopefile=%s", - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - f_con_file=dict( - 
argstr="--fcontrastsfile=%s", - extensions=None, - ), - fix_mean=dict( - argstr="--fixmean", - ), - infer_outliers=dict( - argstr="--inferoutliers", - ), - log_dir=dict( - argstr="--ld=%s", - usedefault=True, - ), - mask_file=dict( - argstr="--maskfile=%s", - extensions=None, - mandatory=True, - ), - n_jumps=dict( - argstr="--njumps=%d", - ), - no_pe_outputs=dict( - argstr="--nopeoutput", - ), - outlier_iter=dict( - argstr="--ioni=%d", - ), + argstr="--covsplitfile=%s", extensions=None, mandatory=True + ), + design_file=dict(argstr="--designfile=%s", extensions=None, mandatory=True), + dof_var_cope_file=dict(argstr="--dofvarcopefile=%s", extensions=None), + environ=dict(nohash=True, usedefault=True), + f_con_file=dict(argstr="--fcontrastsfile=%s", extensions=None), + fix_mean=dict(argstr="--fixmean"), + infer_outliers=dict(argstr="--inferoutliers"), + log_dir=dict(argstr="--ld=%s", usedefault=True), + mask_file=dict(argstr="--maskfile=%s", extensions=None, mandatory=True), + n_jumps=dict(argstr="--njumps=%d"), + no_pe_outputs=dict(argstr="--nopeoutput"), + outlier_iter=dict(argstr="--ioni=%d"), output_type=dict(), - run_mode=dict( - argstr="--runmode=%s", - mandatory=True, - ), - sample_every=dict( - argstr="--sampleevery=%d", - ), - sigma_dofs=dict( - argstr="--sigma_dofs=%d", - ), - t_con_file=dict( - argstr="--tcontrastsfile=%s", - extensions=None, - mandatory=True, - ), - var_cope_file=dict( - argstr="--varcopefile=%s", - extensions=None, - ), + run_mode=dict(argstr="--runmode=%s", mandatory=True), + sample_every=dict(argstr="--sampleevery=%d"), + sigma_dofs=dict(argstr="--sigma_dofs=%d"), + t_con_file=dict(argstr="--tcontrastsfile=%s", extensions=None, mandatory=True), + var_cope_file=dict(argstr="--varcopefile=%s", extensions=None), ) inputs = FLAMEO.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FLIRT.py b/nipype/interfaces/fsl/tests/test_auto_FLIRT.py index a9bdc38477..91f9d890f9 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FLIRT.py +++ b/nipype/interfaces/fsl/tests/test_auto_FLIRT.py @@ -4,110 +4,35 @@ def test_FLIRT_inputs(): input_map = dict( - angle_rep=dict( - argstr="-anglerep %s", - ), - apply_isoxfm=dict( - argstr="-applyisoxfm %f", - xor=["apply_xfm"], - ), - apply_xfm=dict( - argstr="-applyxfm", - ), - args=dict( - argstr="%s", - ), - bbrslope=dict( - argstr="-bbrslope %f", - min_ver="5.0.0", - ), - bbrtype=dict( - argstr="-bbrtype %s", - min_ver="5.0.0", - ), - bgvalue=dict( - argstr="-setbackground %f", - ), - bins=dict( - argstr="-bins %d", - ), - coarse_search=dict( - argstr="-coarsesearch %d", - units="degrees", - ), - cost=dict( - argstr="-cost %s", - ), - cost_func=dict( - argstr="-searchcost %s", - ), - datatype=dict( - argstr="-datatype %s", - ), - display_init=dict( - argstr="-displayinit", - ), - dof=dict( - argstr="-dof %d", - ), - echospacing=dict( - argstr="-echospacing %f", - min_ver="5.0.0", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fieldmap=dict( - argstr="-fieldmap %s", - extensions=None, - min_ver="5.0.0", - ), - fieldmapmask=dict( - argstr="-fieldmapmask %s", - extensions=None, - min_ver="5.0.0", - ), - fine_search=dict( - argstr="-finesearch %d", - units="degrees", - ), - force_scaling=dict( - argstr="-forcescaling", - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - position=0, - ), - in_matrix_file=dict( - argstr="-init %s", - extensions=None, - ), - in_weight=dict( - argstr="-inweight %s", - extensions=None, - ), - interp=dict( - argstr="-interp %s", - ), - min_sampling=dict( - 
argstr="-minsampling %f", - units="mm", - ), - no_clamp=dict( - argstr="-noclamp", - ), - no_resample=dict( - argstr="-noresample", - ), - no_resample_blur=dict( - argstr="-noresampblur", - ), - no_search=dict( - argstr="-nosearch", - ), + angle_rep=dict(argstr="-anglerep %s"), + apply_isoxfm=dict(argstr="-applyisoxfm %f", xor=["apply_xfm"]), + apply_xfm=dict(argstr="-applyxfm"), + args=dict(argstr="%s"), + bbrslope=dict(argstr="-bbrslope %f", min_ver="5.0.0"), + bbrtype=dict(argstr="-bbrtype %s", min_ver="5.0.0"), + bgvalue=dict(argstr="-setbackground %f"), + bins=dict(argstr="-bins %d"), + coarse_search=dict(argstr="-coarsesearch %d", units="degrees"), + cost=dict(argstr="-cost %s"), + cost_func=dict(argstr="-searchcost %s"), + datatype=dict(argstr="-datatype %s"), + display_init=dict(argstr="-displayinit"), + dof=dict(argstr="-dof %d"), + echospacing=dict(argstr="-echospacing %f", min_ver="5.0.0"), + environ=dict(nohash=True, usedefault=True), + fieldmap=dict(argstr="-fieldmap %s", extensions=None, min_ver="5.0.0"), + fieldmapmask=dict(argstr="-fieldmapmask %s", extensions=None, min_ver="5.0.0"), + fine_search=dict(argstr="-finesearch %d", units="degrees"), + force_scaling=dict(argstr="-forcescaling"), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=0), + in_matrix_file=dict(argstr="-init %s", extensions=None), + in_weight=dict(argstr="-inweight %s", extensions=None), + interp=dict(argstr="-interp %s"), + min_sampling=dict(argstr="-minsampling %f", units="mm"), + no_clamp=dict(argstr="-noclamp"), + no_resample=dict(argstr="-noresample"), + no_resample_blur=dict(argstr="-noresampblur"), + no_search=dict(argstr="-nosearch"), out_file=dict( argstr="-out %s", extensions=None, @@ -133,72 +58,23 @@ def test_FLIRT_inputs(): position=3, ), output_type=dict(), - padding_size=dict( - argstr="-paddingsize %d", - units="voxels", - ), - pedir=dict( - argstr="-pedir %d", - min_ver="5.0.0", - ), - ref_weight=dict( - argstr="-refweight %s", - extensions=None, - ), - reference=dict( - argstr="-ref %s", - extensions=None, - mandatory=True, - position=1, - ), - rigid2D=dict( - argstr="-2D", - ), + padding_size=dict(argstr="-paddingsize %d", units="voxels"), + pedir=dict(argstr="-pedir %d", min_ver="5.0.0"), + ref_weight=dict(argstr="-refweight %s", extensions=None), + reference=dict(argstr="-ref %s", extensions=None, mandatory=True, position=1), + rigid2D=dict(argstr="-2D"), save_log=dict(), - schedule=dict( - argstr="-schedule %s", - extensions=None, - ), - searchr_x=dict( - argstr="-searchrx %s", - units="degrees", - ), - searchr_y=dict( - argstr="-searchry %s", - units="degrees", - ), - searchr_z=dict( - argstr="-searchrz %s", - units="degrees", - ), - sinc_width=dict( - argstr="-sincwidth %d", - units="voxels", - ), - sinc_window=dict( - argstr="-sincwindow %s", - ), - uses_qform=dict( - argstr="-usesqform", - ), - verbose=dict( - argstr="-verbose %d", - ), - wm_seg=dict( - argstr="-wmseg %s", - extensions=None, - min_ver="5.0.0", - ), - wmcoords=dict( - argstr="-wmcoords %s", - extensions=None, - min_ver="5.0.0", - ), - wmnorms=dict( - argstr="-wmnorms %s", - extensions=None, - min_ver="5.0.0", - ), + schedule=dict(argstr="-schedule %s", extensions=None), + searchr_x=dict(argstr="-searchrx %s", units="degrees"), + searchr_y=dict(argstr="-searchry %s", units="degrees"), + searchr_z=dict(argstr="-searchrz %s", units="degrees"), + sinc_width=dict(argstr="-sincwidth %d", units="voxels"), + sinc_window=dict(argstr="-sincwindow %s"), + uses_qform=dict(argstr="-usesqform"), + 
verbose=dict(argstr="-verbose %d"), + wm_seg=dict(argstr="-wmseg %s", extensions=None, min_ver="5.0.0"), + wmcoords=dict(argstr="-wmcoords %s", extensions=None, min_ver="5.0.0"), + wmnorms=dict(argstr="-wmnorms %s", extensions=None, min_ver="5.0.0"), ) inputs = FLIRT.input_spec() @@ -209,15 +85,9 @@ def test_FLIRT_inputs(): def test_FLIRT_outputs(): output_map = dict( - out_file=dict( - extensions=None, - ), - out_log=dict( - extensions=None, - ), - out_matrix_file=dict( - extensions=None, - ), + out_file=dict(extensions=None), + out_log=dict(extensions=None), + out_matrix_file=dict(extensions=None), ) outputs = FLIRT.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FNIRT.py b/nipype/interfaces/fsl/tests/test_auto_FNIRT.py index eb6ae1f714..56a4f518bb 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FNIRT.py +++ b/nipype/interfaces/fsl/tests/test_auto_FNIRT.py @@ -4,169 +4,57 @@ def test_FNIRT_inputs(): input_map = dict( - affine_file=dict( - argstr="--aff=%s", - extensions=None, - ), - apply_inmask=dict( - argstr="--applyinmask=%s", - sep=",", - xor=["skip_inmask"], - ), + affine_file=dict(argstr="--aff=%s", extensions=None), + apply_inmask=dict(argstr="--applyinmask=%s", sep=",", xor=["skip_inmask"]), apply_intensity_mapping=dict( - argstr="--estint=%s", - sep=",", - xor=["skip_intensity_mapping"], - ), - apply_refmask=dict( - argstr="--applyrefmask=%s", - sep=",", - xor=["skip_refmask"], - ), - args=dict( - argstr="%s", - ), - bias_regularization_lambda=dict( - argstr="--biaslambda=%f", - ), - biasfield_resolution=dict( - argstr="--biasres=%d,%d,%d", - ), - config_file=dict( - argstr="--config=%s", - ), - derive_from_ref=dict( - argstr="--refderiv", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - field_file=dict( - argstr="--fout=%s", - hash_files=False, - ), - fieldcoeff_file=dict( - argstr="--cout=%s", - ), - hessian_precision=dict( - argstr="--numprec=%s", - ), - in_file=dict( - argstr="--in=%s", - extensions=None, - mandatory=True, - ), - in_fwhm=dict( - argstr="--infwhm=%s", - sep=",", - ), - in_intensitymap_file=dict( - argstr="--intin=%s", - copyfile=False, - ), - inmask_file=dict( - argstr="--inmask=%s", - extensions=None, - ), - inmask_val=dict( - argstr="--impinval=%f", - ), - intensity_mapping_model=dict( - argstr="--intmod=%s", - ), - intensity_mapping_order=dict( - argstr="--intorder=%d", - ), - inwarp_file=dict( - argstr="--inwarp=%s", - extensions=None, - ), - jacobian_file=dict( - argstr="--jout=%s", - hash_files=False, - ), - jacobian_range=dict( - argstr="--jacrange=%f,%f", - ), + argstr="--estint=%s", sep=",", xor=["skip_intensity_mapping"] + ), + apply_refmask=dict(argstr="--applyrefmask=%s", sep=",", xor=["skip_refmask"]), + args=dict(argstr="%s"), + bias_regularization_lambda=dict(argstr="--biaslambda=%f"), + biasfield_resolution=dict(argstr="--biasres=%d,%d,%d"), + config_file=dict(argstr="--config=%s"), + derive_from_ref=dict(argstr="--refderiv"), + environ=dict(nohash=True, usedefault=True), + field_file=dict(argstr="--fout=%s", hash_files=False), + fieldcoeff_file=dict(argstr="--cout=%s"), + hessian_precision=dict(argstr="--numprec=%s"), + in_file=dict(argstr="--in=%s", extensions=None, mandatory=True), + in_fwhm=dict(argstr="--infwhm=%s", sep=","), + in_intensitymap_file=dict(argstr="--intin=%s", copyfile=False), + inmask_file=dict(argstr="--inmask=%s", extensions=None), + inmask_val=dict(argstr="--impinval=%f"), + intensity_mapping_model=dict(argstr="--intmod=%s"), + intensity_mapping_order=dict(argstr="--intorder=%d"), + 
inwarp_file=dict(argstr="--inwarp=%s", extensions=None), + jacobian_file=dict(argstr="--jout=%s", hash_files=False), + jacobian_range=dict(argstr="--jacrange=%f,%f"), log_file=dict( - argstr="--logout=%s", - extensions=None, - genfile=True, - hash_files=False, - ), - max_nonlin_iter=dict( - argstr="--miter=%s", - sep=",", - ), - modulatedref_file=dict( - argstr="--refout=%s", - hash_files=False, - ), - out_intensitymap_file=dict( - argstr="--intout=%s", - hash_files=False, + argstr="--logout=%s", extensions=None, genfile=True, hash_files=False ), + max_nonlin_iter=dict(argstr="--miter=%s", sep=","), + modulatedref_file=dict(argstr="--refout=%s", hash_files=False), + out_intensitymap_file=dict(argstr="--intout=%s", hash_files=False), output_type=dict(), - ref_file=dict( - argstr="--ref=%s", - extensions=None, - mandatory=True, - ), - ref_fwhm=dict( - argstr="--reffwhm=%s", - sep=",", - ), - refmask_file=dict( - argstr="--refmask=%s", - extensions=None, - ), - refmask_val=dict( - argstr="--imprefval=%f", - ), - regularization_lambda=dict( - argstr="--lambda=%s", - sep=",", - ), - regularization_model=dict( - argstr="--regmod=%s", - ), - skip_implicit_in_masking=dict( - argstr="--impinm=0", - ), - skip_implicit_ref_masking=dict( - argstr="--imprefm=0", - ), - skip_inmask=dict( - argstr="--applyinmask=0", - xor=["apply_inmask"], - ), + ref_file=dict(argstr="--ref=%s", extensions=None, mandatory=True), + ref_fwhm=dict(argstr="--reffwhm=%s", sep=","), + refmask_file=dict(argstr="--refmask=%s", extensions=None), + refmask_val=dict(argstr="--imprefval=%f"), + regularization_lambda=dict(argstr="--lambda=%s", sep=","), + regularization_model=dict(argstr="--regmod=%s"), + skip_implicit_in_masking=dict(argstr="--impinm=0"), + skip_implicit_ref_masking=dict(argstr="--imprefm=0"), + skip_inmask=dict(argstr="--applyinmask=0", xor=["apply_inmask"]), skip_intensity_mapping=dict( - argstr="--estint=0", - xor=["apply_intensity_mapping"], - ), - skip_lambda_ssq=dict( - argstr="--ssqlambda=0", - ), - skip_refmask=dict( - argstr="--applyrefmask=0", - xor=["apply_refmask"], - ), - spline_order=dict( - argstr="--splineorder=%d", - ), - subsampling_scheme=dict( - argstr="--subsamp=%s", - sep=",", - ), - warp_resolution=dict( - argstr="--warpres=%d,%d,%d", + argstr="--estint=0", xor=["apply_intensity_mapping"] ), + skip_lambda_ssq=dict(argstr="--ssqlambda=0"), + skip_refmask=dict(argstr="--applyrefmask=0", xor=["apply_refmask"]), + spline_order=dict(argstr="--splineorder=%d"), + subsampling_scheme=dict(argstr="--subsamp=%s", sep=","), + warp_resolution=dict(argstr="--warpres=%d,%d,%d"), warped_file=dict( - argstr="--iout=%s", - extensions=None, - genfile=True, - hash_files=False, + argstr="--iout=%s", extensions=None, genfile=True, hash_files=False ), ) inputs = FNIRT.input_spec() @@ -178,25 +66,13 @@ def test_FNIRT_inputs(): def test_FNIRT_outputs(): output_map = dict( - field_file=dict( - extensions=None, - ), - fieldcoeff_file=dict( - extensions=None, - ), - jacobian_file=dict( - extensions=None, - ), - log_file=dict( - extensions=None, - ), - modulatedref_file=dict( - extensions=None, - ), + field_file=dict(extensions=None), + fieldcoeff_file=dict(extensions=None), + jacobian_file=dict(extensions=None), + log_file=dict(extensions=None), + modulatedref_file=dict(extensions=None), out_intensitymap_file=dict(), - warped_file=dict( - extensions=None, - ), + warped_file=dict(extensions=None), ) outputs = FNIRT.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py 
b/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py index 1b444c381e..0fc059c207 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py +++ b/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py @@ -4,13 +4,8 @@ def test_FSLCommand_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), output_type=dict(), ) inputs = FSLCommand.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py b/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py index 3948f3d650..5ecdc4135b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py +++ b/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py @@ -4,107 +4,34 @@ def test_FSLXCommand_inputs(): input_map = dict( - all_ard=dict( - argstr="--allard", - xor=("no_ard", "all_ard"), - ), - args=dict( - argstr="%s", - ), - burn_in=dict( - argstr="--burnin=%d", - usedefault=True, - ), - burn_in_no_ard=dict( - argstr="--burnin_noard=%d", - usedefault=True, - ), - bvals=dict( - argstr="--bvals=%s", - extensions=None, - mandatory=True, - ), - bvecs=dict( - argstr="--bvecs=%s", - extensions=None, - mandatory=True, - ), - cnlinear=dict( - argstr="--cnonlinear", - xor=("no_spat", "non_linear", "cnlinear"), - ), - dwi=dict( - argstr="--data=%s", - extensions=None, - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - f0_ard=dict( - argstr="--f0 --ardf0", - xor=["f0_noard", "f0_ard", "all_ard"], - ), - f0_noard=dict( - argstr="--f0", - xor=["f0_noard", "f0_ard"], - ), - force_dir=dict( - argstr="--forcedir", - usedefault=True, - ), - fudge=dict( - argstr="--fudge=%d", - ), - logdir=dict( - argstr="--logdir=%s", - usedefault=True, - ), - mask=dict( - argstr="--mask=%s", - extensions=None, - mandatory=True, - ), - model=dict( - argstr="--model=%d", - ), - n_fibres=dict( - argstr="--nfibres=%d", - mandatory=True, - usedefault=True, - ), - n_jumps=dict( - argstr="--njumps=%d", - usedefault=True, - ), - no_ard=dict( - argstr="--noard", - xor=("no_ard", "all_ard"), - ), - no_spat=dict( - argstr="--nospat", - xor=("no_spat", "non_linear", "cnlinear"), - ), + all_ard=dict(argstr="--allard", xor=("no_ard", "all_ard")), + args=dict(argstr="%s"), + burn_in=dict(argstr="--burnin=%d", usedefault=True), + burn_in_no_ard=dict(argstr="--burnin_noard=%d", usedefault=True), + bvals=dict(argstr="--bvals=%s", extensions=None, mandatory=True), + bvecs=dict(argstr="--bvecs=%s", extensions=None, mandatory=True), + cnlinear=dict(argstr="--cnonlinear", xor=("no_spat", "non_linear", "cnlinear")), + dwi=dict(argstr="--data=%s", extensions=None, mandatory=True), + environ=dict(nohash=True, usedefault=True), + f0_ard=dict(argstr="--f0 --ardf0", xor=["f0_noard", "f0_ard", "all_ard"]), + f0_noard=dict(argstr="--f0", xor=["f0_noard", "f0_ard"]), + force_dir=dict(argstr="--forcedir", usedefault=True), + fudge=dict(argstr="--fudge=%d"), + logdir=dict(argstr="--logdir=%s", usedefault=True), + mask=dict(argstr="--mask=%s", extensions=None, mandatory=True), + model=dict(argstr="--model=%d"), + n_fibres=dict(argstr="--nfibres=%d", mandatory=True, usedefault=True), + n_jumps=dict(argstr="--njumps=%d", usedefault=True), + no_ard=dict(argstr="--noard", xor=("no_ard", "all_ard")), + no_spat=dict(argstr="--nospat", xor=("no_spat", "non_linear", "cnlinear")), non_linear=dict( - argstr="--nonlinear", - xor=("no_spat", "non_linear", "cnlinear"), + argstr="--nonlinear", xor=("no_spat", "non_linear", "cnlinear") ), 
output_type=dict(), - rician=dict( - argstr="--rician", - ), - sample_every=dict( - argstr="--sampleevery=%d", - usedefault=True, - ), - seed=dict( - argstr="--seed=%d", - ), - update_proposal_every=dict( - argstr="--updateproposalevery=%d", - usedefault=True, - ), + rician=dict(argstr="--rician"), + sample_every=dict(argstr="--sampleevery=%d", usedefault=True), + seed=dict(argstr="--seed=%d"), + update_proposal_every=dict(argstr="--updateproposalevery=%d", usedefault=True), ) inputs = FSLXCommand.input_spec() @@ -117,16 +44,10 @@ def test_FSLXCommand_outputs(): output_map = dict( dyads=dict(), fsamples=dict(), - mean_S0samples=dict( - extensions=None, - ), - mean_dsamples=dict( - extensions=None, - ), + mean_S0samples=dict(extensions=None), + mean_dsamples=dict(extensions=None), mean_fsamples=dict(), - mean_tausamples=dict( - extensions=None, - ), + mean_tausamples=dict(extensions=None), phsamples=dict(), thsamples=dict(), ) diff --git a/nipype/interfaces/fsl/tests/test_auto_FUGUE.py b/nipype/interfaces/fsl/tests/test_auto_FUGUE.py index 841bb2021f..b33d65309a 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FUGUE.py +++ b/nipype/interfaces/fsl/tests/test_auto_FUGUE.py @@ -4,115 +4,39 @@ def test_FUGUE_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - asym_se_time=dict( - argstr="--asym=%.10f", - ), - despike_2dfilter=dict( - argstr="--despike", - ), - despike_threshold=dict( - argstr="--despikethreshold=%s", - ), - dwell_time=dict( - argstr="--dwell=%.10f", - ), - dwell_to_asym_ratio=dict( - argstr="--dwelltoasym=%.10f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fmap_in_file=dict( - argstr="--loadfmap=%s", - extensions=None, - ), - fmap_out_file=dict( - argstr="--savefmap=%s", - extensions=None, - ), - forward_warping=dict( - usedefault=True, - ), - fourier_order=dict( - argstr="--fourier=%d", - ), - icorr=dict( - argstr="--icorr", - requires=["shift_in_file"], - ), - icorr_only=dict( - argstr="--icorronly", - requires=["unwarped_file"], - ), - in_file=dict( - argstr="--in=%s", - extensions=None, - ), - mask_file=dict( - argstr="--mask=%s", - extensions=None, - ), - median_2dfilter=dict( - argstr="--median", - ), - no_extend=dict( - argstr="--noextend", - ), - no_gap_fill=dict( - argstr="--nofill", - ), - nokspace=dict( - argstr="--nokspace", - ), + args=dict(argstr="%s"), + asym_se_time=dict(argstr="--asym=%.10f"), + despike_2dfilter=dict(argstr="--despike"), + despike_threshold=dict(argstr="--despikethreshold=%s"), + dwell_time=dict(argstr="--dwell=%.10f"), + dwell_to_asym_ratio=dict(argstr="--dwelltoasym=%.10f"), + environ=dict(nohash=True, usedefault=True), + fmap_in_file=dict(argstr="--loadfmap=%s", extensions=None), + fmap_out_file=dict(argstr="--savefmap=%s", extensions=None), + forward_warping=dict(usedefault=True), + fourier_order=dict(argstr="--fourier=%d"), + icorr=dict(argstr="--icorr", requires=["shift_in_file"]), + icorr_only=dict(argstr="--icorronly", requires=["unwarped_file"]), + in_file=dict(argstr="--in=%s", extensions=None), + mask_file=dict(argstr="--mask=%s", extensions=None), + median_2dfilter=dict(argstr="--median"), + no_extend=dict(argstr="--noextend"), + no_gap_fill=dict(argstr="--nofill"), + nokspace=dict(argstr="--nokspace"), output_type=dict(), - pava=dict( - argstr="--pava", - ), - phase_conjugate=dict( - argstr="--phaseconj", - ), - phasemap_in_file=dict( - argstr="--phasemap=%s", - extensions=None, - ), - poly_order=dict( - argstr="--poly=%d", - ), - save_fmap=dict( - xor=["save_unmasked_fmap"], - ), - save_shift=dict( - 
xor=["save_unmasked_shift"], - ), - save_unmasked_fmap=dict( - argstr="--unmaskfmap", - xor=["save_fmap"], - ), - save_unmasked_shift=dict( - argstr="--unmaskshift", - xor=["save_shift"], - ), - shift_in_file=dict( - argstr="--loadshift=%s", - extensions=None, - ), - shift_out_file=dict( - argstr="--saveshift=%s", - extensions=None, - ), - smooth2d=dict( - argstr="--smooth2=%.2f", - ), - smooth3d=dict( - argstr="--smooth3=%.2f", - ), - unwarp_direction=dict( - argstr="--unwarpdir=%s", - ), + pava=dict(argstr="--pava"), + phase_conjugate=dict(argstr="--phaseconj"), + phasemap_in_file=dict(argstr="--phasemap=%s", extensions=None), + poly_order=dict(argstr="--poly=%d"), + save_fmap=dict(xor=["save_unmasked_fmap"]), + save_shift=dict(xor=["save_unmasked_shift"]), + save_unmasked_fmap=dict(argstr="--unmaskfmap", xor=["save_fmap"]), + save_unmasked_shift=dict(argstr="--unmaskshift", xor=["save_shift"]), + shift_in_file=dict(argstr="--loadshift=%s", extensions=None), + shift_out_file=dict(argstr="--saveshift=%s", extensions=None), + smooth2d=dict(argstr="--smooth2=%.2f"), + smooth3d=dict(argstr="--smooth3=%.2f"), + unwarp_direction=dict(argstr="--unwarpdir=%s"), unwarped_file=dict( argstr="--unwarp=%s", extensions=None, @@ -135,18 +59,10 @@ def test_FUGUE_inputs(): def test_FUGUE_outputs(): output_map = dict( - fmap_out_file=dict( - extensions=None, - ), - shift_out_file=dict( - extensions=None, - ), - unwarped_file=dict( - extensions=None, - ), - warped_file=dict( - extensions=None, - ), + fmap_out_file=dict(extensions=None), + shift_out_file=dict(extensions=None), + unwarped_file=dict(extensions=None), + warped_file=dict(extensions=None), ) outputs = FUGUE.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py b/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py index 8531fe17c4..a759c16802 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py +++ b/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py @@ -4,18 +4,9 @@ def test_FeatureExtractor_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - mel_ica=dict( - argstr="%s", - copyfile=False, - position=-1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + mel_ica=dict(argstr="%s", copyfile=False, position=-1), ) inputs = FeatureExtractor.input_spec() @@ -25,13 +16,7 @@ def test_FeatureExtractor_inputs(): def test_FeatureExtractor_outputs(): - output_map = dict( - mel_ica=dict( - argstr="%s", - copyfile=False, - position=-1, - ), - ) + output_map = dict(mel_ica=dict(argstr="%s", copyfile=False, position=-1)) outputs = FeatureExtractor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py b/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py index e4826db355..ef5b798656 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py +++ b/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py @@ -4,55 +4,23 @@ def test_FilterRegressor_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - design_file=dict( - argstr="-d %s", - extensions=None, - mandatory=True, - position=3, - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + design_file=dict(argstr="-d %s", extensions=None, mandatory=True, position=3), + environ=dict(nohash=True, usedefault=True), filter_all=dict( - argstr="-f '%s'", - mandatory=True, - position=4, - xor=["filter_columns"], + argstr="-f '%s'", 
mandatory=True, position=4, xor=["filter_columns"] ), filter_columns=dict( - argstr="-f '%s'", - mandatory=True, - position=4, - xor=["filter_all"], - ), - in_file=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - position=1, - ), - mask=dict( - argstr="-m %s", - extensions=None, + argstr="-f '%s'", mandatory=True, position=4, xor=["filter_all"] ), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=1), + mask=dict(argstr="-m %s", extensions=None), out_file=dict( - argstr="-o %s", - extensions=None, - genfile=True, - hash_files=False, - position=2, - ), - out_vnscales=dict( - argstr="--out_vnscales", + argstr="-o %s", extensions=None, genfile=True, hash_files=False, position=2 ), + out_vnscales=dict(argstr="--out_vnscales"), output_type=dict(), - var_norm=dict( - argstr="--vn", - ), + var_norm=dict(argstr="--vn"), ) inputs = FilterRegressor.input_spec() @@ -62,11 +30,7 @@ def test_FilterRegressor_inputs(): def test_FilterRegressor_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = FilterRegressor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py b/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py index 14b3bbb8da..c11acc3dbc 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py +++ b/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py @@ -4,24 +4,11 @@ def test_FindTheBiggest_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr="%s", - mandatory=True, - position=0, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_files=dict(argstr="%s", mandatory=True, position=0), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=2, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=2 ), output_type=dict(), ) @@ -33,12 +20,7 @@ def test_FindTheBiggest_inputs(): def test_FindTheBiggest_outputs(): - output_map = dict( - out_file=dict( - argstr="%s", - extensions=None, - ), - ) + output_map = dict(out_file=dict(argstr="%s", extensions=None)) outputs = FindTheBiggest.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_GLM.py b/nipype/interfaces/fsl/tests/test_auto_GLM.py index 63105f128d..c9b7accb30 100644 --- a/nipype/interfaces/fsl/tests/test_auto_GLM.py +++ b/nipype/interfaces/fsl/tests/test_auto_GLM.py @@ -4,57 +4,19 @@ def test_GLM_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - contrasts=dict( - argstr="-c %s", - extensions=None, - ), - dat_norm=dict( - argstr="--dat_norm", - ), - demean=dict( - argstr="--demean", - ), - des_norm=dict( - argstr="--des_norm", - ), - design=dict( - argstr="-d %s", - extensions=None, - mandatory=True, - position=2, - ), - dof=dict( - argstr="--dof=%d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - position=1, - ), - mask=dict( - argstr="-m %s", - extensions=None, - ), - out_cope=dict( - argstr="--out_cope=%s", - extensions=None, - ), - out_data_name=dict( - argstr="--out_data=%s", - extensions=None, - ), - out_f_name=dict( - argstr="--out_f=%s", - extensions=None, - ), + args=dict(argstr="%s"), + contrasts=dict(argstr="-c %s", extensions=None), + dat_norm=dict(argstr="--dat_norm"), + 
demean=dict(argstr="--demean"), + des_norm=dict(argstr="--des_norm"), + design=dict(argstr="-d %s", extensions=None, mandatory=True, position=2), + dof=dict(argstr="--dof=%d"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=1), + mask=dict(argstr="-m %s", extensions=None), + out_cope=dict(argstr="--out_cope=%s", extensions=None), + out_data_name=dict(argstr="--out_data=%s", extensions=None), + out_f_name=dict(argstr="--out_f=%s", extensions=None), out_file=dict( argstr="-o %s", extensions=None, @@ -63,42 +25,16 @@ def test_GLM_inputs(): name_template="%s_glm", position=3, ), - out_p_name=dict( - argstr="--out_p=%s", - extensions=None, - ), - out_pf_name=dict( - argstr="--out_pf=%s", - extensions=None, - ), - out_res_name=dict( - argstr="--out_res=%s", - extensions=None, - ), - out_sigsq_name=dict( - argstr="--out_sigsq=%s", - extensions=None, - ), - out_t_name=dict( - argstr="--out_t=%s", - extensions=None, - ), - out_varcb_name=dict( - argstr="--out_varcb=%s", - extensions=None, - ), - out_vnscales_name=dict( - argstr="--out_vnscales=%s", - extensions=None, - ), - out_z_name=dict( - argstr="--out_z=%s", - extensions=None, - ), + out_p_name=dict(argstr="--out_p=%s", extensions=None), + out_pf_name=dict(argstr="--out_pf=%s", extensions=None), + out_res_name=dict(argstr="--out_res=%s", extensions=None), + out_sigsq_name=dict(argstr="--out_sigsq=%s", extensions=None), + out_t_name=dict(argstr="--out_t=%s", extensions=None), + out_varcb_name=dict(argstr="--out_varcb=%s", extensions=None), + out_vnscales_name=dict(argstr="--out_vnscales=%s", extensions=None), + out_z_name=dict(argstr="--out_z=%s", extensions=None), output_type=dict(), - var_norm=dict( - argstr="--vn", - ), + var_norm=dict(argstr="--vn"), ) inputs = GLM.input_spec() @@ -112,9 +48,7 @@ def test_GLM_outputs(): out_cope=dict(), out_data=dict(), out_f=dict(), - out_file=dict( - extensions=None, - ), + out_file=dict(extensions=None), out_p=dict(), out_pf=dict(), out_res=dict(), diff --git a/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py b/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py index b49813e24d..44dce0bf50 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py +++ b/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py @@ -4,64 +4,25 @@ def test_ICA_AROMA_inputs(): input_map = dict( - TR=dict( - argstr="-tr %.3f", - ), - args=dict( - argstr="%s", - ), - denoise_type=dict( - argstr="-den %s", - mandatory=True, - usedefault=True, - ), - dim=dict( - argstr="-dim %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + TR=dict(argstr="-tr %.3f"), + args=dict(argstr="%s"), + denoise_type=dict(argstr="-den %s", mandatory=True, usedefault=True), + dim=dict(argstr="-dim %d"), + environ=dict(nohash=True, usedefault=True), feat_dir=dict( argstr="-feat %s", mandatory=True, xor=["in_file", "mat_file", "fnirt_warp_file", "motion_parameters"], ), - fnirt_warp_file=dict( - argstr="-warp %s", - extensions=None, - xor=["feat_dir"], - ), - in_file=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - xor=["feat_dir"], - ), - mask=dict( - argstr="-m %s", - extensions=None, - xor=["feat_dir"], - ), - mat_file=dict( - argstr="-affmat %s", - extensions=None, - xor=["feat_dir"], - ), - melodic_dir=dict( - argstr="-meldir %s", - ), + fnirt_warp_file=dict(argstr="-warp %s", extensions=None, xor=["feat_dir"]), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True, xor=["feat_dir"]), + mask=dict(argstr="-m %s", extensions=None, 
xor=["feat_dir"]), + mat_file=dict(argstr="-affmat %s", extensions=None, xor=["feat_dir"]), + melodic_dir=dict(argstr="-meldir %s"), motion_parameters=dict( - argstr="-mc %s", - extensions=None, - mandatory=True, - xor=["feat_dir"], - ), - out_dir=dict( - argstr="-o %s", - mandatory=True, - usedefault=True, + argstr="-mc %s", extensions=None, mandatory=True, xor=["feat_dir"] ), + out_dir=dict(argstr="-o %s", mandatory=True, usedefault=True), ) inputs = ICA_AROMA.input_spec() @@ -72,12 +33,8 @@ def test_ICA_AROMA_inputs(): def test_ICA_AROMA_outputs(): output_map = dict( - aggr_denoised_file=dict( - extensions=None, - ), - nonaggr_denoised_file=dict( - extensions=None, - ), + aggr_denoised_file=dict(extensions=None), + nonaggr_denoised_file=dict(extensions=None), out_dir=dict(), ) outputs = ICA_AROMA.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py b/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py index d2c4737d65..dcd3829f03 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py @@ -4,42 +4,15 @@ def test_ImageMaths_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=1, - ), - in_file2=dict( - argstr="%s", - extensions=None, - position=3, - ), - mask_file=dict( - argstr="-mas %s", - extensions=None, - ), - op_string=dict( - argstr="%s", - position=2, - ), - out_data_type=dict( - argstr="-odt %s", - position=-1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1), + in_file2=dict(argstr="%s", extensions=None, position=3), + mask_file=dict(argstr="-mas %s", extensions=None), + op_string=dict(argstr="%s", position=2), + out_data_type=dict(argstr="-odt %s", position=-1), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=-2, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 ), output_type=dict(), suffix=dict(), @@ -52,11 +25,7 @@ def test_ImageMaths_inputs(): def test_ImageMaths_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = ImageMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py b/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py index b050d8f50b..60846b9c3a 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py +++ b/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py @@ -4,52 +4,19 @@ def test_ImageMeants_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - eig=dict( - argstr="--eig", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - position=0, - ), - mask=dict( - argstr="-m %s", - extensions=None, - ), - nobin=dict( - argstr="--no_bin", - ), - order=dict( - argstr="--order=%d", - usedefault=True, - ), - out_file=dict( - argstr="-o %s", - extensions=None, - genfile=True, - hash_files=False, - ), + args=dict(argstr="%s"), + eig=dict(argstr="--eig"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=0), + mask=dict(argstr="-m %s", extensions=None), + nobin=dict(argstr="--no_bin"), + order=dict(argstr="--order=%d", usedefault=True), + 
out_file=dict(argstr="-o %s", extensions=None, genfile=True, hash_files=False), output_type=dict(), - show_all=dict( - argstr="--showall", - ), - spatial_coord=dict( - argstr="-c %s", - ), - transpose=dict( - argstr="--transpose", - ), - use_mm=dict( - argstr="--usemm", - ), + show_all=dict(argstr="--showall"), + spatial_coord=dict(argstr="-c %s"), + transpose=dict(argstr="--transpose"), + use_mm=dict(argstr="--usemm"), ) inputs = ImageMeants.input_spec() @@ -59,11 +26,7 @@ def test_ImageMeants_inputs(): def test_ImageMeants_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = ImageMeants.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageStats.py b/nipype/interfaces/fsl/tests/test_auto_ImageStats.py index e4ddf5f06d..54e44b2b61 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ImageStats.py +++ b/nipype/interfaces/fsl/tests/test_auto_ImageStats.py @@ -4,38 +4,14 @@ def test_ImageStats_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=3, - ), - index_mask_file=dict( - argstr="-K %s", - extensions=None, - position=2, - ), - mask_file=dict( - argstr="", - extensions=None, - ), - op_string=dict( - argstr="%s", - mandatory=True, - position=4, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=3), + index_mask_file=dict(argstr="-K %s", extensions=None, position=2), + mask_file=dict(argstr="", extensions=None), + op_string=dict(argstr="%s", mandatory=True, position=4), output_type=dict(), - split_4d=dict( - argstr="-t", - position=1, - ), + split_4d=dict(argstr="-t", position=1), ) inputs = ImageStats.input_spec() @@ -45,9 +21,7 @@ def test_ImageStats_inputs(): def test_ImageStats_outputs(): - output_map = dict( - out_stat=dict(), - ) + output_map = dict(out_stat=dict()) outputs = ImageStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_InvWarp.py b/nipype/interfaces/fsl/tests/test_auto_InvWarp.py index 1dba5e578a..688e46da70 100644 --- a/nipype/interfaces/fsl/tests/test_auto_InvWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_InvWarp.py @@ -4,17 +4,9 @@ def test_InvWarp_inputs(): input_map = dict( - absolute=dict( - argstr="--abs", - xor=["relative"], - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + absolute=dict(argstr="--abs", xor=["relative"]), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), inverse_warp=dict( argstr="--out=%s", extensions=None, @@ -22,36 +14,15 @@ def test_InvWarp_inputs(): name_source=["warp"], name_template="%s_inverse", ), - jacobian_max=dict( - argstr="--jmax=%f", - ), - jacobian_min=dict( - argstr="--jmin=%f", - ), - niter=dict( - argstr="--niter=%d", - ), - noconstraint=dict( - argstr="--noconstraint", - ), + jacobian_max=dict(argstr="--jmax=%f"), + jacobian_min=dict(argstr="--jmin=%f"), + niter=dict(argstr="--niter=%d"), + noconstraint=dict(argstr="--noconstraint"), output_type=dict(), - reference=dict( - argstr="--ref=%s", - extensions=None, - mandatory=True, - ), - regularise=dict( - argstr="--regularise=%f", - ), - relative=dict( - argstr="--rel", - xor=["absolute"], - ), - warp=dict( - argstr="--warp=%s", - extensions=None, - 
mandatory=True, - ), + reference=dict(argstr="--ref=%s", extensions=None, mandatory=True), + regularise=dict(argstr="--regularise=%f"), + relative=dict(argstr="--rel", xor=["absolute"]), + warp=dict(argstr="--warp=%s", extensions=None, mandatory=True), ) inputs = InvWarp.input_spec() @@ -61,11 +32,7 @@ def test_InvWarp_inputs(): def test_InvWarp_outputs(): - output_map = dict( - inverse_warp=dict( - extensions=None, - ), - ) + output_map = dict(inverse_warp=dict(extensions=None)) outputs = InvWarp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py b/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py index f9c5432d40..0597d182dd 100644 --- a/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py +++ b/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py @@ -4,51 +4,18 @@ def test_IsotropicSmooth_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fwhm=dict( - argstr="-s %.5f", - mandatory=True, - position=4, - xor=["sigma"], - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr="-dt %s", - position=1, - ), - nan2zeros=dict( - argstr="-nan", - position=3, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + fwhm=dict(argstr="-s %.5f", mandatory=True, position=4, xor=["sigma"]), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + internal_datatype=dict(argstr="-dt %s", position=1), + nan2zeros=dict(argstr="-nan", position=3), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr="-odt %s", - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 ), + output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), - sigma=dict( - argstr="-s %.5f", - mandatory=True, - position=4, - xor=["fwhm"], - ), + sigma=dict(argstr="-s %.5f", mandatory=True, position=4, xor=["fwhm"]), ) inputs = IsotropicSmooth.input_spec() @@ -58,11 +25,7 @@ def test_IsotropicSmooth_inputs(): def test_IsotropicSmooth_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = IsotropicSmooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_L2Model.py b/nipype/interfaces/fsl/tests/test_auto_L2Model.py index c4547fc7a2..daa465ea3a 100644 --- a/nipype/interfaces/fsl/tests/test_auto_L2Model.py +++ b/nipype/interfaces/fsl/tests/test_auto_L2Model.py @@ -3,11 +3,7 @@ def test_L2Model_inputs(): - input_map = dict( - num_copes=dict( - mandatory=True, - ), - ) + input_map = dict(num_copes=dict(mandatory=True)) inputs = L2Model.input_spec() for key, metadata in list(input_map.items()): @@ -17,15 +13,9 @@ def test_L2Model_inputs(): def test_L2Model_outputs(): output_map = dict( - design_con=dict( - extensions=None, - ), - design_grp=dict( - extensions=None, - ), - design_mat=dict( - extensions=None, - ), + design_con=dict(extensions=None), + design_grp=dict(extensions=None), + design_mat=dict(extensions=None), ) outputs = L2Model.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Level1Design.py b/nipype/interfaces/fsl/tests/test_auto_Level1Design.py index 5a43989601..c941ef0687 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Level1Design.py +++ 
b/nipype/interfaces/fsl/tests/test_auto_Level1Design.py @@ -4,22 +4,12 @@ def test_Level1Design_inputs(): input_map = dict( - bases=dict( - mandatory=True, - ), + bases=dict(mandatory=True), contrasts=dict(), - interscan_interval=dict( - mandatory=True, - ), - model_serial_correlations=dict( - mandatory=True, - ), - orthogonalization=dict( - usedefault=True, - ), - session_info=dict( - mandatory=True, - ), + interscan_interval=dict(mandatory=True), + model_serial_correlations=dict(mandatory=True), + orthogonalization=dict(usedefault=True), + session_info=dict(mandatory=True), ) inputs = Level1Design.input_spec() @@ -29,10 +19,7 @@ def test_Level1Design_inputs(): def test_Level1Design_outputs(): - output_map = dict( - ev_files=dict(), - fsf_files=dict(), - ) + output_map = dict(ev_files=dict(), fsf_files=dict()) outputs = Level1Design.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py b/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py index 768c52a7f4..98f38d7186 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py +++ b/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py @@ -4,82 +4,31 @@ def test_MCFLIRT_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bins=dict( - argstr="-bins %d", - ), - cost=dict( - argstr="-cost %s", - ), - dof=dict( - argstr="-dof %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - position=0, - ), - init=dict( - argstr="-init %s", - extensions=None, - ), - interpolation=dict( - argstr="-%s_final", - ), - mean_vol=dict( - argstr="-meanvol", - ), + args=dict(argstr="%s"), + bins=dict(argstr="-bins %d"), + cost=dict(argstr="-cost %s"), + dof=dict(argstr="-dof %d"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=0), + init=dict(argstr="-init %s", extensions=None), + interpolation=dict(argstr="-%s_final"), + mean_vol=dict(argstr="-meanvol"), out_file=dict( - argstr="-out %s", - extensions=None, - genfile=True, - hash_files=False, + argstr="-out %s", extensions=None, genfile=True, hash_files=False ), output_type=dict(), - ref_file=dict( - argstr="-reffile %s", - extensions=None, - ), - ref_vol=dict( - argstr="-refvol %d", - ), - rotation=dict( - argstr="-rotation %d", - ), - save_mats=dict( - argstr="-mats", - ), - save_plots=dict( - argstr="-plots", - ), - save_rms=dict( - argstr="-rmsabs -rmsrel", - ), - scaling=dict( - argstr="-scaling %.2f", - ), - smooth=dict( - argstr="-smooth %.2f", - ), - stages=dict( - argstr="-stages %d", - ), - stats_imgs=dict( - argstr="-stats", - ), - use_contour=dict( - argstr="-edge", - ), - use_gradient=dict( - argstr="-gdt", - ), + ref_file=dict(argstr="-reffile %s", extensions=None), + ref_vol=dict(argstr="-refvol %d"), + rotation=dict(argstr="-rotation %d"), + save_mats=dict(argstr="-mats"), + save_plots=dict(argstr="-plots"), + save_rms=dict(argstr="-rmsabs -rmsrel"), + scaling=dict(argstr="-scaling %.2f"), + smooth=dict(argstr="-smooth %.2f"), + stages=dict(argstr="-stages %d"), + stats_imgs=dict(argstr="-stats"), + use_contour=dict(argstr="-edge"), + use_gradient=dict(argstr="-gdt"), ) inputs = MCFLIRT.input_spec() @@ -91,22 +40,12 @@ def test_MCFLIRT_inputs(): def test_MCFLIRT_outputs(): output_map = dict( mat_file=dict(), - mean_img=dict( - extensions=None, - ), - out_file=dict( - extensions=None, - ), - par_file=dict( - extensions=None, - ), + mean_img=dict(extensions=None), + 
out_file=dict(extensions=None), + par_file=dict(extensions=None), rms_files=dict(), - std_img=dict( - extensions=None, - ), - variance_img=dict( - extensions=None, - ), + std_img=dict(extensions=None), + variance_img=dict(extensions=None), ) outputs = MCFLIRT.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_MELODIC.py b/nipype/interfaces/fsl/tests/test_auto_MELODIC.py index db2406e30f..64fe88bcdd 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MELODIC.py +++ b/nipype/interfaces/fsl/tests/test_auto_MELODIC.py @@ -4,171 +4,57 @@ def test_MELODIC_inputs(): input_map = dict( - ICs=dict( - argstr="--ICs=%s", - extensions=None, - ), - approach=dict( - argstr="-a %s", - ), - args=dict( - argstr="%s", - ), - bg_image=dict( - argstr="--bgimage=%s", - extensions=None, - ), - bg_threshold=dict( - argstr="--bgthreshold=%f", - ), - cov_weight=dict( - argstr="--covarweight=%f", - ), - dim=dict( - argstr="-d %d", - ), - dim_est=dict( - argstr="--dimest=%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - epsilon=dict( - argstr="--eps=%f", - ), - epsilonS=dict( - argstr="--epsS=%f", - ), - in_files=dict( - argstr="-i %s", - mandatory=True, - position=0, - sep=",", - ), - log_power=dict( - argstr="--logPower", - ), - mask=dict( - argstr="-m %s", - extensions=None, - ), - max_restart=dict( - argstr="--maxrestart=%d", - ), - maxit=dict( - argstr="--maxit=%d", - ), - migp=dict( - argstr="--migp", - ), - migpN=dict( - argstr="--migpN %d", - ), - migp_factor=dict( - argstr="--migp_factor %d", - ), - migp_shuffle=dict( - argstr="--migp_shuffle", - ), - mix=dict( - argstr="--mix=%s", - extensions=None, - ), - mm_thresh=dict( - argstr="--mmthresh=%f", - ), - no_bet=dict( - argstr="--nobet", - ), - no_mask=dict( - argstr="--nomask", - ), - no_mm=dict( - argstr="--no_mm", - ), - non_linearity=dict( - argstr="--nl=%s", - ), - num_ICs=dict( - argstr="-n %d", - ), - out_all=dict( - argstr="--Oall", - ), - out_dir=dict( - argstr="-o %s", - genfile=True, - ), - out_mean=dict( - argstr="--Omean", - ), - out_orig=dict( - argstr="--Oorig", - ), - out_pca=dict( - argstr="--Opca", - ), - out_stats=dict( - argstr="--Ostats", - ), - out_unmix=dict( - argstr="--Ounmix", - ), - out_white=dict( - argstr="--Owhite", - ), + ICs=dict(argstr="--ICs=%s", extensions=None), + approach=dict(argstr="-a %s"), + args=dict(argstr="%s"), + bg_image=dict(argstr="--bgimage=%s", extensions=None), + bg_threshold=dict(argstr="--bgthreshold=%f"), + cov_weight=dict(argstr="--covarweight=%f"), + dim=dict(argstr="-d %d"), + dim_est=dict(argstr="--dimest=%s"), + environ=dict(nohash=True, usedefault=True), + epsilon=dict(argstr="--eps=%f"), + epsilonS=dict(argstr="--epsS=%f"), + in_files=dict(argstr="-i %s", mandatory=True, position=0, sep=","), + log_power=dict(argstr="--logPower"), + mask=dict(argstr="-m %s", extensions=None), + max_restart=dict(argstr="--maxrestart=%d"), + maxit=dict(argstr="--maxit=%d"), + migp=dict(argstr="--migp"), + migpN=dict(argstr="--migpN %d"), + migp_factor=dict(argstr="--migp_factor %d"), + migp_shuffle=dict(argstr="--migp_shuffle"), + mix=dict(argstr="--mix=%s", extensions=None), + mm_thresh=dict(argstr="--mmthresh=%f"), + no_bet=dict(argstr="--nobet"), + no_mask=dict(argstr="--nomask"), + no_mm=dict(argstr="--no_mm"), + non_linearity=dict(argstr="--nl=%s"), + num_ICs=dict(argstr="-n %d"), + out_all=dict(argstr="--Oall"), + out_dir=dict(argstr="-o %s", genfile=True), + out_mean=dict(argstr="--Omean"), + out_orig=dict(argstr="--Oorig"), + out_pca=dict(argstr="--Opca"), + 
out_stats=dict(argstr="--Ostats"), + out_unmix=dict(argstr="--Ounmix"), + out_white=dict(argstr="--Owhite"), output_type=dict(), - pbsc=dict( - argstr="--pbsc", - ), - rem_cmp=dict( - argstr="-f %d", - ), - remove_deriv=dict( - argstr="--remove_deriv", - ), - report=dict( - argstr="--report", - ), - report_maps=dict( - argstr="--report_maps=%s", - ), - s_con=dict( - argstr="--Scon=%s", - extensions=None, - ), - s_des=dict( - argstr="--Sdes=%s", - extensions=None, - ), - sep_vn=dict( - argstr="--sep_vn", - ), - sep_whiten=dict( - argstr="--sep_whiten", - ), - smode=dict( - argstr="--smode=%s", - extensions=None, - ), - t_con=dict( - argstr="--Tcon=%s", - extensions=None, - ), - t_des=dict( - argstr="--Tdes=%s", - extensions=None, - ), - tr_sec=dict( - argstr="--tr=%f", - ), - update_mask=dict( - argstr="--update_mask", - ), - var_norm=dict( - argstr="--vn", - ), + pbsc=dict(argstr="--pbsc"), + rem_cmp=dict(argstr="-f %d"), + remove_deriv=dict(argstr="--remove_deriv"), + report=dict(argstr="--report"), + report_maps=dict(argstr="--report_maps=%s"), + s_con=dict(argstr="--Scon=%s", extensions=None), + s_des=dict(argstr="--Sdes=%s", extensions=None), + sep_vn=dict(argstr="--sep_vn"), + sep_whiten=dict(argstr="--sep_whiten"), + smode=dict(argstr="--smode=%s", extensions=None), + t_con=dict(argstr="--Tcon=%s", extensions=None), + t_des=dict(argstr="--Tdes=%s", extensions=None), + tr_sec=dict(argstr="--tr=%f"), + update_mask=dict(argstr="--update_mask"), + var_norm=dict(argstr="--vn"), ) inputs = MELODIC.input_spec() @@ -178,10 +64,7 @@ def test_MELODIC_inputs(): def test_MELODIC_outputs(): - output_map = dict( - out_dir=dict(), - report_dir=dict(), - ) + output_map = dict(out_dir=dict(), report_dir=dict()) outputs = MELODIC.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py b/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py index bfdb32146e..95172667f4 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py +++ b/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py @@ -4,42 +4,16 @@ def test_MakeDyadicVectors_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - mask=dict( - argstr="%s", - extensions=None, - position=2, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + mask=dict(argstr="%s", extensions=None, position=2), output=dict( - argstr="%s", - extensions=None, - hash_files=False, - position=3, - usedefault=True, + argstr="%s", extensions=None, hash_files=False, position=3, usedefault=True ), output_type=dict(), - perc=dict( - argstr="%f", - position=4, - ), - phi_vol=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=1, - ), - theta_vol=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=0, - ), + perc=dict(argstr="%f", position=4), + phi_vol=dict(argstr="%s", extensions=None, mandatory=True, position=1), + theta_vol=dict(argstr="%s", extensions=None, mandatory=True, position=0), ) inputs = MakeDyadicVectors.input_spec() @@ -49,14 +23,7 @@ def test_MakeDyadicVectors_inputs(): def test_MakeDyadicVectors_outputs(): - output_map = dict( - dispersion=dict( - extensions=None, - ), - dyads=dict( - extensions=None, - ), - ) + output_map = dict(dispersion=dict(extensions=None), dyads=dict(extensions=None)) outputs = MakeDyadicVectors.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py 
b/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py index e14e4a4005..9cab619ecd 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py +++ b/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py @@ -4,38 +4,15 @@ def test_MathsCommand_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr="-dt %s", - position=1, - ), - nan2zeros=dict( - argstr="-nan", - position=3, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + internal_datatype=dict(argstr="-dt %s", position=1), + nan2zeros=dict(argstr="-nan", position=3), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr="-odt %s", - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 ), + output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = MathsCommand.input_spec() @@ -46,11 +23,7 @@ def test_MathsCommand_inputs(): def test_MathsCommand_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MathsCommand.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MaxImage.py b/nipype/interfaces/fsl/tests/test_auto_MaxImage.py index f96f931fcf..8ce0e9b7df 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MaxImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MaxImage.py @@ -4,43 +4,16 @@ def test_MaxImage_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dimension=dict( - argstr="-%smax", - position=4, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr="-dt %s", - position=1, - ), - nan2zeros=dict( - argstr="-nan", - position=3, - ), + args=dict(argstr="%s"), + dimension=dict(argstr="-%smax", position=4, usedefault=True), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + internal_datatype=dict(argstr="-dt %s", position=1), + nan2zeros=dict(argstr="-nan", position=3), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr="-odt %s", - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 ), + output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = MaxImage.input_spec() @@ -51,11 +24,7 @@ def test_MaxImage_inputs(): def test_MaxImage_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MaxImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py b/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py index 30ada25d79..85f251bb15 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py @@ -4,43 +4,16 @@ def test_MaxnImage_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dimension=dict( - argstr="-%smaxn", - position=4, - usedefault=True, - ), - environ=dict( - 
nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr="-dt %s", - position=1, - ), - nan2zeros=dict( - argstr="-nan", - position=3, - ), + args=dict(argstr="%s"), + dimension=dict(argstr="-%smaxn", position=4, usedefault=True), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + internal_datatype=dict(argstr="-dt %s", position=1), + nan2zeros=dict(argstr="-nan", position=3), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr="-odt %s", - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 ), + output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = MaxnImage.input_spec() @@ -51,11 +24,7 @@ def test_MaxnImage_inputs(): def test_MaxnImage_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MaxnImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MeanImage.py b/nipype/interfaces/fsl/tests/test_auto_MeanImage.py index e29104476c..5affe0d8f0 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MeanImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MeanImage.py @@ -4,43 +4,16 @@ def test_MeanImage_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dimension=dict( - argstr="-%smean", - position=4, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr="-dt %s", - position=1, - ), - nan2zeros=dict( - argstr="-nan", - position=3, - ), + args=dict(argstr="%s"), + dimension=dict(argstr="-%smean", position=4, usedefault=True), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + internal_datatype=dict(argstr="-dt %s", position=1), + nan2zeros=dict(argstr="-nan", position=3), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr="-odt %s", - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 ), + output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = MeanImage.input_spec() @@ -51,11 +24,7 @@ def test_MeanImage_inputs(): def test_MeanImage_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MeanImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MedianImage.py b/nipype/interfaces/fsl/tests/test_auto_MedianImage.py index 7c8052fd31..89b4fa63cd 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MedianImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MedianImage.py @@ -4,43 +4,16 @@ def test_MedianImage_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dimension=dict( - argstr="-%smedian", - position=4, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr="-dt %s", - position=1, - ), - nan2zeros=dict( - argstr="-nan", - position=3, - ), + 
args=dict(argstr="%s"), + dimension=dict(argstr="-%smedian", position=4, usedefault=True), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + internal_datatype=dict(argstr="-dt %s", position=1), + nan2zeros=dict(argstr="-nan", position=3), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr="-odt %s", - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 ), + output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = MedianImage.input_spec() @@ -51,11 +24,7 @@ def test_MedianImage_inputs(): def test_MedianImage_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MedianImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Merge.py b/nipype/interfaces/fsl/tests/test_auto_Merge.py index 847f9b7bd3..5b5db8d3c6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Merge.py +++ b/nipype/interfaces/fsl/tests/test_auto_Merge.py @@ -4,23 +4,10 @@ def test_Merge_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dimension=dict( - argstr="-%s", - mandatory=True, - position=0, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr="%s", - mandatory=True, - position=2, - ), + args=dict(argstr="%s"), + dimension=dict(argstr="-%s", mandatory=True, position=0), + environ=dict(nohash=True, usedefault=True), + in_files=dict(argstr="%s", mandatory=True, position=2), merged_file=dict( argstr="%s", extensions=None, @@ -30,10 +17,7 @@ def test_Merge_inputs(): position=1, ), output_type=dict(), - tr=dict( - argstr="%.2f", - position=-1, - ), + tr=dict(argstr="%.2f", position=-1), ) inputs = Merge.input_spec() @@ -43,11 +27,7 @@ def test_Merge_inputs(): def test_Merge_outputs(): - output_map = dict( - merged_file=dict( - extensions=None, - ), - ) + output_map = dict(merged_file=dict(extensions=None)) outputs = Merge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MinImage.py b/nipype/interfaces/fsl/tests/test_auto_MinImage.py index bde76c1afc..7cf244a38d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MinImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MinImage.py @@ -4,43 +4,16 @@ def test_MinImage_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dimension=dict( - argstr="-%smin", - position=4, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr="-dt %s", - position=1, - ), - nan2zeros=dict( - argstr="-nan", - position=3, - ), + args=dict(argstr="%s"), + dimension=dict(argstr="-%smin", position=4, usedefault=True), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + internal_datatype=dict(argstr="-dt %s", position=1), + nan2zeros=dict(argstr="-nan", position=3), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr="-odt %s", - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 ), + output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = MinImage.input_spec() @@ 
-51,11 +24,7 @@ def test_MinImage_inputs(): def test_MinImage_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MinImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py b/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py index 9a5773336f..7a9b0a2e2e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py +++ b/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py @@ -4,31 +4,13 @@ def test_MotionOutliers_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dummy=dict( - argstr="--dummy=%d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - ), - mask=dict( - argstr="-m %s", - extensions=None, - ), - metric=dict( - argstr="--%s", - ), - no_motion_correction=dict( - argstr="--nomoco", - ), + args=dict(argstr="%s"), + dummy=dict(argstr="--dummy=%d"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True), + mask=dict(argstr="-m %s", extensions=None), + metric=dict(argstr="--%s"), + no_motion_correction=dict(argstr="--nomoco"), out_file=dict( argstr="-o %s", extensions=None, @@ -54,9 +36,7 @@ def test_MotionOutliers_inputs(): name_template="%s_metrics.txt", ), output_type=dict(), - threshold=dict( - argstr="--thresh=%g", - ), + threshold=dict(argstr="--thresh=%g"), ) inputs = MotionOutliers.input_spec() @@ -67,15 +47,9 @@ def test_MotionOutliers_inputs(): def test_MotionOutliers_outputs(): output_map = dict( - out_file=dict( - extensions=None, - ), - out_metric_plot=dict( - extensions=None, - ), - out_metric_values=dict( - extensions=None, - ), + out_file=dict(extensions=None), + out_metric_plot=dict(extensions=None), + out_metric_values=dict(extensions=None), ) outputs = MotionOutliers.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py b/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py index 95de40d023..4f731da4bd 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py @@ -4,46 +4,17 @@ def test_MultiImageMaths_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr="-dt %s", - position=1, - ), - nan2zeros=dict( - argstr="-nan", - position=3, - ), - op_string=dict( - argstr="%s", - mandatory=True, - position=4, - ), - operand_files=dict( - mandatory=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + internal_datatype=dict(argstr="-dt %s", position=1), + nan2zeros=dict(argstr="-nan", position=3), + op_string=dict(argstr="%s", mandatory=True, position=4), + operand_files=dict(mandatory=True), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr="-odt %s", - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 ), + output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = MultiImageMaths.input_spec() @@ -54,11 +25,7 @@ def test_MultiImageMaths_inputs(): def test_MultiImageMaths_outputs(): - output_map = dict( - 
out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MultiImageMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py b/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py index cae5e90cd4..30d76b56df 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py +++ b/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py @@ -4,13 +4,7 @@ def test_MultipleRegressDesign_inputs(): input_map = dict( - contrasts=dict( - mandatory=True, - ), - groups=dict(), - regressors=dict( - mandatory=True, - ), + contrasts=dict(mandatory=True), groups=dict(), regressors=dict(mandatory=True) ) inputs = MultipleRegressDesign.input_spec() @@ -21,18 +15,10 @@ def test_MultipleRegressDesign_inputs(): def test_MultipleRegressDesign_outputs(): output_map = dict( - design_con=dict( - extensions=None, - ), - design_fts=dict( - extensions=None, - ), - design_grp=dict( - extensions=None, - ), - design_mat=dict( - extensions=None, - ), + design_con=dict(extensions=None), + design_fts=dict(extensions=None), + design_grp=dict(extensions=None), + design_mat=dict(extensions=None), ) outputs = MultipleRegressDesign.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Overlay.py b/nipype/interfaces/fsl/tests/test_auto_Overlay.py index 22c4f08a44..27e4155245 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Overlay.py +++ b/nipype/interfaces/fsl/tests/test_auto_Overlay.py @@ -4,31 +4,21 @@ def test_Overlay_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), auto_thresh_bg=dict( argstr="-a", mandatory=True, position=5, xor=("auto_thresh_bg", "full_bg_range", "bg_thresh"), ), - background_image=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=4, - ), + background_image=dict(argstr="%s", extensions=None, mandatory=True, position=4), bg_thresh=dict( argstr="%.3f %.3f", mandatory=True, position=5, xor=("auto_thresh_bg", "full_bg_range", "bg_thresh"), ), - environ=dict( - nohash=True, - usedefault=True, - ), + environ=dict(nohash=True, usedefault=True), full_bg_range=dict( argstr="-A", mandatory=True, @@ -36,53 +26,19 @@ def test_Overlay_inputs(): xor=("auto_thresh_bg", "full_bg_range", "bg_thresh"), ), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=-1, - ), - out_type=dict( - argstr="%s", - position=2, - usedefault=True, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-1 ), + out_type=dict(argstr="%s", position=2, usedefault=True), output_type=dict(), - show_negative_stats=dict( - argstr="%s", - position=8, - xor=["stat_image2"], - ), - stat_image=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=6, - ), + show_negative_stats=dict(argstr="%s", position=8, xor=["stat_image2"]), + stat_image=dict(argstr="%s", extensions=None, mandatory=True, position=6), stat_image2=dict( - argstr="%s", - extensions=None, - position=9, - xor=["show_negative_stats"], - ), - stat_thresh=dict( - argstr="%.2f %.2f", - mandatory=True, - position=7, - ), - stat_thresh2=dict( - argstr="%.2f %.2f", - position=10, - ), - transparency=dict( - argstr="%s", - position=1, - usedefault=True, - ), - use_checkerboard=dict( - argstr="-c", - position=3, + argstr="%s", extensions=None, position=9, xor=["show_negative_stats"] ), + stat_thresh=dict(argstr="%.2f %.2f", mandatory=True, position=7), + stat_thresh2=dict(argstr="%.2f %.2f", 
position=10), + transparency=dict(argstr="%s", position=1, usedefault=True), + use_checkerboard=dict(argstr="-c", position=3), ) inputs = Overlay.input_spec() @@ -92,11 +48,7 @@ def test_Overlay_inputs(): def test_Overlay_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Overlay.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py b/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py index 0194526c70..90e9caa71a 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py +++ b/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py @@ -4,43 +4,25 @@ def test_PRELUDE_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), complex_phase_file=dict( argstr="--complex=%s", extensions=None, mandatory=True, xor=["magnitude_file", "phase_file"], ), - end=dict( - argstr="--end=%d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - label_file=dict( - argstr="--labels=%s", - extensions=None, - hash_files=False, - ), - labelprocess2d=dict( - argstr="--labelslices", - ), + end=dict(argstr="--end=%d"), + environ=dict(nohash=True, usedefault=True), + label_file=dict(argstr="--labels=%s", extensions=None, hash_files=False), + labelprocess2d=dict(argstr="--labelslices"), magnitude_file=dict( argstr="--abs=%s", extensions=None, mandatory=True, xor=["complex_phase_file"], ), - mask_file=dict( - argstr="--mask=%s", - extensions=None, - ), - num_partitions=dict( - argstr="--numphasesplit=%d", - ), + mask_file=dict(argstr="--mask=%s", extensions=None), + num_partitions=dict(argstr="--numphasesplit=%d"), output_type=dict(), phase_file=dict( argstr="--phase=%s", @@ -48,38 +30,15 @@ def test_PRELUDE_inputs(): mandatory=True, xor=["complex_phase_file"], ), - process2d=dict( - argstr="--slices", - xor=["labelprocess2d"], - ), - process3d=dict( - argstr="--force3D", - xor=["labelprocess2d", "process2d"], - ), - rawphase_file=dict( - argstr="--rawphase=%s", - extensions=None, - hash_files=False, - ), - removeramps=dict( - argstr="--removeramps", - ), - savemask_file=dict( - argstr="--savemask=%s", - extensions=None, - hash_files=False, - ), - start=dict( - argstr="--start=%d", - ), - threshold=dict( - argstr="--thresh=%.10f", - ), + process2d=dict(argstr="--slices", xor=["labelprocess2d"]), + process3d=dict(argstr="--force3D", xor=["labelprocess2d", "process2d"]), + rawphase_file=dict(argstr="--rawphase=%s", extensions=None, hash_files=False), + removeramps=dict(argstr="--removeramps"), + savemask_file=dict(argstr="--savemask=%s", extensions=None, hash_files=False), + start=dict(argstr="--start=%d"), + threshold=dict(argstr="--thresh=%.10f"), unwrapped_phase_file=dict( - argstr="--unwrap=%s", - extensions=None, - genfile=True, - hash_files=False, + argstr="--unwrap=%s", extensions=None, genfile=True, hash_files=False ), ) inputs = PRELUDE.input_spec() @@ -90,11 +49,7 @@ def test_PRELUDE_inputs(): def test_PRELUDE_outputs(): - output_map = dict( - unwrapped_phase_file=dict( - extensions=None, - ), - ) + output_map = dict(unwrapped_phase_file=dict(extensions=None)) outputs = PRELUDE.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py b/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py index 3a3ae14a78..1cc1217b82 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py 
@@ -4,48 +4,18 @@ def test_PercentileImage_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dimension=dict( - argstr="-%sperc", - position=4, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr="-dt %s", - position=1, - ), - nan2zeros=dict( - argstr="-nan", - position=3, - ), + args=dict(argstr="%s"), + dimension=dict(argstr="-%sperc", position=4, usedefault=True), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + internal_datatype=dict(argstr="-dt %s", position=1), + nan2zeros=dict(argstr="-nan", position=3), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr="-odt %s", - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 ), + output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), - perc=dict( - argstr="%f", - position=5, - ), + perc=dict(argstr="%f", position=5), ) inputs = PercentileImage.input_spec() @@ -55,11 +25,7 @@ def test_PercentileImage_inputs(): def test_PercentileImage_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = PercentileImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py b/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py index 8cf1d2e214..75a846aeb2 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py +++ b/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py @@ -4,35 +4,14 @@ def test_PlotMotionParams_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - mandatory=True, - position=1, - ), - in_source=dict( - mandatory=True, - ), - out_file=dict( - argstr="-o %s", - extensions=None, - genfile=True, - hash_files=False, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", mandatory=True, position=1), + in_source=dict(mandatory=True), + out_file=dict(argstr="-o %s", extensions=None, genfile=True, hash_files=False), output_type=dict(), - plot_size=dict( - argstr="%s", - ), - plot_type=dict( - argstr="%s", - mandatory=True, - ), + plot_size=dict(argstr="%s"), + plot_type=dict(argstr="%s", mandatory=True), ) inputs = PlotMotionParams.input_spec() @@ -42,11 +21,7 @@ def test_PlotMotionParams_inputs(): def test_PlotMotionParams_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = PlotMotionParams.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py b/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py index 5b4ebc46aa..703a347792 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py +++ b/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py @@ -4,72 +4,24 @@ def test_PlotTimeSeries_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - mandatory=True, - position=1, - ), - labels=dict( - argstr="%s", - ), - legend_file=dict( - argstr="--legend=%s", - extensions=None, - ), - 
out_file=dict( - argstr="-o %s", - extensions=None, - genfile=True, - hash_files=False, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", mandatory=True, position=1), + labels=dict(argstr="%s"), + legend_file=dict(argstr="--legend=%s", extensions=None), + out_file=dict(argstr="-o %s", extensions=None, genfile=True, hash_files=False), output_type=dict(), - plot_finish=dict( - argstr="--finish=%d", - xor=("plot_range",), - ), - plot_range=dict( - argstr="%s", - xor=("plot_start", "plot_finish"), - ), - plot_size=dict( - argstr="%s", - ), - plot_start=dict( - argstr="--start=%d", - xor=("plot_range",), - ), - sci_notation=dict( - argstr="--sci", - ), - title=dict( - argstr="%s", - ), - x_precision=dict( - argstr="--precision=%d", - ), - x_units=dict( - argstr="-u %d", - usedefault=True, - ), - y_max=dict( - argstr="--ymax=%.2f", - xor=("y_range",), - ), - y_min=dict( - argstr="--ymin=%.2f", - xor=("y_range",), - ), - y_range=dict( - argstr="%s", - xor=("y_min", "y_max"), - ), + plot_finish=dict(argstr="--finish=%d", xor=("plot_range",)), + plot_range=dict(argstr="%s", xor=("plot_start", "plot_finish")), + plot_size=dict(argstr="%s"), + plot_start=dict(argstr="--start=%d", xor=("plot_range",)), + sci_notation=dict(argstr="--sci"), + title=dict(argstr="%s"), + x_precision=dict(argstr="--precision=%d"), + x_units=dict(argstr="-u %d", usedefault=True), + y_max=dict(argstr="--ymax=%.2f", xor=("y_range",)), + y_min=dict(argstr="--ymin=%.2f", xor=("y_range",)), + y_range=dict(argstr="%s", xor=("y_min", "y_max")), ) inputs = PlotTimeSeries.input_spec() @@ -79,11 +31,7 @@ def test_PlotTimeSeries_inputs(): def test_PlotTimeSeries_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = PlotTimeSeries.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py b/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py index 874cbcf0e8..b4c2f58ab1 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py +++ b/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py @@ -4,25 +4,11 @@ def test_PowerSpectrum_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=0, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=1 ), output_type=dict(), ) @@ -34,11 +20,7 @@ def test_PowerSpectrum_inputs(): def test_PowerSpectrum_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = PowerSpectrum.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py b/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py index 2286dad026..4f731c95ad 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py +++ b/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py @@ -4,47 +4,15 @@ def test_PrepareFieldmap_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - delta_TE=dict( - argstr="%f", - mandatory=True, - position=-2, - usedefault=True, 
- ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_magnitude=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=3, - ), - in_phase=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - nocheck=dict( - argstr="--nocheck", - position=-1, - usedefault=True, - ), - out_fieldmap=dict( - argstr="%s", - extensions=None, - position=4, - ), + args=dict(argstr="%s"), + delta_TE=dict(argstr="%f", mandatory=True, position=-2, usedefault=True), + environ=dict(nohash=True, usedefault=True), + in_magnitude=dict(argstr="%s", extensions=None, mandatory=True, position=3), + in_phase=dict(argstr="%s", extensions=None, mandatory=True, position=2), + nocheck=dict(argstr="--nocheck", position=-1, usedefault=True), + out_fieldmap=dict(argstr="%s", extensions=None, position=4), output_type=dict(), - scanner=dict( - argstr="%s", - position=1, - usedefault=True, - ), + scanner=dict(argstr="%s", position=1, usedefault=True), ) inputs = PrepareFieldmap.input_spec() @@ -54,11 +22,7 @@ def test_PrepareFieldmap_inputs(): def test_PrepareFieldmap_outputs(): - output_map = dict( - out_fieldmap=dict( - extensions=None, - ), - ) + output_map = dict(out_fieldmap=dict(extensions=None)) outputs = PrepareFieldmap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py index aae5d80c57..0737972f17 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py +++ b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py @@ -4,139 +4,45 @@ def test_ProbTrackX_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - avoid_mp=dict( - argstr="--avoid=%s", - extensions=None, - ), - c_thresh=dict( - argstr="--cthr=%.3f", - ), - correct_path_distribution=dict( - argstr="--pd", - ), - dist_thresh=dict( - argstr="--distthresh=%.3f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fibst=dict( - argstr="--fibst=%d", - ), - force_dir=dict( - argstr="--forcedir", - usedefault=True, - ), - fsamples=dict( - mandatory=True, - ), - inv_xfm=dict( - argstr="--invxfm=%s", - extensions=None, - ), - loop_check=dict( - argstr="--loopcheck", - ), - mask=dict( - argstr="-m %s", - extensions=None, - mandatory=True, - ), - mask2=dict( - argstr="--mask2=%s", - extensions=None, - ), - mesh=dict( - argstr="--mesh=%s", - extensions=None, - ), - mod_euler=dict( - argstr="--modeuler", - ), - mode=dict( - argstr="--mode=%s", - genfile=True, - ), - n_samples=dict( - argstr="--nsamples=%d", - usedefault=True, - ), - n_steps=dict( - argstr="--nsteps=%d", - ), - network=dict( - argstr="--network", - ), - opd=dict( - argstr="--opd", - usedefault=True, - ), - os2t=dict( - argstr="--os2t", - ), - out_dir=dict( - argstr="--dir=%s", - genfile=True, - ), + args=dict(argstr="%s"), + avoid_mp=dict(argstr="--avoid=%s", extensions=None), + c_thresh=dict(argstr="--cthr=%.3f"), + correct_path_distribution=dict(argstr="--pd"), + dist_thresh=dict(argstr="--distthresh=%.3f"), + environ=dict(nohash=True, usedefault=True), + fibst=dict(argstr="--fibst=%d"), + force_dir=dict(argstr="--forcedir", usedefault=True), + fsamples=dict(mandatory=True), + inv_xfm=dict(argstr="--invxfm=%s", extensions=None), + loop_check=dict(argstr="--loopcheck"), + mask=dict(argstr="-m %s", extensions=None, mandatory=True), + mask2=dict(argstr="--mask2=%s", extensions=None), + mesh=dict(argstr="--mesh=%s", extensions=None), + mod_euler=dict(argstr="--modeuler"), + mode=dict(argstr="--mode=%s", genfile=True), + 
n_samples=dict(argstr="--nsamples=%d", usedefault=True), + n_steps=dict(argstr="--nsteps=%d"), + network=dict(argstr="--network"), + opd=dict(argstr="--opd", usedefault=True), + os2t=dict(argstr="--os2t"), + out_dir=dict(argstr="--dir=%s", genfile=True), output_type=dict(), - phsamples=dict( - mandatory=True, - ), - rand_fib=dict( - argstr="--randfib=%d", - ), - random_seed=dict( - argstr="--rseed", - ), - s2tastext=dict( - argstr="--s2tastext", - ), - sample_random_points=dict( - argstr="--sampvox", - ), - samples_base_name=dict( - argstr="--samples=%s", - usedefault=True, - ), - seed=dict( - argstr="--seed=%s", - mandatory=True, - ), - seed_ref=dict( - argstr="--seedref=%s", - extensions=None, - ), - step_length=dict( - argstr="--steplength=%.3f", - ), - stop_mask=dict( - argstr="--stop=%s", - extensions=None, - ), - target_masks=dict( - argstr="--targetmasks=%s", - ), - thsamples=dict( - mandatory=True, - ), - use_anisotropy=dict( - argstr="--usef", - ), - verbose=dict( - argstr="--verbose=%d", - ), - waypoints=dict( - argstr="--waypoints=%s", - extensions=None, - ), - xfm=dict( - argstr="--xfm=%s", - extensions=None, - ), + phsamples=dict(mandatory=True), + rand_fib=dict(argstr="--randfib=%d"), + random_seed=dict(argstr="--rseed"), + s2tastext=dict(argstr="--s2tastext"), + sample_random_points=dict(argstr="--sampvox"), + samples_base_name=dict(argstr="--samples=%s", usedefault=True), + seed=dict(argstr="--seed=%s", mandatory=True), + seed_ref=dict(argstr="--seedref=%s", extensions=None), + step_length=dict(argstr="--steplength=%.3f"), + stop_mask=dict(argstr="--stop=%s", extensions=None), + target_masks=dict(argstr="--targetmasks=%s"), + thsamples=dict(mandatory=True), + use_anisotropy=dict(argstr="--usef"), + verbose=dict(argstr="--verbose=%d"), + waypoints=dict(argstr="--waypoints=%s", extensions=None), + xfm=dict(argstr="--xfm=%s", extensions=None), ) inputs = ProbTrackX.input_spec() @@ -148,14 +54,10 @@ def test_ProbTrackX_inputs(): def test_ProbTrackX_outputs(): output_map = dict( fdt_paths=dict(), - log=dict( - extensions=None, - ), + log=dict(extensions=None), particle_files=dict(), targets=dict(), - way_total=dict( - extensions=None, - ), + way_total=dict(extensions=None), ) outputs = ProbTrackX.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py index 1813bd3c9c..057514614b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py +++ b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py @@ -4,186 +4,59 @@ def test_ProbTrackX2_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - avoid_mp=dict( - argstr="--avoid=%s", - extensions=None, - ), - c_thresh=dict( - argstr="--cthr=%.3f", - ), - colmask4=dict( - argstr="--colmask4=%s", - extensions=None, - ), - correct_path_distribution=dict( - argstr="--pd", - ), - dist_thresh=dict( - argstr="--distthresh=%.3f", - ), - distthresh1=dict( - argstr="--distthresh1=%.3f", - ), - distthresh3=dict( - argstr="--distthresh3=%.3f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fibst=dict( - argstr="--fibst=%d", - ), - fopd=dict( - argstr="--fopd=%s", - extensions=None, - ), - force_dir=dict( - argstr="--forcedir", - usedefault=True, - ), - fsamples=dict( - mandatory=True, - ), - inv_xfm=dict( - argstr="--invxfm=%s", - extensions=None, - ), - loop_check=dict( - argstr="--loopcheck", - ), - lrtarget3=dict( - argstr="--lrtarget3=%s", - extensions=None, - ), - mask=dict( - argstr="-m %s", - extensions=None, - mandatory=True, - ), - 
meshspace=dict( - argstr="--meshspace=%s", - ), - mod_euler=dict( - argstr="--modeuler", - ), - n_samples=dict( - argstr="--nsamples=%d", - usedefault=True, - ), - n_steps=dict( - argstr="--nsteps=%d", - ), - network=dict( - argstr="--network", - ), - omatrix1=dict( - argstr="--omatrix1", - ), - omatrix2=dict( - argstr="--omatrix2", - requires=["target2"], - ), - omatrix3=dict( - argstr="--omatrix3", - requires=["target3", "lrtarget3"], - ), - omatrix4=dict( - argstr="--omatrix4", - ), - onewaycondition=dict( - argstr="--onewaycondition", - ), - opd=dict( - argstr="--opd", - usedefault=True, - ), - os2t=dict( - argstr="--os2t", - ), - out_dir=dict( - argstr="--dir=%s", - genfile=True, - ), + args=dict(argstr="%s"), + avoid_mp=dict(argstr="--avoid=%s", extensions=None), + c_thresh=dict(argstr="--cthr=%.3f"), + colmask4=dict(argstr="--colmask4=%s", extensions=None), + correct_path_distribution=dict(argstr="--pd"), + dist_thresh=dict(argstr="--distthresh=%.3f"), + distthresh1=dict(argstr="--distthresh1=%.3f"), + distthresh3=dict(argstr="--distthresh3=%.3f"), + environ=dict(nohash=True, usedefault=True), + fibst=dict(argstr="--fibst=%d"), + fopd=dict(argstr="--fopd=%s", extensions=None), + force_dir=dict(argstr="--forcedir", usedefault=True), + fsamples=dict(mandatory=True), + inv_xfm=dict(argstr="--invxfm=%s", extensions=None), + loop_check=dict(argstr="--loopcheck"), + lrtarget3=dict(argstr="--lrtarget3=%s", extensions=None), + mask=dict(argstr="-m %s", extensions=None, mandatory=True), + meshspace=dict(argstr="--meshspace=%s"), + mod_euler=dict(argstr="--modeuler"), + n_samples=dict(argstr="--nsamples=%d", usedefault=True), + n_steps=dict(argstr="--nsteps=%d"), + network=dict(argstr="--network"), + omatrix1=dict(argstr="--omatrix1"), + omatrix2=dict(argstr="--omatrix2", requires=["target2"]), + omatrix3=dict(argstr="--omatrix3", requires=["target3", "lrtarget3"]), + omatrix4=dict(argstr="--omatrix4"), + onewaycondition=dict(argstr="--onewaycondition"), + opd=dict(argstr="--opd", usedefault=True), + os2t=dict(argstr="--os2t"), + out_dir=dict(argstr="--dir=%s", genfile=True), output_type=dict(), - phsamples=dict( - mandatory=True, - ), - rand_fib=dict( - argstr="--randfib=%d", - ), - random_seed=dict( - argstr="--rseed", - ), - s2tastext=dict( - argstr="--s2tastext", - ), - sample_random_points=dict( - argstr="--sampvox", - ), - samples_base_name=dict( - argstr="--samples=%s", - usedefault=True, - ), - seed=dict( - argstr="--seed=%s", - mandatory=True, - ), - seed_ref=dict( - argstr="--seedref=%s", - extensions=None, - ), - simple=dict( - argstr="--simple", - ), - step_length=dict( - argstr="--steplength=%.3f", - ), - stop_mask=dict( - argstr="--stop=%s", - extensions=None, - ), - target2=dict( - argstr="--target2=%s", - extensions=None, - ), - target3=dict( - argstr="--target3=%s", - extensions=None, - ), - target4=dict( - argstr="--target4=%s", - extensions=None, - ), - target_masks=dict( - argstr="--targetmasks=%s", - ), - thsamples=dict( - mandatory=True, - ), - use_anisotropy=dict( - argstr="--usef", - ), - verbose=dict( - argstr="--verbose=%d", - ), - waycond=dict( - argstr="--waycond=%s", - ), - wayorder=dict( - argstr="--wayorder", - ), - waypoints=dict( - argstr="--waypoints=%s", - extensions=None, - ), - xfm=dict( - argstr="--xfm=%s", - extensions=None, - ), + phsamples=dict(mandatory=True), + rand_fib=dict(argstr="--randfib=%d"), + random_seed=dict(argstr="--rseed"), + s2tastext=dict(argstr="--s2tastext"), + sample_random_points=dict(argstr="--sampvox"), + 
samples_base_name=dict(argstr="--samples=%s", usedefault=True), + seed=dict(argstr="--seed=%s", mandatory=True), + seed_ref=dict(argstr="--seedref=%s", extensions=None), + simple=dict(argstr="--simple"), + step_length=dict(argstr="--steplength=%.3f"), + stop_mask=dict(argstr="--stop=%s", extensions=None), + target2=dict(argstr="--target2=%s", extensions=None), + target3=dict(argstr="--target3=%s", extensions=None), + target4=dict(argstr="--target4=%s", extensions=None), + target_masks=dict(argstr="--targetmasks=%s"), + thsamples=dict(mandatory=True), + use_anisotropy=dict(argstr="--usef"), + verbose=dict(argstr="--verbose=%d"), + waycond=dict(argstr="--waycond=%s"), + wayorder=dict(argstr="--wayorder"), + waypoints=dict(argstr="--waypoints=%s", extensions=None), + xfm=dict(argstr="--xfm=%s", extensions=None), ) inputs = ProbTrackX2.input_spec() @@ -195,29 +68,15 @@ def test_ProbTrackX2_inputs(): def test_ProbTrackX2_outputs(): output_map = dict( fdt_paths=dict(), - log=dict( - extensions=None, - ), - lookup_tractspace=dict( - extensions=None, - ), - matrix1_dot=dict( - extensions=None, - ), - matrix2_dot=dict( - extensions=None, - ), - matrix3_dot=dict( - extensions=None, - ), - network_matrix=dict( - extensions=None, - ), + log=dict(extensions=None), + lookup_tractspace=dict(extensions=None), + matrix1_dot=dict(extensions=None), + matrix2_dot=dict(extensions=None), + matrix3_dot=dict(extensions=None), + network_matrix=dict(extensions=None), particle_files=dict(), targets=dict(), - way_total=dict( - extensions=None, - ), + way_total=dict(extensions=None), ) outputs = ProbTrackX2.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py b/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py index 420eacb9c2..d6e3615eca 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py +++ b/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py @@ -4,24 +4,11 @@ def test_ProjThresh_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr="%s", - mandatory=True, - position=0, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_files=dict(argstr="%s", mandatory=True, position=0), output_type=dict(), - threshold=dict( - argstr="%d", - mandatory=True, - position=1, - ), + threshold=dict(argstr="%d", mandatory=True, position=1), ) inputs = ProjThresh.input_spec() @@ -31,9 +18,7 @@ def test_ProjThresh_inputs(): def test_ProjThresh_outputs(): - output_map = dict( - out_files=dict(), - ) + output_map = dict(out_files=dict()) outputs = ProjThresh.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Randomise.py b/nipype/interfaces/fsl/tests/test_auto_Randomise.py index 9b0b74bf28..82993b4c2a 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Randomise.py +++ b/nipype/interfaces/fsl/tests/test_auto_Randomise.py @@ -4,107 +4,36 @@ def test_Randomise_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - base_name=dict( - argstr='-o "%s"', - position=1, - usedefault=True, - ), - c_thresh=dict( - argstr="-c %.1f", - ), - cm_thresh=dict( - argstr="-C %.1f", - ), - demean=dict( - argstr="-D", - ), - design_mat=dict( - argstr="-d %s", - extensions=None, - position=2, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - f_c_thresh=dict( - argstr="-F %.2f", - ), - f_cm_thresh=dict( - argstr="-S %.2f", - ), - f_only=dict( - argstr="--fonly", - ), - fcon=dict( - argstr="-f %s", - extensions=None, - ), - 
in_file=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - position=0, - ), - mask=dict( - argstr="-m %s", - extensions=None, - ), - num_perm=dict( - argstr="-n %d", - ), - one_sample_group_mean=dict( - argstr="-1", - ), + args=dict(argstr="%s"), + base_name=dict(argstr='-o "%s"', position=1, usedefault=True), + c_thresh=dict(argstr="-c %.1f"), + cm_thresh=dict(argstr="-C %.1f"), + demean=dict(argstr="-D"), + design_mat=dict(argstr="-d %s", extensions=None, position=2), + environ=dict(nohash=True, usedefault=True), + f_c_thresh=dict(argstr="-F %.2f"), + f_cm_thresh=dict(argstr="-S %.2f"), + f_only=dict(argstr="--fonly"), + fcon=dict(argstr="-f %s", extensions=None), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=0), + mask=dict(argstr="-m %s", extensions=None), + num_perm=dict(argstr="-n %d"), + one_sample_group_mean=dict(argstr="-1"), output_type=dict(), - p_vec_n_dist_files=dict( - argstr="-P", - ), - raw_stats_imgs=dict( - argstr="-R", - ), - seed=dict( - argstr="--seed=%d", - ), - show_info_parallel_mode=dict( - argstr="-Q", - ), - show_total_perms=dict( - argstr="-q", - ), - tcon=dict( - argstr="-t %s", - extensions=None, - position=3, - ), - tfce=dict( - argstr="-T", - ), - tfce2D=dict( - argstr="--T2", - ), - tfce_C=dict( - argstr="--tfce_C=%.2f", - ), - tfce_E=dict( - argstr="--tfce_E=%.2f", - ), - tfce_H=dict( - argstr="--tfce_H=%.2f", - ), - var_smooth=dict( - argstr="-v %d", - ), - vox_p_values=dict( - argstr="-x", - ), - x_block_labels=dict( - argstr="-e %s", - extensions=None, - ), + p_vec_n_dist_files=dict(argstr="-P"), + raw_stats_imgs=dict(argstr="-R"), + seed=dict(argstr="--seed=%d"), + show_info_parallel_mode=dict(argstr="-Q"), + show_total_perms=dict(argstr="-q"), + tcon=dict(argstr="-t %s", extensions=None, position=3), + tfce=dict(argstr="-T"), + tfce2D=dict(argstr="--T2"), + tfce_C=dict(argstr="--tfce_C=%.2f"), + tfce_E=dict(argstr="--tfce_E=%.2f"), + tfce_H=dict(argstr="--tfce_H=%.2f"), + var_smooth=dict(argstr="-v %d"), + vox_p_values=dict(argstr="-x"), + x_block_labels=dict(argstr="-e %s", extensions=None), ) inputs = Randomise.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py b/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py index e008eb44e6..e26b2e2ed8 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py +++ b/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py @@ -4,24 +4,10 @@ def test_Reorient2Std_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - ), - out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True), + out_file=dict(argstr="%s", extensions=None, genfile=True, hash_files=False), output_type=dict(), ) inputs = Reorient2Std.input_spec() @@ -32,11 +18,7 @@ def test_Reorient2Std_inputs(): def test_Reorient2Std_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Reorient2Std.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py b/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py index b5598f0de4..b74f585f64 100644 --- a/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py +++ 
b/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py @@ -4,22 +4,10 @@ def test_RobustFOV_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - brainsize=dict( - argstr="-b %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - position=0, - ), + args=dict(argstr="%s"), + brainsize=dict(argstr="-b %d"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=0), out_roi=dict( argstr="-r %s", extensions=None, @@ -45,12 +33,7 @@ def test_RobustFOV_inputs(): def test_RobustFOV_outputs(): output_map = dict( - out_roi=dict( - extensions=None, - ), - out_transform=dict( - extensions=None, - ), + out_roi=dict(extensions=None), out_transform=dict(extensions=None) ) outputs = RobustFOV.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_SMM.py b/nipype/interfaces/fsl/tests/test_auto_SMM.py index 51777eaed9..0c7bb96c90 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SMM.py +++ b/nipype/interfaces/fsl/tests/test_auto_SMM.py @@ -4,13 +4,8 @@ def test_SMM_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), mask=dict( argstr='--mask="%s"', copyfile=False, @@ -18,10 +13,7 @@ def test_SMM_inputs(): mandatory=True, position=1, ), - no_deactivation_class=dict( - argstr="--zfstatmode", - position=2, - ), + no_deactivation_class=dict(argstr="--zfstatmode", position=2), output_type=dict(), spatial_data_file=dict( argstr='--sdf="%s"', @@ -40,15 +32,9 @@ def test_SMM_inputs(): def test_SMM_outputs(): output_map = dict( - activation_p_map=dict( - extensions=None, - ), - deactivation_p_map=dict( - extensions=None, - ), - null_p_map=dict( - extensions=None, - ), + activation_p_map=dict(extensions=None), + deactivation_p_map=dict(extensions=None), + null_p_map=dict(extensions=None), ) outputs = SMM.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_SUSAN.py b/nipype/interfaces/fsl/tests/test_auto_SUSAN.py index 427b770222..4ad74f7d39 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SUSAN.py +++ b/nipype/interfaces/fsl/tests/test_auto_SUSAN.py @@ -4,52 +4,18 @@ def test_SUSAN_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - brightness_threshold=dict( - argstr="%.10f", - mandatory=True, - position=2, - ), - dimension=dict( - argstr="%d", - position=4, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fwhm=dict( - argstr="%.10f", - mandatory=True, - position=3, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=1, - ), + args=dict(argstr="%s"), + brightness_threshold=dict(argstr="%.10f", mandatory=True, position=2), + dimension=dict(argstr="%d", position=4, usedefault=True), + environ=dict(nohash=True, usedefault=True), + fwhm=dict(argstr="%.10f", mandatory=True, position=3), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-1 ), output_type=dict(), - usans=dict( - argstr="", - position=6, - usedefault=True, - ), - use_median=dict( - argstr="%d", - position=5, - usedefault=True, - ), + usans=dict(argstr="", position=6, usedefault=True), + use_median=dict(argstr="%d", position=5, usedefault=True), ) inputs = SUSAN.input_spec() @@ 
-59,11 +25,7 @@ def test_SUSAN_inputs(): def test_SUSAN_outputs(): - output_map = dict( - smoothed_file=dict( - extensions=None, - ), - ) + output_map = dict(smoothed_file=dict(extensions=None)) outputs = SUSAN.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_SigLoss.py b/nipype/interfaces/fsl/tests/test_auto_SigLoss.py index 11be93c5b9..91062bcbaa 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SigLoss.py +++ b/nipype/interfaces/fsl/tests/test_auto_SigLoss.py @@ -4,34 +4,14 @@ def test_SigLoss_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - echo_time=dict( - argstr="--te=%f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - ), - mask_file=dict( - argstr="-m %s", - extensions=None, - ), - out_file=dict( - argstr="-s %s", - extensions=None, - genfile=True, - ), + args=dict(argstr="%s"), + echo_time=dict(argstr="--te=%f"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True), + mask_file=dict(argstr="-m %s", extensions=None), + out_file=dict(argstr="-s %s", extensions=None, genfile=True), output_type=dict(), - slice_direction=dict( - argstr="-d %s", - ), + slice_direction=dict(argstr="-d %s"), ) inputs = SigLoss.input_spec() @@ -41,11 +21,7 @@ def test_SigLoss_inputs(): def test_SigLoss_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = SigLoss.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Slice.py b/nipype/interfaces/fsl/tests/test_auto_Slice.py index f5360716c6..3003cbd77c 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Slice.py +++ b/nipype/interfaces/fsl/tests/test_auto_Slice.py @@ -4,24 +4,12 @@ def test_Slice_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="%s", - copyfile=False, - extensions=None, - mandatory=True, - position=0, - ), - out_base_name=dict( - argstr="%s", - position=1, + argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0 ), + out_base_name=dict(argstr="%s", position=1), output_type=dict(), ) inputs = Slice.input_spec() @@ -32,9 +20,7 @@ def test_Slice_inputs(): def test_Slice_outputs(): - output_map = dict( - out_files=dict(), - ) + output_map = dict(out_files=dict()) outputs = Slice.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py b/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py index acdbd8c2ca..4bd3452b55 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py +++ b/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py @@ -4,49 +4,20 @@ def test_SliceTimer_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - custom_order=dict( - argstr="--ocustom=%s", - extensions=None, - ), - custom_timings=dict( - argstr="--tcustom=%s", - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - global_shift=dict( - argstr="--tglobal", - ), - in_file=dict( - argstr="--in=%s", - extensions=None, - mandatory=True, - position=0, - ), - index_dir=dict( - argstr="--down", - ), - interleaved=dict( - argstr="--odd", - ), + args=dict(argstr="%s"), + custom_order=dict(argstr="--ocustom=%s", extensions=None), + 
custom_timings=dict(argstr="--tcustom=%s", extensions=None), + environ=dict(nohash=True, usedefault=True), + global_shift=dict(argstr="--tglobal"), + in_file=dict(argstr="--in=%s", extensions=None, mandatory=True, position=0), + index_dir=dict(argstr="--down"), + interleaved=dict(argstr="--odd"), out_file=dict( - argstr="--out=%s", - extensions=None, - genfile=True, - hash_files=False, + argstr="--out=%s", extensions=None, genfile=True, hash_files=False ), output_type=dict(), - slice_direction=dict( - argstr="--direction=%d", - ), - time_repetition=dict( - argstr="--repeat=%f", - ), + slice_direction=dict(argstr="--direction=%d"), + time_repetition=dict(argstr="--repeat=%f"), ) inputs = SliceTimer.input_spec() @@ -56,11 +27,7 @@ def test_SliceTimer_inputs(): def test_SliceTimer_outputs(): - output_map = dict( - slice_time_corrected_file=dict( - extensions=None, - ), - ) + output_map = dict(slice_time_corrected_file=dict(extensions=None)) outputs = SliceTimer.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Slicer.py b/nipype/interfaces/fsl/tests/test_auto_Slicer.py index 8e3195fd39..36d676441d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Slicer.py +++ b/nipype/interfaces/fsl/tests/test_auto_Slicer.py @@ -10,61 +10,23 @@ def test_Slicer_inputs(): requires=["image_width"], xor=("single_slice", "middle_slices", "all_axial", "sample_axial"), ), - args=dict( - argstr="%s", - ), - colour_map=dict( - argstr="-l %s", - extensions=None, - position=4, - ), - dither_edges=dict( - argstr="-t", - position=7, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - image_edges=dict( - argstr="%s", - extensions=None, - position=2, - ), - image_width=dict( - argstr="%d", - position=-2, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=1, - ), - intensity_range=dict( - argstr="-i %.3f %.3f", - position=5, - ), - label_slices=dict( - argstr="-L", - position=3, - usedefault=True, - ), + args=dict(argstr="%s"), + colour_map=dict(argstr="-l %s", extensions=None, position=4), + dither_edges=dict(argstr="-t", position=7), + environ=dict(nohash=True, usedefault=True), + image_edges=dict(argstr="%s", extensions=None, position=2), + image_width=dict(argstr="%d", position=-2), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1), + intensity_range=dict(argstr="-i %.3f %.3f", position=5), + label_slices=dict(argstr="-L", position=3, usedefault=True), middle_slices=dict( argstr="-a", position=10, xor=("single_slice", "middle_slices", "all_axial", "sample_axial"), ), - nearest_neighbour=dict( - argstr="-n", - position=8, - ), + nearest_neighbour=dict(argstr="-n", position=8), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-1 ), output_type=dict(), sample_axial=dict( @@ -73,29 +35,16 @@ def test_Slicer_inputs(): requires=["image_width"], xor=("single_slice", "middle_slices", "all_axial", "sample_axial"), ), - scaling=dict( - argstr="-s %f", - position=0, - ), - show_orientation=dict( - argstr="%s", - position=9, - usedefault=True, - ), + scaling=dict(argstr="-s %f", position=0), + show_orientation=dict(argstr="%s", position=9, usedefault=True), single_slice=dict( argstr="-%s", position=10, requires=["slice_number"], xor=("single_slice", "middle_slices", "all_axial", "sample_axial"), ), - slice_number=dict( - argstr="-%d", - position=11, - ), - threshold_edges=dict( - 
argstr="-e %.3f", - position=6, - ), + slice_number=dict(argstr="-%d", position=11), + threshold_edges=dict(argstr="-e %.3f", position=6), ) inputs = Slicer.input_spec() @@ -105,11 +54,7 @@ def test_Slicer_inputs(): def test_Slicer_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Slicer.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Smooth.py b/nipype/interfaces/fsl/tests/test_auto_Smooth.py index 9d9324770b..3e5ac40fe1 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Smooth.py +++ b/nipype/interfaces/fsl/tests/test_auto_Smooth.py @@ -4,25 +4,15 @@ def test_Smooth_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), fwhm=dict( argstr="-kernel gauss %.03f -fmean", mandatory=True, position=1, xor=["sigma"], ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=0, - ), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), output_type=dict(), sigma=dict( argstr="-kernel gauss %.03f -fmean", @@ -47,11 +37,7 @@ def test_Smooth_inputs(): def test_Smooth_outputs(): - output_map = dict( - smoothed_file=dict( - extensions=None, - ), - ) + output_map = dict(smoothed_file=dict(extensions=None)) outputs = Smooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py b/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py index bf21438d1d..f1e0a409f7 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py +++ b/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py @@ -4,34 +4,13 @@ def test_SmoothEstimate_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dof=dict( - argstr="--dof=%d", - mandatory=True, - xor=["zstat_file"], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - mask_file=dict( - argstr="--mask=%s", - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s"), + dof=dict(argstr="--dof=%d", mandatory=True, xor=["zstat_file"]), + environ=dict(nohash=True, usedefault=True), + mask_file=dict(argstr="--mask=%s", extensions=None, mandatory=True), output_type=dict(), - residual_fit_file=dict( - argstr="--res=%s", - extensions=None, - requires=["dof"], - ), - zstat_file=dict( - argstr="--zstat=%s", - extensions=None, - xor=["dof"], - ), + residual_fit_file=dict(argstr="--res=%s", extensions=None, requires=["dof"]), + zstat_file=dict(argstr="--zstat=%s", extensions=None, xor=["dof"]), ) inputs = SmoothEstimate.input_spec() @@ -41,11 +20,7 @@ def test_SmoothEstimate_inputs(): def test_SmoothEstimate_outputs(): - output_map = dict( - dlh=dict(), - resels=dict(), - volume=dict(), - ) + output_map = dict(dlh=dict(), resels=dict(), volume=dict()) outputs = SmoothEstimate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py b/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py index 0d6f68cbea..60b6bc6e3e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py +++ b/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py @@ -4,58 +4,19 @@ def test_SpatialFilter_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - 
internal_datatype=dict( - argstr="-dt %s", - position=1, - ), - kernel_file=dict( - argstr="%s", - extensions=None, - position=5, - xor=["kernel_size"], - ), - kernel_shape=dict( - argstr="-kernel %s", - position=4, - ), - kernel_size=dict( - argstr="%.4f", - position=5, - xor=["kernel_file"], - ), - nan2zeros=dict( - argstr="-nan", - position=3, - ), - operation=dict( - argstr="-f%s", - mandatory=True, - position=6, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + internal_datatype=dict(argstr="-dt %s", position=1), + kernel_file=dict(argstr="%s", extensions=None, position=5, xor=["kernel_size"]), + kernel_shape=dict(argstr="-kernel %s", position=4), + kernel_size=dict(argstr="%.4f", position=5, xor=["kernel_file"]), + nan2zeros=dict(argstr="-nan", position=3), + operation=dict(argstr="-f%s", mandatory=True, position=6), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr="-odt %s", - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 ), + output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = SpatialFilter.input_spec() @@ -66,11 +27,7 @@ def test_SpatialFilter_inputs(): def test_SpatialFilter_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = SpatialFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Split.py b/nipype/interfaces/fsl/tests/test_auto_Split.py index 79aa3a7ade..d47bafb409 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Split.py +++ b/nipype/interfaces/fsl/tests/test_auto_Split.py @@ -4,28 +4,11 @@ def test_Split_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dimension=dict( - argstr="-%s", - mandatory=True, - position=2, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=0, - ), - out_base_name=dict( - argstr="%s", - position=1, - ), + args=dict(argstr="%s"), + dimension=dict(argstr="-%s", mandatory=True, position=2), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), + out_base_name=dict(argstr="%s", position=1), output_type=dict(), ) inputs = Split.input_spec() @@ -36,9 +19,7 @@ def test_Split_inputs(): def test_Split_outputs(): - output_map = dict( - out_files=dict(), - ) + output_map = dict(out_files=dict()) outputs = Split.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_StdImage.py b/nipype/interfaces/fsl/tests/test_auto_StdImage.py index 226abb5e5e..c829c91224 100644 --- a/nipype/interfaces/fsl/tests/test_auto_StdImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_StdImage.py @@ -4,43 +4,16 @@ def test_StdImage_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dimension=dict( - argstr="-%sstd", - position=4, - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr="-dt %s", - position=1, - ), - nan2zeros=dict( - argstr="-nan", - position=3, - ), + args=dict(argstr="%s"), + dimension=dict(argstr="-%sstd", position=4, usedefault=True), + environ=dict(nohash=True, 
usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + internal_datatype=dict(argstr="-dt %s", position=1), + nan2zeros=dict(argstr="-nan", position=3), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr="-odt %s", - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 ), + output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = StdImage.input_spec() @@ -51,11 +24,7 @@ def test_StdImage_inputs(): def test_StdImage_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = StdImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py b/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py index 26b3b3ff54..cdfaf37a42 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py +++ b/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py @@ -4,29 +4,11 @@ def test_SwapDimensions_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position="1", - ), - new_dims=dict( - argstr="%s %s %s", - mandatory=True, - ), - out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position="1"), + new_dims=dict(argstr="%s %s %s", mandatory=True), + out_file=dict(argstr="%s", extensions=None, genfile=True, hash_files=False), output_type=dict(), ) inputs = SwapDimensions.input_spec() @@ -37,11 +19,7 @@ def test_SwapDimensions_inputs(): def test_SwapDimensions_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = SwapDimensions.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_TOPUP.py b/nipype/interfaces/fsl/tests/test_auto_TOPUP.py index 3358190dfb..4d77301342 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TOPUP.py +++ b/nipype/interfaces/fsl/tests/test_auto_TOPUP.py @@ -4,13 +4,8 @@ def test_TOPUP_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - config=dict( - argstr="--config=%s", - usedefault=True, - ), + args=dict(argstr="%s"), + config=dict(argstr="--config=%s", usedefault=True), encoding_direction=dict( argstr="--datain=%s", mandatory=True, @@ -23,33 +18,14 @@ def test_TOPUP_inputs(): mandatory=True, xor=["encoding_direction"], ), - environ=dict( - nohash=True, - usedefault=True, - ), - estmov=dict( - argstr="--estmov=%d", - ), - fwhm=dict( - argstr="--fwhm=%f", - ), - in_file=dict( - argstr="--imain=%s", - extensions=None, - mandatory=True, - ), - interp=dict( - argstr="--interp=%s", - ), - max_iter=dict( - argstr="--miter=%d", - ), - minmet=dict( - argstr="--minmet=%d", - ), - numprec=dict( - argstr="--numprec=%s", - ), + environ=dict(nohash=True, usedefault=True), + estmov=dict(argstr="--estmov=%d"), + fwhm=dict(argstr="--fwhm=%f"), + in_file=dict(argstr="--imain=%s", extensions=None, mandatory=True), + interp=dict(argstr="--interp=%s"), + max_iter=dict(argstr="--miter=%d"), + minmet=dict(argstr="--minmet=%d"), + numprec=dict(argstr="--numprec=%s"), out_base=dict( argstr="--out=%s", 
extensions=None, @@ -71,11 +47,7 @@ def test_TOPUP_inputs(): name_source=["in_file"], name_template="%s_field", ), - out_jac_prefix=dict( - argstr="--jacout=%s", - hash_files=False, - usedefault=True, - ), + out_jac_prefix=dict(argstr="--jacout=%s", hash_files=False, usedefault=True), out_logfile=dict( argstr="--logout=%s", extensions=None, @@ -84,46 +56,20 @@ def test_TOPUP_inputs(): name_source=["in_file"], name_template="%s_topup.log", ), - out_mat_prefix=dict( - argstr="--rbmout=%s", - hash_files=False, - usedefault=True, - ), - out_warp_prefix=dict( - argstr="--dfout=%s", - hash_files=False, - usedefault=True, - ), + out_mat_prefix=dict(argstr="--rbmout=%s", hash_files=False, usedefault=True), + out_warp_prefix=dict(argstr="--dfout=%s", hash_files=False, usedefault=True), output_type=dict(), readout_times=dict( - mandatory=True, - requires=["encoding_direction"], - xor=["encoding_file"], - ), - reg_lambda=dict( - argstr="--lambda=%0.f", - ), - regmod=dict( - argstr="--regmod=%s", - ), - regrid=dict( - argstr="--regrid=%d", - ), - scale=dict( - argstr="--scale=%d", - ), - splineorder=dict( - argstr="--splineorder=%d", - ), - ssqlambda=dict( - argstr="--ssqlambda=%d", - ), - subsamp=dict( - argstr="--subsamp=%d", - ), - warp_res=dict( - argstr="--warpres=%f", - ), + mandatory=True, requires=["encoding_direction"], xor=["encoding_file"] + ), + reg_lambda=dict(argstr="--lambda=%0.f"), + regmod=dict(argstr="--regmod=%s"), + regrid=dict(argstr="--regrid=%d"), + scale=dict(argstr="--scale=%d"), + splineorder=dict(argstr="--splineorder=%d"), + ssqlambda=dict(argstr="--ssqlambda=%d"), + subsamp=dict(argstr="--subsamp=%d"), + warp_res=dict(argstr="--warpres=%f"), ) inputs = TOPUP.input_spec() @@ -134,26 +80,14 @@ def test_TOPUP_inputs(): def test_TOPUP_outputs(): output_map = dict( - out_corrected=dict( - extensions=None, - ), - out_enc_file=dict( - extensions=None, - ), - out_field=dict( - extensions=None, - ), - out_fieldcoef=dict( - extensions=None, - ), + out_corrected=dict(extensions=None), + out_enc_file=dict(extensions=None), + out_field=dict(extensions=None), + out_fieldcoef=dict(extensions=None), out_jacs=dict(), - out_logfile=dict( - extensions=None, - ), + out_logfile=dict(extensions=None), out_mats=dict(), - out_movpar=dict( - extensions=None, - ), + out_movpar=dict(extensions=None), out_warps=dict(), ) outputs = TOPUP.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py b/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py index 3a825e4e45..8eaf910ed6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py +++ b/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py @@ -4,48 +4,17 @@ def test_TemporalFilter_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - highpass_sigma=dict( - argstr="-bptf %.6f", - position=4, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr="-dt %s", - position=1, - ), - lowpass_sigma=dict( - argstr="%.6f", - position=5, - usedefault=True, - ), - nan2zeros=dict( - argstr="-nan", - position=3, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + highpass_sigma=dict(argstr="-bptf %.6f", position=4, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + internal_datatype=dict(argstr="-dt %s", position=1), + lowpass_sigma=dict(argstr="%.6f", position=5, usedefault=True), + 
nan2zeros=dict(argstr="-nan", position=3), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr="-odt %s", - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 ), + output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = TemporalFilter.input_spec() @@ -56,11 +25,7 @@ def test_TemporalFilter_inputs(): def test_TemporalFilter_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = TemporalFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Threshold.py b/nipype/interfaces/fsl/tests/test_auto_Threshold.py index cc3446bd47..ee6bdfdaac 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Threshold.py +++ b/nipype/interfaces/fsl/tests/test_auto_Threshold.py @@ -4,50 +4,19 @@ def test_Threshold_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - direction=dict( - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr="-dt %s", - position=1, - ), - nan2zeros=dict( - argstr="-nan", - position=3, - ), + args=dict(argstr="%s"), + direction=dict(usedefault=True), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + internal_datatype=dict(argstr="-dt %s", position=1), + nan2zeros=dict(argstr="-nan", position=3), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr="-odt %s", - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 ), + output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), - thresh=dict( - argstr="%s", - mandatory=True, - position=4, - ), - use_nonzero_voxels=dict( - requires=["use_robust_range"], - ), + thresh=dict(argstr="%s", mandatory=True, position=4), + use_nonzero_voxels=dict(requires=["use_robust_range"]), use_robust_range=dict(), ) inputs = Threshold.input_spec() @@ -58,11 +27,7 @@ def test_Threshold_inputs(): def test_Threshold_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Threshold.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py b/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py index 631741da49..e3dc720e45 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py +++ b/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py @@ -4,52 +4,23 @@ def test_TractSkeleton_inputs(): input_map = dict( - alt_data_file=dict( - argstr="-a %s", - extensions=None, - ), - alt_skeleton=dict( - argstr="-s %s", - extensions=None, - ), - args=dict( - argstr="%s", - ), - data_file=dict( - extensions=None, - ), - distance_map=dict( - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - ), + alt_data_file=dict(argstr="-a %s", extensions=None), + alt_skeleton=dict(argstr="-s %s", extensions=None), + args=dict(argstr="%s"), + data_file=dict(extensions=None), + distance_map=dict(extensions=None), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-i %s", 
extensions=None, mandatory=True), output_type=dict(), project_data=dict( argstr="-p %.3f %s %s %s %s", requires=["threshold", "distance_map", "data_file"], ), - projected_data=dict( - extensions=None, - ), - search_mask_file=dict( - extensions=None, - xor=["use_cingulum_mask"], - ), - skeleton_file=dict( - argstr="-o %s", - ), + projected_data=dict(extensions=None), + search_mask_file=dict(extensions=None, xor=["use_cingulum_mask"]), + skeleton_file=dict(argstr="-o %s"), threshold=dict(), - use_cingulum_mask=dict( - usedefault=True, - xor=["search_mask_file"], - ), + use_cingulum_mask=dict(usedefault=True, xor=["search_mask_file"]), ) inputs = TractSkeleton.input_spec() @@ -60,12 +31,7 @@ def test_TractSkeleton_inputs(): def test_TractSkeleton_outputs(): output_map = dict( - projected_data=dict( - extensions=None, - ), - skeleton_file=dict( - extensions=None, - ), + projected_data=dict(extensions=None), skeleton_file=dict(extensions=None) ) outputs = TractSkeleton.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Training.py b/nipype/interfaces/fsl/tests/test_auto_Training.py index 82a53d1408..68cc67fe94 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Training.py +++ b/nipype/interfaces/fsl/tests/test_auto_Training.py @@ -4,26 +4,11 @@ def test_Training_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - loo=dict( - argstr="-l", - position=2, - ), - mel_icas=dict( - argstr="%s", - copyfile=False, - position=-1, - ), - trained_wts_filestem=dict( - argstr="%s", - position=1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + loo=dict(argstr="-l", position=2), + mel_icas=dict(argstr="%s", copyfile=False, position=-1), + trained_wts_filestem=dict(argstr="%s", position=1), ) inputs = Training.input_spec() @@ -33,11 +18,7 @@ def test_Training_inputs(): def test_Training_outputs(): - output_map = dict( - trained_wts_file=dict( - extensions=None, - ), - ) + output_map = dict(trained_wts_file=dict(extensions=None)) outputs = Training.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py b/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py index 3ab307d6a8..a47e552ad2 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py +++ b/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py @@ -3,13 +3,7 @@ def test_TrainingSetCreator_inputs(): - input_map = dict( - mel_icas_in=dict( - argstr="%s", - copyfile=False, - position=-1, - ), - ) + input_map = dict(mel_icas_in=dict(argstr="%s", copyfile=False, position=-1)) inputs = TrainingSetCreator.input_spec() for key, metadata in list(input_map.items()): @@ -18,13 +12,7 @@ def test_TrainingSetCreator_inputs(): def test_TrainingSetCreator_outputs(): - output_map = dict( - mel_icas_out=dict( - argstr="%s", - copyfile=False, - position=-1, - ), - ) + output_map = dict(mel_icas_out=dict(argstr="%s", copyfile=False, position=-1)) outputs = TrainingSetCreator.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py b/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py index cb27a76876..334340bb85 100644 --- a/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py @@ -4,43 +4,16 @@ def test_UnaryMaths_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - 
in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - internal_datatype=dict( - argstr="-dt %s", - position=1, - ), - nan2zeros=dict( - argstr="-nan", - position=3, - ), - operation=dict( - argstr="-%s", - mandatory=True, - position=4, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + internal_datatype=dict(argstr="-dt %s", position=1), + nan2zeros=dict(argstr="-nan", position=3), + operation=dict(argstr="-%s", mandatory=True, position=4), out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - hash_files=False, - position=-2, - ), - output_datatype=dict( - argstr="-odt %s", - position=-1, + argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 ), + output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = UnaryMaths.input_spec() @@ -51,11 +24,7 @@ def test_UnaryMaths_inputs(): def test_UnaryMaths_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = UnaryMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_VecReg.py b/nipype/interfaces/fsl/tests/test_auto_VecReg.py index 9564241cc3..423d3546fb 100644 --- a/nipype/interfaces/fsl/tests/test_auto_VecReg.py +++ b/nipype/interfaces/fsl/tests/test_auto_VecReg.py @@ -4,57 +4,19 @@ def test_VecReg_inputs(): input_map = dict( - affine_mat=dict( - argstr="-t %s", - extensions=None, - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - ), - interpolation=dict( - argstr="--interp=%s", - ), - mask=dict( - argstr="-m %s", - extensions=None, - ), - out_file=dict( - argstr="-o %s", - extensions=None, - genfile=True, - hash_files=False, - ), + affine_mat=dict(argstr="-t %s", extensions=None), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True), + interpolation=dict(argstr="--interp=%s"), + mask=dict(argstr="-m %s", extensions=None), + out_file=dict(argstr="-o %s", extensions=None, genfile=True, hash_files=False), output_type=dict(), - ref_mask=dict( - argstr="--refmask=%s", - extensions=None, - ), - ref_vol=dict( - argstr="-r %s", - extensions=None, - mandatory=True, - ), - rotation_mat=dict( - argstr="--rotmat=%s", - extensions=None, - ), - rotation_warp=dict( - argstr="--rotwarp=%s", - extensions=None, - ), - warp_field=dict( - argstr="-w %s", - extensions=None, - ), + ref_mask=dict(argstr="--refmask=%s", extensions=None), + ref_vol=dict(argstr="-r %s", extensions=None, mandatory=True), + rotation_mat=dict(argstr="--rotmat=%s", extensions=None), + rotation_warp=dict(argstr="--rotwarp=%s", extensions=None), + warp_field=dict(argstr="-w %s", extensions=None), ) inputs = VecReg.input_spec() @@ -64,11 +26,7 @@ def test_VecReg_inputs(): def test_VecReg_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = VecReg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py b/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py index b7f7fc7d87..18659d0534 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py @@ -4,53 
+4,21 @@ def test_WarpPoints_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - coord_mm=dict( - argstr="-mm", - xor=["coord_vox"], - ), - coord_vox=dict( - argstr="-vox", - xor=["coord_mm"], - ), - dest_file=dict( - argstr="-dest %s", - extensions=None, - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_coords=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - ), + args=dict(argstr="%s"), + coord_mm=dict(argstr="-mm", xor=["coord_vox"]), + coord_vox=dict(argstr="-vox", xor=["coord_mm"]), + dest_file=dict(argstr="-dest %s", extensions=None, mandatory=True), + environ=dict(nohash=True, usedefault=True), + in_coords=dict(argstr="%s", extensions=None, mandatory=True, position=-1), out_file=dict( extensions=None, name_source="in_coords", name_template="%s_warped", output_name="out_file", ), - src_file=dict( - argstr="-src %s", - extensions=None, - mandatory=True, - ), - warp_file=dict( - argstr="-warp %s", - extensions=None, - xor=["xfm_file"], - ), - xfm_file=dict( - argstr="-xfm %s", - extensions=None, - xor=["warp_file"], - ), + src_file=dict(argstr="-src %s", extensions=None, mandatory=True), + warp_file=dict(argstr="-warp %s", extensions=None, xor=["xfm_file"]), + xfm_file=dict(argstr="-xfm %s", extensions=None, xor=["warp_file"]), ) inputs = WarpPoints.input_spec() @@ -60,11 +28,7 @@ def test_WarpPoints_inputs(): def test_WarpPoints_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = WarpPoints.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py b/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py index b8f4cbef97..27c1999c23 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py @@ -4,47 +4,15 @@ def test_WarpPointsFromStd_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - coord_mm=dict( - argstr="-mm", - xor=["coord_vox"], - ), - coord_vox=dict( - argstr="-vox", - xor=["coord_mm"], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - img_file=dict( - argstr="-img %s", - extensions=None, - mandatory=True, - ), - in_coords=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - std_file=dict( - argstr="-std %s", - extensions=None, - mandatory=True, - ), - warp_file=dict( - argstr="-warp %s", - extensions=None, - xor=["xfm_file"], - ), - xfm_file=dict( - argstr="-xfm %s", - extensions=None, - xor=["warp_file"], - ), + args=dict(argstr="%s"), + coord_mm=dict(argstr="-mm", xor=["coord_vox"]), + coord_vox=dict(argstr="-vox", xor=["coord_mm"]), + environ=dict(nohash=True, usedefault=True), + img_file=dict(argstr="-img %s", extensions=None, mandatory=True), + in_coords=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + std_file=dict(argstr="-std %s", extensions=None, mandatory=True), + warp_file=dict(argstr="-warp %s", extensions=None, xor=["xfm_file"]), + xfm_file=dict(argstr="-xfm %s", extensions=None, xor=["warp_file"]), ) inputs = WarpPointsFromStd.input_spec() @@ -54,11 +22,7 @@ def test_WarpPointsFromStd_inputs(): def test_WarpPointsFromStd_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = WarpPointsFromStd.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py b/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py index 0b5881c776..d123b2abf9 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py @@ -4,57 +4,22 @@ def test_WarpPointsToStd_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - coord_mm=dict( - argstr="-mm", - xor=["coord_vox"], - ), - coord_vox=dict( - argstr="-vox", - xor=["coord_mm"], - ), - environ=dict( - nohash=True, - usedefault=True, - ), - img_file=dict( - argstr="-img %s", - extensions=None, - mandatory=True, - ), - in_coords=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - ), + args=dict(argstr="%s"), + coord_mm=dict(argstr="-mm", xor=["coord_vox"]), + coord_vox=dict(argstr="-vox", xor=["coord_mm"]), + environ=dict(nohash=True, usedefault=True), + img_file=dict(argstr="-img %s", extensions=None, mandatory=True), + in_coords=dict(argstr="%s", extensions=None, mandatory=True, position=-1), out_file=dict( extensions=None, name_source="in_coords", name_template="%s_warped", output_name="out_file", ), - premat_file=dict( - argstr="-premat %s", - extensions=None, - ), - std_file=dict( - argstr="-std %s", - extensions=None, - mandatory=True, - ), - warp_file=dict( - argstr="-warp %s", - extensions=None, - xor=["xfm_file"], - ), - xfm_file=dict( - argstr="-xfm %s", - extensions=None, - xor=["warp_file"], - ), + premat_file=dict(argstr="-premat %s", extensions=None), + std_file=dict(argstr="-std %s", extensions=None, mandatory=True), + warp_file=dict(argstr="-warp %s", extensions=None, xor=["xfm_file"]), + xfm_file=dict(argstr="-xfm %s", extensions=None, xor=["warp_file"]), ) inputs = WarpPointsToStd.input_spec() @@ -64,11 +29,7 @@ def test_WarpPointsToStd_inputs(): def test_WarpPointsToStd_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = WarpPointsToStd.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py b/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py index c8caa8da84..9ede954f07 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py @@ -4,21 +4,10 @@ def test_WarpUtils_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="--in=%s", - extensions=None, - mandatory=True, - ), - knot_space=dict( - argstr="--knotspace=%d,%d,%d", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="--in=%s", extensions=None, mandatory=True), + knot_space=dict(argstr="--knotspace=%d,%d,%d"), out_file=dict( argstr="--out=%s", extensions=None, @@ -26,29 +15,13 @@ def test_WarpUtils_inputs(): output_name="out_file", position=-1, ), - out_format=dict( - argstr="--outformat=%s", - ), - out_jacobian=dict( - argstr="--jac=%s", - extensions=None, - ), + out_format=dict(argstr="--outformat=%s"), + out_jacobian=dict(argstr="--jac=%s", extensions=None), output_type=dict(), - reference=dict( - argstr="--ref=%s", - extensions=None, - mandatory=True, - ), - warp_resolution=dict( - argstr="--warpres=%0.4f,%0.4f,%0.4f", - ), - with_affine=dict( - argstr="--withaff", - ), - write_jacobian=dict( - mandatory=True, - usedefault=True, - ), + reference=dict(argstr="--ref=%s", extensions=None, mandatory=True), + 
warp_resolution=dict(argstr="--warpres=%0.4f,%0.4f,%0.4f"), + with_affine=dict(argstr="--withaff"), + write_jacobian=dict(mandatory=True, usedefault=True), ) inputs = WarpUtils.input_spec() @@ -59,12 +32,7 @@ def test_WarpUtils_inputs(): def test_WarpUtils_outputs(): output_map = dict( - out_file=dict( - extensions=None, - ), - out_jacobian=dict( - extensions=None, - ), + out_file=dict(extensions=None), out_jacobian=dict(extensions=None) ) outputs = WarpUtils.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_XFibres5.py b/nipype/interfaces/fsl/tests/test_auto_XFibres5.py index 349b2f52e7..f8e6abe71b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_XFibres5.py +++ b/nipype/interfaces/fsl/tests/test_auto_XFibres5.py @@ -4,111 +4,35 @@ def test_XFibres5_inputs(): input_map = dict( - all_ard=dict( - argstr="--allard", - xor=("no_ard", "all_ard"), - ), - args=dict( - argstr="%s", - ), - burn_in=dict( - argstr="--burnin=%d", - usedefault=True, - ), - burn_in_no_ard=dict( - argstr="--burnin_noard=%d", - usedefault=True, - ), - bvals=dict( - argstr="--bvals=%s", - extensions=None, - mandatory=True, - ), - bvecs=dict( - argstr="--bvecs=%s", - extensions=None, - mandatory=True, - ), - cnlinear=dict( - argstr="--cnonlinear", - xor=("no_spat", "non_linear", "cnlinear"), - ), - dwi=dict( - argstr="--data=%s", - extensions=None, - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - f0_ard=dict( - argstr="--f0 --ardf0", - xor=["f0_noard", "f0_ard", "all_ard"], - ), - f0_noard=dict( - argstr="--f0", - xor=["f0_noard", "f0_ard"], - ), - force_dir=dict( - argstr="--forcedir", - usedefault=True, - ), - fudge=dict( - argstr="--fudge=%d", - ), - gradnonlin=dict( - argstr="--gradnonlin=%s", - extensions=None, - ), - logdir=dict( - argstr="--logdir=%s", - usedefault=True, - ), - mask=dict( - argstr="--mask=%s", - extensions=None, - mandatory=True, - ), - model=dict( - argstr="--model=%d", - ), - n_fibres=dict( - argstr="--nfibres=%d", - mandatory=True, - usedefault=True, - ), - n_jumps=dict( - argstr="--njumps=%d", - usedefault=True, - ), - no_ard=dict( - argstr="--noard", - xor=("no_ard", "all_ard"), - ), - no_spat=dict( - argstr="--nospat", - xor=("no_spat", "non_linear", "cnlinear"), - ), + all_ard=dict(argstr="--allard", xor=("no_ard", "all_ard")), + args=dict(argstr="%s"), + burn_in=dict(argstr="--burnin=%d", usedefault=True), + burn_in_no_ard=dict(argstr="--burnin_noard=%d", usedefault=True), + bvals=dict(argstr="--bvals=%s", extensions=None, mandatory=True), + bvecs=dict(argstr="--bvecs=%s", extensions=None, mandatory=True), + cnlinear=dict(argstr="--cnonlinear", xor=("no_spat", "non_linear", "cnlinear")), + dwi=dict(argstr="--data=%s", extensions=None, mandatory=True), + environ=dict(nohash=True, usedefault=True), + f0_ard=dict(argstr="--f0 --ardf0", xor=["f0_noard", "f0_ard", "all_ard"]), + f0_noard=dict(argstr="--f0", xor=["f0_noard", "f0_ard"]), + force_dir=dict(argstr="--forcedir", usedefault=True), + fudge=dict(argstr="--fudge=%d"), + gradnonlin=dict(argstr="--gradnonlin=%s", extensions=None), + logdir=dict(argstr="--logdir=%s", usedefault=True), + mask=dict(argstr="--mask=%s", extensions=None, mandatory=True), + model=dict(argstr="--model=%d"), + n_fibres=dict(argstr="--nfibres=%d", mandatory=True, usedefault=True), + n_jumps=dict(argstr="--njumps=%d", usedefault=True), + no_ard=dict(argstr="--noard", xor=("no_ard", "all_ard")), + no_spat=dict(argstr="--nospat", xor=("no_spat", "non_linear", "cnlinear")), non_linear=dict( - argstr="--nonlinear", - 
xor=("no_spat", "non_linear", "cnlinear"), + argstr="--nonlinear", xor=("no_spat", "non_linear", "cnlinear") ), output_type=dict(), - rician=dict( - argstr="--rician", - ), - sample_every=dict( - argstr="--sampleevery=%d", - usedefault=True, - ), - seed=dict( - argstr="--seed=%d", - ), - update_proposal_every=dict( - argstr="--updateproposalevery=%d", - usedefault=True, - ), + rician=dict(argstr="--rician"), + sample_every=dict(argstr="--sampleevery=%d", usedefault=True), + seed=dict(argstr="--seed=%d"), + update_proposal_every=dict(argstr="--updateproposalevery=%d", usedefault=True), ) inputs = XFibres5.input_spec() @@ -121,16 +45,10 @@ def test_XFibres5_outputs(): output_map = dict( dyads=dict(), fsamples=dict(), - mean_S0samples=dict( - extensions=None, - ), - mean_dsamples=dict( - extensions=None, - ), + mean_S0samples=dict(extensions=None), + mean_dsamples=dict(extensions=None), mean_fsamples=dict(), - mean_tausamples=dict( - extensions=None, - ), + mean_tausamples=dict(extensions=None), phsamples=dict(), thsamples=dict(), ) diff --git a/nipype/interfaces/minc/tests/test_auto_Average.py b/nipype/interfaces/minc/tests/test_auto_Average.py index 7017967d61..2b3cbd66e4 100644 --- a/nipype/interfaces/minc/tests/test_auto_Average.py +++ b/nipype/interfaces/minc/tests/test_auto_Average.py @@ -4,40 +4,18 @@ def test_Average_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - avgdim=dict( - argstr="-avgdim %s", - ), - binarize=dict( - argstr="-binarize", - ), - binrange=dict( - argstr="-binrange %s %s", - ), - binvalue=dict( - argstr="-binvalue %s", - ), + args=dict(argstr="%s"), + avgdim=dict(argstr="-avgdim %s"), + binarize=dict(argstr="-binarize"), + binrange=dict(argstr="-binrange %s %s"), + binvalue=dict(argstr="-binvalue %s"), check_dimensions=dict( - argstr="-check_dimensions", - xor=("check_dimensions", "no_check_dimensions"), - ), - clobber=dict( - argstr="-clobber", - usedefault=True, - ), - copy_header=dict( - argstr="-copy_header", - xor=("copy_header", "no_copy_header"), - ), - debug=dict( - argstr="-debug", - ), - environ=dict( - nohash=True, - usedefault=True, + argstr="-check_dimensions", xor=("check_dimensions", "no_check_dimensions") ), + clobber=dict(argstr="-clobber", usedefault=True), + copy_header=dict(argstr="-copy_header", xor=("copy_header", "no_copy_header")), + debug=dict(argstr="-debug"), + environ=dict(nohash=True, usedefault=True), filelist=dict( argstr="-filelist %s", extensions=None, @@ -177,26 +155,16 @@ def test_Average_inputs(): sep=" ", xor=("input_files", "filelist"), ), - max_buffer_size_in_kb=dict( - argstr="-max_buffer_size_in_kb %d", - usedefault=True, - ), + max_buffer_size_in_kb=dict(argstr="-max_buffer_size_in_kb %d", usedefault=True), no_check_dimensions=dict( argstr="-nocheck_dimensions", xor=("check_dimensions", "no_check_dimensions"), ), no_copy_header=dict( - argstr="-nocopy_header", - xor=("copy_header", "no_copy_header"), - ), - nonormalize=dict( - argstr="-nonormalize", - xor=("normalize", "nonormalize"), - ), - normalize=dict( - argstr="-normalize", - xor=("normalize", "nonormalize"), + argstr="-nocopy_header", xor=("copy_header", "no_copy_header") ), + nonormalize=dict(argstr="-nonormalize", xor=("normalize", "nonormalize")), + normalize=dict(argstr="-normalize", xor=("normalize", "nonormalize")), output_file=dict( argstr="%s", extensions=None, @@ -206,32 +174,13 @@ def test_Average_inputs(): name_template="%s_averaged.mnc", position=-1, ), - quiet=dict( - argstr="-quiet", - xor=("verbose", "quiet"), - ), - sdfile=dict( - 
argstr="-sdfile %s", - extensions=None, - ), - two=dict( - argstr="-2", - ), - verbose=dict( - argstr="-verbose", - xor=("verbose", "quiet"), - ), - voxel_range=dict( - argstr="-range %d %d", - ), - weights=dict( - argstr="-weights %s", - sep=",", - ), - width_weighted=dict( - argstr="-width_weighted", - requires=("avgdim",), - ), + quiet=dict(argstr="-quiet", xor=("verbose", "quiet")), + sdfile=dict(argstr="-sdfile %s", extensions=None), + two=dict(argstr="-2"), + verbose=dict(argstr="-verbose", xor=("verbose", "quiet")), + voxel_range=dict(argstr="-range %d %d"), + weights=dict(argstr="-weights %s", sep=","), + width_weighted=dict(argstr="-width_weighted", requires=("avgdim",)), ) inputs = Average.input_spec() @@ -241,11 +190,7 @@ def test_Average_inputs(): def test_Average_outputs(): - output_map = dict( - output_file=dict( - extensions=None, - ), - ) + output_map = dict(output_file=dict(extensions=None)) outputs = Average.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_BBox.py b/nipype/interfaces/minc/tests/test_auto_BBox.py index 532cb14d5f..eac3f53b48 100644 --- a/nipype/interfaces/minc/tests/test_auto_BBox.py +++ b/nipype/interfaces/minc/tests/test_auto_BBox.py @@ -4,38 +4,14 @@ def test_BBox_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - format_minccrop=dict( - argstr="-minccrop", - ), - format_mincresample=dict( - argstr="-mincresample", - ), - format_mincreshape=dict( - argstr="-mincreshape", - ), - input_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - one_line=dict( - argstr="-one_line", - xor=("one_line", "two_lines"), - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + format_minccrop=dict(argstr="-minccrop"), + format_mincresample=dict(argstr="-mincresample"), + format_mincreshape=dict(argstr="-mincreshape"), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + one_line=dict(argstr="-one_line", xor=("one_line", "two_lines")), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), output_file=dict( extensions=None, hash_files=False, @@ -44,13 +20,8 @@ def test_BBox_inputs(): name_template="%s_bbox.txt", position=-1, ), - threshold=dict( - argstr="-threshold", - ), - two_lines=dict( - argstr="-two_lines", - xor=("one_line", "two_lines"), - ), + threshold=dict(argstr="-threshold"), + two_lines=dict(argstr="-two_lines", xor=("one_line", "two_lines")), ) inputs = BBox.input_spec() @@ -60,11 +31,7 @@ def test_BBox_inputs(): def test_BBox_outputs(): - output_map = dict( - output_file=dict( - extensions=None, - ), - ) + output_map = dict(output_file=dict(extensions=None)) outputs = BBox.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Beast.py b/nipype/interfaces/minc/tests/test_auto_Beast.py index 487550a33a..caa25231aa 100644 --- a/nipype/interfaces/minc/tests/test_auto_Beast.py +++ b/nipype/interfaces/minc/tests/test_auto_Beast.py @@ -4,59 +4,20 @@ def test_Beast_inputs(): input_map = dict( - abspath=dict( - argstr="-abspath", - usedefault=True, - ), - args=dict( - argstr="%s", - ), - clobber=dict( - argstr="-clobber", - usedefault=True, - ), - confidence_level_alpha=dict( - argstr="-alpha %s", - usedefault=True, - ), - configuration_file=dict( - argstr="-configuration %s", - 
extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fill_holes=dict( - argstr="-fill", - ), - flip_images=dict( - argstr="-flip", - ), - input_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - library_dir=dict( - argstr="%s", - mandatory=True, - position=-3, - ), - load_moments=dict( - argstr="-load_moments", - ), - median_filter=dict( - argstr="-median", - ), - nlm_filter=dict( - argstr="-nlm_filter", - ), - number_selected_images=dict( - argstr="-selection_num %s", - usedefault=True, - ), + abspath=dict(argstr="-abspath", usedefault=True), + args=dict(argstr="%s"), + clobber=dict(argstr="-clobber", usedefault=True), + confidence_level_alpha=dict(argstr="-alpha %s", usedefault=True), + configuration_file=dict(argstr="-configuration %s", extensions=None), + environ=dict(nohash=True, usedefault=True), + fill_holes=dict(argstr="-fill"), + flip_images=dict(argstr="-flip"), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + library_dir=dict(argstr="%s", mandatory=True, position=-3), + load_moments=dict(argstr="-load_moments"), + median_filter=dict(argstr="-median"), + nlm_filter=dict(argstr="-nlm_filter"), + number_selected_images=dict(argstr="-selection_num %s", usedefault=True), output_file=dict( argstr="%s", extensions=None, @@ -65,32 +26,13 @@ def test_Beast_inputs(): name_template="%s_beast_mask.mnc", position=-1, ), - patch_size=dict( - argstr="-patch_size %s", - usedefault=True, - ), - probability_map=dict( - argstr="-probability", - ), - same_resolution=dict( - argstr="-same_resolution", - ), - search_area=dict( - argstr="-search_area %s", - usedefault=True, - ), - smoothness_factor_beta=dict( - argstr="-beta %s", - usedefault=True, - ), - threshold_patch_selection=dict( - argstr="-threshold %s", - usedefault=True, - ), - voxel_size=dict( - argstr="-voxel_size %s", - usedefault=True, - ), + patch_size=dict(argstr="-patch_size %s", usedefault=True), + probability_map=dict(argstr="-probability"), + same_resolution=dict(argstr="-same_resolution"), + search_area=dict(argstr="-search_area %s", usedefault=True), + smoothness_factor_beta=dict(argstr="-beta %s", usedefault=True), + threshold_patch_selection=dict(argstr="-threshold %s", usedefault=True), + voxel_size=dict(argstr="-voxel_size %s", usedefault=True), ) inputs = Beast.input_spec() @@ -100,11 +42,7 @@ def test_Beast_inputs(): def test_Beast_outputs(): - output_map = dict( - output_file=dict( - extensions=None, - ), - ) + output_map = dict(output_file=dict(extensions=None)) outputs = Beast.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_BestLinReg.py b/nipype/interfaces/minc/tests/test_auto_BestLinReg.py index 57a8929878..09f7ef0958 100644 --- a/nipype/interfaces/minc/tests/test_auto_BestLinReg.py +++ b/nipype/interfaces/minc/tests/test_auto_BestLinReg.py @@ -4,17 +4,9 @@ def test_BestLinReg_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - clobber=dict( - argstr="-clobber", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + clobber=dict(argstr="-clobber", usedefault=True), + environ=dict(nohash=True, usedefault=True), output_mnc=dict( argstr="%s", extensions=None, @@ -35,21 +27,9 @@ def test_BestLinReg_inputs(): name_template="%s_bestlinreg.xfm", position=-2, ), - source=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-4, - ), - target=dict( - argstr="%s", - extensions=None, - 
mandatory=True, - position=-3, - ), - verbose=dict( - argstr="-verbose", - ), + source=dict(argstr="%s", extensions=None, mandatory=True, position=-4), + target=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + verbose=dict(argstr="-verbose"), ) inputs = BestLinReg.input_spec() @@ -60,12 +40,7 @@ def test_BestLinReg_inputs(): def test_BestLinReg_outputs(): output_map = dict( - output_mnc=dict( - extensions=None, - ), - output_xfm=dict( - extensions=None, - ), + output_mnc=dict(extensions=None), output_xfm=dict(extensions=None) ) outputs = BestLinReg.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_BigAverage.py b/nipype/interfaces/minc/tests/test_auto_BigAverage.py index 1eefb273d6..cea520ba5c 100644 --- a/nipype/interfaces/minc/tests/test_auto_BigAverage.py +++ b/nipype/interfaces/minc/tests/test_auto_BigAverage.py @@ -4,23 +4,10 @@ def test_BigAverage_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - clobber=dict( - argstr="--clobber", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_files=dict( - argstr="%s", - mandatory=True, - position=-2, - sep=" ", - ), + args=dict(argstr="%s"), + clobber=dict(argstr="--clobber", usedefault=True), + environ=dict(nohash=True, usedefault=True), + input_files=dict(argstr="%s", mandatory=True, position=-2, sep=" "), output_file=dict( argstr="%s", extensions=None, @@ -30,12 +17,8 @@ def test_BigAverage_inputs(): name_template="%s_bigaverage.mnc", position=-1, ), - output_float=dict( - argstr="--float", - ), - robust=dict( - argstr="-robust", - ), + output_float=dict(argstr="--float"), + robust=dict(argstr="-robust"), sd_file=dict( argstr="--sdfile %s", extensions=None, @@ -43,12 +26,8 @@ def test_BigAverage_inputs(): name_source=["input_files"], name_template="%s_bigaverage_stdev.mnc", ), - tmpdir=dict( - argstr="-tmpdir %s", - ), - verbose=dict( - argstr="--verbose", - ), + tmpdir=dict(argstr="-tmpdir %s"), + verbose=dict(argstr="--verbose"), ) inputs = BigAverage.input_spec() @@ -58,14 +37,7 @@ def test_BigAverage_inputs(): def test_BigAverage_outputs(): - output_map = dict( - output_file=dict( - extensions=None, - ), - sd_file=dict( - extensions=None, - ), - ) + output_map = dict(output_file=dict(extensions=None), sd_file=dict(extensions=None)) outputs = BigAverage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Blob.py b/nipype/interfaces/minc/tests/test_auto_Blob.py index ae2b445c73..a0ef171fd7 100644 --- a/nipype/interfaces/minc/tests/test_auto_Blob.py +++ b/nipype/interfaces/minc/tests/test_auto_Blob.py @@ -4,25 +4,11 @@ def test_Blob_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - determinant=dict( - argstr="-determinant", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - magnitude=dict( - argstr="-magnitude", - ), + args=dict(argstr="%s"), + determinant=dict(argstr="-determinant"), + environ=dict(nohash=True, usedefault=True), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + magnitude=dict(argstr="-magnitude"), output_file=dict( argstr="%s", extensions=None, @@ -32,12 +18,8 @@ def test_Blob_inputs(): name_template="%s_blob.mnc", position=-1, ), - trace=dict( - argstr="-trace", - ), - translation=dict( - argstr="-translation", - ), + trace=dict(argstr="-trace"), + translation=dict(argstr="-translation"), ) inputs = Blob.input_spec() @@ -47,11 +29,7 @@ 
def test_Blob_inputs(): def test_Blob_outputs(): - output_map = dict( - output_file=dict( - extensions=None, - ), - ) + output_map = dict(output_file=dict(extensions=None)) outputs = Blob.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Blur.py b/nipype/interfaces/minc/tests/test_auto_Blur.py index 87647b5f62..591957a6e8 100644 --- a/nipype/interfaces/minc/tests/test_auto_Blur.py +++ b/nipype/interfaces/minc/tests/test_auto_Blur.py @@ -4,58 +4,25 @@ def test_Blur_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - clobber=dict( - argstr="-clobber", - usedefault=True, - ), - dimensions=dict( - argstr="-dimensions %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + clobber=dict(argstr="-clobber", usedefault=True), + dimensions=dict(argstr="-dimensions %s"), + environ=dict(nohash=True, usedefault=True), fwhm=dict( - argstr="-fwhm %s", - mandatory=True, - xor=("fwhm", "fwhm3d", "standard_dev"), + argstr="-fwhm %s", mandatory=True, xor=("fwhm", "fwhm3d", "standard_dev") ), fwhm3d=dict( argstr="-3dfwhm %s %s %s", mandatory=True, xor=("fwhm", "fwhm3d", "standard_dev"), ), - gaussian=dict( - argstr="-gaussian", - xor=("gaussian", "rect"), - ), - gradient=dict( - argstr="-gradient", - ), - input_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - no_apodize=dict( - argstr="-no_apodize", - ), - output_file_base=dict( - argstr="%s", - extensions=None, - position=-1, - ), - partial=dict( - argstr="-partial", - ), - rect=dict( - argstr="-rect", - xor=("gaussian", "rect"), - ), + gaussian=dict(argstr="-gaussian", xor=("gaussian", "rect")), + gradient=dict(argstr="-gradient"), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + no_apodize=dict(argstr="-no_apodize"), + output_file_base=dict(argstr="%s", extensions=None, position=-1), + partial=dict(argstr="-partial"), + rect=dict(argstr="-rect", xor=("gaussian", "rect")), standard_dev=dict( argstr="-standarddev %s", mandatory=True, @@ -71,24 +38,12 @@ def test_Blur_inputs(): def test_Blur_outputs(): output_map = dict( - gradient_dxyz=dict( - extensions=None, - ), - output_file=dict( - extensions=None, - ), - partial_dx=dict( - extensions=None, - ), - partial_dxyz=dict( - extensions=None, - ), - partial_dy=dict( - extensions=None, - ), - partial_dz=dict( - extensions=None, - ), + gradient_dxyz=dict(extensions=None), + output_file=dict(extensions=None), + partial_dx=dict(extensions=None), + partial_dxyz=dict(extensions=None), + partial_dy=dict(extensions=None), + partial_dz=dict(extensions=None), ) outputs = Blur.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Calc.py b/nipype/interfaces/minc/tests/test_auto_Calc.py index d0d4f61fbe..48f2e4b6c4 100644 --- a/nipype/interfaces/minc/tests/test_auto_Calc.py +++ b/nipype/interfaces/minc/tests/test_auto_Calc.py @@ -4,31 +4,15 @@ def test_Calc_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), check_dimensions=dict( - argstr="-check_dimensions", - xor=("check_dimensions", "no_check_dimensions"), - ), - clobber=dict( - argstr="-clobber", - usedefault=True, - ), - copy_header=dict( - argstr="-copy_header", - xor=("copy_header", "no_copy_header"), - ), - debug=dict( - argstr="-debug", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - eval_width=dict( - argstr="-eval_width %s", + argstr="-check_dimensions", xor=("check_dimensions", "no_check_dimensions") ), + 
clobber=dict(argstr="-clobber", usedefault=True), + copy_header=dict(argstr="-copy_header", xor=("copy_header", "no_copy_header")), + debug=dict(argstr="-debug"), + environ=dict(nohash=True, usedefault=True), + eval_width=dict(argstr="-eval_width %s"), expfile=dict( argstr="-expfile %s", extensions=None, @@ -36,9 +20,7 @@ def test_Calc_inputs(): xor=("expression", "expfile"), ), expression=dict( - argstr="-expression '%s'", - mandatory=True, - xor=("expression", "expfile"), + argstr="-expression '%s'", mandatory=True, xor=("expression", "expfile") ), filelist=dict( argstr="-filelist %s", @@ -172,25 +154,15 @@ def test_Calc_inputs(): "format_unsigned", ), ), - ignore_nan=dict( - argstr="-ignore_nan", - ), - input_files=dict( - argstr="%s", - mandatory=True, - position=-2, - sep=" ", - ), - max_buffer_size_in_kb=dict( - argstr="-max_buffer_size_in_kb %d", - ), + ignore_nan=dict(argstr="-ignore_nan"), + input_files=dict(argstr="%s", mandatory=True, position=-2, sep=" "), + max_buffer_size_in_kb=dict(argstr="-max_buffer_size_in_kb %d"), no_check_dimensions=dict( argstr="-nocheck_dimensions", xor=("check_dimensions", "no_check_dimensions"), ), no_copy_header=dict( - argstr="-nocopy_header", - xor=("copy_header", "no_copy_header"), + argstr="-nocopy_header", xor=("copy_header", "no_copy_header") ), outfiles=dict(), output_file=dict( @@ -207,30 +179,16 @@ def test_Calc_inputs(): xor=("output_nan", "output_zero", "output_illegal_value"), ), output_nan=dict( - argstr="-nan", - xor=("output_nan", "output_zero", "output_illegal_value"), + argstr="-nan", xor=("output_nan", "output_zero", "output_illegal_value") ), output_zero=dict( - argstr="-zero", - xor=("output_nan", "output_zero", "output_illegal_value"), - ), - propagate_nan=dict( - argstr="-propagate_nan", - ), - quiet=dict( - argstr="-quiet", - xor=("verbose", "quiet"), - ), - two=dict( - argstr="-2", - ), - verbose=dict( - argstr="-verbose", - xor=("verbose", "quiet"), - ), - voxel_range=dict( - argstr="-range %d %d", + argstr="-zero", xor=("output_nan", "output_zero", "output_illegal_value") ), + propagate_nan=dict(argstr="-propagate_nan"), + quiet=dict(argstr="-quiet", xor=("verbose", "quiet")), + two=dict(argstr="-2"), + verbose=dict(argstr="-verbose", xor=("verbose", "quiet")), + voxel_range=dict(argstr="-range %d %d"), ) inputs = Calc.input_spec() @@ -240,11 +198,7 @@ def test_Calc_inputs(): def test_Calc_outputs(): - output_map = dict( - output_file=dict( - extensions=None, - ), - ) + output_map = dict(output_file=dict(extensions=None)) outputs = Calc.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Convert.py b/nipype/interfaces/minc/tests/test_auto_Convert.py index 57963b9b81..e63dde6520 100644 --- a/nipype/interfaces/minc/tests/test_auto_Convert.py +++ b/nipype/interfaces/minc/tests/test_auto_Convert.py @@ -4,29 +4,12 @@ def test_Convert_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - chunk=dict( - argstr="-chunk %d", - ), - clobber=dict( - argstr="-clobber", - usedefault=True, - ), - compression=dict( - argstr="-compress %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s"), + chunk=dict(argstr="-chunk %d"), + clobber=dict(argstr="-clobber", usedefault=True), + compression=dict(argstr="-compress %s"), + environ=dict(nohash=True, usedefault=True), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), 
output_file=dict( argstr="%s", extensions=None, @@ -36,12 +19,8 @@ def test_Convert_inputs(): name_template="%s_convert_output.mnc", position=-1, ), - template=dict( - argstr="-template", - ), - two=dict( - argstr="-2", - ), + template=dict(argstr="-template"), + two=dict(argstr="-2"), ) inputs = Convert.input_spec() @@ -51,11 +30,7 @@ def test_Convert_inputs(): def test_Convert_outputs(): - output_map = dict( - output_file=dict( - extensions=None, - ), - ) + output_map = dict(output_file=dict(extensions=None)) outputs = Convert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Copy.py b/nipype/interfaces/minc/tests/test_auto_Copy.py index a6bb527e7a..3274e3b95e 100644 --- a/nipype/interfaces/minc/tests/test_auto_Copy.py +++ b/nipype/interfaces/minc/tests/test_auto_Copy.py @@ -4,19 +4,9 @@ def test_Copy_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), output_file=dict( argstr="%s", extensions=None, @@ -26,14 +16,8 @@ def test_Copy_inputs(): name_template="%s_copy.mnc", position=-1, ), - pixel_values=dict( - argstr="-pixel_values", - xor=("pixel_values", "real_values"), - ), - real_values=dict( - argstr="-real_values", - xor=("pixel_values", "real_values"), - ), + pixel_values=dict(argstr="-pixel_values", xor=("pixel_values", "real_values")), + real_values=dict(argstr="-real_values", xor=("pixel_values", "real_values")), ) inputs = Copy.input_spec() @@ -43,11 +27,7 @@ def test_Copy_inputs(): def test_Copy_outputs(): - output_map = dict( - output_file=dict( - extensions=None, - ), - ) + output_map = dict(output_file=dict(extensions=None)) outputs = Copy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Dump.py b/nipype/interfaces/minc/tests/test_auto_Dump.py index 4253bc20cc..548a5e6eed 100644 --- a/nipype/interfaces/minc/tests/test_auto_Dump.py +++ b/nipype/interfaces/minc/tests/test_auto_Dump.py @@ -5,46 +5,19 @@ def test_Dump_inputs(): input_map = dict( annotations_brief=dict( - argstr="-b %s", - xor=("annotations_brief", "annotations_full"), + argstr="-b %s", xor=("annotations_brief", "annotations_full") ), annotations_full=dict( - argstr="-f %s", - xor=("annotations_brief", "annotations_full"), - ), - args=dict( - argstr="%s", - ), - coordinate_data=dict( - argstr="-c", - xor=("coordinate_data", "header_data"), - ), - environ=dict( - nohash=True, - usedefault=True, - ), - header_data=dict( - argstr="-h", - xor=("coordinate_data", "header_data"), - ), - input_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - line_length=dict( - argstr="-l %d", - ), - netcdf_name=dict( - argstr="-n %s", - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), + argstr="-f %s", xor=("annotations_brief", "annotations_full") + ), + args=dict(argstr="%s"), + coordinate_data=dict(argstr="-c", xor=("coordinate_data", "header_data")), + environ=dict(nohash=True, usedefault=True), + header_data=dict(argstr="-h", xor=("coordinate_data", "header_data")), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + line_length=dict(argstr="-l %d"), + netcdf_name=dict(argstr="-n %s"), + 
out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), output_file=dict( extensions=None, hash_files=False, @@ -53,13 +26,8 @@ def test_Dump_inputs(): name_template="%s_dump.txt", position=-1, ), - precision=dict( - argstr="%s", - ), - variables=dict( - argstr="-v %s", - sep=",", - ), + precision=dict(argstr="%s"), + variables=dict(argstr="-v %s", sep=","), ) inputs = Dump.input_spec() @@ -69,11 +37,7 @@ def test_Dump_inputs(): def test_Dump_outputs(): - output_map = dict( - output_file=dict( - extensions=None, - ), - ) + output_map = dict(output_file=dict(extensions=None)) outputs = Dump.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Extract.py b/nipype/interfaces/minc/tests/test_auto_Extract.py index 6c34b443f0..ee5f85bf7b 100644 --- a/nipype/interfaces/minc/tests/test_auto_Extract.py +++ b/nipype/interfaces/minc/tests/test_auto_Extract.py @@ -4,17 +4,9 @@ def test_Extract_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - count=dict( - argstr="-count %s", - sep=",", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + count=dict(argstr="-count %s", sep=","), + environ=dict(nohash=True, usedefault=True), flip_any_direction=dict( argstr="-any_direction", xor=( @@ -75,35 +67,13 @@ def test_Extract_inputs(): argstr="+zdirection", xor=("flip_z_positive", "flip_z_negative", "flip_z_any"), ), - image_maximum=dict( - argstr="-image_maximum %s", - ), - image_minimum=dict( - argstr="-image_minimum %s", - ), - image_range=dict( - argstr="-image_range %s %s", - ), - input_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - nonormalize=dict( - argstr="-nonormalize", - xor=("normalize", "nonormalize"), - ), - normalize=dict( - argstr="-normalize", - xor=("normalize", "nonormalize"), - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), + image_maximum=dict(argstr="-image_maximum %s"), + image_minimum=dict(argstr="-image_minimum %s"), + image_range=dict(argstr="-image_range %s %s"), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + nonormalize=dict(argstr="-nonormalize", xor=("normalize", "nonormalize")), + normalize=dict(argstr="-normalize", xor=("normalize", "nonormalize")), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), output_file=dict( extensions=None, hash_files=False, @@ -112,10 +82,7 @@ def test_Extract_inputs(): name_template="%s.raw", position=-1, ), - start=dict( - argstr="-start %s", - sep=",", - ), + start=dict(argstr="-start %s", sep=","), write_ascii=dict( argstr="-ascii", xor=( @@ -206,9 +173,7 @@ def test_Extract_inputs(): "write_unsigned", ), ), - write_range=dict( - argstr="-range %s %s", - ), + write_range=dict(argstr="-range %s %s"), write_short=dict( argstr="-short", xor=( @@ -224,14 +189,8 @@ def test_Extract_inputs(): "write_unsigned", ), ), - write_signed=dict( - argstr="-signed", - xor=("write_signed", "write_unsigned"), - ), - write_unsigned=dict( - argstr="-unsigned", - xor=("write_signed", "write_unsigned"), - ), + write_signed=dict(argstr="-signed", xor=("write_signed", "write_unsigned")), + write_unsigned=dict(argstr="-unsigned", xor=("write_signed", "write_unsigned")), ) inputs = Extract.input_spec() @@ -241,11 +200,7 @@ def test_Extract_inputs(): def test_Extract_outputs(): - output_map = dict( - output_file=dict( - extensions=None, - ), - ) + output_map = dict(output_file=dict(extensions=None)) outputs = 
Extract.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py b/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py index 91bcc57e3c..0ea0a5befc 100644 --- a/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py +++ b/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py @@ -4,24 +4,11 @@ def test_Gennlxfm_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - clobber=dict( - argstr="-clobber", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - ident=dict( - argstr="-ident", - ), - like=dict( - argstr="-like %s", - extensions=None, - ), + args=dict(argstr="%s"), + clobber=dict(argstr="-clobber", usedefault=True), + environ=dict(nohash=True, usedefault=True), + ident=dict(argstr="-ident"), + like=dict(argstr="-like %s", extensions=None), output_file=dict( argstr="%s", extensions=None, @@ -31,12 +18,8 @@ def test_Gennlxfm_inputs(): name_template="%s_gennlxfm.xfm", position=-1, ), - step=dict( - argstr="-step %s", - ), - verbose=dict( - argstr="-verbose", - ), + step=dict(argstr="-step %s"), + verbose=dict(argstr="-verbose"), ) inputs = Gennlxfm.input_spec() @@ -47,12 +30,7 @@ def test_Gennlxfm_inputs(): def test_Gennlxfm_outputs(): output_map = dict( - output_file=dict( - extensions=None, - ), - output_grid=dict( - extensions=None, - ), + output_file=dict(extensions=None), output_grid=dict(extensions=None) ) outputs = Gennlxfm.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Math.py b/nipype/interfaces/minc/tests/test_auto_Math.py index 86858235cd..b8f48beb23 100644 --- a/nipype/interfaces/minc/tests/test_auto_Math.py +++ b/nipype/interfaces/minc/tests/test_auto_Math.py @@ -4,61 +4,25 @@ def test_Math_inputs(): input_map = dict( - abs=dict( - argstr="-abs", - ), - args=dict( - argstr="%s", - ), - calc_add=dict( - argstr="-add", - ), - calc_and=dict( - argstr="-and", - ), - calc_div=dict( - argstr="-div", - ), - calc_mul=dict( - argstr="-mult", - ), - calc_not=dict( - argstr="-not", - ), - calc_or=dict( - argstr="-or", - ), - calc_sub=dict( - argstr="-sub", - ), + abs=dict(argstr="-abs"), + args=dict(argstr="%s"), + calc_add=dict(argstr="-add"), + calc_and=dict(argstr="-and"), + calc_div=dict(argstr="-div"), + calc_mul=dict(argstr="-mult"), + calc_not=dict(argstr="-not"), + calc_or=dict(argstr="-or"), + calc_sub=dict(argstr="-sub"), check_dimensions=dict( - argstr="-check_dimensions", - xor=("check_dimensions", "no_check_dimensions"), - ), - clamp=dict( - argstr="-clamp -const2 %s %s", - ), - clobber=dict( - argstr="-clobber", - usedefault=True, - ), - copy_header=dict( - argstr="-copy_header", - xor=("copy_header", "no_copy_header"), - ), - count_valid=dict( - argstr="-count_valid", - ), - dimension=dict( - argstr="-dimension %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - exp=dict( - argstr="-exp -const2 %s %s", - ), + argstr="-check_dimensions", xor=("check_dimensions", "no_check_dimensions") + ), + clamp=dict(argstr="-clamp -const2 %s %s"), + clobber=dict(argstr="-clobber", usedefault=True), + copy_header=dict(argstr="-copy_header", xor=("copy_header", "no_copy_header")), + count_valid=dict(argstr="-count_valid"), + dimension=dict(argstr="-dimension %s"), + environ=dict(nohash=True, usedefault=True), + exp=dict(argstr="-exp -const2 %s %s"), filelist=dict( argstr="-filelist %s", extensions=None, @@ -191,9 +155,7 @@ def test_Math_inputs(): "format_unsigned", ), ), - ignore_nan=dict( - argstr="-ignore_nan", - ), + ignore_nan=dict(argstr="-ignore_nan"), 
input_files=dict( argstr="%s", mandatory=True, @@ -201,39 +163,21 @@ def test_Math_inputs(): sep=" ", xor=("input_files", "filelist"), ), - invert=dict( - argstr="-invert -const %s", - ), - isnan=dict( - argstr="-isnan", - ), - log=dict( - argstr="-log -const2 %s %s", - ), - max_buffer_size_in_kb=dict( - argstr="-max_buffer_size_in_kb %d", - usedefault=True, - ), - maximum=dict( - argstr="-maximum", - ), - minimum=dict( - argstr="-minimum", - ), - nisnan=dict( - argstr="-nisnan", - ), + invert=dict(argstr="-invert -const %s"), + isnan=dict(argstr="-isnan"), + log=dict(argstr="-log -const2 %s %s"), + max_buffer_size_in_kb=dict(argstr="-max_buffer_size_in_kb %d", usedefault=True), + maximum=dict(argstr="-maximum"), + minimum=dict(argstr="-minimum"), + nisnan=dict(argstr="-nisnan"), no_check_dimensions=dict( argstr="-nocheck_dimensions", xor=("check_dimensions", "no_check_dimensions"), ), no_copy_header=dict( - argstr="-nocopy_header", - xor=("copy_header", "no_copy_header"), - ), - nsegment=dict( - argstr="-nsegment -const2 %s %s", + argstr="-nocopy_header", xor=("copy_header", "no_copy_header") ), + nsegment=dict(argstr="-nsegment -const2 %s %s"), output_file=dict( argstr="%s", extensions=None, @@ -248,55 +192,25 @@ def test_Math_inputs(): xor=("output_nan", "output_zero", "output_illegal_value"), ), output_nan=dict( - argstr="-nan", - xor=("output_nan", "output_zero", "output_illegal_value"), + argstr="-nan", xor=("output_nan", "output_zero", "output_illegal_value") ), output_zero=dict( - argstr="-zero", - xor=("output_nan", "output_zero", "output_illegal_value"), - ), - percentdiff=dict( - argstr="-percentdiff", - ), - propagate_nan=dict( - argstr="-propagate_nan", - ), - scale=dict( - argstr="-scale -const2 %s %s", - ), - segment=dict( - argstr="-segment -const2 %s %s", - ), - sqrt=dict( - argstr="-sqrt", - ), - square=dict( - argstr="-square", - ), - test_eq=dict( - argstr="-eq", - ), - test_ge=dict( - argstr="-ge", - ), - test_gt=dict( - argstr="-gt", - ), - test_le=dict( - argstr="-le", - ), - test_lt=dict( - argstr="-lt", - ), - test_ne=dict( - argstr="-ne", - ), - two=dict( - argstr="-2", - ), - voxel_range=dict( - argstr="-range %d %d", - ), + argstr="-zero", xor=("output_nan", "output_zero", "output_illegal_value") + ), + percentdiff=dict(argstr="-percentdiff"), + propagate_nan=dict(argstr="-propagate_nan"), + scale=dict(argstr="-scale -const2 %s %s"), + segment=dict(argstr="-segment -const2 %s %s"), + sqrt=dict(argstr="-sqrt"), + square=dict(argstr="-square"), + test_eq=dict(argstr="-eq"), + test_ge=dict(argstr="-ge"), + test_gt=dict(argstr="-gt"), + test_le=dict(argstr="-le"), + test_lt=dict(argstr="-lt"), + test_ne=dict(argstr="-ne"), + two=dict(argstr="-2"), + voxel_range=dict(argstr="-range %d %d"), ) inputs = Math.input_spec() @@ -306,11 +220,7 @@ def test_Math_inputs(): def test_Math_outputs(): - output_map = dict( - output_file=dict( - extensions=None, - ), - ) + output_map = dict(output_file=dict(extensions=None)) outputs = Math.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_NlpFit.py b/nipype/interfaces/minc/tests/test_auto_NlpFit.py index 39b1df4743..d054c8cafd 100644 --- a/nipype/interfaces/minc/tests/test_auto_NlpFit.py +++ b/nipype/interfaces/minc/tests/test_auto_NlpFit.py @@ -4,54 +4,17 @@ def test_NlpFit_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - clobber=dict( - argstr="-clobber", - usedefault=True, - ), - config_file=dict( - argstr="-config_file %s", - extensions=None, - 
mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - init_xfm=dict( - argstr="-init_xfm %s", - extensions=None, - mandatory=True, - ), + args=dict(argstr="%s"), + clobber=dict(argstr="-clobber", usedefault=True), + config_file=dict(argstr="-config_file %s", extensions=None, mandatory=True), + environ=dict(nohash=True, usedefault=True), + init_xfm=dict(argstr="-init_xfm %s", extensions=None, mandatory=True), input_grid_files=dict(), - output_xfm=dict( - argstr="%s", - extensions=None, - genfile=True, - position=-1, - ), - source=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), - source_mask=dict( - argstr="-source_mask %s", - extensions=None, - mandatory=True, - ), - target=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - verbose=dict( - argstr="-verbose", - ), + output_xfm=dict(argstr="%s", extensions=None, genfile=True, position=-1), + source=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + source_mask=dict(argstr="-source_mask %s", extensions=None, mandatory=True), + target=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + verbose=dict(argstr="-verbose"), ) inputs = NlpFit.input_spec() @@ -62,12 +25,7 @@ def test_NlpFit_inputs(): def test_NlpFit_outputs(): output_map = dict( - output_grid=dict( - extensions=None, - ), - output_xfm=dict( - extensions=None, - ), + output_grid=dict(extensions=None), output_xfm=dict(extensions=None) ) outputs = NlpFit.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Norm.py b/nipype/interfaces/minc/tests/test_auto_Norm.py index f8c2060250..4dee5debd3 100644 --- a/nipype/interfaces/minc/tests/test_auto_Norm.py +++ b/nipype/interfaces/minc/tests/test_auto_Norm.py @@ -4,43 +4,16 @@ def test_Norm_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - clamp=dict( - argstr="-clamp", - usedefault=True, - ), - clobber=dict( - argstr="-clobber", - usedefault=True, - ), - cutoff=dict( - argstr="-cutoff %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - lower=dict( - argstr="-lower %s", - ), - mask=dict( - argstr="-mask %s", - extensions=None, - ), - out_ceil=dict( - argstr="-out_ceil %s", - ), - out_floor=dict( - argstr="-out_floor %s", - ), + args=dict(argstr="%s"), + clamp=dict(argstr="-clamp", usedefault=True), + clobber=dict(argstr="-clobber", usedefault=True), + cutoff=dict(argstr="-cutoff %s"), + environ=dict(nohash=True, usedefault=True), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + lower=dict(argstr="-lower %s"), + mask=dict(argstr="-mask %s", extensions=None), + out_ceil=dict(argstr="-out_ceil %s"), + out_floor=dict(argstr="-out_floor %s"), output_file=dict( argstr="%s", extensions=None, @@ -57,21 +30,11 @@ def test_Norm_inputs(): name_source=["input_file"], name_template="%s_norm_threshold_mask.mnc", ), - threshold=dict( - argstr="-threshold", - ), - threshold_blur=dict( - argstr="-threshold_blur %s", - ), - threshold_bmt=dict( - argstr="-threshold_bmt", - ), - threshold_perc=dict( - argstr="-threshold_perc %s", - ), - upper=dict( - argstr="-upper %s", - ), + threshold=dict(argstr="-threshold"), + threshold_blur=dict(argstr="-threshold_blur %s"), + threshold_bmt=dict(argstr="-threshold_bmt"), + threshold_perc=dict(argstr="-threshold_perc %s"), + upper=dict(argstr="-upper %s"), ) inputs = Norm.input_spec() @@ -82,12 +45,7 @@ def test_Norm_inputs(): def test_Norm_outputs(): 
output_map = dict( - output_file=dict( - extensions=None, - ), - output_threshold_mask=dict( - extensions=None, - ), + output_file=dict(extensions=None), output_threshold_mask=dict(extensions=None) ) outputs = Norm.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Pik.py b/nipype/interfaces/minc/tests/test_auto_Pik.py index 3323ef74a1..a9191ff17b 100644 --- a/nipype/interfaces/minc/tests/test_auto_Pik.py +++ b/nipype/interfaces/minc/tests/test_auto_Pik.py @@ -4,50 +4,23 @@ def test_Pik_inputs(): input_map = dict( - annotated_bar=dict( - argstr="--anot_bar", - ), - args=dict( - argstr="%s", - ), - auto_range=dict( - argstr="--auto_range", - xor=("image_range", "auto_range"), - ), - clobber=dict( - argstr="-clobber", - usedefault=True, - ), - depth=dict( - argstr="--depth %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + annotated_bar=dict(argstr="--anot_bar"), + args=dict(argstr="%s"), + auto_range=dict(argstr="--auto_range", xor=("image_range", "auto_range")), + clobber=dict(argstr="-clobber", usedefault=True), + depth=dict(argstr="--depth %s"), + environ=dict(nohash=True, usedefault=True), horizontal_triplanar_view=dict( argstr="--horizontal", xor=("vertical_triplanar_view", "horizontal_triplanar_view"), ), image_range=dict( - argstr="--image_range %s %s", - xor=("image_range", "auto_range"), - ), - input_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - jpg=dict( - xor=("jpg", "png"), - ), - lookup=dict( - argstr="--lookup %s", - ), - minc_range=dict( - argstr="--range %s %s", + argstr="--image_range %s %s", xor=("image_range", "auto_range") ), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + jpg=dict(xor=("jpg", "png")), + lookup=dict(argstr="--lookup %s"), + minc_range=dict(argstr="--range %s %s"), output_file=dict( argstr="%s", extensions=None, @@ -58,54 +31,23 @@ def test_Pik_inputs(): name_template="%s.png", position=-1, ), - png=dict( - xor=("jpg", "png"), - ), - sagittal_offset=dict( - argstr="--sagittal_offset %s", - ), - sagittal_offset_perc=dict( - argstr="--sagittal_offset_perc %d", - ), - scale=dict( - argstr="--scale %s", - usedefault=True, - ), - slice_x=dict( - argstr="-x", - xor=("slice_z", "slice_y", "slice_x"), - ), - slice_y=dict( - argstr="-y", - xor=("slice_z", "slice_y", "slice_x"), - ), - slice_z=dict( - argstr="-z", - xor=("slice_z", "slice_y", "slice_x"), - ), - start=dict( - argstr="--slice %s", - ), - tile_size=dict( - argstr="--tilesize %s", - ), - title=dict( - argstr="%s", - ), - title_size=dict( - argstr="--title_size %s", - requires=["title"], - ), - triplanar=dict( - argstr="--triplanar", - ), + png=dict(xor=("jpg", "png")), + sagittal_offset=dict(argstr="--sagittal_offset %s"), + sagittal_offset_perc=dict(argstr="--sagittal_offset_perc %d"), + scale=dict(argstr="--scale %s", usedefault=True), + slice_x=dict(argstr="-x", xor=("slice_z", "slice_y", "slice_x")), + slice_y=dict(argstr="-y", xor=("slice_z", "slice_y", "slice_x")), + slice_z=dict(argstr="-z", xor=("slice_z", "slice_y", "slice_x")), + start=dict(argstr="--slice %s"), + tile_size=dict(argstr="--tilesize %s"), + title=dict(argstr="%s"), + title_size=dict(argstr="--title_size %s", requires=["title"]), + triplanar=dict(argstr="--triplanar"), vertical_triplanar_view=dict( argstr="--vertical", xor=("vertical_triplanar_view", "horizontal_triplanar_view"), ), - width=dict( - argstr="--width %s", - ), + width=dict(argstr="--width %s"), ) inputs = Pik.input_spec() @@ -115,11 +57,7 @@ def test_Pik_inputs(): 
def test_Pik_outputs(): - output_map = dict( - output_file=dict( - extensions=None, - ), - ) + output_map = dict(output_file=dict(extensions=None)) outputs = Pik.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Resample.py b/nipype/interfaces/minc/tests/test_auto_Resample.py index 59f2ae180d..7454d38115 100644 --- a/nipype/interfaces/minc/tests/test_auto_Resample.py +++ b/nipype/interfaces/minc/tests/test_auto_Resample.py @@ -4,33 +4,15 @@ def test_Resample_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - clobber=dict( - argstr="-clobber", - usedefault=True, - ), + args=dict(argstr="%s"), + clobber=dict(argstr="-clobber", usedefault=True), coronal_slices=dict( - argstr="-coronal", - xor=("transverse", "sagittal", "coronal"), - ), - dircos=dict( - argstr="-dircos %s %s %s", - xor=("nelements", "nelements_x_y_or_z"), - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fill=dict( - argstr="-fill", - xor=("nofill", "fill"), - ), - fill_value=dict( - argstr="-fillvalue %s", - requires=["fill"], + argstr="-coronal", xor=("transverse", "sagittal", "coronal") ), + dircos=dict(argstr="-dircos %s %s %s", xor=("nelements", "nelements_x_y_or_z")), + environ=dict(nohash=True, usedefault=True), + fill=dict(argstr="-fill", xor=("nofill", "fill")), + fill_value=dict(argstr="-fillvalue %s", requires=["fill"]), format_byte=dict( argstr="-byte", xor=( @@ -136,27 +118,15 @@ def test_Resample_inputs(): ), ), half_width_sinc_window=dict( - argstr="-width %s", - requires=["sinc_interpolation"], - ), - input_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, + argstr="-width %s", requires=["sinc_interpolation"] ), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), input_grid_files=dict(), - invert_transformation=dict( - argstr="-invert_transformation", - ), + invert_transformation=dict(argstr="-invert_transformation"), keep_real_range=dict( - argstr="-keep_real_range", - xor=("keep_real_range", "nokeep_real_range"), - ), - like=dict( - argstr="-like %s", - extensions=None, + argstr="-keep_real_range", xor=("keep_real_range", "nokeep_real_range") ), + like=dict(argstr="-like %s", extensions=None), nearest_neighbour_interpolation=dict( argstr="-nearest_neighbour", xor=( @@ -167,24 +137,16 @@ def test_Resample_inputs(): ), ), nelements=dict( - argstr="-nelements %s %s %s", - xor=("nelements", "nelements_x_y_or_z"), - ), - no_fill=dict( - argstr="-nofill", - xor=("nofill", "fill"), + argstr="-nelements %s %s %s", xor=("nelements", "nelements_x_y_or_z") ), + no_fill=dict(argstr="-nofill", xor=("nofill", "fill")), no_input_sampling=dict( - argstr="-use_input_sampling", - xor=("vio_transform", "no_input_sampling"), + argstr="-use_input_sampling", xor=("vio_transform", "no_input_sampling") ), nokeep_real_range=dict( - argstr="-nokeep_real_range", - xor=("keep_real_range", "nokeep_real_range"), - ), - origin=dict( - argstr="-origin %s %s %s", + argstr="-nokeep_real_range", xor=("keep_real_range", "nokeep_real_range") ), + origin=dict(argstr="-origin %s %s %s"), output_file=dict( argstr="%s", extensions=None, @@ -194,12 +156,9 @@ def test_Resample_inputs(): name_template="%s_resample.mnc", position=-1, ), - output_range=dict( - argstr="-range %s %s", - ), + output_range=dict(argstr="-range %s %s"), sagittal_slices=dict( - argstr="-sagittal", - xor=("transverse", "sagittal", "coronal"), + argstr="-sagittal", xor=("transverse", "sagittal", "coronal") ), sinc_interpolation=dict( 
argstr="-sinc", @@ -220,30 +179,14 @@ def test_Resample_inputs(): requires=["sinc_interpolation"], xor=("sinc_window_hanning", "sinc_window_hamming"), ), - spacetype=dict( - argstr="-spacetype %s", - ), - standard_sampling=dict( - argstr="-standard_sampling", - ), - start=dict( - argstr="-start %s %s %s", - xor=("nelements", "nelements_x_y_or_z"), - ), - step=dict( - argstr="-step %s %s %s", - xor=("nelements", "nelements_x_y_or_z"), - ), - talairach=dict( - argstr="-talairach", - ), - transformation=dict( - argstr="-transformation %s", - extensions=None, - ), + spacetype=dict(argstr="-spacetype %s"), + standard_sampling=dict(argstr="-standard_sampling"), + start=dict(argstr="-start %s %s %s", xor=("nelements", "nelements_x_y_or_z")), + step=dict(argstr="-step %s %s %s", xor=("nelements", "nelements_x_y_or_z")), + talairach=dict(argstr="-talairach"), + transformation=dict(argstr="-transformation %s", extensions=None), transverse_slices=dict( - argstr="-transverse", - xor=("transverse", "sagittal", "coronal"), + argstr="-transverse", xor=("transverse", "sagittal", "coronal") ), tricubic_interpolation=dict( argstr="-tricubic", @@ -263,15 +206,10 @@ def test_Resample_inputs(): "sinc_interpolation", ), ), - two=dict( - argstr="-2", - ), - units=dict( - argstr="-units %s", - ), + two=dict(argstr="-2"), + units=dict(argstr="-units %s"), vio_transform=dict( - argstr="-tfm_input_sampling", - xor=("vio_transform", "no_input_sampling"), + argstr="-tfm_input_sampling", xor=("vio_transform", "no_input_sampling") ), xdircos=dict( argstr="-xdircos %s", @@ -342,11 +280,7 @@ def test_Resample_inputs(): def test_Resample_outputs(): - output_map = dict( - output_file=dict( - extensions=None, - ), - ) + output_map = dict(output_file=dict(extensions=None)) outputs = Resample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Reshape.py b/nipype/interfaces/minc/tests/test_auto_Reshape.py index 50286b41e5..3f2be25b13 100644 --- a/nipype/interfaces/minc/tests/test_auto_Reshape.py +++ b/nipype/interfaces/minc/tests/test_auto_Reshape.py @@ -4,23 +4,10 @@ def test_Reshape_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - clobber=dict( - argstr="-clobber", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s"), + clobber=dict(argstr="-clobber", usedefault=True), + environ=dict(nohash=True, usedefault=True), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), output_file=dict( argstr="%s", extensions=None, @@ -30,12 +17,8 @@ def test_Reshape_inputs(): name_template="%s_reshape.mnc", position=-1, ), - verbose=dict( - argstr="-verbose", - ), - write_short=dict( - argstr="-short", - ), + verbose=dict(argstr="-verbose"), + write_short=dict(argstr="-short"), ) inputs = Reshape.input_spec() @@ -45,11 +28,7 @@ def test_Reshape_inputs(): def test_Reshape_outputs(): - output_map = dict( - output_file=dict( - extensions=None, - ), - ) + output_map = dict(output_file=dict(extensions=None)) outputs = Reshape.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_ToEcat.py b/nipype/interfaces/minc/tests/test_auto_ToEcat.py index f6ce521232..75bc57b662 100644 --- a/nipype/interfaces/minc/tests/test_auto_ToEcat.py +++ b/nipype/interfaces/minc/tests/test_auto_ToEcat.py @@ -4,40 +4,18 @@ def test_ToEcat_inputs(): input_map = dict( - 
args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - ignore_acquisition_variable=dict( - argstr="-ignore_acquisition_variable", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + ignore_acquisition_variable=dict(argstr="-ignore_acquisition_variable"), ignore_ecat_acquisition_variable=dict( - argstr="-ignore_ecat_acquisition_variable", - ), - ignore_ecat_main=dict( - argstr="-ignore_ecat_main", - ), - ignore_ecat_subheader_variable=dict( - argstr="-ignore_ecat_subheader_variable", - ), - ignore_patient_variable=dict( - argstr="-ignore_patient_variable", - ), - ignore_study_variable=dict( - argstr="-ignore_study_variable", - ), - input_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - no_decay_corr_fctr=dict( - argstr="-no_decay_corr_fctr", - ), + argstr="-ignore_ecat_acquisition_variable" + ), + ignore_ecat_main=dict(argstr="-ignore_ecat_main"), + ignore_ecat_subheader_variable=dict(argstr="-ignore_ecat_subheader_variable"), + ignore_patient_variable=dict(argstr="-ignore_patient_variable"), + ignore_study_variable=dict(argstr="-ignore_study_variable"), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + no_decay_corr_fctr=dict(argstr="-no_decay_corr_fctr"), output_file=dict( argstr="%s", extensions=None, @@ -48,9 +26,7 @@ def test_ToEcat_inputs(): name_template="%s_to_ecat.v", position=-1, ), - voxels_as_integers=dict( - argstr="-label", - ), + voxels_as_integers=dict(argstr="-label"), ) inputs = ToEcat.input_spec() @@ -60,11 +36,7 @@ def test_ToEcat_inputs(): def test_ToEcat_outputs(): - output_map = dict( - output_file=dict( - extensions=None, - ), - ) + output_map = dict(output_file=dict(extensions=None)) outputs = ToEcat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_ToRaw.py b/nipype/interfaces/minc/tests/test_auto_ToRaw.py index 39940170f6..a2bb27dbfd 100644 --- a/nipype/interfaces/minc/tests/test_auto_ToRaw.py +++ b/nipype/interfaces/minc/tests/test_auto_ToRaw.py @@ -4,33 +4,12 @@ def test_ToRaw_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - nonormalize=dict( - argstr="-nonormalize", - xor=("normalize", "nonormalize"), - ), - normalize=dict( - argstr="-normalize", - xor=("normalize", "nonormalize"), - ), - out_file=dict( - argstr="> %s", - extensions=None, - genfile=True, - position=-1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + nonormalize=dict(argstr="-nonormalize", xor=("normalize", "nonormalize")), + normalize=dict(argstr="-normalize", xor=("normalize", "nonormalize")), + out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), output_file=dict( extensions=None, hash_files=False, @@ -94,9 +73,7 @@ def test_ToRaw_inputs(): "write_double", ), ), - write_range=dict( - argstr="-range %s %s", - ), + write_range=dict(argstr="-range %s %s"), write_short=dict( argstr="-short", xor=( @@ -108,14 +85,8 @@ def test_ToRaw_inputs(): "write_double", ), ), - write_signed=dict( - argstr="-signed", - xor=("write_signed", "write_unsigned"), - ), - write_unsigned=dict( - argstr="-unsigned", - xor=("write_signed", "write_unsigned"), - ), + write_signed=dict(argstr="-signed", xor=("write_signed", "write_unsigned")), + 
write_unsigned=dict(argstr="-unsigned", xor=("write_signed", "write_unsigned")), ) inputs = ToRaw.input_spec() @@ -125,11 +96,7 @@ def test_ToRaw_inputs(): def test_ToRaw_outputs(): - output_map = dict( - output_file=dict( - extensions=None, - ), - ) + output_map = dict(output_file=dict(extensions=None)) outputs = ToRaw.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_VolSymm.py b/nipype/interfaces/minc/tests/test_auto_VolSymm.py index a1b89616f2..d1827b4fb9 100644 --- a/nipype/interfaces/minc/tests/test_auto_VolSymm.py +++ b/nipype/interfaces/minc/tests/test_auto_VolSymm.py @@ -4,37 +4,15 @@ def test_VolSymm_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - clobber=dict( - argstr="-clobber", - usedefault=True, - ), - config_file=dict( - argstr="-config_file %s", - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fit_linear=dict( - argstr="-linear", - ), - fit_nonlinear=dict( - argstr="-nonlinear", - ), - input_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), + args=dict(argstr="%s"), + clobber=dict(argstr="-clobber", usedefault=True), + config_file=dict(argstr="-config_file %s", extensions=None), + environ=dict(nohash=True, usedefault=True), + fit_linear=dict(argstr="-linear"), + fit_nonlinear=dict(argstr="-nonlinear"), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), input_grid_files=dict(), - nofit=dict( - argstr="-nofit", - ), + nofit=dict(argstr="-nofit"), output_file=dict( argstr="%s", extensions=None, @@ -54,18 +32,10 @@ def test_VolSymm_inputs(): name_template="%s_vol_symm.xfm", position=-2, ), - verbose=dict( - argstr="-verbose", - ), - x=dict( - argstr="-x", - ), - y=dict( - argstr="-y", - ), - z=dict( - argstr="-z", - ), + verbose=dict(argstr="-verbose"), + x=dict(argstr="-x"), + y=dict(argstr="-y"), + z=dict(argstr="-z"), ) inputs = VolSymm.input_spec() @@ -76,15 +46,9 @@ def test_VolSymm_inputs(): def test_VolSymm_outputs(): output_map = dict( - output_file=dict( - extensions=None, - ), - output_grid=dict( - extensions=None, - ), - trans_file=dict( - extensions=None, - ), + output_file=dict(extensions=None), + output_grid=dict(extensions=None), + trans_file=dict(extensions=None), ) outputs = VolSymm.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Volcentre.py b/nipype/interfaces/minc/tests/test_auto_Volcentre.py index cf9f777f70..0a9ef4ea81 100644 --- a/nipype/interfaces/minc/tests/test_auto_Volcentre.py +++ b/nipype/interfaces/minc/tests/test_auto_Volcentre.py @@ -4,29 +4,12 @@ def test_Volcentre_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - centre=dict( - argstr="-centre %s %s %s", - ), - clobber=dict( - argstr="-clobber", - usedefault=True, - ), - com=dict( - argstr="-com", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s"), + centre=dict(argstr="-centre %s %s %s"), + clobber=dict(argstr="-clobber", usedefault=True), + com=dict(argstr="-com"), + environ=dict(nohash=True, usedefault=True), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), output_file=dict( argstr="%s", extensions=None, @@ -36,12 +19,8 @@ def test_Volcentre_inputs(): name_template="%s_volcentre.mnc", position=-1, ), - verbose=dict( - argstr="-verbose", - ), - zero_dircos=dict( - argstr="-zero_dircos", - ), + verbose=dict(argstr="-verbose"), + 
zero_dircos=dict(argstr="-zero_dircos"), ) inputs = Volcentre.input_spec() @@ -51,11 +30,7 @@ def test_Volcentre_inputs(): def test_Volcentre_outputs(): - output_map = dict( - output_file=dict( - extensions=None, - ), - ) + output_map = dict(output_file=dict(extensions=None)) outputs = Volcentre.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Voliso.py b/nipype/interfaces/minc/tests/test_auto_Voliso.py index 40d01b5767..aa5cc338ab 100644 --- a/nipype/interfaces/minc/tests/test_auto_Voliso.py +++ b/nipype/interfaces/minc/tests/test_auto_Voliso.py @@ -4,32 +4,13 @@ def test_Voliso_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - avgstep=dict( - argstr="--avgstep", - ), - clobber=dict( - argstr="--clobber", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - maxstep=dict( - argstr="--maxstep %s", - ), - minstep=dict( - argstr="--minstep %s", - ), + args=dict(argstr="%s"), + avgstep=dict(argstr="--avgstep"), + clobber=dict(argstr="--clobber", usedefault=True), + environ=dict(nohash=True, usedefault=True), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + maxstep=dict(argstr="--maxstep %s"), + minstep=dict(argstr="--minstep %s"), output_file=dict( argstr="%s", extensions=None, @@ -39,9 +20,7 @@ def test_Voliso_inputs(): name_template="%s_voliso.mnc", position=-1, ), - verbose=dict( - argstr="--verbose", - ), + verbose=dict(argstr="--verbose"), ) inputs = Voliso.input_spec() @@ -51,11 +30,7 @@ def test_Voliso_inputs(): def test_Voliso_outputs(): - output_map = dict( - output_file=dict( - extensions=None, - ), - ) + output_map = dict(output_file=dict(extensions=None)) outputs = Voliso.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Volpad.py b/nipype/interfaces/minc/tests/test_auto_Volpad.py index 5102199657..f052e699e9 100644 --- a/nipype/interfaces/minc/tests/test_auto_Volpad.py +++ b/nipype/interfaces/minc/tests/test_auto_Volpad.py @@ -4,32 +4,13 @@ def test_Volpad_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - auto=dict( - argstr="-auto", - ), - auto_freq=dict( - argstr="-auto_freq %s", - ), - clobber=dict( - argstr="-clobber", - usedefault=True, - ), - distance=dict( - argstr="-distance %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s"), + auto=dict(argstr="-auto"), + auto_freq=dict(argstr="-auto_freq %s"), + clobber=dict(argstr="-clobber", usedefault=True), + distance=dict(argstr="-distance %s"), + environ=dict(nohash=True, usedefault=True), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), output_file=dict( argstr="%s", extensions=None, @@ -39,15 +20,9 @@ def test_Volpad_inputs(): name_template="%s_volpad.mnc", position=-1, ), - smooth=dict( - argstr="-smooth", - ), - smooth_distance=dict( - argstr="-smooth_distance %s", - ), - verbose=dict( - argstr="-verbose", - ), + smooth=dict(argstr="-smooth"), + smooth_distance=dict(argstr="-smooth_distance %s"), + verbose=dict(argstr="-verbose"), ) inputs = Volpad.input_spec() @@ -57,11 +32,7 @@ def test_Volpad_inputs(): def test_Volpad_outputs(): - output_map = dict( - output_file=dict( - extensions=None, - ), - ) + output_map = dict(output_file=dict(extensions=None)) outputs = 
Volpad.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_XfmAvg.py b/nipype/interfaces/minc/tests/test_auto_XfmAvg.py index f5df6f4d54..bd7b30897b 100644 --- a/nipype/interfaces/minc/tests/test_auto_XfmAvg.py +++ b/nipype/interfaces/minc/tests/test_auto_XfmAvg.py @@ -4,45 +4,17 @@ def test_XfmAvg_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - avg_linear=dict( - argstr="-avg_linear", - ), - avg_nonlinear=dict( - argstr="-avg_nonlinear", - ), - clobber=dict( - argstr="-clobber", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - ignore_linear=dict( - argstr="-ignore_linear", - ), - ignore_nonlinear=dict( - argstr="-ignore_nonline", - ), - input_files=dict( - argstr="%s", - mandatory=True, - position=-2, - sep=" ", - ), + args=dict(argstr="%s"), + avg_linear=dict(argstr="-avg_linear"), + avg_nonlinear=dict(argstr="-avg_nonlinear"), + clobber=dict(argstr="-clobber", usedefault=True), + environ=dict(nohash=True, usedefault=True), + ignore_linear=dict(argstr="-ignore_linear"), + ignore_nonlinear=dict(argstr="-ignore_nonline"), + input_files=dict(argstr="%s", mandatory=True, position=-2, sep=" "), input_grid_files=dict(), - output_file=dict( - argstr="%s", - extensions=None, - genfile=True, - position=-1, - ), - verbose=dict( - argstr="-verbose", - ), + output_file=dict(argstr="%s", extensions=None, genfile=True, position=-1), + verbose=dict(argstr="-verbose"), ) inputs = XfmAvg.input_spec() @@ -53,12 +25,7 @@ def test_XfmAvg_inputs(): def test_XfmAvg_outputs(): output_map = dict( - output_file=dict( - extensions=None, - ), - output_grid=dict( - extensions=None, - ), + output_file=dict(extensions=None), output_grid=dict(extensions=None) ) outputs = XfmAvg.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_XfmConcat.py b/nipype/interfaces/minc/tests/test_auto_XfmConcat.py index 58144779b8..fedc80449d 100644 --- a/nipype/interfaces/minc/tests/test_auto_XfmConcat.py +++ b/nipype/interfaces/minc/tests/test_auto_XfmConcat.py @@ -4,23 +4,10 @@ def test_XfmConcat_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - clobber=dict( - argstr="-clobber", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_files=dict( - argstr="%s", - mandatory=True, - position=-2, - sep=" ", - ), + args=dict(argstr="%s"), + clobber=dict(argstr="-clobber", usedefault=True), + environ=dict(nohash=True, usedefault=True), + input_files=dict(argstr="%s", mandatory=True, position=-2, sep=" "), input_grid_files=dict(), output_file=dict( argstr="%s", @@ -31,9 +18,7 @@ def test_XfmConcat_inputs(): name_template="%s_xfmconcat.xfm", position=-1, ), - verbose=dict( - argstr="-verbose", - ), + verbose=dict(argstr="-verbose"), ) inputs = XfmConcat.input_spec() @@ -43,12 +28,7 @@ def test_XfmConcat_inputs(): def test_XfmConcat_outputs(): - output_map = dict( - output_file=dict( - extensions=None, - ), - output_grids=dict(), - ) + output_map = dict(output_file=dict(extensions=None), output_grids=dict()) outputs = XfmConcat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_XfmInvert.py b/nipype/interfaces/minc/tests/test_auto_XfmInvert.py index aa8fb61ccd..dea70656e9 100644 --- a/nipype/interfaces/minc/tests/test_auto_XfmInvert.py +++ b/nipype/interfaces/minc/tests/test_auto_XfmInvert.py @@ -4,32 +4,12 @@ def test_XfmInvert_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - clobber=dict( - 
argstr="-clobber", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - input_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - output_file=dict( - argstr="%s", - extensions=None, - genfile=True, - position=-1, - ), - verbose=dict( - argstr="-verbose", - ), + args=dict(argstr="%s"), + clobber=dict(argstr="-clobber", usedefault=True), + environ=dict(nohash=True, usedefault=True), + input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + output_file=dict(argstr="%s", extensions=None, genfile=True, position=-1), + verbose=dict(argstr="-verbose"), ) inputs = XfmInvert.input_spec() @@ -40,12 +20,7 @@ def test_XfmInvert_inputs(): def test_XfmInvert_outputs(): output_map = dict( - output_file=dict( - extensions=None, - ), - output_grid=dict( - extensions=None, - ), + output_file=dict(extensions=None), output_grid=dict(extensions=None) ) outputs = XfmInvert.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py index 9daa1b996e..fd556e2ce1 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py @@ -4,96 +4,32 @@ def test_JistBrainMgdmSegmentation_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inAdjust=dict( - argstr="--inAdjust %s", - ), - inAtlas=dict( - argstr="--inAtlas %s", - extensions=None, - ), - inCompute=dict( - argstr="--inCompute %s", - ), - inCurvature=dict( - argstr="--inCurvature %f", - ), - inData=dict( - argstr="--inData %f", - ), - inFLAIR=dict( - argstr="--inFLAIR %s", - extensions=None, - ), - inMP2RAGE=dict( - argstr="--inMP2RAGE %s", - extensions=None, - ), - inMP2RAGE2=dict( - argstr="--inMP2RAGE2 %s", - extensions=None, - ), - inMPRAGE=dict( - argstr="--inMPRAGE %s", - extensions=None, - ), - inMax=dict( - argstr="--inMax %d", - ), - inMin=dict( - argstr="--inMin %f", - ), - inOutput=dict( - argstr="--inOutput %s", - ), - inPV=dict( - argstr="--inPV %s", - extensions=None, - ), - inPosterior=dict( - argstr="--inPosterior %f", - ), - inSteps=dict( - argstr="--inSteps %d", - ), - inTopology=dict( - argstr="--inTopology %s", - ), - null=dict( - argstr="--null %s", - ), - outLevelset=dict( - argstr="--outLevelset %s", - hash_files=False, - ), - outPosterior2=dict( - argstr="--outPosterior2 %s", - hash_files=False, - ), - outPosterior3=dict( - argstr="--outPosterior3 %s", - hash_files=False, - ), - outSegmented=dict( - argstr="--outSegmented %s", - hash_files=False, - ), - xDefaultMem=dict( - argstr="-xDefaultMem %d", - ), - xMaxProcess=dict( - argstr="-xMaxProcess %d", - usedefault=True, - ), - xPrefExt=dict( - argstr="--xPrefExt %s", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inAdjust=dict(argstr="--inAdjust %s"), + inAtlas=dict(argstr="--inAtlas %s", extensions=None), + inCompute=dict(argstr="--inCompute %s"), + inCurvature=dict(argstr="--inCurvature %f"), + inData=dict(argstr="--inData %f"), + inFLAIR=dict(argstr="--inFLAIR %s", extensions=None), + inMP2RAGE=dict(argstr="--inMP2RAGE %s", extensions=None), + inMP2RAGE2=dict(argstr="--inMP2RAGE2 %s", extensions=None), + inMPRAGE=dict(argstr="--inMPRAGE %s", extensions=None), + inMax=dict(argstr="--inMax %d"), + inMin=dict(argstr="--inMin %f"), + inOutput=dict(argstr="--inOutput %s"), + inPV=dict(argstr="--inPV %s", 
extensions=None), + inPosterior=dict(argstr="--inPosterior %f"), + inSteps=dict(argstr="--inSteps %d"), + inTopology=dict(argstr="--inTopology %s"), + null=dict(argstr="--null %s"), + outLevelset=dict(argstr="--outLevelset %s", hash_files=False), + outPosterior2=dict(argstr="--outPosterior2 %s", hash_files=False), + outPosterior3=dict(argstr="--outPosterior3 %s", hash_files=False), + outSegmented=dict(argstr="--outSegmented %s", hash_files=False), + xDefaultMem=dict(argstr="-xDefaultMem %d"), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), + xPrefExt=dict(argstr="--xPrefExt %s"), ) inputs = JistBrainMgdmSegmentation.input_spec() @@ -104,18 +40,10 @@ def test_JistBrainMgdmSegmentation_inputs(): def test_JistBrainMgdmSegmentation_outputs(): output_map = dict( - outLevelset=dict( - extensions=None, - ), - outPosterior2=dict( - extensions=None, - ), - outPosterior3=dict( - extensions=None, - ), - outSegmented=dict( - extensions=None, - ), + outLevelset=dict(extensions=None), + outPosterior2=dict(extensions=None), + outPosterior3=dict(extensions=None), + outSegmented=dict(extensions=None), ) outputs = JistBrainMgdmSegmentation.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py index e7706b16b5..da92f8b58f 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py @@ -4,44 +4,17 @@ def test_JistBrainMp2rageDuraEstimation_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inDistance=dict( - argstr="--inDistance %f", - ), - inSecond=dict( - argstr="--inSecond %s", - extensions=None, - ), - inSkull=dict( - argstr="--inSkull %s", - extensions=None, - ), - inoutput=dict( - argstr="--inoutput %s", - ), - null=dict( - argstr="--null %s", - ), - outDura=dict( - argstr="--outDura %s", - hash_files=False, - ), - xDefaultMem=dict( - argstr="-xDefaultMem %d", - ), - xMaxProcess=dict( - argstr="-xMaxProcess %d", - usedefault=True, - ), - xPrefExt=dict( - argstr="--xPrefExt %s", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inDistance=dict(argstr="--inDistance %f"), + inSecond=dict(argstr="--inSecond %s", extensions=None), + inSkull=dict(argstr="--inSkull %s", extensions=None), + inoutput=dict(argstr="--inoutput %s"), + null=dict(argstr="--null %s"), + outDura=dict(argstr="--outDura %s", hash_files=False), + xDefaultMem=dict(argstr="-xDefaultMem %d"), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), + xPrefExt=dict(argstr="--xPrefExt %s"), ) inputs = JistBrainMp2rageDuraEstimation.input_spec() @@ -51,11 +24,7 @@ def test_JistBrainMp2rageDuraEstimation_inputs(): def test_JistBrainMp2rageDuraEstimation_outputs(): - output_map = dict( - outDura=dict( - extensions=None, - ), - ) + output_map = dict(outDura=dict(extensions=None)) outputs = JistBrainMp2rageDuraEstimation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py index 637b50dcad..216f256a80 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py @@ -4,61 +4,21 @@ def test_JistBrainMp2rageSkullStripping_inputs(): input_map 
= dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inFilter=dict( - argstr="--inFilter %s", - extensions=None, - ), - inSecond=dict( - argstr="--inSecond %s", - extensions=None, - ), - inSkip=dict( - argstr="--inSkip %s", - ), - inT1=dict( - argstr="--inT1 %s", - extensions=None, - ), - inT1weighted=dict( - argstr="--inT1weighted %s", - extensions=None, - ), - null=dict( - argstr="--null %s", - ), - outBrain=dict( - argstr="--outBrain %s", - hash_files=False, - ), - outMasked=dict( - argstr="--outMasked %s", - hash_files=False, - ), - outMasked2=dict( - argstr="--outMasked2 %s", - hash_files=False, - ), - outMasked3=dict( - argstr="--outMasked3 %s", - hash_files=False, - ), - xDefaultMem=dict( - argstr="-xDefaultMem %d", - ), - xMaxProcess=dict( - argstr="-xMaxProcess %d", - usedefault=True, - ), - xPrefExt=dict( - argstr="--xPrefExt %s", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inFilter=dict(argstr="--inFilter %s", extensions=None), + inSecond=dict(argstr="--inSecond %s", extensions=None), + inSkip=dict(argstr="--inSkip %s"), + inT1=dict(argstr="--inT1 %s", extensions=None), + inT1weighted=dict(argstr="--inT1weighted %s", extensions=None), + null=dict(argstr="--null %s"), + outBrain=dict(argstr="--outBrain %s", hash_files=False), + outMasked=dict(argstr="--outMasked %s", hash_files=False), + outMasked2=dict(argstr="--outMasked2 %s", hash_files=False), + outMasked3=dict(argstr="--outMasked3 %s", hash_files=False), + xDefaultMem=dict(argstr="-xDefaultMem %d"), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), + xPrefExt=dict(argstr="--xPrefExt %s"), ) inputs = JistBrainMp2rageSkullStripping.input_spec() @@ -69,18 +29,10 @@ def test_JistBrainMp2rageSkullStripping_inputs(): def test_JistBrainMp2rageSkullStripping_outputs(): output_map = dict( - outBrain=dict( - extensions=None, - ), - outMasked=dict( - extensions=None, - ), - outMasked2=dict( - extensions=None, - ), - outMasked3=dict( - extensions=None, - ), + outBrain=dict(extensions=None), + outMasked=dict(extensions=None), + outMasked2=dict(extensions=None), + outMasked3=dict(extensions=None), ) outputs = JistBrainMp2rageSkullStripping.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py index 61a3e2b074..f996db68a1 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py @@ -4,40 +4,16 @@ def test_JistBrainPartialVolumeFilter_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inInput=dict( - argstr="--inInput %s", - extensions=None, - ), - inPV=dict( - argstr="--inPV %s", - ), - inoutput=dict( - argstr="--inoutput %s", - ), - null=dict( - argstr="--null %s", - ), - outPartial=dict( - argstr="--outPartial %s", - hash_files=False, - ), - xDefaultMem=dict( - argstr="-xDefaultMem %d", - ), - xMaxProcess=dict( - argstr="-xMaxProcess %d", - usedefault=True, - ), - xPrefExt=dict( - argstr="--xPrefExt %s", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inInput=dict(argstr="--inInput %s", extensions=None), + inPV=dict(argstr="--inPV %s"), + inoutput=dict(argstr="--inoutput %s"), + null=dict(argstr="--null %s"), + outPartial=dict(argstr="--outPartial %s", hash_files=False), + xDefaultMem=dict(argstr="-xDefaultMem %d"), + 
xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), + xPrefExt=dict(argstr="--xPrefExt %s"), ) inputs = JistBrainPartialVolumeFilter.input_spec() @@ -47,11 +23,7 @@ def test_JistBrainPartialVolumeFilter_inputs(): def test_JistBrainPartialVolumeFilter_outputs(): - output_map = dict( - outPartial=dict( - extensions=None, - ), - ) + output_map = dict(outPartial=dict(extensions=None)) outputs = JistBrainPartialVolumeFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py b/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py index 41ae9c5cce..4c8919d848 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py @@ -4,56 +4,21 @@ def test_JistCortexSurfaceMeshInflation_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inLevelset=dict( - argstr="--inLevelset %s", - extensions=None, - ), - inLorentzian=dict( - argstr="--inLorentzian %s", - ), - inMax=dict( - argstr="--inMax %d", - ), - inMean=dict( - argstr="--inMean %f", - ), - inSOR=dict( - argstr="--inSOR %f", - ), - inStep=dict( - argstr="--inStep %d", - ), - inTopology=dict( - argstr="--inTopology %s", - ), - null=dict( - argstr="--null %s", - ), - outInflated=dict( - argstr="--outInflated %s", - hash_files=False, - ), - outOriginal=dict( - argstr="--outOriginal %s", - hash_files=False, - ), - xDefaultMem=dict( - argstr="-xDefaultMem %d", - ), - xMaxProcess=dict( - argstr="-xMaxProcess %d", - usedefault=True, - ), - xPrefExt=dict( - argstr="--xPrefExt %s", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inLevelset=dict(argstr="--inLevelset %s", extensions=None), + inLorentzian=dict(argstr="--inLorentzian %s"), + inMax=dict(argstr="--inMax %d"), + inMean=dict(argstr="--inMean %f"), + inSOR=dict(argstr="--inSOR %f"), + inStep=dict(argstr="--inStep %d"), + inTopology=dict(argstr="--inTopology %s"), + null=dict(argstr="--null %s"), + outInflated=dict(argstr="--outInflated %s", hash_files=False), + outOriginal=dict(argstr="--outOriginal %s", hash_files=False), + xDefaultMem=dict(argstr="-xDefaultMem %d"), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), + xPrefExt=dict(argstr="--xPrefExt %s"), ) inputs = JistCortexSurfaceMeshInflation.input_spec() @@ -64,12 +29,7 @@ def test_JistCortexSurfaceMeshInflation_inputs(): def test_JistCortexSurfaceMeshInflation_outputs(): output_map = dict( - outInflated=dict( - extensions=None, - ), - outOriginal=dict( - extensions=None, - ), + outInflated=dict(extensions=None), outOriginal=dict(extensions=None) ) outputs = JistCortexSurfaceMeshInflation.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py b/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py index 94d9cc525a..bb5eafcf3d 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py @@ -4,63 +4,22 @@ def test_JistIntensityMp2rageMasking_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inBackground=dict( - argstr="--inBackground %s", - ), - inMasking=dict( - argstr="--inMasking %s", - ), - inQuantitative=dict( - argstr="--inQuantitative %s", - extensions=None, - ), - inSecond=dict( - 
argstr="--inSecond %s", - extensions=None, - ), - inSkip=dict( - argstr="--inSkip %s", - ), - inT1weighted=dict( - argstr="--inT1weighted %s", - extensions=None, - ), - null=dict( - argstr="--null %s", - ), - outMasked=dict( - argstr="--outMasked_T1_Map %s", - hash_files=False, - ), - outMasked2=dict( - argstr="--outMasked_T1weighted %s", - hash_files=False, - ), - outSignal=dict( - argstr="--outSignal_Proba %s", - hash_files=False, - ), - outSignal2=dict( - argstr="--outSignal_Mask %s", - hash_files=False, - ), - xDefaultMem=dict( - argstr="-xDefaultMem %d", - ), - xMaxProcess=dict( - argstr="-xMaxProcess %d", - usedefault=True, - ), - xPrefExt=dict( - argstr="--xPrefExt %s", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inBackground=dict(argstr="--inBackground %s"), + inMasking=dict(argstr="--inMasking %s"), + inQuantitative=dict(argstr="--inQuantitative %s", extensions=None), + inSecond=dict(argstr="--inSecond %s", extensions=None), + inSkip=dict(argstr="--inSkip %s"), + inT1weighted=dict(argstr="--inT1weighted %s", extensions=None), + null=dict(argstr="--null %s"), + outMasked=dict(argstr="--outMasked_T1_Map %s", hash_files=False), + outMasked2=dict(argstr="--outMasked_T1weighted %s", hash_files=False), + outSignal=dict(argstr="--outSignal_Proba %s", hash_files=False), + outSignal2=dict(argstr="--outSignal_Mask %s", hash_files=False), + xDefaultMem=dict(argstr="-xDefaultMem %d"), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), + xPrefExt=dict(argstr="--xPrefExt %s"), ) inputs = JistIntensityMp2rageMasking.input_spec() @@ -71,18 +30,10 @@ def test_JistIntensityMp2rageMasking_inputs(): def test_JistIntensityMp2rageMasking_outputs(): output_map = dict( - outMasked=dict( - extensions=None, - ), - outMasked2=dict( - extensions=None, - ), - outSignal=dict( - extensions=None, - ), - outSignal2=dict( - extensions=None, - ), + outMasked=dict(extensions=None), + outMasked2=dict(extensions=None), + outSignal=dict(extensions=None), + outSignal2=dict(extensions=None), ) outputs = JistIntensityMp2rageMasking.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py index 0cc1501e4f..10be662930 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py @@ -4,41 +4,16 @@ def test_JistLaminarProfileCalculator_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inIntensity=dict( - argstr="--inIntensity %s", - extensions=None, - ), - inMask=dict( - argstr="--inMask %s", - extensions=None, - ), - incomputed=dict( - argstr="--incomputed %s", - ), - null=dict( - argstr="--null %s", - ), - outResult=dict( - argstr="--outResult %s", - hash_files=False, - ), - xDefaultMem=dict( - argstr="-xDefaultMem %d", - ), - xMaxProcess=dict( - argstr="-xMaxProcess %d", - usedefault=True, - ), - xPrefExt=dict( - argstr="--xPrefExt %s", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inIntensity=dict(argstr="--inIntensity %s", extensions=None), + inMask=dict(argstr="--inMask %s", extensions=None), + incomputed=dict(argstr="--incomputed %s"), + null=dict(argstr="--null %s"), + outResult=dict(argstr="--outResult %s", hash_files=False), + xDefaultMem=dict(argstr="-xDefaultMem %d"), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), + 
xPrefExt=dict(argstr="--xPrefExt %s"), ) inputs = JistLaminarProfileCalculator.input_spec() @@ -48,11 +23,7 @@ def test_JistLaminarProfileCalculator_inputs(): def test_JistLaminarProfileCalculator_outputs(): - output_map = dict( - outResult=dict( - extensions=None, - ), - ) + output_map = dict(outResult=dict(extensions=None)) outputs = JistLaminarProfileCalculator.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py index 758d331935..01bd958029 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py @@ -4,46 +4,18 @@ def test_JistLaminarProfileGeometry_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inProfile=dict( - argstr="--inProfile %s", - extensions=None, - ), - incomputed=dict( - argstr="--incomputed %s", - ), - inoutside=dict( - argstr="--inoutside %f", - ), - inregularization=dict( - argstr="--inregularization %s", - ), - insmoothing=dict( - argstr="--insmoothing %f", - ), - null=dict( - argstr="--null %s", - ), - outResult=dict( - argstr="--outResult %s", - hash_files=False, - ), - xDefaultMem=dict( - argstr="-xDefaultMem %d", - ), - xMaxProcess=dict( - argstr="-xMaxProcess %d", - usedefault=True, - ), - xPrefExt=dict( - argstr="--xPrefExt %s", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inProfile=dict(argstr="--inProfile %s", extensions=None), + incomputed=dict(argstr="--incomputed %s"), + inoutside=dict(argstr="--inoutside %f"), + inregularization=dict(argstr="--inregularization %s"), + insmoothing=dict(argstr="--insmoothing %f"), + null=dict(argstr="--null %s"), + outResult=dict(argstr="--outResult %s", hash_files=False), + xDefaultMem=dict(argstr="-xDefaultMem %d"), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), + xPrefExt=dict(argstr="--xPrefExt %s"), ) inputs = JistLaminarProfileGeometry.input_spec() @@ -53,11 +25,7 @@ def test_JistLaminarProfileGeometry_inputs(): def test_JistLaminarProfileGeometry_outputs(): - output_map = dict( - outResult=dict( - extensions=None, - ), - ) + output_map = dict(outResult=dict(extensions=None)) outputs = JistLaminarProfileGeometry.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py index 65841c48a9..f60597b6fc 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py @@ -4,46 +4,17 @@ def test_JistLaminarProfileSampling_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inCortex=dict( - argstr="--inCortex %s", - extensions=None, - ), - inIntensity=dict( - argstr="--inIntensity %s", - extensions=None, - ), - inProfile=dict( - argstr="--inProfile %s", - extensions=None, - ), - null=dict( - argstr="--null %s", - ), - outProfile2=dict( - argstr="--outProfile2 %s", - hash_files=False, - ), - outProfilemapped=dict( - argstr="--outProfilemapped %s", - hash_files=False, - ), - xDefaultMem=dict( - argstr="-xDefaultMem %d", - ), - xMaxProcess=dict( - argstr="-xMaxProcess %d", - usedefault=True, - ), - xPrefExt=dict( - argstr="--xPrefExt %s", - ), 
+ args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inCortex=dict(argstr="--inCortex %s", extensions=None), + inIntensity=dict(argstr="--inIntensity %s", extensions=None), + inProfile=dict(argstr="--inProfile %s", extensions=None), + null=dict(argstr="--null %s"), + outProfile2=dict(argstr="--outProfile2 %s", hash_files=False), + outProfilemapped=dict(argstr="--outProfilemapped %s", hash_files=False), + xDefaultMem=dict(argstr="-xDefaultMem %d"), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), + xPrefExt=dict(argstr="--xPrefExt %s"), ) inputs = JistLaminarProfileSampling.input_spec() @@ -54,12 +25,7 @@ def test_JistLaminarProfileSampling_inputs(): def test_JistLaminarProfileSampling_outputs(): output_map = dict( - outProfile2=dict( - extensions=None, - ), - outProfilemapped=dict( - extensions=None, - ), + outProfile2=dict(extensions=None), outProfilemapped=dict(extensions=None) ) outputs = JistLaminarProfileSampling.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py index fed4abfca1..dd56007f05 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py @@ -4,45 +4,17 @@ def test_JistLaminarROIAveraging_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inIntensity=dict( - argstr="--inIntensity %s", - extensions=None, - ), - inMask=dict( - argstr="--inMask %s", - extensions=None, - ), - inROI=dict( - argstr="--inROI %s", - extensions=None, - ), - inROI2=dict( - argstr="--inROI2 %s", - ), - null=dict( - argstr="--null %s", - ), - outROI3=dict( - argstr="--outROI3 %s", - hash_files=False, - ), - xDefaultMem=dict( - argstr="-xDefaultMem %d", - ), - xMaxProcess=dict( - argstr="-xMaxProcess %d", - usedefault=True, - ), - xPrefExt=dict( - argstr="--xPrefExt %s", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inIntensity=dict(argstr="--inIntensity %s", extensions=None), + inMask=dict(argstr="--inMask %s", extensions=None), + inROI=dict(argstr="--inROI %s", extensions=None), + inROI2=dict(argstr="--inROI2 %s"), + null=dict(argstr="--null %s"), + outROI3=dict(argstr="--outROI3 %s", hash_files=False), + xDefaultMem=dict(argstr="-xDefaultMem %d"), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), + xPrefExt=dict(argstr="--xPrefExt %s"), ) inputs = JistLaminarROIAveraging.input_spec() @@ -52,11 +24,7 @@ def test_JistLaminarROIAveraging_inputs(): def test_JistLaminarROIAveraging_outputs(): - output_map = dict( - outROI3=dict( - extensions=None, - ), - ) + output_map = dict(outROI3=dict(extensions=None)) outputs = JistLaminarROIAveraging.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py index 31d34ae32e..496bcf86b3 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py @@ -4,73 +4,26 @@ def test_JistLaminarVolumetricLayering_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inInner=dict( - argstr="--inInner %s", - extensions=None, - ), - inLayering=dict( - argstr="--inLayering %s", - ), - inLayering2=dict( - 
argstr="--inLayering2 %s", - ), - inMax=dict( - argstr="--inMax %d", - ), - inMin=dict( - argstr="--inMin %f", - ), - inNumber=dict( - argstr="--inNumber %d", - ), - inOuter=dict( - argstr="--inOuter %s", - extensions=None, - ), - inTopology=dict( - argstr="--inTopology %s", - ), - incurvature=dict( - argstr="--incurvature %d", - ), - inpresmooth=dict( - argstr="--inpresmooth %s", - ), - inratio=dict( - argstr="--inratio %f", - ), - null=dict( - argstr="--null %s", - ), - outContinuous=dict( - argstr="--outContinuous %s", - hash_files=False, - ), - outDiscrete=dict( - argstr="--outDiscrete %s", - hash_files=False, - ), - outLayer=dict( - argstr="--outLayer %s", - hash_files=False, - ), - xDefaultMem=dict( - argstr="-xDefaultMem %d", - ), - xMaxProcess=dict( - argstr="-xMaxProcess %d", - usedefault=True, - ), - xPrefExt=dict( - argstr="--xPrefExt %s", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inInner=dict(argstr="--inInner %s", extensions=None), + inLayering=dict(argstr="--inLayering %s"), + inLayering2=dict(argstr="--inLayering2 %s"), + inMax=dict(argstr="--inMax %d"), + inMin=dict(argstr="--inMin %f"), + inNumber=dict(argstr="--inNumber %d"), + inOuter=dict(argstr="--inOuter %s", extensions=None), + inTopology=dict(argstr="--inTopology %s"), + incurvature=dict(argstr="--incurvature %d"), + inpresmooth=dict(argstr="--inpresmooth %s"), + inratio=dict(argstr="--inratio %f"), + null=dict(argstr="--null %s"), + outContinuous=dict(argstr="--outContinuous %s", hash_files=False), + outDiscrete=dict(argstr="--outDiscrete %s", hash_files=False), + outLayer=dict(argstr="--outLayer %s", hash_files=False), + xDefaultMem=dict(argstr="-xDefaultMem %d"), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), + xPrefExt=dict(argstr="--xPrefExt %s"), ) inputs = JistLaminarVolumetricLayering.input_spec() @@ -81,15 +34,9 @@ def test_JistLaminarVolumetricLayering_inputs(): def test_JistLaminarVolumetricLayering_outputs(): output_map = dict( - outContinuous=dict( - extensions=None, - ), - outDiscrete=dict( - extensions=None, - ), - outLayer=dict( - extensions=None, - ), + outContinuous=dict(extensions=None), + outDiscrete=dict(extensions=None), + outLayer=dict(extensions=None), ) outputs = JistLaminarVolumetricLayering.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py index 7b9a0fc859..01e93a70d9 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py @@ -4,41 +4,16 @@ def test_MedicAlgorithmImageCalculator_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inOperation=dict( - argstr="--inOperation %s", - ), - inVolume=dict( - argstr="--inVolume %s", - extensions=None, - ), - inVolume2=dict( - argstr="--inVolume2 %s", - extensions=None, - ), - null=dict( - argstr="--null %s", - ), - outResult=dict( - argstr="--outResult %s", - hash_files=False, - ), - xDefaultMem=dict( - argstr="-xDefaultMem %d", - ), - xMaxProcess=dict( - argstr="-xMaxProcess %d", - usedefault=True, - ), - xPrefExt=dict( - argstr="--xPrefExt %s", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inOperation=dict(argstr="--inOperation %s"), + inVolume=dict(argstr="--inVolume %s", extensions=None), + inVolume2=dict(argstr="--inVolume2 %s", extensions=None), + 
null=dict(argstr="--null %s"), + outResult=dict(argstr="--outResult %s", hash_files=False), + xDefaultMem=dict(argstr="-xDefaultMem %d"), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), + xPrefExt=dict(argstr="--xPrefExt %s"), ) inputs = MedicAlgorithmImageCalculator.input_spec() @@ -48,11 +23,7 @@ def test_MedicAlgorithmImageCalculator_inputs(): def test_MedicAlgorithmImageCalculator_outputs(): - output_map = dict( - outResult=dict( - extensions=None, - ), - ) + output_map = dict(outResult=dict(extensions=None)) outputs = MedicAlgorithmImageCalculator.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py index 4fb5f2567b..a3ae228361 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py @@ -4,131 +4,42 @@ def test_MedicAlgorithmLesionToads_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inAtlas=dict( - argstr="--inAtlas %s", - ), - inAtlas2=dict( - argstr="--inAtlas2 %s", - extensions=None, - ), - inAtlas3=dict( - argstr="--inAtlas3 %s", - extensions=None, - ), - inAtlas4=dict( - argstr="--inAtlas4 %s", - extensions=None, - ), - inAtlas5=dict( - argstr="--inAtlas5 %f", - ), - inAtlas6=dict( - argstr="--inAtlas6 %s", - ), - inConnectivity=dict( - argstr="--inConnectivity %s", - ), - inCorrect=dict( - argstr="--inCorrect %s", - ), - inFLAIR=dict( - argstr="--inFLAIR %s", - extensions=None, - ), - inInclude=dict( - argstr="--inInclude %s", - ), - inMaximum=dict( - argstr="--inMaximum %d", - ), - inMaximum2=dict( - argstr="--inMaximum2 %d", - ), - inMaximum3=dict( - argstr="--inMaximum3 %d", - ), - inMaximum4=dict( - argstr="--inMaximum4 %f", - ), - inMaximum5=dict( - argstr="--inMaximum5 %d", - ), - inOutput=dict( - argstr="--inOutput %s", - ), - inOutput2=dict( - argstr="--inOutput2 %s", - ), - inOutput3=dict( - argstr="--inOutput3 %s", - ), - inSmooting=dict( - argstr="--inSmooting %f", - ), - inT1_MPRAGE=dict( - argstr="--inT1_MPRAGE %s", - extensions=None, - ), - inT1_SPGR=dict( - argstr="--inT1_SPGR %s", - extensions=None, - ), - null=dict( - argstr="--null %s", - ), - outCortical=dict( - argstr="--outCortical %s", - hash_files=False, - ), - outFilled=dict( - argstr="--outFilled %s", - hash_files=False, - ), - outHard=dict( - argstr="--outHard %s", - hash_files=False, - ), - outHard2=dict( - argstr="--outHard2 %s", - hash_files=False, - ), - outInhomogeneity=dict( - argstr="--outInhomogeneity %s", - hash_files=False, - ), - outLesion=dict( - argstr="--outLesion %s", - hash_files=False, - ), - outMembership=dict( - argstr="--outMembership %s", - hash_files=False, - ), - outSulcal=dict( - argstr="--outSulcal %s", - hash_files=False, - ), - outWM=dict( - argstr="--outWM %s", - hash_files=False, - ), - xDefaultMem=dict( - argstr="-xDefaultMem %d", - ), - xMaxProcess=dict( - argstr="-xMaxProcess %d", - usedefault=True, - ), - xPrefExt=dict( - argstr="--xPrefExt %s", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inAtlas=dict(argstr="--inAtlas %s"), + inAtlas2=dict(argstr="--inAtlas2 %s", extensions=None), + inAtlas3=dict(argstr="--inAtlas3 %s", extensions=None), + inAtlas4=dict(argstr="--inAtlas4 %s", extensions=None), + inAtlas5=dict(argstr="--inAtlas5 %f"), + inAtlas6=dict(argstr="--inAtlas6 %s"), + 
inConnectivity=dict(argstr="--inConnectivity %s"), + inCorrect=dict(argstr="--inCorrect %s"), + inFLAIR=dict(argstr="--inFLAIR %s", extensions=None), + inInclude=dict(argstr="--inInclude %s"), + inMaximum=dict(argstr="--inMaximum %d"), + inMaximum2=dict(argstr="--inMaximum2 %d"), + inMaximum3=dict(argstr="--inMaximum3 %d"), + inMaximum4=dict(argstr="--inMaximum4 %f"), + inMaximum5=dict(argstr="--inMaximum5 %d"), + inOutput=dict(argstr="--inOutput %s"), + inOutput2=dict(argstr="--inOutput2 %s"), + inOutput3=dict(argstr="--inOutput3 %s"), + inSmooting=dict(argstr="--inSmooting %f"), + inT1_MPRAGE=dict(argstr="--inT1_MPRAGE %s", extensions=None), + inT1_SPGR=dict(argstr="--inT1_SPGR %s", extensions=None), + null=dict(argstr="--null %s"), + outCortical=dict(argstr="--outCortical %s", hash_files=False), + outFilled=dict(argstr="--outFilled %s", hash_files=False), + outHard=dict(argstr="--outHard %s", hash_files=False), + outHard2=dict(argstr="--outHard2 %s", hash_files=False), + outInhomogeneity=dict(argstr="--outInhomogeneity %s", hash_files=False), + outLesion=dict(argstr="--outLesion %s", hash_files=False), + outMembership=dict(argstr="--outMembership %s", hash_files=False), + outSulcal=dict(argstr="--outSulcal %s", hash_files=False), + outWM=dict(argstr="--outWM %s", hash_files=False), + xDefaultMem=dict(argstr="-xDefaultMem %d"), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), + xPrefExt=dict(argstr="--xPrefExt %s"), ) inputs = MedicAlgorithmLesionToads.input_spec() @@ -139,33 +50,15 @@ def test_MedicAlgorithmLesionToads_inputs(): def test_MedicAlgorithmLesionToads_outputs(): output_map = dict( - outCortical=dict( - extensions=None, - ), - outFilled=dict( - extensions=None, - ), - outHard=dict( - extensions=None, - ), - outHard2=dict( - extensions=None, - ), - outInhomogeneity=dict( - extensions=None, - ), - outLesion=dict( - extensions=None, - ), - outMembership=dict( - extensions=None, - ), - outSulcal=dict( - extensions=None, - ), - outWM=dict( - extensions=None, - ), + outCortical=dict(extensions=None), + outFilled=dict(extensions=None), + outHard=dict(extensions=None), + outHard2=dict(extensions=None), + outInhomogeneity=dict(extensions=None), + outLesion=dict(extensions=None), + outMembership=dict(extensions=None), + outSulcal=dict(extensions=None), + outWM=dict(extensions=None), ) outputs = MedicAlgorithmLesionToads.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py index 49c307f27f..15080a09a6 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py @@ -4,59 +4,22 @@ def test_MedicAlgorithmMipavReorient_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inInterpolation=dict( - argstr="--inInterpolation %s", - ), - inNew=dict( - argstr="--inNew %s", - ), - inResolution=dict( - argstr="--inResolution %s", - ), - inSource=dict( - argstr="--inSource %s", - sep=";", - ), - inTemplate=dict( - argstr="--inTemplate %s", - extensions=None, - ), - inUser=dict( - argstr="--inUser %s", - ), - inUser2=dict( - argstr="--inUser2 %s", - ), - inUser3=dict( - argstr="--inUser3 %s", - ), - inUser4=dict( - argstr="--inUser4 %s", - ), - null=dict( - argstr="--null %s", - ), - outReoriented=dict( - argstr="--outReoriented %s", - sep=";", - ), - xDefaultMem=dict( - argstr="-xDefaultMem %d", - ), - 
xMaxProcess=dict( - argstr="-xMaxProcess %d", - usedefault=True, - ), - xPrefExt=dict( - argstr="--xPrefExt %s", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inInterpolation=dict(argstr="--inInterpolation %s"), + inNew=dict(argstr="--inNew %s"), + inResolution=dict(argstr="--inResolution %s"), + inSource=dict(argstr="--inSource %s", sep=";"), + inTemplate=dict(argstr="--inTemplate %s", extensions=None), + inUser=dict(argstr="--inUser %s"), + inUser2=dict(argstr="--inUser2 %s"), + inUser3=dict(argstr="--inUser3 %s"), + inUser4=dict(argstr="--inUser4 %s"), + null=dict(argstr="--null %s"), + outReoriented=dict(argstr="--outReoriented %s", sep=";"), + xDefaultMem=dict(argstr="-xDefaultMem %d"), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), + xPrefExt=dict(argstr="--xPrefExt %s"), ) inputs = MedicAlgorithmMipavReorient.input_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py index bf895247a6..8104a4a868 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py @@ -4,62 +4,23 @@ def test_MedicAlgorithmN3_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inAutomatic=dict( - argstr="--inAutomatic %s", - ), - inEnd=dict( - argstr="--inEnd %f", - ), - inField=dict( - argstr="--inField %f", - ), - inInput=dict( - argstr="--inInput %s", - extensions=None, - ), - inKernel=dict( - argstr="--inKernel %f", - ), - inMaximum=dict( - argstr="--inMaximum %d", - ), - inSignal=dict( - argstr="--inSignal %f", - ), - inSubsample=dict( - argstr="--inSubsample %f", - ), - inWeiner=dict( - argstr="--inWeiner %f", - ), - null=dict( - argstr="--null %s", - ), - outInhomogeneity=dict( - argstr="--outInhomogeneity %s", - hash_files=False, - ), - outInhomogeneity2=dict( - argstr="--outInhomogeneity2 %s", - hash_files=False, - ), - xDefaultMem=dict( - argstr="-xDefaultMem %d", - ), - xMaxProcess=dict( - argstr="-xMaxProcess %d", - usedefault=True, - ), - xPrefExt=dict( - argstr="--xPrefExt %s", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inAutomatic=dict(argstr="--inAutomatic %s"), + inEnd=dict(argstr="--inEnd %f"), + inField=dict(argstr="--inField %f"), + inInput=dict(argstr="--inInput %s", extensions=None), + inKernel=dict(argstr="--inKernel %f"), + inMaximum=dict(argstr="--inMaximum %d"), + inSignal=dict(argstr="--inSignal %f"), + inSubsample=dict(argstr="--inSubsample %f"), + inWeiner=dict(argstr="--inWeiner %f"), + null=dict(argstr="--null %s"), + outInhomogeneity=dict(argstr="--outInhomogeneity %s", hash_files=False), + outInhomogeneity2=dict(argstr="--outInhomogeneity2 %s", hash_files=False), + xDefaultMem=dict(argstr="-xDefaultMem %d"), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), + xPrefExt=dict(argstr="--xPrefExt %s"), ) inputs = MedicAlgorithmN3.input_spec() @@ -70,12 +31,7 @@ def test_MedicAlgorithmN3_inputs(): def test_MedicAlgorithmN3_outputs(): output_map = dict( - outInhomogeneity=dict( - extensions=None, - ), - outInhomogeneity2=dict( - extensions=None, - ), + outInhomogeneity=dict(extensions=None), outInhomogeneity2=dict(extensions=None) ) outputs = MedicAlgorithmN3.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py index b62def8a4f..1cd9b882c6 100644 
--- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py @@ -4,166 +4,55 @@ def test_MedicAlgorithmSPECTRE2010_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inApply=dict( - argstr="--inApply %s", - ), - inAtlas=dict( - argstr="--inAtlas %s", - extensions=None, - ), - inBackground=dict( - argstr="--inBackground %f", - ), - inCoarse=dict( - argstr="--inCoarse %f", - ), - inCost=dict( - argstr="--inCost %s", - ), - inDegrees=dict( - argstr="--inDegrees %s", - ), - inFind=dict( - argstr="--inFind %s", - ), - inFine=dict( - argstr="--inFine %f", - ), - inImage=dict( - argstr="--inImage %s", - ), - inInhomogeneity=dict( - argstr="--inInhomogeneity %s", - ), - inInitial=dict( - argstr="--inInitial %d", - ), - inInitial2=dict( - argstr="--inInitial2 %f", - ), - inInput=dict( - argstr="--inInput %s", - extensions=None, - ), - inMMC=dict( - argstr="--inMMC %d", - ), - inMMC2=dict( - argstr="--inMMC2 %d", - ), - inMaximum=dict( - argstr="--inMaximum %f", - ), - inMinimum=dict( - argstr="--inMinimum %f", - ), - inMinimum2=dict( - argstr="--inMinimum2 %f", - ), - inMultiple=dict( - argstr="--inMultiple %d", - ), - inMultithreading=dict( - argstr="--inMultithreading %s", - ), - inNumber=dict( - argstr="--inNumber %d", - ), - inNumber2=dict( - argstr="--inNumber2 %d", - ), - inOutput=dict( - argstr="--inOutput %s", - ), - inOutput2=dict( - argstr="--inOutput2 %s", - ), - inOutput3=dict( - argstr="--inOutput3 %s", - ), - inOutput4=dict( - argstr="--inOutput4 %s", - ), - inOutput5=dict( - argstr="--inOutput5 %s", - ), - inRegistration=dict( - argstr="--inRegistration %s", - ), - inResample=dict( - argstr="--inResample %s", - ), - inRun=dict( - argstr="--inRun %s", - ), - inSkip=dict( - argstr="--inSkip %s", - ), - inSmoothing=dict( - argstr="--inSmoothing %f", - ), - inSubsample=dict( - argstr="--inSubsample %s", - ), - inUse=dict( - argstr="--inUse %s", - ), - null=dict( - argstr="--null %s", - ), - outFANTASM=dict( - argstr="--outFANTASM %s", - hash_files=False, - ), - outMask=dict( - argstr="--outMask %s", - hash_files=False, - ), - outMidsagittal=dict( - argstr="--outMidsagittal %s", - hash_files=False, - ), - outOriginal=dict( - argstr="--outOriginal %s", - hash_files=False, - ), - outPrior=dict( - argstr="--outPrior %s", - hash_files=False, - ), - outSegmentation=dict( - argstr="--outSegmentation %s", - hash_files=False, - ), - outSplitHalves=dict( - argstr="--outSplitHalves %s", - hash_files=False, - ), - outStripped=dict( - argstr="--outStripped %s", - hash_files=False, - ), - outd0=dict( - argstr="--outd0 %s", - hash_files=False, - ), - xDefaultMem=dict( - argstr="-xDefaultMem %d", - ), - xMaxProcess=dict( - argstr="-xMaxProcess %d", - usedefault=True, - ), - xPrefExt=dict( - argstr="--xPrefExt %s", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inApply=dict(argstr="--inApply %s"), + inAtlas=dict(argstr="--inAtlas %s", extensions=None), + inBackground=dict(argstr="--inBackground %f"), + inCoarse=dict(argstr="--inCoarse %f"), + inCost=dict(argstr="--inCost %s"), + inDegrees=dict(argstr="--inDegrees %s"), + inFind=dict(argstr="--inFind %s"), + inFine=dict(argstr="--inFine %f"), + inImage=dict(argstr="--inImage %s"), + inInhomogeneity=dict(argstr="--inInhomogeneity %s"), + inInitial=dict(argstr="--inInitial %d"), + inInitial2=dict(argstr="--inInitial2 %f"), + inInput=dict(argstr="--inInput %s", 
extensions=None), + inMMC=dict(argstr="--inMMC %d"), + inMMC2=dict(argstr="--inMMC2 %d"), + inMaximum=dict(argstr="--inMaximum %f"), + inMinimum=dict(argstr="--inMinimum %f"), + inMinimum2=dict(argstr="--inMinimum2 %f"), + inMultiple=dict(argstr="--inMultiple %d"), + inMultithreading=dict(argstr="--inMultithreading %s"), + inNumber=dict(argstr="--inNumber %d"), + inNumber2=dict(argstr="--inNumber2 %d"), + inOutput=dict(argstr="--inOutput %s"), + inOutput2=dict(argstr="--inOutput2 %s"), + inOutput3=dict(argstr="--inOutput3 %s"), + inOutput4=dict(argstr="--inOutput4 %s"), + inOutput5=dict(argstr="--inOutput5 %s"), + inRegistration=dict(argstr="--inRegistration %s"), + inResample=dict(argstr="--inResample %s"), + inRun=dict(argstr="--inRun %s"), + inSkip=dict(argstr="--inSkip %s"), + inSmoothing=dict(argstr="--inSmoothing %f"), + inSubsample=dict(argstr="--inSubsample %s"), + inUse=dict(argstr="--inUse %s"), + null=dict(argstr="--null %s"), + outFANTASM=dict(argstr="--outFANTASM %s", hash_files=False), + outMask=dict(argstr="--outMask %s", hash_files=False), + outMidsagittal=dict(argstr="--outMidsagittal %s", hash_files=False), + outOriginal=dict(argstr="--outOriginal %s", hash_files=False), + outPrior=dict(argstr="--outPrior %s", hash_files=False), + outSegmentation=dict(argstr="--outSegmentation %s", hash_files=False), + outSplitHalves=dict(argstr="--outSplitHalves %s", hash_files=False), + outStripped=dict(argstr="--outStripped %s", hash_files=False), + outd0=dict(argstr="--outd0 %s", hash_files=False), + xDefaultMem=dict(argstr="-xDefaultMem %d"), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), + xPrefExt=dict(argstr="--xPrefExt %s"), ) inputs = MedicAlgorithmSPECTRE2010.input_spec() @@ -174,33 +63,15 @@ def test_MedicAlgorithmSPECTRE2010_inputs(): def test_MedicAlgorithmSPECTRE2010_outputs(): output_map = dict( - outFANTASM=dict( - extensions=None, - ), - outMask=dict( - extensions=None, - ), - outMidsagittal=dict( - extensions=None, - ), - outOriginal=dict( - extensions=None, - ), - outPrior=dict( - extensions=None, - ), - outSegmentation=dict( - extensions=None, - ), - outSplitHalves=dict( - extensions=None, - ), - outStripped=dict( - extensions=None, - ), - outd0=dict( - extensions=None, - ), + outFANTASM=dict(extensions=None), + outMask=dict(extensions=None), + outMidsagittal=dict(extensions=None), + outOriginal=dict(extensions=None), + outPrior=dict(extensions=None), + outSegmentation=dict(extensions=None), + outSplitHalves=dict(extensions=None), + outStripped=dict(extensions=None), + outd0=dict(extensions=None), ) outputs = MedicAlgorithmSPECTRE2010.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py index bdd6e4e1b6..c7b4761baa 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py @@ -4,43 +4,17 @@ def test_MedicAlgorithmThresholdToBinaryMask_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inLabel=dict( - argstr="--inLabel %s", - sep=";", - ), - inMaximum=dict( - argstr="--inMaximum %f", - ), - inMinimum=dict( - argstr="--inMinimum %f", - ), - inUse=dict( - argstr="--inUse %s", - ), - null=dict( - argstr="--null %s", - ), - outBinary=dict( - argstr="--outBinary %s", - sep=";", - ), - xDefaultMem=dict( - argstr="-xDefaultMem %d", - ), - 
xMaxProcess=dict( - argstr="-xMaxProcess %d", - usedefault=True, - ), - xPrefExt=dict( - argstr="--xPrefExt %s", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inLabel=dict(argstr="--inLabel %s", sep=";"), + inMaximum=dict(argstr="--inMaximum %f"), + inMinimum=dict(argstr="--inMinimum %f"), + inUse=dict(argstr="--inUse %s"), + null=dict(argstr="--null %s"), + outBinary=dict(argstr="--outBinary %s", sep=";"), + xDefaultMem=dict(argstr="-xDefaultMem %d"), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), + xPrefExt=dict(argstr="--xPrefExt %s"), ) inputs = MedicAlgorithmThresholdToBinaryMask.input_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_RandomVol.py b/nipype/interfaces/mipav/tests/test_auto_RandomVol.py index 4929f54d6a..0119bab925 100644 --- a/nipype/interfaces/mipav/tests/test_auto_RandomVol.py +++ b/nipype/interfaces/mipav/tests/test_auto_RandomVol.py @@ -4,57 +4,22 @@ def test_RandomVol_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inField=dict( - argstr="--inField %s", - ), - inLambda=dict( - argstr="--inLambda %f", - ), - inMaximum=dict( - argstr="--inMaximum %d", - ), - inMinimum=dict( - argstr="--inMinimum %d", - ), - inSize=dict( - argstr="--inSize %d", - ), - inSize2=dict( - argstr="--inSize2 %d", - ), - inSize3=dict( - argstr="--inSize3 %d", - ), - inSize4=dict( - argstr="--inSize4 %d", - ), - inStandard=dict( - argstr="--inStandard %d", - ), - null=dict( - argstr="--null %s", - ), - outRand1=dict( - argstr="--outRand1 %s", - hash_files=False, - ), - xDefaultMem=dict( - argstr="-xDefaultMem %d", - ), - xMaxProcess=dict( - argstr="-xMaxProcess %d", - usedefault=True, - ), - xPrefExt=dict( - argstr="--xPrefExt %s", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inField=dict(argstr="--inField %s"), + inLambda=dict(argstr="--inLambda %f"), + inMaximum=dict(argstr="--inMaximum %d"), + inMinimum=dict(argstr="--inMinimum %d"), + inSize=dict(argstr="--inSize %d"), + inSize2=dict(argstr="--inSize2 %d"), + inSize3=dict(argstr="--inSize3 %d"), + inSize4=dict(argstr="--inSize4 %d"), + inStandard=dict(argstr="--inStandard %d"), + null=dict(argstr="--null %s"), + outRand1=dict(argstr="--outRand1 %s", hash_files=False), + xDefaultMem=dict(argstr="-xDefaultMem %d"), + xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), + xPrefExt=dict(argstr="--xPrefExt %s"), ) inputs = RandomVol.input_spec() @@ -64,11 +29,7 @@ def test_RandomVol_inputs(): def test_RandomVol_outputs(): - output_map = dict( - outRand1=dict( - extensions=None, - ), - ) + output_map = dict(outRand1=dict(extensions=None)) outputs = RandomVol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py b/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py index 57f1b40e4d..124d3c7a9a 100644 --- a/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py +++ b/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py @@ -4,32 +4,13 @@ def test_WatershedBEM_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - atlas_mode=dict( - argstr="--atlas", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - overwrite=dict( - argstr="--overwrite", - usedefault=True, - ), - subject_id=dict( - argstr="--subject %s", - mandatory=True, - ), - subjects_dir=dict( - mandatory=True, - usedefault=True, - ), - volume=dict( - argstr="--volume %s", - usedefault=True, - ), + args=dict(argstr="%s"), + 
atlas_mode=dict(argstr="--atlas"), + environ=dict(nohash=True, usedefault=True), + overwrite=dict(argstr="--overwrite", usedefault=True), + subject_id=dict(argstr="--subject %s", mandatory=True), + subjects_dir=dict(mandatory=True, usedefault=True), + volume=dict(argstr="--volume %s", usedefault=True), ) inputs = WatershedBEM.input_spec() @@ -40,32 +21,13 @@ def test_WatershedBEM_inputs(): def test_WatershedBEM_outputs(): output_map = dict( - brain_surface=dict( - extensions=None, - loc="bem/watershed", - ), - cor_files=dict( - altkey="COR", - loc="bem/watershed/ws", - ), - fif_file=dict( - altkey="fif", - extensions=None, - loc="bem", - ), - inner_skull_surface=dict( - extensions=None, - loc="bem/watershed", - ), + brain_surface=dict(extensions=None, loc="bem/watershed"), + cor_files=dict(altkey="COR", loc="bem/watershed/ws"), + fif_file=dict(altkey="fif", extensions=None, loc="bem"), + inner_skull_surface=dict(extensions=None, loc="bem/watershed"), mesh_files=dict(), - outer_skin_surface=dict( - extensions=None, - loc="bem/watershed", - ), - outer_skull_surface=dict( - extensions=None, - loc="bem/watershed", - ), + outer_skin_surface=dict(extensions=None, loc="bem/watershed"), + outer_skull_surface=dict(extensions=None, loc="bem/watershed"), ) outputs = WatershedBEM.output_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py b/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py index ad93f35b9a..e2cfe0a687 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py @@ -4,70 +4,21 @@ def test_ConstrainedSphericalDeconvolution_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - debug=dict( - argstr="-debug", - ), - directions_file=dict( - argstr="-directions %s", - extensions=None, - position=-2, - ), - encoding_file=dict( - argstr="-grad %s", - extensions=None, - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - filter_file=dict( - argstr="-filter %s", - extensions=None, - position=-2, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), - iterations=dict( - argstr="-niter %s", - ), - lambda_value=dict( - argstr="-lambda %s", - ), - mask_image=dict( - argstr="-mask %s", - extensions=None, - position=2, - ), - maximum_harmonic_order=dict( - argstr="-lmax %s", - ), - normalise=dict( - argstr="-normalise", - position=3, - ), - out_filename=dict( - argstr="%s", - extensions=None, - genfile=True, - position=-1, - ), - response_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - threshold_value=dict( - argstr="-threshold %s", - ), + args=dict(argstr="%s"), + debug=dict(argstr="-debug"), + directions_file=dict(argstr="-directions %s", extensions=None, position=-2), + encoding_file=dict(argstr="-grad %s", extensions=None, position=1), + environ=dict(nohash=True, usedefault=True), + filter_file=dict(argstr="-filter %s", extensions=None, position=-2), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + iterations=dict(argstr="-niter %s"), + lambda_value=dict(argstr="-lambda %s"), + mask_image=dict(argstr="-mask %s", extensions=None, position=2), + maximum_harmonic_order=dict(argstr="-lmax %s"), + normalise=dict(argstr="-normalise", position=3), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), + response_file=dict(argstr="%s", extensions=None, 
mandatory=True, position=-2), + threshold_value=dict(argstr="-threshold %s"), ) inputs = ConstrainedSphericalDeconvolution.input_spec() @@ -77,11 +28,7 @@ def test_ConstrainedSphericalDeconvolution_inputs(): def test_ConstrainedSphericalDeconvolution_outputs(): - output_map = dict( - spherical_harmonics_image=dict( - extensions=None, - ), - ) + output_map = dict(spherical_harmonics_image=dict(extensions=None)) outputs = ConstrainedSphericalDeconvolution.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py b/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py index 97b5885baf..fef47d177b 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py @@ -4,38 +4,15 @@ def test_DWI2SphericalHarmonicsImage_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), encoding_file=dict( - argstr="-grad %s", - extensions=None, - mandatory=True, - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - maximum_harmonic_order=dict( - argstr="-lmax %s", - ), - normalise=dict( - argstr="-normalise", - position=3, - ), - out_filename=dict( - argstr="%s", - extensions=None, - genfile=True, - position=-1, + argstr="-grad %s", extensions=None, mandatory=True, position=1 ), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + maximum_harmonic_order=dict(argstr="-lmax %s"), + normalise=dict(argstr="-normalise", position=3), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), ) inputs = DWI2SphericalHarmonicsImage.input_spec() @@ -45,11 +22,7 @@ def test_DWI2SphericalHarmonicsImage_inputs(): def test_DWI2SphericalHarmonicsImage_outputs(): - output_map = dict( - spherical_harmonics_image=dict( - extensions=None, - ), - ) + output_map = dict(spherical_harmonics_image=dict(extensions=None)) outputs = DWI2SphericalHarmonicsImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py b/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py index 72a2820cc1..b91a699925 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py @@ -4,41 +4,14 @@ def test_DWI2Tensor_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - debug=dict( - argstr="-debug", - position=1, - ), - encoding_file=dict( - argstr="-grad %s", - extensions=None, - position=2, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - ignore_slice_by_volume=dict( - argstr="-ignoreslices %s", - position=2, - sep=" ", - ), - ignore_volumes=dict( - argstr="-ignorevolumes %s", - position=2, - sep=" ", - ), - in_file=dict( - argstr="%s", - mandatory=True, - position=-2, - ), - mask=dict( - argstr="-mask %s", - extensions=None, - ), + args=dict(argstr="%s"), + debug=dict(argstr="-debug", position=1), + encoding_file=dict(argstr="-grad %s", extensions=None, position=2), + environ=dict(nohash=True, usedefault=True), + ignore_slice_by_volume=dict(argstr="-ignoreslices %s", position=2, sep=" "), + ignore_volumes=dict(argstr="-ignorevolumes %s", position=2, sep=" "), + in_file=dict(argstr="%s", mandatory=True, position=-2), + mask=dict(argstr="-mask %s", extensions=None), 
out_filename=dict( argstr="%s", extensions=None, @@ -47,10 +20,7 @@ def test_DWI2Tensor_inputs(): output_name="tensor", position=-1, ), - quiet=dict( - argstr="-quiet", - position=1, - ), + quiet=dict(argstr="-quiet", position=1), ) inputs = DWI2Tensor.input_spec() @@ -60,11 +30,7 @@ def test_DWI2Tensor_inputs(): def test_DWI2Tensor_outputs(): - output_map = dict( - tensor=dict( - extensions=None, - ), - ) + output_map = dict(tensor=dict(extensions=None)) outputs = DWI2Tensor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py index c6fe4f586e..98dfdb1137 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py @@ -4,27 +4,13 @@ def test_DiffusionTensorStreamlineTrack_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - cutoff_value=dict( - argstr="-cutoff %s", - units="NA", - ), - desired_number_of_tracks=dict( - argstr="-number %d", - ), - do_not_precompute=dict( - argstr="-noprecomputed", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + cutoff_value=dict(argstr="-cutoff %s", units="NA"), + desired_number_of_tracks=dict(argstr="-number %d"), + do_not_precompute=dict(argstr="-noprecomputed"), + environ=dict(nohash=True, usedefault=True), exclude_file=dict( - argstr="-exclude %s", - extensions=None, - xor=["exclude_file", "exclude_spec"], + argstr="-exclude %s", extensions=None, xor=["exclude_file", "exclude_spec"] ), exclude_spec=dict( argstr="-exclude %s", @@ -34,21 +20,11 @@ def test_DiffusionTensorStreamlineTrack_inputs(): xor=["exclude_file", "exclude_spec"], ), gradient_encoding_file=dict( - argstr="-grad %s", - extensions=None, - mandatory=True, - position=-2, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, + argstr="-grad %s", extensions=None, mandatory=True, position=-2 ), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), include_file=dict( - argstr="-include %s", - extensions=None, - xor=["include_file", "include_spec"], + argstr="-include %s", extensions=None, xor=["include_file", "include_spec"] ), include_spec=dict( argstr="-include %s", @@ -57,23 +33,11 @@ def test_DiffusionTensorStreamlineTrack_inputs(): units="mm", xor=["include_file", "include_spec"], ), - initial_cutoff_value=dict( - argstr="-initcutoff %s", - units="NA", - ), - initial_direction=dict( - argstr="-initdirection %s", - units="voxels", - ), - inputmodel=dict( - argstr="%s", - position=-3, - usedefault=True, - ), + initial_cutoff_value=dict(argstr="-initcutoff %s", units="NA"), + initial_direction=dict(argstr="-initdirection %s", units="voxels"), + inputmodel=dict(argstr="%s", position=-3, usedefault=True), mask_file=dict( - argstr="-mask %s", - extensions=None, - xor=["mask_file", "mask_spec"], + argstr="-mask %s", extensions=None, xor=["mask_file", "mask_spec"] ), mask_spec=dict( argstr="-mask %s", @@ -82,24 +46,11 @@ def test_DiffusionTensorStreamlineTrack_inputs(): units="mm", xor=["mask_file", "mask_spec"], ), - maximum_number_of_tracks=dict( - argstr="-maxnum %d", - ), - maximum_tract_length=dict( - argstr="-length %s", - units="mm", - ), - minimum_radius_of_curvature=dict( - argstr="-curvature %s", - units="mm", - ), - minimum_tract_length=dict( - argstr="-minlength %s", - units="mm", - ), - 
no_mask_interpolation=dict( - argstr="-nomaskinterp", - ), + maximum_number_of_tracks=dict(argstr="-maxnum %d"), + maximum_tract_length=dict(argstr="-length %s", units="mm"), + minimum_radius_of_curvature=dict(argstr="-curvature %s", units="mm"), + minimum_tract_length=dict(argstr="-minlength %s", units="mm"), + no_mask_interpolation=dict(argstr="-nomaskinterp"), out_file=dict( argstr="%s", extensions=None, @@ -109,9 +60,7 @@ def test_DiffusionTensorStreamlineTrack_inputs(): position=-1, ), seed_file=dict( - argstr="-seed %s", - extensions=None, - xor=["seed_file", "seed_spec"], + argstr="-seed %s", extensions=None, xor=["seed_file", "seed_spec"] ), seed_spec=dict( argstr="-seed %s", @@ -120,16 +69,9 @@ def test_DiffusionTensorStreamlineTrack_inputs(): units="mm", xor=["seed_file", "seed_spec"], ), - step_size=dict( - argstr="-step %s", - units="mm", - ), - stop=dict( - argstr="-stop", - ), - unidirectional=dict( - argstr="-unidirectional", - ), + step_size=dict(argstr="-step %s", units="mm"), + stop=dict(argstr="-stop"), + unidirectional=dict(argstr="-unidirectional"), ) inputs = DiffusionTensorStreamlineTrack.input_spec() @@ -139,11 +81,7 @@ def test_DiffusionTensorStreamlineTrack_inputs(): def test_DiffusionTensorStreamlineTrack_outputs(): - output_map = dict( - tracked=dict( - extensions=None, - ), - ) + output_map = dict(tracked=dict(extensions=None)) outputs = DiffusionTensorStreamlineTrack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py b/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py index 4685b0e9d6..a20c3e28c6 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py @@ -4,28 +4,12 @@ def test_Directions2Amplitude_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - display_debug=dict( - argstr="-debug", - ), - display_info=dict( - argstr="-info", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - num_peaks=dict( - argstr="-num %s", - ), + args=dict(argstr="%s"), + display_debug=dict(argstr="-debug"), + display_info=dict(argstr="-info"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + num_peaks=dict(argstr="-num %s"), out_file=dict( argstr="%s", extensions=None, @@ -35,17 +19,9 @@ def test_Directions2Amplitude_inputs(): name_template="%s_amplitudes.mif", position=-1, ), - peak_directions=dict( - argstr="-direction %s", - sep=" ", - ), - peaks_image=dict( - argstr="-peaks %s", - extensions=None, - ), - quiet_display=dict( - argstr="-quiet", - ), + peak_directions=dict(argstr="-direction %s", sep=" "), + peaks_image=dict(argstr="-peaks %s", extensions=None), + quiet_display=dict(argstr="-quiet"), ) inputs = Directions2Amplitude.input_spec() @@ -55,11 +31,7 @@ def test_Directions2Amplitude_inputs(): def test_Directions2Amplitude_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Directions2Amplitude.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Erode.py b/nipype/interfaces/mrtrix/tests/test_auto_Erode.py index 86a6a3d0b1..6cafdb0c9d 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Erode.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Erode.py @@ 
-4,40 +4,14 @@ def test_Erode_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - debug=dict( - argstr="-debug", - position=1, - ), - dilate=dict( - argstr="-dilate", - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - number_of_passes=dict( - argstr="-npass %s", - ), - out_filename=dict( - argstr="%s", - extensions=None, - genfile=True, - position=-1, - ), - quiet=dict( - argstr="-quiet", - position=1, - ), + args=dict(argstr="%s"), + debug=dict(argstr="-debug", position=1), + dilate=dict(argstr="-dilate", position=1), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + number_of_passes=dict(argstr="-npass %s"), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), + quiet=dict(argstr="-quiet", position=1), ) inputs = Erode.input_spec() @@ -47,11 +21,7 @@ def test_Erode_inputs(): def test_Erode_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Erode.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py b/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py index e93a7744fc..9469f142ad 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py @@ -4,49 +4,18 @@ def test_EstimateResponseForSH_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - debug=dict( - argstr="-debug", - ), + args=dict(argstr="%s"), + debug=dict(argstr="-debug"), encoding_file=dict( - argstr="-grad %s", - extensions=None, - mandatory=True, - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), - mask_image=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - maximum_harmonic_order=dict( - argstr="-lmax %s", - ), - normalise=dict( - argstr="-normalise", - ), - out_filename=dict( - argstr="%s", - extensions=None, - genfile=True, - position=-1, - ), - quiet=dict( - argstr="-quiet", - ), + argstr="-grad %s", extensions=None, mandatory=True, position=1 + ), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + mask_image=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + maximum_harmonic_order=dict(argstr="-lmax %s"), + normalise=dict(argstr="-normalise"), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), + quiet=dict(argstr="-quiet"), ) inputs = EstimateResponseForSH.input_spec() @@ -56,11 +25,7 @@ def test_EstimateResponseForSH_inputs(): def test_EstimateResponseForSH_outputs(): - output_map = dict( - response=dict( - extensions=None, - ), - ) + output_map = dict(response=dict(extensions=None)) outputs = EstimateResponseForSH.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py b/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py index 3e11a7db45..fa1968c5ec 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py @@ -4,27 +4,12 @@ def test_FSL2MRTrix_inputs(): input_map = dict( - bval_file=dict( - extensions=None, - 
mandatory=True, - ), - bvec_file=dict( - extensions=None, - mandatory=True, - ), - invert_x=dict( - usedefault=True, - ), - invert_y=dict( - usedefault=True, - ), - invert_z=dict( - usedefault=True, - ), - out_encoding_file=dict( - extensions=None, - genfile=True, - ), + bval_file=dict(extensions=None, mandatory=True), + bvec_file=dict(extensions=None, mandatory=True), + invert_x=dict(usedefault=True), + invert_y=dict(usedefault=True), + invert_z=dict(usedefault=True), + out_encoding_file=dict(extensions=None, genfile=True), ) inputs = FSL2MRTrix.input_spec() @@ -34,11 +19,7 @@ def test_FSL2MRTrix_inputs(): def test_FSL2MRTrix_outputs(): - output_map = dict( - encoding_file=dict( - extensions=None, - ), - ) + output_map = dict(encoding_file=dict(extensions=None)) outputs = FSL2MRTrix.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py b/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py index c7b9c19d24..ce6143503c 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py @@ -4,21 +4,11 @@ def test_FilterTracks_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - debug=dict( - argstr="-debug", - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + debug=dict(argstr="-debug", position=1), + environ=dict(nohash=True, usedefault=True), exclude_file=dict( - argstr="-exclude %s", - extensions=None, - xor=["exclude_file", "exclude_spec"], + argstr="-exclude %s", extensions=None, xor=["exclude_file", "exclude_spec"] ), exclude_spec=dict( argstr="-exclude %s", @@ -27,16 +17,9 @@ def test_FilterTracks_inputs(): units="mm", xor=["exclude_file", "exclude_spec"], ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), include_file=dict( - argstr="-include %s", - extensions=None, - xor=["include_file", "include_spec"], + argstr="-include %s", extensions=None, xor=["include_file", "include_spec"] ), include_spec=dict( argstr="-include %s", @@ -45,16 +28,9 @@ def test_FilterTracks_inputs(): units="mm", xor=["include_file", "include_spec"], ), - invert=dict( - argstr="-invert", - ), - minimum_tract_length=dict( - argstr="-minlength %s", - units="mm", - ), - no_mask_interpolation=dict( - argstr="-nomaskinterp", - ), + invert=dict(argstr="-invert"), + minimum_tract_length=dict(argstr="-minlength %s", units="mm"), + no_mask_interpolation=dict(argstr="-nomaskinterp"), out_file=dict( argstr="%s", extensions=None, @@ -63,10 +39,7 @@ def test_FilterTracks_inputs(): name_template="%s_filt", position=-1, ), - quiet=dict( - argstr="-quiet", - position=1, - ), + quiet=dict(argstr="-quiet", position=1), ) inputs = FilterTracks.input_spec() @@ -76,11 +49,7 @@ def test_FilterTracks_inputs(): def test_FilterTracks_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = FilterTracks.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py b/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py index 62132f795a..3efa6690b1 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py @@ -4,34 +4,13 @@ def test_FindShPeaks_inputs(): input_map = dict( - args=dict( - argstr="%s", - 
), - directions_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - display_debug=dict( - argstr="-debug", - ), - display_info=dict( - argstr="-info", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), - num_peaks=dict( - argstr="-num %s", - ), + args=dict(argstr="%s"), + directions_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + display_debug=dict(argstr="-debug"), + display_info=dict(argstr="-info"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + num_peaks=dict(argstr="-num %s"), out_file=dict( argstr="%s", extensions=None, @@ -41,20 +20,10 @@ def test_FindShPeaks_inputs(): name_template="%s_peak_dirs.mif", position=-1, ), - peak_directions=dict( - argstr="-direction %s", - sep=" ", - ), - peak_threshold=dict( - argstr="-threshold %s", - ), - peaks_image=dict( - argstr="-peaks %s", - extensions=None, - ), - quiet_display=dict( - argstr="-quiet", - ), + peak_directions=dict(argstr="-direction %s", sep=" "), + peak_threshold=dict(argstr="-threshold %s"), + peaks_image=dict(argstr="-peaks %s", extensions=None), + quiet_display=dict(argstr="-quiet"), ) inputs = FindShPeaks.input_spec() @@ -64,11 +33,7 @@ def test_FindShPeaks_inputs(): def test_FindShPeaks_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = FindShPeaks.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py b/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py index 63b66ab1e6..128185da4e 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py @@ -4,27 +4,12 @@ def test_GenerateDirections_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - display_debug=dict( - argstr="-debug", - ), - display_info=dict( - argstr="-info", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - niter=dict( - argstr="-niter %s", - ), - num_dirs=dict( - argstr="%s", - mandatory=True, - position=-2, - ), + args=dict(argstr="%s"), + display_debug=dict(argstr="-debug"), + display_info=dict(argstr="-info"), + environ=dict(nohash=True, usedefault=True), + niter=dict(argstr="-niter %s"), + num_dirs=dict(argstr="%s", mandatory=True, position=-2), out_file=dict( argstr="%s", extensions=None, @@ -33,12 +18,8 @@ def test_GenerateDirections_inputs(): name_template="directions_%d.txt", position=-1, ), - power=dict( - argstr="-power %s", - ), - quiet_display=dict( - argstr="-quiet", - ), + power=dict(argstr="-power %s"), + quiet_display=dict(argstr="-quiet"), ) inputs = GenerateDirections.input_spec() @@ -48,11 +29,7 @@ def test_GenerateDirections_inputs(): def test_GenerateDirections_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = GenerateDirections.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py b/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py index 09b893f105..4c3717459c 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py @@ -4,39 +4,16 @@ def 
test_GenerateWhiteMatterMask_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - binary_mask=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s"), + binary_mask=dict(argstr="%s", extensions=None, mandatory=True, position=-2), encoding_file=dict( - argstr="-grad %s", - extensions=None, - mandatory=True, - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), - noise_level_margin=dict( - argstr="-margin %s", + argstr="-grad %s", extensions=None, mandatory=True, position=1 ), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + noise_level_margin=dict(argstr="-margin %s"), out_WMProb_filename=dict( - argstr="%s", - extensions=None, - genfile=True, - position=-1, + argstr="%s", extensions=None, genfile=True, position=-1 ), ) inputs = GenerateWhiteMatterMask.input_spec() @@ -47,11 +24,7 @@ def test_GenerateWhiteMatterMask_inputs(): def test_GenerateWhiteMatterMask_outputs(): - output_map = dict( - WMprobabilitymap=dict( - extensions=None, - ), - ) + output_map = dict(WMprobabilitymap=dict(extensions=None)) outputs = GenerateWhiteMatterMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py b/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py index 7e819a66da..2daffeafec 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py @@ -4,69 +4,20 @@ def test_MRConvert_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - extension=dict( - position=2, - usedefault=True, - ), - extract_at_axis=dict( - argstr="-coord %s", - position=1, - ), - extract_at_coordinate=dict( - argstr="%s", - position=2, - sep=",", - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - layout=dict( - argstr="-output %s", - position=2, - ), - offset_bias=dict( - argstr="-scale %d", - position=3, - units="mm", - ), - out_filename=dict( - argstr="%s", - extensions=None, - genfile=True, - position=-1, - ), - output_datatype=dict( - argstr="-output %s", - position=2, - ), - prs=dict( - argstr="-prs", - position=3, - ), - replace_NaN_with_zero=dict( - argstr="-zero", - position=3, - ), - resample=dict( - argstr="-scale %d", - position=3, - units="mm", - ), - voxel_dims=dict( - argstr="-vox %s", - position=3, - sep=",", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + extension=dict(position=2, usedefault=True), + extract_at_axis=dict(argstr="-coord %s", position=1), + extract_at_coordinate=dict(argstr="%s", position=2, sep=","), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + layout=dict(argstr="-output %s", position=2), + offset_bias=dict(argstr="-scale %d", position=3, units="mm"), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), + output_datatype=dict(argstr="-output %s", position=2), + prs=dict(argstr="-prs", position=3), + replace_NaN_with_zero=dict(argstr="-zero", position=3), + resample=dict(argstr="-scale %d", position=3, units="mm"), + voxel_dims=dict(argstr="-vox %s", position=3, sep=","), ) inputs = MRConvert.input_spec() @@ -76,11 +27,7 @@ def test_MRConvert_inputs(): def test_MRConvert_outputs(): - output_map = dict( - converted=dict( - 
extensions=None, - ), - ) + output_map = dict(converted=dict(extensions=None)) outputs = MRConvert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py b/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py index daa1231f06..1076605c1e 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py @@ -4,32 +4,12 @@ def test_MRMultiply_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - debug=dict( - argstr="-debug", - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr="%s", - mandatory=True, - position=-2, - ), - out_filename=dict( - argstr="%s", - extensions=None, - genfile=True, - position=-1, - ), - quiet=dict( - argstr="-quiet", - position=1, - ), + args=dict(argstr="%s"), + debug=dict(argstr="-debug", position=1), + environ=dict(nohash=True, usedefault=True), + in_files=dict(argstr="%s", mandatory=True, position=-2), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), + quiet=dict(argstr="-quiet", position=1), ) inputs = MRMultiply.input_spec() @@ -39,11 +19,7 @@ def test_MRMultiply_inputs(): def test_MRMultiply_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MRMultiply.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py index f619525575..4ec839b0c8 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py @@ -4,64 +4,19 @@ def test_MRTransform_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - debug=dict( - argstr="-debug", - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - flip_x=dict( - argstr="-flipx", - position=1, - ), - in_files=dict( - argstr="%s", - mandatory=True, - position=-2, - ), - invert=dict( - argstr="-inverse", - position=1, - ), - linear_transform=dict( - argstr="-linear %s", - extensions=None, - position=1, - ), - out_filename=dict( - argstr="%s", - extensions=None, - genfile=True, - position=-1, - ), - quiet=dict( - argstr="-quiet", - position=1, - ), - reference_image=dict( - argstr="-reference %s", - extensions=None, - position=1, - ), - replace_transform=dict( - argstr="-replace", - position=1, - ), - template_image=dict( - argstr="-template %s", - extensions=None, - position=1, - ), - transformation_file=dict( - argstr="-transform %s", - extensions=None, - position=1, - ), + args=dict(argstr="%s"), + debug=dict(argstr="-debug", position=1), + environ=dict(nohash=True, usedefault=True), + flip_x=dict(argstr="-flipx", position=1), + in_files=dict(argstr="%s", mandatory=True, position=-2), + invert=dict(argstr="-inverse", position=1), + linear_transform=dict(argstr="-linear %s", extensions=None, position=1), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), + quiet=dict(argstr="-quiet", position=1), + reference_image=dict(argstr="-reference %s", extensions=None, position=1), + replace_transform=dict(argstr="-replace", position=1), + template_image=dict(argstr="-template %s", extensions=None, position=1), + transformation_file=dict(argstr="-transform %s", extensions=None, position=1), ) inputs = MRTransform.input_spec() @@ -71,11 +26,7 @@ def test_MRTransform_inputs(): def 
test_MRTransform_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MRTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py index 0fb54a3020..d4393db863 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py @@ -4,24 +4,11 @@ def test_MRTrix2TrackVis_inputs(): input_map = dict( - image_file=dict( - extensions=None, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), - matrix_file=dict( - extensions=None, - ), - out_filename=dict( - extensions=None, - genfile=True, - usedefault=True, - ), - registration_image_file=dict( - extensions=None, - ), + image_file=dict(extensions=None), + in_file=dict(extensions=None, mandatory=True), + matrix_file=dict(extensions=None), + out_filename=dict(extensions=None, genfile=True, usedefault=True), + registration_image_file=dict(extensions=None), ) inputs = MRTrix2TrackVis.input_spec() @@ -31,11 +18,7 @@ def test_MRTrix2TrackVis_inputs(): def test_MRTrix2TrackVis_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MRTrix2TrackVis.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py index eecdc39bf5..3b0e2413d9 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py @@ -4,19 +4,9 @@ def test_MRTrixInfo_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), ) inputs = MRTrixInfo.input_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py index 8eab033221..ad3dfb5bb3 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py @@ -4,26 +4,11 @@ def test_MRTrixViewer_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - debug=dict( - argstr="-debug", - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_files=dict( - argstr="%s", - mandatory=True, - position=-2, - ), - quiet=dict( - argstr="-quiet", - position=1, - ), + args=dict(argstr="%s"), + debug=dict(argstr="-debug", position=1), + environ=dict(nohash=True, usedefault=True), + in_files=dict(argstr="%s", mandatory=True, position=-2), + quiet=dict(argstr="-quiet", position=1), ) inputs = MRTrixViewer.input_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py b/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py index 7a0974d9b8..634c7e63eb 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py @@ -4,33 +4,12 @@ def test_MedianFilter3D_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - debug=dict( - argstr="-debug", - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - 
in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - out_filename=dict( - argstr="%s", - extensions=None, - genfile=True, - position=-1, - ), - quiet=dict( - argstr="-quiet", - position=1, - ), + args=dict(argstr="%s"), + debug=dict(argstr="-debug", position=1), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), + quiet=dict(argstr="-quiet", position=1), ) inputs = MedianFilter3D.input_spec() @@ -40,11 +19,7 @@ def test_MedianFilter3D_inputs(): def test_MedianFilter3D_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MedianFilter3D.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py index 8dded55576..dd1d9e6649 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py @@ -4,27 +4,13 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - cutoff_value=dict( - argstr="-cutoff %s", - units="NA", - ), - desired_number_of_tracks=dict( - argstr="-number %d", - ), - do_not_precompute=dict( - argstr="-noprecomputed", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + cutoff_value=dict(argstr="-cutoff %s", units="NA"), + desired_number_of_tracks=dict(argstr="-number %d"), + do_not_precompute=dict(argstr="-noprecomputed"), + environ=dict(nohash=True, usedefault=True), exclude_file=dict( - argstr="-exclude %s", - extensions=None, - xor=["exclude_file", "exclude_spec"], + argstr="-exclude %s", extensions=None, xor=["exclude_file", "exclude_spec"] ), exclude_spec=dict( argstr="-exclude %s", @@ -33,16 +19,9 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): units="mm", xor=["exclude_file", "exclude_spec"], ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), include_file=dict( - argstr="-include %s", - extensions=None, - xor=["include_file", "include_spec"], + argstr="-include %s", extensions=None, xor=["include_file", "include_spec"] ), include_spec=dict( argstr="-include %s", @@ -51,23 +30,11 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): units="mm", xor=["include_file", "include_spec"], ), - initial_cutoff_value=dict( - argstr="-initcutoff %s", - units="NA", - ), - initial_direction=dict( - argstr="-initdirection %s", - units="voxels", - ), - inputmodel=dict( - argstr="%s", - position=-3, - usedefault=True, - ), + initial_cutoff_value=dict(argstr="-initcutoff %s", units="NA"), + initial_direction=dict(argstr="-initdirection %s", units="voxels"), + inputmodel=dict(argstr="%s", position=-3, usedefault=True), mask_file=dict( - argstr="-mask %s", - extensions=None, - xor=["mask_file", "mask_spec"], + argstr="-mask %s", extensions=None, xor=["mask_file", "mask_spec"] ), mask_spec=dict( argstr="-mask %s", @@ -76,27 +43,12 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): units="mm", 
xor=["mask_file", "mask_spec"], ), - maximum_number_of_tracks=dict( - argstr="-maxnum %d", - ), - maximum_number_of_trials=dict( - argstr="-trials %s", - ), - maximum_tract_length=dict( - argstr="-length %s", - units="mm", - ), - minimum_radius_of_curvature=dict( - argstr="-curvature %s", - units="mm", - ), - minimum_tract_length=dict( - argstr="-minlength %s", - units="mm", - ), - no_mask_interpolation=dict( - argstr="-nomaskinterp", - ), + maximum_number_of_tracks=dict(argstr="-maxnum %d"), + maximum_number_of_trials=dict(argstr="-trials %s"), + maximum_tract_length=dict(argstr="-length %s", units="mm"), + minimum_radius_of_curvature=dict(argstr="-curvature %s", units="mm"), + minimum_tract_length=dict(argstr="-minlength %s", units="mm"), + no_mask_interpolation=dict(argstr="-nomaskinterp"), out_file=dict( argstr="%s", extensions=None, @@ -106,9 +58,7 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): position=-1, ), seed_file=dict( - argstr="-seed %s", - extensions=None, - xor=["seed_file", "seed_spec"], + argstr="-seed %s", extensions=None, xor=["seed_file", "seed_spec"] ), seed_spec=dict( argstr="-seed %s", @@ -117,16 +67,9 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): units="mm", xor=["seed_file", "seed_spec"], ), - step_size=dict( - argstr="-step %s", - units="mm", - ), - stop=dict( - argstr="-stop", - ), - unidirectional=dict( - argstr="-unidirectional", - ), + step_size=dict(argstr="-step %s", units="mm"), + stop=dict(argstr="-stop"), + unidirectional=dict(argstr="-unidirectional"), ) inputs = ProbabilisticSphericallyDeconvolutedStreamlineTrack.input_spec() @@ -136,11 +79,7 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_outputs(): - output_map = dict( - tracked=dict( - extensions=None, - ), - ) + output_map = dict(tracked=dict(extensions=None)) outputs = ProbabilisticSphericallyDeconvolutedStreamlineTrack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py index c717eb628b..8dfdd1047a 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py @@ -4,27 +4,13 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - cutoff_value=dict( - argstr="-cutoff %s", - units="NA", - ), - desired_number_of_tracks=dict( - argstr="-number %d", - ), - do_not_precompute=dict( - argstr="-noprecomputed", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + cutoff_value=dict(argstr="-cutoff %s", units="NA"), + desired_number_of_tracks=dict(argstr="-number %d"), + do_not_precompute=dict(argstr="-noprecomputed"), + environ=dict(nohash=True, usedefault=True), exclude_file=dict( - argstr="-exclude %s", - extensions=None, - xor=["exclude_file", "exclude_spec"], + argstr="-exclude %s", extensions=None, xor=["exclude_file", "exclude_spec"] ), exclude_spec=dict( argstr="-exclude %s", @@ -33,16 +19,9 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): units="mm", xor=["exclude_file", "exclude_spec"], ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), 
include_file=dict( - argstr="-include %s", - extensions=None, - xor=["include_file", "include_spec"], + argstr="-include %s", extensions=None, xor=["include_file", "include_spec"] ), include_spec=dict( argstr="-include %s", @@ -51,23 +30,11 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): units="mm", xor=["include_file", "include_spec"], ), - initial_cutoff_value=dict( - argstr="-initcutoff %s", - units="NA", - ), - initial_direction=dict( - argstr="-initdirection %s", - units="voxels", - ), - inputmodel=dict( - argstr="%s", - position=-3, - usedefault=True, - ), + initial_cutoff_value=dict(argstr="-initcutoff %s", units="NA"), + initial_direction=dict(argstr="-initdirection %s", units="voxels"), + inputmodel=dict(argstr="%s", position=-3, usedefault=True), mask_file=dict( - argstr="-mask %s", - extensions=None, - xor=["mask_file", "mask_spec"], + argstr="-mask %s", extensions=None, xor=["mask_file", "mask_spec"] ), mask_spec=dict( argstr="-mask %s", @@ -76,24 +43,11 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): units="mm", xor=["mask_file", "mask_spec"], ), - maximum_number_of_tracks=dict( - argstr="-maxnum %d", - ), - maximum_tract_length=dict( - argstr="-length %s", - units="mm", - ), - minimum_radius_of_curvature=dict( - argstr="-curvature %s", - units="mm", - ), - minimum_tract_length=dict( - argstr="-minlength %s", - units="mm", - ), - no_mask_interpolation=dict( - argstr="-nomaskinterp", - ), + maximum_number_of_tracks=dict(argstr="-maxnum %d"), + maximum_tract_length=dict(argstr="-length %s", units="mm"), + minimum_radius_of_curvature=dict(argstr="-curvature %s", units="mm"), + minimum_tract_length=dict(argstr="-minlength %s", units="mm"), + no_mask_interpolation=dict(argstr="-nomaskinterp"), out_file=dict( argstr="%s", extensions=None, @@ -103,9 +57,7 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): position=-1, ), seed_file=dict( - argstr="-seed %s", - extensions=None, - xor=["seed_file", "seed_spec"], + argstr="-seed %s", extensions=None, xor=["seed_file", "seed_spec"] ), seed_spec=dict( argstr="-seed %s", @@ -114,16 +66,9 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): units="mm", xor=["seed_file", "seed_spec"], ), - step_size=dict( - argstr="-step %s", - units="mm", - ), - stop=dict( - argstr="-stop", - ), - unidirectional=dict( - argstr="-unidirectional", - ), + step_size=dict(argstr="-step %s", units="mm"), + stop=dict(argstr="-stop"), + unidirectional=dict(argstr="-unidirectional"), ) inputs = SphericallyDeconvolutedStreamlineTrack.input_spec() @@ -133,11 +78,7 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): def test_SphericallyDeconvolutedStreamlineTrack_outputs(): - output_map = dict( - tracked=dict( - extensions=None, - ), - ) + output_map = dict(tracked=dict(extensions=None)) outputs = SphericallyDeconvolutedStreamlineTrack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py index d8f1a5c869..1b0ea2e187 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py @@ -4,27 +4,13 @@ def test_StreamlineTrack_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - cutoff_value=dict( - argstr="-cutoff %s", - units="NA", - ), - desired_number_of_tracks=dict( - argstr="-number %d", - ), - do_not_precompute=dict( - argstr="-noprecomputed", - ), - environ=dict( - nohash=True, - 
usedefault=True, - ), + args=dict(argstr="%s"), + cutoff_value=dict(argstr="-cutoff %s", units="NA"), + desired_number_of_tracks=dict(argstr="-number %d"), + do_not_precompute=dict(argstr="-noprecomputed"), + environ=dict(nohash=True, usedefault=True), exclude_file=dict( - argstr="-exclude %s", - extensions=None, - xor=["exclude_file", "exclude_spec"], + argstr="-exclude %s", extensions=None, xor=["exclude_file", "exclude_spec"] ), exclude_spec=dict( argstr="-exclude %s", @@ -33,16 +19,9 @@ def test_StreamlineTrack_inputs(): units="mm", xor=["exclude_file", "exclude_spec"], ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), include_file=dict( - argstr="-include %s", - extensions=None, - xor=["include_file", "include_spec"], + argstr="-include %s", extensions=None, xor=["include_file", "include_spec"] ), include_spec=dict( argstr="-include %s", @@ -51,23 +30,11 @@ def test_StreamlineTrack_inputs(): units="mm", xor=["include_file", "include_spec"], ), - initial_cutoff_value=dict( - argstr="-initcutoff %s", - units="NA", - ), - initial_direction=dict( - argstr="-initdirection %s", - units="voxels", - ), - inputmodel=dict( - argstr="%s", - position=-3, - usedefault=True, - ), + initial_cutoff_value=dict(argstr="-initcutoff %s", units="NA"), + initial_direction=dict(argstr="-initdirection %s", units="voxels"), + inputmodel=dict(argstr="%s", position=-3, usedefault=True), mask_file=dict( - argstr="-mask %s", - extensions=None, - xor=["mask_file", "mask_spec"], + argstr="-mask %s", extensions=None, xor=["mask_file", "mask_spec"] ), mask_spec=dict( argstr="-mask %s", @@ -76,24 +43,11 @@ def test_StreamlineTrack_inputs(): units="mm", xor=["mask_file", "mask_spec"], ), - maximum_number_of_tracks=dict( - argstr="-maxnum %d", - ), - maximum_tract_length=dict( - argstr="-length %s", - units="mm", - ), - minimum_radius_of_curvature=dict( - argstr="-curvature %s", - units="mm", - ), - minimum_tract_length=dict( - argstr="-minlength %s", - units="mm", - ), - no_mask_interpolation=dict( - argstr="-nomaskinterp", - ), + maximum_number_of_tracks=dict(argstr="-maxnum %d"), + maximum_tract_length=dict(argstr="-length %s", units="mm"), + minimum_radius_of_curvature=dict(argstr="-curvature %s", units="mm"), + minimum_tract_length=dict(argstr="-minlength %s", units="mm"), + no_mask_interpolation=dict(argstr="-nomaskinterp"), out_file=dict( argstr="%s", extensions=None, @@ -103,9 +57,7 @@ def test_StreamlineTrack_inputs(): position=-1, ), seed_file=dict( - argstr="-seed %s", - extensions=None, - xor=["seed_file", "seed_spec"], + argstr="-seed %s", extensions=None, xor=["seed_file", "seed_spec"] ), seed_spec=dict( argstr="-seed %s", @@ -114,16 +66,9 @@ def test_StreamlineTrack_inputs(): units="mm", xor=["seed_file", "seed_spec"], ), - step_size=dict( - argstr="-step %s", - units="mm", - ), - stop=dict( - argstr="-stop", - ), - unidirectional=dict( - argstr="-unidirectional", - ), + step_size=dict(argstr="-step %s", units="mm"), + stop=dict(argstr="-stop"), + unidirectional=dict(argstr="-unidirectional"), ) inputs = StreamlineTrack.input_spec() @@ -133,11 +78,7 @@ def test_StreamlineTrack_inputs(): def test_StreamlineTrack_outputs(): - output_map = dict( - tracked=dict( - extensions=None, - ), - ) + output_map = dict(tracked=dict(extensions=None)) outputs = StreamlineTrack.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py index c32daa3574..985e9b1e41 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py @@ -4,33 +4,12 @@ def test_Tensor2ApparentDiffusion_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - debug=dict( - argstr="-debug", - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - out_filename=dict( - argstr="%s", - extensions=None, - genfile=True, - position=-1, - ), - quiet=dict( - argstr="-quiet", - position=1, - ), + args=dict(argstr="%s"), + debug=dict(argstr="-debug", position=1), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), + quiet=dict(argstr="-quiet", position=1), ) inputs = Tensor2ApparentDiffusion.input_spec() @@ -40,11 +19,7 @@ def test_Tensor2ApparentDiffusion_inputs(): def test_Tensor2ApparentDiffusion_outputs(): - output_map = dict( - ADC=dict( - extensions=None, - ), - ) + output_map = dict(ADC=dict(extensions=None)) outputs = Tensor2ApparentDiffusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py index bf90806f74..d1b02c4ac2 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py @@ -4,33 +4,12 @@ def test_Tensor2FractionalAnisotropy_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - debug=dict( - argstr="-debug", - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - out_filename=dict( - argstr="%s", - extensions=None, - genfile=True, - position=-1, - ), - quiet=dict( - argstr="-quiet", - position=1, - ), + args=dict(argstr="%s"), + debug=dict(argstr="-debug", position=1), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), + quiet=dict(argstr="-quiet", position=1), ) inputs = Tensor2FractionalAnisotropy.input_spec() @@ -40,11 +19,7 @@ def test_Tensor2FractionalAnisotropy_inputs(): def test_Tensor2FractionalAnisotropy_outputs(): - output_map = dict( - FA=dict( - extensions=None, - ), - ) + output_map = dict(FA=dict(extensions=None)) outputs = Tensor2FractionalAnisotropy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py index a87eefef7e..8ec90cbaa7 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py @@ -4,33 +4,12 @@ def test_Tensor2Vector_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - debug=dict( - argstr="-debug", - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - out_filename=dict( - 
argstr="%s", - extensions=None, - genfile=True, - position=-1, - ), - quiet=dict( - argstr="-quiet", - position=1, - ), + args=dict(argstr="%s"), + debug=dict(argstr="-debug", position=1), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), + quiet=dict(argstr="-quiet", position=1), ) inputs = Tensor2Vector.input_spec() @@ -40,11 +19,7 @@ def test_Tensor2Vector_inputs(): def test_Tensor2Vector_outputs(): - output_map = dict( - vector=dict( - extensions=None, - ), - ) + output_map = dict(vector=dict(extensions=None)) outputs = Tensor2Vector.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py b/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py index 0a67f4db56..59e0696956 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py @@ -4,47 +4,16 @@ def test_Threshold_inputs(): input_map = dict( - absolute_threshold_value=dict( - argstr="-abs %s", - ), - args=dict( - argstr="%s", - ), - debug=dict( - argstr="-debug", - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - invert=dict( - argstr="-invert", - position=1, - ), - out_filename=dict( - argstr="%s", - extensions=None, - genfile=True, - position=-1, - ), - percentage_threshold_value=dict( - argstr="-percent %s", - ), - quiet=dict( - argstr="-quiet", - position=1, - ), - replace_zeros_with_NaN=dict( - argstr="-nan", - position=1, - ), + absolute_threshold_value=dict(argstr="-abs %s"), + args=dict(argstr="%s"), + debug=dict(argstr="-debug", position=1), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + invert=dict(argstr="-invert", position=1), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), + percentage_threshold_value=dict(argstr="-percent %s"), + quiet=dict(argstr="-quiet", position=1), + replace_zeros_with_NaN=dict(argstr="-nan", position=1), ) inputs = Threshold.input_spec() @@ -54,11 +23,7 @@ def test_Threshold_inputs(): def test_Threshold_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Threshold.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py b/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py index 9323ba74f2..c976790aaf 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py @@ -4,52 +4,16 @@ def test_Tracks2Prob_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - colour=dict( - argstr="-colour", - position=3, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fraction=dict( - argstr="-fraction", - position=3, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - out_filename=dict( - argstr="%s", - extensions=None, - genfile=True, - position=-1, - ), - output_datatype=dict( - argstr="-datatype %s", - position=2, - ), - resample=dict( - argstr="-resample %d", - position=3, - units="mm", - ), - template_file=dict( - argstr="-template %s", - extensions=None, - position=1, - ), - voxel_dims=dict( - argstr="-vox %s", - 
position=2, - sep=",", - ), + args=dict(argstr="%s"), + colour=dict(argstr="-colour", position=3), + environ=dict(nohash=True, usedefault=True), + fraction=dict(argstr="-fraction", position=3), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), + output_datatype=dict(argstr="-datatype %s", position=2), + resample=dict(argstr="-resample %d", position=3, units="mm"), + template_file=dict(argstr="-template %s", extensions=None, position=1), + voxel_dims=dict(argstr="-vox %s", position=2, sep=","), ) inputs = Tracks2Prob.input_spec() @@ -59,11 +23,7 @@ def test_Tracks2Prob_inputs(): def test_Tracks2Prob_outputs(): - output_map = dict( - tract_image=dict( - extensions=None, - ), - ) + output_map = dict(tract_image=dict(extensions=None)) outputs = Tracks2Prob.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py b/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py index a9334f5412..a54a60285a 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py @@ -4,25 +4,11 @@ def test_ACTPrepareFSL_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), out_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - usedefault=True, + argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True ), ) inputs = ACTPrepareFSL.input_spec() @@ -33,11 +19,7 @@ def test_ACTPrepareFSL_inputs(): def test_ACTPrepareFSL_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = ACTPrepareFSL.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py b/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py index ddf96a9c5f..635bc0c9ab 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py @@ -4,48 +4,17 @@ def test_BrainMask_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bval_scale=dict( - argstr="-bvalue_scaling %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - grad_file=dict( - argstr="-grad %s", - extensions=None, - xor=["grad_fsl"], - ), - grad_fsl=dict( - argstr="-fslgrad %s %s", - xor=["grad_file"], - ), - in_bval=dict( - extensions=None, - ), - in_bvec=dict( - argstr="-fslgrad %s %s", - extensions=None, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - nthreads=dict( - argstr="-nthreads %d", - nohash=True, - ), + args=dict(argstr="%s"), + bval_scale=dict(argstr="-bvalue_scaling %s"), + environ=dict(nohash=True, usedefault=True), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), + in_bval=dict(extensions=None), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + nthreads=dict(argstr="-nthreads %d", nohash=True), out_file=dict( - argstr="%s", - extensions=None, - 
mandatory=True, - position=-1, - usedefault=True, + argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True ), ) inputs = BrainMask.input_spec() @@ -56,11 +25,7 @@ def test_BrainMask_inputs(): def test_BrainMask_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = BrainMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py b/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py index bfb0b1f0c9..b94051ab99 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py @@ -4,64 +4,23 @@ def test_BuildConnectome_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), - in_parc=dict( - argstr="%s", - extensions=None, - position=-2, - ), - in_scalar=dict( - argstr="-image %s", - extensions=None, - ), - in_weights=dict( - argstr="-tck_weights_in %s", - extensions=None, - ), - keep_unassigned=dict( - argstr="-keep_unassigned", - ), - metric=dict( - argstr="-metric %s", - ), - nthreads=dict( - argstr="-nthreads %d", - nohash=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + in_parc=dict(argstr="%s", extensions=None, position=-2), + in_scalar=dict(argstr="-image %s", extensions=None), + in_weights=dict(argstr="-tck_weights_in %s", extensions=None), + keep_unassigned=dict(argstr="-keep_unassigned"), + metric=dict(argstr="-metric %s"), + nthreads=dict(argstr="-nthreads %d", nohash=True), out_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - usedefault=True, - ), - search_forward=dict( - argstr="-assignment_forward_search %f", - ), - search_radius=dict( - argstr="-assignment_radial_search %f", - ), - search_reverse=dict( - argstr="-assignment_reverse_search %f", - ), - vox_lookup=dict( - argstr="-assignment_voxel_lookup", - ), - zero_diagonal=dict( - argstr="-zero_diagonal", + argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True ), + search_forward=dict(argstr="-assignment_forward_search %f"), + search_radius=dict(argstr="-assignment_radial_search %f"), + search_reverse=dict(argstr="-assignment_reverse_search %f"), + vox_lookup=dict(argstr="-assignment_voxel_lookup"), + zero_diagonal=dict(argstr="-zero_diagonal"), ) inputs = BuildConnectome.input_spec() @@ -71,11 +30,7 @@ def test_BuildConnectome_inputs(): def test_BuildConnectome_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = BuildConnectome.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py b/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py index ab1d984425..03ec89168e 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py @@ -4,82 +4,27 @@ def test_ComputeTDI_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - contrast=dict( - argstr="-constrast %s", - ), - data_type=dict( - argstr="-datatype %s", - ), - dixel=dict( - argstr="-dixel %s", - extensions=None, - ), - ends_only=dict( - argstr="-ends_only", - ), - 
environ=dict( - nohash=True, - usedefault=True, - ), - fwhm_tck=dict( - argstr="-fwhm_tck %f", - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - in_map=dict( - argstr="-image %s", - extensions=None, - ), - map_zero=dict( - argstr="-map_zero", - ), - max_tod=dict( - argstr="-tod %d", - ), - nthreads=dict( - argstr="-nthreads %d", - nohash=True, - ), - out_file=dict( - argstr="%s", - extensions=None, - position=-1, - usedefault=True, - ), - precise=dict( - argstr="-precise", - ), - reference=dict( - argstr="-template %s", - extensions=None, - ), - stat_tck=dict( - argstr="-stat_tck %s", - ), - stat_vox=dict( - argstr="-stat_vox %s", - ), - tck_weights=dict( - argstr="-tck_weights_in %s", - extensions=None, - ), - upsample=dict( - argstr="-upsample %d", - ), - use_dec=dict( - argstr="-dec", - ), - vox_size=dict( - argstr="-vox %s", - sep=",", - ), + args=dict(argstr="%s"), + contrast=dict(argstr="-constrast %s"), + data_type=dict(argstr="-datatype %s"), + dixel=dict(argstr="-dixel %s", extensions=None), + ends_only=dict(argstr="-ends_only"), + environ=dict(nohash=True, usedefault=True), + fwhm_tck=dict(argstr="-fwhm_tck %f"), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + in_map=dict(argstr="-image %s", extensions=None), + map_zero=dict(argstr="-map_zero"), + max_tod=dict(argstr="-tod %d"), + nthreads=dict(argstr="-nthreads %d", nohash=True), + out_file=dict(argstr="%s", extensions=None, position=-1, usedefault=True), + precise=dict(argstr="-precise"), + reference=dict(argstr="-template %s", extensions=None), + stat_tck=dict(argstr="-stat_tck %s"), + stat_vox=dict(argstr="-stat_vox %s"), + tck_weights=dict(argstr="-tck_weights_in %s", extensions=None), + upsample=dict(argstr="-upsample %d"), + use_dec=dict(argstr="-dec"), + vox_size=dict(argstr="-vox %s", sep=","), ) inputs = ComputeTDI.input_spec() @@ -89,11 +34,7 @@ def test_ComputeTDI_inputs(): def test_ComputeTDI_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = ComputeTDI.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ConstrainedSphericalDeconvolution.py b/nipype/interfaces/mrtrix3/tests/test_auto_ConstrainedSphericalDeconvolution.py index c395f0d1c8..ab85e9c8d4 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ConstrainedSphericalDeconvolution.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ConstrainedSphericalDeconvolution.py @@ -4,100 +4,29 @@ def test_ConstrainedSphericalDeconvolution_inputs(): input_map = dict( - algorithm=dict( - argstr="%s", - mandatory=True, - position=-8, - ), - args=dict( - argstr="%s", - ), - bval_scale=dict( - argstr="-bvalue_scaling %s", - ), - csf_odf=dict( - argstr="%s", - extensions=None, - position=-1, - ), - csf_txt=dict( - argstr="%s", - extensions=None, - position=-2, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gm_odf=dict( - argstr="%s", - extensions=None, - position=-3, - ), - gm_txt=dict( - argstr="%s", - extensions=None, - position=-4, - ), - grad_file=dict( - argstr="-grad %s", - extensions=None, - xor=["grad_fsl"], - ), - grad_fsl=dict( - argstr="-fslgrad %s %s", - xor=["grad_file"], - ), - in_bval=dict( - extensions=None, - ), - in_bvec=dict( - argstr="-fslgrad %s %s", - extensions=None, - ), - in_dirs=dict( - argstr="-directions %s", - extensions=None, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-7, 
- ), - mask_file=dict( - argstr="-mask %s", - extensions=None, - ), - max_sh=dict( - argstr="-lmax %s", - sep=",", - ), - nthreads=dict( - argstr="-nthreads %d", - nohash=True, - ), - predicted_signal=dict( - argstr="-predicted_signal %s", - extensions=None, - ), - shell=dict( - argstr="-shell %s", - sep=",", - ), + algorithm=dict(argstr="%s", mandatory=True, position=-8), + args=dict(argstr="%s"), + bval_scale=dict(argstr="-bvalue_scaling %s"), + csf_odf=dict(argstr="%s", extensions=None, position=-1), + csf_txt=dict(argstr="%s", extensions=None, position=-2), + environ=dict(nohash=True, usedefault=True), + gm_odf=dict(argstr="%s", extensions=None, position=-3), + gm_txt=dict(argstr="%s", extensions=None, position=-4), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), + in_bval=dict(extensions=None), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), + in_dirs=dict(argstr="-directions %s", extensions=None), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-7), + mask_file=dict(argstr="-mask %s", extensions=None), + max_sh=dict(argstr="-lmax %s", sep=","), + nthreads=dict(argstr="-nthreads %d", nohash=True), + predicted_signal=dict(argstr="-predicted_signal %s", extensions=None), + shell=dict(argstr="-shell %s", sep=","), wm_odf=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-5, - usedefault=True, - ), - wm_txt=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-6, + argstr="%s", extensions=None, mandatory=True, position=-5, usedefault=True ), + wm_txt=dict(argstr="%s", extensions=None, mandatory=True, position=-6), ) inputs = ConstrainedSphericalDeconvolution.input_spec() @@ -108,21 +37,10 @@ def test_ConstrainedSphericalDeconvolution_inputs(): def test_ConstrainedSphericalDeconvolution_outputs(): output_map = dict( - csf_odf=dict( - argstr="%s", - extensions=None, - ), - gm_odf=dict( - argstr="%s", - extensions=None, - ), - predicted_signal=dict( - extensions=None, - ), - wm_odf=dict( - argstr="%s", - extensions=None, - ), + csf_odf=dict(argstr="%s", extensions=None), + gm_odf=dict(argstr="%s", extensions=None), + predicted_signal=dict(extensions=None), + wm_odf=dict(argstr="%s", extensions=None), ) outputs = ConstrainedSphericalDeconvolution.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py index 0028748ab9..0a53b5c858 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py @@ -4,50 +4,17 @@ def test_DWIBiasCorrect_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bias=dict( - argstr="-bias %s", - extensions=None, - ), - bval_scale=dict( - argstr="-bvalue_scaling %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - grad_file=dict( - argstr="-grad %s", - extensions=None, - xor=["grad_fsl"], - ), - grad_fsl=dict( - argstr="-fslgrad %s %s", - xor=["grad_file"], - ), - in_bval=dict( - extensions=None, - ), - in_bvec=dict( - argstr="-fslgrad %s %s", - extensions=None, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - in_mask=dict( - argstr="-mask %s", - extensions=None, - ), - nthreads=dict( - argstr="-nthreads %d", - nohash=True, - ), + args=dict(argstr="%s"), + bias=dict(argstr="-bias %s", extensions=None), + bval_scale=dict(argstr="-bvalue_scaling %s"), + environ=dict(nohash=True, 
usedefault=True), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), + in_bval=dict(extensions=None), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + in_mask=dict(argstr="-mask %s", extensions=None), + nthreads=dict(argstr="-nthreads %d", nohash=True), out_file=dict( argstr="%s", extensions=None, @@ -57,18 +24,8 @@ def test_DWIBiasCorrect_inputs(): name_template="%s_biascorr", position=-1, ), - use_ants=dict( - argstr="ants", - mandatory=True, - position=0, - xor=["use_fsl"], - ), - use_fsl=dict( - argstr="fsl", - mandatory=True, - position=0, - xor=["use_ants"], - ), + use_ants=dict(argstr="ants", mandatory=True, position=0, xor=["use_fsl"]), + use_fsl=dict(argstr="fsl", mandatory=True, position=0, xor=["use_ants"]), ) inputs = DWIBiasCorrect.input_spec() @@ -78,14 +35,7 @@ def test_DWIBiasCorrect_inputs(): def test_DWIBiasCorrect_outputs(): - output_map = dict( - bias=dict( - extensions=None, - ), - out_file=dict( - extensions=None, - ), - ) + output_map = dict(bias=dict(extensions=None), out_file=dict(extensions=None)) outputs = DWIBiasCorrect.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py index efa722c81d..4c7dd7d8d6 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py @@ -4,46 +4,16 @@ def test_DWIDenoise_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bval_scale=dict( - argstr="-bvalue_scaling %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - extent=dict( - argstr="-extent %d,%d,%d", - ), - grad_file=dict( - argstr="-grad %s", - extensions=None, - xor=["grad_fsl"], - ), - grad_fsl=dict( - argstr="-fslgrad %s %s", - xor=["grad_file"], - ), - in_bval=dict( - extensions=None, - ), - in_bvec=dict( - argstr="-fslgrad %s %s", - extensions=None, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - mask=dict( - argstr="-mask %s", - extensions=None, - position=1, - ), + args=dict(argstr="%s"), + bval_scale=dict(argstr="-bvalue_scaling %s"), + environ=dict(nohash=True, usedefault=True), + extent=dict(argstr="-extent %d,%d,%d"), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), + in_bval=dict(extensions=None), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + mask=dict(argstr="-mask %s", extensions=None, position=1), noise=dict( argstr="-noise %s", extensions=None, @@ -51,10 +21,7 @@ def test_DWIDenoise_inputs(): name_source="in_file", name_template="%s_noise", ), - nthreads=dict( - argstr="-nthreads %d", - nohash=True, - ), + nthreads=dict(argstr="-nthreads %d", nohash=True), out_file=dict( argstr="%s", extensions=None, @@ -72,14 +39,7 @@ def test_DWIDenoise_inputs(): def test_DWIDenoise_outputs(): - output_map = dict( - noise=dict( - extensions=None, - ), - out_file=dict( - extensions=None, - ), - ) + output_map = dict(noise=dict(extensions=None), out_file=dict(extensions=None)) outputs = DWIDenoise.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py 
b/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py index 379e67d397..e6a17f277e 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py @@ -4,61 +4,20 @@ def test_DWIExtract_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bval_scale=dict( - argstr="-bvalue_scaling %s", - ), - bzero=dict( - argstr="-bzero", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - grad_file=dict( - argstr="-grad %s", - extensions=None, - xor=["grad_fsl"], - ), - grad_fsl=dict( - argstr="-fslgrad %s %s", - xor=["grad_file"], - ), - in_bval=dict( - extensions=None, - ), - in_bvec=dict( - argstr="-fslgrad %s %s", - extensions=None, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - nobzero=dict( - argstr="-no_bzero", - ), - nthreads=dict( - argstr="-nthreads %d", - nohash=True, - ), - out_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - ), - shell=dict( - argstr="-shell %s", - sep=",", - ), - singleshell=dict( - argstr="-singleshell", - ), + args=dict(argstr="%s"), + bval_scale=dict(argstr="-bvalue_scaling %s"), + bzero=dict(argstr="-bzero"), + environ=dict(nohash=True, usedefault=True), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), + in_bval=dict(extensions=None), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + nobzero=dict(argstr="-no_bzero"), + nthreads=dict(argstr="-nthreads %d", nohash=True), + out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), + shell=dict(argstr="-shell %s", sep=","), + singleshell=dict(argstr="-singleshell"), ) inputs = DWIExtract.input_spec() @@ -68,11 +27,7 @@ def test_DWIExtract_inputs(): def test_DWIExtract_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = DWIExtract.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIPreproc.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIPreproc.py index 7f226fe3cd..76fae6548f 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIPreproc.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIPreproc.py @@ -4,90 +4,34 @@ def test_DWIPreproc_inputs(): input_map = dict( - align_seepi=dict( - argstr="-align_seepi", - ), - args=dict( - argstr="%s", - ), - bval_scale=dict( - argstr="-bvalue_scaling %s", - ), - eddy_options=dict( - argstr='-eddy_options "%s"', - ), - environ=dict( - nohash=True, - usedefault=True, - ), - export_grad_fsl=dict( - argstr="-export_grad_fsl", - ), - export_grad_mrtrix=dict( - argstr="-export_grad_mrtrix", - ), - grad_file=dict( - argstr="-grad %s", - extensions=None, - xor=["grad_fsl"], - ), - grad_fsl=dict( - argstr="-fslgrad %s %s", - xor=["grad_file"], - ), - in_bval=dict( - extensions=None, - ), - in_bvec=dict( - argstr="-fslgrad %s %s", - extensions=None, - ), - in_epi=dict( - argstr="-se_epi %s", - extensions=None, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=0, - ), - nthreads=dict( - argstr="-nthreads %d", - nohash=True, - ), + align_seepi=dict(argstr="-align_seepi"), + args=dict(argstr="%s"), + bval_scale=dict(argstr="-bvalue_scaling %s"), + eddy_options=dict(argstr='-eddy_options "%s"'), + environ=dict(nohash=True, usedefault=True), + 
export_grad_fsl=dict(argstr="-export_grad_fsl"), + export_grad_mrtrix=dict(argstr="-export_grad_mrtrix"), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), + in_bval=dict(extensions=None), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), + in_epi=dict(argstr="-se_epi %s", extensions=None), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), + nthreads=dict(argstr="-nthreads %d", nohash=True), out_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=1, - usedefault=True, - ), - out_grad_fsl=dict( - argstr="%s, %s", - requires=["export_grad_fsl"], + argstr="%s", extensions=None, mandatory=True, position=1, usedefault=True ), + out_grad_fsl=dict(argstr="%s, %s", requires=["export_grad_fsl"]), out_grad_mrtrix=dict( argstr="%s", extensions=None, requires=["export_grad_mrtrix"], usedefault=True, ), - pe_dir=dict( - argstr="-pe_dir %s", - mandatory=True, - ), - ro_time=dict( - argstr="-readout_time %f", - ), - rpe_options=dict( - argstr="-rpe_%s", - mandatory=True, - position=2, - ), - topup_options=dict( - argstr='-topup_options "%s"', - ), + pe_dir=dict(argstr="-pe_dir %s", mandatory=True), + ro_time=dict(argstr="-readout_time %f"), + rpe_options=dict(argstr="-rpe_%s", mandatory=True, position=2), + topup_options=dict(argstr='-topup_options "%s"'), ) inputs = DWIPreproc.input_spec() @@ -98,25 +42,10 @@ def test_DWIPreproc_inputs(): def test_DWIPreproc_outputs(): output_map = dict( - out_file=dict( - argstr="%s", - extensions=None, - ), - out_fsl_bval=dict( - argstr="%s", - extensions=None, - usedefault=True, - ), - out_fsl_bvec=dict( - argstr="%s", - extensions=None, - usedefault=True, - ), - out_grad_mrtrix=dict( - argstr="%s", - extensions=None, - usedefault=True, - ), + out_file=dict(argstr="%s", extensions=None), + out_fsl_bval=dict(argstr="%s", extensions=None, usedefault=True), + out_fsl_bvec=dict(argstr="%s", extensions=None, usedefault=True), + out_grad_mrtrix=dict(argstr="%s", extensions=None, usedefault=True), ) outputs = DWIPreproc.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py index 2d15207571..270e131a04 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py @@ -4,103 +4,29 @@ def test_EstimateFOD_inputs(): input_map = dict( - algorithm=dict( - argstr="%s", - mandatory=True, - position=-8, - ), - args=dict( - argstr="%s", - ), - bval_scale=dict( - argstr="-bvalue_scaling %s", - ), - csf_odf=dict( - argstr="%s", - extensions=None, - position=-1, - usedefault=True, - ), - csf_txt=dict( - argstr="%s", - extensions=None, - position=-2, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gm_odf=dict( - argstr="%s", - extensions=None, - position=-3, - usedefault=True, - ), - gm_txt=dict( - argstr="%s", - extensions=None, - position=-4, - ), - grad_file=dict( - argstr="-grad %s", - extensions=None, - xor=["grad_fsl"], - ), - grad_fsl=dict( - argstr="-fslgrad %s %s", - xor=["grad_file"], - ), - in_bval=dict( - extensions=None, - ), - in_bvec=dict( - argstr="-fslgrad %s %s", - extensions=None, - ), - in_dirs=dict( - argstr="-directions %s", - extensions=None, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-7, - ), - mask_file=dict( - argstr="-mask %s", - extensions=None, - ), - max_sh=dict( - argstr="-lmax %s", - sep=",", - usedefault=True, 
- ), - nthreads=dict( - argstr="-nthreads %d", - nohash=True, - ), - predicted_signal=dict( - argstr="-predicted_signal %s", - extensions=None, - ), - shell=dict( - argstr="-shell %s", - sep=",", - ), + algorithm=dict(argstr="%s", mandatory=True, position=-8), + args=dict(argstr="%s"), + bval_scale=dict(argstr="-bvalue_scaling %s"), + csf_odf=dict(argstr="%s", extensions=None, position=-1, usedefault=True), + csf_txt=dict(argstr="%s", extensions=None, position=-2), + environ=dict(nohash=True, usedefault=True), + gm_odf=dict(argstr="%s", extensions=None, position=-3, usedefault=True), + gm_txt=dict(argstr="%s", extensions=None, position=-4), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), + in_bval=dict(extensions=None), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), + in_dirs=dict(argstr="-directions %s", extensions=None), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-7), + mask_file=dict(argstr="-mask %s", extensions=None), + max_sh=dict(argstr="-lmax %s", sep=",", usedefault=True), + nthreads=dict(argstr="-nthreads %d", nohash=True), + predicted_signal=dict(argstr="-predicted_signal %s", extensions=None), + shell=dict(argstr="-shell %s", sep=","), wm_odf=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-5, - usedefault=True, - ), - wm_txt=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-6, + argstr="%s", extensions=None, mandatory=True, position=-5, usedefault=True ), + wm_txt=dict(argstr="%s", extensions=None, mandatory=True, position=-6), ) inputs = EstimateFOD.input_spec() @@ -111,21 +37,10 @@ def test_EstimateFOD_inputs(): def test_EstimateFOD_outputs(): output_map = dict( - csf_odf=dict( - argstr="%s", - extensions=None, - ), - gm_odf=dict( - argstr="%s", - extensions=None, - ), - predicted_signal=dict( - extensions=None, - ), - wm_odf=dict( - argstr="%s", - extensions=None, - ), + csf_odf=dict(argstr="%s", extensions=None), + gm_odf=dict(argstr="%s", extensions=None), + predicted_signal=dict(extensions=None), + wm_odf=dict(argstr="%s", extensions=None), ) outputs = EstimateFOD.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py b/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py index 7cf38faf8c..b296614696 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py @@ -4,64 +4,22 @@ def test_FitTensor_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bval_scale=dict( - argstr="-bvalue_scaling %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - grad_file=dict( - argstr="-grad %s", - extensions=None, - xor=["grad_fsl"], - ), - grad_fsl=dict( - argstr="-fslgrad %s %s", - xor=["grad_file"], - ), - in_bval=dict( - extensions=None, - ), - in_bvec=dict( - argstr="-fslgrad %s %s", - extensions=None, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - in_mask=dict( - argstr="-mask %s", - extensions=None, - ), - method=dict( - argstr="-method %s", - ), - nthreads=dict( - argstr="-nthreads %d", - nohash=True, - ), + args=dict(argstr="%s"), + bval_scale=dict(argstr="-bvalue_scaling %s"), + environ=dict(nohash=True, usedefault=True), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), + in_bval=dict(extensions=None), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), + 
in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + in_mask=dict(argstr="-mask %s", extensions=None), + method=dict(argstr="-method %s"), + nthreads=dict(argstr="-nthreads %d", nohash=True), out_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - usedefault=True, - ), - predicted_signal=dict( - argstr="-predicted_signal %s", - extensions=None, - ), - reg_term=dict( - argstr="-regularisation %f", - max_ver="0.3.13", + argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True ), + predicted_signal=dict(argstr="-predicted_signal %s", extensions=None), + reg_term=dict(argstr="-regularisation %f", max_ver="0.3.13"), ) inputs = FitTensor.input_spec() @@ -72,12 +30,7 @@ def test_FitTensor_inputs(): def test_FitTensor_outputs(): output_map = dict( - out_file=dict( - extensions=None, - ), - predicted_signal=dict( - extensions=None, - ), + out_file=dict(extensions=None), predicted_signal=dict(extensions=None) ) outputs = FitTensor.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py b/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py index 1b135a5917..1e4fef12be 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py @@ -4,53 +4,17 @@ def test_Generate5tt_inputs(): input_map = dict( - algorithm=dict( - argstr="%s", - mandatory=True, - position=-3, - ), - args=dict( - argstr="%s", - ), - bval_scale=dict( - argstr="-bvalue_scaling %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - grad_file=dict( - argstr="-grad %s", - extensions=None, - xor=["grad_fsl"], - ), - grad_fsl=dict( - argstr="-fslgrad %s %s", - xor=["grad_file"], - ), - in_bval=dict( - extensions=None, - ), - in_bvec=dict( - argstr="-fslgrad %s %s", - extensions=None, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - nthreads=dict( - argstr="-nthreads %d", - nohash=True, - ), - out_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - ), + algorithm=dict(argstr="%s", mandatory=True, position=-3), + args=dict(argstr="%s"), + bval_scale=dict(argstr="-bvalue_scaling %s"), + environ=dict(nohash=True, usedefault=True), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), + in_bval=dict(extensions=None), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + nthreads=dict(argstr="-nthreads %d", nohash=True), + out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), ) inputs = Generate5tt.input_spec() @@ -60,11 +24,7 @@ def test_Generate5tt_inputs(): def test_Generate5tt_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Generate5tt.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py index 15116f9bb8..d505cdfb4f 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py @@ -4,55 +4,19 @@ def test_LabelConfig_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_config=dict( - argstr="%s", - extensions=None, - position=-2, - ), - in_file=dict( - 
argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), - lut_aal=dict( - argstr="-lut_aal %s", - extensions=None, - ), - lut_basic=dict( - argstr="-lut_basic %s", - extensions=None, - ), - lut_fs=dict( - argstr="-lut_freesurfer %s", - extensions=None, - ), - lut_itksnap=dict( - argstr="-lut_itksnap %s", - extensions=None, - ), - nthreads=dict( - argstr="-nthreads %d", - nohash=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_config=dict(argstr="%s", extensions=None, position=-2), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + lut_aal=dict(argstr="-lut_aal %s", extensions=None), + lut_basic=dict(argstr="-lut_basic %s", extensions=None), + lut_fs=dict(argstr="-lut_freesurfer %s", extensions=None), + lut_itksnap=dict(argstr="-lut_itksnap %s", extensions=None), + nthreads=dict(argstr="-nthreads %d", nohash=True), out_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - usedefault=True, - ), - spine=dict( - argstr="-spine %s", - extensions=None, + argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True ), + spine=dict(argstr="-spine %s", extensions=None), ) inputs = LabelConfig.input_spec() @@ -62,11 +26,7 @@ def test_LabelConfig_inputs(): def test_LabelConfig_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = LabelConfig.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py index 98512ac317..554fb4d374 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py @@ -4,45 +4,16 @@ def test_LabelConvert_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_config=dict( - argstr="%s", - extensions=None, - position=-2, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-4, - ), - in_lut=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), - num_threads=dict( - argstr="-nthreads %d", - nohash=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_config=dict(argstr="%s", extensions=None, position=-2), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-4), + in_lut=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + num_threads=dict(argstr="-nthreads %d", nohash=True), out_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - usedefault=True, - ), - spine=dict( - argstr="-spine %s", - extensions=None, + argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True ), + spine=dict(argstr="-spine %s", extensions=None), ) inputs = LabelConvert.input_spec() @@ -52,11 +23,7 @@ def test_LabelConvert_inputs(): def test_LabelConvert_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = LabelConvert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py index 578ffb9b1a..f23f1a599b 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py @@ -4,65 +4,22 @@ def 
test_MRConvert_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - axes=dict( - argstr="-axes %s", - sep=",", - ), - bval_scale=dict( - argstr="-bvalue_scaling %s", - ), - coord=dict( - argstr="-coord %s", - sep=" ", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - grad_file=dict( - argstr="-grad %s", - extensions=None, - xor=["grad_fsl"], - ), - grad_fsl=dict( - argstr="-fslgrad %s %s", - xor=["grad_file"], - ), - in_bval=dict( - extensions=None, - ), - in_bvec=dict( - argstr="-fslgrad %s %s", - extensions=None, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - nthreads=dict( - argstr="-nthreads %d", - nohash=True, - ), + args=dict(argstr="%s"), + axes=dict(argstr="-axes %s", sep=","), + bval_scale=dict(argstr="-bvalue_scaling %s"), + coord=dict(argstr="-coord %s", sep=" "), + environ=dict(nohash=True, usedefault=True), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), + in_bval=dict(extensions=None), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + nthreads=dict(argstr="-nthreads %d", nohash=True), out_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - usedefault=True, - ), - scaling=dict( - argstr="-scaling %s", - sep=",", - ), - vox=dict( - argstr="-vox %s", - sep=",", + argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True ), + scaling=dict(argstr="-scaling %s", sep=","), + vox=dict(argstr="-vox %s", sep=","), ) inputs = MRConvert.input_spec() @@ -72,11 +29,7 @@ def test_MRConvert_inputs(): def test_MRConvert_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MRConvert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py index f1ef52ab88..042b1a745a 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py @@ -4,61 +4,19 @@ def test_MRDeGibbs_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - axes=dict( - argstr="-axes %s", - maxlen=2, - minlen=2, - sep=",", - usedefault=True, - ), - bval_scale=dict( - argstr="-bvalue_scaling %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - grad_file=dict( - argstr="-grad %s", - extensions=None, - xor=["grad_fsl"], - ), - grad_fsl=dict( - argstr="-fslgrad %s %s", - xor=["grad_file"], - ), - in_bval=dict( - extensions=None, - ), - in_bvec=dict( - argstr="-fslgrad %s %s", - extensions=None, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - maxW=dict( - argstr="-maxW %d", - usedefault=True, - ), - minW=dict( - argstr="-minW %d", - usedefault=True, - ), - nshifts=dict( - argstr="-nshifts %d", - usedefault=True, - ), - nthreads=dict( - argstr="-nthreads %d", - nohash=True, - ), + args=dict(argstr="%s"), + axes=dict(argstr="-axes %s", maxlen=2, minlen=2, sep=",", usedefault=True), + bval_scale=dict(argstr="-bvalue_scaling %s"), + environ=dict(nohash=True, usedefault=True), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), + in_bval=dict(extensions=None), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), + 
in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + maxW=dict(argstr="-maxW %d", usedefault=True), + minW=dict(argstr="-minW %d", usedefault=True), + nshifts=dict(argstr="-nshifts %d", usedefault=True), + nthreads=dict(argstr="-nthreads %d", nohash=True), out_file=dict( argstr="%s", extensions=None, @@ -76,11 +34,7 @@ def test_MRDeGibbs_inputs(): def test_MRDeGibbs_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MRDeGibbs.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py index 6446b2ceda..0962705f36 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py @@ -4,56 +4,18 @@ def test_MRMath_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - axis=dict( - argstr="-axis %d", - ), - bval_scale=dict( - argstr="-bvalue_scaling %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - grad_file=dict( - argstr="-grad %s", - extensions=None, - xor=["grad_fsl"], - ), - grad_fsl=dict( - argstr="-fslgrad %s %s", - xor=["grad_file"], - ), - in_bval=dict( - extensions=None, - ), - in_bvec=dict( - argstr="-fslgrad %s %s", - extensions=None, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), - nthreads=dict( - argstr="-nthreads %d", - nohash=True, - ), - operation=dict( - argstr="%s", - mandatory=True, - position=-2, - ), - out_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - ), + args=dict(argstr="%s"), + axis=dict(argstr="-axis %d"), + bval_scale=dict(argstr="-bvalue_scaling %s"), + environ=dict(nohash=True, usedefault=True), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), + in_bval=dict(extensions=None), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + nthreads=dict(argstr="-nthreads %d", nohash=True), + operation=dict(argstr="%s", mandatory=True, position=-2), + out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), ) inputs = MRMath.input_spec() @@ -63,11 +25,7 @@ def test_MRMath_inputs(): def test_MRMath_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MRMath.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py index ee982c9561..2415b4d9dd 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py @@ -4,51 +4,19 @@ def test_MRResize_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bval_scale=dict( - argstr="-bvalue_scaling %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - grad_file=dict( - argstr="-grad %s", - extensions=None, - xor=["grad_fsl"], - ), - grad_fsl=dict( - argstr="-fslgrad %s %s", - xor=["grad_file"], - ), + args=dict(argstr="%s"), + bval_scale=dict(argstr="-bvalue_scaling %s"), + environ=dict(nohash=True, usedefault=True), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), image_size=dict( - argstr="-size 
%d,%d,%d", - mandatory=True, - xor=["voxel_size", "scale_factor"], - ), - in_bval=dict( - extensions=None, - ), - in_bvec=dict( - argstr="-fslgrad %s %s", - extensions=None, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - interpolation=dict( - argstr="-interp %s", - usedefault=True, - ), - nthreads=dict( - argstr="-nthreads %d", - nohash=True, + argstr="-size %d,%d,%d", mandatory=True, xor=["voxel_size", "scale_factor"] ), + in_bval=dict(extensions=None), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + interpolation=dict(argstr="-interp %s", usedefault=True), + nthreads=dict(argstr="-nthreads %d", nohash=True), out_file=dict( argstr="%s", extensions=None, @@ -58,14 +26,10 @@ def test_MRResize_inputs(): position=-1, ), scale_factor=dict( - argstr="-scale %g,%g,%g", - mandatory=True, - xor=["image_size", "voxel_size"], + argstr="-scale %g,%g,%g", mandatory=True, xor=["image_size", "voxel_size"] ), voxel_size=dict( - argstr="-voxel %g,%g,%g", - mandatory=True, - xor=["image_size", "scale_factor"], + argstr="-voxel %g,%g,%g", mandatory=True, xor=["image_size", "scale_factor"] ), ) inputs = MRResize.input_spec() @@ -76,11 +40,7 @@ def test_MRResize_inputs(): def test_MRResize_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MRResize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py index 7689f14f11..6ece1ac2a4 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py @@ -3,15 +3,7 @@ def test_MRTrix3Base_inputs(): - input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - ) + input_map = dict(args=dict(argstr="%s"), environ=dict(nohash=True, usedefault=True)) inputs = MRTrix3Base.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py b/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py index 0fd63be8af..720f4b12ad 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py @@ -4,36 +4,14 @@ def test_Mesh2PVE_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), - in_first=dict( - argstr="-first %s", - extensions=None, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + in_first=dict(argstr="-first %s", extensions=None), out_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - usedefault=True, - ), - reference=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, + argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True ), + reference=dict(argstr="%s", extensions=None, mandatory=True, position=-2), ) inputs = Mesh2PVE.input_spec() @@ -43,11 +21,7 @@ def test_Mesh2PVE_inputs(): def test_Mesh2PVE_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Mesh2PVE.output_spec() for key, 
metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py b/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py index ef3053cede..3058252e62 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py @@ -4,36 +4,13 @@ def test_ReplaceFSwithFIRST_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_config=dict( - argstr="%s", - extensions=None, - position=-2, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-4, - ), - in_t1w=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_config=dict(argstr="%s", extensions=None, position=-2), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-4), + in_t1w=dict(argstr="%s", extensions=None, mandatory=True, position=-3), out_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - usedefault=True, + argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True ), ) inputs = ReplaceFSwithFIRST.input_spec() @@ -44,11 +21,7 @@ def test_ReplaceFSwithFIRST_inputs(): def test_ReplaceFSwithFIRST_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = ReplaceFSwithFIRST.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py b/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py index f7e556f466..15bd5704f3 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py @@ -4,76 +4,22 @@ def test_ResponseSD_inputs(): input_map = dict( - algorithm=dict( - argstr="%s", - mandatory=True, - position=1, - ), - args=dict( - argstr="%s", - ), - bval_scale=dict( - argstr="-bvalue_scaling %s", - ), - csf_file=dict( - argstr="%s", - extensions=None, - position=-1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gm_file=dict( - argstr="%s", - extensions=None, - position=-2, - ), - grad_file=dict( - argstr="-grad %s", - extensions=None, - xor=["grad_fsl"], - ), - grad_fsl=dict( - argstr="-fslgrad %s %s", - xor=["grad_file"], - ), - in_bval=dict( - extensions=None, - ), - in_bvec=dict( - argstr="-fslgrad %s %s", - extensions=None, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-5, - ), - in_mask=dict( - argstr="-mask %s", - extensions=None, - ), - max_sh=dict( - argstr="-lmax %s", - sep=",", - ), - mtt_file=dict( - argstr="%s", - extensions=None, - position=-4, - ), - nthreads=dict( - argstr="-nthreads %d", - nohash=True, - ), - wm_file=dict( - argstr="%s", - extensions=None, - position=-3, - usedefault=True, - ), + algorithm=dict(argstr="%s", mandatory=True, position=1), + args=dict(argstr="%s"), + bval_scale=dict(argstr="-bvalue_scaling %s"), + csf_file=dict(argstr="%s", extensions=None, position=-1), + environ=dict(nohash=True, usedefault=True), + gm_file=dict(argstr="%s", extensions=None, position=-2), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), + in_bval=dict(extensions=None), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), + in_file=dict(argstr="%s", extensions=None, mandatory=True, 
position=-5), + in_mask=dict(argstr="-mask %s", extensions=None), + max_sh=dict(argstr="-lmax %s", sep=","), + mtt_file=dict(argstr="%s", extensions=None, position=-4), + nthreads=dict(argstr="-nthreads %d", nohash=True), + wm_file=dict(argstr="%s", extensions=None, position=-3, usedefault=True), ) inputs = ResponseSD.input_spec() @@ -84,18 +30,9 @@ def test_ResponseSD_inputs(): def test_ResponseSD_outputs(): output_map = dict( - csf_file=dict( - argstr="%s", - extensions=None, - ), - gm_file=dict( - argstr="%s", - extensions=None, - ), - wm_file=dict( - argstr="%s", - extensions=None, - ), + csf_file=dict(argstr="%s", extensions=None), + gm_file=dict(argstr="%s", extensions=None), + wm_file=dict(argstr="%s", extensions=None), ) outputs = ResponseSD.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_SH2Amp.py b/nipype/interfaces/mrtrix3/tests/test_auto_SH2Amp.py index ab75fc1f8a..9079e3c128 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_SH2Amp.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_SH2Amp.py @@ -4,28 +4,11 @@ def test_SH2Amp_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - directions=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), - nonnegative=dict( - argstr="-nonnegative", - ), + args=dict(argstr="%s"), + directions=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + nonnegative=dict(argstr="-nonnegative"), out_file=dict( argstr="%s", extensions=None, @@ -43,11 +26,7 @@ def test_SH2Amp_inputs(): def test_SH2Amp_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = SH2Amp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_SHConv.py b/nipype/interfaces/mrtrix3/tests/test_auto_SHConv.py index 992e6984a8..4bcc4d8155 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_SHConv.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_SHConv.py @@ -4,19 +4,9 @@ def test_SHConv_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-3, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), out_file=dict( argstr="%s", extensions=None, @@ -25,12 +15,7 @@ def test_SHConv_inputs(): position=-1, usedefault=True, ), - response=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), + response=dict(argstr="%s", extensions=None, mandatory=True, position=-2), ) inputs = SHConv.input_spec() @@ -40,11 +25,7 @@ def test_SHConv_inputs(): def test_SHConv_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = SHConv.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py b/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py index 215dafedc0..f47f46adbe 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py @@ -4,37 +4,13 @@ def 
test_TCK2VTK_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - nthreads=dict( - argstr="-nthreads %d", - nohash=True, - ), - out_file=dict( - argstr="%s", - extensions=None, - position=-1, - usedefault=True, - ), - reference=dict( - argstr="-image %s", - extensions=None, - ), - voxel=dict( - argstr="-image %s", - extensions=None, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + nthreads=dict(argstr="-nthreads %d", nohash=True), + out_file=dict(argstr="%s", extensions=None, position=-1, usedefault=True), + reference=dict(argstr="-image %s", extensions=None), + voxel=dict(argstr="-image %s", extensions=None), ) inputs = TCK2VTK.input_spec() @@ -44,11 +20,7 @@ def test_TCK2VTK_inputs(): def test_TCK2VTK_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = TCK2VTK.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py b/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py index b23813aaf4..a91f571f35 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py @@ -4,47 +4,16 @@ def test_TensorMetrics_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - component=dict( - argstr="-num %s", - sep=",", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - ), - in_mask=dict( - argstr="-mask %s", - extensions=None, - ), - modulate=dict( - argstr="-modulate %s", - ), - out_adc=dict( - argstr="-adc %s", - extensions=None, - ), - out_eval=dict( - argstr="-value %s", - extensions=None, - ), - out_evec=dict( - argstr="-vector %s", - extensions=None, - ), - out_fa=dict( - argstr="-fa %s", - extensions=None, - ), + args=dict(argstr="%s"), + component=dict(argstr="-num %s", sep=",", usedefault=True), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), + in_mask=dict(argstr="-mask %s", extensions=None), + modulate=dict(argstr="-modulate %s"), + out_adc=dict(argstr="-adc %s", extensions=None), + out_eval=dict(argstr="-value %s", extensions=None), + out_evec=dict(argstr="-vector %s", extensions=None), + out_fa=dict(argstr="-fa %s", extensions=None), ) inputs = TensorMetrics.input_spec() @@ -55,18 +24,10 @@ def test_TensorMetrics_inputs(): def test_TensorMetrics_outputs(): output_map = dict( - out_adc=dict( - extensions=None, - ), - out_eval=dict( - extensions=None, - ), - out_evec=dict( - extensions=None, - ), - out_fa=dict( - extensions=None, - ), + out_adc=dict(extensions=None), + out_eval=dict(extensions=None), + out_evec=dict(extensions=None), + out_fa=dict(extensions=None), ) outputs = TensorMetrics.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py index 2a70fe09f6..08926ebe48 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py @@ -4,168 +4,59 @@ def test_Tractography_inputs(): input_map = dict( - act_file=dict( - argstr="-act %s", - 
extensions=None, - ), - algorithm=dict( - argstr="-algorithm %s", - usedefault=True, - ), - angle=dict( - argstr="-angle %f", - ), - args=dict( - argstr="%s", - ), - backtrack=dict( - argstr="-backtrack", - ), - bval_scale=dict( - argstr="-bvalue_scaling %s", - ), - crop_at_gmwmi=dict( - argstr="-crop_at_gmwmi", - ), - cutoff=dict( - argstr="-cutoff %f", - ), - cutoff_init=dict( - argstr="-initcutoff %f", - ), - downsample=dict( - argstr="-downsample %f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - grad_file=dict( - argstr="-grad %s", - extensions=None, - xor=["grad_fsl"], - ), - grad_fsl=dict( - argstr="-fslgrad %s %s", - xor=["grad_file"], - ), - in_bval=dict( - extensions=None, - ), - in_bvec=dict( - argstr="-fslgrad %s %s", - extensions=None, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-2, - ), - init_dir=dict( - argstr="-initdirection %f,%f,%f", - ), - max_length=dict( - argstr="-maxlength %f", - ), - max_seed_attempts=dict( - argstr="-max_seed_attempts %d", - ), - max_tracks=dict( - argstr="-maxnum %d", - ), - min_length=dict( - argstr="-minlength %f", - ), - n_samples=dict( - argstr="-samples %d", - usedefault=True, - ), - n_tracks=dict( - argstr="-number %d", - max_ver="0.4", - ), - n_trials=dict( - argstr="-trials %d", - ), - noprecompt=dict( - argstr="-noprecomputed", - ), - nthreads=dict( - argstr="-nthreads %d", - nohash=True, - ), + act_file=dict(argstr="-act %s", extensions=None), + algorithm=dict(argstr="-algorithm %s", usedefault=True), + angle=dict(argstr="-angle %f"), + args=dict(argstr="%s"), + backtrack=dict(argstr="-backtrack"), + bval_scale=dict(argstr="-bvalue_scaling %s"), + crop_at_gmwmi=dict(argstr="-crop_at_gmwmi"), + cutoff=dict(argstr="-cutoff %f"), + cutoff_init=dict(argstr="-initcutoff %f"), + downsample=dict(argstr="-downsample %f"), + environ=dict(nohash=True, usedefault=True), + grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), + grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), + in_bval=dict(extensions=None), + in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + init_dir=dict(argstr="-initdirection %f,%f,%f"), + max_length=dict(argstr="-maxlength %f"), + max_seed_attempts=dict(argstr="-max_seed_attempts %d"), + max_tracks=dict(argstr="-maxnum %d"), + min_length=dict(argstr="-minlength %f"), + n_samples=dict(argstr="-samples %d", usedefault=True), + n_tracks=dict(argstr="-number %d", max_ver="0.4"), + n_trials=dict(argstr="-trials %d"), + noprecompt=dict(argstr="-noprecomputed"), + nthreads=dict(argstr="-nthreads %d", nohash=True), out_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=-1, - usedefault=True, - ), - out_seeds=dict( - argstr="-output_seeds %s", - extensions=None, - usedefault=True, - ), - power=dict( - argstr="-power %d", - ), - roi_excl=dict( - argstr="-exclude %s", - ), - roi_incl=dict( - argstr="-include %s", - ), - roi_mask=dict( - argstr="-mask %s", - ), - seed_dynamic=dict( - argstr="-seed_dynamic %s", - extensions=None, - ), + argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True + ), + out_seeds=dict(argstr="-output_seeds %s", extensions=None, usedefault=True), + power=dict(argstr="-power %d"), + roi_excl=dict(argstr="-exclude %s"), + roi_incl=dict(argstr="-include %s"), + roi_mask=dict(argstr="-mask %s"), + seed_dynamic=dict(argstr="-seed_dynamic %s", extensions=None), seed_gmwmi=dict( - argstr="-seed_gmwmi %s", - 
extensions=None, - requires=["act_file"], + argstr="-seed_gmwmi %s", extensions=None, requires=["act_file"] ), seed_grid_voxel=dict( - argstr="-seed_grid_per_voxel %s %d", - xor=["seed_image", "seed_rnd_voxel"], - ), - seed_image=dict( - argstr="-seed_image %s", - extensions=None, - ), - seed_rejection=dict( - argstr="-seed_rejection %s", - extensions=None, + argstr="-seed_grid_per_voxel %s %d", xor=["seed_image", "seed_rnd_voxel"] ), + seed_image=dict(argstr="-seed_image %s", extensions=None), + seed_rejection=dict(argstr="-seed_rejection %s", extensions=None), seed_rnd_voxel=dict( - argstr="-seed_random_per_voxel %s %d", - xor=["seed_image", "seed_grid_voxel"], - ), - seed_sphere=dict( - argstr="-seed_sphere %f,%f,%f,%f", - ), - select=dict( - argstr="-select %d", - min_ver="3", - ), - sph_trait=dict( - argstr="%f,%f,%f,%f", - ), - step_size=dict( - argstr="-step %f", - ), - stop=dict( - argstr="-stop", - ), - unidirectional=dict( - argstr="-unidirectional", - ), - use_rk4=dict( - argstr="-rk4", - ), + argstr="-seed_random_per_voxel %s %d", xor=["seed_image", "seed_grid_voxel"] + ), + seed_sphere=dict(argstr="-seed_sphere %f,%f,%f,%f"), + select=dict(argstr="-select %d", min_ver="3"), + sph_trait=dict(argstr="%f,%f,%f,%f"), + step_size=dict(argstr="-step %f"), + stop=dict(argstr="-stop"), + unidirectional=dict(argstr="-unidirectional"), + use_rk4=dict(argstr="-rk4"), ) inputs = Tractography.input_spec() @@ -175,14 +66,7 @@ def test_Tractography_inputs(): def test_Tractography_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - out_seeds=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None), out_seeds=dict(extensions=None)) outputs = Tractography.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py b/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py index ca14384031..76ca1f72dd 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py @@ -4,14 +4,8 @@ def test_DwiTool_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - b0_file=dict( - argstr="-b0 %s", - extensions=None, - position=4, - ), + args=dict(argstr="%s"), + b0_file=dict(argstr="-b0 %s", extensions=None, position=4), ball_flag=dict( argstr="-ball", position=6, @@ -38,23 +32,10 @@ def test_DwiTool_inputs(): "nodv_flag", ], ), - bval_file=dict( - argstr="-bval %s", - extensions=None, - mandatory=True, - position=2, - ), - bvec_file=dict( - argstr="-bvec %s", - extensions=None, - position=3, - ), - diso_val=dict( - argstr="-diso %f", - ), - dpr_val=dict( - argstr="-dpr %f", - ), + bval_file=dict(argstr="-bval %s", extensions=None, mandatory=True, position=2), + bvec_file=dict(argstr="-bvec %s", extensions=None, position=3), + diso_val=dict(argstr="-diso %f"), + dpr_val=dict(argstr="-dpr %f"), dti_flag=dict( argstr="-dti", position=6, @@ -81,10 +62,7 @@ def test_DwiTool_inputs(): "nodv_flag", ], ), - environ=dict( - nohash=True, - usedefault=True, - ), + environ=dict(nohash=True, usedefault=True), famap_file=dict( argstr="-famap %s", extensions=None, @@ -110,11 +88,7 @@ def test_DwiTool_inputs(): name_source=["source_file"], name_template="%s_logdti2.nii.gz", ), - mask_file=dict( - argstr="-mask %s", - extensions=None, - position=5, - ), + mask_file=dict(argstr="-mask %s", extensions=None, position=5), mcmap_file=dict( argstr="-mcmap %s", extensions=None, @@ -173,10 +147,7 @@ def test_DwiTool_inputs(): 
name_template="%s_rgbmap.nii.gz", ), source_file=dict( - argstr="-source %s", - extensions=None, - mandatory=True, - position=1, + argstr="-source %s", extensions=None, mandatory=True, position=1 ), syn_file=dict( argstr="-syn %s", @@ -201,27 +172,13 @@ def test_DwiTool_inputs(): def test_DwiTool_outputs(): output_map = dict( - famap_file=dict( - extensions=None, - ), - logdti_file=dict( - extensions=None, - ), - mcmap_file=dict( - extensions=None, - ), - mdmap_file=dict( - extensions=None, - ), - rgbmap_file=dict( - extensions=None, - ), - syn_file=dict( - extensions=None, - ), - v1map_file=dict( - extensions=None, - ), + famap_file=dict(extensions=None), + logdti_file=dict(extensions=None), + mcmap_file=dict(extensions=None), + mdmap_file=dict(extensions=None), + rgbmap_file=dict(extensions=None), + syn_file=dict(extensions=None), + v1map_file=dict(extensions=None), ) outputs = DwiTool.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py b/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py index 14093322cc..8f8781c5e3 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py @@ -4,115 +4,48 @@ def test_FitAsl_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), cbf_file=dict( argstr="-cbf %s", extensions=None, name_source=["source_file"], name_template="%s_cbf.nii.gz", ), - dpld=dict( - argstr="-dPLD %f", - ), - dt_inv2=dict( - argstr="-dTinv2 %f", - ), - eff=dict( - argstr="-eff %f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + dpld=dict(argstr="-dPLD %f"), + dt_inv2=dict(argstr="-dTinv2 %f"), + eff=dict(argstr="-eff %f"), + environ=dict(nohash=True, usedefault=True), error_file=dict( argstr="-error %s", extensions=None, name_source=["source_file"], name_template="%s_error.nii.gz", ), - gm_plasma=dict( - argstr="-gmL %f", - ), - gm_t1=dict( - argstr="-gmT1 %f", - ), - gm_ttt=dict( - argstr="-gmTTT %f", - ), - ir_output=dict( - argstr="-IRoutput %s", - extensions=None, - ), - ir_volume=dict( - argstr="-IRvolume %s", - extensions=None, - ), - ldd=dict( - argstr="-LDD %f", - ), - m0map=dict( - argstr="-m0map %s", - extensions=None, - ), - m0mape=dict( - argstr="-m0mape %s", - extensions=None, - ), - mask=dict( - argstr="-mask %s", - extensions=None, - position=2, - ), - mul=dict( - argstr="-mul %f", - ), - mulgm=dict( - argstr="-sig", - ), - out=dict( - argstr="-out %f", - ), - pasl=dict( - argstr="-pasl", - ), - pcasl=dict( - argstr="-pcasl", - ), - plasma_coeff=dict( - argstr="-L %f", - ), - pld=dict( - argstr="-PLD %f", - ), - pv0=dict( - argstr="-pv0 %d", - ), - pv2=dict( - argstr="-pv2 %d", - ), - pv3=dict( - argstr="-pv3 %d %d %d", - ), - pv_threshold=dict( - argstr="-pvthreshold", - ), - seg=dict( - argstr="-seg %s", - extensions=None, - ), - segstyle=dict( - argstr="-segstyle", - ), - sig=dict( - argstr="-sig", - ), + gm_plasma=dict(argstr="-gmL %f"), + gm_t1=dict(argstr="-gmT1 %f"), + gm_ttt=dict(argstr="-gmTTT %f"), + ir_output=dict(argstr="-IRoutput %s", extensions=None), + ir_volume=dict(argstr="-IRvolume %s", extensions=None), + ldd=dict(argstr="-LDD %f"), + m0map=dict(argstr="-m0map %s", extensions=None), + m0mape=dict(argstr="-m0mape %s", extensions=None), + mask=dict(argstr="-mask %s", extensions=None, position=2), + mul=dict(argstr="-mul %f"), + mulgm=dict(argstr="-sig"), + out=dict(argstr="-out %f"), + pasl=dict(argstr="-pasl"), + pcasl=dict(argstr="-pcasl"), + plasma_coeff=dict(argstr="-L %f"), + pld=dict(argstr="-PLD %f"), + 
pv0=dict(argstr="-pv0 %d"), + pv2=dict(argstr="-pv2 %d"), + pv3=dict(argstr="-pv3 %d %d %d"), + pv_threshold=dict(argstr="-pvthreshold"), + seg=dict(argstr="-seg %s", extensions=None), + segstyle=dict(argstr="-segstyle"), + sig=dict(argstr="-sig"), source_file=dict( - argstr="-source %s", - extensions=None, - mandatory=True, - position=1, + argstr="-source %s", extensions=None, mandatory=True, position=1 ), syn_file=dict( argstr="-syn %s", @@ -120,28 +53,13 @@ def test_FitAsl_inputs(): name_source=["source_file"], name_template="%s_syn.nii.gz", ), - t1_art_cmp=dict( - argstr="-T1a %f", - ), - t1map=dict( - argstr="-t1map %s", - extensions=None, - ), - t_inv1=dict( - argstr="-Tinv1 %f", - ), - t_inv2=dict( - argstr="-Tinv2 %f", - ), - wm_plasma=dict( - argstr="-wmL %f", - ), - wm_t1=dict( - argstr="-wmT1 %f", - ), - wm_ttt=dict( - argstr="-wmTTT %f", - ), + t1_art_cmp=dict(argstr="-T1a %f"), + t1map=dict(argstr="-t1map %s", extensions=None), + t_inv1=dict(argstr="-Tinv1 %f"), + t_inv2=dict(argstr="-Tinv2 %f"), + wm_plasma=dict(argstr="-wmL %f"), + wm_t1=dict(argstr="-wmT1 %f"), + wm_ttt=dict(argstr="-wmTTT %f"), ) inputs = FitAsl.input_spec() @@ -152,15 +70,9 @@ def test_FitAsl_inputs(): def test_FitAsl_outputs(): output_map = dict( - cbf_file=dict( - extensions=None, - ), - error_file=dict( - extensions=None, - ), - syn_file=dict( - extensions=None, - ), + cbf_file=dict(extensions=None), + error_file=dict(extensions=None), + syn_file=dict(extensions=None), ) outputs = FitAsl.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py b/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py index 87650ffbd6..f6a09ecea1 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py @@ -4,12 +4,8 @@ def test_FitDwi_inputs(): input_map = dict( - acceptance=dict( - argstr="-accpetance %f", - ), - args=dict( - argstr="%s", - ), + acceptance=dict(argstr="-accpetance %f"), + args=dict(argstr="%s"), ball_flag=dict( argstr="-ball", position=4, @@ -34,31 +30,12 @@ def test_FitDwi_inputs(): "nodv_flag", ], ), - bval_file=dict( - argstr="-bval %s", - extensions=None, - mandatory=True, - position=2, - ), - bvec_file=dict( - argstr="-bvec %s", - extensions=None, - mandatory=True, - position=3, - ), - cov_file=dict( - argstr="-cov %s", - extensions=None, - ), - csf_t2_val=dict( - argstr="-csfT2 %f", - ), - diso_val=dict( - argstr="-diso %f", - ), - dpr_val=dict( - argstr="-dpr %f", - ), + bval_file=dict(argstr="-bval %s", extensions=None, mandatory=True, position=2), + bvec_file=dict(argstr="-bvec %s", extensions=None, mandatory=True, position=3), + cov_file=dict(argstr="-cov %s", extensions=None), + csf_t2_val=dict(argstr="-csfT2 %f"), + diso_val=dict(argstr="-diso %f"), + dpr_val=dict(argstr="-dpr %f"), dti_flag=dict( argstr="-dti", position=4, @@ -71,10 +48,7 @@ def test_FitDwi_inputs(): "nodv_flag", ], ), - environ=dict( - nohash=True, - usedefault=True, - ), + environ=dict(nohash=True, usedefault=True), error_file=dict( argstr="-error %s", extensions=None, @@ -87,10 +61,7 @@ def test_FitDwi_inputs(): name_source=["source_file"], name_template="%s_famap.nii.gz", ), - gn_flag=dict( - argstr="-gn", - xor=["wls_flag"], - ), + gn_flag=dict(argstr="-gn", xor=["wls_flag"]), ivim_flag=dict( argstr="-ivim", position=4, @@ -103,18 +74,9 @@ def test_FitDwi_inputs(): "nodv_flag", ], ), - lm_vals=dict( - argstr="-lm %f %f", - requires=["gn_flag"], - ), - mask_file=dict( - argstr="-mask %s", - extensions=None, - ), - maxit_val=dict( - 
argstr="-maxit %d", - requires=["gn_flag"], - ), + lm_vals=dict(argstr="-lm %f %f", requires=["gn_flag"]), + mask_file=dict(argstr="-mask %s", extensions=None), + maxit_val=dict(argstr="-maxit %d", requires=["gn_flag"]), mcmap_file=dict( argstr="-mcmap %s", extensions=None, @@ -122,18 +84,14 @@ def test_FitDwi_inputs(): name_template="%s_mcmap.nii.gz", requires=["nodv_flag"], ), - mcmaxit=dict( - argstr="-mcmaxit %d", - ), + mcmaxit=dict(argstr="-mcmaxit %d"), mcout=dict( argstr="-mcout %s", extensions=None, name_source=["source_file"], name_template="%s_mcout.txt", ), - mcsamples=dict( - argstr="-mcsamples %d", - ), + mcsamples=dict(argstr="-mcsamples %d"), mdmap_file=dict( argstr="-mdmap %s", extensions=None, @@ -182,13 +140,8 @@ def test_FitDwi_inputs(): "nod_flag", ], ), - perf_thr=dict( - argstr="-perfthreshold %f", - ), - prior_file=dict( - argstr="-prior %s", - extensions=None, - ), + perf_thr=dict(argstr="-perfthreshold %f"), + prior_file=dict(argstr="-prior %s", extensions=None), res_file=dict( argstr="-res %s", extensions=None, @@ -202,40 +155,21 @@ def test_FitDwi_inputs(): name_template="%s_rgbmap.nii.gz", requires=["dti_flag"], ), - rot_sform_flag=dict( - argstr="-rotsform %d", - ), - slice_no=dict( - argstr="-slice %d", - ), + rot_sform_flag=dict(argstr="-rotsform %d"), + slice_no=dict(argstr="-slice %d"), source_file=dict( - argstr="-source %s", - extensions=None, - mandatory=True, - position=1, - ), - swls_val=dict( - argstr="-swls %f", + argstr="-source %s", extensions=None, mandatory=True, position=1 ), + swls_val=dict(argstr="-swls %f"), syn_file=dict( argstr="-syn %s", extensions=None, name_source=["source_file"], name_template="%s_syn.nii.gz", ), - te_file=dict( - argstr="-TE %s", - extensions=None, - xor=["te_file"], - ), - te_value=dict( - argstr="-TE %s", - extensions=None, - xor=["te_file"], - ), - ten_type=dict( - usedefault=True, - ), + te_file=dict(argstr="-TE %s", extensions=None, xor=["te_file"]), + te_value=dict(argstr="-TE %s", extensions=None, xor=["te_file"]), + ten_type=dict(usedefault=True), tenmap2_file=dict( argstr="-tenmap2 %s", extensions=None, @@ -256,19 +190,10 @@ def test_FitDwi_inputs(): name_source=["source_file"], name_template="%s_v1map.nii.gz", ), - vb_flag=dict( - argstr="-vb", - ), - voxel=dict( - argstr="-voxel %d %d %d", - ), - wls_flag=dict( - argstr="-wls", - xor=["gn_flag"], - ), - wm_t2_val=dict( - argstr="-wmT2 %f", - ), + vb_flag=dict(argstr="-vb"), + voxel=dict(argstr="-voxel %d %d %d"), + wls_flag=dict(argstr="-wls", xor=["gn_flag"]), + wm_t2_val=dict(argstr="-wmT2 %f"), ) inputs = FitDwi.input_spec() @@ -279,42 +204,18 @@ def test_FitDwi_inputs(): def test_FitDwi_outputs(): output_map = dict( - error_file=dict( - extensions=None, - ), - famap_file=dict( - extensions=None, - ), - mcmap_file=dict( - extensions=None, - ), - mcout=dict( - extensions=None, - ), - mdmap_file=dict( - extensions=None, - ), - nodiff_file=dict( - extensions=None, - ), - res_file=dict( - extensions=None, - ), - rgbmap_file=dict( - extensions=None, - ), - syn_file=dict( - extensions=None, - ), - tenmap2_file=dict( - extensions=None, - ), - tenmap_file=dict( - extensions=None, - ), - v1map_file=dict( - extensions=None, - ), + error_file=dict(extensions=None), + famap_file=dict(extensions=None), + mcmap_file=dict(extensions=None), + mcout=dict(extensions=None), + mdmap_file=dict(extensions=None), + nodiff_file=dict(extensions=None), + res_file=dict(extensions=None), + rgbmap_file=dict(extensions=None), + syn_file=dict(extensions=None), + 
tenmap2_file=dict(extensions=None), + tenmap_file=dict(extensions=None), + v1map_file=dict(extensions=None), ) outputs = FitDwi.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py b/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py index ee82b5c900..7547842c8b 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py @@ -4,158 +4,78 @@ def test_FitQt1_inputs(): input_map = dict( - acceptance=dict( - argstr="-acceptance %f", - ), - args=dict( - argstr="%s", - ), - b1map=dict( - argstr="-b1map %s", - extensions=None, - ), + acceptance=dict(argstr="-acceptance %f"), + args=dict(argstr="%s"), + b1map=dict(argstr="-b1map %s", extensions=None), comp_file=dict( argstr="-comp %s", extensions=None, name_source=["source_file"], name_template="%s_comp.nii.gz", ), - environ=dict( - nohash=True, - usedefault=True, - ), + environ=dict(nohash=True, usedefault=True), error_file=dict( argstr="-error %s", extensions=None, name_source=["source_file"], name_template="%s_error.nii.gz", ), - flips=dict( - argstr="-flips %s", - sep=" ", - ), - flips_list=dict( - argstr="-fliplist %s", - extensions=None, - ), - gn_flag=dict( - argstr="-gn", - position=8, - ), - ir_flag=dict( - argstr="-IR", - position=13, - ), - lm_val=dict( - argstr="-lm %f %f", - position=7, - ), + flips=dict(argstr="-flips %s", sep=" "), + flips_list=dict(argstr="-fliplist %s", extensions=None), + gn_flag=dict(argstr="-gn", position=8), + ir_flag=dict(argstr="-IR", position=13), + lm_val=dict(argstr="-lm %f %f", position=7), m0map_file=dict( argstr="-m0map %s", extensions=None, name_source=["source_file"], name_template="%s_m0map.nii.gz", ), - mask=dict( - argstr="-mask %s", - extensions=None, - position=2, - ), - maxit=dict( - argstr="-maxit %d", - position=11, - ), + mask=dict(argstr="-mask %s", extensions=None, position=2), + maxit=dict(argstr="-maxit %d", position=11), mcmap_file=dict( argstr="-mcmap %s", extensions=None, name_source=["source_file"], name_template="%s_mcmap.nii.gz", ), - mcmaxit=dict( - argstr="-mcmaxit %d", - ), - mcout=dict( - argstr="-mcout %s", - extensions=None, - ), - mcsamples=dict( - argstr="-mcsamples %d", - ), - nb_comp=dict( - argstr="-nc %d", - position=6, - ), - prior=dict( - argstr="-prior %s", - extensions=None, - position=3, - ), + mcmaxit=dict(argstr="-mcmaxit %d"), + mcout=dict(argstr="-mcout %s", extensions=None), + mcsamples=dict(argstr="-mcsamples %d"), + nb_comp=dict(argstr="-nc %d", position=6), + prior=dict(argstr="-prior %s", extensions=None, position=3), res_file=dict( argstr="-res %s", extensions=None, name_source=["source_file"], name_template="%s_res.nii.gz", ), - slice_no=dict( - argstr="-slice %d", - position=9, - ), + slice_no=dict(argstr="-slice %d", position=9), source_file=dict( - argstr="-source %s", - extensions=None, - mandatory=True, - position=1, - ), - spgr=dict( - argstr="-SPGR", - ), - sr_flag=dict( - argstr="-SR", - position=12, + argstr="-source %s", extensions=None, mandatory=True, position=1 ), + spgr=dict(argstr="-SPGR"), + sr_flag=dict(argstr="-SR", position=12), syn_file=dict( argstr="-syn %s", extensions=None, name_source=["source_file"], name_template="%s_syn.nii.gz", ), - t1_list=dict( - argstr="-T1list %s", - extensions=None, - ), + t1_list=dict(argstr="-T1list %s", extensions=None), t1map_file=dict( argstr="-t1map %s", extensions=None, name_source=["source_file"], name_template="%s_t1map.nii.gz", ), - t1max=dict( - argstr="-T1max %f", - ), - t1min=dict( - argstr="-T1min 
%f", - ), - te_value=dict( - argstr="-TE %f", - position=4, - ), - tis=dict( - argstr="-TIs %s", - position=14, - sep=" ", - ), - tis_list=dict( - argstr="-TIlist %s", - extensions=None, - ), - tr_value=dict( - argstr="-TR %f", - position=5, - ), - voxel=dict( - argstr="-voxel %d %d %d", - position=10, - ), + t1max=dict(argstr="-T1max %f"), + t1min=dict(argstr="-T1min %f"), + te_value=dict(argstr="-TE %f", position=4), + tis=dict(argstr="-TIs %s", position=14, sep=" "), + tis_list=dict(argstr="-TIlist %s", extensions=None), + tr_value=dict(argstr="-TR %f", position=5), + voxel=dict(argstr="-voxel %d %d %d", position=10), ) inputs = FitQt1.input_spec() @@ -166,27 +86,13 @@ def test_FitQt1_inputs(): def test_FitQt1_outputs(): output_map = dict( - comp_file=dict( - extensions=None, - ), - error_file=dict( - extensions=None, - ), - m0map_file=dict( - extensions=None, - ), - mcmap_file=dict( - extensions=None, - ), - res_file=dict( - extensions=None, - ), - syn_file=dict( - extensions=None, - ), - t1map_file=dict( - extensions=None, - ), + comp_file=dict(extensions=None), + error_file=dict(extensions=None), + m0map_file=dict(extensions=None), + mcmap_file=dict(extensions=None), + res_file=dict(extensions=None), + syn_file=dict(extensions=None), + t1map_file=dict(extensions=None), ) outputs = FitQt1.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py b/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py index f12ccad480..7b0048ae15 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py @@ -3,15 +3,7 @@ def test_NiftyFitCommand_inputs(): - input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - ) + input_map = dict(args=dict(argstr="%s"), environ=dict(nohash=True, usedefault=True)) inputs = NiftyFitCommand.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py b/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py index 43f72df69f..7aa97f4103 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py @@ -4,17 +4,9 @@ def test_NiftyRegCommand_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - omp_core_val=dict( - argstr="-omp %i", - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + omp_core_val=dict(argstr="-omp %i", usedefault=True), ) inputs = NiftyRegCommand.input_spec() diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py b/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py index a4485d0e20..ae9013a82b 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py @@ -4,108 +4,45 @@ def test_RegAladin_inputs(): input_map = dict( - aff_direct_flag=dict( - argstr="-affDirect", - ), + aff_direct_flag=dict(argstr="-affDirect"), aff_file=dict( argstr="-aff %s", extensions=None, name_source=["flo_file"], name_template="%s_aff.txt", ), - args=dict( - argstr="%s", - ), - cog_flag=dict( - argstr="-cog", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - flo_file=dict( - argstr="-flo %s", - extensions=None, - mandatory=True, - ), - flo_low_val=dict( - argstr="-floLowThr %f", - ), - flo_up_val=dict( - argstr="-floUpThr %f", - ), - 
fmask_file=dict( - argstr="-fmask %s", - extensions=None, - ), - gpuid_val=dict( - argstr="-gpuid %i", - ), - i_val=dict( - argstr="-pi %d", - ), - in_aff_file=dict( - argstr="-inaff %s", - extensions=None, - ), - ln_val=dict( - argstr="-ln %d", - ), - lp_val=dict( - argstr="-lp %d", - ), - maxit_val=dict( - argstr="-maxit %d", - ), - nac_flag=dict( - argstr="-nac", - ), - nosym_flag=dict( - argstr="-noSym", - ), - omp_core_val=dict( - argstr="-omp %i", - usedefault=True, - ), - platform_val=dict( - argstr="-platf %i", - ), - ref_file=dict( - argstr="-ref %s", - extensions=None, - mandatory=True, - ), - ref_low_val=dict( - argstr="-refLowThr %f", - ), - ref_up_val=dict( - argstr="-refUpThr %f", - ), + args=dict(argstr="%s"), + cog_flag=dict(argstr="-cog"), + environ=dict(nohash=True, usedefault=True), + flo_file=dict(argstr="-flo %s", extensions=None, mandatory=True), + flo_low_val=dict(argstr="-floLowThr %f"), + flo_up_val=dict(argstr="-floUpThr %f"), + fmask_file=dict(argstr="-fmask %s", extensions=None), + gpuid_val=dict(argstr="-gpuid %i"), + i_val=dict(argstr="-pi %d"), + in_aff_file=dict(argstr="-inaff %s", extensions=None), + ln_val=dict(argstr="-ln %d"), + lp_val=dict(argstr="-lp %d"), + maxit_val=dict(argstr="-maxit %d"), + nac_flag=dict(argstr="-nac"), + nosym_flag=dict(argstr="-noSym"), + omp_core_val=dict(argstr="-omp %i", usedefault=True), + platform_val=dict(argstr="-platf %i"), + ref_file=dict(argstr="-ref %s", extensions=None, mandatory=True), + ref_low_val=dict(argstr="-refLowThr %f"), + ref_up_val=dict(argstr="-refUpThr %f"), res_file=dict( argstr="-res %s", extensions=None, name_source=["flo_file"], name_template="%s_res.nii.gz", ), - rig_only_flag=dict( - argstr="-rigOnly", - ), - rmask_file=dict( - argstr="-rmask %s", - extensions=None, - ), - smoo_f_val=dict( - argstr="-smooF %f", - ), - smoo_r_val=dict( - argstr="-smooR %f", - ), - v_val=dict( - argstr="-pv %d", - ), - verbosity_off_flag=dict( - argstr="-voff", - ), + rig_only_flag=dict(argstr="-rigOnly"), + rmask_file=dict(argstr="-rmask %s", extensions=None), + smoo_f_val=dict(argstr="-smooF %f"), + smoo_r_val=dict(argstr="-smooR %f"), + v_val=dict(argstr="-pv %d"), + verbosity_off_flag=dict(argstr="-voff"), ) inputs = RegAladin.input_spec() @@ -116,13 +53,9 @@ def test_RegAladin_inputs(): def test_RegAladin_outputs(): output_map = dict( - aff_file=dict( - extensions=None, - ), + aff_file=dict(extensions=None), avg_output=dict(), - res_file=dict( - extensions=None, - ), + res_file=dict(extensions=None), ) outputs = RegAladin.output_spec() diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py b/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py index 0077b85faa..42b7e2db8c 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py @@ -4,9 +4,7 @@ def test_RegAverage_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), avg_files=dict( argstr="-avg %s", position=1, @@ -85,25 +83,11 @@ def test_RegAverage_inputs(): "demean2_ref_file", ], ), - environ=dict( - nohash=True, - usedefault=True, - ), - omp_core_val=dict( - argstr="-omp %i", - usedefault=True, - ), - out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - position=0, - ), + environ=dict(nohash=True, usedefault=True), + omp_core_val=dict(argstr="-omp %i", usedefault=True), + out_file=dict(argstr="%s", extensions=None, genfile=True, position=0), warp_files=dict( - argstr="%s", - position=-1, - sep=" ", - xor=["avg_files", 
"avg_lts_files"], + argstr="%s", position=-1, sep=" ", xor=["avg_files", "avg_lts_files"] ), ) inputs = RegAverage.input_spec() @@ -114,11 +98,7 @@ def test_RegAverage_inputs(): def test_RegAverage_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = RegAverage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py b/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py index b760ebb3d1..830b6966aa 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py @@ -4,181 +4,69 @@ def test_RegF3D_inputs(): input_map = dict( - aff_file=dict( - argstr="-aff %s", - extensions=None, - ), - amc_flag=dict( - argstr="-amc", - ), - args=dict( - argstr="%s", - ), - be_val=dict( - argstr="-be %f", - ), + aff_file=dict(argstr="-aff %s", extensions=None), + amc_flag=dict(argstr="-amc"), + args=dict(argstr="%s"), + be_val=dict(argstr="-be %f"), cpp_file=dict( argstr="-cpp %s", extensions=None, name_source=["flo_file"], name_template="%s_cpp.nii.gz", ), - environ=dict( - nohash=True, - usedefault=True, - ), - fbn2_val=dict( - argstr="-fbn %d %d", - ), - fbn_val=dict( - argstr="--fbn %d", - ), - flo_file=dict( - argstr="-flo %s", - extensions=None, - mandatory=True, - ), - flo_smooth_val=dict( - argstr="-smooF %f", - ), - flwth2_thr_val=dict( - argstr="-fLwTh %d %f", - ), - flwth_thr_val=dict( - argstr="--fLwTh %f", - ), - fmask_file=dict( - argstr="-fmask %s", - extensions=None, - ), - fupth2_thr_val=dict( - argstr="-fUpTh %d %f", - ), - fupth_thr_val=dict( - argstr="--fUpTh %f", - ), - incpp_file=dict( - argstr="-incpp %s", - extensions=None, - ), - jl_val=dict( - argstr="-jl %f", - ), - kld2_flag=dict( - argstr="-kld %d", - ), - kld_flag=dict( - argstr="--kld", - ), - le_val=dict( - argstr="-le %f", - ), - ln_val=dict( - argstr="-ln %d", - ), - lncc2_val=dict( - argstr="-lncc %d %f", - ), - lncc_val=dict( - argstr="--lncc %f", - ), - lp_val=dict( - argstr="-lp %d", - ), - maxit_val=dict( - argstr="-maxit %d", - ), - nmi_flag=dict( - argstr="--nmi", - ), - no_app_jl_flag=dict( - argstr="-noAppJL", - ), - noconj_flag=dict( - argstr="-noConj", - ), - nopy_flag=dict( - argstr="-nopy", - ), - nox_flag=dict( - argstr="-nox", - ), - noy_flag=dict( - argstr="-noy", - ), - noz_flag=dict( - argstr="-noz", - ), - omp_core_val=dict( - argstr="-omp %i", - usedefault=True, - ), - pad_val=dict( - argstr="-pad %f", - ), - pert_val=dict( - argstr="-pert %d", - ), - rbn2_val=dict( - argstr="-rbn %d %d", - ), - rbn_val=dict( - argstr="--rbn %d", - ), - ref_file=dict( - argstr="-ref %s", - extensions=None, - mandatory=True, - ), - ref_smooth_val=dict( - argstr="-smooR %f", - ), + environ=dict(nohash=True, usedefault=True), + fbn2_val=dict(argstr="-fbn %d %d"), + fbn_val=dict(argstr="--fbn %d"), + flo_file=dict(argstr="-flo %s", extensions=None, mandatory=True), + flo_smooth_val=dict(argstr="-smooF %f"), + flwth2_thr_val=dict(argstr="-fLwTh %d %f"), + flwth_thr_val=dict(argstr="--fLwTh %f"), + fmask_file=dict(argstr="-fmask %s", extensions=None), + fupth2_thr_val=dict(argstr="-fUpTh %d %f"), + fupth_thr_val=dict(argstr="--fUpTh %f"), + incpp_file=dict(argstr="-incpp %s", extensions=None), + jl_val=dict(argstr="-jl %f"), + kld2_flag=dict(argstr="-kld %d"), + kld_flag=dict(argstr="--kld"), + le_val=dict(argstr="-le %f"), + ln_val=dict(argstr="-ln %d"), + lncc2_val=dict(argstr="-lncc %d %f"), + 
lncc_val=dict(argstr="--lncc %f"), + lp_val=dict(argstr="-lp %d"), + maxit_val=dict(argstr="-maxit %d"), + nmi_flag=dict(argstr="--nmi"), + no_app_jl_flag=dict(argstr="-noAppJL"), + noconj_flag=dict(argstr="-noConj"), + nopy_flag=dict(argstr="-nopy"), + nox_flag=dict(argstr="-nox"), + noy_flag=dict(argstr="-noy"), + noz_flag=dict(argstr="-noz"), + omp_core_val=dict(argstr="-omp %i", usedefault=True), + pad_val=dict(argstr="-pad %f"), + pert_val=dict(argstr="-pert %d"), + rbn2_val=dict(argstr="-rbn %d %d"), + rbn_val=dict(argstr="--rbn %d"), + ref_file=dict(argstr="-ref %s", extensions=None, mandatory=True), + ref_smooth_val=dict(argstr="-smooR %f"), res_file=dict( argstr="-res %s", extensions=None, name_source=["flo_file"], name_template="%s_res.nii.gz", ), - rlwth2_thr_val=dict( - argstr="-rLwTh %d %f", - ), - rlwth_thr_val=dict( - argstr="--rLwTh %f", - ), - rmask_file=dict( - argstr="-rmask %s", - extensions=None, - ), - rupth2_thr_val=dict( - argstr="-rUpTh %d %f", - ), - rupth_thr_val=dict( - argstr="--rUpTh %f", - ), - smooth_grad_val=dict( - argstr="-smoothGrad %f", - ), - ssd2_flag=dict( - argstr="-ssd %d", - ), - ssd_flag=dict( - argstr="--ssd", - ), - sx_val=dict( - argstr="-sx %f", - ), - sy_val=dict( - argstr="-sy %f", - ), - sz_val=dict( - argstr="-sz %f", - ), - vel_flag=dict( - argstr="-vel", - ), - verbosity_off_flag=dict( - argstr="-voff", - ), + rlwth2_thr_val=dict(argstr="-rLwTh %d %f"), + rlwth_thr_val=dict(argstr="--rLwTh %f"), + rmask_file=dict(argstr="-rmask %s", extensions=None), + rupth2_thr_val=dict(argstr="-rUpTh %d %f"), + rupth_thr_val=dict(argstr="--rUpTh %f"), + smooth_grad_val=dict(argstr="-smoothGrad %f"), + ssd2_flag=dict(argstr="-ssd %d"), + ssd_flag=dict(argstr="--ssd"), + sx_val=dict(argstr="-sx %f"), + sy_val=dict(argstr="-sy %f"), + sz_val=dict(argstr="-sz %f"), + vel_flag=dict(argstr="-vel"), + verbosity_off_flag=dict(argstr="-voff"), ) inputs = RegF3D.input_spec() @@ -190,18 +78,10 @@ def test_RegF3D_inputs(): def test_RegF3D_outputs(): output_map = dict( avg_output=dict(), - cpp_file=dict( - extensions=None, - ), - invcpp_file=dict( - extensions=None, - ), - invres_file=dict( - extensions=None, - ), - res_file=dict( - extensions=None, - ), + cpp_file=dict(extensions=None), + invcpp_file=dict(extensions=None), + invres_file=dict(extensions=None), + res_file=dict(extensions=None), ) outputs = RegF3D.output_spec() diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py b/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py index 60c8ce5c08..971fa36b44 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py @@ -4,17 +4,9 @@ def test_RegJacobian_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - omp_core_val=dict( - argstr="-omp %i", - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + omp_core_val=dict(argstr="-omp %i", usedefault=True), out_file=dict( argstr="%s", extensions=None, @@ -22,20 +14,9 @@ def test_RegJacobian_inputs(): name_template="%s", position=-1, ), - ref_file=dict( - argstr="-ref %s", - extensions=None, - ), - trans_file=dict( - argstr="-trans %s", - extensions=None, - mandatory=True, - ), - type=dict( - argstr="-%s", - position=-2, - usedefault=True, - ), + ref_file=dict(argstr="-ref %s", extensions=None), + trans_file=dict(argstr="-trans %s", extensions=None, mandatory=True), + type=dict(argstr="-%s", position=-2, 
usedefault=True), ) inputs = RegJacobian.input_spec() @@ -45,11 +26,7 @@ def test_RegJacobian_inputs(): def test_RegJacobian_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = RegJacobian.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py b/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py index 8a7e470e6c..0579447aa0 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py @@ -4,37 +4,18 @@ def test_RegMeasure_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - flo_file=dict( - argstr="-flo %s", - extensions=None, - mandatory=True, - ), - measure_type=dict( - argstr="-%s", - mandatory=True, - ), - omp_core_val=dict( - argstr="-omp %i", - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + flo_file=dict(argstr="-flo %s", extensions=None, mandatory=True), + measure_type=dict(argstr="-%s", mandatory=True), + omp_core_val=dict(argstr="-omp %i", usedefault=True), out_file=dict( argstr="-out %s", extensions=None, name_source=["flo_file"], name_template="%s", ), - ref_file=dict( - argstr="-ref %s", - extensions=None, - mandatory=True, - ), + ref_file=dict(argstr="-ref %s", extensions=None, mandatory=True), ) inputs = RegMeasure.input_spec() @@ -44,11 +25,7 @@ def test_RegMeasure_inputs(): def test_RegMeasure_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = RegMeasure.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py b/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py index 6d9c9a93e5..24533ad9e2 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py @@ -4,25 +4,11 @@ def test_RegResample_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - flo_file=dict( - argstr="-flo %s", - extensions=None, - mandatory=True, - ), - inter_val=dict( - argstr="-inter %d", - ), - omp_core_val=dict( - argstr="-omp %i", - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + flo_file=dict(argstr="-flo %s", extensions=None, mandatory=True), + inter_val=dict(argstr="-inter %d"), + omp_core_val=dict(argstr="-omp %i", usedefault=True), out_file=dict( argstr="%s", extensions=None, @@ -30,35 +16,14 @@ def test_RegResample_inputs(): name_template="%s", position=-1, ), - pad_val=dict( - argstr="-pad %f", - ), - psf_alg=dict( - argstr="-psf_alg %d", - ), - psf_flag=dict( - argstr="-psf", - ), - ref_file=dict( - argstr="-ref %s", - extensions=None, - mandatory=True, - ), - tensor_flag=dict( - argstr="-tensor ", - ), - trans_file=dict( - argstr="-trans %s", - extensions=None, - ), - type=dict( - argstr="-%s", - position=-2, - usedefault=True, - ), - verbosity_off_flag=dict( - argstr="-voff", - ), + pad_val=dict(argstr="-pad %f"), + psf_alg=dict(argstr="-psf_alg %d"), + psf_flag=dict(argstr="-psf"), + ref_file=dict(argstr="-ref %s", extensions=None, mandatory=True), + tensor_flag=dict(argstr="-tensor "), + trans_file=dict(argstr="-trans %s", extensions=None), + type=dict(argstr="-%s", position=-2, 
usedefault=True), + verbosity_off_flag=dict(argstr="-voff"), ) inputs = RegResample.input_spec() @@ -68,11 +33,7 @@ def test_RegResample_inputs(): def test_RegResample_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = RegResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py b/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py index 9abf8184ec..6c6afecb36 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py @@ -4,75 +4,31 @@ def test_RegTools_inputs(): input_map = dict( - add_val=dict( - argstr="-add %s", - ), - args=dict( - argstr="%s", - ), - bin_flag=dict( - argstr="-bin", - ), - chg_res_val=dict( - argstr="-chgres %f %f %f", - ), - div_val=dict( - argstr="-div %s", - ), - down_flag=dict( - argstr="-down", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - ), - inter_val=dict( - argstr="-interp %d", - ), - iso_flag=dict( - argstr="-iso", - ), - mask_file=dict( - argstr="-nan %s", - extensions=None, - ), - mul_val=dict( - argstr="-mul %s", - ), - noscl_flag=dict( - argstr="-noscl", - ), - omp_core_val=dict( - argstr="-omp %i", - usedefault=True, - ), + add_val=dict(argstr="-add %s"), + args=dict(argstr="%s"), + bin_flag=dict(argstr="-bin"), + chg_res_val=dict(argstr="-chgres %f %f %f"), + div_val=dict(argstr="-div %s"), + down_flag=dict(argstr="-down"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True), + inter_val=dict(argstr="-interp %d"), + iso_flag=dict(argstr="-iso"), + mask_file=dict(argstr="-nan %s", extensions=None), + mul_val=dict(argstr="-mul %s"), + noscl_flag=dict(argstr="-noscl"), + omp_core_val=dict(argstr="-omp %i", usedefault=True), out_file=dict( argstr="-out %s", extensions=None, name_source=["in_file"], name_template="%s_tools.nii.gz", ), - rms_val=dict( - argstr="-rms %s", - extensions=None, - ), - smo_g_val=dict( - argstr="-smoG %f %f %f", - ), - smo_s_val=dict( - argstr="-smoS %f %f %f", - ), - sub_val=dict( - argstr="-sub %s", - ), - thr_val=dict( - argstr="-thr %f", - ), + rms_val=dict(argstr="-rms %s", extensions=None), + smo_g_val=dict(argstr="-smoG %f %f %f"), + smo_s_val=dict(argstr="-smoS %f %f %f"), + sub_val=dict(argstr="-sub %s"), + thr_val=dict(argstr="-thr %f"), ) inputs = RegTools.input_spec() @@ -82,11 +38,7 @@ def test_RegTools_inputs(): def test_RegTools_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = RegTools.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py b/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py index b9ee8bf2af..572e8bdad3 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py @@ -21,9 +21,7 @@ def test_RegTransform_inputs(): "flirt_2_nr_input", ], ), - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), comp_input=dict( argstr="-comp %s", extensions=None, @@ -42,11 +40,7 @@ def test_RegTransform_inputs(): "flirt_2_nr_input", ], ), - comp_input2=dict( - argstr="%s", - extensions=None, - position=-2, - ), + comp_input2=dict(argstr="%s", extensions=None, position=-2), def_input=dict( 
argstr="-def %s", extensions=None, @@ -81,10 +75,7 @@ def test_RegTransform_inputs(): "flirt_2_nr_input", ], ), - environ=dict( - nohash=True, - usedefault=True, - ), + environ=dict(nohash=True, usedefault=True), flirt_2_nr_input=dict( argstr="-flirtAff2NR %s %s %s", position=-2, @@ -184,26 +175,11 @@ def test_RegTransform_inputs(): "flirt_2_nr_input", ], ), - omp_core_val=dict( - argstr="-omp %i", - usedefault=True, - ), - out_file=dict( - argstr="%s", - extensions=None, - genfile=True, - position=-1, - ), - ref1_file=dict( - argstr="-ref %s", - extensions=None, - position=0, - ), + omp_core_val=dict(argstr="-omp %i", usedefault=True), + out_file=dict(argstr="%s", extensions=None, genfile=True, position=-1), + ref1_file=dict(argstr="-ref %s", extensions=None, position=0), ref2_file=dict( - argstr="-ref2 %s", - extensions=None, - position=1, - requires=["ref1_file"], + argstr="-ref2 %s", extensions=None, position=1, requires=["ref1_file"] ), upd_s_form_input=dict( argstr="-updSform %s", @@ -224,10 +200,7 @@ def test_RegTransform_inputs(): ], ), upd_s_form_input2=dict( - argstr="%s", - extensions=None, - position=-2, - requires=["upd_s_form_input"], + argstr="%s", extensions=None, position=-2, requires=["upd_s_form_input"] ), ) inputs = RegTransform.input_spec() @@ -238,11 +211,7 @@ def test_RegTransform_inputs(): def test_RegTransform_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = RegTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py index 4c0a962a21..eff9e4de49 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py @@ -4,19 +4,9 @@ def test_BinaryMaths_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), operand_file=dict( argstr="%s", extensions=None, @@ -36,11 +26,7 @@ def test_BinaryMaths_inputs(): position=5, xor=["operand_file", "operand_str"], ), - operation=dict( - argstr="-%s", - mandatory=True, - position=4, - ), + operation=dict(argstr="-%s", mandatory=True, position=4), out_file=dict( argstr="%s", extensions=None, @@ -48,10 +34,7 @@ def test_BinaryMaths_inputs(): name_template="%s", position=-2, ), - output_datatype=dict( - argstr="-odt %s", - position=-3, - ), + output_datatype=dict(argstr="-odt %s", position=-3), ) inputs = BinaryMaths.input_spec() @@ -61,11 +44,7 @@ def test_BinaryMaths_inputs(): def test_BinaryMaths_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = BinaryMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py index 440cb92bbc..bceba25b22 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py @@ -4,29 +4,11 @@ def test_BinaryMathsInteger_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - 
nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - operand_value=dict( - argstr="%d", - mandatory=True, - position=5, - ), - operation=dict( - argstr="-%s", - mandatory=True, - position=4, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + operand_value=dict(argstr="%d", mandatory=True, position=5), + operation=dict(argstr="-%s", mandatory=True, position=4), out_file=dict( argstr="%s", extensions=None, @@ -34,10 +16,7 @@ def test_BinaryMathsInteger_inputs(): name_template="%s", position=-2, ), - output_datatype=dict( - argstr="-odt %s", - position=-3, - ), + output_datatype=dict(argstr="-odt %s", position=-3), ) inputs = BinaryMathsInteger.input_spec() @@ -47,11 +26,7 @@ def test_BinaryMathsInteger_inputs(): def test_BinaryMathsInteger_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = BinaryMathsInteger.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py b/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py index 5a5ac7298b..a01dd5ba16 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py @@ -4,28 +4,11 @@ def test_BinaryStats_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - larger_voxel=dict( - argstr="-t %f", - position=-3, - ), - mask_file=dict( - argstr="-m %s", - extensions=None, - position=-2, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + larger_voxel=dict(argstr="-t %f", position=-3), + mask_file=dict(argstr="-m %s", extensions=None, position=-2), operand_file=dict( argstr="%s", extensions=None, @@ -34,16 +17,9 @@ def test_BinaryStats_inputs(): xor=["operand_value"], ), operand_value=dict( - argstr="%.8f", - mandatory=True, - position=5, - xor=["operand_file"], - ), - operation=dict( - argstr="-%s", - mandatory=True, - position=4, + argstr="%.8f", mandatory=True, position=5, xor=["operand_file"] ), + operation=dict(argstr="-%s", mandatory=True, position=4), ) inputs = BinaryStats.input_spec() @@ -53,9 +29,7 @@ def test_BinaryStats_inputs(): def test_BinaryStats_outputs(): - output_map = dict( - output=dict(), - ) + output_map = dict(output=dict()) outputs = BinaryStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py b/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py index e0943be61e..40bf7f7926 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py @@ -4,38 +4,13 @@ def test_CalcTopNCC_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-target %s", - extensions=None, - mandatory=True, - position=1, - ), - in_templates=dict( - argstr="%s", - mandatory=True, - position=3, - ), - mask_file=dict( - argstr="-mask %s", - extensions=None, - ), - num_templates=dict( - argstr="-templates %s", - mandatory=True, - position=2, - ), - top_templates=dict( - 
argstr="-n %s", - mandatory=True, - position=4, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-target %s", extensions=None, mandatory=True, position=1), + in_templates=dict(argstr="%s", mandatory=True, position=3), + mask_file=dict(argstr="-mask %s", extensions=None), + num_templates=dict(argstr="-templates %s", mandatory=True, position=2), + top_templates=dict(argstr="-n %s", mandatory=True, position=4), ) inputs = CalcTopNCC.input_spec() @@ -45,9 +20,7 @@ def test_CalcTopNCC_inputs(): def test_CalcTopNCC_outputs(): - output_map = dict( - out_files=dict(), - ) + output_map = dict(out_files=dict()) outputs = CalcTopNCC.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_EM.py b/nipype/interfaces/niftyseg/tests/test_auto_EM.py index a0394b174e..977ae7ce44 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_EM.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_EM.py @@ -4,46 +4,17 @@ def test_EM_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bc_order_val=dict( - argstr="-bc_order %s", - usedefault=True, - ), - bc_thresh_val=dict( - argstr="-bc_thresh %s", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - position=4, - ), - mask_file=dict( - argstr="-mask %s", - extensions=None, - ), - max_iter=dict( - argstr="-max_iter %s", - usedefault=True, - ), - min_iter=dict( - argstr="-min_iter %s", - usedefault=True, - ), - mrf_beta_val=dict( - argstr="-mrf_beta %s", - ), + args=dict(argstr="%s"), + bc_order_val=dict(argstr="-bc_order %s", usedefault=True), + bc_thresh_val=dict(argstr="-bc_thresh %s", usedefault=True), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=4), + mask_file=dict(argstr="-mask %s", extensions=None), + max_iter=dict(argstr="-max_iter %s", usedefault=True), + min_iter=dict(argstr="-min_iter %s", usedefault=True), + mrf_beta_val=dict(argstr="-mrf_beta %s"), no_prior=dict( - argstr="-nopriors %s", - mandatory=True, - xor=["prior_4D", "priors"], + argstr="-nopriors %s", mandatory=True, xor=["prior_4D", "priors"] ), out_bc_file=dict( argstr="-bc_out %s", @@ -63,26 +34,16 @@ def test_EM_inputs(): name_source=["in_file"], name_template="%s_outlier_em.nii.gz", ), - outlier_val=dict( - argstr="-outlier %s %s", - ), + outlier_val=dict(argstr="-outlier %s %s"), prior_4D=dict( argstr="-prior4D %s", extensions=None, mandatory=True, xor=["no_prior", "priors"], ), - priors=dict( - argstr="%s", - mandatory=True, - xor=["no_prior", "prior_4D"], - ), - reg_val=dict( - argstr="-reg %s", - ), - relax_priors=dict( - argstr="-rf %s %s", - ), + priors=dict(argstr="%s", mandatory=True, xor=["no_prior", "prior_4D"]), + reg_val=dict(argstr="-reg %s"), + relax_priors=dict(argstr="-rf %s %s"), ) inputs = EM.input_spec() @@ -93,15 +54,9 @@ def test_EM_inputs(): def test_EM_outputs(): output_map = dict( - out_bc_file=dict( - extensions=None, - ), - out_file=dict( - extensions=None, - ), - out_outlier_file=dict( - extensions=None, - ), + out_bc_file=dict(extensions=None), + out_file=dict(extensions=None), + out_outlier_file=dict(extensions=None), ) outputs = EM.output_spec() diff --git a/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py b/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py index 9e1b06a892..9d694c103b 100644 --- 
a/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py @@ -4,47 +4,17 @@ def test_FillLesions_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bin_mask=dict( - argstr="-mask %s", - extensions=None, - ), - cwf=dict( - argstr="-cwf %f", - ), - debug=dict( - argstr="-debug", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_dilation=dict( - argstr="-dil %d", - ), - in_file=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - position=1, - ), - lesion_mask=dict( - argstr="-l %s", - extensions=None, - mandatory=True, - position=2, - ), - match=dict( - argstr="-match %f", - ), - other=dict( - argstr="-other", - ), - out_datatype=dict( - argstr="-odt %s", - ), + args=dict(argstr="%s"), + bin_mask=dict(argstr="-mask %s", extensions=None), + cwf=dict(argstr="-cwf %f"), + debug=dict(argstr="-debug"), + environ=dict(nohash=True, usedefault=True), + in_dilation=dict(argstr="-dil %d"), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=1), + lesion_mask=dict(argstr="-l %s", extensions=None, mandatory=True, position=2), + match=dict(argstr="-match %f"), + other=dict(argstr="-other"), + out_datatype=dict(argstr="-odt %s"), out_file=dict( argstr="-o %s", extensions=None, @@ -52,21 +22,11 @@ def test_FillLesions_inputs(): name_template="%s_lesions_filled.nii.gz", position=3, ), - search=dict( - argstr="-search %f", - ), - size=dict( - argstr="-size %d", - ), - smooth=dict( - argstr="-smo %f", - ), - use_2d=dict( - argstr="-2D", - ), - verbose=dict( - argstr="-v", - ), + search=dict(argstr="-search %f"), + size=dict(argstr="-size %d"), + smooth=dict(argstr="-smo %f"), + use_2d=dict(argstr="-2D"), + verbose=dict(argstr="-v"), ) inputs = FillLesions.input_spec() @@ -76,11 +36,7 @@ def test_FillLesions_inputs(): def test_FillLesions_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = FillLesions.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py b/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py index dc4590a15b..82c676a5f0 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py @@ -4,79 +4,33 @@ def test_LabelFusion_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - classifier_type=dict( - argstr="-%s", - mandatory=True, - position=2, - ), - conv=dict( - argstr="-conv %f", - ), + args=dict(argstr="%s"), + classifier_type=dict(argstr="-%s", mandatory=True, position=2), + conv=dict(argstr="-conv %f"), dilation_roi=dict(), - environ=dict( - nohash=True, - usedefault=True, - ), - file_to_seg=dict( - extensions=None, - mandatory=True, - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - position=1, - ), + environ=dict(nohash=True, usedefault=True), + file_to_seg=dict(extensions=None, mandatory=True), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=1), kernel_size=dict(), - mask_file=dict( - argstr="-mask %s", - extensions=None, - ), - max_iter=dict( - argstr="-max_iter %d", - ), - mrf_value=dict( - argstr="-MRF_beta %f", - ), + mask_file=dict(argstr="-mask %s", extensions=None), + max_iter=dict(argstr="-max_iter %d"), + mrf_value=dict(argstr="-MRF_beta %f"), out_file=dict( argstr="-out %s", extensions=None, name_source=["in_file"], name_template="%s", ), - prob_flag=dict( 
- argstr="-outProb", - ), - prob_update_flag=dict( - argstr="-prop_update", - ), - proportion=dict( - argstr="-prop %s", - ), - set_pq=dict( - argstr="-setPQ %f %f", - ), - sm_ranking=dict( - argstr="-%s", - position=3, - usedefault=True, - ), - template_file=dict( - extensions=None, - ), + prob_flag=dict(argstr="-outProb"), + prob_update_flag=dict(argstr="-prop_update"), + proportion=dict(argstr="-prop %s"), + set_pq=dict(argstr="-setPQ %f %f"), + sm_ranking=dict(argstr="-%s", position=3, usedefault=True), + template_file=dict(extensions=None), template_num=dict(), - unc=dict( - argstr="-unc", - ), - unc_thresh=dict( - argstr="-uncthres %f", - ), - verbose=dict( - argstr="-v %s", - ), + unc=dict(argstr="-unc"), + unc_thresh=dict(argstr="-uncthres %f"), + verbose=dict(argstr="-v %s"), ) inputs = LabelFusion.input_spec() @@ -86,11 +40,7 @@ def test_LabelFusion_inputs(): def test_LabelFusion_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = LabelFusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py b/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py index 963ddf96f8..b0a32ba7ab 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py @@ -4,19 +4,9 @@ def test_MathsCommand_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), out_file=dict( argstr="%s", extensions=None, @@ -24,10 +14,7 @@ def test_MathsCommand_inputs(): name_template="%s", position=-2, ), - output_datatype=dict( - argstr="-odt %s", - position=-3, - ), + output_datatype=dict(argstr="-odt %s", position=-3), ) inputs = MathsCommand.input_spec() @@ -37,11 +24,7 @@ def test_MathsCommand_inputs(): def test_MathsCommand_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MathsCommand.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_Merge.py b/nipype/interfaces/niftyseg/tests/test_auto_Merge.py index de8dc903e6..7e91b79484 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_Merge.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_Merge.py @@ -4,27 +4,11 @@ def test_Merge_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - dimension=dict( - mandatory=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - merge_files=dict( - argstr="%s", - mandatory=True, - position=4, - ), + args=dict(argstr="%s"), + dimension=dict(mandatory=True), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + merge_files=dict(argstr="%s", mandatory=True, position=4), out_file=dict( argstr="%s", extensions=None, @@ -32,10 +16,7 @@ def test_Merge_inputs(): name_template="%s", position=-2, ), - output_datatype=dict( - argstr="-odt %s", - position=-3, - ), + output_datatype=dict(argstr="-odt %s", position=-3), ) inputs = Merge.input_spec() @@ -45,11 +26,7 @@ def test_Merge_inputs(): 
def test_Merge_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Merge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py b/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py index 37a6ee059c..4db7d817f6 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py @@ -3,15 +3,7 @@ def test_NiftySegCommand_inputs(): - input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - ) + input_map = dict(args=dict(argstr="%s"), environ=dict(nohash=True, usedefault=True)) inputs = NiftySegCommand.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py b/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py index c5b9dba115..067349ac24 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py @@ -4,40 +4,16 @@ def test_PatchMatch_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - cs_size=dict( - argstr="-cs %i", - ), + args=dict(argstr="%s"), + cs_size=dict(argstr="-cs %i"), database_file=dict( - argstr="-db %s", - extensions=None, - mandatory=True, - position=3, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - position=1, - ), - it_num=dict( - argstr="-it %i", - ), - mask_file=dict( - argstr="-m %s", - extensions=None, - mandatory=True, - position=2, - ), - match_num=dict( - argstr="-match %i", + argstr="-db %s", extensions=None, mandatory=True, position=3 ), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=1), + it_num=dict(argstr="-it %i"), + mask_file=dict(argstr="-m %s", extensions=None, mandatory=True, position=2), + match_num=dict(argstr="-match %i"), out_file=dict( argstr="-o %s", extensions=None, @@ -45,12 +21,8 @@ def test_PatchMatch_inputs(): name_template="%s_pm.nii.gz", position=4, ), - patch_size=dict( - argstr="-size %i", - ), - pm_num=dict( - argstr="-pm %i", - ), + patch_size=dict(argstr="-size %i"), + pm_num=dict(argstr="-pm %i"), ) inputs = PatchMatch.input_spec() @@ -60,11 +32,7 @@ def test_PatchMatch_inputs(): def test_PatchMatch_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = PatchMatch.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py b/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py index 6b173663a9..e710f3dd9b 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py @@ -4,28 +4,11 @@ def test_StatsCommand_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - larger_voxel=dict( - argstr="-t %f", - position=-3, - ), - mask_file=dict( - argstr="-m %s", - extensions=None, - position=-2, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + 
larger_voxel=dict(argstr="-t %f", position=-3), + mask_file=dict(argstr="-m %s", extensions=None, position=-2), ) inputs = StatsCommand.input_spec() @@ -35,9 +18,7 @@ def test_StatsCommand_inputs(): def test_StatsCommand_outputs(): - output_map = dict( - output=dict(), - ) + output_map = dict(output=dict()) outputs = StatsCommand.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py b/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py index ef1d4c401f..b82d0cb9c1 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py @@ -4,19 +4,9 @@ def test_TupleMaths_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), operand_file1=dict( argstr="%s", extensions=None, @@ -32,22 +22,12 @@ def test_TupleMaths_inputs(): xor=["operand_value2"], ), operand_value1=dict( - argstr="%.8f", - mandatory=True, - position=5, - xor=["operand_file1"], + argstr="%.8f", mandatory=True, position=5, xor=["operand_file1"] ), operand_value2=dict( - argstr="%.8f", - mandatory=True, - position=6, - xor=["operand_file2"], - ), - operation=dict( - argstr="-%s", - mandatory=True, - position=4, + argstr="%.8f", mandatory=True, position=6, xor=["operand_file2"] ), + operation=dict(argstr="-%s", mandatory=True, position=4), out_file=dict( argstr="%s", extensions=None, @@ -55,10 +35,7 @@ def test_TupleMaths_inputs(): name_template="%s", position=-2, ), - output_datatype=dict( - argstr="-odt %s", - position=-3, - ), + output_datatype=dict(argstr="-odt %s", position=-3), ) inputs = TupleMaths.input_spec() @@ -68,11 +45,7 @@ def test_TupleMaths_inputs(): def test_TupleMaths_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = TupleMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py b/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py index f8189f0f84..3cabcf15a5 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py @@ -4,24 +4,10 @@ def test_UnaryMaths_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - operation=dict( - argstr="-%s", - mandatory=True, - position=4, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + operation=dict(argstr="-%s", mandatory=True, position=4), out_file=dict( argstr="%s", extensions=None, @@ -29,10 +15,7 @@ def test_UnaryMaths_inputs(): name_template="%s", position=-2, ), - output_datatype=dict( - argstr="-odt %s", - position=-3, - ), + output_datatype=dict(argstr="-odt %s", position=-3), ) inputs = UnaryMaths.input_spec() @@ -42,11 +25,7 @@ def test_UnaryMaths_inputs(): def test_UnaryMaths_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = UnaryMaths.output_spec() for 
key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py b/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py index 117ab819b6..bf742096aa 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py @@ -4,33 +4,12 @@ def test_UnaryStats_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), - larger_voxel=dict( - argstr="-t %f", - position=-3, - ), - mask_file=dict( - argstr="-m %s", - extensions=None, - position=-2, - ), - operation=dict( - argstr="-%s", - mandatory=True, - position=4, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + larger_voxel=dict(argstr="-t %f", position=-3), + mask_file=dict(argstr="-m %s", extensions=None, position=-2), + operation=dict(argstr="-%s", mandatory=True, position=4), ) inputs = UnaryStats.input_spec() @@ -40,9 +19,7 @@ def test_UnaryStats_inputs(): def test_UnaryStats_outputs(): - output_map = dict( - output=dict(), - ) + output_map = dict(output=dict()) outputs = UnaryStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py b/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py index db1b784a03..0dd1fbe7fa 100644 --- a/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py +++ b/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py @@ -7,13 +7,8 @@ def test_ComputeMask_inputs(): M=dict(), cc=dict(), m=dict(), - mean_volume=dict( - extensions=None, - mandatory=True, - ), - reference_volume=dict( - extensions=None, - ), + mean_volume=dict(extensions=None, mandatory=True), + reference_volume=dict(extensions=None), ) inputs = ComputeMask.input_spec() @@ -23,11 +18,7 @@ def test_ComputeMask_inputs(): def test_ComputeMask_outputs(): - output_map = dict( - brain_mask=dict( - extensions=None, - ), - ) + output_map = dict(brain_mask=dict(extensions=None)) outputs = ComputeMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py b/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py index 172f2205fd..53369383c7 100644 --- a/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py +++ b/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py @@ -4,35 +4,15 @@ def test_EstimateContrast_inputs(): input_map = dict( - axis=dict( - mandatory=True, - ), - beta=dict( - extensions=None, - mandatory=True, - ), - constants=dict( - mandatory=True, - ), - contrasts=dict( - mandatory=True, - ), - dof=dict( - mandatory=True, - ), - mask=dict( - extensions=None, - ), - nvbeta=dict( - mandatory=True, - ), - reg_names=dict( - mandatory=True, - ), - s2=dict( - extensions=None, - mandatory=True, - ), + axis=dict(mandatory=True), + beta=dict(extensions=None, mandatory=True), + constants=dict(mandatory=True), + contrasts=dict(mandatory=True), + dof=dict(mandatory=True), + mask=dict(extensions=None), + nvbeta=dict(mandatory=True), + reg_names=dict(mandatory=True), + s2=dict(extensions=None, mandatory=True), ) inputs = EstimateContrast.input_spec() @@ -42,11 +22,7 @@ def test_EstimateContrast_inputs(): def test_EstimateContrast_outputs(): - output_map = dict( - p_maps=dict(), - stat_maps=dict(), - z_maps=dict(), - ) + output_map = 
dict(p_maps=dict(), stat_maps=dict(), z_maps=dict()) outputs = EstimateContrast.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nipy/tests/test_auto_FitGLM.py b/nipype/interfaces/nipy/tests/test_auto_FitGLM.py index f04081214c..fc8bc8852e 100644 --- a/nipype/interfaces/nipy/tests/test_auto_FitGLM.py +++ b/nipype/interfaces/nipy/tests/test_auto_FitGLM.py @@ -4,36 +4,16 @@ def test_FitGLM_inputs(): input_map = dict( - TR=dict( - mandatory=True, - ), - drift_model=dict( - usedefault=True, - ), - hrf_model=dict( - usedefault=True, - ), - mask=dict( - extensions=None, - ), - method=dict( - usedefault=True, - ), - model=dict( - usedefault=True, - ), - normalize_design_matrix=dict( - usedefault=True, - ), - plot_design_matrix=dict( - usedefault=True, - ), - save_residuals=dict( - usedefault=True, - ), - session_info=dict( - mandatory=True, - ), + TR=dict(mandatory=True), + drift_model=dict(usedefault=True), + hrf_model=dict(usedefault=True), + mask=dict(extensions=None), + method=dict(usedefault=True), + model=dict(usedefault=True), + normalize_design_matrix=dict(usedefault=True), + plot_design_matrix=dict(usedefault=True), + save_residuals=dict(usedefault=True), + session_info=dict(mandatory=True), ) inputs = FitGLM.input_spec() @@ -44,23 +24,15 @@ def test_FitGLM_inputs(): def test_FitGLM_outputs(): output_map = dict( - a=dict( - extensions=None, - ), + a=dict(extensions=None), axis=dict(), - beta=dict( - extensions=None, - ), + beta=dict(extensions=None), constants=dict(), dof=dict(), nvbeta=dict(), reg_names=dict(), - residuals=dict( - extensions=None, - ), - s2=dict( - extensions=None, - ), + residuals=dict(extensions=None), + s2=dict(extensions=None), ) outputs = FitGLM.output_spec() diff --git a/nipype/interfaces/nipy/tests/test_auto_Similarity.py b/nipype/interfaces/nipy/tests/test_auto_Similarity.py index 81e8622078..61f50389db 100644 --- a/nipype/interfaces/nipy/tests/test_auto_Similarity.py +++ b/nipype/interfaces/nipy/tests/test_auto_Similarity.py @@ -4,23 +4,11 @@ def test_Similarity_inputs(): input_map = dict( - mask1=dict( - extensions=None, - ), - mask2=dict( - extensions=None, - ), - metric=dict( - usedefault=True, - ), - volume1=dict( - extensions=None, - mandatory=True, - ), - volume2=dict( - extensions=None, - mandatory=True, - ), + mask1=dict(extensions=None), + mask2=dict(extensions=None), + metric=dict(usedefault=True), + volume1=dict(extensions=None, mandatory=True), + volume2=dict(extensions=None, mandatory=True), ) inputs = Similarity.input_spec() @@ -30,9 +18,7 @@ def test_Similarity_inputs(): def test_Similarity_outputs(): - output_map = dict( - similarity=dict(), - ) + output_map = dict(similarity=dict()) outputs = Similarity.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py b/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py index fd65848f72..1f063e24e1 100644 --- a/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py +++ b/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py @@ -4,17 +4,10 @@ def test_SpaceTimeRealigner_inputs(): input_map = dict( - in_file=dict( - mandatory=True, - min_ver="0.4.0.dev", - ), - slice_info=dict( - requires=["slice_times"], - ), + in_file=dict(mandatory=True, min_ver="0.4.0.dev"), + slice_info=dict(requires=["slice_times"]), slice_times=dict(), - tr=dict( - requires=["slice_times"], - ), + tr=dict(requires=["slice_times"]), ) inputs = SpaceTimeRealigner.input_spec() @@ -24,10 +17,7 
@@ def test_SpaceTimeRealigner_inputs(): def test_SpaceTimeRealigner_outputs(): - output_map = dict( - out_file=dict(), - par_file=dict(), - ) + output_map = dict(out_file=dict(), par_file=dict()) outputs = SpaceTimeRealigner.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nipy/tests/test_auto_Trim.py b/nipype/interfaces/nipy/tests/test_auto_Trim.py index c4ecee3007..5855e3b27a 100644 --- a/nipype/interfaces/nipy/tests/test_auto_Trim.py +++ b/nipype/interfaces/nipy/tests/test_auto_Trim.py @@ -4,22 +4,11 @@ def test_Trim_inputs(): input_map = dict( - begin_index=dict( - usedefault=True, - ), - end_index=dict( - usedefault=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), - out_file=dict( - extensions=None, - ), - suffix=dict( - usedefault=True, - ), + begin_index=dict(usedefault=True), + end_index=dict(usedefault=True), + in_file=dict(extensions=None, mandatory=True), + out_file=dict(extensions=None), + suffix=dict(usedefault=True), ) inputs = Trim.input_spec() @@ -29,11 +18,7 @@ def test_Trim_inputs(): def test_Trim_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Trim.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py b/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py index 8c70d059ab..56c04ae43e 100644 --- a/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py +++ b/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py @@ -4,30 +4,15 @@ def test_CoherenceAnalyzer_inputs(): input_map = dict( - NFFT=dict( - usedefault=True, - ), + NFFT=dict(usedefault=True), TR=dict(), - figure_type=dict( - usedefault=True, - ), - frequency_range=dict( - usedefault=True, - ), + figure_type=dict(usedefault=True), + frequency_range=dict(usedefault=True), in_TS=dict(), - in_file=dict( - extensions=None, - requires=("TR",), - ), - n_overlap=dict( - usedefault=True, - ), - output_csv_file=dict( - extensions=None, - ), - output_figure_file=dict( - extensions=None, - ), + in_file=dict(extensions=None, requires=("TR",)), + n_overlap=dict(usedefault=True), + output_csv_file=dict(extensions=None), + output_figure_file=dict(extensions=None), ) inputs = CoherenceAnalyzer.input_spec() @@ -39,19 +24,11 @@ def test_CoherenceAnalyzer_inputs(): def test_CoherenceAnalyzer_outputs(): output_map = dict( coherence_array=dict(), - coherence_csv=dict( - extensions=None, - ), - coherence_fig=dict( - extensions=None, - ), + coherence_csv=dict(extensions=None), + coherence_fig=dict(extensions=None), timedelay_array=dict(), - timedelay_csv=dict( - extensions=None, - ), - timedelay_fig=dict( - extensions=None, - ), + timedelay_csv=dict(extensions=None), + timedelay_fig=dict(extensions=None), ) outputs = CoherenceAnalyzer.output_spec() diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py index 9098ee2640..e222b58c36 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py @@ -4,45 +4,16 @@ def test_BRAINSPosteriorToContinuousClass_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputBasalGmVolume=dict( - argstr="--inputBasalGmVolume %s", - 
extensions=None, - ), - inputCrblGmVolume=dict( - argstr="--inputCrblGmVolume %s", - extensions=None, - ), - inputCrblWmVolume=dict( - argstr="--inputCrblWmVolume %s", - extensions=None, - ), - inputCsfVolume=dict( - argstr="--inputCsfVolume %s", - extensions=None, - ), - inputSurfaceGmVolume=dict( - argstr="--inputSurfaceGmVolume %s", - extensions=None, - ), - inputVbVolume=dict( - argstr="--inputVbVolume %s", - extensions=None, - ), - inputWhiteVolume=dict( - argstr="--inputWhiteVolume %s", - extensions=None, - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputBasalGmVolume=dict(argstr="--inputBasalGmVolume %s", extensions=None), + inputCrblGmVolume=dict(argstr="--inputCrblGmVolume %s", extensions=None), + inputCrblWmVolume=dict(argstr="--inputCrblWmVolume %s", extensions=None), + inputCsfVolume=dict(argstr="--inputCsfVolume %s", extensions=None), + inputSurfaceGmVolume=dict(argstr="--inputSurfaceGmVolume %s", extensions=None), + inputVbVolume=dict(argstr="--inputVbVolume %s", extensions=None), + inputWhiteVolume=dict(argstr="--inputWhiteVolume %s", extensions=None), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), ) inputs = BRAINSPosteriorToContinuousClass.input_spec() @@ -52,11 +23,7 @@ def test_BRAINSPosteriorToContinuousClass_inputs(): def test_BRAINSPosteriorToContinuousClass_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = BRAINSPosteriorToContinuousClass.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py index 195ebdcad0..bfdbe33f46 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py @@ -4,53 +4,19 @@ def test_BRAINSTalairach_inputs(): input_map = dict( - AC=dict( - argstr="--AC %s", - sep=",", - ), - ACisIndex=dict( - argstr="--ACisIndex ", - ), - IRP=dict( - argstr="--IRP %s", - sep=",", - ), - IRPisIndex=dict( - argstr="--IRPisIndex ", - ), - PC=dict( - argstr="--PC %s", - sep=",", - ), - PCisIndex=dict( - argstr="--PCisIndex ", - ), - SLA=dict( - argstr="--SLA %s", - sep=",", - ), - SLAisIndex=dict( - argstr="--SLAisIndex ", - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - outputBox=dict( - argstr="--outputBox %s", - hash_files=False, - ), - outputGrid=dict( - argstr="--outputGrid %s", - hash_files=False, - ), + AC=dict(argstr="--AC %s", sep=","), + ACisIndex=dict(argstr="--ACisIndex "), + IRP=dict(argstr="--IRP %s", sep=","), + IRPisIndex=dict(argstr="--IRPisIndex "), + PC=dict(argstr="--PC %s", sep=","), + PCisIndex=dict(argstr="--PCisIndex "), + SLA=dict(argstr="--SLA %s", sep=","), + SLAisIndex=dict(argstr="--SLAisIndex "), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + outputBox=dict(argstr="--outputBox %s", hash_files=False), + outputGrid=dict(argstr="--outputGrid %s", hash_files=False), ) inputs = BRAINSTalairach.input_spec() @@ -60,14 +26,7 @@ def test_BRAINSTalairach_inputs(): def test_BRAINSTalairach_outputs(): - output_map = dict( - outputBox=dict( - extensions=None, - ), - 
outputGrid=dict( - extensions=None, - ), - ) + output_map = dict(outputBox=dict(extensions=None), outputGrid=dict(extensions=None)) outputs = BRAINSTalairach.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py index 2470e42f47..35ec58c6e1 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py @@ -4,35 +4,14 @@ def test_BRAINSTalairachMask_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - expand=dict( - argstr="--expand ", - ), - hemisphereMode=dict( - argstr="--hemisphereMode %s", - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - talairachBox=dict( - argstr="--talairachBox %s", - extensions=None, - ), - talairachParameters=dict( - argstr="--talairachParameters %s", - extensions=None, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + expand=dict(argstr="--expand "), + hemisphereMode=dict(argstr="--hemisphereMode %s"), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + talairachBox=dict(argstr="--talairachBox %s", extensions=None), + talairachParameters=dict(argstr="--talairachParameters %s", extensions=None), ) inputs = BRAINSTalairachMask.input_spec() @@ -42,11 +21,7 @@ def test_BRAINSTalairachMask_inputs(): def test_BRAINSTalairachMask_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = BRAINSTalairachMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py b/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py index 218c67a4b0..efce068e3b 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py @@ -4,43 +4,19 @@ def test_GenerateEdgeMapImage_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputMRVolumes=dict( - argstr="--inputMRVolumes %s...", - ), - inputMask=dict( - argstr="--inputMask %s", - extensions=None, - ), - lowerPercentileMatching=dict( - argstr="--lowerPercentileMatching %f", - ), - maximumOutputRange=dict( - argstr="--maximumOutputRange %d", - ), - minimumOutputRange=dict( - argstr="--minimumOutputRange %d", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputEdgeMap=dict( - argstr="--outputEdgeMap %s", - hash_files=False, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputMRVolumes=dict(argstr="--inputMRVolumes %s..."), + inputMask=dict(argstr="--inputMask %s", extensions=None), + lowerPercentileMatching=dict(argstr="--lowerPercentileMatching %f"), + maximumOutputRange=dict(argstr="--maximumOutputRange %d"), + minimumOutputRange=dict(argstr="--minimumOutputRange %d"), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputEdgeMap=dict(argstr="--outputEdgeMap %s", hash_files=False), outputMaximumGradientImage=dict( - argstr="--outputMaximumGradientImage %s", - 
hash_files=False, - ), - upperPercentileMatching=dict( - argstr="--upperPercentileMatching %f", + argstr="--outputMaximumGradientImage %s", hash_files=False ), + upperPercentileMatching=dict(argstr="--upperPercentileMatching %f"), ) inputs = GenerateEdgeMapImage.input_spec() @@ -51,12 +27,8 @@ def test_GenerateEdgeMapImage_inputs(): def test_GenerateEdgeMapImage_outputs(): output_map = dict( - outputEdgeMap=dict( - extensions=None, - ), - outputMaximumGradientImage=dict( - extensions=None, - ), + outputEdgeMap=dict(extensions=None), + outputMaximumGradientImage=dict(extensions=None), ) outputs = GenerateEdgeMapImage.output_spec() diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py b/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py index e68b03dcf9..ff38a9dc96 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py @@ -4,27 +4,12 @@ def test_GeneratePurePlugMask_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputImageModalities=dict( - argstr="--inputImageModalities %s...", - ), - numberOfSubSamples=dict( - argstr="--numberOfSubSamples %s", - sep=",", - ), - outputMaskFile=dict( - argstr="--outputMaskFile %s", - hash_files=False, - ), - threshold=dict( - argstr="--threshold %f", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputImageModalities=dict(argstr="--inputImageModalities %s..."), + numberOfSubSamples=dict(argstr="--numberOfSubSamples %s", sep=","), + outputMaskFile=dict(argstr="--outputMaskFile %s", hash_files=False), + threshold=dict(argstr="--threshold %f"), ) inputs = GeneratePurePlugMask.input_spec() @@ -34,11 +19,7 @@ def test_GeneratePurePlugMask_inputs(): def test_GeneratePurePlugMask_outputs(): - output_map = dict( - outputMaskFile=dict( - extensions=None, - ), - ) + output_map = dict(outputMaskFile=dict(extensions=None)) outputs = GeneratePurePlugMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py b/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py index 110aec4891..bc49e87966 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py @@ -4,48 +4,20 @@ def test_HistogramMatchingFilter_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - histogramAlgorithm=dict( - argstr="--histogramAlgorithm %s", - ), - inputBinaryVolume=dict( - argstr="--inputBinaryVolume %s", - extensions=None, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - numberOfHistogramBins=dict( - argstr="--numberOfHistogramBins %d", - ), - numberOfMatchPoints=dict( - argstr="--numberOfMatchPoints %d", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + histogramAlgorithm=dict(argstr="--histogramAlgorithm %s"), + inputBinaryVolume=dict(argstr="--inputBinaryVolume %s", extensions=None), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d"), + numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d"), + 
outputVolume=dict(argstr="--outputVolume %s", hash_files=False), referenceBinaryVolume=dict( - argstr="--referenceBinaryVolume %s", - extensions=None, - ), - referenceVolume=dict( - argstr="--referenceVolume %s", - extensions=None, - ), - verbose=dict( - argstr="--verbose ", - ), - writeHistogram=dict( - argstr="--writeHistogram %s", + argstr="--referenceBinaryVolume %s", extensions=None ), + referenceVolume=dict(argstr="--referenceVolume %s", extensions=None), + verbose=dict(argstr="--verbose "), + writeHistogram=dict(argstr="--writeHistogram %s"), ) inputs = HistogramMatchingFilter.input_spec() @@ -55,11 +27,7 @@ def test_HistogramMatchingFilter_inputs(): def test_HistogramMatchingFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = HistogramMatchingFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py b/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py index 881e3379de..7c5d7d1303 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py @@ -4,28 +4,12 @@ def test_SimilarityIndex_inputs(): input_map = dict( - ANNContinuousVolume=dict( - argstr="--ANNContinuousVolume %s", - extensions=None, - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputManualVolume=dict( - argstr="--inputManualVolume %s", - extensions=None, - ), - outputCSVFilename=dict( - argstr="--outputCSVFilename %s", - extensions=None, - ), - thresholdInterval=dict( - argstr="--thresholdInterval %f", - ), + ANNContinuousVolume=dict(argstr="--ANNContinuousVolume %s", extensions=None), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputManualVolume=dict(argstr="--inputManualVolume %s", extensions=None), + outputCSVFilename=dict(argstr="--outputCSVFilename %s", extensions=None), + thresholdInterval=dict(argstr="--thresholdInterval %f"), ) inputs = SimilarityIndex.input_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py index 4dfb6943cb..f15a03e47b 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py @@ -4,76 +4,26 @@ def test_DWIConvert_inputs(): input_map = dict( - allowLossyConversion=dict( - argstr="--allowLossyConversion ", - ), - args=dict( - argstr="%s", - ), - conversionMode=dict( - argstr="--conversionMode %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fMRI=dict( - argstr="--fMRI ", - ), - fslNIFTIFile=dict( - argstr="--fslNIFTIFile %s", - extensions=None, - ), - gradientVectorFile=dict( - argstr="--gradientVectorFile %s", - hash_files=False, - ), - inputBValues=dict( - argstr="--inputBValues %s", - extensions=None, - ), - inputBVectors=dict( - argstr="--inputBVectors %s", - extensions=None, - ), - inputDicomDirectory=dict( - argstr="--inputDicomDirectory %s", - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - outputBValues=dict( - argstr="--outputBValues %s", - hash_files=False, - ), - outputBVectors=dict( - argstr="--outputBVectors %s", - hash_files=False, - ), - outputDirectory=dict( - argstr="--outputDirectory %s", - hash_files=False, - ), - outputVolume=dict( - argstr="--outputVolume 
%s", - hash_files=False, - ), - smallGradientThreshold=dict( - argstr="--smallGradientThreshold %f", - ), - transposeInputBVectors=dict( - argstr="--transposeInputBVectors ", - ), - useBMatrixGradientDirections=dict( - argstr="--useBMatrixGradientDirections ", - ), - useIdentityMeaseurementFrame=dict( - argstr="--useIdentityMeaseurementFrame ", - ), - writeProtocolGradientsFile=dict( - argstr="--writeProtocolGradientsFile ", - ), + allowLossyConversion=dict(argstr="--allowLossyConversion "), + args=dict(argstr="%s"), + conversionMode=dict(argstr="--conversionMode %s"), + environ=dict(nohash=True, usedefault=True), + fMRI=dict(argstr="--fMRI "), + fslNIFTIFile=dict(argstr="--fslNIFTIFile %s", extensions=None), + gradientVectorFile=dict(argstr="--gradientVectorFile %s", hash_files=False), + inputBValues=dict(argstr="--inputBValues %s", extensions=None), + inputBVectors=dict(argstr="--inputBVectors %s", extensions=None), + inputDicomDirectory=dict(argstr="--inputDicomDirectory %s"), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + outputBValues=dict(argstr="--outputBValues %s", hash_files=False), + outputBVectors=dict(argstr="--outputBVectors %s", hash_files=False), + outputDirectory=dict(argstr="--outputDirectory %s", hash_files=False), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + smallGradientThreshold=dict(argstr="--smallGradientThreshold %f"), + transposeInputBVectors=dict(argstr="--transposeInputBVectors "), + useBMatrixGradientDirections=dict(argstr="--useBMatrixGradientDirections "), + useIdentityMeaseurementFrame=dict(argstr="--useIdentityMeaseurementFrame "), + writeProtocolGradientsFile=dict(argstr="--writeProtocolGradientsFile "), ) inputs = DWIConvert.input_spec() @@ -84,19 +34,11 @@ def test_DWIConvert_inputs(): def test_DWIConvert_outputs(): output_map = dict( - gradientVectorFile=dict( - extensions=None, - ), - outputBValues=dict( - extensions=None, - ), - outputBVectors=dict( - extensions=None, - ), + gradientVectorFile=dict(extensions=None), + outputBValues=dict(extensions=None), + outputBVectors=dict(extensions=None), outputDirectory=dict(), - outputVolume=dict( - extensions=None, - ), + outputVolume=dict(extensions=None), ) outputs = DWIConvert.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py index 38e8f92b0b..fe06bb927d 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py @@ -4,39 +4,16 @@ def test_compareTractInclusion_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - closeness=dict( - argstr="--closeness %f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - numberOfPoints=dict( - argstr="--numberOfPoints %d", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - standardFiber=dict( - argstr="--standardFiber %s", - extensions=None, - ), - testFiber=dict( - argstr="--testFiber %s", - extensions=None, - ), - testForBijection=dict( - argstr="--testForBijection ", - ), - testForFiberCardinality=dict( - argstr="--testForFiberCardinality ", - ), - writeXMLPolyDataFile=dict( - argstr="--writeXMLPolyDataFile ", - ), + args=dict(argstr="%s"), + closeness=dict(argstr="--closeness %f"), + environ=dict(nohash=True, usedefault=True), + numberOfPoints=dict(argstr="--numberOfPoints %d"), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + 
standardFiber=dict(argstr="--standardFiber %s", extensions=None), + testFiber=dict(argstr="--testFiber %s", extensions=None), + testForBijection=dict(argstr="--testForBijection "), + testForFiberCardinality=dict(argstr="--testForFiberCardinality "), + writeXMLPolyDataFile=dict(argstr="--writeXMLPolyDataFile "), ) inputs = compareTractInclusion.input_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py index 17d4d19b4c..fa8bb55297 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py @@ -4,26 +4,12 @@ def test_dtiaverage_inputs(): input_map = dict( - DTI_double=dict( - argstr="--DTI_double ", - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputs=dict( - argstr="--inputs %s...", - ), - tensor_output=dict( - argstr="--tensor_output %s", - hash_files=False, - ), - verbose=dict( - argstr="--verbose ", - ), + DTI_double=dict(argstr="--DTI_double "), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputs=dict(argstr="--inputs %s..."), + tensor_output=dict(argstr="--tensor_output %s", hash_files=False), + verbose=dict(argstr="--verbose "), ) inputs = dtiaverage.input_spec() @@ -33,11 +19,7 @@ def test_dtiaverage_inputs(): def test_dtiaverage_outputs(): - output_map = dict( - tensor_output=dict( - extensions=None, - ), - ) + output_map = dict(tensor_output=dict(extensions=None)) outputs = dtiaverage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py index 0a36716e87..ef40c6e373 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py @@ -4,75 +4,26 @@ def test_dtiestim_inputs(): input_map = dict( - B0=dict( - argstr="--B0 %s", - hash_files=False, - ), - B0_mask_output=dict( - argstr="--B0_mask_output %s", - hash_files=False, - ), - DTI_double=dict( - argstr="--DTI_double ", - ), - args=dict( - argstr="%s", - ), - bad_region_mask=dict( - argstr="--bad_region_mask %s", - extensions=None, - ), - brain_mask=dict( - argstr="--brain_mask %s", - extensions=None, - ), - correction=dict( - argstr="--correction %s", - ), - defaultTensor=dict( - argstr="--defaultTensor %s", - sep=",", - ), - dwi_image=dict( - argstr="--dwi_image %s", - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - idwi=dict( - argstr="--idwi %s", - hash_files=False, - ), - method=dict( - argstr="--method %s", - ), - shiftNeg=dict( - argstr="--shiftNeg ", - ), - shiftNegCoeff=dict( - argstr="--shiftNegCoeff %f", - ), - sigma=dict( - argstr="--sigma %f", - ), - step=dict( - argstr="--step %f", - ), - tensor_output=dict( - argstr="--tensor_output %s", - hash_files=False, - ), - threshold=dict( - argstr="--threshold %d", - ), - verbose=dict( - argstr="--verbose ", - ), - weight_iterations=dict( - argstr="--weight_iterations %d", - ), + B0=dict(argstr="--B0 %s", hash_files=False), + B0_mask_output=dict(argstr="--B0_mask_output %s", hash_files=False), + DTI_double=dict(argstr="--DTI_double "), + args=dict(argstr="%s"), + bad_region_mask=dict(argstr="--bad_region_mask %s", extensions=None), + brain_mask=dict(argstr="--brain_mask %s", extensions=None), + correction=dict(argstr="--correction %s"), + 
defaultTensor=dict(argstr="--defaultTensor %s", sep=","), + dwi_image=dict(argstr="--dwi_image %s", extensions=None), + environ=dict(nohash=True, usedefault=True), + idwi=dict(argstr="--idwi %s", hash_files=False), + method=dict(argstr="--method %s"), + shiftNeg=dict(argstr="--shiftNeg "), + shiftNegCoeff=dict(argstr="--shiftNegCoeff %f"), + sigma=dict(argstr="--sigma %f"), + step=dict(argstr="--step %f"), + tensor_output=dict(argstr="--tensor_output %s", hash_files=False), + threshold=dict(argstr="--threshold %d"), + verbose=dict(argstr="--verbose "), + weight_iterations=dict(argstr="--weight_iterations %d"), ) inputs = dtiestim.input_spec() @@ -83,18 +34,10 @@ def test_dtiestim_inputs(): def test_dtiestim_outputs(): output_map = dict( - B0=dict( - extensions=None, - ), - B0_mask_output=dict( - extensions=None, - ), - idwi=dict( - extensions=None, - ), - tensor_output=dict( - extensions=None, - ), + B0=dict(extensions=None), + B0_mask_output=dict(extensions=None), + idwi=dict(extensions=None), + tensor_output=dict(extensions=None), ) outputs = dtiestim.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py index 24352abbe3..4fb5b05acb 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py @@ -4,121 +4,43 @@ def test_dtiprocess_inputs(): input_map = dict( - DTI_double=dict( - argstr="--DTI_double ", - ), - RD_output=dict( - argstr="--RD_output %s", - hash_files=False, - ), - affineitk_file=dict( - argstr="--affineitk_file %s", - extensions=None, - ), - args=dict( - argstr="%s", - ), - color_fa_output=dict( - argstr="--color_fa_output %s", - hash_files=False, - ), - correction=dict( - argstr="--correction %s", - ), - deformation_output=dict( - argstr="--deformation_output %s", - hash_files=False, - ), - dof_file=dict( - argstr="--dof_file %s", - extensions=None, - ), - dti_image=dict( - argstr="--dti_image %s", - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fa_gradient_output=dict( - argstr="--fa_gradient_output %s", - hash_files=False, - ), - fa_gradmag_output=dict( - argstr="--fa_gradmag_output %s", - hash_files=False, - ), - fa_output=dict( - argstr="--fa_output %s", - hash_files=False, - ), - forward=dict( - argstr="--forward %s", - extensions=None, - ), + DTI_double=dict(argstr="--DTI_double "), + RD_output=dict(argstr="--RD_output %s", hash_files=False), + affineitk_file=dict(argstr="--affineitk_file %s", extensions=None), + args=dict(argstr="%s"), + color_fa_output=dict(argstr="--color_fa_output %s", hash_files=False), + correction=dict(argstr="--correction %s"), + deformation_output=dict(argstr="--deformation_output %s", hash_files=False), + dof_file=dict(argstr="--dof_file %s", extensions=None), + dti_image=dict(argstr="--dti_image %s", extensions=None), + environ=dict(nohash=True, usedefault=True), + fa_gradient_output=dict(argstr="--fa_gradient_output %s", hash_files=False), + fa_gradmag_output=dict(argstr="--fa_gradmag_output %s", hash_files=False), + fa_output=dict(argstr="--fa_output %s", hash_files=False), + forward=dict(argstr="--forward %s", extensions=None), frobenius_norm_output=dict( - argstr="--frobenius_norm_output %s", - hash_files=False, - ), - hField=dict( - argstr="--hField ", - ), - interpolation=dict( - argstr="--interpolation %s", - ), - lambda1_output=dict( - argstr="--lambda1_output %s", - hash_files=False, - ), - 
lambda2_output=dict( - argstr="--lambda2_output %s", - hash_files=False, - ), - lambda3_output=dict( - argstr="--lambda3_output %s", - hash_files=False, - ), - mask=dict( - argstr="--mask %s", - extensions=None, - ), - md_output=dict( - argstr="--md_output %s", - hash_files=False, - ), + argstr="--frobenius_norm_output %s", hash_files=False + ), + hField=dict(argstr="--hField "), + interpolation=dict(argstr="--interpolation %s"), + lambda1_output=dict(argstr="--lambda1_output %s", hash_files=False), + lambda2_output=dict(argstr="--lambda2_output %s", hash_files=False), + lambda3_output=dict(argstr="--lambda3_output %s", hash_files=False), + mask=dict(argstr="--mask %s", extensions=None), + md_output=dict(argstr="--md_output %s", hash_files=False), negative_eigenvector_output=dict( - argstr="--negative_eigenvector_output %s", - hash_files=False, - ), - newdof_file=dict( - argstr="--newdof_file %s", - extensions=None, - ), - outmask=dict( - argstr="--outmask %s", - hash_files=False, + argstr="--negative_eigenvector_output %s", hash_files=False ), + newdof_file=dict(argstr="--newdof_file %s", extensions=None), + outmask=dict(argstr="--outmask %s", hash_files=False), principal_eigenvector_output=dict( - argstr="--principal_eigenvector_output %s", - hash_files=False, - ), - reorientation=dict( - argstr="--reorientation %s", - ), - rot_output=dict( - argstr="--rot_output %s", - hash_files=False, - ), - scalar_float=dict( - argstr="--scalar_float ", - ), - sigma=dict( - argstr="--sigma %f", - ), - verbose=dict( - argstr="--verbose ", + argstr="--principal_eigenvector_output %s", hash_files=False ), + reorientation=dict(argstr="--reorientation %s"), + rot_output=dict(argstr="--rot_output %s", hash_files=False), + scalar_float=dict(argstr="--scalar_float "), + sigma=dict(argstr="--sigma %f"), + verbose=dict(argstr="--verbose "), ) inputs = dtiprocess.input_spec() @@ -129,51 +51,21 @@ def test_dtiprocess_inputs(): def test_dtiprocess_outputs(): output_map = dict( - RD_output=dict( - extensions=None, - ), - color_fa_output=dict( - extensions=None, - ), - deformation_output=dict( - extensions=None, - ), - fa_gradient_output=dict( - extensions=None, - ), - fa_gradmag_output=dict( - extensions=None, - ), - fa_output=dict( - extensions=None, - ), - frobenius_norm_output=dict( - extensions=None, - ), - lambda1_output=dict( - extensions=None, - ), - lambda2_output=dict( - extensions=None, - ), - lambda3_output=dict( - extensions=None, - ), - md_output=dict( - extensions=None, - ), - negative_eigenvector_output=dict( - extensions=None, - ), - outmask=dict( - extensions=None, - ), - principal_eigenvector_output=dict( - extensions=None, - ), - rot_output=dict( - extensions=None, - ), + RD_output=dict(extensions=None), + color_fa_output=dict(extensions=None), + deformation_output=dict(extensions=None), + fa_gradient_output=dict(extensions=None), + fa_gradmag_output=dict(extensions=None), + fa_output=dict(extensions=None), + frobenius_norm_output=dict(extensions=None), + lambda1_output=dict(extensions=None), + lambda2_output=dict(extensions=None), + lambda3_output=dict(extensions=None), + md_output=dict(extensions=None), + negative_eigenvector_output=dict(extensions=None), + outmask=dict(extensions=None), + principal_eigenvector_output=dict(extensions=None), + rot_output=dict(extensions=None), ) outputs = dtiprocess.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py index 
aaa516e9dc..6856fbb8cd 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py @@ -4,30 +4,13 @@ def test_extractNrrdVectorIndex_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - setImageOrientation=dict( - argstr="--setImageOrientation %s", - ), - vectorIndex=dict( - argstr="--vectorIndex %d", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + setImageOrientation=dict(argstr="--setImageOrientation %s"), + vectorIndex=dict(argstr="--vectorIndex %d"), ) inputs = extractNrrdVectorIndex.input_spec() @@ -37,11 +20,7 @@ def test_extractNrrdVectorIndex_inputs(): def test_extractNrrdVectorIndex_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = extractNrrdVectorIndex.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py index da3e02c37b..3b6e262354 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py @@ -4,27 +4,12 @@ def test_gtractAnisotropyMap_inputs(): input_map = dict( - anisotropyType=dict( - argstr="--anisotropyType %s", - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputTensorVolume=dict( - argstr="--inputTensorVolume %s", - extensions=None, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), + anisotropyType=dict(argstr="--anisotropyType %s"), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputTensorVolume=dict(argstr="--inputTensorVolume %s", extensions=None), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), ) inputs = gtractAnisotropyMap.input_spec() @@ -34,11 +19,7 @@ def test_gtractAnisotropyMap_inputs(): def test_gtractAnisotropyMap_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = gtractAnisotropyMap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py index a37b0e65ce..eb463c2a08 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py @@ -4,30 +4,13 @@ def test_gtractAverageBvalues_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - averageB0only=dict( - argstr="--averageB0only ", - ), - directionsTolerance=dict( - argstr="--directionsTolerance %f", - ), - 
environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), + args=dict(argstr="%s"), + averageB0only=dict(argstr="--averageB0only "), + directionsTolerance=dict(argstr="--directionsTolerance %f"), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), ) inputs = gtractAverageBvalues.input_spec() @@ -37,11 +20,7 @@ def test_gtractAverageBvalues_inputs(): def test_gtractAverageBvalues_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = gtractAverageBvalues.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py index 3d6e24aee3..720573f0eb 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py @@ -4,30 +4,13 @@ def test_gtractClipAnisotropy_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - clipFirstSlice=dict( - argstr="--clipFirstSlice ", - ), - clipLastSlice=dict( - argstr="--clipLastSlice ", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), + args=dict(argstr="%s"), + clipFirstSlice=dict(argstr="--clipFirstSlice "), + clipLastSlice=dict(argstr="--clipLastSlice "), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), ) inputs = gtractClipAnisotropy.input_spec() @@ -37,11 +20,7 @@ def test_gtractClipAnisotropy_inputs(): def test_gtractClipAnisotropy_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = gtractClipAnisotropy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py index 1ab780c1b9..e82c6e3ada 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py @@ -4,90 +4,34 @@ def test_gtractCoRegAnatomy_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - borderSize=dict( - argstr="--borderSize %d", - ), - convergence=dict( - argstr="--convergence %f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gradientTolerance=dict( - argstr="--gradientTolerance %f", - ), - gridSize=dict( - argstr="--gridSize %s", - sep=",", - ), + args=dict(argstr="%s"), + borderSize=dict(argstr="--borderSize %d"), + convergence=dict(argstr="--convergence %f"), + environ=dict(nohash=True, usedefault=True), + gradientTolerance=dict(argstr="--gradientTolerance 
%f"), + gridSize=dict(argstr="--gridSize %s", sep=","), inputAnatomicalVolume=dict( - argstr="--inputAnatomicalVolume %s", - extensions=None, - ), - inputRigidTransform=dict( - argstr="--inputRigidTransform %s", - extensions=None, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - maxBSplineDisplacement=dict( - argstr="--maxBSplineDisplacement %f", - ), - maximumStepSize=dict( - argstr="--maximumStepSize %f", - ), - minimumStepSize=dict( - argstr="--minimumStepSize %f", - ), - numberOfHistogramBins=dict( - argstr="--numberOfHistogramBins %d", - ), - numberOfIterations=dict( - argstr="--numberOfIterations %d", - ), - numberOfSamples=dict( - argstr="--numberOfSamples %d", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputTransformName=dict( - argstr="--outputTransformName %s", - hash_files=False, - ), - relaxationFactor=dict( - argstr="--relaxationFactor %f", - ), - samplingPercentage=dict( - argstr="--samplingPercentage %f", - ), - spatialScale=dict( - argstr="--spatialScale %d", - ), - transformType=dict( - argstr="--transformType %s", - ), - translationScale=dict( - argstr="--translationScale %f", - ), - useCenterOfHeadAlign=dict( - argstr="--useCenterOfHeadAlign ", - ), - useGeometryAlign=dict( - argstr="--useGeometryAlign ", - ), - useMomentsAlign=dict( - argstr="--useMomentsAlign ", - ), - vectorIndex=dict( - argstr="--vectorIndex %d", - ), + argstr="--inputAnatomicalVolume %s", extensions=None + ), + inputRigidTransform=dict(argstr="--inputRigidTransform %s", extensions=None), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + maxBSplineDisplacement=dict(argstr="--maxBSplineDisplacement %f"), + maximumStepSize=dict(argstr="--maximumStepSize %f"), + minimumStepSize=dict(argstr="--minimumStepSize %f"), + numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d"), + numberOfIterations=dict(argstr="--numberOfIterations %d"), + numberOfSamples=dict(argstr="--numberOfSamples %d"), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputTransformName=dict(argstr="--outputTransformName %s", hash_files=False), + relaxationFactor=dict(argstr="--relaxationFactor %f"), + samplingPercentage=dict(argstr="--samplingPercentage %f"), + spatialScale=dict(argstr="--spatialScale %d"), + transformType=dict(argstr="--transformType %s"), + translationScale=dict(argstr="--translationScale %f"), + useCenterOfHeadAlign=dict(argstr="--useCenterOfHeadAlign "), + useGeometryAlign=dict(argstr="--useGeometryAlign "), + useMomentsAlign=dict(argstr="--useMomentsAlign "), + vectorIndex=dict(argstr="--vectorIndex %d"), ) inputs = gtractCoRegAnatomy.input_spec() @@ -97,11 +41,7 @@ def test_gtractCoRegAnatomy_inputs(): def test_gtractCoRegAnatomy_outputs(): - output_map = dict( - outputTransformName=dict( - extensions=None, - ), - ) + output_map = dict(outputTransformName=dict(extensions=None)) outputs = gtractCoRegAnatomy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py index d2a6ca3288..73815728a8 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py @@ -4,26 +4,12 @@ def test_gtractConcatDwi_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - ignoreOrigins=dict( - argstr="--ignoreOrigins ", - ), - 
inputVolume=dict( - argstr="--inputVolume %s...", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + ignoreOrigins=dict(argstr="--ignoreOrigins "), + inputVolume=dict(argstr="--inputVolume %s..."), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), ) inputs = gtractConcatDwi.input_spec() @@ -33,11 +19,7 @@ def test_gtractConcatDwi_inputs(): def test_gtractConcatDwi_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = gtractConcatDwi.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py index ba03837015..0f29823049 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py @@ -4,28 +4,12 @@ def test_gtractCopyImageOrientation_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputReferenceVolume=dict( - argstr="--inputReferenceVolume %s", - extensions=None, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputReferenceVolume=dict(argstr="--inputReferenceVolume %s", extensions=None), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), ) inputs = gtractCopyImageOrientation.input_spec() @@ -35,11 +19,7 @@ def test_gtractCopyImageOrientation_inputs(): def test_gtractCopyImageOrientation_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = gtractCopyImageOrientation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py index 0122bf7636..0dab7966ce 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py @@ -4,65 +4,24 @@ def test_gtractCoregBvalues_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - debugLevel=dict( - argstr="--debugLevel %d", - ), - eddyCurrentCorrection=dict( - argstr="--eddyCurrentCorrection ", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixedVolume=dict( - argstr="--fixedVolume %s", - extensions=None, - ), - fixedVolumeIndex=dict( - argstr="--fixedVolumeIndex %d", - ), - maximumStepSize=dict( - argstr="--maximumStepSize %f", - ), - minimumStepSize=dict( - argstr="--minimumStepSize %f", - ), - movingVolume=dict( - argstr="--movingVolume %s", - extensions=None, - ), - numberOfIterations=dict( - argstr="--numberOfIterations %d", - ), - numberOfSpatialSamples=dict( - argstr="--numberOfSpatialSamples %d", - ), - 
numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputTransform=dict( - argstr="--outputTransform %s", - hash_files=False, - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - registerB0Only=dict( - argstr="--registerB0Only ", - ), - relaxationFactor=dict( - argstr="--relaxationFactor %f", - ), - samplingPercentage=dict( - argstr="--samplingPercentage %f", - ), - spatialScale=dict( - argstr="--spatialScale %f", - ), + args=dict(argstr="%s"), + debugLevel=dict(argstr="--debugLevel %d"), + eddyCurrentCorrection=dict(argstr="--eddyCurrentCorrection "), + environ=dict(nohash=True, usedefault=True), + fixedVolume=dict(argstr="--fixedVolume %s", extensions=None), + fixedVolumeIndex=dict(argstr="--fixedVolumeIndex %d"), + maximumStepSize=dict(argstr="--maximumStepSize %f"), + minimumStepSize=dict(argstr="--minimumStepSize %f"), + movingVolume=dict(argstr="--movingVolume %s", extensions=None), + numberOfIterations=dict(argstr="--numberOfIterations %d"), + numberOfSpatialSamples=dict(argstr="--numberOfSpatialSamples %d"), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputTransform=dict(argstr="--outputTransform %s", hash_files=False), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + registerB0Only=dict(argstr="--registerB0Only "), + relaxationFactor=dict(argstr="--relaxationFactor %f"), + samplingPercentage=dict(argstr="--samplingPercentage %f"), + spatialScale=dict(argstr="--spatialScale %f"), ) inputs = gtractCoregBvalues.input_spec() @@ -73,12 +32,7 @@ def test_gtractCoregBvalues_inputs(): def test_gtractCoregBvalues_outputs(): output_map = dict( - outputTransform=dict( - extensions=None, - ), - outputVolume=dict( - extensions=None, - ), + outputTransform=dict(extensions=None), outputVolume=dict(extensions=None) ) outputs = gtractCoregBvalues.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py index 7d086cd7c0..7353cee2c3 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py @@ -4,48 +4,22 @@ def test_gtractCostFastMarching_inputs(): input_map = dict( - anisotropyWeight=dict( - argstr="--anisotropyWeight %f", - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + anisotropyWeight=dict(argstr="--anisotropyWeight %f"), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), inputAnisotropyVolume=dict( - argstr="--inputAnisotropyVolume %s", - extensions=None, + argstr="--inputAnisotropyVolume %s", extensions=None ), inputStartingSeedsLabelMapVolume=dict( - argstr="--inputStartingSeedsLabelMapVolume %s", - extensions=None, - ), - inputTensorVolume=dict( - argstr="--inputTensorVolume %s", - extensions=None, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputCostVolume=dict( - argstr="--outputCostVolume %s", - hash_files=False, - ), - outputSpeedVolume=dict( - argstr="--outputSpeedVolume %s", - hash_files=False, - ), - seedThreshold=dict( - argstr="--seedThreshold %f", - ), - startingSeedsLabel=dict( - argstr="--startingSeedsLabel %d", - ), - stoppingValue=dict( - argstr="--stoppingValue %f", - ), + argstr="--inputStartingSeedsLabelMapVolume %s", extensions=None + ), + inputTensorVolume=dict(argstr="--inputTensorVolume %s", extensions=None), + numberOfThreads=dict(argstr="--numberOfThreads 
%d"), + outputCostVolume=dict(argstr="--outputCostVolume %s", hash_files=False), + outputSpeedVolume=dict(argstr="--outputSpeedVolume %s", hash_files=False), + seedThreshold=dict(argstr="--seedThreshold %f"), + startingSeedsLabel=dict(argstr="--startingSeedsLabel %d"), + stoppingValue=dict(argstr="--stoppingValue %f"), ) inputs = gtractCostFastMarching.input_spec() @@ -56,12 +30,7 @@ def test_gtractCostFastMarching_inputs(): def test_gtractCostFastMarching_outputs(): output_map = dict( - outputCostVolume=dict( - extensions=None, - ), - outputSpeedVolume=dict( - extensions=None, - ), + outputCostVolume=dict(extensions=None), outputSpeedVolume=dict(extensions=None) ) outputs = gtractCostFastMarching.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py index 1990cc2057..5d0b75f7e6 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py @@ -4,30 +4,13 @@ def test_gtractCreateGuideFiber_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputFiber=dict( - argstr="--inputFiber %s", - extensions=None, - ), - numberOfPoints=dict( - argstr="--numberOfPoints %d", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputFiber=dict( - argstr="--outputFiber %s", - hash_files=False, - ), - writeXMLPolyDataFile=dict( - argstr="--writeXMLPolyDataFile ", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputFiber=dict(argstr="--inputFiber %s", extensions=None), + numberOfPoints=dict(argstr="--numberOfPoints %d"), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputFiber=dict(argstr="--outputFiber %s", hash_files=False), + writeXMLPolyDataFile=dict(argstr="--writeXMLPolyDataFile "), ) inputs = gtractCreateGuideFiber.input_spec() @@ -37,11 +20,7 @@ def test_gtractCreateGuideFiber_inputs(): def test_gtractCreateGuideFiber_outputs(): - output_map = dict( - outputFiber=dict( - extensions=None, - ), - ) + output_map = dict(outputFiber=dict(extensions=None)) outputs = gtractCreateGuideFiber.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py index 4059d45f6a..202cb0f86d 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py @@ -4,60 +4,26 @@ def test_gtractFastMarchingTracking_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - costStepSize=dict( - argstr="--costStepSize %f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + costStepSize=dict(argstr="--costStepSize %f"), + environ=dict(nohash=True, usedefault=True), inputAnisotropyVolume=dict( - argstr="--inputAnisotropyVolume %s", - extensions=None, - ), - inputCostVolume=dict( - argstr="--inputCostVolume %s", - extensions=None, + argstr="--inputAnisotropyVolume %s", extensions=None ), + inputCostVolume=dict(argstr="--inputCostVolume %s", extensions=None), inputStartingSeedsLabelMapVolume=dict( - argstr="--inputStartingSeedsLabelMapVolume %s", - extensions=None, - ), - inputTensorVolume=dict( - argstr="--inputTensorVolume %s", 
- extensions=None, - ), - maximumStepSize=dict( - argstr="--maximumStepSize %f", - ), - minimumStepSize=dict( - argstr="--minimumStepSize %f", - ), - numberOfIterations=dict( - argstr="--numberOfIterations %d", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputTract=dict( - argstr="--outputTract %s", - hash_files=False, - ), - seedThreshold=dict( - argstr="--seedThreshold %f", - ), - startingSeedsLabel=dict( - argstr="--startingSeedsLabel %d", - ), - trackingThreshold=dict( - argstr="--trackingThreshold %f", - ), - writeXMLPolyDataFile=dict( - argstr="--writeXMLPolyDataFile ", - ), + argstr="--inputStartingSeedsLabelMapVolume %s", extensions=None + ), + inputTensorVolume=dict(argstr="--inputTensorVolume %s", extensions=None), + maximumStepSize=dict(argstr="--maximumStepSize %f"), + minimumStepSize=dict(argstr="--minimumStepSize %f"), + numberOfIterations=dict(argstr="--numberOfIterations %d"), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputTract=dict(argstr="--outputTract %s", hash_files=False), + seedThreshold=dict(argstr="--seedThreshold %f"), + startingSeedsLabel=dict(argstr="--startingSeedsLabel %d"), + trackingThreshold=dict(argstr="--trackingThreshold %f"), + writeXMLPolyDataFile=dict(argstr="--writeXMLPolyDataFile "), ) inputs = gtractFastMarchingTracking.input_spec() @@ -67,11 +33,7 @@ def test_gtractFastMarchingTracking_inputs(): def test_gtractFastMarchingTracking_outputs(): - output_map = dict( - outputTract=dict( - extensions=None, - ), - ) + output_map = dict(outputTract=dict(extensions=None)) outputs = gtractFastMarchingTracking.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py index 9837774d3e..4e3846fb7e 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py @@ -4,103 +4,42 @@ def test_gtractFiberTracking_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - branchingAngle=dict( - argstr="--branchingAngle %f", - ), - branchingThreshold=dict( - argstr="--branchingThreshold %f", - ), - curvatureThreshold=dict( - argstr="--curvatureThreshold %f", - ), - endingSeedsLabel=dict( - argstr="--endingSeedsLabel %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - guidedCurvatureThreshold=dict( - argstr="--guidedCurvatureThreshold %f", - ), + args=dict(argstr="%s"), + branchingAngle=dict(argstr="--branchingAngle %f"), + branchingThreshold=dict(argstr="--branchingThreshold %f"), + curvatureThreshold=dict(argstr="--curvatureThreshold %f"), + endingSeedsLabel=dict(argstr="--endingSeedsLabel %d"), + environ=dict(nohash=True, usedefault=True), + guidedCurvatureThreshold=dict(argstr="--guidedCurvatureThreshold %f"), inputAnisotropyVolume=dict( - argstr="--inputAnisotropyVolume %s", - extensions=None, + argstr="--inputAnisotropyVolume %s", extensions=None ), inputEndingSeedsLabelMapVolume=dict( - argstr="--inputEndingSeedsLabelMapVolume %s", - extensions=None, + argstr="--inputEndingSeedsLabelMapVolume %s", extensions=None ), inputStartingSeedsLabelMapVolume=dict( - argstr="--inputStartingSeedsLabelMapVolume %s", - extensions=None, - ), - inputTensorVolume=dict( - argstr="--inputTensorVolume %s", - extensions=None, - ), - inputTract=dict( - argstr="--inputTract %s", - extensions=None, - ), - maximumBranchPoints=dict( - 
argstr="--maximumBranchPoints %d", - ), - maximumGuideDistance=dict( - argstr="--maximumGuideDistance %f", - ), - maximumLength=dict( - argstr="--maximumLength %f", - ), - minimumLength=dict( - argstr="--minimumLength %f", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputTract=dict( - argstr="--outputTract %s", - hash_files=False, - ), - randomSeed=dict( - argstr="--randomSeed %d", - ), - seedThreshold=dict( - argstr="--seedThreshold %f", - ), - startingSeedsLabel=dict( - argstr="--startingSeedsLabel %d", - ), - stepSize=dict( - argstr="--stepSize %f", - ), - tendF=dict( - argstr="--tendF %f", - ), - tendG=dict( - argstr="--tendG %f", - ), - trackingMethod=dict( - argstr="--trackingMethod %s", - ), - trackingThreshold=dict( - argstr="--trackingThreshold %f", - ), - useLoopDetection=dict( - argstr="--useLoopDetection ", - ), - useRandomWalk=dict( - argstr="--useRandomWalk ", - ), - useTend=dict( - argstr="--useTend ", - ), - writeXMLPolyDataFile=dict( - argstr="--writeXMLPolyDataFile ", - ), + argstr="--inputStartingSeedsLabelMapVolume %s", extensions=None + ), + inputTensorVolume=dict(argstr="--inputTensorVolume %s", extensions=None), + inputTract=dict(argstr="--inputTract %s", extensions=None), + maximumBranchPoints=dict(argstr="--maximumBranchPoints %d"), + maximumGuideDistance=dict(argstr="--maximumGuideDistance %f"), + maximumLength=dict(argstr="--maximumLength %f"), + minimumLength=dict(argstr="--minimumLength %f"), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputTract=dict(argstr="--outputTract %s", hash_files=False), + randomSeed=dict(argstr="--randomSeed %d"), + seedThreshold=dict(argstr="--seedThreshold %f"), + startingSeedsLabel=dict(argstr="--startingSeedsLabel %d"), + stepSize=dict(argstr="--stepSize %f"), + tendF=dict(argstr="--tendF %f"), + tendG=dict(argstr="--tendG %f"), + trackingMethod=dict(argstr="--trackingMethod %s"), + trackingThreshold=dict(argstr="--trackingThreshold %f"), + useLoopDetection=dict(argstr="--useLoopDetection "), + useRandomWalk=dict(argstr="--useRandomWalk "), + useTend=dict(argstr="--useTend "), + writeXMLPolyDataFile=dict(argstr="--writeXMLPolyDataFile "), ) inputs = gtractFiberTracking.input_spec() @@ -110,11 +49,7 @@ def test_gtractFiberTracking_inputs(): def test_gtractFiberTracking_outputs(): - output_map = dict( - outputTract=dict( - extensions=None, - ), - ) + output_map = dict(outputTract=dict(extensions=None)) outputs = gtractFiberTracking.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py index 64b896e0ca..57eadb5d08 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py @@ -4,28 +4,12 @@ def test_gtractImageConformity_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputReferenceVolume=dict( - argstr="--inputReferenceVolume %s", - extensions=None, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputReferenceVolume=dict(argstr="--inputReferenceVolume %s", extensions=None), + 
inputVolume=dict(argstr="--inputVolume %s", extensions=None), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), ) inputs = gtractImageConformity.input_spec() @@ -35,11 +19,7 @@ def test_gtractImageConformity_inputs(): def test_gtractImageConformity_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = gtractImageConformity.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py index ed43c90dc6..9271a04262 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py @@ -4,32 +4,13 @@ def test_gtractInvertBSplineTransform_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputReferenceVolume=dict( - argstr="--inputReferenceVolume %s", - extensions=None, - ), - inputTransform=dict( - argstr="--inputTransform %s", - extensions=None, - ), - landmarkDensity=dict( - argstr="--landmarkDensity %s", - sep=",", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputTransform=dict( - argstr="--outputTransform %s", - hash_files=False, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputReferenceVolume=dict(argstr="--inputReferenceVolume %s", extensions=None), + inputTransform=dict(argstr="--inputTransform %s", extensions=None), + landmarkDensity=dict(argstr="--landmarkDensity %s", sep=","), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputTransform=dict(argstr="--outputTransform %s", hash_files=False), ) inputs = gtractInvertBSplineTransform.input_spec() @@ -39,11 +20,7 @@ def test_gtractInvertBSplineTransform_inputs(): def test_gtractInvertBSplineTransform_outputs(): - output_map = dict( - outputTransform=dict( - extensions=None, - ), - ) + output_map = dict(outputTransform=dict(extensions=None)) outputs = gtractInvertBSplineTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py index 83129902aa..033a325642 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py @@ -4,31 +4,13 @@ def test_gtractInvertDisplacementField_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - baseImage=dict( - argstr="--baseImage %s", - extensions=None, - ), - deformationImage=dict( - argstr="--deformationImage %s", - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - subsamplingFactor=dict( - argstr="--subsamplingFactor %d", - ), + args=dict(argstr="%s"), + baseImage=dict(argstr="--baseImage %s", extensions=None), + deformationImage=dict(argstr="--deformationImage %s", extensions=None), + environ=dict(nohash=True, usedefault=True), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + 
outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + subsamplingFactor=dict(argstr="--subsamplingFactor %d"), ) inputs = gtractInvertDisplacementField.input_spec() @@ -38,11 +20,7 @@ def test_gtractInvertDisplacementField_inputs(): def test_gtractInvertDisplacementField_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = gtractInvertDisplacementField.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py index 73ba9c576f..e6030498f3 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py @@ -4,24 +4,11 @@ def test_gtractInvertRigidTransform_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputTransform=dict( - argstr="--inputTransform %s", - extensions=None, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputTransform=dict( - argstr="--outputTransform %s", - hash_files=False, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputTransform=dict(argstr="--inputTransform %s", extensions=None), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputTransform=dict(argstr="--outputTransform %s", hash_files=False), ) inputs = gtractInvertRigidTransform.input_spec() @@ -31,11 +18,7 @@ def test_gtractInvertRigidTransform_inputs(): def test_gtractInvertRigidTransform_outputs(): - output_map = dict( - outputTransform=dict( - extensions=None, - ), - ) + output_map = dict(outputTransform=dict(extensions=None)) outputs = gtractInvertRigidTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py index 7b38abe0b5..14477f1b61 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py @@ -4,35 +4,18 @@ def test_gtractResampleAnisotropy_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), inputAnatomicalVolume=dict( - argstr="--inputAnatomicalVolume %s", - extensions=None, + argstr="--inputAnatomicalVolume %s", extensions=None ), inputAnisotropyVolume=dict( - argstr="--inputAnisotropyVolume %s", - extensions=None, - ), - inputTransform=dict( - argstr="--inputTransform %s", - extensions=None, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - transformType=dict( - argstr="--transformType %s", + argstr="--inputAnisotropyVolume %s", extensions=None ), + inputTransform=dict(argstr="--inputTransform %s", extensions=None), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + transformType=dict(argstr="--transformType %s"), ) inputs = gtractResampleAnisotropy.input_spec() @@ -42,11 +25,7 @@ def test_gtractResampleAnisotropy_inputs(): def 
test_gtractResampleAnisotropy_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = gtractResampleAnisotropy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py index 7271e8a42a..c0b27b8ad9 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py @@ -4,38 +4,17 @@ def test_gtractResampleB0_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), inputAnatomicalVolume=dict( - argstr="--inputAnatomicalVolume %s", - extensions=None, - ), - inputTransform=dict( - argstr="--inputTransform %s", - extensions=None, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - transformType=dict( - argstr="--transformType %s", - ), - vectorIndex=dict( - argstr="--vectorIndex %d", - ), + argstr="--inputAnatomicalVolume %s", extensions=None + ), + inputTransform=dict(argstr="--inputTransform %s", extensions=None), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + transformType=dict(argstr="--transformType %s"), + vectorIndex=dict(argstr="--vectorIndex %d"), ) inputs = gtractResampleB0.input_spec() @@ -45,11 +24,7 @@ def test_gtractResampleB0_inputs(): def test_gtractResampleB0_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = gtractResampleB0.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py index 6649ecfc1f..8ec22d7e7d 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py @@ -4,35 +4,14 @@ def test_gtractResampleCodeImage_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputCodeVolume=dict( - argstr="--inputCodeVolume %s", - extensions=None, - ), - inputReferenceVolume=dict( - argstr="--inputReferenceVolume %s", - extensions=None, - ), - inputTransform=dict( - argstr="--inputTransform %s", - extensions=None, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - transformType=dict( - argstr="--transformType %s", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputCodeVolume=dict(argstr="--inputCodeVolume %s", extensions=None), + inputReferenceVolume=dict(argstr="--inputReferenceVolume %s", extensions=None), + inputTransform=dict(argstr="--inputTransform %s", extensions=None), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + 
transformType=dict(argstr="--transformType %s"), ) inputs = gtractResampleCodeImage.input_spec() @@ -42,11 +21,7 @@ def test_gtractResampleCodeImage_inputs(): def test_gtractResampleCodeImage_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = gtractResampleCodeImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py index 3b61312e54..827433588e 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py @@ -4,47 +4,17 @@ def test_gtractResampleDWIInPlace_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - debugLevel=dict( - argstr="--debugLevel %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - imageOutputSize=dict( - argstr="--imageOutputSize %s", - sep=",", - ), - inputTransform=dict( - argstr="--inputTransform %s", - extensions=None, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputResampledB0=dict( - argstr="--outputResampledB0 %s", - hash_files=False, - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - referenceVolume=dict( - argstr="--referenceVolume %s", - extensions=None, - ), - warpDWITransform=dict( - argstr="--warpDWITransform %s", - extensions=None, - ), + args=dict(argstr="%s"), + debugLevel=dict(argstr="--debugLevel %d"), + environ=dict(nohash=True, usedefault=True), + imageOutputSize=dict(argstr="--imageOutputSize %s", sep=","), + inputTransform=dict(argstr="--inputTransform %s", extensions=None), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputResampledB0=dict(argstr="--outputResampledB0 %s", hash_files=False), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + referenceVolume=dict(argstr="--referenceVolume %s", extensions=None), + warpDWITransform=dict(argstr="--warpDWITransform %s", extensions=None), ) inputs = gtractResampleDWIInPlace.input_spec() @@ -55,12 +25,7 @@ def test_gtractResampleDWIInPlace_inputs(): def test_gtractResampleDWIInPlace_outputs(): output_map = dict( - outputResampledB0=dict( - extensions=None, - ), - outputVolume=dict( - extensions=None, - ), + outputResampledB0=dict(extensions=None), outputVolume=dict(extensions=None) ) outputs = gtractResampleDWIInPlace.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py index d64d2d8581..2c342945c8 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py @@ -4,35 +4,18 @@ def test_gtractResampleFibers_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), inputForwardDeformationFieldVolume=dict( - argstr="--inputForwardDeformationFieldVolume %s", - extensions=None, + argstr="--inputForwardDeformationFieldVolume %s", extensions=None ), inputReverseDeformationFieldVolume=dict( - 
argstr="--inputReverseDeformationFieldVolume %s", - extensions=None, - ), - inputTract=dict( - argstr="--inputTract %s", - extensions=None, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputTract=dict( - argstr="--outputTract %s", - hash_files=False, - ), - writeXMLPolyDataFile=dict( - argstr="--writeXMLPolyDataFile ", + argstr="--inputReverseDeformationFieldVolume %s", extensions=None ), + inputTract=dict(argstr="--inputTract %s", extensions=None), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputTract=dict(argstr="--outputTract %s", hash_files=False), + writeXMLPolyDataFile=dict(argstr="--writeXMLPolyDataFile "), ) inputs = gtractResampleFibers.input_spec() @@ -42,11 +25,7 @@ def test_gtractResampleFibers_inputs(): def test_gtractResampleFibers_outputs(): - output_map = dict( - outputTract=dict( - extensions=None, - ), - ) + output_map = dict(outputTract=dict(extensions=None)) outputs = gtractResampleFibers.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py index eabe7c6f50..d21d4a11a6 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py @@ -4,54 +4,22 @@ def test_gtractTensor_inputs(): input_map = dict( - applyMeasurementFrame=dict( - argstr="--applyMeasurementFrame ", - ), - args=dict( - argstr="%s", - ), - b0Index=dict( - argstr="--b0Index %d", - ), + applyMeasurementFrame=dict(argstr="--applyMeasurementFrame "), + args=dict(argstr="%s"), + b0Index=dict(argstr="--b0Index %d"), backgroundSuppressingThreshold=dict( - argstr="--backgroundSuppressingThreshold %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - ignoreIndex=dict( - argstr="--ignoreIndex %s", - sep=",", - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - maskProcessingMode=dict( - argstr="--maskProcessingMode %s", - ), - maskVolume=dict( - argstr="--maskVolume %s", - extensions=None, - ), - medianFilterSize=dict( - argstr="--medianFilterSize %s", - sep=",", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - resampleIsotropic=dict( - argstr="--resampleIsotropic ", - ), - size=dict( - argstr="--size %f", - ), + argstr="--backgroundSuppressingThreshold %d" + ), + environ=dict(nohash=True, usedefault=True), + ignoreIndex=dict(argstr="--ignoreIndex %s", sep=","), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + maskProcessingMode=dict(argstr="--maskProcessingMode %s"), + maskVolume=dict(argstr="--maskVolume %s", extensions=None), + medianFilterSize=dict(argstr="--medianFilterSize %s", sep=","), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + resampleIsotropic=dict(argstr="--resampleIsotropic "), + size=dict(argstr="--size %f"), ) inputs = gtractTensor.input_spec() @@ -61,11 +29,7 @@ def test_gtractTensor_inputs(): def test_gtractTensor_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = gtractTensor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py 
b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py index 64daec32fb..234fbb38e8 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py @@ -4,27 +4,13 @@ def test_gtractTransformToDisplacementField_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputReferenceVolume=dict( - argstr="--inputReferenceVolume %s", - extensions=None, - ), - inputTransform=dict( - argstr="--inputTransform %s", - extensions=None, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputReferenceVolume=dict(argstr="--inputReferenceVolume %s", extensions=None), + inputTransform=dict(argstr="--inputTransform %s", extensions=None), + numberOfThreads=dict(argstr="--numberOfThreads %d"), outputDeformationFieldVolume=dict( - argstr="--outputDeformationFieldVolume %s", - hash_files=False, + argstr="--outputDeformationFieldVolume %s", hash_files=False ), ) inputs = gtractTransformToDisplacementField.input_spec() @@ -35,11 +21,7 @@ def test_gtractTransformToDisplacementField_inputs(): def test_gtractTransformToDisplacementField_outputs(): - output_map = dict( - outputDeformationFieldVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputDeformationFieldVolume=dict(extensions=None)) outputs = gtractTransformToDisplacementField.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py index 6638ef34cd..b37f51e28c 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py @@ -4,27 +4,12 @@ def test_maxcurvature_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - image=dict( - argstr="--image %s", - extensions=None, - ), - output=dict( - argstr="--output %s", - hash_files=False, - ), - sigma=dict( - argstr="--sigma %f", - ), - verbose=dict( - argstr="--verbose ", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + image=dict(argstr="--image %s", extensions=None), + output=dict(argstr="--output %s", hash_files=False), + sigma=dict(argstr="--sigma %f"), + verbose=dict(argstr="--verbose "), ) inputs = maxcurvature.input_spec() @@ -34,11 +19,7 @@ def test_maxcurvature_inputs(): def test_maxcurvature_outputs(): - output_map = dict( - output=dict( - extensions=None, - ), - ) + output_map = dict(output=dict(extensions=None)) outputs = maxcurvature.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py index 92050c6e43..62e18e16d0 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py @@ -4,118 +4,43 @@ def test_UKFTractography_inputs(): input_map = dict( - Ql=dict( - argstr="--Ql %f", - ), - Qm=dict( - argstr="--Qm %f", - ), - Qw=dict( - argstr="--Qw %f", - ), - Rs=dict( - argstr="--Rs %f", - ), - args=dict( - argstr="%s", - 
), - dwiFile=dict( - argstr="--dwiFile %s", - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - freeWater=dict( - argstr="--freeWater ", - ), - fullTensorModel=dict( - argstr="--fullTensorModel ", - ), - labels=dict( - argstr="--labels %s", - sep=",", - ), - maskFile=dict( - argstr="--maskFile %s", - extensions=None, - ), - maxBranchingAngle=dict( - argstr="--maxBranchingAngle %f", - ), - maxHalfFiberLength=dict( - argstr="--maxHalfFiberLength %f", - ), - minBranchingAngle=dict( - argstr="--minBranchingAngle %f", - ), - minFA=dict( - argstr="--minFA %f", - ), - minGA=dict( - argstr="--minGA %f", - ), - numTensor=dict( - argstr="--numTensor %s", - ), - numThreads=dict( - argstr="--numThreads %d", - ), - recordCovariance=dict( - argstr="--recordCovariance ", - ), - recordFA=dict( - argstr="--recordFA ", - ), - recordFreeWater=dict( - argstr="--recordFreeWater ", - ), - recordLength=dict( - argstr="--recordLength %f", - ), - recordNMSE=dict( - argstr="--recordNMSE ", - ), - recordState=dict( - argstr="--recordState ", - ), - recordTensors=dict( - argstr="--recordTensors ", - ), - recordTrace=dict( - argstr="--recordTrace ", - ), - seedFALimit=dict( - argstr="--seedFALimit %f", - ), - seedsFile=dict( - argstr="--seedsFile %s", - extensions=None, - ), - seedsPerVoxel=dict( - argstr="--seedsPerVoxel %d", - ), - stepLength=dict( - argstr="--stepLength %f", - ), - storeGlyphs=dict( - argstr="--storeGlyphs ", - ), - tracts=dict( - argstr="--tracts %s", - hash_files=False, - ), + Ql=dict(argstr="--Ql %f"), + Qm=dict(argstr="--Qm %f"), + Qw=dict(argstr="--Qw %f"), + Rs=dict(argstr="--Rs %f"), + args=dict(argstr="%s"), + dwiFile=dict(argstr="--dwiFile %s", extensions=None), + environ=dict(nohash=True, usedefault=True), + freeWater=dict(argstr="--freeWater "), + fullTensorModel=dict(argstr="--fullTensorModel "), + labels=dict(argstr="--labels %s", sep=","), + maskFile=dict(argstr="--maskFile %s", extensions=None), + maxBranchingAngle=dict(argstr="--maxBranchingAngle %f"), + maxHalfFiberLength=dict(argstr="--maxHalfFiberLength %f"), + minBranchingAngle=dict(argstr="--minBranchingAngle %f"), + minFA=dict(argstr="--minFA %f"), + minGA=dict(argstr="--minGA %f"), + numTensor=dict(argstr="--numTensor %s"), + numThreads=dict(argstr="--numThreads %d"), + recordCovariance=dict(argstr="--recordCovariance "), + recordFA=dict(argstr="--recordFA "), + recordFreeWater=dict(argstr="--recordFreeWater "), + recordLength=dict(argstr="--recordLength %f"), + recordNMSE=dict(argstr="--recordNMSE "), + recordState=dict(argstr="--recordState "), + recordTensors=dict(argstr="--recordTensors "), + recordTrace=dict(argstr="--recordTrace "), + seedFALimit=dict(argstr="--seedFALimit %f"), + seedsFile=dict(argstr="--seedsFile %s", extensions=None), + seedsPerVoxel=dict(argstr="--seedsPerVoxel %d"), + stepLength=dict(argstr="--stepLength %f"), + storeGlyphs=dict(argstr="--storeGlyphs "), + tracts=dict(argstr="--tracts %s", hash_files=False), tractsWithSecondTensor=dict( - argstr="--tractsWithSecondTensor %s", - hash_files=False, - ), - writeAsciiTracts=dict( - argstr="--writeAsciiTracts ", - ), - writeUncompressedTracts=dict( - argstr="--writeUncompressedTracts ", + argstr="--tractsWithSecondTensor %s", hash_files=False ), + writeAsciiTracts=dict(argstr="--writeAsciiTracts "), + writeUncompressedTracts=dict(argstr="--writeUncompressedTracts "), ) inputs = UKFTractography.input_spec() @@ -126,12 +51,7 @@ def test_UKFTractography_inputs(): def test_UKFTractography_outputs(): output_map = dict( - 
tracts=dict( - extensions=None, - ), - tractsWithSecondTensor=dict( - extensions=None, - ), + tracts=dict(extensions=None), tractsWithSecondTensor=dict(extensions=None) ) outputs = UKFTractography.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py index 506d3f8f90..63fcdccc71 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py @@ -4,61 +4,22 @@ def test_fiberprocess_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - displacement_field=dict( - argstr="--displacement_field %s", - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fiber_file=dict( - argstr="--fiber_file %s", - extensions=None, - ), - fiber_output=dict( - argstr="--fiber_output %s", - hash_files=False, - ), - fiber_radius=dict( - argstr="--fiber_radius %f", - ), - h_field=dict( - argstr="--h_field %s", - extensions=None, - ), - index_space=dict( - argstr="--index_space ", - ), - noDataChange=dict( - argstr="--noDataChange ", - ), - no_warp=dict( - argstr="--no_warp ", - ), - saveProperties=dict( - argstr="--saveProperties ", - ), - tensor_volume=dict( - argstr="--tensor_volume %s", - extensions=None, - ), - verbose=dict( - argstr="--verbose ", - ), - voxel_label=dict( - argstr="--voxel_label %d", - ), - voxelize=dict( - argstr="--voxelize %s", - hash_files=False, - ), - voxelize_count_fibers=dict( - argstr="--voxelize_count_fibers ", - ), + args=dict(argstr="%s"), + displacement_field=dict(argstr="--displacement_field %s", extensions=None), + environ=dict(nohash=True, usedefault=True), + fiber_file=dict(argstr="--fiber_file %s", extensions=None), + fiber_output=dict(argstr="--fiber_output %s", hash_files=False), + fiber_radius=dict(argstr="--fiber_radius %f"), + h_field=dict(argstr="--h_field %s", extensions=None), + index_space=dict(argstr="--index_space "), + noDataChange=dict(argstr="--noDataChange "), + no_warp=dict(argstr="--no_warp "), + saveProperties=dict(argstr="--saveProperties "), + tensor_volume=dict(argstr="--tensor_volume %s", extensions=None), + verbose=dict(argstr="--verbose "), + voxel_label=dict(argstr="--voxel_label %d"), + voxelize=dict(argstr="--voxelize %s", hash_files=False), + voxelize_count_fibers=dict(argstr="--voxelize_count_fibers "), ) inputs = fiberprocess.input_spec() @@ -69,12 +30,7 @@ def test_fiberprocess_inputs(): def test_fiberprocess_outputs(): output_map = dict( - fiber_output=dict( - extensions=None, - ), - voxelize=dict( - extensions=None, - ), + fiber_output=dict(extensions=None), voxelize=dict(extensions=None) ) outputs = fiberprocess.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py index a9df738d28..7ceb5d7579 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py @@ -4,20 +4,10 @@ def test_fiberstats_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fiber_file=dict( - argstr="--fiber_file %s", - extensions=None, - ), - verbose=dict( - argstr="--verbose ", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + 
fiber_file=dict(argstr="--fiber_file %s", extensions=None), + verbose=dict(argstr="--verbose "), ) inputs = fiberstats.input_spec() diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py index 28798b14ff..2a34252eb3 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py @@ -4,55 +4,21 @@ def test_fibertrack_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - forbidden_label=dict( - argstr="--forbidden_label %d", - ), - force=dict( - argstr="--force ", - ), - input_roi_file=dict( - argstr="--input_roi_file %s", - extensions=None, - ), - input_tensor_file=dict( - argstr="--input_tensor_file %s", - extensions=None, - ), - max_angle=dict( - argstr="--max_angle %f", - ), - min_fa=dict( - argstr="--min_fa %f", - ), - output_fiber_file=dict( - argstr="--output_fiber_file %s", - hash_files=False, - ), - really_verbose=dict( - argstr="--really_verbose ", - ), - source_label=dict( - argstr="--source_label %d", - ), - step_size=dict( - argstr="--step_size %f", - ), - target_label=dict( - argstr="--target_label %d", - ), - verbose=dict( - argstr="--verbose ", - ), - whole_brain=dict( - argstr="--whole_brain ", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + forbidden_label=dict(argstr="--forbidden_label %d"), + force=dict(argstr="--force "), + input_roi_file=dict(argstr="--input_roi_file %s", extensions=None), + input_tensor_file=dict(argstr="--input_tensor_file %s", extensions=None), + max_angle=dict(argstr="--max_angle %f"), + min_fa=dict(argstr="--min_fa %f"), + output_fiber_file=dict(argstr="--output_fiber_file %s", hash_files=False), + really_verbose=dict(argstr="--really_verbose "), + source_label=dict(argstr="--source_label %d"), + step_size=dict(argstr="--step_size %f"), + target_label=dict(argstr="--target_label %d"), + verbose=dict(argstr="--verbose "), + whole_brain=dict(argstr="--whole_brain "), ) inputs = fibertrack.input_spec() @@ -62,11 +28,7 @@ def test_fibertrack_inputs(): def test_fibertrack_outputs(): - output_map = dict( - output_fiber_file=dict( - extensions=None, - ), - ) + output_map = dict(output_fiber_file=dict(extensions=None)) outputs = fibertrack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py b/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py index 222c460279..7bee4b1cbb 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py @@ -4,30 +4,13 @@ def test_CannyEdge_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - lowerThreshold=dict( - argstr="--lowerThreshold %f", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - upperThreshold=dict( - argstr="--upperThreshold %f", - ), - variance=dict( - argstr="--variance %f", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + lowerThreshold=dict(argstr="--lowerThreshold %f"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), 
+ upperThreshold=dict(argstr="--upperThreshold %f"), + variance=dict(argstr="--variance %f"), ) inputs = CannyEdge.input_spec() @@ -37,11 +20,7 @@ def test_CannyEdge_inputs(): def test_CannyEdge_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = CannyEdge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py index 21f8e5da6f..58b492e14b 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py @@ -4,44 +4,17 @@ def test_CannySegmentationLevelSetImageFilter_inputs(): input_map = dict( - advectionWeight=dict( - argstr="--advectionWeight %f", - ), - args=dict( - argstr="%s", - ), - cannyThreshold=dict( - argstr="--cannyThreshold %f", - ), - cannyVariance=dict( - argstr="--cannyVariance %f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - initialModel=dict( - argstr="--initialModel %s", - extensions=None, - ), - initialModelIsovalue=dict( - argstr="--initialModelIsovalue %f", - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - maxIterations=dict( - argstr="--maxIterations %d", - ), - outputSpeedVolume=dict( - argstr="--outputSpeedVolume %s", - hash_files=False, - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), + advectionWeight=dict(argstr="--advectionWeight %f"), + args=dict(argstr="%s"), + cannyThreshold=dict(argstr="--cannyThreshold %f"), + cannyVariance=dict(argstr="--cannyVariance %f"), + environ=dict(nohash=True, usedefault=True), + initialModel=dict(argstr="--initialModel %s", extensions=None), + initialModelIsovalue=dict(argstr="--initialModelIsovalue %f"), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + maxIterations=dict(argstr="--maxIterations %d"), + outputSpeedVolume=dict(argstr="--outputSpeedVolume %s", hash_files=False), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), ) inputs = CannySegmentationLevelSetImageFilter.input_spec() @@ -52,12 +25,7 @@ def test_CannySegmentationLevelSetImageFilter_inputs(): def test_CannySegmentationLevelSetImageFilter_outputs(): output_map = dict( - outputSpeedVolume=dict( - extensions=None, - ), - outputVolume=dict( - extensions=None, - ), + outputSpeedVolume=dict(extensions=None), outputVolume=dict(extensions=None) ) outputs = CannySegmentationLevelSetImageFilter.output_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py index 2be2940aeb..700db119a7 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py @@ -4,28 +4,12 @@ def test_DilateImage_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputMaskVolume=dict( - argstr="--inputMaskVolume %s", - extensions=None, - ), - inputRadius=dict( - argstr="--inputRadius %d", - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, 
usedefault=True), + inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None), + inputRadius=dict(argstr="--inputRadius %d"), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), ) inputs = DilateImage.input_spec() @@ -35,11 +19,7 @@ def test_DilateImage_inputs(): def test_DilateImage_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = DilateImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py index 66cc444a94..28c7c239f8 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py @@ -4,31 +4,13 @@ def test_DilateMask_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputBinaryVolume=dict( - argstr="--inputBinaryVolume %s", - extensions=None, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - lowerThreshold=dict( - argstr="--lowerThreshold %f", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - sizeStructuralElement=dict( - argstr="--sizeStructuralElement %d", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputBinaryVolume=dict(argstr="--inputBinaryVolume %s", extensions=None), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + lowerThreshold=dict(argstr="--lowerThreshold %f"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + sizeStructuralElement=dict(argstr="--sizeStructuralElement %d"), ) inputs = DilateMask.input_spec() @@ -38,11 +20,7 @@ def test_DilateMask_inputs(): def test_DilateMask_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = DilateMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py index 255145cfc6..0018acc6fd 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py @@ -4,28 +4,12 @@ def test_DistanceMaps_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputLabelVolume=dict( - argstr="--inputLabelVolume %s", - extensions=None, - ), - inputMaskVolume=dict( - argstr="--inputMaskVolume %s", - extensions=None, - ), - inputTissueLabel=dict( - argstr="--inputTissueLabel %d", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputLabelVolume=dict(argstr="--inputLabelVolume %s", extensions=None), + inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None), + inputTissueLabel=dict(argstr="--inputTissueLabel %d"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), ) inputs = DistanceMaps.input_spec() @@ -35,11 +19,7 @@ def test_DistanceMaps_inputs(): def test_DistanceMaps_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = 
dict(outputVolume=dict(extensions=None)) outputs = DistanceMaps.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py index b62a21fdbe..2e06afc214 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py @@ -4,21 +4,10 @@ def test_DumpBinaryTrainingVectors_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputHeaderFilename=dict( - argstr="--inputHeaderFilename %s", - extensions=None, - ), - inputVectorFilename=dict( - argstr="--inputVectorFilename %s", - extensions=None, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputHeaderFilename=dict(argstr="--inputHeaderFilename %s", extensions=None), + inputVectorFilename=dict(argstr="--inputVectorFilename %s", extensions=None), ) inputs = DumpBinaryTrainingVectors.input_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py index ae7140754f..2752e5b306 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py @@ -4,28 +4,12 @@ def test_ErodeImage_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputMaskVolume=dict( - argstr="--inputMaskVolume %s", - extensions=None, - ), - inputRadius=dict( - argstr="--inputRadius %d", - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None), + inputRadius=dict(argstr="--inputRadius %d"), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), ) inputs = ErodeImage.input_spec() @@ -35,11 +19,7 @@ def test_ErodeImage_inputs(): def test_ErodeImage_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = ErodeImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py b/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py index 704bc01820..d10d66e618 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py @@ -4,25 +4,11 @@ def test_FlippedDifference_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputMaskVolume=dict( - argstr="--inputMaskVolume %s", - extensions=None, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + 
outputVolume=dict(argstr="--outputVolume %s", hash_files=False), ) inputs = FlippedDifference.input_spec() @@ -32,11 +18,7 @@ def test_FlippedDifference_inputs(): def test_FlippedDifference_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = FlippedDifference.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py index e48d622ab3..7e7827ac7b 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py @@ -4,28 +4,12 @@ def test_GenerateBrainClippedImage_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputImg=dict( - argstr="--inputImg %s", - extensions=None, - ), - inputMsk=dict( - argstr="--inputMsk %s", - extensions=None, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputFileName=dict( - argstr="--outputFileName %s", - hash_files=False, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputImg=dict(argstr="--inputImg %s", extensions=None), + inputMsk=dict(argstr="--inputMsk %s", extensions=None), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputFileName=dict(argstr="--outputFileName %s", hash_files=False), ) inputs = GenerateBrainClippedImage.input_spec() @@ -35,11 +19,7 @@ def test_GenerateBrainClippedImage_inputs(): def test_GenerateBrainClippedImage_outputs(): - output_map = dict( - outputFileName=dict( - extensions=None, - ), - ) + output_map = dict(outputFileName=dict(extensions=None)) outputs = GenerateBrainClippedImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py index b53396f396..7ee1cbd5d2 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py @@ -4,31 +4,13 @@ def test_GenerateSummedGradientImage_inputs(): input_map = dict( - MaximumGradient=dict( - argstr="--MaximumGradient ", - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume1=dict( - argstr="--inputVolume1 %s", - extensions=None, - ), - inputVolume2=dict( - argstr="--inputVolume2 %s", - extensions=None, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputFileName=dict( - argstr="--outputFileName %s", - hash_files=False, - ), + MaximumGradient=dict(argstr="--MaximumGradient "), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputVolume1=dict(argstr="--inputVolume1 %s", extensions=None), + inputVolume2=dict(argstr="--inputVolume2 %s", extensions=None), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputFileName=dict(argstr="--outputFileName %s", hash_files=False), ) inputs = GenerateSummedGradientImage.input_spec() @@ -38,11 +20,7 @@ def test_GenerateSummedGradientImage_inputs(): def test_GenerateSummedGradientImage_outputs(): - output_map = dict( - outputFileName=dict( - extensions=None, - ), - ) + output_map = dict(outputFileName=dict(extensions=None)) 
outputs = GenerateSummedGradientImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py index c6e8fdfb12..116a3d6cd6 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py @@ -4,30 +4,13 @@ def test_GenerateTestImage_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - lowerBoundOfOutputVolume=dict( - argstr="--lowerBoundOfOutputVolume %f", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - outputVolumeSize=dict( - argstr="--outputVolumeSize %f", - ), - upperBoundOfOutputVolume=dict( - argstr="--upperBoundOfOutputVolume %f", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + lowerBoundOfOutputVolume=dict(argstr="--lowerBoundOfOutputVolume %f"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + outputVolumeSize=dict(argstr="--outputVolumeSize %f"), + upperBoundOfOutputVolume=dict(argstr="--upperBoundOfOutputVolume %f"), ) inputs = GenerateTestImage.input_spec() @@ -37,11 +20,7 @@ def test_GenerateTestImage_inputs(): def test_GenerateTestImage_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = GenerateTestImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py index c7828aaccc..e25d090992 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py @@ -4,30 +4,13 @@ def test_GradientAnisotropicDiffusionImageFilter_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - conductance=dict( - argstr="--conductance %f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - numberOfIterations=dict( - argstr="--numberOfIterations %d", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - timeStep=dict( - argstr="--timeStep %f", - ), + args=dict(argstr="%s"), + conductance=dict(argstr="--conductance %f"), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + numberOfIterations=dict(argstr="--numberOfIterations %d"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + timeStep=dict(argstr="--timeStep %f"), ) inputs = GradientAnisotropicDiffusionImageFilter.input_spec() @@ -37,11 +20,7 @@ def test_GradientAnisotropicDiffusionImageFilter_inputs(): def test_GradientAnisotropicDiffusionImageFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = GradientAnisotropicDiffusionImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py b/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py index 8188ad0432..e8d82e3316 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py @@ -4,34 +4,14 @@ def test_HammerAttributeCreator_inputs(): input_map = dict( - Scale=dict( - argstr="--Scale %d", - ), - Strength=dict( - argstr="--Strength %f", - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputCSFVolume=dict( - argstr="--inputCSFVolume %s", - extensions=None, - ), - inputGMVolume=dict( - argstr="--inputGMVolume %s", - extensions=None, - ), - inputWMVolume=dict( - argstr="--inputWMVolume %s", - extensions=None, - ), - outputVolumeBase=dict( - argstr="--outputVolumeBase %s", - ), + Scale=dict(argstr="--Scale %d"), + Strength=dict(argstr="--Strength %f"), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputCSFVolume=dict(argstr="--inputCSFVolume %s", extensions=None), + inputGMVolume=dict(argstr="--inputGMVolume %s", extensions=None), + inputWMVolume=dict(argstr="--inputWMVolume %s", extensions=None), + outputVolumeBase=dict(argstr="--outputVolumeBase %s"), ) inputs = HammerAttributeCreator.input_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py index 839bfe5f11..e5d801e261 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py @@ -4,28 +4,12 @@ def test_NeighborhoodMean_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputMaskVolume=dict( - argstr="--inputMaskVolume %s", - extensions=None, - ), - inputRadius=dict( - argstr="--inputRadius %d", - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None), + inputRadius=dict(argstr="--inputRadius %d"), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), ) inputs = NeighborhoodMean.input_spec() @@ -35,11 +19,7 @@ def test_NeighborhoodMean_inputs(): def test_NeighborhoodMean_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = NeighborhoodMean.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py index 476a93595e..ead623c7bb 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py @@ -4,28 +4,12 @@ def test_NeighborhoodMedian_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputMaskVolume=dict( - argstr="--inputMaskVolume %s", - extensions=None, - ), - inputRadius=dict( - argstr="--inputRadius %d", - ), - inputVolume=dict( - argstr="--inputVolume 
%s", - extensions=None, - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None), + inputRadius=dict(argstr="--inputRadius %d"), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), ) inputs = NeighborhoodMedian.input_spec() @@ -35,11 +19,7 @@ def test_NeighborhoodMedian_inputs(): def test_NeighborhoodMedian_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = NeighborhoodMedian.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py b/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py index f6ee369414..1b01e17bcf 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py @@ -4,23 +4,11 @@ def test_STAPLEAnalysis_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputDimension=dict( - argstr="--inputDimension %d", - ), - inputLabelVolume=dict( - argstr="--inputLabelVolume %s...", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputDimension=dict(argstr="--inputDimension %d"), + inputLabelVolume=dict(argstr="--inputLabelVolume %s..."), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), ) inputs = STAPLEAnalysis.input_spec() @@ -30,11 +18,7 @@ def test_STAPLEAnalysis_inputs(): def test_STAPLEAnalysis_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = STAPLEAnalysis.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py index e16d7a9522..d96e7e487f 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py @@ -4,24 +4,11 @@ def test_TextureFromNoiseImageFilter_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputRadius=dict( - argstr="--inputRadius %d", - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputRadius=dict(argstr="--inputRadius %d"), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), ) inputs = TextureFromNoiseImageFilter.input_spec() @@ -31,11 +18,7 @@ def test_TextureFromNoiseImageFilter_inputs(): def test_TextureFromNoiseImageFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = TextureFromNoiseImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py index 7e6c4f6263..6cef038c09 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py @@ -4,31 +4,13 @@ def test_TextureMeasureFilter_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - distance=dict( - argstr="--distance %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputMaskVolume=dict( - argstr="--inputMaskVolume %s", - extensions=None, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - insideROIValue=dict( - argstr="--insideROIValue %f", - ), - outputFilename=dict( - argstr="--outputFilename %s", - hash_files=False, - ), + args=dict(argstr="%s"), + distance=dict(argstr="--distance %d"), + environ=dict(nohash=True, usedefault=True), + inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + insideROIValue=dict(argstr="--insideROIValue %f"), + outputFilename=dict(argstr="--outputFilename %s", hash_files=False), ) inputs = TextureMeasureFilter.input_spec() @@ -38,11 +20,7 @@ def test_TextureMeasureFilter_inputs(): def test_TextureMeasureFilter_outputs(): - output_map = dict( - outputFilename=dict( - extensions=None, - ), - ) + output_map = dict(outputFilename=dict(extensions=None)) outputs = TextureMeasureFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py b/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py index 194d556a8e..a68b1fd398 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py @@ -4,40 +4,15 @@ def test_UnbiasedNonLocalMeans_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - hp=dict( - argstr="--hp %f", - ), - inputVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - outputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - ps=dict( - argstr="--ps %f", - ), - rc=dict( - argstr="--rc %s", - sep=",", - ), - rs=dict( - argstr="--rs %s", - sep=",", - ), - sigma=dict( - argstr="--sigma %f", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + hp=dict(argstr="--hp %f"), + inputVolume=dict(argstr="%s", extensions=None, position=-2), + outputVolume=dict(argstr="%s", hash_files=False, position=-1), + ps=dict(argstr="--ps %f"), + rc=dict(argstr="--rc %s", sep=","), + rs=dict(argstr="--rs %s", sep=","), + sigma=dict(argstr="--sigma %f"), ) inputs = UnbiasedNonLocalMeans.input_spec() @@ -47,12 +22,7 @@ def test_UnbiasedNonLocalMeans_inputs(): def test_UnbiasedNonLocalMeans_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(outputVolume=dict(extensions=None, position=-1)) outputs = UnbiasedNonLocalMeans.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py b/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py index 6af2e14039..0a0dd8d6f7 100644 --- a/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py +++ 
b/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py @@ -4,38 +4,15 @@ def test_scalartransform_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - deformation=dict( - argstr="--deformation %s", - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - h_field=dict( - argstr="--h_field ", - ), - input_image=dict( - argstr="--input_image %s", - extensions=None, - ), - interpolation=dict( - argstr="--interpolation %s", - ), - invert=dict( - argstr="--invert ", - ), - output_image=dict( - argstr="--output_image %s", - hash_files=False, - ), - transformation=dict( - argstr="--transformation %s", - hash_files=False, - ), + args=dict(argstr="%s"), + deformation=dict(argstr="--deformation %s", extensions=None), + environ=dict(nohash=True, usedefault=True), + h_field=dict(argstr="--h_field "), + input_image=dict(argstr="--input_image %s", extensions=None), + interpolation=dict(argstr="--interpolation %s"), + invert=dict(argstr="--invert "), + output_image=dict(argstr="--output_image %s", hash_files=False), + transformation=dict(argstr="--transformation %s", hash_files=False), ) inputs = scalartransform.input_spec() @@ -46,12 +23,7 @@ def test_scalartransform_inputs(): def test_scalartransform_outputs(): output_map = dict( - output_image=dict( - extensions=None, - ), - transformation=dict( - extensions=None, - ), + output_image=dict(extensions=None), transformation=dict(extensions=None) ) outputs = scalartransform.output_spec() diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py index d1c8055df3..845cf63f3c 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py @@ -4,149 +4,60 @@ def test_BRAINSDemonWarp_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), arrayOfPyramidLevelIterations=dict( - argstr="--arrayOfPyramidLevelIterations %s", - sep=",", - ), - backgroundFillValue=dict( - argstr="--backgroundFillValue %d", + argstr="--arrayOfPyramidLevelIterations %s", sep="," ), + backgroundFillValue=dict(argstr="--backgroundFillValue %d"), checkerboardPatternSubdivisions=dict( - argstr="--checkerboardPatternSubdivisions %s", - sep=",", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixedBinaryVolume=dict( - argstr="--fixedBinaryVolume %s", - extensions=None, - ), - fixedVolume=dict( - argstr="--fixedVolume %s", - extensions=None, - ), - gradient_type=dict( - argstr="--gradient_type %s", - ), - gui=dict( - argstr="--gui ", - ), - histogramMatch=dict( - argstr="--histogramMatch ", - ), + argstr="--checkerboardPatternSubdivisions %s", sep="," + ), + environ=dict(nohash=True, usedefault=True), + fixedBinaryVolume=dict(argstr="--fixedBinaryVolume %s", extensions=None), + fixedVolume=dict(argstr="--fixedVolume %s", extensions=None), + gradient_type=dict(argstr="--gradient_type %s"), + gui=dict(argstr="--gui "), + histogramMatch=dict(argstr="--histogramMatch "), initializeWithDisplacementField=dict( - argstr="--initializeWithDisplacementField %s", - extensions=None, + argstr="--initializeWithDisplacementField %s", extensions=None ), initializeWithTransform=dict( - argstr="--initializeWithTransform %s", - extensions=None, - ), - inputPixelType=dict( - argstr="--inputPixelType %s", - ), - interpolationMode=dict( - argstr="--interpolationMode %s", - ), - lowerThresholdForBOBF=dict( - 
argstr="--lowerThresholdForBOBF %d", - ), - maskProcessingMode=dict( - argstr="--maskProcessingMode %s", - ), - max_step_length=dict( - argstr="--max_step_length %f", - ), - medianFilterSize=dict( - argstr="--medianFilterSize %s", - sep=",", - ), - minimumFixedPyramid=dict( - argstr="--minimumFixedPyramid %s", - sep=",", - ), - minimumMovingPyramid=dict( - argstr="--minimumMovingPyramid %s", - sep=",", - ), - movingBinaryVolume=dict( - argstr="--movingBinaryVolume %s", - extensions=None, - ), - movingVolume=dict( - argstr="--movingVolume %s", - extensions=None, - ), - neighborhoodForBOBF=dict( - argstr="--neighborhoodForBOBF %s", - sep=",", - ), - numberOfBCHApproximationTerms=dict( - argstr="--numberOfBCHApproximationTerms %d", - ), - numberOfHistogramBins=dict( - argstr="--numberOfHistogramBins %d", - ), - numberOfMatchPoints=dict( - argstr="--numberOfMatchPoints %d", - ), - numberOfPyramidLevels=dict( - argstr="--numberOfPyramidLevels %d", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), + argstr="--initializeWithTransform %s", extensions=None + ), + inputPixelType=dict(argstr="--inputPixelType %s"), + interpolationMode=dict(argstr="--interpolationMode %s"), + lowerThresholdForBOBF=dict(argstr="--lowerThresholdForBOBF %d"), + maskProcessingMode=dict(argstr="--maskProcessingMode %s"), + max_step_length=dict(argstr="--max_step_length %f"), + medianFilterSize=dict(argstr="--medianFilterSize %s", sep=","), + minimumFixedPyramid=dict(argstr="--minimumFixedPyramid %s", sep=","), + minimumMovingPyramid=dict(argstr="--minimumMovingPyramid %s", sep=","), + movingBinaryVolume=dict(argstr="--movingBinaryVolume %s", extensions=None), + movingVolume=dict(argstr="--movingVolume %s", extensions=None), + neighborhoodForBOBF=dict(argstr="--neighborhoodForBOBF %s", sep=","), + numberOfBCHApproximationTerms=dict(argstr="--numberOfBCHApproximationTerms %d"), + numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d"), + numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d"), + numberOfPyramidLevels=dict(argstr="--numberOfPyramidLevels %d"), + numberOfThreads=dict(argstr="--numberOfThreads %d"), outputCheckerboardVolume=dict( - argstr="--outputCheckerboardVolume %s", - hash_files=False, - ), - outputDebug=dict( - argstr="--outputDebug ", - ), - outputDisplacementFieldPrefix=dict( - argstr="--outputDisplacementFieldPrefix %s", + argstr="--outputCheckerboardVolume %s", hash_files=False ), + outputDebug=dict(argstr="--outputDebug "), + outputDisplacementFieldPrefix=dict(argstr="--outputDisplacementFieldPrefix %s"), outputDisplacementFieldVolume=dict( - argstr="--outputDisplacementFieldVolume %s", - hash_files=False, - ), - outputNormalized=dict( - argstr="--outputNormalized ", - ), - outputPixelType=dict( - argstr="--outputPixelType %s", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - promptUser=dict( - argstr="--promptUser ", - ), - registrationFilterType=dict( - argstr="--registrationFilterType %s", - ), - seedForBOBF=dict( - argstr="--seedForBOBF %s", - sep=",", - ), - smoothDisplacementFieldSigma=dict( - argstr="--smoothDisplacementFieldSigma %f", - ), - upFieldSmoothing=dict( - argstr="--upFieldSmoothing %f", - ), - upperThresholdForBOBF=dict( - argstr="--upperThresholdForBOBF %d", - ), - use_vanilla_dem=dict( - argstr="--use_vanilla_dem ", - ), + argstr="--outputDisplacementFieldVolume %s", hash_files=False + ), + outputNormalized=dict(argstr="--outputNormalized "), + outputPixelType=dict(argstr="--outputPixelType %s"), + 
outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + promptUser=dict(argstr="--promptUser "), + registrationFilterType=dict(argstr="--registrationFilterType %s"), + seedForBOBF=dict(argstr="--seedForBOBF %s", sep=","), + smoothDisplacementFieldSigma=dict(argstr="--smoothDisplacementFieldSigma %f"), + upFieldSmoothing=dict(argstr="--upFieldSmoothing %f"), + upperThresholdForBOBF=dict(argstr="--upperThresholdForBOBF %d"), + use_vanilla_dem=dict(argstr="--use_vanilla_dem "), ) inputs = BRAINSDemonWarp.input_spec() @@ -157,15 +68,9 @@ def test_BRAINSDemonWarp_inputs(): def test_BRAINSDemonWarp_outputs(): output_map = dict( - outputCheckerboardVolume=dict( - extensions=None, - ), - outputDisplacementFieldVolume=dict( - extensions=None, - ), - outputVolume=dict( - extensions=None, - ), + outputCheckerboardVolume=dict(extensions=None), + outputDisplacementFieldVolume=dict(extensions=None), + outputVolume=dict(extensions=None), ) outputs = BRAINSDemonWarp.output_spec() diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py index 69111d9212..da777409c1 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py @@ -4,228 +4,79 @@ def test_BRAINSFit_inputs(): input_map = dict( - ROIAutoClosingSize=dict( - argstr="--ROIAutoClosingSize %f", - ), - ROIAutoDilateSize=dict( - argstr="--ROIAutoDilateSize %f", - ), - args=dict( - argstr="%s", - ), - backgroundFillValue=dict( - argstr="--backgroundFillValue %f", - ), - bsplineTransform=dict( - argstr="--bsplineTransform %s", - hash_files=False, - ), - costFunctionConvergenceFactor=dict( - argstr="--costFunctionConvergenceFactor %f", - ), - costMetric=dict( - argstr="--costMetric %s", - ), - debugLevel=dict( - argstr="--debugLevel %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - failureExitCode=dict( - argstr="--failureExitCode %d", - ), - fixedBinaryVolume=dict( - argstr="--fixedBinaryVolume %s", - extensions=None, - ), - fixedVolume=dict( - argstr="--fixedVolume %s", - extensions=None, - ), - fixedVolume2=dict( - argstr="--fixedVolume2 %s", - extensions=None, - ), - fixedVolumeTimeIndex=dict( - argstr="--fixedVolumeTimeIndex %d", - ), - gui=dict( - argstr="--gui ", - ), - histogramMatch=dict( - argstr="--histogramMatch ", - ), - initialTransform=dict( - argstr="--initialTransform %s", - extensions=None, - ), + ROIAutoClosingSize=dict(argstr="--ROIAutoClosingSize %f"), + ROIAutoDilateSize=dict(argstr="--ROIAutoDilateSize %f"), + args=dict(argstr="%s"), + backgroundFillValue=dict(argstr="--backgroundFillValue %f"), + bsplineTransform=dict(argstr="--bsplineTransform %s", hash_files=False), + costFunctionConvergenceFactor=dict(argstr="--costFunctionConvergenceFactor %f"), + costMetric=dict(argstr="--costMetric %s"), + debugLevel=dict(argstr="--debugLevel %d"), + environ=dict(nohash=True, usedefault=True), + failureExitCode=dict(argstr="--failureExitCode %d"), + fixedBinaryVolume=dict(argstr="--fixedBinaryVolume %s", extensions=None), + fixedVolume=dict(argstr="--fixedVolume %s", extensions=None), + fixedVolume2=dict(argstr="--fixedVolume2 %s", extensions=None), + fixedVolumeTimeIndex=dict(argstr="--fixedVolumeTimeIndex %d"), + gui=dict(argstr="--gui "), + histogramMatch=dict(argstr="--histogramMatch "), + initialTransform=dict(argstr="--initialTransform %s", extensions=None), initializeRegistrationByCurrentGenericTransform=dict( - 
argstr="--initializeRegistrationByCurrentGenericTransform ", - ), - initializeTransformMode=dict( - argstr="--initializeTransformMode %s", - ), - interpolationMode=dict( - argstr="--interpolationMode %s", - ), - linearTransform=dict( - argstr="--linearTransform %s", - hash_files=False, - ), - logFileReport=dict( - argstr="--logFileReport %s", - hash_files=False, - ), - maskInferiorCutOffFromCenter=dict( - argstr="--maskInferiorCutOffFromCenter %f", - ), - maskProcessingMode=dict( - argstr="--maskProcessingMode %s", - ), - maxBSplineDisplacement=dict( - argstr="--maxBSplineDisplacement %f", - ), - maximumNumberOfCorrections=dict( - argstr="--maximumNumberOfCorrections %d", - ), - maximumNumberOfEvaluations=dict( - argstr="--maximumNumberOfEvaluations %d", - ), - maximumStepLength=dict( - argstr="--maximumStepLength %f", - ), - medianFilterSize=dict( - argstr="--medianFilterSize %s", - sep=",", - ), - metricSamplingStrategy=dict( - argstr="--metricSamplingStrategy %s", - ), - minimumStepLength=dict( - argstr="--minimumStepLength %s", - sep=",", - ), - movingBinaryVolume=dict( - argstr="--movingBinaryVolume %s", - extensions=None, - ), - movingVolume=dict( - argstr="--movingVolume %s", - extensions=None, - ), - movingVolume2=dict( - argstr="--movingVolume2 %s", - extensions=None, - ), - movingVolumeTimeIndex=dict( - argstr="--movingVolumeTimeIndex %d", - ), - numberOfHistogramBins=dict( - argstr="--numberOfHistogramBins %d", - ), - numberOfIterations=dict( - argstr="--numberOfIterations %s", - sep=",", - ), - numberOfMatchPoints=dict( - argstr="--numberOfMatchPoints %d", - ), - numberOfSamples=dict( - argstr="--numberOfSamples %d", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputFixedVolumeROI=dict( - argstr="--outputFixedVolumeROI %s", - hash_files=False, - ), + argstr="--initializeRegistrationByCurrentGenericTransform " + ), + initializeTransformMode=dict(argstr="--initializeTransformMode %s"), + interpolationMode=dict(argstr="--interpolationMode %s"), + linearTransform=dict(argstr="--linearTransform %s", hash_files=False), + logFileReport=dict(argstr="--logFileReport %s", hash_files=False), + maskInferiorCutOffFromCenter=dict(argstr="--maskInferiorCutOffFromCenter %f"), + maskProcessingMode=dict(argstr="--maskProcessingMode %s"), + maxBSplineDisplacement=dict(argstr="--maxBSplineDisplacement %f"), + maximumNumberOfCorrections=dict(argstr="--maximumNumberOfCorrections %d"), + maximumNumberOfEvaluations=dict(argstr="--maximumNumberOfEvaluations %d"), + maximumStepLength=dict(argstr="--maximumStepLength %f"), + medianFilterSize=dict(argstr="--medianFilterSize %s", sep=","), + metricSamplingStrategy=dict(argstr="--metricSamplingStrategy %s"), + minimumStepLength=dict(argstr="--minimumStepLength %s", sep=","), + movingBinaryVolume=dict(argstr="--movingBinaryVolume %s", extensions=None), + movingVolume=dict(argstr="--movingVolume %s", extensions=None), + movingVolume2=dict(argstr="--movingVolume2 %s", extensions=None), + movingVolumeTimeIndex=dict(argstr="--movingVolumeTimeIndex %d"), + numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d"), + numberOfIterations=dict(argstr="--numberOfIterations %s", sep=","), + numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d"), + numberOfSamples=dict(argstr="--numberOfSamples %d"), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputFixedVolumeROI=dict(argstr="--outputFixedVolumeROI %s", hash_files=False), outputMovingVolumeROI=dict( - argstr="--outputMovingVolumeROI %s", - hash_files=False, - ), - 
outputTransform=dict( - argstr="--outputTransform %s", - hash_files=False, - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - outputVolumePixelType=dict( - argstr="--outputVolumePixelType %s", - ), - projectedGradientTolerance=dict( - argstr="--projectedGradientTolerance %f", - ), - promptUser=dict( - argstr="--promptUser ", - ), - relaxationFactor=dict( - argstr="--relaxationFactor %f", - ), - removeIntensityOutliers=dict( - argstr="--removeIntensityOutliers %f", - ), - reproportionScale=dict( - argstr="--reproportionScale %f", - ), - samplingPercentage=dict( - argstr="--samplingPercentage %f", - ), - scaleOutputValues=dict( - argstr="--scaleOutputValues ", - ), - skewScale=dict( - argstr="--skewScale %f", - ), - splineGridSize=dict( - argstr="--splineGridSize %s", - sep=",", - ), + argstr="--outputMovingVolumeROI %s", hash_files=False + ), + outputTransform=dict(argstr="--outputTransform %s", hash_files=False), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + outputVolumePixelType=dict(argstr="--outputVolumePixelType %s"), + projectedGradientTolerance=dict(argstr="--projectedGradientTolerance %f"), + promptUser=dict(argstr="--promptUser "), + relaxationFactor=dict(argstr="--relaxationFactor %f"), + removeIntensityOutliers=dict(argstr="--removeIntensityOutliers %f"), + reproportionScale=dict(argstr="--reproportionScale %f"), + samplingPercentage=dict(argstr="--samplingPercentage %f"), + scaleOutputValues=dict(argstr="--scaleOutputValues "), + skewScale=dict(argstr="--skewScale %f"), + splineGridSize=dict(argstr="--splineGridSize %s", sep=","), strippedOutputTransform=dict( - argstr="--strippedOutputTransform %s", - hash_files=False, - ), - transformType=dict( - argstr="--transformType %s", - sep=",", - ), - translationScale=dict( - argstr="--translationScale %f", - ), - useAffine=dict( - argstr="--useAffine ", - ), - useBSpline=dict( - argstr="--useBSpline ", - ), - useComposite=dict( - argstr="--useComposite ", - ), - useROIBSpline=dict( - argstr="--useROIBSpline ", - ), - useRigid=dict( - argstr="--useRigid ", - ), - useScaleSkewVersor3D=dict( - argstr="--useScaleSkewVersor3D ", - ), - useScaleVersor3D=dict( - argstr="--useScaleVersor3D ", - ), - useSyN=dict( - argstr="--useSyN ", - ), - writeOutputTransformInFloat=dict( - argstr="--writeOutputTransformInFloat ", - ), - writeTransformOnFailure=dict( - argstr="--writeTransformOnFailure ", - ), + argstr="--strippedOutputTransform %s", hash_files=False + ), + transformType=dict(argstr="--transformType %s", sep=","), + translationScale=dict(argstr="--translationScale %f"), + useAffine=dict(argstr="--useAffine "), + useBSpline=dict(argstr="--useBSpline "), + useComposite=dict(argstr="--useComposite "), + useROIBSpline=dict(argstr="--useROIBSpline "), + useRigid=dict(argstr="--useRigid "), + useScaleSkewVersor3D=dict(argstr="--useScaleSkewVersor3D "), + useScaleVersor3D=dict(argstr="--useScaleVersor3D "), + useSyN=dict(argstr="--useSyN "), + writeOutputTransformInFloat=dict(argstr="--writeOutputTransformInFloat "), + writeTransformOnFailure=dict(argstr="--writeTransformOnFailure "), ) inputs = BRAINSFit.input_spec() @@ -236,30 +87,14 @@ def test_BRAINSFit_inputs(): def test_BRAINSFit_outputs(): output_map = dict( - bsplineTransform=dict( - extensions=None, - ), - linearTransform=dict( - extensions=None, - ), - logFileReport=dict( - extensions=None, - ), - outputFixedVolumeROI=dict( - extensions=None, - ), - outputMovingVolumeROI=dict( - extensions=None, - ), - outputTransform=dict( - 
extensions=None, - ), - outputVolume=dict( - extensions=None, - ), - strippedOutputTransform=dict( - extensions=None, - ), + bsplineTransform=dict(extensions=None), + linearTransform=dict(extensions=None), + logFileReport=dict(extensions=None), + outputFixedVolumeROI=dict(extensions=None), + outputMovingVolumeROI=dict(extensions=None), + outputTransform=dict(extensions=None), + outputVolume=dict(extensions=None), + strippedOutputTransform=dict(extensions=None), ) outputs = BRAINSFit.output_spec() diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py index 46d175da07..f2b6760eea 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py @@ -4,52 +4,19 @@ def test_BRAINSResample_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - defaultValue=dict( - argstr="--defaultValue %f", - ), - deformationVolume=dict( - argstr="--deformationVolume %s", - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gridSpacing=dict( - argstr="--gridSpacing %s", - sep=",", - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - interpolationMode=dict( - argstr="--interpolationMode %s", - ), - inverseTransform=dict( - argstr="--inverseTransform ", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - pixelType=dict( - argstr="--pixelType %s", - ), - referenceVolume=dict( - argstr="--referenceVolume %s", - extensions=None, - ), - warpTransform=dict( - argstr="--warpTransform %s", - extensions=None, - ), + args=dict(argstr="%s"), + defaultValue=dict(argstr="--defaultValue %f"), + deformationVolume=dict(argstr="--deformationVolume %s", extensions=None), + environ=dict(nohash=True, usedefault=True), + gridSpacing=dict(argstr="--gridSpacing %s", sep=","), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + interpolationMode=dict(argstr="--interpolationMode %s"), + inverseTransform=dict(argstr="--inverseTransform "), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + pixelType=dict(argstr="--pixelType %s"), + referenceVolume=dict(argstr="--referenceVolume %s", extensions=None), + warpTransform=dict(argstr="--warpTransform %s", extensions=None), ) inputs = BRAINSResample.input_spec() @@ -59,11 +26,7 @@ def test_BRAINSResample_inputs(): def test_BRAINSResample_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = BRAINSResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py index 52d24e6fff..b3d3a36e13 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py @@ -4,27 +4,12 @@ def test_BRAINSResize_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - pixelType=dict( - argstr="--pixelType %s", - ), - 
scaleFactor=dict( - argstr="--scaleFactor %f", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + pixelType=dict(argstr="--pixelType %s"), + scaleFactor=dict(argstr="--scaleFactor %f"), ) inputs = BRAINSResize.input_spec() @@ -34,11 +19,7 @@ def test_BRAINSResize_inputs(): def test_BRAINSResize_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = BRAINSResize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py index bc98fc2763..5c4cfbc8ba 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py @@ -4,37 +4,15 @@ def test_BRAINSTransformFromFiducials_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixedLandmarks=dict( - argstr="--fixedLandmarks %s...", - ), - fixedLandmarksFile=dict( - argstr="--fixedLandmarksFile %s", - extensions=None, - ), - movingLandmarks=dict( - argstr="--movingLandmarks %s...", - ), - movingLandmarksFile=dict( - argstr="--movingLandmarksFile %s", - extensions=None, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - saveTransform=dict( - argstr="--saveTransform %s", - hash_files=False, - ), - transformType=dict( - argstr="--transformType %s", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + fixedLandmarks=dict(argstr="--fixedLandmarks %s..."), + fixedLandmarksFile=dict(argstr="--fixedLandmarksFile %s", extensions=None), + movingLandmarks=dict(argstr="--movingLandmarks %s..."), + movingLandmarksFile=dict(argstr="--movingLandmarksFile %s", extensions=None), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + saveTransform=dict(argstr="--saveTransform %s", hash_files=False), + transformType=dict(argstr="--transformType %s"), ) inputs = BRAINSTransformFromFiducials.input_spec() @@ -44,11 +22,7 @@ def test_BRAINSTransformFromFiducials_inputs(): def test_BRAINSTransformFromFiducials_outputs(): - output_map = dict( - saveTransform=dict( - extensions=None, - ), - ) + output_map = dict(saveTransform=dict(extensions=None)) outputs = BRAINSTransformFromFiducials.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py b/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py index af8bac8680..45bfc67734 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py @@ -4,151 +4,61 @@ def test_VBRAINSDemonWarp_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), arrayOfPyramidLevelIterations=dict( - argstr="--arrayOfPyramidLevelIterations %s", - sep=",", - ), - backgroundFillValue=dict( - argstr="--backgroundFillValue %d", + argstr="--arrayOfPyramidLevelIterations %s", sep="," ), + backgroundFillValue=dict(argstr="--backgroundFillValue %d"), checkerboardPatternSubdivisions=dict( - argstr="--checkerboardPatternSubdivisions %s", - 
sep=",", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixedBinaryVolume=dict( - argstr="--fixedBinaryVolume %s", - extensions=None, - ), - fixedVolume=dict( - argstr="--fixedVolume %s...", - ), - gradient_type=dict( - argstr="--gradient_type %s", - ), - gui=dict( - argstr="--gui ", - ), - histogramMatch=dict( - argstr="--histogramMatch ", - ), + argstr="--checkerboardPatternSubdivisions %s", sep="," + ), + environ=dict(nohash=True, usedefault=True), + fixedBinaryVolume=dict(argstr="--fixedBinaryVolume %s", extensions=None), + fixedVolume=dict(argstr="--fixedVolume %s..."), + gradient_type=dict(argstr="--gradient_type %s"), + gui=dict(argstr="--gui "), + histogramMatch=dict(argstr="--histogramMatch "), initializeWithDisplacementField=dict( - argstr="--initializeWithDisplacementField %s", - extensions=None, + argstr="--initializeWithDisplacementField %s", extensions=None ), initializeWithTransform=dict( - argstr="--initializeWithTransform %s", - extensions=None, - ), - inputPixelType=dict( - argstr="--inputPixelType %s", - ), - interpolationMode=dict( - argstr="--interpolationMode %s", - ), - lowerThresholdForBOBF=dict( - argstr="--lowerThresholdForBOBF %d", - ), - makeBOBF=dict( - argstr="--makeBOBF ", - ), - max_step_length=dict( - argstr="--max_step_length %f", - ), - medianFilterSize=dict( - argstr="--medianFilterSize %s", - sep=",", - ), - minimumFixedPyramid=dict( - argstr="--minimumFixedPyramid %s", - sep=",", - ), - minimumMovingPyramid=dict( - argstr="--minimumMovingPyramid %s", - sep=",", - ), - movingBinaryVolume=dict( - argstr="--movingBinaryVolume %s", - extensions=None, - ), - movingVolume=dict( - argstr="--movingVolume %s...", - ), - neighborhoodForBOBF=dict( - argstr="--neighborhoodForBOBF %s", - sep=",", - ), - numberOfBCHApproximationTerms=dict( - argstr="--numberOfBCHApproximationTerms %d", - ), - numberOfHistogramBins=dict( - argstr="--numberOfHistogramBins %d", - ), - numberOfMatchPoints=dict( - argstr="--numberOfMatchPoints %d", - ), - numberOfPyramidLevels=dict( - argstr="--numberOfPyramidLevels %d", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), + argstr="--initializeWithTransform %s", extensions=None + ), + inputPixelType=dict(argstr="--inputPixelType %s"), + interpolationMode=dict(argstr="--interpolationMode %s"), + lowerThresholdForBOBF=dict(argstr="--lowerThresholdForBOBF %d"), + makeBOBF=dict(argstr="--makeBOBF "), + max_step_length=dict(argstr="--max_step_length %f"), + medianFilterSize=dict(argstr="--medianFilterSize %s", sep=","), + minimumFixedPyramid=dict(argstr="--minimumFixedPyramid %s", sep=","), + minimumMovingPyramid=dict(argstr="--minimumMovingPyramid %s", sep=","), + movingBinaryVolume=dict(argstr="--movingBinaryVolume %s", extensions=None), + movingVolume=dict(argstr="--movingVolume %s..."), + neighborhoodForBOBF=dict(argstr="--neighborhoodForBOBF %s", sep=","), + numberOfBCHApproximationTerms=dict(argstr="--numberOfBCHApproximationTerms %d"), + numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d"), + numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d"), + numberOfPyramidLevels=dict(argstr="--numberOfPyramidLevels %d"), + numberOfThreads=dict(argstr="--numberOfThreads %d"), outputCheckerboardVolume=dict( - argstr="--outputCheckerboardVolume %s", - hash_files=False, - ), - outputDebug=dict( - argstr="--outputDebug ", - ), - outputDisplacementFieldPrefix=dict( - argstr="--outputDisplacementFieldPrefix %s", + argstr="--outputCheckerboardVolume %s", hash_files=False ), + 
outputDebug=dict(argstr="--outputDebug "), + outputDisplacementFieldPrefix=dict(argstr="--outputDisplacementFieldPrefix %s"), outputDisplacementFieldVolume=dict( - argstr="--outputDisplacementFieldVolume %s", - hash_files=False, - ), - outputNormalized=dict( - argstr="--outputNormalized ", - ), - outputPixelType=dict( - argstr="--outputPixelType %s", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - promptUser=dict( - argstr="--promptUser ", - ), - registrationFilterType=dict( - argstr="--registrationFilterType %s", - ), - seedForBOBF=dict( - argstr="--seedForBOBF %s", - sep=",", - ), - smoothDisplacementFieldSigma=dict( - argstr="--smoothDisplacementFieldSigma %f", - ), - upFieldSmoothing=dict( - argstr="--upFieldSmoothing %f", - ), - upperThresholdForBOBF=dict( - argstr="--upperThresholdForBOBF %d", - ), - use_vanilla_dem=dict( - argstr="--use_vanilla_dem ", - ), - weightFactors=dict( - argstr="--weightFactors %s", - sep=",", - ), + argstr="--outputDisplacementFieldVolume %s", hash_files=False + ), + outputNormalized=dict(argstr="--outputNormalized "), + outputPixelType=dict(argstr="--outputPixelType %s"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + promptUser=dict(argstr="--promptUser "), + registrationFilterType=dict(argstr="--registrationFilterType %s"), + seedForBOBF=dict(argstr="--seedForBOBF %s", sep=","), + smoothDisplacementFieldSigma=dict(argstr="--smoothDisplacementFieldSigma %f"), + upFieldSmoothing=dict(argstr="--upFieldSmoothing %f"), + upperThresholdForBOBF=dict(argstr="--upperThresholdForBOBF %d"), + use_vanilla_dem=dict(argstr="--use_vanilla_dem "), + weightFactors=dict(argstr="--weightFactors %s", sep=","), ) inputs = VBRAINSDemonWarp.input_spec() @@ -159,15 +69,9 @@ def test_VBRAINSDemonWarp_inputs(): def test_VBRAINSDemonWarp_outputs(): output_map = dict( - outputCheckerboardVolume=dict( - extensions=None, - ), - outputDisplacementFieldVolume=dict( - extensions=None, - ), - outputVolume=dict( - extensions=None, - ), + outputCheckerboardVolume=dict(extensions=None), + outputDisplacementFieldVolume=dict(extensions=None), + outputVolume=dict(extensions=None), ) outputs = VBRAINSDemonWarp.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py index 5e66b2d82c..e93a8f9e7e 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py @@ -4,123 +4,46 @@ def test_BRAINSABC_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - atlasDefinition=dict( - argstr="--atlasDefinition %s", - extensions=None, - ), + args=dict(argstr="%s"), + atlasDefinition=dict(argstr="--atlasDefinition %s", extensions=None), atlasToSubjectInitialTransform=dict( - argstr="--atlasToSubjectInitialTransform %s", - hash_files=False, + argstr="--atlasToSubjectInitialTransform %s", hash_files=False ), atlasToSubjectTransform=dict( - argstr="--atlasToSubjectTransform %s", - hash_files=False, - ), - atlasToSubjectTransformType=dict( - argstr="--atlasToSubjectTransformType %s", - ), - atlasWarpingOff=dict( - argstr="--atlasWarpingOff ", - ), - debuglevel=dict( - argstr="--debuglevel %d", - ), - defaultSuffix=dict( - argstr="--defaultSuffix %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - filterIteration=dict( - argstr="--filterIteration %d", - ), - filterMethod=dict( - argstr="--filterMethod %s", - ), - filterTimeStep=dict( - 
argstr="--filterTimeStep %f", - ), - gridSize=dict( - argstr="--gridSize %s", - sep=",", - ), - implicitOutputs=dict( - argstr="--implicitOutputs %s...", - hash_files=False, - ), - inputVolumeTypes=dict( - argstr="--inputVolumeTypes %s", - sep=",", - ), - inputVolumes=dict( - argstr="--inputVolumes %s...", - ), - interpolationMode=dict( - argstr="--interpolationMode %s", - ), - maxBiasDegree=dict( - argstr="--maxBiasDegree %d", - ), - maxIterations=dict( - argstr="--maxIterations %d", - ), - medianFilterSize=dict( - argstr="--medianFilterSize %s", - sep=",", - ), + argstr="--atlasToSubjectTransform %s", hash_files=False + ), + atlasToSubjectTransformType=dict(argstr="--atlasToSubjectTransformType %s"), + atlasWarpingOff=dict(argstr="--atlasWarpingOff "), + debuglevel=dict(argstr="--debuglevel %d"), + defaultSuffix=dict(argstr="--defaultSuffix %s"), + environ=dict(nohash=True, usedefault=True), + filterIteration=dict(argstr="--filterIteration %d"), + filterMethod=dict(argstr="--filterMethod %s"), + filterTimeStep=dict(argstr="--filterTimeStep %f"), + gridSize=dict(argstr="--gridSize %s", sep=","), + implicitOutputs=dict(argstr="--implicitOutputs %s...", hash_files=False), + inputVolumeTypes=dict(argstr="--inputVolumeTypes %s", sep=","), + inputVolumes=dict(argstr="--inputVolumes %s..."), + interpolationMode=dict(argstr="--interpolationMode %s"), + maxBiasDegree=dict(argstr="--maxBiasDegree %d"), + maxIterations=dict(argstr="--maxIterations %d"), + medianFilterSize=dict(argstr="--medianFilterSize %s", sep=","), numberOfSubSamplesInEachPlugArea=dict( - argstr="--numberOfSubSamplesInEachPlugArea %s", - sep=",", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputDir=dict( - argstr="--outputDir %s", - hash_files=False, - ), - outputDirtyLabels=dict( - argstr="--outputDirtyLabels %s", - hash_files=False, - ), - outputFormat=dict( - argstr="--outputFormat %s", - ), - outputLabels=dict( - argstr="--outputLabels %s", - hash_files=False, - ), - outputVolumes=dict( - argstr="--outputVolumes %s...", - hash_files=False, - ), - posteriorTemplate=dict( - argstr="--posteriorTemplate %s", - ), - purePlugsThreshold=dict( - argstr="--purePlugsThreshold %f", - ), - restoreState=dict( - argstr="--restoreState %s", - extensions=None, - ), - saveState=dict( - argstr="--saveState %s", - hash_files=False, - ), - subjectIntermodeTransformType=dict( - argstr="--subjectIntermodeTransformType %s", - ), - useKNN=dict( - argstr="--useKNN ", - ), - writeLess=dict( - argstr="--writeLess ", - ), + argstr="--numberOfSubSamplesInEachPlugArea %s", sep="," + ), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputDir=dict(argstr="--outputDir %s", hash_files=False), + outputDirtyLabels=dict(argstr="--outputDirtyLabels %s", hash_files=False), + outputFormat=dict(argstr="--outputFormat %s"), + outputLabels=dict(argstr="--outputLabels %s", hash_files=False), + outputVolumes=dict(argstr="--outputVolumes %s...", hash_files=False), + posteriorTemplate=dict(argstr="--posteriorTemplate %s"), + purePlugsThreshold=dict(argstr="--purePlugsThreshold %f"), + restoreState=dict(argstr="--restoreState %s", extensions=None), + saveState=dict(argstr="--saveState %s", hash_files=False), + subjectIntermodeTransformType=dict(argstr="--subjectIntermodeTransformType %s"), + useKNN=dict(argstr="--useKNN "), + writeLess=dict(argstr="--writeLess "), ) inputs = BRAINSABC.input_spec() @@ -131,24 +54,14 @@ def test_BRAINSABC_inputs(): def test_BRAINSABC_outputs(): output_map = dict( - atlasToSubjectInitialTransform=dict( - 
extensions=None, - ), - atlasToSubjectTransform=dict( - extensions=None, - ), + atlasToSubjectInitialTransform=dict(extensions=None), + atlasToSubjectTransform=dict(extensions=None), implicitOutputs=dict(), outputDir=dict(), - outputDirtyLabels=dict( - extensions=None, - ), - outputLabels=dict( - extensions=None, - ), + outputDirtyLabels=dict(extensions=None), + outputLabels=dict(extensions=None), outputVolumes=dict(), - saveState=dict( - extensions=None, - ), + saveState=dict(extensions=None), ) outputs = BRAINSABC.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py index 2f77f419e7..6a20f4abfc 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py @@ -4,155 +4,62 @@ def test_BRAINSConstellationDetector_inputs(): input_map = dict( - BackgroundFillValue=dict( - argstr="--BackgroundFillValue %s", - ), - LLSModel=dict( - argstr="--LLSModel %s", - extensions=None, - ), - acLowerBound=dict( - argstr="--acLowerBound %f", - ), - args=dict( - argstr="%s", - ), - atlasLandmarkWeights=dict( - argstr="--atlasLandmarkWeights %s", - extensions=None, - ), - atlasLandmarks=dict( - argstr="--atlasLandmarks %s", - extensions=None, - ), - atlasVolume=dict( - argstr="--atlasVolume %s", - extensions=None, - ), - cutOutHeadInOutputVolume=dict( - argstr="--cutOutHeadInOutputVolume ", - ), - debug=dict( - argstr="--debug ", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - forceACPoint=dict( - argstr="--forceACPoint %s", - sep=",", - ), + BackgroundFillValue=dict(argstr="--BackgroundFillValue %s"), + LLSModel=dict(argstr="--LLSModel %s", extensions=None), + acLowerBound=dict(argstr="--acLowerBound %f"), + args=dict(argstr="%s"), + atlasLandmarkWeights=dict(argstr="--atlasLandmarkWeights %s", extensions=None), + atlasLandmarks=dict(argstr="--atlasLandmarks %s", extensions=None), + atlasVolume=dict(argstr="--atlasVolume %s", extensions=None), + cutOutHeadInOutputVolume=dict(argstr="--cutOutHeadInOutputVolume "), + debug=dict(argstr="--debug "), + environ=dict(nohash=True, usedefault=True), + forceACPoint=dict(argstr="--forceACPoint %s", sep=","), forceHoughEyeDetectorReportFailure=dict( - argstr="--forceHoughEyeDetectorReportFailure ", - ), - forcePCPoint=dict( - argstr="--forcePCPoint %s", - sep=",", - ), - forceRPPoint=dict( - argstr="--forceRPPoint %s", - sep=",", - ), - forceVN4Point=dict( - argstr="--forceVN4Point %s", - sep=",", - ), - houghEyeDetectorMode=dict( - argstr="--houghEyeDetectorMode %d", - ), - inputLandmarksEMSP=dict( - argstr="--inputLandmarksEMSP %s", - extensions=None, - ), - inputTemplateModel=dict( - argstr="--inputTemplateModel %s", - extensions=None, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - interpolationMode=dict( - argstr="--interpolationMode %s", - ), - mspQualityLevel=dict( - argstr="--mspQualityLevel %d", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - otsuPercentileThreshold=dict( - argstr="--otsuPercentileThreshold %f", - ), + argstr="--forceHoughEyeDetectorReportFailure " + ), + forcePCPoint=dict(argstr="--forcePCPoint %s", sep=","), + forceRPPoint=dict(argstr="--forceRPPoint %s", sep=","), + forceVN4Point=dict(argstr="--forceVN4Point %s", sep=","), + houghEyeDetectorMode=dict(argstr="--houghEyeDetectorMode %d"), + 
inputLandmarksEMSP=dict(argstr="--inputLandmarksEMSP %s", extensions=None), + inputTemplateModel=dict(argstr="--inputTemplateModel %s", extensions=None), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + interpolationMode=dict(argstr="--interpolationMode %s"), + mspQualityLevel=dict(argstr="--mspQualityLevel %d"), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + otsuPercentileThreshold=dict(argstr="--otsuPercentileThreshold %f"), outputLandmarksInACPCAlignedSpace=dict( - argstr="--outputLandmarksInACPCAlignedSpace %s", - hash_files=False, + argstr="--outputLandmarksInACPCAlignedSpace %s", hash_files=False ), outputLandmarksInInputSpace=dict( - argstr="--outputLandmarksInInputSpace %s", - hash_files=False, - ), - outputMRML=dict( - argstr="--outputMRML %s", - hash_files=False, + argstr="--outputLandmarksInInputSpace %s", hash_files=False ), + outputMRML=dict(argstr="--outputMRML %s", hash_files=False), outputResampledVolume=dict( - argstr="--outputResampledVolume %s", - hash_files=False, - ), - outputTransform=dict( - argstr="--outputTransform %s", - hash_files=False, + argstr="--outputResampledVolume %s", hash_files=False ), + outputTransform=dict(argstr="--outputTransform %s", hash_files=False), outputUntransformedClippedVolume=dict( - argstr="--outputUntransformedClippedVolume %s", - hash_files=False, + argstr="--outputUntransformedClippedVolume %s", hash_files=False ), outputVerificationScript=dict( - argstr="--outputVerificationScript %s", - hash_files=False, - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - rVN4=dict( - argstr="--rVN4 %f", - ), - rac=dict( - argstr="--rac %f", - ), - rescaleIntensities=dict( - argstr="--rescaleIntensities ", + argstr="--outputVerificationScript %s", hash_files=False ), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + rVN4=dict(argstr="--rVN4 %f"), + rac=dict(argstr="--rac %f"), + rescaleIntensities=dict(argstr="--rescaleIntensities "), rescaleIntensitiesOutputRange=dict( - argstr="--rescaleIntensitiesOutputRange %s", - sep=",", - ), - resultsDir=dict( - argstr="--resultsDir %s", - hash_files=False, - ), - rmpj=dict( - argstr="--rmpj %f", - ), - rpc=dict( - argstr="--rpc %f", - ), - trimRescaledIntensities=dict( - argstr="--trimRescaledIntensities %f", - ), - verbose=dict( - argstr="--verbose ", - ), - writeBranded2DImage=dict( - argstr="--writeBranded2DImage %s", - hash_files=False, - ), - writedebuggingImagesLevel=dict( - argstr="--writedebuggingImagesLevel %d", - ), + argstr="--rescaleIntensitiesOutputRange %s", sep="," + ), + resultsDir=dict(argstr="--resultsDir %s", hash_files=False), + rmpj=dict(argstr="--rmpj %f"), + rpc=dict(argstr="--rpc %f"), + trimRescaledIntensities=dict(argstr="--trimRescaledIntensities %f"), + verbose=dict(argstr="--verbose "), + writeBranded2DImage=dict(argstr="--writeBranded2DImage %s", hash_files=False), + writedebuggingImagesLevel=dict(argstr="--writedebuggingImagesLevel %d"), ) inputs = BRAINSConstellationDetector.input_spec() @@ -163,34 +70,16 @@ def test_BRAINSConstellationDetector_inputs(): def test_BRAINSConstellationDetector_outputs(): output_map = dict( - outputLandmarksInACPCAlignedSpace=dict( - extensions=None, - ), - outputLandmarksInInputSpace=dict( - extensions=None, - ), - outputMRML=dict( - extensions=None, - ), - outputResampledVolume=dict( - extensions=None, - ), - outputTransform=dict( - extensions=None, - ), - outputUntransformedClippedVolume=dict( - extensions=None, - ), - outputVerificationScript=dict( - extensions=None, - 
), - outputVolume=dict( - extensions=None, - ), + outputLandmarksInACPCAlignedSpace=dict(extensions=None), + outputLandmarksInInputSpace=dict(extensions=None), + outputMRML=dict(extensions=None), + outputResampledVolume=dict(extensions=None), + outputTransform=dict(extensions=None), + outputUntransformedClippedVolume=dict(extensions=None), + outputVerificationScript=dict(extensions=None), + outputVolume=dict(extensions=None), resultsDir=dict(), - writeBranded2DImage=dict( - extensions=None, - ), + writeBranded2DImage=dict(extensions=None), ) outputs = BRAINSConstellationDetector.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py index 4a395fbc14..541d635620 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py @@ -4,39 +4,15 @@ def test_BRAINSCreateLabelMapFromProbabilityMaps_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - cleanLabelVolume=dict( - argstr="--cleanLabelVolume %s", - hash_files=False, - ), - dirtyLabelVolume=dict( - argstr="--dirtyLabelVolume %s", - hash_files=False, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - foregroundPriors=dict( - argstr="--foregroundPriors %s", - sep=",", - ), - inclusionThreshold=dict( - argstr="--inclusionThreshold %f", - ), - inputProbabilityVolume=dict( - argstr="--inputProbabilityVolume %s...", - ), - nonAirRegionMask=dict( - argstr="--nonAirRegionMask %s", - extensions=None, - ), - priorLabelCodes=dict( - argstr="--priorLabelCodes %s", - sep=",", - ), + args=dict(argstr="%s"), + cleanLabelVolume=dict(argstr="--cleanLabelVolume %s", hash_files=False), + dirtyLabelVolume=dict(argstr="--dirtyLabelVolume %s", hash_files=False), + environ=dict(nohash=True, usedefault=True), + foregroundPriors=dict(argstr="--foregroundPriors %s", sep=","), + inclusionThreshold=dict(argstr="--inclusionThreshold %f"), + inputProbabilityVolume=dict(argstr="--inputProbabilityVolume %s..."), + nonAirRegionMask=dict(argstr="--nonAirRegionMask %s", extensions=None), + priorLabelCodes=dict(argstr="--priorLabelCodes %s", sep=","), ) inputs = BRAINSCreateLabelMapFromProbabilityMaps.input_spec() @@ -47,12 +23,7 @@ def test_BRAINSCreateLabelMapFromProbabilityMaps_inputs(): def test_BRAINSCreateLabelMapFromProbabilityMaps_outputs(): output_map = dict( - cleanLabelVolume=dict( - extensions=None, - ), - dirtyLabelVolume=dict( - extensions=None, - ), + cleanLabelVolume=dict(extensions=None), dirtyLabelVolume=dict(extensions=None) ) outputs = BRAINSCreateLabelMapFromProbabilityMaps.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py index e101cde2cc..26d1612061 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py @@ -4,66 +4,27 @@ def test_BRAINSCut_inputs(): input_map = dict( - NoTrainingVectorShuffling=dict( - argstr="--NoTrainingVectorShuffling ", - ), - applyModel=dict( - argstr="--applyModel ", - ), - args=dict( - argstr="%s", - ), - computeSSEOn=dict( - argstr="--computeSSEOn ", - ), - createVectors=dict( - argstr="--createVectors ", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - 
generateProbability=dict( - argstr="--generateProbability ", - ), - histogramEqualization=dict( - argstr="--histogramEqualization ", - ), - method=dict( - argstr="--method %s", - ), + NoTrainingVectorShuffling=dict(argstr="--NoTrainingVectorShuffling "), + applyModel=dict(argstr="--applyModel "), + args=dict(argstr="%s"), + computeSSEOn=dict(argstr="--computeSSEOn "), + createVectors=dict(argstr="--createVectors "), + environ=dict(nohash=True, usedefault=True), + generateProbability=dict(argstr="--generateProbability "), + histogramEqualization=dict(argstr="--histogramEqualization "), + method=dict(argstr="--method %s"), modelConfigurationFilename=dict( - argstr="--modelConfigurationFilename %s", - extensions=None, - ), - modelFilename=dict( - argstr="--modelFilename %s", - ), - multiStructureThreshold=dict( - argstr="--multiStructureThreshold ", - ), - netConfiguration=dict( - argstr="--netConfiguration %s", - extensions=None, - ), - numberOfTrees=dict( - argstr="--numberOfTrees %d", - ), - randomTreeDepth=dict( - argstr="--randomTreeDepth %d", - ), - trainModel=dict( - argstr="--trainModel ", - ), - trainModelStartIndex=dict( - argstr="--trainModelStartIndex %d", - ), - validate=dict( - argstr="--validate ", - ), - verbose=dict( - argstr="--verbose %d", - ), + argstr="--modelConfigurationFilename %s", extensions=None + ), + modelFilename=dict(argstr="--modelFilename %s"), + multiStructureThreshold=dict(argstr="--multiStructureThreshold "), + netConfiguration=dict(argstr="--netConfiguration %s", extensions=None), + numberOfTrees=dict(argstr="--numberOfTrees %d"), + randomTreeDepth=dict(argstr="--randomTreeDepth %d"), + trainModel=dict(argstr="--trainModel "), + trainModelStartIndex=dict(argstr="--trainModelStartIndex %d"), + validate=dict(argstr="--validate "), + verbose=dict(argstr="--verbose %d"), ) inputs = BRAINSCut.input_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py index 6ff468ba47..c52a2cc0d9 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py @@ -4,40 +4,20 @@ def test_BRAINSMultiSTAPLE_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), inputCompositeT1Volume=dict( - argstr="--inputCompositeT1Volume %s", - extensions=None, - ), - inputLabelVolume=dict( - argstr="--inputLabelVolume %s...", - ), - inputTransform=dict( - argstr="--inputTransform %s...", - ), - labelForUndecidedPixels=dict( - argstr="--labelForUndecidedPixels %d", + argstr="--inputCompositeT1Volume %s", extensions=None ), + inputLabelVolume=dict(argstr="--inputLabelVolume %s..."), + inputTransform=dict(argstr="--inputTransform %s..."), + labelForUndecidedPixels=dict(argstr="--labelForUndecidedPixels %d"), outputConfusionMatrix=dict( - argstr="--outputConfusionMatrix %s", - hash_files=False, - ), - outputMultiSTAPLE=dict( - argstr="--outputMultiSTAPLE %s", - hash_files=False, - ), - resampledVolumePrefix=dict( - argstr="--resampledVolumePrefix %s", - ), - skipResampling=dict( - argstr="--skipResampling ", + argstr="--outputConfusionMatrix %s", hash_files=False ), + outputMultiSTAPLE=dict(argstr="--outputMultiSTAPLE %s", hash_files=False), + resampledVolumePrefix=dict(argstr="--resampledVolumePrefix %s"), + 
skipResampling=dict(argstr="--skipResampling "), ) inputs = BRAINSMultiSTAPLE.input_spec() @@ -48,12 +28,8 @@ def test_BRAINSMultiSTAPLE_inputs(): def test_BRAINSMultiSTAPLE_outputs(): output_map = dict( - outputConfusionMatrix=dict( - extensions=None, - ), - outputMultiSTAPLE=dict( - extensions=None, - ), + outputConfusionMatrix=dict(extensions=None), + outputMultiSTAPLE=dict(extensions=None), ) outputs = BRAINSMultiSTAPLE.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py index f65c8c9b88..55643c74d4 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py @@ -4,49 +4,19 @@ def test_BRAINSROIAuto_inputs(): input_map = dict( - ROIAutoDilateSize=dict( - argstr="--ROIAutoDilateSize %f", - ), - args=dict( - argstr="%s", - ), - closingSize=dict( - argstr="--closingSize %f", - ), - cropOutput=dict( - argstr="--cropOutput ", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - maskOutput=dict( - argstr="--maskOutput ", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - otsuPercentileThreshold=dict( - argstr="--otsuPercentileThreshold %f", - ), - outputROIMaskVolume=dict( - argstr="--outputROIMaskVolume %s", - hash_files=False, - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - outputVolumePixelType=dict( - argstr="--outputVolumePixelType %s", - ), - thresholdCorrectionFactor=dict( - argstr="--thresholdCorrectionFactor %f", - ), + ROIAutoDilateSize=dict(argstr="--ROIAutoDilateSize %f"), + args=dict(argstr="%s"), + closingSize=dict(argstr="--closingSize %f"), + cropOutput=dict(argstr="--cropOutput "), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + maskOutput=dict(argstr="--maskOutput "), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + otsuPercentileThreshold=dict(argstr="--otsuPercentileThreshold %f"), + outputROIMaskVolume=dict(argstr="--outputROIMaskVolume %s", hash_files=False), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + outputVolumePixelType=dict(argstr="--outputVolumePixelType %s"), + thresholdCorrectionFactor=dict(argstr="--thresholdCorrectionFactor %f"), ) inputs = BRAINSROIAuto.input_spec() @@ -57,12 +27,7 @@ def test_BRAINSROIAuto_inputs(): def test_BRAINSROIAuto_outputs(): output_map = dict( - outputROIMaskVolume=dict( - extensions=None, - ), - outputVolume=dict( - extensions=None, - ), + outputROIMaskVolume=dict(extensions=None), outputVolume=dict(extensions=None) ) outputs = BRAINSROIAuto.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py index 1dd20adecd..a0ad0610f8 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py @@ -4,40 +4,22 @@ def test_BinaryMaskEditorBasedOnLandmarks_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputBinaryVolume=dict( - argstr="--inputBinaryVolume %s", - extensions=None, - ), - inputLandmarkNames=dict( - 
argstr="--inputLandmarkNames %s", - sep=",", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputBinaryVolume=dict(argstr="--inputBinaryVolume %s", extensions=None), + inputLandmarkNames=dict(argstr="--inputLandmarkNames %s", sep=","), inputLandmarkNamesForObliquePlane=dict( - argstr="--inputLandmarkNamesForObliquePlane %s", - sep=",", + argstr="--inputLandmarkNamesForObliquePlane %s", sep="," ), inputLandmarksFilename=dict( - argstr="--inputLandmarksFilename %s", - extensions=None, - ), - outputBinaryVolume=dict( - argstr="--outputBinaryVolume %s", - hash_files=False, + argstr="--inputLandmarksFilename %s", extensions=None ), + outputBinaryVolume=dict(argstr="--outputBinaryVolume %s", hash_files=False), setCutDirectionForLandmark=dict( - argstr="--setCutDirectionForLandmark %s", - sep=",", + argstr="--setCutDirectionForLandmark %s", sep="," ), setCutDirectionForObliquePlane=dict( - argstr="--setCutDirectionForObliquePlane %s", - sep=",", + argstr="--setCutDirectionForObliquePlane %s", sep="," ), ) inputs = BinaryMaskEditorBasedOnLandmarks.input_spec() @@ -48,11 +30,7 @@ def test_BinaryMaskEditorBasedOnLandmarks_inputs(): def test_BinaryMaskEditorBasedOnLandmarks_outputs(): - output_map = dict( - outputBinaryVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputBinaryVolume=dict(extensions=None)) outputs = BinaryMaskEditorBasedOnLandmarks.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py index 272327f4db..8ce8cdac7e 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py @@ -4,42 +4,17 @@ def test_ESLR_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - closingSize=dict( - argstr="--closingSize %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - high=dict( - argstr="--high %d", - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - low=dict( - argstr="--low %d", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - openingSize=dict( - argstr="--openingSize %d", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - preserveOutside=dict( - argstr="--preserveOutside ", - ), - safetySize=dict( - argstr="--safetySize %d", - ), + args=dict(argstr="%s"), + closingSize=dict(argstr="--closingSize %d"), + environ=dict(nohash=True, usedefault=True), + high=dict(argstr="--high %d"), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + low=dict(argstr="--low %d"), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + openingSize=dict(argstr="--openingSize %d"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + preserveOutside=dict(argstr="--preserveOutside "), + safetySize=dict(argstr="--safetySize %d"), ) inputs = ESLR.input_spec() @@ -49,11 +24,7 @@ def test_ESLR_inputs(): def test_ESLR_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = ESLR.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/tests/test_auto_DWICompare.py b/nipype/interfaces/semtools/tests/test_auto_DWICompare.py index b691c097a8..5b4312b22e 100644 --- a/nipype/interfaces/semtools/tests/test_auto_DWICompare.py +++ b/nipype/interfaces/semtools/tests/test_auto_DWICompare.py @@ 
-4,21 +4,10 @@ def test_DWICompare_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume1=dict( - argstr="--inputVolume1 %s", - extensions=None, - ), - inputVolume2=dict( - argstr="--inputVolume2 %s", - extensions=None, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputVolume1=dict(argstr="--inputVolume1 %s", extensions=None), + inputVolume2=dict(argstr="--inputVolume2 %s", extensions=None), ) inputs = DWICompare.input_spec() diff --git a/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py b/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py index ad4dcb12d4..3c2818ed53 100644 --- a/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py +++ b/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py @@ -4,24 +4,11 @@ def test_DWISimpleCompare_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - checkDWIData=dict( - argstr="--checkDWIData ", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume1=dict( - argstr="--inputVolume1 %s", - extensions=None, - ), - inputVolume2=dict( - argstr="--inputVolume2 %s", - extensions=None, - ), + args=dict(argstr="%s"), + checkDWIData=dict(argstr="--checkDWIData "), + environ=dict(nohash=True, usedefault=True), + inputVolume1=dict(argstr="--inputVolume1 %s", extensions=None), + inputVolume2=dict(argstr="--inputVolume2 %s", extensions=None), ) inputs = DWISimpleCompare.input_spec() diff --git a/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py b/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py index 3f857d8085..414fbe1291 100644 --- a/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py +++ b/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py @@ -4,21 +4,10 @@ def test_GenerateCsfClippedFromClassifiedImage_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputCassifiedVolume=dict( - argstr="--inputCassifiedVolume %s", - extensions=None, - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputCassifiedVolume=dict(argstr="--inputCassifiedVolume %s", extensions=None), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), ) inputs = GenerateCsfClippedFromClassifiedImage.input_spec() @@ -28,11 +17,7 @@ def test_GenerateCsfClippedFromClassifiedImage_inputs(): def test_GenerateCsfClippedFromClassifiedImage_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = GenerateCsfClippedFromClassifiedImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py index 81fee98e93..7464c0cc00 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py @@ -4,53 +4,22 @@ def test_BRAINSAlignMSP_inputs(): input_map = dict( - BackgroundFillValue=dict( - argstr="--BackgroundFillValue %s", - ), - OutputresampleMSP=dict( - argstr="--OutputresampleMSP %s", - hash_files=False, - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - 
usedefault=True, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - interpolationMode=dict( - argstr="--interpolationMode %s", - ), - mspQualityLevel=dict( - argstr="--mspQualityLevel %d", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - rescaleIntensities=dict( - argstr="--rescaleIntensities ", - ), + BackgroundFillValue=dict(argstr="--BackgroundFillValue %s"), + OutputresampleMSP=dict(argstr="--OutputresampleMSP %s", hash_files=False), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + interpolationMode=dict(argstr="--interpolationMode %s"), + mspQualityLevel=dict(argstr="--mspQualityLevel %d"), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + rescaleIntensities=dict(argstr="--rescaleIntensities "), rescaleIntensitiesOutputRange=dict( - argstr="--rescaleIntensitiesOutputRange %s", - sep=",", - ), - resultsDir=dict( - argstr="--resultsDir %s", - hash_files=False, - ), - trimRescaledIntensities=dict( - argstr="--trimRescaledIntensities %f", - ), - verbose=dict( - argstr="--verbose ", - ), - writedebuggingImagesLevel=dict( - argstr="--writedebuggingImagesLevel %d", + argstr="--rescaleIntensitiesOutputRange %s", sep="," ), + resultsDir=dict(argstr="--resultsDir %s", hash_files=False), + trimRescaledIntensities=dict(argstr="--trimRescaledIntensities %f"), + verbose=dict(argstr="--verbose "), + writedebuggingImagesLevel=dict(argstr="--writedebuggingImagesLevel %d"), ) inputs = BRAINSAlignMSP.input_spec() @@ -60,12 +29,7 @@ def test_BRAINSAlignMSP_inputs(): def test_BRAINSAlignMSP_outputs(): - output_map = dict( - OutputresampleMSP=dict( - extensions=None, - ), - resultsDir=dict(), - ) + output_map = dict(OutputresampleMSP=dict(extensions=None), resultsDir=dict()) outputs = BRAINSAlignMSP.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py index 92e45758b1..645b2c0664 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py @@ -4,30 +4,13 @@ def test_BRAINSClipInferior_inputs(): input_map = dict( - BackgroundFillValue=dict( - argstr="--BackgroundFillValue %s", - ), - acLowerBound=dict( - argstr="--acLowerBound %f", - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), + BackgroundFillValue=dict(argstr="--BackgroundFillValue %s"), + acLowerBound=dict(argstr="--acLowerBound %f"), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), ) inputs = BRAINSClipInferior.input_spec() @@ -37,11 +20,7 @@ def test_BRAINSClipInferior_inputs(): def test_BRAINSClipInferior_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = BRAINSClipInferior.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py index 2e26a91e05..ec9a115c16 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py @@ -4,56 +4,25 @@ def test_BRAINSConstellationModeler_inputs(): input_map = dict( - BackgroundFillValue=dict( - argstr="--BackgroundFillValue %s", - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputTrainingList=dict( - argstr="--inputTrainingList %s", - extensions=None, - ), - mspQualityLevel=dict( - argstr="--mspQualityLevel %d", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), + BackgroundFillValue=dict(argstr="--BackgroundFillValue %s"), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputTrainingList=dict(argstr="--inputTrainingList %s", extensions=None), + mspQualityLevel=dict(argstr="--mspQualityLevel %d"), + numberOfThreads=dict(argstr="--numberOfThreads %d"), optimizedLandmarksFilenameExtender=dict( - argstr="--optimizedLandmarksFilenameExtender %s", - ), - outputModel=dict( - argstr="--outputModel %s", - hash_files=False, - ), - rescaleIntensities=dict( - argstr="--rescaleIntensities ", + argstr="--optimizedLandmarksFilenameExtender %s" ), + outputModel=dict(argstr="--outputModel %s", hash_files=False), + rescaleIntensities=dict(argstr="--rescaleIntensities "), rescaleIntensitiesOutputRange=dict( - argstr="--rescaleIntensitiesOutputRange %s", - sep=",", - ), - resultsDir=dict( - argstr="--resultsDir %s", - hash_files=False, - ), - saveOptimizedLandmarks=dict( - argstr="--saveOptimizedLandmarks ", - ), - trimRescaledIntensities=dict( - argstr="--trimRescaledIntensities %f", - ), - verbose=dict( - argstr="--verbose ", - ), - writedebuggingImagesLevel=dict( - argstr="--writedebuggingImagesLevel %d", + argstr="--rescaleIntensitiesOutputRange %s", sep="," ), + resultsDir=dict(argstr="--resultsDir %s", hash_files=False), + saveOptimizedLandmarks=dict(argstr="--saveOptimizedLandmarks "), + trimRescaledIntensities=dict(argstr="--trimRescaledIntensities %f"), + verbose=dict(argstr="--verbose "), + writedebuggingImagesLevel=dict(argstr="--writedebuggingImagesLevel %d"), ) inputs = BRAINSConstellationModeler.input_spec() @@ -63,12 +32,7 @@ def test_BRAINSConstellationModeler_inputs(): def test_BRAINSConstellationModeler_outputs(): - output_map = dict( - outputModel=dict( - extensions=None, - ), - resultsDir=dict(), - ) + output_map = dict(outputModel=dict(extensions=None), resultsDir=dict()) outputs = BRAINSConstellationModeler.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py index c511be64c5..2de2ff7fd0 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py @@ -4,27 +4,12 @@ def test_BRAINSEyeDetector_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - debugDir=dict( - argstr="--debugDir %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputVolume=dict( - argstr="--outputVolume 
%s", - hash_files=False, - ), + args=dict(argstr="%s"), + debugDir=dict(argstr="--debugDir %s"), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), ) inputs = BRAINSEyeDetector.input_spec() @@ -34,11 +19,7 @@ def test_BRAINSEyeDetector_inputs(): def test_BRAINSEyeDetector_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = BRAINSEyeDetector.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py index 05ba1ae7d6..db35fef86e 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py @@ -4,35 +4,14 @@ def test_BRAINSInitializedControlPoints_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputLandmarksFile=dict( - argstr="--outputLandmarksFile %s", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - permuteOrder=dict( - argstr="--permuteOrder %s", - sep=",", - ), - splineGridSize=dict( - argstr="--splineGridSize %s", - sep=",", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputLandmarksFile=dict(argstr="--outputLandmarksFile %s"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + permuteOrder=dict(argstr="--permuteOrder %s", sep=","), + splineGridSize=dict(argstr="--splineGridSize %s", sep=","), ) inputs = BRAINSInitializedControlPoints.input_spec() @@ -42,11 +21,7 @@ def test_BRAINSInitializedControlPoints_inputs(): def test_BRAINSInitializedControlPoints_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = BRAINSInitializedControlPoints.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py index efaa484008..75bdf24ae5 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py @@ -4,28 +4,17 @@ def test_BRAINSLandmarkInitializer_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), inputFixedLandmarkFilename=dict( - argstr="--inputFixedLandmarkFilename %s", - extensions=None, + argstr="--inputFixedLandmarkFilename %s", extensions=None ), inputMovingLandmarkFilename=dict( - argstr="--inputMovingLandmarkFilename %s", - extensions=None, - ), - inputWeightFilename=dict( - argstr="--inputWeightFilename %s", - extensions=None, + argstr="--inputMovingLandmarkFilename %s", 
extensions=None ), + inputWeightFilename=dict(argstr="--inputWeightFilename %s", extensions=None), outputTransformFilename=dict( - argstr="--outputTransformFilename %s", - hash_files=False, + argstr="--outputTransformFilename %s", hash_files=False ), ) inputs = BRAINSLandmarkInitializer.input_spec() @@ -36,11 +25,7 @@ def test_BRAINSLandmarkInitializer_inputs(): def test_BRAINSLandmarkInitializer_outputs(): - output_map = dict( - outputTransformFilename=dict( - extensions=None, - ), - ) + output_map = dict(outputTransformFilename=dict(extensions=None)) outputs = BRAINSLandmarkInitializer.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py index beb7a5f664..911dad6116 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py @@ -4,20 +4,10 @@ def test_BRAINSLinearModelerEPCA_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputTrainingList=dict( - argstr="--inputTrainingList %s", - extensions=None, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputTrainingList=dict(argstr="--inputTrainingList %s", extensions=None), + numberOfThreads=dict(argstr="--numberOfThreads %d"), ) inputs = BRAINSLinearModelerEPCA.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py index d0cdc8cc7f..58c035444a 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py @@ -4,39 +4,18 @@ def test_BRAINSLmkTransform_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputFixedLandmarks=dict( - argstr="--inputFixedLandmarks %s", - extensions=None, - ), - inputMovingLandmarks=dict( - argstr="--inputMovingLandmarks %s", - extensions=None, - ), - inputMovingVolume=dict( - argstr="--inputMovingVolume %s", - extensions=None, - ), - inputReferenceVolume=dict( - argstr="--inputReferenceVolume %s", - extensions=None, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputFixedLandmarks=dict(argstr="--inputFixedLandmarks %s", extensions=None), + inputMovingLandmarks=dict(argstr="--inputMovingLandmarks %s", extensions=None), + inputMovingVolume=dict(argstr="--inputMovingVolume %s", extensions=None), + inputReferenceVolume=dict(argstr="--inputReferenceVolume %s", extensions=None), + numberOfThreads=dict(argstr="--numberOfThreads %d"), outputAffineTransform=dict( - argstr="--outputAffineTransform %s", - hash_files=False, + argstr="--outputAffineTransform %s", hash_files=False ), outputResampledVolume=dict( - argstr="--outputResampledVolume %s", - hash_files=False, + argstr="--outputResampledVolume %s", hash_files=False ), ) inputs = BRAINSLmkTransform.input_spec() @@ -48,12 +27,8 @@ def test_BRAINSLmkTransform_inputs(): def test_BRAINSLmkTransform_outputs(): output_map = dict( - outputAffineTransform=dict( - extensions=None, - ), - outputResampledVolume=dict( - extensions=None, - ), + 
outputAffineTransform=dict(extensions=None), + outputResampledVolume=dict(extensions=None), ) outputs = BRAINSLmkTransform.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py index e6eac9cf2d..85181b2078 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py @@ -4,70 +4,24 @@ def test_BRAINSMush_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - boundingBoxSize=dict( - argstr="--boundingBoxSize %s", - sep=",", - ), - boundingBoxStart=dict( - argstr="--boundingBoxStart %s", - sep=",", - ), - desiredMean=dict( - argstr="--desiredMean %f", - ), - desiredVariance=dict( - argstr="--desiredVariance %f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputFirstVolume=dict( - argstr="--inputFirstVolume %s", - extensions=None, - ), - inputMaskVolume=dict( - argstr="--inputMaskVolume %s", - extensions=None, - ), - inputSecondVolume=dict( - argstr="--inputSecondVolume %s", - extensions=None, - ), - lowerThresholdFactor=dict( - argstr="--lowerThresholdFactor %f", - ), - lowerThresholdFactorPre=dict( - argstr="--lowerThresholdFactorPre %f", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputMask=dict( - argstr="--outputMask %s", - hash_files=False, - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - outputWeightsFile=dict( - argstr="--outputWeightsFile %s", - hash_files=False, - ), - seed=dict( - argstr="--seed %s", - sep=",", - ), - upperThresholdFactor=dict( - argstr="--upperThresholdFactor %f", - ), - upperThresholdFactorPre=dict( - argstr="--upperThresholdFactorPre %f", - ), + args=dict(argstr="%s"), + boundingBoxSize=dict(argstr="--boundingBoxSize %s", sep=","), + boundingBoxStart=dict(argstr="--boundingBoxStart %s", sep=","), + desiredMean=dict(argstr="--desiredMean %f"), + desiredVariance=dict(argstr="--desiredVariance %f"), + environ=dict(nohash=True, usedefault=True), + inputFirstVolume=dict(argstr="--inputFirstVolume %s", extensions=None), + inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None), + inputSecondVolume=dict(argstr="--inputSecondVolume %s", extensions=None), + lowerThresholdFactor=dict(argstr="--lowerThresholdFactor %f"), + lowerThresholdFactorPre=dict(argstr="--lowerThresholdFactorPre %f"), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputMask=dict(argstr="--outputMask %s", hash_files=False), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + outputWeightsFile=dict(argstr="--outputWeightsFile %s", hash_files=False), + seed=dict(argstr="--seed %s", sep=","), + upperThresholdFactor=dict(argstr="--upperThresholdFactor %f"), + upperThresholdFactorPre=dict(argstr="--upperThresholdFactorPre %f"), ) inputs = BRAINSMush.input_spec() @@ -78,15 +32,9 @@ def test_BRAINSMush_inputs(): def test_BRAINSMush_outputs(): output_map = dict( - outputMask=dict( - extensions=None, - ), - outputVolume=dict( - extensions=None, - ), - outputWeightsFile=dict( - extensions=None, - ), + outputMask=dict(extensions=None), + outputVolume=dict(extensions=None), + outputWeightsFile=dict(extensions=None), ) outputs = BRAINSMush.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py index a120b062a4..a18785c071 100644 --- 
a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py @@ -4,39 +4,21 @@ def test_BRAINSSnapShotWriter_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputBinaryVolumes=dict( - argstr="--inputBinaryVolumes %s...", - ), - inputPlaneDirection=dict( - argstr="--inputPlaneDirection %s", - sep=",", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputBinaryVolumes=dict(argstr="--inputBinaryVolumes %s..."), + inputPlaneDirection=dict(argstr="--inputPlaneDirection %s", sep=","), inputSliceToExtractInIndex=dict( - argstr="--inputSliceToExtractInIndex %s", - sep=",", + argstr="--inputSliceToExtractInIndex %s", sep="," ), inputSliceToExtractInPercent=dict( - argstr="--inputSliceToExtractInPercent %s", - sep=",", + argstr="--inputSliceToExtractInPercent %s", sep="," ), inputSliceToExtractInPhysicalPoint=dict( - argstr="--inputSliceToExtractInPhysicalPoint %s", - sep=",", - ), - inputVolumes=dict( - argstr="--inputVolumes %s...", - ), - outputFilename=dict( - argstr="--outputFilename %s", - hash_files=False, + argstr="--inputSliceToExtractInPhysicalPoint %s", sep="," ), + inputVolumes=dict(argstr="--inputVolumes %s..."), + outputFilename=dict(argstr="--outputFilename %s", hash_files=False), ) inputs = BRAINSSnapShotWriter.input_spec() @@ -46,11 +28,7 @@ def test_BRAINSSnapShotWriter_inputs(): def test_BRAINSSnapShotWriter_outputs(): - output_map = dict( - outputFilename=dict( - extensions=None, - ), - ) + output_map = dict(outputFilename=dict(extensions=None)) outputs = BRAINSSnapShotWriter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py index 1eebbb0cec..d47e38deb3 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py @@ -4,35 +4,14 @@ def test_BRAINSTransformConvert_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - displacementVolume=dict( - argstr="--displacementVolume %s", - hash_files=False, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputTransform=dict( - argstr="--inputTransform %s", - extensions=None, - ), - outputPrecisionType=dict( - argstr="--outputPrecisionType %s", - ), - outputTransform=dict( - argstr="--outputTransform %s", - hash_files=False, - ), - outputTransformType=dict( - argstr="--outputTransformType %s", - ), - referenceVolume=dict( - argstr="--referenceVolume %s", - extensions=None, - ), + args=dict(argstr="%s"), + displacementVolume=dict(argstr="--displacementVolume %s", hash_files=False), + environ=dict(nohash=True, usedefault=True), + inputTransform=dict(argstr="--inputTransform %s", extensions=None), + outputPrecisionType=dict(argstr="--outputPrecisionType %s"), + outputTransform=dict(argstr="--outputTransform %s", hash_files=False), + outputTransformType=dict(argstr="--outputTransformType %s"), + referenceVolume=dict(argstr="--referenceVolume %s", extensions=None), ) inputs = BRAINSTransformConvert.input_spec() @@ -43,12 +22,7 @@ def test_BRAINSTransformConvert_inputs(): def test_BRAINSTransformConvert_outputs(): output_map = dict( - displacementVolume=dict( - extensions=None, - ), - outputTransform=dict( - extensions=None, - ), + 
displacementVolume=dict(extensions=None), outputTransform=dict(extensions=None) ) outputs = BRAINSTransformConvert.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py index fa68f51e21..ee4699326d 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py @@ -4,39 +4,16 @@ def test_BRAINSTrimForegroundInDirection_inputs(): input_map = dict( - BackgroundFillValue=dict( - argstr="--BackgroundFillValue %s", - ), - args=dict( - argstr="%s", - ), - closingSize=dict( - argstr="--closingSize %d", - ), - directionCode=dict( - argstr="--directionCode %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - headSizeLimit=dict( - argstr="--headSizeLimit %f", - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - otsuPercentileThreshold=dict( - argstr="--otsuPercentileThreshold %f", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), + BackgroundFillValue=dict(argstr="--BackgroundFillValue %s"), + args=dict(argstr="%s"), + closingSize=dict(argstr="--closingSize %d"), + directionCode=dict(argstr="--directionCode %d"), + environ=dict(nohash=True, usedefault=True), + headSizeLimit=dict(argstr="--headSizeLimit %f"), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + otsuPercentileThreshold=dict(argstr="--otsuPercentileThreshold %f"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), ) inputs = BRAINSTrimForegroundInDirection.input_spec() @@ -46,11 +23,7 @@ def test_BRAINSTrimForegroundInDirection_inputs(): def test_BRAINSTrimForegroundInDirection_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = BRAINSTrimForegroundInDirection.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py b/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py index 14ec09298c..78fda5d9ad 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py @@ -4,19 +4,11 @@ def test_CleanUpOverlapLabels_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputBinaryVolumes=dict( - argstr="--inputBinaryVolumes %s...", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputBinaryVolumes=dict(argstr="--inputBinaryVolumes %s..."), outputBinaryVolumes=dict( - argstr="--outputBinaryVolumes %s...", - hash_files=False, + argstr="--outputBinaryVolumes %s...", hash_files=False ), ) inputs = CleanUpOverlapLabels.input_spec() @@ -27,9 +19,7 @@ def test_CleanUpOverlapLabels_inputs(): def test_CleanUpOverlapLabels_outputs(): - output_map = dict( - outputBinaryVolumes=dict(), - ) + output_map = dict(outputBinaryVolumes=dict()) outputs = CleanUpOverlapLabels.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py 
b/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py index 4a0d7c89c2..5ae2523f7f 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py @@ -4,69 +4,28 @@ def test_FindCenterOfBrain_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - axis=dict( - argstr="--axis %d", - ), - backgroundValue=dict( - argstr="--backgroundValue %d", - ), - clippedImageMask=dict( - argstr="--clippedImageMask %s", - hash_files=False, - ), - closingSize=dict( - argstr="--closingSize %d", - ), + args=dict(argstr="%s"), + axis=dict(argstr="--axis %d"), + backgroundValue=dict(argstr="--backgroundValue %d"), + clippedImageMask=dict(argstr="--clippedImageMask %s", hash_files=False), + closingSize=dict(argstr="--closingSize %d"), debugAfterGridComputationsForegroundImage=dict( - argstr="--debugAfterGridComputationsForegroundImage %s", - hash_files=False, + argstr="--debugAfterGridComputationsForegroundImage %s", hash_files=False ), debugClippedImageMask=dict( - argstr="--debugClippedImageMask %s", - hash_files=False, - ), - debugDistanceImage=dict( - argstr="--debugDistanceImage %s", - hash_files=False, - ), - debugGridImage=dict( - argstr="--debugGridImage %s", - hash_files=False, - ), - debugTrimmedImage=dict( - argstr="--debugTrimmedImage %s", - hash_files=False, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - generateDebugImages=dict( - argstr="--generateDebugImages ", - ), - headSizeEstimate=dict( - argstr="--headSizeEstimate %f", - ), - headSizeLimit=dict( - argstr="--headSizeLimit %f", - ), - imageMask=dict( - argstr="--imageMask %s", - extensions=None, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - maximize=dict( - argstr="--maximize ", - ), - otsuPercentileThreshold=dict( - argstr="--otsuPercentileThreshold %f", - ), + argstr="--debugClippedImageMask %s", hash_files=False + ), + debugDistanceImage=dict(argstr="--debugDistanceImage %s", hash_files=False), + debugGridImage=dict(argstr="--debugGridImage %s", hash_files=False), + debugTrimmedImage=dict(argstr="--debugTrimmedImage %s", hash_files=False), + environ=dict(nohash=True, usedefault=True), + generateDebugImages=dict(argstr="--generateDebugImages "), + headSizeEstimate=dict(argstr="--headSizeEstimate %f"), + headSizeLimit=dict(argstr="--headSizeLimit %f"), + imageMask=dict(argstr="--imageMask %s", extensions=None), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + maximize=dict(argstr="--maximize "), + otsuPercentileThreshold=dict(argstr="--otsuPercentileThreshold %f"), ) inputs = FindCenterOfBrain.input_spec() @@ -77,24 +36,12 @@ def test_FindCenterOfBrain_inputs(): def test_FindCenterOfBrain_outputs(): output_map = dict( - clippedImageMask=dict( - extensions=None, - ), - debugAfterGridComputationsForegroundImage=dict( - extensions=None, - ), - debugClippedImageMask=dict( - extensions=None, - ), - debugDistanceImage=dict( - extensions=None, - ), - debugGridImage=dict( - extensions=None, - ), - debugTrimmedImage=dict( - extensions=None, - ), + clippedImageMask=dict(extensions=None), + debugAfterGridComputationsForegroundImage=dict(extensions=None), + debugClippedImageMask=dict(extensions=None), + debugDistanceImage=dict(extensions=None), + debugGridImage=dict(extensions=None), + debugTrimmedImage=dict(extensions=None), ) outputs = FindCenterOfBrain.output_spec() diff --git 
a/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py b/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py index d3840ec7bd..ecdc4dd682 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py @@ -4,23 +4,11 @@ def test_GenerateLabelMapFromProbabilityMap_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolumes=dict( - argstr="--inputVolumes %s...", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputLabelVolume=dict( - argstr="--outputLabelVolume %s", - hash_files=False, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputVolumes=dict(argstr="--inputVolumes %s..."), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputLabelVolume=dict(argstr="--outputLabelVolume %s", hash_files=False), ) inputs = GenerateLabelMapFromProbabilityMap.input_spec() @@ -30,11 +18,7 @@ def test_GenerateLabelMapFromProbabilityMap_inputs(): def test_GenerateLabelMapFromProbabilityMap_outputs(): - output_map = dict( - outputLabelVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputLabelVolume=dict(extensions=None)) outputs = GenerateLabelMapFromProbabilityMap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py b/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py index 5b71204a67..2159df3114 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py @@ -4,44 +4,17 @@ def test_ImageRegionPlotter_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputBinaryROIVolume=dict( - argstr="--inputBinaryROIVolume %s", - extensions=None, - ), - inputLabelVolume=dict( - argstr="--inputLabelVolume %s", - extensions=None, - ), - inputVolume1=dict( - argstr="--inputVolume1 %s", - extensions=None, - ), - inputVolume2=dict( - argstr="--inputVolume2 %s", - extensions=None, - ), - numberOfHistogramBins=dict( - argstr="--numberOfHistogramBins %d", - ), - outputJointHistogramData=dict( - argstr="--outputJointHistogramData %s", - ), - useIntensityForHistogram=dict( - argstr="--useIntensityForHistogram ", - ), - useROIAUTO=dict( - argstr="--useROIAUTO ", - ), - verbose=dict( - argstr="--verbose ", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputBinaryROIVolume=dict(argstr="--inputBinaryROIVolume %s", extensions=None), + inputLabelVolume=dict(argstr="--inputLabelVolume %s", extensions=None), + inputVolume1=dict(argstr="--inputVolume1 %s", extensions=None), + inputVolume2=dict(argstr="--inputVolume2 %s", extensions=None), + numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d"), + outputJointHistogramData=dict(argstr="--outputJointHistogramData %s"), + useIntensityForHistogram=dict(argstr="--useIntensityForHistogram "), + useROIAUTO=dict(argstr="--useROIAUTO "), + verbose=dict(argstr="--verbose "), ) inputs = ImageRegionPlotter.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py b/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py index d5e61c867a..33ba5789f6 100644 --- 
a/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py @@ -4,35 +4,18 @@ def test_JointHistogram_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), inputMaskVolumeInXAxis=dict( - argstr="--inputMaskVolumeInXAxis %s", - extensions=None, + argstr="--inputMaskVolumeInXAxis %s", extensions=None ), inputMaskVolumeInYAxis=dict( - argstr="--inputMaskVolumeInYAxis %s", - extensions=None, - ), - inputVolumeInXAxis=dict( - argstr="--inputVolumeInXAxis %s", - extensions=None, - ), - inputVolumeInYAxis=dict( - argstr="--inputVolumeInYAxis %s", - extensions=None, - ), - outputJointHistogramImage=dict( - argstr="--outputJointHistogramImage %s", - ), - verbose=dict( - argstr="--verbose ", + argstr="--inputMaskVolumeInYAxis %s", extensions=None ), + inputVolumeInXAxis=dict(argstr="--inputVolumeInXAxis %s", extensions=None), + inputVolumeInYAxis=dict(argstr="--inputVolumeInYAxis %s", extensions=None), + outputJointHistogramImage=dict(argstr="--outputJointHistogramImage %s"), + verbose=dict(argstr="--verbose "), ) inputs = JointHistogram.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py b/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py index 4b7d3431bd..77f9ceb548 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py @@ -4,24 +4,15 @@ def test_ShuffleVectorsModule_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), inputVectorFileBaseName=dict( - argstr="--inputVectorFileBaseName %s", - extensions=None, + argstr="--inputVectorFileBaseName %s", extensions=None ), outputVectorFileBaseName=dict( - argstr="--outputVectorFileBaseName %s", - hash_files=False, - ), - resampleProportion=dict( - argstr="--resampleProportion %f", + argstr="--outputVectorFileBaseName %s", hash_files=False ), + resampleProportion=dict(argstr="--resampleProportion %f"), ) inputs = ShuffleVectorsModule.input_spec() @@ -31,11 +22,7 @@ def test_ShuffleVectorsModule_inputs(): def test_ShuffleVectorsModule_outputs(): - output_map = dict( - outputVectorFileBaseName=dict( - extensions=None, - ), - ) + output_map = dict(outputVectorFileBaseName=dict(extensions=None)) outputs = ShuffleVectorsModule.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py b/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py index 896f630839..c95c635974 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py @@ -4,34 +4,16 @@ def test_fcsv_to_hdf5_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - landmarkGlobPattern=dict( - argstr="--landmarkGlobPattern %s", - ), - landmarkTypesList=dict( - argstr="--landmarkTypesList %s", - extensions=None, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + landmarkGlobPattern=dict(argstr="--landmarkGlobPattern %s"), + landmarkTypesList=dict(argstr="--landmarkTypesList %s", 
extensions=None), landmarksInformationFile=dict( - argstr="--landmarksInformationFile %s", - hash_files=False, - ), - modelFile=dict( - argstr="--modelFile %s", - hash_files=False, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - versionID=dict( - argstr="--versionID %s", + argstr="--landmarksInformationFile %s", hash_files=False ), + modelFile=dict(argstr="--modelFile %s", hash_files=False), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + versionID=dict(argstr="--versionID %s"), ) inputs = fcsv_to_hdf5.input_spec() @@ -42,12 +24,7 @@ def test_fcsv_to_hdf5_inputs(): def test_fcsv_to_hdf5_outputs(): output_map = dict( - landmarksInformationFile=dict( - extensions=None, - ), - modelFile=dict( - extensions=None, - ), + landmarksInformationFile=dict(extensions=None), modelFile=dict(extensions=None) ) outputs = fcsv_to_hdf5.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py b/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py index 05aa2a3910..39b0fa64bc 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py @@ -4,21 +4,10 @@ def test_insertMidACPCpoint_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputLandmarkFile=dict( - argstr="--inputLandmarkFile %s", - extensions=None, - ), - outputLandmarkFile=dict( - argstr="--outputLandmarkFile %s", - hash_files=False, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputLandmarkFile=dict(argstr="--inputLandmarkFile %s", extensions=None), + outputLandmarkFile=dict(argstr="--outputLandmarkFile %s", hash_files=False), ) inputs = insertMidACPCpoint.input_spec() @@ -28,11 +17,7 @@ def test_insertMidACPCpoint_inputs(): def test_insertMidACPCpoint_outputs(): - output_map = dict( - outputLandmarkFile=dict( - extensions=None, - ), - ) + output_map = dict(outputLandmarkFile=dict(extensions=None)) outputs = insertMidACPCpoint.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py index 472a1326e0..09d2426eb9 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py @@ -4,20 +4,11 @@ def test_landmarksConstellationAligner_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputLandmarksPaired=dict( - argstr="--inputLandmarksPaired %s", - extensions=None, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputLandmarksPaired=dict(argstr="--inputLandmarksPaired %s", extensions=None), outputLandmarksPaired=dict( - argstr="--outputLandmarksPaired %s", - hash_files=False, + argstr="--outputLandmarksPaired %s", hash_files=False ), ) inputs = landmarksConstellationAligner.input_spec() @@ -28,11 +19,7 @@ def test_landmarksConstellationAligner_inputs(): def test_landmarksConstellationAligner_outputs(): - output_map = dict( - outputLandmarksPaired=dict( - extensions=None, - ), - ) + output_map = dict(outputLandmarksPaired=dict(extensions=None)) outputs = landmarksConstellationAligner.output_spec() for key, metadata in list(output_map.items()): 
diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py index c14fdff775..f41a5c2a5a 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py @@ -4,29 +4,12 @@ def test_landmarksConstellationWeights_inputs(): input_map = dict( - LLSModel=dict( - argstr="--LLSModel %s", - extensions=None, - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputTemplateModel=dict( - argstr="--inputTemplateModel %s", - extensions=None, - ), - inputTrainingList=dict( - argstr="--inputTrainingList %s", - extensions=None, - ), - outputWeightsList=dict( - argstr="--outputWeightsList %s", - hash_files=False, - ), + LLSModel=dict(argstr="--LLSModel %s", extensions=None), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputTemplateModel=dict(argstr="--inputTemplateModel %s", extensions=None), + inputTrainingList=dict(argstr="--inputTrainingList %s", extensions=None), + outputWeightsList=dict(argstr="--outputWeightsList %s", hash_files=False), ) inputs = landmarksConstellationWeights.input_spec() @@ -36,11 +19,7 @@ def test_landmarksConstellationWeights_inputs(): def test_landmarksConstellationWeights_outputs(): - output_map = dict( - outputWeightsList=dict( - extensions=None, - ), - ) + output_map = dict(outputWeightsList=dict(extensions=None)) outputs = landmarksConstellationWeights.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py index 9017db6760..3b2e3d9909 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py @@ -4,23 +4,10 @@ def test_DTIexport_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputTensor=dict( - argstr="%s", - extensions=None, - position=-2, - ), - outputFile=dict( - argstr="%s", - hash_files=False, - position=-1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputTensor=dict(argstr="%s", extensions=None, position=-2), + outputFile=dict(argstr="%s", hash_files=False, position=-1), ) inputs = DTIexport.input_spec() @@ -30,12 +17,7 @@ def test_DTIexport_inputs(): def test_DTIexport_outputs(): - output_map = dict( - outputFile=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(outputFile=dict(extensions=None, position=-1)) outputs = DTIexport.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py index 802d4ce9bc..c9588f6c71 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py @@ -4,26 +4,11 @@ def test_DTIimport_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputFile=dict( - argstr="%s", - extensions=None, - position=-2, - ), - outputTensor=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - testingmode=dict( - argstr="--testingmode ", - ), + args=dict(argstr="%s"), + 
environ=dict(nohash=True, usedefault=True), + inputFile=dict(argstr="%s", extensions=None, position=-2), + outputTensor=dict(argstr="%s", hash_files=False, position=-1), + testingmode=dict(argstr="--testingmode "), ) inputs = DTIimport.input_spec() @@ -33,12 +18,7 @@ def test_DTIimport_inputs(): def test_DTIimport_outputs(): - output_map = dict( - outputTensor=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(outputTensor=dict(extensions=None, position=-1)) outputs = DTIimport.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py index e004599c12..381b97b677 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py @@ -4,37 +4,14 @@ def test_DWIJointRicianLMMSEFilter_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - compressOutput=dict( - argstr="--compressOutput ", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - ng=dict( - argstr="--ng %d", - ), - outputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - re=dict( - argstr="--re %s", - sep=",", - ), - rf=dict( - argstr="--rf %s", - sep=",", - ), + args=dict(argstr="%s"), + compressOutput=dict(argstr="--compressOutput "), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="%s", extensions=None, position=-2), + ng=dict(argstr="--ng %d"), + outputVolume=dict(argstr="%s", hash_files=False, position=-1), + re=dict(argstr="--re %s", sep=","), + rf=dict(argstr="--rf %s", sep=","), ) inputs = DWIJointRicianLMMSEFilter.input_spec() @@ -44,12 +21,7 @@ def test_DWIJointRicianLMMSEFilter_inputs(): def test_DWIJointRicianLMMSEFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(outputVolume=dict(extensions=None, position=-1)) outputs = DWIJointRicianLMMSEFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py index 3c38117737..f167c84564 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py @@ -4,55 +4,20 @@ def test_DWIRicianLMMSEFilter_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - compressOutput=dict( - argstr="--compressOutput ", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - hrf=dict( - argstr="--hrf %f", - ), - inputVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - iter=dict( - argstr="--iter %d", - ), - maxnstd=dict( - argstr="--maxnstd %d", - ), - minnstd=dict( - argstr="--minnstd %d", - ), - mnve=dict( - argstr="--mnve %d", - ), - mnvf=dict( - argstr="--mnvf %d", - ), - outputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - re=dict( - argstr="--re %s", - sep=",", - ), - rf=dict( - argstr="--rf %s", - sep=",", - ), - uav=dict( - argstr="--uav ", - ), + args=dict(argstr="%s"), + compressOutput=dict(argstr="--compressOutput "), + environ=dict(nohash=True, usedefault=True), + hrf=dict(argstr="--hrf %f"), + inputVolume=dict(argstr="%s", extensions=None, position=-2), + 
iter=dict(argstr="--iter %d"), + maxnstd=dict(argstr="--maxnstd %d"), + minnstd=dict(argstr="--minnstd %d"), + mnve=dict(argstr="--mnve %d"), + mnvf=dict(argstr="--mnvf %d"), + outputVolume=dict(argstr="%s", hash_files=False, position=-1), + re=dict(argstr="--re %s", sep=","), + rf=dict(argstr="--rf %s", sep=","), + uav=dict(argstr="--uav "), ) inputs = DWIRicianLMMSEFilter.input_spec() @@ -62,12 +27,7 @@ def test_DWIRicianLMMSEFilter_inputs(): def test_DWIRicianLMMSEFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(outputVolume=dict(extensions=None, position=-1)) outputs = DWIRicianLMMSEFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py index 76b305283c..a17f9a8e9d 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py @@ -4,38 +4,14 @@ def test_DWIToDTIEstimation_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - enumeration=dict( - argstr="--enumeration %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="%s", - extensions=None, - position=-3, - ), - mask=dict( - argstr="--mask %s", - extensions=None, - ), - outputBaseline=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - outputTensor=dict( - argstr="%s", - hash_files=False, - position=-2, - ), - shiftNeg=dict( - argstr="--shiftNeg ", - ), + args=dict(argstr="%s"), + enumeration=dict(argstr="--enumeration %s"), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="%s", extensions=None, position=-3), + mask=dict(argstr="--mask %s", extensions=None), + outputBaseline=dict(argstr="%s", hash_files=False, position=-1), + outputTensor=dict(argstr="%s", hash_files=False, position=-2), + shiftNeg=dict(argstr="--shiftNeg "), ) inputs = DWIToDTIEstimation.input_spec() @@ -46,14 +22,8 @@ def test_DWIToDTIEstimation_inputs(): def test_DWIToDTIEstimation_outputs(): output_map = dict( - outputBaseline=dict( - extensions=None, - position=-1, - ), - outputTensor=dict( - extensions=None, - position=-2, - ), + outputBaseline=dict(extensions=None, position=-1), + outputTensor=dict(extensions=None, position=-2), ) outputs = DWIToDTIEstimation.output_spec() diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py index 143194f493..11f556ff53 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py @@ -4,26 +4,11 @@ def test_DiffusionTensorScalarMeasurements_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - enumeration=dict( - argstr="--enumeration %s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="%s", - extensions=None, - position=-3, - ), - outputScalar=dict( - argstr="%s", - hash_files=False, - position=-1, - ), + args=dict(argstr="%s"), + enumeration=dict(argstr="--enumeration %s"), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="%s", extensions=None, position=-3), + outputScalar=dict(argstr="%s", hash_files=False, position=-1), ) inputs = 
DiffusionTensorScalarMeasurements.input_spec() @@ -33,12 +18,7 @@ def test_DiffusionTensorScalarMeasurements_inputs(): def test_DiffusionTensorScalarMeasurements_outputs(): - output_map = dict( - outputScalar=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(outputScalar=dict(extensions=None, position=-1)) outputs = DiffusionTensorScalarMeasurements.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py index 5b11d2f578..85dfe36117 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py @@ -4,34 +4,13 @@ def test_DiffusionWeightedVolumeMasking_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="%s", - extensions=None, - position=-4, - ), - otsuomegathreshold=dict( - argstr="--otsuomegathreshold %f", - ), - outputBaseline=dict( - argstr="%s", - hash_files=False, - position=-2, - ), - removeislands=dict( - argstr="--removeislands ", - ), - thresholdMask=dict( - argstr="%s", - hash_files=False, - position=-1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="%s", extensions=None, position=-4), + otsuomegathreshold=dict(argstr="--otsuomegathreshold %f"), + outputBaseline=dict(argstr="%s", hash_files=False, position=-2), + removeislands=dict(argstr="--removeislands "), + thresholdMask=dict(argstr="%s", hash_files=False, position=-1), ) inputs = DiffusionWeightedVolumeMasking.input_spec() @@ -42,14 +21,8 @@ def test_DiffusionWeightedVolumeMasking_inputs(): def test_DiffusionWeightedVolumeMasking_outputs(): output_map = dict( - outputBaseline=dict( - extensions=None, - position=-2, - ), - thresholdMask=dict( - extensions=None, - position=-1, - ), + outputBaseline=dict(extensions=None, position=-2), + thresholdMask=dict(extensions=None, position=-1), ) outputs = DiffusionWeightedVolumeMasking.output_spec() diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py index c52bb5357d..e0f4e56932 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py @@ -4,102 +4,34 @@ def test_ResampleDTIVolume_inputs(): input_map = dict( - Inverse_ITK_Transformation=dict( - argstr="--Inverse_ITK_Transformation ", - ), - Reference=dict( - argstr="--Reference %s", - extensions=None, - ), - args=dict( - argstr="%s", - ), - centered_transform=dict( - argstr="--centered_transform ", - ), - correction=dict( - argstr="--correction %s", - ), - defField=dict( - argstr="--defField %s", - extensions=None, - ), - default_pixel_value=dict( - argstr="--default_pixel_value %f", - ), - direction_matrix=dict( - argstr="--direction_matrix %s", - sep=",", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - hfieldtype=dict( - argstr="--hfieldtype %s", - ), - image_center=dict( - argstr="--image_center %s", - ), - inputVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - interpolation=dict( - argstr="--interpolation %s", - ), - notbulk=dict( - argstr="--notbulk ", - ), - number_of_thread=dict( - argstr="--number_of_thread 
%d", - ), - origin=dict( - argstr="--origin %s", - ), - outputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - rotation_point=dict( - argstr="--rotation_point %s", - ), - size=dict( - argstr="--size %s", - sep=",", - ), - spaceChange=dict( - argstr="--spaceChange ", - ), - spacing=dict( - argstr="--spacing %s", - sep=",", - ), - spline_order=dict( - argstr="--spline_order %d", - ), - transform=dict( - argstr="--transform %s", - ), - transform_matrix=dict( - argstr="--transform_matrix %s", - sep=",", - ), - transform_order=dict( - argstr="--transform_order %s", - ), - transform_tensor_method=dict( - argstr="--transform_tensor_method %s", - ), - transformationFile=dict( - argstr="--transformationFile %s", - extensions=None, - ), - window_function=dict( - argstr="--window_function %s", - ), + Inverse_ITK_Transformation=dict(argstr="--Inverse_ITK_Transformation "), + Reference=dict(argstr="--Reference %s", extensions=None), + args=dict(argstr="%s"), + centered_transform=dict(argstr="--centered_transform "), + correction=dict(argstr="--correction %s"), + defField=dict(argstr="--defField %s", extensions=None), + default_pixel_value=dict(argstr="--default_pixel_value %f"), + direction_matrix=dict(argstr="--direction_matrix %s", sep=","), + environ=dict(nohash=True, usedefault=True), + hfieldtype=dict(argstr="--hfieldtype %s"), + image_center=dict(argstr="--image_center %s"), + inputVolume=dict(argstr="%s", extensions=None, position=-2), + interpolation=dict(argstr="--interpolation %s"), + notbulk=dict(argstr="--notbulk "), + number_of_thread=dict(argstr="--number_of_thread %d"), + origin=dict(argstr="--origin %s"), + outputVolume=dict(argstr="%s", hash_files=False, position=-1), + rotation_point=dict(argstr="--rotation_point %s"), + size=dict(argstr="--size %s", sep=","), + spaceChange=dict(argstr="--spaceChange "), + spacing=dict(argstr="--spacing %s", sep=","), + spline_order=dict(argstr="--spline_order %d"), + transform=dict(argstr="--transform %s"), + transform_matrix=dict(argstr="--transform_matrix %s", sep=","), + transform_order=dict(argstr="--transform_order %s"), + transform_tensor_method=dict(argstr="--transform_tensor_method %s"), + transformationFile=dict(argstr="--transformationFile %s", extensions=None), + window_function=dict(argstr="--window_function %s"), ) inputs = ResampleDTIVolume.input_spec() @@ -109,12 +41,7 @@ def test_ResampleDTIVolume_inputs(): def test_ResampleDTIVolume_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(outputVolume=dict(extensions=None, position=-1)) outputs = ResampleDTIVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py index f8b1a3ddff..e28b877a19 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py @@ -4,70 +4,25 @@ def test_TractographyLabelMapSeeding_inputs(): input_map = dict( - InputVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - OutputFibers=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - args=dict( - argstr="%s", - ), - clthreshold=dict( - argstr="--clthreshold %f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputroi=dict( - argstr="--inputroi %s", - extensions=None, - ), - 
integrationsteplength=dict( - argstr="--integrationsteplength %f", - ), - label=dict( - argstr="--label %d", - ), - maximumlength=dict( - argstr="--maximumlength %f", - ), - minimumlength=dict( - argstr="--minimumlength %f", - ), - name=dict( - argstr="--name %s", - ), - outputdirectory=dict( - argstr="--outputdirectory %s", - hash_files=False, - ), - randomgrid=dict( - argstr="--randomgrid ", - ), - seedspacing=dict( - argstr="--seedspacing %f", - ), - stoppingcurvature=dict( - argstr="--stoppingcurvature %f", - ), - stoppingmode=dict( - argstr="--stoppingmode %s", - ), - stoppingvalue=dict( - argstr="--stoppingvalue %f", - ), - useindexspace=dict( - argstr="--useindexspace ", - ), - writetofile=dict( - argstr="--writetofile ", - ), + InputVolume=dict(argstr="%s", extensions=None, position=-2), + OutputFibers=dict(argstr="%s", hash_files=False, position=-1), + args=dict(argstr="%s"), + clthreshold=dict(argstr="--clthreshold %f"), + environ=dict(nohash=True, usedefault=True), + inputroi=dict(argstr="--inputroi %s", extensions=None), + integrationsteplength=dict(argstr="--integrationsteplength %f"), + label=dict(argstr="--label %d"), + maximumlength=dict(argstr="--maximumlength %f"), + minimumlength=dict(argstr="--minimumlength %f"), + name=dict(argstr="--name %s"), + outputdirectory=dict(argstr="--outputdirectory %s", hash_files=False), + randomgrid=dict(argstr="--randomgrid "), + seedspacing=dict(argstr="--seedspacing %f"), + stoppingcurvature=dict(argstr="--stoppingcurvature %f"), + stoppingmode=dict(argstr="--stoppingmode %s"), + stoppingvalue=dict(argstr="--stoppingvalue %f"), + useindexspace=dict(argstr="--useindexspace "), + writetofile=dict(argstr="--writetofile "), ) inputs = TractographyLabelMapSeeding.input_spec() @@ -78,11 +33,7 @@ def test_TractographyLabelMapSeeding_inputs(): def test_TractographyLabelMapSeeding_outputs(): output_map = dict( - OutputFibers=dict( - extensions=None, - position=-1, - ), - outputdirectory=dict(), + OutputFibers=dict(extensions=None, position=-1), outputdirectory=dict() ) outputs = TractographyLabelMapSeeding.output_spec() diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py index 2cd0ac229d..b4b9a19029 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py @@ -4,31 +4,12 @@ def test_AddScalarVolumes_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume1=dict( - argstr="%s", - extensions=None, - position=-3, - ), - inputVolume2=dict( - argstr="%s", - extensions=None, - position=-2, - ), - order=dict( - argstr="--order %s", - ), - outputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputVolume1=dict(argstr="%s", extensions=None, position=-3), + inputVolume2=dict(argstr="%s", extensions=None, position=-2), + order=dict(argstr="--order %s"), + outputVolume=dict(argstr="%s", hash_files=False, position=-1), ) inputs = AddScalarVolumes.input_spec() @@ -38,12 +19,7 @@ def test_AddScalarVolumes_inputs(): def test_AddScalarVolumes_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(outputVolume=dict(extensions=None, position=-1)) outputs = AddScalarVolumes.output_spec() for key, metadata in list(output_map.items()): 
diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py index 8417ab1a90..08641f03a6 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py @@ -4,26 +4,11 @@ def test_CastScalarVolume_inputs(): input_map = dict( - InputVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - OutputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - type=dict( - argstr="--type %s", - ), + InputVolume=dict(argstr="%s", extensions=None, position=-2), + OutputVolume=dict(argstr="%s", hash_files=False, position=-1), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + type=dict(argstr="--type %s"), ) inputs = CastScalarVolume.input_spec() @@ -33,12 +18,7 @@ def test_CastScalarVolume_inputs(): def test_CastScalarVolume_outputs(): - output_map = dict( - OutputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(OutputVolume=dict(extensions=None, position=-1)) outputs = CastScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py index 49b5133faa..9eac1b6c21 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py @@ -4,32 +4,12 @@ def test_CheckerBoardFilter_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - checkerPattern=dict( - argstr="--checkerPattern %s", - sep=",", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume1=dict( - argstr="%s", - extensions=None, - position=-3, - ), - inputVolume2=dict( - argstr="%s", - extensions=None, - position=-2, - ), - outputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), + args=dict(argstr="%s"), + checkerPattern=dict(argstr="--checkerPattern %s", sep=","), + environ=dict(nohash=True, usedefault=True), + inputVolume1=dict(argstr="%s", extensions=None, position=-3), + inputVolume2=dict(argstr="%s", extensions=None, position=-2), + outputVolume=dict(argstr="%s", hash_files=False, position=-1), ) inputs = CheckerBoardFilter.input_spec() @@ -39,12 +19,7 @@ def test_CheckerBoardFilter_inputs(): def test_CheckerBoardFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(outputVolume=dict(extensions=None, position=-1)) outputs = CheckerBoardFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py index 48421b7c21..49b59aa007 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py @@ -4,32 +4,13 @@ def test_CurvatureAnisotropicDiffusion_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - conductance=dict( - argstr="--conductance %f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - iterations=dict( - 
argstr="--iterations %d", - ), - outputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - timeStep=dict( - argstr="--timeStep %f", - ), + args=dict(argstr="%s"), + conductance=dict(argstr="--conductance %f"), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="%s", extensions=None, position=-2), + iterations=dict(argstr="--iterations %d"), + outputVolume=dict(argstr="%s", hash_files=False, position=-1), + timeStep=dict(argstr="--timeStep %f"), ) inputs = CurvatureAnisotropicDiffusion.input_spec() @@ -39,12 +20,7 @@ def test_CurvatureAnisotropicDiffusion_inputs(): def test_CurvatureAnisotropicDiffusion_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(outputVolume=dict(extensions=None, position=-1)) outputs = CurvatureAnisotropicDiffusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py index 430e299787..00cc92cc4e 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py @@ -4,35 +4,14 @@ def test_ExtractSkeleton_inputs(): input_map = dict( - InputImageFileName=dict( - argstr="%s", - extensions=None, - position=-2, - ), - OutputImageFileName=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - args=dict( - argstr="%s", - ), - dontPrune=dict( - argstr="--dontPrune ", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - numPoints=dict( - argstr="--numPoints %d", - ), - pointsFile=dict( - argstr="--pointsFile %s", - ), - type=dict( - argstr="--type %s", - ), + InputImageFileName=dict(argstr="%s", extensions=None, position=-2), + OutputImageFileName=dict(argstr="%s", hash_files=False, position=-1), + args=dict(argstr="%s"), + dontPrune=dict(argstr="--dontPrune "), + environ=dict(nohash=True, usedefault=True), + numPoints=dict(argstr="--numPoints %d"), + pointsFile=dict(argstr="--pointsFile %s"), + type=dict(argstr="--type %s"), ) inputs = ExtractSkeleton.input_spec() @@ -42,12 +21,7 @@ def test_ExtractSkeleton_inputs(): def test_ExtractSkeleton_outputs(): - output_map = dict( - OutputImageFileName=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(OutputImageFileName=dict(extensions=None, position=-1)) outputs = ExtractSkeleton.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py index 113490472d..15f571fceb 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py @@ -4,26 +4,11 @@ def test_GaussianBlurImageFilter_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - outputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - sigma=dict( - argstr="--sigma %f", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="%s", extensions=None, position=-2), + outputVolume=dict(argstr="%s", hash_files=False, position=-1), + sigma=dict(argstr="--sigma %f"), ) inputs = GaussianBlurImageFilter.input_spec() @@ 
-33,12 +18,7 @@ def test_GaussianBlurImageFilter_inputs(): def test_GaussianBlurImageFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(outputVolume=dict(extensions=None, position=-1)) outputs = GaussianBlurImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py index 95810788c7..6445abfb6e 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py @@ -4,32 +4,13 @@ def test_GradientAnisotropicDiffusion_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - conductance=dict( - argstr="--conductance %f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - iterations=dict( - argstr="--iterations %d", - ), - outputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - timeStep=dict( - argstr="--timeStep %f", - ), + args=dict(argstr="%s"), + conductance=dict(argstr="--conductance %f"), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="%s", extensions=None, position=-2), + iterations=dict(argstr="--iterations %d"), + outputVolume=dict(argstr="%s", hash_files=False, position=-1), + timeStep=dict(argstr="--timeStep %f"), ) inputs = GradientAnisotropicDiffusion.input_spec() @@ -39,12 +20,7 @@ def test_GradientAnisotropicDiffusion_inputs(): def test_GradientAnisotropicDiffusion_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(outputVolume=dict(extensions=None, position=-1)) outputs = GradientAnisotropicDiffusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py index 8891232347..c1541d7820 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py @@ -4,23 +4,10 @@ def test_GrayscaleFillHoleImageFilter_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - outputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="%s", extensions=None, position=-2), + outputVolume=dict(argstr="%s", hash_files=False, position=-1), ) inputs = GrayscaleFillHoleImageFilter.input_spec() @@ -30,12 +17,7 @@ def test_GrayscaleFillHoleImageFilter_inputs(): def test_GrayscaleFillHoleImageFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(outputVolume=dict(extensions=None, position=-1)) outputs = GrayscaleFillHoleImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py index d48d9ded63..41a2a8e838 100644 --- 
a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py @@ -4,23 +4,10 @@ def test_GrayscaleGrindPeakImageFilter_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - outputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="%s", extensions=None, position=-2), + outputVolume=dict(argstr="%s", hash_files=False, position=-1), ) inputs = GrayscaleGrindPeakImageFilter.input_spec() @@ -30,12 +17,7 @@ def test_GrayscaleGrindPeakImageFilter_inputs(): def test_GrayscaleGrindPeakImageFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(outputVolume=dict(extensions=None, position=-1)) outputs = GrayscaleGrindPeakImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py b/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py index cf2a959ff9..033141d738 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py @@ -4,37 +4,14 @@ def test_HistogramMatching_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="%s", - extensions=None, - position=-3, - ), - numberOfHistogramLevels=dict( - argstr="--numberOfHistogramLevels %d", - ), - numberOfMatchPoints=dict( - argstr="--numberOfMatchPoints %d", - ), - outputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - referenceVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - threshold=dict( - argstr="--threshold ", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="%s", extensions=None, position=-3), + numberOfHistogramLevels=dict(argstr="--numberOfHistogramLevels %d"), + numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d"), + outputVolume=dict(argstr="%s", hash_files=False, position=-1), + referenceVolume=dict(argstr="%s", extensions=None, position=-2), + threshold=dict(argstr="--threshold "), ) inputs = HistogramMatching.input_spec() @@ -44,12 +21,7 @@ def test_HistogramMatching_inputs(): def test_HistogramMatching_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(outputVolume=dict(extensions=None, position=-1)) outputs = HistogramMatching.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py index 802baf5f38..0c4af24580 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py @@ -4,31 +4,12 @@ def test_ImageLabelCombine_inputs(): input_map = dict( - InputLabelMap_A=dict( - argstr="%s", - extensions=None, - position=-3, - ), - InputLabelMap_B=dict( - argstr="%s", - extensions=None, - position=-2, - ), - OutputLabelMap=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - args=dict( 
- argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - first_overwrites=dict( - argstr="--first_overwrites ", - ), + InputLabelMap_A=dict(argstr="%s", extensions=None, position=-3), + InputLabelMap_B=dict(argstr="%s", extensions=None, position=-2), + OutputLabelMap=dict(argstr="%s", hash_files=False, position=-1), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + first_overwrites=dict(argstr="--first_overwrites "), ) inputs = ImageLabelCombine.input_spec() @@ -38,12 +19,7 @@ def test_ImageLabelCombine_inputs(): def test_ImageLabelCombine_outputs(): - output_map = dict( - OutputLabelMap=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(OutputLabelMap=dict(extensions=None, position=-1)) outputs = ImageLabelCombine.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py index 5070718d66..b9872b7e4e 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py @@ -4,34 +4,13 @@ def test_MaskScalarVolume_inputs(): input_map = dict( - InputVolume=dict( - argstr="%s", - extensions=None, - position=-3, - ), - MaskVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - OutputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - label=dict( - argstr="--label %d", - ), - replace=dict( - argstr="--replace %d", - ), + InputVolume=dict(argstr="%s", extensions=None, position=-3), + MaskVolume=dict(argstr="%s", extensions=None, position=-2), + OutputVolume=dict(argstr="%s", hash_files=False, position=-1), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + label=dict(argstr="--label %d"), + replace=dict(argstr="--replace %d"), ) inputs = MaskScalarVolume.input_spec() @@ -41,12 +20,7 @@ def test_MaskScalarVolume_inputs(): def test_MaskScalarVolume_outputs(): - output_map = dict( - OutputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(OutputVolume=dict(extensions=None, position=-1)) outputs = MaskScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py index 8b86a90c3b..ceea660265 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py @@ -4,27 +4,11 @@ def test_MedianImageFilter_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - neighborhood=dict( - argstr="--neighborhood %s", - sep=",", - ), - outputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="%s", extensions=None, position=-2), + neighborhood=dict(argstr="--neighborhood %s", sep=","), + outputVolume=dict(argstr="%s", hash_files=False, position=-1), ) inputs = MedianImageFilter.input_spec() @@ -34,12 +18,7 @@ def test_MedianImageFilter_inputs(): def test_MedianImageFilter_outputs(): - output_map = dict( - outputVolume=dict( - 
extensions=None, - position=-1, - ), - ) + output_map = dict(outputVolume=dict(extensions=None, position=-1)) outputs = MedianImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py index 817fdbbe95..e28dfb2a66 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py @@ -4,31 +4,12 @@ def test_MultiplyScalarVolumes_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume1=dict( - argstr="%s", - extensions=None, - position=-3, - ), - inputVolume2=dict( - argstr="%s", - extensions=None, - position=-2, - ), - order=dict( - argstr="--order %s", - ), - outputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputVolume1=dict(argstr="%s", extensions=None, position=-3), + inputVolume2=dict(argstr="%s", extensions=None, position=-2), + order=dict(argstr="--order %s"), + outputVolume=dict(argstr="%s", hash_files=False, position=-1), ) inputs = MultiplyScalarVolumes.input_spec() @@ -38,12 +19,7 @@ def test_MultiplyScalarVolumes_inputs(): def test_MultiplyScalarVolumes_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(outputVolume=dict(extensions=None, position=-1)) outputs = MultiplyScalarVolumes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py b/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py index 43038036d5..f1ab2c1d90 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py @@ -4,57 +4,20 @@ def test_N4ITKBiasFieldCorrection_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - bsplineorder=dict( - argstr="--bsplineorder %d", - ), - convergencethreshold=dict( - argstr="--convergencethreshold %f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - histogramsharpening=dict( - argstr="--histogramsharpening %s", - sep=",", - ), - inputimage=dict( - argstr="--inputimage %s", - extensions=None, - ), - iterations=dict( - argstr="--iterations %s", - sep=",", - ), - maskimage=dict( - argstr="--maskimage %s", - extensions=None, - ), - meshresolution=dict( - argstr="--meshresolution %s", - sep=",", - ), - outputbiasfield=dict( - argstr="--outputbiasfield %s", - hash_files=False, - ), - outputimage=dict( - argstr="--outputimage %s", - hash_files=False, - ), - shrinkfactor=dict( - argstr="--shrinkfactor %d", - ), - splinedistance=dict( - argstr="--splinedistance %f", - ), - weightimage=dict( - argstr="--weightimage %s", - extensions=None, - ), + args=dict(argstr="%s"), + bsplineorder=dict(argstr="--bsplineorder %d"), + convergencethreshold=dict(argstr="--convergencethreshold %f"), + environ=dict(nohash=True, usedefault=True), + histogramsharpening=dict(argstr="--histogramsharpening %s", sep=","), + inputimage=dict(argstr="--inputimage %s", extensions=None), + iterations=dict(argstr="--iterations %s", sep=","), + maskimage=dict(argstr="--maskimage %s", extensions=None), + meshresolution=dict(argstr="--meshresolution %s", 
sep=","), + outputbiasfield=dict(argstr="--outputbiasfield %s", hash_files=False), + outputimage=dict(argstr="--outputimage %s", hash_files=False), + shrinkfactor=dict(argstr="--shrinkfactor %d"), + splinedistance=dict(argstr="--splinedistance %f"), + weightimage=dict(argstr="--weightimage %s", extensions=None), ) inputs = N4ITKBiasFieldCorrection.input_spec() @@ -65,12 +28,7 @@ def test_N4ITKBiasFieldCorrection_inputs(): def test_N4ITKBiasFieldCorrection_outputs(): output_map = dict( - outputbiasfield=dict( - extensions=None, - ), - outputimage=dict( - extensions=None, - ), + outputbiasfield=dict(extensions=None), outputimage=dict(extensions=None) ) outputs = N4ITKBiasFieldCorrection.output_spec() diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py index 37dca6437c..d3b61640bb 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py @@ -4,96 +4,32 @@ def test_ResampleScalarVectorDWIVolume_inputs(): input_map = dict( - Inverse_ITK_Transformation=dict( - argstr="--Inverse_ITK_Transformation ", - ), - Reference=dict( - argstr="--Reference %s", - extensions=None, - ), - args=dict( - argstr="%s", - ), - centered_transform=dict( - argstr="--centered_transform ", - ), - defField=dict( - argstr="--defField %s", - extensions=None, - ), - default_pixel_value=dict( - argstr="--default_pixel_value %f", - ), - direction_matrix=dict( - argstr="--direction_matrix %s", - sep=",", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - hfieldtype=dict( - argstr="--hfieldtype %s", - ), - image_center=dict( - argstr="--image_center %s", - ), - inputVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - interpolation=dict( - argstr="--interpolation %s", - ), - notbulk=dict( - argstr="--notbulk ", - ), - number_of_thread=dict( - argstr="--number_of_thread %d", - ), - origin=dict( - argstr="--origin %s", - ), - outputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - rotation_point=dict( - argstr="--rotation_point %s", - ), - size=dict( - argstr="--size %s", - sep=",", - ), - spaceChange=dict( - argstr="--spaceChange ", - ), - spacing=dict( - argstr="--spacing %s", - sep=",", - ), - spline_order=dict( - argstr="--spline_order %d", - ), - transform=dict( - argstr="--transform %s", - ), - transform_matrix=dict( - argstr="--transform_matrix %s", - sep=",", - ), - transform_order=dict( - argstr="--transform_order %s", - ), - transformationFile=dict( - argstr="--transformationFile %s", - extensions=None, - ), - window_function=dict( - argstr="--window_function %s", - ), + Inverse_ITK_Transformation=dict(argstr="--Inverse_ITK_Transformation "), + Reference=dict(argstr="--Reference %s", extensions=None), + args=dict(argstr="%s"), + centered_transform=dict(argstr="--centered_transform "), + defField=dict(argstr="--defField %s", extensions=None), + default_pixel_value=dict(argstr="--default_pixel_value %f"), + direction_matrix=dict(argstr="--direction_matrix %s", sep=","), + environ=dict(nohash=True, usedefault=True), + hfieldtype=dict(argstr="--hfieldtype %s"), + image_center=dict(argstr="--image_center %s"), + inputVolume=dict(argstr="%s", extensions=None, position=-2), + interpolation=dict(argstr="--interpolation %s"), + notbulk=dict(argstr="--notbulk "), + number_of_thread=dict(argstr="--number_of_thread %d"), + 
origin=dict(argstr="--origin %s"), + outputVolume=dict(argstr="%s", hash_files=False, position=-1), + rotation_point=dict(argstr="--rotation_point %s"), + size=dict(argstr="--size %s", sep=","), + spaceChange=dict(argstr="--spaceChange "), + spacing=dict(argstr="--spacing %s", sep=","), + spline_order=dict(argstr="--spline_order %d"), + transform=dict(argstr="--transform %s"), + transform_matrix=dict(argstr="--transform_matrix %s", sep=","), + transform_order=dict(argstr="--transform_order %s"), + transformationFile=dict(argstr="--transformationFile %s", extensions=None), + window_function=dict(argstr="--window_function %s"), ) inputs = ResampleScalarVectorDWIVolume.input_spec() @@ -103,12 +39,7 @@ def test_ResampleScalarVectorDWIVolume_inputs(): def test_ResampleScalarVectorDWIVolume_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(outputVolume=dict(extensions=None, position=-1)) outputs = ResampleScalarVectorDWIVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py index abe3d9ad00..2bf2568637 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py @@ -4,31 +4,12 @@ def test_SubtractScalarVolumes_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume1=dict( - argstr="%s", - extensions=None, - position=-3, - ), - inputVolume2=dict( - argstr="%s", - extensions=None, - position=-2, - ), - order=dict( - argstr="--order %s", - ), - outputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputVolume1=dict(argstr="%s", extensions=None, position=-3), + inputVolume2=dict(argstr="%s", extensions=None, position=-2), + order=dict(argstr="--order %s"), + outputVolume=dict(argstr="%s", hash_files=False, position=-1), ) inputs = SubtractScalarVolumes.input_spec() @@ -38,12 +19,7 @@ def test_SubtractScalarVolumes_inputs(): def test_SubtractScalarVolumes_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(outputVolume=dict(extensions=None, position=-1)) outputs = SubtractScalarVolumes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py index 0aaab0ff7a..cede949179 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py @@ -4,38 +4,15 @@ def test_ThresholdScalarVolume_inputs(): input_map = dict( - InputVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - OutputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - lower=dict( - argstr="--lower %d", - ), - outsidevalue=dict( - argstr="--outsidevalue %d", - ), - threshold=dict( - argstr="--threshold %d", - ), - thresholdtype=dict( - argstr="--thresholdtype %s", - ), - upper=dict( - argstr="--upper %d", - ), + InputVolume=dict(argstr="%s", extensions=None, position=-2), + 
OutputVolume=dict(argstr="%s", hash_files=False, position=-1), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + lower=dict(argstr="--lower %d"), + outsidevalue=dict(argstr="--outsidevalue %d"), + threshold=dict(argstr="--threshold %d"), + thresholdtype=dict(argstr="--thresholdtype %s"), + upper=dict(argstr="--upper %d"), ) inputs = ThresholdScalarVolume.input_spec() @@ -45,12 +22,7 @@ def test_ThresholdScalarVolume_inputs(): def test_ThresholdScalarVolume_outputs(): - output_map = dict( - OutputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(OutputVolume=dict(extensions=None, position=-1)) outputs = ThresholdScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py index bbaa19f848..f6bb34bbdf 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py @@ -4,36 +4,14 @@ def test_VotingBinaryHoleFillingImageFilter_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - background=dict( - argstr="--background %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - foreground=dict( - argstr="--foreground %d", - ), - inputVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - majorityThreshold=dict( - argstr="--majorityThreshold %d", - ), - outputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - radius=dict( - argstr="--radius %s", - sep=",", - ), + args=dict(argstr="%s"), + background=dict(argstr="--background %d"), + environ=dict(nohash=True, usedefault=True), + foreground=dict(argstr="--foreground %d"), + inputVolume=dict(argstr="%s", extensions=None, position=-2), + majorityThreshold=dict(argstr="--majorityThreshold %d"), + outputVolume=dict(argstr="%s", hash_files=False, position=-1), + radius=dict(argstr="--radius %s", sep=","), ) inputs = VotingBinaryHoleFillingImageFilter.input_spec() @@ -43,12 +21,7 @@ def test_VotingBinaryHoleFillingImageFilter_inputs(): def test_VotingBinaryHoleFillingImageFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(outputVolume=dict(extensions=None, position=-1)) outputs = VotingBinaryHoleFillingImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py b/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py index 8aa18dc6a3..4d5ee32124 100644 --- a/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py +++ b/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py @@ -4,41 +4,15 @@ def test_DWIUnbiasedNonLocalMeansFilter_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - hp=dict( - argstr="--hp %f", - ), - inputVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - ng=dict( - argstr="--ng %d", - ), - outputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - rc=dict( - argstr="--rc %s", - sep=",", - ), - re=dict( - argstr="--re %s", - sep=",", - ), - rs=dict( - argstr="--rs %s", - sep=",", - ), + args=dict(argstr="%s"), + 
environ=dict(nohash=True, usedefault=True), + hp=dict(argstr="--hp %f"), + inputVolume=dict(argstr="%s", extensions=None, position=-2), + ng=dict(argstr="--ng %d"), + outputVolume=dict(argstr="%s", hash_files=False, position=-1), + rc=dict(argstr="--rc %s", sep=","), + re=dict(argstr="--re %s", sep=","), + rs=dict(argstr="--rs %s", sep=","), ) inputs = DWIUnbiasedNonLocalMeansFilter.input_spec() @@ -48,12 +22,7 @@ def test_DWIUnbiasedNonLocalMeansFilter_inputs(): def test_DWIUnbiasedNonLocalMeansFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(outputVolume=dict(extensions=None, position=-1)) outputs = DWIUnbiasedNonLocalMeansFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py index bb2de08cfb..82c345813f 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py @@ -4,53 +4,21 @@ def test_AffineRegistration_inputs(): input_map = dict( - FixedImageFileName=dict( - argstr="%s", - extensions=None, - position=-2, - ), - MovingImageFileName=dict( - argstr="%s", - extensions=None, - position=-1, - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixedsmoothingfactor=dict( - argstr="--fixedsmoothingfactor %d", - ), - histogrambins=dict( - argstr="--histogrambins %d", - ), - initialtransform=dict( - argstr="--initialtransform %s", - extensions=None, - ), - iterations=dict( - argstr="--iterations %d", - ), - movingsmoothingfactor=dict( - argstr="--movingsmoothingfactor %d", - ), - outputtransform=dict( - argstr="--outputtransform %s", - hash_files=False, - ), + FixedImageFileName=dict(argstr="%s", extensions=None, position=-2), + MovingImageFileName=dict(argstr="%s", extensions=None, position=-1), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + fixedsmoothingfactor=dict(argstr="--fixedsmoothingfactor %d"), + histogrambins=dict(argstr="--histogrambins %d"), + initialtransform=dict(argstr="--initialtransform %s", extensions=None), + iterations=dict(argstr="--iterations %d"), + movingsmoothingfactor=dict(argstr="--movingsmoothingfactor %d"), + outputtransform=dict(argstr="--outputtransform %s", hash_files=False), resampledmovingfilename=dict( - argstr="--resampledmovingfilename %s", - hash_files=False, - ), - spatialsamples=dict( - argstr="--spatialsamples %d", - ), - translationscale=dict( - argstr="--translationscale %f", + argstr="--resampledmovingfilename %s", hash_files=False ), + spatialsamples=dict(argstr="--spatialsamples %d"), + translationscale=dict(argstr="--translationscale %f"), ) inputs = AffineRegistration.input_spec() @@ -61,12 +29,8 @@ def test_AffineRegistration_inputs(): def test_AffineRegistration_outputs(): output_map = dict( - outputtransform=dict( - extensions=None, - ), - resampledmovingfilename=dict( - extensions=None, - ), + outputtransform=dict(extensions=None), + resampledmovingfilename=dict(extensions=None), ) outputs = AffineRegistration.output_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py index 41b316e7dc..7e5762a0db 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py +++ 
b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py @@ -4,60 +4,23 @@ def test_BSplineDeformableRegistration_inputs(): input_map = dict( - FixedImageFileName=dict( - argstr="%s", - extensions=None, - position=-2, - ), - MovingImageFileName=dict( - argstr="%s", - extensions=None, - position=-1, - ), - args=dict( - argstr="%s", - ), - constrain=dict( - argstr="--constrain ", - ), - default=dict( - argstr="--default %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gridSize=dict( - argstr="--gridSize %d", - ), - histogrambins=dict( - argstr="--histogrambins %d", - ), - initialtransform=dict( - argstr="--initialtransform %s", - extensions=None, - ), - iterations=dict( - argstr="--iterations %d", - ), - maximumDeformation=dict( - argstr="--maximumDeformation %f", - ), - outputtransform=dict( - argstr="--outputtransform %s", - hash_files=False, - ), - outputwarp=dict( - argstr="--outputwarp %s", - hash_files=False, - ), + FixedImageFileName=dict(argstr="%s", extensions=None, position=-2), + MovingImageFileName=dict(argstr="%s", extensions=None, position=-1), + args=dict(argstr="%s"), + constrain=dict(argstr="--constrain "), + default=dict(argstr="--default %d"), + environ=dict(nohash=True, usedefault=True), + gridSize=dict(argstr="--gridSize %d"), + histogrambins=dict(argstr="--histogrambins %d"), + initialtransform=dict(argstr="--initialtransform %s", extensions=None), + iterations=dict(argstr="--iterations %d"), + maximumDeformation=dict(argstr="--maximumDeformation %f"), + outputtransform=dict(argstr="--outputtransform %s", hash_files=False), + outputwarp=dict(argstr="--outputwarp %s", hash_files=False), resampledmovingfilename=dict( - argstr="--resampledmovingfilename %s", - hash_files=False, - ), - spatialsamples=dict( - argstr="--spatialsamples %d", + argstr="--resampledmovingfilename %s", hash_files=False ), + spatialsamples=dict(argstr="--spatialsamples %d"), ) inputs = BSplineDeformableRegistration.input_spec() @@ -68,15 +31,9 @@ def test_BSplineDeformableRegistration_inputs(): def test_BSplineDeformableRegistration_outputs(): output_map = dict( - outputtransform=dict( - extensions=None, - ), - outputwarp=dict( - extensions=None, - ), - resampledmovingfilename=dict( - extensions=None, - ), + outputtransform=dict(extensions=None), + outputwarp=dict(extensions=None), + resampledmovingfilename=dict(extensions=None), ) outputs = BSplineDeformableRegistration.output_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py index fbd37eeb8e..0987c9b33e 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py @@ -4,25 +4,11 @@ def test_BSplineToDeformationField_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - defImage=dict( - argstr="--defImage %s", - hash_files=False, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - refImage=dict( - argstr="--refImage %s", - extensions=None, - ), - tfm=dict( - argstr="--tfm %s", - extensions=None, - ), + args=dict(argstr="%s"), + defImage=dict(argstr="--defImage %s", hash_files=False), + environ=dict(nohash=True, usedefault=True), + refImage=dict(argstr="--refImage %s", extensions=None), + tfm=dict(argstr="--tfm %s", extensions=None), ) inputs = BSplineToDeformationField.input_spec() @@ -32,11 +18,7 @@ def test_BSplineToDeformationField_inputs(): def 
test_BSplineToDeformationField_outputs(): - output_map = dict( - defImage=dict( - extensions=None, - ), - ) + output_map = dict(defImage=dict(extensions=None)) outputs = BSplineToDeformationField.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py index 28f4e19d7b..699ac2133b 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py @@ -4,105 +4,36 @@ def test_ExpertAutomatedRegistration_inputs(): input_map = dict( - affineMaxIterations=dict( - argstr="--affineMaxIterations %d", - ), - affineSamplingRatio=dict( - argstr="--affineSamplingRatio %f", - ), - args=dict( - argstr="%s", - ), - bsplineMaxIterations=dict( - argstr="--bsplineMaxIterations %d", - ), - bsplineSamplingRatio=dict( - argstr="--bsplineSamplingRatio %f", - ), - controlPointSpacing=dict( - argstr="--controlPointSpacing %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - expectedOffset=dict( - argstr="--expectedOffset %f", - ), - expectedRotation=dict( - argstr="--expectedRotation %f", - ), - expectedScale=dict( - argstr="--expectedScale %f", - ), - expectedSkew=dict( - argstr="--expectedSkew %f", - ), - fixedImage=dict( - argstr="%s", - extensions=None, - position=-2, - ), - fixedImageMask=dict( - argstr="--fixedImageMask %s", - extensions=None, - ), - fixedLandmarks=dict( - argstr="--fixedLandmarks %s...", - ), - initialization=dict( - argstr="--initialization %s", - ), - interpolation=dict( - argstr="--interpolation %s", - ), - loadTransform=dict( - argstr="--loadTransform %s", - extensions=None, - ), - metric=dict( - argstr="--metric %s", - ), - minimizeMemory=dict( - argstr="--minimizeMemory ", - ), - movingImage=dict( - argstr="%s", - extensions=None, - position=-1, - ), - movingLandmarks=dict( - argstr="--movingLandmarks %s...", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - randomNumberSeed=dict( - argstr="--randomNumberSeed %d", - ), - registration=dict( - argstr="--registration %s", - ), - resampledImage=dict( - argstr="--resampledImage %s", - hash_files=False, - ), - rigidMaxIterations=dict( - argstr="--rigidMaxIterations %d", - ), - rigidSamplingRatio=dict( - argstr="--rigidSamplingRatio %f", - ), - sampleFromOverlap=dict( - argstr="--sampleFromOverlap ", - ), - saveTransform=dict( - argstr="--saveTransform %s", - hash_files=False, - ), - verbosityLevel=dict( - argstr="--verbosityLevel %s", - ), + affineMaxIterations=dict(argstr="--affineMaxIterations %d"), + affineSamplingRatio=dict(argstr="--affineSamplingRatio %f"), + args=dict(argstr="%s"), + bsplineMaxIterations=dict(argstr="--bsplineMaxIterations %d"), + bsplineSamplingRatio=dict(argstr="--bsplineSamplingRatio %f"), + controlPointSpacing=dict(argstr="--controlPointSpacing %d"), + environ=dict(nohash=True, usedefault=True), + expectedOffset=dict(argstr="--expectedOffset %f"), + expectedRotation=dict(argstr="--expectedRotation %f"), + expectedScale=dict(argstr="--expectedScale %f"), + expectedSkew=dict(argstr="--expectedSkew %f"), + fixedImage=dict(argstr="%s", extensions=None, position=-2), + fixedImageMask=dict(argstr="--fixedImageMask %s", extensions=None), + fixedLandmarks=dict(argstr="--fixedLandmarks %s..."), + initialization=dict(argstr="--initialization %s"), + interpolation=dict(argstr="--interpolation %s"), + 
loadTransform=dict(argstr="--loadTransform %s", extensions=None), + metric=dict(argstr="--metric %s"), + minimizeMemory=dict(argstr="--minimizeMemory "), + movingImage=dict(argstr="%s", extensions=None, position=-1), + movingLandmarks=dict(argstr="--movingLandmarks %s..."), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + randomNumberSeed=dict(argstr="--randomNumberSeed %d"), + registration=dict(argstr="--registration %s"), + resampledImage=dict(argstr="--resampledImage %s", hash_files=False), + rigidMaxIterations=dict(argstr="--rigidMaxIterations %d"), + rigidSamplingRatio=dict(argstr="--rigidSamplingRatio %f"), + sampleFromOverlap=dict(argstr="--sampleFromOverlap "), + saveTransform=dict(argstr="--saveTransform %s", hash_files=False), + verbosityLevel=dict(argstr="--verbosityLevel %s"), ) inputs = ExpertAutomatedRegistration.input_spec() @@ -113,12 +44,7 @@ def test_ExpertAutomatedRegistration_inputs(): def test_ExpertAutomatedRegistration_outputs(): output_map = dict( - resampledImage=dict( - extensions=None, - ), - saveTransform=dict( - extensions=None, - ), + resampledImage=dict(extensions=None), saveTransform=dict(extensions=None) ) outputs = ExpertAutomatedRegistration.output_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py index 77fb5d69f6..8682fc92db 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py @@ -4,58 +4,22 @@ def test_LinearRegistration_inputs(): input_map = dict( - FixedImageFileName=dict( - argstr="%s", - extensions=None, - position=-2, - ), - MovingImageFileName=dict( - argstr="%s", - extensions=None, - position=-1, - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixedsmoothingfactor=dict( - argstr="--fixedsmoothingfactor %d", - ), - histogrambins=dict( - argstr="--histogrambins %d", - ), - initialtransform=dict( - argstr="--initialtransform %s", - extensions=None, - ), - iterations=dict( - argstr="--iterations %s", - sep=",", - ), - learningrate=dict( - argstr="--learningrate %s", - sep=",", - ), - movingsmoothingfactor=dict( - argstr="--movingsmoothingfactor %d", - ), - outputtransform=dict( - argstr="--outputtransform %s", - hash_files=False, - ), + FixedImageFileName=dict(argstr="%s", extensions=None, position=-2), + MovingImageFileName=dict(argstr="%s", extensions=None, position=-1), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + fixedsmoothingfactor=dict(argstr="--fixedsmoothingfactor %d"), + histogrambins=dict(argstr="--histogrambins %d"), + initialtransform=dict(argstr="--initialtransform %s", extensions=None), + iterations=dict(argstr="--iterations %s", sep=","), + learningrate=dict(argstr="--learningrate %s", sep=","), + movingsmoothingfactor=dict(argstr="--movingsmoothingfactor %d"), + outputtransform=dict(argstr="--outputtransform %s", hash_files=False), resampledmovingfilename=dict( - argstr="--resampledmovingfilename %s", - hash_files=False, - ), - spatialsamples=dict( - argstr="--spatialsamples %d", - ), - translationscale=dict( - argstr="--translationscale %f", + argstr="--resampledmovingfilename %s", hash_files=False ), + spatialsamples=dict(argstr="--spatialsamples %d"), + translationscale=dict(argstr="--translationscale %f"), ) inputs = LinearRegistration.input_spec() @@ -66,12 +30,8 @@ def test_LinearRegistration_inputs(): def 
test_LinearRegistration_outputs(): output_map = dict( - outputtransform=dict( - extensions=None, - ), - resampledmovingfilename=dict( - extensions=None, - ), + outputtransform=dict(extensions=None), + resampledmovingfilename=dict(extensions=None), ) outputs = LinearRegistration.output_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py index 0f1e19d4ba..59613bb9f4 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py @@ -4,53 +4,19 @@ def test_MultiResolutionAffineRegistration_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixedImage=dict( - argstr="%s", - extensions=None, - position=-2, - ), - fixedImageMask=dict( - argstr="--fixedImageMask %s", - extensions=None, - ), - fixedImageROI=dict( - argstr="--fixedImageROI %s", - ), - metricTolerance=dict( - argstr="--metricTolerance %f", - ), - movingImage=dict( - argstr="%s", - extensions=None, - position=-1, - ), - numIterations=dict( - argstr="--numIterations %d", - ), - numLineIterations=dict( - argstr="--numLineIterations %d", - ), - resampledImage=dict( - argstr="--resampledImage %s", - hash_files=False, - ), - saveTransform=dict( - argstr="--saveTransform %s", - hash_files=False, - ), - stepSize=dict( - argstr="--stepSize %f", - ), - stepTolerance=dict( - argstr="--stepTolerance %f", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + fixedImage=dict(argstr="%s", extensions=None, position=-2), + fixedImageMask=dict(argstr="--fixedImageMask %s", extensions=None), + fixedImageROI=dict(argstr="--fixedImageROI %s"), + metricTolerance=dict(argstr="--metricTolerance %f"), + movingImage=dict(argstr="%s", extensions=None, position=-1), + numIterations=dict(argstr="--numIterations %d"), + numLineIterations=dict(argstr="--numLineIterations %d"), + resampledImage=dict(argstr="--resampledImage %s", hash_files=False), + saveTransform=dict(argstr="--saveTransform %s", hash_files=False), + stepSize=dict(argstr="--stepSize %f"), + stepTolerance=dict(argstr="--stepTolerance %f"), ) inputs = MultiResolutionAffineRegistration.input_spec() @@ -61,12 +27,7 @@ def test_MultiResolutionAffineRegistration_inputs(): def test_MultiResolutionAffineRegistration_outputs(): output_map = dict( - resampledImage=dict( - extensions=None, - ), - saveTransform=dict( - extensions=None, - ), + resampledImage=dict(extensions=None), saveTransform=dict(extensions=None) ) outputs = MultiResolutionAffineRegistration.output_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py index 672d971471..8a18403ddb 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py @@ -4,32 +4,13 @@ def test_OtsuThresholdImageFilter_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - insideValue=dict( - argstr="--insideValue %d", - ), - numberOfBins=dict( - argstr="--numberOfBins %d", - ), - outputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - 
outsideValue=dict( - argstr="--outsideValue %d", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="%s", extensions=None, position=-2), + insideValue=dict(argstr="--insideValue %d"), + numberOfBins=dict(argstr="--numberOfBins %d"), + outputVolume=dict(argstr="%s", hash_files=False, position=-1), + outsideValue=dict(argstr="--outsideValue %d"), ) inputs = OtsuThresholdImageFilter.input_spec() @@ -39,12 +20,7 @@ def test_OtsuThresholdImageFilter_inputs(): def test_OtsuThresholdImageFilter_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(outputVolume=dict(extensions=None, position=-1)) outputs = OtsuThresholdImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py index a28c8231c8..0ee06ba79a 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py @@ -4,35 +4,14 @@ def test_OtsuThresholdSegmentation_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - brightObjects=dict( - argstr="--brightObjects ", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - faceConnected=dict( - argstr="--faceConnected ", - ), - inputVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - minimumObjectSize=dict( - argstr="--minimumObjectSize %d", - ), - numberOfBins=dict( - argstr="--numberOfBins %d", - ), - outputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), + args=dict(argstr="%s"), + brightObjects=dict(argstr="--brightObjects "), + environ=dict(nohash=True, usedefault=True), + faceConnected=dict(argstr="--faceConnected "), + inputVolume=dict(argstr="%s", extensions=None, position=-2), + minimumObjectSize=dict(argstr="--minimumObjectSize %d"), + numberOfBins=dict(argstr="--numberOfBins %d"), + outputVolume=dict(argstr="%s", hash_files=False, position=-1), ) inputs = OtsuThresholdSegmentation.input_spec() @@ -42,12 +21,7 @@ def test_OtsuThresholdSegmentation_inputs(): def test_OtsuThresholdSegmentation_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(outputVolume=dict(extensions=None, position=-1)) outputs = OtsuThresholdSegmentation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py b/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py index 2f47b3bd16..fba4c4b1f1 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py @@ -4,30 +4,12 @@ def test_ResampleScalarVolume_inputs(): input_map = dict( - InputVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - OutputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - interpolation=dict( - argstr="--interpolation %s", - ), - spacing=dict( - argstr="--spacing %s", - sep=",", - ), + InputVolume=dict(argstr="%s", extensions=None, position=-2), + OutputVolume=dict(argstr="%s", hash_files=False, position=-1), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + 
interpolation=dict(argstr="--interpolation %s"), + spacing=dict(argstr="--spacing %s", sep=","), ) inputs = ResampleScalarVolume.input_spec() @@ -37,12 +19,7 @@ def test_ResampleScalarVolume_inputs(): def test_ResampleScalarVolume_outputs(): - output_map = dict( - OutputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(OutputVolume=dict(extensions=None, position=-1)) outputs = ResampleScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py index 3e8aba0e4d..abab2f260c 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py @@ -4,61 +4,23 @@ def test_RigidRegistration_inputs(): input_map = dict( - FixedImageFileName=dict( - argstr="%s", - extensions=None, - position=-2, - ), - MovingImageFileName=dict( - argstr="%s", - extensions=None, - position=-1, - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixedsmoothingfactor=dict( - argstr="--fixedsmoothingfactor %d", - ), - histogrambins=dict( - argstr="--histogrambins %d", - ), - initialtransform=dict( - argstr="--initialtransform %s", - extensions=None, - ), - iterations=dict( - argstr="--iterations %s", - sep=",", - ), - learningrate=dict( - argstr="--learningrate %s", - sep=",", - ), - movingsmoothingfactor=dict( - argstr="--movingsmoothingfactor %d", - ), - outputtransform=dict( - argstr="--outputtransform %s", - hash_files=False, - ), + FixedImageFileName=dict(argstr="%s", extensions=None, position=-2), + MovingImageFileName=dict(argstr="%s", extensions=None, position=-1), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + fixedsmoothingfactor=dict(argstr="--fixedsmoothingfactor %d"), + histogrambins=dict(argstr="--histogrambins %d"), + initialtransform=dict(argstr="--initialtransform %s", extensions=None), + iterations=dict(argstr="--iterations %s", sep=","), + learningrate=dict(argstr="--learningrate %s", sep=","), + movingsmoothingfactor=dict(argstr="--movingsmoothingfactor %d"), + outputtransform=dict(argstr="--outputtransform %s", hash_files=False), resampledmovingfilename=dict( - argstr="--resampledmovingfilename %s", - hash_files=False, - ), - spatialsamples=dict( - argstr="--spatialsamples %d", - ), - testingmode=dict( - argstr="--testingmode ", - ), - translationscale=dict( - argstr="--translationscale %f", + argstr="--resampledmovingfilename %s", hash_files=False ), + spatialsamples=dict(argstr="--spatialsamples %d"), + testingmode=dict(argstr="--testingmode "), + translationscale=dict(argstr="--translationscale %f"), ) inputs = RigidRegistration.input_spec() @@ -69,12 +31,8 @@ def test_RigidRegistration_inputs(): def test_RigidRegistration_outputs(): output_map = dict( - outputtransform=dict( - extensions=None, - ), - resampledmovingfilename=dict( - extensions=None, - ), + outputtransform=dict(extensions=None), + resampledmovingfilename=dict(extensions=None), ) outputs = RigidRegistration.output_spec() diff --git a/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py b/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py index 6bc91e4d5e..d5057e46c0 100644 --- a/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py +++ 
b/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py @@ -4,43 +4,15 @@ def test_IntensityDifferenceMetric_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - baselineSegmentationVolume=dict( - argstr="%s", - extensions=None, - position=-3, - ), - baselineVolume=dict( - argstr="%s", - extensions=None, - position=-4, - ), - changingBandSize=dict( - argstr="--changingBandSize %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - followupVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - outputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - reportFileName=dict( - argstr="--reportFileName %s", - hash_files=False, - ), - sensitivityThreshold=dict( - argstr="--sensitivityThreshold %f", - ), + args=dict(argstr="%s"), + baselineSegmentationVolume=dict(argstr="%s", extensions=None, position=-3), + baselineVolume=dict(argstr="%s", extensions=None, position=-4), + changingBandSize=dict(argstr="--changingBandSize %d"), + environ=dict(nohash=True, usedefault=True), + followupVolume=dict(argstr="%s", extensions=None, position=-2), + outputVolume=dict(argstr="%s", hash_files=False, position=-1), + reportFileName=dict(argstr="--reportFileName %s", hash_files=False), + sensitivityThreshold=dict(argstr="--sensitivityThreshold %f"), ) inputs = IntensityDifferenceMetric.input_spec() @@ -51,13 +23,8 @@ def test_IntensityDifferenceMetric_inputs(): def test_IntensityDifferenceMetric_outputs(): output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), - reportFileName=dict( - extensions=None, - ), + outputVolume=dict(extensions=None, position=-1), + reportFileName=dict(extensions=None), ) outputs = IntensityDifferenceMetric.output_spec() diff --git a/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py b/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py index aec22b541f..8e60f24f83 100644 --- a/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py +++ b/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py @@ -4,47 +4,18 @@ def test_PETStandardUptakeValueComputation_inputs(): input_map = dict( - OutputLabel=dict( - argstr="--OutputLabel %s", - ), - OutputLabelValue=dict( - argstr="--OutputLabelValue %s", - ), - SUVMax=dict( - argstr="--SUVMax %s", - ), - SUVMean=dict( - argstr="--SUVMean %s", - ), - SUVMin=dict( - argstr="--SUVMin %s", - ), - args=dict( - argstr="%s", - ), - color=dict( - argstr="--color %s", - extensions=None, - ), - csvFile=dict( - argstr="--csvFile %s", - hash_files=False, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - labelMap=dict( - argstr="--labelMap %s", - extensions=None, - ), - petDICOMPath=dict( - argstr="--petDICOMPath %s", - ), - petVolume=dict( - argstr="--petVolume %s", - extensions=None, - ), + OutputLabel=dict(argstr="--OutputLabel %s"), + OutputLabelValue=dict(argstr="--OutputLabelValue %s"), + SUVMax=dict(argstr="--SUVMax %s"), + SUVMean=dict(argstr="--SUVMean %s"), + SUVMin=dict(argstr="--SUVMin %s"), + args=dict(argstr="%s"), + color=dict(argstr="--color %s", extensions=None), + csvFile=dict(argstr="--csvFile %s", hash_files=False), + environ=dict(nohash=True, usedefault=True), + labelMap=dict(argstr="--labelMap %s", extensions=None), + petDICOMPath=dict(argstr="--petDICOMPath %s"), + petVolume=dict(argstr="--petVolume %s", extensions=None), ) inputs = 
PETStandardUptakeValueComputation.input_spec() @@ -54,11 +25,7 @@ def test_PETStandardUptakeValueComputation_inputs(): def test_PETStandardUptakeValueComputation_outputs(): - output_map = dict( - csvFile=dict( - extensions=None, - ), - ) + output_map = dict(csvFile=dict(extensions=None)) outputs = PETStandardUptakeValueComputation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py b/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py index 58c7c49f32..fc4cfc7212 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py @@ -4,26 +4,12 @@ def test_ACPCTransform_inputs(): input_map = dict( - acpc=dict( - argstr="--acpc %s...", - ), - args=dict( - argstr="%s", - ), - debugSwitch=dict( - argstr="--debugSwitch ", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - midline=dict( - argstr="--midline %s...", - ), - outputTransform=dict( - argstr="--outputTransform %s", - hash_files=False, - ), + acpc=dict(argstr="--acpc %s..."), + args=dict(argstr="%s"), + debugSwitch=dict(argstr="--debugSwitch "), + environ=dict(nohash=True, usedefault=True), + midline=dict(argstr="--midline %s..."), + outputTransform=dict(argstr="--outputTransform %s", hash_files=False), ) inputs = ACPCTransform.input_spec() @@ -33,11 +19,7 @@ def test_ACPCTransform_inputs(): def test_ACPCTransform_outputs(): - output_map = dict( - outputTransform=dict( - extensions=None, - ), - ) + output_map = dict(outputTransform=dict(extensions=None)) outputs = ACPCTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py index d1c8055df3..845cf63f3c 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py @@ -4,149 +4,60 @@ def test_BRAINSDemonWarp_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), arrayOfPyramidLevelIterations=dict( - argstr="--arrayOfPyramidLevelIterations %s", - sep=",", - ), - backgroundFillValue=dict( - argstr="--backgroundFillValue %d", + argstr="--arrayOfPyramidLevelIterations %s", sep="," ), + backgroundFillValue=dict(argstr="--backgroundFillValue %d"), checkerboardPatternSubdivisions=dict( - argstr="--checkerboardPatternSubdivisions %s", - sep=",", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixedBinaryVolume=dict( - argstr="--fixedBinaryVolume %s", - extensions=None, - ), - fixedVolume=dict( - argstr="--fixedVolume %s", - extensions=None, - ), - gradient_type=dict( - argstr="--gradient_type %s", - ), - gui=dict( - argstr="--gui ", - ), - histogramMatch=dict( - argstr="--histogramMatch ", - ), + argstr="--checkerboardPatternSubdivisions %s", sep="," + ), + environ=dict(nohash=True, usedefault=True), + fixedBinaryVolume=dict(argstr="--fixedBinaryVolume %s", extensions=None), + fixedVolume=dict(argstr="--fixedVolume %s", extensions=None), + gradient_type=dict(argstr="--gradient_type %s"), + gui=dict(argstr="--gui "), + histogramMatch=dict(argstr="--histogramMatch "), initializeWithDisplacementField=dict( - argstr="--initializeWithDisplacementField %s", - extensions=None, + argstr="--initializeWithDisplacementField %s", extensions=None ), initializeWithTransform=dict( 
- argstr="--initializeWithTransform %s", - extensions=None, - ), - inputPixelType=dict( - argstr="--inputPixelType %s", - ), - interpolationMode=dict( - argstr="--interpolationMode %s", - ), - lowerThresholdForBOBF=dict( - argstr="--lowerThresholdForBOBF %d", - ), - maskProcessingMode=dict( - argstr="--maskProcessingMode %s", - ), - max_step_length=dict( - argstr="--max_step_length %f", - ), - medianFilterSize=dict( - argstr="--medianFilterSize %s", - sep=",", - ), - minimumFixedPyramid=dict( - argstr="--minimumFixedPyramid %s", - sep=",", - ), - minimumMovingPyramid=dict( - argstr="--minimumMovingPyramid %s", - sep=",", - ), - movingBinaryVolume=dict( - argstr="--movingBinaryVolume %s", - extensions=None, - ), - movingVolume=dict( - argstr="--movingVolume %s", - extensions=None, - ), - neighborhoodForBOBF=dict( - argstr="--neighborhoodForBOBF %s", - sep=",", - ), - numberOfBCHApproximationTerms=dict( - argstr="--numberOfBCHApproximationTerms %d", - ), - numberOfHistogramBins=dict( - argstr="--numberOfHistogramBins %d", - ), - numberOfMatchPoints=dict( - argstr="--numberOfMatchPoints %d", - ), - numberOfPyramidLevels=dict( - argstr="--numberOfPyramidLevels %d", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), + argstr="--initializeWithTransform %s", extensions=None + ), + inputPixelType=dict(argstr="--inputPixelType %s"), + interpolationMode=dict(argstr="--interpolationMode %s"), + lowerThresholdForBOBF=dict(argstr="--lowerThresholdForBOBF %d"), + maskProcessingMode=dict(argstr="--maskProcessingMode %s"), + max_step_length=dict(argstr="--max_step_length %f"), + medianFilterSize=dict(argstr="--medianFilterSize %s", sep=","), + minimumFixedPyramid=dict(argstr="--minimumFixedPyramid %s", sep=","), + minimumMovingPyramid=dict(argstr="--minimumMovingPyramid %s", sep=","), + movingBinaryVolume=dict(argstr="--movingBinaryVolume %s", extensions=None), + movingVolume=dict(argstr="--movingVolume %s", extensions=None), + neighborhoodForBOBF=dict(argstr="--neighborhoodForBOBF %s", sep=","), + numberOfBCHApproximationTerms=dict(argstr="--numberOfBCHApproximationTerms %d"), + numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d"), + numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d"), + numberOfPyramidLevels=dict(argstr="--numberOfPyramidLevels %d"), + numberOfThreads=dict(argstr="--numberOfThreads %d"), outputCheckerboardVolume=dict( - argstr="--outputCheckerboardVolume %s", - hash_files=False, - ), - outputDebug=dict( - argstr="--outputDebug ", - ), - outputDisplacementFieldPrefix=dict( - argstr="--outputDisplacementFieldPrefix %s", + argstr="--outputCheckerboardVolume %s", hash_files=False ), + outputDebug=dict(argstr="--outputDebug "), + outputDisplacementFieldPrefix=dict(argstr="--outputDisplacementFieldPrefix %s"), outputDisplacementFieldVolume=dict( - argstr="--outputDisplacementFieldVolume %s", - hash_files=False, - ), - outputNormalized=dict( - argstr="--outputNormalized ", - ), - outputPixelType=dict( - argstr="--outputPixelType %s", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - promptUser=dict( - argstr="--promptUser ", - ), - registrationFilterType=dict( - argstr="--registrationFilterType %s", - ), - seedForBOBF=dict( - argstr="--seedForBOBF %s", - sep=",", - ), - smoothDisplacementFieldSigma=dict( - argstr="--smoothDisplacementFieldSigma %f", - ), - upFieldSmoothing=dict( - argstr="--upFieldSmoothing %f", - ), - upperThresholdForBOBF=dict( - argstr="--upperThresholdForBOBF %d", - ), - use_vanilla_dem=dict( - 
argstr="--use_vanilla_dem ", - ), + argstr="--outputDisplacementFieldVolume %s", hash_files=False + ), + outputNormalized=dict(argstr="--outputNormalized "), + outputPixelType=dict(argstr="--outputPixelType %s"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + promptUser=dict(argstr="--promptUser "), + registrationFilterType=dict(argstr="--registrationFilterType %s"), + seedForBOBF=dict(argstr="--seedForBOBF %s", sep=","), + smoothDisplacementFieldSigma=dict(argstr="--smoothDisplacementFieldSigma %f"), + upFieldSmoothing=dict(argstr="--upFieldSmoothing %f"), + upperThresholdForBOBF=dict(argstr="--upperThresholdForBOBF %d"), + use_vanilla_dem=dict(argstr="--use_vanilla_dem "), ) inputs = BRAINSDemonWarp.input_spec() @@ -157,15 +68,9 @@ def test_BRAINSDemonWarp_inputs(): def test_BRAINSDemonWarp_outputs(): output_map = dict( - outputCheckerboardVolume=dict( - extensions=None, - ), - outputDisplacementFieldVolume=dict( - extensions=None, - ), - outputVolume=dict( - extensions=None, - ), + outputCheckerboardVolume=dict(extensions=None), + outputDisplacementFieldVolume=dict(extensions=None), + outputVolume=dict(extensions=None), ) outputs = BRAINSDemonWarp.output_spec() diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py index 0d7b124635..f3d5a50759 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py @@ -5,213 +5,80 @@ def test_BRAINSFit_inputs(): input_map = dict( NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_00=dict( - argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_00 ", + argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_00 " ), NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_01=dict( - argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_01 ", + argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_01 " ), NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_02=dict( - argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_02 ", - ), - ROIAutoClosingSize=dict( - argstr="--ROIAutoClosingSize %f", - ), - ROIAutoDilateSize=dict( - argstr="--ROIAutoDilateSize %f", - ), - args=dict( - argstr="%s", - ), - backgroundFillValue=dict( - argstr="--backgroundFillValue %f", - ), - bsplineTransform=dict( - argstr="--bsplineTransform %s", - hash_files=False, - ), - costFunctionConvergenceFactor=dict( - argstr="--costFunctionConvergenceFactor %f", - ), - costMetric=dict( - argstr="--costMetric %s", - ), - debugLevel=dict( - argstr="--debugLevel %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - failureExitCode=dict( - argstr="--failureExitCode %d", - ), - fixedBinaryVolume=dict( - argstr="--fixedBinaryVolume %s", - extensions=None, - ), - fixedVolume=dict( - argstr="--fixedVolume %s", - extensions=None, - ), - fixedVolumeTimeIndex=dict( - argstr="--fixedVolumeTimeIndex %d", - ), - forceMINumberOfThreads=dict( - argstr="--forceMINumberOfThreads %d", - ), - gui=dict( - argstr="--gui ", - ), - histogramMatch=dict( - argstr="--histogramMatch ", - ), - initialTransform=dict( - argstr="--initialTransform %s", - extensions=None, - ), - initializeTransformMode=dict( - argstr="--initializeTransformMode %s", - ), - interpolationMode=dict( - argstr="--interpolationMode %s", - ), - linearTransform=dict( - argstr="--linearTransform %s", - hash_files=False, - ), - maskInferiorCutOffFromCenter=dict( - argstr="--maskInferiorCutOffFromCenter %f", - ), - maskProcessingMode=dict( - argstr="--maskProcessingMode %s", - ), - maxBSplineDisplacement=dict( - 
argstr="--maxBSplineDisplacement %f", - ), - maximumStepLength=dict( - argstr="--maximumStepLength %f", - ), - medianFilterSize=dict( - argstr="--medianFilterSize %s", - sep=",", - ), - minimumStepLength=dict( - argstr="--minimumStepLength %s", - sep=",", - ), - movingBinaryVolume=dict( - argstr="--movingBinaryVolume %s", - extensions=None, - ), - movingVolume=dict( - argstr="--movingVolume %s", - extensions=None, - ), - movingVolumeTimeIndex=dict( - argstr="--movingVolumeTimeIndex %d", - ), - numberOfHistogramBins=dict( - argstr="--numberOfHistogramBins %d", - ), - numberOfIterations=dict( - argstr="--numberOfIterations %s", - sep=",", - ), - numberOfMatchPoints=dict( - argstr="--numberOfMatchPoints %d", - ), - numberOfSamples=dict( - argstr="--numberOfSamples %d", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputFixedVolumeROI=dict( - argstr="--outputFixedVolumeROI %s", - hash_files=False, - ), + argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_02 " + ), + ROIAutoClosingSize=dict(argstr="--ROIAutoClosingSize %f"), + ROIAutoDilateSize=dict(argstr="--ROIAutoDilateSize %f"), + args=dict(argstr="%s"), + backgroundFillValue=dict(argstr="--backgroundFillValue %f"), + bsplineTransform=dict(argstr="--bsplineTransform %s", hash_files=False), + costFunctionConvergenceFactor=dict(argstr="--costFunctionConvergenceFactor %f"), + costMetric=dict(argstr="--costMetric %s"), + debugLevel=dict(argstr="--debugLevel %d"), + environ=dict(nohash=True, usedefault=True), + failureExitCode=dict(argstr="--failureExitCode %d"), + fixedBinaryVolume=dict(argstr="--fixedBinaryVolume %s", extensions=None), + fixedVolume=dict(argstr="--fixedVolume %s", extensions=None), + fixedVolumeTimeIndex=dict(argstr="--fixedVolumeTimeIndex %d"), + forceMINumberOfThreads=dict(argstr="--forceMINumberOfThreads %d"), + gui=dict(argstr="--gui "), + histogramMatch=dict(argstr="--histogramMatch "), + initialTransform=dict(argstr="--initialTransform %s", extensions=None), + initializeTransformMode=dict(argstr="--initializeTransformMode %s"), + interpolationMode=dict(argstr="--interpolationMode %s"), + linearTransform=dict(argstr="--linearTransform %s", hash_files=False), + maskInferiorCutOffFromCenter=dict(argstr="--maskInferiorCutOffFromCenter %f"), + maskProcessingMode=dict(argstr="--maskProcessingMode %s"), + maxBSplineDisplacement=dict(argstr="--maxBSplineDisplacement %f"), + maximumStepLength=dict(argstr="--maximumStepLength %f"), + medianFilterSize=dict(argstr="--medianFilterSize %s", sep=","), + minimumStepLength=dict(argstr="--minimumStepLength %s", sep=","), + movingBinaryVolume=dict(argstr="--movingBinaryVolume %s", extensions=None), + movingVolume=dict(argstr="--movingVolume %s", extensions=None), + movingVolumeTimeIndex=dict(argstr="--movingVolumeTimeIndex %d"), + numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d"), + numberOfIterations=dict(argstr="--numberOfIterations %s", sep=","), + numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d"), + numberOfSamples=dict(argstr="--numberOfSamples %d"), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputFixedVolumeROI=dict(argstr="--outputFixedVolumeROI %s", hash_files=False), outputMovingVolumeROI=dict( - argstr="--outputMovingVolumeROI %s", - hash_files=False, - ), - outputTransform=dict( - argstr="--outputTransform %s", - hash_files=False, - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - outputVolumePixelType=dict( - argstr="--outputVolumePixelType %s", - ), - permitParameterVariation=dict( - 
argstr="--permitParameterVariation %s", - sep=",", - ), - projectedGradientTolerance=dict( - argstr="--projectedGradientTolerance %f", - ), - promptUser=dict( - argstr="--promptUser ", - ), - relaxationFactor=dict( - argstr="--relaxationFactor %f", - ), - removeIntensityOutliers=dict( - argstr="--removeIntensityOutliers %f", - ), - reproportionScale=dict( - argstr="--reproportionScale %f", - ), - scaleOutputValues=dict( - argstr="--scaleOutputValues ", - ), - skewScale=dict( - argstr="--skewScale %f", - ), - splineGridSize=dict( - argstr="--splineGridSize %s", - sep=",", - ), + argstr="--outputMovingVolumeROI %s", hash_files=False + ), + outputTransform=dict(argstr="--outputTransform %s", hash_files=False), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + outputVolumePixelType=dict(argstr="--outputVolumePixelType %s"), + permitParameterVariation=dict(argstr="--permitParameterVariation %s", sep=","), + projectedGradientTolerance=dict(argstr="--projectedGradientTolerance %f"), + promptUser=dict(argstr="--promptUser "), + relaxationFactor=dict(argstr="--relaxationFactor %f"), + removeIntensityOutliers=dict(argstr="--removeIntensityOutliers %f"), + reproportionScale=dict(argstr="--reproportionScale %f"), + scaleOutputValues=dict(argstr="--scaleOutputValues "), + skewScale=dict(argstr="--skewScale %f"), + splineGridSize=dict(argstr="--splineGridSize %s", sep=","), strippedOutputTransform=dict( - argstr="--strippedOutputTransform %s", - hash_files=False, - ), - transformType=dict( - argstr="--transformType %s", - sep=",", - ), - translationScale=dict( - argstr="--translationScale %f", - ), - useAffine=dict( - argstr="--useAffine ", - ), - useBSpline=dict( - argstr="--useBSpline ", + argstr="--strippedOutputTransform %s", hash_files=False ), + transformType=dict(argstr="--transformType %s", sep=","), + translationScale=dict(argstr="--translationScale %f"), + useAffine=dict(argstr="--useAffine "), + useBSpline=dict(argstr="--useBSpline "), useCachingOfBSplineWeightsMode=dict( - argstr="--useCachingOfBSplineWeightsMode %s", - ), - useExplicitPDFDerivativesMode=dict( - argstr="--useExplicitPDFDerivativesMode %s", - ), - useRigid=dict( - argstr="--useRigid ", - ), - useScaleSkewVersor3D=dict( - argstr="--useScaleSkewVersor3D ", - ), - useScaleVersor3D=dict( - argstr="--useScaleVersor3D ", - ), - writeOutputTransformInFloat=dict( - argstr="--writeOutputTransformInFloat ", - ), - writeTransformOnFailure=dict( - argstr="--writeTransformOnFailure ", - ), + argstr="--useCachingOfBSplineWeightsMode %s" + ), + useExplicitPDFDerivativesMode=dict(argstr="--useExplicitPDFDerivativesMode %s"), + useRigid=dict(argstr="--useRigid "), + useScaleSkewVersor3D=dict(argstr="--useScaleSkewVersor3D "), + useScaleVersor3D=dict(argstr="--useScaleVersor3D "), + writeOutputTransformInFloat=dict(argstr="--writeOutputTransformInFloat "), + writeTransformOnFailure=dict(argstr="--writeTransformOnFailure "), ) inputs = BRAINSFit.input_spec() @@ -222,27 +89,13 @@ def test_BRAINSFit_inputs(): def test_BRAINSFit_outputs(): output_map = dict( - bsplineTransform=dict( - extensions=None, - ), - linearTransform=dict( - extensions=None, - ), - outputFixedVolumeROI=dict( - extensions=None, - ), - outputMovingVolumeROI=dict( - extensions=None, - ), - outputTransform=dict( - extensions=None, - ), - outputVolume=dict( - extensions=None, - ), - strippedOutputTransform=dict( - extensions=None, - ), + bsplineTransform=dict(extensions=None), + linearTransform=dict(extensions=None), + 
outputFixedVolumeROI=dict(extensions=None), + outputMovingVolumeROI=dict(extensions=None), + outputTransform=dict(extensions=None), + outputVolume=dict(extensions=None), + strippedOutputTransform=dict(extensions=None), ) outputs = BRAINSFit.output_spec() diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py index 46d175da07..f2b6760eea 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py @@ -4,52 +4,19 @@ def test_BRAINSResample_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - defaultValue=dict( - argstr="--defaultValue %f", - ), - deformationVolume=dict( - argstr="--deformationVolume %s", - extensions=None, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gridSpacing=dict( - argstr="--gridSpacing %s", - sep=",", - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - interpolationMode=dict( - argstr="--interpolationMode %s", - ), - inverseTransform=dict( - argstr="--inverseTransform ", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - pixelType=dict( - argstr="--pixelType %s", - ), - referenceVolume=dict( - argstr="--referenceVolume %s", - extensions=None, - ), - warpTransform=dict( - argstr="--warpTransform %s", - extensions=None, - ), + args=dict(argstr="%s"), + defaultValue=dict(argstr="--defaultValue %f"), + deformationVolume=dict(argstr="--deformationVolume %s", extensions=None), + environ=dict(nohash=True, usedefault=True), + gridSpacing=dict(argstr="--gridSpacing %s", sep=","), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + interpolationMode=dict(argstr="--interpolationMode %s"), + inverseTransform=dict(argstr="--inverseTransform "), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + pixelType=dict(argstr="--pixelType %s"), + referenceVolume=dict(argstr="--referenceVolume %s", extensions=None), + warpTransform=dict(argstr="--warpTransform %s", extensions=None), ) inputs = BRAINSResample.input_spec() @@ -59,11 +26,7 @@ def test_BRAINSResample_inputs(): def test_BRAINSResample_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - ), - ) + output_map = dict(outputVolume=dict(extensions=None)) outputs = BRAINSResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py b/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py index 6b511790c7..f70f00a4ed 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py @@ -4,32 +4,14 @@ def test_FiducialRegistration_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixedLandmarks=dict( - argstr="--fixedLandmarks %s...", - ), - movingLandmarks=dict( - argstr="--movingLandmarks %s...", - ), - outputMessage=dict( - argstr="--outputMessage %s", - ), - rms=dict( - argstr="--rms %f", - ), - saveTransform=dict( - argstr="--saveTransform %s", - hash_files=False, - ), - transformType=dict( - argstr="--transformType %s", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, 
usedefault=True), + fixedLandmarks=dict(argstr="--fixedLandmarks %s..."), + movingLandmarks=dict(argstr="--movingLandmarks %s..."), + outputMessage=dict(argstr="--outputMessage %s"), + rms=dict(argstr="--rms %f"), + saveTransform=dict(argstr="--saveTransform %s", hash_files=False), + transformType=dict(argstr="--transformType %s"), ) inputs = FiducialRegistration.input_spec() @@ -39,11 +21,7 @@ def test_FiducialRegistration_inputs(): def test_FiducialRegistration_outputs(): - output_map = dict( - saveTransform=dict( - extensions=None, - ), - ) + output_map = dict(saveTransform=dict(extensions=None)) outputs = FiducialRegistration.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py b/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py index af8bac8680..45bfc67734 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py @@ -4,151 +4,61 @@ def test_VBRAINSDemonWarp_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), arrayOfPyramidLevelIterations=dict( - argstr="--arrayOfPyramidLevelIterations %s", - sep=",", - ), - backgroundFillValue=dict( - argstr="--backgroundFillValue %d", + argstr="--arrayOfPyramidLevelIterations %s", sep="," ), + backgroundFillValue=dict(argstr="--backgroundFillValue %d"), checkerboardPatternSubdivisions=dict( - argstr="--checkerboardPatternSubdivisions %s", - sep=",", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fixedBinaryVolume=dict( - argstr="--fixedBinaryVolume %s", - extensions=None, - ), - fixedVolume=dict( - argstr="--fixedVolume %s...", - ), - gradient_type=dict( - argstr="--gradient_type %s", - ), - gui=dict( - argstr="--gui ", - ), - histogramMatch=dict( - argstr="--histogramMatch ", - ), + argstr="--checkerboardPatternSubdivisions %s", sep="," + ), + environ=dict(nohash=True, usedefault=True), + fixedBinaryVolume=dict(argstr="--fixedBinaryVolume %s", extensions=None), + fixedVolume=dict(argstr="--fixedVolume %s..."), + gradient_type=dict(argstr="--gradient_type %s"), + gui=dict(argstr="--gui "), + histogramMatch=dict(argstr="--histogramMatch "), initializeWithDisplacementField=dict( - argstr="--initializeWithDisplacementField %s", - extensions=None, + argstr="--initializeWithDisplacementField %s", extensions=None ), initializeWithTransform=dict( - argstr="--initializeWithTransform %s", - extensions=None, - ), - inputPixelType=dict( - argstr="--inputPixelType %s", - ), - interpolationMode=dict( - argstr="--interpolationMode %s", - ), - lowerThresholdForBOBF=dict( - argstr="--lowerThresholdForBOBF %d", - ), - makeBOBF=dict( - argstr="--makeBOBF ", - ), - max_step_length=dict( - argstr="--max_step_length %f", - ), - medianFilterSize=dict( - argstr="--medianFilterSize %s", - sep=",", - ), - minimumFixedPyramid=dict( - argstr="--minimumFixedPyramid %s", - sep=",", - ), - minimumMovingPyramid=dict( - argstr="--minimumMovingPyramid %s", - sep=",", - ), - movingBinaryVolume=dict( - argstr="--movingBinaryVolume %s", - extensions=None, - ), - movingVolume=dict( - argstr="--movingVolume %s...", - ), - neighborhoodForBOBF=dict( - argstr="--neighborhoodForBOBF %s", - sep=",", - ), - numberOfBCHApproximationTerms=dict( - argstr="--numberOfBCHApproximationTerms %d", - ), - numberOfHistogramBins=dict( - argstr="--numberOfHistogramBins %d", - ), - numberOfMatchPoints=dict( - 
argstr="--numberOfMatchPoints %d", - ), - numberOfPyramidLevels=dict( - argstr="--numberOfPyramidLevels %d", - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), + argstr="--initializeWithTransform %s", extensions=None + ), + inputPixelType=dict(argstr="--inputPixelType %s"), + interpolationMode=dict(argstr="--interpolationMode %s"), + lowerThresholdForBOBF=dict(argstr="--lowerThresholdForBOBF %d"), + makeBOBF=dict(argstr="--makeBOBF "), + max_step_length=dict(argstr="--max_step_length %f"), + medianFilterSize=dict(argstr="--medianFilterSize %s", sep=","), + minimumFixedPyramid=dict(argstr="--minimumFixedPyramid %s", sep=","), + minimumMovingPyramid=dict(argstr="--minimumMovingPyramid %s", sep=","), + movingBinaryVolume=dict(argstr="--movingBinaryVolume %s", extensions=None), + movingVolume=dict(argstr="--movingVolume %s..."), + neighborhoodForBOBF=dict(argstr="--neighborhoodForBOBF %s", sep=","), + numberOfBCHApproximationTerms=dict(argstr="--numberOfBCHApproximationTerms %d"), + numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d"), + numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d"), + numberOfPyramidLevels=dict(argstr="--numberOfPyramidLevels %d"), + numberOfThreads=dict(argstr="--numberOfThreads %d"), outputCheckerboardVolume=dict( - argstr="--outputCheckerboardVolume %s", - hash_files=False, - ), - outputDebug=dict( - argstr="--outputDebug ", - ), - outputDisplacementFieldPrefix=dict( - argstr="--outputDisplacementFieldPrefix %s", + argstr="--outputCheckerboardVolume %s", hash_files=False ), + outputDebug=dict(argstr="--outputDebug "), + outputDisplacementFieldPrefix=dict(argstr="--outputDisplacementFieldPrefix %s"), outputDisplacementFieldVolume=dict( - argstr="--outputDisplacementFieldVolume %s", - hash_files=False, - ), - outputNormalized=dict( - argstr="--outputNormalized ", - ), - outputPixelType=dict( - argstr="--outputPixelType %s", - ), - outputVolume=dict( - argstr="--outputVolume %s", - hash_files=False, - ), - promptUser=dict( - argstr="--promptUser ", - ), - registrationFilterType=dict( - argstr="--registrationFilterType %s", - ), - seedForBOBF=dict( - argstr="--seedForBOBF %s", - sep=",", - ), - smoothDisplacementFieldSigma=dict( - argstr="--smoothDisplacementFieldSigma %f", - ), - upFieldSmoothing=dict( - argstr="--upFieldSmoothing %f", - ), - upperThresholdForBOBF=dict( - argstr="--upperThresholdForBOBF %d", - ), - use_vanilla_dem=dict( - argstr="--use_vanilla_dem ", - ), - weightFactors=dict( - argstr="--weightFactors %s", - sep=",", - ), + argstr="--outputDisplacementFieldVolume %s", hash_files=False + ), + outputNormalized=dict(argstr="--outputNormalized "), + outputPixelType=dict(argstr="--outputPixelType %s"), + outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + promptUser=dict(argstr="--promptUser "), + registrationFilterType=dict(argstr="--registrationFilterType %s"), + seedForBOBF=dict(argstr="--seedForBOBF %s", sep=","), + smoothDisplacementFieldSigma=dict(argstr="--smoothDisplacementFieldSigma %f"), + upFieldSmoothing=dict(argstr="--upFieldSmoothing %f"), + upperThresholdForBOBF=dict(argstr="--upperThresholdForBOBF %d"), + use_vanilla_dem=dict(argstr="--use_vanilla_dem "), + weightFactors=dict(argstr="--weightFactors %s", sep=","), ) inputs = VBRAINSDemonWarp.input_spec() @@ -159,15 +69,9 @@ def test_VBRAINSDemonWarp_inputs(): def test_VBRAINSDemonWarp_outputs(): output_map = dict( - outputCheckerboardVolume=dict( - extensions=None, - ), - outputDisplacementFieldVolume=dict( - extensions=None, - ), - 
outputVolume=dict( - extensions=None, - ), + outputCheckerboardVolume=dict(extensions=None), + outputDisplacementFieldVolume=dict(extensions=None), + outputVolume=dict(extensions=None), ) outputs = VBRAINSDemonWarp.output_spec() diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py index 8990caaf1a..cffee26c1f 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py @@ -4,43 +4,19 @@ def test_BRAINSROIAuto_inputs(): input_map = dict( - ROIAutoDilateSize=dict( - argstr="--ROIAutoDilateSize %f", - ), - args=dict( - argstr="%s", - ), - closingSize=dict( - argstr="--closingSize %f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="--inputVolume %s", - extensions=None, - ), - numberOfThreads=dict( - argstr="--numberOfThreads %d", - ), - otsuPercentileThreshold=dict( - argstr="--otsuPercentileThreshold %f", - ), + ROIAutoDilateSize=dict(argstr="--ROIAutoDilateSize %f"), + args=dict(argstr="%s"), + closingSize=dict(argstr="--closingSize %f"), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="--inputVolume %s", extensions=None), + numberOfThreads=dict(argstr="--numberOfThreads %d"), + otsuPercentileThreshold=dict(argstr="--otsuPercentileThreshold %f"), outputClippedVolumeROI=dict( - argstr="--outputClippedVolumeROI %s", - hash_files=False, - ), - outputROIMaskVolume=dict( - argstr="--outputROIMaskVolume %s", - hash_files=False, - ), - outputVolumePixelType=dict( - argstr="--outputVolumePixelType %s", - ), - thresholdCorrectionFactor=dict( - argstr="--thresholdCorrectionFactor %f", + argstr="--outputClippedVolumeROI %s", hash_files=False ), + outputROIMaskVolume=dict(argstr="--outputROIMaskVolume %s", hash_files=False), + outputVolumePixelType=dict(argstr="--outputVolumePixelType %s"), + thresholdCorrectionFactor=dict(argstr="--thresholdCorrectionFactor %f"), ) inputs = BRAINSROIAuto.input_spec() @@ -51,12 +27,8 @@ def test_BRAINSROIAuto_inputs(): def test_BRAINSROIAuto_outputs(): output_map = dict( - outputClippedVolumeROI=dict( - extensions=None, - ), - outputROIMaskVolume=dict( - extensions=None, - ), + outputClippedVolumeROI=dict(extensions=None), + outputROIMaskVolume=dict(extensions=None), ) outputs = BRAINSROIAuto.output_spec() diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py index 2ed2595d4e..68cc07d823 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py @@ -4,81 +4,35 @@ def test_EMSegmentCommandLine_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - atlasVolumeFileNames=dict( - argstr="--atlasVolumeFileNames %s...", - ), - disableCompression=dict( - argstr="--disableCompression ", - ), - disableMultithreading=dict( - argstr="--disableMultithreading %d", - ), - dontUpdateIntermediateData=dict( - argstr="--dontUpdateIntermediateData %d", - ), - dontWriteResults=dict( - argstr="--dontWriteResults ", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + atlasVolumeFileNames=dict(argstr="--atlasVolumeFileNames %s..."), + disableCompression=dict(argstr="--disableCompression "), + disableMultithreading=dict(argstr="--disableMultithreading %d"), 
+ dontUpdateIntermediateData=dict(argstr="--dontUpdateIntermediateData %d"), + dontWriteResults=dict(argstr="--dontWriteResults "), + environ=dict(nohash=True, usedefault=True), generateEmptyMRMLSceneAndQuit=dict( - argstr="--generateEmptyMRMLSceneAndQuit %s", - hash_files=False, - ), - intermediateResultsDirectory=dict( - argstr="--intermediateResultsDirectory %s", - ), - keepTempFiles=dict( - argstr="--keepTempFiles ", - ), - loadAtlasNonCentered=dict( - argstr="--loadAtlasNonCentered ", - ), - loadTargetCentered=dict( - argstr="--loadTargetCentered ", - ), - mrmlSceneFileName=dict( - argstr="--mrmlSceneFileName %s", - extensions=None, - ), - parametersMRMLNodeName=dict( - argstr="--parametersMRMLNodeName %s", - ), - registrationAffineType=dict( - argstr="--registrationAffineType %d", - ), - registrationDeformableType=dict( - argstr="--registrationDeformableType %d", - ), - registrationPackage=dict( - argstr="--registrationPackage %s", - ), + argstr="--generateEmptyMRMLSceneAndQuit %s", hash_files=False + ), + intermediateResultsDirectory=dict(argstr="--intermediateResultsDirectory %s"), + keepTempFiles=dict(argstr="--keepTempFiles "), + loadAtlasNonCentered=dict(argstr="--loadAtlasNonCentered "), + loadTargetCentered=dict(argstr="--loadTargetCentered "), + mrmlSceneFileName=dict(argstr="--mrmlSceneFileName %s", extensions=None), + parametersMRMLNodeName=dict(argstr="--parametersMRMLNodeName %s"), + registrationAffineType=dict(argstr="--registrationAffineType %d"), + registrationDeformableType=dict(argstr="--registrationDeformableType %d"), + registrationPackage=dict(argstr="--registrationPackage %s"), resultMRMLSceneFileName=dict( - argstr="--resultMRMLSceneFileName %s", - hash_files=False, + argstr="--resultMRMLSceneFileName %s", hash_files=False ), resultStandardVolumeFileName=dict( - argstr="--resultStandardVolumeFileName %s", - extensions=None, - ), - resultVolumeFileName=dict( - argstr="--resultVolumeFileName %s", - hash_files=False, - ), - targetVolumeFileNames=dict( - argstr="--targetVolumeFileNames %s...", - ), - taskPreProcessingSetting=dict( - argstr="--taskPreProcessingSetting %s", - ), - verbose=dict( - argstr="--verbose ", + argstr="--resultStandardVolumeFileName %s", extensions=None ), + resultVolumeFileName=dict(argstr="--resultVolumeFileName %s", hash_files=False), + targetVolumeFileNames=dict(argstr="--targetVolumeFileNames %s..."), + taskPreProcessingSetting=dict(argstr="--taskPreProcessingSetting %s"), + verbose=dict(argstr="--verbose "), ) inputs = EMSegmentCommandLine.input_spec() @@ -89,15 +43,9 @@ def test_EMSegmentCommandLine_inputs(): def test_EMSegmentCommandLine_outputs(): output_map = dict( - generateEmptyMRMLSceneAndQuit=dict( - extensions=None, - ), - resultMRMLSceneFileName=dict( - extensions=None, - ), - resultVolumeFileName=dict( - extensions=None, - ), + generateEmptyMRMLSceneAndQuit=dict(extensions=None), + resultMRMLSceneFileName=dict(extensions=None), + resultVolumeFileName=dict(extensions=None), ) outputs = EMSegmentCommandLine.output_spec() diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py index 4bd05c6fc2..fdd72676c4 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py @@ -4,43 +4,16 @@ def test_RobustStatisticsSegmenter_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - 
curvatureWeight=dict( - argstr="--curvatureWeight %f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - expectedVolume=dict( - argstr="--expectedVolume %f", - ), - intensityHomogeneity=dict( - argstr="--intensityHomogeneity %f", - ), - labelImageFileName=dict( - argstr="%s", - extensions=None, - position=-2, - ), - labelValue=dict( - argstr="--labelValue %d", - ), - maxRunningTime=dict( - argstr="--maxRunningTime %f", - ), - originalImageFileName=dict( - argstr="%s", - extensions=None, - position=-3, - ), - segmentedImageFileName=dict( - argstr="%s", - hash_files=False, - position=-1, - ), + args=dict(argstr="%s"), + curvatureWeight=dict(argstr="--curvatureWeight %f"), + environ=dict(nohash=True, usedefault=True), + expectedVolume=dict(argstr="--expectedVolume %f"), + intensityHomogeneity=dict(argstr="--intensityHomogeneity %f"), + labelImageFileName=dict(argstr="%s", extensions=None, position=-2), + labelValue=dict(argstr="--labelValue %d"), + maxRunningTime=dict(argstr="--maxRunningTime %f"), + originalImageFileName=dict(argstr="%s", extensions=None, position=-3), + segmentedImageFileName=dict(argstr="%s", hash_files=False, position=-1), ) inputs = RobustStatisticsSegmenter.input_spec() @@ -50,12 +23,7 @@ def test_RobustStatisticsSegmenter_inputs(): def test_RobustStatisticsSegmenter_outputs(): - output_map = dict( - segmentedImageFileName=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(segmentedImageFileName=dict(extensions=None, position=-1)) outputs = RobustStatisticsSegmenter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py index 9a308ec959..d70780d809 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py @@ -4,44 +4,17 @@ def test_SimpleRegionGrowingSegmentation_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - iterations=dict( - argstr="--iterations %d", - ), - labelvalue=dict( - argstr="--labelvalue %d", - ), - multiplier=dict( - argstr="--multiplier %f", - ), - neighborhood=dict( - argstr="--neighborhood %d", - ), - outputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - seed=dict( - argstr="--seed %s...", - ), - smoothingIterations=dict( - argstr="--smoothingIterations %d", - ), - timestep=dict( - argstr="--timestep %f", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputVolume=dict(argstr="%s", extensions=None, position=-2), + iterations=dict(argstr="--iterations %d"), + labelvalue=dict(argstr="--labelvalue %d"), + multiplier=dict(argstr="--multiplier %f"), + neighborhood=dict(argstr="--neighborhood %d"), + outputVolume=dict(argstr="%s", hash_files=False, position=-1), + seed=dict(argstr="--seed %s..."), + smoothingIterations=dict(argstr="--smoothingIterations %d"), + timestep=dict(argstr="--timestep %f"), ) inputs = SimpleRegionGrowingSegmentation.input_spec() @@ -51,12 +24,7 @@ def test_SimpleRegionGrowingSegmentation_inputs(): def test_SimpleRegionGrowingSegmentation_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = 
dict(outputVolume=dict(extensions=None, position=-1)) outputs = SimpleRegionGrowingSegmentation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py b/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py index b02dfd595d..50138f9388 100644 --- a/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py +++ b/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py @@ -4,35 +4,15 @@ def test_DicomToNrrdConverter_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputDicomDirectory=dict( - argstr="--inputDicomDirectory %s", - ), - outputDirectory=dict( - argstr="--outputDirectory %s", - hash_files=False, - ), - outputVolume=dict( - argstr="--outputVolume %s", - ), - smallGradientThreshold=dict( - argstr="--smallGradientThreshold %f", - ), - useBMatrixGradientDirections=dict( - argstr="--useBMatrixGradientDirections ", - ), - useIdentityMeaseurementFrame=dict( - argstr="--useIdentityMeaseurementFrame ", - ), - writeProtocolGradientsFile=dict( - argstr="--writeProtocolGradientsFile ", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputDicomDirectory=dict(argstr="--inputDicomDirectory %s"), + outputDirectory=dict(argstr="--outputDirectory %s", hash_files=False), + outputVolume=dict(argstr="--outputVolume %s"), + smallGradientThreshold=dict(argstr="--smallGradientThreshold %f"), + useBMatrixGradientDirections=dict(argstr="--useBMatrixGradientDirections "), + useIdentityMeaseurementFrame=dict(argstr="--useIdentityMeaseurementFrame "), + writeProtocolGradientsFile=dict(argstr="--writeProtocolGradientsFile "), ) inputs = DicomToNrrdConverter.input_spec() @@ -42,9 +22,7 @@ def test_DicomToNrrdConverter_inputs(): def test_DicomToNrrdConverter_outputs(): - output_map = dict( - outputDirectory=dict(), - ) + output_map = dict(outputDirectory=dict()) outputs = DicomToNrrdConverter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py b/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py index 338fa49cae..ef55ce0c3a 100644 --- a/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py +++ b/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py @@ -4,24 +4,11 @@ def test_EMSegmentTransformToNewFormat_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputMRMLFileName=dict( - argstr="--inputMRMLFileName %s", - extensions=None, - ), - outputMRMLFileName=dict( - argstr="--outputMRMLFileName %s", - hash_files=False, - ), - templateFlag=dict( - argstr="--templateFlag ", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputMRMLFileName=dict(argstr="--inputMRMLFileName %s", extensions=None), + outputMRMLFileName=dict(argstr="--outputMRMLFileName %s", hash_files=False), + templateFlag=dict(argstr="--templateFlag "), ) inputs = EMSegmentTransformToNewFormat.input_spec() @@ -31,11 +18,7 @@ def test_EMSegmentTransformToNewFormat_inputs(): def test_EMSegmentTransformToNewFormat_outputs(): - output_map = dict( - outputMRMLFileName=dict( - extensions=None, - ), - ) + output_map = dict(outputMRMLFileName=dict(extensions=None)) outputs = EMSegmentTransformToNewFormat.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py b/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py index 8bab4bd963..93b911db65 100644 --- a/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py +++ b/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py @@ -4,41 +4,16 @@ def test_GrayscaleModelMaker_inputs(): input_map = dict( - InputVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - OutputGeometry=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - args=dict( - argstr="%s", - ), - decimate=dict( - argstr="--decimate %f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - name=dict( - argstr="--name %s", - ), - pointnormals=dict( - argstr="--pointnormals ", - ), - smooth=dict( - argstr="--smooth %d", - ), - splitnormals=dict( - argstr="--splitnormals ", - ), - threshold=dict( - argstr="--threshold %f", - ), + InputVolume=dict(argstr="%s", extensions=None, position=-2), + OutputGeometry=dict(argstr="%s", hash_files=False, position=-1), + args=dict(argstr="%s"), + decimate=dict(argstr="--decimate %f"), + environ=dict(nohash=True, usedefault=True), + name=dict(argstr="--name %s"), + pointnormals=dict(argstr="--pointnormals "), + smooth=dict(argstr="--smooth %d"), + splitnormals=dict(argstr="--splitnormals "), + threshold=dict(argstr="--threshold %f"), ) inputs = GrayscaleModelMaker.input_spec() @@ -48,12 +23,7 @@ def test_GrayscaleModelMaker_inputs(): def test_GrayscaleModelMaker_outputs(): - output_map = dict( - OutputGeometry=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(OutputGeometry=dict(extensions=None, position=-1)) outputs = GrayscaleModelMaker.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py b/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py index 3dab7b1498..605560c896 100644 --- a/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py +++ b/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py @@ -4,35 +4,14 @@ def test_LabelMapSmoothing_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - gaussianSigma=dict( - argstr="--gaussianSigma %f", - ), - inputVolume=dict( - argstr="%s", - extensions=None, - position=-2, - ), - labelToSmooth=dict( - argstr="--labelToSmooth %d", - ), - maxRMSError=dict( - argstr="--maxRMSError %f", - ), - numberOfIterations=dict( - argstr="--numberOfIterations %d", - ), - outputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + gaussianSigma=dict(argstr="--gaussianSigma %f"), + inputVolume=dict(argstr="%s", extensions=None, position=-2), + labelToSmooth=dict(argstr="--labelToSmooth %d"), + maxRMSError=dict(argstr="--maxRMSError %f"), + numberOfIterations=dict(argstr="--numberOfIterations %d"), + outputVolume=dict(argstr="%s", hash_files=False, position=-1), ) inputs = LabelMapSmoothing.input_spec() @@ -42,12 +21,7 @@ def test_LabelMapSmoothing_inputs(): def test_LabelMapSmoothing_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(outputVolume=dict(extensions=None, position=-1)) outputs = LabelMapSmoothing.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_MergeModels.py b/nipype/interfaces/slicer/tests/test_auto_MergeModels.py index 
dc93147248..c66ac2bc8b 100644 --- a/nipype/interfaces/slicer/tests/test_auto_MergeModels.py +++ b/nipype/interfaces/slicer/tests/test_auto_MergeModels.py @@ -4,28 +4,11 @@ def test_MergeModels_inputs(): input_map = dict( - Model1=dict( - argstr="%s", - extensions=None, - position=-3, - ), - Model2=dict( - argstr="%s", - extensions=None, - position=-2, - ), - ModelOutput=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + Model1=dict(argstr="%s", extensions=None, position=-3), + Model2=dict(argstr="%s", extensions=None, position=-2), + ModelOutput=dict(argstr="%s", hash_files=False, position=-1), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), ) inputs = MergeModels.input_spec() @@ -35,12 +18,7 @@ def test_MergeModels_inputs(): def test_MergeModels_outputs(): - output_map = dict( - ModelOutput=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(ModelOutput=dict(extensions=None, position=-1)) outputs = MergeModels.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py b/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py index 905b1417e9..264e8b2f89 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py +++ b/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py @@ -4,72 +4,26 @@ def test_ModelMaker_inputs(): input_map = dict( - InputVolume=dict( - argstr="%s", - extensions=None, - position=-1, - ), - args=dict( - argstr="%s", - ), - color=dict( - argstr="--color %s", - extensions=None, - ), - debug=dict( - argstr="--debug ", - ), - decimate=dict( - argstr="--decimate %f", - ), - end=dict( - argstr="--end %d", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - filtertype=dict( - argstr="--filtertype %s", - ), - generateAll=dict( - argstr="--generateAll ", - ), - jointsmooth=dict( - argstr="--jointsmooth ", - ), - labels=dict( - argstr="--labels %s", - sep=",", - ), - modelSceneFile=dict( - argstr="--modelSceneFile %s...", - hash_files=False, - ), - name=dict( - argstr="--name %s", - ), - pad=dict( - argstr="--pad ", - ), - pointnormals=dict( - argstr="--pointnormals ", - ), - saveIntermediateModels=dict( - argstr="--saveIntermediateModels ", - ), - skipUnNamed=dict( - argstr="--skipUnNamed ", - ), - smooth=dict( - argstr="--smooth %d", - ), - splitnormals=dict( - argstr="--splitnormals ", - ), - start=dict( - argstr="--start %d", - ), + InputVolume=dict(argstr="%s", extensions=None, position=-1), + args=dict(argstr="%s"), + color=dict(argstr="--color %s", extensions=None), + debug=dict(argstr="--debug "), + decimate=dict(argstr="--decimate %f"), + end=dict(argstr="--end %d"), + environ=dict(nohash=True, usedefault=True), + filtertype=dict(argstr="--filtertype %s"), + generateAll=dict(argstr="--generateAll "), + jointsmooth=dict(argstr="--jointsmooth "), + labels=dict(argstr="--labels %s", sep=","), + modelSceneFile=dict(argstr="--modelSceneFile %s...", hash_files=False), + name=dict(argstr="--name %s"), + pad=dict(argstr="--pad "), + pointnormals=dict(argstr="--pointnormals "), + saveIntermediateModels=dict(argstr="--saveIntermediateModels "), + skipUnNamed=dict(argstr="--skipUnNamed "), + smooth=dict(argstr="--smooth %d"), + splitnormals=dict(argstr="--splitnormals "), + start=dict(argstr="--start %d"), ) inputs = ModelMaker.input_spec() @@ -79,9 +33,7 @@ def test_ModelMaker_inputs(): def test_ModelMaker_outputs(): - output_map = dict( - 
modelSceneFile=dict(), - ) + output_map = dict(modelSceneFile=dict()) outputs = ModelMaker.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py b/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py index 8449c15fce..274494e6c0 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py +++ b/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py @@ -4,31 +4,12 @@ def test_ModelToLabelMap_inputs(): input_map = dict( - InputVolume=dict( - argstr="%s", - extensions=None, - position=-3, - ), - OutputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - args=dict( - argstr="%s", - ), - distance=dict( - argstr="--distance %f", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - surface=dict( - argstr="%s", - extensions=None, - position=-2, - ), + InputVolume=dict(argstr="%s", extensions=None, position=-3), + OutputVolume=dict(argstr="%s", hash_files=False, position=-1), + args=dict(argstr="%s"), + distance=dict(argstr="--distance %f"), + environ=dict(nohash=True, usedefault=True), + surface=dict(argstr="%s", extensions=None, position=-2), ) inputs = ModelToLabelMap.input_spec() @@ -38,12 +19,7 @@ def test_ModelToLabelMap_inputs(): def test_ModelToLabelMap_outputs(): - output_map = dict( - OutputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(OutputVolume=dict(extensions=None, position=-1)) outputs = ModelToLabelMap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py b/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py index e2b4a1a2f7..e8a2f433eb 100644 --- a/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py +++ b/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py @@ -4,26 +4,11 @@ def test_OrientScalarVolume_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - inputVolume1=dict( - argstr="%s", - extensions=None, - position=-2, - ), - orientation=dict( - argstr="--orientation %s", - ), - outputVolume=dict( - argstr="%s", - hash_files=False, - position=-1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + inputVolume1=dict(argstr="%s", extensions=None, position=-2), + orientation=dict(argstr="--orientation %s"), + outputVolume=dict(argstr="%s", hash_files=False, position=-1), ) inputs = OrientScalarVolume.input_spec() @@ -33,12 +18,7 @@ def test_OrientScalarVolume_inputs(): def test_OrientScalarVolume_outputs(): - output_map = dict( - outputVolume=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(outputVolume=dict(extensions=None, position=-1)) outputs = OrientScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py b/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py index 77498c0b08..952c029684 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py +++ b/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py @@ -4,28 +4,11 @@ def test_ProbeVolumeWithModel_inputs(): input_map = dict( - InputModel=dict( - argstr="%s", - extensions=None, - position=-2, - ), - InputVolume=dict( - argstr="%s", - extensions=None, - position=-3, - ), - OutputModel=dict( - argstr="%s", - hash_files=False, - position=-1, - ), - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - 
usedefault=True, - ), + InputModel=dict(argstr="%s", extensions=None, position=-2), + InputVolume=dict(argstr="%s", extensions=None, position=-3), + OutputModel=dict(argstr="%s", hash_files=False, position=-1), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), ) inputs = ProbeVolumeWithModel.input_spec() @@ -35,12 +18,7 @@ def test_ProbeVolumeWithModel_inputs(): def test_ProbeVolumeWithModel_outputs(): - output_map = dict( - OutputModel=dict( - extensions=None, - position=-1, - ), - ) + output_map = dict(OutputModel=dict(extensions=None, position=-1)) outputs = ProbeVolumeWithModel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py b/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py index 7a16ed38bb..1961fb2879 100644 --- a/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py +++ b/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py @@ -3,15 +3,7 @@ def test_SlicerCommandLine_inputs(): - input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - ) + input_map = dict(args=dict(argstr="%s"), environ=dict(nohash=True, usedefault=True)) inputs = SlicerCommandLine.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/interfaces/spm/preprocess.py b/nipype/interfaces/spm/preprocess.py index b3c78787a2..7409e016c3 100644 --- a/nipype/interfaces/spm/preprocess.py +++ b/nipype/interfaces/spm/preprocess.py @@ -282,7 +282,7 @@ class FieldMap(SPMCommand): def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm""" - + if ((self.inputs.jobtype == "calculatevdm") and (opt in ['phase_file', 'magnitude_file', 'anat_file', 'epi_file'])): return scans_for_fname(ensure_list(val)) diff --git a/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py b/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py index 15fe9399ed..0a77d98abc 100644 --- a/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py +++ b/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py @@ -4,20 +4,12 @@ def test_Analyze2nii_inputs(): input_map = dict( - analyze_file=dict( - extensions=None, - mandatory=True, - ), + analyze_file=dict(extensions=None, mandatory=True), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), + mfile=dict(usedefault=True), paths=dict(), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True), ) inputs = Analyze2nii.input_spec() @@ -29,18 +21,11 @@ def test_Analyze2nii_inputs(): def test_Analyze2nii_outputs(): output_map = dict( matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), - nifti_file=dict( - extensions=None, - ), + mfile=dict(usedefault=True), + nifti_file=dict(extensions=None), paths=dict(), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True), ) outputs = Analyze2nii.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py b/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py index d41a0fb4b6..37e117d1ad 100644 --- a/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py +++ b/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py @@ -4,22 +4,11 @@ def test_ApplyDeformations_inputs(): input_map = dict( - deformation_field=dict( - extensions=None, - field="comp{1}.def", - mandatory=True, - ), - in_files=dict( - field="fnames", - mandatory=True, - ), - interp=dict( - field="interp", 
- ), + deformation_field=dict(extensions=None, field="comp{1}.def", mandatory=True), + in_files=dict(field="fnames", mandatory=True), + interp=dict(field="interp"), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), + mfile=dict(usedefault=True), paths=dict(), reference_volume=dict( extensions=[".hdr", ".img", ".img.gz", ".nii"], @@ -27,10 +16,7 @@ def test_ApplyDeformations_inputs(): mandatory=True, ), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True), ) inputs = ApplyDeformations.input_spec() @@ -40,9 +26,7 @@ def test_ApplyDeformations_inputs(): def test_ApplyDeformations_outputs(): - output_map = dict( - out_files=dict(), - ) + output_map = dict(out_files=dict()) outputs = ApplyDeformations.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py b/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py index c652bd7e12..9f152c27aa 100644 --- a/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py +++ b/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py @@ -4,43 +4,24 @@ def test_ApplyInverseDeformation_inputs(): input_map = dict( - bounding_box=dict( - field="comp{1}.inv.comp{1}.sn2def.bb", - ), + bounding_box=dict(field="comp{1}.inv.comp{1}.sn2def.bb"), deformation=dict( extensions=None, field="comp{1}.inv.comp{1}.sn2def.matname", xor=["deformation_field"], ), deformation_field=dict( - extensions=None, - field="comp{1}.inv.comp{1}.def", - xor=["deformation"], - ), - in_files=dict( - field="fnames", - mandatory=True, - ), - interpolation=dict( - field="interp", + extensions=None, field="comp{1}.inv.comp{1}.def", xor=["deformation"] ), + in_files=dict(field="fnames", mandatory=True), + interpolation=dict(field="interp"), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), + mfile=dict(usedefault=True), paths=dict(), - target=dict( - extensions=None, - field="comp{1}.inv.space", - ), + target=dict(extensions=None, field="comp{1}.inv.space"), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), - voxel_sizes=dict( - field="comp{1}.inv.comp{1}.sn2def.vox", - ), + use_v8struct=dict(min_ver="8", usedefault=True), + voxel_sizes=dict(field="comp{1}.inv.comp{1}.sn2def.vox"), ) inputs = ApplyInverseDeformation.input_spec() @@ -50,9 +31,7 @@ def test_ApplyInverseDeformation_inputs(): def test_ApplyInverseDeformation_outputs(): - output_map = dict( - out_files=dict(), - ) + output_map = dict(out_files=dict()) outputs = ApplyInverseDeformation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py b/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py index ae0516370d..89bfb2059a 100644 --- a/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py +++ b/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py @@ -4,29 +4,14 @@ def test_ApplyTransform_inputs(): input_map = dict( - in_file=dict( - copyfile=True, - extensions=None, - mandatory=True, - ), - mat=dict( - extensions=None, - mandatory=True, - ), + in_file=dict(copyfile=True, extensions=None, mandatory=True), + mat=dict(extensions=None, mandatory=True), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), - out_file=dict( - extensions=None, - genfile=True, - ), + mfile=dict(usedefault=True), + out_file=dict(extensions=None, genfile=True), paths=dict(), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), + 
use_v8struct=dict(min_ver="8", usedefault=True), ) inputs = ApplyTransform.input_spec() @@ -36,11 +21,7 @@ def test_ApplyTransform_inputs(): def test_ApplyTransform_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = ApplyTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py b/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py index 156591cbb9..1d11bbc500 100644 --- a/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py +++ b/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py @@ -4,31 +4,15 @@ def test_CalcCoregAffine_inputs(): input_map = dict( - invmat=dict( - extensions=None, - ), - mat=dict( - extensions=None, - ), + invmat=dict(extensions=None), + mat=dict(extensions=None), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), - moving=dict( - copyfile=False, - extensions=None, - mandatory=True, - ), + mfile=dict(usedefault=True), + moving=dict(copyfile=False, extensions=None, mandatory=True), paths=dict(), - target=dict( - extensions=None, - mandatory=True, - ), + target=dict(extensions=None, mandatory=True), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True), ) inputs = CalcCoregAffine.input_spec() @@ -38,14 +22,7 @@ def test_CalcCoregAffine_inputs(): def test_CalcCoregAffine_outputs(): - output_map = dict( - invmat=dict( - extensions=None, - ), - mat=dict( - extensions=None, - ), - ) + output_map = dict(invmat=dict(extensions=None), mat=dict(extensions=None)) outputs = CalcCoregAffine.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_Coregister.py b/nipype/interfaces/spm/tests/test_auto_Coregister.py index 940f69ebbf..918659ad69 100644 --- a/nipype/interfaces/spm/tests/test_auto_Coregister.py +++ b/nipype/interfaces/spm/tests/test_auto_Coregister.py @@ -4,59 +4,28 @@ def test_Coregister_inputs(): input_map = dict( - apply_to_files=dict( - copyfile=True, - field="other", - ), - cost_function=dict( - field="eoptions.cost_fun", - ), - fwhm=dict( - field="eoptions.fwhm", - ), - jobtype=dict( - usedefault=True, - ), + apply_to_files=dict(copyfile=True, field="other"), + cost_function=dict(field="eoptions.cost_fun"), + fwhm=dict(field="eoptions.fwhm"), + jobtype=dict(usedefault=True), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), - out_prefix=dict( - field="roptions.prefix", - usedefault=True, - ), + mfile=dict(usedefault=True), + out_prefix=dict(field="roptions.prefix", usedefault=True), paths=dict(), - separation=dict( - field="eoptions.sep", - ), - source=dict( - copyfile=True, - field="source", - mandatory=True, - ), + separation=dict(field="eoptions.sep"), + source=dict(copyfile=True, field="source", mandatory=True), target=dict( copyfile=False, extensions=[".hdr", ".img", ".img.gz", ".nii"], field="ref", mandatory=True, ), - tolerance=dict( - field="eoptions.tol", - ), + tolerance=dict(field="eoptions.tol"), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), - write_interp=dict( - field="roptions.interp", - ), - write_mask=dict( - field="roptions.mask", - ), - write_wrap=dict( - field="roptions.wrap", - ), + use_v8struct=dict(min_ver="8", usedefault=True), + write_interp=dict(field="roptions.interp"), + write_mask=dict(field="roptions.mask"), + write_wrap=dict(field="roptions.wrap"), ) inputs = Coregister.input_spec() @@ 
-66,10 +35,7 @@ def test_Coregister_inputs(): def test_Coregister_outputs(): - output_map = dict( - coregistered_files=dict(), - coregistered_source=dict(), - ) + output_map = dict(coregistered_files=dict(), coregistered_source=dict()) outputs = Coregister.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_CreateWarped.py b/nipype/interfaces/spm/tests/test_auto_CreateWarped.py index f62694641c..8ca329690b 100644 --- a/nipype/interfaces/spm/tests/test_auto_CreateWarped.py +++ b/nipype/interfaces/spm/tests/test_auto_CreateWarped.py @@ -5,34 +5,17 @@ def test_CreateWarped_inputs(): input_map = dict( flowfield_files=dict( - copyfile=False, - field="crt_warped.flowfields", - mandatory=True, - ), - image_files=dict( - copyfile=False, - field="crt_warped.images", - mandatory=True, - ), - interp=dict( - field="crt_warped.interp", - ), - iterations=dict( - field="crt_warped.K", + copyfile=False, field="crt_warped.flowfields", mandatory=True ), + image_files=dict(copyfile=False, field="crt_warped.images", mandatory=True), + interp=dict(field="crt_warped.interp"), + iterations=dict(field="crt_warped.K"), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), - modulate=dict( - field="crt_warped.jactransf", - ), + mfile=dict(usedefault=True), + modulate=dict(field="crt_warped.jactransf"), paths=dict(), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True), ) inputs = CreateWarped.input_spec() @@ -42,9 +25,7 @@ def test_CreateWarped_inputs(): def test_CreateWarped_outputs(): - output_map = dict( - warped_files=dict(), - ) + output_map = dict(warped_files=dict()) outputs = CreateWarped.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_DARTEL.py b/nipype/interfaces/spm/tests/test_auto_DARTEL.py index 5fbba0c287..20be1d8767 100644 --- a/nipype/interfaces/spm/tests/test_auto_DARTEL.py +++ b/nipype/interfaces/spm/tests/test_auto_DARTEL.py @@ -4,34 +4,16 @@ def test_DARTEL_inputs(): input_map = dict( - image_files=dict( - copyfile=False, - field="warp.images", - mandatory=True, - ), - iteration_parameters=dict( - field="warp.settings.param", - ), + image_files=dict(copyfile=False, field="warp.images", mandatory=True), + iteration_parameters=dict(field="warp.settings.param"), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), - optimization_parameters=dict( - field="warp.settings.optim", - ), + mfile=dict(usedefault=True), + optimization_parameters=dict(field="warp.settings.optim"), paths=dict(), - regularization_form=dict( - field="warp.settings.rform", - ), - template_prefix=dict( - field="warp.settings.template", - usedefault=True, - ), + regularization_form=dict(field="warp.settings.rform"), + template_prefix=dict(field="warp.settings.template", usedefault=True), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True), ) inputs = DARTEL.input_spec() @@ -43,9 +25,7 @@ def test_DARTEL_inputs(): def test_DARTEL_outputs(): output_map = dict( dartel_flow_fields=dict(), - final_template_file=dict( - extensions=None, - ), + final_template_file=dict(extensions=None), template_files=dict(), ) outputs = DARTEL.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py b/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py index d4b8ad8a78..7df27a3c96 100644 --- a/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py 
+++ b/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py @@ -5,27 +5,14 @@ def test_DARTELNorm2MNI_inputs(): input_map = dict( apply_to_files=dict( - copyfile=False, - field="mni_norm.data.subjs.images", - mandatory=True, - ), - bounding_box=dict( - field="mni_norm.bb", - ), - flowfield_files=dict( - field="mni_norm.data.subjs.flowfields", - mandatory=True, - ), - fwhm=dict( - field="mni_norm.fwhm", + copyfile=False, field="mni_norm.data.subjs.images", mandatory=True ), + bounding_box=dict(field="mni_norm.bb"), + flowfield_files=dict(field="mni_norm.data.subjs.flowfields", mandatory=True), + fwhm=dict(field="mni_norm.fwhm"), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), - modulate=dict( - field="mni_norm.preserve", - ), + mfile=dict(usedefault=True), + modulate=dict(field="mni_norm.preserve"), paths=dict(), template_file=dict( copyfile=False, @@ -34,13 +21,8 @@ def test_DARTELNorm2MNI_inputs(): mandatory=True, ), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), - voxel_size=dict( - field="mni_norm.vox", - ), + use_v8struct=dict(min_ver="8", usedefault=True), + voxel_size=dict(field="mni_norm.vox"), ) inputs = DARTELNorm2MNI.input_spec() @@ -51,10 +33,7 @@ def test_DARTELNorm2MNI_inputs(): def test_DARTELNorm2MNI_outputs(): output_map = dict( - normalization_parameter_file=dict( - extensions=None, - ), - normalized_files=dict(), + normalization_parameter_file=dict(extensions=None), normalized_files=dict() ) outputs = DARTELNorm2MNI.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_DicomImport.py b/nipype/interfaces/spm/tests/test_auto_DicomImport.py index e0459d467f..455c31acfb 100644 --- a/nipype/interfaces/spm/tests/test_auto_DicomImport.py +++ b/nipype/interfaces/spm/tests/test_auto_DicomImport.py @@ -4,36 +4,16 @@ def test_DicomImport_inputs(): input_map = dict( - format=dict( - field="convopts.format", - usedefault=True, - ), - icedims=dict( - field="convopts.icedims", - usedefault=True, - ), - in_files=dict( - field="data", - mandatory=True, - ), + format=dict(field="convopts.format", usedefault=True), + icedims=dict(field="convopts.icedims", usedefault=True), + in_files=dict(field="data", mandatory=True), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), - output_dir=dict( - field="outdir", - usedefault=True, - ), - output_dir_struct=dict( - field="root", - usedefault=True, - ), + mfile=dict(usedefault=True), + output_dir=dict(field="outdir", usedefault=True), + output_dir_struct=dict(field="root", usedefault=True), paths=dict(), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True), ) inputs = DicomImport.input_spec() @@ -43,9 +23,7 @@ def test_DicomImport_inputs(): def test_DicomImport_outputs(): - output_map = dict( - out_files=dict(), - ) + output_map = dict(out_files=dict()) outputs = DicomImport.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py b/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py index 84fb496e9c..2ad12c60af 100644 --- a/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py +++ b/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py @@ -4,40 +4,19 @@ def test_EstimateContrast_inputs(): input_map = dict( - beta_images=dict( - copyfile=False, - mandatory=True, - ), - contrasts=dict( - mandatory=True, - ), - group_contrast=dict( - xor=["use_derivs"], - ), + beta_images=dict(copyfile=False, mandatory=True), + 
contrasts=dict(mandatory=True), + group_contrast=dict(xor=["use_derivs"]), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), + mfile=dict(usedefault=True), paths=dict(), - residual_image=dict( - copyfile=False, - extensions=None, - mandatory=True, - ), + residual_image=dict(copyfile=False, extensions=None, mandatory=True), spm_mat_file=dict( - copyfile=True, - extensions=None, - field="spmmat", - mandatory=True, - ), - use_derivs=dict( - xor=["group_contrast"], + copyfile=True, extensions=None, field="spmmat", mandatory=True ), + use_derivs=dict(xor=["group_contrast"]), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True), ) inputs = EstimateContrast.input_spec() @@ -52,9 +31,7 @@ def test_EstimateContrast_outputs(): ess_images=dict(), spmF_images=dict(), spmT_images=dict(), - spm_mat_file=dict( - extensions=None, - ), + spm_mat_file=dict(extensions=None), ) outputs = EstimateContrast.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_EstimateModel.py b/nipype/interfaces/spm/tests/test_auto_EstimateModel.py index c78924de2f..5351469cc9 100644 --- a/nipype/interfaces/spm/tests/test_auto_EstimateModel.py +++ b/nipype/interfaces/spm/tests/test_auto_EstimateModel.py @@ -4,30 +4,17 @@ def test_EstimateModel_inputs(): input_map = dict( - estimation_method=dict( - field="method", - mandatory=True, - ), + estimation_method=dict(field="method", mandatory=True), flags=dict(), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), + mfile=dict(usedefault=True), paths=dict(), spm_mat_file=dict( - copyfile=True, - extensions=None, - field="spmmat", - mandatory=True, + copyfile=True, extensions=None, field="spmmat", mandatory=True ), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), - write_residuals=dict( - field="write_residuals", - ), + use_v8struct=dict(min_ver="8", usedefault=True), + write_residuals=dict(field="write_residuals"), ) inputs = EstimateModel.input_spec() @@ -40,25 +27,15 @@ def test_EstimateModel_outputs(): output_map = dict( ARcoef=dict(), Cbetas=dict(), - RPVimage=dict( - extensions=[".hdr", ".img", ".img.gz", ".nii"], - ), + RPVimage=dict(extensions=[".hdr", ".img", ".img.gz", ".nii"]), SDbetas=dict(), SDerror=dict(), beta_images=dict(), - labels=dict( - extensions=[".hdr", ".img", ".img.gz", ".nii"], - ), - mask_image=dict( - extensions=[".hdr", ".img", ".img.gz", ".nii"], - ), - residual_image=dict( - extensions=[".hdr", ".img", ".img.gz", ".nii"], - ), + labels=dict(extensions=[".hdr", ".img", ".img.gz", ".nii"]), + mask_image=dict(extensions=[".hdr", ".img", ".img.gz", ".nii"]), + residual_image=dict(extensions=[".hdr", ".img", ".img.gz", ".nii"]), residual_images=dict(), - spm_mat_file=dict( - extensions=None, - ), + spm_mat_file=dict(extensions=None), ) outputs = EstimateModel.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py b/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py index de435ea771..872b8c7687 100644 --- a/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py @@ -4,39 +4,24 @@ def test_FactorialDesign_inputs(): input_map = dict( - covariates=dict( - field="cov", - ), - explicit_mask_file=dict( - extensions=None, - field="masking.em", - ), + covariates=dict(field="cov"), + explicit_mask_file=dict(extensions=None, field="masking.em"), global_calc_mean=dict( - field="globalc.g_mean", - xor=["global_calc_omit", "global_calc_values"], + 
field="globalc.g_mean", xor=["global_calc_omit", "global_calc_values"] ), global_calc_omit=dict( - field="globalc.g_omit", - xor=["global_calc_mean", "global_calc_values"], + field="globalc.g_omit", xor=["global_calc_mean", "global_calc_values"] ), global_calc_values=dict( field="globalc.g_user.global_uval", xor=["global_calc_mean", "global_calc_omit"], ), - global_normalization=dict( - field="globalm.glonorm", - ), + global_normalization=dict(field="globalm.glonorm"), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), - no_grand_mean_scaling=dict( - field="globalm.gmsca.gmsca_no", - ), + mfile=dict(usedefault=True), + no_grand_mean_scaling=dict(field="globalm.gmsca.gmsca_no"), paths=dict(), - spm_mat_dir=dict( - field="dir", - ), + spm_mat_dir=dict(field="dir"), threshold_mask_absolute=dict( field="masking.tm.tma.athresh", xor=["threshold_mask_none", "threshold_mask_relative"], @@ -49,14 +34,9 @@ def test_FactorialDesign_inputs(): field="masking.tm.tmr.rthresh", xor=["threshold_mask_absolute", "threshold_mask_none"], ), - use_implicit_threshold=dict( - field="masking.im", - ), + use_implicit_threshold=dict(field="masking.im"), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True), ) inputs = FactorialDesign.input_spec() @@ -66,11 +46,7 @@ def test_FactorialDesign_inputs(): def test_FactorialDesign_outputs(): - output_map = dict( - spm_mat_file=dict( - extensions=None, - ), - ) + output_map = dict(spm_mat_file=dict(extensions=None)) outputs = FactorialDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_FieldMap.py b/nipype/interfaces/spm/tests/test_auto_FieldMap.py index a91eec64d8..4061ca9ce2 100644 --- a/nipype/interfaces/spm/tests/test_auto_FieldMap.py +++ b/nipype/interfaces/spm/tests/test_auto_FieldMap.py @@ -4,123 +4,59 @@ def test_FieldMap_inputs(): input_map = dict( - anat_file=dict( - copyfile=False, - extensions=None, - field="subj.anat", - ), - blip_direction=dict( - field="subj.defaults.defaultsval.blipdir", - mandatory=True, - ), - echo_times=dict( - field="subj.defaults.defaultsval.et", - mandatory=True, - ), - epi_file=dict( - copyfile=False, - extensions=None, - field="subj.session.epi", - mandatory=True, - ), - epifm=dict( - field="subj.defaults.defaultsval.epifm", - usedefault=True, - ), + anat_file=dict(copyfile=False, extensions=None, field="subj.anat"), + blip_direction=dict(field="subj.defaults.defaultsval.blipdir", mandatory=True), + distortion_direction=dict(field="roptions.pedir", usedefault=True), + echo_times=dict(field="subj.defaults.defaultsval.et"), + epi_file=dict(copyfile=False, extensions=None, field="subj.session.epi"), + epifm=dict(field="subj.defaults.defaultsval.epifm", usedefault=True), + in_files=dict(copyfile=True, field="data.scans", mandatory=True), + interpolation=dict(field="roptions.rinterp", usedefault=True), jacobian_modulation=dict( - field="subj.defaults.defaultsval.ajm", - usedefault=True, - ), - jobtype=dict( - usedefault=True, + field="subj.defaults.defaultsval.ajm", usedefault=True ), + jobtype=dict(usedefault=True), magnitude_file=dict( - copyfile=False, - extensions=None, - field="subj.data.presubphasemag.magnitude", - mandatory=True, - ), - mask_fwhm=dict( - field="subj.defaults.defaultsval.mflags.fwhm", - usedefault=True, - ), - maskbrain=dict( - field="subj.defaults.defaultsval.maskbrain", - usedefault=True, - ), - matchanat=dict( - field="subj.matchanat", - usedefault=True, - ), - 
matchvdm=dict( - field="subj.matchvdm", - usedefault=True, + copyfile=False, extensions=None, field="subj.data.presubphasemag.magnitude" ), + mask_fwhm=dict(field="subj.defaults.defaultsval.mflags.fwhm", usedefault=True), + maskbrain=dict(field="subj.defaults.defaultsval.maskbrain", usedefault=True), + matchanat=dict(field="subj.matchanat", usedefault=True), + matchvdm=dict(field="subj.matchvdm", usedefault=True), matlab_cmd=dict(), - method=dict( - field="subj.defaults.defaultsval.uflags.method", - usedefault=True, - ), - mfile=dict( - usedefault=True, - ), - ndilate=dict( - field="subj.defaults.defaultsval.mflags.ndilate", - usedefault=True, - ), - nerode=dict( - field="subj.defaults.defaultsval.mflags.nerode", - usedefault=True, - ), - pad=dict( - field="subj.defaults.defaultsval.uflags.pad", - usedefault=True, - ), + method=dict(field="subj.defaults.defaultsval.uflags.method", usedefault=True), + mfile=dict(usedefault=True), + ndilate=dict(field="subj.defaults.defaultsval.mflags.ndilate", usedefault=True), + nerode=dict(field="subj.defaults.defaultsval.mflags.nerode", usedefault=True), + out_prefix=dict(field="roptions.prefix", usedefault=True), + pad=dict(field="subj.defaults.defaultsval.uflags.pad", usedefault=True), paths=dict(), phase_file=dict( - copyfile=False, - extensions=None, - field="subj.data.presubphasemag.phase", - mandatory=True, - ), - reg=dict( - field="subj.defaults.defaultsval.mflags.reg", - usedefault=True, - ), - sessname=dict( - field="subj.sessname", - usedefault=True, + copyfile=False, extensions=None, field="subj.data.presubphasemag.phase" ), + reg=dict(field="subj.defaults.defaultsval.mflags.reg", usedefault=True), + reslice_interp=dict(field="roptions.rinterp"), + sessname=dict(field="subj.sessname", usedefault=True), template=dict( copyfile=False, extensions=None, field="subj.defaults.defaultsval.mflags.template", ), - thresh=dict( - field="subj.defaults.defaultsval.mflags.thresh", - usedefault=True, - ), - total_readout_time=dict( - field="subj.defaults.defaultsval.tert", - mandatory=True, - ), + thresh=dict(field="subj.defaults.defaultsval.mflags.thresh", usedefault=True), + total_readout_time=dict(field="subj.defaults.defaultsval.tert", mandatory=True), unwarp_fwhm=dict( - field="subj.defaults.defaultsval.uflags.fwhm", - usedefault=True, + field="subj.defaults.defaultsval.uflags.fwhm", usedefault=True ), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), - writeunwarped=dict( - field="subj.writeunwarped", - usedefault=True, - ), - ws=dict( - field="subj.defaults.defaultsval.uflags.ws", - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True), + vdmfile=dict( + copyfile=True, extensions=None, field="data.vdmfile", mandatory=True + ), + write_mask=dict(field="roptions.mask"), + write_which=dict(field="roptions.which", maxlen=2, minlen=2, usedefault=True), + write_wrap=dict(field="roptions.wrap"), + writeunwarped=dict(field="subj.writeunwarped", usedefault=True), + ws=dict(field="subj.defaults.defaultsval.uflags.ws", usedefault=True), ) inputs = FieldMap.input_spec() @@ -131,9 +67,7 @@ def test_FieldMap_inputs(): def test_FieldMap_outputs(): output_map = dict( - vdm=dict( - extensions=None, - ), + mean_image=dict(extensions=None), out_files=dict(), vdm=dict(extensions=None) ) outputs = FieldMap.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_Level1Design.py b/nipype/interfaces/spm/tests/test_auto_Level1Design.py index 04c9f315ef..db2690ea02 100644 --- 
a/nipype/interfaces/spm/tests/test_auto_Level1Design.py +++ b/nipype/interfaces/spm/tests/test_auto_Level1Design.py @@ -4,61 +4,25 @@ def test_Level1Design_inputs(): input_map = dict( - bases=dict( - field="bases", - mandatory=True, - ), - factor_info=dict( - field="fact", - ), + bases=dict(field="bases", mandatory=True), + factor_info=dict(field="fact"), flags=dict(), - global_intensity_normalization=dict( - field="global", - ), - interscan_interval=dict( - field="timing.RT", - mandatory=True, - ), - mask_image=dict( - extensions=None, - field="mask", - ), - mask_threshold=dict( - usedefault=True, - ), + global_intensity_normalization=dict(field="global"), + interscan_interval=dict(field="timing.RT", mandatory=True), + mask_image=dict(extensions=None, field="mask"), + mask_threshold=dict(usedefault=True), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), - microtime_onset=dict( - field="timing.fmri_t0", - ), - microtime_resolution=dict( - field="timing.fmri_t", - ), - model_serial_correlations=dict( - field="cvi", - ), + mfile=dict(usedefault=True), + microtime_onset=dict(field="timing.fmri_t0"), + microtime_resolution=dict(field="timing.fmri_t"), + model_serial_correlations=dict(field="cvi"), paths=dict(), - session_info=dict( - field="sess", - mandatory=True, - ), - spm_mat_dir=dict( - field="dir", - ), - timing_units=dict( - field="timing.units", - mandatory=True, - ), + session_info=dict(field="sess", mandatory=True), + spm_mat_dir=dict(field="dir"), + timing_units=dict(field="timing.units", mandatory=True), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), - volterra_expansion_order=dict( - field="volt", - ), + use_v8struct=dict(min_ver="8", usedefault=True), + volterra_expansion_order=dict(field="volt"), ) inputs = Level1Design.input_spec() @@ -68,11 +32,7 @@ def test_Level1Design_inputs(): def test_Level1Design_outputs(): - output_map = dict( - spm_mat_file=dict( - extensions=None, - ), - ) + output_map = dict(spm_mat_file=dict(extensions=None)) outputs = Level1Design.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_MultiChannelNewSegment.py b/nipype/interfaces/spm/tests/test_auto_MultiChannelNewSegment.py index 9c8f743d45..efec3fa8c6 100644 --- a/nipype/interfaces/spm/tests/test_auto_MultiChannelNewSegment.py +++ b/nipype/interfaces/spm/tests/test_auto_MultiChannelNewSegment.py @@ -4,34 +4,17 @@ def test_MultiChannelNewSegment_inputs(): input_map = dict( - affine_regularization=dict( - field="warp.affreg", - ), - channels=dict( - field="channel", - ), + affine_regularization=dict(field="warp.affreg"), + channels=dict(field="channel"), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), + mfile=dict(usedefault=True), paths=dict(), - sampling_distance=dict( - field="warp.samp", - ), - tissues=dict( - field="tissue", - ), + sampling_distance=dict(field="warp.samp"), + tissues=dict(field="tissue"), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), - warping_regularization=dict( - field="warp.reg", - ), - write_deformation_fields=dict( - field="warp.write", - ), + use_v8struct=dict(min_ver="8", usedefault=True), + warping_regularization=dict(field="warp.reg"), + write_deformation_fields=dict(field="warp.write"), ) inputs = MultiChannelNewSegment.input_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py b/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py index 61d3b38a74..cf2969ff67 100644 --- 
a/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py @@ -4,47 +4,26 @@ def test_MultipleRegressionDesign_inputs(): input_map = dict( - covariates=dict( - field="cov", - ), - explicit_mask_file=dict( - extensions=None, - field="masking.em", - ), + covariates=dict(field="cov"), + explicit_mask_file=dict(extensions=None, field="masking.em"), global_calc_mean=dict( - field="globalc.g_mean", - xor=["global_calc_omit", "global_calc_values"], + field="globalc.g_mean", xor=["global_calc_omit", "global_calc_values"] ), global_calc_omit=dict( - field="globalc.g_omit", - xor=["global_calc_mean", "global_calc_values"], + field="globalc.g_omit", xor=["global_calc_mean", "global_calc_values"] ), global_calc_values=dict( field="globalc.g_user.global_uval", xor=["global_calc_mean", "global_calc_omit"], ), - global_normalization=dict( - field="globalm.glonorm", - ), - in_files=dict( - field="des.mreg.scans", - mandatory=True, - ), - include_intercept=dict( - field="des.mreg.incint", - usedefault=True, - ), + global_normalization=dict(field="globalm.glonorm"), + in_files=dict(field="des.mreg.scans", mandatory=True), + include_intercept=dict(field="des.mreg.incint", usedefault=True), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), - no_grand_mean_scaling=dict( - field="globalm.gmsca.gmsca_no", - ), + mfile=dict(usedefault=True), + no_grand_mean_scaling=dict(field="globalm.gmsca.gmsca_no"), paths=dict(), - spm_mat_dir=dict( - field="dir", - ), + spm_mat_dir=dict(field="dir"), threshold_mask_absolute=dict( field="masking.tm.tma.athresh", xor=["threshold_mask_none", "threshold_mask_relative"], @@ -57,17 +36,10 @@ def test_MultipleRegressionDesign_inputs(): field="masking.tm.tmr.rthresh", xor=["threshold_mask_absolute", "threshold_mask_none"], ), - use_implicit_threshold=dict( - field="masking.im", - ), + use_implicit_threshold=dict(field="masking.im"), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), - user_covariates=dict( - field="des.mreg.mcov", - ), + use_v8struct=dict(min_ver="8", usedefault=True), + user_covariates=dict(field="des.mreg.mcov"), ) inputs = MultipleRegressionDesign.input_spec() @@ -77,11 +49,7 @@ def test_MultipleRegressionDesign_inputs(): def test_MultipleRegressionDesign_outputs(): - output_map = dict( - spm_mat_file=dict( - extensions=None, - ), - ) + output_map = dict(spm_mat_file=dict(extensions=None)) outputs = MultipleRegressionDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_NewSegment.py b/nipype/interfaces/spm/tests/test_auto_NewSegment.py index dae310f6e2..77f84939da 100644 --- a/nipype/interfaces/spm/tests/test_auto_NewSegment.py +++ b/nipype/interfaces/spm/tests/test_auto_NewSegment.py @@ -4,39 +4,18 @@ def test_NewSegment_inputs(): input_map = dict( - affine_regularization=dict( - field="warp.affreg", - ), - channel_files=dict( - copyfile=False, - field="channel", - mandatory=True, - ), - channel_info=dict( - field="channel", - ), + affine_regularization=dict(field="warp.affreg"), + channel_files=dict(copyfile=False, field="channel", mandatory=True), + channel_info=dict(field="channel"), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), + mfile=dict(usedefault=True), paths=dict(), - sampling_distance=dict( - field="warp.samp", - ), - tissues=dict( - field="tissue", - ), + sampling_distance=dict(field="warp.samp"), + tissues=dict(field="tissue"), use_mcr=dict(), - use_v8struct=dict( 
- min_ver="8", - usedefault=True, - ), - warping_regularization=dict( - field="warp.reg", - ), - write_deformation_fields=dict( - field="warp.write", - ), + use_v8struct=dict(min_ver="8", usedefault=True), + warping_regularization=dict(field="warp.reg"), + write_deformation_fields=dict(field="warp.write"), ) inputs = NewSegment.input_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_Normalize.py b/nipype/interfaces/spm/tests/test_auto_Normalize.py index caa063d923..b2c34f37e8 100644 --- a/nipype/interfaces/spm/tests/test_auto_Normalize.py +++ b/nipype/interfaces/spm/tests/test_auto_Normalize.py @@ -4,33 +4,15 @@ def test_Normalize_inputs(): input_map = dict( - DCT_period_cutoff=dict( - field="eoptions.cutoff", - ), - affine_regularization_type=dict( - field="eoptions.regtype", - ), - apply_to_files=dict( - copyfile=True, - field="subj.resample", - ), - jobtype=dict( - usedefault=True, - ), + DCT_period_cutoff=dict(field="eoptions.cutoff"), + affine_regularization_type=dict(field="eoptions.regtype"), + apply_to_files=dict(copyfile=True, field="subj.resample"), + jobtype=dict(usedefault=True), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), - nonlinear_iterations=dict( - field="eoptions.nits", - ), - nonlinear_regularization=dict( - field="eoptions.reg", - ), - out_prefix=dict( - field="roptions.prefix", - usedefault=True, - ), + mfile=dict(usedefault=True), + nonlinear_iterations=dict(field="eoptions.nits"), + nonlinear_regularization=dict(field="eoptions.reg"), + out_prefix=dict(field="roptions.prefix", usedefault=True), parameter_file=dict( copyfile=False, extensions=None, @@ -40,19 +22,10 @@ def test_Normalize_inputs(): ), paths=dict(), source=dict( - copyfile=True, - field="subj.source", - mandatory=True, - xor=["parameter_file"], - ), - source_image_smoothing=dict( - field="eoptions.smosrc", - ), - source_weight=dict( - copyfile=False, - extensions=None, - field="subj.wtsrc", + copyfile=True, field="subj.source", mandatory=True, xor=["parameter_file"] ), + source_image_smoothing=dict(field="eoptions.smosrc"), + source_weight=dict(copyfile=False, extensions=None, field="subj.wtsrc"), template=dict( copyfile=False, extensions=None, @@ -60,34 +33,15 @@ def test_Normalize_inputs(): mandatory=True, xor=["parameter_file"], ), - template_image_smoothing=dict( - field="eoptions.smoref", - ), - template_weight=dict( - copyfile=False, - extensions=None, - field="eoptions.weight", - ), + template_image_smoothing=dict(field="eoptions.smoref"), + template_weight=dict(copyfile=False, extensions=None, field="eoptions.weight"), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), - write_bounding_box=dict( - field="roptions.bb", - ), - write_interp=dict( - field="roptions.interp", - ), - write_preserve=dict( - field="roptions.preserve", - ), - write_voxel_sizes=dict( - field="roptions.vox", - ), - write_wrap=dict( - field="roptions.wrap", - ), + use_v8struct=dict(min_ver="8", usedefault=True), + write_bounding_box=dict(field="roptions.bb"), + write_interp=dict(field="roptions.interp"), + write_preserve=dict(field="roptions.preserve"), + write_voxel_sizes=dict(field="roptions.vox"), + write_wrap=dict(field="roptions.wrap"), ) inputs = Normalize.input_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_Normalize12.py b/nipype/interfaces/spm/tests/test_auto_Normalize12.py index 965ffafec9..c4bf661f90 100644 --- a/nipype/interfaces/spm/tests/test_auto_Normalize12.py +++ b/nipype/interfaces/spm/tests/test_auto_Normalize12.py @@ -4,19 +4,10 @@ def 
test_Normalize12_inputs(): input_map = dict( - affine_regularization_type=dict( - field="eoptions.affreg", - ), - apply_to_files=dict( - copyfile=True, - field="subj.resample", - ), - bias_fwhm=dict( - field="eoptions.biasfwhm", - ), - bias_regularization=dict( - field="eoptions.biasreg", - ), + affine_regularization_type=dict(field="eoptions.affreg"), + apply_to_files=dict(copyfile=True, field="subj.resample"), + bias_fwhm=dict(field="eoptions.biasfwhm"), + bias_regularization=dict(field="eoptions.biasreg"), deformation_file=dict( copyfile=False, extensions=[".hdr", ".img", ".img.gz", ".nii"], @@ -31,24 +22,13 @@ def test_Normalize12_inputs(): mandatory=True, xor=["deformation_file"], ), - jobtype=dict( - usedefault=True, - ), + jobtype=dict(usedefault=True), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), - out_prefix=dict( - field="woptions.prefix", - usedefault=True, - ), + mfile=dict(usedefault=True), + out_prefix=dict(field="woptions.prefix", usedefault=True), paths=dict(), - sampling_distance=dict( - field="eoptions.samp", - ), - smoothness=dict( - field="eoptions.fwhm", - ), + sampling_distance=dict(field="eoptions.samp"), + smoothness=dict(field="eoptions.fwhm"), tpm=dict( copyfile=False, extensions=None, @@ -56,22 +36,11 @@ def test_Normalize12_inputs(): xor=["deformation_file"], ), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), - warping_regularization=dict( - field="eoptions.reg", - ), - write_bounding_box=dict( - field="woptions.bb", - ), - write_interp=dict( - field="woptions.interp", - ), - write_voxel_sizes=dict( - field="woptions.vox", - ), + use_v8struct=dict(min_ver="8", usedefault=True), + warping_regularization=dict(field="eoptions.reg"), + write_bounding_box=dict(field="woptions.bb"), + write_interp=dict(field="woptions.interp"), + write_voxel_sizes=dict(field="woptions.vox"), ) inputs = Normalize12.input_spec() @@ -82,9 +51,7 @@ def test_Normalize12_inputs(): def test_Normalize12_outputs(): output_map = dict( - deformation_field=dict(), - normalized_files=dict(), - normalized_image=dict(), + deformation_field=dict(), normalized_files=dict(), normalized_image=dict() ) outputs = Normalize12.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py b/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py index 9b77ab0af7..7d80fea70d 100644 --- a/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py @@ -4,43 +4,25 @@ def test_OneSampleTTestDesign_inputs(): input_map = dict( - covariates=dict( - field="cov", - ), - explicit_mask_file=dict( - extensions=None, - field="masking.em", - ), + covariates=dict(field="cov"), + explicit_mask_file=dict(extensions=None, field="masking.em"), global_calc_mean=dict( - field="globalc.g_mean", - xor=["global_calc_omit", "global_calc_values"], + field="globalc.g_mean", xor=["global_calc_omit", "global_calc_values"] ), global_calc_omit=dict( - field="globalc.g_omit", - xor=["global_calc_mean", "global_calc_values"], + field="globalc.g_omit", xor=["global_calc_mean", "global_calc_values"] ), global_calc_values=dict( field="globalc.g_user.global_uval", xor=["global_calc_mean", "global_calc_omit"], ), - global_normalization=dict( - field="globalm.glonorm", - ), - in_files=dict( - field="des.t1.scans", - mandatory=True, - ), + global_normalization=dict(field="globalm.glonorm"), + in_files=dict(field="des.t1.scans", mandatory=True), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), - 
no_grand_mean_scaling=dict( - field="globalm.gmsca.gmsca_no", - ), + mfile=dict(usedefault=True), + no_grand_mean_scaling=dict(field="globalm.gmsca.gmsca_no"), paths=dict(), - spm_mat_dir=dict( - field="dir", - ), + spm_mat_dir=dict(field="dir"), threshold_mask_absolute=dict( field="masking.tm.tma.athresh", xor=["threshold_mask_none", "threshold_mask_relative"], @@ -53,14 +35,9 @@ def test_OneSampleTTestDesign_inputs(): field="masking.tm.tmr.rthresh", xor=["threshold_mask_absolute", "threshold_mask_none"], ), - use_implicit_threshold=dict( - field="masking.im", - ), + use_implicit_threshold=dict(field="masking.im"), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True), ) inputs = OneSampleTTestDesign.input_spec() @@ -70,11 +47,7 @@ def test_OneSampleTTestDesign_inputs(): def test_OneSampleTTestDesign_outputs(): - output_map = dict( - spm_mat_file=dict( - extensions=None, - ), - ) + output_map = dict(spm_mat_file=dict(extensions=None)) outputs = OneSampleTTestDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py b/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py index 6be1f6ec01..7ca526ded8 100644 --- a/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py @@ -4,49 +4,27 @@ def test_PairedTTestDesign_inputs(): input_map = dict( - ancova=dict( - field="des.pt.ancova", - ), - covariates=dict( - field="cov", - ), - explicit_mask_file=dict( - extensions=None, - field="masking.em", - ), + ancova=dict(field="des.pt.ancova"), + covariates=dict(field="cov"), + explicit_mask_file=dict(extensions=None, field="masking.em"), global_calc_mean=dict( - field="globalc.g_mean", - xor=["global_calc_omit", "global_calc_values"], + field="globalc.g_mean", xor=["global_calc_omit", "global_calc_values"] ), global_calc_omit=dict( - field="globalc.g_omit", - xor=["global_calc_mean", "global_calc_values"], + field="globalc.g_omit", xor=["global_calc_mean", "global_calc_values"] ), global_calc_values=dict( field="globalc.g_user.global_uval", xor=["global_calc_mean", "global_calc_omit"], ), - global_normalization=dict( - field="globalm.glonorm", - ), - grand_mean_scaling=dict( - field="des.pt.gmsca", - ), + global_normalization=dict(field="globalm.glonorm"), + grand_mean_scaling=dict(field="des.pt.gmsca"), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), - no_grand_mean_scaling=dict( - field="globalm.gmsca.gmsca_no", - ), - paired_files=dict( - field="des.pt.pair", - mandatory=True, - ), + mfile=dict(usedefault=True), + no_grand_mean_scaling=dict(field="globalm.gmsca.gmsca_no"), + paired_files=dict(field="des.pt.pair", mandatory=True), paths=dict(), - spm_mat_dir=dict( - field="dir", - ), + spm_mat_dir=dict(field="dir"), threshold_mask_absolute=dict( field="masking.tm.tma.athresh", xor=["threshold_mask_none", "threshold_mask_relative"], @@ -59,14 +37,9 @@ def test_PairedTTestDesign_inputs(): field="masking.tm.tmr.rthresh", xor=["threshold_mask_absolute", "threshold_mask_none"], ), - use_implicit_threshold=dict( - field="masking.im", - ), + use_implicit_threshold=dict(field="masking.im"), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True), ) inputs = PairedTTestDesign.input_spec() @@ -76,11 +49,7 @@ def test_PairedTTestDesign_inputs(): def test_PairedTTestDesign_outputs(): - output_map = dict( - 
spm_mat_file=dict( - extensions=None, - ), - ) + output_map = dict(spm_mat_file=dict(extensions=None)) outputs = PairedTTestDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_Realign.py b/nipype/interfaces/spm/tests/test_auto_Realign.py index 5165d6f33e..8682769634 100644 --- a/nipype/interfaces/spm/tests/test_auto_Realign.py +++ b/nipype/interfaces/spm/tests/test_auto_Realign.py @@ -4,65 +4,25 @@ def test_Realign_inputs(): input_map = dict( - fwhm=dict( - field="eoptions.fwhm", - ), - in_files=dict( - copyfile=True, - field="data", - mandatory=True, - ), - interp=dict( - field="eoptions.interp", - ), - jobtype=dict( - usedefault=True, - ), + fwhm=dict(field="eoptions.fwhm"), + in_files=dict(copyfile=True, field="data", mandatory=True), + interp=dict(field="eoptions.interp"), + jobtype=dict(usedefault=True), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), - out_prefix=dict( - field="roptions.prefix", - usedefault=True, - ), + mfile=dict(usedefault=True), + out_prefix=dict(field="roptions.prefix", usedefault=True), paths=dict(), - quality=dict( - field="eoptions.quality", - ), - register_to_mean=dict( - field="eoptions.rtm", - ), - separation=dict( - field="eoptions.sep", - ), + quality=dict(field="eoptions.quality"), + register_to_mean=dict(field="eoptions.rtm"), + separation=dict(field="eoptions.sep"), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), - weight_img=dict( - extensions=None, - field="eoptions.weight", - ), - wrap=dict( - field="eoptions.wrap", - ), - write_interp=dict( - field="roptions.interp", - ), - write_mask=dict( - field="roptions.mask", - ), - write_which=dict( - field="roptions.which", - maxlen=2, - minlen=2, - usedefault=True, - ), - write_wrap=dict( - field="roptions.wrap", - ), + use_v8struct=dict(min_ver="8", usedefault=True), + weight_img=dict(extensions=None, field="eoptions.weight"), + wrap=dict(field="eoptions.wrap"), + write_interp=dict(field="roptions.interp"), + write_mask=dict(field="roptions.mask"), + write_which=dict(field="roptions.which", maxlen=2, minlen=2, usedefault=True), + write_wrap=dict(field="roptions.wrap"), ) inputs = Realign.input_spec() @@ -73,9 +33,7 @@ def test_Realign_inputs(): def test_Realign_outputs(): output_map = dict( - mean_image=dict( - extensions=None, - ), + mean_image=dict(extensions=None), modified_in_files=dict(), realigned_files=dict(), realignment_parameters=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_RealignUnwarp.py b/nipype/interfaces/spm/tests/test_auto_RealignUnwarp.py index bb27419547..20eca6653f 100644 --- a/nipype/interfaces/spm/tests/test_auto_RealignUnwarp.py +++ b/nipype/interfaces/spm/tests/test_auto_RealignUnwarp.py @@ -4,104 +4,41 @@ def test_RealignUnwarp_inputs(): input_map = dict( - est_basis_func=dict( - field="uweoptions.basfcn", - ), - est_first_order_effects=dict( - field="uweoptions.fot", - ), - est_jacobian_deformations=dict( - field="uweoptions.jm", - ), + est_basis_func=dict(field="uweoptions.basfcn"), + est_first_order_effects=dict(field="uweoptions.fot"), + est_jacobian_deformations=dict(field="uweoptions.jm"), est_num_of_iterations=dict( - field="uweoptions.noi", - maxlen=1, - minlen=1, - usedefault=True, - ), - est_re_est_mov_par=dict( - field="uweoptions.rem", + field="uweoptions.noi", maxlen=1, minlen=1, usedefault=True ), + est_re_est_mov_par=dict(field="uweoptions.rem"), est_reg_factor=dict( - field="uweoptions.lambda", - maxlen=1, - minlen=1, - usedefault=True, - ), - 
est_reg_order=dict( - field="uweoptions.regorder", - ), - est_second_order_effects=dict( - field="uweoptions.sot", - ), - est_taylor_expansion_point=dict( - field="uweoptions.expround", - usedefault=True, - ), - est_unwarp_fwhm=dict( - field="uweoptions.uwfwhm", - ), - fwhm=dict( - field="eoptions.fwhm", - ), - in_files=dict( - copyfile=True, - field="data.scans", - mandatory=True, - ), - interp=dict( - field="eoptions.einterp", - ), + field="uweoptions.lambda", maxlen=1, minlen=1, usedefault=True + ), + est_reg_order=dict(field="uweoptions.regorder"), + est_second_order_effects=dict(field="uweoptions.sot"), + est_taylor_expansion_point=dict(field="uweoptions.expround", usedefault=True), + est_unwarp_fwhm=dict(field="uweoptions.uwfwhm"), + fwhm=dict(field="eoptions.fwhm"), + in_files=dict(copyfile=True, field="data.scans", mandatory=True), + interp=dict(field="eoptions.einterp"), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), - out_prefix=dict( - field="uwroptions.prefix", - usedefault=True, - ), + mfile=dict(usedefault=True), + out_prefix=dict(field="uwroptions.prefix", usedefault=True), paths=dict(), - phase_map=dict( - copyfile=False, - extensions=None, - field="data.pmscan", - ), - quality=dict( - field="eoptions.quality", - ), - register_to_mean=dict( - field="eoptions.rtm", - ), - reslice_interp=dict( - field="uwroptions.rinterp", - ), - reslice_mask=dict( - field="uwroptions.mask", - ), + phase_map=dict(copyfile=False, extensions=None, field="data.pmscan"), + quality=dict(field="eoptions.quality"), + register_to_mean=dict(field="eoptions.rtm"), + reslice_interp=dict(field="uwroptions.rinterp"), + reslice_mask=dict(field="uwroptions.mask"), reslice_which=dict( - field="uwroptions.uwwhich", - maxlen=2, - minlen=2, - usedefault=True, - ), - reslice_wrap=dict( - field="uwroptions.wrap", - ), - separation=dict( - field="eoptions.sep", + field="uwroptions.uwwhich", maxlen=2, minlen=2, usedefault=True ), + reslice_wrap=dict(field="uwroptions.wrap"), + separation=dict(field="eoptions.sep"), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), - weight_img=dict( - extensions=None, - field="eoptions.weight", - ), - wrap=dict( - field="eoptions.ewrap", - ), + use_v8struct=dict(min_ver="8", usedefault=True), + weight_img=dict(extensions=None, field="eoptions.weight"), + wrap=dict(field="eoptions.ewrap"), ) inputs = RealignUnwarp.input_spec() @@ -112,9 +49,7 @@ def test_RealignUnwarp_inputs(): def test_RealignUnwarp_outputs(): output_map = dict( - mean_image=dict( - extensions=None, - ), + mean_image=dict(extensions=None), modified_in_files=dict(), realigned_unwarped_files=dict(), realignment_parameters=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_Reslice.py b/nipype/interfaces/spm/tests/test_auto_Reslice.py index c48d1a4b88..b3ff842fd7 100644 --- a/nipype/interfaces/spm/tests/test_auto_Reslice.py +++ b/nipype/interfaces/spm/tests/test_auto_Reslice.py @@ -4,30 +4,15 @@ def test_Reslice_inputs(): input_map = dict( - in_file=dict( - extensions=None, - mandatory=True, - ), - interp=dict( - usedefault=True, - ), + in_file=dict(extensions=None, mandatory=True), + interp=dict(usedefault=True), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), - out_file=dict( - extensions=None, - ), + mfile=dict(usedefault=True), + out_file=dict(extensions=None), paths=dict(), - space_defining=dict( - extensions=None, - mandatory=True, - ), + space_defining=dict(extensions=None, mandatory=True), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, 
- ), + use_v8struct=dict(min_ver="8", usedefault=True), ) inputs = Reslice.input_spec() @@ -37,11 +22,7 @@ def test_Reslice_inputs(): def test_Reslice_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Reslice.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py b/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py index 8bbb1d1307..bf2fe90c4e 100644 --- a/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py +++ b/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py @@ -4,33 +4,16 @@ def test_ResliceToReference_inputs(): input_map = dict( - bounding_box=dict( - field="comp{2}.idbbvox.bb", - ), - in_files=dict( - field="fnames", - mandatory=True, - ), - interpolation=dict( - field="interp", - ), + bounding_box=dict(field="comp{2}.idbbvox.bb"), + in_files=dict(field="fnames", mandatory=True), + interpolation=dict(field="interp"), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), + mfile=dict(usedefault=True), paths=dict(), - target=dict( - extensions=None, - field="comp{1}.id.space", - ), + target=dict(extensions=None, field="comp{1}.id.space"), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), - voxel_sizes=dict( - field="comp{2}.idbbvox.vox", - ), + use_v8struct=dict(min_ver="8", usedefault=True), + voxel_sizes=dict(field="comp{2}.idbbvox.vox"), ) inputs = ResliceToReference.input_spec() @@ -40,9 +23,7 @@ def test_ResliceToReference_inputs(): def test_ResliceToReference_outputs(): - output_map = dict( - out_files=dict(), - ) + output_map = dict(out_files=dict()) outputs = ResliceToReference.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_SPMCommand.py b/nipype/interfaces/spm/tests/test_auto_SPMCommand.py index 0c0a8d7506..3e9cecf29d 100644 --- a/nipype/interfaces/spm/tests/test_auto_SPMCommand.py +++ b/nipype/interfaces/spm/tests/test_auto_SPMCommand.py @@ -5,15 +5,10 @@ def test_SPMCommand_inputs(): input_map = dict( matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), + mfile=dict(usedefault=True), paths=dict(), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True), ) inputs = SPMCommand.input_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_Segment.py b/nipype/interfaces/spm/tests/test_auto_Segment.py index 4859c76a00..b498e1553a 100644 --- a/nipype/interfaces/spm/tests/test_auto_Segment.py +++ b/nipype/interfaces/spm/tests/test_auto_Segment.py @@ -4,64 +4,26 @@ def test_Segment_inputs(): input_map = dict( - affine_regularization=dict( - field="opts.regtype", - ), - bias_fwhm=dict( - field="opts.biasfwhm", - ), - bias_regularization=dict( - field="opts.biasreg", - ), - clean_masks=dict( - field="output.cleanup", - ), - csf_output_type=dict( - field="output.CSF", - ), - data=dict( - copyfile=False, - field="data", - mandatory=True, - ), - gaussians_per_class=dict( - field="opts.ngaus", - ), - gm_output_type=dict( - field="output.GM", - ), - mask_image=dict( - extensions=None, - field="opts.msk", - ), + affine_regularization=dict(field="opts.regtype"), + bias_fwhm=dict(field="opts.biasfwhm"), + bias_regularization=dict(field="opts.biasreg"), + clean_masks=dict(field="output.cleanup"), + csf_output_type=dict(field="output.CSF"), + data=dict(copyfile=False, field="data", mandatory=True), + 
gaussians_per_class=dict(field="opts.ngaus"), + gm_output_type=dict(field="output.GM"), + mask_image=dict(extensions=None, field="opts.msk"), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), + mfile=dict(usedefault=True), paths=dict(), - sampling_distance=dict( - field="opts.samp", - ), - save_bias_corrected=dict( - field="output.biascor", - ), - tissue_prob_maps=dict( - field="opts.tpm", - ), + sampling_distance=dict(field="opts.samp"), + save_bias_corrected=dict(field="output.biascor"), + tissue_prob_maps=dict(field="opts.tpm"), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), - warp_frequency_cutoff=dict( - field="opts.warpco", - ), - warping_regularization=dict( - field="opts.warpreg", - ), - wm_output_type=dict( - field="output.WM", - ), + use_v8struct=dict(min_ver="8", usedefault=True), + warp_frequency_cutoff=dict(field="opts.warpco"), + warping_regularization=dict(field="opts.warpreg"), + wm_output_type=dict(field="output.WM"), ) inputs = Segment.input_spec() @@ -72,47 +34,21 @@ def test_Segment_inputs(): def test_Segment_outputs(): output_map = dict( - bias_corrected_image=dict( - extensions=None, - ), - inverse_transformation_mat=dict( - extensions=None, - ), - modulated_csf_image=dict( - extensions=None, - ), - modulated_gm_image=dict( - extensions=None, - ), + bias_corrected_image=dict(extensions=None), + inverse_transformation_mat=dict(extensions=None), + modulated_csf_image=dict(extensions=None), + modulated_gm_image=dict(extensions=None), modulated_input_image=dict( - deprecated="0.10", - extensions=None, - new_name="bias_corrected_image", - ), - modulated_wm_image=dict( - extensions=None, - ), - native_csf_image=dict( - extensions=None, - ), - native_gm_image=dict( - extensions=None, - ), - native_wm_image=dict( - extensions=None, - ), - normalized_csf_image=dict( - extensions=None, - ), - normalized_gm_image=dict( - extensions=None, - ), - normalized_wm_image=dict( - extensions=None, - ), - transformation_mat=dict( - extensions=None, - ), + deprecated="0.10", extensions=None, new_name="bias_corrected_image" + ), + modulated_wm_image=dict(extensions=None), + native_csf_image=dict(extensions=None), + native_gm_image=dict(extensions=None), + native_wm_image=dict(extensions=None), + normalized_csf_image=dict(extensions=None), + normalized_gm_image=dict(extensions=None), + normalized_wm_image=dict(extensions=None), + transformation_mat=dict(extensions=None), ) outputs = Segment.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_SliceTiming.py b/nipype/interfaces/spm/tests/test_auto_SliceTiming.py index 85ddf03c52..a9cba8fcdc 100644 --- a/nipype/interfaces/spm/tests/test_auto_SliceTiming.py +++ b/nipype/interfaces/spm/tests/test_auto_SliceTiming.py @@ -4,45 +4,18 @@ def test_SliceTiming_inputs(): input_map = dict( - in_files=dict( - copyfile=False, - field="scans", - mandatory=True, - ), + in_files=dict(copyfile=False, field="scans", mandatory=True), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), - num_slices=dict( - field="nslices", - mandatory=True, - ), - out_prefix=dict( - field="prefix", - usedefault=True, - ), + mfile=dict(usedefault=True), + num_slices=dict(field="nslices", mandatory=True), + out_prefix=dict(field="prefix", usedefault=True), paths=dict(), - ref_slice=dict( - field="refslice", - mandatory=True, - ), - slice_order=dict( - field="so", - mandatory=True, - ), - time_acquisition=dict( - field="ta", - mandatory=True, - ), - time_repetition=dict( - field="tr", - mandatory=True, - ), + 
ref_slice=dict(field="refslice", mandatory=True), + slice_order=dict(field="so", mandatory=True), + time_acquisition=dict(field="ta", mandatory=True), + time_repetition=dict(field="tr", mandatory=True), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True), ) inputs = SliceTiming.input_spec() @@ -52,9 +25,7 @@ def test_SliceTiming_inputs(): def test_SliceTiming_outputs(): - output_map = dict( - timecorrected_files=dict(), - ) + output_map = dict(timecorrected_files=dict()) outputs = SliceTiming.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_Smooth.py b/nipype/interfaces/spm/tests/test_auto_Smooth.py index 5ed7aa57c0..05512ade20 100644 --- a/nipype/interfaces/spm/tests/test_auto_Smooth.py +++ b/nipype/interfaces/spm/tests/test_auto_Smooth.py @@ -4,34 +4,16 @@ def test_Smooth_inputs(): input_map = dict( - data_type=dict( - field="dtype", - ), - fwhm=dict( - field="fwhm", - ), - implicit_masking=dict( - field="im", - ), - in_files=dict( - copyfile=False, - field="data", - mandatory=True, - ), + data_type=dict(field="dtype"), + fwhm=dict(field="fwhm"), + implicit_masking=dict(field="im"), + in_files=dict(copyfile=False, field="data", mandatory=True), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), - out_prefix=dict( - field="prefix", - usedefault=True, - ), + mfile=dict(usedefault=True), + out_prefix=dict(field="prefix", usedefault=True), paths=dict(), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True), ) inputs = Smooth.input_spec() @@ -41,9 +23,7 @@ def test_Smooth_inputs(): def test_Smooth_outputs(): - output_map = dict( - smoothed_files=dict(), - ) + output_map = dict(smoothed_files=dict()) outputs = Smooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_Threshold.py b/nipype/interfaces/spm/tests/test_auto_Threshold.py index 470ae7f2d1..93bb95ac6f 100644 --- a/nipype/interfaces/spm/tests/test_auto_Threshold.py +++ b/nipype/interfaces/spm/tests/test_auto_Threshold.py @@ -4,50 +4,21 @@ def test_Threshold_inputs(): input_map = dict( - contrast_index=dict( - mandatory=True, - ), - extent_fdr_p_threshold=dict( - usedefault=True, - ), - extent_threshold=dict( - usedefault=True, - ), - force_activation=dict( - usedefault=True, - ), - height_threshold=dict( - usedefault=True, - ), - height_threshold_type=dict( - usedefault=True, - ), + contrast_index=dict(mandatory=True), + extent_fdr_p_threshold=dict(usedefault=True), + extent_threshold=dict(usedefault=True), + force_activation=dict(usedefault=True), + height_threshold=dict(usedefault=True), + height_threshold_type=dict(usedefault=True), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), + mfile=dict(usedefault=True), paths=dict(), - spm_mat_file=dict( - copyfile=True, - extensions=None, - mandatory=True, - ), - stat_image=dict( - copyfile=False, - extensions=None, - mandatory=True, - ), - use_fwe_correction=dict( - usedefault=True, - ), + spm_mat_file=dict(copyfile=True, extensions=None, mandatory=True), + stat_image=dict(copyfile=False, extensions=None, mandatory=True), + use_fwe_correction=dict(usedefault=True), use_mcr=dict(), - use_topo_fdr=dict( - usedefault=True, - ), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), + use_topo_fdr=dict(usedefault=True), + use_v8struct=dict(min_ver="8", usedefault=True), ) inputs = Threshold.input_spec() @@ 
-61,13 +32,9 @@ def test_Threshold_outputs(): activation_forced=dict(), cluster_forming_thr=dict(), n_clusters=dict(), - pre_topo_fdr_map=dict( - extensions=None, - ), + pre_topo_fdr_map=dict(extensions=None), pre_topo_n_clusters=dict(), - thresholded_map=dict( - extensions=None, - ), + thresholded_map=dict(extensions=None), ) outputs = Threshold.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py b/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py index 89c5a42e57..6ee8622b4d 100644 --- a/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py +++ b/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py @@ -4,35 +4,16 @@ def test_ThresholdStatistics_inputs(): input_map = dict( - contrast_index=dict( - mandatory=True, - ), - extent_threshold=dict( - usedefault=True, - ), - height_threshold=dict( - mandatory=True, - ), + contrast_index=dict(mandatory=True), + extent_threshold=dict(usedefault=True), + height_threshold=dict(mandatory=True), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), + mfile=dict(usedefault=True), paths=dict(), - spm_mat_file=dict( - copyfile=True, - extensions=None, - mandatory=True, - ), - stat_image=dict( - copyfile=False, - extensions=None, - mandatory=True, - ), + spm_mat_file=dict(copyfile=True, extensions=None, mandatory=True), + stat_image=dict(copyfile=False, extensions=None, mandatory=True), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True), ) inputs = ThresholdStatistics.input_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py b/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py index fd03e6c867..40760ba75d 100644 --- a/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py @@ -4,50 +4,27 @@ def test_TwoSampleTTestDesign_inputs(): input_map = dict( - covariates=dict( - field="cov", - ), - dependent=dict( - field="des.t2.dept", - ), - explicit_mask_file=dict( - extensions=None, - field="masking.em", - ), + covariates=dict(field="cov"), + dependent=dict(field="des.t2.dept"), + explicit_mask_file=dict(extensions=None, field="masking.em"), global_calc_mean=dict( - field="globalc.g_mean", - xor=["global_calc_omit", "global_calc_values"], + field="globalc.g_mean", xor=["global_calc_omit", "global_calc_values"] ), global_calc_omit=dict( - field="globalc.g_omit", - xor=["global_calc_mean", "global_calc_values"], + field="globalc.g_omit", xor=["global_calc_mean", "global_calc_values"] ), global_calc_values=dict( field="globalc.g_user.global_uval", xor=["global_calc_mean", "global_calc_omit"], ), - global_normalization=dict( - field="globalm.glonorm", - ), - group1_files=dict( - field="des.t2.scans1", - mandatory=True, - ), - group2_files=dict( - field="des.t2.scans2", - mandatory=True, - ), + global_normalization=dict(field="globalm.glonorm"), + group1_files=dict(field="des.t2.scans1", mandatory=True), + group2_files=dict(field="des.t2.scans2", mandatory=True), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), - no_grand_mean_scaling=dict( - field="globalm.gmsca.gmsca_no", - ), + mfile=dict(usedefault=True), + no_grand_mean_scaling=dict(field="globalm.gmsca.gmsca_no"), paths=dict(), - spm_mat_dir=dict( - field="dir", - ), + spm_mat_dir=dict(field="dir"), threshold_mask_absolute=dict( field="masking.tm.tma.athresh", xor=["threshold_mask_none", "threshold_mask_relative"], @@ -60,17 +37,10 @@ def 
test_TwoSampleTTestDesign_inputs(): field="masking.tm.tmr.rthresh", xor=["threshold_mask_absolute", "threshold_mask_none"], ), - unequal_variance=dict( - field="des.t2.variance", - ), - use_implicit_threshold=dict( - field="masking.im", - ), + unequal_variance=dict(field="des.t2.variance"), + use_implicit_threshold=dict(field="masking.im"), use_mcr=dict(), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), + use_v8struct=dict(min_ver="8", usedefault=True), ) inputs = TwoSampleTTestDesign.input_spec() @@ -80,11 +50,7 @@ def test_TwoSampleTTestDesign_inputs(): def test_TwoSampleTTestDesign_outputs(): - output_map = dict( - spm_mat_file=dict( - extensions=None, - ), - ) + output_map = dict(spm_mat_file=dict(extensions=None)) outputs = TwoSampleTTestDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_VBMSegment.py b/nipype/interfaces/spm/tests/test_auto_VBMSegment.py index 4bc4664c27..1c57968462 100644 --- a/nipype/interfaces/spm/tests/test_auto_VBMSegment.py +++ b/nipype/interfaces/spm/tests/test_auto_VBMSegment.py @@ -5,146 +5,64 @@ def test_VBMSegment_inputs(): input_map = dict( bias_corrected_affine=dict( - field="estwrite.output.bias.affine", - usedefault=True, + field="estwrite.output.bias.affine", usedefault=True ), bias_corrected_native=dict( - field="estwrite.output.bias.native", - usedefault=True, + field="estwrite.output.bias.native", usedefault=True ), bias_corrected_normalized=dict( - field="estwrite.output.bias.warped", - usedefault=True, - ), - bias_fwhm=dict( - field="estwrite.opts.biasfwhm", - usedefault=True, - ), - bias_regularization=dict( - field="estwrite.opts.biasreg", - usedefault=True, - ), - cleanup_partitions=dict( - field="estwrite.extopts.cleanup", - usedefault=True, - ), - csf_dartel=dict( - field="estwrite.output.CSF.dartel", - usedefault=True, + field="estwrite.output.bias.warped", usedefault=True ), + bias_fwhm=dict(field="estwrite.opts.biasfwhm", usedefault=True), + bias_regularization=dict(field="estwrite.opts.biasreg", usedefault=True), + cleanup_partitions=dict(field="estwrite.extopts.cleanup", usedefault=True), + csf_dartel=dict(field="estwrite.output.CSF.dartel", usedefault=True), csf_modulated_normalized=dict( - field="estwrite.output.CSF.modulated", - usedefault=True, - ), - csf_native=dict( - field="estwrite.output.CSF.native", - usedefault=True, - ), - csf_normalized=dict( - field="estwrite.output.CSF.warped", - usedefault=True, + field="estwrite.output.CSF.modulated", usedefault=True ), + csf_native=dict(field="estwrite.output.CSF.native", usedefault=True), + csf_normalized=dict(field="estwrite.output.CSF.warped", usedefault=True), dartel_template=dict( extensions=[".hdr", ".img", ".img.gz", ".nii"], field="estwrite.extopts.dartelwarp.normhigh.darteltpm", ), - deformation_field=dict( - field="estwrite.output.warps", - usedefault=True, - ), - display_results=dict( - field="estwrite.extopts.print", - usedefault=True, - ), - gaussians_per_class=dict( - usedefault=True, - ), - gm_dartel=dict( - field="estwrite.output.GM.dartel", - usedefault=True, - ), + deformation_field=dict(field="estwrite.output.warps", usedefault=True), + display_results=dict(field="estwrite.extopts.print", usedefault=True), + gaussians_per_class=dict(usedefault=True), + gm_dartel=dict(field="estwrite.output.GM.dartel", usedefault=True), gm_modulated_normalized=dict( - field="estwrite.output.GM.modulated", - usedefault=True, - ), - gm_native=dict( - field="estwrite.output.GM.native", - usedefault=True, - ), 
- gm_normalized=dict( - field="estwrite.output.GM.warped", - usedefault=True, - ), - in_files=dict( - copyfile=False, - field="estwrite.data", - mandatory=True, - ), - jacobian_determinant=dict( - field="estwrite.jacobian.warped", - usedefault=True, + field="estwrite.output.GM.modulated", usedefault=True ), + gm_native=dict(field="estwrite.output.GM.native", usedefault=True), + gm_normalized=dict(field="estwrite.output.GM.warped", usedefault=True), + in_files=dict(copyfile=False, field="estwrite.data", mandatory=True), + jacobian_determinant=dict(field="estwrite.jacobian.warped", usedefault=True), matlab_cmd=dict(), - mfile=dict( - usedefault=True, - ), - mrf_weighting=dict( - field="estwrite.extopts.mrf", - usedefault=True, - ), + mfile=dict(usedefault=True), + mrf_weighting=dict(field="estwrite.extopts.mrf", usedefault=True), paths=dict(), - pve_label_dartel=dict( - field="estwrite.output.label.dartel", - usedefault=True, - ), - pve_label_native=dict( - field="estwrite.output.label.native", - usedefault=True, - ), + pve_label_dartel=dict(field="estwrite.output.label.dartel", usedefault=True), + pve_label_native=dict(field="estwrite.output.label.native", usedefault=True), pve_label_normalized=dict( - field="estwrite.output.label.warped", - usedefault=True, - ), - sampling_distance=dict( - field="estwrite.opts.samp", - usedefault=True, - ), - spatial_normalization=dict( - usedefault=True, + field="estwrite.output.label.warped", usedefault=True ), + sampling_distance=dict(field="estwrite.opts.samp", usedefault=True), + spatial_normalization=dict(usedefault=True), tissues=dict( - extensions=[".hdr", ".img", ".img.gz", ".nii"], - field="estwrite.tpm", + extensions=[".hdr", ".img", ".img.gz", ".nii"], field="estwrite.tpm" ), use_mcr=dict(), use_sanlm_denoising_filter=dict( - field="estwrite.extopts.sanlm", - usedefault=True, - ), - use_v8struct=dict( - min_ver="8", - usedefault=True, - ), - warping_regularization=dict( - field="estwrite.opts.warpreg", - usedefault=True, - ), - wm_dartel=dict( - field="estwrite.output.WM.dartel", - usedefault=True, + field="estwrite.extopts.sanlm", usedefault=True ), + use_v8struct=dict(min_ver="8", usedefault=True), + warping_regularization=dict(field="estwrite.opts.warpreg", usedefault=True), + wm_dartel=dict(field="estwrite.output.WM.dartel", usedefault=True), wm_modulated_normalized=dict( - field="estwrite.output.WM.modulated", - usedefault=True, - ), - wm_native=dict( - field="estwrite.output.WM.native", - usedefault=True, - ), - wm_normalized=dict( - field="estwrite.output.WM.warped", - usedefault=True, + field="estwrite.output.WM.modulated", usedefault=True ), + wm_native=dict(field="estwrite.output.WM.native", usedefault=True), + wm_normalized=dict(field="estwrite.output.WM.warped", usedefault=True), ) inputs = VBMSegment.input_spec() diff --git a/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py b/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py index dd7d45e42b..74f9e7312c 100644 --- a/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py +++ b/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py @@ -4,18 +4,11 @@ def test_BIDSDataGrabber_inputs(): input_map = dict( - base_dir=dict( - mandatory=True, - ), + base_dir=dict(mandatory=True), extra_derivatives=dict(), - index_derivatives=dict( - mandatory=True, - usedefault=True, - ), + index_derivatives=dict(mandatory=True, usedefault=True), output_query=dict(), - raise_on_empty=dict( - usedefault=True, - ), + raise_on_empty=dict(usedefault=True), ) inputs = BIDSDataGrabber.input_spec() diff --git 
a/nipype/interfaces/tests/test_auto_Bru2.py b/nipype/interfaces/tests/test_auto_Bru2.py index 7935d2fc97..8537020701 100644 --- a/nipype/interfaces/tests/test_auto_Bru2.py +++ b/nipype/interfaces/tests/test_auto_Bru2.py @@ -4,34 +4,14 @@ def test_Bru2_inputs(): input_map = dict( - actual_size=dict( - argstr="-a", - ), - append_protocol_name=dict( - argstr="-p", - ), - args=dict( - argstr="%s", - ), - compress=dict( - argstr="-z", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - force_conversion=dict( - argstr="-f", - ), - input_dir=dict( - argstr="%s", - mandatory=True, - position=-1, - ), - output_filename=dict( - argstr="-o %s", - genfile=True, - ), + actual_size=dict(argstr="-a"), + append_protocol_name=dict(argstr="-p"), + args=dict(argstr="%s"), + compress=dict(argstr="-z"), + environ=dict(nohash=True, usedefault=True), + force_conversion=dict(argstr="-f"), + input_dir=dict(argstr="%s", mandatory=True, position=-1), + output_filename=dict(argstr="-o %s", genfile=True), ) inputs = Bru2.input_spec() @@ -41,11 +21,7 @@ def test_Bru2_inputs(): def test_Bru2_outputs(): - output_map = dict( - nii_file=dict( - extensions=None, - ), - ) + output_map = dict(nii_file=dict(extensions=None)) outputs = Bru2.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_C3d.py b/nipype/interfaces/tests/test_auto_C3d.py index d4d26e8264..8d0a473d02 100644 --- a/nipype/interfaces/tests/test_auto_C3d.py +++ b/nipype/interfaces/tests/test_auto_C3d.py @@ -4,55 +4,19 @@ def test_C3d_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - mandatory=True, - position=1, - ), - interp=dict( - argstr="-interpolation %s", - ), - is_4d=dict( - usedefault=True, - ), - multicomp_split=dict( - argstr="-mcr", - position=0, - usedefault=True, - ), - out_file=dict( - argstr="-o %s", - extensions=None, - position=-1, - xor=["out_files"], - ), - out_files=dict( - argstr="-oo %s", - position=-1, - xor=["out_file"], - ), - pix_type=dict( - argstr="-type %s", - ), - resample=dict( - argstr="-resample %s", - ), - scale=dict( - argstr="-scale %s", - ), - shift=dict( - argstr="-shift %s", - ), - smooth=dict( - argstr="-smooth %s", - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", mandatory=True, position=1), + interp=dict(argstr="-interpolation %s"), + is_4d=dict(usedefault=True), + multicomp_split=dict(argstr="-mcr", position=0, usedefault=True), + out_file=dict(argstr="-o %s", extensions=None, position=-1, xor=["out_files"]), + out_files=dict(argstr="-oo %s", position=-1, xor=["out_file"]), + pix_type=dict(argstr="-type %s"), + resample=dict(argstr="-resample %s"), + scale=dict(argstr="-scale %s"), + shift=dict(argstr="-shift %s"), + smooth=dict(argstr="-smooth %s"), ) inputs = C3d.input_spec() @@ -62,9 +26,7 @@ def test_C3d_inputs(): def test_C3d_outputs(): - output_map = dict( - out_files=dict(), - ) + output_map = dict(out_files=dict()) outputs = C3d.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_C3dAffineTool.py b/nipype/interfaces/tests/test_auto_C3dAffineTool.py index 153f6090a7..851a354bd3 100644 --- a/nipype/interfaces/tests/test_auto_C3dAffineTool.py +++ b/nipype/interfaces/tests/test_auto_C3dAffineTool.py @@ -4,37 +4,13 @@ def test_C3dAffineTool_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - 
usedefault=True, - ), - fsl2ras=dict( - argstr="-fsl2ras", - position=4, - ), - itk_transform=dict( - argstr="-oitk %s", - hash_files=False, - position=5, - ), - reference_file=dict( - argstr="-ref %s", - extensions=None, - position=1, - ), - source_file=dict( - argstr="-src %s", - extensions=None, - position=2, - ), - transform_file=dict( - argstr="%s", - extensions=None, - position=3, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + fsl2ras=dict(argstr="-fsl2ras", position=4), + itk_transform=dict(argstr="-oitk %s", hash_files=False, position=5), + reference_file=dict(argstr="-ref %s", extensions=None, position=1), + source_file=dict(argstr="-src %s", extensions=None, position=2), + transform_file=dict(argstr="%s", extensions=None, position=3), ) inputs = C3dAffineTool.input_spec() @@ -44,11 +20,7 @@ def test_C3dAffineTool_inputs(): def test_C3dAffineTool_outputs(): - output_map = dict( - itk_transform=dict( - extensions=None, - ), - ) + output_map = dict(itk_transform=dict(extensions=None)) outputs = C3dAffineTool.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_CopyMeta.py b/nipype/interfaces/tests/test_auto_CopyMeta.py index f7a554226e..aa7fe10301 100644 --- a/nipype/interfaces/tests/test_auto_CopyMeta.py +++ b/nipype/interfaces/tests/test_auto_CopyMeta.py @@ -4,16 +4,10 @@ def test_CopyMeta_inputs(): input_map = dict( - dest_file=dict( - extensions=None, - mandatory=True, - ), + dest_file=dict(extensions=None, mandatory=True), exclude_classes=dict(), include_classes=dict(), - src_file=dict( - extensions=None, - mandatory=True, - ), + src_file=dict(extensions=None, mandatory=True), ) inputs = CopyMeta.input_spec() @@ -23,11 +17,7 @@ def test_CopyMeta_inputs(): def test_CopyMeta_outputs(): - output_map = dict( - dest_file=dict( - extensions=None, - ), - ) + output_map = dict(dest_file=dict(extensions=None)) outputs = CopyMeta.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_DataFinder.py b/nipype/interfaces/tests/test_auto_DataFinder.py index e1f24b3517..18980655cf 100644 --- a/nipype/interfaces/tests/test_auto_DataFinder.py +++ b/nipype/interfaces/tests/test_auto_DataFinder.py @@ -5,17 +5,11 @@ def test_DataFinder_inputs(): input_map = dict( ignore_regexes=dict(), - match_regex=dict( - usedefault=True, - ), + match_regex=dict(usedefault=True), max_depth=dict(), min_depth=dict(), - root_paths=dict( - mandatory=True, - ), - unpack_single=dict( - usedefault=True, - ), + root_paths=dict(mandatory=True), + unpack_single=dict(usedefault=True), ) inputs = DataFinder.input_spec() diff --git a/nipype/interfaces/tests/test_auto_DataGrabber.py b/nipype/interfaces/tests/test_auto_DataGrabber.py index a58e4dd434..d2697f537d 100644 --- a/nipype/interfaces/tests/test_auto_DataGrabber.py +++ b/nipype/interfaces/tests/test_auto_DataGrabber.py @@ -5,18 +5,10 @@ def test_DataGrabber_inputs(): input_map = dict( base_directory=dict(), - drop_blank_outputs=dict( - usedefault=True, - ), - raise_on_empty=dict( - usedefault=True, - ), - sort_filelist=dict( - mandatory=True, - ), - template=dict( - mandatory=True, - ), + drop_blank_outputs=dict(usedefault=True), + raise_on_empty=dict(usedefault=True), + sort_filelist=dict(mandatory=True), + template=dict(mandatory=True), template_args=dict(), ) inputs = DataGrabber.input_spec() diff --git a/nipype/interfaces/tests/test_auto_DataSink.py b/nipype/interfaces/tests/test_auto_DataSink.py index 1ce4183b70..84bf211231 
100644 --- a/nipype/interfaces/tests/test_auto_DataSink.py +++ b/nipype/interfaces/tests/test_auto_DataSink.py @@ -4,22 +4,16 @@ def test_DataSink_inputs(): input_map = dict( - _outputs=dict( - usedefault=True, - ), + _outputs=dict(usedefault=True), base_directory=dict(), bucket=dict(), container=dict(), creds_path=dict(), encrypt_bucket_keys=dict(), local_copy=dict(), - parameterization=dict( - usedefault=True, - ), + parameterization=dict(usedefault=True), regexp_substitutions=dict(), - remove_dest_dir=dict( - usedefault=True, - ), + remove_dest_dir=dict(usedefault=True), strip_dir=dict(), substitutions=dict(), ) @@ -31,9 +25,7 @@ def test_DataSink_inputs(): def test_DataSink_outputs(): - output_map = dict( - out_file=dict(), - ) + output_map = dict(out_file=dict()) outputs = DataSink.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_Dcm2nii.py b/nipype/interfaces/tests/test_auto_Dcm2nii.py index 948aafa083..c45c4b71fb 100644 --- a/nipype/interfaces/tests/test_auto_Dcm2nii.py +++ b/nipype/interfaces/tests/test_auto_Dcm2nii.py @@ -4,86 +4,27 @@ def test_Dcm2nii_inputs(): input_map = dict( - anonymize=dict( - argstr="-a", - usedefault=True, - ), - args=dict( - argstr="%s", - ), - collapse_folders=dict( - argstr="-c", - usedefault=True, - ), - config_file=dict( - argstr="-b %s", - extensions=None, - genfile=True, - ), - convert_all_pars=dict( - argstr="-v", - usedefault=True, - ), - date_in_filename=dict( - argstr="-d", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - events_in_filename=dict( - argstr="-e", - usedefault=True, - ), - gzip_output=dict( - argstr="-g", - usedefault=True, - ), - id_in_filename=dict( - argstr="-i", - usedefault=True, - ), - nii_output=dict( - argstr="-n", - usedefault=True, - ), - output_dir=dict( - argstr="-o %s", - genfile=True, - ), - protocol_in_filename=dict( - argstr="-p", - usedefault=True, - ), - reorient=dict( - argstr="-r", - ), - reorient_and_crop=dict( - argstr="-x", - usedefault=True, - ), - source_dir=dict( - argstr="%s", - mandatory=True, - position=-1, - xor=["source_names"], - ), - source_in_filename=dict( - argstr="-f", - usedefault=True, - ), + anonymize=dict(argstr="-a", usedefault=True), + args=dict(argstr="%s"), + collapse_folders=dict(argstr="-c", usedefault=True), + config_file=dict(argstr="-b %s", extensions=None, genfile=True), + convert_all_pars=dict(argstr="-v", usedefault=True), + date_in_filename=dict(argstr="-d", usedefault=True), + environ=dict(nohash=True, usedefault=True), + events_in_filename=dict(argstr="-e", usedefault=True), + gzip_output=dict(argstr="-g", usedefault=True), + id_in_filename=dict(argstr="-i", usedefault=True), + nii_output=dict(argstr="-n", usedefault=True), + output_dir=dict(argstr="-o %s", genfile=True), + protocol_in_filename=dict(argstr="-p", usedefault=True), + reorient=dict(argstr="-r"), + reorient_and_crop=dict(argstr="-x", usedefault=True), + source_dir=dict(argstr="%s", mandatory=True, position=-1, xor=["source_names"]), + source_in_filename=dict(argstr="-f", usedefault=True), source_names=dict( - argstr="%s", - copyfile=False, - mandatory=True, - position=-1, - xor=["source_dir"], - ), - spm_analyze=dict( - argstr="-s", - xor=["nii_output"], + argstr="%s", copyfile=False, mandatory=True, position=-1, xor=["source_dir"] ), + spm_analyze=dict(argstr="-s", xor=["nii_output"]), ) inputs = Dcm2nii.input_spec() diff --git a/nipype/interfaces/tests/test_auto_Dcm2niix.py b/nipype/interfaces/tests/test_auto_Dcm2niix.py 
index dec95abcff..918f839faf 100644 --- a/nipype/interfaces/tests/test_auto_Dcm2niix.py +++ b/nipype/interfaces/tests/test_auto_Dcm2niix.py @@ -4,83 +4,28 @@ def test_Dcm2niix_inputs(): input_map = dict( - anon_bids=dict( - argstr="-ba", - requires=["bids_format"], - ), - args=dict( - argstr="%s", - ), - bids_format=dict( - argstr="-b", - usedefault=True, - ), - comment=dict( - argstr="-c %s", - ), - compress=dict( - argstr="-z %s", - usedefault=True, - ), - compression=dict( - argstr="-%d", - ), - crop=dict( - argstr="-x", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - has_private=dict( - argstr="-t", - usedefault=True, - ), - ignore_deriv=dict( - argstr="-i", - ), - merge_imgs=dict( - argstr="-m", - usedefault=True, - ), - out_filename=dict( - argstr="-f %s", - ), - output_dir=dict( - argstr="-o %s", - usedefault=True, - ), - philips_float=dict( - argstr="-p", - ), - series_numbers=dict( - argstr="-n %s...", - ), - single_file=dict( - argstr="-s", - usedefault=True, - ), - source_dir=dict( - argstr="%s", - mandatory=True, - position=-1, - xor=["source_names"], - ), + anon_bids=dict(argstr="-ba", requires=["bids_format"]), + args=dict(argstr="%s"), + bids_format=dict(argstr="-b", usedefault=True), + comment=dict(argstr="-c %s"), + compress=dict(argstr="-z %s", usedefault=True), + compression=dict(argstr="-%d"), + crop=dict(argstr="-x", usedefault=True), + environ=dict(nohash=True, usedefault=True), + has_private=dict(argstr="-t", usedefault=True), + ignore_deriv=dict(argstr="-i"), + merge_imgs=dict(argstr="-m", usedefault=True), + out_filename=dict(argstr="-f %s"), + output_dir=dict(argstr="-o %s", usedefault=True), + philips_float=dict(argstr="-p"), + series_numbers=dict(argstr="-n %s..."), + single_file=dict(argstr="-s", usedefault=True), + source_dir=dict(argstr="%s", mandatory=True, position=-1, xor=["source_names"]), source_names=dict( - argstr="%s", - copyfile=False, - mandatory=True, - position=-1, - xor=["source_dir"], - ), - to_nrrd=dict( - argstr="-e", - ), - verbose=dict( - argstr="-v", - usedefault=True, + argstr="%s", copyfile=False, mandatory=True, position=-1, xor=["source_dir"] ), + to_nrrd=dict(argstr="-e"), + verbose=dict(argstr="-v", usedefault=True), ) inputs = Dcm2niix.input_spec() @@ -90,12 +35,7 @@ def test_Dcm2niix_inputs(): def test_Dcm2niix_outputs(): - output_map = dict( - bids=dict(), - bvals=dict(), - bvecs=dict(), - converted_files=dict(), - ) + output_map = dict(bids=dict(), bvals=dict(), bvecs=dict(), converted_files=dict()) outputs = Dcm2niix.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_DcmStack.py b/nipype/interfaces/tests/test_auto_DcmStack.py index 02bf268904..b4e9ac72e8 100644 --- a/nipype/interfaces/tests/test_auto_DcmStack.py +++ b/nipype/interfaces/tests/test_auto_DcmStack.py @@ -4,18 +4,12 @@ def test_DcmStack_inputs(): input_map = dict( - dicom_files=dict( - mandatory=True, - ), + dicom_files=dict(mandatory=True), embed_meta=dict(), exclude_regexes=dict(), - force_read=dict( - usedefault=True, - ), + force_read=dict(usedefault=True), include_regexes=dict(), - out_ext=dict( - usedefault=True, - ), + out_ext=dict(usedefault=True), out_format=dict(), out_path=dict(), ) @@ -27,11 +21,7 @@ def test_DcmStack_inputs(): def test_DcmStack_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = DcmStack.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/tests/test_auto_ExportFile.py b/nipype/interfaces/tests/test_auto_ExportFile.py index adac52e161..25b60efd23 100644 --- a/nipype/interfaces/tests/test_auto_ExportFile.py +++ b/nipype/interfaces/tests/test_auto_ExportFile.py @@ -4,18 +4,10 @@ def test_ExportFile_inputs(): input_map = dict( - check_extension=dict( - usedefault=True, - ), + check_extension=dict(usedefault=True), clobber=dict(), - in_file=dict( - extensions=None, - mandatory=True, - ), - out_file=dict( - extensions=None, - mandatory=True, - ), + in_file=dict(extensions=None, mandatory=True), + out_file=dict(extensions=None, mandatory=True), ) inputs = ExportFile.input_spec() @@ -25,11 +17,7 @@ def test_ExportFile_inputs(): def test_ExportFile_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = ExportFile.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_FreeSurferSource.py b/nipype/interfaces/tests/test_auto_FreeSurferSource.py index 875380f75f..e645054392 100644 --- a/nipype/interfaces/tests/test_auto_FreeSurferSource.py +++ b/nipype/interfaces/tests/test_auto_FreeSurferSource.py @@ -4,15 +4,9 @@ def test_FreeSurferSource_inputs(): input_map = dict( - hemi=dict( - usedefault=True, - ), - subject_id=dict( - mandatory=True, - ), - subjects_dir=dict( - mandatory=True, - ), + hemi=dict(usedefault=True), + subject_id=dict(mandatory=True), + subjects_dir=dict(mandatory=True), ) inputs = FreeSurferSource.input_spec() @@ -23,143 +17,43 @@ def test_FreeSurferSource_inputs(): def test_FreeSurferSource_outputs(): output_map = dict( - BA_stats=dict( - altkey="BA", - loc="stats", - ), - T1=dict( - extensions=None, - loc="mri", - ), - annot=dict( - altkey="*annot", - loc="label", - ), - aparc_a2009s_stats=dict( - altkey="aparc.a2009s", - loc="stats", - ), - aparc_aseg=dict( - altkey="aparc*aseg", - loc="mri", - ), - aparc_stats=dict( - altkey="aparc", - loc="stats", - ), - area_pial=dict( - altkey="area.pial", - loc="surf", - ), - aseg=dict( - extensions=None, - loc="mri", - ), - aseg_stats=dict( - altkey="aseg", - loc="stats", - ), - avg_curv=dict( - loc="surf", - ), - brain=dict( - extensions=None, - loc="mri", - ), - brainmask=dict( - extensions=None, - loc="mri", - ), - curv=dict( - loc="surf", - ), - curv_pial=dict( - altkey="curv.pial", - loc="surf", - ), - curv_stats=dict( - altkey="curv", - loc="stats", - ), - entorhinal_exvivo_stats=dict( - altkey="entorhinal_exvivo", - loc="stats", - ), - filled=dict( - extensions=None, - loc="mri", - ), - graymid=dict( - altkey=["graymid", "midthickness"], - loc="surf", - ), - inflated=dict( - loc="surf", - ), - jacobian_white=dict( - loc="surf", - ), - label=dict( - altkey="*label", - loc="label", - ), - norm=dict( - extensions=None, - loc="mri", - ), - nu=dict( - extensions=None, - loc="mri", - ), - orig=dict( - extensions=None, - loc="mri", - ), - pial=dict( - loc="surf", - ), - rawavg=dict( - extensions=None, - loc="mri", - ), - ribbon=dict( - altkey="*ribbon", - loc="mri", - ), - smoothwm=dict( - loc="surf", - ), - sphere=dict( - loc="surf", - ), - sphere_reg=dict( - altkey="sphere.reg", - loc="surf", - ), - sulc=dict( - loc="surf", - ), - thickness=dict( - loc="surf", - ), - volume=dict( - loc="surf", - ), - white=dict( - loc="surf", - ), - wm=dict( - extensions=None, - loc="mri", - ), - wmparc=dict( - extensions=None, - loc="mri", - ), - wmparc_stats=dict( - altkey="wmparc", - loc="stats", - ), + BA_stats=dict(altkey="BA", 
loc="stats"), + T1=dict(extensions=None, loc="mri"), + annot=dict(altkey="*annot", loc="label"), + aparc_a2009s_stats=dict(altkey="aparc.a2009s", loc="stats"), + aparc_aseg=dict(altkey="aparc*aseg", loc="mri"), + aparc_stats=dict(altkey="aparc", loc="stats"), + area_pial=dict(altkey="area.pial", loc="surf"), + aseg=dict(extensions=None, loc="mri"), + aseg_stats=dict(altkey="aseg", loc="stats"), + avg_curv=dict(loc="surf"), + brain=dict(extensions=None, loc="mri"), + brainmask=dict(extensions=None, loc="mri"), + curv=dict(loc="surf"), + curv_pial=dict(altkey="curv.pial", loc="surf"), + curv_stats=dict(altkey="curv", loc="stats"), + entorhinal_exvivo_stats=dict(altkey="entorhinal_exvivo", loc="stats"), + filled=dict(extensions=None, loc="mri"), + graymid=dict(altkey=["graymid", "midthickness"], loc="surf"), + inflated=dict(loc="surf"), + jacobian_white=dict(loc="surf"), + label=dict(altkey="*label", loc="label"), + norm=dict(extensions=None, loc="mri"), + nu=dict(extensions=None, loc="mri"), + orig=dict(extensions=None, loc="mri"), + pial=dict(loc="surf"), + rawavg=dict(extensions=None, loc="mri"), + ribbon=dict(altkey="*ribbon", loc="mri"), + smoothwm=dict(loc="surf"), + sphere=dict(loc="surf"), + sphere_reg=dict(altkey="sphere.reg", loc="surf"), + sulc=dict(loc="surf"), + thickness=dict(loc="surf"), + volume=dict(loc="surf"), + white=dict(loc="surf"), + wm=dict(extensions=None, loc="mri"), + wmparc=dict(extensions=None, loc="mri"), + wmparc_stats=dict(altkey="wmparc", loc="stats"), ) outputs = FreeSurferSource.output_spec() diff --git a/nipype/interfaces/tests/test_auto_GroupAndStack.py b/nipype/interfaces/tests/test_auto_GroupAndStack.py index 7282e23a8b..4d96b6d507 100644 --- a/nipype/interfaces/tests/test_auto_GroupAndStack.py +++ b/nipype/interfaces/tests/test_auto_GroupAndStack.py @@ -4,18 +4,12 @@ def test_GroupAndStack_inputs(): input_map = dict( - dicom_files=dict( - mandatory=True, - ), + dicom_files=dict(mandatory=True), embed_meta=dict(), exclude_regexes=dict(), - force_read=dict( - usedefault=True, - ), + force_read=dict(usedefault=True), include_regexes=dict(), - out_ext=dict( - usedefault=True, - ), + out_ext=dict(usedefault=True), out_format=dict(), out_path=dict(), ) @@ -27,9 +21,7 @@ def test_GroupAndStack_inputs(): def test_GroupAndStack_outputs(): - output_map = dict( - out_list=dict(), - ) + output_map = dict(out_list=dict()) outputs = GroupAndStack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_JSONFileGrabber.py b/nipype/interfaces/tests/test_auto_JSONFileGrabber.py index 6e548f17c1..797fb74271 100644 --- a/nipype/interfaces/tests/test_auto_JSONFileGrabber.py +++ b/nipype/interfaces/tests/test_auto_JSONFileGrabber.py @@ -3,12 +3,7 @@ def test_JSONFileGrabber_inputs(): - input_map = dict( - defaults=dict(), - in_file=dict( - extensions=None, - ), - ) + input_map = dict(defaults=dict(), in_file=dict(extensions=None)) inputs = JSONFileGrabber.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/interfaces/tests/test_auto_JSONFileSink.py b/nipype/interfaces/tests/test_auto_JSONFileSink.py index cdfa32195b..03e9eb4511 100644 --- a/nipype/interfaces/tests/test_auto_JSONFileSink.py +++ b/nipype/interfaces/tests/test_auto_JSONFileSink.py @@ -4,15 +4,9 @@ def test_JSONFileSink_inputs(): input_map = dict( - _outputs=dict( - usedefault=True, - ), - in_dict=dict( - usedefault=True, - ), - out_file=dict( - extensions=None, - ), + _outputs=dict(usedefault=True), + in_dict=dict(usedefault=True), + 
out_file=dict(extensions=None), ) inputs = JSONFileSink.input_spec() @@ -22,11 +16,7 @@ def test_JSONFileSink_inputs(): def test_JSONFileSink_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = JSONFileSink.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_LookupMeta.py b/nipype/interfaces/tests/test_auto_LookupMeta.py index 22f6678734..d3508cf130 100644 --- a/nipype/interfaces/tests/test_auto_LookupMeta.py +++ b/nipype/interfaces/tests/test_auto_LookupMeta.py @@ -4,13 +4,7 @@ def test_LookupMeta_inputs(): input_map = dict( - in_file=dict( - extensions=None, - mandatory=True, - ), - meta_keys=dict( - mandatory=True, - ), + in_file=dict(extensions=None, mandatory=True), meta_keys=dict(mandatory=True) ) inputs = LookupMeta.input_spec() diff --git a/nipype/interfaces/tests/test_auto_MatlabCommand.py b/nipype/interfaces/tests/test_auto_MatlabCommand.py index 1dfd9c1dde..ff9f51e77b 100644 --- a/nipype/interfaces/tests/test_auto_MatlabCommand.py +++ b/nipype/interfaces/tests/test_auto_MatlabCommand.py @@ -4,54 +4,19 @@ def test_MatlabCommand_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - logfile=dict( - argstr="-logfile %s", - extensions=None, - ), - mfile=dict( - usedefault=True, - ), - nodesktop=dict( - argstr="-nodesktop", - nohash=True, - usedefault=True, - ), - nosplash=dict( - argstr="-nosplash", - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + logfile=dict(argstr="-logfile %s", extensions=None), + mfile=dict(usedefault=True), + nodesktop=dict(argstr="-nodesktop", nohash=True, usedefault=True), + nosplash=dict(argstr="-nosplash", nohash=True, usedefault=True), paths=dict(), - postscript=dict( - usedefault=True, - ), - prescript=dict( - usedefault=True, - ), - script=dict( - argstr='-r "%s;exit"', - mandatory=True, - position=-1, - ), - script_file=dict( - extensions=None, - usedefault=True, - ), - single_comp_thread=dict( - argstr="-singleCompThread", - nohash=True, - ), - uses_mcr=dict( - nohash=True, - xor=["nodesktop", "nosplash", "single_comp_thread"], - ), + postscript=dict(usedefault=True), + prescript=dict(usedefault=True), + script=dict(argstr='-r "%s;exit"', mandatory=True, position=-1), + script_file=dict(extensions=None, usedefault=True), + single_comp_thread=dict(argstr="-singleCompThread", nohash=True), + uses_mcr=dict(nohash=True, xor=["nodesktop", "nosplash", "single_comp_thread"]), ) inputs = MatlabCommand.input_spec() diff --git a/nipype/interfaces/tests/test_auto_MergeNifti.py b/nipype/interfaces/tests/test_auto_MergeNifti.py index f199fc5da6..64c4a68dd0 100644 --- a/nipype/interfaces/tests/test_auto_MergeNifti.py +++ b/nipype/interfaces/tests/test_auto_MergeNifti.py @@ -4,13 +4,9 @@ def test_MergeNifti_inputs(): input_map = dict( - in_files=dict( - mandatory=True, - ), + in_files=dict(mandatory=True), merge_dim=dict(), - out_ext=dict( - usedefault=True, - ), + out_ext=dict(usedefault=True), out_format=dict(), out_path=dict(), sort_order=dict(), @@ -23,11 +19,7 @@ def test_MergeNifti_inputs(): def test_MergeNifti_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = MergeNifti.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_MeshFix.py 
b/nipype/interfaces/tests/test_auto_MeshFix.py index 1ae3b2b67c..9d4f8c8de0 100644 --- a/nipype/interfaces/tests/test_auto_MeshFix.py +++ b/nipype/interfaces/tests/test_auto_MeshFix.py @@ -4,41 +4,18 @@ def test_MeshFix_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - cut_inner=dict( - argstr="--cut-inner %d", - ), - cut_outer=dict( - argstr="--cut-outer %d", - ), - decouple_inin=dict( - argstr="--decouple-inin %d", - ), - decouple_outin=dict( - argstr="--decouple-outin %d", - ), - decouple_outout=dict( - argstr="--decouple-outout %d", - ), - dilation=dict( - argstr="--dilate %d", - ), - dont_clean=dict( - argstr="--no-clean", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - epsilon_angle=dict( - argstr="-a %f", - ), + args=dict(argstr="%s"), + cut_inner=dict(argstr="--cut-inner %d"), + cut_outer=dict(argstr="--cut-outer %d"), + decouple_inin=dict(argstr="--decouple-inin %d"), + decouple_outin=dict(argstr="--decouple-outin %d"), + decouple_outout=dict(argstr="--decouple-outout %d"), + dilation=dict(argstr="--dilate %d"), + dont_clean=dict(argstr="--no-clean"), + environ=dict(nohash=True, usedefault=True), + epsilon_angle=dict(argstr="-a %f"), finetuning_distance=dict( - argstr="%f", - position=-2, - requires=["finetuning_substeps"], + argstr="%f", position=-2, requires=["finetuning_substeps"] ), finetuning_inwards=dict( argstr="--fineTuneIn ", @@ -52,75 +29,37 @@ def test_MeshFix_inputs(): xor=["finetuning_inwards"], ), finetuning_substeps=dict( - argstr="%d", - position=-1, - requires=["finetuning_distance"], - ), - in_file1=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=1, - ), - in_file2=dict( - argstr="%s", - extensions=None, - position=2, - ), - join_closest_components=dict( - argstr="-jc", - xor=["join_closest_components"], + argstr="%d", position=-1, requires=["finetuning_distance"] ), + in_file1=dict(argstr="%s", extensions=None, mandatory=True, position=1), + in_file2=dict(argstr="%s", extensions=None, position=2), + join_closest_components=dict(argstr="-jc", xor=["join_closest_components"]), join_overlapping_largest_components=dict( - argstr="-j", - xor=["join_closest_components"], - ), - laplacian_smoothing_steps=dict( - argstr="--smooth %d", - ), - number_of_biggest_shells=dict( - argstr="--shells %d", - ), - out_filename=dict( - argstr="-o %s", - extensions=None, - genfile=True, - ), - output_type=dict( - usedefault=True, - ), - quiet_mode=dict( - argstr="-q", - ), - remove_handles=dict( - argstr="--remove-handles", - ), + argstr="-j", xor=["join_closest_components"] + ), + laplacian_smoothing_steps=dict(argstr="--smooth %d"), + number_of_biggest_shells=dict(argstr="--shells %d"), + out_filename=dict(argstr="-o %s", extensions=None, genfile=True), + output_type=dict(usedefault=True), + quiet_mode=dict(argstr="-q"), + remove_handles=dict(argstr="--remove-handles"), save_as_freesurfer_mesh=dict( - argstr="--fsmesh", - xor=["save_as_vrml", "save_as_stl"], + argstr="--fsmesh", xor=["save_as_vrml", "save_as_stl"] ), save_as_stl=dict( - argstr="--stl", - xor=["save_as_vrml", "save_as_freesurfer_mesh"], + argstr="--stl", xor=["save_as_vrml", "save_as_freesurfer_mesh"] ), save_as_vrml=dict( - argstr="--wrl", - xor=["save_as_stl", "save_as_freesurfer_mesh"], - ), - set_intersections_to_one=dict( - argstr="--intersect", + argstr="--wrl", xor=["save_as_stl", "save_as_freesurfer_mesh"] ), + set_intersections_to_one=dict(argstr="--intersect"), uniform_remeshing_steps=dict( - argstr="-u %d", - requires=["uniform_remeshing_vertices"], + 
argstr="-u %d", requires=["uniform_remeshing_vertices"] ), uniform_remeshing_vertices=dict( - argstr="--vertices %d", - requires=["uniform_remeshing_steps"], - ), - x_shift=dict( - argstr="--smooth %d", + argstr="--vertices %d", requires=["uniform_remeshing_steps"] ), + x_shift=dict(argstr="--smooth %d"), ) inputs = MeshFix.input_spec() @@ -130,11 +69,7 @@ def test_MeshFix_inputs(): def test_MeshFix_outputs(): - output_map = dict( - mesh_file=dict( - extensions=None, - ), - ) + output_map = dict(mesh_file=dict(extensions=None)) outputs = MeshFix.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_MySQLSink.py b/nipype/interfaces/tests/test_auto_MySQLSink.py index 702a21e9ce..56623680fe 100644 --- a/nipype/interfaces/tests/test_auto_MySQLSink.py +++ b/nipype/interfaces/tests/test_auto_MySQLSink.py @@ -4,14 +4,8 @@ def test_MySQLSink_inputs(): input_map = dict( - config=dict( - extensions=None, - mandatory=True, - xor=["host"], - ), - database_name=dict( - mandatory=True, - ), + config=dict(extensions=None, mandatory=True, xor=["host"]), + database_name=dict(mandatory=True), host=dict( mandatory=True, requires=["username", "password"], @@ -19,9 +13,7 @@ def test_MySQLSink_inputs(): xor=["config"], ), password=dict(), - table_name=dict( - mandatory=True, - ), + table_name=dict(mandatory=True), username=dict(), ) inputs = MySQLSink.input_spec() diff --git a/nipype/interfaces/tests/test_auto_PETPVC.py b/nipype/interfaces/tests/test_auto_PETPVC.py index f7da7f45dd..5bcddbdb0b 100644 --- a/nipype/interfaces/tests/test_auto_PETPVC.py +++ b/nipype/interfaces/tests/test_auto_PETPVC.py @@ -4,65 +4,20 @@ def test_PETPVC_inputs(): input_map = dict( - alpha=dict( - argstr="-a %.4f", - usedefault=True, - ), - args=dict( - argstr="%s", - ), - debug=dict( - argstr="-d", - usedefault=True, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fwhm_x=dict( - argstr="-x %.4f", - mandatory=True, - ), - fwhm_y=dict( - argstr="-y %.4f", - mandatory=True, - ), - fwhm_z=dict( - argstr="-z %.4f", - mandatory=True, - ), - in_file=dict( - argstr="-i %s", - extensions=None, - mandatory=True, - ), - mask_file=dict( - argstr="-m %s", - extensions=None, - mandatory=True, - ), - n_deconv=dict( - argstr="-k %d", - usedefault=True, - ), - n_iter=dict( - argstr="-n %d", - usedefault=True, - ), - out_file=dict( - argstr="-o %s", - extensions=None, - genfile=True, - hash_files=False, - ), - pvc=dict( - argstr="-p %s", - mandatory=True, - ), - stop_crit=dict( - argstr="-s %.4f", - usedefault=True, - ), + alpha=dict(argstr="-a %.4f", usedefault=True), + args=dict(argstr="%s"), + debug=dict(argstr="-d", usedefault=True), + environ=dict(nohash=True, usedefault=True), + fwhm_x=dict(argstr="-x %.4f", mandatory=True), + fwhm_y=dict(argstr="-y %.4f", mandatory=True), + fwhm_z=dict(argstr="-z %.4f", mandatory=True), + in_file=dict(argstr="-i %s", extensions=None, mandatory=True), + mask_file=dict(argstr="-m %s", extensions=None, mandatory=True), + n_deconv=dict(argstr="-k %d", usedefault=True), + n_iter=dict(argstr="-n %d", usedefault=True), + out_file=dict(argstr="-o %s", extensions=None, genfile=True, hash_files=False), + pvc=dict(argstr="-p %s", mandatory=True), + stop_crit=dict(argstr="-s %.4f", usedefault=True), ) inputs = PETPVC.input_spec() @@ -72,11 +27,7 @@ def test_PETPVC_inputs(): def test_PETPVC_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = PETPVC.output_spec() for key, 
metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_Quickshear.py b/nipype/interfaces/tests/test_auto_Quickshear.py index 92212335dc..e763237eb6 100644 --- a/nipype/interfaces/tests/test_auto_Quickshear.py +++ b/nipype/interfaces/tests/test_auto_Quickshear.py @@ -4,29 +4,11 @@ def test_Quickshear_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - buff=dict( - argstr="%d", - position=4, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=1, - ), - mask_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), + args=dict(argstr="%s"), + buff=dict(argstr="%d", position=4), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1), + mask_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), out_file=dict( argstr="%s", extensions=None, @@ -44,11 +26,7 @@ def test_Quickshear_inputs(): def test_Quickshear_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Quickshear.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_RCommand.py b/nipype/interfaces/tests/test_auto_RCommand.py index adfcf36cf0..45a807fe84 100644 --- a/nipype/interfaces/tests/test_auto_RCommand.py +++ b/nipype/interfaces/tests/test_auto_RCommand.py @@ -4,25 +4,11 @@ def test_RCommand_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - rfile=dict( - usedefault=True, - ), - script=dict( - argstr='-e "%s"', - mandatory=True, - position=-1, - ), - script_file=dict( - extensions=None, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + rfile=dict(usedefault=True), + script=dict(argstr='-e "%s"', mandatory=True, position=-1), + script_file=dict(extensions=None, usedefault=True), ) inputs = RCommand.input_spec() diff --git a/nipype/interfaces/tests/test_auto_Reorient.py b/nipype/interfaces/tests/test_auto_Reorient.py index eb7a0ce5ff..7cc171de37 100644 --- a/nipype/interfaces/tests/test_auto_Reorient.py +++ b/nipype/interfaces/tests/test_auto_Reorient.py @@ -4,13 +4,7 @@ def test_Reorient_inputs(): input_map = dict( - in_file=dict( - extensions=None, - mandatory=True, - ), - orientation=dict( - usedefault=True, - ), + in_file=dict(extensions=None, mandatory=True), orientation=dict(usedefault=True) ) inputs = Reorient.input_spec() @@ -20,14 +14,7 @@ def test_Reorient_inputs(): def test_Reorient_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - transform=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None), transform=dict(extensions=None)) outputs = Reorient.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_Rescale.py b/nipype/interfaces/tests/test_auto_Rescale.py index 5b14cfc8c0..4a9ccced93 100644 --- a/nipype/interfaces/tests/test_auto_Rescale.py +++ b/nipype/interfaces/tests/test_auto_Rescale.py @@ -4,18 +4,10 @@ def test_Rescale_inputs(): input_map = dict( - in_file=dict( - extensions=None, - mandatory=True, - ), + in_file=dict(extensions=None, mandatory=True), invert=dict(), - percentile=dict( - usedefault=True, - ), - ref_file=dict( - extensions=None, - mandatory=True, - ), + percentile=dict(usedefault=True), + 
ref_file=dict(extensions=None, mandatory=True), ) inputs = Rescale.input_spec() @@ -25,11 +17,7 @@ def test_Rescale_inputs(): def test_Rescale_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Rescale.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_S3DataGrabber.py b/nipype/interfaces/tests/test_auto_S3DataGrabber.py index 4b71fe49c3..a73058f246 100644 --- a/nipype/interfaces/tests/test_auto_S3DataGrabber.py +++ b/nipype/interfaces/tests/test_auto_S3DataGrabber.py @@ -4,28 +4,14 @@ def test_S3DataGrabber_inputs(): input_map = dict( - anon=dict( - usedefault=True, - ), - bucket=dict( - mandatory=True, - ), - bucket_path=dict( - usedefault=True, - ), + anon=dict(usedefault=True), + bucket=dict(mandatory=True), + bucket_path=dict(usedefault=True), local_directory=dict(), - raise_on_empty=dict( - usedefault=True, - ), - region=dict( - usedefault=True, - ), - sort_filelist=dict( - mandatory=True, - ), - template=dict( - mandatory=True, - ), + raise_on_empty=dict(usedefault=True), + region=dict(usedefault=True), + sort_filelist=dict(mandatory=True), + template=dict(mandatory=True), template_args=dict(), ) inputs = S3DataGrabber.input_spec() diff --git a/nipype/interfaces/tests/test_auto_SQLiteSink.py b/nipype/interfaces/tests/test_auto_SQLiteSink.py index fe33b65675..ea394509bc 100644 --- a/nipype/interfaces/tests/test_auto_SQLiteSink.py +++ b/nipype/interfaces/tests/test_auto_SQLiteSink.py @@ -4,13 +4,8 @@ def test_SQLiteSink_inputs(): input_map = dict( - database_file=dict( - extensions=None, - mandatory=True, - ), - table_name=dict( - mandatory=True, - ), + database_file=dict(extensions=None, mandatory=True), + table_name=dict(mandatory=True), ) inputs = SQLiteSink.input_spec() diff --git a/nipype/interfaces/tests/test_auto_SSHDataGrabber.py b/nipype/interfaces/tests/test_auto_SSHDataGrabber.py index 5a58cad276..ed8ead60e6 100644 --- a/nipype/interfaces/tests/test_auto_SSHDataGrabber.py +++ b/nipype/interfaces/tests/test_auto_SSHDataGrabber.py @@ -4,35 +4,17 @@ def test_SSHDataGrabber_inputs(): input_map = dict( - base_directory=dict( - mandatory=True, - ), - download_files=dict( - usedefault=True, - ), - drop_blank_outputs=dict( - usedefault=True, - ), - hostname=dict( - mandatory=True, - ), + base_directory=dict(mandatory=True), + download_files=dict(usedefault=True), + drop_blank_outputs=dict(usedefault=True), + hostname=dict(mandatory=True), password=dict(), - raise_on_empty=dict( - usedefault=True, - ), - sort_filelist=dict( - mandatory=True, - ), - ssh_log_to_file=dict( - usedefault=True, - ), - template=dict( - mandatory=True, - ), + raise_on_empty=dict(usedefault=True), + sort_filelist=dict(mandatory=True), + ssh_log_to_file=dict(usedefault=True), + template=dict(mandatory=True), template_args=dict(), - template_expression=dict( - usedefault=True, - ), + template_expression=dict(usedefault=True), username=dict(), ) inputs = SSHDataGrabber.input_spec() diff --git a/nipype/interfaces/tests/test_auto_SelectFiles.py b/nipype/interfaces/tests/test_auto_SelectFiles.py index 06f86c36f0..ec04e42740 100644 --- a/nipype/interfaces/tests/test_auto_SelectFiles.py +++ b/nipype/interfaces/tests/test_auto_SelectFiles.py @@ -5,15 +5,9 @@ def test_SelectFiles_inputs(): input_map = dict( base_directory=dict(), - force_lists=dict( - usedefault=True, - ), - raise_on_empty=dict( - usedefault=True, - ), - sort_filelist=dict( - usedefault=True, - ), + 
force_lists=dict(usedefault=True), + raise_on_empty=dict(usedefault=True), + sort_filelist=dict(usedefault=True), ) inputs = SelectFiles.input_spec() diff --git a/nipype/interfaces/tests/test_auto_SignalExtraction.py b/nipype/interfaces/tests/test_auto_SignalExtraction.py index 272d94f54d..33b2512e75 100644 --- a/nipype/interfaces/tests/test_auto_SignalExtraction.py +++ b/nipype/interfaces/tests/test_auto_SignalExtraction.py @@ -4,29 +4,13 @@ def test_SignalExtraction_inputs(): input_map = dict( - class_labels=dict( - mandatory=True, - ), - detrend=dict( - usedefault=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), - incl_shared_variance=dict( - usedefault=True, - ), - include_global=dict( - usedefault=True, - ), - label_files=dict( - mandatory=True, - ), - out_file=dict( - extensions=None, - usedefault=True, - ), + class_labels=dict(mandatory=True), + detrend=dict(usedefault=True), + in_file=dict(extensions=None, mandatory=True), + incl_shared_variance=dict(usedefault=True), + include_global=dict(usedefault=True), + label_files=dict(mandatory=True), + out_file=dict(extensions=None, usedefault=True), ) inputs = SignalExtraction.input_spec() @@ -36,11 +20,7 @@ def test_SignalExtraction_inputs(): def test_SignalExtraction_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = SignalExtraction.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_SlicerCommandLine.py b/nipype/interfaces/tests/test_auto_SlicerCommandLine.py index c13466483b..080db16b53 100644 --- a/nipype/interfaces/tests/test_auto_SlicerCommandLine.py +++ b/nipype/interfaces/tests/test_auto_SlicerCommandLine.py @@ -4,13 +4,8 @@ def test_SlicerCommandLine_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), module=dict(), ) inputs = SlicerCommandLine.input_spec() diff --git a/nipype/interfaces/tests/test_auto_SplitNifti.py b/nipype/interfaces/tests/test_auto_SplitNifti.py index 14ccc6bdb0..3babd07241 100644 --- a/nipype/interfaces/tests/test_auto_SplitNifti.py +++ b/nipype/interfaces/tests/test_auto_SplitNifti.py @@ -4,13 +4,8 @@ def test_SplitNifti_inputs(): input_map = dict( - in_file=dict( - extensions=None, - mandatory=True, - ), - out_ext=dict( - usedefault=True, - ), + in_file=dict(extensions=None, mandatory=True), + out_ext=dict(usedefault=True), out_format=dict(), out_path=dict(), split_dim=dict(), @@ -23,9 +18,7 @@ def test_SplitNifti_inputs(): def test_SplitNifti_outputs(): - output_map = dict( - out_list=dict(), - ) + output_map = dict(out_list=dict()) outputs = SplitNifti.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_XNATSink.py b/nipype/interfaces/tests/test_auto_XNATSink.py index ec6f920f57..92234e557b 100644 --- a/nipype/interfaces/tests/test_auto_XNATSink.py +++ b/nipype/interfaces/tests/test_auto_XNATSink.py @@ -4,39 +4,17 @@ def test_XNATSink_inputs(): input_map = dict( - _outputs=dict( - usedefault=True, - ), - assessor_id=dict( - xor=["reconstruction_id"], - ), + _outputs=dict(usedefault=True), + assessor_id=dict(xor=["reconstruction_id"]), cache_dir=dict(), - config=dict( - extensions=None, - mandatory=True, - xor=["server"], - ), - experiment_id=dict( - mandatory=True, - ), - project_id=dict( - mandatory=True, - ), + config=dict(extensions=None, 
mandatory=True, xor=["server"]), + experiment_id=dict(mandatory=True), + project_id=dict(mandatory=True), pwd=dict(), - reconstruction_id=dict( - xor=["assessor_id"], - ), - server=dict( - mandatory=True, - requires=["user", "pwd"], - xor=["config"], - ), - share=dict( - usedefault=True, - ), - subject_id=dict( - mandatory=True, - ), + reconstruction_id=dict(xor=["assessor_id"]), + server=dict(mandatory=True, requires=["user", "pwd"], xor=["config"]), + share=dict(usedefault=True), + subject_id=dict(mandatory=True), user=dict(), ) inputs = XNATSink.input_spec() diff --git a/nipype/interfaces/tests/test_auto_XNATSource.py b/nipype/interfaces/tests/test_auto_XNATSource.py index f115657c9f..7114d47840 100644 --- a/nipype/interfaces/tests/test_auto_XNATSource.py +++ b/nipype/interfaces/tests/test_auto_XNATSource.py @@ -5,23 +5,11 @@ def test_XNATSource_inputs(): input_map = dict( cache_dir=dict(), - config=dict( - extensions=None, - mandatory=True, - xor=["server"], - ), + config=dict(extensions=None, mandatory=True, xor=["server"]), pwd=dict(), - query_template=dict( - mandatory=True, - ), - query_template_args=dict( - usedefault=True, - ), - server=dict( - mandatory=True, - requires=["user", "pwd"], - xor=["config"], - ), + query_template=dict(mandatory=True), + query_template_args=dict(usedefault=True), + server=dict(mandatory=True, requires=["user", "pwd"], xor=["config"]), user=dict(), ) inputs = XNATSource.input_spec() diff --git a/nipype/interfaces/utility/tests/test_auto_AssertEqual.py b/nipype/interfaces/utility/tests/test_auto_AssertEqual.py index c550a5efba..43a63eb081 100644 --- a/nipype/interfaces/utility/tests/test_auto_AssertEqual.py +++ b/nipype/interfaces/utility/tests/test_auto_AssertEqual.py @@ -4,14 +4,8 @@ def test_AssertEqual_inputs(): input_map = dict( - volume1=dict( - extensions=None, - mandatory=True, - ), - volume2=dict( - extensions=None, - mandatory=True, - ), + volume1=dict(extensions=None, mandatory=True), + volume2=dict(extensions=None, mandatory=True), ) inputs = AssertEqual.input_spec() diff --git a/nipype/interfaces/utility/tests/test_auto_CSVReader.py b/nipype/interfaces/utility/tests/test_auto_CSVReader.py index be24c59eb4..4aec1fbb9e 100644 --- a/nipype/interfaces/utility/tests/test_auto_CSVReader.py +++ b/nipype/interfaces/utility/tests/test_auto_CSVReader.py @@ -4,13 +4,7 @@ def test_CSVReader_inputs(): input_map = dict( - header=dict( - usedefault=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), + header=dict(usedefault=True), in_file=dict(extensions=None, mandatory=True) ) inputs = CSVReader.input_spec() diff --git a/nipype/interfaces/utility/tests/test_auto_Function.py b/nipype/interfaces/utility/tests/test_auto_Function.py index fdbccc3098..790ffa1ad0 100644 --- a/nipype/interfaces/utility/tests/test_auto_Function.py +++ b/nipype/interfaces/utility/tests/test_auto_Function.py @@ -3,11 +3,7 @@ def test_Function_inputs(): - input_map = dict( - function_str=dict( - mandatory=True, - ), - ) + input_map = dict(function_str=dict(mandatory=True)) inputs = Function.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/interfaces/utility/tests/test_auto_Merge.py b/nipype/interfaces/utility/tests/test_auto_Merge.py index f9304a9897..0e3af5c80e 100644 --- a/nipype/interfaces/utility/tests/test_auto_Merge.py +++ b/nipype/interfaces/utility/tests/test_auto_Merge.py @@ -4,15 +4,9 @@ def test_Merge_inputs(): input_map = dict( - axis=dict( - usedefault=True, - ), - no_flatten=dict( - usedefault=True, - ), - 
ravel_inputs=dict( - usedefault=True, - ), + axis=dict(usedefault=True), + no_flatten=dict(usedefault=True), + ravel_inputs=dict(usedefault=True), ) inputs = Merge.input_spec() @@ -22,9 +16,7 @@ def test_Merge_inputs(): def test_Merge_outputs(): - output_map = dict( - out=dict(), - ) + output_map = dict(out=dict()) outputs = Merge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/utility/tests/test_auto_Rename.py b/nipype/interfaces/utility/tests/test_auto_Rename.py index 5b9a183796..6f426cca9c 100644 --- a/nipype/interfaces/utility/tests/test_auto_Rename.py +++ b/nipype/interfaces/utility/tests/test_auto_Rename.py @@ -4,18 +4,11 @@ def test_Rename_inputs(): input_map = dict( - format_string=dict( - mandatory=True, - ), - in_file=dict( - extensions=None, - mandatory=True, - ), + format_string=dict(mandatory=True), + in_file=dict(extensions=None, mandatory=True), keep_ext=dict(), parse_string=dict(), - use_fullpath=dict( - usedefault=True, - ), + use_fullpath=dict(usedefault=True), ) inputs = Rename.input_spec() @@ -25,11 +18,7 @@ def test_Rename_inputs(): def test_Rename_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Rename.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/utility/tests/test_auto_Select.py b/nipype/interfaces/utility/tests/test_auto_Select.py index 21dcb91670..7595b92f9a 100644 --- a/nipype/interfaces/utility/tests/test_auto_Select.py +++ b/nipype/interfaces/utility/tests/test_auto_Select.py @@ -3,14 +3,7 @@ def test_Select_inputs(): - input_map = dict( - index=dict( - mandatory=True, - ), - inlist=dict( - mandatory=True, - ), - ) + input_map = dict(index=dict(mandatory=True), inlist=dict(mandatory=True)) inputs = Select.input_spec() for key, metadata in list(input_map.items()): @@ -19,9 +12,7 @@ def test_Select_inputs(): def test_Select_outputs(): - output_map = dict( - out=dict(), - ) + output_map = dict(out=dict()) outputs = Select.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/utility/tests/test_auto_Split.py b/nipype/interfaces/utility/tests/test_auto_Split.py index 60f46fb73c..5ad771031f 100644 --- a/nipype/interfaces/utility/tests/test_auto_Split.py +++ b/nipype/interfaces/utility/tests/test_auto_Split.py @@ -4,15 +4,9 @@ def test_Split_inputs(): input_map = dict( - inlist=dict( - mandatory=True, - ), - splits=dict( - mandatory=True, - ), - squeeze=dict( - usedefault=True, - ), + inlist=dict(mandatory=True), + splits=dict(mandatory=True), + squeeze=dict(usedefault=True), ) inputs = Split.input_spec() diff --git a/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py b/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py index f06a118fc2..e0d3f0b038 100644 --- a/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py +++ b/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py @@ -4,24 +4,10 @@ def test_Vnifti2Image_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - attributes=dict( - argstr="-attr %s", - extensions=None, - position=2, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - position=1, - ), + args=dict(argstr="%s"), + attributes=dict(argstr="-attr %s", extensions=None, position=2), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=1), out_file=dict( argstr="-out %s", 
extensions=None, @@ -40,11 +26,7 @@ def test_Vnifti2Image_inputs(): def test_Vnifti2Image_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = Vnifti2Image.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/vista/tests/test_auto_VtoMat.py b/nipype/interfaces/vista/tests/test_auto_VtoMat.py index 8d2b53ab0c..d35ff8661b 100644 --- a/nipype/interfaces/vista/tests/test_auto_VtoMat.py +++ b/nipype/interfaces/vista/tests/test_auto_VtoMat.py @@ -4,19 +4,9 @@ def test_VtoMat_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="-in %s", - extensions=None, - mandatory=True, - position=1, - ), + args=dict(argstr="%s"), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=1), out_file=dict( argstr="-out %s", extensions=None, @@ -35,11 +25,7 @@ def test_VtoMat_inputs(): def test_VtoMat_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = VtoMat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/workbench/tests/test_auto_CiftiSmooth.py b/nipype/interfaces/workbench/tests/test_auto_CiftiSmooth.py index 03beac887a..47307b656b 100644 --- a/nipype/interfaces/workbench/tests/test_auto_CiftiSmooth.py +++ b/nipype/interfaces/workbench/tests/test_auto_CiftiSmooth.py @@ -4,9 +4,7 @@ def test_CiftiSmooth_inputs(): input_map = dict( - args=dict( - argstr="%s", - ), + args=dict(argstr="%s"), cerebellum_corrected_areas=dict( argstr="cerebellum-corrected-areas %s", extensions=None, @@ -14,53 +12,21 @@ def test_CiftiSmooth_inputs(): requires=["cerebellum_surf"], ), cerebellum_surf=dict( - argstr="-cerebellum-surface %s", - extensions=None, - position=9, - ), - cifti_roi=dict( - argstr="-cifti-roi %s", - extensions=None, - position=11, - ), - direction=dict( - argstr="%s", - mandatory=True, - position=3, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - fix_zeros_surf=dict( - argstr="-fix-zeros-surface", - position=13, - ), - fix_zeros_vol=dict( - argstr="-fix-zeros-volume", - position=12, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=0, - ), + argstr="-cerebellum-surface %s", extensions=None, position=9 + ), + cifti_roi=dict(argstr="-cifti-roi %s", extensions=None, position=11), + direction=dict(argstr="%s", mandatory=True, position=3), + environ=dict(nohash=True, usedefault=True), + fix_zeros_surf=dict(argstr="-fix-zeros-surface", position=13), + fix_zeros_vol=dict(argstr="-fix-zeros-volume", position=12), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), left_corrected_areas=dict( - argstr="-left-corrected-areas %s", - extensions=None, - position=6, + argstr="-left-corrected-areas %s", extensions=None, position=6 ), left_surf=dict( - argstr="-left-surface %s", - extensions=None, - mandatory=True, - position=5, - ), - merged_volume=dict( - argstr="-merged-volume", - position=14, + argstr="-left-surface %s", extensions=None, mandatory=True, position=5 ), + merged_volume=dict(argstr="-merged-volume", position=14), out_file=dict( argstr="%s", extensions=None, @@ -70,26 +36,13 @@ def test_CiftiSmooth_inputs(): position=4, ), right_corrected_areas=dict( - argstr="-right-corrected-areas %s", - extensions=None, - position=8, + 
argstr="-right-corrected-areas %s", extensions=None, position=8 ), right_surf=dict( - argstr="-right-surface %s", - extensions=None, - mandatory=True, - position=7, - ), - sigma_surf=dict( - argstr="%s", - mandatory=True, - position=1, - ), - sigma_vol=dict( - argstr="%s", - mandatory=True, - position=2, + argstr="-right-surface %s", extensions=None, mandatory=True, position=7 ), + sigma_surf=dict(argstr="%s", mandatory=True, position=1), + sigma_vol=dict(argstr="%s", mandatory=True, position=2), ) inputs = CiftiSmooth.input_spec() @@ -99,11 +52,7 @@ def test_CiftiSmooth_inputs(): def test_CiftiSmooth_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None)) outputs = CiftiSmooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/workbench/tests/test_auto_MetricResample.py b/nipype/interfaces/workbench/tests/test_auto_MetricResample.py index e7a861963b..d717054d16 100644 --- a/nipype/interfaces/workbench/tests/test_auto_MetricResample.py +++ b/nipype/interfaces/workbench/tests/test_auto_MetricResample.py @@ -4,60 +4,17 @@ def test_MetricResample_inputs(): input_map = dict( - area_metrics=dict( - argstr="-area-metrics", - position=5, - xor=["area_surfs"], - ), - area_surfs=dict( - argstr="-area-surfs", - position=5, - xor=["area_metrics"], - ), - args=dict( - argstr="%s", - ), - current_area=dict( - argstr="%s", - extensions=None, - position=6, - ), - current_sphere=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=1, - ), - environ=dict( - nohash=True, - usedefault=True, - ), - in_file=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=0, - ), - largest=dict( - argstr="-largest", - position=10, - ), - method=dict( - argstr="%s", - mandatory=True, - position=3, - ), - new_area=dict( - argstr="%s", - extensions=None, - position=7, - ), - new_sphere=dict( - argstr="%s", - extensions=None, - mandatory=True, - position=2, - ), + area_metrics=dict(argstr="-area-metrics", position=5, xor=["area_surfs"]), + area_surfs=dict(argstr="-area-surfs", position=5, xor=["area_metrics"]), + args=dict(argstr="%s"), + current_area=dict(argstr="%s", extensions=None, position=6), + current_sphere=dict(argstr="%s", extensions=None, mandatory=True, position=1), + environ=dict(nohash=True, usedefault=True), + in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), + largest=dict(argstr="-largest", position=10), + method=dict(argstr="%s", mandatory=True, position=3), + new_area=dict(argstr="%s", extensions=None, position=7), + new_sphere=dict(argstr="%s", extensions=None, mandatory=True, position=2), out_file=dict( argstr="%s", extensions=None, @@ -66,15 +23,8 @@ def test_MetricResample_inputs(): name_template="%s.out", position=4, ), - roi_metric=dict( - argstr="-current-roi %s", - extensions=None, - position=8, - ), - valid_roi_out=dict( - argstr="-valid-roi-out", - position=9, - ), + roi_metric=dict(argstr="-current-roi %s", extensions=None, position=8), + valid_roi_out=dict(argstr="-valid-roi-out", position=9), ) inputs = MetricResample.input_spec() @@ -84,14 +34,7 @@ def test_MetricResample_inputs(): def test_MetricResample_outputs(): - output_map = dict( - out_file=dict( - extensions=None, - ), - roi_file=dict( - extensions=None, - ), - ) + output_map = dict(out_file=dict(extensions=None), roi_file=dict(extensions=None)) outputs = MetricResample.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/workbench/tests/test_auto_WBCommand.py b/nipype/interfaces/workbench/tests/test_auto_WBCommand.py index d2cc9a6b96..dd0e33ed98 100644 --- a/nipype/interfaces/workbench/tests/test_auto_WBCommand.py +++ b/nipype/interfaces/workbench/tests/test_auto_WBCommand.py @@ -3,15 +3,7 @@ def test_WBCommand_inputs(): - input_map = dict( - args=dict( - argstr="%s", - ), - environ=dict( - nohash=True, - usedefault=True, - ), - ) + input_map = dict(args=dict(argstr="%s"), environ=dict(nohash=True, usedefault=True)) inputs = WBCommand.input_spec() for key, metadata in list(input_map.items()): From 674e9b0eeca082efb5322b61fea57ee89a3e4a24 Mon Sep 17 00:00:00 2001 From: Fabio Bernardoni Date: Wed, 20 Oct 2021 12:07:15 +0200 Subject: [PATCH 1114/1665] run black on the preprocess.py file to improve formatting --- nipype/interfaces/spm/preprocess.py | 160 +++++++++++++++++----------- 1 file changed, 99 insertions(+), 61 deletions(-) diff --git a/nipype/interfaces/spm/preprocess.py b/nipype/interfaces/spm/preprocess.py index 7409e016c3..28f05ee32f 100644 --- a/nipype/interfaces/spm/preprocess.py +++ b/nipype/interfaces/spm/preprocess.py @@ -46,14 +46,14 @@ class FieldMapInputSpec(SPMCommandInputSpec): desc="one of: calculatevdm, applyvdm", ) phase_file = File( - #mandatory=True, + # mandatory=True, exists=True, copyfile=False, field="subj.data.presubphasemag.phase", desc="presubstracted phase file", ) magnitude_file = File( - #mandatory=True, + # mandatory=True, exists=True, copyfile=False, field="subj.data.presubphasemag.magnitude", @@ -62,7 +62,7 @@ class FieldMapInputSpec(SPMCommandInputSpec): echo_times = traits.Tuple( traits.Float, traits.Float, - #mandatory=True, + # mandatory=True, field="subj.defaults.defaultsval.et", desc="short and long echo times", ) @@ -169,7 +169,7 @@ class FieldMapInputSpec(SPMCommandInputSpec): epi_file = File( copyfile=False, exists=True, - #mandatory=True, + # mandatory=True, field="subj.session.epi", desc="EPI to unwarp", ) @@ -196,46 +196,62 @@ class FieldMapInputSpec(SPMCommandInputSpec): ) in_files = InputMultiObject( - traits.Either(ImageFileSPM(exists=True), - traits.List(ImageFileSPM(exists=True))), - field='data.scans',mandatory=True, + traits.Either( + ImageFileSPM(exists=True), traits.List(ImageFileSPM(exists=True)) + ), + field="data.scans", + mandatory=True, copyfile=True, - desc='list of filenames to apply the vdm to') + desc="list of filenames to apply the vdm to", + ) vdmfile = File( - field='data.vdmfile', - desc='Voxel displacement map to use',mandatory=True, - copyfile=True) + field="data.vdmfile", + desc="Voxel displacement map to use", + mandatory=True, + copyfile=True, + ) distortion_direction = traits.Int( - 2, field='roptions.pedir', desc='phase encode direction input data have been acquired with', - usedefault=True) + 2, + field="roptions.pedir", + desc="phase encode direction input data have been acquired with", + usedefault=True, + ) write_which = traits.ListInt( [2, 1], - field='roptions.which', + field="roptions.which", minlen=2, maxlen=2, usedefault=True, - desc='determines which images to apply vdm to') + desc="determines which images to apply vdm to", + ) interpolation = traits.Int( - 4, field='roptions.rinterp', desc='phase encode direction input data have been acquired with', - usedefault=True) + 4, + field="roptions.rinterp", + desc="phase encode direction input data have been acquired with", + usedefault=True, + ) reslice_interp = traits.Range( low=0, high=7, - field='roptions.rinterp', - desc='degree of b-spline used for 
interpolation') + field="roptions.rinterp", + desc="degree of b-spline used for interpolation", + ) write_wrap = traits.List( traits.Int(), minlen=3, maxlen=3, - field='roptions.wrap', - desc=('Check if interpolation should wrap in [x,y,z]')) + field="roptions.wrap", + desc=("Check if interpolation should wrap in [x,y,z]"), + ) write_mask = traits.Bool( - field='roptions.mask', desc='True/False mask time series images') + field="roptions.mask", desc="True/False mask time series images" + ) out_prefix = traits.String( - 'u', - field='roptions.prefix', + "u", + field="roptions.prefix", usedefault=True, - desc='fieldmap corrected output prefix') + desc="fieldmap corrected output prefix", + ) class FieldMapOutputSpec(TraitedSpec): @@ -243,12 +259,15 @@ class FieldMapOutputSpec(TraitedSpec): out_files = OutputMultiPath( traits.Either(traits.List(File(exists=True)), File(exists=True)), - desc=('If jobtype is applyvdm, ' - 'these will be the fieldmap corrected files.' - ' Otherwise, they will be copies ' - 'of in_files that have had their ' - 'headers rewritten.')) - mean_image = File(exists=True, desc='Mean image') + desc=( + "If jobtype is applyvdm, " + "these will be the fieldmap corrected files." + " Otherwise, they will be copies " + "of in_files that have had their " + "headers rewritten." + ), + ) + mean_image = File(exists=True, desc="Mean image") class FieldMap(SPMCommand): @@ -283,75 +302,94 @@ class FieldMap(SPMCommand): def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm""" - if ((self.inputs.jobtype == "calculatevdm") and (opt in ['phase_file', 'magnitude_file', 'anat_file', 'epi_file'])): + if (self.inputs.jobtype == "calculatevdm") and ( + opt in ["phase_file", "magnitude_file", "anat_file", "epi_file"] + ): return scans_for_fname(ensure_list(val)) - if ((self.inputs.jobtype == "applyvdm") and (opt =='in_files')): + if (self.inputs.jobtype == "applyvdm") and (opt == "in_files"): return scans_for_fnames(ensure_list(val)) - if ((self.inputs.jobtype == "applyvdm") and (opt =='vdmfile')): + if (self.inputs.jobtype == "applyvdm") and (opt == "vdmfile"): return scans_for_fname(ensure_list(val)) return super(FieldMap, self)._format_arg(opt, spec, val) - def _parse_inputs(self): """validate spm fieldmap options if set to None ignore""" if self.inputs.jobtype == "applyvdm": - einputs = (super(FieldMap, self) - ._parse_inputs(skip=('jobtype','phase_file', 'magnitude_file', - 'echo_times', 'blip_direction', - 'total_readout_time','maskbrain', - 'epifm','jacobian_modulation', - 'method','unwarp_fwhm','pad','ws', - 'template','mask_fwhm','nerode','ndilate', - 'thresh','reg','epi_file','matchvdm', - 'sessname','writeunwarped', - 'anat_file','matchanat'))) + einputs = super(FieldMap, self)._parse_inputs( + skip=( + "jobtype", + "phase_file", + "magnitude_file", + "echo_times", + "blip_direction", + "total_readout_time", + "maskbrain", + "epifm", + "jacobian_modulation", + "method", + "unwarp_fwhm", + "pad", + "ws", + "template", + "mask_fwhm", + "nerode", + "ndilate", + "thresh", + "reg", + "epi_file", + "matchvdm", + "sessname", + "writeunwarped", + "anat_file", + "matchanat", + ) + ) else: - einputs = (super(FieldMap, self) - ._parse_inputs(skip=('jobtype','in_files', 'vdmfile'))) + einputs = super(FieldMap, self)._parse_inputs( + skip=("jobtype", "in_files", "vdmfile") + ) jobtype = self.inputs.jobtype - return [{'%s' % (jobtype): einputs[0]}] - + return [{"%s" % (jobtype): einputs[0]}] def _list_outputs(self): outputs = self._outputs().get() jobtype = 
self.inputs.jobtype - resliced_all = self.inputs.write_which[0] > 0 + resliced_all = self.inputs.write_which[0] > 0 resliced_mean = self.inputs.write_which[1] > 0 if jobtype == "calculatevdm": - outputs['vdm'] = fname_presuffix(self.inputs.phase_file, prefix='vdm5_sc') + outputs["vdm"] = fname_presuffix(self.inputs.phase_file, prefix="vdm5_sc") elif jobtype == "applyvdm": if resliced_mean: if isinstance(self.inputs.in_files[0], list): first_image = self.inputs.in_files[0][0] else: first_image = self.inputs.in_files[0] - outputs['mean_image'] = fname_presuffix( - first_image, prefix='meanu') + outputs["mean_image"] = fname_presuffix(first_image, prefix="meanu") if resliced_all: - outputs['out_files'] = [] + outputs["out_files"] = [] for idx, imgf in enumerate(ensure_list(self.inputs.in_files)): appliedvdm_run = [] if isinstance(imgf, list): for i, inner_imgf in enumerate(ensure_list(imgf)): - newfile = fname_presuffix(inner_imgf, - prefix=self.inputs.out_prefix) + newfile = fname_presuffix( + inner_imgf, prefix=self.inputs.out_prefix + ) appliedvdm_run.append(newfile) else: - appliedvdm_run = fname_presuffix(imgf, - prefix=self.inputs.out_prefix) - outputs['out_files'].append(appliedvdm_run) + appliedvdm_run = fname_presuffix( + imgf, prefix=self.inputs.out_prefix + ) + outputs["out_files"].append(appliedvdm_run) return outputs - return outputs - class SliceTimingInputSpec(SPMCommandInputSpec): in_files = InputMultiPath( traits.Either( From 411b58bc8e2e4afaa1158f215a71c45743a31071 Mon Sep 17 00:00:00 2001 From: Fabio Bernardoni Date: Wed, 20 Oct 2021 12:11:16 +0200 Subject: [PATCH 1115/1665] Revert "run the make check-before-commit and edited the .zenodo file" This reverts commit a57cdc28ca28861b4fa32ede55b99fe85d0b5ea8. --- .zenodo.json | 5 - Makefile | 2 +- nipype/algorithms/tests/test_auto_ACompCor.py | 72 +++- .../tests/test_auto_ActivationCount.py | 21 +- .../tests/test_auto_AddCSVColumn.py | 16 +- .../algorithms/tests/test_auto_AddCSVRow.py | 14 +- nipype/algorithms/tests/test_auto_AddNoise.py | 33 +- .../tests/test_auto_ArtifactDetect.py | 70 +++- .../tests/test_auto_CalculateMedian.py | 10 +- .../test_auto_CalculateNormalizedMoments.py | 13 +- .../tests/test_auto_ComputeDVARS.py | 78 +++- .../tests/test_auto_ComputeMeshWarp.py | 36 +- .../algorithms/tests/test_auto_CreateNifti.py | 16 +- nipype/algorithms/tests/test_auto_Distance.py | 25 +- .../tests/test_auto_FramewiseDisplacement.py | 47 ++- .../tests/test_auto_FuzzyOverlap.py | 28 +- nipype/algorithms/tests/test_auto_Gunzip.py | 13 +- nipype/algorithms/tests/test_auto_ICC.py | 21 +- .../algorithms/tests/test_auto_Matlab2CSV.py | 13 +- .../tests/test_auto_MergeCSVFiles.py | 19 +- .../algorithms/tests/test_auto_MergeROIs.py | 12 +- .../tests/test_auto_MeshWarpMaths.py | 33 +- .../tests/test_auto_ModifyAffine.py | 11 +- .../tests/test_auto_NonSteadyStateDetector.py | 11 +- .../test_auto_NormalizeProbabilityMapSet.py | 11 +- .../algorithms/tests/test_auto_P2PDistance.py | 36 +- .../algorithms/tests/test_auto_PickAtlas.py | 27 +- .../algorithms/tests/test_auto_Similarity.py | 26 +- .../tests/test_auto_SimpleThreshold.py | 13 +- .../tests/test_auto_SpecifyModel.py | 48 ++- .../tests/test_auto_SpecifySPMModel.py | 56 ++- .../tests/test_auto_SpecifySparseModel.py | 74 +++- .../algorithms/tests/test_auto_SplitROIs.py | 15 +- .../tests/test_auto_StimulusCorrelation.py | 21 +- nipype/algorithms/tests/test_auto_TCompCor.py | 76 +++- .../algorithms/tests/test_auto_WarpPoints.py | 21 +- .../afni/tests/test_auto_ABoverlap.py | 50 ++- 
.../afni/tests/test_auto_AFNICommand.py | 14 +- .../afni/tests/test_auto_AFNICommandBase.py | 10 +- .../afni/tests/test_auto_AFNIPythonCommand.py | 14 +- .../afni/tests/test_auto_AFNItoNIFTI.py | 44 +- .../afni/tests/test_auto_AlignEpiAnatPy.py | 103 +++-- .../afni/tests/test_auto_Allineate.py | 227 ++++++++--- .../afni/tests/test_auto_AutoTLRC.py | 29 +- .../afni/tests/test_auto_AutoTcorrelate.py | 48 ++- .../afni/tests/test_auto_Autobox.py | 31 +- .../afni/tests/test_auto_Automask.py | 41 +- .../afni/tests/test_auto_Axialize.py | 49 ++- .../afni/tests/test_auto_Bandpass.py | 91 ++++- .../afni/tests/test_auto_BlurInMask.py | 52 ++- .../afni/tests/test_auto_BlurToFWHM.py | 48 ++- .../afni/tests/test_auto_BrickStat.py | 55 ++- .../interfaces/afni/tests/test_auto_Bucket.py | 32 +- .../interfaces/afni/tests/test_auto_Calc.py | 62 ++- nipype/interfaces/afni/tests/test_auto_Cat.py | 50 ++- .../afni/tests/test_auto_CatMatvec.py | 41 +- .../afni/tests/test_auto_CenterMass.py | 48 ++- .../afni/tests/test_auto_ClipLevel.py | 38 +- .../afni/tests/test_auto_ConvertDset.py | 38 +- .../interfaces/afni/tests/test_auto_Copy.py | 30 +- .../afni/tests/test_auto_Deconvolve.py | 224 +++++++--- .../afni/tests/test_auto_DegreeCentrality.py | 58 ++- .../afni/tests/test_auto_Despike.py | 26 +- .../afni/tests/test_auto_Detrend.py | 26 +- nipype/interfaces/afni/tests/test_auto_Dot.py | 80 +++- nipype/interfaces/afni/tests/test_auto_ECM.py | 79 +++- .../interfaces/afni/tests/test_auto_Edge3.py | 52 ++- .../interfaces/afni/tests/test_auto_Eval.py | 62 ++- .../interfaces/afni/tests/test_auto_FWHMx.py | 79 +++- nipype/interfaces/afni/tests/test_auto_Fim.py | 35 +- .../afni/tests/test_auto_Fourier.py | 40 +- .../interfaces/afni/tests/test_auto_GCOR.py | 27 +- .../interfaces/afni/tests/test_auto_Hist.py | 44 +- .../interfaces/afni/tests/test_auto_LFCD.py | 47 ++- .../afni/tests/test_auto_LocalBistat.py | 60 ++- .../afni/tests/test_auto_Localstat.py | 72 +++- .../afni/tests/test_auto_MaskTool.py | 69 +++- .../afni/tests/test_auto_Maskave.py | 37 +- .../interfaces/afni/tests/test_auto_Means.py | 69 +++- .../interfaces/afni/tests/test_auto_Merge.py | 36 +- .../afni/tests/test_auto_NetCorr.py | 91 ++++- .../interfaces/afni/tests/test_auto_Notes.py | 53 ++- .../afni/tests/test_auto_NwarpAdjust.py | 29 +- .../afni/tests/test_auto_NwarpApply.py | 57 ++- .../afni/tests/test_auto_NwarpCat.py | 47 ++- .../afni/tests/test_auto_OneDToolPy.py | 65 ++- .../afni/tests/test_auto_OutlierCount.py | 69 +++- .../afni/tests/test_auto_QualityIndex.py | 59 ++- .../interfaces/afni/tests/test_auto_Qwarp.py | 241 ++++++++--- .../afni/tests/test_auto_QwarpPlusMinus.py | 241 ++++++++--- .../afni/tests/test_auto_ROIStats.py | 75 +++- .../interfaces/afni/tests/test_auto_ReHo.py | 56 ++- .../interfaces/afni/tests/test_auto_Refit.py | 88 +++- .../afni/tests/test_auto_Remlfit.py | 240 ++++++++--- .../afni/tests/test_auto_Resample.py | 37 +- .../afni/tests/test_auto_Retroicor.py | 58 ++- .../afni/tests/test_auto_SVMTest.py | 60 ++- .../afni/tests/test_auto_SVMTrain.py | 73 +++- nipype/interfaces/afni/tests/test_auto_Seg.py | 57 ++- .../afni/tests/test_auto_SkullStrip.py | 20 +- .../afni/tests/test_auto_Synthesize.py | 53 ++- .../interfaces/afni/tests/test_auto_TCat.py | 36 +- .../afni/tests/test_auto_TCatSubBrick.py | 38 +- .../afni/tests/test_auto_TCorr1D.py | 49 ++- .../afni/tests/test_auto_TCorrMap.py | 141 +++++-- .../afni/tests/test_auto_TCorrelate.py | 40 +- .../interfaces/afni/tests/test_auto_TNorm.py | 50 ++- .../afni/tests/test_auto_TProject.py 
| 85 +++- .../interfaces/afni/tests/test_auto_TShift.py | 73 +++- .../afni/tests/test_auto_TSmooth.py | 63 ++- .../interfaces/afni/tests/test_auto_TStat.py | 35 +- .../interfaces/afni/tests/test_auto_To3D.py | 46 ++- .../interfaces/afni/tests/test_auto_Undump.py | 59 ++- .../afni/tests/test_auto_Unifize.py | 66 ++- .../interfaces/afni/tests/test_auto_Volreg.py | 67 ++- .../interfaces/afni/tests/test_auto_Warp.py | 78 +++- .../interfaces/afni/tests/test_auto_ZCutUp.py | 30 +- .../interfaces/afni/tests/test_auto_Zcat.py | 45 ++- .../afni/tests/test_auto_Zeropad.py | 87 +++- nipype/interfaces/ants/tests/test_auto_AI.py | 87 +++- .../interfaces/ants/tests/test_auto_ANTS.py | 134 ++++-- .../ants/tests/test_auto_ANTSCommand.py | 14 +- .../ants/tests/test_auto_AffineInitializer.py | 71 +++- .../ants/tests/test_auto_ApplyTransforms.py | 72 +++- .../test_auto_ApplyTransformsToPoints.py | 35 +- .../ants/tests/test_auto_Atropos.py | 99 ++++- .../tests/test_auto_AverageAffineTransform.py | 37 +- .../ants/tests/test_auto_AverageImages.py | 44 +- .../ants/tests/test_auto_BrainExtraction.py | 141 +++++-- .../tests/test_auto_ComposeMultiTransform.py | 38 +- .../tests/test_auto_CompositeTransformUtil.py | 50 ++- .../test_auto_ConvertScalarImageToRGB.py | 83 +++- .../ants/tests/test_auto_CorticalThickness.py | 160 ++++++-- ...est_auto_CreateJacobianDeterminantImage.py | 50 ++- .../ants/tests/test_auto_CreateTiledMosaic.py | 70 +++- .../ants/tests/test_auto_DenoiseImage.py | 51 ++- .../ants/tests/test_auto_GenWarpFields.py | 85 +++- .../ants/tests/test_auto_ImageMath.py | 48 ++- .../ants/tests/test_auto_JointFusion.py | 104 +++-- .../ants/tests/test_auto_KellyKapowski.py | 76 +++- .../ants/tests/test_auto_LabelGeometry.py | 44 +- .../tests/test_auto_LaplacianThickness.py | 61 ++- .../tests/test_auto_MeasureImageSimilarity.py | 68 +++- .../ants/tests/test_auto_MultiplyImages.py | 44 +- .../tests/test_auto_N4BiasFieldCorrection.py | 88 +++- .../ants/tests/test_auto_Registration.py | 187 +++++++-- .../tests/test_auto_RegistrationSynQuick.py | 78 +++- .../tests/test_auto_ResampleImageBySpacing.py | 56 ++- .../ants/tests/test_auto_ThresholdImage.py | 73 +++- .../test_auto_WarpImageMultiTransform.py | 67 ++- ..._auto_WarpTimeSeriesImageMultiTransform.py | 67 ++- .../ants/tests/test_auto_antsIntroduction.py | 85 +++- .../tests/test_auto_buildtemplateparallel.py | 76 +++- .../base/tests/test_auto_CommandLine.py | 10 +- .../base/tests/test_auto_MpiCommandLine.py | 13 +- .../tests/test_auto_SEMLikeCommandLine.py | 10 +- .../base/tests/test_auto_StdOutCommandLine.py | 16 +- .../brainsuite/tests/test_auto_BDP.py | 180 ++++++--- .../brainsuite/tests/test_auto_Bfc.py | 127 ++++-- .../brainsuite/tests/test_auto_Bse.py | 112 +++-- .../brainsuite/tests/test_auto_Cerebro.py | 107 +++-- .../brainsuite/tests/test_auto_Cortex.py | 60 ++- .../brainsuite/tests/test_auto_Dewisp.py | 43 +- .../brainsuite/tests/test_auto_Dfs.py | 78 +++- .../brainsuite/tests/test_auto_Hemisplit.py | 74 +++- .../brainsuite/tests/test_auto_Pialmesh.py | 99 ++++- .../brainsuite/tests/test_auto_Pvc.py | 56 ++- .../brainsuite/tests/test_auto_SVReg.py | 99 +++-- .../brainsuite/tests/test_auto_Scrubmask.py | 49 ++- .../brainsuite/tests/test_auto_Skullfinder.py | 73 +++- .../brainsuite/tests/test_auto_Tca.py | 49 ++- .../tests/test_auto_ThicknessPVC.py | 14 +- .../camino/tests/test_auto_AnalyzeHeader.py | 133 ++++-- .../tests/test_auto_ComputeEigensystem.py | 47 ++- .../test_auto_ComputeFractionalAnisotropy.py | 47 ++- 
.../tests/test_auto_ComputeMeanDiffusivity.py | 47 ++- .../tests/test_auto_ComputeTensorTrace.py | 47 ++- .../camino/tests/test_auto_Conmat.py | 51 ++- .../camino/tests/test_auto_DT2NIfTI.py | 36 +- .../camino/tests/test_auto_DTIFit.py | 46 ++- .../camino/tests/test_auto_DTLUTGen.py | 76 +++- .../camino/tests/test_auto_DTMetric.py | 47 ++- .../camino/tests/test_auto_FSL2Scheme.py | 69 +++- .../camino/tests/test_auto_Image2Voxel.py | 35 +- .../camino/tests/test_auto_ImageStats.py | 38 +- .../camino/tests/test_auto_LinRecon.py | 56 ++- .../interfaces/camino/tests/test_auto_MESD.py | 67 ++- .../camino/tests/test_auto_ModelFit.py | 92 ++++- .../camino/tests/test_auto_NIfTIDT2Camino.py | 56 ++- .../camino/tests/test_auto_PicoPDFs.py | 60 ++- .../camino/tests/test_auto_ProcStreamlines.py | 173 ++++++-- .../camino/tests/test_auto_QBallMX.py | 53 ++- .../camino/tests/test_auto_SFLUTGen.py | 64 ++- .../camino/tests/test_auto_SFPICOCalibData.py | 86 +++- .../camino/tests/test_auto_SFPeaks.py | 87 +++- .../camino/tests/test_auto_Shredder.py | 47 ++- .../camino/tests/test_auto_Track.py | 113 ++++-- .../camino/tests/test_auto_TrackBallStick.py | 113 ++++-- .../camino/tests/test_auto_TrackBayesDirac.py | 153 +++++-- .../tests/test_auto_TrackBedpostxDeter.py | 123 ++++-- .../tests/test_auto_TrackBedpostxProba.py | 128 ++++-- .../camino/tests/test_auto_TrackBootstrap.py | 138 +++++-- .../camino/tests/test_auto_TrackDT.py | 113 ++++-- .../camino/tests/test_auto_TrackPICo.py | 122 ++++-- .../camino/tests/test_auto_TractShredder.py | 47 ++- .../camino/tests/test_auto_VtkStreamlines.py | 70 +++- .../tests/test_auto_Camino2Trackvis.py | 60 ++- .../tests/test_auto_Trackvis2Camino.py | 35 +- .../tests/test_auto_CAT12SANLMDenoising.py | 61 ++- .../cat12/tests/test_auto_CAT12Segment.py | 276 ++++++++++--- ...auto_ExtractAdditionalSurfaceParameters.py | 45 ++- ...est_auto_ExtractROIBasedSurfaceMeasures.py | 40 +- .../cmtk/tests/test_auto_AverageNetworks.py | 28 +- .../cmtk/tests/test_auto_CFFConverter.py | 19 +- .../cmtk/tests/test_auto_CreateMatrix.py | 114 ++++-- .../cmtk/tests/test_auto_CreateNodes.py | 21 +- .../cmtk/tests/test_auto_MergeCNetworks.py | 14 +- .../tests/test_auto_NetworkBasedStatistic.py | 44 +- .../cmtk/tests/test_auto_NetworkXMetrics.py | 76 +++- .../cmtk/tests/test_auto_Parcellate.py | 49 ++- .../interfaces/cmtk/tests/test_auto_ROIGen.py | 37 +- .../tests/test_auto_DTIRecon.py | 102 +++-- .../tests/test_auto_DTITracker.py | 113 ++++-- .../tests/test_auto_HARDIMat.py | 61 ++- .../tests/test_auto_ODFRecon.py | 98 ++++- .../tests/test_auto_ODFTracker.py | 128 ++++-- .../tests/test_auto_SplineFilter.py | 35 +- .../tests/test_auto_TrackMerge.py | 28 +- .../dipy/tests/test_auto_APMQball.py | 29 +- nipype/interfaces/dipy/tests/test_auto_CSD.py | 48 ++- nipype/interfaces/dipy/tests/test_auto_DTI.py | 47 ++- .../dipy/tests/test_auto_Denoise.py | 36 +- .../tests/test_auto_DipyDiffusionInterface.py | 19 +- .../tests/test_auto_EstimateResponseSH.py | 63 ++- .../dipy/tests/test_auto_RESTORE.py | 55 ++- .../dipy/tests/test_auto_Resample.py | 16 +- .../tests/test_auto_SimulateMultiTensor.py | 93 ++++- .../tests/test_auto_StreamlineTractography.py | 71 +++- .../dipy/tests/test_auto_TensorMode.py | 29 +- .../dipy/tests/test_auto_TrackDensityMap.py | 24 +- .../dtitk/tests/test_auto_AffScalarVol.py | 47 ++- .../tests/test_auto_AffSymTensor3DVol.py | 52 ++- .../dtitk/tests/test_auto_Affine.py | 52 ++- .../dtitk/tests/test_auto_AffineTask.py | 52 ++- .../dtitk/tests/test_auto_BinThresh.py | 50 ++- 
.../dtitk/tests/test_auto_BinThreshTask.py | 50 ++- .../dtitk/tests/test_auto_CommandLineDtitk.py | 10 +- .../dtitk/tests/test_auto_ComposeXfm.py | 33 +- .../dtitk/tests/test_auto_ComposeXfmTask.py | 33 +- .../dtitk/tests/test_auto_Diffeo.py | 56 ++- .../dtitk/tests/test_auto_DiffeoScalarVol.py | 51 ++- .../tests/test_auto_DiffeoSymTensor3DVol.py | 61 ++- .../dtitk/tests/test_auto_DiffeoTask.py | 56 ++- .../interfaces/dtitk/tests/test_auto_Rigid.py | 52 ++- .../dtitk/tests/test_auto_RigidTask.py | 52 ++- .../dtitk/tests/test_auto_SVAdjustVoxSp.py | 35 +- .../tests/test_auto_SVAdjustVoxSpTask.py | 35 +- .../dtitk/tests/test_auto_SVResample.py | 40 +- .../dtitk/tests/test_auto_SVResampleTask.py | 40 +- .../tests/test_auto_TVAdjustOriginTask.py | 35 +- .../dtitk/tests/test_auto_TVAdjustVoxSp.py | 35 +- .../tests/test_auto_TVAdjustVoxSpTask.py | 35 +- .../dtitk/tests/test_auto_TVResample.py | 44 +- .../dtitk/tests/test_auto_TVResampleTask.py | 44 +- .../dtitk/tests/test_auto_TVtool.py | 31 +- .../dtitk/tests/test_auto_TVtoolTask.py | 31 +- .../dtitk/tests/test_auto_affScalarVolTask.py | 47 ++- .../tests/test_auto_affSymTensor3DVolTask.py | 52 ++- .../tests/test_auto_diffeoScalarVolTask.py | 51 ++- .../test_auto_diffeoSymTensor3DVolTask.py | 61 ++- .../elastix/tests/test_auto_AnalyzeWarp.py | 60 ++- .../elastix/tests/test_auto_ApplyWarp.py | 39 +- .../elastix/tests/test_auto_EditTransform.py | 32 +- .../elastix/tests/test_auto_PointsWarp.py | 39 +- .../elastix/tests/test_auto_Registration.py | 57 ++- .../tests/test_auto_AddXFormToHeader.py | 44 +- .../freesurfer/tests/test_auto_Aparc2Aseg.py | 107 ++++- .../freesurfer/tests/test_auto_Apas2Aseg.py | 28 +- .../freesurfer/tests/test_auto_ApplyMask.py | 64 ++- .../tests/test_auto_ApplyVolTransform.py | 61 ++- .../freesurfer/tests/test_auto_Binarize.py | 130 ++++-- .../freesurfer/tests/test_auto_CALabel.py | 79 +++- .../freesurfer/tests/test_auto_CANormalize.py | 52 ++- .../freesurfer/tests/test_auto_CARegister.py | 70 +++- .../test_auto_CheckTalairachAlignment.py | 27 +- .../freesurfer/tests/test_auto_Concatenate.py | 88 +++- .../tests/test_auto_ConcatenateLTA.py | 53 ++- .../freesurfer/tests/test_auto_Contrast.py | 62 ++- .../freesurfer/tests/test_auto_Curvature.py | 44 +- .../tests/test_auto_CurvatureStats.py | 59 ++- .../tests/test_auto_DICOMConvert.py | 37 +- .../freesurfer/tests/test_auto_EMRegister.py | 47 ++- .../tests/test_auto_EditWMwithAseg.py | 47 ++- .../freesurfer/tests/test_auto_EulerNumber.py | 21 +- .../tests/test_auto_ExtractMainComponent.py | 22 +- .../freesurfer/tests/test_auto_FSCommand.py | 9 +- .../tests/test_auto_FSCommandOpenMP.py | 9 +- .../tests/test_auto_FSScriptCommand.py | 9 +- .../freesurfer/tests/test_auto_FitMSParams.py | 33 +- .../freesurfer/tests/test_auto_FixTopology.py | 69 +++- .../tests/test_auto_FuseSegmentations.py | 47 ++- .../freesurfer/tests/test_auto_GLMFit.py | 280 ++++++++++--- .../freesurfer/tests/test_auto_ImageInfo.py | 19 +- .../freesurfer/tests/test_auto_Jacobian.py | 29 +- .../freesurfer/tests/test_auto_LTAConvert.py | 72 +++- .../freesurfer/tests/test_auto_Label2Annot.py | 54 ++- .../freesurfer/tests/test_auto_Label2Label.py | 62 ++- .../freesurfer/tests/test_auto_Label2Vol.py | 75 +++- .../tests/test_auto_MNIBiasCorrection.py | 56 ++- .../freesurfer/tests/test_auto_MPRtoMNI305.py | 33 +- .../freesurfer/tests/test_auto_MRIConvert.py | 359 ++++++++++++---- .../freesurfer/tests/test_auto_MRICoreg.py | 138 +++++-- .../freesurfer/tests/test_auto_MRIFill.py | 47 ++- 
.../tests/test_auto_MRIMarchingCubes.py | 41 +- .../freesurfer/tests/test_auto_MRIPretess.py | 48 ++- .../freesurfer/tests/test_auto_MRISPreproc.py | 83 +++- .../tests/test_auto_MRISPreprocReconAll.py | 93 ++++- .../tests/test_auto_MRITessellate.py | 43 +- .../freesurfer/tests/test_auto_MRIsCALabel.py | 71 +++- .../freesurfer/tests/test_auto_MRIsCalc.py | 51 ++- .../freesurfer/tests/test_auto_MRIsCombine.py | 27 +- .../freesurfer/tests/test_auto_MRIsConvert.py | 101 ++++- .../freesurfer/tests/test_auto_MRIsExpand.py | 74 +++- .../freesurfer/tests/test_auto_MRIsInflate.py | 34 +- .../freesurfer/tests/test_auto_MS_LDA.py | 63 ++- .../tests/test_auto_MakeAverageSubject.py | 25 +- .../tests/test_auto_MakeSurfaces.py | 125 ++++-- .../freesurfer/tests/test_auto_Normalize.py | 40 +- .../tests/test_auto_OneSampleTTest.py | 280 ++++++++++--- .../freesurfer/tests/test_auto_Paint.py | 33 +- .../tests/test_auto_ParcellationStats.py | 130 ++++-- .../tests/test_auto_ParseDICOMDir.py | 34 +- .../freesurfer/tests/test_auto_ReconAll.py | 382 ++++++++++++++---- .../freesurfer/tests/test_auto_Register.py | 51 ++- .../tests/test_auto_RegisterAVItoTalairach.py | 45 ++- .../tests/test_auto_RelabelHypointensities.py | 41 +- .../tests/test_auto_RemoveIntersection.py | 21 +- .../freesurfer/tests/test_auto_RemoveNeck.py | 40 +- .../freesurfer/tests/test_auto_Resample.py | 34 +- .../tests/test_auto_RobustRegister.py | 169 ++++++-- .../tests/test_auto_RobustTemplate.py | 73 +++- .../tests/test_auto_SampleToSurface.py | 148 +++++-- .../freesurfer/tests/test_auto_SegStats.py | 158 ++++++-- .../tests/test_auto_SegStatsReconAll.py | 214 +++++++--- .../freesurfer/tests/test_auto_SegmentCC.py | 40 +- .../freesurfer/tests/test_auto_SegmentWM.py | 29 +- .../freesurfer/tests/test_auto_Smooth.py | 58 ++- .../tests/test_auto_SmoothTessellation.py | 80 +++- .../freesurfer/tests/test_auto_Sphere.py | 34 +- .../tests/test_auto_SphericalAverage.py | 71 +++- .../tests/test_auto_Surface2VolTransform.py | 53 ++- .../tests/test_auto_SurfaceSmooth.py | 56 ++- .../tests/test_auto_SurfaceSnapshots.py | 140 +++++-- .../tests/test_auto_SurfaceTransform.py | 57 ++- .../tests/test_auto_SynthesizeFLASH.py | 58 ++- .../tests/test_auto_TalairachAVI.py | 37 +- .../freesurfer/tests/test_auto_TalairachQC.py | 23 +- .../freesurfer/tests/test_auto_Tkregister2.py | 97 ++++- .../tests/test_auto_UnpackSDICOMDir.py | 44 +- .../freesurfer/tests/test_auto_VolumeMask.py | 83 +++- .../tests/test_auto_WatershedSkullStrip.py | 44 +- .../fsl/tests/test_auto_AR1Image.py | 49 ++- .../fsl/tests/test_auto_AccuracyTester.py | 36 +- .../fsl/tests/test_auto_ApplyMask.py | 50 ++- .../fsl/tests/test_auto_ApplyTOPUP.py | 48 ++- .../fsl/tests/test_auto_ApplyWarp.py | 77 +++- .../fsl/tests/test_auto_ApplyXFM.py | 227 ++++++++--- .../interfaces/fsl/tests/test_auto_AvScale.py | 25 +- .../interfaces/fsl/tests/test_auto_B0Calc.py | 83 +++- .../fsl/tests/test_auto_BEDPOSTX5.py | 141 +++++-- nipype/interfaces/fsl/tests/test_auto_BET.py | 111 +++-- .../fsl/tests/test_auto_BinaryMaths.py | 54 ++- .../fsl/tests/test_auto_ChangeDataType.py | 44 +- .../fsl/tests/test_auto_Classifier.py | 37 +- .../interfaces/fsl/tests/test_auto_Cleaner.py | 56 ++- .../interfaces/fsl/tests/test_auto_Cluster.py | 162 ++++++-- .../interfaces/fsl/tests/test_auto_Complex.py | 75 +++- .../fsl/tests/test_auto_ContrastMgr.py | 54 ++- .../fsl/tests/test_auto_ConvertWarp.py | 89 +++- .../fsl/tests/test_auto_ConvertXFM.py | 28 +- .../fsl/tests/test_auto_CopyGeom.py | 27 +- 
.../interfaces/fsl/tests/test_auto_DTIFit.py | 137 +++++-- .../fsl/tests/test_auto_DilateImage.py | 67 ++- .../fsl/tests/test_auto_DistanceMap.py | 41 +- .../fsl/tests/test_auto_DualRegression.py | 63 ++- .../fsl/tests/test_auto_EPIDeWarp.py | 92 ++++- nipype/interfaces/fsl/tests/test_auto_Eddy.py | 282 ++++++++++--- .../fsl/tests/test_auto_EddyCorrect.py | 29 +- .../fsl/tests/test_auto_EddyQuad.py | 82 +++- .../interfaces/fsl/tests/test_auto_EpiReg.py | 132 ++++-- .../fsl/tests/test_auto_ErodeImage.py | 67 ++- .../fsl/tests/test_auto_ExtractROI.py | 70 +++- nipype/interfaces/fsl/tests/test_auto_FAST.py | 116 ++++-- nipype/interfaces/fsl/tests/test_auto_FEAT.py | 20 +- .../fsl/tests/test_auto_FEATModel.py | 42 +- .../fsl/tests/test_auto_FEATRegister.py | 19 +- .../interfaces/fsl/tests/test_auto_FIRST.py | 55 ++- .../interfaces/fsl/tests/test_auto_FLAMEO.py | 96 ++++- .../interfaces/fsl/tests/test_auto_FLIRT.py | 226 ++++++++--- .../interfaces/fsl/tests/test_auto_FNIRT.py | 224 +++++++--- .../fsl/tests/test_auto_FSLCommand.py | 9 +- .../fsl/tests/test_auto_FSLXCommand.py | 135 +++++-- .../interfaces/fsl/tests/test_auto_FUGUE.py | 156 +++++-- .../fsl/tests/test_auto_FeatureExtractor.py | 23 +- .../fsl/tests/test_auto_FilterRegressor.py | 58 ++- .../fsl/tests/test_auto_FindTheBiggest.py | 28 +- nipype/interfaces/fsl/tests/test_auto_GLM.py | 112 +++-- .../fsl/tests/test_auto_ICA_AROMA.py | 71 +++- .../fsl/tests/test_auto_ImageMaths.py | 49 ++- .../fsl/tests/test_auto_ImageMeants.py | 63 ++- .../fsl/tests/test_auto_ImageStats.py | 42 +- .../interfaces/fsl/tests/test_auto_InvWarp.py | 57 ++- .../fsl/tests/test_auto_IsotropicSmooth.py | 57 ++- .../interfaces/fsl/tests/test_auto_L2Model.py | 18 +- .../fsl/tests/test_auto_Level1Design.py | 25 +- .../interfaces/fsl/tests/test_auto_MCFLIRT.py | 115 ++++-- .../interfaces/fsl/tests/test_auto_MELODIC.py | 219 +++++++--- .../fsl/tests/test_auto_MakeDyadicVectors.py | 49 ++- .../fsl/tests/test_auto_MathsCommand.py | 43 +- .../fsl/tests/test_auto_MaxImage.py | 49 ++- .../fsl/tests/test_auto_MaxnImage.py | 49 ++- .../fsl/tests/test_auto_MeanImage.py | 49 ++- .../fsl/tests/test_auto_MedianImage.py | 49 ++- .../interfaces/fsl/tests/test_auto_Merge.py | 32 +- .../fsl/tests/test_auto_MinImage.py | 49 ++- .../fsl/tests/test_auto_MotionOutliers.py | 48 ++- .../fsl/tests/test_auto_MultiImageMaths.py | 53 ++- .../tests/test_auto_MultipleRegressDesign.py | 24 +- .../interfaces/fsl/tests/test_auto_Overlay.py | 74 +++- .../interfaces/fsl/tests/test_auto_PRELUDE.py | 77 +++- .../fsl/tests/test_auto_PercentileImage.py | 54 ++- .../fsl/tests/test_auto_PlotMotionParams.py | 41 +- .../fsl/tests/test_auto_PlotTimeSeries.py | 88 +++- .../fsl/tests/test_auto_PowerSpectrum.py | 28 +- .../fsl/tests/test_auto_PrepareFieldmap.py | 54 ++- .../fsl/tests/test_auto_ProbTrackX.py | 178 ++++++-- .../fsl/tests/test_auto_ProbTrackX2.py | 259 +++++++++--- .../fsl/tests/test_auto_ProjThresh.py | 25 +- .../fsl/tests/test_auto_Randomise.py | 129 ++++-- .../fsl/tests/test_auto_Reorient2Std.py | 28 +- .../fsl/tests/test_auto_RobustFOV.py | 27 +- nipype/interfaces/fsl/tests/test_auto_SMM.py | 26 +- .../interfaces/fsl/tests/test_auto_SUSAN.py | 58 ++- .../interfaces/fsl/tests/test_auto_SigLoss.py | 40 +- .../interfaces/fsl/tests/test_auto_Slice.py | 24 +- .../fsl/tests/test_auto_SliceTimer.py | 57 ++- .../interfaces/fsl/tests/test_auto_Slicer.py | 87 +++- .../interfaces/fsl/tests/test_auto_Smooth.py | 22 +- .../fsl/tests/test_auto_SmoothEstimate.py | 39 +- 
.../fsl/tests/test_auto_SpatialFilter.py | 67 ++- .../interfaces/fsl/tests/test_auto_Split.py | 31 +- .../fsl/tests/test_auto_StdImage.py | 49 ++- .../fsl/tests/test_auto_SwapDimensions.py | 34 +- .../interfaces/fsl/tests/test_auto_TOPUP.py | 124 ++++-- .../fsl/tests/test_auto_TemporalFilter.py | 55 ++- .../fsl/tests/test_auto_Threshold.py | 57 ++- .../fsl/tests/test_auto_TractSkeleton.py | 58 ++- .../fsl/tests/test_auto_Training.py | 31 +- .../fsl/tests/test_auto_TrainingSetCreator.py | 16 +- .../fsl/tests/test_auto_UnaryMaths.py | 49 ++- .../interfaces/fsl/tests/test_auto_VecReg.py | 68 +++- .../fsl/tests/test_auto_WarpPoints.py | 56 ++- .../fsl/tests/test_auto_WarpPointsFromStd.py | 56 ++- .../fsl/tests/test_auto_WarpPointsToStd.py | 61 ++- .../fsl/tests/test_auto_WarpUtils.py | 54 ++- .../fsl/tests/test_auto_XFibres5.py | 140 +++++-- .../minc/tests/test_auto_Average.py | 99 ++++- .../interfaces/minc/tests/test_auto_BBox.py | 55 ++- .../interfaces/minc/tests/test_auto_Beast.py | 106 ++++- .../minc/tests/test_auto_BestLinReg.py | 39 +- .../minc/tests/test_auto_BigAverage.py | 46 ++- .../interfaces/minc/tests/test_auto_Blob.py | 38 +- .../interfaces/minc/tests/test_auto_Blur.py | 81 +++- .../interfaces/minc/tests/test_auto_Calc.py | 86 +++- .../minc/tests/test_auto_Convert.py | 43 +- .../interfaces/minc/tests/test_auto_Copy.py | 32 +- .../interfaces/minc/tests/test_auto_Dump.py | 64 ++- .../minc/tests/test_auto_Extract.py | 75 +++- .../minc/tests/test_auto_Gennlxfm.py | 38 +- .../interfaces/minc/tests/test_auto_Math.py | 182 ++++++--- .../interfaces/minc/tests/test_auto_NlpFit.py | 64 ++- .../interfaces/minc/tests/test_auto_Norm.py | 74 +++- nipype/interfaces/minc/tests/test_auto_Pik.py | 112 +++-- .../minc/tests/test_auto_Resample.py | 126 ++++-- .../minc/tests/test_auto_Reshape.py | 35 +- .../interfaces/minc/tests/test_auto_ToEcat.py | 54 ++- .../interfaces/minc/tests/test_auto_ToRaw.py | 53 ++- .../minc/tests/test_auto_VolSymm.py | 66 ++- .../minc/tests/test_auto_Volcentre.py | 43 +- .../interfaces/minc/tests/test_auto_Voliso.py | 43 +- .../interfaces/minc/tests/test_auto_Volpad.py | 51 ++- .../interfaces/minc/tests/test_auto_XfmAvg.py | 55 ++- .../minc/tests/test_auto_XfmConcat.py | 32 +- .../minc/tests/test_auto_XfmInvert.py | 39 +- .../test_auto_JistBrainMgdmSegmentation.py | 132 ++++-- ...est_auto_JistBrainMp2rageDuraEstimation.py | 55 ++- ...est_auto_JistBrainMp2rageSkullStripping.py | 86 +++- .../test_auto_JistBrainPartialVolumeFilter.py | 50 ++- ...est_auto_JistCortexSurfaceMeshInflation.py | 72 +++- .../test_auto_JistIntensityMp2rageMasking.py | 89 +++- .../test_auto_JistLaminarProfileCalculator.py | 51 ++- .../test_auto_JistLaminarProfileGeometry.py | 58 ++- .../test_auto_JistLaminarProfileSampling.py | 58 ++- .../test_auto_JistLaminarROIAveraging.py | 56 ++- ...test_auto_JistLaminarVolumetricLayering.py | 99 +++-- ...test_auto_MedicAlgorithmImageCalculator.py | 51 ++- .../test_auto_MedicAlgorithmLesionToads.py | 197 ++++++--- .../test_auto_MedicAlgorithmMipavReorient.py | 69 +++- .../mipav/tests/test_auto_MedicAlgorithmN3.py | 80 +++- .../test_auto_MedicAlgorithmSPECTRE2010.py | 245 ++++++++--- ...uto_MedicAlgorithmThresholdToBinaryMask.py | 48 ++- .../mipav/tests/test_auto_RandomVol.py | 73 +++- .../mne/tests/test_auto_WatershedBEM.py | 64 ++- ..._auto_ConstrainedSphericalDeconvolution.py | 85 +++- .../test_auto_DWI2SphericalHarmonicsImage.py | 43 +- .../mrtrix/tests/test_auto_DWI2Tensor.py | 54 ++- ...est_auto_DiffusionTensorStreamlineTrack.py | 108 +++-- 
.../tests/test_auto_Directions2Amplitude.py | 48 ++- .../mrtrix/tests/test_auto_Erode.py | 48 ++- .../tests/test_auto_EstimateResponseForSH.py | 59 ++- .../mrtrix/tests/test_auto_FSL2MRTrix.py | 33 +- .../mrtrix/tests/test_auto_FilterTracks.py | 53 ++- .../mrtrix/tests/test_auto_FindShPeaks.py | 59 ++- .../tests/test_auto_GenerateDirections.py | 41 +- .../test_auto_GenerateWhiteMatterMask.py | 43 +- .../mrtrix/tests/test_auto_MRConvert.py | 83 +++- .../mrtrix/tests/test_auto_MRMultiply.py | 38 +- .../mrtrix/tests/test_auto_MRTransform.py | 77 +++- .../mrtrix/tests/test_auto_MRTrix2TrackVis.py | 29 +- .../mrtrix/tests/test_auto_MRTrixInfo.py | 16 +- .../mrtrix/tests/test_auto_MRTrixViewer.py | 25 +- .../mrtrix/tests/test_auto_MedianFilter3D.py | 39 +- ...cSphericallyDeconvolutedStreamlineTrack.py | 107 +++-- ..._SphericallyDeconvolutedStreamlineTrack.py | 103 ++++- .../mrtrix/tests/test_auto_StreamlineTrack.py | 103 ++++- .../test_auto_Tensor2ApparentDiffusion.py | 39 +- .../test_auto_Tensor2FractionalAnisotropy.py | 39 +- .../mrtrix/tests/test_auto_Tensor2Vector.py | 39 +- .../mrtrix/tests/test_auto_Threshold.py | 57 ++- .../mrtrix/tests/test_auto_Tracks2Prob.py | 62 ++- .../mrtrix3/tests/test_auto_ACTPrepareFSL.py | 28 +- .../mrtrix3/tests/test_auto_BrainMask.py | 57 ++- .../tests/test_auto_BuildConnectome.py | 77 +++- .../mrtrix3/tests/test_auto_ComputeTDI.py | 103 ++++- ..._auto_ConstrainedSphericalDeconvolution.py | 132 ++++-- .../mrtrix3/tests/test_auto_DWIBiasCorrect.py | 78 +++- .../mrtrix3/tests/test_auto_DWIDenoise.py | 64 ++- .../mrtrix3/tests/test_auto_DWIExtract.py | 75 +++- .../mrtrix3/tests/test_auto_DWIPreproc.py | 119 ++++-- .../mrtrix3/tests/test_auto_EstimateFOD.py | 135 +++++-- .../mrtrix3/tests/test_auto_FitTensor.py | 77 +++- .../mrtrix3/tests/test_auto_Generate5tt.py | 64 ++- .../mrtrix3/tests/test_auto_LabelConfig.py | 64 ++- .../mrtrix3/tests/test_auto_LabelConvert.py | 51 ++- .../mrtrix3/tests/test_auto_MRConvert.py | 77 +++- .../mrtrix3/tests/test_auto_MRDeGibbs.py | 74 +++- .../mrtrix3/tests/test_auto_MRMath.py | 68 +++- .../mrtrix3/tests/test_auto_MRResize.py | 68 +++- .../mrtrix3/tests/test_auto_MRTrix3Base.py | 10 +- .../mrtrix3/tests/test_auto_Mesh2PVE.py | 40 +- .../tests/test_auto_ReplaceFSwithFIRST.py | 41 +- .../mrtrix3/tests/test_auto_ResponseSD.py | 101 ++++- .../mrtrix3/tests/test_auto_SH2Amp.py | 33 +- .../mrtrix3/tests/test_auto_SHConv.py | 29 +- .../mrtrix3/tests/test_auto_TCK2VTK.py | 44 +- .../mrtrix3/tests/test_auto_TensorMetrics.py | 67 ++- .../mrtrix3/tests/test_auto_Tractography.py | 212 +++++++--- .../niftyfit/tests/test_auto_DwiTool.py | 75 +++- .../niftyfit/tests/test_auto_FitAsl.py | 166 ++++++-- .../niftyfit/tests/test_auto_FitDwi.py | 179 ++++++-- .../niftyfit/tests/test_auto_FitQt1.py | 164 ++++++-- .../tests/test_auto_NiftyFitCommand.py | 10 +- .../tests/test_auto_NiftyRegCommand.py | 14 +- .../niftyreg/tests/test_auto_RegAladin.py | 125 ++++-- .../niftyreg/tests/test_auto_RegAverage.py | 32 +- .../niftyreg/tests/test_auto_RegF3D.py | 230 ++++++++--- .../niftyreg/tests/test_auto_RegJacobian.py | 37 +- .../niftyreg/tests/test_auto_RegMeasure.py | 37 +- .../niftyreg/tests/test_auto_RegResample.py | 67 ++- .../niftyreg/tests/test_auto_RegTools.py | 88 +++- .../niftyreg/tests/test_auto_RegTransform.py | 49 ++- .../niftyseg/tests/test_auto_BinaryMaths.py | 33 +- .../tests/test_auto_BinaryMathsInteger.py | 39 +- .../niftyseg/tests/test_auto_BinaryStats.py | 42 +- .../niftyseg/tests/test_auto_CalcTopNCC.py | 43 +- 
.../interfaces/niftyseg/tests/test_auto_EM.py | 79 +++- .../niftyseg/tests/test_auto_FillLesions.py | 78 +++- .../niftyseg/tests/test_auto_LabelFusion.py | 88 +++- .../niftyseg/tests/test_auto_MathsCommand.py | 27 +- .../niftyseg/tests/test_auto_Merge.py | 37 +- .../tests/test_auto_NiftySegCommand.py | 10 +- .../niftyseg/tests/test_auto_PatchMatch.py | 54 ++- .../niftyseg/tests/test_auto_StatsCommand.py | 31 +- .../niftyseg/tests/test_auto_TupleMaths.py | 43 +- .../niftyseg/tests/test_auto_UnaryMaths.py | 33 +- .../niftyseg/tests/test_auto_UnaryStats.py | 37 +- .../nipy/tests/test_auto_ComputeMask.py | 15 +- .../nipy/tests/test_auto_EstimateContrast.py | 44 +- .../interfaces/nipy/tests/test_auto_FitGLM.py | 56 ++- .../nipy/tests/test_auto_Similarity.py | 26 +- .../tests/test_auto_SpaceTimeRealigner.py | 18 +- .../interfaces/nipy/tests/test_auto_Trim.py | 27 +- .../tests/test_auto_CoherenceAnalyzer.py | 45 ++- ...t_auto_BRAINSPosteriorToContinuousClass.py | 55 ++- .../brains/tests/test_auto_BRAINSTalairach.py | 69 +++- .../tests/test_auto_BRAINSTalairachMask.py | 43 +- .../tests/test_auto_GenerateEdgeMapImage.py | 54 ++- .../tests/test_auto_GeneratePurePlugMask.py | 33 +- .../test_auto_HistogramMatchingFilter.py | 58 ++- .../brains/tests/test_auto_SimilarityIndex.py | 28 +- .../diffusion/tests/test_auto_DWIConvert.py | 106 +++-- .../tests/test_auto_compareTractInclusion.py | 43 +- .../diffusion/tests/test_auto_dtiaverage.py | 32 +- .../diffusion/tests/test_auto_dtiestim.py | 105 +++-- .../diffusion/tests/test_auto_dtiprocess.py | 202 ++++++--- .../tests/test_auto_extractNrrdVectorIndex.py | 37 +- .../tests/test_auto_gtractAnisotropyMap.py | 33 +- .../tests/test_auto_gtractAverageBvalues.py | 37 +- .../tests/test_auto_gtractClipAnisotropy.py | 37 +- .../tests/test_auto_gtractCoRegAnatomy.py | 116 ++++-- .../tests/test_auto_gtractConcatDwi.py | 32 +- .../test_auto_gtractCopyImageOrientation.py | 34 +- .../tests/test_auto_gtractCoregBvalues.py | 84 +++- .../tests/test_auto_gtractCostFastMarching.py | 59 ++- .../tests/test_auto_gtractCreateGuideFiber.py | 37 +- .../test_auto_gtractFastMarchingTracking.py | 74 +++- .../tests/test_auto_gtractFiberTracking.py | 129 ++++-- .../tests/test_auto_gtractImageConformity.py | 34 +- .../test_auto_gtractInvertBSplineTransform.py | 39 +- ...test_auto_gtractInvertDisplacementField.py | 38 +- .../test_auto_gtractInvertRigidTransform.py | 29 +- .../test_auto_gtractResampleAnisotropy.py | 39 +- .../tests/test_auto_gtractResampleB0.py | 47 ++- .../test_auto_gtractResampleCodeImage.py | 43 +- .../test_auto_gtractResampleDWIInPlace.py | 59 ++- .../tests/test_auto_gtractResampleFibers.py | 39 +- .../diffusion/tests/test_auto_gtractTensor.py | 68 +++- ...auto_gtractTransformToDisplacementField.py | 32 +- .../diffusion/tests/test_auto_maxcurvature.py | 33 +- .../tests/test_auto_UKFTractography.py | 152 +++++-- .../tests/test_auto_fiberprocess.py | 78 +++- .../tests/test_auto_fiberstats.py | 18 +- .../tests/test_auto_fibertrack.py | 70 +++- .../filtering/tests/test_auto_CannyEdge.py | 37 +- ...to_CannySegmentationLevelSetImageFilter.py | 56 ++- .../filtering/tests/test_auto_DilateImage.py | 34 +- .../filtering/tests/test_auto_DilateMask.py | 38 +- .../filtering/tests/test_auto_DistanceMaps.py | 34 +- .../test_auto_DumpBinaryTrainingVectors.py | 19 +- .../filtering/tests/test_auto_ErodeImage.py | 34 +- .../tests/test_auto_FlippedDifference.py | 30 +- .../test_auto_GenerateBrainClippedImage.py | 34 +- .../test_auto_GenerateSummedGradientImage.py | 38 +- 
.../tests/test_auto_GenerateTestImage.py | 37 +- ...GradientAnisotropicDiffusionImageFilter.py | 37 +- .../tests/test_auto_HammerAttributeCreator.py | 36 +- .../tests/test_auto_NeighborhoodMean.py | 34 +- .../tests/test_auto_NeighborhoodMedian.py | 34 +- .../tests/test_auto_STAPLEAnalysis.py | 28 +- .../test_auto_TextureFromNoiseImageFilter.py | 29 +- .../tests/test_auto_TextureMeasureFilter.py | 38 +- .../tests/test_auto_UnbiasedNonLocalMeans.py | 50 ++- .../legacy/tests/test_auto_scalartransform.py | 48 ++- .../tests/test_auto_BRAINSDemonWarp.py | 191 ++++++--- .../registration/tests/test_auto_BRAINSFit.py | 321 +++++++++++---- .../tests/test_auto_BRAINSResample.py | 65 ++- .../tests/test_auto_BRAINSResize.py | 33 +- .../test_auto_BRAINSTransformFromFiducials.py | 46 ++- .../tests/test_auto_VBRAINSDemonWarp.py | 194 ++++++--- .../segmentation/tests/test_auto_BRAINSABC.py | 169 ++++++-- .../test_auto_BRAINSConstellationDetector.py | 217 +++++++--- ...BRAINSCreateLabelMapFromProbabilityMaps.py | 49 ++- .../segmentation/tests/test_auto_BRAINSCut.py | 79 +++- .../tests/test_auto_BRAINSMultiSTAPLE.py | 48 ++- .../tests/test_auto_BRAINSROIAuto.py | 63 ++- ...t_auto_BinaryMaskEditorBasedOnLandmarks.py | 42 +- .../segmentation/tests/test_auto_ESLR.py | 53 ++- .../semtools/tests/test_auto_DWICompare.py | 19 +- .../tests/test_auto_DWISimpleCompare.py | 23 +- ...o_GenerateCsfClippedFromClassifiedImage.py | 25 +- .../tests/test_auto_BRAINSAlignMSP.py | 66 ++- .../tests/test_auto_BRAINSClipInferior.py | 37 +- .../test_auto_BRAINSConstellationModeler.py | 68 +++- .../tests/test_auto_BRAINSEyeDetector.py | 33 +- ...est_auto_BRAINSInitializedControlPoints.py | 43 +- .../test_auto_BRAINSLandmarkInitializer.py | 29 +- .../test_auto_BRAINSLinearModelerEPCA.py | 18 +- .../tests/test_auto_BRAINSLmkTransform.py | 47 ++- .../utilities/tests/test_auto_BRAINSMush.py | 94 ++++- .../tests/test_auto_BRAINSSnapShotWriter.py | 42 +- .../tests/test_auto_BRAINSTransformConvert.py | 44 +- ...st_auto_BRAINSTrimForegroundInDirection.py | 49 ++- .../tests/test_auto_CleanUpOverlapLabels.py | 20 +- .../tests/test_auto_FindCenterOfBrain.py | 103 +++-- ...auto_GenerateLabelMapFromProbabilityMap.py | 28 +- .../tests/test_auto_ImageRegionPlotter.py | 49 ++- .../tests/test_auto_JointHistogram.py | 33 +- .../tests/test_auto_ShuffleVectorsModule.py | 25 +- .../utilities/tests/test_auto_fcsv_to_hdf5.py | 41 +- .../tests/test_auto_insertMidACPCpoint.py | 25 +- ...test_auto_landmarksConstellationAligner.py | 23 +- ...test_auto_landmarksConstellationWeights.py | 35 +- .../diffusion/tests/test_auto_DTIexport.py | 28 +- .../diffusion/tests/test_auto_DTIimport.py | 32 +- .../test_auto_DWIJointRicianLMMSEFilter.py | 46 ++- .../tests/test_auto_DWIRicianLMMSEFilter.py | 70 +++- .../tests/test_auto_DWIToDTIEstimation.py | 50 ++- ..._auto_DiffusionTensorScalarMeasurements.py | 32 +- ...est_auto_DiffusionWeightedVolumeMasking.py | 45 ++- .../tests/test_auto_ResampleDTIVolume.py | 131 ++++-- .../test_auto_TractographyLabelMapSeeding.py | 89 +++- .../tests/test_auto_AddScalarVolumes.py | 38 +- .../tests/test_auto_CastScalarVolume.py | 32 +- .../tests/test_auto_CheckerBoardFilter.py | 39 +- ...test_auto_CurvatureAnisotropicDiffusion.py | 40 +- .../tests/test_auto_ExtractSkeleton.py | 44 +- .../test_auto_GaussianBlurImageFilter.py | 32 +- .../test_auto_GradientAnisotropicDiffusion.py | 40 +- .../test_auto_GrayscaleFillHoleImageFilter.py | 28 +- ...test_auto_GrayscaleGrindPeakImageFilter.py | 28 +- .../tests/test_auto_HistogramMatching.py | 46 ++- 
.../tests/test_auto_ImageLabelCombine.py | 38 +- .../tests/test_auto_MaskScalarVolume.py | 42 +- .../tests/test_auto_MedianImageFilter.py | 33 +- .../tests/test_auto_MultiplyScalarVolumes.py | 38 +- .../test_auto_N4ITKBiasFieldCorrection.py | 72 +++- ...test_auto_ResampleScalarVectorDWIVolume.py | 123 ++++-- .../tests/test_auto_SubtractScalarVolumes.py | 38 +- .../tests/test_auto_ThresholdScalarVolume.py | 48 ++- ...auto_VotingBinaryHoleFillingImageFilter.py | 45 ++- ...est_auto_DWIUnbiasedNonLocalMeansFilter.py | 51 ++- .../tests/test_auto_AffineRegistration.py | 66 ++- ...test_auto_BSplineDeformableRegistration.py | 79 +++- .../test_auto_BSplineToDeformationField.py | 30 +- .../test_auto_ExpertAutomatedRegistration.py | 136 +++++-- .../tests/test_auto_LinearRegistration.py | 72 +++- ..._auto_MultiResolutionAffineRegistration.py | 67 ++- .../test_auto_OtsuThresholdImageFilter.py | 40 +- .../test_auto_OtsuThresholdSegmentation.py | 44 +- .../tests/test_auto_ResampleScalarVolume.py | 37 +- .../tests/test_auto_RigidRegistration.py | 76 +++- .../test_auto_IntensityDifferenceMetric.py | 55 ++- ..._auto_PETStandardUptakeValueComputation.py | 59 ++- .../tests/test_auto_ACPCTransform.py | 32 +- .../tests/test_auto_BRAINSDemonWarp.py | 191 ++++++--- .../registration/tests/test_auto_BRAINSFit.py | 293 ++++++++++---- .../tests/test_auto_BRAINSResample.py | 65 ++- .../tests/test_auto_FiducialRegistration.py | 40 +- .../tests/test_auto_VBRAINSDemonWarp.py | 194 ++++++--- .../tests/test_auto_BRAINSROIAuto.py | 54 ++- .../tests/test_auto_EMSegmentCommandLine.py | 106 +++-- .../test_auto_RobustStatisticsSegmenter.py | 54 ++- ...st_auto_SimpleRegionGrowingSegmentation.py | 56 ++- .../tests/test_auto_DicomToNrrdConverter.py | 42 +- ...test_auto_EMSegmentTransformToNewFormat.py | 29 +- .../tests/test_auto_GrayscaleModelMaker.py | 52 ++- .../tests/test_auto_LabelMapSmoothing.py | 44 +- .../slicer/tests/test_auto_MergeModels.py | 34 +- .../slicer/tests/test_auto_ModelMaker.py | 90 ++++- .../slicer/tests/test_auto_ModelToLabelMap.py | 38 +- .../tests/test_auto_OrientScalarVolume.py | 32 +- .../tests/test_auto_ProbeVolumeWithModel.py | 34 +- .../tests/test_auto_SlicerCommandLine.py | 10 +- nipype/interfaces/spm/preprocess.py | 1 + .../spm/tests/test_auto_Analyze2nii.py | 27 +- .../spm/tests/test_auto_ApplyDeformations.py | 28 +- .../test_auto_ApplyInverseDeformation.py | 39 +- .../spm/tests/test_auto_ApplyTransform.py | 31 +- .../spm/tests/test_auto_CalcCoregAffine.py | 37 +- .../spm/tests/test_auto_Coregister.py | 62 ++- .../spm/tests/test_auto_CreateWarped.py | 35 +- .../interfaces/spm/tests/test_auto_DARTEL.py | 36 +- .../spm/tests/test_auto_DARTELNorm2MNI.py | 39 +- .../spm/tests/test_auto_DicomImport.py | 38 +- .../spm/tests/test_auto_EstimateContrast.py | 41 +- .../spm/tests/test_auto_EstimateModel.py | 43 +- .../spm/tests/test_auto_FactorialDesign.py | 46 ++- .../spm/tests/test_auto_FieldMap.py | 142 +++++-- .../spm/tests/test_auto_Level1Design.py | 72 +++- .../tests/test_auto_MultiChannelNewSegment.py | 33 +- .../test_auto_MultipleRegressionDesign.py | 60 ++- .../spm/tests/test_auto_NewSegment.py | 39 +- .../spm/tests/test_auto_Normalize.py | 84 +++- .../spm/tests/test_auto_Normalize12.py | 63 ++- .../tests/test_auto_OneSampleTTestDesign.py | 51 ++- .../spm/tests/test_auto_PairedTTestDesign.py | 59 ++- .../interfaces/spm/tests/test_auto_Realign.py | 76 +++- .../spm/tests/test_auto_RealignUnwarp.py | 121 ++++-- .../interfaces/spm/tests/test_auto_Reslice.py | 33 +- 
.../spm/tests/test_auto_ResliceToReference.py | 35 +- .../spm/tests/test_auto_SPMCommand.py | 9 +- .../interfaces/spm/tests/test_auto_Segment.py | 126 ++++-- .../spm/tests/test_auto_SliceTiming.py | 49 ++- .../interfaces/spm/tests/test_auto_Smooth.py | 36 +- .../spm/tests/test_auto_Threshold.py | 61 ++- .../tests/test_auto_ThresholdStatistics.py | 33 +- .../tests/test_auto_TwoSampleTTestDesign.py | 64 ++- .../spm/tests/test_auto_VBMSegment.py | 150 +++++-- .../tests/test_auto_BIDSDataGrabber.py | 13 +- nipype/interfaces/tests/test_auto_Bru2.py | 42 +- nipype/interfaces/tests/test_auto_C3d.py | 66 ++- .../tests/test_auto_C3dAffineTool.py | 44 +- nipype/interfaces/tests/test_auto_CopyMeta.py | 16 +- .../interfaces/tests/test_auto_DataFinder.py | 12 +- .../interfaces/tests/test_auto_DataGrabber.py | 16 +- nipype/interfaces/tests/test_auto_DataSink.py | 16 +- nipype/interfaces/tests/test_auto_Dcm2nii.py | 97 ++++- nipype/interfaces/tests/test_auto_Dcm2niix.py | 102 ++++- nipype/interfaces/tests/test_auto_DcmStack.py | 18 +- .../interfaces/tests/test_auto_ExportFile.py | 20 +- .../tests/test_auto_FreeSurferSource.py | 186 +++++++-- .../tests/test_auto_GroupAndStack.py | 16 +- .../tests/test_auto_JSONFileGrabber.py | 7 +- .../tests/test_auto_JSONFileSink.py | 18 +- .../interfaces/tests/test_auto_LookupMeta.py | 8 +- .../tests/test_auto_MatlabCommand.py | 59 ++- .../interfaces/tests/test_auto_MergeNifti.py | 14 +- nipype/interfaces/tests/test_auto_MeshFix.py | 127 ++++-- .../interfaces/tests/test_auto_MySQLSink.py | 14 +- nipype/interfaces/tests/test_auto_PETPVC.py | 79 +++- .../interfaces/tests/test_auto_Quickshear.py | 34 +- nipype/interfaces/tests/test_auto_RCommand.py | 24 +- nipype/interfaces/tests/test_auto_Reorient.py | 17 +- nipype/interfaces/tests/test_auto_Rescale.py | 20 +- .../tests/test_auto_S3DataGrabber.py | 28 +- .../interfaces/tests/test_auto_SQLiteSink.py | 9 +- .../tests/test_auto_SSHDataGrabber.py | 36 +- .../interfaces/tests/test_auto_SelectFiles.py | 12 +- .../tests/test_auto_SignalExtraction.py | 36 +- .../tests/test_auto_SlicerCommandLine.py | 9 +- .../interfaces/tests/test_auto_SplitNifti.py | 13 +- nipype/interfaces/tests/test_auto_XNATSink.py | 40 +- .../interfaces/tests/test_auto_XNATSource.py | 20 +- .../utility/tests/test_auto_AssertEqual.py | 10 +- .../utility/tests/test_auto_CSVReader.py | 8 +- .../utility/tests/test_auto_Function.py | 6 +- .../utility/tests/test_auto_Merge.py | 16 +- .../utility/tests/test_auto_Rename.py | 19 +- .../utility/tests/test_auto_Select.py | 13 +- .../utility/tests/test_auto_Split.py | 12 +- .../vista/tests/test_auto_Vnifti2Image.py | 28 +- .../vista/tests/test_auto_VtoMat.py | 22 +- .../workbench/tests/test_auto_CiftiSmooth.py | 85 +++- .../tests/test_auto_MetricResample.py | 85 +++- .../workbench/tests/test_auto_WBCommand.py | 10 +- 845 files changed, 41133 insertions(+), 11092 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index e152cb308f..0b2d0f65fb 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -313,11 +313,6 @@ "name": "Geisler, Daniel", "orcid": "0000-0003-2076-5329" }, - { - "affiliation": "Division of Psychological and Social Medicine and Developmental Neuroscience, Faculty of Medicine, Technische Universit\u00e4t Dresden, Dresden, Germany", - "name": "Bernardoni, Fabio", - "orcid": "0000-0002-5112-405X" - }, { "name": "Salvatore, John" }, diff --git a/Makefile b/Makefile index 75bc40924f..03c1152053 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ # Files are then pushed to sourceforge using rsync with a command 
like this: # rsync -e ssh nipype-0.1-py2.5.egg cburns,nipy@frs.sourceforge.net:/home/frs/project/n/ni/nipy/nipype/nipype-0.1/ -PYTHON ?= python3 +PYTHON ?= python .PHONY: zipdoc sdist egg upload_to_pypi trailing-spaces clean-pyc clean-so clean-build clean-ctags clean in inplace test-code test-coverage test html specs check-before-commit check gen-base-dockerfile gen-main-dockerfile gen-dockerfiles diff --git a/nipype/algorithms/tests/test_auto_ACompCor.py b/nipype/algorithms/tests/test_auto_ACompCor.py index 2ad9bf459f..814aa71704 100644 --- a/nipype/algorithms/tests/test_auto_ACompCor.py +++ b/nipype/algorithms/tests/test_auto_ACompCor.py @@ -4,24 +4,56 @@ def test_ACompCor_inputs(): input_map = dict( - components_file=dict(usedefault=True), - failure_mode=dict(usedefault=True), + components_file=dict( + usedefault=True, + ), + failure_mode=dict( + usedefault=True, + ), header_prefix=dict(), - high_pass_cutoff=dict(usedefault=True), - ignore_initial_volumes=dict(usedefault=True), + high_pass_cutoff=dict( + usedefault=True, + ), + ignore_initial_volumes=dict( + usedefault=True, + ), mask_files=dict(), - mask_index=dict(requires=["mask_files"], xor=["merge_method"]), + mask_index=dict( + requires=["mask_files"], + xor=["merge_method"], + ), mask_names=dict(), - merge_method=dict(requires=["mask_files"], xor=["mask_index"]), - num_components=dict(xor=["variance_threshold"]), - pre_filter=dict(usedefault=True), - realigned_file=dict(extensions=None, mandatory=True), - regress_poly_degree=dict(usedefault=True), + merge_method=dict( + requires=["mask_files"], + xor=["mask_index"], + ), + num_components=dict( + xor=["variance_threshold"], + ), + pre_filter=dict( + usedefault=True, + ), + realigned_file=dict( + extensions=None, + mandatory=True, + ), + regress_poly_degree=dict( + usedefault=True, + ), repetition_time=dict(), - save_metadata=dict(usedefault=True), - save_pre_filter=dict(usedefault=True), - use_regress_poly=dict(deprecated="0.15.0", new_name="pre_filter"), - variance_threshold=dict(xor=["num_components"]), + save_metadata=dict( + usedefault=True, + ), + save_pre_filter=dict( + usedefault=True, + ), + use_regress_poly=dict( + deprecated="0.15.0", + new_name="pre_filter", + ), + variance_threshold=dict( + xor=["num_components"], + ), ) inputs = ACompCor.input_spec() @@ -32,9 +64,15 @@ def test_ACompCor_inputs(): def test_ACompCor_outputs(): output_map = dict( - components_file=dict(extensions=None), - metadata_file=dict(extensions=None), - pre_filter_file=dict(extensions=None), + components_file=dict( + extensions=None, + ), + metadata_file=dict( + extensions=None, + ), + pre_filter_file=dict( + extensions=None, + ), ) outputs = ACompCor.output_spec() diff --git a/nipype/algorithms/tests/test_auto_ActivationCount.py b/nipype/algorithms/tests/test_auto_ActivationCount.py index 6492d4d9d7..7df84ee122 100644 --- a/nipype/algorithms/tests/test_auto_ActivationCount.py +++ b/nipype/algorithms/tests/test_auto_ActivationCount.py @@ -3,7 +3,14 @@ def test_ActivationCount_inputs(): - input_map = dict(in_files=dict(mandatory=True), threshold=dict(mandatory=True)) + input_map = dict( + in_files=dict( + mandatory=True, + ), + threshold=dict( + mandatory=True, + ), + ) inputs = ActivationCount.input_spec() for key, metadata in list(input_map.items()): @@ -13,9 +20,15 @@ def test_ActivationCount_inputs(): def test_ActivationCount_outputs(): output_map = dict( - acm_neg=dict(extensions=None), - acm_pos=dict(extensions=None), - out_file=dict(extensions=None), + acm_neg=dict( + extensions=None, + ), + 
acm_pos=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), ) outputs = ActivationCount.output_spec() diff --git a/nipype/algorithms/tests/test_auto_AddCSVColumn.py b/nipype/algorithms/tests/test_auto_AddCSVColumn.py index 1afe128aa4..b76fd46457 100644 --- a/nipype/algorithms/tests/test_auto_AddCSVColumn.py +++ b/nipype/algorithms/tests/test_auto_AddCSVColumn.py @@ -6,8 +6,14 @@ def test_AddCSVColumn_inputs(): input_map = dict( extra_column_heading=dict(), extra_field=dict(), - in_file=dict(extensions=None, mandatory=True), - out_file=dict(extensions=None, usedefault=True), + in_file=dict( + extensions=None, + mandatory=True, + ), + out_file=dict( + extensions=None, + usedefault=True, + ), ) inputs = AddCSVColumn.input_spec() @@ -17,7 +23,11 @@ def test_AddCSVColumn_inputs(): def test_AddCSVColumn_outputs(): - output_map = dict(csv_file=dict(extensions=None)) + output_map = dict( + csv_file=dict( + extensions=None, + ), + ) outputs = AddCSVColumn.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_AddCSVRow.py b/nipype/algorithms/tests/test_auto_AddCSVRow.py index 5806842dc3..78976f418d 100644 --- a/nipype/algorithms/tests/test_auto_AddCSVRow.py +++ b/nipype/algorithms/tests/test_auto_AddCSVRow.py @@ -4,7 +4,13 @@ def test_AddCSVRow_inputs(): input_map = dict( - _outputs=dict(usedefault=True), in_file=dict(extensions=None, mandatory=True) + _outputs=dict( + usedefault=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = AddCSVRow.input_spec() @@ -14,7 +20,11 @@ def test_AddCSVRow_inputs(): def test_AddCSVRow_outputs(): - output_map = dict(csv_file=dict(extensions=None)) + output_map = dict( + csv_file=dict( + extensions=None, + ), + ) outputs = AddCSVRow.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_AddNoise.py b/nipype/algorithms/tests/test_auto_AddNoise.py index 5a44d2eaa7..5cf92e33f7 100644 --- a/nipype/algorithms/tests/test_auto_AddNoise.py +++ b/nipype/algorithms/tests/test_auto_AddNoise.py @@ -4,12 +4,27 @@ def test_AddNoise_inputs(): input_map = dict( - bg_dist=dict(mandatory=True, usedefault=True), - dist=dict(mandatory=True, usedefault=True), - in_file=dict(extensions=None, mandatory=True), - in_mask=dict(extensions=None), - out_file=dict(extensions=None), - snr=dict(usedefault=True), + bg_dist=dict( + mandatory=True, + usedefault=True, + ), + dist=dict( + mandatory=True, + usedefault=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + in_mask=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + snr=dict( + usedefault=True, + ), ) inputs = AddNoise.input_spec() @@ -19,7 +34,11 @@ def test_AddNoise_inputs(): def test_AddNoise_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = AddNoise.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_ArtifactDetect.py b/nipype/algorithms/tests/test_auto_ArtifactDetect.py index ed7ff3808c..51010aea3a 100644 --- a/nipype/algorithms/tests/test_auto_ArtifactDetect.py +++ b/nipype/algorithms/tests/test_auto_ArtifactDetect.py @@ -4,25 +4,61 @@ def test_ArtifactDetect_inputs(): input_map = dict( - bound_by_brainmask=dict(usedefault=True), - global_threshold=dict(usedefault=True), - intersect_mask=dict(usedefault=True), - mask_file=dict(extensions=None), + bound_by_brainmask=dict( + usedefault=True, + ), + 
global_threshold=dict( + usedefault=True, + ), + intersect_mask=dict( + usedefault=True, + ), + mask_file=dict( + extensions=None, + ), mask_threshold=dict(), - mask_type=dict(mandatory=True), + mask_type=dict( + mandatory=True, + ), norm_threshold=dict( - mandatory=True, xor=["rotation_threshold", "translation_threshold"] - ), - parameter_source=dict(mandatory=True), - plot_type=dict(usedefault=True), - realigned_files=dict(mandatory=True), - realignment_parameters=dict(mandatory=True), - rotation_threshold=dict(mandatory=True, xor=["norm_threshold"]), - save_plot=dict(usedefault=True), - translation_threshold=dict(mandatory=True, xor=["norm_threshold"]), - use_differences=dict(maxlen=2, minlen=2, usedefault=True), - use_norm=dict(requires=["norm_threshold"], usedefault=True), - zintensity_threshold=dict(mandatory=True), + mandatory=True, + xor=["rotation_threshold", "translation_threshold"], + ), + parameter_source=dict( + mandatory=True, + ), + plot_type=dict( + usedefault=True, + ), + realigned_files=dict( + mandatory=True, + ), + realignment_parameters=dict( + mandatory=True, + ), + rotation_threshold=dict( + mandatory=True, + xor=["norm_threshold"], + ), + save_plot=dict( + usedefault=True, + ), + translation_threshold=dict( + mandatory=True, + xor=["norm_threshold"], + ), + use_differences=dict( + maxlen=2, + minlen=2, + usedefault=True, + ), + use_norm=dict( + requires=["norm_threshold"], + usedefault=True, + ), + zintensity_threshold=dict( + mandatory=True, + ), ) inputs = ArtifactDetect.input_spec() diff --git a/nipype/algorithms/tests/test_auto_CalculateMedian.py b/nipype/algorithms/tests/test_auto_CalculateMedian.py index 2b3e42314e..ddc8b9814d 100644 --- a/nipype/algorithms/tests/test_auto_CalculateMedian.py +++ b/nipype/algorithms/tests/test_auto_CalculateMedian.py @@ -4,7 +4,11 @@ def test_CalculateMedian_inputs(): input_map = dict( - in_files=dict(), median_file=dict(), median_per_file=dict(usedefault=True) + in_files=dict(), + median_file=dict(), + median_per_file=dict( + usedefault=True, + ), ) inputs = CalculateMedian.input_spec() @@ -14,7 +18,9 @@ def test_CalculateMedian_inputs(): def test_CalculateMedian_outputs(): - output_map = dict(median_files=dict()) + output_map = dict( + median_files=dict(), + ) outputs = CalculateMedian.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py b/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py index ee43585d0b..a67f959176 100644 --- a/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py +++ b/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py @@ -4,8 +4,13 @@ def test_CalculateNormalizedMoments_inputs(): input_map = dict( - moment=dict(mandatory=True), - timeseries_file=dict(extensions=None, mandatory=True), + moment=dict( + mandatory=True, + ), + timeseries_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = CalculateNormalizedMoments.input_spec() @@ -15,7 +20,9 @@ def test_CalculateNormalizedMoments_inputs(): def test_CalculateNormalizedMoments_outputs(): - output_map = dict(moments=dict()) + output_map = dict( + moments=dict(), + ) outputs = CalculateNormalizedMoments.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_ComputeDVARS.py b/nipype/algorithms/tests/test_auto_ComputeDVARS.py index cb47ffd14e..5fe2d241b9 100644 --- a/nipype/algorithms/tests/test_auto_ComputeDVARS.py +++ b/nipype/algorithms/tests/test_auto_ComputeDVARS.py @@ -4,18 
+4,44 @@ def test_ComputeDVARS_inputs(): input_map = dict( - figdpi=dict(usedefault=True), - figformat=dict(usedefault=True), - figsize=dict(usedefault=True), - in_file=dict(extensions=None, mandatory=True), - in_mask=dict(extensions=None, mandatory=True), - intensity_normalization=dict(usedefault=True), - remove_zerovariance=dict(usedefault=True), - save_all=dict(usedefault=True), - save_nstd=dict(usedefault=True), - save_plot=dict(usedefault=True), - save_std=dict(usedefault=True), - save_vxstd=dict(usedefault=True), + figdpi=dict( + usedefault=True, + ), + figformat=dict( + usedefault=True, + ), + figsize=dict( + usedefault=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + in_mask=dict( + extensions=None, + mandatory=True, + ), + intensity_normalization=dict( + usedefault=True, + ), + remove_zerovariance=dict( + usedefault=True, + ), + save_all=dict( + usedefault=True, + ), + save_nstd=dict( + usedefault=True, + ), + save_plot=dict( + usedefault=True, + ), + save_std=dict( + usedefault=True, + ), + save_vxstd=dict( + usedefault=True, + ), series_tr=dict(), ) inputs = ComputeDVARS.input_spec() @@ -30,13 +56,27 @@ def test_ComputeDVARS_outputs(): avg_nstd=dict(), avg_std=dict(), avg_vxstd=dict(), - fig_nstd=dict(extensions=None), - fig_std=dict(extensions=None), - fig_vxstd=dict(extensions=None), - out_all=dict(extensions=None), - out_nstd=dict(extensions=None), - out_std=dict(extensions=None), - out_vxstd=dict(extensions=None), + fig_nstd=dict( + extensions=None, + ), + fig_std=dict( + extensions=None, + ), + fig_vxstd=dict( + extensions=None, + ), + out_all=dict( + extensions=None, + ), + out_nstd=dict( + extensions=None, + ), + out_std=dict( + extensions=None, + ), + out_vxstd=dict( + extensions=None, + ), ) outputs = ComputeDVARS.output_spec() diff --git a/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py b/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py index c2157e04cc..639f03770c 100644 --- a/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py +++ b/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py @@ -4,12 +4,28 @@ def test_ComputeMeshWarp_inputs(): input_map = dict( - metric=dict(usedefault=True), - out_file=dict(extensions=None, usedefault=True), - out_warp=dict(extensions=None, usedefault=True), - surface1=dict(extensions=None, mandatory=True), - surface2=dict(extensions=None, mandatory=True), - weighting=dict(usedefault=True), + metric=dict( + usedefault=True, + ), + out_file=dict( + extensions=None, + usedefault=True, + ), + out_warp=dict( + extensions=None, + usedefault=True, + ), + surface1=dict( + extensions=None, + mandatory=True, + ), + surface2=dict( + extensions=None, + mandatory=True, + ), + weighting=dict( + usedefault=True, + ), ) inputs = ComputeMeshWarp.input_spec() @@ -20,7 +36,13 @@ def test_ComputeMeshWarp_inputs(): def test_ComputeMeshWarp_outputs(): output_map = dict( - distance=dict(), out_file=dict(extensions=None), out_warp=dict(extensions=None) + distance=dict(), + out_file=dict( + extensions=None, + ), + out_warp=dict( + extensions=None, + ), ) outputs = ComputeMeshWarp.output_spec() diff --git a/nipype/algorithms/tests/test_auto_CreateNifti.py b/nipype/algorithms/tests/test_auto_CreateNifti.py index bfa3a5a0ab..f5c5c4a2f5 100644 --- a/nipype/algorithms/tests/test_auto_CreateNifti.py +++ b/nipype/algorithms/tests/test_auto_CreateNifti.py @@ -5,8 +5,14 @@ def test_CreateNifti_inputs(): input_map = dict( affine=dict(), - data_file=dict(extensions=None, mandatory=True), - header_file=dict(extensions=None, mandatory=True), + 
data_file=dict( + extensions=None, + mandatory=True, + ), + header_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = CreateNifti.input_spec() @@ -16,7 +22,11 @@ def test_CreateNifti_inputs(): def test_CreateNifti_outputs(): - output_map = dict(nifti_file=dict(extensions=None)) + output_map = dict( + nifti_file=dict( + extensions=None, + ), + ) outputs = CreateNifti.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_Distance.py b/nipype/algorithms/tests/test_auto_Distance.py index 9a7e696ff7..46e48342c4 100644 --- a/nipype/algorithms/tests/test_auto_Distance.py +++ b/nipype/algorithms/tests/test_auto_Distance.py @@ -4,10 +4,20 @@ def test_Distance_inputs(): input_map = dict( - mask_volume=dict(extensions=None), - method=dict(usedefault=True), - volume1=dict(extensions=None, mandatory=True), - volume2=dict(extensions=None, mandatory=True), + mask_volume=dict( + extensions=None, + ), + method=dict( + usedefault=True, + ), + volume1=dict( + extensions=None, + mandatory=True, + ), + volume2=dict( + extensions=None, + mandatory=True, + ), ) inputs = Distance.input_spec() @@ -18,7 +28,12 @@ def test_Distance_inputs(): def test_Distance_outputs(): output_map = dict( - distance=dict(), histogram=dict(extensions=None), point1=dict(), point2=dict() + distance=dict(), + histogram=dict( + extensions=None, + ), + point1=dict(), + point2=dict(), ) outputs = Distance.output_spec() diff --git a/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py b/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py index 2307b28f43..1308b4d97d 100644 --- a/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py +++ b/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py @@ -4,15 +4,36 @@ def test_FramewiseDisplacement_inputs(): input_map = dict( - figdpi=dict(usedefault=True), - figsize=dict(usedefault=True), - in_file=dict(extensions=None, mandatory=True), - normalize=dict(usedefault=True), - out_figure=dict(extensions=None, usedefault=True), - out_file=dict(extensions=None, usedefault=True), - parameter_source=dict(mandatory=True), - radius=dict(usedefault=True), - save_plot=dict(usedefault=True), + figdpi=dict( + usedefault=True, + ), + figsize=dict( + usedefault=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + normalize=dict( + usedefault=True, + ), + out_figure=dict( + extensions=None, + usedefault=True, + ), + out_file=dict( + extensions=None, + usedefault=True, + ), + parameter_source=dict( + mandatory=True, + ), + radius=dict( + usedefault=True, + ), + save_plot=dict( + usedefault=True, + ), series_tr=dict(), ) inputs = FramewiseDisplacement.input_spec() @@ -25,8 +46,12 @@ def test_FramewiseDisplacement_inputs(): def test_FramewiseDisplacement_outputs(): output_map = dict( fd_average=dict(), - out_figure=dict(extensions=None), - out_file=dict(extensions=None), + out_figure=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), ) outputs = FramewiseDisplacement.output_spec() diff --git a/nipype/algorithms/tests/test_auto_FuzzyOverlap.py b/nipype/algorithms/tests/test_auto_FuzzyOverlap.py index 9ae90df356..e8a7fe5ef1 100644 --- a/nipype/algorithms/tests/test_auto_FuzzyOverlap.py +++ b/nipype/algorithms/tests/test_auto_FuzzyOverlap.py @@ -4,11 +4,22 @@ def test_FuzzyOverlap_inputs(): input_map = dict( - in_mask=dict(extensions=None), - in_ref=dict(mandatory=True), - in_tst=dict(mandatory=True), - out_file=dict(extensions=None, usedefault=True), - weighting=dict(usedefault=True), + 
in_mask=dict( + extensions=None, + ), + in_ref=dict( + mandatory=True, + ), + in_tst=dict( + mandatory=True, + ), + out_file=dict( + extensions=None, + usedefault=True, + ), + weighting=dict( + usedefault=True, + ), ) inputs = FuzzyOverlap.input_spec() @@ -18,7 +29,12 @@ def test_FuzzyOverlap_inputs(): def test_FuzzyOverlap_outputs(): - output_map = dict(class_fdi=dict(), class_fji=dict(), dice=dict(), jaccard=dict()) + output_map = dict( + class_fdi=dict(), + class_fji=dict(), + dice=dict(), + jaccard=dict(), + ) outputs = FuzzyOverlap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_Gunzip.py b/nipype/algorithms/tests/test_auto_Gunzip.py index d238a7c4a9..7629feb820 100644 --- a/nipype/algorithms/tests/test_auto_Gunzip.py +++ b/nipype/algorithms/tests/test_auto_Gunzip.py @@ -3,7 +3,12 @@ def test_Gunzip_inputs(): - input_map = dict(in_file=dict(extensions=None, mandatory=True)) + input_map = dict( + in_file=dict( + extensions=None, + mandatory=True, + ), + ) inputs = Gunzip.input_spec() for key, metadata in list(input_map.items()): @@ -12,7 +17,11 @@ def test_Gunzip_inputs(): def test_Gunzip_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Gunzip.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_ICC.py b/nipype/algorithms/tests/test_auto_ICC.py index a5ac327e80..4a2389202c 100644 --- a/nipype/algorithms/tests/test_auto_ICC.py +++ b/nipype/algorithms/tests/test_auto_ICC.py @@ -4,8 +4,13 @@ def test_ICC_inputs(): input_map = dict( - mask=dict(extensions=None, mandatory=True), - subjects_sessions=dict(mandatory=True), + mask=dict( + extensions=None, + mandatory=True, + ), + subjects_sessions=dict( + mandatory=True, + ), ) inputs = ICC.input_spec() @@ -16,9 +21,15 @@ def test_ICC_inputs(): def test_ICC_outputs(): output_map = dict( - icc_map=dict(extensions=None), - session_var_map=dict(extensions=None), - subject_var_map=dict(extensions=None), + icc_map=dict( + extensions=None, + ), + session_var_map=dict( + extensions=None, + ), + subject_var_map=dict( + extensions=None, + ), ) outputs = ICC.output_spec() diff --git a/nipype/algorithms/tests/test_auto_Matlab2CSV.py b/nipype/algorithms/tests/test_auto_Matlab2CSV.py index d16bf2806d..42acbd514a 100644 --- a/nipype/algorithms/tests/test_auto_Matlab2CSV.py +++ b/nipype/algorithms/tests/test_auto_Matlab2CSV.py @@ -4,8 +4,13 @@ def test_Matlab2CSV_inputs(): input_map = dict( - in_file=dict(extensions=None, mandatory=True), - reshape_matrix=dict(usedefault=True), + in_file=dict( + extensions=None, + mandatory=True, + ), + reshape_matrix=dict( + usedefault=True, + ), ) inputs = Matlab2CSV.input_spec() @@ -15,7 +20,9 @@ def test_Matlab2CSV_inputs(): def test_Matlab2CSV_outputs(): - output_map = dict(csv_files=dict()) + output_map = dict( + csv_files=dict(), + ) outputs = Matlab2CSV.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_MergeCSVFiles.py b/nipype/algorithms/tests/test_auto_MergeCSVFiles.py index 097f6b2828..bb7e9ed65a 100644 --- a/nipype/algorithms/tests/test_auto_MergeCSVFiles.py +++ b/nipype/algorithms/tests/test_auto_MergeCSVFiles.py @@ -7,9 +7,16 @@ def test_MergeCSVFiles_inputs(): column_headings=dict(), extra_column_heading=dict(), extra_field=dict(), - in_files=dict(mandatory=True), - out_file=dict(extensions=None, usedefault=True), - 
row_heading_title=dict(usedefault=True), + in_files=dict( + mandatory=True, + ), + out_file=dict( + extensions=None, + usedefault=True, + ), + row_heading_title=dict( + usedefault=True, + ), row_headings=dict(), ) inputs = MergeCSVFiles.input_spec() @@ -20,7 +27,11 @@ def test_MergeCSVFiles_inputs(): def test_MergeCSVFiles_outputs(): - output_map = dict(csv_file=dict(extensions=None)) + output_map = dict( + csv_file=dict( + extensions=None, + ), + ) outputs = MergeCSVFiles.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_MergeROIs.py b/nipype/algorithms/tests/test_auto_MergeROIs.py index a7f36008be..c43a33b686 100644 --- a/nipype/algorithms/tests/test_auto_MergeROIs.py +++ b/nipype/algorithms/tests/test_auto_MergeROIs.py @@ -4,7 +4,11 @@ def test_MergeROIs_inputs(): input_map = dict( - in_files=dict(), in_index=dict(), in_reference=dict(extensions=None) + in_files=dict(), + in_index=dict(), + in_reference=dict( + extensions=None, + ), ) inputs = MergeROIs.input_spec() @@ -14,7 +18,11 @@ def test_MergeROIs_inputs(): def test_MergeROIs_outputs(): - output_map = dict(merged_file=dict(extensions=None)) + output_map = dict( + merged_file=dict( + extensions=None, + ), + ) outputs = MergeROIs.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_MeshWarpMaths.py b/nipype/algorithms/tests/test_auto_MeshWarpMaths.py index 50a9e14a25..a4295b8f46 100644 --- a/nipype/algorithms/tests/test_auto_MeshWarpMaths.py +++ b/nipype/algorithms/tests/test_auto_MeshWarpMaths.py @@ -5,11 +5,25 @@ def test_MeshWarpMaths_inputs(): input_map = dict( float_trait=dict(), - in_surf=dict(extensions=None, mandatory=True), - operation=dict(usedefault=True), - operator=dict(mandatory=True, usedefault=True), - out_file=dict(extensions=None, usedefault=True), - out_warp=dict(extensions=None, usedefault=True), + in_surf=dict( + extensions=None, + mandatory=True, + ), + operation=dict( + usedefault=True, + ), + operator=dict( + mandatory=True, + usedefault=True, + ), + out_file=dict( + extensions=None, + usedefault=True, + ), + out_warp=dict( + extensions=None, + usedefault=True, + ), ) inputs = MeshWarpMaths.input_spec() @@ -19,7 +33,14 @@ def test_MeshWarpMaths_inputs(): def test_MeshWarpMaths_outputs(): - output_map = dict(out_file=dict(extensions=None), out_warp=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + out_warp=dict( + extensions=None, + ), + ) outputs = MeshWarpMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_ModifyAffine.py b/nipype/algorithms/tests/test_auto_ModifyAffine.py index ddef881629..6592e28690 100644 --- a/nipype/algorithms/tests/test_auto_ModifyAffine.py +++ b/nipype/algorithms/tests/test_auto_ModifyAffine.py @@ -4,7 +4,12 @@ def test_ModifyAffine_inputs(): input_map = dict( - transformation_matrix=dict(usedefault=True), volumes=dict(mandatory=True) + transformation_matrix=dict( + usedefault=True, + ), + volumes=dict( + mandatory=True, + ), ) inputs = ModifyAffine.input_spec() @@ -14,7 +19,9 @@ def test_ModifyAffine_inputs(): def test_ModifyAffine_outputs(): - output_map = dict(transformed_volumes=dict()) + output_map = dict( + transformed_volumes=dict(), + ) outputs = ModifyAffine.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py b/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py index 
1217a82dbf..9e14e00595 100644 --- a/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py +++ b/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py @@ -3,7 +3,12 @@ def test_NonSteadyStateDetector_inputs(): - input_map = dict(in_file=dict(extensions=None, mandatory=True)) + input_map = dict( + in_file=dict( + extensions=None, + mandatory=True, + ), + ) inputs = NonSteadyStateDetector.input_spec() for key, metadata in list(input_map.items()): @@ -12,7 +17,9 @@ def test_NonSteadyStateDetector_inputs(): def test_NonSteadyStateDetector_outputs(): - output_map = dict(n_volumes_to_discard=dict()) + output_map = dict( + n_volumes_to_discard=dict(), + ) outputs = NonSteadyStateDetector.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py b/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py index ea852e286c..be18979a85 100644 --- a/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py +++ b/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py @@ -3,7 +3,12 @@ def test_NormalizeProbabilityMapSet_inputs(): - input_map = dict(in_files=dict(), in_mask=dict(extensions=None)) + input_map = dict( + in_files=dict(), + in_mask=dict( + extensions=None, + ), + ) inputs = NormalizeProbabilityMapSet.input_spec() for key, metadata in list(input_map.items()): @@ -12,7 +17,9 @@ def test_NormalizeProbabilityMapSet_inputs(): def test_NormalizeProbabilityMapSet_outputs(): - output_map = dict(out_files=dict()) + output_map = dict( + out_files=dict(), + ) outputs = NormalizeProbabilityMapSet.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_P2PDistance.py b/nipype/algorithms/tests/test_auto_P2PDistance.py index 75e9a76a1b..a5623353ec 100644 --- a/nipype/algorithms/tests/test_auto_P2PDistance.py +++ b/nipype/algorithms/tests/test_auto_P2PDistance.py @@ -4,12 +4,28 @@ def test_P2PDistance_inputs(): input_map = dict( - metric=dict(usedefault=True), - out_file=dict(extensions=None, usedefault=True), - out_warp=dict(extensions=None, usedefault=True), - surface1=dict(extensions=None, mandatory=True), - surface2=dict(extensions=None, mandatory=True), - weighting=dict(usedefault=True), + metric=dict( + usedefault=True, + ), + out_file=dict( + extensions=None, + usedefault=True, + ), + out_warp=dict( + extensions=None, + usedefault=True, + ), + surface1=dict( + extensions=None, + mandatory=True, + ), + surface2=dict( + extensions=None, + mandatory=True, + ), + weighting=dict( + usedefault=True, + ), ) inputs = P2PDistance.input_spec() @@ -20,7 +36,13 @@ def test_P2PDistance_inputs(): def test_P2PDistance_outputs(): output_map = dict( - distance=dict(), out_file=dict(extensions=None), out_warp=dict(extensions=None) + distance=dict(), + out_file=dict( + extensions=None, + ), + out_warp=dict( + extensions=None, + ), ) outputs = P2PDistance.output_spec() diff --git a/nipype/algorithms/tests/test_auto_PickAtlas.py b/nipype/algorithms/tests/test_auto_PickAtlas.py index 9369818ba0..2a29ca8d23 100644 --- a/nipype/algorithms/tests/test_auto_PickAtlas.py +++ b/nipype/algorithms/tests/test_auto_PickAtlas.py @@ -4,11 +4,22 @@ def test_PickAtlas_inputs(): input_map = dict( - atlas=dict(extensions=None, mandatory=True), - dilation_size=dict(usedefault=True), - hemi=dict(usedefault=True), - labels=dict(mandatory=True), - output_file=dict(extensions=None), + atlas=dict( + extensions=None, + mandatory=True, + ), + dilation_size=dict( + usedefault=True, + ), 
+ hemi=dict( + usedefault=True, + ), + labels=dict( + mandatory=True, + ), + output_file=dict( + extensions=None, + ), ) inputs = PickAtlas.input_spec() @@ -18,7 +29,11 @@ def test_PickAtlas_inputs(): def test_PickAtlas_outputs(): - output_map = dict(mask_file=dict(extensions=None)) + output_map = dict( + mask_file=dict( + extensions=None, + ), + ) outputs = PickAtlas.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_Similarity.py b/nipype/algorithms/tests/test_auto_Similarity.py index e0287b04d3..a5e5f583d5 100644 --- a/nipype/algorithms/tests/test_auto_Similarity.py +++ b/nipype/algorithms/tests/test_auto_Similarity.py @@ -4,11 +4,23 @@ def test_Similarity_inputs(): input_map = dict( - mask1=dict(extensions=None), - mask2=dict(extensions=None), - metric=dict(usedefault=True), - volume1=dict(extensions=None, mandatory=True), - volume2=dict(extensions=None, mandatory=True), + mask1=dict( + extensions=None, + ), + mask2=dict( + extensions=None, + ), + metric=dict( + usedefault=True, + ), + volume1=dict( + extensions=None, + mandatory=True, + ), + volume2=dict( + extensions=None, + mandatory=True, + ), ) inputs = Similarity.input_spec() @@ -18,7 +30,9 @@ def test_Similarity_inputs(): def test_Similarity_outputs(): - output_map = dict(similarity=dict()) + output_map = dict( + similarity=dict(), + ) outputs = Similarity.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_SimpleThreshold.py b/nipype/algorithms/tests/test_auto_SimpleThreshold.py index f6b47ae8c7..ab7141f0de 100644 --- a/nipype/algorithms/tests/test_auto_SimpleThreshold.py +++ b/nipype/algorithms/tests/test_auto_SimpleThreshold.py @@ -3,7 +3,14 @@ def test_SimpleThreshold_inputs(): - input_map = dict(threshold=dict(mandatory=True), volumes=dict(mandatory=True)) + input_map = dict( + threshold=dict( + mandatory=True, + ), + volumes=dict( + mandatory=True, + ), + ) inputs = SimpleThreshold.input_spec() for key, metadata in list(input_map.items()): @@ -12,7 +19,9 @@ def test_SimpleThreshold_inputs(): def test_SimpleThreshold_outputs(): - output_map = dict(thresholded_volumes=dict()) + output_map = dict( + thresholded_volumes=dict(), + ) outputs = SimpleThreshold.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_SpecifyModel.py b/nipype/algorithms/tests/test_auto_SpecifyModel.py index 4d7dd90eae..15d9e4994e 100644 --- a/nipype/algorithms/tests/test_auto_SpecifyModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifyModel.py @@ -5,23 +5,43 @@ def test_SpecifyModel_inputs(): input_map = dict( bids_amplitude_column=dict(), - bids_condition_column=dict(usedefault=True), + bids_condition_column=dict( + usedefault=True, + ), bids_event_file=dict( - mandatory=True, xor=["subject_info", "event_files", "bids_event_file"] + mandatory=True, + xor=["subject_info", "event_files", "bids_event_file"], ), event_files=dict( - mandatory=True, xor=["subject_info", "event_files", "bids_event_file"] - ), - functional_runs=dict(copyfile=False, mandatory=True), - high_pass_filter_cutoff=dict(mandatory=True), - input_units=dict(mandatory=True), - outlier_files=dict(copyfile=False), - parameter_source=dict(usedefault=True), - realignment_parameters=dict(copyfile=False), + mandatory=True, + xor=["subject_info", "event_files", "bids_event_file"], + ), + functional_runs=dict( + copyfile=False, + mandatory=True, + ), + high_pass_filter_cutoff=dict( + mandatory=True, + ), + input_units=dict( + 
mandatory=True, + ), + outlier_files=dict( + copyfile=False, + ), + parameter_source=dict( + usedefault=True, + ), + realignment_parameters=dict( + copyfile=False, + ), subject_info=dict( - mandatory=True, xor=["subject_info", "event_files", "bids_event_file"] + mandatory=True, + xor=["subject_info", "event_files", "bids_event_file"], + ), + time_repetition=dict( + mandatory=True, ), - time_repetition=dict(mandatory=True), ) inputs = SpecifyModel.input_spec() @@ -31,7 +51,9 @@ def test_SpecifyModel_inputs(): def test_SpecifyModel_outputs(): - output_map = dict(session_info=dict()) + output_map = dict( + session_info=dict(), + ) outputs = SpecifyModel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_SpecifySPMModel.py b/nipype/algorithms/tests/test_auto_SpecifySPMModel.py index 1a08610f5e..64bb206359 100644 --- a/nipype/algorithms/tests/test_auto_SpecifySPMModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifySPMModel.py @@ -5,25 +5,49 @@ def test_SpecifySPMModel_inputs(): input_map = dict( bids_amplitude_column=dict(), - bids_condition_column=dict(usedefault=True), + bids_condition_column=dict( + usedefault=True, + ), bids_event_file=dict( - mandatory=True, xor=["subject_info", "event_files", "bids_event_file"] + mandatory=True, + xor=["subject_info", "event_files", "bids_event_file"], + ), + concatenate_runs=dict( + usedefault=True, ), - concatenate_runs=dict(usedefault=True), event_files=dict( - mandatory=True, xor=["subject_info", "event_files", "bids_event_file"] - ), - functional_runs=dict(copyfile=False, mandatory=True), - high_pass_filter_cutoff=dict(mandatory=True), - input_units=dict(mandatory=True), - outlier_files=dict(copyfile=False), - output_units=dict(usedefault=True), - parameter_source=dict(usedefault=True), - realignment_parameters=dict(copyfile=False), + mandatory=True, + xor=["subject_info", "event_files", "bids_event_file"], + ), + functional_runs=dict( + copyfile=False, + mandatory=True, + ), + high_pass_filter_cutoff=dict( + mandatory=True, + ), + input_units=dict( + mandatory=True, + ), + outlier_files=dict( + copyfile=False, + ), + output_units=dict( + usedefault=True, + ), + parameter_source=dict( + usedefault=True, + ), + realignment_parameters=dict( + copyfile=False, + ), subject_info=dict( - mandatory=True, xor=["subject_info", "event_files", "bids_event_file"] + mandatory=True, + xor=["subject_info", "event_files", "bids_event_file"], + ), + time_repetition=dict( + mandatory=True, ), - time_repetition=dict(mandatory=True), ) inputs = SpecifySPMModel.input_spec() @@ -33,7 +57,9 @@ def test_SpecifySPMModel_inputs(): def test_SpecifySPMModel_outputs(): - output_map = dict(session_info=dict()) + output_map = dict( + session_info=dict(), + ) outputs = SpecifySPMModel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_SpecifySparseModel.py b/nipype/algorithms/tests/test_auto_SpecifySparseModel.py index 7e91677144..cac4ce5770 100644 --- a/nipype/algorithms/tests/test_auto_SpecifySparseModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifySparseModel.py @@ -5,31 +5,63 @@ def test_SpecifySparseModel_inputs(): input_map = dict( bids_amplitude_column=dict(), - bids_condition_column=dict(usedefault=True), + bids_condition_column=dict( + usedefault=True, + ), bids_event_file=dict( - mandatory=True, xor=["subject_info", "event_files", "bids_event_file"] + mandatory=True, + xor=["subject_info", "event_files", "bids_event_file"], ), event_files=dict( - 
mandatory=True, xor=["subject_info", "event_files", "bids_event_file"] + mandatory=True, + xor=["subject_info", "event_files", "bids_event_file"], + ), + functional_runs=dict( + copyfile=False, + mandatory=True, + ), + high_pass_filter_cutoff=dict( + mandatory=True, + ), + input_units=dict( + mandatory=True, ), - functional_runs=dict(copyfile=False, mandatory=True), - high_pass_filter_cutoff=dict(mandatory=True), - input_units=dict(mandatory=True), model_hrf=dict(), - outlier_files=dict(copyfile=False), - parameter_source=dict(usedefault=True), - realignment_parameters=dict(copyfile=False), + outlier_files=dict( + copyfile=False, + ), + parameter_source=dict( + usedefault=True, + ), + realignment_parameters=dict( + copyfile=False, + ), save_plot=dict(), - scale_regressors=dict(usedefault=True), - scan_onset=dict(usedefault=True), - stimuli_as_impulses=dict(usedefault=True), + scale_regressors=dict( + usedefault=True, + ), + scan_onset=dict( + usedefault=True, + ), + stimuli_as_impulses=dict( + usedefault=True, + ), subject_info=dict( - mandatory=True, xor=["subject_info", "event_files", "bids_event_file"] + mandatory=True, + xor=["subject_info", "event_files", "bids_event_file"], + ), + time_acquisition=dict( + mandatory=True, + ), + time_repetition=dict( + mandatory=True, + ), + use_temporal_deriv=dict( + requires=["model_hrf"], + ), + volumes_in_cluster=dict( + usedefault=True, ), - time_acquisition=dict(mandatory=True), - time_repetition=dict(mandatory=True), - use_temporal_deriv=dict(requires=["model_hrf"]), - volumes_in_cluster=dict(usedefault=True), ) inputs = SpecifySparseModel.input_spec() @@ -41,8 +73,12 @@ def test_SpecifySparseModel_inputs(): def test_SpecifySparseModel_outputs(): output_map = dict( session_info=dict(), - sparse_png_file=dict(extensions=None), - sparse_svg_file=dict(extensions=None), + sparse_png_file=dict( + extensions=None, + ), + sparse_svg_file=dict( + extensions=None, + ), ) outputs = SpecifySparseModel.output_spec() diff --git a/nipype/algorithms/tests/test_auto_SplitROIs.py b/nipype/algorithms/tests/test_auto_SplitROIs.py index 3760e32cf4..c9eec86058 100644 --- a/nipype/algorithms/tests/test_auto_SplitROIs.py +++ b/nipype/algorithms/tests/test_auto_SplitROIs.py @@ -4,8 +4,13 @@ def test_SplitROIs_inputs(): input_map = dict( - in_file=dict(extensions=None, mandatory=True), - in_mask=dict(extensions=None), + in_file=dict( + extensions=None, + mandatory=True, + ), + in_mask=dict( + extensions=None, + ), roi_size=dict(), ) inputs = SplitROIs.input_spec() @@ -16,7 +21,11 @@ def test_SplitROIs_inputs(): def test_SplitROIs_outputs(): - output_map = dict(out_files=dict(), out_index=dict(), out_masks=dict()) + output_map = dict( + out_files=dict(), + out_index=dict(), + out_masks=dict(), + ) outputs = SplitROIs.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_StimulusCorrelation.py b/nipype/algorithms/tests/test_auto_StimulusCorrelation.py index 5bcffb6576..19cec418c4 100644 --- a/nipype/algorithms/tests/test_auto_StimulusCorrelation.py +++ b/nipype/algorithms/tests/test_auto_StimulusCorrelation.py @@ -4,10 +4,19 @@ def test_StimulusCorrelation_inputs(): input_map = dict( - concatenated_design=dict(mandatory=True), - intensity_values=dict(mandatory=True), - realignment_parameters=dict(mandatory=True), - spm_mat_file=dict(extensions=None, mandatory=True), + concatenated_design=dict( + mandatory=True, + ), + intensity_values=dict( + mandatory=True, + ), + realignment_parameters=dict( + mandatory=True, + ), + 
spm_mat_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = StimulusCorrelation.input_spec() @@ -17,7 +26,9 @@ def test_StimulusCorrelation_inputs(): def test_StimulusCorrelation_outputs(): - output_map = dict(stimcorr_files=dict()) + output_map = dict( + stimcorr_files=dict(), + ) outputs = StimulusCorrelation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/algorithms/tests/test_auto_TCompCor.py b/nipype/algorithms/tests/test_auto_TCompCor.py index d4df6380ab..0f802cc92e 100644 --- a/nipype/algorithms/tests/test_auto_TCompCor.py +++ b/nipype/algorithms/tests/test_auto_TCompCor.py @@ -4,25 +4,59 @@ def test_TCompCor_inputs(): input_map = dict( - components_file=dict(usedefault=True), - failure_mode=dict(usedefault=True), + components_file=dict( + usedefault=True, + ), + failure_mode=dict( + usedefault=True, + ), header_prefix=dict(), - high_pass_cutoff=dict(usedefault=True), - ignore_initial_volumes=dict(usedefault=True), + high_pass_cutoff=dict( + usedefault=True, + ), + ignore_initial_volumes=dict( + usedefault=True, + ), mask_files=dict(), - mask_index=dict(requires=["mask_files"], xor=["merge_method"]), + mask_index=dict( + requires=["mask_files"], + xor=["merge_method"], + ), mask_names=dict(), - merge_method=dict(requires=["mask_files"], xor=["mask_index"]), - num_components=dict(xor=["variance_threshold"]), - percentile_threshold=dict(usedefault=True), - pre_filter=dict(usedefault=True), - realigned_file=dict(extensions=None, mandatory=True), - regress_poly_degree=dict(usedefault=True), + merge_method=dict( + requires=["mask_files"], + xor=["mask_index"], + ), + num_components=dict( + xor=["variance_threshold"], + ), + percentile_threshold=dict( + usedefault=True, + ), + pre_filter=dict( + usedefault=True, + ), + realigned_file=dict( + extensions=None, + mandatory=True, + ), + regress_poly_degree=dict( + usedefault=True, + ), repetition_time=dict(), - save_metadata=dict(usedefault=True), - save_pre_filter=dict(usedefault=True), - use_regress_poly=dict(deprecated="0.15.0", new_name="pre_filter"), - variance_threshold=dict(xor=["num_components"]), + save_metadata=dict( + usedefault=True, + ), + save_pre_filter=dict( + usedefault=True, + ), + use_regress_poly=dict( + deprecated="0.15.0", + new_name="pre_filter", + ), + variance_threshold=dict( + xor=["num_components"], + ), ) inputs = TCompCor.input_spec() @@ -33,10 +67,16 @@ def test_TCompCor_inputs(): def test_TCompCor_outputs(): output_map = dict( - components_file=dict(extensions=None), + components_file=dict( + extensions=None, + ), high_variance_masks=dict(), - metadata_file=dict(extensions=None), - pre_filter_file=dict(extensions=None), + metadata_file=dict( + extensions=None, + ), + pre_filter_file=dict( + extensions=None, + ), ) outputs = TCompCor.output_spec() diff --git a/nipype/algorithms/tests/test_auto_WarpPoints.py b/nipype/algorithms/tests/test_auto_WarpPoints.py index f8b99dfe9a..fc72866d83 100644 --- a/nipype/algorithms/tests/test_auto_WarpPoints.py +++ b/nipype/algorithms/tests/test_auto_WarpPoints.py @@ -4,7 +4,10 @@ def test_WarpPoints_inputs(): input_map = dict( - interp=dict(mandatory=True, usedefault=True), + interp=dict( + mandatory=True, + usedefault=True, + ), out_points=dict( extensions=None, keep_extension=True, @@ -12,8 +15,14 @@ def test_WarpPoints_inputs(): name_template="%s_warped", output_name="out_points", ), - points=dict(extensions=None, mandatory=True), - warp=dict(extensions=None, mandatory=True), + points=dict( + extensions=None, + mandatory=True, + ), 
+ warp=dict( + extensions=None, + mandatory=True, + ), ) inputs = WarpPoints.input_spec() @@ -23,7 +32,11 @@ def test_WarpPoints_inputs(): def test_WarpPoints_outputs(): - output_map = dict(out_points=dict(extensions=None)) + output_map = dict( + out_points=dict( + extensions=None, + ), + ) outputs = WarpPoints.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_ABoverlap.py b/nipype/interfaces/afni/tests/test_auto_ABoverlap.py index 63e7c79717..501f5331b7 100644 --- a/nipype/interfaces/afni/tests/test_auto_ABoverlap.py +++ b/nipype/interfaces/afni/tests/test_auto_ABoverlap.py @@ -4,20 +4,46 @@ def test_ABoverlap_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file_a=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-3 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-3, ), in_file_b=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-2 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-2, + ), + no_automask=dict( + argstr="-no_automask", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr=" |& tee %s", + extensions=None, + position=-1, ), - no_automask=dict(argstr="-no_automask"), - num_threads=dict(nohash=True, usedefault=True), - out_file=dict(argstr=" |& tee %s", extensions=None, position=-1), outputtype=dict(), - quiet=dict(argstr="-quiet"), - verb=dict(argstr="-verb"), + quiet=dict( + argstr="-quiet", + ), + verb=dict( + argstr="-verb", + ), ) inputs = ABoverlap.input_spec() @@ -27,7 +53,11 @@ def test_ABoverlap_inputs(): def test_ABoverlap_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ABoverlap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_AFNICommand.py b/nipype/interfaces/afni/tests/test_auto_AFNICommand.py index b9e43c583a..941667f49f 100644 --- a/nipype/interfaces/afni/tests/test_auto_AFNICommand.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNICommand.py @@ -4,9 +4,17 @@ def test_AFNICommand_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - num_threads=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, diff --git a/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py b/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py index 47e6d2186f..de23f6c05b 100644 --- a/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py @@ -3,7 +3,15 @@ def test_AFNICommandBase_inputs(): - input_map = dict(args=dict(argstr="%s"), environ=dict(nohash=True, usedefault=True)) + input_map = dict( + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ) inputs = AFNICommandBase.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py b/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py index c4609dc9d9..fd4682947b 100644 --- 
a/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py @@ -4,9 +4,17 @@ def test_AFNIPythonCommand_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - num_threads=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, diff --git a/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py b/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py index ae95b3b575..6983e839fb 100644 --- a/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py @@ -4,15 +4,35 @@ def test_AFNItoNIFTI_inputs(): input_map = dict( - args=dict(argstr="%s"), - denote=dict(argstr="-denote"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + denote=dict( + argstr="-denote", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + newid=dict( + argstr="-newid", + xor=["oldid"], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + oldid=dict( + argstr="-oldid", + xor=["newid"], ), - newid=dict(argstr="-newid", xor=["oldid"]), - num_threads=dict(nohash=True, usedefault=True), - oldid=dict(argstr="-oldid", xor=["newid"]), out_file=dict( argstr="-prefix %s", extensions=None, @@ -21,7 +41,9 @@ def test_AFNItoNIFTI_inputs(): name_template="%s.nii", ), outputtype=dict(), - pure=dict(argstr="-pure"), + pure=dict( + argstr="-pure", + ), ) inputs = AFNItoNIFTI.input_spec() @@ -31,7 +53,11 @@ def test_AFNItoNIFTI_inputs(): def test_AFNItoNIFTI_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = AFNItoNIFTI.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py b/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py index e5d49ecec2..a3b376f55b 100644 --- a/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py +++ b/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py @@ -4,20 +4,57 @@ def test_AlignEpiAnatPy_inputs(): input_map = dict( - anat=dict(argstr="-anat %s", copyfile=False, extensions=None, mandatory=True), - anat2epi=dict(argstr="-anat2epi"), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - epi2anat=dict(argstr="-epi2anat"), - epi_base=dict(argstr="-epi_base %s", mandatory=True), - epi_strip=dict(argstr="-epi_strip %s"), - in_file=dict(argstr="-epi %s", copyfile=False, extensions=None, mandatory=True), + anat=dict( + argstr="-anat %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + anat2epi=dict( + argstr="-anat2epi", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + epi2anat=dict( + argstr="-epi2anat", + ), + epi_base=dict( + argstr="-epi_base %s", + mandatory=True, + ), + epi_strip=dict( + argstr="-epi_strip %s", + ), + in_file=dict( + argstr="-epi %s", + copyfile=False, + extensions=None, + mandatory=True, + ), outputtype=dict(), - py27_path=dict(usedefault=True), - save_skullstrip=dict(argstr="-save_skullstrip"), - suffix=dict(argstr="-suffix %s", usedefault=True), - 
tshift=dict(argstr="-tshift %s", usedefault=True), - volreg=dict(argstr="-volreg %s", usedefault=True), + py27_path=dict( + usedefault=True, + ), + save_skullstrip=dict( + argstr="-save_skullstrip", + ), + suffix=dict( + argstr="-suffix %s", + usedefault=True, + ), + tshift=dict( + argstr="-tshift %s", + usedefault=True, + ), + volreg=dict( + argstr="-volreg %s", + usedefault=True, + ), ) inputs = AlignEpiAnatPy.input_spec() @@ -28,16 +65,36 @@ def test_AlignEpiAnatPy_inputs(): def test_AlignEpiAnatPy_outputs(): output_map = dict( - anat_al_mat=dict(extensions=None), - anat_al_orig=dict(extensions=None), - epi_al_mat=dict(extensions=None), - epi_al_orig=dict(extensions=None), - epi_al_tlrc_mat=dict(extensions=None), - epi_reg_al_mat=dict(extensions=None), - epi_tlrc_al=dict(extensions=None), - epi_vr_al_mat=dict(extensions=None), - epi_vr_motion=dict(extensions=None), - skullstrip=dict(extensions=None), + anat_al_mat=dict( + extensions=None, + ), + anat_al_orig=dict( + extensions=None, + ), + epi_al_mat=dict( + extensions=None, + ), + epi_al_orig=dict( + extensions=None, + ), + epi_al_tlrc_mat=dict( + extensions=None, + ), + epi_reg_al_mat=dict( + extensions=None, + ), + epi_tlrc_al=dict( + extensions=None, + ), + epi_vr_al_mat=dict( + extensions=None, + ), + epi_vr_motion=dict( + extensions=None, + ), + skullstrip=dict( + extensions=None, + ), ) outputs = AlignEpiAnatPy.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Allineate.py b/nipype/interfaces/afni/tests/test_auto_Allineate.py index 39f5e9b1bd..afe6c3f24d 100644 --- a/nipype/interfaces/afni/tests/test_auto_Allineate.py +++ b/nipype/interfaces/afni/tests/test_auto_Allineate.py @@ -10,20 +10,48 @@ def test_Allineate_inputs(): position=-1, xor=["out_file", "out_matrix", "out_param_file", "out_weight_file"], ), - args=dict(argstr="%s"), - autobox=dict(argstr="-autobox"), - automask=dict(argstr="-automask+%d"), - autoweight=dict(argstr="-autoweight%s"), - center_of_mass=dict(argstr="-cmass%s"), - check=dict(argstr="-check %s"), - convergence=dict(argstr="-conv %f"), - cost=dict(argstr="-cost %s"), - environ=dict(nohash=True, usedefault=True), - epi=dict(argstr="-EPI"), - final_interpolation=dict(argstr="-final %s"), - fine_blur=dict(argstr="-fineblur %f"), + args=dict( + argstr="%s", + ), + autobox=dict( + argstr="-autobox", + ), + automask=dict( + argstr="-automask+%d", + ), + autoweight=dict( + argstr="-autoweight%s", + ), + center_of_mass=dict( + argstr="-cmass%s", + ), + check=dict( + argstr="-check %s", + ), + convergence=dict( + argstr="-conv %f", + ), + cost=dict( + argstr="-cost %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + epi=dict( + argstr="-EPI", + ), + final_interpolation=dict( + argstr="-final %s", + ), + fine_blur=dict( + argstr="-fineblur %f", + ), in_file=dict( - argstr="-source %s", copyfile=False, extensions=None, mandatory=True + argstr="-source %s", + copyfile=False, + extensions=None, + mandatory=True, ), in_matrix=dict( argstr="-1Dmatrix_apply %s", @@ -32,23 +60,57 @@ def test_Allineate_inputs(): xor=["out_matrix"], ), in_param_file=dict( - argstr="-1Dparam_apply %s", extensions=None, xor=["out_param_file"] - ), - interpolation=dict(argstr="-interp %s"), - master=dict(argstr="-master %s", extensions=None), - maxrot=dict(argstr="-maxrot %f"), - maxscl=dict(argstr="-maxscl %f"), - maxshf=dict(argstr="-maxshf %f"), - maxshr=dict(argstr="-maxshr %f"), - newgrid=dict(argstr="-newgrid %f"), - nmatch=dict(argstr="-nmatch %d"), - no_pad=dict(argstr="-nopad"), - 
nomask=dict(argstr="-nomask"), - num_threads=dict(nohash=True, usedefault=True), - nwarp=dict(argstr="-nwarp %s"), - nwarp_fixdep=dict(argstr="-nwarp_fixdep%s..."), - nwarp_fixmot=dict(argstr="-nwarp_fixmot%s..."), - one_pass=dict(argstr="-onepass"), + argstr="-1Dparam_apply %s", + extensions=None, + xor=["out_param_file"], + ), + interpolation=dict( + argstr="-interp %s", + ), + master=dict( + argstr="-master %s", + extensions=None, + ), + maxrot=dict( + argstr="-maxrot %f", + ), + maxscl=dict( + argstr="-maxscl %f", + ), + maxshf=dict( + argstr="-maxshf %f", + ), + maxshr=dict( + argstr="-maxshr %f", + ), + newgrid=dict( + argstr="-newgrid %f", + ), + nmatch=dict( + argstr="-nmatch %d", + ), + no_pad=dict( + argstr="-nopad", + ), + nomask=dict( + argstr="-nomask", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + nwarp=dict( + argstr="-nwarp %s", + ), + nwarp_fixdep=dict( + argstr="-nwarp_fixdep%s...", + ), + nwarp_fixmot=dict( + argstr="-nwarp_fixmot%s...", + ), + one_pass=dict( + argstr="-onepass", + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -58,35 +120,80 @@ def test_Allineate_inputs(): xor=["allcostx"], ), out_matrix=dict( - argstr="-1Dmatrix_save %s", extensions=None, xor=["in_matrix", "allcostx"] + argstr="-1Dmatrix_save %s", + extensions=None, + xor=["in_matrix", "allcostx"], ), out_param_file=dict( argstr="-1Dparam_save %s", extensions=None, xor=["in_param_file", "allcostx"], ), - out_weight_file=dict(argstr="-wtprefix %s", extensions=None, xor=["allcostx"]), + out_weight_file=dict( + argstr="-wtprefix %s", + extensions=None, + xor=["allcostx"], + ), outputtype=dict(), - overwrite=dict(argstr="-overwrite"), - quiet=dict(argstr="-quiet"), - reference=dict(argstr="-base %s", extensions=None), - replacebase=dict(argstr="-replacebase"), - replacemeth=dict(argstr="-replacemeth %s"), - source_automask=dict(argstr="-source_automask+%d"), - source_mask=dict(argstr="-source_mask %s", extensions=None), - two_best=dict(argstr="-twobest %d"), - two_blur=dict(argstr="-twoblur %f"), - two_first=dict(argstr="-twofirst"), - two_pass=dict(argstr="-twopass"), - usetemp=dict(argstr="-usetemp"), - verbose=dict(argstr="-verb"), - warp_type=dict(argstr="-warp %s"), - warpfreeze=dict(argstr="-warpfreeze"), - weight=dict(argstr="-weight %s"), + overwrite=dict( + argstr="-overwrite", + ), + quiet=dict( + argstr="-quiet", + ), + reference=dict( + argstr="-base %s", + extensions=None, + ), + replacebase=dict( + argstr="-replacebase", + ), + replacemeth=dict( + argstr="-replacemeth %s", + ), + source_automask=dict( + argstr="-source_automask+%d", + ), + source_mask=dict( + argstr="-source_mask %s", + extensions=None, + ), + two_best=dict( + argstr="-twobest %d", + ), + two_blur=dict( + argstr="-twoblur %f", + ), + two_first=dict( + argstr="-twofirst", + ), + two_pass=dict( + argstr="-twopass", + ), + usetemp=dict( + argstr="-usetemp", + ), + verbose=dict( + argstr="-verb", + ), + warp_type=dict( + argstr="-warp %s", + ), + warpfreeze=dict( + argstr="-warpfreeze", + ), + weight=dict( + argstr="-weight %s", + ), weight_file=dict( - argstr="-weight %s", deprecated="1.0.0", extensions=None, new_name="weight" + argstr="-weight %s", + deprecated="1.0.0", + extensions=None, + new_name="weight", + ), + zclip=dict( + argstr="-zclip", ), - zclip=dict(argstr="-zclip"), ) inputs = Allineate.input_spec() @@ -97,11 +204,21 @@ def test_Allineate_inputs(): def test_Allineate_outputs(): output_map = dict( - allcostx=dict(extensions=None), - out_file=dict(extensions=None), - 
out_matrix=dict(extensions=None), - out_param_file=dict(extensions=None), - out_weight_file=dict(extensions=None), + allcostx=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + out_matrix=dict( + extensions=None, + ), + out_param_file=dict( + extensions=None, + ), + out_weight_file=dict( + extensions=None, + ), ) outputs = Allineate.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py b/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py index 10d73d8011..eebfc73b6b 100644 --- a/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py +++ b/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py @@ -4,13 +4,26 @@ def test_AutoTLRC_inputs(): input_map = dict( - args=dict(argstr="%s"), - base=dict(argstr="-base %s", mandatory=True), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + base=dict( + argstr="-base %s", + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="-input %s", copyfile=False, extensions=None, mandatory=True + argstr="-input %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + no_ss=dict( + argstr="-no_ss", ), - no_ss=dict(argstr="-no_ss"), outputtype=dict(), ) inputs = AutoTLRC.input_spec() @@ -21,7 +34,11 @@ def test_AutoTLRC_inputs(): def test_AutoTLRC_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = AutoTLRC.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py b/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py index 1d07a0002a..14c59cba0c 100644 --- a/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py +++ b/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py @@ -4,18 +4,40 @@ def test_AutoTcorrelate_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - eta2=dict(argstr="-eta2"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + eta2=dict( + argstr="-eta2", + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + mask_only_targets=dict( + argstr="-mask_only_targets", + xor=["mask_source"], ), - mask=dict(argstr="-mask %s", extensions=None), - mask_only_targets=dict(argstr="-mask_only_targets", xor=["mask_source"]), mask_source=dict( - argstr="-mask_source %s", extensions=None, xor=["mask_only_targets"] + argstr="-mask_source %s", + extensions=None, + xor=["mask_only_targets"], + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -23,7 +45,9 @@ def test_AutoTcorrelate_inputs(): name_template="%s_similarity_matrix.1D", ), outputtype=dict(), - polort=dict(argstr="-polort %d"), + polort=dict( + argstr="-polort %d", + ), ) inputs = AutoTcorrelate.input_spec() @@ -33,7 +57,11 @@ def test_AutoTcorrelate_inputs(): def test_AutoTcorrelate_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = AutoTcorrelate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Autobox.py b/nipype/interfaces/afni/tests/test_auto_Autobox.py index 
eda0062a6b..8a13b14742 100644 --- a/nipype/interfaces/afni/tests/test_auto_Autobox.py +++ b/nipype/interfaces/afni/tests/test_auto_Autobox.py @@ -4,13 +4,26 @@ def test_Autobox_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="-input %s", copyfile=False, extensions=None, mandatory=True + argstr="-input %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + no_clustering=dict( + argstr="-noclust", + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - no_clustering=dict(argstr="-noclust"), - num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -18,7 +31,9 @@ def test_Autobox_inputs(): name_template="%s_autobox", ), outputtype=dict(), - padding=dict(argstr="-npad %d"), + padding=dict( + argstr="-npad %d", + ), ) inputs = Autobox.input_spec() @@ -29,7 +44,9 @@ def test_Autobox_inputs(): def test_Autobox_outputs(): output_map = dict( - out_file=dict(extensions=None), + out_file=dict( + extensions=None, + ), x_max=dict(), x_min=dict(), y_max=dict(), diff --git a/nipype/interfaces/afni/tests/test_auto_Automask.py b/nipype/interfaces/afni/tests/test_auto_Automask.py index 5c21392ddf..1c2a3c4ee9 100644 --- a/nipype/interfaces/afni/tests/test_auto_Automask.py +++ b/nipype/interfaces/afni/tests/test_auto_Automask.py @@ -4,21 +4,39 @@ def test_Automask_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), brain_file=dict( argstr="-apply_prefix %s", extensions=None, name_source="in_file", name_template="%s_masked", ), - clfrac=dict(argstr="-clfrac %s"), - dilate=dict(argstr="-dilate %s"), - environ=dict(nohash=True, usedefault=True), - erode=dict(argstr="-erode %s"), + clfrac=dict( + argstr="-clfrac %s", + ), + dilate=dict( + argstr="-dilate %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + erode=dict( + argstr="-erode %s", + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -35,7 +53,14 @@ def test_Automask_inputs(): def test_Automask_outputs(): - output_map = dict(brain_file=dict(extensions=None), out_file=dict(extensions=None)) + output_map = dict( + brain_file=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + ) outputs = Automask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Axialize.py b/nipype/interfaces/afni/tests/test_auto_Axialize.py index 0145b89cd1..bac640d601 100644 --- a/nipype/interfaces/afni/tests/test_auto_Axialize.py +++ b/nipype/interfaces/afni/tests/test_auto_Axialize.py @@ -4,15 +4,35 @@ def test_Axialize_inputs(): input_map = dict( - args=dict(argstr="%s"), - axial=dict(argstr="-axial", xor=["coronal", "sagittal"]), - coronal=dict(argstr="-coronal", xor=["sagittal", "axial"]), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + axial=dict( + argstr="-axial", + xor=["coronal", "sagittal"], + ), + coronal=dict( + argstr="-coronal", + xor=["sagittal", "axial"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, 
position=-2 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-2, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + orientation=dict( + argstr="-orient %s", ), - num_threads=dict(nohash=True, usedefault=True), - orientation=dict(argstr="-orient %s"), out_file=dict( argstr="-prefix %s", extensions=None, @@ -20,8 +40,13 @@ def test_Axialize_inputs(): name_template="%s_axialize", ), outputtype=dict(), - sagittal=dict(argstr="-sagittal", xor=["coronal", "axial"]), - verb=dict(argstr="-verb"), + sagittal=dict( + argstr="-sagittal", + xor=["coronal", "axial"], + ), + verb=dict( + argstr="-verb", + ), ) inputs = Axialize.input_spec() @@ -31,7 +56,11 @@ def test_Axialize_inputs(): def test_Axialize_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Axialize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Bandpass.py b/nipype/interfaces/afni/tests/test_auto_Bandpass.py index d361c4ed2c..8ae9966240 100644 --- a/nipype/interfaces/afni/tests/test_auto_Bandpass.py +++ b/nipype/interfaces/afni/tests/test_auto_Bandpass.py @@ -4,25 +4,70 @@ def test_Bandpass_inputs(): input_map = dict( - args=dict(argstr="%s"), - automask=dict(argstr="-automask"), - blur=dict(argstr="-blur %f"), - despike=dict(argstr="-despike"), - environ=dict(nohash=True, usedefault=True), - highpass=dict(argstr="%f", mandatory=True, position=-3), + args=dict( + argstr="%s", + ), + automask=dict( + argstr="-automask", + ), + blur=dict( + argstr="-blur %f", + ), + despike=dict( + argstr="-despike", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + highpass=dict( + argstr="%f", + mandatory=True, + position=-3, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 - ), - localPV=dict(argstr="-localPV %f"), - lowpass=dict(argstr="%f", mandatory=True, position=-2), - mask=dict(argstr="-mask %s", extensions=None, position=2), - nfft=dict(argstr="-nfft %d"), - no_detrend=dict(argstr="-nodetrend"), - normalize=dict(argstr="-norm"), - notrans=dict(argstr="-notrans"), - num_threads=dict(nohash=True, usedefault=True), - orthogonalize_dset=dict(argstr="-dsort %s", extensions=None), - orthogonalize_file=dict(argstr="-ort %s"), + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + localPV=dict( + argstr="-localPV %f", + ), + lowpass=dict( + argstr="%f", + mandatory=True, + position=-2, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + position=2, + ), + nfft=dict( + argstr="-nfft %d", + ), + no_detrend=dict( + argstr="-nodetrend", + ), + normalize=dict( + argstr="-norm", + ), + notrans=dict( + argstr="-notrans", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + orthogonalize_dset=dict( + argstr="-dsort %s", + extensions=None, + ), + orthogonalize_file=dict( + argstr="-ort %s", + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -31,7 +76,9 @@ def test_Bandpass_inputs(): position=1, ), outputtype=dict(), - tr=dict(argstr="-dt %f"), + tr=dict( + argstr="-dt %f", + ), ) inputs = Bandpass.input_spec() @@ -41,7 +88,11 @@ def test_Bandpass_inputs(): def test_Bandpass_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Bandpass.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_BlurInMask.py 
b/nipype/interfaces/afni/tests/test_auto_BlurInMask.py index 9e66102557..91114611dc 100644 --- a/nipype/interfaces/afni/tests/test_auto_BlurInMask.py +++ b/nipype/interfaces/afni/tests/test_auto_BlurInMask.py @@ -4,11 +4,23 @@ def test_BlurInMask_inputs(): input_map = dict( - args=dict(argstr="%s"), - automask=dict(argstr="-automask"), - environ=dict(nohash=True, usedefault=True), - float_out=dict(argstr="-float"), - fwhm=dict(argstr="-FWHM %f", mandatory=True), + args=dict( + argstr="%s", + ), + automask=dict( + argstr="-automask", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + float_out=dict( + argstr="-float", + ), + fwhm=dict( + argstr="-FWHM %f", + mandatory=True, + ), in_file=dict( argstr="-input %s", copyfile=False, @@ -16,10 +28,22 @@ def test_BlurInMask_inputs(): mandatory=True, position=1, ), - mask=dict(argstr="-mask %s", extensions=None), - multimask=dict(argstr="-Mmask %s", extensions=None), - num_threads=dict(nohash=True, usedefault=True), - options=dict(argstr="%s", position=2), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + multimask=dict( + argstr="-Mmask %s", + extensions=None, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + options=dict( + argstr="%s", + position=2, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -28,7 +52,9 @@ def test_BlurInMask_inputs(): position=-1, ), outputtype=dict(), - preserve=dict(argstr="-preserve"), + preserve=dict( + argstr="-preserve", + ), ) inputs = BlurInMask.input_spec() @@ -38,7 +64,11 @@ def test_BlurInMask_inputs(): def test_BlurInMask_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = BlurInMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py b/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py index 0af245f3dc..f164ae815e 100644 --- a/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py +++ b/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py @@ -4,15 +4,39 @@ def test_BlurToFWHM_inputs(): input_map = dict( - args=dict(argstr="%s"), - automask=dict(argstr="-automask"), - blurmaster=dict(argstr="-blurmaster %s", extensions=None), - environ=dict(nohash=True, usedefault=True), - fwhm=dict(argstr="-FWHM %f"), - fwhmxy=dict(argstr="-FWHMxy %f"), - in_file=dict(argstr="-input %s", extensions=None, mandatory=True), - mask=dict(argstr="-mask %s", extensions=None), - num_threads=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + automask=dict( + argstr="-automask", + ), + blurmaster=dict( + argstr="-blurmaster %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fwhm=dict( + argstr="-FWHM %f", + ), + fwhmxy=dict( + argstr="-FWHMxy %f", + ), + in_file=dict( + argstr="-input %s", + extensions=None, + mandatory=True, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -29,7 +53,11 @@ def test_BlurToFWHM_inputs(): def test_BlurToFWHM_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = BlurToFWHM.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_BrickStat.py b/nipype/interfaces/afni/tests/test_auto_BrickStat.py index 4d1d10c5d0..a366953a5b 100644 --- 
a/nipype/interfaces/afni/tests/test_auto_BrickStat.py +++ b/nipype/interfaces/afni/tests/test_auto_BrickStat.py @@ -4,17 +4,46 @@ def test_BrickStat_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), - mask=dict(argstr="-mask %s", extensions=None, position=2), - max=dict(argstr="-max"), - mean=dict(argstr="-mean"), - min=dict(argstr="-min", position=1), - percentile=dict(argstr="-percentile %.3f %.3f %.3f"), - slow=dict(argstr="-slow"), - sum=dict(argstr="-sum"), - var=dict(argstr="-var"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + position=2, + ), + max=dict( + argstr="-max", + ), + mean=dict( + argstr="-mean", + ), + min=dict( + argstr="-min", + position=1, + ), + percentile=dict( + argstr="-percentile %.3f %.3f %.3f", + ), + slow=dict( + argstr="-slow", + ), + sum=dict( + argstr="-sum", + ), + var=dict( + argstr="-var", + ), ) inputs = BrickStat.input_spec() @@ -24,7 +53,9 @@ def test_BrickStat_inputs(): def test_BrickStat_outputs(): - output_map = dict(min_val=dict()) + output_map = dict( + min_val=dict(), + ) outputs = BrickStat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Bucket.py b/nipype/interfaces/afni/tests/test_auto_Bucket.py index 9694294ada..34dbd18bc2 100644 --- a/nipype/interfaces/afni/tests/test_auto_Bucket.py +++ b/nipype/interfaces/afni/tests/test_auto_Bucket.py @@ -4,11 +4,27 @@ def test_Bucket_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", mandatory=True, position=-1), - num_threads=dict(nohash=True, usedefault=True), - out_file=dict(argstr="-prefix %s", extensions=None, name_template="buck"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr="-prefix %s", + extensions=None, + name_template="buck", + ), outputtype=dict(), ) inputs = Bucket.input_spec() @@ -19,7 +35,11 @@ def test_Bucket_inputs(): def test_Bucket_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Bucket.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Calc.py b/nipype/interfaces/afni/tests/test_auto_Calc.py index 0b4947262d..dc50380317 100644 --- a/nipype/interfaces/afni/tests/test_auto_Calc.py +++ b/nipype/interfaces/afni/tests/test_auto_Calc.py @@ -4,14 +4,42 @@ def test_Calc_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - expr=dict(argstr='-expr "%s"', mandatory=True, position=3), - in_file_a=dict(argstr="-a %s", extensions=None, mandatory=True, position=0), - in_file_b=dict(argstr="-b %s", extensions=None, position=1), - in_file_c=dict(argstr="-c %s", extensions=None, position=2), - num_threads=dict(nohash=True, usedefault=True), - other=dict(argstr="", extensions=None), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + expr=dict( + argstr='-expr "%s"', + mandatory=True, + position=3, + ), + in_file_a=dict( + 
argstr="-a %s", + extensions=None, + mandatory=True, + position=0, + ), + in_file_b=dict( + argstr="-b %s", + extensions=None, + position=1, + ), + in_file_c=dict( + argstr="-c %s", + extensions=None, + position=2, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + other=dict( + argstr="", + extensions=None, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -19,10 +47,16 @@ def test_Calc_inputs(): name_template="%s_calc", ), outputtype=dict(), - overwrite=dict(argstr="-overwrite"), + overwrite=dict( + argstr="-overwrite", + ), single_idx=dict(), - start_idx=dict(requires=["stop_idx"]), - stop_idx=dict(requires=["start_idx"]), + start_idx=dict( + requires=["stop_idx"], + ), + stop_idx=dict( + requires=["start_idx"], + ), ) inputs = Calc.input_spec() @@ -32,7 +66,11 @@ def test_Calc_inputs(): def test_Calc_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Calc.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Cat.py b/nipype/interfaces/afni/tests/test_auto_Cat.py index d6b8b796c8..e5c76b34b1 100644 --- a/nipype/interfaces/afni/tests/test_auto_Cat.py +++ b/nipype/interfaces/afni/tests/test_auto_Cat.py @@ -4,21 +4,41 @@ def test_Cat_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_files=dict(argstr="%s", mandatory=True, position=-2), - keepfree=dict(argstr="-nonfixed"), - num_threads=dict(nohash=True, usedefault=True), - omitconst=dict(argstr="-nonconst"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr="%s", + mandatory=True, + position=-2, + ), + keepfree=dict( + argstr="-nonfixed", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + omitconst=dict( + argstr="-nonconst", + ), out_cint=dict( - xor=["out_format", "out_nice", "out_double", "out_fint", "out_int"] + xor=["out_format", "out_nice", "out_double", "out_fint", "out_int"], ), out_double=dict( argstr="-d", xor=["out_format", "out_nice", "out_int", "out_fint", "out_cint"], ), out_file=dict( - argstr="> %s", extensions=None, mandatory=True, position=-1, usedefault=True + argstr="> %s", + extensions=None, + mandatory=True, + position=-1, + usedefault=True, ), out_fint=dict( argstr="-f", @@ -37,8 +57,12 @@ def test_Cat_inputs(): xor=["out_format", "out_int", "out_double", "out_fint", "out_cint"], ), outputtype=dict(), - sel=dict(argstr="-sel %s"), - stack=dict(argstr="-stack"), + sel=dict( + argstr="-sel %s", + ), + stack=dict( + argstr="-stack", + ), ) inputs = Cat.input_spec() @@ -48,7 +72,11 @@ def test_Cat_inputs(): def test_Cat_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Cat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_CatMatvec.py b/nipype/interfaces/afni/tests/test_auto_CatMatvec.py index 83edf1e484..6b6c2630f6 100644 --- a/nipype/interfaces/afni/tests/test_auto_CatMatvec.py +++ b/nipype/interfaces/afni/tests/test_auto_CatMatvec.py @@ -4,13 +4,34 @@ def test_CatMatvec_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - fourxfour=dict(argstr="-4x4", xor=["matrix", "oneline"]), - in_file=dict(argstr="%s", mandatory=True, position=-2), - matrix=dict(argstr="-MATRIX", xor=["oneline", "fourxfour"]), - 
num_threads=dict(nohash=True, usedefault=True), - oneline=dict(argstr="-ONELINE", xor=["matrix", "fourxfour"]), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fourxfour=dict( + argstr="-4x4", + xor=["matrix", "oneline"], + ), + in_file=dict( + argstr="%s", + mandatory=True, + position=-2, + ), + matrix=dict( + argstr="-MATRIX", + xor=["oneline", "fourxfour"], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + oneline=dict( + argstr="-ONELINE", + xor=["matrix", "fourxfour"], + ), out_file=dict( argstr=" > %s", extensions=None, @@ -30,7 +51,11 @@ def test_CatMatvec_inputs(): def test_CatMatvec_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = CatMatvec.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_CenterMass.py b/nipype/interfaces/afni/tests/test_auto_CenterMass.py index bd293db0d4..7ec95938b4 100644 --- a/nipype/interfaces/afni/tests/test_auto_CenterMass.py +++ b/nipype/interfaces/afni/tests/test_auto_CenterMass.py @@ -4,9 +4,15 @@ def test_CenterMass_inputs(): input_map = dict( - all_rois=dict(argstr="-all_rois"), - args=dict(argstr="%s"), - automask=dict(argstr="-automask"), + all_rois=dict( + argstr="-all_rois", + ), + args=dict( + argstr="%s", + ), + automask=dict( + argstr="-automask", + ), cm_file=dict( argstr="> %s", extensions=None, @@ -16,14 +22,30 @@ def test_CenterMass_inputs(): name_template="%s_cm.out", position=-1, ), - environ=dict(nohash=True, usedefault=True), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-2 + argstr="%s", + copyfile=True, + extensions=None, + mandatory=True, + position=-2, + ), + local_ijk=dict( + argstr="-local_ijk", + ), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + roi_vals=dict( + argstr="-roi_vals %s", + ), + set_cm=dict( + argstr="-set %f %f %f", ), - local_ijk=dict(argstr="-local_ijk"), - mask_file=dict(argstr="-mask %s", extensions=None), - roi_vals=dict(argstr="-roi_vals %s"), - set_cm=dict(argstr="-set %f %f %f"), ) inputs = CenterMass.input_spec() @@ -34,7 +56,13 @@ def test_CenterMass_inputs(): def test_CenterMass_outputs(): output_map = dict( - cm=dict(), cm_file=dict(extensions=None), out_file=dict(extensions=None) + cm=dict(), + cm_file=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), ) outputs = CenterMass.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_ClipLevel.py b/nipype/interfaces/afni/tests/test_auto_ClipLevel.py index 5c7a318fc6..7a324fe7d4 100644 --- a/nipype/interfaces/afni/tests/test_auto_ClipLevel.py +++ b/nipype/interfaces/afni/tests/test_auto_ClipLevel.py @@ -4,12 +4,34 @@ def test_ClipLevel_inputs(): input_map = dict( - args=dict(argstr="%s"), - doall=dict(argstr="-doall", position=3, xor="grad"), - environ=dict(nohash=True, usedefault=True), - grad=dict(argstr="-grad %s", extensions=None, position=3, xor="doall"), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), - mfrac=dict(argstr="-mfrac %s", position=2), + args=dict( + argstr="%s", + ), + doall=dict( + argstr="-doall", + position=3, + xor="grad", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad=dict( + argstr="-grad %s", + extensions=None, + position=3, + xor="doall", + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), + 
mfrac=dict( + argstr="-mfrac %s", + position=2, + ), ) inputs = ClipLevel.input_spec() @@ -19,7 +41,9 @@ def test_ClipLevel_inputs(): def test_ClipLevel_outputs(): - output_map = dict(clip_val=dict()) + output_map = dict( + clip_val=dict(), + ) outputs = ClipLevel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_ConvertDset.py b/nipype/interfaces/afni/tests/test_auto_ConvertDset.py index 51e0dbb300..226eac97b5 100644 --- a/nipype/interfaces/afni/tests/test_auto_ConvertDset.py +++ b/nipype/interfaces/afni/tests/test_auto_ConvertDset.py @@ -4,14 +4,34 @@ def test_ConvertDset_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-input %s", extensions=None, mandatory=True, position=-2), - num_threads=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-input %s", + extensions=None, + mandatory=True, + position=-2, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( - argstr="-prefix %s", extensions=None, mandatory=True, position=-1 + argstr="-prefix %s", + extensions=None, + mandatory=True, + position=-1, + ), + out_type=dict( + argstr="-o_%s", + mandatory=True, + position=0, ), - out_type=dict(argstr="-o_%s", mandatory=True, position=0), outputtype=dict(), ) inputs = ConvertDset.input_spec() @@ -22,7 +42,11 @@ def test_ConvertDset_inputs(): def test_ConvertDset_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ConvertDset.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Copy.py b/nipype/interfaces/afni/tests/test_auto_Copy.py index c5d0c3736a..e96592b184 100644 --- a/nipype/interfaces/afni/tests/test_auto_Copy.py +++ b/nipype/interfaces/afni/tests/test_auto_Copy.py @@ -4,12 +4,24 @@ def test_Copy_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-2 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-2, + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="%s", extensions=None, @@ -18,7 +30,9 @@ def test_Copy_inputs(): position=-1, ), outputtype=dict(), - verbose=dict(argstr="-verb"), + verbose=dict( + argstr="-verb", + ), ) inputs = Copy.input_spec() @@ -28,7 +42,11 @@ def test_Copy_inputs(): def test_Copy_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Copy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Deconvolve.py b/nipype/interfaces/afni/tests/test_auto_Deconvolve.py index 3137a455d4..c4195807eb 100644 --- a/nipype/interfaces/afni/tests/test_auto_Deconvolve.py +++ b/nipype/interfaces/afni/tests/test_auto_Deconvolve.py @@ -4,55 +4,169 @@ def test_Deconvolve_inputs(): input_map = dict( - STATmask=dict(argstr="-STATmask %s", extensions=None), - TR_1D=dict(argstr="-TR_1D %f"), - allzero_OK=dict(argstr="-allzero_OK"), - args=dict(argstr="%s"), - automask=dict(argstr="-automask"), - 
cbucket=dict(argstr="-cbucket %s"), - censor=dict(argstr="-censor %s", extensions=None), - dmbase=dict(argstr="-dmbase"), - dname=dict(argstr="-D%s=%s"), - environ=dict(nohash=True, usedefault=True), - force_TR=dict(argstr="-force_TR %f", position=0), - fout=dict(argstr="-fout"), - global_times=dict(argstr="-global_times", xor=["local_times"]), - glt_label=dict(argstr="-glt_label %d %s...", position=-1, requires=["gltsym"]), - gltsym=dict(argstr="-gltsym 'SYM: %s'...", position=-2), - goforit=dict(argstr="-GOFORIT %i"), - in_files=dict(argstr="-input %s", copyfile=False, position=1, sep=" "), - input1D=dict(argstr="-input1D %s", extensions=None), - legendre=dict(argstr="-legendre"), - local_times=dict(argstr="-local_times", xor=["global_times"]), - mask=dict(argstr="-mask %s", extensions=None), - noblock=dict(argstr="-noblock"), - nocond=dict(argstr="-nocond"), - nodmbase=dict(argstr="-nodmbase"), - nofdr=dict(argstr="-noFDR"), - nolegendre=dict(argstr="-nolegendre"), - nosvd=dict(argstr="-nosvd"), - num_glt=dict(argstr="-num_glt %d", position=-3), - num_stimts=dict(argstr="-num_stimts %d", position=-6), - num_threads=dict(argstr="-jobs %d", nohash=True), - ortvec=dict(argstr="-ortvec %s %s"), - out_file=dict(argstr="-bucket %s", extensions=None), + STATmask=dict( + argstr="-STATmask %s", + extensions=None, + ), + TR_1D=dict( + argstr="-TR_1D %f", + ), + allzero_OK=dict( + argstr="-allzero_OK", + ), + args=dict( + argstr="%s", + ), + automask=dict( + argstr="-automask", + ), + cbucket=dict( + argstr="-cbucket %s", + ), + censor=dict( + argstr="-censor %s", + extensions=None, + ), + dmbase=dict( + argstr="-dmbase", + ), + dname=dict( + argstr="-D%s=%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + force_TR=dict( + argstr="-force_TR %f", + position=0, + ), + fout=dict( + argstr="-fout", + ), + global_times=dict( + argstr="-global_times", + xor=["local_times"], + ), + glt_label=dict( + argstr="-glt_label %d %s...", + position=-1, + requires=["gltsym"], + ), + gltsym=dict( + argstr="-gltsym 'SYM: %s'...", + position=-2, + ), + goforit=dict( + argstr="-GOFORIT %i", + ), + in_files=dict( + argstr="-input %s", + copyfile=False, + position=1, + sep=" ", + ), + input1D=dict( + argstr="-input1D %s", + extensions=None, + ), + legendre=dict( + argstr="-legendre", + ), + local_times=dict( + argstr="-local_times", + xor=["global_times"], + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + noblock=dict( + argstr="-noblock", + ), + nocond=dict( + argstr="-nocond", + ), + nodmbase=dict( + argstr="-nodmbase", + ), + nofdr=dict( + argstr="-noFDR", + ), + nolegendre=dict( + argstr="-nolegendre", + ), + nosvd=dict( + argstr="-nosvd", + ), + num_glt=dict( + argstr="-num_glt %d", + position=-3, + ), + num_stimts=dict( + argstr="-num_stimts %d", + position=-6, + ), + num_threads=dict( + argstr="-jobs %d", + nohash=True, + ), + ortvec=dict( + argstr="-ortvec %s %s", + ), + out_file=dict( + argstr="-bucket %s", + extensions=None, + ), outputtype=dict(), - polort=dict(argstr="-polort %d"), - rmsmin=dict(argstr="-rmsmin %f"), - rout=dict(argstr="-rout"), - sat=dict(argstr="-sat", xor=["trans"]), - singvals=dict(argstr="-singvals"), + polort=dict( + argstr="-polort %d", + ), + rmsmin=dict( + argstr="-rmsmin %f", + ), + rout=dict( + argstr="-rout", + ), + sat=dict( + argstr="-sat", + xor=["trans"], + ), + singvals=dict( + argstr="-singvals", + ), stim_label=dict( - argstr="-stim_label %d %s...", position=-4, requires=["stim_times"] - ), - stim_times=dict(argstr="-stim_times %d %s '%s'...", 
position=-5), - stim_times_subtract=dict(argstr="-stim_times_subtract %f"), - svd=dict(argstr="-svd"), - tout=dict(argstr="-tout"), - trans=dict(argstr="-trans", xor=["sat"]), - vout=dict(argstr="-vout"), - x1D=dict(argstr="-x1D %s", extensions=None), - x1D_stop=dict(argstr="-x1D_stop"), + argstr="-stim_label %d %s...", + position=-4, + requires=["stim_times"], + ), + stim_times=dict( + argstr="-stim_times %d %s '%s'...", + position=-5, + ), + stim_times_subtract=dict( + argstr="-stim_times_subtract %f", + ), + svd=dict( + argstr="-svd", + ), + tout=dict( + argstr="-tout", + ), + trans=dict( + argstr="-trans", + xor=["sat"], + ), + vout=dict( + argstr="-vout", + ), + x1D=dict( + argstr="-x1D %s", + extensions=None, + ), + x1D_stop=dict( + argstr="-x1D_stop", + ), ) inputs = Deconvolve.input_spec() @@ -63,10 +177,18 @@ def test_Deconvolve_inputs(): def test_Deconvolve_outputs(): output_map = dict( - cbucket=dict(extensions=None), - out_file=dict(extensions=None), - reml_script=dict(extensions=None), - x1D=dict(extensions=None), + cbucket=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + reml_script=dict( + extensions=None, + ), + x1D=dict( + extensions=None, + ), ) outputs = Deconvolve.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py b/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py index da68d04da2..afbc5a7d4f 100644 --- a/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py +++ b/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py @@ -4,16 +4,37 @@ def test_DegreeCentrality_inputs(): input_map = dict( - args=dict(argstr="%s"), - autoclip=dict(argstr="-autoclip"), - automask=dict(argstr="-automask"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + autoclip=dict( + argstr="-autoclip", + ), + automask=dict( + argstr="-automask", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + oned_file=dict( + argstr="-out1D %s", ), - mask=dict(argstr="-mask %s", extensions=None), - num_threads=dict(nohash=True, usedefault=True), - oned_file=dict(argstr="-out1D %s"), out_file=dict( argstr="-prefix %s", extensions=None, @@ -21,9 +42,15 @@ def test_DegreeCentrality_inputs(): name_template="%s_afni", ), outputtype=dict(), - polort=dict(argstr="-polort %d"), - sparsity=dict(argstr="-sparsity %f"), - thresh=dict(argstr="-thresh %f"), + polort=dict( + argstr="-polort %d", + ), + sparsity=dict( + argstr="-sparsity %f", + ), + thresh=dict( + argstr="-thresh %f", + ), ) inputs = DegreeCentrality.input_spec() @@ -33,7 +60,14 @@ def test_DegreeCentrality_inputs(): def test_DegreeCentrality_outputs(): - output_map = dict(oned_file=dict(extensions=None), out_file=dict(extensions=None)) + output_map = dict( + oned_file=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + ) outputs = DegreeCentrality.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Despike.py b/nipype/interfaces/afni/tests/test_auto_Despike.py index 025a88ef52..8835dd7e07 100644 --- a/nipype/interfaces/afni/tests/test_auto_Despike.py +++ b/nipype/interfaces/afni/tests/test_auto_Despike.py @@ -4,12 +4,24 @@ def test_Despike_inputs(): input_map = dict( - 
args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -26,7 +38,11 @@ def test_Despike_inputs(): def test_Despike_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Despike.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Detrend.py b/nipype/interfaces/afni/tests/test_auto_Detrend.py index c662529685..5edbdd74ee 100644 --- a/nipype/interfaces/afni/tests/test_auto_Detrend.py +++ b/nipype/interfaces/afni/tests/test_auto_Detrend.py @@ -4,12 +4,24 @@ def test_Detrend_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -26,7 +38,11 @@ def test_Detrend_inputs(): def test_Detrend_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Detrend.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Dot.py b/nipype/interfaces/afni/tests/test_auto_Dot.py index dd780b6815..9cf8083ab9 100644 --- a/nipype/interfaces/afni/tests/test_auto_Dot.py +++ b/nipype/interfaces/afni/tests/test_auto_Dot.py @@ -4,24 +4,64 @@ def test_Dot_inputs(): input_map = dict( - args=dict(argstr="%s"), - demean=dict(argstr="-demean"), - docoef=dict(argstr="-docoef"), - docor=dict(argstr="-docor"), - dodice=dict(argstr="-dodice"), - dodot=dict(argstr="-dodot"), - doeta2=dict(argstr="-doeta2"), - dosums=dict(argstr="-dosums"), - environ=dict(nohash=True, usedefault=True), - full=dict(argstr="-full"), - in_files=dict(argstr="%s ...", position=-2), - mask=dict(argstr="-mask %s", extensions=None), - mrange=dict(argstr="-mrange %s %s"), - num_threads=dict(nohash=True, usedefault=True), - out_file=dict(argstr=" |& tee %s", extensions=None, position=-1), + args=dict( + argstr="%s", + ), + demean=dict( + argstr="-demean", + ), + docoef=dict( + argstr="-docoef", + ), + docor=dict( + argstr="-docor", + ), + dodice=dict( + argstr="-dodice", + ), + dodot=dict( + argstr="-dodot", + ), + doeta2=dict( + argstr="-doeta2", + ), + dosums=dict( + argstr="-dosums", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + full=dict( + argstr="-full", + ), + in_files=dict( + argstr="%s ...", + position=-2, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + mrange=dict( + argstr="-mrange %s %s", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr=" |& tee %s", + extensions=None, + position=-1, + ), outputtype=dict(), - show_labels=dict(argstr="-show_labels"), - upper=dict(argstr="-upper"), + 
show_labels=dict( + argstr="-show_labels", + ), + upper=dict( + argstr="-upper", + ), ) inputs = Dot.input_spec() @@ -31,7 +71,11 @@ def test_Dot_inputs(): def test_Dot_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Dot.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_ECM.py b/nipype/interfaces/afni/tests/test_auto_ECM.py index 12364c46b4..030aaffe6a 100644 --- a/nipype/interfaces/afni/tests/test_auto_ECM.py +++ b/nipype/interfaces/afni/tests/test_auto_ECM.py @@ -4,20 +4,49 @@ def test_ECM_inputs(): input_map = dict( - args=dict(argstr="%s"), - autoclip=dict(argstr="-autoclip"), - automask=dict(argstr="-automask"), - environ=dict(nohash=True, usedefault=True), - eps=dict(argstr="-eps %f"), - fecm=dict(argstr="-fecm"), - full=dict(argstr="-full"), + args=dict( + argstr="%s", + ), + autoclip=dict( + argstr="-autoclip", + ), + automask=dict( + argstr="-automask", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + eps=dict( + argstr="-eps %f", + ), + fecm=dict( + argstr="-fecm", + ), + full=dict( + argstr="-full", + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + max_iter=dict( + argstr="-max_iter %d", + ), + memory=dict( + argstr="-memory %f", + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - mask=dict(argstr="-mask %s", extensions=None), - max_iter=dict(argstr="-max_iter %d"), - memory=dict(argstr="-memory %f"), - num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -25,11 +54,21 @@ def test_ECM_inputs(): name_template="%s_afni", ), outputtype=dict(), - polort=dict(argstr="-polort %d"), - scale=dict(argstr="-scale %f"), - shift=dict(argstr="-shift %f"), - sparsity=dict(argstr="-sparsity %f"), - thresh=dict(argstr="-thresh %f"), + polort=dict( + argstr="-polort %d", + ), + scale=dict( + argstr="-scale %f", + ), + shift=dict( + argstr="-shift %f", + ), + sparsity=dict( + argstr="-sparsity %f", + ), + thresh=dict( + argstr="-thresh %f", + ), ) inputs = ECM.input_spec() @@ -39,7 +78,11 @@ def test_ECM_inputs(): def test_ECM_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ECM.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Edge3.py b/nipype/interfaces/afni/tests/test_auto_Edge3.py index 966a24320a..45b49fd243 100644 --- a/nipype/interfaces/afni/tests/test_auto_Edge3.py +++ b/nipype/interfaces/afni/tests/test_auto_Edge3.py @@ -4,11 +4,24 @@ def test_Edge3_inputs(): input_map = dict( - args=dict(argstr="%s"), - datum=dict(argstr="-datum %s"), - environ=dict(nohash=True, usedefault=True), - fscale=dict(argstr="-fscale", xor=["gscale", "nscale", "scale_floats"]), - gscale=dict(argstr="-gscale", xor=["fscale", "nscale", "scale_floats"]), + args=dict( + argstr="%s", + ), + datum=dict( + argstr="-datum %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fscale=dict( + argstr="-fscale", + xor=["gscale", "nscale", "scale_floats"], + ), + gscale=dict( + argstr="-gscale", + xor=["fscale", "nscale", "scale_floats"], + ), in_file=dict( argstr="-input %s", copyfile=False, @@ -16,14 +29,27 @@ def test_Edge3_inputs(): 
mandatory=True, position=0, ), - nscale=dict(argstr="-nscale", xor=["fscale", "gscale", "scale_floats"]), - num_threads=dict(nohash=True, usedefault=True), - out_file=dict(argstr="-prefix %s", extensions=None, position=-1), + nscale=dict( + argstr="-nscale", + xor=["fscale", "gscale", "scale_floats"], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr="-prefix %s", + extensions=None, + position=-1, + ), outputtype=dict(), scale_floats=dict( - argstr="-scale_floats %f", xor=["fscale", "gscale", "nscale"] + argstr="-scale_floats %f", + xor=["fscale", "gscale", "nscale"], + ), + verbose=dict( + argstr="-verbose", ), - verbose=dict(argstr="-verbose"), ) inputs = Edge3.input_spec() @@ -33,7 +59,11 @@ def test_Edge3_inputs(): def test_Edge3_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Edge3.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Eval.py b/nipype/interfaces/afni/tests/test_auto_Eval.py index 29ca4f1433..748bf05dfd 100644 --- a/nipype/interfaces/afni/tests/test_auto_Eval.py +++ b/nipype/interfaces/afni/tests/test_auto_Eval.py @@ -4,15 +4,45 @@ def test_Eval_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - expr=dict(argstr='-expr "%s"', mandatory=True, position=3), - in_file_a=dict(argstr="-a %s", extensions=None, mandatory=True, position=0), - in_file_b=dict(argstr="-b %s", extensions=None, position=1), - in_file_c=dict(argstr="-c %s", extensions=None, position=2), - num_threads=dict(nohash=True, usedefault=True), - other=dict(argstr="", extensions=None), - out1D=dict(argstr="-1D"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + expr=dict( + argstr='-expr "%s"', + mandatory=True, + position=3, + ), + in_file_a=dict( + argstr="-a %s", + extensions=None, + mandatory=True, + position=0, + ), + in_file_b=dict( + argstr="-b %s", + extensions=None, + position=1, + ), + in_file_c=dict( + argstr="-c %s", + extensions=None, + position=2, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + other=dict( + argstr="", + extensions=None, + ), + out1D=dict( + argstr="-1D", + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -21,8 +51,12 @@ def test_Eval_inputs(): ), outputtype=dict(), single_idx=dict(), - start_idx=dict(requires=["stop_idx"]), - stop_idx=dict(requires=["start_idx"]), + start_idx=dict( + requires=["stop_idx"], + ), + stop_idx=dict( + requires=["start_idx"], + ), ) inputs = Eval.input_spec() @@ -32,7 +66,11 @@ def test_Eval_inputs(): def test_Eval_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Eval.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_FWHMx.py b/nipype/interfaces/afni/tests/test_auto_FWHMx.py index f1126d0e1d..06151c569a 100644 --- a/nipype/interfaces/afni/tests/test_auto_FWHMx.py +++ b/nipype/interfaces/afni/tests/test_auto_FWHMx.py @@ -4,18 +4,53 @@ def test_FWHMx_inputs(): input_map = dict( - acf=dict(argstr="-acf", usedefault=True), - args=dict(argstr="%s"), - arith=dict(argstr="-arith", xor=["geom"]), - automask=dict(argstr="-automask", usedefault=True), - combine=dict(argstr="-combine"), - compat=dict(argstr="-compat"), - demed=dict(argstr="-demed", xor=["detrend"]), - detrend=dict(argstr="-detrend", 
usedefault=True, xor=["demed"]), - environ=dict(nohash=True, usedefault=True), - geom=dict(argstr="-geom", xor=["arith"]), - in_file=dict(argstr="-input %s", extensions=None, mandatory=True), - mask=dict(argstr="-mask %s", extensions=None), + acf=dict( + argstr="-acf", + usedefault=True, + ), + args=dict( + argstr="%s", + ), + arith=dict( + argstr="-arith", + xor=["geom"], + ), + automask=dict( + argstr="-automask", + usedefault=True, + ), + combine=dict( + argstr="-combine", + ), + compat=dict( + argstr="-compat", + ), + demed=dict( + argstr="-demed", + xor=["detrend"], + ), + detrend=dict( + argstr="-detrend", + usedefault=True, + xor=["demed"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + geom=dict( + argstr="-geom", + xor=["arith"], + ), + in_file=dict( + argstr="-input %s", + extensions=None, + mandatory=True, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), out_detrend=dict( argstr="-detprefix %s", extensions=None, @@ -38,7 +73,9 @@ def test_FWHMx_inputs(): name_source="in_file", name_template="%s_subbricks.out", ), - unif=dict(argstr="-unif"), + unif=dict( + argstr="-unif", + ), ) inputs = FWHMx.input_spec() @@ -51,10 +88,18 @@ def test_FWHMx_outputs(): output_map = dict( acf_param=dict(), fwhm=dict(), - out_acf=dict(extensions=None), - out_detrend=dict(extensions=None), - out_file=dict(extensions=None), - out_subbricks=dict(extensions=None), + out_acf=dict( + extensions=None, + ), + out_detrend=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + out_subbricks=dict( + extensions=None, + ), ) outputs = FWHMx.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Fim.py b/nipype/interfaces/afni/tests/test_auto_Fim.py index 8be7f6e8f7..aea43391bc 100644 --- a/nipype/interfaces/afni/tests/test_auto_Fim.py +++ b/nipype/interfaces/afni/tests/test_auto_Fim.py @@ -4,11 +4,22 @@ def test_Fim_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - fim_thr=dict(argstr="-fim_thr %f", position=3), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fim_thr=dict( + argstr="-fim_thr %f", + position=3, + ), ideal_file=dict( - argstr="-ideal_file %s", extensions=None, mandatory=True, position=2 + argstr="-ideal_file %s", + extensions=None, + mandatory=True, + position=2, ), in_file=dict( argstr="-input %s", @@ -17,8 +28,14 @@ def test_Fim_inputs(): mandatory=True, position=1, ), - num_threads=dict(nohash=True, usedefault=True), - out=dict(argstr="-out %s", position=4), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out=dict( + argstr="-out %s", + position=4, + ), out_file=dict( argstr="-bucket %s", extensions=None, @@ -35,7 +52,11 @@ def test_Fim_inputs(): def test_Fim_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Fim.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Fourier.py b/nipype/interfaces/afni/tests/test_auto_Fourier.py index 2604f95de5..97764a2b9b 100644 --- a/nipype/interfaces/afni/tests/test_auto_Fourier.py +++ b/nipype/interfaces/afni/tests/test_auto_Fourier.py @@ -4,14 +4,32 @@ def test_Fourier_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - highpass=dict(argstr="-highpass %f", mandatory=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + highpass=dict( + argstr="-highpass %f", 
+ mandatory=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + lowpass=dict( + argstr="-lowpass %f", + mandatory=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - lowpass=dict(argstr="-lowpass %f", mandatory=True), - num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -19,7 +37,9 @@ def test_Fourier_inputs(): name_template="%s_fourier", ), outputtype=dict(), - retrend=dict(argstr="-retrend"), + retrend=dict( + argstr="-retrend", + ), ) inputs = Fourier.input_spec() @@ -29,7 +49,11 @@ def test_Fourier_inputs(): def test_Fourier_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Fourier.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_GCOR.py b/nipype/interfaces/afni/tests/test_auto_GCOR.py index c3521629c3..2e5f0f372d 100644 --- a/nipype/interfaces/afni/tests/test_auto_GCOR.py +++ b/nipype/interfaces/afni/tests/test_auto_GCOR.py @@ -4,8 +4,13 @@ def test_GCOR_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( argstr="-input %s", copyfile=False, @@ -13,9 +18,17 @@ def test_GCOR_inputs(): mandatory=True, position=-1, ), - mask=dict(argstr="-mask %s", copyfile=False, extensions=None), - nfirst=dict(argstr="-nfirst %d"), - no_demean=dict(argstr="-no_demean"), + mask=dict( + argstr="-mask %s", + copyfile=False, + extensions=None, + ), + nfirst=dict( + argstr="-nfirst %d", + ), + no_demean=dict( + argstr="-no_demean", + ), ) inputs = GCOR.input_spec() @@ -25,7 +38,9 @@ def test_GCOR_inputs(): def test_GCOR_outputs(): - output_map = dict(out=dict()) + output_map = dict( + out=dict(), + ) outputs = GCOR.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Hist.py b/nipype/interfaces/afni/tests/test_auto_Hist.py index 117b04f705..2263f3632b 100644 --- a/nipype/interfaces/afni/tests/test_auto_Hist.py +++ b/nipype/interfaces/afni/tests/test_auto_Hist.py @@ -4,9 +4,16 @@ def test_Hist_inputs(): input_map = dict( - args=dict(argstr="%s"), - bin_width=dict(argstr="-binwidth %f"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + bin_width=dict( + argstr="-binwidth %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( argstr="-input %s", copyfile=False, @@ -14,10 +21,19 @@ def test_Hist_inputs(): mandatory=True, position=1, ), - mask=dict(argstr="-mask %s", extensions=None), - max_value=dict(argstr="-max %f"), - min_value=dict(argstr="-min %f"), - nbin=dict(argstr="-nbin %d"), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + max_value=dict( + argstr="-max %f", + ), + min_value=dict( + argstr="-min %f", + ), + nbin=dict( + argstr="-nbin %d", + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -33,7 +49,10 @@ def test_Hist_inputs(): name_template="%s_hist.out", position=-1, ), - showhist=dict(argstr="-showhist", usedefault=True), + showhist=dict( + argstr="-showhist", + usedefault=True, + ), ) inputs = Hist.input_spec() @@ -43,7 +62,14 @@ def test_Hist_inputs(): def test_Hist_outputs(): - output_map = dict(out_file=dict(extensions=None), 
out_show=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + out_show=dict( + extensions=None, + ), + ) outputs = Hist.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_LFCD.py b/nipype/interfaces/afni/tests/test_auto_LFCD.py index fff85c330c..bd4b76baee 100644 --- a/nipype/interfaces/afni/tests/test_auto_LFCD.py +++ b/nipype/interfaces/afni/tests/test_auto_LFCD.py @@ -4,15 +4,34 @@ def test_LFCD_inputs(): input_map = dict( - args=dict(argstr="%s"), - autoclip=dict(argstr="-autoclip"), - automask=dict(argstr="-automask"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + autoclip=dict( + argstr="-autoclip", + ), + automask=dict( + argstr="-automask", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - mask=dict(argstr="-mask %s", extensions=None), - num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -20,8 +39,12 @@ def test_LFCD_inputs(): name_template="%s_afni", ), outputtype=dict(), - polort=dict(argstr="-polort %d"), - thresh=dict(argstr="-thresh %f"), + polort=dict( + argstr="-polort %d", + ), + thresh=dict( + argstr="-thresh %f", + ), ) inputs = LFCD.input_spec() @@ -31,7 +54,11 @@ def test_LFCD_inputs(): def test_LFCD_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = LFCD.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_LocalBistat.py b/nipype/interfaces/afni/tests/test_auto_LocalBistat.py index 87cb3b6dbd..2ffe29dda0 100644 --- a/nipype/interfaces/afni/tests/test_auto_LocalBistat.py +++ b/nipype/interfaces/afni/tests/test_auto_LocalBistat.py @@ -4,14 +4,41 @@ def test_LocalBistat_inputs(): input_map = dict( - args=dict(argstr="%s"), - automask=dict(argstr="-automask", xor=["weight_file"]), - environ=dict(nohash=True, usedefault=True), - in_file1=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - in_file2=dict(argstr="%s", extensions=None, mandatory=True, position=-1), - mask_file=dict(argstr="-mask %s", extensions=None), - neighborhood=dict(argstr="-nbhd '%s(%s)'", mandatory=True), - num_threads=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + automask=dict( + argstr="-automask", + xor=["weight_file"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file1=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + in_file2=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + neighborhood=dict( + argstr="-nbhd '%s(%s)'", + mandatory=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -21,8 +48,15 @@ def test_LocalBistat_inputs(): position=0, ), outputtype=dict(), - stat=dict(argstr="-stat %s...", mandatory=True), - weight_file=dict(argstr="-weight %s", extensions=None, xor=["automask"]), + stat=dict( + argstr="-stat %s...", + mandatory=True, + ), + weight_file=dict( + argstr="-weight %s", + extensions=None, + 
xor=["automask"], + ), ) inputs = LocalBistat.input_spec() @@ -32,7 +66,11 @@ def test_LocalBistat_inputs(): def test_LocalBistat_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = LocalBistat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Localstat.py b/nipype/interfaces/afni/tests/test_auto_Localstat.py index 1c9a102170..54c99b434d 100644 --- a/nipype/interfaces/afni/tests/test_auto_Localstat.py +++ b/nipype/interfaces/afni/tests/test_auto_Localstat.py @@ -4,15 +4,41 @@ def test_Localstat_inputs(): input_map = dict( - args=dict(argstr="%s"), - automask=dict(argstr="-automask"), - environ=dict(nohash=True, usedefault=True), - grid_rmode=dict(argstr="-grid_rmode %s", requires=["reduce_restore_grid"]), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), - mask_file=dict(argstr="-mask %s", extensions=None), - neighborhood=dict(argstr="-nbhd '%s(%s)'", mandatory=True), - nonmask=dict(argstr="-use_nonmask"), - num_threads=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + automask=dict( + argstr="-automask", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grid_rmode=dict( + argstr="-grid_rmode %s", + requires=["reduce_restore_grid"], + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + neighborhood=dict( + argstr="-nbhd '%s(%s)'", + mandatory=True, + ), + nonmask=dict( + argstr="-use_nonmask", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -22,18 +48,28 @@ def test_Localstat_inputs(): position=0, ), outputtype=dict(), - overwrite=dict(argstr="-overwrite"), - quiet=dict(argstr="-quiet"), + overwrite=dict( + argstr="-overwrite", + ), + quiet=dict( + argstr="-quiet", + ), reduce_grid=dict( - argstr="-reduce_grid %s", xor=["reduce_restore_grid", "reduce_max_vox"] + argstr="-reduce_grid %s", + xor=["reduce_restore_grid", "reduce_max_vox"], ), reduce_max_vox=dict( - argstr="-reduce_max_vox %s", xor=["reduce_restore_grid", "reduce_grid"] + argstr="-reduce_max_vox %s", + xor=["reduce_restore_grid", "reduce_grid"], ), reduce_restore_grid=dict( - argstr="-reduce_restore_grid %s", xor=["reduce_max_vox", "reduce_grid"] + argstr="-reduce_restore_grid %s", + xor=["reduce_max_vox", "reduce_grid"], + ), + stat=dict( + argstr="-stat %s...", + mandatory=True, ), - stat=dict(argstr="-stat %s...", mandatory=True), ) inputs = Localstat.input_spec() @@ -43,7 +79,11 @@ def test_Localstat_inputs(): def test_Localstat_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Localstat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_MaskTool.py b/nipype/interfaces/afni/tests/test_auto_MaskTool.py index c86f534deb..a0520df606 100644 --- a/nipype/interfaces/afni/tests/test_auto_MaskTool.py +++ b/nipype/interfaces/afni/tests/test_auto_MaskTool.py @@ -4,18 +4,49 @@ def test_MaskTool_inputs(): input_map = dict( - args=dict(argstr="%s"), - count=dict(argstr="-count", position=2), - datum=dict(argstr="-datum %s"), - dilate_inputs=dict(argstr="-dilate_inputs %s"), - dilate_results=dict(argstr="-dilate_results %s"), - environ=dict(nohash=True, usedefault=True), - fill_dirs=dict(argstr="-fill_dirs %s", 
requires=["fill_holes"]), - fill_holes=dict(argstr="-fill_holes"), - frac=dict(argstr="-frac %s"), - in_file=dict(argstr="-input %s", copyfile=False, mandatory=True, position=-1), - inter=dict(argstr="-inter"), - num_threads=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + count=dict( + argstr="-count", + position=2, + ), + datum=dict( + argstr="-datum %s", + ), + dilate_inputs=dict( + argstr="-dilate_inputs %s", + ), + dilate_results=dict( + argstr="-dilate_results %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fill_dirs=dict( + argstr="-fill_dirs %s", + requires=["fill_holes"], + ), + fill_holes=dict( + argstr="-fill_holes", + ), + frac=dict( + argstr="-frac %s", + ), + in_file=dict( + argstr="-input %s", + copyfile=False, + mandatory=True, + position=-1, + ), + inter=dict( + argstr="-inter", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -23,8 +54,12 @@ def test_MaskTool_inputs(): name_template="%s_mask", ), outputtype=dict(), - union=dict(argstr="-union"), - verbose=dict(argstr="-verb %s"), + union=dict( + argstr="-union", + ), + verbose=dict( + argstr="-verb %s", + ), ) inputs = MaskTool.input_spec() @@ -34,7 +69,11 @@ def test_MaskTool_inputs(): def test_MaskTool_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MaskTool.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Maskave.py b/nipype/interfaces/afni/tests/test_auto_Maskave.py index 20869c822d..ce7a07c95e 100644 --- a/nipype/interfaces/afni/tests/test_auto_Maskave.py +++ b/nipype/interfaces/afni/tests/test_auto_Maskave.py @@ -4,13 +4,29 @@ def test_Maskave_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-2 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-2, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + position=1, + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - mask=dict(argstr="-mask %s", extensions=None, position=1), - num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="> %s", extensions=None, @@ -20,7 +36,10 @@ def test_Maskave_inputs(): position=-1, ), outputtype=dict(), - quiet=dict(argstr="-quiet", position=2), + quiet=dict( + argstr="-quiet", + position=2, + ), ) inputs = Maskave.input_spec() @@ -30,7 +49,11 @@ def test_Maskave_inputs(): def test_Maskave_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Maskave.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Means.py b/nipype/interfaces/afni/tests/test_auto_Means.py index d3b30d8d7b..3aa3ada375 100644 --- a/nipype/interfaces/afni/tests/test_auto_Means.py +++ b/nipype/interfaces/afni/tests/test_auto_Means.py @@ -4,16 +4,43 @@ def test_Means_inputs(): input_map = dict( - args=dict(argstr="%s"), - count=dict(argstr="-count"), - datum=dict(argstr="-datum %s"), - environ=dict(nohash=True, usedefault=True), - in_file_a=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - in_file_b=dict(argstr="%s", extensions=None, position=-1), - 
mask_inter=dict(argstr="-mask_inter"), - mask_union=dict(argstr="-mask_union"), - non_zero=dict(argstr="-non_zero"), - num_threads=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + count=dict( + argstr="-count", + ), + datum=dict( + argstr="-datum %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file_a=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + in_file_b=dict( + argstr="%s", + extensions=None, + position=-1, + ), + mask_inter=dict( + argstr="-mask_inter", + ), + mask_union=dict( + argstr="-mask_union", + ), + non_zero=dict( + argstr="-non_zero", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -21,10 +48,18 @@ def test_Means_inputs(): name_template="%s_mean", ), outputtype=dict(), - scale=dict(argstr="-%sscale"), - sqr=dict(argstr="-sqr"), - std_dev=dict(argstr="-stdev"), - summ=dict(argstr="-sum"), + scale=dict( + argstr="-%sscale", + ), + sqr=dict( + argstr="-sqr", + ), + std_dev=dict( + argstr="-stdev", + ), + summ=dict( + argstr="-sum", + ), ) inputs = Means.input_spec() @@ -34,7 +69,11 @@ def test_Means_inputs(): def test_Means_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Means.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Merge.py b/nipype/interfaces/afni/tests/test_auto_Merge.py index b1e228da45..ac974184ea 100644 --- a/nipype/interfaces/afni/tests/test_auto_Merge.py +++ b/nipype/interfaces/afni/tests/test_auto_Merge.py @@ -4,12 +4,30 @@ def test_Merge_inputs(): input_map = dict( - args=dict(argstr="%s"), - blurfwhm=dict(argstr="-1blur_fwhm %d", units="mm"), - doall=dict(argstr="-doall"), - environ=dict(nohash=True, usedefault=True), - in_files=dict(argstr="%s", copyfile=False, mandatory=True, position=-1), - num_threads=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + blurfwhm=dict( + argstr="-1blur_fwhm %d", + units="mm", + ), + doall=dict( + argstr="-doall", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr="%s", + copyfile=False, + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -26,7 +44,11 @@ def test_Merge_inputs(): def test_Merge_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Merge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_NetCorr.py b/nipype/interfaces/afni/tests/test_auto_NetCorr.py index ff6f2cd7c2..e613dc13eb 100644 --- a/nipype/interfaces/afni/tests/test_auto_NetCorr.py +++ b/nipype/interfaces/afni/tests/test_auto_NetCorr.py @@ -4,15 +4,40 @@ def test_NetCorr_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - fish_z=dict(argstr="-fish_z"), - ignore_LT=dict(argstr="-ignore_LT"), - in_file=dict(argstr="-inset %s", extensions=None, mandatory=True), - in_rois=dict(argstr="-in_rois %s", extensions=None, mandatory=True), - mask=dict(argstr="-mask %s", extensions=None), - nifti=dict(argstr="-nifti"), - num_threads=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fish_z=dict( + argstr="-fish_z", + ), + ignore_LT=dict( + 
argstr="-ignore_LT", + ), + in_file=dict( + argstr="-inset %s", + extensions=None, + mandatory=True, + ), + in_rois=dict( + argstr="-in_rois %s", + extensions=None, + mandatory=True, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + nifti=dict( + argstr="-nifti", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -20,17 +45,38 @@ def test_NetCorr_inputs(): name_template="%s_netcorr", position=1, ), - output_mask_nonnull=dict(argstr="-output_mask_nonnull"), + output_mask_nonnull=dict( + argstr="-output_mask_nonnull", + ), outputtype=dict(), - part_corr=dict(argstr="-part_corr"), - push_thru_many_zeros=dict(argstr="-push_thru_many_zeros"), - ts_indiv=dict(argstr="-ts_indiv"), - ts_label=dict(argstr="-ts_label"), - ts_out=dict(argstr="-ts_out"), - ts_wb_Z=dict(argstr="-ts_wb_Z"), - ts_wb_corr=dict(argstr="-ts_wb_corr"), - ts_wb_strlabel=dict(argstr="-ts_wb_strlabel"), - weight_ts=dict(argstr="-weight_ts %s", extensions=None), + part_corr=dict( + argstr="-part_corr", + ), + push_thru_many_zeros=dict( + argstr="-push_thru_many_zeros", + ), + ts_indiv=dict( + argstr="-ts_indiv", + ), + ts_label=dict( + argstr="-ts_label", + ), + ts_out=dict( + argstr="-ts_out", + ), + ts_wb_Z=dict( + argstr="-ts_wb_Z", + ), + ts_wb_corr=dict( + argstr="-ts_wb_corr", + ), + ts_wb_strlabel=dict( + argstr="-ts_wb_strlabel", + ), + weight_ts=dict( + argstr="-weight_ts %s", + extensions=None, + ), ) inputs = NetCorr.input_spec() @@ -40,7 +86,12 @@ def test_NetCorr_inputs(): def test_NetCorr_outputs(): - output_map = dict(out_corr_maps=dict(), out_corr_matrix=dict(extensions=None)) + output_map = dict( + out_corr_maps=dict(), + out_corr_matrix=dict( + extensions=None, + ), + ) outputs = NetCorr.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Notes.py b/nipype/interfaces/afni/tests/test_auto_Notes.py index ba91f3410f..c83a70f0f2 100644 --- a/nipype/interfaces/afni/tests/test_auto_Notes.py +++ b/nipype/interfaces/afni/tests/test_auto_Notes.py @@ -4,19 +4,46 @@ def test_Notes_inputs(): input_map = dict( - add=dict(argstr='-a "%s"'), - add_history=dict(argstr='-h "%s"', xor=["rep_history"]), - args=dict(argstr="%s"), - delete=dict(argstr="-d %d"), - environ=dict(nohash=True, usedefault=True), + add=dict( + argstr='-a "%s"', + ), + add_history=dict( + argstr='-h "%s"', + xor=["rep_history"], + ), + args=dict( + argstr="%s", + ), + delete=dict( + argstr="-d %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr="%s", + extensions=None, ), - num_threads=dict(nohash=True, usedefault=True), - out_file=dict(argstr="%s", extensions=None), outputtype=dict(), - rep_history=dict(argstr='-HH "%s"', xor=["add_history"]), - ses=dict(argstr="-ses"), + rep_history=dict( + argstr='-HH "%s"', + xor=["add_history"], + ), + ses=dict( + argstr="-ses", + ), ) inputs = Notes.input_spec() @@ -26,7 +53,11 @@ def test_Notes_inputs(): def test_Notes_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Notes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py 
b/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py index d58c26b5f0..85fb2d3495 100644 --- a/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py +++ b/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py @@ -4,10 +4,20 @@ def test_NwarpAdjust_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_files=dict(argstr="-source %s"), - num_threads=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr="-source %s", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -17,7 +27,10 @@ def test_NwarpAdjust_inputs(): requires=["in_files"], ), outputtype=dict(), - warps=dict(argstr="-nwarp %s", mandatory=True), + warps=dict( + argstr="-nwarp %s", + mandatory=True, + ), ) inputs = NwarpAdjust.input_spec() @@ -27,7 +40,11 @@ def test_NwarpAdjust_inputs(): def test_NwarpAdjust_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = NwarpAdjust.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpApply.py b/nipype/interfaces/afni/tests/test_auto_NwarpApply.py index dd2cb1cdd8..c9ebd2853e 100644 --- a/nipype/interfaces/afni/tests/test_auto_NwarpApply.py +++ b/nipype/interfaces/afni/tests/test_auto_NwarpApply.py @@ -4,23 +4,52 @@ def test_NwarpApply_inputs(): input_map = dict( - ainterp=dict(argstr="-ainterp %s"), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-source %s", mandatory=True), - interp=dict(argstr="-interp %s", usedefault=True), - inv_warp=dict(argstr="-iwarp"), - master=dict(argstr="-master %s", extensions=None), + ainterp=dict( + argstr="-ainterp %s", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-source %s", + mandatory=True, + ), + interp=dict( + argstr="-interp %s", + usedefault=True, + ), + inv_warp=dict( + argstr="-iwarp", + ), + master=dict( + argstr="-master %s", + extensions=None, + ), out_file=dict( argstr="-prefix %s", extensions=None, name_source="in_file", name_template="%s_Nwarp", ), - quiet=dict(argstr="-quiet", xor=["verb"]), - short=dict(argstr="-short"), - verb=dict(argstr="-verb", xor=["quiet"]), - warp=dict(argstr="-nwarp %s", mandatory=True), + quiet=dict( + argstr="-quiet", + xor=["verb"], + ), + short=dict( + argstr="-short", + ), + verb=dict( + argstr="-verb", + xor=["quiet"], + ), + warp=dict( + argstr="-nwarp %s", + mandatory=True, + ), ) inputs = NwarpApply.input_spec() @@ -30,7 +59,11 @@ def test_NwarpApply_inputs(): def test_NwarpApply_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = NwarpApply.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpCat.py b/nipype/interfaces/afni/tests/test_auto_NwarpCat.py index ea0d386d03..b89aade9b0 100644 --- a/nipype/interfaces/afni/tests/test_auto_NwarpCat.py +++ b/nipype/interfaces/afni/tests/test_auto_NwarpCat.py @@ -4,13 +4,32 @@ def test_NwarpCat_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - expad=dict(argstr="-expad %d"), - in_files=dict(argstr="%s", mandatory=True, position=-1), - interp=dict(argstr="-interp %s", usedefault=True), - 
inv_warp=dict(argstr="-iwarp"), - num_threads=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + expad=dict( + argstr="-expad %d", + ), + in_files=dict( + argstr="%s", + mandatory=True, + position=-1, + ), + interp=dict( + argstr="-interp %s", + usedefault=True, + ), + inv_warp=dict( + argstr="-iwarp", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -18,8 +37,12 @@ def test_NwarpCat_inputs(): name_template="%s_NwarpCat", ), outputtype=dict(), - space=dict(argstr="-space %s"), - verb=dict(argstr="-verb"), + space=dict( + argstr="-space %s", + ), + verb=dict( + argstr="-verb", + ), ) inputs = NwarpCat.input_spec() @@ -29,7 +52,11 @@ def test_NwarpCat_inputs(): def test_NwarpCat_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = NwarpCat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py b/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py index e81709db34..bb47517e27 100644 --- a/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py +++ b/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py @@ -4,29 +4,60 @@ def test_OneDToolPy_inputs(): input_map = dict( - args=dict(argstr="%s"), - censor_motion=dict(argstr="-censor_motion %f %s"), - censor_prev_TR=dict(argstr="-censor_prev_TR"), - demean=dict(argstr="-demean"), - derivative=dict(argstr="-derivative"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-infile %s", extensions=None, mandatory=True), + args=dict( + argstr="%s", + ), + censor_motion=dict( + argstr="-censor_motion %f %s", + ), + censor_prev_TR=dict( + argstr="-censor_prev_TR", + ), + demean=dict( + argstr="-demean", + ), + derivative=dict( + argstr="-derivative", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-infile %s", + extensions=None, + mandatory=True, + ), out_file=dict( - argstr="-write %s", extensions=None, xor=["show_cormat_warnings"] + argstr="-write %s", + extensions=None, + xor=["show_cormat_warnings"], ), outputtype=dict(), - py27_path=dict(usedefault=True), - set_nruns=dict(argstr="-set_nruns %d"), - show_censor_count=dict(argstr="-show_censor_count"), + py27_path=dict( + usedefault=True, + ), + set_nruns=dict( + argstr="-set_nruns %d", + ), + show_censor_count=dict( + argstr="-show_censor_count", + ), show_cormat_warnings=dict( argstr="-show_cormat_warnings |& tee %s", extensions=None, position=-1, xor=["out_file"], ), - show_indices_interest=dict(argstr="-show_indices_interest"), - show_trs_run=dict(argstr="-show_trs_run %d"), - show_trs_uncensored=dict(argstr="-show_trs_uncensored %s"), + show_indices_interest=dict( + argstr="-show_indices_interest", + ), + show_trs_run=dict( + argstr="-show_trs_run %d", + ), + show_trs_uncensored=dict( + argstr="-show_trs_uncensored %s", + ), ) inputs = OneDToolPy.input_spec() @@ -36,7 +67,11 @@ def test_OneDToolPy_inputs(): def test_OneDToolPy_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = OneDToolPy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_OutlierCount.py b/nipype/interfaces/afni/tests/test_auto_OutlierCount.py index 525c94f7bc..511c1ca8f2 100644 --- 
a/nipype/interfaces/afni/tests/test_auto_OutlierCount.py +++ b/nipype/interfaces/afni/tests/test_auto_OutlierCount.py @@ -4,15 +4,46 @@ def test_OutlierCount_inputs(): input_map = dict( - args=dict(argstr="%s"), - autoclip=dict(argstr="-autoclip", usedefault=True, xor=["mask"]), - automask=dict(argstr="-automask", usedefault=True, xor=["mask"]), - environ=dict(nohash=True, usedefault=True), - fraction=dict(argstr="-fraction", usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - interval=dict(argstr="-range", usedefault=True), - legendre=dict(argstr="-legendre", usedefault=True), - mask=dict(argstr="-mask %s", extensions=None, xor=["autoclip", "automask"]), + args=dict( + argstr="%s", + ), + autoclip=dict( + argstr="-autoclip", + usedefault=True, + xor=["mask"], + ), + automask=dict( + argstr="-automask", + usedefault=True, + xor=["mask"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fraction=dict( + argstr="-fraction", + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + interval=dict( + argstr="-range", + usedefault=True, + ), + legendre=dict( + argstr="-legendre", + usedefault=True, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + xor=["autoclip", "automask"], + ), out_file=dict( extensions=None, keep_extension=False, @@ -27,9 +58,16 @@ def test_OutlierCount_inputs(): name_template="%s_outliers", output_name="out_outliers", ), - polort=dict(argstr="-polort %d"), - qthr=dict(argstr="-qthr %.5f", usedefault=True), - save_outliers=dict(usedefault=True), + polort=dict( + argstr="-polort %d", + ), + qthr=dict( + argstr="-qthr %.5f", + usedefault=True, + ), + save_outliers=dict( + usedefault=True, + ), ) inputs = OutlierCount.input_spec() @@ -40,7 +78,12 @@ def test_OutlierCount_inputs(): def test_OutlierCount_outputs(): output_map = dict( - out_file=dict(extensions=None), out_outliers=dict(extensions=None) + out_file=dict( + extensions=None, + ), + out_outliers=dict( + extensions=None, + ), ) outputs = OutlierCount.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_QualityIndex.py b/nipype/interfaces/afni/tests/test_auto_QualityIndex.py index 899c9bb292..c759be87a4 100644 --- a/nipype/interfaces/afni/tests/test_auto_QualityIndex.py +++ b/nipype/interfaces/afni/tests/test_auto_QualityIndex.py @@ -4,14 +4,41 @@ def test_QualityIndex_inputs(): input_map = dict( - args=dict(argstr="%s"), - autoclip=dict(argstr="-autoclip", usedefault=True, xor=["mask"]), - automask=dict(argstr="-automask", usedefault=True, xor=["mask"]), - clip=dict(argstr="-clip %f"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - interval=dict(argstr="-range", usedefault=True), - mask=dict(argstr="-mask %s", extensions=None, xor=["autoclip", "automask"]), + args=dict( + argstr="%s", + ), + autoclip=dict( + argstr="-autoclip", + usedefault=True, + xor=["mask"], + ), + automask=dict( + argstr="-automask", + usedefault=True, + xor=["mask"], + ), + clip=dict( + argstr="-clip %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + interval=dict( + argstr="-range", + usedefault=True, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + xor=["autoclip", "automask"], + ), out_file=dict( argstr="> %s", extensions=None, @@ -20,8 +47,14 @@ def test_QualityIndex_inputs(): name_template="%s_tqual", position=-1, ), - 
quadrant=dict(argstr="-quadrant", usedefault=True), - spearman=dict(argstr="-spearman", usedefault=True), + quadrant=dict( + argstr="-quadrant", + usedefault=True, + ), + spearman=dict( + argstr="-spearman", + usedefault=True, + ), ) inputs = QualityIndex.input_spec() @@ -31,7 +64,11 @@ def test_QualityIndex_inputs(): def test_QualityIndex_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = QualityIndex.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Qwarp.py b/nipype/interfaces/afni/tests/test_auto_Qwarp.py index 619c2f5edd..181f7217dd 100644 --- a/nipype/interfaces/afni/tests/test_auto_Qwarp.py +++ b/nipype/interfaces/afni/tests/test_auto_Qwarp.py @@ -4,74 +4,199 @@ def test_Qwarp_inputs(): input_map = dict( - Qfinal=dict(argstr="-Qfinal"), - Qonly=dict(argstr="-Qonly"), - allineate=dict(argstr="-allineate"), - allineate_opts=dict(argstr="-allineate_opts %s", requires=["allineate"]), - allsave=dict(argstr="-allsave", xor=["nopadWARP", "duplo", "plusminus"]), - args=dict(argstr="%s"), - ballopt=dict(argstr="-ballopt", xor=["workhard", "boxopt"]), + Qfinal=dict( + argstr="-Qfinal", + ), + Qonly=dict( + argstr="-Qonly", + ), + allineate=dict( + argstr="-allineate", + ), + allineate_opts=dict( + argstr="-allineate_opts %s", + requires=["allineate"], + ), + allsave=dict( + argstr="-allsave", + xor=["nopadWARP", "duplo", "plusminus"], + ), + args=dict( + argstr="%s", + ), + ballopt=dict( + argstr="-ballopt", + xor=["workhard", "boxopt"], + ), base_file=dict( - argstr="-base %s", copyfile=False, extensions=None, mandatory=True + argstr="-base %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + baxopt=dict( + argstr="-boxopt", + xor=["workhard", "ballopt"], + ), + blur=dict( + argstr="-blur %s", ), - baxopt=dict(argstr="-boxopt", xor=["workhard", "ballopt"]), - blur=dict(argstr="-blur %s"), duplo=dict( argstr="-duplo", xor=["gridlist", "maxlev", "inilev", "iniwarp", "plusminus", "allsave"], ), - emask=dict(argstr="-emask %s", copyfile=False, extensions=None), - environ=dict(nohash=True, usedefault=True), - expad=dict(argstr="-expad %d", xor=["nopadWARP"]), + emask=dict( + argstr="-emask %s", + copyfile=False, + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + expad=dict( + argstr="-expad %d", + xor=["nopadWARP"], + ), gridlist=dict( argstr="-gridlist %s", copyfile=False, extensions=None, xor=["duplo", "plusminus"], ), - hel=dict(argstr="-hel", xor=["nmi", "mi", "lpc", "lpa", "pear"]), + hel=dict( + argstr="-hel", + xor=["nmi", "mi", "lpc", "lpa", "pear"], + ), in_file=dict( - argstr="-source %s", copyfile=False, extensions=None, mandatory=True - ), - inilev=dict(argstr="-inilev %d", xor=["duplo"]), - iniwarp=dict(argstr="-iniwarp %s", xor=["duplo"]), - iwarp=dict(argstr="-iwarp", xor=["plusminus"]), - lpa=dict(argstr="-lpa", xor=["nmi", "mi", "lpc", "hel", "pear"]), - lpc=dict(argstr="-lpc", position=-2, xor=["nmi", "mi", "hel", "lpa", "pear"]), - maxlev=dict(argstr="-maxlev %d", position=-1, xor=["duplo"]), - mi=dict(argstr="-mi", xor=["mi", "hel", "lpc", "lpa", "pear"]), - minpatch=dict(argstr="-minpatch %d"), - nmi=dict(argstr="-nmi", xor=["nmi", "hel", "lpc", "lpa", "pear"]), - noXdis=dict(argstr="-noXdis"), - noYdis=dict(argstr="-noYdis"), - noZdis=dict(argstr="-noZdis"), - noneg=dict(argstr="-noneg"), - nopad=dict(argstr="-nopad"), - nopadWARP=dict(argstr="-nopadWARP", xor=["allsave", "expad"]), 
- nopenalty=dict(argstr="-nopenalty"), - nowarp=dict(argstr="-nowarp"), - noweight=dict(argstr="-noweight"), - num_threads=dict(nohash=True, usedefault=True), + argstr="-source %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + inilev=dict( + argstr="-inilev %d", + xor=["duplo"], + ), + iniwarp=dict( + argstr="-iniwarp %s", + xor=["duplo"], + ), + iwarp=dict( + argstr="-iwarp", + xor=["plusminus"], + ), + lpa=dict( + argstr="-lpa", + xor=["nmi", "mi", "lpc", "hel", "pear"], + ), + lpc=dict( + argstr="-lpc", + position=-2, + xor=["nmi", "mi", "hel", "lpa", "pear"], + ), + maxlev=dict( + argstr="-maxlev %d", + position=-1, + xor=["duplo"], + ), + mi=dict( + argstr="-mi", + xor=["mi", "hel", "lpc", "lpa", "pear"], + ), + minpatch=dict( + argstr="-minpatch %d", + ), + nmi=dict( + argstr="-nmi", + xor=["nmi", "hel", "lpc", "lpa", "pear"], + ), + noXdis=dict( + argstr="-noXdis", + ), + noYdis=dict( + argstr="-noYdis", + ), + noZdis=dict( + argstr="-noZdis", + ), + noneg=dict( + argstr="-noneg", + ), + nopad=dict( + argstr="-nopad", + ), + nopadWARP=dict( + argstr="-nopadWARP", + xor=["allsave", "expad"], + ), + nopenalty=dict( + argstr="-nopenalty", + ), + nowarp=dict( + argstr="-nowarp", + ), + noweight=dict( + argstr="-noweight", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, name_source=["in_file"], name_template="ppp_%s", ), - out_weight_file=dict(argstr="-wtprefix %s", extensions=None), + out_weight_file=dict( + argstr="-wtprefix %s", + extensions=None, + ), outputtype=dict(), - overwrite=dict(argstr="-overwrite"), - pblur=dict(argstr="-pblur %s"), - pear=dict(argstr="-pear"), - penfac=dict(argstr="-penfac %f"), - plusminus=dict(argstr="-plusminus", xor=["duplo", "allsave", "iwarp"]), - quiet=dict(argstr="-quiet", xor=["verb"]), - resample=dict(argstr="-resample"), - verb=dict(argstr="-verb", xor=["quiet"]), - wball=dict(argstr="-wball %s", xor=["wmask"]), - weight=dict(argstr="-weight %s", extensions=None), - wmask=dict(argstr="-wpass %s %f", xor=["wball"]), - workhard=dict(argstr="-workhard", xor=["boxopt", "ballopt"]), + overwrite=dict( + argstr="-overwrite", + ), + pblur=dict( + argstr="-pblur %s", + ), + pear=dict( + argstr="-pear", + ), + penfac=dict( + argstr="-penfac %f", + ), + plusminus=dict( + argstr="-plusminus", + xor=["duplo", "allsave", "iwarp"], + ), + quiet=dict( + argstr="-quiet", + xor=["verb"], + ), + resample=dict( + argstr="-resample", + ), + verb=dict( + argstr="-verb", + xor=["quiet"], + ), + wball=dict( + argstr="-wball %s", + xor=["wmask"], + ), + weight=dict( + argstr="-weight %s", + extensions=None, + ), + wmask=dict( + argstr="-wpass %s %f", + xor=["wball"], + ), + workhard=dict( + argstr="-workhard", + xor=["boxopt", "ballopt"], + ), ) inputs = Qwarp.input_spec() @@ -82,11 +207,21 @@ def test_Qwarp_inputs(): def test_Qwarp_outputs(): output_map = dict( - base_warp=dict(extensions=None), - source_warp=dict(extensions=None), - warped_base=dict(extensions=None), - warped_source=dict(extensions=None), - weights=dict(extensions=None), + base_warp=dict( + extensions=None, + ), + source_warp=dict( + extensions=None, + ), + warped_base=dict( + extensions=None, + ), + warped_source=dict( + extensions=None, + ), + weights=dict( + extensions=None, + ), ) outputs = Qwarp.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py b/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py index 9ef9bf1b4b..4f386ab63b 100644 --- 
a/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py +++ b/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py @@ -4,71 +4,181 @@ def test_QwarpPlusMinus_inputs(): input_map = dict( - Qfinal=dict(argstr="-Qfinal"), - Qonly=dict(argstr="-Qonly"), - allineate=dict(argstr="-allineate"), - allineate_opts=dict(argstr="-allineate_opts %s", requires=["allineate"]), - allsave=dict(argstr="-allsave", xor=["nopadWARP", "duplo", "plusminus"]), - args=dict(argstr="%s"), - ballopt=dict(argstr="-ballopt", xor=["workhard", "boxopt"]), + Qfinal=dict( + argstr="-Qfinal", + ), + Qonly=dict( + argstr="-Qonly", + ), + allineate=dict( + argstr="-allineate", + ), + allineate_opts=dict( + argstr="-allineate_opts %s", + requires=["allineate"], + ), + allsave=dict( + argstr="-allsave", + xor=["nopadWARP", "duplo", "plusminus"], + ), + args=dict( + argstr="%s", + ), + ballopt=dict( + argstr="-ballopt", + xor=["workhard", "boxopt"], + ), base_file=dict( - argstr="-base %s", copyfile=False, extensions=None, mandatory=True + argstr="-base %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + baxopt=dict( + argstr="-boxopt", + xor=["workhard", "ballopt"], + ), + blur=dict( + argstr="-blur %s", ), - baxopt=dict(argstr="-boxopt", xor=["workhard", "ballopt"]), - blur=dict(argstr="-blur %s"), duplo=dict( argstr="-duplo", xor=["gridlist", "maxlev", "inilev", "iniwarp", "plusminus", "allsave"], ), - emask=dict(argstr="-emask %s", copyfile=False, extensions=None), - environ=dict(nohash=True, usedefault=True), - expad=dict(argstr="-expad %d", xor=["nopadWARP"]), + emask=dict( + argstr="-emask %s", + copyfile=False, + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + expad=dict( + argstr="-expad %d", + xor=["nopadWARP"], + ), gridlist=dict( argstr="-gridlist %s", copyfile=False, extensions=None, xor=["duplo", "plusminus"], ), - hel=dict(argstr="-hel", xor=["nmi", "mi", "lpc", "lpa", "pear"]), + hel=dict( + argstr="-hel", + xor=["nmi", "mi", "lpc", "lpa", "pear"], + ), in_file=dict( - argstr="-source %s", copyfile=False, extensions=None, mandatory=True - ), - inilev=dict(argstr="-inilev %d", xor=["duplo"]), - iniwarp=dict(argstr="-iniwarp %s", xor=["duplo"]), - iwarp=dict(argstr="-iwarp", xor=["plusminus"]), - lpa=dict(argstr="-lpa", xor=["nmi", "mi", "lpc", "hel", "pear"]), - lpc=dict(argstr="-lpc", position=-2, xor=["nmi", "mi", "hel", "lpa", "pear"]), - maxlev=dict(argstr="-maxlev %d", position=-1, xor=["duplo"]), - mi=dict(argstr="-mi", xor=["mi", "hel", "lpc", "lpa", "pear"]), - minpatch=dict(argstr="-minpatch %d"), - nmi=dict(argstr="-nmi", xor=["nmi", "hel", "lpc", "lpa", "pear"]), - noXdis=dict(argstr="-noXdis"), - noYdis=dict(argstr="-noYdis"), - noZdis=dict(argstr="-noZdis"), - noneg=dict(argstr="-noneg"), - nopad=dict(argstr="-nopad"), - nopadWARP=dict(argstr="-nopadWARP", xor=["allsave", "expad"]), - nopenalty=dict(argstr="-nopenalty"), - nowarp=dict(argstr="-nowarp"), - noweight=dict(argstr="-noweight"), - num_threads=dict(nohash=True, usedefault=True), + argstr="-source %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + inilev=dict( + argstr="-inilev %d", + xor=["duplo"], + ), + iniwarp=dict( + argstr="-iniwarp %s", + xor=["duplo"], + ), + iwarp=dict( + argstr="-iwarp", + xor=["plusminus"], + ), + lpa=dict( + argstr="-lpa", + xor=["nmi", "mi", "lpc", "hel", "pear"], + ), + lpc=dict( + argstr="-lpc", + position=-2, + xor=["nmi", "mi", "hel", "lpa", "pear"], + ), + maxlev=dict( + argstr="-maxlev %d", + position=-1, + xor=["duplo"], + ), + mi=dict( + 
argstr="-mi", + xor=["mi", "hel", "lpc", "lpa", "pear"], + ), + minpatch=dict( + argstr="-minpatch %d", + ), + nmi=dict( + argstr="-nmi", + xor=["nmi", "hel", "lpc", "lpa", "pear"], + ), + noXdis=dict( + argstr="-noXdis", + ), + noYdis=dict( + argstr="-noYdis", + ), + noZdis=dict( + argstr="-noZdis", + ), + noneg=dict( + argstr="-noneg", + ), + nopad=dict( + argstr="-nopad", + ), + nopadWARP=dict( + argstr="-nopadWARP", + xor=["allsave", "expad"], + ), + nopenalty=dict( + argstr="-nopenalty", + ), + nowarp=dict( + argstr="-nowarp", + ), + noweight=dict( + argstr="-noweight", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( - argstr="-prefix %s", extensions=None, position=0, usedefault=True + argstr="-prefix %s", + extensions=None, + position=0, + usedefault=True, + ), + out_weight_file=dict( + argstr="-wtprefix %s", + extensions=None, ), - out_weight_file=dict(argstr="-wtprefix %s", extensions=None), outputtype=dict(), - overwrite=dict(argstr="-overwrite"), - pblur=dict(argstr="-pblur %s"), - pear=dict(argstr="-pear"), - penfac=dict(argstr="-penfac %f"), + overwrite=dict( + argstr="-overwrite", + ), + pblur=dict( + argstr="-pblur %s", + ), + pear=dict( + argstr="-pear", + ), + penfac=dict( + argstr="-penfac %f", + ), plusminus=dict( argstr="-plusminus", position=1, usedefault=True, xor=["duplo", "allsave", "iwarp"], ), - quiet=dict(argstr="-quiet", xor=["verb"]), - resample=dict(argstr="-resample"), + quiet=dict( + argstr="-quiet", + xor=["verb"], + ), + resample=dict( + argstr="-resample", + ), source_file=dict( argstr="-source %s", copyfile=False, @@ -76,11 +186,26 @@ def test_QwarpPlusMinus_inputs(): extensions=None, new_name="in_file", ), - verb=dict(argstr="-verb", xor=["quiet"]), - wball=dict(argstr="-wball %s", xor=["wmask"]), - weight=dict(argstr="-weight %s", extensions=None), - wmask=dict(argstr="-wpass %s %f", xor=["wball"]), - workhard=dict(argstr="-workhard", xor=["boxopt", "ballopt"]), + verb=dict( + argstr="-verb", + xor=["quiet"], + ), + wball=dict( + argstr="-wball %s", + xor=["wmask"], + ), + weight=dict( + argstr="-weight %s", + extensions=None, + ), + wmask=dict( + argstr="-wpass %s %f", + xor=["wball"], + ), + workhard=dict( + argstr="-workhard", + xor=["boxopt", "ballopt"], + ), ) inputs = QwarpPlusMinus.input_spec() @@ -91,11 +216,21 @@ def test_QwarpPlusMinus_inputs(): def test_QwarpPlusMinus_outputs(): output_map = dict( - base_warp=dict(extensions=None), - source_warp=dict(extensions=None), - warped_base=dict(extensions=None), - warped_source=dict(extensions=None), - weights=dict(extensions=None), + base_warp=dict( + extensions=None, + ), + source_warp=dict( + extensions=None, + ), + warped_base=dict( + extensions=None, + ), + warped_source=dict( + extensions=None, + ), + weights=dict( + extensions=None, + ), ) outputs = QwarpPlusMinus.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_ROIStats.py b/nipype/interfaces/afni/tests/test_auto_ROIStats.py index c2bea7fbbc..36e7546990 100644 --- a/nipype/interfaces/afni/tests/test_auto_ROIStats.py +++ b/nipype/interfaces/afni/tests/test_auto_ROIStats.py @@ -4,12 +4,30 @@ def test_ROIStats_inputs(): input_map = dict( - args=dict(argstr="%s"), - debug=dict(argstr="-debug"), - environ=dict(nohash=True, usedefault=True), - format1D=dict(argstr="-1Dformat", xor=["format1DR"]), - format1DR=dict(argstr="-1DRformat", xor=["format1D"]), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + ), + 
environ=dict( + nohash=True, + usedefault=True, + ), + format1D=dict( + argstr="-1Dformat", + xor=["format1DR"], + ), + format1DR=dict( + argstr="-1DRformat", + xor=["format1D"], + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), mask=dict( argstr="-mask %s", deprecated="1.1.4", @@ -17,11 +35,22 @@ def test_ROIStats_inputs(): new_name="mask_file", position=3, ), - mask_f2short=dict(argstr="-mask_f2short"), - mask_file=dict(argstr="-mask %s", extensions=None), - nobriklab=dict(argstr="-nobriklab"), - nomeanout=dict(argstr="-nomeanout"), - num_roi=dict(argstr="-numroi %s"), + mask_f2short=dict( + argstr="-mask_f2short", + ), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + nobriklab=dict( + argstr="-nobriklab", + ), + nomeanout=dict( + argstr="-nomeanout", + ), + num_roi=dict( + argstr="-numroi %s", + ), out_file=dict( argstr="> %s", extensions=None, @@ -30,10 +59,20 @@ def test_ROIStats_inputs(): name_template="%s_roistat.1D", position=-1, ), - quiet=dict(argstr="-quiet"), - roisel=dict(argstr="-roisel %s", extensions=None), - stat=dict(argstr="%s..."), - zerofill=dict(argstr="-zerofill %s", requires=["num_roi"]), + quiet=dict( + argstr="-quiet", + ), + roisel=dict( + argstr="-roisel %s", + extensions=None, + ), + stat=dict( + argstr="%s...", + ), + zerofill=dict( + argstr="-zerofill %s", + requires=["num_roi"], + ), ) inputs = ROIStats.input_spec() @@ -43,7 +82,11 @@ def test_ROIStats_inputs(): def test_ROIStats_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ROIStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_ReHo.py b/nipype/interfaces/afni/tests/test_auto_ReHo.py index 695770ca1b..f9a1993ce1 100644 --- a/nipype/interfaces/afni/tests/test_auto_ReHo.py +++ b/nipype/interfaces/afni/tests/test_auto_ReHo.py @@ -4,16 +4,38 @@ def test_ReHo_inputs(): input_map = dict( - args=dict(argstr="%s"), - chi_sq=dict(argstr="-chi_sq"), + args=dict( + argstr="%s", + ), + chi_sq=dict( + argstr="-chi_sq", + ), ellipsoid=dict( - argstr="-neigh_X %s -neigh_Y %s -neigh_Z %s", xor=["sphere", "neighborhood"] + argstr="-neigh_X %s -neigh_Y %s -neigh_Z %s", + xor=["sphere", "neighborhood"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-inset %s", + extensions=None, + mandatory=True, + position=1, + ), + label_set=dict( + argstr="-in_rois %s", + extensions=None, + ), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + neighborhood=dict( + argstr="-nneigh %s", + xor=["sphere", "ellipsoid"], ), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-inset %s", extensions=None, mandatory=True, position=1), - label_set=dict(argstr="-in_rois %s", extensions=None), - mask_file=dict(argstr="-mask %s", extensions=None), - neighborhood=dict(argstr="-nneigh %s", xor=["sphere", "ellipsoid"]), out_file=dict( argstr="-prefix %s", extensions=None, @@ -22,8 +44,13 @@ def test_ReHo_inputs(): name_template="%s_reho", position=0, ), - overwrite=dict(argstr="-overwrite"), - sphere=dict(argstr="-neigh_RAD %s", xor=["neighborhood", "ellipsoid"]), + overwrite=dict( + argstr="-overwrite", + ), + sphere=dict( + argstr="-neigh_RAD %s", + xor=["neighborhood", "ellipsoid"], + ), ) inputs = ReHo.input_spec() @@ -33,7 +60,14 @@ def test_ReHo_inputs(): def test_ReHo_outputs(): - output_map = dict(out_file=dict(extensions=None), out_vals=dict(extensions=None)) + 
output_map = dict( + out_file=dict( + extensions=None, + ), + out_vals=dict( + extensions=None, + ), + ) outputs = ReHo.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Refit.py b/nipype/interfaces/afni/tests/test_auto_Refit.py index d04433e85e..205f7e0190 100644 --- a/nipype/interfaces/afni/tests/test_auto_Refit.py +++ b/nipype/interfaces/afni/tests/test_auto_Refit.py @@ -4,27 +4,69 @@ def test_Refit_inputs(): input_map = dict( - args=dict(argstr="%s"), - atrcopy=dict(argstr="-atrcopy %s %s"), - atrfloat=dict(argstr="-atrfloat %s %s"), - atrint=dict(argstr="-atrint %s %s"), - atrstring=dict(argstr="-atrstring %s %s"), - deoblique=dict(argstr="-deoblique"), - duporigin_file=dict(argstr="-duporigin %s", extensions=None), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + atrcopy=dict( + argstr="-atrcopy %s %s", + ), + atrfloat=dict( + argstr="-atrfloat %s %s", + ), + atrint=dict( + argstr="-atrint %s %s", + ), + atrstring=dict( + argstr="-atrstring %s %s", + ), + deoblique=dict( + argstr="-deoblique", + ), + duporigin_file=dict( + argstr="-duporigin %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-1 - ), - nosaveatr=dict(argstr="-nosaveatr"), - saveatr=dict(argstr="-saveatr"), - space=dict(argstr="-space %s"), - xdel=dict(argstr="-xdel %f"), - xorigin=dict(argstr="-xorigin %s"), - xyzscale=dict(argstr="-xyzscale %f"), - ydel=dict(argstr="-ydel %f"), - yorigin=dict(argstr="-yorigin %s"), - zdel=dict(argstr="-zdel %f"), - zorigin=dict(argstr="-zorigin %s"), + argstr="%s", + copyfile=True, + extensions=None, + mandatory=True, + position=-1, + ), + nosaveatr=dict( + argstr="-nosaveatr", + ), + saveatr=dict( + argstr="-saveatr", + ), + space=dict( + argstr="-space %s", + ), + xdel=dict( + argstr="-xdel %f", + ), + xorigin=dict( + argstr="-xorigin %s", + ), + xyzscale=dict( + argstr="-xyzscale %f", + ), + ydel=dict( + argstr="-ydel %f", + ), + yorigin=dict( + argstr="-yorigin %s", + ), + zdel=dict( + argstr="-zdel %f", + ), + zorigin=dict( + argstr="-zorigin %s", + ), ) inputs = Refit.input_spec() @@ -34,7 +76,11 @@ def test_Refit_inputs(): def test_Refit_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Refit.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Remlfit.py b/nipype/interfaces/afni/tests/test_auto_Remlfit.py index 4a1fcfad00..cfffeeb40e 100644 --- a/nipype/interfaces/afni/tests/test_auto_Remlfit.py +++ b/nipype/interfaces/afni/tests/test_auto_Remlfit.py @@ -4,46 +4,156 @@ def test_Remlfit_inputs(): input_map = dict( - STATmask=dict(argstr="-STATmask %s", extensions=None), - addbase=dict(argstr="-addbase %s", copyfile=False, sep=" "), - args=dict(argstr="%s"), - automask=dict(argstr="-automask", usedefault=True), - dsort=dict(argstr="-dsort %s", copyfile=False, extensions=None), - dsort_nods=dict(argstr="-dsort_nods", requires=["dsort"]), - environ=dict(nohash=True, usedefault=True), - errts_file=dict(argstr="-Rerrts %s", extensions=None), - fitts_file=dict(argstr="-Rfitts %s", extensions=None), - fout=dict(argstr="-fout"), - glt_file=dict(argstr="-Rglt %s", extensions=None), - gltsym=dict(argstr='-gltsym "%s" %s...'), - goforit=dict(argstr="-GOFORIT"), - in_files=dict(argstr='-input "%s"', copyfile=False, mandatory=True, 
sep=" "), - mask=dict(argstr="-mask %s", extensions=None), - matim=dict(argstr="-matim %s", extensions=None, xor=["matrix"]), - matrix=dict(argstr="-matrix %s", extensions=None, mandatory=True), - nobout=dict(argstr="-nobout"), - nodmbase=dict(argstr="-nodmbase", requires=["addbase", "dsort"]), - nofdr=dict(argstr="-noFDR"), - num_threads=dict(nohash=True, usedefault=True), - obeta=dict(argstr="-Obeta %s", extensions=None), - obuck=dict(argstr="-Obuck %s", extensions=None), - oerrts=dict(argstr="-Oerrts %s", extensions=None), - ofitts=dict(argstr="-Ofitts %s", extensions=None), - oglt=dict(argstr="-Oglt %s", extensions=None), - out_file=dict(argstr="-Rbuck %s", extensions=None), + STATmask=dict( + argstr="-STATmask %s", + extensions=None, + ), + addbase=dict( + argstr="-addbase %s", + copyfile=False, + sep=" ", + ), + args=dict( + argstr="%s", + ), + automask=dict( + argstr="-automask", + usedefault=True, + ), + dsort=dict( + argstr="-dsort %s", + copyfile=False, + extensions=None, + ), + dsort_nods=dict( + argstr="-dsort_nods", + requires=["dsort"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + errts_file=dict( + argstr="-Rerrts %s", + extensions=None, + ), + fitts_file=dict( + argstr="-Rfitts %s", + extensions=None, + ), + fout=dict( + argstr="-fout", + ), + glt_file=dict( + argstr="-Rglt %s", + extensions=None, + ), + gltsym=dict( + argstr='-gltsym "%s" %s...', + ), + goforit=dict( + argstr="-GOFORIT", + ), + in_files=dict( + argstr='-input "%s"', + copyfile=False, + mandatory=True, + sep=" ", + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + matim=dict( + argstr="-matim %s", + extensions=None, + xor=["matrix"], + ), + matrix=dict( + argstr="-matrix %s", + extensions=None, + mandatory=True, + ), + nobout=dict( + argstr="-nobout", + ), + nodmbase=dict( + argstr="-nodmbase", + requires=["addbase", "dsort"], + ), + nofdr=dict( + argstr="-noFDR", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + obeta=dict( + argstr="-Obeta %s", + extensions=None, + ), + obuck=dict( + argstr="-Obuck %s", + extensions=None, + ), + oerrts=dict( + argstr="-Oerrts %s", + extensions=None, + ), + ofitts=dict( + argstr="-Ofitts %s", + extensions=None, + ), + oglt=dict( + argstr="-Oglt %s", + extensions=None, + ), + out_file=dict( + argstr="-Rbuck %s", + extensions=None, + ), outputtype=dict(), - ovar=dict(argstr="-Ovar %s", extensions=None), - polort=dict(argstr="-polort %d", xor=["matrix"]), - quiet=dict(argstr="-quiet"), - rbeta_file=dict(argstr="-Rbeta %s", extensions=None), - rout=dict(argstr="-rout"), - slibase=dict(argstr="-slibase %s"), - slibase_sm=dict(argstr="-slibase_sm %s"), - tout=dict(argstr="-tout"), - usetemp=dict(argstr="-usetemp"), - var_file=dict(argstr="-Rvar %s", extensions=None), - verb=dict(argstr="-verb"), - wherr_file=dict(argstr="-Rwherr %s", extensions=None), + ovar=dict( + argstr="-Ovar %s", + extensions=None, + ), + polort=dict( + argstr="-polort %d", + xor=["matrix"], + ), + quiet=dict( + argstr="-quiet", + ), + rbeta_file=dict( + argstr="-Rbeta %s", + extensions=None, + ), + rout=dict( + argstr="-rout", + ), + slibase=dict( + argstr="-slibase %s", + ), + slibase_sm=dict( + argstr="-slibase_sm %s", + ), + tout=dict( + argstr="-tout", + ), + usetemp=dict( + argstr="-usetemp", + ), + var_file=dict( + argstr="-Rvar %s", + extensions=None, + ), + verb=dict( + argstr="-verb", + ), + wherr_file=dict( + argstr="-Rwherr %s", + extensions=None, + ), ) inputs = Remlfit.input_spec() @@ -54,19 +164,45 @@ def test_Remlfit_inputs(): def 
test_Remlfit_outputs(): output_map = dict( - errts_file=dict(extensions=None), - fitts_file=dict(extensions=None), - glt_file=dict(extensions=None), - obeta=dict(extensions=None), - obuck=dict(extensions=None), - oerrts=dict(extensions=None), - ofitts=dict(extensions=None), - oglt=dict(extensions=None), - out_file=dict(extensions=None), - ovar=dict(extensions=None), - rbeta_file=dict(extensions=None), - var_file=dict(extensions=None), - wherr_file=dict(extensions=None), + errts_file=dict( + extensions=None, + ), + fitts_file=dict( + extensions=None, + ), + glt_file=dict( + extensions=None, + ), + obeta=dict( + extensions=None, + ), + obuck=dict( + extensions=None, + ), + oerrts=dict( + extensions=None, + ), + ofitts=dict( + extensions=None, + ), + oglt=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + ovar=dict( + extensions=None, + ), + rbeta_file=dict( + extensions=None, + ), + var_file=dict( + extensions=None, + ), + wherr_file=dict( + extensions=None, + ), ) outputs = Remlfit.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Resample.py b/nipype/interfaces/afni/tests/test_auto_Resample.py index 3aef4f5b9b..792c03aa9f 100644 --- a/nipype/interfaces/afni/tests/test_auto_Resample.py +++ b/nipype/interfaces/afni/tests/test_auto_Resample.py @@ -4,8 +4,13 @@ def test_Resample_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( argstr="-inset %s", copyfile=False, @@ -13,9 +18,17 @@ def test_Resample_inputs(): mandatory=True, position=-1, ), - master=dict(argstr="-master %s", extensions=None), - num_threads=dict(nohash=True, usedefault=True), - orientation=dict(argstr="-orient %s"), + master=dict( + argstr="-master %s", + extensions=None, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + orientation=dict( + argstr="-orient %s", + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -23,8 +36,12 @@ def test_Resample_inputs(): name_template="%s_resample", ), outputtype=dict(), - resample_mode=dict(argstr="-rmode %s"), - voxel_size=dict(argstr="-dxyz %f %f %f"), + resample_mode=dict( + argstr="-rmode %s", + ), + voxel_size=dict( + argstr="-dxyz %f %f %f", + ), ) inputs = Resample.input_spec() @@ -34,7 +51,11 @@ def test_Resample_inputs(): def test_Resample_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Resample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Retroicor.py b/nipype/interfaces/afni/tests/test_auto_Retroicor.py index 309212634d..03039a291f 100644 --- a/nipype/interfaces/afni/tests/test_auto_Retroicor.py +++ b/nipype/interfaces/afni/tests/test_auto_Retroicor.py @@ -4,17 +4,39 @@ def test_Retroicor_inputs(): input_map = dict( - args=dict(argstr="%s"), - card=dict(argstr="-card %s", extensions=None, position=-2), + args=dict( + argstr="%s", + ), + card=dict( + argstr="-card %s", + extensions=None, + position=-2, + ), cardphase=dict( - argstr="-cardphase %s", extensions=None, hash_files=False, position=-6 + argstr="-cardphase %s", + extensions=None, + hash_files=False, + position=-6, + ), + environ=dict( + nohash=True, + usedefault=True, ), - environ=dict(nohash=True, usedefault=True), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 + argstr="%s", + copyfile=False, + extensions=None, + 
mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + order=dict( + argstr="-order %s", + position=-5, ), - num_threads=dict(nohash=True, usedefault=True), - order=dict(argstr="-order %s", position=-5), out_file=dict( argstr="-prefix %s", extensions=None, @@ -23,11 +45,21 @@ def test_Retroicor_inputs(): position=1, ), outputtype=dict(), - resp=dict(argstr="-resp %s", extensions=None, position=-3), + resp=dict( + argstr="-resp %s", + extensions=None, + position=-3, + ), respphase=dict( - argstr="-respphase %s", extensions=None, hash_files=False, position=-7 + argstr="-respphase %s", + extensions=None, + hash_files=False, + position=-7, + ), + threshold=dict( + argstr="-threshold %d", + position=-4, ), - threshold=dict(argstr="-threshold %d", position=-4), ) inputs = Retroicor.input_spec() @@ -37,7 +69,11 @@ def test_Retroicor_inputs(): def test_Retroicor_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Retroicor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_SVMTest.py b/nipype/interfaces/afni/tests/test_auto_SVMTest.py index 5f9f3d930f..665a4a6156 100644 --- a/nipype/interfaces/afni/tests/test_auto_SVMTest.py +++ b/nipype/interfaces/afni/tests/test_auto_SVMTest.py @@ -4,21 +4,51 @@ def test_SVMTest_inputs(): input_map = dict( - args=dict(argstr="%s"), - classout=dict(argstr="-classout"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-testvol %s", extensions=None, mandatory=True), - model=dict(argstr="-model %s", mandatory=True), - multiclass=dict(argstr="-multiclass %s"), - nodetrend=dict(argstr="-nodetrend"), - nopredcensord=dict(argstr="-nopredcensord"), - num_threads=dict(nohash=True, usedefault=True), - options=dict(argstr="%s"), + args=dict( + argstr="%s", + ), + classout=dict( + argstr="-classout", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-testvol %s", + extensions=None, + mandatory=True, + ), + model=dict( + argstr="-model %s", + mandatory=True, + ), + multiclass=dict( + argstr="-multiclass %s", + ), + nodetrend=dict( + argstr="-nodetrend", + ), + nopredcensord=dict( + argstr="-nopredcensord", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + options=dict( + argstr="%s", + ), out_file=dict( - argstr="-predictions %s", extensions=None, name_template="%s_predictions" + argstr="-predictions %s", + extensions=None, + name_template="%s_predictions", ), outputtype=dict(), - testlabels=dict(argstr="-testlabels %s", extensions=None), + testlabels=dict( + argstr="-testlabels %s", + extensions=None, + ), ) inputs = SVMTest.input_spec() @@ -28,7 +58,11 @@ def test_SVMTest_inputs(): def test_SVMTest_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SVMTest.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_SVMTrain.py b/nipype/interfaces/afni/tests/test_auto_SVMTrain.py index d7dbaf8628..f79bf1b9ac 100644 --- a/nipype/interfaces/afni/tests/test_auto_SVMTrain.py +++ b/nipype/interfaces/afni/tests/test_auto_SVMTrain.py @@ -11,15 +11,35 @@ def test_SVMTrain_inputs(): name_template="%s_alphas", suffix="_alphas", ), - args=dict(argstr="%s"), - censor=dict(argstr="-censor %s", extensions=None), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + 
censor=dict( + argstr="-censor %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="-trainvol %s", copyfile=False, extensions=None, mandatory=True + argstr="-trainvol %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + kernel=dict( + argstr="-kernel %s", + ), + mask=dict( + argstr="-mask %s", + copyfile=False, + extensions=None, + position=-1, + ), + max_iterations=dict( + argstr="-max_iterations %d", ), - kernel=dict(argstr="-kernel %s"), - mask=dict(argstr="-mask %s", copyfile=False, extensions=None, position=-1), - max_iterations=dict(argstr="-max_iterations %d"), model=dict( argstr="-model %s", extensions=None, @@ -27,9 +47,16 @@ def test_SVMTrain_inputs(): name_template="%s_model", suffix="_model", ), - nomodelmask=dict(argstr="-nomodelmask"), - num_threads=dict(nohash=True, usedefault=True), - options=dict(argstr="%s"), + nomodelmask=dict( + argstr="-nomodelmask", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + options=dict( + argstr="%s", + ), out_file=dict( argstr="-bucket %s", extensions=None, @@ -38,9 +65,17 @@ def test_SVMTrain_inputs(): suffix="_bucket", ), outputtype=dict(), - trainlabels=dict(argstr="-trainlabels %s", extensions=None), - ttype=dict(argstr="-type %s", mandatory=True), - w_out=dict(argstr="-wout"), + trainlabels=dict( + argstr="-trainlabels %s", + extensions=None, + ), + ttype=dict( + argstr="-type %s", + mandatory=True, + ), + w_out=dict( + argstr="-wout", + ), ) inputs = SVMTrain.input_spec() @@ -51,9 +86,15 @@ def test_SVMTrain_inputs(): def test_SVMTrain_outputs(): output_map = dict( - alphas=dict(extensions=None), - model=dict(extensions=None), - out_file=dict(extensions=None), + alphas=dict( + extensions=None, + ), + model=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), ) outputs = SVMTrain.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Seg.py b/nipype/interfaces/afni/tests/test_auto_Seg.py index 18db24ac48..38b256d1ea 100644 --- a/nipype/interfaces/afni/tests/test_auto_Seg.py +++ b/nipype/interfaces/afni/tests/test_auto_Seg.py @@ -4,13 +4,28 @@ def test_Seg_inputs(): input_map = dict( - args=dict(argstr="%s"), - bias_classes=dict(argstr="-bias_classes %s"), - bias_fwhm=dict(argstr="-bias_fwhm %f"), - blur_meth=dict(argstr="-blur_meth %s"), - bmrf=dict(argstr="-bmrf %f"), - classes=dict(argstr="-classes %s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + bias_classes=dict( + argstr="-bias_classes %s", + ), + bias_fwhm=dict( + argstr="-bias_fwhm %f", + ), + blur_meth=dict( + argstr="-blur_meth %s", + ), + bmrf=dict( + argstr="-bmrf %f", + ), + classes=dict( + argstr="-classes %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( argstr="-anat %s", copyfile=True, @@ -18,11 +33,23 @@ def test_Seg_inputs(): mandatory=True, position=-1, ), - main_N=dict(argstr="-main_N %d"), - mask=dict(argstr="-mask %s", mandatory=True, position=-2), - mixfloor=dict(argstr="-mixfloor %f"), - mixfrac=dict(argstr="-mixfrac %s"), - prefix=dict(argstr="-prefix %s"), + main_N=dict( + argstr="-main_N %d", + ), + mask=dict( + argstr="-mask %s", + mandatory=True, + position=-2, + ), + mixfloor=dict( + argstr="-mixfloor %f", + ), + mixfrac=dict( + argstr="-mixfrac %s", + ), + prefix=dict( + argstr="-prefix %s", + ), ) inputs = Seg.input_spec() @@ -32,7 +59,11 @@ def test_Seg_inputs(): def test_Seg_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + 
extensions=None, + ), + ) outputs = Seg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_SkullStrip.py b/nipype/interfaces/afni/tests/test_auto_SkullStrip.py index 23722f6995..4f78254e47 100644 --- a/nipype/interfaces/afni/tests/test_auto_SkullStrip.py +++ b/nipype/interfaces/afni/tests/test_auto_SkullStrip.py @@ -4,8 +4,13 @@ def test_SkullStrip_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( argstr="-input %s", copyfile=False, @@ -13,7 +18,10 @@ def test_SkullStrip_inputs(): mandatory=True, position=1, ), - num_threads=dict(nohash=True, usedefault=True), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -30,7 +38,11 @@ def test_SkullStrip_inputs(): def test_SkullStrip_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SkullStrip.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Synthesize.py b/nipype/interfaces/afni/tests/test_auto_Synthesize.py index 40acf54a6f..9f787edbfc 100644 --- a/nipype/interfaces/afni/tests/test_auto_Synthesize.py +++ b/nipype/interfaces/afni/tests/test_auto_Synthesize.py @@ -4,21 +4,48 @@ def test_Synthesize_inputs(): input_map = dict( - TR=dict(argstr="-TR %f"), - args=dict(argstr="%s"), + TR=dict( + argstr="-TR %f", + ), + args=dict( + argstr="%s", + ), cbucket=dict( - argstr="-cbucket %s", copyfile=False, extensions=None, mandatory=True + argstr="-cbucket %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + cenfill=dict( + argstr="-cenfill %s", + ), + dry_run=dict( + argstr="-dry", + ), + environ=dict( + nohash=True, + usedefault=True, ), - cenfill=dict(argstr="-cenfill %s"), - dry_run=dict(argstr="-dry"), - environ=dict(nohash=True, usedefault=True), matrix=dict( - argstr="-matrix %s", copyfile=False, extensions=None, mandatory=True + argstr="-matrix %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr="-prefix %s", + extensions=None, + name_template="syn", ), - num_threads=dict(nohash=True, usedefault=True), - out_file=dict(argstr="-prefix %s", extensions=None, name_template="syn"), outputtype=dict(), - select=dict(argstr="-select %s", mandatory=True), + select=dict( + argstr="-select %s", + mandatory=True, + ), ) inputs = Synthesize.input_spec() @@ -28,7 +55,11 @@ def test_Synthesize_inputs(): def test_Synthesize_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Synthesize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TCat.py b/nipype/interfaces/afni/tests/test_auto_TCat.py index d0b4595007..595e91383e 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCat.py +++ b/nipype/interfaces/afni/tests/test_auto_TCat.py @@ -4,10 +4,23 @@ def test_TCat_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_files=dict(argstr=" %s", copyfile=False, mandatory=True, position=-1), - num_threads=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( 
+ argstr=" %s", + copyfile=False, + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -15,8 +28,13 @@ def test_TCat_inputs(): name_template="%s_tcat", ), outputtype=dict(), - rlt=dict(argstr="-rlt%s", position=1), - verbose=dict(argstr="-verb"), + rlt=dict( + argstr="-rlt%s", + position=1, + ), + verbose=dict( + argstr="-verb", + ), ) inputs = TCat.input_spec() @@ -26,7 +44,11 @@ def test_TCat_inputs(): def test_TCat_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TCat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py b/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py index 17be155fd1..728d281d27 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py +++ b/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py @@ -4,13 +4,33 @@ def test_TCatSubBrick_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_files=dict(argstr="%s%s ...", copyfile=False, mandatory=True, position=-1), - num_threads=dict(nohash=True, usedefault=True), - out_file=dict(argstr="-prefix %s", extensions=None, genfile=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr="%s%s ...", + copyfile=False, + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr="-prefix %s", + extensions=None, + genfile=True, + ), outputtype=dict(), - rlt=dict(argstr="-rlt%s", position=1), + rlt=dict( + argstr="-rlt%s", + position=1, + ), ) inputs = TCatSubBrick.input_spec() @@ -20,7 +40,11 @@ def test_TCatSubBrick_inputs(): def test_TCatSubBrick_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TCatSubBrick.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TCorr1D.py b/nipype/interfaces/afni/tests/test_auto_TCorr1D.py index d35de852ab..665a0dfc3d 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCorr1D.py +++ b/nipype/interfaces/afni/tests/test_auto_TCorr1D.py @@ -4,12 +4,22 @@ def test_TCorr1D_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), ktaub=dict( - argstr=" -ktaub", position=1, xor=["pearson", "spearman", "quadrant"] + argstr=" -ktaub", + position=1, + xor=["pearson", "spearman", "quadrant"], + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -19,18 +29,33 @@ def test_TCorr1D_inputs(): ), outputtype=dict(), pearson=dict( - argstr=" -pearson", position=1, xor=["spearman", "quadrant", "ktaub"] + argstr=" -pearson", + position=1, + xor=["spearman", "quadrant", "ktaub"], ), quadrant=dict( - argstr=" -quadrant", position=1, xor=["pearson", "spearman", "ktaub"] + argstr=" -quadrant", + position=1, + xor=["pearson", "spearman", "ktaub"], ), spearman=dict( - argstr=" -spearman", position=1, xor=["pearson", "quadrant", "ktaub"] + argstr=" -spearman", + position=1, + xor=["pearson", "quadrant", "ktaub"], ), xset=dict( - argstr=" %s", copyfile=False, extensions=None, 
mandatory=True, position=-2 + argstr=" %s", + copyfile=False, + extensions=None, + mandatory=True, + position=-2, + ), + y_1d=dict( + argstr=" %s", + extensions=None, + mandatory=True, + position=-1, ), - y_1d=dict(argstr=" %s", extensions=None, mandatory=True, position=-1), ) inputs = TCorr1D.input_spec() @@ -40,7 +65,11 @@ def test_TCorr1D_inputs(): def test_TCorr1D_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TCorr1D.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TCorrMap.py b/nipype/interfaces/afni/tests/test_auto_TCorrMap.py index 7cbd2287c3..8e6b1860ff 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCorrMap.py +++ b/nipype/interfaces/afni/tests/test_auto_TCorrMap.py @@ -15,8 +15,12 @@ def test_TCorrMap_inputs(): "var_absolute_threshold_normalize", ), ), - args=dict(argstr="%s"), - automask=dict(argstr="-automask"), + args=dict( + argstr="%s", + ), + automask=dict( + argstr="-automask", + ), average_expr=dict( argstr="-Aexpr %s %s", extensions=None, @@ -31,28 +35,54 @@ def test_TCorrMap_inputs(): suffix="_cexpr", xor=("average_expr", "average_expr_nonzero", "sum_expr"), ), - bandpass=dict(argstr="-bpass %f %f"), - blur_fwhm=dict(argstr="-Gblur %f"), + bandpass=dict( + argstr="-bpass %f %f", + ), + blur_fwhm=dict( + argstr="-Gblur %f", + ), correlation_maps=dict( - argstr="-CorrMap %s", extensions=None, name_source="in_file" + argstr="-CorrMap %s", + extensions=None, + name_source="in_file", ), correlation_maps_masked=dict( - argstr="-CorrMask %s", extensions=None, name_source="in_file" + argstr="-CorrMask %s", + extensions=None, + name_source="in_file", + ), + environ=dict( + nohash=True, + usedefault=True, ), - environ=dict(nohash=True, usedefault=True), expr=dict(), histogram=dict( - argstr="-Hist %d %s", extensions=None, name_source="in_file", suffix="_hist" + argstr="-Hist %d %s", + extensions=None, + name_source="in_file", + suffix="_hist", ), histogram_bin_numbers=dict(), in_file=dict( - argstr="-input %s", copyfile=False, extensions=None, mandatory=True + argstr="-input %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + mask=dict( + argstr="-mask %s", + extensions=None, ), - mask=dict(argstr="-mask %s", extensions=None), mean_file=dict( - argstr="-Mean %s", extensions=None, name_source="in_file", suffix="_mean" + argstr="-Mean %s", + extensions=None, + name_source="in_file", + suffix="_mean", + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -61,15 +91,33 @@ def test_TCorrMap_inputs(): ), outputtype=dict(), pmean=dict( - argstr="-Pmean %s", extensions=None, name_source="in_file", suffix="_pmean" + argstr="-Pmean %s", + extensions=None, + name_source="in_file", + suffix="_pmean", + ), + polort=dict( + argstr="-polort %d", ), - polort=dict(argstr="-polort %d"), qmean=dict( - argstr="-Qmean %s", extensions=None, name_source="in_file", suffix="_qmean" + argstr="-Qmean %s", + extensions=None, + name_source="in_file", + suffix="_qmean", + ), + regress_out_timeseries=dict( + argstr="-ort %s", + extensions=None, + ), + seeds=dict( + argstr="-seed %s", + extensions=None, + xor="seeds_width", + ), + seeds_width=dict( + argstr="-Mseed %f", + xor="seeds", ), - regress_out_timeseries=dict(argstr="-ort %s", extensions=None), - seeds=dict(argstr="-seed %s", extensions=None, xor="seeds_width"), - 
seeds_width=dict(argstr="-Mseed %f", xor="seeds"), sum_expr=dict( argstr="-Sexpr %s %s", extensions=None, @@ -101,7 +149,10 @@ def test_TCorrMap_inputs(): ), ), zmean=dict( - argstr="-Zmean %s", extensions=None, name_source="in_file", suffix="_zmean" + argstr="-Zmean %s", + extensions=None, + name_source="in_file", + suffix="_zmean", ), ) inputs = TCorrMap.input_spec() @@ -113,19 +164,45 @@ def test_TCorrMap_inputs(): def test_TCorrMap_outputs(): output_map = dict( - absolute_threshold=dict(extensions=None), - average_expr=dict(extensions=None), - average_expr_nonzero=dict(extensions=None), - correlation_maps=dict(extensions=None), - correlation_maps_masked=dict(extensions=None), - histogram=dict(extensions=None), - mean_file=dict(extensions=None), - pmean=dict(extensions=None), - qmean=dict(extensions=None), - sum_expr=dict(extensions=None), - var_absolute_threshold=dict(extensions=None), - var_absolute_threshold_normalize=dict(extensions=None), - zmean=dict(extensions=None), + absolute_threshold=dict( + extensions=None, + ), + average_expr=dict( + extensions=None, + ), + average_expr_nonzero=dict( + extensions=None, + ), + correlation_maps=dict( + extensions=None, + ), + correlation_maps_masked=dict( + extensions=None, + ), + histogram=dict( + extensions=None, + ), + mean_file=dict( + extensions=None, + ), + pmean=dict( + extensions=None, + ), + qmean=dict( + extensions=None, + ), + sum_expr=dict( + extensions=None, + ), + var_absolute_threshold=dict( + extensions=None, + ), + var_absolute_threshold_normalize=dict( + extensions=None, + ), + zmean=dict( + extensions=None, + ), ) outputs = TCorrMap.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_TCorrelate.py b/nipype/interfaces/afni/tests/test_auto_TCorrelate.py index ce808378c1..1e85d44b68 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCorrelate.py +++ b/nipype/interfaces/afni/tests/test_auto_TCorrelate.py @@ -4,9 +4,17 @@ def test_TCorrelate_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - num_threads=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -14,13 +22,25 @@ def test_TCorrelate_inputs(): name_template="%s_tcorr", ), outputtype=dict(), - pearson=dict(argstr="-pearson"), - polort=dict(argstr="-polort %d"), + pearson=dict( + argstr="-pearson", + ), + polort=dict( + argstr="-polort %d", + ), xset=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-2 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-2, ), yset=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, ), ) inputs = TCorrelate.input_spec() @@ -31,7 +51,11 @@ def test_TCorrelate_inputs(): def test_TCorrelate_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TCorrelate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TNorm.py b/nipype/interfaces/afni/tests/test_auto_TNorm.py index 2bba6711f3..975345d92a 100644 --- a/nipype/interfaces/afni/tests/test_auto_TNorm.py +++ b/nipype/interfaces/afni/tests/test_auto_TNorm.py @@ -4,17 +4,39 @@ def test_TNorm_inputs(): input_map = dict( - 
L1fit=dict(argstr="-L1fit"), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + L1fit=dict( + argstr="-L1fit", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + norm1=dict( + argstr="-norm1", + ), + norm2=dict( + argstr="-norm2", + ), + normR=dict( + argstr="-normR", + ), + normx=dict( + argstr="-normx", + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - norm1=dict(argstr="-norm1"), - norm2=dict(argstr="-norm2"), - normR=dict(argstr="-normR"), - normx=dict(argstr="-normx"), - num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -22,7 +44,9 @@ def test_TNorm_inputs(): name_template="%s_tnorm", ), outputtype=dict(), - polort=dict(argstr="-polort %s"), + polort=dict( + argstr="-polort %s", + ), ) inputs = TNorm.input_spec() @@ -32,7 +56,11 @@ def test_TNorm_inputs(): def test_TNorm_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TNorm.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TProject.py b/nipype/interfaces/afni/tests/test_auto_TProject.py index 3b71662ed9..d8f9990817 100644 --- a/nipype/interfaces/afni/tests/test_auto_TProject.py +++ b/nipype/interfaces/afni/tests/test_auto_TProject.py @@ -4,17 +4,43 @@ def test_TProject_inputs(): input_map = dict( - TR=dict(argstr="-TR %g"), - args=dict(argstr="%s"), - automask=dict(argstr="-automask", xor=["mask"]), - bandpass=dict(argstr="-bandpass %g %g"), - blur=dict(argstr="-blur %g"), - cenmode=dict(argstr="-cenmode %s"), - censor=dict(argstr="-censor %s", extensions=None), - censortr=dict(argstr="-CENSORTR %s"), - concat=dict(argstr="-concat %s", extensions=None), - dsort=dict(argstr="-dsort %s..."), - environ=dict(nohash=True, usedefault=True), + TR=dict( + argstr="-TR %g", + ), + args=dict( + argstr="%s", + ), + automask=dict( + argstr="-automask", + xor=["mask"], + ), + bandpass=dict( + argstr="-bandpass %g %g", + ), + blur=dict( + argstr="-blur %g", + ), + cenmode=dict( + argstr="-cenmode %s", + ), + censor=dict( + argstr="-censor %s", + extensions=None, + ), + censortr=dict( + argstr="-CENSORTR %s", + ), + concat=dict( + argstr="-concat %s", + extensions=None, + ), + dsort=dict( + argstr="-dsort %s...", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( argstr="-input %s", copyfile=False, @@ -22,11 +48,24 @@ def test_TProject_inputs(): mandatory=True, position=1, ), - mask=dict(argstr="-mask %s", extensions=None), - noblock=dict(argstr="-noblock"), - norm=dict(argstr="-norm"), - num_threads=dict(nohash=True, usedefault=True), - ort=dict(argstr="-ort %s", extensions=None), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + noblock=dict( + argstr="-noblock", + ), + norm=dict( + argstr="-norm", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + ort=dict( + argstr="-ort %s", + extensions=None, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -35,8 +74,12 @@ def test_TProject_inputs(): position=-1, ), outputtype=dict(), - polort=dict(argstr="-polort %d"), - stopband=dict(argstr="-stopband %g %g"), + polort=dict( + argstr="-polort %d", + ), + stopband=dict( + argstr="-stopband %g %g", + ), ) inputs = TProject.input_spec() @@ 
-46,7 +89,11 @@ def test_TProject_inputs(): def test_TProject_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TProject.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TShift.py b/nipype/interfaces/afni/tests/test_auto_TShift.py index 1c3c838ec2..78af699c7c 100644 --- a/nipype/interfaces/afni/tests/test_auto_TShift.py +++ b/nipype/interfaces/afni/tests/test_auto_TShift.py @@ -4,14 +4,30 @@ def test_TShift_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - ignore=dict(argstr="-ignore %s"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ignore=dict( + argstr="-ignore %s", + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + interp=dict( + argstr="-%s", + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - interp=dict(argstr="-%s"), - num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -19,14 +35,34 @@ def test_TShift_inputs(): name_template="%s_tshift", ), outputtype=dict(), - rlt=dict(argstr="-rlt"), - rltplus=dict(argstr="-rlt+"), - slice_encoding_direction=dict(usedefault=True), - slice_timing=dict(argstr="-tpattern @%s", xor=["tpattern"]), - tpattern=dict(argstr="-tpattern %s", xor=["slice_timing"]), - tr=dict(argstr="-TR %s"), - tslice=dict(argstr="-slice %s", xor=["tzero"]), - tzero=dict(argstr="-tzero %s", xor=["tslice"]), + rlt=dict( + argstr="-rlt", + ), + rltplus=dict( + argstr="-rlt+", + ), + slice_encoding_direction=dict( + usedefault=True, + ), + slice_timing=dict( + argstr="-tpattern @%s", + xor=["tpattern"], + ), + tpattern=dict( + argstr="-tpattern %s", + xor=["slice_timing"], + ), + tr=dict( + argstr="-TR %s", + ), + tslice=dict( + argstr="-slice %s", + xor=["tzero"], + ), + tzero=dict( + argstr="-tzero %s", + xor=["tslice"], + ), ) inputs = TShift.input_spec() @@ -36,7 +72,14 @@ def test_TShift_inputs(): def test_TShift_outputs(): - output_map = dict(out_file=dict(extensions=None), timing_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + timing_file=dict( + extensions=None, + ), + ) outputs = TShift.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TSmooth.py b/nipype/interfaces/afni/tests/test_auto_TSmooth.py index e891d3ba0a..1836d31c22 100644 --- a/nipype/interfaces/afni/tests/test_auto_TSmooth.py +++ b/nipype/interfaces/afni/tests/test_auto_TSmooth.py @@ -4,21 +4,52 @@ def test_TSmooth_inputs(): input_map = dict( - adaptive=dict(argstr="-adaptive %d"), - args=dict(argstr="%s"), - blackman=dict(argstr="-blackman %d"), - custom=dict(argstr="-custom %s", extensions=None), - datum=dict(argstr="-datum %s"), - environ=dict(nohash=True, usedefault=True), - hamming=dict(argstr="-hamming %d"), + adaptive=dict( + argstr="-adaptive %d", + ), + args=dict( + argstr="%s", + ), + blackman=dict( + argstr="-blackman %d", + ), + custom=dict( + argstr="-custom %s", + extensions=None, + ), + datum=dict( + argstr="-datum %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hamming=dict( + argstr="-hamming %d", + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 + argstr="%s", + copyfile=False, + 
extensions=None, + mandatory=True, + position=-1, + ), + lin=dict( + argstr="-lin", + ), + lin3=dict( + argstr="-3lin %d", + ), + med=dict( + argstr="-med", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + osf=dict( + argstr="-osf", ), - lin=dict(argstr="-lin"), - lin3=dict(argstr="-3lin %d"), - med=dict(argstr="-med"), - num_threads=dict(nohash=True, usedefault=True), - osf=dict(argstr="-osf"), out_file=dict( argstr="-prefix %s", extensions=None, @@ -35,7 +66,11 @@ def test_TSmooth_inputs(): def test_TSmooth_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TSmooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_TStat.py b/nipype/interfaces/afni/tests/test_auto_TStat.py index 7ee0dbe53c..b7499c5442 100644 --- a/nipype/interfaces/afni/tests/test_auto_TStat.py +++ b/nipype/interfaces/afni/tests/test_auto_TStat.py @@ -4,14 +4,31 @@ def test_TStat_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + options=dict( + argstr="%s", ), - mask=dict(argstr="-mask %s", extensions=None), - num_threads=dict(nohash=True, usedefault=True), - options=dict(argstr="%s"), out_file=dict( argstr="-prefix %s", extensions=None, @@ -28,7 +45,11 @@ def test_TStat_inputs(): def test_TStat_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TStat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_To3D.py b/nipype/interfaces/afni/tests/test_auto_To3D.py index 02b5beda62..6121efbe57 100644 --- a/nipype/interfaces/afni/tests/test_auto_To3D.py +++ b/nipype/interfaces/afni/tests/test_auto_To3D.py @@ -4,14 +4,34 @@ def test_To3D_inputs(): input_map = dict( - args=dict(argstr="%s"), - assumemosaic=dict(argstr="-assume_dicom_mosaic"), - datatype=dict(argstr="-datum %s"), - environ=dict(nohash=True, usedefault=True), - filetype=dict(argstr="-%s"), - funcparams=dict(argstr="-time:zt %s alt+z2"), - in_folder=dict(argstr="%s/*.dcm", mandatory=True, position=-1), - num_threads=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + assumemosaic=dict( + argstr="-assume_dicom_mosaic", + ), + datatype=dict( + argstr="-datum %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + filetype=dict( + argstr="-%s", + ), + funcparams=dict( + argstr="-time:zt %s alt+z2", + ), + in_folder=dict( + argstr="%s/*.dcm", + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -19,7 +39,9 @@ def test_To3D_inputs(): name_template="%s", ), outputtype=dict(), - skipoutliers=dict(argstr="-skip_outliers"), + skipoutliers=dict( + argstr="-skip_outliers", + ), ) inputs = To3D.input_spec() @@ -29,7 +51,11 @@ def test_To3D_inputs(): def test_To3D_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = 
To3D.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Undump.py b/nipype/interfaces/afni/tests/test_auto_Undump.py index 456f4cfab4..64c98cf25a 100644 --- a/nipype/interfaces/afni/tests/test_auto_Undump.py +++ b/nipype/interfaces/afni/tests/test_auto_Undump.py @@ -4,13 +4,28 @@ def test_Undump_inputs(): input_map = dict( - args=dict(argstr="%s"), - coordinates_specification=dict(argstr="-%s"), - datatype=dict(argstr="-datum %s"), - default_value=dict(argstr="-dval %f"), - environ=dict(nohash=True, usedefault=True), - fill_value=dict(argstr="-fval %f"), - head_only=dict(argstr="-head_only"), + args=dict( + argstr="%s", + ), + coordinates_specification=dict( + argstr="-%s", + ), + datatype=dict( + argstr="-datum %s", + ), + default_value=dict( + argstr="-dval %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fill_value=dict( + argstr="-fval %f", + ), + head_only=dict( + argstr="-head_only", + ), in_file=dict( argstr="-master %s", copyfile=False, @@ -18,12 +33,26 @@ def test_Undump_inputs(): mandatory=True, position=-1, ), - mask_file=dict(argstr="-mask %s", extensions=None), - num_threads=dict(nohash=True, usedefault=True), - orient=dict(argstr="-orient %s"), - out_file=dict(argstr="-prefix %s", extensions=None, name_source="in_file"), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + orient=dict( + argstr="-orient %s", + ), + out_file=dict( + argstr="-prefix %s", + extensions=None, + name_source="in_file", + ), outputtype=dict(), - srad=dict(argstr="-srad %f"), + srad=dict( + argstr="-srad %f", + ), ) inputs = Undump.input_spec() @@ -33,7 +62,11 @@ def test_Undump_inputs(): def test_Undump_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Undump.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Unifize.py b/nipype/interfaces/afni/tests/test_auto_Unifize.py index b4cb930a0a..15832152c2 100644 --- a/nipype/interfaces/afni/tests/test_auto_Unifize.py +++ b/nipype/interfaces/afni/tests/test_auto_Unifize.py @@ -4,11 +4,24 @@ def test_Unifize_inputs(): input_map = dict( - args=dict(argstr="%s"), - cl_frac=dict(argstr="-clfrac %f"), - environ=dict(nohash=True, usedefault=True), - epi=dict(argstr="-EPI", requires=["no_duplo", "t2"], xor=["gm"]), - gm=dict(argstr="-GM"), + args=dict( + argstr="%s", + ), + cl_frac=dict( + argstr="-clfrac %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + epi=dict( + argstr="-EPI", + requires=["no_duplo", "t2"], + xor=["gm"], + ), + gm=dict( + argstr="-GM", + ), in_file=dict( argstr="-input %s", copyfile=False, @@ -16,8 +29,13 @@ def test_Unifize_inputs(): mandatory=True, position=-1, ), - no_duplo=dict(argstr="-noduplo"), - num_threads=dict(nohash=True, usedefault=True), + no_duplo=dict( + argstr="-noduplo", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -25,12 +43,25 @@ def test_Unifize_inputs(): name_template="%s_unifized", ), outputtype=dict(), - quiet=dict(argstr="-quiet"), - rbt=dict(argstr="-rbt %f %f %f"), - scale_file=dict(argstr="-ssave %s", extensions=None), - t2=dict(argstr="-T2"), - t2_up=dict(argstr="-T2up %f"), - urad=dict(argstr="-Urad %s"), + quiet=dict( + argstr="-quiet", + ), + rbt=dict( + argstr="-rbt %f %f %f", + ), + scale_file=dict( + argstr="-ssave %s", + 
extensions=None, + ), + t2=dict( + argstr="-T2", + ), + t2_up=dict( + argstr="-T2up %f", + ), + urad=dict( + argstr="-Urad %s", + ), ) inputs = Unifize.input_spec() @@ -40,7 +71,14 @@ def test_Unifize_inputs(): def test_Unifize_outputs(): - output_map = dict(out_file=dict(extensions=None), scale_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + scale_file=dict( + extensions=None, + ), + ) outputs = Unifize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Volreg.py b/nipype/interfaces/afni/tests/test_auto_Volreg.py index 56159816be..658b933391 100644 --- a/nipype/interfaces/afni/tests/test_auto_Volreg.py +++ b/nipype/interfaces/afni/tests/test_auto_Volreg.py @@ -4,15 +4,34 @@ def test_Volreg_inputs(): input_map = dict( - args=dict(argstr="%s"), - basefile=dict(argstr="-base %s", extensions=None, position=-6), - copyorigin=dict(argstr="-twodup"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + basefile=dict( + argstr="-base %s", + extensions=None, + position=-6, + ), + copyorigin=dict( + argstr="-twodup", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + in_weight_volume=dict( + argstr="-weight '%s[%d]'", + ), + interp=dict( + argstr="-%s", ), - in_weight_volume=dict(argstr="-weight '%s[%d]'"), - interp=dict(argstr="-%s"), md1d_file=dict( argstr="-maxdisp1D %s", extensions=None, @@ -21,7 +40,10 @@ def test_Volreg_inputs(): name_template="%s_md.1D", position=-4, ), - num_threads=dict(nohash=True, usedefault=True), + num_threads=dict( + nohash=True, + usedefault=True, + ), oned_file=dict( argstr="-1Dfile %s", extensions=None, @@ -43,9 +65,16 @@ def test_Volreg_inputs(): name_template="%s_volreg", ), outputtype=dict(), - timeshift=dict(argstr="-tshift 0"), - verbose=dict(argstr="-verbose"), - zpad=dict(argstr="-zpad %d", position=-5), + timeshift=dict( + argstr="-tshift 0", + ), + verbose=dict( + argstr="-verbose", + ), + zpad=dict( + argstr="-zpad %d", + position=-5, + ), ) inputs = Volreg.input_spec() @@ -56,10 +85,18 @@ def test_Volreg_inputs(): def test_Volreg_outputs(): output_map = dict( - md1d_file=dict(extensions=None), - oned_file=dict(extensions=None), - oned_matrix_save=dict(extensions=None), - out_file=dict(extensions=None), + md1d_file=dict( + extensions=None, + ), + oned_file=dict( + extensions=None, + ), + oned_matrix_save=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), ) outputs = Volreg.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Warp.py b/nipype/interfaces/afni/tests/test_auto_Warp.py index 90abbd521c..9d694f8425 100644 --- a/nipype/interfaces/afni/tests/test_auto_Warp.py +++ b/nipype/interfaces/afni/tests/test_auto_Warp.py @@ -4,19 +4,48 @@ def test_Warp_inputs(): input_map = dict( - args=dict(argstr="%s"), - deoblique=dict(argstr="-deoblique"), - environ=dict(nohash=True, usedefault=True), - gridset=dict(argstr="-gridset %s", extensions=None), + args=dict( + argstr="%s", + ), + deoblique=dict( + argstr="-deoblique", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gridset=dict( + argstr="-gridset %s", + extensions=None, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 - ), - interp=dict(argstr="-%s"), - matparent=dict(argstr="-matparent %s", 
extensions=None), - mni2tta=dict(argstr="-mni2tta"), - newgrid=dict(argstr="-newgrid %f"), - num_threads=dict(nohash=True, usedefault=True), - oblique_parent=dict(argstr="-oblique_parent %s", extensions=None), + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + interp=dict( + argstr="-%s", + ), + matparent=dict( + argstr="-matparent %s", + extensions=None, + ), + mni2tta=dict( + argstr="-mni2tta", + ), + newgrid=dict( + argstr="-newgrid %f", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + oblique_parent=dict( + argstr="-oblique_parent %s", + extensions=None, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -25,10 +54,18 @@ def test_Warp_inputs(): name_template="%s_warp", ), outputtype=dict(), - save_warp=dict(requires=["verbose"]), - tta2mni=dict(argstr="-tta2mni"), - verbose=dict(argstr="-verb"), - zpad=dict(argstr="-zpad %d"), + save_warp=dict( + requires=["verbose"], + ), + tta2mni=dict( + argstr="-tta2mni", + ), + verbose=dict( + argstr="-verb", + ), + zpad=dict( + argstr="-zpad %d", + ), ) inputs = Warp.input_spec() @@ -38,7 +75,14 @@ def test_Warp_inputs(): def test_Warp_outputs(): - output_map = dict(out_file=dict(extensions=None), warp_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + warp_file=dict( + extensions=None, + ), + ) outputs = Warp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_ZCutUp.py b/nipype/interfaces/afni/tests/test_auto_ZCutUp.py index c5c3d25844..3c51d6dd1d 100644 --- a/nipype/interfaces/afni/tests/test_auto_ZCutUp.py +++ b/nipype/interfaces/afni/tests/test_auto_ZCutUp.py @@ -4,13 +4,27 @@ def test_ZCutUp_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, + ), + keep=dict( + argstr="-keep %s", + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - keep=dict(argstr="-keep %s"), - num_threads=dict(nohash=True, usedefault=True), out_file=dict( argstr="-prefix %s", extensions=None, @@ -27,7 +41,11 @@ def test_ZCutUp_inputs(): def test_ZCutUp_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ZCutUp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Zcat.py b/nipype/interfaces/afni/tests/test_auto_Zcat.py index 2ce7ac7792..e06f343591 100644 --- a/nipype/interfaces/afni/tests/test_auto_Zcat.py +++ b/nipype/interfaces/afni/tests/test_auto_Zcat.py @@ -4,13 +4,34 @@ def test_Zcat_inputs(): input_map = dict( - args=dict(argstr="%s"), - datum=dict(argstr="-datum %s"), - environ=dict(nohash=True, usedefault=True), - fscale=dict(argstr="-fscale", xor=["nscale"]), - in_files=dict(argstr="%s", copyfile=False, mandatory=True, position=-1), - nscale=dict(argstr="-nscale", xor=["fscale"]), - num_threads=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + datum=dict( + argstr="-datum %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fscale=dict( + argstr="-fscale", + xor=["nscale"], + ), + in_files=dict( + argstr="%s", + copyfile=False, + mandatory=True, + position=-1, + ), + nscale=dict( + argstr="-nscale", 
+ xor=["fscale"], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_file=dict( argstr="-prefix %s", extensions=None, @@ -18,7 +39,9 @@ def test_Zcat_inputs(): name_template="%s_zcat", ), outputtype=dict(), - verb=dict(argstr="-verb"), + verb=dict( + argstr="-verb", + ), ) inputs = Zcat.input_spec() @@ -28,7 +51,11 @@ def test_Zcat_inputs(): def test_Zcat_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Zcat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/afni/tests/test_auto_Zeropad.py b/nipype/interfaces/afni/tests/test_auto_Zeropad.py index 694e7f6228..4d6742f21e 100644 --- a/nipype/interfaces/afni/tests/test_auto_Zeropad.py +++ b/nipype/interfaces/afni/tests/test_auto_Zeropad.py @@ -4,30 +4,79 @@ def test_Zeropad_inputs(): input_map = dict( - A=dict(argstr="-A %i", xor=["master"]), - AP=dict(argstr="-AP %i", xor=["master"]), - I=dict(argstr="-I %i", xor=["master"]), - IS=dict(argstr="-IS %i", xor=["master"]), - L=dict(argstr="-L %i", xor=["master"]), - P=dict(argstr="-P %i", xor=["master"]), - R=dict(argstr="-R %i", xor=["master"]), - RL=dict(argstr="-RL %i", xor=["master"]), - S=dict(argstr="-S %i", xor=["master"]), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + A=dict( + argstr="-A %i", + xor=["master"], + ), + AP=dict( + argstr="-AP %i", + xor=["master"], + ), + I=dict( + argstr="-I %i", + xor=["master"], + ), + IS=dict( + argstr="-IS %i", + xor=["master"], + ), + L=dict( + argstr="-L %i", + xor=["master"], + ), + P=dict( + argstr="-P %i", + xor=["master"], + ), + R=dict( + argstr="-R %i", + xor=["master"], + ), + RL=dict( + argstr="-RL %i", + xor=["master"], + ), + S=dict( + argstr="-S %i", + xor=["master"], + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_files=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-1 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-1, ), master=dict( argstr="-master %s", extensions=None, xor=["I", "S", "A", "P", "L", "R", "z", "RL", "AP", "IS", "mm"], ), - mm=dict(argstr="-mm", xor=["master"]), - num_threads=dict(nohash=True, usedefault=True), - out_file=dict(argstr="-prefix %s", extensions=None, name_template="zeropad"), + mm=dict( + argstr="-mm", + xor=["master"], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr="-prefix %s", + extensions=None, + name_template="zeropad", + ), outputtype=dict(), - z=dict(argstr="-z %i", xor=["master"]), + z=dict( + argstr="-z %i", + xor=["master"], + ), ) inputs = Zeropad.input_spec() @@ -37,7 +86,11 @@ def test_Zeropad_inputs(): def test_Zeropad_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Zeropad.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_AI.py b/nipype/interfaces/ants/tests/test_auto_AI.py index 8dd876782e..bef56b7ee6 100644 --- a/nipype/interfaces/ants/tests/test_auto_AI.py +++ b/nipype/interfaces/ants/tests/test_auto_AI.py @@ -4,22 +4,71 @@ def test_AI_inputs(): input_map = dict( - args=dict(argstr="%s"), - convergence=dict(argstr="-c [%d,%g,%d]", usedefault=True), - dimension=dict(argstr="-d %d", usedefault=True), - environ=dict(nohash=True, usedefault=True), - fixed_image=dict(extensions=None, mandatory=True), - 
fixed_image_mask=dict(argstr="-x %s", extensions=None), - metric=dict(argstr="-m %s", mandatory=True), - moving_image=dict(extensions=None, mandatory=True), - moving_image_mask=dict(extensions=None, requires=["fixed_image_mask"]), - num_threads=dict(nohash=True, usedefault=True), - output_transform=dict(argstr="-o %s", extensions=None, usedefault=True), - principal_axes=dict(argstr="-p %d", usedefault=True, xor=["blobs"]), - search_factor=dict(argstr="-s [%g,%g]", usedefault=True), - search_grid=dict(argstr="-g %s", min_ver="2.3.0"), - transform=dict(argstr="-t %s[%g]", usedefault=True), - verbose=dict(argstr="-v %d", usedefault=True), + args=dict( + argstr="%s", + ), + convergence=dict( + argstr="-c [%d,%g,%d]", + usedefault=True, + ), + dimension=dict( + argstr="-d %d", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_image=dict( + extensions=None, + mandatory=True, + ), + fixed_image_mask=dict( + argstr="-x %s", + extensions=None, + ), + metric=dict( + argstr="-m %s", + mandatory=True, + ), + moving_image=dict( + extensions=None, + mandatory=True, + ), + moving_image_mask=dict( + extensions=None, + requires=["fixed_image_mask"], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + output_transform=dict( + argstr="-o %s", + extensions=None, + usedefault=True, + ), + principal_axes=dict( + argstr="-p %d", + usedefault=True, + xor=["blobs"], + ), + search_factor=dict( + argstr="-s [%g,%g]", + usedefault=True, + ), + search_grid=dict( + argstr="-g %s", + min_ver="2.3.0", + ), + transform=dict( + argstr="-t %s[%g]", + usedefault=True, + ), + verbose=dict( + argstr="-v %d", + usedefault=True, + ), ) inputs = AI.input_spec() @@ -29,7 +78,11 @@ def test_AI_inputs(): def test_AI_outputs(): - output_map = dict(output_transform=dict(extensions=None)) + output_map = dict( + output_transform=dict( + extensions=None, + ), + ) outputs = AI.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_ANTS.py b/nipype/interfaces/ants/tests/test_auto_ANTS.py index 28721815ed..17f456e0dd 100644 --- a/nipype/interfaces/ants/tests/test_auto_ANTS.py +++ b/nipype/interfaces/ants/tests/test_auto_ANTS.py @@ -4,35 +4,97 @@ def test_ANTS_inputs(): input_map = dict( - affine_gradient_descent_option=dict(argstr="%s"), - args=dict(argstr="%s"), - delta_time=dict(requires=["number_of_time_steps"]), - dimension=dict(argstr="%d", position=1), - environ=dict(nohash=True, usedefault=True), - fixed_image=dict(mandatory=True), - gradient_step_length=dict(requires=["transformation_model"]), - metric=dict(mandatory=True), - metric_weight=dict(mandatory=True, requires=["metric"], usedefault=True), - mi_option=dict(argstr="--MI-option %s", sep="x"), - moving_image=dict(argstr="%s", mandatory=True), - num_threads=dict(nohash=True, usedefault=True), + affine_gradient_descent_option=dict( + argstr="%s", + ), + args=dict( + argstr="%s", + ), + delta_time=dict( + requires=["number_of_time_steps"], + ), + dimension=dict( + argstr="%d", + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_image=dict( + mandatory=True, + ), + gradient_step_length=dict( + requires=["transformation_model"], + ), + metric=dict( + mandatory=True, + ), + metric_weight=dict( + mandatory=True, + requires=["metric"], + usedefault=True, + ), + mi_option=dict( + argstr="--MI-option %s", + sep="x", + ), + moving_image=dict( + argstr="%s", + mandatory=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), 
number_of_affine_iterations=dict( - argstr="--number-of-affine-iterations %s", sep="x" + argstr="--number-of-affine-iterations %s", + sep="x", + ), + number_of_iterations=dict( + argstr="--number-of-iterations %s", + sep="x", + ), + number_of_time_steps=dict( + requires=["gradient_step_length"], ), - number_of_iterations=dict(argstr="--number-of-iterations %s", sep="x"), - number_of_time_steps=dict(requires=["gradient_step_length"]), output_transform_prefix=dict( - argstr="--output-naming %s", mandatory=True, usedefault=True - ), - radius=dict(mandatory=True, requires=["metric"]), - regularization=dict(argstr="%s"), - regularization_deformation_field_sigma=dict(requires=["regularization"]), - regularization_gradient_field_sigma=dict(requires=["regularization"]), - smoothing_sigmas=dict(argstr="--gaussian-smoothing-sigmas %s", sep="x"), - subsampling_factors=dict(argstr="--subsampling-factors %s", sep="x"), - symmetry_type=dict(requires=["delta_time"]), - transformation_model=dict(argstr="%s", mandatory=True), - use_histogram_matching=dict(argstr="%s", usedefault=True), + argstr="--output-naming %s", + mandatory=True, + usedefault=True, + ), + radius=dict( + mandatory=True, + requires=["metric"], + ), + regularization=dict( + argstr="%s", + ), + regularization_deformation_field_sigma=dict( + requires=["regularization"], + ), + regularization_gradient_field_sigma=dict( + requires=["regularization"], + ), + smoothing_sigmas=dict( + argstr="--gaussian-smoothing-sigmas %s", + sep="x", + ), + subsampling_factors=dict( + argstr="--subsampling-factors %s", + sep="x", + ), + symmetry_type=dict( + requires=["delta_time"], + ), + transformation_model=dict( + argstr="%s", + mandatory=True, + ), + use_histogram_matching=dict( + argstr="%s", + usedefault=True, + ), ) inputs = ANTS.input_spec() @@ -43,11 +105,21 @@ def test_ANTS_inputs(): def test_ANTS_outputs(): output_map = dict( - affine_transform=dict(extensions=None), - inverse_warp_transform=dict(extensions=None), - metaheader=dict(extensions=None), - metaheader_raw=dict(extensions=None), - warp_transform=dict(extensions=None), + affine_transform=dict( + extensions=None, + ), + inverse_warp_transform=dict( + extensions=None, + ), + metaheader=dict( + extensions=None, + ), + metaheader_raw=dict( + extensions=None, + ), + warp_transform=dict( + extensions=None, + ), ) outputs = ANTS.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py b/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py index 86d860293f..8907c4ab91 100644 --- a/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py +++ b/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py @@ -4,9 +4,17 @@ def test_ANTSCommand_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - num_threads=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), ) inputs = ANTSCommand.input_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py b/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py index ad65c1887a..24ef0d655e 100644 --- a/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py +++ b/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py @@ -4,17 +4,60 @@ def test_AffineInitializer_inputs(): input_map = dict( - args=dict(argstr="%s"), - dimension=dict(argstr="%s", position=0, usedefault=True), - environ=dict(nohash=True, usedefault=True), - 
fixed_image=dict(argstr="%s", extensions=None, mandatory=True, position=1), - local_search=dict(argstr="%d", position=7, usedefault=True), - moving_image=dict(argstr="%s", extensions=None, mandatory=True, position=2), - num_threads=dict(nohash=True, usedefault=True), - out_file=dict(argstr="%s", extensions=None, position=3, usedefault=True), - principal_axes=dict(argstr="%d", position=6, usedefault=True), - radian_fraction=dict(argstr="%f", position=5, usedefault=True), - search_factor=dict(argstr="%f", position=4, usedefault=True), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="%s", + position=0, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_image=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + local_search=dict( + argstr="%d", + position=7, + usedefault=True, + ), + moving_image=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr="%s", + extensions=None, + position=3, + usedefault=True, + ), + principal_axes=dict( + argstr="%d", + position=6, + usedefault=True, + ), + radian_fraction=dict( + argstr="%f", + position=5, + usedefault=True, + ), + search_factor=dict( + argstr="%f", + position=4, + usedefault=True, + ), ) inputs = AffineInitializer.input_spec() @@ -24,7 +67,11 @@ def test_AffineInitializer_inputs(): def test_AffineInitializer_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = AffineInitializer.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py b/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py index 062bac034a..a18a3b60b4 100644 --- a/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py +++ b/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py @@ -4,24 +4,62 @@ def test_ApplyTransforms_inputs(): input_map = dict( - args=dict(argstr="%s"), - default_value=dict(argstr="--default-value %g", usedefault=True), - dimension=dict(argstr="--dimensionality %d"), - environ=dict(nohash=True, usedefault=True), - float=dict(argstr="--float %d", usedefault=True), - input_image=dict(argstr="--input %s", extensions=None, mandatory=True), - input_image_type=dict(argstr="--input-image-type %d"), - interpolation=dict(argstr="%s", usedefault=True), + args=dict( + argstr="%s", + ), + default_value=dict( + argstr="--default-value %g", + usedefault=True, + ), + dimension=dict( + argstr="--dimensionality %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + float=dict( + argstr="--float %d", + usedefault=True, + ), + input_image=dict( + argstr="--input %s", + extensions=None, + mandatory=True, + ), + input_image_type=dict( + argstr="--input-image-type %d", + ), + interpolation=dict( + argstr="%s", + usedefault=True, + ), interpolation_parameters=dict(), invert_transform_flags=dict(), - num_threads=dict(nohash=True, usedefault=True), - out_postfix=dict(usedefault=True), - output_image=dict(argstr="--output %s", genfile=True, hash_files=False), - print_out_composite_warp_file=dict(requires=["output_image"]), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_postfix=dict( + usedefault=True, + ), + output_image=dict( + argstr="--output %s", + genfile=True, + hash_files=False, + ), + print_out_composite_warp_file=dict( + requires=["output_image"], + ), reference_image=dict( - 
argstr="--reference-image %s", extensions=None, mandatory=True + argstr="--reference-image %s", + extensions=None, + mandatory=True, + ), + transforms=dict( + argstr="%s", + mandatory=True, ), - transforms=dict(argstr="%s", mandatory=True), ) inputs = ApplyTransforms.input_spec() @@ -31,7 +69,11 @@ def test_ApplyTransforms_inputs(): def test_ApplyTransforms_outputs(): - output_map = dict(output_image=dict(extensions=None)) + output_map = dict( + output_image=dict( + extensions=None, + ), + ) outputs = ApplyTransforms.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py b/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py index 5d1723ba73..472c22c8b2 100644 --- a/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py +++ b/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py @@ -4,19 +4,36 @@ def test_ApplyTransformsToPoints_inputs(): input_map = dict( - args=dict(argstr="%s"), - dimension=dict(argstr="--dimensionality %d"), - environ=dict(nohash=True, usedefault=True), - input_file=dict(argstr="--input %s", extensions=None, mandatory=True), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="--dimensionality %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr="--input %s", + extensions=None, + mandatory=True, + ), invert_transform_flags=dict(), - num_threads=dict(nohash=True, usedefault=True), + num_threads=dict( + nohash=True, + usedefault=True, + ), output_file=dict( argstr="--output %s", hash_files=False, name_source=["input_file"], name_template="%s_transformed.csv", ), - transforms=dict(argstr="%s", mandatory=True), + transforms=dict( + argstr="%s", + mandatory=True, + ), ) inputs = ApplyTransformsToPoints.input_spec() @@ -26,7 +43,11 @@ def test_ApplyTransformsToPoints_inputs(): def test_ApplyTransformsToPoints_outputs(): - output_map = dict(output_file=dict(extensions=None)) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = ApplyTransformsToPoints.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_Atropos.py b/nipype/interfaces/ants/tests/test_auto_Atropos.py index b99146eafa..9f02bb5151 100644 --- a/nipype/interfaces/ants/tests/test_auto_Atropos.py +++ b/nipype/interfaces/ants/tests/test_auto_Atropos.py @@ -4,35 +4,85 @@ def test_Atropos_inputs(): input_map = dict( - args=dict(argstr="%s"), - convergence_threshold=dict(requires=["n_iterations"]), - dimension=dict(argstr="--image-dimensionality %d", usedefault=True), - environ=dict(nohash=True, usedefault=True), - icm_use_synchronous_update=dict(argstr="%s"), + args=dict( + argstr="%s", + ), + convergence_threshold=dict( + requires=["n_iterations"], + ), + dimension=dict( + argstr="--image-dimensionality %d", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + icm_use_synchronous_update=dict( + argstr="%s", + ), initialization=dict( - argstr="%s", mandatory=True, requires=["number_of_tissue_classes"] + argstr="%s", + mandatory=True, + requires=["number_of_tissue_classes"], + ), + intensity_images=dict( + argstr="--intensity-image %s...", + mandatory=True, ), - intensity_images=dict(argstr="--intensity-image %s...", mandatory=True), kmeans_init_centers=dict(), - likelihood_model=dict(argstr="--likelihood-model %s"), - mask_image=dict(argstr="--mask-image %s", extensions=None, mandatory=True), - 
maximum_number_of_icm_terations=dict(requires=["icm_use_synchronous_update"]), - mrf_radius=dict(requires=["mrf_smoothing_factor"]), - mrf_smoothing_factor=dict(argstr="%s"), - n_iterations=dict(argstr="%s"), - num_threads=dict(nohash=True, usedefault=True), - number_of_tissue_classes=dict(mandatory=True), + likelihood_model=dict( + argstr="--likelihood-model %s", + ), + mask_image=dict( + argstr="--mask-image %s", + extensions=None, + mandatory=True, + ), + maximum_number_of_icm_terations=dict( + requires=["icm_use_synchronous_update"], + ), + mrf_radius=dict( + requires=["mrf_smoothing_factor"], + ), + mrf_smoothing_factor=dict( + argstr="%s", + ), + n_iterations=dict( + argstr="%s", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + number_of_tissue_classes=dict( + mandatory=True, + ), out_classified_image_name=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + ), + output_posteriors_name_template=dict( + usedefault=True, + ), + posterior_formulation=dict( + argstr="%s", ), - output_posteriors_name_template=dict(usedefault=True), - posterior_formulation=dict(argstr="%s"), prior_image=dict(), - prior_probability_threshold=dict(requires=["prior_weighting"]), + prior_probability_threshold=dict( + requires=["prior_weighting"], + ), prior_weighting=dict(), save_posteriors=dict(), - use_mixture_model_proportions=dict(requires=["posterior_formulation"]), - use_random_seed=dict(argstr="--use-random-seed %d", usedefault=True), + use_mixture_model_proportions=dict( + requires=["posterior_formulation"], + ), + use_random_seed=dict( + argstr="--use-random-seed %d", + usedefault=True, + ), ) inputs = Atropos.input_spec() @@ -42,7 +92,12 @@ def test_Atropos_inputs(): def test_Atropos_outputs(): - output_map = dict(classified_image=dict(extensions=None), posteriors=dict()) + output_map = dict( + classified_image=dict( + extensions=None, + ), + posteriors=dict(), + ) outputs = Atropos.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py b/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py index 46cd73b1f0..3da6956de2 100644 --- a/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py @@ -4,14 +4,33 @@ def test_AverageAffineTransform_inputs(): input_map = dict( - args=dict(argstr="%s"), - dimension=dict(argstr="%d", mandatory=True, position=0), - environ=dict(nohash=True, usedefault=True), - num_threads=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="%d", + mandatory=True, + position=0, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), output_affine_transform=dict( - argstr="%s", extensions=None, mandatory=True, position=1 + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + transforms=dict( + argstr="%s", + mandatory=True, + position=3, ), - transforms=dict(argstr="%s", mandatory=True, position=3), ) inputs = AverageAffineTransform.input_spec() @@ -21,7 +40,11 @@ def test_AverageAffineTransform_inputs(): def test_AverageAffineTransform_outputs(): - output_map = dict(affine_transform=dict(extensions=None)) + output_map = dict( + affine_transform=dict( + extensions=None, + ), + ) outputs = AverageAffineTransform.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/ants/tests/test_auto_AverageImages.py b/nipype/interfaces/ants/tests/test_auto_AverageImages.py index 94dedee9f9..8eb03ea7b4 100644 --- a/nipype/interfaces/ants/tests/test_auto_AverageImages.py +++ b/nipype/interfaces/ants/tests/test_auto_AverageImages.py @@ -4,14 +4,38 @@ def test_AverageImages_inputs(): input_map = dict( - args=dict(argstr="%s"), - dimension=dict(argstr="%d", mandatory=True, position=0), - environ=dict(nohash=True, usedefault=True), - images=dict(argstr="%s", mandatory=True, position=3), - normalize=dict(argstr="%d", mandatory=True, position=2), - num_threads=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="%d", + mandatory=True, + position=0, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + images=dict( + argstr="%s", + mandatory=True, + position=3, + ), + normalize=dict( + argstr="%d", + mandatory=True, + position=2, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), output_average_image=dict( - argstr="%s", extensions=None, hash_files=False, position=1, usedefault=True + argstr="%s", + extensions=None, + hash_files=False, + position=1, + usedefault=True, ), ) inputs = AverageImages.input_spec() @@ -22,7 +46,11 @@ def test_AverageImages_inputs(): def test_AverageImages_outputs(): - output_map = dict(output_average_image=dict(extensions=None)) + output_map = dict( + output_average_image=dict( + extensions=None, + ), + ) outputs = AverageImages.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py b/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py index e6a2e1d445..454a102f2d 100644 --- a/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py +++ b/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py @@ -4,22 +4,61 @@ def test_BrainExtraction_inputs(): input_map = dict( - anatomical_image=dict(argstr="-a %s", extensions=None, mandatory=True), - args=dict(argstr="%s"), + anatomical_image=dict( + argstr="-a %s", + extensions=None, + mandatory=True, + ), + args=dict( + argstr="%s", + ), brain_probability_mask=dict( - argstr="-m %s", copyfile=False, extensions=None, mandatory=True - ), - brain_template=dict(argstr="-e %s", extensions=None, mandatory=True), - debug=dict(argstr="-z 1"), - dimension=dict(argstr="-d %d", usedefault=True), - environ=dict(nohash=True, usedefault=True), - extraction_registration_mask=dict(argstr="-f %s", extensions=None), - image_suffix=dict(argstr="-s %s", usedefault=True), - keep_temporary_files=dict(argstr="-k %d"), - num_threads=dict(nohash=True, usedefault=True), - out_prefix=dict(argstr="-o %s", usedefault=True), - use_floatingpoint_precision=dict(argstr="-q %d"), - use_random_seeding=dict(argstr="-u %d"), + argstr="-m %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + brain_template=dict( + argstr="-e %s", + extensions=None, + mandatory=True, + ), + debug=dict( + argstr="-z 1", + ), + dimension=dict( + argstr="-d %d", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + extraction_registration_mask=dict( + argstr="-f %s", + extensions=None, + ), + image_suffix=dict( + argstr="-s %s", + usedefault=True, + ), + keep_temporary_files=dict( + argstr="-k %d", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_prefix=dict( + argstr="-o %s", + usedefault=True, + ), + use_floatingpoint_precision=dict( + argstr="-q %d", + ), + use_random_seeding=dict( + argstr="-u %d", + ), ) inputs = 
BrainExtraction.input_spec() @@ -30,24 +69,60 @@ def test_BrainExtraction_inputs(): def test_BrainExtraction_outputs(): output_map = dict( - BrainExtractionBrain=dict(extensions=None), - BrainExtractionCSF=dict(extensions=None), - BrainExtractionGM=dict(extensions=None), - BrainExtractionInitialAffine=dict(extensions=None), - BrainExtractionInitialAffineFixed=dict(extensions=None), - BrainExtractionInitialAffineMoving=dict(extensions=None), - BrainExtractionLaplacian=dict(extensions=None), - BrainExtractionMask=dict(extensions=None), - BrainExtractionPrior0GenericAffine=dict(extensions=None), - BrainExtractionPrior1InverseWarp=dict(extensions=None), - BrainExtractionPrior1Warp=dict(extensions=None), - BrainExtractionPriorWarped=dict(extensions=None), - BrainExtractionSegmentation=dict(extensions=None), - BrainExtractionTemplateLaplacian=dict(extensions=None), - BrainExtractionTmp=dict(extensions=None), - BrainExtractionWM=dict(extensions=None), - N4Corrected0=dict(extensions=None), - N4Truncated0=dict(extensions=None), + BrainExtractionBrain=dict( + extensions=None, + ), + BrainExtractionCSF=dict( + extensions=None, + ), + BrainExtractionGM=dict( + extensions=None, + ), + BrainExtractionInitialAffine=dict( + extensions=None, + ), + BrainExtractionInitialAffineFixed=dict( + extensions=None, + ), + BrainExtractionInitialAffineMoving=dict( + extensions=None, + ), + BrainExtractionLaplacian=dict( + extensions=None, + ), + BrainExtractionMask=dict( + extensions=None, + ), + BrainExtractionPrior0GenericAffine=dict( + extensions=None, + ), + BrainExtractionPrior1InverseWarp=dict( + extensions=None, + ), + BrainExtractionPrior1Warp=dict( + extensions=None, + ), + BrainExtractionPriorWarped=dict( + extensions=None, + ), + BrainExtractionSegmentation=dict( + extensions=None, + ), + BrainExtractionTemplateLaplacian=dict( + extensions=None, + ), + BrainExtractionTmp=dict( + extensions=None, + ), + BrainExtractionWM=dict( + extensions=None, + ), + N4Corrected0=dict( + extensions=None, + ), + N4Truncated0=dict( + extensions=None, + ), ) outputs = BrainExtraction.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py b/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py index 741bf5e820..78afc21df2 100644 --- a/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py @@ -4,10 +4,22 @@ def test_ComposeMultiTransform_inputs(): input_map = dict( - args=dict(argstr="%s"), - dimension=dict(argstr="%d", position=0, usedefault=True), - environ=dict(nohash=True, usedefault=True), - num_threads=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="%d", + position=0, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), output_transform=dict( argstr="%s", extensions=None, @@ -16,8 +28,16 @@ def test_ComposeMultiTransform_inputs(): name_template="%s_composed", position=1, ), - reference_image=dict(argstr="%s", extensions=None, position=2), - transforms=dict(argstr="%s", mandatory=True, position=3), + reference_image=dict( + argstr="%s", + extensions=None, + position=2, + ), + transforms=dict( + argstr="%s", + mandatory=True, + position=3, + ), ) inputs = ComposeMultiTransform.input_spec() @@ -27,7 +47,11 @@ def test_ComposeMultiTransform_inputs(): def test_ComposeMultiTransform_outputs(): - output_map = dict(output_transform=dict(extensions=None)) + output_map = dict( 
+ output_transform=dict( + extensions=None, + ), + ) outputs = ComposeMultiTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_CompositeTransformUtil.py b/nipype/interfaces/ants/tests/test_auto_CompositeTransformUtil.py index dd16a5e85b..54b428db2a 100644 --- a/nipype/interfaces/ants/tests/test_auto_CompositeTransformUtil.py +++ b/nipype/interfaces/ants/tests/test_auto_CompositeTransformUtil.py @@ -4,13 +4,37 @@ def test_CompositeTransformUtil_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s...", mandatory=True, position=3), - num_threads=dict(nohash=True, usedefault=True), - out_file=dict(argstr="%s", extensions=None, position=2), - output_prefix=dict(argstr="%s", position=4, usedefault=True), - process=dict(argstr="--%s", position=1, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s...", + mandatory=True, + position=3, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr="%s", + extensions=None, + position=2, + ), + output_prefix=dict( + argstr="%s", + position=4, + usedefault=True, + ), + process=dict( + argstr="--%s", + position=1, + usedefault=True, + ), ) inputs = CompositeTransformUtil.input_spec() @@ -21,9 +45,15 @@ def test_CompositeTransformUtil_inputs(): def test_CompositeTransformUtil_outputs(): output_map = dict( - affine_transform=dict(extensions=None), - displacement_field=dict(extensions=None), - out_file=dict(extensions=None), + affine_transform=dict( + extensions=None, + ), + displacement_field=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), ) outputs = CompositeTransformUtil.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py b/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py index 419c090958..a49239ebae 100644 --- a/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py +++ b/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py @@ -4,19 +4,70 @@ def test_ConvertScalarImageToRGB_inputs(): input_map = dict( - args=dict(argstr="%s"), - colormap=dict(argstr="%s", mandatory=True, position=4), - custom_color_map_file=dict(argstr="%s", position=5, usedefault=True), - dimension=dict(argstr="%d", mandatory=True, position=0, usedefault=True), - environ=dict(nohash=True, usedefault=True), - input_image=dict(argstr="%s", extensions=None, mandatory=True, position=1), - mask_image=dict(argstr="%s", extensions=None, position=3, usedefault=True), - maximum_RGB_output=dict(argstr="%d", position=9, usedefault=True), - maximum_input=dict(argstr="%d", mandatory=True, position=7), - minimum_RGB_output=dict(argstr="%d", position=8, usedefault=True), - minimum_input=dict(argstr="%d", mandatory=True, position=6), - num_threads=dict(nohash=True, usedefault=True), - output_image=dict(argstr="%s", position=2, usedefault=True), + args=dict( + argstr="%s", + ), + colormap=dict( + argstr="%s", + mandatory=True, + position=4, + ), + custom_color_map_file=dict( + argstr="%s", + position=5, + usedefault=True, + ), + dimension=dict( + argstr="%d", + mandatory=True, + position=0, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_image=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + mask_image=dict( + argstr="%s", + extensions=None, + position=3, + usedefault=True, + ), 
+ maximum_RGB_output=dict( + argstr="%d", + position=9, + usedefault=True, + ), + maximum_input=dict( + argstr="%d", + mandatory=True, + position=7, + ), + minimum_RGB_output=dict( + argstr="%d", + position=8, + usedefault=True, + ), + minimum_input=dict( + argstr="%d", + mandatory=True, + position=6, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + output_image=dict( + argstr="%s", + position=2, + usedefault=True, + ), ) inputs = ConvertScalarImageToRGB.input_spec() @@ -26,7 +77,11 @@ def test_ConvertScalarImageToRGB_inputs(): def test_ConvertScalarImageToRGB_outputs(): - output_map = dict(output_image=dict(extensions=None)) + output_map = dict( + output_image=dict( + extensions=None, + ), + ) outputs = ConvertScalarImageToRGB.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py b/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py index cbf89e085e..a42551788b 100644 --- a/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py +++ b/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py @@ -4,32 +4,94 @@ def test_CorticalThickness_inputs(): input_map = dict( - anatomical_image=dict(argstr="-a %s", extensions=None, mandatory=True), - args=dict(argstr="%s"), - b_spline_smoothing=dict(argstr="-v"), + anatomical_image=dict( + argstr="-a %s", + extensions=None, + mandatory=True, + ), + args=dict( + argstr="%s", + ), + b_spline_smoothing=dict( + argstr="-v", + ), brain_probability_mask=dict( - argstr="-m %s", copyfile=False, extensions=None, mandatory=True - ), - brain_template=dict(argstr="-e %s", extensions=None, mandatory=True), - cortical_label_image=dict(extensions=None), - debug=dict(argstr="-z 1"), - dimension=dict(argstr="-d %d", usedefault=True), - environ=dict(nohash=True, usedefault=True), - extraction_registration_mask=dict(argstr="-f %s", extensions=None), - image_suffix=dict(argstr="-s %s", usedefault=True), - keep_temporary_files=dict(argstr="-k %d"), - label_propagation=dict(argstr="-l %s"), - max_iterations=dict(argstr="-i %d"), - num_threads=dict(nohash=True, usedefault=True), - out_prefix=dict(argstr="-o %s", usedefault=True), - posterior_formulation=dict(argstr="-b %s"), - prior_segmentation_weight=dict(argstr="-w %f"), - quick_registration=dict(argstr="-q 1"), - segmentation_iterations=dict(argstr="-n %d"), - segmentation_priors=dict(argstr="-p %s", mandatory=True), - t1_registration_template=dict(argstr="-t %s", extensions=None, mandatory=True), - use_floatingpoint_precision=dict(argstr="-j %d"), - use_random_seeding=dict(argstr="-u %d"), + argstr="-m %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + brain_template=dict( + argstr="-e %s", + extensions=None, + mandatory=True, + ), + cortical_label_image=dict( + extensions=None, + ), + debug=dict( + argstr="-z 1", + ), + dimension=dict( + argstr="-d %d", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + extraction_registration_mask=dict( + argstr="-f %s", + extensions=None, + ), + image_suffix=dict( + argstr="-s %s", + usedefault=True, + ), + keep_temporary_files=dict( + argstr="-k %d", + ), + label_propagation=dict( + argstr="-l %s", + ), + max_iterations=dict( + argstr="-i %d", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_prefix=dict( + argstr="-o %s", + usedefault=True, + ), + posterior_formulation=dict( + argstr="-b %s", + ), + prior_segmentation_weight=dict( + argstr="-w %f", + ), + quick_registration=dict( + argstr="-q 1", + ), + 
segmentation_iterations=dict( + argstr="-n %d", + ), + segmentation_priors=dict( + argstr="-p %s", + mandatory=True, + ), + t1_registration_template=dict( + argstr="-t %s", + extensions=None, + mandatory=True, + ), + use_floatingpoint_precision=dict( + argstr="-j %d", + ), + use_random_seeding=dict( + argstr="-u %d", + ), ) inputs = CorticalThickness.input_spec() @@ -40,19 +102,43 @@ def test_CorticalThickness_inputs(): def test_CorticalThickness_outputs(): output_map = dict( - BrainExtractionMask=dict(extensions=None), - BrainSegmentation=dict(extensions=None), - BrainSegmentationN4=dict(extensions=None), + BrainExtractionMask=dict( + extensions=None, + ), + BrainSegmentation=dict( + extensions=None, + ), + BrainSegmentationN4=dict( + extensions=None, + ), BrainSegmentationPosteriors=dict(), - BrainVolumes=dict(extensions=None), - CorticalThickness=dict(extensions=None), - CorticalThicknessNormedToTemplate=dict(extensions=None), - ExtractedBrainN4=dict(extensions=None), - SubjectToTemplate0GenericAffine=dict(extensions=None), - SubjectToTemplate1Warp=dict(extensions=None), - SubjectToTemplateLogJacobian=dict(extensions=None), - TemplateToSubject0Warp=dict(extensions=None), - TemplateToSubject1GenericAffine=dict(extensions=None), + BrainVolumes=dict( + extensions=None, + ), + CorticalThickness=dict( + extensions=None, + ), + CorticalThicknessNormedToTemplate=dict( + extensions=None, + ), + ExtractedBrainN4=dict( + extensions=None, + ), + SubjectToTemplate0GenericAffine=dict( + extensions=None, + ), + SubjectToTemplate1Warp=dict( + extensions=None, + ), + SubjectToTemplateLogJacobian=dict( + extensions=None, + ), + TemplateToSubject0Warp=dict( + extensions=None, + ), + TemplateToSubject1GenericAffine=dict( + extensions=None, + ), ) outputs = CorticalThickness.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py b/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py index f9e5e893df..197ec2ad23 100644 --- a/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py +++ b/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py @@ -4,14 +4,42 @@ def test_CreateJacobianDeterminantImage_inputs(): input_map = dict( - args=dict(argstr="%s"), - deformationField=dict(argstr="%s", extensions=None, mandatory=True, position=1), - doLogJacobian=dict(argstr="%d", position=3), - environ=dict(nohash=True, usedefault=True), - imageDimension=dict(argstr="%d", mandatory=True, position=0), - num_threads=dict(nohash=True, usedefault=True), - outputImage=dict(argstr="%s", extensions=None, mandatory=True, position=2), - useGeometric=dict(argstr="%d", position=4), + args=dict( + argstr="%s", + ), + deformationField=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + doLogJacobian=dict( + argstr="%d", + position=3, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + imageDimension=dict( + argstr="%d", + mandatory=True, + position=0, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + outputImage=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + useGeometric=dict( + argstr="%d", + position=4, + ), ) inputs = CreateJacobianDeterminantImage.input_spec() @@ -21,7 +49,11 @@ def test_CreateJacobianDeterminantImage_inputs(): def test_CreateJacobianDeterminantImage_outputs(): - output_map = dict(jacobian_image=dict(extensions=None)) + output_map = dict( + jacobian_image=dict( + extensions=None, + ), + ) outputs = 
CreateJacobianDeterminantImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py b/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py index 31303a5a89..6516b03ad7 100644 --- a/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py +++ b/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py @@ -4,20 +4,56 @@ def test_CreateTiledMosaic_inputs(): input_map = dict( - alpha_value=dict(argstr="-a %.2f"), - args=dict(argstr="%s"), - direction=dict(argstr="-d %d"), - environ=dict(nohash=True, usedefault=True), - flip_slice=dict(argstr="-f %s"), - input_image=dict(argstr="-i %s", extensions=None, mandatory=True), - mask_image=dict(argstr="-x %s", extensions=None), - num_threads=dict(nohash=True, usedefault=True), - output_image=dict(argstr="-o %s", usedefault=True), - pad_or_crop=dict(argstr="-p %s"), - permute_axes=dict(argstr="-g"), - rgb_image=dict(argstr="-r %s", extensions=None, mandatory=True), - slices=dict(argstr="-s %s"), - tile_geometry=dict(argstr="-t %s"), + alpha_value=dict( + argstr="-a %.2f", + ), + args=dict( + argstr="%s", + ), + direction=dict( + argstr="-d %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flip_slice=dict( + argstr="-f %s", + ), + input_image=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + mask_image=dict( + argstr="-x %s", + extensions=None, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + output_image=dict( + argstr="-o %s", + usedefault=True, + ), + pad_or_crop=dict( + argstr="-p %s", + ), + permute_axes=dict( + argstr="-g", + ), + rgb_image=dict( + argstr="-r %s", + extensions=None, + mandatory=True, + ), + slices=dict( + argstr="-s %s", + ), + tile_geometry=dict( + argstr="-t %s", + ), ) inputs = CreateTiledMosaic.input_spec() @@ -27,7 +63,11 @@ def test_CreateTiledMosaic_inputs(): def test_CreateTiledMosaic_outputs(): - output_map = dict(output_image=dict(extensions=None)) + output_map = dict( + output_image=dict( + extensions=None, + ), + ) outputs = CreateTiledMosaic.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py b/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py index f917fc2240..7af4764633 100644 --- a/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py +++ b/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py @@ -4,10 +4,21 @@ def test_DenoiseImage_inputs(): input_map = dict( - args=dict(argstr="%s"), - dimension=dict(argstr="-d %d"), - environ=dict(nohash=True, usedefault=True), - input_image=dict(argstr="-i %s", extensions=None, mandatory=True), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="-d %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_image=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), noise_image=dict( extensions=None, hash_files=False, @@ -15,8 +26,14 @@ def test_DenoiseImage_inputs(): name_source=["input_image"], name_template="%s_noise", ), - noise_model=dict(argstr="-n %s", usedefault=True), - num_threads=dict(nohash=True, usedefault=True), + noise_model=dict( + argstr="-n %s", + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), output_image=dict( argstr="-o %s", extensions=None, @@ -25,9 +42,18 @@ def test_DenoiseImage_inputs(): name_source=["input_image"], name_template="%s_noise_corrected", ), - save_noise=dict(mandatory=True, usedefault=True, xor=["noise_image"]), - shrink_factor=dict(argstr="-s %s", 
usedefault=True), - verbose=dict(argstr="-v"), + save_noise=dict( + mandatory=True, + usedefault=True, + xor=["noise_image"], + ), + shrink_factor=dict( + argstr="-s %s", + usedefault=True, + ), + verbose=dict( + argstr="-v", + ), ) inputs = DenoiseImage.input_spec() @@ -38,7 +64,12 @@ def test_DenoiseImage_inputs(): def test_DenoiseImage_outputs(): output_map = dict( - noise_image=dict(extensions=None), output_image=dict(extensions=None) + noise_image=dict( + extensions=None, + ), + output_image=dict( + extensions=None, + ), ) outputs = DenoiseImage.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py b/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py index d320e48ecb..6fde6f5b44 100644 --- a/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py +++ b/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py @@ -4,24 +4,61 @@ def test_GenWarpFields_inputs(): input_map = dict( - args=dict(argstr="%s"), - bias_field_correction=dict(argstr="-n 1"), - dimension=dict(argstr="-d %d", position=1, usedefault=True), - environ=dict(nohash=True, usedefault=True), - force_proceed=dict(argstr="-f 1"), + args=dict( + argstr="%s", + ), + bias_field_correction=dict( + argstr="-n 1", + ), + dimension=dict( + argstr="-d %d", + position=1, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + force_proceed=dict( + argstr="-f 1", + ), input_image=dict( - argstr="-i %s", copyfile=False, extensions=None, mandatory=True + argstr="-i %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + inverse_warp_template_labels=dict( + argstr="-l", + ), + max_iterations=dict( + argstr="-m %s", + sep="x", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_prefix=dict( + argstr="-o %s", + usedefault=True, + ), + quality_check=dict( + argstr="-q 1", ), - inverse_warp_template_labels=dict(argstr="-l"), - max_iterations=dict(argstr="-m %s", sep="x"), - num_threads=dict(nohash=True, usedefault=True), - out_prefix=dict(argstr="-o %s", usedefault=True), - quality_check=dict(argstr="-q 1"), reference_image=dict( - argstr="-r %s", copyfile=True, extensions=None, mandatory=True + argstr="-r %s", + copyfile=True, + extensions=None, + mandatory=True, + ), + similarity_metric=dict( + argstr="-s %s", + ), + transformation_model=dict( + argstr="-t %s", + usedefault=True, ), - similarity_metric=dict(argstr="-s %s"), - transformation_model=dict(argstr="-t %s", usedefault=True), ) inputs = GenWarpFields.input_spec() @@ -32,11 +69,21 @@ def test_GenWarpFields_inputs(): def test_GenWarpFields_outputs(): output_map = dict( - affine_transformation=dict(extensions=None), - input_file=dict(extensions=None), - inverse_warp_field=dict(extensions=None), - output_file=dict(extensions=None), - warp_field=dict(extensions=None), + affine_transformation=dict( + extensions=None, + ), + input_file=dict( + extensions=None, + ), + inverse_warp_field=dict( + extensions=None, + ), + output_file=dict( + extensions=None, + ), + warp_field=dict( + extensions=None, + ), ) outputs = GenWarpFields.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_ImageMath.py b/nipype/interfaces/ants/tests/test_auto_ImageMath.py index b19e64b1d2..d71d4c476a 100644 --- a/nipype/interfaces/ants/tests/test_auto_ImageMath.py +++ b/nipype/interfaces/ants/tests/test_auto_ImageMath.py @@ -4,14 +4,40 @@ def test_ImageMath_inputs(): input_map = dict( - args=dict(argstr="%s"), - copy_header=dict(usedefault=True), - dimension=dict(argstr="%d", position=1, usedefault=True), - 
environ=dict(nohash=True, usedefault=True), - num_threads=dict(nohash=True, usedefault=True), - op1=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - op2=dict(argstr="%s", position=-1), - operation=dict(argstr="%s", mandatory=True, position=3), + args=dict( + argstr="%s", + ), + copy_header=dict( + usedefault=True, + ), + dimension=dict( + argstr="%d", + position=1, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + op1=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + op2=dict( + argstr="%s", + position=-1, + ), + operation=dict( + argstr="%s", + mandatory=True, + position=3, + ), output_image=dict( argstr="%s", extensions=None, @@ -29,7 +55,11 @@ def test_ImageMath_inputs(): def test_ImageMath_outputs(): - output_map = dict(output_image=dict(extensions=None)) + output_map = dict( + output_image=dict( + extensions=None, + ), + ) outputs = ImageMath.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_JointFusion.py b/nipype/interfaces/ants/tests/test_auto_JointFusion.py index a3e6d61714..f234ceea7c 100644 --- a/nipype/interfaces/ants/tests/test_auto_JointFusion.py +++ b/nipype/interfaces/ants/tests/test_auto_JointFusion.py @@ -4,39 +4,95 @@ def test_JointFusion_inputs(): input_map = dict( - alpha=dict(argstr="-a %s", usedefault=True), - args=dict(argstr="%s"), - atlas_image=dict(argstr="-g %s...", mandatory=True), - atlas_segmentation_image=dict(argstr="-l %s...", mandatory=True), - beta=dict(argstr="-b %s", usedefault=True), - constrain_nonnegative=dict(argstr="-c", usedefault=True), - dimension=dict(argstr="-d %d"), - environ=dict(nohash=True, usedefault=True), + alpha=dict( + argstr="-a %s", + usedefault=True, + ), + args=dict( + argstr="%s", + ), + atlas_image=dict( + argstr="-g %s...", + mandatory=True, + ), + atlas_segmentation_image=dict( + argstr="-l %s...", + mandatory=True, + ), + beta=dict( + argstr="-b %s", + usedefault=True, + ), + constrain_nonnegative=dict( + argstr="-c", + usedefault=True, + ), + dimension=dict( + argstr="-d %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), exclusion_image=dict(), - exclusion_image_label=dict(argstr="-e %s", requires=["exclusion_image"]), - mask_image=dict(argstr="-x %s", extensions=None), - num_threads=dict(nohash=True, usedefault=True), + exclusion_image_label=dict( + argstr="-e %s", + requires=["exclusion_image"], + ), + mask_image=dict( + argstr="-x %s", + extensions=None, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_atlas_voting_weight_name_format=dict( requires=[ "out_label_fusion", "out_intensity_fusion_name_format", "out_label_post_prob_name_format", - ] + ], + ), + out_intensity_fusion_name_format=dict( + argstr="", + ), + out_label_fusion=dict( + argstr="%s", + extensions=None, + hash_files=False, ), - out_intensity_fusion_name_format=dict(argstr=""), - out_label_fusion=dict(argstr="%s", extensions=None, hash_files=False), out_label_post_prob_name_format=dict( - requires=["out_label_fusion", "out_intensity_fusion_name_format"] + requires=["out_label_fusion", "out_intensity_fusion_name_format"], + ), + patch_metric=dict( + argstr="-m %s", + ), + patch_radius=dict( + argstr="-p %s", + maxlen=3, + minlen=3, + ), + retain_atlas_voting_images=dict( + argstr="-f", + usedefault=True, ), - patch_metric=dict(argstr="-m %s"), - patch_radius=dict(argstr="-p %s", maxlen=3, minlen=3), - 
retain_atlas_voting_images=dict(argstr="-f", usedefault=True), retain_label_posterior_images=dict( - argstr="-r", requires=["atlas_segmentation_image"], usedefault=True + argstr="-r", + requires=["atlas_segmentation_image"], + usedefault=True, + ), + search_radius=dict( + argstr="-s %s", + usedefault=True, + ), + target_image=dict( + argstr="-t %s", + mandatory=True, + ), + verbose=dict( + argstr="-v", ), - search_radius=dict(argstr="-s %s", usedefault=True), - target_image=dict(argstr="-t %s", mandatory=True), - verbose=dict(argstr="-v"), ) inputs = JointFusion.input_spec() @@ -49,7 +105,9 @@ def test_JointFusion_outputs(): output_map = dict( out_atlas_voting_weight=dict(), out_intensity_fusion=dict(), - out_label_fusion=dict(extensions=None), + out_label_fusion=dict( + extensions=None, + ), out_label_post_prob=dict(), ) outputs = JointFusion.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py b/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py index d44c1f9a9c..00c1ec53a9 100644 --- a/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py +++ b/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py @@ -4,8 +4,13 @@ def test_KellyKapowski_inputs(): input_map = dict( - args=dict(argstr="%s"), - convergence=dict(argstr='--convergence "%s"', usedefault=True), + args=dict( + argstr="%s", + ), + convergence=dict( + argstr='--convergence "%s"', + usedefault=True, + ), cortical_thickness=dict( argstr='--output "%s"', extensions=None, @@ -14,35 +19,61 @@ def test_KellyKapowski_inputs(): name_source=["segmentation_image"], name_template="%s_cortical_thickness", ), - dimension=dict(argstr="--image-dimensionality %d", usedefault=True), - environ=dict(nohash=True, usedefault=True), - gradient_step=dict(argstr="--gradient-step %f", usedefault=True), - gray_matter_label=dict(usedefault=True), + dimension=dict( + argstr="--image-dimensionality %d", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gradient_step=dict( + argstr="--gradient-step %f", + usedefault=True, + ), + gray_matter_label=dict( + usedefault=True, + ), gray_matter_prob_image=dict( - argstr='--gray-matter-probability-image "%s"', extensions=None + argstr='--gray-matter-probability-image "%s"', + extensions=None, ), max_invert_displacement_field_iters=dict( argstr="--maximum-number-of-invert-displacement-field-iterations %d", usedefault=True, ), - num_threads=dict(nohash=True, usedefault=True), + num_threads=dict( + nohash=True, + usedefault=True, + ), number_integration_points=dict( - argstr="--number-of-integration-points %d", usedefault=True + argstr="--number-of-integration-points %d", + usedefault=True, ), segmentation_image=dict( - argstr='--segmentation-image "%s"', extensions=None, mandatory=True + argstr='--segmentation-image "%s"', + extensions=None, + mandatory=True, + ), + smoothing_variance=dict( + argstr="--smoothing-variance %f", + usedefault=True, ), - smoothing_variance=dict(argstr="--smoothing-variance %f", usedefault=True), smoothing_velocity_field=dict( - argstr="--smoothing-velocity-field-parameter %f", usedefault=True + argstr="--smoothing-velocity-field-parameter %f", + usedefault=True, ), thickness_prior_estimate=dict( - argstr="--thickness-prior-estimate %f", usedefault=True + argstr="--thickness-prior-estimate %f", + usedefault=True, ), thickness_prior_image=dict( - argstr='--thickness-prior-image "%s"', extensions=None + argstr='--thickness-prior-image "%s"', + extensions=None, + ), + use_bspline_smoothing=dict( + 
argstr="--use-bspline-smoothing 1", ), - use_bspline_smoothing=dict(argstr="--use-bspline-smoothing 1"), warped_white_matter=dict( extensions=None, hash_files=False, @@ -50,9 +81,12 @@ def test_KellyKapowski_inputs(): name_source=["segmentation_image"], name_template="%s_warped_white_matter", ), - white_matter_label=dict(usedefault=True), + white_matter_label=dict( + usedefault=True, + ), white_matter_prob_image=dict( - argstr='--white-matter-probability-image "%s"', extensions=None + argstr='--white-matter-probability-image "%s"', + extensions=None, ), ) inputs = KellyKapowski.input_spec() @@ -64,8 +98,12 @@ def test_KellyKapowski_inputs(): def test_KellyKapowski_outputs(): output_map = dict( - cortical_thickness=dict(extensions=None), - warped_white_matter=dict(extensions=None), + cortical_thickness=dict( + extensions=None, + ), + warped_white_matter=dict( + extensions=None, + ), ) outputs = KellyKapowski.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py b/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py index 5159775684..306c7aa17e 100644 --- a/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py +++ b/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py @@ -4,16 +4,40 @@ def test_LabelGeometry_inputs(): input_map = dict( - args=dict(argstr="%s"), - dimension=dict(argstr="%d", position=0, usedefault=True), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="%d", + position=0, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), intensity_image=dict( - argstr="%s", extensions=None, mandatory=True, position=2, usedefault=True + argstr="%s", + extensions=None, + mandatory=True, + position=2, + usedefault=True, + ), + label_image=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - label_image=dict(argstr="%s", extensions=None, mandatory=True, position=1), - num_threads=dict(nohash=True, usedefault=True), output_file=dict( - argstr="%s", name_source=["label_image"], name_template="%s.csv", position=3 + argstr="%s", + name_source=["label_image"], + name_template="%s.csv", + position=3, ), ) inputs = LabelGeometry.input_spec() @@ -24,7 +48,11 @@ def test_LabelGeometry_inputs(): def test_LabelGeometry_outputs(): - output_map = dict(output_file=dict(extensions=None)) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = LabelGeometry.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py b/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py index 9dd1c79fc5..f6a8ffde25 100644 --- a/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py +++ b/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py @@ -4,16 +4,36 @@ def test_LaplacianThickness_inputs(): input_map = dict( - args=dict(argstr="%s"), - dT=dict(argstr="%s", position=6, requires=["prior_thickness"]), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + dT=dict( + argstr="%s", + position=6, + requires=["prior_thickness"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), input_gm=dict( - argstr="%s", copyfile=True, extensions=None, mandatory=True, position=2 + argstr="%s", + copyfile=True, + extensions=None, + mandatory=True, + position=2, ), input_wm=dict( - argstr="%s", copyfile=True, extensions=None, mandatory=True, position=1 + argstr="%s", + copyfile=True, + 
extensions=None, + mandatory=True, + position=1, + ), + num_threads=dict( + nohash=True, + usedefault=True, ), - num_threads=dict(nohash=True, usedefault=True), output_image=dict( argstr="%s", hash_files=False, @@ -22,10 +42,25 @@ def test_LaplacianThickness_inputs(): name_template="%s_thickness", position=3, ), - prior_thickness=dict(argstr="%s", position=5, requires=["smooth_param"]), - smooth_param=dict(argstr="%s", position=4), - sulcus_prior=dict(argstr="%s", position=7, requires=["dT"]), - tolerance=dict(argstr="%s", position=8, requires=["sulcus_prior"]), + prior_thickness=dict( + argstr="%s", + position=5, + requires=["smooth_param"], + ), + smooth_param=dict( + argstr="%s", + position=4, + ), + sulcus_prior=dict( + argstr="%s", + position=7, + requires=["dT"], + ), + tolerance=dict( + argstr="%s", + position=8, + requires=["sulcus_prior"], + ), ) inputs = LaplacianThickness.input_spec() @@ -35,7 +70,11 @@ def test_LaplacianThickness_inputs(): def test_LaplacianThickness_outputs(): - output_map = dict(output_image=dict(extensions=None)) + output_map = dict( + output_image=dict( + extensions=None, + ), + ) outputs = LaplacianThickness.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py b/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py index cf76c01234..05279d8017 100644 --- a/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py +++ b/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py @@ -4,19 +4,57 @@ def test_MeasureImageSimilarity_inputs(): input_map = dict( - args=dict(argstr="%s"), - dimension=dict(argstr="--dimensionality %d", position=1), - environ=dict(nohash=True, usedefault=True), - fixed_image=dict(extensions=None, mandatory=True), - fixed_image_mask=dict(argstr="%s", extensions=None), - metric=dict(argstr="%s", mandatory=True), - metric_weight=dict(requires=["metric"], usedefault=True), - moving_image=dict(extensions=None, mandatory=True), - moving_image_mask=dict(extensions=None, requires=["fixed_image_mask"]), - num_threads=dict(nohash=True, usedefault=True), - radius_or_number_of_bins=dict(mandatory=True, requires=["metric"]), - sampling_percentage=dict(mandatory=True, requires=["metric"]), - sampling_strategy=dict(requires=["metric"], usedefault=True), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="--dimensionality %d", + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_image=dict( + extensions=None, + mandatory=True, + ), + fixed_image_mask=dict( + argstr="%s", + extensions=None, + ), + metric=dict( + argstr="%s", + mandatory=True, + ), + metric_weight=dict( + requires=["metric"], + usedefault=True, + ), + moving_image=dict( + extensions=None, + mandatory=True, + ), + moving_image_mask=dict( + extensions=None, + requires=["fixed_image_mask"], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + radius_or_number_of_bins=dict( + mandatory=True, + requires=["metric"], + ), + sampling_percentage=dict( + mandatory=True, + requires=["metric"], + ), + sampling_strategy=dict( + requires=["metric"], + usedefault=True, + ), ) inputs = MeasureImageSimilarity.input_spec() @@ -26,7 +64,9 @@ def test_MeasureImageSimilarity_inputs(): def test_MeasureImageSimilarity_outputs(): - output_map = dict(similarity=dict()) + output_map = dict( + similarity=dict(), + ) outputs = MeasureImageSimilarity.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py b/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py index ad74271e6f..89f8fa60ae 100644 --- a/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py +++ b/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py @@ -4,15 +4,39 @@ def test_MultiplyImages_inputs(): input_map = dict( - args=dict(argstr="%s"), - dimension=dict(argstr="%d", mandatory=True, position=0), - environ=dict(nohash=True, usedefault=True), - first_input=dict(argstr="%s", extensions=None, mandatory=True, position=1), - num_threads=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="%d", + mandatory=True, + position=0, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + first_input=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), output_product_image=dict( - argstr="%s", extensions=None, mandatory=True, position=3 + argstr="%s", + extensions=None, + mandatory=True, + position=3, + ), + second_input=dict( + argstr="%s", + mandatory=True, + position=2, ), - second_input=dict(argstr="%s", mandatory=True, position=2), ) inputs = MultiplyImages.input_spec() @@ -22,7 +46,11 @@ def test_MultiplyImages_inputs(): def test_MultiplyImages_outputs(): - output_map = dict(output_product_image=dict(extensions=None)) + output_map = dict( + output_product_image=dict( + extensions=None, + ), + ) outputs = MultiplyImages.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py b/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py index 912a71c137..59775df2ea 100644 --- a/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py +++ b/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py @@ -4,19 +4,53 @@ def test_N4BiasFieldCorrection_inputs(): input_map = dict( - args=dict(argstr="%s"), - bias_image=dict(extensions=None, hash_files=False), - bspline_fitting_distance=dict(argstr="--bspline-fitting %s"), - bspline_order=dict(requires=["bspline_fitting_distance"]), - convergence_threshold=dict(requires=["n_iterations"]), - copy_header=dict(mandatory=True, usedefault=True), - dimension=dict(argstr="-d %d", usedefault=True), - environ=dict(nohash=True, usedefault=True), - histogram_sharpening=dict(argstr="--histogram-sharpening [%g,%g,%d]"), - input_image=dict(argstr="--input-image %s", extensions=None, mandatory=True), - mask_image=dict(argstr="--mask-image %s", extensions=None), - n_iterations=dict(argstr="--convergence %s"), - num_threads=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + bias_image=dict( + extensions=None, + hash_files=False, + ), + bspline_fitting_distance=dict( + argstr="--bspline-fitting %s", + ), + bspline_order=dict( + requires=["bspline_fitting_distance"], + ), + convergence_threshold=dict( + requires=["n_iterations"], + ), + copy_header=dict( + mandatory=True, + usedefault=True, + ), + dimension=dict( + argstr="-d %d", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + histogram_sharpening=dict( + argstr="--histogram-sharpening [%g,%g,%d]", + ), + input_image=dict( + argstr="--input-image %s", + extensions=None, + mandatory=True, + ), + mask_image=dict( + argstr="--mask-image %s", + extensions=None, + ), + n_iterations=dict( + argstr="--convergence %s", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), output_image=dict( argstr="--output 
%s", hash_files=False, @@ -24,10 +58,23 @@ def test_N4BiasFieldCorrection_inputs(): name_source=["input_image"], name_template="%s_corrected", ), - rescale_intensities=dict(argstr="-r", min_ver="2.1.0", usedefault=True), - save_bias=dict(mandatory=True, usedefault=True, xor=["bias_image"]), - shrink_factor=dict(argstr="--shrink-factor %d"), - weight_image=dict(argstr="--weight-image %s", extensions=None), + rescale_intensities=dict( + argstr="-r", + min_ver="2.1.0", + usedefault=True, + ), + save_bias=dict( + mandatory=True, + usedefault=True, + xor=["bias_image"], + ), + shrink_factor=dict( + argstr="--shrink-factor %d", + ), + weight_image=dict( + argstr="--weight-image %s", + extensions=None, + ), ) inputs = N4BiasFieldCorrection.input_spec() @@ -38,7 +85,12 @@ def test_N4BiasFieldCorrection_inputs(): def test_N4BiasFieldCorrection_outputs(): output_map = dict( - bias_image=dict(extensions=None), output_image=dict(extensions=None) + bias_image=dict( + extensions=None, + ), + output_image=dict( + extensions=None, + ), ) outputs = N4BiasFieldCorrection.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_Registration.py b/nipype/interfaces/ants/tests/test_auto_Registration.py index a2d875c745..507e0effe2 100644 --- a/nipype/interfaces/ants/tests/test_auto_Registration.py +++ b/nipype/interfaces/ants/tests/test_auto_Registration.py @@ -4,81 +4,170 @@ def test_Registration_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), collapse_output_transforms=dict( - argstr="--collapse-output-transforms %d", usedefault=True + argstr="--collapse-output-transforms %d", + usedefault=True, + ), + convergence_threshold=dict( + requires=["number_of_iterations"], + usedefault=True, ), - convergence_threshold=dict(requires=["number_of_iterations"], usedefault=True), convergence_window_size=dict( - requires=["convergence_threshold"], usedefault=True + requires=["convergence_threshold"], + usedefault=True, + ), + dimension=dict( + argstr="--dimensionality %d", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_image=dict( + mandatory=True, ), - dimension=dict(argstr="--dimensionality %d", usedefault=True), - environ=dict(nohash=True, usedefault=True), - fixed_image=dict(mandatory=True), fixed_image_mask=dict( - argstr="%s", extensions=None, max_ver="2.1.0", xor=["fixed_image_masks"] + argstr="%s", + extensions=None, + max_ver="2.1.0", + xor=["fixed_image_masks"], + ), + fixed_image_masks=dict( + min_ver="2.2.0", + xor=["fixed_image_mask"], + ), + float=dict( + argstr="--float %d", ), - fixed_image_masks=dict(min_ver="2.2.0", xor=["fixed_image_mask"]), - float=dict(argstr="--float %d"), initial_moving_transform=dict( - argstr="%s", xor=["initial_moving_transform_com"] + argstr="%s", + xor=["initial_moving_transform_com"], ), initial_moving_transform_com=dict( - argstr="%s", xor=["initial_moving_transform"] + argstr="%s", + xor=["initial_moving_transform"], ), initialize_transforms_per_stage=dict( - argstr="--initialize-transforms-per-stage %d", usedefault=True + argstr="--initialize-transforms-per-stage %d", + usedefault=True, + ), + interpolation=dict( + argstr="%s", + usedefault=True, ), - interpolation=dict(argstr="%s", usedefault=True), interpolation_parameters=dict(), invert_initial_moving_transform=dict( - requires=["initial_moving_transform"], xor=["initial_moving_transform_com"] + requires=["initial_moving_transform"], + xor=["initial_moving_transform_com"], + ), + metric=dict( + mandatory=True, ), - 
metric=dict(mandatory=True), metric_item_trait=dict(), metric_stage_trait=dict(), - metric_weight=dict(mandatory=True, requires=["metric"], usedefault=True), - metric_weight_item_trait=dict(usedefault=True), + metric_weight=dict( + mandatory=True, + requires=["metric"], + usedefault=True, + ), + metric_weight_item_trait=dict( + usedefault=True, + ), metric_weight_stage_trait=dict(), - moving_image=dict(mandatory=True), + moving_image=dict( + mandatory=True, + ), moving_image_mask=dict( extensions=None, max_ver="2.1.0", requires=["fixed_image_mask"], xor=["moving_image_masks"], ), - moving_image_masks=dict(min_ver="2.2.0", xor=["moving_image_mask"]), - num_threads=dict(nohash=True, usedefault=True), + moving_image_masks=dict( + min_ver="2.2.0", + xor=["moving_image_mask"], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), number_of_iterations=dict(), output_inverse_warped_image=dict( - hash_files=False, requires=["output_warped_image"] + hash_files=False, + requires=["output_warped_image"], + ), + output_transform_prefix=dict( + argstr="%s", + usedefault=True, + ), + output_warped_image=dict( + hash_files=False, + ), + radius_bins_item_trait=dict( + usedefault=True, ), - output_transform_prefix=dict(argstr="%s", usedefault=True), - output_warped_image=dict(hash_files=False), - radius_bins_item_trait=dict(usedefault=True), radius_bins_stage_trait=dict(), - radius_or_number_of_bins=dict(requires=["metric_weight"], usedefault=True), - restore_state=dict(argstr="--restore-state %s", extensions=None), + radius_or_number_of_bins=dict( + requires=["metric_weight"], + usedefault=True, + ), + restore_state=dict( + argstr="--restore-state %s", + extensions=None, + ), restrict_deformation=dict(), - sampling_percentage=dict(requires=["sampling_strategy"]), + sampling_percentage=dict( + requires=["sampling_strategy"], + ), sampling_percentage_item_trait=dict(), sampling_percentage_stage_trait=dict(), - sampling_strategy=dict(requires=["metric_weight"]), + sampling_strategy=dict( + requires=["metric_weight"], + ), sampling_strategy_item_trait=dict(), sampling_strategy_stage_trait=dict(), - save_state=dict(argstr="--save-state %s", extensions=None), - shrink_factors=dict(mandatory=True), - sigma_units=dict(requires=["smoothing_sigmas"]), - smoothing_sigmas=dict(mandatory=True), + save_state=dict( + argstr="--save-state %s", + extensions=None, + ), + shrink_factors=dict( + mandatory=True, + ), + sigma_units=dict( + requires=["smoothing_sigmas"], + ), + smoothing_sigmas=dict( + mandatory=True, + ), transform_parameters=dict(), - transforms=dict(argstr="%s", mandatory=True), + transforms=dict( + argstr="%s", + mandatory=True, + ), use_estimate_learning_rate_once=dict(), - use_histogram_matching=dict(usedefault=True), - verbose=dict(argstr="-v", nohash=True, usedefault=True), - winsorize_lower_quantile=dict(argstr="%s", usedefault=True), - winsorize_upper_quantile=dict(argstr="%s", usedefault=True), + use_histogram_matching=dict( + usedefault=True, + ), + verbose=dict( + argstr="-v", + nohash=True, + usedefault=True, + ), + winsorize_lower_quantile=dict( + argstr="%s", + usedefault=True, + ), + winsorize_upper_quantile=dict( + argstr="%s", + usedefault=True, + ), write_composite_transform=dict( - argstr="--write-composite-transform %d", usedefault=True + argstr="--write-composite-transform %d", + usedefault=True, ), ) inputs = Registration.input_spec() @@ -90,19 +179,29 @@ def test_Registration_inputs(): def test_Registration_outputs(): output_map = dict( - 
composite_transform=dict(extensions=None), + composite_transform=dict( + extensions=None, + ), elapsed_time=dict(), forward_invert_flags=dict(), forward_transforms=dict(), - inverse_composite_transform=dict(extensions=None), - inverse_warped_image=dict(extensions=None), + inverse_composite_transform=dict( + extensions=None, + ), + inverse_warped_image=dict( + extensions=None, + ), metric_value=dict(), reverse_forward_invert_flags=dict(), reverse_forward_transforms=dict(), reverse_invert_flags=dict(), reverse_transforms=dict(), - save_state=dict(extensions=None), - warped_image=dict(extensions=None), + save_state=dict( + extensions=None, + ), + warped_image=dict( + extensions=None, + ), ) outputs = Registration.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py b/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py index 8c73bc62e6..c09f08d17a 100644 --- a/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py +++ b/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py @@ -4,18 +4,52 @@ def test_RegistrationSynQuick_inputs(): input_map = dict( - args=dict(argstr="%s"), - dimension=dict(argstr="-d %d", usedefault=True), - environ=dict(nohash=True, usedefault=True), - fixed_image=dict(argstr="-f %s...", mandatory=True), - histogram_bins=dict(argstr="-r %d", usedefault=True), - moving_image=dict(argstr="-m %s...", mandatory=True), - num_threads=dict(argstr="-n %d", usedefault=True), - output_prefix=dict(argstr="-o %s", usedefault=True), - precision_type=dict(argstr="-p %s", usedefault=True), - spline_distance=dict(argstr="-s %d", usedefault=True), - transform_type=dict(argstr="-t %s", usedefault=True), - use_histogram_matching=dict(argstr="-j %d"), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="-d %d", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_image=dict( + argstr="-f %s...", + mandatory=True, + ), + histogram_bins=dict( + argstr="-r %d", + usedefault=True, + ), + moving_image=dict( + argstr="-m %s...", + mandatory=True, + ), + num_threads=dict( + argstr="-n %d", + usedefault=True, + ), + output_prefix=dict( + argstr="-o %s", + usedefault=True, + ), + precision_type=dict( + argstr="-p %s", + usedefault=True, + ), + spline_distance=dict( + argstr="-s %d", + usedefault=True, + ), + transform_type=dict( + argstr="-t %s", + usedefault=True, + ), + use_histogram_matching=dict( + argstr="-j %d", + ), ) inputs = RegistrationSynQuick.input_spec() @@ -26,11 +60,21 @@ def test_RegistrationSynQuick_inputs(): def test_RegistrationSynQuick_outputs(): output_map = dict( - forward_warp_field=dict(extensions=None), - inverse_warp_field=dict(extensions=None), - inverse_warped_image=dict(extensions=None), - out_matrix=dict(extensions=None), - warped_image=dict(extensions=None), + forward_warp_field=dict( + extensions=None, + ), + inverse_warp_field=dict( + extensions=None, + ), + inverse_warped_image=dict( + extensions=None, + ), + out_matrix=dict( + extensions=None, + ), + warped_image=dict( + extensions=None, + ), ) outputs = RegistrationSynQuick.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py b/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py index 25249aee91..9bf5c6c6a6 100644 --- a/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py +++ b/nipype/interfaces/ants/tests/test_auto_ResampleImageBySpacing.py @@ -4,15 +4,47 @@ def test_ResampleImageBySpacing_inputs(): input_map = dict( - addvox=dict(argstr="%d", 
position=6, requires=["apply_smoothing"]), - apply_smoothing=dict(argstr="%d", position=5), - args=dict(argstr="%s"), - dimension=dict(argstr="%d", position=1, usedefault=True), - environ=dict(nohash=True, usedefault=True), - input_image=dict(argstr="%s", extensions=None, mandatory=True, position=2), - nn_interp=dict(argstr="%d", position=-1, requires=["addvox"]), - num_threads=dict(nohash=True, usedefault=True), - out_spacing=dict(argstr="%s", mandatory=True, position=4), + addvox=dict( + argstr="%d", + position=6, + requires=["apply_smoothing"], + ), + apply_smoothing=dict( + argstr="%d", + position=5, + ), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="%d", + position=1, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_image=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + nn_interp=dict( + argstr="%d", + position=-1, + requires=["addvox"], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_spacing=dict( + argstr="%s", + mandatory=True, + position=4, + ), output_image=dict( argstr="%s", extensions=None, @@ -30,7 +62,11 @@ def test_ResampleImageBySpacing_inputs(): def test_ResampleImageBySpacing_outputs(): - output_map = dict(output_image=dict(extensions=None)) + output_map = dict( + output_image=dict( + extensions=None, + ), + ) outputs = ResampleImageBySpacing.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_ThresholdImage.py b/nipype/interfaces/ants/tests/test_auto_ThresholdImage.py index 85788d4181..c17f340b69 100644 --- a/nipype/interfaces/ants/tests/test_auto_ThresholdImage.py +++ b/nipype/interfaces/ants/tests/test_auto_ThresholdImage.py @@ -4,21 +4,52 @@ def test_ThresholdImage_inputs(): input_map = dict( - args=dict(argstr="%s"), - copy_header=dict(mandatory=True, usedefault=True), - dimension=dict(argstr="%d", position=1, usedefault=True), - environ=dict(nohash=True, usedefault=True), - input_image=dict(argstr="%s", extensions=None, mandatory=True, position=2), - input_mask=dict(argstr="%s", extensions=None, requires=["num_thresholds"]), - inside_value=dict(argstr="%f", position=6, requires=["th_low"]), + args=dict( + argstr="%s", + ), + copy_header=dict( + mandatory=True, + usedefault=True, + ), + dimension=dict( + argstr="%d", + position=1, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_image=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + input_mask=dict( + argstr="%s", + extensions=None, + requires=["num_thresholds"], + ), + inside_value=dict( + argstr="%f", + position=6, + requires=["th_low"], + ), mode=dict( argstr="%s", position=4, requires=["num_thresholds"], xor=["th_low", "th_high"], ), - num_threads=dict(nohash=True, usedefault=True), - num_thresholds=dict(argstr="%d", position=5), + num_threads=dict( + nohash=True, + usedefault=True, + ), + num_thresholds=dict( + argstr="%d", + position=5, + ), output_image=dict( argstr="%s", extensions=None, @@ -27,9 +58,21 @@ def test_ThresholdImage_inputs(): name_template="%s_resampled", position=3, ), - outside_value=dict(argstr="%f", position=7, requires=["th_low"]), - th_high=dict(argstr="%f", position=5, xor=["mode"]), - th_low=dict(argstr="%f", position=4, xor=["mode"]), + outside_value=dict( + argstr="%f", + position=7, + requires=["th_low"], + ), + th_high=dict( + argstr="%f", + position=5, + xor=["mode"], + ), + th_low=dict( + argstr="%f", + position=4, + xor=["mode"], + ), ) inputs = 
ThresholdImage.input_spec() @@ -39,7 +82,11 @@ def test_ThresholdImage_inputs(): def test_ThresholdImage_outputs(): - output_map = dict(output_image=dict(extensions=None)) + output_map = dict( + output_image=dict( + extensions=None, + ), + ) outputs = ThresholdImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py b/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py index c4e59f3779..66f45d0cd7 100644 --- a/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py @@ -4,14 +4,34 @@ def test_WarpImageMultiTransform_inputs(): input_map = dict( - args=dict(argstr="%s"), - dimension=dict(argstr="%d", position=1, usedefault=True), - environ=dict(nohash=True, usedefault=True), - input_image=dict(argstr="%s", extensions=None, mandatory=True, position=2), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="%d", + position=1, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_image=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), invert_affine=dict(), - num_threads=dict(nohash=True, usedefault=True), + num_threads=dict( + nohash=True, + usedefault=True, + ), out_postfix=dict( - extensions=None, hash_files=False, usedefault=True, xor=["output_image"] + extensions=None, + hash_files=False, + usedefault=True, + xor=["output_image"], ), output_image=dict( argstr="%s", @@ -21,12 +41,29 @@ def test_WarpImageMultiTransform_inputs(): position=3, xor=["out_postfix"], ), - reference_image=dict(argstr="-R %s", extensions=None, xor=["tightest_box"]), - reslice_by_header=dict(argstr="--reslice-by-header"), - tightest_box=dict(argstr="--tightest-bounding-box", xor=["reference_image"]), - transformation_series=dict(argstr="%s", mandatory=True, position=-1), - use_bspline=dict(argstr="--use-BSpline"), - use_nearest=dict(argstr="--use-NN"), + reference_image=dict( + argstr="-R %s", + extensions=None, + xor=["tightest_box"], + ), + reslice_by_header=dict( + argstr="--reslice-by-header", + ), + tightest_box=dict( + argstr="--tightest-bounding-box", + xor=["reference_image"], + ), + transformation_series=dict( + argstr="%s", + mandatory=True, + position=-1, + ), + use_bspline=dict( + argstr="--use-BSpline", + ), + use_nearest=dict( + argstr="--use-NN", + ), ) inputs = WarpImageMultiTransform.input_spec() @@ -36,7 +73,11 @@ def test_WarpImageMultiTransform_inputs(): def test_WarpImageMultiTransform_outputs(): - output_map = dict(output_image=dict(extensions=None)) + output_map = dict( + output_image=dict( + extensions=None, + ), + ) outputs = WarpImageMultiTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py b/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py index cd68fe57ae..63d8d8365e 100644 --- a/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py @@ -4,19 +4,56 @@ def test_WarpTimeSeriesImageMultiTransform_inputs(): input_map = dict( - args=dict(argstr="%s"), - dimension=dict(argstr="%d", position=1, usedefault=True), - environ=dict(nohash=True, usedefault=True), - input_image=dict(argstr="%s", copyfile=True, extensions=None, mandatory=True), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="%d", + position=1, + 
usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_image=dict( + argstr="%s", + copyfile=True, + extensions=None, + mandatory=True, + ), invert_affine=dict(), - num_threads=dict(nohash=True, usedefault=True), - out_postfix=dict(argstr="%s", usedefault=True), - reference_image=dict(argstr="-R %s", extensions=None, xor=["tightest_box"]), - reslice_by_header=dict(argstr="--reslice-by-header"), - tightest_box=dict(argstr="--tightest-bounding-box", xor=["reference_image"]), - transformation_series=dict(argstr="%s", copyfile=False, mandatory=True), - use_bspline=dict(argstr="--use-Bspline"), - use_nearest=dict(argstr="--use-NN"), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_postfix=dict( + argstr="%s", + usedefault=True, + ), + reference_image=dict( + argstr="-R %s", + extensions=None, + xor=["tightest_box"], + ), + reslice_by_header=dict( + argstr="--reslice-by-header", + ), + tightest_box=dict( + argstr="--tightest-bounding-box", + xor=["reference_image"], + ), + transformation_series=dict( + argstr="%s", + copyfile=False, + mandatory=True, + ), + use_bspline=dict( + argstr="--use-Bspline", + ), + use_nearest=dict( + argstr="--use-NN", + ), ) inputs = WarpTimeSeriesImageMultiTransform.input_spec() @@ -26,7 +63,11 @@ def test_WarpTimeSeriesImageMultiTransform_inputs(): def test_WarpTimeSeriesImageMultiTransform_outputs(): - output_map = dict(output_image=dict(extensions=None)) + output_map = dict( + output_image=dict( + extensions=None, + ), + ) outputs = WarpTimeSeriesImageMultiTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py b/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py index befec00f52..0318db7624 100644 --- a/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py +++ b/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py @@ -4,24 +4,61 @@ def test_antsIntroduction_inputs(): input_map = dict( - args=dict(argstr="%s"), - bias_field_correction=dict(argstr="-n 1"), - dimension=dict(argstr="-d %d", position=1, usedefault=True), - environ=dict(nohash=True, usedefault=True), - force_proceed=dict(argstr="-f 1"), + args=dict( + argstr="%s", + ), + bias_field_correction=dict( + argstr="-n 1", + ), + dimension=dict( + argstr="-d %d", + position=1, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + force_proceed=dict( + argstr="-f 1", + ), input_image=dict( - argstr="-i %s", copyfile=False, extensions=None, mandatory=True + argstr="-i %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + inverse_warp_template_labels=dict( + argstr="-l", + ), + max_iterations=dict( + argstr="-m %s", + sep="x", + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_prefix=dict( + argstr="-o %s", + usedefault=True, + ), + quality_check=dict( + argstr="-q 1", ), - inverse_warp_template_labels=dict(argstr="-l"), - max_iterations=dict(argstr="-m %s", sep="x"), - num_threads=dict(nohash=True, usedefault=True), - out_prefix=dict(argstr="-o %s", usedefault=True), - quality_check=dict(argstr="-q 1"), reference_image=dict( - argstr="-r %s", copyfile=True, extensions=None, mandatory=True + argstr="-r %s", + copyfile=True, + extensions=None, + mandatory=True, + ), + similarity_metric=dict( + argstr="-s %s", + ), + transformation_model=dict( + argstr="-t %s", + usedefault=True, ), - similarity_metric=dict(argstr="-s %s"), - transformation_model=dict(argstr="-t %s", usedefault=True), ) inputs = 
antsIntroduction.input_spec() @@ -32,11 +69,21 @@ def test_antsIntroduction_inputs(): def test_antsIntroduction_outputs(): output_map = dict( - affine_transformation=dict(extensions=None), - input_file=dict(extensions=None), - inverse_warp_field=dict(extensions=None), - output_file=dict(extensions=None), - warp_field=dict(extensions=None), + affine_transformation=dict( + extensions=None, + ), + input_file=dict( + extensions=None, + ), + inverse_warp_field=dict( + extensions=None, + ), + output_file=dict( + extensions=None, + ), + warp_field=dict( + extensions=None, + ), ) outputs = antsIntroduction.output_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py b/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py index 4732bab229..2713c6af54 100644 --- a/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py +++ b/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py @@ -4,21 +4,63 @@ def test_buildtemplateparallel_inputs(): input_map = dict( - args=dict(argstr="%s"), - bias_field_correction=dict(argstr="-n 1"), - dimension=dict(argstr="-d %d", position=1, usedefault=True), - environ=dict(nohash=True, usedefault=True), - gradient_step_size=dict(argstr="-g %f"), - in_files=dict(argstr="%s", mandatory=True, position=-1), - iteration_limit=dict(argstr="-i %d", usedefault=True), - max_iterations=dict(argstr="-m %s", sep="x"), - num_cores=dict(argstr="-j %d", requires=["parallelization"]), - num_threads=dict(nohash=True, usedefault=True), - out_prefix=dict(argstr="-o %s", usedefault=True), - parallelization=dict(argstr="-c %d", usedefault=True), - rigid_body_registration=dict(argstr="-r 1"), - similarity_metric=dict(argstr="-s %s"), - transformation_model=dict(argstr="-t %s", usedefault=True), + args=dict( + argstr="%s", + ), + bias_field_correction=dict( + argstr="-n 1", + ), + dimension=dict( + argstr="-d %d", + position=1, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gradient_step_size=dict( + argstr="-g %f", + ), + in_files=dict( + argstr="%s", + mandatory=True, + position=-1, + ), + iteration_limit=dict( + argstr="-i %d", + usedefault=True, + ), + max_iterations=dict( + argstr="-m %s", + sep="x", + ), + num_cores=dict( + argstr="-j %d", + requires=["parallelization"], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_prefix=dict( + argstr="-o %s", + usedefault=True, + ), + parallelization=dict( + argstr="-c %d", + usedefault=True, + ), + rigid_body_registration=dict( + argstr="-r 1", + ), + similarity_metric=dict( + argstr="-s %s", + ), + transformation_model=dict( + argstr="-t %s", + usedefault=True, + ), use_first_as_target=dict(), ) inputs = buildtemplateparallel.input_spec() @@ -30,7 +72,9 @@ def test_buildtemplateparallel_inputs(): def test_buildtemplateparallel_outputs(): output_map = dict( - final_template_file=dict(extensions=None), + final_template_file=dict( + extensions=None, + ), subject_outfiles=dict(), template_files=dict(), ) diff --git a/nipype/interfaces/base/tests/test_auto_CommandLine.py b/nipype/interfaces/base/tests/test_auto_CommandLine.py index 9a114f0d3c..b03e4adfca 100644 --- a/nipype/interfaces/base/tests/test_auto_CommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_CommandLine.py @@ -3,7 +3,15 @@ def test_CommandLine_inputs(): - input_map = dict(args=dict(argstr="%s"), environ=dict(nohash=True, usedefault=True)) + input_map = dict( + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ) inputs = 
CommandLine.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py b/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py index c8ec0bf3be..908943c754 100644 --- a/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py @@ -4,10 +4,17 @@ def test_MpiCommandLine_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), n_procs=dict(), - use_mpi=dict(usedefault=True), + use_mpi=dict( + usedefault=True, + ), ) inputs = MpiCommandLine.input_spec() diff --git a/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py b/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py index 51161ea121..1197b2479c 100644 --- a/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py @@ -3,7 +3,15 @@ def test_SEMLikeCommandLine_inputs(): - input_map = dict(args=dict(argstr="%s"), environ=dict(nohash=True, usedefault=True)) + input_map = dict( + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ) inputs = SEMLikeCommandLine.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py b/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py index 9a632f9b42..39b80d487b 100644 --- a/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py @@ -4,9 +4,19 @@ def test_StdOutCommandLine_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), ) inputs = StdOutCommandLine.input_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_BDP.py b/nipype/interfaces/brainsuite/tests/test_auto_BDP.py index 495ff8fb0b..0da29c372d 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_BDP.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_BDP.py @@ -10,7 +10,9 @@ def test_BDP_inputs(): position=-1, xor=["bMatrixFile"], ), - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), bMatrixFile=dict( argstr="--bmat %s", extensions=None, @@ -18,7 +20,9 @@ def test_BDP_inputs(): position=-1, xor=["BVecBValPair"], ), - bValRatioThreshold=dict(argstr="--bval-ratio-threshold %f"), + bValRatioThreshold=dict( + argstr="--bval-ratio-threshold %f", + ), bfcFile=dict( argstr="%s", extensions=None, @@ -27,66 +31,152 @@ def test_BDP_inputs(): xor=["noStructuralRegistration"], ), customDiffusionLabel=dict( - argstr="--custom-diffusion-label %s", extensions=None - ), - customLabelXML=dict(argstr="--custom-label-xml %s", extensions=None), - customT1Label=dict(argstr="--custom-t1-label %s", extensions=None), - dataSinkDelay=dict(argstr="%s"), - dcorrRegMeasure=dict(argstr="--dcorr-reg-method %s"), - dcorrWeight=dict(argstr="--dcorr-regularization-wt %f"), - dwiMask=dict(argstr="--dwi-mask %s", extensions=None), - echoSpacing=dict(argstr="--echo-spacing=%f"), - environ=dict(nohash=True, usedefault=True), - estimateODF_3DShore=dict(argstr="--3dshore --diffusion_time_ms %f"), - 
estimateODF_FRACT=dict(argstr="--FRACT"), - estimateODF_FRT=dict(argstr="--FRT"), - estimateTensors=dict(argstr="--tensors"), + argstr="--custom-diffusion-label %s", + extensions=None, + ), + customLabelXML=dict( + argstr="--custom-label-xml %s", + extensions=None, + ), + customT1Label=dict( + argstr="--custom-t1-label %s", + extensions=None, + ), + dataSinkDelay=dict( + argstr="%s", + ), + dcorrRegMeasure=dict( + argstr="--dcorr-reg-method %s", + ), + dcorrWeight=dict( + argstr="--dcorr-regularization-wt %f", + ), + dwiMask=dict( + argstr="--dwi-mask %s", + extensions=None, + ), + echoSpacing=dict( + argstr="--echo-spacing=%f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + estimateODF_3DShore=dict( + argstr="--3dshore --diffusion_time_ms %f", + ), + estimateODF_FRACT=dict( + argstr="--FRACT", + ), + estimateODF_FRT=dict( + argstr="--FRT", + ), + estimateTensors=dict( + argstr="--tensors", + ), fieldmapCorrection=dict( - argstr="--fieldmap-correction %s", extensions=None, requires=["echoSpacing"] + argstr="--fieldmap-correction %s", + extensions=None, + requires=["echoSpacing"], ), fieldmapCorrectionMethod=dict( - argstr="--fieldmap-correction-method %s", xor=["skipIntensityCorr"] - ), - fieldmapSmooth=dict(argstr="--fieldmap-smooth3=%f"), - flagConfigFile=dict(argstr="--flag-conf-file %s", extensions=None), - forcePartialROIStats=dict(argstr="--force-partial-roi-stats"), - generateStats=dict(argstr="--generate-stats"), - ignoreFieldmapFOV=dict(argstr="--ignore-fieldmap-fov"), - ignoreMemory=dict(argstr="--ignore-memory"), + argstr="--fieldmap-correction-method %s", + xor=["skipIntensityCorr"], + ), + fieldmapSmooth=dict( + argstr="--fieldmap-smooth3=%f", + ), + flagConfigFile=dict( + argstr="--flag-conf-file %s", + extensions=None, + ), + forcePartialROIStats=dict( + argstr="--force-partial-roi-stats", + ), + generateStats=dict( + argstr="--generate-stats", + ), + ignoreFieldmapFOV=dict( + argstr="--ignore-fieldmap-fov", + ), + ignoreMemory=dict( + argstr="--ignore-memory", + ), inputDiffusionData=dict( - argstr="--nii %s", extensions=None, mandatory=True, position=-2 + argstr="--nii %s", + extensions=None, + mandatory=True, + position=-2, + ), + lowMemory=dict( + argstr="--low-memory", ), - lowMemory=dict(argstr="--low-memory"), noStructuralRegistration=dict( argstr="--no-structural-registration", mandatory=True, position=0, xor=["bfcFile"], ), - odfLambta=dict(argstr="--odf-lambda "), - onlyStats=dict(argstr="--generate-only-stats"), - outPrefix=dict(argstr="--output-fileprefix %s"), - outputDiffusionCoordinates=dict(argstr="--output-diffusion-coordinate"), - outputSubdir=dict(argstr="--output-subdir %s"), - phaseEncodingDirection=dict(argstr="--dir=%s"), - rigidRegMeasure=dict(argstr="--rigid-reg-measure %s"), - skipDistortionCorr=dict(argstr="--no-distortion-correction"), + odfLambta=dict( + argstr="--odf-lambda ", + ), + onlyStats=dict( + argstr="--generate-only-stats", + ), + outPrefix=dict( + argstr="--output-fileprefix %s", + ), + outputDiffusionCoordinates=dict( + argstr="--output-diffusion-coordinate", + ), + outputSubdir=dict( + argstr="--output-subdir %s", + ), + phaseEncodingDirection=dict( + argstr="--dir=%s", + ), + rigidRegMeasure=dict( + argstr="--rigid-reg-measure %s", + ), + skipDistortionCorr=dict( + argstr="--no-distortion-correction", + ), skipIntensityCorr=dict( - argstr="--no-intensity-correction", xor=["fieldmapCorrectionMethod"] + argstr="--no-intensity-correction", + xor=["fieldmapCorrectionMethod"], + ), + skipNonuniformityCorr=dict( + 
argstr="--no-nonuniformity-correction", + ), + t1Mask=dict( + argstr="--t1-mask %s", + extensions=None, + ), + threads=dict( + argstr="--threads=%d", + ), + transformDataOnly=dict( + argstr="--transform-data-only", ), - skipNonuniformityCorr=dict(argstr="--no-nonuniformity-correction"), - t1Mask=dict(argstr="--t1-mask %s", extensions=None), - threads=dict(argstr="--threads=%d"), - transformDataOnly=dict(argstr="--transform-data-only"), transformDiffusionSurface=dict( - argstr="--transform-diffusion-surface %s", extensions=None + argstr="--transform-diffusion-surface %s", + extensions=None, ), transformDiffusionVolume=dict( - argstr="--transform-diffusion-volume %s", extensions=None + argstr="--transform-diffusion-volume %s", + extensions=None, + ), + transformInterpolation=dict( + argstr="--transform-interpolation %s", + ), + transformT1Surface=dict( + argstr="--transform-t1-surface %s", + extensions=None, + ), + transformT1Volume=dict( + argstr="--transform-t1-volume %s", + extensions=None, ), - transformInterpolation=dict(argstr="--transform-interpolation %s"), - transformT1Surface=dict(argstr="--transform-t1-surface %s", extensions=None), - transformT1Volume=dict(argstr="--transform-t1-volume %s", extensions=None), ) inputs = BDP.input_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py b/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py index d108db5840..dbb1f3d839 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py @@ -4,35 +4,96 @@ def test_Bfc_inputs(): input_map = dict( - args=dict(argstr="%s"), - biasEstimateConvergenceThreshold=dict(argstr="--beps %f"), - biasEstimateSpacing=dict(argstr="-s %d"), - biasFieldEstimatesOutputPrefix=dict(argstr="--biasprefix %s"), - biasRange=dict(argstr="%s"), - controlPointSpacing=dict(argstr="-c %d"), - convergenceThreshold=dict(argstr="--eps %f"), - correctWholeVolume=dict(argstr="--extrapolate"), - correctedImagesOutputPrefix=dict(argstr="--prefix %s"), - correctionScheduleFile=dict(argstr="--schedule %s", extensions=None), - environ=dict(nohash=True, usedefault=True), - histogramRadius=dict(argstr="-r %d"), - histogramType=dict(argstr="%s"), - inputMRIFile=dict(argstr="-i %s", extensions=None, mandatory=True), - inputMaskFile=dict(argstr="-m %s", extensions=None, hash_files=False), - intermediate_file_type=dict(argstr="%s"), - iterativeMode=dict(argstr="--iterate"), - maxBias=dict(argstr="-U %f", usedefault=True), - minBias=dict(argstr="-L %f", usedefault=True), - outputBiasField=dict(argstr="--bias %s", extensions=None, hash_files=False), + args=dict( + argstr="%s", + ), + biasEstimateConvergenceThreshold=dict( + argstr="--beps %f", + ), + biasEstimateSpacing=dict( + argstr="-s %d", + ), + biasFieldEstimatesOutputPrefix=dict( + argstr="--biasprefix %s", + ), + biasRange=dict( + argstr="%s", + ), + controlPointSpacing=dict( + argstr="-c %d", + ), + convergenceThreshold=dict( + argstr="--eps %f", + ), + correctWholeVolume=dict( + argstr="--extrapolate", + ), + correctedImagesOutputPrefix=dict( + argstr="--prefix %s", + ), + correctionScheduleFile=dict( + argstr="--schedule %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + histogramRadius=dict( + argstr="-r %d", + ), + histogramType=dict( + argstr="%s", + ), + inputMRIFile=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + inputMaskFile=dict( + argstr="-m %s", + extensions=None, + hash_files=False, + ), + intermediate_file_type=dict( + argstr="%s", + ), 
+ iterativeMode=dict( + argstr="--iterate", + ), + maxBias=dict( + argstr="-U %f", + usedefault=True, + ), + minBias=dict( + argstr="-L %f", + usedefault=True, + ), + outputBiasField=dict( + argstr="--bias %s", + extensions=None, + hash_files=False, + ), outputMRIVolume=dict( - argstr="-o %s", extensions=None, genfile=True, hash_files=False + argstr="-o %s", + extensions=None, + genfile=True, + hash_files=False, ), outputMaskedBiasField=dict( - argstr="--maskedbias %s", extensions=None, hash_files=False + argstr="--maskedbias %s", + extensions=None, + hash_files=False, + ), + splineLambda=dict( + argstr="-w %f", + ), + timer=dict( + argstr="--timer", + ), + verbosityLevel=dict( + argstr="-v %d", ), - splineLambda=dict(argstr="-w %f"), - timer=dict(argstr="--timer"), - verbosityLevel=dict(argstr="-v %d"), ) inputs = Bfc.input_spec() @@ -43,10 +104,18 @@ def test_Bfc_inputs(): def test_Bfc_outputs(): output_map = dict( - correctionScheduleFile=dict(extensions=None), - outputBiasField=dict(extensions=None), - outputMRIVolume=dict(extensions=None), - outputMaskedBiasField=dict(extensions=None), + correctionScheduleFile=dict( + extensions=None, + ), + outputBiasField=dict( + extensions=None, + ), + outputMRIVolume=dict( + extensions=None, + ), + outputMaskedBiasField=dict( + extensions=None, + ), ) outputs = Bfc.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Bse.py b/nipype/interfaces/brainsuite/tests/test_auto_Bse.py index 09641067ed..748defcc00 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Bse.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Bse.py @@ -4,32 +4,84 @@ def test_Bse_inputs(): input_map = dict( - args=dict(argstr="%s"), - diffusionConstant=dict(argstr="-d %f", usedefault=True), - diffusionIterations=dict(argstr="-n %d", usedefault=True), - dilateFinalMask=dict(argstr="-p", usedefault=True), - edgeDetectionConstant=dict(argstr="-s %f", usedefault=True), - environ=dict(nohash=True, usedefault=True), - inputMRIFile=dict(argstr="-i %s", extensions=None, mandatory=True), - noRotate=dict(argstr="--norotate"), - outputCortexFile=dict(argstr="--cortex %s", extensions=None, hash_files=False), + args=dict( + argstr="%s", + ), + diffusionConstant=dict( + argstr="-d %f", + usedefault=True, + ), + diffusionIterations=dict( + argstr="-n %d", + usedefault=True, + ), + dilateFinalMask=dict( + argstr="-p", + usedefault=True, + ), + edgeDetectionConstant=dict( + argstr="-s %f", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMRIFile=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + noRotate=dict( + argstr="--norotate", + ), + outputCortexFile=dict( + argstr="--cortex %s", + extensions=None, + hash_files=False, + ), outputDetailedBrainMask=dict( - argstr="--hires %s", extensions=None, hash_files=False + argstr="--hires %s", + extensions=None, + hash_files=False, ), outputDiffusionFilter=dict( - argstr="--adf %s", extensions=None, hash_files=False + argstr="--adf %s", + extensions=None, + hash_files=False, + ), + outputEdgeMap=dict( + argstr="--edge %s", + extensions=None, + hash_files=False, ), - outputEdgeMap=dict(argstr="--edge %s", extensions=None, hash_files=False), outputMRIVolume=dict( - argstr="-o %s", extensions=None, genfile=True, hash_files=False + argstr="-o %s", + extensions=None, + genfile=True, + hash_files=False, ), outputMaskFile=dict( - argstr="--mask %s", extensions=None, genfile=True, hash_files=False + argstr="--mask %s", + extensions=None, + genfile=True, + hash_files=False, + ), + 
radius=dict( + argstr="-r %f", + usedefault=True, + ), + timer=dict( + argstr="--timer", + ), + trim=dict( + argstr="--trim", + usedefault=True, + ), + verbosityLevel=dict( + argstr="-v %f", + usedefault=True, ), - radius=dict(argstr="-r %f", usedefault=True), - timer=dict(argstr="--timer"), - trim=dict(argstr="--trim", usedefault=True), - verbosityLevel=dict(argstr="-v %f", usedefault=True), ) inputs = Bse.input_spec() @@ -40,12 +92,24 @@ def test_Bse_inputs(): def test_Bse_outputs(): output_map = dict( - outputCortexFile=dict(extensions=None), - outputDetailedBrainMask=dict(extensions=None), - outputDiffusionFilter=dict(extensions=None), - outputEdgeMap=dict(extensions=None), - outputMRIVolume=dict(extensions=None), - outputMaskFile=dict(extensions=None), + outputCortexFile=dict( + extensions=None, + ), + outputDetailedBrainMask=dict( + extensions=None, + ), + outputDiffusionFilter=dict( + extensions=None, + ), + outputEdgeMap=dict( + extensions=None, + ), + outputMRIVolume=dict( + extensions=None, + ), + outputMaskFile=dict( + extensions=None, + ), ) outputs = Bse.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py b/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py index d3932ce340..a924d7ce0f 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py @@ -4,29 +4,80 @@ def test_Cerebro_inputs(): input_map = dict( - args=dict(argstr="%s"), - costFunction=dict(argstr="-c %d", usedefault=True), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + costFunction=dict( + argstr="-c %d", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), inputAtlasLabelFile=dict( - argstr="--atlaslabels %s", extensions=None, mandatory=True + argstr="--atlaslabels %s", + extensions=None, + mandatory=True, + ), + inputAtlasMRIFile=dict( + argstr="--atlas %s", + extensions=None, + mandatory=True, + ), + inputBrainMaskFile=dict( + argstr="-m %s", + extensions=None, + ), + inputMRIFile=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + keepTempFiles=dict( + argstr="--keep", + ), + linearConvergence=dict( + argstr="--linconv %f", ), - inputAtlasMRIFile=dict(argstr="--atlas %s", extensions=None, mandatory=True), - inputBrainMaskFile=dict(argstr="-m %s", extensions=None), - inputMRIFile=dict(argstr="-i %s", extensions=None, mandatory=True), - keepTempFiles=dict(argstr="--keep"), - linearConvergence=dict(argstr="--linconv %f"), outputAffineTransformFile=dict( - argstr="--air %s", extensions=None, genfile=True - ), - outputCerebrumMaskFile=dict(argstr="-o %s", extensions=None, genfile=True), - outputLabelVolumeFile=dict(argstr="-l %s", extensions=None, genfile=True), - outputWarpTransformFile=dict(argstr="--warp %s", extensions=None, genfile=True), - tempDirectory=dict(argstr="--tempdir %s"), - tempDirectoryBase=dict(argstr="--tempdirbase %s"), - useCentroids=dict(argstr="--centroids"), - verbosity=dict(argstr="-v %d"), - warpConvergence=dict(argstr="--warpconv %f"), - warpLabel=dict(argstr="--warplevel %d"), + argstr="--air %s", + extensions=None, + genfile=True, + ), + outputCerebrumMaskFile=dict( + argstr="-o %s", + extensions=None, + genfile=True, + ), + outputLabelVolumeFile=dict( + argstr="-l %s", + extensions=None, + genfile=True, + ), + outputWarpTransformFile=dict( + argstr="--warp %s", + extensions=None, + genfile=True, + ), + tempDirectory=dict( + argstr="--tempdir %s", + ), + tempDirectoryBase=dict( + argstr="--tempdirbase %s", + 
), + useCentroids=dict( + argstr="--centroids", + ), + verbosity=dict( + argstr="-v %d", + ), + warpConvergence=dict( + argstr="--warpconv %f", + ), + warpLabel=dict( + argstr="--warplevel %d", + ), ) inputs = Cerebro.input_spec() @@ -37,10 +88,18 @@ def test_Cerebro_inputs(): def test_Cerebro_outputs(): output_map = dict( - outputAffineTransformFile=dict(extensions=None), - outputCerebrumMaskFile=dict(extensions=None), - outputLabelVolumeFile=dict(extensions=None), - outputWarpTransformFile=dict(extensions=None), + outputAffineTransformFile=dict( + extensions=None, + ), + outputCerebrumMaskFile=dict( + extensions=None, + ), + outputLabelVolumeFile=dict( + extensions=None, + ), + outputWarpTransformFile=dict( + extensions=None, + ), ) outputs = Cerebro.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py b/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py index 0935fddbc1..30287edf90 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py @@ -4,17 +4,49 @@ def test_Cortex_inputs(): input_map = dict( - args=dict(argstr="%s"), - computeGCBoundary=dict(argstr="-g"), - computeWGBoundary=dict(argstr="-w", usedefault=True), - environ=dict(nohash=True, usedefault=True), - includeAllSubcorticalAreas=dict(argstr="-a", usedefault=True), - inputHemisphereLabelFile=dict(argstr="-h %s", extensions=None, mandatory=True), - inputTissueFractionFile=dict(argstr="-f %s", extensions=None, mandatory=True), - outputCerebrumMask=dict(argstr="-o %s", extensions=None, genfile=True), - timer=dict(argstr="--timer"), - tissueFractionThreshold=dict(argstr="-p %f", usedefault=True), - verbosity=dict(argstr="-v %d"), + args=dict( + argstr="%s", + ), + computeGCBoundary=dict( + argstr="-g", + ), + computeWGBoundary=dict( + argstr="-w", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + includeAllSubcorticalAreas=dict( + argstr="-a", + usedefault=True, + ), + inputHemisphereLabelFile=dict( + argstr="-h %s", + extensions=None, + mandatory=True, + ), + inputTissueFractionFile=dict( + argstr="-f %s", + extensions=None, + mandatory=True, + ), + outputCerebrumMask=dict( + argstr="-o %s", + extensions=None, + genfile=True, + ), + timer=dict( + argstr="--timer", + ), + tissueFractionThreshold=dict( + argstr="-p %f", + usedefault=True, + ), + verbosity=dict( + argstr="-v %d", + ), ) inputs = Cortex.input_spec() @@ -24,7 +56,11 @@ def test_Cortex_inputs(): def test_Cortex_outputs(): - output_map = dict(outputCerebrumMask=dict(extensions=None)) + output_map = dict( + outputCerebrumMask=dict( + extensions=None, + ), + ) outputs = Cortex.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py b/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py index b91f40790c..4d986e22f3 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py @@ -4,14 +4,35 @@ def test_Dewisp_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputMaskFile=dict(argstr="-i %s", extensions=None, mandatory=True), - maximumIterations=dict(argstr="-n %d"), - outputMaskFile=dict(argstr="-o %s", extensions=None, genfile=True), - sizeThreshold=dict(argstr="-t %d"), - timer=dict(argstr="--timer"), - verbosity=dict(argstr="-v %d"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMaskFile=dict( + 
argstr="-i %s", + extensions=None, + mandatory=True, + ), + maximumIterations=dict( + argstr="-n %d", + ), + outputMaskFile=dict( + argstr="-o %s", + extensions=None, + genfile=True, + ), + sizeThreshold=dict( + argstr="-t %d", + ), + timer=dict( + argstr="--timer", + ), + verbosity=dict( + argstr="-v %d", + ), ) inputs = Dewisp.input_spec() @@ -21,7 +42,11 @@ def test_Dewisp_inputs(): def test_Dewisp_outputs(): - output_map = dict(outputMaskFile=dict(extensions=None)) + output_map = dict( + outputMaskFile=dict( + extensions=None, + ), + ) outputs = Dewisp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py b/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py index ee3a473c21..3122791cf5 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py @@ -4,30 +4,70 @@ def test_Dfs_inputs(): input_map = dict( - args=dict(argstr="%s"), - curvatureWeighting=dict(argstr="-w %f", usedefault=True), - environ=dict(nohash=True, usedefault=True), - inputShadingVolume=dict(argstr="-c %s", extensions=None), - inputVolumeFile=dict(argstr="-i %s", extensions=None, mandatory=True), - noNormalsFlag=dict(argstr="--nonormals"), + args=dict( + argstr="%s", + ), + curvatureWeighting=dict( + argstr="-w %f", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputShadingVolume=dict( + argstr="-c %s", + extensions=None, + ), + inputVolumeFile=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + noNormalsFlag=dict( + argstr="--nonormals", + ), nonZeroTessellation=dict( - argstr="-nz", xor=("nonZeroTessellation", "specialTessellation") + argstr="-nz", + xor=("nonZeroTessellation", "specialTessellation"), + ), + outputSurfaceFile=dict( + argstr="-o %s", + extensions=None, + genfile=True, + ), + postSmoothFlag=dict( + argstr="--postsmooth", + ), + scalingPercentile=dict( + argstr="-f %f", + ), + smoothingConstant=dict( + argstr="-a %f", + usedefault=True, + ), + smoothingIterations=dict( + argstr="-n %d", + usedefault=True, ), - outputSurfaceFile=dict(argstr="-o %s", extensions=None, genfile=True), - postSmoothFlag=dict(argstr="--postsmooth"), - scalingPercentile=dict(argstr="-f %f"), - smoothingConstant=dict(argstr="-a %f", usedefault=True), - smoothingIterations=dict(argstr="-n %d", usedefault=True), specialTessellation=dict( argstr="%s", position=-1, requires=["tessellationThreshold"], xor=("nonZeroTessellation", "specialTessellation"), ), - tessellationThreshold=dict(argstr="%f"), - timer=dict(argstr="--timer"), - verbosity=dict(argstr="-v %d"), - zeroPadFlag=dict(argstr="-z"), + tessellationThreshold=dict( + argstr="%f", + ), + timer=dict( + argstr="--timer", + ), + verbosity=dict( + argstr="-v %d", + ), + zeroPadFlag=dict( + argstr="-z", + ), ) inputs = Dfs.input_spec() @@ -37,7 +77,11 @@ def test_Dfs_inputs(): def test_Dfs_outputs(): - output_map = dict(outputSurfaceFile=dict(extensions=None)) + output_map = dict( + outputSurfaceFile=dict( + extensions=None, + ), + ) outputs = Dfs.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py b/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py index 9ffcc8162e..0696f11992 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py @@ -4,17 +4,53 @@ def test_Hemisplit_inputs(): input_map = dict( - args=dict(argstr="%s"), - 
environ=dict(nohash=True, usedefault=True), - inputHemisphereLabelFile=dict(argstr="-l %s", extensions=None, mandatory=True), - inputSurfaceFile=dict(argstr="-i %s", extensions=None, mandatory=True), - outputLeftHemisphere=dict(argstr="--left %s", extensions=None, genfile=True), - outputLeftPialHemisphere=dict(argstr="-pl %s", extensions=None, genfile=True), - outputRightHemisphere=dict(argstr="--right %s", extensions=None, genfile=True), - outputRightPialHemisphere=dict(argstr="-pr %s", extensions=None, genfile=True), - pialSurfaceFile=dict(argstr="-p %s", extensions=None), - timer=dict(argstr="--timer"), - verbosity=dict(argstr="-v %d"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputHemisphereLabelFile=dict( + argstr="-l %s", + extensions=None, + mandatory=True, + ), + inputSurfaceFile=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + outputLeftHemisphere=dict( + argstr="--left %s", + extensions=None, + genfile=True, + ), + outputLeftPialHemisphere=dict( + argstr="-pl %s", + extensions=None, + genfile=True, + ), + outputRightHemisphere=dict( + argstr="--right %s", + extensions=None, + genfile=True, + ), + outputRightPialHemisphere=dict( + argstr="-pr %s", + extensions=None, + genfile=True, + ), + pialSurfaceFile=dict( + argstr="-p %s", + extensions=None, + ), + timer=dict( + argstr="--timer", + ), + verbosity=dict( + argstr="-v %d", + ), ) inputs = Hemisplit.input_spec() @@ -25,10 +61,18 @@ def test_Hemisplit_inputs(): def test_Hemisplit_outputs(): output_map = dict( - outputLeftHemisphere=dict(extensions=None), - outputLeftPialHemisphere=dict(extensions=None), - outputRightHemisphere=dict(extensions=None), - outputRightPialHemisphere=dict(extensions=None), + outputLeftHemisphere=dict( + extensions=None, + ), + outputLeftPialHemisphere=dict( + extensions=None, + ), + outputRightHemisphere=dict( + extensions=None, + ), + outputRightPialHemisphere=dict( + extensions=None, + ), ) outputs = Hemisplit.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py b/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py index 202b4257a7..f5ba0725df 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py @@ -4,25 +4,80 @@ def test_Pialmesh_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - exportPrefix=dict(argstr="--prefix %s"), - inputMaskFile=dict(argstr="-m %s", extensions=None, mandatory=True), - inputSurfaceFile=dict(argstr="-i %s", extensions=None, mandatory=True), - inputTissueFractionFile=dict(argstr="-f %s", extensions=None, mandatory=True), - laplacianSmoothing=dict(argstr="--smooth %f", usedefault=True), - maxThickness=dict(argstr="--max %f", usedefault=True), - normalSmoother=dict(argstr="--nc %f", usedefault=True), - numIterations=dict(argstr="-n %d", usedefault=True), - outputInterval=dict(argstr="--interval %d", usedefault=True), - outputSurfaceFile=dict(argstr="-o %s", extensions=None, genfile=True), - recomputeNormals=dict(argstr="--norm"), - searchRadius=dict(argstr="-r %f", usedefault=True), - stepSize=dict(argstr="-s %f", usedefault=True), - tangentSmoother=dict(argstr="--tc %f"), - timer=dict(argstr="--timer"), - tissueThreshold=dict(argstr="-t %f", usedefault=True), - verbosity=dict(argstr="-v %d"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + exportPrefix=dict( + argstr="--prefix %s", + ), + inputMaskFile=dict( + 
argstr="-m %s", + extensions=None, + mandatory=True, + ), + inputSurfaceFile=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + inputTissueFractionFile=dict( + argstr="-f %s", + extensions=None, + mandatory=True, + ), + laplacianSmoothing=dict( + argstr="--smooth %f", + usedefault=True, + ), + maxThickness=dict( + argstr="--max %f", + usedefault=True, + ), + normalSmoother=dict( + argstr="--nc %f", + usedefault=True, + ), + numIterations=dict( + argstr="-n %d", + usedefault=True, + ), + outputInterval=dict( + argstr="--interval %d", + usedefault=True, + ), + outputSurfaceFile=dict( + argstr="-o %s", + extensions=None, + genfile=True, + ), + recomputeNormals=dict( + argstr="--norm", + ), + searchRadius=dict( + argstr="-r %f", + usedefault=True, + ), + stepSize=dict( + argstr="-s %f", + usedefault=True, + ), + tangentSmoother=dict( + argstr="--tc %f", + ), + timer=dict( + argstr="--timer", + ), + tissueThreshold=dict( + argstr="-t %f", + usedefault=True, + ), + verbosity=dict( + argstr="-v %d", + ), ) inputs = Pialmesh.input_spec() @@ -32,7 +87,11 @@ def test_Pialmesh_inputs(): def test_Pialmesh_outputs(): - output_map = dict(outputSurfaceFile=dict(extensions=None)) + output_map = dict( + outputSurfaceFile=dict( + extensions=None, + ), + ) outputs = Pialmesh.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py b/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py index 818776e71c..a6f52a26a7 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py @@ -4,16 +4,44 @@ def test_Pvc_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputMRIFile=dict(argstr="-i %s", extensions=None, mandatory=True), - inputMaskFile=dict(argstr="-m %s", extensions=None), - outputLabelFile=dict(argstr="-o %s", extensions=None, genfile=True), - outputTissueFractionFile=dict(argstr="-f %s", extensions=None, genfile=True), - spatialPrior=dict(argstr="-l %f"), - threeClassFlag=dict(argstr="-3"), - timer=dict(argstr="--timer"), - verbosity=dict(argstr="-v %d"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMRIFile=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + inputMaskFile=dict( + argstr="-m %s", + extensions=None, + ), + outputLabelFile=dict( + argstr="-o %s", + extensions=None, + genfile=True, + ), + outputTissueFractionFile=dict( + argstr="-f %s", + extensions=None, + genfile=True, + ), + spatialPrior=dict( + argstr="-l %f", + ), + threeClassFlag=dict( + argstr="-3", + ), + timer=dict( + argstr="--timer", + ), + verbosity=dict( + argstr="-v %d", + ), ) inputs = Pvc.input_spec() @@ -24,8 +52,12 @@ def test_Pvc_inputs(): def test_Pvc_outputs(): output_map = dict( - outputLabelFile=dict(extensions=None), - outputTissueFractionFile=dict(extensions=None), + outputLabelFile=dict( + extensions=None, + ), + outputTissueFractionFile=dict( + extensions=None, + ), ) outputs = Pvc.output_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py b/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py index e57f6864aa..4c29c2bfda 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py @@ -4,29 +4,82 @@ def test_SVReg_inputs(): input_map = dict( - args=dict(argstr="%s"), - atlasFilePrefix=dict(argstr="'%s'", position=1), - curveMatchingInstructions=dict(argstr="'-cur %s'"), 
- dataSinkDelay=dict(argstr="%s"), - displayModuleName=dict(argstr="'-m'"), - displayTimestamps=dict(argstr="'-t'"), - environ=dict(nohash=True, usedefault=True), - iterations=dict(argstr="'-H %d'"), - keepIntermediates=dict(argstr="'-k'"), - pialSurfaceMaskDilation=dict(argstr="'-D %d'"), - refineOutputs=dict(argstr="'-r'"), - shortMessages=dict(argstr="'-gui'"), - skipToIntensityReg=dict(argstr="'-p'"), - skipToVolumeReg=dict(argstr="'-s'"), - skipVolumetricProcessing=dict(argstr="'-S'"), - subjectFilePrefix=dict(argstr="'%s'", mandatory=True, position=0), - useCerebrumMask=dict(argstr="'-C'"), - useManualMaskFile=dict(argstr="'-cbm'"), - useMultiThreading=dict(argstr="'-P'"), - useSingleThreading=dict(argstr="'-U'"), - verbosity0=dict(argstr="'-v0'", xor=("verbosity0", "verbosity1", "verbosity2")), - verbosity1=dict(argstr="'-v1'", xor=("verbosity0", "verbosity1", "verbosity2")), - verbosity2=dict(argstr="'v2'", xor=("verbosity0", "verbosity1", "verbosity2")), + args=dict( + argstr="%s", + ), + atlasFilePrefix=dict( + argstr="'%s'", + position=1, + ), + curveMatchingInstructions=dict( + argstr="'-cur %s'", + ), + dataSinkDelay=dict( + argstr="%s", + ), + displayModuleName=dict( + argstr="'-m'", + ), + displayTimestamps=dict( + argstr="'-t'", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + iterations=dict( + argstr="'-H %d'", + ), + keepIntermediates=dict( + argstr="'-k'", + ), + pialSurfaceMaskDilation=dict( + argstr="'-D %d'", + ), + refineOutputs=dict( + argstr="'-r'", + ), + shortMessages=dict( + argstr="'-gui'", + ), + skipToIntensityReg=dict( + argstr="'-p'", + ), + skipToVolumeReg=dict( + argstr="'-s'", + ), + skipVolumetricProcessing=dict( + argstr="'-S'", + ), + subjectFilePrefix=dict( + argstr="'%s'", + mandatory=True, + position=0, + ), + useCerebrumMask=dict( + argstr="'-C'", + ), + useManualMaskFile=dict( + argstr="'-cbm'", + ), + useMultiThreading=dict( + argstr="'-P'", + ), + useSingleThreading=dict( + argstr="'-U'", + ), + verbosity0=dict( + argstr="'-v0'", + xor=("verbosity0", "verbosity1", "verbosity2"), + ), + verbosity1=dict( + argstr="'-v1'", + xor=("verbosity0", "verbosity1", "verbosity2"), + ), + verbosity2=dict( + argstr="'v2'", + xor=("verbosity0", "verbosity1", "verbosity2"), + ), ) inputs = SVReg.input_spec() diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py b/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py index 63f9696cd9..97094db018 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py @@ -4,15 +4,40 @@ def test_Scrubmask_inputs(): input_map = dict( - args=dict(argstr="%s"), - backgroundFillThreshold=dict(argstr="-b %d", usedefault=True), - environ=dict(nohash=True, usedefault=True), - foregroundTrimThreshold=dict(argstr="-f %d", usedefault=True), - inputMaskFile=dict(argstr="-i %s", extensions=None, mandatory=True), - numberIterations=dict(argstr="-n %d"), - outputMaskFile=dict(argstr="-o %s", extensions=None, genfile=True), - timer=dict(argstr="--timer"), - verbosity=dict(argstr="-v %d"), + args=dict( + argstr="%s", + ), + backgroundFillThreshold=dict( + argstr="-b %d", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + foregroundTrimThreshold=dict( + argstr="-f %d", + usedefault=True, + ), + inputMaskFile=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + numberIterations=dict( + argstr="-n %d", + ), + outputMaskFile=dict( + argstr="-o %s", + extensions=None, + genfile=True, + ), + 
timer=dict( + argstr="--timer", + ), + verbosity=dict( + argstr="-v %d", + ), ) inputs = Scrubmask.input_spec() @@ -22,7 +47,11 @@ def test_Scrubmask_inputs(): def test_Scrubmask_outputs(): - output_map = dict(outputMaskFile=dict(extensions=None)) + output_map = dict( + outputMaskFile=dict( + extensions=None, + ), + ) outputs = Scrubmask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py b/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py index 137b2959c1..3120f00184 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py @@ -4,21 +4,58 @@ def test_Skullfinder_inputs(): input_map = dict( - args=dict(argstr="%s"), - bgLabelValue=dict(argstr="--bglabel %d"), - brainLabelValue=dict(argstr="--brainlabel %d"), - environ=dict(nohash=True, usedefault=True), - inputMRIFile=dict(argstr="-i %s", extensions=None, mandatory=True), - inputMaskFile=dict(argstr="-m %s", extensions=None, mandatory=True), - lowerThreshold=dict(argstr="-l %d"), - outputLabelFile=dict(argstr="-o %s", extensions=None, genfile=True), - performFinalOpening=dict(argstr="--finalOpening"), - scalpLabelValue=dict(argstr="--scalplabel %d"), - skullLabelValue=dict(argstr="--skulllabel %d"), - spaceLabelValue=dict(argstr="--spacelabel %d"), - surfaceFilePrefix=dict(argstr="-s %s"), - upperThreshold=dict(argstr="-u %d"), - verbosity=dict(argstr="-v %d"), + args=dict( + argstr="%s", + ), + bgLabelValue=dict( + argstr="--bglabel %d", + ), + brainLabelValue=dict( + argstr="--brainlabel %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMRIFile=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + inputMaskFile=dict( + argstr="-m %s", + extensions=None, + mandatory=True, + ), + lowerThreshold=dict( + argstr="-l %d", + ), + outputLabelFile=dict( + argstr="-o %s", + extensions=None, + genfile=True, + ), + performFinalOpening=dict( + argstr="--finalOpening", + ), + scalpLabelValue=dict( + argstr="--scalplabel %d", + ), + skullLabelValue=dict( + argstr="--skulllabel %d", + ), + spaceLabelValue=dict( + argstr="--spacelabel %d", + ), + surfaceFilePrefix=dict( + argstr="-s %s", + ), + upperThreshold=dict( + argstr="-u %d", + ), + verbosity=dict( + argstr="-v %d", + ), ) inputs = Skullfinder.input_spec() @@ -28,7 +65,11 @@ def test_Skullfinder_inputs(): def test_Skullfinder_outputs(): - output_map = dict(outputLabelFile=dict(extensions=None)) + output_map = dict( + outputLabelFile=dict( + extensions=None, + ), + ) outputs = Skullfinder.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Tca.py b/nipype/interfaces/brainsuite/tests/test_auto_Tca.py index 253b2a0a2f..eaba6a1d5f 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Tca.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Tca.py @@ -4,15 +4,40 @@ def test_Tca_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - foregroundDelta=dict(argstr="--delta %d", usedefault=True), - inputMaskFile=dict(argstr="-i %s", extensions=None, mandatory=True), - maxCorrectionSize=dict(argstr="-n %d"), - minCorrectionSize=dict(argstr="-m %d", usedefault=True), - outputMaskFile=dict(argstr="-o %s", extensions=None, genfile=True), - timer=dict(argstr="--timer"), - verbosity=dict(argstr="-v %d"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + 
foregroundDelta=dict( + argstr="--delta %d", + usedefault=True, + ), + inputMaskFile=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + maxCorrectionSize=dict( + argstr="-n %d", + ), + minCorrectionSize=dict( + argstr="-m %d", + usedefault=True, + ), + outputMaskFile=dict( + argstr="-o %s", + extensions=None, + genfile=True, + ), + timer=dict( + argstr="--timer", + ), + verbosity=dict( + argstr="-v %d", + ), ) inputs = Tca.input_spec() @@ -22,7 +47,11 @@ def test_Tca_inputs(): def test_Tca_outputs(): - output_map = dict(outputMaskFile=dict(extensions=None)) + output_map = dict( + outputMaskFile=dict( + extensions=None, + ), + ) outputs = Tca.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py b/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py index 983d571fec..8b043c63c7 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py @@ -4,9 +4,17 @@ def test_ThicknessPVC_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - subjectFilePrefix=dict(argstr="%s", mandatory=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + subjectFilePrefix=dict( + argstr="%s", + mandatory=True, + ), ) inputs = ThicknessPVC.input_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py b/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py index 7bfc0b5200..9b6110d30d 100644 --- a/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py +++ b/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py @@ -4,32 +4,109 @@ def test_AnalyzeHeader_inputs(): input_map = dict( - args=dict(argstr="%s"), - centre=dict(argstr="-centre %s", units="mm"), - data_dims=dict(argstr="-datadims %s", units="voxels"), - datatype=dict(argstr="-datatype %s", mandatory=True), - description=dict(argstr="-description %s"), - environ=dict(nohash=True, usedefault=True), - greylevels=dict(argstr="-gl %s", units="NA"), - in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=1), - initfromheader=dict(argstr="-initfromheader %s", extensions=None, position=3), - intelbyteorder=dict(argstr="-intelbyteorder"), - networkbyteorder=dict(argstr="-networkbyteorder"), - nimages=dict(argstr="-nimages %d", units="NA"), - offset=dict(argstr="-offset %d", units="NA"), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), - picoseed=dict(argstr="-picoseed %s", units="mm"), - printbigendian=dict(argstr="-printbigendian %s", extensions=None, position=3), - printimagedims=dict(argstr="-printimagedims %s", extensions=None, position=3), + args=dict( + argstr="%s", + ), + centre=dict( + argstr="-centre %s", + units="mm", + ), + data_dims=dict( + argstr="-datadims %s", + units="voxels", + ), + datatype=dict( + argstr="-datatype %s", + mandatory=True, + ), + description=dict( + argstr="-description %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + greylevels=dict( + argstr="-gl %s", + units="NA", + ), + in_file=dict( + argstr="< %s", + extensions=None, + mandatory=True, + position=1, + ), + initfromheader=dict( + argstr="-initfromheader %s", + extensions=None, + position=3, + ), + intelbyteorder=dict( + argstr="-intelbyteorder", + ), + networkbyteorder=dict( + argstr="-networkbyteorder", + ), + nimages=dict( + argstr="-nimages %d", + units="NA", + ), + offset=dict( + argstr="-offset %d", + units="NA", + 
), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + picoseed=dict( + argstr="-picoseed %s", + units="mm", + ), + printbigendian=dict( + argstr="-printbigendian %s", + extensions=None, + position=3, + ), + printimagedims=dict( + argstr="-printimagedims %s", + extensions=None, + position=3, + ), printintelbyteorder=dict( - argstr="-printintelbyteorder %s", extensions=None, position=3 - ), - printprogargs=dict(argstr="-printprogargs %s", extensions=None, position=3), - readheader=dict(argstr="-readheader %s", extensions=None, position=3), - scaleinter=dict(argstr="-scaleinter %d", units="NA"), - scaleslope=dict(argstr="-scaleslope %d", units="NA"), - scheme_file=dict(argstr="%s", extensions=None, position=2), - voxel_dims=dict(argstr="-voxeldims %s", units="mm"), + argstr="-printintelbyteorder %s", + extensions=None, + position=3, + ), + printprogargs=dict( + argstr="-printprogargs %s", + extensions=None, + position=3, + ), + readheader=dict( + argstr="-readheader %s", + extensions=None, + position=3, + ), + scaleinter=dict( + argstr="-scaleinter %d", + units="NA", + ), + scaleslope=dict( + argstr="-scaleslope %d", + units="NA", + ), + scheme_file=dict( + argstr="%s", + extensions=None, + position=2, + ), + voxel_dims=dict( + argstr="-voxeldims %s", + units="mm", + ), ) inputs = AnalyzeHeader.input_spec() @@ -39,7 +116,11 @@ def test_AnalyzeHeader_inputs(): def test_AnalyzeHeader_outputs(): - output_map = dict(header=dict(extensions=None)) + output_map = dict( + header=dict( + extensions=None, + ), + ) outputs = AnalyzeHeader.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py b/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py index 7745dbcac2..6181cf7541 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py @@ -4,14 +4,39 @@ def test_ComputeEigensystem_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=1), - inputdatatype=dict(argstr="-inputdatatype %s", usedefault=True), - inputmodel=dict(argstr="-inputmodel %s"), - maxcomponents=dict(argstr="-maxcomponents %d"), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), - outputdatatype=dict(argstr="-outputdatatype %s", usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="< %s", + extensions=None, + mandatory=True, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + usedefault=True, + ), + inputmodel=dict( + argstr="-inputmodel %s", + ), + maxcomponents=dict( + argstr="-maxcomponents %d", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + outputdatatype=dict( + argstr="-outputdatatype %s", + usedefault=True, + ), ) inputs = ComputeEigensystem.input_spec() @@ -21,7 +46,11 @@ def test_ComputeEigensystem_inputs(): def test_ComputeEigensystem_outputs(): - output_map = dict(eigen=dict(extensions=None)) + output_map = dict( + eigen=dict( + extensions=None, + ), + ) outputs = ComputeEigensystem.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py b/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py index 90f477dd87..0a13ac4f64 
100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py @@ -4,14 +4,39 @@ def test_ComputeFractionalAnisotropy_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=1), - inputdatatype=dict(argstr="-inputdatatype %s"), - inputmodel=dict(argstr="-inputmodel %s"), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), - outputdatatype=dict(argstr="-outputdatatype %s"), - scheme_file=dict(argstr="%s", extensions=None, position=2), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="< %s", + extensions=None, + mandatory=True, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + inputmodel=dict( + argstr="-inputmodel %s", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + outputdatatype=dict( + argstr="-outputdatatype %s", + ), + scheme_file=dict( + argstr="%s", + extensions=None, + position=2, + ), ) inputs = ComputeFractionalAnisotropy.input_spec() @@ -21,7 +46,11 @@ def test_ComputeFractionalAnisotropy_inputs(): def test_ComputeFractionalAnisotropy_outputs(): - output_map = dict(fa=dict(extensions=None)) + output_map = dict( + fa=dict( + extensions=None, + ), + ) outputs = ComputeFractionalAnisotropy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py b/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py index 361e42521e..822bd0306e 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py @@ -4,14 +4,39 @@ def test_ComputeMeanDiffusivity_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=1), - inputdatatype=dict(argstr="-inputdatatype %s"), - inputmodel=dict(argstr="-inputmodel %s"), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), - outputdatatype=dict(argstr="-outputdatatype %s"), - scheme_file=dict(argstr="%s", extensions=None, position=2), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="< %s", + extensions=None, + mandatory=True, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + inputmodel=dict( + argstr="-inputmodel %s", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + outputdatatype=dict( + argstr="-outputdatatype %s", + ), + scheme_file=dict( + argstr="%s", + extensions=None, + position=2, + ), ) inputs = ComputeMeanDiffusivity.input_spec() @@ -21,7 +46,11 @@ def test_ComputeMeanDiffusivity_inputs(): def test_ComputeMeanDiffusivity_outputs(): - output_map = dict(md=dict(extensions=None)) + output_map = dict( + md=dict( + extensions=None, + ), + ) outputs = ComputeMeanDiffusivity.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py b/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py index 4b081a950a..8a912685ae 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py +++ 
b/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py @@ -4,14 +4,39 @@ def test_ComputeTensorTrace_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=1), - inputdatatype=dict(argstr="-inputdatatype %s"), - inputmodel=dict(argstr="-inputmodel %s"), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), - outputdatatype=dict(argstr="-outputdatatype %s"), - scheme_file=dict(argstr="%s", extensions=None, position=2), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="< %s", + extensions=None, + mandatory=True, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + inputmodel=dict( + argstr="-inputmodel %s", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + outputdatatype=dict( + argstr="-outputdatatype %s", + ), + scheme_file=dict( + argstr="%s", + extensions=None, + position=2, + ), ) inputs = ComputeTensorTrace.input_spec() @@ -21,7 +46,11 @@ def test_ComputeTensorTrace_inputs(): def test_ComputeTensorTrace_outputs(): - output_map = dict(trace=dict(extensions=None)) + output_map = dict( + trace=dict( + extensions=None, + ), + ) outputs = ComputeTensorTrace.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_Conmat.py b/nipype/interfaces/camino/tests/test_auto_Conmat.py index fa85ce4dd5..a9ea16865d 100644 --- a/nipype/interfaces/camino/tests/test_auto_Conmat.py +++ b/nipype/interfaces/camino/tests/test_auto_Conmat.py @@ -4,16 +4,42 @@ def test_Conmat_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-inputfile %s", extensions=None, mandatory=True), - output_root=dict(argstr="-outputroot %s", extensions=None, genfile=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-inputfile %s", + extensions=None, + mandatory=True, + ), + output_root=dict( + argstr="-outputroot %s", + extensions=None, + genfile=True, + ), scalar_file=dict( - argstr="-scalarfile %s", extensions=None, requires=["tract_stat"] + argstr="-scalarfile %s", + extensions=None, + requires=["tract_stat"], + ), + target_file=dict( + argstr="-targetfile %s", + extensions=None, + mandatory=True, + ), + targetname_file=dict( + argstr="-targetnamefile %s", + extensions=None, + ), + tract_prop=dict( + argstr="-tractstat %s", + units="NA", + xor=["tract_stat"], ), - target_file=dict(argstr="-targetfile %s", extensions=None, mandatory=True), - targetname_file=dict(argstr="-targetnamefile %s", extensions=None), - tract_prop=dict(argstr="-tractstat %s", units="NA", xor=["tract_stat"]), tract_stat=dict( argstr="-tractstat %s", requires=["scalar_file"], @@ -29,7 +55,14 @@ def test_Conmat_inputs(): def test_Conmat_outputs(): - output_map = dict(conmat_sc=dict(extensions=None), conmat_ts=dict(extensions=None)) + output_map = dict( + conmat_sc=dict( + extensions=None, + ), + conmat_ts=dict( + extensions=None, + ), + ) outputs = Conmat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py b/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py index 44576fba9f..b88fe01ba9 100644 --- a/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py +++ b/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py 
@@ -4,16 +4,30 @@ def test_DT2NIfTI_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), header_file=dict( - argstr="-header %s", extensions=None, mandatory=True, position=3 + argstr="-header %s", + extensions=None, + mandatory=True, + position=3, ), in_file=dict( - argstr="-inputfile %s", extensions=None, mandatory=True, position=1 + argstr="-inputfile %s", + extensions=None, + mandatory=True, + position=1, ), output_root=dict( - argstr="-outputroot %s", extensions=None, genfile=True, position=2 + argstr="-outputroot %s", + extensions=None, + genfile=True, + position=2, ), ) inputs = DT2NIfTI.input_spec() @@ -25,9 +39,15 @@ def test_DT2NIfTI_inputs(): def test_DT2NIfTI_outputs(): output_map = dict( - dt=dict(extensions=None), - exitcode=dict(extensions=None), - lns0=dict(extensions=None), + dt=dict( + extensions=None, + ), + exitcode=dict( + extensions=None, + ), + lns0=dict( + extensions=None, + ), ) outputs = DT2NIfTI.output_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_DTIFit.py b/nipype/interfaces/camino/tests/test_auto_DTIFit.py index 1bd82d0f95..757f870fe3 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTIFit.py +++ b/nipype/interfaces/camino/tests/test_auto_DTIFit.py @@ -4,13 +4,39 @@ def test_DTIFit_inputs(): input_map = dict( - args=dict(argstr="%s"), - bgmask=dict(argstr="-bgmask %s", extensions=None), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1), - non_linear=dict(argstr="-nonlinear", position=3), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), - scheme_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + args=dict( + argstr="%s", + ), + bgmask=dict( + argstr="-bgmask %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + non_linear=dict( + argstr="-nonlinear", + position=3, + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + scheme_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), ) inputs = DTIFit.input_spec() @@ -20,7 +46,11 @@ def test_DTIFit_inputs(): def test_DTIFit_outputs(): - output_map = dict(tensor_fitted=dict(extensions=None)) + output_map = dict( + tensor_fitted=dict( + extensions=None, + ), + ) outputs = DTIFit.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py b/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py index 20861e0e09..0ee1ffea8f 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py +++ b/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py @@ -4,22 +4,64 @@ def test_DTLUTGen_inputs(): input_map = dict( - acg=dict(argstr="-acg"), - args=dict(argstr="%s"), - bingham=dict(argstr="-bingham"), - environ=dict(nohash=True, usedefault=True), - frange=dict(argstr="-frange %s", position=1, units="NA"), - inversion=dict(argstr="-inversion %d", units="NA"), - lrange=dict(argstr="-lrange %s", position=1, units="NA"), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), - samples=dict(argstr="-samples %d", units="NA"), + acg=dict( + argstr="-acg", + ), + args=dict( + argstr="%s", + ), + bingham=dict( + argstr="-bingham", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + 
frange=dict( + argstr="-frange %s", + position=1, + units="NA", + ), + inversion=dict( + argstr="-inversion %d", + units="NA", + ), + lrange=dict( + argstr="-lrange %s", + position=1, + units="NA", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + samples=dict( + argstr="-samples %d", + units="NA", + ), scheme_file=dict( - argstr="-schemefile %s", extensions=None, mandatory=True, position=2 + argstr="-schemefile %s", + extensions=None, + mandatory=True, + position=2, + ), + snr=dict( + argstr="-snr %f", + units="NA", + ), + step=dict( + argstr="-step %f", + units="NA", + ), + trace=dict( + argstr="-trace %G", + units="NA", + ), + watson=dict( + argstr="-watson", ), - snr=dict(argstr="-snr %f", units="NA"), - step=dict(argstr="-step %f", units="NA"), - trace=dict(argstr="-trace %G", units="NA"), - watson=dict(argstr="-watson"), ) inputs = DTLUTGen.input_spec() @@ -29,7 +71,11 @@ def test_DTLUTGen_inputs(): def test_DTLUTGen_outputs(): - output_map = dict(dtLUT=dict(extensions=None)) + output_map = dict( + dtLUT=dict( + extensions=None, + ), + ) outputs = DTLUTGen.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_DTMetric.py b/nipype/interfaces/camino/tests/test_auto_DTMetric.py index 665dee649d..11e971b28b 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTMetric.py +++ b/nipype/interfaces/camino/tests/test_auto_DTMetric.py @@ -4,14 +4,39 @@ def test_DTMetric_inputs(): input_map = dict( - args=dict(argstr="%s"), - data_header=dict(argstr="-header %s", extensions=None), - eigen_data=dict(argstr="-inputfile %s", extensions=None, mandatory=True), - environ=dict(nohash=True, usedefault=True), - inputdatatype=dict(argstr="-inputdatatype %s", usedefault=True), - metric=dict(argstr="-stat %s", mandatory=True), - outputdatatype=dict(argstr="-outputdatatype %s", usedefault=True), - outputfile=dict(argstr="-outputfile %s", extensions=None, genfile=True), + args=dict( + argstr="%s", + ), + data_header=dict( + argstr="-header %s", + extensions=None, + ), + eigen_data=dict( + argstr="-inputfile %s", + extensions=None, + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + usedefault=True, + ), + metric=dict( + argstr="-stat %s", + mandatory=True, + ), + outputdatatype=dict( + argstr="-outputdatatype %s", + usedefault=True, + ), + outputfile=dict( + argstr="-outputfile %s", + extensions=None, + genfile=True, + ), ) inputs = DTMetric.input_spec() @@ -21,7 +46,11 @@ def test_DTMetric_inputs(): def test_DTMetric_outputs(): - output_map = dict(metric_stats=dict(extensions=None)) + output_map = dict( + metric_stats=dict( + extensions=None, + ), + ) outputs = DTMetric.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py b/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py index 01b481ceb4..376fa1bf3e 100644 --- a/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py +++ b/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py @@ -4,23 +4,58 @@ def test_FSL2Scheme_inputs(): input_map = dict( - args=dict(argstr="%s"), - bscale=dict(argstr="-bscale %d", units="NA"), + args=dict( + argstr="%s", + ), + bscale=dict( + argstr="-bscale %d", + units="NA", + ), bval_file=dict( - argstr="-bvalfile %s", extensions=None, mandatory=True, position=2 + argstr="-bvalfile %s", + extensions=None, + mandatory=True, + position=2, ), bvec_file=dict( - 
argstr="-bvecfile %s", extensions=None, mandatory=True, position=1 - ), - diffusiontime=dict(argstr="-diffusiontime %f", units="NA"), - environ=dict(nohash=True, usedefault=True), - flipx=dict(argstr="-flipx"), - flipy=dict(argstr="-flipy"), - flipz=dict(argstr="-flipz"), - interleave=dict(argstr="-interleave"), - numscans=dict(argstr="-numscans %d", units="NA"), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), - usegradmod=dict(argstr="-usegradmod"), + argstr="-bvecfile %s", + extensions=None, + mandatory=True, + position=1, + ), + diffusiontime=dict( + argstr="-diffusiontime %f", + units="NA", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flipx=dict( + argstr="-flipx", + ), + flipy=dict( + argstr="-flipy", + ), + flipz=dict( + argstr="-flipz", + ), + interleave=dict( + argstr="-interleave", + ), + numscans=dict( + argstr="-numscans %d", + units="NA", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + usegradmod=dict( + argstr="-usegradmod", + ), ) inputs = FSL2Scheme.input_spec() @@ -30,7 +65,11 @@ def test_FSL2Scheme_inputs(): def test_FSL2Scheme_outputs(): - output_map = dict(scheme=dict(extensions=None)) + output_map = dict( + scheme=dict( + extensions=None, + ), + ) outputs = FSL2Scheme.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py b/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py index 904b0cd097..ae49936d02 100644 --- a/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py +++ b/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py @@ -4,11 +4,30 @@ def test_Image2Voxel_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-4dimage %s", extensions=None, mandatory=True, position=1), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), - out_type=dict(argstr="-outputdatatype %s", position=2, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-4dimage %s", + extensions=None, + mandatory=True, + position=1, + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + out_type=dict( + argstr="-outputdatatype %s", + position=2, + usedefault=True, + ), ) inputs = Image2Voxel.input_spec() @@ -18,7 +37,11 @@ def test_Image2Voxel_inputs(): def test_Image2Voxel_outputs(): - output_map = dict(voxel_order=dict(extensions=None)) + output_map = dict( + voxel_order=dict( + extensions=None, + ), + ) outputs = Image2Voxel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ImageStats.py b/nipype/interfaces/camino/tests/test_auto_ImageStats.py index e1e56e167d..4bc6aa941b 100644 --- a/nipype/interfaces/camino/tests/test_auto_ImageStats.py +++ b/nipype/interfaces/camino/tests/test_auto_ImageStats.py @@ -4,12 +4,32 @@ def test_ImageStats_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_files=dict(argstr="-images %s", mandatory=True, position=-1), - out_type=dict(argstr="-outputdatatype %s", usedefault=True), - output_root=dict(argstr="-outputroot %s", extensions=None, mandatory=True), - stat=dict(argstr="-stat %s", mandatory=True, units="NA"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr="-images %s", + mandatory=True, + position=-1, 
+ ), + out_type=dict( + argstr="-outputdatatype %s", + usedefault=True, + ), + output_root=dict( + argstr="-outputroot %s", + extensions=None, + mandatory=True, + ), + stat=dict( + argstr="-stat %s", + mandatory=True, + units="NA", + ), ) inputs = ImageStats.input_spec() @@ -19,7 +39,11 @@ def test_ImageStats_inputs(): def test_ImageStats_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ImageStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_LinRecon.py b/nipype/interfaces/camino/tests/test_auto_LinRecon.py index 9b1ed7364e..1ed5bbbe6b 100644 --- a/nipype/interfaces/camino/tests/test_auto_LinRecon.py +++ b/nipype/interfaces/camino/tests/test_auto_LinRecon.py @@ -4,15 +4,47 @@ def test_LinRecon_inputs(): input_map = dict( - args=dict(argstr="%s"), - bgmask=dict(argstr="-bgmask %s", extensions=None), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1), - log=dict(argstr="-log"), - normalize=dict(argstr="-normalize"), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), - qball_mat=dict(argstr="%s", extensions=None, mandatory=True, position=3), - scheme_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + args=dict( + argstr="%s", + ), + bgmask=dict( + argstr="-bgmask %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + log=dict( + argstr="-log", + ), + normalize=dict( + argstr="-normalize", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + qball_mat=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=3, + ), + scheme_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), ) inputs = LinRecon.input_spec() @@ -22,7 +54,11 @@ def test_LinRecon_inputs(): def test_LinRecon_outputs(): - output_map = dict(recon_data=dict(extensions=None)) + output_map = dict( + recon_data=dict( + extensions=None, + ), + ) outputs = LinRecon.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_MESD.py b/nipype/interfaces/camino/tests/test_auto_MESD.py index be3edd23cc..189dd2e2d4 100644 --- a/nipype/interfaces/camino/tests/test_auto_MESD.py +++ b/nipype/interfaces/camino/tests/test_auto_MESD.py @@ -4,19 +4,56 @@ def test_MESD_inputs(): input_map = dict( - args=dict(argstr="%s"), - bgmask=dict(argstr="-bgmask %s", extensions=None), - environ=dict(nohash=True, usedefault=True), - fastmesd=dict(argstr="-fastmesd", requires=["mepointset"]), + args=dict( + argstr="%s", + ), + bgmask=dict( + argstr="-bgmask %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fastmesd=dict( + argstr="-fastmesd", + requires=["mepointset"], + ), in_file=dict( - argstr="-inputfile %s", extensions=None, mandatory=True, position=1 - ), - inputdatatype=dict(argstr="-inputdatatype %s"), - inverter=dict(argstr="-filter %s", mandatory=True, position=2), - inverter_param=dict(argstr="%f", mandatory=True, position=3, units="NA"), - mepointset=dict(argstr="-mepointset %d", units="NA"), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), - scheme_file=dict(argstr="-schemefile %s", extensions=None, mandatory=True), + argstr="-inputfile %s", + extensions=None, + 
mandatory=True, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + inverter=dict( + argstr="-filter %s", + mandatory=True, + position=2, + ), + inverter_param=dict( + argstr="%f", + mandatory=True, + position=3, + units="NA", + ), + mepointset=dict( + argstr="-mepointset %d", + units="NA", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + scheme_file=dict( + argstr="-schemefile %s", + extensions=None, + mandatory=True, + ), ) inputs = MESD.input_spec() @@ -26,7 +63,11 @@ def test_MESD_inputs(): def test_MESD_outputs(): - output_map = dict(mesd_data=dict(extensions=None)) + output_map = dict( + mesd_data=dict( + extensions=None, + ), + ) outputs = MESD.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ModelFit.py b/nipype/interfaces/camino/tests/test_auto_ModelFit.py index 4d445a03b8..82bd1a8400 100644 --- a/nipype/interfaces/camino/tests/test_auto_ModelFit.py +++ b/nipype/interfaces/camino/tests/test_auto_ModelFit.py @@ -4,24 +4,74 @@ def test_ModelFit_inputs(): input_map = dict( - args=dict(argstr="%s"), - bgmask=dict(argstr="-bgmask %s", extensions=None), - bgthresh=dict(argstr="-bgthresh %G"), - cfthresh=dict(argstr="-csfthresh %G"), - environ=dict(nohash=True, usedefault=True), - fixedbvalue=dict(argstr="-fixedbvalue %s"), - fixedmodq=dict(argstr="-fixedmod %s"), - in_file=dict(argstr="-inputfile %s", extensions=None, mandatory=True), - inputdatatype=dict(argstr="-inputdatatype %s"), - model=dict(argstr="-model %s", mandatory=True), - noisemap=dict(argstr="-noisemap %s", extensions=None), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), - outlier=dict(argstr="-outliermap %s", extensions=None), - outputfile=dict(argstr="-outputfile %s", extensions=None), - residualmap=dict(argstr="-residualmap %s", extensions=None), - scheme_file=dict(argstr="-schemefile %s", extensions=None, mandatory=True), - sigma=dict(argstr="-sigma %G"), - tau=dict(argstr="-tau %G"), + args=dict( + argstr="%s", + ), + bgmask=dict( + argstr="-bgmask %s", + extensions=None, + ), + bgthresh=dict( + argstr="-bgthresh %G", + ), + cfthresh=dict( + argstr="-csfthresh %G", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedbvalue=dict( + argstr="-fixedbvalue %s", + ), + fixedmodq=dict( + argstr="-fixedmod %s", + ), + in_file=dict( + argstr="-inputfile %s", + extensions=None, + mandatory=True, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + model=dict( + argstr="-model %s", + mandatory=True, + ), + noisemap=dict( + argstr="-noisemap %s", + extensions=None, + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + outlier=dict( + argstr="-outliermap %s", + extensions=None, + ), + outputfile=dict( + argstr="-outputfile %s", + extensions=None, + ), + residualmap=dict( + argstr="-residualmap %s", + extensions=None, + ), + scheme_file=dict( + argstr="-schemefile %s", + extensions=None, + mandatory=True, + ), + sigma=dict( + argstr="-sigma %G", + ), + tau=dict( + argstr="-tau %G", + ), ) inputs = ModelFit.input_spec() @@ -31,7 +81,11 @@ def test_ModelFit_inputs(): def test_ModelFit_outputs(): - output_map = dict(fitted_data=dict(extensions=None)) + output_map = dict( + fitted_data=dict( + extensions=None, + ), + ) outputs = ModelFit.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py 
b/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py index 35efc7c84c..82b4276a0f 100644 --- a/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py +++ b/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py @@ -4,18 +4,46 @@ def test_NIfTIDT2Camino_inputs(): input_map = dict( - args=dict(argstr="%s"), - bgmask=dict(argstr="-bgmask %s", extensions=None), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + bgmask=dict( + argstr="-bgmask %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="-inputfile %s", extensions=None, mandatory=True, position=1 - ), - lns0_file=dict(argstr="-lns0 %s", extensions=None), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), - s0_file=dict(argstr="-s0 %s", extensions=None), - scaleinter=dict(argstr="-scaleinter %s"), - scaleslope=dict(argstr="-scaleslope %s"), - uppertriangular=dict(argstr="-uppertriangular %s"), + argstr="-inputfile %s", + extensions=None, + mandatory=True, + position=1, + ), + lns0_file=dict( + argstr="-lns0 %s", + extensions=None, + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + s0_file=dict( + argstr="-s0 %s", + extensions=None, + ), + scaleinter=dict( + argstr="-scaleinter %s", + ), + scaleslope=dict( + argstr="-scaleslope %s", + ), + uppertriangular=dict( + argstr="-uppertriangular %s", + ), ) inputs = NIfTIDT2Camino.input_spec() @@ -25,7 +53,11 @@ def test_NIfTIDT2Camino_inputs(): def test_NIfTIDT2Camino_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = NIfTIDT2Camino.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py b/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py index f2c29a01b7..09f3a93cac 100644 --- a/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py +++ b/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py @@ -4,16 +4,50 @@ def test_PicoPDFs_inputs(): input_map = dict( - args=dict(argstr="%s"), - directmap=dict(argstr="-directmap"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=1), - inputmodel=dict(argstr="-inputmodel %s", position=2, usedefault=True), - luts=dict(argstr="-luts %s", mandatory=True), - maxcomponents=dict(argstr="-maxcomponents %d", units="NA"), - numpds=dict(argstr="-numpds %d", units="NA"), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), - pdf=dict(argstr="-pdf %s", position=4, usedefault=True), + args=dict( + argstr="%s", + ), + directmap=dict( + argstr="-directmap", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="< %s", + extensions=None, + mandatory=True, + position=1, + ), + inputmodel=dict( + argstr="-inputmodel %s", + position=2, + usedefault=True, + ), + luts=dict( + argstr="-luts %s", + mandatory=True, + ), + maxcomponents=dict( + argstr="-maxcomponents %d", + units="NA", + ), + numpds=dict( + argstr="-numpds %d", + units="NA", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + pdf=dict( + argstr="-pdf %s", + position=4, + usedefault=True, + ), ) inputs = PicoPDFs.input_spec() @@ -23,7 +57,11 @@ def test_PicoPDFs_inputs(): def test_PicoPDFs_outputs(): - output_map = dict(pdfs=dict(extensions=None)) + output_map = dict( + pdfs=dict( + extensions=None, + ), + ) outputs = 
PicoPDFs.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py b/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py index 03fe10f87b..b1b9fda588 100644 --- a/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py +++ b/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py @@ -4,44 +4,138 @@ def test_ProcStreamlines_inputs(): input_map = dict( - allowmultitargets=dict(argstr="-allowmultitargets"), - args=dict(argstr="%s"), - datadims=dict(argstr="-datadims %s", units="voxels"), - directional=dict(argstr="-directional %s", units="NA"), - discardloops=dict(argstr="-discardloops"), - endpointfile=dict(argstr="-endpointfile %s", extensions=None), - environ=dict(nohash=True, usedefault=True), - exclusionfile=dict(argstr="-exclusionfile %s", extensions=None), - gzip=dict(argstr="-gzip"), + allowmultitargets=dict( + argstr="-allowmultitargets", + ), + args=dict( + argstr="%s", + ), + datadims=dict( + argstr="-datadims %s", + units="voxels", + ), + directional=dict( + argstr="-directional %s", + units="NA", + ), + discardloops=dict( + argstr="-discardloops", + ), + endpointfile=dict( + argstr="-endpointfile %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + exclusionfile=dict( + argstr="-exclusionfile %s", + extensions=None, + ), + gzip=dict( + argstr="-gzip", + ), in_file=dict( - argstr="-inputfile %s", extensions=None, mandatory=True, position=1 - ), - inputmodel=dict(argstr="-inputmodel %s", usedefault=True), - iterations=dict(argstr="-iterations %d", units="NA"), - maxtractlength=dict(argstr="-maxtractlength %d", units="mm"), - maxtractpoints=dict(argstr="-maxtractpoints %d", units="NA"), - mintractlength=dict(argstr="-mintractlength %d", units="mm"), - mintractpoints=dict(argstr="-mintractpoints %d", units="NA"), - noresample=dict(argstr="-noresample"), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), - outputacm=dict(argstr="-outputacm", requires=["outputroot", "seedfile"]), + argstr="-inputfile %s", + extensions=None, + mandatory=True, + position=1, + ), + inputmodel=dict( + argstr="-inputmodel %s", + usedefault=True, + ), + iterations=dict( + argstr="-iterations %d", + units="NA", + ), + maxtractlength=dict( + argstr="-maxtractlength %d", + units="mm", + ), + maxtractpoints=dict( + argstr="-maxtractpoints %d", + units="NA", + ), + mintractlength=dict( + argstr="-mintractlength %d", + units="mm", + ), + mintractpoints=dict( + argstr="-mintractpoints %d", + units="NA", + ), + noresample=dict( + argstr="-noresample", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + outputacm=dict( + argstr="-outputacm", + requires=["outputroot", "seedfile"], + ), outputcbs=dict( - argstr="-outputcbs", requires=["outputroot", "targetfile", "seedfile"] - ), - outputcp=dict(argstr="-outputcp", requires=["outputroot", "seedfile"]), - outputroot=dict(argstr="-outputroot %s", extensions=None), - outputsc=dict(argstr="-outputsc", requires=["outputroot", "seedfile"]), - outputtracts=dict(argstr="-outputtracts"), - regionindex=dict(argstr="-regionindex %d", units="mm"), - resamplestepsize=dict(argstr="-resamplestepsize %d", units="NA"), - seedfile=dict(argstr="-seedfile %s", extensions=None), - seedpointmm=dict(argstr="-seedpointmm %s", units="mm"), - seedpointvox=dict(argstr="-seedpointvox %s", units="voxels"), - targetfile=dict(argstr="-targetfile %s", extensions=None), - 
truncateinexclusion=dict(argstr="-truncateinexclusion"), - truncateloops=dict(argstr="-truncateloops"), - voxeldims=dict(argstr="-voxeldims %s", units="mm"), - waypointfile=dict(argstr="-waypointfile %s", extensions=None), + argstr="-outputcbs", + requires=["outputroot", "targetfile", "seedfile"], + ), + outputcp=dict( + argstr="-outputcp", + requires=["outputroot", "seedfile"], + ), + outputroot=dict( + argstr="-outputroot %s", + extensions=None, + ), + outputsc=dict( + argstr="-outputsc", + requires=["outputroot", "seedfile"], + ), + outputtracts=dict( + argstr="-outputtracts", + ), + regionindex=dict( + argstr="-regionindex %d", + units="mm", + ), + resamplestepsize=dict( + argstr="-resamplestepsize %d", + units="NA", + ), + seedfile=dict( + argstr="-seedfile %s", + extensions=None, + ), + seedpointmm=dict( + argstr="-seedpointmm %s", + units="mm", + ), + seedpointvox=dict( + argstr="-seedpointvox %s", + units="voxels", + ), + targetfile=dict( + argstr="-targetfile %s", + extensions=None, + ), + truncateinexclusion=dict( + argstr="-truncateinexclusion", + ), + truncateloops=dict( + argstr="-truncateloops", + ), + voxeldims=dict( + argstr="-voxeldims %s", + units="mm", + ), + waypointfile=dict( + argstr="-waypointfile %s", + extensions=None, + ), ) inputs = ProcStreamlines.input_spec() @@ -51,7 +145,12 @@ def test_ProcStreamlines_inputs(): def test_ProcStreamlines_outputs(): - output_map = dict(outputroot_files=dict(), proc=dict(extensions=None)) + output_map = dict( + outputroot_files=dict(), + proc=dict( + extensions=None, + ), + ) outputs = ProcStreamlines.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_QBallMX.py b/nipype/interfaces/camino/tests/test_auto_QBallMX.py index f55702e52b..30fda3a483 100644 --- a/nipype/interfaces/camino/tests/test_auto_QBallMX.py +++ b/nipype/interfaces/camino/tests/test_auto_QBallMX.py @@ -4,15 +4,44 @@ def test_QBallMX_inputs(): input_map = dict( - args=dict(argstr="%s"), - basistype=dict(argstr="-basistype %s", usedefault=True), - environ=dict(nohash=True, usedefault=True), - order=dict(argstr="-order %d", units="NA"), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), - rbfpointset=dict(argstr="-rbfpointset %d", units="NA"), - rbfsigma=dict(argstr="-rbfsigma %f", units="NA"), - scheme_file=dict(argstr="-schemefile %s", extensions=None, mandatory=True), - smoothingsigma=dict(argstr="-smoothingsigma %f", units="NA"), + args=dict( + argstr="%s", + ), + basistype=dict( + argstr="-basistype %s", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + order=dict( + argstr="-order %d", + units="NA", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + rbfpointset=dict( + argstr="-rbfpointset %d", + units="NA", + ), + rbfsigma=dict( + argstr="-rbfsigma %f", + units="NA", + ), + scheme_file=dict( + argstr="-schemefile %s", + extensions=None, + mandatory=True, + ), + smoothingsigma=dict( + argstr="-smoothingsigma %f", + units="NA", + ), ) inputs = QBallMX.input_spec() @@ -22,7 +51,11 @@ def test_QBallMX_inputs(): def test_QBallMX_outputs(): - output_map = dict(qmat=dict(extensions=None)) + output_map = dict( + qmat=dict( + extensions=None, + ), + ) outputs = QBallMX.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py b/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py index 33d420dc21..fc58b2f2e9 100644 --- 
a/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py +++ b/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py @@ -4,17 +4,52 @@ def test_SFLUTGen_inputs(): input_map = dict( - args=dict(argstr="%s"), - binincsize=dict(argstr="-binincsize %d", units="NA"), - directmap=dict(argstr="-directmap"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-inputfile %s", extensions=None, mandatory=True), - info_file=dict(argstr="-infofile %s", extensions=None, mandatory=True), - minvectsperbin=dict(argstr="-minvectsperbin %d", units="NA"), - order=dict(argstr="-order %d", units="NA"), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), - outputstem=dict(argstr="-outputstem %s", usedefault=True), - pdf=dict(argstr="-pdf %s", usedefault=True), + args=dict( + argstr="%s", + ), + binincsize=dict( + argstr="-binincsize %d", + units="NA", + ), + directmap=dict( + argstr="-directmap", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-inputfile %s", + extensions=None, + mandatory=True, + ), + info_file=dict( + argstr="-infofile %s", + extensions=None, + mandatory=True, + ), + minvectsperbin=dict( + argstr="-minvectsperbin %d", + units="NA", + ), + order=dict( + argstr="-order %d", + units="NA", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + outputstem=dict( + argstr="-outputstem %s", + usedefault=True, + ), + pdf=dict( + argstr="-pdf %s", + usedefault=True, + ), ) inputs = SFLUTGen.input_spec() @@ -25,7 +60,12 @@ def test_SFLUTGen_inputs(): def test_SFLUTGen_outputs(): output_map = dict( - lut_one_fibre=dict(extensions=None), lut_two_fibres=dict(extensions=None) + lut_one_fibre=dict( + extensions=None, + ), + lut_two_fibres=dict( + extensions=None, + ), ) outputs = SFLUTGen.output_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py b/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py index aa4f480d96..5c20399cbc 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py +++ b/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py @@ -4,8 +4,13 @@ def test_SFPICOCalibData_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), info_file=dict( argstr="-infooutputfile %s", extensions=None, @@ -13,19 +18,61 @@ def test_SFPICOCalibData_inputs(): hash_files=False, mandatory=True, ), - onedtfarange=dict(argstr="-onedtfarange %s", units="NA"), - onedtfastep=dict(argstr="-onedtfastep %f", units="NA"), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), - scheme_file=dict(argstr="-schemefile %s", extensions=None, mandatory=True), - seed=dict(argstr="-seed %f", units="NA"), - snr=dict(argstr="-snr %f", units="NA"), - trace=dict(argstr="-trace %f", units="NA"), - twodtanglerange=dict(argstr="-twodtanglerange %s", units="NA"), - twodtanglestep=dict(argstr="-twodtanglestep %f", units="NA"), - twodtfarange=dict(argstr="-twodtfarange %s", units="NA"), - twodtfastep=dict(argstr="-twodtfastep %f", units="NA"), - twodtmixmax=dict(argstr="-twodtmixmax %f", units="NA"), - twodtmixstep=dict(argstr="-twodtmixstep %f", units="NA"), + onedtfarange=dict( + argstr="-onedtfarange %s", + units="NA", + ), + onedtfastep=dict( + argstr="-onedtfastep %f", + units="NA", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + scheme_file=dict( + argstr="-schemefile 
%s", + extensions=None, + mandatory=True, + ), + seed=dict( + argstr="-seed %f", + units="NA", + ), + snr=dict( + argstr="-snr %f", + units="NA", + ), + trace=dict( + argstr="-trace %f", + units="NA", + ), + twodtanglerange=dict( + argstr="-twodtanglerange %s", + units="NA", + ), + twodtanglestep=dict( + argstr="-twodtanglestep %f", + units="NA", + ), + twodtfarange=dict( + argstr="-twodtfarange %s", + units="NA", + ), + twodtfastep=dict( + argstr="-twodtfastep %f", + units="NA", + ), + twodtmixmax=dict( + argstr="-twodtmixmax %f", + units="NA", + ), + twodtmixstep=dict( + argstr="-twodtmixstep %f", + units="NA", + ), ) inputs = SFPICOCalibData.input_spec() @@ -35,7 +82,14 @@ def test_SFPICOCalibData_inputs(): def test_SFPICOCalibData_outputs(): - output_map = dict(PICOCalib=dict(extensions=None), calib_info=dict(extensions=None)) + output_map = dict( + PICOCalib=dict( + extensions=None, + ), + calib_info=dict( + extensions=None, + ), + ) outputs = SFPICOCalibData.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_SFPeaks.py b/nipype/interfaces/camino/tests/test_auto_SFPeaks.py index 117a3bfa09..775a9061e6 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFPeaks.py +++ b/nipype/interfaces/camino/tests/test_auto_SFPeaks.py @@ -4,22 +4,71 @@ def test_SFPeaks_inputs(): input_map = dict( - args=dict(argstr="%s"), - density=dict(argstr="-density %d", units="NA"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-inputfile %s", extensions=None, mandatory=True), - inputmodel=dict(argstr="-inputmodel %s", mandatory=True), - mepointset=dict(argstr="-mepointset %d", units="NA"), - noconsistencycheck=dict(argstr="-noconsistencycheck"), - numpds=dict(argstr="-numpds %d", units="NA"), - order=dict(argstr="-order %d", units="NA"), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), - pdthresh=dict(argstr="-pdthresh %f", units="NA"), - pointset=dict(argstr="-pointset %d", units="NA"), - rbfpointset=dict(argstr="-rbfpointset %d", units="NA"), - scheme_file=dict(argstr="%s", extensions=None), - searchradius=dict(argstr="-searchradius %f", units="NA"), - stdsfrommean=dict(argstr="-stdsfrommean %f", units="NA"), + args=dict( + argstr="%s", + ), + density=dict( + argstr="-density %d", + units="NA", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-inputfile %s", + extensions=None, + mandatory=True, + ), + inputmodel=dict( + argstr="-inputmodel %s", + mandatory=True, + ), + mepointset=dict( + argstr="-mepointset %d", + units="NA", + ), + noconsistencycheck=dict( + argstr="-noconsistencycheck", + ), + numpds=dict( + argstr="-numpds %d", + units="NA", + ), + order=dict( + argstr="-order %d", + units="NA", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + pdthresh=dict( + argstr="-pdthresh %f", + units="NA", + ), + pointset=dict( + argstr="-pointset %d", + units="NA", + ), + rbfpointset=dict( + argstr="-rbfpointset %d", + units="NA", + ), + scheme_file=dict( + argstr="%s", + extensions=None, + ), + searchradius=dict( + argstr="-searchradius %f", + units="NA", + ), + stdsfrommean=dict( + argstr="-stdsfrommean %f", + units="NA", + ), ) inputs = SFPeaks.input_spec() @@ -29,7 +78,11 @@ def test_SFPeaks_inputs(): def test_SFPeaks_outputs(): - output_map = dict(peaks=dict(extensions=None)) + output_map = dict( + peaks=dict( + extensions=None, + ), + ) outputs = SFPeaks.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_Shredder.py b/nipype/interfaces/camino/tests/test_auto_Shredder.py index 3e55b5a39f..bf0f9dc9dc 100644 --- a/nipype/interfaces/camino/tests/test_auto_Shredder.py +++ b/nipype/interfaces/camino/tests/test_auto_Shredder.py @@ -4,13 +4,40 @@ def test_Shredder_inputs(): input_map = dict( - args=dict(argstr="%s"), - chunksize=dict(argstr="%d", position=2, units="NA"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=-2), - offset=dict(argstr="%d", position=1, units="NA"), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), - space=dict(argstr="%d", position=3, units="NA"), + args=dict( + argstr="%s", + ), + chunksize=dict( + argstr="%d", + position=2, + units="NA", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="< %s", + extensions=None, + mandatory=True, + position=-2, + ), + offset=dict( + argstr="%d", + position=1, + units="NA", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + space=dict( + argstr="%d", + position=3, + units="NA", + ), ) inputs = Shredder.input_spec() @@ -20,7 +47,11 @@ def test_Shredder_inputs(): def test_Shredder_outputs(): - output_map = dict(shredded=dict(extensions=None)) + output_map = dict( + shredded=dict( + extensions=None, + ), + ) outputs = Shredder.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_Track.py b/nipype/interfaces/camino/tests/test_auto_Track.py index 5fba585144..697b2d5275 100644 --- a/nipype/interfaces/camino/tests/test_auto_Track.py +++ b/nipype/interfaces/camino/tests/test_auto_Track.py @@ -4,30 +4,91 @@ def test_Track_inputs(): input_map = dict( - anisfile=dict(argstr="-anisfile %s", extensions=None), - anisthresh=dict(argstr="-anisthresh %f"), - args=dict(argstr="%s"), - curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"]), - curvethresh=dict(argstr="-curvethresh %f"), - data_dims=dict(argstr="-datadims %s", units="voxels"), - environ=dict(nohash=True, usedefault=True), - gzip=dict(argstr="-gzip"), - in_file=dict(argstr="-inputfile %s", extensions=None, position=1), - inputdatatype=dict(argstr="-inputdatatype %s"), - inputmodel=dict(argstr="-inputmodel %s", usedefault=True), - interpolator=dict(argstr="-interpolator %s"), - ipthresh=dict(argstr="-ipthresh %f"), - maxcomponents=dict(argstr="-maxcomponents %d", units="NA"), - numpds=dict(argstr="-numpds %d", units="NA"), + anisfile=dict( + argstr="-anisfile %s", + extensions=None, + ), + anisthresh=dict( + argstr="-anisthresh %f", + ), + args=dict( + argstr="%s", + ), + curveinterval=dict( + argstr="-curveinterval %f", + requires=["curvethresh"], + ), + curvethresh=dict( + argstr="-curvethresh %f", + ), + data_dims=dict( + argstr="-datadims %s", + units="voxels", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gzip=dict( + argstr="-gzip", + ), + in_file=dict( + argstr="-inputfile %s", + extensions=None, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + inputmodel=dict( + argstr="-inputmodel %s", + usedefault=True, + ), + interpolator=dict( + argstr="-interpolator %s", + ), + ipthresh=dict( + argstr="-ipthresh %f", + ), + maxcomponents=dict( + argstr="-maxcomponents %d", + units="NA", + ), + numpds=dict( + argstr="-numpds %d", + units="NA", + ), out_file=dict( - argstr="-outputfile %s", extensions=None, genfile=True, 
position=-1 - ), - output_root=dict(argstr="-outputroot %s", extensions=None, position=-1), - outputtracts=dict(argstr="-outputtracts %s"), - seed_file=dict(argstr="-seedfile %s", extensions=None, position=2), - stepsize=dict(argstr="-stepsize %f", requires=["tracker"]), - tracker=dict(argstr="-tracker %s", usedefault=True), - voxel_dims=dict(argstr="-voxeldims %s", units="mm"), + argstr="-outputfile %s", + extensions=None, + genfile=True, + position=-1, + ), + output_root=dict( + argstr="-outputroot %s", + extensions=None, + position=-1, + ), + outputtracts=dict( + argstr="-outputtracts %s", + ), + seed_file=dict( + argstr="-seedfile %s", + extensions=None, + position=2, + ), + stepsize=dict( + argstr="-stepsize %f", + requires=["tracker"], + ), + tracker=dict( + argstr="-tracker %s", + usedefault=True, + ), + voxel_dims=dict( + argstr="-voxeldims %s", + units="mm", + ), ) inputs = Track.input_spec() @@ -37,7 +98,11 @@ def test_Track_inputs(): def test_Track_outputs(): - output_map = dict(tracked=dict(extensions=None)) + output_map = dict( + tracked=dict( + extensions=None, + ), + ) outputs = Track.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py b/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py index 4e7379dbfc..a117d5d782 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py @@ -4,30 +4,91 @@ def test_TrackBallStick_inputs(): input_map = dict( - anisfile=dict(argstr="-anisfile %s", extensions=None), - anisthresh=dict(argstr="-anisthresh %f"), - args=dict(argstr="%s"), - curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"]), - curvethresh=dict(argstr="-curvethresh %f"), - data_dims=dict(argstr="-datadims %s", units="voxels"), - environ=dict(nohash=True, usedefault=True), - gzip=dict(argstr="-gzip"), - in_file=dict(argstr="-inputfile %s", extensions=None, position=1), - inputdatatype=dict(argstr="-inputdatatype %s"), - inputmodel=dict(argstr="-inputmodel %s", usedefault=True), - interpolator=dict(argstr="-interpolator %s"), - ipthresh=dict(argstr="-ipthresh %f"), - maxcomponents=dict(argstr="-maxcomponents %d", units="NA"), - numpds=dict(argstr="-numpds %d", units="NA"), + anisfile=dict( + argstr="-anisfile %s", + extensions=None, + ), + anisthresh=dict( + argstr="-anisthresh %f", + ), + args=dict( + argstr="%s", + ), + curveinterval=dict( + argstr="-curveinterval %f", + requires=["curvethresh"], + ), + curvethresh=dict( + argstr="-curvethresh %f", + ), + data_dims=dict( + argstr="-datadims %s", + units="voxels", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gzip=dict( + argstr="-gzip", + ), + in_file=dict( + argstr="-inputfile %s", + extensions=None, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + inputmodel=dict( + argstr="-inputmodel %s", + usedefault=True, + ), + interpolator=dict( + argstr="-interpolator %s", + ), + ipthresh=dict( + argstr="-ipthresh %f", + ), + maxcomponents=dict( + argstr="-maxcomponents %d", + units="NA", + ), + numpds=dict( + argstr="-numpds %d", + units="NA", + ), out_file=dict( - argstr="-outputfile %s", extensions=None, genfile=True, position=-1 - ), - output_root=dict(argstr="-outputroot %s", extensions=None, position=-1), - outputtracts=dict(argstr="-outputtracts %s"), - seed_file=dict(argstr="-seedfile %s", extensions=None, position=2), - stepsize=dict(argstr="-stepsize %f", requires=["tracker"]), - 
tracker=dict(argstr="-tracker %s", usedefault=True), - voxel_dims=dict(argstr="-voxeldims %s", units="mm"), + argstr="-outputfile %s", + extensions=None, + genfile=True, + position=-1, + ), + output_root=dict( + argstr="-outputroot %s", + extensions=None, + position=-1, + ), + outputtracts=dict( + argstr="-outputtracts %s", + ), + seed_file=dict( + argstr="-seedfile %s", + extensions=None, + position=2, + ), + stepsize=dict( + argstr="-stepsize %f", + requires=["tracker"], + ), + tracker=dict( + argstr="-tracker %s", + usedefault=True, + ), + voxel_dims=dict( + argstr="-voxeldims %s", + units="mm", + ), ) inputs = TrackBallStick.input_spec() @@ -37,7 +98,11 @@ def test_TrackBallStick_inputs(): def test_TrackBallStick_outputs(): - output_map = dict(tracked=dict(extensions=None)) + output_map = dict( + tracked=dict( + extensions=None, + ), + ) outputs = TrackBallStick.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py b/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py index dd78dcb8fc..56ca8ece97 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py @@ -4,39 +4,122 @@ def test_TrackBayesDirac_inputs(): input_map = dict( - anisfile=dict(argstr="-anisfile %s", extensions=None), - anisthresh=dict(argstr="-anisthresh %f"), - args=dict(argstr="%s"), - curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"]), - curvepriorg=dict(argstr="-curvepriorg %G"), - curvepriork=dict(argstr="-curvepriork %G"), - curvethresh=dict(argstr="-curvethresh %f"), - data_dims=dict(argstr="-datadims %s", units="voxels"), - datamodel=dict(argstr="-datamodel %s"), - environ=dict(nohash=True, usedefault=True), - extpriordatatype=dict(argstr="-extpriordatatype %s"), - extpriorfile=dict(argstr="-extpriorfile %s", extensions=None), - gzip=dict(argstr="-gzip"), - in_file=dict(argstr="-inputfile %s", extensions=None, position=1), - inputdatatype=dict(argstr="-inputdatatype %s"), - inputmodel=dict(argstr="-inputmodel %s", usedefault=True), - interpolator=dict(argstr="-interpolator %s"), - ipthresh=dict(argstr="-ipthresh %f"), - iterations=dict(argstr="-iterations %d", units="NA"), - maxcomponents=dict(argstr="-maxcomponents %d", units="NA"), - numpds=dict(argstr="-numpds %d", units="NA"), + anisfile=dict( + argstr="-anisfile %s", + extensions=None, + ), + anisthresh=dict( + argstr="-anisthresh %f", + ), + args=dict( + argstr="%s", + ), + curveinterval=dict( + argstr="-curveinterval %f", + requires=["curvethresh"], + ), + curvepriorg=dict( + argstr="-curvepriorg %G", + ), + curvepriork=dict( + argstr="-curvepriork %G", + ), + curvethresh=dict( + argstr="-curvethresh %f", + ), + data_dims=dict( + argstr="-datadims %s", + units="voxels", + ), + datamodel=dict( + argstr="-datamodel %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + extpriordatatype=dict( + argstr="-extpriordatatype %s", + ), + extpriorfile=dict( + argstr="-extpriorfile %s", + extensions=None, + ), + gzip=dict( + argstr="-gzip", + ), + in_file=dict( + argstr="-inputfile %s", + extensions=None, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + inputmodel=dict( + argstr="-inputmodel %s", + usedefault=True, + ), + interpolator=dict( + argstr="-interpolator %s", + ), + ipthresh=dict( + argstr="-ipthresh %f", + ), + iterations=dict( + argstr="-iterations %d", + units="NA", + ), + maxcomponents=dict( + argstr="-maxcomponents %d", + 
units="NA", + ), + numpds=dict( + argstr="-numpds %d", + units="NA", + ), out_file=dict( - argstr="-outputfile %s", extensions=None, genfile=True, position=-1 - ), - output_root=dict(argstr="-outputroot %s", extensions=None, position=-1), - outputtracts=dict(argstr="-outputtracts %s"), - pdf=dict(argstr="-pdf %s"), - pointset=dict(argstr="-pointset %s"), - scheme_file=dict(argstr="-schemefile %s", extensions=None, mandatory=True), - seed_file=dict(argstr="-seedfile %s", extensions=None, position=2), - stepsize=dict(argstr="-stepsize %f", requires=["tracker"]), - tracker=dict(argstr="-tracker %s", usedefault=True), - voxel_dims=dict(argstr="-voxeldims %s", units="mm"), + argstr="-outputfile %s", + extensions=None, + genfile=True, + position=-1, + ), + output_root=dict( + argstr="-outputroot %s", + extensions=None, + position=-1, + ), + outputtracts=dict( + argstr="-outputtracts %s", + ), + pdf=dict( + argstr="-pdf %s", + ), + pointset=dict( + argstr="-pointset %s", + ), + scheme_file=dict( + argstr="-schemefile %s", + extensions=None, + mandatory=True, + ), + seed_file=dict( + argstr="-seedfile %s", + extensions=None, + position=2, + ), + stepsize=dict( + argstr="-stepsize %f", + requires=["tracker"], + ), + tracker=dict( + argstr="-tracker %s", + usedefault=True, + ), + voxel_dims=dict( + argstr="-voxeldims %s", + units="mm", + ), ) inputs = TrackBayesDirac.input_spec() @@ -46,7 +129,11 @@ def test_TrackBayesDirac_inputs(): def test_TrackBayesDirac_outputs(): - output_map = dict(tracked=dict(extensions=None)) + output_map = dict( + tracked=dict( + extensions=None, + ), + ) outputs = TrackBayesDirac.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py index 21faf179cc..dbd8f89478 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py @@ -4,32 +4,99 @@ def test_TrackBedpostxDeter_inputs(): input_map = dict( - anisfile=dict(argstr="-anisfile %s", extensions=None), - anisthresh=dict(argstr="-anisthresh %f"), - args=dict(argstr="%s"), - bedpostxdir=dict(argstr="-bedpostxdir %s", mandatory=True), - curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"]), - curvethresh=dict(argstr="-curvethresh %f"), - data_dims=dict(argstr="-datadims %s", units="voxels"), - environ=dict(nohash=True, usedefault=True), - gzip=dict(argstr="-gzip"), - in_file=dict(argstr="-inputfile %s", extensions=None, position=1), - inputdatatype=dict(argstr="-inputdatatype %s"), - inputmodel=dict(argstr="-inputmodel %s", usedefault=True), - interpolator=dict(argstr="-interpolator %s"), - ipthresh=dict(argstr="-ipthresh %f"), - maxcomponents=dict(argstr="-maxcomponents %d", units="NA"), - min_vol_frac=dict(argstr="-bedpostxminf %d", units="NA"), - numpds=dict(argstr="-numpds %d", units="NA"), + anisfile=dict( + argstr="-anisfile %s", + extensions=None, + ), + anisthresh=dict( + argstr="-anisthresh %f", + ), + args=dict( + argstr="%s", + ), + bedpostxdir=dict( + argstr="-bedpostxdir %s", + mandatory=True, + ), + curveinterval=dict( + argstr="-curveinterval %f", + requires=["curvethresh"], + ), + curvethresh=dict( + argstr="-curvethresh %f", + ), + data_dims=dict( + argstr="-datadims %s", + units="voxels", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gzip=dict( + argstr="-gzip", + ), + in_file=dict( + argstr="-inputfile %s", + extensions=None, + position=1, + ), + 
inputdatatype=dict( + argstr="-inputdatatype %s", + ), + inputmodel=dict( + argstr="-inputmodel %s", + usedefault=True, + ), + interpolator=dict( + argstr="-interpolator %s", + ), + ipthresh=dict( + argstr="-ipthresh %f", + ), + maxcomponents=dict( + argstr="-maxcomponents %d", + units="NA", + ), + min_vol_frac=dict( + argstr="-bedpostxminf %d", + units="NA", + ), + numpds=dict( + argstr="-numpds %d", + units="NA", + ), out_file=dict( - argstr="-outputfile %s", extensions=None, genfile=True, position=-1 - ), - output_root=dict(argstr="-outputroot %s", extensions=None, position=-1), - outputtracts=dict(argstr="-outputtracts %s"), - seed_file=dict(argstr="-seedfile %s", extensions=None, position=2), - stepsize=dict(argstr="-stepsize %f", requires=["tracker"]), - tracker=dict(argstr="-tracker %s", usedefault=True), - voxel_dims=dict(argstr="-voxeldims %s", units="mm"), + argstr="-outputfile %s", + extensions=None, + genfile=True, + position=-1, + ), + output_root=dict( + argstr="-outputroot %s", + extensions=None, + position=-1, + ), + outputtracts=dict( + argstr="-outputtracts %s", + ), + seed_file=dict( + argstr="-seedfile %s", + extensions=None, + position=2, + ), + stepsize=dict( + argstr="-stepsize %f", + requires=["tracker"], + ), + tracker=dict( + argstr="-tracker %s", + usedefault=True, + ), + voxel_dims=dict( + argstr="-voxeldims %s", + units="mm", + ), ) inputs = TrackBedpostxDeter.input_spec() @@ -39,7 +106,11 @@ def test_TrackBedpostxDeter_inputs(): def test_TrackBedpostxDeter_outputs(): - output_map = dict(tracked=dict(extensions=None)) + output_map = dict( + tracked=dict( + extensions=None, + ), + ) outputs = TrackBedpostxDeter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py index be378e55ac..7d1baa0e43 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py @@ -4,33 +4,103 @@ def test_TrackBedpostxProba_inputs(): input_map = dict( - anisfile=dict(argstr="-anisfile %s", extensions=None), - anisthresh=dict(argstr="-anisthresh %f"), - args=dict(argstr="%s"), - bedpostxdir=dict(argstr="-bedpostxdir %s", mandatory=True), - curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"]), - curvethresh=dict(argstr="-curvethresh %f"), - data_dims=dict(argstr="-datadims %s", units="voxels"), - environ=dict(nohash=True, usedefault=True), - gzip=dict(argstr="-gzip"), - in_file=dict(argstr="-inputfile %s", extensions=None, position=1), - inputdatatype=dict(argstr="-inputdatatype %s"), - inputmodel=dict(argstr="-inputmodel %s", usedefault=True), - interpolator=dict(argstr="-interpolator %s"), - ipthresh=dict(argstr="-ipthresh %f"), - iterations=dict(argstr="-iterations %d", units="NA"), - maxcomponents=dict(argstr="-maxcomponents %d", units="NA"), - min_vol_frac=dict(argstr="-bedpostxminf %d", units="NA"), - numpds=dict(argstr="-numpds %d", units="NA"), + anisfile=dict( + argstr="-anisfile %s", + extensions=None, + ), + anisthresh=dict( + argstr="-anisthresh %f", + ), + args=dict( + argstr="%s", + ), + bedpostxdir=dict( + argstr="-bedpostxdir %s", + mandatory=True, + ), + curveinterval=dict( + argstr="-curveinterval %f", + requires=["curvethresh"], + ), + curvethresh=dict( + argstr="-curvethresh %f", + ), + data_dims=dict( + argstr="-datadims %s", + units="voxels", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gzip=dict( + 
argstr="-gzip", + ), + in_file=dict( + argstr="-inputfile %s", + extensions=None, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + inputmodel=dict( + argstr="-inputmodel %s", + usedefault=True, + ), + interpolator=dict( + argstr="-interpolator %s", + ), + ipthresh=dict( + argstr="-ipthresh %f", + ), + iterations=dict( + argstr="-iterations %d", + units="NA", + ), + maxcomponents=dict( + argstr="-maxcomponents %d", + units="NA", + ), + min_vol_frac=dict( + argstr="-bedpostxminf %d", + units="NA", + ), + numpds=dict( + argstr="-numpds %d", + units="NA", + ), out_file=dict( - argstr="-outputfile %s", extensions=None, genfile=True, position=-1 - ), - output_root=dict(argstr="-outputroot %s", extensions=None, position=-1), - outputtracts=dict(argstr="-outputtracts %s"), - seed_file=dict(argstr="-seedfile %s", extensions=None, position=2), - stepsize=dict(argstr="-stepsize %f", requires=["tracker"]), - tracker=dict(argstr="-tracker %s", usedefault=True), - voxel_dims=dict(argstr="-voxeldims %s", units="mm"), + argstr="-outputfile %s", + extensions=None, + genfile=True, + position=-1, + ), + output_root=dict( + argstr="-outputroot %s", + extensions=None, + position=-1, + ), + outputtracts=dict( + argstr="-outputtracts %s", + ), + seed_file=dict( + argstr="-seedfile %s", + extensions=None, + position=2, + ), + stepsize=dict( + argstr="-stepsize %f", + requires=["tracker"], + ), + tracker=dict( + argstr="-tracker %s", + usedefault=True, + ), + voxel_dims=dict( + argstr="-voxeldims %s", + units="mm", + ), ) inputs = TrackBedpostxProba.input_spec() @@ -40,7 +110,11 @@ def test_TrackBedpostxProba_inputs(): def test_TrackBedpostxProba_outputs(): - output_map = dict(tracked=dict(extensions=None)) + output_map = dict( + tracked=dict( + extensions=None, + ), + ) outputs = TrackBedpostxProba.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py b/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py index 6562af75c2..75cd2e3d11 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py @@ -4,35 +4,111 @@ def test_TrackBootstrap_inputs(): input_map = dict( - anisfile=dict(argstr="-anisfile %s", extensions=None), - anisthresh=dict(argstr="-anisthresh %f"), - args=dict(argstr="%s"), - bgmask=dict(argstr="-bgmask %s", extensions=None), - bsdatafiles=dict(argstr="-bsdatafile %s", mandatory=True), - curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"]), - curvethresh=dict(argstr="-curvethresh %f"), - data_dims=dict(argstr="-datadims %s", units="voxels"), - environ=dict(nohash=True, usedefault=True), - gzip=dict(argstr="-gzip"), - in_file=dict(argstr="-inputfile %s", extensions=None, position=1), - inputdatatype=dict(argstr="-inputdatatype %s"), - inputmodel=dict(argstr="-inputmodel %s", usedefault=True), - interpolator=dict(argstr="-interpolator %s"), - inversion=dict(argstr="-inversion %s"), - ipthresh=dict(argstr="-ipthresh %f"), - iterations=dict(argstr="-iterations %d", units="NA"), - maxcomponents=dict(argstr="-maxcomponents %d", units="NA"), - numpds=dict(argstr="-numpds %d", units="NA"), + anisfile=dict( + argstr="-anisfile %s", + extensions=None, + ), + anisthresh=dict( + argstr="-anisthresh %f", + ), + args=dict( + argstr="%s", + ), + bgmask=dict( + argstr="-bgmask %s", + extensions=None, + ), + bsdatafiles=dict( + argstr="-bsdatafile %s", + mandatory=True, + ), + curveinterval=dict( + 
argstr="-curveinterval %f", + requires=["curvethresh"], + ), + curvethresh=dict( + argstr="-curvethresh %f", + ), + data_dims=dict( + argstr="-datadims %s", + units="voxels", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gzip=dict( + argstr="-gzip", + ), + in_file=dict( + argstr="-inputfile %s", + extensions=None, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + inputmodel=dict( + argstr="-inputmodel %s", + usedefault=True, + ), + interpolator=dict( + argstr="-interpolator %s", + ), + inversion=dict( + argstr="-inversion %s", + ), + ipthresh=dict( + argstr="-ipthresh %f", + ), + iterations=dict( + argstr="-iterations %d", + units="NA", + ), + maxcomponents=dict( + argstr="-maxcomponents %d", + units="NA", + ), + numpds=dict( + argstr="-numpds %d", + units="NA", + ), out_file=dict( - argstr="-outputfile %s", extensions=None, genfile=True, position=-1 - ), - output_root=dict(argstr="-outputroot %s", extensions=None, position=-1), - outputtracts=dict(argstr="-outputtracts %s"), - scheme_file=dict(argstr="-schemefile %s", extensions=None, mandatory=True), - seed_file=dict(argstr="-seedfile %s", extensions=None, position=2), - stepsize=dict(argstr="-stepsize %f", requires=["tracker"]), - tracker=dict(argstr="-tracker %s", usedefault=True), - voxel_dims=dict(argstr="-voxeldims %s", units="mm"), + argstr="-outputfile %s", + extensions=None, + genfile=True, + position=-1, + ), + output_root=dict( + argstr="-outputroot %s", + extensions=None, + position=-1, + ), + outputtracts=dict( + argstr="-outputtracts %s", + ), + scheme_file=dict( + argstr="-schemefile %s", + extensions=None, + mandatory=True, + ), + seed_file=dict( + argstr="-seedfile %s", + extensions=None, + position=2, + ), + stepsize=dict( + argstr="-stepsize %f", + requires=["tracker"], + ), + tracker=dict( + argstr="-tracker %s", + usedefault=True, + ), + voxel_dims=dict( + argstr="-voxeldims %s", + units="mm", + ), ) inputs = TrackBootstrap.input_spec() @@ -42,7 +118,11 @@ def test_TrackBootstrap_inputs(): def test_TrackBootstrap_outputs(): - output_map = dict(tracked=dict(extensions=None)) + output_map = dict( + tracked=dict( + extensions=None, + ), + ) outputs = TrackBootstrap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackDT.py b/nipype/interfaces/camino/tests/test_auto_TrackDT.py index 2a84aadbd9..c60ba7b5f5 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackDT.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackDT.py @@ -4,30 +4,91 @@ def test_TrackDT_inputs(): input_map = dict( - anisfile=dict(argstr="-anisfile %s", extensions=None), - anisthresh=dict(argstr="-anisthresh %f"), - args=dict(argstr="%s"), - curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"]), - curvethresh=dict(argstr="-curvethresh %f"), - data_dims=dict(argstr="-datadims %s", units="voxels"), - environ=dict(nohash=True, usedefault=True), - gzip=dict(argstr="-gzip"), - in_file=dict(argstr="-inputfile %s", extensions=None, position=1), - inputdatatype=dict(argstr="-inputdatatype %s"), - inputmodel=dict(argstr="-inputmodel %s", usedefault=True), - interpolator=dict(argstr="-interpolator %s"), - ipthresh=dict(argstr="-ipthresh %f"), - maxcomponents=dict(argstr="-maxcomponents %d", units="NA"), - numpds=dict(argstr="-numpds %d", units="NA"), + anisfile=dict( + argstr="-anisfile %s", + extensions=None, + ), + anisthresh=dict( + argstr="-anisthresh %f", + ), + args=dict( + argstr="%s", + ), + curveinterval=dict( + 
argstr="-curveinterval %f", + requires=["curvethresh"], + ), + curvethresh=dict( + argstr="-curvethresh %f", + ), + data_dims=dict( + argstr="-datadims %s", + units="voxels", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gzip=dict( + argstr="-gzip", + ), + in_file=dict( + argstr="-inputfile %s", + extensions=None, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + inputmodel=dict( + argstr="-inputmodel %s", + usedefault=True, + ), + interpolator=dict( + argstr="-interpolator %s", + ), + ipthresh=dict( + argstr="-ipthresh %f", + ), + maxcomponents=dict( + argstr="-maxcomponents %d", + units="NA", + ), + numpds=dict( + argstr="-numpds %d", + units="NA", + ), out_file=dict( - argstr="-outputfile %s", extensions=None, genfile=True, position=-1 - ), - output_root=dict(argstr="-outputroot %s", extensions=None, position=-1), - outputtracts=dict(argstr="-outputtracts %s"), - seed_file=dict(argstr="-seedfile %s", extensions=None, position=2), - stepsize=dict(argstr="-stepsize %f", requires=["tracker"]), - tracker=dict(argstr="-tracker %s", usedefault=True), - voxel_dims=dict(argstr="-voxeldims %s", units="mm"), + argstr="-outputfile %s", + extensions=None, + genfile=True, + position=-1, + ), + output_root=dict( + argstr="-outputroot %s", + extensions=None, + position=-1, + ), + outputtracts=dict( + argstr="-outputtracts %s", + ), + seed_file=dict( + argstr="-seedfile %s", + extensions=None, + position=2, + ), + stepsize=dict( + argstr="-stepsize %f", + requires=["tracker"], + ), + tracker=dict( + argstr="-tracker %s", + usedefault=True, + ), + voxel_dims=dict( + argstr="-voxeldims %s", + units="mm", + ), ) inputs = TrackDT.input_spec() @@ -37,7 +98,11 @@ def test_TrackDT_inputs(): def test_TrackDT_outputs(): - output_map = dict(tracked=dict(extensions=None)) + output_map = dict( + tracked=dict( + extensions=None, + ), + ) outputs = TrackDT.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TrackPICo.py b/nipype/interfaces/camino/tests/test_auto_TrackPICo.py index 6392df92c0..1d3647e151 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackPICo.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackPICo.py @@ -4,32 +4,98 @@ def test_TrackPICo_inputs(): input_map = dict( - anisfile=dict(argstr="-anisfile %s", extensions=None), - anisthresh=dict(argstr="-anisthresh %f"), - args=dict(argstr="%s"), - curveinterval=dict(argstr="-curveinterval %f", requires=["curvethresh"]), - curvethresh=dict(argstr="-curvethresh %f"), - data_dims=dict(argstr="-datadims %s", units="voxels"), - environ=dict(nohash=True, usedefault=True), - gzip=dict(argstr="-gzip"), - in_file=dict(argstr="-inputfile %s", extensions=None, position=1), - inputdatatype=dict(argstr="-inputdatatype %s"), - inputmodel=dict(argstr="-inputmodel %s", usedefault=True), - interpolator=dict(argstr="-interpolator %s"), - ipthresh=dict(argstr="-ipthresh %f"), - iterations=dict(argstr="-iterations %d", units="NA"), - maxcomponents=dict(argstr="-maxcomponents %d", units="NA"), - numpds=dict(argstr="-numpds %d", units="NA"), + anisfile=dict( + argstr="-anisfile %s", + extensions=None, + ), + anisthresh=dict( + argstr="-anisthresh %f", + ), + args=dict( + argstr="%s", + ), + curveinterval=dict( + argstr="-curveinterval %f", + requires=["curvethresh"], + ), + curvethresh=dict( + argstr="-curvethresh %f", + ), + data_dims=dict( + argstr="-datadims %s", + units="voxels", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gzip=dict( + 
argstr="-gzip", + ), + in_file=dict( + argstr="-inputfile %s", + extensions=None, + position=1, + ), + inputdatatype=dict( + argstr="-inputdatatype %s", + ), + inputmodel=dict( + argstr="-inputmodel %s", + usedefault=True, + ), + interpolator=dict( + argstr="-interpolator %s", + ), + ipthresh=dict( + argstr="-ipthresh %f", + ), + iterations=dict( + argstr="-iterations %d", + units="NA", + ), + maxcomponents=dict( + argstr="-maxcomponents %d", + units="NA", + ), + numpds=dict( + argstr="-numpds %d", + units="NA", + ), out_file=dict( - argstr="-outputfile %s", extensions=None, genfile=True, position=-1 - ), - output_root=dict(argstr="-outputroot %s", extensions=None, position=-1), - outputtracts=dict(argstr="-outputtracts %s"), - pdf=dict(argstr="-pdf %s"), - seed_file=dict(argstr="-seedfile %s", extensions=None, position=2), - stepsize=dict(argstr="-stepsize %f", requires=["tracker"]), - tracker=dict(argstr="-tracker %s", usedefault=True), - voxel_dims=dict(argstr="-voxeldims %s", units="mm"), + argstr="-outputfile %s", + extensions=None, + genfile=True, + position=-1, + ), + output_root=dict( + argstr="-outputroot %s", + extensions=None, + position=-1, + ), + outputtracts=dict( + argstr="-outputtracts %s", + ), + pdf=dict( + argstr="-pdf %s", + ), + seed_file=dict( + argstr="-seedfile %s", + extensions=None, + position=2, + ), + stepsize=dict( + argstr="-stepsize %f", + requires=["tracker"], + ), + tracker=dict( + argstr="-tracker %s", + usedefault=True, + ), + voxel_dims=dict( + argstr="-voxeldims %s", + units="mm", + ), ) inputs = TrackPICo.input_spec() @@ -39,7 +105,11 @@ def test_TrackPICo_inputs(): def test_TrackPICo_outputs(): - output_map = dict(tracked=dict(extensions=None)) + output_map = dict( + tracked=dict( + extensions=None, + ), + ) outputs = TrackPICo.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_TractShredder.py b/nipype/interfaces/camino/tests/test_auto_TractShredder.py index 83146b1ca4..07678c5d64 100644 --- a/nipype/interfaces/camino/tests/test_auto_TractShredder.py +++ b/nipype/interfaces/camino/tests/test_auto_TractShredder.py @@ -4,13 +4,40 @@ def test_TractShredder_inputs(): input_map = dict( - args=dict(argstr="%s"), - bunchsize=dict(argstr="%d", position=2, units="NA"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="< %s", extensions=None, mandatory=True, position=-2), - offset=dict(argstr="%d", position=1, units="NA"), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), - space=dict(argstr="%d", position=3, units="NA"), + args=dict( + argstr="%s", + ), + bunchsize=dict( + argstr="%d", + position=2, + units="NA", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="< %s", + extensions=None, + mandatory=True, + position=-2, + ), + offset=dict( + argstr="%d", + position=1, + units="NA", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + space=dict( + argstr="%d", + position=3, + units="NA", + ), ) inputs = TractShredder.input_spec() @@ -20,7 +47,11 @@ def test_TractShredder_inputs(): def test_TractShredder_outputs(): - output_map = dict(shredded=dict(extensions=None)) + output_map = dict( + shredded=dict( + extensions=None, + ), + ) outputs = TractShredder.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py b/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py index d466743ba1..194f233cc1 
100644 --- a/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py +++ b/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py @@ -4,18 +4,58 @@ def test_VtkStreamlines_inputs(): input_map = dict( - args=dict(argstr="%s"), - colourorient=dict(argstr="-colourorient"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr=" < %s", extensions=None, mandatory=True, position=-2), - inputmodel=dict(argstr="-inputmodel %s", usedefault=True), - interpolate=dict(argstr="-interpolate"), - interpolatescalars=dict(argstr="-interpolatescalars"), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), - scalar_file=dict(argstr="-scalarfile %s", extensions=None, position=3), - seed_file=dict(argstr="-seedfile %s", extensions=None, position=1), - target_file=dict(argstr="-targetfile %s", extensions=None, position=2), - voxeldims=dict(argstr="-voxeldims %s", position=4, units="mm"), + args=dict( + argstr="%s", + ), + colourorient=dict( + argstr="-colourorient", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr=" < %s", + extensions=None, + mandatory=True, + position=-2, + ), + inputmodel=dict( + argstr="-inputmodel %s", + usedefault=True, + ), + interpolate=dict( + argstr="-interpolate", + ), + interpolatescalars=dict( + argstr="-interpolatescalars", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), + scalar_file=dict( + argstr="-scalarfile %s", + extensions=None, + position=3, + ), + seed_file=dict( + argstr="-seedfile %s", + extensions=None, + position=1, + ), + target_file=dict( + argstr="-targetfile %s", + extensions=None, + position=2, + ), + voxeldims=dict( + argstr="-voxeldims %s", + position=4, + units="mm", + ), ) inputs = VtkStreamlines.input_spec() @@ -25,7 +65,11 @@ def test_VtkStreamlines_inputs(): def test_VtkStreamlines_outputs(): - output_map = dict(vtk=dict(extensions=None)) + output_map = dict( + vtk=dict( + extensions=None, + ), + ) outputs = VtkStreamlines.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py b/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py index 1d4fe9d7ec..fb076c1107 100644 --- a/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py +++ b/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py @@ -4,16 +4,52 @@ def test_Camino2Trackvis_inputs(): input_map = dict( - args=dict(argstr="%s"), - data_dims=dict(argstr="-d %s", mandatory=True, position=4, sep=","), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=1), - min_length=dict(argstr="-l %d", position=3, units="mm"), - nifti_file=dict(argstr="--nifti %s", extensions=None, position=7), - out_file=dict(argstr="-o %s", extensions=None, genfile=True, position=2), - voxel_dims=dict(argstr="-x %s", mandatory=True, position=5, sep=","), + args=dict( + argstr="%s", + ), + data_dims=dict( + argstr="-d %s", + mandatory=True, + position=4, + sep=",", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + position=1, + ), + min_length=dict( + argstr="-l %d", + position=3, + units="mm", + ), + nifti_file=dict( + argstr="--nifti %s", + extensions=None, + position=7, + ), + out_file=dict( + argstr="-o %s", + extensions=None, + genfile=True, + position=2, + ), + voxel_dims=dict( + argstr="-x %s", + mandatory=True, + position=5, + 
sep=",", + ), voxel_order=dict( - argstr="--voxel-order %s", extensions=None, mandatory=True, position=6 + argstr="--voxel-order %s", + extensions=None, + mandatory=True, + position=6, ), ) inputs = Camino2Trackvis.input_spec() @@ -24,7 +60,11 @@ def test_Camino2Trackvis_inputs(): def test_Camino2Trackvis_outputs(): - output_map = dict(trackvis=dict(extensions=None)) + output_map = dict( + trackvis=dict( + extensions=None, + ), + ) outputs = Camino2Trackvis.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py b/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py index e06d93c713..ec7ed22d0c 100644 --- a/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py +++ b/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py @@ -4,11 +4,30 @@ def test_Trackvis2Camino_inputs(): input_map = dict( - append_file=dict(argstr="-a %s", extensions=None, position=2), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=1), - out_file=dict(argstr="-o %s", extensions=None, genfile=True, position=2), + append_file=dict( + argstr="-a %s", + extensions=None, + position=2, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + position=1, + ), + out_file=dict( + argstr="-o %s", + extensions=None, + genfile=True, + position=2, + ), ) inputs = Trackvis2Camino.input_spec() @@ -18,7 +37,11 @@ def test_Trackvis2Camino_inputs(): def test_Trackvis2Camino_outputs(): - output_map = dict(camino=dict(extensions=None)) + output_map = dict( + camino=dict( + extensions=None, + ), + ) outputs = Trackvis2Camino.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/cat12/tests/test_auto_CAT12SANLMDenoising.py b/nipype/interfaces/cat12/tests/test_auto_CAT12SANLMDenoising.py index 1ded9a16e7..43c0d5e4ea 100644 --- a/nipype/interfaces/cat12/tests/test_auto_CAT12SANLMDenoising.py +++ b/nipype/interfaces/cat12/tests/test_auto_CAT12SANLMDenoising.py @@ -4,20 +4,53 @@ def test_CAT12SANLMDenoising_inputs(): input_map = dict( - addnoise=dict(field="addnoise", usedefault=True), - filename_prefix=dict(field="prefix", usedefault=True), - filename_suffix=dict(field="suffix", usedefault=True), - in_files=dict(copyfile=False, field="data", mandatory=True), - intlim=dict(field="intlim", usedefault=True), + addnoise=dict( + field="addnoise", + usedefault=True, + ), + filename_prefix=dict( + field="prefix", + usedefault=True, + ), + filename_suffix=dict( + field="suffix", + usedefault=True, + ), + in_files=dict( + copyfile=False, + field="data", + mandatory=True, + ), + intlim=dict( + field="intlim", + usedefault=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), - noisecorr_strength=dict(field="nlmfilter.optimized.NCstr", usedefault=True), + mfile=dict( + usedefault=True, + ), + noisecorr_strength=dict( + field="nlmfilter.optimized.NCstr", + usedefault=True, + ), paths=dict(), - replace_nan_and_inf=dict(field="replaceNANandINF", usedefault=True), - rician=dict(field="rician", usedefault=True), - spm_type=dict(field="spm_type", usedefault=True), + replace_nan_and_inf=dict( + field="replaceNANandINF", + usedefault=True, + ), + rician=dict( + field="rician", + usedefault=True, + ), + spm_type=dict( + field="spm_type", + usedefault=True, + ), use_mcr=dict(), - 
use_v8struct=dict(min_ver="8", usedefault=True), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = CAT12SANLMDenoising.input_spec() @@ -27,7 +60,11 @@ def test_CAT12SANLMDenoising_inputs(): def test_CAT12SANLMDenoising_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = CAT12SANLMDenoising.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/cat12/tests/test_auto_CAT12Segment.py b/nipype/interfaces/cat12/tests/test_auto_CAT12Segment.py index 5ebbd6af82..979b3afa6b 100644 --- a/nipype/interfaces/cat12/tests/test_auto_CAT12Segment.py +++ b/nipype/interfaces/cat12/tests/test_auto_CAT12Segment.py @@ -4,44 +4,132 @@ def test_CAT12Segment_inputs(): input_map = dict( - affine_preprocessing=dict(field="extopts.APP", usedefault=True), - affine_regularization=dict(field="opts.affreg", usedefault=True), - cobra=dict(field="output.ROImenu.atlases.hammers", usedefault=True), - csf_output_dartel=dict(field="output.CSF.dartel", usedefault=True), - csf_output_modulated=dict(field="output.CSF.mod", usedefault=True), - csf_output_native=dict(field="output.CSF.native", usedefault=True), - gm_output_dartel=dict(field="output.GM.dartel", usedefault=True), - gm_output_modulated=dict(field="output.GM.mod", usedefault=True), - gm_output_native=dict(field="output.GM.native", usedefault=True), - hammers=dict(field="output.ROImenu.atlases.cobra", usedefault=True), - ignore_errors=dict(field="extopts.ignoreErrors", usedefault=True), - in_files=dict(copyfile=False, field="data", mandatory=True), - initial_segmentation=dict(field="extopts.spm_kamap", usedefault=True), + affine_preprocessing=dict( + field="extopts.APP", + usedefault=True, + ), + affine_regularization=dict( + field="opts.affreg", + usedefault=True, + ), + cobra=dict( + field="output.ROImenu.atlases.hammers", + usedefault=True, + ), + csf_output_dartel=dict( + field="output.CSF.dartel", + usedefault=True, + ), + csf_output_modulated=dict( + field="output.CSF.mod", + usedefault=True, + ), + csf_output_native=dict( + field="output.CSF.native", + usedefault=True, + ), + gm_output_dartel=dict( + field="output.GM.dartel", + usedefault=True, + ), + gm_output_modulated=dict( + field="output.GM.mod", + usedefault=True, + ), + gm_output_native=dict( + field="output.GM.native", + usedefault=True, + ), + hammers=dict( + field="output.ROImenu.atlases.cobra", + usedefault=True, + ), + ignore_errors=dict( + field="extopts.ignoreErrors", + usedefault=True, + ), + in_files=dict( + copyfile=False, + field="data", + mandatory=True, + ), + initial_segmentation=dict( + field="extopts.spm_kamap", + usedefault=True, + ), internal_resampling_process=dict( - field="extopts.restypes.optimal", maxlen=2, minlen=2, usedefault=True - ), - jacobianwarped=dict(field="output.jacobianwarped", usedefault=True), - label_dartel=dict(field="output.label.dartel", usedefault=True), - label_native=dict(field="output.label.native", usedefault=True), - label_warped=dict(field="output.label.warped", usedefault=True), - las_dartel=dict(field="output.las.dartel", usedefault=True), - las_native=dict(field="output.las.native", usedefault=True), - las_warped=dict(field="output.las.warped", usedefault=True), - local_adaptive_seg=dict(field="extopts.LASstr", usedefault=True), - lpba40=dict(field="output.ROImenu.atlases.lpba40", usedefault=True), + field="extopts.restypes.optimal", + maxlen=2, + minlen=2, + usedefault=True, + ), + jacobianwarped=dict( + 
field="output.jacobianwarped", + usedefault=True, + ), + label_dartel=dict( + field="output.label.dartel", + usedefault=True, + ), + label_native=dict( + field="output.label.native", + usedefault=True, + ), + label_warped=dict( + field="output.label.warped", + usedefault=True, + ), + las_dartel=dict( + field="output.las.dartel", + usedefault=True, + ), + las_native=dict( + field="output.las.native", + usedefault=True, + ), + las_warped=dict( + field="output.las.warped", + usedefault=True, + ), + local_adaptive_seg=dict( + field="extopts.LASstr", + usedefault=True, + ), + lpba40=dict( + field="output.ROImenu.atlases.lpba40", + usedefault=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), - n_jobs=dict(field="nproc", mandatory=True, usedefault=True), + mfile=dict( + usedefault=True, + ), + n_jobs=dict( + field="nproc", + mandatory=True, + usedefault=True, + ), neuromorphometrics=dict( - field="output.ROImenu.atlases.neuromorphometrics", usedefault=True + field="output.ROImenu.atlases.neuromorphometrics", + usedefault=True, + ), + output_labelnative=dict( + field="output.labelnative", + usedefault=True, ), - output_labelnative=dict(field="output.labelnative", usedefault=True), own_atlas=dict( - copyfile=False, field="output.ROImenu.atlases.ownatlas", mandatory=False + copyfile=False, + field="output.ROImenu.atlases.ownatlas", + mandatory=False, ), paths=dict(), - power_spm_inhomogeneity_correction=dict(field="opts.biasacc", usedefault=True), - save_bias_corrected=dict(field="output.bias.warped", usedefault=True), + power_spm_inhomogeneity_correction=dict( + field="opts.biasacc", + usedefault=True, + ), + save_bias_corrected=dict( + field="output.bias.warped", + usedefault=True, + ), shooting_tpm=dict( copyfile=False, extensions=[".hdr", ".img", ".img.gz", ".nii"], @@ -68,18 +156,54 @@ def test_CAT12Segment_inputs(): extensions=[".hdr", ".img", ".img.gz", ".nii"], mandatory=False, ), - skull_strip=dict(field="extopts.gcutstr", usedefault=True), - surface_and_thickness_estimation=dict(field="surface", usedefault=True), - surface_measures=dict(field="output.surf_measures", usedefault=True), - tpm=dict(copyfile=False, field="tpm", mandatory=False), + skull_strip=dict( + field="extopts.gcutstr", + usedefault=True, + ), + surface_and_thickness_estimation=dict( + field="surface", + usedefault=True, + ), + surface_measures=dict( + field="output.surf_measures", + usedefault=True, + ), + tpm=dict( + copyfile=False, + field="tpm", + mandatory=False, + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), - voxel_size=dict(field="extopts.vox", usedefault=True), - warps=dict(field="output.warps", maxlen=2, minlen=2, usedefault=True), - wm_hyper_intensity_correction=dict(field="extopts.WMHC", usedefault=True), - wm_output_dartel=dict(field="output.WM.dartel", usedefault=True), - wm_output_modulated=dict(field="output.WM.mod", usedefault=True), - wm_output_native=dict(field="output.WM.native", usedefault=True), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + voxel_size=dict( + field="extopts.vox", + usedefault=True, + ), + warps=dict( + field="output.warps", + maxlen=2, + minlen=2, + usedefault=True, + ), + wm_hyper_intensity_correction=dict( + field="extopts.WMHC", + usedefault=True, + ), + wm_output_dartel=dict( + field="output.WM.dartel", + usedefault=True, + ), + wm_output_modulated=dict( + field="output.WM.mod", + usedefault=True, + ), + wm_output_native=dict( + field="output.WM.native", + usedefault=True, + ), ) inputs = CAT12Segment.input_spec() @@ -90,27 
+214,61 @@ def test_CAT12Segment_inputs(): def test_CAT12Segment_outputs(): output_map = dict( - bias_corrected_image=dict(extensions=None), - csf_dartel_image=dict(extensions=None), - csf_modulated_image=dict(extensions=None), - csf_native_image=dict(extensions=None), - gm_dartel_image=dict(extensions=None), - gm_modulated_image=dict(extensions=None), - gm_native_image=dict(extensions=None), + bias_corrected_image=dict( + extensions=None, + ), + csf_dartel_image=dict( + extensions=None, + ), + csf_modulated_image=dict( + extensions=None, + ), + csf_native_image=dict( + extensions=None, + ), + gm_dartel_image=dict( + extensions=None, + ), + gm_modulated_image=dict( + extensions=None, + ), + gm_native_image=dict( + extensions=None, + ), label_files=dict(), - label_roi=dict(extensions=None), - label_rois=dict(extensions=None), - lh_central_surface=dict(extensions=None), - lh_sphere_surface=dict(extensions=None), + label_roi=dict( + extensions=None, + ), + label_rois=dict( + extensions=None, + ), + lh_central_surface=dict( + extensions=None, + ), + lh_sphere_surface=dict( + extensions=None, + ), mri_images=dict(), - report=dict(extensions=None), + report=dict( + extensions=None, + ), report_files=dict(), - rh_central_surface=dict(extensions=None), - rh_sphere_surface=dict(extensions=None), + rh_central_surface=dict( + extensions=None, + ), + rh_sphere_surface=dict( + extensions=None, + ), surface_files=dict(), - wm_dartel_image=dict(extensions=None), - wm_modulated_image=dict(extensions=None), - wm_native_image=dict(extensions=None), + wm_dartel_image=dict( + extensions=None, + ), + wm_modulated_image=dict( + extensions=None, + ), + wm_native_image=dict( + extensions=None, + ), ) outputs = CAT12Segment.output_spec() diff --git a/nipype/interfaces/cat12/tests/test_auto_ExtractAdditionalSurfaceParameters.py b/nipype/interfaces/cat12/tests/test_auto_ExtractAdditionalSurfaceParameters.py index 2be548cbd8..cde7f2057e 100644 --- a/nipype/interfaces/cat12/tests/test_auto_ExtractAdditionalSurfaceParameters.py +++ b/nipype/interfaces/cat12/tests/test_auto_ExtractAdditionalSurfaceParameters.py @@ -4,18 +4,45 @@ def test_ExtractAdditionalSurfaceParameters_inputs(): input_map = dict( - area=dict(field="area", usedefault=True), - depth=dict(field="SD", usedefault=True), - fractal_dimension=dict(field="FD", usedefault=True), - gmv=dict(field="gmv", usedefault=True), - gyrification=dict(field="GI", usedefault=True), - left_central_surfaces=dict(copyfile=False, field="data_surf", mandatory=True), + area=dict( + field="area", + usedefault=True, + ), + depth=dict( + field="SD", + usedefault=True, + ), + fractal_dimension=dict( + field="FD", + usedefault=True, + ), + gmv=dict( + field="gmv", + usedefault=True, + ), + gyrification=dict( + field="GI", + usedefault=True, + ), + left_central_surfaces=dict( + copyfile=False, + field="data_surf", + mandatory=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), + mfile=dict( + usedefault=True, + ), paths=dict(), - surface_files=dict(copyfile=False, mandatory=False), + surface_files=dict( + copyfile=False, + mandatory=False, + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = ExtractAdditionalSurfaceParameters.input_spec() diff --git a/nipype/interfaces/cat12/tests/test_auto_ExtractROIBasedSurfaceMeasures.py b/nipype/interfaces/cat12/tests/test_auto_ExtractROIBasedSurfaceMeasures.py index 9c8d5f994a..ffc18324aa 100644 --- 
a/nipype/interfaces/cat12/tests/test_auto_ExtractROIBasedSurfaceMeasures.py +++ b/nipype/interfaces/cat12/tests/test_auto_ExtractROIBasedSurfaceMeasures.py @@ -4,16 +4,38 @@ def test_ExtractROIBasedSurfaceMeasures_inputs(): input_map = dict( - lh_roi_atlas=dict(copyfile=False, field="rdata", mandatory=True), - lh_surface_measure=dict(copyfile=False, field="cdata", mandatory=True), + lh_roi_atlas=dict( + copyfile=False, + field="rdata", + mandatory=True, + ), + lh_surface_measure=dict( + copyfile=False, + field="cdata", + mandatory=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), + mfile=dict( + usedefault=True, + ), paths=dict(), - rh_roi_atlas=dict(copyfile=False, mandatory=False), - rh_surface_measure=dict(copyfile=False, mandatory=False), - surface_files=dict(copyfile=False, mandatory=False), + rh_roi_atlas=dict( + copyfile=False, + mandatory=False, + ), + rh_surface_measure=dict( + copyfile=False, + mandatory=False, + ), + surface_files=dict( + copyfile=False, + mandatory=False, + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = ExtractROIBasedSurfaceMeasures.input_spec() @@ -23,7 +45,9 @@ def test_ExtractROIBasedSurfaceMeasures_inputs(): def test_ExtractROIBasedSurfaceMeasures_outputs(): - output_map = dict(label_files=dict()) + output_map = dict( + label_files=dict(), + ) outputs = ExtractROIBasedSurfaceMeasures.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py b/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py index 6f78f0c2cb..e115acad83 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py +++ b/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py @@ -4,11 +4,21 @@ def test_AverageNetworks_inputs(): input_map = dict( - group_id=dict(usedefault=True), - in_files=dict(mandatory=True), - out_gexf_groupavg=dict(extensions=None), - out_gpickled_groupavg=dict(extensions=None), - resolution_network_file=dict(extensions=None), + group_id=dict( + usedefault=True, + ), + in_files=dict( + mandatory=True, + ), + out_gexf_groupavg=dict( + extensions=None, + ), + out_gpickled_groupavg=dict( + extensions=None, + ), + resolution_network_file=dict( + extensions=None, + ), ) inputs = AverageNetworks.input_spec() @@ -19,8 +29,12 @@ def test_AverageNetworks_inputs(): def test_AverageNetworks_outputs(): output_map = dict( - gexf_groupavg=dict(extensions=None), - gpickled_groupavg=dict(extensions=None), + gexf_groupavg=dict( + extensions=None, + ), + gpickled_groupavg=dict( + extensions=None, + ), matlab_groupavgs=dict(), ) outputs = AverageNetworks.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py b/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py index 1482d8adb1..ee7b0459ef 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py +++ b/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py @@ -6,7 +6,9 @@ def test_CFFConverter_inputs(): input_map = dict( creator=dict(), data_files=dict(), - description=dict(usedefault=True), + description=dict( + usedefault=True, + ), email=dict(), gifti_labels=dict(), gifti_surfaces=dict(), @@ -14,13 +16,18 @@ def test_CFFConverter_inputs(): graphml_networks=dict(), license=dict(), nifti_volumes=dict(), - out_file=dict(extensions=None, usedefault=True), + out_file=dict( + extensions=None, + usedefault=True, + ), publisher=dict(), references=dict(), relation=dict(), rights=dict(), script_files=dict(), - 
species=dict(usedefault=True), + species=dict( + usedefault=True, + ), timeseries_files=dict(), title=dict(), tract_files=dict(), @@ -33,7 +40,11 @@ def test_CFFConverter_inputs(): def test_CFFConverter_outputs(): - output_map = dict(connectome_file=dict(extensions=None)) + output_map = dict( + connectome_file=dict( + extensions=None, + ), + ) outputs = CFFConverter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py b/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py index d426213f1d..a9466f91be 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py +++ b/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py @@ -4,17 +4,49 @@ def test_CreateMatrix_inputs(): input_map = dict( - count_region_intersections=dict(usedefault=True), - out_endpoint_array_name=dict(extensions=None, genfile=True), - out_fiber_length_std_matrix_mat_file=dict(extensions=None, genfile=True), - out_intersection_matrix_mat_file=dict(extensions=None, genfile=True), - out_matrix_file=dict(extensions=None, genfile=True), - out_matrix_mat_file=dict(extensions=None, usedefault=True), - out_mean_fiber_length_matrix_mat_file=dict(extensions=None, genfile=True), - out_median_fiber_length_matrix_mat_file=dict(extensions=None, genfile=True), - resolution_network_file=dict(extensions=None, mandatory=True), - roi_file=dict(extensions=None, mandatory=True), - tract_file=dict(extensions=None, mandatory=True), + count_region_intersections=dict( + usedefault=True, + ), + out_endpoint_array_name=dict( + extensions=None, + genfile=True, + ), + out_fiber_length_std_matrix_mat_file=dict( + extensions=None, + genfile=True, + ), + out_intersection_matrix_mat_file=dict( + extensions=None, + genfile=True, + ), + out_matrix_file=dict( + extensions=None, + genfile=True, + ), + out_matrix_mat_file=dict( + extensions=None, + usedefault=True, + ), + out_mean_fiber_length_matrix_mat_file=dict( + extensions=None, + genfile=True, + ), + out_median_fiber_length_matrix_mat_file=dict( + extensions=None, + genfile=True, + ), + resolution_network_file=dict( + extensions=None, + mandatory=True, + ), + roi_file=dict( + extensions=None, + mandatory=True, + ), + tract_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = CreateMatrix.input_spec() @@ -25,24 +57,54 @@ def test_CreateMatrix_inputs(): def test_CreateMatrix_outputs(): output_map = dict( - endpoint_file=dict(extensions=None), - endpoint_file_mm=dict(extensions=None), - fiber_label_file=dict(extensions=None), - fiber_labels_noorphans=dict(extensions=None), - fiber_length_file=dict(extensions=None), - fiber_length_std_matrix_mat_file=dict(extensions=None), + endpoint_file=dict( + extensions=None, + ), + endpoint_file_mm=dict( + extensions=None, + ), + fiber_label_file=dict( + extensions=None, + ), + fiber_labels_noorphans=dict( + extensions=None, + ), + fiber_length_file=dict( + extensions=None, + ), + fiber_length_std_matrix_mat_file=dict( + extensions=None, + ), filtered_tractographies=dict(), - filtered_tractography=dict(extensions=None), - filtered_tractography_by_intersections=dict(extensions=None), - intersection_matrix_file=dict(extensions=None), - intersection_matrix_mat_file=dict(extensions=None), + filtered_tractography=dict( + extensions=None, + ), + filtered_tractography_by_intersections=dict( + extensions=None, + ), + intersection_matrix_file=dict( + extensions=None, + ), + intersection_matrix_mat_file=dict( + extensions=None, + ), matlab_matrix_files=dict(), - 
matrix_file=dict(extensions=None), + matrix_file=dict( + extensions=None, + ), matrix_files=dict(), - matrix_mat_file=dict(extensions=None), - mean_fiber_length_matrix_mat_file=dict(extensions=None), - median_fiber_length_matrix_mat_file=dict(extensions=None), - stats_file=dict(extensions=None), + matrix_mat_file=dict( + extensions=None, + ), + mean_fiber_length_matrix_mat_file=dict( + extensions=None, + ), + median_fiber_length_matrix_mat_file=dict( + extensions=None, + ), + stats_file=dict( + extensions=None, + ), ) outputs = CreateMatrix.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py b/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py index 71f3e9a395..f88950d758 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py +++ b/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py @@ -4,9 +4,18 @@ def test_CreateNodes_inputs(): input_map = dict( - out_filename=dict(extensions=None, usedefault=True), - resolution_network_file=dict(extensions=None, mandatory=True), - roi_file=dict(extensions=None, mandatory=True), + out_filename=dict( + extensions=None, + usedefault=True, + ), + resolution_network_file=dict( + extensions=None, + mandatory=True, + ), + roi_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = CreateNodes.input_spec() @@ -16,7 +25,11 @@ def test_CreateNodes_inputs(): def test_CreateNodes_outputs(): - output_map = dict(node_network=dict(extensions=None)) + output_map = dict( + node_network=dict( + extensions=None, + ), + ) outputs = CreateNodes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py b/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py index f1d74aa5c4..17f8990a08 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py +++ b/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py @@ -4,7 +4,13 @@ def test_MergeCNetworks_inputs(): input_map = dict( - in_files=dict(mandatory=True), out_file=dict(extensions=None, usedefault=True) + in_files=dict( + mandatory=True, + ), + out_file=dict( + extensions=None, + usedefault=True, + ), ) inputs = MergeCNetworks.input_spec() @@ -14,7 +20,11 @@ def test_MergeCNetworks_inputs(): def test_MergeCNetworks_outputs(): - output_map = dict(connectome_file=dict(extensions=None)) + output_map = dict( + connectome_file=dict( + extensions=None, + ), + ) outputs = MergeCNetworks.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py b/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py index af744db17a..975e4741cd 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py +++ b/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py @@ -4,15 +4,33 @@ def test_NetworkBasedStatistic_inputs(): input_map = dict( - edge_key=dict(usedefault=True), - in_group1=dict(mandatory=True), - in_group2=dict(mandatory=True), - node_position_network=dict(extensions=None), - number_of_permutations=dict(usedefault=True), - out_nbs_network=dict(extensions=None), - out_nbs_pval_network=dict(extensions=None), - t_tail=dict(usedefault=True), - threshold=dict(usedefault=True), + edge_key=dict( + usedefault=True, + ), + in_group1=dict( + mandatory=True, + ), + in_group2=dict( + mandatory=True, + ), + node_position_network=dict( + extensions=None, + ), + number_of_permutations=dict( + usedefault=True, + ), + out_nbs_network=dict( + extensions=None, + ), + out_nbs_pval_network=dict( + 
extensions=None, + ), + t_tail=dict( + usedefault=True, + ), + threshold=dict( + usedefault=True, + ), ) inputs = NetworkBasedStatistic.input_spec() @@ -23,8 +41,12 @@ def test_NetworkBasedStatistic_inputs(): def test_NetworkBasedStatistic_outputs(): output_map = dict( - nbs_network=dict(extensions=None), - nbs_pval_network=dict(extensions=None), + nbs_network=dict( + extensions=None, + ), + nbs_pval_network=dict( + extensions=None, + ), network_files=dict(), ) outputs = NetworkBasedStatistic.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py b/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py index 6b920cda30..d171e6ab7a 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py +++ b/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py @@ -4,16 +4,44 @@ def test_NetworkXMetrics_inputs(): input_map = dict( - compute_clique_related_measures=dict(usedefault=True), - in_file=dict(extensions=None, mandatory=True), - out_edge_metrics_matlab=dict(extensions=None, genfile=True), - out_global_metrics_matlab=dict(extensions=None, genfile=True), - out_k_core=dict(extensions=None, usedefault=True), - out_k_crust=dict(extensions=None, usedefault=True), - out_k_shell=dict(extensions=None, usedefault=True), - out_node_metrics_matlab=dict(extensions=None, genfile=True), - out_pickled_extra_measures=dict(extensions=None, usedefault=True), - treat_as_weighted_graph=dict(usedefault=True), + compute_clique_related_measures=dict( + usedefault=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + out_edge_metrics_matlab=dict( + extensions=None, + genfile=True, + ), + out_global_metrics_matlab=dict( + extensions=None, + genfile=True, + ), + out_k_core=dict( + extensions=None, + usedefault=True, + ), + out_k_crust=dict( + extensions=None, + usedefault=True, + ), + out_k_shell=dict( + extensions=None, + usedefault=True, + ), + out_node_metrics_matlab=dict( + extensions=None, + genfile=True, + ), + out_pickled_extra_measures=dict( + extensions=None, + usedefault=True, + ), + treat_as_weighted_graph=dict( + usedefault=True, + ), ) inputs = NetworkXMetrics.input_spec() @@ -25,18 +53,32 @@ def test_NetworkXMetrics_inputs(): def test_NetworkXMetrics_outputs(): output_map = dict( edge_measure_networks=dict(), - edge_measures_matlab=dict(extensions=None), - global_measures_matlab=dict(extensions=None), + edge_measures_matlab=dict( + extensions=None, + ), + global_measures_matlab=dict( + extensions=None, + ), gpickled_network_files=dict(), - k_core=dict(extensions=None), - k_crust=dict(extensions=None), + k_core=dict( + extensions=None, + ), + k_crust=dict( + extensions=None, + ), k_networks=dict(), - k_shell=dict(extensions=None), + k_shell=dict( + extensions=None, + ), matlab_dict_measures=dict(), matlab_matrix_files=dict(), node_measure_networks=dict(), - node_measures_matlab=dict(extensions=None), - pickled_extra_measures=dict(extensions=None), + node_measures_matlab=dict( + extensions=None, + ), + pickled_extra_measures=dict( + extensions=None, + ), ) outputs = NetworkXMetrics.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py b/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py index 5d056eced0..800b5b516b 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py +++ b/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py @@ -4,11 +4,20 @@ def test_Parcellate_inputs(): input_map = dict( - dilation=dict(usedefault=True), + dilation=dict( + usedefault=True, + ), freesurfer_dir=dict(), - 
out_roi_file=dict(extensions=None, genfile=True), - parcellation_name=dict(usedefault=True), - subject_id=dict(mandatory=True), + out_roi_file=dict( + extensions=None, + genfile=True, + ), + parcellation_name=dict( + usedefault=True, + ), + subject_id=dict( + mandatory=True, + ), subjects_dir=dict(), ) inputs = Parcellate.input_spec() @@ -20,14 +29,30 @@ def test_Parcellate_inputs(): def test_Parcellate_outputs(): output_map = dict( - aseg_file=dict(extensions=None), - cc_unknown_file=dict(extensions=None), - dilated_roi_file_in_structural_space=dict(extensions=None), - ribbon_file=dict(extensions=None), - roi_file=dict(extensions=None), - roi_file_in_structural_space=dict(extensions=None), - roiv_file=dict(extensions=None), - white_matter_mask_file=dict(extensions=None), + aseg_file=dict( + extensions=None, + ), + cc_unknown_file=dict( + extensions=None, + ), + dilated_roi_file_in_structural_space=dict( + extensions=None, + ), + ribbon_file=dict( + extensions=None, + ), + roi_file=dict( + extensions=None, + ), + roi_file_in_structural_space=dict( + extensions=None, + ), + roiv_file=dict( + extensions=None, + ), + white_matter_mask_file=dict( + extensions=None, + ), ) outputs = Parcellate.output_spec() diff --git a/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py b/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py index 750a927f26..54fd9e46e9 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py +++ b/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py @@ -4,12 +4,28 @@ def test_ROIGen_inputs(): input_map = dict( - LUT_file=dict(extensions=None, xor=["use_freesurfer_LUT"]), - aparc_aseg_file=dict(extensions=None, mandatory=True), - freesurfer_dir=dict(requires=["use_freesurfer_LUT"]), - out_dict_file=dict(extensions=None, genfile=True), - out_roi_file=dict(extensions=None, genfile=True), - use_freesurfer_LUT=dict(xor=["LUT_file"]), + LUT_file=dict( + extensions=None, + xor=["use_freesurfer_LUT"], + ), + aparc_aseg_file=dict( + extensions=None, + mandatory=True, + ), + freesurfer_dir=dict( + requires=["use_freesurfer_LUT"], + ), + out_dict_file=dict( + extensions=None, + genfile=True, + ), + out_roi_file=dict( + extensions=None, + genfile=True, + ), + use_freesurfer_LUT=dict( + xor=["LUT_file"], + ), ) inputs = ROIGen.input_spec() @@ -19,7 +35,14 @@ def test_ROIGen_inputs(): def test_ROIGen_outputs(): - output_map = dict(dict_file=dict(extensions=None), roi_file=dict(extensions=None)) + output_map = dict( + dict_file=dict( + extensions=None, + ), + roi_file=dict( + extensions=None, + ), + ) outputs = ROIGen.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py index 95019cbf6a..f7bf46f327 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py @@ -4,17 +4,49 @@ def test_DTIRecon_inputs(): input_map = dict( - DWI=dict(argstr="%s", extensions=None, mandatory=True, position=1), - args=dict(argstr="%s"), - b0_threshold=dict(argstr="-b0_th"), - bvals=dict(extensions=None, mandatory=True), - bvecs=dict(argstr="-gm %s", extensions=None, mandatory=True), - environ=dict(nohash=True, usedefault=True), - image_orientation_vectors=dict(argstr="-iop %f"), - n_averages=dict(argstr="-nex %s"), - oblique_correction=dict(argstr="-oc"), - out_prefix=dict(argstr="%s", position=2, usedefault=True), - output_type=dict(argstr="-ot %s", usedefault=True), + DWI=dict( + 
argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + args=dict( + argstr="%s", + ), + b0_threshold=dict( + argstr="-b0_th", + ), + bvals=dict( + extensions=None, + mandatory=True, + ), + bvecs=dict( + argstr="-gm %s", + extensions=None, + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + image_orientation_vectors=dict( + argstr="-iop %f", + ), + n_averages=dict( + argstr="-nex %s", + ), + oblique_correction=dict( + argstr="-oc", + ), + out_prefix=dict( + argstr="%s", + position=2, + usedefault=True, + ), + output_type=dict( + argstr="-ot %s", + usedefault=True, + ), ) inputs = DTIRecon.input_spec() @@ -25,18 +57,42 @@ def test_DTIRecon_inputs(): def test_DTIRecon_outputs(): output_map = dict( - ADC=dict(extensions=None), - B0=dict(extensions=None), - FA=dict(extensions=None), - FA_color=dict(extensions=None), - L1=dict(extensions=None), - L2=dict(extensions=None), - L3=dict(extensions=None), - V1=dict(extensions=None), - V2=dict(extensions=None), - V3=dict(extensions=None), - exp=dict(extensions=None), - tensor=dict(extensions=None), + ADC=dict( + extensions=None, + ), + B0=dict( + extensions=None, + ), + FA=dict( + extensions=None, + ), + FA_color=dict( + extensions=None, + ), + L1=dict( + extensions=None, + ), + L2=dict( + extensions=None, + ), + L3=dict( + extensions=None, + ), + V1=dict( + extensions=None, + ), + V2=dict( + extensions=None, + ), + V3=dict( + extensions=None, + ), + exp=dict( + extensions=None, + ), + tensor=dict( + extensions=None, + ), ) outputs = DTIRecon.output_spec() diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py index 872b9c6b2e..e550bc4b27 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py @@ -4,29 +4,87 @@ def test_DTITracker_inputs(): input_map = dict( - angle_threshold=dict(argstr="-at %f"), - angle_threshold_weight=dict(argstr="-atw %f"), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - input_data_prefix=dict(argstr="%s", position=0, usedefault=True), - input_type=dict(argstr="-it %s"), - invert_x=dict(argstr="-ix"), - invert_y=dict(argstr="-iy"), - invert_z=dict(argstr="-iz"), - mask1_file=dict(argstr="-m %s", extensions=None, mandatory=True, position=2), - mask1_threshold=dict(position=3), - mask2_file=dict(argstr="-m2 %s", extensions=None, position=4), - mask2_threshold=dict(position=5), - output_file=dict(argstr="%s", extensions=None, position=1, usedefault=True), - output_mask=dict(argstr="-om %s", extensions=None), - primary_vector=dict(argstr="-%s"), - random_seed=dict(argstr="-rseed %d"), - step_length=dict(argstr="-l %f"), - swap_xy=dict(argstr="-sxy"), - swap_yz=dict(argstr="-syz"), - swap_zx=dict(argstr="-szx"), - tensor_file=dict(extensions=None), - tracking_method=dict(argstr="-%s"), + angle_threshold=dict( + argstr="-at %f", + ), + angle_threshold_weight=dict( + argstr="-atw %f", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_data_prefix=dict( + argstr="%s", + position=0, + usedefault=True, + ), + input_type=dict( + argstr="-it %s", + ), + invert_x=dict( + argstr="-ix", + ), + invert_y=dict( + argstr="-iy", + ), + invert_z=dict( + argstr="-iz", + ), + mask1_file=dict( + argstr="-m %s", + extensions=None, + mandatory=True, + position=2, + ), + mask1_threshold=dict( + position=3, + ), + mask2_file=dict( + argstr="-m2 %s", + 
extensions=None, + position=4, + ), + mask2_threshold=dict( + position=5, + ), + output_file=dict( + argstr="%s", + extensions=None, + position=1, + usedefault=True, + ), + output_mask=dict( + argstr="-om %s", + extensions=None, + ), + primary_vector=dict( + argstr="-%s", + ), + random_seed=dict( + argstr="-rseed %d", + ), + step_length=dict( + argstr="-l %f", + ), + swap_xy=dict( + argstr="-sxy", + ), + swap_yz=dict( + argstr="-syz", + ), + swap_zx=dict( + argstr="-szx", + ), + tensor_file=dict( + extensions=None, + ), + tracking_method=dict( + argstr="-%s", + ), ) inputs = DTITracker.input_spec() @@ -36,7 +94,14 @@ def test_DTITracker_inputs(): def test_DTITracker_outputs(): - output_map = dict(mask_file=dict(extensions=None), track_file=dict(extensions=None)) + output_map = dict( + mask_file=dict( + extensions=None, + ), + track_file=dict( + extensions=None, + ), + ) outputs = DTITracker.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py index cf68542831..a933495672 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py @@ -4,17 +4,50 @@ def test_HARDIMat_inputs(): input_map = dict( - args=dict(argstr="%s"), - bvals=dict(extensions=None, mandatory=True), - bvecs=dict(argstr="%s", extensions=None, mandatory=True, position=1), - environ=dict(nohash=True, usedefault=True), - image_info=dict(argstr="-info %s", extensions=None), - image_orientation_vectors=dict(argstr="-iop %f"), - oblique_correction=dict(argstr="-oc"), - odf_file=dict(argstr="-odf %s", extensions=None), - order=dict(argstr="-order %s"), - out_file=dict(argstr="%s", extensions=None, position=2, usedefault=True), - reference_file=dict(argstr="-ref %s", extensions=None), + args=dict( + argstr="%s", + ), + bvals=dict( + extensions=None, + mandatory=True, + ), + bvecs=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + image_info=dict( + argstr="-info %s", + extensions=None, + ), + image_orientation_vectors=dict( + argstr="-iop %f", + ), + oblique_correction=dict( + argstr="-oc", + ), + odf_file=dict( + argstr="-odf %s", + extensions=None, + ), + order=dict( + argstr="-order %s", + ), + out_file=dict( + argstr="%s", + extensions=None, + position=2, + usedefault=True, + ), + reference_file=dict( + argstr="-ref %s", + extensions=None, + ), ) inputs = HARDIMat.input_spec() @@ -24,7 +57,11 @@ def test_HARDIMat_inputs(): def test_HARDIMat_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = HARDIMat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py index ccfc2e4170..b6a18aaf77 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py @@ -4,22 +4,68 @@ def test_ODFRecon_inputs(): input_map = dict( - DWI=dict(argstr="%s", extensions=None, mandatory=True, position=1), - args=dict(argstr="%s"), - dsi=dict(argstr="-dsi"), - environ=dict(nohash=True, usedefault=True), - filter=dict(argstr="-f"), - image_orientation_vectors=dict(argstr="-iop %f"), - matrix=dict(argstr="-mat %s", extensions=None, 
mandatory=True), - n_b0=dict(argstr="-b0 %s", mandatory=True), - n_directions=dict(argstr="%s", mandatory=True, position=2), - n_output_directions=dict(argstr="%s", mandatory=True, position=3), - oblique_correction=dict(argstr="-oc"), - out_prefix=dict(argstr="%s", position=4, usedefault=True), - output_entropy=dict(argstr="-oe"), - output_type=dict(argstr="-ot %s", usedefault=True), - sharpness=dict(argstr="-s %f"), - subtract_background=dict(argstr="-bg"), + DWI=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + args=dict( + argstr="%s", + ), + dsi=dict( + argstr="-dsi", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + filter=dict( + argstr="-f", + ), + image_orientation_vectors=dict( + argstr="-iop %f", + ), + matrix=dict( + argstr="-mat %s", + extensions=None, + mandatory=True, + ), + n_b0=dict( + argstr="-b0 %s", + mandatory=True, + ), + n_directions=dict( + argstr="%s", + mandatory=True, + position=2, + ), + n_output_directions=dict( + argstr="%s", + mandatory=True, + position=3, + ), + oblique_correction=dict( + argstr="-oc", + ), + out_prefix=dict( + argstr="%s", + position=4, + usedefault=True, + ), + output_entropy=dict( + argstr="-oe", + ), + output_type=dict( + argstr="-ot %s", + usedefault=True, + ), + sharpness=dict( + argstr="-s %f", + ), + subtract_background=dict( + argstr="-bg", + ), ) inputs = ODFRecon.input_spec() @@ -30,11 +76,21 @@ def test_ODFRecon_inputs(): def test_ODFRecon_outputs(): output_map = dict( - B0=dict(extensions=None), - DWI=dict(extensions=None), - ODF=dict(extensions=None), - entropy=dict(extensions=None), - max=dict(extensions=None), + B0=dict( + extensions=None, + ), + DWI=dict( + extensions=None, + ), + ODF=dict( + extensions=None, + ), + entropy=dict( + extensions=None, + ), + max=dict( + extensions=None, + ), ) outputs = ODFRecon.output_spec() diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py index e335036ecc..2118745f3f 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py @@ -4,33 +4,101 @@ def test_ODFTracker_inputs(): input_map = dict( - ODF=dict(extensions=None, mandatory=True), - angle_threshold=dict(argstr="-at %f"), - args=dict(argstr="%s"), - disc=dict(argstr="-disc"), - dsi=dict(argstr="-dsi"), - environ=dict(nohash=True, usedefault=True), - image_orientation_vectors=dict(argstr="-iop %f"), - input_data_prefix=dict(argstr="%s", position=0, usedefault=True), - input_output_type=dict(argstr="-it %s", usedefault=True), - invert_x=dict(argstr="-ix"), - invert_y=dict(argstr="-iy"), - invert_z=dict(argstr="-iz"), - limit=dict(argstr="-limit %d"), - mask1_file=dict(argstr="-m %s", extensions=None, mandatory=True, position=2), - mask1_threshold=dict(position=3), - mask2_file=dict(argstr="-m2 %s", extensions=None, position=4), - mask2_threshold=dict(position=5), - max=dict(extensions=None, mandatory=True), - out_file=dict(argstr="%s", extensions=None, position=1, usedefault=True), - random_seed=dict(argstr="-rseed %s"), - runge_kutta2=dict(argstr="-rk2"), - slice_order=dict(argstr="-sorder %d"), - step_length=dict(argstr="-l %f"), - swap_xy=dict(argstr="-sxy"), - swap_yz=dict(argstr="-syz"), - swap_zx=dict(argstr="-szx"), - voxel_order=dict(argstr="-vorder %s"), + ODF=dict( + extensions=None, + mandatory=True, + ), + angle_threshold=dict( + argstr="-at %f", + ), + args=dict( + argstr="%s", + ), + disc=dict( + 
argstr="-disc", + ), + dsi=dict( + argstr="-dsi", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + image_orientation_vectors=dict( + argstr="-iop %f", + ), + input_data_prefix=dict( + argstr="%s", + position=0, + usedefault=True, + ), + input_output_type=dict( + argstr="-it %s", + usedefault=True, + ), + invert_x=dict( + argstr="-ix", + ), + invert_y=dict( + argstr="-iy", + ), + invert_z=dict( + argstr="-iz", + ), + limit=dict( + argstr="-limit %d", + ), + mask1_file=dict( + argstr="-m %s", + extensions=None, + mandatory=True, + position=2, + ), + mask1_threshold=dict( + position=3, + ), + mask2_file=dict( + argstr="-m2 %s", + extensions=None, + position=4, + ), + mask2_threshold=dict( + position=5, + ), + max=dict( + extensions=None, + mandatory=True, + ), + out_file=dict( + argstr="%s", + extensions=None, + position=1, + usedefault=True, + ), + random_seed=dict( + argstr="-rseed %s", + ), + runge_kutta2=dict( + argstr="-rk2", + ), + slice_order=dict( + argstr="-sorder %d", + ), + step_length=dict( + argstr="-l %f", + ), + swap_xy=dict( + argstr="-sxy", + ), + swap_yz=dict( + argstr="-syz", + ), + swap_zx=dict( + argstr="-szx", + ), + voxel_order=dict( + argstr="-vorder %s", + ), ) inputs = ODFTracker.input_spec() @@ -40,7 +108,11 @@ def test_ODFTracker_inputs(): def test_ODFTracker_outputs(): - output_map = dict(track_file=dict(extensions=None)) + output_map = dict( + track_file=dict( + extensions=None, + ), + ) outputs = ODFTracker.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py index 60b35bbc16..65450952a4 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py @@ -4,11 +4,30 @@ def test_SplineFilter_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - output_file=dict(argstr="%s", extensions=None, position=2, usedefault=True), - step_length=dict(argstr="%f", mandatory=True, position=1), - track_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + output_file=dict( + argstr="%s", + extensions=None, + position=2, + usedefault=True, + ), + step_length=dict( + argstr="%f", + mandatory=True, + position=1, + ), + track_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), ) inputs = SplineFilter.input_spec() @@ -18,7 +37,11 @@ def test_SplineFilter_inputs(): def test_SplineFilter_outputs(): - output_map = dict(smoothed_track_file=dict(extensions=None)) + output_map = dict( + smoothed_track_file=dict( + extensions=None, + ), + ) outputs = SplineFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py index c0e8039dd1..7f668df568 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py @@ -4,10 +4,24 @@ def test_TrackMerge_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - output_file=dict(argstr="%s", extensions=None, position=-1, usedefault=True), - track_files=dict(argstr="%s...", mandatory=True, position=0), + args=dict( + argstr="%s", + ), 
+ environ=dict( + nohash=True, + usedefault=True, + ), + output_file=dict( + argstr="%s", + extensions=None, + position=-1, + usedefault=True, + ), + track_files=dict( + argstr="%s...", + mandatory=True, + position=0, + ), ) inputs = TrackMerge.input_spec() @@ -17,7 +31,11 @@ def test_TrackMerge_inputs(): def test_TrackMerge_outputs(): - output_map = dict(track_file=dict(extensions=None)) + output_map = dict( + track_file=dict( + extensions=None, + ), + ) outputs = TrackMerge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_APMQball.py b/nipype/interfaces/dipy/tests/test_auto_APMQball.py index df94f72b25..81ff397cb8 100644 --- a/nipype/interfaces/dipy/tests/test_auto_APMQball.py +++ b/nipype/interfaces/dipy/tests/test_auto_APMQball.py @@ -4,11 +4,24 @@ def test_APMQball_inputs(): input_map = dict( - b0_thres=dict(usedefault=True), - in_bval=dict(extensions=None, mandatory=True), - in_bvec=dict(extensions=None, mandatory=True), - in_file=dict(extensions=None, mandatory=True), - mask_file=dict(extensions=None), + b0_thres=dict( + usedefault=True, + ), + in_bval=dict( + extensions=None, + mandatory=True, + ), + in_bvec=dict( + extensions=None, + mandatory=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + mask_file=dict( + extensions=None, + ), out_prefix=dict(), ) inputs = APMQball.input_spec() @@ -19,7 +32,11 @@ def test_APMQball_inputs(): def test_APMQball_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = APMQball.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_CSD.py b/nipype/interfaces/dipy/tests/test_auto_CSD.py index 96c5531679..7cdab47f9b 100644 --- a/nipype/interfaces/dipy/tests/test_auto_CSD.py +++ b/nipype/interfaces/dipy/tests/test_auto_CSD.py @@ -4,16 +4,37 @@ def test_CSD_inputs(): input_map = dict( - b0_thres=dict(usedefault=True), - in_bval=dict(extensions=None, mandatory=True), - in_bvec=dict(extensions=None, mandatory=True), - in_file=dict(extensions=None, mandatory=True), - in_mask=dict(extensions=None), - out_fods=dict(extensions=None), + b0_thres=dict( + usedefault=True, + ), + in_bval=dict( + extensions=None, + mandatory=True, + ), + in_bvec=dict( + extensions=None, + mandatory=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + in_mask=dict( + extensions=None, + ), + out_fods=dict( + extensions=None, + ), out_prefix=dict(), - response=dict(extensions=None), - save_fods=dict(usedefault=True), - sh_order=dict(usedefault=True), + response=dict( + extensions=None, + ), + save_fods=dict( + usedefault=True, + ), + sh_order=dict( + usedefault=True, + ), ) inputs = CSD.input_spec() @@ -23,7 +44,14 @@ def test_CSD_inputs(): def test_CSD_outputs(): - output_map = dict(model=dict(extensions=None), out_fods=dict(extensions=None)) + output_map = dict( + model=dict( + extensions=None, + ), + out_fods=dict( + extensions=None, + ), + ) outputs = CSD.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_DTI.py b/nipype/interfaces/dipy/tests/test_auto_DTI.py index fd47004082..1cea142a36 100644 --- a/nipype/interfaces/dipy/tests/test_auto_DTI.py +++ b/nipype/interfaces/dipy/tests/test_auto_DTI.py @@ -4,11 +4,24 @@ def test_DTI_inputs(): input_map = dict( - b0_thres=dict(usedefault=True), - in_bval=dict(extensions=None, mandatory=True), - in_bvec=dict(extensions=None, 
mandatory=True), - in_file=dict(extensions=None, mandatory=True), - mask_file=dict(extensions=None), + b0_thres=dict( + usedefault=True, + ), + in_bval=dict( + extensions=None, + mandatory=True, + ), + in_bvec=dict( + extensions=None, + mandatory=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + mask_file=dict( + extensions=None, + ), out_prefix=dict(), ) inputs = DTI.input_spec() @@ -20,12 +33,24 @@ def test_DTI_inputs(): def test_DTI_outputs(): output_map = dict( - ad_file=dict(extensions=None), - color_fa_file=dict(extensions=None), - fa_file=dict(extensions=None), - md_file=dict(extensions=None), - out_file=dict(extensions=None), - rd_file=dict(extensions=None), + ad_file=dict( + extensions=None, + ), + color_fa_file=dict( + extensions=None, + ), + fa_file=dict( + extensions=None, + ), + md_file=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + rd_file=dict( + extensions=None, + ), ) outputs = DTI.output_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_Denoise.py b/nipype/interfaces/dipy/tests/test_auto_Denoise.py index a0089f8d67..e85d2644c2 100644 --- a/nipype/interfaces/dipy/tests/test_auto_Denoise.py +++ b/nipype/interfaces/dipy/tests/test_auto_Denoise.py @@ -4,13 +4,29 @@ def test_Denoise_inputs(): input_map = dict( - block_radius=dict(usedefault=True), - in_file=dict(extensions=None, mandatory=True), - in_mask=dict(extensions=None), - noise_mask=dict(extensions=None), - noise_model=dict(mandatory=True, usedefault=True), - patch_radius=dict(usedefault=True), - signal_mask=dict(extensions=None), + block_radius=dict( + usedefault=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + in_mask=dict( + extensions=None, + ), + noise_mask=dict( + extensions=None, + ), + noise_model=dict( + mandatory=True, + usedefault=True, + ), + patch_radius=dict( + usedefault=True, + ), + signal_mask=dict( + extensions=None, + ), snr=dict(), ) inputs = Denoise.input_spec() @@ -21,7 +37,11 @@ def test_Denoise_inputs(): def test_Denoise_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Denoise.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py b/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py index cb94de4bda..e292135ba0 100644 --- a/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py +++ b/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py @@ -4,10 +4,21 @@ def test_DipyDiffusionInterface_inputs(): input_map = dict( - b0_thres=dict(usedefault=True), - in_bval=dict(extensions=None, mandatory=True), - in_bvec=dict(extensions=None, mandatory=True), - in_file=dict(extensions=None, mandatory=True), + b0_thres=dict( + usedefault=True, + ), + in_bval=dict( + extensions=None, + mandatory=True, + ), + in_bvec=dict( + extensions=None, + mandatory=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), out_prefix=dict(), ) inputs = DipyDiffusionInterface.input_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py b/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py index 3031621e3e..00c8c1ba0d 100644 --- a/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py +++ b/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py @@ -4,19 +4,49 @@ def test_EstimateResponseSH_inputs(): input_map = dict( - auto=dict(xor=["recursive"]), - b0_thres=dict(usedefault=True), - 
fa_thresh=dict(usedefault=True), - in_bval=dict(extensions=None, mandatory=True), - in_bvec=dict(extensions=None, mandatory=True), - in_evals=dict(extensions=None, mandatory=True), - in_file=dict(extensions=None, mandatory=True), - in_mask=dict(extensions=None), - out_mask=dict(extensions=None, usedefault=True), + auto=dict( + xor=["recursive"], + ), + b0_thres=dict( + usedefault=True, + ), + fa_thresh=dict( + usedefault=True, + ), + in_bval=dict( + extensions=None, + mandatory=True, + ), + in_bvec=dict( + extensions=None, + mandatory=True, + ), + in_evals=dict( + extensions=None, + mandatory=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + in_mask=dict( + extensions=None, + ), + out_mask=dict( + extensions=None, + usedefault=True, + ), out_prefix=dict(), - recursive=dict(xor=["auto"]), - response=dict(extensions=None, usedefault=True), - roi_radius=dict(usedefault=True), + recursive=dict( + xor=["auto"], + ), + response=dict( + extensions=None, + usedefault=True, + ), + roi_radius=dict( + usedefault=True, + ), ) inputs = EstimateResponseSH.input_spec() @@ -26,7 +56,14 @@ def test_EstimateResponseSH_inputs(): def test_EstimateResponseSH_outputs(): - output_map = dict(out_mask=dict(extensions=None), response=dict(extensions=None)) + output_map = dict( + out_mask=dict( + extensions=None, + ), + response=dict( + extensions=None, + ), + ) outputs = EstimateResponseSH.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_RESTORE.py b/nipype/interfaces/dipy/tests/test_auto_RESTORE.py index bf137fb14d..f25127f9c9 100644 --- a/nipype/interfaces/dipy/tests/test_auto_RESTORE.py +++ b/nipype/interfaces/dipy/tests/test_auto_RESTORE.py @@ -4,12 +4,27 @@ def test_RESTORE_inputs(): input_map = dict( - b0_thres=dict(usedefault=True), - in_bval=dict(extensions=None, mandatory=True), - in_bvec=dict(extensions=None, mandatory=True), - in_file=dict(extensions=None, mandatory=True), - in_mask=dict(extensions=None), - noise_mask=dict(extensions=None), + b0_thres=dict( + usedefault=True, + ), + in_bval=dict( + extensions=None, + mandatory=True, + ), + in_bvec=dict( + extensions=None, + mandatory=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + in_mask=dict( + extensions=None, + ), + noise_mask=dict( + extensions=None, + ), out_prefix=dict(), ) inputs = RESTORE.input_spec() @@ -21,13 +36,27 @@ def test_RESTORE_inputs(): def test_RESTORE_outputs(): output_map = dict( - evals=dict(extensions=None), - evecs=dict(extensions=None), - fa=dict(extensions=None), - md=dict(extensions=None), - mode=dict(extensions=None), - rd=dict(extensions=None), - trace=dict(extensions=None), + evals=dict( + extensions=None, + ), + evecs=dict( + extensions=None, + ), + fa=dict( + extensions=None, + ), + md=dict( + extensions=None, + ), + mode=dict( + extensions=None, + ), + rd=dict( + extensions=None, + ), + trace=dict( + extensions=None, + ), ) outputs = RESTORE.output_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_Resample.py b/nipype/interfaces/dipy/tests/test_auto_Resample.py index fcde24b209..6c765b2fa9 100644 --- a/nipype/interfaces/dipy/tests/test_auto_Resample.py +++ b/nipype/interfaces/dipy/tests/test_auto_Resample.py @@ -4,8 +4,14 @@ def test_Resample_inputs(): input_map = dict( - in_file=dict(extensions=None, mandatory=True), - interp=dict(mandatory=True, usedefault=True), + in_file=dict( + extensions=None, + mandatory=True, + ), + interp=dict( + mandatory=True, + usedefault=True, + ), vox_size=dict(), ) inputs = 
Resample.input_spec() @@ -16,7 +22,11 @@ def test_Resample_inputs(): def test_Resample_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Resample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py b/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py index 3e24a53485..16fff2aeff 100644 --- a/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py +++ b/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py @@ -4,24 +4,65 @@ def test_SimulateMultiTensor_inputs(): input_map = dict( - baseline=dict(extensions=None, mandatory=True), - bvalues=dict(usedefault=True), - diff_iso=dict(usedefault=True), - diff_sf=dict(usedefault=True), - gradients=dict(extensions=None), - in_bval=dict(extensions=None), - in_bvec=dict(extensions=None), - in_dirs=dict(mandatory=True), - in_frac=dict(mandatory=True), - in_mask=dict(extensions=None), - in_vfms=dict(mandatory=True), - n_proc=dict(usedefault=True), - num_dirs=dict(usedefault=True), - out_bval=dict(extensions=None, usedefault=True), - out_bvec=dict(extensions=None, usedefault=True), - out_file=dict(extensions=None, usedefault=True), - out_mask=dict(extensions=None, usedefault=True), - snr=dict(usedefault=True), + baseline=dict( + extensions=None, + mandatory=True, + ), + bvalues=dict( + usedefault=True, + ), + diff_iso=dict( + usedefault=True, + ), + diff_sf=dict( + usedefault=True, + ), + gradients=dict( + extensions=None, + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + extensions=None, + ), + in_dirs=dict( + mandatory=True, + ), + in_frac=dict( + mandatory=True, + ), + in_mask=dict( + extensions=None, + ), + in_vfms=dict( + mandatory=True, + ), + n_proc=dict( + usedefault=True, + ), + num_dirs=dict( + usedefault=True, + ), + out_bval=dict( + extensions=None, + usedefault=True, + ), + out_bvec=dict( + extensions=None, + usedefault=True, + ), + out_file=dict( + extensions=None, + usedefault=True, + ), + out_mask=dict( + extensions=None, + usedefault=True, + ), + snr=dict( + usedefault=True, + ), ) inputs = SimulateMultiTensor.input_spec() @@ -32,10 +73,18 @@ def test_SimulateMultiTensor_inputs(): def test_SimulateMultiTensor_outputs(): output_map = dict( - out_bval=dict(extensions=None), - out_bvec=dict(extensions=None), - out_file=dict(extensions=None), - out_mask=dict(extensions=None), + out_bval=dict( + extensions=None, + ), + out_bvec=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + out_mask=dict( + extensions=None, + ), ) outputs = SimulateMultiTensor.output_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py b/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py index 7f7b61c7df..ad97523ce2 100644 --- a/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py +++ b/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py @@ -4,19 +4,50 @@ def test_StreamlineTractography_inputs(): input_map = dict( - gfa_thresh=dict(mandatory=True, usedefault=True), - in_file=dict(extensions=None, mandatory=True), - in_model=dict(extensions=None), - in_peaks=dict(extensions=None), - min_angle=dict(mandatory=True, usedefault=True), - multiprocess=dict(mandatory=True, usedefault=True), - num_seeds=dict(mandatory=True, usedefault=True), + gfa_thresh=dict( + mandatory=True, + usedefault=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + in_model=dict( + 
extensions=None, + ), + in_peaks=dict( + extensions=None, + ), + min_angle=dict( + mandatory=True, + usedefault=True, + ), + multiprocess=dict( + mandatory=True, + usedefault=True, + ), + num_seeds=dict( + mandatory=True, + usedefault=True, + ), out_prefix=dict(), - peak_threshold=dict(mandatory=True, usedefault=True), - save_seeds=dict(mandatory=True, usedefault=True), - seed_coord=dict(extensions=None), - seed_mask=dict(extensions=None), - tracking_mask=dict(extensions=None), + peak_threshold=dict( + mandatory=True, + usedefault=True, + ), + save_seeds=dict( + mandatory=True, + usedefault=True, + ), + seed_coord=dict( + extensions=None, + ), + seed_mask=dict( + extensions=None, + ), + tracking_mask=dict( + extensions=None, + ), ) inputs = StreamlineTractography.input_spec() @@ -27,10 +58,18 @@ def test_StreamlineTractography_inputs(): def test_StreamlineTractography_outputs(): output_map = dict( - gfa=dict(extensions=None), - odf_peaks=dict(extensions=None), - out_seeds=dict(extensions=None), - tracks=dict(extensions=None), + gfa=dict( + extensions=None, + ), + odf_peaks=dict( + extensions=None, + ), + out_seeds=dict( + extensions=None, + ), + tracks=dict( + extensions=None, + ), ) outputs = StreamlineTractography.output_spec() diff --git a/nipype/interfaces/dipy/tests/test_auto_TensorMode.py b/nipype/interfaces/dipy/tests/test_auto_TensorMode.py index 8f2eee5294..59b1b30e8b 100644 --- a/nipype/interfaces/dipy/tests/test_auto_TensorMode.py +++ b/nipype/interfaces/dipy/tests/test_auto_TensorMode.py @@ -4,11 +4,24 @@ def test_TensorMode_inputs(): input_map = dict( - b0_thres=dict(usedefault=True), - in_bval=dict(extensions=None, mandatory=True), - in_bvec=dict(extensions=None, mandatory=True), - in_file=dict(extensions=None, mandatory=True), - mask_file=dict(extensions=None), + b0_thres=dict( + usedefault=True, + ), + in_bval=dict( + extensions=None, + mandatory=True, + ), + in_bvec=dict( + extensions=None, + mandatory=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + mask_file=dict( + extensions=None, + ), out_prefix=dict(), ) inputs = TensorMode.input_spec() @@ -19,7 +32,11 @@ def test_TensorMode_inputs(): def test_TensorMode_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TensorMode.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py b/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py index 70ee723c69..0c7855c507 100644 --- a/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py +++ b/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py @@ -5,10 +5,20 @@ def test_TrackDensityMap_inputs(): input_map = dict( data_dims=dict(), - in_file=dict(extensions=None, mandatory=True), - out_filename=dict(extensions=None, usedefault=True), - points_space=dict(usedefault=True), - reference=dict(extensions=None), + in_file=dict( + extensions=None, + mandatory=True, + ), + out_filename=dict( + extensions=None, + usedefault=True, + ), + points_space=dict( + usedefault=True, + ), + reference=dict( + extensions=None, + ), voxel_dims=dict(), ) inputs = TrackDensityMap.input_spec() @@ -19,7 +29,11 @@ def test_TrackDensityMap_inputs(): def test_TrackDensityMap_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TrackDensityMap.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py b/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py index 0d15b9a115..2988e44e8d 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py +++ b/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py @@ -4,12 +4,30 @@ def test_AffScalarVol_inputs(): input_map = dict( - args=dict(argstr="%s"), - deformation=dict(argstr="-deformation %g %g %g %g %g %g", xor=["transform"]), - environ=dict(nohash=True, usedefault=True), - euler=dict(argstr="-euler %g %g %g", xor=["transform"]), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True), - interpolation=dict(argstr="-interp %s", usedefault=True), + args=dict( + argstr="%s", + ), + deformation=dict( + argstr="-deformation %g %g %g %g %g %g", + xor=["transform"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + euler=dict( + argstr="-euler %g %g %g", + xor=["transform"], + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + interpolation=dict( + argstr="-interp %s", + usedefault=True, + ), out_file=dict( argstr="-out %s", extensions=None, @@ -17,13 +35,20 @@ def test_AffScalarVol_inputs(): name_source="in_file", name_template="%s_affxfmd", ), - target=dict(argstr="-target %s", extensions=None, xor=["transform"]), + target=dict( + argstr="-target %s", + extensions=None, + xor=["transform"], + ), transform=dict( argstr="-trans %s", extensions=None, xor=["target", "translation", "euler", "deformation"], ), - translation=dict(argstr="-translation %g %g %g", xor=["transform"]), + translation=dict( + argstr="-translation %g %g %g", + xor=["transform"], + ), ) inputs = AffScalarVol.input_spec() @@ -33,7 +58,11 @@ def test_AffScalarVol_inputs(): def test_AffScalarVol_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = AffScalarVol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py b/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py index 96dfaa3584..d1ba18a8ac 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py +++ b/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py @@ -4,12 +4,30 @@ def test_AffSymTensor3DVol_inputs(): input_map = dict( - args=dict(argstr="%s"), - deformation=dict(argstr="-deformation %g %g %g %g %g %g", xor=["transform"]), - environ=dict(nohash=True, usedefault=True), - euler=dict(argstr="-euler %g %g %g", xor=["transform"]), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True), - interpolation=dict(argstr="-interp %s", usedefault=True), + args=dict( + argstr="%s", + ), + deformation=dict( + argstr="-deformation %g %g %g %g %g %g", + xor=["transform"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + euler=dict( + argstr="-euler %g %g %g", + xor=["transform"], + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + interpolation=dict( + argstr="-interp %s", + usedefault=True, + ), out_file=dict( argstr="-out %s", extensions=None, @@ -17,14 +35,24 @@ def test_AffSymTensor3DVol_inputs(): name_source="in_file", name_template="%s_affxfmd", ), - reorient=dict(argstr="-reorient %s", usedefault=True), - target=dict(argstr="-target %s", extensions=None, xor=["transform"]), + reorient=dict( + argstr="-reorient %s", + usedefault=True, + ), + target=dict( + argstr="-target %s", + extensions=None, + xor=["transform"], + ), transform=dict( argstr="-trans %s", 
extensions=None, xor=["target", "translation", "euler", "deformation"], ), - translation=dict(argstr="-translation %g %g %g", xor=["transform"]), + translation=dict( + argstr="-translation %g %g %g", + xor=["transform"], + ), ) inputs = AffSymTensor3DVol.input_spec() @@ -34,7 +62,11 @@ def test_AffSymTensor3DVol_inputs(): def test_AffSymTensor3DVol_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = AffSymTensor3DVol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_Affine.py b/nipype/interfaces/dtitk/tests/test_auto_Affine.py index 7ff9cc9e2b..0ff24b788c 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_Affine.py +++ b/nipype/interfaces/dtitk/tests/test_auto_Affine.py @@ -4,21 +4,50 @@ def test_Affine_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), fixed_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=0, + ), + ftol=dict( + argstr="%g", + mandatory=True, + position=4, + usedefault=True, + ), + initialize_xfm=dict( + argstr="%s", + copyfile=True, + extensions=None, + position=5, ), - ftol=dict(argstr="%g", mandatory=True, position=4, usedefault=True), - initialize_xfm=dict(argstr="%s", copyfile=True, extensions=None, position=5), moving_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=1 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=1, ), sampling_xyz=dict( - argstr="%g %g %g", mandatory=True, position=3, usedefault=True + argstr="%g %g %g", + mandatory=True, + position=3, + usedefault=True, ), similarity_metric=dict( - argstr="%s", mandatory=True, position=2, usedefault=True + argstr="%s", + mandatory=True, + position=2, + usedefault=True, ), ) inputs = Affine.input_spec() @@ -30,7 +59,12 @@ def test_Affine_inputs(): def test_Affine_outputs(): output_map = dict( - out_file=dict(extensions=None), out_file_xfm=dict(extensions=None) + out_file=dict( + extensions=None, + ), + out_file_xfm=dict( + extensions=None, + ), ) outputs = Affine.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py b/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py index f332eb35b5..c680c08815 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py @@ -4,21 +4,50 @@ def test_AffineTask_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), fixed_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=0, + ), + ftol=dict( + argstr="%g", + mandatory=True, + position=4, + usedefault=True, + ), + initialize_xfm=dict( + argstr="%s", + copyfile=True, + extensions=None, + position=5, ), - ftol=dict(argstr="%g", mandatory=True, position=4, usedefault=True), - initialize_xfm=dict(argstr="%s", copyfile=True, extensions=None, position=5), moving_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=1 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=1, ), 
sampling_xyz=dict( - argstr="%g %g %g", mandatory=True, position=3, usedefault=True + argstr="%g %g %g", + mandatory=True, + position=3, + usedefault=True, ), similarity_metric=dict( - argstr="%s", mandatory=True, position=2, usedefault=True + argstr="%s", + mandatory=True, + position=2, + usedefault=True, ), ) inputs = AffineTask.input_spec() @@ -30,7 +59,12 @@ def test_AffineTask_inputs(): def test_AffineTask_outputs(): output_map = dict( - out_file=dict(extensions=None), out_file_xfm=dict(extensions=None) + out_file=dict( + extensions=None, + ), + out_file_xfm=dict( + extensions=None, + ), ) outputs = AffineTask.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py b/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py index 807c16ea73..994c8a2b8d 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py +++ b/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py @@ -4,11 +4,31 @@ def test_BinThresh_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), - inside_value=dict(argstr="%g", mandatory=True, position=4, usedefault=True), - lower_bound=dict(argstr="%g", mandatory=True, position=2, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), + inside_value=dict( + argstr="%g", + mandatory=True, + position=4, + usedefault=True, + ), + lower_bound=dict( + argstr="%g", + mandatory=True, + position=2, + usedefault=True, + ), out_file=dict( argstr="%s", extensions=None, @@ -17,8 +37,18 @@ def test_BinThresh_inputs(): name_template="%s_thrbin", position=1, ), - outside_value=dict(argstr="%g", mandatory=True, position=5, usedefault=True), - upper_bound=dict(argstr="%g", mandatory=True, position=3, usedefault=True), + outside_value=dict( + argstr="%g", + mandatory=True, + position=5, + usedefault=True, + ), + upper_bound=dict( + argstr="%g", + mandatory=True, + position=3, + usedefault=True, + ), ) inputs = BinThresh.input_spec() @@ -28,7 +58,11 @@ def test_BinThresh_inputs(): def test_BinThresh_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = BinThresh.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py b/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py index fd430797b4..cccf7eed27 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py @@ -4,11 +4,31 @@ def test_BinThreshTask_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), - inside_value=dict(argstr="%g", mandatory=True, position=4, usedefault=True), - lower_bound=dict(argstr="%g", mandatory=True, position=2, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), + inside_value=dict( + argstr="%g", + mandatory=True, + position=4, + usedefault=True, + ), + lower_bound=dict( + argstr="%g", + mandatory=True, + position=2, + usedefault=True, + ), out_file=dict( argstr="%s", extensions=None, @@ -17,8 +37,18 @@ def test_BinThreshTask_inputs(): 
name_template="%s_thrbin", position=1, ), - outside_value=dict(argstr="%g", mandatory=True, position=5, usedefault=True), - upper_bound=dict(argstr="%g", mandatory=True, position=3, usedefault=True), + outside_value=dict( + argstr="%g", + mandatory=True, + position=5, + usedefault=True, + ), + upper_bound=dict( + argstr="%g", + mandatory=True, + position=3, + usedefault=True, + ), ) inputs = BinThreshTask.input_spec() @@ -28,7 +58,11 @@ def test_BinThreshTask_inputs(): def test_BinThreshTask_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = BinThreshTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_CommandLineDtitk.py b/nipype/interfaces/dtitk/tests/test_auto_CommandLineDtitk.py index ca290894cc..03044b6bc4 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_CommandLineDtitk.py +++ b/nipype/interfaces/dtitk/tests/test_auto_CommandLineDtitk.py @@ -3,7 +3,15 @@ def test_CommandLineDtitk_inputs(): - input_map = dict(args=dict(argstr="%s"), environ=dict(nohash=True, usedefault=True)) + input_map = dict( + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ) inputs = CommandLineDtitk.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py index 7351bb228d..fa34fdcdb3 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py +++ b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py @@ -4,11 +4,28 @@ def test_ComposeXfm_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_aff=dict(argstr="-aff %s", extensions=None, mandatory=True), - in_df=dict(argstr="-df %s", extensions=None, mandatory=True), - out_file=dict(argstr="-out %s", extensions=None, genfile=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_aff=dict( + argstr="-aff %s", + extensions=None, + mandatory=True, + ), + in_df=dict( + argstr="-df %s", + extensions=None, + mandatory=True, + ), + out_file=dict( + argstr="-out %s", + extensions=None, + genfile=True, + ), ) inputs = ComposeXfm.input_spec() @@ -18,7 +35,11 @@ def test_ComposeXfm_inputs(): def test_ComposeXfm_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ComposeXfm.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py index b79316c6ba..b4b2f7509e 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py @@ -4,11 +4,28 @@ def test_ComposeXfmTask_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_aff=dict(argstr="-aff %s", extensions=None, mandatory=True), - in_df=dict(argstr="-df %s", extensions=None, mandatory=True), - out_file=dict(argstr="-out %s", extensions=None, genfile=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_aff=dict( + argstr="-aff %s", + extensions=None, + mandatory=True, + ), + in_df=dict( + argstr="-df %s", + extensions=None, + mandatory=True, + ), + out_file=dict( + argstr="-out %s", + extensions=None, + genfile=True, + ), ) inputs 
= ComposeXfmTask.input_spec() @@ -18,7 +35,11 @@ def test_ComposeXfmTask_inputs(): def test_ComposeXfmTask_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ComposeXfmTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py b/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py index dcb9115575..89a1bdcfc8 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py +++ b/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py @@ -4,14 +4,47 @@ def test_Diffeo_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - fixed_file=dict(argstr="%s", extensions=None, position=0), - ftol=dict(argstr="%g", mandatory=True, position=5, usedefault=True), - legacy=dict(argstr="%d", mandatory=True, position=3, usedefault=True), - mask_file=dict(argstr="%s", extensions=None, position=2), - moving_file=dict(argstr="%s", copyfile=False, extensions=None, position=1), - n_iters=dict(argstr="%d", mandatory=True, position=4, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_file=dict( + argstr="%s", + extensions=None, + position=0, + ), + ftol=dict( + argstr="%g", + mandatory=True, + position=5, + usedefault=True, + ), + legacy=dict( + argstr="%d", + mandatory=True, + position=3, + usedefault=True, + ), + mask_file=dict( + argstr="%s", + extensions=None, + position=2, + ), + moving_file=dict( + argstr="%s", + copyfile=False, + extensions=None, + position=1, + ), + n_iters=dict( + argstr="%d", + mandatory=True, + position=4, + usedefault=True, + ), ) inputs = Diffeo.input_spec() @@ -22,7 +55,12 @@ def test_Diffeo_inputs(): def test_Diffeo_outputs(): output_map = dict( - out_file=dict(extensions=None), out_file_xfm=dict(extensions=None) + out_file=dict( + extensions=None, + ), + out_file_xfm=dict( + extensions=None, + ), ) outputs = Diffeo.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py b/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py index 97fdde0e5b..39255fb5c1 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py +++ b/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py @@ -4,11 +4,25 @@ def test_DiffeoScalarVol_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - flip=dict(argstr="-flip %d %d %d"), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True), - interpolation=dict(argstr="-interp %s", usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flip=dict( + argstr="-flip %d %d %d", + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + interpolation=dict( + argstr="-interp %s", + usedefault=True, + ), out_file=dict( argstr="-out %s", extensions=None, @@ -16,10 +30,23 @@ def test_DiffeoScalarVol_inputs(): name_source="in_file", name_template="%s_diffeoxfmd", ), - resampling_type=dict(argstr="-type %s"), - target=dict(argstr="-target %s", extensions=None, xor=["voxel_size"]), - transform=dict(argstr="-trans %s", extensions=None, mandatory=True), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target"]), + resampling_type=dict( + argstr="-type %s", + ), + target=dict( + argstr="-target %s", + extensions=None, + xor=["voxel_size"], + ), + transform=dict( + argstr="-trans %s", + extensions=None, + mandatory=True, + ), + voxel_size=dict( 
+ argstr="-vsize %g %g %g", + xor=["target"], + ), ) inputs = DiffeoScalarVol.input_spec() @@ -29,7 +56,11 @@ def test_DiffeoScalarVol_inputs(): def test_DiffeoScalarVol_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = DiffeoScalarVol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py b/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py index b50fdfd3be..123b741645 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py +++ b/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py @@ -4,12 +4,29 @@ def test_DiffeoSymTensor3DVol_inputs(): input_map = dict( - args=dict(argstr="%s"), - df=dict(argstr="-df %s", usedefault=True), - environ=dict(nohash=True, usedefault=True), - flip=dict(argstr="-flip %d %d %d"), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True), - interpolation=dict(argstr="-interp %s", usedefault=True), + args=dict( + argstr="%s", + ), + df=dict( + argstr="-df %s", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flip=dict( + argstr="-flip %d %d %d", + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + interpolation=dict( + argstr="-interp %s", + usedefault=True, + ), out_file=dict( argstr="-out %s", extensions=None, @@ -17,11 +34,27 @@ def test_DiffeoSymTensor3DVol_inputs(): name_source="in_file", name_template="%s_diffeoxfmd", ), - reorient=dict(argstr="-reorient %s", usedefault=True), - resampling_type=dict(argstr="-type %s"), - target=dict(argstr="-target %s", extensions=None, xor=["voxel_size"]), - transform=dict(argstr="-trans %s", extensions=None, mandatory=True), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target"]), + reorient=dict( + argstr="-reorient %s", + usedefault=True, + ), + resampling_type=dict( + argstr="-type %s", + ), + target=dict( + argstr="-target %s", + extensions=None, + xor=["voxel_size"], + ), + transform=dict( + argstr="-trans %s", + extensions=None, + mandatory=True, + ), + voxel_size=dict( + argstr="-vsize %g %g %g", + xor=["target"], + ), ) inputs = DiffeoSymTensor3DVol.input_spec() @@ -31,7 +64,11 @@ def test_DiffeoSymTensor3DVol_inputs(): def test_DiffeoSymTensor3DVol_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = DiffeoSymTensor3DVol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py b/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py index 80be334b0a..f7914ab9cc 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py @@ -4,14 +4,47 @@ def test_DiffeoTask_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - fixed_file=dict(argstr="%s", extensions=None, position=0), - ftol=dict(argstr="%g", mandatory=True, position=5, usedefault=True), - legacy=dict(argstr="%d", mandatory=True, position=3, usedefault=True), - mask_file=dict(argstr="%s", extensions=None, position=2), - moving_file=dict(argstr="%s", copyfile=False, extensions=None, position=1), - n_iters=dict(argstr="%d", mandatory=True, position=4, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_file=dict( + argstr="%s", + extensions=None, + 
position=0, + ), + ftol=dict( + argstr="%g", + mandatory=True, + position=5, + usedefault=True, + ), + legacy=dict( + argstr="%d", + mandatory=True, + position=3, + usedefault=True, + ), + mask_file=dict( + argstr="%s", + extensions=None, + position=2, + ), + moving_file=dict( + argstr="%s", + copyfile=False, + extensions=None, + position=1, + ), + n_iters=dict( + argstr="%d", + mandatory=True, + position=4, + usedefault=True, + ), ) inputs = DiffeoTask.input_spec() @@ -22,7 +55,12 @@ def test_DiffeoTask_inputs(): def test_DiffeoTask_outputs(): output_map = dict( - out_file=dict(extensions=None), out_file_xfm=dict(extensions=None) + out_file=dict( + extensions=None, + ), + out_file_xfm=dict( + extensions=None, + ), ) outputs = DiffeoTask.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_Rigid.py b/nipype/interfaces/dtitk/tests/test_auto_Rigid.py index 066fb8e42a..dbcc6f0fcf 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_Rigid.py +++ b/nipype/interfaces/dtitk/tests/test_auto_Rigid.py @@ -4,21 +4,50 @@ def test_Rigid_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), fixed_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=0, + ), + ftol=dict( + argstr="%g", + mandatory=True, + position=4, + usedefault=True, + ), + initialize_xfm=dict( + argstr="%s", + copyfile=True, + extensions=None, + position=5, ), - ftol=dict(argstr="%g", mandatory=True, position=4, usedefault=True), - initialize_xfm=dict(argstr="%s", copyfile=True, extensions=None, position=5), moving_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=1 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=1, ), sampling_xyz=dict( - argstr="%g %g %g", mandatory=True, position=3, usedefault=True + argstr="%g %g %g", + mandatory=True, + position=3, + usedefault=True, ), similarity_metric=dict( - argstr="%s", mandatory=True, position=2, usedefault=True + argstr="%s", + mandatory=True, + position=2, + usedefault=True, ), ) inputs = Rigid.input_spec() @@ -30,7 +59,12 @@ def test_Rigid_inputs(): def test_Rigid_outputs(): output_map = dict( - out_file=dict(extensions=None), out_file_xfm=dict(extensions=None) + out_file=dict( + extensions=None, + ), + out_file_xfm=dict( + extensions=None, + ), ) outputs = Rigid.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py b/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py index d915ea9e71..6c5236607c 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py @@ -4,21 +4,50 @@ def test_RigidTask_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), fixed_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=0, + ), + ftol=dict( + argstr="%g", + mandatory=True, + position=4, + usedefault=True, + ), + initialize_xfm=dict( + argstr="%s", + copyfile=True, + extensions=None, + position=5, ), - ftol=dict(argstr="%g", mandatory=True, position=4, usedefault=True), - initialize_xfm=dict(argstr="%s", copyfile=True, extensions=None, position=5), 
moving_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=1 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=1, ), sampling_xyz=dict( - argstr="%g %g %g", mandatory=True, position=3, usedefault=True + argstr="%g %g %g", + mandatory=True, + position=3, + usedefault=True, ), similarity_metric=dict( - argstr="%s", mandatory=True, position=2, usedefault=True + argstr="%s", + mandatory=True, + position=2, + usedefault=True, ), ) inputs = RigidTask.input_spec() @@ -30,7 +59,12 @@ def test_RigidTask_inputs(): def test_RigidTask_outputs(): output_map = dict( - out_file=dict(extensions=None), out_file_xfm=dict(extensions=None) + out_file=dict( + extensions=None, + ), + out_file_xfm=dict( + extensions=None, + ), ) outputs = RigidTask.output_spec() diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py index 7a7e65d01c..3d32a314bd 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py +++ b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py @@ -4,10 +4,22 @@ def test_SVAdjustVoxSp_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True), - origin=dict(argstr="-origin %g %g %g", xor=["target_file"]), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + origin=dict( + argstr="-origin %g %g %g", + xor=["target_file"], + ), out_file=dict( argstr="-out %s", extensions=None, @@ -16,9 +28,14 @@ def test_SVAdjustVoxSp_inputs(): name_template="%s_avs", ), target_file=dict( - argstr="-target %s", extensions=None, xor=["voxel_size", "origin"] + argstr="-target %s", + extensions=None, + xor=["voxel_size", "origin"], + ), + voxel_size=dict( + argstr="-vsize %g %g %g", + xor=["target_file"], ), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"]), ) inputs = SVAdjustVoxSp.input_spec() @@ -28,7 +45,11 @@ def test_SVAdjustVoxSp_inputs(): def test_SVAdjustVoxSp_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SVAdjustVoxSp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py index 40cf1f5c48..cedc693a24 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py @@ -4,10 +4,22 @@ def test_SVAdjustVoxSpTask_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True), - origin=dict(argstr="-origin %g %g %g", xor=["target_file"]), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + origin=dict( + argstr="-origin %g %g %g", + xor=["target_file"], + ), out_file=dict( argstr="-out %s", extensions=None, @@ -16,9 +28,14 @@ def test_SVAdjustVoxSpTask_inputs(): name_template="%s_avs", ), target_file=dict( - argstr="-target %s", extensions=None, xor=["voxel_size", "origin"] + argstr="-target %s", + extensions=None, + xor=["voxel_size", "origin"], + ), + voxel_size=dict( + argstr="-vsize %g %g %g", + 
xor=["target_file"], ), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"]), ) inputs = SVAdjustVoxSpTask.input_spec() @@ -28,7 +45,11 @@ def test_SVAdjustVoxSpTask_inputs(): def test_SVAdjustVoxSpTask_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SVAdjustVoxSpTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVResample.py b/nipype/interfaces/dtitk/tests/test_auto_SVResample.py index 295605706d..fc880cd3e7 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_SVResample.py +++ b/nipype/interfaces/dtitk/tests/test_auto_SVResample.py @@ -4,12 +4,29 @@ def test_SVResample_inputs(): input_map = dict( - align=dict(argstr="-align %s"), - args=dict(argstr="%s"), - array_size=dict(argstr="-size %d %d %d", xor=["target_file"]), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True), - origin=dict(argstr="-origin %g %g %g", xor=["target_file"]), + align=dict( + argstr="-align %s", + ), + args=dict( + argstr="%s", + ), + array_size=dict( + argstr="-size %d %d %d", + xor=["target_file"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + origin=dict( + argstr="-origin %g %g %g", + xor=["target_file"], + ), out_file=dict( argstr="-out %s", extensions=None, @@ -22,7 +39,10 @@ def test_SVResample_inputs(): extensions=None, xor=["array_size", "voxel_size", "origin"], ), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"]), + voxel_size=dict( + argstr="-vsize %g %g %g", + xor=["target_file"], + ), ) inputs = SVResample.input_spec() @@ -32,7 +52,11 @@ def test_SVResample_inputs(): def test_SVResample_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SVResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py b/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py index 7012a9af30..88412e68ae 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py @@ -4,12 +4,29 @@ def test_SVResampleTask_inputs(): input_map = dict( - align=dict(argstr="-align %s"), - args=dict(argstr="%s"), - array_size=dict(argstr="-size %d %d %d", xor=["target_file"]), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True), - origin=dict(argstr="-origin %g %g %g", xor=["target_file"]), + align=dict( + argstr="-align %s", + ), + args=dict( + argstr="%s", + ), + array_size=dict( + argstr="-size %d %d %d", + xor=["target_file"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + origin=dict( + argstr="-origin %g %g %g", + xor=["target_file"], + ), out_file=dict( argstr="-out %s", extensions=None, @@ -22,7 +39,10 @@ def test_SVResampleTask_inputs(): extensions=None, xor=["array_size", "voxel_size", "origin"], ), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"]), + voxel_size=dict( + argstr="-vsize %g %g %g", + xor=["target_file"], + ), ) inputs = SVResampleTask.input_spec() @@ -32,7 +52,11 @@ def test_SVResampleTask_inputs(): def test_SVResampleTask_outputs(): - output_map = dict(out_file=dict(extensions=None)) + 
output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SVResampleTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py index 97da04bc0e..333e05628d 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py @@ -4,10 +4,22 @@ def test_TVAdjustOriginTask_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True), - origin=dict(argstr="-origin %g %g %g", xor=["target_file"]), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + origin=dict( + argstr="-origin %g %g %g", + xor=["target_file"], + ), out_file=dict( argstr="-out %s", extensions=None, @@ -16,9 +28,14 @@ def test_TVAdjustOriginTask_inputs(): name_template="%s_avs", ), target_file=dict( - argstr="-target %s", extensions=None, xor=["voxel_size", "origin"] + argstr="-target %s", + extensions=None, + xor=["voxel_size", "origin"], + ), + voxel_size=dict( + argstr="-vsize %g %g %g", + xor=["target_file"], ), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"]), ) inputs = TVAdjustOriginTask.input_spec() @@ -28,7 +45,11 @@ def test_TVAdjustOriginTask_inputs(): def test_TVAdjustOriginTask_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TVAdjustOriginTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py index 8c775b628e..9c6596042b 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py @@ -4,10 +4,22 @@ def test_TVAdjustVoxSp_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True), - origin=dict(argstr="-origin %g %g %g", xor=["target_file"]), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + origin=dict( + argstr="-origin %g %g %g", + xor=["target_file"], + ), out_file=dict( argstr="-out %s", extensions=None, @@ -16,9 +28,14 @@ def test_TVAdjustVoxSp_inputs(): name_template="%s_avs", ), target_file=dict( - argstr="-target %s", extensions=None, xor=["voxel_size", "origin"] + argstr="-target %s", + extensions=None, + xor=["voxel_size", "origin"], + ), + voxel_size=dict( + argstr="-vsize %g %g %g", + xor=["target_file"], ), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"]), ) inputs = TVAdjustVoxSp.input_spec() @@ -28,7 +45,11 @@ def test_TVAdjustVoxSp_inputs(): def test_TVAdjustVoxSp_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TVAdjustVoxSp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py index 1d53c3949b..f34a76ae7b 100644 --- 
a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py @@ -4,10 +4,22 @@ def test_TVAdjustVoxSpTask_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True), - origin=dict(argstr="-origin %g %g %g", xor=["target_file"]), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + origin=dict( + argstr="-origin %g %g %g", + xor=["target_file"], + ), out_file=dict( argstr="-out %s", extensions=None, @@ -16,9 +28,14 @@ def test_TVAdjustVoxSpTask_inputs(): name_template="%s_avs", ), target_file=dict( - argstr="-target %s", extensions=None, xor=["voxel_size", "origin"] + argstr="-target %s", + extensions=None, + xor=["voxel_size", "origin"], + ), + voxel_size=dict( + argstr="-vsize %g %g %g", + xor=["target_file"], ), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"]), ) inputs = TVAdjustVoxSpTask.input_spec() @@ -28,7 +45,11 @@ def test_TVAdjustVoxSpTask_inputs(): def test_TVAdjustVoxSpTask_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TVAdjustVoxSpTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVResample.py b/nipype/interfaces/dtitk/tests/test_auto_TVResample.py index d5aa76f872..2ca99176f8 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVResample.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVResample.py @@ -4,13 +4,32 @@ def test_TVResample_inputs(): input_map = dict( - align=dict(argstr="-align %s"), - args=dict(argstr="%s"), - array_size=dict(argstr="-size %d %d %d", xor=["target_file"]), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True), - interpolation=dict(argstr="-interp %s"), - origin=dict(argstr="-origin %g %g %g", xor=["target_file"]), + align=dict( + argstr="-align %s", + ), + args=dict( + argstr="%s", + ), + array_size=dict( + argstr="-size %d %d %d", + xor=["target_file"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + interpolation=dict( + argstr="-interp %s", + ), + origin=dict( + argstr="-origin %g %g %g", + xor=["target_file"], + ), out_file=dict( argstr="-out %s", extensions=None, @@ -23,7 +42,10 @@ def test_TVResample_inputs(): extensions=None, xor=["array_size", "voxel_size", "origin"], ), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"]), + voxel_size=dict( + argstr="-vsize %g %g %g", + xor=["target_file"], + ), ) inputs = TVResample.input_spec() @@ -33,7 +55,11 @@ def test_TVResample_inputs(): def test_TVResample_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TVResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py index 1fdcf2dfd1..d1f908fca8 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py @@ -4,13 +4,32 @@ def test_TVResampleTask_inputs(): input_map = dict( - align=dict(argstr="-align %s"), - 
args=dict(argstr="%s"), - array_size=dict(argstr="-size %d %d %d", xor=["target_file"]), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True), - interpolation=dict(argstr="-interp %s"), - origin=dict(argstr="-origin %g %g %g", xor=["target_file"]), + align=dict( + argstr="-align %s", + ), + args=dict( + argstr="%s", + ), + array_size=dict( + argstr="-size %d %d %d", + xor=["target_file"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + interpolation=dict( + argstr="-interp %s", + ), + origin=dict( + argstr="-origin %g %g %g", + xor=["target_file"], + ), out_file=dict( argstr="-out %s", extensions=None, @@ -23,7 +42,10 @@ def test_TVResampleTask_inputs(): extensions=None, xor=["array_size", "voxel_size", "origin"], ), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target_file"]), + voxel_size=dict( + argstr="-vsize %g %g %g", + xor=["target_file"], + ), ) inputs = TVResampleTask.input_spec() @@ -33,7 +55,11 @@ def test_TVResampleTask_inputs(): def test_TVResampleTask_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TVResampleTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVtool.py b/nipype/interfaces/dtitk/tests/test_auto_TVtool.py index e1f9f2a164..11e2d05acc 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVtool.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVtool.py @@ -4,11 +4,26 @@ def test_TVtool_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True), - in_flag=dict(argstr="-%s"), - out_file=dict(argstr="-out %s", extensions=None, genfile=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + in_flag=dict( + argstr="-%s", + ), + out_file=dict( + argstr="-out %s", + extensions=None, + genfile=True, + ), ) inputs = TVtool.input_spec() @@ -18,7 +33,11 @@ def test_TVtool_inputs(): def test_TVtool_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TVtool.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py index b7df6423be..7af7bcb75b 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py @@ -4,11 +4,26 @@ def test_TVtoolTask_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True), - in_flag=dict(argstr="-%s"), - out_file=dict(argstr="-out %s", extensions=None, genfile=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + in_flag=dict( + argstr="-%s", + ), + out_file=dict( + argstr="-out %s", + extensions=None, + genfile=True, + ), ) inputs = TVtoolTask.input_spec() @@ -18,7 +33,11 @@ def test_TVtoolTask_inputs(): def test_TVtoolTask_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + 
extensions=None, + ), + ) outputs = TVtoolTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py index 3c74c498ae..8d3ebfad98 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py @@ -4,12 +4,30 @@ def test_affScalarVolTask_inputs(): input_map = dict( - args=dict(argstr="%s"), - deformation=dict(argstr="-deformation %g %g %g %g %g %g", xor=["transform"]), - environ=dict(nohash=True, usedefault=True), - euler=dict(argstr="-euler %g %g %g", xor=["transform"]), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True), - interpolation=dict(argstr="-interp %s", usedefault=True), + args=dict( + argstr="%s", + ), + deformation=dict( + argstr="-deformation %g %g %g %g %g %g", + xor=["transform"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + euler=dict( + argstr="-euler %g %g %g", + xor=["transform"], + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + interpolation=dict( + argstr="-interp %s", + usedefault=True, + ), out_file=dict( argstr="-out %s", extensions=None, @@ -17,13 +35,20 @@ def test_affScalarVolTask_inputs(): name_source="in_file", name_template="%s_affxfmd", ), - target=dict(argstr="-target %s", extensions=None, xor=["transform"]), + target=dict( + argstr="-target %s", + extensions=None, + xor=["transform"], + ), transform=dict( argstr="-trans %s", extensions=None, xor=["target", "translation", "euler", "deformation"], ), - translation=dict(argstr="-translation %g %g %g", xor=["transform"]), + translation=dict( + argstr="-translation %g %g %g", + xor=["transform"], + ), ) inputs = affScalarVolTask.input_spec() @@ -33,7 +58,11 @@ def test_affScalarVolTask_inputs(): def test_affScalarVolTask_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = affScalarVolTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py index dd49b3cd01..fbfde68e86 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py @@ -4,12 +4,30 @@ def test_affSymTensor3DVolTask_inputs(): input_map = dict( - args=dict(argstr="%s"), - deformation=dict(argstr="-deformation %g %g %g %g %g %g", xor=["transform"]), - environ=dict(nohash=True, usedefault=True), - euler=dict(argstr="-euler %g %g %g", xor=["transform"]), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True), - interpolation=dict(argstr="-interp %s", usedefault=True), + args=dict( + argstr="%s", + ), + deformation=dict( + argstr="-deformation %g %g %g %g %g %g", + xor=["transform"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + euler=dict( + argstr="-euler %g %g %g", + xor=["transform"], + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + interpolation=dict( + argstr="-interp %s", + usedefault=True, + ), out_file=dict( argstr="-out %s", extensions=None, @@ -17,14 +35,24 @@ def test_affSymTensor3DVolTask_inputs(): name_source="in_file", name_template="%s_affxfmd", ), - reorient=dict(argstr="-reorient %s", usedefault=True), - target=dict(argstr="-target %s", extensions=None, xor=["transform"]), + 
reorient=dict( + argstr="-reorient %s", + usedefault=True, + ), + target=dict( + argstr="-target %s", + extensions=None, + xor=["transform"], + ), transform=dict( argstr="-trans %s", extensions=None, xor=["target", "translation", "euler", "deformation"], ), - translation=dict(argstr="-translation %g %g %g", xor=["transform"]), + translation=dict( + argstr="-translation %g %g %g", + xor=["transform"], + ), ) inputs = affSymTensor3DVolTask.input_spec() @@ -34,7 +62,11 @@ def test_affSymTensor3DVolTask_inputs(): def test_affSymTensor3DVolTask_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = affSymTensor3DVolTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py index 8f44e64ba9..4a7ffee6f4 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py @@ -4,11 +4,25 @@ def test_diffeoScalarVolTask_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - flip=dict(argstr="-flip %d %d %d"), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True), - interpolation=dict(argstr="-interp %s", usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flip=dict( + argstr="-flip %d %d %d", + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + interpolation=dict( + argstr="-interp %s", + usedefault=True, + ), out_file=dict( argstr="-out %s", extensions=None, @@ -16,10 +30,23 @@ def test_diffeoScalarVolTask_inputs(): name_source="in_file", name_template="%s_diffeoxfmd", ), - resampling_type=dict(argstr="-type %s"), - target=dict(argstr="-target %s", extensions=None, xor=["voxel_size"]), - transform=dict(argstr="-trans %s", extensions=None, mandatory=True), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target"]), + resampling_type=dict( + argstr="-type %s", + ), + target=dict( + argstr="-target %s", + extensions=None, + xor=["voxel_size"], + ), + transform=dict( + argstr="-trans %s", + extensions=None, + mandatory=True, + ), + voxel_size=dict( + argstr="-vsize %g %g %g", + xor=["target"], + ), ) inputs = diffeoScalarVolTask.input_spec() @@ -29,7 +56,11 @@ def test_diffeoScalarVolTask_inputs(): def test_diffeoScalarVolTask_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = diffeoScalarVolTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py index 2b5f7e2e1a..6724343e69 100644 --- a/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py +++ b/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py @@ -4,12 +4,29 @@ def test_diffeoSymTensor3DVolTask_inputs(): input_map = dict( - args=dict(argstr="%s"), - df=dict(argstr="-df %s", usedefault=True), - environ=dict(nohash=True, usedefault=True), - flip=dict(argstr="-flip %d %d %d"), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True), - interpolation=dict(argstr="-interp %s", usedefault=True), + args=dict( + argstr="%s", + ), + df=dict( + argstr="-df %s", + usedefault=True, + ), + environ=dict( + nohash=True, + 
usedefault=True, + ), + flip=dict( + argstr="-flip %d %d %d", + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + interpolation=dict( + argstr="-interp %s", + usedefault=True, + ), out_file=dict( argstr="-out %s", extensions=None, @@ -17,11 +34,27 @@ def test_diffeoSymTensor3DVolTask_inputs(): name_source="in_file", name_template="%s_diffeoxfmd", ), - reorient=dict(argstr="-reorient %s", usedefault=True), - resampling_type=dict(argstr="-type %s"), - target=dict(argstr="-target %s", extensions=None, xor=["voxel_size"]), - transform=dict(argstr="-trans %s", extensions=None, mandatory=True), - voxel_size=dict(argstr="-vsize %g %g %g", xor=["target"]), + reorient=dict( + argstr="-reorient %s", + usedefault=True, + ), + resampling_type=dict( + argstr="-type %s", + ), + target=dict( + argstr="-target %s", + extensions=None, + xor=["voxel_size"], + ), + transform=dict( + argstr="-trans %s", + extensions=None, + mandatory=True, + ), + voxel_size=dict( + argstr="-vsize %g %g %g", + xor=["target"], + ), ) inputs = diffeoSymTensor3DVolTask.input_spec() @@ -31,7 +64,11 @@ def test_diffeoSymTensor3DVolTask_inputs(): def test_diffeoSymTensor3DVolTask_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = diffeoSymTensor3DVolTask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py b/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py index ed5ab33ead..b7895e1cf1 100644 --- a/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py +++ b/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py @@ -4,15 +4,45 @@ def test_AnalyzeWarp_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - jac=dict(argstr="-jac %s", usedefault=True), - jacmat=dict(argstr="-jacmat %s", usedefault=True), - moving_image=dict(argstr="-in %s", extensions=None), - num_threads=dict(argstr="-threads %01d", nohash=True, usedefault=True), - output_path=dict(argstr="-out %s", mandatory=True, usedefault=True), - points=dict(argstr="-def %s", position=0, usedefault=True), - transform_file=dict(argstr="-tp %s", extensions=None, mandatory=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + jac=dict( + argstr="-jac %s", + usedefault=True, + ), + jacmat=dict( + argstr="-jacmat %s", + usedefault=True, + ), + moving_image=dict( + argstr="-in %s", + extensions=None, + ), + num_threads=dict( + argstr="-threads %01d", + nohash=True, + usedefault=True, + ), + output_path=dict( + argstr="-out %s", + mandatory=True, + usedefault=True, + ), + points=dict( + argstr="-def %s", + position=0, + usedefault=True, + ), + transform_file=dict( + argstr="-tp %s", + extensions=None, + mandatory=True, + ), ) inputs = AnalyzeWarp.input_spec() @@ -23,9 +53,15 @@ def test_AnalyzeWarp_inputs(): def test_AnalyzeWarp_outputs(): output_map = dict( - disp_field=dict(extensions=None), - jacdet_map=dict(extensions=None), - jacmat_map=dict(extensions=None), + disp_field=dict( + extensions=None, + ), + jacdet_map=dict( + extensions=None, + ), + jacmat_map=dict( + extensions=None, + ), ) outputs = AnalyzeWarp.output_spec() diff --git a/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py b/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py index aaab8a1031..0eebfb0665 100644 --- a/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py +++ 
b/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py @@ -4,12 +4,33 @@ def test_ApplyWarp_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - moving_image=dict(argstr="-in %s", extensions=None, mandatory=True), - num_threads=dict(argstr="-threads %01d", nohash=True, usedefault=True), - output_path=dict(argstr="-out %s", mandatory=True, usedefault=True), - transform_file=dict(argstr="-tp %s", extensions=None, mandatory=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + moving_image=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + num_threads=dict( + argstr="-threads %01d", + nohash=True, + usedefault=True, + ), + output_path=dict( + argstr="-out %s", + mandatory=True, + usedefault=True, + ), + transform_file=dict( + argstr="-tp %s", + extensions=None, + mandatory=True, + ), ) inputs = ApplyWarp.input_spec() @@ -19,7 +40,11 @@ def test_ApplyWarp_inputs(): def test_ApplyWarp_outputs(): - output_map = dict(warped_file=dict(extensions=None)) + output_map = dict( + warped_file=dict( + extensions=None, + ), + ) outputs = ApplyWarp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/elastix/tests/test_auto_EditTransform.py b/nipype/interfaces/elastix/tests/test_auto_EditTransform.py index 2f05e789ad..a8617c4988 100644 --- a/nipype/interfaces/elastix/tests/test_auto_EditTransform.py +++ b/nipype/interfaces/elastix/tests/test_auto_EditTransform.py @@ -4,12 +4,26 @@ def test_EditTransform_inputs(): input_map = dict( - interpolation=dict(argstr="FinalBSplineInterpolationOrder", usedefault=True), - output_file=dict(extensions=None), - output_format=dict(argstr="ResultImageFormat"), - output_type=dict(argstr="ResultImagePixelType"), - reference_image=dict(extensions=None), - transform_file=dict(extensions=None, mandatory=True), + interpolation=dict( + argstr="FinalBSplineInterpolationOrder", + usedefault=True, + ), + output_file=dict( + extensions=None, + ), + output_format=dict( + argstr="ResultImageFormat", + ), + output_type=dict( + argstr="ResultImagePixelType", + ), + reference_image=dict( + extensions=None, + ), + transform_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = EditTransform.input_spec() @@ -19,7 +33,11 @@ def test_EditTransform_inputs(): def test_EditTransform_outputs(): - output_map = dict(output_file=dict(extensions=None)) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = EditTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py b/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py index 5b44e81088..bce2a3c662 100644 --- a/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py +++ b/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py @@ -4,12 +4,33 @@ def test_PointsWarp_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - num_threads=dict(argstr="-threads %01d", nohash=True, usedefault=True), - output_path=dict(argstr="-out %s", mandatory=True, usedefault=True), - points_file=dict(argstr="-def %s", extensions=None, mandatory=True), - transform_file=dict(argstr="-tp %s", extensions=None, mandatory=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict( + argstr="-threads %01d", + nohash=True, + usedefault=True, + ), + output_path=dict( + argstr="-out %s", + mandatory=True, + 
usedefault=True, + ), + points_file=dict( + argstr="-def %s", + extensions=None, + mandatory=True, + ), + transform_file=dict( + argstr="-tp %s", + extensions=None, + mandatory=True, + ), ) inputs = PointsWarp.input_spec() @@ -19,7 +40,11 @@ def test_PointsWarp_inputs(): def test_PointsWarp_outputs(): - output_map = dict(warped_file=dict(extensions=None)) + output_map = dict( + warped_file=dict( + extensions=None, + ), + ) outputs = PointsWarp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/elastix/tests/test_auto_Registration.py b/nipype/interfaces/elastix/tests/test_auto_Registration.py index 0c4759d1e7..ae6d929950 100644 --- a/nipype/interfaces/elastix/tests/test_auto_Registration.py +++ b/nipype/interfaces/elastix/tests/test_auto_Registration.py @@ -4,16 +4,49 @@ def test_Registration_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - fixed_image=dict(argstr="-f %s", extensions=None, mandatory=True), - fixed_mask=dict(argstr="-fMask %s", extensions=None), - initial_transform=dict(argstr="-t0 %s", extensions=None), - moving_image=dict(argstr="-m %s", extensions=None, mandatory=True), - moving_mask=dict(argstr="-mMask %s", extensions=None), - num_threads=dict(argstr="-threads %01d", nohash=True, usedefault=True), - output_path=dict(argstr="-out %s", mandatory=True, usedefault=True), - parameters=dict(argstr="-p %s...", mandatory=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_image=dict( + argstr="-f %s", + extensions=None, + mandatory=True, + ), + fixed_mask=dict( + argstr="-fMask %s", + extensions=None, + ), + initial_transform=dict( + argstr="-t0 %s", + extensions=None, + ), + moving_image=dict( + argstr="-m %s", + extensions=None, + mandatory=True, + ), + moving_mask=dict( + argstr="-mMask %s", + extensions=None, + ), + num_threads=dict( + argstr="-threads %01d", + nohash=True, + usedefault=True, + ), + output_path=dict( + argstr="-out %s", + mandatory=True, + usedefault=True, + ), + parameters=dict( + argstr="-p %s...", + mandatory=True, + ), ) inputs = Registration.input_spec() @@ -25,7 +58,9 @@ def test_Registration_inputs(): def test_Registration_outputs(): output_map = dict( transform=dict(), - warped_file=dict(extensions=None), + warped_file=dict( + extensions=None, + ), warped_files=dict(), warped_files_flags=dict(), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py b/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py index 42aa882edf..23618b2aa8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py @@ -4,14 +4,38 @@ def test_AddXFormToHeader_inputs(): input_map = dict( - args=dict(argstr="%s"), - copy_name=dict(argstr="-c"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - out_file=dict(argstr="%s", extensions=None, position=-1, usedefault=True), + args=dict( + argstr="%s", + ), + copy_name=dict( + argstr="-c", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + out_file=dict( + argstr="%s", + extensions=None, + position=-1, + usedefault=True, + ), subjects_dir=dict(), - transform=dict(argstr="%s", extensions=None, mandatory=True, position=-3), - verbose=dict(argstr="-v"), + transform=dict( + argstr="%s", + extensions=None, 
+ mandatory=True, + position=-3, + ), + verbose=dict( + argstr="-v", + ), ) inputs = AddXFormToHeader.input_spec() @@ -21,7 +45,11 @@ def test_AddXFormToHeader_inputs(): def test_AddXFormToHeader_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = AddXFormToHeader.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py b/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py index b89ca9877c..bc65ee096c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py @@ -4,29 +4,87 @@ def test_Aparc2Aseg_inputs(): input_map = dict( - a2009s=dict(argstr="--a2009s"), - args=dict(argstr="%s"), - aseg=dict(argstr="--aseg %s", extensions=None), + a2009s=dict( + argstr="--a2009s", + ), + args=dict( + argstr="%s", + ), + aseg=dict( + argstr="--aseg %s", + extensions=None, + ), copy_inputs=dict(), - ctxseg=dict(argstr="--ctxseg %s", extensions=None), - environ=dict(nohash=True, usedefault=True), - filled=dict(extensions=None), - hypo_wm=dict(argstr="--hypo-as-wm"), - label_wm=dict(argstr="--labelwm"), - lh_annotation=dict(extensions=None, mandatory=True), - lh_pial=dict(extensions=None, mandatory=True), - lh_ribbon=dict(extensions=None, mandatory=True), - lh_white=dict(extensions=None, mandatory=True), - out_file=dict(argstr="--o %s", extensions=None, mandatory=True), - rh_annotation=dict(extensions=None, mandatory=True), - rh_pial=dict(extensions=None, mandatory=True), - rh_ribbon=dict(extensions=None, mandatory=True), - rh_white=dict(extensions=None, mandatory=True), - ribbon=dict(extensions=None, mandatory=True), - rip_unknown=dict(argstr="--rip-unknown"), - subject_id=dict(argstr="--s %s", mandatory=True, usedefault=True), + ctxseg=dict( + argstr="--ctxseg %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + filled=dict( + extensions=None, + ), + hypo_wm=dict( + argstr="--hypo-as-wm", + ), + label_wm=dict( + argstr="--labelwm", + ), + lh_annotation=dict( + extensions=None, + mandatory=True, + ), + lh_pial=dict( + extensions=None, + mandatory=True, + ), + lh_ribbon=dict( + extensions=None, + mandatory=True, + ), + lh_white=dict( + extensions=None, + mandatory=True, + ), + out_file=dict( + argstr="--o %s", + extensions=None, + mandatory=True, + ), + rh_annotation=dict( + extensions=None, + mandatory=True, + ), + rh_pial=dict( + extensions=None, + mandatory=True, + ), + rh_ribbon=dict( + extensions=None, + mandatory=True, + ), + rh_white=dict( + extensions=None, + mandatory=True, + ), + ribbon=dict( + extensions=None, + mandatory=True, + ), + rip_unknown=dict( + argstr="--rip-unknown", + ), + subject_id=dict( + argstr="--s %s", + mandatory=True, + usedefault=True, + ), subjects_dir=dict(), - volmask=dict(argstr="--volmask"), + volmask=dict( + argstr="--volmask", + ), ) inputs = Aparc2Aseg.input_spec() @@ -36,7 +94,12 @@ def test_Aparc2Aseg_inputs(): def test_Aparc2Aseg_outputs(): - output_map = dict(out_file=dict(argstr="%s", extensions=None)) + output_map = dict( + out_file=dict( + argstr="%s", + extensions=None, + ), + ) outputs = Aparc2Aseg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py b/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py index dd9ebb4ae9..e2738ac746 100644 --- 
a/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py @@ -4,10 +4,23 @@ def test_Apas2Aseg_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="--i %s", extensions=None, mandatory=True), - out_file=dict(argstr="--o %s", extensions=None, mandatory=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="--i %s", + extensions=None, + mandatory=True, + ), + out_file=dict( + argstr="--o %s", + extensions=None, + mandatory=True, + ), subjects_dir=dict(), ) inputs = Apas2Aseg.input_spec() @@ -18,7 +31,12 @@ def test_Apas2Aseg_inputs(): def test_Apas2Aseg_outputs(): - output_map = dict(out_file=dict(argstr="%s", extensions=None)) + output_map = dict( + out_file=dict( + argstr="%s", + extensions=None, + ), + ) outputs = Apas2Aseg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py b/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py index 952ee81323..b3a8e80806 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py @@ -4,13 +4,34 @@ def test_ApplyMask_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), - invert_xfm=dict(argstr="-invert"), - keep_mask_deletion_edits=dict(argstr="-keep_mask_deletion_edits"), - mask_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - mask_thresh=dict(argstr="-T %.4f"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + invert_xfm=dict( + argstr="-invert", + ), + keep_mask_deletion_edits=dict( + argstr="-keep_mask_deletion_edits", + ), + mask_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + mask_thresh=dict( + argstr="-T %.4f", + ), out_file=dict( argstr="%s", extensions=None, @@ -21,11 +42,24 @@ def test_ApplyMask_inputs(): position=-1, ), subjects_dir=dict(), - transfer=dict(argstr="-transfer %d"), - use_abs=dict(argstr="-abs"), - xfm_file=dict(argstr="-xform %s", extensions=None), - xfm_source=dict(argstr="-lta_src %s", extensions=None), - xfm_target=dict(argstr="-lta_dst %s", extensions=None), + transfer=dict( + argstr="-transfer %d", + ), + use_abs=dict( + argstr="-abs", + ), + xfm_file=dict( + argstr="-xform %s", + extensions=None, + ), + xfm_source=dict( + argstr="-lta_src %s", + extensions=None, + ), + xfm_target=dict( + argstr="-lta_dst %s", + extensions=None, + ), ) inputs = ApplyMask.input_spec() @@ -35,7 +69,11 @@ def test_ApplyMask_inputs(): def test_ApplyMask_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ApplyMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py b/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py index 02c09ab30f..2aa4ae59f8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py @@ -4,8 +4,13 @@ def test_ApplyVolTransform_inputs(): input_map = dict( - args=dict(argstr="%s"), - 
environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), fs_target=dict( argstr="--fstarg", mandatory=True, @@ -27,9 +32,16 @@ def test_ApplyVolTransform_inputs(): "subject", ), ), - interp=dict(argstr="--interp %s"), - inverse=dict(argstr="--inv"), - invert_morph=dict(argstr="--inv-morph", requires=["m3z_file"]), + interp=dict( + argstr="--interp %s", + ), + inverse=dict( + argstr="--inv", + ), + invert_morph=dict( + argstr="--inv-morph", + requires=["m3z_file"], + ), lta_file=dict( argstr="--lta %s", extensions=None, @@ -60,7 +72,10 @@ def test_ApplyVolTransform_inputs(): "subject", ), ), - m3z_file=dict(argstr="--m3z %s", extensions=None), + m3z_file=dict( + argstr="--m3z %s", + extensions=None, + ), mni_152_reg=dict( argstr="--regheader", mandatory=True, @@ -75,8 +90,13 @@ def test_ApplyVolTransform_inputs(): "subject", ), ), - no_ded_m3z_path=dict(argstr="--noDefM3zPath", requires=["m3z_file"]), - no_resample=dict(argstr="--no-resample"), + no_ded_m3z_path=dict( + argstr="--noDefM3zPath", + requires=["m3z_file"], + ), + no_resample=dict( + argstr="--no-resample", + ), reg_file=dict( argstr="--reg %s", extensions=None, @@ -107,7 +127,10 @@ def test_ApplyVolTransform_inputs(): ), ), source_file=dict( - argstr="--mov %s", copyfile=False, extensions=None, mandatory=True + argstr="--mov %s", + copyfile=False, + extensions=None, + mandatory=True, ), subject=dict( argstr="--s %s", @@ -125,16 +148,24 @@ def test_ApplyVolTransform_inputs(): ), subjects_dir=dict(), tal=dict( - argstr="--tal", mandatory=True, xor=("target_file", "tal", "fs_target") + argstr="--tal", + mandatory=True, + xor=("target_file", "tal", "fs_target"), + ), + tal_resolution=dict( + argstr="--talres %.10f", ), - tal_resolution=dict(argstr="--talres %.10f"), target_file=dict( argstr="--targ %s", extensions=None, mandatory=True, xor=("target_file", "tal", "fs_target"), ), - transformed_file=dict(argstr="--o %s", extensions=None, genfile=True), + transformed_file=dict( + argstr="--o %s", + extensions=None, + genfile=True, + ), xfm_reg_file=dict( argstr="--xfm %s", extensions=None, @@ -159,7 +190,11 @@ def test_ApplyVolTransform_inputs(): def test_ApplyVolTransform_outputs(): - output_map = dict(transformed_file=dict(extensions=None)) + output_map = dict( + transformed_file=dict( + extensions=None, + ), + ) outputs = ApplyVolTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py b/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py index 06c22a2d7b..e7a95d1a3d 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py @@ -4,35 +4,102 @@ def test_Binarize_inputs(): input_map = dict( - abs=dict(argstr="--abs"), - args=dict(argstr="%s"), - bin_col_num=dict(argstr="--bincol"), - bin_val=dict(argstr="--binval %d"), - bin_val_not=dict(argstr="--binvalnot %d"), - binary_file=dict(argstr="--o %s", extensions=None, genfile=True), - count_file=dict(argstr="--count %s"), - dilate=dict(argstr="--dilate %d"), - environ=dict(nohash=True, usedefault=True), - erode=dict(argstr="--erode %d"), - erode2d=dict(argstr="--erode2d %d"), - frame_no=dict(argstr="--frame %s"), - in_file=dict(argstr="--i %s", copyfile=False, extensions=None, mandatory=True), - invert=dict(argstr="--inv"), - mask_file=dict(argstr="--mask maskvol", extensions=None), - mask_thresh=dict(argstr="--mask-thresh %f"), - 
match=dict(argstr="--match %d..."), - max=dict(argstr="--max %f", xor=["wm_ven_csf"]), - merge_file=dict(argstr="--merge %s", extensions=None), - min=dict(argstr="--min %f", xor=["wm_ven_csf"]), - out_type=dict(argstr=""), - rmax=dict(argstr="--rmax %f"), - rmin=dict(argstr="--rmin %f"), + abs=dict( + argstr="--abs", + ), + args=dict( + argstr="%s", + ), + bin_col_num=dict( + argstr="--bincol", + ), + bin_val=dict( + argstr="--binval %d", + ), + bin_val_not=dict( + argstr="--binvalnot %d", + ), + binary_file=dict( + argstr="--o %s", + extensions=None, + genfile=True, + ), + count_file=dict( + argstr="--count %s", + ), + dilate=dict( + argstr="--dilate %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + erode=dict( + argstr="--erode %d", + ), + erode2d=dict( + argstr="--erode2d %d", + ), + frame_no=dict( + argstr="--frame %s", + ), + in_file=dict( + argstr="--i %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + invert=dict( + argstr="--inv", + ), + mask_file=dict( + argstr="--mask maskvol", + extensions=None, + ), + mask_thresh=dict( + argstr="--mask-thresh %f", + ), + match=dict( + argstr="--match %d...", + ), + max=dict( + argstr="--max %f", + xor=["wm_ven_csf"], + ), + merge_file=dict( + argstr="--merge %s", + extensions=None, + ), + min=dict( + argstr="--min %f", + xor=["wm_ven_csf"], + ), + out_type=dict( + argstr="", + ), + rmax=dict( + argstr="--rmax %f", + ), + rmin=dict( + argstr="--rmin %f", + ), subjects_dir=dict(), - ventricles=dict(argstr="--ventricles"), - wm=dict(argstr="--wm"), - wm_ven_csf=dict(argstr="--wm+vcsf", xor=["min", "max"]), - zero_edges=dict(argstr="--zero-edges"), - zero_slice_edge=dict(argstr="--zero-slice-edges"), + ventricles=dict( + argstr="--ventricles", + ), + wm=dict( + argstr="--wm", + ), + wm_ven_csf=dict( + argstr="--wm+vcsf", + xor=["min", "max"], + ), + zero_edges=dict( + argstr="--zero-edges", + ), + zero_slice_edge=dict( + argstr="--zero-slice-edges", + ), ) inputs = Binarize.input_spec() @@ -43,7 +110,12 @@ def test_Binarize_inputs(): def test_Binarize_outputs(): output_map = dict( - binary_file=dict(extensions=None), count_file=dict(extensions=None) + binary_file=dict( + extensions=None, + ), + count_file=dict( + extensions=None, + ), ) outputs = Binarize.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py b/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py index f03acba7d3..d89e51841b 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py @@ -4,22 +4,67 @@ def test_CALabel_inputs(): input_map = dict( - align=dict(argstr="-align"), - args=dict(argstr="%s"), - aseg=dict(argstr="-aseg %s", extensions=None), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-4), - in_vol=dict(argstr="-r %s", extensions=None), - intensities=dict(argstr="-r %s", extensions=None), - label=dict(argstr="-l %s", extensions=None), - no_big_ventricles=dict(argstr="-nobigventricles"), + align=dict( + argstr="-align", + ), + args=dict( + argstr="%s", + ), + aseg=dict( + argstr="-aseg %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-4, + ), + in_vol=dict( + argstr="-r %s", + extensions=None, + ), + intensities=dict( + argstr="-r %s", + extensions=None, + ), + label=dict( + argstr="-l %s", + extensions=None, + ), + no_big_ventricles=dict( + 
argstr="-nobigventricles", + ), num_threads=dict(), - out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), - prior=dict(argstr="-prior %.1f"), - relabel_unlikely=dict(argstr="-relabel_unlikely %d %.1f"), + out_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), + prior=dict( + argstr="-prior %.1f", + ), + relabel_unlikely=dict( + argstr="-relabel_unlikely %d %.1f", + ), subjects_dir=dict(), - template=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - transform=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + template=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + transform=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), ) inputs = CALabel.input_spec() @@ -29,7 +74,11 @@ def test_CALabel_inputs(): def test_CALabel_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = CALabel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py b/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py index 2b35b6b2fa..58788da3a4 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py @@ -4,13 +4,37 @@ def test_CANormalize_inputs(): input_map = dict( - args=dict(argstr="%s"), - atlas=dict(argstr="%s", extensions=None, mandatory=True, position=-3), - control_points=dict(argstr="-c %s", extensions=None), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-4), - long_file=dict(argstr="-long %s", extensions=None), - mask=dict(argstr="-mask %s", extensions=None), + args=dict( + argstr="%s", + ), + atlas=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + control_points=dict( + argstr="-c %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-4, + ), + long_file=dict( + argstr="-long %s", + extensions=None, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), out_file=dict( argstr="%s", extensions=None, @@ -21,7 +45,12 @@ def test_CANormalize_inputs(): position=-1, ), subjects_dir=dict(), - transform=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + transform=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), ) inputs = CANormalize.input_spec() @@ -32,7 +61,12 @@ def test_CANormalize_inputs(): def test_CANormalize_outputs(): output_map = dict( - control_points=dict(extensions=None), out_file=dict(extensions=None) + control_points=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), ) outputs = CANormalize.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py b/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py index c0e7b76b5c..3f410524f0 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py @@ -4,21 +4,59 @@ def test_CARegister_inputs(): input_map = dict( - A=dict(argstr="-A %d"), - align=dict(argstr="-align-%s"), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), - invert_and_save=dict(argstr="-invert-and-save", position=-4), - 
l_files=dict(argstr="-l %s"), - levels=dict(argstr="-levels %d"), - mask=dict(argstr="-mask %s", extensions=None), - no_big_ventricles=dict(argstr="-nobigventricles"), + A=dict( + argstr="-A %d", + ), + align=dict( + argstr="-align-%s", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + invert_and_save=dict( + argstr="-invert-and-save", + position=-4, + ), + l_files=dict( + argstr="-l %s", + ), + levels=dict( + argstr="-levels %d", + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + no_big_ventricles=dict( + argstr="-nobigventricles", + ), num_threads=dict(), - out_file=dict(argstr="%s", extensions=None, genfile=True, position=-1), + out_file=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), subjects_dir=dict(), - template=dict(argstr="%s", extensions=None, position=-2), - transform=dict(argstr="-T %s", extensions=None), + template=dict( + argstr="%s", + extensions=None, + position=-2, + ), + transform=dict( + argstr="-T %s", + extensions=None, + ), ) inputs = CARegister.input_spec() @@ -28,7 +66,11 @@ def test_CARegister_inputs(): def test_CARegister_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = CARegister.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py b/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py index 338ceb29be..d14e203079 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py @@ -4,8 +4,13 @@ def test_CheckTalairachAlignment_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( argstr="-xfm %s", extensions=None, @@ -13,9 +18,17 @@ def test_CheckTalairachAlignment_inputs(): position=-1, xor=["subject"], ), - subject=dict(argstr="-subj %s", mandatory=True, position=-1, xor=["in_file"]), + subject=dict( + argstr="-subj %s", + mandatory=True, + position=-1, + xor=["in_file"], + ), subjects_dir=dict(), - threshold=dict(argstr="-T %.3f", usedefault=True), + threshold=dict( + argstr="-T %.3f", + usedefault=True, + ), ) inputs = CheckTalairachAlignment.input_spec() @@ -25,7 +38,11 @@ def test_CheckTalairachAlignment_inputs(): def test_CheckTalairachAlignment_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = CheckTalairachAlignment.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py b/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py index c2a434cedf..992f3e308c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py @@ -4,26 +4,70 @@ def test_Concatenate_inputs(): input_map = dict( - add_val=dict(argstr="--add %f"), - args=dict(argstr="%s"), - combine=dict(argstr="--combine"), - concatenated_file=dict(argstr="--o %s", extensions=None, genfile=True), - environ=dict(nohash=True, usedefault=True), - gmean=dict(argstr="--gmean %d"), - in_files=dict(argstr="--i %s...", mandatory=True), - 
keep_dtype=dict(argstr="--keep-datatype"), - mask_file=dict(argstr="--mask %s", extensions=None), - max_bonfcor=dict(argstr="--max-bonfcor"), - max_index=dict(argstr="--max-index"), - mean_div_n=dict(argstr="--mean-div-n"), - multiply_by=dict(argstr="--mul %f"), - multiply_matrix_file=dict(argstr="--mtx %s", extensions=None), - paired_stats=dict(argstr="--paired-%s"), - sign=dict(argstr="--%s"), - sort=dict(argstr="--sort"), - stats=dict(argstr="--%s"), + add_val=dict( + argstr="--add %f", + ), + args=dict( + argstr="%s", + ), + combine=dict( + argstr="--combine", + ), + concatenated_file=dict( + argstr="--o %s", + extensions=None, + genfile=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gmean=dict( + argstr="--gmean %d", + ), + in_files=dict( + argstr="--i %s...", + mandatory=True, + ), + keep_dtype=dict( + argstr="--keep-datatype", + ), + mask_file=dict( + argstr="--mask %s", + extensions=None, + ), + max_bonfcor=dict( + argstr="--max-bonfcor", + ), + max_index=dict( + argstr="--max-index", + ), + mean_div_n=dict( + argstr="--mean-div-n", + ), + multiply_by=dict( + argstr="--mul %f", + ), + multiply_matrix_file=dict( + argstr="--mtx %s", + extensions=None, + ), + paired_stats=dict( + argstr="--paired-%s", + ), + sign=dict( + argstr="--%s", + ), + sort=dict( + argstr="--sort", + ), + stats=dict( + argstr="--%s", + ), subjects_dir=dict(), - vote=dict(argstr="--vote"), + vote=dict( + argstr="--vote", + ), ) inputs = Concatenate.input_spec() @@ -33,7 +77,11 @@ def test_Concatenate_inputs(): def test_Concatenate_outputs(): - output_map = dict(concatenated_file=dict(extensions=None)) + output_map = dict( + concatenated_file=dict( + extensions=None, + ), + ) outputs = Concatenate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py b/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py index 848eb2eb00..dad221b734 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py @@ -4,13 +4,33 @@ def test_ConcatenateLTA_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_lta1=dict(argstr="%s", extensions=None, mandatory=True, position=-3), - in_lta2=dict(argstr="%s", mandatory=True, position=-2), - invert_1=dict(argstr="-invert1"), - invert_2=dict(argstr="-invert2"), - invert_out=dict(argstr="-invertout"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_lta1=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + in_lta2=dict( + argstr="%s", + mandatory=True, + position=-2, + ), + invert_1=dict( + argstr="-invert1", + ), + invert_2=dict( + argstr="-invert2", + ), + invert_out=dict( + argstr="-invertout", + ), out_file=dict( argstr="%s", extensions=None, @@ -20,8 +40,12 @@ def test_ConcatenateLTA_inputs(): name_template="%s_concat", position=-1, ), - out_type=dict(argstr="-out_type %d"), - subject=dict(argstr="-subject %s"), + out_type=dict( + argstr="-out_type %d", + ), + subject=dict( + argstr="-subject %s", + ), subjects_dir=dict(), tal_source_file=dict( argstr="-tal %s", @@ -30,7 +54,10 @@ def test_ConcatenateLTA_inputs(): requires=["tal_template_file"], ), tal_template_file=dict( - argstr="%s", extensions=None, position=-4, requires=["tal_source_file"] + argstr="%s", + extensions=None, + position=-4, + requires=["tal_source_file"], ), ) inputs = ConcatenateLTA.input_spec() @@ 
-41,7 +68,11 @@ def test_ConcatenateLTA_inputs(): def test_ConcatenateLTA_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ConcatenateLTA.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py b/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py index ed6055072f..8409f26757 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py @@ -4,18 +4,48 @@ def test_Contrast_inputs(): input_map = dict( - annotation=dict(extensions=None, mandatory=True), - args=dict(argstr="%s"), + annotation=dict( + extensions=None, + mandatory=True, + ), + args=dict( + argstr="%s", + ), copy_inputs=dict(), - cortex=dict(extensions=None, mandatory=True), - environ=dict(nohash=True, usedefault=True), - hemisphere=dict(argstr="--%s-only", mandatory=True), - orig=dict(extensions=None, mandatory=True), - rawavg=dict(extensions=None, mandatory=True), - subject_id=dict(argstr="--s %s", mandatory=True, usedefault=True), + cortex=dict( + extensions=None, + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hemisphere=dict( + argstr="--%s-only", + mandatory=True, + ), + orig=dict( + extensions=None, + mandatory=True, + ), + rawavg=dict( + extensions=None, + mandatory=True, + ), + subject_id=dict( + argstr="--s %s", + mandatory=True, + usedefault=True, + ), subjects_dir=dict(), - thickness=dict(extensions=None, mandatory=True), - white=dict(extensions=None, mandatory=True), + thickness=dict( + extensions=None, + mandatory=True, + ), + white=dict( + extensions=None, + mandatory=True, + ), ) inputs = Contrast.input_spec() @@ -26,9 +56,15 @@ def test_Contrast_inputs(): def test_Contrast_outputs(): output_map = dict( - out_contrast=dict(extensions=None), - out_log=dict(extensions=None), - out_stats=dict(extensions=None), + out_contrast=dict( + extensions=None, + ), + out_log=dict( + extensions=None, + ), + out_stats=dict( + extensions=None, + ), ) outputs = Contrast.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py b/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py index 50145374d5..c230edb8ba 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py @@ -4,18 +4,37 @@ def test_Curvature_inputs(): input_map = dict( - args=dict(argstr="%s"), - averages=dict(argstr="-a %d"), + args=dict( + argstr="%s", + ), + averages=dict( + argstr="-a %d", + ), copy_input=dict(), - distances=dict(argstr="-distances %d %d"), - environ=dict(nohash=True, usedefault=True), + distances=dict( + argstr="-distances %d %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-2 + argstr="%s", + copyfile=True, + extensions=None, + mandatory=True, + position=-2, + ), + n=dict( + argstr="-n", + ), + save=dict( + argstr="-w", ), - n=dict(argstr="-n"), - save=dict(argstr="-w"), subjects_dir=dict(), - threshold=dict(argstr="-thresh %.3f"), + threshold=dict( + argstr="-thresh %.3f", + ), ) inputs = Curvature.input_spec() @@ -25,7 +44,14 @@ def test_Curvature_inputs(): def test_Curvature_outputs(): - output_map = dict(out_gauss=dict(extensions=None), out_mean=dict(extensions=None)) + output_map = dict( + out_gauss=dict( + extensions=None, + ), + out_mean=dict( + extensions=None, + ), 
+ ) outputs = Curvature.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py b/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py index b8dea1cb96..4e8e3d5bc2 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py @@ -4,13 +4,34 @@ def test_CurvatureStats_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), copy_inputs=dict(), - curvfile1=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - curvfile2=dict(argstr="%s", extensions=None, mandatory=True, position=-1), - environ=dict(nohash=True, usedefault=True), - hemisphere=dict(argstr="%s", mandatory=True, position=-3), - min_max=dict(argstr="-m"), + curvfile1=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + curvfile2=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hemisphere=dict( + argstr="%s", + mandatory=True, + position=-3, + ), + min_max=dict( + argstr="-m", + ), out_file=dict( argstr="-o %s", extensions=None, @@ -18,11 +39,23 @@ def test_CurvatureStats_inputs(): name_source=["hemisphere"], name_template="%s.curv.stats", ), - subject_id=dict(argstr="%s", mandatory=True, position=-4, usedefault=True), + subject_id=dict( + argstr="%s", + mandatory=True, + position=-4, + usedefault=True, + ), subjects_dir=dict(), - surface=dict(argstr="-F %s", extensions=None), - values=dict(argstr="-G"), - write=dict(argstr="--writeCurvatureFiles"), + surface=dict( + argstr="-F %s", + extensions=None, + ), + values=dict( + argstr="-G", + ), + write=dict( + argstr="--writeCurvatureFiles", + ), ) inputs = CurvatureStats.input_spec() @@ -32,7 +65,11 @@ def test_CurvatureStats_inputs(): def test_CurvatureStats_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = CurvatureStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py index 49654f2af3..bda2620fe1 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py @@ -4,16 +4,35 @@ def test_DICOMConvert_inputs(): input_map = dict( - args=dict(argstr="%s"), - base_output_dir=dict(mandatory=True), - dicom_dir=dict(mandatory=True), - dicom_info=dict(extensions=None), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + base_output_dir=dict( + mandatory=True, + ), + dicom_dir=dict( + mandatory=True, + ), + dicom_info=dict( + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), file_mapping=dict(), - ignore_single_slice=dict(requires=["dicom_info"]), - out_type=dict(usedefault=True), - seq_list=dict(requires=["dicom_info"]), - subject_dir_template=dict(usedefault=True), + ignore_single_slice=dict( + requires=["dicom_info"], + ), + out_type=dict( + usedefault=True, + ), + seq_list=dict( + requires=["dicom_info"], + ), + subject_dir_template=dict( + usedefault=True, + ), subject_id=dict(), subjects_dir=dict(), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py b/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py index 84b757c3b0..7bf1b895f1 100644 --- 
a/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py @@ -4,11 +4,26 @@ def test_EMRegister_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), - mask=dict(argstr="-mask %s", extensions=None), - nbrspacing=dict(argstr="-uns %d"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + nbrspacing=dict( + argstr="-uns %d", + ), num_threads=dict(), out_file=dict( argstr="%s", @@ -19,10 +34,20 @@ def test_EMRegister_inputs(): name_template="%s_transform.lta", position=-1, ), - skull=dict(argstr="-skull"), + skull=dict( + argstr="-skull", + ), subjects_dir=dict(), - template=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - transform=dict(argstr="-t %s", extensions=None), + template=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + transform=dict( + argstr="-t %s", + extensions=None, + ), ) inputs = EMRegister.input_spec() @@ -32,7 +57,11 @@ def test_EMRegister_inputs(): def test_EMRegister_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = EMRegister.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py b/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py index c9906d9a05..5c51ed848d 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py @@ -4,13 +4,40 @@ def test_EditWMwithAseg_inputs(): input_map = dict( - args=dict(argstr="%s"), - brain_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-4), - keep_in=dict(argstr="-keep-in"), - out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), - seg_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + args=dict( + argstr="%s", + ), + brain_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-4, + ), + keep_in=dict( + argstr="-keep-in", + ), + out_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), + seg_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), subjects_dir=dict(), ) inputs = EditWMwithAseg.input_spec() @@ -21,7 +48,11 @@ def test_EditWMwithAseg_inputs(): def test_EditWMwithAseg_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = EditWMwithAseg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py b/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py index b1168b6ad8..9d05019824 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py @@ -4,9 +4,19 @@ def test_EulerNumber_inputs(): input_map = dict( - 
args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), subjects_dir=dict(), ) inputs = EulerNumber.input_spec() @@ -17,7 +27,10 @@ def test_EulerNumber_inputs(): def test_EulerNumber_outputs(): - output_map = dict(defects=dict(), euler=dict()) + output_map = dict( + defects=dict(), + euler=dict(), + ) outputs = EulerNumber.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py b/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py index d7d682d9bc..59997ad5b4 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py @@ -4,9 +4,19 @@ def test_ExtractMainComponent_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), out_file=dict( argstr="%s", extensions=None, @@ -23,7 +33,11 @@ def test_ExtractMainComponent_inputs(): def test_ExtractMainComponent_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ExtractMainComponent.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py b/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py index 6be464aa79..aa53727cc8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py @@ -4,8 +4,13 @@ def test_FSCommand_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), subjects_dir=dict(), ) inputs = FSCommand.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py b/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py index 1405ee51e2..f61b52c1ea 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py @@ -4,8 +4,13 @@ def test_FSCommandOpenMP_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), num_threads=dict(), subjects_dir=dict(), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py b/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py index e761c7e82d..03cf55eb69 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py @@ -4,8 +4,13 @@ def test_FSScriptCommand_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), subjects_dir=dict(), ) inputs = FSScriptCommand.input_spec() diff --git 
a/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py b/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py index dc2af5fb8d..7842c5333a 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py @@ -4,11 +4,24 @@ def test_FitMSParams_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), flip_list=dict(), - in_files=dict(argstr="%s", mandatory=True, position=-2), - out_dir=dict(argstr="%s", genfile=True, position=-1), + in_files=dict( + argstr="%s", + mandatory=True, + position=-2, + ), + out_dir=dict( + argstr="%s", + genfile=True, + position=-1, + ), subjects_dir=dict(), te_list=dict(), tr_list=dict(), @@ -23,9 +36,15 @@ def test_FitMSParams_inputs(): def test_FitMSParams_outputs(): output_map = dict( - pd_image=dict(extensions=None), - t1_image=dict(extensions=None), - t2star_image=dict(extensions=None), + pd_image=dict( + extensions=None, + ), + t1_image=dict( + extensions=None, + ), + t2star_image=dict( + extensions=None, + ), ) outputs = FitMSParams.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py b/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py index 063254527a..0037c02270 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py @@ -4,19 +4,56 @@ def test_FixTopology_inputs(): input_map = dict( - args=dict(argstr="%s"), - copy_inputs=dict(mandatory=True), - environ=dict(nohash=True, usedefault=True), - ga=dict(argstr="-ga"), - hemisphere=dict(argstr="%s", mandatory=True, position=-1), - in_brain=dict(extensions=None, mandatory=True), - in_inflated=dict(extensions=None, mandatory=True), - in_orig=dict(extensions=None, mandatory=True), - in_wm=dict(extensions=None, mandatory=True), - mgz=dict(argstr="-mgz"), - seed=dict(argstr="-seed %d"), - sphere=dict(argstr="-sphere %s", extensions=None), - subject_id=dict(argstr="%s", mandatory=True, position=-2, usedefault=True), + args=dict( + argstr="%s", + ), + copy_inputs=dict( + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ga=dict( + argstr="-ga", + ), + hemisphere=dict( + argstr="%s", + mandatory=True, + position=-1, + ), + in_brain=dict( + extensions=None, + mandatory=True, + ), + in_inflated=dict( + extensions=None, + mandatory=True, + ), + in_orig=dict( + extensions=None, + mandatory=True, + ), + in_wm=dict( + extensions=None, + mandatory=True, + ), + mgz=dict( + argstr="-mgz", + ), + seed=dict( + argstr="-seed %d", + ), + sphere=dict( + argstr="-sphere %s", + extensions=None, + ), + subject_id=dict( + argstr="%s", + mandatory=True, + position=-2, + usedefault=True, + ), subjects_dir=dict(), ) inputs = FixTopology.input_spec() @@ -27,7 +64,11 @@ def test_FixTopology_inputs(): def test_FixTopology_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = FixTopology.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py b/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py index 90a4e0ca3c..12550be8b3 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py @@ -4,15 +4,40 @@ def 
test_FuseSegmentations_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_norms=dict(argstr="-n %s", mandatory=True), - in_segmentations=dict(argstr="-a %s", mandatory=True), - in_segmentations_noCC=dict(argstr="-c %s", mandatory=True), - out_file=dict(extensions=None, mandatory=True, position=-1), - subject_id=dict(argstr="%s", position=-3), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_norms=dict( + argstr="-n %s", + mandatory=True, + ), + in_segmentations=dict( + argstr="-a %s", + mandatory=True, + ), + in_segmentations_noCC=dict( + argstr="-c %s", + mandatory=True, + ), + out_file=dict( + extensions=None, + mandatory=True, + position=-1, + ), + subject_id=dict( + argstr="%s", + position=-3, + ), subjects_dir=dict(), - timepoints=dict(argstr="%s", mandatory=True, position=-2), + timepoints=dict( + argstr="%s", + mandatory=True, + position=-2, + ), ) inputs = FuseSegmentations.input_spec() @@ -22,7 +47,11 @@ def test_FuseSegmentations_inputs(): def test_FuseSegmentations_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = FuseSegmentations.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py b/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py index 319efa2a8d..21c41eb691 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py @@ -4,68 +4,192 @@ def test_GLMFit_inputs(): input_map = dict( - allow_ill_cond=dict(argstr="--illcond"), - allow_repeated_subjects=dict(argstr="--allowsubjrep"), - args=dict(argstr="%s"), - calc_AR1=dict(argstr="--tar1"), - check_opts=dict(argstr="--checkopts"), - compute_log_y=dict(argstr="--logy"), - contrast=dict(argstr="--C %s..."), - cortex=dict(argstr="--cortex", xor=["label_file"]), - debug=dict(argstr="--debug"), + allow_ill_cond=dict( + argstr="--illcond", + ), + allow_repeated_subjects=dict( + argstr="--allowsubjrep", + ), + args=dict( + argstr="%s", + ), + calc_AR1=dict( + argstr="--tar1", + ), + check_opts=dict( + argstr="--checkopts", + ), + compute_log_y=dict( + argstr="--logy", + ), + contrast=dict( + argstr="--C %s...", + ), + cortex=dict( + argstr="--cortex", + xor=["label_file"], + ), + debug=dict( + argstr="--debug", + ), design=dict( - argstr="--X %s", extensions=None, xor=("fsgd", "design", "one_sample") + argstr="--X %s", + extensions=None, + xor=("fsgd", "design", "one_sample"), + ), + diag=dict( + argstr="--diag %d", + ), + diag_cluster=dict( + argstr="--diag-cluster", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_fx_dof=dict( + argstr="--ffxdof %d", + xor=["fixed_fx_dof_file"], ), - diag=dict(argstr="--diag %d"), - diag_cluster=dict(argstr="--diag-cluster"), - environ=dict(nohash=True, usedefault=True), - fixed_fx_dof=dict(argstr="--ffxdof %d", xor=["fixed_fx_dof_file"]), fixed_fx_dof_file=dict( - argstr="--ffxdofdat %d", extensions=None, xor=["fixed_fx_dof"] + argstr="--ffxdofdat %d", + extensions=None, + xor=["fixed_fx_dof"], + ), + fixed_fx_var=dict( + argstr="--yffxvar %s", + extensions=None, + ), + force_perm=dict( + argstr="--perm-force", + ), + fsgd=dict( + argstr="--fsgd %s %s", + xor=("fsgd", "design", "one_sample"), + ), + fwhm=dict( + argstr="--fwhm %f", + ), + glm_dir=dict( + argstr="--glmdir %s", + genfile=True, ), - fixed_fx_var=dict(argstr="--yffxvar %s", 
extensions=None), - force_perm=dict(argstr="--perm-force"), - fsgd=dict(argstr="--fsgd %s %s", xor=("fsgd", "design", "one_sample")), - fwhm=dict(argstr="--fwhm %f"), - glm_dir=dict(argstr="--glmdir %s", genfile=True), hemi=dict(), - in_file=dict(argstr="--y %s", copyfile=False, extensions=None, mandatory=True), - invert_mask=dict(argstr="--mask-inv"), - label_file=dict(argstr="--label %s", extensions=None, xor=["cortex"]), - mask_file=dict(argstr="--mask %s", extensions=None), - no_contrast_ok=dict(argstr="--no-contrasts-ok"), - no_est_fwhm=dict(argstr="--no-est-fwhm"), - no_mask_smooth=dict(argstr="--no-mask-smooth"), - no_prune=dict(argstr="--no-prune", xor=["prunethresh"]), + in_file=dict( + argstr="--y %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + invert_mask=dict( + argstr="--mask-inv", + ), + label_file=dict( + argstr="--label %s", + extensions=None, + xor=["cortex"], + ), + mask_file=dict( + argstr="--mask %s", + extensions=None, + ), + no_contrast_ok=dict( + argstr="--no-contrasts-ok", + ), + no_est_fwhm=dict( + argstr="--no-est-fwhm", + ), + no_mask_smooth=dict( + argstr="--no-mask-smooth", + ), + no_prune=dict( + argstr="--no-prune", + xor=["prunethresh"], + ), one_sample=dict( - argstr="--osgm", xor=("one_sample", "fsgd", "design", "contrast") - ), - pca=dict(argstr="--pca"), - per_voxel_reg=dict(argstr="--pvr %s..."), - profile=dict(argstr="--profile %d"), - prune=dict(argstr="--prune"), - prune_thresh=dict(argstr="--prune_thr %f", xor=["noprune"]), - resynth_test=dict(argstr="--resynthtest %d"), - save_cond=dict(argstr="--save-cond"), - save_estimate=dict(argstr="--yhat-save"), - save_res_corr_mtx=dict(argstr="--eres-scm"), - save_residual=dict(argstr="--eres-save"), - seed=dict(argstr="--seed %d"), - self_reg=dict(argstr="--selfreg %d %d %d"), - sim_done_file=dict(argstr="--sim-done %s", extensions=None), - sim_sign=dict(argstr="--sim-sign %s"), - simulation=dict(argstr="--sim %s %d %f %s"), + argstr="--osgm", + xor=("one_sample", "fsgd", "design", "contrast"), + ), + pca=dict( + argstr="--pca", + ), + per_voxel_reg=dict( + argstr="--pvr %s...", + ), + profile=dict( + argstr="--profile %d", + ), + prune=dict( + argstr="--prune", + ), + prune_thresh=dict( + argstr="--prune_thr %f", + xor=["noprune"], + ), + resynth_test=dict( + argstr="--resynthtest %d", + ), + save_cond=dict( + argstr="--save-cond", + ), + save_estimate=dict( + argstr="--yhat-save", + ), + save_res_corr_mtx=dict( + argstr="--eres-scm", + ), + save_residual=dict( + argstr="--eres-save", + ), + seed=dict( + argstr="--seed %d", + ), + self_reg=dict( + argstr="--selfreg %d %d %d", + ), + sim_done_file=dict( + argstr="--sim-done %s", + extensions=None, + ), + sim_sign=dict( + argstr="--sim-sign %s", + ), + simulation=dict( + argstr="--sim %s %d %f %s", + ), subject_id=dict(), subjects_dir=dict(), - surf=dict(argstr="--surf %s %s %s", requires=["subject_id", "hemi"]), - surf_geo=dict(usedefault=True), - synth=dict(argstr="--synth"), - uniform=dict(argstr="--uniform %f %f"), - var_fwhm=dict(argstr="--var-fwhm %f"), - vox_dump=dict(argstr="--voxdump %d %d %d"), - weight_file=dict(extensions=None, xor=["weighted_ls"]), - weight_inv=dict(argstr="--w-inv", xor=["weighted_ls"]), - weight_sqrt=dict(argstr="--w-sqrt", xor=["weighted_ls"]), + surf=dict( + argstr="--surf %s %s %s", + requires=["subject_id", "hemi"], + ), + surf_geo=dict( + usedefault=True, + ), + synth=dict( + argstr="--synth", + ), + uniform=dict( + argstr="--uniform %f %f", + ), + var_fwhm=dict( + argstr="--var-fwhm %f", + ), + 
vox_dump=dict( + argstr="--voxdump %d %d %d", + ), + weight_file=dict( + extensions=None, + xor=["weighted_ls"], + ), + weight_inv=dict( + argstr="--w-inv", + xor=["weighted_ls"], + ), + weight_sqrt=dict( + argstr="--w-sqrt", + xor=["weighted_ls"], + ), weighted_ls=dict( argstr="--wls %s", extensions=None, @@ -81,23 +205,47 @@ def test_GLMFit_inputs(): def test_GLMFit_outputs(): output_map = dict( - beta_file=dict(extensions=None), - dof_file=dict(extensions=None), - error_file=dict(extensions=None), - error_stddev_file=dict(extensions=None), - error_var_file=dict(extensions=None), - estimate_file=dict(extensions=None), - frame_eigenvectors=dict(extensions=None), + beta_file=dict( + extensions=None, + ), + dof_file=dict( + extensions=None, + ), + error_file=dict( + extensions=None, + ), + error_stddev_file=dict( + extensions=None, + ), + error_var_file=dict( + extensions=None, + ), + estimate_file=dict( + extensions=None, + ), + frame_eigenvectors=dict( + extensions=None, + ), ftest_file=dict(), - fwhm_file=dict(extensions=None), + fwhm_file=dict( + extensions=None, + ), gamma_file=dict(), gamma_var_file=dict(), glm_dir=dict(), - mask_file=dict(extensions=None), + mask_file=dict( + extensions=None, + ), sig_file=dict(), - singular_values=dict(extensions=None), - spatial_eigenvectors=dict(extensions=None), - svd_stats_file=dict(extensions=None), + singular_values=dict( + extensions=None, + ), + spatial_eigenvectors=dict( + extensions=None, + ), + svd_stats_file=dict( + extensions=None, + ), ) outputs = GLMFit.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py b/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py index 40d3c62268..aa6d5d302e 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py @@ -4,9 +4,18 @@ def test_ImageInfo_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, position=1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + position=1, + ), subjects_dir=dict(), ) inputs = ImageInfo.input_spec() @@ -26,7 +35,9 @@ def test_ImageInfo_outputs(): file_format=dict(), info=dict(), orientation=dict(), - out_file=dict(extensions=None), + out_file=dict( + extensions=None, + ), ph_enc_dir=dict(), vox_sizes=dict(), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py b/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py index a4c52778dd..f62c085839 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py @@ -4,10 +4,25 @@ def test_Jacobian_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_mappedsurf=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - in_origsurf=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_mappedsurf=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + in_origsurf=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), out_file=dict( argstr="%s", extensions=None, @@ -27,7 +42,11 @@ def test_Jacobian_inputs(): def test_Jacobian_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, 
+ ), + ) outputs = Jacobian.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py index d7633157e8..b70bd34c45 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py @@ -4,8 +4,13 @@ def test_LTAConvert_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_fsl=dict( argstr="--infsl %s", extensions=None, @@ -41,16 +46,39 @@ def test_LTAConvert_inputs(): mandatory=True, xor=("in_lta", "in_fsl", "in_mni", "in_reg", "in_niftyreg", "in_itk"), ), - invert=dict(argstr="--invert"), - ltavox2vox=dict(argstr="--ltavox2vox", requires=["out_lta"]), - out_fsl=dict(argstr="--outfsl %s"), - out_itk=dict(argstr="--outitk %s"), - out_lta=dict(argstr="--outlta %s"), - out_mni=dict(argstr="--outmni %s"), - out_reg=dict(argstr="--outreg %s"), - source_file=dict(argstr="--src %s", extensions=None), - target_conform=dict(argstr="--trgconform"), - target_file=dict(argstr="--trg %s", extensions=None), + invert=dict( + argstr="--invert", + ), + ltavox2vox=dict( + argstr="--ltavox2vox", + requires=["out_lta"], + ), + out_fsl=dict( + argstr="--outfsl %s", + ), + out_itk=dict( + argstr="--outitk %s", + ), + out_lta=dict( + argstr="--outlta %s", + ), + out_mni=dict( + argstr="--outmni %s", + ), + out_reg=dict( + argstr="--outreg %s", + ), + source_file=dict( + argstr="--src %s", + extensions=None, + ), + target_conform=dict( + argstr="--trgconform", + ), + target_file=dict( + argstr="--trg %s", + extensions=None, + ), ) inputs = LTAConvert.input_spec() @@ -61,11 +89,21 @@ def test_LTAConvert_inputs(): def test_LTAConvert_outputs(): output_map = dict( - out_fsl=dict(extensions=None), - out_itk=dict(extensions=None), - out_lta=dict(extensions=None), - out_mni=dict(extensions=None), - out_reg=dict(extensions=None), + out_fsl=dict( + extensions=None, + ), + out_itk=dict( + extensions=None, + ), + out_lta=dict( + extensions=None, + ), + out_mni=dict( + extensions=None, + ), + out_reg=dict( + extensions=None, + ), ) outputs = LTAConvert.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py b/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py index 3400725272..7e1caf88cc 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py @@ -4,18 +4,46 @@ def test_Label2Annot_inputs(): input_map = dict( - args=dict(argstr="%s"), - color_table=dict(argstr="--ctab %s", extensions=None), + args=dict( + argstr="%s", + ), + color_table=dict( + argstr="--ctab %s", + extensions=None, + ), copy_inputs=dict(), - environ=dict(nohash=True, usedefault=True), - hemisphere=dict(argstr="--hemi %s", mandatory=True), - in_labels=dict(argstr="--l %s...", mandatory=True), - keep_max=dict(argstr="--maxstatwinner"), - orig=dict(extensions=None, mandatory=True), - out_annot=dict(argstr="--a %s", mandatory=True), - subject_id=dict(argstr="--s %s", mandatory=True, usedefault=True), + environ=dict( + nohash=True, + usedefault=True, + ), + hemisphere=dict( + argstr="--hemi %s", + mandatory=True, + ), + in_labels=dict( + argstr="--l %s...", + mandatory=True, + ), + keep_max=dict( + argstr="--maxstatwinner", + ), + orig=dict( + extensions=None, + mandatory=True, + ), + out_annot=dict( + 
argstr="--a %s", + mandatory=True, + ), + subject_id=dict( + argstr="--s %s", + mandatory=True, + usedefault=True, + ), subjects_dir=dict(), - verbose_off=dict(argstr="--noverbose"), + verbose_off=dict( + argstr="--noverbose", + ), ) inputs = Label2Annot.input_spec() @@ -25,7 +53,11 @@ def test_Label2Annot_inputs(): def test_Label2Annot_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Label2Annot.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py b/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py index 80c62567c5..34f99e1a24 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py @@ -4,10 +4,18 @@ def test_Label2Label_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), copy_inputs=dict(), - environ=dict(nohash=True, usedefault=True), - hemisphere=dict(argstr="--hemi %s", mandatory=True), + environ=dict( + nohash=True, + usedefault=True, + ), + hemisphere=dict( + argstr="--hemi %s", + mandatory=True, + ), out_file=dict( argstr="--trglabel %s", extensions=None, @@ -16,15 +24,41 @@ def test_Label2Label_inputs(): name_source=["source_label"], name_template="%s_converted", ), - registration_method=dict(argstr="--regmethod %s", usedefault=True), - source_label=dict(argstr="--srclabel %s", extensions=None, mandatory=True), - source_sphere_reg=dict(extensions=None, mandatory=True), - source_subject=dict(argstr="--srcsubject %s", mandatory=True), - source_white=dict(extensions=None, mandatory=True), - sphere_reg=dict(extensions=None, mandatory=True), - subject_id=dict(argstr="--trgsubject %s", mandatory=True, usedefault=True), + registration_method=dict( + argstr="--regmethod %s", + usedefault=True, + ), + source_label=dict( + argstr="--srclabel %s", + extensions=None, + mandatory=True, + ), + source_sphere_reg=dict( + extensions=None, + mandatory=True, + ), + source_subject=dict( + argstr="--srcsubject %s", + mandatory=True, + ), + source_white=dict( + extensions=None, + mandatory=True, + ), + sphere_reg=dict( + extensions=None, + mandatory=True, + ), + subject_id=dict( + argstr="--trgsubject %s", + mandatory=True, + usedefault=True, + ), subjects_dir=dict(), - white=dict(extensions=None, mandatory=True), + white=dict( + extensions=None, + mandatory=True, + ), ) inputs = Label2Label.input_spec() @@ -34,7 +68,11 @@ def test_Label2Label_inputs(): def test_Label2Label_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Label2Label.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py b/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py index 5722e24ab6..aa1b19f564 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py @@ -17,23 +17,50 @@ def test_Label2Vol_inputs(): mandatory=True, xor=("label_file", "annot_file", "seg_file", "aparc_aseg"), ), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - fill_thresh=dict(argstr="--fillthresh %g"), - hemi=dict(argstr="--hemi %s"), - identity=dict(argstr="--identity", xor=("reg_file", "reg_header", "identity")), - invert_mtx=dict(argstr="--invertmtx"), + args=dict( + argstr="%s", + ), + 
environ=dict( + nohash=True, + usedefault=True, + ), + fill_thresh=dict( + argstr="--fillthresh %g", + ), + hemi=dict( + argstr="--hemi %s", + ), + identity=dict( + argstr="--identity", + xor=("reg_file", "reg_header", "identity"), + ), + invert_mtx=dict( + argstr="--invertmtx", + ), label_file=dict( argstr="--label %s...", copyfile=False, mandatory=True, xor=("label_file", "annot_file", "seg_file", "aparc_aseg"), ), - label_hit_file=dict(argstr="--hits %s", extensions=None), - label_voxel_volume=dict(argstr="--labvoxvol %f"), - map_label_stat=dict(argstr="--label-stat %s", extensions=None), - native_vox2ras=dict(argstr="--native-vox2ras"), - proj=dict(argstr="--proj %s %f %f %f", requires=("subject_id", "hemi")), + label_hit_file=dict( + argstr="--hits %s", + extensions=None, + ), + label_voxel_volume=dict( + argstr="--labvoxvol %f", + ), + map_label_stat=dict( + argstr="--label-stat %s", + extensions=None, + ), + native_vox2ras=dict( + argstr="--native-vox2ras", + ), + proj=dict( + argstr="--proj %s %f %f %f", + requires=("subject_id", "hemi"), + ), reg_file=dict( argstr="--reg %s", extensions=None, @@ -51,11 +78,23 @@ def test_Label2Vol_inputs(): mandatory=True, xor=("label_file", "annot_file", "seg_file", "aparc_aseg"), ), - subject_id=dict(argstr="--subject %s"), + subject_id=dict( + argstr="--subject %s", + ), subjects_dir=dict(), - surface=dict(argstr="--surf %s"), - template_file=dict(argstr="--temp %s", extensions=None, mandatory=True), - vol_label_file=dict(argstr="--o %s", extensions=None, genfile=True), + surface=dict( + argstr="--surf %s", + ), + template_file=dict( + argstr="--temp %s", + extensions=None, + mandatory=True, + ), + vol_label_file=dict( + argstr="--o %s", + extensions=None, + genfile=True, + ), ) inputs = Label2Vol.input_spec() @@ -65,7 +104,11 @@ def test_Label2Vol_inputs(): def test_Label2Vol_outputs(): - output_map = dict(vol_label_file=dict(extensions=None)) + output_map = dict( + vol_label_file=dict( + extensions=None, + ), + ) outputs = Label2Vol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py b/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py index f9a86b5b67..3b3c2f0852 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py @@ -4,13 +4,32 @@ def test_MNIBiasCorrection_inputs(): input_map = dict( - args=dict(argstr="%s"), - distance=dict(argstr="--distance %d"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="--i %s", extensions=None, mandatory=True), - iterations=dict(argstr="--n %d", usedefault=True), - mask=dict(argstr="--mask %s", extensions=None), - no_rescale=dict(argstr="--no-rescale"), + args=dict( + argstr="%s", + ), + distance=dict( + argstr="--distance %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="--i %s", + extensions=None, + mandatory=True, + ), + iterations=dict( + argstr="--n %d", + usedefault=True, + ), + mask=dict( + argstr="--mask %s", + extensions=None, + ), + no_rescale=dict( + argstr="--no-rescale", + ), out_file=dict( argstr="--o %s", extensions=None, @@ -19,11 +38,20 @@ def test_MNIBiasCorrection_inputs(): name_source=["in_file"], name_template="%s_output", ), - protocol_iterations=dict(argstr="--proto-iters %d"), - shrink=dict(argstr="--shrink %d"), - stop=dict(argstr="--stop %f"), + protocol_iterations=dict( + argstr="--proto-iters %d", + ), + shrink=dict( + 
argstr="--shrink %d", + ), + stop=dict( + argstr="--stop %f", + ), subjects_dir=dict(), - transform=dict(argstr="--uchar %s", extensions=None), + transform=dict( + argstr="--uchar %s", + extensions=None, + ), ) inputs = MNIBiasCorrection.input_spec() @@ -33,7 +61,11 @@ def test_MNIBiasCorrection_inputs(): def test_MNIBiasCorrection_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MNIBiasCorrection.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py b/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py index b2c62ba590..4f21cc2f61 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py @@ -4,12 +4,27 @@ def test_MPRtoMNI305_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, usedefault=True), - reference_dir=dict(mandatory=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + usedefault=True, + ), + reference_dir=dict( + mandatory=True, + usedefault=True, + ), subjects_dir=dict(), - target=dict(mandatory=True, usedefault=True), + target=dict( + mandatory=True, + usedefault=True, + ), ) inputs = MPRtoMNI305.input_spec() @@ -20,7 +35,13 @@ def test_MPRtoMNI305_inputs(): def test_MPRtoMNI305_outputs(): output_map = dict( - log_file=dict(extensions=None, usedefault=True), out_file=dict(extensions=None) + log_file=dict( + extensions=None, + usedefault=True, + ), + out_file=dict( + extensions=None, + ), ) outputs = MPRtoMNI305.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py index 19fb1874f1..9e229078ef 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py @@ -5,97 +5,276 @@ def test_MRIConvert_inputs(): input_map = dict( apply_inv_transform=dict( - argstr="--apply_inverse_transform %s", extensions=None - ), - apply_transform=dict(argstr="--apply_transform %s", extensions=None), - args=dict(argstr="%s"), - ascii=dict(argstr="--ascii"), - autoalign_matrix=dict(argstr="--autoalign %s", extensions=None), - color_file=dict(argstr="--color_file %s", extensions=None), - conform=dict(argstr="--conform"), - conform_min=dict(argstr="--conform_min"), - conform_size=dict(argstr="--conform_size %s"), - crop_center=dict(argstr="--crop %d %d %d"), - crop_gdf=dict(argstr="--crop_gdf"), - crop_size=dict(argstr="--cropsize %d %d %d"), - cut_ends=dict(argstr="--cutends %d"), - cw256=dict(argstr="--cw256"), - devolve_transform=dict(argstr="--devolvexfm %s"), - drop_n=dict(argstr="--ndrop %d"), - environ=dict(nohash=True, usedefault=True), - fill_parcellation=dict(argstr="--fill_parcellation"), - force_ras=dict(argstr="--force_ras_good"), - frame=dict(argstr="--frame %d"), - frame_subsample=dict(argstr="--fsubsample %d %d %d"), - fwhm=dict(argstr="--fwhm %f"), - in_center=dict(argstr="--in_center %s"), + argstr="--apply_inverse_transform %s", + extensions=None, + ), + apply_transform=dict( + argstr="--apply_transform %s", + extensions=None, + ), + args=dict( + argstr="%s", + ), + ascii=dict( + argstr="--ascii", + ), + autoalign_matrix=dict( + argstr="--autoalign %s", + 
extensions=None, + ), + color_file=dict( + argstr="--color_file %s", + extensions=None, + ), + conform=dict( + argstr="--conform", + ), + conform_min=dict( + argstr="--conform_min", + ), + conform_size=dict( + argstr="--conform_size %s", + ), + crop_center=dict( + argstr="--crop %d %d %d", + ), + crop_gdf=dict( + argstr="--crop_gdf", + ), + crop_size=dict( + argstr="--cropsize %d %d %d", + ), + cut_ends=dict( + argstr="--cutends %d", + ), + cw256=dict( + argstr="--cw256", + ), + devolve_transform=dict( + argstr="--devolvexfm %s", + ), + drop_n=dict( + argstr="--ndrop %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fill_parcellation=dict( + argstr="--fill_parcellation", + ), + force_ras=dict( + argstr="--force_ras_good", + ), + frame=dict( + argstr="--frame %d", + ), + frame_subsample=dict( + argstr="--fsubsample %d %d %d", + ), + fwhm=dict( + argstr="--fwhm %f", + ), + in_center=dict( + argstr="--in_center %s", + ), in_file=dict( - argstr="--input_volume %s", extensions=None, mandatory=True, position=-2 - ), - in_i_dir=dict(argstr="--in_i_direction %f %f %f"), - in_i_size=dict(argstr="--in_i_size %d"), - in_info=dict(argstr="--in_info"), - in_j_dir=dict(argstr="--in_j_direction %f %f %f"), - in_j_size=dict(argstr="--in_j_size %d"), - in_k_dir=dict(argstr="--in_k_direction %f %f %f"), - in_k_size=dict(argstr="--in_k_size %d"), - in_like=dict(argstr="--in_like %s", extensions=None), - in_matrix=dict(argstr="--in_matrix"), - in_orientation=dict(argstr="--in_orientation %s"), - in_scale=dict(argstr="--scale %f"), - in_stats=dict(argstr="--in_stats"), - in_type=dict(argstr="--in_type %s"), - invert_contrast=dict(argstr="--invert_contrast %f"), - midframe=dict(argstr="--mid-frame"), - no_change=dict(argstr="--nochange"), - no_scale=dict(argstr="--no_scale 1"), - no_translate=dict(argstr="--no_translate"), - no_write=dict(argstr="--no_write"), - out_center=dict(argstr="--out_center %f %f %f"), - out_datatype=dict(argstr="--out_data_type %s"), + argstr="--input_volume %s", + extensions=None, + mandatory=True, + position=-2, + ), + in_i_dir=dict( + argstr="--in_i_direction %f %f %f", + ), + in_i_size=dict( + argstr="--in_i_size %d", + ), + in_info=dict( + argstr="--in_info", + ), + in_j_dir=dict( + argstr="--in_j_direction %f %f %f", + ), + in_j_size=dict( + argstr="--in_j_size %d", + ), + in_k_dir=dict( + argstr="--in_k_direction %f %f %f", + ), + in_k_size=dict( + argstr="--in_k_size %d", + ), + in_like=dict( + argstr="--in_like %s", + extensions=None, + ), + in_matrix=dict( + argstr="--in_matrix", + ), + in_orientation=dict( + argstr="--in_orientation %s", + ), + in_scale=dict( + argstr="--scale %f", + ), + in_stats=dict( + argstr="--in_stats", + ), + in_type=dict( + argstr="--in_type %s", + ), + invert_contrast=dict( + argstr="--invert_contrast %f", + ), + midframe=dict( + argstr="--mid-frame", + ), + no_change=dict( + argstr="--nochange", + ), + no_scale=dict( + argstr="--no_scale 1", + ), + no_translate=dict( + argstr="--no_translate", + ), + no_write=dict( + argstr="--no_write", + ), + out_center=dict( + argstr="--out_center %f %f %f", + ), + out_datatype=dict( + argstr="--out_data_type %s", + ), out_file=dict( - argstr="--output_volume %s", extensions=None, genfile=True, position=-1 - ), - out_i_count=dict(argstr="--out_i_count %d"), - out_i_dir=dict(argstr="--out_i_direction %f %f %f"), - out_i_size=dict(argstr="--out_i_size %d"), - out_info=dict(argstr="--out_info"), - out_j_count=dict(argstr="--out_j_count %d"), - out_j_dir=dict(argstr="--out_j_direction %f %f %f"), - 
out_j_size=dict(argstr="--out_j_size %d"), - out_k_count=dict(argstr="--out_k_count %d"), - out_k_dir=dict(argstr="--out_k_direction %f %f %f"), - out_k_size=dict(argstr="--out_k_size %d"), - out_matrix=dict(argstr="--out_matrix"), - out_orientation=dict(argstr="--out_orientation %s"), - out_scale=dict(argstr="--out-scale %d"), - out_stats=dict(argstr="--out_stats"), - out_type=dict(argstr="--out_type %s"), - parse_only=dict(argstr="--parse_only"), - read_only=dict(argstr="--read_only"), - reorder=dict(argstr="--reorder %d %d %d"), - resample_type=dict(argstr="--resample_type %s"), - reslice_like=dict(argstr="--reslice_like %s", extensions=None), - sdcm_list=dict(argstr="--sdcmlist %s", extensions=None), - skip_n=dict(argstr="--nskip %d"), - slice_bias=dict(argstr="--slice-bias %f"), - slice_crop=dict(argstr="--slice-crop %d %d"), - slice_reverse=dict(argstr="--slice-reverse"), - smooth_parcellation=dict(argstr="--smooth_parcellation"), - sphinx=dict(argstr="--sphinx"), - split=dict(argstr="--split"), - status_file=dict(argstr="--status %s", extensions=None), - subject_name=dict(argstr="--subject_name %s"), + argstr="--output_volume %s", + extensions=None, + genfile=True, + position=-1, + ), + out_i_count=dict( + argstr="--out_i_count %d", + ), + out_i_dir=dict( + argstr="--out_i_direction %f %f %f", + ), + out_i_size=dict( + argstr="--out_i_size %d", + ), + out_info=dict( + argstr="--out_info", + ), + out_j_count=dict( + argstr="--out_j_count %d", + ), + out_j_dir=dict( + argstr="--out_j_direction %f %f %f", + ), + out_j_size=dict( + argstr="--out_j_size %d", + ), + out_k_count=dict( + argstr="--out_k_count %d", + ), + out_k_dir=dict( + argstr="--out_k_direction %f %f %f", + ), + out_k_size=dict( + argstr="--out_k_size %d", + ), + out_matrix=dict( + argstr="--out_matrix", + ), + out_orientation=dict( + argstr="--out_orientation %s", + ), + out_scale=dict( + argstr="--out-scale %d", + ), + out_stats=dict( + argstr="--out_stats", + ), + out_type=dict( + argstr="--out_type %s", + ), + parse_only=dict( + argstr="--parse_only", + ), + read_only=dict( + argstr="--read_only", + ), + reorder=dict( + argstr="--reorder %d %d %d", + ), + resample_type=dict( + argstr="--resample_type %s", + ), + reslice_like=dict( + argstr="--reslice_like %s", + extensions=None, + ), + sdcm_list=dict( + argstr="--sdcmlist %s", + extensions=None, + ), + skip_n=dict( + argstr="--nskip %d", + ), + slice_bias=dict( + argstr="--slice-bias %f", + ), + slice_crop=dict( + argstr="--slice-crop %d %d", + ), + slice_reverse=dict( + argstr="--slice-reverse", + ), + smooth_parcellation=dict( + argstr="--smooth_parcellation", + ), + sphinx=dict( + argstr="--sphinx", + ), + split=dict( + argstr="--split", + ), + status_file=dict( + argstr="--status %s", + extensions=None, + ), + subject_name=dict( + argstr="--subject_name %s", + ), subjects_dir=dict(), - te=dict(argstr="-te %d"), - template_info=dict(argstr="--template_info"), - template_type=dict(argstr="--template_type %s"), - ti=dict(argstr="-ti %d"), - tr=dict(argstr="-tr %d"), - unwarp_gradient=dict(argstr="--unwarp_gradient_nonlinearity"), - vox_size=dict(argstr="-voxsize %f %f %f"), - zero_ge_z_offset=dict(argstr="--zero_ge_z_offset"), - zero_outlines=dict(argstr="--zero_outlines"), + te=dict( + argstr="-te %d", + ), + template_info=dict( + argstr="--template_info", + ), + template_type=dict( + argstr="--template_type %s", + ), + ti=dict( + argstr="-ti %d", + ), + tr=dict( + argstr="-tr %d", + ), + unwarp_gradient=dict( + argstr="--unwarp_gradient_nonlinearity", + ), + 
vox_size=dict( + argstr="-voxsize %f %f %f", + ), + zero_ge_z_offset=dict( + argstr="--zero_ge_z_offset", + ), + zero_outlines=dict( + argstr="--zero_outlines", + ), ) inputs = MRIConvert.input_spec() @@ -105,7 +284,9 @@ def test_MRIConvert_inputs(): def test_MRIConvert_outputs(): - output_map = dict(out_file=dict()) + output_map = dict( + out_file=dict(), + ) outputs = MRIConvert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py b/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py index 72b2fa7913..3d85129f3d 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py @@ -4,29 +4,79 @@ def test_MRICoreg_inputs(): input_map = dict( - args=dict(argstr="%s"), - brute_force_limit=dict(argstr="--bf-lim %g", xor=["no_brute_force"]), - brute_force_samples=dict(argstr="--bf-nsamp %d", xor=["no_brute_force"]), - conform_reference=dict(argstr="--conf-ref"), - dof=dict(argstr="--dof %d"), - environ=dict(nohash=True, usedefault=True), - ftol=dict(argstr="--ftol %e"), - initial_rotation=dict(argstr="--rot %g %g %g"), - initial_scale=dict(argstr="--scale %g %g %g"), - initial_shear=dict(argstr="--shear %g %g %g"), - initial_translation=dict(argstr="--trans %g %g %g"), - linmintol=dict(argstr="--linmintol %e"), - max_iters=dict(argstr="--nitersmax %d"), - no_brute_force=dict(argstr="--no-bf"), - no_coord_dithering=dict(argstr="--no-coord-dither"), - no_cras0=dict(argstr="--no-cras0"), - no_intensity_dithering=dict(argstr="--no-intensity-dither"), - no_smooth=dict(argstr="--no-smooth"), - num_threads=dict(argstr="--threads %d"), - out_lta_file=dict(argstr="--lta %s", usedefault=True), - out_params_file=dict(argstr="--params %s"), - out_reg_file=dict(argstr="--regdat %s"), - ref_fwhm=dict(argstr="--ref-fwhm"), + args=dict( + argstr="%s", + ), + brute_force_limit=dict( + argstr="--bf-lim %g", + xor=["no_brute_force"], + ), + brute_force_samples=dict( + argstr="--bf-nsamp %d", + xor=["no_brute_force"], + ), + conform_reference=dict( + argstr="--conf-ref", + ), + dof=dict( + argstr="--dof %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ftol=dict( + argstr="--ftol %e", + ), + initial_rotation=dict( + argstr="--rot %g %g %g", + ), + initial_scale=dict( + argstr="--scale %g %g %g", + ), + initial_shear=dict( + argstr="--shear %g %g %g", + ), + initial_translation=dict( + argstr="--trans %g %g %g", + ), + linmintol=dict( + argstr="--linmintol %e", + ), + max_iters=dict( + argstr="--nitersmax %d", + ), + no_brute_force=dict( + argstr="--no-bf", + ), + no_coord_dithering=dict( + argstr="--no-coord-dither", + ), + no_cras0=dict( + argstr="--no-cras0", + ), + no_intensity_dithering=dict( + argstr="--no-intensity-dither", + ), + no_smooth=dict( + argstr="--no-smooth", + ), + num_threads=dict( + argstr="--threads %d", + ), + out_lta_file=dict( + argstr="--lta %s", + usedefault=True, + ), + out_params_file=dict( + argstr="--params %s", + ), + out_reg_file=dict( + argstr="--regdat %s", + ), + ref_fwhm=dict( + argstr="--ref-fwhm", + ), reference_file=dict( argstr="--ref %s", copyfile=False, @@ -34,14 +84,28 @@ def test_MRICoreg_inputs(): mandatory=True, xor=["subject_id"], ), - reference_mask=dict(argstr="--ref-mask %s", position=2), - saturation_threshold=dict(argstr="--sat %g"), - sep=dict(argstr="--sep %s..."), + reference_mask=dict( + argstr="--ref-mask %s", + position=2, + ), + saturation_threshold=dict( + argstr="--sat %g", + ), + 
sep=dict( + argstr="--sep %s...", + ), source_file=dict( - argstr="--mov %s", copyfile=False, extensions=None, mandatory=True + argstr="--mov %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + source_mask=dict( + argstr="--mov-mask", + ), + source_oob=dict( + argstr="--mov-oob", ), - source_mask=dict(argstr="--mov-mask"), - source_oob=dict(argstr="--mov-oob"), subject_id=dict( argstr="--s %s", mandatory=True, @@ -49,7 +113,9 @@ def test_MRICoreg_inputs(): requires=["subjects_dir"], xor=["reference_file"], ), - subjects_dir=dict(argstr="--sd %s"), + subjects_dir=dict( + argstr="--sd %s", + ), ) inputs = MRICoreg.input_spec() @@ -60,9 +126,15 @@ def test_MRICoreg_inputs(): def test_MRICoreg_outputs(): output_map = dict( - out_lta_file=dict(extensions=None), - out_params_file=dict(extensions=None), - out_reg_file=dict(extensions=None), + out_lta_file=dict( + extensions=None, + ), + out_params_file=dict( + extensions=None, + ), + out_reg_file=dict( + extensions=None, + ), ) outputs = MRICoreg.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py index c07c843181..bf359364ba 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py @@ -4,14 +4,38 @@ def test_MRIFill_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - log_file=dict(argstr="-a %s", extensions=None), - out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), - segmentation=dict(argstr="-segmentation %s", extensions=None), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + log_file=dict( + argstr="-a %s", + extensions=None, + ), + out_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), + segmentation=dict( + argstr="-segmentation %s", + extensions=None, + ), subjects_dir=dict(), - transform=dict(argstr="-xform %s", extensions=None), + transform=dict( + argstr="-xform %s", + extensions=None, + ), ) inputs = MRIFill.input_spec() @@ -21,7 +45,14 @@ def test_MRIFill_inputs(): def test_MRIFill_outputs(): - output_map = dict(log_file=dict(extensions=None), out_file=dict(extensions=None)) + output_map = dict( + log_file=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + ) outputs = MRIFill.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py index 0233931794..ccb2ab4388 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py @@ -4,12 +4,35 @@ def test_MRIMarchingCubes_inputs(): input_map = dict( - args=dict(argstr="%s"), - connectivity_value=dict(argstr="%d", position=-1, usedefault=True), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1), - label_value=dict(argstr="%d", mandatory=True, position=2), - out_file=dict(argstr="./%s", extensions=None, genfile=True, position=-2), + args=dict( + argstr="%s", + ), + connectivity_value=dict( + argstr="%d", + position=-1, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + 
in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + label_value=dict( + argstr="%d", + mandatory=True, + position=2, + ), + out_file=dict( + argstr="./%s", + extensions=None, + genfile=True, + position=-2, + ), subjects_dir=dict(), ) inputs = MRIMarchingCubes.input_spec() @@ -20,7 +43,11 @@ def test_MRIMarchingCubes_inputs(): def test_MRIMarchingCubes_outputs(): - output_map = dict(surface=dict(extensions=None)) + output_map = dict( + surface=dict( + extensions=None, + ), + ) outputs = MRIMarchingCubes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py index 2ebbbc120d..e6a239fbd5 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py @@ -4,13 +4,37 @@ def test_MRIPretess_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_filled=dict(argstr="%s", extensions=None, mandatory=True, position=-4), - in_norm=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - keep=dict(argstr="-keep"), - label=dict(argstr="%s", mandatory=True, position=-3, usedefault=True), - nocorners=dict(argstr="-nocorners"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_filled=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-4, + ), + in_norm=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + keep=dict( + argstr="-keep", + ), + label=dict( + argstr="%s", + mandatory=True, + position=-3, + usedefault=True, + ), + nocorners=dict( + argstr="-nocorners", + ), out_file=dict( argstr="%s", extensions=None, @@ -20,7 +44,9 @@ def test_MRIPretess_inputs(): position=-1, ), subjects_dir=dict(), - test=dict(argstr="-test"), + test=dict( + argstr="-test", + ), ) inputs = MRIPretess.input_spec() @@ -30,7 +56,11 @@ def test_MRIPretess_inputs(): def test_MRIPretess_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRIPretess.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py index d672071076..845e6c6c3c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py @@ -4,43 +4,84 @@ def test_MRISPreproc_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), fsgd_file=dict( argstr="--fsgd %s", extensions=None, xor=("subjects", "fsgd_file", "subject_file"), ), - fwhm=dict(argstr="--fwhm %f", xor=["num_iters"]), - fwhm_source=dict(argstr="--fwhm-src %f", xor=["num_iters_source"]), - hemi=dict(argstr="--hemi %s", mandatory=True), - num_iters=dict(argstr="--niters %d", xor=["fwhm"]), - num_iters_source=dict(argstr="--niterssrc %d", xor=["fwhm_source"]), - out_file=dict(argstr="--out %s", extensions=None, genfile=True), - proj_frac=dict(argstr="--projfrac %s"), - smooth_cortex_only=dict(argstr="--smooth-cortex-only"), - source_format=dict(argstr="--srcfmt %s"), + fwhm=dict( + argstr="--fwhm %f", + xor=["num_iters"], + ), + fwhm_source=dict( + argstr="--fwhm-src %f", + xor=["num_iters_source"], + 
), + hemi=dict( + argstr="--hemi %s", + mandatory=True, + ), + num_iters=dict( + argstr="--niters %d", + xor=["fwhm"], + ), + num_iters_source=dict( + argstr="--niterssrc %d", + xor=["fwhm_source"], + ), + out_file=dict( + argstr="--out %s", + extensions=None, + genfile=True, + ), + proj_frac=dict( + argstr="--projfrac %s", + ), + smooth_cortex_only=dict( + argstr="--smooth-cortex-only", + ), + source_format=dict( + argstr="--srcfmt %s", + ), subject_file=dict( argstr="--f %s", extensions=None, xor=("subjects", "fsgd_file", "subject_file"), ), subjects=dict( - argstr="--s %s...", xor=("subjects", "fsgd_file", "subject_file") + argstr="--s %s...", + xor=("subjects", "fsgd_file", "subject_file"), ), subjects_dir=dict(), surf_area=dict( - argstr="--area %s", xor=("surf_measure", "surf_measure_file", "surf_area") + argstr="--area %s", + xor=("surf_measure", "surf_measure_file", "surf_area"), + ), + surf_dir=dict( + argstr="--surfdir %s", ), - surf_dir=dict(argstr="--surfdir %s"), surf_measure=dict( - argstr="--meas %s", xor=("surf_measure", "surf_measure_file", "surf_area") + argstr="--meas %s", + xor=("surf_measure", "surf_measure_file", "surf_area"), ), surf_measure_file=dict( - argstr="--is %s...", xor=("surf_measure", "surf_measure_file", "surf_area") + argstr="--is %s...", + xor=("surf_measure", "surf_measure_file", "surf_area"), + ), + target=dict( + argstr="--target %s", + mandatory=True, + ), + vol_measure_file=dict( + argstr="--iv %s %s...", ), - target=dict(argstr="--target %s", mandatory=True), - vol_measure_file=dict(argstr="--iv %s %s..."), ) inputs = MRISPreproc.input_spec() @@ -50,7 +91,11 @@ def test_MRISPreproc_inputs(): def test_MRISPreproc_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRISPreproc.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py index d3778f299d..5bdb0614e5 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py @@ -4,25 +4,61 @@ def test_MRISPreprocReconAll_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), copy_inputs=dict(), - environ=dict(nohash=True, usedefault=True), + environ=dict( + nohash=True, + usedefault=True, + ), fsgd_file=dict( argstr="--fsgd %s", extensions=None, xor=("subjects", "fsgd_file", "subject_file"), ), - fwhm=dict(argstr="--fwhm %f", xor=["num_iters"]), - fwhm_source=dict(argstr="--fwhm-src %f", xor=["num_iters_source"]), - hemi=dict(argstr="--hemi %s", mandatory=True), - lh_surfreg_target=dict(extensions=None, requires=["surfreg_files"]), - num_iters=dict(argstr="--niters %d", xor=["fwhm"]), - num_iters_source=dict(argstr="--niterssrc %d", xor=["fwhm_source"]), - out_file=dict(argstr="--out %s", extensions=None, genfile=True), - proj_frac=dict(argstr="--projfrac %s"), - rh_surfreg_target=dict(extensions=None, requires=["surfreg_files"]), - smooth_cortex_only=dict(argstr="--smooth-cortex-only"), - source_format=dict(argstr="--srcfmt %s"), + fwhm=dict( + argstr="--fwhm %f", + xor=["num_iters"], + ), + fwhm_source=dict( + argstr="--fwhm-src %f", + xor=["num_iters_source"], + ), + hemi=dict( + argstr="--hemi %s", + mandatory=True, + ), + lh_surfreg_target=dict( + extensions=None, + requires=["surfreg_files"], + ), + num_iters=dict( + argstr="--niters 
%d", + xor=["fwhm"], + ), + num_iters_source=dict( + argstr="--niterssrc %d", + xor=["fwhm_source"], + ), + out_file=dict( + argstr="--out %s", + extensions=None, + genfile=True, + ), + proj_frac=dict( + argstr="--projfrac %s", + ), + rh_surfreg_target=dict( + extensions=None, + requires=["surfreg_files"], + ), + smooth_cortex_only=dict( + argstr="--smooth-cortex-only", + ), + source_format=dict( + argstr="--srcfmt %s", + ), subject_file=dict( argstr="--f %s", extensions=None, @@ -34,15 +70,20 @@ def test_MRISPreprocReconAll_inputs(): xor=("subjects", "fsgd_file", "subject_file", "subject_id"), ), subjects=dict( - argstr="--s %s...", xor=("subjects", "fsgd_file", "subject_file") + argstr="--s %s...", + xor=("subjects", "fsgd_file", "subject_file"), ), subjects_dir=dict(), surf_area=dict( - argstr="--area %s", xor=("surf_measure", "surf_measure_file", "surf_area") + argstr="--area %s", + xor=("surf_measure", "surf_measure_file", "surf_area"), + ), + surf_dir=dict( + argstr="--surfdir %s", ), - surf_dir=dict(argstr="--surfdir %s"), surf_measure=dict( - argstr="--meas %s", xor=("surf_measure", "surf_measure_file", "surf_area") + argstr="--meas %s", + xor=("surf_measure", "surf_measure_file", "surf_area"), ), surf_measure_file=dict( argstr="--meas %s", @@ -50,10 +91,16 @@ def test_MRISPreprocReconAll_inputs(): xor=("surf_measure", "surf_measure_file", "surf_area"), ), surfreg_files=dict( - argstr="--surfreg %s", requires=["lh_surfreg_target", "rh_surfreg_target"] + argstr="--surfreg %s", + requires=["lh_surfreg_target", "rh_surfreg_target"], + ), + target=dict( + argstr="--target %s", + mandatory=True, + ), + vol_measure_file=dict( + argstr="--iv %s %s...", ), - target=dict(argstr="--target %s", mandatory=True), - vol_measure_file=dict(argstr="--iv %s %s..."), ) inputs = MRISPreprocReconAll.input_spec() @@ -63,7 +110,11 @@ def test_MRISPreprocReconAll_inputs(): def test_MRISPreprocReconAll_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRISPreprocReconAll.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py b/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py index 0101492c42..8aa7210d0e 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py @@ -4,14 +4,37 @@ def test_MRITessellate_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), - label_value=dict(argstr="%d", mandatory=True, position=-2), - out_file=dict(argstr="%s", extensions=None, genfile=True, position=-1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + label_value=dict( + argstr="%d", + mandatory=True, + position=-2, + ), + out_file=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), subjects_dir=dict(), - tesselate_all_voxels=dict(argstr="-a"), - use_real_RAS_coordinates=dict(argstr="-n"), + tesselate_all_voxels=dict( + argstr="-a", + ), + use_real_RAS_coordinates=dict( + argstr="-n", + ), ) inputs = MRITessellate.input_spec() @@ -21,7 +44,11 @@ def test_MRITessellate_inputs(): def test_MRITessellate_outputs(): - output_map = dict(surface=dict(extensions=None)) + output_map = dict( + 
surface=dict( + extensions=None, + ), + ) outputs = MRITessellate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py index 906aea741e..e37cf0723a 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py @@ -4,15 +4,43 @@ def test_MRIsCALabel_inputs(): input_map = dict( - args=dict(argstr="%s"), - aseg=dict(argstr="-aseg %s", extensions=None), - canonsurf=dict(argstr="%s", extensions=None, mandatory=True, position=-3), - classifier=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + args=dict( + argstr="%s", + ), + aseg=dict( + argstr="-aseg %s", + extensions=None, + ), + canonsurf=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + classifier=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), copy_inputs=dict(), - curv=dict(extensions=None, mandatory=True), - environ=dict(nohash=True, usedefault=True), - hemisphere=dict(argstr="%s", mandatory=True, position=-4), - label=dict(argstr="-l %s", extensions=None), + curv=dict( + extensions=None, + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hemisphere=dict( + argstr="%s", + mandatory=True, + position=-4, + ), + label=dict( + argstr="-l %s", + extensions=None, + ), num_threads=dict(), out_file=dict( argstr="%s", @@ -23,11 +51,24 @@ def test_MRIsCALabel_inputs(): name_template="%s.aparc.annot", position=-1, ), - seed=dict(argstr="-seed %d"), - smoothwm=dict(extensions=None, mandatory=True), - subject_id=dict(argstr="%s", mandatory=True, position=-5, usedefault=True), + seed=dict( + argstr="-seed %d", + ), + smoothwm=dict( + extensions=None, + mandatory=True, + ), + subject_id=dict( + argstr="%s", + mandatory=True, + position=-5, + usedefault=True, + ), subjects_dir=dict(), - sulc=dict(extensions=None, mandatory=True), + sulc=dict( + extensions=None, + mandatory=True, + ), ) inputs = MRIsCALabel.input_spec() @@ -37,7 +78,11 @@ def test_MRIsCALabel_inputs(): def test_MRIsCALabel_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRIsCALabel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py index 6d0d91d620..1ef9c95c46 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py @@ -4,16 +4,45 @@ def test_MRIsCalc_inputs(): input_map = dict( - action=dict(argstr="%s", mandatory=True, position=-2), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file1=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + action=dict( + argstr="%s", + mandatory=True, + position=-2, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file1=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), in_file2=dict( - argstr="%s", extensions=None, position=-1, xor=["in_float", "in_int"] + argstr="%s", + extensions=None, + position=-1, + xor=["in_float", "in_int"], + ), + in_float=dict( + argstr="%f", + position=-1, + xor=["in_file2", "in_int"], + ), + in_int=dict( + argstr="%d", + position=-1, + xor=["in_file2", "in_float"], + ), + out_file=dict( + 
argstr="-o %s", + extensions=None, + mandatory=True, ), - in_float=dict(argstr="%f", position=-1, xor=["in_file2", "in_int"]), - in_int=dict(argstr="%d", position=-1, xor=["in_file2", "in_float"]), - out_file=dict(argstr="-o %s", extensions=None, mandatory=True), subjects_dir=dict(), ) inputs = MRIsCalc.input_spec() @@ -24,7 +53,11 @@ def test_MRIsCalc_inputs(): def test_MRIsCalc_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRIsCalc.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py index 3b901a1a2f..01aef41a01 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py @@ -4,11 +4,24 @@ def test_MRIsCombine_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_files=dict(argstr="--combinesurfs %s", mandatory=True, position=1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr="--combinesurfs %s", + mandatory=True, + position=1, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, mandatory=True, position=-1 + argstr="%s", + extensions=None, + genfile=True, + mandatory=True, + position=-1, ), subjects_dir=dict(), ) @@ -20,7 +33,11 @@ def test_MRIsCombine_inputs(): def test_MRIsCombine_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRIsCombine.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py index 98c45ae030..daf4462ff8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py @@ -4,17 +4,48 @@ def test_MRIsConvert_inputs(): input_map = dict( - annot_file=dict(argstr="--annot %s", extensions=None), - args=dict(argstr="%s"), - dataarray_num=dict(argstr="--da_num %d"), - environ=dict(nohash=True, usedefault=True), - functional_file=dict(argstr="-f %s", extensions=None), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - label_file=dict(argstr="--label %s", extensions=None), - labelstats_outfile=dict(argstr="--labelstats %s", extensions=None), - normal=dict(argstr="-n"), - origname=dict(argstr="-o %s"), - out_datatype=dict(mandatory=True, xor=["out_file"]), + annot_file=dict( + argstr="--annot %s", + extensions=None, + ), + args=dict( + argstr="%s", + ), + dataarray_num=dict( + argstr="--da_num %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + functional_file=dict( + argstr="-f %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + label_file=dict( + argstr="--label %s", + extensions=None, + ), + labelstats_outfile=dict( + argstr="--labelstats %s", + extensions=None, + ), + normal=dict( + argstr="-n", + ), + origname=dict( + argstr="-o %s", + ), + out_datatype=dict( + mandatory=True, + xor=["out_file"], + ), out_file=dict( argstr="%s", extensions=None, @@ -23,17 +54,39 @@ def test_MRIsConvert_inputs(): position=-1, xor=["out_datatype"], ), - parcstats_file=dict(argstr="--parcstats %s", extensions=None), - patch=dict(argstr="-p"), - 
rescale=dict(argstr="-r"), - scalarcurv_file=dict(argstr="-c %s", extensions=None), - scale=dict(argstr="-s %.3f"), + parcstats_file=dict( + argstr="--parcstats %s", + extensions=None, + ), + patch=dict( + argstr="-p", + ), + rescale=dict( + argstr="-r", + ), + scalarcurv_file=dict( + argstr="-c %s", + extensions=None, + ), + scale=dict( + argstr="-s %.3f", + ), subjects_dir=dict(), - talairachxfm_subjid=dict(argstr="-t %s"), - to_scanner=dict(argstr="--to-scanner"), - to_tkr=dict(argstr="--to-tkr"), - vertex=dict(argstr="-v"), - xyz_ascii=dict(argstr="-a"), + talairachxfm_subjid=dict( + argstr="-t %s", + ), + to_scanner=dict( + argstr="--to-scanner", + ), + to_tkr=dict( + argstr="--to-tkr", + ), + vertex=dict( + argstr="-v", + ), + xyz_ascii=dict( + argstr="-a", + ), ) inputs = MRIsConvert.input_spec() @@ -43,7 +96,11 @@ def test_MRIsConvert_inputs(): def test_MRIsConvert_outputs(): - output_map = dict(converted=dict(extensions=None)) + output_map = dict( + converted=dict( + extensions=None, + ), + ) outputs = MRIsConvert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py index fff96b681f..05e34a29b5 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py @@ -4,23 +4,61 @@ def test_MRIsExpand_inputs(): input_map = dict( - args=dict(argstr="%s"), - distance=dict(argstr="%g", mandatory=True, position=-2), - dt=dict(argstr="-T %g"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + distance=dict( + argstr="%g", + mandatory=True, + position=-2, + ), + dt=dict( + argstr="-T %g", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=-3 - ), - nsurfaces=dict(argstr="-N %d"), - out_name=dict(argstr="%s", position=-1, usedefault=True), - pial=dict(argstr="-pial %s", copyfile=False), - smooth_averages=dict(argstr="-A %d"), - sphere=dict(copyfile=False, usedefault=True), - spring=dict(argstr="-S %g"), + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=-3, + ), + nsurfaces=dict( + argstr="-N %d", + ), + out_name=dict( + argstr="%s", + position=-1, + usedefault=True, + ), + pial=dict( + argstr="-pial %s", + copyfile=False, + ), + smooth_averages=dict( + argstr="-A %d", + ), + sphere=dict( + copyfile=False, + usedefault=True, + ), + spring=dict( + argstr="-S %g", + ), subjects_dir=dict(), - thickness=dict(argstr="-thickness"), - thickness_name=dict(argstr="-thickness_name %s", copyfile=False), - write_iterations=dict(argstr="-W %d"), + thickness=dict( + argstr="-thickness", + ), + thickness_name=dict( + argstr="-thickness_name %s", + copyfile=False, + ), + write_iterations=dict( + argstr="-W %d", + ), ) inputs = MRIsExpand.input_spec() @@ -30,7 +68,11 @@ def test_MRIsExpand_inputs(): def test_MRIsExpand_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRIsExpand.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py index 2ef9a9043f..9cc45189a0 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py @@ -4,12 +4,24 @@ def 
test_MRIsInflate_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-2 + argstr="%s", + copyfile=True, + extensions=None, + mandatory=True, + position=-2, + ), + no_save_sulc=dict( + argstr="-no-save-sulc", + xor=["out_sulc"], ), - no_save_sulc=dict(argstr="-no-save-sulc", xor=["out_sulc"]), out_file=dict( argstr="%s", extensions=None, @@ -19,7 +31,10 @@ def test_MRIsInflate_inputs(): name_template="%s.inflated", position=-1, ), - out_sulc=dict(extensions=None, xor=["no_save_sulc"]), + out_sulc=dict( + extensions=None, + xor=["no_save_sulc"], + ), subjects_dir=dict(), ) inputs = MRIsInflate.input_spec() @@ -30,7 +45,14 @@ def test_MRIsInflate_inputs(): def test_MRIsInflate_outputs(): - output_map = dict(out_file=dict(extensions=None), out_sulc=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + out_sulc=dict( + extensions=None, + ), + ) outputs = MRIsInflate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py b/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py index fa6b161543..093dd3d9b8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py @@ -4,18 +4,52 @@ def test_MS_LDA_inputs(): input_map = dict( - args=dict(argstr="%s"), - conform=dict(argstr="-conform"), - environ=dict(nohash=True, usedefault=True), - images=dict(argstr="%s", copyfile=False, mandatory=True, position=-1), - label_file=dict(argstr="-label %s", extensions=None), - lda_labels=dict(argstr="-lda %s", mandatory=True, sep=" "), - mask_file=dict(argstr="-mask %s", extensions=None), - shift=dict(argstr="-shift %d"), + args=dict( + argstr="%s", + ), + conform=dict( + argstr="-conform", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + images=dict( + argstr="%s", + copyfile=False, + mandatory=True, + position=-1, + ), + label_file=dict( + argstr="-label %s", + extensions=None, + ), + lda_labels=dict( + argstr="-lda %s", + mandatory=True, + sep=" ", + ), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + shift=dict( + argstr="-shift %d", + ), subjects_dir=dict(), - use_weights=dict(argstr="-W"), - vol_synth_file=dict(argstr="-synth %s", extensions=None, mandatory=True), - weight_file=dict(argstr="-weight %s", extensions=None, mandatory=True), + use_weights=dict( + argstr="-W", + ), + vol_synth_file=dict( + argstr="-synth %s", + extensions=None, + mandatory=True, + ), + weight_file=dict( + argstr="-weight %s", + extensions=None, + mandatory=True, + ), ) inputs = MS_LDA.input_spec() @@ -26,7 +60,12 @@ def test_MS_LDA_inputs(): def test_MS_LDA_outputs(): output_map = dict( - vol_synth_file=dict(extensions=None), weight_file=dict(extensions=None) + vol_synth_file=dict( + extensions=None, + ), + weight_file=dict( + extensions=None, + ), ) outputs = MS_LDA.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py b/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py index bf21f14842..e3778911e6 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py @@ -4,11 +4,24 @@ def test_MakeAverageSubject_inputs(): input_map = dict( - args=dict(argstr="%s"), - 
environ=dict(nohash=True, usedefault=True), - out_name=dict(argstr="--out %s", extensions=None, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + out_name=dict( + argstr="--out %s", + extensions=None, + usedefault=True, + ), subjects_dir=dict(), - subjects_ids=dict(argstr="--subjects %s", mandatory=True, sep=" "), + subjects_ids=dict( + argstr="--subjects %s", + mandatory=True, + sep=" ", + ), ) inputs = MakeAverageSubject.input_spec() @@ -18,7 +31,9 @@ def test_MakeAverageSubject_inputs(): def test_MakeAverageSubject_outputs(): - output_map = dict(average_subject_name=dict()) + output_map = dict( + average_subject_name=dict(), + ) outputs = MakeAverageSubject.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py b/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py index 8926586954..06316d071a 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py @@ -4,29 +4,88 @@ def test_MakeSurfaces_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), copy_inputs=dict(), - environ=dict(nohash=True, usedefault=True), - fix_mtl=dict(argstr="-fix_mtl"), - hemisphere=dict(argstr="%s", mandatory=True, position=-1), - in_T1=dict(argstr="-T1 %s", extensions=None), - in_aseg=dict(argstr="-aseg %s", extensions=None), - in_filled=dict(extensions=None, mandatory=True), - in_label=dict(extensions=None, xor=["noaparc"]), - in_orig=dict(argstr="-orig %s", extensions=None, mandatory=True), - in_white=dict(extensions=None), - in_wm=dict(extensions=None, mandatory=True), - longitudinal=dict(argstr="-long"), - maximum=dict(argstr="-max %.1f"), - mgz=dict(argstr="-mgz"), - no_white=dict(argstr="-nowhite"), - noaparc=dict(argstr="-noaparc", xor=["in_label"]), - orig_pial=dict(argstr="-orig_pial %s", extensions=None, requires=["in_label"]), - orig_white=dict(argstr="-orig_white %s", extensions=None), - subject_id=dict(argstr="%s", mandatory=True, position=-2, usedefault=True), + environ=dict( + nohash=True, + usedefault=True, + ), + fix_mtl=dict( + argstr="-fix_mtl", + ), + hemisphere=dict( + argstr="%s", + mandatory=True, + position=-1, + ), + in_T1=dict( + argstr="-T1 %s", + extensions=None, + ), + in_aseg=dict( + argstr="-aseg %s", + extensions=None, + ), + in_filled=dict( + extensions=None, + mandatory=True, + ), + in_label=dict( + extensions=None, + xor=["noaparc"], + ), + in_orig=dict( + argstr="-orig %s", + extensions=None, + mandatory=True, + ), + in_white=dict( + extensions=None, + ), + in_wm=dict( + extensions=None, + mandatory=True, + ), + longitudinal=dict( + argstr="-long", + ), + maximum=dict( + argstr="-max %.1f", + ), + mgz=dict( + argstr="-mgz", + ), + no_white=dict( + argstr="-nowhite", + ), + noaparc=dict( + argstr="-noaparc", + xor=["in_label"], + ), + orig_pial=dict( + argstr="-orig_pial %s", + extensions=None, + requires=["in_label"], + ), + orig_white=dict( + argstr="-orig_white %s", + extensions=None, + ), + subject_id=dict( + argstr="%s", + mandatory=True, + position=-2, + usedefault=True, + ), subjects_dir=dict(), - white=dict(argstr="-white %s"), - white_only=dict(argstr="-whiteonly"), + white=dict( + argstr="-white %s", + ), + white_only=dict( + argstr="-whiteonly", + ), ) inputs = MakeSurfaces.input_spec() @@ -37,12 +96,24 @@ def test_MakeSurfaces_inputs(): def test_MakeSurfaces_outputs(): output_map = dict( - 
out_area=dict(extensions=None), - out_cortex=dict(extensions=None), - out_curv=dict(extensions=None), - out_pial=dict(extensions=None), - out_thickness=dict(extensions=None), - out_white=dict(extensions=None), + out_area=dict( + extensions=None, + ), + out_cortex=dict( + extensions=None, + ), + out_curv=dict( + extensions=None, + ), + out_pial=dict( + extensions=None, + ), + out_thickness=dict( + extensions=None, + ), + out_white=dict( + extensions=None, + ), ) outputs = MakeSurfaces.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py b/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py index a50beab155..271f0bb328 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py @@ -4,11 +4,26 @@ def test_Normalize_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - gradient=dict(argstr="-g %d"), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - mask=dict(argstr="-mask %s", extensions=None), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gradient=dict( + argstr="-g %d", + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), out_file=dict( argstr="%s", extensions=None, @@ -18,9 +33,14 @@ def test_Normalize_inputs(): name_template="%s_norm", position=-1, ), - segmentation=dict(argstr="-aseg %s", extensions=None), + segmentation=dict( + argstr="-aseg %s", + extensions=None, + ), subjects_dir=dict(), - transform=dict(extensions=None), + transform=dict( + extensions=None, + ), ) inputs = Normalize.input_spec() @@ -30,7 +50,11 @@ def test_Normalize_inputs(): def test_Normalize_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Normalize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py b/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py index b27728f6c0..533c0a17a9 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py @@ -4,68 +4,192 @@ def test_OneSampleTTest_inputs(): input_map = dict( - allow_ill_cond=dict(argstr="--illcond"), - allow_repeated_subjects=dict(argstr="--allowsubjrep"), - args=dict(argstr="%s"), - calc_AR1=dict(argstr="--tar1"), - check_opts=dict(argstr="--checkopts"), - compute_log_y=dict(argstr="--logy"), - contrast=dict(argstr="--C %s..."), - cortex=dict(argstr="--cortex", xor=["label_file"]), - debug=dict(argstr="--debug"), + allow_ill_cond=dict( + argstr="--illcond", + ), + allow_repeated_subjects=dict( + argstr="--allowsubjrep", + ), + args=dict( + argstr="%s", + ), + calc_AR1=dict( + argstr="--tar1", + ), + check_opts=dict( + argstr="--checkopts", + ), + compute_log_y=dict( + argstr="--logy", + ), + contrast=dict( + argstr="--C %s...", + ), + cortex=dict( + argstr="--cortex", + xor=["label_file"], + ), + debug=dict( + argstr="--debug", + ), design=dict( - argstr="--X %s", extensions=None, xor=("fsgd", "design", "one_sample") + argstr="--X %s", + extensions=None, + xor=("fsgd", "design", "one_sample"), + ), + diag=dict( + argstr="--diag %d", + ), + diag_cluster=dict( + argstr="--diag-cluster", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_fx_dof=dict( + 
argstr="--ffxdof %d", + xor=["fixed_fx_dof_file"], ), - diag=dict(argstr="--diag %d"), - diag_cluster=dict(argstr="--diag-cluster"), - environ=dict(nohash=True, usedefault=True), - fixed_fx_dof=dict(argstr="--ffxdof %d", xor=["fixed_fx_dof_file"]), fixed_fx_dof_file=dict( - argstr="--ffxdofdat %d", extensions=None, xor=["fixed_fx_dof"] + argstr="--ffxdofdat %d", + extensions=None, + xor=["fixed_fx_dof"], + ), + fixed_fx_var=dict( + argstr="--yffxvar %s", + extensions=None, + ), + force_perm=dict( + argstr="--perm-force", + ), + fsgd=dict( + argstr="--fsgd %s %s", + xor=("fsgd", "design", "one_sample"), + ), + fwhm=dict( + argstr="--fwhm %f", + ), + glm_dir=dict( + argstr="--glmdir %s", + genfile=True, ), - fixed_fx_var=dict(argstr="--yffxvar %s", extensions=None), - force_perm=dict(argstr="--perm-force"), - fsgd=dict(argstr="--fsgd %s %s", xor=("fsgd", "design", "one_sample")), - fwhm=dict(argstr="--fwhm %f"), - glm_dir=dict(argstr="--glmdir %s", genfile=True), hemi=dict(), - in_file=dict(argstr="--y %s", copyfile=False, extensions=None, mandatory=True), - invert_mask=dict(argstr="--mask-inv"), - label_file=dict(argstr="--label %s", extensions=None, xor=["cortex"]), - mask_file=dict(argstr="--mask %s", extensions=None), - no_contrast_ok=dict(argstr="--no-contrasts-ok"), - no_est_fwhm=dict(argstr="--no-est-fwhm"), - no_mask_smooth=dict(argstr="--no-mask-smooth"), - no_prune=dict(argstr="--no-prune", xor=["prunethresh"]), + in_file=dict( + argstr="--y %s", + copyfile=False, + extensions=None, + mandatory=True, + ), + invert_mask=dict( + argstr="--mask-inv", + ), + label_file=dict( + argstr="--label %s", + extensions=None, + xor=["cortex"], + ), + mask_file=dict( + argstr="--mask %s", + extensions=None, + ), + no_contrast_ok=dict( + argstr="--no-contrasts-ok", + ), + no_est_fwhm=dict( + argstr="--no-est-fwhm", + ), + no_mask_smooth=dict( + argstr="--no-mask-smooth", + ), + no_prune=dict( + argstr="--no-prune", + xor=["prunethresh"], + ), one_sample=dict( - argstr="--osgm", xor=("one_sample", "fsgd", "design", "contrast") - ), - pca=dict(argstr="--pca"), - per_voxel_reg=dict(argstr="--pvr %s..."), - profile=dict(argstr="--profile %d"), - prune=dict(argstr="--prune"), - prune_thresh=dict(argstr="--prune_thr %f", xor=["noprune"]), - resynth_test=dict(argstr="--resynthtest %d"), - save_cond=dict(argstr="--save-cond"), - save_estimate=dict(argstr="--yhat-save"), - save_res_corr_mtx=dict(argstr="--eres-scm"), - save_residual=dict(argstr="--eres-save"), - seed=dict(argstr="--seed %d"), - self_reg=dict(argstr="--selfreg %d %d %d"), - sim_done_file=dict(argstr="--sim-done %s", extensions=None), - sim_sign=dict(argstr="--sim-sign %s"), - simulation=dict(argstr="--sim %s %d %f %s"), + argstr="--osgm", + xor=("one_sample", "fsgd", "design", "contrast"), + ), + pca=dict( + argstr="--pca", + ), + per_voxel_reg=dict( + argstr="--pvr %s...", + ), + profile=dict( + argstr="--profile %d", + ), + prune=dict( + argstr="--prune", + ), + prune_thresh=dict( + argstr="--prune_thr %f", + xor=["noprune"], + ), + resynth_test=dict( + argstr="--resynthtest %d", + ), + save_cond=dict( + argstr="--save-cond", + ), + save_estimate=dict( + argstr="--yhat-save", + ), + save_res_corr_mtx=dict( + argstr="--eres-scm", + ), + save_residual=dict( + argstr="--eres-save", + ), + seed=dict( + argstr="--seed %d", + ), + self_reg=dict( + argstr="--selfreg %d %d %d", + ), + sim_done_file=dict( + argstr="--sim-done %s", + extensions=None, + ), + sim_sign=dict( + argstr="--sim-sign %s", + ), + simulation=dict( + argstr="--sim %s %d %f 
%s", + ), subject_id=dict(), subjects_dir=dict(), - surf=dict(argstr="--surf %s %s %s", requires=["subject_id", "hemi"]), - surf_geo=dict(usedefault=True), - synth=dict(argstr="--synth"), - uniform=dict(argstr="--uniform %f %f"), - var_fwhm=dict(argstr="--var-fwhm %f"), - vox_dump=dict(argstr="--voxdump %d %d %d"), - weight_file=dict(extensions=None, xor=["weighted_ls"]), - weight_inv=dict(argstr="--w-inv", xor=["weighted_ls"]), - weight_sqrt=dict(argstr="--w-sqrt", xor=["weighted_ls"]), + surf=dict( + argstr="--surf %s %s %s", + requires=["subject_id", "hemi"], + ), + surf_geo=dict( + usedefault=True, + ), + synth=dict( + argstr="--synth", + ), + uniform=dict( + argstr="--uniform %f %f", + ), + var_fwhm=dict( + argstr="--var-fwhm %f", + ), + vox_dump=dict( + argstr="--voxdump %d %d %d", + ), + weight_file=dict( + extensions=None, + xor=["weighted_ls"], + ), + weight_inv=dict( + argstr="--w-inv", + xor=["weighted_ls"], + ), + weight_sqrt=dict( + argstr="--w-sqrt", + xor=["weighted_ls"], + ), weighted_ls=dict( argstr="--wls %s", extensions=None, @@ -81,23 +205,47 @@ def test_OneSampleTTest_inputs(): def test_OneSampleTTest_outputs(): output_map = dict( - beta_file=dict(extensions=None), - dof_file=dict(extensions=None), - error_file=dict(extensions=None), - error_stddev_file=dict(extensions=None), - error_var_file=dict(extensions=None), - estimate_file=dict(extensions=None), - frame_eigenvectors=dict(extensions=None), + beta_file=dict( + extensions=None, + ), + dof_file=dict( + extensions=None, + ), + error_file=dict( + extensions=None, + ), + error_stddev_file=dict( + extensions=None, + ), + error_var_file=dict( + extensions=None, + ), + estimate_file=dict( + extensions=None, + ), + frame_eigenvectors=dict( + extensions=None, + ), ftest_file=dict(), - fwhm_file=dict(extensions=None), + fwhm_file=dict( + extensions=None, + ), gamma_file=dict(), gamma_var_file=dict(), glm_dir=dict(), - mask_file=dict(extensions=None), + mask_file=dict( + extensions=None, + ), sig_file=dict(), - singular_values=dict(extensions=None), - spatial_eigenvectors=dict(extensions=None), - svd_stats_file=dict(extensions=None), + singular_values=dict( + extensions=None, + ), + spatial_eigenvectors=dict( + extensions=None, + ), + svd_stats_file=dict( + extensions=None, + ), ) outputs = OneSampleTTest.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Paint.py b/nipype/interfaces/freesurfer/tests/test_auto_Paint.py index 24463dad25..d95c4c9fa3 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Paint.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Paint.py @@ -4,10 +4,22 @@ def test_Paint_inputs(): input_map = dict( - args=dict(argstr="%s"), - averages=dict(argstr="-a %d"), - environ=dict(nohash=True, usedefault=True), - in_surf=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + args=dict( + argstr="%s", + ), + averages=dict( + argstr="-a %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_surf=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), out_file=dict( argstr="%s", extensions=None, @@ -18,7 +30,12 @@ def test_Paint_inputs(): position=-1, ), subjects_dir=dict(), - template=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + template=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), template_param=dict(), ) inputs = Paint.input_spec() @@ -29,7 +46,11 @@ def test_Paint_inputs(): def test_Paint_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + 
out_file=dict( + extensions=None, + ), + ) outputs = Paint.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py b/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py index 74929dfd51..e1632020b5 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py @@ -4,36 +4,109 @@ def test_ParcellationStats_inputs(): input_map = dict( - args=dict(argstr="%s"), - aseg=dict(extensions=None, mandatory=True), - brainmask=dict(extensions=None, mandatory=True), + args=dict( + argstr="%s", + ), + aseg=dict( + extensions=None, + mandatory=True, + ), + brainmask=dict( + extensions=None, + mandatory=True, + ), copy_inputs=dict(), - cortex_label=dict(extensions=None), - environ=dict(nohash=True, usedefault=True), - hemisphere=dict(argstr="%s", mandatory=True, position=-2), - in_annotation=dict(argstr="-a %s", extensions=None, xor=["in_label"]), - in_cortex=dict(argstr="-cortex %s", extensions=None), + cortex_label=dict( + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hemisphere=dict( + argstr="%s", + mandatory=True, + position=-2, + ), + in_annotation=dict( + argstr="-a %s", + extensions=None, + xor=["in_label"], + ), + in_cortex=dict( + argstr="-cortex %s", + extensions=None, + ), in_label=dict( - argstr="-l %s", extensions=None, xor=["in_annotatoin", "out_color"] + argstr="-l %s", + extensions=None, + xor=["in_annotatoin", "out_color"], + ), + lh_pial=dict( + extensions=None, + mandatory=True, + ), + lh_white=dict( + extensions=None, + mandatory=True, + ), + mgz=dict( + argstr="-mgz", + ), + out_color=dict( + argstr="-c %s", + extensions=None, + genfile=True, + xor=["in_label"], ), - lh_pial=dict(extensions=None, mandatory=True), - lh_white=dict(extensions=None, mandatory=True), - mgz=dict(argstr="-mgz"), - out_color=dict(argstr="-c %s", extensions=None, genfile=True, xor=["in_label"]), out_table=dict( - argstr="-f %s", extensions=None, genfile=True, requires=["tabular_output"] + argstr="-f %s", + extensions=None, + genfile=True, + requires=["tabular_output"], + ), + rh_pial=dict( + extensions=None, + mandatory=True, + ), + rh_white=dict( + extensions=None, + mandatory=True, + ), + ribbon=dict( + extensions=None, + mandatory=True, + ), + subject_id=dict( + argstr="%s", + mandatory=True, + position=-3, + usedefault=True, ), - rh_pial=dict(extensions=None, mandatory=True), - rh_white=dict(extensions=None, mandatory=True), - ribbon=dict(extensions=None, mandatory=True), - subject_id=dict(argstr="%s", mandatory=True, position=-3, usedefault=True), subjects_dir=dict(), - surface=dict(argstr="%s", position=-1), - tabular_output=dict(argstr="-b"), - th3=dict(argstr="-th3", requires=["cortex_label"]), - thickness=dict(extensions=None, mandatory=True), - transform=dict(extensions=None, mandatory=True), - wm=dict(extensions=None, mandatory=True), + surface=dict( + argstr="%s", + position=-1, + ), + tabular_output=dict( + argstr="-b", + ), + th3=dict( + argstr="-th3", + requires=["cortex_label"], + ), + thickness=dict( + extensions=None, + mandatory=True, + ), + transform=dict( + extensions=None, + mandatory=True, + ), + wm=dict( + extensions=None, + mandatory=True, + ), ) inputs = ParcellationStats.input_spec() @@ -43,7 +116,14 @@ def test_ParcellationStats_inputs(): def test_ParcellationStats_outputs(): - output_map = dict(out_color=dict(extensions=None), out_table=dict(extensions=None)) + 
output_map = dict( + out_color=dict( + extensions=None, + ), + out_table=dict( + extensions=None, + ), + ) outputs = ParcellationStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py b/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py index 0f3f4ef2be..3168ac64ec 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py @@ -4,13 +4,29 @@ def test_ParseDICOMDir_inputs(): input_map = dict( - args=dict(argstr="%s"), - dicom_dir=dict(argstr="--d %s", mandatory=True), - dicom_info_file=dict(argstr="--o %s", extensions=None, usedefault=True), - environ=dict(nohash=True, usedefault=True), - sortbyrun=dict(argstr="--sortbyrun"), + args=dict( + argstr="%s", + ), + dicom_dir=dict( + argstr="--d %s", + mandatory=True, + ), + dicom_info_file=dict( + argstr="--o %s", + extensions=None, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + sortbyrun=dict( + argstr="--sortbyrun", + ), subjects_dir=dict(), - summarize=dict(argstr="--summarize"), + summarize=dict( + argstr="--summarize", + ), ) inputs = ParseDICOMDir.input_spec() @@ -20,7 +36,11 @@ def test_ParseDICOMDir_inputs(): def test_ParseDICOMDir_outputs(): - output_map = dict(dicom_info_file=dict(extensions=None)) + output_map = dict( + dicom_info_file=dict( + extensions=None, + ), + ) outputs = ParseDICOMDir.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py index d5cbe65c7f..aa270f30b3 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py @@ -4,58 +4,168 @@ def test_ReconAll_inputs(): input_map = dict( - FLAIR_file=dict(argstr="-FLAIR %s", extensions=None, min_ver="5.3.0"), - T1_files=dict(argstr="-i %s..."), - T2_file=dict(argstr="-T2 %s", extensions=None, min_ver="5.3.0"), - args=dict(argstr="%s"), - big_ventricles=dict(argstr="-bigventricles"), - brainstem=dict(argstr="-brainstem-structures"), - directive=dict(argstr="-%s", position=0, usedefault=True), - environ=dict(nohash=True, usedefault=True), - expert=dict(argstr="-expert %s", extensions=None), - flags=dict(argstr="%s"), - hemi=dict(argstr="-hemi %s"), + FLAIR_file=dict( + argstr="-FLAIR %s", + extensions=None, + min_ver="5.3.0", + ), + T1_files=dict( + argstr="-i %s...", + ), + T2_file=dict( + argstr="-T2 %s", + extensions=None, + min_ver="5.3.0", + ), + args=dict( + argstr="%s", + ), + big_ventricles=dict( + argstr="-bigventricles", + ), + brainstem=dict( + argstr="-brainstem-structures", + ), + directive=dict( + argstr="-%s", + position=0, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + expert=dict( + argstr="-expert %s", + extensions=None, + ), + flags=dict( + argstr="%s", + ), + hemi=dict( + argstr="-hemi %s", + ), hippocampal_subfields_T1=dict( - argstr="-hippocampal-subfields-T1", min_ver="6.0.0" + argstr="-hippocampal-subfields-T1", + min_ver="6.0.0", ), hippocampal_subfields_T2=dict( - argstr="-hippocampal-subfields-T2 %s %s", min_ver="6.0.0" - ), - hires=dict(argstr="-hires", min_ver="6.0.0"), - mprage=dict(argstr="-mprage"), - mri_aparc2aseg=dict(xor=["expert"]), - mri_ca_label=dict(xor=["expert"]), - mri_ca_normalize=dict(xor=["expert"]), - mri_ca_register=dict(xor=["expert"]), - 
mri_edit_wm_with_aseg=dict(xor=["expert"]), - mri_em_register=dict(xor=["expert"]), - mri_fill=dict(xor=["expert"]), - mri_mask=dict(xor=["expert"]), - mri_normalize=dict(xor=["expert"]), - mri_pretess=dict(xor=["expert"]), - mri_remove_neck=dict(xor=["expert"]), - mri_segment=dict(xor=["expert"]), - mri_segstats=dict(xor=["expert"]), - mri_tessellate=dict(xor=["expert"]), - mri_watershed=dict(xor=["expert"]), - mris_anatomical_stats=dict(xor=["expert"]), - mris_ca_label=dict(xor=["expert"]), - mris_fix_topology=dict(xor=["expert"]), - mris_inflate=dict(xor=["expert"]), - mris_make_surfaces=dict(xor=["expert"]), - mris_register=dict(xor=["expert"]), - mris_smooth=dict(xor=["expert"]), - mris_sphere=dict(xor=["expert"]), - mris_surf2vol=dict(xor=["expert"]), - mrisp_paint=dict(xor=["expert"]), - openmp=dict(argstr="-openmp %d"), - parallel=dict(argstr="-parallel"), - subject_id=dict(argstr="-subjid %s", usedefault=True), - subjects_dir=dict(argstr="-sd %s", genfile=True, hash_files=False), - talairach=dict(xor=["expert"]), - use_FLAIR=dict(argstr="-FLAIRpial", min_ver="5.3.0", xor=["use_T2"]), - use_T2=dict(argstr="-T2pial", min_ver="5.3.0", xor=["use_FLAIR"]), - xopts=dict(argstr="-xopts-%s"), + argstr="-hippocampal-subfields-T2 %s %s", + min_ver="6.0.0", + ), + hires=dict( + argstr="-hires", + min_ver="6.0.0", + ), + mprage=dict( + argstr="-mprage", + ), + mri_aparc2aseg=dict( + xor=["expert"], + ), + mri_ca_label=dict( + xor=["expert"], + ), + mri_ca_normalize=dict( + xor=["expert"], + ), + mri_ca_register=dict( + xor=["expert"], + ), + mri_edit_wm_with_aseg=dict( + xor=["expert"], + ), + mri_em_register=dict( + xor=["expert"], + ), + mri_fill=dict( + xor=["expert"], + ), + mri_mask=dict( + xor=["expert"], + ), + mri_normalize=dict( + xor=["expert"], + ), + mri_pretess=dict( + xor=["expert"], + ), + mri_remove_neck=dict( + xor=["expert"], + ), + mri_segment=dict( + xor=["expert"], + ), + mri_segstats=dict( + xor=["expert"], + ), + mri_tessellate=dict( + xor=["expert"], + ), + mri_watershed=dict( + xor=["expert"], + ), + mris_anatomical_stats=dict( + xor=["expert"], + ), + mris_ca_label=dict( + xor=["expert"], + ), + mris_fix_topology=dict( + xor=["expert"], + ), + mris_inflate=dict( + xor=["expert"], + ), + mris_make_surfaces=dict( + xor=["expert"], + ), + mris_register=dict( + xor=["expert"], + ), + mris_smooth=dict( + xor=["expert"], + ), + mris_sphere=dict( + xor=["expert"], + ), + mris_surf2vol=dict( + xor=["expert"], + ), + mrisp_paint=dict( + xor=["expert"], + ), + openmp=dict( + argstr="-openmp %d", + ), + parallel=dict( + argstr="-parallel", + ), + subject_id=dict( + argstr="-subjid %s", + usedefault=True, + ), + subjects_dir=dict( + argstr="-sd %s", + genfile=True, + hash_files=False, + ), + talairach=dict( + xor=["expert"], + ), + use_FLAIR=dict( + argstr="-FLAIRpial", + min_ver="5.3.0", + xor=["use_T2"], + ), + use_T2=dict( + argstr="-T2pial", + min_ver="5.3.0", + xor=["use_FLAIR"], + ), + xopts=dict( + argstr="-xopts-%s", + ), ) inputs = ReconAll.input_spec() @@ -66,45 +176,145 @@ def test_ReconAll_inputs(): def test_ReconAll_outputs(): output_map = dict( - BA_stats=dict(altkey="BA", loc="stats"), - T1=dict(extensions=None, loc="mri"), - annot=dict(altkey="*annot", loc="label"), - aparc_a2009s_stats=dict(altkey="aparc.a2009s", loc="stats"), - aparc_aseg=dict(altkey="aparc*aseg", loc="mri"), - aparc_stats=dict(altkey="aparc", loc="stats"), - area_pial=dict(altkey="area.pial", loc="surf"), - aseg=dict(extensions=None, loc="mri"), - aseg_stats=dict(altkey="aseg", loc="stats"), - 
avg_curv=dict(loc="surf"), - brain=dict(extensions=None, loc="mri"), - brainmask=dict(extensions=None, loc="mri"), - curv=dict(loc="surf"), - curv_pial=dict(altkey="curv.pial", loc="surf"), - curv_stats=dict(altkey="curv", loc="stats"), - entorhinal_exvivo_stats=dict(altkey="entorhinal_exvivo", loc="stats"), - filled=dict(extensions=None, loc="mri"), - graymid=dict(altkey=["graymid", "midthickness"], loc="surf"), - inflated=dict(loc="surf"), - jacobian_white=dict(loc="surf"), - label=dict(altkey="*label", loc="label"), - norm=dict(extensions=None, loc="mri"), - nu=dict(extensions=None, loc="mri"), - orig=dict(extensions=None, loc="mri"), - pial=dict(loc="surf"), - rawavg=dict(extensions=None, loc="mri"), - ribbon=dict(altkey="*ribbon", loc="mri"), - smoothwm=dict(loc="surf"), - sphere=dict(loc="surf"), - sphere_reg=dict(altkey="sphere.reg", loc="surf"), + BA_stats=dict( + altkey="BA", + loc="stats", + ), + T1=dict( + extensions=None, + loc="mri", + ), + annot=dict( + altkey="*annot", + loc="label", + ), + aparc_a2009s_stats=dict( + altkey="aparc.a2009s", + loc="stats", + ), + aparc_aseg=dict( + altkey="aparc*aseg", + loc="mri", + ), + aparc_stats=dict( + altkey="aparc", + loc="stats", + ), + area_pial=dict( + altkey="area.pial", + loc="surf", + ), + aseg=dict( + extensions=None, + loc="mri", + ), + aseg_stats=dict( + altkey="aseg", + loc="stats", + ), + avg_curv=dict( + loc="surf", + ), + brain=dict( + extensions=None, + loc="mri", + ), + brainmask=dict( + extensions=None, + loc="mri", + ), + curv=dict( + loc="surf", + ), + curv_pial=dict( + altkey="curv.pial", + loc="surf", + ), + curv_stats=dict( + altkey="curv", + loc="stats", + ), + entorhinal_exvivo_stats=dict( + altkey="entorhinal_exvivo", + loc="stats", + ), + filled=dict( + extensions=None, + loc="mri", + ), + graymid=dict( + altkey=["graymid", "midthickness"], + loc="surf", + ), + inflated=dict( + loc="surf", + ), + jacobian_white=dict( + loc="surf", + ), + label=dict( + altkey="*label", + loc="label", + ), + norm=dict( + extensions=None, + loc="mri", + ), + nu=dict( + extensions=None, + loc="mri", + ), + orig=dict( + extensions=None, + loc="mri", + ), + pial=dict( + loc="surf", + ), + rawavg=dict( + extensions=None, + loc="mri", + ), + ribbon=dict( + altkey="*ribbon", + loc="mri", + ), + smoothwm=dict( + loc="surf", + ), + sphere=dict( + loc="surf", + ), + sphere_reg=dict( + altkey="sphere.reg", + loc="surf", + ), subject_id=dict(), subjects_dir=dict(), - sulc=dict(loc="surf"), - thickness=dict(loc="surf"), - volume=dict(loc="surf"), - white=dict(loc="surf"), - wm=dict(extensions=None, loc="mri"), - wmparc=dict(extensions=None, loc="mri"), - wmparc_stats=dict(altkey="wmparc", loc="stats"), + sulc=dict( + loc="surf", + ), + thickness=dict( + loc="surf", + ), + volume=dict( + loc="surf", + ), + white=dict( + loc="surf", + ), + wm=dict( + extensions=None, + loc="mri", + ), + wmparc=dict( + extensions=None, + loc="mri", + ), + wmparc_stats=dict( + altkey="wmparc", + loc="stats", + ), ) outputs = ReconAll.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Register.py b/nipype/interfaces/freesurfer/tests/test_auto_Register.py index 4e1187f62d..c10daabd58 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Register.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Register.py @@ -4,17 +4,46 @@ def test_Register_inputs(): input_map = dict( - args=dict(argstr="%s"), - curv=dict(argstr="-curv", requires=["in_smoothwm"]), - environ=dict(nohash=True, usedefault=True), - in_smoothwm=dict(copyfile=True, 
extensions=None), - in_sulc=dict(copyfile=True, extensions=None, mandatory=True), + args=dict( + argstr="%s", + ), + curv=dict( + argstr="-curv", + requires=["in_smoothwm"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_smoothwm=dict( + copyfile=True, + extensions=None, + ), + in_sulc=dict( + copyfile=True, + extensions=None, + mandatory=True, + ), in_surf=dict( - argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-3 + argstr="%s", + copyfile=True, + extensions=None, + mandatory=True, + position=-3, + ), + out_file=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, ), - out_file=dict(argstr="%s", extensions=None, genfile=True, position=-1), subjects_dir=dict(), - target=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + target=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), ) inputs = Register.input_spec() @@ -24,7 +53,11 @@ def test_Register_inputs(): def test_Register_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Register.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py b/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py index ef3763fe65..f66ac1bda7 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py @@ -4,13 +4,38 @@ def test_RegisterAVItoTalairach_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), - out_file=dict(argstr="%s", extensions=None, position=3, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), + out_file=dict( + argstr="%s", + extensions=None, + position=3, + usedefault=True, + ), subjects_dir=dict(), - target=dict(argstr="%s", extensions=None, mandatory=True, position=1), - vox2vox=dict(argstr="%s", extensions=None, mandatory=True, position=2), + target=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + vox2vox=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), ) inputs = RegisterAVItoTalairach.input_spec() @@ -21,7 +46,13 @@ def test_RegisterAVItoTalairach_inputs(): def test_RegisterAVItoTalairach_outputs(): output_map = dict( - log_file=dict(extensions=None, usedefault=True), out_file=dict(extensions=None) + log_file=dict( + extensions=None, + usedefault=True, + ), + out_file=dict( + extensions=None, + ), ) outputs = RegisterAVItoTalairach.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py b/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py index 5c48d3e406..eeac74f722 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py @@ -4,10 +4,24 @@ def test_RelabelHypointensities_inputs(): input_map = dict( - args=dict(argstr="%s"), - aseg=dict(argstr="%s", extensions=None, mandatory=True, position=-3), - environ=dict(nohash=True, usedefault=True), - lh_white=dict(copyfile=True, extensions=None, mandatory=True), + args=dict( + argstr="%s", + ), + aseg=dict( + argstr="%s", + 
extensions=None, + mandatory=True, + position=-3, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + lh_white=dict( + copyfile=True, + extensions=None, + mandatory=True, + ), out_file=dict( argstr="%s", extensions=None, @@ -17,9 +31,17 @@ def test_RelabelHypointensities_inputs(): name_template="%s.hypos.mgz", position=-1, ), - rh_white=dict(copyfile=True, extensions=None, mandatory=True), + rh_white=dict( + copyfile=True, + extensions=None, + mandatory=True, + ), subjects_dir=dict(), - surf_directory=dict(argstr="%s", position=-2, usedefault=True), + surf_directory=dict( + argstr="%s", + position=-2, + usedefault=True, + ), ) inputs = RelabelHypointensities.input_spec() @@ -29,7 +51,12 @@ def test_RelabelHypointensities_inputs(): def test_RelabelHypointensities_outputs(): - output_map = dict(out_file=dict(argstr="%s", extensions=None)) + output_map = dict( + out_file=dict( + argstr="%s", + extensions=None, + ), + ) outputs = RelabelHypointensities.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py b/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py index 421e90e4fa..735ea7b84a 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py @@ -4,10 +4,19 @@ def test_RemoveIntersection_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-2 + argstr="%s", + copyfile=True, + extensions=None, + mandatory=True, + position=-2, ), out_file=dict( argstr="%s", @@ -28,7 +37,11 @@ def test_RemoveIntersection_inputs(): def test_RemoveIntersection_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = RemoveIntersection.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py b/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py index b8211ba7f4..3d2ce30cbd 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py @@ -4,9 +4,19 @@ def test_RemoveNeck_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-4), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-4, + ), out_file=dict( argstr="%s", extensions=None, @@ -16,10 +26,22 @@ def test_RemoveNeck_inputs(): name_template="%s_noneck", position=-1, ), - radius=dict(argstr="-radius %d"), + radius=dict( + argstr="-radius %d", + ), subjects_dir=dict(), - template=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - transform=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + template=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + transform=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), ) inputs = RemoveNeck.input_spec() @@ -29,7 +51,11 @@ def test_RemoveNeck_inputs(): def test_RemoveNeck_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + 
out_file=dict( + extensions=None, + ), + ) outputs = RemoveNeck.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Resample.py b/nipype/interfaces/freesurfer/tests/test_auto_Resample.py index cd109ec2b3..280a8a4cc1 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Resample.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Resample.py @@ -4,12 +4,30 @@ def test_Resample_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=-2), - resampled_file=dict(argstr="-o %s", extensions=None, genfile=True, position=-1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + position=-2, + ), + resampled_file=dict( + argstr="-o %s", + extensions=None, + genfile=True, + position=-1, + ), subjects_dir=dict(), - voxel_size=dict(argstr="-vs %.2f %.2f %.2f", mandatory=True), + voxel_size=dict( + argstr="-vs %.2f %.2f %.2f", + mandatory=True, + ), ) inputs = Resample.input_spec() @@ -19,7 +37,11 @@ def test_Resample_inputs(): def test_Resample_outputs(): - output_map = dict(resampled_file=dict(extensions=None)) + output_map = dict( + resampled_file=dict( + extensions=None, + ), + ) outputs = Resample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py b/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py index d64445faa9..3f7e1b96a0 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py @@ -4,38 +4,113 @@ def test_RobustRegister_inputs(): input_map = dict( - args=dict(argstr="%s"), - auto_sens=dict(argstr="--satit", mandatory=True, xor=["outlier_sens"]), - environ=dict(nohash=True, usedefault=True), - est_int_scale=dict(argstr="--iscale"), - force_double=dict(argstr="--doubleprec"), - force_float=dict(argstr="--floattype"), - half_source=dict(argstr="--halfmov %s"), - half_source_xfm=dict(argstr="--halfmovlta %s"), - half_targ=dict(argstr="--halfdst %s"), - half_targ_xfm=dict(argstr="--halfdstlta %s"), - half_weights=dict(argstr="--halfweights %s"), - high_iterations=dict(argstr="--highit %d"), - in_xfm_file=dict(argstr="--transform", extensions=None), - init_orient=dict(argstr="--initorient"), - iteration_thresh=dict(argstr="--epsit %.3f"), - least_squares=dict(argstr="--leastsquares"), - mask_source=dict(argstr="--maskmov %s", extensions=None), - mask_target=dict(argstr="--maskdst %s", extensions=None), - max_iterations=dict(argstr="--maxit %d"), - no_init=dict(argstr="--noinit"), - no_multi=dict(argstr="--nomulti"), - out_reg_file=dict(argstr="--lta %s", usedefault=True), - outlier_limit=dict(argstr="--wlimit %.3f"), - outlier_sens=dict(argstr="--sat %.4f", mandatory=True, xor=["auto_sens"]), - registered_file=dict(argstr="--warp %s"), - source_file=dict(argstr="--mov %s", extensions=None, mandatory=True), + args=dict( + argstr="%s", + ), + auto_sens=dict( + argstr="--satit", + mandatory=True, + xor=["outlier_sens"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + est_int_scale=dict( + argstr="--iscale", + ), + force_double=dict( + argstr="--doubleprec", + ), + force_float=dict( + argstr="--floattype", + ), + half_source=dict( + argstr="--halfmov %s", + ), + half_source_xfm=dict( + argstr="--halfmovlta %s", + 
), + half_targ=dict( + argstr="--halfdst %s", + ), + half_targ_xfm=dict( + argstr="--halfdstlta %s", + ), + half_weights=dict( + argstr="--halfweights %s", + ), + high_iterations=dict( + argstr="--highit %d", + ), + in_xfm_file=dict( + argstr="--transform", + extensions=None, + ), + init_orient=dict( + argstr="--initorient", + ), + iteration_thresh=dict( + argstr="--epsit %.3f", + ), + least_squares=dict( + argstr="--leastsquares", + ), + mask_source=dict( + argstr="--maskmov %s", + extensions=None, + ), + mask_target=dict( + argstr="--maskdst %s", + extensions=None, + ), + max_iterations=dict( + argstr="--maxit %d", + ), + no_init=dict( + argstr="--noinit", + ), + no_multi=dict( + argstr="--nomulti", + ), + out_reg_file=dict( + argstr="--lta %s", + usedefault=True, + ), + outlier_limit=dict( + argstr="--wlimit %.3f", + ), + outlier_sens=dict( + argstr="--sat %.4f", + mandatory=True, + xor=["auto_sens"], + ), + registered_file=dict( + argstr="--warp %s", + ), + source_file=dict( + argstr="--mov %s", + extensions=None, + mandatory=True, + ), subjects_dir=dict(), - subsample_thresh=dict(argstr="--subsample %d"), - target_file=dict(argstr="--dst %s", extensions=None, mandatory=True), - trans_only=dict(argstr="--transonly"), - weights_file=dict(argstr="--weights %s"), - write_vo2vox=dict(argstr="--vox2vox"), + subsample_thresh=dict( + argstr="--subsample %d", + ), + target_file=dict( + argstr="--dst %s", + extensions=None, + mandatory=True, + ), + trans_only=dict( + argstr="--transonly", + ), + weights_file=dict( + argstr="--weights %s", + ), + write_vo2vox=dict( + argstr="--vox2vox", + ), ) inputs = RobustRegister.input_spec() @@ -46,14 +121,30 @@ def test_RobustRegister_inputs(): def test_RobustRegister_outputs(): output_map = dict( - half_source=dict(extensions=None), - half_source_xfm=dict(extensions=None), - half_targ=dict(extensions=None), - half_targ_xfm=dict(extensions=None), - half_weights=dict(extensions=None), - out_reg_file=dict(extensions=None), - registered_file=dict(extensions=None), - weights_file=dict(extensions=None), + half_source=dict( + extensions=None, + ), + half_source_xfm=dict( + extensions=None, + ), + half_targ=dict( + extensions=None, + ), + half_targ_xfm=dict( + extensions=None, + ), + half_weights=dict( + extensions=None, + ), + out_reg_file=dict( + extensions=None, + ), + registered_file=dict( + extensions=None, + ), + weights_file=dict( + extensions=None, + ), ) outputs = RobustRegister.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py b/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py index 53391af994..8c180332db 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py @@ -4,30 +4,65 @@ def test_RobustTemplate_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), auto_detect_sensitivity=dict( - argstr="--satit", mandatory=True, xor=["outlier_sensitivity"] - ), - average_metric=dict(argstr="--average %d"), - environ=dict(nohash=True, usedefault=True), - fixed_timepoint=dict(argstr="--fixtp"), - in_files=dict(argstr="--mov %s", mandatory=True), - in_intensity_scales=dict(argstr="--iscalein %s"), - initial_timepoint=dict(argstr="--inittp %d"), - initial_transforms=dict(argstr="--ixforms %s"), - intensity_scaling=dict(argstr="--iscale"), - no_iteration=dict(argstr="--noit"), + argstr="--satit", + mandatory=True, + xor=["outlier_sensitivity"], + ), + average_metric=dict( + argstr="--average 
%d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_timepoint=dict( + argstr="--fixtp", + ), + in_files=dict( + argstr="--mov %s", + mandatory=True, + ), + in_intensity_scales=dict( + argstr="--iscalein %s", + ), + initial_timepoint=dict( + argstr="--inittp %d", + ), + initial_transforms=dict( + argstr="--ixforms %s", + ), + intensity_scaling=dict( + argstr="--iscale", + ), + no_iteration=dict( + argstr="--noit", + ), num_threads=dict(), out_file=dict( - argstr="--template %s", extensions=None, mandatory=True, usedefault=True + argstr="--template %s", + extensions=None, + mandatory=True, + usedefault=True, ), outlier_sensitivity=dict( - argstr="--sat %.4f", mandatory=True, xor=["auto_detect_sensitivity"] + argstr="--sat %.4f", + mandatory=True, + xor=["auto_detect_sensitivity"], + ), + scaled_intensity_outputs=dict( + argstr="--iscaleout %s", ), - scaled_intensity_outputs=dict(argstr="--iscaleout %s"), subjects_dir=dict(), - subsample_threshold=dict(argstr="--subsample %d"), - transform_outputs=dict(argstr="--lta %s"), + subsample_threshold=dict( + argstr="--subsample %d", + ), + transform_outputs=dict( + argstr="--lta %s", + ), ) inputs = RobustTemplate.input_spec() @@ -38,7 +73,9 @@ def test_RobustTemplate_inputs(): def test_RobustTemplate_outputs(): output_map = dict( - out_file=dict(extensions=None), + out_file=dict( + extensions=None, + ), scaled_intensity_outputs=dict(), transform_outputs=dict(), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py b/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py index 1dd20355b9..de9ffe2485 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py @@ -4,31 +4,83 @@ def test_SampleToSurface_inputs(): input_map = dict( - apply_rot=dict(argstr="--rot %.3f %.3f %.3f"), - apply_trans=dict(argstr="--trans %.3f %.3f %.3f"), - args=dict(argstr="%s"), - cortex_mask=dict(argstr="--cortex", xor=["mask_label"]), - environ=dict(nohash=True, usedefault=True), - fix_tk_reg=dict(argstr="--fixtkreg"), - float2int_method=dict(argstr="--float2int %s"), - frame=dict(argstr="--frame %d"), - hemi=dict(argstr="--hemi %s", mandatory=True), - hits_file=dict(argstr="--srchit %s"), - hits_type=dict(argstr="--srchit_type"), - ico_order=dict(argstr="--icoorder %d", requires=["target_subject"]), - interp_method=dict(argstr="--interp %s"), - mask_label=dict(argstr="--mask %s", extensions=None, xor=["cortex_mask"]), + apply_rot=dict( + argstr="--rot %.3f %.3f %.3f", + ), + apply_trans=dict( + argstr="--trans %.3f %.3f %.3f", + ), + args=dict( + argstr="%s", + ), + cortex_mask=dict( + argstr="--cortex", + xor=["mask_label"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fix_tk_reg=dict( + argstr="--fixtkreg", + ), + float2int_method=dict( + argstr="--float2int %s", + ), + frame=dict( + argstr="--frame %d", + ), + hemi=dict( + argstr="--hemi %s", + mandatory=True, + ), + hits_file=dict( + argstr="--srchit %s", + ), + hits_type=dict( + argstr="--srchit_type", + ), + ico_order=dict( + argstr="--icoorder %d", + requires=["target_subject"], + ), + interp_method=dict( + argstr="--interp %s", + ), + mask_label=dict( + argstr="--mask %s", + extensions=None, + xor=["cortex_mask"], + ), mni152reg=dict( argstr="--mni152reg", mandatory=True, xor=["reg_file", "reg_header", "mni152reg"], ), - no_reshape=dict(argstr="--noreshape", xor=["reshape"]), - out_file=dict(argstr="--o %s", extensions=None, genfile=True), - 
out_type=dict(argstr="--out_type %s"), - override_reg_subj=dict(argstr="--srcsubject %s", requires=["subject_id"]), - projection_stem=dict(mandatory=True, xor=["sampling_method"]), - reference_file=dict(argstr="--ref %s", extensions=None), + no_reshape=dict( + argstr="--noreshape", + xor=["reshape"], + ), + out_file=dict( + argstr="--o %s", + extensions=None, + genfile=True, + ), + out_type=dict( + argstr="--out_type %s", + ), + override_reg_subj=dict( + argstr="--srcsubject %s", + requires=["subject_id"], + ), + projection_stem=dict( + mandatory=True, + xor=["sampling_method"], + ), + reference_file=dict( + argstr="--ref %s", + extensions=None, + ), reg_file=dict( argstr="--reg %s", extensions=None, @@ -41,8 +93,13 @@ def test_SampleToSurface_inputs(): requires=["subject_id"], xor=["reg_file", "reg_header", "mni152reg"], ), - reshape=dict(argstr="--reshape", xor=["no_reshape"]), - reshape_slices=dict(argstr="--rf %d"), + reshape=dict( + argstr="--reshape", + xor=["no_reshape"], + ), + reshape_slices=dict( + argstr="--rf %d", + ), sampling_method=dict( argstr="%s", mandatory=True, @@ -51,16 +108,35 @@ def test_SampleToSurface_inputs(): ), sampling_range=dict(), sampling_units=dict(), - scale_input=dict(argstr="--scale %.3f"), - smooth_surf=dict(argstr="--surf-fwhm %.3f"), - smooth_vol=dict(argstr="--fwhm %.3f"), - source_file=dict(argstr="--mov %s", extensions=None, mandatory=True), + scale_input=dict( + argstr="--scale %.3f", + ), + smooth_surf=dict( + argstr="--surf-fwhm %.3f", + ), + smooth_vol=dict( + argstr="--fwhm %.3f", + ), + source_file=dict( + argstr="--mov %s", + extensions=None, + mandatory=True, + ), subject_id=dict(), subjects_dir=dict(), - surf_reg=dict(argstr="--surfreg %s", requires=["target_subject"]), - surface=dict(argstr="--surf %s"), - target_subject=dict(argstr="--trgsubject %s"), - vox_file=dict(argstr="--nvox %s"), + surf_reg=dict( + argstr="--surfreg %s", + requires=["target_subject"], + ), + surface=dict( + argstr="--surf %s", + ), + target_subject=dict( + argstr="--trgsubject %s", + ), + vox_file=dict( + argstr="--nvox %s", + ), ) inputs = SampleToSurface.input_spec() @@ -71,9 +147,15 @@ def test_SampleToSurface_inputs(): def test_SampleToSurface_outputs(): output_map = dict( - hits_file=dict(extensions=None), - out_file=dict(extensions=None), - vox_file=dict(extensions=None), + hits_file=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + vox_file=dict( + extensions=None, + ), ) outputs = SampleToSurface.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py b/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py index 3db964e8a3..dfb82e8b85 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py @@ -9,72 +9,146 @@ def test_SegStats_inputs(): mandatory=True, xor=("segmentation_file", "annot", "surf_label"), ), - args=dict(argstr="%s"), - avgwf_file=dict(argstr="--avgwfvol %s"), - avgwf_txt_file=dict(argstr="--avgwf %s"), - brain_vol=dict(argstr="--%s"), - brainmask_file=dict(argstr="--brainmask %s", extensions=None), - calc_power=dict(argstr="--%s"), - calc_snr=dict(argstr="--snr"), + args=dict( + argstr="%s", + ), + avgwf_file=dict( + argstr="--avgwfvol %s", + ), + avgwf_txt_file=dict( + argstr="--avgwf %s", + ), + brain_vol=dict( + argstr="--%s", + ), + brainmask_file=dict( + argstr="--brainmask %s", + extensions=None, + ), + calc_power=dict( + argstr="--%s", + ), + calc_snr=dict( + argstr="--snr", + ), color_table_file=dict( 
argstr="--ctab %s", extensions=None, xor=("color_table_file", "default_color_table", "gca_color_table"), ), - cortex_vol_from_surf=dict(argstr="--surf-ctx-vol"), + cortex_vol_from_surf=dict( + argstr="--surf-ctx-vol", + ), default_color_table=dict( argstr="--ctab-default", xor=("color_table_file", "default_color_table", "gca_color_table"), ), - empty=dict(argstr="--empty"), - environ=dict(nohash=True, usedefault=True), - etiv=dict(argstr="--etiv"), + empty=dict( + argstr="--empty", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + etiv=dict( + argstr="--etiv", + ), etiv_only=dict(), - euler=dict(argstr="--euler"), - exclude_ctx_gm_wm=dict(argstr="--excl-ctxgmwm"), - exclude_id=dict(argstr="--excludeid %d"), - frame=dict(argstr="--frame %d"), + euler=dict( + argstr="--euler", + ), + exclude_ctx_gm_wm=dict( + argstr="--excl-ctxgmwm", + ), + exclude_id=dict( + argstr="--excludeid %d", + ), + frame=dict( + argstr="--frame %d", + ), gca_color_table=dict( argstr="--ctab-gca %s", extensions=None, xor=("color_table_file", "default_color_table", "gca_color_table"), ), - in_file=dict(argstr="--i %s", extensions=None), - in_intensity=dict(argstr="--in %s --in-intensity-name %s", extensions=None), + in_file=dict( + argstr="--i %s", + extensions=None, + ), + in_intensity=dict( + argstr="--in %s --in-intensity-name %s", + extensions=None, + ), intensity_units=dict( - argstr="--in-intensity-units %s", requires=["in_intensity"] + argstr="--in-intensity-units %s", + requires=["in_intensity"], + ), + mask_erode=dict( + argstr="--maskerode %d", + ), + mask_file=dict( + argstr="--mask %s", + extensions=None, + ), + mask_frame=dict( + requires=["mask_file"], + ), + mask_invert=dict( + argstr="--maskinvert", ), - mask_erode=dict(argstr="--maskerode %d"), - mask_file=dict(argstr="--mask %s", extensions=None), - mask_frame=dict(requires=["mask_file"]), - mask_invert=dict(argstr="--maskinvert"), mask_sign=dict(), - mask_thresh=dict(argstr="--maskthresh %f"), - multiply=dict(argstr="--mul %f"), - non_empty_only=dict(argstr="--nonempty"), - partial_volume_file=dict(argstr="--pv %s", extensions=None), - segment_id=dict(argstr="--id %s..."), + mask_thresh=dict( + argstr="--maskthresh %f", + ), + multiply=dict( + argstr="--mul %f", + ), + non_empty_only=dict( + argstr="--nonempty", + ), + partial_volume_file=dict( + argstr="--pv %s", + extensions=None, + ), + segment_id=dict( + argstr="--id %s...", + ), segmentation_file=dict( argstr="--seg %s", extensions=None, mandatory=True, xor=("segmentation_file", "annot", "surf_label"), ), - sf_avg_file=dict(argstr="--sfavg %s"), - subcort_gm=dict(argstr="--subcortgray"), + sf_avg_file=dict( + argstr="--sfavg %s", + ), + subcort_gm=dict( + argstr="--subcortgray", + ), subjects_dir=dict(), summary_file=dict( - argstr="--sum %s", extensions=None, genfile=True, position=-1 + argstr="--sum %s", + extensions=None, + genfile=True, + position=-1, + ), + supratent=dict( + argstr="--supratent", ), - supratent=dict(argstr="--supratent"), surf_label=dict( argstr="--slabel %s %s %s", mandatory=True, xor=("segmentation_file", "annot", "surf_label"), ), - total_gray=dict(argstr="--totalgray"), - vox=dict(argstr="--vox %s"), - wm_vol_from_surf=dict(argstr="--surf-wm-vol"), + total_gray=dict( + argstr="--totalgray", + ), + vox=dict( + argstr="--vox %s", + ), + wm_vol_from_surf=dict( + argstr="--surf-wm-vol", + ), ) inputs = SegStats.input_spec() @@ -85,10 +159,18 @@ def test_SegStats_inputs(): def test_SegStats_outputs(): output_map = dict( - avgwf_file=dict(extensions=None), - 
avgwf_txt_file=dict(extensions=None), - sf_avg_file=dict(extensions=None), - summary_file=dict(extensions=None), + avgwf_file=dict( + extensions=None, + ), + avgwf_txt_file=dict( + extensions=None, + ), + sf_avg_file=dict( + extensions=None, + ), + summary_file=dict( + extensions=None, + ), ) outputs = SegStats.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py index 04e12b227d..0121dd7d9e 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py @@ -9,84 +9,190 @@ def test_SegStatsReconAll_inputs(): mandatory=True, xor=("segmentation_file", "annot", "surf_label"), ), - args=dict(argstr="%s"), - aseg=dict(extensions=None), - avgwf_file=dict(argstr="--avgwfvol %s"), - avgwf_txt_file=dict(argstr="--avgwf %s"), - brain_vol=dict(argstr="--%s"), - brainmask_file=dict(argstr="--brainmask %s", extensions=None), - calc_power=dict(argstr="--%s"), - calc_snr=dict(argstr="--snr"), + args=dict( + argstr="%s", + ), + aseg=dict( + extensions=None, + ), + avgwf_file=dict( + argstr="--avgwfvol %s", + ), + avgwf_txt_file=dict( + argstr="--avgwf %s", + ), + brain_vol=dict( + argstr="--%s", + ), + brainmask_file=dict( + argstr="--brainmask %s", + extensions=None, + ), + calc_power=dict( + argstr="--%s", + ), + calc_snr=dict( + argstr="--snr", + ), color_table_file=dict( argstr="--ctab %s", extensions=None, xor=("color_table_file", "default_color_table", "gca_color_table"), ), copy_inputs=dict(), - cortex_vol_from_surf=dict(argstr="--surf-ctx-vol"), + cortex_vol_from_surf=dict( + argstr="--surf-ctx-vol", + ), default_color_table=dict( argstr="--ctab-default", xor=("color_table_file", "default_color_table", "gca_color_table"), ), - empty=dict(argstr="--empty"), - environ=dict(nohash=True, usedefault=True), - etiv=dict(argstr="--etiv"), + empty=dict( + argstr="--empty", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + etiv=dict( + argstr="--etiv", + ), etiv_only=dict(), - euler=dict(argstr="--euler"), - exclude_ctx_gm_wm=dict(argstr="--excl-ctxgmwm"), - exclude_id=dict(argstr="--excludeid %d"), - frame=dict(argstr="--frame %d"), + euler=dict( + argstr="--euler", + ), + exclude_ctx_gm_wm=dict( + argstr="--excl-ctxgmwm", + ), + exclude_id=dict( + argstr="--excludeid %d", + ), + frame=dict( + argstr="--frame %d", + ), gca_color_table=dict( argstr="--ctab-gca %s", extensions=None, xor=("color_table_file", "default_color_table", "gca_color_table"), ), - in_file=dict(argstr="--i %s", extensions=None), - in_intensity=dict(argstr="--in %s --in-intensity-name %s", extensions=None), + in_file=dict( + argstr="--i %s", + extensions=None, + ), + in_intensity=dict( + argstr="--in %s --in-intensity-name %s", + extensions=None, + ), intensity_units=dict( - argstr="--in-intensity-units %s", requires=["in_intensity"] - ), - lh_orig_nofix=dict(extensions=None, mandatory=True), - lh_pial=dict(extensions=None, mandatory=True), - lh_white=dict(extensions=None, mandatory=True), - mask_erode=dict(argstr="--maskerode %d"), - mask_file=dict(argstr="--mask %s", extensions=None), - mask_frame=dict(requires=["mask_file"]), - mask_invert=dict(argstr="--maskinvert"), + argstr="--in-intensity-units %s", + requires=["in_intensity"], + ), + lh_orig_nofix=dict( + extensions=None, + mandatory=True, + ), + lh_pial=dict( + extensions=None, + mandatory=True, + ), + lh_white=dict( + extensions=None, + mandatory=True, + ), + mask_erode=dict( + 
argstr="--maskerode %d", + ), + mask_file=dict( + argstr="--mask %s", + extensions=None, + ), + mask_frame=dict( + requires=["mask_file"], + ), + mask_invert=dict( + argstr="--maskinvert", + ), mask_sign=dict(), - mask_thresh=dict(argstr="--maskthresh %f"), - multiply=dict(argstr="--mul %f"), - non_empty_only=dict(argstr="--nonempty"), - partial_volume_file=dict(argstr="--pv %s", extensions=None), - presurf_seg=dict(extensions=None), - rh_orig_nofix=dict(extensions=None, mandatory=True), - rh_pial=dict(extensions=None, mandatory=True), - rh_white=dict(extensions=None, mandatory=True), - ribbon=dict(extensions=None, mandatory=True), - segment_id=dict(argstr="--id %s..."), + mask_thresh=dict( + argstr="--maskthresh %f", + ), + multiply=dict( + argstr="--mul %f", + ), + non_empty_only=dict( + argstr="--nonempty", + ), + partial_volume_file=dict( + argstr="--pv %s", + extensions=None, + ), + presurf_seg=dict( + extensions=None, + ), + rh_orig_nofix=dict( + extensions=None, + mandatory=True, + ), + rh_pial=dict( + extensions=None, + mandatory=True, + ), + rh_white=dict( + extensions=None, + mandatory=True, + ), + ribbon=dict( + extensions=None, + mandatory=True, + ), + segment_id=dict( + argstr="--id %s...", + ), segmentation_file=dict( argstr="--seg %s", extensions=None, mandatory=True, xor=("segmentation_file", "annot", "surf_label"), ), - sf_avg_file=dict(argstr="--sfavg %s"), - subcort_gm=dict(argstr="--subcortgray"), - subject_id=dict(argstr="--subject %s", mandatory=True, usedefault=True), + sf_avg_file=dict( + argstr="--sfavg %s", + ), + subcort_gm=dict( + argstr="--subcortgray", + ), + subject_id=dict( + argstr="--subject %s", + mandatory=True, + usedefault=True, + ), subjects_dir=dict(), summary_file=dict( - argstr="--sum %s", extensions=None, genfile=True, position=-1 + argstr="--sum %s", + extensions=None, + genfile=True, + position=-1, + ), + supratent=dict( + argstr="--supratent", ), - supratent=dict(argstr="--supratent"), surf_label=dict( argstr="--slabel %s %s %s", mandatory=True, xor=("segmentation_file", "annot", "surf_label"), ), - total_gray=dict(argstr="--totalgray"), - transform=dict(extensions=None, mandatory=True), - vox=dict(argstr="--vox %s"), - wm_vol_from_surf=dict(argstr="--surf-wm-vol"), + total_gray=dict( + argstr="--totalgray", + ), + transform=dict( + extensions=None, + mandatory=True, + ), + vox=dict( + argstr="--vox %s", + ), + wm_vol_from_surf=dict( + argstr="--surf-wm-vol", + ), ) inputs = SegStatsReconAll.input_spec() @@ -97,10 +203,18 @@ def test_SegStatsReconAll_inputs(): def test_SegStatsReconAll_outputs(): output_map = dict( - avgwf_file=dict(extensions=None), - avgwf_txt_file=dict(extensions=None), - sf_avg_file=dict(extensions=None), - summary_file=dict(extensions=None), + avgwf_file=dict( + extensions=None, + ), + avgwf_txt_file=dict( + extensions=None, + ), + sf_avg_file=dict( + extensions=None, + ), + summary_file=dict( + extensions=None, + ), ) outputs = SegStatsReconAll.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py b/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py index 5b0083cb96..7c16a1f476 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py @@ -4,11 +4,23 @@ def test_SegmentCC_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), copy_inputs=dict(), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-aseg %s", extensions=None, mandatory=True), - 
in_norm=dict(extensions=None, mandatory=True), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-aseg %s", + extensions=None, + mandatory=True, + ), + in_norm=dict( + extensions=None, + mandatory=True, + ), out_file=dict( argstr="-o %s", extensions=None, @@ -17,8 +29,17 @@ def test_SegmentCC_inputs(): name_source=["in_file"], name_template="%s.auto.mgz", ), - out_rotation=dict(argstr="-lta %s", extensions=None, mandatory=True), - subject_id=dict(argstr="%s", mandatory=True, position=-1, usedefault=True), + out_rotation=dict( + argstr="-lta %s", + extensions=None, + mandatory=True, + ), + subject_id=dict( + argstr="%s", + mandatory=True, + position=-1, + usedefault=True, + ), subjects_dir=dict(), ) inputs = SegmentCC.input_spec() @@ -30,7 +51,12 @@ def test_SegmentCC_inputs(): def test_SegmentCC_outputs(): output_map = dict( - out_file=dict(extensions=None), out_rotation=dict(extensions=None) + out_file=dict( + extensions=None, + ), + out_rotation=dict( + extensions=None, + ), ) outputs = SegmentCC.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py b/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py index f2bebb610c..8aac066c26 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py @@ -4,10 +4,25 @@ def test_SegmentWM_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + out_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), subjects_dir=dict(), ) inputs = SegmentWM.input_spec() @@ -18,7 +33,11 @@ def test_SegmentWM_inputs(): def test_SegmentWM_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SegmentWM.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py b/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py index d510fbac22..e20de1c795 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py @@ -4,19 +4,51 @@ def test_Smooth_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="--i %s", extensions=None, mandatory=True), - num_iters=dict(argstr="--niters %d", mandatory=True, xor=["surface_fwhm"]), - proj_frac=dict(argstr="--projfrac %s", xor=["proj_frac_avg"]), - proj_frac_avg=dict(argstr="--projfrac-avg %.2f %.2f %.2f", xor=["proj_frac"]), - reg_file=dict(argstr="--reg %s", extensions=None, mandatory=True), - smoothed_file=dict(argstr="--o %s", extensions=None, genfile=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="--i %s", + extensions=None, + mandatory=True, + ), + num_iters=dict( + argstr="--niters %d", + mandatory=True, + xor=["surface_fwhm"], + ), + proj_frac=dict( + argstr="--projfrac %s", + xor=["proj_frac_avg"], + ), + proj_frac_avg=dict( + argstr="--projfrac-avg %.2f %.2f %.2f", + xor=["proj_frac"], + ), + reg_file=dict( + argstr="--reg %s", + 
extensions=None, + mandatory=True, + ), + smoothed_file=dict( + argstr="--o %s", + extensions=None, + genfile=True, + ), subjects_dir=dict(), surface_fwhm=dict( - argstr="--fwhm %f", mandatory=True, requires=["reg_file"], xor=["num_iters"] + argstr="--fwhm %f", + mandatory=True, + requires=["reg_file"], + xor=["num_iters"], + ), + vol_fwhm=dict( + argstr="--vol-fwhm %f", ), - vol_fwhm=dict(argstr="--vol-fwhm %f"), ) inputs = Smooth.input_spec() @@ -26,7 +58,11 @@ def test_Smooth_inputs(): def test_Smooth_outputs(): - output_map = dict(smoothed_file=dict(extensions=None)) + output_map = dict( + smoothed_file=dict( + extensions=None, + ), + ) outputs = Smooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py b/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py index 983296b4bd..5f97cc281b 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py @@ -4,25 +4,65 @@ def test_SmoothTessellation_inputs(): input_map = dict( - args=dict(argstr="%s"), - curvature_averaging_iterations=dict(argstr="-a %d"), - disable_estimates=dict(argstr="-nw"), - environ=dict(nohash=True, usedefault=True), - gaussian_curvature_norm_steps=dict(argstr="%d"), - gaussian_curvature_smoothing_steps=dict(argstr=" %d"), + args=dict( + argstr="%s", + ), + curvature_averaging_iterations=dict( + argstr="-a %d", + ), + disable_estimates=dict( + argstr="-nw", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gaussian_curvature_norm_steps=dict( + argstr="%d", + ), + gaussian_curvature_smoothing_steps=dict( + argstr=" %d", + ), in_file=dict( - argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-2 - ), - normalize_area=dict(argstr="-area"), - out_area_file=dict(argstr="-b %s", extensions=None), - out_curvature_file=dict(argstr="-c %s", extensions=None), - out_file=dict(argstr="%s", extensions=None, genfile=True, position=-1), - seed=dict(argstr="-seed %d"), - smoothing_iterations=dict(argstr="-n %d"), - snapshot_writing_iterations=dict(argstr="-w %d"), + argstr="%s", + copyfile=True, + extensions=None, + mandatory=True, + position=-2, + ), + normalize_area=dict( + argstr="-area", + ), + out_area_file=dict( + argstr="-b %s", + extensions=None, + ), + out_curvature_file=dict( + argstr="-c %s", + extensions=None, + ), + out_file=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + seed=dict( + argstr="-seed %d", + ), + smoothing_iterations=dict( + argstr="-n %d", + ), + snapshot_writing_iterations=dict( + argstr="-w %d", + ), subjects_dir=dict(), - use_gaussian_curvature_smoothing=dict(argstr="-g"), - use_momentum=dict(argstr="-m"), + use_gaussian_curvature_smoothing=dict( + argstr="-g", + ), + use_momentum=dict( + argstr="-m", + ), ) inputs = SmoothTessellation.input_spec() @@ -32,7 +72,11 @@ def test_SmoothTessellation_inputs(): def test_SmoothTessellation_outputs(): - output_map = dict(surface=dict(extensions=None)) + output_map = dict( + surface=dict( + extensions=None, + ), + ) outputs = SmoothTessellation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py b/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py index 35d47936ea..84673e2951 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py @@ -4,13 +4,27 @@ def 
test_Sphere_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=True, extensions=None, mandatory=True, position=-2 + argstr="%s", + copyfile=True, + extensions=None, + mandatory=True, + position=-2, + ), + in_smoothwm=dict( + copyfile=True, + extensions=None, + ), + magic=dict( + argstr="-q", ), - in_smoothwm=dict(copyfile=True, extensions=None), - magic=dict(argstr="-q"), num_threads=dict(), out_file=dict( argstr="%s", @@ -20,7 +34,9 @@ def test_Sphere_inputs(): name_template="%s.sphere", position=-1, ), - seed=dict(argstr="-seed %d"), + seed=dict( + argstr="-seed %d", + ), subjects_dir=dict(), ) inputs = Sphere.input_spec() @@ -31,7 +47,11 @@ def test_Sphere_inputs(): def test_Sphere_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Sphere.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py b/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py index f9fdc41306..73f5b3efd7 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py @@ -4,19 +4,60 @@ def test_SphericalAverage_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - erode=dict(argstr="-erode %d"), - fname=dict(argstr="%s", mandatory=True, position=-5), - hemisphere=dict(argstr="%s", mandatory=True, position=-4), - in_average=dict(argstr="%s", genfile=True, position=-2), - in_orig=dict(argstr="-orig %s", extensions=None), - in_surf=dict(argstr="%s", extensions=None, mandatory=True, position=-3), - out_file=dict(argstr="%s", extensions=None, genfile=True, position=-1), - subject_id=dict(argstr="-o %s", mandatory=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + erode=dict( + argstr="-erode %d", + ), + fname=dict( + argstr="%s", + mandatory=True, + position=-5, + ), + hemisphere=dict( + argstr="%s", + mandatory=True, + position=-4, + ), + in_average=dict( + argstr="%s", + genfile=True, + position=-2, + ), + in_orig=dict( + argstr="-orig %s", + extensions=None, + ), + in_surf=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + out_file=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + subject_id=dict( + argstr="-o %s", + mandatory=True, + ), subjects_dir=dict(), - threshold=dict(argstr="-t %.1f"), - which=dict(argstr="%s", mandatory=True, position=-6), + threshold=dict( + argstr="-t %.1f", + ), + which=dict( + argstr="%s", + mandatory=True, + position=-6, + ), ) inputs = SphericalAverage.input_spec() @@ -26,7 +67,11 @@ def test_SphericalAverage_inputs(): def test_SphericalAverage_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SphericalAverage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py b/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py index 999d67ff8e..01dc354710 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py @@ -4,13 +4,29 
@@ def test_Surface2VolTransform_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - hemi=dict(argstr="--hemi %s", mandatory=True), - mkmask=dict(argstr="--mkmask", xor=["source_file"]), - projfrac=dict(argstr="--projfrac %s"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hemi=dict( + argstr="--hemi %s", + mandatory=True, + ), + mkmask=dict( + argstr="--mkmask", + xor=["source_file"], + ), + projfrac=dict( + argstr="--projfrac %s", + ), reg_file=dict( - argstr="--volreg %s", extensions=None, mandatory=True, xor=["subject_id"] + argstr="--volreg %s", + extensions=None, + mandatory=True, + xor=["subject_id"], ), source_file=dict( argstr="--surfval %s", @@ -19,10 +35,20 @@ def test_Surface2VolTransform_inputs(): mandatory=True, xor=["mkmask"], ), - subject_id=dict(argstr="--identity %s", xor=["reg_file"]), - subjects_dir=dict(argstr="--sd %s"), - surf_name=dict(argstr="--surf %s"), - template_file=dict(argstr="--template %s", extensions=None), + subject_id=dict( + argstr="--identity %s", + xor=["reg_file"], + ), + subjects_dir=dict( + argstr="--sd %s", + ), + surf_name=dict( + argstr="--surf %s", + ), + template_file=dict( + argstr="--template %s", + extensions=None, + ), transformed_file=dict( argstr="--outvol %s", extensions=None, @@ -47,7 +73,12 @@ def test_Surface2VolTransform_inputs(): def test_Surface2VolTransform_outputs(): output_map = dict( - transformed_file=dict(extensions=None), vertexvol_file=dict(extensions=None) + transformed_file=dict( + extensions=None, + ), + vertexvol_file=dict( + extensions=None, + ), ) outputs = Surface2VolTransform.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py index 1f373f07a0..7876dfa1cc 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py @@ -4,16 +4,46 @@ def test_SurfaceSmooth_inputs(): input_map = dict( - args=dict(argstr="%s"), - cortex=dict(argstr="--cortex", usedefault=True), - environ=dict(nohash=True, usedefault=True), - fwhm=dict(argstr="--fwhm %.4f", xor=["smooth_iters"]), - hemi=dict(argstr="--hemi %s", mandatory=True), - in_file=dict(argstr="--sval %s", extensions=None, mandatory=True), - out_file=dict(argstr="--tval %s", extensions=None, genfile=True), - reshape=dict(argstr="--reshape"), - smooth_iters=dict(argstr="--smooth %d", xor=["fwhm"]), - subject_id=dict(argstr="--s %s", mandatory=True), + args=dict( + argstr="%s", + ), + cortex=dict( + argstr="--cortex", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fwhm=dict( + argstr="--fwhm %.4f", + xor=["smooth_iters"], + ), + hemi=dict( + argstr="--hemi %s", + mandatory=True, + ), + in_file=dict( + argstr="--sval %s", + extensions=None, + mandatory=True, + ), + out_file=dict( + argstr="--tval %s", + extensions=None, + genfile=True, + ), + reshape=dict( + argstr="--reshape", + ), + smooth_iters=dict( + argstr="--smooth %d", + xor=["fwhm"], + ), + subject_id=dict( + argstr="--s %s", + mandatory=True, + ), subjects_dir=dict(), ) inputs = SurfaceSmooth.input_spec() @@ -24,7 +54,11 @@ def test_SurfaceSmooth_inputs(): def test_SurfaceSmooth_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SurfaceSmooth.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py index 8035c97669..c778bcc959 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py @@ -4,49 +4,127 @@ def test_SurfaceSnapshots_inputs(): input_map = dict( - annot_file=dict(argstr="-annotation %s", extensions=None, xor=["annot_name"]), - annot_name=dict(argstr="-annotation %s", xor=["annot_file"]), - args=dict(argstr="%s"), - colortable=dict(argstr="-colortable %s", extensions=None), - demean_overlay=dict(argstr="-zm"), - environ=dict(nohash=True, usedefault=True), - hemi=dict(argstr="%s", mandatory=True, position=2), + annot_file=dict( + argstr="-annotation %s", + extensions=None, + xor=["annot_name"], + ), + annot_name=dict( + argstr="-annotation %s", + xor=["annot_file"], + ), + args=dict( + argstr="%s", + ), + colortable=dict( + argstr="-colortable %s", + extensions=None, + ), + demean_overlay=dict( + argstr="-zm", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hemi=dict( + argstr="%s", + mandatory=True, + position=2, + ), identity_reg=dict( argstr="-overlay-reg-identity", xor=["overlay_reg", "identity_reg", "mni152_reg"], ), - invert_overlay=dict(argstr="-invphaseflag 1"), - label_file=dict(argstr="-label %s", extensions=None, xor=["label_name"]), - label_name=dict(argstr="-label %s", xor=["label_file"]), - label_outline=dict(argstr="-label-outline"), - label_under=dict(argstr="-labels-under"), + invert_overlay=dict( + argstr="-invphaseflag 1", + ), + label_file=dict( + argstr="-label %s", + extensions=None, + xor=["label_name"], + ), + label_name=dict( + argstr="-label %s", + xor=["label_file"], + ), + label_outline=dict( + argstr="-label-outline", + ), + label_under=dict( + argstr="-labels-under", + ), mni152_reg=dict( - argstr="-mni152reg", xor=["overlay_reg", "identity_reg", "mni152_reg"] + argstr="-mni152reg", + xor=["overlay_reg", "identity_reg", "mni152_reg"], + ), + orig_suffix=dict( + argstr="-orig %s", + ), + overlay=dict( + argstr="-overlay %s", + extensions=None, + requires=["overlay_range"], + ), + overlay_range=dict( + argstr="%s", + ), + overlay_range_offset=dict( + argstr="-foffset %.3f", ), - orig_suffix=dict(argstr="-orig %s"), - overlay=dict(argstr="-overlay %s", extensions=None, requires=["overlay_range"]), - overlay_range=dict(argstr="%s"), - overlay_range_offset=dict(argstr="-foffset %.3f"), overlay_reg=dict( argstr="-overlay-reg %s", extensions=None, xor=["overlay_reg", "identity_reg", "mni152_reg"], ), - patch_file=dict(argstr="-patch %s", extensions=None), - reverse_overlay=dict(argstr="-revphaseflag 1"), + patch_file=dict( + argstr="-patch %s", + extensions=None, + ), + reverse_overlay=dict( + argstr="-revphaseflag 1", + ), screenshot_stem=dict(), - show_color_scale=dict(argstr="-colscalebarflag 1"), - show_color_text=dict(argstr="-colscaletext 1"), - show_curv=dict(argstr="-curv", xor=["show_gray_curv"]), - show_gray_curv=dict(argstr="-gray", xor=["show_curv"]), + show_color_scale=dict( + argstr="-colscalebarflag 1", + ), + show_color_text=dict( + argstr="-colscaletext 1", + ), + show_curv=dict( + argstr="-curv", + xor=["show_gray_curv"], + ), + show_gray_curv=dict( + argstr="-gray", + xor=["show_curv"], + ), six_images=dict(), - sphere_suffix=dict(argstr="-sphere %s"), - stem_template_args=dict(requires=["screenshot_stem"]), - subject_id=dict(argstr="%s", mandatory=True, position=1), + sphere_suffix=dict( + 
argstr="-sphere %s", + ), + stem_template_args=dict( + requires=["screenshot_stem"], + ), + subject_id=dict( + argstr="%s", + mandatory=True, + position=1, + ), subjects_dir=dict(), - surface=dict(argstr="%s", mandatory=True, position=3), - tcl_script=dict(argstr="%s", extensions=None, genfile=True), - truncate_overlay=dict(argstr="-truncphaseflag 1"), + surface=dict( + argstr="%s", + mandatory=True, + position=3, + ), + tcl_script=dict( + argstr="%s", + extensions=None, + genfile=True, + ), + truncate_overlay=dict( + argstr="-truncphaseflag 1", + ), ) inputs = SurfaceSnapshots.input_spec() @@ -56,7 +134,9 @@ def test_SurfaceSnapshots_inputs(): def test_SurfaceSnapshots_outputs(): - output_map = dict(snapshots=dict()) + output_map = dict( + snapshots=dict(), + ) outputs = SurfaceSnapshots.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py index 3827bb161a..88923befd4 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py @@ -4,12 +4,28 @@ def test_SurfaceTransform_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - hemi=dict(argstr="--hemi %s", mandatory=True), - out_file=dict(argstr="--tval %s", extensions=None, genfile=True), - reshape=dict(argstr="--reshape"), - reshape_factor=dict(argstr="--reshape-factor"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hemi=dict( + argstr="--hemi %s", + mandatory=True, + ), + out_file=dict( + argstr="--tval %s", + extensions=None, + genfile=True, + ), + reshape=dict( + argstr="--reshape", + ), + reshape_factor=dict( + argstr="--reshape-factor", + ), source_annot_file=dict( argstr="--sval-annot %s", extensions=None, @@ -22,12 +38,25 @@ def test_SurfaceTransform_inputs(): mandatory=True, xor=["source_annot_file"], ), - source_subject=dict(argstr="--srcsubject %s", mandatory=True), - source_type=dict(argstr="--sfmt %s", requires=["source_file"]), + source_subject=dict( + argstr="--srcsubject %s", + mandatory=True, + ), + source_type=dict( + argstr="--sfmt %s", + requires=["source_file"], + ), subjects_dir=dict(), - target_ico_order=dict(argstr="--trgicoorder %d"), - target_subject=dict(argstr="--trgsubject %s", mandatory=True), - target_type=dict(argstr="--tfmt %s"), + target_ico_order=dict( + argstr="--trgicoorder %d", + ), + target_subject=dict( + argstr="--trgsubject %s", + mandatory=True, + ), + target_type=dict( + argstr="--tfmt %s", + ), ) inputs = SurfaceTransform.input_spec() @@ -37,7 +66,11 @@ def test_SurfaceTransform_inputs(): def test_SurfaceTransform_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SurfaceTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py b/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py index c30574d897..7d52c994bc 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py @@ -4,16 +4,50 @@ def test_SynthesizeFLASH_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - fixed_weighting=dict(argstr="-w", position=1), - flip_angle=dict(argstr="%.2f", 
mandatory=True, position=3), - out_file=dict(argstr="%s", extensions=None, genfile=True), - pd_image=dict(argstr="%s", extensions=None, mandatory=True, position=6), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_weighting=dict( + argstr="-w", + position=1, + ), + flip_angle=dict( + argstr="%.2f", + mandatory=True, + position=3, + ), + out_file=dict( + argstr="%s", + extensions=None, + genfile=True, + ), + pd_image=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=6, + ), subjects_dir=dict(), - t1_image=dict(argstr="%s", extensions=None, mandatory=True, position=5), - te=dict(argstr="%.3f", mandatory=True, position=4), - tr=dict(argstr="%.2f", mandatory=True, position=2), + t1_image=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=5, + ), + te=dict( + argstr="%.3f", + mandatory=True, + position=4, + ), + tr=dict( + argstr="%.2f", + mandatory=True, + position=2, + ), ) inputs = SynthesizeFLASH.input_spec() @@ -23,7 +57,11 @@ def test_SynthesizeFLASH_inputs(): def test_SynthesizeFLASH_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SynthesizeFLASH.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py b/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py index 8266edd299..384f44edd2 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py @@ -4,11 +4,26 @@ def test_TalairachAVI_inputs(): input_map = dict( - args=dict(argstr="%s"), - atlas=dict(argstr="--atlas %s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="--i %s", extensions=None, mandatory=True), - out_file=dict(argstr="--xfm %s", extensions=None, mandatory=True), + args=dict( + argstr="%s", + ), + atlas=dict( + argstr="--atlas %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="--i %s", + extensions=None, + mandatory=True, + ), + out_file=dict( + argstr="--xfm %s", + extensions=None, + mandatory=True, + ), subjects_dir=dict(), ) inputs = TalairachAVI.input_spec() @@ -20,9 +35,15 @@ def test_TalairachAVI_inputs(): def test_TalairachAVI_outputs(): output_map = dict( - out_file=dict(extensions=None), - out_log=dict(extensions=None), - out_txt=dict(extensions=None), + out_file=dict( + extensions=None, + ), + out_log=dict( + extensions=None, + ), + out_txt=dict( + extensions=None, + ), ) outputs = TalairachAVI.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py b/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py index 3384f9c27f..c6536186aa 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py @@ -4,9 +4,19 @@ def test_TalairachQC_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - log_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + log_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), subjects_dir=dict(), ) inputs = TalairachQC.input_spec() @@ -17,7 +27,12 @@ def test_TalairachQC_inputs(): def test_TalairachQC_outputs(): - output_map = dict(log_file=dict(extensions=None, usedefault=True)) + output_map = dict( + 
log_file=dict( + extensions=None, + usedefault=True, + ), + ) outputs = TalairachQC.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py b/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py index 4be76d5396..31cdedb679 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py @@ -4,27 +4,76 @@ def test_Tkregister2_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - fsl_in_matrix=dict(argstr="--fsl %s", extensions=None), - fsl_out=dict(argstr="--fslregout %s"), - fstal=dict(argstr="--fstal", xor=["target_image", "moving_image", "reg_file"]), - fstarg=dict(argstr="--fstarg", xor=["target_image"]), - invert_lta_in=dict(requires=["lta_in"]), - invert_lta_out=dict(argstr="--ltaout-inv", requires=["lta_in"]), - lta_in=dict(argstr="--lta %s", extensions=None), - lta_out=dict(argstr="--ltaout %s"), - moving_image=dict(argstr="--mov %s", extensions=None, mandatory=True), - movscale=dict(argstr="--movscale %f"), - noedit=dict(argstr="--noedit", usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fsl_in_matrix=dict( + argstr="--fsl %s", + extensions=None, + ), + fsl_out=dict( + argstr="--fslregout %s", + ), + fstal=dict( + argstr="--fstal", + xor=["target_image", "moving_image", "reg_file"], + ), + fstarg=dict( + argstr="--fstarg", + xor=["target_image"], + ), + invert_lta_in=dict( + requires=["lta_in"], + ), + invert_lta_out=dict( + argstr="--ltaout-inv", + requires=["lta_in"], + ), + lta_in=dict( + argstr="--lta %s", + extensions=None, + ), + lta_out=dict( + argstr="--ltaout %s", + ), + moving_image=dict( + argstr="--mov %s", + extensions=None, + mandatory=True, + ), + movscale=dict( + argstr="--movscale %f", + ), + noedit=dict( + argstr="--noedit", + usedefault=True, + ), reg_file=dict( - argstr="--reg %s", extensions=None, mandatory=True, usedefault=True + argstr="--reg %s", + extensions=None, + mandatory=True, + usedefault=True, + ), + reg_header=dict( + argstr="--regheader", + ), + subject_id=dict( + argstr="--s %s", ), - reg_header=dict(argstr="--regheader"), - subject_id=dict(argstr="--s %s"), subjects_dir=dict(), - target_image=dict(argstr="--targ %s", extensions=None, xor=["fstarg"]), - xfm=dict(argstr="--xfm %s", extensions=None), + target_image=dict( + argstr="--targ %s", + extensions=None, + xor=["fstarg"], + ), + xfm=dict( + argstr="--xfm %s", + extensions=None, + ), ) inputs = Tkregister2.input_spec() @@ -35,9 +84,15 @@ def test_Tkregister2_inputs(): def test_Tkregister2_outputs(): output_map = dict( - fsl_file=dict(extensions=None), - lta_file=dict(extensions=None), - reg_file=dict(extensions=None), + fsl_file=dict( + extensions=None, + ), + lta_file=dict( + extensions=None, + ), + reg_file=dict( + extensions=None, + ), ) outputs = Tkregister2.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py b/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py index b04e8c00ed..b6b27e67b8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py @@ -4,33 +4,57 @@ def test_UnpackSDICOMDir_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), config=dict( argstr="-cfg %s", extensions=None, mandatory=True, xor=("run_info", "config", "seq_config"), ), - 
dir_structure=dict(argstr="-%s"), - environ=dict(nohash=True, usedefault=True), - log_file=dict(argstr="-log %s", extensions=None), - no_info_dump=dict(argstr="-noinfodump"), - no_unpack_err=dict(argstr="-no-unpackerr"), - output_dir=dict(argstr="-targ %s"), + dir_structure=dict( + argstr="-%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + log_file=dict( + argstr="-log %s", + extensions=None, + ), + no_info_dump=dict( + argstr="-noinfodump", + ), + no_unpack_err=dict( + argstr="-no-unpackerr", + ), + output_dir=dict( + argstr="-targ %s", + ), run_info=dict( argstr="-run %d %s %s %s", mandatory=True, xor=("run_info", "config", "seq_config"), ), - scan_only=dict(argstr="-scanonly %s", extensions=None), + scan_only=dict( + argstr="-scanonly %s", + extensions=None, + ), seq_config=dict( argstr="-seqcfg %s", extensions=None, mandatory=True, xor=("run_info", "config", "seq_config"), ), - source_dir=dict(argstr="-src %s", mandatory=True), - spm_zeropad=dict(argstr="-nspmzeropad %d"), + source_dir=dict( + argstr="-src %s", + mandatory=True, + ), + spm_zeropad=dict( + argstr="-nspmzeropad %d", + ), subjects_dir=dict(), ) inputs = UnpackSDICOMDir.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py b/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py index c3cf3788bc..152f03eaa8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py @@ -4,21 +4,64 @@ def test_VolumeMask_inputs(): input_map = dict( - args=dict(argstr="%s"), - aseg=dict(extensions=None, xor=["in_aseg"]), + args=dict( + argstr="%s", + ), + aseg=dict( + extensions=None, + xor=["in_aseg"], + ), copy_inputs=dict(), - environ=dict(nohash=True, usedefault=True), - in_aseg=dict(argstr="--aseg_name %s", extensions=None, xor=["aseg"]), - left_ribbonlabel=dict(argstr="--label_left_ribbon %d", mandatory=True), - left_whitelabel=dict(argstr="--label_left_white %d", mandatory=True), - lh_pial=dict(extensions=None, mandatory=True), - lh_white=dict(extensions=None, mandatory=True), - rh_pial=dict(extensions=None, mandatory=True), - rh_white=dict(extensions=None, mandatory=True), - right_ribbonlabel=dict(argstr="--label_right_ribbon %d", mandatory=True), - right_whitelabel=dict(argstr="--label_right_white %d", mandatory=True), - save_ribbon=dict(argstr="--save_ribbon"), - subject_id=dict(argstr="%s", mandatory=True, position=-1, usedefault=True), + environ=dict( + nohash=True, + usedefault=True, + ), + in_aseg=dict( + argstr="--aseg_name %s", + extensions=None, + xor=["aseg"], + ), + left_ribbonlabel=dict( + argstr="--label_left_ribbon %d", + mandatory=True, + ), + left_whitelabel=dict( + argstr="--label_left_white %d", + mandatory=True, + ), + lh_pial=dict( + extensions=None, + mandatory=True, + ), + lh_white=dict( + extensions=None, + mandatory=True, + ), + rh_pial=dict( + extensions=None, + mandatory=True, + ), + rh_white=dict( + extensions=None, + mandatory=True, + ), + right_ribbonlabel=dict( + argstr="--label_right_ribbon %d", + mandatory=True, + ), + right_whitelabel=dict( + argstr="--label_right_white %d", + mandatory=True, + ), + save_ribbon=dict( + argstr="--save_ribbon", + ), + subject_id=dict( + argstr="%s", + mandatory=True, + position=-1, + usedefault=True, + ), subjects_dir=dict(), ) inputs = VolumeMask.input_spec() @@ -30,9 +73,15 @@ def test_VolumeMask_inputs(): def test_VolumeMask_outputs(): output_map = dict( - lh_ribbon=dict(extensions=None), - out_ribbon=dict(extensions=None), - 
rh_ribbon=dict(extensions=None), + lh_ribbon=dict( + extensions=None, + ), + out_ribbon=dict( + extensions=None, + ), + rh_ribbon=dict( + extensions=None, + ), ) outputs = VolumeMask.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py b/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py index 656b26f869..5e8609c4c6 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py @@ -4,16 +4,40 @@ def test_WatershedSkullStrip_inputs(): input_map = dict( - args=dict(argstr="%s"), - brain_atlas=dict(argstr="-brain_atlas %s", extensions=None, position=-4), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + args=dict( + argstr="%s", + ), + brain_atlas=dict( + argstr="-brain_atlas %s", + extensions=None, + position=-4, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), out_file=dict( - argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + usedefault=True, ), subjects_dir=dict(), - t1=dict(argstr="-T1"), - transform=dict(argstr="%s", extensions=None, position=-3), + t1=dict( + argstr="-T1", + ), + transform=dict( + argstr="%s", + extensions=None, + position=-3, + ), ) inputs = WatershedSkullStrip.input_spec() @@ -23,7 +47,11 @@ def test_WatershedSkullStrip_inputs(): def test_WatershedSkullStrip_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = WatershedSkullStrip.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_AR1Image.py b/nipype/interfaces/fsl/tests/test_auto_AR1Image.py index 3f16d7235d..0f4cfc2bcc 100644 --- a/nipype/interfaces/fsl/tests/test_auto_AR1Image.py +++ b/nipype/interfaces/fsl/tests/test_auto_AR1Image.py @@ -4,16 +4,43 @@ def test_AR1Image_inputs(): input_map = dict( - args=dict(argstr="%s"), - dimension=dict(argstr="-%sar1", position=4, usedefault=True), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - internal_datatype=dict(argstr="-dt %s", position=1), - nan2zeros=dict(argstr="-nan", position=3), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="-%sar1", + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = AR1Image.input_spec() @@ -24,7 +51,11 @@ def test_AR1Image_inputs(): def test_AR1Image_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = AR1Image.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py 
b/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py index c84d3694ce..359a076f2d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py +++ b/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py @@ -4,11 +4,30 @@ def test_AccuracyTester_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - mel_icas=dict(argstr="%s", copyfile=False, mandatory=True, position=3), - output_directory=dict(argstr="%s", mandatory=True, position=2), - trained_wts_file=dict(argstr="%s", extensions=None, mandatory=True, position=1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + mel_icas=dict( + argstr="%s", + copyfile=False, + mandatory=True, + position=3, + ), + output_directory=dict( + argstr="%s", + mandatory=True, + position=2, + ), + trained_wts_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), ) inputs = AccuracyTester.input_spec() @@ -18,7 +37,12 @@ def test_AccuracyTester_inputs(): def test_AccuracyTester_outputs(): - output_map = dict(output_directory=dict(argstr="%s", position=1)) + output_map = dict( + output_directory=dict( + argstr="%s", + position=1, + ), + ) outputs = AccuracyTester.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py b/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py index afcb8e7267..f38990e572 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py @@ -4,16 +4,44 @@ def test_ApplyMask_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - internal_datatype=dict(argstr="-dt %s", position=1), - mask_file=dict(argstr="-mas %s", extensions=None, mandatory=True, position=4), - nan2zeros=dict(argstr="-nan", position=3), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + mask_file=dict( + argstr="-mas %s", + extensions=None, + mandatory=True, + position=4, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = ApplyMask.input_spec() @@ -24,7 +52,11 @@ def test_ApplyMask_inputs(): def test_ApplyMask_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ApplyMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py b/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py index e346363525..4c5bcc13a7 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py @@ -4,12 +4,30 @@ def test_ApplyTOPUP_inputs(): input_map = dict( - args=dict(argstr="%s"), - datatype=dict(argstr="-d=%s"), - encoding_file=dict(argstr="--datain=%s", extensions=None, mandatory=True), - environ=dict(nohash=True, usedefault=True), - in_files=dict(argstr="--imain=%s", mandatory=True, sep=","), - 
in_index=dict(argstr="--inindex=%s", sep=","), + args=dict( + argstr="%s", + ), + datatype=dict( + argstr="-d=%s", + ), + encoding_file=dict( + argstr="--datain=%s", + extensions=None, + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr="--imain=%s", + mandatory=True, + sep=",", + ), + in_index=dict( + argstr="--inindex=%s", + sep=",", + ), in_topup_fieldcoef=dict( argstr="--topup=%s", copyfile=False, @@ -17,10 +35,16 @@ def test_ApplyTOPUP_inputs(): requires=["in_topup_movpar"], ), in_topup_movpar=dict( - copyfile=False, extensions=None, requires=["in_topup_fieldcoef"] + copyfile=False, + extensions=None, + requires=["in_topup_fieldcoef"], + ), + interp=dict( + argstr="--interp=%s", + ), + method=dict( + argstr="--method=%s", ), - interp=dict(argstr="--interp=%s"), - method=dict(argstr="--method=%s"), out_corrected=dict( argstr="--out=%s", extensions=None, @@ -37,7 +61,11 @@ def test_ApplyTOPUP_inputs(): def test_ApplyTOPUP_outputs(): - output_map = dict(out_corrected=dict(extensions=None)) + output_map = dict( + out_corrected=dict( + extensions=None, + ), + ) outputs = ApplyTOPUP.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py b/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py index ec6d5fe07a..1eaf3eb2e7 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py @@ -4,14 +4,38 @@ def test_ApplyWarp_inputs(): input_map = dict( - abswarp=dict(argstr="--abs", xor=["relwarp"]), - args=dict(argstr="%s"), - datatype=dict(argstr="--datatype=%s"), - environ=dict(nohash=True, usedefault=True), - field_file=dict(argstr="--warp=%s", extensions=None), - in_file=dict(argstr="--in=%s", extensions=None, mandatory=True, position=0), - interp=dict(argstr="--interp=%s", position=-2), - mask_file=dict(argstr="--mask=%s", extensions=None), + abswarp=dict( + argstr="--abs", + xor=["relwarp"], + ), + args=dict( + argstr="%s", + ), + datatype=dict( + argstr="--datatype=%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + field_file=dict( + argstr="--warp=%s", + extensions=None, + ), + in_file=dict( + argstr="--in=%s", + extensions=None, + mandatory=True, + position=0, + ), + interp=dict( + argstr="--interp=%s", + position=-2, + ), + mask_file=dict( + argstr="--mask=%s", + extensions=None, + ), out_file=dict( argstr="--out=%s", extensions=None, @@ -20,12 +44,31 @@ def test_ApplyWarp_inputs(): position=2, ), output_type=dict(), - postmat=dict(argstr="--postmat=%s", extensions=None), - premat=dict(argstr="--premat=%s", extensions=None), - ref_file=dict(argstr="--ref=%s", extensions=None, mandatory=True, position=1), - relwarp=dict(argstr="--rel", position=-1, xor=["abswarp"]), - superlevel=dict(argstr="--superlevel=%s"), - supersample=dict(argstr="--super"), + postmat=dict( + argstr="--postmat=%s", + extensions=None, + ), + premat=dict( + argstr="--premat=%s", + extensions=None, + ), + ref_file=dict( + argstr="--ref=%s", + extensions=None, + mandatory=True, + position=1, + ), + relwarp=dict( + argstr="--rel", + position=-1, + xor=["abswarp"], + ), + superlevel=dict( + argstr="--superlevel=%s", + ), + supersample=dict( + argstr="--super", + ), ) inputs = ApplyWarp.input_spec() @@ -35,7 +78,11 @@ def test_ApplyWarp_inputs(): def test_ApplyWarp_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ApplyWarp.output_spec() for 
key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py b/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py index 0a904031fd..0fc914cdde 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py @@ -4,35 +4,111 @@ def test_ApplyXFM_inputs(): input_map = dict( - angle_rep=dict(argstr="-anglerep %s"), - apply_isoxfm=dict(argstr="-applyisoxfm %f", xor=["apply_xfm"]), - apply_xfm=dict(argstr="-applyxfm", usedefault=True), - args=dict(argstr="%s"), - bbrslope=dict(argstr="-bbrslope %f", min_ver="5.0.0"), - bbrtype=dict(argstr="-bbrtype %s", min_ver="5.0.0"), - bgvalue=dict(argstr="-setbackground %f"), - bins=dict(argstr="-bins %d"), - coarse_search=dict(argstr="-coarsesearch %d", units="degrees"), - cost=dict(argstr="-cost %s"), - cost_func=dict(argstr="-searchcost %s"), - datatype=dict(argstr="-datatype %s"), - display_init=dict(argstr="-displayinit"), - dof=dict(argstr="-dof %d"), - echospacing=dict(argstr="-echospacing %f", min_ver="5.0.0"), - environ=dict(nohash=True, usedefault=True), - fieldmap=dict(argstr="-fieldmap %s", extensions=None, min_ver="5.0.0"), - fieldmapmask=dict(argstr="-fieldmapmask %s", extensions=None, min_ver="5.0.0"), - fine_search=dict(argstr="-finesearch %d", units="degrees"), - force_scaling=dict(argstr="-forcescaling"), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=0), - in_matrix_file=dict(argstr="-init %s", extensions=None), - in_weight=dict(argstr="-inweight %s", extensions=None), - interp=dict(argstr="-interp %s"), - min_sampling=dict(argstr="-minsampling %f", units="mm"), - no_clamp=dict(argstr="-noclamp"), - no_resample=dict(argstr="-noresample"), - no_resample_blur=dict(argstr="-noresampblur"), - no_search=dict(argstr="-nosearch"), + angle_rep=dict( + argstr="-anglerep %s", + ), + apply_isoxfm=dict( + argstr="-applyisoxfm %f", + xor=["apply_xfm"], + ), + apply_xfm=dict( + argstr="-applyxfm", + usedefault=True, + ), + args=dict( + argstr="%s", + ), + bbrslope=dict( + argstr="-bbrslope %f", + min_ver="5.0.0", + ), + bbrtype=dict( + argstr="-bbrtype %s", + min_ver="5.0.0", + ), + bgvalue=dict( + argstr="-setbackground %f", + ), + bins=dict( + argstr="-bins %d", + ), + coarse_search=dict( + argstr="-coarsesearch %d", + units="degrees", + ), + cost=dict( + argstr="-cost %s", + ), + cost_func=dict( + argstr="-searchcost %s", + ), + datatype=dict( + argstr="-datatype %s", + ), + display_init=dict( + argstr="-displayinit", + ), + dof=dict( + argstr="-dof %d", + ), + echospacing=dict( + argstr="-echospacing %f", + min_ver="5.0.0", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fieldmap=dict( + argstr="-fieldmap %s", + extensions=None, + min_ver="5.0.0", + ), + fieldmapmask=dict( + argstr="-fieldmapmask %s", + extensions=None, + min_ver="5.0.0", + ), + fine_search=dict( + argstr="-finesearch %d", + units="degrees", + ), + force_scaling=dict( + argstr="-forcescaling", + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + position=0, + ), + in_matrix_file=dict( + argstr="-init %s", + extensions=None, + ), + in_weight=dict( + argstr="-inweight %s", + extensions=None, + ), + interp=dict( + argstr="-interp %s", + ), + min_sampling=dict( + argstr="-minsampling %f", + units="mm", + ), + no_clamp=dict( + argstr="-noclamp", + ), + no_resample=dict( + argstr="-noresample", + ), + no_resample_blur=dict( + argstr="-noresampblur", + ), + no_search=dict( + argstr="-nosearch", + ), out_file=dict( 
argstr="-out %s", extensions=None, @@ -58,23 +134,72 @@ def test_ApplyXFM_inputs(): position=3, ), output_type=dict(), - padding_size=dict(argstr="-paddingsize %d", units="voxels"), - pedir=dict(argstr="-pedir %d", min_ver="5.0.0"), - ref_weight=dict(argstr="-refweight %s", extensions=None), - reference=dict(argstr="-ref %s", extensions=None, mandatory=True, position=1), - rigid2D=dict(argstr="-2D"), + padding_size=dict( + argstr="-paddingsize %d", + units="voxels", + ), + pedir=dict( + argstr="-pedir %d", + min_ver="5.0.0", + ), + ref_weight=dict( + argstr="-refweight %s", + extensions=None, + ), + reference=dict( + argstr="-ref %s", + extensions=None, + mandatory=True, + position=1, + ), + rigid2D=dict( + argstr="-2D", + ), save_log=dict(), - schedule=dict(argstr="-schedule %s", extensions=None), - searchr_x=dict(argstr="-searchrx %s", units="degrees"), - searchr_y=dict(argstr="-searchry %s", units="degrees"), - searchr_z=dict(argstr="-searchrz %s", units="degrees"), - sinc_width=dict(argstr="-sincwidth %d", units="voxels"), - sinc_window=dict(argstr="-sincwindow %s"), - uses_qform=dict(argstr="-usesqform"), - verbose=dict(argstr="-verbose %d"), - wm_seg=dict(argstr="-wmseg %s", extensions=None, min_ver="5.0.0"), - wmcoords=dict(argstr="-wmcoords %s", extensions=None, min_ver="5.0.0"), - wmnorms=dict(argstr="-wmnorms %s", extensions=None, min_ver="5.0.0"), + schedule=dict( + argstr="-schedule %s", + extensions=None, + ), + searchr_x=dict( + argstr="-searchrx %s", + units="degrees", + ), + searchr_y=dict( + argstr="-searchry %s", + units="degrees", + ), + searchr_z=dict( + argstr="-searchrz %s", + units="degrees", + ), + sinc_width=dict( + argstr="-sincwidth %d", + units="voxels", + ), + sinc_window=dict( + argstr="-sincwindow %s", + ), + uses_qform=dict( + argstr="-usesqform", + ), + verbose=dict( + argstr="-verbose %d", + ), + wm_seg=dict( + argstr="-wmseg %s", + extensions=None, + min_ver="5.0.0", + ), + wmcoords=dict( + argstr="-wmcoords %s", + extensions=None, + min_ver="5.0.0", + ), + wmnorms=dict( + argstr="-wmnorms %s", + extensions=None, + min_ver="5.0.0", + ), ) inputs = ApplyXFM.input_spec() @@ -85,9 +210,15 @@ def test_ApplyXFM_inputs(): def test_ApplyXFM_outputs(): output_map = dict( - out_file=dict(extensions=None), - out_log=dict(extensions=None), - out_matrix_file=dict(extensions=None), + out_file=dict( + extensions=None, + ), + out_log=dict( + extensions=None, + ), + out_matrix_file=dict( + extensions=None, + ), ) outputs = ApplyXFM.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_AvScale.py b/nipype/interfaces/fsl/tests/test_auto_AvScale.py index 32ed206773..4748d6a9bc 100644 --- a/nipype/interfaces/fsl/tests/test_auto_AvScale.py +++ b/nipype/interfaces/fsl/tests/test_auto_AvScale.py @@ -4,11 +4,26 @@ def test_AvScale_inputs(): input_map = dict( - all_param=dict(argstr="--allparams"), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - mat_file=dict(argstr="%s", extensions=None, position=-2), - ref_file=dict(argstr="%s", extensions=None, position=-1), + all_param=dict( + argstr="--allparams", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + mat_file=dict( + argstr="%s", + extensions=None, + position=-2, + ), + ref_file=dict( + argstr="%s", + extensions=None, + position=-1, + ), ) inputs = AvScale.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_B0Calc.py b/nipype/interfaces/fsl/tests/test_auto_B0Calc.py index 07a086d23a..01727179ff 100644 --- 
a/nipype/interfaces/fsl/tests/test_auto_B0Calc.py +++ b/nipype/interfaces/fsl/tests/test_auto_B0Calc.py @@ -4,14 +4,39 @@ def test_B0Calc_inputs(): input_map = dict( - args=dict(argstr="%s"), - chi_air=dict(argstr="--chi0=%e", usedefault=True), - compute_xyz=dict(argstr="--xyz", usedefault=True), - delta=dict(argstr="-d %e", usedefault=True), - directconv=dict(argstr="--directconv", usedefault=True), - environ=dict(nohash=True, usedefault=True), - extendboundary=dict(argstr="--extendboundary=%0.2f", usedefault=True), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=0), + args=dict( + argstr="%s", + ), + chi_air=dict( + argstr="--chi0=%e", + usedefault=True, + ), + compute_xyz=dict( + argstr="--xyz", + usedefault=True, + ), + delta=dict( + argstr="-d %e", + usedefault=True, + ), + directconv=dict( + argstr="--directconv", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + extendboundary=dict( + argstr="--extendboundary=%0.2f", + usedefault=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + position=0, + ), out_file=dict( argstr="-o %s", extensions=None, @@ -21,15 +46,37 @@ def test_B0Calc_inputs(): position=1, ), output_type=dict(), - x_b0=dict(argstr="--b0x=%0.2f", usedefault=True, xor=["xyz_b0"]), - x_grad=dict(argstr="--gx=%0.4f", usedefault=True), + x_b0=dict( + argstr="--b0x=%0.2f", + usedefault=True, + xor=["xyz_b0"], + ), + x_grad=dict( + argstr="--gx=%0.4f", + usedefault=True, + ), xyz_b0=dict( - argstr="--b0x=%0.2f --b0y=%0.2f --b0=%0.2f", xor=["x_b0", "y_b0", "z_b0"] + argstr="--b0x=%0.2f --b0y=%0.2f --b0=%0.2f", + xor=["x_b0", "y_b0", "z_b0"], + ), + y_b0=dict( + argstr="--b0y=%0.2f", + usedefault=True, + xor=["xyz_b0"], + ), + y_grad=dict( + argstr="--gy=%0.4f", + usedefault=True, + ), + z_b0=dict( + argstr="--b0=%0.2f", + usedefault=True, + xor=["xyz_b0"], + ), + z_grad=dict( + argstr="--gz=%0.4f", + usedefault=True, ), - y_b0=dict(argstr="--b0y=%0.2f", usedefault=True, xor=["xyz_b0"]), - y_grad=dict(argstr="--gy=%0.4f", usedefault=True), - z_b0=dict(argstr="--b0=%0.2f", usedefault=True, xor=["xyz_b0"]), - z_grad=dict(argstr="--gz=%0.4f", usedefault=True), ) inputs = B0Calc.input_spec() @@ -39,7 +86,11 @@ def test_B0Calc_inputs(): def test_B0Calc_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = B0Calc.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py b/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py index 7b7b0680f1..2424307165 100644 --- a/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py +++ b/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py @@ -4,37 +4,114 @@ def test_BEDPOSTX5_inputs(): input_map = dict( - all_ard=dict(argstr="--allard", xor=("no_ard", "all_ard")), - args=dict(argstr="%s"), - burn_in=dict(argstr="-b %d", usedefault=True), - burn_in_no_ard=dict(argstr="--burnin_noard=%d", usedefault=True), - bvals=dict(extensions=None, mandatory=True), - bvecs=dict(extensions=None, mandatory=True), - cnlinear=dict(argstr="--cnonlinear", xor=("no_spat", "non_linear", "cnlinear")), - dwi=dict(extensions=None, mandatory=True), - environ=dict(nohash=True, usedefault=True), - f0_ard=dict(argstr="--f0 --ardf0", xor=["f0_noard", "f0_ard", "all_ard"]), - f0_noard=dict(argstr="--f0", xor=["f0_noard", "f0_ard"]), - force_dir=dict(argstr="--forcedir", usedefault=True), - fudge=dict(argstr="-w %d"), - grad_dev=dict(extensions=None), 
- gradnonlin=dict(argstr="-g"), - logdir=dict(argstr="--logdir=%s"), - mask=dict(extensions=None, mandatory=True), - model=dict(argstr="-model %d"), - n_fibres=dict(argstr="-n %d", mandatory=True, usedefault=True), - n_jumps=dict(argstr="-j %d", usedefault=True), - no_ard=dict(argstr="--noard", xor=("no_ard", "all_ard")), - no_spat=dict(argstr="--nospat", xor=("no_spat", "non_linear", "cnlinear")), + all_ard=dict( + argstr="--allard", + xor=("no_ard", "all_ard"), + ), + args=dict( + argstr="%s", + ), + burn_in=dict( + argstr="-b %d", + usedefault=True, + ), + burn_in_no_ard=dict( + argstr="--burnin_noard=%d", + usedefault=True, + ), + bvals=dict( + extensions=None, + mandatory=True, + ), + bvecs=dict( + extensions=None, + mandatory=True, + ), + cnlinear=dict( + argstr="--cnonlinear", + xor=("no_spat", "non_linear", "cnlinear"), + ), + dwi=dict( + extensions=None, + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + f0_ard=dict( + argstr="--f0 --ardf0", + xor=["f0_noard", "f0_ard", "all_ard"], + ), + f0_noard=dict( + argstr="--f0", + xor=["f0_noard", "f0_ard"], + ), + force_dir=dict( + argstr="--forcedir", + usedefault=True, + ), + fudge=dict( + argstr="-w %d", + ), + grad_dev=dict( + extensions=None, + ), + gradnonlin=dict( + argstr="-g", + ), + logdir=dict( + argstr="--logdir=%s", + ), + mask=dict( + extensions=None, + mandatory=True, + ), + model=dict( + argstr="-model %d", + ), + n_fibres=dict( + argstr="-n %d", + mandatory=True, + usedefault=True, + ), + n_jumps=dict( + argstr="-j %d", + usedefault=True, + ), + no_ard=dict( + argstr="--noard", + xor=("no_ard", "all_ard"), + ), + no_spat=dict( + argstr="--nospat", + xor=("no_spat", "non_linear", "cnlinear"), + ), non_linear=dict( - argstr="--nonlinear", xor=("no_spat", "non_linear", "cnlinear") + argstr="--nonlinear", + xor=("no_spat", "non_linear", "cnlinear"), + ), + out_dir=dict( + argstr="%s", + mandatory=True, + position=1, + usedefault=True, ), - out_dir=dict(argstr="%s", mandatory=True, position=1, usedefault=True), output_type=dict(), - rician=dict(argstr="--rician"), - sample_every=dict(argstr="-s %d", usedefault=True), - seed=dict(argstr="--seed=%d"), - update_proposal_every=dict(argstr="--updateproposalevery=%d", usedefault=True), + rician=dict( + argstr="--rician", + ), + sample_every=dict( + argstr="-s %d", + usedefault=True, + ), + seed=dict( + argstr="--seed=%d", + ), + update_proposal_every=dict( + argstr="--updateproposalevery=%d", + usedefault=True, + ), use_gpu=dict(), ) inputs = BEDPOSTX5.input_spec() @@ -48,8 +125,12 @@ def test_BEDPOSTX5_outputs(): output_map = dict( dyads=dict(), dyads_dispersion=dict(), - mean_S0samples=dict(extensions=None), - mean_dsamples=dict(extensions=None), + mean_S0samples=dict( + extensions=None, + ), + mean_dsamples=dict( + extensions=None, + ), mean_fsamples=dict(), mean_phsamples=dict(), mean_thsamples=dict(), diff --git a/nipype/interfaces/fsl/tests/test_auto_BET.py b/nipype/interfaces/fsl/tests/test_auto_BET.py index d64186249d..82757a10a6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_BET.py +++ b/nipype/interfaces/fsl/tests/test_auto_BET.py @@ -4,10 +4,20 @@ def test_BET_inputs(): input_map = dict( - args=dict(argstr="%s"), - center=dict(argstr="-c %s", units="voxels"), - environ=dict(nohash=True, usedefault=True), - frac=dict(argstr="-f %.2f"), + args=dict( + argstr="%s", + ), + center=dict( + argstr="-c %s", + units="voxels", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + frac=dict( + argstr="-f %.2f", + ), functional=dict( 
argstr="-F", xor=( @@ -21,15 +31,31 @@ def test_BET_inputs(): ), ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=0, + ), + mask=dict( + argstr="-m", + ), + mesh=dict( + argstr="-e", + ), + no_output=dict( + argstr="-n", ), - mask=dict(argstr="-m"), - mesh=dict(argstr="-e"), - no_output=dict(argstr="-n"), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=1 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=1, + ), + outline=dict( + argstr="-o", ), - outline=dict(argstr="-o"), output_type=dict(), padding=dict( argstr="-Z", @@ -43,7 +69,10 @@ def test_BET_inputs(): "t2_guided", ), ), - radius=dict(argstr="-r %d", units="mm"), + radius=dict( + argstr="-r %d", + units="mm", + ), reduce_bias=dict( argstr="-B", xor=( @@ -80,7 +109,9 @@ def test_BET_inputs(): "t2_guided", ), ), - skull=dict(argstr="-s"), + skull=dict( + argstr="-s", + ), surfaces=dict( argstr="-A", xor=( @@ -106,8 +137,12 @@ def test_BET_inputs(): "t2_guided", ), ), - threshold=dict(argstr="-t"), - vertical_gradient=dict(argstr="-g %.2f"), + threshold=dict( + argstr="-t", + ), + vertical_gradient=dict( + argstr="-g %.2f", + ), ) inputs = BET.input_spec() @@ -118,18 +153,42 @@ def test_BET_inputs(): def test_BET_outputs(): output_map = dict( - inskull_mask_file=dict(extensions=None), - inskull_mesh_file=dict(extensions=None), - mask_file=dict(extensions=None), - meshfile=dict(extensions=None), - out_file=dict(extensions=None), - outline_file=dict(extensions=None), - outskin_mask_file=dict(extensions=None), - outskin_mesh_file=dict(extensions=None), - outskull_mask_file=dict(extensions=None), - outskull_mesh_file=dict(extensions=None), - skull_file=dict(extensions=None), - skull_mask_file=dict(extensions=None), + inskull_mask_file=dict( + extensions=None, + ), + inskull_mesh_file=dict( + extensions=None, + ), + mask_file=dict( + extensions=None, + ), + meshfile=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + outline_file=dict( + extensions=None, + ), + outskin_mask_file=dict( + extensions=None, + ), + outskin_mesh_file=dict( + extensions=None, + ), + outskull_mask_file=dict( + extensions=None, + ), + outskull_mesh_file=dict( + extensions=None, + ), + skull_file=dict( + extensions=None, + ), + skull_mask_file=dict( + extensions=None, + ), ) outputs = BET.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py b/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py index b0febff205..405cd592f6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py @@ -4,11 +4,27 @@ def test_BinaryMaths_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - internal_datatype=dict(argstr="-dt %s", position=1), - nan2zeros=dict(argstr="-nan", position=3), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), operand_file=dict( argstr="%s", extensions=None, @@ -17,13 +33,27 @@ def test_BinaryMaths_inputs(): xor=["operand_value"], ), operand_value=dict( - argstr="%.8f", mandatory=True, position=5, 
xor=["operand_file"] + argstr="%.8f", + mandatory=True, + position=5, + xor=["operand_file"], + ), + operation=dict( + argstr="-%s", + mandatory=True, + position=4, ), - operation=dict(argstr="-%s", mandatory=True, position=4), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = BinaryMaths.input_spec() @@ -34,7 +64,11 @@ def test_BinaryMaths_inputs(): def test_BinaryMaths_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = BinaryMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py b/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py index 8ef6b770cc..61f27be950 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py +++ b/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py @@ -4,15 +4,39 @@ def test_ChangeDataType_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - internal_datatype=dict(argstr="-dt %s", position=1), - nan2zeros=dict(argstr="-nan", position=3), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + mandatory=True, + position=-1, ), - output_datatype=dict(argstr="-odt %s", mandatory=True, position=-1), output_type=dict(), ) inputs = ChangeDataType.input_spec() @@ -23,7 +47,11 @@ def test_ChangeDataType_inputs(): def test_ChangeDataType_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ChangeDataType.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Classifier.py b/nipype/interfaces/fsl/tests/test_auto_Classifier.py index 3cb4616892..44fde8ae1a 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Classifier.py +++ b/nipype/interfaces/fsl/tests/test_auto_Classifier.py @@ -4,13 +4,32 @@ def test_Classifier_inputs(): input_map = dict( - args=dict(argstr="%s"), - artifacts_list_file=dict(extensions=None), - environ=dict(nohash=True, usedefault=True), - mel_ica=dict(argstr="%s", copyfile=False, position=1), - thresh=dict(argstr="%d", mandatory=True, position=-1), + args=dict( + argstr="%s", + ), + artifacts_list_file=dict( + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + mel_ica=dict( + argstr="%s", + copyfile=False, + position=1, + ), + thresh=dict( + argstr="%d", + mandatory=True, + position=-1, + ), trained_wts_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=2 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=2, ), ) inputs = Classifier.input_spec() @@ -21,7 +40,11 @@ def 
test_Classifier_inputs(): def test_Classifier_outputs(): - output_map = dict(artifacts_list_file=dict(extensions=None)) + output_map = dict( + artifacts_list_file=dict( + extensions=None, + ), + ) outputs = Classifier.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Cleaner.py b/nipype/interfaces/fsl/tests/test_auto_Cleaner.py index d23dd308a7..55e6851603 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Cleaner.py +++ b/nipype/interfaces/fsl/tests/test_auto_Cleaner.py @@ -4,17 +4,47 @@ def test_Cleaner_inputs(): input_map = dict( - aggressive=dict(argstr="-A", position=3), - args=dict(argstr="%s"), + aggressive=dict( + argstr="-A", + position=3, + ), + args=dict( + argstr="%s", + ), artifacts_list_file=dict( - argstr="%s", extensions=None, mandatory=True, position=1 - ), - cleanup_motion=dict(argstr="-m", position=2), - confound_file=dict(argstr="-x %s", extensions=None, position=4), - confound_file_1=dict(argstr="-x %s", extensions=None, position=5), - confound_file_2=dict(argstr="-x %s", extensions=None, position=6), - environ=dict(nohash=True, usedefault=True), - highpass=dict(argstr="-m -h %f", position=2, usedefault=True), + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + cleanup_motion=dict( + argstr="-m", + position=2, + ), + confound_file=dict( + argstr="-x %s", + extensions=None, + position=4, + ), + confound_file_1=dict( + argstr="-x %s", + extensions=None, + position=5, + ), + confound_file_2=dict( + argstr="-x %s", + extensions=None, + position=6, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + highpass=dict( + argstr="-m -h %f", + position=2, + usedefault=True, + ), ) inputs = Cleaner.input_spec() @@ -24,7 +54,11 @@ def test_Cleaner_inputs(): def test_Cleaner_outputs(): - output_map = dict(cleaned_functional_file=dict(extensions=None)) + output_map = dict( + cleaned_functional_file=dict( + extensions=None, + ), + ) outputs = Cleaner.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Cluster.py b/nipype/interfaces/fsl/tests/test_auto_Cluster.py index 3f1dda4725..ba4dfc8ae3 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Cluster.py +++ b/nipype/interfaces/fsl/tests/test_auto_Cluster.py @@ -4,34 +4,110 @@ def test_Cluster_inputs(): input_map = dict( - args=dict(argstr="%s"), - connectivity=dict(argstr="--connectivity=%d"), - cope_file=dict(argstr="--cope=%s", extensions=None), - dlh=dict(argstr="--dlh=%.10f"), - environ=dict(nohash=True, usedefault=True), - find_min=dict(argstr="--min", usedefault=True), - fractional=dict(argstr="--fractional", usedefault=True), - in_file=dict(argstr="--in=%s", extensions=None, mandatory=True), - minclustersize=dict(argstr="--minclustersize", usedefault=True), - no_table=dict(argstr="--no_table", usedefault=True), - num_maxima=dict(argstr="--num=%d"), - out_index_file=dict(argstr="--oindex=%s", hash_files=False), - out_localmax_txt_file=dict(argstr="--olmax=%s", hash_files=False), - out_localmax_vol_file=dict(argstr="--olmaxim=%s", hash_files=False), - out_max_file=dict(argstr="--omax=%s", hash_files=False), - out_mean_file=dict(argstr="--omean=%s", hash_files=False), - out_pval_file=dict(argstr="--opvals=%s", hash_files=False), - out_size_file=dict(argstr="--osize=%s", hash_files=False), - out_threshold_file=dict(argstr="--othresh=%s", hash_files=False), + args=dict( + argstr="%s", + ), + connectivity=dict( + argstr="--connectivity=%d", + ), + cope_file=dict( + argstr="--cope=%s", + 
extensions=None, + ), + dlh=dict( + argstr="--dlh=%.10f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + find_min=dict( + argstr="--min", + usedefault=True, + ), + fractional=dict( + argstr="--fractional", + usedefault=True, + ), + in_file=dict( + argstr="--in=%s", + extensions=None, + mandatory=True, + ), + minclustersize=dict( + argstr="--minclustersize", + usedefault=True, + ), + no_table=dict( + argstr="--no_table", + usedefault=True, + ), + num_maxima=dict( + argstr="--num=%d", + ), + out_index_file=dict( + argstr="--oindex=%s", + hash_files=False, + ), + out_localmax_txt_file=dict( + argstr="--olmax=%s", + hash_files=False, + ), + out_localmax_vol_file=dict( + argstr="--olmaxim=%s", + hash_files=False, + ), + out_max_file=dict( + argstr="--omax=%s", + hash_files=False, + ), + out_mean_file=dict( + argstr="--omean=%s", + hash_files=False, + ), + out_pval_file=dict( + argstr="--opvals=%s", + hash_files=False, + ), + out_size_file=dict( + argstr="--osize=%s", + hash_files=False, + ), + out_threshold_file=dict( + argstr="--othresh=%s", + hash_files=False, + ), output_type=dict(), - peak_distance=dict(argstr="--peakdist=%.10f"), - pthreshold=dict(argstr="--pthresh=%.10f", requires=["dlh", "volume"]), - std_space_file=dict(argstr="--stdvol=%s", extensions=None), - threshold=dict(argstr="--thresh=%.10f", mandatory=True), - use_mm=dict(argstr="--mm", usedefault=True), - volume=dict(argstr="--volume=%d"), - warpfield_file=dict(argstr="--warpvol=%s", extensions=None), - xfm_file=dict(argstr="--xfm=%s", extensions=None), + peak_distance=dict( + argstr="--peakdist=%.10f", + ), + pthreshold=dict( + argstr="--pthresh=%.10f", + requires=["dlh", "volume"], + ), + std_space_file=dict( + argstr="--stdvol=%s", + extensions=None, + ), + threshold=dict( + argstr="--thresh=%.10f", + mandatory=True, + ), + use_mm=dict( + argstr="--mm", + usedefault=True, + ), + volume=dict( + argstr="--volume=%d", + ), + warpfield_file=dict( + argstr="--warpvol=%s", + extensions=None, + ), + xfm_file=dict( + argstr="--xfm=%s", + extensions=None, + ), ) inputs = Cluster.input_spec() @@ -42,14 +118,30 @@ def test_Cluster_inputs(): def test_Cluster_outputs(): output_map = dict( - index_file=dict(extensions=None), - localmax_txt_file=dict(extensions=None), - localmax_vol_file=dict(extensions=None), - max_file=dict(extensions=None), - mean_file=dict(extensions=None), - pval_file=dict(extensions=None), - size_file=dict(extensions=None), - threshold_file=dict(extensions=None), + index_file=dict( + extensions=None, + ), + localmax_txt_file=dict( + extensions=None, + ), + localmax_vol_file=dict( + extensions=None, + ), + max_file=dict( + extensions=None, + ), + mean_file=dict( + extensions=None, + ), + pval_file=dict( + extensions=None, + ), + size_file=dict( + extensions=None, + ), + threshold_file=dict( + extensions=None, + ), ) outputs = Cluster.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Complex.py b/nipype/interfaces/fsl/tests/test_auto_Complex.py index b20e878bb9..80d6f20fe6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Complex.py +++ b/nipype/interfaces/fsl/tests/test_auto_Complex.py @@ -4,7 +4,9 @@ def test_Complex_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), complex_cartesian=dict( argstr="-complex", position=1, @@ -17,8 +19,16 @@ def test_Complex_inputs(): "complex_merge", ], ), - complex_in_file=dict(argstr="%s", extensions=None, position=2), - complex_in_file2=dict(argstr="%s", extensions=None, position=3), + complex_in_file=dict( + 
argstr="%s", + extensions=None, + position=2, + ), + complex_in_file2=dict( + argstr="%s", + extensions=None, + position=3, + ), complex_merge=dict( argstr="-complexmerge", position=1, @@ -72,9 +82,19 @@ def test_Complex_inputs(): "complex_merge", ], ), - end_vol=dict(argstr="%d", position=-1), - environ=dict(nohash=True, usedefault=True), - imaginary_in_file=dict(argstr="%s", extensions=None, position=3), + end_vol=dict( + argstr="%d", + position=-1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + imaginary_in_file=dict( + argstr="%s", + extensions=None, + position=3, + ), imaginary_out_file=dict( argstr="%s", extensions=None, @@ -91,7 +111,11 @@ def test_Complex_inputs(): "complex_merge", ], ), - magnitude_in_file=dict(argstr="%s", extensions=None, position=2), + magnitude_in_file=dict( + argstr="%s", + extensions=None, + position=2, + ), magnitude_out_file=dict( argstr="%s", extensions=None, @@ -109,7 +133,11 @@ def test_Complex_inputs(): ], ), output_type=dict(), - phase_in_file=dict(argstr="%s", extensions=None, position=3), + phase_in_file=dict( + argstr="%s", + extensions=None, + position=3, + ), phase_out_file=dict( argstr="%s", extensions=None, @@ -138,7 +166,11 @@ def test_Complex_inputs(): "complex_merge", ], ), - real_in_file=dict(argstr="%s", extensions=None, position=2), + real_in_file=dict( + argstr="%s", + extensions=None, + position=2, + ), real_out_file=dict( argstr="%s", extensions=None, @@ -167,7 +199,10 @@ def test_Complex_inputs(): "complex_merge", ], ), - start_vol=dict(argstr="%d", position=-2), + start_vol=dict( + argstr="%d", + position=-2, + ), ) inputs = Complex.input_spec() @@ -178,11 +213,21 @@ def test_Complex_inputs(): def test_Complex_outputs(): output_map = dict( - complex_out_file=dict(extensions=None), - imaginary_out_file=dict(extensions=None), - magnitude_out_file=dict(extensions=None), - phase_out_file=dict(extensions=None), - real_out_file=dict(extensions=None), + complex_out_file=dict( + extensions=None, + ), + imaginary_out_file=dict( + extensions=None, + ), + magnitude_out_file=dict( + extensions=None, + ), + phase_out_file=dict( + extensions=None, + ), + real_out_file=dict( + extensions=None, + ), ) outputs = Complex.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py b/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py index 966a030b42..35e5bd43a7 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py +++ b/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py @@ -4,19 +4,53 @@ def test_ContrastMgr_inputs(): input_map = dict( - args=dict(argstr="%s"), - contrast_num=dict(argstr="-cope"), - corrections=dict(copyfile=False, extensions=None, mandatory=True), - dof_file=dict(argstr="", copyfile=False, extensions=None, mandatory=True), - environ=dict(nohash=True, usedefault=True), - fcon_file=dict(argstr="-f %s", extensions=None), + args=dict( + argstr="%s", + ), + contrast_num=dict( + argstr="-cope", + ), + corrections=dict( + copyfile=False, + extensions=None, + mandatory=True, + ), + dof_file=dict( + argstr="", + copyfile=False, + extensions=None, + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fcon_file=dict( + argstr="-f %s", + extensions=None, + ), output_type=dict(), - param_estimates=dict(argstr="", copyfile=False, mandatory=True), + param_estimates=dict( + argstr="", + copyfile=False, + mandatory=True, + ), sigmasquareds=dict( - argstr="", copyfile=False, extensions=None, mandatory=True, position=-2 + argstr="", + copyfile=False, + extensions=None, + 
mandatory=True, + position=-2, + ), + suffix=dict( + argstr="-suffix %s", + ), + tcon_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, ), - suffix=dict(argstr="-suffix %s"), - tcon_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), ) inputs = ContrastMgr.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py b/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py index a7538b87bc..7ae7f7471b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py @@ -4,14 +4,34 @@ def test_ConvertWarp_inputs(): input_map = dict( - abswarp=dict(argstr="--abs", xor=["relwarp"]), - args=dict(argstr="%s"), - cons_jacobian=dict(argstr="--constrainj"), - environ=dict(nohash=True, usedefault=True), - jacobian_max=dict(argstr="--jmax=%f"), - jacobian_min=dict(argstr="--jmin=%f"), - midmat=dict(argstr="--midmat=%s", extensions=None), - out_abswarp=dict(argstr="--absout", xor=["out_relwarp"]), + abswarp=dict( + argstr="--abs", + xor=["relwarp"], + ), + args=dict( + argstr="%s", + ), + cons_jacobian=dict( + argstr="--constrainj", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + jacobian_max=dict( + argstr="--jmax=%f", + ), + jacobian_min=dict( + argstr="--jmin=%f", + ), + midmat=dict( + argstr="--midmat=%s", + extensions=None, + ), + out_abswarp=dict( + argstr="--absout", + xor=["out_relwarp"], + ), out_file=dict( argstr="--out=%s", extensions=None, @@ -20,16 +40,45 @@ def test_ConvertWarp_inputs(): output_name="out_file", position=-1, ), - out_relwarp=dict(argstr="--relout", xor=["out_abswarp"]), + out_relwarp=dict( + argstr="--relout", + xor=["out_abswarp"], + ), output_type=dict(), - postmat=dict(argstr="--postmat=%s", extensions=None), - premat=dict(argstr="--premat=%s", extensions=None), - reference=dict(argstr="--ref=%s", extensions=None, mandatory=True, position=1), - relwarp=dict(argstr="--rel", xor=["abswarp"]), - shift_direction=dict(argstr="--shiftdir=%s", requires=["shift_in_file"]), - shift_in_file=dict(argstr="--shiftmap=%s", extensions=None), - warp1=dict(argstr="--warp1=%s", extensions=None), - warp2=dict(argstr="--warp2=%s", extensions=None), + postmat=dict( + argstr="--postmat=%s", + extensions=None, + ), + premat=dict( + argstr="--premat=%s", + extensions=None, + ), + reference=dict( + argstr="--ref=%s", + extensions=None, + mandatory=True, + position=1, + ), + relwarp=dict( + argstr="--rel", + xor=["abswarp"], + ), + shift_direction=dict( + argstr="--shiftdir=%s", + requires=["shift_in_file"], + ), + shift_in_file=dict( + argstr="--shiftmap=%s", + extensions=None, + ), + warp1=dict( + argstr="--warp1=%s", + extensions=None, + ), + warp2=dict( + argstr="--warp2=%s", + extensions=None, + ), ) inputs = ConvertWarp.input_spec() @@ -39,7 +88,11 @@ def test_ConvertWarp_inputs(): def test_ConvertWarp_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ConvertWarp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py b/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py index dff16762b9..60e1d7553b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py +++ b/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py @@ -4,22 +4,36 @@ def test_ConvertXFM_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), concat_xfm=dict( argstr="-concat", position=-3, 
requires=["in_file2"], xor=["invert_xfm", "concat_xfm", "fix_scale_skew"], ), - environ=dict(nohash=True, usedefault=True), + environ=dict( + nohash=True, + usedefault=True, + ), fix_scale_skew=dict( argstr="-fixscaleskew", position=-3, requires=["in_file2"], xor=["invert_xfm", "concat_xfm", "fix_scale_skew"], ), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), - in_file2=dict(argstr="%s", extensions=None, position=-2), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), + in_file2=dict( + argstr="%s", + extensions=None, + position=-2, + ), invert_xfm=dict( argstr="-inverse", position=-3, @@ -42,7 +56,11 @@ def test_ConvertXFM_inputs(): def test_ConvertXFM_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ConvertXFM.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py b/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py index 783b0d0752..6f15d47bfc 100644 --- a/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py +++ b/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py @@ -4,7 +4,9 @@ def test_CopyGeom_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), dest_file=dict( argstr="%s", copyfile=True, @@ -15,9 +17,20 @@ def test_CopyGeom_inputs(): output_name="out_file", position=1, ), - environ=dict(nohash=True, usedefault=True), - ignore_dims=dict(argstr="-d", position="-1"), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), + environ=dict( + nohash=True, + usedefault=True, + ), + ignore_dims=dict( + argstr="-d", + position="-1", + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), output_type=dict(), ) inputs = CopyGeom.input_spec() @@ -28,7 +41,11 @@ def test_CopyGeom_inputs(): def test_CopyGeom_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = CopyGeom.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_DTIFit.py b/nipype/interfaces/fsl/tests/test_auto_DTIFit.py index 745fdaeba6..546ffa6848 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DTIFit.py +++ b/nipype/interfaces/fsl/tests/test_auto_DTIFit.py @@ -4,25 +4,78 @@ def test_DTIFit_inputs(): input_map = dict( - args=dict(argstr="%s"), - base_name=dict(argstr="-o %s", position=1, usedefault=True), - bvals=dict(argstr="-b %s", extensions=None, mandatory=True, position=4), - bvecs=dict(argstr="-r %s", extensions=None, mandatory=True, position=3), - cni=dict(argstr="--cni=%s", extensions=None), - dwi=dict(argstr="-k %s", extensions=None, mandatory=True, position=0), - environ=dict(nohash=True, usedefault=True), - gradnonlin=dict(argstr="--gradnonlin=%s", extensions=None), - little_bit=dict(argstr="--littlebit"), - mask=dict(argstr="-m %s", extensions=None, mandatory=True, position=2), - max_x=dict(argstr="-X %d"), - max_y=dict(argstr="-Y %d"), - max_z=dict(argstr="-Z %d"), - min_x=dict(argstr="-x %d"), - min_y=dict(argstr="-y %d"), - min_z=dict(argstr="-z %d"), + args=dict( + argstr="%s", + ), + base_name=dict( + argstr="-o %s", + position=1, + usedefault=True, + ), + bvals=dict( + argstr="-b %s", + extensions=None, + mandatory=True, + position=4, + ), + bvecs=dict( + argstr="-r %s", + extensions=None, + mandatory=True, + position=3, + ), + cni=dict( + argstr="--cni=%s", + 
extensions=None, + ), + dwi=dict( + argstr="-k %s", + extensions=None, + mandatory=True, + position=0, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gradnonlin=dict( + argstr="--gradnonlin=%s", + extensions=None, + ), + little_bit=dict( + argstr="--littlebit", + ), + mask=dict( + argstr="-m %s", + extensions=None, + mandatory=True, + position=2, + ), + max_x=dict( + argstr="-X %d", + ), + max_y=dict( + argstr="-Y %d", + ), + max_z=dict( + argstr="-Z %d", + ), + min_x=dict( + argstr="-x %d", + ), + min_y=dict( + argstr="-y %d", + ), + min_z=dict( + argstr="-z %d", + ), output_type=dict(), - save_tensor=dict(argstr="--save_tensor"), - sse=dict(argstr="--sse"), + save_tensor=dict( + argstr="--save_tensor", + ), + sse=dict( + argstr="--sse", + ), ) inputs = DTIFit.input_spec() @@ -33,18 +86,42 @@ def test_DTIFit_inputs(): def test_DTIFit_outputs(): output_map = dict( - FA=dict(extensions=None), - L1=dict(extensions=None), - L2=dict(extensions=None), - L3=dict(extensions=None), - MD=dict(extensions=None), - MO=dict(extensions=None), - S0=dict(extensions=None), - V1=dict(extensions=None), - V2=dict(extensions=None), - V3=dict(extensions=None), - sse=dict(extensions=None), - tensor=dict(extensions=None), + FA=dict( + extensions=None, + ), + L1=dict( + extensions=None, + ), + L2=dict( + extensions=None, + ), + L3=dict( + extensions=None, + ), + MD=dict( + extensions=None, + ), + MO=dict( + extensions=None, + ), + S0=dict( + extensions=None, + ), + V1=dict( + extensions=None, + ), + V2=dict( + extensions=None, + ), + V3=dict( + extensions=None, + ), + sse=dict( + extensions=None, + ), + tensor=dict( + extensions=None, + ), ) outputs = DTIFit.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_DilateImage.py b/nipype/interfaces/fsl/tests/test_auto_DilateImage.py index 867e5b2466..ac204e5a38 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DilateImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_DilateImage.py @@ -4,19 +4,58 @@ def test_DilateImage_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - internal_datatype=dict(argstr="-dt %s", position=1), - kernel_file=dict(argstr="%s", extensions=None, position=5, xor=["kernel_size"]), - kernel_shape=dict(argstr="-kernel %s", position=4), - kernel_size=dict(argstr="%.4f", position=5, xor=["kernel_file"]), - nan2zeros=dict(argstr="-nan", position=3), - operation=dict(argstr="-dil%s", mandatory=True, position=6), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + kernel_file=dict( + argstr="%s", + extensions=None, + position=5, + xor=["kernel_size"], + ), + kernel_shape=dict( + argstr="-kernel %s", + position=4, + ), + kernel_size=dict( + argstr="%.4f", + position=5, + xor=["kernel_file"], + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), + operation=dict( + argstr="-dil%s", + mandatory=True, + position=6, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = DilateImage.input_spec() @@ -27,7 +66,11 @@ def 
test_DilateImage_inputs(): def test_DilateImage_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = DilateImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py b/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py index db9891a2f0..74ea024917 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py +++ b/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py @@ -4,15 +4,35 @@ def test_DistanceMap_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), distance_map=dict( - argstr="--out=%s", extensions=None, genfile=True, hash_files=False + argstr="--out=%s", + extensions=None, + genfile=True, + hash_files=False, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="--in=%s", + extensions=None, + mandatory=True, + ), + invert_input=dict( + argstr="--invert", + ), + local_max_file=dict( + argstr="--localmax=%s", + hash_files=False, + ), + mask_file=dict( + argstr="--mask=%s", + extensions=None, ), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="--in=%s", extensions=None, mandatory=True), - invert_input=dict(argstr="--invert"), - local_max_file=dict(argstr="--localmax=%s", hash_files=False), - mask_file=dict(argstr="--mask=%s", extensions=None), output_type=dict(), ) inputs = DistanceMap.input_spec() @@ -24,7 +44,12 @@ def test_DistanceMap_inputs(): def test_DistanceMap_outputs(): output_map = dict( - distance_map=dict(extensions=None), local_max_file=dict(extensions=None) + distance_map=dict( + extensions=None, + ), + local_max_file=dict( + extensions=None, + ), ) outputs = DistanceMap.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_DualRegression.py b/nipype/interfaces/fsl/tests/test_auto_DualRegression.py index 8b71777e8b..4b3d8f6851 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DualRegression.py +++ b/nipype/interfaces/fsl/tests/test_auto_DualRegression.py @@ -4,16 +4,55 @@ def test_DualRegression_inputs(): input_map = dict( - args=dict(argstr="%s"), - con_file=dict(argstr="%s", extensions=None, position=4), - des_norm=dict(argstr="%i", position=2, usedefault=True), - design_file=dict(argstr="%s", extensions=None, position=3), - environ=dict(nohash=True, usedefault=True), - group_IC_maps_4D=dict(argstr="%s", extensions=None, mandatory=True, position=1), - in_files=dict(argstr="%s", mandatory=True, position=-1, sep=" "), - n_perm=dict(argstr="%i", mandatory=True, position=5), - one_sample_group_mean=dict(argstr="-1", position=3), - out_dir=dict(argstr="%s", genfile=True, position=6, usedefault=True), + args=dict( + argstr="%s", + ), + con_file=dict( + argstr="%s", + extensions=None, + position=4, + ), + des_norm=dict( + argstr="%i", + position=2, + usedefault=True, + ), + design_file=dict( + argstr="%s", + extensions=None, + position=3, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + group_IC_maps_4D=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + in_files=dict( + argstr="%s", + mandatory=True, + position=-1, + sep=" ", + ), + n_perm=dict( + argstr="%i", + mandatory=True, + position=5, + ), + one_sample_group_mean=dict( + argstr="-1", + position=3, + ), + out_dir=dict( + argstr="%s", + genfile=True, + position=6, + usedefault=True, + ), output_type=dict(), ) inputs = DualRegression.input_spec() @@ -24,7 +63,9 @@ def test_DualRegression_inputs(): def 
test_DualRegression_outputs(): - output_map = dict(out_dir=dict()) + output_map = dict( + out_dir=dict(), + ) outputs = DualRegression.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py b/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py index a398476793..0462fa9cbe 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py @@ -4,22 +4,68 @@ def test_EPIDeWarp_inputs(): input_map = dict( - args=dict(argstr="%s"), - cleanup=dict(argstr="--cleanup"), - dph_file=dict(argstr="--dph %s", extensions=None, mandatory=True), - environ=dict(nohash=True, usedefault=True), - epi_file=dict(argstr="--epi %s", extensions=None), - epidw=dict(argstr="--epidw %s", genfile=False), - esp=dict(argstr="--esp %s", usedefault=True), - exf_file=dict(argstr="--exf %s", extensions=None), - exfdw=dict(argstr="--exfdw %s", genfile=True), - mag_file=dict(argstr="--mag %s", extensions=None, mandatory=True, position=0), - nocleanup=dict(argstr="--nocleanup", usedefault=True), + args=dict( + argstr="%s", + ), + cleanup=dict( + argstr="--cleanup", + ), + dph_file=dict( + argstr="--dph %s", + extensions=None, + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + epi_file=dict( + argstr="--epi %s", + extensions=None, + ), + epidw=dict( + argstr="--epidw %s", + genfile=False, + ), + esp=dict( + argstr="--esp %s", + usedefault=True, + ), + exf_file=dict( + argstr="--exf %s", + extensions=None, + ), + exfdw=dict( + argstr="--exfdw %s", + genfile=True, + ), + mag_file=dict( + argstr="--mag %s", + extensions=None, + mandatory=True, + position=0, + ), + nocleanup=dict( + argstr="--nocleanup", + usedefault=True, + ), output_type=dict(), - sigma=dict(argstr="--sigma %s", usedefault=True), - tediff=dict(argstr="--tediff %s", usedefault=True), - tmpdir=dict(argstr="--tmpdir %s", genfile=True), - vsm=dict(argstr="--vsm %s", genfile=True), + sigma=dict( + argstr="--sigma %s", + usedefault=True, + ), + tediff=dict( + argstr="--tediff %s", + usedefault=True, + ), + tmpdir=dict( + argstr="--tmpdir %s", + genfile=True, + ), + vsm=dict( + argstr="--vsm %s", + genfile=True, + ), ) inputs = EPIDeWarp.input_spec() @@ -30,10 +76,18 @@ def test_EPIDeWarp_inputs(): def test_EPIDeWarp_outputs(): output_map = dict( - exf_mask=dict(extensions=None), - exfdw=dict(extensions=None), - unwarped_file=dict(extensions=None), - vsm_file=dict(extensions=None), + exf_mask=dict( + extensions=None, + ), + exfdw=dict( + extensions=None, + ), + unwarped_file=dict( + extensions=None, + ), + vsm_file=dict( + extensions=None, + ), ) outputs = EPIDeWarp.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Eddy.py b/nipype/interfaces/fsl/tests/test_auto_Eddy.py index ae2f013f47..0005085474 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Eddy.py +++ b/nipype/interfaces/fsl/tests/test_auto_Eddy.py @@ -4,33 +4,99 @@ def test_Eddy_inputs(): input_map = dict( - args=dict(argstr="%s"), - cnr_maps=dict(argstr="--cnr_maps", min_ver="5.0.10"), - dont_peas=dict(argstr="--dont_peas"), - dont_sep_offs_move=dict(argstr="--dont_sep_offs_move"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + cnr_maps=dict( + argstr="--cnr_maps", + min_ver="5.0.10", + ), + dont_peas=dict( + argstr="--dont_peas", + ), + dont_sep_offs_move=dict( + argstr="--dont_sep_offs_move", + ), + environ=dict( + nohash=True, + usedefault=True, + ), estimate_move_by_susceptibility=dict( - 
argstr="--estimate_move_by_susceptibility", min_ver="6.0.1" - ), - fep=dict(argstr="--fep"), - field=dict(argstr="--field=%s", extensions=None), - field_mat=dict(argstr="--field_mat=%s", extensions=None), - flm=dict(argstr="--flm=%s", usedefault=True), - fudge_factor=dict(argstr="--ff=%s", usedefault=True), - fwhm=dict(argstr="--fwhm=%s"), - in_acqp=dict(argstr="--acqp=%s", extensions=None, mandatory=True), - in_bval=dict(argstr="--bvals=%s", extensions=None, mandatory=True), - in_bvec=dict(argstr="--bvecs=%s", extensions=None, mandatory=True), - in_file=dict(argstr="--imain=%s", extensions=None, mandatory=True), - in_index=dict(argstr="--index=%s", extensions=None, mandatory=True), - in_mask=dict(argstr="--mask=%s", extensions=None, mandatory=True), + argstr="--estimate_move_by_susceptibility", + min_ver="6.0.1", + ), + fep=dict( + argstr="--fep", + ), + field=dict( + argstr="--field=%s", + extensions=None, + ), + field_mat=dict( + argstr="--field_mat=%s", + extensions=None, + ), + flm=dict( + argstr="--flm=%s", + usedefault=True, + ), + fudge_factor=dict( + argstr="--ff=%s", + usedefault=True, + ), + fwhm=dict( + argstr="--fwhm=%s", + ), + in_acqp=dict( + argstr="--acqp=%s", + extensions=None, + mandatory=True, + ), + in_bval=dict( + argstr="--bvals=%s", + extensions=None, + mandatory=True, + ), + in_bvec=dict( + argstr="--bvecs=%s", + extensions=None, + mandatory=True, + ), + in_file=dict( + argstr="--imain=%s", + extensions=None, + mandatory=True, + ), + in_index=dict( + argstr="--index=%s", + extensions=None, + mandatory=True, + ), + in_mask=dict( + argstr="--mask=%s", + extensions=None, + mandatory=True, + ), in_topup_fieldcoef=dict( - argstr="--topup=%s", extensions=None, requires=["in_topup_movpar"] + argstr="--topup=%s", + extensions=None, + requires=["in_topup_movpar"], + ), + in_topup_movpar=dict( + extensions=None, + requires=["in_topup_fieldcoef"], + ), + initrand=dict( + argstr="--initrand", + min_ver="5.0.10", + ), + interp=dict( + argstr="--interp=%s", + usedefault=True, + ), + is_shelled=dict( + argstr="--data_is_shelled", ), - in_topup_movpar=dict(extensions=None, requires=["in_topup_fieldcoef"]), - initrand=dict(argstr="--initrand", min_ver="5.0.10"), - interp=dict(argstr="--interp=%s", usedefault=True), - is_shelled=dict(argstr="--data_is_shelled"), json=dict( argstr="--json=%s", min_ver="6.0.1", @@ -52,38 +118,102 @@ def test_Eddy_inputs(): min_ver="6.0.1", requires=["estimate_move_by_susceptibility"], ), - method=dict(argstr="--resamp=%s", usedefault=True), - mporder=dict(argstr="--mporder=%s", min_ver="5.0.11", requires=["use_cuda"]), - multiband_factor=dict(argstr="--mb=%s", min_ver="5.0.10"), + method=dict( + argstr="--resamp=%s", + usedefault=True, + ), + mporder=dict( + argstr="--mporder=%s", + min_ver="5.0.11", + requires=["use_cuda"], + ), + multiband_factor=dict( + argstr="--mb=%s", + min_ver="5.0.10", + ), multiband_offset=dict( - argstr="--mb_offs=%d", min_ver="5.0.10", requires=["multiband_factor"] - ), - niter=dict(argstr="--niter=%s", usedefault=True), - num_threads=dict(nohash=True, usedefault=True), - nvoxhp=dict(argstr="--nvoxhp=%s", usedefault=True), - out_base=dict(argstr="--out=%s", usedefault=True), - outlier_nstd=dict(argstr="--ol_nstd", min_ver="5.0.10", requires=["repol"]), - outlier_nvox=dict(argstr="--ol_nvox", min_ver="5.0.10", requires=["repol"]), - outlier_pos=dict(argstr="--ol_pos", min_ver="5.0.10", requires=["repol"]), - outlier_sqr=dict(argstr="--ol_sqr", min_ver="5.0.10", requires=["repol"]), - outlier_type=dict(argstr="--ol_type", 
min_ver="5.0.10", requires=["repol"]), + argstr="--mb_offs=%d", + min_ver="5.0.10", + requires=["multiband_factor"], + ), + niter=dict( + argstr="--niter=%s", + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + nvoxhp=dict( + argstr="--nvoxhp=%s", + usedefault=True, + ), + out_base=dict( + argstr="--out=%s", + usedefault=True, + ), + outlier_nstd=dict( + argstr="--ol_nstd", + min_ver="5.0.10", + requires=["repol"], + ), + outlier_nvox=dict( + argstr="--ol_nvox", + min_ver="5.0.10", + requires=["repol"], + ), + outlier_pos=dict( + argstr="--ol_pos", + min_ver="5.0.10", + requires=["repol"], + ), + outlier_sqr=dict( + argstr="--ol_sqr", + min_ver="5.0.10", + requires=["repol"], + ), + outlier_type=dict( + argstr="--ol_type", + min_ver="5.0.10", + requires=["repol"], + ), output_type=dict(), - repol=dict(argstr="--repol"), - residuals=dict(argstr="--residuals", min_ver="5.0.10"), - session=dict(argstr="--session=%s", extensions=None), + repol=dict( + argstr="--repol", + ), + residuals=dict( + argstr="--residuals", + min_ver="5.0.10", + ), + session=dict( + argstr="--session=%s", + extensions=None, + ), slice2vol_interp=dict( - argstr="--s2v_interp=%s", min_ver="5.0.11", requires=["mporder"] + argstr="--s2v_interp=%s", + min_ver="5.0.11", + requires=["mporder"], ), slice2vol_lambda=dict( - argstr="--s2v_lambda=%d", min_ver="5.0.11", requires=["mporder"] + argstr="--s2v_lambda=%d", + min_ver="5.0.11", + requires=["mporder"], ), slice2vol_niter=dict( - argstr="--s2v_niter=%d", min_ver="5.0.11", requires=["mporder"] + argstr="--s2v_niter=%d", + min_ver="5.0.11", + requires=["mporder"], ), slice_order=dict( - argstr="--slspec=%s", min_ver="5.0.11", requires=["mporder"], xor=["json"] + argstr="--slspec=%s", + min_ver="5.0.11", + requires=["mporder"], + xor=["json"], + ), + slm=dict( + argstr="--slm=%s", + usedefault=True, ), - slm=dict(argstr="--slm=%s", usedefault=True), use_cuda=dict(), ) inputs = Eddy.input_spec() @@ -95,21 +225,51 @@ def test_Eddy_inputs(): def test_Eddy_outputs(): output_map = dict( - out_cnr_maps=dict(extensions=None), - out_corrected=dict(extensions=None), - out_movement_over_time=dict(extensions=None), - out_movement_rms=dict(extensions=None), - out_outlier_free=dict(extensions=None), - out_outlier_map=dict(extensions=None), - out_outlier_n_sqr_stdev_map=dict(extensions=None), - out_outlier_n_stdev_map=dict(extensions=None), - out_outlier_report=dict(extensions=None), - out_parameter=dict(extensions=None), - out_residuals=dict(extensions=None), - out_restricted_movement_rms=dict(extensions=None), - out_rotated_bvecs=dict(extensions=None), - out_shell_alignment_parameters=dict(extensions=None), - out_shell_pe_translation_parameters=dict(extensions=None), + out_cnr_maps=dict( + extensions=None, + ), + out_corrected=dict( + extensions=None, + ), + out_movement_over_time=dict( + extensions=None, + ), + out_movement_rms=dict( + extensions=None, + ), + out_outlier_free=dict( + extensions=None, + ), + out_outlier_map=dict( + extensions=None, + ), + out_outlier_n_sqr_stdev_map=dict( + extensions=None, + ), + out_outlier_n_stdev_map=dict( + extensions=None, + ), + out_outlier_report=dict( + extensions=None, + ), + out_parameter=dict( + extensions=None, + ), + out_residuals=dict( + extensions=None, + ), + out_restricted_movement_rms=dict( + extensions=None, + ), + out_rotated_bvecs=dict( + extensions=None, + ), + out_shell_alignment_parameters=dict( + extensions=None, + ), + out_shell_pe_translation_parameters=dict( + extensions=None, + ), ) outputs = 
Eddy.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py b/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py index 4d63c36b51..e2ce1c0a3b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py +++ b/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py @@ -4,9 +4,19 @@ def test_EddyCorrect_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), out_file=dict( argstr="%s", extensions=None, @@ -16,7 +26,12 @@ def test_EddyCorrect_inputs(): position=1, ), output_type=dict(), - ref_num=dict(argstr="%d", mandatory=True, position=2, usedefault=True), + ref_num=dict( + argstr="%d", + mandatory=True, + position=2, + usedefault=True, + ), ) inputs = EddyCorrect.input_spec() @@ -26,7 +41,11 @@ def test_EddyCorrect_inputs(): def test_EddyCorrect_outputs(): - output_map = dict(eddy_corrected=dict(extensions=None)) + output_map = dict( + eddy_corrected=dict( + extensions=None, + ), + ) outputs = EddyCorrect.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py b/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py index 3b96a2ad6e..3d9756a4be 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py +++ b/nipype/interfaces/fsl/tests/test_auto_EddyQuad.py @@ -4,21 +4,59 @@ def test_EddyQuad_inputs(): input_map = dict( - args=dict(argstr="%s"), - base_name=dict(argstr="%s", position=0, usedefault=True), - bval_file=dict(argstr="--bvals %s", extensions=None, mandatory=True), - bvec_file=dict(argstr="--bvecs %s", extensions=None), - environ=dict(nohash=True, usedefault=True), - field=dict(argstr="--field %s", extensions=None), - idx_file=dict(argstr="--eddyIdx %s", extensions=None, mandatory=True), - mask_file=dict(argstr="--mask %s", extensions=None, mandatory=True), + args=dict( + argstr="%s", + ), + base_name=dict( + argstr="%s", + position=0, + usedefault=True, + ), + bval_file=dict( + argstr="--bvals %s", + extensions=None, + mandatory=True, + ), + bvec_file=dict( + argstr="--bvecs %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + field=dict( + argstr="--field %s", + extensions=None, + ), + idx_file=dict( + argstr="--eddyIdx %s", + extensions=None, + mandatory=True, + ), + mask_file=dict( + argstr="--mask %s", + extensions=None, + mandatory=True, + ), output_dir=dict( - argstr="--output-dir %s", name_source=["base_name"], name_template="%s.qc" + argstr="--output-dir %s", + name_source=["base_name"], + name_template="%s.qc", ), output_type=dict(), - param_file=dict(argstr="--eddyParams %s", extensions=None, mandatory=True), - slice_spec=dict(argstr="--slspec %s", extensions=None), - verbose=dict(argstr="--verbose"), + param_file=dict( + argstr="--eddyParams %s", + extensions=None, + mandatory=True, + ), + slice_spec=dict( + argstr="--slspec %s", + extensions=None, + ), + verbose=dict( + argstr="--verbose", + ), ) inputs = EddyQuad.input_spec() @@ -31,12 +69,22 @@ def test_EddyQuad_outputs(): output_map = dict( avg_b0_pe_png=dict(), avg_b_png=dict(), - clean_volumes=dict(extensions=None), + clean_volumes=dict( + extensions=None, + ), cnr_png=dict(), - qc_json=dict(extensions=None), - qc_pdf=dict(extensions=None), - residuals=dict(extensions=None), - 
vdm_png=dict(extensions=None), + qc_json=dict( + extensions=None, + ), + qc_pdf=dict( + extensions=None, + ), + residuals=dict( + extensions=None, + ), + vdm_png=dict( + extensions=None, + ), ) outputs = EddyQuad.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_EpiReg.py b/nipype/interfaces/fsl/tests/test_auto_EpiReg.py index 2341c9229d..242c2e6040 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EpiReg.py +++ b/nipype/interfaces/fsl/tests/test_auto_EpiReg.py @@ -4,24 +4,70 @@ def test_EpiReg_inputs(): input_map = dict( - args=dict(argstr="%s"), - echospacing=dict(argstr="--echospacing=%f"), - environ=dict(nohash=True, usedefault=True), - epi=dict(argstr="--epi=%s", extensions=None, mandatory=True, position=-4), - fmap=dict(argstr="--fmap=%s", extensions=None), - fmapmag=dict(argstr="--fmapmag=%s", extensions=None), - fmapmagbrain=dict(argstr="--fmapmagbrain=%s", extensions=None), - no_clean=dict(argstr="--noclean", usedefault=True), - no_fmapreg=dict(argstr="--nofmapreg"), - out_base=dict(argstr="--out=%s", position=-1, usedefault=True), + args=dict( + argstr="%s", + ), + echospacing=dict( + argstr="--echospacing=%f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + epi=dict( + argstr="--epi=%s", + extensions=None, + mandatory=True, + position=-4, + ), + fmap=dict( + argstr="--fmap=%s", + extensions=None, + ), + fmapmag=dict( + argstr="--fmapmag=%s", + extensions=None, + ), + fmapmagbrain=dict( + argstr="--fmapmagbrain=%s", + extensions=None, + ), + no_clean=dict( + argstr="--noclean", + usedefault=True, + ), + no_fmapreg=dict( + argstr="--nofmapreg", + ), + out_base=dict( + argstr="--out=%s", + position=-1, + usedefault=True, + ), output_type=dict(), - pedir=dict(argstr="--pedir=%s"), + pedir=dict( + argstr="--pedir=%s", + ), t1_brain=dict( - argstr="--t1brain=%s", extensions=None, mandatory=True, position=-2 + argstr="--t1brain=%s", + extensions=None, + mandatory=True, + position=-2, + ), + t1_head=dict( + argstr="--t1=%s", + extensions=None, + mandatory=True, + position=-3, + ), + weight_image=dict( + argstr="--weight=%s", + extensions=None, + ), + wmseg=dict( + argstr="--wmseg=%s", + extensions=None, ), - t1_head=dict(argstr="--t1=%s", extensions=None, mandatory=True, position=-3), - weight_image=dict(argstr="--weight=%s", extensions=None), - wmseg=dict(argstr="--wmseg=%s", extensions=None), ) inputs = EpiReg.input_spec() @@ -32,20 +78,48 @@ def test_EpiReg_inputs(): def test_EpiReg_outputs(): output_map = dict( - epi2str_inv=dict(extensions=None), - epi2str_mat=dict(extensions=None), - fmap2epi_mat=dict(extensions=None), - fmap2str_mat=dict(extensions=None), - fmap_epi=dict(extensions=None), - fmap_str=dict(extensions=None), - fmapmag_str=dict(extensions=None), - fullwarp=dict(extensions=None), - out_1vol=dict(extensions=None), - out_file=dict(extensions=None), - seg=dict(extensions=None), - shiftmap=dict(extensions=None), - wmedge=dict(extensions=None), - wmseg=dict(extensions=None), + epi2str_inv=dict( + extensions=None, + ), + epi2str_mat=dict( + extensions=None, + ), + fmap2epi_mat=dict( + extensions=None, + ), + fmap2str_mat=dict( + extensions=None, + ), + fmap_epi=dict( + extensions=None, + ), + fmap_str=dict( + extensions=None, + ), + fmapmag_str=dict( + extensions=None, + ), + fullwarp=dict( + extensions=None, + ), + out_1vol=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + seg=dict( + extensions=None, + ), + shiftmap=dict( + extensions=None, + ), + wmedge=dict( + extensions=None, + ), + wmseg=dict( + extensions=None, + 
), ) outputs = EpiReg.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py b/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py index a4f066c297..1aad31cd16 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py @@ -4,19 +4,58 @@ def test_ErodeImage_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - internal_datatype=dict(argstr="-dt %s", position=1), - kernel_file=dict(argstr="%s", extensions=None, position=5, xor=["kernel_size"]), - kernel_shape=dict(argstr="-kernel %s", position=4), - kernel_size=dict(argstr="%.4f", position=5, xor=["kernel_file"]), - minimum_filter=dict(argstr="%s", position=6, usedefault=True), - nan2zeros=dict(argstr="-nan", position=3), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + kernel_file=dict( + argstr="%s", + extensions=None, + position=5, + xor=["kernel_size"], + ), + kernel_shape=dict( + argstr="-kernel %s", + position=4, + ), + kernel_size=dict( + argstr="%.4f", + position=5, + xor=["kernel_file"], + ), + minimum_filter=dict( + argstr="%s", + position=6, + usedefault=True, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = ErodeImage.input_spec() @@ -27,7 +66,11 @@ def test_ErodeImage_inputs(): def test_ErodeImage_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ErodeImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py b/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py index cfb7523757..bd6acb137c 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py +++ b/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py @@ -4,7 +4,9 @@ def test_ExtractROI_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), crop_list=dict( argstr="%s", position=2, @@ -19,20 +21,56 @@ def test_ExtractROI_inputs(): "t_size", ], ), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), output_type=dict(), roi_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=1 - ), - t_min=dict(argstr="%d", position=8), - t_size=dict(argstr="%d", position=9), - x_min=dict(argstr="%d", position=2), - x_size=dict(argstr="%d", position=3), - y_min=dict(argstr="%d", position=4), - y_size=dict(argstr="%d", position=5), - z_min=dict(argstr="%d", position=6), - z_size=dict(argstr="%d", position=7), + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=1, + ), + t_min=dict( + argstr="%d", + position=8, + ), + t_size=dict( + argstr="%d", + position=9, + ), + x_min=dict( + argstr="%d", + position=2, + 
), + x_size=dict( + argstr="%d", + position=3, + ), + y_min=dict( + argstr="%d", + position=4, + ), + y_size=dict( + argstr="%d", + position=5, + ), + z_min=dict( + argstr="%d", + position=6, + ), + z_size=dict( + argstr="%d", + position=7, + ), ) inputs = ExtractROI.input_spec() @@ -42,7 +80,11 @@ def test_ExtractROI_inputs(): def test_ExtractROI_outputs(): - output_map = dict(roi_file=dict(extensions=None)) + output_map = dict( + roi_file=dict( + extensions=None, + ), + ) outputs = ExtractROI.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FAST.py b/nipype/interfaces/fsl/tests/test_auto_FAST.py index 0be8b88bf6..e775d97b35 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FAST.py +++ b/nipype/interfaces/fsl/tests/test_auto_FAST.py @@ -4,31 +4,87 @@ def test_FAST_inputs(): input_map = dict( - args=dict(argstr="%s"), - bias_iters=dict(argstr="-I %d"), - bias_lowpass=dict(argstr="-l %d", units="mm"), - environ=dict(nohash=True, usedefault=True), - hyper=dict(argstr="-H %.2f"), - img_type=dict(argstr="-t %d"), - in_files=dict(argstr="%s", copyfile=False, mandatory=True, position=-1), - init_seg_smooth=dict(argstr="-f %.3f"), - init_transform=dict(argstr="-a %s", extensions=None), - iters_afterbias=dict(argstr="-O %d"), - manual_seg=dict(argstr="-s %s", extensions=None), - mixel_smooth=dict(argstr="-R %.2f"), - no_bias=dict(argstr="-N"), - no_pve=dict(argstr="--nopve"), - number_classes=dict(argstr="-n %d"), - other_priors=dict(argstr="-A %s"), - out_basename=dict(argstr="-o %s", extensions=None), - output_biascorrected=dict(argstr="-B"), - output_biasfield=dict(argstr="-b"), + args=dict( + argstr="%s", + ), + bias_iters=dict( + argstr="-I %d", + ), + bias_lowpass=dict( + argstr="-l %d", + units="mm", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hyper=dict( + argstr="-H %.2f", + ), + img_type=dict( + argstr="-t %d", + ), + in_files=dict( + argstr="%s", + copyfile=False, + mandatory=True, + position=-1, + ), + init_seg_smooth=dict( + argstr="-f %.3f", + ), + init_transform=dict( + argstr="-a %s", + extensions=None, + ), + iters_afterbias=dict( + argstr="-O %d", + ), + manual_seg=dict( + argstr="-s %s", + extensions=None, + ), + mixel_smooth=dict( + argstr="-R %.2f", + ), + no_bias=dict( + argstr="-N", + ), + no_pve=dict( + argstr="--nopve", + ), + number_classes=dict( + argstr="-n %d", + ), + other_priors=dict( + argstr="-A %s", + ), + out_basename=dict( + argstr="-o %s", + extensions=None, + ), + output_biascorrected=dict( + argstr="-B", + ), + output_biasfield=dict( + argstr="-b", + ), output_type=dict(), - probability_maps=dict(argstr="-p"), - segment_iters=dict(argstr="-W %d"), - segments=dict(argstr="-g"), - use_priors=dict(argstr="-P"), - verbose=dict(argstr="-v"), + probability_maps=dict( + argstr="-p", + ), + segment_iters=dict( + argstr="-W %d", + ), + segments=dict( + argstr="-g", + ), + use_priors=dict( + argstr="-P", + ), + verbose=dict( + argstr="-v", + ), ) inputs = FAST.input_spec() @@ -40,13 +96,19 @@ def test_FAST_inputs(): def test_FAST_outputs(): output_map = dict( bias_field=dict(), - mixeltype=dict(extensions=None), + mixeltype=dict( + extensions=None, + ), partial_volume_files=dict(), - partial_volume_map=dict(extensions=None), + partial_volume_map=dict( + extensions=None, + ), probability_maps=dict(), restored_image=dict(), tissue_class_files=dict(), - tissue_class_map=dict(extensions=None), + tissue_class_map=dict( + extensions=None, + ), ) outputs = FAST.output_spec() diff --git 
a/nipype/interfaces/fsl/tests/test_auto_FEAT.py b/nipype/interfaces/fsl/tests/test_auto_FEAT.py index 4521b9d55c..b363dd290f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FEAT.py +++ b/nipype/interfaces/fsl/tests/test_auto_FEAT.py @@ -4,9 +4,19 @@ def test_FEAT_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - fsf_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fsf_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), output_type=dict(), ) inputs = FEAT.input_spec() @@ -17,7 +27,9 @@ def test_FEAT_inputs(): def test_FEAT_outputs(): - output_map = dict(feat_dir=dict()) + output_map = dict( + feat_dir=dict(), + ) outputs = FEAT.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FEATModel.py b/nipype/interfaces/fsl/tests/test_auto_FEATModel.py index 304cd029e0..0e6c2f9e33 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FEATModel.py +++ b/nipype/interfaces/fsl/tests/test_auto_FEATModel.py @@ -4,11 +4,25 @@ def test_FEATModel_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - ev_files=dict(argstr="%s", copyfile=False, mandatory=True, position=1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ev_files=dict( + argstr="%s", + copyfile=False, + mandatory=True, + position=1, + ), fsf_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=0, ), output_type=dict(), ) @@ -21,11 +35,21 @@ def test_FEATModel_inputs(): def test_FEATModel_outputs(): output_map = dict( - con_file=dict(extensions=None), - design_cov=dict(extensions=None), - design_file=dict(extensions=None), - design_image=dict(extensions=None), - fcon_file=dict(extensions=None), + con_file=dict( + extensions=None, + ), + design_cov=dict( + extensions=None, + ), + design_file=dict( + extensions=None, + ), + design_image=dict( + extensions=None, + ), + fcon_file=dict( + extensions=None, + ), ) outputs = FEATModel.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py b/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py index bb8637703c..fe09c468ec 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py +++ b/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py @@ -4,9 +4,16 @@ def test_FEATRegister_inputs(): input_map = dict( - feat_dirs=dict(mandatory=True), - reg_dof=dict(usedefault=True), - reg_image=dict(extensions=None, mandatory=True), + feat_dirs=dict( + mandatory=True, + ), + reg_dof=dict( + usedefault=True, + ), + reg_image=dict( + extensions=None, + mandatory=True, + ), ) inputs = FEATRegister.input_spec() @@ -16,7 +23,11 @@ def test_FEATRegister_inputs(): def test_FEATRegister_outputs(): - output_map = dict(fsf_file=dict(extensions=None)) + output_map = dict( + fsf_file=dict( + extensions=None, + ), + ) outputs = FEATRegister.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FIRST.py b/nipype/interfaces/fsl/tests/test_auto_FIRST.py index d3ff1c9c26..42ba79e799 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FIRST.py +++ b/nipype/interfaces/fsl/tests/test_auto_FIRST.py @@ -4,22 +4,48 @@ def test_FIRST_inputs(): input_map = dict( - affine_file=dict(argstr="-a %s", 
extensions=None, position=6), - args=dict(argstr="%s"), - brain_extracted=dict(argstr="-b", position=2), - environ=dict(nohash=True, usedefault=True), + affine_file=dict( + argstr="-a %s", + extensions=None, + position=6, + ), + args=dict( + argstr="%s", + ), + brain_extracted=dict( + argstr="-b", + position=2, + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="-i %s", copyfile=False, extensions=None, mandatory=True, position=-2 + argstr="-i %s", + copyfile=False, + extensions=None, + mandatory=True, + position=-2, + ), + list_of_specific_structures=dict( + argstr="-s %s", + position=5, + sep=",", ), - list_of_specific_structures=dict(argstr="-s %s", position=5, sep=","), method=dict( argstr="-m %s", position=4, usedefault=True, xor=["method_as_numerical_threshold"], ), - method_as_numerical_threshold=dict(argstr="-m %.4f", position=4), - no_cleanup=dict(argstr="-d", position=3), + method_as_numerical_threshold=dict( + argstr="-m %.4f", + position=4, + ), + no_cleanup=dict( + argstr="-d", + position=3, + ), out_file=dict( argstr="-o %s", extensions=None, @@ -29,7 +55,10 @@ def test_FIRST_inputs(): usedefault=True, ), output_type=dict(), - verbose=dict(argstr="-v", position=1), + verbose=dict( + argstr="-v", + position=1, + ), ) inputs = FIRST.input_spec() @@ -41,8 +70,12 @@ def test_FIRST_inputs(): def test_FIRST_outputs(): output_map = dict( bvars=dict(), - original_segmentations=dict(extensions=None), - segmentation_file=dict(extensions=None), + original_segmentations=dict( + extensions=None, + ), + segmentation_file=dict( + extensions=None, + ), vtk_surfaces=dict(), ) outputs = FIRST.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py b/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py index 465f322dc2..f25b225d6e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py +++ b/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py @@ -4,29 +4,83 @@ def test_FLAMEO_inputs(): input_map = dict( - args=dict(argstr="%s"), - burnin=dict(argstr="--burnin=%d"), - cope_file=dict(argstr="--copefile=%s", extensions=None, mandatory=True), + args=dict( + argstr="%s", + ), + burnin=dict( + argstr="--burnin=%d", + ), + cope_file=dict( + argstr="--copefile=%s", + extensions=None, + mandatory=True, + ), cov_split_file=dict( - argstr="--covsplitfile=%s", extensions=None, mandatory=True - ), - design_file=dict(argstr="--designfile=%s", extensions=None, mandatory=True), - dof_var_cope_file=dict(argstr="--dofvarcopefile=%s", extensions=None), - environ=dict(nohash=True, usedefault=True), - f_con_file=dict(argstr="--fcontrastsfile=%s", extensions=None), - fix_mean=dict(argstr="--fixmean"), - infer_outliers=dict(argstr="--inferoutliers"), - log_dir=dict(argstr="--ld=%s", usedefault=True), - mask_file=dict(argstr="--maskfile=%s", extensions=None, mandatory=True), - n_jumps=dict(argstr="--njumps=%d"), - no_pe_outputs=dict(argstr="--nopeoutput"), - outlier_iter=dict(argstr="--ioni=%d"), + argstr="--covsplitfile=%s", + extensions=None, + mandatory=True, + ), + design_file=dict( + argstr="--designfile=%s", + extensions=None, + mandatory=True, + ), + dof_var_cope_file=dict( + argstr="--dofvarcopefile=%s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + f_con_file=dict( + argstr="--fcontrastsfile=%s", + extensions=None, + ), + fix_mean=dict( + argstr="--fixmean", + ), + infer_outliers=dict( + argstr="--inferoutliers", + ), + log_dir=dict( + argstr="--ld=%s", + usedefault=True, + ), + mask_file=dict( + argstr="--maskfile=%s", + 
extensions=None, + mandatory=True, + ), + n_jumps=dict( + argstr="--njumps=%d", + ), + no_pe_outputs=dict( + argstr="--nopeoutput", + ), + outlier_iter=dict( + argstr="--ioni=%d", + ), output_type=dict(), - run_mode=dict(argstr="--runmode=%s", mandatory=True), - sample_every=dict(argstr="--sampleevery=%d"), - sigma_dofs=dict(argstr="--sigma_dofs=%d"), - t_con_file=dict(argstr="--tcontrastsfile=%s", extensions=None, mandatory=True), - var_cope_file=dict(argstr="--varcopefile=%s", extensions=None), + run_mode=dict( + argstr="--runmode=%s", + mandatory=True, + ), + sample_every=dict( + argstr="--sampleevery=%d", + ), + sigma_dofs=dict( + argstr="--sigma_dofs=%d", + ), + t_con_file=dict( + argstr="--tcontrastsfile=%s", + extensions=None, + mandatory=True, + ), + var_cope_file=dict( + argstr="--varcopefile=%s", + extensions=None, + ), ) inputs = FLAMEO.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FLIRT.py b/nipype/interfaces/fsl/tests/test_auto_FLIRT.py index 91f9d890f9..a9bdc38477 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FLIRT.py +++ b/nipype/interfaces/fsl/tests/test_auto_FLIRT.py @@ -4,35 +4,110 @@ def test_FLIRT_inputs(): input_map = dict( - angle_rep=dict(argstr="-anglerep %s"), - apply_isoxfm=dict(argstr="-applyisoxfm %f", xor=["apply_xfm"]), - apply_xfm=dict(argstr="-applyxfm"), - args=dict(argstr="%s"), - bbrslope=dict(argstr="-bbrslope %f", min_ver="5.0.0"), - bbrtype=dict(argstr="-bbrtype %s", min_ver="5.0.0"), - bgvalue=dict(argstr="-setbackground %f"), - bins=dict(argstr="-bins %d"), - coarse_search=dict(argstr="-coarsesearch %d", units="degrees"), - cost=dict(argstr="-cost %s"), - cost_func=dict(argstr="-searchcost %s"), - datatype=dict(argstr="-datatype %s"), - display_init=dict(argstr="-displayinit"), - dof=dict(argstr="-dof %d"), - echospacing=dict(argstr="-echospacing %f", min_ver="5.0.0"), - environ=dict(nohash=True, usedefault=True), - fieldmap=dict(argstr="-fieldmap %s", extensions=None, min_ver="5.0.0"), - fieldmapmask=dict(argstr="-fieldmapmask %s", extensions=None, min_ver="5.0.0"), - fine_search=dict(argstr="-finesearch %d", units="degrees"), - force_scaling=dict(argstr="-forcescaling"), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=0), - in_matrix_file=dict(argstr="-init %s", extensions=None), - in_weight=dict(argstr="-inweight %s", extensions=None), - interp=dict(argstr="-interp %s"), - min_sampling=dict(argstr="-minsampling %f", units="mm"), - no_clamp=dict(argstr="-noclamp"), - no_resample=dict(argstr="-noresample"), - no_resample_blur=dict(argstr="-noresampblur"), - no_search=dict(argstr="-nosearch"), + angle_rep=dict( + argstr="-anglerep %s", + ), + apply_isoxfm=dict( + argstr="-applyisoxfm %f", + xor=["apply_xfm"], + ), + apply_xfm=dict( + argstr="-applyxfm", + ), + args=dict( + argstr="%s", + ), + bbrslope=dict( + argstr="-bbrslope %f", + min_ver="5.0.0", + ), + bbrtype=dict( + argstr="-bbrtype %s", + min_ver="5.0.0", + ), + bgvalue=dict( + argstr="-setbackground %f", + ), + bins=dict( + argstr="-bins %d", + ), + coarse_search=dict( + argstr="-coarsesearch %d", + units="degrees", + ), + cost=dict( + argstr="-cost %s", + ), + cost_func=dict( + argstr="-searchcost %s", + ), + datatype=dict( + argstr="-datatype %s", + ), + display_init=dict( + argstr="-displayinit", + ), + dof=dict( + argstr="-dof %d", + ), + echospacing=dict( + argstr="-echospacing %f", + min_ver="5.0.0", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fieldmap=dict( + argstr="-fieldmap %s", + extensions=None, + min_ver="5.0.0", 
+ ), + fieldmapmask=dict( + argstr="-fieldmapmask %s", + extensions=None, + min_ver="5.0.0", + ), + fine_search=dict( + argstr="-finesearch %d", + units="degrees", + ), + force_scaling=dict( + argstr="-forcescaling", + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + position=0, + ), + in_matrix_file=dict( + argstr="-init %s", + extensions=None, + ), + in_weight=dict( + argstr="-inweight %s", + extensions=None, + ), + interp=dict( + argstr="-interp %s", + ), + min_sampling=dict( + argstr="-minsampling %f", + units="mm", + ), + no_clamp=dict( + argstr="-noclamp", + ), + no_resample=dict( + argstr="-noresample", + ), + no_resample_blur=dict( + argstr="-noresampblur", + ), + no_search=dict( + argstr="-nosearch", + ), out_file=dict( argstr="-out %s", extensions=None, @@ -58,23 +133,72 @@ def test_FLIRT_inputs(): position=3, ), output_type=dict(), - padding_size=dict(argstr="-paddingsize %d", units="voxels"), - pedir=dict(argstr="-pedir %d", min_ver="5.0.0"), - ref_weight=dict(argstr="-refweight %s", extensions=None), - reference=dict(argstr="-ref %s", extensions=None, mandatory=True, position=1), - rigid2D=dict(argstr="-2D"), + padding_size=dict( + argstr="-paddingsize %d", + units="voxels", + ), + pedir=dict( + argstr="-pedir %d", + min_ver="5.0.0", + ), + ref_weight=dict( + argstr="-refweight %s", + extensions=None, + ), + reference=dict( + argstr="-ref %s", + extensions=None, + mandatory=True, + position=1, + ), + rigid2D=dict( + argstr="-2D", + ), save_log=dict(), - schedule=dict(argstr="-schedule %s", extensions=None), - searchr_x=dict(argstr="-searchrx %s", units="degrees"), - searchr_y=dict(argstr="-searchry %s", units="degrees"), - searchr_z=dict(argstr="-searchrz %s", units="degrees"), - sinc_width=dict(argstr="-sincwidth %d", units="voxels"), - sinc_window=dict(argstr="-sincwindow %s"), - uses_qform=dict(argstr="-usesqform"), - verbose=dict(argstr="-verbose %d"), - wm_seg=dict(argstr="-wmseg %s", extensions=None, min_ver="5.0.0"), - wmcoords=dict(argstr="-wmcoords %s", extensions=None, min_ver="5.0.0"), - wmnorms=dict(argstr="-wmnorms %s", extensions=None, min_ver="5.0.0"), + schedule=dict( + argstr="-schedule %s", + extensions=None, + ), + searchr_x=dict( + argstr="-searchrx %s", + units="degrees", + ), + searchr_y=dict( + argstr="-searchry %s", + units="degrees", + ), + searchr_z=dict( + argstr="-searchrz %s", + units="degrees", + ), + sinc_width=dict( + argstr="-sincwidth %d", + units="voxels", + ), + sinc_window=dict( + argstr="-sincwindow %s", + ), + uses_qform=dict( + argstr="-usesqform", + ), + verbose=dict( + argstr="-verbose %d", + ), + wm_seg=dict( + argstr="-wmseg %s", + extensions=None, + min_ver="5.0.0", + ), + wmcoords=dict( + argstr="-wmcoords %s", + extensions=None, + min_ver="5.0.0", + ), + wmnorms=dict( + argstr="-wmnorms %s", + extensions=None, + min_ver="5.0.0", + ), ) inputs = FLIRT.input_spec() @@ -85,9 +209,15 @@ def test_FLIRT_inputs(): def test_FLIRT_outputs(): output_map = dict( - out_file=dict(extensions=None), - out_log=dict(extensions=None), - out_matrix_file=dict(extensions=None), + out_file=dict( + extensions=None, + ), + out_log=dict( + extensions=None, + ), + out_matrix_file=dict( + extensions=None, + ), ) outputs = FLIRT.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FNIRT.py b/nipype/interfaces/fsl/tests/test_auto_FNIRT.py index 56a4f518bb..eb6ae1f714 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FNIRT.py +++ b/nipype/interfaces/fsl/tests/test_auto_FNIRT.py @@ -4,57 +4,169 @@ def 
test_FNIRT_inputs(): input_map = dict( - affine_file=dict(argstr="--aff=%s", extensions=None), - apply_inmask=dict(argstr="--applyinmask=%s", sep=",", xor=["skip_inmask"]), + affine_file=dict( + argstr="--aff=%s", + extensions=None, + ), + apply_inmask=dict( + argstr="--applyinmask=%s", + sep=",", + xor=["skip_inmask"], + ), apply_intensity_mapping=dict( - argstr="--estint=%s", sep=",", xor=["skip_intensity_mapping"] - ), - apply_refmask=dict(argstr="--applyrefmask=%s", sep=",", xor=["skip_refmask"]), - args=dict(argstr="%s"), - bias_regularization_lambda=dict(argstr="--biaslambda=%f"), - biasfield_resolution=dict(argstr="--biasres=%d,%d,%d"), - config_file=dict(argstr="--config=%s"), - derive_from_ref=dict(argstr="--refderiv"), - environ=dict(nohash=True, usedefault=True), - field_file=dict(argstr="--fout=%s", hash_files=False), - fieldcoeff_file=dict(argstr="--cout=%s"), - hessian_precision=dict(argstr="--numprec=%s"), - in_file=dict(argstr="--in=%s", extensions=None, mandatory=True), - in_fwhm=dict(argstr="--infwhm=%s", sep=","), - in_intensitymap_file=dict(argstr="--intin=%s", copyfile=False), - inmask_file=dict(argstr="--inmask=%s", extensions=None), - inmask_val=dict(argstr="--impinval=%f"), - intensity_mapping_model=dict(argstr="--intmod=%s"), - intensity_mapping_order=dict(argstr="--intorder=%d"), - inwarp_file=dict(argstr="--inwarp=%s", extensions=None), - jacobian_file=dict(argstr="--jout=%s", hash_files=False), - jacobian_range=dict(argstr="--jacrange=%f,%f"), + argstr="--estint=%s", + sep=",", + xor=["skip_intensity_mapping"], + ), + apply_refmask=dict( + argstr="--applyrefmask=%s", + sep=",", + xor=["skip_refmask"], + ), + args=dict( + argstr="%s", + ), + bias_regularization_lambda=dict( + argstr="--biaslambda=%f", + ), + biasfield_resolution=dict( + argstr="--biasres=%d,%d,%d", + ), + config_file=dict( + argstr="--config=%s", + ), + derive_from_ref=dict( + argstr="--refderiv", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + field_file=dict( + argstr="--fout=%s", + hash_files=False, + ), + fieldcoeff_file=dict( + argstr="--cout=%s", + ), + hessian_precision=dict( + argstr="--numprec=%s", + ), + in_file=dict( + argstr="--in=%s", + extensions=None, + mandatory=True, + ), + in_fwhm=dict( + argstr="--infwhm=%s", + sep=",", + ), + in_intensitymap_file=dict( + argstr="--intin=%s", + copyfile=False, + ), + inmask_file=dict( + argstr="--inmask=%s", + extensions=None, + ), + inmask_val=dict( + argstr="--impinval=%f", + ), + intensity_mapping_model=dict( + argstr="--intmod=%s", + ), + intensity_mapping_order=dict( + argstr="--intorder=%d", + ), + inwarp_file=dict( + argstr="--inwarp=%s", + extensions=None, + ), + jacobian_file=dict( + argstr="--jout=%s", + hash_files=False, + ), + jacobian_range=dict( + argstr="--jacrange=%f,%f", + ), log_file=dict( - argstr="--logout=%s", extensions=None, genfile=True, hash_files=False + argstr="--logout=%s", + extensions=None, + genfile=True, + hash_files=False, + ), + max_nonlin_iter=dict( + argstr="--miter=%s", + sep=",", + ), + modulatedref_file=dict( + argstr="--refout=%s", + hash_files=False, + ), + out_intensitymap_file=dict( + argstr="--intout=%s", + hash_files=False, ), - max_nonlin_iter=dict(argstr="--miter=%s", sep=","), - modulatedref_file=dict(argstr="--refout=%s", hash_files=False), - out_intensitymap_file=dict(argstr="--intout=%s", hash_files=False), output_type=dict(), - ref_file=dict(argstr="--ref=%s", extensions=None, mandatory=True), - ref_fwhm=dict(argstr="--reffwhm=%s", sep=","), - 
refmask_file=dict(argstr="--refmask=%s", extensions=None), - refmask_val=dict(argstr="--imprefval=%f"), - regularization_lambda=dict(argstr="--lambda=%s", sep=","), - regularization_model=dict(argstr="--regmod=%s"), - skip_implicit_in_masking=dict(argstr="--impinm=0"), - skip_implicit_ref_masking=dict(argstr="--imprefm=0"), - skip_inmask=dict(argstr="--applyinmask=0", xor=["apply_inmask"]), + ref_file=dict( + argstr="--ref=%s", + extensions=None, + mandatory=True, + ), + ref_fwhm=dict( + argstr="--reffwhm=%s", + sep=",", + ), + refmask_file=dict( + argstr="--refmask=%s", + extensions=None, + ), + refmask_val=dict( + argstr="--imprefval=%f", + ), + regularization_lambda=dict( + argstr="--lambda=%s", + sep=",", + ), + regularization_model=dict( + argstr="--regmod=%s", + ), + skip_implicit_in_masking=dict( + argstr="--impinm=0", + ), + skip_implicit_ref_masking=dict( + argstr="--imprefm=0", + ), + skip_inmask=dict( + argstr="--applyinmask=0", + xor=["apply_inmask"], + ), skip_intensity_mapping=dict( - argstr="--estint=0", xor=["apply_intensity_mapping"] + argstr="--estint=0", + xor=["apply_intensity_mapping"], + ), + skip_lambda_ssq=dict( + argstr="--ssqlambda=0", + ), + skip_refmask=dict( + argstr="--applyrefmask=0", + xor=["apply_refmask"], + ), + spline_order=dict( + argstr="--splineorder=%d", + ), + subsampling_scheme=dict( + argstr="--subsamp=%s", + sep=",", + ), + warp_resolution=dict( + argstr="--warpres=%d,%d,%d", ), - skip_lambda_ssq=dict(argstr="--ssqlambda=0"), - skip_refmask=dict(argstr="--applyrefmask=0", xor=["apply_refmask"]), - spline_order=dict(argstr="--splineorder=%d"), - subsampling_scheme=dict(argstr="--subsamp=%s", sep=","), - warp_resolution=dict(argstr="--warpres=%d,%d,%d"), warped_file=dict( - argstr="--iout=%s", extensions=None, genfile=True, hash_files=False + argstr="--iout=%s", + extensions=None, + genfile=True, + hash_files=False, ), ) inputs = FNIRT.input_spec() @@ -66,13 +178,25 @@ def test_FNIRT_inputs(): def test_FNIRT_outputs(): output_map = dict( - field_file=dict(extensions=None), - fieldcoeff_file=dict(extensions=None), - jacobian_file=dict(extensions=None), - log_file=dict(extensions=None), - modulatedref_file=dict(extensions=None), + field_file=dict( + extensions=None, + ), + fieldcoeff_file=dict( + extensions=None, + ), + jacobian_file=dict( + extensions=None, + ), + log_file=dict( + extensions=None, + ), + modulatedref_file=dict( + extensions=None, + ), out_intensitymap_file=dict(), - warped_file=dict(extensions=None), + warped_file=dict( + extensions=None, + ), ) outputs = FNIRT.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py b/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py index 0fc059c207..1b444c381e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py +++ b/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py @@ -4,8 +4,13 @@ def test_FSLCommand_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), output_type=dict(), ) inputs = FSLCommand.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py b/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py index 5ecdc4135b..3948f3d650 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py +++ b/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py @@ -4,34 +4,107 @@ def test_FSLXCommand_inputs(): input_map = dict( - all_ard=dict(argstr="--allard", xor=("no_ard", "all_ard")), - 
args=dict(argstr="%s"), - burn_in=dict(argstr="--burnin=%d", usedefault=True), - burn_in_no_ard=dict(argstr="--burnin_noard=%d", usedefault=True), - bvals=dict(argstr="--bvals=%s", extensions=None, mandatory=True), - bvecs=dict(argstr="--bvecs=%s", extensions=None, mandatory=True), - cnlinear=dict(argstr="--cnonlinear", xor=("no_spat", "non_linear", "cnlinear")), - dwi=dict(argstr="--data=%s", extensions=None, mandatory=True), - environ=dict(nohash=True, usedefault=True), - f0_ard=dict(argstr="--f0 --ardf0", xor=["f0_noard", "f0_ard", "all_ard"]), - f0_noard=dict(argstr="--f0", xor=["f0_noard", "f0_ard"]), - force_dir=dict(argstr="--forcedir", usedefault=True), - fudge=dict(argstr="--fudge=%d"), - logdir=dict(argstr="--logdir=%s", usedefault=True), - mask=dict(argstr="--mask=%s", extensions=None, mandatory=True), - model=dict(argstr="--model=%d"), - n_fibres=dict(argstr="--nfibres=%d", mandatory=True, usedefault=True), - n_jumps=dict(argstr="--njumps=%d", usedefault=True), - no_ard=dict(argstr="--noard", xor=("no_ard", "all_ard")), - no_spat=dict(argstr="--nospat", xor=("no_spat", "non_linear", "cnlinear")), + all_ard=dict( + argstr="--allard", + xor=("no_ard", "all_ard"), + ), + args=dict( + argstr="%s", + ), + burn_in=dict( + argstr="--burnin=%d", + usedefault=True, + ), + burn_in_no_ard=dict( + argstr="--burnin_noard=%d", + usedefault=True, + ), + bvals=dict( + argstr="--bvals=%s", + extensions=None, + mandatory=True, + ), + bvecs=dict( + argstr="--bvecs=%s", + extensions=None, + mandatory=True, + ), + cnlinear=dict( + argstr="--cnonlinear", + xor=("no_spat", "non_linear", "cnlinear"), + ), + dwi=dict( + argstr="--data=%s", + extensions=None, + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + f0_ard=dict( + argstr="--f0 --ardf0", + xor=["f0_noard", "f0_ard", "all_ard"], + ), + f0_noard=dict( + argstr="--f0", + xor=["f0_noard", "f0_ard"], + ), + force_dir=dict( + argstr="--forcedir", + usedefault=True, + ), + fudge=dict( + argstr="--fudge=%d", + ), + logdir=dict( + argstr="--logdir=%s", + usedefault=True, + ), + mask=dict( + argstr="--mask=%s", + extensions=None, + mandatory=True, + ), + model=dict( + argstr="--model=%d", + ), + n_fibres=dict( + argstr="--nfibres=%d", + mandatory=True, + usedefault=True, + ), + n_jumps=dict( + argstr="--njumps=%d", + usedefault=True, + ), + no_ard=dict( + argstr="--noard", + xor=("no_ard", "all_ard"), + ), + no_spat=dict( + argstr="--nospat", + xor=("no_spat", "non_linear", "cnlinear"), + ), non_linear=dict( - argstr="--nonlinear", xor=("no_spat", "non_linear", "cnlinear") + argstr="--nonlinear", + xor=("no_spat", "non_linear", "cnlinear"), ), output_type=dict(), - rician=dict(argstr="--rician"), - sample_every=dict(argstr="--sampleevery=%d", usedefault=True), - seed=dict(argstr="--seed=%d"), - update_proposal_every=dict(argstr="--updateproposalevery=%d", usedefault=True), + rician=dict( + argstr="--rician", + ), + sample_every=dict( + argstr="--sampleevery=%d", + usedefault=True, + ), + seed=dict( + argstr="--seed=%d", + ), + update_proposal_every=dict( + argstr="--updateproposalevery=%d", + usedefault=True, + ), ) inputs = FSLXCommand.input_spec() @@ -44,10 +117,16 @@ def test_FSLXCommand_outputs(): output_map = dict( dyads=dict(), fsamples=dict(), - mean_S0samples=dict(extensions=None), - mean_dsamples=dict(extensions=None), + mean_S0samples=dict( + extensions=None, + ), + mean_dsamples=dict( + extensions=None, + ), mean_fsamples=dict(), - mean_tausamples=dict(extensions=None), + mean_tausamples=dict( + extensions=None, + 
), phsamples=dict(), thsamples=dict(), ) diff --git a/nipype/interfaces/fsl/tests/test_auto_FUGUE.py b/nipype/interfaces/fsl/tests/test_auto_FUGUE.py index b33d65309a..841bb2021f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FUGUE.py +++ b/nipype/interfaces/fsl/tests/test_auto_FUGUE.py @@ -4,39 +4,115 @@ def test_FUGUE_inputs(): input_map = dict( - args=dict(argstr="%s"), - asym_se_time=dict(argstr="--asym=%.10f"), - despike_2dfilter=dict(argstr="--despike"), - despike_threshold=dict(argstr="--despikethreshold=%s"), - dwell_time=dict(argstr="--dwell=%.10f"), - dwell_to_asym_ratio=dict(argstr="--dwelltoasym=%.10f"), - environ=dict(nohash=True, usedefault=True), - fmap_in_file=dict(argstr="--loadfmap=%s", extensions=None), - fmap_out_file=dict(argstr="--savefmap=%s", extensions=None), - forward_warping=dict(usedefault=True), - fourier_order=dict(argstr="--fourier=%d"), - icorr=dict(argstr="--icorr", requires=["shift_in_file"]), - icorr_only=dict(argstr="--icorronly", requires=["unwarped_file"]), - in_file=dict(argstr="--in=%s", extensions=None), - mask_file=dict(argstr="--mask=%s", extensions=None), - median_2dfilter=dict(argstr="--median"), - no_extend=dict(argstr="--noextend"), - no_gap_fill=dict(argstr="--nofill"), - nokspace=dict(argstr="--nokspace"), + args=dict( + argstr="%s", + ), + asym_se_time=dict( + argstr="--asym=%.10f", + ), + despike_2dfilter=dict( + argstr="--despike", + ), + despike_threshold=dict( + argstr="--despikethreshold=%s", + ), + dwell_time=dict( + argstr="--dwell=%.10f", + ), + dwell_to_asym_ratio=dict( + argstr="--dwelltoasym=%.10f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fmap_in_file=dict( + argstr="--loadfmap=%s", + extensions=None, + ), + fmap_out_file=dict( + argstr="--savefmap=%s", + extensions=None, + ), + forward_warping=dict( + usedefault=True, + ), + fourier_order=dict( + argstr="--fourier=%d", + ), + icorr=dict( + argstr="--icorr", + requires=["shift_in_file"], + ), + icorr_only=dict( + argstr="--icorronly", + requires=["unwarped_file"], + ), + in_file=dict( + argstr="--in=%s", + extensions=None, + ), + mask_file=dict( + argstr="--mask=%s", + extensions=None, + ), + median_2dfilter=dict( + argstr="--median", + ), + no_extend=dict( + argstr="--noextend", + ), + no_gap_fill=dict( + argstr="--nofill", + ), + nokspace=dict( + argstr="--nokspace", + ), output_type=dict(), - pava=dict(argstr="--pava"), - phase_conjugate=dict(argstr="--phaseconj"), - phasemap_in_file=dict(argstr="--phasemap=%s", extensions=None), - poly_order=dict(argstr="--poly=%d"), - save_fmap=dict(xor=["save_unmasked_fmap"]), - save_shift=dict(xor=["save_unmasked_shift"]), - save_unmasked_fmap=dict(argstr="--unmaskfmap", xor=["save_fmap"]), - save_unmasked_shift=dict(argstr="--unmaskshift", xor=["save_shift"]), - shift_in_file=dict(argstr="--loadshift=%s", extensions=None), - shift_out_file=dict(argstr="--saveshift=%s", extensions=None), - smooth2d=dict(argstr="--smooth2=%.2f"), - smooth3d=dict(argstr="--smooth3=%.2f"), - unwarp_direction=dict(argstr="--unwarpdir=%s"), + pava=dict( + argstr="--pava", + ), + phase_conjugate=dict( + argstr="--phaseconj", + ), + phasemap_in_file=dict( + argstr="--phasemap=%s", + extensions=None, + ), + poly_order=dict( + argstr="--poly=%d", + ), + save_fmap=dict( + xor=["save_unmasked_fmap"], + ), + save_shift=dict( + xor=["save_unmasked_shift"], + ), + save_unmasked_fmap=dict( + argstr="--unmaskfmap", + xor=["save_fmap"], + ), + save_unmasked_shift=dict( + argstr="--unmaskshift", + xor=["save_shift"], + ), + shift_in_file=dict( + 
argstr="--loadshift=%s", + extensions=None, + ), + shift_out_file=dict( + argstr="--saveshift=%s", + extensions=None, + ), + smooth2d=dict( + argstr="--smooth2=%.2f", + ), + smooth3d=dict( + argstr="--smooth3=%.2f", + ), + unwarp_direction=dict( + argstr="--unwarpdir=%s", + ), unwarped_file=dict( argstr="--unwarp=%s", extensions=None, @@ -59,10 +135,18 @@ def test_FUGUE_inputs(): def test_FUGUE_outputs(): output_map = dict( - fmap_out_file=dict(extensions=None), - shift_out_file=dict(extensions=None), - unwarped_file=dict(extensions=None), - warped_file=dict(extensions=None), + fmap_out_file=dict( + extensions=None, + ), + shift_out_file=dict( + extensions=None, + ), + unwarped_file=dict( + extensions=None, + ), + warped_file=dict( + extensions=None, + ), ) outputs = FUGUE.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py b/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py index a759c16802..8531fe17c4 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py +++ b/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py @@ -4,9 +4,18 @@ def test_FeatureExtractor_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - mel_ica=dict(argstr="%s", copyfile=False, position=-1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + mel_ica=dict( + argstr="%s", + copyfile=False, + position=-1, + ), ) inputs = FeatureExtractor.input_spec() @@ -16,7 +25,13 @@ def test_FeatureExtractor_inputs(): def test_FeatureExtractor_outputs(): - output_map = dict(mel_ica=dict(argstr="%s", copyfile=False, position=-1)) + output_map = dict( + mel_ica=dict( + argstr="%s", + copyfile=False, + position=-1, + ), + ) outputs = FeatureExtractor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py b/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py index ef5b798656..e4826db355 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py +++ b/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py @@ -4,23 +4,55 @@ def test_FilterRegressor_inputs(): input_map = dict( - args=dict(argstr="%s"), - design_file=dict(argstr="-d %s", extensions=None, mandatory=True, position=3), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + design_file=dict( + argstr="-d %s", + extensions=None, + mandatory=True, + position=3, + ), + environ=dict( + nohash=True, + usedefault=True, + ), filter_all=dict( - argstr="-f '%s'", mandatory=True, position=4, xor=["filter_columns"] + argstr="-f '%s'", + mandatory=True, + position=4, + xor=["filter_columns"], ), filter_columns=dict( - argstr="-f '%s'", mandatory=True, position=4, xor=["filter_all"] + argstr="-f '%s'", + mandatory=True, + position=4, + xor=["filter_all"], + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + position=1, + ), + mask=dict( + argstr="-m %s", + extensions=None, ), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=1), - mask=dict(argstr="-m %s", extensions=None), out_file=dict( - argstr="-o %s", extensions=None, genfile=True, hash_files=False, position=2 + argstr="-o %s", + extensions=None, + genfile=True, + hash_files=False, + position=2, + ), + out_vnscales=dict( + argstr="--out_vnscales", ), - out_vnscales=dict(argstr="--out_vnscales"), output_type=dict(), - var_norm=dict(argstr="--vn"), + var_norm=dict( + argstr="--vn", + ), ) inputs = FilterRegressor.input_spec() 
@@ -30,7 +62,11 @@ def test_FilterRegressor_inputs(): def test_FilterRegressor_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = FilterRegressor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py b/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py index c11acc3dbc..14b3bbb8da 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py +++ b/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py @@ -4,11 +4,24 @@ def test_FindTheBiggest_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_files=dict(argstr="%s", mandatory=True, position=0), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr="%s", + mandatory=True, + position=0, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=2 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=2, ), output_type=dict(), ) @@ -20,7 +33,12 @@ def test_FindTheBiggest_inputs(): def test_FindTheBiggest_outputs(): - output_map = dict(out_file=dict(argstr="%s", extensions=None)) + output_map = dict( + out_file=dict( + argstr="%s", + extensions=None, + ), + ) outputs = FindTheBiggest.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_GLM.py b/nipype/interfaces/fsl/tests/test_auto_GLM.py index c9b7accb30..63105f128d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_GLM.py +++ b/nipype/interfaces/fsl/tests/test_auto_GLM.py @@ -4,19 +4,57 @@ def test_GLM_inputs(): input_map = dict( - args=dict(argstr="%s"), - contrasts=dict(argstr="-c %s", extensions=None), - dat_norm=dict(argstr="--dat_norm"), - demean=dict(argstr="--demean"), - des_norm=dict(argstr="--des_norm"), - design=dict(argstr="-d %s", extensions=None, mandatory=True, position=2), - dof=dict(argstr="--dof=%d"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=1), - mask=dict(argstr="-m %s", extensions=None), - out_cope=dict(argstr="--out_cope=%s", extensions=None), - out_data_name=dict(argstr="--out_data=%s", extensions=None), - out_f_name=dict(argstr="--out_f=%s", extensions=None), + args=dict( + argstr="%s", + ), + contrasts=dict( + argstr="-c %s", + extensions=None, + ), + dat_norm=dict( + argstr="--dat_norm", + ), + demean=dict( + argstr="--demean", + ), + des_norm=dict( + argstr="--des_norm", + ), + design=dict( + argstr="-d %s", + extensions=None, + mandatory=True, + position=2, + ), + dof=dict( + argstr="--dof=%d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + position=1, + ), + mask=dict( + argstr="-m %s", + extensions=None, + ), + out_cope=dict( + argstr="--out_cope=%s", + extensions=None, + ), + out_data_name=dict( + argstr="--out_data=%s", + extensions=None, + ), + out_f_name=dict( + argstr="--out_f=%s", + extensions=None, + ), out_file=dict( argstr="-o %s", extensions=None, @@ -25,16 +63,42 @@ def test_GLM_inputs(): name_template="%s_glm", position=3, ), - out_p_name=dict(argstr="--out_p=%s", extensions=None), - out_pf_name=dict(argstr="--out_pf=%s", extensions=None), - out_res_name=dict(argstr="--out_res=%s", extensions=None), - out_sigsq_name=dict(argstr="--out_sigsq=%s", extensions=None), - 
out_t_name=dict(argstr="--out_t=%s", extensions=None), - out_varcb_name=dict(argstr="--out_varcb=%s", extensions=None), - out_vnscales_name=dict(argstr="--out_vnscales=%s", extensions=None), - out_z_name=dict(argstr="--out_z=%s", extensions=None), + out_p_name=dict( + argstr="--out_p=%s", + extensions=None, + ), + out_pf_name=dict( + argstr="--out_pf=%s", + extensions=None, + ), + out_res_name=dict( + argstr="--out_res=%s", + extensions=None, + ), + out_sigsq_name=dict( + argstr="--out_sigsq=%s", + extensions=None, + ), + out_t_name=dict( + argstr="--out_t=%s", + extensions=None, + ), + out_varcb_name=dict( + argstr="--out_varcb=%s", + extensions=None, + ), + out_vnscales_name=dict( + argstr="--out_vnscales=%s", + extensions=None, + ), + out_z_name=dict( + argstr="--out_z=%s", + extensions=None, + ), output_type=dict(), - var_norm=dict(argstr="--vn"), + var_norm=dict( + argstr="--vn", + ), ) inputs = GLM.input_spec() @@ -48,7 +112,9 @@ def test_GLM_outputs(): out_cope=dict(), out_data=dict(), out_f=dict(), - out_file=dict(extensions=None), + out_file=dict( + extensions=None, + ), out_p=dict(), out_pf=dict(), out_res=dict(), diff --git a/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py b/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py index 44dce0bf50..b49813e24d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py +++ b/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py @@ -4,25 +4,64 @@ def test_ICA_AROMA_inputs(): input_map = dict( - TR=dict(argstr="-tr %.3f"), - args=dict(argstr="%s"), - denoise_type=dict(argstr="-den %s", mandatory=True, usedefault=True), - dim=dict(argstr="-dim %d"), - environ=dict(nohash=True, usedefault=True), + TR=dict( + argstr="-tr %.3f", + ), + args=dict( + argstr="%s", + ), + denoise_type=dict( + argstr="-den %s", + mandatory=True, + usedefault=True, + ), + dim=dict( + argstr="-dim %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), feat_dir=dict( argstr="-feat %s", mandatory=True, xor=["in_file", "mat_file", "fnirt_warp_file", "motion_parameters"], ), - fnirt_warp_file=dict(argstr="-warp %s", extensions=None, xor=["feat_dir"]), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True, xor=["feat_dir"]), - mask=dict(argstr="-m %s", extensions=None, xor=["feat_dir"]), - mat_file=dict(argstr="-affmat %s", extensions=None, xor=["feat_dir"]), - melodic_dir=dict(argstr="-meldir %s"), + fnirt_warp_file=dict( + argstr="-warp %s", + extensions=None, + xor=["feat_dir"], + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + xor=["feat_dir"], + ), + mask=dict( + argstr="-m %s", + extensions=None, + xor=["feat_dir"], + ), + mat_file=dict( + argstr="-affmat %s", + extensions=None, + xor=["feat_dir"], + ), + melodic_dir=dict( + argstr="-meldir %s", + ), motion_parameters=dict( - argstr="-mc %s", extensions=None, mandatory=True, xor=["feat_dir"] + argstr="-mc %s", + extensions=None, + mandatory=True, + xor=["feat_dir"], + ), + out_dir=dict( + argstr="-o %s", + mandatory=True, + usedefault=True, ), - out_dir=dict(argstr="-o %s", mandatory=True, usedefault=True), ) inputs = ICA_AROMA.input_spec() @@ -33,8 +72,12 @@ def test_ICA_AROMA_inputs(): def test_ICA_AROMA_outputs(): output_map = dict( - aggr_denoised_file=dict(extensions=None), - nonaggr_denoised_file=dict(extensions=None), + aggr_denoised_file=dict( + extensions=None, + ), + nonaggr_denoised_file=dict( + extensions=None, + ), out_dir=dict(), ) outputs = ICA_AROMA.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py 
b/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py index dcd3829f03..d2c4737d65 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py @@ -4,15 +4,42 @@ def test_ImageMaths_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1), - in_file2=dict(argstr="%s", extensions=None, position=3), - mask_file=dict(argstr="-mas %s", extensions=None), - op_string=dict(argstr="%s", position=2), - out_data_type=dict(argstr="-odt %s", position=-1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + in_file2=dict( + argstr="%s", + extensions=None, + position=3, + ), + mask_file=dict( + argstr="-mas %s", + extensions=None, + ), + op_string=dict( + argstr="%s", + position=2, + ), + out_data_type=dict( + argstr="-odt %s", + position=-1, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, ), output_type=dict(), suffix=dict(), @@ -25,7 +52,11 @@ def test_ImageMaths_inputs(): def test_ImageMaths_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ImageMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py b/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py index 60846b9c3a..b050d8f50b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py +++ b/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py @@ -4,19 +4,52 @@ def test_ImageMeants_inputs(): input_map = dict( - args=dict(argstr="%s"), - eig=dict(argstr="--eig"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=0), - mask=dict(argstr="-m %s", extensions=None), - nobin=dict(argstr="--no_bin"), - order=dict(argstr="--order=%d", usedefault=True), - out_file=dict(argstr="-o %s", extensions=None, genfile=True, hash_files=False), + args=dict( + argstr="%s", + ), + eig=dict( + argstr="--eig", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + position=0, + ), + mask=dict( + argstr="-m %s", + extensions=None, + ), + nobin=dict( + argstr="--no_bin", + ), + order=dict( + argstr="--order=%d", + usedefault=True, + ), + out_file=dict( + argstr="-o %s", + extensions=None, + genfile=True, + hash_files=False, + ), output_type=dict(), - show_all=dict(argstr="--showall"), - spatial_coord=dict(argstr="-c %s"), - transpose=dict(argstr="--transpose"), - use_mm=dict(argstr="--usemm"), + show_all=dict( + argstr="--showall", + ), + spatial_coord=dict( + argstr="-c %s", + ), + transpose=dict( + argstr="--transpose", + ), + use_mm=dict( + argstr="--usemm", + ), ) inputs = ImageMeants.input_spec() @@ -26,7 +59,11 @@ def test_ImageMeants_inputs(): def test_ImageMeants_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ImageMeants.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageStats.py b/nipype/interfaces/fsl/tests/test_auto_ImageStats.py index 54e44b2b61..e4ddf5f06d 100644 
--- a/nipype/interfaces/fsl/tests/test_auto_ImageStats.py +++ b/nipype/interfaces/fsl/tests/test_auto_ImageStats.py @@ -4,14 +4,38 @@ def test_ImageStats_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=3), - index_mask_file=dict(argstr="-K %s", extensions=None, position=2), - mask_file=dict(argstr="", extensions=None), - op_string=dict(argstr="%s", mandatory=True, position=4), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=3, + ), + index_mask_file=dict( + argstr="-K %s", + extensions=None, + position=2, + ), + mask_file=dict( + argstr="", + extensions=None, + ), + op_string=dict( + argstr="%s", + mandatory=True, + position=4, + ), output_type=dict(), - split_4d=dict(argstr="-t", position=1), + split_4d=dict( + argstr="-t", + position=1, + ), ) inputs = ImageStats.input_spec() @@ -21,7 +45,9 @@ def test_ImageStats_inputs(): def test_ImageStats_outputs(): - output_map = dict(out_stat=dict()) + output_map = dict( + out_stat=dict(), + ) outputs = ImageStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_InvWarp.py b/nipype/interfaces/fsl/tests/test_auto_InvWarp.py index 688e46da70..1dba5e578a 100644 --- a/nipype/interfaces/fsl/tests/test_auto_InvWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_InvWarp.py @@ -4,9 +4,17 @@ def test_InvWarp_inputs(): input_map = dict( - absolute=dict(argstr="--abs", xor=["relative"]), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + absolute=dict( + argstr="--abs", + xor=["relative"], + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), inverse_warp=dict( argstr="--out=%s", extensions=None, @@ -14,15 +22,36 @@ def test_InvWarp_inputs(): name_source=["warp"], name_template="%s_inverse", ), - jacobian_max=dict(argstr="--jmax=%f"), - jacobian_min=dict(argstr="--jmin=%f"), - niter=dict(argstr="--niter=%d"), - noconstraint=dict(argstr="--noconstraint"), + jacobian_max=dict( + argstr="--jmax=%f", + ), + jacobian_min=dict( + argstr="--jmin=%f", + ), + niter=dict( + argstr="--niter=%d", + ), + noconstraint=dict( + argstr="--noconstraint", + ), output_type=dict(), - reference=dict(argstr="--ref=%s", extensions=None, mandatory=True), - regularise=dict(argstr="--regularise=%f"), - relative=dict(argstr="--rel", xor=["absolute"]), - warp=dict(argstr="--warp=%s", extensions=None, mandatory=True), + reference=dict( + argstr="--ref=%s", + extensions=None, + mandatory=True, + ), + regularise=dict( + argstr="--regularise=%f", + ), + relative=dict( + argstr="--rel", + xor=["absolute"], + ), + warp=dict( + argstr="--warp=%s", + extensions=None, + mandatory=True, + ), ) inputs = InvWarp.input_spec() @@ -32,7 +61,11 @@ def test_InvWarp_inputs(): def test_InvWarp_outputs(): - output_map = dict(inverse_warp=dict(extensions=None)) + output_map = dict( + inverse_warp=dict( + extensions=None, + ), + ) outputs = InvWarp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py b/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py index 0597d182dd..f9c5432d40 100644 --- a/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py +++ b/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py @@ -4,18 +4,51 @@ def test_IsotropicSmooth_inputs(): 
input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - fwhm=dict(argstr="-s %.5f", mandatory=True, position=4, xor=["sigma"]), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - internal_datatype=dict(argstr="-dt %s", position=1), - nan2zeros=dict(argstr="-nan", position=3), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fwhm=dict( + argstr="-s %.5f", + mandatory=True, + position=4, + xor=["sigma"], + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), - sigma=dict(argstr="-s %.5f", mandatory=True, position=4, xor=["fwhm"]), + sigma=dict( + argstr="-s %.5f", + mandatory=True, + position=4, + xor=["fwhm"], + ), ) inputs = IsotropicSmooth.input_spec() @@ -25,7 +58,11 @@ def test_IsotropicSmooth_inputs(): def test_IsotropicSmooth_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = IsotropicSmooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_L2Model.py b/nipype/interfaces/fsl/tests/test_auto_L2Model.py index daa465ea3a..c4547fc7a2 100644 --- a/nipype/interfaces/fsl/tests/test_auto_L2Model.py +++ b/nipype/interfaces/fsl/tests/test_auto_L2Model.py @@ -3,7 +3,11 @@ def test_L2Model_inputs(): - input_map = dict(num_copes=dict(mandatory=True)) + input_map = dict( + num_copes=dict( + mandatory=True, + ), + ) inputs = L2Model.input_spec() for key, metadata in list(input_map.items()): @@ -13,9 +17,15 @@ def test_L2Model_inputs(): def test_L2Model_outputs(): output_map = dict( - design_con=dict(extensions=None), - design_grp=dict(extensions=None), - design_mat=dict(extensions=None), + design_con=dict( + extensions=None, + ), + design_grp=dict( + extensions=None, + ), + design_mat=dict( + extensions=None, + ), ) outputs = L2Model.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Level1Design.py b/nipype/interfaces/fsl/tests/test_auto_Level1Design.py index c941ef0687..5a43989601 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Level1Design.py +++ b/nipype/interfaces/fsl/tests/test_auto_Level1Design.py @@ -4,12 +4,22 @@ def test_Level1Design_inputs(): input_map = dict( - bases=dict(mandatory=True), + bases=dict( + mandatory=True, + ), contrasts=dict(), - interscan_interval=dict(mandatory=True), - model_serial_correlations=dict(mandatory=True), - orthogonalization=dict(usedefault=True), - session_info=dict(mandatory=True), + interscan_interval=dict( + mandatory=True, + ), + model_serial_correlations=dict( + mandatory=True, + ), + orthogonalization=dict( + usedefault=True, + ), + session_info=dict( + mandatory=True, + ), ) inputs = Level1Design.input_spec() @@ -19,7 +29,10 @@ def test_Level1Design_inputs(): def test_Level1Design_outputs(): - output_map = dict(ev_files=dict(), fsf_files=dict()) + output_map = dict( + ev_files=dict(), + fsf_files=dict(), + ) outputs = Level1Design.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py b/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py index 98f38d7186..768c52a7f4 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py +++ b/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py @@ -4,31 +4,82 @@ def test_MCFLIRT_inputs(): input_map = dict( - args=dict(argstr="%s"), - bins=dict(argstr="-bins %d"), - cost=dict(argstr="-cost %s"), - dof=dict(argstr="-dof %d"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=0), - init=dict(argstr="-init %s", extensions=None), - interpolation=dict(argstr="-%s_final"), - mean_vol=dict(argstr="-meanvol"), + args=dict( + argstr="%s", + ), + bins=dict( + argstr="-bins %d", + ), + cost=dict( + argstr="-cost %s", + ), + dof=dict( + argstr="-dof %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + position=0, + ), + init=dict( + argstr="-init %s", + extensions=None, + ), + interpolation=dict( + argstr="-%s_final", + ), + mean_vol=dict( + argstr="-meanvol", + ), out_file=dict( - argstr="-out %s", extensions=None, genfile=True, hash_files=False + argstr="-out %s", + extensions=None, + genfile=True, + hash_files=False, ), output_type=dict(), - ref_file=dict(argstr="-reffile %s", extensions=None), - ref_vol=dict(argstr="-refvol %d"), - rotation=dict(argstr="-rotation %d"), - save_mats=dict(argstr="-mats"), - save_plots=dict(argstr="-plots"), - save_rms=dict(argstr="-rmsabs -rmsrel"), - scaling=dict(argstr="-scaling %.2f"), - smooth=dict(argstr="-smooth %.2f"), - stages=dict(argstr="-stages %d"), - stats_imgs=dict(argstr="-stats"), - use_contour=dict(argstr="-edge"), - use_gradient=dict(argstr="-gdt"), + ref_file=dict( + argstr="-reffile %s", + extensions=None, + ), + ref_vol=dict( + argstr="-refvol %d", + ), + rotation=dict( + argstr="-rotation %d", + ), + save_mats=dict( + argstr="-mats", + ), + save_plots=dict( + argstr="-plots", + ), + save_rms=dict( + argstr="-rmsabs -rmsrel", + ), + scaling=dict( + argstr="-scaling %.2f", + ), + smooth=dict( + argstr="-smooth %.2f", + ), + stages=dict( + argstr="-stages %d", + ), + stats_imgs=dict( + argstr="-stats", + ), + use_contour=dict( + argstr="-edge", + ), + use_gradient=dict( + argstr="-gdt", + ), ) inputs = MCFLIRT.input_spec() @@ -40,12 +91,22 @@ def test_MCFLIRT_inputs(): def test_MCFLIRT_outputs(): output_map = dict( mat_file=dict(), - mean_img=dict(extensions=None), - out_file=dict(extensions=None), - par_file=dict(extensions=None), + mean_img=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + par_file=dict( + extensions=None, + ), rms_files=dict(), - std_img=dict(extensions=None), - variance_img=dict(extensions=None), + std_img=dict( + extensions=None, + ), + variance_img=dict( + extensions=None, + ), ) outputs = MCFLIRT.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_MELODIC.py b/nipype/interfaces/fsl/tests/test_auto_MELODIC.py index 64fe88bcdd..db2406e30f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MELODIC.py +++ b/nipype/interfaces/fsl/tests/test_auto_MELODIC.py @@ -4,57 +4,171 @@ def test_MELODIC_inputs(): input_map = dict( - ICs=dict(argstr="--ICs=%s", extensions=None), - approach=dict(argstr="-a %s"), - args=dict(argstr="%s"), - bg_image=dict(argstr="--bgimage=%s", extensions=None), - bg_threshold=dict(argstr="--bgthreshold=%f"), - cov_weight=dict(argstr="--covarweight=%f"), - dim=dict(argstr="-d %d"), - dim_est=dict(argstr="--dimest=%s"), - 
environ=dict(nohash=True, usedefault=True), - epsilon=dict(argstr="--eps=%f"), - epsilonS=dict(argstr="--epsS=%f"), - in_files=dict(argstr="-i %s", mandatory=True, position=0, sep=","), - log_power=dict(argstr="--logPower"), - mask=dict(argstr="-m %s", extensions=None), - max_restart=dict(argstr="--maxrestart=%d"), - maxit=dict(argstr="--maxit=%d"), - migp=dict(argstr="--migp"), - migpN=dict(argstr="--migpN %d"), - migp_factor=dict(argstr="--migp_factor %d"), - migp_shuffle=dict(argstr="--migp_shuffle"), - mix=dict(argstr="--mix=%s", extensions=None), - mm_thresh=dict(argstr="--mmthresh=%f"), - no_bet=dict(argstr="--nobet"), - no_mask=dict(argstr="--nomask"), - no_mm=dict(argstr="--no_mm"), - non_linearity=dict(argstr="--nl=%s"), - num_ICs=dict(argstr="-n %d"), - out_all=dict(argstr="--Oall"), - out_dir=dict(argstr="-o %s", genfile=True), - out_mean=dict(argstr="--Omean"), - out_orig=dict(argstr="--Oorig"), - out_pca=dict(argstr="--Opca"), - out_stats=dict(argstr="--Ostats"), - out_unmix=dict(argstr="--Ounmix"), - out_white=dict(argstr="--Owhite"), + ICs=dict( + argstr="--ICs=%s", + extensions=None, + ), + approach=dict( + argstr="-a %s", + ), + args=dict( + argstr="%s", + ), + bg_image=dict( + argstr="--bgimage=%s", + extensions=None, + ), + bg_threshold=dict( + argstr="--bgthreshold=%f", + ), + cov_weight=dict( + argstr="--covarweight=%f", + ), + dim=dict( + argstr="-d %d", + ), + dim_est=dict( + argstr="--dimest=%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + epsilon=dict( + argstr="--eps=%f", + ), + epsilonS=dict( + argstr="--epsS=%f", + ), + in_files=dict( + argstr="-i %s", + mandatory=True, + position=0, + sep=",", + ), + log_power=dict( + argstr="--logPower", + ), + mask=dict( + argstr="-m %s", + extensions=None, + ), + max_restart=dict( + argstr="--maxrestart=%d", + ), + maxit=dict( + argstr="--maxit=%d", + ), + migp=dict( + argstr="--migp", + ), + migpN=dict( + argstr="--migpN %d", + ), + migp_factor=dict( + argstr="--migp_factor %d", + ), + migp_shuffle=dict( + argstr="--migp_shuffle", + ), + mix=dict( + argstr="--mix=%s", + extensions=None, + ), + mm_thresh=dict( + argstr="--mmthresh=%f", + ), + no_bet=dict( + argstr="--nobet", + ), + no_mask=dict( + argstr="--nomask", + ), + no_mm=dict( + argstr="--no_mm", + ), + non_linearity=dict( + argstr="--nl=%s", + ), + num_ICs=dict( + argstr="-n %d", + ), + out_all=dict( + argstr="--Oall", + ), + out_dir=dict( + argstr="-o %s", + genfile=True, + ), + out_mean=dict( + argstr="--Omean", + ), + out_orig=dict( + argstr="--Oorig", + ), + out_pca=dict( + argstr="--Opca", + ), + out_stats=dict( + argstr="--Ostats", + ), + out_unmix=dict( + argstr="--Ounmix", + ), + out_white=dict( + argstr="--Owhite", + ), output_type=dict(), - pbsc=dict(argstr="--pbsc"), - rem_cmp=dict(argstr="-f %d"), - remove_deriv=dict(argstr="--remove_deriv"), - report=dict(argstr="--report"), - report_maps=dict(argstr="--report_maps=%s"), - s_con=dict(argstr="--Scon=%s", extensions=None), - s_des=dict(argstr="--Sdes=%s", extensions=None), - sep_vn=dict(argstr="--sep_vn"), - sep_whiten=dict(argstr="--sep_whiten"), - smode=dict(argstr="--smode=%s", extensions=None), - t_con=dict(argstr="--Tcon=%s", extensions=None), - t_des=dict(argstr="--Tdes=%s", extensions=None), - tr_sec=dict(argstr="--tr=%f"), - update_mask=dict(argstr="--update_mask"), - var_norm=dict(argstr="--vn"), + pbsc=dict( + argstr="--pbsc", + ), + rem_cmp=dict( + argstr="-f %d", + ), + remove_deriv=dict( + argstr="--remove_deriv", + ), + report=dict( + argstr="--report", + ), + 
report_maps=dict( + argstr="--report_maps=%s", + ), + s_con=dict( + argstr="--Scon=%s", + extensions=None, + ), + s_des=dict( + argstr="--Sdes=%s", + extensions=None, + ), + sep_vn=dict( + argstr="--sep_vn", + ), + sep_whiten=dict( + argstr="--sep_whiten", + ), + smode=dict( + argstr="--smode=%s", + extensions=None, + ), + t_con=dict( + argstr="--Tcon=%s", + extensions=None, + ), + t_des=dict( + argstr="--Tdes=%s", + extensions=None, + ), + tr_sec=dict( + argstr="--tr=%f", + ), + update_mask=dict( + argstr="--update_mask", + ), + var_norm=dict( + argstr="--vn", + ), ) inputs = MELODIC.input_spec() @@ -64,7 +178,10 @@ def test_MELODIC_inputs(): def test_MELODIC_outputs(): - output_map = dict(out_dir=dict(), report_dir=dict()) + output_map = dict( + out_dir=dict(), + report_dir=dict(), + ) outputs = MELODIC.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py b/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py index 95172667f4..bfdb32146e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py +++ b/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py @@ -4,16 +4,42 @@ def test_MakeDyadicVectors_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - mask=dict(argstr="%s", extensions=None, position=2), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + mask=dict( + argstr="%s", + extensions=None, + position=2, + ), output=dict( - argstr="%s", extensions=None, hash_files=False, position=3, usedefault=True + argstr="%s", + extensions=None, + hash_files=False, + position=3, + usedefault=True, ), output_type=dict(), - perc=dict(argstr="%f", position=4), - phi_vol=dict(argstr="%s", extensions=None, mandatory=True, position=1), - theta_vol=dict(argstr="%s", extensions=None, mandatory=True, position=0), + perc=dict( + argstr="%f", + position=4, + ), + phi_vol=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + theta_vol=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), ) inputs = MakeDyadicVectors.input_spec() @@ -23,7 +49,14 @@ def test_MakeDyadicVectors_inputs(): def test_MakeDyadicVectors_outputs(): - output_map = dict(dispersion=dict(extensions=None), dyads=dict(extensions=None)) + output_map = dict( + dispersion=dict( + extensions=None, + ), + dyads=dict( + extensions=None, + ), + ) outputs = MakeDyadicVectors.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py b/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py index 9cab619ecd..e14e4a4005 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py +++ b/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py @@ -4,15 +4,38 @@ def test_MathsCommand_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - internal_datatype=dict(argstr="-dt %s", position=1), - nan2zeros=dict(argstr="-nan", position=3), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 + argstr="%s", + 
extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = MathsCommand.input_spec() @@ -23,7 +46,11 @@ def test_MathsCommand_inputs(): def test_MathsCommand_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MathsCommand.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MaxImage.py b/nipype/interfaces/fsl/tests/test_auto_MaxImage.py index 8ce0e9b7df..f96f931fcf 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MaxImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MaxImage.py @@ -4,16 +4,43 @@ def test_MaxImage_inputs(): input_map = dict( - args=dict(argstr="%s"), - dimension=dict(argstr="-%smax", position=4, usedefault=True), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - internal_datatype=dict(argstr="-dt %s", position=1), - nan2zeros=dict(argstr="-nan", position=3), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="-%smax", + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = MaxImage.input_spec() @@ -24,7 +51,11 @@ def test_MaxImage_inputs(): def test_MaxImage_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MaxImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py b/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py index 85f251bb15..30ada25d79 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py @@ -4,16 +4,43 @@ def test_MaxnImage_inputs(): input_map = dict( - args=dict(argstr="%s"), - dimension=dict(argstr="-%smaxn", position=4, usedefault=True), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - internal_datatype=dict(argstr="-dt %s", position=1), - nan2zeros=dict(argstr="-nan", position=3), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="-%smaxn", + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = MaxnImage.input_spec() @@ -24,7 +51,11 @@ def 
test_MaxnImage_inputs(): def test_MaxnImage_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MaxnImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MeanImage.py b/nipype/interfaces/fsl/tests/test_auto_MeanImage.py index 5affe0d8f0..e29104476c 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MeanImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MeanImage.py @@ -4,16 +4,43 @@ def test_MeanImage_inputs(): input_map = dict( - args=dict(argstr="%s"), - dimension=dict(argstr="-%smean", position=4, usedefault=True), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - internal_datatype=dict(argstr="-dt %s", position=1), - nan2zeros=dict(argstr="-nan", position=3), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="-%smean", + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = MeanImage.input_spec() @@ -24,7 +51,11 @@ def test_MeanImage_inputs(): def test_MeanImage_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MeanImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MedianImage.py b/nipype/interfaces/fsl/tests/test_auto_MedianImage.py index 89b4fa63cd..7c8052fd31 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MedianImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MedianImage.py @@ -4,16 +4,43 @@ def test_MedianImage_inputs(): input_map = dict( - args=dict(argstr="%s"), - dimension=dict(argstr="-%smedian", position=4, usedefault=True), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - internal_datatype=dict(argstr="-dt %s", position=1), - nan2zeros=dict(argstr="-nan", position=3), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="-%smedian", + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = MedianImage.input_spec() @@ -24,7 +51,11 @@ def test_MedianImage_inputs(): def test_MedianImage_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MedianImage.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Merge.py b/nipype/interfaces/fsl/tests/test_auto_Merge.py index 5b5db8d3c6..847f9b7bd3 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Merge.py +++ b/nipype/interfaces/fsl/tests/test_auto_Merge.py @@ -4,10 +4,23 @@ def test_Merge_inputs(): input_map = dict( - args=dict(argstr="%s"), - dimension=dict(argstr="-%s", mandatory=True, position=0), - environ=dict(nohash=True, usedefault=True), - in_files=dict(argstr="%s", mandatory=True, position=2), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="-%s", + mandatory=True, + position=0, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr="%s", + mandatory=True, + position=2, + ), merged_file=dict( argstr="%s", extensions=None, @@ -17,7 +30,10 @@ def test_Merge_inputs(): position=1, ), output_type=dict(), - tr=dict(argstr="%.2f", position=-1), + tr=dict( + argstr="%.2f", + position=-1, + ), ) inputs = Merge.input_spec() @@ -27,7 +43,11 @@ def test_Merge_inputs(): def test_Merge_outputs(): - output_map = dict(merged_file=dict(extensions=None)) + output_map = dict( + merged_file=dict( + extensions=None, + ), + ) outputs = Merge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MinImage.py b/nipype/interfaces/fsl/tests/test_auto_MinImage.py index 7cf244a38d..bde76c1afc 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MinImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MinImage.py @@ -4,16 +4,43 @@ def test_MinImage_inputs(): input_map = dict( - args=dict(argstr="%s"), - dimension=dict(argstr="-%smin", position=4, usedefault=True), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - internal_datatype=dict(argstr="-dt %s", position=1), - nan2zeros=dict(argstr="-nan", position=3), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="-%smin", + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = MinImage.input_spec() @@ -24,7 +51,11 @@ def test_MinImage_inputs(): def test_MinImage_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MinImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py b/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py index 7a9b0a2e2e..9a5773336f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py +++ b/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py @@ -4,13 +4,31 @@ def test_MotionOutliers_inputs(): input_map = dict( - args=dict(argstr="%s"), - dummy=dict(argstr="--dummy=%d"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True), - mask=dict(argstr="-m %s", extensions=None), - metric=dict(argstr="--%s"), - no_motion_correction=dict(argstr="--nomoco"), 
+ args=dict( + argstr="%s", + ), + dummy=dict( + argstr="--dummy=%d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + mask=dict( + argstr="-m %s", + extensions=None, + ), + metric=dict( + argstr="--%s", + ), + no_motion_correction=dict( + argstr="--nomoco", + ), out_file=dict( argstr="-o %s", extensions=None, @@ -36,7 +54,9 @@ def test_MotionOutliers_inputs(): name_template="%s_metrics.txt", ), output_type=dict(), - threshold=dict(argstr="--thresh=%g"), + threshold=dict( + argstr="--thresh=%g", + ), ) inputs = MotionOutliers.input_spec() @@ -47,9 +67,15 @@ def test_MotionOutliers_inputs(): def test_MotionOutliers_outputs(): output_map = dict( - out_file=dict(extensions=None), - out_metric_plot=dict(extensions=None), - out_metric_values=dict(extensions=None), + out_file=dict( + extensions=None, + ), + out_metric_plot=dict( + extensions=None, + ), + out_metric_values=dict( + extensions=None, + ), ) outputs = MotionOutliers.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py b/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py index 4f731da4bd..95de40d023 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py @@ -4,17 +4,46 @@ def test_MultiImageMaths_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - internal_datatype=dict(argstr="-dt %s", position=1), - nan2zeros=dict(argstr="-nan", position=3), - op_string=dict(argstr="%s", mandatory=True, position=4), - operand_files=dict(mandatory=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), + op_string=dict( + argstr="%s", + mandatory=True, + position=4, + ), + operand_files=dict( + mandatory=True, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = MultiImageMaths.input_spec() @@ -25,7 +54,11 @@ def test_MultiImageMaths_inputs(): def test_MultiImageMaths_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MultiImageMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py b/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py index 30d76b56df..cae5e90cd4 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py +++ b/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py @@ -4,7 +4,13 @@ def test_MultipleRegressDesign_inputs(): input_map = dict( - contrasts=dict(mandatory=True), groups=dict(), regressors=dict(mandatory=True) + contrasts=dict( + mandatory=True, + ), + groups=dict(), + regressors=dict( + mandatory=True, + ), ) inputs = MultipleRegressDesign.input_spec() @@ -15,10 +21,18 @@ def test_MultipleRegressDesign_inputs(): def test_MultipleRegressDesign_outputs(): output_map = dict( - 
design_con=dict(extensions=None), - design_fts=dict(extensions=None), - design_grp=dict(extensions=None), - design_mat=dict(extensions=None), + design_con=dict( + extensions=None, + ), + design_fts=dict( + extensions=None, + ), + design_grp=dict( + extensions=None, + ), + design_mat=dict( + extensions=None, + ), ) outputs = MultipleRegressDesign.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Overlay.py b/nipype/interfaces/fsl/tests/test_auto_Overlay.py index 27e4155245..22c4f08a44 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Overlay.py +++ b/nipype/interfaces/fsl/tests/test_auto_Overlay.py @@ -4,21 +4,31 @@ def test_Overlay_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), auto_thresh_bg=dict( argstr="-a", mandatory=True, position=5, xor=("auto_thresh_bg", "full_bg_range", "bg_thresh"), ), - background_image=dict(argstr="%s", extensions=None, mandatory=True, position=4), + background_image=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=4, + ), bg_thresh=dict( argstr="%.3f %.3f", mandatory=True, position=5, xor=("auto_thresh_bg", "full_bg_range", "bg_thresh"), ), - environ=dict(nohash=True, usedefault=True), + environ=dict( + nohash=True, + usedefault=True, + ), full_bg_range=dict( argstr="-A", mandatory=True, @@ -26,19 +36,53 @@ def test_Overlay_inputs(): xor=("auto_thresh_bg", "full_bg_range", "bg_thresh"), ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-1 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-1, + ), + out_type=dict( + argstr="%s", + position=2, + usedefault=True, ), - out_type=dict(argstr="%s", position=2, usedefault=True), output_type=dict(), - show_negative_stats=dict(argstr="%s", position=8, xor=["stat_image2"]), - stat_image=dict(argstr="%s", extensions=None, mandatory=True, position=6), + show_negative_stats=dict( + argstr="%s", + position=8, + xor=["stat_image2"], + ), + stat_image=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=6, + ), stat_image2=dict( - argstr="%s", extensions=None, position=9, xor=["show_negative_stats"] + argstr="%s", + extensions=None, + position=9, + xor=["show_negative_stats"], + ), + stat_thresh=dict( + argstr="%.2f %.2f", + mandatory=True, + position=7, + ), + stat_thresh2=dict( + argstr="%.2f %.2f", + position=10, + ), + transparency=dict( + argstr="%s", + position=1, + usedefault=True, + ), + use_checkerboard=dict( + argstr="-c", + position=3, ), - stat_thresh=dict(argstr="%.2f %.2f", mandatory=True, position=7), - stat_thresh2=dict(argstr="%.2f %.2f", position=10), - transparency=dict(argstr="%s", position=1, usedefault=True), - use_checkerboard=dict(argstr="-c", position=3), ) inputs = Overlay.input_spec() @@ -48,7 +92,11 @@ def test_Overlay_inputs(): def test_Overlay_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Overlay.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py b/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py index 90e9caa71a..0194526c70 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py +++ b/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py @@ -4,25 +4,43 @@ def test_PRELUDE_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), complex_phase_file=dict( argstr="--complex=%s", extensions=None, mandatory=True, xor=["magnitude_file", "phase_file"], 
), - end=dict(argstr="--end=%d"), - environ=dict(nohash=True, usedefault=True), - label_file=dict(argstr="--labels=%s", extensions=None, hash_files=False), - labelprocess2d=dict(argstr="--labelslices"), + end=dict( + argstr="--end=%d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + label_file=dict( + argstr="--labels=%s", + extensions=None, + hash_files=False, + ), + labelprocess2d=dict( + argstr="--labelslices", + ), magnitude_file=dict( argstr="--abs=%s", extensions=None, mandatory=True, xor=["complex_phase_file"], ), - mask_file=dict(argstr="--mask=%s", extensions=None), - num_partitions=dict(argstr="--numphasesplit=%d"), + mask_file=dict( + argstr="--mask=%s", + extensions=None, + ), + num_partitions=dict( + argstr="--numphasesplit=%d", + ), output_type=dict(), phase_file=dict( argstr="--phase=%s", @@ -30,15 +48,38 @@ def test_PRELUDE_inputs(): mandatory=True, xor=["complex_phase_file"], ), - process2d=dict(argstr="--slices", xor=["labelprocess2d"]), - process3d=dict(argstr="--force3D", xor=["labelprocess2d", "process2d"]), - rawphase_file=dict(argstr="--rawphase=%s", extensions=None, hash_files=False), - removeramps=dict(argstr="--removeramps"), - savemask_file=dict(argstr="--savemask=%s", extensions=None, hash_files=False), - start=dict(argstr="--start=%d"), - threshold=dict(argstr="--thresh=%.10f"), + process2d=dict( + argstr="--slices", + xor=["labelprocess2d"], + ), + process3d=dict( + argstr="--force3D", + xor=["labelprocess2d", "process2d"], + ), + rawphase_file=dict( + argstr="--rawphase=%s", + extensions=None, + hash_files=False, + ), + removeramps=dict( + argstr="--removeramps", + ), + savemask_file=dict( + argstr="--savemask=%s", + extensions=None, + hash_files=False, + ), + start=dict( + argstr="--start=%d", + ), + threshold=dict( + argstr="--thresh=%.10f", + ), unwrapped_phase_file=dict( - argstr="--unwrap=%s", extensions=None, genfile=True, hash_files=False + argstr="--unwrap=%s", + extensions=None, + genfile=True, + hash_files=False, ), ) inputs = PRELUDE.input_spec() @@ -49,7 +90,11 @@ def test_PRELUDE_inputs(): def test_PRELUDE_outputs(): - output_map = dict(unwrapped_phase_file=dict(extensions=None)) + output_map = dict( + unwrapped_phase_file=dict( + extensions=None, + ), + ) outputs = PRELUDE.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py b/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py index 1cc1217b82..3a3ae14a78 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py @@ -4,18 +4,48 @@ def test_PercentileImage_inputs(): input_map = dict( - args=dict(argstr="%s"), - dimension=dict(argstr="-%sperc", position=4, usedefault=True), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - internal_datatype=dict(argstr="-dt %s", position=1), - nan2zeros=dict(argstr="-nan", position=3), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="-%sperc", + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), 
+ output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), - perc=dict(argstr="%f", position=5), + perc=dict( + argstr="%f", + position=5, + ), ) inputs = PercentileImage.input_spec() @@ -25,7 +55,11 @@ def test_PercentileImage_inputs(): def test_PercentileImage_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = PercentileImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py b/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py index 75a846aeb2..8cf1d2e214 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py +++ b/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py @@ -4,14 +4,35 @@ def test_PlotMotionParams_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", mandatory=True, position=1), - in_source=dict(mandatory=True), - out_file=dict(argstr="-o %s", extensions=None, genfile=True, hash_files=False), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + mandatory=True, + position=1, + ), + in_source=dict( + mandatory=True, + ), + out_file=dict( + argstr="-o %s", + extensions=None, + genfile=True, + hash_files=False, + ), output_type=dict(), - plot_size=dict(argstr="%s"), - plot_type=dict(argstr="%s", mandatory=True), + plot_size=dict( + argstr="%s", + ), + plot_type=dict( + argstr="%s", + mandatory=True, + ), ) inputs = PlotMotionParams.input_spec() @@ -21,7 +42,11 @@ def test_PlotMotionParams_inputs(): def test_PlotMotionParams_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = PlotMotionParams.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py b/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py index 703a347792..5b4ebc46aa 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py +++ b/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py @@ -4,24 +4,72 @@ def test_PlotTimeSeries_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", mandatory=True, position=1), - labels=dict(argstr="%s"), - legend_file=dict(argstr="--legend=%s", extensions=None), - out_file=dict(argstr="-o %s", extensions=None, genfile=True, hash_files=False), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + mandatory=True, + position=1, + ), + labels=dict( + argstr="%s", + ), + legend_file=dict( + argstr="--legend=%s", + extensions=None, + ), + out_file=dict( + argstr="-o %s", + extensions=None, + genfile=True, + hash_files=False, + ), output_type=dict(), - plot_finish=dict(argstr="--finish=%d", xor=("plot_range",)), - plot_range=dict(argstr="%s", xor=("plot_start", "plot_finish")), - plot_size=dict(argstr="%s"), - plot_start=dict(argstr="--start=%d", xor=("plot_range",)), - sci_notation=dict(argstr="--sci"), - title=dict(argstr="%s"), - x_precision=dict(argstr="--precision=%d"), - x_units=dict(argstr="-u %d", usedefault=True), - y_max=dict(argstr="--ymax=%.2f", xor=("y_range",)), - y_min=dict(argstr="--ymin=%.2f", xor=("y_range",)), - y_range=dict(argstr="%s", xor=("y_min", 
"y_max")), + plot_finish=dict( + argstr="--finish=%d", + xor=("plot_range",), + ), + plot_range=dict( + argstr="%s", + xor=("plot_start", "plot_finish"), + ), + plot_size=dict( + argstr="%s", + ), + plot_start=dict( + argstr="--start=%d", + xor=("plot_range",), + ), + sci_notation=dict( + argstr="--sci", + ), + title=dict( + argstr="%s", + ), + x_precision=dict( + argstr="--precision=%d", + ), + x_units=dict( + argstr="-u %d", + usedefault=True, + ), + y_max=dict( + argstr="--ymax=%.2f", + xor=("y_range",), + ), + y_min=dict( + argstr="--ymin=%.2f", + xor=("y_range",), + ), + y_range=dict( + argstr="%s", + xor=("y_min", "y_max"), + ), ) inputs = PlotTimeSeries.input_spec() @@ -31,7 +79,11 @@ def test_PlotTimeSeries_inputs(): def test_PlotTimeSeries_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = PlotTimeSeries.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py b/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py index b4c2f58ab1..874cbcf0e8 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py +++ b/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py @@ -4,11 +4,25 @@ def test_PowerSpectrum_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=1 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=1, ), output_type=dict(), ) @@ -20,7 +34,11 @@ def test_PowerSpectrum_inputs(): def test_PowerSpectrum_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = PowerSpectrum.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py b/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py index 4f731c95ad..2286dad026 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py +++ b/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py @@ -4,15 +4,47 @@ def test_PrepareFieldmap_inputs(): input_map = dict( - args=dict(argstr="%s"), - delta_TE=dict(argstr="%f", mandatory=True, position=-2, usedefault=True), - environ=dict(nohash=True, usedefault=True), - in_magnitude=dict(argstr="%s", extensions=None, mandatory=True, position=3), - in_phase=dict(argstr="%s", extensions=None, mandatory=True, position=2), - nocheck=dict(argstr="--nocheck", position=-1, usedefault=True), - out_fieldmap=dict(argstr="%s", extensions=None, position=4), + args=dict( + argstr="%s", + ), + delta_TE=dict( + argstr="%f", + mandatory=True, + position=-2, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_magnitude=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=3, + ), + in_phase=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + nocheck=dict( + argstr="--nocheck", + position=-1, + usedefault=True, + ), + out_fieldmap=dict( + argstr="%s", + extensions=None, + position=4, + ), output_type=dict(), - scanner=dict(argstr="%s", position=1, usedefault=True), + scanner=dict( + argstr="%s", + 
position=1, + usedefault=True, + ), ) inputs = PrepareFieldmap.input_spec() @@ -22,7 +54,11 @@ def test_PrepareFieldmap_inputs(): def test_PrepareFieldmap_outputs(): - output_map = dict(out_fieldmap=dict(extensions=None)) + output_map = dict( + out_fieldmap=dict( + extensions=None, + ), + ) outputs = PrepareFieldmap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py index 0737972f17..aae5d80c57 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py +++ b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py @@ -4,45 +4,139 @@ def test_ProbTrackX_inputs(): input_map = dict( - args=dict(argstr="%s"), - avoid_mp=dict(argstr="--avoid=%s", extensions=None), - c_thresh=dict(argstr="--cthr=%.3f"), - correct_path_distribution=dict(argstr="--pd"), - dist_thresh=dict(argstr="--distthresh=%.3f"), - environ=dict(nohash=True, usedefault=True), - fibst=dict(argstr="--fibst=%d"), - force_dir=dict(argstr="--forcedir", usedefault=True), - fsamples=dict(mandatory=True), - inv_xfm=dict(argstr="--invxfm=%s", extensions=None), - loop_check=dict(argstr="--loopcheck"), - mask=dict(argstr="-m %s", extensions=None, mandatory=True), - mask2=dict(argstr="--mask2=%s", extensions=None), - mesh=dict(argstr="--mesh=%s", extensions=None), - mod_euler=dict(argstr="--modeuler"), - mode=dict(argstr="--mode=%s", genfile=True), - n_samples=dict(argstr="--nsamples=%d", usedefault=True), - n_steps=dict(argstr="--nsteps=%d"), - network=dict(argstr="--network"), - opd=dict(argstr="--opd", usedefault=True), - os2t=dict(argstr="--os2t"), - out_dir=dict(argstr="--dir=%s", genfile=True), + args=dict( + argstr="%s", + ), + avoid_mp=dict( + argstr="--avoid=%s", + extensions=None, + ), + c_thresh=dict( + argstr="--cthr=%.3f", + ), + correct_path_distribution=dict( + argstr="--pd", + ), + dist_thresh=dict( + argstr="--distthresh=%.3f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fibst=dict( + argstr="--fibst=%d", + ), + force_dir=dict( + argstr="--forcedir", + usedefault=True, + ), + fsamples=dict( + mandatory=True, + ), + inv_xfm=dict( + argstr="--invxfm=%s", + extensions=None, + ), + loop_check=dict( + argstr="--loopcheck", + ), + mask=dict( + argstr="-m %s", + extensions=None, + mandatory=True, + ), + mask2=dict( + argstr="--mask2=%s", + extensions=None, + ), + mesh=dict( + argstr="--mesh=%s", + extensions=None, + ), + mod_euler=dict( + argstr="--modeuler", + ), + mode=dict( + argstr="--mode=%s", + genfile=True, + ), + n_samples=dict( + argstr="--nsamples=%d", + usedefault=True, + ), + n_steps=dict( + argstr="--nsteps=%d", + ), + network=dict( + argstr="--network", + ), + opd=dict( + argstr="--opd", + usedefault=True, + ), + os2t=dict( + argstr="--os2t", + ), + out_dir=dict( + argstr="--dir=%s", + genfile=True, + ), output_type=dict(), - phsamples=dict(mandatory=True), - rand_fib=dict(argstr="--randfib=%d"), - random_seed=dict(argstr="--rseed"), - s2tastext=dict(argstr="--s2tastext"), - sample_random_points=dict(argstr="--sampvox"), - samples_base_name=dict(argstr="--samples=%s", usedefault=True), - seed=dict(argstr="--seed=%s", mandatory=True), - seed_ref=dict(argstr="--seedref=%s", extensions=None), - step_length=dict(argstr="--steplength=%.3f"), - stop_mask=dict(argstr="--stop=%s", extensions=None), - target_masks=dict(argstr="--targetmasks=%s"), - thsamples=dict(mandatory=True), - use_anisotropy=dict(argstr="--usef"), - verbose=dict(argstr="--verbose=%d"), - 
waypoints=dict(argstr="--waypoints=%s", extensions=None), - xfm=dict(argstr="--xfm=%s", extensions=None), + phsamples=dict( + mandatory=True, + ), + rand_fib=dict( + argstr="--randfib=%d", + ), + random_seed=dict( + argstr="--rseed", + ), + s2tastext=dict( + argstr="--s2tastext", + ), + sample_random_points=dict( + argstr="--sampvox", + ), + samples_base_name=dict( + argstr="--samples=%s", + usedefault=True, + ), + seed=dict( + argstr="--seed=%s", + mandatory=True, + ), + seed_ref=dict( + argstr="--seedref=%s", + extensions=None, + ), + step_length=dict( + argstr="--steplength=%.3f", + ), + stop_mask=dict( + argstr="--stop=%s", + extensions=None, + ), + target_masks=dict( + argstr="--targetmasks=%s", + ), + thsamples=dict( + mandatory=True, + ), + use_anisotropy=dict( + argstr="--usef", + ), + verbose=dict( + argstr="--verbose=%d", + ), + waypoints=dict( + argstr="--waypoints=%s", + extensions=None, + ), + xfm=dict( + argstr="--xfm=%s", + extensions=None, + ), ) inputs = ProbTrackX.input_spec() @@ -54,10 +148,14 @@ def test_ProbTrackX_inputs(): def test_ProbTrackX_outputs(): output_map = dict( fdt_paths=dict(), - log=dict(extensions=None), + log=dict( + extensions=None, + ), particle_files=dict(), targets=dict(), - way_total=dict(extensions=None), + way_total=dict( + extensions=None, + ), ) outputs = ProbTrackX.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py index 057514614b..1813bd3c9c 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py +++ b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py @@ -4,59 +4,186 @@ def test_ProbTrackX2_inputs(): input_map = dict( - args=dict(argstr="%s"), - avoid_mp=dict(argstr="--avoid=%s", extensions=None), - c_thresh=dict(argstr="--cthr=%.3f"), - colmask4=dict(argstr="--colmask4=%s", extensions=None), - correct_path_distribution=dict(argstr="--pd"), - dist_thresh=dict(argstr="--distthresh=%.3f"), - distthresh1=dict(argstr="--distthresh1=%.3f"), - distthresh3=dict(argstr="--distthresh3=%.3f"), - environ=dict(nohash=True, usedefault=True), - fibst=dict(argstr="--fibst=%d"), - fopd=dict(argstr="--fopd=%s", extensions=None), - force_dir=dict(argstr="--forcedir", usedefault=True), - fsamples=dict(mandatory=True), - inv_xfm=dict(argstr="--invxfm=%s", extensions=None), - loop_check=dict(argstr="--loopcheck"), - lrtarget3=dict(argstr="--lrtarget3=%s", extensions=None), - mask=dict(argstr="-m %s", extensions=None, mandatory=True), - meshspace=dict(argstr="--meshspace=%s"), - mod_euler=dict(argstr="--modeuler"), - n_samples=dict(argstr="--nsamples=%d", usedefault=True), - n_steps=dict(argstr="--nsteps=%d"), - network=dict(argstr="--network"), - omatrix1=dict(argstr="--omatrix1"), - omatrix2=dict(argstr="--omatrix2", requires=["target2"]), - omatrix3=dict(argstr="--omatrix3", requires=["target3", "lrtarget3"]), - omatrix4=dict(argstr="--omatrix4"), - onewaycondition=dict(argstr="--onewaycondition"), - opd=dict(argstr="--opd", usedefault=True), - os2t=dict(argstr="--os2t"), - out_dir=dict(argstr="--dir=%s", genfile=True), + args=dict( + argstr="%s", + ), + avoid_mp=dict( + argstr="--avoid=%s", + extensions=None, + ), + c_thresh=dict( + argstr="--cthr=%.3f", + ), + colmask4=dict( + argstr="--colmask4=%s", + extensions=None, + ), + correct_path_distribution=dict( + argstr="--pd", + ), + dist_thresh=dict( + argstr="--distthresh=%.3f", + ), + distthresh1=dict( + argstr="--distthresh1=%.3f", + ), + distthresh3=dict( + argstr="--distthresh3=%.3f", + ), + environ=dict( + 
nohash=True, + usedefault=True, + ), + fibst=dict( + argstr="--fibst=%d", + ), + fopd=dict( + argstr="--fopd=%s", + extensions=None, + ), + force_dir=dict( + argstr="--forcedir", + usedefault=True, + ), + fsamples=dict( + mandatory=True, + ), + inv_xfm=dict( + argstr="--invxfm=%s", + extensions=None, + ), + loop_check=dict( + argstr="--loopcheck", + ), + lrtarget3=dict( + argstr="--lrtarget3=%s", + extensions=None, + ), + mask=dict( + argstr="-m %s", + extensions=None, + mandatory=True, + ), + meshspace=dict( + argstr="--meshspace=%s", + ), + mod_euler=dict( + argstr="--modeuler", + ), + n_samples=dict( + argstr="--nsamples=%d", + usedefault=True, + ), + n_steps=dict( + argstr="--nsteps=%d", + ), + network=dict( + argstr="--network", + ), + omatrix1=dict( + argstr="--omatrix1", + ), + omatrix2=dict( + argstr="--omatrix2", + requires=["target2"], + ), + omatrix3=dict( + argstr="--omatrix3", + requires=["target3", "lrtarget3"], + ), + omatrix4=dict( + argstr="--omatrix4", + ), + onewaycondition=dict( + argstr="--onewaycondition", + ), + opd=dict( + argstr="--opd", + usedefault=True, + ), + os2t=dict( + argstr="--os2t", + ), + out_dir=dict( + argstr="--dir=%s", + genfile=True, + ), output_type=dict(), - phsamples=dict(mandatory=True), - rand_fib=dict(argstr="--randfib=%d"), - random_seed=dict(argstr="--rseed"), - s2tastext=dict(argstr="--s2tastext"), - sample_random_points=dict(argstr="--sampvox"), - samples_base_name=dict(argstr="--samples=%s", usedefault=True), - seed=dict(argstr="--seed=%s", mandatory=True), - seed_ref=dict(argstr="--seedref=%s", extensions=None), - simple=dict(argstr="--simple"), - step_length=dict(argstr="--steplength=%.3f"), - stop_mask=dict(argstr="--stop=%s", extensions=None), - target2=dict(argstr="--target2=%s", extensions=None), - target3=dict(argstr="--target3=%s", extensions=None), - target4=dict(argstr="--target4=%s", extensions=None), - target_masks=dict(argstr="--targetmasks=%s"), - thsamples=dict(mandatory=True), - use_anisotropy=dict(argstr="--usef"), - verbose=dict(argstr="--verbose=%d"), - waycond=dict(argstr="--waycond=%s"), - wayorder=dict(argstr="--wayorder"), - waypoints=dict(argstr="--waypoints=%s", extensions=None), - xfm=dict(argstr="--xfm=%s", extensions=None), + phsamples=dict( + mandatory=True, + ), + rand_fib=dict( + argstr="--randfib=%d", + ), + random_seed=dict( + argstr="--rseed", + ), + s2tastext=dict( + argstr="--s2tastext", + ), + sample_random_points=dict( + argstr="--sampvox", + ), + samples_base_name=dict( + argstr="--samples=%s", + usedefault=True, + ), + seed=dict( + argstr="--seed=%s", + mandatory=True, + ), + seed_ref=dict( + argstr="--seedref=%s", + extensions=None, + ), + simple=dict( + argstr="--simple", + ), + step_length=dict( + argstr="--steplength=%.3f", + ), + stop_mask=dict( + argstr="--stop=%s", + extensions=None, + ), + target2=dict( + argstr="--target2=%s", + extensions=None, + ), + target3=dict( + argstr="--target3=%s", + extensions=None, + ), + target4=dict( + argstr="--target4=%s", + extensions=None, + ), + target_masks=dict( + argstr="--targetmasks=%s", + ), + thsamples=dict( + mandatory=True, + ), + use_anisotropy=dict( + argstr="--usef", + ), + verbose=dict( + argstr="--verbose=%d", + ), + waycond=dict( + argstr="--waycond=%s", + ), + wayorder=dict( + argstr="--wayorder", + ), + waypoints=dict( + argstr="--waypoints=%s", + extensions=None, + ), + xfm=dict( + argstr="--xfm=%s", + extensions=None, + ), ) inputs = ProbTrackX2.input_spec() @@ -68,15 +195,29 @@ def test_ProbTrackX2_inputs(): def 
test_ProbTrackX2_outputs(): output_map = dict( fdt_paths=dict(), - log=dict(extensions=None), - lookup_tractspace=dict(extensions=None), - matrix1_dot=dict(extensions=None), - matrix2_dot=dict(extensions=None), - matrix3_dot=dict(extensions=None), - network_matrix=dict(extensions=None), + log=dict( + extensions=None, + ), + lookup_tractspace=dict( + extensions=None, + ), + matrix1_dot=dict( + extensions=None, + ), + matrix2_dot=dict( + extensions=None, + ), + matrix3_dot=dict( + extensions=None, + ), + network_matrix=dict( + extensions=None, + ), particle_files=dict(), targets=dict(), - way_total=dict(extensions=None), + way_total=dict( + extensions=None, + ), ) outputs = ProbTrackX2.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py b/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py index d6e3615eca..420eacb9c2 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py +++ b/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py @@ -4,11 +4,24 @@ def test_ProjThresh_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_files=dict(argstr="%s", mandatory=True, position=0), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr="%s", + mandatory=True, + position=0, + ), output_type=dict(), - threshold=dict(argstr="%d", mandatory=True, position=1), + threshold=dict( + argstr="%d", + mandatory=True, + position=1, + ), ) inputs = ProjThresh.input_spec() @@ -18,7 +31,9 @@ def test_ProjThresh_inputs(): def test_ProjThresh_outputs(): - output_map = dict(out_files=dict()) + output_map = dict( + out_files=dict(), + ) outputs = ProjThresh.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Randomise.py b/nipype/interfaces/fsl/tests/test_auto_Randomise.py index 82993b4c2a..9b0b74bf28 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Randomise.py +++ b/nipype/interfaces/fsl/tests/test_auto_Randomise.py @@ -4,36 +4,107 @@ def test_Randomise_inputs(): input_map = dict( - args=dict(argstr="%s"), - base_name=dict(argstr='-o "%s"', position=1, usedefault=True), - c_thresh=dict(argstr="-c %.1f"), - cm_thresh=dict(argstr="-C %.1f"), - demean=dict(argstr="-D"), - design_mat=dict(argstr="-d %s", extensions=None, position=2), - environ=dict(nohash=True, usedefault=True), - f_c_thresh=dict(argstr="-F %.2f"), - f_cm_thresh=dict(argstr="-S %.2f"), - f_only=dict(argstr="--fonly"), - fcon=dict(argstr="-f %s", extensions=None), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=0), - mask=dict(argstr="-m %s", extensions=None), - num_perm=dict(argstr="-n %d"), - one_sample_group_mean=dict(argstr="-1"), + args=dict( + argstr="%s", + ), + base_name=dict( + argstr='-o "%s"', + position=1, + usedefault=True, + ), + c_thresh=dict( + argstr="-c %.1f", + ), + cm_thresh=dict( + argstr="-C %.1f", + ), + demean=dict( + argstr="-D", + ), + design_mat=dict( + argstr="-d %s", + extensions=None, + position=2, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + f_c_thresh=dict( + argstr="-F %.2f", + ), + f_cm_thresh=dict( + argstr="-S %.2f", + ), + f_only=dict( + argstr="--fonly", + ), + fcon=dict( + argstr="-f %s", + extensions=None, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + position=0, + ), + mask=dict( + argstr="-m %s", + extensions=None, + ), + num_perm=dict( + argstr="-n %d", + ), + one_sample_group_mean=dict( + argstr="-1", + ), output_type=dict(), - 
p_vec_n_dist_files=dict(argstr="-P"), - raw_stats_imgs=dict(argstr="-R"), - seed=dict(argstr="--seed=%d"), - show_info_parallel_mode=dict(argstr="-Q"), - show_total_perms=dict(argstr="-q"), - tcon=dict(argstr="-t %s", extensions=None, position=3), - tfce=dict(argstr="-T"), - tfce2D=dict(argstr="--T2"), - tfce_C=dict(argstr="--tfce_C=%.2f"), - tfce_E=dict(argstr="--tfce_E=%.2f"), - tfce_H=dict(argstr="--tfce_H=%.2f"), - var_smooth=dict(argstr="-v %d"), - vox_p_values=dict(argstr="-x"), - x_block_labels=dict(argstr="-e %s", extensions=None), + p_vec_n_dist_files=dict( + argstr="-P", + ), + raw_stats_imgs=dict( + argstr="-R", + ), + seed=dict( + argstr="--seed=%d", + ), + show_info_parallel_mode=dict( + argstr="-Q", + ), + show_total_perms=dict( + argstr="-q", + ), + tcon=dict( + argstr="-t %s", + extensions=None, + position=3, + ), + tfce=dict( + argstr="-T", + ), + tfce2D=dict( + argstr="--T2", + ), + tfce_C=dict( + argstr="--tfce_C=%.2f", + ), + tfce_E=dict( + argstr="--tfce_E=%.2f", + ), + tfce_H=dict( + argstr="--tfce_H=%.2f", + ), + var_smooth=dict( + argstr="-v %d", + ), + vox_p_values=dict( + argstr="-x", + ), + x_block_labels=dict( + argstr="-e %s", + extensions=None, + ), ) inputs = Randomise.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py b/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py index e26b2e2ed8..e008eb44e6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py +++ b/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py @@ -4,10 +4,24 @@ def test_Reorient2Std_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True), - out_file=dict(argstr="%s", extensions=None, genfile=True, hash_files=False), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + ), + out_file=dict( + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + ), output_type=dict(), ) inputs = Reorient2Std.input_spec() @@ -18,7 +32,11 @@ def test_Reorient2Std_inputs(): def test_Reorient2Std_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Reorient2Std.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py b/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py index b74f585f64..b5598f0de4 100644 --- a/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py +++ b/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py @@ -4,10 +4,22 @@ def test_RobustFOV_inputs(): input_map = dict( - args=dict(argstr="%s"), - brainsize=dict(argstr="-b %d"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=0), + args=dict( + argstr="%s", + ), + brainsize=dict( + argstr="-b %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + position=0, + ), out_roi=dict( argstr="-r %s", extensions=None, @@ -33,7 +45,12 @@ def test_RobustFOV_inputs(): def test_RobustFOV_outputs(): output_map = dict( - out_roi=dict(extensions=None), out_transform=dict(extensions=None) + out_roi=dict( + extensions=None, + ), + out_transform=dict( + extensions=None, + ), ) outputs = RobustFOV.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_SMM.py 
b/nipype/interfaces/fsl/tests/test_auto_SMM.py index 0c7bb96c90..51777eaed9 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SMM.py +++ b/nipype/interfaces/fsl/tests/test_auto_SMM.py @@ -4,8 +4,13 @@ def test_SMM_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), mask=dict( argstr='--mask="%s"', copyfile=False, @@ -13,7 +18,10 @@ def test_SMM_inputs(): mandatory=True, position=1, ), - no_deactivation_class=dict(argstr="--zfstatmode", position=2), + no_deactivation_class=dict( + argstr="--zfstatmode", + position=2, + ), output_type=dict(), spatial_data_file=dict( argstr='--sdf="%s"', @@ -32,9 +40,15 @@ def test_SMM_inputs(): def test_SMM_outputs(): output_map = dict( - activation_p_map=dict(extensions=None), - deactivation_p_map=dict(extensions=None), - null_p_map=dict(extensions=None), + activation_p_map=dict( + extensions=None, + ), + deactivation_p_map=dict( + extensions=None, + ), + null_p_map=dict( + extensions=None, + ), ) outputs = SMM.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_SUSAN.py b/nipype/interfaces/fsl/tests/test_auto_SUSAN.py index 4ad74f7d39..427b770222 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SUSAN.py +++ b/nipype/interfaces/fsl/tests/test_auto_SUSAN.py @@ -4,18 +4,52 @@ def test_SUSAN_inputs(): input_map = dict( - args=dict(argstr="%s"), - brightness_threshold=dict(argstr="%.10f", mandatory=True, position=2), - dimension=dict(argstr="%d", position=4, usedefault=True), - environ=dict(nohash=True, usedefault=True), - fwhm=dict(argstr="%.10f", mandatory=True, position=3), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1), + args=dict( + argstr="%s", + ), + brightness_threshold=dict( + argstr="%.10f", + mandatory=True, + position=2, + ), + dimension=dict( + argstr="%d", + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fwhm=dict( + argstr="%.10f", + mandatory=True, + position=3, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-1 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-1, ), output_type=dict(), - usans=dict(argstr="", position=6, usedefault=True), - use_median=dict(argstr="%d", position=5, usedefault=True), + usans=dict( + argstr="", + position=6, + usedefault=True, + ), + use_median=dict( + argstr="%d", + position=5, + usedefault=True, + ), ) inputs = SUSAN.input_spec() @@ -25,7 +59,11 @@ def test_SUSAN_inputs(): def test_SUSAN_outputs(): - output_map = dict(smoothed_file=dict(extensions=None)) + output_map = dict( + smoothed_file=dict( + extensions=None, + ), + ) outputs = SUSAN.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_SigLoss.py b/nipype/interfaces/fsl/tests/test_auto_SigLoss.py index 91062bcbaa..11be93c5b9 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SigLoss.py +++ b/nipype/interfaces/fsl/tests/test_auto_SigLoss.py @@ -4,14 +4,34 @@ def test_SigLoss_inputs(): input_map = dict( - args=dict(argstr="%s"), - echo_time=dict(argstr="--te=%f"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True), - mask_file=dict(argstr="-m %s", extensions=None), - out_file=dict(argstr="-s %s", extensions=None, genfile=True), + args=dict( + argstr="%s", + ), 
+ echo_time=dict( + argstr="--te=%f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + mask_file=dict( + argstr="-m %s", + extensions=None, + ), + out_file=dict( + argstr="-s %s", + extensions=None, + genfile=True, + ), output_type=dict(), - slice_direction=dict(argstr="-d %s"), + slice_direction=dict( + argstr="-d %s", + ), ) inputs = SigLoss.input_spec() @@ -21,7 +41,11 @@ def test_SigLoss_inputs(): def test_SigLoss_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SigLoss.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Slice.py b/nipype/interfaces/fsl/tests/test_auto_Slice.py index 3003cbd77c..f5360716c6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Slice.py +++ b/nipype/interfaces/fsl/tests/test_auto_Slice.py @@ -4,12 +4,24 @@ def test_Slice_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), in_file=dict( - argstr="%s", copyfile=False, extensions=None, mandatory=True, position=0 + argstr="%s", + copyfile=False, + extensions=None, + mandatory=True, + position=0, + ), + out_base_name=dict( + argstr="%s", + position=1, ), - out_base_name=dict(argstr="%s", position=1), output_type=dict(), ) inputs = Slice.input_spec() @@ -20,7 +32,9 @@ def test_Slice_inputs(): def test_Slice_outputs(): - output_map = dict(out_files=dict()) + output_map = dict( + out_files=dict(), + ) outputs = Slice.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py b/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py index 4bd3452b55..acdbd8c2ca 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py +++ b/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py @@ -4,20 +4,49 @@ def test_SliceTimer_inputs(): input_map = dict( - args=dict(argstr="%s"), - custom_order=dict(argstr="--ocustom=%s", extensions=None), - custom_timings=dict(argstr="--tcustom=%s", extensions=None), - environ=dict(nohash=True, usedefault=True), - global_shift=dict(argstr="--tglobal"), - in_file=dict(argstr="--in=%s", extensions=None, mandatory=True, position=0), - index_dir=dict(argstr="--down"), - interleaved=dict(argstr="--odd"), + args=dict( + argstr="%s", + ), + custom_order=dict( + argstr="--ocustom=%s", + extensions=None, + ), + custom_timings=dict( + argstr="--tcustom=%s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + global_shift=dict( + argstr="--tglobal", + ), + in_file=dict( + argstr="--in=%s", + extensions=None, + mandatory=True, + position=0, + ), + index_dir=dict( + argstr="--down", + ), + interleaved=dict( + argstr="--odd", + ), out_file=dict( - argstr="--out=%s", extensions=None, genfile=True, hash_files=False + argstr="--out=%s", + extensions=None, + genfile=True, + hash_files=False, ), output_type=dict(), - slice_direction=dict(argstr="--direction=%d"), - time_repetition=dict(argstr="--repeat=%f"), + slice_direction=dict( + argstr="--direction=%d", + ), + time_repetition=dict( + argstr="--repeat=%f", + ), ) inputs = SliceTimer.input_spec() @@ -27,7 +56,11 @@ def test_SliceTimer_inputs(): def test_SliceTimer_outputs(): - output_map = dict(slice_time_corrected_file=dict(extensions=None)) + output_map = dict( + slice_time_corrected_file=dict( 
+ extensions=None, + ), + ) outputs = SliceTimer.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Slicer.py b/nipype/interfaces/fsl/tests/test_auto_Slicer.py index 36d676441d..8e3195fd39 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Slicer.py +++ b/nipype/interfaces/fsl/tests/test_auto_Slicer.py @@ -10,23 +10,61 @@ def test_Slicer_inputs(): requires=["image_width"], xor=("single_slice", "middle_slices", "all_axial", "sample_axial"), ), - args=dict(argstr="%s"), - colour_map=dict(argstr="-l %s", extensions=None, position=4), - dither_edges=dict(argstr="-t", position=7), - environ=dict(nohash=True, usedefault=True), - image_edges=dict(argstr="%s", extensions=None, position=2), - image_width=dict(argstr="%d", position=-2), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1), - intensity_range=dict(argstr="-i %.3f %.3f", position=5), - label_slices=dict(argstr="-L", position=3, usedefault=True), + args=dict( + argstr="%s", + ), + colour_map=dict( + argstr="-l %s", + extensions=None, + position=4, + ), + dither_edges=dict( + argstr="-t", + position=7, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + image_edges=dict( + argstr="%s", + extensions=None, + position=2, + ), + image_width=dict( + argstr="%d", + position=-2, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + intensity_range=dict( + argstr="-i %.3f %.3f", + position=5, + ), + label_slices=dict( + argstr="-L", + position=3, + usedefault=True, + ), middle_slices=dict( argstr="-a", position=10, xor=("single_slice", "middle_slices", "all_axial", "sample_axial"), ), - nearest_neighbour=dict(argstr="-n", position=8), + nearest_neighbour=dict( + argstr="-n", + position=8, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-1 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-1, ), output_type=dict(), sample_axial=dict( @@ -35,16 +73,29 @@ def test_Slicer_inputs(): requires=["image_width"], xor=("single_slice", "middle_slices", "all_axial", "sample_axial"), ), - scaling=dict(argstr="-s %f", position=0), - show_orientation=dict(argstr="%s", position=9, usedefault=True), + scaling=dict( + argstr="-s %f", + position=0, + ), + show_orientation=dict( + argstr="%s", + position=9, + usedefault=True, + ), single_slice=dict( argstr="-%s", position=10, requires=["slice_number"], xor=("single_slice", "middle_slices", "all_axial", "sample_axial"), ), - slice_number=dict(argstr="-%d", position=11), - threshold_edges=dict(argstr="-e %.3f", position=6), + slice_number=dict( + argstr="-%d", + position=11, + ), + threshold_edges=dict( + argstr="-e %.3f", + position=6, + ), ) inputs = Slicer.input_spec() @@ -54,7 +105,11 @@ def test_Slicer_inputs(): def test_Slicer_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Slicer.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Smooth.py b/nipype/interfaces/fsl/tests/test_auto_Smooth.py index 3e5ac40fe1..9d9324770b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Smooth.py +++ b/nipype/interfaces/fsl/tests/test_auto_Smooth.py @@ -4,15 +4,25 @@ def test_Smooth_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), fwhm=dict( 
argstr="-kernel gauss %.03f -fmean", mandatory=True, position=1, xor=["sigma"], ), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), output_type=dict(), sigma=dict( argstr="-kernel gauss %.03f -fmean", @@ -37,7 +47,11 @@ def test_Smooth_inputs(): def test_Smooth_outputs(): - output_map = dict(smoothed_file=dict(extensions=None)) + output_map = dict( + smoothed_file=dict( + extensions=None, + ), + ) outputs = Smooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py b/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py index f1e0a409f7..bf21438d1d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py +++ b/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py @@ -4,13 +4,34 @@ def test_SmoothEstimate_inputs(): input_map = dict( - args=dict(argstr="%s"), - dof=dict(argstr="--dof=%d", mandatory=True, xor=["zstat_file"]), - environ=dict(nohash=True, usedefault=True), - mask_file=dict(argstr="--mask=%s", extensions=None, mandatory=True), + args=dict( + argstr="%s", + ), + dof=dict( + argstr="--dof=%d", + mandatory=True, + xor=["zstat_file"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + mask_file=dict( + argstr="--mask=%s", + extensions=None, + mandatory=True, + ), output_type=dict(), - residual_fit_file=dict(argstr="--res=%s", extensions=None, requires=["dof"]), - zstat_file=dict(argstr="--zstat=%s", extensions=None, xor=["dof"]), + residual_fit_file=dict( + argstr="--res=%s", + extensions=None, + requires=["dof"], + ), + zstat_file=dict( + argstr="--zstat=%s", + extensions=None, + xor=["dof"], + ), ) inputs = SmoothEstimate.input_spec() @@ -20,7 +41,11 @@ def test_SmoothEstimate_inputs(): def test_SmoothEstimate_outputs(): - output_map = dict(dlh=dict(), resels=dict(), volume=dict()) + output_map = dict( + dlh=dict(), + resels=dict(), + volume=dict(), + ) outputs = SmoothEstimate.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py b/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py index 60b6bc6e3e..0d6f68cbea 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py +++ b/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py @@ -4,19 +4,58 @@ def test_SpatialFilter_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - internal_datatype=dict(argstr="-dt %s", position=1), - kernel_file=dict(argstr="%s", extensions=None, position=5, xor=["kernel_size"]), - kernel_shape=dict(argstr="-kernel %s", position=4), - kernel_size=dict(argstr="%.4f", position=5, xor=["kernel_file"]), - nan2zeros=dict(argstr="-nan", position=3), - operation=dict(argstr="-f%s", mandatory=True, position=6), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + kernel_file=dict( + argstr="%s", + extensions=None, + position=5, + xor=["kernel_size"], + ), + kernel_shape=dict( + argstr="-kernel %s", + position=4, + ), + kernel_size=dict( + argstr="%.4f", + position=5, + xor=["kernel_file"], + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), + operation=dict( + argstr="-f%s", + mandatory=True, + 
position=6, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = SpatialFilter.input_spec() @@ -27,7 +66,11 @@ def test_SpatialFilter_inputs(): def test_SpatialFilter_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SpatialFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Split.py b/nipype/interfaces/fsl/tests/test_auto_Split.py index d47bafb409..79aa3a7ade 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Split.py +++ b/nipype/interfaces/fsl/tests/test_auto_Split.py @@ -4,11 +4,28 @@ def test_Split_inputs(): input_map = dict( - args=dict(argstr="%s"), - dimension=dict(argstr="-%s", mandatory=True, position=2), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), - out_base_name=dict(argstr="%s", position=1), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="-%s", + mandatory=True, + position=2, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), + out_base_name=dict( + argstr="%s", + position=1, + ), output_type=dict(), ) inputs = Split.input_spec() @@ -19,7 +36,9 @@ def test_Split_inputs(): def test_Split_outputs(): - output_map = dict(out_files=dict()) + output_map = dict( + out_files=dict(), + ) outputs = Split.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_StdImage.py b/nipype/interfaces/fsl/tests/test_auto_StdImage.py index c829c91224..226abb5e5e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_StdImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_StdImage.py @@ -4,16 +4,43 @@ def test_StdImage_inputs(): input_map = dict( - args=dict(argstr="%s"), - dimension=dict(argstr="-%sstd", position=4, usedefault=True), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - internal_datatype=dict(argstr="-dt %s", position=1), - nan2zeros=dict(argstr="-nan", position=3), + args=dict( + argstr="%s", + ), + dimension=dict( + argstr="-%sstd", + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = StdImage.input_spec() @@ -24,7 +51,11 @@ def test_StdImage_inputs(): def test_StdImage_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = StdImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py 
b/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py index cdfaf37a42..26b3b3ff54 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py +++ b/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py @@ -4,11 +4,29 @@ def test_SwapDimensions_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position="1"), - new_dims=dict(argstr="%s %s %s", mandatory=True), - out_file=dict(argstr="%s", extensions=None, genfile=True, hash_files=False), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position="1", + ), + new_dims=dict( + argstr="%s %s %s", + mandatory=True, + ), + out_file=dict( + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + ), output_type=dict(), ) inputs = SwapDimensions.input_spec() @@ -19,7 +37,11 @@ def test_SwapDimensions_inputs(): def test_SwapDimensions_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SwapDimensions.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_TOPUP.py b/nipype/interfaces/fsl/tests/test_auto_TOPUP.py index 4d77301342..3358190dfb 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TOPUP.py +++ b/nipype/interfaces/fsl/tests/test_auto_TOPUP.py @@ -4,8 +4,13 @@ def test_TOPUP_inputs(): input_map = dict( - args=dict(argstr="%s"), - config=dict(argstr="--config=%s", usedefault=True), + args=dict( + argstr="%s", + ), + config=dict( + argstr="--config=%s", + usedefault=True, + ), encoding_direction=dict( argstr="--datain=%s", mandatory=True, @@ -18,14 +23,33 @@ def test_TOPUP_inputs(): mandatory=True, xor=["encoding_direction"], ), - environ=dict(nohash=True, usedefault=True), - estmov=dict(argstr="--estmov=%d"), - fwhm=dict(argstr="--fwhm=%f"), - in_file=dict(argstr="--imain=%s", extensions=None, mandatory=True), - interp=dict(argstr="--interp=%s"), - max_iter=dict(argstr="--miter=%d"), - minmet=dict(argstr="--minmet=%d"), - numprec=dict(argstr="--numprec=%s"), + environ=dict( + nohash=True, + usedefault=True, + ), + estmov=dict( + argstr="--estmov=%d", + ), + fwhm=dict( + argstr="--fwhm=%f", + ), + in_file=dict( + argstr="--imain=%s", + extensions=None, + mandatory=True, + ), + interp=dict( + argstr="--interp=%s", + ), + max_iter=dict( + argstr="--miter=%d", + ), + minmet=dict( + argstr="--minmet=%d", + ), + numprec=dict( + argstr="--numprec=%s", + ), out_base=dict( argstr="--out=%s", extensions=None, @@ -47,7 +71,11 @@ def test_TOPUP_inputs(): name_source=["in_file"], name_template="%s_field", ), - out_jac_prefix=dict(argstr="--jacout=%s", hash_files=False, usedefault=True), + out_jac_prefix=dict( + argstr="--jacout=%s", + hash_files=False, + usedefault=True, + ), out_logfile=dict( argstr="--logout=%s", extensions=None, @@ -56,20 +84,46 @@ def test_TOPUP_inputs(): name_source=["in_file"], name_template="%s_topup.log", ), - out_mat_prefix=dict(argstr="--rbmout=%s", hash_files=False, usedefault=True), - out_warp_prefix=dict(argstr="--dfout=%s", hash_files=False, usedefault=True), + out_mat_prefix=dict( + argstr="--rbmout=%s", + hash_files=False, + usedefault=True, + ), + out_warp_prefix=dict( + argstr="--dfout=%s", + hash_files=False, + usedefault=True, + ), output_type=dict(), readout_times=dict( - mandatory=True, requires=["encoding_direction"], 
xor=["encoding_file"] - ), - reg_lambda=dict(argstr="--lambda=%0.f"), - regmod=dict(argstr="--regmod=%s"), - regrid=dict(argstr="--regrid=%d"), - scale=dict(argstr="--scale=%d"), - splineorder=dict(argstr="--splineorder=%d"), - ssqlambda=dict(argstr="--ssqlambda=%d"), - subsamp=dict(argstr="--subsamp=%d"), - warp_res=dict(argstr="--warpres=%f"), + mandatory=True, + requires=["encoding_direction"], + xor=["encoding_file"], + ), + reg_lambda=dict( + argstr="--lambda=%0.f", + ), + regmod=dict( + argstr="--regmod=%s", + ), + regrid=dict( + argstr="--regrid=%d", + ), + scale=dict( + argstr="--scale=%d", + ), + splineorder=dict( + argstr="--splineorder=%d", + ), + ssqlambda=dict( + argstr="--ssqlambda=%d", + ), + subsamp=dict( + argstr="--subsamp=%d", + ), + warp_res=dict( + argstr="--warpres=%f", + ), ) inputs = TOPUP.input_spec() @@ -80,14 +134,26 @@ def test_TOPUP_inputs(): def test_TOPUP_outputs(): output_map = dict( - out_corrected=dict(extensions=None), - out_enc_file=dict(extensions=None), - out_field=dict(extensions=None), - out_fieldcoef=dict(extensions=None), + out_corrected=dict( + extensions=None, + ), + out_enc_file=dict( + extensions=None, + ), + out_field=dict( + extensions=None, + ), + out_fieldcoef=dict( + extensions=None, + ), out_jacs=dict(), - out_logfile=dict(extensions=None), + out_logfile=dict( + extensions=None, + ), out_mats=dict(), - out_movpar=dict(extensions=None), + out_movpar=dict( + extensions=None, + ), out_warps=dict(), ) outputs = TOPUP.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py b/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py index 8eaf910ed6..3a825e4e45 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py +++ b/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py @@ -4,17 +4,48 @@ def test_TemporalFilter_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - highpass_sigma=dict(argstr="-bptf %.6f", position=4, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - internal_datatype=dict(argstr="-dt %s", position=1), - lowpass_sigma=dict(argstr="%.6f", position=5, usedefault=True), - nan2zeros=dict(argstr="-nan", position=3), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + highpass_sigma=dict( + argstr="-bptf %.6f", + position=4, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + lowpass_sigma=dict( + argstr="%.6f", + position=5, + usedefault=True, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = TemporalFilter.input_spec() @@ -25,7 +56,11 @@ def test_TemporalFilter_inputs(): def test_TemporalFilter_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TemporalFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_Threshold.py b/nipype/interfaces/fsl/tests/test_auto_Threshold.py index ee6bdfdaac..cc3446bd47 100644 --- 
a/nipype/interfaces/fsl/tests/test_auto_Threshold.py +++ b/nipype/interfaces/fsl/tests/test_auto_Threshold.py @@ -4,19 +4,50 @@ def test_Threshold_inputs(): input_map = dict( - args=dict(argstr="%s"), - direction=dict(usedefault=True), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - internal_datatype=dict(argstr="-dt %s", position=1), - nan2zeros=dict(argstr="-nan", position=3), + args=dict( + argstr="%s", + ), + direction=dict( + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), - thresh=dict(argstr="%s", mandatory=True, position=4), - use_nonzero_voxels=dict(requires=["use_robust_range"]), + thresh=dict( + argstr="%s", + mandatory=True, + position=4, + ), + use_nonzero_voxels=dict( + requires=["use_robust_range"], + ), use_robust_range=dict(), ) inputs = Threshold.input_spec() @@ -27,7 +58,11 @@ def test_Threshold_inputs(): def test_Threshold_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Threshold.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py b/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py index e3dc720e45..631741da49 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py +++ b/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py @@ -4,23 +4,52 @@ def test_TractSkeleton_inputs(): input_map = dict( - alt_data_file=dict(argstr="-a %s", extensions=None), - alt_skeleton=dict(argstr="-s %s", extensions=None), - args=dict(argstr="%s"), - data_file=dict(extensions=None), - distance_map=dict(extensions=None), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True), + alt_data_file=dict( + argstr="-a %s", + extensions=None, + ), + alt_skeleton=dict( + argstr="-s %s", + extensions=None, + ), + args=dict( + argstr="%s", + ), + data_file=dict( + extensions=None, + ), + distance_map=dict( + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), output_type=dict(), project_data=dict( argstr="-p %.3f %s %s %s %s", requires=["threshold", "distance_map", "data_file"], ), - projected_data=dict(extensions=None), - search_mask_file=dict(extensions=None, xor=["use_cingulum_mask"]), - skeleton_file=dict(argstr="-o %s"), + projected_data=dict( + extensions=None, + ), + search_mask_file=dict( + extensions=None, + xor=["use_cingulum_mask"], + ), + skeleton_file=dict( + argstr="-o %s", + ), threshold=dict(), - use_cingulum_mask=dict(usedefault=True, xor=["search_mask_file"]), + use_cingulum_mask=dict( + usedefault=True, + xor=["search_mask_file"], + ), ) inputs = TractSkeleton.input_spec() @@ -31,7 +60,12 @@ def test_TractSkeleton_inputs(): def test_TractSkeleton_outputs(): output_map = dict( - projected_data=dict(extensions=None), 
skeleton_file=dict(extensions=None) + projected_data=dict( + extensions=None, + ), + skeleton_file=dict( + extensions=None, + ), ) outputs = TractSkeleton.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Training.py b/nipype/interfaces/fsl/tests/test_auto_Training.py index 68cc67fe94..82a53d1408 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Training.py +++ b/nipype/interfaces/fsl/tests/test_auto_Training.py @@ -4,11 +4,26 @@ def test_Training_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - loo=dict(argstr="-l", position=2), - mel_icas=dict(argstr="%s", copyfile=False, position=-1), - trained_wts_filestem=dict(argstr="%s", position=1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + loo=dict( + argstr="-l", + position=2, + ), + mel_icas=dict( + argstr="%s", + copyfile=False, + position=-1, + ), + trained_wts_filestem=dict( + argstr="%s", + position=1, + ), ) inputs = Training.input_spec() @@ -18,7 +33,11 @@ def test_Training_inputs(): def test_Training_outputs(): - output_map = dict(trained_wts_file=dict(extensions=None)) + output_map = dict( + trained_wts_file=dict( + extensions=None, + ), + ) outputs = Training.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py b/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py index a47e552ad2..3ab307d6a8 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py +++ b/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py @@ -3,7 +3,13 @@ def test_TrainingSetCreator_inputs(): - input_map = dict(mel_icas_in=dict(argstr="%s", copyfile=False, position=-1)) + input_map = dict( + mel_icas_in=dict( + argstr="%s", + copyfile=False, + position=-1, + ), + ) inputs = TrainingSetCreator.input_spec() for key, metadata in list(input_map.items()): @@ -12,7 +18,13 @@ def test_TrainingSetCreator_inputs(): def test_TrainingSetCreator_outputs(): - output_map = dict(mel_icas_out=dict(argstr="%s", copyfile=False, position=-1)) + output_map = dict( + mel_icas_out=dict( + argstr="%s", + copyfile=False, + position=-1, + ), + ) outputs = TrainingSetCreator.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py b/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py index 334340bb85..cb27a76876 100644 --- a/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py @@ -4,16 +4,43 @@ def test_UnaryMaths_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - internal_datatype=dict(argstr="-dt %s", position=1), - nan2zeros=dict(argstr="-nan", position=3), - operation=dict(argstr="-%s", mandatory=True, position=4), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr="-dt %s", + position=1, + ), + nan2zeros=dict( + argstr="-nan", + position=3, + ), + operation=dict( + argstr="-%s", + mandatory=True, + position=4, + ), out_file=dict( - argstr="%s", extensions=None, genfile=True, hash_files=False, position=-2 + argstr="%s", + extensions=None, + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr="-odt %s", + position=-1, ), - 
output_datatype=dict(argstr="-odt %s", position=-1), output_type=dict(), ) inputs = UnaryMaths.input_spec() @@ -24,7 +51,11 @@ def test_UnaryMaths_inputs(): def test_UnaryMaths_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = UnaryMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_VecReg.py b/nipype/interfaces/fsl/tests/test_auto_VecReg.py index 423d3546fb..9564241cc3 100644 --- a/nipype/interfaces/fsl/tests/test_auto_VecReg.py +++ b/nipype/interfaces/fsl/tests/test_auto_VecReg.py @@ -4,19 +4,57 @@ def test_VecReg_inputs(): input_map = dict( - affine_mat=dict(argstr="-t %s", extensions=None), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True), - interpolation=dict(argstr="--interp=%s"), - mask=dict(argstr="-m %s", extensions=None), - out_file=dict(argstr="-o %s", extensions=None, genfile=True, hash_files=False), + affine_mat=dict( + argstr="-t %s", + extensions=None, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + interpolation=dict( + argstr="--interp=%s", + ), + mask=dict( + argstr="-m %s", + extensions=None, + ), + out_file=dict( + argstr="-o %s", + extensions=None, + genfile=True, + hash_files=False, + ), output_type=dict(), - ref_mask=dict(argstr="--refmask=%s", extensions=None), - ref_vol=dict(argstr="-r %s", extensions=None, mandatory=True), - rotation_mat=dict(argstr="--rotmat=%s", extensions=None), - rotation_warp=dict(argstr="--rotwarp=%s", extensions=None), - warp_field=dict(argstr="-w %s", extensions=None), + ref_mask=dict( + argstr="--refmask=%s", + extensions=None, + ), + ref_vol=dict( + argstr="-r %s", + extensions=None, + mandatory=True, + ), + rotation_mat=dict( + argstr="--rotmat=%s", + extensions=None, + ), + rotation_warp=dict( + argstr="--rotwarp=%s", + extensions=None, + ), + warp_field=dict( + argstr="-w %s", + extensions=None, + ), ) inputs = VecReg.input_spec() @@ -26,7 +64,11 @@ def test_VecReg_inputs(): def test_VecReg_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = VecReg.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py b/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py index 18659d0534..b7f7fc7d87 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py @@ -4,21 +4,53 @@ def test_WarpPoints_inputs(): input_map = dict( - args=dict(argstr="%s"), - coord_mm=dict(argstr="-mm", xor=["coord_vox"]), - coord_vox=dict(argstr="-vox", xor=["coord_mm"]), - dest_file=dict(argstr="-dest %s", extensions=None, mandatory=True), - environ=dict(nohash=True, usedefault=True), - in_coords=dict(argstr="%s", extensions=None, mandatory=True, position=-1), + args=dict( + argstr="%s", + ), + coord_mm=dict( + argstr="-mm", + xor=["coord_vox"], + ), + coord_vox=dict( + argstr="-vox", + xor=["coord_mm"], + ), + dest_file=dict( + argstr="-dest %s", + extensions=None, + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_coords=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), out_file=dict( extensions=None, name_source="in_coords", 
name_template="%s_warped", output_name="out_file", ), - src_file=dict(argstr="-src %s", extensions=None, mandatory=True), - warp_file=dict(argstr="-warp %s", extensions=None, xor=["xfm_file"]), - xfm_file=dict(argstr="-xfm %s", extensions=None, xor=["warp_file"]), + src_file=dict( + argstr="-src %s", + extensions=None, + mandatory=True, + ), + warp_file=dict( + argstr="-warp %s", + extensions=None, + xor=["xfm_file"], + ), + xfm_file=dict( + argstr="-xfm %s", + extensions=None, + xor=["warp_file"], + ), ) inputs = WarpPoints.input_spec() @@ -28,7 +60,11 @@ def test_WarpPoints_inputs(): def test_WarpPoints_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = WarpPoints.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py b/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py index 27c1999c23..b8f4cbef97 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py @@ -4,15 +4,47 @@ def test_WarpPointsFromStd_inputs(): input_map = dict( - args=dict(argstr="%s"), - coord_mm=dict(argstr="-mm", xor=["coord_vox"]), - coord_vox=dict(argstr="-vox", xor=["coord_mm"]), - environ=dict(nohash=True, usedefault=True), - img_file=dict(argstr="-img %s", extensions=None, mandatory=True), - in_coords=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - std_file=dict(argstr="-std %s", extensions=None, mandatory=True), - warp_file=dict(argstr="-warp %s", extensions=None, xor=["xfm_file"]), - xfm_file=dict(argstr="-xfm %s", extensions=None, xor=["warp_file"]), + args=dict( + argstr="%s", + ), + coord_mm=dict( + argstr="-mm", + xor=["coord_vox"], + ), + coord_vox=dict( + argstr="-vox", + xor=["coord_mm"], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + img_file=dict( + argstr="-img %s", + extensions=None, + mandatory=True, + ), + in_coords=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + std_file=dict( + argstr="-std %s", + extensions=None, + mandatory=True, + ), + warp_file=dict( + argstr="-warp %s", + extensions=None, + xor=["xfm_file"], + ), + xfm_file=dict( + argstr="-xfm %s", + extensions=None, + xor=["warp_file"], + ), ) inputs = WarpPointsFromStd.input_spec() @@ -22,7 +54,11 @@ def test_WarpPointsFromStd_inputs(): def test_WarpPointsFromStd_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = WarpPointsFromStd.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py b/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py index d123b2abf9..0b5881c776 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py @@ -4,22 +4,57 @@ def test_WarpPointsToStd_inputs(): input_map = dict( - args=dict(argstr="%s"), - coord_mm=dict(argstr="-mm", xor=["coord_vox"]), - coord_vox=dict(argstr="-vox", xor=["coord_mm"]), - environ=dict(nohash=True, usedefault=True), - img_file=dict(argstr="-img %s", extensions=None, mandatory=True), - in_coords=dict(argstr="%s", extensions=None, mandatory=True, position=-1), + args=dict( + argstr="%s", + ), + coord_mm=dict( + argstr="-mm", + xor=["coord_vox"], + ), + coord_vox=dict( + argstr="-vox", + xor=["coord_mm"], + ), + environ=dict( + nohash=True, + 
usedefault=True, + ), + img_file=dict( + argstr="-img %s", + extensions=None, + mandatory=True, + ), + in_coords=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), out_file=dict( extensions=None, name_source="in_coords", name_template="%s_warped", output_name="out_file", ), - premat_file=dict(argstr="-premat %s", extensions=None), - std_file=dict(argstr="-std %s", extensions=None, mandatory=True), - warp_file=dict(argstr="-warp %s", extensions=None, xor=["xfm_file"]), - xfm_file=dict(argstr="-xfm %s", extensions=None, xor=["warp_file"]), + premat_file=dict( + argstr="-premat %s", + extensions=None, + ), + std_file=dict( + argstr="-std %s", + extensions=None, + mandatory=True, + ), + warp_file=dict( + argstr="-warp %s", + extensions=None, + xor=["xfm_file"], + ), + xfm_file=dict( + argstr="-xfm %s", + extensions=None, + xor=["warp_file"], + ), ) inputs = WarpPointsToStd.input_spec() @@ -29,7 +64,11 @@ def test_WarpPointsToStd_inputs(): def test_WarpPointsToStd_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = WarpPointsToStd.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py b/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py index 9ede954f07..c8caa8da84 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py @@ -4,10 +4,21 @@ def test_WarpUtils_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="--in=%s", extensions=None, mandatory=True), - knot_space=dict(argstr="--knotspace=%d,%d,%d"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="--in=%s", + extensions=None, + mandatory=True, + ), + knot_space=dict( + argstr="--knotspace=%d,%d,%d", + ), out_file=dict( argstr="--out=%s", extensions=None, @@ -15,13 +26,29 @@ def test_WarpUtils_inputs(): output_name="out_file", position=-1, ), - out_format=dict(argstr="--outformat=%s"), - out_jacobian=dict(argstr="--jac=%s", extensions=None), + out_format=dict( + argstr="--outformat=%s", + ), + out_jacobian=dict( + argstr="--jac=%s", + extensions=None, + ), output_type=dict(), - reference=dict(argstr="--ref=%s", extensions=None, mandatory=True), - warp_resolution=dict(argstr="--warpres=%0.4f,%0.4f,%0.4f"), - with_affine=dict(argstr="--withaff"), - write_jacobian=dict(mandatory=True, usedefault=True), + reference=dict( + argstr="--ref=%s", + extensions=None, + mandatory=True, + ), + warp_resolution=dict( + argstr="--warpres=%0.4f,%0.4f,%0.4f", + ), + with_affine=dict( + argstr="--withaff", + ), + write_jacobian=dict( + mandatory=True, + usedefault=True, + ), ) inputs = WarpUtils.input_spec() @@ -32,7 +59,12 @@ def test_WarpUtils_inputs(): def test_WarpUtils_outputs(): output_map = dict( - out_file=dict(extensions=None), out_jacobian=dict(extensions=None) + out_file=dict( + extensions=None, + ), + out_jacobian=dict( + extensions=None, + ), ) outputs = WarpUtils.output_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_XFibres5.py b/nipype/interfaces/fsl/tests/test_auto_XFibres5.py index f8e6abe71b..349b2f52e7 100644 --- a/nipype/interfaces/fsl/tests/test_auto_XFibres5.py +++ b/nipype/interfaces/fsl/tests/test_auto_XFibres5.py @@ -4,35 +4,111 @@ def test_XFibres5_inputs(): input_map = dict( - all_ard=dict(argstr="--allard", xor=("no_ard", "all_ard")), - 
args=dict(argstr="%s"), - burn_in=dict(argstr="--burnin=%d", usedefault=True), - burn_in_no_ard=dict(argstr="--burnin_noard=%d", usedefault=True), - bvals=dict(argstr="--bvals=%s", extensions=None, mandatory=True), - bvecs=dict(argstr="--bvecs=%s", extensions=None, mandatory=True), - cnlinear=dict(argstr="--cnonlinear", xor=("no_spat", "non_linear", "cnlinear")), - dwi=dict(argstr="--data=%s", extensions=None, mandatory=True), - environ=dict(nohash=True, usedefault=True), - f0_ard=dict(argstr="--f0 --ardf0", xor=["f0_noard", "f0_ard", "all_ard"]), - f0_noard=dict(argstr="--f0", xor=["f0_noard", "f0_ard"]), - force_dir=dict(argstr="--forcedir", usedefault=True), - fudge=dict(argstr="--fudge=%d"), - gradnonlin=dict(argstr="--gradnonlin=%s", extensions=None), - logdir=dict(argstr="--logdir=%s", usedefault=True), - mask=dict(argstr="--mask=%s", extensions=None, mandatory=True), - model=dict(argstr="--model=%d"), - n_fibres=dict(argstr="--nfibres=%d", mandatory=True, usedefault=True), - n_jumps=dict(argstr="--njumps=%d", usedefault=True), - no_ard=dict(argstr="--noard", xor=("no_ard", "all_ard")), - no_spat=dict(argstr="--nospat", xor=("no_spat", "non_linear", "cnlinear")), + all_ard=dict( + argstr="--allard", + xor=("no_ard", "all_ard"), + ), + args=dict( + argstr="%s", + ), + burn_in=dict( + argstr="--burnin=%d", + usedefault=True, + ), + burn_in_no_ard=dict( + argstr="--burnin_noard=%d", + usedefault=True, + ), + bvals=dict( + argstr="--bvals=%s", + extensions=None, + mandatory=True, + ), + bvecs=dict( + argstr="--bvecs=%s", + extensions=None, + mandatory=True, + ), + cnlinear=dict( + argstr="--cnonlinear", + xor=("no_spat", "non_linear", "cnlinear"), + ), + dwi=dict( + argstr="--data=%s", + extensions=None, + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + f0_ard=dict( + argstr="--f0 --ardf0", + xor=["f0_noard", "f0_ard", "all_ard"], + ), + f0_noard=dict( + argstr="--f0", + xor=["f0_noard", "f0_ard"], + ), + force_dir=dict( + argstr="--forcedir", + usedefault=True, + ), + fudge=dict( + argstr="--fudge=%d", + ), + gradnonlin=dict( + argstr="--gradnonlin=%s", + extensions=None, + ), + logdir=dict( + argstr="--logdir=%s", + usedefault=True, + ), + mask=dict( + argstr="--mask=%s", + extensions=None, + mandatory=True, + ), + model=dict( + argstr="--model=%d", + ), + n_fibres=dict( + argstr="--nfibres=%d", + mandatory=True, + usedefault=True, + ), + n_jumps=dict( + argstr="--njumps=%d", + usedefault=True, + ), + no_ard=dict( + argstr="--noard", + xor=("no_ard", "all_ard"), + ), + no_spat=dict( + argstr="--nospat", + xor=("no_spat", "non_linear", "cnlinear"), + ), non_linear=dict( - argstr="--nonlinear", xor=("no_spat", "non_linear", "cnlinear") + argstr="--nonlinear", + xor=("no_spat", "non_linear", "cnlinear"), ), output_type=dict(), - rician=dict(argstr="--rician"), - sample_every=dict(argstr="--sampleevery=%d", usedefault=True), - seed=dict(argstr="--seed=%d"), - update_proposal_every=dict(argstr="--updateproposalevery=%d", usedefault=True), + rician=dict( + argstr="--rician", + ), + sample_every=dict( + argstr="--sampleevery=%d", + usedefault=True, + ), + seed=dict( + argstr="--seed=%d", + ), + update_proposal_every=dict( + argstr="--updateproposalevery=%d", + usedefault=True, + ), ) inputs = XFibres5.input_spec() @@ -45,10 +121,16 @@ def test_XFibres5_outputs(): output_map = dict( dyads=dict(), fsamples=dict(), - mean_S0samples=dict(extensions=None), - mean_dsamples=dict(extensions=None), + mean_S0samples=dict( + extensions=None, + ), + mean_dsamples=dict( + 
extensions=None, + ), mean_fsamples=dict(), - mean_tausamples=dict(extensions=None), + mean_tausamples=dict( + extensions=None, + ), phsamples=dict(), thsamples=dict(), ) diff --git a/nipype/interfaces/minc/tests/test_auto_Average.py b/nipype/interfaces/minc/tests/test_auto_Average.py index 2b3cbd66e4..7017967d61 100644 --- a/nipype/interfaces/minc/tests/test_auto_Average.py +++ b/nipype/interfaces/minc/tests/test_auto_Average.py @@ -4,18 +4,40 @@ def test_Average_inputs(): input_map = dict( - args=dict(argstr="%s"), - avgdim=dict(argstr="-avgdim %s"), - binarize=dict(argstr="-binarize"), - binrange=dict(argstr="-binrange %s %s"), - binvalue=dict(argstr="-binvalue %s"), + args=dict( + argstr="%s", + ), + avgdim=dict( + argstr="-avgdim %s", + ), + binarize=dict( + argstr="-binarize", + ), + binrange=dict( + argstr="-binrange %s %s", + ), + binvalue=dict( + argstr="-binvalue %s", + ), check_dimensions=dict( - argstr="-check_dimensions", xor=("check_dimensions", "no_check_dimensions") + argstr="-check_dimensions", + xor=("check_dimensions", "no_check_dimensions"), + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + copy_header=dict( + argstr="-copy_header", + xor=("copy_header", "no_copy_header"), + ), + debug=dict( + argstr="-debug", + ), + environ=dict( + nohash=True, + usedefault=True, ), - clobber=dict(argstr="-clobber", usedefault=True), - copy_header=dict(argstr="-copy_header", xor=("copy_header", "no_copy_header")), - debug=dict(argstr="-debug"), - environ=dict(nohash=True, usedefault=True), filelist=dict( argstr="-filelist %s", extensions=None, @@ -155,16 +177,26 @@ def test_Average_inputs(): sep=" ", xor=("input_files", "filelist"), ), - max_buffer_size_in_kb=dict(argstr="-max_buffer_size_in_kb %d", usedefault=True), + max_buffer_size_in_kb=dict( + argstr="-max_buffer_size_in_kb %d", + usedefault=True, + ), no_check_dimensions=dict( argstr="-nocheck_dimensions", xor=("check_dimensions", "no_check_dimensions"), ), no_copy_header=dict( - argstr="-nocopy_header", xor=("copy_header", "no_copy_header") + argstr="-nocopy_header", + xor=("copy_header", "no_copy_header"), + ), + nonormalize=dict( + argstr="-nonormalize", + xor=("normalize", "nonormalize"), + ), + normalize=dict( + argstr="-normalize", + xor=("normalize", "nonormalize"), ), - nonormalize=dict(argstr="-nonormalize", xor=("normalize", "nonormalize")), - normalize=dict(argstr="-normalize", xor=("normalize", "nonormalize")), output_file=dict( argstr="%s", extensions=None, @@ -174,13 +206,32 @@ def test_Average_inputs(): name_template="%s_averaged.mnc", position=-1, ), - quiet=dict(argstr="-quiet", xor=("verbose", "quiet")), - sdfile=dict(argstr="-sdfile %s", extensions=None), - two=dict(argstr="-2"), - verbose=dict(argstr="-verbose", xor=("verbose", "quiet")), - voxel_range=dict(argstr="-range %d %d"), - weights=dict(argstr="-weights %s", sep=","), - width_weighted=dict(argstr="-width_weighted", requires=("avgdim",)), + quiet=dict( + argstr="-quiet", + xor=("verbose", "quiet"), + ), + sdfile=dict( + argstr="-sdfile %s", + extensions=None, + ), + two=dict( + argstr="-2", + ), + verbose=dict( + argstr="-verbose", + xor=("verbose", "quiet"), + ), + voxel_range=dict( + argstr="-range %d %d", + ), + weights=dict( + argstr="-weights %s", + sep=",", + ), + width_weighted=dict( + argstr="-width_weighted", + requires=("avgdim",), + ), ) inputs = Average.input_spec() @@ -190,7 +241,11 @@ def test_Average_inputs(): def test_Average_outputs(): - output_map = dict(output_file=dict(extensions=None)) + output_map = dict( + 
output_file=dict( + extensions=None, + ), + ) outputs = Average.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_BBox.py b/nipype/interfaces/minc/tests/test_auto_BBox.py index eac3f53b48..532cb14d5f 100644 --- a/nipype/interfaces/minc/tests/test_auto_BBox.py +++ b/nipype/interfaces/minc/tests/test_auto_BBox.py @@ -4,14 +4,38 @@ def test_BBox_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - format_minccrop=dict(argstr="-minccrop"), - format_mincresample=dict(argstr="-mincresample"), - format_mincreshape=dict(argstr="-mincreshape"), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - one_line=dict(argstr="-one_line", xor=("one_line", "two_lines")), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + format_minccrop=dict( + argstr="-minccrop", + ), + format_mincresample=dict( + argstr="-mincresample", + ), + format_mincreshape=dict( + argstr="-mincreshape", + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + one_line=dict( + argstr="-one_line", + xor=("one_line", "two_lines"), + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), output_file=dict( extensions=None, hash_files=False, @@ -20,8 +44,13 @@ def test_BBox_inputs(): name_template="%s_bbox.txt", position=-1, ), - threshold=dict(argstr="-threshold"), - two_lines=dict(argstr="-two_lines", xor=("one_line", "two_lines")), + threshold=dict( + argstr="-threshold", + ), + two_lines=dict( + argstr="-two_lines", + xor=("one_line", "two_lines"), + ), ) inputs = BBox.input_spec() @@ -31,7 +60,11 @@ def test_BBox_inputs(): def test_BBox_outputs(): - output_map = dict(output_file=dict(extensions=None)) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = BBox.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Beast.py b/nipype/interfaces/minc/tests/test_auto_Beast.py index caa25231aa..487550a33a 100644 --- a/nipype/interfaces/minc/tests/test_auto_Beast.py +++ b/nipype/interfaces/minc/tests/test_auto_Beast.py @@ -4,20 +4,59 @@ def test_Beast_inputs(): input_map = dict( - abspath=dict(argstr="-abspath", usedefault=True), - args=dict(argstr="%s"), - clobber=dict(argstr="-clobber", usedefault=True), - confidence_level_alpha=dict(argstr="-alpha %s", usedefault=True), - configuration_file=dict(argstr="-configuration %s", extensions=None), - environ=dict(nohash=True, usedefault=True), - fill_holes=dict(argstr="-fill"), - flip_images=dict(argstr="-flip"), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - library_dir=dict(argstr="%s", mandatory=True, position=-3), - load_moments=dict(argstr="-load_moments"), - median_filter=dict(argstr="-median"), - nlm_filter=dict(argstr="-nlm_filter"), - number_selected_images=dict(argstr="-selection_num %s", usedefault=True), + abspath=dict( + argstr="-abspath", + usedefault=True, + ), + args=dict( + argstr="%s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + confidence_level_alpha=dict( + argstr="-alpha %s", + usedefault=True, + ), + configuration_file=dict( + argstr="-configuration %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fill_holes=dict( + argstr="-fill", + ), + flip_images=dict( + argstr="-flip", + 
), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + library_dir=dict( + argstr="%s", + mandatory=True, + position=-3, + ), + load_moments=dict( + argstr="-load_moments", + ), + median_filter=dict( + argstr="-median", + ), + nlm_filter=dict( + argstr="-nlm_filter", + ), + number_selected_images=dict( + argstr="-selection_num %s", + usedefault=True, + ), output_file=dict( argstr="%s", extensions=None, @@ -26,13 +65,32 @@ def test_Beast_inputs(): name_template="%s_beast_mask.mnc", position=-1, ), - patch_size=dict(argstr="-patch_size %s", usedefault=True), - probability_map=dict(argstr="-probability"), - same_resolution=dict(argstr="-same_resolution"), - search_area=dict(argstr="-search_area %s", usedefault=True), - smoothness_factor_beta=dict(argstr="-beta %s", usedefault=True), - threshold_patch_selection=dict(argstr="-threshold %s", usedefault=True), - voxel_size=dict(argstr="-voxel_size %s", usedefault=True), + patch_size=dict( + argstr="-patch_size %s", + usedefault=True, + ), + probability_map=dict( + argstr="-probability", + ), + same_resolution=dict( + argstr="-same_resolution", + ), + search_area=dict( + argstr="-search_area %s", + usedefault=True, + ), + smoothness_factor_beta=dict( + argstr="-beta %s", + usedefault=True, + ), + threshold_patch_selection=dict( + argstr="-threshold %s", + usedefault=True, + ), + voxel_size=dict( + argstr="-voxel_size %s", + usedefault=True, + ), ) inputs = Beast.input_spec() @@ -42,7 +100,11 @@ def test_Beast_inputs(): def test_Beast_outputs(): - output_map = dict(output_file=dict(extensions=None)) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Beast.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_BestLinReg.py b/nipype/interfaces/minc/tests/test_auto_BestLinReg.py index 09f7ef0958..57a8929878 100644 --- a/nipype/interfaces/minc/tests/test_auto_BestLinReg.py +++ b/nipype/interfaces/minc/tests/test_auto_BestLinReg.py @@ -4,9 +4,17 @@ def test_BestLinReg_inputs(): input_map = dict( - args=dict(argstr="%s"), - clobber=dict(argstr="-clobber", usedefault=True), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), output_mnc=dict( argstr="%s", extensions=None, @@ -27,9 +35,21 @@ def test_BestLinReg_inputs(): name_template="%s_bestlinreg.xfm", position=-2, ), - source=dict(argstr="%s", extensions=None, mandatory=True, position=-4), - target=dict(argstr="%s", extensions=None, mandatory=True, position=-3), - verbose=dict(argstr="-verbose"), + source=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-4, + ), + target=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + verbose=dict( + argstr="-verbose", + ), ) inputs = BestLinReg.input_spec() @@ -40,7 +60,12 @@ def test_BestLinReg_inputs(): def test_BestLinReg_outputs(): output_map = dict( - output_mnc=dict(extensions=None), output_xfm=dict(extensions=None) + output_mnc=dict( + extensions=None, + ), + output_xfm=dict( + extensions=None, + ), ) outputs = BestLinReg.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_BigAverage.py b/nipype/interfaces/minc/tests/test_auto_BigAverage.py index cea520ba5c..1eefb273d6 100644 --- a/nipype/interfaces/minc/tests/test_auto_BigAverage.py +++ b/nipype/interfaces/minc/tests/test_auto_BigAverage.py @@ -4,10 +4,23 @@ def 
test_BigAverage_inputs(): input_map = dict( - args=dict(argstr="%s"), - clobber=dict(argstr="--clobber", usedefault=True), - environ=dict(nohash=True, usedefault=True), - input_files=dict(argstr="%s", mandatory=True, position=-2, sep=" "), + args=dict( + argstr="%s", + ), + clobber=dict( + argstr="--clobber", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_files=dict( + argstr="%s", + mandatory=True, + position=-2, + sep=" ", + ), output_file=dict( argstr="%s", extensions=None, @@ -17,8 +30,12 @@ def test_BigAverage_inputs(): name_template="%s_bigaverage.mnc", position=-1, ), - output_float=dict(argstr="--float"), - robust=dict(argstr="-robust"), + output_float=dict( + argstr="--float", + ), + robust=dict( + argstr="-robust", + ), sd_file=dict( argstr="--sdfile %s", extensions=None, @@ -26,8 +43,12 @@ def test_BigAverage_inputs(): name_source=["input_files"], name_template="%s_bigaverage_stdev.mnc", ), - tmpdir=dict(argstr="-tmpdir %s"), - verbose=dict(argstr="--verbose"), + tmpdir=dict( + argstr="-tmpdir %s", + ), + verbose=dict( + argstr="--verbose", + ), ) inputs = BigAverage.input_spec() @@ -37,7 +58,14 @@ def test_BigAverage_inputs(): def test_BigAverage_outputs(): - output_map = dict(output_file=dict(extensions=None), sd_file=dict(extensions=None)) + output_map = dict( + output_file=dict( + extensions=None, + ), + sd_file=dict( + extensions=None, + ), + ) outputs = BigAverage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Blob.py b/nipype/interfaces/minc/tests/test_auto_Blob.py index a0ef171fd7..ae2b445c73 100644 --- a/nipype/interfaces/minc/tests/test_auto_Blob.py +++ b/nipype/interfaces/minc/tests/test_auto_Blob.py @@ -4,11 +4,25 @@ def test_Blob_inputs(): input_map = dict( - args=dict(argstr="%s"), - determinant=dict(argstr="-determinant"), - environ=dict(nohash=True, usedefault=True), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - magnitude=dict(argstr="-magnitude"), + args=dict( + argstr="%s", + ), + determinant=dict( + argstr="-determinant", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + magnitude=dict( + argstr="-magnitude", + ), output_file=dict( argstr="%s", extensions=None, @@ -18,8 +32,12 @@ def test_Blob_inputs(): name_template="%s_blob.mnc", position=-1, ), - trace=dict(argstr="-trace"), - translation=dict(argstr="-translation"), + trace=dict( + argstr="-trace", + ), + translation=dict( + argstr="-translation", + ), ) inputs = Blob.input_spec() @@ -29,7 +47,11 @@ def test_Blob_inputs(): def test_Blob_outputs(): - output_map = dict(output_file=dict(extensions=None)) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Blob.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Blur.py b/nipype/interfaces/minc/tests/test_auto_Blur.py index 591957a6e8..87647b5f62 100644 --- a/nipype/interfaces/minc/tests/test_auto_Blur.py +++ b/nipype/interfaces/minc/tests/test_auto_Blur.py @@ -4,25 +4,58 @@ def test_Blur_inputs(): input_map = dict( - args=dict(argstr="%s"), - clobber=dict(argstr="-clobber", usedefault=True), - dimensions=dict(argstr="-dimensions %s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + dimensions=dict( + argstr="-dimensions %s", + ), + 
environ=dict( + nohash=True, + usedefault=True, + ), fwhm=dict( - argstr="-fwhm %s", mandatory=True, xor=("fwhm", "fwhm3d", "standard_dev") + argstr="-fwhm %s", + mandatory=True, + xor=("fwhm", "fwhm3d", "standard_dev"), ), fwhm3d=dict( argstr="-3dfwhm %s %s %s", mandatory=True, xor=("fwhm", "fwhm3d", "standard_dev"), ), - gaussian=dict(argstr="-gaussian", xor=("gaussian", "rect")), - gradient=dict(argstr="-gradient"), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - no_apodize=dict(argstr="-no_apodize"), - output_file_base=dict(argstr="%s", extensions=None, position=-1), - partial=dict(argstr="-partial"), - rect=dict(argstr="-rect", xor=("gaussian", "rect")), + gaussian=dict( + argstr="-gaussian", + xor=("gaussian", "rect"), + ), + gradient=dict( + argstr="-gradient", + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + no_apodize=dict( + argstr="-no_apodize", + ), + output_file_base=dict( + argstr="%s", + extensions=None, + position=-1, + ), + partial=dict( + argstr="-partial", + ), + rect=dict( + argstr="-rect", + xor=("gaussian", "rect"), + ), standard_dev=dict( argstr="-standarddev %s", mandatory=True, @@ -38,12 +71,24 @@ def test_Blur_inputs(): def test_Blur_outputs(): output_map = dict( - gradient_dxyz=dict(extensions=None), - output_file=dict(extensions=None), - partial_dx=dict(extensions=None), - partial_dxyz=dict(extensions=None), - partial_dy=dict(extensions=None), - partial_dz=dict(extensions=None), + gradient_dxyz=dict( + extensions=None, + ), + output_file=dict( + extensions=None, + ), + partial_dx=dict( + extensions=None, + ), + partial_dxyz=dict( + extensions=None, + ), + partial_dy=dict( + extensions=None, + ), + partial_dz=dict( + extensions=None, + ), ) outputs = Blur.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Calc.py b/nipype/interfaces/minc/tests/test_auto_Calc.py index 48f2e4b6c4..d0d4f61fbe 100644 --- a/nipype/interfaces/minc/tests/test_auto_Calc.py +++ b/nipype/interfaces/minc/tests/test_auto_Calc.py @@ -4,15 +4,31 @@ def test_Calc_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), check_dimensions=dict( - argstr="-check_dimensions", xor=("check_dimensions", "no_check_dimensions") + argstr="-check_dimensions", + xor=("check_dimensions", "no_check_dimensions"), + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + copy_header=dict( + argstr="-copy_header", + xor=("copy_header", "no_copy_header"), + ), + debug=dict( + argstr="-debug", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + eval_width=dict( + argstr="-eval_width %s", ), - clobber=dict(argstr="-clobber", usedefault=True), - copy_header=dict(argstr="-copy_header", xor=("copy_header", "no_copy_header")), - debug=dict(argstr="-debug"), - environ=dict(nohash=True, usedefault=True), - eval_width=dict(argstr="-eval_width %s"), expfile=dict( argstr="-expfile %s", extensions=None, @@ -20,7 +36,9 @@ def test_Calc_inputs(): xor=("expression", "expfile"), ), expression=dict( - argstr="-expression '%s'", mandatory=True, xor=("expression", "expfile") + argstr="-expression '%s'", + mandatory=True, + xor=("expression", "expfile"), ), filelist=dict( argstr="-filelist %s", @@ -154,15 +172,25 @@ def test_Calc_inputs(): "format_unsigned", ), ), - ignore_nan=dict(argstr="-ignore_nan"), - input_files=dict(argstr="%s", mandatory=True, position=-2, sep=" "), - max_buffer_size_in_kb=dict(argstr="-max_buffer_size_in_kb %d"), + ignore_nan=dict( + argstr="-ignore_nan", + ), + 
input_files=dict( + argstr="%s", + mandatory=True, + position=-2, + sep=" ", + ), + max_buffer_size_in_kb=dict( + argstr="-max_buffer_size_in_kb %d", + ), no_check_dimensions=dict( argstr="-nocheck_dimensions", xor=("check_dimensions", "no_check_dimensions"), ), no_copy_header=dict( - argstr="-nocopy_header", xor=("copy_header", "no_copy_header") + argstr="-nocopy_header", + xor=("copy_header", "no_copy_header"), ), outfiles=dict(), output_file=dict( @@ -179,16 +207,30 @@ def test_Calc_inputs(): xor=("output_nan", "output_zero", "output_illegal_value"), ), output_nan=dict( - argstr="-nan", xor=("output_nan", "output_zero", "output_illegal_value") + argstr="-nan", + xor=("output_nan", "output_zero", "output_illegal_value"), ), output_zero=dict( - argstr="-zero", xor=("output_nan", "output_zero", "output_illegal_value") + argstr="-zero", + xor=("output_nan", "output_zero", "output_illegal_value"), + ), + propagate_nan=dict( + argstr="-propagate_nan", + ), + quiet=dict( + argstr="-quiet", + xor=("verbose", "quiet"), + ), + two=dict( + argstr="-2", + ), + verbose=dict( + argstr="-verbose", + xor=("verbose", "quiet"), + ), + voxel_range=dict( + argstr="-range %d %d", ), - propagate_nan=dict(argstr="-propagate_nan"), - quiet=dict(argstr="-quiet", xor=("verbose", "quiet")), - two=dict(argstr="-2"), - verbose=dict(argstr="-verbose", xor=("verbose", "quiet")), - voxel_range=dict(argstr="-range %d %d"), ) inputs = Calc.input_spec() @@ -198,7 +240,11 @@ def test_Calc_inputs(): def test_Calc_outputs(): - output_map = dict(output_file=dict(extensions=None)) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Calc.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Convert.py b/nipype/interfaces/minc/tests/test_auto_Convert.py index e63dde6520..57963b9b81 100644 --- a/nipype/interfaces/minc/tests/test_auto_Convert.py +++ b/nipype/interfaces/minc/tests/test_auto_Convert.py @@ -4,12 +4,29 @@ def test_Convert_inputs(): input_map = dict( - args=dict(argstr="%s"), - chunk=dict(argstr="-chunk %d"), - clobber=dict(argstr="-clobber", usedefault=True), - compression=dict(argstr="-compress %s"), - environ=dict(nohash=True, usedefault=True), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + args=dict( + argstr="%s", + ), + chunk=dict( + argstr="-chunk %d", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + compression=dict( + argstr="-compress %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), output_file=dict( argstr="%s", extensions=None, @@ -19,8 +36,12 @@ def test_Convert_inputs(): name_template="%s_convert_output.mnc", position=-1, ), - template=dict(argstr="-template"), - two=dict(argstr="-2"), + template=dict( + argstr="-template", + ), + two=dict( + argstr="-2", + ), ) inputs = Convert.input_spec() @@ -30,7 +51,11 @@ def test_Convert_inputs(): def test_Convert_outputs(): - output_map = dict(output_file=dict(extensions=None)) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Convert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Copy.py b/nipype/interfaces/minc/tests/test_auto_Copy.py index 3274e3b95e..a6bb527e7a 100644 --- a/nipype/interfaces/minc/tests/test_auto_Copy.py +++ b/nipype/interfaces/minc/tests/test_auto_Copy.py @@ -4,9 +4,19 @@ def test_Copy_inputs(): input_map = 
dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), output_file=dict( argstr="%s", extensions=None, @@ -16,8 +26,14 @@ def test_Copy_inputs(): name_template="%s_copy.mnc", position=-1, ), - pixel_values=dict(argstr="-pixel_values", xor=("pixel_values", "real_values")), - real_values=dict(argstr="-real_values", xor=("pixel_values", "real_values")), + pixel_values=dict( + argstr="-pixel_values", + xor=("pixel_values", "real_values"), + ), + real_values=dict( + argstr="-real_values", + xor=("pixel_values", "real_values"), + ), ) inputs = Copy.input_spec() @@ -27,7 +43,11 @@ def test_Copy_inputs(): def test_Copy_outputs(): - output_map = dict(output_file=dict(extensions=None)) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Copy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Dump.py b/nipype/interfaces/minc/tests/test_auto_Dump.py index 548a5e6eed..4253bc20cc 100644 --- a/nipype/interfaces/minc/tests/test_auto_Dump.py +++ b/nipype/interfaces/minc/tests/test_auto_Dump.py @@ -5,19 +5,46 @@ def test_Dump_inputs(): input_map = dict( annotations_brief=dict( - argstr="-b %s", xor=("annotations_brief", "annotations_full") + argstr="-b %s", + xor=("annotations_brief", "annotations_full"), ), annotations_full=dict( - argstr="-f %s", xor=("annotations_brief", "annotations_full") - ), - args=dict(argstr="%s"), - coordinate_data=dict(argstr="-c", xor=("coordinate_data", "header_data")), - environ=dict(nohash=True, usedefault=True), - header_data=dict(argstr="-h", xor=("coordinate_data", "header_data")), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - line_length=dict(argstr="-l %d"), - netcdf_name=dict(argstr="-n %s"), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + argstr="-f %s", + xor=("annotations_brief", "annotations_full"), + ), + args=dict( + argstr="%s", + ), + coordinate_data=dict( + argstr="-c", + xor=("coordinate_data", "header_data"), + ), + environ=dict( + nohash=True, + usedefault=True, + ), + header_data=dict( + argstr="-h", + xor=("coordinate_data", "header_data"), + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + line_length=dict( + argstr="-l %d", + ), + netcdf_name=dict( + argstr="-n %s", + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), output_file=dict( extensions=None, hash_files=False, @@ -26,8 +53,13 @@ def test_Dump_inputs(): name_template="%s_dump.txt", position=-1, ), - precision=dict(argstr="%s"), - variables=dict(argstr="-v %s", sep=","), + precision=dict( + argstr="%s", + ), + variables=dict( + argstr="-v %s", + sep=",", + ), ) inputs = Dump.input_spec() @@ -37,7 +69,11 @@ def test_Dump_inputs(): def test_Dump_outputs(): - output_map = dict(output_file=dict(extensions=None)) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Dump.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Extract.py b/nipype/interfaces/minc/tests/test_auto_Extract.py index ee5f85bf7b..6c34b443f0 100644 --- a/nipype/interfaces/minc/tests/test_auto_Extract.py +++ 
b/nipype/interfaces/minc/tests/test_auto_Extract.py @@ -4,9 +4,17 @@ def test_Extract_inputs(): input_map = dict( - args=dict(argstr="%s"), - count=dict(argstr="-count %s", sep=","), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + count=dict( + argstr="-count %s", + sep=",", + ), + environ=dict( + nohash=True, + usedefault=True, + ), flip_any_direction=dict( argstr="-any_direction", xor=( @@ -67,13 +75,35 @@ def test_Extract_inputs(): argstr="+zdirection", xor=("flip_z_positive", "flip_z_negative", "flip_z_any"), ), - image_maximum=dict(argstr="-image_maximum %s"), - image_minimum=dict(argstr="-image_minimum %s"), - image_range=dict(argstr="-image_range %s %s"), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - nonormalize=dict(argstr="-nonormalize", xor=("normalize", "nonormalize")), - normalize=dict(argstr="-normalize", xor=("normalize", "nonormalize")), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + image_maximum=dict( + argstr="-image_maximum %s", + ), + image_minimum=dict( + argstr="-image_minimum %s", + ), + image_range=dict( + argstr="-image_range %s %s", + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + nonormalize=dict( + argstr="-nonormalize", + xor=("normalize", "nonormalize"), + ), + normalize=dict( + argstr="-normalize", + xor=("normalize", "nonormalize"), + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), output_file=dict( extensions=None, hash_files=False, @@ -82,7 +112,10 @@ def test_Extract_inputs(): name_template="%s.raw", position=-1, ), - start=dict(argstr="-start %s", sep=","), + start=dict( + argstr="-start %s", + sep=",", + ), write_ascii=dict( argstr="-ascii", xor=( @@ -173,7 +206,9 @@ def test_Extract_inputs(): "write_unsigned", ), ), - write_range=dict(argstr="-range %s %s"), + write_range=dict( + argstr="-range %s %s", + ), write_short=dict( argstr="-short", xor=( @@ -189,8 +224,14 @@ def test_Extract_inputs(): "write_unsigned", ), ), - write_signed=dict(argstr="-signed", xor=("write_signed", "write_unsigned")), - write_unsigned=dict(argstr="-unsigned", xor=("write_signed", "write_unsigned")), + write_signed=dict( + argstr="-signed", + xor=("write_signed", "write_unsigned"), + ), + write_unsigned=dict( + argstr="-unsigned", + xor=("write_signed", "write_unsigned"), + ), ) inputs = Extract.input_spec() @@ -200,7 +241,11 @@ def test_Extract_inputs(): def test_Extract_outputs(): - output_map = dict(output_file=dict(extensions=None)) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Extract.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py b/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py index 0ea0a5befc..91bcc57e3c 100644 --- a/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py +++ b/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py @@ -4,11 +4,24 @@ def test_Gennlxfm_inputs(): input_map = dict( - args=dict(argstr="%s"), - clobber=dict(argstr="-clobber", usedefault=True), - environ=dict(nohash=True, usedefault=True), - ident=dict(argstr="-ident"), - like=dict(argstr="-like %s", extensions=None), + args=dict( + argstr="%s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ident=dict( + argstr="-ident", + ), + like=dict( + argstr="-like %s", + extensions=None, + ), output_file=dict( argstr="%s", 
extensions=None, @@ -18,8 +31,12 @@ def test_Gennlxfm_inputs(): name_template="%s_gennlxfm.xfm", position=-1, ), - step=dict(argstr="-step %s"), - verbose=dict(argstr="-verbose"), + step=dict( + argstr="-step %s", + ), + verbose=dict( + argstr="-verbose", + ), ) inputs = Gennlxfm.input_spec() @@ -30,7 +47,12 @@ def test_Gennlxfm_inputs(): def test_Gennlxfm_outputs(): output_map = dict( - output_file=dict(extensions=None), output_grid=dict(extensions=None) + output_file=dict( + extensions=None, + ), + output_grid=dict( + extensions=None, + ), ) outputs = Gennlxfm.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Math.py b/nipype/interfaces/minc/tests/test_auto_Math.py index b8f48beb23..86858235cd 100644 --- a/nipype/interfaces/minc/tests/test_auto_Math.py +++ b/nipype/interfaces/minc/tests/test_auto_Math.py @@ -4,25 +4,61 @@ def test_Math_inputs(): input_map = dict( - abs=dict(argstr="-abs"), - args=dict(argstr="%s"), - calc_add=dict(argstr="-add"), - calc_and=dict(argstr="-and"), - calc_div=dict(argstr="-div"), - calc_mul=dict(argstr="-mult"), - calc_not=dict(argstr="-not"), - calc_or=dict(argstr="-or"), - calc_sub=dict(argstr="-sub"), + abs=dict( + argstr="-abs", + ), + args=dict( + argstr="%s", + ), + calc_add=dict( + argstr="-add", + ), + calc_and=dict( + argstr="-and", + ), + calc_div=dict( + argstr="-div", + ), + calc_mul=dict( + argstr="-mult", + ), + calc_not=dict( + argstr="-not", + ), + calc_or=dict( + argstr="-or", + ), + calc_sub=dict( + argstr="-sub", + ), check_dimensions=dict( - argstr="-check_dimensions", xor=("check_dimensions", "no_check_dimensions") - ), - clamp=dict(argstr="-clamp -const2 %s %s"), - clobber=dict(argstr="-clobber", usedefault=True), - copy_header=dict(argstr="-copy_header", xor=("copy_header", "no_copy_header")), - count_valid=dict(argstr="-count_valid"), - dimension=dict(argstr="-dimension %s"), - environ=dict(nohash=True, usedefault=True), - exp=dict(argstr="-exp -const2 %s %s"), + argstr="-check_dimensions", + xor=("check_dimensions", "no_check_dimensions"), + ), + clamp=dict( + argstr="-clamp -const2 %s %s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + copy_header=dict( + argstr="-copy_header", + xor=("copy_header", "no_copy_header"), + ), + count_valid=dict( + argstr="-count_valid", + ), + dimension=dict( + argstr="-dimension %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + exp=dict( + argstr="-exp -const2 %s %s", + ), filelist=dict( argstr="-filelist %s", extensions=None, @@ -155,7 +191,9 @@ def test_Math_inputs(): "format_unsigned", ), ), - ignore_nan=dict(argstr="-ignore_nan"), + ignore_nan=dict( + argstr="-ignore_nan", + ), input_files=dict( argstr="%s", mandatory=True, @@ -163,21 +201,39 @@ def test_Math_inputs(): sep=" ", xor=("input_files", "filelist"), ), - invert=dict(argstr="-invert -const %s"), - isnan=dict(argstr="-isnan"), - log=dict(argstr="-log -const2 %s %s"), - max_buffer_size_in_kb=dict(argstr="-max_buffer_size_in_kb %d", usedefault=True), - maximum=dict(argstr="-maximum"), - minimum=dict(argstr="-minimum"), - nisnan=dict(argstr="-nisnan"), + invert=dict( + argstr="-invert -const %s", + ), + isnan=dict( + argstr="-isnan", + ), + log=dict( + argstr="-log -const2 %s %s", + ), + max_buffer_size_in_kb=dict( + argstr="-max_buffer_size_in_kb %d", + usedefault=True, + ), + maximum=dict( + argstr="-maximum", + ), + minimum=dict( + argstr="-minimum", + ), + nisnan=dict( + argstr="-nisnan", + ), no_check_dimensions=dict( argstr="-nocheck_dimensions", xor=("check_dimensions", 
"no_check_dimensions"), ), no_copy_header=dict( - argstr="-nocopy_header", xor=("copy_header", "no_copy_header") + argstr="-nocopy_header", + xor=("copy_header", "no_copy_header"), + ), + nsegment=dict( + argstr="-nsegment -const2 %s %s", ), - nsegment=dict(argstr="-nsegment -const2 %s %s"), output_file=dict( argstr="%s", extensions=None, @@ -192,25 +248,55 @@ def test_Math_inputs(): xor=("output_nan", "output_zero", "output_illegal_value"), ), output_nan=dict( - argstr="-nan", xor=("output_nan", "output_zero", "output_illegal_value") + argstr="-nan", + xor=("output_nan", "output_zero", "output_illegal_value"), ), output_zero=dict( - argstr="-zero", xor=("output_nan", "output_zero", "output_illegal_value") - ), - percentdiff=dict(argstr="-percentdiff"), - propagate_nan=dict(argstr="-propagate_nan"), - scale=dict(argstr="-scale -const2 %s %s"), - segment=dict(argstr="-segment -const2 %s %s"), - sqrt=dict(argstr="-sqrt"), - square=dict(argstr="-square"), - test_eq=dict(argstr="-eq"), - test_ge=dict(argstr="-ge"), - test_gt=dict(argstr="-gt"), - test_le=dict(argstr="-le"), - test_lt=dict(argstr="-lt"), - test_ne=dict(argstr="-ne"), - two=dict(argstr="-2"), - voxel_range=dict(argstr="-range %d %d"), + argstr="-zero", + xor=("output_nan", "output_zero", "output_illegal_value"), + ), + percentdiff=dict( + argstr="-percentdiff", + ), + propagate_nan=dict( + argstr="-propagate_nan", + ), + scale=dict( + argstr="-scale -const2 %s %s", + ), + segment=dict( + argstr="-segment -const2 %s %s", + ), + sqrt=dict( + argstr="-sqrt", + ), + square=dict( + argstr="-square", + ), + test_eq=dict( + argstr="-eq", + ), + test_ge=dict( + argstr="-ge", + ), + test_gt=dict( + argstr="-gt", + ), + test_le=dict( + argstr="-le", + ), + test_lt=dict( + argstr="-lt", + ), + test_ne=dict( + argstr="-ne", + ), + two=dict( + argstr="-2", + ), + voxel_range=dict( + argstr="-range %d %d", + ), ) inputs = Math.input_spec() @@ -220,7 +306,11 @@ def test_Math_inputs(): def test_Math_outputs(): - output_map = dict(output_file=dict(extensions=None)) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Math.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_NlpFit.py b/nipype/interfaces/minc/tests/test_auto_NlpFit.py index d054c8cafd..39b1df4743 100644 --- a/nipype/interfaces/minc/tests/test_auto_NlpFit.py +++ b/nipype/interfaces/minc/tests/test_auto_NlpFit.py @@ -4,17 +4,54 @@ def test_NlpFit_inputs(): input_map = dict( - args=dict(argstr="%s"), - clobber=dict(argstr="-clobber", usedefault=True), - config_file=dict(argstr="-config_file %s", extensions=None, mandatory=True), - environ=dict(nohash=True, usedefault=True), - init_xfm=dict(argstr="-init_xfm %s", extensions=None, mandatory=True), + args=dict( + argstr="%s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + config_file=dict( + argstr="-config_file %s", + extensions=None, + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + init_xfm=dict( + argstr="-init_xfm %s", + extensions=None, + mandatory=True, + ), input_grid_files=dict(), - output_xfm=dict(argstr="%s", extensions=None, genfile=True, position=-1), - source=dict(argstr="%s", extensions=None, mandatory=True, position=-3), - source_mask=dict(argstr="-source_mask %s", extensions=None, mandatory=True), - target=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - verbose=dict(argstr="-verbose"), + output_xfm=dict( + argstr="%s", + extensions=None, + genfile=True, + 
position=-1, + ), + source=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + source_mask=dict( + argstr="-source_mask %s", + extensions=None, + mandatory=True, + ), + target=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + verbose=dict( + argstr="-verbose", + ), ) inputs = NlpFit.input_spec() @@ -25,7 +62,12 @@ def test_NlpFit_inputs(): def test_NlpFit_outputs(): output_map = dict( - output_grid=dict(extensions=None), output_xfm=dict(extensions=None) + output_grid=dict( + extensions=None, + ), + output_xfm=dict( + extensions=None, + ), ) outputs = NlpFit.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Norm.py b/nipype/interfaces/minc/tests/test_auto_Norm.py index 4dee5debd3..f8c2060250 100644 --- a/nipype/interfaces/minc/tests/test_auto_Norm.py +++ b/nipype/interfaces/minc/tests/test_auto_Norm.py @@ -4,16 +4,43 @@ def test_Norm_inputs(): input_map = dict( - args=dict(argstr="%s"), - clamp=dict(argstr="-clamp", usedefault=True), - clobber=dict(argstr="-clobber", usedefault=True), - cutoff=dict(argstr="-cutoff %s"), - environ=dict(nohash=True, usedefault=True), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - lower=dict(argstr="-lower %s"), - mask=dict(argstr="-mask %s", extensions=None), - out_ceil=dict(argstr="-out_ceil %s"), - out_floor=dict(argstr="-out_floor %s"), + args=dict( + argstr="%s", + ), + clamp=dict( + argstr="-clamp", + usedefault=True, + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + cutoff=dict( + argstr="-cutoff %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + lower=dict( + argstr="-lower %s", + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), + out_ceil=dict( + argstr="-out_ceil %s", + ), + out_floor=dict( + argstr="-out_floor %s", + ), output_file=dict( argstr="%s", extensions=None, @@ -30,11 +57,21 @@ def test_Norm_inputs(): name_source=["input_file"], name_template="%s_norm_threshold_mask.mnc", ), - threshold=dict(argstr="-threshold"), - threshold_blur=dict(argstr="-threshold_blur %s"), - threshold_bmt=dict(argstr="-threshold_bmt"), - threshold_perc=dict(argstr="-threshold_perc %s"), - upper=dict(argstr="-upper %s"), + threshold=dict( + argstr="-threshold", + ), + threshold_blur=dict( + argstr="-threshold_blur %s", + ), + threshold_bmt=dict( + argstr="-threshold_bmt", + ), + threshold_perc=dict( + argstr="-threshold_perc %s", + ), + upper=dict( + argstr="-upper %s", + ), ) inputs = Norm.input_spec() @@ -45,7 +82,12 @@ def test_Norm_inputs(): def test_Norm_outputs(): output_map = dict( - output_file=dict(extensions=None), output_threshold_mask=dict(extensions=None) + output_file=dict( + extensions=None, + ), + output_threshold_mask=dict( + extensions=None, + ), ) outputs = Norm.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Pik.py b/nipype/interfaces/minc/tests/test_auto_Pik.py index a9191ff17b..3323ef74a1 100644 --- a/nipype/interfaces/minc/tests/test_auto_Pik.py +++ b/nipype/interfaces/minc/tests/test_auto_Pik.py @@ -4,23 +4,50 @@ def test_Pik_inputs(): input_map = dict( - annotated_bar=dict(argstr="--anot_bar"), - args=dict(argstr="%s"), - auto_range=dict(argstr="--auto_range", xor=("image_range", "auto_range")), - clobber=dict(argstr="-clobber", usedefault=True), - depth=dict(argstr="--depth %s"), - environ=dict(nohash=True, usedefault=True), + annotated_bar=dict( + argstr="--anot_bar", + ), + 
args=dict( + argstr="%s", + ), + auto_range=dict( + argstr="--auto_range", + xor=("image_range", "auto_range"), + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + depth=dict( + argstr="--depth %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), horizontal_triplanar_view=dict( argstr="--horizontal", xor=("vertical_triplanar_view", "horizontal_triplanar_view"), ), image_range=dict( - argstr="--image_range %s %s", xor=("image_range", "auto_range") + argstr="--image_range %s %s", + xor=("image_range", "auto_range"), + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + jpg=dict( + xor=("jpg", "png"), + ), + lookup=dict( + argstr="--lookup %s", + ), + minc_range=dict( + argstr="--range %s %s", ), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - jpg=dict(xor=("jpg", "png")), - lookup=dict(argstr="--lookup %s"), - minc_range=dict(argstr="--range %s %s"), output_file=dict( argstr="%s", extensions=None, @@ -31,23 +58,54 @@ def test_Pik_inputs(): name_template="%s.png", position=-1, ), - png=dict(xor=("jpg", "png")), - sagittal_offset=dict(argstr="--sagittal_offset %s"), - sagittal_offset_perc=dict(argstr="--sagittal_offset_perc %d"), - scale=dict(argstr="--scale %s", usedefault=True), - slice_x=dict(argstr="-x", xor=("slice_z", "slice_y", "slice_x")), - slice_y=dict(argstr="-y", xor=("slice_z", "slice_y", "slice_x")), - slice_z=dict(argstr="-z", xor=("slice_z", "slice_y", "slice_x")), - start=dict(argstr="--slice %s"), - tile_size=dict(argstr="--tilesize %s"), - title=dict(argstr="%s"), - title_size=dict(argstr="--title_size %s", requires=["title"]), - triplanar=dict(argstr="--triplanar"), + png=dict( + xor=("jpg", "png"), + ), + sagittal_offset=dict( + argstr="--sagittal_offset %s", + ), + sagittal_offset_perc=dict( + argstr="--sagittal_offset_perc %d", + ), + scale=dict( + argstr="--scale %s", + usedefault=True, + ), + slice_x=dict( + argstr="-x", + xor=("slice_z", "slice_y", "slice_x"), + ), + slice_y=dict( + argstr="-y", + xor=("slice_z", "slice_y", "slice_x"), + ), + slice_z=dict( + argstr="-z", + xor=("slice_z", "slice_y", "slice_x"), + ), + start=dict( + argstr="--slice %s", + ), + tile_size=dict( + argstr="--tilesize %s", + ), + title=dict( + argstr="%s", + ), + title_size=dict( + argstr="--title_size %s", + requires=["title"], + ), + triplanar=dict( + argstr="--triplanar", + ), vertical_triplanar_view=dict( argstr="--vertical", xor=("vertical_triplanar_view", "horizontal_triplanar_view"), ), - width=dict(argstr="--width %s"), + width=dict( + argstr="--width %s", + ), ) inputs = Pik.input_spec() @@ -57,7 +115,11 @@ def test_Pik_inputs(): def test_Pik_outputs(): - output_map = dict(output_file=dict(extensions=None)) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Pik.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Resample.py b/nipype/interfaces/minc/tests/test_auto_Resample.py index 7454d38115..59f2ae180d 100644 --- a/nipype/interfaces/minc/tests/test_auto_Resample.py +++ b/nipype/interfaces/minc/tests/test_auto_Resample.py @@ -4,15 +4,33 @@ def test_Resample_inputs(): input_map = dict( - args=dict(argstr="%s"), - clobber=dict(argstr="-clobber", usedefault=True), + args=dict( + argstr="%s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), coronal_slices=dict( - argstr="-coronal", xor=("transverse", "sagittal", "coronal") + argstr="-coronal", + xor=("transverse", "sagittal", 
"coronal"), + ), + dircos=dict( + argstr="-dircos %s %s %s", + xor=("nelements", "nelements_x_y_or_z"), + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fill=dict( + argstr="-fill", + xor=("nofill", "fill"), + ), + fill_value=dict( + argstr="-fillvalue %s", + requires=["fill"], ), - dircos=dict(argstr="-dircos %s %s %s", xor=("nelements", "nelements_x_y_or_z")), - environ=dict(nohash=True, usedefault=True), - fill=dict(argstr="-fill", xor=("nofill", "fill")), - fill_value=dict(argstr="-fillvalue %s", requires=["fill"]), format_byte=dict( argstr="-byte", xor=( @@ -118,15 +136,27 @@ def test_Resample_inputs(): ), ), half_width_sinc_window=dict( - argstr="-width %s", requires=["sinc_interpolation"] + argstr="-width %s", + requires=["sinc_interpolation"], + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, ), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), input_grid_files=dict(), - invert_transformation=dict(argstr="-invert_transformation"), + invert_transformation=dict( + argstr="-invert_transformation", + ), keep_real_range=dict( - argstr="-keep_real_range", xor=("keep_real_range", "nokeep_real_range") + argstr="-keep_real_range", + xor=("keep_real_range", "nokeep_real_range"), + ), + like=dict( + argstr="-like %s", + extensions=None, ), - like=dict(argstr="-like %s", extensions=None), nearest_neighbour_interpolation=dict( argstr="-nearest_neighbour", xor=( @@ -137,16 +167,24 @@ def test_Resample_inputs(): ), ), nelements=dict( - argstr="-nelements %s %s %s", xor=("nelements", "nelements_x_y_or_z") + argstr="-nelements %s %s %s", + xor=("nelements", "nelements_x_y_or_z"), + ), + no_fill=dict( + argstr="-nofill", + xor=("nofill", "fill"), ), - no_fill=dict(argstr="-nofill", xor=("nofill", "fill")), no_input_sampling=dict( - argstr="-use_input_sampling", xor=("vio_transform", "no_input_sampling") + argstr="-use_input_sampling", + xor=("vio_transform", "no_input_sampling"), ), nokeep_real_range=dict( - argstr="-nokeep_real_range", xor=("keep_real_range", "nokeep_real_range") + argstr="-nokeep_real_range", + xor=("keep_real_range", "nokeep_real_range"), + ), + origin=dict( + argstr="-origin %s %s %s", ), - origin=dict(argstr="-origin %s %s %s"), output_file=dict( argstr="%s", extensions=None, @@ -156,9 +194,12 @@ def test_Resample_inputs(): name_template="%s_resample.mnc", position=-1, ), - output_range=dict(argstr="-range %s %s"), + output_range=dict( + argstr="-range %s %s", + ), sagittal_slices=dict( - argstr="-sagittal", xor=("transverse", "sagittal", "coronal") + argstr="-sagittal", + xor=("transverse", "sagittal", "coronal"), ), sinc_interpolation=dict( argstr="-sinc", @@ -179,14 +220,30 @@ def test_Resample_inputs(): requires=["sinc_interpolation"], xor=("sinc_window_hanning", "sinc_window_hamming"), ), - spacetype=dict(argstr="-spacetype %s"), - standard_sampling=dict(argstr="-standard_sampling"), - start=dict(argstr="-start %s %s %s", xor=("nelements", "nelements_x_y_or_z")), - step=dict(argstr="-step %s %s %s", xor=("nelements", "nelements_x_y_or_z")), - talairach=dict(argstr="-talairach"), - transformation=dict(argstr="-transformation %s", extensions=None), + spacetype=dict( + argstr="-spacetype %s", + ), + standard_sampling=dict( + argstr="-standard_sampling", + ), + start=dict( + argstr="-start %s %s %s", + xor=("nelements", "nelements_x_y_or_z"), + ), + step=dict( + argstr="-step %s %s %s", + xor=("nelements", "nelements_x_y_or_z"), + ), + talairach=dict( + argstr="-talairach", + ), + 
transformation=dict( + argstr="-transformation %s", + extensions=None, + ), transverse_slices=dict( - argstr="-transverse", xor=("transverse", "sagittal", "coronal") + argstr="-transverse", + xor=("transverse", "sagittal", "coronal"), ), tricubic_interpolation=dict( argstr="-tricubic", @@ -206,10 +263,15 @@ def test_Resample_inputs(): "sinc_interpolation", ), ), - two=dict(argstr="-2"), - units=dict(argstr="-units %s"), + two=dict( + argstr="-2", + ), + units=dict( + argstr="-units %s", + ), vio_transform=dict( - argstr="-tfm_input_sampling", xor=("vio_transform", "no_input_sampling") + argstr="-tfm_input_sampling", + xor=("vio_transform", "no_input_sampling"), ), xdircos=dict( argstr="-xdircos %s", @@ -280,7 +342,11 @@ def test_Resample_inputs(): def test_Resample_outputs(): - output_map = dict(output_file=dict(extensions=None)) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Resample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Reshape.py b/nipype/interfaces/minc/tests/test_auto_Reshape.py index 3f2be25b13..50286b41e5 100644 --- a/nipype/interfaces/minc/tests/test_auto_Reshape.py +++ b/nipype/interfaces/minc/tests/test_auto_Reshape.py @@ -4,10 +4,23 @@ def test_Reshape_inputs(): input_map = dict( - args=dict(argstr="%s"), - clobber=dict(argstr="-clobber", usedefault=True), - environ=dict(nohash=True, usedefault=True), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + args=dict( + argstr="%s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), output_file=dict( argstr="%s", extensions=None, @@ -17,8 +30,12 @@ def test_Reshape_inputs(): name_template="%s_reshape.mnc", position=-1, ), - verbose=dict(argstr="-verbose"), - write_short=dict(argstr="-short"), + verbose=dict( + argstr="-verbose", + ), + write_short=dict( + argstr="-short", + ), ) inputs = Reshape.input_spec() @@ -28,7 +45,11 @@ def test_Reshape_inputs(): def test_Reshape_outputs(): - output_map = dict(output_file=dict(extensions=None)) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Reshape.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_ToEcat.py b/nipype/interfaces/minc/tests/test_auto_ToEcat.py index 75bc57b662..f6ce521232 100644 --- a/nipype/interfaces/minc/tests/test_auto_ToEcat.py +++ b/nipype/interfaces/minc/tests/test_auto_ToEcat.py @@ -4,18 +4,40 @@ def test_ToEcat_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - ignore_acquisition_variable=dict(argstr="-ignore_acquisition_variable"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ignore_acquisition_variable=dict( + argstr="-ignore_acquisition_variable", + ), ignore_ecat_acquisition_variable=dict( - argstr="-ignore_ecat_acquisition_variable" - ), - ignore_ecat_main=dict(argstr="-ignore_ecat_main"), - ignore_ecat_subheader_variable=dict(argstr="-ignore_ecat_subheader_variable"), - ignore_patient_variable=dict(argstr="-ignore_patient_variable"), - ignore_study_variable=dict(argstr="-ignore_study_variable"), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - no_decay_corr_fctr=dict(argstr="-no_decay_corr_fctr"), + argstr="-ignore_ecat_acquisition_variable", + ), + 
ignore_ecat_main=dict( + argstr="-ignore_ecat_main", + ), + ignore_ecat_subheader_variable=dict( + argstr="-ignore_ecat_subheader_variable", + ), + ignore_patient_variable=dict( + argstr="-ignore_patient_variable", + ), + ignore_study_variable=dict( + argstr="-ignore_study_variable", + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + no_decay_corr_fctr=dict( + argstr="-no_decay_corr_fctr", + ), output_file=dict( argstr="%s", extensions=None, @@ -26,7 +48,9 @@ def test_ToEcat_inputs(): name_template="%s_to_ecat.v", position=-1, ), - voxels_as_integers=dict(argstr="-label"), + voxels_as_integers=dict( + argstr="-label", + ), ) inputs = ToEcat.input_spec() @@ -36,7 +60,11 @@ def test_ToEcat_inputs(): def test_ToEcat_outputs(): - output_map = dict(output_file=dict(extensions=None)) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = ToEcat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_ToRaw.py b/nipype/interfaces/minc/tests/test_auto_ToRaw.py index a2bb27dbfd..39940170f6 100644 --- a/nipype/interfaces/minc/tests/test_auto_ToRaw.py +++ b/nipype/interfaces/minc/tests/test_auto_ToRaw.py @@ -4,12 +4,33 @@ def test_ToRaw_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - nonormalize=dict(argstr="-nonormalize", xor=("normalize", "nonormalize")), - normalize=dict(argstr="-normalize", xor=("normalize", "nonormalize")), - out_file=dict(argstr="> %s", extensions=None, genfile=True, position=-1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + nonormalize=dict( + argstr="-nonormalize", + xor=("normalize", "nonormalize"), + ), + normalize=dict( + argstr="-normalize", + xor=("normalize", "nonormalize"), + ), + out_file=dict( + argstr="> %s", + extensions=None, + genfile=True, + position=-1, + ), output_file=dict( extensions=None, hash_files=False, @@ -73,7 +94,9 @@ def test_ToRaw_inputs(): "write_double", ), ), - write_range=dict(argstr="-range %s %s"), + write_range=dict( + argstr="-range %s %s", + ), write_short=dict( argstr="-short", xor=( @@ -85,8 +108,14 @@ def test_ToRaw_inputs(): "write_double", ), ), - write_signed=dict(argstr="-signed", xor=("write_signed", "write_unsigned")), - write_unsigned=dict(argstr="-unsigned", xor=("write_signed", "write_unsigned")), + write_signed=dict( + argstr="-signed", + xor=("write_signed", "write_unsigned"), + ), + write_unsigned=dict( + argstr="-unsigned", + xor=("write_signed", "write_unsigned"), + ), ) inputs = ToRaw.input_spec() @@ -96,7 +125,11 @@ def test_ToRaw_inputs(): def test_ToRaw_outputs(): - output_map = dict(output_file=dict(extensions=None)) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = ToRaw.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_VolSymm.py b/nipype/interfaces/minc/tests/test_auto_VolSymm.py index d1827b4fb9..a1b89616f2 100644 --- a/nipype/interfaces/minc/tests/test_auto_VolSymm.py +++ b/nipype/interfaces/minc/tests/test_auto_VolSymm.py @@ -4,15 +4,37 @@ def test_VolSymm_inputs(): input_map = dict( - args=dict(argstr="%s"), - clobber=dict(argstr="-clobber", usedefault=True), - config_file=dict(argstr="-config_file %s", extensions=None), - 
environ=dict(nohash=True, usedefault=True), - fit_linear=dict(argstr="-linear"), - fit_nonlinear=dict(argstr="-nonlinear"), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + args=dict( + argstr="%s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + config_file=dict( + argstr="-config_file %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fit_linear=dict( + argstr="-linear", + ), + fit_nonlinear=dict( + argstr="-nonlinear", + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), input_grid_files=dict(), - nofit=dict(argstr="-nofit"), + nofit=dict( + argstr="-nofit", + ), output_file=dict( argstr="%s", extensions=None, @@ -32,10 +54,18 @@ def test_VolSymm_inputs(): name_template="%s_vol_symm.xfm", position=-2, ), - verbose=dict(argstr="-verbose"), - x=dict(argstr="-x"), - y=dict(argstr="-y"), - z=dict(argstr="-z"), + verbose=dict( + argstr="-verbose", + ), + x=dict( + argstr="-x", + ), + y=dict( + argstr="-y", + ), + z=dict( + argstr="-z", + ), ) inputs = VolSymm.input_spec() @@ -46,9 +76,15 @@ def test_VolSymm_inputs(): def test_VolSymm_outputs(): output_map = dict( - output_file=dict(extensions=None), - output_grid=dict(extensions=None), - trans_file=dict(extensions=None), + output_file=dict( + extensions=None, + ), + output_grid=dict( + extensions=None, + ), + trans_file=dict( + extensions=None, + ), ) outputs = VolSymm.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Volcentre.py b/nipype/interfaces/minc/tests/test_auto_Volcentre.py index 0a9ef4ea81..cf9f777f70 100644 --- a/nipype/interfaces/minc/tests/test_auto_Volcentre.py +++ b/nipype/interfaces/minc/tests/test_auto_Volcentre.py @@ -4,12 +4,29 @@ def test_Volcentre_inputs(): input_map = dict( - args=dict(argstr="%s"), - centre=dict(argstr="-centre %s %s %s"), - clobber=dict(argstr="-clobber", usedefault=True), - com=dict(argstr="-com"), - environ=dict(nohash=True, usedefault=True), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + args=dict( + argstr="%s", + ), + centre=dict( + argstr="-centre %s %s %s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + com=dict( + argstr="-com", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), output_file=dict( argstr="%s", extensions=None, @@ -19,8 +36,12 @@ def test_Volcentre_inputs(): name_template="%s_volcentre.mnc", position=-1, ), - verbose=dict(argstr="-verbose"), - zero_dircos=dict(argstr="-zero_dircos"), + verbose=dict( + argstr="-verbose", + ), + zero_dircos=dict( + argstr="-zero_dircos", + ), ) inputs = Volcentre.input_spec() @@ -30,7 +51,11 @@ def test_Volcentre_inputs(): def test_Volcentre_outputs(): - output_map = dict(output_file=dict(extensions=None)) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Volcentre.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Voliso.py b/nipype/interfaces/minc/tests/test_auto_Voliso.py index aa5cc338ab..40d01b5767 100644 --- a/nipype/interfaces/minc/tests/test_auto_Voliso.py +++ b/nipype/interfaces/minc/tests/test_auto_Voliso.py @@ -4,13 +4,32 @@ def test_Voliso_inputs(): input_map = dict( - args=dict(argstr="%s"), - avgstep=dict(argstr="--avgstep"), - clobber=dict(argstr="--clobber", usedefault=True), - environ=dict(nohash=True, usedefault=True), - 
input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - maxstep=dict(argstr="--maxstep %s"), - minstep=dict(argstr="--minstep %s"), + args=dict( + argstr="%s", + ), + avgstep=dict( + argstr="--avgstep", + ), + clobber=dict( + argstr="--clobber", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + maxstep=dict( + argstr="--maxstep %s", + ), + minstep=dict( + argstr="--minstep %s", + ), output_file=dict( argstr="%s", extensions=None, @@ -20,7 +39,9 @@ def test_Voliso_inputs(): name_template="%s_voliso.mnc", position=-1, ), - verbose=dict(argstr="--verbose"), + verbose=dict( + argstr="--verbose", + ), ) inputs = Voliso.input_spec() @@ -30,7 +51,11 @@ def test_Voliso_inputs(): def test_Voliso_outputs(): - output_map = dict(output_file=dict(extensions=None)) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Voliso.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_Volpad.py b/nipype/interfaces/minc/tests/test_auto_Volpad.py index f052e699e9..5102199657 100644 --- a/nipype/interfaces/minc/tests/test_auto_Volpad.py +++ b/nipype/interfaces/minc/tests/test_auto_Volpad.py @@ -4,13 +4,32 @@ def test_Volpad_inputs(): input_map = dict( - args=dict(argstr="%s"), - auto=dict(argstr="-auto"), - auto_freq=dict(argstr="-auto_freq %s"), - clobber=dict(argstr="-clobber", usedefault=True), - distance=dict(argstr="-distance %s"), - environ=dict(nohash=True, usedefault=True), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + args=dict( + argstr="%s", + ), + auto=dict( + argstr="-auto", + ), + auto_freq=dict( + argstr="-auto_freq %s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + distance=dict( + argstr="-distance %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), output_file=dict( argstr="%s", extensions=None, @@ -20,9 +39,15 @@ def test_Volpad_inputs(): name_template="%s_volpad.mnc", position=-1, ), - smooth=dict(argstr="-smooth"), - smooth_distance=dict(argstr="-smooth_distance %s"), - verbose=dict(argstr="-verbose"), + smooth=dict( + argstr="-smooth", + ), + smooth_distance=dict( + argstr="-smooth_distance %s", + ), + verbose=dict( + argstr="-verbose", + ), ) inputs = Volpad.input_spec() @@ -32,7 +57,11 @@ def test_Volpad_inputs(): def test_Volpad_outputs(): - output_map = dict(output_file=dict(extensions=None)) + output_map = dict( + output_file=dict( + extensions=None, + ), + ) outputs = Volpad.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_XfmAvg.py b/nipype/interfaces/minc/tests/test_auto_XfmAvg.py index bd7b30897b..f5df6f4d54 100644 --- a/nipype/interfaces/minc/tests/test_auto_XfmAvg.py +++ b/nipype/interfaces/minc/tests/test_auto_XfmAvg.py @@ -4,17 +4,45 @@ def test_XfmAvg_inputs(): input_map = dict( - args=dict(argstr="%s"), - avg_linear=dict(argstr="-avg_linear"), - avg_nonlinear=dict(argstr="-avg_nonlinear"), - clobber=dict(argstr="-clobber", usedefault=True), - environ=dict(nohash=True, usedefault=True), - ignore_linear=dict(argstr="-ignore_linear"), - ignore_nonlinear=dict(argstr="-ignore_nonline"), - input_files=dict(argstr="%s", mandatory=True, position=-2, sep=" "), + args=dict( + argstr="%s", + ), + avg_linear=dict( + argstr="-avg_linear", + ), + 
avg_nonlinear=dict( + argstr="-avg_nonlinear", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ignore_linear=dict( + argstr="-ignore_linear", + ), + ignore_nonlinear=dict( + argstr="-ignore_nonline", + ), + input_files=dict( + argstr="%s", + mandatory=True, + position=-2, + sep=" ", + ), input_grid_files=dict(), - output_file=dict(argstr="%s", extensions=None, genfile=True, position=-1), - verbose=dict(argstr="-verbose"), + output_file=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + verbose=dict( + argstr="-verbose", + ), ) inputs = XfmAvg.input_spec() @@ -25,7 +53,12 @@ def test_XfmAvg_inputs(): def test_XfmAvg_outputs(): output_map = dict( - output_file=dict(extensions=None), output_grid=dict(extensions=None) + output_file=dict( + extensions=None, + ), + output_grid=dict( + extensions=None, + ), ) outputs = XfmAvg.output_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_XfmConcat.py b/nipype/interfaces/minc/tests/test_auto_XfmConcat.py index fedc80449d..58144779b8 100644 --- a/nipype/interfaces/minc/tests/test_auto_XfmConcat.py +++ b/nipype/interfaces/minc/tests/test_auto_XfmConcat.py @@ -4,10 +4,23 @@ def test_XfmConcat_inputs(): input_map = dict( - args=dict(argstr="%s"), - clobber=dict(argstr="-clobber", usedefault=True), - environ=dict(nohash=True, usedefault=True), - input_files=dict(argstr="%s", mandatory=True, position=-2, sep=" "), + args=dict( + argstr="%s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_files=dict( + argstr="%s", + mandatory=True, + position=-2, + sep=" ", + ), input_grid_files=dict(), output_file=dict( argstr="%s", @@ -18,7 +31,9 @@ def test_XfmConcat_inputs(): name_template="%s_xfmconcat.xfm", position=-1, ), - verbose=dict(argstr="-verbose"), + verbose=dict( + argstr="-verbose", + ), ) inputs = XfmConcat.input_spec() @@ -28,7 +43,12 @@ def test_XfmConcat_inputs(): def test_XfmConcat_outputs(): - output_map = dict(output_file=dict(extensions=None), output_grids=dict()) + output_map = dict( + output_file=dict( + extensions=None, + ), + output_grids=dict(), + ) outputs = XfmConcat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/minc/tests/test_auto_XfmInvert.py b/nipype/interfaces/minc/tests/test_auto_XfmInvert.py index dea70656e9..aa8fb61ccd 100644 --- a/nipype/interfaces/minc/tests/test_auto_XfmInvert.py +++ b/nipype/interfaces/minc/tests/test_auto_XfmInvert.py @@ -4,12 +4,32 @@ def test_XfmInvert_inputs(): input_map = dict( - args=dict(argstr="%s"), - clobber=dict(argstr="-clobber", usedefault=True), - environ=dict(nohash=True, usedefault=True), - input_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - output_file=dict(argstr="%s", extensions=None, genfile=True, position=-1), - verbose=dict(argstr="-verbose"), + args=dict( + argstr="%s", + ), + clobber=dict( + argstr="-clobber", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + output_file=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + verbose=dict( + argstr="-verbose", + ), ) inputs = XfmInvert.input_spec() @@ -20,7 +40,12 @@ def test_XfmInvert_inputs(): def test_XfmInvert_outputs(): output_map = dict( - output_file=dict(extensions=None), output_grid=dict(extensions=None) + output_file=dict( + 
extensions=None, + ), + output_grid=dict( + extensions=None, + ), ) outputs = XfmInvert.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py index fd556e2ce1..9daa1b996e 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py @@ -4,32 +4,96 @@ def test_JistBrainMgdmSegmentation_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inAdjust=dict(argstr="--inAdjust %s"), - inAtlas=dict(argstr="--inAtlas %s", extensions=None), - inCompute=dict(argstr="--inCompute %s"), - inCurvature=dict(argstr="--inCurvature %f"), - inData=dict(argstr="--inData %f"), - inFLAIR=dict(argstr="--inFLAIR %s", extensions=None), - inMP2RAGE=dict(argstr="--inMP2RAGE %s", extensions=None), - inMP2RAGE2=dict(argstr="--inMP2RAGE2 %s", extensions=None), - inMPRAGE=dict(argstr="--inMPRAGE %s", extensions=None), - inMax=dict(argstr="--inMax %d"), - inMin=dict(argstr="--inMin %f"), - inOutput=dict(argstr="--inOutput %s"), - inPV=dict(argstr="--inPV %s", extensions=None), - inPosterior=dict(argstr="--inPosterior %f"), - inSteps=dict(argstr="--inSteps %d"), - inTopology=dict(argstr="--inTopology %s"), - null=dict(argstr="--null %s"), - outLevelset=dict(argstr="--outLevelset %s", hash_files=False), - outPosterior2=dict(argstr="--outPosterior2 %s", hash_files=False), - outPosterior3=dict(argstr="--outPosterior3 %s", hash_files=False), - outSegmented=dict(argstr="--outSegmented %s", hash_files=False), - xDefaultMem=dict(argstr="-xDefaultMem %d"), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), - xPrefExt=dict(argstr="--xPrefExt %s"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inAdjust=dict( + argstr="--inAdjust %s", + ), + inAtlas=dict( + argstr="--inAtlas %s", + extensions=None, + ), + inCompute=dict( + argstr="--inCompute %s", + ), + inCurvature=dict( + argstr="--inCurvature %f", + ), + inData=dict( + argstr="--inData %f", + ), + inFLAIR=dict( + argstr="--inFLAIR %s", + extensions=None, + ), + inMP2RAGE=dict( + argstr="--inMP2RAGE %s", + extensions=None, + ), + inMP2RAGE2=dict( + argstr="--inMP2RAGE2 %s", + extensions=None, + ), + inMPRAGE=dict( + argstr="--inMPRAGE %s", + extensions=None, + ), + inMax=dict( + argstr="--inMax %d", + ), + inMin=dict( + argstr="--inMin %f", + ), + inOutput=dict( + argstr="--inOutput %s", + ), + inPV=dict( + argstr="--inPV %s", + extensions=None, + ), + inPosterior=dict( + argstr="--inPosterior %f", + ), + inSteps=dict( + argstr="--inSteps %d", + ), + inTopology=dict( + argstr="--inTopology %s", + ), + null=dict( + argstr="--null %s", + ), + outLevelset=dict( + argstr="--outLevelset %s", + hash_files=False, + ), + outPosterior2=dict( + argstr="--outPosterior2 %s", + hash_files=False, + ), + outPosterior3=dict( + argstr="--outPosterior3 %s", + hash_files=False, + ), + outSegmented=dict( + argstr="--outSegmented %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = JistBrainMgdmSegmentation.input_spec() @@ -40,10 +104,18 @@ def test_JistBrainMgdmSegmentation_inputs(): def test_JistBrainMgdmSegmentation_outputs(): output_map = dict( - outLevelset=dict(extensions=None), - outPosterior2=dict(extensions=None), - 
outPosterior3=dict(extensions=None), - outSegmented=dict(extensions=None), + outLevelset=dict( + extensions=None, + ), + outPosterior2=dict( + extensions=None, + ), + outPosterior3=dict( + extensions=None, + ), + outSegmented=dict( + extensions=None, + ), ) outputs = JistBrainMgdmSegmentation.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py index da92f8b58f..e7706b16b5 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py @@ -4,17 +4,44 @@ def test_JistBrainMp2rageDuraEstimation_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inDistance=dict(argstr="--inDistance %f"), - inSecond=dict(argstr="--inSecond %s", extensions=None), - inSkull=dict(argstr="--inSkull %s", extensions=None), - inoutput=dict(argstr="--inoutput %s"), - null=dict(argstr="--null %s"), - outDura=dict(argstr="--outDura %s", hash_files=False), - xDefaultMem=dict(argstr="-xDefaultMem %d"), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), - xPrefExt=dict(argstr="--xPrefExt %s"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inDistance=dict( + argstr="--inDistance %f", + ), + inSecond=dict( + argstr="--inSecond %s", + extensions=None, + ), + inSkull=dict( + argstr="--inSkull %s", + extensions=None, + ), + inoutput=dict( + argstr="--inoutput %s", + ), + null=dict( + argstr="--null %s", + ), + outDura=dict( + argstr="--outDura %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = JistBrainMp2rageDuraEstimation.input_spec() @@ -24,7 +51,11 @@ def test_JistBrainMp2rageDuraEstimation_inputs(): def test_JistBrainMp2rageDuraEstimation_outputs(): - output_map = dict(outDura=dict(extensions=None)) + output_map = dict( + outDura=dict( + extensions=None, + ), + ) outputs = JistBrainMp2rageDuraEstimation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py index 216f256a80..637b50dcad 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py @@ -4,21 +4,61 @@ def test_JistBrainMp2rageSkullStripping_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inFilter=dict(argstr="--inFilter %s", extensions=None), - inSecond=dict(argstr="--inSecond %s", extensions=None), - inSkip=dict(argstr="--inSkip %s"), - inT1=dict(argstr="--inT1 %s", extensions=None), - inT1weighted=dict(argstr="--inT1weighted %s", extensions=None), - null=dict(argstr="--null %s"), - outBrain=dict(argstr="--outBrain %s", hash_files=False), - outMasked=dict(argstr="--outMasked %s", hash_files=False), - outMasked2=dict(argstr="--outMasked2 %s", hash_files=False), - outMasked3=dict(argstr="--outMasked3 %s", hash_files=False), - xDefaultMem=dict(argstr="-xDefaultMem %d"), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), - xPrefExt=dict(argstr="--xPrefExt %s"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, 
+ ), + inFilter=dict( + argstr="--inFilter %s", + extensions=None, + ), + inSecond=dict( + argstr="--inSecond %s", + extensions=None, + ), + inSkip=dict( + argstr="--inSkip %s", + ), + inT1=dict( + argstr="--inT1 %s", + extensions=None, + ), + inT1weighted=dict( + argstr="--inT1weighted %s", + extensions=None, + ), + null=dict( + argstr="--null %s", + ), + outBrain=dict( + argstr="--outBrain %s", + hash_files=False, + ), + outMasked=dict( + argstr="--outMasked %s", + hash_files=False, + ), + outMasked2=dict( + argstr="--outMasked2 %s", + hash_files=False, + ), + outMasked3=dict( + argstr="--outMasked3 %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = JistBrainMp2rageSkullStripping.input_spec() @@ -29,10 +69,18 @@ def test_JistBrainMp2rageSkullStripping_inputs(): def test_JistBrainMp2rageSkullStripping_outputs(): output_map = dict( - outBrain=dict(extensions=None), - outMasked=dict(extensions=None), - outMasked2=dict(extensions=None), - outMasked3=dict(extensions=None), + outBrain=dict( + extensions=None, + ), + outMasked=dict( + extensions=None, + ), + outMasked2=dict( + extensions=None, + ), + outMasked3=dict( + extensions=None, + ), ) outputs = JistBrainMp2rageSkullStripping.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py index f996db68a1..61a3e2b074 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py @@ -4,16 +4,40 @@ def test_JistBrainPartialVolumeFilter_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inInput=dict(argstr="--inInput %s", extensions=None), - inPV=dict(argstr="--inPV %s"), - inoutput=dict(argstr="--inoutput %s"), - null=dict(argstr="--null %s"), - outPartial=dict(argstr="--outPartial %s", hash_files=False), - xDefaultMem=dict(argstr="-xDefaultMem %d"), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), - xPrefExt=dict(argstr="--xPrefExt %s"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inInput=dict( + argstr="--inInput %s", + extensions=None, + ), + inPV=dict( + argstr="--inPV %s", + ), + inoutput=dict( + argstr="--inoutput %s", + ), + null=dict( + argstr="--null %s", + ), + outPartial=dict( + argstr="--outPartial %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = JistBrainPartialVolumeFilter.input_spec() @@ -23,7 +47,11 @@ def test_JistBrainPartialVolumeFilter_inputs(): def test_JistBrainPartialVolumeFilter_outputs(): - output_map = dict(outPartial=dict(extensions=None)) + output_map = dict( + outPartial=dict( + extensions=None, + ), + ) outputs = JistBrainPartialVolumeFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py b/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py index 4c8919d848..41ae9c5cce 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py @@ -4,21 +4,56 @@ def 
test_JistCortexSurfaceMeshInflation_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inLevelset=dict(argstr="--inLevelset %s", extensions=None), - inLorentzian=dict(argstr="--inLorentzian %s"), - inMax=dict(argstr="--inMax %d"), - inMean=dict(argstr="--inMean %f"), - inSOR=dict(argstr="--inSOR %f"), - inStep=dict(argstr="--inStep %d"), - inTopology=dict(argstr="--inTopology %s"), - null=dict(argstr="--null %s"), - outInflated=dict(argstr="--outInflated %s", hash_files=False), - outOriginal=dict(argstr="--outOriginal %s", hash_files=False), - xDefaultMem=dict(argstr="-xDefaultMem %d"), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), - xPrefExt=dict(argstr="--xPrefExt %s"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inLevelset=dict( + argstr="--inLevelset %s", + extensions=None, + ), + inLorentzian=dict( + argstr="--inLorentzian %s", + ), + inMax=dict( + argstr="--inMax %d", + ), + inMean=dict( + argstr="--inMean %f", + ), + inSOR=dict( + argstr="--inSOR %f", + ), + inStep=dict( + argstr="--inStep %d", + ), + inTopology=dict( + argstr="--inTopology %s", + ), + null=dict( + argstr="--null %s", + ), + outInflated=dict( + argstr="--outInflated %s", + hash_files=False, + ), + outOriginal=dict( + argstr="--outOriginal %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = JistCortexSurfaceMeshInflation.input_spec() @@ -29,7 +64,12 @@ def test_JistCortexSurfaceMeshInflation_inputs(): def test_JistCortexSurfaceMeshInflation_outputs(): output_map = dict( - outInflated=dict(extensions=None), outOriginal=dict(extensions=None) + outInflated=dict( + extensions=None, + ), + outOriginal=dict( + extensions=None, + ), ) outputs = JistCortexSurfaceMeshInflation.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py b/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py index bb5eafcf3d..94d9cc525a 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py @@ -4,22 +4,63 @@ def test_JistIntensityMp2rageMasking_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inBackground=dict(argstr="--inBackground %s"), - inMasking=dict(argstr="--inMasking %s"), - inQuantitative=dict(argstr="--inQuantitative %s", extensions=None), - inSecond=dict(argstr="--inSecond %s", extensions=None), - inSkip=dict(argstr="--inSkip %s"), - inT1weighted=dict(argstr="--inT1weighted %s", extensions=None), - null=dict(argstr="--null %s"), - outMasked=dict(argstr="--outMasked_T1_Map %s", hash_files=False), - outMasked2=dict(argstr="--outMasked_T1weighted %s", hash_files=False), - outSignal=dict(argstr="--outSignal_Proba %s", hash_files=False), - outSignal2=dict(argstr="--outSignal_Mask %s", hash_files=False), - xDefaultMem=dict(argstr="-xDefaultMem %d"), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), - xPrefExt=dict(argstr="--xPrefExt %s"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inBackground=dict( + argstr="--inBackground %s", + ), + inMasking=dict( + argstr="--inMasking %s", + ), + inQuantitative=dict( + argstr="--inQuantitative %s", + extensions=None, + ), + inSecond=dict( + argstr="--inSecond 
%s", + extensions=None, + ), + inSkip=dict( + argstr="--inSkip %s", + ), + inT1weighted=dict( + argstr="--inT1weighted %s", + extensions=None, + ), + null=dict( + argstr="--null %s", + ), + outMasked=dict( + argstr="--outMasked_T1_Map %s", + hash_files=False, + ), + outMasked2=dict( + argstr="--outMasked_T1weighted %s", + hash_files=False, + ), + outSignal=dict( + argstr="--outSignal_Proba %s", + hash_files=False, + ), + outSignal2=dict( + argstr="--outSignal_Mask %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = JistIntensityMp2rageMasking.input_spec() @@ -30,10 +71,18 @@ def test_JistIntensityMp2rageMasking_inputs(): def test_JistIntensityMp2rageMasking_outputs(): output_map = dict( - outMasked=dict(extensions=None), - outMasked2=dict(extensions=None), - outSignal=dict(extensions=None), - outSignal2=dict(extensions=None), + outMasked=dict( + extensions=None, + ), + outMasked2=dict( + extensions=None, + ), + outSignal=dict( + extensions=None, + ), + outSignal2=dict( + extensions=None, + ), ) outputs = JistIntensityMp2rageMasking.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py index 10be662930..0cc1501e4f 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py @@ -4,16 +4,41 @@ def test_JistLaminarProfileCalculator_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inIntensity=dict(argstr="--inIntensity %s", extensions=None), - inMask=dict(argstr="--inMask %s", extensions=None), - incomputed=dict(argstr="--incomputed %s"), - null=dict(argstr="--null %s"), - outResult=dict(argstr="--outResult %s", hash_files=False), - xDefaultMem=dict(argstr="-xDefaultMem %d"), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), - xPrefExt=dict(argstr="--xPrefExt %s"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inIntensity=dict( + argstr="--inIntensity %s", + extensions=None, + ), + inMask=dict( + argstr="--inMask %s", + extensions=None, + ), + incomputed=dict( + argstr="--incomputed %s", + ), + null=dict( + argstr="--null %s", + ), + outResult=dict( + argstr="--outResult %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = JistLaminarProfileCalculator.input_spec() @@ -23,7 +48,11 @@ def test_JistLaminarProfileCalculator_inputs(): def test_JistLaminarProfileCalculator_outputs(): - output_map = dict(outResult=dict(extensions=None)) + output_map = dict( + outResult=dict( + extensions=None, + ), + ) outputs = JistLaminarProfileCalculator.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py index 01bd958029..758d331935 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py @@ -4,18 +4,46 @@ def test_JistLaminarProfileGeometry_inputs(): input_map = dict( - args=dict(argstr="%s"), - 
environ=dict(nohash=True, usedefault=True), - inProfile=dict(argstr="--inProfile %s", extensions=None), - incomputed=dict(argstr="--incomputed %s"), - inoutside=dict(argstr="--inoutside %f"), - inregularization=dict(argstr="--inregularization %s"), - insmoothing=dict(argstr="--insmoothing %f"), - null=dict(argstr="--null %s"), - outResult=dict(argstr="--outResult %s", hash_files=False), - xDefaultMem=dict(argstr="-xDefaultMem %d"), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), - xPrefExt=dict(argstr="--xPrefExt %s"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inProfile=dict( + argstr="--inProfile %s", + extensions=None, + ), + incomputed=dict( + argstr="--incomputed %s", + ), + inoutside=dict( + argstr="--inoutside %f", + ), + inregularization=dict( + argstr="--inregularization %s", + ), + insmoothing=dict( + argstr="--insmoothing %f", + ), + null=dict( + argstr="--null %s", + ), + outResult=dict( + argstr="--outResult %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = JistLaminarProfileGeometry.input_spec() @@ -25,7 +53,11 @@ def test_JistLaminarProfileGeometry_inputs(): def test_JistLaminarProfileGeometry_outputs(): - output_map = dict(outResult=dict(extensions=None)) + output_map = dict( + outResult=dict( + extensions=None, + ), + ) outputs = JistLaminarProfileGeometry.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py index f60597b6fc..65841c48a9 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py @@ -4,17 +4,46 @@ def test_JistLaminarProfileSampling_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inCortex=dict(argstr="--inCortex %s", extensions=None), - inIntensity=dict(argstr="--inIntensity %s", extensions=None), - inProfile=dict(argstr="--inProfile %s", extensions=None), - null=dict(argstr="--null %s"), - outProfile2=dict(argstr="--outProfile2 %s", hash_files=False), - outProfilemapped=dict(argstr="--outProfilemapped %s", hash_files=False), - xDefaultMem=dict(argstr="-xDefaultMem %d"), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), - xPrefExt=dict(argstr="--xPrefExt %s"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inCortex=dict( + argstr="--inCortex %s", + extensions=None, + ), + inIntensity=dict( + argstr="--inIntensity %s", + extensions=None, + ), + inProfile=dict( + argstr="--inProfile %s", + extensions=None, + ), + null=dict( + argstr="--null %s", + ), + outProfile2=dict( + argstr="--outProfile2 %s", + hash_files=False, + ), + outProfilemapped=dict( + argstr="--outProfilemapped %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = JistLaminarProfileSampling.input_spec() @@ -25,7 +54,12 @@ def test_JistLaminarProfileSampling_inputs(): def test_JistLaminarProfileSampling_outputs(): output_map = dict( - outProfile2=dict(extensions=None), outProfilemapped=dict(extensions=None) + outProfile2=dict( + extensions=None, + ), + 
outProfilemapped=dict( + extensions=None, + ), ) outputs = JistLaminarProfileSampling.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py index dd56007f05..fed4abfca1 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py @@ -4,17 +4,45 @@ def test_JistLaminarROIAveraging_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inIntensity=dict(argstr="--inIntensity %s", extensions=None), - inMask=dict(argstr="--inMask %s", extensions=None), - inROI=dict(argstr="--inROI %s", extensions=None), - inROI2=dict(argstr="--inROI2 %s"), - null=dict(argstr="--null %s"), - outROI3=dict(argstr="--outROI3 %s", hash_files=False), - xDefaultMem=dict(argstr="-xDefaultMem %d"), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), - xPrefExt=dict(argstr="--xPrefExt %s"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inIntensity=dict( + argstr="--inIntensity %s", + extensions=None, + ), + inMask=dict( + argstr="--inMask %s", + extensions=None, + ), + inROI=dict( + argstr="--inROI %s", + extensions=None, + ), + inROI2=dict( + argstr="--inROI2 %s", + ), + null=dict( + argstr="--null %s", + ), + outROI3=dict( + argstr="--outROI3 %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = JistLaminarROIAveraging.input_spec() @@ -24,7 +52,11 @@ def test_JistLaminarROIAveraging_inputs(): def test_JistLaminarROIAveraging_outputs(): - output_map = dict(outROI3=dict(extensions=None)) + output_map = dict( + outROI3=dict( + extensions=None, + ), + ) outputs = JistLaminarROIAveraging.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py index 496bcf86b3..31d34ae32e 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py @@ -4,26 +4,73 @@ def test_JistLaminarVolumetricLayering_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inInner=dict(argstr="--inInner %s", extensions=None), - inLayering=dict(argstr="--inLayering %s"), - inLayering2=dict(argstr="--inLayering2 %s"), - inMax=dict(argstr="--inMax %d"), - inMin=dict(argstr="--inMin %f"), - inNumber=dict(argstr="--inNumber %d"), - inOuter=dict(argstr="--inOuter %s", extensions=None), - inTopology=dict(argstr="--inTopology %s"), - incurvature=dict(argstr="--incurvature %d"), - inpresmooth=dict(argstr="--inpresmooth %s"), - inratio=dict(argstr="--inratio %f"), - null=dict(argstr="--null %s"), - outContinuous=dict(argstr="--outContinuous %s", hash_files=False), - outDiscrete=dict(argstr="--outDiscrete %s", hash_files=False), - outLayer=dict(argstr="--outLayer %s", hash_files=False), - xDefaultMem=dict(argstr="-xDefaultMem %d"), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), - xPrefExt=dict(argstr="--xPrefExt %s"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inInner=dict( + argstr="--inInner %s", + extensions=None, + ), + inLayering=dict( 
+ argstr="--inLayering %s", + ), + inLayering2=dict( + argstr="--inLayering2 %s", + ), + inMax=dict( + argstr="--inMax %d", + ), + inMin=dict( + argstr="--inMin %f", + ), + inNumber=dict( + argstr="--inNumber %d", + ), + inOuter=dict( + argstr="--inOuter %s", + extensions=None, + ), + inTopology=dict( + argstr="--inTopology %s", + ), + incurvature=dict( + argstr="--incurvature %d", + ), + inpresmooth=dict( + argstr="--inpresmooth %s", + ), + inratio=dict( + argstr="--inratio %f", + ), + null=dict( + argstr="--null %s", + ), + outContinuous=dict( + argstr="--outContinuous %s", + hash_files=False, + ), + outDiscrete=dict( + argstr="--outDiscrete %s", + hash_files=False, + ), + outLayer=dict( + argstr="--outLayer %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = JistLaminarVolumetricLayering.input_spec() @@ -34,9 +81,15 @@ def test_JistLaminarVolumetricLayering_inputs(): def test_JistLaminarVolumetricLayering_outputs(): output_map = dict( - outContinuous=dict(extensions=None), - outDiscrete=dict(extensions=None), - outLayer=dict(extensions=None), + outContinuous=dict( + extensions=None, + ), + outDiscrete=dict( + extensions=None, + ), + outLayer=dict( + extensions=None, + ), ) outputs = JistLaminarVolumetricLayering.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py index 01e93a70d9..7b9a0fc859 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py @@ -4,16 +4,41 @@ def test_MedicAlgorithmImageCalculator_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inOperation=dict(argstr="--inOperation %s"), - inVolume=dict(argstr="--inVolume %s", extensions=None), - inVolume2=dict(argstr="--inVolume2 %s", extensions=None), - null=dict(argstr="--null %s"), - outResult=dict(argstr="--outResult %s", hash_files=False), - xDefaultMem=dict(argstr="-xDefaultMem %d"), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), - xPrefExt=dict(argstr="--xPrefExt %s"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inOperation=dict( + argstr="--inOperation %s", + ), + inVolume=dict( + argstr="--inVolume %s", + extensions=None, + ), + inVolume2=dict( + argstr="--inVolume2 %s", + extensions=None, + ), + null=dict( + argstr="--null %s", + ), + outResult=dict( + argstr="--outResult %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = MedicAlgorithmImageCalculator.input_spec() @@ -23,7 +48,11 @@ def test_MedicAlgorithmImageCalculator_inputs(): def test_MedicAlgorithmImageCalculator_outputs(): - output_map = dict(outResult=dict(extensions=None)) + output_map = dict( + outResult=dict( + extensions=None, + ), + ) outputs = MedicAlgorithmImageCalculator.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py index a3ae228361..4fb5f2567b 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py +++ 
b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py @@ -4,42 +4,131 @@ def test_MedicAlgorithmLesionToads_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inAtlas=dict(argstr="--inAtlas %s"), - inAtlas2=dict(argstr="--inAtlas2 %s", extensions=None), - inAtlas3=dict(argstr="--inAtlas3 %s", extensions=None), - inAtlas4=dict(argstr="--inAtlas4 %s", extensions=None), - inAtlas5=dict(argstr="--inAtlas5 %f"), - inAtlas6=dict(argstr="--inAtlas6 %s"), - inConnectivity=dict(argstr="--inConnectivity %s"), - inCorrect=dict(argstr="--inCorrect %s"), - inFLAIR=dict(argstr="--inFLAIR %s", extensions=None), - inInclude=dict(argstr="--inInclude %s"), - inMaximum=dict(argstr="--inMaximum %d"), - inMaximum2=dict(argstr="--inMaximum2 %d"), - inMaximum3=dict(argstr="--inMaximum3 %d"), - inMaximum4=dict(argstr="--inMaximum4 %f"), - inMaximum5=dict(argstr="--inMaximum5 %d"), - inOutput=dict(argstr="--inOutput %s"), - inOutput2=dict(argstr="--inOutput2 %s"), - inOutput3=dict(argstr="--inOutput3 %s"), - inSmooting=dict(argstr="--inSmooting %f"), - inT1_MPRAGE=dict(argstr="--inT1_MPRAGE %s", extensions=None), - inT1_SPGR=dict(argstr="--inT1_SPGR %s", extensions=None), - null=dict(argstr="--null %s"), - outCortical=dict(argstr="--outCortical %s", hash_files=False), - outFilled=dict(argstr="--outFilled %s", hash_files=False), - outHard=dict(argstr="--outHard %s", hash_files=False), - outHard2=dict(argstr="--outHard2 %s", hash_files=False), - outInhomogeneity=dict(argstr="--outInhomogeneity %s", hash_files=False), - outLesion=dict(argstr="--outLesion %s", hash_files=False), - outMembership=dict(argstr="--outMembership %s", hash_files=False), - outSulcal=dict(argstr="--outSulcal %s", hash_files=False), - outWM=dict(argstr="--outWM %s", hash_files=False), - xDefaultMem=dict(argstr="-xDefaultMem %d"), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), - xPrefExt=dict(argstr="--xPrefExt %s"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inAtlas=dict( + argstr="--inAtlas %s", + ), + inAtlas2=dict( + argstr="--inAtlas2 %s", + extensions=None, + ), + inAtlas3=dict( + argstr="--inAtlas3 %s", + extensions=None, + ), + inAtlas4=dict( + argstr="--inAtlas4 %s", + extensions=None, + ), + inAtlas5=dict( + argstr="--inAtlas5 %f", + ), + inAtlas6=dict( + argstr="--inAtlas6 %s", + ), + inConnectivity=dict( + argstr="--inConnectivity %s", + ), + inCorrect=dict( + argstr="--inCorrect %s", + ), + inFLAIR=dict( + argstr="--inFLAIR %s", + extensions=None, + ), + inInclude=dict( + argstr="--inInclude %s", + ), + inMaximum=dict( + argstr="--inMaximum %d", + ), + inMaximum2=dict( + argstr="--inMaximum2 %d", + ), + inMaximum3=dict( + argstr="--inMaximum3 %d", + ), + inMaximum4=dict( + argstr="--inMaximum4 %f", + ), + inMaximum5=dict( + argstr="--inMaximum5 %d", + ), + inOutput=dict( + argstr="--inOutput %s", + ), + inOutput2=dict( + argstr="--inOutput2 %s", + ), + inOutput3=dict( + argstr="--inOutput3 %s", + ), + inSmooting=dict( + argstr="--inSmooting %f", + ), + inT1_MPRAGE=dict( + argstr="--inT1_MPRAGE %s", + extensions=None, + ), + inT1_SPGR=dict( + argstr="--inT1_SPGR %s", + extensions=None, + ), + null=dict( + argstr="--null %s", + ), + outCortical=dict( + argstr="--outCortical %s", + hash_files=False, + ), + outFilled=dict( + argstr="--outFilled %s", + hash_files=False, + ), + outHard=dict( + argstr="--outHard %s", + hash_files=False, + ), + outHard2=dict( + argstr="--outHard2 %s", + hash_files=False, + ), + 
outInhomogeneity=dict( + argstr="--outInhomogeneity %s", + hash_files=False, + ), + outLesion=dict( + argstr="--outLesion %s", + hash_files=False, + ), + outMembership=dict( + argstr="--outMembership %s", + hash_files=False, + ), + outSulcal=dict( + argstr="--outSulcal %s", + hash_files=False, + ), + outWM=dict( + argstr="--outWM %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = MedicAlgorithmLesionToads.input_spec() @@ -50,15 +139,33 @@ def test_MedicAlgorithmLesionToads_inputs(): def test_MedicAlgorithmLesionToads_outputs(): output_map = dict( - outCortical=dict(extensions=None), - outFilled=dict(extensions=None), - outHard=dict(extensions=None), - outHard2=dict(extensions=None), - outInhomogeneity=dict(extensions=None), - outLesion=dict(extensions=None), - outMembership=dict(extensions=None), - outSulcal=dict(extensions=None), - outWM=dict(extensions=None), + outCortical=dict( + extensions=None, + ), + outFilled=dict( + extensions=None, + ), + outHard=dict( + extensions=None, + ), + outHard2=dict( + extensions=None, + ), + outInhomogeneity=dict( + extensions=None, + ), + outLesion=dict( + extensions=None, + ), + outMembership=dict( + extensions=None, + ), + outSulcal=dict( + extensions=None, + ), + outWM=dict( + extensions=None, + ), ) outputs = MedicAlgorithmLesionToads.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py index 15080a09a6..49c307f27f 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py @@ -4,22 +4,59 @@ def test_MedicAlgorithmMipavReorient_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inInterpolation=dict(argstr="--inInterpolation %s"), - inNew=dict(argstr="--inNew %s"), - inResolution=dict(argstr="--inResolution %s"), - inSource=dict(argstr="--inSource %s", sep=";"), - inTemplate=dict(argstr="--inTemplate %s", extensions=None), - inUser=dict(argstr="--inUser %s"), - inUser2=dict(argstr="--inUser2 %s"), - inUser3=dict(argstr="--inUser3 %s"), - inUser4=dict(argstr="--inUser4 %s"), - null=dict(argstr="--null %s"), - outReoriented=dict(argstr="--outReoriented %s", sep=";"), - xDefaultMem=dict(argstr="-xDefaultMem %d"), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), - xPrefExt=dict(argstr="--xPrefExt %s"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inInterpolation=dict( + argstr="--inInterpolation %s", + ), + inNew=dict( + argstr="--inNew %s", + ), + inResolution=dict( + argstr="--inResolution %s", + ), + inSource=dict( + argstr="--inSource %s", + sep=";", + ), + inTemplate=dict( + argstr="--inTemplate %s", + extensions=None, + ), + inUser=dict( + argstr="--inUser %s", + ), + inUser2=dict( + argstr="--inUser2 %s", + ), + inUser3=dict( + argstr="--inUser3 %s", + ), + inUser4=dict( + argstr="--inUser4 %s", + ), + null=dict( + argstr="--null %s", + ), + outReoriented=dict( + argstr="--outReoriented %s", + sep=";", + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = MedicAlgorithmMipavReorient.input_spec() diff --git 
a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py index 8104a4a868..bf895247a6 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py @@ -4,23 +4,62 @@ def test_MedicAlgorithmN3_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inAutomatic=dict(argstr="--inAutomatic %s"), - inEnd=dict(argstr="--inEnd %f"), - inField=dict(argstr="--inField %f"), - inInput=dict(argstr="--inInput %s", extensions=None), - inKernel=dict(argstr="--inKernel %f"), - inMaximum=dict(argstr="--inMaximum %d"), - inSignal=dict(argstr="--inSignal %f"), - inSubsample=dict(argstr="--inSubsample %f"), - inWeiner=dict(argstr="--inWeiner %f"), - null=dict(argstr="--null %s"), - outInhomogeneity=dict(argstr="--outInhomogeneity %s", hash_files=False), - outInhomogeneity2=dict(argstr="--outInhomogeneity2 %s", hash_files=False), - xDefaultMem=dict(argstr="-xDefaultMem %d"), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), - xPrefExt=dict(argstr="--xPrefExt %s"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inAutomatic=dict( + argstr="--inAutomatic %s", + ), + inEnd=dict( + argstr="--inEnd %f", + ), + inField=dict( + argstr="--inField %f", + ), + inInput=dict( + argstr="--inInput %s", + extensions=None, + ), + inKernel=dict( + argstr="--inKernel %f", + ), + inMaximum=dict( + argstr="--inMaximum %d", + ), + inSignal=dict( + argstr="--inSignal %f", + ), + inSubsample=dict( + argstr="--inSubsample %f", + ), + inWeiner=dict( + argstr="--inWeiner %f", + ), + null=dict( + argstr="--null %s", + ), + outInhomogeneity=dict( + argstr="--outInhomogeneity %s", + hash_files=False, + ), + outInhomogeneity2=dict( + argstr="--outInhomogeneity2 %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = MedicAlgorithmN3.input_spec() @@ -31,7 +70,12 @@ def test_MedicAlgorithmN3_inputs(): def test_MedicAlgorithmN3_outputs(): output_map = dict( - outInhomogeneity=dict(extensions=None), outInhomogeneity2=dict(extensions=None) + outInhomogeneity=dict( + extensions=None, + ), + outInhomogeneity2=dict( + extensions=None, + ), ) outputs = MedicAlgorithmN3.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py index 1cd9b882c6..b62def8a4f 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py @@ -4,55 +4,166 @@ def test_MedicAlgorithmSPECTRE2010_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inApply=dict(argstr="--inApply %s"), - inAtlas=dict(argstr="--inAtlas %s", extensions=None), - inBackground=dict(argstr="--inBackground %f"), - inCoarse=dict(argstr="--inCoarse %f"), - inCost=dict(argstr="--inCost %s"), - inDegrees=dict(argstr="--inDegrees %s"), - inFind=dict(argstr="--inFind %s"), - inFine=dict(argstr="--inFine %f"), - inImage=dict(argstr="--inImage %s"), - inInhomogeneity=dict(argstr="--inInhomogeneity %s"), - inInitial=dict(argstr="--inInitial %d"), - inInitial2=dict(argstr="--inInitial2 %f"), - inInput=dict(argstr="--inInput %s", extensions=None), - 
inMMC=dict(argstr="--inMMC %d"), - inMMC2=dict(argstr="--inMMC2 %d"), - inMaximum=dict(argstr="--inMaximum %f"), - inMinimum=dict(argstr="--inMinimum %f"), - inMinimum2=dict(argstr="--inMinimum2 %f"), - inMultiple=dict(argstr="--inMultiple %d"), - inMultithreading=dict(argstr="--inMultithreading %s"), - inNumber=dict(argstr="--inNumber %d"), - inNumber2=dict(argstr="--inNumber2 %d"), - inOutput=dict(argstr="--inOutput %s"), - inOutput2=dict(argstr="--inOutput2 %s"), - inOutput3=dict(argstr="--inOutput3 %s"), - inOutput4=dict(argstr="--inOutput4 %s"), - inOutput5=dict(argstr="--inOutput5 %s"), - inRegistration=dict(argstr="--inRegistration %s"), - inResample=dict(argstr="--inResample %s"), - inRun=dict(argstr="--inRun %s"), - inSkip=dict(argstr="--inSkip %s"), - inSmoothing=dict(argstr="--inSmoothing %f"), - inSubsample=dict(argstr="--inSubsample %s"), - inUse=dict(argstr="--inUse %s"), - null=dict(argstr="--null %s"), - outFANTASM=dict(argstr="--outFANTASM %s", hash_files=False), - outMask=dict(argstr="--outMask %s", hash_files=False), - outMidsagittal=dict(argstr="--outMidsagittal %s", hash_files=False), - outOriginal=dict(argstr="--outOriginal %s", hash_files=False), - outPrior=dict(argstr="--outPrior %s", hash_files=False), - outSegmentation=dict(argstr="--outSegmentation %s", hash_files=False), - outSplitHalves=dict(argstr="--outSplitHalves %s", hash_files=False), - outStripped=dict(argstr="--outStripped %s", hash_files=False), - outd0=dict(argstr="--outd0 %s", hash_files=False), - xDefaultMem=dict(argstr="-xDefaultMem %d"), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), - xPrefExt=dict(argstr="--xPrefExt %s"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inApply=dict( + argstr="--inApply %s", + ), + inAtlas=dict( + argstr="--inAtlas %s", + extensions=None, + ), + inBackground=dict( + argstr="--inBackground %f", + ), + inCoarse=dict( + argstr="--inCoarse %f", + ), + inCost=dict( + argstr="--inCost %s", + ), + inDegrees=dict( + argstr="--inDegrees %s", + ), + inFind=dict( + argstr="--inFind %s", + ), + inFine=dict( + argstr="--inFine %f", + ), + inImage=dict( + argstr="--inImage %s", + ), + inInhomogeneity=dict( + argstr="--inInhomogeneity %s", + ), + inInitial=dict( + argstr="--inInitial %d", + ), + inInitial2=dict( + argstr="--inInitial2 %f", + ), + inInput=dict( + argstr="--inInput %s", + extensions=None, + ), + inMMC=dict( + argstr="--inMMC %d", + ), + inMMC2=dict( + argstr="--inMMC2 %d", + ), + inMaximum=dict( + argstr="--inMaximum %f", + ), + inMinimum=dict( + argstr="--inMinimum %f", + ), + inMinimum2=dict( + argstr="--inMinimum2 %f", + ), + inMultiple=dict( + argstr="--inMultiple %d", + ), + inMultithreading=dict( + argstr="--inMultithreading %s", + ), + inNumber=dict( + argstr="--inNumber %d", + ), + inNumber2=dict( + argstr="--inNumber2 %d", + ), + inOutput=dict( + argstr="--inOutput %s", + ), + inOutput2=dict( + argstr="--inOutput2 %s", + ), + inOutput3=dict( + argstr="--inOutput3 %s", + ), + inOutput4=dict( + argstr="--inOutput4 %s", + ), + inOutput5=dict( + argstr="--inOutput5 %s", + ), + inRegistration=dict( + argstr="--inRegistration %s", + ), + inResample=dict( + argstr="--inResample %s", + ), + inRun=dict( + argstr="--inRun %s", + ), + inSkip=dict( + argstr="--inSkip %s", + ), + inSmoothing=dict( + argstr="--inSmoothing %f", + ), + inSubsample=dict( + argstr="--inSubsample %s", + ), + inUse=dict( + argstr="--inUse %s", + ), + null=dict( + argstr="--null %s", + ), + outFANTASM=dict( + argstr="--outFANTASM %s", 
+ hash_files=False, + ), + outMask=dict( + argstr="--outMask %s", + hash_files=False, + ), + outMidsagittal=dict( + argstr="--outMidsagittal %s", + hash_files=False, + ), + outOriginal=dict( + argstr="--outOriginal %s", + hash_files=False, + ), + outPrior=dict( + argstr="--outPrior %s", + hash_files=False, + ), + outSegmentation=dict( + argstr="--outSegmentation %s", + hash_files=False, + ), + outSplitHalves=dict( + argstr="--outSplitHalves %s", + hash_files=False, + ), + outStripped=dict( + argstr="--outStripped %s", + hash_files=False, + ), + outd0=dict( + argstr="--outd0 %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = MedicAlgorithmSPECTRE2010.input_spec() @@ -63,15 +174,33 @@ def test_MedicAlgorithmSPECTRE2010_inputs(): def test_MedicAlgorithmSPECTRE2010_outputs(): output_map = dict( - outFANTASM=dict(extensions=None), - outMask=dict(extensions=None), - outMidsagittal=dict(extensions=None), - outOriginal=dict(extensions=None), - outPrior=dict(extensions=None), - outSegmentation=dict(extensions=None), - outSplitHalves=dict(extensions=None), - outStripped=dict(extensions=None), - outd0=dict(extensions=None), + outFANTASM=dict( + extensions=None, + ), + outMask=dict( + extensions=None, + ), + outMidsagittal=dict( + extensions=None, + ), + outOriginal=dict( + extensions=None, + ), + outPrior=dict( + extensions=None, + ), + outSegmentation=dict( + extensions=None, + ), + outSplitHalves=dict( + extensions=None, + ), + outStripped=dict( + extensions=None, + ), + outd0=dict( + extensions=None, + ), ) outputs = MedicAlgorithmSPECTRE2010.output_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py index c7b4761baa..bdd6e4e1b6 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py @@ -4,17 +4,43 @@ def test_MedicAlgorithmThresholdToBinaryMask_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inLabel=dict(argstr="--inLabel %s", sep=";"), - inMaximum=dict(argstr="--inMaximum %f"), - inMinimum=dict(argstr="--inMinimum %f"), - inUse=dict(argstr="--inUse %s"), - null=dict(argstr="--null %s"), - outBinary=dict(argstr="--outBinary %s", sep=";"), - xDefaultMem=dict(argstr="-xDefaultMem %d"), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), - xPrefExt=dict(argstr="--xPrefExt %s"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inLabel=dict( + argstr="--inLabel %s", + sep=";", + ), + inMaximum=dict( + argstr="--inMaximum %f", + ), + inMinimum=dict( + argstr="--inMinimum %f", + ), + inUse=dict( + argstr="--inUse %s", + ), + null=dict( + argstr="--null %s", + ), + outBinary=dict( + argstr="--outBinary %s", + sep=";", + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = MedicAlgorithmThresholdToBinaryMask.input_spec() diff --git a/nipype/interfaces/mipav/tests/test_auto_RandomVol.py b/nipype/interfaces/mipav/tests/test_auto_RandomVol.py index 0119bab925..4929f54d6a 100644 --- a/nipype/interfaces/mipav/tests/test_auto_RandomVol.py +++ 
b/nipype/interfaces/mipav/tests/test_auto_RandomVol.py @@ -4,22 +4,57 @@ def test_RandomVol_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inField=dict(argstr="--inField %s"), - inLambda=dict(argstr="--inLambda %f"), - inMaximum=dict(argstr="--inMaximum %d"), - inMinimum=dict(argstr="--inMinimum %d"), - inSize=dict(argstr="--inSize %d"), - inSize2=dict(argstr="--inSize2 %d"), - inSize3=dict(argstr="--inSize3 %d"), - inSize4=dict(argstr="--inSize4 %d"), - inStandard=dict(argstr="--inStandard %d"), - null=dict(argstr="--null %s"), - outRand1=dict(argstr="--outRand1 %s", hash_files=False), - xDefaultMem=dict(argstr="-xDefaultMem %d"), - xMaxProcess=dict(argstr="-xMaxProcess %d", usedefault=True), - xPrefExt=dict(argstr="--xPrefExt %s"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inField=dict( + argstr="--inField %s", + ), + inLambda=dict( + argstr="--inLambda %f", + ), + inMaximum=dict( + argstr="--inMaximum %d", + ), + inMinimum=dict( + argstr="--inMinimum %d", + ), + inSize=dict( + argstr="--inSize %d", + ), + inSize2=dict( + argstr="--inSize2 %d", + ), + inSize3=dict( + argstr="--inSize3 %d", + ), + inSize4=dict( + argstr="--inSize4 %d", + ), + inStandard=dict( + argstr="--inStandard %d", + ), + null=dict( + argstr="--null %s", + ), + outRand1=dict( + argstr="--outRand1 %s", + hash_files=False, + ), + xDefaultMem=dict( + argstr="-xDefaultMem %d", + ), + xMaxProcess=dict( + argstr="-xMaxProcess %d", + usedefault=True, + ), + xPrefExt=dict( + argstr="--xPrefExt %s", + ), ) inputs = RandomVol.input_spec() @@ -29,7 +64,11 @@ def test_RandomVol_inputs(): def test_RandomVol_outputs(): - output_map = dict(outRand1=dict(extensions=None)) + output_map = dict( + outRand1=dict( + extensions=None, + ), + ) outputs = RandomVol.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py b/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py index 124d3c7a9a..57f1b40e4d 100644 --- a/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py +++ b/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py @@ -4,13 +4,32 @@ def test_WatershedBEM_inputs(): input_map = dict( - args=dict(argstr="%s"), - atlas_mode=dict(argstr="--atlas"), - environ=dict(nohash=True, usedefault=True), - overwrite=dict(argstr="--overwrite", usedefault=True), - subject_id=dict(argstr="--subject %s", mandatory=True), - subjects_dir=dict(mandatory=True, usedefault=True), - volume=dict(argstr="--volume %s", usedefault=True), + args=dict( + argstr="%s", + ), + atlas_mode=dict( + argstr="--atlas", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + overwrite=dict( + argstr="--overwrite", + usedefault=True, + ), + subject_id=dict( + argstr="--subject %s", + mandatory=True, + ), + subjects_dir=dict( + mandatory=True, + usedefault=True, + ), + volume=dict( + argstr="--volume %s", + usedefault=True, + ), ) inputs = WatershedBEM.input_spec() @@ -21,13 +40,32 @@ def test_WatershedBEM_inputs(): def test_WatershedBEM_outputs(): output_map = dict( - brain_surface=dict(extensions=None, loc="bem/watershed"), - cor_files=dict(altkey="COR", loc="bem/watershed/ws"), - fif_file=dict(altkey="fif", extensions=None, loc="bem"), - inner_skull_surface=dict(extensions=None, loc="bem/watershed"), + brain_surface=dict( + extensions=None, + loc="bem/watershed", + ), + cor_files=dict( + altkey="COR", + loc="bem/watershed/ws", + ), + fif_file=dict( + altkey="fif", + extensions=None, + 
loc="bem", + ), + inner_skull_surface=dict( + extensions=None, + loc="bem/watershed", + ), mesh_files=dict(), - outer_skin_surface=dict(extensions=None, loc="bem/watershed"), - outer_skull_surface=dict(extensions=None, loc="bem/watershed"), + outer_skin_surface=dict( + extensions=None, + loc="bem/watershed", + ), + outer_skull_surface=dict( + extensions=None, + loc="bem/watershed", + ), ) outputs = WatershedBEM.output_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py b/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py index e2cfe0a687..ad93f35b9a 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py @@ -4,21 +4,70 @@ def test_ConstrainedSphericalDeconvolution_inputs(): input_map = dict( - args=dict(argstr="%s"), - debug=dict(argstr="-debug"), - directions_file=dict(argstr="-directions %s", extensions=None, position=-2), - encoding_file=dict(argstr="-grad %s", extensions=None, position=1), - environ=dict(nohash=True, usedefault=True), - filter_file=dict(argstr="-filter %s", extensions=None, position=-2), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), - iterations=dict(argstr="-niter %s"), - lambda_value=dict(argstr="-lambda %s"), - mask_image=dict(argstr="-mask %s", extensions=None, position=2), - maximum_harmonic_order=dict(argstr="-lmax %s"), - normalise=dict(argstr="-normalise", position=3), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), - response_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - threshold_value=dict(argstr="-threshold %s"), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + ), + directions_file=dict( + argstr="-directions %s", + extensions=None, + position=-2, + ), + encoding_file=dict( + argstr="-grad %s", + extensions=None, + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + filter_file=dict( + argstr="-filter %s", + extensions=None, + position=-2, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + iterations=dict( + argstr="-niter %s", + ), + lambda_value=dict( + argstr="-lambda %s", + ), + mask_image=dict( + argstr="-mask %s", + extensions=None, + position=2, + ), + maximum_harmonic_order=dict( + argstr="-lmax %s", + ), + normalise=dict( + argstr="-normalise", + position=3, + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + response_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + threshold_value=dict( + argstr="-threshold %s", + ), ) inputs = ConstrainedSphericalDeconvolution.input_spec() @@ -28,7 +77,11 @@ def test_ConstrainedSphericalDeconvolution_inputs(): def test_ConstrainedSphericalDeconvolution_outputs(): - output_map = dict(spherical_harmonics_image=dict(extensions=None)) + output_map = dict( + spherical_harmonics_image=dict( + extensions=None, + ), + ) outputs = ConstrainedSphericalDeconvolution.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py b/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py index fef47d177b..97b5885baf 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py @@ -4,15 +4,38 @@ 
def test_DWI2SphericalHarmonicsImage_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), encoding_file=dict( - argstr="-grad %s", extensions=None, mandatory=True, position=1 + argstr="-grad %s", + extensions=None, + mandatory=True, + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + maximum_harmonic_order=dict( + argstr="-lmax %s", + ), + normalise=dict( + argstr="-normalise", + position=3, + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, ), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - maximum_harmonic_order=dict(argstr="-lmax %s"), - normalise=dict(argstr="-normalise", position=3), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), ) inputs = DWI2SphericalHarmonicsImage.input_spec() @@ -22,7 +45,11 @@ def test_DWI2SphericalHarmonicsImage_inputs(): def test_DWI2SphericalHarmonicsImage_outputs(): - output_map = dict(spherical_harmonics_image=dict(extensions=None)) + output_map = dict( + spherical_harmonics_image=dict( + extensions=None, + ), + ) outputs = DWI2SphericalHarmonicsImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py b/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py index b91a699925..72a2820cc1 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py @@ -4,14 +4,41 @@ def test_DWI2Tensor_inputs(): input_map = dict( - args=dict(argstr="%s"), - debug=dict(argstr="-debug", position=1), - encoding_file=dict(argstr="-grad %s", extensions=None, position=2), - environ=dict(nohash=True, usedefault=True), - ignore_slice_by_volume=dict(argstr="-ignoreslices %s", position=2, sep=" "), - ignore_volumes=dict(argstr="-ignorevolumes %s", position=2, sep=" "), - in_file=dict(argstr="%s", mandatory=True, position=-2), - mask=dict(argstr="-mask %s", extensions=None), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + position=1, + ), + encoding_file=dict( + argstr="-grad %s", + extensions=None, + position=2, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ignore_slice_by_volume=dict( + argstr="-ignoreslices %s", + position=2, + sep=" ", + ), + ignore_volumes=dict( + argstr="-ignorevolumes %s", + position=2, + sep=" ", + ), + in_file=dict( + argstr="%s", + mandatory=True, + position=-2, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + ), out_filename=dict( argstr="%s", extensions=None, @@ -20,7 +47,10 @@ def test_DWI2Tensor_inputs(): output_name="tensor", position=-1, ), - quiet=dict(argstr="-quiet", position=1), + quiet=dict( + argstr="-quiet", + position=1, + ), ) inputs = DWI2Tensor.input_spec() @@ -30,7 +60,11 @@ def test_DWI2Tensor_inputs(): def test_DWI2Tensor_outputs(): - output_map = dict(tensor=dict(extensions=None)) + output_map = dict( + tensor=dict( + extensions=None, + ), + ) outputs = DWI2Tensor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py index 98dfdb1137..c6fe4f586e 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py +++ 
b/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py @@ -4,13 +4,27 @@ def test_DiffusionTensorStreamlineTrack_inputs(): input_map = dict( - args=dict(argstr="%s"), - cutoff_value=dict(argstr="-cutoff %s", units="NA"), - desired_number_of_tracks=dict(argstr="-number %d"), - do_not_precompute=dict(argstr="-noprecomputed"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + cutoff_value=dict( + argstr="-cutoff %s", + units="NA", + ), + desired_number_of_tracks=dict( + argstr="-number %d", + ), + do_not_precompute=dict( + argstr="-noprecomputed", + ), + environ=dict( + nohash=True, + usedefault=True, + ), exclude_file=dict( - argstr="-exclude %s", extensions=None, xor=["exclude_file", "exclude_spec"] + argstr="-exclude %s", + extensions=None, + xor=["exclude_file", "exclude_spec"], ), exclude_spec=dict( argstr="-exclude %s", @@ -20,11 +34,21 @@ def test_DiffusionTensorStreamlineTrack_inputs(): xor=["exclude_file", "exclude_spec"], ), gradient_encoding_file=dict( - argstr="-grad %s", extensions=None, mandatory=True, position=-2 + argstr="-grad %s", + extensions=None, + mandatory=True, + position=-2, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, ), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), include_file=dict( - argstr="-include %s", extensions=None, xor=["include_file", "include_spec"] + argstr="-include %s", + extensions=None, + xor=["include_file", "include_spec"], ), include_spec=dict( argstr="-include %s", @@ -33,11 +57,23 @@ def test_DiffusionTensorStreamlineTrack_inputs(): units="mm", xor=["include_file", "include_spec"], ), - initial_cutoff_value=dict(argstr="-initcutoff %s", units="NA"), - initial_direction=dict(argstr="-initdirection %s", units="voxels"), - inputmodel=dict(argstr="%s", position=-3, usedefault=True), + initial_cutoff_value=dict( + argstr="-initcutoff %s", + units="NA", + ), + initial_direction=dict( + argstr="-initdirection %s", + units="voxels", + ), + inputmodel=dict( + argstr="%s", + position=-3, + usedefault=True, + ), mask_file=dict( - argstr="-mask %s", extensions=None, xor=["mask_file", "mask_spec"] + argstr="-mask %s", + extensions=None, + xor=["mask_file", "mask_spec"], ), mask_spec=dict( argstr="-mask %s", @@ -46,11 +82,24 @@ def test_DiffusionTensorStreamlineTrack_inputs(): units="mm", xor=["mask_file", "mask_spec"], ), - maximum_number_of_tracks=dict(argstr="-maxnum %d"), - maximum_tract_length=dict(argstr="-length %s", units="mm"), - minimum_radius_of_curvature=dict(argstr="-curvature %s", units="mm"), - minimum_tract_length=dict(argstr="-minlength %s", units="mm"), - no_mask_interpolation=dict(argstr="-nomaskinterp"), + maximum_number_of_tracks=dict( + argstr="-maxnum %d", + ), + maximum_tract_length=dict( + argstr="-length %s", + units="mm", + ), + minimum_radius_of_curvature=dict( + argstr="-curvature %s", + units="mm", + ), + minimum_tract_length=dict( + argstr="-minlength %s", + units="mm", + ), + no_mask_interpolation=dict( + argstr="-nomaskinterp", + ), out_file=dict( argstr="%s", extensions=None, @@ -60,7 +109,9 @@ def test_DiffusionTensorStreamlineTrack_inputs(): position=-1, ), seed_file=dict( - argstr="-seed %s", extensions=None, xor=["seed_file", "seed_spec"] + argstr="-seed %s", + extensions=None, + xor=["seed_file", "seed_spec"], ), seed_spec=dict( argstr="-seed %s", @@ -69,9 +120,16 @@ def test_DiffusionTensorStreamlineTrack_inputs(): units="mm", xor=["seed_file", "seed_spec"], ), - step_size=dict(argstr="-step %s", 
units="mm"), - stop=dict(argstr="-stop"), - unidirectional=dict(argstr="-unidirectional"), + step_size=dict( + argstr="-step %s", + units="mm", + ), + stop=dict( + argstr="-stop", + ), + unidirectional=dict( + argstr="-unidirectional", + ), ) inputs = DiffusionTensorStreamlineTrack.input_spec() @@ -81,7 +139,11 @@ def test_DiffusionTensorStreamlineTrack_inputs(): def test_DiffusionTensorStreamlineTrack_outputs(): - output_map = dict(tracked=dict(extensions=None)) + output_map = dict( + tracked=dict( + extensions=None, + ), + ) outputs = DiffusionTensorStreamlineTrack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py b/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py index a20c3e28c6..4685b0e9d6 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py @@ -4,12 +4,28 @@ def test_Directions2Amplitude_inputs(): input_map = dict( - args=dict(argstr="%s"), - display_debug=dict(argstr="-debug"), - display_info=dict(argstr="-info"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - num_peaks=dict(argstr="-num %s"), + args=dict( + argstr="%s", + ), + display_debug=dict( + argstr="-debug", + ), + display_info=dict( + argstr="-info", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + num_peaks=dict( + argstr="-num %s", + ), out_file=dict( argstr="%s", extensions=None, @@ -19,9 +35,17 @@ def test_Directions2Amplitude_inputs(): name_template="%s_amplitudes.mif", position=-1, ), - peak_directions=dict(argstr="-direction %s", sep=" "), - peaks_image=dict(argstr="-peaks %s", extensions=None), - quiet_display=dict(argstr="-quiet"), + peak_directions=dict( + argstr="-direction %s", + sep=" ", + ), + peaks_image=dict( + argstr="-peaks %s", + extensions=None, + ), + quiet_display=dict( + argstr="-quiet", + ), ) inputs = Directions2Amplitude.input_spec() @@ -31,7 +55,11 @@ def test_Directions2Amplitude_inputs(): def test_Directions2Amplitude_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Directions2Amplitude.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Erode.py b/nipype/interfaces/mrtrix/tests/test_auto_Erode.py index 6cafdb0c9d..86a6a3d0b1 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Erode.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Erode.py @@ -4,14 +4,40 @@ def test_Erode_inputs(): input_map = dict( - args=dict(argstr="%s"), - debug=dict(argstr="-debug", position=1), - dilate=dict(argstr="-dilate", position=1), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - number_of_passes=dict(argstr="-npass %s"), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), - quiet=dict(argstr="-quiet", position=1), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + position=1, + ), + dilate=dict( + argstr="-dilate", + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + number_of_passes=dict( + argstr="-npass %s", + ), + out_filename=dict( + argstr="%s", + 
extensions=None, + genfile=True, + position=-1, + ), + quiet=dict( + argstr="-quiet", + position=1, + ), ) inputs = Erode.input_spec() @@ -21,7 +47,11 @@ def test_Erode_inputs(): def test_Erode_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Erode.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py b/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py index 9469f142ad..e93a7744fc 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py @@ -4,18 +4,49 @@ def test_EstimateResponseForSH_inputs(): input_map = dict( - args=dict(argstr="%s"), - debug=dict(argstr="-debug"), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + ), encoding_file=dict( - argstr="-grad %s", extensions=None, mandatory=True, position=1 - ), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), - mask_image=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - maximum_harmonic_order=dict(argstr="-lmax %s"), - normalise=dict(argstr="-normalise"), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), - quiet=dict(argstr="-quiet"), + argstr="-grad %s", + extensions=None, + mandatory=True, + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + mask_image=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + maximum_harmonic_order=dict( + argstr="-lmax %s", + ), + normalise=dict( + argstr="-normalise", + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + quiet=dict( + argstr="-quiet", + ), ) inputs = EstimateResponseForSH.input_spec() @@ -25,7 +56,11 @@ def test_EstimateResponseForSH_inputs(): def test_EstimateResponseForSH_outputs(): - output_map = dict(response=dict(extensions=None)) + output_map = dict( + response=dict( + extensions=None, + ), + ) outputs = EstimateResponseForSH.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py b/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py index fa1968c5ec..3e11a7db45 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py @@ -4,12 +4,27 @@ def test_FSL2MRTrix_inputs(): input_map = dict( - bval_file=dict(extensions=None, mandatory=True), - bvec_file=dict(extensions=None, mandatory=True), - invert_x=dict(usedefault=True), - invert_y=dict(usedefault=True), - invert_z=dict(usedefault=True), - out_encoding_file=dict(extensions=None, genfile=True), + bval_file=dict( + extensions=None, + mandatory=True, + ), + bvec_file=dict( + extensions=None, + mandatory=True, + ), + invert_x=dict( + usedefault=True, + ), + invert_y=dict( + usedefault=True, + ), + invert_z=dict( + usedefault=True, + ), + out_encoding_file=dict( + extensions=None, + genfile=True, + ), ) inputs = FSL2MRTrix.input_spec() @@ -19,7 +34,11 @@ def test_FSL2MRTrix_inputs(): def test_FSL2MRTrix_outputs(): - output_map = dict(encoding_file=dict(extensions=None)) + output_map = dict( + encoding_file=dict( + extensions=None, + ), + ) outputs = FSL2MRTrix.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py b/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py index ce6143503c..c7b9c19d24 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py @@ -4,11 +4,21 @@ def test_FilterTracks_inputs(): input_map = dict( - args=dict(argstr="%s"), - debug=dict(argstr="-debug", position=1), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), exclude_file=dict( - argstr="-exclude %s", extensions=None, xor=["exclude_file", "exclude_spec"] + argstr="-exclude %s", + extensions=None, + xor=["exclude_file", "exclude_spec"], ), exclude_spec=dict( argstr="-exclude %s", @@ -17,9 +27,16 @@ def test_FilterTracks_inputs(): units="mm", xor=["exclude_file", "exclude_spec"], ), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), include_file=dict( - argstr="-include %s", extensions=None, xor=["include_file", "include_spec"] + argstr="-include %s", + extensions=None, + xor=["include_file", "include_spec"], ), include_spec=dict( argstr="-include %s", @@ -28,9 +45,16 @@ def test_FilterTracks_inputs(): units="mm", xor=["include_file", "include_spec"], ), - invert=dict(argstr="-invert"), - minimum_tract_length=dict(argstr="-minlength %s", units="mm"), - no_mask_interpolation=dict(argstr="-nomaskinterp"), + invert=dict( + argstr="-invert", + ), + minimum_tract_length=dict( + argstr="-minlength %s", + units="mm", + ), + no_mask_interpolation=dict( + argstr="-nomaskinterp", + ), out_file=dict( argstr="%s", extensions=None, @@ -39,7 +63,10 @@ def test_FilterTracks_inputs(): name_template="%s_filt", position=-1, ), - quiet=dict(argstr="-quiet", position=1), + quiet=dict( + argstr="-quiet", + position=1, + ), ) inputs = FilterTracks.input_spec() @@ -49,7 +76,11 @@ def test_FilterTracks_inputs(): def test_FilterTracks_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = FilterTracks.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py b/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py index 3efa6690b1..62132f795a 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py @@ -4,13 +4,34 @@ def test_FindShPeaks_inputs(): input_map = dict( - args=dict(argstr="%s"), - directions_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - display_debug=dict(argstr="-debug"), - display_info=dict(argstr="-info"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), - num_peaks=dict(argstr="-num %s"), + args=dict( + argstr="%s", + ), + directions_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + display_debug=dict( + argstr="-debug", + ), + display_info=dict( + argstr="-info", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + num_peaks=dict( + argstr="-num %s", + ), out_file=dict( argstr="%s", extensions=None, @@ -20,10 +41,20 @@ def test_FindShPeaks_inputs(): 
name_template="%s_peak_dirs.mif", position=-1, ), - peak_directions=dict(argstr="-direction %s", sep=" "), - peak_threshold=dict(argstr="-threshold %s"), - peaks_image=dict(argstr="-peaks %s", extensions=None), - quiet_display=dict(argstr="-quiet"), + peak_directions=dict( + argstr="-direction %s", + sep=" ", + ), + peak_threshold=dict( + argstr="-threshold %s", + ), + peaks_image=dict( + argstr="-peaks %s", + extensions=None, + ), + quiet_display=dict( + argstr="-quiet", + ), ) inputs = FindShPeaks.input_spec() @@ -33,7 +64,11 @@ def test_FindShPeaks_inputs(): def test_FindShPeaks_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = FindShPeaks.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py b/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py index 128185da4e..63b66ab1e6 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py @@ -4,12 +4,27 @@ def test_GenerateDirections_inputs(): input_map = dict( - args=dict(argstr="%s"), - display_debug=dict(argstr="-debug"), - display_info=dict(argstr="-info"), - environ=dict(nohash=True, usedefault=True), - niter=dict(argstr="-niter %s"), - num_dirs=dict(argstr="%s", mandatory=True, position=-2), + args=dict( + argstr="%s", + ), + display_debug=dict( + argstr="-debug", + ), + display_info=dict( + argstr="-info", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + niter=dict( + argstr="-niter %s", + ), + num_dirs=dict( + argstr="%s", + mandatory=True, + position=-2, + ), out_file=dict( argstr="%s", extensions=None, @@ -18,8 +33,12 @@ def test_GenerateDirections_inputs(): name_template="directions_%d.txt", position=-1, ), - power=dict(argstr="-power %s"), - quiet_display=dict(argstr="-quiet"), + power=dict( + argstr="-power %s", + ), + quiet_display=dict( + argstr="-quiet", + ), ) inputs = GenerateDirections.input_spec() @@ -29,7 +48,11 @@ def test_GenerateDirections_inputs(): def test_GenerateDirections_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = GenerateDirections.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py b/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py index 4c3717459c..09b893f105 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py @@ -4,16 +4,39 @@ def test_GenerateWhiteMatterMask_inputs(): input_map = dict( - args=dict(argstr="%s"), - binary_mask=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + args=dict( + argstr="%s", + ), + binary_mask=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), encoding_file=dict( - argstr="-grad %s", extensions=None, mandatory=True, position=1 + argstr="-grad %s", + extensions=None, + mandatory=True, + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + noise_level_margin=dict( + argstr="-margin %s", ), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), - noise_level_margin=dict(argstr="-margin %s"), 
out_WMProb_filename=dict( - argstr="%s", extensions=None, genfile=True, position=-1 + argstr="%s", + extensions=None, + genfile=True, + position=-1, ), ) inputs = GenerateWhiteMatterMask.input_spec() @@ -24,7 +47,11 @@ def test_GenerateWhiteMatterMask_inputs(): def test_GenerateWhiteMatterMask_outputs(): - output_map = dict(WMprobabilitymap=dict(extensions=None)) + output_map = dict( + WMprobabilitymap=dict( + extensions=None, + ), + ) outputs = GenerateWhiteMatterMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py b/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py index 2daffeafec..7e819a66da 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py @@ -4,20 +4,69 @@ def test_MRConvert_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - extension=dict(position=2, usedefault=True), - extract_at_axis=dict(argstr="-coord %s", position=1), - extract_at_coordinate=dict(argstr="%s", position=2, sep=","), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - layout=dict(argstr="-output %s", position=2), - offset_bias=dict(argstr="-scale %d", position=3, units="mm"), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), - output_datatype=dict(argstr="-output %s", position=2), - prs=dict(argstr="-prs", position=3), - replace_NaN_with_zero=dict(argstr="-zero", position=3), - resample=dict(argstr="-scale %d", position=3, units="mm"), - voxel_dims=dict(argstr="-vox %s", position=3, sep=","), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + extension=dict( + position=2, + usedefault=True, + ), + extract_at_axis=dict( + argstr="-coord %s", + position=1, + ), + extract_at_coordinate=dict( + argstr="%s", + position=2, + sep=",", + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + layout=dict( + argstr="-output %s", + position=2, + ), + offset_bias=dict( + argstr="-scale %d", + position=3, + units="mm", + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + output_datatype=dict( + argstr="-output %s", + position=2, + ), + prs=dict( + argstr="-prs", + position=3, + ), + replace_NaN_with_zero=dict( + argstr="-zero", + position=3, + ), + resample=dict( + argstr="-scale %d", + position=3, + units="mm", + ), + voxel_dims=dict( + argstr="-vox %s", + position=3, + sep=",", + ), ) inputs = MRConvert.input_spec() @@ -27,7 +76,11 @@ def test_MRConvert_inputs(): def test_MRConvert_outputs(): - output_map = dict(converted=dict(extensions=None)) + output_map = dict( + converted=dict( + extensions=None, + ), + ) outputs = MRConvert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py b/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py index 1076605c1e..daa1231f06 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py @@ -4,12 +4,32 @@ def test_MRMultiply_inputs(): input_map = dict( - args=dict(argstr="%s"), - debug=dict(argstr="-debug", position=1), - environ=dict(nohash=True, usedefault=True), - in_files=dict(argstr="%s", mandatory=True, position=-2), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), - quiet=dict(argstr="-quiet", position=1), + args=dict( + 
argstr="%s", + ), + debug=dict( + argstr="-debug", + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr="%s", + mandatory=True, + position=-2, + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + quiet=dict( + argstr="-quiet", + position=1, + ), ) inputs = MRMultiply.input_spec() @@ -19,7 +39,11 @@ def test_MRMultiply_inputs(): def test_MRMultiply_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRMultiply.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py index 4ec839b0c8..f619525575 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py @@ -4,19 +4,64 @@ def test_MRTransform_inputs(): input_map = dict( - args=dict(argstr="%s"), - debug=dict(argstr="-debug", position=1), - environ=dict(nohash=True, usedefault=True), - flip_x=dict(argstr="-flipx", position=1), - in_files=dict(argstr="%s", mandatory=True, position=-2), - invert=dict(argstr="-inverse", position=1), - linear_transform=dict(argstr="-linear %s", extensions=None, position=1), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), - quiet=dict(argstr="-quiet", position=1), - reference_image=dict(argstr="-reference %s", extensions=None, position=1), - replace_transform=dict(argstr="-replace", position=1), - template_image=dict(argstr="-template %s", extensions=None, position=1), - transformation_file=dict(argstr="-transform %s", extensions=None, position=1), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flip_x=dict( + argstr="-flipx", + position=1, + ), + in_files=dict( + argstr="%s", + mandatory=True, + position=-2, + ), + invert=dict( + argstr="-inverse", + position=1, + ), + linear_transform=dict( + argstr="-linear %s", + extensions=None, + position=1, + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + quiet=dict( + argstr="-quiet", + position=1, + ), + reference_image=dict( + argstr="-reference %s", + extensions=None, + position=1, + ), + replace_transform=dict( + argstr="-replace", + position=1, + ), + template_image=dict( + argstr="-template %s", + extensions=None, + position=1, + ), + transformation_file=dict( + argstr="-transform %s", + extensions=None, + position=1, + ), ) inputs = MRTransform.input_spec() @@ -26,7 +71,11 @@ def test_MRTransform_inputs(): def test_MRTransform_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py index d4393db863..0fb54a3020 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py @@ -4,11 +4,24 @@ def test_MRTrix2TrackVis_inputs(): input_map = dict( - image_file=dict(extensions=None), - in_file=dict(extensions=None, mandatory=True), - matrix_file=dict(extensions=None), - out_filename=dict(extensions=None, genfile=True, usedefault=True), - 
registration_image_file=dict(extensions=None), + image_file=dict( + extensions=None, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + matrix_file=dict( + extensions=None, + ), + out_filename=dict( + extensions=None, + genfile=True, + usedefault=True, + ), + registration_image_file=dict( + extensions=None, + ), ) inputs = MRTrix2TrackVis.input_spec() @@ -18,7 +31,11 @@ def test_MRTrix2TrackVis_inputs(): def test_MRTrix2TrackVis_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRTrix2TrackVis.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py index 3b0e2413d9..eecdc39bf5 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py @@ -4,9 +4,19 @@ def test_MRTrixInfo_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), ) inputs = MRTrixInfo.input_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py index ad3dfb5bb3..8eab033221 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py @@ -4,11 +4,26 @@ def test_MRTrixViewer_inputs(): input_map = dict( - args=dict(argstr="%s"), - debug=dict(argstr="-debug", position=1), - environ=dict(nohash=True, usedefault=True), - in_files=dict(argstr="%s", mandatory=True, position=-2), - quiet=dict(argstr="-quiet", position=1), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr="%s", + mandatory=True, + position=-2, + ), + quiet=dict( + argstr="-quiet", + position=1, + ), ) inputs = MRTrixViewer.input_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py b/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py index 634c7e63eb..7a0974d9b8 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py @@ -4,12 +4,33 @@ def test_MedianFilter3D_inputs(): input_map = dict( - args=dict(argstr="%s"), - debug=dict(argstr="-debug", position=1), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), - quiet=dict(argstr="-quiet", position=1), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + quiet=dict( + argstr="-quiet", + position=1, + ), ) inputs = MedianFilter3D.input_spec() @@ -19,7 +40,11 @@ def test_MedianFilter3D_inputs(): def test_MedianFilter3D_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = 
MedianFilter3D.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py index dd1d9e6649..8dded55576 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py @@ -4,13 +4,27 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): input_map = dict( - args=dict(argstr="%s"), - cutoff_value=dict(argstr="-cutoff %s", units="NA"), - desired_number_of_tracks=dict(argstr="-number %d"), - do_not_precompute=dict(argstr="-noprecomputed"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + cutoff_value=dict( + argstr="-cutoff %s", + units="NA", + ), + desired_number_of_tracks=dict( + argstr="-number %d", + ), + do_not_precompute=dict( + argstr="-noprecomputed", + ), + environ=dict( + nohash=True, + usedefault=True, + ), exclude_file=dict( - argstr="-exclude %s", extensions=None, xor=["exclude_file", "exclude_spec"] + argstr="-exclude %s", + extensions=None, + xor=["exclude_file", "exclude_spec"], ), exclude_spec=dict( argstr="-exclude %s", @@ -19,9 +33,16 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): units="mm", xor=["exclude_file", "exclude_spec"], ), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), include_file=dict( - argstr="-include %s", extensions=None, xor=["include_file", "include_spec"] + argstr="-include %s", + extensions=None, + xor=["include_file", "include_spec"], ), include_spec=dict( argstr="-include %s", @@ -30,11 +51,23 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): units="mm", xor=["include_file", "include_spec"], ), - initial_cutoff_value=dict(argstr="-initcutoff %s", units="NA"), - initial_direction=dict(argstr="-initdirection %s", units="voxels"), - inputmodel=dict(argstr="%s", position=-3, usedefault=True), + initial_cutoff_value=dict( + argstr="-initcutoff %s", + units="NA", + ), + initial_direction=dict( + argstr="-initdirection %s", + units="voxels", + ), + inputmodel=dict( + argstr="%s", + position=-3, + usedefault=True, + ), mask_file=dict( - argstr="-mask %s", extensions=None, xor=["mask_file", "mask_spec"] + argstr="-mask %s", + extensions=None, + xor=["mask_file", "mask_spec"], ), mask_spec=dict( argstr="-mask %s", @@ -43,12 +76,27 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): units="mm", xor=["mask_file", "mask_spec"], ), - maximum_number_of_tracks=dict(argstr="-maxnum %d"), - maximum_number_of_trials=dict(argstr="-trials %s"), - maximum_tract_length=dict(argstr="-length %s", units="mm"), - minimum_radius_of_curvature=dict(argstr="-curvature %s", units="mm"), - minimum_tract_length=dict(argstr="-minlength %s", units="mm"), - no_mask_interpolation=dict(argstr="-nomaskinterp"), + maximum_number_of_tracks=dict( + argstr="-maxnum %d", + ), + maximum_number_of_trials=dict( + argstr="-trials %s", + ), + maximum_tract_length=dict( + argstr="-length %s", + units="mm", + ), + minimum_radius_of_curvature=dict( + argstr="-curvature %s", + units="mm", + ), + minimum_tract_length=dict( + argstr="-minlength %s", + units="mm", + ), + no_mask_interpolation=dict( + argstr="-nomaskinterp", 
+ ), out_file=dict( argstr="%s", extensions=None, @@ -58,7 +106,9 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): position=-1, ), seed_file=dict( - argstr="-seed %s", extensions=None, xor=["seed_file", "seed_spec"] + argstr="-seed %s", + extensions=None, + xor=["seed_file", "seed_spec"], ), seed_spec=dict( argstr="-seed %s", @@ -67,9 +117,16 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): units="mm", xor=["seed_file", "seed_spec"], ), - step_size=dict(argstr="-step %s", units="mm"), - stop=dict(argstr="-stop"), - unidirectional=dict(argstr="-unidirectional"), + step_size=dict( + argstr="-step %s", + units="mm", + ), + stop=dict( + argstr="-stop", + ), + unidirectional=dict( + argstr="-unidirectional", + ), ) inputs = ProbabilisticSphericallyDeconvolutedStreamlineTrack.input_spec() @@ -79,7 +136,11 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_outputs(): - output_map = dict(tracked=dict(extensions=None)) + output_map = dict( + tracked=dict( + extensions=None, + ), + ) outputs = ProbabilisticSphericallyDeconvolutedStreamlineTrack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py index 8dfdd1047a..c717eb628b 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py @@ -4,13 +4,27 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): input_map = dict( - args=dict(argstr="%s"), - cutoff_value=dict(argstr="-cutoff %s", units="NA"), - desired_number_of_tracks=dict(argstr="-number %d"), - do_not_precompute=dict(argstr="-noprecomputed"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + cutoff_value=dict( + argstr="-cutoff %s", + units="NA", + ), + desired_number_of_tracks=dict( + argstr="-number %d", + ), + do_not_precompute=dict( + argstr="-noprecomputed", + ), + environ=dict( + nohash=True, + usedefault=True, + ), exclude_file=dict( - argstr="-exclude %s", extensions=None, xor=["exclude_file", "exclude_spec"] + argstr="-exclude %s", + extensions=None, + xor=["exclude_file", "exclude_spec"], ), exclude_spec=dict( argstr="-exclude %s", @@ -19,9 +33,16 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): units="mm", xor=["exclude_file", "exclude_spec"], ), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), include_file=dict( - argstr="-include %s", extensions=None, xor=["include_file", "include_spec"] + argstr="-include %s", + extensions=None, + xor=["include_file", "include_spec"], ), include_spec=dict( argstr="-include %s", @@ -30,11 +51,23 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): units="mm", xor=["include_file", "include_spec"], ), - initial_cutoff_value=dict(argstr="-initcutoff %s", units="NA"), - initial_direction=dict(argstr="-initdirection %s", units="voxels"), - inputmodel=dict(argstr="%s", position=-3, usedefault=True), + initial_cutoff_value=dict( + argstr="-initcutoff %s", + units="NA", + ), + initial_direction=dict( + argstr="-initdirection %s", + units="voxels", + ), + inputmodel=dict( + argstr="%s", + position=-3, + usedefault=True, + ), mask_file=dict( - 
argstr="-mask %s", extensions=None, xor=["mask_file", "mask_spec"] + argstr="-mask %s", + extensions=None, + xor=["mask_file", "mask_spec"], ), mask_spec=dict( argstr="-mask %s", @@ -43,11 +76,24 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): units="mm", xor=["mask_file", "mask_spec"], ), - maximum_number_of_tracks=dict(argstr="-maxnum %d"), - maximum_tract_length=dict(argstr="-length %s", units="mm"), - minimum_radius_of_curvature=dict(argstr="-curvature %s", units="mm"), - minimum_tract_length=dict(argstr="-minlength %s", units="mm"), - no_mask_interpolation=dict(argstr="-nomaskinterp"), + maximum_number_of_tracks=dict( + argstr="-maxnum %d", + ), + maximum_tract_length=dict( + argstr="-length %s", + units="mm", + ), + minimum_radius_of_curvature=dict( + argstr="-curvature %s", + units="mm", + ), + minimum_tract_length=dict( + argstr="-minlength %s", + units="mm", + ), + no_mask_interpolation=dict( + argstr="-nomaskinterp", + ), out_file=dict( argstr="%s", extensions=None, @@ -57,7 +103,9 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): position=-1, ), seed_file=dict( - argstr="-seed %s", extensions=None, xor=["seed_file", "seed_spec"] + argstr="-seed %s", + extensions=None, + xor=["seed_file", "seed_spec"], ), seed_spec=dict( argstr="-seed %s", @@ -66,9 +114,16 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): units="mm", xor=["seed_file", "seed_spec"], ), - step_size=dict(argstr="-step %s", units="mm"), - stop=dict(argstr="-stop"), - unidirectional=dict(argstr="-unidirectional"), + step_size=dict( + argstr="-step %s", + units="mm", + ), + stop=dict( + argstr="-stop", + ), + unidirectional=dict( + argstr="-unidirectional", + ), ) inputs = SphericallyDeconvolutedStreamlineTrack.input_spec() @@ -78,7 +133,11 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): def test_SphericallyDeconvolutedStreamlineTrack_outputs(): - output_map = dict(tracked=dict(extensions=None)) + output_map = dict( + tracked=dict( + extensions=None, + ), + ) outputs = SphericallyDeconvolutedStreamlineTrack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py index 1b0ea2e187..d8f1a5c869 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py @@ -4,13 +4,27 @@ def test_StreamlineTrack_inputs(): input_map = dict( - args=dict(argstr="%s"), - cutoff_value=dict(argstr="-cutoff %s", units="NA"), - desired_number_of_tracks=dict(argstr="-number %d"), - do_not_precompute=dict(argstr="-noprecomputed"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + cutoff_value=dict( + argstr="-cutoff %s", + units="NA", + ), + desired_number_of_tracks=dict( + argstr="-number %d", + ), + do_not_precompute=dict( + argstr="-noprecomputed", + ), + environ=dict( + nohash=True, + usedefault=True, + ), exclude_file=dict( - argstr="-exclude %s", extensions=None, xor=["exclude_file", "exclude_spec"] + argstr="-exclude %s", + extensions=None, + xor=["exclude_file", "exclude_spec"], ), exclude_spec=dict( argstr="-exclude %s", @@ -19,9 +33,16 @@ def test_StreamlineTrack_inputs(): units="mm", xor=["exclude_file", "exclude_spec"], ), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), include_file=dict( - argstr="-include %s", extensions=None, 
xor=["include_file", "include_spec"] + argstr="-include %s", + extensions=None, + xor=["include_file", "include_spec"], ), include_spec=dict( argstr="-include %s", @@ -30,11 +51,23 @@ def test_StreamlineTrack_inputs(): units="mm", xor=["include_file", "include_spec"], ), - initial_cutoff_value=dict(argstr="-initcutoff %s", units="NA"), - initial_direction=dict(argstr="-initdirection %s", units="voxels"), - inputmodel=dict(argstr="%s", position=-3, usedefault=True), + initial_cutoff_value=dict( + argstr="-initcutoff %s", + units="NA", + ), + initial_direction=dict( + argstr="-initdirection %s", + units="voxels", + ), + inputmodel=dict( + argstr="%s", + position=-3, + usedefault=True, + ), mask_file=dict( - argstr="-mask %s", extensions=None, xor=["mask_file", "mask_spec"] + argstr="-mask %s", + extensions=None, + xor=["mask_file", "mask_spec"], ), mask_spec=dict( argstr="-mask %s", @@ -43,11 +76,24 @@ def test_StreamlineTrack_inputs(): units="mm", xor=["mask_file", "mask_spec"], ), - maximum_number_of_tracks=dict(argstr="-maxnum %d"), - maximum_tract_length=dict(argstr="-length %s", units="mm"), - minimum_radius_of_curvature=dict(argstr="-curvature %s", units="mm"), - minimum_tract_length=dict(argstr="-minlength %s", units="mm"), - no_mask_interpolation=dict(argstr="-nomaskinterp"), + maximum_number_of_tracks=dict( + argstr="-maxnum %d", + ), + maximum_tract_length=dict( + argstr="-length %s", + units="mm", + ), + minimum_radius_of_curvature=dict( + argstr="-curvature %s", + units="mm", + ), + minimum_tract_length=dict( + argstr="-minlength %s", + units="mm", + ), + no_mask_interpolation=dict( + argstr="-nomaskinterp", + ), out_file=dict( argstr="%s", extensions=None, @@ -57,7 +103,9 @@ def test_StreamlineTrack_inputs(): position=-1, ), seed_file=dict( - argstr="-seed %s", extensions=None, xor=["seed_file", "seed_spec"] + argstr="-seed %s", + extensions=None, + xor=["seed_file", "seed_spec"], ), seed_spec=dict( argstr="-seed %s", @@ -66,9 +114,16 @@ def test_StreamlineTrack_inputs(): units="mm", xor=["seed_file", "seed_spec"], ), - step_size=dict(argstr="-step %s", units="mm"), - stop=dict(argstr="-stop"), - unidirectional=dict(argstr="-unidirectional"), + step_size=dict( + argstr="-step %s", + units="mm", + ), + stop=dict( + argstr="-stop", + ), + unidirectional=dict( + argstr="-unidirectional", + ), ) inputs = StreamlineTrack.input_spec() @@ -78,7 +133,11 @@ def test_StreamlineTrack_inputs(): def test_StreamlineTrack_outputs(): - output_map = dict(tracked=dict(extensions=None)) + output_map = dict( + tracked=dict( + extensions=None, + ), + ) outputs = StreamlineTrack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py index 985e9b1e41..c32daa3574 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py @@ -4,12 +4,33 @@ def test_Tensor2ApparentDiffusion_inputs(): input_map = dict( - args=dict(argstr="%s"), - debug=dict(argstr="-debug", position=1), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), - quiet=dict(argstr="-quiet", position=1), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + 
in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + quiet=dict( + argstr="-quiet", + position=1, + ), ) inputs = Tensor2ApparentDiffusion.input_spec() @@ -19,7 +40,11 @@ def test_Tensor2ApparentDiffusion_inputs(): def test_Tensor2ApparentDiffusion_outputs(): - output_map = dict(ADC=dict(extensions=None)) + output_map = dict( + ADC=dict( + extensions=None, + ), + ) outputs = Tensor2ApparentDiffusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py index d1b02c4ac2..bf90806f74 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py @@ -4,12 +4,33 @@ def test_Tensor2FractionalAnisotropy_inputs(): input_map = dict( - args=dict(argstr="%s"), - debug=dict(argstr="-debug", position=1), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), - quiet=dict(argstr="-quiet", position=1), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + quiet=dict( + argstr="-quiet", + position=1, + ), ) inputs = Tensor2FractionalAnisotropy.input_spec() @@ -19,7 +40,11 @@ def test_Tensor2FractionalAnisotropy_inputs(): def test_Tensor2FractionalAnisotropy_outputs(): - output_map = dict(FA=dict(extensions=None)) + output_map = dict( + FA=dict( + extensions=None, + ), + ) outputs = Tensor2FractionalAnisotropy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py index 8ec90cbaa7..a87eefef7e 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py @@ -4,12 +4,33 @@ def test_Tensor2Vector_inputs(): input_map = dict( - args=dict(argstr="%s"), - debug=dict(argstr="-debug", position=1), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), - quiet=dict(argstr="-quiet", position=1), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + quiet=dict( + argstr="-quiet", + position=1, + ), ) inputs = Tensor2Vector.input_spec() @@ -19,7 +40,11 @@ def test_Tensor2Vector_inputs(): def test_Tensor2Vector_outputs(): - output_map = dict(vector=dict(extensions=None)) + output_map = dict( + vector=dict( + extensions=None, + ), + ) outputs = Tensor2Vector.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py 
b/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py index 59e0696956..0a67f4db56 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py @@ -4,16 +4,47 @@ def test_Threshold_inputs(): input_map = dict( - absolute_threshold_value=dict(argstr="-abs %s"), - args=dict(argstr="%s"), - debug=dict(argstr="-debug", position=1), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - invert=dict(argstr="-invert", position=1), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), - percentage_threshold_value=dict(argstr="-percent %s"), - quiet=dict(argstr="-quiet", position=1), - replace_zeros_with_NaN=dict(argstr="-nan", position=1), + absolute_threshold_value=dict( + argstr="-abs %s", + ), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-debug", + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + invert=dict( + argstr="-invert", + position=1, + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + percentage_threshold_value=dict( + argstr="-percent %s", + ), + quiet=dict( + argstr="-quiet", + position=1, + ), + replace_zeros_with_NaN=dict( + argstr="-nan", + position=1, + ), ) inputs = Threshold.input_spec() @@ -23,7 +54,11 @@ def test_Threshold_inputs(): def test_Threshold_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Threshold.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py b/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py index c976790aaf..9323ba74f2 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py @@ -4,16 +4,52 @@ def test_Tracks2Prob_inputs(): input_map = dict( - args=dict(argstr="%s"), - colour=dict(argstr="-colour", position=3), - environ=dict(nohash=True, usedefault=True), - fraction=dict(argstr="-fraction", position=3), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - out_filename=dict(argstr="%s", extensions=None, genfile=True, position=-1), - output_datatype=dict(argstr="-datatype %s", position=2), - resample=dict(argstr="-resample %d", position=3, units="mm"), - template_file=dict(argstr="-template %s", extensions=None, position=1), - voxel_dims=dict(argstr="-vox %s", position=2, sep=","), + args=dict( + argstr="%s", + ), + colour=dict( + argstr="-colour", + position=3, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fraction=dict( + argstr="-fraction", + position=3, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + out_filename=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + output_datatype=dict( + argstr="-datatype %s", + position=2, + ), + resample=dict( + argstr="-resample %d", + position=3, + units="mm", + ), + template_file=dict( + argstr="-template %s", + extensions=None, + position=1, + ), + voxel_dims=dict( + argstr="-vox %s", + position=2, + sep=",", + ), ) inputs = Tracks2Prob.input_spec() @@ -23,7 +59,11 @@ def test_Tracks2Prob_inputs(): def test_Tracks2Prob_outputs(): - output_map = dict(tract_image=dict(extensions=None)) + output_map = dict( + tract_image=dict( + 
extensions=None, + ), + ) outputs = Tracks2Prob.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py b/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py index a54a60285a..a9334f5412 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py @@ -4,11 +4,25 @@ def test_ACTPrepareFSL_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), out_file=dict( - argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + usedefault=True, ), ) inputs = ACTPrepareFSL.input_spec() @@ -19,7 +33,11 @@ def test_ACTPrepareFSL_inputs(): def test_ACTPrepareFSL_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ACTPrepareFSL.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py b/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py index 635bc0c9ab..ddf96a9c5f 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py @@ -4,17 +4,48 @@ def test_BrainMask_inputs(): input_map = dict( - args=dict(argstr="%s"), - bval_scale=dict(argstr="-bvalue_scaling %s"), - environ=dict(nohash=True, usedefault=True), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), - in_bval=dict(extensions=None), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - nthreads=dict(argstr="-nthreads %d", nohash=True), + args=dict( + argstr="%s", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), out_file=dict( - argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + usedefault=True, ), ) inputs = BrainMask.input_spec() @@ -25,7 +56,11 @@ def test_BrainMask_inputs(): def test_BrainMask_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = BrainMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py b/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py index b94051ab99..bfb0b1f0c9 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py @@ -4,23 +4,64 @@ def test_BuildConnectome_inputs(): input_map = dict( - 
args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), - in_parc=dict(argstr="%s", extensions=None, position=-2), - in_scalar=dict(argstr="-image %s", extensions=None), - in_weights=dict(argstr="-tck_weights_in %s", extensions=None), - keep_unassigned=dict(argstr="-keep_unassigned"), - metric=dict(argstr="-metric %s"), - nthreads=dict(argstr="-nthreads %d", nohash=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + in_parc=dict( + argstr="%s", + extensions=None, + position=-2, + ), + in_scalar=dict( + argstr="-image %s", + extensions=None, + ), + in_weights=dict( + argstr="-tck_weights_in %s", + extensions=None, + ), + keep_unassigned=dict( + argstr="-keep_unassigned", + ), + metric=dict( + argstr="-metric %s", + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), out_file=dict( - argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + usedefault=True, + ), + search_forward=dict( + argstr="-assignment_forward_search %f", + ), + search_radius=dict( + argstr="-assignment_radial_search %f", + ), + search_reverse=dict( + argstr="-assignment_reverse_search %f", + ), + vox_lookup=dict( + argstr="-assignment_voxel_lookup", + ), + zero_diagonal=dict( + argstr="-zero_diagonal", ), - search_forward=dict(argstr="-assignment_forward_search %f"), - search_radius=dict(argstr="-assignment_radial_search %f"), - search_reverse=dict(argstr="-assignment_reverse_search %f"), - vox_lookup=dict(argstr="-assignment_voxel_lookup"), - zero_diagonal=dict(argstr="-zero_diagonal"), ) inputs = BuildConnectome.input_spec() @@ -30,7 +71,11 @@ def test_BuildConnectome_inputs(): def test_BuildConnectome_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = BuildConnectome.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py b/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py index 03ec89168e..ab1d984425 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py @@ -4,27 +4,82 @@ def test_ComputeTDI_inputs(): input_map = dict( - args=dict(argstr="%s"), - contrast=dict(argstr="-constrast %s"), - data_type=dict(argstr="-datatype %s"), - dixel=dict(argstr="-dixel %s", extensions=None), - ends_only=dict(argstr="-ends_only"), - environ=dict(nohash=True, usedefault=True), - fwhm_tck=dict(argstr="-fwhm_tck %f"), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - in_map=dict(argstr="-image %s", extensions=None), - map_zero=dict(argstr="-map_zero"), - max_tod=dict(argstr="-tod %d"), - nthreads=dict(argstr="-nthreads %d", nohash=True), - out_file=dict(argstr="%s", extensions=None, position=-1, usedefault=True), - precise=dict(argstr="-precise"), - reference=dict(argstr="-template %s", extensions=None), - stat_tck=dict(argstr="-stat_tck %s"), - stat_vox=dict(argstr="-stat_vox %s"), - tck_weights=dict(argstr="-tck_weights_in %s", extensions=None), - upsample=dict(argstr="-upsample %d"), - use_dec=dict(argstr="-dec"), - vox_size=dict(argstr="-vox %s", sep=","), + args=dict( + argstr="%s", + ), + contrast=dict( + argstr="-constrast %s", + ), + 
data_type=dict( + argstr="-datatype %s", + ), + dixel=dict( + argstr="-dixel %s", + extensions=None, + ), + ends_only=dict( + argstr="-ends_only", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fwhm_tck=dict( + argstr="-fwhm_tck %f", + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + in_map=dict( + argstr="-image %s", + extensions=None, + ), + map_zero=dict( + argstr="-map_zero", + ), + max_tod=dict( + argstr="-tod %d", + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), + out_file=dict( + argstr="%s", + extensions=None, + position=-1, + usedefault=True, + ), + precise=dict( + argstr="-precise", + ), + reference=dict( + argstr="-template %s", + extensions=None, + ), + stat_tck=dict( + argstr="-stat_tck %s", + ), + stat_vox=dict( + argstr="-stat_vox %s", + ), + tck_weights=dict( + argstr="-tck_weights_in %s", + extensions=None, + ), + upsample=dict( + argstr="-upsample %d", + ), + use_dec=dict( + argstr="-dec", + ), + vox_size=dict( + argstr="-vox %s", + sep=",", + ), ) inputs = ComputeTDI.input_spec() @@ -34,7 +89,11 @@ def test_ComputeTDI_inputs(): def test_ComputeTDI_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ComputeTDI.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ConstrainedSphericalDeconvolution.py b/nipype/interfaces/mrtrix3/tests/test_auto_ConstrainedSphericalDeconvolution.py index ab85e9c8d4..c395f0d1c8 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ConstrainedSphericalDeconvolution.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ConstrainedSphericalDeconvolution.py @@ -4,29 +4,100 @@ def test_ConstrainedSphericalDeconvolution_inputs(): input_map = dict( - algorithm=dict(argstr="%s", mandatory=True, position=-8), - args=dict(argstr="%s"), - bval_scale=dict(argstr="-bvalue_scaling %s"), - csf_odf=dict(argstr="%s", extensions=None, position=-1), - csf_txt=dict(argstr="%s", extensions=None, position=-2), - environ=dict(nohash=True, usedefault=True), - gm_odf=dict(argstr="%s", extensions=None, position=-3), - gm_txt=dict(argstr="%s", extensions=None, position=-4), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), - in_bval=dict(extensions=None), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), - in_dirs=dict(argstr="-directions %s", extensions=None), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-7), - mask_file=dict(argstr="-mask %s", extensions=None), - max_sh=dict(argstr="-lmax %s", sep=","), - nthreads=dict(argstr="-nthreads %d", nohash=True), - predicted_signal=dict(argstr="-predicted_signal %s", extensions=None), - shell=dict(argstr="-shell %s", sep=","), + algorithm=dict( + argstr="%s", + mandatory=True, + position=-8, + ), + args=dict( + argstr="%s", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + csf_odf=dict( + argstr="%s", + extensions=None, + position=-1, + ), + csf_txt=dict( + argstr="%s", + extensions=None, + position=-2, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gm_odf=dict( + argstr="%s", + extensions=None, + position=-3, + ), + gm_txt=dict( + argstr="%s", + extensions=None, + position=-4, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + 
extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_dirs=dict( + argstr="-directions %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-7, + ), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + max_sh=dict( + argstr="-lmax %s", + sep=",", + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), + predicted_signal=dict( + argstr="-predicted_signal %s", + extensions=None, + ), + shell=dict( + argstr="-shell %s", + sep=",", + ), wm_odf=dict( - argstr="%s", extensions=None, mandatory=True, position=-5, usedefault=True + argstr="%s", + extensions=None, + mandatory=True, + position=-5, + usedefault=True, + ), + wm_txt=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-6, ), - wm_txt=dict(argstr="%s", extensions=None, mandatory=True, position=-6), ) inputs = ConstrainedSphericalDeconvolution.input_spec() @@ -37,10 +108,21 @@ def test_ConstrainedSphericalDeconvolution_inputs(): def test_ConstrainedSphericalDeconvolution_outputs(): output_map = dict( - csf_odf=dict(argstr="%s", extensions=None), - gm_odf=dict(argstr="%s", extensions=None), - predicted_signal=dict(extensions=None), - wm_odf=dict(argstr="%s", extensions=None), + csf_odf=dict( + argstr="%s", + extensions=None, + ), + gm_odf=dict( + argstr="%s", + extensions=None, + ), + predicted_signal=dict( + extensions=None, + ), + wm_odf=dict( + argstr="%s", + extensions=None, + ), ) outputs = ConstrainedSphericalDeconvolution.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py index 0a53b5c858..0028748ab9 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIBiasCorrect.py @@ -4,17 +4,50 @@ def test_DWIBiasCorrect_inputs(): input_map = dict( - args=dict(argstr="%s"), - bias=dict(argstr="-bias %s", extensions=None), - bval_scale=dict(argstr="-bvalue_scaling %s"), - environ=dict(nohash=True, usedefault=True), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), - in_bval=dict(extensions=None), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - in_mask=dict(argstr="-mask %s", extensions=None), - nthreads=dict(argstr="-nthreads %d", nohash=True), + args=dict( + argstr="%s", + ), + bias=dict( + argstr="-bias %s", + extensions=None, + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + in_mask=dict( + argstr="-mask %s", + extensions=None, + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), out_file=dict( argstr="%s", extensions=None, @@ -24,8 +57,18 @@ def test_DWIBiasCorrect_inputs(): name_template="%s_biascorr", position=-1, ), - use_ants=dict(argstr="ants", mandatory=True, position=0, xor=["use_fsl"]), - use_fsl=dict(argstr="fsl", mandatory=True, position=0, xor=["use_ants"]), + use_ants=dict( + argstr="ants", + mandatory=True, + position=0, + xor=["use_fsl"], + ), 
+ use_fsl=dict( + argstr="fsl", + mandatory=True, + position=0, + xor=["use_ants"], + ), ) inputs = DWIBiasCorrect.input_spec() @@ -35,7 +78,14 @@ def test_DWIBiasCorrect_inputs(): def test_DWIBiasCorrect_outputs(): - output_map = dict(bias=dict(extensions=None), out_file=dict(extensions=None)) + output_map = dict( + bias=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + ) outputs = DWIBiasCorrect.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py index 4c7dd7d8d6..efa722c81d 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py @@ -4,16 +4,46 @@ def test_DWIDenoise_inputs(): input_map = dict( - args=dict(argstr="%s"), - bval_scale=dict(argstr="-bvalue_scaling %s"), - environ=dict(nohash=True, usedefault=True), - extent=dict(argstr="-extent %d,%d,%d"), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), - in_bval=dict(extensions=None), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - mask=dict(argstr="-mask %s", extensions=None, position=1), + args=dict( + argstr="%s", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + extent=dict( + argstr="-extent %d,%d,%d", + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + position=1, + ), noise=dict( argstr="-noise %s", extensions=None, @@ -21,7 +51,10 @@ def test_DWIDenoise_inputs(): name_source="in_file", name_template="%s_noise", ), - nthreads=dict(argstr="-nthreads %d", nohash=True), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), out_file=dict( argstr="%s", extensions=None, @@ -39,7 +72,14 @@ def test_DWIDenoise_inputs(): def test_DWIDenoise_outputs(): - output_map = dict(noise=dict(extensions=None), out_file=dict(extensions=None)) + output_map = dict( + noise=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + ) outputs = DWIDenoise.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py index e6a17f277e..379e67d397 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py @@ -4,20 +4,61 @@ def test_DWIExtract_inputs(): input_map = dict( - args=dict(argstr="%s"), - bval_scale=dict(argstr="-bvalue_scaling %s"), - bzero=dict(argstr="-bzero"), - environ=dict(nohash=True, usedefault=True), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), - in_bval=dict(extensions=None), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - nobzero=dict(argstr="-no_bzero"), - nthreads=dict(argstr="-nthreads %d", nohash=True), - out_file=dict(argstr="%s", 
extensions=None, mandatory=True, position=-1), - shell=dict(argstr="-shell %s", sep=","), - singleshell=dict(argstr="-singleshell"), + args=dict( + argstr="%s", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + bzero=dict( + argstr="-bzero", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + nobzero=dict( + argstr="-no_bzero", + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), + out_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), + shell=dict( + argstr="-shell %s", + sep=",", + ), + singleshell=dict( + argstr="-singleshell", + ), ) inputs = DWIExtract.input_spec() @@ -27,7 +68,11 @@ def test_DWIExtract_inputs(): def test_DWIExtract_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = DWIExtract.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIPreproc.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIPreproc.py index 76fae6548f..7f226fe3cd 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_DWIPreproc.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIPreproc.py @@ -4,34 +4,90 @@ def test_DWIPreproc_inputs(): input_map = dict( - align_seepi=dict(argstr="-align_seepi"), - args=dict(argstr="%s"), - bval_scale=dict(argstr="-bvalue_scaling %s"), - eddy_options=dict(argstr='-eddy_options "%s"'), - environ=dict(nohash=True, usedefault=True), - export_grad_fsl=dict(argstr="-export_grad_fsl"), - export_grad_mrtrix=dict(argstr="-export_grad_mrtrix"), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), - in_bval=dict(extensions=None), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), - in_epi=dict(argstr="-se_epi %s", extensions=None), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), - nthreads=dict(argstr="-nthreads %d", nohash=True), + align_seepi=dict( + argstr="-align_seepi", + ), + args=dict( + argstr="%s", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + eddy_options=dict( + argstr='-eddy_options "%s"', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + export_grad_fsl=dict( + argstr="-export_grad_fsl", + ), + export_grad_mrtrix=dict( + argstr="-export_grad_mrtrix", + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_epi=dict( + argstr="-se_epi %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), out_file=dict( - argstr="%s", extensions=None, mandatory=True, position=1, usedefault=True + argstr="%s", + extensions=None, + mandatory=True, + position=1, + usedefault=True, + ), + out_grad_fsl=dict( + argstr="%s, %s", + requires=["export_grad_fsl"], ), - out_grad_fsl=dict(argstr="%s, %s", requires=["export_grad_fsl"]), out_grad_mrtrix=dict( argstr="%s", 
extensions=None, requires=["export_grad_mrtrix"], usedefault=True, ), - pe_dir=dict(argstr="-pe_dir %s", mandatory=True), - ro_time=dict(argstr="-readout_time %f"), - rpe_options=dict(argstr="-rpe_%s", mandatory=True, position=2), - topup_options=dict(argstr='-topup_options "%s"'), + pe_dir=dict( + argstr="-pe_dir %s", + mandatory=True, + ), + ro_time=dict( + argstr="-readout_time %f", + ), + rpe_options=dict( + argstr="-rpe_%s", + mandatory=True, + position=2, + ), + topup_options=dict( + argstr='-topup_options "%s"', + ), ) inputs = DWIPreproc.input_spec() @@ -42,10 +98,25 @@ def test_DWIPreproc_inputs(): def test_DWIPreproc_outputs(): output_map = dict( - out_file=dict(argstr="%s", extensions=None), - out_fsl_bval=dict(argstr="%s", extensions=None, usedefault=True), - out_fsl_bvec=dict(argstr="%s", extensions=None, usedefault=True), - out_grad_mrtrix=dict(argstr="%s", extensions=None, usedefault=True), + out_file=dict( + argstr="%s", + extensions=None, + ), + out_fsl_bval=dict( + argstr="%s", + extensions=None, + usedefault=True, + ), + out_fsl_bvec=dict( + argstr="%s", + extensions=None, + usedefault=True, + ), + out_grad_mrtrix=dict( + argstr="%s", + extensions=None, + usedefault=True, + ), ) outputs = DWIPreproc.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py index 270e131a04..2d15207571 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py @@ -4,29 +4,103 @@ def test_EstimateFOD_inputs(): input_map = dict( - algorithm=dict(argstr="%s", mandatory=True, position=-8), - args=dict(argstr="%s"), - bval_scale=dict(argstr="-bvalue_scaling %s"), - csf_odf=dict(argstr="%s", extensions=None, position=-1, usedefault=True), - csf_txt=dict(argstr="%s", extensions=None, position=-2), - environ=dict(nohash=True, usedefault=True), - gm_odf=dict(argstr="%s", extensions=None, position=-3, usedefault=True), - gm_txt=dict(argstr="%s", extensions=None, position=-4), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), - in_bval=dict(extensions=None), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), - in_dirs=dict(argstr="-directions %s", extensions=None), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-7), - mask_file=dict(argstr="-mask %s", extensions=None), - max_sh=dict(argstr="-lmax %s", sep=",", usedefault=True), - nthreads=dict(argstr="-nthreads %d", nohash=True), - predicted_signal=dict(argstr="-predicted_signal %s", extensions=None), - shell=dict(argstr="-shell %s", sep=","), + algorithm=dict( + argstr="%s", + mandatory=True, + position=-8, + ), + args=dict( + argstr="%s", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + csf_odf=dict( + argstr="%s", + extensions=None, + position=-1, + usedefault=True, + ), + csf_txt=dict( + argstr="%s", + extensions=None, + position=-2, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gm_odf=dict( + argstr="%s", + extensions=None, + position=-3, + usedefault=True, + ), + gm_txt=dict( + argstr="%s", + extensions=None, + position=-4, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_dirs=dict( + argstr="-directions %s", + extensions=None, + 
), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-7, + ), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + max_sh=dict( + argstr="-lmax %s", + sep=",", + usedefault=True, + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), + predicted_signal=dict( + argstr="-predicted_signal %s", + extensions=None, + ), + shell=dict( + argstr="-shell %s", + sep=",", + ), wm_odf=dict( - argstr="%s", extensions=None, mandatory=True, position=-5, usedefault=True + argstr="%s", + extensions=None, + mandatory=True, + position=-5, + usedefault=True, + ), + wm_txt=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-6, ), - wm_txt=dict(argstr="%s", extensions=None, mandatory=True, position=-6), ) inputs = EstimateFOD.input_spec() @@ -37,10 +111,21 @@ def test_EstimateFOD_inputs(): def test_EstimateFOD_outputs(): output_map = dict( - csf_odf=dict(argstr="%s", extensions=None), - gm_odf=dict(argstr="%s", extensions=None), - predicted_signal=dict(extensions=None), - wm_odf=dict(argstr="%s", extensions=None), + csf_odf=dict( + argstr="%s", + extensions=None, + ), + gm_odf=dict( + argstr="%s", + extensions=None, + ), + predicted_signal=dict( + extensions=None, + ), + wm_odf=dict( + argstr="%s", + extensions=None, + ), ) outputs = EstimateFOD.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py b/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py index b296614696..7cf38faf8c 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py @@ -4,22 +4,64 @@ def test_FitTensor_inputs(): input_map = dict( - args=dict(argstr="%s"), - bval_scale=dict(argstr="-bvalue_scaling %s"), - environ=dict(nohash=True, usedefault=True), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), - in_bval=dict(extensions=None), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - in_mask=dict(argstr="-mask %s", extensions=None), - method=dict(argstr="-method %s"), - nthreads=dict(argstr="-nthreads %d", nohash=True), + args=dict( + argstr="%s", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + in_mask=dict( + argstr="-mask %s", + extensions=None, + ), + method=dict( + argstr="-method %s", + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), out_file=dict( - argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + usedefault=True, + ), + predicted_signal=dict( + argstr="-predicted_signal %s", + extensions=None, + ), + reg_term=dict( + argstr="-regularisation %f", + max_ver="0.3.13", ), - predicted_signal=dict(argstr="-predicted_signal %s", extensions=None), - reg_term=dict(argstr="-regularisation %f", max_ver="0.3.13"), ) inputs = FitTensor.input_spec() @@ -30,7 +72,12 @@ def test_FitTensor_inputs(): def test_FitTensor_outputs(): output_map = dict( - out_file=dict(extensions=None), 
predicted_signal=dict(extensions=None) + out_file=dict( + extensions=None, + ), + predicted_signal=dict( + extensions=None, + ), ) outputs = FitTensor.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py b/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py index 1e4fef12be..1b135a5917 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py @@ -4,17 +4,53 @@ def test_Generate5tt_inputs(): input_map = dict( - algorithm=dict(argstr="%s", mandatory=True, position=-3), - args=dict(argstr="%s"), - bval_scale=dict(argstr="-bvalue_scaling %s"), - environ=dict(nohash=True, usedefault=True), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), - in_bval=dict(extensions=None), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - nthreads=dict(argstr="-nthreads %d", nohash=True), - out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), + algorithm=dict( + argstr="%s", + mandatory=True, + position=-3, + ), + args=dict( + argstr="%s", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), + out_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), ) inputs = Generate5tt.input_spec() @@ -24,7 +60,11 @@ def test_Generate5tt_inputs(): def test_Generate5tt_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Generate5tt.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py index d505cdfb4f..15116f9bb8 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py @@ -4,19 +4,55 @@ def test_LabelConfig_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_config=dict(argstr="%s", extensions=None, position=-2), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), - lut_aal=dict(argstr="-lut_aal %s", extensions=None), - lut_basic=dict(argstr="-lut_basic %s", extensions=None), - lut_fs=dict(argstr="-lut_freesurfer %s", extensions=None), - lut_itksnap=dict(argstr="-lut_itksnap %s", extensions=None), - nthreads=dict(argstr="-nthreads %d", nohash=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_config=dict( + argstr="%s", + extensions=None, + position=-2, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + lut_aal=dict( + argstr="-lut_aal %s", + extensions=None, + ), + lut_basic=dict( + argstr="-lut_basic %s", + extensions=None, + ), + lut_fs=dict( + argstr="-lut_freesurfer %s", + extensions=None, + ), + lut_itksnap=dict( + argstr="-lut_itksnap %s", + extensions=None, + ), + 
nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), out_file=dict( - argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + usedefault=True, + ), + spine=dict( + argstr="-spine %s", + extensions=None, ), - spine=dict(argstr="-spine %s", extensions=None), ) inputs = LabelConfig.input_spec() @@ -26,7 +62,11 @@ def test_LabelConfig_inputs(): def test_LabelConfig_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = LabelConfig.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py index 554fb4d374..98512ac317 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py @@ -4,16 +4,45 @@ def test_LabelConvert_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_config=dict(argstr="%s", extensions=None, position=-2), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-4), - in_lut=dict(argstr="%s", extensions=None, mandatory=True, position=-3), - num_threads=dict(argstr="-nthreads %d", nohash=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_config=dict( + argstr="%s", + extensions=None, + position=-2, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-4, + ), + in_lut=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + num_threads=dict( + argstr="-nthreads %d", + nohash=True, + ), out_file=dict( - argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + usedefault=True, + ), + spine=dict( + argstr="-spine %s", + extensions=None, ), - spine=dict(argstr="-spine %s", extensions=None), ) inputs = LabelConvert.input_spec() @@ -23,7 +52,11 @@ def test_LabelConvert_inputs(): def test_LabelConvert_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = LabelConvert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py index f23f1a599b..578ffb9b1a 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py @@ -4,22 +4,65 @@ def test_MRConvert_inputs(): input_map = dict( - args=dict(argstr="%s"), - axes=dict(argstr="-axes %s", sep=","), - bval_scale=dict(argstr="-bvalue_scaling %s"), - coord=dict(argstr="-coord %s", sep=" "), - environ=dict(nohash=True, usedefault=True), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), - in_bval=dict(extensions=None), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - nthreads=dict(argstr="-nthreads %d", nohash=True), + args=dict( + argstr="%s", + ), + axes=dict( + argstr="-axes %s", + sep=",", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + coord=dict( + argstr="-coord %s", + sep=" ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict( 
+ argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), out_file=dict( - argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + usedefault=True, + ), + scaling=dict( + argstr="-scaling %s", + sep=",", + ), + vox=dict( + argstr="-vox %s", + sep=",", ), - scaling=dict(argstr="-scaling %s", sep=","), - vox=dict(argstr="-vox %s", sep=","), ) inputs = MRConvert.input_spec() @@ -29,7 +72,11 @@ def test_MRConvert_inputs(): def test_MRConvert_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRConvert.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py index 042b1a745a..f1ef52ab88 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRDeGibbs.py @@ -4,19 +4,61 @@ def test_MRDeGibbs_inputs(): input_map = dict( - args=dict(argstr="%s"), - axes=dict(argstr="-axes %s", maxlen=2, minlen=2, sep=",", usedefault=True), - bval_scale=dict(argstr="-bvalue_scaling %s"), - environ=dict(nohash=True, usedefault=True), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), - in_bval=dict(extensions=None), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - maxW=dict(argstr="-maxW %d", usedefault=True), - minW=dict(argstr="-minW %d", usedefault=True), - nshifts=dict(argstr="-nshifts %d", usedefault=True), - nthreads=dict(argstr="-nthreads %d", nohash=True), + args=dict( + argstr="%s", + ), + axes=dict( + argstr="-axes %s", + maxlen=2, + minlen=2, + sep=",", + usedefault=True, + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + maxW=dict( + argstr="-maxW %d", + usedefault=True, + ), + minW=dict( + argstr="-minW %d", + usedefault=True, + ), + nshifts=dict( + argstr="-nshifts %d", + usedefault=True, + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), out_file=dict( argstr="%s", extensions=None, @@ -34,7 +76,11 @@ def test_MRDeGibbs_inputs(): def test_MRDeGibbs_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRDeGibbs.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py index 0962705f36..6446b2ceda 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py @@ -4,18 
+4,56 @@ def test_MRMath_inputs(): input_map = dict( - args=dict(argstr="%s"), - axis=dict(argstr="-axis %d"), - bval_scale=dict(argstr="-bvalue_scaling %s"), - environ=dict(nohash=True, usedefault=True), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), - in_bval=dict(extensions=None), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), - nthreads=dict(argstr="-nthreads %d", nohash=True), - operation=dict(argstr="%s", mandatory=True, position=-2), - out_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), + args=dict( + argstr="%s", + ), + axis=dict( + argstr="-axis %d", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), + operation=dict( + argstr="%s", + mandatory=True, + position=-2, + ), + out_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), ) inputs = MRMath.input_spec() @@ -25,7 +63,11 @@ def test_MRMath_inputs(): def test_MRMath_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRMath.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py index 2415b4d9dd..ee982c9561 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRResize.py @@ -4,19 +4,51 @@ def test_MRResize_inputs(): input_map = dict( - args=dict(argstr="%s"), - bval_scale=dict(argstr="-bvalue_scaling %s"), - environ=dict(nohash=True, usedefault=True), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), + args=dict( + argstr="%s", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), image_size=dict( - argstr="-size %d,%d,%d", mandatory=True, xor=["voxel_size", "scale_factor"] + argstr="-size %d,%d,%d", + mandatory=True, + xor=["voxel_size", "scale_factor"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + interpolation=dict( + argstr="-interp %s", + usedefault=True, + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, ), - in_bval=dict(extensions=None), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - interpolation=dict(argstr="-interp %s", usedefault=True), - nthreads=dict(argstr="-nthreads %d", nohash=True), out_file=dict( argstr="%s", extensions=None, @@ -26,10 +58,14 @@ def test_MRResize_inputs(): position=-1, ), scale_factor=dict( - 
argstr="-scale %g,%g,%g", mandatory=True, xor=["image_size", "voxel_size"] + argstr="-scale %g,%g,%g", + mandatory=True, + xor=["image_size", "voxel_size"], ), voxel_size=dict( - argstr="-voxel %g,%g,%g", mandatory=True, xor=["image_size", "scale_factor"] + argstr="-voxel %g,%g,%g", + mandatory=True, + xor=["image_size", "scale_factor"], ), ) inputs = MRResize.input_spec() @@ -40,7 +76,11 @@ def test_MRResize_inputs(): def test_MRResize_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MRResize.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py index 6ece1ac2a4..7689f14f11 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py @@ -3,7 +3,15 @@ def test_MRTrix3Base_inputs(): - input_map = dict(args=dict(argstr="%s"), environ=dict(nohash=True, usedefault=True)) + input_map = dict( + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ) inputs = MRTrix3Base.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py b/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py index 720f4b12ad..0fd63be8af 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py @@ -4,14 +4,36 @@ def test_Mesh2PVE_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), - in_first=dict(argstr="-first %s", extensions=None), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + in_first=dict( + argstr="-first %s", + extensions=None, + ), out_file=dict( - argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + usedefault=True, + ), + reference=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, ), - reference=dict(argstr="%s", extensions=None, mandatory=True, position=-2), ) inputs = Mesh2PVE.input_spec() @@ -21,7 +43,11 @@ def test_Mesh2PVE_inputs(): def test_Mesh2PVE_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Mesh2PVE.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py b/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py index 3058252e62..ef3053cede 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py @@ -4,13 +4,36 @@ def test_ReplaceFSwithFIRST_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_config=dict(argstr="%s", extensions=None, position=-2), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-4), - in_t1w=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_config=dict( + argstr="%s", + extensions=None, + position=-2, + ), + 
in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-4, + ), + in_t1w=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), out_file=dict( - argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + usedefault=True, ), ) inputs = ReplaceFSwithFIRST.input_spec() @@ -21,7 +44,11 @@ def test_ReplaceFSwithFIRST_inputs(): def test_ReplaceFSwithFIRST_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ReplaceFSwithFIRST.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py b/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py index 15bd5704f3..f7e556f466 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py @@ -4,22 +4,76 @@ def test_ResponseSD_inputs(): input_map = dict( - algorithm=dict(argstr="%s", mandatory=True, position=1), - args=dict(argstr="%s"), - bval_scale=dict(argstr="-bvalue_scaling %s"), - csf_file=dict(argstr="%s", extensions=None, position=-1), - environ=dict(nohash=True, usedefault=True), - gm_file=dict(argstr="%s", extensions=None, position=-2), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), - in_bval=dict(extensions=None), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-5), - in_mask=dict(argstr="-mask %s", extensions=None), - max_sh=dict(argstr="-lmax %s", sep=","), - mtt_file=dict(argstr="%s", extensions=None, position=-4), - nthreads=dict(argstr="-nthreads %d", nohash=True), - wm_file=dict(argstr="%s", extensions=None, position=-3, usedefault=True), + algorithm=dict( + argstr="%s", + mandatory=True, + position=1, + ), + args=dict( + argstr="%s", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + csf_file=dict( + argstr="%s", + extensions=None, + position=-1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gm_file=dict( + argstr="%s", + extensions=None, + position=-2, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-5, + ), + in_mask=dict( + argstr="-mask %s", + extensions=None, + ), + max_sh=dict( + argstr="-lmax %s", + sep=",", + ), + mtt_file=dict( + argstr="%s", + extensions=None, + position=-4, + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), + wm_file=dict( + argstr="%s", + extensions=None, + position=-3, + usedefault=True, + ), ) inputs = ResponseSD.input_spec() @@ -30,9 +84,18 @@ def test_ResponseSD_inputs(): def test_ResponseSD_outputs(): output_map = dict( - csf_file=dict(argstr="%s", extensions=None), - gm_file=dict(argstr="%s", extensions=None), - wm_file=dict(argstr="%s", extensions=None), + csf_file=dict( + argstr="%s", + extensions=None, + ), + gm_file=dict( + argstr="%s", + extensions=None, + ), + wm_file=dict( + argstr="%s", + extensions=None, + ), ) outputs = ResponseSD.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_SH2Amp.py 
b/nipype/interfaces/mrtrix3/tests/test_auto_SH2Amp.py index 9079e3c128..ab75fc1f8a 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_SH2Amp.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_SH2Amp.py @@ -4,11 +4,28 @@ def test_SH2Amp_inputs(): input_map = dict( - args=dict(argstr="%s"), - directions=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), - nonnegative=dict(argstr="-nonnegative"), + args=dict( + argstr="%s", + ), + directions=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), + nonnegative=dict( + argstr="-nonnegative", + ), out_file=dict( argstr="%s", extensions=None, @@ -26,7 +43,11 @@ def test_SH2Amp_inputs(): def test_SH2Amp_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SH2Amp.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_SHConv.py b/nipype/interfaces/mrtrix3/tests/test_auto_SHConv.py index 4bcc4d8155..992e6984a8 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_SHConv.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_SHConv.py @@ -4,9 +4,19 @@ def test_SHConv_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-3), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-3, + ), out_file=dict( argstr="%s", extensions=None, @@ -15,7 +25,12 @@ def test_SHConv_inputs(): position=-1, usedefault=True, ), - response=dict(argstr="%s", extensions=None, mandatory=True, position=-2), + response=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), ) inputs = SHConv.input_spec() @@ -25,7 +40,11 @@ def test_SHConv_inputs(): def test_SHConv_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SHConv.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py b/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py index f47f46adbe..215dafedc0 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py @@ -4,13 +4,37 @@ def test_TCK2VTK_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - nthreads=dict(argstr="-nthreads %d", nohash=True), - out_file=dict(argstr="%s", extensions=None, position=-1, usedefault=True), - reference=dict(argstr="-image %s", extensions=None), - voxel=dict(argstr="-image %s", extensions=None), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), + out_file=dict( + argstr="%s", + extensions=None, + position=-1, + usedefault=True, + ), + reference=dict( + argstr="-image %s", + extensions=None, + ), + voxel=dict( + argstr="-image 
%s", + extensions=None, + ), ) inputs = TCK2VTK.input_spec() @@ -20,7 +44,11 @@ def test_TCK2VTK_inputs(): def test_TCK2VTK_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TCK2VTK.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py b/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py index a91f571f35..b23813aaf4 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py @@ -4,16 +4,47 @@ def test_TensorMetrics_inputs(): input_map = dict( - args=dict(argstr="%s"), - component=dict(argstr="-num %s", sep=",", usedefault=True), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-1), - in_mask=dict(argstr="-mask %s", extensions=None), - modulate=dict(argstr="-modulate %s"), - out_adc=dict(argstr="-adc %s", extensions=None), - out_eval=dict(argstr="-value %s", extensions=None), - out_evec=dict(argstr="-vector %s", extensions=None), - out_fa=dict(argstr="-fa %s", extensions=None), + args=dict( + argstr="%s", + ), + component=dict( + argstr="-num %s", + sep=",", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + ), + in_mask=dict( + argstr="-mask %s", + extensions=None, + ), + modulate=dict( + argstr="-modulate %s", + ), + out_adc=dict( + argstr="-adc %s", + extensions=None, + ), + out_eval=dict( + argstr="-value %s", + extensions=None, + ), + out_evec=dict( + argstr="-vector %s", + extensions=None, + ), + out_fa=dict( + argstr="-fa %s", + extensions=None, + ), ) inputs = TensorMetrics.input_spec() @@ -24,10 +55,18 @@ def test_TensorMetrics_inputs(): def test_TensorMetrics_outputs(): output_map = dict( - out_adc=dict(extensions=None), - out_eval=dict(extensions=None), - out_evec=dict(extensions=None), - out_fa=dict(extensions=None), + out_adc=dict( + extensions=None, + ), + out_eval=dict( + extensions=None, + ), + out_evec=dict( + extensions=None, + ), + out_fa=dict( + extensions=None, + ), ) outputs = TensorMetrics.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py index 08926ebe48..2a70fe09f6 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py @@ -4,59 +4,168 @@ def test_Tractography_inputs(): input_map = dict( - act_file=dict(argstr="-act %s", extensions=None), - algorithm=dict(argstr="-algorithm %s", usedefault=True), - angle=dict(argstr="-angle %f"), - args=dict(argstr="%s"), - backtrack=dict(argstr="-backtrack"), - bval_scale=dict(argstr="-bvalue_scaling %s"), - crop_at_gmwmi=dict(argstr="-crop_at_gmwmi"), - cutoff=dict(argstr="-cutoff %f"), - cutoff_init=dict(argstr="-initcutoff %f"), - downsample=dict(argstr="-downsample %f"), - environ=dict(nohash=True, usedefault=True), - grad_file=dict(argstr="-grad %s", extensions=None, xor=["grad_fsl"]), - grad_fsl=dict(argstr="-fslgrad %s %s", xor=["grad_file"]), - in_bval=dict(extensions=None), - in_bvec=dict(argstr="-fslgrad %s %s", extensions=None), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=-2), - init_dir=dict(argstr="-initdirection %f,%f,%f"), - max_length=dict(argstr="-maxlength %f"), - 
max_seed_attempts=dict(argstr="-max_seed_attempts %d"), - max_tracks=dict(argstr="-maxnum %d"), - min_length=dict(argstr="-minlength %f"), - n_samples=dict(argstr="-samples %d", usedefault=True), - n_tracks=dict(argstr="-number %d", max_ver="0.4"), - n_trials=dict(argstr="-trials %d"), - noprecompt=dict(argstr="-noprecomputed"), - nthreads=dict(argstr="-nthreads %d", nohash=True), + act_file=dict( + argstr="-act %s", + extensions=None, + ), + algorithm=dict( + argstr="-algorithm %s", + usedefault=True, + ), + angle=dict( + argstr="-angle %f", + ), + args=dict( + argstr="%s", + ), + backtrack=dict( + argstr="-backtrack", + ), + bval_scale=dict( + argstr="-bvalue_scaling %s", + ), + crop_at_gmwmi=dict( + argstr="-crop_at_gmwmi", + ), + cutoff=dict( + argstr="-cutoff %f", + ), + cutoff_init=dict( + argstr="-initcutoff %f", + ), + downsample=dict( + argstr="-downsample %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict( + argstr="-grad %s", + extensions=None, + xor=["grad_fsl"], + ), + grad_fsl=dict( + argstr="-fslgrad %s %s", + xor=["grad_file"], + ), + in_bval=dict( + extensions=None, + ), + in_bvec=dict( + argstr="-fslgrad %s %s", + extensions=None, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=-2, + ), + init_dir=dict( + argstr="-initdirection %f,%f,%f", + ), + max_length=dict( + argstr="-maxlength %f", + ), + max_seed_attempts=dict( + argstr="-max_seed_attempts %d", + ), + max_tracks=dict( + argstr="-maxnum %d", + ), + min_length=dict( + argstr="-minlength %f", + ), + n_samples=dict( + argstr="-samples %d", + usedefault=True, + ), + n_tracks=dict( + argstr="-number %d", + max_ver="0.4", + ), + n_trials=dict( + argstr="-trials %d", + ), + noprecompt=dict( + argstr="-noprecomputed", + ), + nthreads=dict( + argstr="-nthreads %d", + nohash=True, + ), out_file=dict( - argstr="%s", extensions=None, mandatory=True, position=-1, usedefault=True - ), - out_seeds=dict(argstr="-output_seeds %s", extensions=None, usedefault=True), - power=dict(argstr="-power %d"), - roi_excl=dict(argstr="-exclude %s"), - roi_incl=dict(argstr="-include %s"), - roi_mask=dict(argstr="-mask %s"), - seed_dynamic=dict(argstr="-seed_dynamic %s", extensions=None), + argstr="%s", + extensions=None, + mandatory=True, + position=-1, + usedefault=True, + ), + out_seeds=dict( + argstr="-output_seeds %s", + extensions=None, + usedefault=True, + ), + power=dict( + argstr="-power %d", + ), + roi_excl=dict( + argstr="-exclude %s", + ), + roi_incl=dict( + argstr="-include %s", + ), + roi_mask=dict( + argstr="-mask %s", + ), + seed_dynamic=dict( + argstr="-seed_dynamic %s", + extensions=None, + ), seed_gmwmi=dict( - argstr="-seed_gmwmi %s", extensions=None, requires=["act_file"] + argstr="-seed_gmwmi %s", + extensions=None, + requires=["act_file"], ), seed_grid_voxel=dict( - argstr="-seed_grid_per_voxel %s %d", xor=["seed_image", "seed_rnd_voxel"] + argstr="-seed_grid_per_voxel %s %d", + xor=["seed_image", "seed_rnd_voxel"], + ), + seed_image=dict( + argstr="-seed_image %s", + extensions=None, + ), + seed_rejection=dict( + argstr="-seed_rejection %s", + extensions=None, ), - seed_image=dict(argstr="-seed_image %s", extensions=None), - seed_rejection=dict(argstr="-seed_rejection %s", extensions=None), seed_rnd_voxel=dict( - argstr="-seed_random_per_voxel %s %d", xor=["seed_image", "seed_grid_voxel"] - ), - seed_sphere=dict(argstr="-seed_sphere %f,%f,%f,%f"), - select=dict(argstr="-select %d", min_ver="3"), - sph_trait=dict(argstr="%f,%f,%f,%f"), - 
step_size=dict(argstr="-step %f"), - stop=dict(argstr="-stop"), - unidirectional=dict(argstr="-unidirectional"), - use_rk4=dict(argstr="-rk4"), + argstr="-seed_random_per_voxel %s %d", + xor=["seed_image", "seed_grid_voxel"], + ), + seed_sphere=dict( + argstr="-seed_sphere %f,%f,%f,%f", + ), + select=dict( + argstr="-select %d", + min_ver="3", + ), + sph_trait=dict( + argstr="%f,%f,%f,%f", + ), + step_size=dict( + argstr="-step %f", + ), + stop=dict( + argstr="-stop", + ), + unidirectional=dict( + argstr="-unidirectional", + ), + use_rk4=dict( + argstr="-rk4", + ), ) inputs = Tractography.input_spec() @@ -66,7 +175,14 @@ def test_Tractography_inputs(): def test_Tractography_outputs(): - output_map = dict(out_file=dict(extensions=None), out_seeds=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + out_seeds=dict( + extensions=None, + ), + ) outputs = Tractography.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py b/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py index 76ca1f72dd..ca14384031 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py @@ -4,8 +4,14 @@ def test_DwiTool_inputs(): input_map = dict( - args=dict(argstr="%s"), - b0_file=dict(argstr="-b0 %s", extensions=None, position=4), + args=dict( + argstr="%s", + ), + b0_file=dict( + argstr="-b0 %s", + extensions=None, + position=4, + ), ball_flag=dict( argstr="-ball", position=6, @@ -32,10 +38,23 @@ def test_DwiTool_inputs(): "nodv_flag", ], ), - bval_file=dict(argstr="-bval %s", extensions=None, mandatory=True, position=2), - bvec_file=dict(argstr="-bvec %s", extensions=None, position=3), - diso_val=dict(argstr="-diso %f"), - dpr_val=dict(argstr="-dpr %f"), + bval_file=dict( + argstr="-bval %s", + extensions=None, + mandatory=True, + position=2, + ), + bvec_file=dict( + argstr="-bvec %s", + extensions=None, + position=3, + ), + diso_val=dict( + argstr="-diso %f", + ), + dpr_val=dict( + argstr="-dpr %f", + ), dti_flag=dict( argstr="-dti", position=6, @@ -62,7 +81,10 @@ def test_DwiTool_inputs(): "nodv_flag", ], ), - environ=dict(nohash=True, usedefault=True), + environ=dict( + nohash=True, + usedefault=True, + ), famap_file=dict( argstr="-famap %s", extensions=None, @@ -88,7 +110,11 @@ def test_DwiTool_inputs(): name_source=["source_file"], name_template="%s_logdti2.nii.gz", ), - mask_file=dict(argstr="-mask %s", extensions=None, position=5), + mask_file=dict( + argstr="-mask %s", + extensions=None, + position=5, + ), mcmap_file=dict( argstr="-mcmap %s", extensions=None, @@ -147,7 +173,10 @@ def test_DwiTool_inputs(): name_template="%s_rgbmap.nii.gz", ), source_file=dict( - argstr="-source %s", extensions=None, mandatory=True, position=1 + argstr="-source %s", + extensions=None, + mandatory=True, + position=1, ), syn_file=dict( argstr="-syn %s", @@ -172,13 +201,27 @@ def test_DwiTool_inputs(): def test_DwiTool_outputs(): output_map = dict( - famap_file=dict(extensions=None), - logdti_file=dict(extensions=None), - mcmap_file=dict(extensions=None), - mdmap_file=dict(extensions=None), - rgbmap_file=dict(extensions=None), - syn_file=dict(extensions=None), - v1map_file=dict(extensions=None), + famap_file=dict( + extensions=None, + ), + logdti_file=dict( + extensions=None, + ), + mcmap_file=dict( + extensions=None, + ), + mdmap_file=dict( + extensions=None, + ), + rgbmap_file=dict( + extensions=None, + ), + syn_file=dict( + extensions=None, + ), + 
v1map_file=dict( + extensions=None, + ), ) outputs = DwiTool.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py b/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py index 8f8781c5e3..14093322cc 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py @@ -4,48 +4,115 @@ def test_FitAsl_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), cbf_file=dict( argstr="-cbf %s", extensions=None, name_source=["source_file"], name_template="%s_cbf.nii.gz", ), - dpld=dict(argstr="-dPLD %f"), - dt_inv2=dict(argstr="-dTinv2 %f"), - eff=dict(argstr="-eff %f"), - environ=dict(nohash=True, usedefault=True), + dpld=dict( + argstr="-dPLD %f", + ), + dt_inv2=dict( + argstr="-dTinv2 %f", + ), + eff=dict( + argstr="-eff %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), error_file=dict( argstr="-error %s", extensions=None, name_source=["source_file"], name_template="%s_error.nii.gz", ), - gm_plasma=dict(argstr="-gmL %f"), - gm_t1=dict(argstr="-gmT1 %f"), - gm_ttt=dict(argstr="-gmTTT %f"), - ir_output=dict(argstr="-IRoutput %s", extensions=None), - ir_volume=dict(argstr="-IRvolume %s", extensions=None), - ldd=dict(argstr="-LDD %f"), - m0map=dict(argstr="-m0map %s", extensions=None), - m0mape=dict(argstr="-m0mape %s", extensions=None), - mask=dict(argstr="-mask %s", extensions=None, position=2), - mul=dict(argstr="-mul %f"), - mulgm=dict(argstr="-sig"), - out=dict(argstr="-out %f"), - pasl=dict(argstr="-pasl"), - pcasl=dict(argstr="-pcasl"), - plasma_coeff=dict(argstr="-L %f"), - pld=dict(argstr="-PLD %f"), - pv0=dict(argstr="-pv0 %d"), - pv2=dict(argstr="-pv2 %d"), - pv3=dict(argstr="-pv3 %d %d %d"), - pv_threshold=dict(argstr="-pvthreshold"), - seg=dict(argstr="-seg %s", extensions=None), - segstyle=dict(argstr="-segstyle"), - sig=dict(argstr="-sig"), + gm_plasma=dict( + argstr="-gmL %f", + ), + gm_t1=dict( + argstr="-gmT1 %f", + ), + gm_ttt=dict( + argstr="-gmTTT %f", + ), + ir_output=dict( + argstr="-IRoutput %s", + extensions=None, + ), + ir_volume=dict( + argstr="-IRvolume %s", + extensions=None, + ), + ldd=dict( + argstr="-LDD %f", + ), + m0map=dict( + argstr="-m0map %s", + extensions=None, + ), + m0mape=dict( + argstr="-m0mape %s", + extensions=None, + ), + mask=dict( + argstr="-mask %s", + extensions=None, + position=2, + ), + mul=dict( + argstr="-mul %f", + ), + mulgm=dict( + argstr="-sig", + ), + out=dict( + argstr="-out %f", + ), + pasl=dict( + argstr="-pasl", + ), + pcasl=dict( + argstr="-pcasl", + ), + plasma_coeff=dict( + argstr="-L %f", + ), + pld=dict( + argstr="-PLD %f", + ), + pv0=dict( + argstr="-pv0 %d", + ), + pv2=dict( + argstr="-pv2 %d", + ), + pv3=dict( + argstr="-pv3 %d %d %d", + ), + pv_threshold=dict( + argstr="-pvthreshold", + ), + seg=dict( + argstr="-seg %s", + extensions=None, + ), + segstyle=dict( + argstr="-segstyle", + ), + sig=dict( + argstr="-sig", + ), source_file=dict( - argstr="-source %s", extensions=None, mandatory=True, position=1 + argstr="-source %s", + extensions=None, + mandatory=True, + position=1, ), syn_file=dict( argstr="-syn %s", @@ -53,13 +120,28 @@ def test_FitAsl_inputs(): name_source=["source_file"], name_template="%s_syn.nii.gz", ), - t1_art_cmp=dict(argstr="-T1a %f"), - t1map=dict(argstr="-t1map %s", extensions=None), - t_inv1=dict(argstr="-Tinv1 %f"), - t_inv2=dict(argstr="-Tinv2 %f"), - wm_plasma=dict(argstr="-wmL %f"), - wm_t1=dict(argstr="-wmT1 %f"), - wm_ttt=dict(argstr="-wmTTT %f"), + t1_art_cmp=dict( + 
argstr="-T1a %f", + ), + t1map=dict( + argstr="-t1map %s", + extensions=None, + ), + t_inv1=dict( + argstr="-Tinv1 %f", + ), + t_inv2=dict( + argstr="-Tinv2 %f", + ), + wm_plasma=dict( + argstr="-wmL %f", + ), + wm_t1=dict( + argstr="-wmT1 %f", + ), + wm_ttt=dict( + argstr="-wmTTT %f", + ), ) inputs = FitAsl.input_spec() @@ -70,9 +152,15 @@ def test_FitAsl_inputs(): def test_FitAsl_outputs(): output_map = dict( - cbf_file=dict(extensions=None), - error_file=dict(extensions=None), - syn_file=dict(extensions=None), + cbf_file=dict( + extensions=None, + ), + error_file=dict( + extensions=None, + ), + syn_file=dict( + extensions=None, + ), ) outputs = FitAsl.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py b/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py index f6a09ecea1..87650ffbd6 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py @@ -4,8 +4,12 @@ def test_FitDwi_inputs(): input_map = dict( - acceptance=dict(argstr="-accpetance %f"), - args=dict(argstr="%s"), + acceptance=dict( + argstr="-accpetance %f", + ), + args=dict( + argstr="%s", + ), ball_flag=dict( argstr="-ball", position=4, @@ -30,12 +34,31 @@ def test_FitDwi_inputs(): "nodv_flag", ], ), - bval_file=dict(argstr="-bval %s", extensions=None, mandatory=True, position=2), - bvec_file=dict(argstr="-bvec %s", extensions=None, mandatory=True, position=3), - cov_file=dict(argstr="-cov %s", extensions=None), - csf_t2_val=dict(argstr="-csfT2 %f"), - diso_val=dict(argstr="-diso %f"), - dpr_val=dict(argstr="-dpr %f"), + bval_file=dict( + argstr="-bval %s", + extensions=None, + mandatory=True, + position=2, + ), + bvec_file=dict( + argstr="-bvec %s", + extensions=None, + mandatory=True, + position=3, + ), + cov_file=dict( + argstr="-cov %s", + extensions=None, + ), + csf_t2_val=dict( + argstr="-csfT2 %f", + ), + diso_val=dict( + argstr="-diso %f", + ), + dpr_val=dict( + argstr="-dpr %f", + ), dti_flag=dict( argstr="-dti", position=4, @@ -48,7 +71,10 @@ def test_FitDwi_inputs(): "nodv_flag", ], ), - environ=dict(nohash=True, usedefault=True), + environ=dict( + nohash=True, + usedefault=True, + ), error_file=dict( argstr="-error %s", extensions=None, @@ -61,7 +87,10 @@ def test_FitDwi_inputs(): name_source=["source_file"], name_template="%s_famap.nii.gz", ), - gn_flag=dict(argstr="-gn", xor=["wls_flag"]), + gn_flag=dict( + argstr="-gn", + xor=["wls_flag"], + ), ivim_flag=dict( argstr="-ivim", position=4, @@ -74,9 +103,18 @@ def test_FitDwi_inputs(): "nodv_flag", ], ), - lm_vals=dict(argstr="-lm %f %f", requires=["gn_flag"]), - mask_file=dict(argstr="-mask %s", extensions=None), - maxit_val=dict(argstr="-maxit %d", requires=["gn_flag"]), + lm_vals=dict( + argstr="-lm %f %f", + requires=["gn_flag"], + ), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + maxit_val=dict( + argstr="-maxit %d", + requires=["gn_flag"], + ), mcmap_file=dict( argstr="-mcmap %s", extensions=None, @@ -84,14 +122,18 @@ def test_FitDwi_inputs(): name_template="%s_mcmap.nii.gz", requires=["nodv_flag"], ), - mcmaxit=dict(argstr="-mcmaxit %d"), + mcmaxit=dict( + argstr="-mcmaxit %d", + ), mcout=dict( argstr="-mcout %s", extensions=None, name_source=["source_file"], name_template="%s_mcout.txt", ), - mcsamples=dict(argstr="-mcsamples %d"), + mcsamples=dict( + argstr="-mcsamples %d", + ), mdmap_file=dict( argstr="-mdmap %s", extensions=None, @@ -140,8 +182,13 @@ def test_FitDwi_inputs(): "nod_flag", ], ), - perf_thr=dict(argstr="-perfthreshold %f"), - 
prior_file=dict(argstr="-prior %s", extensions=None), + perf_thr=dict( + argstr="-perfthreshold %f", + ), + prior_file=dict( + argstr="-prior %s", + extensions=None, + ), res_file=dict( argstr="-res %s", extensions=None, @@ -155,21 +202,40 @@ def test_FitDwi_inputs(): name_template="%s_rgbmap.nii.gz", requires=["dti_flag"], ), - rot_sform_flag=dict(argstr="-rotsform %d"), - slice_no=dict(argstr="-slice %d"), + rot_sform_flag=dict( + argstr="-rotsform %d", + ), + slice_no=dict( + argstr="-slice %d", + ), source_file=dict( - argstr="-source %s", extensions=None, mandatory=True, position=1 + argstr="-source %s", + extensions=None, + mandatory=True, + position=1, + ), + swls_val=dict( + argstr="-swls %f", ), - swls_val=dict(argstr="-swls %f"), syn_file=dict( argstr="-syn %s", extensions=None, name_source=["source_file"], name_template="%s_syn.nii.gz", ), - te_file=dict(argstr="-TE %s", extensions=None, xor=["te_file"]), - te_value=dict(argstr="-TE %s", extensions=None, xor=["te_file"]), - ten_type=dict(usedefault=True), + te_file=dict( + argstr="-TE %s", + extensions=None, + xor=["te_file"], + ), + te_value=dict( + argstr="-TE %s", + extensions=None, + xor=["te_file"], + ), + ten_type=dict( + usedefault=True, + ), tenmap2_file=dict( argstr="-tenmap2 %s", extensions=None, @@ -190,10 +256,19 @@ def test_FitDwi_inputs(): name_source=["source_file"], name_template="%s_v1map.nii.gz", ), - vb_flag=dict(argstr="-vb"), - voxel=dict(argstr="-voxel %d %d %d"), - wls_flag=dict(argstr="-wls", xor=["gn_flag"]), - wm_t2_val=dict(argstr="-wmT2 %f"), + vb_flag=dict( + argstr="-vb", + ), + voxel=dict( + argstr="-voxel %d %d %d", + ), + wls_flag=dict( + argstr="-wls", + xor=["gn_flag"], + ), + wm_t2_val=dict( + argstr="-wmT2 %f", + ), ) inputs = FitDwi.input_spec() @@ -204,18 +279,42 @@ def test_FitDwi_inputs(): def test_FitDwi_outputs(): output_map = dict( - error_file=dict(extensions=None), - famap_file=dict(extensions=None), - mcmap_file=dict(extensions=None), - mcout=dict(extensions=None), - mdmap_file=dict(extensions=None), - nodiff_file=dict(extensions=None), - res_file=dict(extensions=None), - rgbmap_file=dict(extensions=None), - syn_file=dict(extensions=None), - tenmap2_file=dict(extensions=None), - tenmap_file=dict(extensions=None), - v1map_file=dict(extensions=None), + error_file=dict( + extensions=None, + ), + famap_file=dict( + extensions=None, + ), + mcmap_file=dict( + extensions=None, + ), + mcout=dict( + extensions=None, + ), + mdmap_file=dict( + extensions=None, + ), + nodiff_file=dict( + extensions=None, + ), + res_file=dict( + extensions=None, + ), + rgbmap_file=dict( + extensions=None, + ), + syn_file=dict( + extensions=None, + ), + tenmap2_file=dict( + extensions=None, + ), + tenmap_file=dict( + extensions=None, + ), + v1map_file=dict( + extensions=None, + ), ) outputs = FitDwi.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py b/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py index 7547842c8b..ee82b5c900 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py @@ -4,78 +4,158 @@ def test_FitQt1_inputs(): input_map = dict( - acceptance=dict(argstr="-acceptance %f"), - args=dict(argstr="%s"), - b1map=dict(argstr="-b1map %s", extensions=None), + acceptance=dict( + argstr="-acceptance %f", + ), + args=dict( + argstr="%s", + ), + b1map=dict( + argstr="-b1map %s", + extensions=None, + ), comp_file=dict( argstr="-comp %s", extensions=None, name_source=["source_file"], 
name_template="%s_comp.nii.gz", ), - environ=dict(nohash=True, usedefault=True), + environ=dict( + nohash=True, + usedefault=True, + ), error_file=dict( argstr="-error %s", extensions=None, name_source=["source_file"], name_template="%s_error.nii.gz", ), - flips=dict(argstr="-flips %s", sep=" "), - flips_list=dict(argstr="-fliplist %s", extensions=None), - gn_flag=dict(argstr="-gn", position=8), - ir_flag=dict(argstr="-IR", position=13), - lm_val=dict(argstr="-lm %f %f", position=7), + flips=dict( + argstr="-flips %s", + sep=" ", + ), + flips_list=dict( + argstr="-fliplist %s", + extensions=None, + ), + gn_flag=dict( + argstr="-gn", + position=8, + ), + ir_flag=dict( + argstr="-IR", + position=13, + ), + lm_val=dict( + argstr="-lm %f %f", + position=7, + ), m0map_file=dict( argstr="-m0map %s", extensions=None, name_source=["source_file"], name_template="%s_m0map.nii.gz", ), - mask=dict(argstr="-mask %s", extensions=None, position=2), - maxit=dict(argstr="-maxit %d", position=11), + mask=dict( + argstr="-mask %s", + extensions=None, + position=2, + ), + maxit=dict( + argstr="-maxit %d", + position=11, + ), mcmap_file=dict( argstr="-mcmap %s", extensions=None, name_source=["source_file"], name_template="%s_mcmap.nii.gz", ), - mcmaxit=dict(argstr="-mcmaxit %d"), - mcout=dict(argstr="-mcout %s", extensions=None), - mcsamples=dict(argstr="-mcsamples %d"), - nb_comp=dict(argstr="-nc %d", position=6), - prior=dict(argstr="-prior %s", extensions=None, position=3), + mcmaxit=dict( + argstr="-mcmaxit %d", + ), + mcout=dict( + argstr="-mcout %s", + extensions=None, + ), + mcsamples=dict( + argstr="-mcsamples %d", + ), + nb_comp=dict( + argstr="-nc %d", + position=6, + ), + prior=dict( + argstr="-prior %s", + extensions=None, + position=3, + ), res_file=dict( argstr="-res %s", extensions=None, name_source=["source_file"], name_template="%s_res.nii.gz", ), - slice_no=dict(argstr="-slice %d", position=9), + slice_no=dict( + argstr="-slice %d", + position=9, + ), source_file=dict( - argstr="-source %s", extensions=None, mandatory=True, position=1 + argstr="-source %s", + extensions=None, + mandatory=True, + position=1, + ), + spgr=dict( + argstr="-SPGR", + ), + sr_flag=dict( + argstr="-SR", + position=12, ), - spgr=dict(argstr="-SPGR"), - sr_flag=dict(argstr="-SR", position=12), syn_file=dict( argstr="-syn %s", extensions=None, name_source=["source_file"], name_template="%s_syn.nii.gz", ), - t1_list=dict(argstr="-T1list %s", extensions=None), + t1_list=dict( + argstr="-T1list %s", + extensions=None, + ), t1map_file=dict( argstr="-t1map %s", extensions=None, name_source=["source_file"], name_template="%s_t1map.nii.gz", ), - t1max=dict(argstr="-T1max %f"), - t1min=dict(argstr="-T1min %f"), - te_value=dict(argstr="-TE %f", position=4), - tis=dict(argstr="-TIs %s", position=14, sep=" "), - tis_list=dict(argstr="-TIlist %s", extensions=None), - tr_value=dict(argstr="-TR %f", position=5), - voxel=dict(argstr="-voxel %d %d %d", position=10), + t1max=dict( + argstr="-T1max %f", + ), + t1min=dict( + argstr="-T1min %f", + ), + te_value=dict( + argstr="-TE %f", + position=4, + ), + tis=dict( + argstr="-TIs %s", + position=14, + sep=" ", + ), + tis_list=dict( + argstr="-TIlist %s", + extensions=None, + ), + tr_value=dict( + argstr="-TR %f", + position=5, + ), + voxel=dict( + argstr="-voxel %d %d %d", + position=10, + ), ) inputs = FitQt1.input_spec() @@ -86,13 +166,27 @@ def test_FitQt1_inputs(): def test_FitQt1_outputs(): output_map = dict( - comp_file=dict(extensions=None), - error_file=dict(extensions=None), - 
m0map_file=dict(extensions=None), - mcmap_file=dict(extensions=None), - res_file=dict(extensions=None), - syn_file=dict(extensions=None), - t1map_file=dict(extensions=None), + comp_file=dict( + extensions=None, + ), + error_file=dict( + extensions=None, + ), + m0map_file=dict( + extensions=None, + ), + mcmap_file=dict( + extensions=None, + ), + res_file=dict( + extensions=None, + ), + syn_file=dict( + extensions=None, + ), + t1map_file=dict( + extensions=None, + ), ) outputs = FitQt1.output_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py b/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py index 7b0048ae15..f12ccad480 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py @@ -3,7 +3,15 @@ def test_NiftyFitCommand_inputs(): - input_map = dict(args=dict(argstr="%s"), environ=dict(nohash=True, usedefault=True)) + input_map = dict( + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ) inputs = NiftyFitCommand.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py b/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py index 7aa97f4103..43f72df69f 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py @@ -4,9 +4,17 @@ def test_NiftyRegCommand_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - omp_core_val=dict(argstr="-omp %i", usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + omp_core_val=dict( + argstr="-omp %i", + usedefault=True, + ), ) inputs = NiftyRegCommand.input_spec() diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py b/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py index ae9013a82b..a4485d0e20 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py @@ -4,45 +4,108 @@ def test_RegAladin_inputs(): input_map = dict( - aff_direct_flag=dict(argstr="-affDirect"), + aff_direct_flag=dict( + argstr="-affDirect", + ), aff_file=dict( argstr="-aff %s", extensions=None, name_source=["flo_file"], name_template="%s_aff.txt", ), - args=dict(argstr="%s"), - cog_flag=dict(argstr="-cog"), - environ=dict(nohash=True, usedefault=True), - flo_file=dict(argstr="-flo %s", extensions=None, mandatory=True), - flo_low_val=dict(argstr="-floLowThr %f"), - flo_up_val=dict(argstr="-floUpThr %f"), - fmask_file=dict(argstr="-fmask %s", extensions=None), - gpuid_val=dict(argstr="-gpuid %i"), - i_val=dict(argstr="-pi %d"), - in_aff_file=dict(argstr="-inaff %s", extensions=None), - ln_val=dict(argstr="-ln %d"), - lp_val=dict(argstr="-lp %d"), - maxit_val=dict(argstr="-maxit %d"), - nac_flag=dict(argstr="-nac"), - nosym_flag=dict(argstr="-noSym"), - omp_core_val=dict(argstr="-omp %i", usedefault=True), - platform_val=dict(argstr="-platf %i"), - ref_file=dict(argstr="-ref %s", extensions=None, mandatory=True), - ref_low_val=dict(argstr="-refLowThr %f"), - ref_up_val=dict(argstr="-refUpThr %f"), + args=dict( + argstr="%s", + ), + cog_flag=dict( + argstr="-cog", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flo_file=dict( + argstr="-flo %s", + extensions=None, + mandatory=True, + ), + flo_low_val=dict( + argstr="-floLowThr %f", + ), + flo_up_val=dict( + 
argstr="-floUpThr %f", + ), + fmask_file=dict( + argstr="-fmask %s", + extensions=None, + ), + gpuid_val=dict( + argstr="-gpuid %i", + ), + i_val=dict( + argstr="-pi %d", + ), + in_aff_file=dict( + argstr="-inaff %s", + extensions=None, + ), + ln_val=dict( + argstr="-ln %d", + ), + lp_val=dict( + argstr="-lp %d", + ), + maxit_val=dict( + argstr="-maxit %d", + ), + nac_flag=dict( + argstr="-nac", + ), + nosym_flag=dict( + argstr="-noSym", + ), + omp_core_val=dict( + argstr="-omp %i", + usedefault=True, + ), + platform_val=dict( + argstr="-platf %i", + ), + ref_file=dict( + argstr="-ref %s", + extensions=None, + mandatory=True, + ), + ref_low_val=dict( + argstr="-refLowThr %f", + ), + ref_up_val=dict( + argstr="-refUpThr %f", + ), res_file=dict( argstr="-res %s", extensions=None, name_source=["flo_file"], name_template="%s_res.nii.gz", ), - rig_only_flag=dict(argstr="-rigOnly"), - rmask_file=dict(argstr="-rmask %s", extensions=None), - smoo_f_val=dict(argstr="-smooF %f"), - smoo_r_val=dict(argstr="-smooR %f"), - v_val=dict(argstr="-pv %d"), - verbosity_off_flag=dict(argstr="-voff"), + rig_only_flag=dict( + argstr="-rigOnly", + ), + rmask_file=dict( + argstr="-rmask %s", + extensions=None, + ), + smoo_f_val=dict( + argstr="-smooF %f", + ), + smoo_r_val=dict( + argstr="-smooR %f", + ), + v_val=dict( + argstr="-pv %d", + ), + verbosity_off_flag=dict( + argstr="-voff", + ), ) inputs = RegAladin.input_spec() @@ -53,9 +116,13 @@ def test_RegAladin_inputs(): def test_RegAladin_outputs(): output_map = dict( - aff_file=dict(extensions=None), + aff_file=dict( + extensions=None, + ), avg_output=dict(), - res_file=dict(extensions=None), + res_file=dict( + extensions=None, + ), ) outputs = RegAladin.output_spec() diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py b/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py index 42b7e2db8c..0077b85faa 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py @@ -4,7 +4,9 @@ def test_RegAverage_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), avg_files=dict( argstr="-avg %s", position=1, @@ -83,11 +85,25 @@ def test_RegAverage_inputs(): "demean2_ref_file", ], ), - environ=dict(nohash=True, usedefault=True), - omp_core_val=dict(argstr="-omp %i", usedefault=True), - out_file=dict(argstr="%s", extensions=None, genfile=True, position=0), + environ=dict( + nohash=True, + usedefault=True, + ), + omp_core_val=dict( + argstr="-omp %i", + usedefault=True, + ), + out_file=dict( + argstr="%s", + extensions=None, + genfile=True, + position=0, + ), warp_files=dict( - argstr="%s", position=-1, sep=" ", xor=["avg_files", "avg_lts_files"] + argstr="%s", + position=-1, + sep=" ", + xor=["avg_files", "avg_lts_files"], ), ) inputs = RegAverage.input_spec() @@ -98,7 +114,11 @@ def test_RegAverage_inputs(): def test_RegAverage_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = RegAverage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py b/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py index 830b6966aa..b760ebb3d1 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py @@ -4,69 +4,181 @@ def test_RegF3D_inputs(): input_map = dict( - aff_file=dict(argstr="-aff %s", extensions=None), - amc_flag=dict(argstr="-amc"), 
- args=dict(argstr="%s"), - be_val=dict(argstr="-be %f"), + aff_file=dict( + argstr="-aff %s", + extensions=None, + ), + amc_flag=dict( + argstr="-amc", + ), + args=dict( + argstr="%s", + ), + be_val=dict( + argstr="-be %f", + ), cpp_file=dict( argstr="-cpp %s", extensions=None, name_source=["flo_file"], name_template="%s_cpp.nii.gz", ), - environ=dict(nohash=True, usedefault=True), - fbn2_val=dict(argstr="-fbn %d %d"), - fbn_val=dict(argstr="--fbn %d"), - flo_file=dict(argstr="-flo %s", extensions=None, mandatory=True), - flo_smooth_val=dict(argstr="-smooF %f"), - flwth2_thr_val=dict(argstr="-fLwTh %d %f"), - flwth_thr_val=dict(argstr="--fLwTh %f"), - fmask_file=dict(argstr="-fmask %s", extensions=None), - fupth2_thr_val=dict(argstr="-fUpTh %d %f"), - fupth_thr_val=dict(argstr="--fUpTh %f"), - incpp_file=dict(argstr="-incpp %s", extensions=None), - jl_val=dict(argstr="-jl %f"), - kld2_flag=dict(argstr="-kld %d"), - kld_flag=dict(argstr="--kld"), - le_val=dict(argstr="-le %f"), - ln_val=dict(argstr="-ln %d"), - lncc2_val=dict(argstr="-lncc %d %f"), - lncc_val=dict(argstr="--lncc %f"), - lp_val=dict(argstr="-lp %d"), - maxit_val=dict(argstr="-maxit %d"), - nmi_flag=dict(argstr="--nmi"), - no_app_jl_flag=dict(argstr="-noAppJL"), - noconj_flag=dict(argstr="-noConj"), - nopy_flag=dict(argstr="-nopy"), - nox_flag=dict(argstr="-nox"), - noy_flag=dict(argstr="-noy"), - noz_flag=dict(argstr="-noz"), - omp_core_val=dict(argstr="-omp %i", usedefault=True), - pad_val=dict(argstr="-pad %f"), - pert_val=dict(argstr="-pert %d"), - rbn2_val=dict(argstr="-rbn %d %d"), - rbn_val=dict(argstr="--rbn %d"), - ref_file=dict(argstr="-ref %s", extensions=None, mandatory=True), - ref_smooth_val=dict(argstr="-smooR %f"), + environ=dict( + nohash=True, + usedefault=True, + ), + fbn2_val=dict( + argstr="-fbn %d %d", + ), + fbn_val=dict( + argstr="--fbn %d", + ), + flo_file=dict( + argstr="-flo %s", + extensions=None, + mandatory=True, + ), + flo_smooth_val=dict( + argstr="-smooF %f", + ), + flwth2_thr_val=dict( + argstr="-fLwTh %d %f", + ), + flwth_thr_val=dict( + argstr="--fLwTh %f", + ), + fmask_file=dict( + argstr="-fmask %s", + extensions=None, + ), + fupth2_thr_val=dict( + argstr="-fUpTh %d %f", + ), + fupth_thr_val=dict( + argstr="--fUpTh %f", + ), + incpp_file=dict( + argstr="-incpp %s", + extensions=None, + ), + jl_val=dict( + argstr="-jl %f", + ), + kld2_flag=dict( + argstr="-kld %d", + ), + kld_flag=dict( + argstr="--kld", + ), + le_val=dict( + argstr="-le %f", + ), + ln_val=dict( + argstr="-ln %d", + ), + lncc2_val=dict( + argstr="-lncc %d %f", + ), + lncc_val=dict( + argstr="--lncc %f", + ), + lp_val=dict( + argstr="-lp %d", + ), + maxit_val=dict( + argstr="-maxit %d", + ), + nmi_flag=dict( + argstr="--nmi", + ), + no_app_jl_flag=dict( + argstr="-noAppJL", + ), + noconj_flag=dict( + argstr="-noConj", + ), + nopy_flag=dict( + argstr="-nopy", + ), + nox_flag=dict( + argstr="-nox", + ), + noy_flag=dict( + argstr="-noy", + ), + noz_flag=dict( + argstr="-noz", + ), + omp_core_val=dict( + argstr="-omp %i", + usedefault=True, + ), + pad_val=dict( + argstr="-pad %f", + ), + pert_val=dict( + argstr="-pert %d", + ), + rbn2_val=dict( + argstr="-rbn %d %d", + ), + rbn_val=dict( + argstr="--rbn %d", + ), + ref_file=dict( + argstr="-ref %s", + extensions=None, + mandatory=True, + ), + ref_smooth_val=dict( + argstr="-smooR %f", + ), res_file=dict( argstr="-res %s", extensions=None, name_source=["flo_file"], name_template="%s_res.nii.gz", ), - rlwth2_thr_val=dict(argstr="-rLwTh %d %f"), - 
rlwth_thr_val=dict(argstr="--rLwTh %f"), - rmask_file=dict(argstr="-rmask %s", extensions=None), - rupth2_thr_val=dict(argstr="-rUpTh %d %f"), - rupth_thr_val=dict(argstr="--rUpTh %f"), - smooth_grad_val=dict(argstr="-smoothGrad %f"), - ssd2_flag=dict(argstr="-ssd %d"), - ssd_flag=dict(argstr="--ssd"), - sx_val=dict(argstr="-sx %f"), - sy_val=dict(argstr="-sy %f"), - sz_val=dict(argstr="-sz %f"), - vel_flag=dict(argstr="-vel"), - verbosity_off_flag=dict(argstr="-voff"), + rlwth2_thr_val=dict( + argstr="-rLwTh %d %f", + ), + rlwth_thr_val=dict( + argstr="--rLwTh %f", + ), + rmask_file=dict( + argstr="-rmask %s", + extensions=None, + ), + rupth2_thr_val=dict( + argstr="-rUpTh %d %f", + ), + rupth_thr_val=dict( + argstr="--rUpTh %f", + ), + smooth_grad_val=dict( + argstr="-smoothGrad %f", + ), + ssd2_flag=dict( + argstr="-ssd %d", + ), + ssd_flag=dict( + argstr="--ssd", + ), + sx_val=dict( + argstr="-sx %f", + ), + sy_val=dict( + argstr="-sy %f", + ), + sz_val=dict( + argstr="-sz %f", + ), + vel_flag=dict( + argstr="-vel", + ), + verbosity_off_flag=dict( + argstr="-voff", + ), ) inputs = RegF3D.input_spec() @@ -78,10 +190,18 @@ def test_RegF3D_inputs(): def test_RegF3D_outputs(): output_map = dict( avg_output=dict(), - cpp_file=dict(extensions=None), - invcpp_file=dict(extensions=None), - invres_file=dict(extensions=None), - res_file=dict(extensions=None), + cpp_file=dict( + extensions=None, + ), + invcpp_file=dict( + extensions=None, + ), + invres_file=dict( + extensions=None, + ), + res_file=dict( + extensions=None, + ), ) outputs = RegF3D.output_spec() diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py b/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py index 971fa36b44..60c8ce5c08 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py @@ -4,9 +4,17 @@ def test_RegJacobian_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - omp_core_val=dict(argstr="-omp %i", usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + omp_core_val=dict( + argstr="-omp %i", + usedefault=True, + ), out_file=dict( argstr="%s", extensions=None, @@ -14,9 +22,20 @@ def test_RegJacobian_inputs(): name_template="%s", position=-1, ), - ref_file=dict(argstr="-ref %s", extensions=None), - trans_file=dict(argstr="-trans %s", extensions=None, mandatory=True), - type=dict(argstr="-%s", position=-2, usedefault=True), + ref_file=dict( + argstr="-ref %s", + extensions=None, + ), + trans_file=dict( + argstr="-trans %s", + extensions=None, + mandatory=True, + ), + type=dict( + argstr="-%s", + position=-2, + usedefault=True, + ), ) inputs = RegJacobian.input_spec() @@ -26,7 +45,11 @@ def test_RegJacobian_inputs(): def test_RegJacobian_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = RegJacobian.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py b/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py index 0579447aa0..8a7e470e6c 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py @@ -4,18 +4,37 @@ def test_RegMeasure_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - flo_file=dict(argstr="-flo %s", extensions=None, 
mandatory=True), - measure_type=dict(argstr="-%s", mandatory=True), - omp_core_val=dict(argstr="-omp %i", usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flo_file=dict( + argstr="-flo %s", + extensions=None, + mandatory=True, + ), + measure_type=dict( + argstr="-%s", + mandatory=True, + ), + omp_core_val=dict( + argstr="-omp %i", + usedefault=True, + ), out_file=dict( argstr="-out %s", extensions=None, name_source=["flo_file"], name_template="%s", ), - ref_file=dict(argstr="-ref %s", extensions=None, mandatory=True), + ref_file=dict( + argstr="-ref %s", + extensions=None, + mandatory=True, + ), ) inputs = RegMeasure.input_spec() @@ -25,7 +44,11 @@ def test_RegMeasure_inputs(): def test_RegMeasure_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = RegMeasure.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py b/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py index 24533ad9e2..6d9c9a93e5 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py @@ -4,11 +4,25 @@ def test_RegResample_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - flo_file=dict(argstr="-flo %s", extensions=None, mandatory=True), - inter_val=dict(argstr="-inter %d"), - omp_core_val=dict(argstr="-omp %i", usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flo_file=dict( + argstr="-flo %s", + extensions=None, + mandatory=True, + ), + inter_val=dict( + argstr="-inter %d", + ), + omp_core_val=dict( + argstr="-omp %i", + usedefault=True, + ), out_file=dict( argstr="%s", extensions=None, @@ -16,14 +30,35 @@ def test_RegResample_inputs(): name_template="%s", position=-1, ), - pad_val=dict(argstr="-pad %f"), - psf_alg=dict(argstr="-psf_alg %d"), - psf_flag=dict(argstr="-psf"), - ref_file=dict(argstr="-ref %s", extensions=None, mandatory=True), - tensor_flag=dict(argstr="-tensor "), - trans_file=dict(argstr="-trans %s", extensions=None), - type=dict(argstr="-%s", position=-2, usedefault=True), - verbosity_off_flag=dict(argstr="-voff"), + pad_val=dict( + argstr="-pad %f", + ), + psf_alg=dict( + argstr="-psf_alg %d", + ), + psf_flag=dict( + argstr="-psf", + ), + ref_file=dict( + argstr="-ref %s", + extensions=None, + mandatory=True, + ), + tensor_flag=dict( + argstr="-tensor ", + ), + trans_file=dict( + argstr="-trans %s", + extensions=None, + ), + type=dict( + argstr="-%s", + position=-2, + usedefault=True, + ), + verbosity_off_flag=dict( + argstr="-voff", + ), ) inputs = RegResample.input_spec() @@ -33,7 +68,11 @@ def test_RegResample_inputs(): def test_RegResample_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = RegResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py b/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py index 6c6afecb36..9abf8184ec 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py @@ -4,31 +4,75 @@ def test_RegTools_inputs(): input_map = dict( - add_val=dict(argstr="-add %s"), - args=dict(argstr="%s"), - bin_flag=dict(argstr="-bin"), - 
chg_res_val=dict(argstr="-chgres %f %f %f"), - div_val=dict(argstr="-div %s"), - down_flag=dict(argstr="-down"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True), - inter_val=dict(argstr="-interp %d"), - iso_flag=dict(argstr="-iso"), - mask_file=dict(argstr="-nan %s", extensions=None), - mul_val=dict(argstr="-mul %s"), - noscl_flag=dict(argstr="-noscl"), - omp_core_val=dict(argstr="-omp %i", usedefault=True), + add_val=dict( + argstr="-add %s", + ), + args=dict( + argstr="%s", + ), + bin_flag=dict( + argstr="-bin", + ), + chg_res_val=dict( + argstr="-chgres %f %f %f", + ), + div_val=dict( + argstr="-div %s", + ), + down_flag=dict( + argstr="-down", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + ), + inter_val=dict( + argstr="-interp %d", + ), + iso_flag=dict( + argstr="-iso", + ), + mask_file=dict( + argstr="-nan %s", + extensions=None, + ), + mul_val=dict( + argstr="-mul %s", + ), + noscl_flag=dict( + argstr="-noscl", + ), + omp_core_val=dict( + argstr="-omp %i", + usedefault=True, + ), out_file=dict( argstr="-out %s", extensions=None, name_source=["in_file"], name_template="%s_tools.nii.gz", ), - rms_val=dict(argstr="-rms %s", extensions=None), - smo_g_val=dict(argstr="-smoG %f %f %f"), - smo_s_val=dict(argstr="-smoS %f %f %f"), - sub_val=dict(argstr="-sub %s"), - thr_val=dict(argstr="-thr %f"), + rms_val=dict( + argstr="-rms %s", + extensions=None, + ), + smo_g_val=dict( + argstr="-smoG %f %f %f", + ), + smo_s_val=dict( + argstr="-smoS %f %f %f", + ), + sub_val=dict( + argstr="-sub %s", + ), + thr_val=dict( + argstr="-thr %f", + ), ) inputs = RegTools.input_spec() @@ -38,7 +82,11 @@ def test_RegTools_inputs(): def test_RegTools_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = RegTools.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py b/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py index 572e8bdad3..b9ee8bf2af 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py @@ -21,7 +21,9 @@ def test_RegTransform_inputs(): "flirt_2_nr_input", ], ), - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), comp_input=dict( argstr="-comp %s", extensions=None, @@ -40,7 +42,11 @@ def test_RegTransform_inputs(): "flirt_2_nr_input", ], ), - comp_input2=dict(argstr="%s", extensions=None, position=-2), + comp_input2=dict( + argstr="%s", + extensions=None, + position=-2, + ), def_input=dict( argstr="-def %s", extensions=None, @@ -75,7 +81,10 @@ def test_RegTransform_inputs(): "flirt_2_nr_input", ], ), - environ=dict(nohash=True, usedefault=True), + environ=dict( + nohash=True, + usedefault=True, + ), flirt_2_nr_input=dict( argstr="-flirtAff2NR %s %s %s", position=-2, @@ -175,11 +184,26 @@ def test_RegTransform_inputs(): "flirt_2_nr_input", ], ), - omp_core_val=dict(argstr="-omp %i", usedefault=True), - out_file=dict(argstr="%s", extensions=None, genfile=True, position=-1), - ref1_file=dict(argstr="-ref %s", extensions=None, position=0), + omp_core_val=dict( + argstr="-omp %i", + usedefault=True, + ), + out_file=dict( + argstr="%s", + extensions=None, + genfile=True, + position=-1, + ), + ref1_file=dict( + argstr="-ref %s", + extensions=None, + position=0, + ), ref2_file=dict( - argstr="-ref2 %s", 
extensions=None, position=1, requires=["ref1_file"] + argstr="-ref2 %s", + extensions=None, + position=1, + requires=["ref1_file"], ), upd_s_form_input=dict( argstr="-updSform %s", @@ -200,7 +224,10 @@ def test_RegTransform_inputs(): ], ), upd_s_form_input2=dict( - argstr="%s", extensions=None, position=-2, requires=["upd_s_form_input"] + argstr="%s", + extensions=None, + position=-2, + requires=["upd_s_form_input"], ), ) inputs = RegTransform.input_spec() @@ -211,7 +238,11 @@ def test_RegTransform_inputs(): def test_RegTransform_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = RegTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py index eff9e4de49..4c0a962a21 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py @@ -4,9 +4,19 @@ def test_BinaryMaths_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), operand_file=dict( argstr="%s", extensions=None, @@ -26,7 +36,11 @@ def test_BinaryMaths_inputs(): position=5, xor=["operand_file", "operand_str"], ), - operation=dict(argstr="-%s", mandatory=True, position=4), + operation=dict( + argstr="-%s", + mandatory=True, + position=4, + ), out_file=dict( argstr="%s", extensions=None, @@ -34,7 +48,10 @@ def test_BinaryMaths_inputs(): name_template="%s", position=-2, ), - output_datatype=dict(argstr="-odt %s", position=-3), + output_datatype=dict( + argstr="-odt %s", + position=-3, + ), ) inputs = BinaryMaths.input_spec() @@ -44,7 +61,11 @@ def test_BinaryMaths_inputs(): def test_BinaryMaths_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = BinaryMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py index bceba25b22..440cb92bbc 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py @@ -4,11 +4,29 @@ def test_BinaryMathsInteger_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - operand_value=dict(argstr="%d", mandatory=True, position=5), - operation=dict(argstr="-%s", mandatory=True, position=4), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + operand_value=dict( + argstr="%d", + mandatory=True, + position=5, + ), + operation=dict( + argstr="-%s", + mandatory=True, + position=4, + ), out_file=dict( argstr="%s", extensions=None, @@ -16,7 +34,10 @@ def test_BinaryMathsInteger_inputs(): name_template="%s", position=-2, ), - output_datatype=dict(argstr="-odt %s", position=-3), + output_datatype=dict( + argstr="-odt %s", + position=-3, + ), ) inputs = 
BinaryMathsInteger.input_spec() @@ -26,7 +47,11 @@ def test_BinaryMathsInteger_inputs(): def test_BinaryMathsInteger_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = BinaryMathsInteger.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py b/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py index a01dd5ba16..5a5ac7298b 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py @@ -4,11 +4,28 @@ def test_BinaryStats_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - larger_voxel=dict(argstr="-t %f", position=-3), - mask_file=dict(argstr="-m %s", extensions=None, position=-2), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + larger_voxel=dict( + argstr="-t %f", + position=-3, + ), + mask_file=dict( + argstr="-m %s", + extensions=None, + position=-2, + ), operand_file=dict( argstr="%s", extensions=None, @@ -17,9 +34,16 @@ def test_BinaryStats_inputs(): xor=["operand_value"], ), operand_value=dict( - argstr="%.8f", mandatory=True, position=5, xor=["operand_file"] + argstr="%.8f", + mandatory=True, + position=5, + xor=["operand_file"], + ), + operation=dict( + argstr="-%s", + mandatory=True, + position=4, ), - operation=dict(argstr="-%s", mandatory=True, position=4), ) inputs = BinaryStats.input_spec() @@ -29,7 +53,9 @@ def test_BinaryStats_inputs(): def test_BinaryStats_outputs(): - output_map = dict(output=dict()) + output_map = dict( + output=dict(), + ) outputs = BinaryStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py b/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py index 40bf7f7926..e0943be61e 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py @@ -4,13 +4,38 @@ def test_CalcTopNCC_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-target %s", extensions=None, mandatory=True, position=1), - in_templates=dict(argstr="%s", mandatory=True, position=3), - mask_file=dict(argstr="-mask %s", extensions=None), - num_templates=dict(argstr="-templates %s", mandatory=True, position=2), - top_templates=dict(argstr="-n %s", mandatory=True, position=4), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-target %s", + extensions=None, + mandatory=True, + position=1, + ), + in_templates=dict( + argstr="%s", + mandatory=True, + position=3, + ), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + num_templates=dict( + argstr="-templates %s", + mandatory=True, + position=2, + ), + top_templates=dict( + argstr="-n %s", + mandatory=True, + position=4, + ), ) inputs = CalcTopNCC.input_spec() @@ -20,7 +45,9 @@ def test_CalcTopNCC_inputs(): def test_CalcTopNCC_outputs(): - output_map = dict(out_files=dict()) + output_map = dict( + out_files=dict(), + ) outputs = CalcTopNCC.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_EM.py 
b/nipype/interfaces/niftyseg/tests/test_auto_EM.py index 977ae7ce44..a0394b174e 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_EM.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_EM.py @@ -4,17 +4,46 @@ def test_EM_inputs(): input_map = dict( - args=dict(argstr="%s"), - bc_order_val=dict(argstr="-bc_order %s", usedefault=True), - bc_thresh_val=dict(argstr="-bc_thresh %s", usedefault=True), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=4), - mask_file=dict(argstr="-mask %s", extensions=None), - max_iter=dict(argstr="-max_iter %s", usedefault=True), - min_iter=dict(argstr="-min_iter %s", usedefault=True), - mrf_beta_val=dict(argstr="-mrf_beta %s"), + args=dict( + argstr="%s", + ), + bc_order_val=dict( + argstr="-bc_order %s", + usedefault=True, + ), + bc_thresh_val=dict( + argstr="-bc_thresh %s", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + position=4, + ), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + max_iter=dict( + argstr="-max_iter %s", + usedefault=True, + ), + min_iter=dict( + argstr="-min_iter %s", + usedefault=True, + ), + mrf_beta_val=dict( + argstr="-mrf_beta %s", + ), no_prior=dict( - argstr="-nopriors %s", mandatory=True, xor=["prior_4D", "priors"] + argstr="-nopriors %s", + mandatory=True, + xor=["prior_4D", "priors"], ), out_bc_file=dict( argstr="-bc_out %s", @@ -34,16 +63,26 @@ def test_EM_inputs(): name_source=["in_file"], name_template="%s_outlier_em.nii.gz", ), - outlier_val=dict(argstr="-outlier %s %s"), + outlier_val=dict( + argstr="-outlier %s %s", + ), prior_4D=dict( argstr="-prior4D %s", extensions=None, mandatory=True, xor=["no_prior", "priors"], ), - priors=dict(argstr="%s", mandatory=True, xor=["no_prior", "prior_4D"]), - reg_val=dict(argstr="-reg %s"), - relax_priors=dict(argstr="-rf %s %s"), + priors=dict( + argstr="%s", + mandatory=True, + xor=["no_prior", "prior_4D"], + ), + reg_val=dict( + argstr="-reg %s", + ), + relax_priors=dict( + argstr="-rf %s %s", + ), ) inputs = EM.input_spec() @@ -54,9 +93,15 @@ def test_EM_inputs(): def test_EM_outputs(): output_map = dict( - out_bc_file=dict(extensions=None), - out_file=dict(extensions=None), - out_outlier_file=dict(extensions=None), + out_bc_file=dict( + extensions=None, + ), + out_file=dict( + extensions=None, + ), + out_outlier_file=dict( + extensions=None, + ), ) outputs = EM.output_spec() diff --git a/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py b/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py index 9d694c103b..9e1b06a892 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py @@ -4,17 +4,47 @@ def test_FillLesions_inputs(): input_map = dict( - args=dict(argstr="%s"), - bin_mask=dict(argstr="-mask %s", extensions=None), - cwf=dict(argstr="-cwf %f"), - debug=dict(argstr="-debug"), - environ=dict(nohash=True, usedefault=True), - in_dilation=dict(argstr="-dil %d"), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=1), - lesion_mask=dict(argstr="-l %s", extensions=None, mandatory=True, position=2), - match=dict(argstr="-match %f"), - other=dict(argstr="-other"), - out_datatype=dict(argstr="-odt %s"), + args=dict( + argstr="%s", + ), + bin_mask=dict( + argstr="-mask %s", + extensions=None, + ), + cwf=dict( + argstr="-cwf %f", + ), + debug=dict( + argstr="-debug", + ), + environ=dict( 
+ nohash=True, + usedefault=True, + ), + in_dilation=dict( + argstr="-dil %d", + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + position=1, + ), + lesion_mask=dict( + argstr="-l %s", + extensions=None, + mandatory=True, + position=2, + ), + match=dict( + argstr="-match %f", + ), + other=dict( + argstr="-other", + ), + out_datatype=dict( + argstr="-odt %s", + ), out_file=dict( argstr="-o %s", extensions=None, @@ -22,11 +52,21 @@ def test_FillLesions_inputs(): name_template="%s_lesions_filled.nii.gz", position=3, ), - search=dict(argstr="-search %f"), - size=dict(argstr="-size %d"), - smooth=dict(argstr="-smo %f"), - use_2d=dict(argstr="-2D"), - verbose=dict(argstr="-v"), + search=dict( + argstr="-search %f", + ), + size=dict( + argstr="-size %d", + ), + smooth=dict( + argstr="-smo %f", + ), + use_2d=dict( + argstr="-2D", + ), + verbose=dict( + argstr="-v", + ), ) inputs = FillLesions.input_spec() @@ -36,7 +76,11 @@ def test_FillLesions_inputs(): def test_FillLesions_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = FillLesions.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py b/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py index 82c676a5f0..dc4590a15b 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py @@ -4,33 +4,79 @@ def test_LabelFusion_inputs(): input_map = dict( - args=dict(argstr="%s"), - classifier_type=dict(argstr="-%s", mandatory=True, position=2), - conv=dict(argstr="-conv %f"), + args=dict( + argstr="%s", + ), + classifier_type=dict( + argstr="-%s", + mandatory=True, + position=2, + ), + conv=dict( + argstr="-conv %f", + ), dilation_roi=dict(), - environ=dict(nohash=True, usedefault=True), - file_to_seg=dict(extensions=None, mandatory=True), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=1), + environ=dict( + nohash=True, + usedefault=True, + ), + file_to_seg=dict( + extensions=None, + mandatory=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + position=1, + ), kernel_size=dict(), - mask_file=dict(argstr="-mask %s", extensions=None), - max_iter=dict(argstr="-max_iter %d"), - mrf_value=dict(argstr="-MRF_beta %f"), + mask_file=dict( + argstr="-mask %s", + extensions=None, + ), + max_iter=dict( + argstr="-max_iter %d", + ), + mrf_value=dict( + argstr="-MRF_beta %f", + ), out_file=dict( argstr="-out %s", extensions=None, name_source=["in_file"], name_template="%s", ), - prob_flag=dict(argstr="-outProb"), - prob_update_flag=dict(argstr="-prop_update"), - proportion=dict(argstr="-prop %s"), - set_pq=dict(argstr="-setPQ %f %f"), - sm_ranking=dict(argstr="-%s", position=3, usedefault=True), - template_file=dict(extensions=None), + prob_flag=dict( + argstr="-outProb", + ), + prob_update_flag=dict( + argstr="-prop_update", + ), + proportion=dict( + argstr="-prop %s", + ), + set_pq=dict( + argstr="-setPQ %f %f", + ), + sm_ranking=dict( + argstr="-%s", + position=3, + usedefault=True, + ), + template_file=dict( + extensions=None, + ), template_num=dict(), - unc=dict(argstr="-unc"), - unc_thresh=dict(argstr="-uncthres %f"), - verbose=dict(argstr="-v %s"), + unc=dict( + argstr="-unc", + ), + unc_thresh=dict( + argstr="-uncthres %f", + ), + verbose=dict( + argstr="-v %s", + ), ) inputs = LabelFusion.input_spec() @@ -40,7 +86,11 @@ def 
test_LabelFusion_inputs(): def test_LabelFusion_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = LabelFusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py b/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py index b0a32ba7ab..963ddf96f8 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py @@ -4,9 +4,19 @@ def test_MathsCommand_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), out_file=dict( argstr="%s", extensions=None, @@ -14,7 +24,10 @@ def test_MathsCommand_inputs(): name_template="%s", position=-2, ), - output_datatype=dict(argstr="-odt %s", position=-3), + output_datatype=dict( + argstr="-odt %s", + position=-3, + ), ) inputs = MathsCommand.input_spec() @@ -24,7 +37,11 @@ def test_MathsCommand_inputs(): def test_MathsCommand_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MathsCommand.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_Merge.py b/nipype/interfaces/niftyseg/tests/test_auto_Merge.py index 7e91b79484..de8dc903e6 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_Merge.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_Merge.py @@ -4,11 +4,27 @@ def test_Merge_inputs(): input_map = dict( - args=dict(argstr="%s"), - dimension=dict(mandatory=True), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - merge_files=dict(argstr="%s", mandatory=True, position=4), + args=dict( + argstr="%s", + ), + dimension=dict( + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + merge_files=dict( + argstr="%s", + mandatory=True, + position=4, + ), out_file=dict( argstr="%s", extensions=None, @@ -16,7 +32,10 @@ def test_Merge_inputs(): name_template="%s", position=-2, ), - output_datatype=dict(argstr="-odt %s", position=-3), + output_datatype=dict( + argstr="-odt %s", + position=-3, + ), ) inputs = Merge.input_spec() @@ -26,7 +45,11 @@ def test_Merge_inputs(): def test_Merge_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Merge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py b/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py index 4db7d817f6..37a6ee059c 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py @@ -3,7 +3,15 @@ def test_NiftySegCommand_inputs(): - input_map = dict(args=dict(argstr="%s"), environ=dict(nohash=True, usedefault=True)) + input_map = dict( + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ) inputs = NiftySegCommand.input_spec() for key, metadata in 
list(input_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py b/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py index 067349ac24..c5b9dba115 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py @@ -4,16 +4,40 @@ def test_PatchMatch_inputs(): input_map = dict( - args=dict(argstr="%s"), - cs_size=dict(argstr="-cs %i"), + args=dict( + argstr="%s", + ), + cs_size=dict( + argstr="-cs %i", + ), database_file=dict( - argstr="-db %s", extensions=None, mandatory=True, position=3 + argstr="-db %s", + extensions=None, + mandatory=True, + position=3, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + position=1, + ), + it_num=dict( + argstr="-it %i", + ), + mask_file=dict( + argstr="-m %s", + extensions=None, + mandatory=True, + position=2, + ), + match_num=dict( + argstr="-match %i", ), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True, position=1), - it_num=dict(argstr="-it %i"), - mask_file=dict(argstr="-m %s", extensions=None, mandatory=True, position=2), - match_num=dict(argstr="-match %i"), out_file=dict( argstr="-o %s", extensions=None, @@ -21,8 +45,12 @@ def test_PatchMatch_inputs(): name_template="%s_pm.nii.gz", position=4, ), - patch_size=dict(argstr="-size %i"), - pm_num=dict(argstr="-pm %i"), + patch_size=dict( + argstr="-size %i", + ), + pm_num=dict( + argstr="-pm %i", + ), ) inputs = PatchMatch.input_spec() @@ -32,7 +60,11 @@ def test_PatchMatch_inputs(): def test_PatchMatch_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = PatchMatch.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py b/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py index e710f3dd9b..6b173663a9 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py @@ -4,11 +4,28 @@ def test_StatsCommand_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - larger_voxel=dict(argstr="-t %f", position=-3), - mask_file=dict(argstr="-m %s", extensions=None, position=-2), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + larger_voxel=dict( + argstr="-t %f", + position=-3, + ), + mask_file=dict( + argstr="-m %s", + extensions=None, + position=-2, + ), ) inputs = StatsCommand.input_spec() @@ -18,7 +35,9 @@ def test_StatsCommand_inputs(): def test_StatsCommand_outputs(): - output_map = dict(output=dict()) + output_map = dict( + output=dict(), + ) outputs = StatsCommand.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py b/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py index b82d0cb9c1..ef1d4c401f 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py @@ -4,9 +4,19 @@ def test_TupleMaths_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", 
extensions=None, mandatory=True, position=2), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), operand_file1=dict( argstr="%s", extensions=None, @@ -22,12 +32,22 @@ def test_TupleMaths_inputs(): xor=["operand_value2"], ), operand_value1=dict( - argstr="%.8f", mandatory=True, position=5, xor=["operand_file1"] + argstr="%.8f", + mandatory=True, + position=5, + xor=["operand_file1"], ), operand_value2=dict( - argstr="%.8f", mandatory=True, position=6, xor=["operand_file2"] + argstr="%.8f", + mandatory=True, + position=6, + xor=["operand_file2"], + ), + operation=dict( + argstr="-%s", + mandatory=True, + position=4, ), - operation=dict(argstr="-%s", mandatory=True, position=4), out_file=dict( argstr="%s", extensions=None, @@ -35,7 +55,10 @@ def test_TupleMaths_inputs(): name_template="%s", position=-2, ), - output_datatype=dict(argstr="-odt %s", position=-3), + output_datatype=dict( + argstr="-odt %s", + position=-3, + ), ) inputs = TupleMaths.input_spec() @@ -45,7 +68,11 @@ def test_TupleMaths_inputs(): def test_TupleMaths_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = TupleMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py b/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py index 3cabcf15a5..f8189f0f84 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py @@ -4,10 +4,24 @@ def test_UnaryMaths_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - operation=dict(argstr="-%s", mandatory=True, position=4), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), + operation=dict( + argstr="-%s", + mandatory=True, + position=4, + ), out_file=dict( argstr="%s", extensions=None, @@ -15,7 +29,10 @@ def test_UnaryMaths_inputs(): name_template="%s", position=-2, ), - output_datatype=dict(argstr="-odt %s", position=-3), + output_datatype=dict( + argstr="-odt %s", + position=-3, + ), ) inputs = UnaryMaths.input_spec() @@ -25,7 +42,11 @@ def test_UnaryMaths_inputs(): def test_UnaryMaths_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = UnaryMaths.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py b/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py index bf742096aa..117ab819b6 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py @@ -4,12 +4,33 @@ def test_UnaryStats_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), - larger_voxel=dict(argstr="-t %f", position=-3), - mask_file=dict(argstr="-m %s", extensions=None, position=-2), - operation=dict(argstr="-%s", mandatory=True, position=4), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + 
extensions=None, + mandatory=True, + position=2, + ), + larger_voxel=dict( + argstr="-t %f", + position=-3, + ), + mask_file=dict( + argstr="-m %s", + extensions=None, + position=-2, + ), + operation=dict( + argstr="-%s", + mandatory=True, + position=4, + ), ) inputs = UnaryStats.input_spec() @@ -19,7 +40,9 @@ def test_UnaryStats_inputs(): def test_UnaryStats_outputs(): - output_map = dict(output=dict()) + output_map = dict( + output=dict(), + ) outputs = UnaryStats.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py b/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py index 0dd1fbe7fa..db1b784a03 100644 --- a/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py +++ b/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py @@ -7,8 +7,13 @@ def test_ComputeMask_inputs(): M=dict(), cc=dict(), m=dict(), - mean_volume=dict(extensions=None, mandatory=True), - reference_volume=dict(extensions=None), + mean_volume=dict( + extensions=None, + mandatory=True, + ), + reference_volume=dict( + extensions=None, + ), ) inputs = ComputeMask.input_spec() @@ -18,7 +23,11 @@ def test_ComputeMask_inputs(): def test_ComputeMask_outputs(): - output_map = dict(brain_mask=dict(extensions=None)) + output_map = dict( + brain_mask=dict( + extensions=None, + ), + ) outputs = ComputeMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py b/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py index 53369383c7..172f2205fd 100644 --- a/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py +++ b/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py @@ -4,15 +4,35 @@ def test_EstimateContrast_inputs(): input_map = dict( - axis=dict(mandatory=True), - beta=dict(extensions=None, mandatory=True), - constants=dict(mandatory=True), - contrasts=dict(mandatory=True), - dof=dict(mandatory=True), - mask=dict(extensions=None), - nvbeta=dict(mandatory=True), - reg_names=dict(mandatory=True), - s2=dict(extensions=None, mandatory=True), + axis=dict( + mandatory=True, + ), + beta=dict( + extensions=None, + mandatory=True, + ), + constants=dict( + mandatory=True, + ), + contrasts=dict( + mandatory=True, + ), + dof=dict( + mandatory=True, + ), + mask=dict( + extensions=None, + ), + nvbeta=dict( + mandatory=True, + ), + reg_names=dict( + mandatory=True, + ), + s2=dict( + extensions=None, + mandatory=True, + ), ) inputs = EstimateContrast.input_spec() @@ -22,7 +42,11 @@ def test_EstimateContrast_inputs(): def test_EstimateContrast_outputs(): - output_map = dict(p_maps=dict(), stat_maps=dict(), z_maps=dict()) + output_map = dict( + p_maps=dict(), + stat_maps=dict(), + z_maps=dict(), + ) outputs = EstimateContrast.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nipy/tests/test_auto_FitGLM.py b/nipype/interfaces/nipy/tests/test_auto_FitGLM.py index fc8bc8852e..f04081214c 100644 --- a/nipype/interfaces/nipy/tests/test_auto_FitGLM.py +++ b/nipype/interfaces/nipy/tests/test_auto_FitGLM.py @@ -4,16 +4,36 @@ def test_FitGLM_inputs(): input_map = dict( - TR=dict(mandatory=True), - drift_model=dict(usedefault=True), - hrf_model=dict(usedefault=True), - mask=dict(extensions=None), - method=dict(usedefault=True), - model=dict(usedefault=True), - normalize_design_matrix=dict(usedefault=True), - plot_design_matrix=dict(usedefault=True), - save_residuals=dict(usedefault=True), - session_info=dict(mandatory=True), + TR=dict( + 
mandatory=True, + ), + drift_model=dict( + usedefault=True, + ), + hrf_model=dict( + usedefault=True, + ), + mask=dict( + extensions=None, + ), + method=dict( + usedefault=True, + ), + model=dict( + usedefault=True, + ), + normalize_design_matrix=dict( + usedefault=True, + ), + plot_design_matrix=dict( + usedefault=True, + ), + save_residuals=dict( + usedefault=True, + ), + session_info=dict( + mandatory=True, + ), ) inputs = FitGLM.input_spec() @@ -24,15 +44,23 @@ def test_FitGLM_inputs(): def test_FitGLM_outputs(): output_map = dict( - a=dict(extensions=None), + a=dict( + extensions=None, + ), axis=dict(), - beta=dict(extensions=None), + beta=dict( + extensions=None, + ), constants=dict(), dof=dict(), nvbeta=dict(), reg_names=dict(), - residuals=dict(extensions=None), - s2=dict(extensions=None), + residuals=dict( + extensions=None, + ), + s2=dict( + extensions=None, + ), ) outputs = FitGLM.output_spec() diff --git a/nipype/interfaces/nipy/tests/test_auto_Similarity.py b/nipype/interfaces/nipy/tests/test_auto_Similarity.py index 61f50389db..81e8622078 100644 --- a/nipype/interfaces/nipy/tests/test_auto_Similarity.py +++ b/nipype/interfaces/nipy/tests/test_auto_Similarity.py @@ -4,11 +4,23 @@ def test_Similarity_inputs(): input_map = dict( - mask1=dict(extensions=None), - mask2=dict(extensions=None), - metric=dict(usedefault=True), - volume1=dict(extensions=None, mandatory=True), - volume2=dict(extensions=None, mandatory=True), + mask1=dict( + extensions=None, + ), + mask2=dict( + extensions=None, + ), + metric=dict( + usedefault=True, + ), + volume1=dict( + extensions=None, + mandatory=True, + ), + volume2=dict( + extensions=None, + mandatory=True, + ), ) inputs = Similarity.input_spec() @@ -18,7 +30,9 @@ def test_Similarity_inputs(): def test_Similarity_outputs(): - output_map = dict(similarity=dict()) + output_map = dict( + similarity=dict(), + ) outputs = Similarity.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py b/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py index 1f063e24e1..fd65848f72 100644 --- a/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py +++ b/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py @@ -4,10 +4,17 @@ def test_SpaceTimeRealigner_inputs(): input_map = dict( - in_file=dict(mandatory=True, min_ver="0.4.0.dev"), - slice_info=dict(requires=["slice_times"]), + in_file=dict( + mandatory=True, + min_ver="0.4.0.dev", + ), + slice_info=dict( + requires=["slice_times"], + ), slice_times=dict(), - tr=dict(requires=["slice_times"]), + tr=dict( + requires=["slice_times"], + ), ) inputs = SpaceTimeRealigner.input_spec() @@ -17,7 +24,10 @@ def test_SpaceTimeRealigner_inputs(): def test_SpaceTimeRealigner_outputs(): - output_map = dict(out_file=dict(), par_file=dict()) + output_map = dict( + out_file=dict(), + par_file=dict(), + ) outputs = SpaceTimeRealigner.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nipy/tests/test_auto_Trim.py b/nipype/interfaces/nipy/tests/test_auto_Trim.py index 5855e3b27a..c4ecee3007 100644 --- a/nipype/interfaces/nipy/tests/test_auto_Trim.py +++ b/nipype/interfaces/nipy/tests/test_auto_Trim.py @@ -4,11 +4,22 @@ def test_Trim_inputs(): input_map = dict( - begin_index=dict(usedefault=True), - end_index=dict(usedefault=True), - in_file=dict(extensions=None, mandatory=True), - out_file=dict(extensions=None), - suffix=dict(usedefault=True), + begin_index=dict( + usedefault=True, + ), + 
end_index=dict( + usedefault=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + out_file=dict( + extensions=None, + ), + suffix=dict( + usedefault=True, + ), ) inputs = Trim.input_spec() @@ -18,7 +29,11 @@ def test_Trim_inputs(): def test_Trim_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Trim.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py b/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py index 56c04ae43e..8c70d059ab 100644 --- a/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py +++ b/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py @@ -4,15 +4,30 @@ def test_CoherenceAnalyzer_inputs(): input_map = dict( - NFFT=dict(usedefault=True), + NFFT=dict( + usedefault=True, + ), TR=dict(), - figure_type=dict(usedefault=True), - frequency_range=dict(usedefault=True), + figure_type=dict( + usedefault=True, + ), + frequency_range=dict( + usedefault=True, + ), in_TS=dict(), - in_file=dict(extensions=None, requires=("TR",)), - n_overlap=dict(usedefault=True), - output_csv_file=dict(extensions=None), - output_figure_file=dict(extensions=None), + in_file=dict( + extensions=None, + requires=("TR",), + ), + n_overlap=dict( + usedefault=True, + ), + output_csv_file=dict( + extensions=None, + ), + output_figure_file=dict( + extensions=None, + ), ) inputs = CoherenceAnalyzer.input_spec() @@ -24,11 +39,19 @@ def test_CoherenceAnalyzer_inputs(): def test_CoherenceAnalyzer_outputs(): output_map = dict( coherence_array=dict(), - coherence_csv=dict(extensions=None), - coherence_fig=dict(extensions=None), + coherence_csv=dict( + extensions=None, + ), + coherence_fig=dict( + extensions=None, + ), timedelay_array=dict(), - timedelay_csv=dict(extensions=None), - timedelay_fig=dict(extensions=None), + timedelay_csv=dict( + extensions=None, + ), + timedelay_fig=dict( + extensions=None, + ), ) outputs = CoherenceAnalyzer.output_spec() diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py index e222b58c36..9098ee2640 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py @@ -4,16 +4,45 @@ def test_BRAINSPosteriorToContinuousClass_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputBasalGmVolume=dict(argstr="--inputBasalGmVolume %s", extensions=None), - inputCrblGmVolume=dict(argstr="--inputCrblGmVolume %s", extensions=None), - inputCrblWmVolume=dict(argstr="--inputCrblWmVolume %s", extensions=None), - inputCsfVolume=dict(argstr="--inputCsfVolume %s", extensions=None), - inputSurfaceGmVolume=dict(argstr="--inputSurfaceGmVolume %s", extensions=None), - inputVbVolume=dict(argstr="--inputVbVolume %s", extensions=None), - inputWhiteVolume=dict(argstr="--inputWhiteVolume %s", extensions=None), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputBasalGmVolume=dict( + argstr="--inputBasalGmVolume %s", + extensions=None, + ), + inputCrblGmVolume=dict( + argstr="--inputCrblGmVolume %s", + extensions=None, + ), + inputCrblWmVolume=dict( + argstr="--inputCrblWmVolume %s", 
+ extensions=None, + ), + inputCsfVolume=dict( + argstr="--inputCsfVolume %s", + extensions=None, + ), + inputSurfaceGmVolume=dict( + argstr="--inputSurfaceGmVolume %s", + extensions=None, + ), + inputVbVolume=dict( + argstr="--inputVbVolume %s", + extensions=None, + ), + inputWhiteVolume=dict( + argstr="--inputWhiteVolume %s", + extensions=None, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = BRAINSPosteriorToContinuousClass.input_spec() @@ -23,7 +52,11 @@ def test_BRAINSPosteriorToContinuousClass_inputs(): def test_BRAINSPosteriorToContinuousClass_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = BRAINSPosteriorToContinuousClass.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py index bfdbe33f46..195ebdcad0 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py @@ -4,19 +4,53 @@ def test_BRAINSTalairach_inputs(): input_map = dict( - AC=dict(argstr="--AC %s", sep=","), - ACisIndex=dict(argstr="--ACisIndex "), - IRP=dict(argstr="--IRP %s", sep=","), - IRPisIndex=dict(argstr="--IRPisIndex "), - PC=dict(argstr="--PC %s", sep=","), - PCisIndex=dict(argstr="--PCisIndex "), - SLA=dict(argstr="--SLA %s", sep=","), - SLAisIndex=dict(argstr="--SLAisIndex "), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - outputBox=dict(argstr="--outputBox %s", hash_files=False), - outputGrid=dict(argstr="--outputGrid %s", hash_files=False), + AC=dict( + argstr="--AC %s", + sep=",", + ), + ACisIndex=dict( + argstr="--ACisIndex ", + ), + IRP=dict( + argstr="--IRP %s", + sep=",", + ), + IRPisIndex=dict( + argstr="--IRPisIndex ", + ), + PC=dict( + argstr="--PC %s", + sep=",", + ), + PCisIndex=dict( + argstr="--PCisIndex ", + ), + SLA=dict( + argstr="--SLA %s", + sep=",", + ), + SLAisIndex=dict( + argstr="--SLAisIndex ", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + outputBox=dict( + argstr="--outputBox %s", + hash_files=False, + ), + outputGrid=dict( + argstr="--outputGrid %s", + hash_files=False, + ), ) inputs = BRAINSTalairach.input_spec() @@ -26,7 +60,14 @@ def test_BRAINSTalairach_inputs(): def test_BRAINSTalairach_outputs(): - output_map = dict(outputBox=dict(extensions=None), outputGrid=dict(extensions=None)) + output_map = dict( + outputBox=dict( + extensions=None, + ), + outputGrid=dict( + extensions=None, + ), + ) outputs = BRAINSTalairach.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py index 35ec58c6e1..2470e42f47 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py @@ -4,14 +4,35 @@ def test_BRAINSTalairachMask_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - expand=dict(argstr="--expand "), - hemisphereMode=dict(argstr="--hemisphereMode %s"), - 
inputVolume=dict(argstr="--inputVolume %s", extensions=None), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - talairachBox=dict(argstr="--talairachBox %s", extensions=None), - talairachParameters=dict(argstr="--talairachParameters %s", extensions=None), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + expand=dict( + argstr="--expand ", + ), + hemisphereMode=dict( + argstr="--hemisphereMode %s", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + talairachBox=dict( + argstr="--talairachBox %s", + extensions=None, + ), + talairachParameters=dict( + argstr="--talairachParameters %s", + extensions=None, + ), ) inputs = BRAINSTalairachMask.input_spec() @@ -21,7 +42,11 @@ def test_BRAINSTalairachMask_inputs(): def test_BRAINSTalairachMask_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = BRAINSTalairachMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py b/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py index efce068e3b..218c67a4b0 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py @@ -4,19 +4,43 @@ def test_GenerateEdgeMapImage_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputMRVolumes=dict(argstr="--inputMRVolumes %s..."), - inputMask=dict(argstr="--inputMask %s", extensions=None), - lowerPercentileMatching=dict(argstr="--lowerPercentileMatching %f"), - maximumOutputRange=dict(argstr="--maximumOutputRange %d"), - minimumOutputRange=dict(argstr="--minimumOutputRange %d"), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputEdgeMap=dict(argstr="--outputEdgeMap %s", hash_files=False), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMRVolumes=dict( + argstr="--inputMRVolumes %s...", + ), + inputMask=dict( + argstr="--inputMask %s", + extensions=None, + ), + lowerPercentileMatching=dict( + argstr="--lowerPercentileMatching %f", + ), + maximumOutputRange=dict( + argstr="--maximumOutputRange %d", + ), + minimumOutputRange=dict( + argstr="--minimumOutputRange %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputEdgeMap=dict( + argstr="--outputEdgeMap %s", + hash_files=False, + ), outputMaximumGradientImage=dict( - argstr="--outputMaximumGradientImage %s", hash_files=False + argstr="--outputMaximumGradientImage %s", + hash_files=False, + ), + upperPercentileMatching=dict( + argstr="--upperPercentileMatching %f", ), - upperPercentileMatching=dict(argstr="--upperPercentileMatching %f"), ) inputs = GenerateEdgeMapImage.input_spec() @@ -27,8 +51,12 @@ def test_GenerateEdgeMapImage_inputs(): def test_GenerateEdgeMapImage_outputs(): output_map = dict( - outputEdgeMap=dict(extensions=None), - outputMaximumGradientImage=dict(extensions=None), + outputEdgeMap=dict( + extensions=None, + ), + outputMaximumGradientImage=dict( + extensions=None, + ), ) outputs = GenerateEdgeMapImage.output_spec() diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py b/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py index 
ff38a9dc96..e68b03dcf9 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py @@ -4,12 +4,27 @@ def test_GeneratePurePlugMask_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputImageModalities=dict(argstr="--inputImageModalities %s..."), - numberOfSubSamples=dict(argstr="--numberOfSubSamples %s", sep=","), - outputMaskFile=dict(argstr="--outputMaskFile %s", hash_files=False), - threshold=dict(argstr="--threshold %f"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputImageModalities=dict( + argstr="--inputImageModalities %s...", + ), + numberOfSubSamples=dict( + argstr="--numberOfSubSamples %s", + sep=",", + ), + outputMaskFile=dict( + argstr="--outputMaskFile %s", + hash_files=False, + ), + threshold=dict( + argstr="--threshold %f", + ), ) inputs = GeneratePurePlugMask.input_spec() @@ -19,7 +34,11 @@ def test_GeneratePurePlugMask_inputs(): def test_GeneratePurePlugMask_outputs(): - output_map = dict(outputMaskFile=dict(extensions=None)) + output_map = dict( + outputMaskFile=dict( + extensions=None, + ), + ) outputs = GeneratePurePlugMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py b/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py index bc49e87966..110aec4891 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py @@ -4,20 +4,48 @@ def test_HistogramMatchingFilter_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - histogramAlgorithm=dict(argstr="--histogramAlgorithm %s"), - inputBinaryVolume=dict(argstr="--inputBinaryVolume %s", extensions=None), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d"), - numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + histogramAlgorithm=dict( + argstr="--histogramAlgorithm %s", + ), + inputBinaryVolume=dict( + argstr="--inputBinaryVolume %s", + extensions=None, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfHistogramBins=dict( + argstr="--numberOfHistogramBins %d", + ), + numberOfMatchPoints=dict( + argstr="--numberOfMatchPoints %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), referenceBinaryVolume=dict( - argstr="--referenceBinaryVolume %s", extensions=None + argstr="--referenceBinaryVolume %s", + extensions=None, + ), + referenceVolume=dict( + argstr="--referenceVolume %s", + extensions=None, + ), + verbose=dict( + argstr="--verbose ", + ), + writeHistogram=dict( + argstr="--writeHistogram %s", ), - referenceVolume=dict(argstr="--referenceVolume %s", extensions=None), - verbose=dict(argstr="--verbose "), - writeHistogram=dict(argstr="--writeHistogram %s"), ) inputs = HistogramMatchingFilter.input_spec() @@ -27,7 +55,11 @@ def test_HistogramMatchingFilter_inputs(): def test_HistogramMatchingFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) 
outputs = HistogramMatchingFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py b/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py index 7c5d7d1303..881e3379de 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py @@ -4,12 +4,28 @@ def test_SimilarityIndex_inputs(): input_map = dict( - ANNContinuousVolume=dict(argstr="--ANNContinuousVolume %s", extensions=None), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputManualVolume=dict(argstr="--inputManualVolume %s", extensions=None), - outputCSVFilename=dict(argstr="--outputCSVFilename %s", extensions=None), - thresholdInterval=dict(argstr="--thresholdInterval %f"), + ANNContinuousVolume=dict( + argstr="--ANNContinuousVolume %s", + extensions=None, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputManualVolume=dict( + argstr="--inputManualVolume %s", + extensions=None, + ), + outputCSVFilename=dict( + argstr="--outputCSVFilename %s", + extensions=None, + ), + thresholdInterval=dict( + argstr="--thresholdInterval %f", + ), ) inputs = SimilarityIndex.input_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py index f15a03e47b..4dfb6943cb 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py @@ -4,26 +4,76 @@ def test_DWIConvert_inputs(): input_map = dict( - allowLossyConversion=dict(argstr="--allowLossyConversion "), - args=dict(argstr="%s"), - conversionMode=dict(argstr="--conversionMode %s"), - environ=dict(nohash=True, usedefault=True), - fMRI=dict(argstr="--fMRI "), - fslNIFTIFile=dict(argstr="--fslNIFTIFile %s", extensions=None), - gradientVectorFile=dict(argstr="--gradientVectorFile %s", hash_files=False), - inputBValues=dict(argstr="--inputBValues %s", extensions=None), - inputBVectors=dict(argstr="--inputBVectors %s", extensions=None), - inputDicomDirectory=dict(argstr="--inputDicomDirectory %s"), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - outputBValues=dict(argstr="--outputBValues %s", hash_files=False), - outputBVectors=dict(argstr="--outputBVectors %s", hash_files=False), - outputDirectory=dict(argstr="--outputDirectory %s", hash_files=False), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - smallGradientThreshold=dict(argstr="--smallGradientThreshold %f"), - transposeInputBVectors=dict(argstr="--transposeInputBVectors "), - useBMatrixGradientDirections=dict(argstr="--useBMatrixGradientDirections "), - useIdentityMeaseurementFrame=dict(argstr="--useIdentityMeaseurementFrame "), - writeProtocolGradientsFile=dict(argstr="--writeProtocolGradientsFile "), + allowLossyConversion=dict( + argstr="--allowLossyConversion ", + ), + args=dict( + argstr="%s", + ), + conversionMode=dict( + argstr="--conversionMode %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fMRI=dict( + argstr="--fMRI ", + ), + fslNIFTIFile=dict( + argstr="--fslNIFTIFile %s", + extensions=None, + ), + gradientVectorFile=dict( + argstr="--gradientVectorFile %s", + hash_files=False, + ), + inputBValues=dict( + argstr="--inputBValues %s", + extensions=None, + ), + inputBVectors=dict( + argstr="--inputBVectors %s", + 
extensions=None, + ), + inputDicomDirectory=dict( + argstr="--inputDicomDirectory %s", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + outputBValues=dict( + argstr="--outputBValues %s", + hash_files=False, + ), + outputBVectors=dict( + argstr="--outputBVectors %s", + hash_files=False, + ), + outputDirectory=dict( + argstr="--outputDirectory %s", + hash_files=False, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + smallGradientThreshold=dict( + argstr="--smallGradientThreshold %f", + ), + transposeInputBVectors=dict( + argstr="--transposeInputBVectors ", + ), + useBMatrixGradientDirections=dict( + argstr="--useBMatrixGradientDirections ", + ), + useIdentityMeaseurementFrame=dict( + argstr="--useIdentityMeaseurementFrame ", + ), + writeProtocolGradientsFile=dict( + argstr="--writeProtocolGradientsFile ", + ), ) inputs = DWIConvert.input_spec() @@ -34,11 +84,19 @@ def test_DWIConvert_inputs(): def test_DWIConvert_outputs(): output_map = dict( - gradientVectorFile=dict(extensions=None), - outputBValues=dict(extensions=None), - outputBVectors=dict(extensions=None), + gradientVectorFile=dict( + extensions=None, + ), + outputBValues=dict( + extensions=None, + ), + outputBVectors=dict( + extensions=None, + ), outputDirectory=dict(), - outputVolume=dict(extensions=None), + outputVolume=dict( + extensions=None, + ), ) outputs = DWIConvert.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py index fe06bb927d..38e8f92b0b 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py @@ -4,16 +4,39 @@ def test_compareTractInclusion_inputs(): input_map = dict( - args=dict(argstr="%s"), - closeness=dict(argstr="--closeness %f"), - environ=dict(nohash=True, usedefault=True), - numberOfPoints=dict(argstr="--numberOfPoints %d"), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - standardFiber=dict(argstr="--standardFiber %s", extensions=None), - testFiber=dict(argstr="--testFiber %s", extensions=None), - testForBijection=dict(argstr="--testForBijection "), - testForFiberCardinality=dict(argstr="--testForFiberCardinality "), - writeXMLPolyDataFile=dict(argstr="--writeXMLPolyDataFile "), + args=dict( + argstr="%s", + ), + closeness=dict( + argstr="--closeness %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + numberOfPoints=dict( + argstr="--numberOfPoints %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + standardFiber=dict( + argstr="--standardFiber %s", + extensions=None, + ), + testFiber=dict( + argstr="--testFiber %s", + extensions=None, + ), + testForBijection=dict( + argstr="--testForBijection ", + ), + testForFiberCardinality=dict( + argstr="--testForFiberCardinality ", + ), + writeXMLPolyDataFile=dict( + argstr="--writeXMLPolyDataFile ", + ), ) inputs = compareTractInclusion.input_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py index fa8bb55297..17d4d19b4c 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py @@ -4,12 +4,26 @@ def test_dtiaverage_inputs(): input_map = dict( - DTI_double=dict(argstr="--DTI_double "), - args=dict(argstr="%s"), - 
environ=dict(nohash=True, usedefault=True), - inputs=dict(argstr="--inputs %s..."), - tensor_output=dict(argstr="--tensor_output %s", hash_files=False), - verbose=dict(argstr="--verbose "), + DTI_double=dict( + argstr="--DTI_double ", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputs=dict( + argstr="--inputs %s...", + ), + tensor_output=dict( + argstr="--tensor_output %s", + hash_files=False, + ), + verbose=dict( + argstr="--verbose ", + ), ) inputs = dtiaverage.input_spec() @@ -19,7 +33,11 @@ def test_dtiaverage_inputs(): def test_dtiaverage_outputs(): - output_map = dict(tensor_output=dict(extensions=None)) + output_map = dict( + tensor_output=dict( + extensions=None, + ), + ) outputs = dtiaverage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py index ef40c6e373..0a36716e87 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py @@ -4,26 +4,75 @@ def test_dtiestim_inputs(): input_map = dict( - B0=dict(argstr="--B0 %s", hash_files=False), - B0_mask_output=dict(argstr="--B0_mask_output %s", hash_files=False), - DTI_double=dict(argstr="--DTI_double "), - args=dict(argstr="%s"), - bad_region_mask=dict(argstr="--bad_region_mask %s", extensions=None), - brain_mask=dict(argstr="--brain_mask %s", extensions=None), - correction=dict(argstr="--correction %s"), - defaultTensor=dict(argstr="--defaultTensor %s", sep=","), - dwi_image=dict(argstr="--dwi_image %s", extensions=None), - environ=dict(nohash=True, usedefault=True), - idwi=dict(argstr="--idwi %s", hash_files=False), - method=dict(argstr="--method %s"), - shiftNeg=dict(argstr="--shiftNeg "), - shiftNegCoeff=dict(argstr="--shiftNegCoeff %f"), - sigma=dict(argstr="--sigma %f"), - step=dict(argstr="--step %f"), - tensor_output=dict(argstr="--tensor_output %s", hash_files=False), - threshold=dict(argstr="--threshold %d"), - verbose=dict(argstr="--verbose "), - weight_iterations=dict(argstr="--weight_iterations %d"), + B0=dict( + argstr="--B0 %s", + hash_files=False, + ), + B0_mask_output=dict( + argstr="--B0_mask_output %s", + hash_files=False, + ), + DTI_double=dict( + argstr="--DTI_double ", + ), + args=dict( + argstr="%s", + ), + bad_region_mask=dict( + argstr="--bad_region_mask %s", + extensions=None, + ), + brain_mask=dict( + argstr="--brain_mask %s", + extensions=None, + ), + correction=dict( + argstr="--correction %s", + ), + defaultTensor=dict( + argstr="--defaultTensor %s", + sep=",", + ), + dwi_image=dict( + argstr="--dwi_image %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + idwi=dict( + argstr="--idwi %s", + hash_files=False, + ), + method=dict( + argstr="--method %s", + ), + shiftNeg=dict( + argstr="--shiftNeg ", + ), + shiftNegCoeff=dict( + argstr="--shiftNegCoeff %f", + ), + sigma=dict( + argstr="--sigma %f", + ), + step=dict( + argstr="--step %f", + ), + tensor_output=dict( + argstr="--tensor_output %s", + hash_files=False, + ), + threshold=dict( + argstr="--threshold %d", + ), + verbose=dict( + argstr="--verbose ", + ), + weight_iterations=dict( + argstr="--weight_iterations %d", + ), ) inputs = dtiestim.input_spec() @@ -34,10 +83,18 @@ def test_dtiestim_inputs(): def test_dtiestim_outputs(): output_map = dict( - B0=dict(extensions=None), - B0_mask_output=dict(extensions=None), - 
idwi=dict(extensions=None), - tensor_output=dict(extensions=None), + B0=dict( + extensions=None, + ), + B0_mask_output=dict( + extensions=None, + ), + idwi=dict( + extensions=None, + ), + tensor_output=dict( + extensions=None, + ), ) outputs = dtiestim.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py index 4fb5b05acb..24352abbe3 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py @@ -4,43 +4,121 @@ def test_dtiprocess_inputs(): input_map = dict( - DTI_double=dict(argstr="--DTI_double "), - RD_output=dict(argstr="--RD_output %s", hash_files=False), - affineitk_file=dict(argstr="--affineitk_file %s", extensions=None), - args=dict(argstr="%s"), - color_fa_output=dict(argstr="--color_fa_output %s", hash_files=False), - correction=dict(argstr="--correction %s"), - deformation_output=dict(argstr="--deformation_output %s", hash_files=False), - dof_file=dict(argstr="--dof_file %s", extensions=None), - dti_image=dict(argstr="--dti_image %s", extensions=None), - environ=dict(nohash=True, usedefault=True), - fa_gradient_output=dict(argstr="--fa_gradient_output %s", hash_files=False), - fa_gradmag_output=dict(argstr="--fa_gradmag_output %s", hash_files=False), - fa_output=dict(argstr="--fa_output %s", hash_files=False), - forward=dict(argstr="--forward %s", extensions=None), + DTI_double=dict( + argstr="--DTI_double ", + ), + RD_output=dict( + argstr="--RD_output %s", + hash_files=False, + ), + affineitk_file=dict( + argstr="--affineitk_file %s", + extensions=None, + ), + args=dict( + argstr="%s", + ), + color_fa_output=dict( + argstr="--color_fa_output %s", + hash_files=False, + ), + correction=dict( + argstr="--correction %s", + ), + deformation_output=dict( + argstr="--deformation_output %s", + hash_files=False, + ), + dof_file=dict( + argstr="--dof_file %s", + extensions=None, + ), + dti_image=dict( + argstr="--dti_image %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fa_gradient_output=dict( + argstr="--fa_gradient_output %s", + hash_files=False, + ), + fa_gradmag_output=dict( + argstr="--fa_gradmag_output %s", + hash_files=False, + ), + fa_output=dict( + argstr="--fa_output %s", + hash_files=False, + ), + forward=dict( + argstr="--forward %s", + extensions=None, + ), frobenius_norm_output=dict( - argstr="--frobenius_norm_output %s", hash_files=False - ), - hField=dict(argstr="--hField "), - interpolation=dict(argstr="--interpolation %s"), - lambda1_output=dict(argstr="--lambda1_output %s", hash_files=False), - lambda2_output=dict(argstr="--lambda2_output %s", hash_files=False), - lambda3_output=dict(argstr="--lambda3_output %s", hash_files=False), - mask=dict(argstr="--mask %s", extensions=None), - md_output=dict(argstr="--md_output %s", hash_files=False), + argstr="--frobenius_norm_output %s", + hash_files=False, + ), + hField=dict( + argstr="--hField ", + ), + interpolation=dict( + argstr="--interpolation %s", + ), + lambda1_output=dict( + argstr="--lambda1_output %s", + hash_files=False, + ), + lambda2_output=dict( + argstr="--lambda2_output %s", + hash_files=False, + ), + lambda3_output=dict( + argstr="--lambda3_output %s", + hash_files=False, + ), + mask=dict( + argstr="--mask %s", + extensions=None, + ), + md_output=dict( + argstr="--md_output %s", + hash_files=False, + ), negative_eigenvector_output=dict( - 
argstr="--negative_eigenvector_output %s", hash_files=False + argstr="--negative_eigenvector_output %s", + hash_files=False, + ), + newdof_file=dict( + argstr="--newdof_file %s", + extensions=None, + ), + outmask=dict( + argstr="--outmask %s", + hash_files=False, ), - newdof_file=dict(argstr="--newdof_file %s", extensions=None), - outmask=dict(argstr="--outmask %s", hash_files=False), principal_eigenvector_output=dict( - argstr="--principal_eigenvector_output %s", hash_files=False + argstr="--principal_eigenvector_output %s", + hash_files=False, + ), + reorientation=dict( + argstr="--reorientation %s", + ), + rot_output=dict( + argstr="--rot_output %s", + hash_files=False, + ), + scalar_float=dict( + argstr="--scalar_float ", + ), + sigma=dict( + argstr="--sigma %f", + ), + verbose=dict( + argstr="--verbose ", ), - reorientation=dict(argstr="--reorientation %s"), - rot_output=dict(argstr="--rot_output %s", hash_files=False), - scalar_float=dict(argstr="--scalar_float "), - sigma=dict(argstr="--sigma %f"), - verbose=dict(argstr="--verbose "), ) inputs = dtiprocess.input_spec() @@ -51,21 +129,51 @@ def test_dtiprocess_inputs(): def test_dtiprocess_outputs(): output_map = dict( - RD_output=dict(extensions=None), - color_fa_output=dict(extensions=None), - deformation_output=dict(extensions=None), - fa_gradient_output=dict(extensions=None), - fa_gradmag_output=dict(extensions=None), - fa_output=dict(extensions=None), - frobenius_norm_output=dict(extensions=None), - lambda1_output=dict(extensions=None), - lambda2_output=dict(extensions=None), - lambda3_output=dict(extensions=None), - md_output=dict(extensions=None), - negative_eigenvector_output=dict(extensions=None), - outmask=dict(extensions=None), - principal_eigenvector_output=dict(extensions=None), - rot_output=dict(extensions=None), + RD_output=dict( + extensions=None, + ), + color_fa_output=dict( + extensions=None, + ), + deformation_output=dict( + extensions=None, + ), + fa_gradient_output=dict( + extensions=None, + ), + fa_gradmag_output=dict( + extensions=None, + ), + fa_output=dict( + extensions=None, + ), + frobenius_norm_output=dict( + extensions=None, + ), + lambda1_output=dict( + extensions=None, + ), + lambda2_output=dict( + extensions=None, + ), + lambda3_output=dict( + extensions=None, + ), + md_output=dict( + extensions=None, + ), + negative_eigenvector_output=dict( + extensions=None, + ), + outmask=dict( + extensions=None, + ), + principal_eigenvector_output=dict( + extensions=None, + ), + rot_output=dict( + extensions=None, + ), ) outputs = dtiprocess.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py index 6856fbb8cd..aaa516e9dc 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py @@ -4,13 +4,30 @@ def test_extractNrrdVectorIndex_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - setImageOrientation=dict(argstr="--setImageOrientation %s"), - vectorIndex=dict(argstr="--vectorIndex %d"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + 
extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + setImageOrientation=dict( + argstr="--setImageOrientation %s", + ), + vectorIndex=dict( + argstr="--vectorIndex %d", + ), ) inputs = extractNrrdVectorIndex.input_spec() @@ -20,7 +37,11 @@ def test_extractNrrdVectorIndex_inputs(): def test_extractNrrdVectorIndex_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = extractNrrdVectorIndex.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py index 3b6e262354..da3e02c37b 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py @@ -4,12 +4,27 @@ def test_gtractAnisotropyMap_inputs(): input_map = dict( - anisotropyType=dict(argstr="--anisotropyType %s"), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputTensorVolume=dict(argstr="--inputTensorVolume %s", extensions=None), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + anisotropyType=dict( + argstr="--anisotropyType %s", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputTensorVolume=dict( + argstr="--inputTensorVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = gtractAnisotropyMap.input_spec() @@ -19,7 +34,11 @@ def test_gtractAnisotropyMap_inputs(): def test_gtractAnisotropyMap_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = gtractAnisotropyMap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py index eb463c2a08..a37b0e65ce 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py @@ -4,13 +4,30 @@ def test_gtractAverageBvalues_inputs(): input_map = dict( - args=dict(argstr="%s"), - averageB0only=dict(argstr="--averageB0only "), - directionsTolerance=dict(argstr="--directionsTolerance %f"), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + args=dict( + argstr="%s", + ), + averageB0only=dict( + argstr="--averageB0only ", + ), + directionsTolerance=dict( + argstr="--directionsTolerance %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = gtractAverageBvalues.input_spec() @@ -20,7 +37,11 @@ def test_gtractAverageBvalues_inputs(): def test_gtractAverageBvalues_outputs(): - output_map = 
dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = gtractAverageBvalues.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py index 720573f0eb..3d6e24aee3 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py @@ -4,13 +4,30 @@ def test_gtractClipAnisotropy_inputs(): input_map = dict( - args=dict(argstr="%s"), - clipFirstSlice=dict(argstr="--clipFirstSlice "), - clipLastSlice=dict(argstr="--clipLastSlice "), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + args=dict( + argstr="%s", + ), + clipFirstSlice=dict( + argstr="--clipFirstSlice ", + ), + clipLastSlice=dict( + argstr="--clipLastSlice ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = gtractClipAnisotropy.input_spec() @@ -20,7 +37,11 @@ def test_gtractClipAnisotropy_inputs(): def test_gtractClipAnisotropy_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = gtractClipAnisotropy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py index e82c6e3ada..1ab780c1b9 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py @@ -4,34 +4,90 @@ def test_gtractCoRegAnatomy_inputs(): input_map = dict( - args=dict(argstr="%s"), - borderSize=dict(argstr="--borderSize %d"), - convergence=dict(argstr="--convergence %f"), - environ=dict(nohash=True, usedefault=True), - gradientTolerance=dict(argstr="--gradientTolerance %f"), - gridSize=dict(argstr="--gridSize %s", sep=","), + args=dict( + argstr="%s", + ), + borderSize=dict( + argstr="--borderSize %d", + ), + convergence=dict( + argstr="--convergence %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gradientTolerance=dict( + argstr="--gradientTolerance %f", + ), + gridSize=dict( + argstr="--gridSize %s", + sep=",", + ), inputAnatomicalVolume=dict( - argstr="--inputAnatomicalVolume %s", extensions=None - ), - inputRigidTransform=dict(argstr="--inputRigidTransform %s", extensions=None), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - maxBSplineDisplacement=dict(argstr="--maxBSplineDisplacement %f"), - maximumStepSize=dict(argstr="--maximumStepSize %f"), - minimumStepSize=dict(argstr="--minimumStepSize %f"), - numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d"), - numberOfIterations=dict(argstr="--numberOfIterations %d"), - numberOfSamples=dict(argstr="--numberOfSamples %d"), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputTransformName=dict(argstr="--outputTransformName %s", hash_files=False), - 
relaxationFactor=dict(argstr="--relaxationFactor %f"), - samplingPercentage=dict(argstr="--samplingPercentage %f"), - spatialScale=dict(argstr="--spatialScale %d"), - transformType=dict(argstr="--transformType %s"), - translationScale=dict(argstr="--translationScale %f"), - useCenterOfHeadAlign=dict(argstr="--useCenterOfHeadAlign "), - useGeometryAlign=dict(argstr="--useGeometryAlign "), - useMomentsAlign=dict(argstr="--useMomentsAlign "), - vectorIndex=dict(argstr="--vectorIndex %d"), + argstr="--inputAnatomicalVolume %s", + extensions=None, + ), + inputRigidTransform=dict( + argstr="--inputRigidTransform %s", + extensions=None, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + maxBSplineDisplacement=dict( + argstr="--maxBSplineDisplacement %f", + ), + maximumStepSize=dict( + argstr="--maximumStepSize %f", + ), + minimumStepSize=dict( + argstr="--minimumStepSize %f", + ), + numberOfHistogramBins=dict( + argstr="--numberOfHistogramBins %d", + ), + numberOfIterations=dict( + argstr="--numberOfIterations %d", + ), + numberOfSamples=dict( + argstr="--numberOfSamples %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputTransformName=dict( + argstr="--outputTransformName %s", + hash_files=False, + ), + relaxationFactor=dict( + argstr="--relaxationFactor %f", + ), + samplingPercentage=dict( + argstr="--samplingPercentage %f", + ), + spatialScale=dict( + argstr="--spatialScale %d", + ), + transformType=dict( + argstr="--transformType %s", + ), + translationScale=dict( + argstr="--translationScale %f", + ), + useCenterOfHeadAlign=dict( + argstr="--useCenterOfHeadAlign ", + ), + useGeometryAlign=dict( + argstr="--useGeometryAlign ", + ), + useMomentsAlign=dict( + argstr="--useMomentsAlign ", + ), + vectorIndex=dict( + argstr="--vectorIndex %d", + ), ) inputs = gtractCoRegAnatomy.input_spec() @@ -41,7 +97,11 @@ def test_gtractCoRegAnatomy_inputs(): def test_gtractCoRegAnatomy_outputs(): - output_map = dict(outputTransformName=dict(extensions=None)) + output_map = dict( + outputTransformName=dict( + extensions=None, + ), + ) outputs = gtractCoRegAnatomy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py index 73815728a8..d2a6ca3288 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py @@ -4,12 +4,26 @@ def test_gtractConcatDwi_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - ignoreOrigins=dict(argstr="--ignoreOrigins "), - inputVolume=dict(argstr="--inputVolume %s..."), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ignoreOrigins=dict( + argstr="--ignoreOrigins ", + ), + inputVolume=dict( + argstr="--inputVolume %s...", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = gtractConcatDwi.input_spec() @@ -19,7 +33,11 @@ def test_gtractConcatDwi_inputs(): def test_gtractConcatDwi_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = gtractConcatDwi.output_spec() for 
key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py index 0f29823049..ba03837015 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py @@ -4,12 +4,28 @@ def test_gtractCopyImageOrientation_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputReferenceVolume=dict(argstr="--inputReferenceVolume %s", extensions=None), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputReferenceVolume=dict( + argstr="--inputReferenceVolume %s", + extensions=None, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = gtractCopyImageOrientation.input_spec() @@ -19,7 +35,11 @@ def test_gtractCopyImageOrientation_inputs(): def test_gtractCopyImageOrientation_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = gtractCopyImageOrientation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py index 0dab7966ce..0122bf7636 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py @@ -4,24 +4,65 @@ def test_gtractCoregBvalues_inputs(): input_map = dict( - args=dict(argstr="%s"), - debugLevel=dict(argstr="--debugLevel %d"), - eddyCurrentCorrection=dict(argstr="--eddyCurrentCorrection "), - environ=dict(nohash=True, usedefault=True), - fixedVolume=dict(argstr="--fixedVolume %s", extensions=None), - fixedVolumeIndex=dict(argstr="--fixedVolumeIndex %d"), - maximumStepSize=dict(argstr="--maximumStepSize %f"), - minimumStepSize=dict(argstr="--minimumStepSize %f"), - movingVolume=dict(argstr="--movingVolume %s", extensions=None), - numberOfIterations=dict(argstr="--numberOfIterations %d"), - numberOfSpatialSamples=dict(argstr="--numberOfSpatialSamples %d"), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputTransform=dict(argstr="--outputTransform %s", hash_files=False), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - registerB0Only=dict(argstr="--registerB0Only "), - relaxationFactor=dict(argstr="--relaxationFactor %f"), - samplingPercentage=dict(argstr="--samplingPercentage %f"), - spatialScale=dict(argstr="--spatialScale %f"), + args=dict( + argstr="%s", + ), + debugLevel=dict( + argstr="--debugLevel %d", + ), + eddyCurrentCorrection=dict( + argstr="--eddyCurrentCorrection ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedVolume=dict( + argstr="--fixedVolume %s", + extensions=None, + ), + fixedVolumeIndex=dict( + argstr="--fixedVolumeIndex %d", + ), + maximumStepSize=dict( + argstr="--maximumStepSize %f", + ), + minimumStepSize=dict( + 
argstr="--minimumStepSize %f", + ), + movingVolume=dict( + argstr="--movingVolume %s", + extensions=None, + ), + numberOfIterations=dict( + argstr="--numberOfIterations %d", + ), + numberOfSpatialSamples=dict( + argstr="--numberOfSpatialSamples %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputTransform=dict( + argstr="--outputTransform %s", + hash_files=False, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + registerB0Only=dict( + argstr="--registerB0Only ", + ), + relaxationFactor=dict( + argstr="--relaxationFactor %f", + ), + samplingPercentage=dict( + argstr="--samplingPercentage %f", + ), + spatialScale=dict( + argstr="--spatialScale %f", + ), ) inputs = gtractCoregBvalues.input_spec() @@ -32,7 +73,12 @@ def test_gtractCoregBvalues_inputs(): def test_gtractCoregBvalues_outputs(): output_map = dict( - outputTransform=dict(extensions=None), outputVolume=dict(extensions=None) + outputTransform=dict( + extensions=None, + ), + outputVolume=dict( + extensions=None, + ), ) outputs = gtractCoregBvalues.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py index 7353cee2c3..7d086cd7c0 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py @@ -4,22 +4,48 @@ def test_gtractCostFastMarching_inputs(): input_map = dict( - anisotropyWeight=dict(argstr="--anisotropyWeight %f"), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + anisotropyWeight=dict( + argstr="--anisotropyWeight %f", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), inputAnisotropyVolume=dict( - argstr="--inputAnisotropyVolume %s", extensions=None + argstr="--inputAnisotropyVolume %s", + extensions=None, ), inputStartingSeedsLabelMapVolume=dict( - argstr="--inputStartingSeedsLabelMapVolume %s", extensions=None - ), - inputTensorVolume=dict(argstr="--inputTensorVolume %s", extensions=None), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputCostVolume=dict(argstr="--outputCostVolume %s", hash_files=False), - outputSpeedVolume=dict(argstr="--outputSpeedVolume %s", hash_files=False), - seedThreshold=dict(argstr="--seedThreshold %f"), - startingSeedsLabel=dict(argstr="--startingSeedsLabel %d"), - stoppingValue=dict(argstr="--stoppingValue %f"), + argstr="--inputStartingSeedsLabelMapVolume %s", + extensions=None, + ), + inputTensorVolume=dict( + argstr="--inputTensorVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputCostVolume=dict( + argstr="--outputCostVolume %s", + hash_files=False, + ), + outputSpeedVolume=dict( + argstr="--outputSpeedVolume %s", + hash_files=False, + ), + seedThreshold=dict( + argstr="--seedThreshold %f", + ), + startingSeedsLabel=dict( + argstr="--startingSeedsLabel %d", + ), + stoppingValue=dict( + argstr="--stoppingValue %f", + ), ) inputs = gtractCostFastMarching.input_spec() @@ -30,7 +56,12 @@ def test_gtractCostFastMarching_inputs(): def test_gtractCostFastMarching_outputs(): output_map = dict( - outputCostVolume=dict(extensions=None), outputSpeedVolume=dict(extensions=None) + outputCostVolume=dict( + extensions=None, + ), + outputSpeedVolume=dict( + extensions=None, + ), ) outputs = gtractCostFastMarching.output_spec() diff --git 
a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py index 5d0b75f7e6..1990cc2057 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py @@ -4,13 +4,30 @@ def test_gtractCreateGuideFiber_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputFiber=dict(argstr="--inputFiber %s", extensions=None), - numberOfPoints=dict(argstr="--numberOfPoints %d"), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputFiber=dict(argstr="--outputFiber %s", hash_files=False), - writeXMLPolyDataFile=dict(argstr="--writeXMLPolyDataFile "), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputFiber=dict( + argstr="--inputFiber %s", + extensions=None, + ), + numberOfPoints=dict( + argstr="--numberOfPoints %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputFiber=dict( + argstr="--outputFiber %s", + hash_files=False, + ), + writeXMLPolyDataFile=dict( + argstr="--writeXMLPolyDataFile ", + ), ) inputs = gtractCreateGuideFiber.input_spec() @@ -20,7 +37,11 @@ def test_gtractCreateGuideFiber_inputs(): def test_gtractCreateGuideFiber_outputs(): - output_map = dict(outputFiber=dict(extensions=None)) + output_map = dict( + outputFiber=dict( + extensions=None, + ), + ) outputs = gtractCreateGuideFiber.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py index 202cb0f86d..4059d45f6a 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py @@ -4,26 +4,60 @@ def test_gtractFastMarchingTracking_inputs(): input_map = dict( - args=dict(argstr="%s"), - costStepSize=dict(argstr="--costStepSize %f"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + costStepSize=dict( + argstr="--costStepSize %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), inputAnisotropyVolume=dict( - argstr="--inputAnisotropyVolume %s", extensions=None + argstr="--inputAnisotropyVolume %s", + extensions=None, + ), + inputCostVolume=dict( + argstr="--inputCostVolume %s", + extensions=None, ), - inputCostVolume=dict(argstr="--inputCostVolume %s", extensions=None), inputStartingSeedsLabelMapVolume=dict( - argstr="--inputStartingSeedsLabelMapVolume %s", extensions=None - ), - inputTensorVolume=dict(argstr="--inputTensorVolume %s", extensions=None), - maximumStepSize=dict(argstr="--maximumStepSize %f"), - minimumStepSize=dict(argstr="--minimumStepSize %f"), - numberOfIterations=dict(argstr="--numberOfIterations %d"), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputTract=dict(argstr="--outputTract %s", hash_files=False), - seedThreshold=dict(argstr="--seedThreshold %f"), - startingSeedsLabel=dict(argstr="--startingSeedsLabel %d"), - trackingThreshold=dict(argstr="--trackingThreshold %f"), - writeXMLPolyDataFile=dict(argstr="--writeXMLPolyDataFile "), + argstr="--inputStartingSeedsLabelMapVolume %s", + extensions=None, + ), + inputTensorVolume=dict( + argstr="--inputTensorVolume %s", + extensions=None, + ), + maximumStepSize=dict( + 
argstr="--maximumStepSize %f", + ), + minimumStepSize=dict( + argstr="--minimumStepSize %f", + ), + numberOfIterations=dict( + argstr="--numberOfIterations %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputTract=dict( + argstr="--outputTract %s", + hash_files=False, + ), + seedThreshold=dict( + argstr="--seedThreshold %f", + ), + startingSeedsLabel=dict( + argstr="--startingSeedsLabel %d", + ), + trackingThreshold=dict( + argstr="--trackingThreshold %f", + ), + writeXMLPolyDataFile=dict( + argstr="--writeXMLPolyDataFile ", + ), ) inputs = gtractFastMarchingTracking.input_spec() @@ -33,7 +67,11 @@ def test_gtractFastMarchingTracking_inputs(): def test_gtractFastMarchingTracking_outputs(): - output_map = dict(outputTract=dict(extensions=None)) + output_map = dict( + outputTract=dict( + extensions=None, + ), + ) outputs = gtractFastMarchingTracking.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py index 4e3846fb7e..9837774d3e 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py @@ -4,42 +4,103 @@ def test_gtractFiberTracking_inputs(): input_map = dict( - args=dict(argstr="%s"), - branchingAngle=dict(argstr="--branchingAngle %f"), - branchingThreshold=dict(argstr="--branchingThreshold %f"), - curvatureThreshold=dict(argstr="--curvatureThreshold %f"), - endingSeedsLabel=dict(argstr="--endingSeedsLabel %d"), - environ=dict(nohash=True, usedefault=True), - guidedCurvatureThreshold=dict(argstr="--guidedCurvatureThreshold %f"), + args=dict( + argstr="%s", + ), + branchingAngle=dict( + argstr="--branchingAngle %f", + ), + branchingThreshold=dict( + argstr="--branchingThreshold %f", + ), + curvatureThreshold=dict( + argstr="--curvatureThreshold %f", + ), + endingSeedsLabel=dict( + argstr="--endingSeedsLabel %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + guidedCurvatureThreshold=dict( + argstr="--guidedCurvatureThreshold %f", + ), inputAnisotropyVolume=dict( - argstr="--inputAnisotropyVolume %s", extensions=None + argstr="--inputAnisotropyVolume %s", + extensions=None, ), inputEndingSeedsLabelMapVolume=dict( - argstr="--inputEndingSeedsLabelMapVolume %s", extensions=None + argstr="--inputEndingSeedsLabelMapVolume %s", + extensions=None, ), inputStartingSeedsLabelMapVolume=dict( - argstr="--inputStartingSeedsLabelMapVolume %s", extensions=None - ), - inputTensorVolume=dict(argstr="--inputTensorVolume %s", extensions=None), - inputTract=dict(argstr="--inputTract %s", extensions=None), - maximumBranchPoints=dict(argstr="--maximumBranchPoints %d"), - maximumGuideDistance=dict(argstr="--maximumGuideDistance %f"), - maximumLength=dict(argstr="--maximumLength %f"), - minimumLength=dict(argstr="--minimumLength %f"), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputTract=dict(argstr="--outputTract %s", hash_files=False), - randomSeed=dict(argstr="--randomSeed %d"), - seedThreshold=dict(argstr="--seedThreshold %f"), - startingSeedsLabel=dict(argstr="--startingSeedsLabel %d"), - stepSize=dict(argstr="--stepSize %f"), - tendF=dict(argstr="--tendF %f"), - tendG=dict(argstr="--tendG %f"), - trackingMethod=dict(argstr="--trackingMethod %s"), - trackingThreshold=dict(argstr="--trackingThreshold %f"), - useLoopDetection=dict(argstr="--useLoopDetection "), - 
useRandomWalk=dict(argstr="--useRandomWalk "), - useTend=dict(argstr="--useTend "), - writeXMLPolyDataFile=dict(argstr="--writeXMLPolyDataFile "), + argstr="--inputStartingSeedsLabelMapVolume %s", + extensions=None, + ), + inputTensorVolume=dict( + argstr="--inputTensorVolume %s", + extensions=None, + ), + inputTract=dict( + argstr="--inputTract %s", + extensions=None, + ), + maximumBranchPoints=dict( + argstr="--maximumBranchPoints %d", + ), + maximumGuideDistance=dict( + argstr="--maximumGuideDistance %f", + ), + maximumLength=dict( + argstr="--maximumLength %f", + ), + minimumLength=dict( + argstr="--minimumLength %f", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputTract=dict( + argstr="--outputTract %s", + hash_files=False, + ), + randomSeed=dict( + argstr="--randomSeed %d", + ), + seedThreshold=dict( + argstr="--seedThreshold %f", + ), + startingSeedsLabel=dict( + argstr="--startingSeedsLabel %d", + ), + stepSize=dict( + argstr="--stepSize %f", + ), + tendF=dict( + argstr="--tendF %f", + ), + tendG=dict( + argstr="--tendG %f", + ), + trackingMethod=dict( + argstr="--trackingMethod %s", + ), + trackingThreshold=dict( + argstr="--trackingThreshold %f", + ), + useLoopDetection=dict( + argstr="--useLoopDetection ", + ), + useRandomWalk=dict( + argstr="--useRandomWalk ", + ), + useTend=dict( + argstr="--useTend ", + ), + writeXMLPolyDataFile=dict( + argstr="--writeXMLPolyDataFile ", + ), ) inputs = gtractFiberTracking.input_spec() @@ -49,7 +110,11 @@ def test_gtractFiberTracking_inputs(): def test_gtractFiberTracking_outputs(): - output_map = dict(outputTract=dict(extensions=None)) + output_map = dict( + outputTract=dict( + extensions=None, + ), + ) outputs = gtractFiberTracking.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py index 57eadb5d08..64b896e0ca 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py @@ -4,12 +4,28 @@ def test_gtractImageConformity_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputReferenceVolume=dict(argstr="--inputReferenceVolume %s", extensions=None), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputReferenceVolume=dict( + argstr="--inputReferenceVolume %s", + extensions=None, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = gtractImageConformity.input_spec() @@ -19,7 +35,11 @@ def test_gtractImageConformity_inputs(): def test_gtractImageConformity_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = gtractImageConformity.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py index 
9271a04262..ed43c90dc6 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py @@ -4,13 +4,32 @@ def test_gtractInvertBSplineTransform_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputReferenceVolume=dict(argstr="--inputReferenceVolume %s", extensions=None), - inputTransform=dict(argstr="--inputTransform %s", extensions=None), - landmarkDensity=dict(argstr="--landmarkDensity %s", sep=","), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputTransform=dict(argstr="--outputTransform %s", hash_files=False), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputReferenceVolume=dict( + argstr="--inputReferenceVolume %s", + extensions=None, + ), + inputTransform=dict( + argstr="--inputTransform %s", + extensions=None, + ), + landmarkDensity=dict( + argstr="--landmarkDensity %s", + sep=",", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputTransform=dict( + argstr="--outputTransform %s", + hash_files=False, + ), ) inputs = gtractInvertBSplineTransform.input_spec() @@ -20,7 +39,11 @@ def test_gtractInvertBSplineTransform_inputs(): def test_gtractInvertBSplineTransform_outputs(): - output_map = dict(outputTransform=dict(extensions=None)) + output_map = dict( + outputTransform=dict( + extensions=None, + ), + ) outputs = gtractInvertBSplineTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py index 033a325642..83129902aa 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py @@ -4,13 +4,31 @@ def test_gtractInvertDisplacementField_inputs(): input_map = dict( - args=dict(argstr="%s"), - baseImage=dict(argstr="--baseImage %s", extensions=None), - deformationImage=dict(argstr="--deformationImage %s", extensions=None), - environ=dict(nohash=True, usedefault=True), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - subsamplingFactor=dict(argstr="--subsamplingFactor %d"), + args=dict( + argstr="%s", + ), + baseImage=dict( + argstr="--baseImage %s", + extensions=None, + ), + deformationImage=dict( + argstr="--deformationImage %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + subsamplingFactor=dict( + argstr="--subsamplingFactor %d", + ), ) inputs = gtractInvertDisplacementField.input_spec() @@ -20,7 +38,11 @@ def test_gtractInvertDisplacementField_inputs(): def test_gtractInvertDisplacementField_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = gtractInvertDisplacementField.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py index e6030498f3..73ba9c576f 100644 --- 
a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py @@ -4,11 +4,24 @@ def test_gtractInvertRigidTransform_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputTransform=dict(argstr="--inputTransform %s", extensions=None), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputTransform=dict(argstr="--outputTransform %s", hash_files=False), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputTransform=dict( + argstr="--inputTransform %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputTransform=dict( + argstr="--outputTransform %s", + hash_files=False, + ), ) inputs = gtractInvertRigidTransform.input_spec() @@ -18,7 +31,11 @@ def test_gtractInvertRigidTransform_inputs(): def test_gtractInvertRigidTransform_outputs(): - output_map = dict(outputTransform=dict(extensions=None)) + output_map = dict( + outputTransform=dict( + extensions=None, + ), + ) outputs = gtractInvertRigidTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py index 14477f1b61..7b38abe0b5 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py @@ -4,18 +4,35 @@ def test_gtractResampleAnisotropy_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), inputAnatomicalVolume=dict( - argstr="--inputAnatomicalVolume %s", extensions=None + argstr="--inputAnatomicalVolume %s", + extensions=None, ), inputAnisotropyVolume=dict( - argstr="--inputAnisotropyVolume %s", extensions=None + argstr="--inputAnisotropyVolume %s", + extensions=None, + ), + inputTransform=dict( + argstr="--inputTransform %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + transformType=dict( + argstr="--transformType %s", ), - inputTransform=dict(argstr="--inputTransform %s", extensions=None), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - transformType=dict(argstr="--transformType %s"), ) inputs = gtractResampleAnisotropy.input_spec() @@ -25,7 +42,11 @@ def test_gtractResampleAnisotropy_inputs(): def test_gtractResampleAnisotropy_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = gtractResampleAnisotropy.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py index c0b27b8ad9..7271e8a42a 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py @@ -4,17 +4,38 @@ def test_gtractResampleB0_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( 
+ argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), inputAnatomicalVolume=dict( - argstr="--inputAnatomicalVolume %s", extensions=None - ), - inputTransform=dict(argstr="--inputTransform %s", extensions=None), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - transformType=dict(argstr="--transformType %s"), - vectorIndex=dict(argstr="--vectorIndex %d"), + argstr="--inputAnatomicalVolume %s", + extensions=None, + ), + inputTransform=dict( + argstr="--inputTransform %s", + extensions=None, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + transformType=dict( + argstr="--transformType %s", + ), + vectorIndex=dict( + argstr="--vectorIndex %d", + ), ) inputs = gtractResampleB0.input_spec() @@ -24,7 +45,11 @@ def test_gtractResampleB0_inputs(): def test_gtractResampleB0_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = gtractResampleB0.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py index 8ec22d7e7d..6649ecfc1f 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py @@ -4,14 +4,35 @@ def test_gtractResampleCodeImage_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputCodeVolume=dict(argstr="--inputCodeVolume %s", extensions=None), - inputReferenceVolume=dict(argstr="--inputReferenceVolume %s", extensions=None), - inputTransform=dict(argstr="--inputTransform %s", extensions=None), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - transformType=dict(argstr="--transformType %s"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputCodeVolume=dict( + argstr="--inputCodeVolume %s", + extensions=None, + ), + inputReferenceVolume=dict( + argstr="--inputReferenceVolume %s", + extensions=None, + ), + inputTransform=dict( + argstr="--inputTransform %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + transformType=dict( + argstr="--transformType %s", + ), ) inputs = gtractResampleCodeImage.input_spec() @@ -21,7 +42,11 @@ def test_gtractResampleCodeImage_inputs(): def test_gtractResampleCodeImage_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = gtractResampleCodeImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py index 827433588e..3b61312e54 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py +++ 
b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py @@ -4,17 +4,47 @@ def test_gtractResampleDWIInPlace_inputs(): input_map = dict( - args=dict(argstr="%s"), - debugLevel=dict(argstr="--debugLevel %d"), - environ=dict(nohash=True, usedefault=True), - imageOutputSize=dict(argstr="--imageOutputSize %s", sep=","), - inputTransform=dict(argstr="--inputTransform %s", extensions=None), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputResampledB0=dict(argstr="--outputResampledB0 %s", hash_files=False), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - referenceVolume=dict(argstr="--referenceVolume %s", extensions=None), - warpDWITransform=dict(argstr="--warpDWITransform %s", extensions=None), + args=dict( + argstr="%s", + ), + debugLevel=dict( + argstr="--debugLevel %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + imageOutputSize=dict( + argstr="--imageOutputSize %s", + sep=",", + ), + inputTransform=dict( + argstr="--inputTransform %s", + extensions=None, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputResampledB0=dict( + argstr="--outputResampledB0 %s", + hash_files=False, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + referenceVolume=dict( + argstr="--referenceVolume %s", + extensions=None, + ), + warpDWITransform=dict( + argstr="--warpDWITransform %s", + extensions=None, + ), ) inputs = gtractResampleDWIInPlace.input_spec() @@ -25,7 +55,12 @@ def test_gtractResampleDWIInPlace_inputs(): def test_gtractResampleDWIInPlace_outputs(): output_map = dict( - outputResampledB0=dict(extensions=None), outputVolume=dict(extensions=None) + outputResampledB0=dict( + extensions=None, + ), + outputVolume=dict( + extensions=None, + ), ) outputs = gtractResampleDWIInPlace.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py index 2c342945c8..d64d2d8581 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py @@ -4,18 +4,35 @@ def test_gtractResampleFibers_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), inputForwardDeformationFieldVolume=dict( - argstr="--inputForwardDeformationFieldVolume %s", extensions=None + argstr="--inputForwardDeformationFieldVolume %s", + extensions=None, ), inputReverseDeformationFieldVolume=dict( - argstr="--inputReverseDeformationFieldVolume %s", extensions=None + argstr="--inputReverseDeformationFieldVolume %s", + extensions=None, + ), + inputTract=dict( + argstr="--inputTract %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputTract=dict( + argstr="--outputTract %s", + hash_files=False, + ), + writeXMLPolyDataFile=dict( + argstr="--writeXMLPolyDataFile ", ), - inputTract=dict(argstr="--inputTract %s", extensions=None), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputTract=dict(argstr="--outputTract %s", hash_files=False), - writeXMLPolyDataFile=dict(argstr="--writeXMLPolyDataFile "), ) inputs = gtractResampleFibers.input_spec() @@ -25,7 +42,11 @@ def 
test_gtractResampleFibers_inputs(): def test_gtractResampleFibers_outputs(): - output_map = dict(outputTract=dict(extensions=None)) + output_map = dict( + outputTract=dict( + extensions=None, + ), + ) outputs = gtractResampleFibers.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py index d21d4a11a6..eabe7c6f50 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py @@ -4,22 +4,54 @@ def test_gtractTensor_inputs(): input_map = dict( - applyMeasurementFrame=dict(argstr="--applyMeasurementFrame "), - args=dict(argstr="%s"), - b0Index=dict(argstr="--b0Index %d"), + applyMeasurementFrame=dict( + argstr="--applyMeasurementFrame ", + ), + args=dict( + argstr="%s", + ), + b0Index=dict( + argstr="--b0Index %d", + ), backgroundSuppressingThreshold=dict( - argstr="--backgroundSuppressingThreshold %d" - ), - environ=dict(nohash=True, usedefault=True), - ignoreIndex=dict(argstr="--ignoreIndex %s", sep=","), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - maskProcessingMode=dict(argstr="--maskProcessingMode %s"), - maskVolume=dict(argstr="--maskVolume %s", extensions=None), - medianFilterSize=dict(argstr="--medianFilterSize %s", sep=","), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - resampleIsotropic=dict(argstr="--resampleIsotropic "), - size=dict(argstr="--size %f"), + argstr="--backgroundSuppressingThreshold %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ignoreIndex=dict( + argstr="--ignoreIndex %s", + sep=",", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + maskProcessingMode=dict( + argstr="--maskProcessingMode %s", + ), + maskVolume=dict( + argstr="--maskVolume %s", + extensions=None, + ), + medianFilterSize=dict( + argstr="--medianFilterSize %s", + sep=",", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + resampleIsotropic=dict( + argstr="--resampleIsotropic ", + ), + size=dict( + argstr="--size %f", + ), ) inputs = gtractTensor.input_spec() @@ -29,7 +61,11 @@ def test_gtractTensor_inputs(): def test_gtractTensor_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = gtractTensor.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py index 234fbb38e8..64daec32fb 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py @@ -4,13 +4,27 @@ def test_gtractTransformToDisplacementField_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputReferenceVolume=dict(argstr="--inputReferenceVolume %s", extensions=None), - inputTransform=dict(argstr="--inputTransform %s", extensions=None), - numberOfThreads=dict(argstr="--numberOfThreads %d"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), 
+ inputReferenceVolume=dict( + argstr="--inputReferenceVolume %s", + extensions=None, + ), + inputTransform=dict( + argstr="--inputTransform %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), outputDeformationFieldVolume=dict( - argstr="--outputDeformationFieldVolume %s", hash_files=False + argstr="--outputDeformationFieldVolume %s", + hash_files=False, ), ) inputs = gtractTransformToDisplacementField.input_spec() @@ -21,7 +35,11 @@ def test_gtractTransformToDisplacementField_inputs(): def test_gtractTransformToDisplacementField_outputs(): - output_map = dict(outputDeformationFieldVolume=dict(extensions=None)) + output_map = dict( + outputDeformationFieldVolume=dict( + extensions=None, + ), + ) outputs = gtractTransformToDisplacementField.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py index b37f51e28c..6638ef34cd 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py @@ -4,12 +4,27 @@ def test_maxcurvature_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - image=dict(argstr="--image %s", extensions=None), - output=dict(argstr="--output %s", hash_files=False), - sigma=dict(argstr="--sigma %f"), - verbose=dict(argstr="--verbose "), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + image=dict( + argstr="--image %s", + extensions=None, + ), + output=dict( + argstr="--output %s", + hash_files=False, + ), + sigma=dict( + argstr="--sigma %f", + ), + verbose=dict( + argstr="--verbose ", + ), ) inputs = maxcurvature.input_spec() @@ -19,7 +34,11 @@ def test_maxcurvature_inputs(): def test_maxcurvature_outputs(): - output_map = dict(output=dict(extensions=None)) + output_map = dict( + output=dict( + extensions=None, + ), + ) outputs = maxcurvature.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py index 62e18e16d0..92050c6e43 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py @@ -4,43 +4,118 @@ def test_UKFTractography_inputs(): input_map = dict( - Ql=dict(argstr="--Ql %f"), - Qm=dict(argstr="--Qm %f"), - Qw=dict(argstr="--Qw %f"), - Rs=dict(argstr="--Rs %f"), - args=dict(argstr="%s"), - dwiFile=dict(argstr="--dwiFile %s", extensions=None), - environ=dict(nohash=True, usedefault=True), - freeWater=dict(argstr="--freeWater "), - fullTensorModel=dict(argstr="--fullTensorModel "), - labels=dict(argstr="--labels %s", sep=","), - maskFile=dict(argstr="--maskFile %s", extensions=None), - maxBranchingAngle=dict(argstr="--maxBranchingAngle %f"), - maxHalfFiberLength=dict(argstr="--maxHalfFiberLength %f"), - minBranchingAngle=dict(argstr="--minBranchingAngle %f"), - minFA=dict(argstr="--minFA %f"), - minGA=dict(argstr="--minGA %f"), - numTensor=dict(argstr="--numTensor %s"), - numThreads=dict(argstr="--numThreads %d"), - recordCovariance=dict(argstr="--recordCovariance "), - recordFA=dict(argstr="--recordFA "), - recordFreeWater=dict(argstr="--recordFreeWater "), - 
recordLength=dict(argstr="--recordLength %f"), - recordNMSE=dict(argstr="--recordNMSE "), - recordState=dict(argstr="--recordState "), - recordTensors=dict(argstr="--recordTensors "), - recordTrace=dict(argstr="--recordTrace "), - seedFALimit=dict(argstr="--seedFALimit %f"), - seedsFile=dict(argstr="--seedsFile %s", extensions=None), - seedsPerVoxel=dict(argstr="--seedsPerVoxel %d"), - stepLength=dict(argstr="--stepLength %f"), - storeGlyphs=dict(argstr="--storeGlyphs "), - tracts=dict(argstr="--tracts %s", hash_files=False), + Ql=dict( + argstr="--Ql %f", + ), + Qm=dict( + argstr="--Qm %f", + ), + Qw=dict( + argstr="--Qw %f", + ), + Rs=dict( + argstr="--Rs %f", + ), + args=dict( + argstr="%s", + ), + dwiFile=dict( + argstr="--dwiFile %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + freeWater=dict( + argstr="--freeWater ", + ), + fullTensorModel=dict( + argstr="--fullTensorModel ", + ), + labels=dict( + argstr="--labels %s", + sep=",", + ), + maskFile=dict( + argstr="--maskFile %s", + extensions=None, + ), + maxBranchingAngle=dict( + argstr="--maxBranchingAngle %f", + ), + maxHalfFiberLength=dict( + argstr="--maxHalfFiberLength %f", + ), + minBranchingAngle=dict( + argstr="--minBranchingAngle %f", + ), + minFA=dict( + argstr="--minFA %f", + ), + minGA=dict( + argstr="--minGA %f", + ), + numTensor=dict( + argstr="--numTensor %s", + ), + numThreads=dict( + argstr="--numThreads %d", + ), + recordCovariance=dict( + argstr="--recordCovariance ", + ), + recordFA=dict( + argstr="--recordFA ", + ), + recordFreeWater=dict( + argstr="--recordFreeWater ", + ), + recordLength=dict( + argstr="--recordLength %f", + ), + recordNMSE=dict( + argstr="--recordNMSE ", + ), + recordState=dict( + argstr="--recordState ", + ), + recordTensors=dict( + argstr="--recordTensors ", + ), + recordTrace=dict( + argstr="--recordTrace ", + ), + seedFALimit=dict( + argstr="--seedFALimit %f", + ), + seedsFile=dict( + argstr="--seedsFile %s", + extensions=None, + ), + seedsPerVoxel=dict( + argstr="--seedsPerVoxel %d", + ), + stepLength=dict( + argstr="--stepLength %f", + ), + storeGlyphs=dict( + argstr="--storeGlyphs ", + ), + tracts=dict( + argstr="--tracts %s", + hash_files=False, + ), tractsWithSecondTensor=dict( - argstr="--tractsWithSecondTensor %s", hash_files=False + argstr="--tractsWithSecondTensor %s", + hash_files=False, + ), + writeAsciiTracts=dict( + argstr="--writeAsciiTracts ", + ), + writeUncompressedTracts=dict( + argstr="--writeUncompressedTracts ", ), - writeAsciiTracts=dict(argstr="--writeAsciiTracts "), - writeUncompressedTracts=dict(argstr="--writeUncompressedTracts "), ) inputs = UKFTractography.input_spec() @@ -51,7 +126,12 @@ def test_UKFTractography_inputs(): def test_UKFTractography_outputs(): output_map = dict( - tracts=dict(extensions=None), tractsWithSecondTensor=dict(extensions=None) + tracts=dict( + extensions=None, + ), + tractsWithSecondTensor=dict( + extensions=None, + ), ) outputs = UKFTractography.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py index 63fcdccc71..506d3f8f90 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py @@ -4,22 +4,61 @@ def test_fiberprocess_inputs(): input_map = dict( - args=dict(argstr="%s"), - displacement_field=dict(argstr="--displacement_field %s", 
extensions=None), - environ=dict(nohash=True, usedefault=True), - fiber_file=dict(argstr="--fiber_file %s", extensions=None), - fiber_output=dict(argstr="--fiber_output %s", hash_files=False), - fiber_radius=dict(argstr="--fiber_radius %f"), - h_field=dict(argstr="--h_field %s", extensions=None), - index_space=dict(argstr="--index_space "), - noDataChange=dict(argstr="--noDataChange "), - no_warp=dict(argstr="--no_warp "), - saveProperties=dict(argstr="--saveProperties "), - tensor_volume=dict(argstr="--tensor_volume %s", extensions=None), - verbose=dict(argstr="--verbose "), - voxel_label=dict(argstr="--voxel_label %d"), - voxelize=dict(argstr="--voxelize %s", hash_files=False), - voxelize_count_fibers=dict(argstr="--voxelize_count_fibers "), + args=dict( + argstr="%s", + ), + displacement_field=dict( + argstr="--displacement_field %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fiber_file=dict( + argstr="--fiber_file %s", + extensions=None, + ), + fiber_output=dict( + argstr="--fiber_output %s", + hash_files=False, + ), + fiber_radius=dict( + argstr="--fiber_radius %f", + ), + h_field=dict( + argstr="--h_field %s", + extensions=None, + ), + index_space=dict( + argstr="--index_space ", + ), + noDataChange=dict( + argstr="--noDataChange ", + ), + no_warp=dict( + argstr="--no_warp ", + ), + saveProperties=dict( + argstr="--saveProperties ", + ), + tensor_volume=dict( + argstr="--tensor_volume %s", + extensions=None, + ), + verbose=dict( + argstr="--verbose ", + ), + voxel_label=dict( + argstr="--voxel_label %d", + ), + voxelize=dict( + argstr="--voxelize %s", + hash_files=False, + ), + voxelize_count_fibers=dict( + argstr="--voxelize_count_fibers ", + ), ) inputs = fiberprocess.input_spec() @@ -30,7 +69,12 @@ def test_fiberprocess_inputs(): def test_fiberprocess_outputs(): output_map = dict( - fiber_output=dict(extensions=None), voxelize=dict(extensions=None) + fiber_output=dict( + extensions=None, + ), + voxelize=dict( + extensions=None, + ), ) outputs = fiberprocess.output_spec() diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py index 7ceb5d7579..a9df738d28 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py @@ -4,10 +4,20 @@ def test_fiberstats_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - fiber_file=dict(argstr="--fiber_file %s", extensions=None), - verbose=dict(argstr="--verbose "), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fiber_file=dict( + argstr="--fiber_file %s", + extensions=None, + ), + verbose=dict( + argstr="--verbose ", + ), ) inputs = fiberstats.input_spec() diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py index 2a34252eb3..28798b14ff 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py @@ -4,21 +4,55 @@ def test_fibertrack_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - forbidden_label=dict(argstr="--forbidden_label %d"), - force=dict(argstr="--force "), - 
input_roi_file=dict(argstr="--input_roi_file %s", extensions=None), - input_tensor_file=dict(argstr="--input_tensor_file %s", extensions=None), - max_angle=dict(argstr="--max_angle %f"), - min_fa=dict(argstr="--min_fa %f"), - output_fiber_file=dict(argstr="--output_fiber_file %s", hash_files=False), - really_verbose=dict(argstr="--really_verbose "), - source_label=dict(argstr="--source_label %d"), - step_size=dict(argstr="--step_size %f"), - target_label=dict(argstr="--target_label %d"), - verbose=dict(argstr="--verbose "), - whole_brain=dict(argstr="--whole_brain "), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + forbidden_label=dict( + argstr="--forbidden_label %d", + ), + force=dict( + argstr="--force ", + ), + input_roi_file=dict( + argstr="--input_roi_file %s", + extensions=None, + ), + input_tensor_file=dict( + argstr="--input_tensor_file %s", + extensions=None, + ), + max_angle=dict( + argstr="--max_angle %f", + ), + min_fa=dict( + argstr="--min_fa %f", + ), + output_fiber_file=dict( + argstr="--output_fiber_file %s", + hash_files=False, + ), + really_verbose=dict( + argstr="--really_verbose ", + ), + source_label=dict( + argstr="--source_label %d", + ), + step_size=dict( + argstr="--step_size %f", + ), + target_label=dict( + argstr="--target_label %d", + ), + verbose=dict( + argstr="--verbose ", + ), + whole_brain=dict( + argstr="--whole_brain ", + ), ) inputs = fibertrack.input_spec() @@ -28,7 +62,11 @@ def test_fibertrack_inputs(): def test_fibertrack_outputs(): - output_map = dict(output_fiber_file=dict(extensions=None)) + output_map = dict( + output_fiber_file=dict( + extensions=None, + ), + ) outputs = fibertrack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py b/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py index 7bee4b1cbb..222c460279 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py @@ -4,13 +4,30 @@ def test_CannyEdge_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - lowerThreshold=dict(argstr="--lowerThreshold %f"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - upperThreshold=dict(argstr="--upperThreshold %f"), - variance=dict(argstr="--variance %f"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + lowerThreshold=dict( + argstr="--lowerThreshold %f", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + upperThreshold=dict( + argstr="--upperThreshold %f", + ), + variance=dict( + argstr="--variance %f", + ), ) inputs = CannyEdge.input_spec() @@ -20,7 +37,11 @@ def test_CannyEdge_inputs(): def test_CannyEdge_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = CannyEdge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py index 58b492e14b..21f8e5da6f 100644 --- 
a/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py @@ -4,17 +4,44 @@ def test_CannySegmentationLevelSetImageFilter_inputs(): input_map = dict( - advectionWeight=dict(argstr="--advectionWeight %f"), - args=dict(argstr="%s"), - cannyThreshold=dict(argstr="--cannyThreshold %f"), - cannyVariance=dict(argstr="--cannyVariance %f"), - environ=dict(nohash=True, usedefault=True), - initialModel=dict(argstr="--initialModel %s", extensions=None), - initialModelIsovalue=dict(argstr="--initialModelIsovalue %f"), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - maxIterations=dict(argstr="--maxIterations %d"), - outputSpeedVolume=dict(argstr="--outputSpeedVolume %s", hash_files=False), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + advectionWeight=dict( + argstr="--advectionWeight %f", + ), + args=dict( + argstr="%s", + ), + cannyThreshold=dict( + argstr="--cannyThreshold %f", + ), + cannyVariance=dict( + argstr="--cannyVariance %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + initialModel=dict( + argstr="--initialModel %s", + extensions=None, + ), + initialModelIsovalue=dict( + argstr="--initialModelIsovalue %f", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + maxIterations=dict( + argstr="--maxIterations %d", + ), + outputSpeedVolume=dict( + argstr="--outputSpeedVolume %s", + hash_files=False, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = CannySegmentationLevelSetImageFilter.input_spec() @@ -25,7 +52,12 @@ def test_CannySegmentationLevelSetImageFilter_inputs(): def test_CannySegmentationLevelSetImageFilter_outputs(): output_map = dict( - outputSpeedVolume=dict(extensions=None), outputVolume=dict(extensions=None) + outputSpeedVolume=dict( + extensions=None, + ), + outputVolume=dict( + extensions=None, + ), ) outputs = CannySegmentationLevelSetImageFilter.output_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py index 700db119a7..2be2940aeb 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py @@ -4,12 +4,28 @@ def test_DilateImage_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None), - inputRadius=dict(argstr="--inputRadius %d"), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMaskVolume=dict( + argstr="--inputMaskVolume %s", + extensions=None, + ), + inputRadius=dict( + argstr="--inputRadius %d", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = DilateImage.input_spec() @@ -19,7 +35,11 @@ def test_DilateImage_inputs(): def test_DilateImage_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = DilateImage.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py index 28c7c239f8..66cc444a94 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py @@ -4,13 +4,31 @@ def test_DilateMask_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputBinaryVolume=dict(argstr="--inputBinaryVolume %s", extensions=None), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - lowerThreshold=dict(argstr="--lowerThreshold %f"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - sizeStructuralElement=dict(argstr="--sizeStructuralElement %d"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputBinaryVolume=dict( + argstr="--inputBinaryVolume %s", + extensions=None, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + lowerThreshold=dict( + argstr="--lowerThreshold %f", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + sizeStructuralElement=dict( + argstr="--sizeStructuralElement %d", + ), ) inputs = DilateMask.input_spec() @@ -20,7 +38,11 @@ def test_DilateMask_inputs(): def test_DilateMask_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = DilateMask.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py index 0018acc6fd..255145cfc6 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py @@ -4,12 +4,28 @@ def test_DistanceMaps_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputLabelVolume=dict(argstr="--inputLabelVolume %s", extensions=None), - inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None), - inputTissueLabel=dict(argstr="--inputTissueLabel %d"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputLabelVolume=dict( + argstr="--inputLabelVolume %s", + extensions=None, + ), + inputMaskVolume=dict( + argstr="--inputMaskVolume %s", + extensions=None, + ), + inputTissueLabel=dict( + argstr="--inputTissueLabel %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = DistanceMaps.input_spec() @@ -19,7 +35,11 @@ def test_DistanceMaps_inputs(): def test_DistanceMaps_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = DistanceMaps.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py index 2e06afc214..b62a21fdbe 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py @@ -4,10 +4,21 @@ def test_DumpBinaryTrainingVectors_inputs(): input_map = dict( - args=dict(argstr="%s"), - 
environ=dict(nohash=True, usedefault=True), - inputHeaderFilename=dict(argstr="--inputHeaderFilename %s", extensions=None), - inputVectorFilename=dict(argstr="--inputVectorFilename %s", extensions=None), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputHeaderFilename=dict( + argstr="--inputHeaderFilename %s", + extensions=None, + ), + inputVectorFilename=dict( + argstr="--inputVectorFilename %s", + extensions=None, + ), ) inputs = DumpBinaryTrainingVectors.input_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py index 2752e5b306..ae7140754f 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py @@ -4,12 +4,28 @@ def test_ErodeImage_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None), - inputRadius=dict(argstr="--inputRadius %d"), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMaskVolume=dict( + argstr="--inputMaskVolume %s", + extensions=None, + ), + inputRadius=dict( + argstr="--inputRadius %d", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = ErodeImage.input_spec() @@ -19,7 +35,11 @@ def test_ErodeImage_inputs(): def test_ErodeImage_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = ErodeImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py b/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py index d10d66e618..704bc01820 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py @@ -4,11 +4,25 @@ def test_FlippedDifference_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMaskVolume=dict( + argstr="--inputMaskVolume %s", + extensions=None, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = FlippedDifference.input_spec() @@ -18,7 +32,11 @@ def test_FlippedDifference_inputs(): def test_FlippedDifference_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = FlippedDifference.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py index 7e7827ac7b..e48d622ab3 100644 --- 
a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py @@ -4,12 +4,28 @@ def test_GenerateBrainClippedImage_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputImg=dict(argstr="--inputImg %s", extensions=None), - inputMsk=dict(argstr="--inputMsk %s", extensions=None), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputFileName=dict(argstr="--outputFileName %s", hash_files=False), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputImg=dict( + argstr="--inputImg %s", + extensions=None, + ), + inputMsk=dict( + argstr="--inputMsk %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputFileName=dict( + argstr="--outputFileName %s", + hash_files=False, + ), ) inputs = GenerateBrainClippedImage.input_spec() @@ -19,7 +35,11 @@ def test_GenerateBrainClippedImage_inputs(): def test_GenerateBrainClippedImage_outputs(): - output_map = dict(outputFileName=dict(extensions=None)) + output_map = dict( + outputFileName=dict( + extensions=None, + ), + ) outputs = GenerateBrainClippedImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py index 7ee1cbd5d2..b53396f396 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py @@ -4,13 +4,31 @@ def test_GenerateSummedGradientImage_inputs(): input_map = dict( - MaximumGradient=dict(argstr="--MaximumGradient "), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputVolume1=dict(argstr="--inputVolume1 %s", extensions=None), - inputVolume2=dict(argstr="--inputVolume2 %s", extensions=None), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputFileName=dict(argstr="--outputFileName %s", hash_files=False), + MaximumGradient=dict( + argstr="--MaximumGradient ", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume1=dict( + argstr="--inputVolume1 %s", + extensions=None, + ), + inputVolume2=dict( + argstr="--inputVolume2 %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputFileName=dict( + argstr="--outputFileName %s", + hash_files=False, + ), ) inputs = GenerateSummedGradientImage.input_spec() @@ -20,7 +38,11 @@ def test_GenerateSummedGradientImage_inputs(): def test_GenerateSummedGradientImage_outputs(): - output_map = dict(outputFileName=dict(extensions=None)) + output_map = dict( + outputFileName=dict( + extensions=None, + ), + ) outputs = GenerateSummedGradientImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py index 116a3d6cd6..c6e8fdfb12 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py @@ -4,13 +4,30 @@ def test_GenerateTestImage_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - 
inputVolume=dict(argstr="--inputVolume %s", extensions=None), - lowerBoundOfOutputVolume=dict(argstr="--lowerBoundOfOutputVolume %f"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - outputVolumeSize=dict(argstr="--outputVolumeSize %f"), - upperBoundOfOutputVolume=dict(argstr="--upperBoundOfOutputVolume %f"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + lowerBoundOfOutputVolume=dict( + argstr="--lowerBoundOfOutputVolume %f", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + outputVolumeSize=dict( + argstr="--outputVolumeSize %f", + ), + upperBoundOfOutputVolume=dict( + argstr="--upperBoundOfOutputVolume %f", + ), ) inputs = GenerateTestImage.input_spec() @@ -20,7 +37,11 @@ def test_GenerateTestImage_inputs(): def test_GenerateTestImage_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = GenerateTestImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py index e25d090992..c7828aaccc 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py @@ -4,13 +4,30 @@ def test_GradientAnisotropicDiffusionImageFilter_inputs(): input_map = dict( - args=dict(argstr="%s"), - conductance=dict(argstr="--conductance %f"), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - numberOfIterations=dict(argstr="--numberOfIterations %d"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - timeStep=dict(argstr="--timeStep %f"), + args=dict( + argstr="%s", + ), + conductance=dict( + argstr="--conductance %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfIterations=dict( + argstr="--numberOfIterations %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + timeStep=dict( + argstr="--timeStep %f", + ), ) inputs = GradientAnisotropicDiffusionImageFilter.input_spec() @@ -20,7 +37,11 @@ def test_GradientAnisotropicDiffusionImageFilter_inputs(): def test_GradientAnisotropicDiffusionImageFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = GradientAnisotropicDiffusionImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py b/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py index e8d82e3316..8188ad0432 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py @@ -4,14 +4,34 @@ def test_HammerAttributeCreator_inputs(): input_map = dict( - Scale=dict(argstr="--Scale %d"), - Strength=dict(argstr="--Strength %f"), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputCSFVolume=dict(argstr="--inputCSFVolume %s", 
extensions=None), - inputGMVolume=dict(argstr="--inputGMVolume %s", extensions=None), - inputWMVolume=dict(argstr="--inputWMVolume %s", extensions=None), - outputVolumeBase=dict(argstr="--outputVolumeBase %s"), + Scale=dict( + argstr="--Scale %d", + ), + Strength=dict( + argstr="--Strength %f", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputCSFVolume=dict( + argstr="--inputCSFVolume %s", + extensions=None, + ), + inputGMVolume=dict( + argstr="--inputGMVolume %s", + extensions=None, + ), + inputWMVolume=dict( + argstr="--inputWMVolume %s", + extensions=None, + ), + outputVolumeBase=dict( + argstr="--outputVolumeBase %s", + ), ) inputs = HammerAttributeCreator.input_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py index e5d801e261..839bfe5f11 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py @@ -4,12 +4,28 @@ def test_NeighborhoodMean_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None), - inputRadius=dict(argstr="--inputRadius %d"), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMaskVolume=dict( + argstr="--inputMaskVolume %s", + extensions=None, + ), + inputRadius=dict( + argstr="--inputRadius %d", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = NeighborhoodMean.input_spec() @@ -19,7 +35,11 @@ def test_NeighborhoodMean_inputs(): def test_NeighborhoodMean_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = NeighborhoodMean.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py index ead623c7bb..476a93595e 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py @@ -4,12 +4,28 @@ def test_NeighborhoodMedian_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None), - inputRadius=dict(argstr="--inputRadius %d"), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMaskVolume=dict( + argstr="--inputMaskVolume %s", + extensions=None, + ), + inputRadius=dict( + argstr="--inputRadius %d", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = NeighborhoodMedian.input_spec() @@ -19,7 +35,11 @@ def test_NeighborhoodMedian_inputs(): def test_NeighborhoodMedian_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + 
outputVolume=dict( + extensions=None, + ), + ) outputs = NeighborhoodMedian.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py b/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py index 1b01e17bcf..f6ee369414 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py @@ -4,11 +4,23 @@ def test_STAPLEAnalysis_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputDimension=dict(argstr="--inputDimension %d"), - inputLabelVolume=dict(argstr="--inputLabelVolume %s..."), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputDimension=dict( + argstr="--inputDimension %d", + ), + inputLabelVolume=dict( + argstr="--inputLabelVolume %s...", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = STAPLEAnalysis.input_spec() @@ -18,7 +30,11 @@ def test_STAPLEAnalysis_inputs(): def test_STAPLEAnalysis_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = STAPLEAnalysis.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py index d96e7e487f..e16d7a9522 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py @@ -4,11 +4,24 @@ def test_TextureFromNoiseImageFilter_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputRadius=dict(argstr="--inputRadius %d"), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputRadius=dict( + argstr="--inputRadius %d", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = TextureFromNoiseImageFilter.input_spec() @@ -18,7 +31,11 @@ def test_TextureFromNoiseImageFilter_inputs(): def test_TextureFromNoiseImageFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = TextureFromNoiseImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py index 6cef038c09..7e6c4f6263 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py @@ -4,13 +4,31 @@ def test_TextureMeasureFilter_inputs(): input_map = dict( - args=dict(argstr="%s"), - distance=dict(argstr="--distance %d"), - environ=dict(nohash=True, usedefault=True), - inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - 
insideROIValue=dict(argstr="--insideROIValue %f"), - outputFilename=dict(argstr="--outputFilename %s", hash_files=False), + args=dict( + argstr="%s", + ), + distance=dict( + argstr="--distance %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMaskVolume=dict( + argstr="--inputMaskVolume %s", + extensions=None, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + insideROIValue=dict( + argstr="--insideROIValue %f", + ), + outputFilename=dict( + argstr="--outputFilename %s", + hash_files=False, + ), ) inputs = TextureMeasureFilter.input_spec() @@ -20,7 +38,11 @@ def test_TextureMeasureFilter_inputs(): def test_TextureMeasureFilter_outputs(): - output_map = dict(outputFilename=dict(extensions=None)) + output_map = dict( + outputFilename=dict( + extensions=None, + ), + ) outputs = TextureMeasureFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py b/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py index a68b1fd398..194d556a8e 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py @@ -4,15 +4,40 @@ def test_UnbiasedNonLocalMeans_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - hp=dict(argstr="--hp %f"), - inputVolume=dict(argstr="%s", extensions=None, position=-2), - outputVolume=dict(argstr="%s", hash_files=False, position=-1), - ps=dict(argstr="--ps %f"), - rc=dict(argstr="--rc %s", sep=","), - rs=dict(argstr="--rs %s", sep=","), - sigma=dict(argstr="--sigma %f"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hp=dict( + argstr="--hp %f", + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + ps=dict( + argstr="--ps %f", + ), + rc=dict( + argstr="--rc %s", + sep=",", + ), + rs=dict( + argstr="--rs %s", + sep=",", + ), + sigma=dict( + argstr="--sigma %f", + ), ) inputs = UnbiasedNonLocalMeans.input_spec() @@ -22,7 +47,12 @@ def test_UnbiasedNonLocalMeans_inputs(): def test_UnbiasedNonLocalMeans_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1)) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = UnbiasedNonLocalMeans.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py b/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py index 0a0dd8d6f7..6af2e14039 100644 --- a/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py +++ b/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py @@ -4,15 +4,38 @@ def test_scalartransform_inputs(): input_map = dict( - args=dict(argstr="%s"), - deformation=dict(argstr="--deformation %s", extensions=None), - environ=dict(nohash=True, usedefault=True), - h_field=dict(argstr="--h_field "), - input_image=dict(argstr="--input_image %s", extensions=None), - interpolation=dict(argstr="--interpolation %s"), - invert=dict(argstr="--invert "), - output_image=dict(argstr="--output_image %s", hash_files=False), - transformation=dict(argstr="--transformation %s", hash_files=False), + args=dict( + argstr="%s", + ), + deformation=dict( + argstr="--deformation %s", + extensions=None, + 
), + environ=dict( + nohash=True, + usedefault=True, + ), + h_field=dict( + argstr="--h_field ", + ), + input_image=dict( + argstr="--input_image %s", + extensions=None, + ), + interpolation=dict( + argstr="--interpolation %s", + ), + invert=dict( + argstr="--invert ", + ), + output_image=dict( + argstr="--output_image %s", + hash_files=False, + ), + transformation=dict( + argstr="--transformation %s", + hash_files=False, + ), ) inputs = scalartransform.input_spec() @@ -23,7 +46,12 @@ def test_scalartransform_inputs(): def test_scalartransform_outputs(): output_map = dict( - output_image=dict(extensions=None), transformation=dict(extensions=None) + output_image=dict( + extensions=None, + ), + transformation=dict( + extensions=None, + ), ) outputs = scalartransform.output_spec() diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py index 845cf63f3c..d1c8055df3 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py @@ -4,60 +4,149 @@ def test_BRAINSDemonWarp_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), arrayOfPyramidLevelIterations=dict( - argstr="--arrayOfPyramidLevelIterations %s", sep="," + argstr="--arrayOfPyramidLevelIterations %s", + sep=",", + ), + backgroundFillValue=dict( + argstr="--backgroundFillValue %d", ), - backgroundFillValue=dict(argstr="--backgroundFillValue %d"), checkerboardPatternSubdivisions=dict( - argstr="--checkerboardPatternSubdivisions %s", sep="," - ), - environ=dict(nohash=True, usedefault=True), - fixedBinaryVolume=dict(argstr="--fixedBinaryVolume %s", extensions=None), - fixedVolume=dict(argstr="--fixedVolume %s", extensions=None), - gradient_type=dict(argstr="--gradient_type %s"), - gui=dict(argstr="--gui "), - histogramMatch=dict(argstr="--histogramMatch "), + argstr="--checkerboardPatternSubdivisions %s", + sep=",", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedBinaryVolume=dict( + argstr="--fixedBinaryVolume %s", + extensions=None, + ), + fixedVolume=dict( + argstr="--fixedVolume %s", + extensions=None, + ), + gradient_type=dict( + argstr="--gradient_type %s", + ), + gui=dict( + argstr="--gui ", + ), + histogramMatch=dict( + argstr="--histogramMatch ", + ), initializeWithDisplacementField=dict( - argstr="--initializeWithDisplacementField %s", extensions=None + argstr="--initializeWithDisplacementField %s", + extensions=None, ), initializeWithTransform=dict( - argstr="--initializeWithTransform %s", extensions=None - ), - inputPixelType=dict(argstr="--inputPixelType %s"), - interpolationMode=dict(argstr="--interpolationMode %s"), - lowerThresholdForBOBF=dict(argstr="--lowerThresholdForBOBF %d"), - maskProcessingMode=dict(argstr="--maskProcessingMode %s"), - max_step_length=dict(argstr="--max_step_length %f"), - medianFilterSize=dict(argstr="--medianFilterSize %s", sep=","), - minimumFixedPyramid=dict(argstr="--minimumFixedPyramid %s", sep=","), - minimumMovingPyramid=dict(argstr="--minimumMovingPyramid %s", sep=","), - movingBinaryVolume=dict(argstr="--movingBinaryVolume %s", extensions=None), - movingVolume=dict(argstr="--movingVolume %s", extensions=None), - neighborhoodForBOBF=dict(argstr="--neighborhoodForBOBF %s", sep=","), - numberOfBCHApproximationTerms=dict(argstr="--numberOfBCHApproximationTerms %d"), - numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d"), 
- numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d"), - numberOfPyramidLevels=dict(argstr="--numberOfPyramidLevels %d"), - numberOfThreads=dict(argstr="--numberOfThreads %d"), + argstr="--initializeWithTransform %s", + extensions=None, + ), + inputPixelType=dict( + argstr="--inputPixelType %s", + ), + interpolationMode=dict( + argstr="--interpolationMode %s", + ), + lowerThresholdForBOBF=dict( + argstr="--lowerThresholdForBOBF %d", + ), + maskProcessingMode=dict( + argstr="--maskProcessingMode %s", + ), + max_step_length=dict( + argstr="--max_step_length %f", + ), + medianFilterSize=dict( + argstr="--medianFilterSize %s", + sep=",", + ), + minimumFixedPyramid=dict( + argstr="--minimumFixedPyramid %s", + sep=",", + ), + minimumMovingPyramid=dict( + argstr="--minimumMovingPyramid %s", + sep=",", + ), + movingBinaryVolume=dict( + argstr="--movingBinaryVolume %s", + extensions=None, + ), + movingVolume=dict( + argstr="--movingVolume %s", + extensions=None, + ), + neighborhoodForBOBF=dict( + argstr="--neighborhoodForBOBF %s", + sep=",", + ), + numberOfBCHApproximationTerms=dict( + argstr="--numberOfBCHApproximationTerms %d", + ), + numberOfHistogramBins=dict( + argstr="--numberOfHistogramBins %d", + ), + numberOfMatchPoints=dict( + argstr="--numberOfMatchPoints %d", + ), + numberOfPyramidLevels=dict( + argstr="--numberOfPyramidLevels %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), outputCheckerboardVolume=dict( - argstr="--outputCheckerboardVolume %s", hash_files=False + argstr="--outputCheckerboardVolume %s", + hash_files=False, + ), + outputDebug=dict( + argstr="--outputDebug ", + ), + outputDisplacementFieldPrefix=dict( + argstr="--outputDisplacementFieldPrefix %s", ), - outputDebug=dict(argstr="--outputDebug "), - outputDisplacementFieldPrefix=dict(argstr="--outputDisplacementFieldPrefix %s"), outputDisplacementFieldVolume=dict( - argstr="--outputDisplacementFieldVolume %s", hash_files=False - ), - outputNormalized=dict(argstr="--outputNormalized "), - outputPixelType=dict(argstr="--outputPixelType %s"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - promptUser=dict(argstr="--promptUser "), - registrationFilterType=dict(argstr="--registrationFilterType %s"), - seedForBOBF=dict(argstr="--seedForBOBF %s", sep=","), - smoothDisplacementFieldSigma=dict(argstr="--smoothDisplacementFieldSigma %f"), - upFieldSmoothing=dict(argstr="--upFieldSmoothing %f"), - upperThresholdForBOBF=dict(argstr="--upperThresholdForBOBF %d"), - use_vanilla_dem=dict(argstr="--use_vanilla_dem "), + argstr="--outputDisplacementFieldVolume %s", + hash_files=False, + ), + outputNormalized=dict( + argstr="--outputNormalized ", + ), + outputPixelType=dict( + argstr="--outputPixelType %s", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + promptUser=dict( + argstr="--promptUser ", + ), + registrationFilterType=dict( + argstr="--registrationFilterType %s", + ), + seedForBOBF=dict( + argstr="--seedForBOBF %s", + sep=",", + ), + smoothDisplacementFieldSigma=dict( + argstr="--smoothDisplacementFieldSigma %f", + ), + upFieldSmoothing=dict( + argstr="--upFieldSmoothing %f", + ), + upperThresholdForBOBF=dict( + argstr="--upperThresholdForBOBF %d", + ), + use_vanilla_dem=dict( + argstr="--use_vanilla_dem ", + ), ) inputs = BRAINSDemonWarp.input_spec() @@ -68,9 +157,15 @@ def test_BRAINSDemonWarp_inputs(): def test_BRAINSDemonWarp_outputs(): output_map = dict( - outputCheckerboardVolume=dict(extensions=None), - 
outputDisplacementFieldVolume=dict(extensions=None), - outputVolume=dict(extensions=None), + outputCheckerboardVolume=dict( + extensions=None, + ), + outputDisplacementFieldVolume=dict( + extensions=None, + ), + outputVolume=dict( + extensions=None, + ), ) outputs = BRAINSDemonWarp.output_spec() diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py index da777409c1..69111d9212 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py @@ -4,79 +4,228 @@ def test_BRAINSFit_inputs(): input_map = dict( - ROIAutoClosingSize=dict(argstr="--ROIAutoClosingSize %f"), - ROIAutoDilateSize=dict(argstr="--ROIAutoDilateSize %f"), - args=dict(argstr="%s"), - backgroundFillValue=dict(argstr="--backgroundFillValue %f"), - bsplineTransform=dict(argstr="--bsplineTransform %s", hash_files=False), - costFunctionConvergenceFactor=dict(argstr="--costFunctionConvergenceFactor %f"), - costMetric=dict(argstr="--costMetric %s"), - debugLevel=dict(argstr="--debugLevel %d"), - environ=dict(nohash=True, usedefault=True), - failureExitCode=dict(argstr="--failureExitCode %d"), - fixedBinaryVolume=dict(argstr="--fixedBinaryVolume %s", extensions=None), - fixedVolume=dict(argstr="--fixedVolume %s", extensions=None), - fixedVolume2=dict(argstr="--fixedVolume2 %s", extensions=None), - fixedVolumeTimeIndex=dict(argstr="--fixedVolumeTimeIndex %d"), - gui=dict(argstr="--gui "), - histogramMatch=dict(argstr="--histogramMatch "), - initialTransform=dict(argstr="--initialTransform %s", extensions=None), + ROIAutoClosingSize=dict( + argstr="--ROIAutoClosingSize %f", + ), + ROIAutoDilateSize=dict( + argstr="--ROIAutoDilateSize %f", + ), + args=dict( + argstr="%s", + ), + backgroundFillValue=dict( + argstr="--backgroundFillValue %f", + ), + bsplineTransform=dict( + argstr="--bsplineTransform %s", + hash_files=False, + ), + costFunctionConvergenceFactor=dict( + argstr="--costFunctionConvergenceFactor %f", + ), + costMetric=dict( + argstr="--costMetric %s", + ), + debugLevel=dict( + argstr="--debugLevel %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + failureExitCode=dict( + argstr="--failureExitCode %d", + ), + fixedBinaryVolume=dict( + argstr="--fixedBinaryVolume %s", + extensions=None, + ), + fixedVolume=dict( + argstr="--fixedVolume %s", + extensions=None, + ), + fixedVolume2=dict( + argstr="--fixedVolume2 %s", + extensions=None, + ), + fixedVolumeTimeIndex=dict( + argstr="--fixedVolumeTimeIndex %d", + ), + gui=dict( + argstr="--gui ", + ), + histogramMatch=dict( + argstr="--histogramMatch ", + ), + initialTransform=dict( + argstr="--initialTransform %s", + extensions=None, + ), initializeRegistrationByCurrentGenericTransform=dict( - argstr="--initializeRegistrationByCurrentGenericTransform " - ), - initializeTransformMode=dict(argstr="--initializeTransformMode %s"), - interpolationMode=dict(argstr="--interpolationMode %s"), - linearTransform=dict(argstr="--linearTransform %s", hash_files=False), - logFileReport=dict(argstr="--logFileReport %s", hash_files=False), - maskInferiorCutOffFromCenter=dict(argstr="--maskInferiorCutOffFromCenter %f"), - maskProcessingMode=dict(argstr="--maskProcessingMode %s"), - maxBSplineDisplacement=dict(argstr="--maxBSplineDisplacement %f"), - maximumNumberOfCorrections=dict(argstr="--maximumNumberOfCorrections %d"), - maximumNumberOfEvaluations=dict(argstr="--maximumNumberOfEvaluations 
%d"), - maximumStepLength=dict(argstr="--maximumStepLength %f"), - medianFilterSize=dict(argstr="--medianFilterSize %s", sep=","), - metricSamplingStrategy=dict(argstr="--metricSamplingStrategy %s"), - minimumStepLength=dict(argstr="--minimumStepLength %s", sep=","), - movingBinaryVolume=dict(argstr="--movingBinaryVolume %s", extensions=None), - movingVolume=dict(argstr="--movingVolume %s", extensions=None), - movingVolume2=dict(argstr="--movingVolume2 %s", extensions=None), - movingVolumeTimeIndex=dict(argstr="--movingVolumeTimeIndex %d"), - numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d"), - numberOfIterations=dict(argstr="--numberOfIterations %s", sep=","), - numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d"), - numberOfSamples=dict(argstr="--numberOfSamples %d"), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputFixedVolumeROI=dict(argstr="--outputFixedVolumeROI %s", hash_files=False), + argstr="--initializeRegistrationByCurrentGenericTransform ", + ), + initializeTransformMode=dict( + argstr="--initializeTransformMode %s", + ), + interpolationMode=dict( + argstr="--interpolationMode %s", + ), + linearTransform=dict( + argstr="--linearTransform %s", + hash_files=False, + ), + logFileReport=dict( + argstr="--logFileReport %s", + hash_files=False, + ), + maskInferiorCutOffFromCenter=dict( + argstr="--maskInferiorCutOffFromCenter %f", + ), + maskProcessingMode=dict( + argstr="--maskProcessingMode %s", + ), + maxBSplineDisplacement=dict( + argstr="--maxBSplineDisplacement %f", + ), + maximumNumberOfCorrections=dict( + argstr="--maximumNumberOfCorrections %d", + ), + maximumNumberOfEvaluations=dict( + argstr="--maximumNumberOfEvaluations %d", + ), + maximumStepLength=dict( + argstr="--maximumStepLength %f", + ), + medianFilterSize=dict( + argstr="--medianFilterSize %s", + sep=",", + ), + metricSamplingStrategy=dict( + argstr="--metricSamplingStrategy %s", + ), + minimumStepLength=dict( + argstr="--minimumStepLength %s", + sep=",", + ), + movingBinaryVolume=dict( + argstr="--movingBinaryVolume %s", + extensions=None, + ), + movingVolume=dict( + argstr="--movingVolume %s", + extensions=None, + ), + movingVolume2=dict( + argstr="--movingVolume2 %s", + extensions=None, + ), + movingVolumeTimeIndex=dict( + argstr="--movingVolumeTimeIndex %d", + ), + numberOfHistogramBins=dict( + argstr="--numberOfHistogramBins %d", + ), + numberOfIterations=dict( + argstr="--numberOfIterations %s", + sep=",", + ), + numberOfMatchPoints=dict( + argstr="--numberOfMatchPoints %d", + ), + numberOfSamples=dict( + argstr="--numberOfSamples %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputFixedVolumeROI=dict( + argstr="--outputFixedVolumeROI %s", + hash_files=False, + ), outputMovingVolumeROI=dict( - argstr="--outputMovingVolumeROI %s", hash_files=False - ), - outputTransform=dict(argstr="--outputTransform %s", hash_files=False), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - outputVolumePixelType=dict(argstr="--outputVolumePixelType %s"), - projectedGradientTolerance=dict(argstr="--projectedGradientTolerance %f"), - promptUser=dict(argstr="--promptUser "), - relaxationFactor=dict(argstr="--relaxationFactor %f"), - removeIntensityOutliers=dict(argstr="--removeIntensityOutliers %f"), - reproportionScale=dict(argstr="--reproportionScale %f"), - samplingPercentage=dict(argstr="--samplingPercentage %f"), - scaleOutputValues=dict(argstr="--scaleOutputValues "), - skewScale=dict(argstr="--skewScale %f"), - 
splineGridSize=dict(argstr="--splineGridSize %s", sep=","), + argstr="--outputMovingVolumeROI %s", + hash_files=False, + ), + outputTransform=dict( + argstr="--outputTransform %s", + hash_files=False, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + outputVolumePixelType=dict( + argstr="--outputVolumePixelType %s", + ), + projectedGradientTolerance=dict( + argstr="--projectedGradientTolerance %f", + ), + promptUser=dict( + argstr="--promptUser ", + ), + relaxationFactor=dict( + argstr="--relaxationFactor %f", + ), + removeIntensityOutliers=dict( + argstr="--removeIntensityOutliers %f", + ), + reproportionScale=dict( + argstr="--reproportionScale %f", + ), + samplingPercentage=dict( + argstr="--samplingPercentage %f", + ), + scaleOutputValues=dict( + argstr="--scaleOutputValues ", + ), + skewScale=dict( + argstr="--skewScale %f", + ), + splineGridSize=dict( + argstr="--splineGridSize %s", + sep=",", + ), strippedOutputTransform=dict( - argstr="--strippedOutputTransform %s", hash_files=False - ), - transformType=dict(argstr="--transformType %s", sep=","), - translationScale=dict(argstr="--translationScale %f"), - useAffine=dict(argstr="--useAffine "), - useBSpline=dict(argstr="--useBSpline "), - useComposite=dict(argstr="--useComposite "), - useROIBSpline=dict(argstr="--useROIBSpline "), - useRigid=dict(argstr="--useRigid "), - useScaleSkewVersor3D=dict(argstr="--useScaleSkewVersor3D "), - useScaleVersor3D=dict(argstr="--useScaleVersor3D "), - useSyN=dict(argstr="--useSyN "), - writeOutputTransformInFloat=dict(argstr="--writeOutputTransformInFloat "), - writeTransformOnFailure=dict(argstr="--writeTransformOnFailure "), + argstr="--strippedOutputTransform %s", + hash_files=False, + ), + transformType=dict( + argstr="--transformType %s", + sep=",", + ), + translationScale=dict( + argstr="--translationScale %f", + ), + useAffine=dict( + argstr="--useAffine ", + ), + useBSpline=dict( + argstr="--useBSpline ", + ), + useComposite=dict( + argstr="--useComposite ", + ), + useROIBSpline=dict( + argstr="--useROIBSpline ", + ), + useRigid=dict( + argstr="--useRigid ", + ), + useScaleSkewVersor3D=dict( + argstr="--useScaleSkewVersor3D ", + ), + useScaleVersor3D=dict( + argstr="--useScaleVersor3D ", + ), + useSyN=dict( + argstr="--useSyN ", + ), + writeOutputTransformInFloat=dict( + argstr="--writeOutputTransformInFloat ", + ), + writeTransformOnFailure=dict( + argstr="--writeTransformOnFailure ", + ), ) inputs = BRAINSFit.input_spec() @@ -87,14 +236,30 @@ def test_BRAINSFit_inputs(): def test_BRAINSFit_outputs(): output_map = dict( - bsplineTransform=dict(extensions=None), - linearTransform=dict(extensions=None), - logFileReport=dict(extensions=None), - outputFixedVolumeROI=dict(extensions=None), - outputMovingVolumeROI=dict(extensions=None), - outputTransform=dict(extensions=None), - outputVolume=dict(extensions=None), - strippedOutputTransform=dict(extensions=None), + bsplineTransform=dict( + extensions=None, + ), + linearTransform=dict( + extensions=None, + ), + logFileReport=dict( + extensions=None, + ), + outputFixedVolumeROI=dict( + extensions=None, + ), + outputMovingVolumeROI=dict( + extensions=None, + ), + outputTransform=dict( + extensions=None, + ), + outputVolume=dict( + extensions=None, + ), + strippedOutputTransform=dict( + extensions=None, + ), ) outputs = BRAINSFit.output_spec() diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py index 
f2b6760eea..46d175da07 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py @@ -4,19 +4,52 @@ def test_BRAINSResample_inputs(): input_map = dict( - args=dict(argstr="%s"), - defaultValue=dict(argstr="--defaultValue %f"), - deformationVolume=dict(argstr="--deformationVolume %s", extensions=None), - environ=dict(nohash=True, usedefault=True), - gridSpacing=dict(argstr="--gridSpacing %s", sep=","), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - interpolationMode=dict(argstr="--interpolationMode %s"), - inverseTransform=dict(argstr="--inverseTransform "), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - pixelType=dict(argstr="--pixelType %s"), - referenceVolume=dict(argstr="--referenceVolume %s", extensions=None), - warpTransform=dict(argstr="--warpTransform %s", extensions=None), + args=dict( + argstr="%s", + ), + defaultValue=dict( + argstr="--defaultValue %f", + ), + deformationVolume=dict( + argstr="--deformationVolume %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gridSpacing=dict( + argstr="--gridSpacing %s", + sep=",", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + interpolationMode=dict( + argstr="--interpolationMode %s", + ), + inverseTransform=dict( + argstr="--inverseTransform ", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + pixelType=dict( + argstr="--pixelType %s", + ), + referenceVolume=dict( + argstr="--referenceVolume %s", + extensions=None, + ), + warpTransform=dict( + argstr="--warpTransform %s", + extensions=None, + ), ) inputs = BRAINSResample.input_spec() @@ -26,7 +59,11 @@ def test_BRAINSResample_inputs(): def test_BRAINSResample_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = BRAINSResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py index b3d3a36e13..52d24e6fff 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py @@ -4,12 +4,27 @@ def test_BRAINSResize_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - pixelType=dict(argstr="--pixelType %s"), - scaleFactor=dict(argstr="--scaleFactor %f"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + pixelType=dict( + argstr="--pixelType %s", + ), + scaleFactor=dict( + argstr="--scaleFactor %f", + ), ) inputs = BRAINSResize.input_spec() @@ -19,7 +34,11 @@ def test_BRAINSResize_inputs(): def test_BRAINSResize_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = BRAINSResize.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py index 5c4cfbc8ba..bc98fc2763 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py @@ -4,15 +4,37 @@ def test_BRAINSTransformFromFiducials_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - fixedLandmarks=dict(argstr="--fixedLandmarks %s..."), - fixedLandmarksFile=dict(argstr="--fixedLandmarksFile %s", extensions=None), - movingLandmarks=dict(argstr="--movingLandmarks %s..."), - movingLandmarksFile=dict(argstr="--movingLandmarksFile %s", extensions=None), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - saveTransform=dict(argstr="--saveTransform %s", hash_files=False), - transformType=dict(argstr="--transformType %s"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedLandmarks=dict( + argstr="--fixedLandmarks %s...", + ), + fixedLandmarksFile=dict( + argstr="--fixedLandmarksFile %s", + extensions=None, + ), + movingLandmarks=dict( + argstr="--movingLandmarks %s...", + ), + movingLandmarksFile=dict( + argstr="--movingLandmarksFile %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + saveTransform=dict( + argstr="--saveTransform %s", + hash_files=False, + ), + transformType=dict( + argstr="--transformType %s", + ), ) inputs = BRAINSTransformFromFiducials.input_spec() @@ -22,7 +44,11 @@ def test_BRAINSTransformFromFiducials_inputs(): def test_BRAINSTransformFromFiducials_outputs(): - output_map = dict(saveTransform=dict(extensions=None)) + output_map = dict( + saveTransform=dict( + extensions=None, + ), + ) outputs = BRAINSTransformFromFiducials.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py b/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py index 45bfc67734..af8bac8680 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py @@ -4,61 +4,151 @@ def test_VBRAINSDemonWarp_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), arrayOfPyramidLevelIterations=dict( - argstr="--arrayOfPyramidLevelIterations %s", sep="," + argstr="--arrayOfPyramidLevelIterations %s", + sep=",", + ), + backgroundFillValue=dict( + argstr="--backgroundFillValue %d", ), - backgroundFillValue=dict(argstr="--backgroundFillValue %d"), checkerboardPatternSubdivisions=dict( - argstr="--checkerboardPatternSubdivisions %s", sep="," - ), - environ=dict(nohash=True, usedefault=True), - fixedBinaryVolume=dict(argstr="--fixedBinaryVolume %s", extensions=None), - fixedVolume=dict(argstr="--fixedVolume %s..."), - gradient_type=dict(argstr="--gradient_type %s"), - gui=dict(argstr="--gui "), - histogramMatch=dict(argstr="--histogramMatch "), + argstr="--checkerboardPatternSubdivisions %s", + sep=",", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedBinaryVolume=dict( + argstr="--fixedBinaryVolume %s", + extensions=None, + ), + fixedVolume=dict( + argstr="--fixedVolume %s...", + ), + gradient_type=dict( + argstr="--gradient_type %s", + ), + gui=dict( + 
argstr="--gui ", + ), + histogramMatch=dict( + argstr="--histogramMatch ", + ), initializeWithDisplacementField=dict( - argstr="--initializeWithDisplacementField %s", extensions=None + argstr="--initializeWithDisplacementField %s", + extensions=None, ), initializeWithTransform=dict( - argstr="--initializeWithTransform %s", extensions=None - ), - inputPixelType=dict(argstr="--inputPixelType %s"), - interpolationMode=dict(argstr="--interpolationMode %s"), - lowerThresholdForBOBF=dict(argstr="--lowerThresholdForBOBF %d"), - makeBOBF=dict(argstr="--makeBOBF "), - max_step_length=dict(argstr="--max_step_length %f"), - medianFilterSize=dict(argstr="--medianFilterSize %s", sep=","), - minimumFixedPyramid=dict(argstr="--minimumFixedPyramid %s", sep=","), - minimumMovingPyramid=dict(argstr="--minimumMovingPyramid %s", sep=","), - movingBinaryVolume=dict(argstr="--movingBinaryVolume %s", extensions=None), - movingVolume=dict(argstr="--movingVolume %s..."), - neighborhoodForBOBF=dict(argstr="--neighborhoodForBOBF %s", sep=","), - numberOfBCHApproximationTerms=dict(argstr="--numberOfBCHApproximationTerms %d"), - numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d"), - numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d"), - numberOfPyramidLevels=dict(argstr="--numberOfPyramidLevels %d"), - numberOfThreads=dict(argstr="--numberOfThreads %d"), + argstr="--initializeWithTransform %s", + extensions=None, + ), + inputPixelType=dict( + argstr="--inputPixelType %s", + ), + interpolationMode=dict( + argstr="--interpolationMode %s", + ), + lowerThresholdForBOBF=dict( + argstr="--lowerThresholdForBOBF %d", + ), + makeBOBF=dict( + argstr="--makeBOBF ", + ), + max_step_length=dict( + argstr="--max_step_length %f", + ), + medianFilterSize=dict( + argstr="--medianFilterSize %s", + sep=",", + ), + minimumFixedPyramid=dict( + argstr="--minimumFixedPyramid %s", + sep=",", + ), + minimumMovingPyramid=dict( + argstr="--minimumMovingPyramid %s", + sep=",", + ), + movingBinaryVolume=dict( + argstr="--movingBinaryVolume %s", + extensions=None, + ), + movingVolume=dict( + argstr="--movingVolume %s...", + ), + neighborhoodForBOBF=dict( + argstr="--neighborhoodForBOBF %s", + sep=",", + ), + numberOfBCHApproximationTerms=dict( + argstr="--numberOfBCHApproximationTerms %d", + ), + numberOfHistogramBins=dict( + argstr="--numberOfHistogramBins %d", + ), + numberOfMatchPoints=dict( + argstr="--numberOfMatchPoints %d", + ), + numberOfPyramidLevels=dict( + argstr="--numberOfPyramidLevels %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), outputCheckerboardVolume=dict( - argstr="--outputCheckerboardVolume %s", hash_files=False + argstr="--outputCheckerboardVolume %s", + hash_files=False, + ), + outputDebug=dict( + argstr="--outputDebug ", + ), + outputDisplacementFieldPrefix=dict( + argstr="--outputDisplacementFieldPrefix %s", ), - outputDebug=dict(argstr="--outputDebug "), - outputDisplacementFieldPrefix=dict(argstr="--outputDisplacementFieldPrefix %s"), outputDisplacementFieldVolume=dict( - argstr="--outputDisplacementFieldVolume %s", hash_files=False - ), - outputNormalized=dict(argstr="--outputNormalized "), - outputPixelType=dict(argstr="--outputPixelType %s"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - promptUser=dict(argstr="--promptUser "), - registrationFilterType=dict(argstr="--registrationFilterType %s"), - seedForBOBF=dict(argstr="--seedForBOBF %s", sep=","), - smoothDisplacementFieldSigma=dict(argstr="--smoothDisplacementFieldSigma %f"), - 
upFieldSmoothing=dict(argstr="--upFieldSmoothing %f"), - upperThresholdForBOBF=dict(argstr="--upperThresholdForBOBF %d"), - use_vanilla_dem=dict(argstr="--use_vanilla_dem "), - weightFactors=dict(argstr="--weightFactors %s", sep=","), + argstr="--outputDisplacementFieldVolume %s", + hash_files=False, + ), + outputNormalized=dict( + argstr="--outputNormalized ", + ), + outputPixelType=dict( + argstr="--outputPixelType %s", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + promptUser=dict( + argstr="--promptUser ", + ), + registrationFilterType=dict( + argstr="--registrationFilterType %s", + ), + seedForBOBF=dict( + argstr="--seedForBOBF %s", + sep=",", + ), + smoothDisplacementFieldSigma=dict( + argstr="--smoothDisplacementFieldSigma %f", + ), + upFieldSmoothing=dict( + argstr="--upFieldSmoothing %f", + ), + upperThresholdForBOBF=dict( + argstr="--upperThresholdForBOBF %d", + ), + use_vanilla_dem=dict( + argstr="--use_vanilla_dem ", + ), + weightFactors=dict( + argstr="--weightFactors %s", + sep=",", + ), ) inputs = VBRAINSDemonWarp.input_spec() @@ -69,9 +159,15 @@ def test_VBRAINSDemonWarp_inputs(): def test_VBRAINSDemonWarp_outputs(): output_map = dict( - outputCheckerboardVolume=dict(extensions=None), - outputDisplacementFieldVolume=dict(extensions=None), - outputVolume=dict(extensions=None), + outputCheckerboardVolume=dict( + extensions=None, + ), + outputDisplacementFieldVolume=dict( + extensions=None, + ), + outputVolume=dict( + extensions=None, + ), ) outputs = VBRAINSDemonWarp.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py index e93a8f9e7e..5e66b2d82c 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py @@ -4,46 +4,123 @@ def test_BRAINSABC_inputs(): input_map = dict( - args=dict(argstr="%s"), - atlasDefinition=dict(argstr="--atlasDefinition %s", extensions=None), + args=dict( + argstr="%s", + ), + atlasDefinition=dict( + argstr="--atlasDefinition %s", + extensions=None, + ), atlasToSubjectInitialTransform=dict( - argstr="--atlasToSubjectInitialTransform %s", hash_files=False + argstr="--atlasToSubjectInitialTransform %s", + hash_files=False, ), atlasToSubjectTransform=dict( - argstr="--atlasToSubjectTransform %s", hash_files=False - ), - atlasToSubjectTransformType=dict(argstr="--atlasToSubjectTransformType %s"), - atlasWarpingOff=dict(argstr="--atlasWarpingOff "), - debuglevel=dict(argstr="--debuglevel %d"), - defaultSuffix=dict(argstr="--defaultSuffix %s"), - environ=dict(nohash=True, usedefault=True), - filterIteration=dict(argstr="--filterIteration %d"), - filterMethod=dict(argstr="--filterMethod %s"), - filterTimeStep=dict(argstr="--filterTimeStep %f"), - gridSize=dict(argstr="--gridSize %s", sep=","), - implicitOutputs=dict(argstr="--implicitOutputs %s...", hash_files=False), - inputVolumeTypes=dict(argstr="--inputVolumeTypes %s", sep=","), - inputVolumes=dict(argstr="--inputVolumes %s..."), - interpolationMode=dict(argstr="--interpolationMode %s"), - maxBiasDegree=dict(argstr="--maxBiasDegree %d"), - maxIterations=dict(argstr="--maxIterations %d"), - medianFilterSize=dict(argstr="--medianFilterSize %s", sep=","), + argstr="--atlasToSubjectTransform %s", + hash_files=False, + ), + atlasToSubjectTransformType=dict( + argstr="--atlasToSubjectTransformType %s", + ), + atlasWarpingOff=dict( + argstr="--atlasWarpingOff ", + ), + 
debuglevel=dict( + argstr="--debuglevel %d", + ), + defaultSuffix=dict( + argstr="--defaultSuffix %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + filterIteration=dict( + argstr="--filterIteration %d", + ), + filterMethod=dict( + argstr="--filterMethod %s", + ), + filterTimeStep=dict( + argstr="--filterTimeStep %f", + ), + gridSize=dict( + argstr="--gridSize %s", + sep=",", + ), + implicitOutputs=dict( + argstr="--implicitOutputs %s...", + hash_files=False, + ), + inputVolumeTypes=dict( + argstr="--inputVolumeTypes %s", + sep=",", + ), + inputVolumes=dict( + argstr="--inputVolumes %s...", + ), + interpolationMode=dict( + argstr="--interpolationMode %s", + ), + maxBiasDegree=dict( + argstr="--maxBiasDegree %d", + ), + maxIterations=dict( + argstr="--maxIterations %d", + ), + medianFilterSize=dict( + argstr="--medianFilterSize %s", + sep=",", + ), numberOfSubSamplesInEachPlugArea=dict( - argstr="--numberOfSubSamplesInEachPlugArea %s", sep="," - ), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputDir=dict(argstr="--outputDir %s", hash_files=False), - outputDirtyLabels=dict(argstr="--outputDirtyLabels %s", hash_files=False), - outputFormat=dict(argstr="--outputFormat %s"), - outputLabels=dict(argstr="--outputLabels %s", hash_files=False), - outputVolumes=dict(argstr="--outputVolumes %s...", hash_files=False), - posteriorTemplate=dict(argstr="--posteriorTemplate %s"), - purePlugsThreshold=dict(argstr="--purePlugsThreshold %f"), - restoreState=dict(argstr="--restoreState %s", extensions=None), - saveState=dict(argstr="--saveState %s", hash_files=False), - subjectIntermodeTransformType=dict(argstr="--subjectIntermodeTransformType %s"), - useKNN=dict(argstr="--useKNN "), - writeLess=dict(argstr="--writeLess "), + argstr="--numberOfSubSamplesInEachPlugArea %s", + sep=",", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputDir=dict( + argstr="--outputDir %s", + hash_files=False, + ), + outputDirtyLabels=dict( + argstr="--outputDirtyLabels %s", + hash_files=False, + ), + outputFormat=dict( + argstr="--outputFormat %s", + ), + outputLabels=dict( + argstr="--outputLabels %s", + hash_files=False, + ), + outputVolumes=dict( + argstr="--outputVolumes %s...", + hash_files=False, + ), + posteriorTemplate=dict( + argstr="--posteriorTemplate %s", + ), + purePlugsThreshold=dict( + argstr="--purePlugsThreshold %f", + ), + restoreState=dict( + argstr="--restoreState %s", + extensions=None, + ), + saveState=dict( + argstr="--saveState %s", + hash_files=False, + ), + subjectIntermodeTransformType=dict( + argstr="--subjectIntermodeTransformType %s", + ), + useKNN=dict( + argstr="--useKNN ", + ), + writeLess=dict( + argstr="--writeLess ", + ), ) inputs = BRAINSABC.input_spec() @@ -54,14 +131,24 @@ def test_BRAINSABC_inputs(): def test_BRAINSABC_outputs(): output_map = dict( - atlasToSubjectInitialTransform=dict(extensions=None), - atlasToSubjectTransform=dict(extensions=None), + atlasToSubjectInitialTransform=dict( + extensions=None, + ), + atlasToSubjectTransform=dict( + extensions=None, + ), implicitOutputs=dict(), outputDir=dict(), - outputDirtyLabels=dict(extensions=None), - outputLabels=dict(extensions=None), + outputDirtyLabels=dict( + extensions=None, + ), + outputLabels=dict( + extensions=None, + ), outputVolumes=dict(), - saveState=dict(extensions=None), + saveState=dict( + extensions=None, + ), ) outputs = BRAINSABC.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py 
b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py index 6a20f4abfc..2f77f419e7 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py @@ -4,62 +4,155 @@ def test_BRAINSConstellationDetector_inputs(): input_map = dict( - BackgroundFillValue=dict(argstr="--BackgroundFillValue %s"), - LLSModel=dict(argstr="--LLSModel %s", extensions=None), - acLowerBound=dict(argstr="--acLowerBound %f"), - args=dict(argstr="%s"), - atlasLandmarkWeights=dict(argstr="--atlasLandmarkWeights %s", extensions=None), - atlasLandmarks=dict(argstr="--atlasLandmarks %s", extensions=None), - atlasVolume=dict(argstr="--atlasVolume %s", extensions=None), - cutOutHeadInOutputVolume=dict(argstr="--cutOutHeadInOutputVolume "), - debug=dict(argstr="--debug "), - environ=dict(nohash=True, usedefault=True), - forceACPoint=dict(argstr="--forceACPoint %s", sep=","), + BackgroundFillValue=dict( + argstr="--BackgroundFillValue %s", + ), + LLSModel=dict( + argstr="--LLSModel %s", + extensions=None, + ), + acLowerBound=dict( + argstr="--acLowerBound %f", + ), + args=dict( + argstr="%s", + ), + atlasLandmarkWeights=dict( + argstr="--atlasLandmarkWeights %s", + extensions=None, + ), + atlasLandmarks=dict( + argstr="--atlasLandmarks %s", + extensions=None, + ), + atlasVolume=dict( + argstr="--atlasVolume %s", + extensions=None, + ), + cutOutHeadInOutputVolume=dict( + argstr="--cutOutHeadInOutputVolume ", + ), + debug=dict( + argstr="--debug ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + forceACPoint=dict( + argstr="--forceACPoint %s", + sep=",", + ), forceHoughEyeDetectorReportFailure=dict( - argstr="--forceHoughEyeDetectorReportFailure " - ), - forcePCPoint=dict(argstr="--forcePCPoint %s", sep=","), - forceRPPoint=dict(argstr="--forceRPPoint %s", sep=","), - forceVN4Point=dict(argstr="--forceVN4Point %s", sep=","), - houghEyeDetectorMode=dict(argstr="--houghEyeDetectorMode %d"), - inputLandmarksEMSP=dict(argstr="--inputLandmarksEMSP %s", extensions=None), - inputTemplateModel=dict(argstr="--inputTemplateModel %s", extensions=None), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - interpolationMode=dict(argstr="--interpolationMode %s"), - mspQualityLevel=dict(argstr="--mspQualityLevel %d"), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - otsuPercentileThreshold=dict(argstr="--otsuPercentileThreshold %f"), + argstr="--forceHoughEyeDetectorReportFailure ", + ), + forcePCPoint=dict( + argstr="--forcePCPoint %s", + sep=",", + ), + forceRPPoint=dict( + argstr="--forceRPPoint %s", + sep=",", + ), + forceVN4Point=dict( + argstr="--forceVN4Point %s", + sep=",", + ), + houghEyeDetectorMode=dict( + argstr="--houghEyeDetectorMode %d", + ), + inputLandmarksEMSP=dict( + argstr="--inputLandmarksEMSP %s", + extensions=None, + ), + inputTemplateModel=dict( + argstr="--inputTemplateModel %s", + extensions=None, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + interpolationMode=dict( + argstr="--interpolationMode %s", + ), + mspQualityLevel=dict( + argstr="--mspQualityLevel %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + otsuPercentileThreshold=dict( + argstr="--otsuPercentileThreshold %f", + ), outputLandmarksInACPCAlignedSpace=dict( - argstr="--outputLandmarksInACPCAlignedSpace %s", hash_files=False + argstr="--outputLandmarksInACPCAlignedSpace %s", + hash_files=False, ), 
outputLandmarksInInputSpace=dict( - argstr="--outputLandmarksInInputSpace %s", hash_files=False + argstr="--outputLandmarksInInputSpace %s", + hash_files=False, + ), + outputMRML=dict( + argstr="--outputMRML %s", + hash_files=False, ), - outputMRML=dict(argstr="--outputMRML %s", hash_files=False), outputResampledVolume=dict( - argstr="--outputResampledVolume %s", hash_files=False + argstr="--outputResampledVolume %s", + hash_files=False, + ), + outputTransform=dict( + argstr="--outputTransform %s", + hash_files=False, ), - outputTransform=dict(argstr="--outputTransform %s", hash_files=False), outputUntransformedClippedVolume=dict( - argstr="--outputUntransformedClippedVolume %s", hash_files=False + argstr="--outputUntransformedClippedVolume %s", + hash_files=False, ), outputVerificationScript=dict( - argstr="--outputVerificationScript %s", hash_files=False + argstr="--outputVerificationScript %s", + hash_files=False, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + rVN4=dict( + argstr="--rVN4 %f", + ), + rac=dict( + argstr="--rac %f", + ), + rescaleIntensities=dict( + argstr="--rescaleIntensities ", ), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - rVN4=dict(argstr="--rVN4 %f"), - rac=dict(argstr="--rac %f"), - rescaleIntensities=dict(argstr="--rescaleIntensities "), rescaleIntensitiesOutputRange=dict( - argstr="--rescaleIntensitiesOutputRange %s", sep="," - ), - resultsDir=dict(argstr="--resultsDir %s", hash_files=False), - rmpj=dict(argstr="--rmpj %f"), - rpc=dict(argstr="--rpc %f"), - trimRescaledIntensities=dict(argstr="--trimRescaledIntensities %f"), - verbose=dict(argstr="--verbose "), - writeBranded2DImage=dict(argstr="--writeBranded2DImage %s", hash_files=False), - writedebuggingImagesLevel=dict(argstr="--writedebuggingImagesLevel %d"), + argstr="--rescaleIntensitiesOutputRange %s", + sep=",", + ), + resultsDir=dict( + argstr="--resultsDir %s", + hash_files=False, + ), + rmpj=dict( + argstr="--rmpj %f", + ), + rpc=dict( + argstr="--rpc %f", + ), + trimRescaledIntensities=dict( + argstr="--trimRescaledIntensities %f", + ), + verbose=dict( + argstr="--verbose ", + ), + writeBranded2DImage=dict( + argstr="--writeBranded2DImage %s", + hash_files=False, + ), + writedebuggingImagesLevel=dict( + argstr="--writedebuggingImagesLevel %d", + ), ) inputs = BRAINSConstellationDetector.input_spec() @@ -70,16 +163,34 @@ def test_BRAINSConstellationDetector_inputs(): def test_BRAINSConstellationDetector_outputs(): output_map = dict( - outputLandmarksInACPCAlignedSpace=dict(extensions=None), - outputLandmarksInInputSpace=dict(extensions=None), - outputMRML=dict(extensions=None), - outputResampledVolume=dict(extensions=None), - outputTransform=dict(extensions=None), - outputUntransformedClippedVolume=dict(extensions=None), - outputVerificationScript=dict(extensions=None), - outputVolume=dict(extensions=None), + outputLandmarksInACPCAlignedSpace=dict( + extensions=None, + ), + outputLandmarksInInputSpace=dict( + extensions=None, + ), + outputMRML=dict( + extensions=None, + ), + outputResampledVolume=dict( + extensions=None, + ), + outputTransform=dict( + extensions=None, + ), + outputUntransformedClippedVolume=dict( + extensions=None, + ), + outputVerificationScript=dict( + extensions=None, + ), + outputVolume=dict( + extensions=None, + ), resultsDir=dict(), - writeBranded2DImage=dict(extensions=None), + writeBranded2DImage=dict( + extensions=None, + ), ) outputs = BRAINSConstellationDetector.output_spec() diff --git 
a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py index 541d635620..4a395fbc14 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py @@ -4,15 +4,39 @@ def test_BRAINSCreateLabelMapFromProbabilityMaps_inputs(): input_map = dict( - args=dict(argstr="%s"), - cleanLabelVolume=dict(argstr="--cleanLabelVolume %s", hash_files=False), - dirtyLabelVolume=dict(argstr="--dirtyLabelVolume %s", hash_files=False), - environ=dict(nohash=True, usedefault=True), - foregroundPriors=dict(argstr="--foregroundPriors %s", sep=","), - inclusionThreshold=dict(argstr="--inclusionThreshold %f"), - inputProbabilityVolume=dict(argstr="--inputProbabilityVolume %s..."), - nonAirRegionMask=dict(argstr="--nonAirRegionMask %s", extensions=None), - priorLabelCodes=dict(argstr="--priorLabelCodes %s", sep=","), + args=dict( + argstr="%s", + ), + cleanLabelVolume=dict( + argstr="--cleanLabelVolume %s", + hash_files=False, + ), + dirtyLabelVolume=dict( + argstr="--dirtyLabelVolume %s", + hash_files=False, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + foregroundPriors=dict( + argstr="--foregroundPriors %s", + sep=",", + ), + inclusionThreshold=dict( + argstr="--inclusionThreshold %f", + ), + inputProbabilityVolume=dict( + argstr="--inputProbabilityVolume %s...", + ), + nonAirRegionMask=dict( + argstr="--nonAirRegionMask %s", + extensions=None, + ), + priorLabelCodes=dict( + argstr="--priorLabelCodes %s", + sep=",", + ), ) inputs = BRAINSCreateLabelMapFromProbabilityMaps.input_spec() @@ -23,7 +47,12 @@ def test_BRAINSCreateLabelMapFromProbabilityMaps_inputs(): def test_BRAINSCreateLabelMapFromProbabilityMaps_outputs(): output_map = dict( - cleanLabelVolume=dict(extensions=None), dirtyLabelVolume=dict(extensions=None) + cleanLabelVolume=dict( + extensions=None, + ), + dirtyLabelVolume=dict( + extensions=None, + ), ) outputs = BRAINSCreateLabelMapFromProbabilityMaps.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py index 26d1612061..e101cde2cc 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py @@ -4,27 +4,66 @@ def test_BRAINSCut_inputs(): input_map = dict( - NoTrainingVectorShuffling=dict(argstr="--NoTrainingVectorShuffling "), - applyModel=dict(argstr="--applyModel "), - args=dict(argstr="%s"), - computeSSEOn=dict(argstr="--computeSSEOn "), - createVectors=dict(argstr="--createVectors "), - environ=dict(nohash=True, usedefault=True), - generateProbability=dict(argstr="--generateProbability "), - histogramEqualization=dict(argstr="--histogramEqualization "), - method=dict(argstr="--method %s"), + NoTrainingVectorShuffling=dict( + argstr="--NoTrainingVectorShuffling ", + ), + applyModel=dict( + argstr="--applyModel ", + ), + args=dict( + argstr="%s", + ), + computeSSEOn=dict( + argstr="--computeSSEOn ", + ), + createVectors=dict( + argstr="--createVectors ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + generateProbability=dict( + argstr="--generateProbability ", + ), + histogramEqualization=dict( + argstr="--histogramEqualization ", + ), + method=dict( + argstr="--method 
%s", + ), modelConfigurationFilename=dict( - argstr="--modelConfigurationFilename %s", extensions=None - ), - modelFilename=dict(argstr="--modelFilename %s"), - multiStructureThreshold=dict(argstr="--multiStructureThreshold "), - netConfiguration=dict(argstr="--netConfiguration %s", extensions=None), - numberOfTrees=dict(argstr="--numberOfTrees %d"), - randomTreeDepth=dict(argstr="--randomTreeDepth %d"), - trainModel=dict(argstr="--trainModel "), - trainModelStartIndex=dict(argstr="--trainModelStartIndex %d"), - validate=dict(argstr="--validate "), - verbose=dict(argstr="--verbose %d"), + argstr="--modelConfigurationFilename %s", + extensions=None, + ), + modelFilename=dict( + argstr="--modelFilename %s", + ), + multiStructureThreshold=dict( + argstr="--multiStructureThreshold ", + ), + netConfiguration=dict( + argstr="--netConfiguration %s", + extensions=None, + ), + numberOfTrees=dict( + argstr="--numberOfTrees %d", + ), + randomTreeDepth=dict( + argstr="--randomTreeDepth %d", + ), + trainModel=dict( + argstr="--trainModel ", + ), + trainModelStartIndex=dict( + argstr="--trainModelStartIndex %d", + ), + validate=dict( + argstr="--validate ", + ), + verbose=dict( + argstr="--verbose %d", + ), ) inputs = BRAINSCut.input_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py index c52a2cc0d9..6ff468ba47 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py @@ -4,20 +4,40 @@ def test_BRAINSMultiSTAPLE_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), inputCompositeT1Volume=dict( - argstr="--inputCompositeT1Volume %s", extensions=None + argstr="--inputCompositeT1Volume %s", + extensions=None, + ), + inputLabelVolume=dict( + argstr="--inputLabelVolume %s...", + ), + inputTransform=dict( + argstr="--inputTransform %s...", + ), + labelForUndecidedPixels=dict( + argstr="--labelForUndecidedPixels %d", ), - inputLabelVolume=dict(argstr="--inputLabelVolume %s..."), - inputTransform=dict(argstr="--inputTransform %s..."), - labelForUndecidedPixels=dict(argstr="--labelForUndecidedPixels %d"), outputConfusionMatrix=dict( - argstr="--outputConfusionMatrix %s", hash_files=False + argstr="--outputConfusionMatrix %s", + hash_files=False, + ), + outputMultiSTAPLE=dict( + argstr="--outputMultiSTAPLE %s", + hash_files=False, + ), + resampledVolumePrefix=dict( + argstr="--resampledVolumePrefix %s", + ), + skipResampling=dict( + argstr="--skipResampling ", ), - outputMultiSTAPLE=dict(argstr="--outputMultiSTAPLE %s", hash_files=False), - resampledVolumePrefix=dict(argstr="--resampledVolumePrefix %s"), - skipResampling=dict(argstr="--skipResampling "), ) inputs = BRAINSMultiSTAPLE.input_spec() @@ -28,8 +48,12 @@ def test_BRAINSMultiSTAPLE_inputs(): def test_BRAINSMultiSTAPLE_outputs(): output_map = dict( - outputConfusionMatrix=dict(extensions=None), - outputMultiSTAPLE=dict(extensions=None), + outputConfusionMatrix=dict( + extensions=None, + ), + outputMultiSTAPLE=dict( + extensions=None, + ), ) outputs = BRAINSMultiSTAPLE.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py index 55643c74d4..f65c8c9b88 100644 --- 
a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py @@ -4,19 +4,49 @@ def test_BRAINSROIAuto_inputs(): input_map = dict( - ROIAutoDilateSize=dict(argstr="--ROIAutoDilateSize %f"), - args=dict(argstr="%s"), - closingSize=dict(argstr="--closingSize %f"), - cropOutput=dict(argstr="--cropOutput "), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - maskOutput=dict(argstr="--maskOutput "), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - otsuPercentileThreshold=dict(argstr="--otsuPercentileThreshold %f"), - outputROIMaskVolume=dict(argstr="--outputROIMaskVolume %s", hash_files=False), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - outputVolumePixelType=dict(argstr="--outputVolumePixelType %s"), - thresholdCorrectionFactor=dict(argstr="--thresholdCorrectionFactor %f"), + ROIAutoDilateSize=dict( + argstr="--ROIAutoDilateSize %f", + ), + args=dict( + argstr="%s", + ), + closingSize=dict( + argstr="--closingSize %f", + ), + cropOutput=dict( + argstr="--cropOutput ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + maskOutput=dict( + argstr="--maskOutput ", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + otsuPercentileThreshold=dict( + argstr="--otsuPercentileThreshold %f", + ), + outputROIMaskVolume=dict( + argstr="--outputROIMaskVolume %s", + hash_files=False, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + outputVolumePixelType=dict( + argstr="--outputVolumePixelType %s", + ), + thresholdCorrectionFactor=dict( + argstr="--thresholdCorrectionFactor %f", + ), ) inputs = BRAINSROIAuto.input_spec() @@ -27,7 +57,12 @@ def test_BRAINSROIAuto_inputs(): def test_BRAINSROIAuto_outputs(): output_map = dict( - outputROIMaskVolume=dict(extensions=None), outputVolume=dict(extensions=None) + outputROIMaskVolume=dict( + extensions=None, + ), + outputVolume=dict( + extensions=None, + ), ) outputs = BRAINSROIAuto.output_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py index a0ad0610f8..1dd20adecd 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py @@ -4,22 +4,40 @@ def test_BinaryMaskEditorBasedOnLandmarks_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputBinaryVolume=dict(argstr="--inputBinaryVolume %s", extensions=None), - inputLandmarkNames=dict(argstr="--inputLandmarkNames %s", sep=","), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputBinaryVolume=dict( + argstr="--inputBinaryVolume %s", + extensions=None, + ), + inputLandmarkNames=dict( + argstr="--inputLandmarkNames %s", + sep=",", + ), inputLandmarkNamesForObliquePlane=dict( - argstr="--inputLandmarkNamesForObliquePlane %s", sep="," + argstr="--inputLandmarkNamesForObliquePlane %s", + sep=",", ), inputLandmarksFilename=dict( - argstr="--inputLandmarksFilename %s", extensions=None + argstr="--inputLandmarksFilename %s", + extensions=None, + ), + outputBinaryVolume=dict( + argstr="--outputBinaryVolume %s", + hash_files=False, 
), - outputBinaryVolume=dict(argstr="--outputBinaryVolume %s", hash_files=False), setCutDirectionForLandmark=dict( - argstr="--setCutDirectionForLandmark %s", sep="," + argstr="--setCutDirectionForLandmark %s", + sep=",", ), setCutDirectionForObliquePlane=dict( - argstr="--setCutDirectionForObliquePlane %s", sep="," + argstr="--setCutDirectionForObliquePlane %s", + sep=",", ), ) inputs = BinaryMaskEditorBasedOnLandmarks.input_spec() @@ -30,7 +48,11 @@ def test_BinaryMaskEditorBasedOnLandmarks_inputs(): def test_BinaryMaskEditorBasedOnLandmarks_outputs(): - output_map = dict(outputBinaryVolume=dict(extensions=None)) + output_map = dict( + outputBinaryVolume=dict( + extensions=None, + ), + ) outputs = BinaryMaskEditorBasedOnLandmarks.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py index 8ce8cdac7e..272327f4db 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py @@ -4,17 +4,42 @@ def test_ESLR_inputs(): input_map = dict( - args=dict(argstr="%s"), - closingSize=dict(argstr="--closingSize %d"), - environ=dict(nohash=True, usedefault=True), - high=dict(argstr="--high %d"), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - low=dict(argstr="--low %d"), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - openingSize=dict(argstr="--openingSize %d"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - preserveOutside=dict(argstr="--preserveOutside "), - safetySize=dict(argstr="--safetySize %d"), + args=dict( + argstr="%s", + ), + closingSize=dict( + argstr="--closingSize %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + high=dict( + argstr="--high %d", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + low=dict( + argstr="--low %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + openingSize=dict( + argstr="--openingSize %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + preserveOutside=dict( + argstr="--preserveOutside ", + ), + safetySize=dict( + argstr="--safetySize %d", + ), ) inputs = ESLR.input_spec() @@ -24,7 +49,11 @@ def test_ESLR_inputs(): def test_ESLR_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = ESLR.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/tests/test_auto_DWICompare.py b/nipype/interfaces/semtools/tests/test_auto_DWICompare.py index 5b4312b22e..b691c097a8 100644 --- a/nipype/interfaces/semtools/tests/test_auto_DWICompare.py +++ b/nipype/interfaces/semtools/tests/test_auto_DWICompare.py @@ -4,10 +4,21 @@ def test_DWICompare_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputVolume1=dict(argstr="--inputVolume1 %s", extensions=None), - inputVolume2=dict(argstr="--inputVolume2 %s", extensions=None), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume1=dict( + argstr="--inputVolume1 %s", + extensions=None, + ), + inputVolume2=dict( + argstr="--inputVolume2 %s", + extensions=None, + ), ) inputs = DWICompare.input_spec() diff --git a/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py 
b/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py index 3c2818ed53..ad4dcb12d4 100644 --- a/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py +++ b/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py @@ -4,11 +4,24 @@ def test_DWISimpleCompare_inputs(): input_map = dict( - args=dict(argstr="%s"), - checkDWIData=dict(argstr="--checkDWIData "), - environ=dict(nohash=True, usedefault=True), - inputVolume1=dict(argstr="--inputVolume1 %s", extensions=None), - inputVolume2=dict(argstr="--inputVolume2 %s", extensions=None), + args=dict( + argstr="%s", + ), + checkDWIData=dict( + argstr="--checkDWIData ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume1=dict( + argstr="--inputVolume1 %s", + extensions=None, + ), + inputVolume2=dict( + argstr="--inputVolume2 %s", + extensions=None, + ), ) inputs = DWISimpleCompare.input_spec() diff --git a/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py b/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py index 414fbe1291..3f857d8085 100644 --- a/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py +++ b/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py @@ -4,10 +4,21 @@ def test_GenerateCsfClippedFromClassifiedImage_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputCassifiedVolume=dict(argstr="--inputCassifiedVolume %s", extensions=None), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputCassifiedVolume=dict( + argstr="--inputCassifiedVolume %s", + extensions=None, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = GenerateCsfClippedFromClassifiedImage.input_spec() @@ -17,7 +28,11 @@ def test_GenerateCsfClippedFromClassifiedImage_inputs(): def test_GenerateCsfClippedFromClassifiedImage_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = GenerateCsfClippedFromClassifiedImage.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py index 7464c0cc00..81fee98e93 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py @@ -4,22 +4,53 @@ def test_BRAINSAlignMSP_inputs(): input_map = dict( - BackgroundFillValue=dict(argstr="--BackgroundFillValue %s"), - OutputresampleMSP=dict(argstr="--OutputresampleMSP %s", hash_files=False), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - interpolationMode=dict(argstr="--interpolationMode %s"), - mspQualityLevel=dict(argstr="--mspQualityLevel %d"), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - rescaleIntensities=dict(argstr="--rescaleIntensities "), + BackgroundFillValue=dict( + argstr="--BackgroundFillValue %s", + ), + OutputresampleMSP=dict( + argstr="--OutputresampleMSP %s", + hash_files=False, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + 
interpolationMode=dict( + argstr="--interpolationMode %s", + ), + mspQualityLevel=dict( + argstr="--mspQualityLevel %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + rescaleIntensities=dict( + argstr="--rescaleIntensities ", + ), rescaleIntensitiesOutputRange=dict( - argstr="--rescaleIntensitiesOutputRange %s", sep="," + argstr="--rescaleIntensitiesOutputRange %s", + sep=",", + ), + resultsDir=dict( + argstr="--resultsDir %s", + hash_files=False, + ), + trimRescaledIntensities=dict( + argstr="--trimRescaledIntensities %f", + ), + verbose=dict( + argstr="--verbose ", + ), + writedebuggingImagesLevel=dict( + argstr="--writedebuggingImagesLevel %d", ), - resultsDir=dict(argstr="--resultsDir %s", hash_files=False), - trimRescaledIntensities=dict(argstr="--trimRescaledIntensities %f"), - verbose=dict(argstr="--verbose "), - writedebuggingImagesLevel=dict(argstr="--writedebuggingImagesLevel %d"), ) inputs = BRAINSAlignMSP.input_spec() @@ -29,7 +60,12 @@ def test_BRAINSAlignMSP_inputs(): def test_BRAINSAlignMSP_outputs(): - output_map = dict(OutputresampleMSP=dict(extensions=None), resultsDir=dict()) + output_map = dict( + OutputresampleMSP=dict( + extensions=None, + ), + resultsDir=dict(), + ) outputs = BRAINSAlignMSP.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py index 645b2c0664..92e45758b1 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py @@ -4,13 +4,30 @@ def test_BRAINSClipInferior_inputs(): input_map = dict( - BackgroundFillValue=dict(argstr="--BackgroundFillValue %s"), - acLowerBound=dict(argstr="--acLowerBound %f"), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + BackgroundFillValue=dict( + argstr="--BackgroundFillValue %s", + ), + acLowerBound=dict( + argstr="--acLowerBound %f", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = BRAINSClipInferior.input_spec() @@ -20,7 +37,11 @@ def test_BRAINSClipInferior_inputs(): def test_BRAINSClipInferior_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = BRAINSClipInferior.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py index ec9a115c16..2e26a91e05 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py @@ -4,25 +4,56 @@ def test_BRAINSConstellationModeler_inputs(): input_map = dict( - BackgroundFillValue=dict(argstr="--BackgroundFillValue %s"), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - 
inputTrainingList=dict(argstr="--inputTrainingList %s", extensions=None), - mspQualityLevel=dict(argstr="--mspQualityLevel %d"), - numberOfThreads=dict(argstr="--numberOfThreads %d"), + BackgroundFillValue=dict( + argstr="--BackgroundFillValue %s", + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputTrainingList=dict( + argstr="--inputTrainingList %s", + extensions=None, + ), + mspQualityLevel=dict( + argstr="--mspQualityLevel %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), optimizedLandmarksFilenameExtender=dict( - argstr="--optimizedLandmarksFilenameExtender %s" + argstr="--optimizedLandmarksFilenameExtender %s", + ), + outputModel=dict( + argstr="--outputModel %s", + hash_files=False, + ), + rescaleIntensities=dict( + argstr="--rescaleIntensities ", ), - outputModel=dict(argstr="--outputModel %s", hash_files=False), - rescaleIntensities=dict(argstr="--rescaleIntensities "), rescaleIntensitiesOutputRange=dict( - argstr="--rescaleIntensitiesOutputRange %s", sep="," + argstr="--rescaleIntensitiesOutputRange %s", + sep=",", + ), + resultsDir=dict( + argstr="--resultsDir %s", + hash_files=False, + ), + saveOptimizedLandmarks=dict( + argstr="--saveOptimizedLandmarks ", + ), + trimRescaledIntensities=dict( + argstr="--trimRescaledIntensities %f", + ), + verbose=dict( + argstr="--verbose ", + ), + writedebuggingImagesLevel=dict( + argstr="--writedebuggingImagesLevel %d", ), - resultsDir=dict(argstr="--resultsDir %s", hash_files=False), - saveOptimizedLandmarks=dict(argstr="--saveOptimizedLandmarks "), - trimRescaledIntensities=dict(argstr="--trimRescaledIntensities %f"), - verbose=dict(argstr="--verbose "), - writedebuggingImagesLevel=dict(argstr="--writedebuggingImagesLevel %d"), ) inputs = BRAINSConstellationModeler.input_spec() @@ -32,7 +63,12 @@ def test_BRAINSConstellationModeler_inputs(): def test_BRAINSConstellationModeler_outputs(): - output_map = dict(outputModel=dict(extensions=None), resultsDir=dict()) + output_map = dict( + outputModel=dict( + extensions=None, + ), + resultsDir=dict(), + ) outputs = BRAINSConstellationModeler.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py index 2de2ff7fd0..c511be64c5 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py @@ -4,12 +4,27 @@ def test_BRAINSEyeDetector_inputs(): input_map = dict( - args=dict(argstr="%s"), - debugDir=dict(argstr="--debugDir %s"), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + args=dict( + argstr="%s", + ), + debugDir=dict( + argstr="--debugDir %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = BRAINSEyeDetector.input_spec() @@ -19,7 +34,11 @@ def test_BRAINSEyeDetector_inputs(): def test_BRAINSEyeDetector_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = 
BRAINSEyeDetector.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py index db35fef86e..05ba1ae7d6 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py @@ -4,14 +4,35 @@ def test_BRAINSInitializedControlPoints_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputLandmarksFile=dict(argstr="--outputLandmarksFile %s"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - permuteOrder=dict(argstr="--permuteOrder %s", sep=","), - splineGridSize=dict(argstr="--splineGridSize %s", sep=","), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputLandmarksFile=dict( + argstr="--outputLandmarksFile %s", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + permuteOrder=dict( + argstr="--permuteOrder %s", + sep=",", + ), + splineGridSize=dict( + argstr="--splineGridSize %s", + sep=",", + ), ) inputs = BRAINSInitializedControlPoints.input_spec() @@ -21,7 +42,11 @@ def test_BRAINSInitializedControlPoints_inputs(): def test_BRAINSInitializedControlPoints_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = BRAINSInitializedControlPoints.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py index 75bdf24ae5..efaa484008 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py @@ -4,17 +4,28 @@ def test_BRAINSLandmarkInitializer_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), inputFixedLandmarkFilename=dict( - argstr="--inputFixedLandmarkFilename %s", extensions=None + argstr="--inputFixedLandmarkFilename %s", + extensions=None, ), inputMovingLandmarkFilename=dict( - argstr="--inputMovingLandmarkFilename %s", extensions=None + argstr="--inputMovingLandmarkFilename %s", + extensions=None, + ), + inputWeightFilename=dict( + argstr="--inputWeightFilename %s", + extensions=None, ), - inputWeightFilename=dict(argstr="--inputWeightFilename %s", extensions=None), outputTransformFilename=dict( - argstr="--outputTransformFilename %s", hash_files=False + argstr="--outputTransformFilename %s", + hash_files=False, ), ) inputs = BRAINSLandmarkInitializer.input_spec() @@ -25,7 +36,11 @@ def test_BRAINSLandmarkInitializer_inputs(): def test_BRAINSLandmarkInitializer_outputs(): - output_map = dict(outputTransformFilename=dict(extensions=None)) + output_map = dict( + outputTransformFilename=dict( + extensions=None, + ), + ) outputs = 
BRAINSLandmarkInitializer.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py index 911dad6116..beb7a5f664 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py @@ -4,10 +4,20 @@ def test_BRAINSLinearModelerEPCA_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputTrainingList=dict(argstr="--inputTrainingList %s", extensions=None), - numberOfThreads=dict(argstr="--numberOfThreads %d"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputTrainingList=dict( + argstr="--inputTrainingList %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), ) inputs = BRAINSLinearModelerEPCA.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py index 58c035444a..d0cdc8cc7f 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py @@ -4,18 +4,39 @@ def test_BRAINSLmkTransform_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputFixedLandmarks=dict(argstr="--inputFixedLandmarks %s", extensions=None), - inputMovingLandmarks=dict(argstr="--inputMovingLandmarks %s", extensions=None), - inputMovingVolume=dict(argstr="--inputMovingVolume %s", extensions=None), - inputReferenceVolume=dict(argstr="--inputReferenceVolume %s", extensions=None), - numberOfThreads=dict(argstr="--numberOfThreads %d"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputFixedLandmarks=dict( + argstr="--inputFixedLandmarks %s", + extensions=None, + ), + inputMovingLandmarks=dict( + argstr="--inputMovingLandmarks %s", + extensions=None, + ), + inputMovingVolume=dict( + argstr="--inputMovingVolume %s", + extensions=None, + ), + inputReferenceVolume=dict( + argstr="--inputReferenceVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), outputAffineTransform=dict( - argstr="--outputAffineTransform %s", hash_files=False + argstr="--outputAffineTransform %s", + hash_files=False, ), outputResampledVolume=dict( - argstr="--outputResampledVolume %s", hash_files=False + argstr="--outputResampledVolume %s", + hash_files=False, ), ) inputs = BRAINSLmkTransform.input_spec() @@ -27,8 +48,12 @@ def test_BRAINSLmkTransform_inputs(): def test_BRAINSLmkTransform_outputs(): output_map = dict( - outputAffineTransform=dict(extensions=None), - outputResampledVolume=dict(extensions=None), + outputAffineTransform=dict( + extensions=None, + ), + outputResampledVolume=dict( + extensions=None, + ), ) outputs = BRAINSLmkTransform.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py index 85181b2078..e6eac9cf2d 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py @@ -4,24 +4,70 @@ def test_BRAINSMush_inputs(): input_map = dict( - args=dict(argstr="%s"), - 
boundingBoxSize=dict(argstr="--boundingBoxSize %s", sep=","), - boundingBoxStart=dict(argstr="--boundingBoxStart %s", sep=","), - desiredMean=dict(argstr="--desiredMean %f"), - desiredVariance=dict(argstr="--desiredVariance %f"), - environ=dict(nohash=True, usedefault=True), - inputFirstVolume=dict(argstr="--inputFirstVolume %s", extensions=None), - inputMaskVolume=dict(argstr="--inputMaskVolume %s", extensions=None), - inputSecondVolume=dict(argstr="--inputSecondVolume %s", extensions=None), - lowerThresholdFactor=dict(argstr="--lowerThresholdFactor %f"), - lowerThresholdFactorPre=dict(argstr="--lowerThresholdFactorPre %f"), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputMask=dict(argstr="--outputMask %s", hash_files=False), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - outputWeightsFile=dict(argstr="--outputWeightsFile %s", hash_files=False), - seed=dict(argstr="--seed %s", sep=","), - upperThresholdFactor=dict(argstr="--upperThresholdFactor %f"), - upperThresholdFactorPre=dict(argstr="--upperThresholdFactorPre %f"), + args=dict( + argstr="%s", + ), + boundingBoxSize=dict( + argstr="--boundingBoxSize %s", + sep=",", + ), + boundingBoxStart=dict( + argstr="--boundingBoxStart %s", + sep=",", + ), + desiredMean=dict( + argstr="--desiredMean %f", + ), + desiredVariance=dict( + argstr="--desiredVariance %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputFirstVolume=dict( + argstr="--inputFirstVolume %s", + extensions=None, + ), + inputMaskVolume=dict( + argstr="--inputMaskVolume %s", + extensions=None, + ), + inputSecondVolume=dict( + argstr="--inputSecondVolume %s", + extensions=None, + ), + lowerThresholdFactor=dict( + argstr="--lowerThresholdFactor %f", + ), + lowerThresholdFactorPre=dict( + argstr="--lowerThresholdFactorPre %f", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputMask=dict( + argstr="--outputMask %s", + hash_files=False, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + outputWeightsFile=dict( + argstr="--outputWeightsFile %s", + hash_files=False, + ), + seed=dict( + argstr="--seed %s", + sep=",", + ), + upperThresholdFactor=dict( + argstr="--upperThresholdFactor %f", + ), + upperThresholdFactorPre=dict( + argstr="--upperThresholdFactorPre %f", + ), ) inputs = BRAINSMush.input_spec() @@ -32,9 +78,15 @@ def test_BRAINSMush_inputs(): def test_BRAINSMush_outputs(): output_map = dict( - outputMask=dict(extensions=None), - outputVolume=dict(extensions=None), - outputWeightsFile=dict(extensions=None), + outputMask=dict( + extensions=None, + ), + outputVolume=dict( + extensions=None, + ), + outputWeightsFile=dict( + extensions=None, + ), ) outputs = BRAINSMush.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py index a18785c071..a120b062a4 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py @@ -4,21 +4,39 @@ def test_BRAINSSnapShotWriter_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputBinaryVolumes=dict(argstr="--inputBinaryVolumes %s..."), - inputPlaneDirection=dict(argstr="--inputPlaneDirection %s", sep=","), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputBinaryVolumes=dict( + argstr="--inputBinaryVolumes 
%s...", + ), + inputPlaneDirection=dict( + argstr="--inputPlaneDirection %s", + sep=",", + ), inputSliceToExtractInIndex=dict( - argstr="--inputSliceToExtractInIndex %s", sep="," + argstr="--inputSliceToExtractInIndex %s", + sep=",", ), inputSliceToExtractInPercent=dict( - argstr="--inputSliceToExtractInPercent %s", sep="," + argstr="--inputSliceToExtractInPercent %s", + sep=",", ), inputSliceToExtractInPhysicalPoint=dict( - argstr="--inputSliceToExtractInPhysicalPoint %s", sep="," + argstr="--inputSliceToExtractInPhysicalPoint %s", + sep=",", + ), + inputVolumes=dict( + argstr="--inputVolumes %s...", + ), + outputFilename=dict( + argstr="--outputFilename %s", + hash_files=False, ), - inputVolumes=dict(argstr="--inputVolumes %s..."), - outputFilename=dict(argstr="--outputFilename %s", hash_files=False), ) inputs = BRAINSSnapShotWriter.input_spec() @@ -28,7 +46,11 @@ def test_BRAINSSnapShotWriter_inputs(): def test_BRAINSSnapShotWriter_outputs(): - output_map = dict(outputFilename=dict(extensions=None)) + output_map = dict( + outputFilename=dict( + extensions=None, + ), + ) outputs = BRAINSSnapShotWriter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py index d47e38deb3..1eebbb0cec 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py @@ -4,14 +4,35 @@ def test_BRAINSTransformConvert_inputs(): input_map = dict( - args=dict(argstr="%s"), - displacementVolume=dict(argstr="--displacementVolume %s", hash_files=False), - environ=dict(nohash=True, usedefault=True), - inputTransform=dict(argstr="--inputTransform %s", extensions=None), - outputPrecisionType=dict(argstr="--outputPrecisionType %s"), - outputTransform=dict(argstr="--outputTransform %s", hash_files=False), - outputTransformType=dict(argstr="--outputTransformType %s"), - referenceVolume=dict(argstr="--referenceVolume %s", extensions=None), + args=dict( + argstr="%s", + ), + displacementVolume=dict( + argstr="--displacementVolume %s", + hash_files=False, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputTransform=dict( + argstr="--inputTransform %s", + extensions=None, + ), + outputPrecisionType=dict( + argstr="--outputPrecisionType %s", + ), + outputTransform=dict( + argstr="--outputTransform %s", + hash_files=False, + ), + outputTransformType=dict( + argstr="--outputTransformType %s", + ), + referenceVolume=dict( + argstr="--referenceVolume %s", + extensions=None, + ), ) inputs = BRAINSTransformConvert.input_spec() @@ -22,7 +43,12 @@ def test_BRAINSTransformConvert_inputs(): def test_BRAINSTransformConvert_outputs(): output_map = dict( - displacementVolume=dict(extensions=None), outputTransform=dict(extensions=None) + displacementVolume=dict( + extensions=None, + ), + outputTransform=dict( + extensions=None, + ), ) outputs = BRAINSTransformConvert.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py index ee4699326d..fa68f51e21 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py @@ -4,16 +4,39 @@ def 
test_BRAINSTrimForegroundInDirection_inputs(): input_map = dict( - BackgroundFillValue=dict(argstr="--BackgroundFillValue %s"), - args=dict(argstr="%s"), - closingSize=dict(argstr="--closingSize %d"), - directionCode=dict(argstr="--directionCode %d"), - environ=dict(nohash=True, usedefault=True), - headSizeLimit=dict(argstr="--headSizeLimit %f"), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - otsuPercentileThreshold=dict(argstr="--otsuPercentileThreshold %f"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), + BackgroundFillValue=dict( + argstr="--BackgroundFillValue %s", + ), + args=dict( + argstr="%s", + ), + closingSize=dict( + argstr="--closingSize %d", + ), + directionCode=dict( + argstr="--directionCode %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + headSizeLimit=dict( + argstr="--headSizeLimit %f", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + otsuPercentileThreshold=dict( + argstr="--otsuPercentileThreshold %f", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), ) inputs = BRAINSTrimForegroundInDirection.input_spec() @@ -23,7 +46,11 @@ def test_BRAINSTrimForegroundInDirection_inputs(): def test_BRAINSTrimForegroundInDirection_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = BRAINSTrimForegroundInDirection.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py b/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py index 78fda5d9ad..14ec09298c 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py @@ -4,11 +4,19 @@ def test_CleanUpOverlapLabels_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputBinaryVolumes=dict(argstr="--inputBinaryVolumes %s..."), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputBinaryVolumes=dict( + argstr="--inputBinaryVolumes %s...", + ), outputBinaryVolumes=dict( - argstr="--outputBinaryVolumes %s...", hash_files=False + argstr="--outputBinaryVolumes %s...", + hash_files=False, ), ) inputs = CleanUpOverlapLabels.input_spec() @@ -19,7 +27,9 @@ def test_CleanUpOverlapLabels_inputs(): def test_CleanUpOverlapLabels_outputs(): - output_map = dict(outputBinaryVolumes=dict()) + output_map = dict( + outputBinaryVolumes=dict(), + ) outputs = CleanUpOverlapLabels.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py b/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py index 5ae2523f7f..4a0d7c89c2 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py @@ -4,28 +4,69 @@ def test_FindCenterOfBrain_inputs(): input_map = dict( - args=dict(argstr="%s"), - axis=dict(argstr="--axis %d"), - backgroundValue=dict(argstr="--backgroundValue %d"), - clippedImageMask=dict(argstr="--clippedImageMask %s", hash_files=False), - closingSize=dict(argstr="--closingSize %d"), + args=dict( + argstr="%s", + 
), + axis=dict( + argstr="--axis %d", + ), + backgroundValue=dict( + argstr="--backgroundValue %d", + ), + clippedImageMask=dict( + argstr="--clippedImageMask %s", + hash_files=False, + ), + closingSize=dict( + argstr="--closingSize %d", + ), debugAfterGridComputationsForegroundImage=dict( - argstr="--debugAfterGridComputationsForegroundImage %s", hash_files=False + argstr="--debugAfterGridComputationsForegroundImage %s", + hash_files=False, ), debugClippedImageMask=dict( - argstr="--debugClippedImageMask %s", hash_files=False - ), - debugDistanceImage=dict(argstr="--debugDistanceImage %s", hash_files=False), - debugGridImage=dict(argstr="--debugGridImage %s", hash_files=False), - debugTrimmedImage=dict(argstr="--debugTrimmedImage %s", hash_files=False), - environ=dict(nohash=True, usedefault=True), - generateDebugImages=dict(argstr="--generateDebugImages "), - headSizeEstimate=dict(argstr="--headSizeEstimate %f"), - headSizeLimit=dict(argstr="--headSizeLimit %f"), - imageMask=dict(argstr="--imageMask %s", extensions=None), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - maximize=dict(argstr="--maximize "), - otsuPercentileThreshold=dict(argstr="--otsuPercentileThreshold %f"), + argstr="--debugClippedImageMask %s", + hash_files=False, + ), + debugDistanceImage=dict( + argstr="--debugDistanceImage %s", + hash_files=False, + ), + debugGridImage=dict( + argstr="--debugGridImage %s", + hash_files=False, + ), + debugTrimmedImage=dict( + argstr="--debugTrimmedImage %s", + hash_files=False, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + generateDebugImages=dict( + argstr="--generateDebugImages ", + ), + headSizeEstimate=dict( + argstr="--headSizeEstimate %f", + ), + headSizeLimit=dict( + argstr="--headSizeLimit %f", + ), + imageMask=dict( + argstr="--imageMask %s", + extensions=None, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + maximize=dict( + argstr="--maximize ", + ), + otsuPercentileThreshold=dict( + argstr="--otsuPercentileThreshold %f", + ), ) inputs = FindCenterOfBrain.input_spec() @@ -36,12 +77,24 @@ def test_FindCenterOfBrain_inputs(): def test_FindCenterOfBrain_outputs(): output_map = dict( - clippedImageMask=dict(extensions=None), - debugAfterGridComputationsForegroundImage=dict(extensions=None), - debugClippedImageMask=dict(extensions=None), - debugDistanceImage=dict(extensions=None), - debugGridImage=dict(extensions=None), - debugTrimmedImage=dict(extensions=None), + clippedImageMask=dict( + extensions=None, + ), + debugAfterGridComputationsForegroundImage=dict( + extensions=None, + ), + debugClippedImageMask=dict( + extensions=None, + ), + debugDistanceImage=dict( + extensions=None, + ), + debugGridImage=dict( + extensions=None, + ), + debugTrimmedImage=dict( + extensions=None, + ), ) outputs = FindCenterOfBrain.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py b/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py index ecdc4dd682..d3840ec7bd 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py @@ -4,11 +4,23 @@ def test_GenerateLabelMapFromProbabilityMap_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputVolumes=dict(argstr="--inputVolumes %s..."), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - 
outputLabelVolume=dict(argstr="--outputLabelVolume %s", hash_files=False), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolumes=dict( + argstr="--inputVolumes %s...", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputLabelVolume=dict( + argstr="--outputLabelVolume %s", + hash_files=False, + ), ) inputs = GenerateLabelMapFromProbabilityMap.input_spec() @@ -18,7 +30,11 @@ def test_GenerateLabelMapFromProbabilityMap_inputs(): def test_GenerateLabelMapFromProbabilityMap_outputs(): - output_map = dict(outputLabelVolume=dict(extensions=None)) + output_map = dict( + outputLabelVolume=dict( + extensions=None, + ), + ) outputs = GenerateLabelMapFromProbabilityMap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py b/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py index 2159df3114..5b71204a67 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py @@ -4,17 +4,44 @@ def test_ImageRegionPlotter_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputBinaryROIVolume=dict(argstr="--inputBinaryROIVolume %s", extensions=None), - inputLabelVolume=dict(argstr="--inputLabelVolume %s", extensions=None), - inputVolume1=dict(argstr="--inputVolume1 %s", extensions=None), - inputVolume2=dict(argstr="--inputVolume2 %s", extensions=None), - numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d"), - outputJointHistogramData=dict(argstr="--outputJointHistogramData %s"), - useIntensityForHistogram=dict(argstr="--useIntensityForHistogram "), - useROIAUTO=dict(argstr="--useROIAUTO "), - verbose=dict(argstr="--verbose "), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputBinaryROIVolume=dict( + argstr="--inputBinaryROIVolume %s", + extensions=None, + ), + inputLabelVolume=dict( + argstr="--inputLabelVolume %s", + extensions=None, + ), + inputVolume1=dict( + argstr="--inputVolume1 %s", + extensions=None, + ), + inputVolume2=dict( + argstr="--inputVolume2 %s", + extensions=None, + ), + numberOfHistogramBins=dict( + argstr="--numberOfHistogramBins %d", + ), + outputJointHistogramData=dict( + argstr="--outputJointHistogramData %s", + ), + useIntensityForHistogram=dict( + argstr="--useIntensityForHistogram ", + ), + useROIAUTO=dict( + argstr="--useROIAUTO ", + ), + verbose=dict( + argstr="--verbose ", + ), ) inputs = ImageRegionPlotter.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py b/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py index 33ba5789f6..d5e61c867a 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py @@ -4,18 +4,35 @@ def test_JointHistogram_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), inputMaskVolumeInXAxis=dict( - argstr="--inputMaskVolumeInXAxis %s", extensions=None + argstr="--inputMaskVolumeInXAxis %s", + extensions=None, ), inputMaskVolumeInYAxis=dict( - argstr="--inputMaskVolumeInYAxis %s", extensions=None + argstr="--inputMaskVolumeInYAxis %s", + extensions=None, + ), + 
inputVolumeInXAxis=dict( + argstr="--inputVolumeInXAxis %s", + extensions=None, + ), + inputVolumeInYAxis=dict( + argstr="--inputVolumeInYAxis %s", + extensions=None, + ), + outputJointHistogramImage=dict( + argstr="--outputJointHistogramImage %s", + ), + verbose=dict( + argstr="--verbose ", ), - inputVolumeInXAxis=dict(argstr="--inputVolumeInXAxis %s", extensions=None), - inputVolumeInYAxis=dict(argstr="--inputVolumeInYAxis %s", extensions=None), - outputJointHistogramImage=dict(argstr="--outputJointHistogramImage %s"), - verbose=dict(argstr="--verbose "), ) inputs = JointHistogram.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py b/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py index 77f9ceb548..4b7d3431bd 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py @@ -4,15 +4,24 @@ def test_ShuffleVectorsModule_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), inputVectorFileBaseName=dict( - argstr="--inputVectorFileBaseName %s", extensions=None + argstr="--inputVectorFileBaseName %s", + extensions=None, ), outputVectorFileBaseName=dict( - argstr="--outputVectorFileBaseName %s", hash_files=False + argstr="--outputVectorFileBaseName %s", + hash_files=False, + ), + resampleProportion=dict( + argstr="--resampleProportion %f", ), - resampleProportion=dict(argstr="--resampleProportion %f"), ) inputs = ShuffleVectorsModule.input_spec() @@ -22,7 +31,11 @@ def test_ShuffleVectorsModule_inputs(): def test_ShuffleVectorsModule_outputs(): - output_map = dict(outputVectorFileBaseName=dict(extensions=None)) + output_map = dict( + outputVectorFileBaseName=dict( + extensions=None, + ), + ) outputs = ShuffleVectorsModule.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py b/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py index c95c635974..896f630839 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py @@ -4,16 +4,34 @@ def test_fcsv_to_hdf5_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - landmarkGlobPattern=dict(argstr="--landmarkGlobPattern %s"), - landmarkTypesList=dict(argstr="--landmarkTypesList %s", extensions=None), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + landmarkGlobPattern=dict( + argstr="--landmarkGlobPattern %s", + ), + landmarkTypesList=dict( + argstr="--landmarkTypesList %s", + extensions=None, + ), landmarksInformationFile=dict( - argstr="--landmarksInformationFile %s", hash_files=False + argstr="--landmarksInformationFile %s", + hash_files=False, + ), + modelFile=dict( + argstr="--modelFile %s", + hash_files=False, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + versionID=dict( + argstr="--versionID %s", ), - modelFile=dict(argstr="--modelFile %s", hash_files=False), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - versionID=dict(argstr="--versionID %s"), ) inputs = fcsv_to_hdf5.input_spec() @@ -24,7 +42,12 @@ def test_fcsv_to_hdf5_inputs(): def test_fcsv_to_hdf5_outputs(): output_map = dict( - 
landmarksInformationFile=dict(extensions=None), modelFile=dict(extensions=None) + landmarksInformationFile=dict( + extensions=None, + ), + modelFile=dict( + extensions=None, + ), ) outputs = fcsv_to_hdf5.output_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py b/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py index 39b0fa64bc..05aa2a3910 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py @@ -4,10 +4,21 @@ def test_insertMidACPCpoint_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputLandmarkFile=dict(argstr="--inputLandmarkFile %s", extensions=None), - outputLandmarkFile=dict(argstr="--outputLandmarkFile %s", hash_files=False), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputLandmarkFile=dict( + argstr="--inputLandmarkFile %s", + extensions=None, + ), + outputLandmarkFile=dict( + argstr="--outputLandmarkFile %s", + hash_files=False, + ), ) inputs = insertMidACPCpoint.input_spec() @@ -17,7 +28,11 @@ def test_insertMidACPCpoint_inputs(): def test_insertMidACPCpoint_outputs(): - output_map = dict(outputLandmarkFile=dict(extensions=None)) + output_map = dict( + outputLandmarkFile=dict( + extensions=None, + ), + ) outputs = insertMidACPCpoint.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py index 09d2426eb9..472a1326e0 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py @@ -4,11 +4,20 @@ def test_landmarksConstellationAligner_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputLandmarksPaired=dict(argstr="--inputLandmarksPaired %s", extensions=None), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputLandmarksPaired=dict( + argstr="--inputLandmarksPaired %s", + extensions=None, + ), outputLandmarksPaired=dict( - argstr="--outputLandmarksPaired %s", hash_files=False + argstr="--outputLandmarksPaired %s", + hash_files=False, ), ) inputs = landmarksConstellationAligner.input_spec() @@ -19,7 +28,11 @@ def test_landmarksConstellationAligner_inputs(): def test_landmarksConstellationAligner_outputs(): - output_map = dict(outputLandmarksPaired=dict(extensions=None)) + output_map = dict( + outputLandmarksPaired=dict( + extensions=None, + ), + ) outputs = landmarksConstellationAligner.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py index f41a5c2a5a..c14fdff775 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py @@ -4,12 +4,29 @@ def test_landmarksConstellationWeights_inputs(): input_map = dict( - LLSModel=dict(argstr="--LLSModel %s", extensions=None), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - 
inputTemplateModel=dict(argstr="--inputTemplateModel %s", extensions=None), - inputTrainingList=dict(argstr="--inputTrainingList %s", extensions=None), - outputWeightsList=dict(argstr="--outputWeightsList %s", hash_files=False), + LLSModel=dict( + argstr="--LLSModel %s", + extensions=None, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputTemplateModel=dict( + argstr="--inputTemplateModel %s", + extensions=None, + ), + inputTrainingList=dict( + argstr="--inputTrainingList %s", + extensions=None, + ), + outputWeightsList=dict( + argstr="--outputWeightsList %s", + hash_files=False, + ), ) inputs = landmarksConstellationWeights.input_spec() @@ -19,7 +36,11 @@ def test_landmarksConstellationWeights_inputs(): def test_landmarksConstellationWeights_outputs(): - output_map = dict(outputWeightsList=dict(extensions=None)) + output_map = dict( + outputWeightsList=dict( + extensions=None, + ), + ) outputs = landmarksConstellationWeights.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py index 3b2e3d9909..9017db6760 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py @@ -4,10 +4,23 @@ def test_DTIexport_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputTensor=dict(argstr="%s", extensions=None, position=-2), - outputFile=dict(argstr="%s", hash_files=False, position=-1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputTensor=dict( + argstr="%s", + extensions=None, + position=-2, + ), + outputFile=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = DTIexport.input_spec() @@ -17,7 +30,12 @@ def test_DTIexport_inputs(): def test_DTIexport_outputs(): - output_map = dict(outputFile=dict(extensions=None, position=-1)) + output_map = dict( + outputFile=dict( + extensions=None, + position=-1, + ), + ) outputs = DTIexport.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py index c9588f6c71..802d4ce9bc 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py @@ -4,11 +4,26 @@ def test_DTIimport_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputFile=dict(argstr="%s", extensions=None, position=-2), - outputTensor=dict(argstr="%s", hash_files=False, position=-1), - testingmode=dict(argstr="--testingmode "), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputFile=dict( + argstr="%s", + extensions=None, + position=-2, + ), + outputTensor=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + testingmode=dict( + argstr="--testingmode ", + ), ) inputs = DTIimport.input_spec() @@ -18,7 +33,12 @@ def test_DTIimport_inputs(): def test_DTIimport_outputs(): - output_map = dict(outputTensor=dict(extensions=None, position=-1)) + output_map = dict( + outputTensor=dict( + extensions=None, + position=-1, + ), + ) outputs = DTIimport.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py index 381b97b677..e004599c12 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py @@ -4,14 +4,37 @@ def test_DWIJointRicianLMMSEFilter_inputs(): input_map = dict( - args=dict(argstr="%s"), - compressOutput=dict(argstr="--compressOutput "), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="%s", extensions=None, position=-2), - ng=dict(argstr="--ng %d"), - outputVolume=dict(argstr="%s", hash_files=False, position=-1), - re=dict(argstr="--re %s", sep=","), - rf=dict(argstr="--rf %s", sep=","), + args=dict( + argstr="%s", + ), + compressOutput=dict( + argstr="--compressOutput ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + ng=dict( + argstr="--ng %d", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + re=dict( + argstr="--re %s", + sep=",", + ), + rf=dict( + argstr="--rf %s", + sep=",", + ), ) inputs = DWIJointRicianLMMSEFilter.input_spec() @@ -21,7 +44,12 @@ def test_DWIJointRicianLMMSEFilter_inputs(): def test_DWIJointRicianLMMSEFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1)) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = DWIJointRicianLMMSEFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py index f167c84564..3c38117737 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py @@ -4,20 +4,55 @@ def test_DWIRicianLMMSEFilter_inputs(): input_map = dict( - args=dict(argstr="%s"), - compressOutput=dict(argstr="--compressOutput "), - environ=dict(nohash=True, usedefault=True), - hrf=dict(argstr="--hrf %f"), - inputVolume=dict(argstr="%s", extensions=None, position=-2), - iter=dict(argstr="--iter %d"), - maxnstd=dict(argstr="--maxnstd %d"), - minnstd=dict(argstr="--minnstd %d"), - mnve=dict(argstr="--mnve %d"), - mnvf=dict(argstr="--mnvf %d"), - outputVolume=dict(argstr="%s", hash_files=False, position=-1), - re=dict(argstr="--re %s", sep=","), - rf=dict(argstr="--rf %s", sep=","), - uav=dict(argstr="--uav "), + args=dict( + argstr="%s", + ), + compressOutput=dict( + argstr="--compressOutput ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hrf=dict( + argstr="--hrf %f", + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + iter=dict( + argstr="--iter %d", + ), + maxnstd=dict( + argstr="--maxnstd %d", + ), + minnstd=dict( + argstr="--minnstd %d", + ), + mnve=dict( + argstr="--mnve %d", + ), + mnvf=dict( + argstr="--mnvf %d", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + re=dict( + argstr="--re %s", + sep=",", + ), + rf=dict( + argstr="--rf %s", + sep=",", + ), + uav=dict( + argstr="--uav ", + ), ) inputs = DWIRicianLMMSEFilter.input_spec() @@ -27,7 +62,12 @@ def test_DWIRicianLMMSEFilter_inputs(): def test_DWIRicianLMMSEFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1)) + output_map = 
dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = DWIRicianLMMSEFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py index a17f9a8e9d..76b305283c 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py @@ -4,14 +4,38 @@ def test_DWIToDTIEstimation_inputs(): input_map = dict( - args=dict(argstr="%s"), - enumeration=dict(argstr="--enumeration %s"), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="%s", extensions=None, position=-3), - mask=dict(argstr="--mask %s", extensions=None), - outputBaseline=dict(argstr="%s", hash_files=False, position=-1), - outputTensor=dict(argstr="%s", hash_files=False, position=-2), - shiftNeg=dict(argstr="--shiftNeg "), + args=dict( + argstr="%s", + ), + enumeration=dict( + argstr="--enumeration %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-3, + ), + mask=dict( + argstr="--mask %s", + extensions=None, + ), + outputBaseline=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + outputTensor=dict( + argstr="%s", + hash_files=False, + position=-2, + ), + shiftNeg=dict( + argstr="--shiftNeg ", + ), ) inputs = DWIToDTIEstimation.input_spec() @@ -22,8 +46,14 @@ def test_DWIToDTIEstimation_inputs(): def test_DWIToDTIEstimation_outputs(): output_map = dict( - outputBaseline=dict(extensions=None, position=-1), - outputTensor=dict(extensions=None, position=-2), + outputBaseline=dict( + extensions=None, + position=-1, + ), + outputTensor=dict( + extensions=None, + position=-2, + ), ) outputs = DWIToDTIEstimation.output_spec() diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py index 11f556ff53..143194f493 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py @@ -4,11 +4,26 @@ def test_DiffusionTensorScalarMeasurements_inputs(): input_map = dict( - args=dict(argstr="%s"), - enumeration=dict(argstr="--enumeration %s"), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="%s", extensions=None, position=-3), - outputScalar=dict(argstr="%s", hash_files=False, position=-1), + args=dict( + argstr="%s", + ), + enumeration=dict( + argstr="--enumeration %s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-3, + ), + outputScalar=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = DiffusionTensorScalarMeasurements.input_spec() @@ -18,7 +33,12 @@ def test_DiffusionTensorScalarMeasurements_inputs(): def test_DiffusionTensorScalarMeasurements_outputs(): - output_map = dict(outputScalar=dict(extensions=None, position=-1)) + output_map = dict( + outputScalar=dict( + extensions=None, + position=-1, + ), + ) outputs = DiffusionTensorScalarMeasurements.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py 
b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py index 85dfe36117..5b11d2f578 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py @@ -4,13 +4,34 @@ def test_DiffusionWeightedVolumeMasking_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="%s", extensions=None, position=-4), - otsuomegathreshold=dict(argstr="--otsuomegathreshold %f"), - outputBaseline=dict(argstr="%s", hash_files=False, position=-2), - removeislands=dict(argstr="--removeislands "), - thresholdMask=dict(argstr="%s", hash_files=False, position=-1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-4, + ), + otsuomegathreshold=dict( + argstr="--otsuomegathreshold %f", + ), + outputBaseline=dict( + argstr="%s", + hash_files=False, + position=-2, + ), + removeislands=dict( + argstr="--removeislands ", + ), + thresholdMask=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = DiffusionWeightedVolumeMasking.input_spec() @@ -21,8 +42,14 @@ def test_DiffusionWeightedVolumeMasking_inputs(): def test_DiffusionWeightedVolumeMasking_outputs(): output_map = dict( - outputBaseline=dict(extensions=None, position=-2), - thresholdMask=dict(extensions=None, position=-1), + outputBaseline=dict( + extensions=None, + position=-2, + ), + thresholdMask=dict( + extensions=None, + position=-1, + ), ) outputs = DiffusionWeightedVolumeMasking.output_spec() diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py index e0f4e56932..c52bb5357d 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py @@ -4,34 +4,102 @@ def test_ResampleDTIVolume_inputs(): input_map = dict( - Inverse_ITK_Transformation=dict(argstr="--Inverse_ITK_Transformation "), - Reference=dict(argstr="--Reference %s", extensions=None), - args=dict(argstr="%s"), - centered_transform=dict(argstr="--centered_transform "), - correction=dict(argstr="--correction %s"), - defField=dict(argstr="--defField %s", extensions=None), - default_pixel_value=dict(argstr="--default_pixel_value %f"), - direction_matrix=dict(argstr="--direction_matrix %s", sep=","), - environ=dict(nohash=True, usedefault=True), - hfieldtype=dict(argstr="--hfieldtype %s"), - image_center=dict(argstr="--image_center %s"), - inputVolume=dict(argstr="%s", extensions=None, position=-2), - interpolation=dict(argstr="--interpolation %s"), - notbulk=dict(argstr="--notbulk "), - number_of_thread=dict(argstr="--number_of_thread %d"), - origin=dict(argstr="--origin %s"), - outputVolume=dict(argstr="%s", hash_files=False, position=-1), - rotation_point=dict(argstr="--rotation_point %s"), - size=dict(argstr="--size %s", sep=","), - spaceChange=dict(argstr="--spaceChange "), - spacing=dict(argstr="--spacing %s", sep=","), - spline_order=dict(argstr="--spline_order %d"), - transform=dict(argstr="--transform %s"), - transform_matrix=dict(argstr="--transform_matrix %s", sep=","), - transform_order=dict(argstr="--transform_order %s"), - transform_tensor_method=dict(argstr="--transform_tensor_method %s"), - transformationFile=dict(argstr="--transformationFile %s", 
extensions=None), - window_function=dict(argstr="--window_function %s"), + Inverse_ITK_Transformation=dict( + argstr="--Inverse_ITK_Transformation ", + ), + Reference=dict( + argstr="--Reference %s", + extensions=None, + ), + args=dict( + argstr="%s", + ), + centered_transform=dict( + argstr="--centered_transform ", + ), + correction=dict( + argstr="--correction %s", + ), + defField=dict( + argstr="--defField %s", + extensions=None, + ), + default_pixel_value=dict( + argstr="--default_pixel_value %f", + ), + direction_matrix=dict( + argstr="--direction_matrix %s", + sep=",", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hfieldtype=dict( + argstr="--hfieldtype %s", + ), + image_center=dict( + argstr="--image_center %s", + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + interpolation=dict( + argstr="--interpolation %s", + ), + notbulk=dict( + argstr="--notbulk ", + ), + number_of_thread=dict( + argstr="--number_of_thread %d", + ), + origin=dict( + argstr="--origin %s", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + rotation_point=dict( + argstr="--rotation_point %s", + ), + size=dict( + argstr="--size %s", + sep=",", + ), + spaceChange=dict( + argstr="--spaceChange ", + ), + spacing=dict( + argstr="--spacing %s", + sep=",", + ), + spline_order=dict( + argstr="--spline_order %d", + ), + transform=dict( + argstr="--transform %s", + ), + transform_matrix=dict( + argstr="--transform_matrix %s", + sep=",", + ), + transform_order=dict( + argstr="--transform_order %s", + ), + transform_tensor_method=dict( + argstr="--transform_tensor_method %s", + ), + transformationFile=dict( + argstr="--transformationFile %s", + extensions=None, + ), + window_function=dict( + argstr="--window_function %s", + ), ) inputs = ResampleDTIVolume.input_spec() @@ -41,7 +109,12 @@ def test_ResampleDTIVolume_inputs(): def test_ResampleDTIVolume_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1)) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = ResampleDTIVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py index e28b877a19..f8b1a3ddff 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py @@ -4,25 +4,70 @@ def test_TractographyLabelMapSeeding_inputs(): input_map = dict( - InputVolume=dict(argstr="%s", extensions=None, position=-2), - OutputFibers=dict(argstr="%s", hash_files=False, position=-1), - args=dict(argstr="%s"), - clthreshold=dict(argstr="--clthreshold %f"), - environ=dict(nohash=True, usedefault=True), - inputroi=dict(argstr="--inputroi %s", extensions=None), - integrationsteplength=dict(argstr="--integrationsteplength %f"), - label=dict(argstr="--label %d"), - maximumlength=dict(argstr="--maximumlength %f"), - minimumlength=dict(argstr="--minimumlength %f"), - name=dict(argstr="--name %s"), - outputdirectory=dict(argstr="--outputdirectory %s", hash_files=False), - randomgrid=dict(argstr="--randomgrid "), - seedspacing=dict(argstr="--seedspacing %f"), - stoppingcurvature=dict(argstr="--stoppingcurvature %f"), - stoppingmode=dict(argstr="--stoppingmode %s"), - stoppingvalue=dict(argstr="--stoppingvalue %f"), - 
useindexspace=dict(argstr="--useindexspace "), - writetofile=dict(argstr="--writetofile "), + InputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + OutputFibers=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + args=dict( + argstr="%s", + ), + clthreshold=dict( + argstr="--clthreshold %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputroi=dict( + argstr="--inputroi %s", + extensions=None, + ), + integrationsteplength=dict( + argstr="--integrationsteplength %f", + ), + label=dict( + argstr="--label %d", + ), + maximumlength=dict( + argstr="--maximumlength %f", + ), + minimumlength=dict( + argstr="--minimumlength %f", + ), + name=dict( + argstr="--name %s", + ), + outputdirectory=dict( + argstr="--outputdirectory %s", + hash_files=False, + ), + randomgrid=dict( + argstr="--randomgrid ", + ), + seedspacing=dict( + argstr="--seedspacing %f", + ), + stoppingcurvature=dict( + argstr="--stoppingcurvature %f", + ), + stoppingmode=dict( + argstr="--stoppingmode %s", + ), + stoppingvalue=dict( + argstr="--stoppingvalue %f", + ), + useindexspace=dict( + argstr="--useindexspace ", + ), + writetofile=dict( + argstr="--writetofile ", + ), ) inputs = TractographyLabelMapSeeding.input_spec() @@ -33,7 +78,11 @@ def test_TractographyLabelMapSeeding_inputs(): def test_TractographyLabelMapSeeding_outputs(): output_map = dict( - OutputFibers=dict(extensions=None, position=-1), outputdirectory=dict() + OutputFibers=dict( + extensions=None, + position=-1, + ), + outputdirectory=dict(), ) outputs = TractographyLabelMapSeeding.output_spec() diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py index b4b9a19029..2cd0ac229d 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py @@ -4,12 +4,31 @@ def test_AddScalarVolumes_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputVolume1=dict(argstr="%s", extensions=None, position=-3), - inputVolume2=dict(argstr="%s", extensions=None, position=-2), - order=dict(argstr="--order %s"), - outputVolume=dict(argstr="%s", hash_files=False, position=-1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume1=dict( + argstr="%s", + extensions=None, + position=-3, + ), + inputVolume2=dict( + argstr="%s", + extensions=None, + position=-2, + ), + order=dict( + argstr="--order %s", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = AddScalarVolumes.input_spec() @@ -19,7 +38,12 @@ def test_AddScalarVolumes_inputs(): def test_AddScalarVolumes_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1)) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = AddScalarVolumes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py index 08641f03a6..8417ab1a90 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py @@ -4,11 +4,26 @@ def test_CastScalarVolume_inputs(): input_map = dict( - InputVolume=dict(argstr="%s", extensions=None, position=-2), - 
OutputVolume=dict(argstr="%s", hash_files=False, position=-1), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - type=dict(argstr="--type %s"), + InputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + OutputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + type=dict( + argstr="--type %s", + ), ) inputs = CastScalarVolume.input_spec() @@ -18,7 +33,12 @@ def test_CastScalarVolume_inputs(): def test_CastScalarVolume_outputs(): - output_map = dict(OutputVolume=dict(extensions=None, position=-1)) + output_map = dict( + OutputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = CastScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py index 9eac1b6c21..49b5133faa 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py @@ -4,12 +4,32 @@ def test_CheckerBoardFilter_inputs(): input_map = dict( - args=dict(argstr="%s"), - checkerPattern=dict(argstr="--checkerPattern %s", sep=","), - environ=dict(nohash=True, usedefault=True), - inputVolume1=dict(argstr="%s", extensions=None, position=-3), - inputVolume2=dict(argstr="%s", extensions=None, position=-2), - outputVolume=dict(argstr="%s", hash_files=False, position=-1), + args=dict( + argstr="%s", + ), + checkerPattern=dict( + argstr="--checkerPattern %s", + sep=",", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume1=dict( + argstr="%s", + extensions=None, + position=-3, + ), + inputVolume2=dict( + argstr="%s", + extensions=None, + position=-2, + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = CheckerBoardFilter.input_spec() @@ -19,7 +39,12 @@ def test_CheckerBoardFilter_inputs(): def test_CheckerBoardFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1)) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = CheckerBoardFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py index 49b59aa007..48421b7c21 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py @@ -4,13 +4,32 @@ def test_CurvatureAnisotropicDiffusion_inputs(): input_map = dict( - args=dict(argstr="%s"), - conductance=dict(argstr="--conductance %f"), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="%s", extensions=None, position=-2), - iterations=dict(argstr="--iterations %d"), - outputVolume=dict(argstr="%s", hash_files=False, position=-1), - timeStep=dict(argstr="--timeStep %f"), + args=dict( + argstr="%s", + ), + conductance=dict( + argstr="--conductance %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + iterations=dict( + argstr="--iterations %d", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + timeStep=dict( + argstr="--timeStep %f", + ), 
) inputs = CurvatureAnisotropicDiffusion.input_spec() @@ -20,7 +39,12 @@ def test_CurvatureAnisotropicDiffusion_inputs(): def test_CurvatureAnisotropicDiffusion_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1)) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = CurvatureAnisotropicDiffusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py index 00cc92cc4e..430e299787 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py @@ -4,14 +4,35 @@ def test_ExtractSkeleton_inputs(): input_map = dict( - InputImageFileName=dict(argstr="%s", extensions=None, position=-2), - OutputImageFileName=dict(argstr="%s", hash_files=False, position=-1), - args=dict(argstr="%s"), - dontPrune=dict(argstr="--dontPrune "), - environ=dict(nohash=True, usedefault=True), - numPoints=dict(argstr="--numPoints %d"), - pointsFile=dict(argstr="--pointsFile %s"), - type=dict(argstr="--type %s"), + InputImageFileName=dict( + argstr="%s", + extensions=None, + position=-2, + ), + OutputImageFileName=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + args=dict( + argstr="%s", + ), + dontPrune=dict( + argstr="--dontPrune ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + numPoints=dict( + argstr="--numPoints %d", + ), + pointsFile=dict( + argstr="--pointsFile %s", + ), + type=dict( + argstr="--type %s", + ), ) inputs = ExtractSkeleton.input_spec() @@ -21,7 +42,12 @@ def test_ExtractSkeleton_inputs(): def test_ExtractSkeleton_outputs(): - output_map = dict(OutputImageFileName=dict(extensions=None, position=-1)) + output_map = dict( + OutputImageFileName=dict( + extensions=None, + position=-1, + ), + ) outputs = ExtractSkeleton.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py index 15f571fceb..113490472d 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py @@ -4,11 +4,26 @@ def test_GaussianBlurImageFilter_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="%s", extensions=None, position=-2), - outputVolume=dict(argstr="%s", hash_files=False, position=-1), - sigma=dict(argstr="--sigma %f"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + sigma=dict( + argstr="--sigma %f", + ), ) inputs = GaussianBlurImageFilter.input_spec() @@ -18,7 +33,12 @@ def test_GaussianBlurImageFilter_inputs(): def test_GaussianBlurImageFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1)) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = GaussianBlurImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py 
b/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py index 6445abfb6e..95810788c7 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py @@ -4,13 +4,32 @@ def test_GradientAnisotropicDiffusion_inputs(): input_map = dict( - args=dict(argstr="%s"), - conductance=dict(argstr="--conductance %f"), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="%s", extensions=None, position=-2), - iterations=dict(argstr="--iterations %d"), - outputVolume=dict(argstr="%s", hash_files=False, position=-1), - timeStep=dict(argstr="--timeStep %f"), + args=dict( + argstr="%s", + ), + conductance=dict( + argstr="--conductance %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + iterations=dict( + argstr="--iterations %d", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + timeStep=dict( + argstr="--timeStep %f", + ), ) inputs = GradientAnisotropicDiffusion.input_spec() @@ -20,7 +39,12 @@ def test_GradientAnisotropicDiffusion_inputs(): def test_GradientAnisotropicDiffusion_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1)) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = GradientAnisotropicDiffusion.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py index c1541d7820..8891232347 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py @@ -4,10 +4,23 @@ def test_GrayscaleFillHoleImageFilter_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="%s", extensions=None, position=-2), - outputVolume=dict(argstr="%s", hash_files=False, position=-1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = GrayscaleFillHoleImageFilter.input_spec() @@ -17,7 +30,12 @@ def test_GrayscaleFillHoleImageFilter_inputs(): def test_GrayscaleFillHoleImageFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1)) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = GrayscaleFillHoleImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py index 41a2a8e838..d48d9ded63 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py @@ -4,10 +4,23 @@ def test_GrayscaleGrindPeakImageFilter_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="%s", extensions=None, position=-2), - outputVolume=dict(argstr="%s", hash_files=False, position=-1), 
+ args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = GrayscaleGrindPeakImageFilter.input_spec() @@ -17,7 +30,12 @@ def test_GrayscaleGrindPeakImageFilter_inputs(): def test_GrayscaleGrindPeakImageFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1)) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = GrayscaleGrindPeakImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py b/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py index 033141d738..cf2a959ff9 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py @@ -4,14 +4,37 @@ def test_HistogramMatching_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="%s", extensions=None, position=-3), - numberOfHistogramLevels=dict(argstr="--numberOfHistogramLevels %d"), - numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d"), - outputVolume=dict(argstr="%s", hash_files=False, position=-1), - referenceVolume=dict(argstr="%s", extensions=None, position=-2), - threshold=dict(argstr="--threshold "), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-3, + ), + numberOfHistogramLevels=dict( + argstr="--numberOfHistogramLevels %d", + ), + numberOfMatchPoints=dict( + argstr="--numberOfMatchPoints %d", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + referenceVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + threshold=dict( + argstr="--threshold ", + ), ) inputs = HistogramMatching.input_spec() @@ -21,7 +44,12 @@ def test_HistogramMatching_inputs(): def test_HistogramMatching_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1)) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = HistogramMatching.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py index 0c4af24580..802baf5f38 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py @@ -4,12 +4,31 @@ def test_ImageLabelCombine_inputs(): input_map = dict( - InputLabelMap_A=dict(argstr="%s", extensions=None, position=-3), - InputLabelMap_B=dict(argstr="%s", extensions=None, position=-2), - OutputLabelMap=dict(argstr="%s", hash_files=False, position=-1), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - first_overwrites=dict(argstr="--first_overwrites "), + InputLabelMap_A=dict( + argstr="%s", + extensions=None, + position=-3, + ), + InputLabelMap_B=dict( + argstr="%s", + extensions=None, + position=-2, + ), + OutputLabelMap=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + first_overwrites=dict( + argstr="--first_overwrites ", + 
), ) inputs = ImageLabelCombine.input_spec() @@ -19,7 +38,12 @@ def test_ImageLabelCombine_inputs(): def test_ImageLabelCombine_outputs(): - output_map = dict(OutputLabelMap=dict(extensions=None, position=-1)) + output_map = dict( + OutputLabelMap=dict( + extensions=None, + position=-1, + ), + ) outputs = ImageLabelCombine.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py index b9872b7e4e..5070718d66 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py @@ -4,13 +4,34 @@ def test_MaskScalarVolume_inputs(): input_map = dict( - InputVolume=dict(argstr="%s", extensions=None, position=-3), - MaskVolume=dict(argstr="%s", extensions=None, position=-2), - OutputVolume=dict(argstr="%s", hash_files=False, position=-1), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - label=dict(argstr="--label %d"), - replace=dict(argstr="--replace %d"), + InputVolume=dict( + argstr="%s", + extensions=None, + position=-3, + ), + MaskVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + OutputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + label=dict( + argstr="--label %d", + ), + replace=dict( + argstr="--replace %d", + ), ) inputs = MaskScalarVolume.input_spec() @@ -20,7 +41,12 @@ def test_MaskScalarVolume_inputs(): def test_MaskScalarVolume_outputs(): - output_map = dict(OutputVolume=dict(extensions=None, position=-1)) + output_map = dict( + OutputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = MaskScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py index ceea660265..8b86a90c3b 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py @@ -4,11 +4,27 @@ def test_MedianImageFilter_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="%s", extensions=None, position=-2), - neighborhood=dict(argstr="--neighborhood %s", sep=","), - outputVolume=dict(argstr="%s", hash_files=False, position=-1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + neighborhood=dict( + argstr="--neighborhood %s", + sep=",", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = MedianImageFilter.input_spec() @@ -18,7 +34,12 @@ def test_MedianImageFilter_inputs(): def test_MedianImageFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1)) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = MedianImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py index e28dfb2a66..817fdbbe95 100644 --- 
a/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py @@ -4,12 +4,31 @@ def test_MultiplyScalarVolumes_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputVolume1=dict(argstr="%s", extensions=None, position=-3), - inputVolume2=dict(argstr="%s", extensions=None, position=-2), - order=dict(argstr="--order %s"), - outputVolume=dict(argstr="%s", hash_files=False, position=-1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume1=dict( + argstr="%s", + extensions=None, + position=-3, + ), + inputVolume2=dict( + argstr="%s", + extensions=None, + position=-2, + ), + order=dict( + argstr="--order %s", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = MultiplyScalarVolumes.input_spec() @@ -19,7 +38,12 @@ def test_MultiplyScalarVolumes_inputs(): def test_MultiplyScalarVolumes_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1)) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = MultiplyScalarVolumes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py b/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py index f1ab2c1d90..43038036d5 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py @@ -4,20 +4,57 @@ def test_N4ITKBiasFieldCorrection_inputs(): input_map = dict( - args=dict(argstr="%s"), - bsplineorder=dict(argstr="--bsplineorder %d"), - convergencethreshold=dict(argstr="--convergencethreshold %f"), - environ=dict(nohash=True, usedefault=True), - histogramsharpening=dict(argstr="--histogramsharpening %s", sep=","), - inputimage=dict(argstr="--inputimage %s", extensions=None), - iterations=dict(argstr="--iterations %s", sep=","), - maskimage=dict(argstr="--maskimage %s", extensions=None), - meshresolution=dict(argstr="--meshresolution %s", sep=","), - outputbiasfield=dict(argstr="--outputbiasfield %s", hash_files=False), - outputimage=dict(argstr="--outputimage %s", hash_files=False), - shrinkfactor=dict(argstr="--shrinkfactor %d"), - splinedistance=dict(argstr="--splinedistance %f"), - weightimage=dict(argstr="--weightimage %s", extensions=None), + args=dict( + argstr="%s", + ), + bsplineorder=dict( + argstr="--bsplineorder %d", + ), + convergencethreshold=dict( + argstr="--convergencethreshold %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + histogramsharpening=dict( + argstr="--histogramsharpening %s", + sep=",", + ), + inputimage=dict( + argstr="--inputimage %s", + extensions=None, + ), + iterations=dict( + argstr="--iterations %s", + sep=",", + ), + maskimage=dict( + argstr="--maskimage %s", + extensions=None, + ), + meshresolution=dict( + argstr="--meshresolution %s", + sep=",", + ), + outputbiasfield=dict( + argstr="--outputbiasfield %s", + hash_files=False, + ), + outputimage=dict( + argstr="--outputimage %s", + hash_files=False, + ), + shrinkfactor=dict( + argstr="--shrinkfactor %d", + ), + splinedistance=dict( + argstr="--splinedistance %f", + ), + weightimage=dict( + argstr="--weightimage %s", + extensions=None, + ), ) inputs = N4ITKBiasFieldCorrection.input_spec() @@ -28,7 +65,12 @@ def 
test_N4ITKBiasFieldCorrection_inputs(): def test_N4ITKBiasFieldCorrection_outputs(): output_map = dict( - outputbiasfield=dict(extensions=None), outputimage=dict(extensions=None) + outputbiasfield=dict( + extensions=None, + ), + outputimage=dict( + extensions=None, + ), ) outputs = N4ITKBiasFieldCorrection.output_spec() diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py index d3b61640bb..37dca6437c 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py @@ -4,32 +4,96 @@ def test_ResampleScalarVectorDWIVolume_inputs(): input_map = dict( - Inverse_ITK_Transformation=dict(argstr="--Inverse_ITK_Transformation "), - Reference=dict(argstr="--Reference %s", extensions=None), - args=dict(argstr="%s"), - centered_transform=dict(argstr="--centered_transform "), - defField=dict(argstr="--defField %s", extensions=None), - default_pixel_value=dict(argstr="--default_pixel_value %f"), - direction_matrix=dict(argstr="--direction_matrix %s", sep=","), - environ=dict(nohash=True, usedefault=True), - hfieldtype=dict(argstr="--hfieldtype %s"), - image_center=dict(argstr="--image_center %s"), - inputVolume=dict(argstr="%s", extensions=None, position=-2), - interpolation=dict(argstr="--interpolation %s"), - notbulk=dict(argstr="--notbulk "), - number_of_thread=dict(argstr="--number_of_thread %d"), - origin=dict(argstr="--origin %s"), - outputVolume=dict(argstr="%s", hash_files=False, position=-1), - rotation_point=dict(argstr="--rotation_point %s"), - size=dict(argstr="--size %s", sep=","), - spaceChange=dict(argstr="--spaceChange "), - spacing=dict(argstr="--spacing %s", sep=","), - spline_order=dict(argstr="--spline_order %d"), - transform=dict(argstr="--transform %s"), - transform_matrix=dict(argstr="--transform_matrix %s", sep=","), - transform_order=dict(argstr="--transform_order %s"), - transformationFile=dict(argstr="--transformationFile %s", extensions=None), - window_function=dict(argstr="--window_function %s"), + Inverse_ITK_Transformation=dict( + argstr="--Inverse_ITK_Transformation ", + ), + Reference=dict( + argstr="--Reference %s", + extensions=None, + ), + args=dict( + argstr="%s", + ), + centered_transform=dict( + argstr="--centered_transform ", + ), + defField=dict( + argstr="--defField %s", + extensions=None, + ), + default_pixel_value=dict( + argstr="--default_pixel_value %f", + ), + direction_matrix=dict( + argstr="--direction_matrix %s", + sep=",", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hfieldtype=dict( + argstr="--hfieldtype %s", + ), + image_center=dict( + argstr="--image_center %s", + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + interpolation=dict( + argstr="--interpolation %s", + ), + notbulk=dict( + argstr="--notbulk ", + ), + number_of_thread=dict( + argstr="--number_of_thread %d", + ), + origin=dict( + argstr="--origin %s", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + rotation_point=dict( + argstr="--rotation_point %s", + ), + size=dict( + argstr="--size %s", + sep=",", + ), + spaceChange=dict( + argstr="--spaceChange ", + ), + spacing=dict( + argstr="--spacing %s", + sep=",", + ), + spline_order=dict( + argstr="--spline_order %d", + ), + transform=dict( + argstr="--transform %s", + ), + transform_matrix=dict( + 
argstr="--transform_matrix %s", + sep=",", + ), + transform_order=dict( + argstr="--transform_order %s", + ), + transformationFile=dict( + argstr="--transformationFile %s", + extensions=None, + ), + window_function=dict( + argstr="--window_function %s", + ), ) inputs = ResampleScalarVectorDWIVolume.input_spec() @@ -39,7 +103,12 @@ def test_ResampleScalarVectorDWIVolume_inputs(): def test_ResampleScalarVectorDWIVolume_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1)) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = ResampleScalarVectorDWIVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py index 2bf2568637..abe3d9ad00 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py @@ -4,12 +4,31 @@ def test_SubtractScalarVolumes_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputVolume1=dict(argstr="%s", extensions=None, position=-3), - inputVolume2=dict(argstr="%s", extensions=None, position=-2), - order=dict(argstr="--order %s"), - outputVolume=dict(argstr="%s", hash_files=False, position=-1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume1=dict( + argstr="%s", + extensions=None, + position=-3, + ), + inputVolume2=dict( + argstr="%s", + extensions=None, + position=-2, + ), + order=dict( + argstr="--order %s", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = SubtractScalarVolumes.input_spec() @@ -19,7 +38,12 @@ def test_SubtractScalarVolumes_inputs(): def test_SubtractScalarVolumes_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1)) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = SubtractScalarVolumes.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py index cede949179..0aaab0ff7a 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py @@ -4,15 +4,38 @@ def test_ThresholdScalarVolume_inputs(): input_map = dict( - InputVolume=dict(argstr="%s", extensions=None, position=-2), - OutputVolume=dict(argstr="%s", hash_files=False, position=-1), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - lower=dict(argstr="--lower %d"), - outsidevalue=dict(argstr="--outsidevalue %d"), - threshold=dict(argstr="--threshold %d"), - thresholdtype=dict(argstr="--thresholdtype %s"), - upper=dict(argstr="--upper %d"), + InputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + OutputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + lower=dict( + argstr="--lower %d", + ), + outsidevalue=dict( + argstr="--outsidevalue %d", + ), + threshold=dict( + argstr="--threshold %d", + ), + thresholdtype=dict( + argstr="--thresholdtype %s", + ), + upper=dict( + argstr="--upper %d", + ), ) inputs = 
ThresholdScalarVolume.input_spec() @@ -22,7 +45,12 @@ def test_ThresholdScalarVolume_inputs(): def test_ThresholdScalarVolume_outputs(): - output_map = dict(OutputVolume=dict(extensions=None, position=-1)) + output_map = dict( + OutputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = ThresholdScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py index f6bb34bbdf..bbaa19f848 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py @@ -4,14 +4,36 @@ def test_VotingBinaryHoleFillingImageFilter_inputs(): input_map = dict( - args=dict(argstr="%s"), - background=dict(argstr="--background %d"), - environ=dict(nohash=True, usedefault=True), - foreground=dict(argstr="--foreground %d"), - inputVolume=dict(argstr="%s", extensions=None, position=-2), - majorityThreshold=dict(argstr="--majorityThreshold %d"), - outputVolume=dict(argstr="%s", hash_files=False, position=-1), - radius=dict(argstr="--radius %s", sep=","), + args=dict( + argstr="%s", + ), + background=dict( + argstr="--background %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + foreground=dict( + argstr="--foreground %d", + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + majorityThreshold=dict( + argstr="--majorityThreshold %d", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + radius=dict( + argstr="--radius %s", + sep=",", + ), ) inputs = VotingBinaryHoleFillingImageFilter.input_spec() @@ -21,7 +43,12 @@ def test_VotingBinaryHoleFillingImageFilter_inputs(): def test_VotingBinaryHoleFillingImageFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1)) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = VotingBinaryHoleFillingImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py b/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py index 4d5ee32124..8aa18dc6a3 100644 --- a/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py +++ b/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py @@ -4,15 +4,41 @@ def test_DWIUnbiasedNonLocalMeansFilter_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - hp=dict(argstr="--hp %f"), - inputVolume=dict(argstr="%s", extensions=None, position=-2), - ng=dict(argstr="--ng %d"), - outputVolume=dict(argstr="%s", hash_files=False, position=-1), - rc=dict(argstr="--rc %s", sep=","), - re=dict(argstr="--re %s", sep=","), - rs=dict(argstr="--rs %s", sep=","), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hp=dict( + argstr="--hp %f", + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + ng=dict( + argstr="--ng %d", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + rc=dict( + argstr="--rc %s", + sep=",", + ), + re=dict( + argstr="--re %s", + sep=",", + ), + rs=dict( + argstr="--rs %s", + sep=",", + ), ) inputs = 
DWIUnbiasedNonLocalMeansFilter.input_spec() @@ -22,7 +48,12 @@ def test_DWIUnbiasedNonLocalMeansFilter_inputs(): def test_DWIUnbiasedNonLocalMeansFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1)) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = DWIUnbiasedNonLocalMeansFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py index 82c345813f..bb2de08cfb 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py @@ -4,21 +4,53 @@ def test_AffineRegistration_inputs(): input_map = dict( - FixedImageFileName=dict(argstr="%s", extensions=None, position=-2), - MovingImageFileName=dict(argstr="%s", extensions=None, position=-1), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - fixedsmoothingfactor=dict(argstr="--fixedsmoothingfactor %d"), - histogrambins=dict(argstr="--histogrambins %d"), - initialtransform=dict(argstr="--initialtransform %s", extensions=None), - iterations=dict(argstr="--iterations %d"), - movingsmoothingfactor=dict(argstr="--movingsmoothingfactor %d"), - outputtransform=dict(argstr="--outputtransform %s", hash_files=False), + FixedImageFileName=dict( + argstr="%s", + extensions=None, + position=-2, + ), + MovingImageFileName=dict( + argstr="%s", + extensions=None, + position=-1, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedsmoothingfactor=dict( + argstr="--fixedsmoothingfactor %d", + ), + histogrambins=dict( + argstr="--histogrambins %d", + ), + initialtransform=dict( + argstr="--initialtransform %s", + extensions=None, + ), + iterations=dict( + argstr="--iterations %d", + ), + movingsmoothingfactor=dict( + argstr="--movingsmoothingfactor %d", + ), + outputtransform=dict( + argstr="--outputtransform %s", + hash_files=False, + ), resampledmovingfilename=dict( - argstr="--resampledmovingfilename %s", hash_files=False + argstr="--resampledmovingfilename %s", + hash_files=False, + ), + spatialsamples=dict( + argstr="--spatialsamples %d", + ), + translationscale=dict( + argstr="--translationscale %f", ), - spatialsamples=dict(argstr="--spatialsamples %d"), - translationscale=dict(argstr="--translationscale %f"), ) inputs = AffineRegistration.input_spec() @@ -29,8 +61,12 @@ def test_AffineRegistration_inputs(): def test_AffineRegistration_outputs(): output_map = dict( - outputtransform=dict(extensions=None), - resampledmovingfilename=dict(extensions=None), + outputtransform=dict( + extensions=None, + ), + resampledmovingfilename=dict( + extensions=None, + ), ) outputs = AffineRegistration.output_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py index 7e5762a0db..41b316e7dc 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py @@ -4,23 +4,60 @@ def test_BSplineDeformableRegistration_inputs(): input_map = dict( - FixedImageFileName=dict(argstr="%s", extensions=None, position=-2), - MovingImageFileName=dict(argstr="%s", extensions=None, position=-1), - args=dict(argstr="%s"), - 
constrain=dict(argstr="--constrain "), - default=dict(argstr="--default %d"), - environ=dict(nohash=True, usedefault=True), - gridSize=dict(argstr="--gridSize %d"), - histogrambins=dict(argstr="--histogrambins %d"), - initialtransform=dict(argstr="--initialtransform %s", extensions=None), - iterations=dict(argstr="--iterations %d"), - maximumDeformation=dict(argstr="--maximumDeformation %f"), - outputtransform=dict(argstr="--outputtransform %s", hash_files=False), - outputwarp=dict(argstr="--outputwarp %s", hash_files=False), + FixedImageFileName=dict( + argstr="%s", + extensions=None, + position=-2, + ), + MovingImageFileName=dict( + argstr="%s", + extensions=None, + position=-1, + ), + args=dict( + argstr="%s", + ), + constrain=dict( + argstr="--constrain ", + ), + default=dict( + argstr="--default %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gridSize=dict( + argstr="--gridSize %d", + ), + histogrambins=dict( + argstr="--histogrambins %d", + ), + initialtransform=dict( + argstr="--initialtransform %s", + extensions=None, + ), + iterations=dict( + argstr="--iterations %d", + ), + maximumDeformation=dict( + argstr="--maximumDeformation %f", + ), + outputtransform=dict( + argstr="--outputtransform %s", + hash_files=False, + ), + outputwarp=dict( + argstr="--outputwarp %s", + hash_files=False, + ), resampledmovingfilename=dict( - argstr="--resampledmovingfilename %s", hash_files=False + argstr="--resampledmovingfilename %s", + hash_files=False, + ), + spatialsamples=dict( + argstr="--spatialsamples %d", ), - spatialsamples=dict(argstr="--spatialsamples %d"), ) inputs = BSplineDeformableRegistration.input_spec() @@ -31,9 +68,15 @@ def test_BSplineDeformableRegistration_inputs(): def test_BSplineDeformableRegistration_outputs(): output_map = dict( - outputtransform=dict(extensions=None), - outputwarp=dict(extensions=None), - resampledmovingfilename=dict(extensions=None), + outputtransform=dict( + extensions=None, + ), + outputwarp=dict( + extensions=None, + ), + resampledmovingfilename=dict( + extensions=None, + ), ) outputs = BSplineDeformableRegistration.output_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py index 0987c9b33e..fbd37eeb8e 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py @@ -4,11 +4,25 @@ def test_BSplineToDeformationField_inputs(): input_map = dict( - args=dict(argstr="%s"), - defImage=dict(argstr="--defImage %s", hash_files=False), - environ=dict(nohash=True, usedefault=True), - refImage=dict(argstr="--refImage %s", extensions=None), - tfm=dict(argstr="--tfm %s", extensions=None), + args=dict( + argstr="%s", + ), + defImage=dict( + argstr="--defImage %s", + hash_files=False, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + refImage=dict( + argstr="--refImage %s", + extensions=None, + ), + tfm=dict( + argstr="--tfm %s", + extensions=None, + ), ) inputs = BSplineToDeformationField.input_spec() @@ -18,7 +32,11 @@ def test_BSplineToDeformationField_inputs(): def test_BSplineToDeformationField_outputs(): - output_map = dict(defImage=dict(extensions=None)) + output_map = dict( + defImage=dict( + extensions=None, + ), + ) outputs = BSplineToDeformationField.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py index 699ac2133b..28f4e19d7b 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py @@ -4,36 +4,105 @@ def test_ExpertAutomatedRegistration_inputs(): input_map = dict( - affineMaxIterations=dict(argstr="--affineMaxIterations %d"), - affineSamplingRatio=dict(argstr="--affineSamplingRatio %f"), - args=dict(argstr="%s"), - bsplineMaxIterations=dict(argstr="--bsplineMaxIterations %d"), - bsplineSamplingRatio=dict(argstr="--bsplineSamplingRatio %f"), - controlPointSpacing=dict(argstr="--controlPointSpacing %d"), - environ=dict(nohash=True, usedefault=True), - expectedOffset=dict(argstr="--expectedOffset %f"), - expectedRotation=dict(argstr="--expectedRotation %f"), - expectedScale=dict(argstr="--expectedScale %f"), - expectedSkew=dict(argstr="--expectedSkew %f"), - fixedImage=dict(argstr="%s", extensions=None, position=-2), - fixedImageMask=dict(argstr="--fixedImageMask %s", extensions=None), - fixedLandmarks=dict(argstr="--fixedLandmarks %s..."), - initialization=dict(argstr="--initialization %s"), - interpolation=dict(argstr="--interpolation %s"), - loadTransform=dict(argstr="--loadTransform %s", extensions=None), - metric=dict(argstr="--metric %s"), - minimizeMemory=dict(argstr="--minimizeMemory "), - movingImage=dict(argstr="%s", extensions=None, position=-1), - movingLandmarks=dict(argstr="--movingLandmarks %s..."), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - randomNumberSeed=dict(argstr="--randomNumberSeed %d"), - registration=dict(argstr="--registration %s"), - resampledImage=dict(argstr="--resampledImage %s", hash_files=False), - rigidMaxIterations=dict(argstr="--rigidMaxIterations %d"), - rigidSamplingRatio=dict(argstr="--rigidSamplingRatio %f"), - sampleFromOverlap=dict(argstr="--sampleFromOverlap "), - saveTransform=dict(argstr="--saveTransform %s", hash_files=False), - verbosityLevel=dict(argstr="--verbosityLevel %s"), + affineMaxIterations=dict( + argstr="--affineMaxIterations %d", + ), + affineSamplingRatio=dict( + argstr="--affineSamplingRatio %f", + ), + args=dict( + argstr="%s", + ), + bsplineMaxIterations=dict( + argstr="--bsplineMaxIterations %d", + ), + bsplineSamplingRatio=dict( + argstr="--bsplineSamplingRatio %f", + ), + controlPointSpacing=dict( + argstr="--controlPointSpacing %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + expectedOffset=dict( + argstr="--expectedOffset %f", + ), + expectedRotation=dict( + argstr="--expectedRotation %f", + ), + expectedScale=dict( + argstr="--expectedScale %f", + ), + expectedSkew=dict( + argstr="--expectedSkew %f", + ), + fixedImage=dict( + argstr="%s", + extensions=None, + position=-2, + ), + fixedImageMask=dict( + argstr="--fixedImageMask %s", + extensions=None, + ), + fixedLandmarks=dict( + argstr="--fixedLandmarks %s...", + ), + initialization=dict( + argstr="--initialization %s", + ), + interpolation=dict( + argstr="--interpolation %s", + ), + loadTransform=dict( + argstr="--loadTransform %s", + extensions=None, + ), + metric=dict( + argstr="--metric %s", + ), + minimizeMemory=dict( + argstr="--minimizeMemory ", + ), + movingImage=dict( + argstr="%s", + extensions=None, + position=-1, + ), + movingLandmarks=dict( + argstr="--movingLandmarks %s...", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + 
randomNumberSeed=dict( + argstr="--randomNumberSeed %d", + ), + registration=dict( + argstr="--registration %s", + ), + resampledImage=dict( + argstr="--resampledImage %s", + hash_files=False, + ), + rigidMaxIterations=dict( + argstr="--rigidMaxIterations %d", + ), + rigidSamplingRatio=dict( + argstr="--rigidSamplingRatio %f", + ), + sampleFromOverlap=dict( + argstr="--sampleFromOverlap ", + ), + saveTransform=dict( + argstr="--saveTransform %s", + hash_files=False, + ), + verbosityLevel=dict( + argstr="--verbosityLevel %s", + ), ) inputs = ExpertAutomatedRegistration.input_spec() @@ -44,7 +113,12 @@ def test_ExpertAutomatedRegistration_inputs(): def test_ExpertAutomatedRegistration_outputs(): output_map = dict( - resampledImage=dict(extensions=None), saveTransform=dict(extensions=None) + resampledImage=dict( + extensions=None, + ), + saveTransform=dict( + extensions=None, + ), ) outputs = ExpertAutomatedRegistration.output_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py index 8682fc92db..77fb5d69f6 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py @@ -4,22 +4,58 @@ def test_LinearRegistration_inputs(): input_map = dict( - FixedImageFileName=dict(argstr="%s", extensions=None, position=-2), - MovingImageFileName=dict(argstr="%s", extensions=None, position=-1), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - fixedsmoothingfactor=dict(argstr="--fixedsmoothingfactor %d"), - histogrambins=dict(argstr="--histogrambins %d"), - initialtransform=dict(argstr="--initialtransform %s", extensions=None), - iterations=dict(argstr="--iterations %s", sep=","), - learningrate=dict(argstr="--learningrate %s", sep=","), - movingsmoothingfactor=dict(argstr="--movingsmoothingfactor %d"), - outputtransform=dict(argstr="--outputtransform %s", hash_files=False), + FixedImageFileName=dict( + argstr="%s", + extensions=None, + position=-2, + ), + MovingImageFileName=dict( + argstr="%s", + extensions=None, + position=-1, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedsmoothingfactor=dict( + argstr="--fixedsmoothingfactor %d", + ), + histogrambins=dict( + argstr="--histogrambins %d", + ), + initialtransform=dict( + argstr="--initialtransform %s", + extensions=None, + ), + iterations=dict( + argstr="--iterations %s", + sep=",", + ), + learningrate=dict( + argstr="--learningrate %s", + sep=",", + ), + movingsmoothingfactor=dict( + argstr="--movingsmoothingfactor %d", + ), + outputtransform=dict( + argstr="--outputtransform %s", + hash_files=False, + ), resampledmovingfilename=dict( - argstr="--resampledmovingfilename %s", hash_files=False + argstr="--resampledmovingfilename %s", + hash_files=False, + ), + spatialsamples=dict( + argstr="--spatialsamples %d", + ), + translationscale=dict( + argstr="--translationscale %f", ), - spatialsamples=dict(argstr="--spatialsamples %d"), - translationscale=dict(argstr="--translationscale %f"), ) inputs = LinearRegistration.input_spec() @@ -30,8 +66,12 @@ def test_LinearRegistration_inputs(): def test_LinearRegistration_outputs(): output_map = dict( - outputtransform=dict(extensions=None), - resampledmovingfilename=dict(extensions=None), + outputtransform=dict( + extensions=None, + ), + resampledmovingfilename=dict( + extensions=None, + ), ) outputs = LinearRegistration.output_spec() diff 
--git a/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py index 59613bb9f4..0f1e19d4ba 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py @@ -4,19 +4,53 @@ def test_MultiResolutionAffineRegistration_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - fixedImage=dict(argstr="%s", extensions=None, position=-2), - fixedImageMask=dict(argstr="--fixedImageMask %s", extensions=None), - fixedImageROI=dict(argstr="--fixedImageROI %s"), - metricTolerance=dict(argstr="--metricTolerance %f"), - movingImage=dict(argstr="%s", extensions=None, position=-1), - numIterations=dict(argstr="--numIterations %d"), - numLineIterations=dict(argstr="--numLineIterations %d"), - resampledImage=dict(argstr="--resampledImage %s", hash_files=False), - saveTransform=dict(argstr="--saveTransform %s", hash_files=False), - stepSize=dict(argstr="--stepSize %f"), - stepTolerance=dict(argstr="--stepTolerance %f"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedImage=dict( + argstr="%s", + extensions=None, + position=-2, + ), + fixedImageMask=dict( + argstr="--fixedImageMask %s", + extensions=None, + ), + fixedImageROI=dict( + argstr="--fixedImageROI %s", + ), + metricTolerance=dict( + argstr="--metricTolerance %f", + ), + movingImage=dict( + argstr="%s", + extensions=None, + position=-1, + ), + numIterations=dict( + argstr="--numIterations %d", + ), + numLineIterations=dict( + argstr="--numLineIterations %d", + ), + resampledImage=dict( + argstr="--resampledImage %s", + hash_files=False, + ), + saveTransform=dict( + argstr="--saveTransform %s", + hash_files=False, + ), + stepSize=dict( + argstr="--stepSize %f", + ), + stepTolerance=dict( + argstr="--stepTolerance %f", + ), ) inputs = MultiResolutionAffineRegistration.input_spec() @@ -27,7 +61,12 @@ def test_MultiResolutionAffineRegistration_inputs(): def test_MultiResolutionAffineRegistration_outputs(): output_map = dict( - resampledImage=dict(extensions=None), saveTransform=dict(extensions=None) + resampledImage=dict( + extensions=None, + ), + saveTransform=dict( + extensions=None, + ), ) outputs = MultiResolutionAffineRegistration.output_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py index 8a18403ddb..672d971471 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py @@ -4,13 +4,32 @@ def test_OtsuThresholdImageFilter_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="%s", extensions=None, position=-2), - insideValue=dict(argstr="--insideValue %d"), - numberOfBins=dict(argstr="--numberOfBins %d"), - outputVolume=dict(argstr="%s", hash_files=False, position=-1), - outsideValue=dict(argstr="--outsideValue %d"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + insideValue=dict( + argstr="--insideValue %d", + ), + numberOfBins=dict( + argstr="--numberOfBins %d", + ), + outputVolume=dict( + 
argstr="%s", + hash_files=False, + position=-1, + ), + outsideValue=dict( + argstr="--outsideValue %d", + ), ) inputs = OtsuThresholdImageFilter.input_spec() @@ -20,7 +39,12 @@ def test_OtsuThresholdImageFilter_inputs(): def test_OtsuThresholdImageFilter_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1)) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = OtsuThresholdImageFilter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py index 0ee06ba79a..a28c8231c8 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py @@ -4,14 +4,35 @@ def test_OtsuThresholdSegmentation_inputs(): input_map = dict( - args=dict(argstr="%s"), - brightObjects=dict(argstr="--brightObjects "), - environ=dict(nohash=True, usedefault=True), - faceConnected=dict(argstr="--faceConnected "), - inputVolume=dict(argstr="%s", extensions=None, position=-2), - minimumObjectSize=dict(argstr="--minimumObjectSize %d"), - numberOfBins=dict(argstr="--numberOfBins %d"), - outputVolume=dict(argstr="%s", hash_files=False, position=-1), + args=dict( + argstr="%s", + ), + brightObjects=dict( + argstr="--brightObjects ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + faceConnected=dict( + argstr="--faceConnected ", + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + minimumObjectSize=dict( + argstr="--minimumObjectSize %d", + ), + numberOfBins=dict( + argstr="--numberOfBins %d", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = OtsuThresholdSegmentation.input_spec() @@ -21,7 +42,12 @@ def test_OtsuThresholdSegmentation_inputs(): def test_OtsuThresholdSegmentation_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1)) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = OtsuThresholdSegmentation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py b/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py index fba4c4b1f1..2f47b3bd16 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py @@ -4,12 +4,30 @@ def test_ResampleScalarVolume_inputs(): input_map = dict( - InputVolume=dict(argstr="%s", extensions=None, position=-2), - OutputVolume=dict(argstr="%s", hash_files=False, position=-1), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - interpolation=dict(argstr="--interpolation %s"), - spacing=dict(argstr="--spacing %s", sep=","), + InputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + OutputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + interpolation=dict( + argstr="--interpolation %s", + ), + spacing=dict( + argstr="--spacing %s", + sep=",", + ), ) inputs = ResampleScalarVolume.input_spec() @@ -19,7 +37,12 @@ def test_ResampleScalarVolume_inputs(): def test_ResampleScalarVolume_outputs(): - output_map = dict(OutputVolume=dict(extensions=None, position=-1)) + 
output_map = dict( + OutputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = ResampleScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py index abab2f260c..3e8aba0e4d 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py @@ -4,23 +4,61 @@ def test_RigidRegistration_inputs(): input_map = dict( - FixedImageFileName=dict(argstr="%s", extensions=None, position=-2), - MovingImageFileName=dict(argstr="%s", extensions=None, position=-1), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - fixedsmoothingfactor=dict(argstr="--fixedsmoothingfactor %d"), - histogrambins=dict(argstr="--histogrambins %d"), - initialtransform=dict(argstr="--initialtransform %s", extensions=None), - iterations=dict(argstr="--iterations %s", sep=","), - learningrate=dict(argstr="--learningrate %s", sep=","), - movingsmoothingfactor=dict(argstr="--movingsmoothingfactor %d"), - outputtransform=dict(argstr="--outputtransform %s", hash_files=False), + FixedImageFileName=dict( + argstr="%s", + extensions=None, + position=-2, + ), + MovingImageFileName=dict( + argstr="%s", + extensions=None, + position=-1, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedsmoothingfactor=dict( + argstr="--fixedsmoothingfactor %d", + ), + histogrambins=dict( + argstr="--histogrambins %d", + ), + initialtransform=dict( + argstr="--initialtransform %s", + extensions=None, + ), + iterations=dict( + argstr="--iterations %s", + sep=",", + ), + learningrate=dict( + argstr="--learningrate %s", + sep=",", + ), + movingsmoothingfactor=dict( + argstr="--movingsmoothingfactor %d", + ), + outputtransform=dict( + argstr="--outputtransform %s", + hash_files=False, + ), resampledmovingfilename=dict( - argstr="--resampledmovingfilename %s", hash_files=False + argstr="--resampledmovingfilename %s", + hash_files=False, + ), + spatialsamples=dict( + argstr="--spatialsamples %d", + ), + testingmode=dict( + argstr="--testingmode ", + ), + translationscale=dict( + argstr="--translationscale %f", ), - spatialsamples=dict(argstr="--spatialsamples %d"), - testingmode=dict(argstr="--testingmode "), - translationscale=dict(argstr="--translationscale %f"), ) inputs = RigidRegistration.input_spec() @@ -31,8 +69,12 @@ def test_RigidRegistration_inputs(): def test_RigidRegistration_outputs(): output_map = dict( - outputtransform=dict(extensions=None), - resampledmovingfilename=dict(extensions=None), + outputtransform=dict( + extensions=None, + ), + resampledmovingfilename=dict( + extensions=None, + ), ) outputs = RigidRegistration.output_spec() diff --git a/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py b/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py index d5057e46c0..6bc91e4d5e 100644 --- a/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py +++ b/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py @@ -4,15 +4,43 @@ def test_IntensityDifferenceMetric_inputs(): input_map = dict( - args=dict(argstr="%s"), - baselineSegmentationVolume=dict(argstr="%s", extensions=None, position=-3), - baselineVolume=dict(argstr="%s", extensions=None, position=-4), - 
changingBandSize=dict(argstr="--changingBandSize %d"), - environ=dict(nohash=True, usedefault=True), - followupVolume=dict(argstr="%s", extensions=None, position=-2), - outputVolume=dict(argstr="%s", hash_files=False, position=-1), - reportFileName=dict(argstr="--reportFileName %s", hash_files=False), - sensitivityThreshold=dict(argstr="--sensitivityThreshold %f"), + args=dict( + argstr="%s", + ), + baselineSegmentationVolume=dict( + argstr="%s", + extensions=None, + position=-3, + ), + baselineVolume=dict( + argstr="%s", + extensions=None, + position=-4, + ), + changingBandSize=dict( + argstr="--changingBandSize %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + followupVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + reportFileName=dict( + argstr="--reportFileName %s", + hash_files=False, + ), + sensitivityThreshold=dict( + argstr="--sensitivityThreshold %f", + ), ) inputs = IntensityDifferenceMetric.input_spec() @@ -23,8 +51,13 @@ def test_IntensityDifferenceMetric_inputs(): def test_IntensityDifferenceMetric_outputs(): output_map = dict( - outputVolume=dict(extensions=None, position=-1), - reportFileName=dict(extensions=None), + outputVolume=dict( + extensions=None, + position=-1, + ), + reportFileName=dict( + extensions=None, + ), ) outputs = IntensityDifferenceMetric.output_spec() diff --git a/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py b/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py index 8e60f24f83..aec22b541f 100644 --- a/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py +++ b/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py @@ -4,18 +4,47 @@ def test_PETStandardUptakeValueComputation_inputs(): input_map = dict( - OutputLabel=dict(argstr="--OutputLabel %s"), - OutputLabelValue=dict(argstr="--OutputLabelValue %s"), - SUVMax=dict(argstr="--SUVMax %s"), - SUVMean=dict(argstr="--SUVMean %s"), - SUVMin=dict(argstr="--SUVMin %s"), - args=dict(argstr="%s"), - color=dict(argstr="--color %s", extensions=None), - csvFile=dict(argstr="--csvFile %s", hash_files=False), - environ=dict(nohash=True, usedefault=True), - labelMap=dict(argstr="--labelMap %s", extensions=None), - petDICOMPath=dict(argstr="--petDICOMPath %s"), - petVolume=dict(argstr="--petVolume %s", extensions=None), + OutputLabel=dict( + argstr="--OutputLabel %s", + ), + OutputLabelValue=dict( + argstr="--OutputLabelValue %s", + ), + SUVMax=dict( + argstr="--SUVMax %s", + ), + SUVMean=dict( + argstr="--SUVMean %s", + ), + SUVMin=dict( + argstr="--SUVMin %s", + ), + args=dict( + argstr="%s", + ), + color=dict( + argstr="--color %s", + extensions=None, + ), + csvFile=dict( + argstr="--csvFile %s", + hash_files=False, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + labelMap=dict( + argstr="--labelMap %s", + extensions=None, + ), + petDICOMPath=dict( + argstr="--petDICOMPath %s", + ), + petVolume=dict( + argstr="--petVolume %s", + extensions=None, + ), ) inputs = PETStandardUptakeValueComputation.input_spec() @@ -25,7 +54,11 @@ def test_PETStandardUptakeValueComputation_inputs(): def test_PETStandardUptakeValueComputation_outputs(): - output_map = dict(csvFile=dict(extensions=None)) + output_map = dict( + csvFile=dict( + extensions=None, + ), + ) outputs = PETStandardUptakeValueComputation.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py b/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py index fc4cfc7212..58c7c49f32 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py @@ -4,12 +4,26 @@ def test_ACPCTransform_inputs(): input_map = dict( - acpc=dict(argstr="--acpc %s..."), - args=dict(argstr="%s"), - debugSwitch=dict(argstr="--debugSwitch "), - environ=dict(nohash=True, usedefault=True), - midline=dict(argstr="--midline %s..."), - outputTransform=dict(argstr="--outputTransform %s", hash_files=False), + acpc=dict( + argstr="--acpc %s...", + ), + args=dict( + argstr="%s", + ), + debugSwitch=dict( + argstr="--debugSwitch ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + midline=dict( + argstr="--midline %s...", + ), + outputTransform=dict( + argstr="--outputTransform %s", + hash_files=False, + ), ) inputs = ACPCTransform.input_spec() @@ -19,7 +33,11 @@ def test_ACPCTransform_inputs(): def test_ACPCTransform_outputs(): - output_map = dict(outputTransform=dict(extensions=None)) + output_map = dict( + outputTransform=dict( + extensions=None, + ), + ) outputs = ACPCTransform.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py index 845cf63f3c..d1c8055df3 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py @@ -4,60 +4,149 @@ def test_BRAINSDemonWarp_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), arrayOfPyramidLevelIterations=dict( - argstr="--arrayOfPyramidLevelIterations %s", sep="," + argstr="--arrayOfPyramidLevelIterations %s", + sep=",", + ), + backgroundFillValue=dict( + argstr="--backgroundFillValue %d", ), - backgroundFillValue=dict(argstr="--backgroundFillValue %d"), checkerboardPatternSubdivisions=dict( - argstr="--checkerboardPatternSubdivisions %s", sep="," - ), - environ=dict(nohash=True, usedefault=True), - fixedBinaryVolume=dict(argstr="--fixedBinaryVolume %s", extensions=None), - fixedVolume=dict(argstr="--fixedVolume %s", extensions=None), - gradient_type=dict(argstr="--gradient_type %s"), - gui=dict(argstr="--gui "), - histogramMatch=dict(argstr="--histogramMatch "), + argstr="--checkerboardPatternSubdivisions %s", + sep=",", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedBinaryVolume=dict( + argstr="--fixedBinaryVolume %s", + extensions=None, + ), + fixedVolume=dict( + argstr="--fixedVolume %s", + extensions=None, + ), + gradient_type=dict( + argstr="--gradient_type %s", + ), + gui=dict( + argstr="--gui ", + ), + histogramMatch=dict( + argstr="--histogramMatch ", + ), initializeWithDisplacementField=dict( - argstr="--initializeWithDisplacementField %s", extensions=None + argstr="--initializeWithDisplacementField %s", + extensions=None, ), initializeWithTransform=dict( - argstr="--initializeWithTransform %s", extensions=None - ), - inputPixelType=dict(argstr="--inputPixelType %s"), - interpolationMode=dict(argstr="--interpolationMode %s"), - lowerThresholdForBOBF=dict(argstr="--lowerThresholdForBOBF %d"), - maskProcessingMode=dict(argstr="--maskProcessingMode %s"), - max_step_length=dict(argstr="--max_step_length %f"), - 
medianFilterSize=dict(argstr="--medianFilterSize %s", sep=","), - minimumFixedPyramid=dict(argstr="--minimumFixedPyramid %s", sep=","), - minimumMovingPyramid=dict(argstr="--minimumMovingPyramid %s", sep=","), - movingBinaryVolume=dict(argstr="--movingBinaryVolume %s", extensions=None), - movingVolume=dict(argstr="--movingVolume %s", extensions=None), - neighborhoodForBOBF=dict(argstr="--neighborhoodForBOBF %s", sep=","), - numberOfBCHApproximationTerms=dict(argstr="--numberOfBCHApproximationTerms %d"), - numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d"), - numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d"), - numberOfPyramidLevels=dict(argstr="--numberOfPyramidLevels %d"), - numberOfThreads=dict(argstr="--numberOfThreads %d"), + argstr="--initializeWithTransform %s", + extensions=None, + ), + inputPixelType=dict( + argstr="--inputPixelType %s", + ), + interpolationMode=dict( + argstr="--interpolationMode %s", + ), + lowerThresholdForBOBF=dict( + argstr="--lowerThresholdForBOBF %d", + ), + maskProcessingMode=dict( + argstr="--maskProcessingMode %s", + ), + max_step_length=dict( + argstr="--max_step_length %f", + ), + medianFilterSize=dict( + argstr="--medianFilterSize %s", + sep=",", + ), + minimumFixedPyramid=dict( + argstr="--minimumFixedPyramid %s", + sep=",", + ), + minimumMovingPyramid=dict( + argstr="--minimumMovingPyramid %s", + sep=",", + ), + movingBinaryVolume=dict( + argstr="--movingBinaryVolume %s", + extensions=None, + ), + movingVolume=dict( + argstr="--movingVolume %s", + extensions=None, + ), + neighborhoodForBOBF=dict( + argstr="--neighborhoodForBOBF %s", + sep=",", + ), + numberOfBCHApproximationTerms=dict( + argstr="--numberOfBCHApproximationTerms %d", + ), + numberOfHistogramBins=dict( + argstr="--numberOfHistogramBins %d", + ), + numberOfMatchPoints=dict( + argstr="--numberOfMatchPoints %d", + ), + numberOfPyramidLevels=dict( + argstr="--numberOfPyramidLevels %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), outputCheckerboardVolume=dict( - argstr="--outputCheckerboardVolume %s", hash_files=False + argstr="--outputCheckerboardVolume %s", + hash_files=False, + ), + outputDebug=dict( + argstr="--outputDebug ", + ), + outputDisplacementFieldPrefix=dict( + argstr="--outputDisplacementFieldPrefix %s", ), - outputDebug=dict(argstr="--outputDebug "), - outputDisplacementFieldPrefix=dict(argstr="--outputDisplacementFieldPrefix %s"), outputDisplacementFieldVolume=dict( - argstr="--outputDisplacementFieldVolume %s", hash_files=False - ), - outputNormalized=dict(argstr="--outputNormalized "), - outputPixelType=dict(argstr="--outputPixelType %s"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - promptUser=dict(argstr="--promptUser "), - registrationFilterType=dict(argstr="--registrationFilterType %s"), - seedForBOBF=dict(argstr="--seedForBOBF %s", sep=","), - smoothDisplacementFieldSigma=dict(argstr="--smoothDisplacementFieldSigma %f"), - upFieldSmoothing=dict(argstr="--upFieldSmoothing %f"), - upperThresholdForBOBF=dict(argstr="--upperThresholdForBOBF %d"), - use_vanilla_dem=dict(argstr="--use_vanilla_dem "), + argstr="--outputDisplacementFieldVolume %s", + hash_files=False, + ), + outputNormalized=dict( + argstr="--outputNormalized ", + ), + outputPixelType=dict( + argstr="--outputPixelType %s", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + promptUser=dict( + argstr="--promptUser ", + ), + registrationFilterType=dict( + argstr="--registrationFilterType %s", + ), + 
seedForBOBF=dict( + argstr="--seedForBOBF %s", + sep=",", + ), + smoothDisplacementFieldSigma=dict( + argstr="--smoothDisplacementFieldSigma %f", + ), + upFieldSmoothing=dict( + argstr="--upFieldSmoothing %f", + ), + upperThresholdForBOBF=dict( + argstr="--upperThresholdForBOBF %d", + ), + use_vanilla_dem=dict( + argstr="--use_vanilla_dem ", + ), ) inputs = BRAINSDemonWarp.input_spec() @@ -68,9 +157,15 @@ def test_BRAINSDemonWarp_inputs(): def test_BRAINSDemonWarp_outputs(): output_map = dict( - outputCheckerboardVolume=dict(extensions=None), - outputDisplacementFieldVolume=dict(extensions=None), - outputVolume=dict(extensions=None), + outputCheckerboardVolume=dict( + extensions=None, + ), + outputDisplacementFieldVolume=dict( + extensions=None, + ), + outputVolume=dict( + extensions=None, + ), ) outputs = BRAINSDemonWarp.output_spec() diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py index f3d5a50759..0d7b124635 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py @@ -5,80 +5,213 @@ def test_BRAINSFit_inputs(): input_map = dict( NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_00=dict( - argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_00 " + argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_00 ", ), NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_01=dict( - argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_01 " + argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_01 ", ), NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_02=dict( - argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_02 " - ), - ROIAutoClosingSize=dict(argstr="--ROIAutoClosingSize %f"), - ROIAutoDilateSize=dict(argstr="--ROIAutoDilateSize %f"), - args=dict(argstr="%s"), - backgroundFillValue=dict(argstr="--backgroundFillValue %f"), - bsplineTransform=dict(argstr="--bsplineTransform %s", hash_files=False), - costFunctionConvergenceFactor=dict(argstr="--costFunctionConvergenceFactor %f"), - costMetric=dict(argstr="--costMetric %s"), - debugLevel=dict(argstr="--debugLevel %d"), - environ=dict(nohash=True, usedefault=True), - failureExitCode=dict(argstr="--failureExitCode %d"), - fixedBinaryVolume=dict(argstr="--fixedBinaryVolume %s", extensions=None), - fixedVolume=dict(argstr="--fixedVolume %s", extensions=None), - fixedVolumeTimeIndex=dict(argstr="--fixedVolumeTimeIndex %d"), - forceMINumberOfThreads=dict(argstr="--forceMINumberOfThreads %d"), - gui=dict(argstr="--gui "), - histogramMatch=dict(argstr="--histogramMatch "), - initialTransform=dict(argstr="--initialTransform %s", extensions=None), - initializeTransformMode=dict(argstr="--initializeTransformMode %s"), - interpolationMode=dict(argstr="--interpolationMode %s"), - linearTransform=dict(argstr="--linearTransform %s", hash_files=False), - maskInferiorCutOffFromCenter=dict(argstr="--maskInferiorCutOffFromCenter %f"), - maskProcessingMode=dict(argstr="--maskProcessingMode %s"), - maxBSplineDisplacement=dict(argstr="--maxBSplineDisplacement %f"), - maximumStepLength=dict(argstr="--maximumStepLength %f"), - medianFilterSize=dict(argstr="--medianFilterSize %s", sep=","), - minimumStepLength=dict(argstr="--minimumStepLength %s", sep=","), - movingBinaryVolume=dict(argstr="--movingBinaryVolume %s", extensions=None), - movingVolume=dict(argstr="--movingVolume %s", extensions=None), - movingVolumeTimeIndex=dict(argstr="--movingVolumeTimeIndex %d"), - numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d"), - 
numberOfIterations=dict(argstr="--numberOfIterations %s", sep=","), - numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d"), - numberOfSamples=dict(argstr="--numberOfSamples %d"), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputFixedVolumeROI=dict(argstr="--outputFixedVolumeROI %s", hash_files=False), + argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_02 ", + ), + ROIAutoClosingSize=dict( + argstr="--ROIAutoClosingSize %f", + ), + ROIAutoDilateSize=dict( + argstr="--ROIAutoDilateSize %f", + ), + args=dict( + argstr="%s", + ), + backgroundFillValue=dict( + argstr="--backgroundFillValue %f", + ), + bsplineTransform=dict( + argstr="--bsplineTransform %s", + hash_files=False, + ), + costFunctionConvergenceFactor=dict( + argstr="--costFunctionConvergenceFactor %f", + ), + costMetric=dict( + argstr="--costMetric %s", + ), + debugLevel=dict( + argstr="--debugLevel %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + failureExitCode=dict( + argstr="--failureExitCode %d", + ), + fixedBinaryVolume=dict( + argstr="--fixedBinaryVolume %s", + extensions=None, + ), + fixedVolume=dict( + argstr="--fixedVolume %s", + extensions=None, + ), + fixedVolumeTimeIndex=dict( + argstr="--fixedVolumeTimeIndex %d", + ), + forceMINumberOfThreads=dict( + argstr="--forceMINumberOfThreads %d", + ), + gui=dict( + argstr="--gui ", + ), + histogramMatch=dict( + argstr="--histogramMatch ", + ), + initialTransform=dict( + argstr="--initialTransform %s", + extensions=None, + ), + initializeTransformMode=dict( + argstr="--initializeTransformMode %s", + ), + interpolationMode=dict( + argstr="--interpolationMode %s", + ), + linearTransform=dict( + argstr="--linearTransform %s", + hash_files=False, + ), + maskInferiorCutOffFromCenter=dict( + argstr="--maskInferiorCutOffFromCenter %f", + ), + maskProcessingMode=dict( + argstr="--maskProcessingMode %s", + ), + maxBSplineDisplacement=dict( + argstr="--maxBSplineDisplacement %f", + ), + maximumStepLength=dict( + argstr="--maximumStepLength %f", + ), + medianFilterSize=dict( + argstr="--medianFilterSize %s", + sep=",", + ), + minimumStepLength=dict( + argstr="--minimumStepLength %s", + sep=",", + ), + movingBinaryVolume=dict( + argstr="--movingBinaryVolume %s", + extensions=None, + ), + movingVolume=dict( + argstr="--movingVolume %s", + extensions=None, + ), + movingVolumeTimeIndex=dict( + argstr="--movingVolumeTimeIndex %d", + ), + numberOfHistogramBins=dict( + argstr="--numberOfHistogramBins %d", + ), + numberOfIterations=dict( + argstr="--numberOfIterations %s", + sep=",", + ), + numberOfMatchPoints=dict( + argstr="--numberOfMatchPoints %d", + ), + numberOfSamples=dict( + argstr="--numberOfSamples %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputFixedVolumeROI=dict( + argstr="--outputFixedVolumeROI %s", + hash_files=False, + ), outputMovingVolumeROI=dict( - argstr="--outputMovingVolumeROI %s", hash_files=False - ), - outputTransform=dict(argstr="--outputTransform %s", hash_files=False), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - outputVolumePixelType=dict(argstr="--outputVolumePixelType %s"), - permitParameterVariation=dict(argstr="--permitParameterVariation %s", sep=","), - projectedGradientTolerance=dict(argstr="--projectedGradientTolerance %f"), - promptUser=dict(argstr="--promptUser "), - relaxationFactor=dict(argstr="--relaxationFactor %f"), - removeIntensityOutliers=dict(argstr="--removeIntensityOutliers %f"), - reproportionScale=dict(argstr="--reproportionScale %f"), - 
scaleOutputValues=dict(argstr="--scaleOutputValues "), - skewScale=dict(argstr="--skewScale %f"), - splineGridSize=dict(argstr="--splineGridSize %s", sep=","), + argstr="--outputMovingVolumeROI %s", + hash_files=False, + ), + outputTransform=dict( + argstr="--outputTransform %s", + hash_files=False, + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + outputVolumePixelType=dict( + argstr="--outputVolumePixelType %s", + ), + permitParameterVariation=dict( + argstr="--permitParameterVariation %s", + sep=",", + ), + projectedGradientTolerance=dict( + argstr="--projectedGradientTolerance %f", + ), + promptUser=dict( + argstr="--promptUser ", + ), + relaxationFactor=dict( + argstr="--relaxationFactor %f", + ), + removeIntensityOutliers=dict( + argstr="--removeIntensityOutliers %f", + ), + reproportionScale=dict( + argstr="--reproportionScale %f", + ), + scaleOutputValues=dict( + argstr="--scaleOutputValues ", + ), + skewScale=dict( + argstr="--skewScale %f", + ), + splineGridSize=dict( + argstr="--splineGridSize %s", + sep=",", + ), strippedOutputTransform=dict( - argstr="--strippedOutputTransform %s", hash_files=False + argstr="--strippedOutputTransform %s", + hash_files=False, + ), + transformType=dict( + argstr="--transformType %s", + sep=",", + ), + translationScale=dict( + argstr="--translationScale %f", + ), + useAffine=dict( + argstr="--useAffine ", + ), + useBSpline=dict( + argstr="--useBSpline ", ), - transformType=dict(argstr="--transformType %s", sep=","), - translationScale=dict(argstr="--translationScale %f"), - useAffine=dict(argstr="--useAffine "), - useBSpline=dict(argstr="--useBSpline "), useCachingOfBSplineWeightsMode=dict( - argstr="--useCachingOfBSplineWeightsMode %s" - ), - useExplicitPDFDerivativesMode=dict(argstr="--useExplicitPDFDerivativesMode %s"), - useRigid=dict(argstr="--useRigid "), - useScaleSkewVersor3D=dict(argstr="--useScaleSkewVersor3D "), - useScaleVersor3D=dict(argstr="--useScaleVersor3D "), - writeOutputTransformInFloat=dict(argstr="--writeOutputTransformInFloat "), - writeTransformOnFailure=dict(argstr="--writeTransformOnFailure "), + argstr="--useCachingOfBSplineWeightsMode %s", + ), + useExplicitPDFDerivativesMode=dict( + argstr="--useExplicitPDFDerivativesMode %s", + ), + useRigid=dict( + argstr="--useRigid ", + ), + useScaleSkewVersor3D=dict( + argstr="--useScaleSkewVersor3D ", + ), + useScaleVersor3D=dict( + argstr="--useScaleVersor3D ", + ), + writeOutputTransformInFloat=dict( + argstr="--writeOutputTransformInFloat ", + ), + writeTransformOnFailure=dict( + argstr="--writeTransformOnFailure ", + ), ) inputs = BRAINSFit.input_spec() @@ -89,13 +222,27 @@ def test_BRAINSFit_inputs(): def test_BRAINSFit_outputs(): output_map = dict( - bsplineTransform=dict(extensions=None), - linearTransform=dict(extensions=None), - outputFixedVolumeROI=dict(extensions=None), - outputMovingVolumeROI=dict(extensions=None), - outputTransform=dict(extensions=None), - outputVolume=dict(extensions=None), - strippedOutputTransform=dict(extensions=None), + bsplineTransform=dict( + extensions=None, + ), + linearTransform=dict( + extensions=None, + ), + outputFixedVolumeROI=dict( + extensions=None, + ), + outputMovingVolumeROI=dict( + extensions=None, + ), + outputTransform=dict( + extensions=None, + ), + outputVolume=dict( + extensions=None, + ), + strippedOutputTransform=dict( + extensions=None, + ), ) outputs = BRAINSFit.output_spec() diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py 
b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py index f2b6760eea..46d175da07 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py @@ -4,19 +4,52 @@ def test_BRAINSResample_inputs(): input_map = dict( - args=dict(argstr="%s"), - defaultValue=dict(argstr="--defaultValue %f"), - deformationVolume=dict(argstr="--deformationVolume %s", extensions=None), - environ=dict(nohash=True, usedefault=True), - gridSpacing=dict(argstr="--gridSpacing %s", sep=","), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - interpolationMode=dict(argstr="--interpolationMode %s"), - inverseTransform=dict(argstr="--inverseTransform "), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - pixelType=dict(argstr="--pixelType %s"), - referenceVolume=dict(argstr="--referenceVolume %s", extensions=None), - warpTransform=dict(argstr="--warpTransform %s", extensions=None), + args=dict( + argstr="%s", + ), + defaultValue=dict( + argstr="--defaultValue %f", + ), + deformationVolume=dict( + argstr="--deformationVolume %s", + extensions=None, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gridSpacing=dict( + argstr="--gridSpacing %s", + sep=",", + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + interpolationMode=dict( + argstr="--interpolationMode %s", + ), + inverseTransform=dict( + argstr="--inverseTransform ", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + pixelType=dict( + argstr="--pixelType %s", + ), + referenceVolume=dict( + argstr="--referenceVolume %s", + extensions=None, + ), + warpTransform=dict( + argstr="--warpTransform %s", + extensions=None, + ), ) inputs = BRAINSResample.input_spec() @@ -26,7 +59,11 @@ def test_BRAINSResample_inputs(): def test_BRAINSResample_outputs(): - output_map = dict(outputVolume=dict(extensions=None)) + output_map = dict( + outputVolume=dict( + extensions=None, + ), + ) outputs = BRAINSResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py b/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py index f70f00a4ed..6b511790c7 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py @@ -4,14 +4,32 @@ def test_FiducialRegistration_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - fixedLandmarks=dict(argstr="--fixedLandmarks %s..."), - movingLandmarks=dict(argstr="--movingLandmarks %s..."), - outputMessage=dict(argstr="--outputMessage %s"), - rms=dict(argstr="--rms %f"), - saveTransform=dict(argstr="--saveTransform %s", hash_files=False), - transformType=dict(argstr="--transformType %s"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedLandmarks=dict( + argstr="--fixedLandmarks %s...", + ), + movingLandmarks=dict( + argstr="--movingLandmarks %s...", + ), + outputMessage=dict( + argstr="--outputMessage %s", + ), + rms=dict( + argstr="--rms %f", + ), + saveTransform=dict( + argstr="--saveTransform %s", + hash_files=False, + ), + transformType=dict( + argstr="--transformType %s", + ), ) inputs = 
FiducialRegistration.input_spec() @@ -21,7 +39,11 @@ def test_FiducialRegistration_inputs(): def test_FiducialRegistration_outputs(): - output_map = dict(saveTransform=dict(extensions=None)) + output_map = dict( + saveTransform=dict( + extensions=None, + ), + ) outputs = FiducialRegistration.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py b/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py index 45bfc67734..af8bac8680 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py @@ -4,61 +4,151 @@ def test_VBRAINSDemonWarp_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), arrayOfPyramidLevelIterations=dict( - argstr="--arrayOfPyramidLevelIterations %s", sep="," + argstr="--arrayOfPyramidLevelIterations %s", + sep=",", + ), + backgroundFillValue=dict( + argstr="--backgroundFillValue %d", ), - backgroundFillValue=dict(argstr="--backgroundFillValue %d"), checkerboardPatternSubdivisions=dict( - argstr="--checkerboardPatternSubdivisions %s", sep="," - ), - environ=dict(nohash=True, usedefault=True), - fixedBinaryVolume=dict(argstr="--fixedBinaryVolume %s", extensions=None), - fixedVolume=dict(argstr="--fixedVolume %s..."), - gradient_type=dict(argstr="--gradient_type %s"), - gui=dict(argstr="--gui "), - histogramMatch=dict(argstr="--histogramMatch "), + argstr="--checkerboardPatternSubdivisions %s", + sep=",", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedBinaryVolume=dict( + argstr="--fixedBinaryVolume %s", + extensions=None, + ), + fixedVolume=dict( + argstr="--fixedVolume %s...", + ), + gradient_type=dict( + argstr="--gradient_type %s", + ), + gui=dict( + argstr="--gui ", + ), + histogramMatch=dict( + argstr="--histogramMatch ", + ), initializeWithDisplacementField=dict( - argstr="--initializeWithDisplacementField %s", extensions=None + argstr="--initializeWithDisplacementField %s", + extensions=None, ), initializeWithTransform=dict( - argstr="--initializeWithTransform %s", extensions=None - ), - inputPixelType=dict(argstr="--inputPixelType %s"), - interpolationMode=dict(argstr="--interpolationMode %s"), - lowerThresholdForBOBF=dict(argstr="--lowerThresholdForBOBF %d"), - makeBOBF=dict(argstr="--makeBOBF "), - max_step_length=dict(argstr="--max_step_length %f"), - medianFilterSize=dict(argstr="--medianFilterSize %s", sep=","), - minimumFixedPyramid=dict(argstr="--minimumFixedPyramid %s", sep=","), - minimumMovingPyramid=dict(argstr="--minimumMovingPyramid %s", sep=","), - movingBinaryVolume=dict(argstr="--movingBinaryVolume %s", extensions=None), - movingVolume=dict(argstr="--movingVolume %s..."), - neighborhoodForBOBF=dict(argstr="--neighborhoodForBOBF %s", sep=","), - numberOfBCHApproximationTerms=dict(argstr="--numberOfBCHApproximationTerms %d"), - numberOfHistogramBins=dict(argstr="--numberOfHistogramBins %d"), - numberOfMatchPoints=dict(argstr="--numberOfMatchPoints %d"), - numberOfPyramidLevels=dict(argstr="--numberOfPyramidLevels %d"), - numberOfThreads=dict(argstr="--numberOfThreads %d"), + argstr="--initializeWithTransform %s", + extensions=None, + ), + inputPixelType=dict( + argstr="--inputPixelType %s", + ), + interpolationMode=dict( + argstr="--interpolationMode %s", + ), + lowerThresholdForBOBF=dict( + argstr="--lowerThresholdForBOBF %d", + ), + makeBOBF=dict( + argstr="--makeBOBF ", + ), + 
max_step_length=dict( + argstr="--max_step_length %f", + ), + medianFilterSize=dict( + argstr="--medianFilterSize %s", + sep=",", + ), + minimumFixedPyramid=dict( + argstr="--minimumFixedPyramid %s", + sep=",", + ), + minimumMovingPyramid=dict( + argstr="--minimumMovingPyramid %s", + sep=",", + ), + movingBinaryVolume=dict( + argstr="--movingBinaryVolume %s", + extensions=None, + ), + movingVolume=dict( + argstr="--movingVolume %s...", + ), + neighborhoodForBOBF=dict( + argstr="--neighborhoodForBOBF %s", + sep=",", + ), + numberOfBCHApproximationTerms=dict( + argstr="--numberOfBCHApproximationTerms %d", + ), + numberOfHistogramBins=dict( + argstr="--numberOfHistogramBins %d", + ), + numberOfMatchPoints=dict( + argstr="--numberOfMatchPoints %d", + ), + numberOfPyramidLevels=dict( + argstr="--numberOfPyramidLevels %d", + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), outputCheckerboardVolume=dict( - argstr="--outputCheckerboardVolume %s", hash_files=False + argstr="--outputCheckerboardVolume %s", + hash_files=False, + ), + outputDebug=dict( + argstr="--outputDebug ", + ), + outputDisplacementFieldPrefix=dict( + argstr="--outputDisplacementFieldPrefix %s", ), - outputDebug=dict(argstr="--outputDebug "), - outputDisplacementFieldPrefix=dict(argstr="--outputDisplacementFieldPrefix %s"), outputDisplacementFieldVolume=dict( - argstr="--outputDisplacementFieldVolume %s", hash_files=False - ), - outputNormalized=dict(argstr="--outputNormalized "), - outputPixelType=dict(argstr="--outputPixelType %s"), - outputVolume=dict(argstr="--outputVolume %s", hash_files=False), - promptUser=dict(argstr="--promptUser "), - registrationFilterType=dict(argstr="--registrationFilterType %s"), - seedForBOBF=dict(argstr="--seedForBOBF %s", sep=","), - smoothDisplacementFieldSigma=dict(argstr="--smoothDisplacementFieldSigma %f"), - upFieldSmoothing=dict(argstr="--upFieldSmoothing %f"), - upperThresholdForBOBF=dict(argstr="--upperThresholdForBOBF %d"), - use_vanilla_dem=dict(argstr="--use_vanilla_dem "), - weightFactors=dict(argstr="--weightFactors %s", sep=","), + argstr="--outputDisplacementFieldVolume %s", + hash_files=False, + ), + outputNormalized=dict( + argstr="--outputNormalized ", + ), + outputPixelType=dict( + argstr="--outputPixelType %s", + ), + outputVolume=dict( + argstr="--outputVolume %s", + hash_files=False, + ), + promptUser=dict( + argstr="--promptUser ", + ), + registrationFilterType=dict( + argstr="--registrationFilterType %s", + ), + seedForBOBF=dict( + argstr="--seedForBOBF %s", + sep=",", + ), + smoothDisplacementFieldSigma=dict( + argstr="--smoothDisplacementFieldSigma %f", + ), + upFieldSmoothing=dict( + argstr="--upFieldSmoothing %f", + ), + upperThresholdForBOBF=dict( + argstr="--upperThresholdForBOBF %d", + ), + use_vanilla_dem=dict( + argstr="--use_vanilla_dem ", + ), + weightFactors=dict( + argstr="--weightFactors %s", + sep=",", + ), ) inputs = VBRAINSDemonWarp.input_spec() @@ -69,9 +159,15 @@ def test_VBRAINSDemonWarp_inputs(): def test_VBRAINSDemonWarp_outputs(): output_map = dict( - outputCheckerboardVolume=dict(extensions=None), - outputDisplacementFieldVolume=dict(extensions=None), - outputVolume=dict(extensions=None), + outputCheckerboardVolume=dict( + extensions=None, + ), + outputDisplacementFieldVolume=dict( + extensions=None, + ), + outputVolume=dict( + extensions=None, + ), ) outputs = VBRAINSDemonWarp.output_spec() diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py 
b/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py index cffee26c1f..8990caaf1a 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py @@ -4,19 +4,43 @@ def test_BRAINSROIAuto_inputs(): input_map = dict( - ROIAutoDilateSize=dict(argstr="--ROIAutoDilateSize %f"), - args=dict(argstr="%s"), - closingSize=dict(argstr="--closingSize %f"), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="--inputVolume %s", extensions=None), - numberOfThreads=dict(argstr="--numberOfThreads %d"), - otsuPercentileThreshold=dict(argstr="--otsuPercentileThreshold %f"), + ROIAutoDilateSize=dict( + argstr="--ROIAutoDilateSize %f", + ), + args=dict( + argstr="%s", + ), + closingSize=dict( + argstr="--closingSize %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="--inputVolume %s", + extensions=None, + ), + numberOfThreads=dict( + argstr="--numberOfThreads %d", + ), + otsuPercentileThreshold=dict( + argstr="--otsuPercentileThreshold %f", + ), outputClippedVolumeROI=dict( - argstr="--outputClippedVolumeROI %s", hash_files=False + argstr="--outputClippedVolumeROI %s", + hash_files=False, + ), + outputROIMaskVolume=dict( + argstr="--outputROIMaskVolume %s", + hash_files=False, + ), + outputVolumePixelType=dict( + argstr="--outputVolumePixelType %s", + ), + thresholdCorrectionFactor=dict( + argstr="--thresholdCorrectionFactor %f", ), - outputROIMaskVolume=dict(argstr="--outputROIMaskVolume %s", hash_files=False), - outputVolumePixelType=dict(argstr="--outputVolumePixelType %s"), - thresholdCorrectionFactor=dict(argstr="--thresholdCorrectionFactor %f"), ) inputs = BRAINSROIAuto.input_spec() @@ -27,8 +51,12 @@ def test_BRAINSROIAuto_inputs(): def test_BRAINSROIAuto_outputs(): output_map = dict( - outputClippedVolumeROI=dict(extensions=None), - outputROIMaskVolume=dict(extensions=None), + outputClippedVolumeROI=dict( + extensions=None, + ), + outputROIMaskVolume=dict( + extensions=None, + ), ) outputs = BRAINSROIAuto.output_spec() diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py index 68cc07d823..2ed2595d4e 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py @@ -4,35 +4,81 @@ def test_EMSegmentCommandLine_inputs(): input_map = dict( - args=dict(argstr="%s"), - atlasVolumeFileNames=dict(argstr="--atlasVolumeFileNames %s..."), - disableCompression=dict(argstr="--disableCompression "), - disableMultithreading=dict(argstr="--disableMultithreading %d"), - dontUpdateIntermediateData=dict(argstr="--dontUpdateIntermediateData %d"), - dontWriteResults=dict(argstr="--dontWriteResults "), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + atlasVolumeFileNames=dict( + argstr="--atlasVolumeFileNames %s...", + ), + disableCompression=dict( + argstr="--disableCompression ", + ), + disableMultithreading=dict( + argstr="--disableMultithreading %d", + ), + dontUpdateIntermediateData=dict( + argstr="--dontUpdateIntermediateData %d", + ), + dontWriteResults=dict( + argstr="--dontWriteResults ", + ), + environ=dict( + nohash=True, + usedefault=True, + ), generateEmptyMRMLSceneAndQuit=dict( - argstr="--generateEmptyMRMLSceneAndQuit %s", hash_files=False - ), - 
intermediateResultsDirectory=dict(argstr="--intermediateResultsDirectory %s"), - keepTempFiles=dict(argstr="--keepTempFiles "), - loadAtlasNonCentered=dict(argstr="--loadAtlasNonCentered "), - loadTargetCentered=dict(argstr="--loadTargetCentered "), - mrmlSceneFileName=dict(argstr="--mrmlSceneFileName %s", extensions=None), - parametersMRMLNodeName=dict(argstr="--parametersMRMLNodeName %s"), - registrationAffineType=dict(argstr="--registrationAffineType %d"), - registrationDeformableType=dict(argstr="--registrationDeformableType %d"), - registrationPackage=dict(argstr="--registrationPackage %s"), + argstr="--generateEmptyMRMLSceneAndQuit %s", + hash_files=False, + ), + intermediateResultsDirectory=dict( + argstr="--intermediateResultsDirectory %s", + ), + keepTempFiles=dict( + argstr="--keepTempFiles ", + ), + loadAtlasNonCentered=dict( + argstr="--loadAtlasNonCentered ", + ), + loadTargetCentered=dict( + argstr="--loadTargetCentered ", + ), + mrmlSceneFileName=dict( + argstr="--mrmlSceneFileName %s", + extensions=None, + ), + parametersMRMLNodeName=dict( + argstr="--parametersMRMLNodeName %s", + ), + registrationAffineType=dict( + argstr="--registrationAffineType %d", + ), + registrationDeformableType=dict( + argstr="--registrationDeformableType %d", + ), + registrationPackage=dict( + argstr="--registrationPackage %s", + ), resultMRMLSceneFileName=dict( - argstr="--resultMRMLSceneFileName %s", hash_files=False + argstr="--resultMRMLSceneFileName %s", + hash_files=False, ), resultStandardVolumeFileName=dict( - argstr="--resultStandardVolumeFileName %s", extensions=None + argstr="--resultStandardVolumeFileName %s", + extensions=None, + ), + resultVolumeFileName=dict( + argstr="--resultVolumeFileName %s", + hash_files=False, + ), + targetVolumeFileNames=dict( + argstr="--targetVolumeFileNames %s...", + ), + taskPreProcessingSetting=dict( + argstr="--taskPreProcessingSetting %s", + ), + verbose=dict( + argstr="--verbose ", ), - resultVolumeFileName=dict(argstr="--resultVolumeFileName %s", hash_files=False), - targetVolumeFileNames=dict(argstr="--targetVolumeFileNames %s..."), - taskPreProcessingSetting=dict(argstr="--taskPreProcessingSetting %s"), - verbose=dict(argstr="--verbose "), ) inputs = EMSegmentCommandLine.input_spec() @@ -43,9 +89,15 @@ def test_EMSegmentCommandLine_inputs(): def test_EMSegmentCommandLine_outputs(): output_map = dict( - generateEmptyMRMLSceneAndQuit=dict(extensions=None), - resultMRMLSceneFileName=dict(extensions=None), - resultVolumeFileName=dict(extensions=None), + generateEmptyMRMLSceneAndQuit=dict( + extensions=None, + ), + resultMRMLSceneFileName=dict( + extensions=None, + ), + resultVolumeFileName=dict( + extensions=None, + ), ) outputs = EMSegmentCommandLine.output_spec() diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py index fdd72676c4..4bd05c6fc2 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py @@ -4,16 +4,43 @@ def test_RobustStatisticsSegmenter_inputs(): input_map = dict( - args=dict(argstr="%s"), - curvatureWeight=dict(argstr="--curvatureWeight %f"), - environ=dict(nohash=True, usedefault=True), - expectedVolume=dict(argstr="--expectedVolume %f"), - intensityHomogeneity=dict(argstr="--intensityHomogeneity %f"), - labelImageFileName=dict(argstr="%s", extensions=None, position=-2), - 
labelValue=dict(argstr="--labelValue %d"), - maxRunningTime=dict(argstr="--maxRunningTime %f"), - originalImageFileName=dict(argstr="%s", extensions=None, position=-3), - segmentedImageFileName=dict(argstr="%s", hash_files=False, position=-1), + args=dict( + argstr="%s", + ), + curvatureWeight=dict( + argstr="--curvatureWeight %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + expectedVolume=dict( + argstr="--expectedVolume %f", + ), + intensityHomogeneity=dict( + argstr="--intensityHomogeneity %f", + ), + labelImageFileName=dict( + argstr="%s", + extensions=None, + position=-2, + ), + labelValue=dict( + argstr="--labelValue %d", + ), + maxRunningTime=dict( + argstr="--maxRunningTime %f", + ), + originalImageFileName=dict( + argstr="%s", + extensions=None, + position=-3, + ), + segmentedImageFileName=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = RobustStatisticsSegmenter.input_spec() @@ -23,7 +50,12 @@ def test_RobustStatisticsSegmenter_inputs(): def test_RobustStatisticsSegmenter_outputs(): - output_map = dict(segmentedImageFileName=dict(extensions=None, position=-1)) + output_map = dict( + segmentedImageFileName=dict( + extensions=None, + position=-1, + ), + ) outputs = RobustStatisticsSegmenter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py index d70780d809..9a308ec959 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py @@ -4,17 +4,44 @@ def test_SimpleRegionGrowingSegmentation_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputVolume=dict(argstr="%s", extensions=None, position=-2), - iterations=dict(argstr="--iterations %d"), - labelvalue=dict(argstr="--labelvalue %d"), - multiplier=dict(argstr="--multiplier %f"), - neighborhood=dict(argstr="--neighborhood %d"), - outputVolume=dict(argstr="%s", hash_files=False, position=-1), - seed=dict(argstr="--seed %s..."), - smoothingIterations=dict(argstr="--smoothingIterations %d"), - timestep=dict(argstr="--timestep %f"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + iterations=dict( + argstr="--iterations %d", + ), + labelvalue=dict( + argstr="--labelvalue %d", + ), + multiplier=dict( + argstr="--multiplier %f", + ), + neighborhood=dict( + argstr="--neighborhood %d", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + seed=dict( + argstr="--seed %s...", + ), + smoothingIterations=dict( + argstr="--smoothingIterations %d", + ), + timestep=dict( + argstr="--timestep %f", + ), ) inputs = SimpleRegionGrowingSegmentation.input_spec() @@ -24,7 +51,12 @@ def test_SimpleRegionGrowingSegmentation_inputs(): def test_SimpleRegionGrowingSegmentation_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1)) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = SimpleRegionGrowingSegmentation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py b/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py index 
50138f9388..b02dfd595d 100644 --- a/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py +++ b/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py @@ -4,15 +4,35 @@ def test_DicomToNrrdConverter_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputDicomDirectory=dict(argstr="--inputDicomDirectory %s"), - outputDirectory=dict(argstr="--outputDirectory %s", hash_files=False), - outputVolume=dict(argstr="--outputVolume %s"), - smallGradientThreshold=dict(argstr="--smallGradientThreshold %f"), - useBMatrixGradientDirections=dict(argstr="--useBMatrixGradientDirections "), - useIdentityMeaseurementFrame=dict(argstr="--useIdentityMeaseurementFrame "), - writeProtocolGradientsFile=dict(argstr="--writeProtocolGradientsFile "), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputDicomDirectory=dict( + argstr="--inputDicomDirectory %s", + ), + outputDirectory=dict( + argstr="--outputDirectory %s", + hash_files=False, + ), + outputVolume=dict( + argstr="--outputVolume %s", + ), + smallGradientThreshold=dict( + argstr="--smallGradientThreshold %f", + ), + useBMatrixGradientDirections=dict( + argstr="--useBMatrixGradientDirections ", + ), + useIdentityMeaseurementFrame=dict( + argstr="--useIdentityMeaseurementFrame ", + ), + writeProtocolGradientsFile=dict( + argstr="--writeProtocolGradientsFile ", + ), ) inputs = DicomToNrrdConverter.input_spec() @@ -22,7 +42,9 @@ def test_DicomToNrrdConverter_inputs(): def test_DicomToNrrdConverter_outputs(): - output_map = dict(outputDirectory=dict()) + output_map = dict( + outputDirectory=dict(), + ) outputs = DicomToNrrdConverter.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py b/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py index ef55ce0c3a..338fa49cae 100644 --- a/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py +++ b/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py @@ -4,11 +4,24 @@ def test_EMSegmentTransformToNewFormat_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputMRMLFileName=dict(argstr="--inputMRMLFileName %s", extensions=None), - outputMRMLFileName=dict(argstr="--outputMRMLFileName %s", hash_files=False), - templateFlag=dict(argstr="--templateFlag "), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMRMLFileName=dict( + argstr="--inputMRMLFileName %s", + extensions=None, + ), + outputMRMLFileName=dict( + argstr="--outputMRMLFileName %s", + hash_files=False, + ), + templateFlag=dict( + argstr="--templateFlag ", + ), ) inputs = EMSegmentTransformToNewFormat.input_spec() @@ -18,7 +31,11 @@ def test_EMSegmentTransformToNewFormat_inputs(): def test_EMSegmentTransformToNewFormat_outputs(): - output_map = dict(outputMRMLFileName=dict(extensions=None)) + output_map = dict( + outputMRMLFileName=dict( + extensions=None, + ), + ) outputs = EMSegmentTransformToNewFormat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py b/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py index 93b911db65..8bab4bd963 100644 --- a/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py +++ b/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py @@ -4,16 +4,41 @@ def 
test_GrayscaleModelMaker_inputs(): input_map = dict( - InputVolume=dict(argstr="%s", extensions=None, position=-2), - OutputGeometry=dict(argstr="%s", hash_files=False, position=-1), - args=dict(argstr="%s"), - decimate=dict(argstr="--decimate %f"), - environ=dict(nohash=True, usedefault=True), - name=dict(argstr="--name %s"), - pointnormals=dict(argstr="--pointnormals "), - smooth=dict(argstr="--smooth %d"), - splitnormals=dict(argstr="--splitnormals "), - threshold=dict(argstr="--threshold %f"), + InputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + OutputGeometry=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + args=dict( + argstr="%s", + ), + decimate=dict( + argstr="--decimate %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + name=dict( + argstr="--name %s", + ), + pointnormals=dict( + argstr="--pointnormals ", + ), + smooth=dict( + argstr="--smooth %d", + ), + splitnormals=dict( + argstr="--splitnormals ", + ), + threshold=dict( + argstr="--threshold %f", + ), ) inputs = GrayscaleModelMaker.input_spec() @@ -23,7 +48,12 @@ def test_GrayscaleModelMaker_inputs(): def test_GrayscaleModelMaker_outputs(): - output_map = dict(OutputGeometry=dict(extensions=None, position=-1)) + output_map = dict( + OutputGeometry=dict( + extensions=None, + position=-1, + ), + ) outputs = GrayscaleModelMaker.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py b/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py index 605560c896..3dab7b1498 100644 --- a/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py +++ b/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py @@ -4,14 +4,35 @@ def test_LabelMapSmoothing_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - gaussianSigma=dict(argstr="--gaussianSigma %f"), - inputVolume=dict(argstr="%s", extensions=None, position=-2), - labelToSmooth=dict(argstr="--labelToSmooth %d"), - maxRMSError=dict(argstr="--maxRMSError %f"), - numberOfIterations=dict(argstr="--numberOfIterations %d"), - outputVolume=dict(argstr="%s", hash_files=False, position=-1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gaussianSigma=dict( + argstr="--gaussianSigma %f", + ), + inputVolume=dict( + argstr="%s", + extensions=None, + position=-2, + ), + labelToSmooth=dict( + argstr="--labelToSmooth %d", + ), + maxRMSError=dict( + argstr="--maxRMSError %f", + ), + numberOfIterations=dict( + argstr="--numberOfIterations %d", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = LabelMapSmoothing.input_spec() @@ -21,7 +42,12 @@ def test_LabelMapSmoothing_inputs(): def test_LabelMapSmoothing_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1)) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = LabelMapSmoothing.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_MergeModels.py b/nipype/interfaces/slicer/tests/test_auto_MergeModels.py index c66ac2bc8b..dc93147248 100644 --- a/nipype/interfaces/slicer/tests/test_auto_MergeModels.py +++ b/nipype/interfaces/slicer/tests/test_auto_MergeModels.py @@ -4,11 +4,28 @@ def test_MergeModels_inputs(): input_map = dict( - Model1=dict(argstr="%s", extensions=None, position=-3), - Model2=dict(argstr="%s", extensions=None, position=-2), - 
ModelOutput=dict(argstr="%s", hash_files=False, position=-1), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + Model1=dict( + argstr="%s", + extensions=None, + position=-3, + ), + Model2=dict( + argstr="%s", + extensions=None, + position=-2, + ), + ModelOutput=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), ) inputs = MergeModels.input_spec() @@ -18,7 +35,12 @@ def test_MergeModels_inputs(): def test_MergeModels_outputs(): - output_map = dict(ModelOutput=dict(extensions=None, position=-1)) + output_map = dict( + ModelOutput=dict( + extensions=None, + position=-1, + ), + ) outputs = MergeModels.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py b/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py index 264e8b2f89..905b1417e9 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py +++ b/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py @@ -4,26 +4,72 @@ def test_ModelMaker_inputs(): input_map = dict( - InputVolume=dict(argstr="%s", extensions=None, position=-1), - args=dict(argstr="%s"), - color=dict(argstr="--color %s", extensions=None), - debug=dict(argstr="--debug "), - decimate=dict(argstr="--decimate %f"), - end=dict(argstr="--end %d"), - environ=dict(nohash=True, usedefault=True), - filtertype=dict(argstr="--filtertype %s"), - generateAll=dict(argstr="--generateAll "), - jointsmooth=dict(argstr="--jointsmooth "), - labels=dict(argstr="--labels %s", sep=","), - modelSceneFile=dict(argstr="--modelSceneFile %s...", hash_files=False), - name=dict(argstr="--name %s"), - pad=dict(argstr="--pad "), - pointnormals=dict(argstr="--pointnormals "), - saveIntermediateModels=dict(argstr="--saveIntermediateModels "), - skipUnNamed=dict(argstr="--skipUnNamed "), - smooth=dict(argstr="--smooth %d"), - splitnormals=dict(argstr="--splitnormals "), - start=dict(argstr="--start %d"), + InputVolume=dict( + argstr="%s", + extensions=None, + position=-1, + ), + args=dict( + argstr="%s", + ), + color=dict( + argstr="--color %s", + extensions=None, + ), + debug=dict( + argstr="--debug ", + ), + decimate=dict( + argstr="--decimate %f", + ), + end=dict( + argstr="--end %d", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + filtertype=dict( + argstr="--filtertype %s", + ), + generateAll=dict( + argstr="--generateAll ", + ), + jointsmooth=dict( + argstr="--jointsmooth ", + ), + labels=dict( + argstr="--labels %s", + sep=",", + ), + modelSceneFile=dict( + argstr="--modelSceneFile %s...", + hash_files=False, + ), + name=dict( + argstr="--name %s", + ), + pad=dict( + argstr="--pad ", + ), + pointnormals=dict( + argstr="--pointnormals ", + ), + saveIntermediateModels=dict( + argstr="--saveIntermediateModels ", + ), + skipUnNamed=dict( + argstr="--skipUnNamed ", + ), + smooth=dict( + argstr="--smooth %d", + ), + splitnormals=dict( + argstr="--splitnormals ", + ), + start=dict( + argstr="--start %d", + ), ) inputs = ModelMaker.input_spec() @@ -33,7 +79,9 @@ def test_ModelMaker_inputs(): def test_ModelMaker_outputs(): - output_map = dict(modelSceneFile=dict()) + output_map = dict( + modelSceneFile=dict(), + ) outputs = ModelMaker.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py b/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py index 274494e6c0..8449c15fce 100644 --- 
a/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py +++ b/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py @@ -4,12 +4,31 @@ def test_ModelToLabelMap_inputs(): input_map = dict( - InputVolume=dict(argstr="%s", extensions=None, position=-3), - OutputVolume=dict(argstr="%s", hash_files=False, position=-1), - args=dict(argstr="%s"), - distance=dict(argstr="--distance %f"), - environ=dict(nohash=True, usedefault=True), - surface=dict(argstr="%s", extensions=None, position=-2), + InputVolume=dict( + argstr="%s", + extensions=None, + position=-3, + ), + OutputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + args=dict( + argstr="%s", + ), + distance=dict( + argstr="--distance %f", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + surface=dict( + argstr="%s", + extensions=None, + position=-2, + ), ) inputs = ModelToLabelMap.input_spec() @@ -19,7 +38,12 @@ def test_ModelToLabelMap_inputs(): def test_ModelToLabelMap_outputs(): - output_map = dict(OutputVolume=dict(extensions=None, position=-1)) + output_map = dict( + OutputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = ModelToLabelMap.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py b/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py index e8a2f433eb..e2b4a1a2f7 100644 --- a/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py +++ b/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py @@ -4,11 +4,26 @@ def test_OrientScalarVolume_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - inputVolume1=dict(argstr="%s", extensions=None, position=-2), - orientation=dict(argstr="--orientation %s"), - outputVolume=dict(argstr="%s", hash_files=False, position=-1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume1=dict( + argstr="%s", + extensions=None, + position=-2, + ), + orientation=dict( + argstr="--orientation %s", + ), + outputVolume=dict( + argstr="%s", + hash_files=False, + position=-1, + ), ) inputs = OrientScalarVolume.input_spec() @@ -18,7 +33,12 @@ def test_OrientScalarVolume_inputs(): def test_OrientScalarVolume_outputs(): - output_map = dict(outputVolume=dict(extensions=None, position=-1)) + output_map = dict( + outputVolume=dict( + extensions=None, + position=-1, + ), + ) outputs = OrientScalarVolume.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py b/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py index 952c029684..77498c0b08 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py +++ b/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py @@ -4,11 +4,28 @@ def test_ProbeVolumeWithModel_inputs(): input_map = dict( - InputModel=dict(argstr="%s", extensions=None, position=-2), - InputVolume=dict(argstr="%s", extensions=None, position=-3), - OutputModel=dict(argstr="%s", hash_files=False, position=-1), - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + InputModel=dict( + argstr="%s", + extensions=None, + position=-2, + ), + InputVolume=dict( + argstr="%s", + extensions=None, + position=-3, + ), + OutputModel=dict( + argstr="%s", + hash_files=False, + position=-1, + ), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), ) inputs = ProbeVolumeWithModel.input_spec() 
@@ -18,7 +35,12 @@ def test_ProbeVolumeWithModel_inputs(): def test_ProbeVolumeWithModel_outputs(): - output_map = dict(OutputModel=dict(extensions=None, position=-1)) + output_map = dict( + OutputModel=dict( + extensions=None, + position=-1, + ), + ) outputs = ProbeVolumeWithModel.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py b/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py index 1961fb2879..7a16ed38bb 100644 --- a/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py +++ b/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py @@ -3,7 +3,15 @@ def test_SlicerCommandLine_inputs(): - input_map = dict(args=dict(argstr="%s"), environ=dict(nohash=True, usedefault=True)) + input_map = dict( + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ) inputs = SlicerCommandLine.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/interfaces/spm/preprocess.py b/nipype/interfaces/spm/preprocess.py index 28f05ee32f..44831c5604 100644 --- a/nipype/interfaces/spm/preprocess.py +++ b/nipype/interfaces/spm/preprocess.py @@ -305,6 +305,7 @@ def _format_arg(self, opt, spec, val): if (self.inputs.jobtype == "calculatevdm") and ( opt in ["phase_file", "magnitude_file", "anat_file", "epi_file"] ): + return scans_for_fname(ensure_list(val)) if (self.inputs.jobtype == "applyvdm") and (opt == "in_files"): diff --git a/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py b/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py index 0a77d98abc..15fe9399ed 100644 --- a/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py +++ b/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py @@ -4,12 +4,20 @@ def test_Analyze2nii_inputs(): input_map = dict( - analyze_file=dict(extensions=None, mandatory=True), + analyze_file=dict( + extensions=None, + mandatory=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), + mfile=dict( + usedefault=True, + ), paths=dict(), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = Analyze2nii.input_spec() @@ -21,11 +29,18 @@ def test_Analyze2nii_inputs(): def test_Analyze2nii_outputs(): output_map = dict( matlab_cmd=dict(), - mfile=dict(usedefault=True), - nifti_file=dict(extensions=None), + mfile=dict( + usedefault=True, + ), + nifti_file=dict( + extensions=None, + ), paths=dict(), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) outputs = Analyze2nii.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py b/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py index 37e117d1ad..d41a0fb4b6 100644 --- a/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py +++ b/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py @@ -4,11 +4,22 @@ def test_ApplyDeformations_inputs(): input_map = dict( - deformation_field=dict(extensions=None, field="comp{1}.def", mandatory=True), - in_files=dict(field="fnames", mandatory=True), - interp=dict(field="interp"), + deformation_field=dict( + extensions=None, + field="comp{1}.def", + mandatory=True, + ), + in_files=dict( + field="fnames", + mandatory=True, + ), + interp=dict( + field="interp", + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), + mfile=dict( + usedefault=True, + ), paths=dict(), reference_volume=dict( extensions=[".hdr", ".img", ".img.gz", ".nii"], @@ 
-16,7 +27,10 @@ def test_ApplyDeformations_inputs(): mandatory=True, ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = ApplyDeformations.input_spec() @@ -26,7 +40,9 @@ def test_ApplyDeformations_inputs(): def test_ApplyDeformations_outputs(): - output_map = dict(out_files=dict()) + output_map = dict( + out_files=dict(), + ) outputs = ApplyDeformations.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py b/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py index 9f152c27aa..c652bd7e12 100644 --- a/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py +++ b/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py @@ -4,24 +4,43 @@ def test_ApplyInverseDeformation_inputs(): input_map = dict( - bounding_box=dict(field="comp{1}.inv.comp{1}.sn2def.bb"), + bounding_box=dict( + field="comp{1}.inv.comp{1}.sn2def.bb", + ), deformation=dict( extensions=None, field="comp{1}.inv.comp{1}.sn2def.matname", xor=["deformation_field"], ), deformation_field=dict( - extensions=None, field="comp{1}.inv.comp{1}.def", xor=["deformation"] + extensions=None, + field="comp{1}.inv.comp{1}.def", + xor=["deformation"], + ), + in_files=dict( + field="fnames", + mandatory=True, + ), + interpolation=dict( + field="interp", ), - in_files=dict(field="fnames", mandatory=True), - interpolation=dict(field="interp"), matlab_cmd=dict(), - mfile=dict(usedefault=True), + mfile=dict( + usedefault=True, + ), paths=dict(), - target=dict(extensions=None, field="comp{1}.inv.space"), + target=dict( + extensions=None, + field="comp{1}.inv.space", + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), - voxel_sizes=dict(field="comp{1}.inv.comp{1}.sn2def.vox"), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + voxel_sizes=dict( + field="comp{1}.inv.comp{1}.sn2def.vox", + ), ) inputs = ApplyInverseDeformation.input_spec() @@ -31,7 +50,9 @@ def test_ApplyInverseDeformation_inputs(): def test_ApplyInverseDeformation_outputs(): - output_map = dict(out_files=dict()) + output_map = dict( + out_files=dict(), + ) outputs = ApplyInverseDeformation.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py b/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py index 89bfb2059a..ae0516370d 100644 --- a/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py +++ b/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py @@ -4,14 +4,29 @@ def test_ApplyTransform_inputs(): input_map = dict( - in_file=dict(copyfile=True, extensions=None, mandatory=True), - mat=dict(extensions=None, mandatory=True), + in_file=dict( + copyfile=True, + extensions=None, + mandatory=True, + ), + mat=dict( + extensions=None, + mandatory=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), - out_file=dict(extensions=None, genfile=True), + mfile=dict( + usedefault=True, + ), + out_file=dict( + extensions=None, + genfile=True, + ), paths=dict(), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = ApplyTransform.input_spec() @@ -21,7 +36,11 @@ def test_ApplyTransform_inputs(): def test_ApplyTransform_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ApplyTransform.output_spec() for key, metadata 
in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py b/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py index 1d11bbc500..156591cbb9 100644 --- a/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py +++ b/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py @@ -4,15 +4,31 @@ def test_CalcCoregAffine_inputs(): input_map = dict( - invmat=dict(extensions=None), - mat=dict(extensions=None), + invmat=dict( + extensions=None, + ), + mat=dict( + extensions=None, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), - moving=dict(copyfile=False, extensions=None, mandatory=True), + mfile=dict( + usedefault=True, + ), + moving=dict( + copyfile=False, + extensions=None, + mandatory=True, + ), paths=dict(), - target=dict(extensions=None, mandatory=True), + target=dict( + extensions=None, + mandatory=True, + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = CalcCoregAffine.input_spec() @@ -22,7 +38,14 @@ def test_CalcCoregAffine_inputs(): def test_CalcCoregAffine_outputs(): - output_map = dict(invmat=dict(extensions=None), mat=dict(extensions=None)) + output_map = dict( + invmat=dict( + extensions=None, + ), + mat=dict( + extensions=None, + ), + ) outputs = CalcCoregAffine.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_Coregister.py b/nipype/interfaces/spm/tests/test_auto_Coregister.py index 918659ad69..940f69ebbf 100644 --- a/nipype/interfaces/spm/tests/test_auto_Coregister.py +++ b/nipype/interfaces/spm/tests/test_auto_Coregister.py @@ -4,28 +4,59 @@ def test_Coregister_inputs(): input_map = dict( - apply_to_files=dict(copyfile=True, field="other"), - cost_function=dict(field="eoptions.cost_fun"), - fwhm=dict(field="eoptions.fwhm"), - jobtype=dict(usedefault=True), + apply_to_files=dict( + copyfile=True, + field="other", + ), + cost_function=dict( + field="eoptions.cost_fun", + ), + fwhm=dict( + field="eoptions.fwhm", + ), + jobtype=dict( + usedefault=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), - out_prefix=dict(field="roptions.prefix", usedefault=True), + mfile=dict( + usedefault=True, + ), + out_prefix=dict( + field="roptions.prefix", + usedefault=True, + ), paths=dict(), - separation=dict(field="eoptions.sep"), - source=dict(copyfile=True, field="source", mandatory=True), + separation=dict( + field="eoptions.sep", + ), + source=dict( + copyfile=True, + field="source", + mandatory=True, + ), target=dict( copyfile=False, extensions=[".hdr", ".img", ".img.gz", ".nii"], field="ref", mandatory=True, ), - tolerance=dict(field="eoptions.tol"), + tolerance=dict( + field="eoptions.tol", + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), - write_interp=dict(field="roptions.interp"), - write_mask=dict(field="roptions.mask"), - write_wrap=dict(field="roptions.wrap"), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + write_interp=dict( + field="roptions.interp", + ), + write_mask=dict( + field="roptions.mask", + ), + write_wrap=dict( + field="roptions.wrap", + ), ) inputs = Coregister.input_spec() @@ -35,7 +66,10 @@ def test_Coregister_inputs(): def test_Coregister_outputs(): - output_map = dict(coregistered_files=dict(), coregistered_source=dict()) + output_map = dict( + coregistered_files=dict(), + coregistered_source=dict(), + ) outputs = Coregister.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/spm/tests/test_auto_CreateWarped.py b/nipype/interfaces/spm/tests/test_auto_CreateWarped.py index 8ca329690b..f62694641c 100644 --- a/nipype/interfaces/spm/tests/test_auto_CreateWarped.py +++ b/nipype/interfaces/spm/tests/test_auto_CreateWarped.py @@ -5,17 +5,34 @@ def test_CreateWarped_inputs(): input_map = dict( flowfield_files=dict( - copyfile=False, field="crt_warped.flowfields", mandatory=True + copyfile=False, + field="crt_warped.flowfields", + mandatory=True, + ), + image_files=dict( + copyfile=False, + field="crt_warped.images", + mandatory=True, + ), + interp=dict( + field="crt_warped.interp", + ), + iterations=dict( + field="crt_warped.K", ), - image_files=dict(copyfile=False, field="crt_warped.images", mandatory=True), - interp=dict(field="crt_warped.interp"), - iterations=dict(field="crt_warped.K"), matlab_cmd=dict(), - mfile=dict(usedefault=True), - modulate=dict(field="crt_warped.jactransf"), + mfile=dict( + usedefault=True, + ), + modulate=dict( + field="crt_warped.jactransf", + ), paths=dict(), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = CreateWarped.input_spec() @@ -25,7 +42,9 @@ def test_CreateWarped_inputs(): def test_CreateWarped_outputs(): - output_map = dict(warped_files=dict()) + output_map = dict( + warped_files=dict(), + ) outputs = CreateWarped.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_DARTEL.py b/nipype/interfaces/spm/tests/test_auto_DARTEL.py index 20be1d8767..5fbba0c287 100644 --- a/nipype/interfaces/spm/tests/test_auto_DARTEL.py +++ b/nipype/interfaces/spm/tests/test_auto_DARTEL.py @@ -4,16 +4,34 @@ def test_DARTEL_inputs(): input_map = dict( - image_files=dict(copyfile=False, field="warp.images", mandatory=True), - iteration_parameters=dict(field="warp.settings.param"), + image_files=dict( + copyfile=False, + field="warp.images", + mandatory=True, + ), + iteration_parameters=dict( + field="warp.settings.param", + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), - optimization_parameters=dict(field="warp.settings.optim"), + mfile=dict( + usedefault=True, + ), + optimization_parameters=dict( + field="warp.settings.optim", + ), paths=dict(), - regularization_form=dict(field="warp.settings.rform"), - template_prefix=dict(field="warp.settings.template", usedefault=True), + regularization_form=dict( + field="warp.settings.rform", + ), + template_prefix=dict( + field="warp.settings.template", + usedefault=True, + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = DARTEL.input_spec() @@ -25,7 +43,9 @@ def test_DARTEL_inputs(): def test_DARTEL_outputs(): output_map = dict( dartel_flow_fields=dict(), - final_template_file=dict(extensions=None), + final_template_file=dict( + extensions=None, + ), template_files=dict(), ) outputs = DARTEL.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py b/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py index 7df27a3c96..d4b8ad8a78 100644 --- a/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py +++ b/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py @@ -5,14 +5,27 @@ def test_DARTELNorm2MNI_inputs(): input_map = dict( apply_to_files=dict( - copyfile=False, field="mni_norm.data.subjs.images", mandatory=True + copyfile=False, + field="mni_norm.data.subjs.images", + mandatory=True, + ), + bounding_box=dict( + 
field="mni_norm.bb", + ), + flowfield_files=dict( + field="mni_norm.data.subjs.flowfields", + mandatory=True, + ), + fwhm=dict( + field="mni_norm.fwhm", ), - bounding_box=dict(field="mni_norm.bb"), - flowfield_files=dict(field="mni_norm.data.subjs.flowfields", mandatory=True), - fwhm=dict(field="mni_norm.fwhm"), matlab_cmd=dict(), - mfile=dict(usedefault=True), - modulate=dict(field="mni_norm.preserve"), + mfile=dict( + usedefault=True, + ), + modulate=dict( + field="mni_norm.preserve", + ), paths=dict(), template_file=dict( copyfile=False, @@ -21,8 +34,13 @@ def test_DARTELNorm2MNI_inputs(): mandatory=True, ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), - voxel_size=dict(field="mni_norm.vox"), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + voxel_size=dict( + field="mni_norm.vox", + ), ) inputs = DARTELNorm2MNI.input_spec() @@ -33,7 +51,10 @@ def test_DARTELNorm2MNI_inputs(): def test_DARTELNorm2MNI_outputs(): output_map = dict( - normalization_parameter_file=dict(extensions=None), normalized_files=dict() + normalization_parameter_file=dict( + extensions=None, + ), + normalized_files=dict(), ) outputs = DARTELNorm2MNI.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_DicomImport.py b/nipype/interfaces/spm/tests/test_auto_DicomImport.py index 455c31acfb..e0459d467f 100644 --- a/nipype/interfaces/spm/tests/test_auto_DicomImport.py +++ b/nipype/interfaces/spm/tests/test_auto_DicomImport.py @@ -4,16 +4,36 @@ def test_DicomImport_inputs(): input_map = dict( - format=dict(field="convopts.format", usedefault=True), - icedims=dict(field="convopts.icedims", usedefault=True), - in_files=dict(field="data", mandatory=True), + format=dict( + field="convopts.format", + usedefault=True, + ), + icedims=dict( + field="convopts.icedims", + usedefault=True, + ), + in_files=dict( + field="data", + mandatory=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), - output_dir=dict(field="outdir", usedefault=True), - output_dir_struct=dict(field="root", usedefault=True), + mfile=dict( + usedefault=True, + ), + output_dir=dict( + field="outdir", + usedefault=True, + ), + output_dir_struct=dict( + field="root", + usedefault=True, + ), paths=dict(), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = DicomImport.input_spec() @@ -23,7 +43,9 @@ def test_DicomImport_inputs(): def test_DicomImport_outputs(): - output_map = dict(out_files=dict()) + output_map = dict( + out_files=dict(), + ) outputs = DicomImport.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py b/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py index 2ad12c60af..84fb496e9c 100644 --- a/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py +++ b/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py @@ -4,19 +4,40 @@ def test_EstimateContrast_inputs(): input_map = dict( - beta_images=dict(copyfile=False, mandatory=True), - contrasts=dict(mandatory=True), - group_contrast=dict(xor=["use_derivs"]), + beta_images=dict( + copyfile=False, + mandatory=True, + ), + contrasts=dict( + mandatory=True, + ), + group_contrast=dict( + xor=["use_derivs"], + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), + mfile=dict( + usedefault=True, + ), paths=dict(), - residual_image=dict(copyfile=False, extensions=None, mandatory=True), + residual_image=dict( + copyfile=False, + extensions=None, + mandatory=True, + ), spm_mat_file=dict( 
- copyfile=True, extensions=None, field="spmmat", mandatory=True + copyfile=True, + extensions=None, + field="spmmat", + mandatory=True, + ), + use_derivs=dict( + xor=["group_contrast"], ), - use_derivs=dict(xor=["group_contrast"]), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = EstimateContrast.input_spec() @@ -31,7 +52,9 @@ def test_EstimateContrast_outputs(): ess_images=dict(), spmF_images=dict(), spmT_images=dict(), - spm_mat_file=dict(extensions=None), + spm_mat_file=dict( + extensions=None, + ), ) outputs = EstimateContrast.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_EstimateModel.py b/nipype/interfaces/spm/tests/test_auto_EstimateModel.py index 5351469cc9..c78924de2f 100644 --- a/nipype/interfaces/spm/tests/test_auto_EstimateModel.py +++ b/nipype/interfaces/spm/tests/test_auto_EstimateModel.py @@ -4,17 +4,30 @@ def test_EstimateModel_inputs(): input_map = dict( - estimation_method=dict(field="method", mandatory=True), + estimation_method=dict( + field="method", + mandatory=True, + ), flags=dict(), matlab_cmd=dict(), - mfile=dict(usedefault=True), + mfile=dict( + usedefault=True, + ), paths=dict(), spm_mat_file=dict( - copyfile=True, extensions=None, field="spmmat", mandatory=True + copyfile=True, + extensions=None, + field="spmmat", + mandatory=True, ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), - write_residuals=dict(field="write_residuals"), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + write_residuals=dict( + field="write_residuals", + ), ) inputs = EstimateModel.input_spec() @@ -27,15 +40,25 @@ def test_EstimateModel_outputs(): output_map = dict( ARcoef=dict(), Cbetas=dict(), - RPVimage=dict(extensions=[".hdr", ".img", ".img.gz", ".nii"]), + RPVimage=dict( + extensions=[".hdr", ".img", ".img.gz", ".nii"], + ), SDbetas=dict(), SDerror=dict(), beta_images=dict(), - labels=dict(extensions=[".hdr", ".img", ".img.gz", ".nii"]), - mask_image=dict(extensions=[".hdr", ".img", ".img.gz", ".nii"]), - residual_image=dict(extensions=[".hdr", ".img", ".img.gz", ".nii"]), + labels=dict( + extensions=[".hdr", ".img", ".img.gz", ".nii"], + ), + mask_image=dict( + extensions=[".hdr", ".img", ".img.gz", ".nii"], + ), + residual_image=dict( + extensions=[".hdr", ".img", ".img.gz", ".nii"], + ), residual_images=dict(), - spm_mat_file=dict(extensions=None), + spm_mat_file=dict( + extensions=None, + ), ) outputs = EstimateModel.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py b/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py index 872b8c7687..de435ea771 100644 --- a/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py @@ -4,24 +4,39 @@ def test_FactorialDesign_inputs(): input_map = dict( - covariates=dict(field="cov"), - explicit_mask_file=dict(extensions=None, field="masking.em"), + covariates=dict( + field="cov", + ), + explicit_mask_file=dict( + extensions=None, + field="masking.em", + ), global_calc_mean=dict( - field="globalc.g_mean", xor=["global_calc_omit", "global_calc_values"] + field="globalc.g_mean", + xor=["global_calc_omit", "global_calc_values"], ), global_calc_omit=dict( - field="globalc.g_omit", xor=["global_calc_mean", "global_calc_values"] + field="globalc.g_omit", + xor=["global_calc_mean", "global_calc_values"], ), global_calc_values=dict( field="globalc.g_user.global_uval", xor=["global_calc_mean", "global_calc_omit"], ), 
- global_normalization=dict(field="globalm.glonorm"), + global_normalization=dict( + field="globalm.glonorm", + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), - no_grand_mean_scaling=dict(field="globalm.gmsca.gmsca_no"), + mfile=dict( + usedefault=True, + ), + no_grand_mean_scaling=dict( + field="globalm.gmsca.gmsca_no", + ), paths=dict(), - spm_mat_dir=dict(field="dir"), + spm_mat_dir=dict( + field="dir", + ), threshold_mask_absolute=dict( field="masking.tm.tma.athresh", xor=["threshold_mask_none", "threshold_mask_relative"], @@ -34,9 +49,14 @@ def test_FactorialDesign_inputs(): field="masking.tm.tmr.rthresh", xor=["threshold_mask_absolute", "threshold_mask_none"], ), - use_implicit_threshold=dict(field="masking.im"), + use_implicit_threshold=dict( + field="masking.im", + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = FactorialDesign.input_spec() @@ -46,7 +66,11 @@ def test_FactorialDesign_inputs(): def test_FactorialDesign_outputs(): - output_map = dict(spm_mat_file=dict(extensions=None)) + output_map = dict( + spm_mat_file=dict( + extensions=None, + ), + ) outputs = FactorialDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_FieldMap.py b/nipype/interfaces/spm/tests/test_auto_FieldMap.py index 4061ca9ce2..a91eec64d8 100644 --- a/nipype/interfaces/spm/tests/test_auto_FieldMap.py +++ b/nipype/interfaces/spm/tests/test_auto_FieldMap.py @@ -4,59 +4,123 @@ def test_FieldMap_inputs(): input_map = dict( - anat_file=dict(copyfile=False, extensions=None, field="subj.anat"), - blip_direction=dict(field="subj.defaults.defaultsval.blipdir", mandatory=True), - distortion_direction=dict(field="roptions.pedir", usedefault=True), - echo_times=dict(field="subj.defaults.defaultsval.et"), - epi_file=dict(copyfile=False, extensions=None, field="subj.session.epi"), - epifm=dict(field="subj.defaults.defaultsval.epifm", usedefault=True), - in_files=dict(copyfile=True, field="data.scans", mandatory=True), - interpolation=dict(field="roptions.rinterp", usedefault=True), + anat_file=dict( + copyfile=False, + extensions=None, + field="subj.anat", + ), + blip_direction=dict( + field="subj.defaults.defaultsval.blipdir", + mandatory=True, + ), + echo_times=dict( + field="subj.defaults.defaultsval.et", + mandatory=True, + ), + epi_file=dict( + copyfile=False, + extensions=None, + field="subj.session.epi", + mandatory=True, + ), + epifm=dict( + field="subj.defaults.defaultsval.epifm", + usedefault=True, + ), jacobian_modulation=dict( - field="subj.defaults.defaultsval.ajm", usedefault=True + field="subj.defaults.defaultsval.ajm", + usedefault=True, + ), + jobtype=dict( + usedefault=True, ), - jobtype=dict(usedefault=True), magnitude_file=dict( - copyfile=False, extensions=None, field="subj.data.presubphasemag.magnitude" + copyfile=False, + extensions=None, + field="subj.data.presubphasemag.magnitude", + mandatory=True, + ), + mask_fwhm=dict( + field="subj.defaults.defaultsval.mflags.fwhm", + usedefault=True, + ), + maskbrain=dict( + field="subj.defaults.defaultsval.maskbrain", + usedefault=True, + ), + matchanat=dict( + field="subj.matchanat", + usedefault=True, + ), + matchvdm=dict( + field="subj.matchvdm", + usedefault=True, ), - mask_fwhm=dict(field="subj.defaults.defaultsval.mflags.fwhm", usedefault=True), - maskbrain=dict(field="subj.defaults.defaultsval.maskbrain", usedefault=True), - matchanat=dict(field="subj.matchanat", usedefault=True), - 
matchvdm=dict(field="subj.matchvdm", usedefault=True), matlab_cmd=dict(), - method=dict(field="subj.defaults.defaultsval.uflags.method", usedefault=True), - mfile=dict(usedefault=True), - ndilate=dict(field="subj.defaults.defaultsval.mflags.ndilate", usedefault=True), - nerode=dict(field="subj.defaults.defaultsval.mflags.nerode", usedefault=True), - out_prefix=dict(field="roptions.prefix", usedefault=True), - pad=dict(field="subj.defaults.defaultsval.uflags.pad", usedefault=True), + method=dict( + field="subj.defaults.defaultsval.uflags.method", + usedefault=True, + ), + mfile=dict( + usedefault=True, + ), + ndilate=dict( + field="subj.defaults.defaultsval.mflags.ndilate", + usedefault=True, + ), + nerode=dict( + field="subj.defaults.defaultsval.mflags.nerode", + usedefault=True, + ), + pad=dict( + field="subj.defaults.defaultsval.uflags.pad", + usedefault=True, + ), paths=dict(), phase_file=dict( - copyfile=False, extensions=None, field="subj.data.presubphasemag.phase" + copyfile=False, + extensions=None, + field="subj.data.presubphasemag.phase", + mandatory=True, + ), + reg=dict( + field="subj.defaults.defaultsval.mflags.reg", + usedefault=True, + ), + sessname=dict( + field="subj.sessname", + usedefault=True, ), - reg=dict(field="subj.defaults.defaultsval.mflags.reg", usedefault=True), - reslice_interp=dict(field="roptions.rinterp"), - sessname=dict(field="subj.sessname", usedefault=True), template=dict( copyfile=False, extensions=None, field="subj.defaults.defaultsval.mflags.template", ), - thresh=dict(field="subj.defaults.defaultsval.mflags.thresh", usedefault=True), - total_readout_time=dict(field="subj.defaults.defaultsval.tert", mandatory=True), + thresh=dict( + field="subj.defaults.defaultsval.mflags.thresh", + usedefault=True, + ), + total_readout_time=dict( + field="subj.defaults.defaultsval.tert", + mandatory=True, + ), unwarp_fwhm=dict( - field="subj.defaults.defaultsval.uflags.fwhm", usedefault=True + field="subj.defaults.defaultsval.uflags.fwhm", + usedefault=True, ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), - vdmfile=dict( - copyfile=True, extensions=None, field="data.vdmfile", mandatory=True - ), - write_mask=dict(field="roptions.mask"), - write_which=dict(field="roptions.which", maxlen=2, minlen=2, usedefault=True), - write_wrap=dict(field="roptions.wrap"), - writeunwarped=dict(field="subj.writeunwarped", usedefault=True), - ws=dict(field="subj.defaults.defaultsval.uflags.ws", usedefault=True), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + writeunwarped=dict( + field="subj.writeunwarped", + usedefault=True, + ), + ws=dict( + field="subj.defaults.defaultsval.uflags.ws", + usedefault=True, + ), ) inputs = FieldMap.input_spec() @@ -67,7 +131,9 @@ def test_FieldMap_inputs(): def test_FieldMap_outputs(): output_map = dict( - mean_image=dict(extensions=None), out_files=dict(), vdm=dict(extensions=None) + vdm=dict( + extensions=None, + ), ) outputs = FieldMap.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_Level1Design.py b/nipype/interfaces/spm/tests/test_auto_Level1Design.py index db2690ea02..04c9f315ef 100644 --- a/nipype/interfaces/spm/tests/test_auto_Level1Design.py +++ b/nipype/interfaces/spm/tests/test_auto_Level1Design.py @@ -4,25 +4,61 @@ def test_Level1Design_inputs(): input_map = dict( - bases=dict(field="bases", mandatory=True), - factor_info=dict(field="fact"), + bases=dict( + field="bases", + mandatory=True, + ), + factor_info=dict( + field="fact", + ), flags=dict(), - 
global_intensity_normalization=dict(field="global"), - interscan_interval=dict(field="timing.RT", mandatory=True), - mask_image=dict(extensions=None, field="mask"), - mask_threshold=dict(usedefault=True), + global_intensity_normalization=dict( + field="global", + ), + interscan_interval=dict( + field="timing.RT", + mandatory=True, + ), + mask_image=dict( + extensions=None, + field="mask", + ), + mask_threshold=dict( + usedefault=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), - microtime_onset=dict(field="timing.fmri_t0"), - microtime_resolution=dict(field="timing.fmri_t"), - model_serial_correlations=dict(field="cvi"), + mfile=dict( + usedefault=True, + ), + microtime_onset=dict( + field="timing.fmri_t0", + ), + microtime_resolution=dict( + field="timing.fmri_t", + ), + model_serial_correlations=dict( + field="cvi", + ), paths=dict(), - session_info=dict(field="sess", mandatory=True), - spm_mat_dir=dict(field="dir"), - timing_units=dict(field="timing.units", mandatory=True), + session_info=dict( + field="sess", + mandatory=True, + ), + spm_mat_dir=dict( + field="dir", + ), + timing_units=dict( + field="timing.units", + mandatory=True, + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), - volterra_expansion_order=dict(field="volt"), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + volterra_expansion_order=dict( + field="volt", + ), ) inputs = Level1Design.input_spec() @@ -32,7 +68,11 @@ def test_Level1Design_inputs(): def test_Level1Design_outputs(): - output_map = dict(spm_mat_file=dict(extensions=None)) + output_map = dict( + spm_mat_file=dict( + extensions=None, + ), + ) outputs = Level1Design.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_MultiChannelNewSegment.py b/nipype/interfaces/spm/tests/test_auto_MultiChannelNewSegment.py index efec3fa8c6..9c8f743d45 100644 --- a/nipype/interfaces/spm/tests/test_auto_MultiChannelNewSegment.py +++ b/nipype/interfaces/spm/tests/test_auto_MultiChannelNewSegment.py @@ -4,17 +4,34 @@ def test_MultiChannelNewSegment_inputs(): input_map = dict( - affine_regularization=dict(field="warp.affreg"), - channels=dict(field="channel"), + affine_regularization=dict( + field="warp.affreg", + ), + channels=dict( + field="channel", + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), + mfile=dict( + usedefault=True, + ), paths=dict(), - sampling_distance=dict(field="warp.samp"), - tissues=dict(field="tissue"), + sampling_distance=dict( + field="warp.samp", + ), + tissues=dict( + field="tissue", + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), - warping_regularization=dict(field="warp.reg"), - write_deformation_fields=dict(field="warp.write"), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + warping_regularization=dict( + field="warp.reg", + ), + write_deformation_fields=dict( + field="warp.write", + ), ) inputs = MultiChannelNewSegment.input_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py b/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py index cf2969ff67..61d3b38a74 100644 --- a/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py @@ -4,26 +4,47 @@ def test_MultipleRegressionDesign_inputs(): input_map = dict( - covariates=dict(field="cov"), - explicit_mask_file=dict(extensions=None, field="masking.em"), + covariates=dict( + field="cov", + ), + explicit_mask_file=dict( + 
extensions=None, + field="masking.em", + ), global_calc_mean=dict( - field="globalc.g_mean", xor=["global_calc_omit", "global_calc_values"] + field="globalc.g_mean", + xor=["global_calc_omit", "global_calc_values"], ), global_calc_omit=dict( - field="globalc.g_omit", xor=["global_calc_mean", "global_calc_values"] + field="globalc.g_omit", + xor=["global_calc_mean", "global_calc_values"], ), global_calc_values=dict( field="globalc.g_user.global_uval", xor=["global_calc_mean", "global_calc_omit"], ), - global_normalization=dict(field="globalm.glonorm"), - in_files=dict(field="des.mreg.scans", mandatory=True), - include_intercept=dict(field="des.mreg.incint", usedefault=True), + global_normalization=dict( + field="globalm.glonorm", + ), + in_files=dict( + field="des.mreg.scans", + mandatory=True, + ), + include_intercept=dict( + field="des.mreg.incint", + usedefault=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), - no_grand_mean_scaling=dict(field="globalm.gmsca.gmsca_no"), + mfile=dict( + usedefault=True, + ), + no_grand_mean_scaling=dict( + field="globalm.gmsca.gmsca_no", + ), paths=dict(), - spm_mat_dir=dict(field="dir"), + spm_mat_dir=dict( + field="dir", + ), threshold_mask_absolute=dict( field="masking.tm.tma.athresh", xor=["threshold_mask_none", "threshold_mask_relative"], @@ -36,10 +57,17 @@ def test_MultipleRegressionDesign_inputs(): field="masking.tm.tmr.rthresh", xor=["threshold_mask_absolute", "threshold_mask_none"], ), - use_implicit_threshold=dict(field="masking.im"), + use_implicit_threshold=dict( + field="masking.im", + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), - user_covariates=dict(field="des.mreg.mcov"), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + user_covariates=dict( + field="des.mreg.mcov", + ), ) inputs = MultipleRegressionDesign.input_spec() @@ -49,7 +77,11 @@ def test_MultipleRegressionDesign_inputs(): def test_MultipleRegressionDesign_outputs(): - output_map = dict(spm_mat_file=dict(extensions=None)) + output_map = dict( + spm_mat_file=dict( + extensions=None, + ), + ) outputs = MultipleRegressionDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_NewSegment.py b/nipype/interfaces/spm/tests/test_auto_NewSegment.py index 77f84939da..dae310f6e2 100644 --- a/nipype/interfaces/spm/tests/test_auto_NewSegment.py +++ b/nipype/interfaces/spm/tests/test_auto_NewSegment.py @@ -4,18 +4,39 @@ def test_NewSegment_inputs(): input_map = dict( - affine_regularization=dict(field="warp.affreg"), - channel_files=dict(copyfile=False, field="channel", mandatory=True), - channel_info=dict(field="channel"), + affine_regularization=dict( + field="warp.affreg", + ), + channel_files=dict( + copyfile=False, + field="channel", + mandatory=True, + ), + channel_info=dict( + field="channel", + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), + mfile=dict( + usedefault=True, + ), paths=dict(), - sampling_distance=dict(field="warp.samp"), - tissues=dict(field="tissue"), + sampling_distance=dict( + field="warp.samp", + ), + tissues=dict( + field="tissue", + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), - warping_regularization=dict(field="warp.reg"), - write_deformation_fields=dict(field="warp.write"), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + warping_regularization=dict( + field="warp.reg", + ), + write_deformation_fields=dict( + field="warp.write", + ), ) inputs = NewSegment.input_spec() diff --git 
a/nipype/interfaces/spm/tests/test_auto_Normalize.py b/nipype/interfaces/spm/tests/test_auto_Normalize.py index b2c34f37e8..caa063d923 100644 --- a/nipype/interfaces/spm/tests/test_auto_Normalize.py +++ b/nipype/interfaces/spm/tests/test_auto_Normalize.py @@ -4,15 +4,33 @@ def test_Normalize_inputs(): input_map = dict( - DCT_period_cutoff=dict(field="eoptions.cutoff"), - affine_regularization_type=dict(field="eoptions.regtype"), - apply_to_files=dict(copyfile=True, field="subj.resample"), - jobtype=dict(usedefault=True), + DCT_period_cutoff=dict( + field="eoptions.cutoff", + ), + affine_regularization_type=dict( + field="eoptions.regtype", + ), + apply_to_files=dict( + copyfile=True, + field="subj.resample", + ), + jobtype=dict( + usedefault=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), - nonlinear_iterations=dict(field="eoptions.nits"), - nonlinear_regularization=dict(field="eoptions.reg"), - out_prefix=dict(field="roptions.prefix", usedefault=True), + mfile=dict( + usedefault=True, + ), + nonlinear_iterations=dict( + field="eoptions.nits", + ), + nonlinear_regularization=dict( + field="eoptions.reg", + ), + out_prefix=dict( + field="roptions.prefix", + usedefault=True, + ), parameter_file=dict( copyfile=False, extensions=None, @@ -22,10 +40,19 @@ def test_Normalize_inputs(): ), paths=dict(), source=dict( - copyfile=True, field="subj.source", mandatory=True, xor=["parameter_file"] + copyfile=True, + field="subj.source", + mandatory=True, + xor=["parameter_file"], + ), + source_image_smoothing=dict( + field="eoptions.smosrc", + ), + source_weight=dict( + copyfile=False, + extensions=None, + field="subj.wtsrc", ), - source_image_smoothing=dict(field="eoptions.smosrc"), - source_weight=dict(copyfile=False, extensions=None, field="subj.wtsrc"), template=dict( copyfile=False, extensions=None, @@ -33,15 +60,34 @@ def test_Normalize_inputs(): mandatory=True, xor=["parameter_file"], ), - template_image_smoothing=dict(field="eoptions.smoref"), - template_weight=dict(copyfile=False, extensions=None, field="eoptions.weight"), + template_image_smoothing=dict( + field="eoptions.smoref", + ), + template_weight=dict( + copyfile=False, + extensions=None, + field="eoptions.weight", + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), - write_bounding_box=dict(field="roptions.bb"), - write_interp=dict(field="roptions.interp"), - write_preserve=dict(field="roptions.preserve"), - write_voxel_sizes=dict(field="roptions.vox"), - write_wrap=dict(field="roptions.wrap"), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + write_bounding_box=dict( + field="roptions.bb", + ), + write_interp=dict( + field="roptions.interp", + ), + write_preserve=dict( + field="roptions.preserve", + ), + write_voxel_sizes=dict( + field="roptions.vox", + ), + write_wrap=dict( + field="roptions.wrap", + ), ) inputs = Normalize.input_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_Normalize12.py b/nipype/interfaces/spm/tests/test_auto_Normalize12.py index c4bf661f90..965ffafec9 100644 --- a/nipype/interfaces/spm/tests/test_auto_Normalize12.py +++ b/nipype/interfaces/spm/tests/test_auto_Normalize12.py @@ -4,10 +4,19 @@ def test_Normalize12_inputs(): input_map = dict( - affine_regularization_type=dict(field="eoptions.affreg"), - apply_to_files=dict(copyfile=True, field="subj.resample"), - bias_fwhm=dict(field="eoptions.biasfwhm"), - bias_regularization=dict(field="eoptions.biasreg"), + affine_regularization_type=dict( + field="eoptions.affreg", + ), + apply_to_files=dict( + 
copyfile=True, + field="subj.resample", + ), + bias_fwhm=dict( + field="eoptions.biasfwhm", + ), + bias_regularization=dict( + field="eoptions.biasreg", + ), deformation_file=dict( copyfile=False, extensions=[".hdr", ".img", ".img.gz", ".nii"], @@ -22,13 +31,24 @@ def test_Normalize12_inputs(): mandatory=True, xor=["deformation_file"], ), - jobtype=dict(usedefault=True), + jobtype=dict( + usedefault=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), - out_prefix=dict(field="woptions.prefix", usedefault=True), + mfile=dict( + usedefault=True, + ), + out_prefix=dict( + field="woptions.prefix", + usedefault=True, + ), paths=dict(), - sampling_distance=dict(field="eoptions.samp"), - smoothness=dict(field="eoptions.fwhm"), + sampling_distance=dict( + field="eoptions.samp", + ), + smoothness=dict( + field="eoptions.fwhm", + ), tpm=dict( copyfile=False, extensions=None, @@ -36,11 +56,22 @@ def test_Normalize12_inputs(): xor=["deformation_file"], ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), - warping_regularization=dict(field="eoptions.reg"), - write_bounding_box=dict(field="woptions.bb"), - write_interp=dict(field="woptions.interp"), - write_voxel_sizes=dict(field="woptions.vox"), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + warping_regularization=dict( + field="eoptions.reg", + ), + write_bounding_box=dict( + field="woptions.bb", + ), + write_interp=dict( + field="woptions.interp", + ), + write_voxel_sizes=dict( + field="woptions.vox", + ), ) inputs = Normalize12.input_spec() @@ -51,7 +82,9 @@ def test_Normalize12_inputs(): def test_Normalize12_outputs(): output_map = dict( - deformation_field=dict(), normalized_files=dict(), normalized_image=dict() + deformation_field=dict(), + normalized_files=dict(), + normalized_image=dict(), ) outputs = Normalize12.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py b/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py index 7d80fea70d..9b77ab0af7 100644 --- a/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py @@ -4,25 +4,43 @@ def test_OneSampleTTestDesign_inputs(): input_map = dict( - covariates=dict(field="cov"), - explicit_mask_file=dict(extensions=None, field="masking.em"), + covariates=dict( + field="cov", + ), + explicit_mask_file=dict( + extensions=None, + field="masking.em", + ), global_calc_mean=dict( - field="globalc.g_mean", xor=["global_calc_omit", "global_calc_values"] + field="globalc.g_mean", + xor=["global_calc_omit", "global_calc_values"], ), global_calc_omit=dict( - field="globalc.g_omit", xor=["global_calc_mean", "global_calc_values"] + field="globalc.g_omit", + xor=["global_calc_mean", "global_calc_values"], ), global_calc_values=dict( field="globalc.g_user.global_uval", xor=["global_calc_mean", "global_calc_omit"], ), - global_normalization=dict(field="globalm.glonorm"), - in_files=dict(field="des.t1.scans", mandatory=True), + global_normalization=dict( + field="globalm.glonorm", + ), + in_files=dict( + field="des.t1.scans", + mandatory=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), - no_grand_mean_scaling=dict(field="globalm.gmsca.gmsca_no"), + mfile=dict( + usedefault=True, + ), + no_grand_mean_scaling=dict( + field="globalm.gmsca.gmsca_no", + ), paths=dict(), - spm_mat_dir=dict(field="dir"), + spm_mat_dir=dict( + field="dir", + ), threshold_mask_absolute=dict( field="masking.tm.tma.athresh", xor=["threshold_mask_none", 
"threshold_mask_relative"], @@ -35,9 +53,14 @@ def test_OneSampleTTestDesign_inputs(): field="masking.tm.tmr.rthresh", xor=["threshold_mask_absolute", "threshold_mask_none"], ), - use_implicit_threshold=dict(field="masking.im"), + use_implicit_threshold=dict( + field="masking.im", + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = OneSampleTTestDesign.input_spec() @@ -47,7 +70,11 @@ def test_OneSampleTTestDesign_inputs(): def test_OneSampleTTestDesign_outputs(): - output_map = dict(spm_mat_file=dict(extensions=None)) + output_map = dict( + spm_mat_file=dict( + extensions=None, + ), + ) outputs = OneSampleTTestDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py b/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py index 7ca526ded8..6be1f6ec01 100644 --- a/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py @@ -4,27 +4,49 @@ def test_PairedTTestDesign_inputs(): input_map = dict( - ancova=dict(field="des.pt.ancova"), - covariates=dict(field="cov"), - explicit_mask_file=dict(extensions=None, field="masking.em"), + ancova=dict( + field="des.pt.ancova", + ), + covariates=dict( + field="cov", + ), + explicit_mask_file=dict( + extensions=None, + field="masking.em", + ), global_calc_mean=dict( - field="globalc.g_mean", xor=["global_calc_omit", "global_calc_values"] + field="globalc.g_mean", + xor=["global_calc_omit", "global_calc_values"], ), global_calc_omit=dict( - field="globalc.g_omit", xor=["global_calc_mean", "global_calc_values"] + field="globalc.g_omit", + xor=["global_calc_mean", "global_calc_values"], ), global_calc_values=dict( field="globalc.g_user.global_uval", xor=["global_calc_mean", "global_calc_omit"], ), - global_normalization=dict(field="globalm.glonorm"), - grand_mean_scaling=dict(field="des.pt.gmsca"), + global_normalization=dict( + field="globalm.glonorm", + ), + grand_mean_scaling=dict( + field="des.pt.gmsca", + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), - no_grand_mean_scaling=dict(field="globalm.gmsca.gmsca_no"), - paired_files=dict(field="des.pt.pair", mandatory=True), + mfile=dict( + usedefault=True, + ), + no_grand_mean_scaling=dict( + field="globalm.gmsca.gmsca_no", + ), + paired_files=dict( + field="des.pt.pair", + mandatory=True, + ), paths=dict(), - spm_mat_dir=dict(field="dir"), + spm_mat_dir=dict( + field="dir", + ), threshold_mask_absolute=dict( field="masking.tm.tma.athresh", xor=["threshold_mask_none", "threshold_mask_relative"], @@ -37,9 +59,14 @@ def test_PairedTTestDesign_inputs(): field="masking.tm.tmr.rthresh", xor=["threshold_mask_absolute", "threshold_mask_none"], ), - use_implicit_threshold=dict(field="masking.im"), + use_implicit_threshold=dict( + field="masking.im", + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = PairedTTestDesign.input_spec() @@ -49,7 +76,11 @@ def test_PairedTTestDesign_inputs(): def test_PairedTTestDesign_outputs(): - output_map = dict(spm_mat_file=dict(extensions=None)) + output_map = dict( + spm_mat_file=dict( + extensions=None, + ), + ) outputs = PairedTTestDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_Realign.py b/nipype/interfaces/spm/tests/test_auto_Realign.py index 8682769634..5165d6f33e 100644 
--- a/nipype/interfaces/spm/tests/test_auto_Realign.py +++ b/nipype/interfaces/spm/tests/test_auto_Realign.py @@ -4,25 +4,65 @@ def test_Realign_inputs(): input_map = dict( - fwhm=dict(field="eoptions.fwhm"), - in_files=dict(copyfile=True, field="data", mandatory=True), - interp=dict(field="eoptions.interp"), - jobtype=dict(usedefault=True), + fwhm=dict( + field="eoptions.fwhm", + ), + in_files=dict( + copyfile=True, + field="data", + mandatory=True, + ), + interp=dict( + field="eoptions.interp", + ), + jobtype=dict( + usedefault=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), - out_prefix=dict(field="roptions.prefix", usedefault=True), + mfile=dict( + usedefault=True, + ), + out_prefix=dict( + field="roptions.prefix", + usedefault=True, + ), paths=dict(), - quality=dict(field="eoptions.quality"), - register_to_mean=dict(field="eoptions.rtm"), - separation=dict(field="eoptions.sep"), + quality=dict( + field="eoptions.quality", + ), + register_to_mean=dict( + field="eoptions.rtm", + ), + separation=dict( + field="eoptions.sep", + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), - weight_img=dict(extensions=None, field="eoptions.weight"), - wrap=dict(field="eoptions.wrap"), - write_interp=dict(field="roptions.interp"), - write_mask=dict(field="roptions.mask"), - write_which=dict(field="roptions.which", maxlen=2, minlen=2, usedefault=True), - write_wrap=dict(field="roptions.wrap"), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + weight_img=dict( + extensions=None, + field="eoptions.weight", + ), + wrap=dict( + field="eoptions.wrap", + ), + write_interp=dict( + field="roptions.interp", + ), + write_mask=dict( + field="roptions.mask", + ), + write_which=dict( + field="roptions.which", + maxlen=2, + minlen=2, + usedefault=True, + ), + write_wrap=dict( + field="roptions.wrap", + ), ) inputs = Realign.input_spec() @@ -33,7 +73,9 @@ def test_Realign_inputs(): def test_Realign_outputs(): output_map = dict( - mean_image=dict(extensions=None), + mean_image=dict( + extensions=None, + ), modified_in_files=dict(), realigned_files=dict(), realignment_parameters=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_RealignUnwarp.py b/nipype/interfaces/spm/tests/test_auto_RealignUnwarp.py index 20eca6653f..bb27419547 100644 --- a/nipype/interfaces/spm/tests/test_auto_RealignUnwarp.py +++ b/nipype/interfaces/spm/tests/test_auto_RealignUnwarp.py @@ -4,41 +4,104 @@ def test_RealignUnwarp_inputs(): input_map = dict( - est_basis_func=dict(field="uweoptions.basfcn"), - est_first_order_effects=dict(field="uweoptions.fot"), - est_jacobian_deformations=dict(field="uweoptions.jm"), + est_basis_func=dict( + field="uweoptions.basfcn", + ), + est_first_order_effects=dict( + field="uweoptions.fot", + ), + est_jacobian_deformations=dict( + field="uweoptions.jm", + ), est_num_of_iterations=dict( - field="uweoptions.noi", maxlen=1, minlen=1, usedefault=True + field="uweoptions.noi", + maxlen=1, + minlen=1, + usedefault=True, + ), + est_re_est_mov_par=dict( + field="uweoptions.rem", ), - est_re_est_mov_par=dict(field="uweoptions.rem"), est_reg_factor=dict( - field="uweoptions.lambda", maxlen=1, minlen=1, usedefault=True - ), - est_reg_order=dict(field="uweoptions.regorder"), - est_second_order_effects=dict(field="uweoptions.sot"), - est_taylor_expansion_point=dict(field="uweoptions.expround", usedefault=True), - est_unwarp_fwhm=dict(field="uweoptions.uwfwhm"), - fwhm=dict(field="eoptions.fwhm"), - in_files=dict(copyfile=True, field="data.scans", mandatory=True), - 
interp=dict(field="eoptions.einterp"), + field="uweoptions.lambda", + maxlen=1, + minlen=1, + usedefault=True, + ), + est_reg_order=dict( + field="uweoptions.regorder", + ), + est_second_order_effects=dict( + field="uweoptions.sot", + ), + est_taylor_expansion_point=dict( + field="uweoptions.expround", + usedefault=True, + ), + est_unwarp_fwhm=dict( + field="uweoptions.uwfwhm", + ), + fwhm=dict( + field="eoptions.fwhm", + ), + in_files=dict( + copyfile=True, + field="data.scans", + mandatory=True, + ), + interp=dict( + field="eoptions.einterp", + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), - out_prefix=dict(field="uwroptions.prefix", usedefault=True), + mfile=dict( + usedefault=True, + ), + out_prefix=dict( + field="uwroptions.prefix", + usedefault=True, + ), paths=dict(), - phase_map=dict(copyfile=False, extensions=None, field="data.pmscan"), - quality=dict(field="eoptions.quality"), - register_to_mean=dict(field="eoptions.rtm"), - reslice_interp=dict(field="uwroptions.rinterp"), - reslice_mask=dict(field="uwroptions.mask"), + phase_map=dict( + copyfile=False, + extensions=None, + field="data.pmscan", + ), + quality=dict( + field="eoptions.quality", + ), + register_to_mean=dict( + field="eoptions.rtm", + ), + reslice_interp=dict( + field="uwroptions.rinterp", + ), + reslice_mask=dict( + field="uwroptions.mask", + ), reslice_which=dict( - field="uwroptions.uwwhich", maxlen=2, minlen=2, usedefault=True + field="uwroptions.uwwhich", + maxlen=2, + minlen=2, + usedefault=True, + ), + reslice_wrap=dict( + field="uwroptions.wrap", + ), + separation=dict( + field="eoptions.sep", ), - reslice_wrap=dict(field="uwroptions.wrap"), - separation=dict(field="eoptions.sep"), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), - weight_img=dict(extensions=None, field="eoptions.weight"), - wrap=dict(field="eoptions.ewrap"), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + weight_img=dict( + extensions=None, + field="eoptions.weight", + ), + wrap=dict( + field="eoptions.ewrap", + ), ) inputs = RealignUnwarp.input_spec() @@ -49,7 +112,9 @@ def test_RealignUnwarp_inputs(): def test_RealignUnwarp_outputs(): output_map = dict( - mean_image=dict(extensions=None), + mean_image=dict( + extensions=None, + ), modified_in_files=dict(), realigned_unwarped_files=dict(), realignment_parameters=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_Reslice.py b/nipype/interfaces/spm/tests/test_auto_Reslice.py index b3ff842fd7..c48d1a4b88 100644 --- a/nipype/interfaces/spm/tests/test_auto_Reslice.py +++ b/nipype/interfaces/spm/tests/test_auto_Reslice.py @@ -4,15 +4,30 @@ def test_Reslice_inputs(): input_map = dict( - in_file=dict(extensions=None, mandatory=True), - interp=dict(usedefault=True), + in_file=dict( + extensions=None, + mandatory=True, + ), + interp=dict( + usedefault=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), - out_file=dict(extensions=None), + mfile=dict( + usedefault=True, + ), + out_file=dict( + extensions=None, + ), paths=dict(), - space_defining=dict(extensions=None, mandatory=True), + space_defining=dict( + extensions=None, + mandatory=True, + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = Reslice.input_spec() @@ -22,7 +37,11 @@ def test_Reslice_inputs(): def test_Reslice_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Reslice.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py b/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py index bf2fe90c4e..8bbb1d1307 100644 --- a/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py +++ b/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py @@ -4,16 +4,33 @@ def test_ResliceToReference_inputs(): input_map = dict( - bounding_box=dict(field="comp{2}.idbbvox.bb"), - in_files=dict(field="fnames", mandatory=True), - interpolation=dict(field="interp"), + bounding_box=dict( + field="comp{2}.idbbvox.bb", + ), + in_files=dict( + field="fnames", + mandatory=True, + ), + interpolation=dict( + field="interp", + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), + mfile=dict( + usedefault=True, + ), paths=dict(), - target=dict(extensions=None, field="comp{1}.id.space"), + target=dict( + extensions=None, + field="comp{1}.id.space", + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), - voxel_sizes=dict(field="comp{2}.idbbvox.vox"), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + voxel_sizes=dict( + field="comp{2}.idbbvox.vox", + ), ) inputs = ResliceToReference.input_spec() @@ -23,7 +40,9 @@ def test_ResliceToReference_inputs(): def test_ResliceToReference_outputs(): - output_map = dict(out_files=dict()) + output_map = dict( + out_files=dict(), + ) outputs = ResliceToReference.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_SPMCommand.py b/nipype/interfaces/spm/tests/test_auto_SPMCommand.py index 3e9cecf29d..0c0a8d7506 100644 --- a/nipype/interfaces/spm/tests/test_auto_SPMCommand.py +++ b/nipype/interfaces/spm/tests/test_auto_SPMCommand.py @@ -5,10 +5,15 @@ def test_SPMCommand_inputs(): input_map = dict( matlab_cmd=dict(), - mfile=dict(usedefault=True), + mfile=dict( + usedefault=True, + ), paths=dict(), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = SPMCommand.input_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_Segment.py b/nipype/interfaces/spm/tests/test_auto_Segment.py index b498e1553a..4859c76a00 100644 --- a/nipype/interfaces/spm/tests/test_auto_Segment.py +++ b/nipype/interfaces/spm/tests/test_auto_Segment.py @@ -4,26 +4,64 @@ def test_Segment_inputs(): input_map = dict( - affine_regularization=dict(field="opts.regtype"), - bias_fwhm=dict(field="opts.biasfwhm"), - bias_regularization=dict(field="opts.biasreg"), - clean_masks=dict(field="output.cleanup"), - csf_output_type=dict(field="output.CSF"), - data=dict(copyfile=False, field="data", mandatory=True), - gaussians_per_class=dict(field="opts.ngaus"), - gm_output_type=dict(field="output.GM"), - mask_image=dict(extensions=None, field="opts.msk"), + affine_regularization=dict( + field="opts.regtype", + ), + bias_fwhm=dict( + field="opts.biasfwhm", + ), + bias_regularization=dict( + field="opts.biasreg", + ), + clean_masks=dict( + field="output.cleanup", + ), + csf_output_type=dict( + field="output.CSF", + ), + data=dict( + copyfile=False, + field="data", + mandatory=True, + ), + gaussians_per_class=dict( + field="opts.ngaus", + ), + gm_output_type=dict( + field="output.GM", + ), + mask_image=dict( + extensions=None, + field="opts.msk", + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), + mfile=dict( + usedefault=True, + ), paths=dict(), - sampling_distance=dict(field="opts.samp"), - save_bias_corrected=dict(field="output.biascor"), - 
tissue_prob_maps=dict(field="opts.tpm"), + sampling_distance=dict( + field="opts.samp", + ), + save_bias_corrected=dict( + field="output.biascor", + ), + tissue_prob_maps=dict( + field="opts.tpm", + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), - warp_frequency_cutoff=dict(field="opts.warpco"), - warping_regularization=dict(field="opts.warpreg"), - wm_output_type=dict(field="output.WM"), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + warp_frequency_cutoff=dict( + field="opts.warpco", + ), + warping_regularization=dict( + field="opts.warpreg", + ), + wm_output_type=dict( + field="output.WM", + ), ) inputs = Segment.input_spec() @@ -34,21 +72,47 @@ def test_Segment_inputs(): def test_Segment_outputs(): output_map = dict( - bias_corrected_image=dict(extensions=None), - inverse_transformation_mat=dict(extensions=None), - modulated_csf_image=dict(extensions=None), - modulated_gm_image=dict(extensions=None), + bias_corrected_image=dict( + extensions=None, + ), + inverse_transformation_mat=dict( + extensions=None, + ), + modulated_csf_image=dict( + extensions=None, + ), + modulated_gm_image=dict( + extensions=None, + ), modulated_input_image=dict( - deprecated="0.10", extensions=None, new_name="bias_corrected_image" - ), - modulated_wm_image=dict(extensions=None), - native_csf_image=dict(extensions=None), - native_gm_image=dict(extensions=None), - native_wm_image=dict(extensions=None), - normalized_csf_image=dict(extensions=None), - normalized_gm_image=dict(extensions=None), - normalized_wm_image=dict(extensions=None), - transformation_mat=dict(extensions=None), + deprecated="0.10", + extensions=None, + new_name="bias_corrected_image", + ), + modulated_wm_image=dict( + extensions=None, + ), + native_csf_image=dict( + extensions=None, + ), + native_gm_image=dict( + extensions=None, + ), + native_wm_image=dict( + extensions=None, + ), + normalized_csf_image=dict( + extensions=None, + ), + normalized_gm_image=dict( + extensions=None, + ), + normalized_wm_image=dict( + extensions=None, + ), + transformation_mat=dict( + extensions=None, + ), ) outputs = Segment.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_SliceTiming.py b/nipype/interfaces/spm/tests/test_auto_SliceTiming.py index a9cba8fcdc..85ddf03c52 100644 --- a/nipype/interfaces/spm/tests/test_auto_SliceTiming.py +++ b/nipype/interfaces/spm/tests/test_auto_SliceTiming.py @@ -4,18 +4,45 @@ def test_SliceTiming_inputs(): input_map = dict( - in_files=dict(copyfile=False, field="scans", mandatory=True), + in_files=dict( + copyfile=False, + field="scans", + mandatory=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), - num_slices=dict(field="nslices", mandatory=True), - out_prefix=dict(field="prefix", usedefault=True), + mfile=dict( + usedefault=True, + ), + num_slices=dict( + field="nslices", + mandatory=True, + ), + out_prefix=dict( + field="prefix", + usedefault=True, + ), paths=dict(), - ref_slice=dict(field="refslice", mandatory=True), - slice_order=dict(field="so", mandatory=True), - time_acquisition=dict(field="ta", mandatory=True), - time_repetition=dict(field="tr", mandatory=True), + ref_slice=dict( + field="refslice", + mandatory=True, + ), + slice_order=dict( + field="so", + mandatory=True, + ), + time_acquisition=dict( + field="ta", + mandatory=True, + ), + time_repetition=dict( + field="tr", + mandatory=True, + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = 
SliceTiming.input_spec() @@ -25,7 +52,9 @@ def test_SliceTiming_inputs(): def test_SliceTiming_outputs(): - output_map = dict(timecorrected_files=dict()) + output_map = dict( + timecorrected_files=dict(), + ) outputs = SliceTiming.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_Smooth.py b/nipype/interfaces/spm/tests/test_auto_Smooth.py index 05512ade20..5ed7aa57c0 100644 --- a/nipype/interfaces/spm/tests/test_auto_Smooth.py +++ b/nipype/interfaces/spm/tests/test_auto_Smooth.py @@ -4,16 +4,34 @@ def test_Smooth_inputs(): input_map = dict( - data_type=dict(field="dtype"), - fwhm=dict(field="fwhm"), - implicit_masking=dict(field="im"), - in_files=dict(copyfile=False, field="data", mandatory=True), + data_type=dict( + field="dtype", + ), + fwhm=dict( + field="fwhm", + ), + implicit_masking=dict( + field="im", + ), + in_files=dict( + copyfile=False, + field="data", + mandatory=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), - out_prefix=dict(field="prefix", usedefault=True), + mfile=dict( + usedefault=True, + ), + out_prefix=dict( + field="prefix", + usedefault=True, + ), paths=dict(), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = Smooth.input_spec() @@ -23,7 +41,9 @@ def test_Smooth_inputs(): def test_Smooth_outputs(): - output_map = dict(smoothed_files=dict()) + output_map = dict( + smoothed_files=dict(), + ) outputs = Smooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_Threshold.py b/nipype/interfaces/spm/tests/test_auto_Threshold.py index 93bb95ac6f..470ae7f2d1 100644 --- a/nipype/interfaces/spm/tests/test_auto_Threshold.py +++ b/nipype/interfaces/spm/tests/test_auto_Threshold.py @@ -4,21 +4,50 @@ def test_Threshold_inputs(): input_map = dict( - contrast_index=dict(mandatory=True), - extent_fdr_p_threshold=dict(usedefault=True), - extent_threshold=dict(usedefault=True), - force_activation=dict(usedefault=True), - height_threshold=dict(usedefault=True), - height_threshold_type=dict(usedefault=True), + contrast_index=dict( + mandatory=True, + ), + extent_fdr_p_threshold=dict( + usedefault=True, + ), + extent_threshold=dict( + usedefault=True, + ), + force_activation=dict( + usedefault=True, + ), + height_threshold=dict( + usedefault=True, + ), + height_threshold_type=dict( + usedefault=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), + mfile=dict( + usedefault=True, + ), paths=dict(), - spm_mat_file=dict(copyfile=True, extensions=None, mandatory=True), - stat_image=dict(copyfile=False, extensions=None, mandatory=True), - use_fwe_correction=dict(usedefault=True), + spm_mat_file=dict( + copyfile=True, + extensions=None, + mandatory=True, + ), + stat_image=dict( + copyfile=False, + extensions=None, + mandatory=True, + ), + use_fwe_correction=dict( + usedefault=True, + ), use_mcr=dict(), - use_topo_fdr=dict(usedefault=True), - use_v8struct=dict(min_ver="8", usedefault=True), + use_topo_fdr=dict( + usedefault=True, + ), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = Threshold.input_spec() @@ -32,9 +61,13 @@ def test_Threshold_outputs(): activation_forced=dict(), cluster_forming_thr=dict(), n_clusters=dict(), - pre_topo_fdr_map=dict(extensions=None), + pre_topo_fdr_map=dict( + extensions=None, + ), pre_topo_n_clusters=dict(), - thresholded_map=dict(extensions=None), + thresholded_map=dict( + extensions=None, + ), ) outputs = 
Threshold.output_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py b/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py index 6ee8622b4d..89c5a42e57 100644 --- a/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py +++ b/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py @@ -4,16 +4,35 @@ def test_ThresholdStatistics_inputs(): input_map = dict( - contrast_index=dict(mandatory=True), - extent_threshold=dict(usedefault=True), - height_threshold=dict(mandatory=True), + contrast_index=dict( + mandatory=True, + ), + extent_threshold=dict( + usedefault=True, + ), + height_threshold=dict( + mandatory=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), + mfile=dict( + usedefault=True, + ), paths=dict(), - spm_mat_file=dict(copyfile=True, extensions=None, mandatory=True), - stat_image=dict(copyfile=False, extensions=None, mandatory=True), + spm_mat_file=dict( + copyfile=True, + extensions=None, + mandatory=True, + ), + stat_image=dict( + copyfile=False, + extensions=None, + mandatory=True, + ), use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = ThresholdStatistics.input_spec() diff --git a/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py b/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py index 40760ba75d..fd03e6c867 100644 --- a/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py @@ -4,27 +4,50 @@ def test_TwoSampleTTestDesign_inputs(): input_map = dict( - covariates=dict(field="cov"), - dependent=dict(field="des.t2.dept"), - explicit_mask_file=dict(extensions=None, field="masking.em"), + covariates=dict( + field="cov", + ), + dependent=dict( + field="des.t2.dept", + ), + explicit_mask_file=dict( + extensions=None, + field="masking.em", + ), global_calc_mean=dict( - field="globalc.g_mean", xor=["global_calc_omit", "global_calc_values"] + field="globalc.g_mean", + xor=["global_calc_omit", "global_calc_values"], ), global_calc_omit=dict( - field="globalc.g_omit", xor=["global_calc_mean", "global_calc_values"] + field="globalc.g_omit", + xor=["global_calc_mean", "global_calc_values"], ), global_calc_values=dict( field="globalc.g_user.global_uval", xor=["global_calc_mean", "global_calc_omit"], ), - global_normalization=dict(field="globalm.glonorm"), - group1_files=dict(field="des.t2.scans1", mandatory=True), - group2_files=dict(field="des.t2.scans2", mandatory=True), + global_normalization=dict( + field="globalm.glonorm", + ), + group1_files=dict( + field="des.t2.scans1", + mandatory=True, + ), + group2_files=dict( + field="des.t2.scans2", + mandatory=True, + ), matlab_cmd=dict(), - mfile=dict(usedefault=True), - no_grand_mean_scaling=dict(field="globalm.gmsca.gmsca_no"), + mfile=dict( + usedefault=True, + ), + no_grand_mean_scaling=dict( + field="globalm.gmsca.gmsca_no", + ), paths=dict(), - spm_mat_dir=dict(field="dir"), + spm_mat_dir=dict( + field="dir", + ), threshold_mask_absolute=dict( field="masking.tm.tma.athresh", xor=["threshold_mask_none", "threshold_mask_relative"], @@ -37,10 +60,17 @@ def test_TwoSampleTTestDesign_inputs(): field="masking.tm.tmr.rthresh", xor=["threshold_mask_absolute", "threshold_mask_none"], ), - unequal_variance=dict(field="des.t2.variance"), - use_implicit_threshold=dict(field="masking.im"), + unequal_variance=dict( + field="des.t2.variance", + ), + use_implicit_threshold=dict( + field="masking.im", + ), 
use_mcr=dict(), - use_v8struct=dict(min_ver="8", usedefault=True), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), ) inputs = TwoSampleTTestDesign.input_spec() @@ -50,7 +80,11 @@ def test_TwoSampleTTestDesign_inputs(): def test_TwoSampleTTestDesign_outputs(): - output_map = dict(spm_mat_file=dict(extensions=None)) + output_map = dict( + spm_mat_file=dict( + extensions=None, + ), + ) outputs = TwoSampleTTestDesign.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/spm/tests/test_auto_VBMSegment.py b/nipype/interfaces/spm/tests/test_auto_VBMSegment.py index 1c57968462..4bc4664c27 100644 --- a/nipype/interfaces/spm/tests/test_auto_VBMSegment.py +++ b/nipype/interfaces/spm/tests/test_auto_VBMSegment.py @@ -5,64 +5,146 @@ def test_VBMSegment_inputs(): input_map = dict( bias_corrected_affine=dict( - field="estwrite.output.bias.affine", usedefault=True + field="estwrite.output.bias.affine", + usedefault=True, ), bias_corrected_native=dict( - field="estwrite.output.bias.native", usedefault=True + field="estwrite.output.bias.native", + usedefault=True, ), bias_corrected_normalized=dict( - field="estwrite.output.bias.warped", usedefault=True + field="estwrite.output.bias.warped", + usedefault=True, + ), + bias_fwhm=dict( + field="estwrite.opts.biasfwhm", + usedefault=True, + ), + bias_regularization=dict( + field="estwrite.opts.biasreg", + usedefault=True, + ), + cleanup_partitions=dict( + field="estwrite.extopts.cleanup", + usedefault=True, + ), + csf_dartel=dict( + field="estwrite.output.CSF.dartel", + usedefault=True, ), - bias_fwhm=dict(field="estwrite.opts.biasfwhm", usedefault=True), - bias_regularization=dict(field="estwrite.opts.biasreg", usedefault=True), - cleanup_partitions=dict(field="estwrite.extopts.cleanup", usedefault=True), - csf_dartel=dict(field="estwrite.output.CSF.dartel", usedefault=True), csf_modulated_normalized=dict( - field="estwrite.output.CSF.modulated", usedefault=True + field="estwrite.output.CSF.modulated", + usedefault=True, + ), + csf_native=dict( + field="estwrite.output.CSF.native", + usedefault=True, + ), + csf_normalized=dict( + field="estwrite.output.CSF.warped", + usedefault=True, ), - csf_native=dict(field="estwrite.output.CSF.native", usedefault=True), - csf_normalized=dict(field="estwrite.output.CSF.warped", usedefault=True), dartel_template=dict( extensions=[".hdr", ".img", ".img.gz", ".nii"], field="estwrite.extopts.dartelwarp.normhigh.darteltpm", ), - deformation_field=dict(field="estwrite.output.warps", usedefault=True), - display_results=dict(field="estwrite.extopts.print", usedefault=True), - gaussians_per_class=dict(usedefault=True), - gm_dartel=dict(field="estwrite.output.GM.dartel", usedefault=True), + deformation_field=dict( + field="estwrite.output.warps", + usedefault=True, + ), + display_results=dict( + field="estwrite.extopts.print", + usedefault=True, + ), + gaussians_per_class=dict( + usedefault=True, + ), + gm_dartel=dict( + field="estwrite.output.GM.dartel", + usedefault=True, + ), gm_modulated_normalized=dict( - field="estwrite.output.GM.modulated", usedefault=True + field="estwrite.output.GM.modulated", + usedefault=True, + ), + gm_native=dict( + field="estwrite.output.GM.native", + usedefault=True, + ), + gm_normalized=dict( + field="estwrite.output.GM.warped", + usedefault=True, + ), + in_files=dict( + copyfile=False, + field="estwrite.data", + mandatory=True, + ), + jacobian_determinant=dict( + field="estwrite.jacobian.warped", + usedefault=True, ), - 
gm_native=dict(field="estwrite.output.GM.native", usedefault=True), - gm_normalized=dict(field="estwrite.output.GM.warped", usedefault=True), - in_files=dict(copyfile=False, field="estwrite.data", mandatory=True), - jacobian_determinant=dict(field="estwrite.jacobian.warped", usedefault=True), matlab_cmd=dict(), - mfile=dict(usedefault=True), - mrf_weighting=dict(field="estwrite.extopts.mrf", usedefault=True), + mfile=dict( + usedefault=True, + ), + mrf_weighting=dict( + field="estwrite.extopts.mrf", + usedefault=True, + ), paths=dict(), - pve_label_dartel=dict(field="estwrite.output.label.dartel", usedefault=True), - pve_label_native=dict(field="estwrite.output.label.native", usedefault=True), + pve_label_dartel=dict( + field="estwrite.output.label.dartel", + usedefault=True, + ), + pve_label_native=dict( + field="estwrite.output.label.native", + usedefault=True, + ), pve_label_normalized=dict( - field="estwrite.output.label.warped", usedefault=True + field="estwrite.output.label.warped", + usedefault=True, + ), + sampling_distance=dict( + field="estwrite.opts.samp", + usedefault=True, + ), + spatial_normalization=dict( + usedefault=True, ), - sampling_distance=dict(field="estwrite.opts.samp", usedefault=True), - spatial_normalization=dict(usedefault=True), tissues=dict( - extensions=[".hdr", ".img", ".img.gz", ".nii"], field="estwrite.tpm" + extensions=[".hdr", ".img", ".img.gz", ".nii"], + field="estwrite.tpm", ), use_mcr=dict(), use_sanlm_denoising_filter=dict( - field="estwrite.extopts.sanlm", usedefault=True + field="estwrite.extopts.sanlm", + usedefault=True, + ), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + warping_regularization=dict( + field="estwrite.opts.warpreg", + usedefault=True, + ), + wm_dartel=dict( + field="estwrite.output.WM.dartel", + usedefault=True, ), - use_v8struct=dict(min_ver="8", usedefault=True), - warping_regularization=dict(field="estwrite.opts.warpreg", usedefault=True), - wm_dartel=dict(field="estwrite.output.WM.dartel", usedefault=True), wm_modulated_normalized=dict( - field="estwrite.output.WM.modulated", usedefault=True + field="estwrite.output.WM.modulated", + usedefault=True, + ), + wm_native=dict( + field="estwrite.output.WM.native", + usedefault=True, + ), + wm_normalized=dict( + field="estwrite.output.WM.warped", + usedefault=True, ), - wm_native=dict(field="estwrite.output.WM.native", usedefault=True), - wm_normalized=dict(field="estwrite.output.WM.warped", usedefault=True), ) inputs = VBMSegment.input_spec() diff --git a/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py b/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py index 74f9e7312c..dd7d45e42b 100644 --- a/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py +++ b/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py @@ -4,11 +4,18 @@ def test_BIDSDataGrabber_inputs(): input_map = dict( - base_dir=dict(mandatory=True), + base_dir=dict( + mandatory=True, + ), extra_derivatives=dict(), - index_derivatives=dict(mandatory=True, usedefault=True), + index_derivatives=dict( + mandatory=True, + usedefault=True, + ), output_query=dict(), - raise_on_empty=dict(usedefault=True), + raise_on_empty=dict( + usedefault=True, + ), ) inputs = BIDSDataGrabber.input_spec() diff --git a/nipype/interfaces/tests/test_auto_Bru2.py b/nipype/interfaces/tests/test_auto_Bru2.py index 8537020701..7935d2fc97 100644 --- a/nipype/interfaces/tests/test_auto_Bru2.py +++ b/nipype/interfaces/tests/test_auto_Bru2.py @@ -4,14 +4,34 @@ def test_Bru2_inputs(): input_map = dict( - actual_size=dict(argstr="-a"), 
- append_protocol_name=dict(argstr="-p"), - args=dict(argstr="%s"), - compress=dict(argstr="-z"), - environ=dict(nohash=True, usedefault=True), - force_conversion=dict(argstr="-f"), - input_dir=dict(argstr="%s", mandatory=True, position=-1), - output_filename=dict(argstr="-o %s", genfile=True), + actual_size=dict( + argstr="-a", + ), + append_protocol_name=dict( + argstr="-p", + ), + args=dict( + argstr="%s", + ), + compress=dict( + argstr="-z", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + force_conversion=dict( + argstr="-f", + ), + input_dir=dict( + argstr="%s", + mandatory=True, + position=-1, + ), + output_filename=dict( + argstr="-o %s", + genfile=True, + ), ) inputs = Bru2.input_spec() @@ -21,7 +41,11 @@ def test_Bru2_inputs(): def test_Bru2_outputs(): - output_map = dict(nii_file=dict(extensions=None)) + output_map = dict( + nii_file=dict( + extensions=None, + ), + ) outputs = Bru2.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_C3d.py b/nipype/interfaces/tests/test_auto_C3d.py index 8d0a473d02..d4d26e8264 100644 --- a/nipype/interfaces/tests/test_auto_C3d.py +++ b/nipype/interfaces/tests/test_auto_C3d.py @@ -4,19 +4,55 @@ def test_C3d_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", mandatory=True, position=1), - interp=dict(argstr="-interpolation %s"), - is_4d=dict(usedefault=True), - multicomp_split=dict(argstr="-mcr", position=0, usedefault=True), - out_file=dict(argstr="-o %s", extensions=None, position=-1, xor=["out_files"]), - out_files=dict(argstr="-oo %s", position=-1, xor=["out_file"]), - pix_type=dict(argstr="-type %s"), - resample=dict(argstr="-resample %s"), - scale=dict(argstr="-scale %s"), - shift=dict(argstr="-shift %s"), - smooth=dict(argstr="-smooth %s"), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + mandatory=True, + position=1, + ), + interp=dict( + argstr="-interpolation %s", + ), + is_4d=dict( + usedefault=True, + ), + multicomp_split=dict( + argstr="-mcr", + position=0, + usedefault=True, + ), + out_file=dict( + argstr="-o %s", + extensions=None, + position=-1, + xor=["out_files"], + ), + out_files=dict( + argstr="-oo %s", + position=-1, + xor=["out_file"], + ), + pix_type=dict( + argstr="-type %s", + ), + resample=dict( + argstr="-resample %s", + ), + scale=dict( + argstr="-scale %s", + ), + shift=dict( + argstr="-shift %s", + ), + smooth=dict( + argstr="-smooth %s", + ), ) inputs = C3d.input_spec() @@ -26,7 +62,9 @@ def test_C3d_inputs(): def test_C3d_outputs(): - output_map = dict(out_files=dict()) + output_map = dict( + out_files=dict(), + ) outputs = C3d.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_C3dAffineTool.py b/nipype/interfaces/tests/test_auto_C3dAffineTool.py index 851a354bd3..153f6090a7 100644 --- a/nipype/interfaces/tests/test_auto_C3dAffineTool.py +++ b/nipype/interfaces/tests/test_auto_C3dAffineTool.py @@ -4,13 +4,37 @@ def test_C3dAffineTool_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - fsl2ras=dict(argstr="-fsl2ras", position=4), - itk_transform=dict(argstr="-oitk %s", hash_files=False, position=5), - reference_file=dict(argstr="-ref %s", extensions=None, position=1), - source_file=dict(argstr="-src %s", extensions=None, position=2), - transform_file=dict(argstr="%s", extensions=None, position=3), 
+ args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fsl2ras=dict( + argstr="-fsl2ras", + position=4, + ), + itk_transform=dict( + argstr="-oitk %s", + hash_files=False, + position=5, + ), + reference_file=dict( + argstr="-ref %s", + extensions=None, + position=1, + ), + source_file=dict( + argstr="-src %s", + extensions=None, + position=2, + ), + transform_file=dict( + argstr="%s", + extensions=None, + position=3, + ), ) inputs = C3dAffineTool.input_spec() @@ -20,7 +44,11 @@ def test_C3dAffineTool_inputs(): def test_C3dAffineTool_outputs(): - output_map = dict(itk_transform=dict(extensions=None)) + output_map = dict( + itk_transform=dict( + extensions=None, + ), + ) outputs = C3dAffineTool.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_CopyMeta.py b/nipype/interfaces/tests/test_auto_CopyMeta.py index aa7fe10301..f7a554226e 100644 --- a/nipype/interfaces/tests/test_auto_CopyMeta.py +++ b/nipype/interfaces/tests/test_auto_CopyMeta.py @@ -4,10 +4,16 @@ def test_CopyMeta_inputs(): input_map = dict( - dest_file=dict(extensions=None, mandatory=True), + dest_file=dict( + extensions=None, + mandatory=True, + ), exclude_classes=dict(), include_classes=dict(), - src_file=dict(extensions=None, mandatory=True), + src_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = CopyMeta.input_spec() @@ -17,7 +23,11 @@ def test_CopyMeta_inputs(): def test_CopyMeta_outputs(): - output_map = dict(dest_file=dict(extensions=None)) + output_map = dict( + dest_file=dict( + extensions=None, + ), + ) outputs = CopyMeta.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_DataFinder.py b/nipype/interfaces/tests/test_auto_DataFinder.py index 18980655cf..e1f24b3517 100644 --- a/nipype/interfaces/tests/test_auto_DataFinder.py +++ b/nipype/interfaces/tests/test_auto_DataFinder.py @@ -5,11 +5,17 @@ def test_DataFinder_inputs(): input_map = dict( ignore_regexes=dict(), - match_regex=dict(usedefault=True), + match_regex=dict( + usedefault=True, + ), max_depth=dict(), min_depth=dict(), - root_paths=dict(mandatory=True), - unpack_single=dict(usedefault=True), + root_paths=dict( + mandatory=True, + ), + unpack_single=dict( + usedefault=True, + ), ) inputs = DataFinder.input_spec() diff --git a/nipype/interfaces/tests/test_auto_DataGrabber.py b/nipype/interfaces/tests/test_auto_DataGrabber.py index d2697f537d..a58e4dd434 100644 --- a/nipype/interfaces/tests/test_auto_DataGrabber.py +++ b/nipype/interfaces/tests/test_auto_DataGrabber.py @@ -5,10 +5,18 @@ def test_DataGrabber_inputs(): input_map = dict( base_directory=dict(), - drop_blank_outputs=dict(usedefault=True), - raise_on_empty=dict(usedefault=True), - sort_filelist=dict(mandatory=True), - template=dict(mandatory=True), + drop_blank_outputs=dict( + usedefault=True, + ), + raise_on_empty=dict( + usedefault=True, + ), + sort_filelist=dict( + mandatory=True, + ), + template=dict( + mandatory=True, + ), template_args=dict(), ) inputs = DataGrabber.input_spec() diff --git a/nipype/interfaces/tests/test_auto_DataSink.py b/nipype/interfaces/tests/test_auto_DataSink.py index 84bf211231..1ce4183b70 100644 --- a/nipype/interfaces/tests/test_auto_DataSink.py +++ b/nipype/interfaces/tests/test_auto_DataSink.py @@ -4,16 +4,22 @@ def test_DataSink_inputs(): input_map = dict( - _outputs=dict(usedefault=True), + _outputs=dict( + usedefault=True, + ), base_directory=dict(), bucket=dict(), container=dict(), creds_path=dict(), 
encrypt_bucket_keys=dict(), local_copy=dict(), - parameterization=dict(usedefault=True), + parameterization=dict( + usedefault=True, + ), regexp_substitutions=dict(), - remove_dest_dir=dict(usedefault=True), + remove_dest_dir=dict( + usedefault=True, + ), strip_dir=dict(), substitutions=dict(), ) @@ -25,7 +31,9 @@ def test_DataSink_inputs(): def test_DataSink_outputs(): - output_map = dict(out_file=dict()) + output_map = dict( + out_file=dict(), + ) outputs = DataSink.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_Dcm2nii.py b/nipype/interfaces/tests/test_auto_Dcm2nii.py index c45c4b71fb..948aafa083 100644 --- a/nipype/interfaces/tests/test_auto_Dcm2nii.py +++ b/nipype/interfaces/tests/test_auto_Dcm2nii.py @@ -4,27 +4,86 @@ def test_Dcm2nii_inputs(): input_map = dict( - anonymize=dict(argstr="-a", usedefault=True), - args=dict(argstr="%s"), - collapse_folders=dict(argstr="-c", usedefault=True), - config_file=dict(argstr="-b %s", extensions=None, genfile=True), - convert_all_pars=dict(argstr="-v", usedefault=True), - date_in_filename=dict(argstr="-d", usedefault=True), - environ=dict(nohash=True, usedefault=True), - events_in_filename=dict(argstr="-e", usedefault=True), - gzip_output=dict(argstr="-g", usedefault=True), - id_in_filename=dict(argstr="-i", usedefault=True), - nii_output=dict(argstr="-n", usedefault=True), - output_dir=dict(argstr="-o %s", genfile=True), - protocol_in_filename=dict(argstr="-p", usedefault=True), - reorient=dict(argstr="-r"), - reorient_and_crop=dict(argstr="-x", usedefault=True), - source_dir=dict(argstr="%s", mandatory=True, position=-1, xor=["source_names"]), - source_in_filename=dict(argstr="-f", usedefault=True), + anonymize=dict( + argstr="-a", + usedefault=True, + ), + args=dict( + argstr="%s", + ), + collapse_folders=dict( + argstr="-c", + usedefault=True, + ), + config_file=dict( + argstr="-b %s", + extensions=None, + genfile=True, + ), + convert_all_pars=dict( + argstr="-v", + usedefault=True, + ), + date_in_filename=dict( + argstr="-d", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + events_in_filename=dict( + argstr="-e", + usedefault=True, + ), + gzip_output=dict( + argstr="-g", + usedefault=True, + ), + id_in_filename=dict( + argstr="-i", + usedefault=True, + ), + nii_output=dict( + argstr="-n", + usedefault=True, + ), + output_dir=dict( + argstr="-o %s", + genfile=True, + ), + protocol_in_filename=dict( + argstr="-p", + usedefault=True, + ), + reorient=dict( + argstr="-r", + ), + reorient_and_crop=dict( + argstr="-x", + usedefault=True, + ), + source_dir=dict( + argstr="%s", + mandatory=True, + position=-1, + xor=["source_names"], + ), + source_in_filename=dict( + argstr="-f", + usedefault=True, + ), source_names=dict( - argstr="%s", copyfile=False, mandatory=True, position=-1, xor=["source_dir"] + argstr="%s", + copyfile=False, + mandatory=True, + position=-1, + xor=["source_dir"], + ), + spm_analyze=dict( + argstr="-s", + xor=["nii_output"], ), - spm_analyze=dict(argstr="-s", xor=["nii_output"]), ) inputs = Dcm2nii.input_spec() diff --git a/nipype/interfaces/tests/test_auto_Dcm2niix.py b/nipype/interfaces/tests/test_auto_Dcm2niix.py index 918f839faf..dec95abcff 100644 --- a/nipype/interfaces/tests/test_auto_Dcm2niix.py +++ b/nipype/interfaces/tests/test_auto_Dcm2niix.py @@ -4,28 +4,83 @@ def test_Dcm2niix_inputs(): input_map = dict( - anon_bids=dict(argstr="-ba", requires=["bids_format"]), - args=dict(argstr="%s"), - bids_format=dict(argstr="-b", 
usedefault=True), - comment=dict(argstr="-c %s"), - compress=dict(argstr="-z %s", usedefault=True), - compression=dict(argstr="-%d"), - crop=dict(argstr="-x", usedefault=True), - environ=dict(nohash=True, usedefault=True), - has_private=dict(argstr="-t", usedefault=True), - ignore_deriv=dict(argstr="-i"), - merge_imgs=dict(argstr="-m", usedefault=True), - out_filename=dict(argstr="-f %s"), - output_dir=dict(argstr="-o %s", usedefault=True), - philips_float=dict(argstr="-p"), - series_numbers=dict(argstr="-n %s..."), - single_file=dict(argstr="-s", usedefault=True), - source_dir=dict(argstr="%s", mandatory=True, position=-1, xor=["source_names"]), + anon_bids=dict( + argstr="-ba", + requires=["bids_format"], + ), + args=dict( + argstr="%s", + ), + bids_format=dict( + argstr="-b", + usedefault=True, + ), + comment=dict( + argstr="-c %s", + ), + compress=dict( + argstr="-z %s", + usedefault=True, + ), + compression=dict( + argstr="-%d", + ), + crop=dict( + argstr="-x", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + has_private=dict( + argstr="-t", + usedefault=True, + ), + ignore_deriv=dict( + argstr="-i", + ), + merge_imgs=dict( + argstr="-m", + usedefault=True, + ), + out_filename=dict( + argstr="-f %s", + ), + output_dir=dict( + argstr="-o %s", + usedefault=True, + ), + philips_float=dict( + argstr="-p", + ), + series_numbers=dict( + argstr="-n %s...", + ), + single_file=dict( + argstr="-s", + usedefault=True, + ), + source_dir=dict( + argstr="%s", + mandatory=True, + position=-1, + xor=["source_names"], + ), source_names=dict( - argstr="%s", copyfile=False, mandatory=True, position=-1, xor=["source_dir"] + argstr="%s", + copyfile=False, + mandatory=True, + position=-1, + xor=["source_dir"], + ), + to_nrrd=dict( + argstr="-e", + ), + verbose=dict( + argstr="-v", + usedefault=True, ), - to_nrrd=dict(argstr="-e"), - verbose=dict(argstr="-v", usedefault=True), ) inputs = Dcm2niix.input_spec() @@ -35,7 +90,12 @@ def test_Dcm2niix_inputs(): def test_Dcm2niix_outputs(): - output_map = dict(bids=dict(), bvals=dict(), bvecs=dict(), converted_files=dict()) + output_map = dict( + bids=dict(), + bvals=dict(), + bvecs=dict(), + converted_files=dict(), + ) outputs = Dcm2niix.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_DcmStack.py b/nipype/interfaces/tests/test_auto_DcmStack.py index b4e9ac72e8..02bf268904 100644 --- a/nipype/interfaces/tests/test_auto_DcmStack.py +++ b/nipype/interfaces/tests/test_auto_DcmStack.py @@ -4,12 +4,18 @@ def test_DcmStack_inputs(): input_map = dict( - dicom_files=dict(mandatory=True), + dicom_files=dict( + mandatory=True, + ), embed_meta=dict(), exclude_regexes=dict(), - force_read=dict(usedefault=True), + force_read=dict( + usedefault=True, + ), include_regexes=dict(), - out_ext=dict(usedefault=True), + out_ext=dict( + usedefault=True, + ), out_format=dict(), out_path=dict(), ) @@ -21,7 +27,11 @@ def test_DcmStack_inputs(): def test_DcmStack_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = DcmStack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_ExportFile.py b/nipype/interfaces/tests/test_auto_ExportFile.py index 25b60efd23..adac52e161 100644 --- a/nipype/interfaces/tests/test_auto_ExportFile.py +++ b/nipype/interfaces/tests/test_auto_ExportFile.py @@ -4,10 +4,18 @@ def test_ExportFile_inputs(): input_map = dict( - 
check_extension=dict(usedefault=True), + check_extension=dict( + usedefault=True, + ), clobber=dict(), - in_file=dict(extensions=None, mandatory=True), - out_file=dict(extensions=None, mandatory=True), + in_file=dict( + extensions=None, + mandatory=True, + ), + out_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = ExportFile.input_spec() @@ -17,7 +25,11 @@ def test_ExportFile_inputs(): def test_ExportFile_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = ExportFile.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_FreeSurferSource.py b/nipype/interfaces/tests/test_auto_FreeSurferSource.py index e645054392..875380f75f 100644 --- a/nipype/interfaces/tests/test_auto_FreeSurferSource.py +++ b/nipype/interfaces/tests/test_auto_FreeSurferSource.py @@ -4,9 +4,15 @@ def test_FreeSurferSource_inputs(): input_map = dict( - hemi=dict(usedefault=True), - subject_id=dict(mandatory=True), - subjects_dir=dict(mandatory=True), + hemi=dict( + usedefault=True, + ), + subject_id=dict( + mandatory=True, + ), + subjects_dir=dict( + mandatory=True, + ), ) inputs = FreeSurferSource.input_spec() @@ -17,43 +23,143 @@ def test_FreeSurferSource_inputs(): def test_FreeSurferSource_outputs(): output_map = dict( - BA_stats=dict(altkey="BA", loc="stats"), - T1=dict(extensions=None, loc="mri"), - annot=dict(altkey="*annot", loc="label"), - aparc_a2009s_stats=dict(altkey="aparc.a2009s", loc="stats"), - aparc_aseg=dict(altkey="aparc*aseg", loc="mri"), - aparc_stats=dict(altkey="aparc", loc="stats"), - area_pial=dict(altkey="area.pial", loc="surf"), - aseg=dict(extensions=None, loc="mri"), - aseg_stats=dict(altkey="aseg", loc="stats"), - avg_curv=dict(loc="surf"), - brain=dict(extensions=None, loc="mri"), - brainmask=dict(extensions=None, loc="mri"), - curv=dict(loc="surf"), - curv_pial=dict(altkey="curv.pial", loc="surf"), - curv_stats=dict(altkey="curv", loc="stats"), - entorhinal_exvivo_stats=dict(altkey="entorhinal_exvivo", loc="stats"), - filled=dict(extensions=None, loc="mri"), - graymid=dict(altkey=["graymid", "midthickness"], loc="surf"), - inflated=dict(loc="surf"), - jacobian_white=dict(loc="surf"), - label=dict(altkey="*label", loc="label"), - norm=dict(extensions=None, loc="mri"), - nu=dict(extensions=None, loc="mri"), - orig=dict(extensions=None, loc="mri"), - pial=dict(loc="surf"), - rawavg=dict(extensions=None, loc="mri"), - ribbon=dict(altkey="*ribbon", loc="mri"), - smoothwm=dict(loc="surf"), - sphere=dict(loc="surf"), - sphere_reg=dict(altkey="sphere.reg", loc="surf"), - sulc=dict(loc="surf"), - thickness=dict(loc="surf"), - volume=dict(loc="surf"), - white=dict(loc="surf"), - wm=dict(extensions=None, loc="mri"), - wmparc=dict(extensions=None, loc="mri"), - wmparc_stats=dict(altkey="wmparc", loc="stats"), + BA_stats=dict( + altkey="BA", + loc="stats", + ), + T1=dict( + extensions=None, + loc="mri", + ), + annot=dict( + altkey="*annot", + loc="label", + ), + aparc_a2009s_stats=dict( + altkey="aparc.a2009s", + loc="stats", + ), + aparc_aseg=dict( + altkey="aparc*aseg", + loc="mri", + ), + aparc_stats=dict( + altkey="aparc", + loc="stats", + ), + area_pial=dict( + altkey="area.pial", + loc="surf", + ), + aseg=dict( + extensions=None, + loc="mri", + ), + aseg_stats=dict( + altkey="aseg", + loc="stats", + ), + avg_curv=dict( + loc="surf", + ), + brain=dict( + extensions=None, + loc="mri", + ), + brainmask=dict( + extensions=None, + loc="mri", + ), + 
curv=dict( + loc="surf", + ), + curv_pial=dict( + altkey="curv.pial", + loc="surf", + ), + curv_stats=dict( + altkey="curv", + loc="stats", + ), + entorhinal_exvivo_stats=dict( + altkey="entorhinal_exvivo", + loc="stats", + ), + filled=dict( + extensions=None, + loc="mri", + ), + graymid=dict( + altkey=["graymid", "midthickness"], + loc="surf", + ), + inflated=dict( + loc="surf", + ), + jacobian_white=dict( + loc="surf", + ), + label=dict( + altkey="*label", + loc="label", + ), + norm=dict( + extensions=None, + loc="mri", + ), + nu=dict( + extensions=None, + loc="mri", + ), + orig=dict( + extensions=None, + loc="mri", + ), + pial=dict( + loc="surf", + ), + rawavg=dict( + extensions=None, + loc="mri", + ), + ribbon=dict( + altkey="*ribbon", + loc="mri", + ), + smoothwm=dict( + loc="surf", + ), + sphere=dict( + loc="surf", + ), + sphere_reg=dict( + altkey="sphere.reg", + loc="surf", + ), + sulc=dict( + loc="surf", + ), + thickness=dict( + loc="surf", + ), + volume=dict( + loc="surf", + ), + white=dict( + loc="surf", + ), + wm=dict( + extensions=None, + loc="mri", + ), + wmparc=dict( + extensions=None, + loc="mri", + ), + wmparc_stats=dict( + altkey="wmparc", + loc="stats", + ), ) outputs = FreeSurferSource.output_spec() diff --git a/nipype/interfaces/tests/test_auto_GroupAndStack.py b/nipype/interfaces/tests/test_auto_GroupAndStack.py index 4d96b6d507..7282e23a8b 100644 --- a/nipype/interfaces/tests/test_auto_GroupAndStack.py +++ b/nipype/interfaces/tests/test_auto_GroupAndStack.py @@ -4,12 +4,18 @@ def test_GroupAndStack_inputs(): input_map = dict( - dicom_files=dict(mandatory=True), + dicom_files=dict( + mandatory=True, + ), embed_meta=dict(), exclude_regexes=dict(), - force_read=dict(usedefault=True), + force_read=dict( + usedefault=True, + ), include_regexes=dict(), - out_ext=dict(usedefault=True), + out_ext=dict( + usedefault=True, + ), out_format=dict(), out_path=dict(), ) @@ -21,7 +27,9 @@ def test_GroupAndStack_inputs(): def test_GroupAndStack_outputs(): - output_map = dict(out_list=dict()) + output_map = dict( + out_list=dict(), + ) outputs = GroupAndStack.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_JSONFileGrabber.py b/nipype/interfaces/tests/test_auto_JSONFileGrabber.py index 797fb74271..6e548f17c1 100644 --- a/nipype/interfaces/tests/test_auto_JSONFileGrabber.py +++ b/nipype/interfaces/tests/test_auto_JSONFileGrabber.py @@ -3,7 +3,12 @@ def test_JSONFileGrabber_inputs(): - input_map = dict(defaults=dict(), in_file=dict(extensions=None)) + input_map = dict( + defaults=dict(), + in_file=dict( + extensions=None, + ), + ) inputs = JSONFileGrabber.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/interfaces/tests/test_auto_JSONFileSink.py b/nipype/interfaces/tests/test_auto_JSONFileSink.py index 03e9eb4511..cdfa32195b 100644 --- a/nipype/interfaces/tests/test_auto_JSONFileSink.py +++ b/nipype/interfaces/tests/test_auto_JSONFileSink.py @@ -4,9 +4,15 @@ def test_JSONFileSink_inputs(): input_map = dict( - _outputs=dict(usedefault=True), - in_dict=dict(usedefault=True), - out_file=dict(extensions=None), + _outputs=dict( + usedefault=True, + ), + in_dict=dict( + usedefault=True, + ), + out_file=dict( + extensions=None, + ), ) inputs = JSONFileSink.input_spec() @@ -16,7 +22,11 @@ def test_JSONFileSink_inputs(): def test_JSONFileSink_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = JSONFileSink.output_spec() for 
key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_LookupMeta.py b/nipype/interfaces/tests/test_auto_LookupMeta.py index d3508cf130..22f6678734 100644 --- a/nipype/interfaces/tests/test_auto_LookupMeta.py +++ b/nipype/interfaces/tests/test_auto_LookupMeta.py @@ -4,7 +4,13 @@ def test_LookupMeta_inputs(): input_map = dict( - in_file=dict(extensions=None, mandatory=True), meta_keys=dict(mandatory=True) + in_file=dict( + extensions=None, + mandatory=True, + ), + meta_keys=dict( + mandatory=True, + ), ) inputs = LookupMeta.input_spec() diff --git a/nipype/interfaces/tests/test_auto_MatlabCommand.py b/nipype/interfaces/tests/test_auto_MatlabCommand.py index ff9f51e77b..1dfd9c1dde 100644 --- a/nipype/interfaces/tests/test_auto_MatlabCommand.py +++ b/nipype/interfaces/tests/test_auto_MatlabCommand.py @@ -4,19 +4,54 @@ def test_MatlabCommand_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - logfile=dict(argstr="-logfile %s", extensions=None), - mfile=dict(usedefault=True), - nodesktop=dict(argstr="-nodesktop", nohash=True, usedefault=True), - nosplash=dict(argstr="-nosplash", nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + logfile=dict( + argstr="-logfile %s", + extensions=None, + ), + mfile=dict( + usedefault=True, + ), + nodesktop=dict( + argstr="-nodesktop", + nohash=True, + usedefault=True, + ), + nosplash=dict( + argstr="-nosplash", + nohash=True, + usedefault=True, + ), paths=dict(), - postscript=dict(usedefault=True), - prescript=dict(usedefault=True), - script=dict(argstr='-r "%s;exit"', mandatory=True, position=-1), - script_file=dict(extensions=None, usedefault=True), - single_comp_thread=dict(argstr="-singleCompThread", nohash=True), - uses_mcr=dict(nohash=True, xor=["nodesktop", "nosplash", "single_comp_thread"]), + postscript=dict( + usedefault=True, + ), + prescript=dict( + usedefault=True, + ), + script=dict( + argstr='-r "%s;exit"', + mandatory=True, + position=-1, + ), + script_file=dict( + extensions=None, + usedefault=True, + ), + single_comp_thread=dict( + argstr="-singleCompThread", + nohash=True, + ), + uses_mcr=dict( + nohash=True, + xor=["nodesktop", "nosplash", "single_comp_thread"], + ), ) inputs = MatlabCommand.input_spec() diff --git a/nipype/interfaces/tests/test_auto_MergeNifti.py b/nipype/interfaces/tests/test_auto_MergeNifti.py index 64c4a68dd0..f199fc5da6 100644 --- a/nipype/interfaces/tests/test_auto_MergeNifti.py +++ b/nipype/interfaces/tests/test_auto_MergeNifti.py @@ -4,9 +4,13 @@ def test_MergeNifti_inputs(): input_map = dict( - in_files=dict(mandatory=True), + in_files=dict( + mandatory=True, + ), merge_dim=dict(), - out_ext=dict(usedefault=True), + out_ext=dict( + usedefault=True, + ), out_format=dict(), out_path=dict(), sort_order=dict(), @@ -19,7 +23,11 @@ def test_MergeNifti_inputs(): def test_MergeNifti_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = MergeNifti.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_MeshFix.py b/nipype/interfaces/tests/test_auto_MeshFix.py index 9d4f8c8de0..1ae3b2b67c 100644 --- a/nipype/interfaces/tests/test_auto_MeshFix.py +++ b/nipype/interfaces/tests/test_auto_MeshFix.py @@ -4,18 +4,41 @@ def test_MeshFix_inputs(): input_map = dict( - args=dict(argstr="%s"), - cut_inner=dict(argstr="--cut-inner %d"), - 
cut_outer=dict(argstr="--cut-outer %d"), - decouple_inin=dict(argstr="--decouple-inin %d"), - decouple_outin=dict(argstr="--decouple-outin %d"), - decouple_outout=dict(argstr="--decouple-outout %d"), - dilation=dict(argstr="--dilate %d"), - dont_clean=dict(argstr="--no-clean"), - environ=dict(nohash=True, usedefault=True), - epsilon_angle=dict(argstr="-a %f"), + args=dict( + argstr="%s", + ), + cut_inner=dict( + argstr="--cut-inner %d", + ), + cut_outer=dict( + argstr="--cut-outer %d", + ), + decouple_inin=dict( + argstr="--decouple-inin %d", + ), + decouple_outin=dict( + argstr="--decouple-outin %d", + ), + decouple_outout=dict( + argstr="--decouple-outout %d", + ), + dilation=dict( + argstr="--dilate %d", + ), + dont_clean=dict( + argstr="--no-clean", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + epsilon_angle=dict( + argstr="-a %f", + ), finetuning_distance=dict( - argstr="%f", position=-2, requires=["finetuning_substeps"] + argstr="%f", + position=-2, + requires=["finetuning_substeps"], ), finetuning_inwards=dict( argstr="--fineTuneIn ", @@ -29,37 +52,75 @@ def test_MeshFix_inputs(): xor=["finetuning_inwards"], ), finetuning_substeps=dict( - argstr="%d", position=-1, requires=["finetuning_distance"] + argstr="%d", + position=-1, + requires=["finetuning_distance"], + ), + in_file1=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + in_file2=dict( + argstr="%s", + extensions=None, + position=2, + ), + join_closest_components=dict( + argstr="-jc", + xor=["join_closest_components"], ), - in_file1=dict(argstr="%s", extensions=None, mandatory=True, position=1), - in_file2=dict(argstr="%s", extensions=None, position=2), - join_closest_components=dict(argstr="-jc", xor=["join_closest_components"]), join_overlapping_largest_components=dict( - argstr="-j", xor=["join_closest_components"] - ), - laplacian_smoothing_steps=dict(argstr="--smooth %d"), - number_of_biggest_shells=dict(argstr="--shells %d"), - out_filename=dict(argstr="-o %s", extensions=None, genfile=True), - output_type=dict(usedefault=True), - quiet_mode=dict(argstr="-q"), - remove_handles=dict(argstr="--remove-handles"), + argstr="-j", + xor=["join_closest_components"], + ), + laplacian_smoothing_steps=dict( + argstr="--smooth %d", + ), + number_of_biggest_shells=dict( + argstr="--shells %d", + ), + out_filename=dict( + argstr="-o %s", + extensions=None, + genfile=True, + ), + output_type=dict( + usedefault=True, + ), + quiet_mode=dict( + argstr="-q", + ), + remove_handles=dict( + argstr="--remove-handles", + ), save_as_freesurfer_mesh=dict( - argstr="--fsmesh", xor=["save_as_vrml", "save_as_stl"] + argstr="--fsmesh", + xor=["save_as_vrml", "save_as_stl"], ), save_as_stl=dict( - argstr="--stl", xor=["save_as_vrml", "save_as_freesurfer_mesh"] + argstr="--stl", + xor=["save_as_vrml", "save_as_freesurfer_mesh"], ), save_as_vrml=dict( - argstr="--wrl", xor=["save_as_stl", "save_as_freesurfer_mesh"] + argstr="--wrl", + xor=["save_as_stl", "save_as_freesurfer_mesh"], + ), + set_intersections_to_one=dict( + argstr="--intersect", ), - set_intersections_to_one=dict(argstr="--intersect"), uniform_remeshing_steps=dict( - argstr="-u %d", requires=["uniform_remeshing_vertices"] + argstr="-u %d", + requires=["uniform_remeshing_vertices"], ), uniform_remeshing_vertices=dict( - argstr="--vertices %d", requires=["uniform_remeshing_steps"] + argstr="--vertices %d", + requires=["uniform_remeshing_steps"], + ), + x_shift=dict( + argstr="--smooth %d", ), - x_shift=dict(argstr="--smooth %d"), ) inputs = 
MeshFix.input_spec() @@ -69,7 +130,11 @@ def test_MeshFix_inputs(): def test_MeshFix_outputs(): - output_map = dict(mesh_file=dict(extensions=None)) + output_map = dict( + mesh_file=dict( + extensions=None, + ), + ) outputs = MeshFix.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_MySQLSink.py b/nipype/interfaces/tests/test_auto_MySQLSink.py index 56623680fe..702a21e9ce 100644 --- a/nipype/interfaces/tests/test_auto_MySQLSink.py +++ b/nipype/interfaces/tests/test_auto_MySQLSink.py @@ -4,8 +4,14 @@ def test_MySQLSink_inputs(): input_map = dict( - config=dict(extensions=None, mandatory=True, xor=["host"]), - database_name=dict(mandatory=True), + config=dict( + extensions=None, + mandatory=True, + xor=["host"], + ), + database_name=dict( + mandatory=True, + ), host=dict( mandatory=True, requires=["username", "password"], @@ -13,7 +19,9 @@ def test_MySQLSink_inputs(): xor=["config"], ), password=dict(), - table_name=dict(mandatory=True), + table_name=dict( + mandatory=True, + ), username=dict(), ) inputs = MySQLSink.input_spec() diff --git a/nipype/interfaces/tests/test_auto_PETPVC.py b/nipype/interfaces/tests/test_auto_PETPVC.py index 5bcddbdb0b..f7da7f45dd 100644 --- a/nipype/interfaces/tests/test_auto_PETPVC.py +++ b/nipype/interfaces/tests/test_auto_PETPVC.py @@ -4,20 +4,65 @@ def test_PETPVC_inputs(): input_map = dict( - alpha=dict(argstr="-a %.4f", usedefault=True), - args=dict(argstr="%s"), - debug=dict(argstr="-d", usedefault=True), - environ=dict(nohash=True, usedefault=True), - fwhm_x=dict(argstr="-x %.4f", mandatory=True), - fwhm_y=dict(argstr="-y %.4f", mandatory=True), - fwhm_z=dict(argstr="-z %.4f", mandatory=True), - in_file=dict(argstr="-i %s", extensions=None, mandatory=True), - mask_file=dict(argstr="-m %s", extensions=None, mandatory=True), - n_deconv=dict(argstr="-k %d", usedefault=True), - n_iter=dict(argstr="-n %d", usedefault=True), - out_file=dict(argstr="-o %s", extensions=None, genfile=True, hash_files=False), - pvc=dict(argstr="-p %s", mandatory=True), - stop_crit=dict(argstr="-s %.4f", usedefault=True), + alpha=dict( + argstr="-a %.4f", + usedefault=True, + ), + args=dict( + argstr="%s", + ), + debug=dict( + argstr="-d", + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fwhm_x=dict( + argstr="-x %.4f", + mandatory=True, + ), + fwhm_y=dict( + argstr="-y %.4f", + mandatory=True, + ), + fwhm_z=dict( + argstr="-z %.4f", + mandatory=True, + ), + in_file=dict( + argstr="-i %s", + extensions=None, + mandatory=True, + ), + mask_file=dict( + argstr="-m %s", + extensions=None, + mandatory=True, + ), + n_deconv=dict( + argstr="-k %d", + usedefault=True, + ), + n_iter=dict( + argstr="-n %d", + usedefault=True, + ), + out_file=dict( + argstr="-o %s", + extensions=None, + genfile=True, + hash_files=False, + ), + pvc=dict( + argstr="-p %s", + mandatory=True, + ), + stop_crit=dict( + argstr="-s %.4f", + usedefault=True, + ), ) inputs = PETPVC.input_spec() @@ -27,7 +72,11 @@ def test_PETPVC_inputs(): def test_PETPVC_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = PETPVC.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_Quickshear.py b/nipype/interfaces/tests/test_auto_Quickshear.py index e763237eb6..92212335dc 100644 --- a/nipype/interfaces/tests/test_auto_Quickshear.py +++ b/nipype/interfaces/tests/test_auto_Quickshear.py @@ -4,11 +4,29 @@ def 
test_Quickshear_inputs(): input_map = dict( - args=dict(argstr="%s"), - buff=dict(argstr="%d", position=4), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=1), - mask_file=dict(argstr="%s", extensions=None, mandatory=True, position=2), + args=dict( + argstr="%s", + ), + buff=dict( + argstr="%d", + position=4, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + mask_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), out_file=dict( argstr="%s", extensions=None, @@ -26,7 +44,11 @@ def test_Quickshear_inputs(): def test_Quickshear_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Quickshear.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_RCommand.py b/nipype/interfaces/tests/test_auto_RCommand.py index 45a807fe84..adfcf36cf0 100644 --- a/nipype/interfaces/tests/test_auto_RCommand.py +++ b/nipype/interfaces/tests/test_auto_RCommand.py @@ -4,11 +4,25 @@ def test_RCommand_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - rfile=dict(usedefault=True), - script=dict(argstr='-e "%s"', mandatory=True, position=-1), - script_file=dict(extensions=None, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + rfile=dict( + usedefault=True, + ), + script=dict( + argstr='-e "%s"', + mandatory=True, + position=-1, + ), + script_file=dict( + extensions=None, + usedefault=True, + ), ) inputs = RCommand.input_spec() diff --git a/nipype/interfaces/tests/test_auto_Reorient.py b/nipype/interfaces/tests/test_auto_Reorient.py index 7cc171de37..eb7a0ce5ff 100644 --- a/nipype/interfaces/tests/test_auto_Reorient.py +++ b/nipype/interfaces/tests/test_auto_Reorient.py @@ -4,7 +4,13 @@ def test_Reorient_inputs(): input_map = dict( - in_file=dict(extensions=None, mandatory=True), orientation=dict(usedefault=True) + in_file=dict( + extensions=None, + mandatory=True, + ), + orientation=dict( + usedefault=True, + ), ) inputs = Reorient.input_spec() @@ -14,7 +20,14 @@ def test_Reorient_inputs(): def test_Reorient_outputs(): - output_map = dict(out_file=dict(extensions=None), transform=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + transform=dict( + extensions=None, + ), + ) outputs = Reorient.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_Rescale.py b/nipype/interfaces/tests/test_auto_Rescale.py index 4a9ccced93..5b14cfc8c0 100644 --- a/nipype/interfaces/tests/test_auto_Rescale.py +++ b/nipype/interfaces/tests/test_auto_Rescale.py @@ -4,10 +4,18 @@ def test_Rescale_inputs(): input_map = dict( - in_file=dict(extensions=None, mandatory=True), + in_file=dict( + extensions=None, + mandatory=True, + ), invert=dict(), - percentile=dict(usedefault=True), - ref_file=dict(extensions=None, mandatory=True), + percentile=dict( + usedefault=True, + ), + ref_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = Rescale.input_spec() @@ -17,7 +25,11 @@ def test_Rescale_inputs(): def test_Rescale_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Rescale.output_spec() for key, metadata in 
list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_S3DataGrabber.py b/nipype/interfaces/tests/test_auto_S3DataGrabber.py index a73058f246..4b71fe49c3 100644 --- a/nipype/interfaces/tests/test_auto_S3DataGrabber.py +++ b/nipype/interfaces/tests/test_auto_S3DataGrabber.py @@ -4,14 +4,28 @@ def test_S3DataGrabber_inputs(): input_map = dict( - anon=dict(usedefault=True), - bucket=dict(mandatory=True), - bucket_path=dict(usedefault=True), + anon=dict( + usedefault=True, + ), + bucket=dict( + mandatory=True, + ), + bucket_path=dict( + usedefault=True, + ), local_directory=dict(), - raise_on_empty=dict(usedefault=True), - region=dict(usedefault=True), - sort_filelist=dict(mandatory=True), - template=dict(mandatory=True), + raise_on_empty=dict( + usedefault=True, + ), + region=dict( + usedefault=True, + ), + sort_filelist=dict( + mandatory=True, + ), + template=dict( + mandatory=True, + ), template_args=dict(), ) inputs = S3DataGrabber.input_spec() diff --git a/nipype/interfaces/tests/test_auto_SQLiteSink.py b/nipype/interfaces/tests/test_auto_SQLiteSink.py index ea394509bc..fe33b65675 100644 --- a/nipype/interfaces/tests/test_auto_SQLiteSink.py +++ b/nipype/interfaces/tests/test_auto_SQLiteSink.py @@ -4,8 +4,13 @@ def test_SQLiteSink_inputs(): input_map = dict( - database_file=dict(extensions=None, mandatory=True), - table_name=dict(mandatory=True), + database_file=dict( + extensions=None, + mandatory=True, + ), + table_name=dict( + mandatory=True, + ), ) inputs = SQLiteSink.input_spec() diff --git a/nipype/interfaces/tests/test_auto_SSHDataGrabber.py b/nipype/interfaces/tests/test_auto_SSHDataGrabber.py index ed8ead60e6..5a58cad276 100644 --- a/nipype/interfaces/tests/test_auto_SSHDataGrabber.py +++ b/nipype/interfaces/tests/test_auto_SSHDataGrabber.py @@ -4,17 +4,35 @@ def test_SSHDataGrabber_inputs(): input_map = dict( - base_directory=dict(mandatory=True), - download_files=dict(usedefault=True), - drop_blank_outputs=dict(usedefault=True), - hostname=dict(mandatory=True), + base_directory=dict( + mandatory=True, + ), + download_files=dict( + usedefault=True, + ), + drop_blank_outputs=dict( + usedefault=True, + ), + hostname=dict( + mandatory=True, + ), password=dict(), - raise_on_empty=dict(usedefault=True), - sort_filelist=dict(mandatory=True), - ssh_log_to_file=dict(usedefault=True), - template=dict(mandatory=True), + raise_on_empty=dict( + usedefault=True, + ), + sort_filelist=dict( + mandatory=True, + ), + ssh_log_to_file=dict( + usedefault=True, + ), + template=dict( + mandatory=True, + ), template_args=dict(), - template_expression=dict(usedefault=True), + template_expression=dict( + usedefault=True, + ), username=dict(), ) inputs = SSHDataGrabber.input_spec() diff --git a/nipype/interfaces/tests/test_auto_SelectFiles.py b/nipype/interfaces/tests/test_auto_SelectFiles.py index ec04e42740..06f86c36f0 100644 --- a/nipype/interfaces/tests/test_auto_SelectFiles.py +++ b/nipype/interfaces/tests/test_auto_SelectFiles.py @@ -5,9 +5,15 @@ def test_SelectFiles_inputs(): input_map = dict( base_directory=dict(), - force_lists=dict(usedefault=True), - raise_on_empty=dict(usedefault=True), - sort_filelist=dict(usedefault=True), + force_lists=dict( + usedefault=True, + ), + raise_on_empty=dict( + usedefault=True, + ), + sort_filelist=dict( + usedefault=True, + ), ) inputs = SelectFiles.input_spec() diff --git a/nipype/interfaces/tests/test_auto_SignalExtraction.py b/nipype/interfaces/tests/test_auto_SignalExtraction.py index 33b2512e75..272d94f54d 100644 --- 
a/nipype/interfaces/tests/test_auto_SignalExtraction.py +++ b/nipype/interfaces/tests/test_auto_SignalExtraction.py @@ -4,13 +4,29 @@ def test_SignalExtraction_inputs(): input_map = dict( - class_labels=dict(mandatory=True), - detrend=dict(usedefault=True), - in_file=dict(extensions=None, mandatory=True), - incl_shared_variance=dict(usedefault=True), - include_global=dict(usedefault=True), - label_files=dict(mandatory=True), - out_file=dict(extensions=None, usedefault=True), + class_labels=dict( + mandatory=True, + ), + detrend=dict( + usedefault=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), + incl_shared_variance=dict( + usedefault=True, + ), + include_global=dict( + usedefault=True, + ), + label_files=dict( + mandatory=True, + ), + out_file=dict( + extensions=None, + usedefault=True, + ), ) inputs = SignalExtraction.input_spec() @@ -20,7 +36,11 @@ def test_SignalExtraction_inputs(): def test_SignalExtraction_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = SignalExtraction.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_SlicerCommandLine.py b/nipype/interfaces/tests/test_auto_SlicerCommandLine.py index 080db16b53..c13466483b 100644 --- a/nipype/interfaces/tests/test_auto_SlicerCommandLine.py +++ b/nipype/interfaces/tests/test_auto_SlicerCommandLine.py @@ -4,8 +4,13 @@ def test_SlicerCommandLine_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), module=dict(), ) inputs = SlicerCommandLine.input_spec() diff --git a/nipype/interfaces/tests/test_auto_SplitNifti.py b/nipype/interfaces/tests/test_auto_SplitNifti.py index 3babd07241..14ccc6bdb0 100644 --- a/nipype/interfaces/tests/test_auto_SplitNifti.py +++ b/nipype/interfaces/tests/test_auto_SplitNifti.py @@ -4,8 +4,13 @@ def test_SplitNifti_inputs(): input_map = dict( - in_file=dict(extensions=None, mandatory=True), - out_ext=dict(usedefault=True), + in_file=dict( + extensions=None, + mandatory=True, + ), + out_ext=dict( + usedefault=True, + ), out_format=dict(), out_path=dict(), split_dim=dict(), @@ -18,7 +23,9 @@ def test_SplitNifti_inputs(): def test_SplitNifti_outputs(): - output_map = dict(out_list=dict()) + output_map = dict( + out_list=dict(), + ) outputs = SplitNifti.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/tests/test_auto_XNATSink.py b/nipype/interfaces/tests/test_auto_XNATSink.py index 92234e557b..ec6f920f57 100644 --- a/nipype/interfaces/tests/test_auto_XNATSink.py +++ b/nipype/interfaces/tests/test_auto_XNATSink.py @@ -4,17 +4,39 @@ def test_XNATSink_inputs(): input_map = dict( - _outputs=dict(usedefault=True), - assessor_id=dict(xor=["reconstruction_id"]), + _outputs=dict( + usedefault=True, + ), + assessor_id=dict( + xor=["reconstruction_id"], + ), cache_dir=dict(), - config=dict(extensions=None, mandatory=True, xor=["server"]), - experiment_id=dict(mandatory=True), - project_id=dict(mandatory=True), + config=dict( + extensions=None, + mandatory=True, + xor=["server"], + ), + experiment_id=dict( + mandatory=True, + ), + project_id=dict( + mandatory=True, + ), pwd=dict(), - reconstruction_id=dict(xor=["assessor_id"]), - server=dict(mandatory=True, requires=["user", "pwd"], xor=["config"]), - share=dict(usedefault=True), - subject_id=dict(mandatory=True), + 
reconstruction_id=dict( + xor=["assessor_id"], + ), + server=dict( + mandatory=True, + requires=["user", "pwd"], + xor=["config"], + ), + share=dict( + usedefault=True, + ), + subject_id=dict( + mandatory=True, + ), user=dict(), ) inputs = XNATSink.input_spec() diff --git a/nipype/interfaces/tests/test_auto_XNATSource.py b/nipype/interfaces/tests/test_auto_XNATSource.py index 7114d47840..f115657c9f 100644 --- a/nipype/interfaces/tests/test_auto_XNATSource.py +++ b/nipype/interfaces/tests/test_auto_XNATSource.py @@ -5,11 +5,23 @@ def test_XNATSource_inputs(): input_map = dict( cache_dir=dict(), - config=dict(extensions=None, mandatory=True, xor=["server"]), + config=dict( + extensions=None, + mandatory=True, + xor=["server"], + ), pwd=dict(), - query_template=dict(mandatory=True), - query_template_args=dict(usedefault=True), - server=dict(mandatory=True, requires=["user", "pwd"], xor=["config"]), + query_template=dict( + mandatory=True, + ), + query_template_args=dict( + usedefault=True, + ), + server=dict( + mandatory=True, + requires=["user", "pwd"], + xor=["config"], + ), user=dict(), ) inputs = XNATSource.input_spec() diff --git a/nipype/interfaces/utility/tests/test_auto_AssertEqual.py b/nipype/interfaces/utility/tests/test_auto_AssertEqual.py index 43a63eb081..c550a5efba 100644 --- a/nipype/interfaces/utility/tests/test_auto_AssertEqual.py +++ b/nipype/interfaces/utility/tests/test_auto_AssertEqual.py @@ -4,8 +4,14 @@ def test_AssertEqual_inputs(): input_map = dict( - volume1=dict(extensions=None, mandatory=True), - volume2=dict(extensions=None, mandatory=True), + volume1=dict( + extensions=None, + mandatory=True, + ), + volume2=dict( + extensions=None, + mandatory=True, + ), ) inputs = AssertEqual.input_spec() diff --git a/nipype/interfaces/utility/tests/test_auto_CSVReader.py b/nipype/interfaces/utility/tests/test_auto_CSVReader.py index 4aec1fbb9e..be24c59eb4 100644 --- a/nipype/interfaces/utility/tests/test_auto_CSVReader.py +++ b/nipype/interfaces/utility/tests/test_auto_CSVReader.py @@ -4,7 +4,13 @@ def test_CSVReader_inputs(): input_map = dict( - header=dict(usedefault=True), in_file=dict(extensions=None, mandatory=True) + header=dict( + usedefault=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), ) inputs = CSVReader.input_spec() diff --git a/nipype/interfaces/utility/tests/test_auto_Function.py b/nipype/interfaces/utility/tests/test_auto_Function.py index 790ffa1ad0..fdbccc3098 100644 --- a/nipype/interfaces/utility/tests/test_auto_Function.py +++ b/nipype/interfaces/utility/tests/test_auto_Function.py @@ -3,7 +3,11 @@ def test_Function_inputs(): - input_map = dict(function_str=dict(mandatory=True)) + input_map = dict( + function_str=dict( + mandatory=True, + ), + ) inputs = Function.input_spec() for key, metadata in list(input_map.items()): diff --git a/nipype/interfaces/utility/tests/test_auto_Merge.py b/nipype/interfaces/utility/tests/test_auto_Merge.py index 0e3af5c80e..f9304a9897 100644 --- a/nipype/interfaces/utility/tests/test_auto_Merge.py +++ b/nipype/interfaces/utility/tests/test_auto_Merge.py @@ -4,9 +4,15 @@ def test_Merge_inputs(): input_map = dict( - axis=dict(usedefault=True), - no_flatten=dict(usedefault=True), - ravel_inputs=dict(usedefault=True), + axis=dict( + usedefault=True, + ), + no_flatten=dict( + usedefault=True, + ), + ravel_inputs=dict( + usedefault=True, + ), ) inputs = Merge.input_spec() @@ -16,7 +22,9 @@ def test_Merge_inputs(): def test_Merge_outputs(): - output_map = dict(out=dict()) + output_map = dict( + out=dict(), + ) 
outputs = Merge.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/utility/tests/test_auto_Rename.py b/nipype/interfaces/utility/tests/test_auto_Rename.py index 6f426cca9c..5b9a183796 100644 --- a/nipype/interfaces/utility/tests/test_auto_Rename.py +++ b/nipype/interfaces/utility/tests/test_auto_Rename.py @@ -4,11 +4,18 @@ def test_Rename_inputs(): input_map = dict( - format_string=dict(mandatory=True), - in_file=dict(extensions=None, mandatory=True), + format_string=dict( + mandatory=True, + ), + in_file=dict( + extensions=None, + mandatory=True, + ), keep_ext=dict(), parse_string=dict(), - use_fullpath=dict(usedefault=True), + use_fullpath=dict( + usedefault=True, + ), ) inputs = Rename.input_spec() @@ -18,7 +25,11 @@ def test_Rename_inputs(): def test_Rename_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Rename.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/utility/tests/test_auto_Select.py b/nipype/interfaces/utility/tests/test_auto_Select.py index 7595b92f9a..21dcb91670 100644 --- a/nipype/interfaces/utility/tests/test_auto_Select.py +++ b/nipype/interfaces/utility/tests/test_auto_Select.py @@ -3,7 +3,14 @@ def test_Select_inputs(): - input_map = dict(index=dict(mandatory=True), inlist=dict(mandatory=True)) + input_map = dict( + index=dict( + mandatory=True, + ), + inlist=dict( + mandatory=True, + ), + ) inputs = Select.input_spec() for key, metadata in list(input_map.items()): @@ -12,7 +19,9 @@ def test_Select_inputs(): def test_Select_outputs(): - output_map = dict(out=dict()) + output_map = dict( + out=dict(), + ) outputs = Select.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/utility/tests/test_auto_Split.py b/nipype/interfaces/utility/tests/test_auto_Split.py index 5ad771031f..60f46fb73c 100644 --- a/nipype/interfaces/utility/tests/test_auto_Split.py +++ b/nipype/interfaces/utility/tests/test_auto_Split.py @@ -4,9 +4,15 @@ def test_Split_inputs(): input_map = dict( - inlist=dict(mandatory=True), - splits=dict(mandatory=True), - squeeze=dict(usedefault=True), + inlist=dict( + mandatory=True, + ), + splits=dict( + mandatory=True, + ), + squeeze=dict( + usedefault=True, + ), ) inputs = Split.input_spec() diff --git a/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py b/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py index e0d3f0b038..f06a118fc2 100644 --- a/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py +++ b/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py @@ -4,10 +4,24 @@ def test_Vnifti2Image_inputs(): input_map = dict( - args=dict(argstr="%s"), - attributes=dict(argstr="-attr %s", extensions=None, position=2), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=1), + args=dict( + argstr="%s", + ), + attributes=dict( + argstr="-attr %s", + extensions=None, + position=2, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + position=1, + ), out_file=dict( argstr="-out %s", extensions=None, @@ -26,7 +40,11 @@ def test_Vnifti2Image_inputs(): def test_Vnifti2Image_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = Vnifti2Image.output_spec() for key, metadata in list(output_map.items()): diff --git 
a/nipype/interfaces/vista/tests/test_auto_VtoMat.py b/nipype/interfaces/vista/tests/test_auto_VtoMat.py index d35ff8661b..8d2b53ab0c 100644 --- a/nipype/interfaces/vista/tests/test_auto_VtoMat.py +++ b/nipype/interfaces/vista/tests/test_auto_VtoMat.py @@ -4,9 +4,19 @@ def test_VtoMat_inputs(): input_map = dict( - args=dict(argstr="%s"), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="-in %s", extensions=None, mandatory=True, position=1), + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="-in %s", + extensions=None, + mandatory=True, + position=1, + ), out_file=dict( argstr="-out %s", extensions=None, @@ -25,7 +35,11 @@ def test_VtoMat_inputs(): def test_VtoMat_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = VtoMat.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/workbench/tests/test_auto_CiftiSmooth.py b/nipype/interfaces/workbench/tests/test_auto_CiftiSmooth.py index 47307b656b..03beac887a 100644 --- a/nipype/interfaces/workbench/tests/test_auto_CiftiSmooth.py +++ b/nipype/interfaces/workbench/tests/test_auto_CiftiSmooth.py @@ -4,7 +4,9 @@ def test_CiftiSmooth_inputs(): input_map = dict( - args=dict(argstr="%s"), + args=dict( + argstr="%s", + ), cerebellum_corrected_areas=dict( argstr="cerebellum-corrected-areas %s", extensions=None, @@ -12,21 +14,53 @@ def test_CiftiSmooth_inputs(): requires=["cerebellum_surf"], ), cerebellum_surf=dict( - argstr="-cerebellum-surface %s", extensions=None, position=9 - ), - cifti_roi=dict(argstr="-cifti-roi %s", extensions=None, position=11), - direction=dict(argstr="%s", mandatory=True, position=3), - environ=dict(nohash=True, usedefault=True), - fix_zeros_surf=dict(argstr="-fix-zeros-surface", position=13), - fix_zeros_vol=dict(argstr="-fix-zeros-volume", position=12), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), + argstr="-cerebellum-surface %s", + extensions=None, + position=9, + ), + cifti_roi=dict( + argstr="-cifti-roi %s", + extensions=None, + position=11, + ), + direction=dict( + argstr="%s", + mandatory=True, + position=3, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fix_zeros_surf=dict( + argstr="-fix-zeros-surface", + position=13, + ), + fix_zeros_vol=dict( + argstr="-fix-zeros-volume", + position=12, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), left_corrected_areas=dict( - argstr="-left-corrected-areas %s", extensions=None, position=6 + argstr="-left-corrected-areas %s", + extensions=None, + position=6, ), left_surf=dict( - argstr="-left-surface %s", extensions=None, mandatory=True, position=5 + argstr="-left-surface %s", + extensions=None, + mandatory=True, + position=5, + ), + merged_volume=dict( + argstr="-merged-volume", + position=14, ), - merged_volume=dict(argstr="-merged-volume", position=14), out_file=dict( argstr="%s", extensions=None, @@ -36,13 +70,26 @@ def test_CiftiSmooth_inputs(): position=4, ), right_corrected_areas=dict( - argstr="-right-corrected-areas %s", extensions=None, position=8 + argstr="-right-corrected-areas %s", + extensions=None, + position=8, ), right_surf=dict( - argstr="-right-surface %s", extensions=None, mandatory=True, position=7 + argstr="-right-surface %s", + extensions=None, + mandatory=True, + position=7, + ), + sigma_surf=dict( + argstr="%s", + mandatory=True, + position=1, + ), + sigma_vol=dict( + 
argstr="%s", + mandatory=True, + position=2, ), - sigma_surf=dict(argstr="%s", mandatory=True, position=1), - sigma_vol=dict(argstr="%s", mandatory=True, position=2), ) inputs = CiftiSmooth.input_spec() @@ -52,7 +99,11 @@ def test_CiftiSmooth_inputs(): def test_CiftiSmooth_outputs(): - output_map = dict(out_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + ) outputs = CiftiSmooth.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/workbench/tests/test_auto_MetricResample.py b/nipype/interfaces/workbench/tests/test_auto_MetricResample.py index d717054d16..e7a861963b 100644 --- a/nipype/interfaces/workbench/tests/test_auto_MetricResample.py +++ b/nipype/interfaces/workbench/tests/test_auto_MetricResample.py @@ -4,17 +4,60 @@ def test_MetricResample_inputs(): input_map = dict( - area_metrics=dict(argstr="-area-metrics", position=5, xor=["area_surfs"]), - area_surfs=dict(argstr="-area-surfs", position=5, xor=["area_metrics"]), - args=dict(argstr="%s"), - current_area=dict(argstr="%s", extensions=None, position=6), - current_sphere=dict(argstr="%s", extensions=None, mandatory=True, position=1), - environ=dict(nohash=True, usedefault=True), - in_file=dict(argstr="%s", extensions=None, mandatory=True, position=0), - largest=dict(argstr="-largest", position=10), - method=dict(argstr="%s", mandatory=True, position=3), - new_area=dict(argstr="%s", extensions=None, position=7), - new_sphere=dict(argstr="%s", extensions=None, mandatory=True, position=2), + area_metrics=dict( + argstr="-area-metrics", + position=5, + xor=["area_surfs"], + ), + area_surfs=dict( + argstr="-area-surfs", + position=5, + xor=["area_metrics"], + ), + args=dict( + argstr="%s", + ), + current_area=dict( + argstr="%s", + extensions=None, + position=6, + ), + current_sphere=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=0, + ), + largest=dict( + argstr="-largest", + position=10, + ), + method=dict( + argstr="%s", + mandatory=True, + position=3, + ), + new_area=dict( + argstr="%s", + extensions=None, + position=7, + ), + new_sphere=dict( + argstr="%s", + extensions=None, + mandatory=True, + position=2, + ), out_file=dict( argstr="%s", extensions=None, @@ -23,8 +66,15 @@ def test_MetricResample_inputs(): name_template="%s.out", position=4, ), - roi_metric=dict(argstr="-current-roi %s", extensions=None, position=8), - valid_roi_out=dict(argstr="-valid-roi-out", position=9), + roi_metric=dict( + argstr="-current-roi %s", + extensions=None, + position=8, + ), + valid_roi_out=dict( + argstr="-valid-roi-out", + position=9, + ), ) inputs = MetricResample.input_spec() @@ -34,7 +84,14 @@ def test_MetricResample_inputs(): def test_MetricResample_outputs(): - output_map = dict(out_file=dict(extensions=None), roi_file=dict(extensions=None)) + output_map = dict( + out_file=dict( + extensions=None, + ), + roi_file=dict( + extensions=None, + ), + ) outputs = MetricResample.output_spec() for key, metadata in list(output_map.items()): diff --git a/nipype/interfaces/workbench/tests/test_auto_WBCommand.py b/nipype/interfaces/workbench/tests/test_auto_WBCommand.py index dd0e33ed98..d2cc9a6b96 100644 --- a/nipype/interfaces/workbench/tests/test_auto_WBCommand.py +++ b/nipype/interfaces/workbench/tests/test_auto_WBCommand.py @@ -3,7 +3,15 @@ def test_WBCommand_inputs(): - input_map = 
dict(args=dict(argstr="%s"), environ=dict(nohash=True, usedefault=True)) + input_map = dict( + args=dict( + argstr="%s", + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ) inputs = WBCommand.input_spec() for key, metadata in list(input_map.items()): From 65863b8db29d39f459253408ac606f6192d9526a Mon Sep 17 00:00:00 2001 From: Fabio Bernardoni Date: Wed, 20 Oct 2021 12:17:11 +0200 Subject: [PATCH 1116/1665] added my name to zenodo --- .zenodo.json | 5 +++++ nipype/sphinxext/plot_workflow.py | 1 + 2 files changed, 6 insertions(+) diff --git a/.zenodo.json b/.zenodo.json index 0b2d0f65fb..e152cb308f 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -313,6 +313,11 @@ "name": "Geisler, Daniel", "orcid": "0000-0003-2076-5329" }, + { + "affiliation": "Division of Psychological and Social Medicine and Developmental Neuroscience, Faculty of Medicine, Technische Universit\u00e4t Dresden, Dresden, Germany", + "name": "Bernardoni, Fabio", + "orcid": "0000-0002-5112-405X" + }, { "name": "Salvatore, John" }, diff --git a/nipype/sphinxext/plot_workflow.py b/nipype/sphinxext/plot_workflow.py index 95b87da157..a83b69f55c 100644 --- a/nipype/sphinxext/plot_workflow.py +++ b/nipype/sphinxext/plot_workflow.py @@ -106,6 +106,7 @@ Provide a customized template for preparing restructured text. """ + import sys import os import shutil From 7929843c71d0fdb1b06ae8f6f7874eecead5c6d4 Mon Sep 17 00:00:00 2001 From: Fabio Bernardoni Date: Wed, 20 Oct 2021 12:32:21 +0200 Subject: [PATCH 1117/1665] test_auto_FieldMap.py automatically added --- .../spm/tests/test_auto_FieldMap.py | 46 +++++++++++++++++-- 1 file changed, 42 insertions(+), 4 deletions(-) diff --git a/nipype/interfaces/spm/tests/test_auto_FieldMap.py b/nipype/interfaces/spm/tests/test_auto_FieldMap.py index a91eec64d8..1c223c715e 100644 --- a/nipype/interfaces/spm/tests/test_auto_FieldMap.py +++ b/nipype/interfaces/spm/tests/test_auto_FieldMap.py @@ -13,20 +13,31 @@ def test_FieldMap_inputs(): field="subj.defaults.defaultsval.blipdir", mandatory=True, ), + distortion_direction=dict( + field="roptions.pedir", + usedefault=True, + ), echo_times=dict( field="subj.defaults.defaultsval.et", - mandatory=True, ), epi_file=dict( copyfile=False, extensions=None, field="subj.session.epi", - mandatory=True, ), epifm=dict( field="subj.defaults.defaultsval.epifm", usedefault=True, ), + in_files=dict( + copyfile=True, + field="data.scans", + mandatory=True, + ), + interpolation=dict( + field="roptions.rinterp", + usedefault=True, + ), jacobian_modulation=dict( field="subj.defaults.defaultsval.ajm", usedefault=True, @@ -38,7 +49,6 @@ def test_FieldMap_inputs(): copyfile=False, extensions=None, field="subj.data.presubphasemag.magnitude", - mandatory=True, ), mask_fwhm=dict( field="subj.defaults.defaultsval.mflags.fwhm", @@ -72,6 +82,10 @@ def test_FieldMap_inputs(): field="subj.defaults.defaultsval.mflags.nerode", usedefault=True, ), + out_prefix=dict( + field="roptions.prefix", + usedefault=True, + ), pad=dict( field="subj.defaults.defaultsval.uflags.pad", usedefault=True, @@ -81,12 +95,14 @@ def test_FieldMap_inputs(): copyfile=False, extensions=None, field="subj.data.presubphasemag.phase", - mandatory=True, ), reg=dict( field="subj.defaults.defaultsval.mflags.reg", usedefault=True, ), + reslice_interp=dict( + field="roptions.rinterp", + ), sessname=dict( field="subj.sessname", usedefault=True, @@ -113,6 +129,24 @@ def test_FieldMap_inputs(): min_ver="8", usedefault=True, ), + vdmfile=dict( + copyfile=True, + extensions=None, + field="data.vdmfile", + 
mandatory=True, + ), + write_mask=dict( + field="roptions.mask", + ), + write_which=dict( + field="roptions.which", + maxlen=2, + minlen=2, + usedefault=True, + ), + write_wrap=dict( + field="roptions.wrap", + ), writeunwarped=dict( field="subj.writeunwarped", usedefault=True, @@ -131,6 +165,10 @@ def test_FieldMap_inputs(): def test_FieldMap_outputs(): output_map = dict( + mean_image=dict( + extensions=None, + ), + out_files=dict(), vdm=dict( extensions=None, ), From 550f1fa70f5480c0b5e69b35b3497e1525a3c706 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 20 Oct 2021 10:12:11 -0400 Subject: [PATCH 1118/1665] DOC: Update release date --- doc/changelog/1.X.X-changelog.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/changelog/1.X.X-changelog.rst b/doc/changelog/1.X.X-changelog.rst index 10011c6830..1bf81b42ab 100644 --- a/doc/changelog/1.X.X-changelog.rst +++ b/doc/changelog/1.X.X-changelog.rst @@ -1,4 +1,4 @@ -1.7.0 (October 18, 2021) +1.7.0 (October 20, 2021) ======================== (`Full changelog `__) From 20331fd74c9a541fd19a387de1b02178cd4e3ba6 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Wed, 20 Oct 2021 11:33:21 -0400 Subject: [PATCH 1119/1665] MNT: 1.7.1-dev --- doc/interfaces.rst | 2 +- nipype/info.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/interfaces.rst b/doc/interfaces.rst index a7523a9cc9..2ee0bf5762 100644 --- a/doc/interfaces.rst +++ b/doc/interfaces.rst @@ -8,7 +8,7 @@ Interfaces and Workflows :Release: |version| :Date: |today| -Previous versions: `1.6.1 `_ `1.6.0 `_ +Previous versions: `1.7.0 `_ `1.6.1 `_ Workflows --------- diff --git a/nipype/info.py b/nipype/info.py index c92f48df5d..f6d52f576d 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -5,7 +5,7 @@ # nipype version information # Remove -dev for release -__version__ = "1.7.0" +__version__ = "1.7.1-dev" def get_nipype_gitversion(): From 83fda45981ffe50c0115f1e49ac611b0185232ef Mon Sep 17 00:00:00 2001 From: Fabio Bernardoni Date: Wed, 20 Oct 2021 18:30:16 +0200 Subject: [PATCH 1120/1665] separate apply and calculate VDM in two classes --- nipype/interfaces/spm/preprocess.py | 202 +++++++++++++--------------- 1 file changed, 94 insertions(+), 108 deletions(-) diff --git a/nipype/interfaces/spm/preprocess.py b/nipype/interfaces/spm/preprocess.py index 44831c5604..6073f80e13 100644 --- a/nipype/interfaces/spm/preprocess.py +++ b/nipype/interfaces/spm/preprocess.py @@ -39,21 +39,16 @@ class FieldMapInputSpec(SPMCommandInputSpec): - jobtype = traits.Enum( - "calculatevdm", - "applyvdm", - usedefault=True, - desc="one of: calculatevdm, applyvdm", - ) + phase_file = File( - # mandatory=True, + mandatory=True, exists=True, copyfile=False, field="subj.data.presubphasemag.phase", desc="presubstracted phase file", ) magnitude_file = File( - # mandatory=True, + mandatory=True, exists=True, copyfile=False, field="subj.data.presubphasemag.magnitude", @@ -62,7 +57,7 @@ class FieldMapInputSpec(SPMCommandInputSpec): echo_times = traits.Tuple( traits.Float, traits.Float, - # mandatory=True, + mandatory=True, field="subj.defaults.defaultsval.et", desc="short and long echo times", ) @@ -169,7 +164,7 @@ class FieldMapInputSpec(SPMCommandInputSpec): epi_file = File( copyfile=False, exists=True, - # mandatory=True, + mandatory=True, field="subj.session.epi", desc="EPI to unwarp", ) @@ -195,6 +190,65 @@ class FieldMapInputSpec(SPMCommandInputSpec): desc="match anatomical image to EPI", ) + +class FieldMapOutputSpec(TraitedSpec): + vdm = 
File(exists=True, desc="voxel difference map") + + +class FieldMap(SPMCommand): + """Use the fieldmap toolbox from spm to calculate the voxel displacement map (VDM). + + http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=173 + + .. important:: + + This interface does not deal with real/imag magnitude images nor + with the two phase files case. + + Examples + -------- + >>> from nipype.interfaces.spm import FieldMap + >>> fm = FieldMap() + >>> fm.inputs.phase_file = 'phase.nii' + >>> fm.inputs.magnitude_file = 'magnitude.nii' + >>> fm.inputs.echo_times = (5.19, 7.65) + >>> fm.inputs.blip_direction = 1 + >>> fm.inputs.total_readout_time = 15.6 + >>> fm.inputs.epi_file = 'epi.nii' + >>> fm.run() # doctest: +SKIP + + """ + + input_spec = FieldMapInputSpec + output_spec = FieldMapOutputSpec + _jobtype = "tools" + _jobname = "fieldmap" + + def _format_arg(self, opt, spec, val): + """Convert input to appropriate format for spm""" + + if opt in ["phase_file", "magnitude_file", "anat_file", "epi_file"]: + + return scans_for_fname(ensure_list(val)) + + return super(FieldMap, self)._format_arg(opt, spec, val) + + def _parse_inputs(self): + """validate spm fieldmap options if set to None ignore""" + + einputs = super(FieldMap, self)._parse_inputs() + return [{"calculatevdm": einputs[0]}] + + def _list_outputs(self): + outputs = self._outputs().get() + jobtype = self.inputs.jobtype + + outputs["vdm"] = fname_presuffix(self.inputs.phase_file, prefix="vdm5_sc") + + return outputs + +class ApplyVDMInputSpec(SPMCommandInputSpec): + in_files = InputMultiObject( traits.Either( ImageFileSPM(exists=True), traits.List(ImageFileSPM(exists=True)) @@ -207,7 +261,7 @@ class FieldMapInputSpec(SPMCommandInputSpec): vdmfile = File( field="data.vdmfile", desc="Voxel displacement map to use", - mandatory=True, + #mandatory=True, copyfile=True, ) distortion_direction = traits.Int( @@ -253,25 +307,17 @@ class FieldMapInputSpec(SPMCommandInputSpec): desc="fieldmap corrected output prefix", ) - -class FieldMapOutputSpec(TraitedSpec): - vdm = File(exists=True, desc="voxel difference map") - +class ApplyVDMOutputSpec(TraitedSpec): out_files = OutputMultiPath( traits.Either(traits.List(File(exists=True)), File(exists=True)), desc=( - "If jobtype is applyvdm, " - "these will be the fieldmap corrected files." - " Otherwise, they will be copies " - "of in_files that have had their " - "headers rewritten." + "These will be the fieldmap corrected files." ), ) mean_image = File(exists=True, desc="Mean image") - -class FieldMap(SPMCommand): - """Use the fieldmap toolbox from spm to calculate the voxel displacement map (VDM). +class ApplyVDM(SPMCommand): + """Use the fieldmap toolbox from spm to apply the voxel displacement map (VDM) to some epi files. http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=173 @@ -280,114 +326,54 @@ class FieldMap(SPMCommand): This interface does not deal with real/imag magnitude images nor with the two phase files case. 
- Examples - -------- - >>> from nipype.interfaces.spm import FieldMap - >>> fm = FieldMap() - >>> fm.inputs.phase_file = 'phase.nii' - >>> fm.inputs.magnitude_file = 'magnitude.nii' - >>> fm.inputs.echo_times = (5.19, 7.65) - >>> fm.inputs.blip_direction = 1 - >>> fm.inputs.total_readout_time = 15.6 - >>> fm.inputs.epi_file = 'epi.nii' - >>> fm.run() # doctest: +SKIP - """ - input_spec = FieldMapInputSpec - output_spec = FieldMapOutputSpec + input_spec = ApplyVDMInputSpec + output_spec = ApplyVDMOutputSpec _jobtype = "tools" _jobname = "fieldmap" def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm""" - if (self.inputs.jobtype == "calculatevdm") and ( - opt in ["phase_file", "magnitude_file", "anat_file", "epi_file"] - ): - - return scans_for_fname(ensure_list(val)) - - if (self.inputs.jobtype == "applyvdm") and (opt == "in_files"): - return scans_for_fnames(ensure_list(val)) - if (self.inputs.jobtype == "applyvdm") and (opt == "vdmfile"): + if opt in ["in_files", "vdmfile"]: return scans_for_fname(ensure_list(val)) return super(FieldMap, self)._format_arg(opt, spec, val) def _parse_inputs(self): """validate spm fieldmap options if set to None ignore""" - if self.inputs.jobtype == "applyvdm": - einputs = super(FieldMap, self)._parse_inputs( - skip=( - "jobtype", - "phase_file", - "magnitude_file", - "echo_times", - "blip_direction", - "total_readout_time", - "maskbrain", - "epifm", - "jacobian_modulation", - "method", - "unwarp_fwhm", - "pad", - "ws", - "template", - "mask_fwhm", - "nerode", - "ndilate", - "thresh", - "reg", - "epi_file", - "matchvdm", - "sessname", - "writeunwarped", - "anat_file", - "matchanat", - ) - ) - - else: - einputs = super(FieldMap, self)._parse_inputs( - skip=("jobtype", "in_files", "vdmfile") - ) - jobtype = self.inputs.jobtype + einputs = super(ApplyVDM, self)._parse_inputs() - return [{"%s" % (jobtype): einputs[0]}] + return [{"applymap": einputs[0]}] def _list_outputs(self): outputs = self._outputs().get() jobtype = self.inputs.jobtype resliced_all = self.inputs.write_which[0] > 0 resliced_mean = self.inputs.write_which[1] > 0 - if jobtype == "calculatevdm": - outputs["vdm"] = fname_presuffix(self.inputs.phase_file, prefix="vdm5_sc") - elif jobtype == "applyvdm": - if resliced_mean: - if isinstance(self.inputs.in_files[0], list): - first_image = self.inputs.in_files[0][0] - else: - first_image = self.inputs.in_files[0] - outputs["mean_image"] = fname_presuffix(first_image, prefix="meanu") + if resliced_mean: + if isinstance(self.inputs.in_files[0], list): + first_image = self.inputs.in_files[0][0] + else: + first_image = self.inputs.in_files[0] + outputs["mean_image"] = fname_presuffix(first_image, prefix="meanu") - if resliced_all: - outputs["out_files"] = [] - for idx, imgf in enumerate(ensure_list(self.inputs.in_files)): - appliedvdm_run = [] - if isinstance(imgf, list): - for i, inner_imgf in enumerate(ensure_list(imgf)): - newfile = fname_presuffix( - inner_imgf, prefix=self.inputs.out_prefix - ) - appliedvdm_run.append(newfile) - else: - appliedvdm_run = fname_presuffix( - imgf, prefix=self.inputs.out_prefix + if resliced_all: + outputs["out_files"] = [] + for idx, imgf in enumerate(ensure_list(self.inputs.in_files)): + appliedvdm_run = [] + if isinstance(imgf, list): + for i, inner_imgf in enumerate(ensure_list(imgf)): + newfile = fname_presuffix( + inner_imgf, prefix=self.inputs.out_prefix ) - outputs["out_files"].append(appliedvdm_run) - return outputs - + appliedvdm_run.append(newfile) + else: + appliedvdm_run = 
fname_presuffix( + imgf, prefix=self.inputs.out_prefix + ) + outputs["out_files"].append(appliedvdm_run) return outputs From 9bcf65c0075342518b56210093261134f4de06ff Mon Sep 17 00:00:00 2001 From: Fabio Bernardoni Date: Wed, 20 Oct 2021 18:37:36 +0200 Subject: [PATCH 1121/1665] added auto tests --- .../spm/tests/test_auto_ApplyVDM.py | 73 +++++++++++++++++++ .../spm/tests/test_auto_FieldMap.py | 49 +------------ 2 files changed, 77 insertions(+), 45 deletions(-) create mode 100644 nipype/interfaces/spm/tests/test_auto_ApplyVDM.py diff --git a/nipype/interfaces/spm/tests/test_auto_ApplyVDM.py b/nipype/interfaces/spm/tests/test_auto_ApplyVDM.py new file mode 100644 index 0000000000..4c75db9881 --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_ApplyVDM.py @@ -0,0 +1,73 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from ..preprocess import ApplyVDM + + +def test_ApplyVDM_inputs(): + input_map = dict( + distortion_direction=dict( + field="roptions.pedir", + usedefault=True, + ), + in_files=dict( + copyfile=True, + field="data.scans", + mandatory=True, + ), + interpolation=dict( + field="roptions.rinterp", + usedefault=True, + ), + matlab_cmd=dict(), + mfile=dict( + usedefault=True, + ), + out_prefix=dict( + field="roptions.prefix", + usedefault=True, + ), + paths=dict(), + reslice_interp=dict( + field="roptions.rinterp", + ), + use_mcr=dict(), + use_v8struct=dict( + min_ver="8", + usedefault=True, + ), + vdmfile=dict( + copyfile=True, + extensions=None, + field="data.vdmfile", + ), + write_mask=dict( + field="roptions.mask", + ), + write_which=dict( + field="roptions.which", + maxlen=2, + minlen=2, + usedefault=True, + ), + write_wrap=dict( + field="roptions.wrap", + ), + ) + inputs = ApplyVDM.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_ApplyVDM_outputs(): + output_map = dict( + mean_image=dict( + extensions=None, + ), + out_files=dict(), + ) + outputs = ApplyVDM.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_FieldMap.py b/nipype/interfaces/spm/tests/test_auto_FieldMap.py index 1c223c715e..e1a07ed41e 100644 --- a/nipype/interfaces/spm/tests/test_auto_FieldMap.py +++ b/nipype/interfaces/spm/tests/test_auto_FieldMap.py @@ -13,42 +13,29 @@ def test_FieldMap_inputs(): field="subj.defaults.defaultsval.blipdir", mandatory=True, ), - distortion_direction=dict( - field="roptions.pedir", - usedefault=True, - ), echo_times=dict( field="subj.defaults.defaultsval.et", + mandatory=True, ), epi_file=dict( copyfile=False, extensions=None, field="subj.session.epi", + mandatory=True, ), epifm=dict( field="subj.defaults.defaultsval.epifm", usedefault=True, ), - in_files=dict( - copyfile=True, - field="data.scans", - mandatory=True, - ), - interpolation=dict( - field="roptions.rinterp", - usedefault=True, - ), jacobian_modulation=dict( field="subj.defaults.defaultsval.ajm", usedefault=True, ), - jobtype=dict( - usedefault=True, - ), magnitude_file=dict( copyfile=False, extensions=None, field="subj.data.presubphasemag.magnitude", + mandatory=True, ), mask_fwhm=dict( field="subj.defaults.defaultsval.mflags.fwhm", @@ -82,10 +69,6 @@ def test_FieldMap_inputs(): field="subj.defaults.defaultsval.mflags.nerode", usedefault=True, ), - out_prefix=dict( - field="roptions.prefix", - 
usedefault=True, - ), pad=dict( field="subj.defaults.defaultsval.uflags.pad", usedefault=True, @@ -95,14 +78,12 @@ def test_FieldMap_inputs(): copyfile=False, extensions=None, field="subj.data.presubphasemag.phase", + mandatory=True, ), reg=dict( field="subj.defaults.defaultsval.mflags.reg", usedefault=True, ), - reslice_interp=dict( - field="roptions.rinterp", - ), sessname=dict( field="subj.sessname", usedefault=True, @@ -129,24 +110,6 @@ def test_FieldMap_inputs(): min_ver="8", usedefault=True, ), - vdmfile=dict( - copyfile=True, - extensions=None, - field="data.vdmfile", - mandatory=True, - ), - write_mask=dict( - field="roptions.mask", - ), - write_which=dict( - field="roptions.which", - maxlen=2, - minlen=2, - usedefault=True, - ), - write_wrap=dict( - field="roptions.wrap", - ), writeunwarped=dict( field="subj.writeunwarped", usedefault=True, @@ -165,10 +128,6 @@ def test_FieldMap_inputs(): def test_FieldMap_outputs(): output_map = dict( - mean_image=dict( - extensions=None, - ), - out_files=dict(), vdm=dict( extensions=None, ), From 2e4bc8386cef1be316e20f871e1a4be8112581fd Mon Sep 17 00:00:00 2001 From: Fabio Bernardoni Date: Wed, 20 Oct 2021 19:12:02 +0200 Subject: [PATCH 1122/1665] now black-reformatted --- nipype/interfaces/spm/preprocess.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/nipype/interfaces/spm/preprocess.py b/nipype/interfaces/spm/preprocess.py index 6073f80e13..b6bd50c3ba 100644 --- a/nipype/interfaces/spm/preprocess.py +++ b/nipype/interfaces/spm/preprocess.py @@ -247,6 +247,7 @@ def _list_outputs(self): return outputs + class ApplyVDMInputSpec(SPMCommandInputSpec): in_files = InputMultiObject( @@ -261,7 +262,7 @@ class ApplyVDMInputSpec(SPMCommandInputSpec): vdmfile = File( field="data.vdmfile", desc="Voxel displacement map to use", - #mandatory=True, + # mandatory=True, copyfile=True, ) distortion_direction = traits.Int( @@ -307,15 +308,15 @@ class ApplyVDMInputSpec(SPMCommandInputSpec): desc="fieldmap corrected output prefix", ) + class ApplyVDMOutputSpec(TraitedSpec): out_files = OutputMultiPath( traits.Either(traits.List(File(exists=True)), File(exists=True)), - desc=( - "These will be the fieldmap corrected files." - ), + desc=("These will be the fieldmap corrected files."), ) mean_image = File(exists=True, desc="Mean image") + class ApplyVDM(SPMCommand): """Use the fieldmap toolbox from spm to apply the voxel displacement map (VDM) to some epi files. 
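
For orientation, here is a minimal usage sketch of the new ``ApplyVDM`` interface introduced above, analogous to the ``FieldMap`` docstring example that stays with the calculate-VDM class. As of this commit the class lives in ``nipype.interfaces.spm.preprocess`` (the next commit also exports it from ``nipype.interfaces.spm``); the file names below are illustrative placeholders, not files referenced by these patches:

    # Hedged usage sketch, assuming SPM is available and a voxel displacement map
    # has already been computed with the FieldMap interface; file names are placeholders.
    from nipype.interfaces.spm.preprocess import ApplyVDM

    apply_vdm = ApplyVDM()
    apply_vdm.inputs.in_files = "epi.nii"          # EPI volume(s) to unwarp (data.scans)
    apply_vdm.inputs.vdmfile = "vdm5_scphase.nii"  # voxel displacement map (data.vdmfile)
    apply_vdm.inputs.distortion_direction = 2      # phase-encode direction (roptions.pedir)
    apply_vdm.inputs.write_which = [2, 1]          # reslice all volumes and also write a mean image
    apply_vdm.run()  # requires a working MATLAB/SPM installation
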
From f791fc1dd60c57088097053176dc793640a1125d Mon Sep 17 00:00:00 2001 From: Fabio Bernardoni Date: Thu, 21 Oct 2021 12:57:26 +0200 Subject: [PATCH 1123/1665] * edited preprocess according to nipype team suggestions * added applyVDM interface to __init__.py file --- nipype/interfaces/spm/__init__.py | 1 + nipype/interfaces/spm/preprocess.py | 22 +++++++++++----------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/nipype/interfaces/spm/__init__.py b/nipype/interfaces/spm/__init__.py index 817b62a9e1..1823bef4da 100644 --- a/nipype/interfaces/spm/__init__.py +++ b/nipype/interfaces/spm/__init__.py @@ -4,6 +4,7 @@ """SPM is a software package for the analysis of brain imaging data sequences.""" from .base import Info, SPMCommand, logger, no_spm, scans_for_fname, scans_for_fnames from .preprocess import ( + ApplyVDM, FieldMap, SliceTiming, Realign, diff --git a/nipype/interfaces/spm/preprocess.py b/nipype/interfaces/spm/preprocess.py index b6bd50c3ba..87027ee61a 100644 --- a/nipype/interfaces/spm/preprocess.py +++ b/nipype/interfaces/spm/preprocess.py @@ -40,6 +40,13 @@ class FieldMapInputSpec(SPMCommandInputSpec): + jobtype = traits.Enum( + "calculatevdm", + usedefault=True, + deprecated="1.9.0", # Two minor releases in the future + desc="Must be 'calculatevdm'; to apply VDM, use the ApplyVDM interface.", + ) + phase_file = File( mandatory=True, exists=True, @@ -251,9 +258,7 @@ def _list_outputs(self): class ApplyVDMInputSpec(SPMCommandInputSpec): in_files = InputMultiObject( - traits.Either( - ImageFileSPM(exists=True), traits.List(ImageFileSPM(exists=True)) - ), + ImageFileSPM(exists=True), field="data.scans", mandatory=True, copyfile=True, @@ -262,7 +267,7 @@ class ApplyVDMInputSpec(SPMCommandInputSpec): vdmfile = File( field="data.vdmfile", desc="Voxel displacement map to use", - # mandatory=True, + mandatory=True, copyfile=True, ) distortion_direction = traits.Int( @@ -279,13 +284,8 @@ class ApplyVDMInputSpec(SPMCommandInputSpec): usedefault=True, desc="determines which images to apply vdm to", ) - interpolation = traits.Int( - 4, - field="roptions.rinterp", - desc="phase encode direction input data have been acquired with", - usedefault=True, - ) - reslice_interp = traits.Range( + interpolation = traits.Range( + value=4, low=0, high=7, field="roptions.rinterp", From f50b1caa27c0598075639fc10630db8c3003b4a4 Mon Sep 17 00:00:00 2001 From: Fabio Bernardoni Date: Thu, 21 Oct 2021 13:27:47 +0200 Subject: [PATCH 1124/1665] auto tests added to commit --- nipype/interfaces/spm/tests/test_auto_ApplyVDM.py | 5 +---- nipype/interfaces/spm/tests/test_auto_FieldMap.py | 4 ++++ 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/nipype/interfaces/spm/tests/test_auto_ApplyVDM.py b/nipype/interfaces/spm/tests/test_auto_ApplyVDM.py index 4c75db9881..2f56b49ef2 100644 --- a/nipype/interfaces/spm/tests/test_auto_ApplyVDM.py +++ b/nipype/interfaces/spm/tests/test_auto_ApplyVDM.py @@ -15,7 +15,6 @@ def test_ApplyVDM_inputs(): ), interpolation=dict( field="roptions.rinterp", - usedefault=True, ), matlab_cmd=dict(), mfile=dict( @@ -26,9 +25,6 @@ def test_ApplyVDM_inputs(): usedefault=True, ), paths=dict(), - reslice_interp=dict( - field="roptions.rinterp", - ), use_mcr=dict(), use_v8struct=dict( min_ver="8", @@ -38,6 +34,7 @@ def test_ApplyVDM_inputs(): copyfile=True, extensions=None, field="data.vdmfile", + mandatory=True, ), write_mask=dict( field="roptions.mask", diff --git a/nipype/interfaces/spm/tests/test_auto_FieldMap.py b/nipype/interfaces/spm/tests/test_auto_FieldMap.py 
index e1a07ed41e..ccd9e70c6e 100644 --- a/nipype/interfaces/spm/tests/test_auto_FieldMap.py +++ b/nipype/interfaces/spm/tests/test_auto_FieldMap.py @@ -31,6 +31,10 @@ def test_FieldMap_inputs(): field="subj.defaults.defaultsval.ajm", usedefault=True, ), + jobtype=dict( + deprecated="1.9.0", + usedefault=True, + ), magnitude_file=dict( copyfile=False, extensions=None, From 457581096ef03546c28757c1d05af29b7cedf1eb Mon Sep 17 00:00:00 2001 From: fabioboh Date: Thu, 21 Oct 2021 16:26:00 +0200 Subject: [PATCH 1125/1665] Update nipype/interfaces/spm/preprocess.py Co-authored-by: Chris Markiewicz --- nipype/interfaces/spm/preprocess.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/spm/preprocess.py b/nipype/interfaces/spm/preprocess.py index 87027ee61a..4b245f70bb 100644 --- a/nipype/interfaces/spm/preprocess.py +++ b/nipype/interfaces/spm/preprocess.py @@ -282,7 +282,7 @@ class ApplyVDMInputSpec(SPMCommandInputSpec): minlen=2, maxlen=2, usedefault=True, - desc="determines which images to apply vdm to", + desc="If the first value is non-zero, reslice all images. If the second value is non-zero, reslice a mean image.", ) interpolation = traits.Range( value=4, From f525c94f34a1521eb62bdca5647a16559fa1182a Mon Sep 17 00:00:00 2001 From: Fabio Bernardoni Date: Fri, 22 Oct 2021 13:20:30 +0200 Subject: [PATCH 1126/1665] corrected some bugs by testing within our workflow --- nipype/interfaces/spm/preprocess.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nipype/interfaces/spm/preprocess.py b/nipype/interfaces/spm/preprocess.py index 87027ee61a..d0a7e47bd1 100644 --- a/nipype/interfaces/spm/preprocess.py +++ b/nipype/interfaces/spm/preprocess.py @@ -339,18 +339,17 @@ def _format_arg(self, opt, spec, val): if opt in ["in_files", "vdmfile"]: return scans_for_fname(ensure_list(val)) - return super(FieldMap, self)._format_arg(opt, spec, val) + return super(ApplyVDM, self)._format_arg(opt, spec, val) def _parse_inputs(self): """validate spm fieldmap options if set to None ignore""" einputs = super(ApplyVDM, self)._parse_inputs() - return [{"applymap": einputs[0]}] + return [{"applyvdm": einputs[0]}] def _list_outputs(self): outputs = self._outputs().get() - jobtype = self.inputs.jobtype resliced_all = self.inputs.write_which[0] > 0 resliced_mean = self.inputs.write_which[1] > 0 if resliced_mean: @@ -3037,3 +3036,4 @@ def _parse_inputs(self): return einputs else: return super(VBMSegment, self)._parse_inputs(skip=("spatial_normalization")) + From 2d267b810be9269e9d9b5a8bdeadc042ea3cf5fe Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Fri, 22 Oct 2021 14:21:26 +0200 Subject: [PATCH 1127/1665] fix: improve error handling of ``CommandLine`` interfaces Currently, errors arising from interpolating the command line of these interfaces are handled poorly at the ``Node`` level. If the command line cannot be built, the error is printed in the logfile but the exception is caught and never raised (i.e., likely leading to an infinite loop as the execution is not stopped). I have experienced that while debugging fMRIPrep. To learn which of the inputs of a faulty interface derived from ``CommandLine`` was not being formatted, I had to also add the error annotation proposed for the ``_parse_inputs`` inner loop.
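
The annotation mentioned above amounts to exception chaining: wrap the per-input formatting call and re-raise with the name and value of the offending input, so the failing trait surfaces in the traceback instead of being swallowed. A self-contained sketch of that pattern follows; ``format_value`` and ``build_cmdline`` are illustrative stand-ins rather than nipype API, and the actual changes to ``_run_interface`` and ``_parse_inputs`` appear in the diff below:

    # Stand-alone illustration of the error-annotation pattern described above:
    # re-raise formatting failures with the input name and value that caused them,
    # chaining the original exception so its traceback is preserved.
    def format_value(argstr, value):
        # Stand-in for the per-input formatting step (CommandLine._format_arg).
        return argstr % value

    def build_cmdline(arg_specs, inputs):
        args = []
        for name, argstr in arg_specs.items():
            value = inputs.get(name)
            if value is None:
                continue
            try:
                args.append(format_value(argstr, value))
            except Exception as exc:
                # Name the failing input instead of letting the error vanish.
                raise ValueError(
                    f"Error formatting command line argument '{name}' with value '{value}'"
                ) from exc
        return " ".join(args)

    try:
        # A string where %d expects a number triggers the annotated failure.
        build_cmdline({"smoothing": "--smooth %d"}, {"smoothing": "4mm"})
    except ValueError as err:
        print(err)            # names the offending input and its value
        print(err.__cause__)  # the original TypeError from the % interpolation

Chaining with ``raise ... from exc`` keeps the root cause attached to the annotated error, which is what makes the faulty input identifiable without re-running the interface under a debugger.
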
--- nipype/interfaces/base/core.py | 35 +++++++++++++++++++++++++++++---- nipype/pipeline/engine/nodes.py | 8 ++++---- nipype/utils/subprocess.py | 6 +++++- 3 files changed, 40 insertions(+), 9 deletions(-) diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py index 65b35cadce..6ffa843bf4 100644 --- a/nipype/interfaces/base/core.py +++ b/nipype/interfaces/base/core.py @@ -607,6 +607,7 @@ class must be instantiated with a command argument _cmd = None _version = None _terminal_output = "stream" + _write_cmdline = False @classmethod def set_default_terminal_output(cls, output_type): @@ -623,7 +624,7 @@ def set_default_terminal_output(cls, output_type): else: raise AttributeError("Invalid terminal output_type: %s" % output_type) - def __init__(self, command=None, terminal_output=None, **inputs): + def __init__(self, command=None, terminal_output=None, write_cmdline=False, **inputs): super(CommandLine, self).__init__(**inputs) self._environ = None # Set command. Input argument takes precedence @@ -638,6 +639,8 @@ def __init__(self, command=None, terminal_output=None, **inputs): if terminal_output is not None: self.terminal_output = terminal_output + self._write_cmdline = write_cmdline + @property def cmd(self): """sets base command, immutable""" @@ -669,6 +672,14 @@ def terminal_output(self, value): ) self._terminal_output = value + @property + def write_cmdline(self): + return self._write_cmdline + + @write_cmdline.setter + def write_cmdline(self, value): + self._write_cmdline = value is True + def raise_exception(self, runtime): raise RuntimeError( ( @@ -716,9 +727,14 @@ def _run_interface(self, runtime, correct_return_codes=(0,)): adds stdout, stderr, merged, cmdline, dependencies, command_path """ - out_environ = self._get_environ() # Initialize runtime Bunch + + try: + runtime.cmdline = self.cmdline + except Exception as exc: + raise RuntimeError("Error raised when interpolating the command line") from exc + runtime.stdout = None runtime.stderr = None runtime.cmdline = self.cmdline @@ -742,7 +758,11 @@ def _run_interface(self, runtime, correct_return_codes=(0,)): if self._ldd else "" ) - runtime = run_command(runtime, output=self.terminal_output) + runtime = run_command( + runtime, + output=self.terminal_output, + write_cmdline=self.write_cmdline, + ) return runtime def _format_arg(self, name, trait_spec, value): @@ -907,7 +927,14 @@ def _parse_inputs(self, skip=None): if not isdefined(value): continue - arg = self._format_arg(name, spec, value) + + try: + arg = self._format_arg(name, spec, value) + except Exception as exc: + raise ValueError( + f"Error formatting command line argument '{name}' with value '{value}'" + ) from exc + if arg is None: continue pos = spec.position diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index bda77b1956..6e251647f6 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -209,6 +209,9 @@ def __init__( self.needed_outputs = needed_outputs self.config = None + if issubclass(self._interface.__class__, CommandLine): + self._interface.write_cmdline = True + @property def interface(self): """Return the underlying interface object""" @@ -711,16 +714,13 @@ def _run_command(self, execute, copyfiles=True): f'[Node] Executing "{self.name}" <{self._interface.__module__}' f".{self._interface.__class__.__name__}>" ) + # Invoke core run method of the interface ignoring exceptions result = self._interface.run(cwd=outdir, ignore_exception=True) logger.info( f'[Node] Finished "{self.name}", 
From f4691be97456531a72c9c6ef8e21aade0d2695fe Mon Sep 17 00:00:00 2001
From: Fabio Bernardoni
Date: Fri, 22 Oct 2021 14:32:53 +0200
Subject: [PATCH 1128/1665] formatting

---
 nipype/interfaces/spm/preprocess.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/nipype/interfaces/spm/preprocess.py b/nipype/interfaces/spm/preprocess.py
index a589c4c80d..a4f4643f6b 100644
--- a/nipype/interfaces/spm/preprocess.py
+++ b/nipype/interfaces/spm/preprocess.py
@@ -3036,4 +3036,3 @@ def _parse_inputs(self):
             return einputs
         else:
             return super(VBMSegment, self)._parse_inputs(skip=("spatial_normalization"))
-

From 5be8307d34b948f9c29231683fc8dd3d830834e1 Mon Sep 17 00:00:00 2001
From: mnoergaard
Date: Mon, 25 Oct 2021 15:38:10 +0200
Subject: [PATCH 1129/1665] Add save-yhat flag to MRTM2 class

---
 nipype/interfaces/freesurfer/petsurfer.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/nipype/interfaces/freesurfer/petsurfer.py b/nipype/interfaces/freesurfer/petsurfer.py
index 55adc7efd5..2b8d6ee877 100644
--- a/nipype/interfaces/freesurfer/petsurfer.py
+++ b/nipype/interfaces/freesurfer/petsurfer.py
@@ -493,9 +493,13 @@ class MRTM2InputSpec(GLMFitInputSpec):
         desc='save outputs as nii.gz',
         xor=_ext_xor
     )
+    yhat_save = traits.Bool(
+        argstr="--save-yhat", desc="save signal estimate (yhat)"
+    )
 
 class MRTM2OutputSpec(GLMFitInputSpec):
     bp = File(desc="BP estimates")
+    yhat = File(desc="Fitted values (yhat) to the TACs")
 
 class MRTM2(GLMFit):
     """Perform MRTM2 kinetic modeling.
@@ -521,6 +525,7 @@ def _list_outputs(self):
         else:
             ext = '.mgh'
         outputs['bp'] = os.join(self.inputs.glm_dir, 'bp', ext)
+        outputs['yhat'] = os.join(self.inputs.glm_dir, 'yhat', ext)
         return outputs
 
 class LoganRefInputSpec(GLMFitInputSpec):
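A minimal sketch of how the new flag might be wired up (illustrative only: yhat_save, glm_dir and the yhat output come from the patch above; in_file is the usual GLMFit input, and the MRTM2-specific model inputs are omitted here):

    from nipype.interfaces.freesurfer.petsurfer import MRTM2

    mrtm2 = MRTM2()
    mrtm2.inputs.in_file = "tac.nii.gz"   # hypothetical time-activity-curve volume
    mrtm2.inputs.glm_dir = "mrtm2"        # output directory read by _list_outputs above
    mrtm2.inputs.yhat_save = True         # adds --save-yhat to the underlying mri_glmfit call
    # ... plus the MRTM2-specific model inputs required by MRTM2InputSpec ...
    result = mrtm2.run()
    # result.outputs.yhat should then point at the fitted (yhat) values under glm_dir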
From 55872425409b8a5d2446adae062b79da464ff780 Mon Sep 17 00:00:00 2001
From: Rinat Mukhometzianov <25082858+rmukh@users.noreply.github.com>
Date: Mon, 25 Oct 2021 18:28:02 -0400
Subject: [PATCH 1130/1665] Side bar and main page fixes

---
 doc/_static/nipype.css     | 4 ++++
 doc/_templates/layout.html | 5 +++--
 doc/_templates/navbar.html | 1 -
 doc/conf.py                | 2 +-
 4 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/doc/_static/nipype.css b/doc/_static/nipype.css
index cec080b3d6..5aceb4c42a 100644
--- a/doc/_static/nipype.css
+++ b/doc/_static/nipype.css
@@ -5,6 +5,10 @@ body {
   min-width: 70em;
 }
 
+div.sphinxsidebar {
+  position: relative;
+}
+
 div.sphinxsidebar h4, div.sphinxsidebar h3 {
   background-color: #2F83C8;
 }
diff --git a/doc/_templates/layout.html b/doc/_templates/layout.html
index a8de8d176b..2228087cec 100644
--- a/doc/_templates/layout.html
+++ b/doc/_templates/layout.html
@@ -2,6 +2,9 @@
 {% set title = 'Neuroimaging in Python - Pipelines and Interfaces' %}
 {% set short_title = 'Nipype' %}
 
+{% block sidebar1 %}{{ sidebar() }}{% endblock %}
+{% block sidebar2 %}{% endblock %}
+
 {% block extrahead %}
 {{ super() }}
 
@@ -42,8 +45,6 @@
 {% block relbar1 %}{% endblock %}
 {% block relbar2 %}{% endblock %}
 
-{% block sidebar1 %}{{ sidebar() }}{% endblock %}
-{% block sidebar2 %}{% endblock %}
 
 {% block footer %}
 {{ super() }}
diff --git a/doc/_templates/navbar.html b/doc/_templates/navbar.html
index fd0216b53f..9afb8ff8d5 100644
--- a/doc/_templates/navbar.html
+++ b/doc/_templates/navbar.html
@@ -1,7 +1,6 @@